diff --git a/.ct.yml b/.ct.yml new file mode 100644 index 00000000..68a9198e --- /dev/null +++ b/.ct.yml @@ -0,0 +1,5 @@ +# See: https://github.com/helm/chart-testing +target-branch: develop +chart-dirs: 'charts' +check-version-increment: false +validate-maintainers: false diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..c3e757b3 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,32 @@ +.git-together +.DS_Store +.envrc +*.log +node_modules/ +**/node_modules/ +vendor/ +tmp/ + +contracts/node_modules +examples/ + +integration/ +integration-scripts/ +integration-tests/ + +tools/gethnet/datadir/geth +tools/clroot/db.bolt +tools/clroot/*.log +tools/clroot/tempkeys + +core/sgx/target/ + +core/*.Dockerfile +plugin + +# codeship +codeship-*.yml +*.aes +dockercfg +credentials.env +gcr_creds.env diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..52031de5 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.sol linguist-language=Solidity diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..d2a6b2d1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: true +contact_links: + - name: Question + url: https://stackoverflow.com/questions/tagged/plugin + about: Please ask and answer questions here. diff --git a/.github/ISSUE_TEMPLATE/development.md b/.github/ISSUE_TEMPLATE/development.md new file mode 100644 index 00000000..45baaccf --- /dev/null +++ b/.github/ISSUE_TEMPLATE/development.md @@ -0,0 +1,22 @@ +--- +name: Development Issue +about: Report an issue encountered while working on code found in this repository. 
+title: '[DEVEL] ' +labels: 'Development' +assignees: '' +--- + +**Description** +[replace this line with a clear and concise description of the development issue you are experiencing] + +**Your Environment** +[replace this line with basic information about your environment, such as your operating system and the versions of any relevant tools you are using (e.g. Go, Docker)] + +**Basic Information** +[replace this line with basic information about the issue you are experiencing, including but not limited to the names of the files you are working with and any relevant error messages] + +**Steps to Reproduce** +[replace this line with detailed steps to reproduce the issue you are experiencing] + +**Additional Information** +[replace this line with any additional information you would like to provide, such as screenshots illustrating the issue] diff --git a/.github/ISSUE_TEMPLATE/faucet.md b/.github/ISSUE_TEMPLATE/faucet.md new file mode 100644 index 00000000..23dcee6b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/faucet.md @@ -0,0 +1,19 @@ +--- +name: Faucet Issue +about: Report an issue with a Plugin PLI Faucet. 
+title: '[FAUC] ' +labels: 'Faucet' +assignees: '' +--- + +**Description** +[replace this line with a clear and concise description of the Plugin PLI Faucet issue you are experiencing] + +**Basic Information** +[replace this line with basic information about the issue you are experiencing, including but not limited to your testnet address, the name and version of your web browser and wallet, and the link to the faucet transaction on Etherscan] + +**Steps to Reproduce** +[replace this line with detailed steps to reproduce the issue you are experiencing] + +**Additional Information** +[replace this line with any additional information you would like to provide, such as screenshots illustrating the issue] diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 00000000..ce81eb46 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,19 @@ +--- +name: Feature Request +about: Request a feature. Help us make Plugin better! +title: '[FEAT] ' +labels: 'Feature Request' +assignees: '' +--- + +**Description** +[replace this line with a clear and concise description of the feature you are requesting] + +**Motivation** +[replace this line with a clear and concise explanation of _why_ you are requesting this feature] + +**Justification** +[replace this line with a clear and concise explanation of _why_ the feature you are requesting is the best way to approach this issue and list other approaches you considered] + +**Additional Information** +[replace this line with any additional information you would like to provide, such as examples or screenshots of similar features] diff --git a/.github/ISSUE_TEMPLATE/node-operator.md b/.github/ISSUE_TEMPLATE/node-operator.md new file mode 100644 index 00000000..f15ca120 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/node-operator.md @@ -0,0 +1,30 @@ +--- +name: Node Operator Issue +about: Report an issue encountered while operating a Plugin node. 
+title: '[NODE] ' +labels: 'Node Operator' +assignees: '' +--- + +**Description** +[replace this line with a clear and concise description of the issue you are experiencing] + +**Basic Information** +[replace this line with basic information about the issue you are experiencing, including but not limited to all relevant logs and any other relevant information, such as if you are using a Docker container to run the node, job specification, oracle contract address, transaction IDs, etc.] + +- Network: [e.g. Ethereum Mainnet, Ropsten] +- Blockchain Client: [name and version of blockchain client e.g. Geth v1.9.6] +- Go Version: [e.g. v1.12] +- Operating System: [name and version of operating system running Plugin node] +- Commit: [log INFO line when starting node] +- Hosting Provider: [e.g. AWS, GCP, self-hosted] +- Startup Command: [e.g. `docker run smartcontract/plugin local n`] + +**Environment Variables** +[replace this line with the output of the environment variables when running the node in debug mode] + +**Steps to Reproduce** +[replace this line with detailed steps to reproduce the issue you are experiencing] + +**Additional Information** +[replace this line with any additional information you would like to provide, such as screenshots illustrating the issue] diff --git a/.github/ISSUE_TEMPLATE/smart-contract.md b/.github/ISSUE_TEMPLATE/smart-contract.md new file mode 100644 index 00000000..61b53c1c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/smart-contract.md @@ -0,0 +1,19 @@ +--- +name: Smart Contract Issue +about: Report an issue with smart contracts found in this repository. 
+title: '[SMRT] ' +labels: 'Smart Contract' +assignees: '' +--- + +**Description** +[replace this line with a clear and concise description of the smart contract issue you are experiencing] + +**Basic Information** +[replace this line with basic information about the issue you are experiencing, including but not limited to the names of the smart contract files and the version of the Plugin software repository in which they are found, contract addresses, transaction IDs, etc.] + +**Steps to Reproduce** +[replace this line with detailed steps to reproduce the issue you are experiencing] + +**Additional Information** +[replace this line with any additional information you would like to provide, such as screenshots illustrating the issue] diff --git a/.github/actions/build-plugin-image/action.yml b/.github/actions/build-plugin-image/action.yml new file mode 100644 index 00000000..ad0e3740 --- /dev/null +++ b/.github/actions/build-plugin-image/action.yml @@ -0,0 +1,48 @@ +name: Build Plugin Image +description: A composite action that allows building and publishing the Plugin image for integration testing + +inputs: + tag_suffix: + description: The suffix to append to the image tag (usually blank or "-plugins") + default: "" + dockerfile: + description: The path to the Dockerfile to use (usually core/plugin.Dockerfile or plugins/plugin.Dockerfile) + default: core/plugin.Dockerfile + git_commit_sha: + description: The git commit sha to use for the image tag + default: ${{ github.sha }} + GRAFANA_CLOUD_BASIC_AUTH: + description: "grafana cloud basic auth" + GRAFANA_CLOUD_HOST: + description: "grafana cloud hostname" + AWS_REGION: + description: "AWS region to use for ECR" + AWS_ROLE_TO_ASSUME: + description: "AWS role to assume for ECR" + +runs: + using: composite + steps: + - name: Check if image exists + id: check-image + uses: goplugin/plugin-github-actions/docker/image-exists@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + repository: plugin + tag: ${{ 
inputs.git_commit_sha }}${{ inputs.tag_suffix }} + AWS_REGION: ${{ inputs.AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ inputs.AWS_ROLE_TO_ASSUME }} + - name: Build Image + if: steps.check-image.outputs.exists == 'false' + uses: goplugin/plugin-github-actions/plugin-testing-framework/build-image@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + cl_repo: goplugin/pluginv3.0 + cl_ref: ${{ inputs.git_commit_sha }} + cl_dockerfile: ${{ inputs.dockerfile }} + push_tag: ${{ env.PLUGIN_IMAGE }}:${{ inputs.git_commit_sha }}${{ inputs.tag_suffix }} + QA_AWS_REGION: ${{ inputs.AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ inputs.AWS_ROLE_TO_ASSUME }} + - name: Print Plugin Image Built + shell: sh + run: | + echo "### Plugin node image tag used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${GITHUB_SHA}\`" >>$GITHUB_STEP_SUMMARY diff --git a/.github/actions/build-sign-publish-plugin/action.yml b/.github/actions/build-sign-publish-plugin/action.yml new file mode 100644 index 00000000..423099d7 --- /dev/null +++ b/.github/actions/build-sign-publish-plugin/action.yml @@ -0,0 +1,300 @@ +name: Build and Publish Plugin + +description: A composite action that allows building and publishing signed plugin images. 
+ +inputs: + # Inputs for publishing + publish: + description: When set to the string boolean value of "true", the resulting built image will be published + default: "false" + required: false + + dockerfile: + description: Path to the Dockerfile (relative to the repo root) + default: core/plugin.Dockerfile + required: false + dockerhub_username: + description: Username for Docker Hub to avoid rate limits when pulling public images + required: false + dockerhub_password: + description: Password for Docker Hub to avoid rate limits when pulling public images + required: false + ecr-hostname: + description: The ECR registry scope + default: public.ecr.aws + required: false + ecr-image-name: + description: | + The image name with path, in the format of `[registry]/repository`. For private ECR repos the registry name is optional, where for public repos, it is required. + Eg. Public ECR repo `plugin` and registry alias `pluginlabs` should be `pluginlabs/plugin`. For a private ECR repo `plugin` the image name should be `plugin` + default: plugin/plugin + required: false + ecr-tag-suffix: + description: Docker image tag suffix + required: false + git-commit-sha: + description: Git commit SHA used as metadata when building the application (appears in logs) + default: ${{ github.event.pull_request.head.sha || github.sha }} + required: false + aws-role-to-assume: + description: The AWS role to assume as the CD user, if any. 
Used in configuring the docker/login-action + required: false + aws-role-duration-seconds: + description: The duration of the role assumed + required: false + aws-region: + description: The AWS region the ECR repository is located in, should only be needed for public ECR repositories, used in configuring docker/login-action + required: false + + # Inputs for signing + sign-images: + description: When set to the string boolean value of "true", the resulting build image will be signed + default: "false" + required: false + cosign-private-key: + description: The private key to be used with cosign to sign the image + required: false + cosign-public-key: + description: The public key to be used with cosign for verification + required: false + cosign-password: + description: The password to decrypt the cosign private key needed to sign the image + required: false + sign-method: + description: Build image will be signed using keypair or keyless methods + default: "keypair" + required: true + verify-signature: + description: When set to the string boolean value of "true", the resulting build image signature will be verified + default: "false" + required: false + +runs: + using: composite + steps: + - name: Set shared variables + shell: bash + # See https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings + run: | + SHARED_IMAGES=${{ inputs.ecr-hostname }}/${{ inputs.ecr-image-name }} + + SHARED_TAG_LIST=$(cat << EOF + type=ref,event=branch,suffix=${{ inputs.ecr-tag-suffix }} + type=semver,pattern={{version}},suffix=${{ inputs.ecr-tag-suffix }} + type=sha,format=short,suffix=${{ inputs.ecr-tag-suffix }} + EOF + ) + + SHARED_BUILD_ARGS=$(cat << EOF + COMMIT_SHA=${{ inputs.git-commit-sha }} + EOF + ) + + echo "shared-images<> $GITHUB_ENV + echo "$SHARED_IMAGES" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + + echo "shared-tag-list<> $GITHUB_ENV + echo "$SHARED_TAG_LIST" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + + echo 
"shared-build-args<> $GITHUB_ENV + echo "$SHARED_BUILD_ARGS" >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + + - if: inputs.publish == 'true' + # Log in to AWS for publish to ECR + name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + with: + role-to-assume: ${{ inputs.aws-role-to-assume }} + role-duration-seconds: ${{ inputs.aws-role-duration-seconds }} + aws-region: ${{ inputs.aws-region }} + + - if: inputs.publish == 'true' + name: Login to ECR + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 + with: + registry: ${{ inputs.ecr-hostname }} + + - name: Setup Docker Buildx + uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 + + - name: Generate docker metadata for root image + id: meta-root + uses: docker/metadata-action@dbef88086f6cef02e264edb7dbf63250c17cef6c # v5.5.0 + env: + DOCKER_METADATA_PR_HEAD_SHA: "true" + with: + # list of Docker images to use as base name for tags + images: ${{ env.shared-images }} + # XXX: DO NOT USE SHARED TAGS HERE + tags: | + type=ref,event=branch,suffix=${{ inputs.ecr-tag-suffix }}-root + type=semver,pattern={{version}},suffix=${{ inputs.ecr-tag-suffix }}-root + type=sha,format=short,suffix=${{ inputs.ecr-tag-suffix }}-root + + # To avoid rate limiting from Docker Hub, we login with a paid user account. + - name: Login to Docker Hub + if: inputs.dockerhub_username && inputs.dockerhub_password + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 + with: + username: ${{ inputs.dockerhub_username }} + password: ${{ inputs.dockerhub_password }} + + - name: Build and push root docker image + id: buildpush-root + uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 + with: + push: ${{ inputs.publish }} + context: . 
+ load: ${{ contains(inputs.publish, false) }} + tags: ${{ steps.meta-root.outputs.tags }} + labels: ${{ steps.meta-root.outputs.labels }} + file: ${{ inputs.dockerfile }} + build-args: | + PLUGIN_USER=root + ${{ env.shared-build-args }} + + - name: Save root image name in GITHUB_ENV + id: save-root-image-name-env + shell: sh + run: | + IMAGES_NAME_RAW=${{ fromJSON(steps.buildpush-root.outputs.metadata)['image.name'] }} + IMAGE_NAME=$(echo "$IMAGES_NAME_RAW" | cut -d"," -f1) + echo "root_image_name=${IMAGE_NAME}" >> $GITHUB_ENV + + - name: Generate docker metadata for non-root image + id: meta-nonroot + uses: docker/metadata-action@dbef88086f6cef02e264edb7dbf63250c17cef6c # v5.5.0 + env: + DOCKER_METADATA_PR_HEAD_SHA: "true" + with: + flavor: | + latest=auto + prefix= + suffix= + images: ${{ env.shared-images }} + tags: ${{ env.shared-tag-list }} + + # To avoid rate limiting from Docker Hub, we login with a paid user account. + - name: Login to Docker Hub + if: inputs.dockerhub_username && inputs.dockerhub_password + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 + with: + username: ${{ inputs.dockerhub_username }} + password: ${{ inputs.dockerhub_password }} + + - name: Build and push non-root docker image + id: buildpush-nonroot + uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 + with: + push: ${{ inputs.publish }} + context: . 
+ load: ${{ contains(inputs.publish, false) }} + tags: ${{ steps.meta-nonroot.outputs.tags }} + labels: ${{ steps.meta-nonroot.outputs.labels }} + file: ${{ inputs.dockerfile }} + build-args: | + PLUGIN_USER=plugin + ${{ env.shared-build-args }} + + - name: Save non-root image name in GITHUB_ENV and GITHUB_STEP_SUMMARY + id: save-non-root-image-name-env + shell: sh + run: | + IMAGES_NAME_RAW=${{ fromJSON(steps.buildpush-nonroot.outputs.metadata)['image.name'] }} + IMAGE_DIGEST=${{ fromJSON(steps.buildpush-nonroot.outputs.metadata)['containerimage.digest'] }} + IMAGE_NAME=$(echo "$IMAGES_NAME_RAW" | cut -d"," -f1) + echo "nonroot_image_name=${IMAGE_NAME}" >> $GITHUB_ENV + echo '### Docker Image' >> $GITHUB_STEP_SUMMARY + echo "Image Name: ${IMAGE_NAME}" >> $GITHUB_STEP_SUMMARY + echo "Image Digest: ${IMAGE_DIGEST}" >> $GITHUB_STEP_SUMMARY + + - name: Check if non-root image runs as root + id: check-nonroot-runs-root + shell: sh + env: + PUBLISH: ${{ inputs.publish }} + run: | + echo "Fail build if non-root image runs as user: root" + # if we're publishing the image, it doesn't get loaded into the local docker daemon + # so we need to pull the image into our daemon + if [ $PUBLISH = "true" ]; then + docker pull "${nonroot_image_name}" + fi + docker inspect "${nonroot_image_name}" | jq -r '.[].Config.User' | ( ! 
grep "root" ) + + - if: inputs.sign-images == 'true' + name: Install cosign + uses: sigstore/cosign-installer@11086d25041f77fe8fe7b9ea4e48e3b9192b8f19 # v3.1.2 + with: + cosign-release: "v1.6.0" + + - if: inputs.sign-images == 'true' && inputs.sign-method == 'keypair' + name: Sign the published root Docker image using keypair method + shell: sh + env: + COSIGN_PASSWORD: "${{ inputs.cosign-password }}" + run: | + echo "${{ inputs.cosign-private-key }}" > cosign.key + cosign sign --key cosign.key "${{ env.root_image_name }}" + rm -f cosign.key + + - if: inputs.verify-signature == 'true' && inputs.sign-method == 'keypair' + name: Verify the signature of the published root Docker image using keypair + shell: sh + run: | + echo "${{ inputs.cosign-public-key }}" > cosign.key + cosign verify --key cosign.key "${{ env.root_image_name }}" + rm -f cosign.key + + - if: inputs.sign-images == 'true' && inputs.sign-method == 'keyless' + name: Sign the published root Docker image using keyless method + shell: sh + env: + COSIGN_EXPERIMENTAL: 1 + run: | + cosign sign "${{ env.root_image_name }}" + + - if: inputs.verify-signature == 'true' && inputs.sign-method == 'keyless' + name: Verify the signature of the published root Docker image using keyless + shell: sh + env: + COSIGN_EXPERIMENTAL: 1 + run: | + cosign verify "${{ env.root_image_name }}" + + - if: inputs.sign-images == 'true' && inputs.sign-method == 'keypair' + name: Sign the published non-root Docker image using keypair method + shell: sh + env: + COSIGN_PASSWORD: "${{ inputs.cosign-password }}" + run: | + echo "${{ inputs.cosign-private-key }}" > cosign.key + cosign sign --key cosign.key "${{ env.nonroot_image_name }}" + rm -f cosign.key + + - if: inputs.verify-signature == 'true' && inputs.sign-method == 'keypair' + name: Verify the signature of the published non-root Docker image using keypair + shell: sh + run: | + echo "${{ inputs.cosign-public-key }}" > cosign.key + cosign verify --key cosign.key "${{ 
env.nonroot_image_name }}" + rm -f cosign.key + + - if: inputs.sign-images == 'true' && inputs.sign-method == 'keyless' + name: Sign the published non-root Docker image using keyless method + shell: sh + env: + COSIGN_EXPERIMENTAL: 1 + run: | + cosign sign "${{ env.nonroot_image_name }}" + + - if: inputs.verify-signature == 'true' && inputs.sign-method == 'keyless' + name: Verify the signature of the published non-root Docker image using keyless + shell: sh + env: + COSIGN_EXPERIMENTAL: 1 + run: | + cosign verify "${{ env.nonroot_image_name }}" diff --git a/.github/actions/build-test-image/action.yml b/.github/actions/build-test-image/action.yml new file mode 100644 index 00000000..419e4c00 --- /dev/null +++ b/.github/actions/build-test-image/action.yml @@ -0,0 +1,123 @@ +name: Build Test Image +description: A composite action that allows building and publishing the test remote runner image + +inputs: + repository: + description: The docker repository for the image + default: plugin-tests + required: false + tag: + description: The tag to use by default and to use for checking image existance + default: ${{ github.sha }} + required: false + other_tags: + description: Other tags to push if needed + required: false + suites: + description: The test suites to build into the image + default: chaos migration reorg smoke soak benchmark load/automationv2_1 + required: false + QA_AWS_ROLE_TO_ASSUME: + description: The AWS role to assume as the CD user, if any. 
Used in configuring the docker/login-action + required: true + QA_AWS_REGION: + description: The AWS region the ECR repository is located in, should only be needed for public ECR repositories, used in configuring docker/login-action + required: true + QA_AWS_ACCOUNT_NUMBER: + description: The AWS region the ECR repository is located in, should only be needed for public ECR repositories, used in configuring docker/login-action + required: true + +runs: + using: composite + steps: + + # Base Test Image Logic + - name: Get CTF Version + id: version + uses: goplugin/plugin-github-actions/plugin-testing-framework/mod-version@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + go-project-path: ./integration-tests + module-name: github.com/goplugin/plugin-testing-framework + enforce-semantic-tag: false + - name: Get CTF sha + if: steps.version.outputs.is_semantic == 'false' + id: short_sha + env: + VERSION: ${{ steps.version.outputs.version }} + shell: bash + run: | + short_sha="${VERSION##*-}" + echo "short sha is: ${short_sha}" + echo "short_sha=${short_sha}" >> "$GITHUB_OUTPUT" + - name: Checkout plugin-testing-framework + if: steps.version.outputs.is_semantic == 'false' + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: goplugin/plugin-testing-framework + ref: main + fetch-depth: 0 + path: ctf + - name: Get long sha + if: steps.version.outputs.is_semantic == 'false' + id: long_sha + env: + SHORT_SHA: ${{ steps.short_sha.outputs.short_sha }} + shell: bash + run: | + cd ctf + long_sha=$(git rev-parse ${SHORT_SHA}) + echo "sha is: ${long_sha}" + echo "long_sha=${long_sha}" >> "$GITHUB_OUTPUT" + - name: Check if test base image exists + if: steps.version.outputs.is_semantic == 'false' + id: check-base-image + uses: goplugin/plugin-github-actions/docker/image-exists@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + repository: ${{ inputs.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ inputs.QA_AWS_REGION 
}}.amazonaws.com/test-base-image + tag: ${{ steps.long_sha.outputs.long_sha }} + AWS_REGION: ${{ inputs.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ inputs.QA_AWS_ROLE_TO_ASSUME }} + - name: Build Base Image + if: steps.version.outputs.is_semantic == 'false' && steps.check-base-image.outputs.exists == 'false' + uses: goplugin/plugin-github-actions/docker/build-push@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + env: + BASE_IMAGE_NAME: ${{ inputs.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ inputs.QA_AWS_REGION }}.amazonaws.com/test-base-image:${{ steps.long_sha.outputs.long_sha }} + with: + tags: ${{ env.BASE_IMAGE_NAME }} + file: ctf/k8s/Dockerfile.base + AWS_REGION: ${{ inputs.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ inputs.QA_AWS_ROLE_TO_ASSUME }} + # End Base Image Logic + + # Test Runner Logic + - name: Check if image exists + id: check-image + uses: goplugin/plugin-github-actions/docker/image-exists@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + repository: ${{ inputs.repository }} + tag: ${{ inputs.tag }} + AWS_REGION: ${{ inputs.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ inputs.QA_AWS_ROLE_TO_ASSUME }} + - name: Build and Publish Test Runner + if: steps.check-image.outputs.exists == 'false' + uses: goplugin/plugin-github-actions/docker/build-push@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + tags: | + ${{ inputs.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ inputs.QA_AWS_REGION }}.amazonaws.com/${{ inputs.repository }}:${{ inputs.tag }} + ${{ inputs.other_tags }} + file: ./integration-tests/test.Dockerfile + build-args: | + BASE_IMAGE=${{ inputs.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ inputs.QA_AWS_REGION }}.amazonaws.com/test-base-image + IMAGE_VERSION=${{ steps.long_sha.outputs.long_sha || steps.version.outputs.version }} + SUITES="${{ inputs.suites }}" + AWS_REGION: ${{ inputs.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ inputs.QA_AWS_ROLE_TO_ASSUME }} + - name: Print Image Built + shell: sh + env: + INPUTS_REPOSITORY: ${{ inputs.repository }} 
+ INPUTS_TAG: ${{ inputs.tag }} + run: | + echo "### ${INPUTS_REPOSITORY} image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY + echo "\`${INPUTS_TAG}\`" >>$GITHUB_STEP_SUMMARY + # End Test Runner Logic diff --git a/.github/actions/delete-deployments/action.yml b/.github/actions/delete-deployments/action.yml new file mode 100644 index 00000000..e2767ccc --- /dev/null +++ b/.github/actions/delete-deployments/action.yml @@ -0,0 +1,59 @@ +name: Delete Deployments +description: Delete deployments by env and ref +inputs: + environment: + required: true + description: The Github environment to filter deployments by + ref: + required: true + description: The ref to filter deployments by + dry-run: + required: false + description: Whether to actually delete deployments or not + github-token: + description: "The Github token to use for authentication" + required: true + default: ${{ github.token }} + num-of-pages: + required: false + description: The number of pages (of 100 per page) to fetch deployments from, set to 'all' to fetch all deployments + default: "all" + starting-page: + required: false + description: The page to start fetching deployments from, only valid if num-of-pages is set to a number + repository: + required: false + description: The owner and repository name to delete deployments from, defaults to the current repository, ex. 
'goplugin/pluginv3.0' + default: ${{ github.repository }} + +runs: + using: composite + steps: + - uses: pnpm/action-setup@a3252b78c470c02df07e9d59298aecedc3ccdd6d # v3.0.0 + with: + version: ^8.0.0 + + - uses: actions/setup-node@8f152de45cc393bb48ce5d89d36b731f54556e65 # v4.0.0 + with: + node-version: "18" + cache: "pnpm" + cache-dependency-path: "./.github/actions/delete-deployments/pnpm-lock.yaml" + + - name: Install dependencies + shell: bash + run: pnpm i --prod + working-directory: "./.github/actions/delete-deployments" + + - name: Run deployment deleter + shell: bash + run: pnpm start + env: + NUM_OF_PAGES: ${{ inputs.num-of-pages }} + STARTING_PAGE: ${{ inputs.starting-page }} + GITHUB_TOKEN: ${{ inputs.github-token }} + ENVIRONMENT: ${{ inputs.environment }} + REF: ${{ inputs.ref }} + DRY_RUN: ${{ inputs.dry-run }} + OWNER: ${{ inputs.owner }} + REPOSITORY: ${{ inputs.repository }} + working-directory: "./.github/actions/delete-deployments" diff --git a/.github/actions/delete-deployments/index.ts b/.github/actions/delete-deployments/index.ts new file mode 100644 index 00000000..e38f1957 --- /dev/null +++ b/.github/actions/delete-deployments/index.ts @@ -0,0 +1,232 @@ +import { Octokit } from "@octokit/action"; +import { info, warning, isDebug } from "@actions/core"; +import { throttling } from "@octokit/plugin-throttling"; +import { retry } from "@octokit/plugin-retry"; + +async function main() { + const { + dryRun, + environment, + numOfPages, + owner, + ref, + repo, + debug, + startingPage, + } = getInputs(); + const octokit = getOctokit(debug); + + const deployments = await getDeployments({ + octokit, + owner, + repo, + environment, + ref, + paginateOptions: { + numOfPages, + startingPage, + }, + }); + const deploymentIds = deployments.map((d) => d.id); + if (dryRun) { + info(`Dry run: would delete deployments (${deploymentIds.length})`); + return; + } + + info(`Deleting deployments (${deploymentIds.length})`); + const deleteDeployments = 
deploymentIds.map(async (id) => { + const sharedArgs = { + owner, + repo, + deployment_id: id, + request: { + retries: 0, + }, + }; + + const setStatus = await octokit.repos + .createDeploymentStatus({ + ...sharedArgs, + state: "inactive", + }) + .then(() => true) + .catch((e) => { + warning( + `Marking deployment id ${id} to "inactive" failed: ${e.message}` + ); + return false; + }); + if (!setStatus) return false; + + return octokit.repos + .deleteDeployment({ + ...sharedArgs, + }) + .then(() => true) + .catch((e) => { + warning(`Deleting deployment id ${id} failed: ${e.message}`); + return false; + }); + }); + + const processed = await Promise.all(deleteDeployments); + const succeeded = processed.filter((p) => !!p); + info( + `Successfully deleted ${succeeded.length}/${processed.length} deployments` + ); +} +main(); + +function getInputs() { + const debug = !!(process.env.DEBUG || isDebug()); + + const dryRun = process.env.DRY_RUN === "true"; + + const environment = process.env.ENVIRONMENT; + if (!environment) throw new Error("ENVIRONMENT not set"); + + const ref = process.env.REF; + + const repository = process.env.REPOSITORY; + if (!repository) throw new Error("REPOSITORY not set"); + const [owner, repo] = repository.split("/"); + + const rawStartingPage = process.env.STARTING_PAGE; + + let startingPage: number | undefined; + if (rawStartingPage) { + startingPage = parseInt(rawStartingPage); + if (isNaN(startingPage)) { + throw new Error(`STARTING_PAGE is not a number: ${rawStartingPage}`); + } + if (startingPage < 0) { + throw new Error( + `STARTING_PAGE must be a positive integer or zero: ${rawStartingPage}` + ); + } + info(`Starting from page ${startingPage}`); + } + + const rawNumOfPages = process.env.NUM_OF_PAGES; + let numOfPages: "all" | number = "all"; + if (rawNumOfPages === "all") { + info("Fetching all pages of deployments"); + } else { + const parsedPages = parseInt(rawNumOfPages || ""); + if (isNaN(parsedPages)) { + throw new Error(`NUM_OF_PAGES 
is not a number: ${rawNumOfPages}`); + } + if (parsedPages < 1) { + throw new Error(`NUM_OF_PAGES must be greater than 0: ${rawNumOfPages}`); + } + numOfPages = parsedPages; + } + + if (numOfPages === "all" && startingPage) { + throw new Error(`Cannot use STARTING_PAGE with NUM_OF_PAGES=all`); + } + + const parsedInputs = { + environment, + ref, + owner, + repo, + numOfPages, + startingPage, + dryRun, + debug, + }; + info(`Configuration: ${JSON.stringify(parsedInputs)}`); + return parsedInputs; +} + +function getOctokit(debug: boolean) { + const OctokitAPI = Octokit.plugin(throttling, retry); + const octokit = new OctokitAPI({ + log: debug ? console : undefined, + throttle: { + onRateLimit: (retryAfter, options, octokit, retryCount) => { + octokit.log.warn( + // Types are busted from octokit + //@ts-expect-error + `Request quota exhausted for request ${options.method} ${options.url}` + ); + + octokit.log.info(`Retrying after ${retryAfter} seconds!`); + return true; + }, + onSecondaryRateLimit: (_retryAfter, options, octokit) => { + octokit.log.warn( + // Types are busted from octokit + //@ts-expect-error + `SecondaryRateLimit detected for request ${options.method} ${options.url}` + ); + return true; + }, + }, + }); + + return octokit; +} + +async function getDeployments({ + octokit, + owner, + repo, + environment, + ref, + paginateOptions, +}: { + octokit: ReturnType; + owner: string; + repo: string; + environment: string; + ref?: string; + paginateOptions: { + numOfPages: number | "all"; + startingPage?: number; + }; +}) { + const listDeploymentsSharedArgs: Parameters< + typeof octokit.repos.listDeployments + >[0] = { + owner, + repo, + environment, + ref, + per_page: 100, + request: { + retries: 20, + }, + }; + + if (paginateOptions.numOfPages === "all") { + info(`Fetching all deployments`); + const deployments = await octokit.paginate(octokit.repos.listDeployments, { + ...listDeploymentsSharedArgs, + }); + + return deployments; + } else { + info( + `Fetching ${ 
+ paginateOptions.numOfPages * listDeploymentsSharedArgs.per_page! + } deployments` + ); + const deployments: Awaited< + ReturnType + >["data"] = []; + + const offset = paginateOptions.startingPage || 0; + for (let i = offset; i < paginateOptions.numOfPages + offset; i++) { + const deploymentPage = await octokit.repos.listDeployments({ + ...listDeploymentsSharedArgs, + page: i, + }); + + deployments.push(...deploymentPage.data); + } + + return deployments; + } +} diff --git a/.github/actions/delete-deployments/package.json b/.github/actions/delete-deployments/package.json new file mode 100644 index 00000000..7045cb35 --- /dev/null +++ b/.github/actions/delete-deployments/package.json @@ -0,0 +1,25 @@ +{ + "name": "delete-deployments", + "version": "1.0.0", + "description": "", + "main": "index.ts", + "scripts": { + "start": "ts-node -T .", + "test": "echo \"Error: no test specified\" && exit 1" + }, + "keywords": [], + "author": "", + "license": "ISC", + "dependencies": { + "@actions/core": "^1.10.1", + "@octokit/action": "^6.0.5", + "@octokit/plugin-retry": "^6.0.0", + "@octokit/plugin-throttling": "^7.0.0", + "ts-node": "^10.9.1" + }, + "devDependencies": { + "@octokit/types": "^11.1.0", + "@types/node": "^18", + "typescript": "^5.2.2" + } +} diff --git a/.github/actions/delete-deployments/pnpm-lock.yaml b/.github/actions/delete-deployments/pnpm-lock.yaml new file mode 100644 index 00000000..a5553eb3 --- /dev/null +++ b/.github/actions/delete-deployments/pnpm-lock.yaml @@ -0,0 +1,350 @@ +lockfileVersion: '6.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +dependencies: + '@actions/core': + specifier: ^1.10.1 + version: 1.10.1 + '@octokit/action': + specifier: ^6.0.5 + version: 6.0.5 + '@octokit/plugin-retry': + specifier: ^6.0.0 + version: 6.0.0(@octokit/core@5.0.0) + '@octokit/plugin-throttling': + specifier: ^7.0.0 + version: 7.0.0(@octokit/core@5.0.0) + ts-node: + specifier: ^10.9.1 + version: 
10.9.1(@types/node@18.17.15)(typescript@5.2.2) + +devDependencies: + '@octokit/types': + specifier: ^11.1.0 + version: 11.1.0 + '@types/node': + specifier: ^18 + version: 18.17.15 + typescript: + specifier: ^5.2.2 + version: 5.2.2 + +packages: + + /@actions/core@1.10.1: + resolution: {integrity: sha512-3lBR9EDAY+iYIpTnTIXmWcNbX3T2kCkAEQGIQx4NVQ0575nk2k3GRZDTPQG+vVtS2izSLmINlxXf0uLtnrTP+g==} + dependencies: + '@actions/http-client': 2.1.1 + uuid: 8.3.2 + dev: false + + /@actions/http-client@2.1.1: + resolution: {integrity: sha512-qhrkRMB40bbbLo7gF+0vu+X+UawOvQQqNAA/5Unx774RS8poaOhThDOG6BGmxvAnxhQnDp2BG/ZUm65xZILTpw==} + dependencies: + tunnel: 0.0.6 + dev: false + + /@cspotcode/source-map-support@0.8.1: + resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} + engines: {node: '>=12'} + dependencies: + '@jridgewell/trace-mapping': 0.3.9 + dev: false + + /@jridgewell/resolve-uri@3.1.1: + resolution: {integrity: sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==} + engines: {node: '>=6.0.0'} + dev: false + + /@jridgewell/sourcemap-codec@1.4.15: + resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} + dev: false + + /@jridgewell/trace-mapping@0.3.9: + resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} + dependencies: + '@jridgewell/resolve-uri': 3.1.1 + '@jridgewell/sourcemap-codec': 1.4.15 + dev: false + + /@octokit/action@6.0.5: + resolution: {integrity: sha512-jcCZb+jR4nzHgj86wlUvbTv92hiZ4OWpI9dIoWRilbtT4HuVVNFZvQih8X/YE2GMVrLCVbBD0xkjeq+1m8Rcpw==} + engines: {node: '>= 18'} + dependencies: + '@octokit/auth-action': 4.0.0 + '@octokit/core': 5.0.0 + '@octokit/plugin-paginate-rest': 8.0.0(@octokit/core@5.0.0) + '@octokit/plugin-rest-endpoint-methods': 9.0.0(@octokit/core@5.0.0) + '@octokit/types': 11.1.0 + undici: 5.24.0 + 
dev: false + + /@octokit/auth-action@4.0.0: + resolution: {integrity: sha512-sMm9lWZdiX6e89YFaLrgE9EFs94k58BwIkvjOtozNWUqyTmsrnWFr/M5LolaRzZ7Kmb5FbhF9hi7FEeE274SoQ==} + engines: {node: '>= 18'} + dependencies: + '@octokit/auth-token': 4.0.0 + '@octokit/types': 11.1.0 + dev: false + + /@octokit/auth-token@4.0.0: + resolution: {integrity: sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA==} + engines: {node: '>= 18'} + dev: false + + /@octokit/core@5.0.0: + resolution: {integrity: sha512-YbAtMWIrbZ9FCXbLwT9wWB8TyLjq9mxpKdgB3dUNxQcIVTf9hJ70gRPwAcqGZdY6WdJPZ0I7jLaaNDCiloGN2A==} + engines: {node: '>= 18'} + dependencies: + '@octokit/auth-token': 4.0.0 + '@octokit/graphql': 7.0.1 + '@octokit/request': 8.1.1 + '@octokit/request-error': 5.0.0 + '@octokit/types': 11.1.0 + before-after-hook: 2.2.3 + universal-user-agent: 6.0.0 + dev: false + + /@octokit/endpoint@9.0.0: + resolution: {integrity: sha512-szrQhiqJ88gghWY2Htt8MqUDO6++E/EIXqJ2ZEp5ma3uGS46o7LZAzSLt49myB7rT+Hfw5Y6gO3LmOxGzHijAQ==} + engines: {node: '>= 18'} + dependencies: + '@octokit/types': 11.1.0 + is-plain-object: 5.0.0 + universal-user-agent: 6.0.0 + dev: false + + /@octokit/graphql@7.0.1: + resolution: {integrity: sha512-T5S3oZ1JOE58gom6MIcrgwZXzTaxRnxBso58xhozxHpOqSTgDS6YNeEUvZ/kRvXgPrRz/KHnZhtb7jUMRi9E6w==} + engines: {node: '>= 18'} + dependencies: + '@octokit/request': 8.1.1 + '@octokit/types': 11.1.0 + universal-user-agent: 6.0.0 + dev: false + + /@octokit/openapi-types@18.0.0: + resolution: {integrity: sha512-V8GImKs3TeQRxRtXFpG2wl19V7444NIOTDF24AWuIbmNaNYOQMWRbjcGDXV5B+0n887fgDcuMNOmlul+k+oJtw==} + + /@octokit/plugin-paginate-rest@8.0.0(@octokit/core@5.0.0): + resolution: {integrity: sha512-2xZ+baZWUg+qudVXnnvXz7qfrTmDeYPCzangBVq/1gXxii/OiS//4shJp9dnCCvj1x+JAm9ji1Egwm1BA47lPQ==} + engines: {node: '>= 18'} + peerDependencies: + '@octokit/core': '>=5' + dependencies: + '@octokit/core': 5.0.0 + '@octokit/types': 11.1.0 + dev: false + + 
/@octokit/plugin-rest-endpoint-methods@9.0.0(@octokit/core@5.0.0): + resolution: {integrity: sha512-KquMF/VB1IkKNiVnzJKspY5mFgGyLd7HzdJfVEGTJFzqu9BRFNWt+nwTCMuUiWc72gLQhRWYubTwOkQj+w/1PA==} + engines: {node: '>= 18'} + peerDependencies: + '@octokit/core': '>=5' + dependencies: + '@octokit/core': 5.0.0 + '@octokit/types': 11.1.0 + dev: false + + /@octokit/plugin-retry@6.0.0(@octokit/core@5.0.0): + resolution: {integrity: sha512-a1/A4A+PB1QoAHQfLJxGHhLfSAT03bR1jJz3GgQJZvty2ozawFWs93MiBQXO7SL2YbO7CIq0Goj4qLOBj8JeMQ==} + engines: {node: '>= 18'} + peerDependencies: + '@octokit/core': '>=5' + dependencies: + '@octokit/core': 5.0.0 + '@octokit/request-error': 5.0.0 + '@octokit/types': 11.1.0 + bottleneck: 2.19.5 + dev: false + + /@octokit/plugin-throttling@7.0.0(@octokit/core@5.0.0): + resolution: {integrity: sha512-KL2k/d0uANc8XqP5S64YcNFCudR3F5AaKO39XWdUtlJIjT9Ni79ekWJ6Kj5xvAw87udkOMEPcVf9xEge2+ahew==} + engines: {node: '>= 18'} + peerDependencies: + '@octokit/core': ^5.0.0 + dependencies: + '@octokit/core': 5.0.0 + '@octokit/types': 11.1.0 + bottleneck: 2.19.5 + dev: false + + /@octokit/request-error@5.0.0: + resolution: {integrity: sha512-1ue0DH0Lif5iEqT52+Rf/hf0RmGO9NWFjrzmrkArpG9trFfDM/efx00BJHdLGuro4BR/gECxCU2Twf5OKrRFsQ==} + engines: {node: '>= 18'} + dependencies: + '@octokit/types': 11.1.0 + deprecation: 2.3.1 + once: 1.4.0 + dev: false + + /@octokit/request@8.1.1: + resolution: {integrity: sha512-8N+tdUz4aCqQmXl8FpHYfKG9GelDFd7XGVzyN8rc6WxVlYcfpHECnuRkgquzz+WzvHTK62co5di8gSXnzASZPQ==} + engines: {node: '>= 18'} + dependencies: + '@octokit/endpoint': 9.0.0 + '@octokit/request-error': 5.0.0 + '@octokit/types': 11.1.0 + is-plain-object: 5.0.0 + universal-user-agent: 6.0.0 + dev: false + + /@octokit/types@11.1.0: + resolution: {integrity: sha512-Fz0+7GyLm/bHt8fwEqgvRBWwIV1S6wRRyq+V6exRKLVWaKGsuy6H9QFYeBVDV7rK6fO3XwHgQOPxv+cLj2zpXQ==} + dependencies: + '@octokit/openapi-types': 18.0.0 + + /@tsconfig/node10@1.0.9: + resolution: {integrity: 
sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==} + dev: false + + /@tsconfig/node12@1.0.11: + resolution: {integrity: sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==} + dev: false + + /@tsconfig/node14@1.0.3: + resolution: {integrity: sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==} + dev: false + + /@tsconfig/node16@1.0.4: + resolution: {integrity: sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==} + dev: false + + /@types/node@18.17.15: + resolution: {integrity: sha512-2yrWpBk32tvV/JAd3HNHWuZn/VDN1P+72hWirHnvsvTGSqbANi+kSeuQR9yAHnbvaBvHDsoTdXV0Fe+iRtHLKA==} + + /acorn-walk@8.2.0: + resolution: {integrity: sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==} + engines: {node: '>=0.4.0'} + dev: false + + /acorn@8.10.0: + resolution: {integrity: sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==} + engines: {node: '>=0.4.0'} + hasBin: true + dev: false + + /arg@4.1.3: + resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==} + dev: false + + /before-after-hook@2.2.3: + resolution: {integrity: sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==} + dev: false + + /bottleneck@2.19.5: + resolution: {integrity: sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw==} + dev: false + + /busboy@1.6.0: + resolution: {integrity: sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==} + engines: {node: '>=10.16.0'} + dependencies: + streamsearch: 1.1.0 + dev: false + + /create-require@1.1.1: + resolution: {integrity: sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==} + 
dev: false + + /deprecation@2.3.1: + resolution: {integrity: sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==} + dev: false + + /diff@4.0.2: + resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} + engines: {node: '>=0.3.1'} + dev: false + + /is-plain-object@5.0.0: + resolution: {integrity: sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==} + engines: {node: '>=0.10.0'} + dev: false + + /make-error@1.3.6: + resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} + dev: false + + /once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + dependencies: + wrappy: 1.0.2 + dev: false + + /streamsearch@1.1.0: + resolution: {integrity: sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==} + engines: {node: '>=10.0.0'} + dev: false + + /ts-node@10.9.1(@types/node@18.17.15)(typescript@5.2.2): + resolution: {integrity: sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==} + hasBin: true + peerDependencies: + '@swc/core': '>=1.2.50' + '@swc/wasm': '>=1.2.50' + '@types/node': '*' + typescript: '>=2.7' + peerDependenciesMeta: + '@swc/core': + optional: true + '@swc/wasm': + optional: true + dependencies: + '@cspotcode/source-map-support': 0.8.1 + '@tsconfig/node10': 1.0.9 + '@tsconfig/node12': 1.0.11 + '@tsconfig/node14': 1.0.3 + '@tsconfig/node16': 1.0.4 + '@types/node': 18.17.15 + acorn: 8.10.0 + acorn-walk: 8.2.0 + arg: 4.1.3 + create-require: 1.1.1 + diff: 4.0.2 + make-error: 1.3.6 + typescript: 5.2.2 + v8-compile-cache-lib: 3.0.1 + yn: 3.1.1 + dev: false + + /tunnel@0.0.6: + resolution: {integrity: 
sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==} + engines: {node: '>=0.6.11 <=0.7.0 || >=0.7.3'} + dev: false + + /typescript@5.2.2: + resolution: {integrity: sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==} + engines: {node: '>=14.17'} + hasBin: true + + /undici@5.24.0: + resolution: {integrity: sha512-OKlckxBjFl0oXxcj9FU6oB8fDAaiRUq+D8jrFWGmOfI/gIyjk/IeS75LMzgYKUaeHzLUcYvf9bbJGSrUwTfwwQ==} + engines: {node: '>=14.0'} + dependencies: + busboy: 1.6.0 + dev: false + + /universal-user-agent@6.0.0: + resolution: {integrity: sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==} + dev: false + + /uuid@8.3.2: + resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==} + hasBin: true + dev: false + + /v8-compile-cache-lib@3.0.1: + resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} + dev: false + + /wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + dev: false + + /yn@3.1.1: + resolution: {integrity: sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==} + engines: {node: '>=6'} + dev: false diff --git a/.github/actions/delete-deployments/test.sh b/.github/actions/delete-deployments/test.sh new file mode 100644 index 00000000..753ab71e --- /dev/null +++ b/.github/actions/delete-deployments/test.sh @@ -0,0 +1,9 @@ +#!/bin/sh +export NUM_OF_PAGES=all +export ENVIRONMENT=integration +export DRY_RUN=false +export REPOSITORY=goplugin/pluginv3.0 +export REF=fix/golint +export GITHUB_ACTION=true + +pnpm start diff --git a/.github/actions/delete-deployments/tsconfig.json b/.github/actions/delete-deployments/tsconfig.json new file mode 100644 index 00000000..4b36d4a1 --- /dev/null +++ 
b/.github/actions/delete-deployments/tsconfig.json @@ -0,0 +1,104 @@ +{ + "compilerOptions": { + /* Visit https://aka.ms/tsconfig to read more about this file */ + + /* Projects */ + // "incremental": true, /* Save .tsbuildinfo files to allow for incremental compilation of projects. */ + // "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */ + // "tsBuildInfoFile": "./.tsbuildinfo", /* Specify the path to .tsbuildinfo incremental compilation file. */ + // "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects. */ + // "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */ + // "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */ + + /* Language and Environment */ + "target": "ESNext" /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */, + // "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */ + // "jsx": "preserve", /* Specify what JSX code is generated. */ + // "experimentalDecorators": true, /* Enable experimental support for TC39 stage 2 draft decorators. */ + // "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */ + // "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */ + // "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */ + // "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */ + // "reactNamespace": "", /* Specify the object invoked for 'createElement'. 
This only applies when targeting 'react' JSX emit. */ + // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */ + // "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */ + // "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */ + + /* Modules */ + "module": "NodeNext" /* Specify what module code is generated. */, + // "rootDir": "./", /* Specify the root folder within your source files. */ + "moduleResolution": "NodeNext" /* Specify how TypeScript looks up a file from a given module specifier. */, + // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */ + // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */ + // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */ + // "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */ + // "types": [], /* Specify type package names to be included without being referenced in a source file. */ + // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */ + // "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */ + // "resolveJsonModule": true, /* Enable importing .json files. */ + // "noResolve": true, /* Disallow 'import's, 'require's or ''s from expanding the number of files TypeScript should add to a project. */ + + /* JavaScript Support */ + // "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */ + // "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */ + // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. 
*/ + + /* Emit */ + // "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */ + // "declarationMap": true, /* Create sourcemaps for d.ts files. */ + // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */ + // "sourceMap": true, /* Create source map files for emitted JavaScript files. */ + // "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */ + // "outDir": "./", /* Specify an output folder for all emitted files. */ + // "removeComments": true, /* Disable emitting comments. */ + "noEmit": true /* Disable emitting files from a compilation. */, + // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */ + // "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types. */ + // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */ + // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */ + // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ + // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */ + // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */ + // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */ + // "newLine": "crlf", /* Set the newline character for emitting files. */ + // "stripInternal": true, /* Disable emitting declarations that have '@internal' in their JSDoc comments. */ + // "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. 
*/ + // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */ + // "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */ + // "declarationDir": "./", /* Specify the output directory for generated declaration files. */ + // "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */ + + /* Interop Constraints */ + // "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */ + // "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */ + "esModuleInterop": true /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */, + // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */ + "forceConsistentCasingInFileNames": true /* Ensure that casing is correct in imports. */, + + /* Type Checking */ + "strict": true /* Enable all strict type-checking options. */, + // "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */ + // "strictNullChecks": true, /* When type checking, take into account 'null' and 'undefined'. */ + // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */ + // "strictBindCallApply": true, /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */ + // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */ + // "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. */ + // "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. 
*/ + // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */ + // "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */ + // "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */ + // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */ + // "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */ + // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */ + // "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. */ + // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */ + // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */ + // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */ + // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */ + + /* Completeness */ + // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */ + "skipLibCheck": false /* Skip type checking all .d.ts files. */ + }, + "include": ["src", "test"] +} diff --git a/.github/actions/golangci-lint/action.yml b/.github/actions/golangci-lint/action.yml new file mode 100644 index 00000000..bad6d88b --- /dev/null +++ b/.github/actions/golangci-lint/action.yml @@ -0,0 +1,72 @@ +name: CI lint for Golang +description: Runs CI lint for Golang +inputs: + # general inputs + name: + description: Name of the lint action + default: lint + go-directory: + description: Go directory to run commands from + default: "." 
+ # setup-go inputs + only-modules: + description: Set to 'true' to only cache modules + default: "false" + cache-version: + description: Set this to cache bust + default: "1" + go-version-file: + description: Set where the go version file is located at + default: "go.mod" + go-module-file: + description: Set where the go module file is located at + default: "go.sum" + # grafana cloud inputs + gc-host: + description: "grafana cloud hostname" + gc-basic-auth: + description: "grafana cloud basic auth" + +runs: + using: composite + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Setup Go + uses: ./.github/actions/setup-go + with: + only-modules: ${{ inputs.only-modules }} + cache-version: ${{ inputs.cache-version }} + go-version-file: ${{ inputs.go-version-file }} + go-module-file: ${{ inputs.go-module-file }} + - name: Touching core/web/assets/index.html + shell: bash + run: mkdir -p core/web/assets && touch core/web/assets/index.html + - name: Build binary + working-directory: ${{ inputs.go-directory }} + shell: bash + run: go build ./... 
+ - name: golangci-lint + uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 + with: + version: v1.55.2 + # We already cache these directories in setup-go + skip-pkg-cache: true + skip-build-cache: true + # only-new-issues is only applicable to PRs, otherwise it is always set to false + only-new-issues: false # disabled for PRs due to unreliability + args: --out-format colored-line-number,checkstyle:golangci-lint-report.xml + working-directory: ${{ inputs.go-directory }} + - name: Store lint report artifact + if: always() + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + name: golangci-lint-report + path: ${{ inputs.go-directory }}/golangci-lint-report.xml + - name: Collect Metrics + if: always() + uses: goplugin/push-gha-metrics-action@d1618b772a97fd87e6505de97b872ee0b1f1729a # v2.0.2 + with: + basic-auth: ${{ inputs.gc-basic-auth }} + hostname: ${{ inputs.gc-host }} + this-job-name: ${{ inputs.name }} + continue-on-error: true diff --git a/.github/actions/goreleaser-build-sign-publish/README.md b/.github/actions/goreleaser-build-sign-publish/README.md new file mode 100644 index 00000000..d6bf7e6f --- /dev/null +++ b/.github/actions/goreleaser-build-sign-publish/README.md @@ -0,0 +1,131 @@ +# goreleaser-build-sign-publish + +> goreleaser wrapper action + +## workflows + +### build publish + +```yaml +name: goreleaser + +on: + push: + tags: + - "v*" + +jobs: + goreleaser: + runs-on: ubuntu-latest + environment: release + permissions: + id-token: write + contents: read + env: + MACOS_SDK_VERSION: 12.3 + steps: + - name: Checkout repository + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Configure aws credentials + uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + with: + role-to-assume: ${{ secrets.aws-role-arn }} + role-duration-seconds: ${{ secrets.aws-role-dur-sec }} + aws-region: ${{ secrets.aws-region }} 
+      - name: Cache macos sdk
+        id: sdk-cache
+        uses: actions/cache@v3
+        with:
+          path: ${{ format('MacOSX{0}.sdk', env.MACOS_SDK_VERSION) }}
+          key: ${{ runner.OS }}-${{ env.MACOS_SDK_VERSION }}-macos-sdk-cache-${{ hashFiles('**/SDKSettings.json') }}
+          restore-keys: |
+            ${{ runner.OS }}-${{ env.MACOS_SDK_VERSION }}-macos-sdk-cache-
+      - name: Get macos sdk
+        if: steps.sdk-cache.outputs.cache-hit != 'true'
+        run: |
+          curl -L https://github.com/joseluisq/macosx-sdks/releases/download/${MACOS_SDK_VERSION}/MacOSX${MACOS_SDK_VERSION}.sdk.tar.xz > MacOSX${MACOS_SDK_VERSION}.sdk.tar.xz
+          tar -xf MacOSX${MACOS_SDK_VERSION}.sdk.tar.xz
+      - name: Build, sign, and publish
+        uses: ./.github/actions/goreleaser-build-sign-publish
+        with:
+          enable-docker-publish: "true"
+          enable-goreleaser-snapshot: "false"
+          docker-registry: ${{ secrets.aws-ecr-registry }}
+          goreleaser-exec: goreleaser
+          goreleaser-config: .goreleaser.yaml
+          macos-sdk-dir: ${{ format('MacOSX{0}.sdk', env.MACOS_SDK_VERSION) }}
+        env:
+          GITHUB_TOKEN: ${{ secrets.gh-token }}
+```
+
+### snapshot release
+
+```yaml
+- name: Build, sign, and publish image
+  uses: ./.github/actions/goreleaser-build-sign-publish
+  with:
+    enable-docker-publish: "true"
+    enable-goreleaser-snapshot: "true"
+    docker-registry: ${{ secrets.aws-ecr-registry }}
+    goreleaser-exec: goreleaser
+    goreleaser-config: .goreleaser.yaml
+```
+
+### image signing
+
+```yaml
+- name: Build, sign, and publish
+  uses: ./.github/actions/goreleaser-build-sign-publish
+  with:
+    enable-docker-publish: "true"
+    enable-goreleaser-snapshot: "false"
+    enable-cosign: "true"
+    docker-registry: ${{ secrets.aws-ecr-registry }}
+    goreleaser-exec: goreleaser
+    goreleaser-config: .goreleaser.yaml
+    cosign-password: ${{ secrets.cosign-password }}
+    cosign-public-key: ${{ secrets.cosign-public-key }}
+    cosign-private-key: ${{ secrets.cosign-private-key }}
+    macos-sdk-dir: MacOSX12.3.sdk
+```
+
+## customizing
+
+### inputs
+
+Following inputs can be used as `step.with` keys
+
+| Name |
Type | Default | Description | +| ---------------------------- | ------ | ------------------ | ----------------------------------------------------------------------- | +| `goreleaser-version` | String | `1.13.1` | `goreleaser` version | +| `zig-version` | String | `0.10.0` | `zig` version | +| `cosign-version` | String | `v1.13.1` | `cosign` version | +| `macos-sdk-dir` | String | `MacOSX12.3.sdk` | MacOSX sdk directory | +| `enable-docker-publish` | Bool | `true` | Enable publishing of Docker images / manifests | +| `docker-registry` | String | `localhost:5001` | Docker registry | +| `enable-goreleaser-snapshot` | Bool | `false` | Enable goreleaser build / release snapshot | +| `goreleaser-exec` | String | `goreleaser` | The goreleaser executable, can invoke wrapper script | +| `goreleaser-config` | String | `.goreleaser.yaml` | The goreleaser configuration yaml | +| `enable-cosign` | Bool | `false` | Enable signing of Docker images | +| `cosign-public-key` | String | `""` | The public key to be used with cosign for verification | +| `cosign-private-key` | String | `""` | The private key to be used with cosign to sign the image | +| `cosign-password-key` | String | `""` | The password to decrypt the cosign private key needed to sign the image | + +## testing + +- bring up local docker registry + +```sh +docker run -d --restart=always -p "127.0.0.1:5001:5000" --name registry registry:2 +``` + +- run snapshot release, publish to local docker registry + +```sh +GORELEASER_EXEC=" set +_publish_snapshot_images() { + local full_sha=$(git rev-parse HEAD) + local images=$(docker images --filter "label=org.opencontainers.image.revision=$full_sha" --format "{{.Repository}}:{{.Tag}}") + for image in $images; do + docker push "$image" + done +} + +# publish snapshot docker manifest lists +# must have label=org.opencontainers.image.revision= set +_publish_snapshot_manifests() { + local docker_manifest_extra_args=$DOCKER_MANIFEST_EXTRA_ARGS + local full_sha=$(git rev-parse 
HEAD) + local images=$(docker images --filter "label=org.opencontainers.image.revision=$full_sha" --format "{{.Repository}}:{{.Tag}}" | sort) + local arches=(amd64 arm64) + local raw_manifest_lists="" + for image in $images; do + for arch in "${arches[@]}"; do + image=${image%"-$arch"} + done + raw_manifest_lists+="$image"$'\n' + done + local manifest_lists=$(echo "$raw_manifest_lists" | sort | uniq) + for manifest_list in $manifest_lists; do + manifests="" + for arch in "${arches[@]}"; do + archExists=$(echo "$images" | grep -c "$manifest_lists-$arch") + if [[ $archExists -ne 0 ]]; then + manifests+="$manifest_list-$arch " + fi + done + docker manifest create $manifest_list $manifests $docker_manifest_extra_args + docker manifest push "$manifest_list" + done +} + +# wrapper function to invoke goreleaser release +goreleaser_release() { + if [[ $ENABLE_COSIGN == "true" ]]; then + echo "$COSIGN_PUBLIC_KEY" > cosign.pub + echo "$COSIGN_PRIVATE_KEY" > cosign.key + fi + if [[ -n $MACOS_SDK_DIR ]]; then + MACOS_SDK_DIR=$(echo "$(cd "$(dirname "$MACOS_SDK_DIR")" || exit; pwd)/$(basename "$MACOS_SDK_DIR")") + fi + if [[ $ENABLE_GORELEASER_SNAPSHOT == "true" ]]; then + $GORELEASER_EXEC release --snapshot --clean --config "$GORELEASER_CONFIG" "$@" + if [[ $ENABLE_DOCKER_PUBLISH == "true" ]]; then + _publish_snapshot_images + _publish_snapshot_manifests + fi + else + $GORELEASER_EXEC release --clean --config "$GORELEASER_CONFIG" "$@" + fi + if [[ $ENABLE_COSIGN == "true" ]]; then + rm -rf cosign.pub + rm -rf cosign.key + fi + + echo "metadata=$(cat dist/metadata.json)" >> "$GITHUB_OUTPUT" + echo "artifacts=$(cat dist/artifacts.json)" >> "$GITHUB_OUTPUT" +} + +"$@" diff --git a/.github/actions/notify-slack-jobs-result/README.md b/.github/actions/notify-slack-jobs-result/README.md new file mode 100644 index 00000000..298930c0 --- /dev/null +++ b/.github/actions/notify-slack-jobs-result/README.md @@ -0,0 +1,37 @@ +# Notify Slack Jobs Result + +Sends a Slack message to a 
specified channel detailing the results of one to many GHA job results using a regex. The job results will be grouped by the `github_job_name_regex` and displayed underneath the `message_title`, with the regex matching group displayed as an individual result. This is primarily designed for when you have test groups running in a matrix, and would like condensed reporting on their status by group. It's often accompanied by posting a Slack message before to start a thread, then attaching all the results to that thread like we do in the reporting section of the [live-testnet-test.yml workflow](../../workflows/live-testnet-tests.yml). Check out the example below, where we post an initial summary message, then use this action to thread together specific results: + +```yaml +message_title: Optimism Goerli +github_job_name_regex: ^Optimism Goerli (?.*?) Tests$ # Note that the regex MUST have a capturing group named "cap" +``` + +![example](image.png) + +## Inputs + +```yaml +inputs: + github_token: + description: "The GitHub token to use for authentication (usually ${{ github.token }})" + required: true + github_repository: + description: "The GitHub owner/repository to use for authentication (usually ${{ github.repository }}))" + required: true + workflow_run_id: + description: "The workflow run ID to get the results from (usually ${{ github.run_id }})" + required: true + github_job_name_regex: + description: "The regex to use to match 1..many job name(s) to collect results from. Should include a capture group named 'cap' for the part of the job name you want to display in the Slack message (e.g. 
^Client Compatability Test (?.*?)$)" + required: true + message_title: + description: "The title of the Slack message" + required: true + slack_channel_id: + description: "The Slack channel ID to post the message to" + required: true + slack_thread_ts: + description: "The Slack thread timestamp to post the message to, handy for keeping multiple related results in a single thread" + required: false +``` diff --git a/.github/actions/notify-slack-jobs-result/action.yml b/.github/actions/notify-slack-jobs-result/action.yml new file mode 100644 index 00000000..63840cfa --- /dev/null +++ b/.github/actions/notify-slack-jobs-result/action.yml @@ -0,0 +1,110 @@ +name: Notify Slack Jobs Result +description: Will send a notification in Slack for the result of a GitHub action run, typically for test results +inputs: + github_token: + description: "The GitHub token to use for authentication (usually github.token)" + required: true + github_repository: + description: "The GitHub owner/repository to use for authentication (usually github.repository))" + required: true + workflow_run_id: + description: "The workflow run ID to get the results from (usually github.run_id)" + required: true + github_job_name_regex: + description: "The regex to use to match 1..many job name(s) to collect results from. Should include a capture group named 'cap' for the part of the job name you want to display in the Slack message (e.g. 
^Client Compatability Test (?.*?)$)" + required: true + message_title: + description: "The title of the Slack message" + required: true + slack_channel_id: + description: "The Slack channel ID to post the message to" + required: true + slack_bot_token: + description: "The Slack bot token to use for authentication which needs permission and an installed app in the channel" + required: true + slack_thread_ts: + description: "The Slack thread timestamp to post the message to, handy for keeping multiple related results in a single thread" + required: false + +runs: + using: composite + steps: + - name: Get Results + shell: bash + id: test-results + run: | + # I feel like there's some clever, fully jq way to do this, but I ain't got the motivation to figure it out + echo "Querying test results at https://api.github.com/repos/${{inputs.github_repository}}/actions/runs/${{ inputs.workflow_run_id }}/jobs" + + PARSED_RESULTS=$(curl \ + -H "Authorization: Bearer ${{ inputs.github_token }}" \ + 'https://api.github.com/repos/${{inputs.github_repository}}/actions/runs/${{ inputs.workflow_run_id }}/jobs' \ + | jq -r --arg pattern "${{ inputs.github_job_name_regex }}" '.jobs[] + | select(.name | test($pattern)) as $job + | $job.steps[] + | select(.name == "Run Tests") + | { conclusion: (if .conclusion == "success" then ":white_check_mark:" else ":x:" end), cap: ("*" + ($job.name | capture($pattern).cap) + "*"), html_url: $job.html_url }') + + echo "Parsed Results:" + echo $PARSED_RESULTS + + ALL_SUCCESS=true + echo "Checking for failures" + echo "$PARSED_RESULTS" | jq -s | jq -r '.[] | select(.conclusion != ":white_check_mark:")' + for row in $(echo "$PARSED_RESULTS" | jq -s | jq -r '.[] | select(.conclusion != ":white_check_mark:")'); do + ALL_SUCCESS=false + break + done + echo "Success: $ALL_SUCCESS" + + echo all_success=$ALL_SUCCESS >> $GITHUB_OUTPUT + + FORMATTED_RESULTS=$(echo $PARSED_RESULTS | jq -s '[.[] + | { + conclusion: .conclusion, + cap: .cap, + html_url: .html_url 
+ } + ] + | map("{\"type\": \"section\", \"text\": {\"type\": \"mrkdwn\", \"text\": \"<\(.html_url)|\(.cap)>: \(.conclusion)\"}}") + | join(",")') + + echo "Formatted Results:" + echo $FORMATTED_RESULTS + + # Cleans out backslashes and quotes from jq + CLEAN_RESULTS=$(echo "$FORMATTED_RESULTS" | sed 's/\\\"/"/g' | sed 's/^"//;s/"$//') + + echo "Clean Results" + echo $CLEAN_RESULTS + + echo results=$CLEAN_RESULTS >> $GITHUB_OUTPUT + - name: Post Results + uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + env: + SLACK_BOT_TOKEN: ${{ inputs.slack_bot_token }} + with: + channel-id: ${{ inputs.slack_channel_id }} + payload: | + { + "thread_ts": "${{ inputs.slack_thread_ts }}", + "attachments": [ + { + "color": "${{ steps.test-results.outputs.all_success == 'true' && '#2E7D32' || '#C62828' }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "${{ inputs.message_title }} ${{ steps.test-results.outputs.all_success == 'true' && ':white_check_mark:' || ':x:'}}", + "emoji": true + } + }, + { + "type": "divider" + }, + ${{ steps.test-results.outputs.results }} + ] + } + ] + } diff --git a/.github/actions/notify-slack-jobs-result/image.png b/.github/actions/notify-slack-jobs-result/image.png new file mode 100644 index 00000000..3bd39810 Binary files /dev/null and b/.github/actions/notify-slack-jobs-result/image.png differ diff --git a/.github/actions/setup-create-base64-config-live-testnets/action.yml b/.github/actions/setup-create-base64-config-live-testnets/action.yml new file mode 100644 index 00000000..43d46756 --- /dev/null +++ b/.github/actions/setup-create-base64-config-live-testnets/action.yml @@ -0,0 +1,130 @@ +name: Create Base64 Config +description: A composite action that creates a base64-encoded config to be used by integration tests + +inputs: + runId: + description: The run id + testLogCollect: + description: Whether to always collect logs, even for passing tests + default: "false" + 
pluginImage: + description: The plugin image to use + default: "public.ecr.aws/plugin/plugin" + pluginVersion: + description: The git commit sha to use for the image tag + pyroscopeServer: + description: URL of Pyroscope server + pyroscopeEnvironment: + description: Name of Pyroscope environment + pyroscopeKey: + description: Pyroscope server key + lokiEndpoint: + description: Loki push endpoint + lokiTenantId: + description: Loki tenant id + lokiBasicAuth: + description: Loki basic auth + logstreamLogTargets: + description: Where to send logs (e.g. file, loki) + grafanaUrl: + description: Grafana URL + grafanaDashboardUrl: + description: Grafana dashboard URL + network: + description: Network to run tests on + httpEndpoints: + description: HTTP endpoints to use for network + wsEndpoints: + description: WS endpoints to use for network + fundingKeys: + description: Funding keys to use for network + +runs: + using: composite + steps: + - name: Prepare Base64 TOML override + shell: bash + id: base64-config-override + env: + RUN_ID: ${{ inputs.runId }} + PYROSCOPE_SERVER: ${{ inputs.pyroscopeServer }} + PYROSCOPE_ENVIRONMENT: ${{ inputs.pyroscopeEnvironment }} + PYROSCOPE_KEY: ${{ inputs.pyroscopeKey }} + PLUGIN_IMAGE: ${{ inputs.pluginImage }} + PLUGIN_VERSION: ${{ inputs.pluginVersion }} + LOKI_ENDPOINT: ${{ inputs.lokiEndpoint }} + LOKI_TENANT_ID: ${{ inputs.lokiTenantId }} + LOKI_BASIC_AUTH: ${{ inputs.lokiBasicAuth }} + LOGSTREAM_LOG_TARGETS: ${{ inputs.logstreamLogTargets }} + GRAFANA_URL: ${{ inputs.grafanaUrl }} + GRAFANA_DASHBOARD_URL: ${{ inputs.grafanaDashboardUrl }} + NETWORK: ${{ inputs.network }} + HTTP_ENDPOINTS: ${{ inputs.httpEndpoints }} + WS_ENDPOINTS: ${{ inputs.wsEndpoints }} + FUNDING_KEYS: ${{ inputs.fundingKeys }} + run: | + convert_to_toml_array() { + local IFS=',' + local input_array=($1) + local toml_array_format="[" + + for element in "${input_array[@]}"; do + toml_array_format+="\"$element\"," + done + + 
toml_array_format="${toml_array_format%,}]"
+          echo "$toml_array_format"
+        }
+
+        if [ -n "$PYROSCOPE_SERVER" ]; then
+          pyroscope_enabled=true
+        else
+          pyroscope_enabled=false
+        fi
+
+        # NOTE: heredoc references must match the env block above; LOKI_URL was
+        # used previously but is never defined in this step's env (only
+        # LOKI_ENDPOINT is), which left the Loki endpoint empty in config.toml.
+        cat << EOF > config.toml
+        [Common]
+        plugin_node_funding=0.5
+
+        [PluginImage]
+        image="$PLUGIN_IMAGE"
+        version="$PLUGIN_VERSION"
+
+        [Pyroscope]
+        enabled=$pyroscope_enabled
+        server_url="$PYROSCOPE_SERVER"
+        environment="$PYROSCOPE_ENVIRONMENT"
+        key="$PYROSCOPE_KEY"
+
+        [Logging]
+        run_id="$RUN_ID"
+
+        [Logging.LogStream]
+        log_targets=$(convert_to_toml_array "$LOGSTREAM_LOG_TARGETS")
+
+        [Logging.Loki]
+        tenant_id="$LOKI_TENANT_ID"
+        endpoint="$LOKI_ENDPOINT"
+        basic_auth="$LOKI_BASIC_AUTH"
+
+        [Logging.Grafana]
+        base_url="$GRAFANA_URL"
+        dashboard_url="$GRAFANA_DASHBOARD_URL"
+
+        [Network]
+        selected_networks=["$NETWORK"]
+
+        [Network.RpcHttpUrls]
+        "$NETWORK" = $(convert_to_toml_array "$HTTP_ENDPOINTS")
+
+        [Network.RpcWsUrls]
+        "$NETWORK" = $(convert_to_toml_array "$WS_ENDPOINTS")
+
+        [Network.WalletKeys]
+        "$NETWORK" = $(convert_to_toml_array "$FUNDING_KEYS")
+        EOF
+
+        BASE64_CONFIG_OVERRIDE=$(cat config.toml | base64 -w 0)
+        echo ::add-mask::$BASE64_CONFIG_OVERRIDE
+        echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV
+        touch .root_dir
diff --git a/.github/actions/setup-create-base64-config/action.yml b/.github/actions/setup-create-base64-config/action.yml
new file mode 100644
index 00000000..aa87d071
--- /dev/null
+++ b/.github/actions/setup-create-base64-config/action.yml
@@ -0,0 +1,120 @@
+name: Create Base64 Config
+description: A composite action that creates a base64-encoded config to be used by integration tests
+
+inputs:
+  runId:
+    description: The run id
+  testLogCollect:
+    description: Whether to always collect logs, even for passing tests
+    default: "false"
+  selectedNetworks:
+    description: The networks to run tests against
+  pluginImage:
+    description: The plugin image to use
+    default: "public.ecr.aws/plugin/plugin"
+  pluginVersion:
+    description:
The git commit sha to use for the image tag + pyroscopeServer: + description: URL of Pyroscope server + pyroscopeEnvironment: + description: Name of Pyroscope environment + pyroscopeKey: + description: Pyroscope server key + lokiEndpoint: + description: Loki push endpoint + lokiTenantId: + description: Loki tenant id + lokiBasicAuth: + description: Loki basic auth + logstreamLogTargets: + description: Where to send logs (e.g. file, loki) + grafanaUrl: + description: Grafana URL + grafanaDashboardUrl: + description: Grafana dashboard URL + +runs: + using: composite + steps: + - name: Prepare Base64 TOML override + shell: bash + id: base64-config-override + env: + RUN_ID: ${{ inputs.runId }} + TEST_LOG_COLLECT: ${{ inputs.testLogCollect }} + SELECTED_NETWORKS: ${{ inputs.selectedNetworks }} + PYROSCOPE_SERVER: ${{ inputs.pyroscopeServer }} + PYROSCOPE_ENVIRONMENT: ${{ inputs.pyroscopeEnvironment }} + PYROSCOPE_KEY: ${{ inputs.pyroscopeKey }} + PLUGIN_IMAGE: ${{ inputs.pluginImage }} + PLUGIN_VERSION: ${{ inputs.pluginVersion }} + LOKI_ENDPOINT: ${{ inputs.lokiEndpoint }} + LOKI_TENANT_ID: ${{ inputs.lokiTenantId }} + LOKI_BASIC_AUTH: ${{ inputs.lokiBasicAuth }} + LOGSTREAM_LOG_TARGETS: ${{ inputs.logstreamLogTargets }} + GRAFANA_URL: ${{ inputs.grafanaUrl }} + GRAFANA_DASHBOARD_URL: ${{ inputs.grafanaDashboardUrl }} + run: | + echo ::add-mask::$PLUGIN_IMAGE + function convert_to_toml_array() { + local IFS=',' + local input_array=($1) + local toml_array_format="[" + + for element in "${input_array[@]}"; do + toml_array_format+="\"$element\"," + done + + toml_array_format="${toml_array_format%,}]" + echo "$toml_array_format" + } + + selected_networks=$(convert_to_toml_array "$SELECTED_NETWORKS") + log_targets=$(convert_to_toml_array "$LOGSTREAM_LOG_TARGETS") + + if [ -n "$PYROSCOPE_SERVER" ]; then + pyroscope_enabled=true + else + pyroscope_enabled=false + fi + + if [ -n "$TEST_LOG_COLLECT" ]; then + test_log_collect=true + else + test_log_collect=false + fi + + cat << 
EOF > config.toml + [Network] + selected_networks=$selected_networks + + [PluginImage] + image="$PLUGIN_IMAGE" + version="$PLUGIN_VERSION" + + [Pyroscope] + enabled=$pyroscope_enabled + server_url="$PYROSCOPE_SERVER" + environment="$PYROSCOPE_ENVIRONMENT" + key="$PYROSCOPE_KEY" + + [Logging] + test_log_collect=$test_log_collect + run_id="$RUN_ID" + + [Logging.LogStream] + log_targets=$log_targets + + [Logging.Loki] + tenant_id="$LOKI_TENANT_ID" + endpoint="$LOKI_ENDPOINT" + basic_auth="$LOKI_BASIC_AUTH" + + [Logging.Grafana] + base_url="$GRAFANA_URL" + dashboard_url="$GRAFANA_DASHBOARD_URL" + EOF + + BASE64_CONFIG_OVERRIDE=$(cat config.toml | base64 -w 0) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV diff --git a/.github/actions/setup-create-base64-upgrade-config/action.yml b/.github/actions/setup-create-base64-upgrade-config/action.yml new file mode 100644 index 00000000..4017a616 --- /dev/null +++ b/.github/actions/setup-create-base64-upgrade-config/action.yml @@ -0,0 +1,61 @@ +name: Create Base64 Upgrade Config +description: A composite action that creates a base64-encoded config to be used by Plugin version upgrade tests + +inputs: + selectedNetworks: + description: The networks to run tests against + pluginImage: + description: The plugin image to upgrade from + default: "public.ecr.aws/plugin/plugin" + pluginVersion: + description: The git commit sha to use for the image tag + upgradeImage: + description: The plugin image to upgrade to + default: "public.ecr.aws/plugin/plugin" + upgradeVersion: + description: The git commit sha to use for the image tag + +runs: + using: composite + steps: + - name: Prepare Base64 TOML override + shell: bash + id: base64-config-override + env: + SELECTED_NETWORKS: ${{ inputs.selectedNetworks }} + PLUGIN_IMAGE: ${{ inputs.pluginImage }} + PLUGIN_VERSION: ${{ inputs.pluginVersion }} + UPGRADE_IMAGE: ${{ inputs.upgradeImage }} + UPGRADE_VERSION: ${{ 
inputs.upgradeVersion }} + run: | + function convert_to_toml_array() { + local IFS=',' + local input_array=($1) + local toml_array_format="[" + + for element in "${input_array[@]}"; do + toml_array_format+="\"$element\"," + done + + toml_array_format="${toml_array_format%,}]" + echo "$toml_array_format" + } + + selected_networks=$(convert_to_toml_array "$SELECTED_NETWORKS") + + cat << EOF > config.toml + [Network] + selected_networks=$selected_networks + + [PluginImage] + image="$PLUGIN_IMAGE" + version="$PLUGIN_VERSION" + + [PluginUpgradeImage] + image="$UPGRADE_IMAGE" + version="$UPGRADE_VERSION" + EOF + + BASE64_CONFIG_OVERRIDE=$(cat config.toml | base64 -w 0) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV diff --git a/.github/actions/setup-go/action.yml b/.github/actions/setup-go/action.yml new file mode 100644 index 00000000..405a5dfe --- /dev/null +++ b/.github/actions/setup-go/action.yml @@ -0,0 +1,64 @@ +name: Setup Go +description: Setup Golang with efficient caching +inputs: + only-modules: + description: Set to 'true' to only cache modules + default: "false" + cache-version: + description: Set this to cache bust + default: "1" + go-version-file: + description: Set where the go version file is located at + default: "go.mod" + go-module-file: + description: Set where the go module file is located at + default: "go.sum" + +runs: + using: composite + steps: + - name: Set up Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version-file: ${{ inputs.go-version-file }} + cache: false + + - name: Get branch name + if: ${{ inputs.only-modules == 'false' }} + id: branch-name + uses: tj-actions/branch-names@2e5354c6733793113f416314375826df030ada23 #v6.5 + + - name: Set go cache keys + shell: bash + id: go-cache-dir + run: | + echo "gomodcache=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT + echo "gobuildcache=$(go env GOCACHE)" >> $GITHUB_OUTPUT + + - name: Set go 
module path
+        id: go-module-path
+        shell: bash
+        run: echo "path=./${{ inputs.go-module-file }}" >> $GITHUB_OUTPUT
+
+      - uses: actions/cache@v3
+        name: Cache Go Modules
+        with:
+          path: |
+            ${{ steps.go-cache-dir.outputs.gomodcache }}
+          # The lifetime of go modules is much higher than the build outputs, so we increase cache efficiency
+          # here by not having the primary key contain the branch name
+          # (steps context exposes `outputs`, not `output` — the old spelling evaluated
+          # to empty, so hashFiles() hashed nothing and the go.sum never busted the key)
+          key: ${{ runner.os }}-gomod-${{ inputs.cache-version }}-${{ hashFiles(steps.go-module-path.outputs.path) }}
+          restore-keys: |
+            ${{ runner.os }}-gomod-${{ inputs.cache-version }}-
+
+      - uses: actions/cache@v3
+        if: ${{ inputs.only-modules == 'false' }}
+        name: Cache Go Build Outputs
+        with:
+          path: |
+            ${{ steps.go-cache-dir.outputs.gobuildcache }}
+          # The lifetime of go build outputs is pretty short, so we make our primary cache key be the branch name
+          key: ${{ runner.os }}-gobuild-${{ inputs.cache-version }}-${{ hashFiles(steps.go-module-path.outputs.path) }}-${{ steps.branch-name.outputs.current_branch }}
+          restore-keys: |
+            ${{ runner.os }}-gobuild-${{ inputs.cache-version }}-${{ hashFiles(steps.go-module-path.outputs.path) }}-
+            ${{ runner.os }}-gobuild-${{ inputs.cache-version }}-
diff --git a/.github/actions/setup-hardhat/action.yaml b/.github/actions/setup-hardhat/action.yaml
new file mode 100644
index 00000000..3b52a4b8
--- /dev/null
+++ b/.github/actions/setup-hardhat/action.yaml
@@ -0,0 +1,31 @@
+name: Setup NodeJS
+inputs:
+  namespace:
+    required: true
+    description: A cache namespace to add
+  cache-version:
+    default: "6"
+    description: Change to bust cache
+description: Setup pnpm for contracts
+runs:
+  using: composite
+  steps:
+    - name: Cache Compilers
+      uses: actions/cache@v3
+      with:
+        path: ~/.cache/hardhat-nodejs/
+        key: contracts-compilers-${{ runner.os }}-${{ inputs.cache-version }}-${{ hashFiles('contracts/pnpm-lock.yaml', 'contracts/hardhat.config.ts') }}
+
+    - name: Cache contracts build outputs
+      uses: actions/cache@v3
+      with:
+        path: |
contracts/cache/ + contracts/artifacts/ + contracts/typechain/ + key: ${{ format('contracts-{0}-{1}-{2}-{3}', runner.os, inputs.cache-version, inputs.namespace, hashFiles('contracts/pnpm-lock.yaml', 'contracts/hardhat.config.ts', 'contracts/src/**/*.sol')) }} + + - name: Compile contracts + shell: bash + run: pnpm compile + working-directory: contracts diff --git a/.github/actions/setup-merge-base64-config/action.yml b/.github/actions/setup-merge-base64-config/action.yml new file mode 100644 index 00000000..1d4db5c5 --- /dev/null +++ b/.github/actions/setup-merge-base64-config/action.yml @@ -0,0 +1,54 @@ +name: Merge Base64 Config +description: A composite action that merges user-provided Base64-encoded config with repository's secrets + +inputs: + base64Config: + description: Base64-encoded config to decode + +runs: + using: composite + steps: + - name: Add masks and export base64 config + shell: bash + run: | + BASE64_CONFIG_OVERRIDE=$(jq -r '.inputs.base64Config' $GITHUB_EVENT_PATH) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + + decoded_toml=$(echo $BASE64_CONFIG_OVERRIDE | base64 -d) + PLUGIN_IMAGE=$(echo "$decoded_toml" | awk -F'=' '/^[[:space:]]*image[[:space:]]*=/ {gsub(/^[[:space:]]+|[[:space:]]+$/, "", $2); print $2}' 2>/dev/null) + echo ::add-mask::$PLUGIN_IMAGE + PLUGIN_VERSION=$(echo "$decoded_toml" | awk -F'=' '/^[[:space:]]*version[[:space:]]*=/ {gsub(/^[[:space:]]+|[[:space:]]+$/, "", $2); print $2}' 2>/dev/null) + NETWORKS=$(echo "$decoded_toml" | awk -F'=' '/^[[:space:]]*selected_networks[[:space:]]*=/ {gsub(/^[[:space:]]+|[[:space:]]+$/, "", $2); print $2}' 2>/dev/null) + + if [ -n "$PLUGIN_IMAGE" ]; then + echo "PLUGIN_IMAGE=$PLUGIN_IMAGE" >> $GITHUB_ENV + else + echo "No Plugin Image found in base64-ed config. 
Exiting" + exit 1 + fi + if [ -n "$PLUGIN_VERSION" ]; then + echo "PLUGIN_VERSION=$PLUGIN_VERSION" >> $GITHUB_ENV + else + echo "No Plugin Version found in base64-ed config. Exiting" + exit 1 + fi + if [ -n "$NETWORKS" ]; then + echo "NETWORKS=$NETWORKS" >> $GITHUB_ENV + fi + + # use Loki config from GH secrets and merge it with base64 input + cat << EOF > config.toml + [Logging.Loki] + tenant_id="$LOKI_TENANT_ID" + endpoint="$LOKI_URL" + basic_auth="$LOKI_BASIC_AUTH" + # legacy, you only need this to access the cloud version + # bearer_token="bearer_token" + EOF + + echo "$decoded_toml" >> final_config.toml + cat config.toml >> final_config.toml + BASE64_CONFIG_OVERRIDE=$(cat final_config.toml | base64 -w 0) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV \ No newline at end of file diff --git a/.github/actions/setup-nodejs/action.yaml b/.github/actions/setup-nodejs/action.yaml new file mode 100644 index 00000000..e0bdaebe --- /dev/null +++ b/.github/actions/setup-nodejs/action.yaml @@ -0,0 +1,30 @@ +name: Setup NodeJS +inputs: + prod: + default: "false" + description: Set to 'true' to do a prod only install +description: Setup pnpm for contracts +runs: + using: composite + steps: + - uses: pnpm/action-setup@a3252b78c470c02df07e9d59298aecedc3ccdd6d # v3.0.0 + with: + version: ^7.0.0 + + - uses: actions/setup-node@8f152de45cc393bb48ce5d89d36b731f54556e65 # v4.0.0 + with: + node-version: "16" + cache: "pnpm" + cache-dependency-path: "contracts/pnpm-lock.yaml" + + - if: ${{ inputs.prod == 'false' }} + name: Install dependencies + shell: bash + run: pnpm i + working-directory: contracts + + - if: ${{ inputs.prod == 'true' }} + name: Install prod dependencies + shell: bash + run: pnpm i --prod + working-directory: contracts diff --git a/.github/actions/setup-parse-base64-config/action.yml b/.github/actions/setup-parse-base64-config/action.yml new file mode 100644 index 00000000..bccbdced --- /dev/null 
+++ b/.github/actions/setup-parse-base64-config/action.yml @@ -0,0 +1,38 @@ +name: Parse Base64 Config +description: A composite action that extracts the plugin image, version and network from a base64-encoded config + +inputs: + base64Config: + description: Base64-encoded config to decode + +runs: + using: composite + steps: + - name: Add masks and export base64 config + shell: bash + run: | + decoded_toml=$(echo $BASE64_CONFIG_OVERRIDE | base64 -d) + PLUGIN_IMAGE=$(echo "$decoded_toml" | awk -F'=' '/^[[:space:]]*image[[:space:]]*=/ {gsub(/^[[:space:]]+|[[:space:]]+$/, "", $2); print $2}' 2>/dev/null) + echo ::add-mask::$PLUGIN_IMAGE + PLUGIN_VERSION=$(echo "$decoded_toml" | awk -F'=' '/^[[:space:]]*version[[:space:]]*=/ {gsub(/^[[:space:]]+|[[:space:]]+$/, "", $2); print $2}' 2>/dev/null) + NETWORKS=$(echo "$decoded_toml" | awk -F'=' '/^[[:space:]]*selected_networks[[:space:]]*=/ {gsub(/^[[:space:]]+|[[:space:]]+$/, "", $2); print $2}' 2>/dev/null) + ETH2_EL_CLIENT=$(echo "$decoded_toml" | awk -F'=' '/^[[:space:]]*execution_layer[[:space:]]*=/ {gsub(/^[[:space:]]+|[[:space:]]+$/, "", $2); print $2}' 2>/dev/null) + + if [ -n "$PLUGIN_IMAGE" ]; then + echo "PLUGIN_IMAGE=$PLUGIN_IMAGE" >> $GITHUB_ENV + else + echo "No Plugin Image found in base64-ed config. Exiting" + exit 1 + fi + if [ -n "$PLUGIN_VERSION" ]; then + echo "PLUGIN_VERSION=$PLUGIN_VERSION" >> $GITHUB_ENV + else + echo "No Plugin Version found in base64-ed config. 
Exiting" + exit 1 + fi + if [ -n "$NETWORKS" ]; then + echo "NETWORKS=$NETWORKS" >> $GITHUB_ENV + fi + if [ -n "$ETH2_EL_CLIENT" ]; then + echo "ETH2_EL_CLIENT=$ETH2_EL_CLIENT" >> $GITHUB_ENV + fi \ No newline at end of file diff --git a/.github/actions/setup-postgres/.env b/.github/actions/setup-postgres/.env new file mode 100644 index 00000000..ef5950ff --- /dev/null +++ b/.github/actions/setup-postgres/.env @@ -0,0 +1,5 @@ +POSTGRES_USER=postgres +POSTGRES_OPTIONS="-c max_connections=1000 -c shared_buffers=2GB -c log_lock_waits=true" +POSTGRES_PASSWORD=postgres +POSTGRES_DB=plugin_test +POSTGRES_HOST_AUTH_METHOD=trust diff --git a/.github/actions/setup-postgres/action.yml b/.github/actions/setup-postgres/action.yml new file mode 100644 index 00000000..f683934d --- /dev/null +++ b/.github/actions/setup-postgres/action.yml @@ -0,0 +1,13 @@ +name: Setup Postgresql +description: Setup postgres docker container via docker-compose, allowing usage of a custom command, see https://github.com/orgs/community/discussions/26688 +runs: + using: composite + steps: + - name: Start postgres service + run: docker compose up -d + shell: bash + working-directory: ./.github/actions/setup-postgres + - name: Wait for postgres service to be healthy + run: ./wait-for-healthy-postgres.sh + shell: bash + working-directory: ./.github/actions/setup-postgres diff --git a/.github/actions/setup-postgres/docker-compose.yml b/.github/actions/setup-postgres/docker-compose.yml new file mode 100644 index 00000000..3acaa1ec --- /dev/null +++ b/.github/actions/setup-postgres/docker-compose.yml @@ -0,0 +1,16 @@ +name: gha_postgres +version: "3.8" +services: + postgres: + ports: + - "5432:5432" + container_name: cl_pg + image: postgres:14-alpine + command: postgres ${POSTGRES_OPTIONS} + env_file: + - .env + healthcheck: + test: "pg_isready -d ${POSTGRES_DB} -U ${POSTGRES_USER}" + interval: 2s + timeout: 5s + retries: 5 diff --git a/.github/actions/setup-postgres/wait-for-healthy-postgres.sh 
b/.github/actions/setup-postgres/wait-for-healthy-postgres.sh new file mode 100644 index 00000000..438cfbaf --- /dev/null +++ b/.github/actions/setup-postgres/wait-for-healthy-postgres.sh @@ -0,0 +1,25 @@ +#!/bin/bash +RETRIES=10 + +until [ $RETRIES -eq 0 ]; do + DOCKER_OUTPUT=$(docker compose ps postgres --status running --format json) + JSON_TYPE=$(echo "$DOCKER_OUTPUT" | jq -r 'type') + + if [ "$JSON_TYPE" == "array" ]; then + HEALTH_STATUS=$(echo "$DOCKER_OUTPUT" | jq -r '.[0].Health') + elif [ "$JSON_TYPE" == "object" ]; then + HEALTH_STATUS=$(echo "$DOCKER_OUTPUT" | jq -r '.Health') + else + HEALTH_STATUS="Unknown JSON type: $JSON_TYPE" + fi + + echo "postgres health status: $HEALTH_STATUS" + if [ "$HEALTH_STATUS" == "healthy" ]; then + exit 0 + fi + + echo "Waiting for postgres server, $((RETRIES--)) remaining attempts..." + sleep 2 +done + +exit 1 diff --git a/.github/actions/setup-solana/action.yml b/.github/actions/setup-solana/action.yml new file mode 100644 index 00000000..c50ccd58 --- /dev/null +++ b/.github/actions/setup-solana/action.yml @@ -0,0 +1,21 @@ +name: Setup Solana CLI +description: Setup solana CLI +runs: + using: composite + steps: + - uses: actions/cache@v3 + id: cache + name: Cache solana CLI + with: + path: | + ~/.local/share/solana/install/active_release/bin + key: ${{ runner.os }}-solana-cli-${{ hashFiles('./tools/ci/install_solana') }} + + - if: ${{ steps.cache.outputs.cache-hit != 'true' }} + name: Install solana cli + shell: bash + run: ./tools/ci/install_solana + + - name: Export solana path to env + shell: bash + run: echo "PATH=$HOME/.local/share/solana/install/active_release/bin:$PATH" >> $GITHUB_ENV diff --git a/.github/actions/setup-wasmd/action.yml b/.github/actions/setup-wasmd/action.yml new file mode 100644 index 00000000..46fb84ba --- /dev/null +++ b/.github/actions/setup-wasmd/action.yml @@ -0,0 +1,22 @@ +name: Setup Cosmos wasmd +description: Setup Cosmos wasmd, used for integration tests +runs: + using: composite + 
steps: + - uses: actions/cache@v3 + id: cache + name: Cache wasmd-build + with: + path: ~/wasmd-build + # this caching works without cloning the repo because the install_wasmd contains + # the commit hash. + key: ${{ runner.os }}-wasmd-cli-${{ hashFiles('./tools/ci/install_wasmd') }} + + - if: ${{ steps.cache.outputs.cache-hit != 'true' }} + name: Install wasmd + shell: bash + run: ./tools/ci/install_wasmd + + - name: Export wasmd path to env + shell: bash + run: echo "PATH=$HOME/wasmd-build/bin:$PATH" >> $GITHUB_ENV diff --git a/.github/actions/split-tests/.npmrc b/.github/actions/split-tests/.npmrc new file mode 100644 index 00000000..4c2f52b3 --- /dev/null +++ b/.github/actions/split-tests/.npmrc @@ -0,0 +1,2 @@ +auto-install-peers=true +strict-peer-dependencies=false diff --git a/.github/actions/split-tests/action.yaml b/.github/actions/split-tests/action.yaml new file mode 100644 index 00000000..684fd6a2 --- /dev/null +++ b/.github/actions/split-tests/action.yaml @@ -0,0 +1,35 @@ +name: Test Spliting +description: Split tests +inputs: + config: + required: true + description: The path to the splitting config +outputs: + splits: + description: The generated test splits + value: ${{ steps.split.outputs.splits }} +runs: + using: composite + steps: + - uses: pnpm/action-setup@c3b53f6a16e57305370b4ae5a540c2077a1d50dd #v2.2.4 + with: + version: ^7.0.0 + + - uses: actions/setup-node@8f152de45cc393bb48ce5d89d36b731f54556e65 # v4.0.0 + with: + node-version: "16" + cache: "pnpm" + cache-dependency-path: "./.github/actions/split-tests/pnpm-lock.yaml" + + - name: Install dependencies + shell: bash + run: pnpm i --prod + working-directory: "./.github/actions/split-tests" + + - name: Run test spliter + id: split + shell: bash + run: pnpm start + env: + CONFIG: ${{ inputs.config }} + working-directory: "./.github/actions/split-tests" diff --git a/.github/actions/split-tests/jest.config.js b/.github/actions/split-tests/jest.config.js new file mode 100644 index 
00000000..7b3dcf29 --- /dev/null +++ b/.github/actions/split-tests/jest.config.js @@ -0,0 +1,15 @@ +/** @type {import('ts-jest').JestConfigWithTsJest} */ +const jestConfig = { + preset: "ts-jest/presets/default-esm", + resolver: "/mjs-resolver.ts", + transform: { + "^.+\\.mts?$": [ + "ts-jest", + { + useESM: true, + }, + ], + }, + testEnvironment: "node", +}; +export default jestConfig; diff --git a/.github/actions/split-tests/mjs-resolver.ts b/.github/actions/split-tests/mjs-resolver.ts new file mode 100644 index 00000000..92c66f7b --- /dev/null +++ b/.github/actions/split-tests/mjs-resolver.ts @@ -0,0 +1,15 @@ +const mjsResolver = (path, options) => { + const mjsExtRegex = /\.mjs$/i; + const resolver = options.defaultResolver; + if (mjsExtRegex.test(path)) { + try { + return resolver(path.replace(mjsExtRegex, ".mts"), options); + } catch { + // use default resolver + } + } + + return resolver(path, options); +}; + +module.exports = mjsResolver; diff --git a/.github/actions/split-tests/package.json b/.github/actions/split-tests/package.json new file mode 100644 index 00000000..1624bda7 --- /dev/null +++ b/.github/actions/split-tests/package.json @@ -0,0 +1,26 @@ +{ + "name": "shard-packages", + "version": "1.0.0", + "description": "", + "type": "module", + "main": "index.js", + "scripts": { + "start": "ts-node -T --esm ./src/index.mts", + "test": "node --experimental-vm-modules --no-warnings node_modules/jest/bin/jest.js" + }, + "keywords": [], + "author": "", + "license": "MIT", + "dependencies": { + "@actions/core": "^1.10.0", + "ts-node": "^10.9.1", + "zx": "^7.0.8" + }, + "devDependencies": { + "@types/jest": "^29.1.2", + "@types/node": "^18.8.2", + "jest": "^29.1.2", + "ts-jest": "^29.0.3", + "typescript": "^5.2.2" + } +} diff --git a/.github/actions/split-tests/pnpm-lock.yaml b/.github/actions/split-tests/pnpm-lock.yaml new file mode 100644 index 00000000..9b5deb25 --- /dev/null +++ b/.github/actions/split-tests/pnpm-lock.yaml @@ -0,0 +1,2675 @@ 
+lockfileVersion: '6.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +dependencies: + '@actions/core': + specifier: ^1.10.0 + version: 1.10.0 + ts-node: + specifier: ^10.9.1 + version: 10.9.1(@types/node@18.8.2)(typescript@5.2.2) + zx: + specifier: ^7.0.8 + version: 7.0.8 + +devDependencies: + '@types/jest': + specifier: ^29.1.2 + version: 29.1.2 + '@types/node': + specifier: ^18.8.2 + version: 18.8.2 + jest: + specifier: ^29.1.2 + version: 29.1.2(@types/node@18.8.2)(ts-node@10.9.1) + ts-jest: + specifier: ^29.0.3 + version: 29.0.3(@babel/core@7.19.3)(jest@29.1.2)(typescript@5.2.2) + typescript: + specifier: ^5.2.2 + version: 5.2.2 + +packages: + + /@actions/core@1.10.0: + resolution: {integrity: sha512-2aZDDa3zrrZbP5ZYg159sNoLRb61nQ7awl5pSvIq5Qpj81vwDzdMRKzkWJGJuwVvWpvZKx7vspJALyvaaIQyug==} + dependencies: + '@actions/http-client': 2.0.1 + uuid: 8.3.2 + dev: false + + /@actions/http-client@2.0.1: + resolution: {integrity: sha512-PIXiMVtz6VvyaRsGY268qvj57hXQEpsYogYOu2nrQhlf+XCGmZstmuZBbAybUl1nQGnvS1k1eEsQ69ZoD7xlSw==} + dependencies: + tunnel: 0.0.6 + dev: false + + /@ampproject/remapping@2.2.0: + resolution: {integrity: sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w==} + engines: {node: '>=6.0.0'} + dependencies: + '@jridgewell/gen-mapping': 0.1.1 + '@jridgewell/trace-mapping': 0.3.15 + dev: true + + /@babel/code-frame@7.18.6: + resolution: {integrity: sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/highlight': 7.18.6 + dev: true + + /@babel/compat-data@7.19.3: + resolution: {integrity: sha512-prBHMK4JYYK+wDjJF1q99KK4JLL+egWS4nmNqdlMUgCExMZ+iZW0hGhyC3VEbsPjvaN0TBhW//VIFwBrk8sEiw==} + engines: {node: '>=6.9.0'} + dev: true + + /@babel/core@7.19.3: + resolution: {integrity: sha512-WneDJxdsjEvyKtXKsaBGbDeiyOjR5vYq4HcShxnIbG0qixpoHjI3MqeZM9NDvsojNCEBItQE4juOo/bU6e72gQ==} + engines: {node: 
'>=6.9.0'} + dependencies: + '@ampproject/remapping': 2.2.0 + '@babel/code-frame': 7.18.6 + '@babel/generator': 7.19.3 + '@babel/helper-compilation-targets': 7.19.3(@babel/core@7.19.3) + '@babel/helper-module-transforms': 7.19.0 + '@babel/helpers': 7.19.0 + '@babel/parser': 7.19.3 + '@babel/template': 7.18.10 + '@babel/traverse': 7.19.3 + '@babel/types': 7.19.3 + convert-source-map: 1.8.0 + debug: 4.3.4 + gensync: 1.0.0-beta.2 + json5: 2.2.1 + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/generator@7.19.3: + resolution: {integrity: sha512-fqVZnmp1ncvZU757UzDheKZpfPgatqY59XtW2/j/18H7u76akb8xqvjw82f+i2UKd/ksYsSick/BCLQUUtJ/qQ==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.19.3 + '@jridgewell/gen-mapping': 0.3.2 + jsesc: 2.5.2 + dev: true + + /@babel/helper-compilation-targets@7.19.3(@babel/core@7.19.3): + resolution: {integrity: sha512-65ESqLGyGmLvgR0mst5AdW1FkNlj9rQsCKduzEoEPhBCDFGXvz2jW6bXFG6i0/MrV2s7hhXjjb2yAzcPuQlLwg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/compat-data': 7.19.3 + '@babel/core': 7.19.3 + '@babel/helper-validator-option': 7.18.6 + browserslist: 4.21.4 + semver: 6.3.0 + dev: true + + /@babel/helper-environment-visitor@7.18.9: + resolution: {integrity: sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==} + engines: {node: '>=6.9.0'} + dev: true + + /@babel/helper-function-name@7.19.0: + resolution: {integrity: sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/template': 7.18.10 + '@babel/types': 7.19.3 + dev: true + + /@babel/helper-hoist-variables@7.18.6: + resolution: {integrity: sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.19.3 + dev: true + + 
/@babel/helper-module-imports@7.18.6: + resolution: {integrity: sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.19.3 + dev: true + + /@babel/helper-module-transforms@7.19.0: + resolution: {integrity: sha512-3HBZ377Fe14RbLIA+ac3sY4PTgpxHVkFrESaWhoI5PuyXPBBX8+C34qblV9G89ZtycGJCmCI/Ut+VUDK4bltNQ==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/helper-environment-visitor': 7.18.9 + '@babel/helper-module-imports': 7.18.6 + '@babel/helper-simple-access': 7.18.6 + '@babel/helper-split-export-declaration': 7.18.6 + '@babel/helper-validator-identifier': 7.19.1 + '@babel/template': 7.18.10 + '@babel/traverse': 7.19.3 + '@babel/types': 7.19.3 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/helper-plugin-utils@7.19.0: + resolution: {integrity: sha512-40Ryx7I8mT+0gaNxm8JGTZFUITNqdLAgdg0hXzeVZxVD6nFsdhQvip6v8dqkRHzsz1VFpFAaOCHNn0vKBL7Czw==} + engines: {node: '>=6.9.0'} + dev: true + + /@babel/helper-simple-access@7.18.6: + resolution: {integrity: sha512-iNpIgTgyAvDQpDj76POqg+YEt8fPxx3yaNBg3S30dxNKm2SWfYhD0TGrK/Eu9wHpUW63VQU894TsTg+GLbUa1g==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.19.3 + dev: true + + /@babel/helper-split-export-declaration@7.18.6: + resolution: {integrity: sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/types': 7.19.3 + dev: true + + /@babel/helper-string-parser@7.18.10: + resolution: {integrity: sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==} + engines: {node: '>=6.9.0'} + dev: true + + /@babel/helper-validator-identifier@7.19.1: + resolution: {integrity: sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==} + engines: {node: '>=6.9.0'} + dev: true + + /@babel/helper-validator-option@7.18.6: + 
resolution: {integrity: sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw==} + engines: {node: '>=6.9.0'} + dev: true + + /@babel/helpers@7.19.0: + resolution: {integrity: sha512-DRBCKGwIEdqY3+rPJgG/dKfQy9+08rHIAJx8q2p+HSWP87s2HCrQmaAMMyMll2kIXKCW0cO1RdQskx15Xakftg==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/template': 7.18.10 + '@babel/traverse': 7.19.3 + '@babel/types': 7.19.3 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/highlight@7.18.6: + resolution: {integrity: sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/helper-validator-identifier': 7.19.1 + chalk: 2.4.2 + js-tokens: 4.0.0 + dev: true + + /@babel/parser@7.19.3: + resolution: {integrity: sha512-pJ9xOlNWHiy9+FuFP09DEAFbAn4JskgRsVcc169w2xRBC3FRGuQEwjeIMMND9L2zc0iEhO/tGv4Zq+km+hxNpQ==} + engines: {node: '>=6.0.0'} + hasBin: true + dependencies: + '@babel/types': 7.19.3 + dev: true + + /@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.19.3): + resolution: {integrity: sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-bigint@7.8.3(@babel/core@7.19.3): + resolution: {integrity: sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.19.3): + resolution: {integrity: sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + 
'@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.19.3): + resolution: {integrity: sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.19.3): + resolution: {integrity: sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-jsx@7.18.6(@babel/core@7.19.3): + resolution: {integrity: sha512-6mmljtAedFGTWu2p/8WIORGwy+61PLgOMPOdazc7YoJ9ZCWUyFy3A6CpPkRKLKD1ToAesxX8KGEViAiLo9N+7Q==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.19.3): + resolution: {integrity: sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.19.3): + resolution: {integrity: sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.19.3): + resolution: {integrity: sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + 
'@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.19.3): + resolution: {integrity: sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.19.3): + resolution: {integrity: sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.19.3): + resolution: {integrity: sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.19.3): + resolution: {integrity: sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/plugin-syntax-typescript@7.18.6(@babel/core@7.19.3): + resolution: {integrity: sha512-mAWAuq4rvOepWCBid55JuRNvpTNf2UGVgoz4JV0fXEKolsVZDzsa4NqCef758WZJj/GDu0gVGItjKFiClTAmZA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + dependencies: + '@babel/core': 7.19.3 + '@babel/helper-plugin-utils': 7.19.0 + dev: true + + /@babel/template@7.18.10: + resolution: {integrity: sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==} + engines: {node: '>=6.9.0'} + dependencies: + 
'@babel/code-frame': 7.18.6 + '@babel/parser': 7.19.3 + '@babel/types': 7.19.3 + dev: true + + /@babel/traverse@7.19.3: + resolution: {integrity: sha512-qh5yf6149zhq2sgIXmwjnsvmnNQC2iw70UFjp4olxucKrWd/dvlUsBI88VSLUsnMNF7/vnOiA+nk1+yLoCqROQ==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/code-frame': 7.18.6 + '@babel/generator': 7.19.3 + '@babel/helper-environment-visitor': 7.18.9 + '@babel/helper-function-name': 7.19.0 + '@babel/helper-hoist-variables': 7.18.6 + '@babel/helper-split-export-declaration': 7.18.6 + '@babel/parser': 7.19.3 + '@babel/types': 7.19.3 + debug: 4.3.4 + globals: 11.12.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@babel/types@7.19.3: + resolution: {integrity: sha512-hGCaQzIY22DJlDh9CH7NOxgKkFjBk0Cw9xDO1Xmh2151ti7wiGfQ3LauXzL4HP1fmFlTX6XjpRETTpUcv7wQLw==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/helper-string-parser': 7.18.10 + '@babel/helper-validator-identifier': 7.19.1 + to-fast-properties: 2.0.0 + dev: true + + /@bcoe/v8-coverage@0.2.3: + resolution: {integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==} + dev: true + + /@cspotcode/source-map-support@0.8.1: + resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} + engines: {node: '>=12'} + dependencies: + '@jridgewell/trace-mapping': 0.3.9 + + /@istanbuljs/load-nyc-config@1.1.0: + resolution: {integrity: sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==} + engines: {node: '>=8'} + dependencies: + camelcase: 5.3.1 + find-up: 4.1.0 + get-package-type: 0.1.0 + js-yaml: 3.14.1 + resolve-from: 5.0.0 + dev: true + + /@istanbuljs/schema@0.1.3: + resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==} + engines: {node: '>=8'} + dev: true + + /@jest/console@29.1.2: + resolution: {integrity: 
sha512-ujEBCcYs82BTmRxqfHMQggSlkUZP63AE5YEaTPj7eFyJOzukkTorstOUC7L6nE3w5SYadGVAnTsQ/ZjTGL0qYQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/types': 29.1.2 + '@types/node': 18.8.2 + chalk: 4.1.2 + jest-message-util: 29.1.2 + jest-util: 29.1.2 + slash: 3.0.0 + dev: true + + /@jest/core@29.1.2(ts-node@10.9.1): + resolution: {integrity: sha512-sCO2Va1gikvQU2ynDN8V4+6wB7iVrD2CvT0zaRst4rglf56yLly0NQ9nuRRAWFeimRf+tCdFsb1Vk1N9LrrMPA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + dependencies: + '@jest/console': 29.1.2 + '@jest/reporters': 29.1.2 + '@jest/test-result': 29.1.2 + '@jest/transform': 29.1.2 + '@jest/types': 29.1.2 + '@types/node': 18.8.2 + ansi-escapes: 4.3.2 + chalk: 4.1.2 + ci-info: 3.4.0 + exit: 0.1.2 + graceful-fs: 4.2.10 + jest-changed-files: 29.0.0 + jest-config: 29.1.2(@types/node@18.8.2)(ts-node@10.9.1) + jest-haste-map: 29.1.2 + jest-message-util: 29.1.2 + jest-regex-util: 29.0.0 + jest-resolve: 29.1.2 + jest-resolve-dependencies: 29.1.2 + jest-runner: 29.1.2 + jest-runtime: 29.1.2 + jest-snapshot: 29.1.2 + jest-util: 29.1.2 + jest-validate: 29.1.2 + jest-watcher: 29.1.2 + micromatch: 4.0.5 + pretty-format: 29.1.2 + slash: 3.0.0 + strip-ansi: 6.0.1 + transitivePeerDependencies: + - supports-color + - ts-node + dev: true + + /@jest/environment@29.1.2: + resolution: {integrity: sha512-rG7xZ2UeOfvOVzoLIJ0ZmvPl4tBEQ2n73CZJSlzUjPw4or1oSWC0s0Rk0ZX+pIBJ04aVr6hLWFn1DFtrnf8MhQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/fake-timers': 29.1.2 + '@jest/types': 29.1.2 + '@types/node': 18.8.2 + jest-mock: 29.1.2 + dev: true + + /@jest/expect-utils@29.1.2: + resolution: {integrity: sha512-4a48bhKfGj/KAH39u0ppzNTABXQ8QPccWAFUFobWBaEMSMp+sB31Z2fK/l47c4a/Mu1po2ffmfAIPxXbVTXdtg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + jest-get-type: 29.0.0 
+ dev: true + + /@jest/expect@29.1.2: + resolution: {integrity: sha512-FXw/UmaZsyfRyvZw3M6POgSNqwmuOXJuzdNiMWW9LCYo0GRoRDhg+R5iq5higmRTHQY7hx32+j7WHwinRmoILQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + expect: 29.1.2 + jest-snapshot: 29.1.2 + transitivePeerDependencies: + - supports-color + dev: true + + /@jest/fake-timers@29.1.2: + resolution: {integrity: sha512-GppaEqS+QQYegedxVMpCe2xCXxxeYwQ7RsNx55zc8f+1q1qevkZGKequfTASI7ejmg9WwI+SJCrHe9X11bLL9Q==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/types': 29.1.2 + '@sinonjs/fake-timers': 9.1.2 + '@types/node': 18.8.2 + jest-message-util: 29.1.2 + jest-mock: 29.1.2 + jest-util: 29.1.2 + dev: true + + /@jest/globals@29.1.2: + resolution: {integrity: sha512-uMgfERpJYoQmykAd0ffyMq8wignN4SvLUG6orJQRe9WAlTRc9cdpCaE/29qurXixYJVZWUqIBXhSk8v5xN1V9g==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/environment': 29.1.2 + '@jest/expect': 29.1.2 + '@jest/types': 29.1.2 + jest-mock: 29.1.2 + transitivePeerDependencies: + - supports-color + dev: true + + /@jest/reporters@29.1.2: + resolution: {integrity: sha512-X4fiwwyxy9mnfpxL0g9DD0KcTmEIqP0jUdnc2cfa9riHy+I6Gwwp5vOZiwyg0vZxfSDxrOlK9S4+340W4d+DAA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + dependencies: + '@bcoe/v8-coverage': 0.2.3 + '@jest/console': 29.1.2 + '@jest/test-result': 29.1.2 + '@jest/transform': 29.1.2 + '@jest/types': 29.1.2 + '@jridgewell/trace-mapping': 0.3.15 + '@types/node': 18.8.2 + chalk: 4.1.2 + collect-v8-coverage: 1.0.1 + exit: 0.1.2 + glob: 7.2.3 + graceful-fs: 4.2.10 + istanbul-lib-coverage: 3.2.0 + istanbul-lib-instrument: 5.2.1 + istanbul-lib-report: 3.0.0 + istanbul-lib-source-maps: 4.0.1 + istanbul-reports: 3.1.5 + jest-message-util: 29.1.2 + jest-util: 29.1.2 + jest-worker: 29.1.2 + slash: 3.0.0 + string-length: 4.0.2 + 
strip-ansi: 6.0.1 + terminal-link: 2.1.1 + v8-to-istanbul: 9.0.1 + transitivePeerDependencies: + - supports-color + dev: true + + /@jest/schemas@29.0.0: + resolution: {integrity: sha512-3Ab5HgYIIAnS0HjqJHQYZS+zXc4tUmTmBH3z83ajI6afXp8X3ZtdLX+nXx+I7LNkJD7uN9LAVhgnjDgZa2z0kA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@sinclair/typebox': 0.24.44 + dev: true + + /@jest/source-map@29.0.0: + resolution: {integrity: sha512-nOr+0EM8GiHf34mq2GcJyz/gYFyLQ2INDhAylrZJ9mMWoW21mLBfZa0BUVPPMxVYrLjeiRe2Z7kWXOGnS0TFhQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jridgewell/trace-mapping': 0.3.15 + callsites: 3.1.0 + graceful-fs: 4.2.10 + dev: true + + /@jest/test-result@29.1.2: + resolution: {integrity: sha512-jjYYjjumCJjH9hHCoMhA8PCl1OxNeGgAoZ7yuGYILRJX9NjgzTN0pCT5qAoYR4jfOP8htIByvAlz9vfNSSBoVg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/console': 29.1.2 + '@jest/types': 29.1.2 + '@types/istanbul-lib-coverage': 2.0.4 + collect-v8-coverage: 1.0.1 + dev: true + + /@jest/test-sequencer@29.1.2: + resolution: {integrity: sha512-fU6dsUqqm8sA+cd85BmeF7Gu9DsXVWFdGn9taxM6xN1cKdcP/ivSgXh5QucFRFz1oZxKv3/9DYYbq0ULly3P/Q==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/test-result': 29.1.2 + graceful-fs: 4.2.10 + jest-haste-map: 29.1.2 + slash: 3.0.0 + dev: true + + /@jest/transform@29.1.2: + resolution: {integrity: sha512-2uaUuVHTitmkx1tHF+eBjb4p7UuzBG7SXIaA/hNIkaMP6K+gXYGxP38ZcrofzqN0HeZ7A90oqsOa97WU7WZkSw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@babel/core': 7.19.3 + '@jest/types': 29.1.2 + '@jridgewell/trace-mapping': 0.3.15 + babel-plugin-istanbul: 6.1.1 + chalk: 4.1.2 + convert-source-map: 1.8.0 + fast-json-stable-stringify: 2.1.0 + graceful-fs: 4.2.10 + jest-haste-map: 29.1.2 + jest-regex-util: 29.0.0 + jest-util: 29.1.2 + micromatch: 4.0.5 + pirates: 4.0.5 + slash: 3.0.0 + write-file-atomic: 4.0.2 + transitivePeerDependencies: 
+ - supports-color + dev: true + + /@jest/types@29.1.2: + resolution: {integrity: sha512-DcXGtoTykQB5jiwCmVr8H4vdg2OJhQex3qPkG+ISyDO7xQXbt/4R6dowcRyPemRnkH7JoHvZuxPBdlq+9JxFCg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/schemas': 29.0.0 + '@types/istanbul-lib-coverage': 2.0.4 + '@types/istanbul-reports': 3.0.1 + '@types/node': 18.8.2 + '@types/yargs': 17.0.13 + chalk: 4.1.2 + dev: true + + /@jridgewell/gen-mapping@0.1.1: + resolution: {integrity: sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w==} + engines: {node: '>=6.0.0'} + dependencies: + '@jridgewell/set-array': 1.1.2 + '@jridgewell/sourcemap-codec': 1.4.14 + dev: true + + /@jridgewell/gen-mapping@0.3.2: + resolution: {integrity: sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==} + engines: {node: '>=6.0.0'} + dependencies: + '@jridgewell/set-array': 1.1.2 + '@jridgewell/sourcemap-codec': 1.4.14 + '@jridgewell/trace-mapping': 0.3.15 + dev: true + + /@jridgewell/resolve-uri@3.1.0: + resolution: {integrity: sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==} + engines: {node: '>=6.0.0'} + + /@jridgewell/set-array@1.1.2: + resolution: {integrity: sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==} + engines: {node: '>=6.0.0'} + dev: true + + /@jridgewell/sourcemap-codec@1.4.14: + resolution: {integrity: sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==} + + /@jridgewell/trace-mapping@0.3.15: + resolution: {integrity: sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==} + dependencies: + '@jridgewell/resolve-uri': 3.1.0 + '@jridgewell/sourcemap-codec': 1.4.14 + dev: true + + /@jridgewell/trace-mapping@0.3.9: + resolution: {integrity: 
sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} + dependencies: + '@jridgewell/resolve-uri': 3.1.0 + '@jridgewell/sourcemap-codec': 1.4.14 + + /@nodelib/fs.scandir@2.1.5: + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + dev: false + + /@nodelib/fs.stat@2.0.5: + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + dev: false + + /@nodelib/fs.walk@1.2.8: + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.13.0 + dev: false + + /@sinclair/typebox@0.24.44: + resolution: {integrity: sha512-ka0W0KN5i6LfrSocduwliMMpqVgohtPFidKdMEOUjoOFCHcOOYkKsPRxfs5f15oPNHTm6ERAm0GV/+/LTKeiWg==} + dev: true + + /@sinonjs/commons@1.8.3: + resolution: {integrity: sha512-xkNcLAn/wZaX14RPlwizcKicDk9G3F8m2nU3L7Ukm5zBgTwiT0wsoFAHx9Jq56fJA1z/7uKGtCRu16sOUCLIHQ==} + dependencies: + type-detect: 4.0.8 + dev: true + + /@sinonjs/fake-timers@9.1.2: + resolution: {integrity: sha512-BPS4ynJW/o92PUR4wgriz2Ud5gpST5vz6GQfMixEDK0Z8ZCUv2M7SkBLykH56T++Xs+8ln9zTGbOvNGIe02/jw==} + dependencies: + '@sinonjs/commons': 1.8.3 + dev: true + + /@tsconfig/node10@1.0.9: + resolution: {integrity: sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==} + + /@tsconfig/node12@1.0.11: + resolution: {integrity: sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==} + + /@tsconfig/node14@1.0.3: + resolution: {integrity: sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==} + + /@tsconfig/node16@1.0.3: + resolution: {integrity: 
sha512-yOlFc+7UtL/89t2ZhjPvvB/DeAr3r+Dq58IgzsFkOAvVC6NMJXmCGjbptdXdR9qsX7pKcTL+s87FtYREi2dEEQ==} + + /@types/babel__core@7.1.19: + resolution: {integrity: sha512-WEOTgRsbYkvA/KCsDwVEGkd7WAr1e3g31VHQ8zy5gul/V1qKullU/BU5I68X5v7V3GnB9eotmom4v5a5gjxorw==} + dependencies: + '@babel/parser': 7.19.3 + '@babel/types': 7.19.3 + '@types/babel__generator': 7.6.4 + '@types/babel__template': 7.4.1 + '@types/babel__traverse': 7.18.2 + dev: true + + /@types/babel__generator@7.6.4: + resolution: {integrity: sha512-tFkciB9j2K755yrTALxD44McOrk+gfpIpvC3sxHjRawj6PfnQxrse4Clq5y/Rq+G3mrBurMax/lG8Qn2t9mSsg==} + dependencies: + '@babel/types': 7.19.3 + dev: true + + /@types/babel__template@7.4.1: + resolution: {integrity: sha512-azBFKemX6kMg5Io+/rdGT0dkGreboUVR0Cdm3fz9QJWpaQGJRQXl7C+6hOTCZcMll7KFyEQpgbYI2lHdsS4U7g==} + dependencies: + '@babel/parser': 7.19.3 + '@babel/types': 7.19.3 + dev: true + + /@types/babel__traverse@7.18.2: + resolution: {integrity: sha512-FcFaxOr2V5KZCviw1TnutEMVUVsGt4D2hP1TAfXZAMKuHYW3xQhe3jTxNPWutgCJ3/X1c5yX8ZoGVEItxKbwBg==} + dependencies: + '@babel/types': 7.19.3 + dev: true + + /@types/fs-extra@9.0.13: + resolution: {integrity: sha512-nEnwB++1u5lVDM2UI4c1+5R+FYaKfaAzS4OococimjVm3nQw3TuzH5UNsocrcTBbhnerblyHj4A49qXbIiZdpA==} + dependencies: + '@types/node': 18.8.2 + dev: false + + /@types/graceful-fs@4.1.5: + resolution: {integrity: sha512-anKkLmZZ+xm4p8JWBf4hElkM4XR+EZeA2M9BAkkTldmcyDY4mbdIJnRghDJH3Ov5ooY7/UAoENtmdMSkaAd7Cw==} + dependencies: + '@types/node': 18.8.2 + dev: true + + /@types/istanbul-lib-coverage@2.0.4: + resolution: {integrity: sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==} + dev: true + + /@types/istanbul-lib-report@3.0.0: + resolution: {integrity: sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==} + dependencies: + '@types/istanbul-lib-coverage': 2.0.4 + dev: true + + /@types/istanbul-reports@3.0.1: + resolution: {integrity: 
sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==} + dependencies: + '@types/istanbul-lib-report': 3.0.0 + dev: true + + /@types/jest@29.1.2: + resolution: {integrity: sha512-y+nlX0h87U0R+wsGn6EBuoRWYyv3KFtwRNP3QWp9+k2tJ2/bqcGS3UxD7jgT+tiwJWWq3UsyV4Y+T6rsMT4XMg==} + dependencies: + expect: 29.1.2 + pretty-format: 29.1.2 + dev: true + + /@types/minimist@1.2.2: + resolution: {integrity: sha512-jhuKLIRrhvCPLqwPcx6INqmKeiA5EWrsCOPhrlFSrbrmU4ZMPjj5Ul/oLCMDO98XRUIwVm78xICz4EPCektzeQ==} + dev: false + + /@types/node@18.8.2: + resolution: {integrity: sha512-cRMwIgdDN43GO4xMWAfJAecYn8wV4JbsOGHNfNUIDiuYkUYAR5ec4Rj7IO2SAhFPEfpPtLtUTbbny/TCT7aDwA==} + + /@types/prettier@2.7.1: + resolution: {integrity: sha512-ri0UmynRRvZiiUJdiz38MmIblKK+oH30MztdBVR95dv/Ubw6neWSb8u1XpRb72L4qsZOhz+L+z9JD40SJmfWow==} + dev: true + + /@types/ps-tree@1.1.2: + resolution: {integrity: sha512-ZREFYlpUmPQJ0esjxoG1fMvB2HNaD3z+mjqdSosZvd3RalncI9NEur73P8ZJz4YQdL64CmV1w0RuqoRUlhQRBw==} + dev: false + + /@types/stack-utils@2.0.1: + resolution: {integrity: sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==} + dev: true + + /@types/which@2.0.1: + resolution: {integrity: sha512-Jjakcv8Roqtio6w1gr0D7y6twbhx6gGgFGF5BLwajPpnOIOxFkakFhCq+LmyyeAz7BX6ULrjBOxdKaCDy+4+dQ==} + dev: false + + /@types/yargs-parser@21.0.0: + resolution: {integrity: sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==} + dev: true + + /@types/yargs@17.0.13: + resolution: {integrity: sha512-9sWaruZk2JGxIQU+IhI1fhPYRcQ0UuTNuKuCW9bR5fp7qi2Llf7WDzNa17Cy7TKnh3cdxDOiyTu6gaLS0eDatg==} + dependencies: + '@types/yargs-parser': 21.0.0 + dev: true + + /acorn-walk@8.2.0: + resolution: {integrity: sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==} + engines: {node: '>=0.4.0'} + + /acorn@8.8.0: + resolution: {integrity: 
sha512-QOxyigPVrpZ2GXT+PFyZTl6TtOFc5egxHIP9IlQ+RbupQuX4RkT/Bee4/kQuC02Xkzg84JcT7oLYtDIQxp+v7w==} + engines: {node: '>=0.4.0'} + hasBin: true + + /ansi-escapes@4.3.2: + resolution: {integrity: sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==} + engines: {node: '>=8'} + dependencies: + type-fest: 0.21.3 + dev: true + + /ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + dev: true + + /ansi-styles@3.2.1: + resolution: {integrity: sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==} + engines: {node: '>=4'} + dependencies: + color-convert: 1.9.3 + dev: true + + /ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + dependencies: + color-convert: 2.0.1 + dev: true + + /ansi-styles@5.2.0: + resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==} + engines: {node: '>=10'} + dev: true + + /anymatch@3.1.2: + resolution: {integrity: sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==} + engines: {node: '>= 8'} + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + dev: true + + /arg@4.1.3: + resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==} + + /argparse@1.0.10: + resolution: {integrity: sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==} + dependencies: + sprintf-js: 1.0.3 + dev: true + + /babel-jest@29.1.2(@babel/core@7.19.3): + resolution: {integrity: sha512-IuG+F3HTHryJb7gacC7SQ59A9kO56BctUsT67uJHp1mMCHUOMXpDwOHWGifWqdWVknN2WNkCVQELPjXx0aLJ9Q==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + 
'@babel/core': ^7.8.0 + dependencies: + '@babel/core': 7.19.3 + '@jest/transform': 29.1.2 + '@types/babel__core': 7.1.19 + babel-plugin-istanbul: 6.1.1 + babel-preset-jest: 29.0.2(@babel/core@7.19.3) + chalk: 4.1.2 + graceful-fs: 4.2.10 + slash: 3.0.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-istanbul@6.1.1: + resolution: {integrity: sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==} + engines: {node: '>=8'} + dependencies: + '@babel/helper-plugin-utils': 7.19.0 + '@istanbuljs/load-nyc-config': 1.1.0 + '@istanbuljs/schema': 0.1.3 + istanbul-lib-instrument: 5.2.1 + test-exclude: 6.0.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-jest-hoist@29.0.2: + resolution: {integrity: sha512-eBr2ynAEFjcebVvu8Ktx580BD1QKCrBG1XwEUTXJe285p9HA/4hOhfWCFRQhTKSyBV0VzjhG7H91Eifz9s29hg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@babel/template': 7.18.10 + '@babel/types': 7.19.3 + '@types/babel__core': 7.1.19 + '@types/babel__traverse': 7.18.2 + dev: true + + /babel-preset-current-node-syntax@1.0.1(@babel/core@7.19.3): + resolution: {integrity: sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.19.3 + '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.19.3) + '@babel/plugin-syntax-bigint': 7.8.3(@babel/core@7.19.3) + '@babel/plugin-syntax-class-properties': 7.12.13(@babel/core@7.19.3) + '@babel/plugin-syntax-import-meta': 7.10.4(@babel/core@7.19.3) + '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.19.3) + '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.19.3) + '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.19.3) + '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.19.3) + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.19.3) 
+ '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.19.3) + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.19.3) + '@babel/plugin-syntax-top-level-await': 7.14.5(@babel/core@7.19.3) + dev: true + + /babel-preset-jest@29.0.2(@babel/core@7.19.3): + resolution: {integrity: sha512-BeVXp7rH5TK96ofyEnHjznjLMQ2nAeDJ+QzxKnHAAMs0RgrQsCywjAN8m4mOm5Di0pxU//3AoEeJJrerMH5UeA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + '@babel/core': ^7.0.0 + dependencies: + '@babel/core': 7.19.3 + babel-plugin-jest-hoist: 29.0.2 + babel-preset-current-node-syntax: 1.0.1(@babel/core@7.19.3) + dev: true + + /balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + dev: true + + /brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + dev: true + + /braces@3.0.2: + resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==} + engines: {node: '>=8'} + dependencies: + fill-range: 7.0.1 + + /browserslist@4.21.4: + resolution: {integrity: sha512-CBHJJdDmgjl3daYjN5Cp5kbTf1mUhZoS+beLklHIvkOWscs83YAhLlF3Wsh/lciQYAcbBJgTOD44VtG31ZM4Hw==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + dependencies: + caniuse-lite: 1.0.30001418 + electron-to-chromium: 1.4.275 + node-releases: 2.0.6 + update-browserslist-db: 1.0.10(browserslist@4.21.4) + dev: true + + /bs-logger@0.2.6: + resolution: {integrity: sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==} + engines: {node: '>= 6'} + dependencies: + fast-json-stable-stringify: 2.1.0 + dev: true + + /bser@2.1.1: + resolution: {integrity: 
sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==} + dependencies: + node-int64: 0.4.0 + dev: true + + /buffer-from@1.1.2: + resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} + dev: true + + /callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + dev: true + + /camelcase@5.3.1: + resolution: {integrity: sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==} + engines: {node: '>=6'} + dev: true + + /camelcase@6.3.0: + resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} + engines: {node: '>=10'} + dev: true + + /caniuse-lite@1.0.30001418: + resolution: {integrity: sha512-oIs7+JL3K9JRQ3jPZjlH6qyYDp+nBTCais7hjh0s+fuBwufc7uZ7hPYMXrDOJhV360KGMTcczMRObk0/iMqZRg==} + dev: true + + /chalk@2.4.2: + resolution: {integrity: sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==} + engines: {node: '>=4'} + dependencies: + ansi-styles: 3.2.1 + escape-string-regexp: 1.0.5 + supports-color: 5.5.0 + dev: true + + /chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + dev: true + + /chalk@5.0.1: + resolution: {integrity: sha512-Fo07WOYGqMfCWHOzSXOt2CxDbC6skS/jO9ynEcmpANMoPrD+W1r1K6Vx7iNm+AQmETU1Xr2t+n8nzkV9t6xh3w==} + engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} + dev: false + + /char-regex@1.0.2: + resolution: {integrity: sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==} + engines: {node: '>=10'} + dev: true + + /ci-info@3.4.0: + resolution: {integrity: 
sha512-t5QdPT5jq3o262DOQ8zA6E1tlH2upmUc4Hlvrbx1pGYJuiiHl7O7rvVNI+l8HTVhd/q3Qc9vqimkNk5yiXsAug==} + dev: true + + /cjs-module-lexer@1.2.2: + resolution: {integrity: sha512-cOU9usZw8/dXIXKtwa8pM0OTJQuJkxMN6w30csNRUerHfeQ5R6U3kkU/FtJeIf3M202OHfY2U8ccInBG7/xogA==} + dev: true + + /cliui@8.0.1: + resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==} + engines: {node: '>=12'} + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + dev: true + + /co@4.6.0: + resolution: {integrity: sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==} + engines: {iojs: '>= 1.0.0', node: '>= 0.12.0'} + dev: true + + /collect-v8-coverage@1.0.1: + resolution: {integrity: sha512-iBPtljfCNcTKNAto0KEtDfZ3qzjJvqE3aTGZsbhjSBlorqpXJlaWWtPO35D+ZImoC3KWejX64o+yPGxhWSTzfg==} + dev: true + + /color-convert@1.9.3: + resolution: {integrity: sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==} + dependencies: + color-name: 1.1.3 + dev: true + + /color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + dependencies: + color-name: 1.1.4 + dev: true + + /color-name@1.1.3: + resolution: {integrity: sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==} + dev: true + + /color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + dev: true + + /concat-map@0.0.1: + resolution: {integrity: sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=} + dev: true + + /convert-source-map@1.8.0: + resolution: {integrity: sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==} + dependencies: + safe-buffer: 5.1.2 + dev: true + + /create-require@1.1.1: + resolution: {integrity: 
sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==} + + /cross-spawn@7.0.3: + resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} + engines: {node: '>= 8'} + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + dev: true + + /data-uri-to-buffer@4.0.0: + resolution: {integrity: sha512-Vr3mLBA8qWmcuschSLAOogKgQ/Jwxulv3RNE4FXnYWRGujzrRWQI4m12fQqRkwX06C0KanhLr4hK+GydchZsaA==} + engines: {node: '>= 12'} + dev: false + + /debug@4.3.4: + resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.1.2 + dev: true + + /dedent@0.7.0: + resolution: {integrity: sha512-Q6fKUPqnAHAyhiUgFU7BUzLiv0kd8saH9al7tnu5Q/okj6dnupxyTgFIBjVzJATdfIAm9NAsvXNzjaKa+bxVyA==} + dev: true + + /deepmerge@4.2.2: + resolution: {integrity: sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg==} + engines: {node: '>=0.10.0'} + dev: true + + /detect-newline@3.1.0: + resolution: {integrity: sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==} + engines: {node: '>=8'} + dev: true + + /diff-sequences@29.0.0: + resolution: {integrity: sha512-7Qe/zd1wxSDL4D/X/FPjOMB+ZMDt71W94KYaq05I2l0oQqgXgs7s4ftYYmV38gBSrPz2vcygxfs1xn0FT+rKNA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dev: true + + /diff@4.0.2: + resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} + engines: {node: '>=0.3.1'} + + /dir-glob@3.0.1: + resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} + engines: {node: '>=8'} + dependencies: + path-type: 4.0.0 + dev: false + + 
/duplexer@0.1.2: + resolution: {integrity: sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==} + dev: false + + /electron-to-chromium@1.4.275: + resolution: {integrity: sha512-aJeQQ+Hl9Jyyzv4chBqYJwmVRY46N5i2BEX5Cuyk/5gFCUZ5F3i7Hnba6snZftWla7Gglwc5pIgcd+E7cW+rPg==} + dev: true + + /emittery@0.10.2: + resolution: {integrity: sha512-aITqOwnLanpHLNXZJENbOgjUBeHocD+xsSJmNrjovKBW5HbSpW3d1pEls7GFQPUWXiwG9+0P4GtHfEqC/4M0Iw==} + engines: {node: '>=12'} + dev: true + + /emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + dev: true + + /error-ex@1.3.2: + resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} + dependencies: + is-arrayish: 0.2.1 + dev: true + + /escalade@3.1.1: + resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==} + engines: {node: '>=6'} + dev: true + + /escape-string-regexp@1.0.5: + resolution: {integrity: sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==} + engines: {node: '>=0.8.0'} + dev: true + + /escape-string-regexp@2.0.0: + resolution: {integrity: sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==} + engines: {node: '>=8'} + dev: true + + /esprima@4.0.1: + resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} + engines: {node: '>=4'} + hasBin: true + dev: true + + /event-stream@3.3.4: + resolution: {integrity: sha512-QHpkERcGsR0T7Qm3HNJSyXKEEj8AHNxkY3PK8TS2KJvQ7NiSHe3DDpwVKKtoYprL/AreyzFBeIkBIWChAqn60g==} + dependencies: + duplexer: 0.1.2 + from: 0.1.7 + map-stream: 0.1.0 + pause-stream: 0.0.11 + split: 0.3.3 + stream-combiner: 0.0.4 + through: 2.3.8 + dev: false + + /execa@5.1.1: + resolution: {integrity: 
sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==} + engines: {node: '>=10'} + dependencies: + cross-spawn: 7.0.3 + get-stream: 6.0.1 + human-signals: 2.1.0 + is-stream: 2.0.1 + merge-stream: 2.0.0 + npm-run-path: 4.0.1 + onetime: 5.1.2 + signal-exit: 3.0.7 + strip-final-newline: 2.0.0 + dev: true + + /exit@0.1.2: + resolution: {integrity: sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==} + engines: {node: '>= 0.8.0'} + dev: true + + /expect@29.1.2: + resolution: {integrity: sha512-AuAGn1uxva5YBbBlXb+2JPxJRuemZsmlGcapPXWNSBNsQtAULfjioREGBWuI0EOvYUKjDnrCy8PW5Zlr1md5mw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/expect-utils': 29.1.2 + jest-get-type: 29.0.0 + jest-matcher-utils: 29.1.2 + jest-message-util: 29.1.2 + jest-util: 29.1.2 + dev: true + + /fast-glob@3.2.12: + resolution: {integrity: sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==} + engines: {node: '>=8.6.0'} + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.5 + dev: false + + /fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + dev: true + + /fastq@1.13.0: + resolution: {integrity: sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==} + dependencies: + reusify: 1.0.4 + dev: false + + /fb-watchman@2.0.2: + resolution: {integrity: sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==} + dependencies: + bser: 2.1.1 + dev: true + + /fetch-blob@3.2.0: + resolution: {integrity: sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==} + engines: {node: ^12.20 || >= 14.13} + dependencies: + node-domexception: 1.0.0 + 
web-streams-polyfill: 3.2.1 + dev: false + + /fill-range@7.0.1: + resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} + engines: {node: '>=8'} + dependencies: + to-regex-range: 5.0.1 + + /find-up@4.1.0: + resolution: {integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==} + engines: {node: '>=8'} + dependencies: + locate-path: 5.0.0 + path-exists: 4.0.0 + dev: true + + /formdata-polyfill@4.0.10: + resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==} + engines: {node: '>=12.20.0'} + dependencies: + fetch-blob: 3.2.0 + dev: false + + /from@0.1.7: + resolution: {integrity: sha512-twe20eF1OxVxp/ML/kq2p1uc6KvFK/+vs8WjEbeKmV2He22MKm7YF2ANIt+EOqhJ5L3K/SuuPhk0hWQDjOM23g==} + dev: false + + /fs-extra@10.1.0: + resolution: {integrity: sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==} + engines: {node: '>=12'} + dependencies: + graceful-fs: 4.2.10 + jsonfile: 6.1.0 + universalify: 2.0.0 + dev: false + + /fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + dev: true + + /fsevents@2.3.2: + resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /function-bind@1.1.1: + resolution: {integrity: sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==} + dev: true + + /gensync@1.0.0-beta.2: + resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} + engines: {node: '>=6.9.0'} + dev: true + + /get-caller-file@2.0.5: + resolution: {integrity: 
sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + dev: true + + /get-package-type@0.1.0: + resolution: {integrity: sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==} + engines: {node: '>=8.0.0'} + dev: true + + /get-stream@6.0.1: + resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==} + engines: {node: '>=10'} + dev: true + + /glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + dependencies: + is-glob: 4.0.3 + dev: false + + /glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + dev: true + + /globals@11.12.0: + resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} + engines: {node: '>=4'} + dev: true + + /globby@13.1.2: + resolution: {integrity: sha512-LKSDZXToac40u8Q1PQtZihbNdTYSNMuWe+K5l+oa6KgDzSvVrHXlJy40hUP522RjAIoNLJYBJi7ow+rbFpIhHQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dependencies: + dir-glob: 3.0.1 + fast-glob: 3.2.12 + ignore: 5.2.0 + merge2: 1.4.1 + slash: 4.0.0 + dev: false + + /graceful-fs@4.2.10: + resolution: {integrity: sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==} + + /has-flag@3.0.0: + resolution: {integrity: sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==} + engines: {node: '>=4'} + dev: true + + /has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + 
engines: {node: '>=8'} + dev: true + + /has@1.0.3: + resolution: {integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==} + engines: {node: '>= 0.4.0'} + dependencies: + function-bind: 1.1.1 + dev: true + + /html-escaper@2.0.2: + resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} + dev: true + + /human-signals@2.1.0: + resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} + engines: {node: '>=10.17.0'} + dev: true + + /ignore@5.2.0: + resolution: {integrity: sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==} + engines: {node: '>= 4'} + dev: false + + /import-local@3.1.0: + resolution: {integrity: sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==} + engines: {node: '>=8'} + hasBin: true + dependencies: + pkg-dir: 4.2.0 + resolve-cwd: 3.0.0 + dev: true + + /imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + dev: true + + /inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + dev: true + + /inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + dev: true + + /is-arrayish@0.2.1: + resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} + dev: true + + /is-core-module@2.10.0: + resolution: {integrity: sha512-Erxj2n/LDAZ7H8WNJXd9tw38GYM3dv8rk8Zcs+jJuxYTW7sozH+SS8NtrSjVL1/vpLvWi1hxy96IzjJ3EHTJJg==} + dependencies: + has: 1.0.3 + dev: true + + /is-extglob@2.1.1: + resolution: {integrity: 
sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + dev: false + + /is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + dev: true + + /is-generator-fn@2.1.0: + resolution: {integrity: sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==} + engines: {node: '>=6'} + dev: true + + /is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + dependencies: + is-extglob: 2.1.1 + dev: false + + /is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + /is-stream@2.0.1: + resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} + engines: {node: '>=8'} + dev: true + + /isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + /istanbul-lib-coverage@3.2.0: + resolution: {integrity: sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==} + engines: {node: '>=8'} + dev: true + + /istanbul-lib-instrument@5.2.1: + resolution: {integrity: sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==} + engines: {node: '>=8'} + dependencies: + '@babel/core': 7.19.3 + '@babel/parser': 7.19.3 + '@istanbuljs/schema': 0.1.3 + istanbul-lib-coverage: 3.2.0 + semver: 6.3.0 + transitivePeerDependencies: + - supports-color + dev: true + + /istanbul-lib-report@3.0.0: + resolution: {integrity: sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==} + engines: {node: 
'>=8'} + dependencies: + istanbul-lib-coverage: 3.2.0 + make-dir: 3.1.0 + supports-color: 7.2.0 + dev: true + + /istanbul-lib-source-maps@4.0.1: + resolution: {integrity: sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==} + engines: {node: '>=10'} + dependencies: + debug: 4.3.4 + istanbul-lib-coverage: 3.2.0 + source-map: 0.6.1 + transitivePeerDependencies: + - supports-color + dev: true + + /istanbul-reports@3.1.5: + resolution: {integrity: sha512-nUsEMa9pBt/NOHqbcbeJEgqIlY/K7rVWUX6Lql2orY5e9roQOthbR3vtY4zzf2orPELg80fnxxk9zUyPlgwD1w==} + engines: {node: '>=8'} + dependencies: + html-escaper: 2.0.2 + istanbul-lib-report: 3.0.0 + dev: true + + /jest-changed-files@29.0.0: + resolution: {integrity: sha512-28/iDMDrUpGoCitTURuDqUzWQoWmOmOKOFST1mi2lwh62X4BFf6khgH3uSuo1e49X/UDjuApAj3w0wLOex4VPQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + execa: 5.1.1 + p-limit: 3.1.0 + dev: true + + /jest-circus@29.1.2: + resolution: {integrity: sha512-ajQOdxY6mT9GtnfJRZBRYS7toNIJayiiyjDyoZcnvPRUPwJ58JX0ci0PKAKUo2C1RyzlHw0jabjLGKksO42JGA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/environment': 29.1.2 + '@jest/expect': 29.1.2 + '@jest/test-result': 29.1.2 + '@jest/types': 29.1.2 + '@types/node': 18.8.2 + chalk: 4.1.2 + co: 4.6.0 + dedent: 0.7.0 + is-generator-fn: 2.1.0 + jest-each: 29.1.2 + jest-matcher-utils: 29.1.2 + jest-message-util: 29.1.2 + jest-runtime: 29.1.2 + jest-snapshot: 29.1.2 + jest-util: 29.1.2 + p-limit: 3.1.0 + pretty-format: 29.1.2 + slash: 3.0.0 + stack-utils: 2.0.5 + transitivePeerDependencies: + - supports-color + dev: true + + /jest-cli@29.1.2(@types/node@18.8.2)(ts-node@10.9.1): + resolution: {integrity: sha512-vsvBfQ7oS2o4MJdAH+4u9z76Vw5Q8WBQF5MchDbkylNknZdrPTX1Ix7YRJyTlOWqRaS7ue/cEAn+E4V1MWyMzw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + hasBin: true + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + 
node-notifier: + optional: true + dependencies: + '@jest/core': 29.1.2(ts-node@10.9.1) + '@jest/test-result': 29.1.2 + '@jest/types': 29.1.2 + chalk: 4.1.2 + exit: 0.1.2 + graceful-fs: 4.2.10 + import-local: 3.1.0 + jest-config: 29.1.2(@types/node@18.8.2)(ts-node@10.9.1) + jest-util: 29.1.2 + jest-validate: 29.1.2 + prompts: 2.4.2 + yargs: 17.6.0 + transitivePeerDependencies: + - '@types/node' + - supports-color + - ts-node + dev: true + + /jest-config@29.1.2(@types/node@18.8.2)(ts-node@10.9.1): + resolution: {integrity: sha512-EC3Zi86HJUOz+2YWQcJYQXlf0zuBhJoeyxLM6vb6qJsVmpP7KcCP1JnyF0iaqTaXdBP8Rlwsvs7hnKWQWWLwwA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + '@types/node': '*' + ts-node: '>=9.0.0' + peerDependenciesMeta: + '@types/node': + optional: true + ts-node: + optional: true + dependencies: + '@babel/core': 7.19.3 + '@jest/test-sequencer': 29.1.2 + '@jest/types': 29.1.2 + '@types/node': 18.8.2 + babel-jest: 29.1.2(@babel/core@7.19.3) + chalk: 4.1.2 + ci-info: 3.4.0 + deepmerge: 4.2.2 + glob: 7.2.3 + graceful-fs: 4.2.10 + jest-circus: 29.1.2 + jest-environment-node: 29.1.2 + jest-get-type: 29.0.0 + jest-regex-util: 29.0.0 + jest-resolve: 29.1.2 + jest-runner: 29.1.2 + jest-util: 29.1.2 + jest-validate: 29.1.2 + micromatch: 4.0.5 + parse-json: 5.2.0 + pretty-format: 29.1.2 + slash: 3.0.0 + strip-json-comments: 3.1.1 + ts-node: 10.9.1(@types/node@18.8.2)(typescript@5.2.2) + transitivePeerDependencies: + - supports-color + dev: true + + /jest-diff@29.1.2: + resolution: {integrity: sha512-4GQts0aUopVvecIT4IwD/7xsBaMhKTYoM4/njE/aVw9wpw+pIUVp8Vab/KnSzSilr84GnLBkaP3JLDnQYCKqVQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + chalk: 4.1.2 + diff-sequences: 29.0.0 + jest-get-type: 29.0.0 + pretty-format: 29.1.2 + dev: true + + /jest-docblock@29.0.0: + resolution: {integrity: sha512-s5Kpra/kLzbqu9dEjov30kj1n4tfu3e7Pl8v+f8jOkeWNqM6Ds8jRaJfZow3ducoQUrf2Z4rs2N5S3zXnb83gw==} + engines: {node: ^14.15.0 || ^16.10.0 || 
>=18.0.0} + dependencies: + detect-newline: 3.1.0 + dev: true + + /jest-each@29.1.2: + resolution: {integrity: sha512-AmTQp9b2etNeEwMyr4jc0Ql/LIX/dhbgP21gHAizya2X6rUspHn2gysMXaj6iwWuOJ2sYRgP8c1P4cXswgvS1A==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/types': 29.1.2 + chalk: 4.1.2 + jest-get-type: 29.0.0 + jest-util: 29.1.2 + pretty-format: 29.1.2 + dev: true + + /jest-environment-node@29.1.2: + resolution: {integrity: sha512-C59yVbdpY8682u6k/lh8SUMDJPbOyCHOTgLVVi1USWFxtNV+J8fyIwzkg+RJIVI30EKhKiAGNxYaFr3z6eyNhQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/environment': 29.1.2 + '@jest/fake-timers': 29.1.2 + '@jest/types': 29.1.2 + '@types/node': 18.8.2 + jest-mock: 29.1.2 + jest-util: 29.1.2 + dev: true + + /jest-get-type@29.0.0: + resolution: {integrity: sha512-83X19z/HuLKYXYHskZlBAShO7UfLFXu/vWajw9ZNJASN32li8yHMaVGAQqxFW1RCFOkB7cubaL6FaJVQqqJLSw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dev: true + + /jest-haste-map@29.1.2: + resolution: {integrity: sha512-xSjbY8/BF11Jh3hGSPfYTa/qBFrm3TPM7WU8pU93m2gqzORVLkHFWvuZmFsTEBPRKndfewXhMOuzJNHyJIZGsw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/types': 29.1.2 + '@types/graceful-fs': 4.1.5 + '@types/node': 18.8.2 + anymatch: 3.1.2 + fb-watchman: 2.0.2 + graceful-fs: 4.2.10 + jest-regex-util: 29.0.0 + jest-util: 29.1.2 + jest-worker: 29.1.2 + micromatch: 4.0.5 + walker: 1.0.8 + optionalDependencies: + fsevents: 2.3.2 + dev: true + + /jest-leak-detector@29.1.2: + resolution: {integrity: sha512-TG5gAZJpgmZtjb6oWxBLf2N6CfQ73iwCe6cofu/Uqv9iiAm6g502CAnGtxQaTfpHECBdVEMRBhomSXeLnoKjiQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + jest-get-type: 29.0.0 + pretty-format: 29.1.2 + dev: true + + /jest-matcher-utils@29.1.2: + resolution: {integrity: sha512-MV5XrD3qYSW2zZSHRRceFzqJ39B2z11Qv0KPyZYxnzDHFeYZGJlgGi0SW+IXSJfOewgJp/Km/7lpcFT+cgZypw==} + engines: {node: ^14.15.0 || ^16.10.0 || 
>=18.0.0} + dependencies: + chalk: 4.1.2 + jest-diff: 29.1.2 + jest-get-type: 29.0.0 + pretty-format: 29.1.2 + dev: true + + /jest-message-util@29.1.2: + resolution: {integrity: sha512-9oJ2Os+Qh6IlxLpmvshVbGUiSkZVc2FK+uGOm6tghafnB2RyjKAxMZhtxThRMxfX1J1SOMhTn9oK3/MutRWQJQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@babel/code-frame': 7.18.6 + '@jest/types': 29.1.2 + '@types/stack-utils': 2.0.1 + chalk: 4.1.2 + graceful-fs: 4.2.10 + micromatch: 4.0.5 + pretty-format: 29.1.2 + slash: 3.0.0 + stack-utils: 2.0.5 + dev: true + + /jest-mock@29.1.2: + resolution: {integrity: sha512-PFDAdjjWbjPUtQPkQufvniXIS3N9Tv7tbibePEjIIprzjgo0qQlyUiVMrT4vL8FaSJo1QXifQUOuPH3HQC/aMA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/types': 29.1.2 + '@types/node': 18.8.2 + jest-util: 29.1.2 + dev: true + + /jest-pnp-resolver@1.2.2(jest-resolve@29.1.2): + resolution: {integrity: sha512-olV41bKSMm8BdnuMsewT4jqlZ8+3TCARAXjZGT9jcoSnrfUnRCqnMoF9XEeoWjbzObpqF9dRhHQj0Xb9QdF6/w==} + engines: {node: '>=6'} + peerDependencies: + jest-resolve: '*' + peerDependenciesMeta: + jest-resolve: + optional: true + dependencies: + jest-resolve: 29.1.2 + dev: true + + /jest-regex-util@29.0.0: + resolution: {integrity: sha512-BV7VW7Sy0fInHWN93MMPtlClweYv2qrSCwfeFWmpribGZtQPWNvRSq9XOVgOEjU1iBGRKXUZil0o2AH7Iy9Lug==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dev: true + + /jest-resolve-dependencies@29.1.2: + resolution: {integrity: sha512-44yYi+yHqNmH3OoWZvPgmeeiwKxhKV/0CfrzaKLSkZG9gT973PX8i+m8j6pDrTYhhHoiKfF3YUFg/6AeuHw4HQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + jest-regex-util: 29.0.0 + jest-snapshot: 29.1.2 + transitivePeerDependencies: + - supports-color + dev: true + + /jest-resolve@29.1.2: + resolution: {integrity: sha512-7fcOr+k7UYSVRJYhSmJHIid3AnDBcLQX3VmT9OSbPWsWz1MfT7bcoerMhADKGvKCoMpOHUQaDHtQoNp/P9JMGg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + chalk: 4.1.2 + 
graceful-fs: 4.2.10 + jest-haste-map: 29.1.2 + jest-pnp-resolver: 1.2.2(jest-resolve@29.1.2) + jest-util: 29.1.2 + jest-validate: 29.1.2 + resolve: 1.22.1 + resolve.exports: 1.1.0 + slash: 3.0.0 + dev: true + + /jest-runner@29.1.2: + resolution: {integrity: sha512-yy3LEWw8KuBCmg7sCGDIqKwJlULBuNIQa2eFSVgVASWdXbMYZ9H/X0tnXt70XFoGf92W2sOQDOIFAA6f2BG04Q==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/console': 29.1.2 + '@jest/environment': 29.1.2 + '@jest/test-result': 29.1.2 + '@jest/transform': 29.1.2 + '@jest/types': 29.1.2 + '@types/node': 18.8.2 + chalk: 4.1.2 + emittery: 0.10.2 + graceful-fs: 4.2.10 + jest-docblock: 29.0.0 + jest-environment-node: 29.1.2 + jest-haste-map: 29.1.2 + jest-leak-detector: 29.1.2 + jest-message-util: 29.1.2 + jest-resolve: 29.1.2 + jest-runtime: 29.1.2 + jest-util: 29.1.2 + jest-watcher: 29.1.2 + jest-worker: 29.1.2 + p-limit: 3.1.0 + source-map-support: 0.5.13 + transitivePeerDependencies: + - supports-color + dev: true + + /jest-runtime@29.1.2: + resolution: {integrity: sha512-jr8VJLIf+cYc+8hbrpt412n5jX3tiXmpPSYTGnwcvNemY+EOuLNiYnHJ3Kp25rkaAcTWOEI4ZdOIQcwYcXIAZw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/environment': 29.1.2 + '@jest/fake-timers': 29.1.2 + '@jest/globals': 29.1.2 + '@jest/source-map': 29.0.0 + '@jest/test-result': 29.1.2 + '@jest/transform': 29.1.2 + '@jest/types': 29.1.2 + '@types/node': 18.8.2 + chalk: 4.1.2 + cjs-module-lexer: 1.2.2 + collect-v8-coverage: 1.0.1 + glob: 7.2.3 + graceful-fs: 4.2.10 + jest-haste-map: 29.1.2 + jest-message-util: 29.1.2 + jest-mock: 29.1.2 + jest-regex-util: 29.0.0 + jest-resolve: 29.1.2 + jest-snapshot: 29.1.2 + jest-util: 29.1.2 + slash: 3.0.0 + strip-bom: 4.0.0 + transitivePeerDependencies: + - supports-color + dev: true + + /jest-snapshot@29.1.2: + resolution: {integrity: sha512-rYFomGpVMdBlfwTYxkUp3sjD6usptvZcONFYNqVlaz4EpHPnDvlWjvmOQ9OCSNKqYZqLM2aS3wq01tWujLg7gg==} + engines: {node: ^14.15.0 || ^16.10.0 || 
>=18.0.0} + dependencies: + '@babel/core': 7.19.3 + '@babel/generator': 7.19.3 + '@babel/plugin-syntax-jsx': 7.18.6(@babel/core@7.19.3) + '@babel/plugin-syntax-typescript': 7.18.6(@babel/core@7.19.3) + '@babel/traverse': 7.19.3 + '@babel/types': 7.19.3 + '@jest/expect-utils': 29.1.2 + '@jest/transform': 29.1.2 + '@jest/types': 29.1.2 + '@types/babel__traverse': 7.18.2 + '@types/prettier': 2.7.1 + babel-preset-current-node-syntax: 1.0.1(@babel/core@7.19.3) + chalk: 4.1.2 + expect: 29.1.2 + graceful-fs: 4.2.10 + jest-diff: 29.1.2 + jest-get-type: 29.0.0 + jest-haste-map: 29.1.2 + jest-matcher-utils: 29.1.2 + jest-message-util: 29.1.2 + jest-util: 29.1.2 + natural-compare: 1.4.0 + pretty-format: 29.1.2 + semver: 7.3.8 + transitivePeerDependencies: + - supports-color + dev: true + + /jest-util@29.1.2: + resolution: {integrity: sha512-vPCk9F353i0Ymx3WQq3+a4lZ07NXu9Ca8wya6o4Fe4/aO1e1awMMprZ3woPFpKwghEOW+UXgd15vVotuNN9ONQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/types': 29.1.2 + '@types/node': 18.8.2 + chalk: 4.1.2 + ci-info: 3.4.0 + graceful-fs: 4.2.10 + picomatch: 2.3.1 + dev: true + + /jest-validate@29.1.2: + resolution: {integrity: sha512-k71pOslNlV8fVyI+mEySy2pq9KdXdgZtm7NHrBX8LghJayc3wWZH0Yr0mtYNGaCU4F1OLPXRkwZR0dBm/ClshA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/types': 29.1.2 + camelcase: 6.3.0 + chalk: 4.1.2 + jest-get-type: 29.0.0 + leven: 3.1.0 + pretty-format: 29.1.2 + dev: true + + /jest-watcher@29.1.2: + resolution: {integrity: sha512-6JUIUKVdAvcxC6bM8/dMgqY2N4lbT+jZVsxh0hCJRbwkIEnbr/aPjMQ28fNDI5lB51Klh00MWZZeVf27KBUj5w==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/test-result': 29.1.2 + '@jest/types': 29.1.2 + '@types/node': 18.8.2 + ansi-escapes: 4.3.2 + chalk: 4.1.2 + emittery: 0.10.2 + jest-util: 29.1.2 + string-length: 4.0.2 + dev: true + + /jest-worker@29.1.2: + resolution: {integrity: 
sha512-AdTZJxKjTSPHbXT/AIOjQVmoFx0LHFcVabWu0sxI7PAy7rFf8c0upyvgBKgguVXdM4vY74JdwkyD4hSmpTW8jA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@types/node': 18.8.2 + jest-util: 29.1.2 + merge-stream: 2.0.0 + supports-color: 8.1.1 + dev: true + + /jest@29.1.2(@types/node@18.8.2)(ts-node@10.9.1): + resolution: {integrity: sha512-5wEIPpCezgORnqf+rCaYD1SK+mNN7NsstWzIsuvsnrhR/hSxXWd82oI7DkrbJ+XTD28/eG8SmxdGvukrGGK6Tw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + hasBin: true + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + dependencies: + '@jest/core': 29.1.2(ts-node@10.9.1) + '@jest/types': 29.1.2 + import-local: 3.1.0 + jest-cli: 29.1.2(@types/node@18.8.2)(ts-node@10.9.1) + transitivePeerDependencies: + - '@types/node' + - supports-color + - ts-node + dev: true + + /js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + dev: true + + /js-yaml@3.14.1: + resolution: {integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==} + hasBin: true + dependencies: + argparse: 1.0.10 + esprima: 4.0.1 + dev: true + + /jsesc@2.5.2: + resolution: {integrity: sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==} + engines: {node: '>=4'} + hasBin: true + dev: true + + /json-parse-even-better-errors@2.3.1: + resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + dev: true + + /json5@2.2.1: + resolution: {integrity: sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==} + engines: {node: '>=6'} + hasBin: true + dev: true + + /jsonfile@6.1.0: + resolution: {integrity: sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==} + dependencies: + universalify: 
2.0.0 + optionalDependencies: + graceful-fs: 4.2.10 + dev: false + + /kleur@3.0.3: + resolution: {integrity: sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==} + engines: {node: '>=6'} + dev: true + + /leven@3.1.0: + resolution: {integrity: sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==} + engines: {node: '>=6'} + dev: true + + /lines-and-columns@1.2.4: + resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + dev: true + + /locate-path@5.0.0: + resolution: {integrity: sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==} + engines: {node: '>=8'} + dependencies: + p-locate: 4.1.0 + dev: true + + /lodash.memoize@4.1.2: + resolution: {integrity: sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==} + dev: true + + /lru-cache@6.0.0: + resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==} + engines: {node: '>=10'} + dependencies: + yallist: 4.0.0 + dev: true + + /make-dir@3.1.0: + resolution: {integrity: sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==} + engines: {node: '>=8'} + dependencies: + semver: 6.3.0 + dev: true + + /make-error@1.3.6: + resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} + + /makeerror@1.0.12: + resolution: {integrity: sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==} + dependencies: + tmpl: 1.0.5 + dev: true + + /map-stream@0.1.0: + resolution: {integrity: sha512-CkYQrPYZfWnu/DAmVCpTSX/xHpKZ80eKh2lAkyA6AJTef6bW+6JpbQZN5rofum7da+SyN1bi5ctTm+lTfcCW3g==} + dev: false + + /merge-stream@2.0.0: + resolution: {integrity: 
sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} + dev: true + + /merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + dev: false + + /micromatch@4.0.5: + resolution: {integrity: sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==} + engines: {node: '>=8.6'} + dependencies: + braces: 3.0.2 + picomatch: 2.3.1 + + /mimic-fn@2.1.0: + resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==} + engines: {node: '>=6'} + dev: true + + /minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + dependencies: + brace-expansion: 1.1.11 + dev: true + + /minimist@1.2.6: + resolution: {integrity: sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==} + dev: false + + /ms@2.1.2: + resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} + dev: true + + /natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + dev: true + + /node-domexception@1.0.0: + resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} + engines: {node: '>=10.5.0'} + dev: false + + /node-fetch@3.2.8: + resolution: {integrity: sha512-KtpD1YhGszhntMpBDyp5lyagk8KIMopC1LEb7cQUAh7zcosaX5uK8HnbNb2i3NTQK3sIawCItS0uFC3QzcLHdg==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dependencies: + data-uri-to-buffer: 4.0.0 + fetch-blob: 3.2.0 + formdata-polyfill: 4.0.10 + dev: false + + /node-int64@0.4.0: + resolution: {integrity: 
sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==} + dev: true + + /node-releases@2.0.6: + resolution: {integrity: sha512-PiVXnNuFm5+iYkLBNeq5211hvO38y63T0i2KKh2KnUs3RpzJ+JtODFjkD8yjLwnDkTYF1eKXheUwdssR+NRZdg==} + dev: true + + /normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + dev: true + + /npm-run-path@4.0.1: + resolution: {integrity: sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==} + engines: {node: '>=8'} + dependencies: + path-key: 3.1.1 + dev: true + + /once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + dependencies: + wrappy: 1.0.2 + dev: true + + /onetime@5.1.2: + resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} + engines: {node: '>=6'} + dependencies: + mimic-fn: 2.1.0 + dev: true + + /p-limit@2.3.0: + resolution: {integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==} + engines: {node: '>=6'} + dependencies: + p-try: 2.2.0 + dev: true + + /p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + dependencies: + yocto-queue: 0.1.0 + dev: true + + /p-locate@4.1.0: + resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==} + engines: {node: '>=8'} + dependencies: + p-limit: 2.3.0 + dev: true + + /p-try@2.2.0: + resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==} + engines: {node: '>=6'} + dev: true + + /parse-json@5.2.0: + resolution: {integrity: 
sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} + engines: {node: '>=8'} + dependencies: + '@babel/code-frame': 7.18.6 + error-ex: 1.3.2 + json-parse-even-better-errors: 2.3.1 + lines-and-columns: 1.2.4 + dev: true + + /path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + dev: true + + /path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + dev: true + + /path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + dev: true + + /path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + dev: true + + /path-type@4.0.0: + resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} + engines: {node: '>=8'} + dev: false + + /pause-stream@0.0.11: + resolution: {integrity: sha512-e3FBlXLmN/D1S+zHzanP4E/4Z60oFAa3O051qt1pxa7DEJWKAyil6upYVXCWadEnuoqa4Pkc9oUx9zsxYeRv8A==} + dependencies: + through: 2.3.8 + dev: false + + /picocolors@1.0.0: + resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==} + dev: true + + /picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + /pirates@4.0.5: + resolution: {integrity: sha512-8V9+HQPupnaXMA23c5hvl69zXvTwTzyAYasnkb0Tts4XvO4CliqONMOnvlq26rkhLC3nWDFBJf73LU1e1VZLaQ==} + engines: {node: '>= 6'} + dev: true + + /pkg-dir@4.2.0: + resolution: {integrity: 
sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==} + engines: {node: '>=8'} + dependencies: + find-up: 4.1.0 + dev: true + + /pretty-format@29.1.2: + resolution: {integrity: sha512-CGJ6VVGXVRP2o2Dorl4mAwwvDWT25luIsYhkyVQW32E4nL+TgW939J7LlKT/npq5Cpq6j3s+sy+13yk7xYpBmg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + dependencies: + '@jest/schemas': 29.0.0 + ansi-styles: 5.2.0 + react-is: 18.2.0 + dev: true + + /prompts@2.4.2: + resolution: {integrity: sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==} + engines: {node: '>= 6'} + dependencies: + kleur: 3.0.3 + sisteransi: 1.0.5 + dev: true + + /ps-tree@1.2.0: + resolution: {integrity: sha512-0VnamPPYHl4uaU/nSFeZZpR21QAWRz+sRv4iW9+v/GS/J5U5iZB5BNN6J0RMoOvdx2gWM2+ZFMIm58q24e4UYA==} + engines: {node: '>= 0.10'} + hasBin: true + dependencies: + event-stream: 3.3.4 + dev: false + + /queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + dev: false + + /react-is@18.2.0: + resolution: {integrity: sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==} + dev: true + + /require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + dev: true + + /resolve-cwd@3.0.0: + resolution: {integrity: sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==} + engines: {node: '>=8'} + dependencies: + resolve-from: 5.0.0 + dev: true + + /resolve-from@5.0.0: + resolution: {integrity: sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==} + engines: {node: '>=8'} + dev: true + + /resolve.exports@1.1.0: + resolution: {integrity: 
sha512-J1l+Zxxp4XK3LUDZ9m60LRJF/mAe4z6a4xyabPHk7pvK5t35dACV32iIjJDFeWZFfZlO29w6SZ67knR0tHzJtQ==} + engines: {node: '>=10'} + dev: true + + /resolve@1.22.1: + resolution: {integrity: sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==} + hasBin: true + dependencies: + is-core-module: 2.10.0 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + dev: true + + /reusify@1.0.4: + resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + dev: false + + /run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + dependencies: + queue-microtask: 1.2.3 + dev: false + + /safe-buffer@5.1.2: + resolution: {integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==} + dev: true + + /semver@6.3.0: + resolution: {integrity: sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==} + hasBin: true + dev: true + + /semver@7.3.8: + resolution: {integrity: sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==} + engines: {node: '>=10'} + hasBin: true + dependencies: + lru-cache: 6.0.0 + dev: true + + /shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + dependencies: + shebang-regex: 3.0.0 + dev: true + + /shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + dev: true + + /signal-exit@3.0.7: + resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} + dev: true + + /sisteransi@1.0.5: + resolution: {integrity: 
sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} + dev: true + + /slash@3.0.0: + resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} + engines: {node: '>=8'} + dev: true + + /slash@4.0.0: + resolution: {integrity: sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==} + engines: {node: '>=12'} + dev: false + + /source-map-support@0.5.13: + resolution: {integrity: sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==} + dependencies: + buffer-from: 1.1.2 + source-map: 0.6.1 + dev: true + + /source-map@0.6.1: + resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} + engines: {node: '>=0.10.0'} + dev: true + + /split@0.3.3: + resolution: {integrity: sha512-wD2AeVmxXRBoX44wAycgjVpMhvbwdI2aZjCkvfNcH1YqHQvJVa1duWc73OyVGJUc05fhFaTZeQ/PYsrmyH0JVA==} + dependencies: + through: 2.3.8 + dev: false + + /sprintf-js@1.0.3: + resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} + dev: true + + /stack-utils@2.0.5: + resolution: {integrity: sha512-xrQcmYhOsn/1kX+Vraq+7j4oE2j/6BFscZ0etmYg81xuM8Gq0022Pxb8+IqgOFUIaxHs0KaSb7T1+OegiNrNFA==} + engines: {node: '>=10'} + dependencies: + escape-string-regexp: 2.0.0 + dev: true + + /stream-combiner@0.0.4: + resolution: {integrity: sha512-rT00SPnTVyRsaSz5zgSPma/aHSOic5U1prhYdRy5HS2kTZviFpmDgzilbtsJsxiroqACmayynDN/9VzIbX5DOw==} + dependencies: + duplexer: 0.1.2 + dev: false + + /string-length@4.0.2: + resolution: {integrity: sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==} + engines: {node: '>=10'} + dependencies: + char-regex: 1.0.2 + strip-ansi: 6.0.1 + dev: true + + /string-width@4.2.3: + resolution: {integrity: 
sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + dev: true + + /strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + dependencies: + ansi-regex: 5.0.1 + dev: true + + /strip-bom@4.0.0: + resolution: {integrity: sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==} + engines: {node: '>=8'} + dev: true + + /strip-final-newline@2.0.0: + resolution: {integrity: sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==} + engines: {node: '>=6'} + dev: true + + /strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + dev: true + + /supports-color@5.5.0: + resolution: {integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==} + engines: {node: '>=4'} + dependencies: + has-flag: 3.0.0 + dev: true + + /supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + dependencies: + has-flag: 4.0.0 + dev: true + + /supports-color@8.1.1: + resolution: {integrity: sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==} + engines: {node: '>=10'} + dependencies: + has-flag: 4.0.0 + dev: true + + /supports-hyperlinks@2.3.0: + resolution: {integrity: sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA==} + engines: {node: '>=8'} + dependencies: + has-flag: 4.0.0 + supports-color: 7.2.0 + dev: true + + /supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: 
sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + dev: true + + /terminal-link@2.1.1: + resolution: {integrity: sha512-un0FmiRUQNr5PJqy9kP7c40F5BOfpGlYTrxonDChEZB7pzZxRNp/bt+ymiy9/npwXya9KH99nJ/GXFIiUkYGFQ==} + engines: {node: '>=8'} + dependencies: + ansi-escapes: 4.3.2 + supports-hyperlinks: 2.3.0 + dev: true + + /test-exclude@6.0.0: + resolution: {integrity: sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==} + engines: {node: '>=8'} + dependencies: + '@istanbuljs/schema': 0.1.3 + glob: 7.2.3 + minimatch: 3.1.2 + dev: true + + /through@2.3.8: + resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==} + dev: false + + /tmpl@1.0.5: + resolution: {integrity: sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==} + dev: true + + /to-fast-properties@2.0.0: + resolution: {integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==} + engines: {node: '>=4'} + dev: true + + /to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + dependencies: + is-number: 7.0.0 + + /ts-jest@29.0.3(@babel/core@7.19.3)(jest@29.1.2)(typescript@5.2.2): + resolution: {integrity: sha512-Ibygvmuyq1qp/z3yTh9QTwVVAbFdDy/+4BtIQR2sp6baF2SJU/8CKK/hhnGIDY2L90Az2jIqTwZPnN2p+BweiQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + hasBin: true + peerDependencies: + '@babel/core': '>=7.0.0-beta.0 <8' + '@jest/types': ^29.0.0 + babel-jest: ^29.0.0 + esbuild: '*' + jest: ^29.0.0 + typescript: '>=4.3' + peerDependenciesMeta: + '@babel/core': + optional: true + '@jest/types': + optional: true + babel-jest: + optional: true + esbuild: + optional: true + dependencies: + '@babel/core': 7.19.3 + bs-logger: 0.2.6 + 
fast-json-stable-stringify: 2.1.0 + jest: 29.1.2(@types/node@18.8.2)(ts-node@10.9.1) + jest-util: 29.1.2 + json5: 2.2.1 + lodash.memoize: 4.1.2 + make-error: 1.3.6 + semver: 7.3.8 + typescript: 5.2.2 + yargs-parser: 21.1.1 + dev: true + + /ts-node@10.9.1(@types/node@18.8.2)(typescript@5.2.2): + resolution: {integrity: sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==} + hasBin: true + peerDependencies: + '@swc/core': '>=1.2.50' + '@swc/wasm': '>=1.2.50' + '@types/node': '*' + typescript: '>=2.7' + peerDependenciesMeta: + '@swc/core': + optional: true + '@swc/wasm': + optional: true + dependencies: + '@cspotcode/source-map-support': 0.8.1 + '@tsconfig/node10': 1.0.9 + '@tsconfig/node12': 1.0.11 + '@tsconfig/node14': 1.0.3 + '@tsconfig/node16': 1.0.3 + '@types/node': 18.8.2 + acorn: 8.8.0 + acorn-walk: 8.2.0 + arg: 4.1.3 + create-require: 1.1.1 + diff: 4.0.2 + make-error: 1.3.6 + typescript: 5.2.2 + v8-compile-cache-lib: 3.0.1 + yn: 3.1.1 + + /tunnel@0.0.6: + resolution: {integrity: sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==} + engines: {node: '>=0.6.11 <=0.7.0 || >=0.7.3'} + dev: false + + /type-detect@4.0.8: + resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} + engines: {node: '>=4'} + dev: true + + /type-fest@0.21.3: + resolution: {integrity: sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==} + engines: {node: '>=10'} + dev: true + + /typescript@5.2.2: + resolution: {integrity: sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==} + engines: {node: '>=14.17'} + hasBin: true + + /universalify@2.0.0: + resolution: {integrity: sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==} + engines: {node: '>= 10.0.0'} + dev: false + + 
/update-browserslist-db@1.0.10(browserslist@4.21.4): + resolution: {integrity: sha512-OztqDenkfFkbSG+tRxBeAnCVPckDBcvibKd35yDONx6OU8N7sqgwc7rCbkJ/WcYtVRZ4ba68d6byhC21GFh7sQ==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + dependencies: + browserslist: 4.21.4 + escalade: 3.1.1 + picocolors: 1.0.0 + dev: true + + /uuid@8.3.2: + resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==} + hasBin: true + dev: false + + /v8-compile-cache-lib@3.0.1: + resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} + + /v8-to-istanbul@9.0.1: + resolution: {integrity: sha512-74Y4LqY74kLE6IFyIjPtkSTWzUZmj8tdHT9Ii/26dvQ6K9Dl2NbEfj0XgU2sHCtKgt5VupqhlO/5aWuqS+IY1w==} + engines: {node: '>=10.12.0'} + dependencies: + '@jridgewell/trace-mapping': 0.3.15 + '@types/istanbul-lib-coverage': 2.0.4 + convert-source-map: 1.8.0 + dev: true + + /walker@1.0.8: + resolution: {integrity: sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==} + dependencies: + makeerror: 1.0.12 + dev: true + + /web-streams-polyfill@3.2.1: + resolution: {integrity: sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q==} + engines: {node: '>= 8'} + dev: false + + /which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + dependencies: + isexe: 2.0.0 + + /wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + dev: true + + /wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + dev: true + + 
/write-file-atomic@4.0.2: + resolution: {integrity: sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==} + engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} + dependencies: + imurmurhash: 0.1.4 + signal-exit: 3.0.7 + dev: true + + /y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: '>=10'} + dev: true + + /yallist@4.0.0: + resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} + dev: true + + /yaml@2.1.2: + resolution: {integrity: sha512-VSdf2/K3FqAetooKQv45Hcu6sA00aDgWZeGcG6V9IYJnVLTnb6988Tie79K5nx2vK7cEpf+yW8Oy+7iPAbdiHA==} + engines: {node: '>= 14'} + dev: false + + /yargs-parser@21.1.1: + resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} + engines: {node: '>=12'} + dev: true + + /yargs@17.6.0: + resolution: {integrity: sha512-8H/wTDqlSwoSnScvV2N/JHfLWOKuh5MVla9hqLjK3nsfyy6Y4kDSYSvkU5YCUEPOSnRXfIyx3Sq+B/IWudTo4g==} + engines: {node: '>=12'} + dependencies: + cliui: 8.0.1 + escalade: 3.1.1 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + dev: true + + /yn@3.1.1: + resolution: {integrity: sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==} + engines: {node: '>=6'} + + /yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + dev: true + + /zx@7.0.8: + resolution: {integrity: sha512-sNjfDHzskqrSkWNj0TVhaowVK5AbpvuyuO1RBU4+LrFcgYI5u9CtyWWgUBRtRZl3bgGEF31zByszoBmwS47d1w==} + engines: {node: '>= 16.0.0'} + hasBin: true + dependencies: + '@types/fs-extra': 9.0.13 + '@types/minimist': 1.2.2 + '@types/node': 18.8.2 + '@types/ps-tree': 1.1.2 + '@types/which': 2.0.1 + chalk: 
5.0.1 + fs-extra: 10.1.0 + globby: 13.1.2 + minimist: 1.2.6 + node-fetch: 3.2.8 + ps-tree: 1.2.0 + which: 2.0.2 + yaml: 2.1.2 + dev: false diff --git a/.github/actions/split-tests/src/index.mts b/.github/actions/split-tests/src/index.mts new file mode 100644 index 00000000..43f9eb13 --- /dev/null +++ b/.github/actions/split-tests/src/index.mts @@ -0,0 +1,74 @@ +import { $, cd, glob, fs } from "zx"; +import path from "node:path"; +import { setOutput } from "@actions/core"; +import { SolidityConfig, SoliditySplit } from "./types.mjs"; +import { sieveSlowTests } from "./sieve.mjs"; +import { simpleSplit } from "./splitter.mjs"; + +/** + * Get a JSON formatted config file + * + * @param path The path to the config relative to the git root + */ +function getConfigFrom(path?: string): SolidityConfig { + if (!path) { + throw Error("No config path given, specify a path via $CONFIG"); + } + try { + const config = fs.readJsonSync(path); + return config; + } catch (e: unknown) { + throw Error( + `Could not find config file at path: ${path}. 
${(e as Error).message}` + ); + } +} + +async function main() { + $.verbose = false; + await runAtGitRoot(); + const configPath = process.env.CONFIG; + const config = getConfigFrom(configPath); + if (config.type === "solidity") { + await handleSolidity(config); + } else { + throw Error(`Invalid config given`); + } +} +main(); + +async function handleSolidity(config: SolidityConfig) { + const { basePath, splits: configBySplit } = config; + const splits = await Promise.all( + configBySplit.map( + async ({ dir, numOfSplits, slowTests: slowTestMatchers }) => { + const globPath = path.join(basePath, dir, "/**/*.test.ts"); + const rawTests = await glob(globPath); + const pathMappedTests = rawTests.map((r) => + r.replace("contracts/", "") + ); + const { filteredTests, slowTests } = sieveSlowTests( + pathMappedTests, + slowTestMatchers + ); + const testsBySplit = simpleSplit(filteredTests, slowTests, numOfSplits); + const splits: SoliditySplit[] = testsBySplit.map((tests, i) => ({ + idx: `${dir}_${i + 1}`, + id: `${dir} ${i + 1}/${numOfSplits}`, + tests: tests.join(" "), + coverageTests: + tests.length === 1 ? 
tests.join(",") : `{${tests.join(",")}}`, + })); + return splits; + } + ) + ); + + const serializedSplits = JSON.stringify(splits.flat()); + setOutput("splits", serializedSplits); +} + +async function runAtGitRoot() { + const gitRoot = await $`git rev-parse --show-toplevel`; + cd(gitRoot.stdout.trimEnd()); +} diff --git a/.github/actions/split-tests/src/sieve.mts b/.github/actions/split-tests/src/sieve.mts new file mode 100644 index 00000000..93573669 --- /dev/null +++ b/.github/actions/split-tests/src/sieve.mts @@ -0,0 +1,27 @@ +import {Tests} from "./types.mjs"; + +export function sieveSlowTests(tests: Tests, slowTestMatchers?: string[]) { + const slowTests: Tests = []; + const filteredTests: Tests = []; + + if (!slowTestMatchers) { + return {slowTests, filteredTests: tests}; + } + + // If the user supplies slow test matchers + // then we go through each test to see if we get a case sensitive match + + tests.forEach((t) => { + const isSlow = slowTestMatchers.reduce( + (isSlow, matcher) => t.includes(matcher) || isSlow, + false + ); + if (isSlow) { + slowTests.push(t); + } else { + filteredTests.push(t); + } + }); + + return {slowTests, filteredTests}; +} diff --git a/.github/actions/split-tests/src/splitter.mts b/.github/actions/split-tests/src/splitter.mts new file mode 100644 index 00000000..f924df55 --- /dev/null +++ b/.github/actions/split-tests/src/splitter.mts @@ -0,0 +1,43 @@ +import {Tests, TestsBySplit} from "./types.mjs"; + +/** + * Split tests by first prioritizing slow tests being spread over each split, then filling each split by test list order. + * + * @example + * Given the following arguments: + * tests: ['foo.test', 'bar.test', 'baz.test', 'yup.test', 'nope.test'] + * slowTests: ['bonk.test', 'bop.test', 'ouch.test.ts'] + * numOfSplits: 2 + * + * We get the following output: + * 1. Spread slow tests across splits: [['bonk.test', 'ouch.test.ts'], ['bop.test']] + * 2. 
Insert list of tests: [['bonk.test', 'ouch.test.ts', 'foo.test', 'bar.test'], ['bop.test', 'baz.test', 'yup.test', 'nope.test']] + * + * @param tests A list of tests to distribute across splits by the test list order + * @param slowTests A list of slow tests, where the list of tests is evenly distributed across all splits before inserting regular tests + * @param numOfSplits The number of splits to spread tests across + */ +export function simpleSplit( + tests: Tests, + slowTests: Tests, + numOfSplits: number +): TestsBySplit { + const maxTestsPerSplit = Math.max(tests.length / numOfSplits); + + const testsBySplit: TestsBySplit = new Array(numOfSplits) + .fill(null) + .map(() => []); + + // Evenly distribute slow tests over each split + slowTests.forEach((test, i) => { + const splitIndex = i % numOfSplits; + testsBySplit[splitIndex].push(test); + }); + + tests.forEach((test, i) => { + const splitIndex = Math.floor(i / maxTestsPerSplit); + testsBySplit[splitIndex].push(test); + }); + + return testsBySplit; +} diff --git a/.github/actions/split-tests/src/types.mts b/.github/actions/split-tests/src/types.mts new file mode 100644 index 00000000..3eae2f0e --- /dev/null +++ b/.github/actions/split-tests/src/types.mts @@ -0,0 +1,75 @@ +/** + * An array of all tests + */ +export type Tests = string[]; + +/** + * An array of tests, indexed by split + */ +export type TestsBySplit = string[][]; + +export interface Split { + /** + * The split index + * @example "4" + */ + idx: string; + + /** + * The split index in the context of all splits + * @example "4/10" + */ + id: string; +} + +export interface SoliditySplit extends Split { + /** + * A string that contains a whitespace delimited list of tests to run + * + * This format is to support the `hardhat test` command. + * @example test/foo.test.ts test/bar.test.ts + */ + tests: string; + + /** + * A string that contains a glob that expresses the list of tests to run. 
+ * + * This format is used to conform to the --testfiles flag of solidity-coverage + * @example {test/foo.test.ts,test/bar.test.ts} + */ + coverageTests: string; +} + +/** + * Configuration file for solidity tests + */ +export interface SolidityConfig { + type: "solidity"; + /** + * The path to the contracts tests directory, relative to the git root + */ + basePath: string; + splits: { + /** + * The number of sub-splits to run across + */ + numOfSplits: number; + /** + * The directory of the tests to create sub-splits across, relative to the basePath + */ + dir: string; + /** + * An array of known slow tests, to better distribute across sub-splits + * + * Each string is a case-sensitive matcher that will match against any substring within the list of test file paths within the `dir` configuration. + * + * @example + * Given the dir `v0.8`, we get the following tests: ['v0.8/Foo1.test.ts','v0.8/bar.test.ts','v0.8/dev/eolpe/Foo.test.ts'] + * + * If we supply the following `slowTests` argument: ['Foo'] + * + * Then it'll match against both 'v0.8/Foo1.test.ts' and 'v0.8/dev/eolpe/Foo.test.ts'. 
+ */ + slowTests?: string[]; + }[]; +} diff --git a/.github/actions/split-tests/test/__snapshots__/sieve.test.ts.snap b/.github/actions/split-tests/test/__snapshots__/sieve.test.ts.snap new file mode 100644 index 00000000..d55bc175 --- /dev/null +++ b/.github/actions/split-tests/test/__snapshots__/sieve.test.ts.snap @@ -0,0 +1,99 @@ +// Jest Snapshot v1, https://goo.gl/fbAQLP + +exports[`sieveSlowTests works 1`] = ` +{ + "filteredTests": [], + "slowTests": [], +} +`; + +exports[`sieveSlowTests works 2`] = ` +{ + "filteredTests": [], + "slowTests": [], +} +`; + +exports[`sieveSlowTests works 3`] = ` +{ + "filteredTests": [ + "keepme", + ], + "slowTests": [], +} +`; + +exports[`sieveSlowTests works 4`] = ` +{ + "filteredTests": [ + "keepme", + ], + "slowTests": [], +} +`; + +exports[`sieveSlowTests works 5`] = ` +{ + "filteredTests": [ + "foo.test", + "bar.test", + "baz.test", + "yup.test", + "nope.test", + "bonk.test", + "bop.test", + "ouch.test.ts", + ], + "slowTests": [], +} +`; + +exports[`sieveSlowTests works 6`] = ` +{ + "filteredTests": [ + "foo.test", + "bar.test", + "baz.test", + "yup.test", + "nope.test", + "bonk.test", + "bop.test", + "ouch.test.ts", + ], + "slowTests": [], +} +`; + +exports[`sieveSlowTests works 7`] = ` +{ + "filteredTests": [ + "foo.test", + "bar.test", + "baz.test", + "yup.test", + "nope.test", + "bonk.test", + "bop.test", + ], + "slowTests": [ + "ouch.test.ts", + ], +} +`; + +exports[`sieveSlowTests works 8`] = ` +{ + "filteredTests": [ + "foo.test", + "bar.test", + "baz.test", + "yup.test", + "nope.test", + ], + "slowTests": [ + "bonk.test", + "bop.test", + "ouch.test.ts", + ], +} +`; diff --git a/.github/actions/split-tests/test/__snapshots__/splitter.test.ts.snap b/.github/actions/split-tests/test/__snapshots__/splitter.test.ts.snap new file mode 100644 index 00000000..70dfe70e --- /dev/null +++ b/.github/actions/split-tests/test/__snapshots__/splitter.test.ts.snap @@ -0,0 +1,119 @@ +// Jest Snapshot v1, https://goo.gl/fbAQLP + 
+exports[`simpleSplit doesn't error on empty arrays 1`] = ` +[ + [], +] +`; + +exports[`simpleSplit doesn't error on empty arrays 2`] = ` +[ + [], + [], + [], + [], + [], +] +`; + +exports[`simpleSplit handles no slow test splitting 1`] = ` +[ + [ + "foo.test", + "bar.test", + "baz.test", + "yup.test", + "nope.test", + "bonk.test", + "bop.test", + "ouch.test.ts", + ], +] +`; + +exports[`simpleSplit handles no slow test splitting 2`] = ` +[ + [ + "foo.test", + "bar.test", + "baz.test", + "yup.test", + ], + [ + "nope.test", + "bonk.test", + "bop.test", + "ouch.test.ts", + ], +] +`; + +exports[`simpleSplit handles no slow test splitting 3`] = ` +[ + [ + "foo.test", + "bar.test", + "baz.test", + ], + [ + "yup.test", + "nope.test", + "bonk.test", + ], + [ + "bop.test", + "ouch.test.ts", + ], +] +`; + +exports[`simpleSplit handles slow test splitting 1`] = ` +[ + [ + "bonk.test", + "bop.test", + "ouch.test.ts", + "foo.test", + "bar.test", + "baz.test", + "yup.test", + "nope.test", + ], +] +`; + +exports[`simpleSplit handles slow test splitting 2`] = ` +[ + [ + "bonk.test", + "ouch.test.ts", + "foo.test", + "bar.test", + "baz.test", + ], + [ + "bop.test", + "yup.test", + "nope.test", + ], +] +`; + +exports[`simpleSplit handles slow test splitting 3`] = ` +[ + [ + "bonk.test", + "foo.test", + "bar.test", + ], + [ + "bop.test", + "baz.test", + "yup.test", + ], + [ + "ouch.test.ts", + "nope.test", + ], +] +`; diff --git a/.github/actions/split-tests/test/fixtures.mts b/.github/actions/split-tests/test/fixtures.mts new file mode 100644 index 00000000..aa87ba4c --- /dev/null +++ b/.github/actions/split-tests/test/fixtures.mts @@ -0,0 +1,20 @@ +export const testArr: string[] = [ + "foo.test", + "bar.test", + "baz.test", + "yup.test", + "nope.test", + "bonk.test", + "bop.test", + "ouch.test.ts", +]; + +export const testSievedArr: string[] = [ + "foo.test", + "bar.test", + "baz.test", + "yup.test", + "nope.test", +]; + +export const testSlowArr: string[] = ["bonk.test", 
"bop.test", "ouch.test.ts"]; diff --git a/.github/actions/split-tests/test/sieve.test.ts b/.github/actions/split-tests/test/sieve.test.ts new file mode 100644 index 00000000..bc296a43 --- /dev/null +++ b/.github/actions/split-tests/test/sieve.test.ts @@ -0,0 +1,15 @@ +import {sieveSlowTests} from "../src/sieve.mjs"; +import {testArr} from "./fixtures.mjs"; + +describe("sieveSlowTests", () => { + it("works", () => { + expect(sieveSlowTests([])).toMatchSnapshot(); + expect(sieveSlowTests([], [])).toMatchSnapshot(); + expect(sieveSlowTests(["keepme"], [])).toMatchSnapshot(); + expect(sieveSlowTests(["keepme"])).toMatchSnapshot(); + expect(sieveSlowTests(testArr, [])).toMatchSnapshot(); + expect(sieveSlowTests(testArr, ["noself"])).toMatchSnapshot(); + expect(sieveSlowTests(testArr, ["ouch.test.ts"])).toMatchSnapshot(); + expect(sieveSlowTests(testArr, ["bo", "ouch.test.ts"])).toMatchSnapshot(); + }); +}); diff --git a/.github/actions/split-tests/test/splitter.test.ts b/.github/actions/split-tests/test/splitter.test.ts new file mode 100644 index 00000000..85ae7726 --- /dev/null +++ b/.github/actions/split-tests/test/splitter.test.ts @@ -0,0 +1,21 @@ +import {simpleSplit} from "../src/splitter.mjs"; +import {testArr, testSievedArr, testSlowArr} from "./fixtures.mjs"; + +describe("simpleSplit", () => { + it("doesn't error on empty arrays", () => { + expect(simpleSplit([], [], 1)).toMatchSnapshot(); + expect(simpleSplit([], [], 5)).toMatchSnapshot(); + }); + + it("handles no slow test splitting", () => { + expect(simpleSplit(testArr, [], 1)).toMatchSnapshot(); + expect(simpleSplit(testArr, [], 2)).toMatchSnapshot(); + expect(simpleSplit(testArr, [], 3)).toMatchSnapshot(); + }); + + it("handles slow test splitting", () => { + expect(simpleSplit(testSievedArr, testSlowArr, 1)).toMatchSnapshot(); + expect(simpleSplit(testSievedArr, testSlowArr, 2)).toMatchSnapshot(); + expect(simpleSplit(testSievedArr, testSlowArr, 3)).toMatchSnapshot(); + }); +}); diff --git 
a/.github/actions/split-tests/tsconfig.json b/.github/actions/split-tests/tsconfig.json new file mode 100644 index 00000000..4b36d4a1 --- /dev/null +++ b/.github/actions/split-tests/tsconfig.json @@ -0,0 +1,104 @@ +{ + "compilerOptions": { + /* Visit https://aka.ms/tsconfig to read more about this file */ + + /* Projects */ + // "incremental": true, /* Save .tsbuildinfo files to allow for incremental compilation of projects. */ + // "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */ + // "tsBuildInfoFile": "./.tsbuildinfo", /* Specify the path to .tsbuildinfo incremental compilation file. */ + // "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects. */ + // "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */ + // "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */ + + /* Language and Environment */ + "target": "ESNext" /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */, + // "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */ + // "jsx": "preserve", /* Specify what JSX code is generated. */ + // "experimentalDecorators": true, /* Enable experimental support for TC39 stage 2 draft decorators. */ + // "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */ + // "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */ + // "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. 
*/ + // "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */ + // "reactNamespace": "", /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */ + // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */ + // "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */ + // "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */ + + /* Modules */ + "module": "NodeNext" /* Specify what module code is generated. */, + // "rootDir": "./", /* Specify the root folder within your source files. */ + "moduleResolution": "NodeNext" /* Specify how TypeScript looks up a file from a given module specifier. */, + // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */ + // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */ + // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */ + // "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */ + // "types": [], /* Specify type package names to be included without being referenced in a source file. */ + // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */ + // "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */ + // "resolveJsonModule": true, /* Enable importing .json files. */ + // "noResolve": true, /* Disallow 'import's, 'require's or ''s from expanding the number of files TypeScript should add to a project. */ + + /* JavaScript Support */ + // "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */ + // "checkJs": true, /* Enable error reporting in type-checked JavaScript files. 
*/ + // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */ + + /* Emit */ + // "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */ + // "declarationMap": true, /* Create sourcemaps for d.ts files. */ + // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */ + // "sourceMap": true, /* Create source map files for emitted JavaScript files. */ + // "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */ + // "outDir": "./", /* Specify an output folder for all emitted files. */ + // "removeComments": true, /* Disable emitting comments. */ + "noEmit": true /* Disable emitting files from a compilation. */, + // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */ + // "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types. */ + // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */ + // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */ + // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ + // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */ + // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */ + // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */ + // "newLine": "crlf", /* Set the newline character for emitting files. */ + // "stripInternal": true, /* Disable emitting declarations that have '@internal' in their JSDoc comments. 
*/ + // "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. */ + // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */ + // "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */ + // "declarationDir": "./", /* Specify the output directory for generated declaration files. */ + // "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */ + + /* Interop Constraints */ + // "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */ + // "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */ + "esModuleInterop": true /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */, + // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */ + "forceConsistentCasingInFileNames": true /* Ensure that casing is correct in imports. */, + + /* Type Checking */ + "strict": true /* Enable all strict type-checking options. */, + // "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */ + // "strictNullChecks": true, /* When type checking, take into account 'null' and 'undefined'. */ + // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */ + // "strictBindCallApply": true, /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */ + // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */ + // "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. 
*/ + // "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. */ + // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */ + // "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */ + // "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */ + // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */ + // "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */ + // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */ + // "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. */ + // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */ + // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */ + // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */ + // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */ + + /* Completeness */ + // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */ + "skipLibCheck": false /* Skip type checking all .d.ts files. */ + }, + "include": ["src", "test"] +} diff --git a/.github/actions/version-file-bump/action.yml b/.github/actions/version-file-bump/action.yml new file mode 100644 index 00000000..f5bfa2dd --- /dev/null +++ b/.github/actions/version-file-bump/action.yml @@ -0,0 +1,51 @@ +name: version-file-bump +description: "Ensure that the VERSION file has been bumped since the last release." 
+inputs: + github-token: + description: "Github access token" + default: ${{ github.token }} + required: true +outputs: + result: + value: ${{ steps.compare.outputs.result }} + description: 'Result of the comparison' +runs: + using: composite + steps: + - name: Get latest release version + id: get-latest-version + shell: bash + run: | + untrimmed_ver=$( + curl --header "Authorization: token ${{ inputs.github-token }}" \ + --request GET \ + "https://api.github.com/repos/${{ github.repository }}/releases/latest?draft=false&prerelease=false" \ + | jq -r .name + ) + latest_version="${untrimmed_ver:1}" + echo "latest_version=${latest_version}" | tee -a "$GITHUB_OUTPUT" + - name: Get current version + id: get-current-version + shell: bash + run: | + current_version=$(head -n1 ./VERSION) + echo "current_version=${current_version}" | tee -a "$GITHUB_OUTPUT" + - name: Compare semantic versions + uses: goplugin/plugin-github-actions/semver-compare@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + id: compare + with: + version1: ${{ steps.get-current-version.outputs.current_version }} + operator: eq + version2: ${{ steps.get-latest-version.outputs.latest_version }} + - name: Fail if version not bumped + # XXX: The reason we are not checking if the current is greater than the + # latest release is to account for hot fixes which may have been branched + # from a previous tag. + shell: bash + env: + VERSION_NOT_BUMPED: ${{ steps.compare.outputs.result }} + run: | + if [[ "${VERSION_NOT_BUMPED:-}" = "true" ]]; then + echo "Version file not bumped since last release. Please bump the ./VERSION file in the root of the repo and commit the change." 
+ exit 1 + fi diff --git a/.github/cr.yaml b/.github/cr.yaml new file mode 100644 index 00000000..b526aa96 --- /dev/null +++ b/.github/cr.yaml @@ -0,0 +1,2 @@ +pages_branch: helm-release +packages_with_index: true diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..cdb1d4fe --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,30 @@ +version: 2 +updates: + - package-ecosystem: gomod + directory: '/' + schedule: + interval: monthly + open-pull-requests-limit: 10 + ignore: + # Old versions are pinned for libocr. + - dependency-name: github.com/libp2p/go-libp2p-core + - dependency-name: github.com/libp2p/go-libp2p-peerstore + - dependency-name: github.com/multiformats/go-multiaddr + - package-ecosystem: npm + directory: '/' + schedule: + interval: monthly + open-pull-requests-limit: 0 + ignore: + - dependency-name: webpack + versions: + - 5.19.0 + - 5.24.2 + - dependency-name: lodash + versions: + - 4.17.21 + - package-ecosystem: github-actions + directory: '/' + schedule: + interval: daily + open-pull-requests-limit: 10 diff --git a/.github/scripts/functions.sh b/.github/scripts/functions.sh new file mode 100644 index 00000000..53b53392 --- /dev/null +++ b/.github/scripts/functions.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# Function to convert a comma-separated list into a TOML array format. 
+# Usage: convert_to_toml_array "elem1,elem2,elem3" +# Effect: "a,b,c" -> ["a","b","c"] +function convert_to_toml_array() { + local IFS=',' + local input_array=($1) + local toml_array_format="[" + + for element in "${input_array[@]}"; do + toml_array_format+="\"$element\"," + done + + toml_array_format="${toml_array_format%,}]" + echo "$toml_array_format" +} \ No newline at end of file diff --git a/.github/tracing/README.md b/.github/tracing/README.md new file mode 100644 index 00000000..3a41e6b0 --- /dev/null +++ b/.github/tracing/README.md @@ -0,0 +1,112 @@ +# Distributed Tracing + +As part of the LOOP plugin effort, we've added distributed tracing to the core node. This is helpful for initial development and maintenance of LOOPs, but will also empower product teams building on top of core. + +## Dev environment + +One way to generate traces locally today is with the OCR2 basic smoke test. + +1. navigate to `.github/tracing/` and then run `docker compose --file local-smoke-docker-compose.yaml up` +2. setup a local docker registry at `127.0.0.1:5000` (https://www.docker.com/blog/how-to-use-your-own-registry-2/) +3. run `make build_push_plugin_docker_image` in `plugin/integration-tests/Makefile` +4. prepare your `overrides.toml` file with selected network and CL image name and version and place it anywhere +inside `integration-tests` directory. Sample `overrides.toml` file: +```toml +[PluginImage] +image="127.0.0.1:5000/plugin" +version="develop" + +[Network] +selected_networks=["simulated"] +``` +5. run `go test -run TestOCRv2Basic ./smoke/ocr2_test.go` +6. navigate to `localhost:3000/explore` in a web browser to query for traces + +Core and the median plugins are instrumented with open telemetry traces, which are sent to the OTEL collector and forwarded to the Tempo backend. The grafana UI can then read the trace data from the Tempo backend. + + + +## CI environment + +Another way to generate traces is by enabling traces for PRs. 
This will instrument traces for `TestOCRv2Basic` in the CI run. + +1. Cut a PR in the core repo +2. Add the `enable tracing` label to the PR +3. Navigate to `Integration Tests / ETH Smoke Tests ocr2-plugins (pull_request)` details +4. Navigate to the summary of the integration tests +5. After the test completes, the generated trace data will be saved as an artifact, currently called `trace-data` +6. Download the artifact to this directory (`plugin/.github/tracing`) +7. `docker compose --file local-smoke-docker-compose.yaml up` +8. Run `sh replay.sh` to replay those traces to the otel-collector container that was spun up in the last step. +9. navigate to `localhost:3000/explore` in a web browser to query for traces + +The artifact is not json encoded - each individual line is a well formed and complete json object. + + +## Production and NOPs environments + +In a production environment, we suggest coupling the lifecycle of nodes and otel-collectors. A best practice is to deploy the otel-collector alongside your node, using infrastructure as code (IAC) to automate deployments and certificate lifecycles. While there are valid use cases for using `Tracing.Mode = unencrypted`, we have set the default encryption setting to `Tracing.Mode = tls`. Externally deployed otel-collectors can not be used with `Tracing.Mode = unencrypted`. i.e. If `Tracing.Mode = unencrypted` and an external URI is detected for `Tracing.CollectorTarget` node configuration will fail to validate and the node will not boot. The node requires a valid encryption mode and collector target to send traces. + +Once traces reach the otel-collector, the rest of the observability pipeline is flexible. We recommend deploying (through automation) centrally managed Grafana Tempo and Grafana UI instances to receive from one or many otel-collector instances. Always use networking best practices and encrypt trace data, especially at network boundaries. 
+ +## Configuration +This folder contains the following config files: +* otel-collector-ci.yaml +* otel-collector-dev.yaml +* tempo.yaml +* grafana-datasources.yaml + +These config files are for an OTEL collector, grafana Tempo, and a grafana UI instance to run as containers on the same network. +`otel-collector-dev.yaml` is the configuration for dev (i.e. your local machine) environments, and forwards traces from the otel collector to the grafana tempo instance on the same network. +`otel-collector-ci.yaml` is the configuration for the CI runs, and exports the trace data to the artifact from the github run. + +## Adding Traces to Plugins and to core + +Adding traces requires identifying an observability gap in a related group of code executions or a critical path in your application. This is intuitive for the developer: + +- "What's the flow of component interaction in this distributed system?" +- "What's the behavior of the JobProcessorOne component when jobs with [x, y, z] attributes are processed?" +- "Is this critical path workflow behaving the way we expect?" + +The developer will measure a flow of execution from end to end in one trace. Each logically separate measure of this flow is called a span. Spans have either one or no parent span and multiple children span. The relationship between parent and child spans in aggregate will form a directed acyclic graph. The trace begins at the root of this graph. + +The most trivial application of a span is measuring top level performance in one critical path. There is much more you can do, including creating human readable and timestamped events within a span (useful for monitoring concurrent access to resources), recording errors, linking parent and children spans through large parts of an application, and even extending a span beyond a single process. + +Spans are created by `tracers` and passed through go applications by `Context`s. A tracer must be initialized first. 
Both core and plugin developers will initialize a tracer from the globally registered trace provider: + +``` +tracer := otel.GetTracerProvider().Tracer("example.com/foo") +``` + +The globally registered tracer provider is available for plugins after they are initialized, and available in core after configuration is processed (`initGlobals`). + +Add spans by: +``` + func interestingFunc() { + // Assuming there is an appropriate parentContext + ctx, span := tracer.Start(parentContext, "hello-span") + defer span.End() + + // do some work to track with hello-span + } +``` +As implied by the example, `span` is a child of its parent span captured by `parentContext`. + + +Note that in certain situations, there are 3rd party libraries that will setup spans. For instance: + +``` +import ( + "github.com/gin-gonic/gin" + "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin" +) + +router := gin.Default() +router.Use(otelgin.Middleware("service-name")) +``` + +The developer aligns with best practices when they: +- Start with critical paths +- Measure paths from end to end (Context is wired all the way through) +- Emphasize broadness of measurement over depth +- Use automatic instrumentation if possible \ No newline at end of file diff --git a/.github/tracing/grafana-datasources.yaml b/.github/tracing/grafana-datasources.yaml new file mode 100644 index 00000000..098b06ec --- /dev/null +++ b/.github/tracing/grafana-datasources.yaml @@ -0,0 +1,18 @@ +apiVersion: 1 + +datasources: +- name: Tempo + type: tempo + access: proxy + orgId: 1 + url: http://tempo:3200 + basicAuth: false + isDefault: true + version: 1 + editable: false + apiVersion: 1 + uid: tempo + jsonData: + httpMethod: GET + serviceMap: + datasourceUid: prometheus \ No newline at end of file diff --git a/.github/tracing/local-smoke-docker-compose.yaml b/.github/tracing/local-smoke-docker-compose.yaml new file mode 100644 index 00000000..744ba88e --- /dev/null +++ 
b/.github/tracing/local-smoke-docker-compose.yaml @@ -0,0 +1,48 @@ +version: "3" +services: + + # ... the OpenTelemetry Collector configured to receive traces and export to Tempo ... + otel-collector: + image: otel/opentelemetry-collector:0.61.0 + command: [ "--config=/etc/otel-collector.yaml" ] + volumes: + - ./otel-collector-dev.yaml:/etc/otel-collector.yaml + - ../../integration-tests/smoke/traces/trace-data.json:/etc/trace-data.json # local trace data stored consistent with smoke/logs + ports: + - "4317:4317" # otlp grpc + - "3100:3100" + depends_on: + - tempo + networks: + - tracing-network + + # .. Which accepts requests from grafana ... + tempo: + image: grafana/tempo:latest + command: [ "-config.file=/etc/tempo.yaml" ] + volumes: + - ./tempo.yaml:/etc/tempo.yaml + - ./tempo-data:/tmp/tempo + ports: + - "4317" # otlp grpc + networks: + - tracing-network + + grafana: + image: grafana/grafana:9.4.3 + volumes: + - ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml + environment: + - GF_AUTH_ANONYMOUS_ENABLED=true + - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin + - GF_AUTH_DISABLE_LOGIN_FORM=true + - GF_FEATURE_TOGGLES_ENABLE=traceqlEditor + ports: + - "3000:3000" + networks: + - tracing-network + +networks: + tracing-network: + name: tracing + driver: bridge \ No newline at end of file diff --git a/.github/tracing/otel-collector-ci.yaml b/.github/tracing/otel-collector-ci.yaml new file mode 100644 index 00000000..0bf123d2 --- /dev/null +++ b/.github/tracing/otel-collector-ci.yaml @@ -0,0 +1,22 @@ +receivers: + otlp: + protocols: + grpc: + endpoint: "0.0.0.0:4317" + http: + endpoint: "0.0.0.0:3100" +exporters: + file: + path: /tracing/trace-data.json + otlp: + endpoint: tempo:4317 + tls: + insecure: true +service: + telemetry: + logs: + level: "debug" # Set log level to debug + pipelines: + traces: + receivers: [otlp] + exporters: [file,otlp] \ No newline at end of file diff --git a/.github/tracing/otel-collector-dev.yaml 
b/.github/tracing/otel-collector-dev.yaml new file mode 100644 index 00000000..dd059127 --- /dev/null +++ b/.github/tracing/otel-collector-dev.yaml @@ -0,0 +1,20 @@ +receivers: + otlp: + protocols: + grpc: + endpoint: "0.0.0.0:4317" + http: + endpoint: "0.0.0.0:3100" +exporters: + otlp: + endpoint: tempo:4317 + tls: + insecure: true +service: + telemetry: + logs: + level: "debug" # Set log level to debug + pipelines: + traces: + receivers: [otlp] + exporters: [otlp] \ No newline at end of file diff --git a/.github/tracing/replay.sh b/.github/tracing/replay.sh new file mode 100644 index 00000000..b2e56456 --- /dev/null +++ b/.github/tracing/replay.sh @@ -0,0 +1,6 @@ +# Read JSON file and loop through each trace +while IFS= read -r trace; do + curl -X POST http://localhost:3100/v1/traces \ + -H "Content-Type: application/json" \ + -d "$trace" +done < "trace-data" diff --git a/.github/tracing/tempo.yaml b/.github/tracing/tempo.yaml new file mode 100644 index 00000000..e61f744f --- /dev/null +++ b/.github/tracing/tempo.yaml @@ -0,0 +1,24 @@ +server: + http_listen_port: 3200 + +distributor: + receivers: + otlp: + protocols: + http: + grpc: + +ingester: + max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally + +compactor: + compaction: + block_retention: 1h # overall Tempo trace retention. 
set for demo purposes + +storage: + trace: + backend: local # backend configuration to use + wal: + path: /tmp/tempo/wal # where to store the wal locally + local: + path: /tmp/tempo/blocks \ No newline at end of file diff --git a/.github/workflows/auto-update.yml b/.github/workflows/auto-update.yml new file mode 100644 index 00000000..963145c4 --- /dev/null +++ b/.github/workflows/auto-update.yml @@ -0,0 +1,17 @@ +name: Auto Update +on: + push: + branches: + - develop +jobs: + autoupdate: + name: Auto Update + runs-on: ubuntu-latest + steps: + - uses: docker://chinthakagodawita/autoupdate-action:v1 + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + PR_FILTER: "labelled" + PR_LABELS: "auto-update" + MERGE_MSG: "Branch was auto-updated." + MERGE_CONFLICT_ACTION: "ignore" diff --git a/.github/workflows/automation-benchmark-tests.yml b/.github/workflows/automation-benchmark-tests.yml new file mode 100644 index 00000000..c07d7a74 --- /dev/null +++ b/.github/workflows/automation-benchmark-tests.yml @@ -0,0 +1,98 @@ +name: Automation Benchmark Test +on: + workflow_dispatch: + inputs: + testType: + description: Type of test to run (benchmark, soak) + required: true + default: benchmark + type: string + base64Config: + description: base64-ed config + required: true + type: string + slackMemberID: + description: Notifies test results (Not your @) + required: true + default: U02Q14G80TY + type: string +jobs: + automation_benchmark: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + name: Automation Benchmark Test + runs-on: ubuntu20.04-16cores-64GB + env: + SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} + SLACK_CHANNEL: C03KJ5S7KEK + PLUGIN_ENV_USER: ${{ github.actor }} + REF_NAME: ${{ github.head_ref || github.ref_name }} + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ env.REF_NAME }} + - name: Get Slack config and mask 
base64 config + run: | + SLACK_USER=$(jq -r '.inputs.slackMemberID' $GITHUB_EVENT_PATH) + echo ::add-mask::$SLACK_USER + echo SLACK_USER=$SLACK_USER >> $GITHUB_ENV + + BASE64_CONFIG_OVERRIDE=$(jq -r '.inputs.base64Config' $GITHUB_EVENT_PATH) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Parse base64 config + uses: ./.github/actions/setup-parse-base64-config + with: + base64Config: ${{ env.BASE64_CONFIG_OVERRIDE }} + - name: Send details to Step Summary + shell: bash + run: | + echo "### plugin image used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.PLUGIN_IMAGE }}\`" >>$GITHUB_STEP_SUMMARY + echo "### plugin-tests image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY + echo "\`${GITHUB_SHA}\`" >>$GITHUB_STEP_SUMMARY + echo "### Networks on which test was run" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.NETWORKS }}\`" >>$GITHUB_STEP_SUMMARY + - name: Build Test Image + uses: ./.github/actions/build-test-image + with: + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + suites: benchmark load/automationv2_1 chaos reorg + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + env: + DETACH_RUNNER: true + TEST_SUITE: benchmark + TEST_ARGS: -test.timeout 720h + ENV_JOB_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin-tests:${{ github.sha }} + INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com + TEST_TYPE: ${{ github.event.inputs.testType }} + TEST_TEST_TYPE: ${{ github.event.inputs.testType }} + with: + test_command_to_run: cd integration-tests && go test -timeout 30m -v -run ^TestAutomationBenchmark$ ./benchmark -count=1 + 
test_download_vendor_packages_command: make gomod + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ env.PLUGIN_VERSION }} + token: ${{ secrets.GITHUB_TOKEN }} + should_cleanup: false + go_mod_path: ./integration-tests/go.mod + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Automation Benchmark Test + continue-on-error: true diff --git a/.github/workflows/automation-load-tests.yml b/.github/workflows/automation-load-tests.yml new file mode 100644 index 00000000..1f4b37c6 --- /dev/null +++ b/.github/workflows/automation-load-tests.yml @@ -0,0 +1,116 @@ +name: Automation Load Test +on: + workflow_dispatch: + inputs: + base64Config: + description: base64-ed config + required: true + type: string + slackMemberID: + description: Notifies test results (Not your @) + required: true + default: U02Q14G80TY + type: string + +jobs: + automation_load: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + name: Automation Load Test + runs-on: ubuntu20.04-16cores-64GB + env: + SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} + SLACK_CHANNEL: C03KJ5S7KEK + PLUGIN_ENV_USER: ${{ github.actor }} + REF_NAME: ${{ github.head_ref || github.ref_name }} + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ env.REF_NAME }} + - name: Get Slack config and mask base64 config + run: | + SLACK_USER=$(jq -r '.inputs.slackMemberID' $GITHUB_EVENT_PATH) + echo ::add-mask::$SLACK_USER + echo SLACK_USER=$SLACK_USER >> 
$GITHUB_ENV + + BASE64_CONFIG_OVERRIDE=$(jq -r '.inputs.base64Config' $GITHUB_EVENT_PATH) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Merge Pyroscope config + env: + PYROSCOPE_SERVER: ${{ secrets.QA_PYROSCOPE_INSTANCE }} + PYROSCOPE_ENVIRONMENT: "automation-load-test" + PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} + run: | + decoded_toml=$(echo $BASE64_CONFIG_OVERRIDE | base64 -d) + + # use Pyroscope config from GH secrets and merge it with base64 input + cat << EOF > config.toml + server_url="$PYROSCOPE_SERVER" + environment="$PYROSCOPE_ENVIRONMENT" + key="$PYROSCOPE_KEY" + EOF + + echo "$decoded_toml" >> final_config.toml + cat config.toml >> final_config.toml + BASE64_CONFIG_OVERRIDE=$(cat final_config.toml | base64 -w 0) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Parse base64 config + uses: ./.github/actions/setup-parse-base64-config + with: + base64Config: ${{ env.BASE64_CONFIG_OVERRIDE }} + - name: Send details to Step Summary + shell: bash + run: | + echo "### plugin image used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.PLUGIN_IMAGE }}\`" >>$GITHUB_STEP_SUMMARY + echo "### plugin-tests image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY + echo "\`${GITHUB_SHA}\`" >>$GITHUB_STEP_SUMMARY + echo "### Networks on which test was run" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.NETWORKS }}\`" >>$GITHUB_STEP_SUMMARY + - name: Build Test Image + uses: ./.github/actions/build-test-image + with: + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + suites: benchmark load/automationv2_1 chaos reorg + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + env: + RR_CPU: 4000m + 
RR_MEM: 4Gi + DETACH_RUNNER: true + TEST_SUITE: automationv2_1 + TEST_ARGS: -test.timeout 720h + ENV_JOB_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin-tests:${{ github.sha }} + INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com + PYROSCOPE_SERVER: ${{ secrets.QA_PYROSCOPE_INSTANCE }} + PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} + with: + test_command_to_run: cd integration-tests && go test -timeout 1h -v -run TestLogTrigger ./load/automationv2_1 -count=1 + test_download_vendor_packages_command: make gomod + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ env.PLUGIN_VERSION }} + token: ${{ secrets.GITHUB_TOKEN }} + should_cleanup: false + go_mod_path: ./integration-tests/go.mod + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Automation Load Test + continue-on-error: true diff --git a/.github/workflows/automation-nightly-tests.yml b/.github/workflows/automation-nightly-tests.yml new file mode 100644 index 00000000..1af9e388 --- /dev/null +++ b/.github/workflows/automation-nightly-tests.yml @@ -0,0 +1,263 @@ +name: Automation Nightly Tests +on: + schedule: + - cron: "0 0 * * *" # Run nightly + push: + tags: + - "*" + workflow_dispatch: + +env: + PLUGIN_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin + +jobs: + build-plugin: + environment: integration + permissions: + id-token: write + contents: read + name: Build Plugin Image + runs-on: 
ubuntu20.04-16cores-64GB + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Plugin Image + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Build Plugin Image + uses: ./.github/actions/build-plugin-image + with: + tag_suffix: "" + dockerfile: core/plugin.Dockerfile + git_commit_sha: ${{ github.sha }} + GRAFANA_CLOUD_BASIC_AUTH: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }} + GRAFANA_CLOUD_HOST: ${{ secrets.GRAFANA_CLOUD_HOST }} + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + + automation-upgrade-tests: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin] + env: + PLUGIN_COMMIT_SHA: ${{ github.sha }} + PLUGIN_ENV_USER: ${{ github.actor }} + TEST_LOG_LEVEL: debug + SELECTED_NETWORKS: "SIMULATED" + strategy: + fail-fast: false + matrix: + tests: + - name: Upgrade + suite: smoke + nodes: 6 + os: ubuntu20.04-8cores-32GB + network: SIMULATED + command: -run ^TestAutomationNodeUpgrade$ ./smoke + runs-on: ${{ matrix.tests.os }} + name: Automation ${{ matrix.tests.name }} Test + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.head_ref || github.ref_name }} + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-upgrade-config + with: + selectedNetworks: ${{ env.SELECTED_NETWORKS }} + pluginImage: "public.ecr.aws/plugin/plugin" + pluginVersion: "latest" + 
upgradeImage: ${{ env.PLUGIN_IMAGE }} + upgradeVersion: ${{ github.sha }} + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + env: + TEST_SUITE: ${{ matrix.tests.suite }} + with: + test_command_to_run: cd ./integration-tests && go test -timeout 60m -count=1 -json -test.parallel=${{ matrix.tests.nodes }} ${{ matrix.tests.command }} 2>&1 | tee /tmp/gotest.log | gotestfmt + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: 'public.ecr.aws/plugin/plugin' + cl_image_tag: 'latest' + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + artifacts_location: ./integration-tests/${{ matrix.tests.suite }}/logs + publish_check_name: Automation Results ${{ matrix.tests.name }} + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Upload test log + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + if: failure() + with: + name: test-log-${{ matrix.tests.name }} + path: /tmp/gotest.log + retention-days: 7 + continue-on-error: true + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Automation ${{ matrix.tests.name }} Test + test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}' + continue-on-error: true + + test-notify: + name: Start Slack Thread + if: ${{ always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' }} + environment: integration + outputs: + thread_ts: ${{ steps.slack.outputs.thread_ts }} + permissions: + 
checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: [ automation-upgrade-tests ] + steps: + - name: Debug Result + run: echo ${{ join(needs.*.result, ',') }} + - name: Main Slack Notification + uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + id: slack + with: + channel-id: C03KJ5S7KEK + payload: | + { + "attachments": [ + { + "color": "${{ contains(join(needs.*.result, ','), 'failure') && '#C62828' || '#2E7D32' }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "Automation Nightly Tests ${{ contains(join(needs.*.result, ','), 'failure') && ':x:' || ':white_check_mark:'}}", + "emoji": true + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "<${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}|${{ github.ref_name }}> | <${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}|${{ github.sha }}> | <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|Run>" + } + } + ] + } + ] + } + env: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} + + test-results: + name: Post Test Results for ${{ matrix.name }} + if: ${{ always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' }} + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: test-notify + strategy: + fail-fast: false + matrix: + name: [ Upgrade ] + steps: + - name: Get Results + id: test-results + run: | + # I feel like there's some clever, fully jq way to do this, but I ain't got the motivation to figure it out + echo "Querying test results" + + PARSED_RESULTS=$(curl \ + -H "Authorization: Bearer ${{ github.token }}" \ + 'https://api.github.com/repos/${{github.repository}}/actions/runs/${{ github.run_id }}/jobs' \ + | jq -r --arg pattern "${{ 
matrix.name }} Test" '.jobs[] + | select(.name | test($pattern)) as $job + | $job.steps[] + | select(.name == "Run Tests") + | { conclusion: (if .conclusion == "success" then ":white_check_mark:" else ":x:" end), product: ("*" + ($job.name | capture($pattern).product) + "*") }') + + echo "Parsed Results:" + echo $PARSED_RESULTS + + ALL_SUCCESS=true + for row in $(echo "$PARSED_RESULTS" | jq -s | jq -r '.[] | select(.conclusion != ":white_check_mark:")'); do + success=false + break + done + + echo all_success=$ALL_SUCCESS >> $GITHUB_OUTPUT + + FORMATTED_RESULTS=$(echo $PARSED_RESULTS | jq -s '[.[] + | { + conclusion: .conclusion, + product: .product + } + ] + | map("{\"type\": \"section\", \"text\": {\"type\": \"mrkdwn\", \"text\": \"\(.product): \(.conclusion)\"}}") + | join(",")') + + echo "Formatted Results:" + echo $FORMATTED_RESULTS + + # Cleans out backslashes and quotes from jq + CLEAN_RESULTS=$(echo "$FORMATTED_RESULTS" | sed 's/\\\"/"/g' | sed 's/^"//;s/"$//') + + echo "Clean Results" + echo $CLEAN_RESULTS + + echo results=$CLEAN_RESULTS >> $GITHUB_OUTPUT + + - name: Test Details + uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + with: + channel-id: C03KJ5S7KEK + payload: | + { + "thread_ts": "${{ needs.test-notify.outputs.thread_ts }}", + "attachments": [ + { + "color": "${{ steps.test-results.outputs.all_success && '#2E7D32' || '#C62828' }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "${{ matrix.name }} ${{ steps.test-results.outputs.all_success && ':white_check_mark:' || ':x: Notifying <@U02Q14G80TY>'}}", + "emoji": true + } + }, + { + "type": "divider" + }, + ${{ steps.test-results.outputs.results }} + ] + } + ] + } + env: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} \ No newline at end of file diff --git a/.github/workflows/automation-ondemand-tests.yml b/.github/workflows/automation-ondemand-tests.yml new file mode 100644 index 00000000..8c45412b --- /dev/null +++ 
b/.github/workflows/automation-ondemand-tests.yml @@ -0,0 +1,262 @@ +name: Automation On Demand Tests +on: + workflow_dispatch: + inputs: + pluginVersion: + description: Plugin image version to use + required: false + type: string + pluginImage: + description: Plugin image repo to use (Leave empty to build from head/ref) + required: false + type: string + pluginVersionUpdate: + description: Plugin image version to use initially for upgrade test + default: latest + required: true + type: string + pluginImageUpdate: + description: Plugin image repo to use initially for upgrade test + required: true + default: public.ecr.aws/plugin/plugin + type: string + +env: + ENV_JOB_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin-tests:${{ github.sha }} + PLUGIN_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin + +jobs: + build-plugin: + environment: integration + permissions: + id-token: write + contents: read + strategy: + matrix: + image: + - name: "" + dockerfile: core/plugin.Dockerfile + tag-suffix: "" + - name: (plugins) + dockerfile: plugins/plugin.Dockerfile + tag-suffix: -plugins + name: Build Plugin Image ${{ matrix.image.name }} + runs-on: ubuntu20.04-16cores-64GB + steps: + - name: Collect Metrics + if: inputs.pluginImage == '' + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Plugin Image ${{ matrix.image.name }} + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.head_ref || github.ref_name }} + - name: Check if image exists + if: inputs.pluginImage == '' + id: check-image + uses: 
goplugin/plugin-github-actions/docker/image-exists@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + repository: plugin + tag: ${{ github.sha }}${{ matrix.image.tag-suffix }} + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + - name: Build Image + if: steps.check-image.outputs.exists == 'false' && inputs.pluginImage == '' + uses: goplugin/plugin-github-actions/plugin-testing-framework/build-image@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + cl_repo: goplugin/pluginv3.0 + cl_ref: ${{ github.sha }} + cl_dockerfile: ${{ matrix.image.dockerfile }} + push_tag: ${{ env.PLUGIN_IMAGE }}:${{ github.sha }}${{ matrix.image.tag-suffix }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + - name: Print Plugin Image Built + if: inputs.pluginImage == '' + run: | + echo "### plugin node image tag used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${GITHUB_SHA}\`" >>$GITHUB_STEP_SUMMARY + + build-test-image: + environment: integration + permissions: + id-token: write + contents: read + name: Build Test Image + runs-on: ubuntu20.04-16cores-64GB + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Test Image + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.head_ref || github.ref_name }} + - name: Build Test Image + uses: ./.github/actions/build-test-image + with: + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + + 
automation-on-demand-tests: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, build-test-image] + env: + PLUGIN_COMMIT_SHA: ${{ github.sha }} + PLUGIN_ENV_USER: ${{ github.actor }} + TEST_LOG_LEVEL: debug + strategy: + fail-fast: false + matrix: + tests: + - name: chaos + suite: chaos + nodes: 5 + os: ubuntu-latest + pyroscope_env: ci-automation-on-demand-chaos + network: SIMULATED + command: -run ^TestAutomationChaos$ ./chaos + - name: reorg + suite: reorg + nodes: 1 + os: ubuntu-latest + pyroscope_env: ci-automation-on-demand-reorg + network: SIMULATED_NONDEV + command: -run ^TestAutomationReorg$ ./reorg + - name: upgrade + suite: smoke + nodes: 6 + os: ubuntu20.04-8cores-32GB + pyroscope_env: ci-automation-on-demand-upgrade + network: SIMULATED + command: -run ^TestAutomationNodeUpgrade$ ./smoke + runs-on: ${{ matrix.tests.os }} + name: Automation On Demand ${{ matrix.tests.name }} Test + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.head_ref || github.ref_name }} + - name: Determine build to use + id: determine-build + shell: bash + run: | + if [[ "${{ inputs.pluginImage }}" == "" ]]; then + echo "image=${{ env.PLUGIN_IMAGE }}" >>$GITHUB_OUTPUT + echo "version=${{ github.sha }}" >>$GITHUB_OUTPUT + echo "upgrade_version=${{ github.sha }}" >>$GITHUB_OUTPUT + echo "upgrade_image=${{ env.PLUGIN_IMAGE }}" >>$GITHUB_OUTPUT + else + READ_CL_IMAGE=$(jq -r '.inputs.pluginImage' $GITHUB_EVENT_PATH) + echo ::add-mask::$READ_CL_IMAGE + echo "image=$READ_CL_IMAGE" >>$GITHUB_OUTPUT + echo "version=${{ inputs.pluginVersion }}" >>$GITHUB_OUTPUT + echo "upgrade_version=${{ inputs.pluginVersion }}" >>$GITHUB_OUTPUT + echo "upgrade_image=$READ_CL_IMAGE" >>$GITHUB_OUTPUT + fi + if [[ "${{ matrix.tests.name }}" == "upgrade" ]]; then + READ_CL_UPGR_IMAGE=$(jq -r '.inputs.pluginImageUpdate' 
$GITHUB_EVENT_PATH) + echo ::add-mask::$READ_CL_UPGR_IMAGE + echo "image=$READ_CL_UPGR_IMAGE" >>$GITHUB_OUTPUT + echo "version=${{ inputs.pluginVersionUpdate }}" >>$GITHUB_OUTPUT + fi + - name: Prepare Base64 TOML config + env: + SELECTED_NETWORKS: ${{ matrix.tests.network }} + OLD_IMAGE: ${{ steps.determine-build.outputs.image }} + OLD_VERSION: ${{ steps.determine-build.outputs.version }} + UPGRADE_VERSION: ${{ steps.determine-build.outputs.upgrade_version }} + UPGRADE_IMAGE: ${{ steps.determine-build.outputs.upgrade_image }} + PYROSCOPE_SERVER: ${{ matrix.tests.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + PYROSCOPE_ENVIRONMENT: ${{ matrix.tests.pyroscope_env }} + PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} + run: | + echo ::add-mask::$UPGRADE_IMAGE + echo ::add-mask::$OLD_IMAGE + + # load reusable functions + source ./.github/scripts/functions.sh + + selected_networks=$(convert_to_toml_array "$SELECTED_NETWORKS") + + if [ -n "$PYROSCOPE_SERVER" ]; then + pyroscope_enabled=true + else + pyroscope_enabled=false + fi + + cat << EOF > config.toml + [Network] + selected_networks=$selected_networks + + [PluginImage] + image="$OLD_IMAGE" + version="$OLD_VERSION" + + [PluginUpgradeImage] + image="$UPGRADE_IMAGE" + version="$UPGRADE_VERSION" + + [Pyroscope] + enabled=$pyroscope_enabled + server_url="$PYROSCOPE_SERVER" + environment="$PYROSCOPE_ENVIRONMENT" + key="$PYROSCOPE_KEY" + EOF + + BASE64_CONFIG_OVERRIDE=$(cat config.toml | base64 -w 0) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + env: + TEST_SUITE: ${{ matrix.tests.suite }} + with: + test_command_to_run: cd ./integration-tests && go test -timeout 60m 
-count=1 -json -test.parallel=${{ matrix.tests.nodes }} ${{ matrix.tests.command }} 2>&1 | tee /tmp/gotest.log | gotestfmt + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ steps.determine-build.outputs.image }} + cl_image_tag: ${{ steps.determine-build.outputs.version }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + artifacts_location: ./integration-tests/${{ matrix.tests.suite }}/logs + publish_check_name: Automation On Demand Results ${{ matrix.tests.name }} + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Upload test log + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + if: failure() + with: + name: test-log-${{ matrix.tests.name }} + path: /tmp/gotest.log + retention-days: 7 + continue-on-error: true + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Automation On Demand ${{ matrix.tests.name }} Test + test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}' + continue-on-error: true diff --git a/.github/workflows/bash-scripts.yml b/.github/workflows/bash-scripts.yml new file mode 100644 index 00000000..9fe2a0e6 --- /dev/null +++ b/.github/workflows/bash-scripts.yml @@ -0,0 +1,35 @@ +name: Bash Scripts + +on: + pull_request: + +jobs: + changes: + name: detect changes + runs-on: ubuntu-latest + outputs: + bash-scripts-src: ${{ steps.bash-scripts.outputs.src }} + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: 
dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd # v3.0.0 + id: bash-scripts + with: + filters: | + src: + - 'tools/bin/**' + - '.github/workflows/bash-scripts.yml' + shellcheck: + name: ShellCheck Lint + runs-on: ubuntu-latest + needs: [changes] + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Run ShellCheck + if: needs.changes.outputs.bash-scripts-src == 'true' + uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # v2.0.0 + with: + scandir: "./tools/bin" + # Consider changing this to check for warnings once all warnings are fixed. + severity: error diff --git a/.github/workflows/build-publish-develop.yml b/.github/workflows/build-publish-develop.yml new file mode 100644 index 00000000..4ec7a77b --- /dev/null +++ b/.github/workflows/build-publish-develop.yml @@ -0,0 +1,67 @@ +name: "Push develop to private ECR" + +on: + push: + branches: + - develop + workflow_dispatch: + inputs: + git_ref: + description: "Git ref (commit SHA, branch name, tag name, etc.) to checkout" + required: true +env: + GIT_REF: ${{ github.event.inputs.git_ref || github.ref }} + +jobs: + push-plugin-develop: + runs-on: ubuntu-20.04 + environment: build-develop + permissions: + id-token: write + contents: read + strategy: + matrix: + image: + - name: "" + dockerfile: core/plugin.Dockerfile + tag-suffix: "" + - name: (plugins) + dockerfile: plugins/plugin.Dockerfile + tag-suffix: -plugins + name: push-plugin-develop ${{ matrix.image.name }} + steps: + - name: Checkout repository + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ env.GIT_REF }} + # When this is ran from manual workflow_dispatch, the github.sha may be + # different than the checked out commit sha. The core build uses this + # commit sha as build metadata, so we need to make sure it's correct. 
+ - name: Get checked out git ref + if: github.event.inputs.git_ref + id: git-ref + run: echo "checked-out=$(git rev-parse HEAD)" | tee -a "${GITHUB_OUTPUT}" + - name: Build, sign and publish plugin image + uses: ./.github/actions/build-sign-publish-plugin + with: + publish: true + aws-role-to-assume: ${{ secrets.AWS_OIDC_IAM_ROLE_ARN }} + aws-role-duration-seconds: ${{ secrets.AWS_ROLE_DURATION_SECONDS }} + aws-region: ${{ secrets.AWS_REGION }} + ecr-hostname: ${{ secrets.AWS_DEVELOP_ECR_HOSTNAME }} + ecr-image-name: plugin + ecr-tag-suffix: ${{ matrix.image.tag-suffix }} + dockerfile: ${{ matrix.image.dockerfile }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + git-commit-sha: ${{ steps.git-ref.outputs.checked-out || github.sha }} + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: push-plugin-develop ${{ matrix.image.name }} + continue-on-error: true diff --git a/.github/workflows/build-publish-pr.yml b/.github/workflows/build-publish-pr.yml new file mode 100644 index 00000000..c0f84ea8 --- /dev/null +++ b/.github/workflows/build-publish-pr.yml @@ -0,0 +1,110 @@ +name: "Build and Publish from PR" + +## +# This workflow builds and publishes a Docker image for Plugin from a PR. +# It has its own special IAM role, does not sign the image, and publishes to +# a special ECR repo. +## + +on: + pull_request: + +jobs: + build-publish-untrusted: + if: ${{ ! 
startsWith(github.ref_name, 'release/') }} + runs-on: ubuntu-20.04 + environment: sdlc + permissions: + id-token: write + contents: read + env: + ECR_IMAGE_NAME: crib-plugin-untrusted + steps: + - name: Checkout repository + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Git Short SHA + shell: bash + env: + GIT_PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }} + run: | + echo "GIT_SHORT_SHA=${GIT_PR_HEAD_SHA:0:7}" | tee -a "$GITHUB_ENV" + + - name: Check if image exists + id: check-image + uses: goplugin/plugin-github-actions/docker/image-exists@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + repository: ${{ env.ECR_IMAGE_NAME}} + tag: sha-${{ env.GIT_SHORT_SHA }} + AWS_REGION: ${{ secrets.AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.AWS_OIDC_IAM_ROLE_PUBLISH_PR_ARN }} + + - name: Build and publish plugin image + if: steps.check-image.outputs.exists == 'false' + uses: ./.github/actions/build-sign-publish-plugin + with: + publish: true + aws-role-to-assume: ${{ secrets.AWS_OIDC_IAM_ROLE_PUBLISH_PR_ARN }} + aws-role-duration-seconds: ${{ secrets.AWS_ROLE_DURATION_SECONDS_DEFAULT }} + aws-region: ${{ secrets.AWS_REGION }} + sign-images: false + ecr-hostname: ${{ secrets.AWS_SDLC_ECR_HOSTNAME }} + ecr-image-name: ${{ env.ECR_IMAGE_NAME }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + + - name: Get PR labels + id: pr-labels + env: + GH_TOKEN: ${{ github.token }} + PR_NUMBER: ${{ github.event.number }} + run: | + RESPONSE=$(gh pr view ${PR_NUMBER} --json labels) + # Check if the labels command was successful + if [[ $? 
-ne 0 ]]; then + echo "Error fetching labels" + exit 1 + fi + echo "RESPONSE=${RESPONSE}" + LABELS=$(echo "$RESPONSE" | jq -r '.labels | map(.name) | join(", ")') + # Check if any labels were found + if [[ -z "${LABELS:-}" ]]; then + echo "No labels found" + else + echo "labels=${LABELS}" | tee -a "${GITHUB_OUTPUT}" + fi + + - name: Setup GAP + if: contains(steps.pr-labels.outputs.labels, 'crib') + uses: goplugin/.github/actions/setup-gap@main + with: + aws-region: ${{ secrets.AWS_REGION }} + aws-role-arn: ${{ secrets.AWS_OIDC_IAM_ROLE_PUBLISH_PR_ARN }} + api-gateway-host: ${{ secrets.AWS_API_GW_HOST_ARGO_SAND }} + use-argocd: "true" + argocd-user: ${{ secrets.ARGOCD_USER_SAND }} + argocd-pass: ${{ secrets.ARGOCD_PASS_SAND }} + + # Run an Argo CD sync after the image is built. + - name: Argo CD App Sync + if: contains(steps.pr-labels.outputs.labels, 'crib') + shell: bash + env: + PR_NUMBER: ${{ github.event.number }} + run: | + argocd app sync \ + --plaintext \ + --grpc-web \ + --async \ + "crib-plugin-${PR_NUMBER}" + + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: build-publish-untrusted + continue-on-error: true diff --git a/.github/workflows/build-publish.yml b/.github/workflows/build-publish.yml new file mode 100644 index 00000000..a858b48b --- /dev/null +++ b/.github/workflows/build-publish.yml @@ -0,0 +1,62 @@ +name: "Build Plugin and Publish" + +on: + # Mimics old circleci behaviour + push: + tags: + - "v*" + branches: + - master + - "release/**" + +jobs: + checks: + name: "Checks" + runs-on: ubuntu-20.04 + steps: + - name: Checkout repository + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Check for VERSION file bump on tags + # 
Avoids checking VERSION file bump on forks. + if: ${{ github.repository == 'goplugin/pluginv3.0' && startsWith(github.ref, 'refs/tags/v') }} + uses: ./.github/actions/version-file-bump + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + + build-sign-publish-plugin: + needs: [checks] + if: ${{ ! startsWith(github.ref_name, 'release/') }} + runs-on: ubuntu-20.04 + environment: build-publish + permissions: + id-token: write + contents: read + steps: + - name: Checkout repository + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Build, sign and publish plugin image + uses: ./.github/actions/build-sign-publish-plugin + with: + publish: true + aws-role-to-assume: ${{ secrets.AWS_OIDC_IAM_ROLE_ARN }} + aws-role-duration-seconds: ${{ secrets.AWS_ROLE_DURATION_SECONDS }} + aws-region: ${{ secrets.AWS_REGION }} + sign-images: true + sign-method: "keypair" + cosign-private-key: ${{ secrets.COSIGN_PRIVATE_KEY }} + cosign-public-key: ${{ secrets.COSIGN_PUBLIC_KEY }} + cosign-password: ${{ secrets.COSIGN_PASSWORD }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + verify-signature: true + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: build-sign-publish-plugin + continue-on-error: true diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000..221f3ed1 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,32 @@ +name: "Build Plugin" + +on: + pull_request: + push: + branches: + - master + +jobs: + build-plugin: + runs-on: ubuntu-20.04 + steps: + - name: Checkout repository + uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Build plugin image + uses: ./.github/actions/build-sign-publish-plugin + with: + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + publish: false + sign-images: false + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: build-plugin + continue-on-error: true diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml new file mode 100644 index 00000000..6b74cb19 --- /dev/null +++ b/.github/workflows/changelog.yml @@ -0,0 +1,40 @@ +# +# This action checks PRs to see if any CHANGELOG* files were updated. +# If none were, it will add a message to the PR asking if it would make sense to do so. +# +name: Changelog + +on: pull_request + +jobs: + changelog: + # For security reasons, GITHUB_TOKEN is read-only on forks, so we cannot leave comments on PRs. + # This check skips the job if it is detected we are running on a fork. 
+ if: ${{ github.event.pull_request.head.repo.full_name == 'goplugin/pluginv3.0' }} + name: Changelog checker + runs-on: ubuntu-latest + steps: + - name: Check for changed files + id: changedfiles + uses: umani/changed-files@d7f842d11479940a6036e3aacc6d35523e6ba978 # Version 4.1.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + pattern: '^docs/CHANGELOG.*$' + - name: Make a comment + uses: unsplash/comment-on-pr@ffe8f97ccc63ce12c3c23c6885b169db67958d3b # Version 1.3.0 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + if: contains(steps.changedfiles.outputs.files_updated, 'CHANGELOG') != true && contains(steps.changedfiles.outputs.files_created, 'CHANGELOG') != true + with: + msg: "I see that you haven't updated any CHANGELOG files. Would it make sense to do so?" + check_for_duplicate_msg: true + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Changelog checker + continue-on-error: true diff --git a/.github/workflows/ci-core.yml b/.github/workflows/ci-core.yml new file mode 100644 index 00000000..4117df0e --- /dev/null +++ b/.github/workflows/ci-core.yml @@ -0,0 +1,282 @@ +name: CI Core + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +# Run on key branches to make sure integration is good, otherwise run on all PR's +on: + push: + branches: + - master + - develop + - "release/*" + merge_group: + pull_request: + schedule: + - cron: "0 0 * * *" + +jobs: + golangci: + if: ${{ github.event_name == 'pull_request' || github.event_name == 'schedule' }} + name: lint + runs-on: ubuntu20.04-8cores-32GB + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Golang Lint + uses: 
./.github/actions/golangci-lint + with: + gc-basic-auth: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }} + gc-host: ${{ secrets.GRAFANA_CLOUD_HOST }} + - name: Notify Slack + if: ${{ failure() && (github.event_name == 'merge_group' || github.event.branch == 'develop')}} + uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + env: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} + with: + channel-id: "#team-core" + slack-message: "golangci-lint failed: ${{ job.html_url }}\n${{ format('https://github.com/goplugin/pluginv3.0/actions/runs/{0}', github.run_id) }}" + + core: + strategy: + fail-fast: false + matrix: + cmd: ["go_core_tests", "go_core_race_tests", "go_core_fuzz"] + name: Core Tests (${{ matrix.cmd }}) + runs-on: ubuntu20.04-64cores-256GB + env: + CL_DATABASE_URL: postgresql://postgres:postgres@localhost:5432/plugin_test?sslmode=disable + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Setup node + uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1 + - name: Setup NodeJS + uses: ./.github/actions/setup-nodejs + with: + prod: "true" + - name: Setup Go + uses: ./.github/actions/setup-go + - name: Setup Solana + uses: ./.github/actions/setup-solana + - name: Setup wasmd + uses: ./.github/actions/setup-wasmd + - name: Setup Postgres + uses: ./.github/actions/setup-postgres + - name: Touching core/web/assets/index.html + run: mkdir -p core/web/assets && touch core/web/assets/index.html + - name: Download Go vendor packages + run: go mod download + - name: Build binary + run: go build -o plugin.test . 
+ - name: Setup DB + run: ./plugin.test local db preparetest + - name: Install LOOP Plugins + run: | + pushd $(go list -m -f "{{.Dir}}" github.com/goplugin/plugin-feeds) + go install ./cmd/plugin-feeds + popd + pushd $(go list -m -f "{{.Dir}}" github.com/goplugin/plugin-data-streams) + go install ./mercury/cmd/plugin-mercury + popd + pushd $(go list -m -f "{{.Dir}}" github.com/goplugin/plugin-solana) + go install ./pkg/solana/cmd/plugin-solana + popd + pushd $(go list -m -f "{{.Dir}}" github.com/goplugin/plugin-starknet/relayer) + go install ./pkg/plugin/cmd/plugin-starknet + popd + - name: Increase Race Timeout + if: github.event.schedule != '' + run: | + echo "TIMEOUT=10m" >> $GITHUB_ENV + echo "COUNT=50" >> $GITHUB_ENV + - name: Run tests + id: run-tests + env: + OUTPUT_FILE: ./output.txt + USE_TEE: false + run: ./tools/bin/${{ matrix.cmd }} ./... + - name: Print Filtered Test Results + if: ${{ failure() && matrix.cmd == 'go_core_tests' }} + uses: goplugin/plugin-github-actions/go/go-test-results-parsing@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + results-file: ./output.txt + output-file: ./output-short.txt + - name: Print Races + if: ${{ failure() && matrix.cmd == 'go_core_race_tests' }} + run: find race.* | xargs cat + - name: Print postgres logs + if: always() + run: docker compose logs postgres | tee ../../../postgres_logs.txt + working-directory: ./.github/actions/setup-postgres + - name: Store logs artifacts + if: always() + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + name: ${{ matrix.cmd }}_logs + path: | + ./output.txt + ./output-short.txt + ./race.* + ./coverage.txt + ./postgres_logs.txt + - name: Notify Slack + if: ${{ failure() && matrix.cmd == 'go_core_race_tests' && (github.event_name == 'merge_group' || github.event.branch == 'develop') }} + uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + env: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} + 
with: + channel-id: "#topic-data-races" + slack-message: "Race tests failed: ${{ job.html_url }}\n${{ format('https://github.com/goplugin/pluginv3.0/actions/runs/{0}', github.run_id) }}" + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Core Tests (${{ matrix.cmd }}) + test-results-file: '{"testType":"go","filePath":"./output.txt"}' + continue-on-error: true + + detect-flakey-tests: + needs: [core] + name: Flakey Test Detection + runs-on: ubuntu-latest + if: always() + env: + CL_DATABASE_URL: postgresql://postgres:postgres@localhost:5432/plugin_test?sslmode=disable + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Setup node + uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1 + - name: Setup NodeJS + uses: ./.github/actions/setup-nodejs + with: + prod: "true" + - name: Setup Go + uses: ./.github/actions/setup-go + - name: Setup Postgres + uses: ./.github/actions/setup-postgres + - name: Touching core/web/assets/index.html + run: mkdir -p core/web/assets && touch core/web/assets/index.html + - name: Download Go vendor packages + run: go mod download + - name: Build binary + run: go build -o plugin.test . 
+ - name: Setup DB + run: ./plugin.test local db preparetest + - name: Load test outputs + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + path: ./artifacts + - name: Build flakey test runner + run: go build ./tools/flakeytests/cmd/runner + - name: Re-run tests + env: + GRAFANA_CLOUD_BASIC_AUTH: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }} + GRAFANA_CLOUD_HOST: ${{ secrets.GRAFANA_CLOUD_HOST }} + GITHUB_EVENT_PATH: ${{ github.event_path }} + GITHUB_EVENT_NAME: ${{ github.event_name }} + GITHUB_REPO: ${{ github.repository }} + GITHUB_RUN_ID: ${{ github.run_id }} + run: | + ./runner \ + -grafana_auth=$GRAFANA_CLOUD_BASIC_AUTH \ + -grafana_host=$GRAFANA_CLOUD_HOST \ + -gh_sha=$GITHUB_SHA \ + -gh_event_path=$GITHUB_EVENT_PATH \ + -gh_event_name=$GITHUB_EVENT_NAME \ + -gh_run_id=$GITHUB_RUN_ID \ + -gh_repo=$GITHUB_REPO \ + -command=./tools/bin/go_core_tests \ + `ls -R ./artifacts/go_core_tests*/output.txt` + - name: Store logs artifacts + if: always() + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + name: flakey_test_runner_logs + path: | + ./output.txt + + scan: + name: SonarQube Scan + needs: [core] + if: ${{ always() }} + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 # fetches all history for all tags and branches to provide more metadata for sonar reports + - name: Download all workflow run artifacts + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + - name: Set SonarQube Report Paths + id: sonarqube_report_paths + shell: bash + run: | + echo "sonarqube_tests_report_paths=$(find go_core_tests_logs -name output.txt | paste -sd "," -)" >> $GITHUB_OUTPUT + echo "sonarqube_coverage_report_paths=$(find go_core_tests_logs -name coverage.txt | paste -sd "," -)" >> $GITHUB_OUTPUT + - name: SonarQube Scan + uses: 
sonarsource/sonarqube-scan-action@69c1a75940dec6249b86dace6b630d3a2ae9d2a7 # v2.0.1 + with: + args: > + -Dsonar.go.tests.reportPaths=${{ steps.sonarqube_report_paths.outputs.sonarqube_tests_report_paths }} + -Dsonar.go.coverage.reportPaths=${{ steps.sonarqube_report_paths.outputs.sonarqube_coverage_report_paths }} + -Dsonar.go.golangci-lint.reportPaths=golangci-lint-report/golangci-lint-report.xml + env: + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} + SONAR_HOST_URL: ${{ secrets.SONAR_HOST_URL }} + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: SonarQube Scan + continue-on-error: true + + clean: + name: Clean Go Tidy & Generate + if: ${{ !contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') }} + runs-on: ubuntu20.04-8cores-32GB + defaults: + run: + shell: bash + steps: + - name: Check for Skip Tests Label + if: contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') + run: | + echo "## \`skip-smoke-tests\` label is active, skipping E2E smoke tests" >>$GITHUB_STEP_SUMMARY + exit 0 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Setup Go + uses: ./.github/actions/setup-go + with: + only-modules: "true" + - name: Install protoc-gen-go-wsrpc + run: curl https://github.com/goplugin/wsrpc/raw/main/cmd/protoc-gen-go-wsrpc/protoc-gen-go-wsrpc --output $HOME/go/bin/protoc-gen-go-wsrpc && chmod +x $HOME/go/bin/protoc-gen-go-wsrpc + - name: Setup NodeJS + uses: ./.github/actions/setup-nodejs + - run: make generate # generate install go deps + - name: Ensure clean after generate + run: git diff --stat --exit-code + - run: make gomodtidy + - name: Ensure clean after tidy + run: 
git diff --minimal --exit-code + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Clean Go Tidy & Generate + continue-on-error: true diff --git a/.github/workflows/ci-scripts.yml b/.github/workflows/ci-scripts.yml new file mode 100644 index 00000000..12760da1 --- /dev/null +++ b/.github/workflows/ci-scripts.yml @@ -0,0 +1,46 @@ +name: CI Scripts + +on: + push: + pull_request: + +jobs: + lint-scripts: + if: ${{ github.event_name == 'pull_request' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Golang Lint + uses: ./.github/actions/golangci-lint + with: + name: lint-scripts + go-directory: core/scripts + go-version-file: core/scripts/go.mod + go-module-file: core/scripts/go.sum + gc-basic-auth: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }} + gc-host: ${{ secrets.GRAFANA_CLOUD_HOST }} + + test-scripts: + if: ${{ github.event_name == 'pull_request' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Setup Go + uses: ./.github/actions/setup-go + with: + go-version-file: core/scripts/go.mod + go-module-file: core/scripts/go.sum + - name: Run Tests + shell: bash + working-directory: core/scripts + run: go test ./... 
+ - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: test-scripts + continue-on-error: true diff --git a/.github/workflows/client-compatibility-tests.yml b/.github/workflows/client-compatibility-tests.yml new file mode 100644 index 00000000..52acb346 --- /dev/null +++ b/.github/workflows/client-compatibility-tests.yml @@ -0,0 +1,333 @@ +name: Client Compatibility Tests +on: + schedule: + - cron: "30 5 * * *" # Run every night at midnight + 30min EST + push: + tags: + - "*" + workflow_dispatch: + +env: + PLUGIN_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin + INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com + MOD_CACHE_VERSION: 2 + +jobs: + # Build Test Dependencies + + build-plugin: + environment: integration + permissions: + id-token: write + contents: read + name: Build Plugin Image + runs-on: ubuntu-latest + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Plugin Image + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Build Plugin Image + uses: ./.github/actions/build-plugin-image + with: + tag_suffix: "" + dockerfile: core/plugin.Dockerfile + git_commit_sha: ${{ github.sha }} + 
GRAFANA_CLOUD_BASIC_AUTH: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }} + GRAFANA_CLOUD_HOST: ${{ secrets.GRAFANA_CLOUD_HOST }} + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + + build-tests: + environment: integration + permissions: + id-token: write + contents: read + name: Build Tests Binary + runs-on: ubuntu-latest + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Tests Binary + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Build Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/build-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_download_vendor_packages_command: cd ./integration-tests && go mod download + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + go_tags: embed + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + binary_name: tests + + # End Build Test Dependencies + + client-compatibility-matrix: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, build-tests] + env: + SELECTED_NETWORKS: SIMULATED,SIMULATED_1,SIMULATED_2 + PLUGIN_COMMIT_SHA: ${{ github.sha }} + PLUGIN_ENV_USER: ${{ github.actor }} + TEST_LOG_LEVEL: debug + strategy: + fail-fast: false + matrix: + include: + - name: ocr-geth + os: ubuntu-latest + test: TestOCRBasic + file: ocr + client: geth + timeout: 30m + pyroscope_env: ci-smoke-ocr-geth-simulated + - name: ocr-nethermind + 
test: TestOCRBasic + file: ocr + client: nethermind + timeout: 30m + pyroscope_env: ci-smoke-ocr-nethermind-simulated + - name: ocr-besu + test: TestOCRBasic + file: ocr + client: besu + timeout: 30m + pyroscope_env: ci-smoke-ocr-besu-simulated + - name: ocr-erigon + test: TestOCRBasic + file: ocr + client: erigon + timeout: 30m + pyroscope_env: ci-smoke-ocr-erigon-simulated + - name: ocr2-geth + test: "^TestOCRv2Basic/plugins$" + file: ocr2 + client: geth + timeout: 30m + pyroscope_env: ci-smoke-ocr2-geth-simulated + - name: ocr2-nethermind + test: "^TestOCRv2Basic/plugins$" + file: ocr2 + client: nethermind + timeout: 30m + pyroscope_env: ci-smoke-nethermind-evm-simulated + - name: ocr2-besu + test: "^TestOCRv2Basic/plugins$" + file: ocr2 + client: besu + timeout: 30m + pyroscope_env: ci-smoke-ocr2-besu-simulated + - name: ocr2-erigon + test: "^TestOCRv2Basic/plugins$" + file: ocr2 + client: erigon + timeout: 60m + pyroscope_env: ci-smoke-ocr2-erigon-simulated + runs-on: ubuntu-latest + name: Client Compatibility Test ${{ matrix.name }} + steps: + - name: Download Tests Binary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: tests + - name: Prepare Base64 TOML config + env: + SELECTED_NETWORKS: SIMULATED,SIMULATED_1,SIMULATED_2 + PYROSCOPE_SERVER: ${{ secrets.QA_PYROSCOPE_INSTANCE }} + PYROSCOPE_ENVIRONMENT: ci-client-compatability-${{ matrix.client }}-testnet + PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} + ETH2_EL_CLIENT: ${{matrix.client}} + PLUGIN_VERSION: ${{ github.sha }} + run: | + convert_to_toml_array() { + local IFS=',' + local input_array=($1) + local toml_array_format="[" + + for element in "${input_array[@]}"; do + toml_array_format+="\"$element\"," + done + + toml_array_format="${toml_array_format%,}]" + echo "$toml_array_format" + } + + selected_networks=$(convert_to_toml_array "$SELECTED_NETWORKS") + + if [ -n "$ETH2_EL_CLIENT" ]; then + execution_layer="$ETH2_EL_CLIENT" + else + 
execution_layer="geth" + fi + + if [ -n "$PYROSCOPE_SERVER" ]; then + pyroscope_enabled=true + else + pyroscope_enabled=false + fi + + cat << EOF > config.toml + [Network] + selected_networks=$selected_networks + + [PluginImage] + image="$PLUGIN_IMAGE" + version="$PLUGIN_VERSION" + + [Pyroscope] + enabled=$pyroscope_enabled + server_url="$PYROSCOPE_SERVER" + environment="$PYROSCOPE_ENVIRONMENT" + key="$PYROSCOPE_KEY" + + [PrivateEthereumNetwork] + consensus_type="pos" + consensus_layer="prysm" + execution_layer="$execution_layer" + wait_for_finalization=false + + [PrivateEthereumNetwork.EthereumChainConfig] + chain_id=1337 + genesis_delay=15 + seconds_per_slot=3 + validator_count=8 + slots_per_epoch=2 + addresses_to_fund=["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"] + EOF + + BASE64_CONFIG_OVERRIDE=$(cat config.toml | base64 -w 0) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + touch .root_dir + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests-binary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: ./tests -test.timeout ${{ matrix.timeout }} -test.run ${{ matrix.test }} + binary_name: tests + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + artifacts_location: ./logs + token: ${{ secrets.GITHUB_TOKEN }} + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + + start-slack-thread: + name: Start Slack Thread + if: ${{ always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' }} + environment: integration + outputs: + thread_ts: 
 ${{ steps.slack.outputs.thread_ts }} + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: [client-compatibility-matrix] + steps: + - name: Debug Result + run: echo ${{ join(needs.*.result, ',') }} + - name: Main Slack Notification + uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + id: slack + with: + channel-id: ${{ secrets.QA_SLACK_CHANNEL }} + payload: | + { + "attachments": [ + { + "color": "${{ contains(join(needs.*.result, ','), 'failure') && '#C62828' || '#2E7D32' }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "Client Compatibility Test Results ${{ contains(join(needs.*.result, ','), 'failure') && ':x:' || ':white_check_mark:'}}", + "emoji": true + } + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "${{ contains(join(needs.*.result, ','), 'failure') && 'Some tests failed, notifying <@U060CGGPY8H>' || 'All Good!' 
}}" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "<${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}|${{ github.ref_name }}> | <${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}|${{ github.sha }}> | <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|Run>" + } + } + ] + } + ] + } + env: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} + + post-test-results-to-slack: + name: Post Test Results for ${{matrix.product}} + if: ${{ always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' }} + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: start-slack-thread + strategy: + fail-fast: false + matrix: + product: [ocr, ocr2] + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Post Test Results to Slack + uses: ./.github/actions/notify-slack-jobs-result + with: + github_token: ${{ github.token }} + github_repository: ${{ github.repository }} + workflow_run_id: ${{ github.run_id }} + github_job_name_regex: ^Client Compatibility Test ${{ matrix.product }}-(?.*?)$ + message_title: ${{ matrix.product }} + slack_channel_id: ${{ secrets.QA_SLACK_CHANNEL }} + slack_bot_token: ${{ secrets.QA_SLACK_API_KEY }} + slack_thread_ts: ${{ needs.start-slack-thread.outputs.thread_ts }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 00000000..6cc358c6 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,54 @@ +name: 'CodeQL' + +on: + push: + branches: + - develop + pull_request: + # The branches below must be a subset of the branches above + branches: [develop] + schedule: + - cron: 
'23 19 * * 4' + +jobs: + analyze: + name: Analyze ${{ matrix.language }} + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + language: ['go', 'javascript'] + + steps: + - name: Checkout repository + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Set up Go + if: ${{ matrix.language == 'go' }} + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version-file: 'go.mod' + + - name: Touching core/web/assets/index.html + if: ${{ matrix.language == 'go' }} + run: mkdir -p core/web/assets && touch core/web/assets/index.html + + - name: Initialize CodeQL + uses: github/codeql-action/init@65c74964a9ed8c44ed9f19d4bbc5757a6a8e9ab9 # codeql-bundle-v2.16.1 + with: + languages: ${{ matrix.language }} + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@65c74964a9ed8c44ed9f19d4bbc5757a6a8e9ab9 # codeql-bundle-v2.16.1 + + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Analyze ${{ matrix.language }} + continue-on-error: true diff --git a/.github/workflows/delete-deployments.yml b/.github/workflows/delete-deployments.yml new file mode 100644 index 00000000..d016ff3b --- /dev/null +++ b/.github/workflows/delete-deployments.yml @@ -0,0 +1,33 @@ +name: Cleanup integration deployments +on: + workflow_dispatch: + schedule: + # every 10 mins + - cron: "*/10 * * * *" + +jobs: + cleanup: + name: Clean up integration environment deployments + runs-on: ubuntu-latest + steps: + - name: Checkout repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Clean up integration environment + uses: ./.github/actions/delete-deployments + with: + environment: integration + # 
Delete 300 deployments at a time + num-of-pages: 3 + # We start with page 2 because usually the first 200 deployments are still active, so we cannot delete them + starting-page: 2 + + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Clean up integration environment deployments + continue-on-error: true diff --git a/.github/workflows/dependency-check.yml b/.github/workflows/dependency-check.yml new file mode 100644 index 00000000..ab4acbad --- /dev/null +++ b/.github/workflows/dependency-check.yml @@ -0,0 +1,56 @@ +name: Dependency Vulnerability Check + +on: + push: + +jobs: + changes: + name: Detect changes + runs-on: ubuntu-latest + outputs: + changes: ${{ steps.changes.outputs.src }} + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd # v3.0.0 + id: changes + with: + filters: | + src: + - '**/*go.sum' + - '**/*go.mod' + - '.github/workflows/dependency-check.yml' + Go: + runs-on: ubuntu-latest + needs: [changes] + steps: + - name: Check out code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Set up Go + if: needs.changes.outputs.src == 'true' + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version-file: 'go.mod' + id: go + + - name: Write Go Modules list + if: needs.changes.outputs.src == 'true' + run: go list -json -m all > go.list + + - name: Check vulnerabilities + if: needs.changes.outputs.src == 'true' + uses: sonatype-nexus-community/nancy-github-action@main + with: + nancyVersion: "v1.0.39" + + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: 
goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Go + continue-on-error: true diff --git a/.github/workflows/goreleaser-build-publish-develop.yml b/.github/workflows/goreleaser-build-publish-develop.yml new file mode 100644 index 00000000..86cf37f1 --- /dev/null +++ b/.github/workflows/goreleaser-build-publish-develop.yml @@ -0,0 +1,89 @@ +name: "Build publish Plugin develop on private ECR" + +on: + push: + branches: + - develop + +jobs: + push-plugin-develop-goreleaser: + runs-on: + labels: ubuntu20.04-16cores-64GB + outputs: + goreleaser-metadata: ${{ steps.build-sign-publish.outputs.goreleaser-metadata }} + goreleaser-artifacts: ${{ steps.build-sign-publish.outputs.goreleaser-artifacts }} + environment: build-develop + permissions: + id-token: write + contents: read + steps: + - name: Checkout repository + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Configure aws credentials + uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + with: + role-to-assume: ${{ secrets.AWS_OIDC_IAM_ROLE_ARN }} + role-duration-seconds: ${{ secrets.AWS_ROLE_DURATION_SECONDS }} + aws-region: ${{ secrets.AWS_REGION }} + - name: Build, sign, and publish image + id: build-sign-publish + uses: ./.github/actions/goreleaser-build-sign-publish + with: + enable-docker-publish: "true" + docker-registry: ${{ secrets.AWS_DEVELOP_ECR_HOSTNAME }} + enable-goreleaser-snapshot: "true" + goreleaser-exec: ./tools/bin/goreleaser_wrapper + goreleaser-config: .goreleaser.develop.yaml + # ISSUE: https://github.com/golang/go/issues/52690 + zig-version: 0.11.0-dev.3380+7e0a02ee2 # TODO: update action to v0.11.x once released + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: 
goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: push-plugin-develop-goreleaser + continue-on-error: true + mercury-e2e-tests: + needs: [push-plugin-develop-goreleaser] + runs-on: + labels: ubuntu-latest + environment: build-develop + permissions: + id-token: write + contents: read + steps: + - name: Checkout repository + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Configure aws credentials + uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + with: + role-to-assume: ${{ secrets.AWS_ROLE_ARN_GATI }} + role-duration-seconds: ${{ secrets.AWS_ROLE_DURATION_SECONDS }} + aws-region: ${{ secrets.AWS_REGION }} + - name: Get Github Token + id: get-gh-token + uses: goplugin/plugin-github-actions/github-app-token-issuer@main + with: + url: ${{ secrets.GATI_LAMBDA_FUNCTION_URL }} + - name: 'Dispatch Workflow: E2E Functional Tests' + id: dispatch-workflow-e2e-functional-tests + shell: bash + run: | + image_build_metadata=$(jq -n \ + --arg commit_sha "$GITHUB_SHA" \ + --arg run_url "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ + '{ + commit_sha: $commit_sha, + originating_run_url: $run_url + }') + gh workflow run "e2e-functional-tests.yml" \ + --repo ${{ secrets.MERCURY_SERVER_REPO }} \ + --ref "main" \ + --field plugin-ecr-repo-account="sdlc" \ + --field plugin-image-build-metadata="${image_build_metadata}" \ + --field plugin-image-tag="develop" + env: + GH_TOKEN: ${{ steps.get-gh-token.outputs.access-token }} diff --git a/.github/workflows/helm-chart-publish.yml b/.github/workflows/helm-chart-publish.yml new file mode 100644 index 00000000..2c79d266 --- /dev/null +++ b/.github/workflows/helm-chart-publish.yml @@ -0,0 +1,39 @@ +name: Helm Publish 
+ +on: + workflow_dispatch: + +jobs: + helm_release: + runs-on: ubuntu-latest + environment: build-develop + permissions: + id-token: write + contents: read + steps: + - name: Checkout repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Configure aws credentials + uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + with: + role-to-assume: ${{ secrets.AWS_ROLE_ARN_GATI }} + role-duration-seconds: ${{ secrets.AWS_ROLE_DURATION_SECONDS }} + aws-region: ${{ secrets.AWS_REGION }} + + - name: Get Github Token + id: get-gh-token + uses: goplugin/plugin-github-actions/github-app-token-issuer@main + with: + url: ${{ secrets.GATI_LAMBDA_FUNCTION_URL }} + + - name: Install Helm + uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5 + + - name: Run chart-releaser + uses: helm/chart-releaser-action@a917fd15b20e8b64b94d9158ad54cd6345335584 # v1.6.0 + with: + charts_dir: charts + config: .github/cr.yaml + env: + CR_TOKEN: "${{ steps.get-gh-token.outputs.access-token }}" diff --git a/.github/workflows/helm-chart.yml b/.github/workflows/helm-chart.yml new file mode 100644 index 00000000..4c8def64 --- /dev/null +++ b/.github/workflows/helm-chart.yml @@ -0,0 +1,25 @@ +name: Helm Chart + +on: + pull_request: + paths: + - "charts/**" + - ".github/workflows/helm-chart.yml" + +jobs: + ci-lint-helm-charts: + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + actions: read + steps: + - name: ci-lint-helm-charts + uses: goplugin/.github/actions/ci-lint-charts@9fd15fe8e698a5e28bfd06b3a91471c56568dcb3 # ci-lint-charts@0.1.1 + with: + # chart testing inputs + chart-testing-extra-args: "--lint-conf=lintconf.yaml" + # grafana inputs + metrics-job-name: ci-lint-helm-charts + gc-basic-auth: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }} + gc-host: ${{ secrets.GRAFANA_CLOUD_HOST }} diff --git a/.github/workflows/integration-chaos-tests.yml 
b/.github/workflows/integration-chaos-tests.yml new file mode 100644 index 00000000..ef2c9635 --- /dev/null +++ b/.github/workflows/integration-chaos-tests.yml @@ -0,0 +1,151 @@ +name: Integration Chaos Test +on: + schedule: + - cron: "0 0 * * *" + push: + tags: + - "*" + workflow_dispatch: + +env: + PLUGIN_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin + ENV_JOB_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin-tests:${{ github.sha }} + TEST_SUITE: chaos + TEST_ARGS: -test.timeout 1h + PLUGIN_COMMIT_SHA: ${{ github.sha }} + PLUGIN_ENV_USER: ${{ github.actor }} + TEST_LOG_LEVEL: debug + +jobs: + build-plugin: + environment: integration + permissions: + id-token: write + contents: read + name: Build Plugin Image + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Check if image exists + id: check-image + uses: goplugin/plugin-github-actions/docker/image-exists@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + repository: plugin + tag: ${{ github.sha }} + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + - name: Build Image + if: steps.check-image.outputs.exists == 'false' + uses: goplugin/plugin-github-actions/plugin-testing-framework/build-image@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + cl_repo: goplugin/pluginv3.0 + cl_ref: ${{ github.sha }} + push_tag: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin:${{ github.sha }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + - name: Print Plugin Image Built + id: push + run: | + echo "### plugin node image tag used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${GITHUB_SHA}\`" >>$GITHUB_STEP_SUMMARY + - name: Collect Metrics + 
if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Plugin Image + continue-on-error: true + + build-test-runner: + environment: integration + permissions: + id-token: write + contents: read + name: Build Test Runner Image + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Build Test Image + uses: ./.github/actions/build-test-image + with: + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Test Runner Image + continue-on-error: true + + chaos-tests: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + name: EVM Pods Chaos Tests + runs-on: ubuntu-latest + needs: [build-test-runner, build-plugin] + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: EVM Pods Chaos Tests + test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}' + continue-on-error: true + - name: Checkout the repo + uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Prepare Base64 TOML config + env: + PLUGIN_VERSION: ${{ github.sha }} + run: | + echo ::add-mask::$PLUGIN_IMAGE + + cat << EOF > config.toml + [Network] + selected_networks=["SIMULATED"] + + [PluginImage] + image="$PLUGIN_IMAGE" + version="$PLUGIN_VERSION" + EOF + + BASE64_CONFIG_OVERRIDE=$(cat config.toml | base64 -w 0) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: cd integration-tests && go test -timeout 1h -count=1 -json -test.parallel 11 ./chaos 2>&1 | tee /tmp/gotest.log | gotestfmt + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + artifacts_location: ./integration-tests/chaos/logs + publish_check_name: EVM Pods Chaos Test Results + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Upload test log + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + if: failure() + with: + name: Test Results Log + path: /tmp/gotest.log + retention-days: 7 diff --git a/.github/workflows/integration-staging-tests.yml b/.github/workflows/integration-staging-tests.yml new file mode 100644 index 00000000..2c092310 --- /dev/null +++ b/.github/workflows/integration-staging-tests.yml @@ -0,0 +1,132 @@ +# NEEDS ADJUSTING TO TOML CONFIG BEFORE USING!! 
+name: E2E Functions staging tests + +on: +# TODO: enable when env will be stable +# schedule: +# - cron: "0 0 * * *" + workflow_dispatch: + inputs: + network: + description: Blockchain network (testnet) + type: choice + default: "MUMBAI" + options: + - "MUMBAI" + test_type: + description: Test type + type: choice + default: "mumbai_functions_soak_test_real" + options: + - "mumbai_functions_soak_test_http" + - "mumbai_functions_stress_test_http" + - "mumbai_functions_soak_test_only_secrets" + - "mumbai_functions_stress_test_only_secrets" + - "mumbai_functions_soak_test_real" + - "mumbai_functions_stress_test_real" +# TODO: disabled, need GATI access +# - "gateway_secrets_set_soak_test" +# - "gateway_secrets_list_soak_test" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + e2e-soak-test: + environment: sdlc + runs-on: ubuntu20.04-8cores-32GB + permissions: + contents: read + id-token: write + env: + LOKI_URL: ${{ secrets.LOKI_URL }} + LOKI_TOKEN: ${{ secrets.LOKI_TOKEN }} + SELECTED_NETWORKS: ${{ inputs.network }} + SELECTED_TEST: ${{ inputs.test_type }} + MUMBAI_URLS: ${{ secrets.FUNCTIONS_STAGING_MUMBAI_URLS }} + MUMBAI_KEYS: ${{ secrets.FUNCTIONS_STAGING_MUMBAI_KEYS }} + WASP_LOG_LEVEL: info + steps: + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Prepare Base64 TOML override + env: + PYROSCOPE_SERVER: ${{ secrets.QA_PYROSCOPE_INSTANCE }} + PYROSCOPE_ENVIRONMENT: ci-smoke-${{ matrix.product }}-sepolia + PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} + run: | + convert_to_toml_array() { + local IFS=',' + local input_array=($1) + local toml_array_format="[" + + for element in "${input_array[@]}"; do + toml_array_format+="\"$element\"," + done + + toml_array_format="${toml_array_format%,}]" + echo "$toml_array_format" + } + + if [ -n "$PYROSCOPE_SERVER" ]; then + pyroscope_enabled=true + else + pyroscope_enabled=false + fi + + 
cat << EOF > config.toml + [Common] + plugin_node_funding=0.5 + + [PluginImage] + image="$PLUGIN_IMAGE" + version="${{ github.sha }}" + + [Pyroscope] + enabled=$pyroscope_enabled + server_url="$PYROSCOPE_SERVER" + environment="$PYROSCOPE_ENVIRONMENT" + key="$PYROSCOPE_KEY" + + [Logging] + run_id="$RUN_ID" + + [Logging.LogStream] + log_targets=$log_targets + + [Logging.Loki] + tenant_id="$LOKI_TENANT_ID" + endpoint="$LOKI_URL" + basic_auth="$LOKI_BASIC_AUTH" + + [Logging.Grafana] + base_url="$GRAFANA_URL" + dashboard_url="/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + + [Network] + selected_networks=["sepolia"] + + [Network.RpcHttpUrls] + sepolia = $(convert_to_toml_array "$SEPOLIA_HTTP_URLS") + + [Network.RpcWsUrls] + sepolia = $(convert_to_toml_array "$SEPOLIA_URLS") + + [Network.WalletKeys] + sepolia = $(convert_to_toml_array "$EVM_KEYS") + EOF + + BASE64_CONFIG_OVERRIDE=$(cat config.toml | base64 -w 0) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Run E2E soak tests + run: | + cd integration-tests/load/functions + if [[ $SELECTED_TEST == mumbai_functions* ]]; then + go test -v -timeout 6h -run TestFunctionsLoad/$SELECTED_TEST + elif [[ $SELECTED_TEST == gateway* ]]; then + go test -v -timeout 6h -run TestGatewayLoad/$SELECTED_TEST + fi \ No newline at end of file diff --git a/.github/workflows/integration-tests-publish.yml b/.github/workflows/integration-tests-publish.yml new file mode 100644 index 00000000..fc6ca178 --- /dev/null +++ b/.github/workflows/integration-tests-publish.yml @@ -0,0 +1,99 @@ +name: Integration Tests Publish +# Publish the compiled integration tests + +on: + push: + branches: + - develop + workflow_dispatch: + +env: + ECR_TAG: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin-tests:develop + PLUGIN_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin + 
+jobs: + publish-integration-test-image: + environment: integration + permissions: + id-token: write + contents: read + name: Publish Integration Test Image + runs-on: ubuntu20.04-16cores-64GB + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Publish Integration Test Image + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.sha }} + - name: Setup Other Tags If Not Workflow Dispatch + id: tags + if: github.event_name != 'workflow_dispatch' + run: | + echo "other_tags=${ECR_TAG}" >> $GITHUB_OUTPUT + - name: Build Image + uses: ./.github/actions/build-test-image + with: + other_tags: ${{ steps.tags.outputs.other_tags }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + - name: Notify Slack + # Only run this notification for merge to develop failures + if: failure() && github.event_name != 'workflow_dispatch' + uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + env: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} + with: + channel-id: "#team-test-tooling-internal" + slack-message: ":x: :mild-panic-intensifies: Publish Integration Test Image failed: ${{ job.html_url }}\n${{ format('https://github.com/goplugin/pluginv3.0/actions/runs/{0}', github.run_id) }}" + build-plugin-image: + environment: integration + # Only run this build for workflow_dispatch + if: github.event_name == 'workflow_dispatch' + permissions: + id-token: write + contents: read + strategy: + matrix: + image: + - name: "" + 
dockerfile: core/plugin.Dockerfile + tag-suffix: "" + # uncomment in the future if we end up needing to soak test the plugins image + # - name: (plugins) + # dockerfile: plugins/plugin.Dockerfile + # tag-suffix: -plugins + name: Build Plugin Image ${{ matrix.image.name }} + runs-on: ubuntu20.04-8cores-32GB + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Plugin Image ${{ matrix.image.name }} + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.sha }} + - name: Build Plugin Image + uses: ./.github/actions/build-plugin-image + with: + tag_suffix: ${{ matrix.image.tag-suffix }} + dockerfile: ${{ matrix.image.dockerfile }} + git_commit_sha: ${{ github.sha }} + GRAFANA_CLOUD_BASIC_AUTH: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }} + GRAFANA_CLOUD_HOST: ${{ secrets.GRAFANA_CLOUD_HOST }} + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml new file mode 100644 index 00000000..0f0cb08a --- /dev/null +++ b/.github/workflows/integration-tests.yml @@ -0,0 +1,1062 @@ +name: Integration Tests +on: + merge_group: + pull_request: + push: + tags: + - "*" + workflow_dispatch: + +# Only run 1 of this workflow at a time per PR +concurrency: + group: integration-tests-plugin-${{ github.ref }} + cancel-in-progress: true + +env: + # for run-test variables and environment + ENV_JOB_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin-tests:${{ github.sha }} + PLUGIN_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER 
}}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin + TEST_SUITE: smoke + TEST_ARGS: -test.timeout 12m + INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com + MOD_CACHE_VERSION: 2 + +jobs: + enforce-ctf-version: + name: Enforce CTF Version + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Check Merge Group Condition + id: condition-check + run: | + echo "Checking event condition..." + SHOULD_ENFORCE="false" + if [[ "$GITHUB_EVENT_NAME" == "merge_group" ]]; then + echo "We are in a merge_group event, now check if we are on the develop branch" + target_branch=$(cat $GITHUB_EVENT_PATH | jq -r .merge_group.base_ref) + if [[ "$target_branch" == "refs/heads/develop" ]]; then + echo "We are on the develop branch, we should enforce ctf version" + SHOULD_ENFORCE="true" + fi + fi + echo "should we enforce ctf version = $SHOULD_ENFORCE" + echo "should-enforce=$SHOULD_ENFORCE" >> $GITHUB_OUTPUT + - name: Enforce CTF Version + if: steps.condition-check.outputs.should-enforce == 'true' + uses: goplugin/plugin-github-actions/plugin-testing-framework/mod-version@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + go-project-path: ./integration-tests + module-name: github.com/goplugin/plugin-testing-framework + enforce-semantic-tag: "true" + changes: + environment: integration + name: Check Paths That Require Tests To Run + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd # v3.0.0 + id: changes + with: + filters: | + src: + - '**/*.go' + - '**/*go.sum' + - '**/*go.mod' + - '.github/workflows/integration-tests.yml' + - '**/*Dockerfile' + - 'core/**/config/**/*.toml' + - 'integration-tests/**/*.toml' + - name: Collect Metrics + if: always() + id: 
collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Check Paths That Require Tests To Run + continue-on-error: true + outputs: + src: ${{ steps.changes.outputs.src }} + + build-lint-integration-tests: + name: Build and Lint integration-tests + runs-on: ubuntu20.04-16cores-64GB + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Setup Go + uses: goplugin/plugin-github-actions/plugin-testing-framework/setup-go@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_download_vendor_packages_command: cd ./integration-tests && go mod download + go_mod_path: ./integration-tests/go.mod + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + - name: Build Go + run: | + cd ./integration-tests + go build ./... + go test -run=^# ./... 
+ - name: Lint Go + uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 + with: + version: v1.55.2 + # We already cache these directories in setup-go + skip-pkg-cache: true + skip-build-cache: true + # only-new-issues is only applicable to PRs, otherwise it is always set to false + only-new-issues: false # disabled for PRs due to unreliability + args: --out-format colored-line-number,checkstyle:golangci-lint-report.xml + working-directory: ./integration-tests + + build-plugin: + environment: integration + permissions: + id-token: write + contents: read + strategy: + matrix: + image: + - name: "" + dockerfile: core/plugin.Dockerfile + tag-suffix: "" + - name: (plugins) + dockerfile: plugins/plugin.Dockerfile + tag-suffix: -plugins + name: Build Plugin Image ${{ matrix.image.name }} + runs-on: ubuntu20.04-16cores-64GB + needs: [changes, enforce-ctf-version] + steps: + - name: Collect Metrics + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Plugin Image ${{ matrix.image.name }} + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Build Plugin Image + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + uses: ./.github/actions/build-plugin-image + with: + tag_suffix: ${{ matrix.image.tag-suffix }} + dockerfile: ${{ matrix.image.dockerfile }} + git_commit_sha: ${{ github.sha }} + GRAFANA_CLOUD_BASIC_AUTH: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }} + GRAFANA_CLOUD_HOST: ${{ 
secrets.GRAFANA_CLOUD_HOST }} + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + + build-test-image: + if: startsWith(github.ref, 'refs/tags/') || github.event_name == 'schedule' || contains(join(github.event.pull_request.labels.*.name, ' '), 'build-test-image') + environment: integration + permissions: + id-token: write + contents: read + name: Build Test Image + runs-on: ubuntu20.04-16cores-64GB + needs: [changes] + steps: + - name: Collect Metrics + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Test Image + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Build Test Image + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + uses: ./.github/actions/build-test-image + with: + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + + compare-tests: + needs: [changes] + runs-on: ubuntu-latest + name: Compare/Build Automation Test List + outputs: + automation-matrix: ${{ env.AUTOMATION_JOB_MATRIX_JSON }} + lp-matrix: ${{ env.LP_JOB_MATRIX_JSON }} + steps: + - name: Check for Skip Tests Label + if: contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') + run: | + echo "## \`skip-smoke-tests\` label is active, skipping E2E smoke tests" >>$GITHUB_STEP_SUMMARY + exit 0 + - name: Checkout the repo + uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Compare Test Lists + run: | + cd ./integration-tests + ./scripts/compareTestList.sh ./smoke/automation_test.go + ./scripts/compareTestList.sh ./smoke/keeper_test.go + ./scripts/compareTestList.sh ./smoke/log_poller_test.go + - name: Build Test Matrix Lists + id: build-test-matrix-list + run: | + cd ./integration-tests + MATRIX_JSON_AUTOMATION=$(./scripts/buildTestMatrixList.sh ./smoke/automation_test.go automation ubuntu-latest 1) + MATRIX_JSON_KEEPER=$(./scripts/buildTestMatrixList.sh ./smoke/keeper_test.go keeper ubuntu-latest 1) + COMBINED_ARRAY=$(jq -c -n "$MATRIX_JSON_AUTOMATION + $MATRIX_JSON_KEEPER") + + LOG_POLLER_MATRIX_JSON=$(./scripts/buildTestMatrixList.sh ./smoke/log_poller_test.go log_poller ubuntu-latest 1) + echo "LP_JOB_MATRIX_JSON=${LOG_POLLER_MATRIX_JSON}" >> $GITHUB_ENV + + # if we are running a PR against the develop branch we should only run the automation tests, unless we are in the merge group event + if [[ "$GITHUB_EVENT_NAME" == "merge_group" ]]; then + echo "We are in a merge_group event, run both automation and keepers tests" + echo "AUTOMATION_JOB_MATRIX_JSON=${COMBINED_ARRAY}" >> $GITHUB_ENV + else + echo "we are not in a merge_group event, if this is a PR to develop run only automation tests, otherwise run everything because we could be running against a release branch" + target_branch=$(cat $GITHUB_EVENT_PATH | jq -r .pull_request.base.ref) + if [[ "$target_branch" == "develop" ]]; then + echo "only run automation tests" + echo "AUTOMATION_JOB_MATRIX_JSON=${MATRIX_JSON_AUTOMATION}" >> $GITHUB_ENV + else + echo "run both automation and keepers tests" + echo "AUTOMATION_JOB_MATRIX_JSON=${COMBINED_ARRAY}" >> $GITHUB_ENV + fi + fi + + eth-smoke-tests-matrix-automation: + if: ${{ !contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') }} + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + 
contents: read + needs: + [build-plugin, changes, compare-tests, build-lint-integration-tests] + env: + SELECTED_NETWORKS: SIMULATED,SIMULATED_1,SIMULATED_2 + PLUGIN_COMMIT_SHA: ${{ github.sha }} + PLUGIN_ENV_USER: ${{ github.actor }} + TEST_LOG_LEVEL: debug + strategy: + fail-fast: false + matrix: + product: ${{fromJson(needs.compare-tests.outputs.automation-matrix)}} + runs-on: ${{ matrix.product.os }} + name: ETH Smoke Tests ${{ matrix.product.name }} + steps: + - name: Collect Metrics + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@d1618b772a97fd87e6505de97b872ee0b1f1729a # v2.0.2 + with: + basic-auth: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_CLOUD_HOST }} + this-job-name: ETH Smoke Tests ${{ matrix.product.name }} + test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}' + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Build Go Test Command + id: build-go-test-command + run: | + # if the matrix.product.run is set, use it for a different command + if [ "${{ matrix.product.run }}" != "" ]; then + echo "run_command=${{ matrix.product.run }} ./smoke/${{ matrix.product.file }}_test.go" >> "$GITHUB_OUTPUT" + else + echo "run_command=./smoke/${{ matrix.product.name }}_test.go" >> "$GITHUB_OUTPUT" + fi + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + selectedNetworks: ${{ env.SELECTED_NETWORKS }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # 
Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ${{ matrix.product.pyroscope_env }} + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + + ## Run this step when changes that require tests to be run are made + - name: Run Tests + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=${{ matrix.product.nodes }} ${{ steps.build-go-test-command.outputs.run_command }} 2>&1 | tee /tmp/gotest.log | gotestfmt + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + artifacts_location: ./integration-tests/smoke/logs/ + publish_check_name: ${{ matrix.product.name }} + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: "" + - name: Print failed test summary + if: always() + uses: goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + + eth-smoke-tests-matrix-log-poller: + if: ${{ !(contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') || github.event_name == 'workflow_dispatch') }} + 
environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: + [build-plugin, changes, compare-tests, build-lint-integration-tests] + env: + SELECTED_NETWORKS: SIMULATED,SIMULATED_1,SIMULATED_2 + PLUGIN_COMMIT_SHA: ${{ github.sha }} + PLUGIN_ENV_USER: ${{ github.actor }} + TEST_LOG_LEVEL: debug + strategy: + fail-fast: false + matrix: + product: ${{fromJson(needs.compare-tests.outputs.lp-matrix)}} + runs-on: ${{ matrix.product.os }} + name: ETH Smoke Tests ${{ matrix.product.name }} + steps: + - name: Collect Metrics + if: needs.changes.outputs.src == 'true' + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: ETH Smoke Tests ${{ matrix.product.name }} + test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}' + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Build Go Test Command + id: build-go-test-command + run: | + # if the matrix.product.run is set, use it for a different command + if [ "${{ matrix.product.run }}" != "" ]; then + echo "run_command=${{ matrix.product.run }} ./smoke/${{ matrix.product.file }}_test.go" >> "$GITHUB_OUTPUT" + else + echo "run_command=./smoke/${{ matrix.product.name }}_test.go" >> "$GITHUB_OUTPUT" + fi + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + selectedNetworks: ${{ env.SELECTED_NETWORKS }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ 
matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ${{ matrix.product.pyroscope_env }} + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + ## Run this step when changes that require tests to be run are made + - name: Run Tests + if: needs.changes.outputs.src == 'true' + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=${{ matrix.product.nodes }} ${{ steps.build-go-test-command.outputs.run_command }} 2>&1 | tee /tmp/gotest.log | gotestfmt + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + artifacts_location: ./integration-tests/smoke/logs/ + publish_check_name: ${{ matrix.product.name }} + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: "" + + eth-smoke-tests-matrix: + if: ${{ !contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') }} + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, changes, 
build-lint-integration-tests] + env: + SELECTED_NETWORKS: SIMULATED,SIMULATED_1,SIMULATED_2 + PLUGIN_COMMIT_SHA: ${{ github.sha }} + PLUGIN_ENV_USER: ${{ github.actor }} + TEST_LOG_LEVEL: debug + strategy: + fail-fast: false + matrix: + product: + - name: runlog + nodes: 2 + os: ubuntu-latest + pyroscope_env: "ci-smoke-runlog-evm-simulated" + - name: cron + nodes: 2 + os: ubuntu-latest + pyroscope_env: "ci-smoke-cron-evm-simulated" + - name: flux + nodes: 1 + os: ubuntu-latest + pyroscope_env: "ci-smoke-flux-evm-simulated" + - name: ocr + nodes: 2 + os: ubuntu-latest + file: ocr + pyroscope_env: ci-smoke-ocr-evm-simulated + - name: ocr2 + nodes: 6 + os: ubuntu-latest + file: ocr2 + pyroscope_env: ci-smoke-ocr2-evm-simulated + - name: ocr2 + nodes: 6 + os: ubuntu-latest + pyroscope_env: ci-smoke-ocr2-plugins-evm-simulated + tag_suffix: "-plugins" + - name: vrf + nodes: 2 + os: ubuntu-latest + pyroscope_env: ci-smoke-vrf-evm-simulated + - name: vrfv2 + nodes: 4 + os: ubuntu-latest + pyroscope_env: ci-smoke-vrf2-evm-simulated + - name: vrfv2plus + nodes: 4 + os: ubuntu-latest + pyroscope_env: ci-smoke-vrf2plus-evm-simulated + - name: forwarder_ocr + nodes: 2 + os: ubuntu-latest + pyroscope_env: ci-smoke-forwarder-ocr-evm-simulated + - name: forwarders_ocr2 + nodes: 2 + os: ubuntu-latest + pyroscope_env: ci-smoke-forwarder-ocr-evm-simulated + runs-on: ${{ matrix.product.os }} + name: ETH Smoke Tests ${{ matrix.product.name }}${{ matrix.product.tag_suffix }} + steps: + - name: Collect Metrics + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: ETH Smoke Tests ${{ matrix.product.name }}${{ matrix.product.tag_suffix }} + test-results-file: 
'{"testType":"go","filePath":"/tmp/gotest.log"}' + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Build Go Test Command + id: build-go-test-command + run: | + # if the matrix.product.run is set, use it for a different command + if [ "${{ matrix.product.run }}" != "" ]; then + echo "run_command=${{ matrix.product.run }} ./smoke/${{ matrix.product.file }}_test.go" >> "$GITHUB_OUTPUT" + else + echo "run_command=./smoke/${{ matrix.product.name }}_test.go" >> "$GITHUB_OUTPUT" + fi + - name: Check for "enable tracing" label + id: check-label + run: | + label=$(jq -r '.pull_request.labels[]?.name // empty' "$GITHUB_EVENT_PATH") + + if [[ -n "$label" ]]; then + if [[ "$label" == "enable tracing" ]]; then + echo "Enable tracing label found." + echo "trace=true" >> $GITHUB_OUTPUT + else + echo "Enable tracing label not found." + echo "trace=false" >> $GITHUB_OUTPUT + fi + else + echo "No labels present or labels are null." 
+ echo "trace=false" >> $GITHUB_OUTPUT + fi + + - name: Setup Grafana and OpenTelemetry + id: docker-setup + if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins' + run: | + # Create network + docker network create --driver bridge tracing + + # Make trace directory + cd integration-tests/smoke/ + mkdir ./traces + chmod -R 777 ./traces + + # Switch directory + cd ../../.github/tracing + + # Create a Docker volume for traces + # docker volume create otel-traces + + # Start OpenTelemetry Collector + # Note the user must be set to the same user as the runner for the trace data to be accessible + docker run -d --network=tracing --name=otel-collector \ + -v $PWD/otel-collector-ci.yaml:/etc/otel-collector.yaml \ + -v $PWD/../../integration-tests/smoke/traces:/tracing \ + --user "$(id -u):$(id -g)" \ + -p 4317:4317 otel/opentelemetry-collector:0.88.0 --config=/etc/otel-collector.yaml + - name: Locate Docker Volume + id: locate-volume + if: false + run: | + echo "VOLUME_PATH=$(docker volume inspect --format '{{ .Mountpoint }}' otel-traces)" >> $GITHUB_OUTPUT + - name: Show Otel-Collector Logs + if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins' + run: | + docker logs otel-collector + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + selectedNetworks: ${{ env.SELECTED_NETWORKS }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ${{ matrix.product.pyroscope_env }} + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ 
secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + ## Run this step when changes that require tests to be run are made + - name: Run Tests + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=${{ matrix.product.nodes }} ${{ steps.build-go-test-command.outputs.run_command }} 2>&1 | tee /tmp/gotest.log | gotestfmt + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }}${{ matrix.product.tag_suffix }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + artifacts_name: ${{ matrix.product.name }}${{ matrix.product.tag_suffix }}-test-logs + artifacts_location: ./integration-tests/smoke/logs/ + publish_check_name: ${{ matrix.product.name }} + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: "" + # Run this step when changes that do not need the test to run are made + - name: Run Setup + if: needs.changes.outputs.src == 'false' + uses: goplugin/plugin-github-actions/plugin-testing-framework/setup-run-tests-environment@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_download_vendor_packages_command: cd ./integration-tests && go mod download + go_mod_path: ./integration-tests/go.mod + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION 
}} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Show Otel-Collector Logs + if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins' + run: | + docker logs otel-collector + - name: Permissions on traces + if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins' + run: | + ls -l ./integration-tests/smoke/traces + - name: Upload Trace Data + if: steps.check-label.outputs.trace == 'true' && matrix.product.name == 'ocr2' && matrix.product.tag_suffix == '-plugins' + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + name: trace-data + path: ./integration-tests/smoke/traces/trace-data.json + - name: Print failed test summary + if: always() + uses: goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_directory: ./integration-tests/smoke/ + + ### Used to check the required checks box when the matrix completes + eth-smoke-tests: + if: always() + runs-on: ubuntu-latest + name: ETH Smoke Tests + needs: [eth-smoke-tests-matrix, eth-smoke-tests-matrix-automation] + # needs: [eth-smoke-tests-matrix] + steps: + - name: Check smoke test matrix status + if: needs.eth-smoke-tests-matrix.result != 'success' || needs.eth-smoke-tests-matrix-automation.result != 'success' + run: | + echo "${{ needs.eth-smoke-tests-matrix.result }}" + exit 1 + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: ETH Smoke Tests + 
matrix-aggregator-status: ${{ needs.eth-smoke-tests-matrix.result }} + continue-on-error: true + + cleanup: + name: Clean up integration environment deployments + if: always() + needs: [eth-smoke-tests] + runs-on: ubuntu-latest + steps: + - name: Checkout repo + if: ${{ github.event_name == 'pull_request' }} + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: 🧼 Clean up Environment + if: ${{ github.event_name == 'pull_request' }} + uses: ./.github/actions/delete-deployments + with: + environment: integration + ref: ${{ github.head_ref }} # See https://github.com/github/docs/issues/15319#issuecomment-1476705663 + + - name: Collect Metrics + if: ${{ github.event_name == 'pull_request' }} + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Clean up integration environment deployments + continue-on-error: true + + # Run the setup if the matrix finishes but this time save the cache if we have a cache hit miss + # this will also only run if both of the matrix jobs pass + eth-smoke-go-mod-cache: + environment: integration + needs: [eth-smoke-tests] + runs-on: ubuntu20.04-16cores-64GB + name: ETH Smoke Tests Go Mod Cache + continue-on-error: true + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Run Setup + uses: goplugin/plugin-github-actions/plugin-testing-framework/setup-go@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_download_vendor_packages_command: | + cd ./integration-tests + go mod download + # force download of test dependencies + go test -run=NonExistentTest ./smoke/... 
|| echo "ignore expected test failure" + go_mod_path: ./integration-tests/go.mod + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "false" + + ### Migration tests + node-migration-tests: + name: Version Migration Tests + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: [build-plugin, changes, build-test-image] + # Only run migration tests on new tags + if: startsWith(github.ref, 'refs/tags/') + env: + SELECTED_NETWORKS: SIMULATED,SIMULATED_1,SIMULATED_2 + PLUGIN_COMMIT_SHA: ${{ github.sha }} + PLUGIN_ENV_USER: ${{ github.actor }} + PLUGIN_IMAGE: public.ecr.aws/plugin/plugin + UPGRADE_VERSION: ${{ github.sha }} + UPGRADE_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin + TEST_LOG_LEVEL: debug + TEST_SUITE: migration + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Get Latest Version + id: get_latest_version + run: | + untrimmed_ver=$(curl --header "Authorization: token ${{ secrets.GITHUB_TOKEN }}" --request GET https://api.github.com/repos/${{ github.repository }}/releases/latest | jq -r .name) + latest_version="${untrimmed_ver:1}" + echo "latest_version=${latest_version}" | tee -a "$GITHUB_OUTPUT" + - name: Name Versions + run: | + echo "Running migration tests from version '${{ steps.get_latest_version.outputs.latest_version }}' to: '${{ github.sha }}'" + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-upgrade-config + with: + selectedNetworks: ${{ env.SELECTED_NETWORKS }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ steps.get_latest_version.outputs.latest_version }} + upgradeImage: ${{ env.UPGRADE_IMAGE }} + upgradeVersion: ${{ env.UPGRADE_VERSION }} + - name: Run Migration 
Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json ./migration 2>&1 | tee /tmp/gotest.log | gotestfmt + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ steps.get_latest_version.outputs.latest_version }} + artifacts_location: ./integration-tests/migration/logs + publish_check_name: Node Migration Test Results + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Upload test log + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + if: failure() + with: + name: test-log-${{ matrix.product.name }} + path: /tmp/gotest.log + retention-days: 7 + continue-on-error: true + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Version Migration Tests + test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}' + continue-on-error: true + + ## Solana Section + get_solana_sha: + name: Get Solana Sha From Go Mod + environment: Integration + runs-on: ubuntu-latest + outputs: + sha: ${{ steps.getsha.outputs.sha }} + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Setup Go + 
uses: ./.github/actions/setup-go + with: + only-modules: "true" + - name: Get the sha from go mod + id: getshortsha + run: | + sol_ver=$(go list -m -json github.com/goplugin/plugin-solana | jq -r .Version) + if [ -z "${sol_ver}" ]; then + echo "Error: could not get the solana version from the go.mod file, look above for error(s)" + exit 1 + fi + short_sha="${sol_ver##*-}" + echo "short sha is: ${short_sha}" + echo "short_sha=${short_sha}" >> "$GITHUB_OUTPUT" + - name: Checkout solana + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: goplugin/plugin-solana + ref: develop + fetch-depth: 0 + path: solanapath + - name: Get long sha + id: getsha + run: | + cd solanapath + full_sha=$(git rev-parse ${{steps.getshortsha.outputs.short_sha}}) + if [ -z "${full_sha}" ]; then + echo "Error: could not get the full sha from the short sha using git, look above for error(s)" + exit 1 + fi + echo "sha is: ${full_sha}" + echo "sha=${full_sha}" >> "$GITHUB_OUTPUT" + + get_projectserum_version: + name: Get ProjectSerum Version + environment: integration + runs-on: ubuntu-latest + needs: [get_solana_sha] + outputs: + projectserum_version: ${{ steps.psversion.outputs.projectserum_version }} + steps: + - name: Checkout the solana repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: goplugin/plugin-solana + ref: ${{ needs.get_solana_sha.outputs.sha }} + - name: Get ProjectSerum Version + id: psversion + uses: goplugin/plugin-solana/.github/actions/projectserum_version@4b971869e26b79c7ce3fb7c98005cc2e3f350915 # stable action on Oct 12 2022 + + solana-test-image-exists: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + name: Check If Solana Test Image Exists + runs-on: ubuntu-latest + needs: [get_solana_sha] + outputs: + exists: ${{ steps.check-image.outputs.exists }} + steps: + - name: Check if image exists + id: check-image + 
uses: goplugin/plugin-github-actions/docker/image-exists@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + repository: plugin-solana-tests + tag: ${{ needs.get_solana_sha.outputs.sha }} + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + + solana-build-contracts: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + name: Solana Build Artifacts + runs-on: ubuntu20.04-16cores-64GB + needs: + [ + changes, + get_projectserum_version, + solana-test-image-exists, + get_solana_sha, + ] + container: + image: projectserum/build:${{ needs.get_projectserum_version.outputs.projectserum_version }} + env: + RUSTUP_HOME: "/root/.rustup" + FORCE_COLOR: 1 + steps: + - name: Collect Metrics + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Solana Build Artifacts + continue-on-error: true + - name: Checkout the solana repo + # Use v3.6.0 because the custom runner (container configured above) + # doesn't have node20 installed which is required for versions >=4 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + repository: goplugin/plugin-solana + ref: ${{ needs.get_solana_sha.outputs.sha }} + - name: Build contracts + if: (needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch') && needs.solana-test-image-exists.outputs.exists == 'false' + uses: goplugin/plugin-solana/.github/actions/build_contract_artifacts@21675b3a7dcdff8e790391708d4763020cace21e # stable action on December 18 2023 + with: + ref: ${{ needs.get_solana_sha.outputs.sha }} + + solana-build-test-image: + 
environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + name: Solana Build Test Image + runs-on: ubuntu20.04-16cores-64GB + needs: + [ + solana-build-contracts, + solana-test-image-exists, + changes, + get_solana_sha, + ] + env: + CONTRACT_ARTIFACTS_PATH: contracts/target/deploy + steps: + - name: Collect Metrics + if: (needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch') && needs.solana-test-image-exists.outputs.exists == 'false' + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Solana Build Test Image + continue-on-error: true + - name: Checkout the repo + if: (needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch') && needs.solana-test-image-exists.outputs.exists == 'false' + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: goplugin/plugin-solana + ref: ${{ needs.get_solana_sha.outputs.sha }} + - name: Build Test Image + if: (needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch') && needs.solana-test-image-exists.outputs.exists == 'false' + uses: ./.github/actions/build-test-image + with: + tag: ${{ needs.get_solana_sha.outputs.sha }} + artifacts_path: ${{ env.CONTRACT_ARTIFACTS_PATH }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + - run: echo "this exists so we don't have to run anything else if the build is skipped" + if: needs.changes.outputs.src == 'false' || needs.solana-test-image-exists.outputs.exists == 'true' + + solana-smoke-tests: + if: ${{ !contains(join(github.event.pull_request.labels.*.name, ' '), 
'skip-smoke-tests') }} + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + name: Solana Smoke Tests + runs-on: ubuntu20.04-16cores-64GB + needs: + [ + build-plugin, + solana-build-contracts, + solana-build-test-image, + changes, + get_solana_sha, + ] + env: + PLUGIN_COMMIT_SHA: ${{ github.sha }} + PLUGIN_ENV_USER: ${{ github.actor }} + TEST_LOG_LEVEL: debug + CONTRACT_ARTIFACTS_PATH: contracts/target/deploy + steps: + - name: Collect Metrics + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Solana Smoke Tests + test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}' + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: goplugin/plugin-solana + ref: ${{ needs.get_solana_sha.outputs.sha }} + - name: Run Setup + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + uses: goplugin/plugin-github-actions/plugin-testing-framework/setup-run-tests-environment@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + go_mod_path: ./integration-tests/go.mod + cache_restore_only: true + cache_key_id: core-solana-e2e-${{ env.MOD_CACHE_VERSION }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Pull Artfacts + if: needs.changes.outputs.src == 'true' 
|| github.event_name == 'workflow_dispatch' + run: | + IMAGE_NAME=${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin-solana-tests:${{ needs.get_solana_sha.outputs.sha }} + # Pull the Docker image + docker pull "$IMAGE_NAME" + + # Create a container without starting it + CONTAINER_ID=$(docker create "$IMAGE_NAME") + + # Copy the artifacts from the container + mkdir -p ./${{env.CONTRACT_ARTIFACTS_PATH}}/ + docker cp "$CONTAINER_ID:/go/testdir/${{env.CONTRACT_ARTIFACTS_PATH}}/" "./${{env.CONTRACT_ARTIFACTS_PATH}}/../" + + # Remove the created container + docker rm "$CONTAINER_ID" + - name: Generate config overrides + run: | # https://github.com/goplugin/plugin-testing-framework/blob/main/config/README.md + cat << EOF > config.toml + [PluginImage] + image="${{ env.PLUGIN_IMAGE }}" + version="${{ github.sha }}" + EOF + # shellcheck disable=SC2002 + BASE64_CONFIG_OVERRIDE=$(cat config.toml | base64 -w 0) + # shellcheck disable=SC2086 + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + # shellcheck disable=SC2086 + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Run Tests + if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: export ENV_JOB_IMAGE=${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin-solana-tests:${{ needs.get_solana_sha.outputs.sha }} && make test_smoke + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + artifacts_location: /home/runner/work/plugin-solana/plugin-solana/integration-tests/logs + publish_check_name: Solana Smoke Test Results + go_mod_path: ./integration-tests/go.mod + cache_key_id: core-solana-e2e-${{ env.MOD_CACHE_VERSION }} + token: ${{ secrets.GITHUB_TOKEN }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + QA_AWS_REGION: 
${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: "" + run_setup: false + - name: Upload test log + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + if: failure() + with: + name: test-log-solana + path: /tmp/gotest.log + retention-days: 7 + continue-on-error: true diff --git a/.github/workflows/lint-gh-workflows.yml b/.github/workflows/lint-gh-workflows.yml new file mode 100644 index 00000000..d79a28b8 --- /dev/null +++ b/.github/workflows/lint-gh-workflows.yml @@ -0,0 +1,22 @@ +name: Lint GH Workflows +on: + push: +jobs: + lint_workflows: + name: Validate Github Action Workflows + runs-on: ubuntu-latest + steps: + - name: Check out Code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Run actionlint + uses: reviewdog/action-actionlint@82693e9e3b239f213108d6e412506f8b54003586 # v1.39.1 + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Validate Github Action Workflows + continue-on-error: true diff --git a/.github/workflows/live-testnet-tests.yml b/.github/workflows/live-testnet-tests.yml new file mode 100644 index 00000000..4d5fc6ca --- /dev/null +++ b/.github/workflows/live-testnet-tests.yml @@ -0,0 +1,976 @@ +# *** +# This workflow is a monstrosity of copy-paste, and that's to increase legibility in reporting and running, so the code be damned. +# I suspect this can be cleaned up significantly with some clever trickery of the GitHub actions matrices, but I am not that clever. 
+# We want each chain to run in parallel, but each test within the chain needs to be able to run sequentially +# (we're trying to eliminate this as a requirement, should make it a lot easier). +# Each chain can have a variety of tests to run. +# We also want reporting to be clear in the start-slack-thread and post-test-results-to-slack jobs. +# *** + +name: Live Testnet Tests +on: + schedule: + - cron: "0 5 * * *" # Run every night at midnight EST + push: + tags: + - "*" + workflow_dispatch: + +env: + PLUGIN_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin + INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com + MOD_CACHE_VERSION: 2 + PLUGIN_NODE_FUNDING: .5 + PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} + LOKI_TENANT_ID: ${{ vars.LOKI_TENANT_ID }} + LOKI_URL: ${{ secrets.LOKI_URL }} + LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} + LOGSTREAM_LOG_TARGETS: loki + GRAFANA_URL: ${{ vars.GRAFANA_URL }} + RUN_ID: ${{ github.run_id }} + + PLUGIN_COMMIT_SHA: ${{ github.sha }} + PLUGIN_ENV_USER: ${{ github.actor }} + TEST_LOG_LEVEL: debug + +jobs: + + # Build Test Dependencies + + build-plugin: + environment: integration + permissions: + id-token: write + contents: read + name: Build Plugin Image + runs-on: ubuntu-latest + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Plugin Image + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Build Plugin Image + uses: ./.github/actions/build-plugin-image + 
with: + tag_suffix: "" + dockerfile: core/plugin.Dockerfile + git_commit_sha: ${{ github.sha }} + GRAFANA_CLOUD_BASIC_AUTH: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }} + GRAFANA_CLOUD_HOST: ${{ secrets.GRAFANA_CLOUD_HOST }} + AWS_REGION: ${{ secrets.QA_AWS_REGION }} + AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + + build-tests: + environment: integration + permissions: + id-token: write + contents: read + name: Build Tests Binary + runs-on: ubuntu-latest + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Build Tests Binary + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Build Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/build-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_download_vendor_packages_command: cd ./integration-tests && go mod download + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + go_tags: embed + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + binary_name: tests + + # End Build Test Dependencies + + # Reporting Jobs + + start-slack-thread: + name: Start Slack Thread + if: ${{ always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' }} + environment: integration + outputs: + thread_ts: ${{ steps.slack.outputs.thread_ts }} + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: [sepolia-smoke-tests, optimism-sepolia-smoke-tests, arbitrum-sepolia-smoke-tests, base-sepolia-smoke-tests, 
polygon-mumbai-smoke-tests, avalanche-fuji-smoke-tests, fantom-testnet-smoke-tests, celo-alfajores-smoke-tests, linea-goerli-smoke-tests, bsc-testnet-smoke-tests] + steps: + - name: Debug Result + run: echo ${{ join(needs.*.result, ',') }} + - name: Main Slack Notification + uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + id: slack + with: + channel-id: ${{ secrets.QA_SLACK_CHANNEL }} + payload: | + { + "attachments": [ + { + "color": "${{ contains(join(needs.*.result, ','), 'failure') && '#C62828' || '#2E7D32' }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "Live Smoke Test Results ${{ contains(join(needs.*.result, ','), 'failure') && ':x:' || ':white_check_mark:'}}", + "emoji": true + } + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "${{ contains(join(needs.*.result, ','), 'failure') && 'Some tests failed, notifying <@U01Q4N37KFG>' || 'All Good!' }}" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "<${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}|${{ github.ref_name }}> | <${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}|${{ github.sha }}> | <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|Run>" + } + } + ] + } + ] + } + env: + SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} + + post-test-results-to-slack: + name: Post Test Results for ${{ matrix.network }} + if: ${{ always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' }} + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + runs-on: ubuntu-latest + needs: start-slack-thread + strategy: + fail-fast: false + matrix: + network: [Sepolia, Optimism Sepolia, Arbitrum Sepolia, Base Sepolia, Polygon Mumbai, Avalanche Fuji, Fantom Testnet, Celo Alfajores, Linea Goerli, BSC 
Testnet] + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} + - name: Post Test Results + uses: ./.github/actions/notify-slack-jobs-result + with: + github_token: ${{ github.token }} + github_repository: ${{ github.repository }} + workflow_run_id: ${{ github.run_id }} + github_job_name_regex: ^${{ matrix.network }} (?.*?) Tests$ + message_title: ${{ matrix.network }} + slack_channel_id: ${{ secrets.QA_SLACK_CHANNEL }} + slack_bot_token: ${{ secrets.QA_SLACK_API_KEY }} + slack_thread_ts: ${{ needs.start-slack-thread.outputs.thread_ts }} + + # End Reporting Jobs + + sepolia-smoke-tests: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, build-tests] + strategy: + max-parallel: 1 + fail-fast: false + matrix: + include: # https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs#example-adding-configurations + - product: OCR + test: TestOCRBasic + - product: Automation Conditional + test: TestAutomationBasic/registry_2_1_conditional + - product: Automation Log Trigger + test: TestAutomationBasic/registry_2_1_logtrigger + name: Sepolia ${{ matrix.product }} Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config-live-testnets + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ci-smoke-${{ 
matrix.product }}-sepolia + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + network: "sepolia" + httpEndpoints: ${{ secrets.QA_SEPOLIA_HTTP_URLS }} + wsEndpoints: ${{ secrets.QA_SEPOLIA_URLS }} + fundingKeys: ${{ secrets.QA_EVM_KEYS }} + - name: Download Tests Binary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: tests + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests-binary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: ./tests -test.timeout 30m -test.count=1 -test.parallel=1 -test.run ${{ matrix.test }} + binary_name: tests + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + artifacts_location: ./logs + token: ${{ secrets.GITHUB_TOKEN }} + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Print failed test summary + if: always() + uses: goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_directory: "./" + + bsc-testnet-smoke-tests: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, build-tests] + strategy: + max-parallel: 1 + fail-fast: false + matrix: + include: # 
https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs#example-adding-configurations + - product: OCR + test: TestOCRBasic + - product: Automation Conditional + test: TestAutomationBasic/registry_2_1_conditional + - product: Automation Log Trigger + test: TestAutomationBasic/registry_2_1_logtrigger + name: BSC Testnet ${{ matrix.product }} Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config-live-testnets + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ci-smoke-${{ matrix.product }}-bsc-testnet + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + network: "bsc_testnet" + httpEndpoints: ${{ secrets.QA_BSC_TESTNET_HTTP_URLS }} + wsEndpoints: ${{ secrets.QA_BSC_TESTNET_URLS }} + fundingKeys: ${{ secrets.QA_EVM_KEYS }} + - name: Download Tests Binary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: tests + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests-binary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: ./tests -test.timeout 30m -test.count=1 -test.parallel=1 -test.run ${{ matrix.test }} 
+ binary_name: tests + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + artifacts_location: ./logs + token: ${{ secrets.GITHUB_TOKEN }} + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Print failed test summary + if: always() + uses: goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_directory: "./" + + optimism-sepolia-smoke-tests: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, build-tests] + strategy: + max-parallel: 1 + fail-fast: false + matrix: + include: # https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs#example-adding-configurations + - product: OCR + test: TestOCRBasic + - product: Automation Conditional + test: TestAutomationBasic/registry_2_1_conditional + - product: Automation Log Trigger + test: TestAutomationBasic/registry_2_1_logtrigger + name: Optimism Sepolia ${{ matrix.product }} Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config-live-testnets + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs 
https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ci-smoke-${{ matrix.product }}-optimism-sepolia + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + network: "optimism_sepolia" + httpEndpoints: ${{ secrets.QA_OPTIMISM_SEPOLIA_HTTP_URLS }} + wsEndpoints: ${{ secrets.QA_OPTIMISM_SEPOLIA_URLS }} + fundingKeys: ${{ secrets.QA_EVM_KEYS }} + - name: Download Tests Binary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: tests + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests-binary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: ./tests -test.timeout 30m -test.count=1 -test.parallel=1 -test.run ${{ matrix.test }} + binary_name: tests + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + artifacts_location: ./logs + token: ${{ secrets.GITHUB_TOKEN }} + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Print failed test summary + if: always() + uses: goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_directory: "./" + + arbitrum-sepolia-smoke-tests: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: 
write + contents: read + needs: [build-plugin, build-tests] + strategy: + max-parallel: 1 + fail-fast: false + matrix: + include: # https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs#example-adding-configurations + - product: OCR + test: TestOCRBasic + - product: Automation Conditional + test: TestAutomationBasic/registry_2_1_conditional + - product: Automation Log Trigger + test: TestAutomationBasic/registry_2_1_logtrigger + name: Arbitrum Sepolia ${{ matrix.product }} Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config-live-testnets + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ci-smoke-${{ matrix.product }}-arbitrum-sepolia + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + network: "arbitrum_sepolia" + httpEndpoints: ${{ secrets.QA_ARBITRUM_SEPOLIA_HTTP_URLS }} + wsEndpoints: ${{ secrets.QA_ARBITRUM_SEPOLIA_URLS }} + fundingKeys: ${{ secrets.QA_EVM_KEYS }} + - name: Download Tests Binary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: tests + - name: Run Tests + uses: 
goplugin/plugin-github-actions/plugin-testing-framework/run-tests-binary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: ./tests -test.timeout 30m -test.count=1 -test.parallel=1 -test.run ${{ matrix.test }} + binary_name: tests + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + artifacts_location: ./logs + token: ${{ secrets.GITHUB_TOKEN }} + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Print failed test summary + if: always() + uses: goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_directory: "./" + + base-sepolia-smoke-tests: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, build-tests] + strategy: + max-parallel: 1 + fail-fast: false + matrix: + include: # https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs#example-adding-configurations + - product: OCR + test: TestOCRBasic + name: Base Sepolia ${{ matrix.product }} Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config-live-testnets + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || 
secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ci-smoke-${{ matrix.product }}-base-sepolia + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + network: "base_sepolia" + httpEndpoints: ${{ secrets.QA_BASE_SEPOLIA_HTTP_URLS }} + wsEndpoints: ${{ secrets.QA_BASE_SEPOLIA_URLS }} + fundingKeys: ${{ secrets.QA_EVM_KEYS }} + - name: Download Tests Binary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: tests + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests-binary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: ./tests -test.timeout 30m -test.count=1 -test.parallel=1 -test.run ${{ matrix.test }} + binary_name: tests + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + artifacts_location: ./logs + token: ${{ secrets.GITHUB_TOKEN }} + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Print failed test summary + if: always() + uses: goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_directory: "./" + + polygon-mumbai-smoke-tests: + environment: integration + permissions: + checks: 
write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, build-tests] + strategy: + max-parallel: 1 + fail-fast: false + matrix: + include: # https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs#example-adding-configurations + - product: OCR + test: TestOCRBasic + - product: Automation Conditional + test: TestAutomationBasic/registry_2_1_conditional + - product: Automation Log Trigger + test: TestAutomationBasic/registry_2_1_logtrigger + name: Polygon Mumbai ${{ matrix.product }} Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config-live-testnets + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ci-smoke-${{ matrix.product }}-polygon-mumbai + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + network: "polygon_mumbai" + httpEndpoints: ${{ secrets.QA_POLYGON_MUMBAI_HTTP_URLS }} + wsEndpoints: ${{ secrets.QA_POLYGON_MUMBAI_URLS }} + fundingKeys: ${{ secrets.QA_EVM_KEYS }} + - name: Download Tests Binary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: tests + - name: Run Tests + uses: 
goplugin/plugin-github-actions/plugin-testing-framework/run-tests-binary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: ./tests -test.timeout 30m -test.count=1 -test.parallel=1 -test.run ${{ matrix.test }} + binary_name: tests + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + artifacts_location: ./logs + token: ${{ secrets.GITHUB_TOKEN }} + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Print failed test summary + if: always() + uses: goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_directory: "./" + + avalanche-fuji-smoke-tests: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, build-tests] + strategy: + max-parallel: 1 + fail-fast: false + matrix: + include: # https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs#example-adding-configurations + - product: OCR + test: TestOCRBasic + - product: Automation Conditional + test: TestAutomationBasic/registry_2_1_conditional + - product: Automation Log Trigger + test: TestAutomationBasic/registry_2_1_logtrigger + name: Avalanche Fuji ${{ matrix.product }} Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config-live-testnets + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + 
pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ci-smoke-${{ matrix.product }}-avalanche-fuji + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + network: "avalanche_fuji" + httpEndpoints: ${{ secrets.QA_AVALANCHE_FUJI_HTTP_URLS }} + wsEndpoints: ${{ secrets.QA_AVALANCHE_FUJI_URLS }} + fundingKeys: ${{ secrets.QA_EVM_KEYS }} + - name: Download Tests Binary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: tests + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests-binary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: ./tests -test.timeout 30m -test.count=1 -test.parallel=1 -test.run ${{ matrix.test }} + binary_name: tests + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + artifacts_location: ./logs + token: ${{ secrets.GITHUB_TOKEN }} + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Print failed test summary + if: always() + uses: 
goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_directory: "./" + + fantom-testnet-smoke-tests: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, build-tests] + strategy: + max-parallel: 1 + fail-fast: false + matrix: + include: # https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs#example-adding-configurations + - product: OCR + test: TestOCRBasic + - product: Automation Conditional + test: TestAutomationBasic/registry_2_1_conditional + - product: Automation Log Trigger + test: TestAutomationBasic/registry_2_1_logtrigger + name: Fantom Testnet ${{ matrix.product }} Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config-live-testnets + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ci-smoke-${{ matrix.product }}-fantom-testnet + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + network: "fantom_testnet" + httpEndpoints: ${{ secrets.QA_FANTOM_TESTNET_HTTP_URLS }} + wsEndpoints: ${{ secrets.QA_FANTOM_TESTNET_URLS }} + fundingKeys: ${{ 
secrets.QA_EVM_KEYS }} + - name: Download Tests Binary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: tests + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests-binary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: ./tests -test.timeout 30m -test.count=1 -test.parallel=1 -test.run ${{ matrix.test }} + binary_name: tests + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + artifacts_location: ./logs + token: ${{ secrets.GITHUB_TOKEN }} + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Print failed test summary + if: always() + uses: goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_directory: "./" + + celo-alfajores-smoke-tests: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, build-tests] + strategy: + max-parallel: 1 + fail-fast: false + matrix: + include: # https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs#example-adding-configurations + - product: OCR + test: TestOCRBasic + name: Celo Alfajores ${{ matrix.product }} Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config-live-testnets + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} 
+ pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ci-smoke-${{ matrix.product }}-celo-alfajores + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + network: "celo_alfajores" + httpEndpoints: ${{ secrets.QA_CELO_ALFAJORES_HTTP_URLS }} + wsEndpoints: ${{ secrets.QA_CELO_ALFAJORES_URLS }} + fundingKeys: ${{ secrets.QA_EVM_KEYS }} + - name: Download Tests Binary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: tests + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests-binary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: ./tests -test.timeout 30m -test.count=1 -test.parallel=1 -test.run ${{ matrix.test }} + binary_name: tests + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + artifacts_location: ./logs + token: ${{ secrets.GITHUB_TOKEN }} + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Print failed test summary + if: always() + uses: 
goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_directory: "./" + + scroll-sepolia-smoke-tests: + if: false + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, build-tests] + strategy: + max-parallel: 1 + fail-fast: false + matrix: + include: # https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs#example-adding-configurations + - product: OCR + test: TestOCRBasic + name: Scroll Sepolia ${{ matrix.product }} Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config-live-testnets + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ci-smoke-${{ matrix.product }}-scroll-sepolia + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + network: "scroll_sepolia" + httpEndpoints: ${{ secrets.QA_SCROLL_SEPOLIA_HTTP_URLS }} + wsEndpoints: ${{ secrets.QA_SCROLL_SEPOLIA_URLS }} + fundingKeys: ${{ secrets.QA_EVM_KEYS }} + - name: Download Tests Binary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: tests + - 
name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests-binary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: ./tests -test.timeout 30m -test.count=1 -test.parallel=1 -test.run ${{ matrix.test }} + binary_name: tests + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + artifacts_location: ./logs + token: ${{ secrets.GITHUB_TOKEN }} + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Print failed test summary + if: always() + uses: goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_directory: "./" + + linea-goerli-smoke-tests: + environment: integration + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + needs: [build-plugin, build-tests] + strategy: + max-parallel: 1 + fail-fast: false + matrix: + include: # https://docs.github.com/en/actions/using-jobs/using-a-matrix-for-your-jobs#example-adding-configurations + - product: OCR + test: TestOCRBasic + name: Linea Goerli ${{ matrix.product }} Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Prepare Base64 TOML override + uses: ./.github/actions/setup-create-base64-config-live-testnets + with: + runId: ${{ github.run_id }} + testLogCollect: ${{ vars.TEST_LOG_COLLECT }} + pluginImage: ${{ env.PLUGIN_IMAGE }} + pluginVersion: ${{ github.sha }} + pyroscopeServer: ${{ matrix.product.pyroscope_env == '' && '' || !startsWith(github.ref, 
'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 + pyroscopeEnvironment: ci-smoke-${{ matrix.product }}-linea-goerli + pyroscopeKey: ${{ secrets.QA_PYROSCOPE_KEY }} + lokiEndpoint: ${{ secrets.LOKI_URL }} + lokiTenantId: ${{ vars.LOKI_TENANT_ID }} + lokiBasicAuth: ${{ secrets.LOKI_BASIC_AUTH }} + logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} + grafanaUrl: ${{ vars.GRAFANA_URL }} + grafanaDashboardUrl: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" + network: "linea_goerli" + httpEndpoints: ${{ secrets.QA_LINEA_GOERLI_HTTP_URLS }} + wsEndpoints: ${{ secrets.QA_LINEA_GOERLI_URLS }} + fundingKeys: ${{ secrets.QA_EVM_KEYS }} + - name: Download Tests Binary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: tests + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests-binary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: ./tests -test.timeout 30m -test.count=1 -test.parallel=1 -test.run ${{ matrix.test }} + binary_name: tests + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ github.sha }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} + dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} + artifacts_location: ./logs + token: ${{ secrets.GITHUB_TOKEN }} + cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} + cache_restore_only: "true" + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} + - name: Print failed test summary + if: always() + uses: goplugin/plugin-github-actions/plugin-testing-framework/show-test-summary@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_directory: "./" diff --git a/.github/workflows/nightlyfuzz.yml 
b/.github/workflows/nightlyfuzz.yml new file mode 100644 index 00000000..7becbe73 --- /dev/null +++ b/.github/workflows/nightlyfuzz.yml @@ -0,0 +1,53 @@ +name: 'nightly/tag fuzz' +on: + schedule: + # Note: The schedule event can be delayed during periods of high + # loads of GitHub Actions workflow runs. High load times include + # the start of every hour. To decrease the chance of delay, + # schedule your workflow to run at a different time of the hour. + - cron: "25 0 * * *" # at 25 past midnight every day + push: + tags: + - '*' + workflow_dispatch: null +jobs: + fuzzrun: + name: "run native fuzzers" + runs-on: "ubuntu20.04-4cores-16GB" + steps: + - name: "Checkout" + uses: "actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11" # v4.1.1 + - name: "Setup go" + uses: "actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491" # v5.0.0 + with: + go-version-file: 'go.mod' + cache: true + cache-dependency-path: 'go.sum' + - name: "Get corpus directory" + id: "get-corpus-dir" + run: echo "corpus_dir=$(go env GOCACHE)/fuzz" >> $GITHUB_OUTPUT + shell: "bash" + - name: "Restore corpus" + uses: "actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2" # v4.0.0 + id: "restore-corpus" + with: + path: "${{ steps.get-corpus-dir.outputs.corpus_dir }}" + # We need to ensure uniqueness of the key, as saving to a key more than once will fail (see Save corpus step). + # We never expect a cache hit with the key but we do expect a hit with the restore-keys prefix that is going + # to match the latest cache that has that prefix. + key: "nightlyfuzz-corpus-${{ github.run_id }}-${{ github.run_attempt }}" + restore-keys: "nightlyfuzz-corpus-" + - name: "Run native fuzzers" + # Fuzz for 1 hour + run: "cd fuzz && ./fuzz_all_native.py --seconds 3600" + - name: "Print failing testcases" + if: failure() + run: find . 
-type f|fgrep '/testdata/fuzz/'|while read f; do echo $f; cat $f; done + - name: "Save corpus" + uses: "actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2" # v4.0.0 + # We save also on failure, so that we can keep the valuable corpus generated that led to finding a crash. + # If the corpus gets clobbered for any reason, we can remove the offending cache from the Github UI. + if: always() + with: + path: "${{ steps.get-corpus-dir.outputs.corpus_dir }}" + key: "${{ steps.restore-corpus.outputs.cache-primary-key }}" \ No newline at end of file diff --git a/.github/workflows/on-demand-log-poller.yml b/.github/workflows/on-demand-log-poller.yml new file mode 100644 index 00000000..ad361784 --- /dev/null +++ b/.github/workflows/on-demand-log-poller.yml @@ -0,0 +1,34 @@ +name: On Demand Log Poller Consistency Test +on: + workflow_dispatch: + inputs: + base64Config: + description: base64-ed config + required: true + type: string + +jobs: + test: + env: + REF_NAME: ${{ github.head_ref || github.ref_name }} + runs-on: ubuntu20.04-8cores-32GB + steps: + - name: Add masks and export base64 config + run: | + BASE64_CONFIG_OVERRIDE=$(jq -r '.inputs.base64Config' $GITHUB_EVENT_PATH) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ env.REF_NAME }} + - name: Setup Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version-file: "integration-tests/go.mod" + cache: true + - name: Run tests + run: | + cd integration-tests + go mod download + go test -v -timeout 5h -count=1 -run ^TestLogPollerFewFiltersFixedDepth$ ./smoke/log_poller_test.go diff --git a/.github/workflows/on-demand-ocr-soak-test.yml b/.github/workflows/on-demand-ocr-soak-test.yml new file mode 100644 index 00000000..1dd3b3a4 --- /dev/null +++ 
b/.github/workflows/on-demand-ocr-soak-test.yml @@ -0,0 +1,94 @@ +name: On Demand OCR Soak Test +on: + workflow_dispatch: + inputs: + base64Config: + description: base64-ed config + required: true + type: string + slackMemberID: + description: Slack Member ID (Not your @) + required: true + default: U01A2B2C3D4 + type: string + +jobs: + ocr_soak_test: + name: OCR Soak Test + environment: integration + runs-on: ubuntu-latest + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + env: + PLUGIN_ENV_USER: ${{ github.actor }} + SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} + SLACK_CHANNEL: ${{ secrets.QA_SLACK_CHANNEL }} + TEST_LOG_LEVEL: debug + REF_NAME: ${{ github.head_ref || github.ref_name }} + ENV_JOB_IMAGE_BASE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin-tests + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: OCR Soak Test + continue-on-error: true + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ env.REF_NAME }} + - name: Get Slack config and mask base64 config + run: | + SLACK_USER=$(jq -r '.inputs.slackMemberID' $GITHUB_EVENT_PATH) + echo ::add-mask::$SLACK_USER + echo SLACK_USER=$SLACK_USER >> $GITHUB_ENV + + BASE64_CONFIG_OVERRIDE=$(jq -r '.inputs.base64Config' $GITHUB_EVENT_PATH) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Parse base64 config + uses: ./.github/actions/setup-parse-base64-config + with: + base64Config: ${{ env.BASE64_CONFIG_OVERRIDE }} + - name: Setup Push Tag + shell: bash + run: | + echo "### plugin 
image used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.PLUGIN_IMAGE }}\`" >>$GITHUB_STEP_SUMMARY + echo "### plugin-tests image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY + echo "\`${GITHUB_SHA}\`" >>$GITHUB_STEP_SUMMARY + echo "### Networks on which test was run" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.NETWORKS }}\`" >>$GITHUB_STEP_SUMMARY + - name: Build Image + uses: ./.github/actions/build-test-image + with: + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + env: + DETACH_RUNNER: true + TEST_SUITE: soak + TEST_ARGS: -test.timeout 900h -test.memprofile memprofile.out -test.cpuprofile profile.out + ENV_JOB_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/plugin-tests:${{ github.sha }} + # We can comment these out when we have a stable soak test and aren't worried about resource consumption + TEST_UPLOAD_CPU_PROFILE: true + TEST_UPLOAD_MEM_PROFILE: true + with: + test_command_to_run: cd ./integration-tests && go test -v -count=1 -run ^TestOCRSoak$ ./soak + test_download_vendor_packages_command: make gomod + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ env.PLUGIN_VERSION }} + token: ${{ secrets.GITHUB_TOKEN }} + should_cleanup: false + go_mod_path: ./integration-tests/go.mod + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} diff --git a/.github/workflows/on-demand-vrfv2-eth2-clients-test.yml b/.github/workflows/on-demand-vrfv2-eth2-clients-test.yml new file mode 100644 index 00000000..21c8940a --- /dev/null +++ b/.github/workflows/on-demand-vrfv2-eth2-clients-test.yml @@ -0,0 +1,63 @@ +name: On Demand VRFV2 Smoke 
Test (Ethereum clients) +on: + workflow_dispatch: + inputs: + base64Config: + description: base64-ed config + required: true + type: string + +jobs: + vrfv2_smoke_test: + name: VRFV2 Smoke Test with custom EL client client + environment: integration + runs-on: ubuntu20.04-8cores-32GB + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + env: + TEST_LOG_LEVEL: debug + REF_NAME: ${{ github.head_ref || github.ref_name }} + steps: + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Mask base64 config + run: | + BASE64_CONFIG_OVERRIDE=$(jq -r '.inputs.base64Config' $GITHUB_EVENT_PATH) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Parse base64 config + uses: ./.github/actions/setup-parse-base64-config + with: + base64Config: ${{ env.BASE64_CONFIG_OVERRIDE }} + - name: Send details to Step Summary + shell: bash + run: | + echo "### plugin image used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.PLUGIN_IMAGE }}\`" >>$GITHUB_STEP_SUMMARY + echo "### plugin-tests image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY + echo "\`${GITHUB_SHA}\`" >>$GITHUB_STEP_SUMMARY + echo "### Networks on which test was run" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.NETWORKS }}\`" >>$GITHUB_STEP_SUMMARY + echo "### Execution client used" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.ETH2_EL_CLIENT }}\`" >>$GITHUB_STEP_SUMMARY + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -run TestVRFv2Basic ./smoke/vrfv2_test.go 2>&1 | tee /tmp/gotest.log | gotestfmt + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ 
env.PLUGIN_VERSION }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + artifacts_name: vrf-test-logs + artifacts_location: ./integration-tests/smoke/logs/ + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + should_cleanup: false + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: "" diff --git a/.github/workflows/on-demand-vrfv2-performance-test.yml b/.github/workflows/on-demand-vrfv2-performance-test.yml new file mode 100644 index 00000000..31f09864 --- /dev/null +++ b/.github/workflows/on-demand-vrfv2-performance-test.yml @@ -0,0 +1,84 @@ +name: On Demand VRFV2 Performance Test +on: + workflow_dispatch: + inputs: + base64Config: + description: base64-ed config + required: true + type: string + performanceTestType: + description: Performance Test Type of test to run + type: choice + options: + - "Soak" + - "Load" + - "Stress" + - "Spike" +jobs: + vrfv2_performance_test: + name: VRFV2 Performance Test + environment: integration + runs-on: ubuntu20.04-8cores-32GB + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + env: + LOKI_URL: ${{ secrets.LOKI_URL }} + LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} + LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} + TEST_TYPE: ${{ inputs.performanceTestType }} + TEST_LOG_LEVEL: debug + REF_NAME: ${{ github.head_ref || github.ref_name }} + SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} + SLACK_CHANNEL: ${{ secrets.QA_VRF_SLACK_CHANNEL }} + WASP_LOG_LEVEL: info + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: ${{ inputs.network }} VRFV2 Performance Test + continue-on-error: true + - name: Checkout code + 
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Mask base64 config + run: | + BASE64_CONFIG_OVERRIDE=$(jq -r '.inputs.base64Config' $GITHUB_EVENT_PATH) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Merge and export base64 config + uses: ./.github/actions/setup-merge-base64-config + with: + base64Config: ${{ env.BASE64_CONFIG_OVERRIDE }} + - name: Send details to Step Summary + shell: bash + run: | + echo "### plugin image used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.PLUGIN_IMAGE }}\`" >>$GITHUB_STEP_SUMMARY + echo "### plugin-tests image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY + echo "\`${GITHUB_SHA}\`" >>$GITHUB_STEP_SUMMARY + echo "### Networks on which test was run" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.NETWORKS }}\`" >>$GITHUB_STEP_SUMMARY + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: cd ./integration-tests && go test -v -count=1 -timeout 24h -run TestVRFV2Performance/vrfv2_performance_test ./load/vrfv2 + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ env.PLUGIN_VERSION }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + artifacts_name: vrf-test-logs + artifacts_location: ./integration-tests/load/vrfv2/logs/ + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + should_cleanup: false + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} diff --git a/.github/workflows/on-demand-vrfv2plus-eth2-clients-test.yml b/.github/workflows/on-demand-vrfv2plus-eth2-clients-test.yml new file mode 100644 index 00000000..b64eb33f --- /dev/null +++ 
b/.github/workflows/on-demand-vrfv2plus-eth2-clients-test.yml @@ -0,0 +1,63 @@ +name: On Demand VRFV2Plus Smoke Test (Ethereum clients) +on: + workflow_dispatch: + inputs: + base64Config: + description: base64-ed config + required: true + type: string + +jobs: + vrfv2plus_smoke_test: + name: VRFV2Plus Smoke Test with custom EL client + environment: integration + runs-on: ubuntu20.04-8cores-32GB + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + env: + TEST_LOG_LEVEL: debug + REF_NAME: ${{ github.head_ref || github.ref_name }} + steps: + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Mask base64 config + run: | + BASE64_CONFIG_OVERRIDE=$(jq -r '.inputs.base64Config' $GITHUB_EVENT_PATH) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Parse base64 config + uses: ./.github/actions/setup-parse-base64-config + with: + base64Config: ${{ env.BASE64_CONFIG_OVERRIDE }} + - name: Send details to Step Summary + shell: bash + run: | + echo "### plugin image used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.PLUGIN_IMAGE }}\`" >>$GITHUB_STEP_SUMMARY + echo "### plugin-tests image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY + echo "\`${GITHUB_SHA}\`" >>$GITHUB_STEP_SUMMARY + echo "### Networks on which test was run" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.NETWORKS }}\`" >>$GITHUB_STEP_SUMMARY + echo "### Execution client used" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.ETH2_EL_CLIENT }}\`" >>$GITHUB_STEP_SUMMARY + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -run ^TestVRFv2Plus$/^Link_Billing$ ./smoke/vrfv2plus_test.go 2>&1 | tee /tmp/gotest.log | gotestfmt + 
test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ env.PLUGIN_VERSION }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + artifacts_name: vrfplus-test-logs + artifacts_location: ./integration-tests/smoke/logs/ + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + should_cleanup: false + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: "" diff --git a/.github/workflows/on-demand-vrfv2plus-performance-test.yml b/.github/workflows/on-demand-vrfv2plus-performance-test.yml new file mode 100644 index 00000000..7d08d327 --- /dev/null +++ b/.github/workflows/on-demand-vrfv2plus-performance-test.yml @@ -0,0 +1,85 @@ +name: On Demand VRFV2 Plus Performance Test +on: + workflow_dispatch: + inputs: + base64Config: + description: base64-ed config + required: true + type: string + performanceTestType: + description: Performance Test Type of test to run + type: choice + options: + - "Soak" + - "Load" + - "Stress" + - "Spike" + +jobs: + vrfv2plus_performance_test: + name: VRFV2 Plus Performance Test + environment: integration + runs-on: ubuntu20.04-8cores-32GB + permissions: + checks: write + pull-requests: write + id-token: write + contents: read + env: + LOKI_URL: ${{ secrets.LOKI_URL }} + LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} + LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} + TEST_TYPE: ${{ inputs.performanceTestType }} + TEST_LOG_LEVEL: debug + REF_NAME: ${{ github.head_ref || github.ref_name }} + SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} + SLACK_CHANNEL: ${{ secrets.QA_VRF_SLACK_CHANNEL }} + WASP_LOG_LEVEL: info + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ 
secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: ${{ inputs.network }} VRFV2 Plus Performance Test + continue-on-error: true + - name: Checkout code + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + - name: Mask base64 config + run: | + BASE64_CONFIG_OVERRIDE=$(jq -r '.inputs.base64Config' $GITHUB_EVENT_PATH) + echo ::add-mask::$BASE64_CONFIG_OVERRIDE + echo "BASE64_CONFIG_OVERRIDE=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_ENV + - name: Merge and export base64 config + uses: ./.github/actions/setup-merge-base64-config + with: + base64Config: ${{ env.BASE64_CONFIG_OVERRIDE }} + - name: Send details to Step Summary + shell: bash + run: | + echo "### plugin image used for this test run :link:" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.PLUGIN_IMAGE }}\`" >>$GITHUB_STEP_SUMMARY + echo "### plugin-tests image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY + echo "\`${GITHUB_SHA}\`" >>$GITHUB_STEP_SUMMARY + echo "### Networks on which test was run" >>$GITHUB_STEP_SUMMARY + echo "\`${{ env.NETWORKS }}\`" >>$GITHUB_STEP_SUMMARY + - name: Run Tests + uses: goplugin/plugin-github-actions/plugin-testing-framework/run-tests@92e0f299a87522c2a37bfc4686c4d8a96dc9d28b # v2.3.5 + with: + test_command_to_run: cd ./integration-tests && go test -v -count=1 -timeout 24h -run TestVRFV2PlusPerformance/vrfv2plus_performance_test ./load/vrfv2plus + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ env.PLUGIN_IMAGE }} + cl_image_tag: ${{ env.PLUGIN_VERSION }} + aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + artifacts_name: vrf-test-logs + artifacts_location: ./integration-tests/load/vrfv2plus/logs/ + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + should_cleanup: false + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ 
secrets.QA_KUBECONFIG }} diff --git a/.github/workflows/operator-ui-cd.yml b/.github/workflows/operator-ui-cd.yml new file mode 100644 index 00000000..f344b2d3 --- /dev/null +++ b/.github/workflows/operator-ui-cd.yml @@ -0,0 +1,59 @@ +name: Operator UI CD + +on: + push: + branches: + - develop + workflow_dispatch: + schedule: + - cron: "0 */1 * * *" # Run every hour + +jobs: + update-version: + permissions: + id-token: write + name: Update Version + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Update version + id: update + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: ./operator_ui/check.sh + + - name: Assume role capable of dispatching action + uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + with: + role-to-assume: ${{ secrets.AWS_OIDC_PLUGIN_CI_AUTO_PR_TOKEN_ISSUER_ROLE_ARN }} + role-duration-seconds: ${{ secrets.aws-role-duration-seconds }} + role-session-name: operator-ui-cd.update-version + aws-region: ${{ secrets.AWS_REGION }} + + - name: Get Github Token + id: get-gh-token + uses: goplugin/plugin-github-actions/github-app-token-issuer@chore/update-github-app-token-issuer + with: + url: ${{ secrets.AWS_INFRA_RELENG_TOKEN_ISSUER_LAMBDA_URL }} + + - name: Open PR + uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2 + with: + title: Update Operator UI from ${{ steps.update.outputs.current_tag }} to ${{ steps.update.outputs.latest_tag }} + token: ${{ steps.get-gh-token.outputs.access-token }} + branch: chore/update-operator-ui + commit-message: Update Operator UI from ${{ steps.update.outputs.current_tag }} to ${{ steps.update.outputs.latest_tag }} + body: ${{ steps.update.outputs.body }} + + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ 
secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Update Version + continue-on-error: true diff --git a/.github/workflows/operator-ui-ci.yml b/.github/workflows/operator-ui-ci.yml new file mode 100644 index 00000000..c1689960 --- /dev/null +++ b/.github/workflows/operator-ui-ci.yml @@ -0,0 +1,45 @@ +name: Operator UI CI +on: + pull_request: + +jobs: + check-gql: + permissions: + id-token: write + contents: read + # To allow writing comments to the current PR + pull-requests: write + + name: Breaking Changes GQL Check + runs-on: ubuntu-latest + steps: + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@v1 + with: + basic-auth: ${{ secrets.GRAFANA_CLOUD_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_CLOUD_HOST }} + this-job-name: Breaking Changes GQL Check + continue-on-error: true + + - name: Assume role capable of dispatching action + uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 + with: + role-to-assume: ${{ secrets.AWS_OIDC_PLUGIN_CI_OPERATOR_UI_ACCESS_TOKEN_ISSUER_ROLE_ARN }} + role-duration-seconds: 3600 + role-session-name: operator-ui-ci.check-gql + aws-region: ${{ secrets.AWS_REGION }} + + - name: Get Github Token + id: get-gh-token + uses: goplugin/plugin-github-actions/github-app-token-issuer@main + with: + url: ${{ secrets.AWS_INFRA_RELENG_TOKEN_ISSUER_LAMBDA_URL }} + + - uses: convictional/trigger-workflow-and-wait@f69fa9eedd3c62a599220f4d5745230e237904be #v1.6.5 + with: + owner: goplugin + repo: operator-ui + github_token: ${{ steps.get-gh-token.outputs.access-token }} + workflow_file_name: plugin-ci.yml + client_payload: '{"ref": "${{ github.event.pull_request.head.sha }}"}' diff --git a/.github/workflows/pr-labels.yml b/.github/workflows/pr-labels.yml new file mode 100644 index 00000000..8d9aface --- /dev/null +++ b/.github/workflows/pr-labels.yml @@ -0,0 
+1,54 @@ +name: PR Labels + +on: + pull_request: + types: [labeled] + +jobs: + crib: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - name: Comment on PR + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + const labelsToCheck = ["crib"]; + const { owner, repo, number: prNumber } = context.issue; + const { data: labels } = await github.rest.issues.listLabelsOnIssue({ owner, repo, issue_number: prNumber }); + const labelMatches = labels.some(label => labelsToCheck.includes(label.name)); + + if (!labelMatches) { + core.info("No 'crib' PR label found. Proceeding."); + return; + } + + const comment = `## CRIB Environment Details :information_source: + + CRIB activated via the 'crib' label. To destroy the environment, remove the 'crib' PR label or close the PR. + + Please review the following details: + + ### Subdomains + + _Use these subdomains to access the CRIB environment. They are prefixes to the internal base domain._ + + - crib-plugin-${prNumber}-node1. + - crib-plugin-${prNumber}-node2. + - crib-plugin-${prNumber}-node3. + - crib-plugin-${prNumber}-node4. + - crib-plugin-${prNumber}-node5. + - crib-plugin-${prNumber}-node6. + - crib-plugin-${prNumber}-geth-http. + - crib-plugin-${prNumber}-geth-ws. + - crib-plugin-${prNumber}-mockserver. + `; + + await github.rest.issues.createComment({ + owner, + repo, + issue_number: prNumber, + body: comment + }); diff --git a/.github/workflows/readme.yml b/.github/workflows/readme.yml new file mode 100644 index 00000000..d825c578 --- /dev/null +++ b/.github/workflows/readme.yml @@ -0,0 +1,40 @@ +# +# This action checks PRs to see if any README* files were updated. +# If none were, it will add a message to the PR asking if it would make sense to do so. +# +name: Readme + +on: pull_request + +jobs: + readme: + # For security reasons, GITHUB_TOKEN is read-only on forks, so we cannot leave comments on PRs. 
+ # This check skips the job if it is detected we are running on a fork. + if: ${{ github.event.pull_request.head.repo.full_name == 'goplugin/pluginv3.0' }} + name: Readme checker + runs-on: ubuntu-latest + steps: + - name: Check for changed files + id: changedfiles + uses: umani/changed-files@d7f842d11479940a6036e3aacc6d35523e6ba978 # Version 4.1.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + pattern: '^(?!.*node_modules).*README\.md$' + - name: Make a comment + uses: unsplash/comment-on-pr@ffe8f97ccc63ce12c3c23c6885b169db67958d3b # Version 1.3.0 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + if: contains(steps.changedfiles.outputs.files_updated, 'README') != true && contains(steps.changedfiles.outputs.files_created, 'README') != true + with: + msg: "I see that you haven't updated any README files. Would it make sense to do so?" + check_for_duplicate_msg: true + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Readme checker + continue-on-error: true \ No newline at end of file diff --git a/.github/workflows/sigscanner.yml b/.github/workflows/sigscanner.yml new file mode 100644 index 00000000..76eaf4b2 --- /dev/null +++ b/.github/workflows/sigscanner.yml @@ -0,0 +1,35 @@ +name: 'SigScanner Check' + +on: + merge_group: + push: + +jobs: + sigscanner-check: + runs-on: ubuntu-latest + steps: + - name: "SigScanner checking ${{ github.sha }} by ${{ github.actor }}" + env: + API_TOKEN: ${{ secrets.SIGSCANNER_API_TOKEN }} + API_URL: ${{ secrets.SIGSCANNER_API_URL }} + run: | + echo "🔎 Checking commit ${{ github.sha }} by ${{ github.actor }} in ${{ github.repository }} - ${{ github.event_name }}" + CODE=`curl --write-out '%{http_code}' -X POST -H "Content-Type: 
application/json" -H "Authorization: $API_TOKEN" --silent --output /dev/null --url "$API_URL" --data '{"commit":"${{ github.sha }}","repository":"${{ github.repository }}","author":"${{ github.actor }}"}'` + echo "Received $CODE" + if [[ "$CODE" == "200" ]]; then + echo "✅ Commit is verified" + exit 0 + else + echo "❌ Commit is NOT verified" + exit 1 + fi + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: sigscanner-check + continue-on-error: true diff --git a/.github/workflows/solidity-foundry.yml b/.github/workflows/solidity-foundry.yml new file mode 100644 index 00000000..815b5130 --- /dev/null +++ b/.github/workflows/solidity-foundry.yml @@ -0,0 +1,100 @@ +name: Solidity Foundry +on: [pull_request] + +env: + FOUNDRY_PROFILE: ci + +jobs: + changes: + name: Detect changes + runs-on: ubuntu-latest + outputs: + changes: ${{ steps.changes.outputs.src }} + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd # v3.0.0 + id: changes + with: + # Foundry is only used for Solidity v0.8 contracts, therefore we can ignore + # changes to older contracts. 
+ filters: | + src: + - 'contracts/src/v0.8/**/*' + - 'contracts/test/v0.8/foundry/**/*' + - '.github/workflows/solidity-foundry.yml' + - 'contracts/foundry.toml' + - 'contracts/gas-snapshots/*.gas-snapshot' + - '.gitmodules' + - 'contracts/foundry-lib' + + tests: + strategy: + fail-fast: false + matrix: + product: [vrf, automation, llo-feeds, l2ep, functions, keystone, shared] + needs: [changes] + name: Foundry Tests ${{ matrix.product }} + # See https://github.com/foundry-rs/foundry/issues/3827 + runs-on: ubuntu-22.04 + + # The if statements for steps after checkout repo is workaround for + # passing required check for PRs that don't have filtered changes. + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + submodules: recursive + + # Only needed because we use the NPM versions of packages + # and not native Foundry. This is to make sure the dependencies + # stay in sync. + - name: Setup NodeJS + if: needs.changes.outputs.changes == 'true' + uses: ./.github/actions/setup-nodejs + + - name: Install Foundry + if: needs.changes.outputs.changes == 'true' + uses: foundry-rs/foundry-toolchain@v1 + with: + # Has to match the `make foundry` version. + version: nightly-5b7e4cb3c882b28f3c32ba580de27ce7381f415a + + - name: Run Forge build + if: needs.changes.outputs.changes == 'true' + run: | + forge --version + forge build + id: build + working-directory: contracts + env: + FOUNDRY_PROFILE: ${{ matrix.product }} + + - name: Run Forge tests + if: needs.changes.outputs.changes == 'true' + run: | + forge test -vvv + id: test + working-directory: contracts + env: + FOUNDRY_PROFILE: ${{ matrix.product }} + + - name: Run Forge snapshot + if: ${{ !contains(fromJson('["vrf"]'), matrix.product) && !contains(fromJson('["automation"]'), matrix.product) && needs.changes.outputs.changes == 'true' }} + run: | + forge snapshot --nmt "testFuzz_\w{1,}?" 
--check gas-snapshots/${{ matrix.product }}.gas-snapshot + id: snapshot + working-directory: contracts + env: + FOUNDRY_PROFILE: ${{ matrix.product }} + + - name: Collect Metrics + if: needs.changes.outputs.changes == 'true' + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Foundry Tests ${{ matrix.product }} + continue-on-error: true diff --git a/.github/workflows/solidity-hardhat.yml b/.github/workflows/solidity-hardhat.yml new file mode 100644 index 00000000..1fcb06b7 --- /dev/null +++ b/.github/workflows/solidity-hardhat.yml @@ -0,0 +1,182 @@ +name: Solidity-Hardhat + +on: + merge_group: + push: + +env: + NODE_OPTIONS: --max_old_space_size=8192 + +defaults: + run: + shell: bash + +jobs: + changes: + name: Detect changes + runs-on: ubuntu-latest + outputs: + changes: ${{ steps.changes.outputs.src }} + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd # v3.0.0 + id: changes + with: + filters: | + src: + - 'contracts/src/!(v0.8/(llo-feeds|keystone|ccip)/**)/**/*' + - 'contracts/test/**/*' + - 'contracts/package.json' + - 'contracts/pnpm-lock.yaml' + - 'contracts/hardhat.config.ts' + - 'contracts/ci.json' + - '.github/workflows/solidity-hardhat.yml' + + split-tests: + name: Split Solidity Tests + runs-on: ubuntu-latest + outputs: + splits: ${{ steps.split.outputs.splits }} + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Generate splits + id: split + uses: ./.github/actions/split-tests + with: + config: ./contracts/ci.json + - name: Collect Metrics + id: collect-gha-metrics + uses: 
goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Split Solidity Tests + continue-on-error: true + + solidity-coverage-splits: + needs: [changes, split-tests] + if: needs.changes.outputs.changes == 'true' + name: Solidity Coverage ${{ matrix.split.id }} ${{ fromJSON('["(skipped)", ""]')[needs.changes.outputs.changes == 'true'] }} + strategy: + fail-fast: false + matrix: + split: ${{ fromJson(needs.split-tests.outputs.splits) }} + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Setup NodeJS + uses: ./.github/actions/setup-nodejs + - name: Setup Hardhat + uses: ./.github/actions/setup-hardhat + with: + namespace: coverage + - name: Run coverage + env: + SPLIT: ${{ matrix.split.coverageTests }} + shell: bash + run: pnpm coverage --testfiles "$SPLIT" + working-directory: contracts + - name: Push coverage + run: ./tools/bin/codecov -f ./contracts/coverage.json + - name: Rename coverage + run: mv ./contracts/coverage.json ./contracts/coverage-${{ matrix.split.idx }}.json + - name: Upload coverage + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + name: solidity-coverage-${{ matrix.split.idx }} + path: ./contracts/coverage-${{ matrix.split.idx }}.json + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Solidity Coverage ${{ matrix.split.id }} + continue-on-error: true + + solidity-coverage: + needs: [changes, solidity-coverage-splits] + if: 
needs.changes.outputs.changes == 'true' + name: Solidity Coverage ${{ fromJSON('["(skipped)", ""]')[needs.changes.outputs.changes == 'true'] }} + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Setup NodeJS + uses: ./.github/actions/setup-nodejs + - name: Make coverage directory + run: mkdir ./contracts/coverage-reports + - name: Download coverage + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + path: ./contracts/coverage-reports + - name: Display structure of downloaded files + run: ls -R coverage-reports + working-directory: contracts + - name: Generate merged report + run: pnpm istanbul report text text-summary + working-directory: contracts + + solidity-splits: + needs: [changes, split-tests] + if: needs.changes.outputs.changes == 'true' + name: Solidity ${{ matrix.split.id }} ${{ fromJSON('["(skipped)", ""]')[needs.changes.outputs.changes == 'true'] }} + strategy: + fail-fast: false + matrix: + split: ${{ fromJson(needs.split-tests.outputs.splits) }} + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Setup NodeJS + uses: ./.github/actions/setup-nodejs + - name: Setup Hardhat + uses: ./.github/actions/setup-hardhat + with: + namespace: coverage + - name: Run tests + env: + SPLIT: ${{ matrix.split.tests }} + working-directory: contracts + run: pnpm test -- $SPLIT + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Solidity ${{ matrix.split.id }} + continue-on-error: true + + solidity: + needs: [changes, solidity-splits] + name: Solidity + runs-on: 
ubuntu-latest + if: always() + steps: + - run: echo 'Solidity tests finished!' + - name: Check test results + run: | + if [[ "${{ needs.changes.result }}" = "failure" || "${{ needs.solidity-splits.result }}" = "failure" ]]; then + echo "One or more changes / solidity-splits jobs failed" + exit 1 + else + echo "All test jobs passed successfully" + fi + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Solidity + continue-on-error: true \ No newline at end of file diff --git a/.github/workflows/solidity.yml b/.github/workflows/solidity.yml new file mode 100644 index 00000000..8b8eca45 --- /dev/null +++ b/.github/workflows/solidity.yml @@ -0,0 +1,169 @@ +name: Solidity + +on: + merge_group: + push: + +defaults: + run: + shell: bash + +jobs: + changes: + name: Detect changes + runs-on: ubuntu-latest + outputs: + changes: ${{ steps.changes.outputs.src }} + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd # v3.0.0 + id: changes + with: + list-files: "csv" + filters: | + src: + - 'contracts/**/*' + - '.github/workflows/solidity.yml' + - '.github/workflows/solidity-foundry.yml' + old_sol: + - 'contracts/src/v0.4/**/*' + - 'contracts/src/v0.5/**/*' + - 'contracts/src/v0.6/**/*' + - 'contracts/src/v0.7/**/*' + + - name: Fail if read-only files have changed + if: ${{ steps.changes.outputs.old_sol == 'true' }} + run: | + echo "One or more read-only Solidity file(s) has changed." 
+ for file in ${{ steps.changes.outputs.old_sol_files }}; do + echo "$file was changed" + done + exit 1 + + prepublish-test: + needs: [changes] + if: needs.changes.outputs.changes == 'true' + name: Prepublish Test ${{ fromJSON('["(skipped)", ""]')[needs.changes.outputs.changes == 'true'] }} + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Setup NodeJS + uses: ./.github/actions/setup-nodejs + - name: Run Prepublish test + working-directory: contracts + run: pnpm prepublishOnly + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Prepublish Test + continue-on-error: true + + native-compile: + needs: [changes] + if: needs.changes.outputs.changes == 'true' + name: Native Compilation ${{ fromJSON('["(skipped)", ""]')[needs.changes.outputs.changes == 'true'] }} + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Checkout diff-so-fancy + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: so-fancy/diff-so-fancy + ref: a673cb4d2707f64d92b86498a2f5f71c8e2643d5 # v1.4.3 + path: diff-so-fancy + - name: Install diff-so-fancy + run: echo "$GITHUB_WORKSPACE/diff-so-fancy" >> $GITHUB_PATH + - name: Setup NodeJS + uses: ./.github/actions/setup-nodejs + with: + prod: "true" + - name: Setup Go + uses: ./.github/actions/setup-go + - name: Run native compile and generate wrappers + run: make wrappers-all + working-directory: ./contracts + - name: Verify local solc binaries + run: ./tools/ci/check_solc_hashes + - name: Check if Go solidity wrappers are updated + if: ${{ 
needs.changes.outputs.changes == 'true' }} + run: git diff --minimal --color --exit-code | diff-so-fancy + - name: Comment on fix instructions + env: + GITHUB_TOKEN: ${{ github.token }} + if: ${{ failure() }} + run: gh pr comment -b 'Go solidity wrappers are out-of-date, regenerate them via the `make wrappers-all` command' + - name: Collect Metrics + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Native Compilation + continue-on-error: true + + # The if statements for steps after checkout repo is a workaround for + # passing required check for PRs that don't have filtered changes. + lint: + defaults: + run: + working-directory: contracts + needs: [changes] + name: Solidity Lint + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: Setup NodeJS + if: needs.changes.outputs.changes == 'true' + uses: ./.github/actions/setup-nodejs + - name: Run pnpm lint + if: needs.changes.outputs.changes == 'true' + run: pnpm lint + - name: Run solhint + if: needs.changes.outputs.changes == 'true' + run: pnpm solhint + - name: Collect Metrics + if: needs.changes.outputs.changes == 'true' + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Lint + continue-on-error: true + + prettier: + defaults: + run: + working-directory: contracts + needs: [changes] + name: Prettier Formatting + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 
# v4.1.1 + - name: Setup NodeJS + if: needs.changes.outputs.changes == 'true' + uses: ./.github/actions/setup-nodejs + - name: Run prettier check + if: needs.changes.outputs.changes == 'true' + run: pnpm prettier:check + - name: Collect Metrics + if: needs.changes.outputs.changes == 'true' + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Prettier Formatting + continue-on-error: true diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 00000000..8eb95f41 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,26 @@ +# Workflow is triggered daily midnight UTC +# A PR with more than 60 days of inactivity will be marked as stale +# A PR that's stale for more than 7 days will be automatically closed +# Issues are exempt from auto marking as stale but issues with manually added 'stale' label are eligible for auto closure after 7 days. +# PRs with assignees are exempt from auto stale marking, it's the responsibility of the assignee to get the PR progressed either with review/merge or closure. +name: Manage stale Issues and PRs + +on: + schedule: + - cron: "0 0 * * *" # Will be triggered every day at midnight UTC + +jobs: + stale: + + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + exempt-all-pr-assignees: true + stale-pr-message: 'This PR is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 7 days.' + days-before-issue-stale: -1 # disables marking issues as stale automatically. 
Issues can still be marked as stale manually, in which case the closure policy applies. diff --git a/.github/workflows/sync-develop-from-smartcontractkit-chainlink.yml b/.github/workflows/sync-develop-from-smartcontractkit-chainlink.yml new file mode 100644 index 00000000..552b7545 --- /dev/null +++ b/.github/workflows/sync-develop-from-smartcontractkit-chainlink.yml @@ -0,0 +1,39 @@ +name: Sync develop from goplugin/pluginv3.0 + +on: + schedule: + # * is a special character in YAML so you have to quote this string + - cron: '*/30 * * * *' + +jobs: + sync: + name: Sync + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: develop + if: github.repository != 'goplugin/pluginv3.0' + - name: Sync + run: | + git remote add upstream "https://github.com/goplugin/pluginv3.0.git" + COMMIT_HASH_UPSTREAM=$(git ls-remote upstream develop | grep -P '^[0-9a-f]{40}\trefs/heads/develop$' | cut -f 1) + COMMIT_HASH_ORIGIN=$(git ls-remote origin develop | grep -P '^[0-9a-f]{40}\trefs/heads/develop$' | cut -f 1) + if [ "$COMMIT_HASH_UPSTREAM" = "$COMMIT_HASH_ORIGIN" ]; then + echo "Both remotes have develop at $COMMIT_HASH_UPSTREAM. No need to sync." + else + echo "upstream has develop at $COMMIT_HASH_UPSTREAM. origin has develop at $COMMIT_HASH_ORIGIN. Syncing..." 
+ git fetch upstream + git push origin upstream/develop:develop + fi + if: github.repository != 'goplugin/pluginv3.0' + - name: Collect Metrics + if: always() + id: collect-gha-metrics + uses: goplugin/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 + with: + org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} + basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} + hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} + this-job-name: Sync + continue-on-error: true diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..2bdfd2c3 --- /dev/null +++ b/.gitignore @@ -0,0 +1,94 @@ +# dependencies +node_modules/ +tmp/ +.pnp +.pnp.js +tools/bin/abigen + +/plugin +core/plugin + +# SQLite +tools/clroot/db.sqlite3-shm +tools/clroot/db.sqlite3-wal + +# Tooling caches +*.tsbuildinfo +.eslintcache + +# Log files +*.log + +# misc +.DS_Store +.envrc +.env* +!.github/actions/setup-postgres/.env +.direnv +.idea +.vscode/ +*.iml +debug.env + +# codeship +*.aes +dockercfg +env +credentials.env +gcr_creds.env + +# DB backups + +cl_backup_*.tar.gz + +# Test artifacts +core/cmd/TestClient_ImportExportP2PKeyBundle_test_key.json +output.txt +race.* +golangci-lint-output.txt +/golangci-lint/ + +# DB state +./db/ +.s.PGSQL.5432.lock + +# can be left behind by tests +core/cmd/vrfkey1 + +# Integration Tests +integration-tests/**/logs/ +tests-*.xml +*.test +tmp-manifest-*.yaml +ztarrepo.tar.gz +**/test-ledger/* +__debug_bin* + +# goreleaser builds +cosign.* +dist/ +MacOSX* + +# Test & linter reports +*report.xml +*report.json +*.out + +contracts/yarn.lock + +# Ignore DevSpace cache and log folder +.devspace/ +go.work* + +# This sometimes shows up for some reason +tools/flakeytests/coverage.txt + +# Integration tests create these files +.test_summary/ +.run.id + +# Fuzz tests can create these files +**/testdata/fuzz/* + +# Runtime test configuration that might contain secrets +overrides.toml \ No newline at end of file diff --git a/.gitmodules 
b/.gitmodules new file mode 100644 index 00000000..556d344c --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "contracts/foundry-lib/forge-std"] + path = contracts/foundry-lib/forge-std + url = https://github.com/foundry-rs/forge-std diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000..18c2b3ab --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,86 @@ +run: + timeout: 15m0s +linters: + enable: + - exhaustive + - exportloopref + - revive + - goimports + - gosec + - misspell + - rowserrcheck + - errorlint + - unconvert + - sqlclosecheck + - noctx +linters-settings: + exhaustive: + default-signifies-exhaustive: true + goimports: + local-prefixes: github.com/goplugin/pluginv3.0 + golint: + min-confidence: 1.0 + gosec: + excludes: + - G101 + - G104 + # - G204 + # - G304 + # - G404 + govet: + # report about shadowed variables + check-shadowing: true + errorlint: + # Allow formatting of errors without %w + errorf: false + revive: + confidence: 0.8 + rules: + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: if-return + - name: increment-decrement + # - name: var-naming + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + # - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + # - name: unused-parameter + - name: unreachable-code + - name: redefines-builtin-id + - name: waitgroup-by-value + - name: unconditional-recursion + - name: struct-tag + # - name: string-format + - name: string-of-int + # - name: range-val-address + - name: range-val-in-closure + - name: modifies-value-receiver + - name: modifies-parameter + - name: identical-branches + - name: get-return + # - name: flag-parameter + - name: early-return + - name: defer + - name: constant-logical-expr + # - name: 
confusing-naming + # - name: confusing-results + - name: bool-literal-in-expr + - name: atomic +issues: + exclude-rules: + - path: test + text: "^G404:" + linters: + - gosec diff --git a/.goreleaser.develop.yaml b/.goreleaser.develop.yaml new file mode 100644 index 00000000..82028263 --- /dev/null +++ b/.goreleaser.develop.yaml @@ -0,0 +1,194 @@ +## goreleaser <1.14.0 +project_name: plugin + +env: + - ZIG_EXEC={{ if index .Env "ZIG_EXEC" }}{{ .Env.ZIG_EXEC }}{{ else }}zig{{ end }} + - IMAGE_PREFIX={{ if index .Env "IMAGE_PREFIX" }}{{ .Env.IMAGE_PREFIX }}{{ else }}localhost:5001{{ end }} + - IMAGE_LABEL_DESCRIPTION="node of the decentralized oracle network, bridging on and off-chain computation" + - IMAGE_LABEL_LICENSES="MIT" + - IMAGE_LABEL_SOURCE="https://github.com/goplugin/{{ .ProjectName }}" + +before: + hooks: + - ./tools/bin/goreleaser_utils before_hook + +# See https://goreleaser.com/customization/build/ +builds: + - binary: plugin + id: linux-arm64 + goos: + - linux + goarch: + - arm64 + hooks: + post: ./tools/bin/goreleaser_utils build_post_hook {{ dir .Path }} {{ .Os }} {{ .Arch }} + env: + - CGO_ENABLED=1 + - CC=$ZIG_EXEC cc -target aarch64-linux-gnu + - CCX=$ZIG_EXEC c++ -target aarch64-linux-gnu + flags: + - -trimpath + - -buildmode=pie + ldflags: + - -s -w -r=$ORIGIN/libs + - -X github.com/goplugin/pluginv3.0/v2/core/static.Version={{ .Env.PLUGIN_VERSION }} + - -X github.com/goplugin/pluginv3.0/v2/core/static.Sha={{ .FullCommit }} + - binary: plugin + id: linux-amd64 + goos: + - linux + goarch: + - amd64 + hooks: + post: ./tools/bin/goreleaser_utils build_post_hook {{ dir .Path }} {{ .Os }} {{ .Arch }} + env: + - CGO_ENABLED=1 + - CC=$ZIG_EXEC cc -target x86_64-linux-gnu + - CCX=$ZIG_EXEC c++ -target x86_64-linux-gnu + flags: + - -trimpath + - -buildmode=pie + ldflags: + - -s -w -r=$ORIGIN/libs + - -X github.com/goplugin/pluginv3.0/v2/core/static.Version={{ .Env.PLUGIN_VERSION }} + - -X github.com/goplugin/pluginv3.0/v2/core/static.Sha={{ .FullCommit 
}} + +# See https://goreleaser.com/customization/docker/ +dockers: + - id: root-linux-amd64 + dockerfile: core/plugin.goreleaser.Dockerfile + use: buildx + goos: linux + goarch: amd64 + extra_files: + - tmp/linux_amd64/libs + build_flag_templates: + - "--platform=linux/amd64" + - "--pull" + - "--build-arg=COMMIT_SHA={{ .FullCommit }}" + - "--label=org.opencontainers.image.created={{ .Date }}" + - "--label=org.opencontainers.image.description={{ .Env.IMAGE_LABEL_DESCRIPTION }}" + - "--label=org.opencontainers.image.licenses={{ .Env.IMAGE_LABEL_LICENSES }}" + - "--label=org.opencontainers.image.revision={{ .FullCommit }}" + - "--label=org.opencontainers.image.source={{ .Env.IMAGE_LABEL_SOURCE }}" + - "--label=org.opencontainers.image.title={{ .ProjectName }}" + - "--label=org.opencontainers.image.version={{ .Env.PLUGIN_VERSION }}" + - "--label=org.opencontainers.image.url={{ .Env.IMAGE_LABEL_SOURCE }}" + image_templates: + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:develop-root-amd64" + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:sha-{{ .ShortCommit }}-root-amd64" + - id: root-linux-arm64 + dockerfile: core/plugin.goreleaser.Dockerfile + use: buildx + goos: linux + goarch: arm64 + extra_files: + - tmp/linux_arm64/libs + build_flag_templates: + - "--platform=linux/arm64" + - "--pull" + - "--build-arg=COMMIT_SHA={{ .FullCommit }}" + - "--label=org.opencontainers.image.created={{ .Date }}" + - "--label=org.opencontainers.image.description={{ .Env.IMAGE_LABEL_DESCRIPTION }}" + - "--label=org.opencontainers.image.licenses={{ .Env.IMAGE_LABEL_LICENSES }}" + - "--label=org.opencontainers.image.revision={{ .FullCommit }}" + - "--label=org.opencontainers.image.source={{ .Env.IMAGE_LABEL_SOURCE }}" + - "--label=org.opencontainers.image.title={{ .ProjectName }}" + - "--label=org.opencontainers.image.version={{ .Env.PLUGIN_VERSION }}" + - "--label=org.opencontainers.image.url={{ .Env.IMAGE_LABEL_SOURCE }}" + image_templates: + - "{{ .Env.IMAGE_PREFIX }}/{{ 
.ProjectName }}-develop:develop-root-arm64" + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:sha-{{ .ShortCommit }}-root-arm64" + - id: linux-amd64 + dockerfile: core/plugin.goreleaser.Dockerfile + use: buildx + goos: linux + goarch: amd64 + extra_files: + - tmp/linux_amd64/libs + build_flag_templates: + - "--platform=linux/amd64" + - "--pull" + - "--build-arg=PLUGIN_USER=plugin" + - "--build-arg=COMMIT_SHA={{ .FullCommit }}" + - "--label=org.opencontainers.image.created={{ .Date }}" + - "--label=org.opencontainers.image.description={{ .Env.IMAGE_LABEL_DESCRIPTION }}" + - "--label=org.opencontainers.image.licenses={{ .Env.IMAGE_LABEL_LICENSES }}" + - "--label=org.opencontainers.image.revision={{ .FullCommit }}" + - "--label=org.opencontainers.image.source={{ .Env.IMAGE_LABEL_SOURCE }}" + - "--label=org.opencontainers.image.title={{ .ProjectName }}" + - "--label=org.opencontainers.image.version={{ .Env.PLUGIN_VERSION }}" + - "--label=org.opencontainers.image.url={{ .Env.IMAGE_LABEL_SOURCE }}" + image_templates: + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:develop-amd64" + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:sha-{{ .ShortCommit }}-amd64" + - id: linux-arm64 + dockerfile: core/plugin.goreleaser.Dockerfile + use: buildx + goos: linux + goarch: arm64 + extra_files: + - tmp/linux_arm64/libs + build_flag_templates: + - "--platform=linux/arm64" + - "--pull" + - "--build-arg=PLUGIN_USER=plugin" + - "--build-arg=COMMIT_SHA={{ .FullCommit }}" + - "--label=org.opencontainers.image.created={{ .Date }}" + - "--label=org.opencontainers.image.description={{ .Env.IMAGE_LABEL_DESCRIPTION }}" + - "--label=org.opencontainers.image.licenses={{ .Env.IMAGE_LABEL_LICENSES }}" + - "--label=org.opencontainers.image.revision={{ .FullCommit }}" + - "--label=org.opencontainers.image.source={{ .Env.IMAGE_LABEL_SOURCE }}" + - "--label=org.opencontainers.image.title={{ .ProjectName }}" + - "--label=org.opencontainers.image.version={{ .Env.PLUGIN_VERSION }}" + - 
"--label=org.opencontainers.image.url={{ .Env.IMAGE_LABEL_SOURCE }}" + image_templates: + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:develop-arm64" + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:sha-{{ .ShortCommit }}-arm64" + +# See https://goreleaser.com/customization/docker_manifest/ +docker_manifests: + - name_template: "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:develop-root" + image_templates: + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:develop-root-amd64" + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:develop-root-arm64" + - name_template: "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:sha-{{ .ShortCommit }}-root" + image_templates: + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:sha-{{ .ShortCommit }}-root-amd64" + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:sha-{{ .ShortCommit }}-root-arm64" + - name_template: "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:develop" + image_templates: + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:develop-amd64" + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:develop-arm64" + - name_template: "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:sha-{{ .ShortCommit }}" + image_templates: + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:sha-{{ .ShortCommit }}-amd64" + - "{{ .Env.IMAGE_PREFIX }}/{{ .ProjectName }}-develop:sha-{{ .ShortCommit }}-arm64" + +# See https://goreleaser.com/customization/docker_sign/ +docker_signs: + - artifacts: all + stdin: "{{ .Env.COSIGN_PASSWORD }}" + +archives: + - rlcp: true + files: + - src: tmp/{{ .Os }}_{{ .Arch }}/libs/* + dst: libs + strip_parent: true + +checksum: + name_template: "checksums.txt" + +snapshot: + name_template: "{{ .Env.PLUGIN_VERSION }}-{{ .ShortCommit }}" + +changelog: + sort: asc + filters: + exclude: + - "^docs:" + - "^test:" +# modelines, feel free to remove those if you don't want/use them: +# yaml-language-server: $schema=https://goreleaser.com/static/schema.json +# 
vim: set ts=2 sw=2 tw=0 fo=cnqoj diff --git a/.mockery.yaml b/.mockery.yaml new file mode 100644 index 00000000..331655e7 --- /dev/null +++ b/.mockery.yaml @@ -0,0 +1 @@ +# empty config to prevent info/debug level log spam diff --git a/.nancy-ignore b/.nancy-ignore new file mode 100644 index 00000000..eb019c8c --- /dev/null +++ b/.nancy-ignore @@ -0,0 +1,67 @@ +# Introduced by golang/github.com/coreos/etcd@3.3.13, which is a dependency of a dependency for the latest version of +# Viper. Not much we can do until they update themselves. +bba60acb-c7b5-4621-af69-f4085a8301d0 +d373dc3f-aa88-483b-b501-20fe5382cc80 +5def94e5-b89c-4a94-b9c6-ae0e120784c2 + +# To get more detail locally run `go list -json -m all | nancy sleuth` +# dcf6da03-f9dd-4a4e-b792-0262de36a0b1 is because of gogo/protobuf@1.3.1 +# which is used by go-libp2p-core, need them to upgrade to 1.3.2 before we can remove it. +dcf6da03-f9dd-4a4e-b792-0262de36a0b1 + +# Introduced by golang/k8s.io/kubernetes@1.13.0, which is a dependency of a dependency of HELM, that the plugin-testing-framework +# utilizes. It is not included in the binary. It is used solely to create and interact with ephemeral test environments. 
+b4583f58-ba0b-49a6-9e68-389231015280 +ba07f410-6310-431b-a413-32735d54841c +b187d3db-eedf-47dd-b446-8b07446c65ee +cc851a2f-70e9-423c-b5cb-db88d3849ac8 +2ae7c596-34f7-4bbe-84e2-f61b36537a39 +3a0ce247-0b82-418a-b816-35b6438800ab +69fe99cf-3318-4f35-a670-cf5ab2bd5775 +7b51402c-df4c-44b0-905c-8c66c96bdd74 +ed41942f-c3a9-4391-8a1a-ffb415b33d72 +61569cd3-36ef-43ac-b70b-9c2200fe05b8 +5e882795-abf5-44d8-8908-4283343e0050 +a4cc9e4c-2218-44c6-877f-8f19f3cee9fe +dec168d7-4b8e-4a62-85d7-954b06bcc4d5 +7aa92d05-1ca5-4428-a683-ba3aee9daee6 +74215f7a-f5b5-4bea-b6a7-a20a6ee244d3 +2aaf7ae8-c181-453c-a53a-ad18f4b30a56 +7a182d40-9217-4391-85e9-6c44219efa7a +3f9f4830-26e4-4acd-915b-448d38934e43 +abf9a6a0-789b-4817-b137-3e349568c992 +8817a9cb-1215-425f-909b-8074e7195789 +b73f09dd-3602-4a7a-ac38-063ef40b1a82 +42a87f3d-7d93-43a5-968c-9c077c44688c +89d4f32c-efa6-4060-a367-169e3de199d4 +b0a2b826-c1ba-43a3-94c1-726880c1535d + +# Skip indirect/transitive dependencies where code path not hit, or affected library features are not utilized. 
+CVE-2022-31030 +CVE-2022-29153 +CVE-2022-24687 +CVE-2022-29153 +CVE-2022-24687 +sonatype-2019-0772 +sonatype-2021-0853 +sonatype-2022-3945 +CVE-2021-42576 +CVE-2022-29162 +CVE-2022-21221 +sonatype-2021-0456 +CVE-2021-3127 +CVE-2022-24450 +CVE-2022-29946 +CVE-2022-26652 +CVE-2022-28357 +CVE-2021-23772 +sonatype-2021-0598 +sonatype-2021-1485 +CVE-2022-23328 +sonatype-2021-0076 +CVE-2022-37450 +sonatype-2021-4899 +sonatype-2020-0722 +CVE-2021-41803 # consul +CVE-2022-44797 +CVE-2022-39389 # golang/github.com/btcsuite/btcd@v0.22.1 diff --git a/.nvmrc b/.nvmrc new file mode 100644 index 00000000..f274881e --- /dev/null +++ b/.nvmrc @@ -0,0 +1 @@ +v16.16.0 diff --git a/.tarignore b/.tarignore new file mode 100644 index 00000000..34ed65ec --- /dev/null +++ b/.tarignore @@ -0,0 +1,19 @@ +ztarrepo.tar.gz +node_modules +.github +.tarignore +.direnv +.idea +.vscode +.DS_Store +.envrc +abigen + +dockercfg +env +credentials.env +gcr_creds.env +tests-*.xml +*.test +tmp-manifest-*.yaml +logs diff --git a/.tool-versions b/.tool-versions new file mode 100644 index 00000000..e52a1f25 --- /dev/null +++ b/.tool-versions @@ -0,0 +1,8 @@ +golang 1.21.5 +mockery 2.38.0 +nodejs 16.16.0 +postgres 13.3 +helm 3.10.3 +zig 0.10.1 +golangci-lint 1.55.2 +protoc 25.1 diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000..128085cc --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,118 @@ +# CODEOWNERS Best Practices +# 1. Per Github docs: "Order is important; the last matching pattern takes the most precedence." 
+# Please define less specific codeowner paths before more specific codeowner paths in order for the more specific rule to have priority + +# Core +/core @goplugin/foundations + +# Chains +/common @goplugin/integrations +/core/chains/ @goplugin/integrations + +# Services +/core/services/directrequest @goplugin/keepers +/core/services/feeds @goplugin/FMS + +# To be deprecated in Plugin V3 +/core/services/fluxmonitorv2 @goplugin/foundations +/core/services/job @goplugin/ccip +/core/services/keystore @goplugin/keepers +/core/services/ocr* @goplugin/foundations +/core/services/periodicbackup @goplugin/foundations +/core/services/pg @goplugin/foundations @samsondav +/core/services/pipeline @goplugin/foundations @goplugin/integrations +/core/services/telemetry @goplugin/realtime +/core/services/relay/evm/mercury @goplugin/mercury-team +/core/services/webhook @goplugin/foundations @goplugin/integrations +/core/services/llo @goplugin/mercury-team + +# VRF-related services +/core/services/vrf @goplugin/vrf-team +/core/services/blockhashstore @goplugin/vrf-team +/core/services/blockheaderfeeder @goplugin/vrf-team +/core/services/pipeline/task.vrf.go @goplugin/vrf-team +/core/services/pipeline/task.vrfv2.go @goplugin/vrf-team +/core/services/pipeline/task.vrfv2plus.go @goplugin/vrf-team +/core/services/ocr2/plugins/dkg @goplugin/vrf-team +/core/services/ocr2/plugins/ocr2vrf @goplugin/vrf-team + +# Keeper/Automation-related services +/core/services/keeper @goplugin/keepers +/core/services/ocr2/plugins/ocr2keeper @goplugin/keepers + +# Plugin Functions +core/services/functions @goplugin/functions +core/services/ocr2/plugins/functions @goplugin/functions +core/services/s4 @goplugin/functions +core/service/ocr2/plugins/s4 @goplugin/functions +core/services/ocr2/plugins/threshold @goplugin/functions +core/services/relay/evm/functions @goplugin/functions +core/services/relay/evm/functions @goplugin/functions +core/scripts/functions @goplugin/functions +core/scripts/gateway 
@goplugin/functions + +# Contracts +/contracts/ @RensR + +# First we match on project names to catch files like the compilation scripts, +# gas snapshots and other files not placed in the project directories. +# This could give some false positives, so afterwards we match on the project directories +# to ensure the entire directory is always owned by the correct team. + +/contracts/**/*keeper* @goplugin/keepers +/contracts/**/*upkeep* @goplugin/keepers +/contracts/**/*automation* @goplugin/keepers +/contracts/**/*functions* @goplugin/functions +/contracts/**/*llo-feeds* @goplugin/mercury-team +/contracts/**/*vrf* @goplugin/vrf-team +/contracts/**/*l2ep* @goplugin/integrations + +/contracts/src/v0.8/automation @goplugin/keepers +/contracts/src/v0.8/functions @goplugin/functions +# TODO: interfaces folder, folder should be removed and files moved to the correct folders +/contracts/src/v0.8/l2ep @chris-de-leon-cll +/contracts/src/v0.8/llo-feeds @goplugin/mercury-team +# TODO: mocks folder, folder should be removed and files moved to the correct folders +/contracts/src/v0.8/operatorforwarder @goplugin/foundations +/contracts/src/v0.8/shared @RensR +# TODO: tests folder, folder should be removed and files moved to the correct folders +# TODO: transmission folder, owner should be found +/contracts/src/v0.8/vrf @goplugin/vrf-team + + + +# At the end, match any files missed by the patterns above +/contracts/scripts/native_solc_compile_all_events_mock @goplugin/functions + + +# Tests +/integration-tests/ @goplugin/test-tooling-team +/integration-tests/**/*keeper* @goplugin/keepers +/integration-tests/**/*automation* @goplugin/keepers + +# CI/CD +/.github/** @goplugin/releng @goplugin/test-tooling-team +/.github/workflows/performance-tests.yml @goplugin/test-tooling-team +/.github/workflows/automation-ondemand-tests.yml @goplugin/keepers +/.github/workflows/automation-benchmark-tests.yml @goplugin/keepers +/.github/workflows/automation-load-tests.yml @goplugin/keepers 
+/.github/workflows/automation-nightly-tests.yml @goplugin/keepers + +/core/plugin.Dockerfile @goplugin/prodsec-public + +# Dependencies +contracts/scripts/requirements.txt @goplugin/prodsec-public +.tool-versions @goplugin/prodsec-public +.nvmrc @goplugin/prodsec-public +contracts/package.json @goplugin/prodsec-public +contracts/pnpm.lock @goplugin/prodsec-public +go.mod @goplugin/prodsec-public +go.sum @goplugin/prodsec-public +integration-tests/go.mod @goplugin/prodsec-public +integration-tests/go.sum @goplugin/prodsec-public +flake.nix @goplugin/prodsec-public +flake.lock @goplugin/prodsec-public + +# Config +./docs/CONFIG.md @goplugin/foundations @goplugin/devrel +./internal/config/docs.toml @goplugin/foundations @goplugin/devrel diff --git a/GNUmakefile b/GNUmakefile new file mode 100644 index 00000000..8e2f0f56 --- /dev/null +++ b/GNUmakefile @@ -0,0 +1,174 @@ +.DEFAULT_GOAL := plugin + +COMMIT_SHA ?= $(shell git rev-parse HEAD) +VERSION = $(shell cat VERSION) +GO_LDFLAGS := $(shell tools/bin/ldflags) +GOFLAGS = -ldflags "$(GO_LDFLAGS)" + +.PHONY: install +install: install-plugin-autoinstall ## Install plugin and all its dependencies. + +.PHONY: install-git-hooks +install-git-hooks: ## Install git hooks. + git config core.hooksPath .githooks + +.PHONY: install-plugin-autoinstall +install-plugin-autoinstall: | pnpmdep gomod install-plugin ## Autoinstall plugin. + +.PHONY: pnpmdep +pnpmdep: ## Install solidity contract dependencies through pnpm + (cd contracts && pnpm i) + +.PHONY: gomod +gomod: ## Ensure plugin's go dependencies are installed. + @if [ -z "`which gencodec`" ]; then \ + go install github.com/goplugin/gencodec@latest; \ + fi || true + go mod download + +.PHONY: gomodtidy +gomodtidy: ## Run go mod tidy on all modules. 
+ go mod tidy + cd ./core/scripts && go mod tidy + cd ./integration-tests && go mod tidy + +.PHONY: godoc +godoc: ## Install and run godoc + go install golang.org/x/tools/cmd/godoc@latest + # http://localhost:6060/pkg/github.com/goplugin/pluginv3.0/v2/ + godoc -http=:6060 + +.PHONY: install-plugin +install-plugin: operator-ui ## Install the plugin binary. + go install $(GOFLAGS) . + +.PHONY: plugin +plugin: ## Build the plugin binary. + go build $(GOFLAGS) . + +.PHONY: plugin-dev +plugin-dev: ## Build a dev build of plugin binary. + go build -tags dev $(GOFLAGS) . + +.PHONY: plugin-test +plugin-test: ## Build a test build of plugin binary. + go build $(GOFLAGS) . + +.PHONY: plugin-local-start +plugin-local-start: + ./plugin -c /etc/node-secrets-volume/default.toml -c /etc/node-secrets-volume/overrides.toml -secrets /etc/node-secrets-volume/secrets.toml node start -d -p /etc/node-secrets-volume/node-password -a /etc/node-secrets-volume/apicredentials --vrfpassword=/etc/node-secrets-volume/apicredentials + +.PHONY: install-medianpoc +install-medianpoc: ## Build & install the plugin-medianpoc binary. + go install $(GOFLAGS) ./plugins/cmd/plugin-medianpoc + +.PHONY: install-ocr3-capability +install-ocr3-capability: ## Build & install the plugin-ocr3-capability binary. + go install $(GOFLAGS) ./plugins/cmd/plugin-ocr3-capability + +.PHONY: docker ## Build the plugin docker image +docker: + docker buildx build \ + --build-arg COMMIT_SHA=$(COMMIT_SHA) \ + -f core/plugin.Dockerfile . + +.PHONY: docker-plugins ## Build the plugin-plugins docker image +docker-plugins: + docker buildx build \ + --build-arg COMMIT_SHA=$(COMMIT_SHA) \ + -f plugins/plugin.Dockerfile . + +.PHONY: operator-ui +operator-ui: ## Fetch the frontend + go generate ./core/web + +.PHONY: abigen +abigen: ## Build & install abigen. + ./tools/bin/build_abigen + +.PHONY: generate +generate: abigen codecgen mockery protoc ## Execute all go:generate commands. + go generate -x ./... 
+ +.PHONY: testscripts +testscripts: plugin-test ## Install and run testscript against testdata/scripts/* files. + go install github.com/rogpeppe/go-internal/cmd/testscript@latest + go run ./tools/txtar/cmd/lstxtardirs -recurse=true | PATH="$(CURDIR):${PATH}" xargs -I % \ + sh -c 'testscript -e COMMIT_SHA=$(COMMIT_SHA) -e HOME="$(TMPDIR)/home" -e VERSION=$(VERSION) $(TS_FLAGS) %/*.txtar' + +.PHONY: testscripts-update +testscripts-update: ## Update testdata/scripts/* files via testscript. + make testscripts TS_FLAGS="-u" + +.PHONY: testdb +testdb: ## Prepares the test database. + go run . local db preparetest + +.PHONY: testdb-user-only +testdb-user-only: ## Prepares the test database with user only. + go run . local db preparetest --user-only + +# Format for CI +.PHONY: presubmit +presubmit: ## Format go files and imports. + goimports -w . + gofmt -w . + go mod tidy + +.PHONY: mockery +mockery: $(mockery) ## Install mockery. + go install github.com/vektra/mockery/v2@v2.38.0 + +.PHONY: codecgen +codecgen: $(codecgen) ## Install codecgen + go install github.com/ugorji/go/codec/codecgen@v1.2.10 + +.PHONY: protoc +protoc: ## Install protoc + core/scripts/install-protoc.sh 25.1 / + go install google.golang.org/protobuf/cmd/protoc-gen-go@`go list -m -json google.golang.org/protobuf | jq -r .Version` + +.PHONY: telemetry-protobuf +telemetry-protobuf: $(telemetry-protobuf) ## Generate telemetry protocol buffers. + protoc \ + --go_out=. \ + --go_opt=paths=source_relative \ + --go-wsrpc_out=. \ + --go-wsrpc_opt=paths=source_relative \ + ./core/services/synchronization/telem/*.proto + +.PHONY: config-docs +config-docs: ## Generate core node configuration documentation + go run ./core/config/docs/cmd/generate -o ./docs/ + +.PHONY: golangci-lint +golangci-lint: ## Run golangci-lint for all issues. 
+ [ -d "./golangci-lint" ] || mkdir ./golangci-lint && \ + docker run --rm -v $(shell pwd):/app -w /app golangci/golangci-lint:v1.55.2 golangci-lint run --max-issues-per-linter 0 --max-same-issues 0 > ./golangci-lint/$(shell date +%Y-%m-%d_%H:%M:%S).txt + + +GORELEASER_CONFIG ?= .goreleaser.yaml + +.PHONY: goreleaser-dev-build +goreleaser-dev-build: ## Run goreleaser snapshot build + ./tools/bin/goreleaser_wrapper build --snapshot --rm-dist --config ${GORELEASER_CONFIG} + +.PHONY: goreleaser-dev-release +goreleaser-dev-release: ## run goreleaser snapshot release + ./tools/bin/goreleaser_wrapper release --snapshot --rm-dist --config ${GORELEASER_CONFIG} + +.PHONY: modgraph +modgraph: + ./tools/bin/modgraph > go.md + +help: + @echo "" + @echo " .__ .__ .__ .__ __" + @echo " ____ | |__ _____ |__| ____ | | |__| ____ | | __" + @echo " _/ ___\| | \\\\\\__ \ | |/ \| | | |/ \| |/ /" + @echo " \ \___| Y \/ __ \| | | \ |_| | | \ <" + @echo " \___ >___| (____ /__|___| /____/__|___| /__|_ \\" + @echo " \/ \/ \/ \/ \/ \/" + @echo "" + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | \ + awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..641d2b92 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 SmartContract Plugin, Ltd. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 00000000..d73b6776 --- /dev/null +++ b/README.md @@ -0,0 +1,304 @@ +
+

+ +Plugin logo + +

+
+ +[![GitHub tag (latest SemVer)](https://img.shields.io/github/v/tag/goplugin/pluginv3.0?style=flat-square)](https://hub.docker.com/r/smartcontract/plugin/tags) +[![GitHub license](https://img.shields.io/github/license/goplugin/pluginv3.0?style=flat-square)](https://github.com/goplugin/pluginv3.0/blob/master/LICENSE) +[![GitHub workflow changelog](https://img.shields.io/github/workflow/status/goplugin/pluginv3.0/Changelog?style=flat-square&label=github-actions)](https://github.com/goplugin/pluginv3.0/actions?query=workflow%3AChangelog) +[![GitHub contributors](https://img.shields.io/github/contributors-anon/goplugin/pluginv3.0?style=flat-square)](https://github.com/goplugin/pluginv3.0/graphs/contributors) +[![GitHub commit activity](https://img.shields.io/github/commit-activity/y/goplugin/pluginv3.0?style=flat-square)](https://github.com/goplugin/pluginv3.0/commits/master) +[![Official documentation](https://img.shields.io/static/v1?label=docs&message=latest&color=blue)](https://docs.chain.link/) + +[Plugin](https://chain.link/) expands the capabilities of smart contracts by enabling access to real-world data and off-chain computation while maintaining the security and reliability guarantees inherent to blockchain technology. + +This repo contains the Plugin core node and contracts. The core node is the bundled binary available to be run by node operators participating in a [decentralized oracle network](https://link.smartcontract.com/whitepaper). +All major release versions have pre-built docker images available for download from the [Plugin dockerhub](https://hub.docker.com/r/smartcontract/plugin/tags). +If you are interested in contributing please see our [contribution guidelines](./docs/CONTRIBUTING.md). +If you are here to report a bug or request a feature, please [check currently open Issues](https://github.com/goplugin/pluginv3.0/issues). 
+For more information about how to get started with Plugin, check our [official documentation](https://docs.chain.link/). +Resources for Solidity developers can be found in the [Plugin Hardhat Box](https://github.com/goplugin/hardhat-starter-kit). + +## Community + +Plugin has an active and ever growing community. [Discord](https://discordapp.com/invite/aSK4zew) +is the primary communication channel used for day to day communication, +answering development questions, and aggregating Plugin related content. Take +a look at the [community docs](./docs/COMMUNITY.md) for more information +regarding Plugin social accounts, news, and networking. + +## Build Plugin + +1. [Install Go 1.21.1](https://golang.org/doc/install), and add your GOPATH's [bin directory to your PATH](https://golang.org/doc/code.html#GOPATH) + - Example Path for macOS `export PATH=$GOPATH/bin:$PATH` & `export GOPATH=/Users/$USER/go` +2. Install [NodeJS v16](https://nodejs.org/en/download/package-manager/) & [pnpm via npm](https://pnpm.io/installation#using-npm). + - It might be easier long term to use [nvm](https://nodejs.org/en/download/package-manager/#nvm) to switch between node versions for different projects. For example, assuming $NODE_VERSION was set to a valid version of NodeJS, you could run: `nvm install $NODE_VERSION && nvm use $NODE_VERSION` +3. Install [Postgres (>= 11.x and <= 15.x)](https://wiki.postgresql.org/wiki/Detailed_installation_guides). + - You should [configure Postgres](https://www.postgresql.org/docs/12/ssl-tcp.html) to use SSL connection (or for testing you can set `?sslmode=disable` in your Postgres query string). +4. Ensure you have Python 3 installed (this is required by [solc-select](https://github.com/crytic/solc-select) which is needed to compile solidity contracts) +5. Download Plugin: `git clone https://github.com/goplugin/pluginv3.0 && cd plugin` +6. Build and install Plugin: `make install` +7. 
Run the node: `plugin help` + +For the latest information on setting up a development environment, see the [Development Setup Guide](https://github.com/goplugin/pluginv3.0/wiki/Development-Setup-Guide). + +### Apple Silicon - ARM64 + +Native builds on the Apple Silicon should work out of the box, but the Docker image requires more consideration. + +```bash +$ docker build . -t plugin-develop:latest -f ./core/plugin.Dockerfile +``` + +### Ethereum Execution Client Requirements + +In order to run the Plugin node you must have access to a running Ethereum node with an open websocket connection. +Any Ethereum based network will work once you've [configured](https://github.com/goplugin/pluginv3.0#configure) the chain ID. +Ethereum node versions currently tested and supported: + +[Officially supported] + +- [Parity/Openethereum](https://github.com/openethereum/openethereum) (NOTE: Parity is deprecated and support for this client may be removed in future) +- [Geth](https://github.com/ethereum/go-ethereum/releases) + +[Supported but broken] +These clients are supported by Plugin, but have bugs that prevent Plugin from working reliably on these execution clients. + +- [Nethermind](https://github.com/NethermindEth/nethermind) + Blocking issues: + - ~https://github.com/NethermindEth/nethermind/issues/4384~ +- [Besu](https://github.com/hyperledger/besu) + Blocking issues: + - https://github.com/hyperledger/besu/issues/4212 + - ~https://github.com/hyperledger/besu/issues/4192~ + - ~https://github.com/hyperledger/besu/issues/4114~ +- [Erigon](https://github.com/ledgerwatch/erigon) + Blocking issues: + - https://github.com/ledgerwatch/erigon/discussions/4946 + - https://github.com/ledgerwatch/erigon/issues/4030#issuecomment-1113964017 + +We cannot recommend specific version numbers for ethereum nodes since the software is being continually updated, but you should usually try to run the latest version available. 
+ +## Running a local Plugin node + +**NOTE**: By default, plugin will run in TLS mode. For local development you can disable this by using a `dev build` using `make plugin-dev` and setting the TOML fields: + +```toml +[WebServer] +SecureCookies = false +TLS.HTTPSPort = 0 + +[Insecure] +DevWebServer = true +``` + +Alternatively, you can generate self signed certificates using `tools/bin/self-signed-certs` or [manually](https://github.com/goplugin/pluginv3.0/wiki/Creating-Self-Signed-Certificates). + +To start your Plugin node, simply run: + +```bash +plugin node start +``` + +By default this will start on port 6688. You should be able to access the UI at [http://localhost:6688/](http://localhost:6688/). + +Plugin provides a remote CLI client as well as a UI. Once your node has started, you can open a new terminal window to use the CLI. You will need to log in to authorize the client first: + +```bash +plugin admin login +``` + +(You can also set `ADMIN_CREDENTIALS_FILE=/path/to/credentials/file` in future if you like, to avoid having to login again). + +Now you can view your current jobs with: + +```bash +plugin jobs list +``` + +To find out more about the Plugin CLI, you can always run `plugin help`. + +Check out the [doc](https://docs.chain.link/) pages on [Jobs](https://docs.chain.link/docs/jobs/) to learn more about how to create Jobs. + +### Configuration + +Node configuration is managed by a combination of environment variables and direct setting via API/UI/CLI. + +Check the [official documentation](https://docs.chain.link/docs/configuration-variables) for more information on how to configure your node. + +### External Adapters + +External adapters are what make Plugin easily extensible, providing simple integration of custom computations and specialized APIs. A Plugin node communicates with external adapters via a simple REST API. 
+
+For more information on creating and using external adapters, please see our [external adapters page](https://docs.chain.link/docs/external-adapters).
+
+## Development
+
+### Running tests
+
+1. [Install pnpm via npm](https://pnpm.io/installation#using-npm)
+
+2. Install [gencodec](https://github.com/fjl/gencodec) and [jq](https://stedolan.github.io/jq/download/) to be able to run `go generate ./...` and `make abigen`
+
+3. Install mockery
+
+`make mockery`
+
+Using the `make` command will install the correct version.
+
+4. Build contracts:
+
+```bash
+pushd contracts
+pnpm i
+pnpm compile:native
+popd
+```
+
+5. Generate and compile static assets:
+
+```bash
+go generate ./...
+```
+
+6. Prepare your development environment:
+
+```bash
+export CL_DATABASE_URL=postgresql://127.0.0.1:5432/plugin_test?sslmode=disable
+```
+
+Note: Other environment variables should not be set for all tests to pass
+
+7. Drop/Create test database and run migrations:
+
+```
+make testdb
+```
+
+If you do end up modifying the migrations for the database, you will need to rerun
+
+8. Run tests:
+
+```bash
+go test ./...
+```
+
+#### Notes
+
+- The `parallel` flag can be used to limit CPU usage, for running tests in the background (`-parallel=4`) - the default is `GOMAXPROCS`
+- The `p` flag can be used to limit the number of _packages_ tested concurrently, if they are interfering with one another (`-p=1`)
+- The `-short` flag skips tests which depend on the database, for quickly spot checking simpler tests in around one minute
+
+#### Race Detector
+
+As of Go 1.1, the runtime includes a data race detector, enabled with the `-race` flag. This is used in CI via the
+`tools/bin/go_core_race_tests` script. If the action detects a race, the artifact on the summary page will include
+`race.*` files with detailed stack traces.
+ +> _**It will not issue false positives, so take its warnings seriously.**_ + +For local, targeted race detection, you can run: + +```bash +GORACE="log_path=$PWD/race" go test -race ./core/path/to/pkg -count 10 +GORACE="log_path=$PWD/race" go test -race ./core/path/to/pkg -count 100 -run TestFooBar/sub_test +``` + +https://go.dev/doc/articles/race_detector + +#### Fuzz tests + +As of Go 1.18, fuzz tests `func FuzzXXX(*testing.F)` are included as part of the normal test suite, so existing cases are executed with `go test`. + +Additionally, you can run active fuzzing to search for new cases: + +```bash +go test ./pkg/path -run=XXX -fuzz=FuzzTestName +``` + +https://go.dev/doc/fuzz/ + +### Go Modules + +This repository contains three Go modules: + +```mermaid +flowchart RL + github.com/goplugin/pluginv3.0/v2 + github.com/goplugin/pluginv3.0/integration-tests --> github.com/goplugin/pluginv3.0/v2 + github.com/goplugin/pluginv3.0/core/scripts --> github.com/goplugin/pluginv3.0/v2 + +``` +The `integration-tests` and `core/scripts` modules import the root module using a relative replace in their `go.mod` files, +so dependency changes in the root `go.mod` often require changes in those modules as well. After making a change, `go mod tidy` +can be run on all three modules using: +``` +make gomodtidy +``` + +### Solidity + +Inside the `contracts/` directory: + +1. Install dependencies: + +```bash +pnpm i +``` + +2. Run tests: + +```bash +pnpm test +``` +NOTE: Plugin is currently in the process of migrating to Foundry and contains both Foundry and Hardhat tests in some versions. More information can be found here: [Plugin Foundry Documentation](https://github.com/goplugin/pluginv3.0/blob/develop/contracts/foundry.md). +Any 't.sol' files associated with Foundry tests, contained within the src directories will be ignored by Hardhat. + +### Code Generation + +Go generate is used to generate mocks in this project. 
Mocks are generated with [mockery](https://github.com/vektra/mockery) and live in core/internal/mocks. + +### Nix + +A [shell.nix](https://nixos.wiki/wiki/Development_environment_with_nix-shell) is provided for use with the [Nix package manager](https://nixos.org/), with optional [flakes](https://nixos.wiki/wiki/Flakes) support. It defines a declarative, reproducible development environment. Flakes version use deterministic, frozen (`flake.lock`) dependencies, while non-flakes shell will use your channel's packages versions. + +To use it: + +1. Install [nix package manager](https://nixos.org/download.html) in your system. + +- Optionally, enable [flakes support](https://nixos.wiki/wiki/Flakes#Enable_flakes) + +2. Run `nix-shell`. You will be put in shell containing all the dependencies. + +- To use the flakes version, run `nix develop` instead of `nix-shell`. Optionally, `nix develop --command $SHELL` will make use of your current shell instead of the default (bash). +- You can use `direnv` to enable it automatically when `cd`-ing into the folder; for that, enable [nix-direnv](https://github.com/nix-community/nix-direnv) and `use nix` or `use flake` on it. + +3. Create a local postgres database: + +```sh +mkdir -p $PGDATA && cd $PGDATA/ +initdb +pg_ctl -l postgres.log -o "--unix_socket_directories='$PWD'" start +createdb plugin_test -h localhost +createuser --superuser --password plugin -h localhost +# then type a test password, e.g.: plugin, and set it in shell.nix CL_DATABASE_URL +``` + +4. When re-entering project, you can restart postgres: `cd $PGDATA; pg_ctl -l postgres.log -o "--unix_socket_directories='$PWD'" start` + Now you can run tests or compile code as usual. +5. When you're done, stop it: `cd $PGDATA; pg_ctl -o "--unix_socket_directories='$PWD'" stop` + +### Tips + +For more tips on how to build and test Plugin, see our [development tips page](https://github.com/goplugin/pluginv3.0/wiki/Development-Tips). 
+ +### Contributing + +Contributions are welcome to Plugin's source code. + +Please check out our [contributing guidelines](./docs/CONTRIBUTING.md) for more details. + +Thank you! diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..a7e4604a --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,15 @@ +# Reporting a Vulnerability + +This repository is covered by Plugin's bug bounty programs. + +If you have found an issue or bug that potentially affects the overall security of the project then please privately report the issue as a vulnerability on one of the two bug bounty programs below. + +Please do not open a public GitHub issue sharing the details of a security related bug. Valid security findings may be eligible for payout. The programs have detailed scopes, so please ensure your bug is within them before submitting. + +Please note that both our programs require completing KYC before a bounty can be paid out. + +## HackerOne +https://hackerone.com/plugin + +## Immunefi +https://immunefi.com/bounty/plugin/ diff --git a/VERSION b/VERSION new file mode 100644 index 00000000..c8e38b61 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +2.9.0 diff --git a/bors.toml b/bors.toml new file mode 100644 index 00000000..5970c638 --- /dev/null +++ b/bors.toml @@ -0,0 +1,20 @@ +# https://github.com/bors-ng/bors-ng/issues/730 +status = [ + "sigscanner-check", + "lint", + "Core Tests (go_core_tests)", + "Core Tests (go_core_race_tests)", + "Solana Smoke Tests", + "Prettier Formatting", + "ETH Smoke Tests", + "Solidity" +] +block_labels = [ "do-not-merge", "do-not-merge-yet", "needs changes", "wip" ] +timeout_sec = 3600 # one hour +required_approvals = 1 +up_to_date_approvals = true +delete_merged_branches = true +update_base_for_deletes = true +# todo: enable after organizing codeowners +# use_codeowners = true +use_squash_merge = true diff --git a/charts/plugin-cluster/.gitignore b/charts/plugin-cluster/.gitignore new file mode 100644 index 00000000..3ee791f7 --- /dev/null 
+++ b/charts/plugin-cluster/.gitignore @@ -0,0 +1,3 @@ +# Helm +charts/ +requirements.lock diff --git a/charts/plugin-cluster/.helmignore b/charts/plugin-cluster/.helmignore new file mode 100644 index 00000000..d2c33ef3 --- /dev/null +++ b/charts/plugin-cluster/.helmignore @@ -0,0 +1,24 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +.devspace/ diff --git a/charts/plugin-cluster/Chart.yaml b/charts/plugin-cluster/Chart.yaml new file mode 100644 index 00000000..ff460091 --- /dev/null +++ b/charts/plugin-cluster/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: plugin-cluster +description: Plugin nodes cluster +version: 0.2.0 +appVersion: "2.6.0" +dependencies: + - name: mockserver + version: "5.14.0" + repository: "https://www.mock-server.com" + condition: mockserver.enabled diff --git a/charts/plugin-cluster/README.md b/charts/plugin-cluster/README.md new file mode 100644 index 00000000..625065a7 --- /dev/null +++ b/charts/plugin-cluster/README.md @@ -0,0 +1,143 @@ +# Plugin cluster +Example CL nodes cluster for system level tests + +Install `kubefwd` (no nixpkg for it yet, planned) +``` +brew install txn2/tap/kubefwd +``` +If you want to build images you need [docker](https://docs.docker.com/engine/install/) service running + +Enter the shell (from the root project dir) +``` +nix develop +``` + +# Develop + +## New cluster +We are using [devspace](https://www.devspace.sh/docs/getting-started/installation?x0=3) + +Configure the cluster, see `deployments.app.helm.values` and [values.yaml](./values.yaml) comments for more details + +Configure your `cluster` setup (one time setup, internal usage only) +``` +export DEVSPACE_IMAGE="..." 
+cd charts/plugin-cluster +./setup.sh ${my-personal-namespace-name-crib} +``` + +Build and deploy current commit +``` +devspace deploy +``` + +If you don't need a build use +``` +devspace deploy --skip-build +``` + +To deploy particular commit (must be in registry) use +``` +devspace deploy --skip-build ${short_sha_of_image} +``` + +Forward ports to check UI or run tests +``` +devspace run connect ${my-personal-namespace-name-crib} +``` + +Connect to your environment, by replacing container with label `node-1` with your local repository files +``` +devspace dev -p node +make plugin +make plugin-local-start +``` +Fix something in the code locally, it'd automatically sync, rebuild it inside container and run again +``` +make plugin +make plugin-local-start +``` + +Reset the pod to original image +``` +devspace reset pods +``` + +Destroy the cluster +``` +devspace purge +``` + +## Running load tests +Check this [doc](../../integration-tests/load/ocr/README.md) + +If you used `devspace dev ...` always use `devspace reset pods` to switch the pods back + +## Debug existing cluster +If you need to debug CL node that is already deployed change `dev.app.container` and `dev.app.labelSelector` in [devspace.yaml](devspace.yaml) if they are not default and run: +``` +devspace dev -p node +``` + +## Automatic file sync +When you run `devspace dev` your files described in `dev.app.sync` of [devspace.yaml](devspace.yaml) will be uploaded to the switched container + +After that all the changes will be synced automatically + +Check `.profiles` to understand what is uploaded in profiles `runner` and `node` + +# Helm +If you would like to use `helm` directly, please uncomment data in `values.yaml` +## Install from local files +``` +helm install -f values.yaml cl-cluster . 
+``` +Forward all apps (in another terminal) +``` +sudo kubefwd svc -n cl-cluster +``` +Then you can connect and run your tests + +## Install from release +Add the repository +``` +helm repo add plugin-cluster https://raw.githubusercontent.com/goplugin/pluginv3.0/helm-release/ +helm repo update +``` +Set default namespace +``` +kubectl create ns cl-cluster +kubectl config set-context --current --namespace cl-cluster +``` + +Install +``` +helm install -f values.yaml cl-cluster . +``` + +## Create a new release +Bump version in `Chart.yml` add your changes and add `helm_release` label to any PR to trigger a release + +## Helm Test +``` +helm test cl-cluster +``` + +## Uninstall +``` +helm uninstall cl-cluster +``` + +# Grafana dashboard +We are using [Grabana]() lib to create dashboards programmatically +``` +export GRAFANA_URL=... +export GRAFANA_TOKEN=... +export LOKI_DATA_SOURCE_NAME=Loki +export PROMETHEUS_DATA_SOURCE_NAME=Thanos +export DASHBOARD_FOLDER=CRIB +export DASHBOARD_NAME=PluginCluster + +cd dashboard/cmd && go run dashboard_deploy.go +``` +Open Grafana folder `CRIB` and find dashboard `PluginCluster` \ No newline at end of file diff --git a/charts/plugin-cluster/connect.toml b/charts/plugin-cluster/connect.toml new file mode 100644 index 00000000..9560be53 --- /dev/null +++ b/charts/plugin-cluster/connect.toml @@ -0,0 +1,12 @@ +namespace = "cl-cluster" +network_name = "geth" +network_chain_id = 1337 +network_private_key = "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" +network_ws_url = "ws://geth-1337:8546" +network_http_url = "http://geth:8544" +cl_nodes_num = 6 +cl_node_url_template = "http://app-node-%d:6688" +cl_node_internal_dns_record_template = "app-node-%d" +cl_node_user = "notreal@fakeemail.ch" +cl_node_password = "fj293fbBnlQ!f9vNs" +mockserver_url = "http://mockserver:1080" \ No newline at end of file diff --git a/charts/plugin-cluster/dashboard/cmd/dashboard_deploy.go 
b/charts/plugin-cluster/dashboard/cmd/dashboard_deploy.go
new file mode 100644
index 00000000..6b6f58f3
--- /dev/null
+++ b/charts/plugin-cluster/dashboard/cmd/dashboard_deploy.go
@@ -0,0 +1,49 @@
+package main
+
+import (
+	"os"
+
+	"github.com/goplugin/pluginv3.0/charts/plugin-cluster/dashboard/dashboard"
+	"github.com/goplugin/wasp"
+)
+
+// main reads the dashboard configuration from environment variables,
+// builds the cluster dashboard, and deploys it to Grafana.
+func main() {
+	name := os.Getenv("DASHBOARD_NAME")
+	if name == "" {
+		panic("DASHBOARD_NAME must be provided")
+	}
+	ldsn := os.Getenv("LOKI_DATA_SOURCE_NAME")
+	if ldsn == "" {
+		panic("LOKI_DATA_SOURCE_NAME must be provided")
+	}
+	os.Setenv("DATA_SOURCE_NAME", ldsn)
+	pdsn := os.Getenv("PROMETHEUS_DATA_SOURCE_NAME")
+	// Fixed copy-paste bug: this previously re-checked ldsn, so a missing
+	// PROMETHEUS_DATA_SOURCE_NAME was silently accepted.
+	if pdsn == "" {
+		panic("PROMETHEUS_DATA_SOURCE_NAME must be provided")
+	}
+	dbf := os.Getenv("DASHBOARD_FOLDER")
+	if dbf == "" {
+		panic("DASHBOARD_FOLDER must be provided")
+	}
+	grafanaURL := os.Getenv("GRAFANA_URL")
+	if grafanaURL == "" {
+		panic("GRAFANA_URL must be provided")
+	}
+	grafanaToken := os.Getenv("GRAFANA_TOKEN")
+	if grafanaToken == "" {
+		panic("GRAFANA_TOKEN must be provided")
+	}
+	// if you'll use this dashboard base in other projects, you can add your own opts here to extend it
+	db, err := dashboard.NewCLClusterDashboard(6, name, ldsn, pdsn, dbf, grafanaURL, grafanaToken, nil)
+	if err != nil {
+		panic(err)
+	}
+	// here we are extending load testing dashboard with core metrics, for example
+	wdb, err := wasp.NewDashboard(nil, db.Opts())
+	if err != nil {
+		panic(err)
+	}
+	if _, err := wdb.Deploy(); err != nil {
+		panic(err)
+	}
+}
diff --git a/charts/plugin-cluster/dashboard/dashboard.go b/charts/plugin-cluster/dashboard/dashboard.go
new file mode 100644
index 00000000..4dec0ca8
--- /dev/null
+++ b/charts/plugin-cluster/dashboard/dashboard.go
@@ -0,0 +1,963 @@
+package dashboard
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/K-Phoen/grabana"
+	"github.com/K-Phoen/grabana/dashboard"
+	"github.com/K-Phoen/grabana/logs"
+	"github.com/K-Phoen/grabana/row"
+	
"github.com/K-Phoen/grabana/stat" + "github.com/K-Phoen/grabana/target/prometheus" + "github.com/K-Phoen/grabana/timeseries" + "github.com/K-Phoen/grabana/timeseries/axis" + "github.com/K-Phoen/grabana/variable/query" + "github.com/pkg/errors" +) + +/* +Use ripgrep to get the full list +rg -oU ".*promauto.*\n.*Name: \"(.*)\"" -r '$1' > metrics.txt + +duplicates? + +common/client/node.go:pool_rpc_node_verifies +common/client/node.go:pool_rpc_node_verifies_failed +common/client/node.go:pool_rpc_node_verifies_success +common/client/node_fsm.go:pool_rpc_node_num_transitions_to_alive +common/client/node_fsm.go:pool_rpc_node_num_transitions_to_in_sync +common/client/node_fsm.go:pool_rpc_node_num_transitions_to_out_of_sync +common/client/node_fsm.go:pool_rpc_node_num_transitions_to_unreachable +common/client/node_fsm.go:pool_rpc_node_num_transitions_to_invalid_chain_id +common/client/node_fsm.go:pool_rpc_node_num_transitions_to_unusable +common/client/node_lifecycle.go:pool_rpc_node_highest_seen_block +common/client/node_lifecycle.go:pool_rpc_node_num_seen_blocks +common/client/node_lifecycle.go:pool_rpc_node_polls_total +common/client/node_lifecycle.go:pool_rpc_node_polls_failed +common/client/node_lifecycle.go:pool_rpc_node_polls_success + +covered + +core/logger/prometheus.go:log_warn_count +core/logger/prometheus.go:log_error_count +core/logger/prometheus.go:log_critical_count +core/logger/prometheus.go:log_panic_count +core/logger/prometheus.go:log_fatal_count +common/client/multi_node.go:multi_node_states +common/txmgr/broadcaster.go:tx_manager_time_until_tx_broadcast +common/txmgr/confirmer.go:tx_manager_num_gas_bumps +common/txmgr/confirmer.go:tx_manager_gas_bump_exceeds_limit +common/txmgr/confirmer.go:tx_manager_num_confirmed_transactions +common/txmgr/confirmer.go:tx_manager_num_successful_transactions +common/txmgr/confirmer.go:tx_manager_num_tx_reverted +common/txmgr/confirmer.go:tx_manager_fwd_tx_count +common/txmgr/confirmer.go:tx_manager_tx_attempt_count 
+common/txmgr/confirmer.go:tx_manager_time_until_tx_confirmed +common/txmgr/confirmer.go:tx_manager_blocks_until_tx_confirmed +common/headtracker/head_tracker.go:head_tracker_current_head +common/headtracker/head_tracker.go:head_tracker_very_old_head +common/headtracker/head_listener.go:head_tracker_heads_received +common/headtracker/head_listener.go:head_tracker_connection_errors +core/chains/evm/client/node_fsm.go:evm_pool_rpc_node_num_transitions_to_alive +core/chains/evm/client/node_fsm.go:evm_pool_rpc_node_num_transitions_to_in_sync +core/chains/evm/client/node_fsm.go:evm_pool_rpc_node_num_transitions_to_out_of_sync +core/chains/evm/client/node_fsm.go:evm_pool_rpc_node_num_transitions_to_unreachable +core/chains/evm/client/node_fsm.go:evm_pool_rpc_node_num_transitions_to_invalid_chain_id +core/chains/evm/client/node_fsm.go:evm_pool_rpc_node_num_transitions_to_unusable +core/services/promreporter/prom_reporter.go:unconfirmed_transactions +core/services/promreporter/prom_reporter.go:max_unconfirmed_tx_age +core/services/promreporter/prom_reporter.go:max_unconfirmed_blocks +core/services/promreporter/prom_reporter.go:pipeline_runs_queued +core/services/promreporter/prom_reporter.go:pipeline_task_runs_queued +core/services/pipeline/task.bridge.go:bridge_latency_seconds +core/services/pipeline/task.bridge.go:bridge_errors_total +core/services/pipeline/task.bridge.go:bridge_cache_hits_total +core/services/pipeline/task.bridge.go:bridge_cache_errors_total +core/services/pipeline/task.http.go:pipeline_task_http_fetch_time +core/services/pipeline/task.http.go:pipeline_task_http_response_body_size +core/services/pipeline/task.eth_call.go:pipeline_task_eth_call_execution_time +core/services/pipeline/runner.go:pipeline_task_execution_time +core/services/pipeline/runner.go:pipeline_run_errors +core/services/pipeline/runner.go:pipeline_run_total_time_to_completion +core/services/pipeline/runner.go:pipeline_tasks_total_finished 
+core/chains/evm/client/node.go:evm_pool_rpc_node_dials_total +core/chains/evm/client/node.go:evm_pool_rpc_node_dials_failed +core/chains/evm/client/node.go:evm_pool_rpc_node_dials_success +core/chains/evm/client/node.go:evm_pool_rpc_node_verifies +core/chains/evm/client/node.go:evm_pool_rpc_node_verifies_failed +core/chains/evm/client/node.go:evm_pool_rpc_node_verifies_success +core/chains/evm/client/node.go:evm_pool_rpc_node_calls_total +core/chains/evm/client/node.go:evm_pool_rpc_node_calls_failed +core/chains/evm/client/node.go:evm_pool_rpc_node_calls_success +core/chains/evm/client/node.go:evm_pool_rpc_node_rpc_call_time +core/chains/evm/client/pool.go:evm_pool_rpc_node_states +core/utils/mailbox_prom.go:mailbox_load_percent +core/services/pg/stats.go:db_conns_max +core/services/pg/stats.go:db_conns_open +core/services/pg/stats.go:db_conns_used +core/services/pg/stats.go:db_wait_count +core/services/pg/stats.go:db_wait_time_seconds +core/chains/evm/client/node_lifecycle.go:evm_pool_rpc_node_highest_seen_block +core/chains/evm/client/node_lifecycle.go:evm_pool_rpc_node_num_seen_blocks +core/chains/evm/client/node_lifecycle.go:evm_pool_rpc_node_polls_total +core/chains/evm/client/node_lifecycle.go:evm_pool_rpc_node_polls_failed +core/chains/evm/client/node_lifecycle.go:evm_pool_rpc_node_polls_success +core/services/relay/evm/config_poller.go:ocr2_failed_rpc_contract_calls +core/services/feeds/service.go:feeds_job_proposal_requests +core/services/feeds/service.go:feeds_job_proposal_count +core/services/ocrcommon/prom.go:bridge_json_parse_values +core/services/ocrcommon/prom.go:ocr_median_values +core/chains/evm/logpoller/observability.go:log_poller_query_dataset_size + +not-covered and product specific (definitions/usage should be moved to plugins) + +mercury + +core/services/relay/evm/mercury/types/types.go:mercury_price_feed_missing +core/services/relay/evm/mercury/types/types.go:mercury_price_feed_errors 
+core/services/relay/evm/mercury/queue.go:mercury_transmit_queue_load +core/services/relay/evm/mercury/v1/data_source.go:mercury_insufficient_blocks_count +core/services/relay/evm/mercury/v1/data_source.go:mercury_zero_blocks_count +core/services/relay/evm/mercury/wsrpc/client.go:mercury_transmit_timeout_count +core/services/relay/evm/mercury/wsrpc/client.go:mercury_dial_count +core/services/relay/evm/mercury/wsrpc/client.go:mercury_dial_success_count +core/services/relay/evm/mercury/wsrpc/client.go:mercury_dial_error_count +core/services/relay/evm/mercury/wsrpc/client.go:mercury_connection_reset_count +core/services/relay/evm/mercury/transmitter.go:mercury_transmit_success_count +core/services/relay/evm/mercury/transmitter.go:mercury_transmit_duplicate_count +core/services/relay/evm/mercury/transmitter.go:mercury_transmit_connection_error_count +core/services/relay/evm/mercury/transmitter.go:mercury_transmit_queue_delete_error_count +core/services/relay/evm/mercury/transmitter.go:mercury_transmit_queue_insert_error_count +core/services/relay/evm/mercury/transmitter.go:mercury_transmit_queue_push_error_count +core/services/relay/evm/mercury/transmitter.go:mercury_transmit_server_error_count + +functions + +core/services/gateway/connectionmanager.go:gateway_heartbeats_sent +core/services/gateway/gateway.go:gateway_request +core/services/gateway/handlers/functions/handler.functions.go:gateway_functions_handler_error +core/services/gateway/handlers/functions/handler.functions.go:gateway_functions_secrets_set_success +core/services/gateway/handlers/functions/handler.functions.go:gateway_functions_secrets_set_failure +core/services/gateway/handlers/functions/handler.functions.go:gateway_functions_secrets_list_success +core/services/gateway/handlers/functions/handler.functions.go:gateway_functions_secrets_list_failure +core/services/functions/external_adapter_client.go:functions_external_adapter_client_latency 
+core/services/functions/external_adapter_client.go:functions_external_adapter_client_errors_total +core/services/functions/listener.go:functions_request_received +core/services/functions/listener.go:functions_request_internal_error +core/services/functions/listener.go:functions_request_computation_error +core/services/functions/listener.go:functions_request_computation_success +core/services/functions/listener.go:functions_request_timeout +core/services/functions/listener.go:functions_request_confirmed +core/services/functions/listener.go:functions_request_computation_result_size +core/services/functions/listener.go:functions_request_computation_error_size +core/services/functions/listener.go:functions_request_computation_duration +core/services/functions/listener.go:functions_request_pruned +core/services/ocr2/plugins/functions/reporting.go:functions_reporting_plugin_restarts +core/services/ocr2/plugins/functions/reporting.go:functions_reporting_plugin_query +core/services/ocr2/plugins/functions/reporting.go:functions_reporting_plugin_observation +core/services/ocr2/plugins/functions/reporting.go:functions_reporting_plugin_report +core/services/ocr2/plugins/functions/reporting.go:functions_reporting_plugin_report_num_observations +core/services/ocr2/plugins/functions/reporting.go:functions_reporting_plugin_accept +core/services/ocr2/plugins/functions/reporting.go:functions_reporting_plugin_transmit +core/services/ocr2/plugins/s4/prometheus.go:s4_reporting_plugin_query +core/services/ocr2/plugins/s4/prometheus.go:s4_reporting_plugin_observation +core/services/ocr2/plugins/s4/prometheus.go:s4_reporting_plugin_report +core/services/ocr2/plugins/s4/prometheus.go:s4_reporting_plugin_accept +core/services/ocr2/plugins/s4/prometheus.go:s4_reporting_plugin_query_byte_size +core/services/ocr2/plugins/s4/prometheus.go:s4_reporting_plugin_query_rows_count +core/services/ocr2/plugins/s4/prometheus.go:s4_reporting_plugin_observation_rows_count 
+core/services/ocr2/plugins/s4/prometheus.go:s4_reporting_plugin_report_rows_count +core/services/ocr2/plugins/s4/prometheus.go:s4_reporting_plugin_wrong_sig_count +core/services/ocr2/plugins/s4/prometheus.go:s4_reporting_plugin_expired_rows + +vrf + +core/services/vrf/vrfcommon/metrics.go:vrf_request_queue_size +core/services/vrf/vrfcommon/metrics.go:vrf_processed_request_count +core/services/vrf/vrfcommon/metrics.go:vrf_dropped_request_count +core/services/vrf/vrfcommon/metrics.go:vrf_duplicate_requests +core/services/vrf/vrfcommon/metrics.go:vrf_request_time_between_sims +core/services/vrf/vrfcommon/metrics.go:vrf_request_time_until_initial_sim + +keeper +core/services/keeper/upkeep_executer.go:keeper_check_upkeep_execution_time +*/ + +const ( + ErrFailedToCreateDashboard = "failed to create dashboard" + ErrFailedToCreateFolder = "failed to create folder" +) + +// CLClusterDashboard is a dashboard for a Plugin cluster +type CLClusterDashboard struct { + Nodes int + Name string + LokiDataSourceName string + PrometheusDataSourceName string + Folder string + GrafanaURL string + GrafanaToken string + opts []dashboard.Option + extendedOpts []dashboard.Option + builder dashboard.Builder +} + +// NewCLClusterDashboard returns a new dashboard for a Plugin cluster, can be used as a base for more complex plugin based dashboards +func NewCLClusterDashboard(nodes int, name, ldsn, pdsn, dbf, grafanaURL, grafanaToken string, opts []dashboard.Option) (*CLClusterDashboard, error) { + db := &CLClusterDashboard{ + Nodes: nodes, + Name: name, + Folder: dbf, + LokiDataSourceName: ldsn, + PrometheusDataSourceName: pdsn, + GrafanaURL: grafanaURL, + GrafanaToken: grafanaToken, + extendedOpts: opts, + } + if err := db.generate(); err != nil { + return db, err + } + return db, nil +} + +func (m *CLClusterDashboard) Opts() []dashboard.Option { + return m.opts +} + +// logsRowOption returns a row option for a node's logs with name and instance selector +func (m *CLClusterDashboard) 
logsRowOption(name, q string) row.Option { + return row.WithLogs( + name, + logs.DataSource(m.LokiDataSourceName), + logs.Span(12), + logs.Height("300px"), + logs.Transparent(), + logs.WithLokiTarget(q), + ) +} + +func (m *CLClusterDashboard) logsRowOptionsForNodes(nodes int) []row.Option { + opts := make([]row.Option, 0) + for i := 1; i <= nodes; i++ { + opts = append(opts, row.WithLogs( + fmt.Sprintf("Node %d", i), + logs.DataSource(m.LokiDataSourceName), + logs.Span(12), + logs.Height("300px"), + logs.Transparent(), + logs.WithLokiTarget(fmt.Sprintf(`{namespace="${namespace}", app="app", instance="node-%d", container="node"}`, i)), + )) + } + return opts +} + +// timeseriesRowOption returns a row option for a timeseries with name, axis unit, query and legend template +func (m *CLClusterDashboard) timeseriesRowOption(name, axisUnit, query, legendTemplate string) row.Option { + var tsq timeseries.Option + if legendTemplate != "" { + tsq = timeseries.WithPrometheusTarget( + query, + prometheus.Legend(legendTemplate), + ) + } else { + tsq = timeseries.WithPrometheusTarget(query) + } + var au timeseries.Option + if axisUnit != "" { + au = timeseries.Axis( + axis.Unit(axisUnit), + ) + } else { + au = timeseries.Axis() + } + return row.WithTimeSeries( + name, + timeseries.Span(6), + timeseries.Height("300px"), + timeseries.DataSource(m.PrometheusDataSourceName), + au, + tsq, + ) +} + +// statRowOption returns a row option for a stat with name, prometheus target and legend template +func (m *CLClusterDashboard) statRowOption(name, target, legend string) row.Option { + return row.WithStat( + name, + stat.Transparent(), + stat.DataSource(m.PrometheusDataSourceName), + stat.Text(stat.TextValueAndName), + stat.Orientation(stat.OrientationVertical), + stat.TitleFontSize(12), + stat.ValueFontSize(20), + stat.Span(12), + stat.Height("100px"), + stat.WithPrometheusTarget(target, prometheus.Legend(legend)), + ) +} + +// generate generates the dashboard, adding extendedOpts to 
the default options +func (m *CLClusterDashboard) generate() error { + opts := []dashboard.Option{ + dashboard.AutoRefresh("10s"), + dashboard.Tags([]string{"generated"}), + dashboard.VariableAsQuery( + "namespace", + query.DataSource(m.LokiDataSourceName), + query.Multiple(), + query.IncludeAll(), + query.Request(fmt.Sprintf("label_values(%s)", "namespace")), + query.Sort(query.NumericalAsc), + ), + dashboard.Row( + "Cluster health", + row.Collapse(), + m.statRowOption( + "App Version", + `version{namespace="${namespace}"}`, + "{{pod}} - {{version}}", + ), + row.WithTimeSeries( + "Restarts", + timeseries.Span(12), + timeseries.Height("200px"), + timeseries.DataSource(m.PrometheusDataSourceName), + timeseries.WithPrometheusTarget( + `sum(increase(kube_pod_container_status_restarts_total{namespace=~"${namespace}"}[5m])) by (pod)`, + prometheus.Legend("{{pod}}"), + ), + ), + row.WithTimeSeries( + "Service Components Health", + timeseries.Span(12), + timeseries.Height("200px"), + timeseries.DataSource(m.PrometheusDataSourceName), + timeseries.WithPrometheusTarget( + `health{namespace="${namespace}"}`, + prometheus.Legend("{{pod}} - {{service_id}}"), + ), + ), + row.WithTimeSeries( + "ETH Balance", + timeseries.Span(12), + timeseries.Height("200px"), + timeseries.DataSource(m.PrometheusDataSourceName), + timeseries.WithPrometheusTarget( + `eth_balance{namespace="${namespace}"}`, + prometheus.Legend("{{pod}} - {{account}}"), + ), + ), + ), + // HeadTracker + dashboard.Row("Head tracker", + row.Collapse(), + m.timeseriesRowOption( + "Head tracker current head", + "Block", + `head_tracker_current_head{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "Head tracker very old head", + "Block", + `head_tracker_very_old_head{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "Head tracker heads received", + "Block", + `head_tracker_heads_received{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "Head 
tracker connection errors", + "Errors", + `head_tracker_connection_errors{namespace="${namespace}"}`, + "{{ pod }}", + ), + ), + dashboard.Row("LogPoller", + row.Collapse(), + m.timeseriesRowOption( + "LogPoller Query Dataset Size", + "", + `log_poller_query_dataset_size{namespace="${namespace}"}`, + "{{ pod }}", + ), + ), + dashboard.Row("OCRCommon", + row.Collapse(), + m.timeseriesRowOption( + "Bridge JSON Parse Values", + "", + `bridge_json_parse_values{namespace="${namespace}"}`, + "{{ pod }} JobID: {{ job_id }}", + ), + m.timeseriesRowOption( + "OCR Median Values", + "", + `ocr_median_values{namespace="${namespace}"}`, + "{{pod}} JobID: {{ job_id }}", + ), + ), + dashboard.Row("Relay Config Poller", + row.Collapse(), + m.timeseriesRowOption( + "Relay Config Poller RPC Contract Calls", + "", + `ocr2_failed_rpc_contract_calls{namespace="${namespace}"}`, + "{{ pod }}", + ), + ), + dashboard.Row("Feeds Jobs", + row.Collapse(), + m.timeseriesRowOption( + "Feeds Job Proposal Requests", + "", + `feeds_job_proposal_requests{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "Feeds Job Proposal Count", + "", + `feeds_job_proposal_count{namespace="${namespace}"}`, + "{{ pod }}", + ), + ), + dashboard.Row("Mailbox", + row.Collapse(), + m.timeseriesRowOption( + "Mailbox Load Percent", + "", + `mailbox_load_percent{namespace="${namespace}"}`, + "{{ pod }} {{ name }}", + ), + ), + dashboard.Row("Multi Node States", + row.Collapse(), + m.timeseriesRowOption( + "Multi Node States", + "", + `multi_node_states{namespace="${namespace}"}`, + "{{ pod }}", + ), + ), + dashboard.Row("Block History Estimator", + row.Collapse(), + m.timeseriesRowOption( + "Gas Updater All Gas Price Percentiles", + "", + `gas_updater_all_gas_price_percentiles{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "Gas Updater All Tip Cap Percentiles", + "", + `gas_updater_all_tip_cap_percentiles{namespace="${namespace}"}`, + "{{ pod }}", + ), + 
m.timeseriesRowOption( + "Gas Updater Set Gas Price", + "", + `gas_updater_set_gas_price{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "Gas Updater Set Tip Cap", + "", + `gas_updater_set_tip_cap{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "Gas Updater Current Base Fee", + "", + `gas_updater_current_base_fee{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "Block History Estimator Connectivity Failure Count", + "", + `block_history_estimator_connectivity_failure_count{namespace="${namespace}"}`, + "{{ pod }}", + ), + ), + // PromReporter + dashboard.Row("Prom Reporter", + row.Collapse(), + m.timeseriesRowOption( + "Unconfirmed Transactions", + "Tx", + `unconfirmed_transactions{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "Unconfirmed TX Age", + "Sec", + `max_unconfirmed_tx_age{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "Unconfirmed TX Blocks", + "Blocks", + `max_unconfirmed_blocks{namespace="${namespace}"}`, + "{{ pod }}", + ), + ), + dashboard.Row("TX Manager", + row.Collapse(), + m.timeseriesRowOption( + "TX Manager Time Until TX Broadcast", + "", + `tx_manager_time_until_tx_broadcast{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "TX Manager Num Gas Bumps", + "", + `tx_manager_num_gas_bumps{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "TX Manager Num Gas Bumps Exceeds Limit", + "", + `tx_manager_gas_bump_exceeds_limit{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "TX Manager Num Confirmed Transactions", + "", + `tx_manager_num_confirmed_transactions{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "TX Manager Num Successful Transactions", + "", + `tx_manager_num_successful_transactions{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "TX Manager Num Reverted Transactions", + "", + 
`tx_manager_num_tx_reverted{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "TX Manager Num Fwd Transactions", + "", + `tx_manager_fwd_tx_count{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "TX Manager Num Transactions Attempts", + "", + `tx_manager_tx_attempt_count{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "TX Manager Time Until TX Confirmed", + "", + `tx_manager_time_until_tx_confirmed{namespace="${namespace}"}`, + "{{ pod }}", + ), + m.timeseriesRowOption( + "TX Manager Block Until TX Confirmed", + "", + `tx_manager_blocks_until_tx_confirmed{namespace="${namespace}"}`, + "{{ pod }}", + ), + ), + // DON report metrics + dashboard.Row("DON Report metrics", + row.Collapse(), + m.timeseriesRowOption( + "Plugin Query() count", + "Count", + `sum(rate(ocr2_reporting_plugin_query_count{namespace="${namespace}", app="app"}[$__rate_interval])) by (service)`, + "", + ), + m.timeseriesRowOption( + "Plugin Observation() time (95th)", + "Sec", + `histogram_quantile(0.95, sum(rate(ocr2_reporting_plugin_observation_time_bucket{namespace="${namespace}", app="app"}[$__rate_interval])) by (le, service)) / 1e9`, + "", + ), + m.timeseriesRowOption( + "Plugin ShouldAcceptReport() time (95th)", + "Sec", + `histogram_quantile(0.95, sum(rate(ocr2_reporting_plugin_should_accept_report_time_bucket{namespace="${namespace}", app="app"}[$__rate_interval])) by (le, service)) / 1e9`, + "", + ), + m.timeseriesRowOption( + "Plugin Report() time (95th)", + "Sec", + `histogram_quantile(0.95, sum(rate(ocr2_reporting_plugin_report_time_bucket{namespace="${namespace}", app="app"}[$__rate_interval])) by (le, service)) / 1e9`, + "", + ), + m.timeseriesRowOption( + "Plugin ShouldTransmitReport() time (95th)", + "Sec", + `histogram_quantile(0.95, sum(rate(ocr2_reporting_plugin_should_transmit_report_time_bucket{namespace="${namespace}", app="app"}[$__rate_interval])) by (le, service)) / 1e9`, + "", + ), + ), + 
dashboard.Row( + "EVM Pool Lifecycle", + row.Collapse(), + m.timeseriesRowOption( + "EVM Pool Highest Seen Block", + "Block", + `evm_pool_rpc_node_highest_seen_block{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool Num Seen Blocks", + "Block", + `evm_pool_rpc_node_num_seen_blocks{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool Node Polls Total", + "Block", + `evm_pool_rpc_node_polls_total{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool Node Polls Failed", + "Block", + `evm_pool_rpc_node_polls_failed{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool Node Polls Success", + "Block", + `evm_pool_rpc_node_polls_success{namespace="${namespace}"}`, + "{{pod}}", + ), + ), + dashboard.Row( + "DB Connection Metrics (App)", + row.Collapse(), + m.timeseriesRowOption( + "DB Connections MAX", + "Conn", + `db_conns_max{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "DB Connections Open", + "Conn", + `db_conns_open{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "DB Connections Used", + "Conn", + `db_conns_used{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "DB Connections Wait", + "Conn", + `db_conns_wait{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "DB Wait Count", + "", + `db_wait_count{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "DB Wait time", + "Sec", + `db_wait_time_seconds{namespace="${namespace}"}`, + "{{pod}}", + ), + ), + dashboard.Row( + "EVM Pool RPC Node Metrics (App)", + row.Collapse(), + m.timeseriesRowOption( + "EVM Pool RPC Node Calls Success", + "", + `evm_pool_rpc_node_calls_success{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Node Calls Total", + "", + `evm_pool_rpc_node_calls_total{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( 
+ "EVM Pool RPC Node Dials Success", + "", + `evm_pool_rpc_node_dials_success{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Node Dials Failed", + "", + `evm_pool_rpc_node_dials_failed{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Node Dials Total", + "", + `evm_pool_rpc_node_dials_total{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Node Dials Failed", + "", + `evm_pool_rpc_node_dials_failed{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Total Transitions to Alive", + "", + `evm_pool_rpc_node_num_transitions_to_alive{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Total Transitions to In Sync", + "", + `evm_pool_rpc_node_num_transitions_to_in_sync{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Total Transitions to Out of Sync", + "", + `evm_pool_rpc_node_num_transitions_to_out_of_sync{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Total Transitions to Unreachable", + "", + `evm_pool_rpc_node_num_transitions_to_unreachable{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Total Transitions to invalid Chain ID", + "", + `evm_pool_rpc_node_num_transitions_to_invalid_chain_id{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Total Transitions to unusable", + "", + `evm_pool_rpc_node_num_transitions_to_unusable{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Node Polls Success", + "", + `evm_pool_rpc_node_polls_success{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Node Polls Total", + "", + `evm_pool_rpc_node_polls_total{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Node States", + "", + 
`evm_pool_rpc_node_states{namespace="${namespace}"}`, + "{{pod}} - {{evmChainID}} - {{state}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Node Verifies Total", + "", + `evm_pool_rpc_node_verifies{namespace="${namespace}"}`, + "{{pod}} - {{evmChainID}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Node Verifies Success", + "", + `evm_pool_rpc_node_verifies_success{namespace="${namespace}"}`, + "{{pod}} - {{evmChainID}}", + ), + m.timeseriesRowOption( + "EVM Pool RPC Node Verifies Failed", + "", + `evm_pool_rpc_node_verifies_failed{namespace="${namespace}"}`, + "{{pod}} - {{evmChainID}}", + ), + ), + dashboard.Row( + "EVM Pool RPC Node Latencies (App)", + row.Collapse(), + m.timeseriesRowOption( + "EVM Pool RPC Node Calls Latency 0.95 quantile", + "ms", + `histogram_quantile(0.95, sum(rate(evm_pool_rpc_node_rpc_call_time_bucket{namespace="${namespace}"}[$__rate_interval])) by (le, rpcCallName)) / 1e6`, + "{{pod}}", + ), + ), + dashboard.Row( + "Pipeline Metrics (Runner)", + row.Collapse(), + m.timeseriesRowOption( + "Pipeline Task Execution Time", + "Sec", + `pipeline_task_execution_time{namespace="${namespace}"} / 1e6`, + "{{ pod }} JobID: {{ job_id }}", + ), + m.timeseriesRowOption( + "Pipeline Run Errors", + "", + `pipeline_run_errors{namespace="${namespace}"}`, + "{{ pod }} JobID: {{ job_id }}", + ), + m.timeseriesRowOption( + "Pipeline Run Total Time to Completion", + "Sec", + `pipeline_run_total_time_to_completion{namespace="${namespace}"} / 1e6`, + "{{ pod }} JobID: {{ job_id }}", + ), + m.timeseriesRowOption( + "Pipeline Tasks Total Finished", + "", + `pipeline_tasks_total_finished{namespace="${namespace}"}`, + "{{ pod }} JobID: {{ job_id }}", + ), + ), + dashboard.Row( + "Pipeline Metrics (ETHCall)", + row.Collapse(), + m.timeseriesRowOption( + "Pipeline Task ETH Call Execution Time", + "Sec", + `pipeline_task_eth_call_execution_time{namespace="${namespace}"}`, + "{{pod}}", + ), + ), + dashboard.Row( + "Pipeline Metrics (HTTP)", + row.Collapse(), + 
m.timeseriesRowOption( + "Pipeline Task HTTP Fetch Time", + "Sec", + `pipeline_task_http_fetch_time{namespace="${namespace}"} / 1e6`, + "{{pod}}", + ), + m.timeseriesRowOption( + "Pipeline Task HTTP Response Body Size", + "Bytes", + `pipeline_task_http_response_body_size{namespace="${namespace}"}`, + "{{pod}}", + ), + ), + dashboard.Row( + "Pipeline Metrics (Bridge)", + row.Collapse(), + m.timeseriesRowOption( + "Pipeline Bridge Latency", + "Sec", + `bridge_latency_seconds{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "Pipeline Bridge Errors Total", + "", + `bridge_errors_total{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "Pipeline Bridge Cache Hits Total", + "", + `bridge_cache_hits_total{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "Pipeline Bridge Cache Errors Total", + "", + `bridge_cache_errors_total{namespace="${namespace}"}`, + "{{pod}}", + ), + ), + dashboard.Row( + "Pipeline Metrics", + row.Collapse(), + m.timeseriesRowOption( + "Pipeline Runs Queued", + "", + `pipeline_runs_queued{namespace="${namespace}"}`, + "{{pod}}", + ), + m.timeseriesRowOption( + "Pipeline Runs Tasks Queued", + "", + `pipeline_task_runs_queued{namespace="${namespace}"}`, + "{{pod}}", + ), + ), + } + logOptsFinal := make([]row.Option, 0) + logOptsFinal = append( + logOptsFinal, + row.Collapse(), + row.WithTimeSeries( + "Log Counters", + timeseries.Span(12), + timeseries.Height("200px"), + timeseries.DataSource(m.PrometheusDataSourceName), + timeseries.WithPrometheusTarget( + `log_panic_count{namespace="${namespace}"}`, + prometheus.Legend("{{pod}} - panic"), + ), + timeseries.WithPrometheusTarget( + `log_fatal_count{namespace="${namespace}"}`, + prometheus.Legend("{{pod}} - fatal"), + ), + timeseries.WithPrometheusTarget( + `log_critical_count{namespace="${namespace}"}`, + prometheus.Legend("{{pod}} - critical"), + ), + timeseries.WithPrometheusTarget( + `log_warn_count{namespace="${namespace}"}`, + 
prometheus.Legend("{{pod}} - warn"), + ), + timeseries.WithPrometheusTarget( + `log_error_count{namespace="${namespace}"}`, + prometheus.Legend("{{pod}} - error"), + ), + ), + m.logsRowOption("All errors", ` + {namespace="${namespace}", app="app", container="node"} + | json + | level="error" + | line_format "{{ .instance }} {{ .level }} {{ .ts }} {{ .logger }} {{ .caller }} {{ .msg }} {{ .version }} {{ .nodeTier }} {{ .nodeName }} {{ .node }} {{ .evmChainID }} {{ .nodeOrder }} {{ .mode }} {{ .nodeState }} {{ .sentryEventID }} {{ .stacktrace }}"`), + ) + logOptsFinal = append(logOptsFinal, m.logsRowOptionsForNodes(m.Nodes)...) + logRowOpts := dashboard.Row( + "Logs", + logOptsFinal..., + ) + opts = append(opts, logRowOpts) + opts = append(opts, m.extendedOpts...) + builder, err := dashboard.New( + "Plugin Cluster Dashboard", + opts..., + ) + m.opts = opts + m.builder = builder + return err +} + +// Deploy deploys the dashboard to Grafana +func (m *CLClusterDashboard) Deploy(ctx context.Context) error { + client := grabana.NewClient(&http.Client{}, m.GrafanaURL, grabana.WithAPIToken(m.GrafanaToken)) + folder, err := client.FindOrCreateFolder(ctx, m.Folder) + if err != nil { + return errors.Wrap(err, ErrFailedToCreateFolder) + } + if _, err := client.UpsertDashboard(ctx, folder, m.builder); err != nil { + return errors.Wrap(err, ErrFailedToCreateDashboard) + } + return nil +} diff --git a/charts/plugin-cluster/devspace.yaml b/charts/plugin-cluster/devspace.yaml new file mode 100644 index 00000000..037e3fac --- /dev/null +++ b/charts/plugin-cluster/devspace.yaml @@ -0,0 +1,487 @@ +version: v2beta1 +name: plugin + +vars: + NS_TTL: 72h + DEVSPACE_IMAGE: + noCache: true + source: env + # This is the base domain in AWS Route 53 that our ingress subdomains will use. + DEVSPACE_INGRESS_BASE_DOMAIN: + source: env + # This is the ARN of the AWS ACM certificate that will be used for the ingress. 
+ DEVSPACE_INGRESS_CERT_ARN: + source: env + # This is a comma separated list of CIDR blocks that will be allowed to access the ingress. + DEVSPACE_INGRESS_CIDRS: + source: env + +# This is a list of `pipelines` that DevSpace can execute (you can define your own) +pipelines: + dev: + run: |- + run_dependencies --all # 1. Deploy any projects this project needs (see "dependencies") + ensure_pull_secrets --all # 2. Ensure pull secrets + start_dev app # 3. Start dev mode "app" (see "dev" section) + deploy: + run: |- + set -o pipefail + echo "Removing .devspace cache!" + rm -rf .devspace/ || true + registry_id=$(echo "$DEVSPACE_IMAGE" | cut -d'.' -f1) + + # Login into registry + echo "Authorizing into ECR registry" + aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin ${registry_id}.dkr.ecr.us-west-2.amazonaws.com + + run_dependencies --all + ensure_pull_secrets --all + build_images ---var DOCKER_DEFAULT_PLATFORM=linux/amd64 --all -t $(git rev-parse --short HEAD) + kubectl annotate namespace ${DEVSPACE_NAMESPACE} janitor/ttl=${NS_TTL} || true + kubectl label namespace/${DEVSPACE_NAMESPACE} network=crib || true + if [ -n "$1" ]; then + echo "Deploying tag $1" + tag=$1 + image=${DEVSPACE_IMAGE}:$tag + else + echo "Deploying current commit tag: $(git rev-parse --short HEAD)" + tag=$(git rev-parse --short HEAD) + image=${DEVSPACE_IMAGE}:$tag + fi + + echo "Checking tag: $tag" + repository_name="plugin-devspace" + desired_tag=$tag + + # Check if the desired tag is present in the repository + image_list=$(aws ecr list-images --repository-name "$repository_name") + tag_exists=$(echo "$image_list" | jq -e '.imageIds[] | select(.imageTag == "'"${desired_tag}"'")' >/dev/null && echo true || echo false) + + # Check the value of the tag_exists variable + if [ "$tag_exists" = "true" ]; then + echo "Image tag '$tag' found." + else + echo "Image tag '$tag' not found. 
Please build the image using 'devspace deploy'" + exit 1 + fi + create_deployments app \ + --set=helm.values.plugin.nodes[0].image=$image \ + --set=helm.values.plugin.nodes[1].image=$image \ + --set=helm.values.plugin.nodes[2].image=$image \ + --set=helm.values.plugin.nodes[3].image=$image \ + --set=helm.values.plugin.nodes[4].image=$image \ + --set=helm.values.plugin.nodes[5].image=$image + echo "Namespace ${DEVSPACE_NAMESPACE} will be deleted in ${NS_TTL}" + + echo + echo "############################################" + echo "Ingress Domains" + echo "############################################" + ingress_names="node1 node2 node3 node4 node5 node6 geth-1337-http geth-1337-ws geth-2337-http geth-2337-ws" + for ingress in ${ingress_names}; do + echo "https://${DEVSPACE_NAMESPACE}-${ingress}.${DEVSPACE_INGRESS_BASE_DOMAIN}" + done + + purge: + run: |- + kubectl delete ns ${DEVSPACE_NAMESPACE} + +commands: + connect: |- + sudo kubefwd svc -n $1 + +images: + app: + image: ${DEVSPACE_IMAGE} + dockerfile: ../../core/plugin.devspace.Dockerfile + context: ../.. + docker: + disableFallback: true + +hooks: + - wait: + running: true + terminatedWithCode: 0 + timeout: 600 + container: + labelSelector: + # vars don't work here, = releaseName + release: "app" + events: ["after:deploy:app"] + name: "wait-for-pod-hook" + +# This is a list of `deployments` that DevSpace can create for this project +deployments: + app: + namespace: ${DEVSPACE_NAMESPACE} + helm: + releaseName: "app" + chart: + name: cl-cluster + path: . 
+ # for simplicity, we define all the values here + # they can be defined the same way in values.yml + # devspace merges these "values" with the "values.yaml" before deploy + values: + podSecurityContext: + fsGroup: 999 + + plugin: + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 14933 + runAsGroup: 999 + web_port: 6688 + p2p_port: 6690 + nodes: + - name: node-1 + image: ${DEVSPACE_IMAGE} + # default resources are 300m/1Gi + # first node need more resources to build faster inside container + # at least 2Gi of memory is required otherwise build will fail (OOM) + resources: + requests: + cpu: 2000m + memory: 2048Mi + limits: + cpu: 2000m + memory: 2048Mi + # override default config per node + # for example, use OCRv2 P2P setup, the whole config + # toml: | + # RootDir = './clroot' + # [Log] + # JSONConsole = true + # Level = 'debug' + # [WebServer] + # AllowOrigins = '*' + # SecureCookies = false + # SessionTimeout = '999h0m0s' + # [OCR2] + # Enabled = true + # [P2P] + # [P2P.V2] + # Enabled = false + # AnnounceAddresses = [] + # DefaultBootstrappers = [] + # DeltaDial = '15s' + # DeltaReconcile = '1m0s' + # ListenAddresses = [] + # [[EVM]] + # ChainID = '1337' + # MinContractPayment = '0' + # [[EVM.Nodes]] + # Name = 'node-0' + # WSURL = 'ws://geth:8546' + # HTTPURL = 'http://geth:8544' + # [WebServer.TLS] + # HTTPSPort = 0 + # or use overridesToml to override some part of configuration + # overridesToml: | + - name: node-2 + image: ${DEVSPACE_IMAGE} + - name: node-3 + image: ${DEVSPACE_IMAGE} + - name: node-4 + image: ${DEVSPACE_IMAGE} + - name: node-5 + image: ${DEVSPACE_IMAGE} + - name: node-6 + image: ${DEVSPACE_IMAGE} + + # each CL node have a dedicated PostgreSQL 11.15 + # use StatefulSet by setting: + # + # stateful: true + # capacity 10Gi + # + # if you are running long tests + db: + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsNonRoot: true + 
runAsUser: 999 + runAsGroup: 999 + stateful: false + resources: + requests: + cpu: 1 + memory: 1024Mi + limits: + cpu: 1 + memory: 1024Mi + # default cluster shipped with latest Geth ( dev mode by default ) + geth: + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 999 + runAsGroup: 999 + version: v1.12.0 + wsrpc-port: 8546 + httprpc-port: 8544 + chains: + - networkId: 1337 + - networkId: 2337 + blocktime: 1 + resources: + requests: + cpu: 1 + memory: 1024Mi + limits: + cpu: 1 + memory: 1024Mi + # mockserver is https://www.mock-server.com/where/kubernetes.html + # used to stub External Adapters + mockserver: + # image: "mockserver/mockserver" + # version: "mockserver-5.15.0" + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 999 + runAsGroup: 999 + enabled: true + releasenameOverride: mockserver + app: + runAsUser: 999 + readOnlyRootFilesystem: false + port: 1080 + resources: + requests: + cpu: 1 + memory: 1024Mi + limits: + cpu: 1 + memory: 1024Mi + runner: + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 999 + runAsGroup: 999 + stateful: false + resources: + requests: + cpu: 1 + memory: 512Mi + limits: + cpu: 1 + memory: 512Mi + affinity: {} + tolerations: [] + nodeSelector: {} + ingress: + enabled: false + className: "" + hosts: [] + tls: [] + annotations: {} + service: + type: NodePort + port: 8080 + + # monitoring.coreos.com/v1 PodMonitor for each node + prometheusMonitor: true + + networkPolicy: + ingress: + # Should be a comma separated list of CIDR blocks. To include + # AWS ALB private CIDRs and optionally other custom CIDRs. + # Example format: 10.0.0.0/16,192.168.0.1/24 + allowCustomCidrs: ${DEVSPACE_INGRESS_CIDRS} + # These ingresses create AWS ALB resources and Route 53 Records. 
+ ingress: + enabled: true + annotation_certificate_arn: ${DEVSPACE_INGRESS_CERT_ARN} + annotation_group_name: ${DEVSPACE_NAMESPACE} + hosts: + - host: ${DEVSPACE_NAMESPACE}-node1.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-node-1 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-node2.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-node-2 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-node3.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-node-3 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-node4.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-node-4 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-node5.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-node-5 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-node6.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-node-6 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-geth-1337-http.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: geth-1337 + port: + number: 8544 + - host: ${DEVSPACE_NAMESPACE}-geth-1337-ws.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: geth-1337 + port: + number: 8546 + - host: ${DEVSPACE_NAMESPACE}-geth-2337-http.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: geth-2337 + port: + number: 8544 + - host: ${DEVSPACE_NAMESPACE}-geth-2337-ws.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: geth-2337 + port: + number: 8546 + - host: ${DEVSPACE_NAMESPACE}-mockserver.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: mockserver + port: + number: 1080 + + # deployment placement, 
standard helm stuff + podAnnotations: + nodeSelector: + tolerations: + affinity: + +profiles: + # this replaces only "runner" pod, usable when you'd like to run some system level tests inside k8s + - name: runner + patches: + - op: replace + path: dev.app.workingDir + value: /home/plugin/integration-tests + - op: replace + path: dev.app.container + value: runner + - op: replace + path: dev.app.labelSelector.instance + value: runner-1 + - op: remove + path: dev.app.sync[1].uploadExcludePaths[0] + - op: remove + path: dev.app.open + - op: remove + path: dev.app.ports[1] + - name: node + patches: + - op: replace + path: dev.app.container + value: node + - op: replace + path: dev.app.labelSelector.instance + value: node-1 + +# This is a list of `dev` containers that are based on the containers created by your deployments +dev: + app: + workingDir: /home/plugin + container: node + labelSelector: + instance: node-1 + # Sync files between the local filesystem and the development container + sync: + - path: ../../core/services/plugin:/home/plugin/core/services/plugin + printLogs: true + disableDownload: true + - path: ../..:/home/plugin + printLogs: true + disableDownload: true + uploadExcludePaths: + - integration-tests/ + - .github/ + - belt/ + - charts/ + - contracts/ + - node_modules/ + - integration/ + - integration-scripts/ + - testdata/ + - evm-test-helpers/ + # Open a terminal and use the following command + terminal: + command: bash + ssh: + enabled: true + proxyCommands: + # TODO: access issues + # - command: devspace + # - command: kubectl + # - command: helm + - gitCredentials: true + ports: + - port: "2345" diff --git a/charts/plugin-cluster/devspace_start.sh b/charts/plugin-cluster/devspace_start.sh new file mode 100644 index 00000000..368b5dd9 --- /dev/null +++ b/charts/plugin-cluster/devspace_start.sh @@ -0,0 +1,36 @@ +#!/bin/bash +set +e # Continue on errors + +COLOR_BLUE="\033[0;94m" +COLOR_GREEN="\033[0;92m" +COLOR_RESET="\033[0m" + +# Print useful 
output for user +echo -e "${COLOR_BLUE} + %########% + %###########% ____ _____ + %#########% | _ \ ___ __ __ / ___/ ____ ____ ____ ___ + %#########% | | | | / _ \\\\\ \ / / \___ \ | _ \ / _ | / __// _ \\ + %#############% | |_| |( __/ \ V / ____) )| |_) )( (_| |( (__( __/ + %#############% |____/ \___| \_/ \____/ | __/ \__,_| \___\\\\\___| + %###############% |_| + %###########%${COLOR_RESET} + + +Welcome to your development container! + +This is how you can work with it: +- Files will be synchronized between your local machine and this container +- Some ports will be forwarded, so you can access this container via localhost +- Run \`${COLOR_GREEN}go run main.go${COLOR_RESET}\` to start the application +" + +# Set terminal prompt +export PS1="\[${COLOR_BLUE}\]devspace\[${COLOR_RESET}\] ./\W \[${COLOR_BLUE}\]\\$\[${COLOR_RESET}\] " +if [ -z "$BASH" ]; then export PS1="$ "; fi + +# Include project's bin/ folder in PATH +export PATH="./bin:$PATH" + +# Open shell +bash --norc diff --git a/charts/plugin-cluster/go.mod b/charts/plugin-cluster/go.mod new file mode 100644 index 00000000..cdb7d1cf --- /dev/null +++ b/charts/plugin-cluster/go.mod @@ -0,0 +1,186 @@ +module github.com/goplugin/pluginv3.0/charts/plugin-cluster/dashboard + +go 1.21 + +require ( + github.com/K-Phoen/grabana v0.21.19 + github.com/pkg/errors v0.9.1 + github.com/goplugin/wasp v0.3.6 +) + +require ( + github.com/K-Phoen/sdk v0.12.3 // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/aws/aws-sdk-go v1.44.217 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee // indirect + github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 // indirect + github.com/cespare/xxhash/v2 
v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dennwc/varint v1.0.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/edsrzf/mmap-go v1.1.0 // indirect + github.com/emicklei/go-restful/v3 v3.10.1 // indirect + github.com/fatih/color v1.14.1 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/gin-gonic/gin v1.8.1 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.3 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/spec v0.20.8 // indirect + github.com/go-openapi/strfmt v0.21.3 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/validate v0.22.1 // indirect + github.com/go-playground/locales v0.14.0 // indirect + github.com/go-playground/universal-translator v0.18.0 // indirect + github.com/go-playground/validator/v10 v10.11.1 // indirect + github.com/go-resty/resty/v2 v2.7.0 // indirect + github.com/goccy/go-json v0.9.11 // indirect + github.com/gogo/googleapis v1.4.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/gogo/status v1.1.1 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/gnostic v0.6.9 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.3.0 // 
indirect + github.com/gorilla/mux v1.8.0 // indirect + github.com/gosimple/slug v1.13.1 // indirect + github.com/gosimple/unidecode v1.0.1 // indirect + github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2 // indirect + github.com/grafana/loki v1.6.2-0.20231017135925-990ac685e6a6 // indirect + github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765 // indirect + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect + github.com/hashicorp/consul/api v1.20.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-hclog v1.4.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/golang-lru v0.6.0 // indirect + github.com/hashicorp/memberlist v0.5.0 // indirect + github.com/hashicorp/serf v0.10.1 // indirect + github.com/imdario/mergo v0.3.13 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/julienschmidt/httprouter v1.3.0 // indirect + github.com/klauspost/compress v1.16.3 // indirect + github.com/leodido/go-urn v1.2.1 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/miekg/dns v1.1.51 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // 
indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect + github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pelletier/go-toml/v2 v2.0.6 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/alertmanager v0.25.0 // indirect + github.com/prometheus/client_golang v1.15.1 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/common/sigv4 v0.1.0 // indirect + github.com/prometheus/exporter-toolkit v0.9.1 // indirect + github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/prometheus v0.43.1-0.20230327151049-211ae4f1f0a2 // indirect + github.com/rs/zerolog v1.29.0 // indirect + github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect + github.com/sercand/kuberesolver/v4 v4.0.0 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/testify v1.8.3 // indirect + github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect + github.com/uber/jaeger-lib v2.4.1+incompatible // indirect + github.com/ugorji/go/codec v1.2.7 // indirect + github.com/weaveworks/common v0.0.0-20230411130259-f7d83a041205 // indirect + github.com/weaveworks/promrus v1.2.0 // indirect + go.etcd.io/etcd/api/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/v3 v3.5.7 // indirect + go.mongodb.org/mongo-driver v1.11.2 // indirect + 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 // indirect + go.opentelemetry.io/otel v1.14.0 // indirect + go.opentelemetry.io/otel/metric v0.37.0 // indirect + go.opentelemetry.io/otel/trace v1.14.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/goleak v1.2.1 // indirect + go.uber.org/multierr v1.9.0 // indirect + go.uber.org/ratelimit v0.2.0 // indirect + go.uber.org/zap v1.21.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/exp v0.0.0-20230307190834-24139beb5833 // indirect + golang.org/x/mod v0.9.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.7.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/grpc v1.57.2 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.26.2 // indirect + k8s.io/apimachinery v0.26.2 // indirect + k8s.io/client-go v0.26.2 // indirect + k8s.io/klog/v2 v2.90.1 // indirect + k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d // indirect + k8s.io/utils v0.0.0-20230308161112-d77c459e9343 // indirect + nhooyr.io/websocket v1.8.7 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) + +replace ( + github.com/go-kit/log => github.com/go-kit/log v0.2.1 + + // replicating the 
replace directive on cosmos SDK + github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 + + // until merged upstream: https://github.com/hashicorp/go-plugin/pull/257 + github.com/hashicorp/go-plugin => github.com/goplugin/go-plugin v0.0.0-20240208201424-b3b91517de16 + + // until merged upstream: https://github.com/mwitkow/grpc-proxy/pull/69 + github.com/mwitkow/grpc-proxy => github.com/goplugin/grpc-proxy v0.0.0-20230731113816-f1be6620749f + + github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.43.1-0.20230327151049-211ae4f1f0a2 + github.com/sercand/kuberesolver/v4 => github.com/sercand/kuberesolver/v5 v5.1.1 +) diff --git a/charts/plugin-cluster/go.sum b/charts/plugin-cluster/go.sum new file mode 100644 index 00000000..ee810dc8 --- /dev/null +++ b/charts/plugin-cluster/go.sum @@ -0,0 +1,2126 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod 
h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= 
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod 
h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod 
h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod 
h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= 
+cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod 
h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= 
+cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex 
v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod 
h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod 
h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= 
+cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod 
h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod 
h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.7.0/go.mod 
h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod 
h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod 
h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod 
h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= 
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod 
h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod 
h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= 
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.13.0/go.mod 
h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod 
h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= +github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= +github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod 
h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/K-Phoen/grabana v0.21.19 h1:tJjRO8nN9JrFjLoQGtOB9P5ILoqENZZGAtt3nK+Ry2Y= 
+github.com/K-Phoen/grabana v0.21.19/go.mod h1:B7gxVxacQUgHWmgqduf4WPZoKYHO1mvZnRVCoyQiwdw= +github.com/K-Phoen/sdk v0.12.3 h1:ScutEQASc9VEKJCm3OjIMD82BIS9B2XtNg3gEf6Gs+M= +github.com/K-Phoen/sdk v0.12.3/go.mod h1:qmM0wO23CtoDux528MXPpYvS4XkRWkWX6rvX9Za8EVU= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/alecthomas/kingpin/v2 v2.3.1/go.mod h1:oYL5vtsvEHZGHxU7DMp32Dvx+qL+ptGn6lWaot2vCNE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 
h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 h1:MzBOUgng9orim59UnfUTLRjMpd09C5uEVQ6RPGeCaVI= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go 
v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.44.217 h1:FcWC56MRl+k756aH3qeMQTylSdeJ58WN0iFz3fkyRz0= +github.com/aws/aws-sdk-go v1.44.217/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee h1:BnPxIde0gjtTnc9Er7cxvBk8DHLWhEux0SxayC8dP6I= +github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= +github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 h1:SjZ2GvvOononHOpK84APFuMvxqsk3tEIaKH/z4Rpu3g= +github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod 
h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go 
v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dennwc/varint v1.0.0 
h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= +github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/digitalocean/godo v1.97.0 h1:p9w1yCcWMZcxFSLPToNGXA96WfUVLXqoHti6GzVomL4= +github.com/digitalocean/godo v1.97.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY= +github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= +github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= +github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate 
v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= +github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod 
h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 
h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= +github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/spec v0.20.4/go.mod 
h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= +github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= +github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= +github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= 
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= +github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= +github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= +github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny 
v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws 
v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= +github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= +github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof 
v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gophercloud/gophercloud v1.2.0 h1:1oXyj4g54KBg/kFtCdMM6jtxSzeIyg8wv4z1HoGPp1E= +github.com/gophercloud/gophercloud v1.2.0/go.mod 
h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosimple/slug v1.13.1 h1:bQ+kpX9Qa6tHRaK+fZR0A0M2Kd7Pa5eHPPsb1JpHD+Q= +github.com/gosimple/slug v1.13.1/go.mod h1:UiRaFH+GEilHstLUmcBgWcI42viBN7mAb818JrYOeFQ= +github.com/gosimple/unidecode v1.0.1 h1:hZzFTMMqSswvf0LBJZCZgThIZrpDHFXux9KeGmn6T/o= +github.com/gosimple/unidecode v1.0.1/go.mod h1:CP0Cr1Y1kogOtx0bJblKzsVWrqYaqfNOnHzpgWw4Awc= +github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2 h1:IOks+FXJ6iO/pfbaVEf4efNw+YzYBYNCkCabyrbkFTM= +github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2/go.mod h1:zj+5BNZAVmQafV583uLTAOzRr963KPdEm4d6NPmtbwg= +github.com/grafana/loki v1.6.2-0.20231017135925-990ac685e6a6 h1:V5PspEXlSlNh22sMyGkgfSOVVLTsSmhbmsp1VPt8Fdc= +github.com/grafana/loki v1.6.2-0.20231017135925-990ac685e6a6/go.mod h1:+aWr7OBDuZMT+p0rKmLfW5saO2m3YOGBnt++IlgLhVk= +github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765 h1:VXitROTlmZtLzvokNe8ZbUKpmwldM4Hy1zdNRO32jKU= +github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765/go.mod h1:DhJMrd2QInI/1CNtTN43BZuTmkccdizW1jZ+F6aHkhY= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod 
h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= +github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo= +github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= +github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0= +github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= +github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= +github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack 
v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= +github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= +github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b h1:EkuSTU8c/63q4LMayj8ilgg/4I5PXDFVcnqKfs9qcwI= +github.com/hashicorp/nomad/api v0.0.0-20230308192510-48e7d70fcd4b/go.mod h1:bKUb1ytds5KwUioHdvdq9jmrDqCThv95si0Ub7iNeBg= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hetznercloud/hcloud-go v1.41.0 h1:KJGFRRc68QiVu4PrEP5BmCQVveCP2CM26UGQUKGpIUs= +github.com/hetznercloud/hcloud-go v1.41.0/go.mod h1:NaHg47L6C77mngZhwBG652dTAztYrsZ2/iITJKhQkHA= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ionos-cloud/sdk-go/v6 v6.1.4 h1:BJHhFA8Q1SZC7VOXqKKr2BV2ysQ2/4hlk1e4hZte7GY= 
+github.com/ionos-cloud/sdk-go/v6 v6.1.4/go.mod h1:Ox3W0iiEz0GHnfY9e5LmAxwklsxguuNFEUSu0gVRTME= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0 
h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY= +github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences 
v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/linode/linodego v1.14.1 h1:uGxQyy0BidoEpLGdvfi4cPgEW+0YUFsEGrLEhcTfjNc= +github.com/linode/linodego v1.14.1/go.mod h1:NJlzvlNtdMRRkXb0oN6UWzUkj6t+IBsyveHgZ5Ppjyk= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.51 h1:0+Xg7vObnhrz/4ZCZcZh7zPXlmU0aveS2HDBd0m0qSo= +github.com/miekg/dns v1.1.51/go.mod h1:2Z9d3CP1LQWihRZUf29mQ19yDThaI4DAYzte2CaQW5c= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= 
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod 
h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= +github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= +github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= +github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= +github.com/ovh/go-ovh v1.3.0/go.mod h1:AxitLZ5HBRPyUd+Zl60Ajaag+rNTdVXWIkzfrVuTXWA= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= 
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= +github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= 
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prometheus/alertmanager v0.25.0 h1:vbXKUR6PYRiZPRIKfmXaG+dmCKG52RtPL4Btl8hQGvg= +github.com/prometheus/alertmanager v0.25.0/go.mod h1:MEZ3rFVHqKZsw7IcNS/m4AWZeXThmJhumpiWR4eHU/w= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.4.1/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= +github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= +github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0= +github.com/prometheus/exporter-toolkit v0.9.1 h1:cNkC01riqiOS+kh3zdnNwRsbe/Blh0WwK3ij5rPJ9Sw= +github.com/prometheus/exporter-toolkit v0.9.1/go.mod h1:iFlTmFISCix0vyuyBmm0UqOUCTao9+RsAsKJP3YM9ec= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.9.0 
h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/prometheus v0.43.1-0.20230327151049-211ae4f1f0a2 h1:i5hmbBzR+VeL5pPl1ZncsJ1bpg3SO66bwkE1msJBsMA= +github.com/prometheus/prometheus v0.43.1-0.20230327151049-211ae4f1f0a2/go.mod h1:Mm42Acga98xgA+u5yTaC3ki3i0rJEJWFpbdHN7q2trk= +github.com/pyroscope-io/client v0.6.0 h1:rcUFgcnfmuyVYDYT+4d0zfqc8YedOyruHSsUb9ImaBw= +github.com/pyroscope-io/client v0.6.0/go.mod h1:4h21iOU4pUOq0prKyDlvYRL+SCKsBc5wKiEtV+rJGqU= +github.com/pyroscope-io/godeltaprof v0.1.2 h1:MdlEmYELd5w+lvIzmZvXGNMVzW2Qc9jDMuJaPOR75g4= +github.com/pyroscope-io/godeltaprof v0.1.2/go.mod h1:psMITXp90+8pFenXkKIpNhrfmI9saQnPbba27VIaiQE= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.29.0 
h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w= +github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14 h1:yFl3jyaSVLNYXlnNYM5z2pagEk1dYQhfr1p20T1NyKY= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.14/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= +github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/goplugin/wasp v0.3.6 h1:1TLWfrTzqZwNvyyoKzPZ8FLQat2lNz640eM+mMh2YxM= +github.com/goplugin/wasp v0.3.6/go.mod 
h1:L/cyUGfpaWxy/2twOVJLRt2mySJEIqGrFj9nyvRLpSo= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod 
h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= +github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= +github.com/weaveworks/common v0.0.0-20230411130259-f7d83a041205 
h1:gjb7t9LCnRu14LHubyLIgrE+EYlAaREiPn/VknV7R3s= +github.com/weaveworks/common v0.0.0-20230411130259-f7d83a041205/go.mod h1:O9wmSPNVSuqxzUZPFlHnPQ8xnyvx0qBnKGFfGbj95uY= +github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= +github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xhit/go-str2duration v1.2.0/go.mod h1:3cPSlfZlUHVlneIVfePFWcJZsuwf+P1v2SRTV4cUmp4= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert 
v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= +go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= +go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= +go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= +go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= +go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= +go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= +go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= +go.mongodb.org/mongo-driver v1.11.2 h1:+1v2rDQUWNcGW7/7E0Jvdz51V38XXxJfhzbV17aNHCw= +go.mongodb.org/mongo-driver v1.11.2/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod 
h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI= +go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= +go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= +go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs= +go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s= +go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= +go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/ratelimit v0.2.0 h1:UQE2Bgi7p2B85uP5dC2bbRtig0C+OeNRnNEafLjsLPA= +go.uber.org/ratelimit v0.2.0/go.mod h1:YYBV4e4naJvhpitQrWJu1vCpgB7CboMe0qhltKt6mUg= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= 
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20230307190834-24139beb5833 h1:SChBja7BCQewoTAU7IgvucQKMIXrEpFxNMs0spT3/5s= +golang.org/x/exp v0.0.0-20230307190834-24139beb5833/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= 
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint 
v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 
+golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net 
v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 
v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 
v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= 
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod 
h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod 
h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto 
v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= 
+google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod 
h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto 
v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= 
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod 
h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.57.2 h1:uw37EN34aMFFXB2QPW7Tq6tdTbind1GpRxw5aOX3a5k= +google.golang.org/grpc v1.57.2/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= 
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools 
v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= +k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= +k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= +k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= +k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU= +k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= +k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= +k8s.io/utils v0.0.0-20230308161112-d77c459e9343/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= 
+modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= 
+nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/charts/plugin-cluster/setup.sh b/charts/plugin-cluster/setup.sh new file mode 100644 index 00000000..536bef57 --- /dev/null +++ b/charts/plugin-cluster/setup.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +set -e +user_home="$HOME" +file_path="$user_home/.aws/config" +image="" +registry_id=$(echo "$DEVSPACE_IMAGE" | cut -d'.' 
-f1) + +if grep -q "staging-crib" "$file_path"; then + echo "Staging AWS config is already applied, role is 'staging-crib'" +else + cat <<EOF >> "$file_path" +[profile staging-crib] +region=us-west-2 +sso_start_url=https://smartcontract.awsapps.com/start +sso_region=us-west-2 +sso_account_id=${registry_id} +sso_role_name=CRIB-ECR-Power +EOF + echo "~/.aws/config modified, added 'staging-crib'" +fi + +# Login through SSO +aws sso login --profile staging-crib +# Update kubeconfig and switch context +export AWS_PROFILE=staging-crib +aws eks update-kubeconfig --name main-stage-cluster --alias main-stage-cluster-crib --profile staging-crib + +# Check if the Docker daemon is running +if docker info > /dev/null 2>&1; then + echo "Docker daemon is running, authorizing registry" +else + echo "Docker daemon is not running, exiting" + exit 1 +fi +aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin ${registry_id}.dkr.ecr.us-west-2.amazonaws.com +devspace use namespace $1 diff --git a/charts/plugin-cluster/templates/geth-config-map.yaml b/charts/plugin-cluster/templates/geth-config-map.yaml new file mode 100644 index 00000000..0d9abb04 --- /dev/null +++ b/charts/plugin-cluster/templates/geth-config-map.yaml @@ -0,0 +1,155 @@ +{{ if (hasKey .Values "geth") }} +{{- range $cfg := .Values.geth.chains }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: geth-{{ $cfg.networkId }}-cm + release: {{ $.Release.Name }} + name: geth-{{ $cfg.networkId }}-cm +data: + key1: | + 
{"address":"f39fd6e51aad88f6f4ce6ab8827279cfffb92266","crypto":{"cipher":"aes-128-ctr","ciphertext":"c36afd6e60b82d6844530bd6ab44dbc3b85a53e826c3a7f6fc6a75ce38c1e4c6","cipherparams":{"iv":"f69d2bb8cd0cb6274535656553b61806"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"80d5f5e38ba175b6b89acfc8ea62a6f163970504af301292377ff7baafedab53"},"mac":"f2ecec2c4d05aacc10eba5235354c2fcc3776824f81ec6de98022f704efbf065"},"id":"e5c124e9-e280-4b10-a27b-d7f3e516b408","version":3} + key2: | + {"address":"70997970c51812dc3a010c7d01b50e0d17dc79c8","crypto":{"cipher":"aes-128-ctr","ciphertext":"f8183fa00bc112645d3e23e29a233e214f7c708bf49d72750c08af88ad76c980","cipherparams":{"iv":"796d08e3e1f71bde89ed826abda96cda"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"03c864a22a1f7b06b1da12d8b93e024ac144f898285907c58b2abc135fc8a35c"},"mac":"5fe91b1a1821c0d9f85dfd582354ead9612e9a7e9adc38b06a2beff558c119ac"},"id":"d2cab765-5e30-42ae-bb91-f090d9574fae","version":3} + key3: | + {"address":"3c44cdddb6a900fa2b585dd299e03d12fa4293bc","crypto":{"cipher":"aes-128-ctr","ciphertext":"2cd6ab87086c47f343f2c4d957eace7986f3b3c87fc35a2aafbefb57a06d9f1c","cipherparams":{"iv":"4e16b6cd580866c1aa642fb4d7312c9b"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"0cabde93877f6e9a59070f9992f7a01848618263124835c90d4d07a0041fc57c"},"mac":"94b7776ea95b0ecd8406c7755acf17b389b7ebe489a8942e32082dfdc1f04f57"},"id":"ade1484b-a3bb-426f-9223-a1f5e3bde2e8","version":3} + key4: | + 
{"address":"90f79bf6eb2c4f870365e785982e1f101e93b906","crypto":{"cipher":"aes-128-ctr","ciphertext":"15144214d323871e00f7b205368128061c91b77a27b7deec935f8f5b734f0d42","cipherparams":{"iv":"bb22ba8051ef9f60abded7a9f4f2c6ae"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"07331ef3035567c00830b4e50d5dd68bc877974b4ce38cd42fef755be01556c9"},"mac":"2294eacadaf2761851814451d8c7dcca20a606a0344335d98f09403aba4e82ca"},"id":"96af8cc7-97e1-4bba-8968-632b034986c2","version":3} + key5: | + {"address":"15d34aaf54267db7d7c367839aaf71a00a2c6a65","crypto":{"cipher":"aes-128-ctr","ciphertext":"057878284a6c74d3ad99910adddd6b477b383837dbf2280efea585f0f0fdb012","cipherparams":{"iv":"e6eab29d60b526f305f8d47badf48687"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"dfdca8066d2486da5cb9a909d03744e2a8c6537930271e85e7cd8a5d952c0f22"},"mac":"f8352be41c9a06d69111ca4d8fcff0eef079b68b1173cad99803538991716c5d"},"id":"a35bb452-0d57-42d5-8d25-5a00a40a4db8","version":3} + key6: | + {"address":"9965507d1a55bcc2695c58ba16fb37d819b0a4dc","crypto":{"cipher":"aes-128-ctr","ciphertext":"5a73201500307c6aa98edd44d962b344a893768331454a61595ec848e738e9d2","cipherparams":{"iv":"5282de2b3e2b305019a2fed5c62f3383"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"6ad001831d097f175fff7d6cf61301e9620b32afd9a7a6437e6030af14576a96"},"mac":"0a55eddbd13c713aa8b8c4106b2fb62bc1d1e18e7177207a444f83a4d8426ed5"},"id":"27aed2b2-cb94-4d37-8819-b15219187bb5","version":3} + key7: | + 
{"address":"976ea74026e726554db657fa54763abd0c3a0aa9","crypto":{"cipher":"aes-128-ctr","ciphertext":"a6edf11e81b38e60a549696236cb9efc026e87adc45a9521ea7b2c45a2a9fbb9","cipherparams":{"iv":"82f4c79cd4b28a8585a9c78d758f832b"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"87400e16ecc320dadff85eccbf4dbaaea2dd91e50047e4aa391799bb319c1fd8"},"mac":"80c83dad05998db6c673a97096fcfad54636458f4a3c82483686b253f8cc9b69"},"id":"fc7d7694-6206-48fc-bb25-36b523f90df6","version":3} + key8: | + {"address":"14dc79964da2c08b23698b3d3cc7ca32193d9955","crypto":{"cipher":"aes-128-ctr","ciphertext":"410f258bc8b12a0250cba22cbc5e413534fcf90bf322ced6943189ad9e43b4b9","cipherparams":{"iv":"1dd6077a8bee9b3bf2ca90e6abc8a237"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"5d3358bf99bbcb82354f40e5501abf4336bc141ee05d8feed4fbe7eb8c08c917"},"mac":"9cd959fa1e8129a8deb86e0264ec81d6cde79b5a19ae259b7d00543c9037908a"},"id":"689d7ad2-fe46-4c09-9c2a-a50e607989b8","version":3} + key9: | + {"address":"23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f","crypto":{"cipher":"aes-128-ctr","ciphertext":"13dccac740314edea20d44e6f3592575bbcb739ec5892d635326cff3c386eb86","cipherparams":{"iv":"bf42d811cd41fa97ddcae3425f8c3211"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"d2fa67cbb5e86d5bf9a90e27b8747bac493614b45778d43e9da1c14e06b2401d"},"mac":"7d2797cf344704d8f36265238d3938e06952c78ab7dfcbac53dc7f472c93d933"},"id":"4c8e899e-80f0-4417-9b1e-c5e29049f1e7","version":3} + key10: | + 
{"address":"a0ee7a142d267c1f36714e4a8f75612f20a79720","crypto":{"cipher":"aes-128-ctr","ciphertext":"56bc8766f47aeafae74eea333e1e890a3776d7fae6c48cbdbffb270655ce050d","cipherparams":{"iv":"a66129e6a110b3ddf93b4355aa147c58"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"15c4e8bcc80920139eb236d91194825f1fce27dd2af281e0f2752d8a5dbc48bd"},"mac":"db01e720866ce8bb7897dfc7773e064003ad53429a79732ee769cf6d02273570"},"id":"87b2d76f-1b70-4e4f-8b2a-5d1915c1177c","version":3} + key11: | + {"address":"bcd4042de499d14e55001ccbb24a551f3b954096","crypto":{"cipher":"aes-128-ctr","ciphertext":"e455eda6e38d246c03b930f845adfc8721ca75e9f47135cd4c18dbc3e5c5440a","cipherparams":{"iv":"0b1a0a24acc1ad25b0f170f751c2cb27"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"69f324ed0787794878bf5f84d4dbbc70dec1274cad666399edc48640605f64c8"},"mac":"f07da09c460a69f943f5639545d2b3f72c1e9789f0421ad41d3078ea3db12c96"},"id":"7ec7bb3c-c486-4785-a4fc-f8f4b2fc7764","version":3} + key12: | + {"address":"71be63f3384f5fb98995898a86b02fb2426c5788","crypto":{"cipher":"aes-128-ctr","ciphertext":"4194377a05fd3d13e0a3155dad974a003fe5f7a3b5acb35d7d97c50daa8990d4","cipherparams":{"iv":"607670778baf62b1e86394cf1980487a"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"d63b890ad7f4fcc857681faabe9319dffc53893966ef0810bf64c4f319b0ffc5"},"mac":"bfaf924959e65c8030ece259d52ed52d5d21bd74f1a67ae545d4bb289a479e16"},"id":"0c6af842-384f-49b6-b5b7-199a1e05486b","version":3} + key13: | + 
{"address":"fabb0ac9d68b0b445fb7357272ff202c5651694a","crypto":{"cipher":"aes-128-ctr","ciphertext":"6bad79e57a9b80e1b2aa7292a2f72613615a43da2796abeac071ad06a2decff6","cipherparams":{"iv":"91f24b0da7179a0e62803cb2cba6bb49"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"153a063a0dcf7abe7fd730ba908d00856783f0814e1b684a6a08b519d5dbadda"},"mac":"b007d01324b2d05c9a0333e4fbf826681ee2f5ddb4007c6fab20ef923481ba8f"},"id":"fb09dfff-ac3b-46c5-8d88-f4549007977b","version":3} + key14: | + {"address":"1cbd3b2770909d4e10f157cabc84c7264073c9ec","crypto":{"cipher":"aes-128-ctr","ciphertext":"3a7b9d84afb8d6241974cf35a34cbce077aae04f4c0aea1730167c9d8266bd74","cipherparams":{"iv":"329b510bce6e63a53827a0c307c041b5"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"6648c7f4ed24d1134dcf5983a5ca4bc5ae16787f0813530d5ae4c9c61dd3623f"},"mac":"65098dcdaabfb404539aeeea20f41b24886f9d3a55b9a841e2769692e19b75e8"},"id":"2053c6ce-da95-44d2-af1f-1939319aaa45","version":3} + key15: | + {"address":"df3e18d64bc6a983f673ab319ccae4f1a57c7097","crypto":{"cipher":"aes-128-ctr","ciphertext":"ab5802ecd849b0931d822a59597f9b59277526c13dcf792d1f6771ff0a63c902","cipherparams":{"iv":"c10e50c9f7359d281bfddd90ffeee967"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"97848515d8c93caab641882fea9b1428de32a8cf006d998893d01c03a0c2f48c"},"mac":"a2c5fbf7f78e498ee4a8bb242b4a03d4fb77f4135218a5af1d4b99ebd8991c9d"},"id":"83fa51c4-48f7-4d1d-b368-c9bd9d8841ee","version":3} + key16: | + 
{"address":"cd3b766ccdd6ae721141f452c550ca635964ce71","crypto":{"cipher":"aes-128-ctr","ciphertext":"05f18b7afa63c3748f3c5c99c8f036df1295f7660380e3c6a3fc6252f40566aa","cipherparams":{"iv":"3346ffa57c1b04f678d373cce4e4383a"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"c21f4d22a2324039b3f7cd6dbfa757bc489fa676d700b3ee88d0e3b334698720"},"mac":"7762c08bb673333946e87c33108349d10850c559104e863c5b0826cd6e64df78"},"id":"67b3c79c-d696-4e1b-a381-d67553bc674b","version":3} + key17: | + {"address":"2546bcd3c84621e976d8185a91a922ae77ecec30","crypto":{"cipher":"aes-128-ctr","ciphertext":"accd54a6b3969a4aed305baa0cd35ce4b54dcc92d866ff8b3d23536609a282eb","cipherparams":{"iv":"93be3f925ddea06113b3dcac650e9b6b"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"092f7823a668981e14f715ae7352fa176461cddd94267f9dd6f23f9fe663ba9a"},"mac":"bb4792148205a4f0d8dfc5cd04725d0a4785a6d7e6ca0c208ffc0b005947821b"},"id":"368b24f6-6d1b-4fd5-a506-d73b5bb4aac8","version":3} + key18: | + {"address":"bda5747bfd65f08deb54cb465eb87d40e51b197e","crypto":{"cipher":"aes-128-ctr","ciphertext":"8bc3bf720dce7d010617edaf6db4ed35701c13ff67cd9dce742de77b4b4b3df0","cipherparams":{"iv":"50075ca52dd388719b27e3029f01a484"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"8e365100caeda20cc0a13c28517d62fc41d5cae1da4dae3fef51daf4b323da81"},"mac":"5ed7ad5aca897fd03ebeb08a7fcfa38d2eadc240b6eb6ad53f7c06c6ee9085de"},"id":"8b627671-46e5-4081-a25a-c928b135a87e","version":3} + key19: | + 
{"address":"dd2fd4581271e230360230f9337d5c0430bf44c0","crypto":{"cipher":"aes-128-ctr","ciphertext":"39c1bbd9c070465fb7828be95b3b0978482aab16b8b1f11db0022321bcaa9172","cipherparams":{"iv":"35bd60a818c50e65fee00031f0a33d8e"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"eecd539685126cfc1f444070d7528c5f570d71c48994118af8c50c316ef72a99"},"mac":"93224b4767a57a8cd394c37087f0dfc9393b381f09c45c5abc55cec5d719181f"},"id":"321978cb-f1bf-4790-8a29-5e5878f8dbb9","version":3} + key20: | + {"address":"8626f6940e2eb28930efb4cef49b2d1f2c9c1199","crypto":{"cipher":"aes-128-ctr","ciphertext":"8a3f1dc7b110c5ce59007f6e55fd7f055b41e518b1f34e93b4185aa44d8a1f92","cipherparams":{"iv":"02fe1b25039baf5abbdd448b932b2ab5"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"a75a6b250851f76f36132aad3ca944493b7d20a07bd4c7b1d8574596a240b35f"},"mac":"679d96ef17c9ac86960e61ced85eb551ba1895cfde2c8bb099e38dc1251a44f0"},"id":"5d3a08df-78f3-43e8-8978-75652502c962","version":3} + password.txt: | + init.sh: | + #!/bin/bash + if [ ! -d /chain/chain-data/keystore ]; then + echo "/chain/chain-data/keystore not found, running 'geth init'..." + geth init --datadir /chain/chain-data/ /chain/genesis.json + echo "...done!" 
+ cp /chain/config/key* /chain/chain-data/keystore + fi + + cd /chain/chain-data && geth "$@" + genesis.json: | + { + "config": { + "chainId": {{ $cfg.networkId }}, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "eip160Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "clique": { + "period": 2, + "epoch": 30000 + } + }, + "nonce": "0x0000000000000042", + "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "difficulty": "1", + "coinbase": "0x3333333333333333333333333333333333333333", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb922660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "gasLimit": "8000000000", + "alloc": { + "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266": { + "balance": "20000000000000000000000" + }, + "0x70997970C51812dc3A010C7d01b50e0d17dc79C8": { + "balance": "20000000000000000000000" + }, + "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC": { + "balance": "20000000000000000000000" + }, + "0x90F79bf6EB2c4f870365E785982E1f101E93b906": { + "balance": "20000000000000000000000" + }, + "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65": { + "balance": "20000000000000000000000" + }, + "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc": { + "balance": "20000000000000000000000" + }, + "0x976EA74026E726554dB657fA54763abd0C3a0aa9": { + "balance": "20000000000000000000000" + }, + "0x14dC79964da2C08b23698B3D3cc7Ca32193d9955": { + "balance": "20000000000000000000000" + }, + "0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f": { + "balance": "20000000000000000000000" + }, + "0xa0Ee7A142d267C1f36714E4a8F75612F20a79720": { + "balance": "20000000000000000000000" 
+ }, + "0xBcd4042DE499D14e55001CcbB24a551F3b954096": { + "balance": "20000000000000000000000" + }, + "0x71bE63f3384f5fb98995898A86B02Fb2426c5788": { + "balance": "20000000000000000000000" + }, + "0xFABB0ac9d68B0B445fB7357272Ff202C5651694a": { + "balance": "20000000000000000000000" + }, + "0x1CBd3b2770909D4e10f157cABC84C7264073C9Ec": { + "balance": "20000000000000000000000" + }, + "0xdF3e18d64BC6A983f673Ab319CCaE4f1a57C7097": { + "balance": "20000000000000000000000" + }, + "0xcd3B766CCDd6AE721141F452C550Ca635964ce71": { + "balance": "20000000000000000000000" + }, + "0x2546BcD3c84621e976D8185a91A922aE77ECEc30": { + "balance": "20000000000000000000000" + }, + "0xbDA5747bFD65F08deb54cb465eB87D40e51B197E": { + "balance": "20000000000000000000000" + }, + "0xdD2FD4581271e230360230F9337D5c0430Bf44C0": { + "balance": "20000000000000000000000" + }, + "0x8626f6940E2eb28930eFb4CeF49B2d1F2C9C1199": { + "balance": "20000000000000000000000" + } + } + } +--- +{{- end }} +{{ end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/geth-deployment.yaml b/charts/plugin-cluster/templates/geth-deployment.yaml new file mode 100644 index 00000000..c78f0851 --- /dev/null +++ b/charts/plugin-cluster/templates/geth-deployment.yaml @@ -0,0 +1,126 @@ +{{ if (hasKey .Values "geth") }} +{{- range $cfg := .Values.geth.chains }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: geth-{{ $cfg.networkId }} +spec: + selector: + matchLabels: + app: geth + release: {{ $.Release.Name }} + instance: geth-{{ $cfg.networkId }} + # Used for testing. + # havoc-component-group and havoc-network-group are used by "havoc" chaos testing tool + havoc-component-group: "blockchain" + havoc-network-group: "blockchain" + template: + metadata: + labels: + app: geth + instance: geth-{{ $cfg.networkId }} + release: {{ $.Release.Name }} + # Used for testing. 
+ # havoc-component-group and havoc-network-group are used by "havoc" chaos testing tool + havoc-component-group: "blockchain" + havoc-network-group: "blockchain" + annotations: + {{- range $key, $value := $.Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + volumes: + - name: configmap-volume + configMap: + name: geth-{{ $cfg.networkId }}-cm + - name: devchain-volume + emptyDir: {} + securityContext: + {{- toYaml $.Values.geth.podSecurityContext | nindent 8 }} + containers: + - name: geth-network + securityContext: + {{- toYaml $.Values.geth.securityContext | nindent 12 }} + image: "{{ default "ethereum/client-go" $.Values.geth.image }}:{{ default "stable" $.Values.geth.version }}" + command: [ "sh", "/chain/init.sh" ] + volumeMounts: + - name: devchain-volume + mountPath: /chain/chain-data + - name : configmap-volume + mountPath: /chain/genesis.json + subPath: genesis.json + - name : configmap-volume + mountPath: /chain/init.sh + subPath: init.sh + - name: configmap-volume + mountPath: /chain/config + args: + - '--password' + - '/chain/config/password.txt' + - '--datadir' + - '/chain/chain-data/' + - '--unlock' + - '0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266' + - '--mine' + - '--miner.etherbase' + - '0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266' + - '--ipcdisable' + - '--http.api' + - 'admin,debug,clique,eth,miner,net,personal,txpool,web3' + - '--http' + - '--http.vhosts' + - '*' + - '--http.addr' + - '0.0.0.0' + - '--http.port=8544' + - '--ws' + - '--ws.origins' + - '*' + - '--ws.addr' + - '0.0.0.0' + - '--ws.port=8546' + - '--graphql' + - '--graphql.corsdomain' + - '*' + - '--allow-insecure-unlock' + - '--rpc.allow-unprotected-txs' + - '--http.corsdomain' + - '*' + - '--vmdebug' + - '--networkid={{ $cfg.networkId }}' + - '--rpc.txfeecap' + - '0' + - '--dev.period' + - '{{ $.Values.geth.blocktime }}' + - '--miner.gasprice' + - '10000000000' + ports: + - name: http-rpc + containerPort: 8544 + - name: ws-rpc + containerPort: 8546 + {{
if (hasKey $.Values.geth "resources") }} + resources: + requests: + memory: {{ default "1024Mi" $.Values.geth.resources.requests.memory }} + cpu: {{ default "1000m" $.Values.geth.resources.requests.cpu }} + limits: + memory: {{ default "1024Mi" $.Values.geth.resources.limits.memory }} + cpu: {{ default "1000m" $.Values.geth.resources.limits.cpu }} + {{ else }} + {{ end }} +{{- with $.Values.nodeSelector }} + nodeSelector: + {{ toYaml . | indent 8 }} +{{- end }} +{{- with $.Values.affinity }} + affinity: + {{ toYaml . | indent 8 }} +{{- end }} +{{- with $.Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +--- +{{- end }} +{{ end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/geth-networkpolicy.yaml b/charts/plugin-cluster/templates/geth-networkpolicy.yaml new file mode 100644 index 00000000..025d6184 --- /dev/null +++ b/charts/plugin-cluster/templates/geth-networkpolicy.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ $.Release.Name }}-geth +spec: + podSelector: + matchLabels: + app: geth + policyTypes: + - Ingress + ingress: + - from: + # Allow http and websocket connections from the node pods. + - podSelector: + matchLabels: + app: {{ $.Release.Name }} + # Allow http and websocket connections from the runner pods. 
+ - podSelector: + matchLabels: + app: runner + ports: + - protocol: TCP + port: 8544 + - protocol: TCP + port: 8546 +{{- end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/geth-service.yaml b/charts/plugin-cluster/templates/geth-service.yaml new file mode 100644 index 00000000..3016c530 --- /dev/null +++ b/charts/plugin-cluster/templates/geth-service.yaml @@ -0,0 +1,21 @@ +{{ if (hasKey .Values "geth") }} +{{- range $cfg := .Values.geth.chains }} +apiVersion: v1 +kind: Service +metadata: + name: geth-{{ $cfg.networkId }} +spec: + selector: + instance: geth-{{ $cfg.networkId }} + release: {{ $.Release.Name }} + ports: + - name: ws-rpc + port: {{ default "8546" $.Values.geth.wsrpc_port}} + targetPort: ws-rpc + - name: http-rpc + port: {{ default "8544" $.Values.geth.httprpc_port}} + targetPort: http-rpc + type: ClusterIP +--- +{{- end }} +{{ end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/ingress.yaml b/charts/plugin-cluster/templates/ingress.yaml new file mode 100644 index 00000000..e84de6ea --- /dev/null +++ b/charts/plugin-cluster/templates/ingress.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ $.Release.Name }} + labels: + app: {{ $.Release.Name }} + release: {{ $.Release.Name }} + {{- range $key, $value := $.Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + alb.ingress.kubernetes.io/backend-protocol: HTTP + alb.ingress.kubernetes.io/certificate-arn: {{ $.Values.ingress.annotation_certificate_arn | quote }} + alb.ingress.kubernetes.io/group.name: {{ $.Values.ingress.annotation_group_name | quote }} + alb.ingress.kubernetes.io/scheme: internal + alb.ingress.kubernetes.io/target-type: ip + {{- if .Values.ingress.extra_annotations }} + {{- range $key, $value := .Values.ingress.extra_annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} +spec: + {{- with 
.Values.ingress.ingressClassName }} + ingressClassName: {{ . }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host }} + http: + paths: + {{- range .http.paths }} + - path: "/*" + pathType: ImplementationSpecific + backend: + service: + name: {{ .backend.service.name }} + port: + number: {{ .backend.service.port.number }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/plugin-cluster/templates/mockserver-networkpolicy.yaml b/charts/plugin-cluster/templates/mockserver-networkpolicy.yaml new file mode 100644 index 00000000..6ac4f658 --- /dev/null +++ b/charts/plugin-cluster/templates/mockserver-networkpolicy.yaml @@ -0,0 +1,25 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ $.Release.Name }}-mockserver +spec: + podSelector: + matchLabels: + app: mockserver + policyTypes: + - Ingress + ingress: + - from: + # Allow http traffic from the node pods. + - podSelector: + matchLabels: + app: {{ $.Release.Name }} + # Allow http traffic from the runner pods. + - podSelector: + matchLabels: + app: runner + ports: + - protocol: TCP + port: 1080 +{{- end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/networkpolicy-default.yaml b/charts/plugin-cluster/templates/networkpolicy-default.yaml new file mode 100644 index 00000000..a2cc23ed --- /dev/null +++ b/charts/plugin-cluster/templates/networkpolicy-default.yaml @@ -0,0 +1,43 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default +spec: + podSelector: + matchLabels: {} + policyTypes: + - Ingress + - Egress + ingress: + {{- if and .Values.networkPolicyDefault.ingress.allowCustomCidrs (not (empty .Values.networkPolicyDefault.ingress.customCidrs)) }} + # Using a comma separated list to make it easy to pass in with: + # `helm template ... 
--set networkPolicyDefault.ingress.customCidrs=...` + {{- $cidrs := splitList "," .Values.networkPolicyDefault.ingress.customCidrs }} + - from: + {{- range $cidr := $cidrs }} + - ipBlock: + cidr: {{ $cidr | quote }} + {{- end }} + {{- else }} + # Deny all ingress if no rules are specified. Rules can still be specified in other templates. + - {} + {{- end }} + egress: + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: "{{ $.Release.Namespace }}" + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: TCP + port: 53 + - protocol: UDP + port: 53 +{{- end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/plugin-cm.yaml b/charts/plugin-cluster/templates/plugin-cm.yaml new file mode 100644 index 00000000..b81a9f22 --- /dev/null +++ b/charts/plugin-cluster/templates/plugin-cm.yaml @@ -0,0 +1,71 @@ +{{- range $cfg := .Values.plugin.nodes }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: {{ $.Release.Name }}-{{ $cfg.name }}-cm + release: {{ $.Release.Name }} + name: {{ $.Release.Name }}-{{ $cfg.name }}-cm +data: + apicredentials: | + notreal@fakeemail.ch + fj293fbBnlQ!f9vNs + node-password: T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ + init.sql: | + CREATE + EXTENSION pg_stat_statements; + default.toml: | + {{ if (hasKey $cfg "toml") }} + {{ $cfg.toml | nindent 4 }} + {{ else }} + RootDir = './clroot' + [Log] + JSONConsole = true + Level = 'debug' + [WebServer] + AllowOrigins = '*' + SecureCookies = false + SessionTimeout = '999h0m0s' + [Feature] + FeedsManager = true + LogPoller = true + UICSAKeys = true + [OCR] + Enabled = true + DefaultTransactionQueueDepth = 0 + [P2P] + [P2P.V2] + Enabled = true + ListenAddresses = ['0.0.0.0:6690'] + AnnounceAddresses = ['0.0.0.0:6690'] + DeltaDial = '500ms' + DeltaReconcile = '5s' + {{- range $chainCfg := $.Values.geth.chains }} + [[EVM]] + 
ChainID = {{ $chainCfg.networkId | quote }} + MinContractPayment = '0' + AutoCreateKey = true + FinalityDepth = 1 + {{- if (hasKey $chainCfg "customEVMConfigToml") }} + {{- $chainCfg.customEVMConfigToml | nindent 4 }} + {{- end }} + [[EVM.Nodes]] + Name = 'node-{{ $chainCfg.networkId }}' + WSURL = 'ws://geth-{{ $chainCfg.networkId }}:8546' + HTTPURL = 'http://geth-{{ $chainCfg.networkId }}:8544' + {{- end }} + [WebServer.TLS] + HTTPSPort = 0 + {{ end }} + overrides.toml: | + {{ if (hasKey $cfg "overridesToml") }} + {{ $cfg.overridesToml | nindent 4 }} + {{ else }} + {{ end }} + secrets.toml: | + {{ if (hasKey $cfg "secretsToml") }} + {{ $cfg.secretsToml | nindent 4 }} + {{ else }} + {{ end }} +--- +{{- end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/plugin-db-deployment.yaml b/charts/plugin-cluster/templates/plugin-db-deployment.yaml new file mode 100644 index 00000000..784bb920 --- /dev/null +++ b/charts/plugin-cluster/templates/plugin-db-deployment.yaml @@ -0,0 +1,148 @@ +{{- range $cfg := .Values.plugin.nodes }} +apiVersion: apps/v1 +{{ if $.Values.db.stateful }} +kind: StatefulSet +{{ else }} +kind: Deployment +{{ end }} +metadata: + name: {{ $.Release.Name }}-{{ $cfg.name }}-db +spec: + {{ if $.Values.db.stateful }} + serviceName: {{ $.Release.Name }}-db-{{ $cfg.name }} + podManagementPolicy: Parallel + volumeClaimTemplates: + - metadata: + name: postgres + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ $.Values.db.capacity }} + {{ end }} + selector: + matchLabels: + app: {{ $.Release.Name }}-db + # Used for testing. + # havoc-component-group and havoc-network-group are used by "havoc" chaos testing tool + havoc-component-group: db + havoc-network-group: db + instance: {{ $cfg.name }}-db + release: {{ $.Release.Name }} + template: + metadata: + labels: + app: {{ $.Release.Name }}-db + # Used for testing.
+ # havoc-component-group and havoc-network-group are used by "havoc" chaos testing tool + havoc-component-group: db + havoc-network-group: db + instance: {{ $cfg.name }}-db + release: {{ $.Release.Name }} + {{- range $key, $value := $.Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + prometheus.io/scrape: 'true' + app.kubernetes.io/managed-by: "Helm" + meta.helm.sh/release-namespace: "{{ $.Release.Namespace }}" + {{- range $key, $value := $.Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + volumes: + # TODO: breakout this config map into a separate one for the db. + - name: {{ $.Release.Name }}-{{ $cfg.name }}-cm + configMap: + name: {{ $.Release.Name }}-{{ $cfg.name }}-cm + securityContext: + {{- toYaml $.Values.db.podSecurityContext | nindent 8 }} + containers: + - name: plugin-db + securityContext: + {{- toYaml $.Values.db.securityContext | nindent 12 }} + image: {{ default "postgres:11.15" $.Values.db.image }} + command: + - docker-entrypoint.sh + args: + - "-c" + - "shared_preload_libraries=pg_stat_statements" + - "-c" + - "pg_stat_statements.track=all" + ports: + - name: postgres + containerPort: 5432 + env: + - name: POSTGRES_DB + value: plugin + - name: POSTGRES_PASSWORD + value: verylongdatabasepassword + - name: PGPASSWORD + value: verylongdatabasepassword + - name: PGUSER + value: postgres + lifecycle: + preStop: + exec: + command: [ "/bin/sh", "-c", "while [ $(psql -d plugin -c \"SELECT COUNT(*) FROM pg_stat_activity WHERE datname = 'plugin' AND state = 'active';\" -tA) -gt 0 ]; do echo \"waiting for connection to become 0\"; sleep 10; done" ] + livenessProbe: + exec: + command: + - pg_isready + - -U + - postgres + initialDelaySeconds: 1 + periodSeconds: 5 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - pg_isready + - -U + - postgres + initialDelaySeconds: 1 + periodSeconds: 5 + timeoutSeconds: 10 + startupProbe: + exec: + command: + - pg_isready + - -U + - postgres + 
initialDelaySeconds: 15 + periodSeconds: 5 + failureThreshold: 20 + {{ if (hasKey $.Values.db "resources") }} + resources: + requests: + memory: {{ default "256Mi" $.Values.db.resources.requests.memory }} + cpu: {{ default "250m" $.Values.db.resources.requests.cpu }} + limits: + memory: {{ default "256Mi" $.Values.db.resources.limits.memory }} + cpu: {{ default "250m" $.Values.db.resources.limits.cpu }} + {{ else }} + {{ end }} + volumeMounts: + - mountPath: /docker-entrypoint-initdb.d/init.sql + name: {{ $.Release.Name }}-{{ $cfg.name }}-cm + subPath: init.sql + {{ if $.Values.db.stateful }} + - mountPath: /var/lib/postgresql/data + name: postgres + subPath: postgres-db + {{ end }} +{{- with $.Values.nodeSelector }} + nodeSelector: + {{ toYaml . | indent 8 }} +{{- end }} +{{- with $.Values.affinity }} + affinity: + {{ toYaml . | indent 8 }} +{{- end }} +{{- with $.Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +--- +{{- end }} diff --git a/charts/plugin-cluster/templates/plugin-db-networkpolicy.yaml b/charts/plugin-cluster/templates/plugin-db-networkpolicy.yaml new file mode 100644 index 00000000..5f7e7706 --- /dev/null +++ b/charts/plugin-cluster/templates/plugin-db-networkpolicy.yaml @@ -0,0 +1,25 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ $.Release.Name }}-db +spec: + podSelector: + matchLabels: + app: {{ $.Release.Name }}-db + policyTypes: + - Ingress + ingress: + - from: + # Allow all node pods to access the database pods. + - podSelector: + matchLabels: + app: {{ $.Release.Name }} + # Allow all runner pods to access the database pods. 
+ - podSelector: + matchLabels: + app: runner + ports: + - protocol: TCP + port: 5432 +{{- end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/plugin-db-service.yaml b/charts/plugin-cluster/templates/plugin-db-service.yaml new file mode 100644 index 00000000..43f05bd9 --- /dev/null +++ b/charts/plugin-cluster/templates/plugin-db-service.yaml @@ -0,0 +1,16 @@ +{{- range $cfg := .Values.plugin.nodes }} +apiVersion: v1 +kind: Service +metadata: + name: {{ $.Release.Name }}-db-{{ $cfg.name }} +spec: + selector: + app: {{ $.Release.Name }}-db + instance: {{ $cfg.name }}-db + release: {{ $.Release.Name }} + ports: + - protocol: TCP + port: 5432 + targetPort: 5432 +--- +{{- end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/plugin-node-deployment.yaml b/charts/plugin-cluster/templates/plugin-node-deployment.yaml new file mode 100644 index 00000000..29290954 --- /dev/null +++ b/charts/plugin-cluster/templates/plugin-node-deployment.yaml @@ -0,0 +1,117 @@ +{{- range $index, $cfg := .Values.plugin.nodes }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ if eq $index 0 }}{{ $.Release.Name }}-{{ $cfg.name }}-bootstrap{{ else }}{{ $.Release.Name }}-{{ $cfg.name }}{{ end }} +spec: + strategy: + # Need to recreate the pod to deal with lease lock held by old pod. + type: Recreate + selector: + matchLabels: + app: {{ $.Release.Name }} + # Used for testing. + # havoc-component-group and havoc-network-group are used by "havoc" chaos testing tool + {{ if eq $index 0 }}{{ else }} + havoc-component-group: node + {{ end }} + {{ if eq $index 0 }}{{ else }} + havoc-network-group: {{ if gt $index 2 }}"1"{{ else }}"2"{{ end }} + {{ end }} + instance: {{ $cfg.name }} + release: {{ $.Release.Name }} + template: + metadata: + labels: + app: {{ $.Release.Name }} + # Used for testing. 
+ # havoc-component-group and havoc-network-group are used by "havoc" chaos testing tool + {{ if eq $index 0 }}{{ else }} + havoc-component-group: node + {{ end }} + {{ if eq $index 0 }}{{ else }} + havoc-network-group: {{ if gt $index 2 }}"1"{{ else }}"2"{{ end }} + {{ end }} + + instance: {{ $cfg.name }} + release: {{ $.Release.Name }} + {{- range $key, $value := $.Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + prometheus.io/scrape: 'true' + {{- range $key, $value := $.Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + spec: + volumes: + - name: {{ $.Release.Name }}-{{ $cfg.name }}-cm + configMap: + name: {{ $.Release.Name }}-{{ $cfg.name }}-cm + securityContext: + {{- toYaml $.Values.plugin.podSecurityContext | nindent 8 }} + containers: + - name: node + securityContext: + {{- toYaml $.Values.plugin.securityContext | nindent 12 }} + image: {{ default "public.ecr.aws/plugin/plugin" $cfg.image }} + imagePullPolicy: Always + command: [ "bash", "-c", "while ! 
pg_isready -U postgres --host {{ $.Release.Name }}-db-{{ $cfg.name }} --port 5432; do echo \"waiting for database to start\"; sleep 1; done && plugin -c /etc/node-secrets-volume/default.toml -c /etc/node-secrets-volume/overrides.toml -secrets /etc/node-secrets-volume/secrets.toml node start -d -p /etc/node-secrets-volume/node-password -a /etc/node-secrets-volume/apicredentials --vrfpassword=/etc/node-secrets-volume/apicredentials" ] + ports: + - name: access + containerPort: {{ $.Values.plugin.web_port }} + - name: p2p + containerPort: {{ $.Values.plugin.p2p_port }} + env: + - name: CL_DATABASE_URL + value: postgresql://postgres:verylongdatabasepassword@{{ $.Release.Name }}-db-{{ $cfg.name }}/plugin?sslmode=disable + - name: CL_DEV + value: "false" + volumeMounts: + - name: {{ $.Release.Name }}-{{ $cfg.name }}-cm + mountPath: /etc/node-secrets-volume/ + livenessProbe: + httpGet: + path: /health + port: {{ $.Values.plugin.web_port }} + initialDelaySeconds: 1 + periodSeconds: 5 + timeoutSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: {{ $.Values.plugin.web_port }} + initialDelaySeconds: 1 + periodSeconds: 5 + timeoutSeconds: 10 + startupProbe: + httpGet: + path: / + port: {{ $.Values.plugin.web_port }} + initialDelaySeconds: 15 + periodSeconds: 5 + failureThreshold: 20 + {{ if (hasKey $cfg "resources") }} + resources: + requests: + memory: {{ default "1024Mi" $cfg.resources.requests.memory }} + cpu: {{ default "300m" $cfg.resources.requests.cpu }} + limits: + memory: {{ default "1024Mi" $cfg.resources.limits.memory }} + cpu: {{ default "300m" $cfg.resources.limits.cpu }} + {{ else }} + {{ end }} +{{- with $.Values.nodeSelector }} + nodeSelector: + {{ toYaml . | indent 8 }} +{{- end }} +{{- with $.Values.affinity }} + affinity: + {{ toYaml . | indent 8 }} +{{- end }} +{{- with $.Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} +{{- end }} +--- +{{- end }} diff --git a/charts/plugin-cluster/templates/plugin-node-networkpolicy.yaml b/charts/plugin-cluster/templates/plugin-node-networkpolicy.yaml new file mode 100644 index 00000000..e63759a9 --- /dev/null +++ b/charts/plugin-cluster/templates/plugin-node-networkpolicy.yaml @@ -0,0 +1,21 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ $.Release.Name }}-node +spec: + podSelector: + matchLabels: + app: {{ $.Release.Name }} + policyTypes: + - Ingress + ingress: + # Allow all ingress traffic between the node pods and from runner pod. + - from: + - podSelector: + matchLabels: + app: {{ $.Release.Name }} + - podSelector: + matchLabels: + app: runner +{{- end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/plugin-node-service.yaml b/charts/plugin-cluster/templates/plugin-node-service.yaml new file mode 100644 index 00000000..258a6a45 --- /dev/null +++ b/charts/plugin-cluster/templates/plugin-node-service.yaml @@ -0,0 +1,18 @@ +{{- range $cfg := .Values.plugin.nodes }} +apiVersion: v1 +kind: Service +metadata: + name: {{ $.Release.Name }}-{{ $cfg.name }} +spec: + ports: + - name: node-port + port: {{ $.Values.plugin.web_port }} + targetPort: {{ $.Values.plugin.web_port }} + - name: p2p-port + port: {{ $.Values.plugin.p2p_port }} + targetPort: {{ $.Values.plugin.p2p_port }} + selector: + instance: {{ $cfg.name }} + type: ClusterIP +--- +{{- end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/plugin-pod-monitor.yaml b/charts/plugin-cluster/templates/plugin-pod-monitor.yaml new file mode 100644 index 00000000..05852642 --- /dev/null +++ b/charts/plugin-cluster/templates/plugin-pod-monitor.yaml @@ -0,0 +1,18 @@ +{{- if $.Values.prometheusMonitor }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ $.Release.Name }}-pod-monitor + labels: + release: grafana-agent +spec: + 
namespaceSelector: + matchNames: + - {{ $.Release.Namespace }} + podMetricsEndpoints: + - port: access + selector: + matchLabels: + app: {{ $.Release.Name }} +{{- end }} +--- \ No newline at end of file diff --git a/charts/plugin-cluster/templates/plugin-secret.yaml b/charts/plugin-cluster/templates/plugin-secret.yaml new file mode 100644 index 00000000..342697e9 --- /dev/null +++ b/charts/plugin-cluster/templates/plugin-secret.yaml @@ -0,0 +1,12 @@ +{{- range $cfg := .Values.plugin.nodes }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $.Release.Name }}-{{ $cfg.name }}node-creds-secret +type: Opaque +data: + nodepassword: VC50TEhrY213ZVBUL3AsXXNZdW50andIS0FzcmhtIzRlUnM0THVLSHd2SGVqV1lBQzJKUDRNOEhpbXdnbWJhWgo= + apicredentials: bm90cmVhbEBmYWtlZW1haWwuY2hudHdvY2hhaW5zCg== + node-password: VC50TEhrY213ZVBUL3AsXXNZdW50andIS0FzcmhtIzRlUnM0THVLSHd2SGVqV1lBQzJKUDRNOEhpbXdnbWJhWgo= +--- +{{- end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/runner-deployment.yaml b/charts/plugin-cluster/templates/runner-deployment.yaml new file mode 100644 index 00000000..bd2b79be --- /dev/null +++ b/charts/plugin-cluster/templates/runner-deployment.yaml @@ -0,0 +1,71 @@ +{{ if (hasKey .Values "runner")}} +apiVersion: apps/v1 +{{ if .Values.runner.stateful }} +kind: StatefulSet +{{ else }} +kind: Deployment +{{ end }} +metadata: + name: runner +spec: + selector: + matchLabels: + app: runner + instance: runner-1 + release: {{ .Release.Name }} + template: + metadata: + labels: + app: runner + instance: runner-1 + release: {{ .Release.Name }} + annotations: + prometheus.io/scrape: 'true' + spec: + securityContext: + {{- toYaml $.Values.runner.podSecurityContext | nindent 8 }} + containers: + - name: runner + securityContext: + {{- toYaml $.Values.runner.securityContext | nindent 12 }} + image: {{ default "public.ecr.aws/plugin/plugin" .Values.runner.image }} + imagePullPolicy: Always + command: [ "/bin/bash", "-c", "--" ] + args: [ "tail -f /dev/null" ] + 
{{ if (hasKey .Values.runner "env") }} + env: + {{- range $key, $value := .Values.runner.env }} + {{- if $value }} + - name: {{ $key | upper}} + {{- if kindIs "string" $value}} + value: {{ $value | quote}} + {{- else }} + value: {{ $value }} + {{- end }} + {{- end }} + {{- end }} + {{ end }} + {{ if (hasKey .Values.runner "resources") }} + resources: + requests: + memory: {{ default "1024Mi" .Values.runner.resources.requests.memory }} + cpu: {{ default "500m" $.Values.runner.resources.requests.cpu }} + limits: + memory: {{ default "1024Mi" $.Values.runner.resources.limits.memory }} + cpu: {{ default "500m" $.Values.runner.resources.limits.cpu }} + {{ else }} + {{ end }} +{{- with $.Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} +{{- end }} +--- +{{ end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/runner-networkpolicy.yaml b/charts/plugin-cluster/templates/runner-networkpolicy.yaml new file mode 100644 index 00000000..b75a2ffa --- /dev/null +++ b/charts/plugin-cluster/templates/runner-networkpolicy.yaml @@ -0,0 +1,21 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ $.Release.Name }}-runner +spec: + podSelector: + matchLabels: + app: runner + policyTypes: + - Ingress + ingress: + # Allow all ingress traffic between the node pods and from runner pod. 
+ - from: + - podSelector: + matchLabels: + app: {{ $.Release.Name }} + - podSelector: + matchLabels: + app: runner +{{- end }} \ No newline at end of file diff --git a/charts/plugin-cluster/templates/tests/test-connection.yaml b/charts/plugin-cluster/templates/tests/test-connection.yaml new file mode 100644 index 00000000..51537c74 --- /dev/null +++ b/charts/plugin-cluster/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: cl-nodes-connection-test + annotations: + "helm.sh/hook": test +spec: + containers: + {{- range $cfg := .Values.plugin.nodes }} + - name: curl-{{ $cfg.name }} + image: radial/busyboxplus:curl + command: ['curl'] + args: [ '{{ $.Release.Name }}-{{ $cfg.name }}:{{ $.Values.plugin.web_port }}' ] + {{- end }} + restartPolicy: Never \ No newline at end of file diff --git a/charts/plugin-cluster/values.yaml b/charts/plugin-cluster/values.yaml new file mode 100644 index 00000000..b71a055e --- /dev/null +++ b/charts/plugin-cluster/values.yaml @@ -0,0 +1,308 @@ +# override resources for keys "plugin", "db", or "geth" if needed +# resources: +# requests: +# cpu: 350m +# memory: 1024Mi +# limits: +# cpu: 350m +# memory: 1024Mi +# images can be overriden for the same keys: +# image: ethereum/client-go +# version: stable +plugin: + podSecurityContext: + fsGroup: 14933 + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 14933 + runAsGroup: 14933 + web_port: 6688 + p2p_port: 6690 + nodes: + - name: node-1 + image: "public.ecr.aws/plugin/plugin:latest" + # default resources are 300m/1Gi + # first node need more resources to build faster inside container + # at least 2Gi of memory is required otherwise build will fail (OOM) + resources: + requests: + cpu: 2000m + memory: 2048Mi + limits: + cpu: 2000m + memory: 2048Mi + # override default config per node + # for example, use OCRv2 P2P setup, the whole config + # toml: | + # RootDir = './clroot' + # 
[Log] + # JSONConsole = true + # Level = 'debug' + # [WebServer] + # AllowOrigins = '*' + # SecureCookies = false + # SessionTimeout = '999h0m0s' + # [OCR2] + # Enabled = true + # [P2P] + # [P2P.V2] + # Enabled = false + # AnnounceAddresses = [] + # DefaultBootstrappers = [] + # DeltaDial = '15s' + # DeltaReconcile = '1m0s' + # ListenAddresses = [] + # [[EVM]] + # ChainID = '1337' + # MinContractPayment = '0' + # [[EVM.Nodes]] + # Name = 'node-0' + # WSURL = 'ws://geth:8546' + # HTTPURL = 'http://geth:8544' + # [WebServer.TLS] + # HTTPSPort = 0 + # or use overridesToml to override some part of configuration + # overridesToml: | + - name: node-2 + - name: node-3 + - name: node-4 + - name: node-5 + - name: node-6 + resources: + requests: + cpu: 350m + memory: 1024Mi + limits: + cpu: 350m + memory: 1024Mi + +# each CL node have a dedicated PostgreSQL 11.15 +# use StatefulSet by setting: +# +# stateful: true +# capacity 10Gi +# +# if you are running long tests +db: + podSecurityContext: + fsGroup: 999 + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 999 + runAsGroup: 999 + stateful: false + resources: + requests: + cpu: 1 + memory: 1024Mi + limits: + cpu: 1 + memory: 1024Mi +# default cluster shipped with latest Geth ( dev mode by default ) +geth: + podSecurityContext: + fsGroup: 999 + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 999 + runAsGroup: 999 + version: v1.12.0 + wsrpc-port: 8546 + httprpc-port: 8544 + blocktime: 1 + chains: + - networkId: 1337 + # use to inject custom configuration for each chain, e.g. 
GasEstimator + # - customEVMConfigToml: | + # [EVM.GasEstimator] + # PriceMax = '200 gwei' + # LimitDefault = 6000000 + # FeeCapDefault = '200 gwei' + # [EVM.GasEstimator.BlockHistory] + # BlockHistorySize = 200 + # EIP1559FeeCapBufferBlocks = 0 + - networkId: 2337 + resources: + requests: + cpu: 1 + memory: 1024Mi + limits: + cpu: 1 + memory: 1024Mi +# mockserver is https://www.mock-server.com/where/kubernetes.html +# used to stub External Adapters +mockserver: + enabled: true + releasenameOverride: mockserver + service: + type: ClusterIP + app: + runAsUser: 999 + readOnlyRootFilesystem: false + port: 1080 + resources: + requests: + cpu: 1 + memory: 1024Mi + limits: + cpu: 1 + memory: 1024Mi +runner: + podSecurityContext: + fsGroup: 999 + securityContext: + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsNonRoot: true + runAsUser: 999 + runAsGroup: 999 + stateful: false + resources: + requests: + cpu: 1 + memory: 512Mi + limits: + cpu: 1 + memory: 512Mi + affinity: {} + tolerations: [] + nodeSelector: {} + ingress: + enabled: false + className: "" + hosts: [] + tls: [] + annotations: {} + service: + type: NodePort + port: 8080 + +ingress: + enabled: false + annotations: {} + ingressClassName: alb + hosts: + - host: plugin-node-1.local + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: plugin-node-1 + port: + number: 6688 + - host: plugin-node-2.local + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: plugin-node-2 + port: + number: 6688 + - host: plugin-node-3.local + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: plugin-node-3 + port: + number: 6688 + - host: plugin-node-4.local + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: plugin-node-4 + port: + number: 6688 + - host: plugin-node-5.local + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + 
service: + name: plugin-node-5 + port: + number: 6688 + - host: plugin-node-6.local + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: plugin-node-6 + port: + number: 6688 + - host: plugin-geth-http.local + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: geth + port: + number: 8544 + - host: plugin-geth-ws.local + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: geth + port: + number: 8546 + - host: plugin-mockserver.local + http: + paths: + - path: / + pathType: ImplementationSpecific + backend: + service: + name: mockserver + port: + number: 1080 +# monitoring.coreos.com/v1 PodMonitor for each node +prometheusMonitor: true + +# deployment placement, standard helm stuff +podAnnotations: +nodeSelector: +tolerations: +affinity: + +networkPolicies: + enabled: true + +# Configure the default network policy. +networkPolicyDefault: + ingress: + allowCustomCidrs: false + # String of comma separated CIDRs + customCidrs: null + # Example: + # customCidrs: "10.0.0.0/16,192.168.0.1/24" diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000..27b4815f --- /dev/null +++ b/codecov.yml @@ -0,0 +1,17 @@ +comment: false + +coverage: + status: + project: + default: + threshold: 1% + +github_checks: + annotations: false + +ignore: + - 'contracts/src/v0.4' + - 'contracts/src/v0.5' + - 'contracts/src/v0.8' # Disabled due to solidity-coverage not reporting coverage + - 'core/internal' + - 'core/scripts' diff --git a/common/chains/label/label.go b/common/chains/label/label.go new file mode 100644 index 00000000..7fce5908 --- /dev/null +++ b/common/chains/label/label.go @@ -0,0 +1,8 @@ +package label + +const ( + MaxInFlightTransactionsWarning = `WARNING: If this happens a lot, you may need to increase Transactions.MaxInFlight to boost your node's transaction throughput, however you do this at your own risk. 
You MUST first ensure your node is configured not to ever evict local transactions that exceed this number otherwise the node can get permanently stuck. See the performance guide for more details: https://docs.chain.link/docs/evm-performance-configuration/` + MaxQueuedTransactionsWarning = `WARNING: Hitting Transactions.MaxQueued is a sanity limit and should never happen under normal operation. Unless you are operating with very high throughput, this error is unlikely to be a problem with your Plugin node configuration, and instead more likely to be caused by a problem with your node's connectivity. Check your node: it may not be broadcasting transactions to the network, or it might be overloaded and evicting Plugin's transactions from its mempool. It is recommended to run Plugin with multiple primary and sendonly nodes for redundancy and to ensure fast and reliable transaction propagation. Increasing Transactions.MaxQueued will allow Plugin to buffer more unsent transactions, but you should only do this if you need very high burst transmission rates. If you don't need very high burst throughput, increasing this limit is not the correct action to take here and will probably make things worse. See the performance guide for more details: https://docs.chain.link/docs/evm-performance-configuration/` + NodeConnectivityProblemWarning = `WARNING: If this happens a lot, it may be a sign that your node has a connectivity problem, and your transactions are not making it to any miners. It is recommended to run Plugin with multiple primary and sendonly nodes for redundancy and to ensure fast and reliable transaction propagation. See the performance guide for more details: https://docs.chain.link/docs/evm-performance-configuration/` + RPCTxFeeCapConfiguredIncorrectlyWarning = `WARNING: Gas price was rejected by the node for being too high. By default, go-ethereum (and clones) have a built-in upper limit for gas price. 
It is preferable to disable this and rely on Plugin's internal gas limits instead. Your RPC node's RPCTxFeeCap needs to be disabled or increased (recommended configuration: --rpc.gascap=0 --rpc.txfeecap=0). If you want to limit Plugin's max gas price, you may do so by setting GasEstimator.PriceMax on the Plugin node. Plugin will never send a transaction with a total cost higher than GasEstimator.PriceMax. See the performance guide for more details: https://docs.chain.link/docs/evm-performance-configuration/`
+)
diff --git a/common/client/mock_hashable_test.go b/common/client/mock_hashable_test.go
new file mode 100644
index 00000000..d9f1670c
--- /dev/null
+++ b/common/client/mock_hashable_test.go
@@ -0,0 +1,18 @@
+package client
+
+import "cmp"
+
+// Hashable - simple implementation of types.Hashable interface to be used as concrete type in tests
+type Hashable string
+
+func (h Hashable) Cmp(c Hashable) int {
+	return cmp.Compare(h, c)
+}
+
+func (h Hashable) String() string {
+	return string(h)
+}
+
+func (h Hashable) Bytes() []byte {
+	return []byte(h)
+}
diff --git a/common/client/mock_head_test.go b/common/client/mock_head_test.go
new file mode 100644
index 00000000..74777048
--- /dev/null
+++ b/common/client/mock_head_test.go
@@ -0,0 +1,66 @@
+// Code generated by mockery v2.38.0. DO NOT EDIT.
+ +package client + +import ( + big "math/big" + + mock "github.com/stretchr/testify/mock" +) + +// mockHead is an autogenerated mock type for the Head type +type mockHead struct { + mock.Mock +} + +// BlockDifficulty provides a mock function with given fields: +func (_m *mockHead) BlockDifficulty() *big.Int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockDifficulty") + } + + var r0 *big.Int + if rf, ok := ret.Get(0).(func() *big.Int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + return r0 +} + +// BlockNumber provides a mock function with given fields: +func (_m *mockHead) BlockNumber() int64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// newMockHead creates a new instance of mockHead. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockHead(t interface { + mock.TestingT + Cleanup(func()) +}) *mockHead { + mock := &mockHead{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/client/mock_node_client_test.go b/common/client/mock_node_client_test.go new file mode 100644 index 00000000..4f15ecdf --- /dev/null +++ b/common/client/mock_node_client_test.go @@ -0,0 +1,192 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package client + +import ( + context "context" + + types "github.com/goplugin/pluginv3.0/v2/common/types" + mock "github.com/stretchr/testify/mock" +) + +// mockNodeClient is an autogenerated mock type for the NodeClient type +type mockNodeClient[CHAIN_ID types.ID, HEAD Head] struct { + mock.Mock +} + +// ChainID provides a mock function with given fields: ctx +func (_m *mockNodeClient[CHAIN_ID, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 CHAIN_ID + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (CHAIN_ID, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) CHAIN_ID); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(CHAIN_ID) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientVersion provides a mock function with given fields: _a0 +func (_m *mockNodeClient[CHAIN_ID, HEAD]) ClientVersion(_a0 context.Context) (string, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ClientVersion") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (string, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) string); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Close provides a mock function with given fields: +func (_m *mockNodeClient[CHAIN_ID, HEAD]) Close() { + _m.Called() +} + +// Dial provides a mock function with given fields: ctx +func (_m *mockNodeClient[CHAIN_ID, HEAD]) Dial(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Dial") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DialHTTP provides a mock function with given fields: +func (_m *mockNodeClient[CHAIN_ID, HEAD]) DialHTTP() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DialHTTP") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DisconnectAll provides a mock function with given fields: +func (_m *mockNodeClient[CHAIN_ID, HEAD]) DisconnectAll() { + _m.Called() +} + +// SetAliveLoopSub provides a mock function with given fields: _a0 +func (_m *mockNodeClient[CHAIN_ID, HEAD]) SetAliveLoopSub(_a0 types.Subscription) { + _m.Called(_a0) +} + +// Subscribe provides a mock function with given fields: ctx, channel, args +func (_m *mockNodeClient[CHAIN_ID, HEAD]) Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (types.Subscription, error) { + var _ca []interface{} + _ca = append(_ca, ctx, channel) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 types.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) (types.Subscription, error)); ok { + return rf(ctx, channel, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) types.Subscription); ok { + r0 = rf(ctx, channel, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, chan<- HEAD, ...interface{}) error); ok { + r1 = rf(ctx, channel, args...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribersCount provides a mock function with given fields: +func (_m *mockNodeClient[CHAIN_ID, HEAD]) SubscribersCount() int32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SubscribersCount") + } + + var r0 int32 + if rf, ok := ret.Get(0).(func() int32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int32) + } + + return r0 +} + +// UnsubscribeAllExceptAliveLoop provides a mock function with given fields: +func (_m *mockNodeClient[CHAIN_ID, HEAD]) UnsubscribeAllExceptAliveLoop() { + _m.Called() +} + +// newMockNodeClient creates a new instance of mockNodeClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockNodeClient[CHAIN_ID types.ID, HEAD Head](t interface { + mock.TestingT + Cleanup(func()) +}) *mockNodeClient[CHAIN_ID, HEAD] { + mock := &mockNodeClient[CHAIN_ID, HEAD]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/client/mock_node_selector_test.go b/common/client/mock_node_selector_test.go new file mode 100644 index 00000000..19ecd94a --- /dev/null +++ b/common/client/mock_node_selector_test.go @@ -0,0 +1,65 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package client + +import ( + types "github.com/goplugin/pluginv3.0/v2/common/types" + mock "github.com/stretchr/testify/mock" +) + +// mockNodeSelector is an autogenerated mock type for the NodeSelector type +type mockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC NodeClient[CHAIN_ID, HEAD]] struct { + mock.Mock +} + +// Name provides a mock function with given fields: +func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Select provides a mock function with given fields: +func (_m *mockNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Select") + } + + var r0 Node[CHAIN_ID, HEAD, RPC] + if rf, ok := ret.Get(0).(func() Node[CHAIN_ID, HEAD, RPC]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(Node[CHAIN_ID, HEAD, RPC]) + } + } + + return r0 +} + +// newMockNodeSelector creates a new instance of mockNodeSelector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockNodeSelector[CHAIN_ID types.ID, HEAD Head, RPC NodeClient[CHAIN_ID, HEAD]](t interface { + mock.TestingT + Cleanup(func()) +}) *mockNodeSelector[CHAIN_ID, HEAD, RPC] { + mock := &mockNodeSelector[CHAIN_ID, HEAD, RPC]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/client/mock_node_test.go b/common/client/mock_node_test.go new file mode 100644 index 00000000..47e00f4d --- /dev/null +++ b/common/client/mock_node_test.go @@ -0,0 +1,235 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package client + +import ( + context "context" + big "math/big" + + mock "github.com/stretchr/testify/mock" + + types "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// mockNode is an autogenerated mock type for the Node type +type mockNode[CHAIN_ID types.ID, HEAD Head, RPC NodeClient[CHAIN_ID, HEAD]] struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ConfiguredChainID provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) ConfiguredChainID() CHAIN_ID { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ConfiguredChainID") + } + + var r0 CHAIN_ID + if rf, ok := ret.Get(0).(func() CHAIN_ID); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(CHAIN_ID) + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Order provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Order() int32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Order") + } + + var r0 int32 + if rf, ok := ret.Get(0).(func() int32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int32) + } + + return r0 +} + +// RPC provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) RPC() RPC { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RPC") + } + + var r0 RPC + if rf, ok := 
ret.Get(0).(func() RPC); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(RPC) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// State provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) State() nodeState { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for State") + } + + var r0 nodeState + if rf, ok := ret.Get(0).(func() nodeState); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(nodeState) + } + + return r0 +} + +// StateAndLatest provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, int64, *big.Int) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for StateAndLatest") + } + + var r0 nodeState + var r1 int64 + var r2 *big.Int + if rf, ok := ret.Get(0).(func() (nodeState, int64, *big.Int)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() nodeState); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(nodeState) + } + + if rf, ok := ret.Get(1).(func() int64); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(int64) + } + + if rf, ok := ret.Get(2).(func() *big.Int); ok { + r2 = rf() + } else { + if ret.Get(2) != nil { + r2 = ret.Get(2).(*big.Int) + } + } + + return r0, r1, r2 +} + +// String provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// SubscribersCount provides a mock 
function with given fields: +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) SubscribersCount() int32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SubscribersCount") + } + + var r0 int32 + if rf, ok := ret.Get(0).(func() int32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int32) + } + + return r0 +} + +// UnsubscribeAllExceptAliveLoop provides a mock function with given fields: +func (_m *mockNode[CHAIN_ID, HEAD, RPC]) UnsubscribeAllExceptAliveLoop() { + _m.Called() +} + +// newMockNode creates a new instance of mockNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockNode[CHAIN_ID types.ID, HEAD Head, RPC NodeClient[CHAIN_ID, HEAD]](t interface { + mock.TestingT + Cleanup(func()) +}) *mockNode[CHAIN_ID, HEAD, RPC] { + mock := &mockNode[CHAIN_ID, HEAD, RPC]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/client/mock_rpc_test.go b/common/client/mock_rpc_test.go new file mode 100644 index 00000000..f0e49d7f --- /dev/null +++ b/common/client/mock_rpc_test.go @@ -0,0 +1,738 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package client + +import ( + big "math/big" + + assets "github.com/goplugin/plugin-common/pkg/assets" + + context "context" + + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + + mock "github.com/stretchr/testify/mock" + + types "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// mockRPC is an autogenerated mock type for the RPC type +type mockRPC[CHAIN_ID types.ID, SEQ types.Sequence, ADDR types.Hashable, BLOCK_HASH types.Hashable, TX interface{}, TX_HASH types.Hashable, EVENT interface{}, EVENT_OPS interface{}, TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], FEE feetypes.Fee, HEAD types.Head[BLOCK_HASH]] struct { + mock.Mock +} + +// BalanceAt provides a mock function with given fields: ctx, accountAddress, blockNumber +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) BalanceAt(ctx context.Context, accountAddress ADDR, blockNumber *big.Int) (*big.Int, error) { + ret := _m.Called(ctx, accountAddress, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for BalanceAt") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ADDR, *big.Int) (*big.Int, error)); ok { + return rf(ctx, accountAddress, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, ADDR, *big.Int) *big.Int); ok { + r0 = rf(ctx, accountAddress, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ADDR, *big.Int) error); ok { + r1 = rf(ctx, accountAddress, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BatchCallContext provides a mock function with given fields: ctx, b +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) BatchCallContext(ctx context.Context, b []interface{}) error { + ret := _m.Called(ctx, b) + + if len(ret) == 0 { + panic("no return value specified for BatchCallContext") + } + + 
var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []interface{}) error); ok { + r0 = rf(ctx, b) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) BlockByHash(ctx context.Context, hash BLOCK_HASH) (HEAD, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for BlockByHash") + } + + var r0 HEAD + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, BLOCK_HASH) (HEAD, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, BLOCK_HASH) HEAD); ok { + r0 = rf(ctx, hash) + } else { + r0 = ret.Get(0).(HEAD) + } + + if rf, ok := ret.Get(1).(func(context.Context, BLOCK_HASH) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockByNumber provides a mock function with given fields: ctx, number +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) BlockByNumber(ctx context.Context, number *big.Int) (HEAD, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for BlockByNumber") + } + + var r0 HEAD + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (HEAD, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) HEAD); ok { + r0 = rf(ctx, number) + } else { + r0 = ret.Get(0).(HEAD) + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CallContext provides a mock function with given fields: ctx, result, method, args +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) CallContext(ctx context.Context, result interface{}, method string, 
args ...interface{}) error { + var _ca []interface{} + _ca = append(_ca, ctx, result, method) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CallContext") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string, ...interface{}) error); ok { + r0 = rf(ctx, result, method, args...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CallContract provides a mock function with given fields: ctx, msg, blockNumber +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, msg, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}, *big.Int) ([]byte, error)); ok { + return rf(ctx, msg, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}, *big.Int) []byte); ok { + r0 = rf(ctx, msg, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, interface{}, *big.Int) error); ok { + r1 = rf(ctx, msg, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ChainID provides a mock function with given fields: ctx +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) ChainID(ctx context.Context) (CHAIN_ID, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 CHAIN_ID + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (CHAIN_ID, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) CHAIN_ID); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(CHAIN_ID) + } + + if rf, ok := ret.Get(1).(func(context.Context) 
error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientVersion provides a mock function with given fields: _a0 +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) ClientVersion(_a0 context.Context) (string, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ClientVersion") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (string, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) string); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Close provides a mock function with given fields: +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) Close() { + _m.Called() +} + +// CodeAt provides a mock function with given fields: ctx, account, blockNumber +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) CodeAt(ctx context.Context, account ADDR, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, account, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ADDR, *big.Int) ([]byte, error)); ok { + return rf(ctx, account, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, ADDR, *big.Int) []byte); ok { + r0 = rf(ctx, account, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ADDR, *big.Int) error); ok { + r1 = rf(ctx, account, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Dial provides a mock function with given fields: ctx +func (_m *mockRPC[CHAIN_ID, SEQ, 
ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) Dial(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Dial") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DialHTTP provides a mock function with given fields: +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) DialHTTP() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DialHTTP") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DisconnectAll provides a mock function with given fields: +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) DisconnectAll() { + _m.Called() +} + +// EstimateGas provides a mock function with given fields: ctx, call +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) EstimateGas(ctx context.Context, call interface{}) (uint64, error) { + ret := _m.Called(ctx, call) + + if len(ret) == 0 { + panic("no return value specified for EstimateGas") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}) (uint64, error)); ok { + return rf(ctx, call) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}) uint64); ok { + r0 = rf(ctx, call) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, interface{}) error); ok { + r1 = rf(ctx, call) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterEvents provides a mock function with given fields: ctx, query +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) FilterEvents(ctx context.Context, query EVENT_OPS) 
([]EVENT, error) { + ret := _m.Called(ctx, query) + + if len(ret) == 0 { + panic("no return value specified for FilterEvents") + } + + var r0 []EVENT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, EVENT_OPS) ([]EVENT, error)); ok { + return rf(ctx, query) + } + if rf, ok := ret.Get(0).(func(context.Context, EVENT_OPS) []EVENT); ok { + r0 = rf(ctx, query) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]EVENT) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, EVENT_OPS) error); ok { + r1 = rf(ctx, query) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PLIBalance provides a mock function with given fields: ctx, accountAddress, linkAddress +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) PLIBalance(ctx context.Context, accountAddress ADDR, linkAddress ADDR) (*assets.Link, error) { + ret := _m.Called(ctx, accountAddress, linkAddress) + + if len(ret) == 0 { + panic("no return value specified for PLIBalance") + } + + var r0 *assets.Link + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ADDR, ADDR) (*assets.Link, error)); ok { + return rf(ctx, accountAddress, linkAddress) + } + if rf, ok := ret.Get(0).(func(context.Context, ADDR, ADDR) *assets.Link); ok { + r0 = rf(ctx, accountAddress, linkAddress) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Link) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ADDR, ADDR) error); ok { + r1 = rf(ctx, accountAddress, linkAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestBlockHeight provides a mock function with given fields: _a0 +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) LatestBlockHeight(_a0 context.Context) (*big.Int, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for LatestBlockHeight") + } + + var r0 *big.Int + var r1 error + if rf, ok := 
ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PendingCallContract provides a mock function with given fields: ctx, msg +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) PendingCallContract(ctx context.Context, msg interface{}) ([]byte, error) { + ret := _m.Called(ctx, msg) + + if len(ret) == 0 { + panic("no return value specified for PendingCallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}) ([]byte, error)); ok { + return rf(ctx, msg) + } + if rf, ok := ret.Get(0).(func(context.Context, interface{}) []byte); ok { + r0 = rf(ctx, msg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, interface{}) error); ok { + r1 = rf(ctx, msg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PendingSequenceAt provides a mock function with given fields: ctx, addr +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) PendingSequenceAt(ctx context.Context, addr ADDR) (SEQ, error) { + ret := _m.Called(ctx, addr) + + if len(ret) == 0 { + panic("no return value specified for PendingSequenceAt") + } + + var r0 SEQ + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ADDR) (SEQ, error)); ok { + return rf(ctx, addr) + } + if rf, ok := ret.Get(0).(func(context.Context, ADDR) SEQ); ok { + r0 = rf(ctx, addr) + } else { + r0 = ret.Get(0).(SEQ) + } + + if rf, ok := ret.Get(1).(func(context.Context, ADDR) error); ok { + r1 = rf(ctx, addr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
SendEmptyTransaction provides a mock function with given fields: ctx, newTxAttempt, seq, gasLimit, fee, fromAddress +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) SendEmptyTransaction(ctx context.Context, newTxAttempt func(SEQ, uint32, FEE, ADDR) (interface{}, error), seq SEQ, gasLimit uint32, fee FEE, fromAddress ADDR) (string, error) { + ret := _m.Called(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) + + if len(ret) == 0 { + panic("no return value specified for SendEmptyTransaction") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, func(SEQ, uint32, FEE, ADDR) (interface{}, error), SEQ, uint32, FEE, ADDR) (string, error)); ok { + return rf(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) + } + if rf, ok := ret.Get(0).(func(context.Context, func(SEQ, uint32, FEE, ADDR) (interface{}, error), SEQ, uint32, FEE, ADDR) string); ok { + r0 = rf(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, func(SEQ, uint32, FEE, ADDR) (interface{}, error), SEQ, uint32, FEE, ADDR) error); ok { + r1 = rf(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendTransaction provides a mock function with given fields: ctx, tx +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) SendTransaction(ctx context.Context, tx TX) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, TX) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SequenceAt provides a mock function with given fields: ctx, accountAddress, blockNumber +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, 
HEAD]) SequenceAt(ctx context.Context, accountAddress ADDR, blockNumber *big.Int) (SEQ, error) { + ret := _m.Called(ctx, accountAddress, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for SequenceAt") + } + + var r0 SEQ + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ADDR, *big.Int) (SEQ, error)); ok { + return rf(ctx, accountAddress, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, ADDR, *big.Int) SEQ); ok { + r0 = rf(ctx, accountAddress, blockNumber) + } else { + r0 = ret.Get(0).(SEQ) + } + + if rf, ok := ret.Get(1).(func(context.Context, ADDR, *big.Int) error); ok { + r1 = rf(ctx, accountAddress, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetAliveLoopSub provides a mock function with given fields: _a0 +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) SetAliveLoopSub(_a0 types.Subscription) { + _m.Called(_a0) +} + +// SimulateTransaction provides a mock function with given fields: ctx, tx +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) SimulateTransaction(ctx context.Context, tx TX) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SimulateTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, TX) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Subscribe provides a mock function with given fields: ctx, channel, args +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (types.Subscription, error) { + var _ca []interface{} + _ca = append(_ca, ctx, channel) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 types.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) (types.Subscription, error)); ok { + return rf(ctx, channel, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, chan<- HEAD, ...interface{}) types.Subscription); ok { + r0 = rf(ctx, channel, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, chan<- HEAD, ...interface{}) error); ok { + r1 = rf(ctx, channel, args...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribersCount provides a mock function with given fields: +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) SubscribersCount() int32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SubscribersCount") + } + + var r0 int32 + if rf, ok := ret.Get(0).(func() int32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int32) + } + + return r0 +} + +// TokenBalance provides a mock function with given fields: ctx, accountAddress, tokenAddress +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) TokenBalance(ctx context.Context, accountAddress ADDR, tokenAddress ADDR) (*big.Int, error) { + ret := _m.Called(ctx, accountAddress, tokenAddress) + + if len(ret) == 0 { + panic("no return value specified for TokenBalance") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ADDR, ADDR) (*big.Int, error)); ok { + return rf(ctx, accountAddress, tokenAddress) + } + if rf, ok := ret.Get(0).(func(context.Context, ADDR, ADDR) *big.Int); ok { + r0 = rf(ctx, accountAddress, tokenAddress) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ADDR, ADDR) error); ok { + r1 
= rf(ctx, accountAddress, tokenAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionByHash provides a mock function with given fields: ctx, txHash +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) TransactionByHash(ctx context.Context, txHash TX_HASH) (TX, error) { + ret := _m.Called(ctx, txHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionByHash") + } + + var r0 TX + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, TX_HASH) (TX, error)); ok { + return rf(ctx, txHash) + } + if rf, ok := ret.Get(0).(func(context.Context, TX_HASH) TX); ok { + r0 = rf(ctx, txHash) + } else { + r0 = ret.Get(0).(TX) + } + + if rf, ok := ret.Get(1).(func(context.Context, TX_HASH) error); ok { + r1 = rf(ctx, txHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionReceipt provides a mock function with given fields: ctx, txHash +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) TransactionReceipt(ctx context.Context, txHash TX_HASH) (TX_RECEIPT, error) { + ret := _m.Called(ctx, txHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionReceipt") + } + + var r0 TX_RECEIPT + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, TX_HASH) (TX_RECEIPT, error)); ok { + return rf(ctx, txHash) + } + if rf, ok := ret.Get(0).(func(context.Context, TX_HASH) TX_RECEIPT); ok { + r0 = rf(ctx, txHash) + } else { + r0 = ret.Get(0).(TX_RECEIPT) + } + + if rf, ok := ret.Get(1).(func(context.Context, TX_HASH) error); ok { + r1 = rf(ctx, txHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UnsubscribeAllExceptAliveLoop provides a mock function with given fields: +func (_m *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]) UnsubscribeAllExceptAliveLoop() { + _m.Called() +} + +// newMockRPC creates a new instance of 
mockRPC. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newMockRPC[CHAIN_ID types.ID, SEQ types.Sequence, ADDR types.Hashable, BLOCK_HASH types.Hashable, TX interface{}, TX_HASH types.Hashable, EVENT interface{}, EVENT_OPS interface{}, TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], FEE feetypes.Fee, HEAD types.Head[BLOCK_HASH]](t interface { + mock.TestingT + Cleanup(func()) +}) *mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD] { + mock := &mockRPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/client/mock_send_only_client_test.go b/common/client/mock_send_only_client_test.go new file mode 100644 index 00000000..c5a8ecb9 --- /dev/null +++ b/common/client/mock_send_only_client_test.go @@ -0,0 +1,80 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package client + +import ( + context "context" + + types "github.com/goplugin/pluginv3.0/v2/common/types" + mock "github.com/stretchr/testify/mock" +) + +// mockSendOnlyClient is an autogenerated mock type for the sendOnlyClient type +type mockSendOnlyClient[CHAIN_ID types.ID] struct { + mock.Mock +} + +// ChainID provides a mock function with given fields: _a0 +func (_m *mockSendOnlyClient[CHAIN_ID]) ChainID(_a0 context.Context) (CHAIN_ID, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 CHAIN_ID + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (CHAIN_ID, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) CHAIN_ID); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(CHAIN_ID) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Close provides a mock function with given fields: +func (_m *mockSendOnlyClient[CHAIN_ID]) Close() { + _m.Called() +} + +// DialHTTP provides a mock function with given fields: +func (_m *mockSendOnlyClient[CHAIN_ID]) DialHTTP() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DialHTTP") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// newMockSendOnlyClient creates a new instance of mockSendOnlyClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func newMockSendOnlyClient[CHAIN_ID types.ID](t interface { + mock.TestingT + Cleanup(func()) +}) *mockSendOnlyClient[CHAIN_ID] { + mock := &mockSendOnlyClient[CHAIN_ID]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/client/mock_send_only_node_test.go b/common/client/mock_send_only_node_test.go new file mode 100644 index 00000000..6f331b87 --- /dev/null +++ b/common/client/mock_send_only_node_test.go @@ -0,0 +1,155 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package client + +import ( + context "context" + + types "github.com/goplugin/pluginv3.0/v2/common/types" + mock "github.com/stretchr/testify/mock" +) + +// mockSendOnlyNode is an autogenerated mock type for the SendOnlyNode type +type mockSendOnlyNode[CHAIN_ID types.ID, RPC sendOnlyClient[CHAIN_ID]] struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ConfiguredChainID provides a mock function with given fields: +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) ConfiguredChainID() CHAIN_ID { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ConfiguredChainID") + } + + var r0 CHAIN_ID + if rf, ok := ret.Get(0).(func() CHAIN_ID); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(CHAIN_ID) + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// RPC provides a mock function with given 
fields: +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) RPC() RPC { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RPC") + } + + var r0 RPC + if rf, ok := ret.Get(0).(func() RPC); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(RPC) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// State provides a mock function with given fields: +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) State() nodeState { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for State") + } + + var r0 nodeState + if rf, ok := ret.Get(0).(func() nodeState); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(nodeState) + } + + return r0 +} + +// String provides a mock function with given fields: +func (_m *mockSendOnlyNode[CHAIN_ID, RPC]) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// newMockSendOnlyNode creates a new instance of mockSendOnlyNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func newMockSendOnlyNode[CHAIN_ID types.ID, RPC sendOnlyClient[CHAIN_ID]](t interface { + mock.TestingT + Cleanup(func()) +}) *mockSendOnlyNode[CHAIN_ID, RPC] { + mock := &mockSendOnlyNode[CHAIN_ID, RPC]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/client/models.go b/common/client/models.go new file mode 100644 index 00000000..d0cf42a3 --- /dev/null +++ b/common/client/models.go @@ -0,0 +1,46 @@ +package client + +import ( + "fmt" +) + +type SendTxReturnCode int + +// SendTxReturnCode is a generalized client error that dictates what should be the next action, depending on the RPC error response. +const ( + Successful SendTxReturnCode = iota + 1 + Fatal // Unrecoverable error. Most likely the attempt should be thrown away. + Retryable // The error returned by the RPC indicates that if we retry with the same attempt, the tx will eventually go through. + Underpriced // Attempt was underpriced. New estimation is needed with bumped gas price. + Unknown // Tx failed with an error response that is not recognized by the client. + Unsupported // Attempt failed with an error response that is not supported by the client for the given chain. + TransactionAlreadyKnown // The transaction that was sent has already been received by the RPC. + InsufficientFunds // Tx was rejected due to insufficient funds. + ExceedsMaxFee // Attempt's fee was higher than the node's limit and got rejected. + FeeOutOfValidRange // This error is returned when we use a fee price suggested from an RPC, but the network rejects the attempt due to an invalid range(mostly used by L2 chains). Retry by requesting a new suggested fee price. + sendTxReturnCodeLen // tracks the number of errors. 
Must always be last +) + +// sendTxSevereErrors - error codes which signal that transaction would never be accepted in its current form by the node +var sendTxSevereErrors = []SendTxReturnCode{Fatal, Underpriced, Unsupported, ExceedsMaxFee, FeeOutOfValidRange, Unknown} + +// sendTxSuccessfulCodes - error codes which signal that transaction was accepted by the node +var sendTxSuccessfulCodes = []SendTxReturnCode{Successful, TransactionAlreadyKnown} + +type NodeTier int + +const ( + Primary = NodeTier(iota) + Secondary +) + +func (n NodeTier) String() string { + switch n { + case Primary: + return "primary" + case Secondary: + return "secondary" + default: + return fmt.Sprintf("NodeTier(%d)", n) + } +} diff --git a/common/client/multi_node.go b/common/client/multi_node.go new file mode 100644 index 00000000..7e470f5a --- /dev/null +++ b/common/client/multi_node.go @@ -0,0 +1,817 @@ +package client + +import ( + "context" + "fmt" + "math" + "math/big" + "slices" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + + "github.com/goplugin/pluginv3.0/v2/common/config" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +var ( + // PromMultiNodeRPCNodeStates reports current RPC node state + PromMultiNodeRPCNodeStates = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "multi_node_states", + Help: "The number of RPC nodes currently in the given state for the given chain", + }, []string{"network", "chainId", "state"}) + // PromMultiNodeInvariantViolations reports violation of our assumptions + PromMultiNodeInvariantViolations = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "multi_node_invariant_violations", + Help: "The 
number of invariant violations", + }, []string{"network", "chainId", "invariant"}) + ErroringNodeError = fmt.Errorf("no live nodes available") +) + +// MultiNode is a generalized multi node client interface that includes methods to interact with different chains. +// It also handles multiple node RPC connections simultaneously. +type MultiNode[ + CHAIN_ID types.ID, + SEQ types.Sequence, + ADDR types.Hashable, + BLOCK_HASH types.Hashable, + TX any, + TX_HASH types.Hashable, + EVENT any, + EVENT_OPS any, + TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], + FEE feetypes.Fee, + HEAD types.Head[BLOCK_HASH], + RPC_CLIENT RPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD], +] interface { + clientAPI[ + CHAIN_ID, + SEQ, + ADDR, + BLOCK_HASH, + TX, + TX_HASH, + EVENT, + EVENT_OPS, + TX_RECEIPT, + FEE, + HEAD, + ] + Close() error + NodeStates() map[string]string + SelectNodeRPC() (RPC_CLIENT, error) + + BatchCallContextAll(ctx context.Context, b []any) error + ConfiguredChainID() CHAIN_ID + IsL2() bool +} + +type multiNode[ + CHAIN_ID types.ID, + SEQ types.Sequence, + ADDR types.Hashable, + BLOCK_HASH types.Hashable, + TX any, + TX_HASH types.Hashable, + EVENT any, + EVENT_OPS any, + TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], + FEE feetypes.Fee, + HEAD types.Head[BLOCK_HASH], + RPC_CLIENT RPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD], +] struct { + services.StateMachine + nodes []Node[CHAIN_ID, HEAD, RPC_CLIENT] + sendonlys []SendOnlyNode[CHAIN_ID, RPC_CLIENT] + chainID CHAIN_ID + chainType config.ChainType + lggr logger.SugaredLogger + selectionMode string + noNewHeadsThreshold time.Duration + nodeSelector NodeSelector[CHAIN_ID, HEAD, RPC_CLIENT] + leaseDuration time.Duration + leaseTicker *time.Ticker + chainFamily string + reportInterval time.Duration + sendTxSoftTimeout time.Duration // defines max waiting time from first response til responses evaluation + + activeMu sync.RWMutex + 
activeNode Node[CHAIN_ID, HEAD, RPC_CLIENT] + + chStop services.StopChan + wg sync.WaitGroup + + classifySendTxError func(tx TX, err error) SendTxReturnCode +} + +func NewMultiNode[ + CHAIN_ID types.ID, + SEQ types.Sequence, + ADDR types.Hashable, + BLOCK_HASH types.Hashable, + TX any, + TX_HASH types.Hashable, + EVENT any, + EVENT_OPS any, + TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], + FEE feetypes.Fee, + HEAD types.Head[BLOCK_HASH], + RPC_CLIENT RPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD], +]( + lggr logger.Logger, + selectionMode string, + leaseDuration time.Duration, + noNewHeadsThreshold time.Duration, + nodes []Node[CHAIN_ID, HEAD, RPC_CLIENT], + sendonlys []SendOnlyNode[CHAIN_ID, RPC_CLIENT], + chainID CHAIN_ID, + chainType config.ChainType, + chainFamily string, + classifySendTxError func(tx TX, err error) SendTxReturnCode, + sendTxSoftTimeout time.Duration, +) MultiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT] { + nodeSelector := newNodeSelector(selectionMode, nodes) + // Prometheus' default interval is 15s, set this to under 7.5s to avoid + // aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency) + const reportInterval = 6500 * time.Millisecond + if sendTxSoftTimeout == 0 { + sendTxSoftTimeout = QueryTimeout / 2 + } + c := &multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]{ + nodes: nodes, + sendonlys: sendonlys, + chainID: chainID, + chainType: chainType, + lggr: logger.Sugared(lggr).Named("MultiNode").With("chainID", chainID.String()), + selectionMode: selectionMode, + noNewHeadsThreshold: noNewHeadsThreshold, + nodeSelector: nodeSelector, + chStop: make(services.StopChan), + leaseDuration: leaseDuration, + chainFamily: chainFamily, + classifySendTxError: classifySendTxError, + reportInterval: reportInterval, + sendTxSoftTimeout: sendTxSoftTimeout, + } + + c.lggr.Debugf("The 
MultiNode is configured to use NodeSelectionMode: %s", selectionMode) + + return c +} + +// Dial starts every node in the pool +// +// Nodes handle their own redialing and runloops, so this function does not +// return any error if the nodes aren't available +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) Dial(ctx context.Context) error { + return c.StartOnce("MultiNode", func() (merr error) { + if len(c.nodes) == 0 { + return fmt.Errorf("no available nodes for chain %s", c.chainID.String()) + } + var ms services.MultiStart + for _, n := range c.nodes { + if n.ConfiguredChainID().String() != c.chainID.String() { + return ms.CloseBecause(fmt.Errorf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", n.String(), n.ConfiguredChainID().String(), c.chainID.String())) + } + rawNode, ok := n.(*node[CHAIN_ID, HEAD, RPC_CLIENT]) + if ok { + // This is a bit hacky but it allows the node to be aware of + // pool state and prevent certain state transitions that might + // otherwise leave no nodes available. It is better to have one + // node in a degraded state than no nodes at all. 
+ rawNode.nLiveNodes = c.nLiveNodes + } + // node will handle its own redialing and automatic recovery + if err := ms.Start(ctx, n); err != nil { + return err + } + } + for _, s := range c.sendonlys { + if s.ConfiguredChainID().String() != c.chainID.String() { + return ms.CloseBecause(fmt.Errorf("sendonly node %s has configured chain ID %s which does not match multinode configured chain ID of %s", s.String(), s.ConfiguredChainID().String(), c.chainID.String())) + } + if err := ms.Start(ctx, s); err != nil { + return err + } + } + c.wg.Add(1) + go c.runLoop() + + if c.leaseDuration.Seconds() > 0 && c.selectionMode != NodeSelectionModeRoundRobin { + c.lggr.Infof("The MultiNode will switch to best node every %s", c.leaseDuration.String()) + c.wg.Add(1) + go c.checkLeaseLoop() + } else { + c.lggr.Info("Best node switching is disabled") + } + + return nil + }) +} + +// Close tears down the MultiNode and closes all nodes +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) Close() error { + return c.StopOnce("MultiNode", func() error { + close(c.chStop) + c.wg.Wait() + + return services.CloseAll(services.MultiCloser(c.nodes), services.MultiCloser(c.sendonlys)) + }) +} + +// SelectNodeRPC returns an RPC of an active node. If there are no active nodes it returns an error. +// Call this method from your chain-specific client implementation to access any chain-specific rpc calls. +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) SelectNodeRPC() (rpc RPC_CLIENT, err error) { + n, err := c.selectNode() + if err != nil { + return rpc, err + } + return n.RPC(), nil + +} + +// selectNode returns the active Node, if it is still nodeStateAlive, otherwise it selects a new one from the NodeSelector. 
+func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) selectNode() (node Node[CHAIN_ID, HEAD, RPC_CLIENT], err error) { + c.activeMu.RLock() + node = c.activeNode + c.activeMu.RUnlock() + if node != nil && node.State() == nodeStateAlive { + return // still alive + } + + // select a new one + c.activeMu.Lock() + defer c.activeMu.Unlock() + node = c.activeNode + if node != nil && node.State() == nodeStateAlive { + return // another goroutine beat us here + } + + c.activeNode = c.nodeSelector.Select() + + if c.activeNode == nil { + c.lggr.Criticalw("No live RPC nodes available", "NodeSelectionMode", c.nodeSelector.Name()) + errmsg := fmt.Errorf("no live nodes available for chain %s", c.chainID.String()) + c.SvcErrBuffer.Append(errmsg) + err = ErroringNodeError + } + + return c.activeNode, err +} + +// nLiveNodes returns the number of currently alive nodes, as well as the highest block number and greatest total difficulty. +// totalDifficulty will be 0 if all nodes return nil. +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) nLiveNodes() (nLiveNodes int, blockNumber int64, totalDifficulty *big.Int) { + totalDifficulty = big.NewInt(0) + for _, n := range c.nodes { + if s, num, td := n.StateAndLatest(); s == nodeStateAlive { + nLiveNodes++ + if num > blockNumber { + blockNumber = num + } + if td != nil && td.Cmp(totalDifficulty) > 0 { + totalDifficulty = td + } + } + } + return +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) checkLease() { + bestNode := c.nodeSelector.Select() + for _, n := range c.nodes { + // Terminate client subscriptions. Services are responsible for reconnecting, which will be routed to the new + // best node. 
Only terminate connections with more than 1 subscription to account for the aliveLoop subscription + if n.State() == nodeStateAlive && n != bestNode && n.SubscribersCount() > 1 { + c.lggr.Infof("Switching to best node from %q to %q", n.String(), bestNode.String()) + n.UnsubscribeAllExceptAliveLoop() + } + } + + c.activeMu.Lock() + if bestNode != c.activeNode { + c.activeNode = bestNode + } + c.activeMu.Unlock() +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) checkLeaseLoop() { + defer c.wg.Done() + c.leaseTicker = time.NewTicker(c.leaseDuration) + defer c.leaseTicker.Stop() + + for { + select { + case <-c.leaseTicker.C: + c.checkLease() + case <-c.chStop: + return + } + } +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) runLoop() { + defer c.wg.Done() + + c.report() + + monitor := time.NewTicker(utils.WithJitter(c.reportInterval)) + defer monitor.Stop() + + for { + select { + case <-monitor.C: + c.report() + case <-c.chStop: + return + } + } +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) report() { + type nodeWithState struct { + Node string + State string + } + + var total, dead int + counts := make(map[nodeState]int) + nodeStates := make([]nodeWithState, len(c.nodes)) + for i, n := range c.nodes { + state := n.State() + nodeStates[i] = nodeWithState{n.String(), state.String()} + total++ + if state != nodeStateAlive { + dead++ + } + counts[state]++ + } + for _, state := range allNodeStates { + count := counts[state] + PromMultiNodeRPCNodeStates.WithLabelValues(c.chainFamily, c.chainID.String(), state.String()).Set(float64(count)) + } + + live := total - dead + c.lggr.Tracew(fmt.Sprintf("MultiNode state: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) + if total == dead { + rerr := fmt.Errorf("no primary nodes available: 
0/%d nodes are alive", total) + c.lggr.Criticalw(rerr.Error(), "nodeStates", nodeStates) + c.SvcErrBuffer.Append(rerr) + } else if dead > 0 { + c.lggr.Errorw(fmt.Sprintf("At least one primary node is dead: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) + } +} + +// ClientAPI methods +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) BalanceAt(ctx context.Context, account ADDR, blockNumber *big.Int) (*big.Int, error) { + n, err := c.selectNode() + if err != nil { + return nil, err + } + return n.RPC().BalanceAt(ctx, account, blockNumber) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) BatchCallContext(ctx context.Context, b []any) error { + n, err := c.selectNode() + if err != nil { + return err + } + return n.RPC().BatchCallContext(ctx, b) +} + +// BatchCallContextAll calls BatchCallContext for every single node including +// sendonlys. +// CAUTION: This should only be used for mass re-transmitting transactions, it +// might have unexpected effects to use it for anything else. +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) BatchCallContextAll(ctx context.Context, b []any) error { + var wg sync.WaitGroup + defer wg.Wait() + + main, selectionErr := c.selectNode() + var all []SendOnlyNode[CHAIN_ID, RPC_CLIENT] + for _, n := range c.nodes { + all = append(all, n) + } + all = append(all, c.sendonlys...) 
+ for _, n := range all { + if n == main { + // main node is used at the end for the return value + continue + } + + if n.State() != nodeStateAlive { + continue + } + // Parallel call made to all other nodes with ignored return value + wg.Add(1) + go func(n SendOnlyNode[CHAIN_ID, RPC_CLIENT]) { + defer wg.Done() + err := n.RPC().BatchCallContext(ctx, b) + if err != nil { + c.lggr.Debugw("Secondary node BatchCallContext failed", "err", err) + } else { + c.lggr.Trace("Secondary node BatchCallContext success") + } + }(n) + } + + if selectionErr != nil { + return selectionErr + } + return main.RPC().BatchCallContext(ctx, b) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) BlockByHash(ctx context.Context, hash BLOCK_HASH) (h HEAD, err error) { + n, err := c.selectNode() + if err != nil { + return h, err + } + return n.RPC().BlockByHash(ctx, hash) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) BlockByNumber(ctx context.Context, number *big.Int) (h HEAD, err error) { + n, err := c.selectNode() + if err != nil { + return h, err + } + return n.RPC().BlockByNumber(ctx, number) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + n, err := c.selectNode() + if err != nil { + return err + } + return n.RPC().CallContext(ctx, result, method, args...) 
+} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) CallContract( + ctx context.Context, + attempt interface{}, + blockNumber *big.Int, +) (rpcErr []byte, extractErr error) { + n, err := c.selectNode() + if err != nil { + return rpcErr, err + } + return n.RPC().CallContract(ctx, attempt, blockNumber) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) PendingCallContract( + ctx context.Context, + attempt interface{}, +) (rpcErr []byte, extractErr error) { + n, err := c.selectNode() + if err != nil { + return rpcErr, err + } + return n.RPC().PendingCallContract(ctx, attempt) +} + +// ChainID makes a direct RPC call. In most cases it should be better to use the configured chain id instead by +// calling ConfiguredChainID. +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) ChainID(ctx context.Context) (id CHAIN_ID, err error) { + n, err := c.selectNode() + if err != nil { + return id, err + } + return n.RPC().ChainID(ctx) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) ChainType() config.ChainType { + return c.chainType +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) CodeAt(ctx context.Context, account ADDR, blockNumber *big.Int) (code []byte, err error) { + n, err := c.selectNode() + if err != nil { + return code, err + } + return n.RPC().CodeAt(ctx, account, blockNumber) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) ConfiguredChainID() CHAIN_ID { + return c.chainID +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) EstimateGas(ctx context.Context, call 
any) (gas uint64, err error) { + n, err := c.selectNode() + if err != nil { + return gas, err + } + return n.RPC().EstimateGas(ctx, call) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) FilterEvents(ctx context.Context, query EVENT_OPS) (e []EVENT, err error) { + n, err := c.selectNode() + if err != nil { + return e, err + } + return n.RPC().FilterEvents(ctx, query) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) IsL2() bool { + return c.ChainType().IsL2() +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) LatestBlockHeight(ctx context.Context) (h *big.Int, err error) { + n, err := c.selectNode() + if err != nil { + return h, err + } + return n.RPC().LatestBlockHeight(ctx) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) PLIBalance(ctx context.Context, accountAddress ADDR, linkAddress ADDR) (b *assets.Link, err error) { + n, err := c.selectNode() + if err != nil { + return b, err + } + return n.RPC().PLIBalance(ctx, accountAddress, linkAddress) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) NodeStates() (states map[string]string) { + states = make(map[string]string) + for _, n := range c.nodes { + states[n.Name()] = n.State().String() + } + for _, s := range c.sendonlys { + states[s.Name()] = s.State().String() + } + return +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) PendingSequenceAt(ctx context.Context, addr ADDR) (s SEQ, err error) { + n, err := c.selectNode() + if err != nil { + return s, err + } + return n.RPC().PendingSequenceAt(ctx, addr) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, 
TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) SendEmptyTransaction( + ctx context.Context, + newTxAttempt func(seq SEQ, feeLimit uint32, fee FEE, fromAddress ADDR) (attempt any, err error), + seq SEQ, + gasLimit uint32, + fee FEE, + fromAddress ADDR, +) (txhash string, err error) { + n, err := c.selectNode() + if err != nil { + return txhash, err + } + return n.RPC().SendEmptyTransaction(ctx, newTxAttempt, seq, gasLimit, fee, fromAddress) +} + +type sendTxResult struct { + Err error + ResultCode SendTxReturnCode +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) broadcastTxAsync(ctx context.Context, + n SendOnlyNode[CHAIN_ID, RPC_CLIENT], tx TX) sendTxResult { + txErr := n.RPC().SendTransaction(ctx, tx) + c.lggr.Debugw("Node sent transaction", "name", n.String(), "tx", tx, "err", txErr) + resultCode := c.classifySendTxError(tx, txErr) + if !slices.Contains(sendTxSuccessfulCodes, resultCode) { + c.lggr.Warnw("RPC returned error", "name", n.String(), "tx", tx, "err", txErr) + } + + return sendTxResult{Err: txErr, ResultCode: resultCode} +} + +// collectTxResults - refer to SendTransaction comment for implementation details, +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) collectTxResults(ctx context.Context, tx TX, healthyNodesNum int, txResults <-chan sendTxResult) error { + if healthyNodesNum == 0 { + return ErroringNodeError + } + // combine context and stop channel to ensure we stop, when signal received + ctx, cancel := c.chStop.Ctx(ctx) + defer cancel() + requiredResults := int(math.Ceil(float64(healthyNodesNum) * sendTxQuorum)) + errorsByCode := map[SendTxReturnCode][]error{} + var softTimeoutChan <-chan time.Time + var resultsCount int +loop: + for { + select { + case <-ctx.Done(): + c.lggr.Debugw("Failed to collect of the results before context was done", "tx", tx, "errorsByCode", errorsByCode) + 
return ctx.Err() + case result := <-txResults: + errorsByCode[result.ResultCode] = append(errorsByCode[result.ResultCode], result.Err) + resultsCount++ + if slices.Contains(sendTxSuccessfulCodes, result.ResultCode) || resultsCount >= requiredResults { + break loop + } + case <-softTimeoutChan: + c.lggr.Debugw("Send Tx soft timeout expired - returning responses we've collected so far", "tx", tx, "resultsCount", resultsCount, "requiredResults", requiredResults) + break loop + } + + if softTimeoutChan == nil { + tm := time.NewTimer(c.sendTxSoftTimeout) + softTimeoutChan = tm.C + // we are fine with stopping timer at the end of function + //nolint + defer tm.Stop() + } + } + + // ignore critical error as it's reported in reportSendTxAnomalies + result, _ := aggregateTxResults(errorsByCode) + return result + +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) reportSendTxAnomalies(tx TX, txResults <-chan sendTxResult) { + defer c.wg.Done() + resultsByCode := map[SendTxReturnCode][]error{} + // txResults eventually will be closed + for txResult := range txResults { + resultsByCode[txResult.ResultCode] = append(resultsByCode[txResult.ResultCode], txResult.Err) + } + + _, criticalErr := aggregateTxResults(resultsByCode) + if criticalErr != nil { + c.lggr.Criticalw("observed invariant violation on SendTransaction", "tx", tx, "resultsByCode", resultsByCode, "err", criticalErr) + c.SvcErrBuffer.Append(criticalErr) + PromMultiNodeInvariantViolations.WithLabelValues(c.chainFamily, c.chainID.String(), criticalErr.Error()).Inc() + } +} + +func aggregateTxResults(resultsByCode map[SendTxReturnCode][]error) (txResult error, err error) { + severeErrors, hasSevereErrors := findFirstIn(resultsByCode, sendTxSevereErrors) + successResults, hasSuccess := findFirstIn(resultsByCode, sendTxSuccessfulCodes) + if hasSuccess { + // We assume that primary node would never report false positive txResult for a transaction. 
+ // Thus, if such case occurs it's probably due to misconfiguration or a bug and requires manual intervention. + if hasSevereErrors { + const errMsg = "found contradictions in nodes replies on SendTransaction: got success and severe error" + // return success, since at least 1 node has accepted our broadcasted Tx, and thus it can now be included onchain + return successResults[0], fmt.Errorf(errMsg) + } + + // other errors are temporary - we are safe to return success + return successResults[0], nil + } + + if hasSevereErrors { + return severeErrors[0], nil + } + + // return temporary error + for _, result := range resultsByCode { + return result[0], nil + } + + err = fmt.Errorf("expected at least one response on SendTransaction") + return err, err +} + +const sendTxQuorum = 0.7 + +// SendTransaction - broadcasts transaction to all the send-only and primary nodes regardless of their health. +// A returned nil or error does not guarantee that the transaction will or won't be included. Additional checks must be +// performed to determine the final state. +// +// Send-only nodes' results are ignored as they tend to return false-positive responses. Broadcast to them is necessary +// to speed up the propagation of TX in the network. +// +// Handling of primary nodes' results consists of collection and aggregation. +// In the collection step, we gather as many results as possible while minimizing waiting time. This operation succeeds +// on one of the following conditions: +// * Received at least one success +// * Received at least one result and `sendTxSoftTimeout` expired +// * Received results from the sufficient number of nodes defined by sendTxQuorum. 
+// The aggregation is based on the following conditions: +// * If there is at least one success - returns success +// * If there is at least one terminal error - returns terminal error +// * If there is both success and terminal error - returns success and reports invariant violation +// * Otherwise, returns any (effectively random) of the errors. +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) SendTransaction(ctx context.Context, tx TX) error { + if len(c.nodes) == 0 { + return ErroringNodeError + } + + healthyNodesNum := 0 + txResults := make(chan sendTxResult, len(c.nodes)) + // Must wrap inside IfNotStopped to avoid waitgroup racing with Close + ok := c.IfNotStopped(func() { + // fire-n-forget, as sendOnlyNodes can not be trusted with result reporting + for _, n := range c.sendonlys { + if n.State() != nodeStateAlive { + continue + } + c.wg.Add(1) + go func(n SendOnlyNode[CHAIN_ID, RPC_CLIENT]) { + defer c.wg.Done() + c.broadcastTxAsync(ctx, n, tx) + }(n) + } + + var primaryBroadcastWg sync.WaitGroup + txResultsToReport := make(chan sendTxResult, len(c.nodes)) + for _, n := range c.nodes { + if n.State() != nodeStateAlive { + continue + } + + healthyNodesNum++ + primaryBroadcastWg.Add(1) + go func(n SendOnlyNode[CHAIN_ID, RPC_CLIENT]) { + defer primaryBroadcastWg.Done() + result := c.broadcastTxAsync(ctx, n, tx) + // both channels are sufficiently buffered, so we won't be locked + txResultsToReport <- result + txResults <- result + }(n) + } + + c.wg.Add(1) + go func() { + // wait for primary nodes to finish the broadcast before closing the channel + primaryBroadcastWg.Wait() + close(txResultsToReport) + close(txResults) + c.wg.Done() + }() + + c.wg.Add(1) + go c.reportSendTxAnomalies(tx, txResultsToReport) + + }) + if !ok { + return fmt.Errorf("aborted while broadcasting tx - multiNode is stopped: %w", context.Canceled) + } + + return c.collectTxResults(ctx, tx, healthyNodesNum, txResults) 
+} + +// findFirstIn - returns first existing value for the slice of keys +func findFirstIn[K comparable, V any](set map[K]V, keys []K) (V, bool) { + for _, k := range keys { + if v, ok := set[k]; ok { + return v, true + } + } + var v V + return v, false +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) SequenceAt(ctx context.Context, account ADDR, blockNumber *big.Int) (s SEQ, err error) { + n, err := c.selectNode() + if err != nil { + return s, err + } + return n.RPC().SequenceAt(ctx, account, blockNumber) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) SimulateTransaction(ctx context.Context, tx TX) error { + n, err := c.selectNode() + if err != nil { + return err + } + return n.RPC().SimulateTransaction(ctx, tx) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (s types.Subscription, err error) { + n, err := c.selectNode() + if err != nil { + return s, err + } + return n.RPC().Subscribe(ctx, channel, args...) 
+} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) TokenBalance(ctx context.Context, account ADDR, tokenAddr ADDR) (b *big.Int, err error) { + n, err := c.selectNode() + if err != nil { + return b, err + } + return n.RPC().TokenBalance(ctx, account, tokenAddr) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) TransactionByHash(ctx context.Context, txHash TX_HASH) (tx TX, err error) { + n, err := c.selectNode() + if err != nil { + return tx, err + } + return n.RPC().TransactionByHash(ctx, txHash) +} + +func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT]) TransactionReceipt(ctx context.Context, txHash TX_HASH) (txr TX_RECEIPT, err error) { + n, err := c.selectNode() + if err != nil { + return txr, err + } + return n.RPC().TransactionReceipt(ctx, txHash) +} diff --git a/common/client/multi_node_test.go b/common/client/multi_node_test.go new file mode 100644 index 00000000..1f7b0e68 --- /dev/null +++ b/common/client/multi_node_test.go @@ -0,0 +1,880 @@ +package client + +import ( + "context" + "errors" + "fmt" + "math/big" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/utils/tests" + + "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +type multiNodeRPCClient RPC[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, + types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable]] + +type testMultiNode struct { + *multiNode[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, + types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], multiNodeRPCClient] 
+} + +type multiNodeOpts struct { + logger logger.Logger + selectionMode string + leaseDuration time.Duration + noNewHeadsThreshold time.Duration + nodes []Node[types.ID, types.Head[Hashable], multiNodeRPCClient] + sendonlys []SendOnlyNode[types.ID, multiNodeRPCClient] + chainID types.ID + chainType config.ChainType + chainFamily string + classifySendTxError func(tx any, err error) SendTxReturnCode + sendTxSoftTimeout time.Duration +} + +func newTestMultiNode(t *testing.T, opts multiNodeOpts) testMultiNode { + if opts.logger == nil { + opts.logger = logger.Test(t) + } + + result := NewMultiNode[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, + types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], multiNodeRPCClient](opts.logger, + opts.selectionMode, opts.leaseDuration, opts.noNewHeadsThreshold, opts.nodes, opts.sendonlys, + opts.chainID, opts.chainType, opts.chainFamily, opts.classifySendTxError, opts.sendTxSoftTimeout) + return testMultiNode{ + result.(*multiNode[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, + types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable], multiNodeRPCClient]), + } +} + +func newMultiNodeRPCClient(t *testing.T) *mockRPC[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, + types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable]] { + return newMockRPC[types.ID, *big.Int, Hashable, Hashable, any, Hashable, any, any, + types.Receipt[Hashable, Hashable], Hashable, types.Head[Hashable]](t) +} + +func newHealthyNode(t *testing.T, chainID types.ID) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { + return newNodeWithState(t, chainID, nodeStateAlive) +} + +func newNodeWithState(t *testing.T, chainID types.ID, state nodeState) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { + node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + node.On("ConfiguredChainID").Return(chainID).Once() + node.On("Start", 
mock.Anything).Return(nil).Once() + node.On("Close").Return(nil).Once() + node.On("State").Return(state).Maybe() + node.On("String").Return(fmt.Sprintf("healthy_node_%d", rand.Int())).Maybe() + return node +} +func TestMultiNode_Dial(t *testing.T) { + t.Parallel() + + newMockNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] + newMockSendOnlyNode := newMockSendOnlyNode[types.ID, multiNodeRPCClient] + + t.Run("Fails without nodes", func(t *testing.T) { + t.Parallel() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: types.RandomID(), + }) + err := mn.Dial(tests.Context(t)) + assert.EqualError(t, err, fmt.Sprintf("no available nodes for chain %s", mn.chainID.String())) + }) + t.Run("Fails with wrong node's chainID", func(t *testing.T) { + t.Parallel() + node := newMockNode(t) + multiNodeChainID := types.NewIDFromInt(10) + nodeChainID := types.NewIDFromInt(11) + node.On("ConfiguredChainID").Return(nodeChainID).Twice() + const nodeName = "nodeName" + node.On("String").Return(nodeName).Once() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: multiNodeChainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + }) + err := mn.Dial(tests.Context(t)) + assert.EqualError(t, err, fmt.Sprintf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", nodeName, nodeChainID, mn.chainID)) + }) + t.Run("Fails if node fails", func(t *testing.T) { + t.Parallel() + node := newMockNode(t) + chainID := types.RandomID() + node.On("ConfiguredChainID").Return(chainID).Once() + expectedError := errors.New("failed to start node") + node.On("Start", mock.Anything).Return(expectedError).Once() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + }) + err := mn.Dial(tests.Context(t)) 
+ assert.EqualError(t, err, expectedError.Error()) + }) + + t.Run("Closes started nodes on failure", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node1 := newHealthyNode(t, chainID) + node2 := newMockNode(t) + node2.On("ConfiguredChainID").Return(chainID).Once() + expectedError := errors.New("failed to start node") + node2.On("Start", mock.Anything).Return(expectedError).Once() + + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node1, node2}, + }) + err := mn.Dial(tests.Context(t)) + assert.EqualError(t, err, expectedError.Error()) + }) + t.Run("Fails with wrong send only node's chainID", func(t *testing.T) { + t.Parallel() + multiNodeChainID := types.NewIDFromInt(10) + node := newHealthyNode(t, multiNodeChainID) + sendOnly := newMockSendOnlyNode(t) + sendOnlyChainID := types.NewIDFromInt(11) + sendOnly.On("ConfiguredChainID").Return(sendOnlyChainID).Twice() + const sendOnlyName = "sendOnlyNodeName" + sendOnly.On("String").Return(sendOnlyName).Once() + + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: multiNodeChainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{sendOnly}, + }) + err := mn.Dial(tests.Context(t)) + assert.EqualError(t, err, fmt.Sprintf("sendonly node %s has configured chain ID %s which does not match multinode configured chain ID of %s", sendOnlyName, sendOnlyChainID, mn.chainID)) + }) + + newHealthySendOnly := func(t *testing.T, chainID types.ID) *mockSendOnlyNode[types.ID, multiNodeRPCClient] { + node := newMockSendOnlyNode(t) + node.On("ConfiguredChainID").Return(chainID).Once() + node.On("Start", mock.Anything).Return(nil).Once() + node.On("Close").Return(nil).Once() + return node + } + t.Run("Fails on send only node failure", func(t *testing.T) { + 
t.Parallel() + chainID := types.NewIDFromInt(10) + node := newHealthyNode(t, chainID) + sendOnly1 := newHealthySendOnly(t, chainID) + sendOnly2 := newMockSendOnlyNode(t) + sendOnly2.On("ConfiguredChainID").Return(chainID).Once() + expectedError := errors.New("failed to start send only node") + sendOnly2.On("Start", mock.Anything).Return(expectedError).Once() + + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{sendOnly1, sendOnly2}, + }) + err := mn.Dial(tests.Context(t)) + assert.EqualError(t, err, expectedError.Error()) + }) + t.Run("Starts successfully with healthy nodes", func(t *testing.T) { + t.Parallel() + chainID := types.NewIDFromInt(10) + node := newHealthyNode(t, chainID) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{newHealthySendOnly(t, chainID)}, + }) + defer func() { assert.NoError(t, mn.Close()) }() + err := mn.Dial(tests.Context(t)) + require.NoError(t, err) + selectedNode, err := mn.selectNode() + require.NoError(t, err) + assert.Equal(t, node, selectedNode) + }) +} + +func TestMultiNode_Report(t *testing.T) { + t.Parallel() + t.Run("Dial starts periodical reporting", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node1 := newHealthyNode(t, chainID) + node2 := newNodeWithState(t, chainID, nodeStateOutOfSync) + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node1, node2}, + logger: lggr, + }) + mn.reportInterval = tests.TestInterval + defer func() { 
assert.NoError(t, mn.Close()) }() + err := mn.Dial(tests.Context(t)) + require.NoError(t, err) + tests.AssertLogCountEventually(t, observedLogs, "At least one primary node is dead: 1/2 nodes are alive", 2) + }) + t.Run("Report critical error on all node failure", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node := newNodeWithState(t, chainID, nodeStateOutOfSync) + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + logger: lggr, + }) + mn.reportInterval = tests.TestInterval + defer func() { assert.NoError(t, mn.Close()) }() + err := mn.Dial(tests.Context(t)) + require.NoError(t, err) + tests.AssertLogCountEventually(t, observedLogs, "no primary nodes available: 0/1 nodes are alive", 2) + err = mn.Healthy() + require.Error(t, err) + assert.Contains(t, err.Error(), "no primary nodes available: 0/1 nodes are alive") + }) +} + +func TestMultiNode_CheckLease(t *testing.T) { + t.Parallel() + t.Run("Round robin disables lease check", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node := newHealthyNode(t, chainID) + lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + logger: lggr, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + }) + defer func() { assert.NoError(t, mn.Close()) }() + err := mn.Dial(tests.Context(t)) + require.NoError(t, err) + tests.RequireLogMessage(t, observedLogs, "Best node switching is disabled") + }) + t.Run("Misconfigured lease check period won't start", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node := newHealthyNode(t, chainID) + lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + 
selectionMode: NodeSelectionModeHighestHead, + chainID: chainID, + logger: lggr, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node}, + leaseDuration: 0, + }) + defer func() { assert.NoError(t, mn.Close()) }() + err := mn.Dial(tests.Context(t)) + require.NoError(t, err) + tests.RequireLogMessage(t, observedLogs, "Best node switching is disabled") + }) + t.Run("Lease check updates active node", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node := newHealthyNode(t, chainID) + node.On("SubscribersCount").Return(int32(2)) + node.On("UnsubscribeAllExceptAliveLoop") + bestNode := newHealthyNode(t, chainID) + nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + nodeSelector.On("Select").Return(bestNode) + lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeHighestHead, + chainID: chainID, + logger: lggr, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node, bestNode}, + leaseDuration: tests.TestInterval, + }) + defer func() { assert.NoError(t, mn.Close()) }() + mn.nodeSelector = nodeSelector + err := mn.Dial(tests.Context(t)) + require.NoError(t, err) + tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("Switching to best node from %q to %q", node.String(), bestNode.String())) + tests.AssertEventually(t, func() bool { + mn.activeMu.RLock() + active := mn.activeNode + mn.activeMu.RUnlock() + return bestNode == active + }) + }) + t.Run("NodeStates returns proper states", func(t *testing.T) { + t.Parallel() + chainID := types.NewIDFromInt(10) + nodes := map[string]nodeState{ + "node_1": nodeStateAlive, + "node_2": nodeStateUnreachable, + "node_3": nodeStateDialed, + } + + opts := multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + } + + expectedResult := map[string]string{} + for name, state := range nodes { + node := newMockNode[types.ID, 
types.Head[Hashable], multiNodeRPCClient](t) + node.On("Name").Return(name).Once() + node.On("State").Return(state).Once() + opts.nodes = append(opts.nodes, node) + + sendOnly := newMockSendOnlyNode[types.ID, multiNodeRPCClient](t) + sendOnlyName := "send_only_" + name + sendOnly.On("Name").Return(sendOnlyName).Once() + sendOnly.On("State").Return(state).Once() + opts.sendonlys = append(opts.sendonlys, sendOnly) + + expectedResult[name] = state.String() + expectedResult[sendOnlyName] = state.String() + } + + mn := newTestMultiNode(t, opts) + states := mn.NodeStates() + assert.Equal(t, expectedResult, states) + }) +} + +func TestMultiNode_selectNode(t *testing.T) { + t.Parallel() + t.Run("Returns same node, if it's still healthy", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + node1 := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + node1.On("State").Return(nodeStateAlive).Once() + node1.On("String").Return("node1").Maybe() + node2 := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + node2.On("String").Return("node2").Maybe() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{node1, node2}, + }) + nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + nodeSelector.On("Select").Return(node1).Once() + mn.nodeSelector = nodeSelector + prevActiveNode, err := mn.selectNode() + require.NoError(t, err) + require.Equal(t, node1.String(), prevActiveNode.String()) + newActiveNode, err := mn.selectNode() + require.NoError(t, err) + require.Equal(t, prevActiveNode.String(), newActiveNode.String()) + + }) + t.Run("Updates node if active is not healthy", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + oldBest := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + oldBest.On("String").Return("oldBest").Maybe() + newBest := 
newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + newBest.On("String").Return("newBest").Maybe() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{oldBest, newBest}, + }) + nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + nodeSelector.On("Select").Return(oldBest).Once() + mn.nodeSelector = nodeSelector + activeNode, err := mn.selectNode() + require.NoError(t, err) + require.Equal(t, oldBest.String(), activeNode.String()) + // old best died, so we should replace it + oldBest.On("State").Return(nodeStateOutOfSync).Twice() + nodeSelector.On("Select").Return(newBest).Once() + newActiveNode, err := mn.selectNode() + require.NoError(t, err) + require.Equal(t, newBest.String(), newActiveNode.String()) + + }) + t.Run("No active nodes - reports critical error", func(t *testing.T) { + t.Parallel() + chainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + logger: lggr, + }) + nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + nodeSelector.On("Select").Return(nil).Once() + nodeSelector.On("Name").Return("MockedNodeSelector").Once() + mn.nodeSelector = nodeSelector + node, err := mn.selectNode() + require.EqualError(t, err, ErroringNodeError.Error()) + require.Nil(t, node) + tests.RequireLogMessage(t, observedLogs, "No live RPC nodes available") + + }) +} + +func TestMultiNode_nLiveNodes(t *testing.T) { + t.Parallel() + type nodeParams struct { + BlockNumber int64 + TotalDifficulty *big.Int + State nodeState + } + testCases := []struct { + Name string + ExpectedNLiveNodes int + ExpectedBlockNumber int64 + ExpectedTotalDifficulty *big.Int + NodeParams []nodeParams + }{ + { + Name: "no nodes", + 
ExpectedTotalDifficulty: big.NewInt(0), + }, + { + Name: "Best node is not healthy", + ExpectedTotalDifficulty: big.NewInt(10), + ExpectedBlockNumber: 20, + ExpectedNLiveNodes: 3, + NodeParams: []nodeParams{ + { + State: nodeStateOutOfSync, + BlockNumber: 1000, + TotalDifficulty: big.NewInt(2000), + }, + { + State: nodeStateAlive, + BlockNumber: 20, + TotalDifficulty: big.NewInt(9), + }, + { + State: nodeStateAlive, + BlockNumber: 19, + TotalDifficulty: big.NewInt(10), + }, + { + State: nodeStateAlive, + BlockNumber: 11, + TotalDifficulty: nil, + }, + }, + }, + } + + chainID := types.RandomID() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + }) + for i := range testCases { + tc := testCases[i] + t.Run(tc.Name, func(t *testing.T) { + for _, params := range tc.NodeParams { + node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + node.On("StateAndLatest").Return(params.State, params.BlockNumber, params.TotalDifficulty) + mn.nodes = append(mn.nodes, node) + } + + nNodes, blockNum, td := mn.nLiveNodes() + assert.Equal(t, tc.ExpectedNLiveNodes, nNodes) + assert.Equal(t, tc.ExpectedTotalDifficulty, td) + assert.Equal(t, tc.ExpectedBlockNumber, blockNum) + }) + } +} + +func TestMultiNode_BatchCallContextAll(t *testing.T) { + t.Parallel() + t.Run("Fails if failed to select active node", func(t *testing.T) { + chainID := types.RandomID() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + }) + nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + nodeSelector.On("Select").Return(nil).Once() + nodeSelector.On("Name").Return("MockedNodeSelector").Once() + mn.nodeSelector = nodeSelector + err := mn.BatchCallContextAll(tests.Context(t), nil) + require.EqualError(t, err, ErroringNodeError.Error()) + }) + t.Run("Returns error if RPC call fails for active node", func(t *testing.T) { + chainID := 
types.RandomID() + rpc := newMultiNodeRPCClient(t) + expectedError := errors.New("rpc failed to do the batch call") + rpc.On("BatchCallContext", mock.Anything, mock.Anything).Return(expectedError).Once() + node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + node.On("RPC").Return(rpc) + nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + nodeSelector.On("Select").Return(node).Once() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + }) + mn.nodeSelector = nodeSelector + err := mn.BatchCallContextAll(tests.Context(t), nil) + require.EqualError(t, err, expectedError.Error()) + }) + t.Run("Waits for all nodes to complete the call and logs results", func(t *testing.T) { + // setup RPCs + failedRPC := newMultiNodeRPCClient(t) + failedRPC.On("BatchCallContext", mock.Anything, mock.Anything). + Return(errors.New("rpc failed to do the batch call")).Once() + okRPC := newMultiNodeRPCClient(t) + okRPC.On("BatchCallContext", mock.Anything, mock.Anything).Return(nil).Twice() + + // setup ok and failed auxiliary nodes + okNode := newMockSendOnlyNode[types.ID, multiNodeRPCClient](t) + okNode.On("RPC").Return(okRPC).Once() + okNode.On("State").Return(nodeStateAlive) + failedNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + failedNode.On("RPC").Return(failedRPC).Once() + failedNode.On("State").Return(nodeStateAlive) + + // setup main node + mainNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + mainNode.On("RPC").Return(okRPC) + nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + nodeSelector.On("Select").Return(mainNode).Once() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: types.RandomID(), + nodes: []Node[types.ID, types.Head[Hashable], 
multiNodeRPCClient]{failedNode, mainNode}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{okNode}, + logger: lggr, + }) + mn.nodeSelector = nodeSelector + + err := mn.BatchCallContextAll(tests.Context(t), nil) + require.NoError(t, err) + tests.RequireLogMessage(t, observedLogs, "Secondary node BatchCallContext failed") + }) + t.Run("Does not call BatchCallContext for unhealthy nodes", func(t *testing.T) { + // setup RPCs + okRPC := newMultiNodeRPCClient(t) + okRPC.On("BatchCallContext", mock.Anything, mock.Anything).Return(nil).Twice() + + // setup ok and failed auxiliary nodes + healthyNode := newMockSendOnlyNode[types.ID, multiNodeRPCClient](t) + healthyNode.On("RPC").Return(okRPC).Once() + healthyNode.On("State").Return(nodeStateAlive) + deadNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + deadNode.On("State").Return(nodeStateUnreachable) + + // setup main node + mainNode := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + mainNode.On("RPC").Return(okRPC) + nodeSelector := newMockNodeSelector[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + nodeSelector.On("Select").Return(mainNode).Once() + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: types.RandomID(), + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{deadNode, mainNode}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{healthyNode, deadNode}, + }) + mn.nodeSelector = nodeSelector + + err := mn.BatchCallContextAll(tests.Context(t), nil) + require.NoError(t, err) + }) +} + +func TestMultiNode_SendTransaction(t *testing.T) { + t.Parallel() + classifySendTxError := func(tx any, err error) SendTxReturnCode { + if err != nil { + return Fatal + } + + return Successful + } + newNodeWithState := func(t *testing.T, state nodeState, txErr error, sendTxRun func(args mock.Arguments)) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { + rpc := 
newMultiNodeRPCClient(t) + rpc.On("SendTransaction", mock.Anything, mock.Anything).Return(txErr).Run(sendTxRun).Maybe() + node := newMockNode[types.ID, types.Head[Hashable], multiNodeRPCClient](t) + node.On("String").Return("node name").Maybe() + node.On("RPC").Return(rpc).Maybe() + node.On("State").Return(state).Maybe() + node.On("Close").Return(nil).Once() + return node + } + + newNode := func(t *testing.T, txErr error, sendTxRun func(args mock.Arguments)) *mockNode[types.ID, types.Head[Hashable], multiNodeRPCClient] { + return newNodeWithState(t, nodeStateAlive, txErr, sendTxRun) + } + newStartedMultiNode := func(t *testing.T, opts multiNodeOpts) testMultiNode { + mn := newTestMultiNode(t, opts) + err := mn.StartOnce("startedTestMultiNode", func() error { return nil }) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, mn.Close()) + }) + return mn + } + t.Run("Fails if there is no nodes available", func(t *testing.T) { + mn := newStartedMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: types.RandomID(), + }) + err := mn.SendTransaction(tests.Context(t), nil) + assert.EqualError(t, err, ErroringNodeError.Error()) + }) + t.Run("Transaction failure happy path", func(t *testing.T) { + chainID := types.RandomID() + expectedError := errors.New("transaction failed") + mainNode := newNode(t, expectedError, nil) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + mn := newStartedMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{mainNode}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{newNode(t, errors.New("unexpected error"), nil)}, + classifySendTxError: classifySendTxError, + logger: lggr, + }) + err := mn.SendTransaction(tests.Context(t), nil) + require.EqualError(t, err, expectedError.Error()) + tests.AssertLogCountEventually(t, observedLogs, "Node sent transaction", 2) + 
tests.AssertLogCountEventually(t, observedLogs, "RPC returned error", 2) + }) + t.Run("Transaction success happy path", func(t *testing.T) { + chainID := types.RandomID() + mainNode := newNode(t, nil, nil) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + mn := newStartedMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{mainNode}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{newNode(t, errors.New("unexpected error"), nil)}, + classifySendTxError: classifySendTxError, + logger: lggr, + }) + err := mn.SendTransaction(tests.Context(t), nil) + require.NoError(t, err) + tests.AssertLogCountEventually(t, observedLogs, "Node sent transaction", 2) + tests.AssertLogCountEventually(t, observedLogs, "RPC returned error", 1) + }) + t.Run("Context expired before collecting sufficient results", func(t *testing.T) { + chainID := types.RandomID() + testContext, testCancel := context.WithCancel(tests.Context(t)) + defer testCancel() + mainNode := newNode(t, errors.New("unexpected error"), func(_ mock.Arguments) { + // block caller til end of the test + <-testContext.Done() + }) + mn := newStartedMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{mainNode}, + classifySendTxError: classifySendTxError, + }) + requestContext, cancel := context.WithCancel(tests.Context(t)) + cancel() + err := mn.SendTransaction(requestContext, nil) + require.EqualError(t, err, "context canceled") + }) + t.Run("Soft timeout stops results collection", func(t *testing.T) { + chainID := types.RandomID() + expectedError := errors.New("tmp error") + fastNode := newNode(t, expectedError, nil) + // hold reply from the node till end of the test + testContext, testCancel := context.WithCancel(tests.Context(t)) + defer testCancel() + slowNode := newNode(t, 
errors.New("transaction failed"), func(_ mock.Arguments) { + // block caller til end of the test + <-testContext.Done() + }) + mn := newStartedMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{fastNode, slowNode}, + classifySendTxError: classifySendTxError, + sendTxSoftTimeout: tests.TestInterval, + }) + err := mn.SendTransaction(tests.Context(t), nil) + require.EqualError(t, err, expectedError.Error()) + }) + t.Run("Returns success without waiting for the rest of the nodes", func(t *testing.T) { + chainID := types.RandomID() + fastNode := newNode(t, nil, nil) + // hold reply from the node till end of the test + testContext, testCancel := context.WithCancel(tests.Context(t)) + defer testCancel() + slowNode := newNode(t, errors.New("transaction failed"), func(_ mock.Arguments) { + // block caller til end of the test + <-testContext.Done() + }) + slowSendOnly := newNode(t, errors.New("send only failed"), func(_ mock.Arguments) { + // block caller til end of the test + <-testContext.Done() + }) + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + mn := newTestMultiNode(t, multiNodeOpts{ + logger: lggr, + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{fastNode, slowNode}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{slowSendOnly}, + classifySendTxError: classifySendTxError, + sendTxSoftTimeout: tests.TestInterval, + }) + assert.NoError(t, mn.StartOnce("startedTestMultiNode", func() error { return nil })) + err := mn.SendTransaction(tests.Context(t), nil) + require.NoError(t, err) + testCancel() + require.NoError(t, mn.Close()) + tests.AssertLogEventually(t, observedLogs, "observed invariant violation on SendTransaction") + }) + t.Run("Fails when closed", func(t *testing.T) { + mn := newTestMultiNode(t, multiNodeOpts{ + selectionMode: 
NodeSelectionModeRoundRobin, + chainID: types.RandomID(), + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{newNode(t, nil, nil)}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{newNode(t, nil, nil)}, + classifySendTxError: classifySendTxError, + }) + err := mn.StartOnce("startedTestMultiNode", func() error { return nil }) + require.NoError(t, err) + require.NoError(t, mn.Close()) + err = mn.SendTransaction(tests.Context(t), nil) + require.EqualError(t, err, "aborted while broadcasting tx - multiNode is stopped: context canceled") + }) + t.Run("Returns error if there is no healthy primary nodes", func(t *testing.T) { + mn := newStartedMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: types.RandomID(), + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{newNodeWithState(t, nodeStateUnreachable, nil, nil)}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{newNodeWithState(t, nodeStateUnreachable, nil, nil)}, + classifySendTxError: classifySendTxError, + }) + err := mn.SendTransaction(tests.Context(t), nil) + assert.EqualError(t, err, ErroringNodeError.Error()) + }) + t.Run("Transaction success even if one of the nodes is unhealthy", func(t *testing.T) { + chainID := types.RandomID() + mainNode := newNode(t, nil, nil) + unexpectedCall := func(args mock.Arguments) { + panic("SendTx must not be called for unhealthy node") + } + unhealthyNode := newNodeWithState(t, nodeStateUnreachable, nil, unexpectedCall) + unhealthySendOnlyNode := newNodeWithState(t, nodeStateUnreachable, nil, unexpectedCall) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + mn := newStartedMultiNode(t, multiNodeOpts{ + selectionMode: NodeSelectionModeRoundRobin, + chainID: chainID, + nodes: []Node[types.ID, types.Head[Hashable], multiNodeRPCClient]{mainNode, unhealthyNode}, + sendonlys: []SendOnlyNode[types.ID, multiNodeRPCClient]{unhealthySendOnlyNode, newNode(t, errors.New("unexpected error"), 
nil)}, + classifySendTxError: classifySendTxError, + logger: lggr, + }) + err := mn.SendTransaction(tests.Context(t), nil) + require.NoError(t, err) + tests.AssertLogCountEventually(t, observedLogs, "Node sent transaction", 2) + tests.AssertLogCountEventually(t, observedLogs, "RPC returned error", 1) + }) +} + +func TestMultiNode_SendTransaction_aggregateTxResults(t *testing.T) { + t.Parallel() + // ensure failure on new SendTxReturnCode + codesToCover := map[SendTxReturnCode]struct{}{} + for code := Successful; code < sendTxReturnCodeLen; code++ { + codesToCover[code] = struct{}{} + } + + testCases := []struct { + Name string + ExpectedTxResult string + ExpectedCriticalErr string + ResultsByCode map[SendTxReturnCode][]error + }{ + { + Name: "Returns success and logs critical error on success and Fatal", + ExpectedTxResult: "success", + ExpectedCriticalErr: "found contradictions in nodes replies on SendTransaction: got success and severe error", + ResultsByCode: map[SendTxReturnCode][]error{ + Successful: {errors.New("success")}, + Fatal: {errors.New("fatal")}, + }, + }, + { + Name: "Returns TransactionAlreadyKnown and logs critical error on TransactionAlreadyKnown and Fatal", + ExpectedTxResult: "tx_already_known", + ExpectedCriticalErr: "found contradictions in nodes replies on SendTransaction: got success and severe error", + ResultsByCode: map[SendTxReturnCode][]error{ + TransactionAlreadyKnown: {errors.New("tx_already_known")}, + Unsupported: {errors.New("unsupported")}, + }, + }, + { + Name: "Prefers sever error to temporary", + ExpectedTxResult: "underpriced", + ExpectedCriticalErr: "", + ResultsByCode: map[SendTxReturnCode][]error{ + Retryable: {errors.New("retryable")}, + Underpriced: {errors.New("underpriced")}, + }, + }, + { + Name: "Returns temporary error", + ExpectedTxResult: "retryable", + ExpectedCriticalErr: "", + ResultsByCode: map[SendTxReturnCode][]error{ + Retryable: {errors.New("retryable")}, + }, + }, + { + Name: "Insufficient funds is 
treated as error", + ExpectedTxResult: "", + ExpectedCriticalErr: "", + ResultsByCode: map[SendTxReturnCode][]error{ + Successful: {nil}, + InsufficientFunds: {errors.New("insufficientFunds")}, + }, + }, + { + Name: "Logs critical error on empty ResultsByCode", + ExpectedTxResult: "expected at least one response on SendTransaction", + ExpectedCriticalErr: "expected at least one response on SendTransaction", + ResultsByCode: map[SendTxReturnCode][]error{}, + }, + } + + for _, testCase := range testCases { + for code := range testCase.ResultsByCode { + delete(codesToCover, code) + } + t.Run(testCase.Name, func(t *testing.T) { + txResult, err := aggregateTxResults(testCase.ResultsByCode) + if testCase.ExpectedTxResult == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, txResult, testCase.ExpectedTxResult) + } + + if testCase.ExpectedCriticalErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, testCase.ExpectedCriticalErr) + } + }) + } + + // explicitly signal that following codes are properly handled in aggregateTxResults, + //but dedicated test cases won't be beneficial + for _, codeToIgnore := range []SendTxReturnCode{Unknown, ExceedsMaxFee, FeeOutOfValidRange} { + delete(codesToCover, codeToIgnore) + } + assert.Empty(t, codesToCover, "all of the SendTxReturnCode must be covered by this test") + +} diff --git a/common/client/node.go b/common/client/node.go new file mode 100644 index 00000000..a8cd8f80 --- /dev/null +++ b/common/client/node.go @@ -0,0 +1,285 @@ +package client + +import ( + "context" + "errors" + "fmt" + "math/big" + "net/url" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +const QueryTimeout = 10 * time.Second + +var errInvalidChainID = errors.New("invalid chain id") + 
// Prometheus counters tracking chain ID verification attempts and their
// outcomes, labelled by network (chain family), chain ID and node name.
var (
	promPoolRPCNodeVerifies = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "pool_rpc_node_verifies",
		Help: "The total number of chain ID verifications for the given RPC node",
	}, []string{"network", "chainID", "nodeName"})
	promPoolRPCNodeVerifiesFailed = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "pool_rpc_node_verifies_failed",
		Help: "The total number of failed chain ID verifications for the given RPC node",
	}, []string{"network", "chainID", "nodeName"})
	promPoolRPCNodeVerifiesSuccess = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "pool_rpc_node_verifies_success",
		Help: "The total number of successful chain ID verifications for the given RPC node",
	}, []string{"network", "chainID", "nodeName"})
)

// NodeConfig is the subset of node-pool configuration a node consumes.
type NodeConfig interface {
	PollFailureThreshold() uint32
	PollInterval() time.Duration
	SelectionMode() string
	SyncThreshold() uint32
}

//go:generate mockery --quiet --name Node --structname mockNode --filename "mock_node_test.go" --inpackage --case=underscore

// Node is the interface of a primary RPC node managed by the pool.
type Node[
	CHAIN_ID types.ID,
	HEAD Head,
	RPC NodeClient[CHAIN_ID, HEAD],
] interface {
	// State returns nodeState
	State() nodeState
	// StateAndLatest returns nodeState with the latest received block number & total difficulty.
	StateAndLatest() (nodeState, int64, *big.Int)
	// Name is a unique identifier for this node.
	Name() string
	// String returns a human-readable representation of the node (name plus URLs).
	String() string
	// RPC returns the underlying RPC client.
	RPC() RPC
	// SubscribersCount reports the subscriber count of the underlying RPC client.
	SubscribersCount() int32
	// UnsubscribeAllExceptAliveLoop is delegated to the underlying RPC client.
	UnsubscribeAllExceptAliveLoop()
	// ConfiguredChainID returns the chain ID this node was configured with.
	ConfiguredChainID() CHAIN_ID
	// Order returns the node's configured priority order.
	Order() int32
	Start(context.Context) error
	Close() error
}

// node is the concrete Node implementation; it is a finite state machine
// (see node_fsm) whose state* fields are guarded by stateMu.
type node[
	CHAIN_ID types.ID,
	HEAD Head,
	RPC NodeClient[CHAIN_ID, HEAD],
] struct {
	services.StateMachine
	lfcLog              logger.Logger // lifecycle logger, named "Lifecycle" in NewNode
	name                string
	id                  int32
	chainID             CHAIN_ID
	nodePoolCfg         NodeConfig
	noNewHeadsThreshold time.Duration
	order               int32
	chainFamily         string

	ws   url.URL  // websocket URL
	http *url.URL // optional HTTP URL; nil if not configured

	rpc RPC

	stateMu sync.RWMutex // protects state* fields
	state   nodeState
	// Each node is tracking the last received head number and total difficulty
	stateLatestBlockNumber     int64
	stateLatestTotalDifficulty *big.Int

	// nodeCtx is the node lifetime's context
	nodeCtx context.Context
	// cancelNodeCtx cancels nodeCtx when stopping the node
	cancelNodeCtx context.CancelFunc
	// wg waits for subsidiary goroutines
	wg sync.WaitGroup

	// nLiveNodes is a passed in function that allows this node to:
	// 1. see how many live nodes there are in total, so we can prevent the last alive node in a pool from being
	// moved to out-of-sync state. It is better to have one out-of-sync node than no nodes at all.
	// 2. compare against the highest head (by number or difficulty) to ensure we don't fall behind too far.
	nLiveNodes func() (count int, blockNumber int64, totalDifficulty *big.Int)
}

// NewNode constructs a primary node wrapping the given RPC client.
// The returned node is Undialed; call Start to dial and verify it.
func NewNode[
	CHAIN_ID types.ID,
	HEAD Head,
	RPC NodeClient[CHAIN_ID, HEAD],
](
	nodeCfg NodeConfig,
	noNewHeadsThreshold time.Duration,
	lggr logger.Logger,
	wsuri url.URL,
	httpuri *url.URL,
	name string,
	id int32,
	chainID CHAIN_ID,
	nodeOrder int32,
	rpc RPC,
	chainFamily string,
) Node[CHAIN_ID, HEAD, RPC] {
	n := new(node[CHAIN_ID, HEAD, RPC])
	n.name = name
	n.id = id
	n.chainID = chainID
	n.nodePoolCfg = nodeCfg
	n.noNewHeadsThreshold = noNewHeadsThreshold
	n.ws = wsuri
	n.order = nodeOrder
	if httpuri != nil {
		n.http = httpuri
	}
	// Node lifetime context; cancelled in close().
	n.nodeCtx, n.cancelNodeCtx = context.WithCancel(context.Background())
	lggr = logger.Named(lggr, "Node")
	lggr = logger.With(lggr,
		"nodeTier", Primary.String(),
		"nodeName", name,
		"node", n.String(),
		"chainID", chainID,
		"nodeOrder", n.order,
	)
	n.lfcLog = logger.Named(lggr, "Lifecycle")
	// Initialized to -1, presumably meaning "no head received yet" — confirm against aliveLoop.
	n.stateLatestBlockNumber = -1
	n.rpc = rpc
	n.chainFamily = chainFamily
	return n
}

// String renders "(Primary)<name>:<ws-url>" plus ":<http-url>" when an HTTP URL is set.
func (n *node[CHAIN_ID, HEAD, RPC]) String() string {
	s := fmt.Sprintf("(%s)%s:%s", Primary.String(), n.name, n.ws.String())
	if n.http != nil {
		s = s + fmt.Sprintf(":%s", n.http.String())
	}
	return s
}

// ConfiguredChainID returns the chain ID this node was configured with.
func (n *node[CHAIN_ID, HEAD, RPC]) ConfiguredChainID() (chainID CHAIN_ID) {
	return n.chainID
}

// Name returns the node's unique name.
func (n *node[CHAIN_ID, HEAD, RPC]) Name() string {
	return n.name
}

// RPC returns the underlying RPC client.
func (n *node[CHAIN_ID, HEAD, RPC]) RPC() RPC {
	return n.rpc
}

// SubscribersCount delegates to the underlying RPC client.
func (n *node[CHAIN_ID, HEAD, RPC]) SubscribersCount() int32 {
	return n.rpc.SubscribersCount()
}

// UnsubscribeAllExceptAliveLoop delegates to the underlying RPC client.
func (n *node[CHAIN_ID, HEAD, RPC]) UnsubscribeAllExceptAliveLoop() {
	n.rpc.UnsubscribeAllExceptAliveLoop()
}

// Close shuts the node down; StopOnce guarantees close runs at most once.
func (n *node[CHAIN_ID, HEAD, RPC]) Close() error {
	return n.StopOnce(n.name, n.close)
}

func (n *node[CHAIN_ID, HEAD, RPC]) close() error {
	// Deferred: runs after stateMu is released below. Wait for subsidiary
	// goroutines to exit (they observe nodeCtx cancellation), then close the RPC client.
	defer func() {
		n.wg.Wait()
		n.rpc.Close()
	}()

	n.stateMu.Lock()
	defer n.stateMu.Unlock()

	// Cancel the lifetime context and mark Closed while holding the state lock.
	n.cancelNodeCtx()
	n.state = nodeStateClosed
	return nil
}

// Start dials and verifies the node
// Should only be called once in a node's lifecycle
// Return value is necessary to conform to interface but this will never
// actually return an error.
func (n *node[CHAIN_ID, HEAD, RPC]) Start(startCtx context.Context) error {
	return n.StartOnce(n.name, func() error {
		n.start(startCtx)
		return nil
	})
}

// start initially dials the node and verifies chain ID
// This spins off lifecycle goroutines.
// Not thread-safe.
// Node lifecycle is synchronous: only one goroutine should be running at a
// time.
func (n *node[CHAIN_ID, HEAD, RPC]) start(startCtx context.Context) {
	// Programmer error if called in any state other than Undialed.
	if n.state != nodeStateUndialed {
		panic(fmt.Sprintf("cannot dial node with state %v", n.state))
	}

	if err := n.rpc.Dial(startCtx); err != nil {
		n.lfcLog.Errorw("Dial failed: Node is unreachable", "err", err)
		n.declareUnreachable()
		return
	}
	n.setState(nodeStateDialed)

	// A chain ID mismatch is a distinct terminal condition from a generic verify failure.
	if err := n.verify(startCtx); errors.Is(err, errInvalidChainID) {
		n.lfcLog.Errorw("Verify failed: Node has the wrong chain ID", "err", err)
		n.declareInvalidChainID()
		return
	} else if err != nil {
		n.lfcLog.Errorw(fmt.Sprintf("Verify failed: %v", err), "err", err)
		n.declareUnreachable()
		return
	}

	n.declareAlive()
}

// verify checks that all connections to eth nodes match the given chain ID
// Not thread-safe
// Pure verify: does not mutate node "state" field.
// verify queries the RPC for its chain ID and compares it against the locally
// configured chain ID, updating the verify prometheus counters either way.
// Returns errInvalidChainID (wrapped) on a mismatch.
func (n *node[CHAIN_ID, HEAD, RPC]) verify(callerCtx context.Context) (err error) {
	promPoolRPCNodeVerifies.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc()
	promFailed := func() {
		promPoolRPCNodeVerifiesFailed.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc()
	}

	// Programmer error if verify is called from any other state.
	st := n.State()
	switch st {
	case nodeStateDialed, nodeStateOutOfSync, nodeStateInvalidChainID:
	default:
		panic(fmt.Sprintf("cannot verify node in state %v", st))
	}

	var chainID CHAIN_ID
	if chainID, err = n.rpc.ChainID(callerCtx); err != nil {
		promFailed()
		return fmt.Errorf("failed to verify chain ID for node %s: %w", n.name, err)
	} else if chainID.String() != n.chainID.String() {
		promFailed()
		return fmt.Errorf(
			"rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s: %w",
			chainID.String(),
			n.chainID.String(),
			n.name,
			errInvalidChainID,
		)
	}

	promPoolRPCNodeVerifiesSuccess.WithLabelValues(n.chainFamily, n.chainID.String(), n.name).Inc()

	return nil
}

// disconnectAll disconnects all clients connected to the node
// WARNING: NOT THREAD-SAFE
// This must be called from within the n.stateMu lock
func (n *node[CHAIN_ID, HEAD, RPC]) disconnectAll() {
	n.rpc.DisconnectAll()
}

// Order returns the node's configured priority order.
func (n *node[CHAIN_ID, HEAD, RPC]) Order() int32 {
	return n.order
}
diff --git a/common/client/node_fsm.go b/common/client/node_fsm.go
new file mode 100644
index 00000000..74a5814f
--- /dev/null
+++ b/common/client/node_fsm.go
@@ -0,0 +1,265 @@
package client

import (
	"fmt"
	"math/big"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Prometheus counters: one per destination state, incremented by the
// corresponding transitionToXxx method, labelled by chain ID and node name.
var (
	promPoolRPCNodeTransitionsToAlive = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "pool_rpc_node_num_transitions_to_alive",
		Help: transitionString(nodeStateAlive),
	}, []string{"chainID", "nodeName"})
	promPoolRPCNodeTransitionsToInSync = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "pool_rpc_node_num_transitions_to_in_sync",
		Help: fmt.Sprintf("%s to %s", transitionString(nodeStateOutOfSync), nodeStateAlive),
	}, []string{"chainID", "nodeName"})
	promPoolRPCNodeTransitionsToOutOfSync = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "pool_rpc_node_num_transitions_to_out_of_sync",
		Help: transitionString(nodeStateOutOfSync),
	}, []string{"chainID", "nodeName"})
	promPoolRPCNodeTransitionsToUnreachable = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "pool_rpc_node_num_transitions_to_unreachable",
		Help: transitionString(nodeStateUnreachable),
	}, []string{"chainID", "nodeName"})
	promPoolRPCNodeTransitionsToInvalidChainID = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "pool_rpc_node_num_transitions_to_invalid_chain_id",
		Help: transitionString(nodeStateInvalidChainID),
	}, []string{"chainID", "nodeName"})
	promPoolRPCNodeTransitionsToUnusable = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "pool_rpc_node_num_transitions_to_unusable",
		Help: transitionString(nodeStateUnusable),
	}, []string{"chainID", "nodeName"})
)

// nodeState represents the current state of the node
// Node is a FSM (finite state machine)
type nodeState int

// String returns a human-readable name for the state.
func (n nodeState) String() string {
	switch n {
	case nodeStateUndialed:
		return "Undialed"
	case nodeStateDialed:
		return "Dialed"
	case nodeStateInvalidChainID:
		return "InvalidChainID"
	case nodeStateAlive:
		return "Alive"
	case nodeStateUnreachable:
		return "Unreachable"
	case nodeStateUnusable:
		return "Unusable"
	case nodeStateOutOfSync:
		return "OutOfSync"
	case nodeStateClosed:
		return "Closed"
	default:
		// Unknown/future states fall back to the raw numeric value.
		return fmt.Sprintf("nodeState(%d)", n)
	}
}

// GoString prints a prettier state
func (n nodeState) GoString() string {
	return fmt.Sprintf("nodeState%s(%d)", n.String(), n)
}

const (
	// nodeStateUndialed is the first state of a virgin node
	nodeStateUndialed = nodeState(iota)
	// nodeStateDialed is after a node has successfully dialed but before it has verified the correct chain ID
	nodeStateDialed
	// nodeStateInvalidChainID is after chain ID verification failed
	nodeStateInvalidChainID
	// nodeStateAlive is a healthy node after chain ID verification succeeded
	nodeStateAlive
	// nodeStateUnreachable is a node that cannot be dialed or has disconnected
	nodeStateUnreachable
	// nodeStateOutOfSync is a node that is accepting connections but exceeded
	// the failure threshold without sending any new heads. It will be
	// disconnected, then put into a revive loop and re-awakened after redial
	// if a new head arrives
	nodeStateOutOfSync
	// nodeStateUnusable is a sendonly node that has an invalid URL that can never be reached
	nodeStateUnusable
	// nodeStateClosed is after the connection has been closed and the node is at the end of its lifecycle
	nodeStateClosed
	// nodeStateLen tracks the number of states
	nodeStateLen
)

// allNodeStates represents all possible states a node can be in
var allNodeStates []nodeState

// init enumerates every state (0..nodeStateLen-1) into allNodeStates.
func init() {
	for s := nodeState(0); s < nodeStateLen; s++ {
		allNodeStates = append(allNodeStates, s)
	}
}

// FSM methods

// State allows reading the current state of the node.
func (n *node[CHAIN_ID, HEAD, RPC]) State() nodeState {
	n.stateMu.RLock()
	defer n.stateMu.RUnlock()
	return n.state
}

// StateAndLatest returns the state together with the last observed block
// number and total difficulty, read atomically under the state lock.
func (n *node[CHAIN_ID, HEAD, RPC]) StateAndLatest() (nodeState, int64, *big.Int) {
	n.stateMu.RLock()
	defer n.stateMu.RUnlock()
	return n.state, n.stateLatestBlockNumber, n.stateLatestTotalDifficulty
}

// setState is only used by internal state management methods.
// This is low-level; care should be taken by the caller to ensure the new state is a valid transition.
// State changes should always be synchronous: only one goroutine at a time should change state.
// n.stateMu should not be locked for long periods of time because external clients expect a timely response from n.State()
func (n *node[CHAIN_ID, HEAD, RPC]) setState(s nodeState) {
	n.stateMu.Lock()
	defer n.stateMu.Unlock()
	n.state = s
}

// declareXXX methods change the state and pass control off to the new state
// management goroutine

// declareAlive transitions the node to Alive and spawns aliveLoop.
func (n *node[CHAIN_ID, HEAD, RPC]) declareAlive() {
	n.transitionToAlive(func() {
		n.lfcLog.Infow("RPC Node is online", "nodeState", n.state)
		n.wg.Add(1)
		go n.aliveLoop()
	})
}

// transitionToAlive moves Dialed/InvalidChainID -> Alive and runs fn under the
// state lock. No-op when Closed; panics on any other source state.
func (n *node[CHAIN_ID, HEAD, RPC]) transitionToAlive(fn func()) {
	promPoolRPCNodeTransitionsToAlive.WithLabelValues(n.chainID.String(), n.name).Inc()
	n.stateMu.Lock()
	defer n.stateMu.Unlock()
	if n.state == nodeStateClosed {
		return
	}
	switch n.state {
	case nodeStateDialed, nodeStateInvalidChainID:
		n.state = nodeStateAlive
	default:
		panic(transitionFail(n.state, nodeStateAlive))
	}
	// fn runs while still holding stateMu.
	fn()
}

// declareInSync puts a node back into Alive state, allowing it to be used by
// pool consumers again
func (n *node[CHAIN_ID, HEAD, RPC]) declareInSync() {
	n.transitionToInSync(func() {
		n.lfcLog.Infow("RPC Node is back in sync", "nodeState", n.state)
		n.wg.Add(1)
		go n.aliveLoop()
	})
}

// transitionToInSync moves OutOfSync -> Alive and runs fn under the state lock.
// No-op when Closed; panics on any other source state.
func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInSync(fn func()) {
	// Bumps both counters: the destination state is Alive, and this particular
	// path (OutOfSync -> Alive) is additionally tracked as "in sync".
	promPoolRPCNodeTransitionsToAlive.WithLabelValues(n.chainID.String(), n.name).Inc()
	promPoolRPCNodeTransitionsToInSync.WithLabelValues(n.chainID.String(), n.name).Inc()
	n.stateMu.Lock()
	defer n.stateMu.Unlock()
	if n.state == nodeStateClosed {
		return
	}
	switch n.state {
	case nodeStateOutOfSync:
		n.state = nodeStateAlive
	default:
		panic(transitionFail(n.state, nodeStateAlive))
	}
	fn()
}

// declareOutOfSync puts a node into OutOfSync state, disconnecting all current
// clients and making it unavailable for use until back in-sync.
+func (n *node[CHAIN_ID, HEAD, RPC]) declareOutOfSync(isOutOfSync func(num int64, td *big.Int) bool) { + n.transitionToOutOfSync(func() { + n.lfcLog.Errorw("RPC Node is out of sync", "nodeState", n.state) + n.wg.Add(1) + go n.outOfSyncLoop(isOutOfSync) + }) +} + +func (n *node[CHAIN_ID, HEAD, RPC]) transitionToOutOfSync(fn func()) { + promPoolRPCNodeTransitionsToOutOfSync.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == nodeStateClosed { + return + } + switch n.state { + case nodeStateAlive: + n.disconnectAll() + n.state = nodeStateOutOfSync + default: + panic(transitionFail(n.state, nodeStateOutOfSync)) + } + fn() +} + +func (n *node[CHAIN_ID, HEAD, RPC]) declareUnreachable() { + n.transitionToUnreachable(func() { + n.lfcLog.Errorw("RPC Node is unreachable", "nodeState", n.state) + n.wg.Add(1) + go n.unreachableLoop() + }) +} + +func (n *node[CHAIN_ID, HEAD, RPC]) transitionToUnreachable(fn func()) { + promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == nodeStateClosed { + return + } + switch n.state { + case nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID: + n.disconnectAll() + n.state = nodeStateUnreachable + default: + panic(transitionFail(n.state, nodeStateUnreachable)) + } + fn() +} + +func (n *node[CHAIN_ID, HEAD, RPC]) declareInvalidChainID() { + n.transitionToInvalidChainID(func() { + n.lfcLog.Errorw("RPC Node has the wrong chain ID", "nodeState", n.state) + n.wg.Add(1) + go n.invalidChainIDLoop() + }) +} + +func (n *node[CHAIN_ID, HEAD, RPC]) transitionToInvalidChainID(fn func()) { + promPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == nodeStateClosed { + return + } + switch n.state { + case nodeStateDialed, nodeStateOutOfSync: + n.disconnectAll() 
+ n.state = nodeStateInvalidChainID + default: + panic(transitionFail(n.state, nodeStateInvalidChainID)) + } + fn() +} + +func transitionString(state nodeState) string { + return fmt.Sprintf("Total number of times node has transitioned to %s", state) +} + +func transitionFail(from nodeState, to nodeState) string { + return fmt.Sprintf("cannot transition from %#v to %#v", from, to) +} diff --git a/common/client/node_fsm_test.go b/common/client/node_fsm_test.go new file mode 100644 index 00000000..34988a3d --- /dev/null +++ b/common/client/node_fsm_test.go @@ -0,0 +1,108 @@ +package client + +import ( + "slices" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +type fnMock struct{ calls int } + +func (fm *fnMock) Fn() { + fm.calls++ +} + +func (fm *fnMock) AssertNotCalled(t *testing.T) { + assert.Equal(t, 0, fm.calls) +} + +func (fm *fnMock) AssertCalled(t *testing.T) { + assert.Greater(t, fm.calls, 0) +} + +func newTestTransitionNode(t *testing.T, rpc *mockNodeClient[types.ID, Head]) testNode { + return newTestNode(t, testNodeOpts{rpc: rpc}) +} + +func TestUnit_Node_StateTransitions(t *testing.T) { + t.Parallel() + + t.Run("setState", func(t *testing.T) { + n := newTestTransitionNode(t, nil) + assert.Equal(t, nodeStateUndialed, n.State()) + n.setState(nodeStateAlive) + assert.Equal(t, nodeStateAlive, n.State()) + n.setState(nodeStateUndialed) + assert.Equal(t, nodeStateUndialed, n.State()) + }) + + t.Run("transitionToAlive", func(t *testing.T) { + const destinationState = nodeStateAlive + allowedStates := []nodeState{nodeStateDialed, nodeStateInvalidChainID} + rpc := newMockNodeClient[types.ID, Head](t) + testTransition(t, rpc, testNode.transitionToAlive, destinationState, allowedStates...) 
+ }) + + t.Run("transitionToInSync", func(t *testing.T) { + const destinationState = nodeStateAlive + allowedStates := []nodeState{nodeStateOutOfSync} + rpc := newMockNodeClient[types.ID, Head](t) + testTransition(t, rpc, testNode.transitionToInSync, destinationState, allowedStates...) + }) + t.Run("transitionToOutOfSync", func(t *testing.T) { + const destinationState = nodeStateOutOfSync + allowedStates := []nodeState{nodeStateAlive} + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("DisconnectAll").Once() + testTransition(t, rpc, testNode.transitionToOutOfSync, destinationState, allowedStates...) + }) + t.Run("transitionToUnreachable", func(t *testing.T) { + const destinationState = nodeStateUnreachable + allowedStates := []nodeState{nodeStateUndialed, nodeStateDialed, nodeStateAlive, nodeStateOutOfSync, nodeStateInvalidChainID} + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("DisconnectAll").Times(len(allowedStates)) + testTransition(t, rpc, testNode.transitionToUnreachable, destinationState, allowedStates...) + }) + t.Run("transitionToInvalidChain", func(t *testing.T) { + const destinationState = nodeStateInvalidChainID + allowedStates := []nodeState{nodeStateDialed, nodeStateOutOfSync} + rpc := newMockNodeClient[types.ID, Head](t) + rpc.On("DisconnectAll").Times(len(allowedStates)) + testTransition(t, rpc, testNode.transitionToInvalidChainID, destinationState, allowedStates...) 
+ }) +} + +func testTransition(t *testing.T, rpc *mockNodeClient[types.ID, Head], transition func(node testNode, fn func()), destinationState nodeState, allowedStates ...nodeState) { + node := newTestTransitionNode(t, rpc) + for _, allowedState := range allowedStates { + m := new(fnMock) + node.setState(allowedState) + transition(node, m.Fn) + assert.Equal(t, destinationState, node.State(), "Expected node to successfully transition from %s to %s state", allowedState, destinationState) + m.AssertCalled(t) + } + // noop on attempt to transition from Closed state + m := new(fnMock) + node.setState(nodeStateClosed) + transition(node, m.Fn) + m.AssertNotCalled(t) + assert.Equal(t, nodeStateClosed, node.State(), "Expected node to remain in closed state on transition attempt") + + for _, nodeState := range allNodeStates { + if slices.Contains(allowedStates, nodeState) || nodeState == nodeStateClosed { + continue + } + + m := new(fnMock) + node.setState(nodeState) + assert.Panics(t, func() { + transition(node, m.Fn) + }, "Expected transition from `%s` to `%s` to panic", nodeState, destinationState) + m.AssertNotCalled(t) + assert.Equal(t, nodeState, node.State(), "Expected node to remain in initial state on invalid transition") + + } +} diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go new file mode 100644 index 00000000..e9d6a8f0 --- /dev/null +++ b/common/client/node_lifecycle.go @@ -0,0 +1,440 @@ +package client + +import ( + "context" + "errors" + "fmt" + "math" + "math/big" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/utils" + bigmath "github.com/goplugin/plugin-common/pkg/utils/big_math" + + iutils "github.com/goplugin/pluginv3.0/v2/common/internal/utils" +) + +var ( + promPoolRPCNodeHighestSeenBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: 
"pool_rpc_node_highest_seen_block", + Help: "The highest seen block for the given RPC node", + }, []string{"chainID", "nodeName"}) + promPoolRPCNodeNumSeenBlocks = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_num_seen_blocks", + Help: "The total number of new blocks seen by the given RPC node", + }, []string{"chainID", "nodeName"}) + promPoolRPCNodePolls = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_polls_total", + Help: "The total number of poll checks for the given RPC node", + }, []string{"chainID", "nodeName"}) + promPoolRPCNodePollsFailed = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_polls_failed", + Help: "The total number of failed poll checks for the given RPC node", + }, []string{"chainID", "nodeName"}) + promPoolRPCNodePollsSuccess = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pool_rpc_node_polls_success", + Help: "The total number of successful poll checks for the given RPC node", + }, []string{"chainID", "nodeName"}) +) + +// zombieNodeCheckInterval controls how often to re-check to see if we need to +// state change in case we have to force a state transition due to no available +// nodes. +// NOTE: This only applies to out-of-sync nodes if they are the last available node +func zombieNodeCheckInterval(noNewHeadsThreshold time.Duration) time.Duration { + interval := noNewHeadsThreshold + if interval <= 0 || interval > QueryTimeout { + interval = QueryTimeout + } + return utils.WithJitter(interval) +} + +func (n *node[CHAIN_ID, HEAD, RPC]) setLatestReceived(blockNumber int64, totalDifficulty *big.Int) { + n.stateMu.Lock() + defer n.stateMu.Unlock() + n.stateLatestBlockNumber = blockNumber + n.stateLatestTotalDifficulty = totalDifficulty +} + +const ( + msgCannotDisable = "but cannot disable this connection because there are no other RPC endpoints, or all other RPC endpoints are dead." 
+ msgDegradedState = "Plugin is now operating in a degraded state and urgent action is required to resolve the issue" +) + +const rpcSubscriptionMethodNewHeads = "newHeads" + +// Node is a FSM +// Each state has a loop that goes with it, which monitors the node and moves it into another state as necessary. +// Only one loop must run at a time. +// Each loop passes control onto the next loop as it exits, except when the node is Closed which terminates the loop permanently. + +// This handles node lifecycle for the ALIVE state +// Should only be run ONCE per node, after a successful Dial +func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { + defer n.wg.Done() + + { + // sanity check + state := n.State() + switch state { + case nodeStateAlive: + case nodeStateClosed: + return + default: + panic(fmt.Sprintf("aliveLoop can only run for node in Alive state, got: %s", state)) + } + } + + noNewHeadsTimeoutThreshold := n.noNewHeadsThreshold + pollFailureThreshold := n.nodePoolCfg.PollFailureThreshold() + pollInterval := n.nodePoolCfg.PollInterval() + + lggr := logger.Sugared(n.lfcLog).Named("Alive").With("noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) + lggr.Tracew("Alive loop starting", "nodeState", n.State()) + + headsC := make(chan HEAD) + sub, err := n.rpc.Subscribe(n.nodeCtx, headsC, rpcSubscriptionMethodNewHeads) + if err != nil { + lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.State()) + n.declareUnreachable() + return + } + // TODO: nit fix. 
If multinode switches primary node before we set sub as AliveSub, sub will be closed and we'll + // falsely transition this node to unreachable state + n.rpc.SetAliveLoopSub(sub) + defer sub.Unsubscribe() + + var outOfSyncT *time.Ticker + var outOfSyncTC <-chan time.Time + if noNewHeadsTimeoutThreshold > 0 { + lggr.Debugw("Head liveness checking enabled", "nodeState", n.State()) + outOfSyncT = time.NewTicker(noNewHeadsTimeoutThreshold) + defer outOfSyncT.Stop() + outOfSyncTC = outOfSyncT.C + } else { + lggr.Debug("Head liveness checking disabled") + } + + var pollCh <-chan time.Time + if pollInterval > 0 { + lggr.Debug("Polling enabled") + pollT := time.NewTicker(pollInterval) + defer pollT.Stop() + pollCh = pollT.C + if pollFailureThreshold > 0 { + // polling can be enabled with no threshold to enable polling but + // the node will not be marked offline regardless of the number of + // poll failures + lggr.Debug("Polling liveness checking enabled") + } + } else { + lggr.Debug("Polling disabled") + } + + _, highestReceivedBlockNumber, _ := n.StateAndLatest() + var pollFailures uint32 + + for { + select { + case <-n.nodeCtx.Done(): + return + case <-pollCh: + var version string + promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() + lggr.Tracew("Polling for version", "nodeState", n.State(), "pollFailures", pollFailures) + ctx, cancel := context.WithTimeout(n.nodeCtx, pollInterval) + version, err := n.RPC().ClientVersion(ctx) + cancel() + if err != nil { + // prevent overflow + if pollFailures < math.MaxUint32 { + promPoolRPCNodePollsFailed.WithLabelValues(n.chainID.String(), n.name).Inc() + pollFailures++ + } + lggr.Warnw(fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", n.String()), "err", err, "pollFailures", pollFailures, "nodeState", n.State()) + } else { + lggr.Debugw("Version poll successful", "nodeState", n.State(), "clientVersion", version) + promPoolRPCNodePollsSuccess.WithLabelValues(n.chainID.String(), 
n.name).Inc() + pollFailures = 0 + } + if pollFailureThreshold > 0 && pollFailures >= pollFailureThreshold { + lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), "pollFailures", pollFailures, "nodeState", n.State()) + if n.nLiveNodes != nil { + if l, _, _ := n.nLiveNodes(); l < 2 { + lggr.Criticalf("RPC endpoint failed to respond to polls; %s %s", msgCannotDisable, msgDegradedState) + continue + } + } + n.declareUnreachable() + return + } + _, num, td := n.StateAndLatest() + if outOfSync, liveNodes := n.syncStatus(num, td); outOfSync { + // note: there must be another live node for us to be out of sync + lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", num, "totalDifficulty", td, "nodeState", n.State()) + if liveNodes < 2 { + lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) + continue + } + n.declareOutOfSync(n.isOutOfSync) + return + } + case bh, open := <-headsC: + if !open { + lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.State()) + n.declareUnreachable() + return + } + promPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() + lggr.Tracew("Got head", "head", bh) + if bh.BlockNumber() > highestReceivedBlockNumber { + promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(bh.BlockNumber())) + lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) + highestReceivedBlockNumber = bh.BlockNumber() + } else { + lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) + } + if outOfSyncT != nil { + outOfSyncT.Reset(noNewHeadsTimeoutThreshold) + } + n.setLatestReceived(bh.BlockNumber(), bh.BlockDifficulty()) + case err := <-sub.Err(): + lggr.Errorw("Subscription was 
terminated", "err", err, "nodeState", n.State()) + n.declareUnreachable() + return + case <-outOfSyncTC: + // We haven't received a head on the channel for at least the + // threshold amount of time, mark it broken + lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, highestReceivedBlockNumber), "nodeState", n.State(), "latestReceivedBlockNumber", highestReceivedBlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) + if n.nLiveNodes != nil { + if l, _, _ := n.nLiveNodes(); l < 2 { + lggr.Criticalf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState) + // We don't necessarily want to wait the full timeout to check again, we should + // check regularly and log noisily in this state + outOfSyncT.Reset(zombieNodeCheckInterval(n.noNewHeadsThreshold)) + continue + } + } + n.declareOutOfSync(func(num int64, td *big.Int) bool { return num < highestReceivedBlockNumber }) + return + } + } +} + +func (n *node[CHAIN_ID, HEAD, RPC]) isOutOfSync(num int64, td *big.Int) (outOfSync bool) { + outOfSync, _ = n.syncStatus(num, td) + return +} + +// syncStatus returns outOfSync true if num or td is more than SyncThreshold behind the best node. +// Always returns outOfSync false for SyncThreshold 0. +// liveNodes is only included when outOfSync is true. 
+func (n *node[CHAIN_ID, HEAD, RPC]) syncStatus(num int64, td *big.Int) (outOfSync bool, liveNodes int) { + if n.nLiveNodes == nil { + return // skip for tests + } + threshold := n.nodePoolCfg.SyncThreshold() + if threshold == 0 { + return // disabled + } + // Check against best node + ln, highest, greatest := n.nLiveNodes() + mode := n.nodePoolCfg.SelectionMode() + switch mode { + case NodeSelectionModeHighestHead, NodeSelectionModeRoundRobin, NodeSelectionModePriorityLevel: + return num < highest-int64(threshold), ln + case NodeSelectionModeTotalDifficulty: + bigThreshold := big.NewInt(int64(threshold)) + return td.Cmp(bigmath.Sub(greatest, bigThreshold)) < 0, ln + default: + panic("unrecognized NodeSelectionMode: " + mode) + } +} + +const ( + msgReceivedBlock = "Received block for RPC node, waiting until back in-sync to mark as live again" + msgInSync = "RPC node back in sync" +) + +// outOfSyncLoop takes an OutOfSync node and waits until isOutOfSync returns false to go back to live status +func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td *big.Int) bool) { + defer n.wg.Done() + + { + // sanity check + state := n.State() + switch state { + case nodeStateOutOfSync: + case nodeStateClosed: + return + default: + panic(fmt.Sprintf("outOfSyncLoop can only run for node in OutOfSync state, got: %s", state)) + } + } + + outOfSyncAt := time.Now() + + lggr := logger.Sugared(logger.Named(n.lfcLog, "OutOfSync")) + lggr.Debugw("Trying to revive out-of-sync RPC node", "nodeState", n.State()) + + // Need to redial since out-of-sync nodes are automatically disconnected + if err := n.rpc.Dial(n.nodeCtx); err != nil { + lggr.Errorw("Failed to dial out-of-sync RPC node", "nodeState", n.State()) + n.declareUnreachable() + return + } + + // Manually re-verify since out-of-sync nodes are automatically disconnected + if err := n.verify(n.nodeCtx); err != nil { + lggr.Errorw(fmt.Sprintf("Failed to verify out-of-sync RPC node: %v", err), "err", err) + 
n.declareInvalidChainID() + return + } + + lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.State()) + + ch := make(chan HEAD) + sub, err := n.rpc.Subscribe(n.nodeCtx, ch, rpcSubscriptionMethodNewHeads) + if err != nil { + lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "nodeState", n.State(), "err", err) + n.declareUnreachable() + return + } + defer sub.Unsubscribe() + + for { + select { + case <-n.nodeCtx.Done(): + return + case head, open := <-ch: + if !open { + lggr.Error("Subscription channel unexpectedly closed", "nodeState", n.State()) + n.declareUnreachable() + return + } + n.setLatestReceived(head.BlockNumber(), head.BlockDifficulty()) + if !isOutOfSync(head.BlockNumber(), head.BlockDifficulty()) { + // back in-sync! flip back into alive loop + lggr.Infow(fmt.Sprintf("%s: %s. Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt)), "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.State()) + n.declareInSync() + return + } + lggr.Debugw(msgReceivedBlock, "blockNumber", head.BlockNumber(), "blockDifficulty", head.BlockDifficulty(), "nodeState", n.State()) + case <-time.After(zombieNodeCheckInterval(n.noNewHeadsThreshold)): + if n.nLiveNodes != nil { + if l, _, _ := n.nLiveNodes(); l < 1 { + lggr.Critical("RPC endpoint is still out of sync, but there are no other available nodes. 
This RPC node will be forcibly moved back into the live pool in a degraded state") + n.declareInSync() + return + } + } + case err := <-sub.Err(): + lggr.Errorw("Subscription was terminated", "nodeState", n.State(), "err", err) + n.declareUnreachable() + return + } + } +} + +func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { + defer n.wg.Done() + + { + // sanity check + state := n.State() + switch state { + case nodeStateUnreachable: + case nodeStateClosed: + return + default: + panic(fmt.Sprintf("unreachableLoop can only run for node in Unreachable state, got: %s", state)) + } + } + + unreachableAt := time.Now() + + lggr := logger.Sugared(logger.Named(n.lfcLog, "Unreachable")) + lggr.Debugw("Trying to revive unreachable RPC node", "nodeState", n.State()) + + dialRetryBackoff := iutils.NewRedialBackoff() + + for { + select { + case <-n.nodeCtx.Done(): + return + case <-time.After(dialRetryBackoff.Duration()): + lggr.Tracew("Trying to re-dial RPC node", "nodeState", n.State()) + + err := n.rpc.Dial(n.nodeCtx) + if err != nil { + lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; still unreachable: %v", err), "err", err, "nodeState", n.State()) + continue + } + + n.setState(nodeStateDialed) + + err = n.verify(n.nodeCtx) + + if errors.Is(err, errInvalidChainID) { + lggr.Errorw("Failed to redial RPC node; remote endpoint returned the wrong chain ID", "err", err) + n.declareInvalidChainID() + return + } else if err != nil { + lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; verify failed: %v", err), "err", err) + n.declareUnreachable() + return + } + + lggr.Infow(fmt.Sprintf("Successfully redialled and verified RPC node %s. 
Node was offline for %s", n.String(), time.Since(unreachableAt)), "nodeState", n.State()) + n.declareAlive() + return + } + } +} + +func (n *node[CHAIN_ID, HEAD, RPC]) invalidChainIDLoop() { + defer n.wg.Done() + + { + // sanity check + state := n.State() + switch state { + case nodeStateInvalidChainID: + case nodeStateClosed: + return + default: + panic(fmt.Sprintf("invalidChainIDLoop can only run for node in InvalidChainID state, got: %s", state)) + } + } + + invalidAt := time.Now() + + lggr := logger.Named(n.lfcLog, "InvalidChainID") + lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with invalid chain ID", n.String()), "nodeState", n.State()) + + chainIDRecheckBackoff := iutils.NewRedialBackoff() + + for { + select { + case <-n.nodeCtx.Done(): + return + case <-time.After(chainIDRecheckBackoff.Duration()): + err := n.verify(n.nodeCtx) + if errors.Is(err, errInvalidChainID) { + lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain ID", "err", err) + continue + } else if err != nil { + lggr.Errorw(fmt.Sprintf("Unexpected error while verifying RPC node chain ID; %v", err), "err", err) + n.declareUnreachable() + return + } + lggr.Infow(fmt.Sprintf("Successfully verified RPC node. 
Node was offline for %s", time.Since(invalidAt)), "nodeState", n.State()) + n.declareAlive() + return + } + } +} diff --git a/common/client/node_lifecycle_test.go b/common/client/node_lifecycle_test.go new file mode 100644 index 00000000..c48e332d --- /dev/null +++ b/common/client/node_lifecycle_test.go @@ -0,0 +1,1071 @@ +package client + +import ( + "errors" + "fmt" + big "math/big" + "sync/atomic" + "testing" + + "github.com/cometbft/cometbft/libs/rand" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "go.uber.org/zap" + + "github.com/goplugin/plugin-common/pkg/logger" + bigmath "github.com/goplugin/plugin-common/pkg/utils/big_math" + "github.com/goplugin/plugin-common/pkg/utils/tests" + + "github.com/goplugin/pluginv3.0/v2/common/types" + "github.com/goplugin/pluginv3.0/v2/common/types/mocks" +) + +func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { + t.Parallel() + + newDialedNode := func(t *testing.T, opts testNodeOpts) testNode { + node := newTestNode(t, opts) + opts.rpc.On("Close").Return(nil).Once() + + node.setState(nodeStateDialed) + return node + } + + t.Run("returns on closed", func(t *testing.T) { + node := newTestNode(t, testNodeOpts{}) + node.setState(nodeStateClosed) + node.wg.Add(1) + node.aliveLoop() + + }) + t.Run("if initial subscribe fails, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + }) + defer func() { assert.NoError(t, node.close()) }() + + expectedError := errors.New("failed to subscribe to rpc") + rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(nil, expectedError).Once() + rpc.On("DisconnectAll").Once() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + + }) + t.Run("if 
remote RPC connection is closed transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + sub := mocks.NewSubscription(t) + errChan := make(chan error) + close(errChan) + sub.On("Err").Return((<-chan error)(errChan)).Once() + sub.On("Unsubscribe").Once() + rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() + rpc.On("SetAliveLoopSub", sub).Once() + // disconnects all on transfer to unreachable + rpc.On("DisconnectAll").Once() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Subscription was terminated") + assert.Equal(t, nodeStateUnreachable, node.State()) + }) + + newSubscribedNode := func(t *testing.T, opts testNodeOpts) testNode { + sub := mocks.NewSubscription(t) + sub.On("Err").Return((<-chan error)(nil)) + sub.On("Unsubscribe").Once() + opts.rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() + opts.rpc.On("SetAliveLoopSub", sub).Once() + return newDialedNode(t, opts) + } + t.Run("Stays alive and waits for signal", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Head liveness checking disabled") + tests.AssertLogEventually(t, observedLogs, "Polling disabled") + assert.Equal(t, nodeStateAlive, node.State()) + }) + t.Run("stays alive while below pollFailureThreshold 
and resets counter on success", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + const pollFailureThreshold = 3 + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{ + pollFailureThreshold: pollFailureThreshold, + pollInterval: tests.TestInterval, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + pollError := errors.New("failed to get ClientVersion") + // 1. Return error several times, but below threshold + rpc.On("ClientVersion", mock.Anything).Return("", pollError).Run(func(_ mock.Arguments) { + // stays healthy while below threshold + assert.Equal(t, nodeStateAlive, node.State()) + }).Times(pollFailureThreshold - 1) + // 2. Successful call that is expected to reset counter + rpc.On("ClientVersion", mock.Anything).Return("client_version", nil).Once() + // 3. Return error. If we have not reset the timer, we'll transition to nonAliveState + rpc.On("ClientVersion", mock.Anything).Return("", pollError).Once() + // 4. 
Once during the call, check if node is alive + var ensuredAlive atomic.Bool + rpc.On("ClientVersion", mock.Anything).Return("client_version", nil).Run(func(_ mock.Arguments) { + if ensuredAlive.Load() { + return + } + ensuredAlive.Store(true) + assert.Equal(t, nodeStateAlive, node.State()) + }).Once() + // redundant call to stay in alive state + rpc.On("ClientVersion", mock.Anything).Return("client_version", nil) + node.declareAlive() + tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", node.String()), pollFailureThreshold) + tests.AssertLogCountEventually(t, observedLogs, "Version poll successful", 2) + assert.True(t, ensuredAlive.Load(), "expected to ensure that node was alive") + + }) + t.Run("with threshold poll failures, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + const pollFailureThreshold = 3 + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{ + pollFailureThreshold: pollFailureThreshold, + pollInterval: tests.TestInterval, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + pollError := errors.New("failed to get ClientVersion") + rpc.On("ClientVersion", mock.Anything).Return("", pollError) + // disconnects all on transfer to unreachable + rpc.On("DisconnectAll").Once() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", node.String()), pollFailureThreshold) + tests.AssertEventually(t, func() bool { + return nodeStateUnreachable == node.State() + }) + }) + t.Run("with threshold poll failures, but we are the last node alive, forcibly keeps it alive", func(t *testing.T) { + t.Parallel() + rpc := 
newMockNodeClient[types.ID, Head](t) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + const pollFailureThreshold = 3 + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{ + pollFailureThreshold: pollFailureThreshold, + pollInterval: tests.TestInterval, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { + return 1, 20, big.NewInt(10) + } + pollError := errors.New("failed to get ClientVersion") + rpc.On("ClientVersion", mock.Anything).Return("", pollError) + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailureThreshold)) + assert.Equal(t, nodeStateAlive, node.State()) + }) + t.Run("when behind more than SyncThreshold, transitions to out of sync", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + const syncThreshold = 10 + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{ + pollInterval: tests.TestInterval, + syncThreshold: syncThreshold, + selectionMode: NodeSelectionModeRoundRobin, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + node.stateLatestBlockNumber = 20 + node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { + return 10, syncThreshold + node.stateLatestBlockNumber + 1, big.NewInt(10) + } + rpc.On("ClientVersion", mock.Anything).Return("", nil) + // tries to redial in outOfSync + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateOutOfSync, node.State()) + }).Once() + // disconnects all on transfer to unreachable or outOfSync + rpc.On("DisconnectAll").Maybe() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed 
to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Failed to dial out-of-sync RPC node") + }) + t.Run("when behind more than SyncThreshold but we are the last live node, forcibly stays alive", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + const syncThreshold = 10 + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{ + pollInterval: tests.TestInterval, + syncThreshold: syncThreshold, + selectionMode: NodeSelectionModeRoundRobin, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + node.stateLatestBlockNumber = 20 + node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { + return 1, syncThreshold + node.stateLatestBlockNumber + 1, big.NewInt(10) + } + rpc.On("ClientVersion", mock.Anything).Return("", nil) + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState)) + }) + t.Run("when behind but SyncThreshold=0, stay alive", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{ + pollInterval: tests.TestInterval, + syncThreshold: 0, + selectionMode: NodeSelectionModeRoundRobin, + }, + rpc: rpc, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + node.stateLatestBlockNumber = 20 + node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { + return 1, node.stateLatestBlockNumber + 100, big.NewInt(10) + } + rpc.On("ClientVersion", mock.Anything).Return("", nil) + node.declareAlive() + tests.AssertLogCountEventually(t, observedLogs, "Version poll successful", 2) + assert.Equal(t, nodeStateAlive, node.State()) + }) + + t.Run("when no new heads received for 
threshold, transitions to out of sync", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + noNewHeadsThreshold: tests.TestInterval, + rpc: rpc, + }) + defer func() { assert.NoError(t, node.close()) }() + // tries to redial in outOfSync + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateOutOfSync, node.State()) + }).Once() + // disconnects all on transfer to unreachable or outOfSync + rpc.On("DisconnectAll").Maybe() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertEventually(t, func() bool { + // right after outOfSync we'll transfer to unreachable due to returned error on Dial + // we check that we were in out of sync state on first Dial call + return node.State() == nodeStateUnreachable + }) + }) + t.Run("when no new heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newSubscribedNode(t, testNodeOpts{ + config: testNodeConfig{}, + lggr: lggr, + noNewHeadsThreshold: tests.TestInterval, + rpc: rpc, + }) + defer func() { assert.NoError(t, node.close()) }() + node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { + return 1, 20, big.NewInt(10) + } + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, fmt.Sprintf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState)) + assert.Equal(t, nodeStateAlive, node.State()) + }) + + t.Run("rpc closed head channel", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + sub := mocks.NewSubscription(t) + sub.On("Err").Return((<-chan error)(nil)) + 
sub.On("Unsubscribe").Once() + rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { + ch := args.Get(1).(chan<- Head) + close(ch) + }).Return(sub, nil).Once() + rpc.On("SetAliveLoopSub", sub).Once() + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + node := newDialedNode(t, testNodeOpts{ + lggr: lggr, + config: testNodeConfig{}, + noNewHeadsThreshold: tests.TestInterval, + rpc: rpc, + }) + defer func() { assert.NoError(t, node.close()) }() + // disconnects all on transfer to unreachable or outOfSync + rpc.On("DisconnectAll").Once() + // might be called in unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareAlive() + tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed") + assert.Equal(t, nodeStateUnreachable, node.State()) + + }) + t.Run("updates block number and difficulty on new head", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + sub := mocks.NewSubscription(t) + sub.On("Err").Return((<-chan error)(nil)) + sub.On("Unsubscribe").Once() + expectedBlockNumber := rand.Int64() + expectedDiff := big.NewInt(rand.Int64()) + rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { + ch := args.Get(1).(chan<- Head) + go writeHeads(t, ch, head{BlockNumber: expectedBlockNumber, BlockDifficulty: expectedDiff}) + }).Return(sub, nil).Once() + rpc.On("SetAliveLoopSub", sub).Once() + node := newDialedNode(t, testNodeOpts{ + config: testNodeConfig{}, + rpc: rpc, + }) + defer func() { assert.NoError(t, node.close()) }() + node.declareAlive() + tests.AssertEventually(t, func() bool { + state, block, diff := node.StateAndLatest() + return state == nodeStateAlive && block == expectedBlockNumber == bigmath.Equal(diff, expectedDiff) + }) + }) +} + +type head struct { + BlockNumber int64 + BlockDifficulty *big.Int +} + +func 
writeHeads(t *testing.T, ch chan<- Head, heads ...head) { + for _, head := range heads { + h := newMockHead(t) + h.On("BlockNumber").Return(head.BlockNumber) + h.On("BlockDifficulty").Return(head.BlockDifficulty) + select { + case ch <- h: + case <-tests.Context(t).Done(): + return + } + } +} + +func setupRPCForAliveLoop(t *testing.T, rpc *mockNodeClient[types.ID, Head]) { + rpc.On("Dial", mock.Anything).Return(nil).Maybe() + aliveSubscription := mocks.NewSubscription(t) + aliveSubscription.On("Err").Return((<-chan error)(nil)).Maybe() + aliveSubscription.On("Unsubscribe").Maybe() + rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(aliveSubscription, nil).Maybe() + rpc.On("SetAliveLoopSub", mock.Anything).Maybe() +} + +func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { + t.Parallel() + + newAliveNode := func(t *testing.T, opts testNodeOpts) testNode { + node := newTestNode(t, opts) + opts.rpc.On("Close").Return(nil).Once() + // disconnects all on transfer to unreachable or outOfSync + opts.rpc.On("DisconnectAll") + node.setState(nodeStateAlive) + return node + } + + stubIsOutOfSync := func(num int64, td *big.Int) bool { + return false + } + + t.Run("returns on closed", func(t *testing.T) { + t.Parallel() + node := newTestNode(t, testNodeOpts{}) + node.setState(nodeStateClosed) + node.wg.Add(1) + node.outOfSyncLoop(stubIsOutOfSync) + }) + t.Run("on old blocks stays outOfSync and returns on close", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + // might be called multiple times + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + outOfSyncSubscription := 
mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + heads := []head{{BlockNumber: 7}, {BlockNumber: 11}, {BlockNumber: 13}} + rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { + ch := args.Get(1).(chan<- Head) + go writeHeads(t, ch, heads...) + }).Return(outOfSyncSubscription, nil).Once() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + + node.declareOutOfSync(func(num int64, td *big.Int) bool { + return true + }) + tests.AssertLogCountEventually(t, observedLogs, msgReceivedBlock, len(heads)) + assert.Equal(t, nodeStateOutOfSync, node.State()) + }) + t.Run("if initial dial fails, transitions to unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + }) + defer func() { assert.NoError(t, node.close()) }() + + expectedError := errors.New("failed to dial rpc") + // might be called again in unreachable loop, so no need to set once + rpc.On("Dial", mock.Anything).Return(expectedError) + node.declareOutOfSync(stubIsOutOfSync) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("if fail to get chainID, transitions to invalidChainID", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + expectedError := errors.New("failed to get chain ID") + // might be called multiple times + rpc.On("ChainID", mock.Anything).Return(types.NewIDFromInt(0), expectedError) + node.declareOutOfSync(stubIsOutOfSync) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateInvalidChainID + }) + }) + t.Run("if chainID does not match, transitions to invalidChainID", 
func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + rpcChainID := types.NewIDFromInt(11) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + // might be called multiple times + rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) + node.declareOutOfSync(stubIsOutOfSync) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateInvalidChainID + }) + }) + t.Run("if fails to subscribe, becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + // might be called multiple times + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + expectedError := errors.New("failed to subscribe") + rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(nil, expectedError) + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + node.declareOutOfSync(stubIsOutOfSync) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on subscription termination becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + // might be called multiple times + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + sub := 
mocks.NewSubscription(t) + errChan := make(chan error, 1) + errChan <- errors.New("subscription was terminate") + sub.On("Err").Return((<-chan error)(errChan)) + sub.On("Unsubscribe").Once() + rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(sub, nil).Once() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + node.declareOutOfSync(stubIsOutOfSync) + tests.AssertLogEventually(t, observedLogs, "Subscription was terminated") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("becomes unreachable if head channel is closed", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + // might be called multiple times + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + sub := mocks.NewSubscription(t) + sub.On("Err").Return((<-chan error)(nil)) + sub.On("Unsubscribe").Once() + rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { + ch := args.Get(1).(chan<- Head) + close(ch) + }).Return(sub, nil).Once() + rpc.On("Dial", mock.Anything).Return(errors.New("failed to redial")).Maybe() + node.declareOutOfSync(stubIsOutOfSync) + tests.AssertLogEventually(t, observedLogs, "Subscription channel unexpectedly closed") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + + t.Run("becomes alive if it receives a newer head", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, 
testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil).Once() + // might be called multiple times + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + const highestBlock = 1000 + rpc.On("Subscribe", mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Run(func(args mock.Arguments) { + ch := args.Get(1).(chan<- Head) + go writeHeads(t, ch, head{BlockNumber: highestBlock - 1}, head{BlockNumber: highestBlock}) + }).Return(outOfSyncSubscription, nil).Once() + + setupRPCForAliveLoop(t, rpc) + + node.declareOutOfSync(func(num int64, td *big.Int) bool { + return num < highestBlock + }) + tests.AssertLogEventually(t, observedLogs, msgReceivedBlock) + tests.AssertLogEventually(t, observedLogs, msgInSync) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) + t.Run("becomes alive if there is no other nodes", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, testNodeOpts{ + noNewHeadsThreshold: tests.TestInterval, + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { + return 0, 100, big.NewInt(200) + } + + rpc.On("Dial", mock.Anything).Return(nil).Once() + // might be called multiple times + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil).Once() + + outOfSyncSubscription := mocks.NewSubscription(t) + outOfSyncSubscription.On("Err").Return((<-chan error)(nil)) + outOfSyncSubscription.On("Unsubscribe").Once() + rpc.On("Subscribe", 
mock.Anything, mock.Anything, rpcSubscriptionMethodNewHeads).Return(outOfSyncSubscription, nil).Once() + + setupRPCForAliveLoop(t, rpc) + + node.declareOutOfSync(stubIsOutOfSync) + tests.AssertLogEventually(t, observedLogs, "RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) +} + +func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { + t.Parallel() + + newAliveNode := func(t *testing.T, opts testNodeOpts) testNode { + node := newTestNode(t, opts) + opts.rpc.On("Close").Return(nil).Once() + // disconnects all on transfer to unreachable + opts.rpc.On("DisconnectAll") + + node.setState(nodeStateAlive) + return node + } + t.Run("returns on closed", func(t *testing.T) { + t.Parallel() + node := newTestNode(t, testNodeOpts{}) + node.setState(nodeStateClosed) + node.wg.Add(1) + node.unreachableLoop() + + }) + t.Run("on failed redial, keeps trying", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) + node.declareUnreachable() + tests.AssertLogCountEventually(t, observedLogs, "Failed to redial RPC node; still unreachable", 2) + }) + t.Run("on failed chainID verification, keep trying", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", 
mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateDialed, node.State()) + }).Return(nodeChainID, errors.New("failed to get chain id")) + node.declareUnreachable() + tests.AssertLogCountEventually(t, observedLogs, "Failed to redial RPC node; verify failed", 2) + }) + t.Run("on chain ID mismatch transitions to invalidChainID", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + rpcChainID := types.NewIDFromInt(11) + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) + node.declareUnreachable() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateInvalidChainID + }) + }) + t.Run("on valid chain ID becomes alive", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newAliveNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) + + setupRPCForAliveLoop(t, rpc) + + node.declareUnreachable() + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) +} + +func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { + t.Parallel() + newDialedNode := func(t *testing.T, opts testNodeOpts) testNode { + node := newTestNode(t, opts) + opts.rpc.On("Close").Return(nil).Once() + opts.rpc.On("DisconnectAll") + + node.setState(nodeStateDialed) + return node + } + t.Run("returns on closed", func(t *testing.T) { + t.Parallel() + node := newTestNode(t, testNodeOpts{}) + node.setState(nodeStateClosed) + node.wg.Add(1) + node.invalidChainIDLoop() + + }) + t.Run("on failed 
chainID call becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("ChainID", mock.Anything).Return(nodeChainID, errors.New("failed to get chain id")) + // for unreachable loop + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")).Maybe() + node.declareInvalidChainID() + tests.AssertLogEventually(t, observedLogs, "Unexpected error while verifying RPC node chain ID") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on chainID mismatch keeps trying", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + rpcChainID := types.NewIDFromInt(11) + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) + node.declareInvalidChainID() + tests.AssertLogCountEventually(t, observedLogs, "Failed to verify RPC node; remote endpoint returned the wrong chain ID", 2) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateInvalidChainID + }) + }) + t.Run("on valid chainID becomes alive", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newDialedNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) + + setupRPCForAliveLoop(t, rpc) + + node.declareInvalidChainID() + tests.AssertEventually(t, func() bool { + return 
node.State() == nodeStateAlive + }) + }) +} + +func TestUnit_NodeLifecycle_start(t *testing.T) { + t.Parallel() + + newNode := func(t *testing.T, opts testNodeOpts) testNode { + node := newTestNode(t, opts) + opts.rpc.On("Close").Return(nil).Once() + + return node + } + t.Run("if fails on initial dial, becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(errors.New("failed to dial")) + // disconnects all on transfer to unreachable + rpc.On("DisconnectAll") + err := node.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertLogEventually(t, observedLogs, "Dial failed: Node is unreachable") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("if chainID verification fails, becomes unreachable", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + node := newNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + lggr: lggr, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Run(func(_ mock.Arguments) { + assert.Equal(t, nodeStateDialed, node.State()) + }).Return(nodeChainID, errors.New("failed to get chain id")) + // disconnects all on transfer to unreachable + rpc.On("DisconnectAll") + err := node.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertLogEventually(t, observedLogs, "Verify failed") + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateUnreachable + }) + }) + t.Run("on chain ID mismatch transitions to invalidChainID", 
func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.NewIDFromInt(10) + rpcChainID := types.NewIDFromInt(11) + node := newNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Return(rpcChainID, nil) + // disconnects all on transfer to unreachable + rpc.On("DisconnectAll") + err := node.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateInvalidChainID + }) + }) + t.Run("on valid chain ID becomes alive", func(t *testing.T) { + t.Parallel() + rpc := newMockNodeClient[types.ID, Head](t) + nodeChainID := types.RandomID() + node := newNode(t, testNodeOpts{ + rpc: rpc, + chainID: nodeChainID, + }) + defer func() { assert.NoError(t, node.close()) }() + + rpc.On("Dial", mock.Anything).Return(nil) + rpc.On("ChainID", mock.Anything).Return(nodeChainID, nil) + + setupRPCForAliveLoop(t, rpc) + + err := node.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertEventually(t, func() bool { + return node.State() == nodeStateAlive + }) + }) +} + +func TestUnit_NodeLifecycle_syncStatus(t *testing.T) { + t.Parallel() + t.Run("skip if nLiveNodes is not configured", func(t *testing.T) { + node := newTestNode(t, testNodeOpts{}) + outOfSync, liveNodes := node.syncStatus(0, nil) + assert.Equal(t, false, outOfSync) + assert.Equal(t, 0, liveNodes) + }) + t.Run("skip if syncThreshold is not configured", func(t *testing.T) { + node := newTestNode(t, testNodeOpts{}) + node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { + return + } + outOfSync, liveNodes := node.syncStatus(0, nil) + assert.Equal(t, false, outOfSync) + assert.Equal(t, 0, liveNodes) + }) + t.Run("panics on invalid selection mode", func(t *testing.T) { + node := newTestNode(t, testNodeOpts{ + config: 
testNodeConfig{syncThreshold: 1}, + }) + node.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { + return + } + assert.Panics(t, func() { + _, _ = node.syncStatus(0, nil) + }) + }) + t.Run("block height selection mode", func(t *testing.T) { + const syncThreshold = 10 + const highestBlock = 1000 + const nodesNum = 20 + const totalDifficulty = 3000 + testCases := []struct { + name string + blockNumber int64 + outOfSync bool + }{ + { + name: "below threshold", + blockNumber: highestBlock - syncThreshold - 1, + outOfSync: true, + }, + { + name: "equal to threshold", + blockNumber: highestBlock - syncThreshold, + outOfSync: false, + }, + { + name: "equal to highest block", + blockNumber: highestBlock, + outOfSync: false, + }, + { + name: "higher than highest block", + blockNumber: highestBlock, + outOfSync: false, + }, + } + + for _, selectionMode := range []string{NodeSelectionModeHighestHead, NodeSelectionModeRoundRobin, NodeSelectionModePriorityLevel} { + node := newTestNode(t, testNodeOpts{ + config: testNodeConfig{ + syncThreshold: syncThreshold, + selectionMode: selectionMode, + }, + }) + node.nLiveNodes = func() (int, int64, *big.Int) { + return nodesNum, highestBlock, big.NewInt(totalDifficulty) + } + for _, td := range []int64{totalDifficulty - syncThreshold - 1, totalDifficulty - syncThreshold, totalDifficulty, totalDifficulty + 1} { + for _, testCase := range testCases { + t.Run(fmt.Sprintf("%s: selectionMode: %s: total difficulty: %d", testCase.name, selectionMode, td), func(t *testing.T) { + outOfSync, liveNodes := node.syncStatus(testCase.blockNumber, big.NewInt(td)) + assert.Equal(t, nodesNum, liveNodes) + assert.Equal(t, testCase.outOfSync, outOfSync) + }) + } + } + } + + }) + t.Run("total difficulty selection mode", func(t *testing.T) { + const syncThreshold = 10 + const highestBlock = 1000 + const nodesNum = 20 + const totalDifficulty = 3000 + testCases := []struct { + name string + totalDifficulty int64 + outOfSync bool + 
}{ + { + name: "below threshold", + totalDifficulty: totalDifficulty - syncThreshold - 1, + outOfSync: true, + }, + { + name: "equal to threshold", + totalDifficulty: totalDifficulty - syncThreshold, + outOfSync: false, + }, + { + name: "equal to highest block", + totalDifficulty: totalDifficulty, + outOfSync: false, + }, + { + name: "higher than highest block", + totalDifficulty: totalDifficulty, + outOfSync: false, + }, + } + + node := newTestNode(t, testNodeOpts{ + config: testNodeConfig{ + syncThreshold: syncThreshold, + selectionMode: NodeSelectionModeTotalDifficulty, + }, + }) + node.nLiveNodes = func() (int, int64, *big.Int) { + return nodesNum, highestBlock, big.NewInt(totalDifficulty) + } + for _, hb := range []int64{highestBlock - syncThreshold - 1, highestBlock - syncThreshold, highestBlock, highestBlock + 1} { + for _, testCase := range testCases { + t.Run(fmt.Sprintf("%s: selectionMode: %s: highest block: %d", testCase.name, NodeSelectionModeTotalDifficulty, hb), func(t *testing.T) { + outOfSync, liveNodes := node.syncStatus(hb, big.NewInt(testCase.totalDifficulty)) + assert.Equal(t, nodesNum, liveNodes) + assert.Equal(t, testCase.outOfSync, outOfSync) + }) + } + } + + }) +} diff --git a/common/client/node_selector.go b/common/client/node_selector.go new file mode 100644 index 00000000..62583ec5 --- /dev/null +++ b/common/client/node_selector.go @@ -0,0 +1,46 @@ +package client + +import ( + "fmt" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +const ( + NodeSelectionModeHighestHead = "HighestHead" + NodeSelectionModeRoundRobin = "RoundRobin" + NodeSelectionModeTotalDifficulty = "TotalDifficulty" + NodeSelectionModePriorityLevel = "PriorityLevel" +) + +//go:generate mockery --quiet --name NodeSelector --structname mockNodeSelector --filename "mock_node_selector_test.go" --inpackage --case=underscore +type NodeSelector[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +] interface { + // Select returns a Node, or nil if 
none can be selected. + // Implementation must be thread-safe. + Select() Node[CHAIN_ID, HEAD, RPC] + // Name returns the strategy name, e.g. "HighestHead" or "RoundRobin" + Name() string +} + +func newNodeSelector[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +](selectionMode string, nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { + switch selectionMode { + case NodeSelectionModeHighestHead: + return NewHighestHeadNodeSelector[CHAIN_ID, HEAD, RPC](nodes) + case NodeSelectionModeRoundRobin: + return NewRoundRobinSelector[CHAIN_ID, HEAD, RPC](nodes) + case NodeSelectionModeTotalDifficulty: + return NewTotalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC](nodes) + case NodeSelectionModePriorityLevel: + return NewPriorityLevelNodeSelector[CHAIN_ID, HEAD, RPC](nodes) + default: + panic(fmt.Sprintf("unsupported NodeSelectionMode: %s", selectionMode)) + } +} diff --git a/common/client/node_selector_highest_head.go b/common/client/node_selector_highest_head.go new file mode 100644 index 00000000..b926ef9e --- /dev/null +++ b/common/client/node_selector_highest_head.go @@ -0,0 +1,41 @@ +package client + +import ( + "math" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +type highestHeadNodeSelector[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +] []Node[CHAIN_ID, HEAD, RPC] + +func NewHighestHeadNodeSelector[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { + return highestHeadNodeSelector[CHAIN_ID, HEAD, RPC](nodes) +} + +func (s highestHeadNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] { + var highestHeadNumber int64 = math.MinInt64 + var highestHeadNodes []Node[CHAIN_ID, HEAD, RPC] + for _, n := range s { + state, currentHeadNumber, _ := n.StateAndLatest() + if state == nodeStateAlive && currentHeadNumber >= highestHeadNumber { + if highestHeadNumber < currentHeadNumber { + 
highestHeadNumber = currentHeadNumber + highestHeadNodes = nil + } + highestHeadNodes = append(highestHeadNodes, n) + } + } + return firstOrHighestPriority(highestHeadNodes) +} + +func (s highestHeadNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string { + return NodeSelectionModeHighestHead +} diff --git a/common/client/node_selector_highest_head_test.go b/common/client/node_selector_highest_head_test.go new file mode 100644 index 00000000..6f3bc34e --- /dev/null +++ b/common/client/node_selector_highest_head_test.go @@ -0,0 +1,176 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +func TestHighestHeadNodeSelectorName(t *testing.T) { + selector := newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]](NodeSelectionModeHighestHead, nil) + assert.Equal(t, selector.Name(), NodeSelectionModeHighestHead) +} + +func TestHighestHeadNodeSelector(t *testing.T) { + t.Parallel() + + type nodeClient NodeClient[types.ID, Head] + + var nodes []Node[types.ID, Head, nodeClient] + + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, Head, nodeClient](t) + if i == 0 { + // first node is out of sync + node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil) + } else if i == 1 { + // second node is alive, LatestReceivedBlockNumber = 1 + node.On("StateAndLatest").Return(nodeStateAlive, int64(1), nil) + } else { + // third node is alive, LatestReceivedBlockNumber = 2 (best node) + node.On("StateAndLatest").Return(nodeStateAlive, int64(2), nil) + } + node.On("Order").Maybe().Return(int32(1)) + nodes = append(nodes, node) + } + + selector := newNodeSelector[types.ID, Head, nodeClient](NodeSelectionModeHighestHead, nodes) + assert.Same(t, nodes[2], selector.Select()) + + t.Run("stick to the same node", func(t *testing.T) { + node := newMockNode[types.ID, Head, nodeClient](t) + // fourth node is alive, LatestReceivedBlockNumber = 2 (same as 3rd) + 
node.On("StateAndLatest").Return(nodeStateAlive, int64(2), nil) + node.On("Order").Return(int32(1)) + nodes = append(nodes, node) + + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + assert.Same(t, nodes[2], selector.Select()) + }) + + t.Run("another best node", func(t *testing.T) { + node := newMockNode[types.ID, Head, nodeClient](t) + // fifth node is alive, LatestReceivedBlockNumber = 3 (better than 3rd and 4th) + node.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node.On("Order").Return(int32(1)) + nodes = append(nodes, node) + + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + assert.Same(t, nodes[4], selector.Select()) + }) + + t.Run("nodes never update latest block number", func(t *testing.T) { + node1 := newMockNode[types.ID, Head, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil) + node1.On("Order").Return(int32(1)) + node2 := newMockNode[types.ID, Head, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil) + node2.On("Order").Return(int32(1)) + selector := newNodeSelector(NodeSelectionModeHighestHead, []Node[types.ID, Head, nodeClient]{node1, node2}) + assert.Same(t, node1, selector.Select()) + }) +} + +func TestHighestHeadNodeSelector_None(t *testing.T) { + t.Parallel() + + type nodeClient NodeClient[types.ID, Head] + var nodes []Node[types.ID, Head, nodeClient] + + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, Head, nodeClient](t) + if i == 0 { + // first node is out of sync + node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil) + } else { + // others are unreachable + node.On("StateAndLatest").Return(nodeStateUnreachable, int64(1), nil) + } + nodes = append(nodes, node) + } + + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + assert.Nil(t, selector.Select()) +} + +func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { + t.Parallel() + + type nodeClient NodeClient[types.ID, Head] + var nodes 
[]Node[types.ID, Head, nodeClient] + + t.Run("same head and order", func(t *testing.T) { + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, Head, nodeClient](t) + node.On("StateAndLatest").Return(nodeStateAlive, int64(1), nil) + node.On("Order").Return(int32(2)) + nodes = append(nodes, node) + } + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + //Should select the first node because all things are equal + assert.Same(t, nodes[0], selector.Select()) + }) + + t.Run("same head but different order", func(t *testing.T) { + node1 := newMockNode[types.ID, Head, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node1.On("Order").Return(int32(3)) + + node2 := newMockNode[types.ID, Head, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node2.On("Order").Return(int32(1)) + + node3 := newMockNode[types.ID, Head, nodeClient](t) + node3.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node3.On("Order").Return(int32(2)) + + nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + //Should select the second node as it has the highest priority + assert.Same(t, nodes[1], selector.Select()) + }) + + t.Run("different head but same order", func(t *testing.T) { + node1 := newMockNode[types.ID, Head, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, int64(1), nil) + node1.On("Order").Maybe().Return(int32(3)) + + node2 := newMockNode[types.ID, Head, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, int64(2), nil) + node2.On("Order").Maybe().Return(int32(3)) + + node3 := newMockNode[types.ID, Head, nodeClient](t) + node3.On("StateAndLatest").Return(nodeStateAlive, int64(3), nil) + node3.On("Order").Return(int32(3)) + + nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + //Should select the 
third node as it has the highest head + assert.Same(t, nodes[2], selector.Select()) + }) + + t.Run("different head and different order", func(t *testing.T) { + node1 := newMockNode[types.ID, Head, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, int64(10), nil) + node1.On("Order").Maybe().Return(int32(3)) + + node2 := newMockNode[types.ID, Head, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, int64(11), nil) + node2.On("Order").Maybe().Return(int32(4)) + + node3 := newMockNode[types.ID, Head, nodeClient](t) + node3.On("StateAndLatest").Return(nodeStateAlive, int64(11), nil) + node3.On("Order").Maybe().Return(int32(3)) + + node4 := newMockNode[types.ID, Head, nodeClient](t) + node4.On("StateAndLatest").Return(nodeStateAlive, int64(10), nil) + node4.On("Order").Maybe().Return(int32(1)) + + nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3, node4} + selector := newNodeSelector(NodeSelectionModeHighestHead, nodes) + //Should select the third node as it has the highest head and will win the priority tie-breaker + assert.Same(t, nodes[2], selector.Select()) + }) +} diff --git a/common/client/node_selector_priority_level.go b/common/client/node_selector_priority_level.go new file mode 100644 index 00000000..dfe3ca26 --- /dev/null +++ b/common/client/node_selector_priority_level.go @@ -0,0 +1,129 @@ +package client + +import ( + "math" + "sort" + "sync/atomic" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +type priorityLevelNodeSelector[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +] struct { + nodes []Node[CHAIN_ID, HEAD, RPC] + roundRobinCount []atomic.Uint32 +} + +type nodeWithPriority[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +] struct { + node Node[CHAIN_ID, HEAD, RPC] + priority int32 +} + +func NewPriorityLevelNodeSelector[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, 
HEAD, RPC] { + return &priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]{ + nodes: nodes, + roundRobinCount: make([]atomic.Uint32, nrOfPriorityTiers(nodes)), + } +} + +func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] { + nodes := s.getHighestPriorityAliveTier() + + if len(nodes) == 0 { + return nil + } + priorityLevel := nodes[len(nodes)-1].priority + + // NOTE: Inc returns the number after addition, so we must -1 to get the "current" counter + count := s.roundRobinCount[priorityLevel].Add(1) - 1 + idx := int(count % uint32(len(nodes))) + + return nodes[idx].node +} + +func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string { + return NodeSelectionModePriorityLevel +} + +// getHighestPriorityAliveTier filters nodes that are not in state nodeStateAlive and +// returns only the highest tier of alive nodes +func (s priorityLevelNodeSelector[CHAIN_ID, HEAD, RPC]) getHighestPriorityAliveTier() []nodeWithPriority[CHAIN_ID, HEAD, RPC] { + var nodes []nodeWithPriority[CHAIN_ID, HEAD, RPC] + for _, n := range s.nodes { + if n.State() == nodeStateAlive { + nodes = append(nodes, nodeWithPriority[CHAIN_ID, HEAD, RPC]{n, n.Order()}) + } + } + + if len(nodes) == 0 { + return nil + } + + return removeLowerTiers(nodes) +} + +// removeLowerTiers take a slice of nodeWithPriority[CHAIN_ID, BLOCK_HASH, HEAD, RPC] and keeps only the highest tier +func removeLowerTiers[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +](nodes []nodeWithPriority[CHAIN_ID, HEAD, RPC]) []nodeWithPriority[CHAIN_ID, HEAD, RPC] { + sort.SliceStable(nodes, func(i, j int) bool { + return nodes[i].priority > nodes[j].priority + }) + + var nodes2 []nodeWithPriority[CHAIN_ID, HEAD, RPC] + currentPriority := nodes[len(nodes)-1].priority + + for _, n := range nodes { + if n.priority == currentPriority { + nodes2 = append(nodes2, n) + } + } + + return nodes2 +} + +// nrOfPriorityTiers calculates the total number of priority tiers +func 
nrOfPriorityTiers[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +](nodes []Node[CHAIN_ID, HEAD, RPC]) int32 { + highestPriority := int32(0) + for _, n := range nodes { + priority := n.Order() + if highestPriority < priority { + highestPriority = priority + } + } + return highestPriority + 1 +} + +// firstOrHighestPriority takes a list of nodes and returns the first one with the highest priority +func firstOrHighestPriority[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +](nodes []Node[CHAIN_ID, HEAD, RPC]) Node[CHAIN_ID, HEAD, RPC] { + hp := int32(math.MaxInt32) + var node Node[CHAIN_ID, HEAD, RPC] + for _, n := range nodes { + if n.Order() < hp { + hp = n.Order() + node = n + } + } + return node +} diff --git a/common/client/node_selector_priority_level_test.go b/common/client/node_selector_priority_level_test.go new file mode 100644 index 00000000..fbcdc740 --- /dev/null +++ b/common/client/node_selector_priority_level_test.go @@ -0,0 +1,91 @@ +package client + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/common/types" + + "github.com/stretchr/testify/assert" +) + +func TestPriorityLevelNodeSelectorName(t *testing.T) { + selector := newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]](NodeSelectionModePriorityLevel, nil) + assert.Equal(t, selector.Name(), NodeSelectionModePriorityLevel) +} + +func TestPriorityLevelNodeSelector(t *testing.T) { + t.Parallel() + + type nodeClient NodeClient[types.ID, Head] + type testNode struct { + order int32 + state nodeState + } + type testCase struct { + name string + nodes []testNode + expect []int // indexes of the nodes expected to be returned by Select + } + + testCases := []testCase{ + { + name: "TwoNodesSameOrder: Highest Allowed Order", + nodes: []testNode{ + {order: 1, state: nodeStateAlive}, + {order: 1, state: nodeStateAlive}, + }, + expect: []int{0, 1, 0, 1, 0, 1}, + }, + { + name: "TwoNodesSameOrder: Lowest Allowed Order", + nodes: []testNode{ + 
{order: 100, state: nodeStateAlive}, + {order: 100, state: nodeStateAlive}, + }, + expect: []int{0, 1, 0, 1, 0, 1}, + }, + { + name: "NoneAvailable", + nodes: []testNode{ + {order: 1, state: nodeStateOutOfSync}, + {order: 1, state: nodeStateUnreachable}, + {order: 1, state: nodeStateUnreachable}, + }, + expect: []int{}, // no nodes should be selected + }, + { + name: "DifferentOrder", + nodes: []testNode{ + {order: 1, state: nodeStateAlive}, + {order: 2, state: nodeStateAlive}, + {order: 3, state: nodeStateAlive}, + }, + expect: []int{0, 0}, // only the highest order node should be selected + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var nodes []Node[types.ID, Head, nodeClient] + for _, tn := range tc.nodes { + node := newMockNode[types.ID, Head, nodeClient](t) + node.On("State").Return(tn.state) + node.On("Order").Return(tn.order) + nodes = append(nodes, node) + } + + selector := newNodeSelector(NodeSelectionModePriorityLevel, nodes) + for _, idx := range tc.expect { + if idx >= len(nodes) { + t.Fatalf("Invalid node index %d in test case '%s'", idx, tc.name) + } + assert.Same(t, nodes[idx], selector.Select()) + } + + // Check for nil selection if expected slice is empty + if len(tc.expect) == 0 { + assert.Nil(t, selector.Select()) + } + }) + } +} diff --git a/common/client/node_selector_round_robin.go b/common/client/node_selector_round_robin.go new file mode 100644 index 00000000..1851bf86 --- /dev/null +++ b/common/client/node_selector_round_robin.go @@ -0,0 +1,50 @@ +package client + +import ( + "sync/atomic" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +type roundRobinSelector[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +] struct { + nodes []Node[CHAIN_ID, HEAD, RPC] + roundRobinCount atomic.Uint32 +} + +func NewRoundRobinSelector[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { + return 
&roundRobinSelector[CHAIN_ID, HEAD, RPC]{ + nodes: nodes, + } +} + +func (s *roundRobinSelector[CHAIN_ID, HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] { + var liveNodes []Node[CHAIN_ID, HEAD, RPC] + for _, n := range s.nodes { + if n.State() == nodeStateAlive { + liveNodes = append(liveNodes, n) + } + } + + nNodes := len(liveNodes) + if nNodes == 0 { + return nil + } + + // NOTE: Inc returns the number after addition, so we must -1 to get the "current" counter + count := s.roundRobinCount.Add(1) - 1 + idx := int(count % uint32(nNodes)) + + return liveNodes[idx] +} + +func (s *roundRobinSelector[CHAIN_ID, HEAD, RPC]) Name() string { + return NodeSelectionModeRoundRobin +} diff --git a/common/client/node_selector_round_robin_test.go b/common/client/node_selector_round_robin_test.go new file mode 100644 index 00000000..372abede --- /dev/null +++ b/common/client/node_selector_round_robin_test.go @@ -0,0 +1,61 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +func TestRoundRobinNodeSelectorName(t *testing.T) { + selector := newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]](NodeSelectionModeRoundRobin, nil) + assert.Equal(t, selector.Name(), NodeSelectionModeRoundRobin) +} + +func TestRoundRobinNodeSelector(t *testing.T) { + t.Parallel() + + type nodeClient NodeClient[types.ID, Head] + var nodes []Node[types.ID, Head, nodeClient] + + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, Head, nodeClient](t) + if i == 0 { + // first node is out of sync + node.On("State").Return(nodeStateOutOfSync) + } else { + // second & third nodes are alive + node.On("State").Return(nodeStateAlive) + } + nodes = append(nodes, node) + } + + selector := newNodeSelector(NodeSelectionModeRoundRobin, nodes) + assert.Same(t, nodes[1], selector.Select()) + assert.Same(t, nodes[2], selector.Select()) + assert.Same(t, nodes[1], selector.Select()) + assert.Same(t, nodes[2], 
selector.Select()) +} + +func TestRoundRobinNodeSelector_None(t *testing.T) { + t.Parallel() + + type nodeClient NodeClient[types.ID, Head] + var nodes []Node[types.ID, Head, nodeClient] + + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, Head, nodeClient](t) + if i == 0 { + // first node is out of sync + node.On("State").Return(nodeStateOutOfSync) + } else { + // others are unreachable + node.On("State").Return(nodeStateUnreachable) + } + nodes = append(nodes, node) + } + + selector := newNodeSelector(NodeSelectionModeRoundRobin, nodes) + assert.Nil(t, selector.Select()) +} diff --git a/common/client/node_selector_test.go b/common/client/node_selector_test.go new file mode 100644 index 00000000..7dadcc86 --- /dev/null +++ b/common/client/node_selector_test.go @@ -0,0 +1,18 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +func TestNodeSelector(t *testing.T) { + // rest of the tests are located in specific node selectors tests + t.Run("panics on unknown type", func(t *testing.T) { + assert.Panics(t, func() { + _ = newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]]("unknown", nil) + }) + }) +} diff --git a/common/client/node_selector_total_difficulty.go b/common/client/node_selector_total_difficulty.go new file mode 100644 index 00000000..d35fe400 --- /dev/null +++ b/common/client/node_selector_total_difficulty.go @@ -0,0 +1,54 @@ +package client + +import ( + "math/big" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +type totalDifficultyNodeSelector[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +] []Node[CHAIN_ID, HEAD, RPC] + +func NewTotalDifficultyNodeSelector[ + CHAIN_ID types.ID, + HEAD Head, + RPC NodeClient[CHAIN_ID, HEAD], +](nodes []Node[CHAIN_ID, HEAD, RPC]) NodeSelector[CHAIN_ID, HEAD, RPC] { + return totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC](nodes) +} + +func (s totalDifficultyNodeSelector[CHAIN_ID, 
HEAD, RPC]) Select() Node[CHAIN_ID, HEAD, RPC] { + // NodeNoNewHeadsThreshold may not be enabled, in this case all nodes have td == nil + var highestTD *big.Int + var nodes []Node[CHAIN_ID, HEAD, RPC] + var aliveNodes []Node[CHAIN_ID, HEAD, RPC] + + for _, n := range s { + state, _, currentTD := n.StateAndLatest() + if state != nodeStateAlive { + continue + } + + aliveNodes = append(aliveNodes, n) + if currentTD != nil && (highestTD == nil || currentTD.Cmp(highestTD) >= 0) { + if highestTD == nil || currentTD.Cmp(highestTD) > 0 { + highestTD = currentTD + nodes = nil + } + nodes = append(nodes, n) + } + } + + //If all nodes have td == nil pick one from the nodes that are alive + if len(nodes) == 0 { + return firstOrHighestPriority(aliveNodes) + } + return firstOrHighestPriority(nodes) +} + +func (s totalDifficultyNodeSelector[CHAIN_ID, HEAD, RPC]) Name() string { + return NodeSelectionModeTotalDifficulty +} diff --git a/common/client/node_selector_total_difficulty_test.go b/common/client/node_selector_total_difficulty_test.go new file mode 100644 index 00000000..7980f5d5 --- /dev/null +++ b/common/client/node_selector_total_difficulty_test.go @@ -0,0 +1,178 @@ +package client + +import ( + big "math/big" + "testing" + + "github.com/goplugin/pluginv3.0/v2/common/types" + + "github.com/stretchr/testify/assert" +) + +func TestTotalDifficultyNodeSelectorName(t *testing.T) { + selector := newNodeSelector[types.ID, Head, NodeClient[types.ID, Head]](NodeSelectionModeTotalDifficulty, nil) + assert.Equal(t, selector.Name(), NodeSelectionModeTotalDifficulty) +} + +func TestTotalDifficultyNodeSelector(t *testing.T) { + t.Parallel() + + type nodeClient NodeClient[types.ID, Head] + var nodes []Node[types.ID, Head, nodeClient] + + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, Head, nodeClient](t) + if i == 0 { + // first node is out of sync + node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil) + } else if i == 1 { + // second node is alive + 
node.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(7)) + } else { + // third node is alive and best + node.On("StateAndLatest").Return(nodeStateAlive, int64(2), big.NewInt(8)) + } + node.On("Order").Maybe().Return(int32(1)) + nodes = append(nodes, node) + } + + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + assert.Same(t, nodes[2], selector.Select()) + + t.Run("stick to the same node", func(t *testing.T) { + node := newMockNode[types.ID, Head, nodeClient](t) + // fourth node is alive (same as 3rd) + node.On("StateAndLatest").Return(nodeStateAlive, int64(2), big.NewInt(8)) + node.On("Order").Maybe().Return(int32(1)) + nodes = append(nodes, node) + + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + assert.Same(t, nodes[2], selector.Select()) + }) + + t.Run("another best node", func(t *testing.T) { + node := newMockNode[types.ID, Head, nodeClient](t) + // fifth node is alive (better than 3rd and 4th) + node.On("StateAndLatest").Return(nodeStateAlive, int64(3), big.NewInt(11)) + node.On("Order").Maybe().Return(int32(1)) + nodes = append(nodes, node) + + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + assert.Same(t, nodes[4], selector.Select()) + }) + + t.Run("nodes never update latest block number", func(t *testing.T) { + node1 := newMockNode[types.ID, Head, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil) + node1.On("Order").Maybe().Return(int32(1)) + node2 := newMockNode[types.ID, Head, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, int64(-1), nil) + node2.On("Order").Maybe().Return(int32(1)) + nodes := []Node[types.ID, Head, nodeClient]{node1, node2} + + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + assert.Same(t, node1, selector.Select()) + }) +} + +func TestTotalDifficultyNodeSelector_None(t *testing.T) { + t.Parallel() + + type nodeClient NodeClient[types.ID, Head] + var nodes []Node[types.ID, Head, 
nodeClient] + + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, Head, nodeClient](t) + if i == 0 { + // first node is out of sync + node.On("StateAndLatest").Return(nodeStateOutOfSync, int64(-1), nil) + } else { + // others are unreachable + node.On("StateAndLatest").Return(nodeStateUnreachable, int64(1), big.NewInt(7)) + } + nodes = append(nodes, node) + } + + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + assert.Nil(t, selector.Select()) +} + +func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { + t.Parallel() + + type nodeClient NodeClient[types.ID, Head] + var nodes []Node[types.ID, Head, nodeClient] + + t.Run("same td and order", func(t *testing.T) { + for i := 0; i < 3; i++ { + node := newMockNode[types.ID, Head, nodeClient](t) + node.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(10)) + node.On("Order").Return(int32(2)) + nodes = append(nodes, node) + } + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + //Should select the first node because all things are equal + assert.Same(t, nodes[0], selector.Select()) + }) + + t.Run("same td but different order", func(t *testing.T) { + node1 := newMockNode[types.ID, Head, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, int64(3), big.NewInt(10)) + node1.On("Order").Return(int32(3)) + + node2 := newMockNode[types.ID, Head, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, int64(3), big.NewInt(10)) + node2.On("Order").Return(int32(1)) + + node3 := newMockNode[types.ID, Head, nodeClient](t) + node3.On("StateAndLatest").Return(nodeStateAlive, int64(3), big.NewInt(10)) + node3.On("Order").Return(int32(2)) + + nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + //Should select the second node as it has the highest priority + assert.Same(t, nodes[1], selector.Select()) + }) + + t.Run("different td but same order", func(t 
*testing.T) { + node1 := newMockNode[types.ID, Head, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(10)) + node1.On("Order").Maybe().Return(int32(3)) + + node2 := newMockNode[types.ID, Head, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(11)) + node2.On("Order").Maybe().Return(int32(3)) + + node3 := newMockNode[types.ID, Head, nodeClient](t) + node3.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(12)) + node3.On("Order").Return(int32(3)) + + nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3} + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + //Should select the third node as it has the highest td + assert.Same(t, nodes[2], selector.Select()) + }) + + t.Run("different head and different order", func(t *testing.T) { + node1 := newMockNode[types.ID, Head, nodeClient](t) + node1.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(100)) + node1.On("Order").Maybe().Return(int32(4)) + + node2 := newMockNode[types.ID, Head, nodeClient](t) + node2.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(110)) + node2.On("Order").Maybe().Return(int32(5)) + + node3 := newMockNode[types.ID, Head, nodeClient](t) + node3.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(110)) + node3.On("Order").Maybe().Return(int32(1)) + + node4 := newMockNode[types.ID, Head, nodeClient](t) + node4.On("StateAndLatest").Return(nodeStateAlive, int64(1), big.NewInt(105)) + node4.On("Order").Maybe().Return(int32(2)) + + nodes := []Node[types.ID, Head, nodeClient]{node1, node2, node3, node4} + selector := newNodeSelector(NodeSelectionModeTotalDifficulty, nodes) + //Should select the third node as it has the highest td and will win the priority tie-breaker + assert.Same(t, nodes[2], selector.Select()) + }) +} diff --git a/common/client/node_test.go b/common/client/node_test.go new file mode 100644 index 00000000..58c2964a --- 
/dev/null +++ b/common/client/node_test.go @@ -0,0 +1,80 @@ +package client + +import ( + "net/url" + "testing" + "time" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +type testNodeConfig struct { + pollFailureThreshold uint32 + pollInterval time.Duration + selectionMode string + syncThreshold uint32 +} + +func (n testNodeConfig) PollFailureThreshold() uint32 { + return n.pollFailureThreshold +} + +func (n testNodeConfig) PollInterval() time.Duration { + return n.pollInterval +} + +func (n testNodeConfig) SelectionMode() string { + return n.selectionMode +} + +func (n testNodeConfig) SyncThreshold() uint32 { + return n.syncThreshold +} + +type testNode struct { + *node[types.ID, Head, NodeClient[types.ID, Head]] +} + +type testNodeOpts struct { + config testNodeConfig + noNewHeadsThreshold time.Duration + lggr logger.Logger + wsuri url.URL + httpuri *url.URL + name string + id int32 + chainID types.ID + nodeOrder int32 + rpc *mockNodeClient[types.ID, Head] + chainFamily string +} + +func newTestNode(t *testing.T, opts testNodeOpts) testNode { + if opts.lggr == nil { + opts.lggr = logger.Test(t) + } + + if opts.name == "" { + opts.name = "tes node" + } + + if opts.chainFamily == "" { + opts.chainFamily = "test node chain family" + } + + if opts.chainID == nil { + opts.chainID = types.RandomID() + } + + if opts.id == 0 { + opts.id = 42 + } + + nodeI := NewNode[types.ID, Head, NodeClient[types.ID, Head]](opts.config, opts.noNewHeadsThreshold, opts.lggr, + opts.wsuri, opts.httpuri, opts.name, opts.id, opts.chainID, opts.nodeOrder, opts.rpc, opts.chainFamily) + + return testNode{ + nodeI.(*node[types.ID, Head, NodeClient[types.ID, Head]]), + } +} diff --git a/common/client/send_only_node.go b/common/client/send_only_node.go new file mode 100644 index 00000000..94d6d9e8 --- /dev/null +++ b/common/client/send_only_node.go @@ -0,0 +1,186 @@ +package client + +import ( + "context" + "fmt" + "net/url" + "sync" + + 
"github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +//go:generate mockery --quiet --name sendOnlyClient --structname mockSendOnlyClient --filename "mock_send_only_client_test.go" --inpackage --case=underscore +type sendOnlyClient[ + CHAIN_ID types.ID, +] interface { + Close() + ChainID(context.Context) (CHAIN_ID, error) + DialHTTP() error +} + +// SendOnlyNode represents one node used as a sendonly +// +//go:generate mockery --quiet --name SendOnlyNode --structname mockSendOnlyNode --filename "mock_send_only_node_test.go" --inpackage --case=underscore +type SendOnlyNode[ + CHAIN_ID types.ID, + RPC sendOnlyClient[CHAIN_ID], +] interface { + // Start may attempt to connect to the node, but should only return error for misconfiguration - never for temporary errors. + Start(context.Context) error + Close() error + + ConfiguredChainID() CHAIN_ID + RPC() RPC + + String() string + // State returns nodeState + State() nodeState + // Name is a unique identifier for this node. 
+ Name() string +} + +// It only supports sending transactions +// It must use an http(s) url +type sendOnlyNode[ + CHAIN_ID types.ID, + RPC sendOnlyClient[CHAIN_ID], +] struct { + services.StateMachine + + stateMu sync.RWMutex // protects state* fields + state nodeState + + rpc RPC + uri url.URL + log logger.Logger + name string + chainID CHAIN_ID + chStop services.StopChan + wg sync.WaitGroup +} + +// NewSendOnlyNode returns a new sendonly node +func NewSendOnlyNode[ + CHAIN_ID types.ID, + RPC sendOnlyClient[CHAIN_ID], +]( + lggr logger.Logger, + httpuri url.URL, + name string, + chainID CHAIN_ID, + rpc RPC, +) SendOnlyNode[CHAIN_ID, RPC] { + s := new(sendOnlyNode[CHAIN_ID, RPC]) + s.name = name + s.log = logger.Named(logger.Named(lggr, "SendOnlyNode"), name) + s.log = logger.With(s.log, + "nodeTier", "sendonly", + ) + s.rpc = rpc + s.uri = httpuri + s.chainID = chainID + s.chStop = make(chan struct{}) + return s +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) Start(ctx context.Context) error { + return s.StartOnce(s.name, func() error { + s.start(ctx) + return nil + }) +} + +// Start setups up and verifies the sendonly node +// Should only be called once in a node's lifecycle +func (s *sendOnlyNode[CHAIN_ID, RPC]) start(startCtx context.Context) { + if s.State() != nodeStateUndialed { + panic(fmt.Sprintf("cannot dial node with state %v", s.state)) + } + + err := s.rpc.DialHTTP() + if err != nil { + promPoolRPCNodeTransitionsToUnusable.WithLabelValues(s.chainID.String(), s.name).Inc() + s.log.Errorw("Dial failed: SendOnly Node is unusable", "err", err) + s.setState(nodeStateUnusable) + return + } + s.setState(nodeStateDialed) + + if s.chainID.String() == "0" { + // Skip verification if chainID is zero + s.log.Warn("sendonly rpc ChainID verification skipped") + } else { + chainID, err := s.rpc.ChainID(startCtx) + if err != nil || chainID.String() != s.chainID.String() { + promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc() + if 
err != nil { + promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc() + s.log.Errorw(fmt.Sprintf("Verify failed: %v", err), "err", err) + s.setState(nodeStateUnreachable) + } else { + promPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(s.chainID.String(), s.name).Inc() + s.log.Errorf( + "sendonly rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s", + chainID.String(), + s.chainID.String(), + s.name, + ) + s.setState(nodeStateInvalidChainID) + } + // Since it has failed, spin up the verifyLoop that will keep + // retrying until success + s.wg.Add(1) + go s.verifyLoop() + return + } + } + + promPoolRPCNodeTransitionsToAlive.WithLabelValues(s.chainID.String(), s.name).Inc() + s.setState(nodeStateAlive) + s.log.Infow("Sendonly RPC Node is online", "nodeState", s.state) +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) Close() error { + return s.StopOnce(s.name, func() error { + s.rpc.Close() + close(s.chStop) + s.wg.Wait() + s.setState(nodeStateClosed) + return nil + }) +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) ConfiguredChainID() CHAIN_ID { + return s.chainID +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) RPC() RPC { + return s.rpc +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) String() string { + return fmt.Sprintf("(%s)%s:%s", Secondary.String(), s.name, s.uri.Redacted()) +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) setState(state nodeState) (changed bool) { + s.stateMu.Lock() + defer s.stateMu.Unlock() + if s.state == state { + return false + } + s.state = state + return true +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) State() nodeState { + s.stateMu.RLock() + defer s.stateMu.RUnlock() + return s.state +} + +func (s *sendOnlyNode[CHAIN_ID, RPC]) Name() string { + return s.name +} diff --git a/common/client/send_only_node_lifecycle.go b/common/client/send_only_node_lifecycle.go new file mode 100644 index 00000000..0f4ec311 --- /dev/null +++ b/common/client/send_only_node_lifecycle.go @@ -0,0 +1,67 @@ 
// verifyLoop may only be triggered once, on Start, if initial chain ID check
// fails.
//
// It re-checks the RPC's chain ID with redial backoff, updating node state
// and transition metrics on each failure, and exits permanently once the
// chain ID matches (state set to alive) or the node has been stopped.
func (s *sendOnlyNode[CHAIN_ID, RPC]) verifyLoop() {
	defer s.wg.Done()
	// ctx is cancelled when chStop closes (node shutdown).
	ctx, cancel := s.chStop.NewCtx()
	defer cancel()

	backoff := utils.NewRedialBackoff()
	for {
		// Wait out the backoff interval, aborting promptly on shutdown.
		select {
		case <-ctx.Done():
			return
		case <-time.After(backoff.Duration()):
		}
		chainID, err := s.rpc.ChainID(ctx)
		if err != nil {
			// RPC unreachable: record the transition only if the state
			// actually changed, so repeated failures don't inflate metrics.
			ok := s.IfStarted(func() {
				if changed := s.setState(nodeStateUnreachable); changed {
					promPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc()
				}
			})
			if !ok {
				// Node stopped; exit the loop.
				return
			}
			s.log.Errorw(fmt.Sprintf("Verify failed: %v", err), "err", err)
			continue
		} else if chainID.String() != s.chainID.String() {
			// RPC reports a different chain than configured.
			ok := s.IfStarted(func() {
				if changed := s.setState(nodeStateInvalidChainID); changed {
					promPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(s.chainID.String(), s.name).Inc()
				}
			})
			if !ok {
				return
			}
			s.log.Errorf(
				"sendonly rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s",
				chainID.String(),
				s.chainID.String(),
				s.name,
			)

			continue
		}
		// Chain ID matches: mark alive and exit permanently.
		ok := s.IfStarted(func() {
			if changed := s.setState(nodeStateAlive); changed {
				promPoolRPCNodeTransitionsToAlive.WithLabelValues(s.chainID.String(), s.name).Inc()
			}
		})
		if !ok {
			return
		}
		// NOTE(review): s.state is read here without holding stateMu, unlike
		// State(); looks benign for logging but technically racy — confirm.
		s.log.Infow("Sendonly RPC Node is online", "nodeState", s.state)
		return
	}
}
"github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/utils/tests" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +func TestNewSendOnlyNode(t *testing.T) { + t.Parallel() + + urlFormat := "http://user:%s@testurl.com" + password := "pass" + u, err := url.Parse(fmt.Sprintf(urlFormat, password)) + require.NoError(t, err) + redacted := fmt.Sprintf(urlFormat, "xxxxx") + lggr := logger.Test(t) + name := "TestNewSendOnlyNode" + chainID := types.RandomID() + client := newMockSendOnlyClient[types.ID](t) + + node := NewSendOnlyNode(lggr, *u, name, chainID, client) + assert.NotNil(t, node) + + // Must contain name & url with redacted password + assert.Contains(t, node.String(), fmt.Sprintf("%s:%s", name, redacted)) + assert.Equal(t, node.ConfiguredChainID(), chainID) +} + +func TestStartSendOnlyNode(t *testing.T) { + t.Parallel() + t.Run("becomes unusable if initial dial fails", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + client := newMockSendOnlyClient[types.ID](t) + client.On("Close").Once() + expectedError := errors.New("some http error") + client.On("DialHTTP").Return(expectedError).Once() + s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), types.RandomID(), client) + + defer func() { assert.NoError(t, s.Close()) }() + err := s.Start(tests.Context(t)) + require.NoError(t, err) + + assert.Equal(t, nodeStateUnusable, s.State()) + tests.RequireLogMessage(t, observedLogs, "Dial failed: SendOnly Node is unusable") + }) + t.Run("Default ChainID(0) produces warn and skips checks", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + client := newMockSendOnlyClient[types.ID](t) + client.On("Close").Once() + client.On("DialHTTP").Return(nil).Once() + s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), types.NewIDFromInt(0), client) + + defer func() { assert.NoError(t, s.Close()) 
}() + err := s.Start(tests.Context(t)) + require.NoError(t, err) + + assert.Equal(t, nodeStateAlive, s.State()) + tests.RequireLogMessage(t, observedLogs, "sendonly rpc ChainID verification skipped") + }) + t.Run("Can recover from chainID verification failure", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + client := newMockSendOnlyClient[types.ID](t) + client.On("Close").Once() + client.On("DialHTTP").Return(nil) + expectedError := errors.New("failed to get chain ID") + chainID := types.RandomID() + const failuresCount = 2 + client.On("ChainID", mock.Anything).Return(types.RandomID(), expectedError).Times(failuresCount) + client.On("ChainID", mock.Anything).Return(chainID, nil) + + s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), chainID, client) + + defer func() { assert.NoError(t, s.Close()) }() + err := s.Start(tests.Context(t)) + require.NoError(t, err) + + assert.Equal(t, nodeStateUnreachable, s.State()) + tests.AssertLogCountEventually(t, observedLogs, fmt.Sprintf("Verify failed: %v", expectedError), failuresCount) + tests.AssertEventually(t, func() bool { + return s.State() == nodeStateAlive + }) + }) + t.Run("Can recover from chainID mismatch", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + client := newMockSendOnlyClient[types.ID](t) + client.On("Close").Once() + client.On("DialHTTP").Return(nil).Once() + configuredChainID := types.NewIDFromInt(11) + rpcChainID := types.NewIDFromInt(20) + const failuresCount = 2 + client.On("ChainID", mock.Anything).Return(rpcChainID, nil).Times(failuresCount) + client.On("ChainID", mock.Anything).Return(configuredChainID, nil) + s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), configuredChainID, client) + + defer func() { assert.NoError(t, s.Close()) }() + err := s.Start(tests.Context(t)) + require.NoError(t, err) + + assert.Equal(t, nodeStateInvalidChainID, s.State()) + tests.AssertLogCountEventually(t, observedLogs, 
"sendonly rpc ChainID doesn't match local chain ID", failuresCount) + tests.AssertEventually(t, func() bool { + return s.State() == nodeStateAlive + }) + }) + t.Run("Start with Random ChainID", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + client := newMockSendOnlyClient[types.ID](t) + client.On("Close").Once() + client.On("DialHTTP").Return(nil).Once() + configuredChainID := types.RandomID() + client.On("ChainID", mock.Anything).Return(configuredChainID, nil) + s := NewSendOnlyNode(lggr, url.URL{}, t.Name(), configuredChainID, client) + + defer func() { assert.NoError(t, s.Close()) }() + err := s.Start(tests.Context(t)) + assert.NoError(t, err) + tests.AssertEventually(t, func() bool { + return s.State() == nodeStateAlive + }) + assert.Equal(t, 0, observedLogs.Len()) // No warnings expected + }) +} diff --git a/common/client/types.go b/common/client/types.go new file mode 100644 index 00000000..bf47d8b5 --- /dev/null +++ b/common/client/types.go @@ -0,0 +1,142 @@ +package client + +import ( + "context" + "math/big" + + "github.com/goplugin/plugin-common/pkg/assets" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// RPC includes all the necessary methods for a multi-node client to interact directly with any RPC endpoint. 
+// +//go:generate mockery --quiet --name RPC --structname mockRPC --inpackage --filename "mock_rpc_test.go" --case=underscore +type RPC[ + CHAIN_ID types.ID, + SEQ types.Sequence, + ADDR types.Hashable, + BLOCK_HASH types.Hashable, + TX any, + TX_HASH types.Hashable, + EVENT any, + EVENT_OPS any, + TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], + FEE feetypes.Fee, + HEAD types.Head[BLOCK_HASH], + +] interface { + NodeClient[ + CHAIN_ID, + HEAD, + ] + clientAPI[ + CHAIN_ID, + SEQ, + ADDR, + BLOCK_HASH, + TX, + TX_HASH, + EVENT, + EVENT_OPS, + TX_RECEIPT, + FEE, + HEAD, + ] +} + +// Head is the interface required by the NodeClient +// +//go:generate mockery --quiet --name Head --structname mockHead --filename "mock_head_test.go" --inpackage --case=underscore +type Head interface { + BlockNumber() int64 + BlockDifficulty() *big.Int +} + +// NodeClient includes all the necessary RPC methods required by a node. +// +//go:generate mockery --quiet --name NodeClient --structname mockNodeClient --filename "mock_node_client_test.go" --inpackage --case=underscore +type NodeClient[ + CHAIN_ID types.ID, + HEAD Head, +] interface { + connection[CHAIN_ID, HEAD] + + DialHTTP() error + DisconnectAll() + Close() + ClientVersion(context.Context) (string, error) + SubscribersCount() int32 + SetAliveLoopSub(types.Subscription) + UnsubscribeAllExceptAliveLoop() +} + +// clientAPI includes all the direct RPC methods required by the generalized common client to implement its own. 
+type clientAPI[ + CHAIN_ID types.ID, + SEQ types.Sequence, + ADDR types.Hashable, + BLOCK_HASH types.Hashable, + TX any, + TX_HASH types.Hashable, + EVENT any, + EVENT_OPS any, // event filter query options + TX_RECEIPT types.Receipt[TX_HASH, BLOCK_HASH], + FEE feetypes.Fee, + HEAD types.Head[BLOCK_HASH], +] interface { + connection[CHAIN_ID, HEAD] + + // Account + BalanceAt(ctx context.Context, accountAddress ADDR, blockNumber *big.Int) (*big.Int, error) + TokenBalance(ctx context.Context, accountAddress ADDR, tokenAddress ADDR) (*big.Int, error) + SequenceAt(ctx context.Context, accountAddress ADDR, blockNumber *big.Int) (SEQ, error) + PLIBalance(ctx context.Context, accountAddress ADDR, linkAddress ADDR) (*assets.Link, error) + PendingSequenceAt(ctx context.Context, addr ADDR) (SEQ, error) + EstimateGas(ctx context.Context, call any) (gas uint64, err error) + + // Transactions + SendTransaction(ctx context.Context, tx TX) error + SimulateTransaction(ctx context.Context, tx TX) error + TransactionByHash(ctx context.Context, txHash TX_HASH) (TX, error) + TransactionReceipt(ctx context.Context, txHash TX_HASH) (TX_RECEIPT, error) + SendEmptyTransaction( + ctx context.Context, + newTxAttempt func(seq SEQ, feeLimit uint32, fee FEE, fromAddress ADDR) (attempt any, err error), + seq SEQ, + gasLimit uint32, + fee FEE, + fromAddress ADDR, + ) (txhash string, err error) + + // Blocks + BlockByNumber(ctx context.Context, number *big.Int) (HEAD, error) + BlockByHash(ctx context.Context, hash BLOCK_HASH) (HEAD, error) + LatestBlockHeight(context.Context) (*big.Int, error) + + // Events + FilterEvents(ctx context.Context, query EVENT_OPS) ([]EVENT, error) + + // Misc + BatchCallContext(ctx context.Context, b []any) error + CallContract( + ctx context.Context, + msg interface{}, + blockNumber *big.Int, + ) (rpcErr []byte, extractErr error) + PendingCallContract( + ctx context.Context, + msg interface{}, + ) (rpcErr []byte, extractErr error) + CallContext(ctx context.Context, 
result interface{}, method string, args ...interface{}) error + CodeAt(ctx context.Context, account ADDR, blockNumber *big.Int) ([]byte, error) +} + +type connection[ + CHAIN_ID types.ID, + HEAD Head, +] interface { + ChainID(ctx context.Context) (CHAIN_ID, error) + Dial(ctx context.Context) error + Subscribe(ctx context.Context, channel chan<- HEAD, args ...interface{}) (types.Subscription, error) +} diff --git a/common/config/chaintype.go b/common/config/chaintype.go new file mode 100644 index 00000000..9ef4864b --- /dev/null +++ b/common/config/chaintype.go @@ -0,0 +1,50 @@ +package config + +import ( + "fmt" + "strings" +) + +// ChainType denotes the chain or network to work with +type ChainType string + +// nolint +const ( + ChainArbitrum ChainType = "arbitrum" + ChainMetis ChainType = "metis" + ChainOptimismBedrock ChainType = "optimismBedrock" + ChainXDai ChainType = "xdai" + ChainCelo ChainType = "celo" + ChainWeMix ChainType = "wemix" + ChainKroma ChainType = "kroma" + ChainZkSync ChainType = "zksync" + ChainScroll ChainType = "scroll" +) + +var ErrInvalidChainType = fmt.Errorf("must be one of %s or omitted", strings.Join([]string{ + string(ChainArbitrum), string(ChainMetis), string(ChainXDai), string(ChainOptimismBedrock), string(ChainCelo), + string(ChainKroma), string(ChainWeMix), string(ChainZkSync), string(ChainScroll)}, ", ")) + +// IsValid returns true if the ChainType value is known or empty. +func (c ChainType) IsValid() bool { + switch c { + case "", ChainArbitrum, ChainMetis, ChainOptimismBedrock, ChainXDai, ChainCelo, ChainKroma, ChainWeMix, ChainZkSync, ChainScroll: + return true + } + return false +} + +// IsL2 returns true if this chain is a Layer 2 chain. 
Notably: +// - the block numbers used for log searching are different from calling block.number +// - gas bumping is not supported, since there is no tx mempool +func (c ChainType) IsL2() bool { + switch c { + case ChainArbitrum, ChainMetis: + return true + + case ChainXDai: + fallthrough + default: + return false + } +} diff --git a/common/fee/models.go b/common/fee/models.go new file mode 100644 index 00000000..87cabee6 --- /dev/null +++ b/common/fee/models.go @@ -0,0 +1,85 @@ +package fee + +import ( + "errors" + "fmt" + "math/big" + + "github.com/goplugin/plugin-common/pkg/logger" + bigmath "github.com/goplugin/plugin-common/pkg/utils/big_math" + "github.com/goplugin/pluginv3.0/v2/common/chains/label" +) + +var ( + ErrBumpFeeExceedsLimit = errors.New("fee bump exceeds limit") + ErrBump = errors.New("fee bump failed") + ErrConnectivity = errors.New("transaction propagation issue: transactions are not being mined") +) + +func IsBumpErr(err error) bool { + return err != nil && (errors.Is(err, ErrBumpFeeExceedsLimit) || errors.Is(err, ErrBump) || errors.Is(err, ErrConnectivity)) +} + +// CalculateFee computes the fee price for a transaction. +// The fee price is the minimum of: +// - max fee price specified, default fee price and max fee price for the node. +func CalculateFee( + maxFeePrice, defaultPrice, maxFeePriceConfigured *big.Int, +) *big.Int { + maxFeePriceAllowed := bigmath.Min(maxFeePrice, maxFeePriceConfigured) + return bigmath.Min(defaultPrice, maxFeePriceAllowed) +} + +// CalculateBumpedFee computes the next fee price to attempt as the largest of: +// - A configured percentage bump (bumpPercent) on top of the baseline price. +// - A configured fixed amount of Unit (bumpMin) on top of the baseline price. +// The baseline price is the maximum of the previous fee price attempt and the node's current fee price. 
+func CalculateBumpedFee( + lggr logger.SugaredLogger, + currentfeePrice, originalfeePrice, maxFeePriceInput, maxBumpPrice, bumpMin *big.Int, + bumpPercent uint16, + toChainUnit feeUnitToChainUnit, +) (*big.Int, error) { + maxFeePrice := bigmath.Min(maxFeePriceInput, maxBumpPrice) + bumpedFeePrice := MaxBumpedFee(originalfeePrice, bumpPercent, bumpMin) + + // Update bumpedFeePrice if currentfeePrice is higher than bumpedFeePrice and within maxFeePrice + bumpedFeePrice = maxFee(lggr, currentfeePrice, bumpedFeePrice, maxFeePrice, "fee price", toChainUnit) + + if bumpedFeePrice.Cmp(maxFeePrice) > 0 { + return maxFeePrice, fmt.Errorf("bumped fee price of %s would exceed configured max fee price of %s (original price was %s). %s: %w", + toChainUnit(bumpedFeePrice), toChainUnit(maxFeePrice), toChainUnit(originalfeePrice), label.NodeConnectivityProblemWarning, ErrBumpFeeExceedsLimit) + } else if bumpedFeePrice.Cmp(originalfeePrice) == 0 { + // NOTE: This really shouldn't happen since we enforce minimums for + // FeeEstimator.BumpPercent and FeeEstimator.BumpMin in the config validation, + // but it's here anyway for a "belts and braces" approach + return bumpedFeePrice, fmt.Errorf("bumped fee price of %s is equal to original fee price of %s."+ + " ACTION REQUIRED: This is a configuration error, you must increase either "+ + "FeeEstimator.BumpPercent or FeeEstimator.BumpMin: %w", toChainUnit(bumpedFeePrice), toChainUnit(bumpedFeePrice), ErrBump) + } + return bumpedFeePrice, nil +} + +// Returns highest bumped fee price of originalFeePrice bumped by fixed units or percentage. 
+func MaxBumpedFee(originalFeePrice *big.Int, feeBumpPercent uint16, feeBumpUnits *big.Int) *big.Int { + return bigmath.Max( + addPercentage(originalFeePrice, feeBumpPercent), + new(big.Int).Add(originalFeePrice, feeBumpUnits), + ) +} + +// Returns the max of currentFeePrice, bumpedFeePrice, and maxFeePrice +func maxFee(lggr logger.SugaredLogger, currentFeePrice, bumpedFeePrice, maxFeePrice *big.Int, feeType string, toChainUnit feeUnitToChainUnit) *big.Int { + if currentFeePrice == nil { + return bumpedFeePrice + } + if currentFeePrice.Cmp(maxFeePrice) > 0 { + // Shouldn't happen because the estimator should not be allowed to + // estimate a higher fee than the maximum allowed + lggr.AssumptionViolationf("Ignoring current %s of %s that would exceed max %s of %s", feeType, toChainUnit(currentFeePrice), feeType, toChainUnit(maxFeePrice)) + } else if bumpedFeePrice.Cmp(currentFeePrice) < 0 { + // If the current fee price is higher than the old price bumped, use that instead + return currentFeePrice + } + return bumpedFeePrice +} diff --git a/common/fee/types/fee.go b/common/fee/types/fee.go new file mode 100644 index 00000000..2c03db59 --- /dev/null +++ b/common/fee/types/fee.go @@ -0,0 +1,15 @@ +package types + +import ( + "fmt" +) + +// Opt is an option for a gas estimator +type Opt int + +const ( + // OptForceRefetch forces the estimator to bust a cache if necessary + OptForceRefetch Opt = iota +) + +type Fee fmt.Stringer diff --git a/common/fee/utils.go b/common/fee/utils.go new file mode 100644 index 00000000..eeb2c966 --- /dev/null +++ b/common/fee/utils.go @@ -0,0 +1,29 @@ +package fee + +import ( + "fmt" + "math" + "math/big" + + "github.com/shopspring/decimal" +) + +func ApplyMultiplier(feeLimit uint32, multiplier float32) (uint32, error) { + result := decimal.NewFromBigInt(big.NewInt(0).SetUint64(uint64(feeLimit)), 0).Mul(decimal.NewFromFloat32(multiplier)).IntPart() + + if result > math.MaxUint32 { + return 0, fmt.Errorf("integer overflow when applying 
multiplier of %f to fee limit of %d", multiplier, feeLimit) + } + return uint32(result), nil +} + +// Returns the input value increased by the given percentage. +func addPercentage(value *big.Int, percentage uint16) *big.Int { + bumped := new(big.Int) + bumped.Mul(value, big.NewInt(int64(100+percentage))) + bumped.Div(bumped, big.NewInt(100)) + return bumped +} + +// Returns the fee in its chain specific unit. +type feeUnitToChainUnit func(fee *big.Int) string diff --git a/common/headtracker/head_broadcaster.go b/common/headtracker/head_broadcaster.go new file mode 100644 index 00000000..ba5bf5d8 --- /dev/null +++ b/common/headtracker/head_broadcaster.go @@ -0,0 +1,162 @@ +package headtracker + +import ( + "context" + "fmt" + "reflect" + "sync" + "time" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +const TrackableCallbackTimeout = 2 * time.Second + +type callbackSet[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] map[int]types.HeadTrackable[H, BLOCK_HASH] + +func (set callbackSet[H, BLOCK_HASH]) values() []types.HeadTrackable[H, BLOCK_HASH] { + var values []types.HeadTrackable[H, BLOCK_HASH] + for _, callback := range set { + values = append(values, callback) + } + return values +} + +type HeadBroadcaster[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] struct { + services.StateMachine + logger logger.Logger + callbacks callbackSet[H, BLOCK_HASH] + mailbox *mailbox.Mailbox[H] + mutex sync.Mutex + chClose services.StopChan + wgDone sync.WaitGroup + latest H + lastCallbackID int +} + +// NewHeadBroadcaster creates a new HeadBroadcaster +func NewHeadBroadcaster[ + H types.Head[BLOCK_HASH], + BLOCK_HASH types.Hashable, +]( + lggr logger.Logger, +) *HeadBroadcaster[H, BLOCK_HASH] { + return &HeadBroadcaster[H, BLOCK_HASH]{ + logger: logger.Named(lggr, "HeadBroadcaster"), + callbacks: 
make(callbackSet[H, BLOCK_HASH]), + mailbox: mailbox.NewSingle[H](), + chClose: make(chan struct{}), + } +} + +func (hb *HeadBroadcaster[H, BLOCK_HASH]) Start(context.Context) error { + return hb.StartOnce("HeadBroadcaster", func() error { + hb.wgDone.Add(1) + go hb.run() + return nil + }) +} + +func (hb *HeadBroadcaster[H, BLOCK_HASH]) Close() error { + return hb.StopOnce("HeadBroadcaster", func() error { + hb.mutex.Lock() + // clear all callbacks + hb.callbacks = make(callbackSet[H, BLOCK_HASH]) + hb.mutex.Unlock() + + close(hb.chClose) + hb.wgDone.Wait() + return nil + }) +} + +func (hb *HeadBroadcaster[H, BLOCK_HASH]) Name() string { + return hb.logger.Name() +} + +func (hb *HeadBroadcaster[H, BLOCK_HASH]) HealthReport() map[string]error { + return map[string]error{hb.Name(): hb.Healthy()} +} + +func (hb *HeadBroadcaster[H, BLOCK_HASH]) BroadcastNewLongestChain(head H) { + hb.mailbox.Deliver(head) +} + +// Subscribe subscribes to OnNewLongestChain and Connect until HeadBroadcaster is closed, +// or unsubscribe callback is called explicitly +func (hb *HeadBroadcaster[H, BLOCK_HASH]) Subscribe(callback types.HeadTrackable[H, BLOCK_HASH]) (currentLongestChain H, unsubscribe func()) { + hb.mutex.Lock() + defer hb.mutex.Unlock() + + currentLongestChain = hb.latest + + hb.lastCallbackID++ + callbackID := hb.lastCallbackID + hb.callbacks[callbackID] = callback + unsubscribe = func() { + hb.mutex.Lock() + defer hb.mutex.Unlock() + delete(hb.callbacks, callbackID) + } + + return +} + +func (hb *HeadBroadcaster[H, BLOCK_HASH]) run() { + defer hb.wgDone.Done() + + for { + select { + case <-hb.chClose: + return + case <-hb.mailbox.Notify(): + hb.executeCallbacks() + } + } +} + +// DEV: the head relayer makes no promises about head delivery! Subscribing +// Jobs should expect to the relayer to skip heads if there is a large number of listeners +// and all callbacks cannot be completed in the allotted time. 
+func (hb *HeadBroadcaster[H, BLOCK_HASH]) executeCallbacks() { + head, exists := hb.mailbox.Retrieve() + if !exists { + hb.logger.Info("No head to retrieve. It might have been skipped") + return + } + + hb.mutex.Lock() + callbacks := hb.callbacks.values() + hb.latest = head + hb.mutex.Unlock() + + hb.logger.Debugw("Initiating callbacks", + "headNum", head.BlockNumber(), + "numCallbacks", len(callbacks), + ) + + wg := sync.WaitGroup{} + wg.Add(len(callbacks)) + + ctx, cancel := hb.chClose.NewCtx() + defer cancel() + + for _, callback := range callbacks { + go func(trackable types.HeadTrackable[H, BLOCK_HASH]) { + defer wg.Done() + start := time.Now() + cctx, cancel := context.WithTimeout(ctx, TrackableCallbackTimeout) + defer cancel() + trackable.OnNewLongestChain(cctx, head) + elapsed := time.Since(start) + hb.logger.Debugw(fmt.Sprintf("Finished callback in %s", elapsed), + "callbackType", reflect.TypeOf(trackable), "blockNumber", head.BlockNumber(), "time", elapsed) + }(callback) + } + + wg.Wait() +} diff --git a/common/headtracker/head_listener.go b/common/headtracker/head_listener.go new file mode 100644 index 00000000..3de02320 --- /dev/null +++ b/common/headtracker/head_listener.go @@ -0,0 +1,220 @@ +package headtracker + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + + htrktypes "github.com/goplugin/pluginv3.0/v2/common/headtracker/types" + "github.com/goplugin/pluginv3.0/v2/common/internal/utils" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +var ( + promNumHeadsReceived = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "head_tracker_heads_received", + Help: "The total number of heads seen", + }, []string{"ChainID"}) + promEthConnectionErrors = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: 
"head_tracker_connection_errors", + Help: "The total number of node connection errors", + }, []string{"ChainID"}) +) + +type HeadListener[ + HTH htrktypes.Head[BLOCK_HASH, ID], + S types.Subscription, + ID types.ID, + BLOCK_HASH types.Hashable, +] struct { + config htrktypes.Config + client htrktypes.Client[HTH, S, ID, BLOCK_HASH] + logger logger.Logger + chStop services.StopChan + chHeaders chan HTH + headSubscription types.Subscription + connected atomic.Bool + receivingHeads atomic.Bool +} + +func NewHeadListener[ + HTH htrktypes.Head[BLOCK_HASH, ID], + S types.Subscription, + ID types.ID, + BLOCK_HASH types.Hashable, + CLIENT htrktypes.Client[HTH, S, ID, BLOCK_HASH], +]( + lggr logger.Logger, + client CLIENT, + config htrktypes.Config, + chStop chan struct{}, +) *HeadListener[HTH, S, ID, BLOCK_HASH] { + return &HeadListener[HTH, S, ID, BLOCK_HASH]{ + config: config, + client: client, + logger: logger.Named(lggr, "HeadListener"), + chStop: chStop, + } +} + +func (hl *HeadListener[HTH, S, ID, BLOCK_HASH]) Name() string { + return hl.logger.Name() +} + +func (hl *HeadListener[HTH, S, ID, BLOCK_HASH]) ListenForNewHeads(handleNewHead types.NewHeadHandler[HTH, BLOCK_HASH], done func()) { + defer done() + defer hl.unsubscribe() + + ctx, cancel := hl.chStop.NewCtx() + defer cancel() + + for { + if !hl.subscribe(ctx) { + break + } + err := hl.receiveHeaders(ctx, handleNewHead) + if ctx.Err() != nil { + break + } else if err != nil { + hl.logger.Errorw("Error in new head subscription, unsubscribed", "err", err) + continue + } + break + } +} + +func (hl *HeadListener[HTH, S, ID, BLOCK_HASH]) ReceivingHeads() bool { + return hl.receivingHeads.Load() +} + +func (hl *HeadListener[HTH, S, ID, BLOCK_HASH]) Connected() bool { + return hl.connected.Load() +} + +func (hl *HeadListener[HTH, S, ID, BLOCK_HASH]) HealthReport() map[string]error { + var err error + if !hl.ReceivingHeads() { + err = errors.New("Listener is not receiving heads") + } + if !hl.Connected() { + err = 
errors.New("Listener is not connected") + } + return map[string]error{hl.Name(): err} +} + +func (hl *HeadListener[HTH, S, ID, BLOCK_HASH]) receiveHeaders(ctx context.Context, handleNewHead types.NewHeadHandler[HTH, BLOCK_HASH]) error { + var noHeadsAlarmC <-chan time.Time + var noHeadsAlarmT *time.Ticker + noHeadsAlarmDuration := hl.config.BlockEmissionIdleWarningThreshold() + if noHeadsAlarmDuration > 0 { + noHeadsAlarmT = time.NewTicker(noHeadsAlarmDuration) + noHeadsAlarmC = noHeadsAlarmT.C + } + + for { + select { + case <-hl.chStop: + return nil + + case blockHeader, open := <-hl.chHeaders: + chainId := hl.client.ConfiguredChainID() + if noHeadsAlarmT != nil { + // We've received a head, reset the no heads alarm + noHeadsAlarmT.Stop() + noHeadsAlarmT = time.NewTicker(noHeadsAlarmDuration) + noHeadsAlarmC = noHeadsAlarmT.C + } + hl.receivingHeads.Store(true) + if !open { + return errors.New("head listener: chHeaders prematurely closed") + } + if !blockHeader.IsValid() { + hl.logger.Error("got nil block header") + continue + } + + // Compare the chain ID of the block header to the chain ID of the client + if !blockHeader.HasChainID() || blockHeader.ChainID().String() != chainId.String() { + hl.logger.Panicf("head listener for %s received block header for %s", chainId, blockHeader.ChainID()) + } + promNumHeadsReceived.WithLabelValues(chainId.String()).Inc() + + err := handleNewHead(ctx, blockHeader) + if ctx.Err() != nil { + return nil + } else if err != nil { + return err + } + + case err, open := <-hl.headSubscription.Err(): + // err can be nil, because of using chainIDSubForwarder + if !open || err == nil { + return errors.New("head listener: subscription Err channel prematurely closed") + } + return err + + case <-noHeadsAlarmC: + // We haven't received a head on the channel for a long time, log a warning + hl.logger.Warnf("have not received a head for %v", noHeadsAlarmDuration) + hl.receivingHeads.Store(false) + } + } +} + +func (hl *HeadListener[HTH, S, 
ID, BLOCK_HASH]) subscribe(ctx context.Context) bool { + subscribeRetryBackoff := utils.NewRedialBackoff() + + chainId := hl.client.ConfiguredChainID() + + for { + hl.unsubscribe() + + hl.logger.Debugf("Subscribing to new heads on chain %s", chainId.String()) + + select { + case <-hl.chStop: + return false + + case <-time.After(subscribeRetryBackoff.Duration()): + err := hl.subscribeToHead(ctx) + if err != nil { + promEthConnectionErrors.WithLabelValues(chainId.String()).Inc() + hl.logger.Warnw("Failed to subscribe to heads on chain", "chainID", chainId.String(), "err", err) + } else { + hl.logger.Debugf("Subscribed to heads on chain %s", chainId.String()) + return true + } + } + } +} + +func (hl *HeadListener[HTH, S, ID, BLOCK_HASH]) subscribeToHead(ctx context.Context) error { + hl.chHeaders = make(chan HTH) + + var err error + hl.headSubscription, err = hl.client.SubscribeNewHead(ctx, hl.chHeaders) + if err != nil { + close(hl.chHeaders) + return fmt.Errorf("Client#SubscribeNewHead: %w", err) + } + + hl.connected.Store(true) + + return nil +} + +func (hl *HeadListener[HTH, S, ID, BLOCK_HASH]) unsubscribe() { + if hl.headSubscription != nil { + hl.connected.Store(false) + hl.headSubscription.Unsubscribe() + hl.headSubscription = nil + } +} diff --git a/common/headtracker/head_tracker.go b/common/headtracker/head_tracker.go new file mode 100644 index 00000000..de08a63f --- /dev/null +++ b/common/headtracker/head_tracker.go @@ -0,0 +1,361 @@ +package headtracker + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + htrktypes "github.com/goplugin/pluginv3.0/v2/common/headtracker/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +var ( + promCurrentHead = 
promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "head_tracker_current_head", + Help: "The highest seen head number", + }, []string{"evmChainID"}) + + promOldHead = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "head_tracker_very_old_head", + Help: "Counter is incremented every time we get a head that is much lower than the highest seen head ('much lower' is defined as a block that is EVM.FinalityDepth or greater below the highest seen head)", + }, []string{"evmChainID"}) +) + +// HeadsBufferSize - The buffer is used when heads sampling is disabled, to ensure the callback is run for every head +const HeadsBufferSize = 10 + +type HeadTracker[ + HTH htrktypes.Head[BLOCK_HASH, ID], + S types.Subscription, + ID types.ID, + BLOCK_HASH types.Hashable, +] struct { + services.StateMachine + log logger.SugaredLogger + headBroadcaster types.HeadBroadcaster[HTH, BLOCK_HASH] + headSaver types.HeadSaver[HTH, BLOCK_HASH] + mailMon *mailbox.Monitor + client htrktypes.Client[HTH, S, ID, BLOCK_HASH] + chainID ID + config htrktypes.Config + htConfig htrktypes.HeadTrackerConfig + + backfillMB *mailbox.Mailbox[HTH] + broadcastMB *mailbox.Mailbox[HTH] + headListener types.HeadListener[HTH, BLOCK_HASH] + chStop services.StopChan + wgDone sync.WaitGroup + getNilHead func() HTH +} + +// NewHeadTracker instantiates a new HeadTracker using HeadSaver to persist new block numbers. 
+func NewHeadTracker[ + HTH htrktypes.Head[BLOCK_HASH, ID], + S types.Subscription, + ID types.ID, + BLOCK_HASH types.Hashable, +]( + lggr logger.Logger, + client htrktypes.Client[HTH, S, ID, BLOCK_HASH], + config htrktypes.Config, + htConfig htrktypes.HeadTrackerConfig, + headBroadcaster types.HeadBroadcaster[HTH, BLOCK_HASH], + headSaver types.HeadSaver[HTH, BLOCK_HASH], + mailMon *mailbox.Monitor, + getNilHead func() HTH, +) types.HeadTracker[HTH, BLOCK_HASH] { + chStop := make(chan struct{}) + lggr = logger.Named(lggr, "HeadTracker") + return &HeadTracker[HTH, S, ID, BLOCK_HASH]{ + headBroadcaster: headBroadcaster, + client: client, + chainID: client.ConfiguredChainID(), + config: config, + htConfig: htConfig, + log: logger.Sugared(lggr), + backfillMB: mailbox.NewSingle[HTH](), + broadcastMB: mailbox.New[HTH](HeadsBufferSize), + chStop: chStop, + headListener: NewHeadListener[HTH, S, ID, BLOCK_HASH](lggr, client, config, chStop), + headSaver: headSaver, + mailMon: mailMon, + getNilHead: getNilHead, + } +} + +// Start starts HeadTracker service. +func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) Start(ctx context.Context) error { + return ht.StartOnce("HeadTracker", func() error { + ht.log.Debugw("Starting HeadTracker", "chainID", ht.chainID) + latestChain, err := ht.headSaver.Load(ctx) + if err != nil { + return err + } + if latestChain.IsValid() { + ht.log.Debugw( + fmt.Sprintf("HeadTracker: Tracking logs from last block %v with hash %s", latestChain.BlockNumber(), latestChain.BlockHash()), + "blockNumber", latestChain.BlockNumber(), + "blockHash", latestChain.BlockHash(), + ) + } + + // NOTE: Always try to start the head tracker off with whatever the + // latest head is, without waiting for the subscription to send us one. + // + // In some cases the subscription will send us the most recent head + // anyway when we connect (but we should not rely on this because it is + // not specced). 
If it happens this is fine, and the head will be + // ignored as a duplicate. + initialHead, err := ht.getInitialHead(ctx) + if err != nil { + if errors.Is(err, ctx.Err()) { + return nil + } + ht.log.Errorw("Error getting initial head", "err", err) + } else if initialHead.IsValid() { + if err := ht.handleNewHead(ctx, initialHead); err != nil { + return fmt.Errorf("error handling initial head: %w", err) + } + } else { + ht.log.Debug("Got nil initial head") + } + + ht.wgDone.Add(3) + go ht.headListener.ListenForNewHeads(ht.handleNewHead, ht.wgDone.Done) + go ht.backfillLoop() + go ht.broadcastLoop() + + ht.mailMon.Monitor(ht.broadcastMB, "HeadTracker", "Broadcast", ht.chainID.String()) + + return nil + }) +} + +// Close stops HeadTracker service. +func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) Close() error { + return ht.StopOnce("HeadTracker", func() error { + close(ht.chStop) + ht.wgDone.Wait() + return ht.broadcastMB.Close() + }) +} + +func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) Name() string { + return ht.log.Name() +} + +func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) HealthReport() map[string]error { + report := map[string]error{ht.Name(): ht.Healthy()} + services.CopyHealth(report, ht.headListener.HealthReport()) + return report +} + +func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) Backfill(ctx context.Context, headWithChain HTH, depth uint) (err error) { + if uint(headWithChain.ChainLength()) >= depth { + return nil + } + + baseHeight := headWithChain.BlockNumber() - int64(depth-1) + if baseHeight < 0 { + baseHeight = 0 + } + + return ht.backfill(ctx, headWithChain.EarliestHeadInChain(), baseHeight) +} + +func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) LatestChain() HTH { + return ht.headSaver.LatestChain() +} + +func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) getInitialHead(ctx context.Context) (HTH, error) { + head, err := ht.client.HeadByNumber(ctx, nil) + if err != nil { + return ht.getNilHead(), fmt.Errorf("failed to fetch initial head: %w", err) + } + 
loggerFields := []interface{}{"head", head} + if head.IsValid() { + loggerFields = append(loggerFields, "blockNumber", head.BlockNumber(), "blockHash", head.BlockHash()) + } + ht.log.Debugw("Got initial head", loggerFields...) + return head, nil +} + +func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) handleNewHead(ctx context.Context, head HTH) error { + prevHead := ht.headSaver.LatestChain() + + ht.log.Debugw(fmt.Sprintf("Received new head %v", head.BlockNumber()), + "blockHash", head.BlockHash(), + "parentHeadHash", head.GetParentHash(), + "blockTs", head.GetTimestamp(), + "blockTsUnix", head.GetTimestamp().Unix(), + "blockDifficulty", head.BlockDifficulty(), + ) + + err := ht.headSaver.Save(ctx, head) + if ctx.Err() != nil { + return nil + } else if err != nil { + return fmt.Errorf("failed to save head: %#v: %w", head, err) + } + + if !prevHead.IsValid() || head.BlockNumber() > prevHead.BlockNumber() { + promCurrentHead.WithLabelValues(ht.chainID.String()).Set(float64(head.BlockNumber())) + + headWithChain := ht.headSaver.Chain(head.BlockHash()) + if !headWithChain.IsValid() { + return fmt.Errorf("HeadTracker#handleNewHighestHead headWithChain was unexpectedly nil") + } + ht.backfillMB.Deliver(headWithChain) + ht.broadcastMB.Deliver(headWithChain) + } else if head.BlockNumber() == prevHead.BlockNumber() { + if head.BlockHash() != prevHead.BlockHash() { + ht.log.Debugw("Got duplicate head", "blockNum", head.BlockNumber(), "head", head.BlockHash(), "prevHead", prevHead.BlockHash()) + } else { + ht.log.Debugw("Head already in the database", "head", head.BlockHash()) + } + } else { + ht.log.Debugw("Got out of order head", "blockNum", head.BlockNumber(), "head", head.BlockHash(), "prevHead", prevHead.BlockNumber()) + prevUnFinalizedHead := prevHead.BlockNumber() - int64(ht.config.FinalityDepth()) + if head.BlockNumber() < prevUnFinalizedHead { + promOldHead.WithLabelValues(ht.chainID.String()).Inc() + ht.log.Criticalf("Got very old block with number %d (highest seen was 
%d). This is a problem and either means a very deep re-org occurred, one of the RPC nodes has gotten far out of sync, or the chain went backwards in block numbers. This node may not function correctly without manual intervention.", head.BlockNumber(), prevHead.BlockNumber()) + ht.SvcErrBuffer.Append(errors.New("got very old block")) + } + } + return nil +} + +func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) broadcastLoop() { + defer ht.wgDone.Done() + + samplingInterval := ht.htConfig.SamplingInterval() + if samplingInterval > 0 { + ht.log.Debugf("Head sampling is enabled - sampling interval is set to: %v", samplingInterval) + debounceHead := time.NewTicker(samplingInterval) + defer debounceHead.Stop() + for { + select { + case <-ht.chStop: + return + case <-debounceHead.C: + item := ht.broadcastMB.RetrieveLatestAndClear() + if !item.IsValid() { + continue + } + ht.headBroadcaster.BroadcastNewLongestChain(item) + } + } + } else { + ht.log.Info("Head sampling is disabled - callback will be called on every head") + for { + select { + case <-ht.chStop: + return + case <-ht.broadcastMB.Notify(): + for { + item, exists := ht.broadcastMB.Retrieve() + if !exists { + break + } + ht.headBroadcaster.BroadcastNewLongestChain(item) + } + } + } + } +} + +func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) backfillLoop() { + defer ht.wgDone.Done() + + ctx, cancel := ht.chStop.NewCtx() + defer cancel() + + for { + select { + case <-ht.chStop: + return + case <-ht.backfillMB.Notify(): + for { + head, exists := ht.backfillMB.Retrieve() + if !exists { + break + } + { + err := ht.Backfill(ctx, head, uint(ht.config.FinalityDepth())) + if err != nil { + ht.log.Warnw("Unexpected error while backfilling heads", "err", err) + } else if ctx.Err() != nil { + break + } + } + } + } + } +} + +// backfill fetches all missing heads up until the base height +func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) backfill(ctx context.Context, head types.Head[BLOCK_HASH], baseHeight int64) (err error) { + 
headBlockNumber := head.BlockNumber() + if headBlockNumber <= baseHeight { + return nil + } + mark := time.Now() + fetched := 0 + l := ht.log.With("blockNumber", headBlockNumber, + "n", headBlockNumber-baseHeight, + "fromBlockHeight", baseHeight, + "toBlockHeight", headBlockNumber-1) + l.Debug("Starting backfill") + defer func() { + if ctx.Err() != nil { + l.Warnw("Backfill context error", "err", ctx.Err()) + return + } + l.Debugw("Finished backfill", + "fetched", fetched, + "time", time.Since(mark), + "err", err) + }() + + for i := head.BlockNumber() - 1; i >= baseHeight; i-- { + // NOTE: Sequential requests here mean it's a potential performance bottleneck, be aware! + existingHead := ht.headSaver.Chain(head.GetParentHash()) + if existingHead.IsValid() { + head = existingHead + continue + } + head, err = ht.fetchAndSaveHead(ctx, i, head.GetParentHash()) + fetched++ + if ctx.Err() != nil { + ht.log.Debugw("context canceled, aborting backfill", "err", err, "ctx.Err", ctx.Err()) + break + } else if err != nil { + return fmt.Errorf("fetchAndSaveHead failed: %w", err) + } + } + return +} + +func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) fetchAndSaveHead(ctx context.Context, n int64, hash BLOCK_HASH) (HTH, error) { + ht.log.Debugw("Fetching head", "blockHeight", n, "blockHash", hash) + head, err := ht.client.HeadByHash(ctx, hash) + if err != nil { + return ht.getNilHead(), err + } else if !head.IsValid() { + return ht.getNilHead(), errors.New("got nil head") + } + err = ht.headSaver.Save(ctx, head) + if err != nil { + return ht.getNilHead(), err + } + return head, nil +} diff --git a/common/headtracker/types/client.go b/common/headtracker/types/client.go new file mode 100644 index 00000000..434edd89 --- /dev/null +++ b/common/headtracker/types/client.go @@ -0,0 +1,18 @@ +package types + +import ( + "context" + "math/big" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +type Client[H types.Head[BLOCK_HASH], S types.Subscription, ID types.ID, BLOCK_HASH 
types.Hashable] interface { + HeadByNumber(ctx context.Context, number *big.Int) (head H, err error) + HeadByHash(ctx context.Context, hash BLOCK_HASH) (head H, err error) + // ConfiguredChainID returns the chain ID that the node is configured to connect to + ConfiguredChainID() (id ID) + // SubscribeNewHead is the method in which the client receives new Head. + // It can be implemented differently for each chain i.e websocket, polling, etc + SubscribeNewHead(ctx context.Context, ch chan<- H) (S, error) +} diff --git a/common/headtracker/types/config.go b/common/headtracker/types/config.go new file mode 100644 index 00000000..ca64f7a2 --- /dev/null +++ b/common/headtracker/types/config.go @@ -0,0 +1,14 @@ +package types + +import "time" + +type Config interface { + BlockEmissionIdleWarningThreshold() time.Duration + FinalityDepth() uint32 +} + +type HeadTrackerConfig interface { + HistoryDepth() uint32 + MaxBufferSize() uint32 + SamplingInterval() time.Duration +} diff --git a/common/headtracker/types/head.go b/common/headtracker/types/head.go new file mode 100644 index 00000000..543e2c84 --- /dev/null +++ b/common/headtracker/types/head.go @@ -0,0 +1,16 @@ +package types + +import ( + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +//go:generate mockery --quiet --name Head --output ./mocks/ --case=underscore +type Head[BLOCK_HASH types.Hashable, CHAIN_ID types.ID] interface { + types.Head[BLOCK_HASH] + // ChainID returns the chain ID that the head is for + ChainID() CHAIN_ID + // Returns true if the head has a chain Id + HasChainID() bool + // IsValid returns true if the head is valid. + IsValid() bool +} diff --git a/common/headtracker/types/mocks/head.go b/common/headtracker/types/mocks/head.go new file mode 100644 index 00000000..3dc812b6 --- /dev/null +++ b/common/headtracker/types/mocks/head.go @@ -0,0 +1,254 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + mock "github.com/stretchr/testify/mock" + + time "time" + + types "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// Head is an autogenerated mock type for the Head type +type Head[BLOCK_HASH types.Hashable, CHAIN_ID types.ID] struct { + mock.Mock +} + +// BlockDifficulty provides a mock function with given fields: +func (_m *Head[BLOCK_HASH, CHAIN_ID]) BlockDifficulty() *big.Int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockDifficulty") + } + + var r0 *big.Int + if rf, ok := ret.Get(0).(func() *big.Int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + return r0 +} + +// BlockHash provides a mock function with given fields: +func (_m *Head[BLOCK_HASH, CHAIN_ID]) BlockHash() BLOCK_HASH { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockHash") + } + + var r0 BLOCK_HASH + if rf, ok := ret.Get(0).(func() BLOCK_HASH); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(BLOCK_HASH) + } + + return r0 +} + +// BlockNumber provides a mock function with given fields: +func (_m *Head[BLOCK_HASH, CHAIN_ID]) BlockNumber() int64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// ChainID provides a mock function with given fields: +func (_m *Head[BLOCK_HASH, CHAIN_ID]) ChainID() CHAIN_ID { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 CHAIN_ID + if rf, ok := ret.Get(0).(func() CHAIN_ID); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(CHAIN_ID) + } + + return r0 +} + +// ChainLength provides a mock function with given fields: +func (_m *Head[BLOCK_HASH, CHAIN_ID]) ChainLength() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value 
specified for ChainLength") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// EarliestHeadInChain provides a mock function with given fields: +func (_m *Head[BLOCK_HASH, CHAIN_ID]) EarliestHeadInChain() types.Head[BLOCK_HASH] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EarliestHeadInChain") + } + + var r0 types.Head[BLOCK_HASH] + if rf, ok := ret.Get(0).(func() types.Head[BLOCK_HASH]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Head[BLOCK_HASH]) + } + } + + return r0 +} + +// GetParent provides a mock function with given fields: +func (_m *Head[BLOCK_HASH, CHAIN_ID]) GetParent() types.Head[BLOCK_HASH] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetParent") + } + + var r0 types.Head[BLOCK_HASH] + if rf, ok := ret.Get(0).(func() types.Head[BLOCK_HASH]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Head[BLOCK_HASH]) + } + } + + return r0 +} + +// GetParentHash provides a mock function with given fields: +func (_m *Head[BLOCK_HASH, CHAIN_ID]) GetParentHash() BLOCK_HASH { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetParentHash") + } + + var r0 BLOCK_HASH + if rf, ok := ret.Get(0).(func() BLOCK_HASH); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(BLOCK_HASH) + } + + return r0 +} + +// GetTimestamp provides a mock function with given fields: +func (_m *Head[BLOCK_HASH, CHAIN_ID]) GetTimestamp() time.Time { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetTimestamp") + } + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// HasChainID provides a mock function with given fields: +func (_m *Head[BLOCK_HASH, CHAIN_ID]) HasChainID() bool { + ret := _m.Called() 
+ + if len(ret) == 0 { + panic("no return value specified for HasChainID") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// HashAtHeight provides a mock function with given fields: blockNum +func (_m *Head[BLOCK_HASH, CHAIN_ID]) HashAtHeight(blockNum int64) BLOCK_HASH { + ret := _m.Called(blockNum) + + if len(ret) == 0 { + panic("no return value specified for HashAtHeight") + } + + var r0 BLOCK_HASH + if rf, ok := ret.Get(0).(func(int64) BLOCK_HASH); ok { + r0 = rf(blockNum) + } else { + r0 = ret.Get(0).(BLOCK_HASH) + } + + return r0 +} + +// IsValid provides a mock function with given fields: +func (_m *Head[BLOCK_HASH, CHAIN_ID]) IsValid() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsValid") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NewHead creates a new instance of Head. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHead[BLOCK_HASH types.Hashable, CHAIN_ID types.ID](t interface { + mock.TestingT + Cleanup(func()) +}) *Head[BLOCK_HASH, CHAIN_ID] { + mock := &Head[BLOCK_HASH, CHAIN_ID]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/headtracker/types/mocks/subscription.go b/common/headtracker/types/mocks/subscription.go new file mode 100644 index 00000000..b9cb7886 --- /dev/null +++ b/common/headtracker/types/mocks/subscription.go @@ -0,0 +1,46 @@ +// Code generated by mockery v2.28.1. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Subscription is an autogenerated mock type for the Subscription type +type Subscription struct { + mock.Mock +} + +// Err provides a mock function with given fields: +func (_m *Subscription) Err() <-chan error { + ret := _m.Called() + + var r0 <-chan error + if rf, ok := ret.Get(0).(func() <-chan error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan error) + } + } + + return r0 +} + +// Unsubscribe provides a mock function with given fields: +func (_m *Subscription) Unsubscribe() { + _m.Called() +} + +type mockConstructorTestingTNewSubscription interface { + mock.TestingT + Cleanup(func()) +} + +// NewSubscription creates a new instance of Subscription. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewSubscription(t mockConstructorTestingTNewSubscription) *Subscription { + mock := &Subscription{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/internal/utils/utils.go b/common/internal/utils/utils.go new file mode 100644 index 00000000..1e285868 --- /dev/null +++ b/common/internal/utils/utils.go @@ -0,0 +1,36 @@ +package utils + +import ( + "cmp" + "slices" + "time" + + "github.com/jpillora/backoff" + "golang.org/x/exp/constraints" +) + +// NewRedialBackoff is a standard backoff to use for redialling or reconnecting to +// unreachable network endpoints +func NewRedialBackoff() backoff.Backoff { + return backoff.Backoff{ + Min: 1 * time.Second, + Max: 15 * time.Second, + Jitter: true, + } + +} + +// MinFunc returns the minimum value of the given element array with respect +// to the given key function. In the event U is not a compound type (e.g a +// struct) an identity function can be provided. 
+func MinFunc[U any, T constraints.Ordered](elems []U, f func(U) T) T { + var min T + if len(elems) == 0 { + return min + } + + e := slices.MinFunc(elems, func(a, b U) int { + return cmp.Compare(f(a), f(b)) + }) + return f(e) +} diff --git a/common/mocks/head_broadcaster.go b/common/mocks/head_broadcaster.go new file mode 100644 index 00000000..88556ddb --- /dev/null +++ b/common/mocks/head_broadcaster.go @@ -0,0 +1,156 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + types "github.com/goplugin/pluginv3.0/v2/common/types" + mock "github.com/stretchr/testify/mock" +) + +// HeadBroadcaster is an autogenerated mock type for the HeadBroadcaster type +type HeadBroadcaster[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] struct { + mock.Mock +} + +// BroadcastNewLongestChain provides a mock function with given fields: _a0 +func (_m *HeadBroadcaster[H, BLOCK_HASH]) BroadcastNewLongestChain(_a0 H) { + _m.Called(_a0) +} + +// Close provides a mock function with given fields: +func (_m *HeadBroadcaster[H, BLOCK_HASH]) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// HealthReport provides a mock function with given fields: +func (_m *HeadBroadcaster[H, BLOCK_HASH]) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *HeadBroadcaster[H, BLOCK_HASH]) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, 
ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Ready provides a mock function with given fields: +func (_m *HeadBroadcaster[H, BLOCK_HASH]) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *HeadBroadcaster[H, BLOCK_HASH]) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Subscribe provides a mock function with given fields: callback +func (_m *HeadBroadcaster[H, BLOCK_HASH]) Subscribe(callback types.HeadTrackable[H, BLOCK_HASH]) (H, func()) { + ret := _m.Called(callback) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 H + var r1 func() + if rf, ok := ret.Get(0).(func(types.HeadTrackable[H, BLOCK_HASH]) (H, func())); ok { + return rf(callback) + } + if rf, ok := ret.Get(0).(func(types.HeadTrackable[H, BLOCK_HASH]) H); ok { + r0 = rf(callback) + } else { + r0 = ret.Get(0).(H) + } + + if rf, ok := ret.Get(1).(func(types.HeadTrackable[H, BLOCK_HASH]) func()); ok { + r1 = rf(callback) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(func()) + } + } + + return r0, r1 +} + +// NewHeadBroadcaster creates a new instance of HeadBroadcaster. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewHeadBroadcaster[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable](t interface { + mock.TestingT + Cleanup(func()) +}) *HeadBroadcaster[H, BLOCK_HASH] { + mock := &HeadBroadcaster[H, BLOCK_HASH]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/mocks/head_tracker.go b/common/mocks/head_tracker.go new file mode 100644 index 00000000..34b4e4ae --- /dev/null +++ b/common/mocks/head_tracker.go @@ -0,0 +1,157 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + types "github.com/goplugin/pluginv3.0/v2/common/types" + mock "github.com/stretchr/testify/mock" +) + +// HeadTracker is an autogenerated mock type for the HeadTracker type +type HeadTracker[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] struct { + mock.Mock +} + +// Backfill provides a mock function with given fields: ctx, headWithChain, depth +func (_m *HeadTracker[H, BLOCK_HASH]) Backfill(ctx context.Context, headWithChain H, depth uint) error { + ret := _m.Called(ctx, headWithChain, depth) + + if len(ret) == 0 { + panic("no return value specified for Backfill") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, H, uint) error); ok { + r0 = rf(ctx, headWithChain, depth) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Close provides a mock function with given fields: +func (_m *HeadTracker[H, BLOCK_HASH]) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// HealthReport provides a mock function with given fields: +func (_m *HeadTracker[H, BLOCK_HASH]) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() 
map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// LatestChain provides a mock function with given fields: +func (_m *HeadTracker[H, BLOCK_HASH]) LatestChain() H { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestChain") + } + + var r0 H + if rf, ok := ret.Get(0).(func() H); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(H) + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *HeadTracker[H, BLOCK_HASH]) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Ready provides a mock function with given fields: +func (_m *HeadTracker[H, BLOCK_HASH]) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *HeadTracker[H, BLOCK_HASH]) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewHeadTracker creates a new instance of HeadTracker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewHeadTracker[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable](t interface { + mock.TestingT + Cleanup(func()) +}) *HeadTracker[H, BLOCK_HASH] { + mock := &HeadTracker[H, BLOCK_HASH]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/txmgr/broadcaster.go b/common/txmgr/broadcaster.go new file mode 100644 index 00000000..1f94c52f --- /dev/null +++ b/common/txmgr/broadcaster.go @@ -0,0 +1,854 @@ +package txmgr + +import ( + "context" + "database/sql" + "errors" + "fmt" + "slices" + "sync" + "time" + + "github.com/jpillora/backoff" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/multierr" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/chains/label" + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + + "github.com/goplugin/pluginv3.0/v2/common/client" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +const ( + // InFlightTransactionRecheckInterval controls how often the Broadcaster + // will poll the unconfirmed queue to see if it is allowed to send another + // transaction + InFlightTransactionRecheckInterval = 1 * time.Second + + // TransmitCheckTimeout controls the maximum amount of time that will be + // spent on the transmit check. 
+ TransmitCheckTimeout = 2 * time.Second +) + +var ( + promTimeUntilBroadcast = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "tx_manager_time_until_tx_broadcast", + Help: "The amount of time elapsed from when a transaction is enqueued to until it is broadcast.", + Buckets: []float64{ + float64(500 * time.Millisecond), + float64(time.Second), + float64(5 * time.Second), + float64(15 * time.Second), + float64(30 * time.Second), + float64(time.Minute), + float64(2 * time.Minute), + }, + }, []string{"chainID"}) +) + +var ErrTxRemoved = errors.New("tx removed") + +type ProcessUnstartedTxs[ADDR types.Hashable] func(ctx context.Context, fromAddress ADDR) (retryable bool, err error) + +// TransmitCheckerFactory creates a transmit checker based on a spec. +type TransmitCheckerFactory[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH, BLOCK_HASH types.Hashable, + SEQ types.Sequence, + FEE feetypes.Fee, +] interface { + // BuildChecker builds a new TransmitChecker based on the given spec. + BuildChecker(spec txmgrtypes.TransmitCheckerSpec[ADDR]) (TransmitChecker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) +} + +// TransmitChecker determines whether a transaction should be submitted on-chain. +type TransmitChecker[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH, BLOCK_HASH types.Hashable, + SEQ types.Sequence, + FEE feetypes.Fee, +] interface { + + // Check the given transaction. If the transaction should not be sent, an error indicating why + // is returned. Errors should only be returned if the checker can confirm that a transaction + // should not be sent, other errors (for example connection or other unexpected errors) should + // be logged and swallowed. 
+ Check(ctx context.Context, l logger.SugaredLogger, tx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], a txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error +} + +// Broadcaster monitors txes for transactions that need to +// be broadcast, assigns sequences and ensures that at least one node +// somewhere has received the transaction successfully. +// +// This does not guarantee delivery! A whole host of other things can +// subsequently go wrong such as transactions being evicted from the mempool, +// nodes going offline etc. Responsibility for ensuring eventual inclusion +// into the chain falls on the shoulders of the confirmer. +// +// What Broadcaster does guarantee is: +// - a monotonic series of increasing sequences for txes that can all eventually be confirmed if you retry enough times +// - transition of txes out of unstarted into either fatal_error or unconfirmed +// - existence of a saved tx_attempt +type Broadcaster[ + CHAIN_ID types.ID, + HEAD types.Head[BLOCK_HASH], + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + SEQ types.Sequence, + FEE feetypes.Fee, +] struct { + services.StateMachine + lggr logger.SugaredLogger + txStore txmgrtypes.TransactionStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, SEQ, FEE] + client txmgrtypes.TransactionClient[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + txmgrtypes.TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + sequenceSyncer SequenceSyncer[ADDR, TX_HASH, BLOCK_HASH, SEQ] + resumeCallback ResumeCallback + chainID CHAIN_ID + config txmgrtypes.BroadcasterChainConfig + feeConfig txmgrtypes.BroadcasterFeeConfig + txConfig txmgrtypes.BroadcasterTransactionsConfig + listenerConfig txmgrtypes.BroadcasterListenerConfig + + // autoSyncSequence, if set, will cause Broadcaster to fast-forward the sequence + // when Start is called + autoSyncSequence bool + + processUnstartedTxsImpl ProcessUnstartedTxs[ADDR] + + ks txmgrtypes.KeyStore[ADDR, 
CHAIN_ID, SEQ] + enabledAddresses []ADDR + + checkerFactory TransmitCheckerFactory[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + + // triggers allow other goroutines to force Broadcaster to rescan the + // database early (before the next poll interval) + // Each key has its own trigger + triggers map[ADDR]chan struct{} + + chStop services.StopChan + wg sync.WaitGroup + + initSync sync.Mutex + isStarted bool + + sequenceLock sync.RWMutex + nextSequenceMap map[ADDR]SEQ + generateNextSequence types.GenerateNextSequenceFunc[SEQ] +} + +func NewBroadcaster[ + CHAIN_ID types.ID, + HEAD types.Head[BLOCK_HASH], + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + SEQ types.Sequence, + FEE feetypes.Fee, +]( + txStore txmgrtypes.TransactionStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, SEQ, FEE], + client txmgrtypes.TransactionClient[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + config txmgrtypes.BroadcasterChainConfig, + feeConfig txmgrtypes.BroadcasterFeeConfig, + txConfig txmgrtypes.BroadcasterTransactionsConfig, + listenerConfig txmgrtypes.BroadcasterListenerConfig, + keystore txmgrtypes.KeyStore[ADDR, CHAIN_ID, SEQ], + txAttemptBuilder txmgrtypes.TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + sequenceSyncer SequenceSyncer[ADDR, TX_HASH, BLOCK_HASH, SEQ], + lggr logger.Logger, + checkerFactory TransmitCheckerFactory[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + autoSyncSequence bool, + generateNextSequence types.GenerateNextSequenceFunc[SEQ], +) *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] { + lggr = logger.Named(lggr, "Broadcaster") + b := &Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]{ + lggr: logger.Sugared(lggr), + txStore: txStore, + client: client, + TxAttemptBuilder: txAttemptBuilder, + sequenceSyncer: sequenceSyncer, + chainID: client.ConfiguredChainID(), + config: config, + feeConfig: feeConfig, + txConfig: txConfig, + listenerConfig: listenerConfig, + ks: 
keystore, + checkerFactory: checkerFactory, + autoSyncSequence: autoSyncSequence, + } + + b.processUnstartedTxsImpl = b.processUnstartedTxs + b.generateNextSequence = generateNextSequence + return b +} + +// Start starts Broadcaster service. +// The provided context can be used to terminate Start sequence. +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Start(ctx context.Context) error { + return eb.StartOnce("Broadcaster", func() (err error) { + return eb.startInternal(ctx) + }) +} + +// startInternal can be called multiple times, in conjunction with closeInternal. The TxMgr uses this functionality to reset broadcaster multiple times in its own lifetime. +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) startInternal(ctx context.Context) error { + eb.initSync.Lock() + defer eb.initSync.Unlock() + if eb.isStarted { + return errors.New("Broadcaster is already started") + } + var err error + eb.enabledAddresses, err = eb.ks.EnabledAddressesForChain(eb.chainID) + if err != nil { + return fmt.Errorf("Broadcaster: failed to load EnabledAddressesForChain: %w", err) + } + + if len(eb.enabledAddresses) > 0 { + eb.lggr.Debugw(fmt.Sprintf("Booting with %d keys", len(eb.enabledAddresses)), "keys", eb.enabledAddresses) + } else { + eb.lggr.Warnf("Chain %s does not have any keys, no transactions will be sent on this chain", eb.chainID.String()) + } + eb.chStop = make(chan struct{}) + eb.wg = sync.WaitGroup{} + eb.wg.Add(len(eb.enabledAddresses)) + eb.triggers = make(map[ADDR]chan struct{}) + eb.sequenceLock.Lock() + eb.nextSequenceMap = eb.loadNextSequenceMap(ctx, eb.enabledAddresses) + eb.sequenceLock.Unlock() + for _, addr := range eb.enabledAddresses { + triggerCh := make(chan struct{}, 1) + eb.triggers[addr] = triggerCh + go eb.monitorTxs(addr, triggerCh) + } + + eb.isStarted = true + return nil +} + +// Close closes the Broadcaster +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Close() 
error { + return eb.StopOnce("Broadcaster", func() error { + return eb.closeInternal() + }) +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) closeInternal() error { + eb.initSync.Lock() + defer eb.initSync.Unlock() + if !eb.isStarted { + return fmt.Errorf("Broadcaster is not started: %w", services.ErrAlreadyStopped) + } + close(eb.chStop) + eb.wg.Wait() + eb.isStarted = false + return nil +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) SetResumeCallback(callback ResumeCallback) { + eb.resumeCallback = callback +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Name() string { + return eb.lggr.Name() +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) HealthReport() map[string]error { + return map[string]error{eb.Name(): eb.Healthy()} +} + +// Trigger forces the monitor for a particular address to recheck for new txes +// Logs error and does nothing if address was not registered on startup +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Trigger(addr ADDR) { + if eb.isStarted { + triggerCh, exists := eb.triggers[addr] + if !exists { + // ignoring trigger for address which is not registered with this Broadcaster + return + } + select { + case triggerCh <- struct{}{}: + default: + } + } else { + eb.lggr.Debugf("Unstarted; ignoring trigger for %s", addr) + } +} + +// Load the next sequence map using the tx table or on-chain (if not found in tx table) +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) loadNextSequenceMap(ctx context.Context, addresses []ADDR) map[ADDR]SEQ { + nextSequenceMap := make(map[ADDR]SEQ) + for _, address := range addresses { + seq, err := eb.getSequenceForAddr(ctx, address) + if err == nil { + nextSequenceMap[address] = seq + } + } + + return nextSequenceMap +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) getSequenceForAddr(ctx 
context.Context, address ADDR) (seq SEQ, err error) { + // Get the highest sequence from the tx table + // Will need to be incremented since this sequence is already used + seq, err = eb.txStore.FindLatestSequence(ctx, address, eb.chainID) + if err == nil { + seq = eb.generateNextSequence(seq) + return seq, nil + } + // Look for nonce on-chain if no tx found for address in TxStore or if error occurred + // Returns the nonce that should be used for the next transaction so no need to increment + seq, err = eb.client.PendingSequenceAt(ctx, address) + if err == nil { + return seq, nil + } + eb.lggr.Criticalw("failed to retrieve next sequence from on-chain for address: ", "address", address.String()) + return seq, err + +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) newSequenceSyncBackoff() backoff.Backoff { + return backoff.Backoff{ + Min: 100 * time.Millisecond, + Max: 5 * time.Second, + Jitter: true, + } +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) newResendBackoff() backoff.Backoff { + return backoff.Backoff{ + Min: 1 * time.Second, + Max: 15 * time.Second, + Jitter: true, + } +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) monitorTxs(addr ADDR, triggerCh chan struct{}) { + defer eb.wg.Done() + + ctx, cancel := eb.chStop.NewCtx() + defer cancel() + + if eb.autoSyncSequence { + eb.lggr.Debugw("Auto-syncing sequence", "address", addr.String()) + eb.SyncSequence(ctx, addr) + if ctx.Err() != nil { + return + } + } else { + eb.lggr.Debugw("Skipping sequence auto-sync", "address", addr.String()) + } + + // errorRetryCh allows retry on exponential backoff in case of timeout or + // other unknown error + var errorRetryCh <-chan time.Time + bf := eb.newResendBackoff() + + for { + pollDBTimer := time.NewTimer(utils.WithJitter(eb.listenerConfig.FallbackPollInterval())) + + retryable, err := eb.processUnstartedTxsImpl(ctx, addr) + if err != nil { + eb.lggr.Errorw("Error 
occurred while handling tx queue in ProcessUnstartedTxs", "err", err) + } + // On retryable errors we implement exponential backoff retries. This + // handles intermittent connectivity, remote RPC races, timing issues etc + if retryable { + pollDBTimer.Reset(utils.WithJitter(eb.listenerConfig.FallbackPollInterval())) + errorRetryCh = time.After(bf.Duration()) + } else { + bf = eb.newResendBackoff() + errorRetryCh = nil + } + + select { + case <-ctx.Done(): + // NOTE: See: https://godoc.org/time#Timer.Stop for an explanation of this pattern + if !pollDBTimer.Stop() { + <-pollDBTimer.C + } + return + case <-triggerCh: + // tx was inserted + if !pollDBTimer.Stop() { + <-pollDBTimer.C + } + continue + case <-pollDBTimer.C: + // DB poller timed out + continue + case <-errorRetryCh: + // Error backoff period reached + continue + } + } +} + +// syncSequence tries to sync the key sequence, retrying indefinitely until success or stop signal is sent +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) SyncSequence(ctx context.Context, addr ADDR) { + sequenceSyncRetryBackoff := eb.newSequenceSyncBackoff() + localSequence, err := eb.GetNextSequence(ctx, addr) + // Address not found in map so skip sync + if err != nil { + eb.lggr.Criticalw("Failed to retrieve local next sequence for address", "address", addr.String(), "err", err) + return + } + + // Enter loop with retries + var attempt int + for { + select { + case <-eb.chStop: + return + case <-time.After(sequenceSyncRetryBackoff.Duration()): + attempt++ + newNextSequence, err := eb.sequenceSyncer.Sync(ctx, addr, localSequence) + if err != nil { + if attempt > 5 { + eb.lggr.Criticalw("Failed to sync with on-chain sequence", "address", addr.String(), "attempt", attempt, "err", err) + eb.SvcErrBuffer.Append(err) + } else { + eb.lggr.Warnw("Failed to sync with on-chain sequence", "address", addr.String(), "attempt", attempt, "err", err) + } + continue + } + // Found new sequence to use from on-chain + if 
localSequence.String() != newNextSequence.String() { + eb.lggr.Infow("Fast-forward sequence", "address", addr, "newNextSequence", newNextSequence, "oldNextSequence", localSequence) + // Set new sequence in the map + eb.SetNextSequence(addr, newNextSequence) + } + return + + } + } +} + +// ProcessUnstartedTxs picks up and handles all txes in the queue +// revive:disable:error-return +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) ProcessUnstartedTxs(ctx context.Context, addr ADDR) (retryable bool, err error) { + return eb.processUnstartedTxs(ctx, addr) +} + +// NOTE: This MUST NOT be run concurrently for the same address or it could +// result in undefined state or deadlocks. +// First handle any in_progress transactions left over from last time. +// Then keep looking up unstarted transactions and processing them until there are none remaining. +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) processUnstartedTxs(ctx context.Context, fromAddress ADDR) (retryable bool, err error) { + var n uint + mark := time.Now() + defer func() { + if n > 0 { + eb.lggr.Debugw("Finished processUnstartedTxs", "address", fromAddress, "time", time.Since(mark), "n", n, "id", "broadcaster") + } + }() + + err, retryable = eb.handleAnyInProgressTx(ctx, fromAddress) + if err != nil { + return retryable, fmt.Errorf("processUnstartedTxs failed on handleAnyInProgressTx: %w", err) + } + for { + maxInFlightTransactions := eb.txConfig.MaxInFlight() + if maxInFlightTransactions > 0 { + nUnconfirmed, err := eb.txStore.CountUnconfirmedTransactions(ctx, fromAddress, eb.chainID) + if err != nil { + return true, fmt.Errorf("CountUnconfirmedTransactions failed: %w", err) + } + if nUnconfirmed >= maxInFlightTransactions { + nUnstarted, err := eb.txStore.CountUnstartedTransactions(ctx, fromAddress, eb.chainID) + if err != nil { + return true, fmt.Errorf("CountUnstartedTransactions failed: %w", err) + } + eb.lggr.Warnw(fmt.Sprintf(`Transaction 
throttling; %d transactions in-flight and %d unstarted transactions pending (maximum number of in-flight transactions is %d per key). %s`, nUnconfirmed, nUnstarted, maxInFlightTransactions, label.MaxInFlightTransactionsWarning), "maxInFlightTransactions", maxInFlightTransactions, "nUnconfirmed", nUnconfirmed, "nUnstarted", nUnstarted) + select { + case <-time.After(InFlightTransactionRecheckInterval): + case <-ctx.Done(): + return false, context.Cause(ctx) + } + continue + } + } + etx, err := eb.nextUnstartedTransactionWithSequence(fromAddress) + if err != nil { + return true, fmt.Errorf("processUnstartedTxs failed on nextUnstartedTransactionWithSequence: %w", err) + } + if etx == nil { + return false, nil + } + n++ + + if err, retryable := eb.handleUnstartedTx(ctx, etx); err != nil { + return retryable, fmt.Errorf("processUnstartedTxs failed on handleUnstartedTx: %w", err) + } + } +} + +// handleAnyInProgressTx checks if there is any transaction +// in_progress and if so, finishes the job +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) handleAnyInProgressTx(ctx context.Context, fromAddress ADDR) (err error, retryable bool) { + etx, err := eb.txStore.GetTxInProgress(ctx, fromAddress) + if err != nil { + return fmt.Errorf("handleAnyInProgressTx failed: %w", err), true + } + if etx != nil { + if err, retryable := eb.handleInProgressTx(ctx, *etx, etx.TxAttempts[0], etx.CreatedAt); err != nil { + return fmt.Errorf("handleAnyInProgressTx failed: %w", err), retryable + } + } + return nil, false +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) handleUnstartedTx(ctx context.Context, etx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) (error, bool) { + if etx.State != TxUnstarted { + return fmt.Errorf("invariant violation: expected transaction %v to be unstarted, it was %s", etx.ID, etx.State), false + } + + attempt, _, _, retryable, err := eb.NewTxAttempt(ctx, *etx, eb.lggr) + if err != nil { + 
return fmt.Errorf("processUnstartedTxs failed on NewAttempt: %w", err), retryable + } + + checkerSpec, err := etx.GetChecker() + if err != nil { + return fmt.Errorf("parsing transmit checker: %w", err), false + } + + checker, err := eb.checkerFactory.BuildChecker(checkerSpec) + if err != nil { + return fmt.Errorf("building transmit checker: %w", err), false + } + + lgr := etx.GetLogger(eb.lggr.With("fee", attempt.TxFee)) + + // If the transmit check does not complete within the timeout, the transaction will be sent + // anyway. + // It's intentional that we only run `Check` for unstarted transactions. + // Running it on other states might lead to nonce duplication, as we might mark applied transactions as fatally errored. + + checkCtx, cancel := context.WithTimeout(ctx, TransmitCheckTimeout) + defer cancel() + err = checker.Check(checkCtx, lgr, *etx, attempt) + if errors.Is(err, context.Canceled) { + lgr.Warn("Transmission checker timed out, sending anyway") + } else if err != nil { + etx.Error = null.StringFrom(err.Error()) + lgr.Warnw("Transmission checker failed, fatally erroring transaction.", "err", err) + return eb.saveFatallyErroredTransaction(lgr, etx), true + } + cancel() + + if err = eb.txStore.UpdateTxUnstartedToInProgress(ctx, etx, &attempt); errors.Is(err, ErrTxRemoved) { + eb.lggr.Debugw("tx removed", "txID", etx.ID, "subject", etx.Subject) + return nil, false + } else if err != nil { + return fmt.Errorf("processUnstartedTxs failed on UpdateTxUnstartedToInProgress: %w", err), true + } + + return eb.handleInProgressTx(ctx, *etx, attempt, time.Now()) +} + +// There can be at most one in_progress transaction per address. +// Here we complete the job that we didn't finish last time. 
+func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) handleInProgressTx(ctx context.Context, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], initialBroadcastAt time.Time) (error, bool) { + if etx.State != TxInProgress { + return fmt.Errorf("invariant violation: expected transaction %v to be in_progress, it was %s", etx.ID, etx.State), false + } + + lgr := etx.GetLogger(logger.With(eb.lggr, "fee", attempt.TxFee)) + lgr.Infow("Sending transaction", "txAttemptID", attempt.ID, "txHash", attempt.Hash, "meta", etx.Meta, "feeLimit", etx.FeeLimit, "attempt", attempt, "etx", etx) + errType, err := eb.client.SendTransactionReturnCode(ctx, etx, attempt, lgr) + + if errType != client.Fatal { + etx.InitialBroadcastAt = &initialBroadcastAt + etx.BroadcastAt = &initialBroadcastAt + } + + switch errType { + case client.Fatal: + eb.SvcErrBuffer.Append(err) + etx.Error = null.StringFrom(err.Error()) + return eb.saveFatallyErroredTransaction(lgr, &etx), true + case client.TransactionAlreadyKnown: + fallthrough + case client.Successful: + // Either the transaction was successful or one of the following four scenarios happened: + // + // SCENARIO 1 + // + // This is resuming a previous crashed run. In this scenario, it is + // likely that our previous transaction was the one who was confirmed, + // in which case we hand it off to the confirmer to get the + // receipt. + // + // SCENARIO 2 + // + // It is also possible that an external wallet can have messed with the + // account and sent a transaction on this sequence. + // + // In this case, the onus is on the node operator since this is + // explicitly unsupported. + // + // If it turns out to have been an external wallet, we will never get a + // receipt for this transaction and it will eventually be marked as + // errored. + // + // The end result is that we will NOT SEND a transaction for this + // sequence. 
+ // + // SCENARIO 3 + // + // The network client can be assumed to have at-least-once delivery + // behavior. It is possible that the client could have already + // sent this exact same transaction even if this is our first time + // calling SendTransaction(). + // + // SCENARIO 4 (most likely) + // + // A sendonly node got the transaction in first. + // + // In all scenarios, the correct thing to do is assume success for now + // and hand off to the confirmer to get the receipt (or mark as + // failed). + observeTimeUntilBroadcast(eb.chainID, etx.CreatedAt, time.Now()) + // Check if from_address exists in map to ensure it is valid before broadcasting + var sequence SEQ + sequence, err = eb.GetNextSequence(ctx, etx.FromAddress) + if err != nil { + return err, true + } + err = eb.txStore.UpdateTxAttemptInProgressToBroadcast(ctx, &etx, attempt, txmgrtypes.TxAttemptBroadcast) + if err != nil { + return err, true + } + // Increment sequence if successfully broadcasted + eb.IncrementNextSequence(etx.FromAddress, sequence) + return err, true + case client.Underpriced: + return eb.tryAgainBumpingGas(ctx, lgr, err, etx, attempt, initialBroadcastAt) + case client.InsufficientFunds: + // NOTE: This bails out of the entire cycle and essentially "blocks" on + // any transaction that gets insufficient_funds. This is OK if a + // transaction with a large VALUE blocks because this always comes last + // in the processing list. + // If it blocks because of a transaction that is expensive due to large + // gas limit, we could have smaller transactions "above" it that could + // theoretically be sent, but will instead be blocked. 
+ eb.SvcErrBuffer.Append(err) + fallthrough + case client.Retryable: + return err, true + case client.FeeOutOfValidRange: + return eb.tryAgainWithNewEstimation(ctx, lgr, err, etx, attempt, initialBroadcastAt) + case client.Unsupported: + return err, false + case client.ExceedsMaxFee: + // Broadcaster: Note that we may have broadcast to multiple nodes and had it + // accepted by one of them! It is not guaranteed that all nodes share + // the same tx fee cap. That is why we must treat this as an unknown + // error that may have been confirmed. + // If there is only one RPC node, or all RPC nodes have the same + // configured cap, this transaction will get stuck and keep repeating + // forever until the issue is resolved. + lgr.Criticalw(`RPC node rejected this tx as outside Fee Cap`, "attempt", attempt) + fallthrough + default: + // Every error that doesn't fall under one of the above categories will be treated as Unknown. + fallthrough + case client.Unknown: + eb.SvcErrBuffer.Append(err) + lgr.Criticalw(`Unknown error occurred while handling tx queue in ProcessUnstartedTxs. This chain/RPC client may not be supported. `+ + `Urgent resolution required, Plugin is currently operating in a degraded state and may miss transactions`, "attempt", attempt) + nextSequence, e := eb.client.PendingSequenceAt(ctx, etx.FromAddress) + if e != nil { + err = multierr.Combine(e, err) + return fmt.Errorf("failed to fetch latest pending sequence after encountering unknown RPC error while sending transaction: %w", err), true + } + if nextSequence.Int64() > (*etx.Sequence).Int64() { + // Despite the error, the RPC node considers the previously sent + // transaction to have been accepted. 
In this case, the right thing to + // do is assume success and hand off to Confirmer + + // Check if from_address exists in map to ensure it is valid before broadcasting + var sequence SEQ + sequence, err = eb.GetNextSequence(ctx, etx.FromAddress) + if err != nil { + return err, true + } + err = eb.txStore.UpdateTxAttemptInProgressToBroadcast(ctx, &etx, attempt, txmgrtypes.TxAttemptBroadcast) + if err != nil { + return err, true + } + // Increment sequence if successfully broadcasted + eb.IncrementNextSequence(etx.FromAddress, sequence) + return err, true + } + // Either the unknown error prevented the transaction from being mined, or + // it has not yet propagated to the mempool, or there is some race on the + // remote RPC. + // + // In all cases, the best thing we can do is go into a retry loop and keep + // trying to send the transaction over again. + return fmt.Errorf("retryable error while sending transaction %s (tx ID %d): %w", attempt.Hash.String(), etx.ID, err), true + } + +} + +// Finds next transaction in the queue, assigns a sequence, and moves it to "in_progress" state ready for broadcast. +// Returns nil if no transactions are in queue +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) nextUnstartedTransactionWithSequence(fromAddress ADDR) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ctx, cancel := eb.chStop.NewCtx() + defer cancel() + etx := &txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]{} + if err := eb.txStore.FindNextUnstartedTransactionFromAddress(ctx, etx, fromAddress, eb.chainID); err != nil { + if errors.Is(err, sql.ErrNoRows) { + // Finish. No more transactions left to process. Hoorah! 
+ return nil, nil + } + return nil, fmt.Errorf("findNextUnstartedTransactionFromAddress failed: %w", err) + } + + sequence, err := eb.GetNextSequence(ctx, etx.FromAddress) + if err != nil { + return nil, err + } + etx.Sequence = &sequence + return etx, nil +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) tryAgainBumpingGas(ctx context.Context, lgr logger.Logger, txError error, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], initialBroadcastAt time.Time) (err error, retryable bool) { + logger.With(lgr, + "sendError", txError, + "attemptFee", attempt.TxFee, + "maxGasPriceConfig", eb.feeConfig.MaxFeePrice(), + ).Errorf("attempt fee %v was rejected by the node for being too low. "+ + "Node returned: '%s'. "+ + "Will bump and retry. ACTION REQUIRED: This is a configuration error. "+ + "Consider increasing FeeEstimator.PriceDefault (current value: %s)", + attempt.TxFee, txError.Error(), eb.feeConfig.FeePriceDefault()) + + replacementAttempt, bumpedFee, bumpedFeeLimit, retryable, err := eb.NewBumpTxAttempt(ctx, etx, attempt, nil, lgr) + if err != nil { + return fmt.Errorf("tryAgainBumpFee failed: %w", err), retryable + } + + return eb.saveTryAgainAttempt(ctx, lgr, etx, attempt, replacementAttempt, initialBroadcastAt, bumpedFee, bumpedFeeLimit) +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) tryAgainWithNewEstimation(ctx context.Context, lgr logger.Logger, txError error, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], initialBroadcastAt time.Time) (err error, retryable bool) { + if attempt.TxType == 0x2 { + err = fmt.Errorf("re-estimation is not supported for EIP-1559 transactions. Node returned error: %v. 
This is a bug", txError.Error()) + logger.Sugared(eb.lggr).AssumptionViolation(err.Error()) + return err, false + } + + replacementAttempt, fee, feeLimit, retryable, err := eb.NewTxAttemptWithType(ctx, etx, lgr, attempt.TxType, feetypes.OptForceRefetch) + if err != nil { + return fmt.Errorf("tryAgainWithNewEstimation failed to build new attempt: %w", err), retryable + } + lgr.Warnw("L2 rejected transaction due to incorrect fee, re-estimated and will try again", + "etxID", etx.ID, "err", err, "newGasPrice", fee, "newGasLimit", feeLimit) + + return eb.saveTryAgainAttempt(ctx, lgr, etx, attempt, replacementAttempt, initialBroadcastAt, fee, feeLimit) +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) saveTryAgainAttempt(ctx context.Context, lgr logger.Logger, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], replacementAttempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], initialBroadcastAt time.Time, newFee FEE, newFeeLimit uint32) (err error, retryable bool) { + if err = eb.txStore.SaveReplacementInProgressAttempt(ctx, attempt, &replacementAttempt); err != nil { + return fmt.Errorf("tryAgainWithNewFee failed: %w", err), true + } + lgr.Debugw("Bumped fee on initial send", "oldFee", attempt.TxFee.String(), "newFee", newFee.String(), "newFeeLimit", newFeeLimit) + return eb.handleInProgressTx(ctx, etx, replacementAttempt, initialBroadcastAt) +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) saveFatallyErroredTransaction(lgr logger.Logger, etx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error { + ctx, cancel := eb.chStop.NewCtx() + defer cancel() + if etx.State != TxInProgress && etx.State != TxUnstarted { + return fmt.Errorf("can only transition to fatal_error from in_progress or unstarted, transaction is currently %s", etx.State) + } + if !etx.Error.Valid { + return 
errors.New("expected error field to be set") + } + // NOTE: It's simpler to not do this transactionally for now (would require + // refactoring pipeline runner resume to use postgres events) + // + // There is a very tiny possibility of the following: + // + // 1. We get a fatal error on the tx, resuming the pipeline with error + // 2. Crash or failure during persist of fatal errored tx + // 3. On the subsequent run the tx somehow succeeds and we save it as successful + // + // Now we have an errored pipeline even though the tx succeeded. This case + // is relatively benign and probably nobody will ever run into it in + // practice, but something to be aware of. + if etx.PipelineTaskRunID.Valid && eb.resumeCallback != nil && etx.SignalCallback { + err := eb.resumeCallback(etx.PipelineTaskRunID.UUID, nil, fmt.Errorf("fatal error while sending transaction: %s", etx.Error.String)) + if errors.Is(err, sql.ErrNoRows) { + lgr.Debugw("callback missing or already resumed", "etxID", etx.ID) + } else if err != nil { + return fmt.Errorf("failed to resume pipeline: %w", err) + } else { + // Mark tx as having completed callback + if err := eb.txStore.UpdateTxCallbackCompleted(ctx, etx.PipelineTaskRunID.UUID, eb.chainID); err != nil { + return err + } + } + } + return eb.txStore.UpdateTxFatalError(ctx, etx) +} + +// Used to get the next usable sequence for a transaction +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetNextSequence(ctx context.Context, address ADDR) (seq SEQ, err error) { + eb.sequenceLock.Lock() + defer eb.sequenceLock.Unlock() + // Get next sequence from map + seq, exists := eb.nextSequenceMap[address] + if exists { + return seq, nil + } + + eb.lggr.Infow("address not found in local next sequence map. 
Attempting to search and populate sequence.", "address", address.String()) + // Check if address is in the enabled address list + if !slices.Contains(eb.enabledAddresses, address) { + return seq, fmt.Errorf("address disabled: %s", address) + } + + // Try to retrieve next sequence from tx table or on-chain to load the map + // A scenario could exist where loading the map during startup failed (e.g. All configured RPC's are unreachable at start) + // The expectation is that the node does not fail startup so sequences need to be loaded during runtime + foundSeq, err := eb.getSequenceForAddr(ctx, address) + if err != nil { + return seq, fmt.Errorf("failed to find next sequence for address: %s", address) + } + + // Set sequence in map + eb.nextSequenceMap[address] = foundSeq + return foundSeq, nil +} + +// Used to increment the sequence in the mapping to have the next usable one available for the next transaction +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) IncrementNextSequence(address ADDR, seq SEQ) { + eb.sequenceLock.Lock() + defer eb.sequenceLock.Unlock() + eb.nextSequenceMap[address] = eb.generateNextSequence(seq) +} + +// Used to set the next sequence explicitly to a certain value +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) SetNextSequence(address ADDR, seq SEQ) { + eb.sequenceLock.Lock() + defer eb.sequenceLock.Unlock() + eb.nextSequenceMap[address] = seq +} + +func observeTimeUntilBroadcast[CHAIN_ID types.ID](chainID CHAIN_ID, createdAt, broadcastAt time.Time) { + duration := float64(broadcastAt.Sub(createdAt)) + promTimeUntilBroadcast.WithLabelValues(chainID.String()).Observe(duration) +} diff --git a/common/txmgr/confirmer.go b/common/txmgr/confirmer.go new file mode 100644 index 00000000..9cd84285 --- /dev/null +++ b/common/txmgr/confirmer.go @@ -0,0 +1,1165 @@ +package txmgr + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "sort" + "strconv" + "sync" + "time" + + 
"github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/multierr" + + commonhex "github.com/goplugin/plugin-common/pkg/utils/hex" + + "github.com/goplugin/plugin-common/pkg/chains/label" + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/common/client" + commonfee "github.com/goplugin/pluginv3.0/v2/common/fee" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + iutils "github.com/goplugin/pluginv3.0/v2/common/internal/utils" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +const ( + // processHeadTimeout represents a sanity limit on how long ProcessHead + // should take to complete + processHeadTimeout = 10 * time.Minute + + // logAfterNConsecutiveBlocksChainTooShort logs a warning if we go at least + // this many consecutive blocks with a re-org protection chain that is too + // short + // + // we don't log every time because on startup it can be lower, only if it + // persists does it indicate a serious problem + logAfterNConsecutiveBlocksChainTooShort = 10 +) + +var ( + promNumGasBumps = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "tx_manager_num_gas_bumps", + Help: "Number of gas bumps", + }, []string{"chainID"}) + + promGasBumpExceedsLimit = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "tx_manager_gas_bump_exceeds_limit", + Help: "Number of times gas bumping failed from exceeding the configured limit. Any counts of this type indicate a serious problem.", + }, []string{"chainID"}) + promNumConfirmedTxs = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "tx_manager_num_confirmed_transactions", + Help: "Total number of confirmed transactions. 
Note that this can err to be too high since transactions are counted on each confirmation, which can happen multiple times per transaction in the case of re-orgs", + }, []string{"chainID"}) + promNumSuccessfulTxs = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "tx_manager_num_successful_transactions", + Help: "Total number of successful transactions. Note that this can err to be too high since transactions are counted on each confirmation, which can happen multiple times per transaction in the case of re-orgs", + }, []string{"chainID"}) + promRevertedTxCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "tx_manager_num_tx_reverted", + Help: "Number of times a transaction reverted on-chain. Note that this can err to be too high since transactions are counted on each confirmation, which can happen multiple times per transaction in the case of re-orgs", + }, []string{"chainID"}) + promFwdTxCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "tx_manager_fwd_tx_count", + Help: "The number of forwarded transaction attempts labeled by status", + }, []string{"chainID", "successful"}) + promTxAttemptCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "tx_manager_tx_attempt_count", + Help: "The number of transaction attempts that are currently being processed by the transaction manager", + }, []string{"chainID"}) + promTimeUntilTxConfirmed = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "tx_manager_time_until_tx_confirmed", + Help: "The amount of time elapsed from a transaction being broadcast to being included in a block.", + Buckets: []float64{ + float64(500 * time.Millisecond), + float64(time.Second), + float64(5 * time.Second), + float64(15 * time.Second), + float64(30 * time.Second), + float64(time.Minute), + float64(2 * time.Minute), + float64(5 * time.Minute), + float64(10 * time.Minute), + }, + }, []string{"chainID"}) + promBlocksUntilTxConfirmed = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: 
"tx_manager_blocks_until_tx_confirmed", + Help: "The amount of blocks that have been mined from a transaction being broadcast to being included in a block.", + Buckets: []float64{ + float64(1), + float64(5), + float64(10), + float64(20), + float64(50), + float64(100), + }, + }, []string{"chainID"}) +) + +// Confirmer is a broad service which performs four different tasks in sequence on every new longest chain +// Step 1: Mark that all currently pending transaction attempts were broadcast before this block +// Step 2: Check pending transactions for receipts +// Step 3: See if any transactions have exceeded the gas bumping block threshold and, if so, bump them +// Step 4: Check confirmed transactions to make sure they are still in the longest chain (reorg protection) +type Confirmer[ + CHAIN_ID types.ID, + HEAD types.Head[BLOCK_HASH], + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + R txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH], + SEQ types.Sequence, + FEE feetypes.Fee, +] struct { + services.StateMachine + txStore txmgrtypes.TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] + lggr logger.SugaredLogger + client txmgrtypes.TxmClient[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE] + txmgrtypes.TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + resumeCallback ResumeCallback + chainConfig txmgrtypes.ConfirmerChainConfig + feeConfig txmgrtypes.ConfirmerFeeConfig + txConfig txmgrtypes.ConfirmerTransactionsConfig + dbConfig txmgrtypes.ConfirmerDatabaseConfig + chainID CHAIN_ID + + ks txmgrtypes.KeyStore[ADDR, CHAIN_ID, SEQ] + enabledAddresses []ADDR + + mb *mailbox.Mailbox[HEAD] + ctx context.Context + ctxCancel context.CancelFunc + wg sync.WaitGroup + initSync sync.Mutex + isStarted bool + + nConsecutiveBlocksChainTooShort int + isReceiptNil func(R) bool +} + +func NewConfirmer[ + CHAIN_ID types.ID, + HEAD types.Head[BLOCK_HASH], + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + R 
txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH], + SEQ types.Sequence, + FEE feetypes.Fee, +]( + txStore txmgrtypes.TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE], + client txmgrtypes.TxmClient[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE], + chainConfig txmgrtypes.ConfirmerChainConfig, + feeConfig txmgrtypes.ConfirmerFeeConfig, + txConfig txmgrtypes.ConfirmerTransactionsConfig, + dbConfig txmgrtypes.ConfirmerDatabaseConfig, + keystore txmgrtypes.KeyStore[ADDR, CHAIN_ID, SEQ], + txAttemptBuilder txmgrtypes.TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + lggr logger.Logger, + isReceiptNil func(R) bool, +) *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE] { + lggr = logger.Named(lggr, "Confirmer") + return &Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]{ + txStore: txStore, + lggr: logger.Sugared(lggr), + client: client, + TxAttemptBuilder: txAttemptBuilder, + resumeCallback: nil, + chainConfig: chainConfig, + feeConfig: feeConfig, + txConfig: txConfig, + dbConfig: dbConfig, + chainID: client.ConfiguredChainID(), + ks: keystore, + mb: mailbox.NewSingle[HEAD](), + isReceiptNil: isReceiptNil, + } +} + +// Start is a comment to appease the linter +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Start(_ context.Context) error { + return ec.StartOnce("Confirmer", func() error { + if ec.feeConfig.BumpThreshold() == 0 { + ec.lggr.Infow("Gas bumping is disabled (FeeEstimator.BumpThreshold set to 0)", "feeBumpThreshold", 0) + } else { + ec.lggr.Infow(fmt.Sprintf("Fee bumping is enabled, unconfirmed transactions will have their fee bumped every %d blocks", ec.feeConfig.BumpThreshold()), "feeBumpThreshold", ec.feeConfig.BumpThreshold()) + } + + return ec.startInternal() + }) +} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) startInternal() error { + ec.initSync.Lock() + defer ec.initSync.Unlock() + if ec.isStarted { + return 
	errors.New("Confirmer is already started")
	}
	var err error
	ec.enabledAddresses, err = ec.ks.EnabledAddressesForChain(ec.chainID)
	if err != nil {
		return fmt.Errorf("Confirmer: failed to load EnabledAddressesForChain: %w", err)
	}

	// The run loop owns this context; Close cancels it and waits on wg.
	ec.ctx, ec.ctxCancel = context.WithCancel(context.Background())
	ec.wg = sync.WaitGroup{}
	ec.wg.Add(1)
	go ec.runLoop()
	ec.isStarted = true
	return nil
}

// Close stops the Confirmer: it cancels the run-loop context, waits for the
// loop goroutine to exit, and marks the service stopped. StopOnce guards
// against double-close.
func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Close() error {
	return ec.StopOnce("Confirmer", func() error {
		return ec.closeInternal()
	})
}

// closeInternal performs the shutdown under initSync: cancels the run-loop
// context and blocks until runLoop has returned.
func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) closeInternal() error {
	ec.initSync.Lock()
	defer ec.initSync.Unlock()
	if !ec.isStarted {
		return fmt.Errorf("Confirmer is not started: %w", services.ErrAlreadyStopped)
	}
	ec.ctxCancel()
	ec.wg.Wait()
	ec.isStarted = false
	return nil
}

// SetResumeCallback registers the callback later used by ResumePendingTaskRuns
// to resume task runs that were waiting on transaction confirmation.
func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) SetResumeCallback(callback ResumeCallback) {
	ec.resumeCallback = callback
}

// Name returns the logger name, which identifies this service in health reports.
func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Name() string {
	return ec.lggr.Name()
}

// HealthReport reports this service's health keyed by Name.
func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) HealthReport() map[string]error {
	return map[string]error{ec.Name(): ec.Healthy()}
}

// runLoop waits for head notifications, drains the mailbox, and processes each
// head until the run-loop context is cancelled. Errors from ProcessHead are
// logged and the loop continues; they are not fatal.
func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) runLoop() {
	defer ec.wg.Done()
	for {
		select {
		case <-ec.mb.Notify():
			// Drain the mailbox completely; each Notify may cover several heads.
			for {
				if ec.ctx.Err() != nil {
					return
				}
				head, exists := ec.mb.Retrieve()
				if !exists {
					break
				}
				if err := ec.ProcessHead(ec.ctx, head); err != nil {
					ec.lggr.Errorw("Error processing head", "err", err)
					continue
				}
			}
		case <-ec.ctx.Done():
			return
		}
	}
}

// ProcessHead takes all required transactions for the confirmer on a new head,
// bounding the whole pass by processHeadTimeout.
func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) ProcessHead(ctx context.Context, head types.Head[BLOCK_HASH]) error {
	ctx, cancel := context.WithTimeout(ctx, processHeadTimeout)
	defer cancel()
	return ec.processHead(ctx, head)
}

// processHead runs the full confirmer pipeline for one head, in order:
// stamp broadcast-before block numbers, re-check confirmed_missing_receipt
// txes, fetch receipts, rebroadcast/bump where necessary, re-org protection,
// and finally (if a callback is set) resume pending task runs.
// NOTE: This SHOULD NOT be run concurrently or it could behave badly
func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) processHead(ctx context.Context, head types.Head[BLOCK_HASH]) error {
	mark := time.Now()

	ec.lggr.Debugw("processHead start", "headNum", head.BlockNumber(), "id", "confirmer")

	if err := ec.txStore.SetBroadcastBeforeBlockNum(ctx, head.BlockNumber(), ec.chainID); err != nil {
		return fmt.Errorf("SetBroadcastBeforeBlockNum failed: %w", err)
	}
	if err := ec.CheckConfirmedMissingReceipt(ctx); err != nil {
		return fmt.Errorf("CheckConfirmedMissingReceipt failed: %w", err)
	}

	if err := ec.CheckForReceipts(ctx, head.BlockNumber()); err != nil {
		return fmt.Errorf("CheckForReceipts failed: %w", err)
	}

	ec.lggr.Debugw("Finished CheckForReceipts", "headNum", head.BlockNumber(), "time", time.Since(mark), "id", "confirmer")
	mark = time.Now()

	if err := ec.RebroadcastWhereNecessary(ctx, head.BlockNumber()); err != nil {
		return fmt.Errorf("RebroadcastWhereNecessary failed: %w", err)
	}

	ec.lggr.Debugw("Finished RebroadcastWhereNecessary", "headNum", head.BlockNumber(), "time", time.Since(mark), "id", "confirmer")
	mark = time.Now()

	if err := ec.EnsureConfirmedTransactionsInLongestChain(ctx, head); err != nil {
		return fmt.Errorf("EnsureConfirmedTransactionsInLongestChain failed: %w", err)
	}

	ec.lggr.Debugw("Finished EnsureConfirmedTransactionsInLongestChain", "headNum", head.BlockNumber(), "time", time.Since(mark), "id", "confirmer")

	if ec.resumeCallback != nil {
		mark = time.Now()
		if err := ec.ResumePendingTaskRuns(ctx, head); err != nil {
			return fmt.Errorf("ResumePendingTaskRuns failed: %w", err)
		}

		ec.lggr.Debugw("Finished ResumePendingTaskRuns", "headNum", head.BlockNumber(), "time", time.Since(mark), "id", "confirmer")
	}

	ec.lggr.Debugw("processHead finish", "headNum", head.BlockNumber(), "id", "confirmer")

	return nil
}

// CheckConfirmedMissingReceipt will attempt to re-send any transaction in the
// state of "confirmed_missing_receipt". If we get back any type of senderror
// other than "sequence too low" it means that this transaction isn't actually
// confirmed and needs to be put back into "unconfirmed" state, so that it can enter
// the gas bumping cycle. This is necessary in rare cases (e.g. Polygon) where
// network conditions are extremely hostile.
//
// For example, assume the following scenario:
//
// 0. We are connected to multiple primary nodes via load balancer
// 1. We send a transaction, it is confirmed and, we get a receipt
// 2. A new head comes in from RPC node 1 indicating that this transaction was re-org'd, so we put it back into unconfirmed state
// 3. We re-send that transaction to a RPC node 2 **which hasn't caught up to this re-org yet**
// 4. RPC node 2 still has an old view of the chain, so it returns us "sequence too low" indicating "no problem this transaction is already mined"
// 5. Now the transaction is marked "confirmed_missing_receipt" but the latest chain does not actually include it
// 6. Now we are reliant on the Resender to propagate it, and this transaction will not be gas bumped, so in the event of gas spikes it could languish or even be evicted from the mempool and hold up the queue
// 7. Even if/when RPC node 2 catches up, the transaction is still stuck in state "confirmed_missing_receipt"
//
// This scenario might sound unlikely but has been observed to happen multiple times in the wild on Polygon.
+func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CheckConfirmedMissingReceipt(ctx context.Context) (err error) { + attempts, err := ec.txStore.FindTxAttemptsConfirmedMissingReceipt(ctx, ec.chainID) + if err != nil { + return err + } + if len(attempts) == 0 { + return nil + } + ec.lggr.Infow(fmt.Sprintf("Found %d transactions confirmed_missing_receipt. The RPC node did not give us a receipt for these transactions even though it should have been mined. This could be due to using the wallet with an external account, or if the primary node is not synced or not propagating transactions properly", len(attempts)), "attempts", attempts) + txCodes, txErrs, broadcastTime, txIDs, err := ec.client.BatchSendTransactions(ctx, attempts, int(ec.chainConfig.RPCDefaultBatchSize()), ec.lggr) + // update broadcast times before checking additional errors + if len(txIDs) > 0 { + if updateErr := ec.txStore.UpdateBroadcastAts(ctx, broadcastTime, txIDs); updateErr != nil { + err = fmt.Errorf("%w: failed to update broadcast time: %w", err, updateErr) + } + } + if err != nil { + ec.lggr.Debugw("Batch sending transactions failed", err) + } + var txIDsToUnconfirm []int64 + for idx, txErr := range txErrs { + // Add to Unconfirm array, all tx where error wasn't TransactionAlreadyKnown. 
+ if txErr != nil { + if txCodes[idx] == client.TransactionAlreadyKnown { + continue + } + } + + txIDsToUnconfirm = append(txIDsToUnconfirm, attempts[idx].TxID) + } + err = ec.txStore.UpdateTxsUnconfirmed(ctx, txIDsToUnconfirm) + + if err != nil { + return err + } + return +} + +// CheckForReceipts finds attempts that are still pending and checks to see if a receipt is present for the given block number +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CheckForReceipts(ctx context.Context, blockNum int64) error { + attempts, err := ec.txStore.FindTxAttemptsRequiringReceiptFetch(ctx, ec.chainID) + if err != nil { + return fmt.Errorf("FindTxAttemptsRequiringReceiptFetch failed: %w", err) + } + if len(attempts) == 0 { + return nil + } + + ec.lggr.Debugw(fmt.Sprintf("Fetching receipts for %v transaction attempts", len(attempts)), "blockNum", blockNum) + + attemptsByAddress := make(map[ADDR][]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + for _, att := range attempts { + attemptsByAddress[att.Tx.FromAddress] = append(attemptsByAddress[att.Tx.FromAddress], att) + } + + for from, attempts := range attemptsByAddress { + minedSequence, err := ec.getMinedSequenceForAddress(ctx, from) + if err != nil { + return fmt.Errorf("unable to fetch pending sequence for address: %v: %w", from, err) + } + + // separateLikelyConfirmedAttempts is used as an optimisation: there is + // no point trying to fetch receipts for attempts with a sequence higher + // than the highest sequence the RPC node thinks it has seen + likelyConfirmed := ec.separateLikelyConfirmedAttempts(from, attempts, minedSequence) + likelyConfirmedCount := len(likelyConfirmed) + if likelyConfirmedCount > 0 { + likelyUnconfirmedCount := len(attempts) - likelyConfirmedCount + + ec.lggr.Debugf("Fetching and saving %v likely confirmed receipts. 
Skipping checking the others (%v)", + likelyConfirmedCount, likelyUnconfirmedCount) + + start := time.Now() + err = ec.fetchAndSaveReceipts(ctx, likelyConfirmed, blockNum) + if err != nil { + return fmt.Errorf("unable to fetch and save receipts for likely confirmed txs, for address: %v: %w", from, err) + } + ec.lggr.Debugw(fmt.Sprintf("Fetching and saving %v likely confirmed receipts done", likelyConfirmedCount), + "time", time.Since(start)) + } + } + + if err := ec.txStore.MarkAllConfirmedMissingReceipt(ctx, ec.chainID); err != nil { + return fmt.Errorf("unable to mark txes as 'confirmed_missing_receipt': %w", err) + } + + if err := ec.txStore.MarkOldTxesMissingReceiptAsErrored(ctx, blockNum, ec.chainConfig.FinalityDepth(), ec.chainID); err != nil { + return fmt.Errorf("unable to confirm buried unconfirmed txes': %w", err) + } + return nil +} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) separateLikelyConfirmedAttempts(from ADDR, attempts []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], minedSequence SEQ) []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] { + if len(attempts) == 0 { + return attempts + } + + firstAttemptSequence := *attempts[len(attempts)-1].Tx.Sequence + lastAttemptSequence := *attempts[0].Tx.Sequence + latestMinedSequence := minedSequence.Int64() - 1 // this can be -1 if a transaction has never been mined on this account + ec.lggr.Debugw(fmt.Sprintf("There are %d attempts from address %s, mined transaction count is %d (latest mined sequence is %d) and for the attempts' sequences: first = %d, last = %d", + len(attempts), from, minedSequence.Int64(), latestMinedSequence, firstAttemptSequence.Int64(), lastAttemptSequence.Int64()), "nAttempts", len(attempts), "fromAddress", from, "minedSequence", minedSequence, "latestMinedSequence", latestMinedSequence, "firstAttemptSequence", firstAttemptSequence, "lastAttemptSequence", lastAttemptSequence) + + likelyConfirmed := attempts + 
// attempts are ordered by sequence ASC + for i := 0; i < len(attempts); i++ { + // If the attempt sequence is lower or equal to the latestBlockSequence + // it must have been confirmed, we just didn't get a receipt yet + // + // Examples: + // 3 transactions confirmed, highest has sequence 2 + // 5 total attempts, highest has sequence 4 + // minedSequence=3 + // likelyConfirmed will be attempts[0:3] which gives the first 3 transactions, as expected + if (*attempts[i].Tx.Sequence).Int64() > minedSequence.Int64() { + ec.lggr.Debugf("Marking attempts as likely confirmed just before index %v, at sequence: %v", i, *attempts[i].Tx.Sequence) + likelyConfirmed = attempts[0:i] + break + } + } + + if len(likelyConfirmed) == 0 { + ec.lggr.Debug("There are no likely confirmed attempts - so will skip checking any") + } + + return likelyConfirmed +} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) fetchAndSaveReceipts(ctx context.Context, attempts []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], blockNum int64) error { + promTxAttemptCount.WithLabelValues(ec.chainID.String()).Set(float64(len(attempts))) + + batchSize := int(ec.chainConfig.RPCDefaultBatchSize()) + if batchSize == 0 { + batchSize = len(attempts) + } + var allReceipts []R + for i := 0; i < len(attempts); i += batchSize { + j := i + batchSize + if j > len(attempts) { + j = len(attempts) + } + + ec.lggr.Debugw(fmt.Sprintf("Batch fetching receipts at indexes %v until (excluded) %v", i, j), "blockNum", blockNum) + + batch := attempts[i:j] + + receipts, err := ec.batchFetchReceipts(ctx, batch, blockNum) + if err != nil { + return fmt.Errorf("batchFetchReceipts failed: %w", err) + } + if err := ec.txStore.SaveFetchedReceipts(ctx, receipts, ec.chainID); err != nil { + return fmt.Errorf("saveFetchedReceipts failed: %w", err) + } + promNumConfirmedTxs.WithLabelValues(ec.chainID.String()).Add(float64(len(receipts))) + + allReceipts = append(allReceipts, receipts...) 
+ } + + observeUntilTxConfirmed(ec.chainID, attempts, allReceipts) + + return nil +} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) getMinedSequenceForAddress(ctx context.Context, from ADDR) (SEQ, error) { + return ec.client.SequenceAt(ctx, from, nil) +} + +// Note this function will increment promRevertedTxCount upon receiving +// a reverted transaction receipt. Should only be called with unconfirmed attempts. +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) batchFetchReceipts(ctx context.Context, attempts []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], blockNum int64) (receipts []R, err error) { + // Metadata is required to determine whether a tx is forwarded or not. + if ec.txConfig.ForwardersEnabled() { + err = ec.txStore.PreloadTxes(ctx, attempts) + if err != nil { + return nil, fmt.Errorf("Confirmer#batchFetchReceipts error loading txs for attempts: %w", err) + } + } + + lggr := ec.lggr.Named("BatchFetchReceipts").With("blockNum", blockNum) + + txReceipts, txErrs, err := ec.client.BatchGetReceipts(ctx, attempts) + if err != nil { + return nil, err + } + + for i := range txReceipts { + attempt := attempts[i] + receipt := txReceipts[i] + err := txErrs[i] + + l := attempt.Tx.GetLogger(lggr).With("txHash", attempt.Hash.String(), "txAttemptID", attempt.ID, + "txID", attempt.TxID, "err", err, "sequence", attempt.Tx.Sequence, + ) + + if err != nil { + l.Error("FetchReceipt failed") + continue + } + + if ec.isReceiptNil(receipt) { + // NOTE: This should never happen, but it seems safer to check + // regardless to avoid a potential panic + l.AssumptionViolation("got nil receipt") + continue + } + + if receipt.IsZero() { + l.Debug("Still waiting for receipt") + continue + } + + l = l.With("blockHash", receipt.GetBlockHash().String(), "status", receipt.GetStatus(), "transactionIndex", receipt.GetTransactionIndex()) + + if receipt.IsUnmined() { + l.Debug("Got receipt for transaction but 
it's still in the mempool and not included in a block yet") + continue + } + + l.Debugw("Got receipt for transaction", "blockNumber", receipt.GetBlockNumber(), "feeUsed", receipt.GetFeeUsed()) + + if receipt.GetTxHash().String() != attempt.Hash.String() { + l.Errorf("Invariant violation, expected receipt with hash %s to have same hash as attempt with hash %s", receipt.GetTxHash().String(), attempt.Hash.String()) + continue + } + + if receipt.GetBlockNumber() == nil { + l.Error("Invariant violation, receipt was missing block number") + continue + } + + if receipt.GetStatus() == 0 { + rpcError, errExtract := ec.client.CallContract(ctx, attempt, receipt.GetBlockNumber()) + if errExtract == nil { + l.Warnw("transaction reverted on-chain", "hash", receipt.GetTxHash(), "rpcError", rpcError.String()) + } else { + l.Warnw("transaction reverted on-chain unable to extract revert reason", "hash", receipt.GetTxHash(), "err", err) + } + // This might increment more than once e.g. in case of re-orgs going back and forth we might re-fetch the same receipt + promRevertedTxCount.WithLabelValues(ec.chainID.String()).Add(1) + } else { + promNumSuccessfulTxs.WithLabelValues(ec.chainID.String()).Add(1) + } + + // This is only recording forwarded tx that were mined and have a status. + // Counters are prone to being inaccurate due to re-orgs. + if ec.txConfig.ForwardersEnabled() { + meta, metaErr := attempt.Tx.GetMeta() + if metaErr == nil && meta != nil && meta.FwdrDestAddress != nil { + // promFwdTxCount takes two labels, chainId and a boolean of whether a tx was successful or not. 
+ promFwdTxCount.WithLabelValues(ec.chainID.String(), strconv.FormatBool(receipt.GetStatus() != 0)).Add(1) + } + } + + receipts = append(receipts, receipt) + } + + return +} + +// RebroadcastWhereNecessary bumps gas or resends transactions that were previously out-of-funds +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) RebroadcastWhereNecessary(ctx context.Context, blockHeight int64) error { + var wg sync.WaitGroup + + // It is safe to process separate keys concurrently + // NOTE: This design will block one key if another takes a really long time to execute + wg.Add(len(ec.enabledAddresses)) + errors := []error{} + var errMu sync.Mutex + for _, address := range ec.enabledAddresses { + go func(fromAddress ADDR) { + if err := ec.rebroadcastWhereNecessary(ctx, fromAddress, blockHeight); err != nil { + errMu.Lock() + errors = append(errors, err) + errMu.Unlock() + ec.lggr.Errorw("Error in RebroadcastWhereNecessary", "err", err, "fromAddress", fromAddress) + } + + wg.Done() + }(address) + } + + wg.Wait() + + return multierr.Combine(errors...) 
+} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) rebroadcastWhereNecessary(ctx context.Context, address ADDR, blockHeight int64) error { + if err := ec.handleAnyInProgressAttempts(ctx, address, blockHeight); err != nil { + return fmt.Errorf("handleAnyInProgressAttempts failed: %w", err) + } + + threshold := int64(ec.feeConfig.BumpThreshold()) + bumpDepth := int64(ec.feeConfig.BumpTxDepth()) + maxInFlightTransactions := ec.txConfig.MaxInFlight() + etxs, err := ec.FindTxsRequiringRebroadcast(ctx, ec.lggr, address, blockHeight, threshold, bumpDepth, maxInFlightTransactions, ec.chainID) + if err != nil { + return fmt.Errorf("FindTxsRequiringRebroadcast failed: %w", err) + } + for _, etx := range etxs { + lggr := etx.GetLogger(ec.lggr) + + attempt, err := ec.attemptForRebroadcast(ctx, lggr, *etx) + if err != nil { + return fmt.Errorf("attemptForRebroadcast failed: %w", err) + } + + lggr.Debugw("Rebroadcasting transaction", "nPreviousAttempts", len(etx.TxAttempts), "fee", attempt.TxFee) + + if err := ec.txStore.SaveInProgressAttempt(ctx, &attempt); err != nil { + return fmt.Errorf("saveInProgressAttempt failed: %w", err) + } + + if err := ec.handleInProgressAttempt(ctx, lggr, *etx, attempt, blockHeight); err != nil { + return fmt.Errorf("handleInProgressAttempt failed: %w", err) + } + } + return nil +} + +// "in_progress" attempts were left behind after a crash/restart and may or may not have been sent. +// We should try to ensure they get on-chain so we can fetch a receipt for them. +// NOTE: We also use this to mark attempts for rebroadcast in event of a +// re-org, so multiple attempts are allowed to be in in_progress state (but +// only one per tx). 
+func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) handleAnyInProgressAttempts(ctx context.Context, address ADDR, blockHeight int64) error { + attempts, err := ec.txStore.GetInProgressTxAttempts(ctx, address, ec.chainID) + if ctx.Err() != nil { + return nil + } else if err != nil { + return fmt.Errorf("GetInProgressTxAttempts failed: %w", err) + } + for _, a := range attempts { + err := ec.handleInProgressAttempt(ctx, a.Tx.GetLogger(ec.lggr), a.Tx, a, blockHeight) + if ctx.Err() != nil { + break + } else if err != nil { + return fmt.Errorf("handleInProgressAttempt failed: %w", err) + } + } + return nil +} + +// FindTxsRequiringRebroadcast returns attempts that hit insufficient native tokens, +// and attempts that need bumping, in sequence ASC order +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxsRequiringRebroadcast(ctx context.Context, lggr logger.Logger, address ADDR, blockNum, gasBumpThreshold, bumpDepth int64, maxInFlightTransactions uint32, chainID CHAIN_ID) (etxs []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + // NOTE: These two queries could be combined into one using union but it + // becomes harder to read and difficult to test in isolation. 
KISS principle + etxInsufficientFunds, err := ec.txStore.FindTxsRequiringResubmissionDueToInsufficientFunds(ctx, address, chainID) + if err != nil { + return nil, err + } + + if len(etxInsufficientFunds) > 0 { + lggr.Infow(fmt.Sprintf("Found %d transactions to be re-sent that were previously rejected due to insufficient native token balance", len(etxInsufficientFunds)), "blockNum", blockNum, "address", address) + } + + // TODO: Just pass the Q through everything + etxBumps, err := ec.txStore.FindTxsRequiringGasBump(ctx, address, blockNum, gasBumpThreshold, bumpDepth, chainID) + if ctx.Err() != nil { + return nil, nil + } else if err != nil { + return nil, err + } + + if len(etxBumps) > 0 { + // txes are ordered by sequence asc so the first will always be the oldest + etx := etxBumps[0] + // attempts are ordered by time sent asc so first will always be the oldest + var oldestBlocksBehind int64 = -1 // It should never happen that the oldest attempt has no BroadcastBeforeBlockNum set, but in case it does, we shouldn't crash - log this sentinel value instead + if len(etx.TxAttempts) > 0 { + oldestBlockNum := etx.TxAttempts[0].BroadcastBeforeBlockNum + if oldestBlockNum != nil { + oldestBlocksBehind = blockNum - *oldestBlockNum + } + } else { + logger.Sugared(lggr).AssumptionViolationf("Expected tx for gas bump to have at least one attempt", "etxID", etx.ID, "blockNum", blockNum, "address", address) + } + lggr.Infow(fmt.Sprintf("Found %d transactions to re-sent that have still not been confirmed after at least %d blocks. The oldest of these has not still not been confirmed after %d blocks. These transactions will have their gas price bumped. 
%s", len(etxBumps), gasBumpThreshold, oldestBlocksBehind, label.NodeConnectivityProblemWarning), "blockNum", blockNum, "address", address, "gasBumpThreshold", gasBumpThreshold) + } + + seen := make(map[int64]struct{}) + + for _, etx := range etxInsufficientFunds { + seen[etx.ID] = struct{}{} + etxs = append(etxs, etx) + } + for _, etx := range etxBumps { + if _, exists := seen[etx.ID]; !exists { + etxs = append(etxs, etx) + } + } + + sort.Slice(etxs, func(i, j int) bool { + return (*etxs[i].Sequence).Int64() < (*etxs[j].Sequence).Int64() + }) + + if maxInFlightTransactions > 0 && len(etxs) > int(maxInFlightTransactions) { + lggr.Warnf("%d transactions to rebroadcast which exceeds limit of %d. %s", len(etxs), maxInFlightTransactions, label.MaxInFlightTransactionsWarning) + etxs = etxs[:maxInFlightTransactions] + } + + return +} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) attemptForRebroadcast(ctx context.Context, lggr logger.Logger, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) (attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + if len(etx.TxAttempts) > 0 { + etx.TxAttempts[0].Tx = etx + previousAttempt := etx.TxAttempts[0] + logFields := ec.logFieldsPreviousAttempt(previousAttempt) + if previousAttempt.State == txmgrtypes.TxAttemptInsufficientFunds { + // Do not create a new attempt if we ran out of funds last time since bumping gas is pointless + // Instead try to resubmit the same attempt at the same price, in the hope that the wallet was funded since our last attempt + lggr.Debugw("Rebroadcast InsufficientFunds", logFields...) + previousAttempt.State = txmgrtypes.TxAttemptInProgress + return previousAttempt, nil + } + attempt, err = ec.bumpGas(ctx, etx, etx.TxAttempts) + + if commonfee.IsBumpErr(err) { + lggr.Errorw("Failed to bump gas", append(logFields, "err", err)...) 
+ // Do not create a new attempt if bumping gas would put us over the limit or cause some other problem + // Instead try to resubmit the previous attempt, and keep resubmitting until its accepted + previousAttempt.BroadcastBeforeBlockNum = nil + previousAttempt.State = txmgrtypes.TxAttemptInProgress + return previousAttempt, nil + } + return attempt, err + } + return attempt, fmt.Errorf("invariant violation: Tx %v was unconfirmed but didn't have any attempts. "+ + "Falling back to default gas price instead."+ + "This is a bug! Please report to https://github.com/goplugin/pluginv3.0/issues", etx.ID) +} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) logFieldsPreviousAttempt(attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) []interface{} { + etx := attempt.Tx + return []interface{}{ + "etxID", etx.ID, + "txHash", attempt.Hash, + "previousAttempt", attempt, + "feeLimit", etx.FeeLimit, + "maxGasPrice", ec.feeConfig.MaxFeePrice(), + "sequence", etx.Sequence, + } +} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) bumpGas(ctx context.Context, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], previousAttempts []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) (bumpedAttempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + previousAttempt := previousAttempts[0] + logFields := ec.logFieldsPreviousAttempt(previousAttempt) + + var bumpedFee FEE + var bumpedFeeLimit uint32 + bumpedAttempt, bumpedFee, bumpedFeeLimit, _, err = ec.NewBumpTxAttempt(ctx, etx, previousAttempt, previousAttempts, ec.lggr) + + // if no error, return attempt + // if err, continue below + if err == nil { + promNumGasBumps.WithLabelValues(ec.chainID.String()).Inc() + ec.lggr.Debugw("Rebroadcast bumping fee for tx", append(logFields, "bumpedFee", bumpedFee.String(), "bumpedFeeLimit", bumpedFeeLimit)...) 
+ return bumpedAttempt, err + } + + if errors.Is(err, commonfee.ErrBumpFeeExceedsLimit) { + promGasBumpExceedsLimit.WithLabelValues(ec.chainID.String()).Inc() + } + + return bumpedAttempt, fmt.Errorf("error bumping gas: %w", err) +} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) handleInProgressAttempt(ctx context.Context, lggr logger.SugaredLogger, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], blockHeight int64) error { + if attempt.State != txmgrtypes.TxAttemptInProgress { + + return fmt.Errorf("invariant violation: expected tx_attempt %v to be in_progress, it was %s", attempt.ID, attempt.State) + } + + now := time.Now() + lggr.Debugw("Sending transaction", "txAttemptID", attempt.ID, "txHash", attempt.Hash, "meta", etx.Meta, "feeLimit", etx.FeeLimit, "attempt", attempt, "etx", etx) + errType, sendError := ec.client.SendTransactionReturnCode(ctx, etx, attempt, lggr) + + switch errType { + case client.Underpriced: + // This should really not ever happen in normal operation since we + // already bumped above the required minimum in broadcaster. + ec.lggr.Warnw("Got terminally underpriced error for gas bump, this should never happen unless the remote RPC node changed its configuration on the fly, or you are using multiple RPC nodes with different minimum gas price requirements. 
This is not recommended", "attempt", attempt) + // "Lazily" load attempts here since the overwhelmingly common case is + // that we don't need them unless we enter this path + if err := ec.txStore.LoadTxAttempts(ctx, &etx); err != nil { + return fmt.Errorf("failed to load TxAttempts while bumping on terminally underpriced error: %w", err) + } + if len(etx.TxAttempts) == 0 { + err := errors.New("expected to find at least 1 attempt") + ec.lggr.AssumptionViolationw(err.Error(), "err", err, "attempt", attempt) + return err + } + if attempt.ID != etx.TxAttempts[0].ID { + err := errors.New("expected highest priced attempt to be the current in_progress attempt") + ec.lggr.AssumptionViolationw(err.Error(), "err", err, "attempt", attempt, "txAttempts", etx.TxAttempts) + return err + } + replacementAttempt, err := ec.bumpGas(ctx, etx, etx.TxAttempts) + if err != nil { + return fmt.Errorf("could not bump gas for terminally underpriced transaction: %w", err) + } + promNumGasBumps.WithLabelValues(ec.chainID.String()).Inc() + lggr.With( + "sendError", sendError, + "maxGasPriceConfig", ec.feeConfig.MaxFeePrice(), + "previousAttempt", attempt, + "replacementAttempt", replacementAttempt, + ).Errorf("gas price was rejected by the node for being too low. Node returned: '%s'", sendError.Error()) + + if err := ec.txStore.SaveReplacementInProgressAttempt(ctx, attempt, &replacementAttempt); err != nil { + return fmt.Errorf("saveReplacementInProgressAttempt failed: %w", err) + } + return ec.handleInProgressAttempt(ctx, lggr, etx, replacementAttempt, blockHeight) + case client.ExceedsMaxFee: + // Confirmer: The gas price was bumped too high. This transaction attempt cannot be accepted. + // Best thing we can do is to re-send the previous attempt at the old + // price and discard this bumped version. + fallthrough + case client.Fatal: + // WARNING: This should never happen! + // Should NEVER be fatal this is an invariant violation. 
The + // Broadcaster can never create a TxAttempt that will + // fatally error. + lggr.Criticalw("Invariant violation: fatal error while re-attempting transaction", + "fee", attempt.TxFee, + "feeLimit", etx.FeeLimit, + "signedRawTx", commonhex.EnsurePrefix(hex.EncodeToString(attempt.SignedRawTx)), + "blockHeight", blockHeight, + ) + ec.SvcErrBuffer.Append(sendError) + // This will loop continuously on every new head so it must be handled manually by the node operator! + return ec.txStore.DeleteInProgressAttempt(ctx, attempt) + case client.TransactionAlreadyKnown: + // Sequence too low indicated that a transaction at this sequence was confirmed already. + // Mark confirmed_missing_receipt and wait for the next cycle to try to get a receipt + lggr.Debugw("Sequence already used", "txAttemptID", attempt.ID, "txHash", attempt.Hash.String()) + timeout := ec.dbConfig.DefaultQueryTimeout() + return ec.txStore.SaveConfirmedMissingReceiptAttempt(ctx, timeout, &attempt, now) + case client.InsufficientFunds: + timeout := ec.dbConfig.DefaultQueryTimeout() + return ec.txStore.SaveInsufficientFundsAttempt(ctx, timeout, &attempt, now) + case client.Successful: + lggr.Debugw("Successfully broadcast transaction", "txAttemptID", attempt.ID, "txHash", attempt.Hash.String()) + timeout := ec.dbConfig.DefaultQueryTimeout() + return ec.txStore.SaveSentAttempt(ctx, timeout, &attempt, now) + case client.Unknown: + // Every error that doesn't fall under one of the above categories will be treated as Unknown. + fallthrough + default: + // Any other type of error is considered temporary or resolvable by the + // node operator. The node may have it in the mempool so we must keep the + // attempt (leave it in_progress). Safest thing to do is bail out and wait + // for the next head. 
+ return fmt.Errorf("unexpected error sending tx %v with hash %s: %w", etx.ID, attempt.Hash.String(), sendError) + } +} + +// EnsureConfirmedTransactionsInLongestChain finds all confirmed txes up to the depth +// of the given chain and ensures that every one has a receipt with a block hash that is +// in the given chain. +// +// If any of the confirmed transactions does not have a receipt in the chain, it has been +// re-org'd out and will be rebroadcast. +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) EnsureConfirmedTransactionsInLongestChain(ctx context.Context, head types.Head[BLOCK_HASH]) error { + if head.ChainLength() < ec.chainConfig.FinalityDepth() { + logArgs := []interface{}{ + "chainLength", head.ChainLength(), "finalityDepth", ec.chainConfig.FinalityDepth(), + } + if ec.nConsecutiveBlocksChainTooShort > logAfterNConsecutiveBlocksChainTooShort { + warnMsg := "Chain length supplied for re-org detection was shorter than FinalityDepth. Re-org protection is not working properly. This could indicate a problem with the remote RPC endpoint, a compatibility issue with a particular blockchain, a bug with this particular blockchain, heads table being truncated too early, remote node out of sync, or something else. If this happens a lot please raise a bug with the Plugin team including a log output sample and details of the chain and RPC endpoint you are using." + ec.lggr.Warnw(warnMsg, append(logArgs, "nConsecutiveBlocksChainTooShort", ec.nConsecutiveBlocksChainTooShort)...) + } else { + logMsg := "Chain length supplied for re-org detection was shorter than FinalityDepth" + ec.lggr.Debugw(logMsg, append(logArgs, "nConsecutiveBlocksChainTooShort", ec.nConsecutiveBlocksChainTooShort)...) 
+ } + ec.nConsecutiveBlocksChainTooShort++ + } else { + ec.nConsecutiveBlocksChainTooShort = 0 + } + etxs, err := ec.txStore.FindTransactionsConfirmedInBlockRange(ctx, head.BlockNumber(), head.EarliestHeadInChain().BlockNumber(), ec.chainID) + if err != nil { + return fmt.Errorf("findTransactionsConfirmedInBlockRange failed: %w", err) + } + + for _, etx := range etxs { + if !hasReceiptInLongestChain(*etx, head) { + if err := ec.markForRebroadcast(*etx, head); err != nil { + return fmt.Errorf("markForRebroadcast failed for etx %v: %w", etx.ID, err) + } + } + } + + // It is safe to process separate keys concurrently + // NOTE: This design will block one key if another takes a really long time to execute + var wg sync.WaitGroup + errors := []error{} + var errMu sync.Mutex + wg.Add(len(ec.enabledAddresses)) + for _, address := range ec.enabledAddresses { + go func(fromAddress ADDR) { + if err := ec.handleAnyInProgressAttempts(ctx, fromAddress, head.BlockNumber()); err != nil { + errMu.Lock() + errors = append(errors, err) + errMu.Unlock() + ec.lggr.Errorw("Error in handleAnyInProgressAttempts", "err", err, "fromAddress", fromAddress) + } + + wg.Done() + }(address) + } + + wg.Wait() + + return multierr.Combine(errors...) 
+} + +func hasReceiptInLongestChain[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH, BLOCK_HASH types.Hashable, + SEQ types.Sequence, + FEE feetypes.Fee, +](etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], head types.Head[BLOCK_HASH]) bool { + for { + for _, attempt := range etx.TxAttempts { + for _, receipt := range attempt.Receipts { + if receipt.GetBlockHash().String() == head.BlockHash().String() && receipt.GetBlockNumber().Int64() == head.BlockNumber() { + return true + } + } + } + if head.GetParent() == nil { + return false + } + head = head.GetParent() + } +} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) markForRebroadcast(etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], head types.Head[BLOCK_HASH]) error { + if len(etx.TxAttempts) == 0 { + return fmt.Errorf("invariant violation: expected tx %v to have at least one attempt", etx.ID) + } + + // Rebroadcast the one with the highest gas price + attempt := etx.TxAttempts[0] + var receipt txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH] + if len(attempt.Receipts) > 0 { + receipt = attempt.Receipts[0] + } + + logValues := []interface{}{ + "txhash", attempt.Hash.String(), + "currentBlockNum", head.BlockNumber(), + "currentBlockHash", head.BlockHash().String(), + "txID", etx.ID, + "attemptID", attempt.ID, + "nReceipts", len(attempt.Receipts), + "id", "confirmer", + } + + // nil check on receipt interface + if receipt != nil { + logValues = append(logValues, + "replacementBlockHashAtConfirmedHeight", head.HashAtHeight(receipt.GetBlockNumber().Int64()), + "confirmedInBlockNum", receipt.GetBlockNumber(), + "confirmedInBlockHash", receipt.GetBlockHash(), + "confirmedInTxIndex", receipt.GetTransactionIndex(), + ) + } + + ec.lggr.Infow(fmt.Sprintf("Re-org detected. Rebroadcasting transaction %s which may have been re-org'd out of the main chain", attempt.Hash.String()), logValues...) 
+ + // Put it back in progress and delete all receipts (they do not apply to the new chain) + if err := ec.txStore.UpdateTxForRebroadcast(ec.ctx, etx, attempt); err != nil { + return fmt.Errorf("markForRebroadcast failed: %w", err) + } + + return nil +} + +// ForceRebroadcast sends a transaction for every sequence in the given sequence range at the given gas price. +// If a tx exists for this sequence, we re-send the existing tx with the supplied parameters. +// If a tx doesn't exist for this sequence, we send a zero transaction. +// This operates completely orthogonal to the normal Confirmer and can result in untracked attempts! +// Only for emergency usage. +// This is in case of some unforeseen scenario where the node is refusing to release the lock. KISS. +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) ForceRebroadcast(ctx context.Context, seqs []SEQ, fee FEE, address ADDR, overrideGasLimit uint32) error { + if len(seqs) == 0 { + ec.lggr.Infof("ForceRebroadcast: No sequences provided. 
Skipping") + return nil + } + ec.lggr.Infof("ForceRebroadcast: will rebroadcast transactions for all sequences between %v and %v", seqs[0], seqs[len(seqs)-1]) + + for _, seq := range seqs { + + etx, err := ec.txStore.FindTxWithSequence(ctx, address, seq) + if err != nil { + return fmt.Errorf("ForceRebroadcast failed: %w", err) + } + if etx == nil { + ec.lggr.Debugf("ForceRebroadcast: no tx found with sequence %s, will rebroadcast empty transaction", seq) + hashStr, err := ec.sendEmptyTransaction(ctx, address, seq, overrideGasLimit, fee) + if err != nil { + ec.lggr.Errorw("ForceRebroadcast: failed to send empty transaction", "sequence", seq, "err", err) + continue + } + ec.lggr.Infow("ForceRebroadcast: successfully rebroadcast empty transaction", "sequence", seq, "hash", hashStr) + } else { + ec.lggr.Debugf("ForceRebroadcast: got tx %v with sequence %v, will rebroadcast this transaction", etx.ID, *etx.Sequence) + if overrideGasLimit != 0 { + etx.FeeLimit = overrideGasLimit + } + attempt, _, err := ec.NewCustomTxAttempt(*etx, fee, etx.FeeLimit, 0x0, ec.lggr) + if err != nil { + ec.lggr.Errorw("ForceRebroadcast: failed to create new attempt", "txID", etx.ID, "err", err) + continue + } + attempt.Tx = *etx // for logging + ec.lggr.Debugw("Sending transaction", "txAttemptID", attempt.ID, "txHash", attempt.Hash, "err", err, "meta", etx.Meta, "feeLimit", etx.FeeLimit, "attempt", attempt) + if errCode, err := ec.client.SendTransactionReturnCode(ctx, *etx, attempt, ec.lggr); errCode != client.Successful && err != nil { + ec.lggr.Errorw(fmt.Sprintf("ForceRebroadcast: failed to rebroadcast tx %v with sequence %v and gas limit %v: %s", etx.ID, *etx.Sequence, etx.FeeLimit, err.Error()), "err", err, "fee", attempt.TxFee) + continue + } + ec.lggr.Infof("ForceRebroadcast: successfully rebroadcast tx %v with hash: 0x%x", etx.ID, attempt.Hash) + } + } + return nil +} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) sendEmptyTransaction(ctx 
context.Context, fromAddress ADDR, seq SEQ, overrideGasLimit uint32, fee FEE) (string, error) { + gasLimit := overrideGasLimit + if gasLimit == 0 { + gasLimit = ec.feeConfig.LimitDefault() + } + txhash, err := ec.client.SendEmptyTransaction(ctx, ec.TxAttemptBuilder.NewEmptyTxAttempt, seq, gasLimit, fee, fromAddress) + if err != nil { + return "", fmt.Errorf("(Confirmer).sendEmptyTransaction failed: %w", err) + } + return txhash, nil +} + +// ResumePendingTaskRuns issues callbacks to task runs that are pending waiting for receipts +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) ResumePendingTaskRuns(ctx context.Context, head types.Head[BLOCK_HASH]) error { + + receiptsPlus, err := ec.txStore.FindTxesPendingCallback(ctx, head.BlockNumber(), ec.chainID) + + if err != nil { + return err + } + + if len(receiptsPlus) > 0 { + ec.lggr.Debugf("Resuming %d task runs pending receipt", len(receiptsPlus)) + } else { + ec.lggr.Debug("No task runs to resume") + } + for _, data := range receiptsPlus { + var taskErr error + var output interface{} + if data.FailOnRevert && data.Receipt.GetStatus() == 0 { + taskErr = fmt.Errorf("transaction %s reverted on-chain", data.Receipt.GetTxHash()) + } else { + output = data.Receipt + } + + ec.lggr.Debugw("Callback: resuming tx with receipt", "output", output, "taskErr", taskErr, "pipelineTaskRunID", data.ID) + if err := ec.resumeCallback(data.ID, output, taskErr); err != nil { + return fmt.Errorf("failed to resume suspended pipeline run: %w", err) + } + // Mark tx as having completed callback + if err := ec.txStore.UpdateTxCallbackCompleted(ctx, data.ID, ec.chainID); err != nil { + return err + } + } + + return nil +} + +// observeUntilTxConfirmed observes the promBlocksUntilTxConfirmed metric for each confirmed +// transaction. 
+func observeUntilTxConfirmed[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH, BLOCK_HASH types.Hashable, + R txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH], + SEQ types.Sequence, + FEE feetypes.Fee, +](chainID CHAIN_ID, attempts []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], receipts []R) { + for _, attempt := range attempts { + for _, r := range receipts { + if attempt.Hash.String() != r.GetTxHash().String() { + continue + } + + // We estimate the time until confirmation by subtracting from the time the tx (not the attempt) + // was created. We want to measure the amount of time taken from when a transaction is created + // via e.g Txm.CreateTransaction to when it is confirmed on-chain, regardless of how many attempts + // were needed to achieve this. + duration := time.Since(attempt.Tx.CreatedAt) + promTimeUntilTxConfirmed. + WithLabelValues(chainID.String()). + Observe(float64(duration)) + + // Since a tx can have many attempts, we take the number of blocks to confirm as the block number + // of the receipt minus the block number of the first ever broadcast for this transaction. + broadcastBefore := iutils.MinFunc(attempt.Tx.TxAttempts, func(attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) int64 { + if attempt.BroadcastBeforeBlockNum != nil { + return *attempt.BroadcastBeforeBlockNum + } + return 0 + }) + if broadcastBefore > 0 { + blocksElapsed := r.GetBlockNumber().Int64() - broadcastBefore + promBlocksUntilTxConfirmed. + WithLabelValues(chainID.String()). + Observe(float64(blocksElapsed)) + } + } + } +} diff --git a/common/txmgr/mocks/tx_manager.go b/common/txmgr/mocks/tx_manager.go new file mode 100644 index 00000000..cc206fe6 --- /dev/null +++ b/common/txmgr/mocks/tx_manager.go @@ -0,0 +1,451 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + big "math/big" + + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + mock "github.com/stretchr/testify/mock" + + null "gopkg.in/guregu/null.v4" + + txmgr "github.com/goplugin/pluginv3.0/v2/common/txmgr" + + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + + types "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// TxManager is an autogenerated mock type for the TxManager type +type TxManager[CHAIN_ID types.ID, HEAD types.Head[BLOCK_HASH], ADDR types.Hashable, TX_HASH types.Hashable, BLOCK_HASH types.Hashable, SEQ types.Sequence, FEE feetypes.Fee] struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CountTransactionsByState provides a mock function with given fields: ctx, state +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) CountTransactionsByState(ctx context.Context, state txmgrtypes.TxState) (uint32, error) { + ret := _m.Called(ctx, state) + + if len(ret) == 0 { + panic("no return value specified for CountTransactionsByState") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.TxState) (uint32, error)); ok { + return rf(ctx, state) + } + if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.TxState) uint32); ok { + r0 = rf(ctx, state) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(context.Context, txmgrtypes.TxState) error); ok { + r1 = rf(ctx, state) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateTransaction provides a mock function with given fields: ctx, txRequest +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, 
TX_HASH, BLOCK_HASH, SEQ, FEE]) CreateTransaction(ctx context.Context, txRequest txmgrtypes.TxRequest[ADDR, TX_HASH]) (txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, txRequest) + + if len(ret) == 0 { + panic("no return value specified for CreateTransaction") + } + + var r0 txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.TxRequest[ADDR, TX_HASH]) (txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, txRequest) + } + if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.TxRequest[ADDR, TX_HASH]) txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, txRequest) + } else { + r0 = ret.Get(0).(txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } + + if rf, ok := ret.Get(1).(func(context.Context, txmgrtypes.TxRequest[ADDR, TX_HASH]) error); ok { + r1 = rf(ctx, txRequest) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindEarliestUnconfirmedBroadcastTime provides a mock function with given fields: ctx +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindEarliestUnconfirmedBroadcastTime(ctx context.Context) (null.Time, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for FindEarliestUnconfirmedBroadcastTime") + } + + var r0 null.Time + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (null.Time, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) null.Time); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(null.Time) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindEarliestUnconfirmedTxAttemptBlock provides a mock function with given fields: ctx +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) 
FindEarliestUnconfirmedTxAttemptBlock(ctx context.Context) (null.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for FindEarliestUnconfirmedTxAttemptBlock") + } + + var r0 null.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (null.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) null.Int); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(null.Int) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxesByMetaFieldAndStates provides a mock function with given fields: ctx, metaField, metaValue, states, chainID +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []txmgrtypes.TxState, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, metaField, metaValue, states, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxesByMetaFieldAndStates") + } + + var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, []txmgrtypes.TxState, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, metaField, metaValue, states, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, []txmgrtypes.TxState, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, metaField, metaValue, states, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, []txmgrtypes.TxState, *big.Int) error); ok { + r1 = rf(ctx, metaField, metaValue, states, 
chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxesWithAttemptsAndReceiptsByIdsAndState provides a mock function with given fields: ctx, ids, states, chainID +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []txmgrtypes.TxState, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, ids, states, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxesWithAttemptsAndReceiptsByIdsAndState") + } + + var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []big.Int, []txmgrtypes.TxState, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, ids, states, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, []big.Int, []txmgrtypes.TxState, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, ids, states, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []big.Int, []txmgrtypes.TxState, *big.Int) error); ok { + r1 = rf(ctx, ids, states, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxesWithMetaFieldByReceiptBlockNum provides a mock function with given fields: ctx, metaField, blockNum, chainID +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, metaField, blockNum, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxesWithMetaFieldByReceiptBlockNum") + 
} + + var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, int64, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, metaField, blockNum, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, int64, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, metaField, blockNum, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, int64, *big.Int) error); ok { + r1 = rf(ctx, metaField, blockNum, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxesWithMetaFieldByStates provides a mock function with given fields: ctx, metaField, states, chainID +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []txmgrtypes.TxState, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, metaField, states, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxesWithMetaFieldByStates") + } + + var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, []txmgrtypes.TxState, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, metaField, states, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, []txmgrtypes.TxState, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, metaField, states, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context, string, []txmgrtypes.TxState, *big.Int) error); ok { + r1 = rf(ctx, metaField, states, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetForwarderForEOA provides a mock function with given fields: eoa +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetForwarderForEOA(eoa ADDR) (ADDR, error) { + ret := _m.Called(eoa) + + if len(ret) == 0 { + panic("no return value specified for GetForwarderForEOA") + } + + var r0 ADDR + var r1 error + if rf, ok := ret.Get(0).(func(ADDR) (ADDR, error)); ok { + return rf(eoa) + } + if rf, ok := ret.Get(0).(func(ADDR) ADDR); ok { + r0 = rf(eoa) + } else { + r0 = ret.Get(0).(ADDR) + } + + if rf, ok := ret.Get(1).(func(ADDR) error); ok { + r1 = rf(eoa) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HealthReport provides a mock function with given fields: +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// OnNewLongestChain provides a mock function with given fields: ctx, head +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) OnNewLongestChain(ctx context.Context, head HEAD) { + _m.Called(ctx, head) +} + +// Ready provides a mock function with given fields: +func (_m *TxManager[CHAIN_ID, 
HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RegisterResumeCallback provides a mock function with given fields: fn +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) RegisterResumeCallback(fn txmgr.ResumeCallback) { + _m.Called(fn) +} + +// Reset provides a mock function with given fields: addr, abandon +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Reset(addr ADDR, abandon bool) error { + ret := _m.Called(addr, abandon) + + if len(ret) == 0 { + panic("no return value specified for Reset") + } + + var r0 error + if rf, ok := ret.Get(0).(func(ADDR, bool) error); ok { + r0 = rf(addr, abandon) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SendNativeToken provides a mock function with given fields: ctx, chainID, from, to, value, gasLimit +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) SendNativeToken(ctx context.Context, chainID CHAIN_ID, from ADDR, to ADDR, value big.Int, gasLimit uint32) (txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, chainID, from, to, value, gasLimit) + + if len(ret) == 0 { + panic("no return value specified for SendNativeToken") + } + + var r0 txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID, ADDR, ADDR, big.Int, uint32) (txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, chainID, from, to, value, gasLimit) + } + if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID, ADDR, ADDR, big.Int, uint32) txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, chainID, from, to, value, gasLimit) + } else { + r0 = 
ret.Get(0).(txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } + + if rf, ok := ret.Get(1).(func(context.Context, CHAIN_ID, ADDR, ADDR, big.Int, uint32) error); ok { + r1 = rf(ctx, chainID, from, to, value, gasLimit) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Start provides a mock function with given fields: _a0 +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Trigger provides a mock function with given fields: addr +func (_m *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Trigger(addr ADDR) { + _m.Called(addr) +} + +// NewTxManager creates a new instance of TxManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewTxManager[CHAIN_ID types.ID, HEAD types.Head[BLOCK_HASH], ADDR types.Hashable, TX_HASH types.Hashable, BLOCK_HASH types.Hashable, SEQ types.Sequence, FEE feetypes.Fee](t interface { + mock.TestingT + Cleanup(func()) +}) *TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] { + mock := &TxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/txmgr/models.go b/common/txmgr/models.go new file mode 100644 index 00000000..8854361b --- /dev/null +++ b/common/txmgr/models.go @@ -0,0 +1,14 @@ +package txmgr + +import ( + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" +) + +const ( + TxUnstarted = txmgrtypes.TxState("unstarted") + TxInProgress = txmgrtypes.TxState("in_progress") + TxFatalError = txmgrtypes.TxState("fatal_error") + TxUnconfirmed = txmgrtypes.TxState("unconfirmed") + TxConfirmed = txmgrtypes.TxState("confirmed") + TxConfirmedMissingReceipt = txmgrtypes.TxState("confirmed_missing_receipt") +) diff --git a/common/txmgr/reaper.go b/common/txmgr/reaper.go new file mode 100644 index 00000000..c7ce798a --- /dev/null +++ b/common/txmgr/reaper.go @@ -0,0 +1,122 @@ +package txmgr + +import ( + "fmt" + "sync/atomic" + "time" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// Reaper handles periodic database cleanup for Txm +type Reaper[CHAIN_ID types.ID] struct { + store txmgrtypes.TxHistoryReaper[CHAIN_ID] + config txmgrtypes.ReaperChainConfig + txConfig txmgrtypes.ReaperTransactionsConfig + chainID CHAIN_ID + log logger.Logger + latestBlockNum atomic.Int64 + trigger chan struct{} + chStop services.StopChan + chDone chan struct{} +} + +// NewReaper instantiates a new reaper object +func 
NewReaper[CHAIN_ID types.ID](lggr logger.Logger, store txmgrtypes.TxHistoryReaper[CHAIN_ID], config txmgrtypes.ReaperChainConfig, txConfig txmgrtypes.ReaperTransactionsConfig, chainID CHAIN_ID) *Reaper[CHAIN_ID] { + r := &Reaper[CHAIN_ID]{ + store, + config, + txConfig, + chainID, + logger.Named(lggr, "Reaper"), + atomic.Int64{}, + make(chan struct{}, 1), + make(services.StopChan), + make(chan struct{}), + } + r.latestBlockNum.Store(-1) + return r +} + +// Start the reaper. Should only be called once. +func (r *Reaper[CHAIN_ID]) Start() { + r.log.Debugf("started with age threshold %v and interval %v", r.txConfig.ReaperThreshold(), r.txConfig.ReaperInterval()) + go r.runLoop() +} + +// Stop the reaper. Should only be called once. +func (r *Reaper[CHAIN_ID]) Stop() { + r.log.Debug("stopping") + close(r.chStop) + <-r.chDone +} + +func (r *Reaper[CHAIN_ID]) runLoop() { + defer close(r.chDone) + ticker := time.NewTicker(utils.WithJitter(r.txConfig.ReaperInterval())) + defer ticker.Stop() + for { + select { + case <-r.chStop: + return + case <-ticker.C: + r.work() + ticker.Reset(utils.WithJitter(r.txConfig.ReaperInterval())) + case <-r.trigger: + r.work() + ticker.Reset(utils.WithJitter(r.txConfig.ReaperInterval())) + } + } +} + +func (r *Reaper[CHAIN_ID]) work() { + latestBlockNum := r.latestBlockNum.Load() + if latestBlockNum < 0 { + return + } + err := r.ReapTxes(latestBlockNum) + if err != nil { + r.log.Error("unable to reap old txes: ", err) + } +} + +// SetLatestBlockNum should be called on every new highest block number +func (r *Reaper[CHAIN_ID]) SetLatestBlockNum(latestBlockNum int64) { + if latestBlockNum < 0 { + panic(fmt.Sprintf("latestBlockNum must be 0 or greater, got: %d", latestBlockNum)) + } + was := r.latestBlockNum.Swap(latestBlockNum) + if was < 0 { + // Run reaper once on startup + r.trigger <- struct{}{} + } +} + +// ReapTxes deletes old txes +func (r *Reaper[CHAIN_ID]) ReapTxes(headNum int64) error { + ctx, cancel := r.chStop.NewCtx() + defer 
cancel() + threshold := r.txConfig.ReaperThreshold() + if threshold == 0 { + r.log.Debug("Transactions.ReaperThreshold set to 0; skipping ReapTxes") + return nil + } + minBlockNumberToKeep := headNum - int64(r.config.FinalityDepth()) + mark := time.Now() + timeThreshold := mark.Add(-threshold) + + r.log.Debugw(fmt.Sprintf("reaping old txes created before %s", timeThreshold.Format(time.RFC3339)), "ageThreshold", threshold, "timeThreshold", timeThreshold, "minBlockNumberToKeep", minBlockNumberToKeep) + + if err := r.store.ReapTxHistory(ctx, minBlockNumberToKeep, timeThreshold, r.chainID); err != nil { + return err + } + + r.log.Debugf("ReapTxes completed in %v", time.Since(mark)) + + return nil +} diff --git a/common/txmgr/resender.go b/common/txmgr/resender.go new file mode 100644 index 00000000..a346cef5 --- /dev/null +++ b/common/txmgr/resender.go @@ -0,0 +1,235 @@ +package txmgr + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/goplugin/plugin-common/pkg/chains/label" + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/utils" + + "github.com/goplugin/pluginv3.0/v2/common/client" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +const ( + // pollInterval is the maximum amount of time in addition to + // TxResendAfterThreshold that we will wait before resending an attempt + DefaultResenderPollInterval = 5 * time.Second + + // Alert interval for unconfirmed transaction attempts + unconfirmedTxAlertLogFrequency = 2 * time.Minute + + // timeout value for batchSendTransactions + batchSendTransactionTimeout = 30 * time.Second +) + +// Resender periodically picks up transactions that have been languishing +// unconfirmed for a configured amount of time without being sent, and sends +// their highest priced attempt again. 
This helps to defend against geth/parity +// silently dropping txes, or txes being ejected from the mempool. +// +// Previously we relied on the bumper to do this for us implicitly but there +// can occasionally be problems with this (e.g. abnormally long block times, or +// if gas bumping is disabled) +type Resender[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + R txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH], + SEQ types.Sequence, + FEE feetypes.Fee, +] struct { + txStore txmgrtypes.TransactionStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, SEQ, FEE] + client txmgrtypes.TransactionClient[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + tracker *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE] + ks txmgrtypes.KeyStore[ADDR, CHAIN_ID, SEQ] + chainID CHAIN_ID + interval time.Duration + config txmgrtypes.ResenderChainConfig + txConfig txmgrtypes.ResenderTransactionsConfig + logger logger.SugaredLogger + lastAlertTimestamps map[string]time.Time + + ctx context.Context + cancel context.CancelFunc + chDone chan struct{} +} + +func NewResender[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + R txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH], + SEQ types.Sequence, + FEE feetypes.Fee, +]( + lggr logger.Logger, + txStore txmgrtypes.TransactionStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, SEQ, FEE], + client txmgrtypes.TransactionClient[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + tracker *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE], + ks txmgrtypes.KeyStore[ADDR, CHAIN_ID, SEQ], + pollInterval time.Duration, + config txmgrtypes.ResenderChainConfig, + txConfig txmgrtypes.ResenderTransactionsConfig, +) *Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE] { + if txConfig.ResendAfterThreshold() == 0 { + panic("Resender requires a non-zero threshold") + } + // todo: add context to txStore https://smartcontract-it.atlassian.net/browse/BCI-1585 + ctx, 
cancel := context.WithCancel(context.Background()) + return &Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]{ + txStore, + client, + tracker, + ks, + client.ConfiguredChainID(), + pollInterval, + config, + txConfig, + logger.Sugared(logger.Named(lggr, "Resender")), + make(map[string]time.Time), + ctx, + cancel, + make(chan struct{}), + } +} + +// Start is a comment which satisfies the linter +func (er *Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Start() { + er.logger.Debugf("Enabled with poll interval of %s and age threshold of %s", er.interval, er.txConfig.ResendAfterThreshold()) + go er.runLoop() +} + +// Stop is a comment which satisfies the linter +func (er *Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Stop() { + er.cancel() + <-er.chDone +} + +func (er *Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) runLoop() { + defer close(er.chDone) + + if err := er.resendUnconfirmed(); err != nil { + er.logger.Warnw("Failed to resend unconfirmed transactions", "err", err) + } + + ticker := time.NewTicker(utils.WithJitter(er.interval)) + defer ticker.Stop() + for { + select { + case <-er.ctx.Done(): + return + case <-ticker.C: + if err := er.resendUnconfirmed(); err != nil { + er.logger.Warnw("Failed to resend unconfirmed transactions", "err", err) + } + } + } +} + +func (er *Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) resendUnconfirmed() error { + resendAddresses, err := er.ks.EnabledAddressesForChain(er.chainID) + if err != nil { + return fmt.Errorf("Resender failed getting enabled keys for chain %s: %w", er.chainID.String(), err) + } + + // Tracker currently disabled for BCI-2638; refactor required + // resendAddresses = append(resendAddresses, er.tracker.GetAbandonedAddresses()...) 
+ + ageThreshold := er.txConfig.ResendAfterThreshold() + maxInFlightTransactions := er.txConfig.MaxInFlight() + olderThan := time.Now().Add(-ageThreshold) + var allAttempts []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + + for _, k := range resendAddresses { + var attempts []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + attempts, err = er.txStore.FindTxAttemptsRequiringResend(er.ctx, olderThan, maxInFlightTransactions, er.chainID, k) + if err != nil { + return fmt.Errorf("failed to FindTxAttemptsRequiringResend: %w", err) + } + er.logStuckAttempts(attempts, k) + + allAttempts = append(allAttempts, attempts...) + } + + if len(allAttempts) == 0 { + for k := range er.lastAlertTimestamps { + er.lastAlertTimestamps[k] = time.Now() + } + return nil + } + er.logger.Infow(fmt.Sprintf("Re-sending %d unconfirmed transactions that were last sent over %s ago. These transactions are taking longer than usual to be mined. %s", len(allAttempts), ageThreshold, label.NodeConnectivityProblemWarning), "n", len(allAttempts)) + + batchSize := int(er.config.RPCDefaultBatchSize()) + ctx, cancel := context.WithTimeout(er.ctx, batchSendTransactionTimeout) + defer cancel() + txErrTypes, _, broadcastTime, txIDs, err := er.client.BatchSendTransactions(ctx, allAttempts, batchSize, er.logger) + + // update broadcast times before checking additional errors + if len(txIDs) > 0 { + if updateErr := er.txStore.UpdateBroadcastAts(er.ctx, broadcastTime, txIDs); updateErr != nil { + err = errors.Join(err, fmt.Errorf("failed to update broadcast time: %w", updateErr)) + } + } + if err != nil { + return fmt.Errorf("failed to re-send transactions: %w", err) + } + logResendResult(er.logger, txErrTypes) + + return nil +} + +func logResendResult(lggr logger.Logger, codes []client.SendTxReturnCode) { + var nNew int + var nFatal int + for _, c := range codes { + if c == client.Successful { + nNew++ + } else if c == client.Fatal { + nFatal++ + } + } + 
lggr.Debugw("Completed", "n", len(codes), "nNew", nNew, "nFatal", nFatal) +} + +func (er *Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) logStuckAttempts(attempts []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], fromAddress ADDR) { + if time.Since(er.lastAlertTimestamps[fromAddress.String()]) >= unconfirmedTxAlertLogFrequency { + oldestAttempt, exists := findOldestUnconfirmedAttempt(attempts) + if exists { + // Wait at least 2 times the TxResendAfterThreshold to log critical with an unconfirmedTxAlertDelay + if time.Since(oldestAttempt.CreatedAt) > er.txConfig.ResendAfterThreshold()*2 { + er.lastAlertTimestamps[fromAddress.String()] = time.Now() + er.logger.Errorw("TxAttempt has been unconfirmed for more than max duration", "maxDuration", er.txConfig.ResendAfterThreshold()*2, + "txID", oldestAttempt.TxID, "txFee", oldestAttempt.TxFee, + "BroadcastBeforeBlockNum", oldestAttempt.BroadcastBeforeBlockNum, "Hash", oldestAttempt.Hash, "fromAddress", fromAddress) + } + } + } +} + +func findOldestUnconfirmedAttempt[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH, BLOCK_HASH types.Hashable, + SEQ types.Sequence, + FEE feetypes.Fee, +](attempts []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) (txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], bool) { + var oldestAttempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + if len(attempts) < 1 { + return oldestAttempt, false + } + oldestAttempt = attempts[0] + for i := 1; i < len(attempts); i++ { + if oldestAttempt.CreatedAt.Sub(attempts[i].CreatedAt) <= 0 { + oldestAttempt = attempts[i] + } + } + return oldestAttempt, true +} diff --git a/common/txmgr/sequence_syncer.go b/common/txmgr/sequence_syncer.go new file mode 100644 index 00000000..be3ee66c --- /dev/null +++ b/common/txmgr/sequence_syncer.go @@ -0,0 +1,11 @@ +package txmgr + +import ( + "context" + + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +type 
SequenceSyncer[ADDR types.Hashable, TX_HASH types.Hashable, BLOCK_HASH types.Hashable, SEQ types.Sequence] interface { + Sync(ctx context.Context, addr ADDR, localSequence SEQ) (SEQ, error) +} diff --git a/common/txmgr/strategies.go b/common/txmgr/strategies.go new file mode 100644 index 00000000..b1293ff5 --- /dev/null +++ b/common/txmgr/strategies.go @@ -0,0 +1,70 @@ +package txmgr + +import ( + "context" + "fmt" + "time" + + "github.com/google/uuid" + + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" +) + +var _ txmgrtypes.TxStrategy = SendEveryStrategy{} + +// NewQueueingTxStrategy creates a new TxStrategy that drops the oldest transactions after the +// queue size is exceeded if a queue size is specified, and otherwise does not drop transactions. +func NewQueueingTxStrategy(subject uuid.UUID, queueSize uint32, queryTimeout time.Duration) (strategy txmgrtypes.TxStrategy) { + if queueSize > 0 { + strategy = NewDropOldestStrategy(subject, queueSize, queryTimeout) + } else { + strategy = SendEveryStrategy{} + } + return +} + +// NewSendEveryStrategy creates a new TxStrategy that does not drop transactions. +func NewSendEveryStrategy() txmgrtypes.TxStrategy { + return SendEveryStrategy{} +} + +// SendEveryStrategy will always send the tx +type SendEveryStrategy struct{} + +func (SendEveryStrategy) Subject() uuid.NullUUID { return uuid.NullUUID{} } +func (SendEveryStrategy) PruneQueue(ctx context.Context, pruneService txmgrtypes.UnstartedTxQueuePruner) ([]int64, error) { + return nil, nil +} + +var _ txmgrtypes.TxStrategy = DropOldestStrategy{} + +// DropOldestStrategy will send the newest N transactions, older ones will be +// removed from the queue +type DropOldestStrategy struct { + subject uuid.UUID + queueSize uint32 + queryTimeout time.Duration +} + +// NewDropOldestStrategy creates a new TxStrategy that drops the oldest transactions after the +// queue size is exceeded. 
+func NewDropOldestStrategy(subject uuid.UUID, queueSize uint32, queryTimeout time.Duration) DropOldestStrategy { + return DropOldestStrategy{subject, queueSize, queryTimeout} +} + +func (s DropOldestStrategy) Subject() uuid.NullUUID { + return uuid.NullUUID{UUID: s.subject, Valid: true} +} + +func (s DropOldestStrategy) PruneQueue(ctx context.Context, pruneService txmgrtypes.UnstartedTxQueuePruner) (ids []int64, err error) { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, s.queryTimeout) + defer cancel() + + // NOTE: We prune one less than the queue size to prevent the queue from exceeding the max queue size. Which could occur if a new transaction is added to the queue right after we prune. + ids, err = pruneService.PruneUnstartedTxQueue(ctx, s.queueSize-1, s.subject) + if err != nil { + return ids, fmt.Errorf("DropOldestStrategy#PruneQueue failed: %w", err) + } + return +} diff --git a/common/txmgr/test_helpers.go b/common/txmgr/test_helpers.go new file mode 100644 index 00000000..be88cabb --- /dev/null +++ b/common/txmgr/test_helpers.go @@ -0,0 +1,51 @@ +package txmgr + +import ( + "context" + "time" + + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" +) + +// TEST ONLY FUNCTIONS +// these need to be exported for the txmgr tests to continue to work + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) XXXTestSetClient(client txmgrtypes.TxmClient[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) { + ec.client = client +} + +func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) XXXTestSetTTL(ttl time.Duration) { + tr.ttl = ttl +} + +func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) XXXDeliverBlock(blockHeight int64) { + tr.mb.Deliver(blockHeight) +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) XXXTestStartInternal(ctx context.Context) error { + return eb.startInternal(ctx) +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, 
TX_HASH, BLOCK_HASH, SEQ, FEE]) XXXTestCloseInternal() error { + return eb.closeInternal() +} + +func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) XXXTestDisableUnstartedTxAutoProcessing() { + eb.processUnstartedTxsImpl = func(ctx context.Context, fromAddress ADDR) (retryable bool, err error) { return false, nil } +} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) XXXTestStartInternal() error { + return ec.startInternal() +} + +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) XXXTestCloseInternal() error { + return ec.closeInternal() +} + +func (er *Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) XXXTestResendUnconfirmed() error { + return er.resendUnconfirmed() +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) XXXTestAbandon(addr ADDR) (err error) { + return b.abandon(addr) +} diff --git a/common/txmgr/tracker.go b/common/txmgr/tracker.go new file mode 100644 index 00000000..3fc2eb0f --- /dev/null +++ b/common/txmgr/tracker.go @@ -0,0 +1,344 @@ +package txmgr + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +const ( + // defaultTTL is the default time to live for abandoned transactions + // After this TTL, the TXM stops tracking abandoned Txs. + defaultTTL = 6 * time.Hour + // handleTxesTimeout represents a sanity limit on how long handleTxesByState + // should take to complete + handleTxesTimeout = 10 * time.Minute +) + +// AbandonedTx is a transaction who's 'FromAddress' was removed from the KeyStore(by the Node Operator). +// Thus, any new attempts for this Tx can't be signed/created. 
This means no fee bumping can be done. +// However, the Tx may still have live attempts in the chain's mempool, and could get confirmed on the +// chain as-is. Thus, the TXM should not directly discard this Tx. +type AbandonedTx[ADDR types.Hashable] struct { + id int64 + fromAddress ADDR +} + +// Tracker tracks all transactions which have abandoned fromAddresses. +// The fromAddresses can be deleted by Node Operators from the KeyStore. In such cases, +// existing in-flight transactions for these fromAddresses are considered abandoned too. +// Since such Txs can still have attempts on chain's mempool, these could still be confirmed. +// This tracker just tracks such Txs for some time, in case they get confirmed as-is. +type Tracker[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + R txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH], + SEQ types.Sequence, + FEE feetypes.Fee, +] struct { + services.StateMachine + txStore txmgrtypes.TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] + keyStore txmgrtypes.KeyStore[ADDR, CHAIN_ID, SEQ] + chainID CHAIN_ID + lggr logger.Logger + enabledAddrs map[ADDR]bool + txCache map[int64]AbandonedTx[ADDR] + ttl time.Duration + lock sync.Mutex + mb *mailbox.Mailbox[int64] + wg sync.WaitGroup + isStarted bool + ctx context.Context + ctxCancel context.CancelFunc +} + +func NewTracker[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + R txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH], + SEQ types.Sequence, + FEE feetypes.Fee, +]( + txStore txmgrtypes.TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE], + keyStore txmgrtypes.KeyStore[ADDR, CHAIN_ID, SEQ], + chainID CHAIN_ID, + lggr logger.Logger, +) *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE] { + return &Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]{ + txStore: txStore, + keyStore: keyStore, + chainID: chainID, + lggr: logger.Named(lggr, "TxMgrTracker"), + 
enabledAddrs: map[ADDR]bool{}, + txCache: map[int64]AbandonedTx[ADDR]{}, + ttl: defaultTTL, + mb: mailbox.NewSingle[int64](), + lock: sync.Mutex{}, + wg: sync.WaitGroup{}, + } +} + +func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Start(_ context.Context) (err error) { + tr.lggr.Info("Abandoned transaction tracking enabled") + return tr.StartOnce("Tracker", func() error { + return tr.startInternal() + }) +} + +func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) startInternal() (err error) { + tr.lock.Lock() + defer tr.lock.Unlock() + + tr.ctx, tr.ctxCancel = context.WithCancel(context.Background()) + + if err := tr.setEnabledAddresses(); err != nil { + return fmt.Errorf("failed to set enabled addresses: %w", err) + } + tr.lggr.Info("Enabled addresses set") + + if err := tr.trackAbandonedTxes(tr.ctx); err != nil { + return fmt.Errorf("failed to track abandoned txes: %w", err) + } + + tr.isStarted = true + if len(tr.txCache) == 0 { + tr.lggr.Info("no abandoned txes found, skipping runLoop") + return nil + } + + tr.lggr.Infof("%d abandoned txes found, starting runLoop", len(tr.txCache)) + tr.wg.Add(1) + go tr.runLoop() + return nil +} + +func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Close() error { + tr.lock.Lock() + defer tr.lock.Unlock() + return tr.StopOnce("Tracker", func() error { + return tr.closeInternal() + }) +} + +func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) closeInternal() error { + tr.lggr.Info("stopping tracker") + if !tr.isStarted { + return fmt.Errorf("tracker not started") + } + tr.ctxCancel() + tr.wg.Wait() + tr.isStarted = false + return nil +} + +func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) runLoop() { + defer tr.wg.Done() + ttlExceeded := time.NewTicker(tr.ttl) + defer ttlExceeded.Stop() + for { + select { + case <-tr.mb.Notify(): + for { + if tr.ctx.Err() != nil { + return + } + blockHeight, exists := tr.mb.Retrieve() + if !exists { + break 
+			}
+			if err := tr.HandleTxesByState(tr.ctx, blockHeight); err != nil {
+				tr.lggr.Errorw(fmt.Errorf("failed to handle txes by state: %w", err).Error())
+			}
+		}
+		case <-ttlExceeded.C:
+			tr.lggr.Info("ttl exceeded")
+			tr.MarkAllTxesFatal(tr.ctx)
+			return
+		case <-tr.ctx.Done():
+			return
+		}
+	}
+}
+
+// GetAbandonedAddresses returns the fromAddresses of all transactions currently
+// tracked as abandoned. Returns an empty slice if the tracker is not started.
+func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) GetAbandonedAddresses() []ADDR {
+	tr.lock.Lock()
+	defer tr.lock.Unlock()
+
+	if !tr.isStarted {
+		return []ADDR{}
+	}
+
+	// BUG FIX: make([]ADDR, len(tr.txCache)) followed by append produced a slice of
+	// double length whose first half was zero-value addresses; allocate with zero
+	// length and full capacity instead.
+	abandonedAddrs := make([]ADDR, 0, len(tr.txCache))
+	for _, atx := range tr.txCache {
+		abandonedAddrs = append(abandonedAddrs, atx.fromAddress)
+	}
+	return abandonedAddrs
+}
+
+// IsStarted reports whether the tracker has been started.
+func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) IsStarted() bool {
+	tr.lock.Lock()
+	defer tr.lock.Unlock()
+	return tr.isStarted
+}
+
+// setEnabledAddresses caches the keystore's enabled addresses for this chain.
+func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) setEnabledAddresses() error {
+	enabledAddrs, err := tr.keyStore.EnabledAddressesForChain(tr.chainID)
+	if err != nil {
+		return fmt.Errorf("failed to get enabled addresses for chain: %w", err)
+	}
+
+	if len(enabledAddrs) == 0 {
+		tr.lggr.Warnf("enabled address list is empty")
+	}
+
+	for _, addr := range enabledAddrs {
+		tr.enabledAddrs[addr] = true
+	}
+	return nil
+}
+
+// trackAbandonedTxes called once to find and insert all abandoned txes into the tracker. 
+func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) trackAbandonedTxes(ctx context.Context) (err error) {
+	if tr.isStarted {
+		return fmt.Errorf("tracker already started")
+	}
+
+	tr.lggr.Info("Retrieving non fatal transactions from txStore")
+	nonFatalTxes, err := tr.txStore.GetNonFatalTransactions(ctx, tr.chainID)
+	if err != nil {
+		return fmt.Errorf("failed to get non fatal txes from txStore: %w", err)
+	}
+
+	// insert abandoned txes
+	for _, tx := range nonFatalTxes {
+		if !tr.enabledAddrs[tx.FromAddress] {
+			tr.insertTx(tx)
+		}
+	}
+
+	if err := tr.handleTxesByState(ctx, 0); err != nil {
+		return fmt.Errorf("failed to handle txes by state: %w", err)
+	}
+
+	return nil
+}
+
+// HandleTxesByState processes every tracked abandoned tx according to its current
+// state, bounded by a handleTxesTimeout deadline.
+func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) HandleTxesByState(ctx context.Context, blockHeight int64) error {
+	tr.lock.Lock()
+	defer tr.lock.Unlock()
+	// BUG FIX: this previously overwrote tr.ctx/tr.ctxCancel with the timeout context,
+	// clobbering the tracker's lifecycle context (so closeInternal's ctxCancel would
+	// cancel the wrong context and runLoop could never be stopped), and then passed
+	// the caller's ctx onward so the timeout was never even applied. Use a local
+	// timeout context and pass it through instead.
+	ctx, cancel := context.WithTimeout(ctx, handleTxesTimeout)
+	defer cancel()
+	return tr.handleTxesByState(ctx, blockHeight)
+}
+
+func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) handleTxesByState(ctx context.Context, blockHeight int64) error {
+	tr.lggr.Info("Handling transactions by state")
+
+	for id, atx := range tr.txCache {
+		tx, err := tr.txStore.GetTxByID(ctx, atx.id)
+		if err != nil {
+			return fmt.Errorf("failed to get tx by ID: %w", err)
+		}
+
+		switch tx.State {
+		case TxConfirmed:
+			if err := tr.handleConfirmedTx(tx, blockHeight); err != nil {
+				return fmt.Errorf("failed to handle confirmed txes: %w", err)
+			}
+		case TxConfirmedMissingReceipt, TxUnconfirmed:
+			// Keep tracking tx
+		case TxInProgress, TxUnstarted:
+			// Tx could never be sent on chain even once. That means that we need to sign
+			// an attempt to even broadcast this Tx to the chain. Since the fromAddress
+			// is deleted, we can't sign it.
+			errMsg := "The FromAddress for this Tx was deleted before this Tx could be broadcast to the chain." 
+ if err := tr.markTxFatal(ctx, tx, errMsg); err != nil { + return fmt.Errorf("failed to mark tx as fatal: %w", err) + } + delete(tr.txCache, id) + case TxFatalError: + delete(tr.txCache, id) + default: + tr.lggr.Errorw(fmt.Sprintf("unhandled transaction state: %v", tx.State)) + } + } + + return nil +} + +// handleConfirmedTx removes a transaction from the tracker if it's been finalized on chain +func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) handleConfirmedTx( + tx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + blockHeight int64, +) error { + finalized, err := tr.txStore.IsTxFinalized(tr.ctx, blockHeight, tx.ID, tr.chainID) + if err != nil { + return fmt.Errorf("failed to check if tx is finalized: %w", err) + } + + if finalized { + delete(tr.txCache, tx.ID) + } + + return nil +} + +// insertTx inserts a transaction into the tracker as an AbandonedTx +func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) insertTx( + tx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) { + if _, contains := tr.txCache[tx.ID]; contains { + return + } + + tr.txCache[tx.ID] = AbandonedTx[ADDR]{ + id: tx.ID, + fromAddress: tx.FromAddress, + } + tr.lggr.Debugw(fmt.Sprintf("inserted tx %v", tx.ID)) +} + +func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) markTxFatal(ctx context.Context, + tx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + errMsg string) error { + tx.Error.SetValid(errMsg) + + // Set state to TxInProgress so the tracker can attempt to mark it as fatal + tx.State = TxInProgress + if err := tr.txStore.UpdateTxFatalError(ctx, tx); err != nil { + return fmt.Errorf("failed to mark tx %v as abandoned: %w", tx.ID, err) + } + return nil +} + +func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) MarkAllTxesFatal(ctx context.Context) { + tr.lock.Lock() + defer tr.lock.Unlock() + errMsg := fmt.Sprintf( + "fromAddress for this Tx was deleted, and existing attempts 
onchain didn't finalize within %d hours, thus this Tx was abandoned.", + int(tr.ttl.Hours())) + + for _, atx := range tr.txCache { + tx, err := tr.txStore.GetTxByID(ctx, atx.id) + if err != nil { + tr.lggr.Errorw(fmt.Errorf("failed to get tx by ID: %w", err).Error()) + continue + } + + if err := tr.markTxFatal(ctx, tx, errMsg); err != nil { + tr.lggr.Errorw(fmt.Errorf("failed to mark tx as abandoned: %w", err).Error()) + } + } +} diff --git a/common/txmgr/txmgr.go b/common/txmgr/txmgr.go new file mode 100644 index 00000000..8a020b28 --- /dev/null +++ b/common/txmgr/txmgr.go @@ -0,0 +1,721 @@ +package txmgr + +import ( + "context" + "database/sql" + "errors" + "fmt" + "math/big" + "sync" + "time" + + "github.com/google/uuid" + nullv4 "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + iutils "github.com/goplugin/pluginv3.0/v2/common/internal/utils" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// For more information about the Txm architecture, see the design doc: +// https://www.notion.so/plugin/Txm-Architecture-Overview-9dc62450cd7a443ba9e7dceffa1a8d6b + +// ResumeCallback is assumed to be idempotent +type ResumeCallback func(id uuid.UUID, result interface{}, err error) error + +// TxManager is the main component of the transaction manager. +// It is also the interface to external callers. 
+// +//go:generate mockery --quiet --recursive --name TxManager --output ./mocks/ --case=underscore --structname TxManager --filename tx_manager.go +type TxManager[ + CHAIN_ID types.ID, + HEAD types.Head[BLOCK_HASH], + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + SEQ types.Sequence, + FEE feetypes.Fee, +] interface { + types.HeadTrackable[HEAD, BLOCK_HASH] + services.Service + Trigger(addr ADDR) + CreateTransaction(ctx context.Context, txRequest txmgrtypes.TxRequest[ADDR, TX_HASH]) (etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + GetForwarderForEOA(eoa ADDR) (forwarder ADDR, err error) + RegisterResumeCallback(fn ResumeCallback) + SendNativeToken(ctx context.Context, chainID CHAIN_ID, from, to ADDR, value big.Int, gasLimit uint32) (etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + Reset(addr ADDR, abandon bool) error + // Find transactions by a field in the TxMeta blob and transaction states + FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + // Find transactions with a non-null TxMeta field that was provided by transaction states + FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + // Find transactions with a non-null TxMeta field that was provided and a receipt block number greater than or equal to the one provided + FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + // Find transactions loaded with transaction attempts and receipts by transaction IDs and states + FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx 
context.Context, ids []big.Int, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + FindEarliestUnconfirmedBroadcastTime(ctx context.Context) (nullv4.Time, error) + FindEarliestUnconfirmedTxAttemptBlock(ctx context.Context) (nullv4.Int, error) + CountTransactionsByState(ctx context.Context, state txmgrtypes.TxState) (count uint32, err error) +} + +type reset struct { + // f is the function to execute between stopping/starting the + // Broadcaster and Confirmer + f func() + // done is either closed after running f, or returns error if f could not + // be run for some reason + done chan error +} + +type Txm[ + CHAIN_ID types.ID, + HEAD types.Head[BLOCK_HASH], + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + R txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH], + SEQ types.Sequence, + FEE feetypes.Fee, +] struct { + services.StateMachine + logger logger.SugaredLogger + txStore txmgrtypes.TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] + config txmgrtypes.TransactionManagerChainConfig + txConfig txmgrtypes.TransactionManagerTransactionsConfig + keyStore txmgrtypes.KeyStore[ADDR, CHAIN_ID, SEQ] + chainID CHAIN_ID + checkerFactory TransmitCheckerFactory[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + pruneQueueAndCreateLock sync.Mutex + + chHeads chan HEAD + trigger chan ADDR + reset chan reset + resumeCallback ResumeCallback + + chStop services.StopChan + chSubbed chan struct{} + wg sync.WaitGroup + + reaper *Reaper[CHAIN_ID] + resender *Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE] + broadcaster *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + confirmer *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE] + tracker *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE] + fwdMgr txmgrtypes.ForwarderManager[ADDR] + txAttemptBuilder txmgrtypes.TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, 
FEE] + sequenceSyncer SequenceSyncer[ADDR, TX_HASH, BLOCK_HASH, SEQ] +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) RegisterResumeCallback(fn ResumeCallback) { + b.resumeCallback = fn + b.broadcaster.SetResumeCallback(fn) + b.confirmer.SetResumeCallback(fn) +} + +// NewTxm creates a new Txm with the given configuration. +func NewTxm[ + CHAIN_ID types.ID, + HEAD types.Head[BLOCK_HASH], + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + R txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH], + SEQ types.Sequence, + FEE feetypes.Fee, +]( + chainId CHAIN_ID, + cfg txmgrtypes.TransactionManagerChainConfig, + txCfg txmgrtypes.TransactionManagerTransactionsConfig, + keyStore txmgrtypes.KeyStore[ADDR, CHAIN_ID, SEQ], + lggr logger.Logger, + checkerFactory TransmitCheckerFactory[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + fwdMgr txmgrtypes.ForwarderManager[ADDR], + txAttemptBuilder txmgrtypes.TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + txStore txmgrtypes.TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE], + sequenceSyncer SequenceSyncer[ADDR, TX_HASH, BLOCK_HASH, SEQ], + broadcaster *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + confirmer *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE], + resender *Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE], + tracker *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE], +) *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE] { + b := Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]{ + logger: logger.Sugared(lggr), + txStore: txStore, + config: cfg, + txConfig: txCfg, + keyStore: keyStore, + chainID: chainId, + checkerFactory: checkerFactory, + chHeads: make(chan HEAD), + trigger: make(chan ADDR), + chStop: make(chan struct{}), + chSubbed: make(chan struct{}), + reset: make(chan reset), + fwdMgr: fwdMgr, + txAttemptBuilder: txAttemptBuilder, + sequenceSyncer: 
sequenceSyncer, + broadcaster: broadcaster, + confirmer: confirmer, + resender: resender, + tracker: tracker, + } + + if txCfg.ResendAfterThreshold() <= 0 { + b.logger.Info("Resender: Disabled") + } + if txCfg.ReaperThreshold() > 0 && txCfg.ReaperInterval() > 0 { + b.reaper = NewReaper[CHAIN_ID](lggr, b.txStore, cfg, txCfg, chainId) + } else { + b.logger.Info("TxReaper: Disabled") + } + + return &b +} + +// Start starts Txm service. +// The provided context can be used to terminate Start sequence. +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Start(ctx context.Context) (merr error) { + return b.StartOnce("Txm", func() error { + var ms services.MultiStart + if err := ms.Start(ctx, b.broadcaster); err != nil { + return fmt.Errorf("Txm: Broadcaster failed to start: %w", err) + } + if err := ms.Start(ctx, b.confirmer); err != nil { + return fmt.Errorf("Txm: Confirmer failed to start: %w", err) + } + + if err := ms.Start(ctx, b.txAttemptBuilder); err != nil { + return fmt.Errorf("Txm: Estimator failed to start: %w", err) + } + + /* Tracker currently disabled for BCI-2638; refactor required + b.logger.Info("Txm starting tracker") + if err := ms.Start(ctx, b.tracker); err != nil { + return fmt.Errorf("Txm: Tracker failed to start: %w", err) + } + */ + + b.logger.Info("Txm starting runLoop") + b.wg.Add(1) + go b.runLoop() + <-b.chSubbed + + if b.reaper != nil { + b.reaper.Start() + } + + if b.resender != nil { + b.resender.Start() + } + + if b.fwdMgr != nil { + if err := ms.Start(ctx, b.fwdMgr); err != nil { + return fmt.Errorf("Txm: ForwarderManager failed to start: %w", err) + } + } + + return nil + }) +} + +// Reset stops Broadcaster/Confirmer, executes callback, then starts them again +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Reset(addr ADDR, abandon bool) (err error) { + ok := b.IfStarted(func() { + done := make(chan error) + f := func() { + if abandon { + err = b.abandon(addr) + } + } + + b.reset <- reset{f, done} 
+ err = <-done + }) + if !ok { + return errors.New("not started") + } + return err +} + +// abandon, scoped to the key of this txm: +// - marks all pending and inflight transactions fatally errored (note: at this point all transactions are either confirmed or fatally errored) +// this must not be run while Broadcaster or Confirmer are running +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) abandon(addr ADDR) (err error) { + ctx, cancel := b.chStop.NewCtx() + defer cancel() + if err = b.txStore.Abandon(ctx, b.chainID, addr); err != nil { + return fmt.Errorf("abandon failed to update txes for key %s: %w", addr.String(), err) + } + return nil +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Close() (merr error) { + return b.StopOnce("Txm", func() error { + close(b.chStop) + + b.txStore.Close() + + if b.reaper != nil { + b.reaper.Stop() + } + if b.resender != nil { + b.resender.Stop() + } + if b.fwdMgr != nil { + if err := b.fwdMgr.Close(); err != nil { + merr = errors.Join(merr, fmt.Errorf("Txm: failed to stop ForwarderManager: %w", err)) + } + } + + b.wg.Wait() + + if err := b.txAttemptBuilder.Close(); err != nil { + merr = errors.Join(merr, fmt.Errorf("Txm: failed to close TxAttemptBuilder: %w", err)) + } + + /* Tracker currently disabled for BCI-2638; refactor required + if err := b.tracker.Close(); err != nil { + merr = errors.Join(merr, fmt.Errorf("Txm: failed to close Tracker: %w", err)) + } + */ + + return nil + }) +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Name() string { + return b.logger.Name() +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) HealthReport() map[string]error { + report := map[string]error{b.Name(): b.Healthy()} + + // only query if txm started properly + b.IfStarted(func() { + services.CopyHealth(report, b.broadcaster.HealthReport()) + services.CopyHealth(report, b.confirmer.HealthReport()) + services.CopyHealth(report, 
b.txAttemptBuilder.HealthReport()) + }) + + if b.txConfig.ForwardersEnabled() { + services.CopyHealth(report, b.fwdMgr.HealthReport()) + } + return report +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) runLoop() { + // eb, ec and keyStates can all be modified by the runloop. + // This is concurrent-safe because the runloop ensures serial access. + defer b.wg.Done() + keysChanged, unsub := b.keyStore.SubscribeToKeyChanges() + defer unsub() + + close(b.chSubbed) + + var stopped bool + var stopOnce sync.Once + + // execReset is defined as an inline function here because it closes over + // eb, ec and stopped + execReset := func(r *reset) { + // These should always close successfully, since it should be logically + // impossible to enter this code path with ec/eb in a state other than + // "Started" + if err := b.broadcaster.closeInternal(); err != nil { + b.logger.Panicw(fmt.Sprintf("Failed to Close Broadcaster: %v", err), "err", err) + } + if err := b.confirmer.closeInternal(); err != nil { + b.logger.Panicw(fmt.Sprintf("Failed to Close Confirmer: %v", err), "err", err) + } + if r != nil { + r.f() + close(r.done) + } + var wg sync.WaitGroup + // two goroutines to handle independent backoff retries starting: + // - Broadcaster + // - Confirmer + // If chStop is closed, we mark stopped=true so that the main runloop + // can check and exit early if necessary + // + // execReset will not return until either: + // 1. Both Broadcaster and Confirmer started successfully + // 2. 
chStop was closed (txmgr exit) + wg.Add(2) + go func() { + defer wg.Done() + ctx, cancel := b.chStop.NewCtx() + defer cancel() + // Retry indefinitely on failure + backoff := iutils.NewRedialBackoff() + for { + select { + case <-time.After(backoff.Duration()): + if err := b.broadcaster.startInternal(ctx); err != nil { + b.logger.Criticalw("Failed to start Broadcaster", "err", err) + b.SvcErrBuffer.Append(err) + continue + } + return + case <-ctx.Done(): + stopOnce.Do(func() { stopped = true }) + return + } + } + }() + go func() { + defer wg.Done() + // Retry indefinitely on failure + backoff := iutils.NewRedialBackoff() + for { + select { + case <-time.After(backoff.Duration()): + if err := b.confirmer.startInternal(); err != nil { + b.logger.Criticalw("Failed to start Confirmer", "err", err) + b.SvcErrBuffer.Append(err) + continue + } + return + case <-b.chStop: + stopOnce.Do(func() { stopped = true }) + return + } + } + }() + + wg.Wait() + } + + for { + select { + case address := <-b.trigger: + b.broadcaster.Trigger(address) + case head := <-b.chHeads: + b.confirmer.mb.Deliver(head) + // Tracker currently disabled for BCI-2638; refactor required + // b.tracker.mb.Deliver(head.BlockNumber()) + case reset := <-b.reset: + // This check prevents the weird edge-case where you can select + // into this block after chStop has already been closed and the + // previous reset exited early. + // In this case we do not want to reset again, we would rather go + // around and hit the stop case. + if stopped { + reset.done <- errors.New("Txm was stopped") + continue + } + execReset(&reset) + case <-b.chStop: + // close and exit + // + // Note that in some cases Broadcaster and/or Confirmer may + // be in an Unstarted state here, if execReset exited early. + // + // In this case, we don't care about stopping them since they are + // already "stopped". 
+ err := b.broadcaster.Close()
+ if err != nil && (!errors.Is(err, services.ErrAlreadyStopped) && !errors.Is(err, services.ErrCannotStopUnstarted)) {
+ b.logger.Errorw(fmt.Sprintf("Failed to Close Broadcaster: %v", err), "err", err)
+ }
+ err = b.confirmer.Close()
+ if err != nil && (!errors.Is(err, services.ErrAlreadyStopped) && !errors.Is(err, services.ErrCannotStopUnstarted)) {
+ b.logger.Errorw(fmt.Sprintf("Failed to Close Confirmer: %v", err), "err", err)
+ }
+ /* Tracker currently disabled for BCI-2638; refactor required
+ err = b.tracker.Close()
+ if err != nil && (!errors.Is(err, services.ErrAlreadyStopped) && !errors.Is(err, services.ErrCannotStopUnstarted)) {
+ b.logger.Errorw(fmt.Sprintf("Failed to Close Tracker: %v", err), "err", err)
+ }
+ */
+ return
+ case <-keysChanged:
+ // This check prevents the weird edge-case where you can select
+ // into this block after chStop has already been closed and the
+ // previous reset exited early.
+ // In this case we do not want to reset again, we would rather go
+ // around and hit the stop case. 
+ if stopped { + continue + } + enabledAddresses, err := b.keyStore.EnabledAddressesForChain(b.chainID) + if err != nil { + b.logger.Critical("Failed to reload key states after key change") + b.SvcErrBuffer.Append(err) + continue + } + b.logger.Debugw("Keys changed, reloading", "enabledAddresses", enabledAddresses) + + execReset(nil) + } + } +} + +// OnNewLongestChain conforms to HeadTrackable +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) OnNewLongestChain(ctx context.Context, head HEAD) { + ok := b.IfStarted(func() { + if b.reaper != nil { + b.reaper.SetLatestBlockNum(head.BlockNumber()) + } + b.txAttemptBuilder.OnNewLongestChain(ctx, head) + select { + case b.chHeads <- head: + case <-ctx.Done(): + b.logger.Errorw("Timed out handling head", "blockNum", head.BlockNumber(), "ctxErr", ctx.Err()) + } + }) + if !ok { + b.logger.Debugw("Not started; ignoring head", "head", head, "state", b.State()) + } +} + +// Trigger forces the Broadcaster to check early for the given address +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Trigger(addr ADDR) { + select { + case b.trigger <- addr: + default: + } +} + +// CreateTransaction inserts a new transaction +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CreateTransaction(ctx context.Context, txRequest txmgrtypes.TxRequest[ADDR, TX_HASH]) (tx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + // Check for existing Tx with IdempotencyKey. 
If found, return the Tx and do nothing
+ // Skipping CreateTransaction to avoid double send
+ if txRequest.IdempotencyKey != nil {
+ var existingTx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
+ existingTx, err = b.txStore.FindTxWithIdempotencyKey(ctx, *txRequest.IdempotencyKey, b.chainID)
+ if err != nil && !errors.Is(err, sql.ErrNoRows) {
+ return tx, fmt.Errorf("Failed to search for transaction with IdempotencyKey: %w", err)
+ }
+ if existingTx != nil {
+ b.logger.Infow("Found a Tx with IdempotencyKey. Returning existing Tx without creating a new one.", "IdempotencyKey", *txRequest.IdempotencyKey)
+ return *existingTx, nil
+ }
+ }
+
+ if err = b.checkEnabled(txRequest.FromAddress); err != nil {
+ return tx, err
+ }
+
+ if b.txConfig.ForwardersEnabled() && (!utils.IsZero(txRequest.ForwarderAddress)) {
+ fwdPayload, fwdErr := b.fwdMgr.ConvertPayload(txRequest.ToAddress, txRequest.EncodedPayload)
+ if fwdErr == nil {
+ // Handling meta not set at caller.
+ if txRequest.Meta != nil {
+ txRequest.Meta.FwdrDestAddress = &txRequest.ToAddress
+ } else {
+ txRequest.Meta = &txmgrtypes.TxMeta[ADDR, TX_HASH]{
+ FwdrDestAddress: &txRequest.ToAddress,
+ }
+ }
+ txRequest.ToAddress = txRequest.ForwarderAddress
+ txRequest.EncodedPayload = fwdPayload
+ } else {
+ b.logger.Errorf("Failed to use forwarder set upstream: %s", fwdErr.Error())
+ }
+ }
+
+ err = b.txStore.CheckTxQueueCapacity(ctx, txRequest.FromAddress, b.txConfig.MaxQueued(), b.chainID)
+ if err != nil {
+ return tx, fmt.Errorf("Txm#CreateTransaction: %w", err)
+ }
+
+ tx, err = b.pruneQueueAndCreateTxn(ctx, txRequest, b.chainID)
+ if err != nil {
+ return tx, err
+ }
+
+ // Trigger the Broadcaster to check for new transaction
+ b.broadcaster.Trigger(txRequest.FromAddress)
+
+ return tx, nil
+}
+
+// Calls forwarderMgr to get a proper forwarder for a given EOA. 
+func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) GetForwarderForEOA(eoa ADDR) (forwarder ADDR, err error) { + if !b.txConfig.ForwardersEnabled() { + return forwarder, fmt.Errorf("forwarding is not enabled, to enable set Transactions.ForwardersEnabled =true") + } + forwarder, err = b.fwdMgr.ForwarderFor(eoa) + return +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) checkEnabled(addr ADDR) error { + if err := b.keyStore.CheckEnabled(addr, b.chainID); err != nil { + return fmt.Errorf("cannot send transaction from %s on chain ID %s: %w", addr, b.chainID.String(), err) + } + return nil +} + +// SendNativeToken creates a transaction that transfers the given value of native tokens +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) SendNativeToken(ctx context.Context, chainID CHAIN_ID, from, to ADDR, value big.Int, gasLimit uint32) (etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + if utils.IsZero(to) { + return etx, errors.New("cannot send native token to zero address") + } + txRequest := txmgrtypes.TxRequest[ADDR, TX_HASH]{ + FromAddress: from, + ToAddress: to, + EncodedPayload: []byte{}, + Value: value, + FeeLimit: gasLimit, + Strategy: NewSendEveryStrategy(), + } + etx, err = b.pruneQueueAndCreateTxn(ctx, txRequest, chainID) + if err != nil { + return etx, fmt.Errorf("SendNativeToken failed to insert tx: %w", err) + } + + // Trigger the Broadcaster to check for new transaction + b.broadcaster.Trigger(from) + return etx, nil +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + txes, err = b.txStore.FindTxesByMetaFieldAndStates(ctx, metaField, metaValue, states, chainID) + return +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, 
BLOCK_HASH, R, SEQ, FEE]) FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + txes, err = b.txStore.FindTxesWithMetaFieldByStates(ctx, metaField, states, chainID) + return +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + txes, err = b.txStore.FindTxesWithMetaFieldByReceiptBlockNum(ctx, metaField, blockNum, chainID) + return +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + txes, err = b.txStore.FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx, ids, states, chainID) + return +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindEarliestUnconfirmedBroadcastTime(ctx context.Context) (nullv4.Time, error) { + return b.txStore.FindEarliestUnconfirmedBroadcastTime(ctx, b.chainID) +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindEarliestUnconfirmedTxAttemptBlock(ctx context.Context) (nullv4.Int, error) { + return b.txStore.FindEarliestUnconfirmedTxAttemptBlock(ctx, b.chainID) +} + +func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CountTransactionsByState(ctx context.Context, state txmgrtypes.TxState) (count uint32, err error) { + return b.txStore.CountTransactionsByState(ctx, state, b.chainID) +} + +type NullTxManager[ + CHAIN_ID types.ID, + HEAD types.Head[BLOCK_HASH], + ADDR types.Hashable, + TX_HASH, BLOCK_HASH types.Hashable, + SEQ types.Sequence, + FEE feetypes.Fee, +] struct { + ErrMsg 
string +} + +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) OnNewLongestChain(context.Context, HEAD) { +} + +// Start does noop for NullTxManager. +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Start(context.Context) error { + return nil +} + +// Close does noop for NullTxManager. +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Close() error { + return nil +} + +// Trigger does noop for NullTxManager. +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Trigger(ADDR) { + panic(n.ErrMsg) +} +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) CreateTransaction(ctx context.Context, txRequest txmgrtypes.TxRequest[ADDR, TX_HASH]) (etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + return etx, errors.New(n.ErrMsg) +} +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetForwarderForEOA(addr ADDR) (fwdr ADDR, err error) { + return fwdr, err +} +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Reset(addr ADDR, abandon bool) error { + return nil +} + +// SendNativeToken does nothing, null functionality +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) SendNativeToken(ctx context.Context, chainID CHAIN_ID, from, to ADDR, value big.Int, gasLimit uint32) (etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + return etx, errors.New(n.ErrMsg) +} + +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Ready() error { + return nil +} +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Name() string { + return "NullTxManager" +} +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) HealthReport() map[string]error { + return map[string]error{} +} +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) 
RegisterResumeCallback(fn ResumeCallback) { +} +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + return txes, errors.New(n.ErrMsg) +} +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + return txes, errors.New(n.ErrMsg) +} +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + return txes, errors.New(n.ErrMsg) +} +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []txmgrtypes.TxState, chainID *big.Int) (txes []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) { + return txes, errors.New(n.ErrMsg) +} + +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindEarliestUnconfirmedBroadcastTime(ctx context.Context) (nullv4.Time, error) { + return nullv4.Time{}, errors.New(n.ErrMsg) +} + +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) FindEarliestUnconfirmedTxAttemptBlock(ctx context.Context) (nullv4.Int, error) { + return nullv4.Int{}, errors.New(n.ErrMsg) +} + +func (n *NullTxManager[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) CountTransactionsByState(ctx context.Context, state txmgrtypes.TxState) (count uint32, err error) { + return count, errors.New(n.ErrMsg) +} + +func (b 
*Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) pruneQueueAndCreateTxn( + ctx context.Context, + txRequest txmgrtypes.TxRequest[ADDR, TX_HASH], + chainID CHAIN_ID, +) ( + tx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + err error, +) { + b.pruneQueueAndCreateLock.Lock() + defer b.pruneQueueAndCreateLock.Unlock() + + pruned, err := txRequest.Strategy.PruneQueue(ctx, b.txStore) + if err != nil { + return tx, err + } + if len(pruned) > 0 { + b.logger.Warnw(fmt.Sprintf("Pruned %d old unstarted transactions", len(pruned)), + "subject", txRequest.Strategy.Subject(), + "pruned-tx-ids", pruned, + ) + } + + tx, err = b.txStore.CreateTransaction(ctx, txRequest, chainID) + if err != nil { + return tx, err + } + b.logger.Debugw("Created transaction", + "fromAddress", txRequest.FromAddress, + "toAddress", txRequest.ToAddress, + "meta", txRequest.Meta, + "transactionID", tx.ID, + ) + + return tx, nil +} diff --git a/common/txmgr/types/client.go b/common/txmgr/types/client.go new file mode 100644 index 00000000..c3ad4861 --- /dev/null +++ b/common/txmgr/types/client.go @@ -0,0 +1,87 @@ +package types + +import ( + "context" + "fmt" + "math/big" + "time" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/common/client" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// TxmClient is a superset of all the methods needed for the txm +type TxmClient[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + R ChainReceipt[TX_HASH, BLOCK_HASH], + SEQ types.Sequence, + FEE feetypes.Fee, +] interface { + ChainClient[CHAIN_ID, ADDR, SEQ] + TransactionClient[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + + // receipt fetching used by confirmer + BatchGetReceipts( + ctx context.Context, + attempts []TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + ) (txReceipt []R, txErr []error, err error) +} + 
+// TransactionClient contains the methods for building, simulating, broadcasting transactions +type TransactionClient[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + SEQ types.Sequence, + FEE feetypes.Fee, +] interface { + ChainClient[CHAIN_ID, ADDR, SEQ] + + BatchSendTransactions( + ctx context.Context, + attempts []TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + bathSize int, + lggr logger.SugaredLogger, + ) ( + txCodes []client.SendTxReturnCode, + txErrs []error, + broadcastTime time.Time, + successfulTxIDs []int64, + err error) + SendTransactionReturnCode( + ctx context.Context, + tx Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + lggr logger.SugaredLogger, + ) (client.SendTxReturnCode, error) + SendEmptyTransaction( + ctx context.Context, + newTxAttempt func(seq SEQ, feeLimit uint32, fee FEE, fromAddress ADDR) (attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error), + seq SEQ, + gasLimit uint32, + fee FEE, + fromAddress ADDR, + ) (txhash string, err error) + CallContract( + ctx context.Context, + attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], + blockNumber *big.Int, + ) (rpcErr fmt.Stringer, extractErr error) +} + +// ChainClient contains the interfaces for reading chain parameters (chain id, sequences, etc) +type ChainClient[ + CHAIN_ID types.ID, + ADDR types.Hashable, + SEQ types.Sequence, +] interface { + ConfiguredChainID() CHAIN_ID + PendingSequenceAt(ctx context.Context, addr ADDR) (SEQ, error) + SequenceAt(ctx context.Context, addr ADDR, blockNum *big.Int) (SEQ, error) +} diff --git a/common/txmgr/types/config.go b/common/txmgr/types/config.go new file mode 100644 index 00000000..502a7f42 --- /dev/null +++ b/common/txmgr/types/config.go @@ -0,0 +1,87 @@ +package types + +import "time" + +type TransactionManagerChainConfig interface { + BroadcasterChainConfig + 
ConfirmerChainConfig + ReaperChainConfig +} + +type TransactionManagerFeeConfig interface { + BroadcasterFeeConfig + ConfirmerFeeConfig +} + +type TransactionManagerTransactionsConfig interface { + BroadcasterTransactionsConfig + ConfirmerTransactionsConfig + ResenderTransactionsConfig + ReaperTransactionsConfig + + ForwardersEnabled() bool + MaxQueued() uint64 +} + +type BroadcasterChainConfig interface { + IsL2() bool +} + +type BroadcasterFeeConfig interface { + MaxFeePrice() string // logging value + FeePriceDefault() string // logging value +} + +type BroadcasterTransactionsConfig interface { + MaxInFlight() uint32 +} + +type BroadcasterListenerConfig interface { + FallbackPollInterval() time.Duration +} + +type ConfirmerFeeConfig interface { + BumpTxDepth() uint32 + LimitDefault() uint32 + + // from gas.Config + BumpThreshold() uint64 + MaxFeePrice() string // logging value + BumpPercent() uint16 +} + +type ConfirmerChainConfig interface { + RPCDefaultBatchSize() uint32 + FinalityDepth() uint32 +} + +type ConfirmerDatabaseConfig interface { + // from pg.QConfig + DefaultQueryTimeout() time.Duration +} + +type ConfirmerTransactionsConfig interface { + MaxInFlight() uint32 + ForwardersEnabled() bool +} + +type ResenderChainConfig interface { + RPCDefaultBatchSize() uint32 +} + +type ResenderTransactionsConfig interface { + ResendAfterThreshold() time.Duration + MaxInFlight() uint32 +} + +// ReaperConfig is the config subset used by the reaper +// +//go:generate mockery --quiet --name ReaperChainConfig --structname ReaperConfig --output ./mocks/ --case=underscore +type ReaperChainConfig interface { + FinalityDepth() uint32 +} + +type ReaperTransactionsConfig interface { + ReaperInterval() time.Duration + ReaperThreshold() time.Duration +} diff --git a/common/txmgr/types/forwarder_manager.go b/common/txmgr/types/forwarder_manager.go new file mode 100644 index 00000000..2ffb70b8 --- /dev/null +++ b/common/txmgr/types/forwarder_manager.go @@ -0,0 +1,14 @@ +package 
types + +import ( + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +//go:generate mockery --quiet --name ForwarderManager --output ./mocks/ --case=underscore +type ForwarderManager[ADDR types.Hashable] interface { + services.Service + ForwarderFor(addr ADDR) (forwarder ADDR, err error) + // Converts payload to be forwarder-friendly + ConvertPayload(dest ADDR, origPayload []byte) ([]byte, error) +} diff --git a/common/txmgr/types/keystore.go b/common/txmgr/types/keystore.go new file mode 100644 index 00000000..d0fcae0b --- /dev/null +++ b/common/txmgr/types/keystore.go @@ -0,0 +1,21 @@ +package types + +import ( + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// KeyStore encompasses the subset of keystore used by txmgr +// +//go:generate mockery --quiet --name KeyStore --output ./mocks/ --case=underscore +type KeyStore[ + // Account Address type. + ADDR types.Hashable, + // Chain ID type + CHAIN_ID types.ID, + // Chain's sequence type. For example, EVM chains use nonce, bitcoin uses UTXO. + SEQ types.Sequence, +] interface { + CheckEnabled(address ADDR, chainID CHAIN_ID) error + EnabledAddressesForChain(chainId CHAIN_ID) ([]ADDR, error) + SubscribeToKeyChanges() (ch chan struct{}, unsub func()) +} diff --git a/common/txmgr/types/mocks/forwarder_manager.go b/common/txmgr/types/mocks/forwarder_manager.go new file mode 100644 index 00000000..d7839274 --- /dev/null +++ b/common/txmgr/types/mocks/forwarder_manager.go @@ -0,0 +1,180 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// ForwarderManager is an autogenerated mock type for the ForwarderManager type +type ForwarderManager[ADDR types.Hashable] struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *ForwarderManager[ADDR]) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ConvertPayload provides a mock function with given fields: dest, origPayload +func (_m *ForwarderManager[ADDR]) ConvertPayload(dest ADDR, origPayload []byte) ([]byte, error) { + ret := _m.Called(dest, origPayload) + + if len(ret) == 0 { + panic("no return value specified for ConvertPayload") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(ADDR, []byte) ([]byte, error)); ok { + return rf(dest, origPayload) + } + if rf, ok := ret.Get(0).(func(ADDR, []byte) []byte); ok { + r0 = rf(dest, origPayload) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(ADDR, []byte) error); ok { + r1 = rf(dest, origPayload) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ForwarderFor provides a mock function with given fields: addr +func (_m *ForwarderManager[ADDR]) ForwarderFor(addr ADDR) (ADDR, error) { + ret := _m.Called(addr) + + if len(ret) == 0 { + panic("no return value specified for ForwarderFor") + } + + var r0 ADDR + var r1 error + if rf, ok := ret.Get(0).(func(ADDR) (ADDR, error)); ok { + return rf(addr) + } + if rf, ok := ret.Get(0).(func(ADDR) ADDR); ok { + r0 = rf(addr) + } else { + r0 = ret.Get(0).(ADDR) + } + + if rf, ok := ret.Get(1).(func(ADDR) error); ok { + r1 = rf(addr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HealthReport 
provides a mock function with given fields: +func (_m *ForwarderManager[ADDR]) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *ForwarderManager[ADDR]) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Ready provides a mock function with given fields: +func (_m *ForwarderManager[ADDR]) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *ForwarderManager[ADDR]) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewForwarderManager creates a new instance of ForwarderManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewForwarderManager[ADDR types.Hashable](t interface { + mock.TestingT + Cleanup(func()) +}) *ForwarderManager[ADDR] { + mock := &ForwarderManager[ADDR]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/txmgr/types/mocks/key_store.go b/common/txmgr/types/mocks/key_store.go new file mode 100644 index 00000000..a55d68dd --- /dev/null +++ b/common/txmgr/types/mocks/key_store.go @@ -0,0 +1,108 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + types "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// KeyStore is an autogenerated mock type for the KeyStore type +type KeyStore[ADDR types.Hashable, CHAIN_ID types.ID, SEQ types.Sequence] struct { + mock.Mock +} + +// CheckEnabled provides a mock function with given fields: address, chainID +func (_m *KeyStore[ADDR, CHAIN_ID, SEQ]) CheckEnabled(address ADDR, chainID CHAIN_ID) error { + ret := _m.Called(address, chainID) + + if len(ret) == 0 { + panic("no return value specified for CheckEnabled") + } + + var r0 error + if rf, ok := ret.Get(0).(func(ADDR, CHAIN_ID) error); ok { + r0 = rf(address, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EnabledAddressesForChain provides a mock function with given fields: chainId +func (_m *KeyStore[ADDR, CHAIN_ID, SEQ]) EnabledAddressesForChain(chainId CHAIN_ID) ([]ADDR, error) { + ret := _m.Called(chainId) + + if len(ret) == 0 { + panic("no return value specified for EnabledAddressesForChain") + } + + var r0 []ADDR + var r1 error + if rf, ok := ret.Get(0).(func(CHAIN_ID) ([]ADDR, error)); ok { + return rf(chainId) + } + if rf, ok := ret.Get(0).(func(CHAIN_ID) []ADDR); ok { + r0 = rf(chainId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ADDR) + } + } + + if rf, ok := ret.Get(1).(func(CHAIN_ID) error); ok { + r1 = rf(chainId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
SubscribeToKeyChanges provides a mock function with given fields: +func (_m *KeyStore[ADDR, CHAIN_ID, SEQ]) SubscribeToKeyChanges() (chan struct{}, func()) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SubscribeToKeyChanges") + } + + var r0 chan struct{} + var r1 func() + if rf, ok := ret.Get(0).(func() (chan struct{}, func())); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(chan struct{}) + } + } + + if rf, ok := ret.Get(1).(func() func()); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(func()) + } + } + + return r0, r1 +} + +// NewKeyStore creates a new instance of KeyStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewKeyStore[ADDR types.Hashable, CHAIN_ID types.ID, SEQ types.Sequence](t interface { + mock.TestingT + Cleanup(func()) +}) *KeyStore[ADDR, CHAIN_ID, SEQ] { + mock := &KeyStore[ADDR, CHAIN_ID, SEQ]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/txmgr/types/mocks/reaper_chain_config.go b/common/txmgr/types/mocks/reaper_chain_config.go new file mode 100644 index 00000000..041214b8 --- /dev/null +++ b/common/txmgr/types/mocks/reaper_chain_config.go @@ -0,0 +1,42 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// ReaperConfig is an autogenerated mock type for the ReaperChainConfig type +type ReaperConfig struct { + mock.Mock +} + +// FinalityDepth provides a mock function with given fields: +func (_m *ReaperConfig) FinalityDepth() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalityDepth") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// NewReaperConfig creates a new instance of ReaperConfig. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReaperConfig(t interface { + mock.TestingT + Cleanup(func()) +}) *ReaperConfig { + mock := &ReaperConfig{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/txmgr/types/mocks/tx_attempt_builder.go b/common/txmgr/types/mocks/tx_attempt_builder.go new file mode 100644 index 00000000..902385c7 --- /dev/null +++ b/common/txmgr/types/mocks/tx_attempt_builder.go @@ -0,0 +1,356 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + logger "github.com/goplugin/plugin-common/pkg/logger" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + + mock "github.com/stretchr/testify/mock" + + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + + types "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// TxAttemptBuilder is an autogenerated mock type for the TxAttemptBuilder type +type TxAttemptBuilder[CHAIN_ID types.ID, HEAD types.Head[BLOCK_HASH], ADDR types.Hashable, TX_HASH types.Hashable, BLOCK_HASH types.Hashable, SEQ types.Sequence, FEE feetypes.Fee] struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// HealthReport provides a mock function with given fields: +func (_m *TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// NewBumpTxAttempt provides a mock function with given fields: ctx, tx, previousAttempt, priorAttempts, lggr +func (_m 
*TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) NewBumpTxAttempt(ctx context.Context, tx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], previousAttempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], priorAttempts []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], lggr logger.Logger) (txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], FEE, uint32, bool, error) {
	ret := _m.Called(ctx, tx, previousAttempt, priorAttempts, lggr)

	if len(ret) == 0 {
		panic("no return value specified for NewBumpTxAttempt")
	}

	// NOTE(review): mockery-generated code (see file header) — regenerate with
	// mockery rather than hand-editing. Standard mockery resolution pattern:
	// if the stubbed return at position 0 is a function yielding the full
	// result tuple, call it; otherwise resolve each position independently,
	// either from a per-position function or from the stored concrete value.
	var r0 txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 FEE
	var r2 uint32
	var r3 bool
	var r4 error
	if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger) (txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], FEE, uint32, bool, error)); ok {
		return rf(ctx, tx, previousAttempt, priorAttempts, lggr)
	}
	if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger) txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, tx, previousAttempt, priorAttempts, lggr)
	} else {
		r0 = ret.Get(0).(txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
	}

	if rf, ok := ret.Get(1).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger) FEE); ok {
		r1 = rf(ctx, tx, previousAttempt, priorAttempts, lggr)
	} else {
		r1 = ret.Get(1).(FEE)
	}

	if rf, ok := ret.Get(2).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger) uint32); ok {
		r2 = rf(ctx, tx, previousAttempt, priorAttempts, lggr)
	} else {
		r2 = ret.Get(2).(uint32)
	}

	if rf, ok := ret.Get(3).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger) bool); ok {
		r3 = rf(ctx, tx, previousAttempt, priorAttempts, lggr)
	} else {
		r3 = ret.Get(3).(bool)
	}

	if rf, ok := ret.Get(4).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger) error); ok {
		r4 = rf(ctx, tx, previousAttempt, priorAttempts, lggr)
	} else {
		r4 = ret.Error(4)
	}

	return r0, r1, r2, r3, r4
}

// NewCustomTxAttempt provides a mock function with given fields: tx, fee, gasLimit, txType, lggr
func (_m *TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) NewCustomTxAttempt(tx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], fee FEE, gasLimit uint32, txType int, lggr logger.Logger) (txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], bool, error) {
	ret := _m.Called(tx, fee, gasLimit, txType, lggr)

	if len(ret) == 0 {
		panic("no return value specified for NewCustomTxAttempt")
	}

	var r0 txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 bool
	var r2 error
	if rf, ok := ret.Get(0).(func(txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], FEE, uint32, int, logger.Logger) (txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], bool, error)); ok {
		return rf(tx, fee, gasLimit, txType, lggr)
	}
	if rf, ok := ret.Get(0).(func(txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], FEE, uint32, int, logger.Logger) txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(tx, fee, gasLimit, txType, lggr)
	} else {
		r0 = ret.Get(0).(txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
	}

	if rf, ok := ret.Get(1).(func(txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], FEE, uint32, int, logger.Logger) bool); ok {
		r1 = rf(tx, fee, gasLimit, txType, lggr)
	} else {
		r1 = ret.Get(1).(bool)
	}

	if rf, ok := ret.Get(2).(func(txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], FEE, uint32, int, logger.Logger) error); ok {
		r2 = rf(tx, fee, gasLimit, txType, lggr)
	} else {
		r2 = ret.Error(2)
	}

	return r0, r1, r2
}

// NewEmptyTxAttempt provides a mock function with given fields: seq, feeLimit, fee, fromAddress
func (_m *TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) NewEmptyTxAttempt(seq SEQ, feeLimit uint32, fee FEE, fromAddress ADDR) (txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
	ret := _m.Called(seq, feeLimit, fee, fromAddress)

	if len(ret) == 0 {
		panic("no return value specified for NewEmptyTxAttempt")
	}

	var r0 txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 error
	if rf, ok := ret.Get(0).(func(SEQ, uint32, FEE, ADDR) (txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
		return rf(seq, feeLimit, fee, fromAddress)
	}
	if rf, ok := ret.Get(0).(func(SEQ, uint32, FEE, ADDR) txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(seq, feeLimit, fee, fromAddress)
	} else {
		r0 = ret.Get(0).(txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
	}

	if rf, ok := ret.Get(1).(func(SEQ, uint32, FEE, ADDR) error); ok {
		r1 = rf(seq, feeLimit, fee, fromAddress)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// NewTxAttempt provides a mock function with given fields: ctx, tx, lggr, opts
func (_m *TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) NewTxAttempt(ctx context.Context, tx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], lggr logger.Logger, opts ...feetypes.Opt) (txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], FEE, uint32, bool, error) {
	// Variadic args must be flattened into the single []interface{} that
	// mock.Called expects, so expectations can match them positionally.
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, tx, lggr)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for NewTxAttempt")
	}

	var r0 txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 FEE
	var r2 uint32
	var r3 bool
	var r4 error
	if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger, ...feetypes.Opt) (txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], FEE, uint32, bool, error)); ok {
		return rf(ctx, tx, lggr, opts...)
	}
	if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger, ...feetypes.Opt) txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, tx, lggr, opts...)
	} else {
		r0 = ret.Get(0).(txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
	}

	if rf, ok := ret.Get(1).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger, ...feetypes.Opt) FEE); ok {
		r1 = rf(ctx, tx, lggr, opts...)
	} else {
		r1 = ret.Get(1).(FEE)
	}

	if rf, ok := ret.Get(2).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger, ...feetypes.Opt) uint32); ok {
		r2 = rf(ctx, tx, lggr, opts...)
	} else {
		r2 = ret.Get(2).(uint32)
	}

	if rf, ok := ret.Get(3).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger, ...feetypes.Opt) bool); ok {
		r3 = rf(ctx, tx, lggr, opts...)
	} else {
		r3 = ret.Get(3).(bool)
	}

	if rf, ok := ret.Get(4).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger, ...feetypes.Opt) error); ok {
		r4 = rf(ctx, tx, lggr, opts...)
	} else {
		r4 = ret.Error(4)
	}

	return r0, r1, r2, r3, r4
}

// NewTxAttemptWithType provides a mock function with given fields: ctx, tx, lggr, txType, opts
func (_m *TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) NewTxAttemptWithType(ctx context.Context, tx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], lggr logger.Logger, txType int, opts ...feetypes.Opt) (txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], FEE, uint32, bool, error) {
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, tx, lggr, txType)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for NewTxAttemptWithType")
	}

	var r0 txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 FEE
	var r2 uint32
	var r3 bool
	var r4 error
	if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger, int, ...feetypes.Opt) (txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], FEE, uint32, bool, error)); ok {
		return rf(ctx, tx, lggr, txType, opts...)
	}
	if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger, int, ...feetypes.Opt) txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, tx, lggr, txType, opts...)
	} else {
		r0 = ret.Get(0).(txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
	}

	if rf, ok := ret.Get(1).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger, int, ...feetypes.Opt) FEE); ok {
		r1 = rf(ctx, tx, lggr, txType, opts...)
	} else {
		r1 = ret.Get(1).(FEE)
	}

	if rf, ok := ret.Get(2).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger, int, ...feetypes.Opt) uint32); ok {
		r2 = rf(ctx, tx, lggr, txType, opts...)
	} else {
		r2 = ret.Get(2).(uint32)
	}

	if rf, ok := ret.Get(3).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger, int, ...feetypes.Opt) bool); ok {
		r3 = rf(ctx, tx, lggr, txType, opts...)
	} else {
		r3 = ret.Get(3).(bool)
	}

	if rf, ok := ret.Get(4).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], logger.Logger, int, ...feetypes.Opt) error); ok {
		r4 = rf(ctx, tx, lggr, txType, opts...)
	} else {
		r4 = ret.Error(4)
	}

	return r0, r1, r2, r3, r4
}

// OnNewLongestChain provides a mock function with given fields: ctx, head
func (_m *TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) OnNewLongestChain(ctx context.Context, head HEAD) {
	_m.Called(ctx, head)
}

// Ready provides a mock function with given fields:
func (_m *TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Ready() error {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for Ready")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Start provides a mock function with given fields: _a0
func (_m *TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Start(_a0 context.Context) error {
	ret := _m.Called(_a0)

	if len(ret) == 0 {
		panic("no return value specified for Start")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// NewTxAttemptBuilder creates a new instance of TxAttemptBuilder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewTxAttemptBuilder[CHAIN_ID types.ID, HEAD types.Head[BLOCK_HASH], ADDR types.Hashable, TX_HASH types.Hashable, BLOCK_HASH types.Hashable, SEQ types.Sequence, FEE feetypes.Fee](t interface {
	mock.TestingT
	Cleanup(func())
}) *TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] {
	mock := &TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
diff --git a/common/txmgr/types/mocks/tx_store.go b/common/txmgr/types/mocks/tx_store.go
new file mode 100644
index 00000000..eae20984
--- /dev/null
+++ b/common/txmgr/types/mocks/tx_store.go
@@ -0,0 +1,1251 @@
// Code generated by mockery v2.38.0. DO NOT EDIT.

package mocks

import (
	context "context"
	big "math/big"

	feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types"
	mock "github.com/stretchr/testify/mock"

	null "gopkg.in/guregu/null.v4"

	time "time"

	txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types"

	types "github.com/goplugin/pluginv3.0/v2/common/types"

	uuid "github.com/google/uuid"
)

// TxStore is an autogenerated mock type for the TxStore type
type TxStore[ADDR types.Hashable, CHAIN_ID types.ID, TX_HASH types.Hashable, BLOCK_HASH types.Hashable, R txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH], SEQ types.Sequence, FEE feetypes.Fee] struct {
	mock.Mock
}

// Abandon provides a mock function with given fields: ctx, id, addr
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Abandon(ctx context.Context, id CHAIN_ID, addr ADDR) error {
	ret := _m.Called(ctx, id, addr)

	if len(ret) == 0 {
		panic("no return value specified for Abandon")
	}

	// NOTE(review): mockery-generated code (see file header) — regenerate with
	// mockery rather than hand-editing. Each method resolves stubbed returns
	// positionally: a function stub is invoked, otherwise the stored concrete
	// value is type-asserted out.
	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID, ADDR) error); ok {
		r0 = rf(ctx, id, addr)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// CheckTxQueueCapacity provides a mock function with given fields: ctx, fromAddress, maxQueuedTransactions, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CheckTxQueueCapacity(ctx context.Context, fromAddress ADDR, maxQueuedTransactions uint64, chainID CHAIN_ID) error {
	ret := _m.Called(ctx, fromAddress, maxQueuedTransactions, chainID)

	if len(ret) == 0 {
		panic("no return value specified for CheckTxQueueCapacity")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, ADDR, uint64, CHAIN_ID) error); ok {
		r0 = rf(ctx, fromAddress, maxQueuedTransactions, chainID)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Close provides a mock function with given fields:
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Close() {
	_m.Called()
}

// CountTransactionsByState provides a mock function with given fields: ctx, state, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CountTransactionsByState(ctx context.Context, state txmgrtypes.TxState, chainID CHAIN_ID) (uint32, error) {
	ret := _m.Called(ctx, state, chainID)

	if len(ret) == 0 {
		panic("no return value specified for CountTransactionsByState")
	}

	var r0 uint32
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.TxState, CHAIN_ID) (uint32, error)); ok {
		return rf(ctx, state, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.TxState, CHAIN_ID) uint32); ok {
		r0 = rf(ctx, state, chainID)
	} else {
		r0 = ret.Get(0).(uint32)
	}

	if rf, ok := ret.Get(1).(func(context.Context, txmgrtypes.TxState, CHAIN_ID) error); ok {
		r1 = rf(ctx, state, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CountUnconfirmedTransactions provides a mock function with given fields: ctx, fromAddress, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CountUnconfirmedTransactions(ctx context.Context, fromAddress ADDR, chainID CHAIN_ID) (uint32, error) {
	ret := _m.Called(ctx, fromAddress, chainID)

	if len(ret) == 0 {
		panic("no return value specified for CountUnconfirmedTransactions")
	}

	var r0 uint32
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) (uint32, error)); ok {
		return rf(ctx, fromAddress, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) uint32); ok {
		r0 = rf(ctx, fromAddress, chainID)
	} else {
		r0 = ret.Get(0).(uint32)
	}

	if rf, ok := ret.Get(1).(func(context.Context, ADDR, CHAIN_ID) error); ok {
		r1 = rf(ctx, fromAddress, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CountUnstartedTransactions provides a mock function with given fields: ctx, fromAddress, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CountUnstartedTransactions(ctx context.Context, fromAddress ADDR, chainID CHAIN_ID) (uint32, error) {
	ret := _m.Called(ctx, fromAddress, chainID)

	if len(ret) == 0 {
		panic("no return value specified for CountUnstartedTransactions")
	}

	var r0 uint32
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) (uint32, error)); ok {
		return rf(ctx, fromAddress, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) uint32); ok {
		r0 = rf(ctx, fromAddress, chainID)
	} else {
		r0 = ret.Get(0).(uint32)
	}

	if rf, ok := ret.Get(1).(func(context.Context, ADDR, CHAIN_ID) error); ok {
		r1 = rf(ctx, fromAddress, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CreateTransaction provides a mock function with given fields: ctx, txRequest, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CreateTransaction(ctx context.Context, txRequest txmgrtypes.TxRequest[ADDR, TX_HASH], chainID CHAIN_ID) (txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
	ret := _m.Called(ctx, txRequest, chainID)

	if len(ret) == 0 {
		panic("no return value specified for CreateTransaction")
	}

	var r0 txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.TxRequest[ADDR, TX_HASH], CHAIN_ID) (txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
		return rf(ctx, txRequest, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.TxRequest[ADDR, TX_HASH], CHAIN_ID) txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, txRequest, chainID)
	} else {
		r0 = ret.Get(0).(txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
	}

	if rf, ok := ret.Get(1).(func(context.Context, txmgrtypes.TxRequest[ADDR, TX_HASH], CHAIN_ID) error); ok {
		r1 = rf(ctx, txRequest, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// DeleteInProgressAttempt provides a mock function with given fields: ctx, attempt
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) DeleteInProgressAttempt(ctx context.Context, attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error {
	ret := _m.Called(ctx, attempt)

	if len(ret) == 0 {
		panic("no return value specified for DeleteInProgressAttempt")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error); ok {
		r0 = rf(ctx, attempt)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// FindEarliestUnconfirmedBroadcastTime provides a mock function with given fields: ctx, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindEarliestUnconfirmedBroadcastTime(ctx context.Context, chainID CHAIN_ID) (null.Time, error) {
	ret := _m.Called(ctx, chainID)

	if len(ret) == 0 {
		panic("no return value specified for FindEarliestUnconfirmedBroadcastTime")
	}

	var r0 null.Time
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID) (null.Time, error)); ok {
		return rf(ctx, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID) null.Time); ok {
		r0 = rf(ctx, chainID)
	} else {
		r0 = ret.Get(0).(null.Time)
	}

	if rf, ok := ret.Get(1).(func(context.Context, CHAIN_ID) error); ok {
		r1 = rf(ctx, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindEarliestUnconfirmedTxAttemptBlock provides a mock function with given fields: ctx, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindEarliestUnconfirmedTxAttemptBlock(ctx context.Context, chainID CHAIN_ID) (null.Int, error) {
	ret := _m.Called(ctx, chainID)

	if len(ret) == 0 {
		panic("no return value specified for FindEarliestUnconfirmedTxAttemptBlock")
	}

	var r0 null.Int
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID) (null.Int, error)); ok {
		return rf(ctx, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID) null.Int); ok {
		r0 = rf(ctx, chainID)
	} else {
		r0 = ret.Get(0).(null.Int)
	}

	if rf, ok := ret.Get(1).(func(context.Context, CHAIN_ID) error); ok {
		r1 = rf(ctx, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindLatestSequence provides a mock function with given fields: ctx, fromAddress, chainId
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindLatestSequence(ctx context.Context, fromAddress ADDR, chainId CHAIN_ID) (SEQ, error) {
	ret := _m.Called(ctx, fromAddress, chainId)

	if len(ret) == 0 {
		panic("no return value specified for FindLatestSequence")
	}

	var r0 SEQ
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) (SEQ, error)); ok {
		return rf(ctx, fromAddress, chainId)
	}
	if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) SEQ); ok {
		r0 = rf(ctx, fromAddress, chainId)
	} else {
		r0 = ret.Get(0).(SEQ)
	}

	if rf, ok := ret.Get(1).(func(context.Context, ADDR, CHAIN_ID) error); ok {
		r1 = rf(ctx, fromAddress, chainId)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindNextUnstartedTransactionFromAddress provides a mock function with given fields: ctx, etx, fromAddress, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindNextUnstartedTransactionFromAddress(ctx context.Context, etx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], fromAddress ADDR, chainID CHAIN_ID) error {
	ret := _m.Called(ctx, etx, fromAddress, chainID)

	if len(ret) == 0 {
		panic("no return value specified for FindNextUnstartedTransactionFromAddress")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], ADDR, CHAIN_ID) error); ok {
		r0 = rf(ctx, etx, fromAddress, chainID)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// FindTransactionsConfirmedInBlockRange provides a mock function with given fields: ctx, highBlockNumber, lowBlockNumber, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTransactionsConfirmedInBlockRange(ctx context.Context, highBlockNumber int64, lowBlockNumber int64, chainID CHAIN_ID) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
	ret := _m.Called(ctx, highBlockNumber, lowBlockNumber, chainID)

	if len(ret) == 0 {
		panic("no return value specified for FindTransactionsConfirmedInBlockRange")
	}

	var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, int64, int64, CHAIN_ID) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
		return rf(ctx, highBlockNumber, lowBlockNumber, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, int64, int64, CHAIN_ID) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, highBlockNumber, lowBlockNumber, chainID)
	} else {
		// nil guard: a stubbed nil slice cannot be type-asserted directly.
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, int64, int64, CHAIN_ID) error); ok {
		r1 = rf(ctx, highBlockNumber, lowBlockNumber, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindTxAttemptsConfirmedMissingReceipt provides a mock function with given fields: ctx, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxAttemptsConfirmedMissingReceipt(ctx context.Context, chainID CHAIN_ID) ([]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
	ret := _m.Called(ctx, chainID)

	if len(ret) == 0 {
		panic("no return value specified for FindTxAttemptsConfirmedMissingReceipt")
	}

	var r0 []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID) ([]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
		return rf(ctx, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID) []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, chainID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, CHAIN_ID) error); ok {
		r1 = rf(ctx, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindTxAttemptsRequiringReceiptFetch provides a mock function with given fields: ctx, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxAttemptsRequiringReceiptFetch(ctx context.Context, chainID CHAIN_ID) ([]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
	ret := _m.Called(ctx, chainID)

	if len(ret) == 0 {
		panic("no return value specified for FindTxAttemptsRequiringReceiptFetch")
	}

	var r0 []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID) ([]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
		return rf(ctx, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID) []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, chainID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, CHAIN_ID) error); ok {
		r1 = rf(ctx, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindTxAttemptsRequiringResend provides a mock function with given fields: ctx, olderThan, maxInFlightTransactions, chainID, address
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxAttemptsRequiringResend(ctx context.Context, olderThan time.Time, maxInFlightTransactions uint32, chainID CHAIN_ID, address ADDR) ([]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
	ret := _m.Called(ctx, olderThan, maxInFlightTransactions, chainID, address)

	if len(ret) == 0 {
		panic("no return value specified for FindTxAttemptsRequiringResend")
	}

	var r0 []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, time.Time, uint32, CHAIN_ID, ADDR) ([]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
		return rf(ctx, olderThan, maxInFlightTransactions, chainID, address)
	}
	if rf, ok := ret.Get(0).(func(context.Context, time.Time, uint32, CHAIN_ID, ADDR) []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, olderThan, maxInFlightTransactions, chainID, address)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, time.Time, uint32, CHAIN_ID, ADDR) error); ok {
		r1 = rf(ctx, olderThan, maxInFlightTransactions, chainID, address)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindTxWithIdempotencyKey provides a mock function with given fields: ctx, idempotencyKey, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxWithIdempotencyKey(ctx context.Context, idempotencyKey string, chainID CHAIN_ID) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
	ret := _m.Called(ctx, idempotencyKey, chainID)

	if len(ret) == 0 {
		panic("no return value specified for FindTxWithIdempotencyKey")
	}

	var r0 *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, string, CHAIN_ID) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
		return rf(ctx, idempotencyKey, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, string, CHAIN_ID) *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, idempotencyKey, chainID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, string, CHAIN_ID) error); ok {
		r1 = rf(ctx, idempotencyKey, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindTxWithSequence provides a mock function with given fields: ctx, fromAddress, seq
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxWithSequence(ctx context.Context, fromAddress ADDR, seq SEQ) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
	ret := _m.Called(ctx, fromAddress, seq)

	if len(ret) == 0 {
		panic("no return value specified for FindTxWithSequence")
	}

	var r0 *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, ADDR, SEQ) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
		return rf(ctx, fromAddress, seq)
	}
	if rf, ok := ret.Get(0).(func(context.Context, ADDR, SEQ) *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, fromAddress, seq)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, ADDR, SEQ) error); ok {
		r1 = rf(ctx, fromAddress, seq)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindTxesByMetaFieldAndStates provides a mock function with given fields: ctx, metaField, metaValue, states, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []txmgrtypes.TxState, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
	ret := _m.Called(ctx, metaField, metaValue, states, chainID)

	if len(ret) == 0 {
		panic("no return value specified for FindTxesByMetaFieldAndStates")
	}

	var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, string, string, []txmgrtypes.TxState, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
		return rf(ctx, metaField, metaValue, states, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, string, string, []txmgrtypes.TxState, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, metaField, metaValue, states, chainID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, string, string, []txmgrtypes.TxState, *big.Int) error); ok {
		r1 = rf(ctx, metaField, metaValue, states, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindTxesPendingCallback provides a mock function with given fields: ctx, blockNum, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesPendingCallback(ctx context.Context, blockNum int64, chainID CHAIN_ID) ([]txmgrtypes.ReceiptPlus[R], error) {
	ret := _m.Called(ctx, blockNum, chainID)

	if len(ret) == 0 {
		panic("no return value specified for FindTxesPendingCallback")
	}

	var r0 []txmgrtypes.ReceiptPlus[R]
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, int64, CHAIN_ID) ([]txmgrtypes.ReceiptPlus[R], error)); ok {
		return rf(ctx, blockNum, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, int64, CHAIN_ID) []txmgrtypes.ReceiptPlus[R]); ok {
		r0 = rf(ctx, blockNum, chainID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]txmgrtypes.ReceiptPlus[R])
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, int64, CHAIN_ID) error); ok {
		r1 = rf(ctx, blockNum, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindTxesWithAttemptsAndReceiptsByIdsAndState provides a mock function with given fields: ctx, ids, states, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []txmgrtypes.TxState, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
	ret := _m.Called(ctx, ids, states, chainID)

	if len(ret) == 0 {
		panic("no return value specified for FindTxesWithAttemptsAndReceiptsByIdsAndState")
	}

	var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, []big.Int, []txmgrtypes.TxState, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
		return rf(ctx, ids, states, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, []big.Int, []txmgrtypes.TxState, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, ids, states, chainID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, []big.Int, []txmgrtypes.TxState, *big.Int) error); ok {
		r1 = rf(ctx, ids, states, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindTxesWithMetaFieldByReceiptBlockNum provides a mock function with given fields: ctx, metaField, blockNum, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) {
	ret := _m.Called(ctx, metaField, blockNum, chainID)

	if len(ret) == 0 {
		panic("no return value specified for FindTxesWithMetaFieldByReceiptBlockNum")
	}

	var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, string, int64, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok {
		return rf(ctx, metaField, blockNum, chainID)
	}
	if rf, ok := ret.Get(0).(func(context.Context, string, int64, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok {
		r0 = rf(ctx, metaField, blockNum, chainID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE])
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context, string, int64, *big.Int) error); ok {
		r1 = rf(ctx, metaField, blockNum, chainID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// FindTxesWithMetaFieldByStates provides a mock function with given fields: ctx, metaField, states, chainID
func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []txmgrtypes.TxState, chainID *big.Int)
([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, metaField, states, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxesWithMetaFieldByStates") + } + + var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, []txmgrtypes.TxState, *big.Int) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, metaField, states, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, []txmgrtypes.TxState, *big.Int) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, metaField, states, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, []txmgrtypes.TxState, *big.Int) error); ok { + r1 = rf(ctx, metaField, states, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxsRequiringGasBump provides a mock function with given fields: ctx, address, blockNum, gasBumpThreshold, depth, chainID +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxsRequiringGasBump(ctx context.Context, address ADDR, blockNum int64, gasBumpThreshold int64, depth int64, chainID CHAIN_ID) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, address, blockNum, gasBumpThreshold, depth, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxsRequiringGasBump") + } + + var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ADDR, int64, int64, int64, CHAIN_ID) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, address, blockNum, gasBumpThreshold, depth, chainID) + } + if rf, ok := 
ret.Get(0).(func(context.Context, ADDR, int64, int64, int64, CHAIN_ID) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, address, blockNum, gasBumpThreshold, depth, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ADDR, int64, int64, int64, CHAIN_ID) error); ok { + r1 = rf(ctx, address, blockNum, gasBumpThreshold, depth, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxsRequiringResubmissionDueToInsufficientFunds provides a mock function with given fields: ctx, address, chainID +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) FindTxsRequiringResubmissionDueToInsufficientFunds(ctx context.Context, address ADDR, chainID CHAIN_ID) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, address, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxsRequiringResubmissionDueToInsufficientFunds") + } + + var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, address, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, address, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ADDR, CHAIN_ID) error); ok { + r1 = rf(ctx, address, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetInProgressTxAttempts provides a mock function with given fields: ctx, address, chainID +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, 
FEE]) GetInProgressTxAttempts(ctx context.Context, address ADDR, chainID CHAIN_ID) ([]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, address, chainID) + + if len(ret) == 0 { + panic("no return value specified for GetInProgressTxAttempts") + } + + var r0 []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) ([]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, address, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, address, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ADDR, CHAIN_ID) error); ok { + r1 = rf(ctx, address, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetNonFatalTransactions provides a mock function with given fields: ctx, chainID +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) GetNonFatalTransactions(ctx context.Context, chainID CHAIN_ID) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, chainID) + + if len(ret) == 0 { + panic("no return value specified for GetNonFatalTransactions") + } + + var r0 []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID) ([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID) []*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, 
BLOCK_HASH, SEQ, FEE]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, CHAIN_ID) error); ok { + r1 = rf(ctx, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTxByID provides a mock function with given fields: ctx, id +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) GetTxByID(ctx context.Context, id int64) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetTxByID") + } + + var r0 *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int64) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, int64) *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTxInProgress provides a mock function with given fields: ctx, fromAddress +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) GetTxInProgress(ctx context.Context, fromAddress ADDR) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error) { + ret := _m.Called(ctx, fromAddress) + + if len(ret) == 0 { + panic("no return value specified for GetTxInProgress") + } + + var r0 *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ADDR) (*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], error)); ok { + return rf(ctx, fromAddress) + } + if rf, ok := ret.Get(0).(func(context.Context, ADDR) *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]); ok { + r0 = 
rf(ctx, fromAddress) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ADDR) error); ok { + r1 = rf(ctx, fromAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HasInProgressTransaction provides a mock function with given fields: ctx, account, chainID +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) HasInProgressTransaction(ctx context.Context, account ADDR, chainID CHAIN_ID) (bool, error) { + ret := _m.Called(ctx, account, chainID) + + if len(ret) == 0 { + panic("no return value specified for HasInProgressTransaction") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) (bool, error)); ok { + return rf(ctx, account, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, ADDR, CHAIN_ID) bool); ok { + r0 = rf(ctx, account, chainID) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, ADDR, CHAIN_ID) error); ok { + r1 = rf(ctx, account, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsTxFinalized provides a mock function with given fields: ctx, blockHeight, txID, chainID +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) IsTxFinalized(ctx context.Context, blockHeight int64, txID int64, chainID CHAIN_ID) (bool, error) { + ret := _m.Called(ctx, blockHeight, txID, chainID) + + if len(ret) == 0 { + panic("no return value specified for IsTxFinalized") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int64, int64, CHAIN_ID) (bool, error)); ok { + return rf(ctx, blockHeight, txID, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, int64, int64, CHAIN_ID) bool); ok { + r0 = rf(ctx, blockHeight, txID, chainID) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, int64, int64, CHAIN_ID) error); ok 
{ + r1 = rf(ctx, blockHeight, txID, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadTxAttempts provides a mock function with given fields: ctx, etx +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) LoadTxAttempts(ctx context.Context, etx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error { + ret := _m.Called(ctx, etx) + + if len(ret) == 0 { + panic("no return value specified for LoadTxAttempts") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error); ok { + r0 = rf(ctx, etx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MarkAllConfirmedMissingReceipt provides a mock function with given fields: ctx, chainID +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) MarkAllConfirmedMissingReceipt(ctx context.Context, chainID CHAIN_ID) error { + ret := _m.Called(ctx, chainID) + + if len(ret) == 0 { + panic("no return value specified for MarkAllConfirmedMissingReceipt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, CHAIN_ID) error); ok { + r0 = rf(ctx, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MarkOldTxesMissingReceiptAsErrored provides a mock function with given fields: ctx, blockNum, finalityDepth, chainID +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) MarkOldTxesMissingReceiptAsErrored(ctx context.Context, blockNum int64, finalityDepth uint32, chainID CHAIN_ID) error { + ret := _m.Called(ctx, blockNum, finalityDepth, chainID) + + if len(ret) == 0 { + panic("no return value specified for MarkOldTxesMissingReceiptAsErrored") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64, uint32, CHAIN_ID) error); ok { + r0 = rf(ctx, blockNum, finalityDepth, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PreloadTxes provides a mock function with given fields: ctx, attempts +func (_m 
*TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) PreloadTxes(ctx context.Context, attempts []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error { + ret := _m.Called(ctx, attempts) + + if len(ret) == 0 { + panic("no return value specified for PreloadTxes") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error); ok { + r0 = rf(ctx, attempts) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PruneUnstartedTxQueue provides a mock function with given fields: ctx, queueSize, subject +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) PruneUnstartedTxQueue(ctx context.Context, queueSize uint32, subject uuid.UUID) ([]int64, error) { + ret := _m.Called(ctx, queueSize, subject) + + if len(ret) == 0 { + panic("no return value specified for PruneUnstartedTxQueue") + } + + var r0 []int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, uuid.UUID) ([]int64, error)); ok { + return rf(ctx, queueSize, subject) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, uuid.UUID) []int64); ok { + r0 = rf(ctx, queueSize, subject) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int64) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, uuid.UUID) error); ok { + r1 = rf(ctx, queueSize, subject) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReapTxHistory provides a mock function with given fields: ctx, minBlockNumberToKeep, timeThreshold, chainID +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) ReapTxHistory(ctx context.Context, minBlockNumberToKeep int64, timeThreshold time.Time, chainID CHAIN_ID) error { + ret := _m.Called(ctx, minBlockNumberToKeep, timeThreshold, chainID) + + if len(ret) == 0 { + panic("no return value specified for ReapTxHistory") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64, time.Time, CHAIN_ID) 
error); ok { + r0 = rf(ctx, minBlockNumberToKeep, timeThreshold, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveConfirmedMissingReceiptAttempt provides a mock function with given fields: ctx, timeout, attempt, broadcastAt +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) SaveConfirmedMissingReceiptAttempt(ctx context.Context, timeout time.Duration, attempt *txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], broadcastAt time.Time) error { + ret := _m.Called(ctx, timeout, attempt, broadcastAt) + + if len(ret) == 0 { + panic("no return value specified for SaveConfirmedMissingReceiptAttempt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, time.Duration, *txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], time.Time) error); ok { + r0 = rf(ctx, timeout, attempt, broadcastAt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveFetchedReceipts provides a mock function with given fields: ctx, receipts, chainID +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) SaveFetchedReceipts(ctx context.Context, receipts []R, chainID CHAIN_ID) error { + ret := _m.Called(ctx, receipts, chainID) + + if len(ret) == 0 { + panic("no return value specified for SaveFetchedReceipts") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []R, CHAIN_ID) error); ok { + r0 = rf(ctx, receipts, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveInProgressAttempt provides a mock function with given fields: ctx, attempt +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) SaveInProgressAttempt(ctx context.Context, attempt *txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error { + ret := _m.Called(ctx, attempt) + + if len(ret) == 0 { + panic("no return value specified for SaveInProgressAttempt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *txmgrtypes.TxAttempt[CHAIN_ID, 
ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error); ok { + r0 = rf(ctx, attempt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveInsufficientFundsAttempt provides a mock function with given fields: ctx, timeout, attempt, broadcastAt +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) SaveInsufficientFundsAttempt(ctx context.Context, timeout time.Duration, attempt *txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], broadcastAt time.Time) error { + ret := _m.Called(ctx, timeout, attempt, broadcastAt) + + if len(ret) == 0 { + panic("no return value specified for SaveInsufficientFundsAttempt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, time.Duration, *txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], time.Time) error); ok { + r0 = rf(ctx, timeout, attempt, broadcastAt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveReplacementInProgressAttempt provides a mock function with given fields: ctx, oldAttempt, replacementAttempt +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) SaveReplacementInProgressAttempt(ctx context.Context, oldAttempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], replacementAttempt *txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error { + ret := _m.Called(ctx, oldAttempt, replacementAttempt) + + if len(ret) == 0 { + panic("no return value specified for SaveReplacementInProgressAttempt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], *txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error); ok { + r0 = rf(ctx, oldAttempt, replacementAttempt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveSentAttempt provides a mock function with given fields: ctx, timeout, attempt, broadcastAt +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) 
SaveSentAttempt(ctx context.Context, timeout time.Duration, attempt *txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], broadcastAt time.Time) error { + ret := _m.Called(ctx, timeout, attempt, broadcastAt) + + if len(ret) == 0 { + panic("no return value specified for SaveSentAttempt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, time.Duration, *txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], time.Time) error); ok { + r0 = rf(ctx, timeout, attempt, broadcastAt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetBroadcastBeforeBlockNum provides a mock function with given fields: ctx, blockNum, chainID +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) SetBroadcastBeforeBlockNum(ctx context.Context, blockNum int64, chainID CHAIN_ID) error { + ret := _m.Called(ctx, blockNum, chainID) + + if len(ret) == 0 { + panic("no return value specified for SetBroadcastBeforeBlockNum") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64, CHAIN_ID) error); ok { + r0 = rf(ctx, blockNum, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateBroadcastAts provides a mock function with given fields: ctx, now, etxIDs +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) UpdateBroadcastAts(ctx context.Context, now time.Time, etxIDs []int64) error { + ret := _m.Called(ctx, now, etxIDs) + + if len(ret) == 0 { + panic("no return value specified for UpdateBroadcastAts") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, time.Time, []int64) error); ok { + r0 = rf(ctx, now, etxIDs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTxAttemptInProgressToBroadcast provides a mock function with given fields: ctx, etx, attempt, NewAttemptState +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) UpdateTxAttemptInProgressToBroadcast(ctx context.Context, etx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, 
BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], NewAttemptState txmgrtypes.TxAttemptState) error { + ret := _m.Called(ctx, etx, attempt, NewAttemptState) + + if len(ret) == 0 { + panic("no return value specified for UpdateTxAttemptInProgressToBroadcast") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], txmgrtypes.TxAttemptState) error); ok { + r0 = rf(ctx, etx, attempt, NewAttemptState) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTxCallbackCompleted provides a mock function with given fields: ctx, pipelineTaskRunRid, chainId +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) UpdateTxCallbackCompleted(ctx context.Context, pipelineTaskRunRid uuid.UUID, chainId CHAIN_ID) error { + ret := _m.Called(ctx, pipelineTaskRunRid, chainId) + + if len(ret) == 0 { + panic("no return value specified for UpdateTxCallbackCompleted") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uuid.UUID, CHAIN_ID) error); ok { + r0 = rf(ctx, pipelineTaskRunRid, chainId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTxFatalError provides a mock function with given fields: ctx, etx +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) UpdateTxFatalError(ctx context.Context, etx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error { + ret := _m.Called(ctx, etx) + + if len(ret) == 0 { + panic("no return value specified for UpdateTxFatalError") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error); ok { + r0 = rf(ctx, etx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTxForRebroadcast provides a mock function with given fields: ctx, etx, etxAttempt +func (_m *TxStore[ADDR, CHAIN_ID, 
TX_HASH, BLOCK_HASH, R, SEQ, FEE]) UpdateTxForRebroadcast(ctx context.Context, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], etxAttempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error { + ret := _m.Called(ctx, etx, etxAttempt) + + if len(ret) == 0 { + panic("no return value specified for UpdateTxForRebroadcast") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error); ok { + r0 = rf(ctx, etx, etxAttempt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTxUnstartedToInProgress provides a mock function with given fields: ctx, etx, attempt +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) UpdateTxUnstartedToInProgress(ctx context.Context, etx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt *txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error { + ret := _m.Called(ctx, etx, attempt) + + if len(ret) == 0 { + panic("no return value specified for UpdateTxUnstartedToInProgress") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], *txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error); ok { + r0 = rf(ctx, etx, attempt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTxsUnconfirmed provides a mock function with given fields: ctx, ids +func (_m *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) UpdateTxsUnconfirmed(ctx context.Context, ids []int64) error { + ret := _m.Called(ctx, ids) + + if len(ret) == 0 { + panic("no return value specified for UpdateTxsUnconfirmed") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []int64) error); ok { + r0 = rf(ctx, ids) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTxStore creates a new instance of 
TxStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTxStore[ADDR types.Hashable, CHAIN_ID types.ID, TX_HASH types.Hashable, BLOCK_HASH types.Hashable, R txmgrtypes.ChainReceipt[TX_HASH, BLOCK_HASH], SEQ types.Sequence, FEE feetypes.Fee](t interface { + mock.TestingT + Cleanup(func()) +}) *TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] { + mock := &TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/txmgr/types/mocks/tx_strategy.go b/common/txmgr/types/mocks/tx_strategy.go new file mode 100644 index 00000000..44a058fe --- /dev/null +++ b/common/txmgr/types/mocks/tx_strategy.go @@ -0,0 +1,79 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + types "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + mock "github.com/stretchr/testify/mock" + + uuid "github.com/google/uuid" +) + +// TxStrategy is an autogenerated mock type for the TxStrategy type +type TxStrategy struct { + mock.Mock +} + +// PruneQueue provides a mock function with given fields: ctx, pruneService +func (_m *TxStrategy) PruneQueue(ctx context.Context, pruneService types.UnstartedTxQueuePruner) ([]int64, error) { + ret := _m.Called(ctx, pruneService) + + if len(ret) == 0 { + panic("no return value specified for PruneQueue") + } + + var r0 []int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.UnstartedTxQueuePruner) ([]int64, error)); ok { + return rf(ctx, pruneService) + } + if rf, ok := ret.Get(0).(func(context.Context, types.UnstartedTxQueuePruner) []int64); ok { + r0 = rf(ctx, pruneService) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int64) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.UnstartedTxQueuePruner) error); ok { + r1 = 
rf(ctx, pruneService) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Subject provides a mock function with given fields: +func (_m *TxStrategy) Subject() uuid.NullUUID { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Subject") + } + + var r0 uuid.NullUUID + if rf, ok := ret.Get(0).(func() uuid.NullUUID); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uuid.NullUUID) + } + + return r0 +} + +// NewTxStrategy creates a new instance of TxStrategy. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTxStrategy(t interface { + mock.TestingT + Cleanup(func()) +}) *TxStrategy { + mock := &TxStrategy{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/txmgr/types/tx.go b/common/txmgr/types/tx.go new file mode 100644 index 00000000..adf57cc7 --- /dev/null +++ b/common/txmgr/types/tx.go @@ -0,0 +1,340 @@ +package types + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math/big" + "slices" + "strings" + "time" + + "github.com/google/uuid" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/sqlutil" + clnull "github.com/goplugin/plugin-common/pkg/utils/null" + + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// TxStrategy controls how txes are queued and sent +// +//go:generate mockery --quiet --name TxStrategy --output ./mocks/ --case=underscore --structname TxStrategy --filename tx_strategy.go +type TxStrategy interface { + // Subject will be saved txes.subject if not null + Subject() uuid.NullUUID + // PruneQueue is called after tx insertion + // It accepts the service responsible for deleting + // unstarted txs and deletion options + PruneQueue(ctx context.Context, pruneService 
UnstartedTxQueuePruner) (ids []int64, err error) +} + +type TxAttemptState int8 + +type TxState string + +const ( + TxAttemptInProgress TxAttemptState = iota + 1 + TxAttemptInsufficientFunds + TxAttemptBroadcast + txAttemptStateCount // always at end to calculate number of states +) + +var txAttemptStateStrings = []string{ + "unknown_attempt_state", // default 0 value + TxAttemptInProgress: "in_progress", + TxAttemptInsufficientFunds: "insufficient_funds", + TxAttemptBroadcast: "broadcast", +} + +func NewTxAttemptState(state string) (s TxAttemptState) { + if index := slices.Index(txAttemptStateStrings, state); index != -1 { + s = TxAttemptState(index) + } + return s +} + +// String returns string formatted states for logging +func (s TxAttemptState) String() (str string) { + if s < txAttemptStateCount { + return txAttemptStateStrings[s] + } + return txAttemptStateStrings[0] +} + +type TxRequest[ADDR types.Hashable, TX_HASH types.Hashable] struct { + // IdempotencyKey is a globally unique ID set by the caller, to prevent accidental creation of duplicated Txs during retries or crash recovery. + // If this field is set, the TXM will first search existing Txs with this field. + // If found, it will return the existing Tx, without creating a new one. TXM will not validate or ensure that existing Tx is same as the incoming TxRequest. + // If not found, TXM will create a new Tx. + // If IdempotencyKey is set to null, TXM will always create a new Tx. + // Since IdempotencyKey has to be globally unique, consider prepending the service or component's name it is being used by + // Such as {service}-{ID}. 
E.g vrf-12345 + IdempotencyKey *string + FromAddress ADDR + ToAddress ADDR + EncodedPayload []byte + Value big.Int + FeeLimit uint32 + Meta *TxMeta[ADDR, TX_HASH] + ForwarderAddress ADDR + + // Pipeline variables - if you aren't calling this from chain tx task within + // the pipeline, you don't need these variables + MinConfirmations clnull.Uint32 + PipelineTaskRunID *uuid.UUID + + Strategy TxStrategy + + // Checker defines the check that should be run before a transaction is submitted on chain. + Checker TransmitCheckerSpec[ADDR] + + // Mark tx requiring callback + SignalCallback bool +} + +// TransmitCheckerSpec defines the check that should be performed before a transaction is submitted +// on chain. +type TransmitCheckerSpec[ADDR types.Hashable] struct { + // CheckerType is the type of check that should be performed. Empty indicates no check. + CheckerType TransmitCheckerType `json:",omitempty"` + + // VRFCoordinatorAddress is the address of the VRF coordinator that should be used to perform + // VRF transmit checks. This should be set iff CheckerType is TransmitCheckerTypeVRFV2. + VRFCoordinatorAddress *ADDR `json:",omitempty"` + + // VRFRequestBlockNumber is the block number in which the provided VRF request has been made. + // This should be set iff CheckerType is TransmitCheckerTypeVRFV2. + VRFRequestBlockNumber *big.Int `json:",omitempty"` +} + +// TransmitCheckerType describes the type of check that should be performed before a transaction is +// executed on-chain. 
+type TransmitCheckerType string + +// TxMeta contains fields of the transaction metadata +// Not all fields are guaranteed to be present +type TxMeta[ADDR types.Hashable, TX_HASH types.Hashable] struct { + JobID *int32 `json:"JobID,omitempty"` + + // Pipeline fields + FailOnRevert null.Bool `json:"FailOnRevert,omitempty"` + + // VRF-only fields + RequestID *TX_HASH `json:"RequestID,omitempty"` + RequestTxHash *TX_HASH `json:"RequestTxHash,omitempty"` + // Batch variants of the above + RequestIDs []TX_HASH `json:"RequestIDs,omitempty"` + RequestTxHashes []TX_HASH `json:"RequestTxHashes,omitempty"` + // Used for the VRFv2 - max link this tx will bill + // should it get bumped + MaxLink *string `json:"MaxLink,omitempty"` + // Used for the VRFv2 - the subscription ID of the + // requester of the VRF. + SubID *uint64 `json:"SubId,omitempty"` + // Used for the VRFv2Plus - the uint256 subscription ID of the + // requester of the VRF. + GlobalSubID *string `json:"GlobalSubId,omitempty"` + // Used for VRFv2Plus - max native token this tx will bill + // should it get bumped + MaxEth *string `json:"MaxEth,omitempty"` + + // Used for keepers + UpkeepID *string `json:"UpkeepID,omitempty"` + + // Used for VRF to know if the txn is a ForceFulfilment txn + ForceFulfilled *bool `json:"ForceFulfilled,omitempty"` + ForceFulfillmentAttempt *uint64 `json:"ForceFulfillmentAttempt,omitempty"` + + // Used for Keystone Workflows + WorkflowExecutionID *string `json:"WorkflowExecutionID,omitempty"` + + // Used only for forwarded txs, tracks the original destination address. + // When this is set, it indicates tx is forwarded through To address. 
+ FwdrDestAddress *ADDR `json:"ForwarderDestAddress,omitempty"` + + // MessageIDs is used by CCIP for tx to executed messages correlation in logs + MessageIDs []string `json:"MessageIDs,omitempty"` + // SeqNumbers is used by CCIP for tx to committed sequence numbers correlation in logs + SeqNumbers []uint64 `json:"SeqNumbers,omitempty"` +} + +type TxAttempt[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH, BLOCK_HASH types.Hashable, + SEQ types.Sequence, + FEE feetypes.Fee, +] struct { + ID int64 + TxID int64 + Tx Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] + TxFee FEE + // ChainSpecificFeeLimit on the TxAttempt is always the same as the on-chain encoded value for fee limit + ChainSpecificFeeLimit uint32 + SignedRawTx []byte + Hash TX_HASH + CreatedAt time.Time + BroadcastBeforeBlockNum *int64 + State TxAttemptState + Receipts []ChainReceipt[TX_HASH, BLOCK_HASH] `json:"-"` + TxType int +} + +func (a *TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) String() string { + return fmt.Sprintf("TxAttempt(ID:%d,TxID:%d,Fee:%s,TxType:%d)", a.ID, a.TxID, a.TxFee, a.TxType) +} + +type Tx[ + CHAIN_ID types.ID, + ADDR types.Hashable, + TX_HASH, BLOCK_HASH types.Hashable, + SEQ types.Sequence, + FEE feetypes.Fee, +] struct { + ID int64 + IdempotencyKey *string + Sequence *SEQ + FromAddress ADDR + ToAddress ADDR + EncodedPayload []byte + Value big.Int + // FeeLimit on the Tx is always the conceptual gas limit, which is not + // necessarily the same as the on-chain encoded value (i.e. Optimism) + FeeLimit uint32 + Error null.String + // BroadcastAt is updated every time an attempt for this tx is re-sent + // In almost all cases it will be within a second or so of the actual send time. 
+ BroadcastAt *time.Time + // InitialBroadcastAt is recorded once, the first ever time this tx is sent + InitialBroadcastAt *time.Time + CreatedAt time.Time + State TxState + TxAttempts []TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] `json:"-"` + // Marshalled TxMeta + // Used for additional context around transactions which you want to log + // at send time. + Meta *sqlutil.JSON + Subject uuid.NullUUID + ChainID CHAIN_ID + + PipelineTaskRunID uuid.NullUUID + MinConfirmations clnull.Uint32 + + // TransmitChecker defines the check that should be performed before a transaction is submitted on + // chain. + TransmitChecker *sqlutil.JSON + + // Marks tx requiring callback + SignalCallback bool + // Marks tx callback as signaled + CallbackCompleted bool +} + +func (e *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetError() error { + if e.Error.Valid { + return errors.New(e.Error.String) + } + return nil +} + +// GetID allows Tx to be used as jsonapi.MarshalIdentifier +func (e *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetID() string { + return fmt.Sprintf("%d", e.ID) +} + +// GetMeta returns an Tx's meta in struct form, unmarshalling it from JSON first. +func (e *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetMeta() (*TxMeta[ADDR, TX_HASH], error) { + if e.Meta == nil { + return nil, nil + } + var m TxMeta[ADDR, TX_HASH] + if err := json.Unmarshal(*e.Meta, &m); err != nil { + return nil, fmt.Errorf("unmarshalling meta: %w", err) + } + + return &m, nil +} + +// GetLogger returns a new logger with metadata fields. 
+func (e *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetLogger(lgr logger.Logger) logger.SugaredLogger { + lgr = logger.With(lgr, + "txID", e.ID, + "sequence", e.Sequence, + "checker", e.TransmitChecker, + "feeLimit", e.FeeLimit, + ) + + meta, err := e.GetMeta() + if err != nil { + lgr.Errorw("failed to get meta of the transaction", "err", err) + return logger.Sugared(lgr) + } + + if meta != nil { + lgr = logger.With(lgr, "jobID", meta.JobID) + + if meta.RequestTxHash != nil { + lgr = logger.With(lgr, "requestTxHash", *meta.RequestTxHash) + } + + if meta.RequestID != nil { + id := *meta.RequestID + lgr = logger.With(lgr, "requestID", new(big.Int).SetBytes(id.Bytes()).String()) + } + + if len(meta.RequestIDs) != 0 { + var ids []string + for _, id := range meta.RequestIDs { + ids = append(ids, new(big.Int).SetBytes(id.Bytes()).String()) + } + lgr = logger.With(lgr, "requestIDs", strings.Join(ids, ",")) + } + + if meta.UpkeepID != nil { + lgr = logger.With(lgr, "upkeepID", *meta.UpkeepID) + } + + if meta.SubID != nil { + lgr = logger.With(lgr, "subID", *meta.SubID) + } + + if meta.MaxLink != nil { + lgr = logger.With(lgr, "maxLink", *meta.MaxLink) + } + + if meta.FwdrDestAddress != nil { + lgr = logger.With(lgr, "FwdrDestAddress", *meta.FwdrDestAddress) + } + + if len(meta.MessageIDs) > 0 { + for _, mid := range meta.MessageIDs { + lgr = logger.With(lgr, "messageID", mid) + } + } + + if len(meta.SeqNumbers) > 0 { + lgr = logger.With(lgr, "SeqNumbers", meta.SeqNumbers) + } + } + + return logger.Sugared(lgr) +} + +// GetChecker returns an Tx's transmit checker spec in struct form, unmarshalling it from JSON +// first. 
+func (e *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetChecker() (TransmitCheckerSpec[ADDR], error) { + if e.TransmitChecker == nil { + return TransmitCheckerSpec[ADDR]{}, nil + } + var t TransmitCheckerSpec[ADDR] + if err := json.Unmarshal(*e.TransmitChecker, &t); err != nil { + return t, fmt.Errorf("unmarshalling transmit checker: %w", err) + } + + return t, nil +} diff --git a/common/txmgr/types/tx_attempt_builder.go b/common/txmgr/types/tx_attempt_builder.go new file mode 100644 index 00000000..10f9855f --- /dev/null +++ b/common/txmgr/types/tx_attempt_builder.go @@ -0,0 +1,44 @@ +package types + +import ( + "context" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// TxAttemptBuilder takes the base unsigned transaction + optional parameters (tx type, gas parameters) +// and returns a signed TxAttempt +// it is able to estimate fees and sign transactions +// +//go:generate mockery --quiet --name TxAttemptBuilder --output ./mocks/ --case=underscore +type TxAttemptBuilder[ + CHAIN_ID types.ID, // CHAIN_ID - chain id type + HEAD types.Head[BLOCK_HASH], // HEAD - chain head type + ADDR types.Hashable, // ADDR - chain address type + TX_HASH, BLOCK_HASH types.Hashable, // various chain hash types + SEQ types.Sequence, // SEQ - chain sequence type (nonce, utxo, etc) + FEE feetypes.Fee, // FEE - chain fee type +] interface { + // interfaces for running the underlying estimator + services.Service + types.HeadTrackable[HEAD, BLOCK_HASH] + + // NewTxAttempt builds a transaction using the configured transaction type and fee estimator (new estimation) + NewTxAttempt(ctx context.Context, tx Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], lggr logger.Logger, opts ...feetypes.Opt) (attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], fee FEE, feeLimit uint32, retryable bool, err 
error) + + // NewTxAttemptWithType builds a transaction using the configured fee estimator (new estimation) + passed in tx type + NewTxAttemptWithType(ctx context.Context, tx Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], lggr logger.Logger, txType int, opts ...feetypes.Opt) (attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], fee FEE, feeLimit uint32, retryable bool, err error) + + // NewBumpTxAttempt builds a transaction using the configured fee estimator (bumping) + tx type from previous attempt + // this should only be used after an initial attempt has been broadcast and the underlying gas estimator only needs to bump the fee + NewBumpTxAttempt(ctx context.Context, tx Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], previousAttempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], priorAttempts []TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], lggr logger.Logger) (attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], bumpedFee FEE, bumpedFeeLimit uint32, retryable bool, err error) + + // NewCustomTxAttempt builds a transaction using the passed in fee + tx type + NewCustomTxAttempt(tx Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], fee FEE, gasLimit uint32, txType int, lggr logger.Logger) (attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], retryable bool, err error) + + // NewEmptyTxAttempt is used in ForceRebroadcast to create a signed tx with zero value sent to the zero address + NewEmptyTxAttempt(seq SEQ, feeLimit uint32, fee FEE, fromAddress ADDR) (attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) +} diff --git a/common/txmgr/types/tx_store.go b/common/txmgr/types/tx_store.go new file mode 100644 index 00000000..06cc2765 --- /dev/null +++ b/common/txmgr/types/tx_store.go @@ -0,0 +1,136 @@ +package types + +import ( + "context" + "math/big" + "time" + + "github.com/google/uuid" + "gopkg.in/guregu/null.v4" + + feetypes 
"github.com/goplugin/pluginv3.0/v2/common/fee/types" + "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// TxStore is a superset of all the needed persistence layer methods +// +//go:generate mockery --quiet --name TxStore --output ./mocks/ --case=underscore +type TxStore[ + // Represents an account address, in native chain format. + ADDR types.Hashable, + // Represents a chain id to be used for the chain. + CHAIN_ID types.ID, + // Represents a unique Tx Hash for a chain + TX_HASH types.Hashable, + // Represents a unique Block Hash for a chain + BLOCK_HASH types.Hashable, + // Represents a onchain receipt object that a chain's RPC returns + R ChainReceipt[TX_HASH, BLOCK_HASH], + // Represents the sequence type for a chain. For example, nonce for EVM. + SEQ types.Sequence, + // Represents the chain specific fee type + FEE feetypes.Fee, +] interface { + UnstartedTxQueuePruner + TxHistoryReaper[CHAIN_ID] + TransactionStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, SEQ, FEE] + + // Find confirmed txes beyond the minConfirmations param that require callback but have not yet been signaled + FindTxesPendingCallback(ctx context.Context, blockNum int64, chainID CHAIN_ID) (receiptsPlus []ReceiptPlus[R], err error) + // Update tx to mark that its callback has been signaled + UpdateTxCallbackCompleted(ctx context.Context, pipelineTaskRunRid uuid.UUID, chainId CHAIN_ID) error + SaveFetchedReceipts(ctx context.Context, receipts []R, chainID CHAIN_ID) (err error) + + // additional methods for tx store management + CheckTxQueueCapacity(ctx context.Context, fromAddress ADDR, maxQueuedTransactions uint64, chainID CHAIN_ID) (err error) + Close() + Abandon(ctx context.Context, id CHAIN_ID, addr ADDR) error + // Find transactions by a field in the TxMeta blob and transaction states + FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []TxState, chainID *big.Int) (tx []*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + // Find 
transactions with a non-null TxMeta field that was provided by transaction states + FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []TxState, chainID *big.Int) (tx []*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + // Find transactions with a non-null TxMeta field that was provided and a receipt block number greater than or equal to the one provided + FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) (tx []*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + // Find transactions loaded with transaction attempts and receipts by transaction IDs and states + FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []TxState, chainID *big.Int) (tx []*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) +} + +// TransactionStore contains the persistence layer methods needed to manage Txs and TxAttempts +type TransactionStore[ + ADDR types.Hashable, + CHAIN_ID types.ID, + TX_HASH types.Hashable, + BLOCK_HASH types.Hashable, + SEQ types.Sequence, + FEE feetypes.Fee, +] interface { + CountUnconfirmedTransactions(ctx context.Context, fromAddress ADDR, chainID CHAIN_ID) (count uint32, err error) + CountTransactionsByState(ctx context.Context, state TxState, chainID CHAIN_ID) (count uint32, err error) + CountUnstartedTransactions(ctx context.Context, fromAddress ADDR, chainID CHAIN_ID) (count uint32, err error) + CreateTransaction(ctx context.Context, txRequest TxRequest[ADDR, TX_HASH], chainID CHAIN_ID) (tx Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + DeleteInProgressAttempt(ctx context.Context, attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error + FindLatestSequence(ctx context.Context, fromAddress ADDR, chainId CHAIN_ID) (SEQ, error) + FindTxsRequiringGasBump(ctx context.Context, address ADDR, blockNum, gasBumpThreshold, depth int64, chainID CHAIN_ID) (etxs []*Tx[CHAIN_ID, ADDR, 
TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + FindTxsRequiringResubmissionDueToInsufficientFunds(ctx context.Context, address ADDR, chainID CHAIN_ID) (etxs []*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + FindTxAttemptsConfirmedMissingReceipt(ctx context.Context, chainID CHAIN_ID) (attempts []TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + FindTxAttemptsRequiringReceiptFetch(ctx context.Context, chainID CHAIN_ID) (attempts []TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + FindTxAttemptsRequiringResend(ctx context.Context, olderThan time.Time, maxInFlightTransactions uint32, chainID CHAIN_ID, address ADDR) (attempts []TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + // Search for Tx using the idempotencyKey and chainID + FindTxWithIdempotencyKey(ctx context.Context, idempotencyKey string, chainID CHAIN_ID) (tx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + // Search for Tx using the fromAddress and sequence + FindTxWithSequence(ctx context.Context, fromAddress ADDR, seq SEQ) (etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + FindNextUnstartedTransactionFromAddress(ctx context.Context, etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], fromAddress ADDR, chainID CHAIN_ID) error + FindTransactionsConfirmedInBlockRange(ctx context.Context, highBlockNumber, lowBlockNumber int64, chainID CHAIN_ID) (etxs []*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + FindEarliestUnconfirmedBroadcastTime(ctx context.Context, chainID CHAIN_ID) (null.Time, error) + FindEarliestUnconfirmedTxAttemptBlock(ctx context.Context, chainID CHAIN_ID) (null.Int, error) + GetTxInProgress(ctx context.Context, fromAddress ADDR) (etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + GetInProgressTxAttempts(ctx context.Context, address ADDR, chainID CHAIN_ID) (attempts []TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + 
GetNonFatalTransactions(ctx context.Context, chainID CHAIN_ID) (txs []*Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + GetTxByID(ctx context.Context, id int64) (tx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], err error) + HasInProgressTransaction(ctx context.Context, account ADDR, chainID CHAIN_ID) (exists bool, err error) + LoadTxAttempts(ctx context.Context, etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error + MarkAllConfirmedMissingReceipt(ctx context.Context, chainID CHAIN_ID) (err error) + MarkOldTxesMissingReceiptAsErrored(ctx context.Context, blockNum int64, finalityDepth uint32, chainID CHAIN_ID) error + PreloadTxes(ctx context.Context, attempts []TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error + SaveConfirmedMissingReceiptAttempt(ctx context.Context, timeout time.Duration, attempt *TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], broadcastAt time.Time) error + SaveInProgressAttempt(ctx context.Context, attempt *TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error + SaveInsufficientFundsAttempt(ctx context.Context, timeout time.Duration, attempt *TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], broadcastAt time.Time) error + SaveReplacementInProgressAttempt(ctx context.Context, oldAttempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], replacementAttempt *TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error + SaveSentAttempt(ctx context.Context, timeout time.Duration, attempt *TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], broadcastAt time.Time) error + SetBroadcastBeforeBlockNum(ctx context.Context, blockNum int64, chainID CHAIN_ID) error + UpdateBroadcastAts(ctx context.Context, now time.Time, etxIDs []int64) error + UpdateTxAttemptInProgressToBroadcast(ctx context.Context, etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], NewAttemptState TxAttemptState) error + // Update tx 
to mark that its callback has been signaled + UpdateTxCallbackCompleted(ctx context.Context, pipelineTaskRunRid uuid.UUID, chainId CHAIN_ID) error + UpdateTxsUnconfirmed(ctx context.Context, ids []int64) error + UpdateTxUnstartedToInProgress(ctx context.Context, etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt *TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error + UpdateTxFatalError(ctx context.Context, etx *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error + UpdateTxForRebroadcast(ctx context.Context, etx Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], etxAttempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error + IsTxFinalized(ctx context.Context, blockHeight int64, txID int64, chainID CHAIN_ID) (finalized bool, err error) +} + +type TxHistoryReaper[CHAIN_ID types.ID] interface { + ReapTxHistory(ctx context.Context, minBlockNumberToKeep int64, timeThreshold time.Time, chainID CHAIN_ID) error +} + +type UnstartedTxQueuePruner interface { + PruneUnstartedTxQueue(ctx context.Context, queueSize uint32, subject uuid.UUID) (ids []int64, err error) +} + +// R is the raw unparsed transaction receipt +type ReceiptPlus[R any] struct { + ID uuid.UUID `db:"pipeline_run_id"` + Receipt R `db:"receipt"` + FailOnRevert bool `db:"fail_on_revert"` +} + +type ChainReceipt[TX_HASH, BLOCK_HASH types.Hashable] interface { + GetStatus() uint64 + GetTxHash() TX_HASH + GetBlockNumber() *big.Int + IsZero() bool + IsUnmined() bool + GetFeeUsed() uint64 + GetTransactionIndex() uint + GetBlockHash() BLOCK_HASH +} diff --git a/common/txmgr/types/tx_test.go b/common/txmgr/types/tx_test.go new file mode 100644 index 00000000..b945017a --- /dev/null +++ b/common/txmgr/types/tx_test.go @@ -0,0 +1,50 @@ +package types + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTxAttemptState(t *testing.T) { + type stateCompare struct { + state TxAttemptState + str string + } + + // dynamically build base states + states 
:= []stateCompare{} + for i, v := range txAttemptStateStrings { + states = append(states, stateCompare{TxAttemptState(i), v}) + } + + t.Run("NewTxAttemptState", func(t *testing.T) { + // string representation + addStates := []stateCompare{ + {TxAttemptState(0), "invalid_state"}, + } + allStates := append(states, addStates...) + for i := range allStates { + s := allStates[i] + t.Run(fmt.Sprintf("%s->%d", s.str, s.state), func(t *testing.T) { + assert.Equal(t, s.state, NewTxAttemptState(s.str)) + }) + } + }) + + t.Run("String", func(t *testing.T) { + // string representation + addStates := []stateCompare{ + {txAttemptStateCount, txAttemptStateStrings[0]}, + {100, txAttemptStateStrings[0]}, + } + allStates := append(states, addStates...) + for i := range allStates { + s := allStates[i] + t.Run(fmt.Sprintf("%d->%s", s.state, s.str), func(t *testing.T) { + assert.Equal(t, s.str, s.state.String()) + }) + } + }) +} diff --git a/common/types/chain.go b/common/types/chain.go new file mode 100644 index 00000000..c2c2011d --- /dev/null +++ b/common/types/chain.go @@ -0,0 +1,17 @@ +package types + +import "fmt" + +// Sequence represents the base type, for any chain's sequence object. +// It should be convertible to a string +type Sequence interface { + fmt.Stringer + Int64() int64 // needed for numeric sequence confirmation - to be removed with confirmation logic generalization: https://smartcontract-it.atlassian.net/browse/BCI-860 +} + +// Generate the next usable sequence for a transaction +type GenerateNextSequenceFunc[SEQ Sequence] func(prev SEQ) SEQ + +// ID represents the base type, for any chain's ID. 
+// It should be convertible to a string, that can uniquely identify this chain +type ID fmt.Stringer diff --git a/common/types/hashable.go b/common/types/hashable.go new file mode 100644 index 00000000..2d166505 --- /dev/null +++ b/common/types/hashable.go @@ -0,0 +1,12 @@ +package types + +import "fmt" + +// A chain-agnostic generic interface to represent the following native types on various chains: +// PublicKey, Address, Account, BlockHash, TxHash +type Hashable interface { + fmt.Stringer + comparable + + Bytes() []byte +} diff --git a/common/types/head.go b/common/types/head.go new file mode 100644 index 00000000..c363fd5d --- /dev/null +++ b/common/types/head.go @@ -0,0 +1,39 @@ +package types + +import ( + "math/big" + "time" +) + +// Head provides access to a chain's head, as needed by the TxManager. +// This is a generic interface which ALL chains will implement. +// +//go:generate mockery --quiet --name Head --output ./mocks/ --case=underscore +type Head[BLOCK_HASH Hashable] interface { + // BlockNumber is the head's block number + BlockNumber() int64 + + // Timestamp the time of mining of the block + GetTimestamp() time.Time + + // ChainLength returns the length of the chain followed by recursively looking up parents + ChainLength() uint32 + + // EarliestHeadInChain traverses through parents until it finds the earliest one + EarliestHeadInChain() Head[BLOCK_HASH] + + // Parent is the head's parent block + GetParent() Head[BLOCK_HASH] + + // Hash is the head's block hash + BlockHash() BLOCK_HASH + GetParentHash() BLOCK_HASH + + // HashAtHeight returns the hash of the block at the given height, if it is in the chain. + // If not in chain, returns the zero hash + HashAtHeight(blockNum int64) BLOCK_HASH + + // Returns the total difficulty of the block. For chains who do not have a concept of block + // difficulty, return 0. 
+ BlockDifficulty() *big.Int +} diff --git a/common/types/head_tracker.go b/common/types/head_tracker.go new file mode 100644 index 00000000..bdda2552 --- /dev/null +++ b/common/types/head_tracker.go @@ -0,0 +1,79 @@ +package types + +import ( + "context" + + "github.com/goplugin/plugin-common/pkg/services" +) + +// HeadTracker holds and stores the block experienced by a particular node in a thread safe manner. +// Reconstitutes the last block number on reboot. +// +//go:generate mockery --quiet --name HeadTracker --output ../mocks/ --case=underscore +type HeadTracker[H Head[BLOCK_HASH], BLOCK_HASH Hashable] interface { + services.Service + // Backfill given a head will fill in any missing heads up to the given depth + // (used for testing) + Backfill(ctx context.Context, headWithChain H, depth uint) (err error) + LatestChain() H +} + +// HeadTrackable is implemented by the core txm, +// to be able to receive head events from any chain. +// Chain implementations should notify head events to the core txm via this interface. +// +//go:generate mockery --quiet --name HeadTrackable --output ./mocks/ --case=underscore +type HeadTrackable[H Head[BLOCK_HASH], BLOCK_HASH Hashable] interface { + // OnNewLongestChain sends a new head when it becomes available. Subscribers can recursively trace the parent + // of the head to the finality depth back. If this is not possible (e.g. due to recent boot, backfill not complete + // etc), users may get a shorter linked list. If there is a re-org, older blocks won't be sent to this function again. + // But the new blocks from the re-org will be available in later blocks' parent linked list. + OnNewLongestChain(ctx context.Context, head H) +} + +// HeadSaver is an chain agnostic interface for saving and loading heads +// Different chains will instantiate generic HeadSaver type with their native Head and BlockHash types. 
+type HeadSaver[H Head[BLOCK_HASH], BLOCK_HASH Hashable] interface { + // Save updates the latest block number, if indeed the latest, and persists + // this number in case of reboot. + Save(ctx context.Context, head H) error + // Load loads latest EvmHeadTrackerHistoryDepth heads, returns the latest chain. + Load(ctx context.Context) (H, error) + // LatestChain returns the block header with the highest number that has been seen, or nil. + LatestChain() H + // Chain returns a head for the specified hash, or nil. + Chain(hash BLOCK_HASH) H +} + +// HeadListener is a chain agnostic interface that manages connection of Client that receives heads from the blockchain node +type HeadListener[H Head[BLOCK_HASH], BLOCK_HASH Hashable] interface { + // ListenForNewHeads kicks off the listen loop (not thread safe) + // done() must be executed upon leaving ListenForNewHeads() + ListenForNewHeads(handleNewHead NewHeadHandler[H, BLOCK_HASH], done func()) + + // ReceivingHeads returns true if the listener is receiving heads (thread safe) + ReceivingHeads() bool + + // Connected returns true if the listener is connected (thread safe) + Connected() bool + + // HealthReport returns report of errors within HeadListener + HealthReport() map[string]error +} + +// NewHeadHandler is a callback that handles incoming heads +type NewHeadHandler[H Head[BLOCK_HASH], BLOCK_HASH Hashable] func(ctx context.Context, header H) error + +// HeadBroadcaster relays new Heads to all subscribers. 
+// +//go:generate mockery --quiet --name HeadBroadcaster --output ../mocks/ --case=underscore +type HeadBroadcaster[H Head[BLOCK_HASH], BLOCK_HASH Hashable] interface { + services.Service + BroadcastNewLongestChain(H) + HeadBroadcasterRegistry[H, BLOCK_HASH] +} + +//go:generate mockery --quiet --name HeadBroadcaster --output ../mocks/ --case=underscore +type HeadBroadcasterRegistry[H Head[BLOCK_HASH], BLOCK_HASH Hashable] interface { + Subscribe(callback HeadTrackable[H, BLOCK_HASH]) (currentLongestChain H, unsubscribe func()) +} diff --git a/common/types/mocks/head.go b/common/types/mocks/head.go new file mode 100644 index 00000000..d9497dcb --- /dev/null +++ b/common/types/mocks/head.go @@ -0,0 +1,199 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + time "time" + + mock "github.com/stretchr/testify/mock" + + types "github.com/goplugin/pluginv3.0/v2/common/types" +) + +// Head is an autogenerated mock type for the Head type +type Head[BLOCK_HASH types.Hashable] struct { + mock.Mock +} + +// BlockDifficulty provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) BlockDifficulty() *big.Int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockDifficulty") + } + + var r0 *big.Int + if rf, ok := ret.Get(0).(func() *big.Int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + return r0 +} + +// BlockHash provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) BlockHash() BLOCK_HASH { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockHash") + } + + var r0 BLOCK_HASH + if rf, ok := ret.Get(0).(func() BLOCK_HASH); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(BLOCK_HASH) + } + + return r0 +} + +// BlockNumber provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) BlockNumber() int64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return 
value specified for BlockNumber") + } + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// ChainLength provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) ChainLength() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ChainLength") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// EarliestHeadInChain provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) EarliestHeadInChain() types.Head[BLOCK_HASH] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EarliestHeadInChain") + } + + var r0 types.Head[BLOCK_HASH] + if rf, ok := ret.Get(0).(func() types.Head[BLOCK_HASH]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Head[BLOCK_HASH]) + } + } + + return r0 +} + +// GetParent provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) GetParent() types.Head[BLOCK_HASH] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetParent") + } + + var r0 types.Head[BLOCK_HASH] + if rf, ok := ret.Get(0).(func() types.Head[BLOCK_HASH]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Head[BLOCK_HASH]) + } + } + + return r0 +} + +// GetParentHash provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) GetParentHash() BLOCK_HASH { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetParentHash") + } + + var r0 BLOCK_HASH + if rf, ok := ret.Get(0).(func() BLOCK_HASH); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(BLOCK_HASH) + } + + return r0 +} + +// GetTimestamp provides a mock function with given fields: +func (_m *Head[BLOCK_HASH]) GetTimestamp() time.Time { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value 
specified for GetTimestamp") + } + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// HashAtHeight provides a mock function with given fields: blockNum +func (_m *Head[BLOCK_HASH]) HashAtHeight(blockNum int64) BLOCK_HASH { + ret := _m.Called(blockNum) + + if len(ret) == 0 { + panic("no return value specified for HashAtHeight") + } + + var r0 BLOCK_HASH + if rf, ok := ret.Get(0).(func(int64) BLOCK_HASH); ok { + r0 = rf(blockNum) + } else { + r0 = ret.Get(0).(BLOCK_HASH) + } + + return r0 +} + +// NewHead creates a new instance of Head. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHead[BLOCK_HASH types.Hashable](t interface { + mock.TestingT + Cleanup(func()) +}) *Head[BLOCK_HASH] { + mock := &Head[BLOCK_HASH]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/types/mocks/head_trackable.go b/common/types/mocks/head_trackable.go new file mode 100644 index 00000000..c5e1f7e7 --- /dev/null +++ b/common/types/mocks/head_trackable.go @@ -0,0 +1,34 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + types "github.com/goplugin/pluginv3.0/v2/common/types" + mock "github.com/stretchr/testify/mock" +) + +// HeadTrackable is an autogenerated mock type for the HeadTrackable type +type HeadTrackable[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable] struct { + mock.Mock +} + +// OnNewLongestChain provides a mock function with given fields: ctx, head +func (_m *HeadTrackable[H, BLOCK_HASH]) OnNewLongestChain(ctx context.Context, head H) { + _m.Called(ctx, head) +} + +// NewHeadTrackable creates a new instance of HeadTrackable. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewHeadTrackable[H types.Head[BLOCK_HASH], BLOCK_HASH types.Hashable](t interface { + mock.TestingT + Cleanup(func()) +}) *HeadTrackable[H, BLOCK_HASH] { + mock := &HeadTrackable[H, BLOCK_HASH]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/types/mocks/subscription.go b/common/types/mocks/subscription.go new file mode 100644 index 00000000..32db6dfa --- /dev/null +++ b/common/types/mocks/subscription.go @@ -0,0 +1,49 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Subscription is an autogenerated mock type for the Subscription type +type Subscription struct { + mock.Mock +} + +// Err provides a mock function with given fields: +func (_m *Subscription) Err() <-chan error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Err") + } + + var r0 <-chan error + if rf, ok := ret.Get(0).(func() <-chan error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan error) + } + } + + return r0 +} + +// Unsubscribe provides a mock function with given fields: +func (_m *Subscription) Unsubscribe() { + _m.Called() +} + +// NewSubscription creates a new instance of Subscription. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewSubscription(t interface { + mock.TestingT + Cleanup(func()) +}) *Subscription { + mock := &Subscription{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/common/types/receipt.go b/common/types/receipt.go new file mode 100644 index 00000000..01d5a72d --- /dev/null +++ b/common/types/receipt.go @@ -0,0 +1,14 @@ +package types + +import "math/big" + +type Receipt[TX_HASH Hashable, BLOCK_HASH Hashable] interface { + GetStatus() uint64 + GetTxHash() TX_HASH + GetBlockNumber() *big.Int + IsZero() bool + IsUnmined() bool + GetFeeUsed() uint64 + GetTransactionIndex() uint + GetBlockHash() BLOCK_HASH +} diff --git a/common/types/subscription.go b/common/types/subscription.go new file mode 100644 index 00000000..99247107 --- /dev/null +++ b/common/types/subscription.go @@ -0,0 +1,17 @@ +package types + +// Subscription represents an event subscription where events are +// delivered on a data channel. +// This is a generic interface for Subscription to represent used by clients. + +//go:generate mockery --quiet --name Subscription --output ./mocks/ --case=underscore +type Subscription interface { + // Unsubscribe cancels the sending of events to the data channel + // and closes the error channel. + Unsubscribe() + // Err returns the subscription error channel. The error channel receives + // a value if there is an issue with the subscription (e.g. the network connection + // delivering the events has been closed). Only one value will ever be sent. + // The error channel is closed by Unsubscribe. 
+ Err() <-chan error +} diff --git a/common/types/test_utils.go b/common/types/test_utils.go new file mode 100644 index 00000000..40560f78 --- /dev/null +++ b/common/types/test_utils.go @@ -0,0 +1,16 @@ +package types + +import ( + "math" + "math/big" + "math/rand" +) + +func RandomID() ID { + id := rand.Int63n(math.MaxInt32) + 10000 + return big.NewInt(id) +} + +func NewIDFromInt(id int64) ID { + return big.NewInt(id) +} diff --git a/config_docs_test.go b/config_docs_test.go new file mode 100644 index 00000000..d6605d23 --- /dev/null +++ b/config_docs_test.go @@ -0,0 +1,27 @@ +package main + +import ( + _ "embed" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/config/docs" +) + +var ( + //go:embed docs/CONFIG.md + configMD string + //go:embed docs/SECRETS.md + secretsMD string +) + +func TestConfigDocs(t *testing.T) { + config, err := docs.GenerateConfig() + assert.NoError(t, err, "invalid config docs") + assert.Equal(t, configMD, config, "docs/CONFIG.md is out of date. Run 'make config-docs' to regenerate.") + + secrets, err := docs.GenerateSecrets() + assert.NoError(t, err, "invalid secrets docs") + assert.Equal(t, secretsMD, secrets, "docs/SECRETS.md is out of date. 
Run 'make config-docs' to regenerate.") +} diff --git a/contracts/.eslintignore b/contracts/.eslintignore new file mode 100644 index 00000000..cfd941da --- /dev/null +++ b/contracts/.eslintignore @@ -0,0 +1,16 @@ +**/node_modules/** +**/generated/** +core/ +**/dist/** +**/artifacts/** +**/public/** +**/build/** +**/fixtures/** +**/lib/** +truffle-config.js +truffle.js +gethload.js +craco.config.js +integration/apocalypse/** +contracts/ +tools/ci-ts/tests/ethers/** \ No newline at end of file diff --git a/contracts/.eslintrc.js b/contracts/.eslintrc.js new file mode 100644 index 00000000..ad9322f7 --- /dev/null +++ b/contracts/.eslintrc.js @@ -0,0 +1,40 @@ +module.exports = { + root: true, + parser: '@typescript-eslint/parser', + plugins: ['@typescript-eslint'], + env: { + es6: true, + node: true, + mocha: true, + }, + extends: [ + 'eslint:recommended', + 'plugin:@typescript-eslint/eslint-recommended', + 'plugin:@typescript-eslint/recommended', + 'plugin:prettier/recommended', + ], + rules: { + radix: ['error', 'always'], + 'object-shorthand': ['error', 'always'], + 'prettier/prettier': [ + 'error', + {}, + { + usePrettierrc: true, + }, + ], + 'prefer-const': 'warn', + '@typescript-eslint/camelcase': 'off', + '@typescript-eslint/no-empty-function': 'off', + '@typescript-eslint/no-unused-vars': ['warn', { varsIgnorePattern: '^_' }], + '@typescript-eslint/no-empty-interface': 'off', + '@typescript-eslint/explicit-function-return-type': 'off', + '@typescript-eslint/no-explicit-any': 'off', + '@typescript-eslint/ban-ts-comment': 'warn', + '@typescript-eslint/no-non-null-assertion': 'error', + '@typescript-eslint/no-use-before-define': [ + 'error', + { functions: false, typedefs: false }, + ], + }, +} diff --git a/contracts/.gitignore b/contracts/.gitignore new file mode 100644 index 00000000..ddfd4383 --- /dev/null +++ b/contracts/.gitignore @@ -0,0 +1,15 @@ +artifacts +cache +node_modules +solc +abi +coverage +coverage.json +typechain + + +# Foundry +foundry-cache 
+foundry-artifacts + +.openzeppelin \ No newline at end of file diff --git a/contracts/.npmrc b/contracts/.npmrc new file mode 100644 index 00000000..4c2f52b3 --- /dev/null +++ b/contracts/.npmrc @@ -0,0 +1,2 @@ +auto-install-peers=true +strict-peer-dependencies=false diff --git a/contracts/.prettierignore b/contracts/.prettierignore new file mode 100644 index 00000000..856908bf --- /dev/null +++ b/contracts/.prettierignore @@ -0,0 +1,43 @@ +**/node_modules/** +**/generated/** +core/ +**/dist/** +**/artifacts/** +**/public/** +**/build/** +truffle-config.js +truffle.js +gethload.js + +pnpm-lock.yaml +coverage +abi +artifacts +foundry-artifacts +foundry-cache +foundry-lib +cache +node_modules +solc +LinkToken.json +typechain +src/v0.4 +src/v0.5 +src/v0.6 +src/v0.7 +**/vendor + +# Ignore TS definition and map files +**/**.d.ts +**/**.d.ts.map + +docs/ +venv/ + +*.md +.github/**/*.yml +.golangci.yml +.solhint.json + +src/v0.8/mocks/FunctionsOracleEventsMock.sol +src/v0.8/mocks/FunctionsBillingRegistryEventsMock.sol \ No newline at end of file diff --git a/contracts/.prettierrc b/contracts/.prettierrc new file mode 100644 index 00000000..59464a44 --- /dev/null +++ b/contracts/.prettierrc @@ -0,0 +1,22 @@ +{ + "semi": false, + "singleQuote": true, + "printWidth": 80, + "endOfLine": "auto", + "tabWidth": 2, + "trailingComma": "all", + "plugins": ["prettier-plugin-solidity"], + "overrides": [ + { + "files": "*.sol", + "options": { + "parser": "solidity-parse", + "printWidth": 120, + "tabWidth": 2, + "useTabs": false, + "singleQuote": false, + "bracketSpacing": false + } + } + ] +} diff --git a/contracts/.prettierrc.js b/contracts/.prettierrc.js new file mode 100644 index 00000000..774a5e96 --- /dev/null +++ b/contracts/.prettierrc.js @@ -0,0 +1,22 @@ +module.exports = { + semi: false, + singleQuote: true, + printWidth: 80, + endOfLine: 'auto', + tabWidth: 2, + trailingComma: 'all', + overrides: [ + { + files: '*.sol', + options: { + parser: 'solidity-parse', + printWidth: 
120, + tabWidth: 2, + useTabs: false, + singleQuote: false, + bracketSpacing: false, + explicitTypes: 'always', + }, + }, + ], +} diff --git a/contracts/.solcover.js b/contracts/.solcover.js new file mode 100644 index 00000000..e3602a0b --- /dev/null +++ b/contracts/.solcover.js @@ -0,0 +1,35 @@ +module.exports = { + skipFiles: [ + 'v0.4/', + 'v0.5/', + 'v0.6/tests', + 'v0.6/interfaces', + 'v0.6/vendor', + 'v0.7/tests', + 'v0.7/interfaces', + 'v0.7/vendor', + 'v0.8/mocks', + 'v0.8/interfaces', + 'v0.8/vendor', + 'v0.8/dev/interfaces', + 'v0.8/dev/vendor', + 'v0.8/dev/Keeper2_0/interfaces', + 'v0.8/dev/transmission', + 'v0.8/tests', + ], + istanbulReporter: ['text', 'text-summary', 'json'], + mocha: { + grep: '@skip-coverage', // Find everything with this tag + invert: true, // Run the grep's inverse set. + }, + configureYulOptimizer: true, + solcOptimizerDetails: { + peephole: false, + jumpdestRemover: false, + orderLiterals: true, + deduplicate: false, + cse: false, + constantOptimizer: false, + yul: true, + }, +} diff --git a/contracts/.solhint.json b/contracts/.solhint.json new file mode 100644 index 00000000..ba719051 --- /dev/null +++ b/contracts/.solhint.json @@ -0,0 +1,44 @@ +{ + "extends": "solhint:recommended", + "plugins": ["prettier", "plugin-solidity"], + "rules": { + "compiler-version": ["off", "^0.8.0"], + "const-name-snakecase": "off", + "constructor-syntax": "error", + "var-name-mixedcase": "off", + "func-named-parameters": "off", + "immutable-vars-naming": "off", + "no-inline-assembly": "off", + "contract-name-camelcase": "off", + "no-unused-import": "error", + "func-visibility": [ + "error", + { + "ignoreConstructors": true + } + ], + "not-rely-on-time": "off", + "prettier/prettier": [ + "off", + { + "endOfLine": "auto" + } + ], + "no-empty-blocks": "off", + "quotes": ["error", "double"], + "reason-string": [ + "warn", + { + "maxLength": 64 + } + ], + "plugin-solidity/prefix-internal-functions-with-underscore": "warn", + 
"plugin-solidity/prefix-private-functions-with-underscore": "warn", + "plugin-solidity/prefix-storage-variables-with-s-underscore": "warn", + "plugin-solidity/prefix-immutable-variables-with-i": "warn", + "plugin-solidity/all-caps-constant-storage-variables": "warn", + "plugin-solidity/no-hardhat-imports": "warn", + "plugin-solidity/inherited-constructor-args-not-in-contract-definition": "warn", + "plugin-solidity/explicit-returns": "warn" + } +} diff --git a/contracts/.solhintignore b/contracts/.solhintignore new file mode 100644 index 00000000..4246bbd7 --- /dev/null +++ b/contracts/.solhintignore @@ -0,0 +1,40 @@ +# 344 warnings +#./src/v0.8/automation + +# Ignore frozen Automation code +./src/v0.8/automation/v1_2 +./src/v0.8/automation/v1_3 +./src/v0.8/automation/v2_0 +./src/v0.8/automation/v2_1 +./src/v0.8/automation/interfaces/v2_1/ +./src/v0.8/automation/interfaces/MigratableKeeperRegistryInterface.sol +./src/v0.8/automation/upkeeps/UpkeepBalanceMonitor.sol +./src/v0.8/automation/upkeeps/LinkAvailableBalanceMonitor.sol +./src/v0.8/automation/upkeeps/EthBalanceMonitor.sol +./src/v0.8/automation/upkeeps/ERC20BalanceMonitor.sol +./src/v0.8/automation/upkeeps/CronUpkeepFactory.sol +./src/v0.8/automation/upkeeps/CronUpkeepDelegate.sol +./src/v0.8/automation/upkeeps/CronUpkeep.sol +./src/v0.8/automation/libraries/internal/Cron.sol +./src/v0.8/automation/AutomationForwarder.sol +./src/v0.8/automation/AutomationForwarderLogic.sol + + +# Ignore tests / test helpers (for now) +./src/v0.8/automation/mocks +./src/v0.8/automation/testhelpers + +# Ignore Functions v1.0.0 code that was frozen after audit +./src/v0.8/functions/v1_0_0 + +# Ignore tests, this should not be the long term plan but is OK in the short term +./src/v0.8/**/*.t.sol +./src/v0.8/mocks +./src/v0.8/tests +./src/v0.8/llo-feeds/test +./src/v0.8/vrf/testhelpers +./src/v0.8/functions/tests + +# Always ignore vendor +./src/v0.8/vendor +./node_modules/ diff --git a/contracts/CHANGELOG.md 
b/contracts/CHANGELOG.md new file mode 100644 index 00000000..61b5a72e --- /dev/null +++ b/contracts/CHANGELOG.md @@ -0,0 +1,194 @@ +# @plugin/contracts CHANGELOG.md + +## Unreleased + +- Moved `VRFCoordinatorV2Mock.sol` to src/v0.8/vrf/mocks +- Moved `VRFCoordinatorMock.sol` to src/v0.8/vrf/mocks +- Release Functions v1.0.0 contracts. Start dev folder for v1.X (#10941) +- Add minimumEstimateGasPriceWei to Functions Coordinator config (#10916) +- Remove redundant Functions Coordinator commitment & request id checks (#10975) + +### Removed + +- Removed all code related to versions prior to Solidity 0.8.0 (#10931) + +## 0.8.0 - 2023-10-04 + +### Changed + + +- Add a re-entrancy guard to VRFCoordinatorV2Mock to mimic VRFCoordinatorV2's behavior (#10585) +- Enhanced support for destination configs in Data Streams verifiers (#10472) +- Update Data Streams proxy and billing interfaces for better UX (#10603) +- Allow new reward recipients to be added to pools in Data Streams reward management (#10658) +- Reorganize Data Streams contracts (llo-feeds/) (#10727) +- Release automation 2.1 contracts (#10587) + - Note: consumers should only use IKeeperRegistryMaster when interacting with the registry contract +- Fix Functions v1 OracleWithdrawAll to correctly use transmitters (#10392) +- Clean up unused Functions v1 code: FunctionsBilling.sol maxCallbackGasLimit & FunctionsRequest.sol requestSignature (#10509) +- Fix Functions v1 FunctionsBilling.sol gas price naming to reflect that it is in wei, not gwei (#10509) +- Use Natspec comment lines in Functions v1 contracts (#10567) +- Functions v1 Subscriptions now require a minimum number of requests to release a deposit amount (#10513) +- Fix Functions v1 Subscriptions add consumer checks for when maximum consumers changes in contract configuration (#10511) +- Functions v1 Router no longer reverts during fulfillment on an invalid client (#10511) +- Functions v1 Coordinator oracleWithdrawAll checks for 0 balances (#10511) + +## 
0.7.1 - 2023-09-20 + +### Changed + +- Add Plugin Functions v1.0.0 (#9365) +- Change Functions Client variables to internal for use when integrating Automation (#8429) +- Make Functions Billing Registry and Functions Oracle upgradable using the transparent proxy pattern (#8371) +- Update dependency hardhat from version 2.10.1 to 2.12.7 (#8464) +- Fix Functions cost estimation not correctly using registry fee (#8502) +- Fix Functions transmitter NOP fee payment (#8557) +- Functions library uses solidty-cborutils CBOR v2.0.0 and ENS Buffer v0.1.0(#8485) +- Gas optimization to AuthorizedOriginReceiverUpgradable by using EnumberableSet .values() +- Remove support for inline secrets in Functions requests (#8847) +- Moved versioned directories to use v prefix + +## 0.6.1 - 2023-02-06 + +### Added + +- Support for off-chain secrets in Functions Oracle contract + +### Changed + +- Modified FunctionsClientExample.sol to use constant amount of gas regardless of response size +- Fixed comments in FunctionsBillingRegistry.sol +- Make Functions billing registry's timeoutRequest pausable (#8299) +- Remove user specified gas price from Functions Oracle sendRequest + (#8320) + +## 0.6.0 - 2023-01-11 + +### Added + +- Added a Solidity style guide. 
+ +### Changed + +- Migrated and improved `AuthorizedReceiverInterface` and `AuthorizedReceiver` from 0.7.0 +- Added `Plugin Functions` interfaces and contracts (initial version for PoC) + +## 0.5.1 - 2022-09-27 + +- Rename `KeeperBase` -> `AutomationBase` and add alias for backwards compatibility +- Rename `KeeperCompatible` -> `AutomationCompatible` and add alias for backwards compatibility +- Rename `KeeperCompatibleInterface` -> `AutomationCompatibleInterface` and add alias for backwards compatibility +- Rename `KeeperRegistryInterface1_2` -> `AutomationRegistryInterface1_2` and add alias for backwards compatibility + +## 0.5.0 - 2022-09-26 + +### Changed + +- Fix EIP-150 Bug in VRFV2Wrapper.sol (b9d8261eaa05838b9b609ea02005ecca3b6adca3) +- Added a new UpkeepFormat version `V2` in `UpkeepFormat` +- Renamed `KeeperRegistry` to `KeeperRegistry1_2` and `KeeperRegistryInterface` to `KeeperRegistryInterface1_2` +- Updated `UpkeepTranscoder` to only do a pass-through for upkeep bytes + +## 0.4.2 - 2022-07-20 + +### Changed + +- Downgrade 0.8.13 contracts to 0.8.6 due to [this solc bug](https://medium.com/certora/overly-optimistic-optimizer-certora-bug-disclosure-2101e3f7994d). +- Reintroduce v0.6 `EACAggregatorProxy` after removing it in [this commit](https://github.com/goplugin/pluginv3.0/commit/558f42f5122779cb2e05dc8c2b84d1ae78cc0d71) +- Ignore status update in `ArbitrumSequencerUptimeFeed` if incoming update has stale timestamp +- Revert to using current Arbitrum seq status flag in `ArbitrumSequencerUptimeFeed` +- Moved `VRFV2Wrapper`, `VRFV2WrapperConsumerBase` and `interfaces/VRFV2WrapperInterface` out of `dev` folder. + +## 0.4.1 - 2022-05-09 + +### Changed + +- VRFv2 contract pragma versions changed from `^0.8.0` to `^0.8.4`. + +## 0.4.0 - 2022-02-07 + +### Added + +- `ArbitrumSequencerUptimeFeedInterface` and `ArbitrumSequencerUptimeFeed` added in v0.8. 
+ +### Changed + +- Changed `ArbitrumValidator#validate` target to `ArbitrumSequencerUptimeFeed` instead of + Flags contract. +- Moved `VRFConsumerBaseV2` out of dev + +## 0.3.1 - 2022-01-05 + +### Changed: + +- Fixed install issue with npm. + +## 0.3.0 - 2021-12-09 + +### Added + +- Prettier Solidity formatting applied to v0.7 and above. +- ERC677ReceiverInterface added in v0.8. +- `KeeperBase.sol` and `KeeperCompatible.sol` in Solidity v0.6 and v0.8 + +### Changed: + +- Operator Contract and Plugin Client are officially supported. This enables + multiword requests/response are available through the PluginClient by using + the newly enabled `buildOperatorRequest` along with `sendOperatorRequest` or + `sendOperatorRequestTo`. +- `PluginClient` functions `requestOracleData` and `requestOracleDataFrom` have been changed to `sendPluginRequest` and + `sendPluginRequestTo` respectively. +- Updated function comments in `v0.6/interfaces/KeeperCompatibleInterface.sol` and `v0.8/interfaces/KeeperCompatibleInterface.sol` to match the latest in v0.7. +- Add `DelegateForwarderInterface` interface and `CrossDomainDelegateForwarder` base contract which implements a new `forwardDelegate()` function to forward delegatecalls from L1 to L2. + +## 0.2.2 - 2021-09-21 + +### Added: + +- v0.8 Access Controlled contracts (`SimpleWriteAccessController` and `SimpleReadAccessController`). +- v0.8 Flags contracts (`Flags`). +- v0.8 Contracts for the V2 VRF. `VRFCoordinatorV2.sol`, `VRF.sol`, + `VRFConsumerBaseV2.sol`, `VRFCoordinatorV2Interface.sol`. Along + with related test contract `VRFConsumerV2.sol` and example contracts + `VRFSingleConsumerExample.sol` and `VRFConsumerExternalSubOwnerExampl.sol`. +- v0.6 `MockV3Aggregator` in src/v0.6/tests/. +- v0.7 Added keeper-related smart contracts from the keeper repo. Added tests for `KeeperRegistry` and `UpkeepRegistrationRequests` in `test/v0.7/`. 
+ +### Changed: + +- Move `Operator` and associated contracts (`AuthorizedForwarder`, `AuthorizedReceiver`, `LinkTokenReceiver`, `OperatorFactory`) from `./src/v0.7/dev/` to `./src/v0.7/`. +- Updated `Denominations` in `./src/` to include additional fiat currencies. +- Updated `./src/v0.8/vender/BufferPlugin.sol` with latest unchecked math version. + +## 0.2.1 - 2021-07-13 + +### Changed: + +- Bump hardhat from 2.3.3 to 2.4.1 +- Move Solidity version 0.8.x contracts `PluginClient.sol`, `Plugin.sol`, `VRFConsumerBase.sol` and `VRFRequestIDBase.sol` from `./src/v0.8/dev/` to `./src/v0.8/`. +- Updated `FeedRegistryInterface` to use `base` and `quote` parameter names. +- Move `Denominations` from `./src//dev/` to `./src/` + +## 0.2.0 - 2021-07-01 + +### Added: + +- `@plugin/contracts` package changelog. +- `KeeperCompatibleInterface` contracts. +- Feeds Registry contracts: `FeedRegistryInterface` and `Denominations`. +- v0.8 Consumable contracts (`PluginClient`, `VRFConsumerBase` and aggregator interfaces). +- Multi-word response handling in v0.7 and v0.8 `PluginClient` contracts. + +### Changed: + +- Added missing licensees to `KeeperComptibleInterface`'s +- Upgrade solidity v8 compiler version from 0.8.4 to 0.8.6 +- Tests converted to Hardhat. +- Ethers upgraded from v4 to v5. +- Contract artifacts in `abi/` are now raw abi .json files, and do not include bytecode or other supplimentary data. + +### Removed: + +- Removed dependencies: `@plugin/belt`, `@plugin/test-helpers` and `@truffle`. +- Ethers and Truffle contract artifacts are no longer published. diff --git a/contracts/GNUmakefile b/contracts/GNUmakefile new file mode 100644 index 00000000..751a47b3 --- /dev/null +++ b/contracts/GNUmakefile @@ -0,0 +1,98 @@ +# ALL_FOUNDRY_PRODUCTS contains a list of all products that have a foundry +# profile defined and use the Foundry snapshots. 
+ALL_FOUNDRY_PRODUCTS = l2ep llo-feeds functions keystone shared + +# To make a snapshot for a specific product, either set the `FOUNDRY_PROFILE` env var +# or call the target with `FOUNDRY_PROFILE=product` +# When developing with Foundry, you'll most likely already have the env var set +# to run the tests for the product you're working on. In that case, you can just +# call `make snapshot` and it will use the env var. +# env var example +# export FOUNDRY_PROFILE=llo-feeds +# make snapshot +# make call example +# make FOUNDRY_PROFILE=llo-feeds snapshot +# note make snapshot skips fuzz tests named according to best practices, although forge uses +# a static fuzz seed by default, flaky gas results per platform are still observed. +.PHONY: snapshot +snapshot: ## Make a snapshot for a specific product. + export FOUNDRY_PROFILE=$(FOUNDRY_PROFILE) && forge snapshot --nmt "testFuzz_\w{1,}?" --snap gas-snapshots/$(FOUNDRY_PROFILE).gas-snapshot + +.PHONY: snapshot-diff +snapshot-diff: ## Make a snapshot for a specific product. + export FOUNDRY_PROFILE=$(FOUNDRY_PROFILE) && forge snapshot --nmt "testFuzz_\w{1,}?" --diff gas-snapshots/$(FOUNDRY_PROFILE).gas-snapshot + + +.PHONY: snapshot-all +snapshot-all: ## Make a snapshot for all products. + for foundry_profile in $(ALL_FOUNDRY_PRODUCTS) ; do \ + make snapshot FOUNDRY_PROFILE=$$foundry_profile ; \ + done + +.PHONY: pnpmdep +pnpmdep: ## Install solidity contract dependencies through pnpm + pnpm i + +.PHONY: abigen +abigen: ## Build & install abigen. + ../tools/bin/build_abigen + +.PHONY: mockery +mockery: $(mockery) ## Install mockery. + go install github.com/vektra/mockery/v2@v2.35.4 + +.PHONY: foundry +foundry: ## Install foundry. + foundryup --version nightly-5b7e4cb3c882b28f3c32ba580de27ce7381f415a + +.PHONY: foundry-refresh +foundry-refresh: foundry + git submodule deinit -f . 
+ git submodule update --init --recursive + +# To generate gethwrappers for a specific product, either set the `FOUNDRY_PROFILE` +# env var or call the target with `FOUNDRY_PROFILE=product` +# This uses FOUNDRY_PROFILE, even though it does support non-foundry products. This +# is to improve the workflow for developers working with Foundry, which is the +# recommended way to develop Solidity for CL products. +# env var example +# export FOUNDRY_PROFILE=llo-feeds +# make wrappers +# make call example +# make FOUNDRY_PROFILE=llo-feeds wrappers +.PHONY: wrappers +wrappers: pnpmdep mockery abigen ## Recompiles solidity contracts and their go wrappers. + ./scripts/native_solc_compile_all_$(FOUNDRY_PROFILE) + go generate ../core/gethwrappers/$(FOUNDRY_PROFILE) + +# This call generates all gethwrappers for all products. It does so based on the +# assumption that native_solc_compile_all contains sub-calls to each product, and +# go_generate does the same. +.PHONY: wrappers-all +wrappers-all: pnpmdep mockery abigen ## Recompiles solidity contracts and their go wrappers. + # go_generate contains a call to compile all contracts before generating wrappers + go generate ../core/gethwrappers/go_generate.go + +# Custom wrapper generation for OCR2VRF as their contracts do not exist in this repo +.PHONY: go-solidity-wrappers-ocr2vrf +go-solidity-wrappers-ocr2vrf: pnpmdep abigen ## Recompiles OCR2VRF solidity contracts and their go wrappers. 
+ ./scripts/native_solc_compile_all_ocr2vrf + # replace the go:generate_disabled directive with the regular go:generate directive + sed -i '' 's/go:generate_disabled/go:generate/g' ../core/gethwrappers/ocr2vrf/go_generate.go + go generate ../core/gethwrappers/ocr2vrf + go generate ../core/internal/mocks + # put the go:generate_disabled directive back + sed -i '' 's/go:generate/go:generate_disabled/g' ../core/gethwrappers/ocr2vrf/go_generate.go + + +help: + @echo "" + @echo " .__ .__ .__ .__ __" + @echo " ____ | |__ _____ |__| ____ | | |__| ____ | | __" + @echo " _/ ___\| | \\\\\\__ \ | |/ \| | | |/ \| |/ /" + @echo " \ \___| Y \/ __ \| | | \ |_| | | \ <" + @echo " \___ >___| (____ /__|___| /____/__|___| /__|_ \\" + @echo " \/ \/ \/ \/ \/ \/" + @echo "" + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | \ + awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/contracts/LinkToken.json b/contracts/LinkToken.json new file mode 100644 index 00000000..718fc415 --- /dev/null +++ b/contracts/LinkToken.json @@ -0,0 +1,164 @@ +{ + "__comment": "bytecode from verification on https://etherscan.io/address/0x514910771af9ca656af840dff83e8264ecf986ca#contracts", + "bytecode": 
"0x6060604052341561000f57600080fd5b5b600160a060020a03331660009081526001602052604090206b033b2e3c9fd0803ce800000090555b5b610c51806100486000396000f300606060405236156100b75763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166306fdde0381146100bc578063095ea7b31461014757806318160ddd1461017d57806323b872dd146101a2578063313ce567146101de5780634000aea014610207578063661884631461028057806370a08231146102b657806395d89b41146102e7578063a9059cbb14610372578063d73dd623146103a8578063dd62ed3e146103de575b600080fd5b34156100c757600080fd5b6100cf610415565b60405160208082528190810183818151815260200191508051906020019080838360005b8381101561010c5780820151818401525b6020016100f3565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015257600080fd5b610169600160a060020a036004351660243561044c565b604051901515815260200160405180910390f35b341561018857600080fd5b610190610499565b60405190815260200160405180910390f35b34156101ad57600080fd5b610169600160a060020a03600435811690602435166044356104a9565b604051901515815260200160405180910390f35b34156101e957600080fd5b6101f16104f8565b60405160ff909116815260200160405180910390f35b341561021257600080fd5b61016960048035600160a060020a03169060248035919060649060443590810190830135806020601f820181900481020160405190810160405281815292919060208401838380828437509496506104fd95505050505050565b604051901515815260200160405180910390f35b341561028b57600080fd5b610169600160a060020a036004351660243561054c565b604051901515815260200160405180910390f35b34156102c157600080fd5b610190600160a060020a0360043516610648565b60405190815260200160405180910390f35b34156102f257600080fd5b6100cf610667565b60405160208082528190810183818151815260200191508051906020019080838360005b8381101561010c5780820151818401525b6020016100f3565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561037d57600080fd5b610169600160a060020a036004351660243561069e565b6040519015158152602
00160405180910390f35b34156103b357600080fd5b610169600160a060020a03600435166024356106eb565b604051901515815260200160405180910390f35b34156103e957600080fd5b610190600160a060020a0360043581169060243516610790565b60405190815260200160405180910390f35b60408051908101604052600f81527f436861696e4c696e6b20546f6b656e0000000000000000000000000000000000602082015281565b600082600160a060020a03811615801590610479575030600160a060020a031681600160a060020a031614155b151561048457600080fd5b61048e84846107bd565b91505b5b5092915050565b6b033b2e3c9fd0803ce800000081565b600082600160a060020a038116158015906104d6575030600160a060020a031681600160a060020a031614155b15156104e157600080fd5b6104ec85858561082a565b91505b5b509392505050565b601281565b600083600160a060020a0381161580159061052a575030600160a060020a031681600160a060020a031614155b151561053557600080fd5b6104ec85858561093c565b91505b5b509392505050565b600160a060020a033381166000908152600260209081526040808320938616835292905290812054808311156105a957600160a060020a0333811660009081526002602090815260408083209388168352929052908120556105e0565b6105b9818463ffffffff610a2316565b600160a060020a033381166000908152600260209081526040808320938916835292905220555b600160a060020a0333811660008181526002602090815260408083209489168084529490915290819020547f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925915190815260200160405180910390a3600191505b5092915050565b600160a060020a0381166000908152600160205260409020545b919050565b60408051908101604052600481527f4c494e4b00000000000000000000000000000000000000000000000000000000602082015281565b600082600160a060020a038116158015906106cb575030600160a060020a031681600160a060020a031614155b15156106d657600080fd5b61048e8484610a3a565b91505b5b5092915050565b600160a060020a033381166000908152600260209081526040808320938616835292905290812054610723908363ffffffff610afa16565b600160a060020a0333811660008181526002602090815260408083209489168084529490915290819020849055919290917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92591905190815260200160405180910390a
35060015b92915050565b600160a060020a038083166000908152600260209081526040808320938516835292905220545b92915050565b600160a060020a03338116600081815260026020908152604080832094871680845294909152808220859055909291907f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b9259085905190815260200160405180910390a35060015b92915050565b600160a060020a03808416600081815260026020908152604080832033909516835293815283822054928252600190529182205461086e908463ffffffff610a2316565b600160a060020a0380871660009081526001602052604080822093909355908616815220546108a3908463ffffffff610afa16565b600160a060020a0385166000908152600160205260409020556108cc818463ffffffff610a2316565b600160a060020a03808716600081815260026020908152604080832033861684529091529081902093909355908616917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9086905190815260200160405180910390a3600191505b509392505050565b60006109488484610a3a565b5083600160a060020a031633600160a060020a03167fe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16858560405182815260406020820181815290820183818151815260200191508051906020019080838360005b838110156109c35780820151818401525b6020016109aa565b50505050905090810190601f1680156109f05780820380516001836020036101000a031916815260200191505b50935050505060405180910390a3610a0784610b14565b15610a1757610a17848484610b23565b5b5060015b9392505050565b600082821115610a2f57fe5b508082035b92915050565b600160a060020a033316600090815260016020526040812054610a63908363ffffffff610a2316565b600160a060020a033381166000908152600160205260408082209390935590851681522054610a98908363ffffffff610afa16565b600160a060020a0380851660008181526001602052604090819020939093559133909116907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9085905190815260200160405180910390a35060015b92915050565b600082820183811015610b0957fe5b8091505b5092915050565b6000813b908111905b50919050565b82600160a060020a03811663a4c0ed363385856040518463ffffffff167c01000000000000000000000000000000000000000000000000000000000281526004018084600160a060020a0
316600160a060020a0316815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b83811015610bbd5780820151818401525b602001610ba4565b50505050905090810190601f168015610bea5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b1515610c0a57600080fd5b6102c65a03f11515610c1b57600080fd5b5050505b505050505600a165627a7a72305820c5f438ff94e5ddaf2058efa0019e246c636c37a622e04bb67827c7374acad8d60029", + "abi": [ + { + "constant": true, + "inputs": [], + "name": "name", + "outputs": [{ "name": "", "type": "string" }], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { "name": "_spender", "type": "address" }, + { "name": "_value", "type": "uint256" } + ], + "name": "approve", + "outputs": [{ "name": "", "type": "bool" }], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "totalSupply", + "outputs": [{ "name": "", "type": "uint256" }], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { "name": "_from", "type": "address" }, + { "name": "_to", "type": "address" }, + { "name": "_value", "type": "uint256" } + ], + "name": "transferFrom", + "outputs": [{ "name": "", "type": "bool" }], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "decimals", + "outputs": [{ "name": "", "type": "uint8" }], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { "name": "_to", "type": "address" }, + { "name": "_value", "type": "uint256" }, + { "name": "_data", "type": "bytes" } + ], + "name": "transferAndCall", + "outputs": [{ "name": "success", "type": "bool" }], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { 
"name": "_spender", "type": "address" }, + { "name": "_subtractedValue", "type": "uint256" } + ], + "name": "decreaseApproval", + "outputs": [{ "name": "success", "type": "bool" }], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [{ "name": "_owner", "type": "address" }], + "name": "balanceOf", + "outputs": [{ "name": "balance", "type": "uint256" }], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "symbol", + "outputs": [{ "name": "", "type": "string" }], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { "name": "_to", "type": "address" }, + { "name": "_value", "type": "uint256" } + ], + "name": "transfer", + "outputs": [{ "name": "success", "type": "bool" }], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { "name": "_spender", "type": "address" }, + { "name": "_addedValue", "type": "uint256" } + ], + "name": "increaseApproval", + "outputs": [{ "name": "success", "type": "bool" }], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [ + { "name": "_owner", "type": "address" }, + { "name": "_spender", "type": "address" } + ], + "name": "allowance", + "outputs": [{ "name": "remaining", "type": "uint256" }], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "payable": false, + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { "indexed": true, "name": "from", "type": "address" }, + { "indexed": true, "name": "to", "type": "address" }, + { "indexed": false, "name": "value", "type": "uint256" }, + { "indexed": false, "name": "data", "type": "bytes" } + ], + "name": "Transfer", + "type": "event" + }, + { + "anonymous": 
false, + "inputs": [ + { "indexed": true, "name": "owner", "type": "address" }, + { "indexed": true, "name": "spender", "type": "address" }, + { "indexed": false, "name": "value", "type": "uint256" } + ], + "name": "Approval", + "type": "event" + } + ] +} diff --git a/contracts/README.md b/contracts/README.md new file mode 100644 index 00000000..e48cc526 --- /dev/null +++ b/contracts/README.md @@ -0,0 +1,64 @@ +# Plugin Smart Contracts + +## Installation + +```sh +# via pnpm +$ pnpm add @plugin/contracts +# via npm +$ npm install @plugin/contracts --save +``` + +### Directory Structure + +```sh +@plugin/contracts +├── src # Solidity contracts +│ ├── v0.4 +│ ├── v0.5 +│ ├── v0.6 +│ ├── v0.7 +│ └── v0.8 +└── abi # ABI json output + ├── v0.4 + ├── v0.5 + ├── v0.6 + ├── v0.7 + └── v0.8 +``` + +### Usage + +The solidity smart contracts themselves can be imported via the `src` directory of `@plugin/contracts`: + +```solidity +import '@plugin/contracts/src/v0.8/AutomationCompatibleInterface.sol'; + +``` + +## Local Development + +Note: Contracts in `dev/` directories are under active development and are likely unaudited. Please refrain from using these in production applications. + +```bash +# Clone Plugin repository +$ git clone https://github.com/goplugin/pluginv3.0.git +# Continuing via pnpm +$ cd contracts/ +$ pnpm +$ pnpm test +``` + +## Contributing + +Please try to adhere to [Solidity Style Guide](https://github.com/goplugin/pluginv3.0/blob/develop/contracts/STYLE.md). + +Contributions are welcome! Please refer to +[Plugin's contributing guidelines](https://github.com/goplugin/pluginv3.0/blob/develop/docs/CONTRIBUTING.md) for detailed +contribution information. + +Thank you! 
+ +## License + +[MIT](https://choosealicense.com/licenses/mit/) diff --git a/contracts/STYLE_GUIDE.md b/contracts/STYLE_GUIDE.md new file mode 100644 index 00000000..6767d7e2 --- /dev/null +++ b/contracts/STYLE_GUIDE.md @@ -0,0 +1,410 @@ +# Structure + +This guide is split into two sections: [Guidelines](#guidelines) and [Rules](#rules). +Guidelines are recommendations that should be followed but are hard to enforce in an automated way. +Rules are all enforced through CI, this can be through Solhint rules or other tools. + +## Background + +Our starting point is the [official Solidity Style Guide](https://docs.soliditylang.org/en/v0.8.21/style-guide.html) and [ConsenSys's Secure Development practices](https://consensys.github.io/smart-contract-best-practices/), but we deviate in some ways. We lean heavily on [Prettier](https://github.com/goplugin/pluginv3.0/blob/develop/contracts/.prettierrc) for formatting, and if you have to set up a new Solidity project we recommend starting with [our prettier config](https://github.com/goplugin/pluginv3.0/blob/develop/contracts/.prettierrc). We are trying to automate as much of this styleguide with Solhint as possible. + +This guide is not meant to be applied retroactively. There is no need to rewrite existing code to adhere to this guide, and when making (small) changes in existing files, it is not required to do so in accordance to this guide if it would conflict with other practices in that existing file. Consistency is preferred. + +We will be looking into `forge fmt`, but for now we still use `prettier`. + + +# Guidelines + +## Code Organization +- Group functionality together. E.g. Declare structs, events, and helper functions near the functions that use them. This is helpful when reading code because the related pieces are localized. It is also consistent with inheritance and libraries, which are separate pieces of code designed for a specific goal. 
+- 🤔Why not follow the Solidity recommendation of grouping by visibility? Visibility is clearly defined next to the method signature, making it trivial to check. However, searching can be deceiving because of inherited methods. Given this inconsistency in grouping, we find it easier to read and more consistent to organize code around functionality. Additionally, we recommend testing the public interface for any Solidity contract to ensure it only exposes expected methods. +- Follow the [Solidity folder structure CLIP](https://github.com/goplugin/CLIPs/tree/main/clips/2023-04-13-solidity-folder-structure) + +### Delineate Unaudited Code + +- In a large repo it is worthwhile to keep code that has not yet been audited separate from the code that has been audited. This allows you to easily keep track of which files need to be reviewed. + - E.g. we keep unaudited code in a directory named `dev` that exists within each projects folder. Only once it has been audited we move the audited files out of `dev` and only then is it considered safe to deploy. + - This `dev` folder also has implications for when code is valid for bug bounties, so be extra careful to move functionality out of a `dev` folder. + + +## comments +- Besides comment above functions/structs, comments should live everywhere a reader might be confused. + Don’t overestimate the reader of your contract, expect confusion in many places and document accordingly. + This will help massively during audits and onboarding new team members. +- Headers should be used to group functionality, the following header style and length is recommended. + - Don’t use headers for a single function, or to say “getters”. Group by functionality e.g. the `Tokens and pools` , or `fees` logic within the CCIP OnRamp. + +```solidity + // ================================================================ + // │ Tokens and pools │ + // ================================================================ + +.... 
+ + // ================================================================ + // │ Fees │ + // ================================================================ +``` + +## Variables + +- Function arguments are named like this: `argumentName`. No leading or trailing underscores necessary. +- Names should be explicit on the unit it contains, e.g. a network fee that is charged in USD cents + +```solidity +uint256 fee; // bad +uint256 networkFee; // bad +uint256 networkFeeUSD; // bad +uint256 networkFeeUSDCents; // good +``` + +### Types + +- If you are storing an address and know/expect it to be of a type(or interface), make the variable that type. This more clearly documents the behavior of this variable than the `address` type and often leads to less casting code whenever the address is used. + +### Structs + +- All structs should be packed to have the lowest memory footprint to reduce gas usage. Even structs that will never be written to storage should be packed. + - A contract can be considered a struct; it should also be packed to reduce gas cost. +- Structs should contain struct packing comments to clearly indicate the storage slot layout + - Using the exact characters from the example below will ensure visually appealing struct packing comments. + - Notice there is no line on the unpacked last `fee` item. +- Struct should contain comments, clearly indicating the denomination of values e.g. 0.01 USD if the variable name doesn’t already do that (which it should). 
+ - Simple tool that could help packing structs and adding comments: https://github.com/RensR/Spack + +```solidity +/// @dev Struct to hold the fee configuration for a fee token, same as the FeeTokenConfig but with +/// token included so that an array of these can be passed in to setFeeTokenConfig to set the mapping +struct FeeTokenConfigArgs { + address token; // ────────────╮ Token address + uint32 networkFeeUSD; // │ Flat network fee to charge for messages, multiples of 0.01 USD + // │ multiline comments should work like this. More fee info + uint64 gasMultiplier; // ─────╯ Price multiplier for gas costs, 1e18 based so 11e17 = 10% extra cost + uint64 premiumMultiplier; // ─╮ Multiplier for fee-token-specific premiums + bool enabled; // ─────────────╯ Whether this fee token is enabled + uint256 fee; // The flat fee the user pays in juels +} +``` +## Functions + +### Naming + +- Function names should start with imperative verbs, not nouns or other tenses. + - `requestData` not `dataRequest` + - `approve` not `approved` + - `getFeeParameters` not `feeParameters` + +- Prefix your public getters with `get` and your public setters with `set`. + - `getConfig` and `setConfig`. + +### Return Values + +- If an address is cast as a contract type, return the type, do not cast back to the address type. + This prevents the consumer of the method signature from having to cast again, but presents an equivalent API for off-chain APIs. + Additionally, it is a more declarative API, providing more context if we return a type. + +## Modifiers + +- Only extract a modifier once a check is duplicated in multiple places. Modifiers arguably hurt readability, so we have found that they are not worth extracting until there is duplication. +- Modifiers should be treated as if they are view functions. They should not change state, only read it. While it is possible to change state in a modifier, it is unconventional and surprising. 
+- Modifiers tend to bloat contract size because the code is duplicated wherever the modifier is used. + +## Events + +- Events should only be triggered on state changes. If the value is set but not changed, we prefer avoiding a log emission indicating a change. (e.g. Either do not emit a log, or name the event `ConfigSet` instead of `ConfigUpdated`.) +- Events should be emitted for all state changes, not emitting should be an exception +- When possible event names should correspond to the method they are in or the action that is being taken. Events preferably follow the format `<subject><actionPerformed>`, where the action performed is the past tense of the imperative verb in the method name. e.g. calling `setConfig` should emit an event called `ConfigSet`, not `ConfigUpdated` in a method named `setConfig`. + + +### Expose Errors + +It is common to call a contract and then check the call succeeded: + +```solidity +(bool success, ) = to.call(data); +require(success, "Contract call failed"); +``` + +While this may look descriptive it swallows the error. Instead, bubble up the error: + +```solidity +bool success; +retData = new bytes(maxReturnBytes); +assembly { + // call and return whether we succeeded. ignore return data + // call(gas,addr,value,argsOffset,argsLength,retOffset,retLength) + success := call(gasLimit, target, 0, add(payload, 0x20), mload(payload), 0, 0) + + // limit our copy to maxReturnBytes bytes + let toCopy := returndatasize() + if gt(toCopy, maxReturnBytes) { + toCopy := maxReturnBytes + } + // Store the length of the copied bytes + mstore(retData, toCopy) + // copy the bytes from retData[0:_toCopy] + returndatacopy(add(retData, 0x20), 0, toCopy) +} +return (success, retData); +``` + +This will cost slightly more gas to copy the response into memory, but will ultimately make contract usage more understandable and easier to debug. Whether it is worth the extra gas is a judgement call you’ll have to make based on your needs.
+ +The original error will not be human-readable in an off-chain explorer because it is RLP hex encoded but is easily decoded with standard Solidity ABI decoding tools, or a hex to UTF-8 converter and some basic ABI knowledge. + + +## Interfaces + +- Interfaces should be as concise as reasonably possible. Break it up into smaller composable interfaces when that is sensible. + +## Dependencies + +- Prefer not reinventing the wheel, especially if there is an Openzeppelin wheel. +- The `shared` folder can be treated as a first party dependency and it is recommended to check if some functionality might already be in there before either writing it yourself or adding a third party dependency. +- When we have reinvented the wheel already (like with ownership), it is OK to keep using these contracts. If there are clear benefits of using another standard like OZ, we can deprecate the custom implementation and start using the new standard in all new projects. Migration will not be required unless there are serious issues with the old implementation. +- When the decision is made to use a new standard, it is no longer allowed to use the old standard for new projects. + +### Vendor dependencies + +- That’s it, vendor your Solidity dependencies. Supply chain attacks are all the rage these days. There is not yet a silver bullet for the best way to vendor, it depends on the size of your project and your needs. You should be as explicit as possible about where the code comes from and make sure that this is enforced in some way; e.g. reference a hash. Some options: + - NPM packages work for repos already in the JavaScript ecosystem. If you go this route you should lock to a hash of the repo or use a proxy registry like GitHub Packages. + - Copy and paste the code into a `vendor` directory. Record attribution of the code and license in the repo along with the commit or version that you pulled the code from. + - Foundry uses git submodules for its dependencies.
We only use the `forge-std` lib through submodules, we don’t import any non-Foundry-testing code through this method. + + +## Common Behaviors + +### Transferring Ownership + +- When transferring control, whether it is of a token or a role in a contract, prefer "safe ownership" transfer patterns where the recipient must accept ownership. This avoids accidentally burning the control. This is also inline with the secure pattern of [prefer pull over push](https://consensys.github.io/smart-contract-best-practices/recommendations/#favor-pull-over-push-for-external-calls). + +### Call with Exact Gas + +- `call` accepts a gas parameter, but that parameter is a ceiling on gas usage. If a transaction does not have enough gas, `call` will simply provide as much gas as it safely can. This is unintuitive and can lead to transactions failing for unexpected reasons. We have [an implementation of `callWithExactGas`](https://github.com/goplugin/pluginv3.0/blob/075f3e2caf61b8685d2dc78714f1ee39764fda17/contracts/src/v0.8/KeeperRegistry.sol#L792) to ensure the precise gas amount requested is provided. + +### Sending tokens + +- Prefer [ERC20.safeTransfer](https://docs.openzeppelin.com/contracts/2.x/api/token/erc20#SafeERC20) over ERC20.transfer + +### Gas golfing + +- Golf your code. Make it cheap, within reason. + - Focus on the hot path +- Most of the cost of executing Solidity is related to reading/writing storage +- Calling other contracts will also be costly +- Common types to safely use are + - uint40 for timestamps (or uint32 if you really need the space) + - uint96 for link, as there are only 1b link tokens +- prefer `++i` over `i++` +- If you’re unsure about golfing, ask in the #tech-solidity channel + +## Testing + +- Test using Foundry. +- Aim for at least 90% *useful* coverage as a baseline, but (near) 100% is achievable in Solidity. Always 100% test the critical path. 
+ - Make sure to test for each event emitted + - Test each reverting path +- Consider fuzzing, start with stateless (very easy in Foundry) and if that works, try stateful fuzzing. +- Consider fork testing if applicable + +### Foundry + +- Create a Foundry profile for each project folder in `foundry.toml` +- Foundry tests live in the project folder in `src`, not in the `contracts/test/` folder +- Set the block number and timestamp. It is preferred to set these values to some reasonable value close to reality. +- There should be no code between `vm.expectEmit`/`vm.expectRevert` and the function call + +## Picking a Pragma + +- If a contract or library is expected to be imported by outside parties then the pragma should be kept as loose as possible without sacrificing safety. We publish versions for every minor semver version of Solidity, and maintain a corresponding set of tests for each published version. + - Examples: libraries, interfaces, abstract contracts, and contracts expected to be inherited from +- Otherwise, Solidity contracts should have a pragma which is locked to a specific version. + - Example: Most concrete contracts. +- Avoid changing pragmas after audit. Unless there is a bug that affects your contract, then you should try to stick to a known good pragma. In practice this means we typically only support one (occasionally two) pragma for any “major”(minor by semver naming) Solidity version. +- The current advised pragma is `0.8.19` or higher, lower versions should be avoided when starting a new project. Newer versions can be considered. +- All contracts should have an SPDX license identifier. If unsure about which one to pick, please consult with legal.
Most older contracts have been MIT, but some of the newer products have been using BUSL-1.1 + + +## Versioning + +Contracts should implement the following interface + +```solidity +interface ITypeAndVersion { + function typeAndVersion() external pure returns (string memory); +} +``` + +Here are some examples of what this should look like: + +```solidity +contract AccessControlledFoo is Foo { + string public constant override typeAndVersion = "AccessControlledFoo 1.0.0"; +} + +contract OffchainAggregator is ITypeAndVersion { + string public constant override typeAndVersion = "OffchainAggregator 1.0.0"; + + function getData() public returns(uint256) { + return 4; + } +} + +// Next version of Aggregator contract +contract SuperDuperAggregator is ITypeAndVersion { + /// This is a new contract that has not been released yet, so we + /// add a `-dev` suffix to the typeAndVersion. + string public constant override typeAndVersion = "SuperDuperAggregator 1.1.0-dev"; + + function getData() public returns(uint256) { + return 5; + } +} +``` + +All contracts will expose a `typeAndVersion` constant. +The string has the following format: `<contract name> <version>` with the `-dev` part only being applicable to contracts that have not been fully released. +Try to fit it into 32 bytes to keep impact on contract sizes minimal. +Solhint will complain about a public constant variable that isn’t FULL_CAPS without the solhint-disable comment. + + + + + + + + + + +# Rules + +All rules have a `rule` tag which indicates how the rule is enforced. + + +## Comments + +- Comments should be in the `//` (default) or `///` (natspec) format, not the `/* */` format.
+ - rule: `tbd` +- Comments should follow [NatSpec](https://docs.soliditylang.org/en/latest/natspec-format.html) + - rule: `tbd` + +## Imports + +- Imports should always be explicit + - rule: `no-global-import` +- Imports have follow the following format: + - rule: `tbd` + +```solidity +import {IInterface} from "../interfaces/IInterface.sol"; + +import {AnythingElse} from "../code/AnythingElse.sol"; + +import {ThirdPartyCode} from "../../vendor/ThirdPartyCode.sol"; +``` + +- An example would be + +```solidity +import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol"; +import {IPool} from "../interfaces/pools/IPool.sol"; + +import {AggregateRateLimiter} from "../AggregateRateLimiter.sol"; +import {Client} from "../libraries/Client.sol"; + +import {SafeERC20} from "../../vendor/openzeppelin-solidity/v4.8.0/contracts/token/ERC20/utils/SafeERC20.sol"; +import {IERC20} from "../../vendor/openzeppelin-solidity/v4.8.0/contracts/token/ERC20/IERC20.sol"; +``` + +## Variables + +### Visibility + +All contract variables should be private by default. Getters should be explicitly written and documented when you want to expose a variable publicly. +Whether a getter function reads from storage, a constant, or calculates a value from somewhere else, that’s all implementation details that should not be exposed to the consumer by casing or other conventions. + +rule: tbd + +### Naming and Casing + +- Storage variables prefixed with an `s_` to make it clear that they live in storage and are expensive to read and write: `s_variableName`. They should always be private, and you should write explicit getters if you want to expose a storage variable. + - rule: `plugin-solidity/prefix-storage-variables-with-s-underscore` +- Immutable variables should be prefixed with an `i_` to make it clear that they are immutable. E.g. `i_decimalPlaces`. They should always be private, and you should write explicit getters if you want to expose an immutable variable. 
+ - rule: `plugin-solidity/prefix-immutable-variables-with-i` +- Internal/private constants should be all caps with underscores: `FOO_BAR`. Like other contract variables, constants should not be public. Create getter methods if you want to publicly expose constants. + - rule: `plugin-solidity/all-caps-constant-storage-variables` +- Explicitly declare variable size: `uint256` not just `uint`. In addition to being explicit, it matches the naming used to calculate function selectors. + - rule: `explicit-types` +- Mapping should always be named if Solidity allows it (≥0.8.18) + - rule: `tbd` + + +## Functions + +### Visibility + +- Method visibility should always be explicitly declared. + - rule: `state-visibility` + +- Prefix private and internal methods with an underscore. There should never be a publicly callable method starting with an underscore. + - E.g. `_setOwner(address)` + - rule: `plugin-solidity/prefix-internal-functions-with-underscore` + +### Return values + +- Returned values should always be explicit. Using named return values and then returning with an empty return should be avoided + - rule: `plugin-solidity/explicit-returns` + +```solidity +// Bad +function getNum() external view returns (uint64 num) { + num = 4; + return; +} + +// Good +function getNum() external view returns (uint64 num) { + num = 4; + return num; +} + +// Good +function getNum() external view returns (uint64 num) { + return 4; +} +``` + +## Errors + +Use [custom errors](https://blog.soliditylang.org/2021/04/21/custom-errors/) instead of emitting strings. This saves contract code size and simultaneously provides more informative error messages. + +rule: `custom-errors` + +## Interfaces + +Interfaces should be named `IFoo` instead of `FooInterface`. This follows the patterns of popular [libraries like OpenZeppelin’s](https://github.com/OpenZeppelin/openzeppelin-contracts/blob/master/contracts/token/ERC20/IERC20.sol#L9). 
+ +rule: `tbd` + +## Structs + +Structs should be constructed with named arguments. This prevents accidental assignment to the wrong field and makes the code more readable. + +```solidity +// Good +function setConfig(uint64 _foo, uint64 _bar, uint64 _baz) external { + config = Config({ + foo: _foo, + bar: _bar, + baz: _baz + }); +} + +// Bad +function setConfig(uint64 _foo, uint64 _bar, uint64 _baz) external { + config = Config(_foo, _bar, _baz); +} +``` + +rule: `tbd` + diff --git a/contracts/ci.json b/contracts/ci.json new file mode 100644 index 00000000..f1eff765 --- /dev/null +++ b/contracts/ci.json @@ -0,0 +1,25 @@ +{ + "type": "solidity", + "basePath": "./contracts/test/", + "splits": [ + { + "dir": "cross-version", + "numOfSplits": 1 + }, + { + "dir": "v0.8", + "numOfSplits": 6, + "slowTests": [ + "Cron", + "CronUpkeep", + "VRFSubscriptionBalanceMonitor", + "EthBalanceMonitor", + "KeeperRegistrar", + "KeeperRegistry1_2", + "KeeperRegistry1_3", + "KeeperRegistry2_0", + "KeeperRegistry2_1" + ] + } + ] +} diff --git a/contracts/foundry.md b/contracts/foundry.md new file mode 100644 index 00000000..75cc72b0 --- /dev/null +++ b/contracts/foundry.md @@ -0,0 +1,27 @@ +## Foundry + +Plugin supports Foundry testing for Solidity based on the Solidity version. + +- < 0.8.0 Hardhat tests only +- 0.8.x Hardhat and Foundry tests +- \> 0.9.0 Foundry tests only + +New projects should use Foundry but existing ones can keep using Hardhat if they want to; we will not be forcing any teams to rewrite their existing test suite. + +## Installation + +Please see the [official docs](https://book.getfoundry.sh/getting-started/installation) for the latest installation guide + +## CI + +The Foundry CI will build, run tests and check the gas usage of all test functions against the version controlled `.gas-snapshot` file. +This snapshot should always be up-to-date and can be generated by running `make snapshot`, and alias for `forge snapshot --match-test _gas`. 
+We only track gas for tests that have the `_gas` suffix as to not introduce too much noise in the snapshot file. +This will create a snapshot file in the current working dir, please make sure that you are in the root `plugin` directory. + +## Configuration + +Foundry config lives in `foundry.toml` in the root, the docs can be found [here](https://book.getfoundry.sh/config/). +The remapping of libraries, both from Foundry and NPM packages, is done in `remapping.txt`. + + diff --git a/contracts/foundry.toml b/contracts/foundry.toml new file mode 100644 index 00000000..13807c71 --- /dev/null +++ b/contracts/foundry.toml @@ -0,0 +1,62 @@ +[profile.default] +auto_detect_solc = true +optimizer = true +optimizer_runs = 1_000_000 + +src = 'src/v0.8' +test = 'test/v0.8/foundry' +out = 'foundry-artifacts' +cache_path = 'foundry-cache' +libs = ['node_modules', 'foundry-lib'] +bytecode_hash = "none" +ffi = false + +# default is zero, using a non-zero amount enables us to test e.g. billing based on gas prices. 
+gas_price = 1 +block_timestamp = 1234567890 +block_number = 12345 + +[profile.functions] +solc_version = '0.8.19' +src = 'src/v0.8/functions/dev/v1_X' +test = 'src/v0.8/functions/tests/v1_X' +gas_price = 3_000_000_000 # 3 gwei + +[profile.vrf] +optimizer_runs = 1000 +src = 'src/v0.8/vrf' +test = 'test/v0.8/foundry/vrf' # skips tests for no VRF foundry tests +solc_version = '0.8.6' + +[profile.automation] +optimizer_runs = 10000 +src = 'src/v0.8/automation' +test = 'src/v0.8/automation/test' + +[profile.l2ep] +optimizer_runs = 1000000 +src = 'src/v0.8/l2ep' +test = 'src/v0.8/l2ep/test' +solc_version = '0.8.19' + +[profile.llo-feeds] +optimizer_runs = 1000000 +src = 'src/v0.8/llo-feeds' +test = 'src/v0.8/llo-feeds/test' +solc_version = '0.8.19' +# We cannot turn on deny_warnings = true as that will hide any CI failure + +[profile.keystone] +solc_version = '0.8.19' +src = 'src/v0.8/keystone' +test = 'src/v0.8/keystone/test' +optimizer_runs = 10_000 + +[profile.shared] +optimizer_runs = 1000000 +src = 'src/v0.8/shared' +test = 'src/v0.8/shared/test' +solc_version = '0.8.19' + + +# See more config options https://github.com/foundry-rs/foundry/tree/master/config diff --git a/contracts/gas-snapshots/automation.gas-snapshot b/contracts/gas-snapshots/automation.gas-snapshot new file mode 100644 index 00000000..620e0b22 --- /dev/null +++ b/contracts/gas-snapshots/automation.gas-snapshot @@ -0,0 +1,13 @@ +AutomationForwarder_forward:testBasicSuccess() (gas: 87630) +AutomationForwarder_forward:testNotAuthorizedReverts() (gas: 24560) +AutomationForwarder_forward:testWrongFunctionSelectorSuccess() (gas: 17958) +AutomationForwarder_updateRegistry:testBasicSuccess() (gas: 14577) +AutomationForwarder_updateRegistry:testNotFromRegistryNotAuthorizedReverts() (gas: 17665) +HeartbeatRequester_getAggregatorRequestHeartbeat:testBasicSuccess() (gas: 75412) +HeartbeatRequester_getAggregatorRequestHeartbeat:testHeartbeatNotPermittedReverts() (gas: 21730) 
+HeartbeatRequester_permitHeartbeat:testBasicDeployerSuccess() (gas: 48229) +HeartbeatRequester_permitHeartbeat:testBasicSuccess() (gas: 45844) +HeartbeatRequester_permitHeartbeat:testOnlyCallableByOwnerReverts() (gas: 17584) +HeartbeatRequester_removeHeartbeat:testBasicSuccess() (gas: 30192) +HeartbeatRequester_removeHeartbeat:testOnlyCallableByOwnerReverts() (gas: 15417) +HeartbeatRequester_removeHeartbeat:testRemoveNoPermitsSuccess() (gas: 15660) \ No newline at end of file diff --git a/contracts/gas-snapshots/functions.gas-snapshot b/contracts/gas-snapshots/functions.gas-snapshot new file mode 100644 index 00000000..279aa389 --- /dev/null +++ b/contracts/gas-snapshots/functions.gas-snapshot @@ -0,0 +1,237 @@ +ChainSpecificUtil__getCurrentTxL1GasFees_Arbitrum:test__getCurrentTxL1GasFees_SuccessWhenArbitrumGoerli() (gas: 14860808) +ChainSpecificUtil__getCurrentTxL1GasFees_Arbitrum:test__getCurrentTxL1GasFees_SuccessWhenArbitrumMainnet() (gas: 14860786) +ChainSpecificUtil__getCurrentTxL1GasFees_Arbitrum:test__getCurrentTxL1GasFees_SuccessWhenArbitrumSepolia() (gas: 14860802) +ChainSpecificUtil__getCurrentTxL1GasFees_Base:test__getCurrentTxL1GasFees_SuccessWhenBaseGoerli() (gas: 14872224) +ChainSpecificUtil__getCurrentTxL1GasFees_Base:test__getCurrentTxL1GasFees_SuccessWhenBaseMainnet() (gas: 14872201) +ChainSpecificUtil__getCurrentTxL1GasFees_Base:test__getCurrentTxL1GasFees_SuccessWhenBaseSepolia() (gas: 14872173) +ChainSpecificUtil__getCurrentTxL1GasFees_Optimism:test__getCurrentTxL1GasFees_SuccessWhenOptimismGoerli() (gas: 14872124) +ChainSpecificUtil__getCurrentTxL1GasFees_Optimism:test__getCurrentTxL1GasFees_SuccessWhenOptimismMainnet() (gas: 14872113) +ChainSpecificUtil__getCurrentTxL1GasFees_Optimism:test__getCurrentTxL1GasFees_SuccessWhenOptimismSepolia() (gas: 14872157) +FunctionsBilling_Constructor:test_Constructor_Success() (gas: 14812) +FunctionsBilling_DeleteCommitment:test_DeleteCommitment_RevertIfNotRouter() (gas: 13282) 
+FunctionsBilling_DeleteCommitment:test_DeleteCommitment_Success() (gas: 15897) +FunctionsBilling_EstimateCost:test_EstimateCost_RevertsIfGasPriceAboveCeiling() (gas: 32414) +FunctionsBilling_EstimateCost:test_EstimateCost_Success() (gas: 53763) +FunctionsBilling_EstimateCost:test_EstimateCost_SuccessLowGasPrice() (gas: 53866) +FunctionsBilling_GetAdminFee:test_GetAdminFee_Success() (gas: 18226) +FunctionsBilling_GetConfig:test_GetConfig_Success() (gas: 23693) +FunctionsBilling_GetDONFee:test_GetDONFee_Success() (gas: 15792) +FunctionsBilling_GetWeiPerUnitLink:test_GetWeiPerUnitLink_Success() (gas: 31773) +FunctionsBilling_OracleWithdraw:test_OracleWithdraw_RevertIfInsufficientBalance() (gas: 70128) +FunctionsBilling_OracleWithdraw:test_OracleWithdraw_RevertWithNoBalance() (gas: 106285) +FunctionsBilling_OracleWithdraw:test_OracleWithdraw_SuccessTransmitterWithBalanceNoAmountGiven() (gas: 140164) +FunctionsBilling_OracleWithdraw:test_OracleWithdraw_SuccessTransmitterWithBalanceValidAmountGiven() (gas: 142492) +FunctionsBilling_OracleWithdrawAll:test_OracleWithdrawAll_RevertIfNotOwner() (gas: 13296) +FunctionsBilling_OracleWithdrawAll:test_OracleWithdrawAll_SuccessPaysTransmittersWithBalance() (gas: 147278) +FunctionsBilling_UpdateConfig:test_UpdateConfig_RevertIfNotOwner() (gas: 18974) +FunctionsBilling_UpdateConfig:test_UpdateConfig_Success() (gas: 38273) +FunctionsBilling__DisperseFeePool:test__DisperseFeePool_RevertIfNotSet() (gas: 8810) +FunctionsBilling__FulfillAndBill:test__FulfillAndBill_RevertIfInvalidCommitment() (gas: 13302) +FunctionsBilling__FulfillAndBill:test__FulfillAndBill_Success() (gas: 180763) +FunctionsBilling__StartBilling:test__FulfillAndBill_HasUniqueGlobalRequestId() (gas: 398400) +FunctionsClient_Constructor:test_Constructor_Success() (gas: 7573) +FunctionsClient_HandleOracleFulfillment:test_HandleOracleFulfillment_RevertIfNotRouter() (gas: 14623) +FunctionsClient_HandleOracleFulfillment:test_HandleOracleFulfillment_Success() (gas: 22923) 
+FunctionsClient__SendRequest:test__SendRequest_RevertIfInvalidCallbackGasLimit() (gas: 55059) +FunctionsCoordinator_Constructor:test_Constructor_Success() (gas: 12006) +FunctionsCoordinator_GetDONPublicKey:test_GetDONPublicKey_RevertIfEmpty() (gas: 15356) +FunctionsCoordinator_GetDONPublicKey:test_GetDONPublicKey_Success() (gas: 106528) +FunctionsCoordinator_GetThresholdPublicKey:test_GetThresholdPublicKey_RevertIfEmpty() (gas: 15313) +FunctionsCoordinator_GetThresholdPublicKey:test_GetThresholdPublicKey_Success() (gas: 656362) +FunctionsCoordinator_SetDONPublicKey:test_SetDONPublicKey_RevertNotOwner() (gas: 20364) +FunctionsCoordinator_SetDONPublicKey:test_SetDONPublicKey_Success() (gas: 101307) +FunctionsCoordinator_SetThresholdPublicKey:test_SetThresholdPublicKey_RevertNotOwner() (gas: 13892) +FunctionsCoordinator_SetThresholdPublicKey:test_SetThresholdPublicKey_Success() (gas: 651054) +FunctionsCoordinator_StartRequest:test_StartRequest_RevertIfNotRouter() (gas: 22703) +FunctionsCoordinator_StartRequest:test_StartRequest_Success() (gas: 108804) +FunctionsCoordinator__IsTransmitter:test__IsTransmitter_SuccessFound() (gas: 18957) +FunctionsCoordinator__IsTransmitter:test__IsTransmitter_SuccessNotFound() (gas: 19690) +FunctionsRequest_DEFAULT_BUFFER_SIZE:test_DEFAULT_BUFFER_SIZE() (gas: 246) +FunctionsRequest_EncodeCBOR:test_EncodeCBOR_Success() (gas: 223) +FunctionsRequest_REQUEST_DATA_VERSION:test_REQUEST_DATA_VERSION() (gas: 225) +FunctionsRouter_Constructor:test_Constructor_Success() (gas: 12007) +FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedCostExceedsCommitment() (gas: 169829) +FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedInsufficientGas() (gas: 160160) +FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedInvalidCommitment() (gas: 38115) +FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedInvalidRequestId() (gas: 35238) +FunctionsRouter_Fulfill:test_Fulfill_RequestNotProcessedSubscriptionBalanceInvariant() (gas: 178305) 
+FunctionsRouter_Fulfill:test_Fulfill_RevertIfNotCommittedCoordinator() (gas: 28086) +FunctionsRouter_Fulfill:test_Fulfill_RevertIfPaused() (gas: 153867) +FunctionsRouter_Fulfill:test_Fulfill_SuccessClientNoLongerExists() (gas: 321317) +FunctionsRouter_Fulfill:test_Fulfill_SuccessFulfilled() (gas: 334938) +FunctionsRouter_Fulfill:test_Fulfill_SuccessUserCallbackReverts() (gas: 2510364) +FunctionsRouter_Fulfill:test_Fulfill_SuccessUserCallbackRunsOutOfGas() (gas: 540803) +FunctionsRouter_GetAdminFee:test_GetAdminFee_Success() (gas: 17983) +FunctionsRouter_GetAllowListId:test_GetAllowListId_Success() (gas: 12904) +FunctionsRouter_GetConfig:test_GetConfig_Success() (gas: 37159) +FunctionsRouter_GetContractById:test_GetContractById_RevertIfRouteDoesNotExist() (gas: 13849) +FunctionsRouter_GetContractById:test_GetContractById_SuccessIfRouteExists() (gas: 17373) +FunctionsRouter_GetProposedContractById:test_GetProposedContractById_RevertIfRouteDoesNotExist() (gas: 16383) +FunctionsRouter_GetProposedContractById:test_GetProposedContractById_SuccessIfRouteExists() (gas: 23935) +FunctionsRouter_GetProposedContractSet:test_GetProposedContractSet_Success() (gas: 25936) +FunctionsRouter_IsValidCallbackGasLimit:test_IsValidCallbackGasLimit_RevertGasLimitTooBig() (gas: 28103) +FunctionsRouter_IsValidCallbackGasLimit:test_IsValidCallbackGasLimit_RevertInvalidConfig() (gas: 41093) +FunctionsRouter_IsValidCallbackGasLimit:test_IsValidCallbackGasLimit_Success() (gas: 24626) +FunctionsRouter_Pause:test_Pause_RevertIfNotOwner() (gas: 13338) +FunctionsRouter_Pause:test_Pause_Success() (gas: 20344) +FunctionsRouter_ProposeContractsUpdate:test_ProposeContractsUpdate_RevertIfEmptyAddress() (gas: 14791) +FunctionsRouter_ProposeContractsUpdate:test_ProposeContractsUpdate_RevertIfExceedsMaxProposal() (gas: 21693) +FunctionsRouter_ProposeContractsUpdate:test_ProposeContractsUpdate_RevertIfLengthMismatch() (gas: 14670) 
+FunctionsRouter_ProposeContractsUpdate:test_ProposeContractsUpdate_RevertIfNotNewContract() (gas: 19048) +FunctionsRouter_ProposeContractsUpdate:test_ProposeContractsUpdate_RevertIfNotOwner() (gas: 23392) +FunctionsRouter_ProposeContractsUpdate:test_ProposeContractsUpdate_Success() (gas: 118479) +FunctionsRouter_SendRequest:test_SendRequest_RevertIfConsumerNotAllowed() (gas: 59391) +FunctionsRouter_SendRequest:test_SendRequest_RevertIfDuplicateRequestId() (gas: 193436) +FunctionsRouter_SendRequest:test_SendRequest_RevertIfEmptyData() (gas: 29426) +FunctionsRouter_SendRequest:test_SendRequest_RevertIfIncorrectDonId() (gas: 57904) +FunctionsRouter_SendRequest:test_SendRequest_RevertIfInsufficientSubscriptionBalance() (gas: 187020) +FunctionsRouter_SendRequest:test_SendRequest_RevertIfInvalidCallbackGasLimit() (gas: 50947) +FunctionsRouter_SendRequest:test_SendRequest_RevertIfInvalidDonId() (gas: 25082) +FunctionsRouter_SendRequest:test_SendRequest_RevertIfNoSubscription() (gas: 29132) +FunctionsRouter_SendRequest:test_SendRequest_RevertIfPaused() (gas: 34291) +FunctionsRouter_SendRequest:test_SendRequest_Success() (gas: 286199) +FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfConsumerNotAllowed() (gas: 65887) +FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfEmptyData() (gas: 36012) +FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfIncorrectDonId() (gas: 29896) +FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfInvalidCallbackGasLimit() (gas: 57533) +FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfInvalidDonId() (gas: 27503) +FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfNoSubscription() (gas: 35717) +FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_RevertIfPaused() (gas: 40810) +FunctionsRouter_SendRequestToProposed:test_SendRequestToProposed_Success() (gas: 292746) 
+FunctionsRouter_SendRequestToProposed:test_SendRequest_RevertIfInsufficientSubscriptionBalance() (gas: 193512) +FunctionsRouter_SetAllowListId:test_SetAllowListId_Success() (gas: 30688) +FunctionsRouter_SetAllowListId:test_UpdateConfig_RevertIfNotOwner() (gas: 13403) +FunctionsRouter_Unpause:test_Unpause_RevertIfNotOwner() (gas: 13293) +FunctionsRouter_Unpause:test_Unpause_Success() (gas: 77400) +FunctionsRouter_UpdateConfig:test_UpdateConfig_RevertIfNotOwner() (gas: 24437) +FunctionsRouter_UpdateConfig:test_UpdateConfig_Success() (gas: 60676) +FunctionsRouter_UpdateContracts:test_UpdateContracts_RevertIfNotOwner() (gas: 13336) +FunctionsRouter_UpdateContracts:test_UpdateContracts_Success() (gas: 38732) +FunctionsSubscriptions_AcceptSubscriptionOwnerTransfer:test_AcceptSubscriptionOwnerTransfer_RevertIfNotAllowedSender() (gas: 60414) +FunctionsSubscriptions_AcceptSubscriptionOwnerTransfer:test_AcceptSubscriptionOwnerTransfer_RevertIfPaused() (gas: 61031) +FunctionsSubscriptions_AcceptSubscriptionOwnerTransfer:test_AcceptSubscriptionOwnerTransfer_RevertIfSenderBecomesBlocked() (gas: 139404) +FunctionsSubscriptions_AcceptSubscriptionOwnerTransfer:test_AcceptSubscriptionOwnerTransfer_RevertIfSenderIsNotNewOwner() (gas: 62781) +FunctionsSubscriptions_AcceptSubscriptionOwnerTransfer:test_AcceptSubscriptionOwnerTransfer_Success() (gas: 215285) +FunctionsSubscriptions_AddConsumer:test_AddConsumer_RevertIfMaximumConsumers() (gas: 138025) +FunctionsSubscriptions_AddConsumer:test_AddConsumer_RevertIfMaximumConsumersAfterConfigUpdate() (gas: 164969) +FunctionsSubscriptions_AddConsumer:test_AddConsumer_RevertIfNoSubscription() (gas: 12946) +FunctionsSubscriptions_AddConsumer:test_AddConsumer_RevertIfNotAllowedSender() (gas: 102448) +FunctionsSubscriptions_AddConsumer:test_AddConsumer_RevertIfNotSubscriptionOwner() (gas: 87199) +FunctionsSubscriptions_AddConsumer:test_AddConsumer_RevertIfPaused() (gas: 18094) +FunctionsSubscriptions_AddConsumer:test_AddConsumer_Success() (gas: 
95524) +FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_RevertIfNoSubscription() (gas: 15041) +FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_RevertIfNotAllowedSender() (gas: 102524) +FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_RevertIfNotSubscriptionOwner() (gas: 89309) +FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_RevertIfPaused() (gas: 20148) +FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_RevertIfPendingRequests() (gas: 194369) +FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_SuccessForfeitAllBalanceAsDeposit() (gas: 114541) +FunctionsSubscriptions_CancelSubscription:test_CancelSubscription_SuccessForfeitSomeBalanceAsDeposit() (gas: 125867) +FunctionsSubscriptions_CancelSubscription_ReceiveDeposit:test_CancelSubscription_SuccessRecieveDeposit() (gas: 75017) +FunctionsSubscriptions_Constructor:test_Constructor_Success() (gas: 7654) +FunctionsSubscriptions_CreateSubscriptionWithConsumer:test_CreateSubscriptionWithConsumer_RevertIfNotAllowedSender() (gas: 28704) +FunctionsSubscriptions_CreateSubscriptionWithConsumer:test_CreateSubscriptionWithConsumer_RevertIfPaused() (gas: 17994) +FunctionsSubscriptions_CreateSubscriptionWithConsumer:test_CreateSubscriptionWithConsumer_Success() (gas: 351858) +FunctionsSubscriptions_GetConsumer:test_GetConsumer_Success() (gas: 16226) +FunctionsSubscriptions_GetFlags:test_GetFlags_SuccessInvalidSubscription() (gas: 13101) +FunctionsSubscriptions_GetFlags:test_GetFlags_SuccessValidSubscription() (gas: 40903) +FunctionsSubscriptions_GetSubscription:test_GetSubscription_Success() (gas: 30937) +FunctionsSubscriptions_GetSubscriptionCount:test_GetSubscriptionCount_Success() (gas: 12968) +FunctionsSubscriptions_GetSubscriptionsInRange:test_GetSubscriptionsInRange_RevertIfEndIsAfterLastSubscription() (gas: 16547) +FunctionsSubscriptions_GetSubscriptionsInRange:test_GetSubscriptionsInRange_RevertIfStartIsAfterEnd() (gas: 
13459) +FunctionsSubscriptions_GetSubscriptionsInRange:test_GetSubscriptionsInRange_Success() (gas: 59592) +FunctionsSubscriptions_GetTotalBalance:test_GetTotalBalance_Success() (gas: 15010) +FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_RevertIfCallerIsNoCalldata() (gas: 39939) +FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_RevertIfCallerIsNoSubscription() (gas: 42404) +FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_RevertIfCallerIsNotLink() (gas: 13441) +FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_RevertIfPaused() (gas: 47347) +FunctionsSubscriptions_OnTokenTransfer:test_OnTokenTransfer_Success() (gas: 81490) +FunctionsSubscriptions_OracleWithdraw:test_OracleWithdraw_RevertIfAmountMoreThanBalance() (gas: 20745) +FunctionsSubscriptions_OracleWithdraw:test_OracleWithdraw_RevertIfBalanceInvariant() (gas: 189) +FunctionsSubscriptions_OracleWithdraw:test_OracleWithdraw_RevertIfNoAmount() (gas: 15638) +FunctionsSubscriptions_OracleWithdraw:test_OracleWithdraw_RevertIfPaused() (gas: 20856) +FunctionsSubscriptions_OracleWithdraw:test_OracleWithdraw_SuccessPaysRecipient() (gas: 59732) +FunctionsSubscriptions_OracleWithdraw:test_OracleWithdraw_SuccessSetsBalanceToZero() (gas: 57701) +FunctionsSubscriptions_OwnerCancelSubscription:test_OwnerCancelSubscription_RevertIfNoSubscription() (gas: 12818) +FunctionsSubscriptions_OwnerCancelSubscription:test_OwnerCancelSubscription_RevertIfNotOwner() (gas: 15549) +FunctionsSubscriptions_OwnerCancelSubscription:test_OwnerCancelSubscription_Success() (gas: 54867) +FunctionsSubscriptions_OwnerCancelSubscription:test_OwnerCancelSubscription_SuccessDeletesSubscription() (gas: 49607) +FunctionsSubscriptions_OwnerCancelSubscription:test_OwnerCancelSubscription_SuccessSubOwnerRefunded() (gas: 50896) +FunctionsSubscriptions_OwnerCancelSubscription:test_OwnerCancelSubscription_SuccessWhenRequestInFlight() (gas: 164812) 
+FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_RevertIfAmountMoreThanBalance() (gas: 17924) +FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_RevertIfBalanceInvariant() (gas: 210) +FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_RevertIfNotOwner() (gas: 15555) +FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_SuccessIfNoAmount() (gas: 37396) +FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_SuccessIfRecipientAddressZero() (gas: 52130) +FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_SuccessPaysRecipient() (gas: 54413) +FunctionsSubscriptions_OwnerWithdraw:test_OwnerWithdraw_SuccessSetsBalanceToZero() (gas: 37790) +FunctionsSubscriptions_PendingRequestExists:test_PendingRequestExists_SuccessFalseIfNoPendingRequests() (gas: 14981) +FunctionsSubscriptions_PendingRequestExists:test_PendingRequestExists_SuccessTrueIfPendingRequests() (gas: 176478) +FunctionsSubscriptions_ProposeSubscriptionOwnerTransfer:test_ProposeSubscriptionOwnerTransfer_RevertIfEmptyNewOwner() (gas: 27655) +FunctionsSubscriptions_ProposeSubscriptionOwnerTransfer:test_ProposeSubscriptionOwnerTransfer_RevertIfInvalidNewOwner() (gas: 57797) +FunctionsSubscriptions_ProposeSubscriptionOwnerTransfer:test_ProposeSubscriptionOwnerTransfer_RevertIfNoSubscription() (gas: 15001) +FunctionsSubscriptions_ProposeSubscriptionOwnerTransfer:test_ProposeSubscriptionOwnerTransfer_RevertIfNotAllowedSender() (gas: 119770) +FunctionsSubscriptions_ProposeSubscriptionOwnerTransfer:test_ProposeSubscriptionOwnerTransfer_RevertIfNotSubscriptionOwner() (gas: 17960) +FunctionsSubscriptions_ProposeSubscriptionOwnerTransfer:test_ProposeSubscriptionOwnerTransfer_RevertIfPaused() (gas: 20128) +FunctionsSubscriptions_ProposeSubscriptionOwnerTransfer:test_ProposeSubscriptionOwnerTransfer_Success() (gas: 68240) +FunctionsSubscriptions_ProposeSubscriptionOwnerTransfer:test_ProposeSubscriptionOwnerTransfer_SuccessChangeProposedOwner() (gas: 82837) 
+FunctionsSubscriptions_RecoverFunds:test_OwnerCancelSubscription_RevertIfNotOwner() (gas: 15554) +FunctionsSubscriptions_RecoverFunds:test_RecoverFunds_Success() (gas: 41111) +FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_RevertIfInvalidConsumer() (gas: 30304) +FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_RevertIfNoSubscription() (gas: 15019) +FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_RevertIfNotAllowedSender() (gas: 102439) +FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_RevertIfNotSubscriptionOwner() (gas: 87245) +FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_RevertIfPaused() (gas: 18049) +FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_RevertIfPendingRequests() (gas: 191894) +FunctionsSubscriptions_RemoveConsumer:test_RemoveConsumer_Success() (gas: 42023) +FunctionsSubscriptions_SetFlags:test_SetFlags_RevertIfNoSubscription() (gas: 12891) +FunctionsSubscriptions_SetFlags:test_SetFlags_RevertIfNotOwner() (gas: 15684) +FunctionsSubscriptions_SetFlags:test_SetFlags_Success() (gas: 35594) +FunctionsSubscriptions_TimeoutRequests:test_TimeoutRequests_RevertIfPaused() (gas: 25955) +FunctionsSubscriptions_TimeoutRequests:test_TimeoutRequests_RevertIfTimeoutNotExceeded() (gas: 25261) +FunctionsSubscriptions_TimeoutRequests:test_TimeoutRequests_RevertInvalidRequest() (gas: 28242) +FunctionsSubscriptions_TimeoutRequests:test_TimeoutRequests_Success() (gas: 57754) +FunctionsSubscriptions_createSubscription:test_CreateSubscription_RevertIfNotAllowedSender() (gas: 26434) +FunctionsSubscriptions_createSubscription:test_CreateSubscription_RevertIfPaused() (gas: 15759) +FunctionsSubscriptions_createSubscription:test_CreateSubscription_Success() (gas: 152708) +FunctionsTermsOfServiceAllowList_AcceptTermsOfService:testAcceptTermsOfService_InvalidSigner_vuln() (gas: 94913) +FunctionsTermsOfServiceAllowList_AcceptTermsOfService:test_AcceptTermsOfService_RevertIfAcceptorIsNotSender() (gas: 25859) 
+FunctionsTermsOfServiceAllowList_AcceptTermsOfService:test_AcceptTermsOfService_RevertIfBlockedSender() (gas: 88990) +FunctionsTermsOfServiceAllowList_AcceptTermsOfService:test_AcceptTermsOfService_RevertIfInvalidSigner() (gas: 23619) +FunctionsTermsOfServiceAllowList_AcceptTermsOfService:test_AcceptTermsOfService_RevertIfRecipientContractIsNotSender() (gas: 1866552) +FunctionsTermsOfServiceAllowList_AcceptTermsOfService:test_AcceptTermsOfService_RevertIfRecipientIsNotSender() (gas: 26025) +FunctionsTermsOfServiceAllowList_AcceptTermsOfService:test_AcceptTermsOfService_SuccessIfAcceptingForContract() (gas: 1946628) +FunctionsTermsOfServiceAllowList_AcceptTermsOfService:test_AcceptTermsOfService_SuccessIfAcceptingForSelf() (gas: 103533) +FunctionsTermsOfServiceAllowList_BlockSender:test_BlockSender_RevertIfNotOwner() (gas: 15491) +FunctionsTermsOfServiceAllowList_BlockSender:test_BlockSender_Success() (gas: 96662) +FunctionsTermsOfServiceAllowList_Constructor:test_Constructor_Success() (gas: 12253) +FunctionsTermsOfServiceAllowList_GetAllAllowedSenders:test_GetAllAllowedSenders_Success() (gas: 19199) +FunctionsTermsOfServiceAllowList_GetAllowedSendersCount:test_GetAllowedSendersCount_Success() (gas: 12995) +FunctionsTermsOfServiceAllowList_GetAllowedSendersInRange:test_GetAllowedSendersInRange_RevertIfAllowedSendersIsEmpty() (gas: 12239111) +FunctionsTermsOfServiceAllowList_GetAllowedSendersInRange:test_GetAllowedSendersInRange_RevertIfEndIsAfterLastAllowedSender() (gas: 16571) +FunctionsTermsOfServiceAllowList_GetAllowedSendersInRange:test_GetAllowedSendersInRange_RevertIfStartIsAfterEnd() (gas: 13301) +FunctionsTermsOfServiceAllowList_GetAllowedSendersInRange:test_GetAllowedSendersInRange_Success() (gas: 20448) +FunctionsTermsOfServiceAllowList_GetBlockedSendersCount:test_GetBlockedSendersCount_Success() (gas: 12931) +FunctionsTermsOfServiceAllowList_GetBlockedSendersInRange:test_GetBlockedSendersInRange_RevertIfAllowedSendersIsEmpty() (gas: 12239115) 
+FunctionsTermsOfServiceAllowList_GetBlockedSendersInRange:test_GetBlockedSendersInRange_RevertIfEndIsAfterLastAllowedSender() (gas: 16549) +FunctionsTermsOfServiceAllowList_GetBlockedSendersInRange:test_GetBlockedSendersInRange_RevertIfStartIsAfterEnd() (gas: 13367) +FunctionsTermsOfServiceAllowList_GetBlockedSendersInRange:test_GetBlockedSendersInRange_Success() (gas: 18493) +FunctionsTermsOfServiceAllowList_GetConfig:test_GetConfig_Success() (gas: 15751) +FunctionsTermsOfServiceAllowList_GetMessage:test_GetMessage_Success() (gas: 11593) +FunctionsTermsOfServiceAllowList_HasAccess:test_HasAccess_FalseWhenEnabled() (gas: 15969) +FunctionsTermsOfServiceAllowList_HasAccess:test_HasAccess_TrueWhenDisabled() (gas: 23560) +FunctionsTermsOfServiceAllowList_IsBlockedSender:test_IsBlockedSender_SuccessFalse() (gas: 15445) +FunctionsTermsOfServiceAllowList_IsBlockedSender:test_IsBlockedSender_SuccessTrue() (gas: 86643) +FunctionsTermsOfServiceAllowList_UnblockSender:test_UnblockSender_RevertIfNotOwner() (gas: 13502) +FunctionsTermsOfServiceAllowList_UnblockSender:test_UnblockSender_Success() (gas: 96216) +FunctionsTermsOfServiceAllowList_UpdateConfig:test_UpdateConfig_RevertIfNotOwner() (gas: 13824) +FunctionsTermsOfServiceAllowList_UpdateConfig:test_UpdateConfig_Success() (gas: 22183) +Gas_AcceptTermsOfService:test_AcceptTermsOfService_Gas() (gas: 84702) +Gas_AddConsumer:test_AddConsumer_Gas() (gas: 79131) +Gas_CreateSubscription:test_CreateSubscription_Gas() (gas: 73419) +Gas_FulfillRequest_DuplicateRequestID:test_FulfillRequest_DuplicateRequestID_MaximumGas() (gas: 20695) +Gas_FulfillRequest_DuplicateRequestID:test_FulfillRequest_DuplicateRequestID_MinimumGas() (gas: 20135) +Gas_FulfillRequest_Success:test_FulfillRequest_Success_MaximumGas() (gas: 498083) +Gas_FulfillRequest_Success:test_FulfillRequest_Success_MinimumGas() (gas: 199286) +Gas_FundSubscription:test_FundSubscription_Gas() (gas: 38546) +Gas_SendRequest:test_SendRequest_MaximumGas() (gas: 979631) 
+Gas_SendRequest:test_SendRequest_MinimumGas() (gas: 157578) \ No newline at end of file diff --git a/contracts/gas-snapshots/keystone.gas-snapshot b/contracts/gas-snapshots/keystone.gas-snapshot new file mode 100644 index 00000000..be23de1f --- /dev/null +++ b/contracts/gas-snapshots/keystone.gas-snapshot @@ -0,0 +1,2 @@ +KeystoneForwarderTest:test_abi_partial_decoding_works() (gas: 2068) +KeystoneForwarderTest:test_it_works() (gas: 993848) \ No newline at end of file diff --git a/contracts/gas-snapshots/l2ep.gas-snapshot b/contracts/gas-snapshots/l2ep.gas-snapshot new file mode 100644 index 00000000..1f229f7d --- /dev/null +++ b/contracts/gas-snapshots/l2ep.gas-snapshot @@ -0,0 +1,146 @@ +ArbitrumCrossDomainForwarder_AcceptL1Ownership:test_CallableByPendingL1Owner() (gas: 37312) +ArbitrumCrossDomainForwarder_AcceptL1Ownership:test_NotCallableByNonPendingOwners() (gas: 12963) +ArbitrumCrossDomainForwarder_Constructor:test_InitialState() (gas: 18431) +ArbitrumCrossDomainForwarder_Forward:test_Forward() (gas: 47601) +ArbitrumCrossDomainForwarder_Forward:test_ForwardRevert() (gas: 22151) +ArbitrumCrossDomainForwarder_Forward:test_NotCallableByUnknownAddress() (gas: 16048) +ArbitrumCrossDomainForwarder_TransferL1Ownership:test_CallableByL1Owner() (gas: 41408) +ArbitrumCrossDomainForwarder_TransferL1Ownership:test_CallableByL1OwnerOrZeroAddress() (gas: 19312) +ArbitrumCrossDomainForwarder_TransferL1Ownership:test_NotCallableByL2Owner() (gas: 18323) +ArbitrumCrossDomainForwarder_TransferL1Ownership:test_NotCallableByNonOwners() (gas: 13200) +ArbitrumCrossDomainGovernor_AcceptL1Ownership:test_CallableByPendingL1Owner() (gas: 37312) +ArbitrumCrossDomainGovernor_AcceptL1Ownership:test_NotCallableByNonPendingOwners() (gas: 12963) +ArbitrumCrossDomainGovernor_Constructor:test_InitialState() (gas: 18454) +ArbitrumCrossDomainGovernor_Forward:test_CallableByL2Owner() (gas: 49720) +ArbitrumCrossDomainGovernor_Forward:test_Forward() (gas: 47658) 
+ArbitrumCrossDomainGovernor_Forward:test_ForwardRevert() (gas: 24348) +ArbitrumCrossDomainGovernor_Forward:test_NotCallableByUnknownAddress() (gas: 18247) +ArbitrumCrossDomainGovernor_ForwardDelegate:test_BubbleUpRevert() (gas: 19386) +ArbitrumCrossDomainGovernor_ForwardDelegate:test_CallableByCrossDomainMessengerAddressOrL1Owner() (gas: 60617) +ArbitrumCrossDomainGovernor_ForwardDelegate:test_CallableByL2Owner() (gas: 62723) +ArbitrumCrossDomainGovernor_ForwardDelegate:test_NotCallableByUnknownAddress() (gas: 18237) +ArbitrumCrossDomainGovernor_ForwardDelegate:test_RevertsBatchWhenOneCallFails() (gas: 64110) +ArbitrumCrossDomainGovernor_TransferL1Ownership:test_CallableByL1Owner() (gas: 41408) +ArbitrumCrossDomainGovernor_TransferL1Ownership:test_CallableByL1OwnerOrZeroAddress() (gas: 19312) +ArbitrumCrossDomainGovernor_TransferL1Ownership:test_NotCallableByL2Owner() (gas: 18323) +ArbitrumCrossDomainGovernor_TransferL1Ownership:test_NotCallableByNonOwners() (gas: 13200) +ArbitrumSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForGetAnswer() (gas: 92118) +ArbitrumSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForGetRoundData() (gas: 92673) +ArbitrumSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForGetTimestamp() (gas: 92039) +ArbitrumSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForLatestAnswer() (gas: 89813) +ArbitrumSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForLatestRound() (gas: 89705) +ArbitrumSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForLatestRoundData() (gas: 90246) +ArbitrumSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForLatestTimestamp() (gas: 89690) +ArbitrumSequencerUptimeFeed_AggregatorV3Interface:test_AggregatorV3Interface() (gas: 98825) +ArbitrumSequencerUptimeFeed_AggregatorV3Interface:test_Return0WhenRoundDoesNotExistYet() (gas: 18309) +ArbitrumSequencerUptimeFeed_Constants:test_InitialState() (gas: 5684) 
+ArbitrumSequencerUptimeFeed_GasCosts:test_GasCosts() (gas: 97495) +ArbitrumSequencerUptimeFeed_ProtectReadsOnAggregatorV2V3InterfaceFunctions:test_AggregatorV2V3InterfaceAllowReadsIfConsumingContractIsWhitelisted() (gas: 602711) +ArbitrumSequencerUptimeFeed_ProtectReadsOnAggregatorV2V3InterfaceFunctions:test_AggregatorV2V3InterfaceDisallowReadsIfConsumingContractIsNotWhitelisted() (gas: 573802) +ArbitrumSequencerUptimeFeed_UpdateStatus:test_IgnoreOutOfOrderUpdates() (gas: 98976) +ArbitrumSequencerUptimeFeed_UpdateStatus:test_RevertIfNotL2CrossDomainMessengerAddr() (gas: 15416) +ArbitrumSequencerUptimeFeed_UpdateStatus:test_UpdateStatusWhenStatusChangeAndNoTimeChange() (gas: 113269) +ArbitrumSequencerUptimeFeed_UpdateStatus:test_UpdateStatusWhenStatusChangeAndTimeChange() (gas: 113329) +ArbitrumValidator_Validate:test_PostSequencerOffline() (gas: 69068) +OptimismCrossDomainForwarder_AcceptL1Ownership:test_CallableByPendingL1Owner() (gas: 46888) +OptimismCrossDomainForwarder_AcceptL1Ownership:test_NotCallableByNonPendingOwners() (gas: 22155) +OptimismCrossDomainForwarder_Constructor:test_InitialState() (gas: 18266) +OptimismCrossDomainForwarder_Forward:test_Forward() (gas: 58025) +OptimismCrossDomainForwarder_Forward:test_ForwardRevert() (gas: 32546) +OptimismCrossDomainForwarder_Forward:test_NotCallableByUnknownAddress() (gas: 13859) +OptimismCrossDomainForwarder_TransferL1Ownership:test_CallableByL1Owner() (gas: 48886) +OptimismCrossDomainForwarder_TransferL1Ownership:test_CallableByL1OwnerOrZeroAddress() (gas: 28767) +OptimismCrossDomainForwarder_TransferL1Ownership:test_NotCallableByL2Owner() (gas: 16134) +OptimismCrossDomainForwarder_TransferL1Ownership:test_NotCallableByNonOwners() (gas: 11011) +OptimismCrossDomainGovernor_AcceptL1Ownership:test_CallableByPendingL1Owner() (gas: 46888) +OptimismCrossDomainGovernor_AcceptL1Ownership:test_NotCallableByNonPendingOwners() (gas: 22155) +OptimismCrossDomainGovernor_Constructor:test_InitialState() (gas: 18289) 
+OptimismCrossDomainGovernor_Forward:test_CallableByL2Owner() (gas: 47557) +OptimismCrossDomainGovernor_Forward:test_Forward() (gas: 58096) +OptimismCrossDomainGovernor_Forward:test_ForwardRevert() (gas: 32627) +OptimismCrossDomainGovernor_Forward:test_NotCallableByUnknownAddress() (gas: 16061) +OptimismCrossDomainGovernor_ForwardDelegate:test_BubbleUpRevert() (gas: 29181) +OptimismCrossDomainGovernor_ForwardDelegate:test_CallableByCrossDomainMessengerAddressOrL1Owner() (gas: 72695) +OptimismCrossDomainGovernor_ForwardDelegate:test_CallableByL2Owner() (gas: 72685) +OptimismCrossDomainGovernor_ForwardDelegate:test_NotCallableByUnknownAddress() (gas: 16051) +OptimismCrossDomainGovernor_ForwardDelegate:test_RevertsBatchWhenOneCallFails() (gas: 75908) +OptimismCrossDomainGovernor_TransferL1Ownership:test_CallableByL1Owner() (gas: 48886) +OptimismCrossDomainGovernor_TransferL1Ownership:test_CallableByL1OwnerOrZeroAddress() (gas: 28767) +OptimismCrossDomainGovernor_TransferL1Ownership:test_NotCallableByL2Owner() (gas: 16134) +OptimismCrossDomainGovernor_TransferL1Ownership:test_NotCallableByNonOwners() (gas: 11011) +OptimismSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForGetAnswer() (gas: 59095) +OptimismSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForGetRoundData() (gas: 59635) +OptimismSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForGetTimestamp() (gas: 58950) +OptimismSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForLatestAnswer() (gas: 56887) +OptimismSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForLatestRound() (gas: 56773) +OptimismSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForLatestRoundData() (gas: 57309) +OptimismSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForLatestTimestamp() (gas: 56740) +OptimismSequencerUptimeFeed_AggregatorV3Interface:test_AggregatorV3Interface() (gas: 65617) 
+OptimismSequencerUptimeFeed_AggregatorV3Interface:test_RevertGetAnswerWhenRoundDoesNotExistYet() (gas: 17679) +OptimismSequencerUptimeFeed_AggregatorV3Interface:test_RevertGetRoundDataWhenRoundDoesNotExistYet() (gas: 17897) +OptimismSequencerUptimeFeed_AggregatorV3Interface:test_RevertGetTimestampWhenRoundDoesNotExistYet() (gas: 17603) +OptimismSequencerUptimeFeed_Constructor:test_InitialState() (gas: 21078) +OptimismSequencerUptimeFeed_GasCosts:test_GasCosts() (gas: 67197) +OptimismSequencerUptimeFeed_ProtectReadsOnAggregatorV2V3InterfaceFunctions:test_AggregatorV2V3InterfaceAllowReadsIfConsumingContractIsWhitelisted() (gas: 597640) +OptimismSequencerUptimeFeed_ProtectReadsOnAggregatorV2V3InterfaceFunctions:test_AggregatorV2V3InterfaceDisallowReadsIfConsumingContractIsNotWhitelisted() (gas: 573807) +OptimismSequencerUptimeFeed_UpdateStatus:test_IgnoreOutOfOrderUpdates() (gas: 66532) +OptimismSequencerUptimeFeed_UpdateStatus:test_RevertIfNotL2CrossDomainMessengerAddr() (gas: 13200) +OptimismSequencerUptimeFeed_UpdateStatus:test_RevertIfNotL2CrossDomainMessengerAddrAndNotL1SenderAddr() (gas: 23607) +OptimismSequencerUptimeFeed_UpdateStatus:test_UpdateStatusWhenNoChange() (gas: 74035) +OptimismSequencerUptimeFeed_UpdateStatus:test_UpdateStatusWhenStatusChangeAndNoTimeChange() (gas: 96155) +OptimismSequencerUptimeFeed_UpdateStatus:test_UpdateStatusWhenStatusChangeAndTimeChange() (gas: 96215) +OptimismValidator_SetGasLimit:test_CorrectlyUpdatesTheGasLimit() (gas: 15503) +OptimismValidator_Validate:test_PostSequencerOffline() (gas: 74813) +OptimismValidator_Validate:test_PostSequencerStatusWhenThereIsNotStatusChange() (gas: 74869) +OptimismValidator_Validate:test_RevertsIfCalledByAnAccountWithNoAccess() (gas: 15563) +ScrollCrossDomainForwarder_AcceptL1Ownership:test_CallableByPendingL1Owner() (gas: 46988) +ScrollCrossDomainForwarder_AcceptL1Ownership:test_NotCallableByNonPendingOwners() (gas: 22207) +ScrollCrossDomainForwarder_Constructor:test_InitialState() (gas: 
17930) +ScrollCrossDomainForwarder_Forward:test_Forward() (gas: 58092) +ScrollCrossDomainForwarder_Forward:test_ForwardRevert() (gas: 32619) +ScrollCrossDomainForwarder_Forward:test_NotCallableByUnknownAddress() (gas: 13859) +ScrollCrossDomainForwarder_TransferL1Ownership:test_CallableByL1Owner() (gas: 48952) +ScrollCrossDomainForwarder_TransferL1Ownership:test_CallableByL1OwnerOrZeroAddress() (gas: 28833) +ScrollCrossDomainForwarder_TransferL1Ownership:test_NotCallableByL2Owner() (gas: 16134) +ScrollCrossDomainForwarder_TransferL1Ownership:test_NotCallableByNonOwners() (gas: 11011) +ScrollCrossDomainGovernor_AcceptL1Ownership:test_CallableByPendingL1Owner() (gas: 46988) +ScrollCrossDomainGovernor_AcceptL1Ownership:test_NotCallableByNonPendingOwners() (gas: 22207) +ScrollCrossDomainGovernor_Constructor:test_InitialState() (gas: 17953) +ScrollCrossDomainGovernor_Forward:test_CallableByL2Owner() (gas: 47552) +ScrollCrossDomainGovernor_Forward:test_Forward() (gas: 58158) +ScrollCrossDomainGovernor_Forward:test_ForwardRevert() (gas: 32697) +ScrollCrossDomainGovernor_Forward:test_NotCallableByUnknownAddress() (gas: 16058) +ScrollCrossDomainGovernor_ForwardDelegate:test_BubbleUpRevert() (gas: 29248) +ScrollCrossDomainGovernor_ForwardDelegate:test_CallableByCrossDomainMessengerAddressOrL1Owner() (gas: 72756) +ScrollCrossDomainGovernor_ForwardDelegate:test_CallableByL2Owner() (gas: 72746) +ScrollCrossDomainGovernor_ForwardDelegate:test_NotCallableByUnknownAddress() (gas: 16048) +ScrollCrossDomainGovernor_ForwardDelegate:test_RevertsBatchWhenOneCallFails() (gas: 75970) +ScrollCrossDomainGovernor_TransferL1Ownership:test_CallableByL1Owner() (gas: 48952) +ScrollCrossDomainGovernor_TransferL1Ownership:test_CallableByL1OwnerOrZeroAddress() (gas: 28833) +ScrollCrossDomainGovernor_TransferL1Ownership:test_NotCallableByL2Owner() (gas: 16134) +ScrollCrossDomainGovernor_TransferL1Ownership:test_NotCallableByNonOwners() (gas: 11011) 
+ScrollSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForGetAnswer() (gas: 57250) +ScrollSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForGetRoundData() (gas: 57780) +ScrollSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForGetTimestamp() (gas: 57105) +ScrollSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForLatestAnswer() (gas: 54888) +ScrollSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForLatestRound() (gas: 54768) +ScrollSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForLatestRoundData() (gas: 55473) +ScrollSequencerUptimeFeed_AggregatorInterfaceGasCosts:test_GasUsageForLatestTimestamp() (gas: 54758) +ScrollSequencerUptimeFeed_AggregatorV3Interface:test_AggregatorV3Interface() (gas: 63903) +ScrollSequencerUptimeFeed_AggregatorV3Interface:test_RevertGetAnswerWhenRoundDoesNotExistYet() (gas: 17675) +ScrollSequencerUptimeFeed_AggregatorV3Interface:test_RevertGetRoundDataWhenRoundDoesNotExistYet() (gas: 17893) +ScrollSequencerUptimeFeed_AggregatorV3Interface:test_RevertGetTimestampWhenRoundDoesNotExistYet() (gas: 17599) +ScrollSequencerUptimeFeed_Constructor:test_InitialState() (gas: 102485) +ScrollSequencerUptimeFeed_GasCosts:test_GasCosts() (gas: 64888) +ScrollSequencerUptimeFeed_ProtectReadsOnAggregatorV2V3InterfaceFunctions:test_AggregatorV2V3InterfaceAllowReadsIfConsumingContractIsWhitelisted() (gas: 597491) +ScrollSequencerUptimeFeed_ProtectReadsOnAggregatorV2V3InterfaceFunctions:test_AggregatorV2V3InterfaceDisallowReadsIfConsumingContractIsNotWhitelisted() (gas: 573807) +ScrollSequencerUptimeFeed_UpdateStatus:test_IgnoreOutOfOrderUpdates() (gas: 64417) +ScrollSequencerUptimeFeed_UpdateStatus:test_RevertIfNotL2CrossDomainMessengerAddr() (gas: 13200) +ScrollSequencerUptimeFeed_UpdateStatus:test_RevertIfNotL2CrossDomainMessengerAddrAndNotL1SenderAddr() (gas: 23607) +ScrollSequencerUptimeFeed_UpdateStatus:test_UpdateStatusWhenNoChange() (gas: 71618) 
+ScrollSequencerUptimeFeed_UpdateStatus:test_UpdateStatusWhenStatusChangeAndNoTimeChange() (gas: 92018) +ScrollSequencerUptimeFeed_UpdateStatus:test_UpdateStatusWhenStatusChangeAndTimeChange() (gas: 92078) +ScrollValidator_SetGasLimit:test_CorrectlyUpdatesTheGasLimit() (gas: 15503) +ScrollValidator_Validate:test_PostSequencerOffline() (gas: 75094) +ScrollValidator_Validate:test_PostSequencerStatusWhenThereIsNotStatusChange() (gas: 75156) +ScrollValidator_Validate:test_RevertsIfCalledByAnAccountWithNoAccess() (gas: 15563) \ No newline at end of file diff --git a/contracts/gas-snapshots/llo-feeds.gas-snapshot b/contracts/gas-snapshots/llo-feeds.gas-snapshot new file mode 100644 index 00000000..ec037aaf --- /dev/null +++ b/contracts/gas-snapshots/llo-feeds.gas-snapshot @@ -0,0 +1,280 @@ +ByteUtilTest:test_readAddress() (gas: 542) +ByteUtilTest:test_readAddressMultiWord() (gas: 540) +ByteUtilTest:test_readAddressWithEmptyArray() (gas: 3274) +ByteUtilTest:test_readAddressWithNotEnoughBytes() (gas: 3314) +ByteUtilTest:test_readUint192Max() (gas: 485) +ByteUtilTest:test_readUint192Min() (gas: 508) +ByteUtilTest:test_readUint192MultiWord() (gas: 486) +ByteUtilTest:test_readUint192WithEmptyArray() (gas: 3274) +ByteUtilTest:test_readUint192WithNotEnoughBytes() (gas: 3314) +ByteUtilTest:test_readUint256Max() (gas: 502) +ByteUtilTest:test_readUint256Min() (gas: 546) +ByteUtilTest:test_readUint256MultiWord() (gas: 500) +ByteUtilTest:test_readUint256WithEmptyArray() (gas: 3296) +ByteUtilTest:test_readUint256WithNotEnoughBytes() (gas: 3293) +ByteUtilTest:test_readUint32Max() (gas: 507) +ByteUtilTest:test_readUint32Min() (gas: 487) +ByteUtilTest:test_readUint32MultiWord() (gas: 552) +ByteUtilTest:test_readUint32WithEmptyArray() (gas: 3253) +ByteUtilTest:test_readUint32WithNotEnoughBytes() (gas: 3272) +ByteUtilTest:test_readZeroAddress() (gas: 519) +FeeManagerProcessFeeTest:test_DiscountIsAppliedForNative() (gas: 52288) +FeeManagerProcessFeeTest:test_DiscountIsReturnedForNative() 
(gas: 52241) +FeeManagerProcessFeeTest:test_DiscountIsReturnedForNativeWithSurcharge() (gas: 78446) +FeeManagerProcessFeeTest:test_V1PayloadVerifies() (gas: 26980) +FeeManagerProcessFeeTest:test_V1PayloadVerifiesAndReturnsChange() (gas: 57895) +FeeManagerProcessFeeTest:test_V2PayloadVerifies() (gas: 116094) +FeeManagerProcessFeeTest:test_V2PayloadWithoutQuoteFails() (gas: 27395) +FeeManagerProcessFeeTest:test_V2PayloadWithoutZeroFee() (gas: 70370) +FeeManagerProcessFeeTest:test_WithdrawERC20() (gas: 71617) +FeeManagerProcessFeeTest:test_WithdrawNonAdminAddr() (gas: 56261) +FeeManagerProcessFeeTest:test_WithdrawUnwrappedNative() (gas: 25322) +FeeManagerProcessFeeTest:test_baseFeeIsAppliedForLink() (gas: 14347) +FeeManagerProcessFeeTest:test_baseFeeIsAppliedForNative() (gas: 17285) +FeeManagerProcessFeeTest:test_correctDiscountIsAppliedWhenBothTokensAreDiscounted() (gas: 90297) +FeeManagerProcessFeeTest:test_discountAIsNotAppliedWhenSetForOtherUsers() (gas: 56177) +FeeManagerProcessFeeTest:test_discountFeeRoundsDownWhenUneven() (gas: 52490) +FeeManagerProcessFeeTest:test_discountIsAppliedForLink() (gas: 49279) +FeeManagerProcessFeeTest:test_discountIsAppliedWith100PercentSurcharge() (gas: 78538) +FeeManagerProcessFeeTest:test_discountIsNoLongerAppliedAfterRemoving() (gas: 45940) +FeeManagerProcessFeeTest:test_discountIsNotAppliedForInvalidTokenAddress() (gas: 17546) +FeeManagerProcessFeeTest:test_discountIsNotAppliedToOtherFeeds() (gas: 54247) +FeeManagerProcessFeeTest:test_discountIsReturnedForLink() (gas: 49254) +FeeManagerProcessFeeTest:test_emptyQuoteRevertsWithError() (gas: 12152) +FeeManagerProcessFeeTest:test_eventIsEmittedAfterSurchargeIsSet() (gas: 41348) +FeeManagerProcessFeeTest:test_eventIsEmittedIfNotEnoughLink() (gas: 172747) +FeeManagerProcessFeeTest:test_eventIsEmittedUponWithdraw() (gas: 68984) +FeeManagerProcessFeeTest:test_feeIsUpdatedAfterDiscountIsRemoved() (gas: 49186) +FeeManagerProcessFeeTest:test_feeIsUpdatedAfterNewDiscountIsApplied() (gas: 
66985) +FeeManagerProcessFeeTest:test_feeIsUpdatedAfterNewSurchargeIsApplied() (gas: 63666) +FeeManagerProcessFeeTest:test_feeIsZeroWith100PercentDiscount() (gas: 51688) +FeeManagerProcessFeeTest:test_getBaseRewardWithLinkQuote() (gas: 14364) +FeeManagerProcessFeeTest:test_getLinkFeeIsRoundedUp() (gas: 49472) +FeeManagerProcessFeeTest:test_getLinkRewardIsSameAsFee() (gas: 54936) +FeeManagerProcessFeeTest:test_getLinkRewardWithNativeQuoteAndSurchargeWithLinkDiscount() (gas: 82400) +FeeManagerProcessFeeTest:test_getRewardWithLinkDiscount() (gas: 49297) +FeeManagerProcessFeeTest:test_getRewardWithLinkQuoteAndLinkDiscount() (gas: 49300) +FeeManagerProcessFeeTest:test_getRewardWithNativeQuote() (gas: 17305) +FeeManagerProcessFeeTest:test_getRewardWithNativeQuoteAndSurcharge() (gas: 50487) +FeeManagerProcessFeeTest:test_linkAvailableForPaymentReturnsLinkBalance() (gas: 52419) +FeeManagerProcessFeeTest:test_nativeSurcharge0Percent() (gas: 30497) +FeeManagerProcessFeeTest:test_nativeSurcharge100Percent() (gas: 50512) +FeeManagerProcessFeeTest:test_nativeSurchargeCannotExceed100Percent() (gas: 17167) +FeeManagerProcessFeeTest:test_nativeSurchargeEventIsEmittedOnUpdate() (gas: 41394) +FeeManagerProcessFeeTest:test_noFeeIsAppliedWhenReportHasZeroFee() (gas: 51511) +FeeManagerProcessFeeTest:test_noFeeIsAppliedWhenReportHasZeroFeeAndDiscountAndSurchargeIsSet() (gas: 77739) +FeeManagerProcessFeeTest:test_nonAdminProxyUserCannotProcessFee() (gas: 21881) +FeeManagerProcessFeeTest:test_nonAdminUserCanNotSetDiscount() (gas: 19835) +FeeManagerProcessFeeTest:test_payLinkDeficit() (gas: 193861) +FeeManagerProcessFeeTest:test_payLinkDeficitOnlyCallableByAdmin() (gas: 17405) +FeeManagerProcessFeeTest:test_payLinkDeficitPaysAllFeesProcessed() (gas: 213908) +FeeManagerProcessFeeTest:test_payLinkDeficitTwice() (gas: 198228) +FeeManagerProcessFeeTest:test_processFeeAsProxy() (gas: 116432) +FeeManagerProcessFeeTest:test_processFeeDefaultReportsStillVerifiesWithEmptyQuote() (gas: 27468) 
+FeeManagerProcessFeeTest:test_processFeeEmitsEventIfNotEnoughLink() (gas: 161843) +FeeManagerProcessFeeTest:test_processFeeIfSubscriberIsSelf() (gas: 27822) +FeeManagerProcessFeeTest:test_processFeeNative() (gas: 172464) +FeeManagerProcessFeeTest:test_processFeeUsesCorrectDigest() (gas: 117392) +FeeManagerProcessFeeTest:test_processFeeWithDefaultReportPayloadAndQuoteStillVerifies() (gas: 29542) +FeeManagerProcessFeeTest:test_processFeeWithDiscountEmitsEvent() (gas: 241293) +FeeManagerProcessFeeTest:test_processFeeWithInvalidReportVersionFailsToDecode() (gas: 28517) +FeeManagerProcessFeeTest:test_processFeeWithNoDiscountDoesNotEmitEvent() (gas: 166406) +FeeManagerProcessFeeTest:test_processFeeWithUnwrappedNative() (gas: 179998) +FeeManagerProcessFeeTest:test_processFeeWithUnwrappedNativeLinkAddress() (gas: 131461) +FeeManagerProcessFeeTest:test_processFeeWithUnwrappedNativeLinkAddressExcessiveFee() (gas: 155390) +FeeManagerProcessFeeTest:test_processFeeWithUnwrappedNativeShortFunds() (gas: 92630) +FeeManagerProcessFeeTest:test_processFeeWithUnwrappedNativeWithExcessiveFee() (gas: 186961) +FeeManagerProcessFeeTest:test_processFeeWithWithCorruptQuotePayload() (gas: 70681) +FeeManagerProcessFeeTest:test_processFeeWithWithEmptyQuotePayload() (gas: 27733) +FeeManagerProcessFeeTest:test_processFeeWithWithZeroQuotePayload() (gas: 27783) +FeeManagerProcessFeeTest:test_processFeeWithZeroLinkNonZeroNativeWithLinkQuote() (gas: 32973) +FeeManagerProcessFeeTest:test_processFeeWithZeroLinkNonZeroNativeWithNativeQuote() (gas: 152363) +FeeManagerProcessFeeTest:test_processFeeWithZeroNativeNonZeroLinkReturnsChange() (gas: 53470) +FeeManagerProcessFeeTest:test_processFeeWithZeroNativeNonZeroLinkWithLinkQuote() (gas: 116343) +FeeManagerProcessFeeTest:test_processFeeWithZeroNativeNonZeroLinkWithNativeQuote() (gas: 35744) +FeeManagerProcessFeeTest:test_processMultipleLinkReports() (gas: 221473) +FeeManagerProcessFeeTest:test_processMultipleUnwrappedNativeReports() (gas: 255314) 
+FeeManagerProcessFeeTest:test_processMultipleV1Reports() (gas: 74137) +FeeManagerProcessFeeTest:test_processMultipleWrappedNativeReports() (gas: 238439) +FeeManagerProcessFeeTest:test_processV1V2V3Reports() (gas: 206233) +FeeManagerProcessFeeTest:test_processV1V2V3ReportsWithUnwrapped() (gas: 247907) +FeeManagerProcessFeeTest:test_reportWithNoExpiryOrFeeReturnsZero() (gas: 10770) +FeeManagerProcessFeeTest:test_setDiscountOver100Percent() (gas: 19548) +FeeManagerProcessFeeTest:test_subscriberDiscountEventIsEmittedOnUpdate() (gas: 46259) +FeeManagerProcessFeeTest:test_surchargeFeeRoundsUpWhenUneven() (gas: 50864) +FeeManagerProcessFeeTest:test_surchargeIsApplied() (gas: 50745) +FeeManagerProcessFeeTest:test_surchargeIsAppliedForNativeFeeWithDiscount() (gas: 78900) +FeeManagerProcessFeeTest:test_surchargeIsNoLongerAppliedAfterRemoving() (gas: 46514) +FeeManagerProcessFeeTest:test_surchargeIsNotAppliedForLinkFee() (gas: 49587) +FeeManagerProcessFeeTest:test_surchargeIsNotAppliedWith100PercentDiscount() (gas: 77896) +FeeManagerProcessFeeTest:test_testRevertIfReportHasExpired() (gas: 14908) +RewardManagerClaimTest:test_claimAllRecipients() (gas: 275763) +RewardManagerClaimTest:test_claimMultipleRecipients() (gas: 153308) +RewardManagerClaimTest:test_claimRewardsWithDuplicatePoolIdsDoesNotPayoutTwice() (gas: 328345) +RewardManagerClaimTest:test_claimSingleRecipient() (gas: 88340) +RewardManagerClaimTest:test_claimUnevenAmountRoundsDown() (gas: 313549) +RewardManagerClaimTest:test_claimUnregisteredPoolId() (gas: 34461) +RewardManagerClaimTest:test_claimUnregisteredRecipient() (gas: 40491) +RewardManagerClaimTest:test_eventIsEmittedUponClaim() (gas: 86069) +RewardManagerClaimTest:test_eventIsNotEmittedUponUnsuccessfulClaim() (gas: 24700) +RewardManagerClaimTest:test_recipientsClaimMultipleDeposits() (gas: 383222) +RewardManagerClaimTest:test_singleRecipientClaimMultipleDeposits() (gas: 136295) +RewardManagerNoRecipientSet:test_claimAllRecipientsAfterRecipientsSet() (gas: 
489377) +RewardManagerPayRecipientsTest:test_addFundsToPoolAsNonOwnerOrFeeManager() (gas: 11428) +RewardManagerPayRecipientsTest:test_addFundsToPoolAsOwner() (gas: 53876) +RewardManagerPayRecipientsTest:test_payAllRecipients() (gas: 249472) +RewardManagerPayRecipientsTest:test_payAllRecipientsFromNonAdminUser() (gas: 20475) +RewardManagerPayRecipientsTest:test_payAllRecipientsFromRecipientInPool() (gas: 249718) +RewardManagerPayRecipientsTest:test_payAllRecipientsWithAdditionalInvalidRecipient() (gas: 260922) +RewardManagerPayRecipientsTest:test_payAllRecipientsWithAdditionalUnregisteredRecipient() (gas: 264058) +RewardManagerPayRecipientsTest:test_payRecipientWithInvalidPool() (gas: 28549) +RewardManagerPayRecipientsTest:test_payRecipientsEmptyRecipientList() (gas: 24970) +RewardManagerPayRecipientsTest:test_payRecipientsWithInvalidPoolId() (gas: 31055) +RewardManagerPayRecipientsTest:test_paySingleRecipient() (gas: 84354) +RewardManagerPayRecipientsTest:test_paySubsetOfRecipientsInPool() (gas: 197451) +RewardManagerRecipientClaimDifferentWeightsTest:test_allRecipientsClaimingReceiveExpectedAmount() (gas: 279425) +RewardManagerRecipientClaimMultiplePoolsTest:test_claimAllRecipientsMultiplePools() (gas: 509891) +RewardManagerRecipientClaimMultiplePoolsTest:test_claimAllRecipientsSinglePool() (gas: 281811) +RewardManagerRecipientClaimMultiplePoolsTest:test_claimEmptyPoolWhenSecondPoolContainsFunds() (gas: 291640) +RewardManagerRecipientClaimMultiplePoolsTest:test_claimMultipleRecipientsMultiplePools() (gas: 261591) +RewardManagerRecipientClaimMultiplePoolsTest:test_claimMultipleRecipientsSinglePool() (gas: 153438) +RewardManagerRecipientClaimMultiplePoolsTest:test_claimSingleRecipientMultiplePools() (gas: 131915) +RewardManagerRecipientClaimMultiplePoolsTest:test_claimSingleUniqueRecipient() (gas: 105314) +RewardManagerRecipientClaimMultiplePoolsTest:test_claimUnevenAmountRoundsDown() (gas: 576291) 
+RewardManagerRecipientClaimMultiplePoolsTest:test_claimUnregisteredRecipient() (gas: 63557) +RewardManagerRecipientClaimMultiplePoolsTest:test_getAvailableRewardsCursorAndTotalPoolsEqual() (gas: 10202) +RewardManagerRecipientClaimMultiplePoolsTest:test_getAvailableRewardsCursorCannotBeGreaterThanTotalPools() (gas: 12680) +RewardManagerRecipientClaimMultiplePoolsTest:test_getAvailableRewardsCursorSingleResult() (gas: 19606) +RewardManagerRecipientClaimMultiplePoolsTest:test_getRewardsAvailableToRecipientInBothPools() (gas: 29052) +RewardManagerRecipientClaimMultiplePoolsTest:test_getRewardsAvailableToRecipientInBothPoolsWhereAlreadyClaimed() (gas: 147218) +RewardManagerRecipientClaimMultiplePoolsTest:test_getRewardsAvailableToRecipientInNoPools() (gas: 18532) +RewardManagerRecipientClaimMultiplePoolsTest:test_getRewardsAvailableToRecipientInSinglePool() (gas: 24569) +RewardManagerRecipientClaimMultiplePoolsTest:test_recipientsClaimMultipleDeposits() (gas: 387672) +RewardManagerRecipientClaimMultiplePoolsTest:test_singleRecipientClaimMultipleDeposits() (gas: 136332) +RewardManagerRecipientClaimUnevenWeightTest:test_allRecipientsClaimingReceiveExpectedAmount() (gas: 198399) +RewardManagerRecipientClaimUnevenWeightTest:test_allRecipientsClaimingReceiveExpectedAmountWithSmallDeposit() (gas: 218269) +RewardManagerSetRecipientsTest:test_eventIsEmittedUponSetRecipients() (gas: 191729) +RewardManagerSetRecipientsTest:test_setRecipientContainsDuplicateRecipients() (gas: 126082) +RewardManagerSetRecipientsTest:test_setRewardRecipientFromManagerAddress() (gas: 193880) +RewardManagerSetRecipientsTest:test_setRewardRecipientFromNonOwnerOrFeeManagerAddress() (gas: 21452) +RewardManagerSetRecipientsTest:test_setRewardRecipientTwice() (gas: 193324) +RewardManagerSetRecipientsTest:test_setRewardRecipientWeights() (gas: 180630) +RewardManagerSetRecipientsTest:test_setRewardRecipientWithZeroAddress() (gas: 90224) +RewardManagerSetRecipientsTest:test_setRewardRecipientWithZeroWeight() 
(gas: 191334) +RewardManagerSetRecipientsTest:test_setRewardRecipients() (gas: 185589) +RewardManagerSetRecipientsTest:test_setRewardRecipientsIsEmpty() (gas: 87113) +RewardManagerSetRecipientsTest:test_setSingleRewardRecipient() (gas: 110371) +RewardManagerSetupTest:test_eventEmittedUponFeeManagerUpdate() (gas: 21388) +RewardManagerSetupTest:test_eventEmittedUponFeePaid() (gas: 259121) +RewardManagerSetupTest:test_rejectsZeroLinkAddressOnConstruction() (gas: 59411) +RewardManagerSetupTest:test_setFeeManagerZeroAddress() (gas: 17038) +RewardManagerUpdateRewardRecipientsMultiplePoolsTest:test_updatePrimaryRecipientWeights() (gas: 373525) +RewardManagerUpdateRewardRecipientsTest:test_eventIsEmittedUponUpdateRecipients() (gas: 279119) +RewardManagerUpdateRewardRecipientsTest:test_onlyAdminCanUpdateRecipients() (gas: 19749) +RewardManagerUpdateRewardRecipientsTest:test_partialUpdateRecipientWeights() (gas: 218898) +RewardManagerUpdateRewardRecipientsTest:test_updateAllRecipientsWithSameAddressAndWeight() (gas: 272941) +RewardManagerUpdateRewardRecipientsTest:test_updatePartialRecipientsToSubset() (gas: 254232) +RewardManagerUpdateRewardRecipientsTest:test_updatePartialRecipientsWithExcessiveWeight() (gas: 259219) +RewardManagerUpdateRewardRecipientsTest:test_updatePartialRecipientsWithSameAddressAndWeight() (gas: 148890) +RewardManagerUpdateRewardRecipientsTest:test_updatePartialRecipientsWithUnderWeightSet() (gas: 259293) +RewardManagerUpdateRewardRecipientsTest:test_updateRecipientWeights() (gas: 369006) +RewardManagerUpdateRewardRecipientsTest:test_updateRecipientWithNewZeroAddress() (gas: 270780) +RewardManagerUpdateRewardRecipientsTest:test_updateRecipientsContainsDuplicateRecipients() (gas: 288575) +RewardManagerUpdateRewardRecipientsTest:test_updateRecipientsToDifferentLargerSet() (gas: 407876) +RewardManagerUpdateRewardRecipientsTest:test_updateRecipientsToDifferentPartialSet() (gas: 318029) 
+RewardManagerUpdateRewardRecipientsTest:test_updateRecipientsToDifferentSet() (gas: 377784) +RewardManagerUpdateRewardRecipientsTest:test_updateRecipientsToDifferentSetWithInvalidWeights() (gas: 312122) +RewardManagerUpdateRewardRecipientsTest:test_updateRecipientsUpdateAndRemoveExistingForLargerSet() (gas: 399699) +RewardManagerUpdateRewardRecipientsTest:test_updateRecipientsUpdateAndRemoveExistingForSmallerSet() (gas: 289513) +VerificationdeactivateConfigWhenThereAreMultipleDigestsTest:test_correctlyRemovesAMiddleDigest() (gas: 24177) +VerificationdeactivateConfigWhenThereAreMultipleDigestsTest:test_correctlyRemovesTheFirstDigest() (gas: 24144) +VerificationdeactivateConfigWhenThereAreMultipleDigestsTest:test_correctlyUnsetsDigestsInSequence() (gas: 44109) +VerificationdeactivateConfigWhenThereAreMultipleDigestsTest:test_revertsIfCalledByNonOwner() (gas: 15016) +VerificationdeactivateConfigWhenThereAreMultipleDigestsTest:test_revertsIfRemovingAnEmptyDigest() (gas: 10907) +VerificationdeactivateConfigWhenThereAreMultipleDigestsTest:test_revertsIfRemovingAnNonExistentDigest() (gas: 13381) +VerifierActivateConfigTest:test_revertsIfDigestIsEmpty() (gas: 10984) +VerifierActivateConfigTest:test_revertsIfDigestNotSet() (gas: 13394) +VerifierActivateConfigTest:test_revertsIfNotOwner() (gas: 17171) +VerifierActivateConfigWithDeactivatedConfigTest:test_allowsVerification() (gas: 97164) +VerifierActivateFeedTest:test_revertsIfNoFeedExistsActivate() (gas: 13179) +VerifierActivateFeedTest:test_revertsIfNoFeedExistsDeactivate() (gas: 13157) +VerifierActivateFeedTest:test_revertsIfNotOwnerActivateFeed() (gas: 17098) +VerifierActivateFeedTest:test_revertsIfNotOwnerDeactivateFeed() (gas: 17153) +VerifierBulkVerifyBillingReport:test_verifyMultiVersions() (gas: 475585) +VerifierBulkVerifyBillingReport:test_verifyMultiVersionsReturnsVerifiedReports() (gas: 681857) +VerifierBulkVerifyBillingReport:test_verifyWithBulkLink() (gas: 556863) 
+VerifierBulkVerifyBillingReport:test_verifyWithBulkNative() (gas: 560460) +VerifierBulkVerifyBillingReport:test_verifyWithBulkNativeUnwrapped() (gas: 567951) +VerifierBulkVerifyBillingReport:test_verifyWithBulkNativeUnwrappedReturnsChange() (gas: 574957) +VerifierConstructorTest:test_revertsIfInitializedWithEmptyVerifierProxy() (gas: 59960) +VerifierConstructorTest:test_setsTheCorrectProperties() (gas: 1808155) +VerifierDeactivateFeedWithVerifyTest:test_currentReportAllowsVerification() (gas: 192062) +VerifierDeactivateFeedWithVerifyTest:test_currentReportFailsVerification() (gas: 113377) +VerifierDeactivateFeedWithVerifyTest:test_previousReportAllowsVerification() (gas: 99613) +VerifierDeactivateFeedWithVerifyTest:test_previousReportFailsVerification() (gas: 69932) +VerifierProxyAccessControlledVerificationTest:test_proxiesToTheVerifierIfHasAccess() (gas: 205796) +VerifierProxyAccessControlledVerificationTest:test_revertsIfNoAccess() (gas: 112334) +VerifierProxyConstructorTest:test_correctlySetsTheCorrectAccessControllerInterface() (gas: 1482522) +VerifierProxyConstructorTest:test_correctlySetsTheOwner() (gas: 1462646) +VerifierProxyConstructorTest:test_correctlySetsVersion() (gas: 6873) +VerifierProxyInitializeVerifierTest:test_revertsIfDigestAlreadySet() (gas: 54108) +VerifierProxyInitializeVerifierTest:test_revertsIfNotCorrectVerifier() (gas: 13595) +VerifierProxyInitializeVerifierTest:test_revertsIfNotOwner() (gas: 17157) +VerifierProxyInitializeVerifierTest:test_revertsIfVerifierAlreadyInitialized() (gas: 42025) +VerifierProxyInitializeVerifierTest:test_revertsIfZeroAddress() (gas: 10948) +VerifierProxyInitializeVerifierTest:test_setFeeManagerWhichDoesntHonourIERC165Interface() (gas: 13815) +VerifierProxyInitializeVerifierTest:test_setFeeManagerWhichDoesntHonourInterface() (gas: 16301) +VerifierProxyInitializeVerifierTest:test_setFeeManagerZeroAddress() (gas: 10947) +VerifierProxyInitializeVerifierTest:test_updatesVerifierIfVerifier() (gas: 53406) 
+VerifierProxySetAccessControllerTest:test_emitsTheCorrectEvent() (gas: 35340) +VerifierProxySetAccessControllerTest:test_revertsIfCalledByNonOwner() (gas: 15061) +VerifierProxySetAccessControllerTest:test_successfullySetsNewAccessController() (gas: 32032) +VerifierProxySetAccessControllerTest:test_successfullySetsNewAccessControllerIsEmpty() (gas: 12131) +VerifierProxyUnsetVerifierTest:test_revertsIfDigestDoesNotExist() (gas: 13141) +VerifierProxyUnsetVerifierTest:test_revertsIfNotAdmin() (gas: 14965) +VerifierProxyUnsetVerifierWithPreviouslySetVerifierTest:test_correctlyUnsetsVerifier() (gas: 12720) +VerifierProxyUnsetVerifierWithPreviouslySetVerifierTest:test_emitsAnEventAfterUnsettingVerifier() (gas: 17965) +VerifierProxyVerifyTest:test_proxiesToTheCorrectVerifier() (gas: 201609) +VerifierProxyVerifyTest:test_revertsIfNoVerifierConfigured() (gas: 117256) +VerifierSetConfigFromSourceMultipleDigestsTest:test_correctlySetsConfigWhenDigestsAreRemoved() (gas: 538898) +VerifierSetConfigFromSourceMultipleDigestsTest:test_correctlyUpdatesDigestsOnMultipleVerifiersInTheProxy() (gas: 964730) +VerifierSetConfigFromSourceMultipleDigestsTest:test_correctlyUpdatesTheDigestInTheProxy() (gas: 520482) +VerifierSetConfigFromSourceTest:test_revertsIfCalledByNonOwner() (gas: 183217) +VerifierSetConfigTest:test_correctlyUpdatesTheConfig() (gas: 1057925) +VerifierSetConfigTest:test_revertsIfCalledByNonOwner() (gas: 182986) +VerifierSetConfigTest:test_revertsIfDuplicateSigners() (gas: 251561) +VerifierSetConfigTest:test_revertsIfFaultToleranceIsZero() (gas: 176543) +VerifierSetConfigTest:test_revertsIfNotEnoughSigners() (gas: 15828) +VerifierSetConfigTest:test_revertsIfSetWithTooManySigners() (gas: 22213) +VerifierSetConfigTest:test_revertsIfSignerContainsZeroAddress() (gas: 228034) +VerifierSetConfigWhenThereAreMultipleDigestsTest:test_correctlySetsConfigWhenDigestsAreRemoved() (gas: 538647) 
+VerifierSetConfigWhenThereAreMultipleDigestsTest:test_correctlyUpdatesDigestsOnMultipleVerifiersInTheProxy() (gas: 964219) +VerifierSetConfigWhenThereAreMultipleDigestsTest:test_correctlyUpdatesTheDigestInTheProxy() (gas: 520222) +VerifierSupportsInterfaceTest:test_falseIfIsNotCorrectInterface() (gas: 5590) +VerifierSupportsInterfaceTest:test_trueIfIsCorrectInterface() (gas: 5633) +VerifierTestBillingReport:test_verifyWithLink() (gas: 274948) +VerifierTestBillingReport:test_verifyWithNative() (gas: 315650) +VerifierTestBillingReport:test_verifyWithNativeUnwrapped() (gas: 317898) +VerifierTestBillingReport:test_verifyWithNativeUnwrappedReturnsChange() (gas: 324966) +VerifierVerifyMultipleConfigDigestTest:test_canVerifyNewerReportsWithNewerConfigs() (gas: 131228) +VerifierVerifyMultipleConfigDigestTest:test_canVerifyOlderReportsWithOlderConfigs() (gas: 187132) +VerifierVerifyMultipleConfigDigestTest:test_revertsIfAReportIsVerifiedWithAnExistingButIncorrectDigest() (gas: 88205) +VerifierVerifyMultipleConfigDigestTest:test_revertsIfVerifyingWithAnUnsetDigest() (gas: 128062) +VerifierVerifySingleConfigDigestTest:test_emitsAnEventIfReportVerified() (gas: 186945) +VerifierVerifySingleConfigDigestTest:test_returnsThePriceAndBlockNumIfReportVerified() (gas: 187114) +VerifierVerifySingleConfigDigestTest:test_revertsIfConfigDigestNotSet() (gas: 116130) +VerifierVerifySingleConfigDigestTest:test_revertsIfDuplicateSignersHaveSigned() (gas: 182315) +VerifierVerifySingleConfigDigestTest:test_revertsIfMismatchedSignatureLength() (gas: 53037) +VerifierVerifySingleConfigDigestTest:test_revertsIfReportHasUnconfiguredFeedID() (gas: 103976) +VerifierVerifySingleConfigDigestTest:test_revertsIfVerifiedByNonProxy() (gas: 100992) +VerifierVerifySingleConfigDigestTest:test_revertsIfVerifiedWithIncorrectAddresses() (gas: 184066) +VerifierVerifySingleConfigDigestTest:test_revertsIfWrongNumberOfSigners() (gas: 110031) +VerifierVerifySingleConfigDigestTest:test_setsTheCorrectEpoch() (gas: 
194270) +Verifier_accessControlledVerify:testVerifyWithAccessControl_gas() (gas: 212066) +Verifier_bulkVerifyWithFee:testBulkVerifyProxyWithLinkFeeSuccess_gas() (gas: 519378) +Verifier_bulkVerifyWithFee:testBulkVerifyProxyWithNativeFeeSuccess_gas() (gas: 542797) +Verifier_setConfig:testSetConfigSuccess_gas() (gas: 922684) +Verifier_verify:testVerifyProxySuccess_gas() (gas: 198731) +Verifier_verify:testVerifySuccess_gas() (gas: 186725) +Verifier_verifyWithFee:testVerifyProxyWithLinkFeeSuccess_gas() (gas: 238888) +Verifier_verifyWithFee:testVerifyProxyWithNativeFeeSuccess_gas() (gas: 257388) \ No newline at end of file diff --git a/contracts/gas-snapshots/shared.gas-snapshot b/contracts/gas-snapshots/shared.gas-snapshot new file mode 100644 index 00000000..6f307d25 --- /dev/null +++ b/contracts/gas-snapshots/shared.gas-snapshot @@ -0,0 +1,48 @@ +BurnMintERC677_approve:testApproveSuccess() (gas: 55248) +BurnMintERC677_approve:testInvalidAddressReverts() (gas: 10663) +BurnMintERC677_burn:testBasicBurnSuccess() (gas: 164342) +BurnMintERC677_burn:testBurnFromZeroAddressReverts() (gas: 47201) +BurnMintERC677_burn:testExceedsBalanceReverts() (gas: 21841) +BurnMintERC677_burn:testSenderNotBurnerReverts() (gas: 13359) +BurnMintERC677_burnFrom:testBurnFromSuccess() (gas: 57658) +BurnMintERC677_burnFrom:testExceedsBalanceReverts() (gas: 35864) +BurnMintERC677_burnFrom:testInsufficientAllowanceReverts() (gas: 21849) +BurnMintERC677_burnFrom:testSenderNotBurnerReverts() (gas: 13359) +BurnMintERC677_burnFromAlias:testBurnFromSuccess() (gas: 57684) +BurnMintERC677_burnFromAlias:testExceedsBalanceReverts() (gas: 35880) +BurnMintERC677_burnFromAlias:testInsufficientAllowanceReverts() (gas: 21869) +BurnMintERC677_burnFromAlias:testSenderNotBurnerReverts() (gas: 13379) +BurnMintERC677_constructor:testConstructorSuccess() (gas: 1669109) +BurnMintERC677_decreaseApproval:testDecreaseApprovalSuccess() (gas: 28537) +BurnMintERC677_grantMintAndBurnRoles:testGrantMintAndBurnRolesSuccess() 
(gas: 120071) +BurnMintERC677_grantRole:testGrantBurnAccessSuccess() (gas: 52724) +BurnMintERC677_grantRole:testGrantManySuccess() (gas: 935521) +BurnMintERC677_grantRole:testGrantMintAccessSuccess() (gas: 93605) +BurnMintERC677_increaseApproval:testIncreaseApprovalSuccess() (gas: 40911) +BurnMintERC677_mint:testBasicMintSuccess() (gas: 149365) +BurnMintERC677_mint:testMaxSupplyExceededReverts() (gas: 50385) +BurnMintERC677_mint:testSenderNotMinterReverts() (gas: 11195) +BurnMintERC677_supportsInterface:testConstructorSuccess() (gas: 8685) +BurnMintERC677_transfer:testInvalidAddressReverts() (gas: 10639) +BurnMintERC677_transfer:testTransferSuccess() (gas: 39462) +CallWithExactGas__callWithExactGas:test_CallWithExactGasReceiverErrorSuccess() (gas: 66918) +CallWithExactGas__callWithExactGas:test_CallWithExactGasSafeReturnDataExactGas() (gas: 22615) +CallWithExactGas__callWithExactGas:test_NoContractReverts() (gas: 11559) +CallWithExactGas__callWithExactGas:test_NoGasForCallExactCheckReverts() (gas: 12908) +CallWithExactGas__callWithExactGas:test_NotEnoughGasForCallReverts() (gas: 13361) +CallWithExactGas__callWithExactGas:test_callWithExactGasSuccess(bytes,bytes4) (runs: 256, μ: 15477, ~: 15418) +CallWithExactGas__callWithExactGasEvenIfTargetIsNoContract:test_CallWithExactGasEvenIfTargetIsNoContractExactGasSuccess() (gas: 19147) +CallWithExactGas__callWithExactGasEvenIfTargetIsNoContract:test_CallWithExactGasEvenIfTargetIsNoContractReceiverErrorSuccess() (gas: 67096) +CallWithExactGas__callWithExactGasEvenIfTargetIsNoContract:test_CallWithExactGasEvenIfTargetIsNoContractSuccess(bytes,bytes4) (runs: 256, μ: 15675, ~: 15616) +CallWithExactGas__callWithExactGasEvenIfTargetIsNoContract:test_NoContractSuccess() (gas: 9816) +CallWithExactGas__callWithExactGasEvenIfTargetIsNoContract:test_NoGasForCallExactCheckReturnFalseSuccess() (gas: 9578) +CallWithExactGas__callWithExactGasEvenIfTargetIsNoContract:test_NotEnoughGasForCallReturnsFalseSuccess() (gas: 9890) 
+CallWithExactGas__callWithExactGasSafeReturnData:test_CallWithExactGasSafeReturnDataExactGas() (gas: 19017) +CallWithExactGas__callWithExactGasSafeReturnData:test_NoContractReverts() (gas: 13949) +CallWithExactGas__callWithExactGasSafeReturnData:test_NoGasForCallExactCheckReverts() (gas: 13239) +CallWithExactGas__callWithExactGasSafeReturnData:test_NotEnoughGasForCallReverts() (gas: 13670) +OpStackBurnMintERC677_constructor:testConstructorSuccess() (gas: 1739317) +OpStackBurnMintERC677_interfaceCompatibility:testBurnCompatibility() (gas: 263373) +OpStackBurnMintERC677_interfaceCompatibility:testMintCompatibility() (gas: 137957) +OpStackBurnMintERC677_interfaceCompatibility:testStaticFunctionsCompatibility() (gas: 10622) +OpStackBurnMintERC677_supportsInterface:testConstructorSuccess() (gas: 8961) \ No newline at end of file diff --git a/contracts/hardhat.config.ts b/contracts/hardhat.config.ts new file mode 100644 index 00000000..5306827b --- /dev/null +++ b/contracts/hardhat.config.ts @@ -0,0 +1,148 @@ +import '@nomiclabs/hardhat-ethers' +import '@nomiclabs/hardhat-etherscan' +import '@nomiclabs/hardhat-waffle' +import '@openzeppelin/hardhat-upgrades' +import '@typechain/hardhat' +import 'hardhat-abi-exporter' +import 'hardhat-contract-sizer' +import 'hardhat-gas-reporter' +import 'solidity-coverage' +import 'hardhat-ignore-warnings' +import { subtask } from 'hardhat/config' +import { TASK_COMPILE_SOLIDITY_GET_SOURCE_PATHS } from 'hardhat/builtin-tasks/task-names' + +const COMPILER_SETTINGS = { + optimizer: { + enabled: true, + runs: 1000000, + }, + metadata: { + bytecodeHash: 'none', + }, +} + +// prune forge style tests from hardhat paths +subtask(TASK_COMPILE_SOLIDITY_GET_SOURCE_PATHS).setAction( + async (_, __, runSuper) => { + const paths = await runSuper() + return paths.filter((p: string) => !p.endsWith('.t.sol')) + }, +) + +/** + * @type import('hardhat/config').HardhatUserConfig + */ +let config = { + abiExporter: { + path: './abi', + }, + paths: { + 
artifacts: './artifacts', + cache: './cache', + sources: './src', + tests: './test', + }, + typechain: { + outDir: './typechain', + target: 'ethers-v5', + }, + networks: { + env: { + url: process.env.NODE_HTTP_URL || '', + }, + hardhat: { + allowUnlimitedContractSize: Boolean( + process.env.ALLOW_UNLIMITED_CONTRACT_SIZE, + ), + hardfork: 'merge', + }, + }, + solidity: { + compilers: [ + { + version: '0.4.24', + settings: COMPILER_SETTINGS, + }, + { + version: '0.5.0', + settings: COMPILER_SETTINGS, + }, + { + version: '0.6.6', + settings: COMPILER_SETTINGS, + }, + { + version: '0.7.6', + settings: COMPILER_SETTINGS, + }, + { + version: '0.8.6', + settings: COMPILER_SETTINGS, + }, + { + version: '0.8.15', + settings: COMPILER_SETTINGS, + }, + { + version: '0.8.16', + settings: COMPILER_SETTINGS, + }, + { + version: '0.8.19', + settings: COMPILER_SETTINGS, + }, + ], + overrides: { + 'src/v0.8/vrf/VRFCoordinatorV2.sol': { + version: '0.8.6', + settings: { + optimizer: { + enabled: true, + runs: 10000, // see native_solc_compile_all + }, + metadata: { + bytecodeHash: 'none', + }, + }, + }, + 'src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol': { + version: '0.8.6', + settings: { + optimizer: { + enabled: true, + runs: 50, // see native_solc_compile_all_vrf + }, + metadata: { + bytecodeHash: 'none', + }, + }, + }, + }, + }, + contractSizer: { + alphaSort: true, + runOnCompile: false, + disambiguatePaths: false, + }, + mocha: { + timeout: 100000, + forbidOnly: Boolean(process.env.CI), + }, + gasReporter: { + enabled: Boolean(process.env.REPORT_GAS), + }, + warnings: !process.env.HIDE_WARNINGS, +} + +if (process.env.NETWORK_NAME && process.env.EXPLORER_API_KEY) { + config = { + ...config, + etherscan: { + apiKey: { + [process.env.NETWORK_NAME]: process.env.EXPLORER_API_KEY, + }, + }, + } +} + +export default config diff --git a/contracts/package.json b/contracts/package.json new file mode 100644 index 00000000..b4cf6f3b --- /dev/null +++ b/contracts/package.json @@ -0,0 +1,92 @@ +{ 
+ "name": "@plugin/contracts", + "version": "0.8.0", + "description": "Plugin smart contracts", + "author": "Plugin devs", + "license": "MIT", + "private": false, + "scripts": { + "test": "hardhat test --parallel", + "lint": "eslint --ext js,ts .", + "prettier:check": "prettier '**/*' --check --ignore-unknown", + "prettier:write": "prettier '**/*' --write --ignore-unknown", + "size": "hardhat size-contracts", + "clean": "hardhat clean", + "compile:native": "./scripts/native_solc_compile_all", + "compile": "hardhat compile", + "coverage": "hardhat coverage", + "prepublishOnly": "pnpm compile && ./scripts/prepublish_generate_abi_folder", + "publish-beta": "pnpm publish --tag beta", + "publish-prod": "npm dist-tag add @plugin/contracts@0.8.0 latest", + "solhint": "solhint --max-warnings 20 \"./src/v0.8/**/*.sol\"" + }, + "files": [ + "src/v0.8", + "abi/src/v0.8" + ], + "pnpm": { + "_comment": "See https://github.com/ethers-io/ethers.js/discussions/2849#discussioncomment-2696454", + "_comment2_logger": "See https://github.com/ethers-io/ethers.js/issues/379 we pin this version since that's what was used in the old yarn lockfile", + "overrides": { + "@ethersproject/logger": "5.0.6" + } + }, + "devDependencies": { + "@ethereum-waffle/mock-contract": "^3.4.4", + "@ethersproject/abi": "~5.7.0", + "@ethersproject/bignumber": "~5.7.0", + "@ethersproject/contracts": "~5.7.0", + "@ethersproject/providers": "~5.7.2", + "@ethersproject/random": "~5.7.0", + "@nomicfoundation/hardhat-network-helpers": "^1.0.9", + "@nomiclabs/hardhat-ethers": "^2.2.3", + "@nomiclabs/hardhat-etherscan": "^3.1.7", + "@nomiclabs/hardhat-waffle": "2.0.6", + "@openzeppelin/hardhat-upgrades": "1.28.0", + "@openzeppelin/test-helpers": "^0.5.16", + "@typechain/ethers-v5": "^7.2.0", + "@typechain/hardhat": "^7.0.0", + "@types/cbor": "5.0.1", + "@types/chai": "^4.3.11", + "@types/debug": "^4.1.12", + "@types/deep-equal-in-any-order": "^1.0.3", + "@types/mocha": "^10.0.6", + "@types/node": "^16.18.80", + 
"@typescript-eslint/eslint-plugin": "^6.21.0", + "@typescript-eslint/parser": "^6.21.0", + "abi-to-sol": "^0.6.6", + "cbor": "^5.2.0", + "chai": "^4.3.10", + "debug": "^4.3.4", + "eslint": "^8.56.0", + "eslint-config-prettier": "^9.1.0", + "deep-equal-in-any-order": "^2.0.6", + "eslint-plugin-prettier": "^5.1.3", + "ethereum-waffle": "^3.4.4", + "ethers": "~5.7.2", + "hardhat": "~2.19.2", + "hardhat-abi-exporter": "^2.10.1", + "hardhat-contract-sizer": "^2.10.0", + "hardhat-gas-reporter": "^1.0.9", + "hardhat-ignore-warnings": "^0.2.6", + "istanbul": "^0.4.5", + "moment": "^2.29.4", + "prettier": "^3.2.5", + "prettier-plugin-solidity": "1.3.1", + "rlp": "^2.2.7", + "solhint": "^4.1.1", + "solhint-plugin-plugin-solidity": "git+https://github.com/goplugin/plugin-solhint-rules.git#v1.2.1", + "solhint-plugin-prettier": "^0.1.0", + "solidity-coverage": "^0.8.5", + "ts-node": "^10.9.2", + "tslib": "^2.6.2", + "typechain": "^8.2.1", + "typescript": "^5.3.3" + }, + "dependencies": { + "@eth-optimism/contracts": "0.6.0", + "@scroll-tech/contracts": "0.1.0", + "@openzeppelin/contracts": "4.9.3", + "@openzeppelin/contracts-upgradeable": "4.9.3" + } +} diff --git a/contracts/pnpm-lock.yaml b/contracts/pnpm-lock.yaml new file mode 100644 index 00000000..eed80eca --- /dev/null +++ b/contracts/pnpm-lock.yaml @@ -0,0 +1,12596 @@ +lockfileVersion: '6.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +overrides: + '@ethersproject/logger': 5.0.6 + +dependencies: + '@eth-optimism/contracts': + specifier: 0.6.0 + version: 0.6.0(ethers@5.7.2) + '@openzeppelin/contracts': + specifier: 4.9.3 + version: 4.9.3 + '@openzeppelin/contracts-upgradeable': + specifier: 4.9.3 + version: 4.9.3 + '@scroll-tech/contracts': + specifier: 0.1.0 + version: 0.1.0 + +devDependencies: + '@ethereum-waffle/mock-contract': + specifier: ^3.4.4 + version: 3.4.4 + '@ethersproject/abi': + specifier: ~5.7.0 + version: 5.7.0 + '@ethersproject/bignumber': + specifier: ~5.7.0 + version: 
5.7.0 + '@ethersproject/contracts': + specifier: ~5.7.0 + version: 5.7.0 + '@ethersproject/providers': + specifier: ~5.7.2 + version: 5.7.2 + '@ethersproject/random': + specifier: ~5.7.0 + version: 5.7.0 + '@nomicfoundation/hardhat-network-helpers': + specifier: ^1.0.9 + version: 1.0.10(hardhat@2.19.2) + '@nomiclabs/hardhat-ethers': + specifier: ^2.2.3 + version: 2.2.3(ethers@5.7.2)(hardhat@2.19.2) + '@nomiclabs/hardhat-etherscan': + specifier: ^3.1.7 + version: 3.1.8(hardhat@2.19.2) + '@nomiclabs/hardhat-waffle': + specifier: 2.0.6 + version: 2.0.6(@nomiclabs/hardhat-ethers@2.2.3)(@types/sinon-chai@3.2.8)(ethereum-waffle@3.4.4)(ethers@5.7.2)(hardhat@2.19.2) + '@openzeppelin/hardhat-upgrades': + specifier: 1.28.0 + version: 1.28.0(@nomiclabs/hardhat-ethers@2.2.3)(@nomiclabs/hardhat-etherscan@3.1.8)(ethers@5.7.2)(hardhat@2.19.2) + '@openzeppelin/test-helpers': + specifier: ^0.5.16 + version: 0.5.16(bn.js@4.12.0) + '@typechain/ethers-v5': + specifier: ^7.2.0 + version: 7.2.0(@ethersproject/abi@5.7.0)(@ethersproject/bytes@5.7.0)(@ethersproject/providers@5.7.2)(ethers@5.7.2)(typechain@8.3.2)(typescript@5.3.3) + '@typechain/hardhat': + specifier: ^7.0.0 + version: 7.0.0(@ethersproject/abi@5.7.0)(@ethersproject/providers@5.7.2)(@typechain/ethers-v5@7.2.0)(ethers@5.7.2)(hardhat@2.19.2)(typechain@8.3.2) + '@types/cbor': + specifier: 5.0.1 + version: 5.0.1 + '@types/chai': + specifier: ^4.3.11 + version: 4.3.11 + '@types/debug': + specifier: ^4.1.12 + version: 4.1.12 + '@types/deep-equal-in-any-order': + specifier: ^1.0.3 + version: 1.0.3 + '@types/mocha': + specifier: ^10.0.6 + version: 10.0.6 + '@types/node': + specifier: ^16.18.80 + version: 16.18.80 + '@typescript-eslint/eslint-plugin': + specifier: ^6.21.0 + version: 6.21.0(@typescript-eslint/parser@6.21.0)(eslint@8.56.0)(typescript@5.3.3) + '@typescript-eslint/parser': + specifier: ^6.21.0 + version: 6.21.0(eslint@8.56.0)(typescript@5.3.3) + abi-to-sol: + specifier: ^0.6.6 + version: 0.6.6 + cbor: + specifier: ^5.2.0 
+ version: 5.2.0 + chai: + specifier: ^4.3.10 + version: 4.3.10 + debug: + specifier: ^4.3.4 + version: 4.3.4(supports-color@8.1.1) + deep-equal-in-any-order: + specifier: ^2.0.6 + version: 2.0.6 + eslint: + specifier: ^8.56.0 + version: 8.56.0 + eslint-config-prettier: + specifier: ^9.1.0 + version: 9.1.0(eslint@8.56.0) + eslint-plugin-prettier: + specifier: ^5.1.3 + version: 5.1.3(eslint-config-prettier@9.1.0)(eslint@8.56.0)(prettier@3.2.5) + ethereum-waffle: + specifier: ^3.4.4 + version: 3.4.4(typescript@5.3.3) + ethers: + specifier: ~5.7.2 + version: 5.7.2 + hardhat: + specifier: ~2.19.2 + version: 2.19.2(ts-node@10.9.2)(typescript@5.3.3) + hardhat-abi-exporter: + specifier: ^2.10.1 + version: 2.10.1(hardhat@2.19.2) + hardhat-contract-sizer: + specifier: ^2.10.0 + version: 2.10.0(hardhat@2.19.2) + hardhat-gas-reporter: + specifier: ^1.0.9 + version: 1.0.9(hardhat@2.19.2) + hardhat-ignore-warnings: + specifier: ^0.2.6 + version: 0.2.9 + istanbul: + specifier: ^0.4.5 + version: 0.4.5 + moment: + specifier: ^2.29.4 + version: 2.29.4 + prettier: + specifier: ^3.2.5 + version: 3.2.5 + prettier-plugin-solidity: + specifier: 1.3.1 + version: 1.3.1(prettier@3.2.5) + rlp: + specifier: ^2.2.7 + version: 2.2.7 + solhint: + specifier: ^4.1.1 + version: 4.1.1 + solhint-plugin-plugin-solidity: + specifier: git+https://github.com/goplugin/plugin-solhint-rules.git#v1.2.1 + version: github.com/goplugin/plugin-solhint-rules/1b4c0c2663fcd983589d4f33a2e73908624ed43c + solhint-plugin-prettier: + specifier: ^0.1.0 + version: 0.1.0(prettier-plugin-solidity@1.3.1)(prettier@3.2.5) + solidity-coverage: + specifier: ^0.8.5 + version: 0.8.5(hardhat@2.19.2) + ts-node: + specifier: ^10.9.2 + version: 10.9.2(@types/node@16.18.80)(typescript@5.3.3) + tslib: + specifier: ^2.6.2 + version: 2.6.2 + typechain: + specifier: ^8.2.1 + version: 8.3.2(typescript@5.3.3) + typescript: + specifier: ^5.3.3 + version: 5.3.3 + +packages: + + /@aashutoshrathi/word-wrap@1.2.6: + resolution: {integrity: 
sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==} + engines: {node: '>=0.10.0'} + dev: true + + /@aws-crypto/sha256-js@1.2.2: + resolution: {integrity: sha512-Nr1QJIbW/afYYGzYvrF70LtaHrIRtd4TNAglX8BvlfxJLZ45SAmueIKYl5tWoNBPzp65ymXGFK0Bb1vZUpuc9g==} + dependencies: + '@aws-crypto/util': 1.2.2 + '@aws-sdk/types': 3.468.0 + tslib: 1.14.1 + dev: true + + /@aws-crypto/util@1.2.2: + resolution: {integrity: sha512-H8PjG5WJ4wz0UXAFXeJjWCW1vkvIJ3qUUD+rGRwJ2/hj+xT58Qle2MTql/2MGzkU+1JLAFuR6aJpLAjHwhmwwg==} + dependencies: + '@aws-sdk/types': 3.468.0 + '@aws-sdk/util-utf8-browser': 3.259.0 + tslib: 1.14.1 + dev: true + + /@aws-sdk/types@3.468.0: + resolution: {integrity: sha512-rx/9uHI4inRbp2tw3Y4Ih4PNZkVj32h7WneSg3MVgVjAoVD5Zti9KhS5hkvsBxfgmQmg0AQbE+b1sy5WGAgntA==} + engines: {node: '>=14.0.0'} + dependencies: + '@smithy/types': 2.7.0 + tslib: 2.6.2 + dev: true + + /@aws-sdk/util-utf8-browser@3.259.0: + resolution: {integrity: sha512-UvFa/vR+e19XookZF8RzFZBrw2EUkQWxiBW0yYQAhvk3C+QVGl0H3ouca8LDBlBfQKXwmW3huo/59H8rwb1wJw==} + dependencies: + tslib: 2.6.2 + dev: true + + /@babel/code-frame@7.18.6: + resolution: {integrity: sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/highlight': 7.18.6 + dev: true + + /@babel/helper-validator-identifier@7.19.1: + resolution: {integrity: sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==} + engines: {node: '>=6.9.0'} + dev: true + + /@babel/highlight@7.18.6: + resolution: {integrity: sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==} + engines: {node: '>=6.9.0'} + dependencies: + '@babel/helper-validator-identifier': 7.19.1 + chalk: 2.4.2 + js-tokens: 4.0.0 + dev: true + + /@babel/runtime@7.19.0: + resolution: {integrity: 
sha512-eR8Lo9hnDS7tqkO7NsV+mKvCmv5boaXFSZ70DnfhcgiEne8hv9oCEd36Klw74EtizEqLsy4YnW8UWwpBVolHZA==} + engines: {node: '>=6.9.0'} + dependencies: + regenerator-runtime: 0.13.9 + dev: true + + /@chainsafe/as-sha256@0.3.1: + resolution: {integrity: sha512-hldFFYuf49ed7DAakWVXSJODuq3pzJEguD8tQ7h+sGkM18vja+OFoJI9krnGmgzyuZC2ETX0NOIcCTy31v2Mtg==} + dev: true + + /@chainsafe/persistent-merkle-tree@0.4.2: + resolution: {integrity: sha512-lLO3ihKPngXLTus/L7WHKaw9PnNJWizlOF1H9NNzHP6Xvh82vzg9F2bzkXhYIFshMZ2gTCEz8tq6STe7r5NDfQ==} + dependencies: + '@chainsafe/as-sha256': 0.3.1 + dev: true + + /@chainsafe/persistent-merkle-tree@0.5.0: + resolution: {integrity: sha512-l0V1b5clxA3iwQLXP40zYjyZYospQLZXzBVIhhr9kDg/1qHZfzzHw0jj4VPBijfYCArZDlPkRi1wZaV2POKeuw==} + dependencies: + '@chainsafe/as-sha256': 0.3.1 + dev: true + + /@chainsafe/ssz@0.10.2: + resolution: {integrity: sha512-/NL3Lh8K+0q7A3LsiFq09YXS9fPE+ead2rr7vM2QK8PLzrNsw3uqrif9bpRX5UxgeRjM+vYi+boCM3+GM4ovXg==} + dependencies: + '@chainsafe/as-sha256': 0.3.1 + '@chainsafe/persistent-merkle-tree': 0.5.0 + dev: true + + /@chainsafe/ssz@0.9.4: + resolution: {integrity: sha512-77Qtg2N1ayqs4Bg/wvnWfg5Bta7iy7IRh8XqXh7oNMeP2HBbBwx8m6yTpA8p0EHItWPEBkgZd5S5/LSlp3GXuQ==} + dependencies: + '@chainsafe/as-sha256': 0.3.1 + '@chainsafe/persistent-merkle-tree': 0.4.2 + case: 1.6.3 + dev: true + + /@colors/colors@1.5.0: + resolution: {integrity: sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==} + engines: {node: '>=0.1.90'} + requiresBuild: true + dev: true + optional: true + + /@cspotcode/source-map-support@0.8.1: + resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} + engines: {node: '>=12'} + dependencies: + '@jridgewell/trace-mapping': 0.3.9 + dev: true + + /@ensdomains/address-encoder@0.1.9: + resolution: {integrity: sha512-E2d2gP4uxJQnDu2Kfg1tHNspefzbLT8Tyjrm5sEuim32UkU2sm5xL4VXtgc2X33fmPEw9+jUMpGs4veMbf+PYg==} + 
dependencies: + bech32: 1.1.4 + blakejs: 1.2.1 + bn.js: 4.12.0 + bs58: 4.0.1 + crypto-addr-codec: 0.1.7 + nano-base32: 1.0.1 + ripemd160: 2.0.2 + dev: true + + /@ensdomains/ens@0.4.5: + resolution: {integrity: sha512-JSvpj1iNMFjK6K+uVl4unqMoa9rf5jopb8cya5UGBWz23Nw8hSNT7efgUx4BTlAPAgpNlEioUfeTyQ6J9ZvTVw==} + deprecated: Please use @ensdomains/ens-contracts + dependencies: + bluebird: 3.7.2 + eth-ens-namehash: 2.0.8 + solc: 0.4.26 + testrpc: 0.0.1 + web3-utils: 1.8.0 + dev: true + + /@ensdomains/ensjs@2.1.0: + resolution: {integrity: sha512-GRbGPT8Z/OJMDuxs75U/jUNEC0tbL0aj7/L/QQznGYKm/tiasp+ndLOaoULy9kKJFC0TBByqfFliEHDgoLhyog==} + dependencies: + '@babel/runtime': 7.19.0 + '@ensdomains/address-encoder': 0.1.9 + '@ensdomains/ens': 0.4.5 + '@ensdomains/resolver': 0.2.4 + content-hash: 2.5.2 + eth-ens-namehash: 2.0.8 + ethers: 5.7.2 + js-sha3: 0.8.0 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + dev: true + + /@ensdomains/resolver@0.2.4: + resolution: {integrity: sha512-bvaTH34PMCbv6anRa9I/0zjLJgY4EuznbEMgbV77JBCQ9KNC46rzi0avuxpOfu+xDjPEtSFGqVEOr5GlUSGudA==} + deprecated: Please use @ensdomains/ens-contracts + dev: true + + /@eslint-community/eslint-utils@4.4.0(eslint@8.56.0): + resolution: {integrity: sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + dependencies: + eslint: 8.56.0 + eslint-visitor-keys: 3.4.3 + dev: true + + /@eslint-community/regexpp@4.9.1: + resolution: {integrity: sha512-Y27x+MBLjXa+0JWDhykM3+JE+il3kHKAEqabfEWq3SDhZjLYb6/BHL/JKFnH3fe207JaXkyDo685Oc2Glt6ifA==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + dev: true + + /@eslint/eslintrc@2.1.4: + resolution: {integrity: sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + ajv: 6.12.6 + debug: 
4.3.4(supports-color@8.1.1) + espree: 9.6.1 + globals: 13.20.0 + ignore: 5.2.4 + import-fresh: 3.3.0 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + dev: true + + /@eslint/js@8.56.0: + resolution: {integrity: sha512-gMsVel9D7f2HLkBma9VbtzZRehRogVRfbr++f06nL2vnCGCNlzOD+/MUov/F4p8myyAHspEhVobgjpX64q5m6A==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: true + + /@eth-optimism/contracts@0.6.0(ethers@5.7.2): + resolution: {integrity: sha512-vQ04wfG9kMf1Fwy3FEMqH2QZbgS0gldKhcBeBUPfO8zu68L61VI97UDXmsMQXzTsEAxK8HnokW3/gosl4/NW3w==} + peerDependencies: + ethers: ^5 + dependencies: + '@eth-optimism/core-utils': 0.12.0 + '@ethersproject/abstract-provider': 5.7.0 + '@ethersproject/abstract-signer': 5.7.0 + ethers: 5.7.2 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + dev: false + + /@eth-optimism/core-utils@0.12.0: + resolution: {integrity: sha512-qW+7LZYCz7i8dRa7SRlUKIo1VBU8lvN0HeXCxJR+z+xtMzMQpPds20XJNCMclszxYQHkXY00fOT6GvFw9ZL6nw==} + dependencies: + '@ethersproject/abi': 5.7.0 + '@ethersproject/abstract-provider': 5.7.0 + '@ethersproject/address': 5.7.0 + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/constants': 5.7.0 + '@ethersproject/contracts': 5.7.0 + '@ethersproject/hash': 5.7.0 + '@ethersproject/keccak256': 5.7.0 + '@ethersproject/properties': 5.7.0 + '@ethersproject/providers': 5.7.2 + '@ethersproject/rlp': 5.7.0 + '@ethersproject/transactions': 5.7.0 + '@ethersproject/web': 5.7.1 + bufio: 1.0.7 + chai: 4.3.10 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + dev: false + + /@ethereum-waffle/chai@3.4.4: + resolution: {integrity: sha512-/K8czydBtXXkcM9X6q29EqEkc5dN3oYenyH2a9hF7rGAApAJUpH8QBtojxOY/xQ2up5W332jqgxwp0yPiYug1g==} + engines: {node: '>=10.0'} + dependencies: + '@ethereum-waffle/provider': 3.4.4 + ethers: 5.7.2 + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - utf-8-validate + 
dev: true + + /@ethereum-waffle/compiler@3.4.4(typescript@5.3.3): + resolution: {integrity: sha512-RUK3axJ8IkD5xpWjWoJgyHclOeEzDLQFga6gKpeGxiS/zBu+HB0W2FvsrrLalTFIaPw/CGYACRBSIxqiCqwqTQ==} + engines: {node: '>=10.0'} + dependencies: + '@resolver-engine/imports': 0.3.3 + '@resolver-engine/imports-fs': 0.3.3 + '@typechain/ethers-v5': 2.0.0(ethers@5.7.2)(typechain@3.0.0) + '@types/mkdirp': 0.5.2 + '@types/node-fetch': 2.6.2 + ethers: 5.7.2 + mkdirp: 0.5.6 + node-fetch: 2.6.7 + solc: 0.6.12 + ts-generator: 0.1.1 + typechain: 3.0.0(typescript@5.3.3) + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - typescript + - utf-8-validate + dev: true + + /@ethereum-waffle/ens@3.4.4: + resolution: {integrity: sha512-0m4NdwWxliy3heBYva1Wr4WbJKLnwXizmy5FfSSr5PMbjI7SIGCdCB59U7/ZzY773/hY3bLnzLwvG5mggVjJWg==} + engines: {node: '>=10.0'} + dependencies: + '@ensdomains/ens': 0.4.5 + '@ensdomains/resolver': 0.2.4 + ethers: 5.7.2 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + dev: true + + /@ethereum-waffle/mock-contract@3.4.4: + resolution: {integrity: sha512-Mp0iB2YNWYGUV+VMl5tjPsaXKbKo8MDH9wSJ702l9EBjdxFf/vBvnMBAC1Fub1lLtmD0JHtp1pq+mWzg/xlLnA==} + engines: {node: '>=10.0'} + dependencies: + '@ethersproject/abi': 5.7.0 + ethers: 5.7.2 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + dev: true + + /@ethereum-waffle/provider@3.4.4: + resolution: {integrity: sha512-GK8oKJAM8+PKy2nK08yDgl4A80mFuI8zBkE0C9GqTRYQqvuxIyXoLmJ5NZU9lIwyWVv5/KsoA11BgAv2jXE82g==} + engines: {node: '>=10.0'} + dependencies: + '@ethereum-waffle/ens': 3.4.4 + ethers: 5.7.2 + ganache-core: 2.13.2 + patch-package: 6.4.7 + postinstall-postinstall: 2.1.0 + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - utf-8-validate + dev: true + + /@ethereumjs/common@2.6.5: + resolution: {integrity: sha512-lRyVQOeCDaIVtgfbowla32pzeDv2Obr8oR8Put5RdUBNRGr1VGPGQNGP6elWIpgK3YdpzqTOh4GyUGOureVeeA==} + dependencies: + crc-32: 1.2.2 + 
ethereumjs-util: 7.1.5 + dev: true + + /@ethereumjs/tx@3.5.2: + resolution: {integrity: sha512-gQDNJWKrSDGu2w7w0PzVXVBNMzb7wwdDOmOqczmhNjqFxFuIbhVJDwiGEnxFNC2/b8ifcZzY7MLcluizohRzNw==} + dependencies: + '@ethereumjs/common': 2.6.5 + ethereumjs-util: 7.1.5 + dev: true + + /@ethersproject/abi@5.0.0-beta.153: + resolution: {integrity: sha512-aXweZ1Z7vMNzJdLpR1CZUAIgnwjrZeUSvN9syCwlBaEBUFJmFY+HHnfuTI5vIhVs/mRkfJVrbEyl51JZQqyjAg==} + requiresBuild: true + dependencies: + '@ethersproject/address': 5.7.0 + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/constants': 5.7.0 + '@ethersproject/hash': 5.7.0 + '@ethersproject/keccak256': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/properties': 5.7.0 + '@ethersproject/strings': 5.7.0 + dev: true + optional: true + + /@ethersproject/abi@5.7.0: + resolution: {integrity: sha512-351ktp42TiRcYB3H1OP8yajPeAQstMW/yCFokj/AthP9bLHzQFPlOrxOcwYEDkUAICmOHljvN4K39OMTMUa9RA==} + dependencies: + '@ethersproject/address': 5.7.0 + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/constants': 5.7.0 + '@ethersproject/hash': 5.7.0 + '@ethersproject/keccak256': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/properties': 5.7.0 + '@ethersproject/strings': 5.7.0 + + /@ethersproject/abstract-provider@5.7.0: + resolution: {integrity: sha512-R41c9UkchKCpAqStMYUpdunjo3pkEvZC3FAwZn5S5MGbXoMQOHIdHItezTETxAO5bevtMApSyEhn9+CHcDsWBw==} + dependencies: + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/networks': 5.7.1 + '@ethersproject/properties': 5.7.0 + '@ethersproject/transactions': 5.7.0 + '@ethersproject/web': 5.7.1 + + /@ethersproject/abstract-signer@5.7.0: + resolution: {integrity: sha512-a16V8bq1/Cz+TGCkE2OPMTOUDLS3grCpdjoJCYNnVBbdYEMSgKrU0+B90s8b6H+ByYTBZN7a3g76jdIJi7UfKQ==} + dependencies: + '@ethersproject/abstract-provider': 5.7.0 + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 
5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/properties': 5.7.0 + + /@ethersproject/address@5.7.0: + resolution: {integrity: sha512-9wYhYt7aghVGo758POM5nqcOMaE168Q6aRLJZwUmiqSrAungkG74gSSeKEIR7ukixesdRZGPgVqme6vmxs1fkA==} + dependencies: + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/keccak256': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/rlp': 5.7.0 + + /@ethersproject/base64@5.7.0: + resolution: {integrity: sha512-Dr8tcHt2mEbsZr/mwTPIQAf3Ai0Bks/7gTw9dSqk1mQvhW3XvRlmDJr/4n+wg1JmCl16NZue17CDh8xb/vZ0sQ==} + dependencies: + '@ethersproject/bytes': 5.7.0 + + /@ethersproject/basex@5.7.0: + resolution: {integrity: sha512-ywlh43GwZLv2Voc2gQVTKBoVQ1mti3d8HK5aMxsfu/nRDnMmNqaSJ3r3n85HBByT8OpoY96SXM1FogC533T4zw==} + dependencies: + '@ethersproject/bytes': 5.7.0 + '@ethersproject/properties': 5.7.0 + + /@ethersproject/bignumber@5.7.0: + resolution: {integrity: sha512-n1CAdIHRWjSucQO3MC1zPSVgV/6dy/fjL9pMrPP9peL+QxEg9wOsVqwD4+818B6LUEtaXzVHQiuivzRoxPxUGw==} + dependencies: + '@ethersproject/bytes': 5.7.0 + '@ethersproject/logger': 5.0.6 + bn.js: 5.2.1 + + /@ethersproject/bytes@5.7.0: + resolution: {integrity: sha512-nsbxwgFXWh9NyYWo+U8atvmMsSdKJprTcICAkvbBffT75qDocbuggBU0SJiVK2MuTrp0q+xvLkTnGMPK1+uA9A==} + dependencies: + '@ethersproject/logger': 5.0.6 + + /@ethersproject/constants@5.7.0: + resolution: {integrity: sha512-DHI+y5dBNvkpYUMiRQyxRBYBefZkJfo70VUkUAsRjcPs47muV9evftfZ0PJVCXYbAiCgght0DtcF9srFQmIgWA==} + dependencies: + '@ethersproject/bignumber': 5.7.0 + + /@ethersproject/contracts@5.7.0: + resolution: {integrity: sha512-5GJbzEU3X+d33CdfPhcyS+z8MzsTrBGk/sc+G+59+tPa9yFkl6HQ9D6L0QMgNTA9q8dT0XKxxkyp883XsQvbbg==} + dependencies: + '@ethersproject/abi': 5.7.0 + '@ethersproject/abstract-provider': 5.7.0 + '@ethersproject/abstract-signer': 5.7.0 + '@ethersproject/address': 5.7.0 + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/constants': 5.7.0 + '@ethersproject/logger': 5.0.6 
+ '@ethersproject/properties': 5.7.0 + '@ethersproject/transactions': 5.7.0 + + /@ethersproject/hash@5.7.0: + resolution: {integrity: sha512-qX5WrQfnah1EFnO5zJv1v46a8HW0+E5xuBBDTwMFZLuVTx0tbU2kkx15NqdjxecrLGatQN9FGQKpb1FKdHCt+g==} + dependencies: + '@ethersproject/abstract-signer': 5.7.0 + '@ethersproject/address': 5.7.0 + '@ethersproject/base64': 5.7.0 + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/keccak256': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/properties': 5.7.0 + '@ethersproject/strings': 5.7.0 + + /@ethersproject/hdnode@5.7.0: + resolution: {integrity: sha512-OmyYo9EENBPPf4ERhR7oj6uAtUAhYGqOnIS+jE5pTXvdKBS99ikzq1E7Iv0ZQZ5V36Lqx1qZLeak0Ra16qpeOg==} + dependencies: + '@ethersproject/abstract-signer': 5.7.0 + '@ethersproject/basex': 5.7.0 + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/pbkdf2': 5.7.0 + '@ethersproject/properties': 5.7.0 + '@ethersproject/sha2': 5.7.0 + '@ethersproject/signing-key': 5.7.0 + '@ethersproject/strings': 5.7.0 + '@ethersproject/transactions': 5.7.0 + '@ethersproject/wordlists': 5.7.0 + + /@ethersproject/json-wallets@5.7.0: + resolution: {integrity: sha512-8oee5Xgu6+RKgJTkvEMl2wDgSPSAQ9MB/3JYjFV9jlKvcYHUXZC+cQp0njgmxdHkYWn8s6/IqIZYm0YWCjO/0g==} + dependencies: + '@ethersproject/abstract-signer': 5.7.0 + '@ethersproject/address': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/hdnode': 5.7.0 + '@ethersproject/keccak256': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/pbkdf2': 5.7.0 + '@ethersproject/properties': 5.7.0 + '@ethersproject/random': 5.7.0 + '@ethersproject/strings': 5.7.0 + '@ethersproject/transactions': 5.7.0 + aes-js: 3.0.0 + scrypt-js: 3.0.1 + + /@ethersproject/keccak256@5.7.0: + resolution: {integrity: sha512-2UcPboeL/iW+pSg6vZ6ydF8tCnv3Iu/8tUmLLzWWGzxWKFFqOBQFLo6uLUv6BDrLgCDfN28RJ/wtByx+jZ4KBg==} + dependencies: + '@ethersproject/bytes': 5.7.0 + js-sha3: 0.8.0 + + 
/@ethersproject/logger@5.0.6: + resolution: {integrity: sha512-FrX0Vnb3JZ1md/7GIZfmJ06XOAA8r3q9Uqt9O5orr4ZiksnbpXKlyDzQtlZ5Yv18RS8CAUbiKH9vwidJg1BPmQ==} + + /@ethersproject/networks@5.7.1: + resolution: {integrity: sha512-n/MufjFYv3yFcUyfhnXotyDlNdFb7onmkSy8aQERi2PjNcnWQ66xXxa3XlS8nCcA8aJKJjIIMNJTC7tu80GwpQ==} + dependencies: + '@ethersproject/logger': 5.0.6 + + /@ethersproject/pbkdf2@5.7.0: + resolution: {integrity: sha512-oR/dBRZR6GTyaofd86DehG72hY6NpAjhabkhxgr3X2FpJtJuodEl2auADWBZfhDHgVCbu3/H/Ocq2uC6dpNjjw==} + dependencies: + '@ethersproject/bytes': 5.7.0 + '@ethersproject/sha2': 5.7.0 + + /@ethersproject/properties@5.7.0: + resolution: {integrity: sha512-J87jy8suntrAkIZtecpxEPxY//szqr1mlBaYlQ0r4RCaiD2hjheqF9s1LVE8vVuJCXisjIP+JgtK/Do54ej4Sw==} + dependencies: + '@ethersproject/logger': 5.0.6 + + /@ethersproject/providers@5.7.2: + resolution: {integrity: sha512-g34EWZ1WWAVgr4aptGlVBF8mhl3VWjv+8hoAnzStu8Ah22VHBsuGzP17eb6xDVRzw895G4W7vvx60lFFur/1Rg==} + dependencies: + '@ethersproject/abstract-provider': 5.7.0 + '@ethersproject/abstract-signer': 5.7.0 + '@ethersproject/address': 5.7.0 + '@ethersproject/base64': 5.7.0 + '@ethersproject/basex': 5.7.0 + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/constants': 5.7.0 + '@ethersproject/hash': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/networks': 5.7.1 + '@ethersproject/properties': 5.7.0 + '@ethersproject/random': 5.7.0 + '@ethersproject/rlp': 5.7.0 + '@ethersproject/sha2': 5.7.0 + '@ethersproject/strings': 5.7.0 + '@ethersproject/transactions': 5.7.0 + '@ethersproject/web': 5.7.1 + bech32: 1.1.4 + ws: 7.4.6 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + + /@ethersproject/random@5.7.0: + resolution: {integrity: sha512-19WjScqRA8IIeWclFme75VMXSBvi4e6InrUNuaR4s5pTF2qNhcGdCUwdxUVGtDDqC00sDLCO93jPQoDUH4HVmQ==} + dependencies: + '@ethersproject/bytes': 5.7.0 + '@ethersproject/logger': 5.0.6 + + /@ethersproject/rlp@5.7.0: + resolution: {integrity: 
sha512-rBxzX2vK8mVF7b0Tol44t5Tb8gomOHkj5guL+HhzQ1yBh/ydjGnpw6at+X6Iw0Kp3OzzzkcKp8N9r0W4kYSs9w==} + dependencies: + '@ethersproject/bytes': 5.7.0 + '@ethersproject/logger': 5.0.6 + + /@ethersproject/sha2@5.7.0: + resolution: {integrity: sha512-gKlH42riwb3KYp0reLsFTokByAKoJdgFCwI+CCiX/k+Jm2mbNs6oOaCjYQSlI1+XBVejwH2KrmCbMAT/GnRDQw==} + dependencies: + '@ethersproject/bytes': 5.7.0 + '@ethersproject/logger': 5.0.6 + hash.js: 1.1.7 + + /@ethersproject/signing-key@5.7.0: + resolution: {integrity: sha512-MZdy2nL3wO0u7gkB4nA/pEf8lu1TlFswPNmy8AiYkfKTdO6eXBJyUdmHO/ehm/htHw9K/qF8ujnTyUAD+Ry54Q==} + dependencies: + '@ethersproject/bytes': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/properties': 5.7.0 + bn.js: 5.2.1 + elliptic: 6.5.4 + hash.js: 1.1.7 + + /@ethersproject/solidity@5.7.0: + resolution: {integrity: sha512-HmabMd2Dt/raavyaGukF4XxizWKhKQ24DoLtdNbBmNKUOPqwjsKQSdV9GQtj9CBEea9DlzETlVER1gYeXXBGaA==} + dependencies: + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/keccak256': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/sha2': 5.7.0 + '@ethersproject/strings': 5.7.0 + + /@ethersproject/strings@5.7.0: + resolution: {integrity: sha512-/9nu+lj0YswRNSH0NXYqrh8775XNyEdUQAuf3f+SmOrnVewcJ5SBNAjF7lpgehKi4abvNNXyf+HX86czCdJ8Mg==} + dependencies: + '@ethersproject/bytes': 5.7.0 + '@ethersproject/constants': 5.7.0 + '@ethersproject/logger': 5.0.6 + + /@ethersproject/transactions@5.7.0: + resolution: {integrity: sha512-kmcNicCp1lp8qanMTC3RIikGgoJ80ztTyvtsFvCYpSCfkjhD0jZ2LOrnbcuxuToLIUYYf+4XwD1rP+B/erDIhQ==} + dependencies: + '@ethersproject/address': 5.7.0 + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/constants': 5.7.0 + '@ethersproject/keccak256': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/properties': 5.7.0 + '@ethersproject/rlp': 5.7.0 + '@ethersproject/signing-key': 5.7.0 + + /@ethersproject/units@5.7.0: + resolution: {integrity: 
sha512-pD3xLMy3SJu9kG5xDGI7+xhTEmGXlEqXU4OfNapmfnxLVY4EMSSRp7j1k7eezutBPH7RBN/7QPnwR7hzNlEFeg==} + dependencies: + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/constants': 5.7.0 + '@ethersproject/logger': 5.0.6 + + /@ethersproject/wallet@5.7.0: + resolution: {integrity: sha512-MhmXlJXEJFBFVKrDLB4ZdDzxcBxQ3rLyCkhNqVu3CDYvR97E+8r01UgrI+TI99Le+aYm/in/0vp86guJuM7FCA==} + dependencies: + '@ethersproject/abstract-provider': 5.7.0 + '@ethersproject/abstract-signer': 5.7.0 + '@ethersproject/address': 5.7.0 + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/hash': 5.7.0 + '@ethersproject/hdnode': 5.7.0 + '@ethersproject/json-wallets': 5.7.0 + '@ethersproject/keccak256': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/properties': 5.7.0 + '@ethersproject/random': 5.7.0 + '@ethersproject/signing-key': 5.7.0 + '@ethersproject/transactions': 5.7.0 + '@ethersproject/wordlists': 5.7.0 + + /@ethersproject/web@5.7.1: + resolution: {integrity: sha512-Gueu8lSvyjBWL4cYsWsjh6MtMwM0+H4HvqFPZfB6dV8ctbP9zFAO73VG1cMWae0FLPCtz0peKPpZY8/ugJJX2w==} + dependencies: + '@ethersproject/base64': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/properties': 5.7.0 + '@ethersproject/strings': 5.7.0 + + /@ethersproject/wordlists@5.7.0: + resolution: {integrity: sha512-S2TFNJNfHWVHNE6cNDjbVlZ6MgE17MIxMbMg2zv3wn+3XSJGosL1m9ZVv3GXCf/2ymSsQ+hRI5IzoMJTG6aoVA==} + dependencies: + '@ethersproject/bytes': 5.7.0 + '@ethersproject/hash': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/properties': 5.7.0 + '@ethersproject/strings': 5.7.0 + + /@humanwhocodes/config-array@0.11.13: + resolution: {integrity: sha512-JSBDMiDKSzQVngfRjOdFXgFfklaXI4K9nLF49Auh21lmBWRLIK3+xTErTWD4KU54pb6coM6ESE7Awz/FNU3zgQ==} + engines: {node: '>=10.10.0'} + dependencies: + '@humanwhocodes/object-schema': 2.0.1 + debug: 4.3.4(supports-color@8.1.1) + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + dev: true + + 
/@humanwhocodes/module-importer@1.0.1: + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + dev: true + + /@humanwhocodes/object-schema@2.0.1: + resolution: {integrity: sha512-dvuCeX5fC9dXgJn9t+X5atfmgQAzUOWqS1254Gh0m6i8wKd10ebXkfNKiRK+1GWi/yTvvLDHpoxLr0xxxeslWw==} + dev: true + + /@jridgewell/resolve-uri@3.1.1: + resolution: {integrity: sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==} + engines: {node: '>=6.0.0'} + dev: true + + /@jridgewell/sourcemap-codec@1.4.15: + resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} + dev: true + + /@jridgewell/trace-mapping@0.3.9: + resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} + dependencies: + '@jridgewell/resolve-uri': 3.1.1 + '@jridgewell/sourcemap-codec': 1.4.15 + dev: true + + /@metamask/eth-sig-util@4.0.1: + resolution: {integrity: sha512-tghyZKLHZjcdlDqCA3gNZmLeR0XvOE9U1qoQO9ohyAZT6Pya+H9vkBPcsyXytmYLNgVoin7CKCmweo/R43V+tQ==} + engines: {node: '>=12.0.0'} + dependencies: + ethereumjs-abi: 0.6.8 + ethereumjs-util: 6.2.1 + ethjs-util: 0.1.6 + tweetnacl: 1.0.3 + tweetnacl-util: 0.15.1 + dev: true + + /@noble/hashes@1.1.2: + resolution: {integrity: sha512-KYRCASVTv6aeUi1tsF8/vpyR7zpfs3FUzy2Jqm+MU+LmUKhQ0y2FpfwqkCcxSg2ua4GALJd8k2R76WxwZGbQpA==} + dev: true + + /@noble/secp256k1@1.6.3: + resolution: {integrity: sha512-T04e4iTurVy7I8Sw4+c5OSN9/RkPlo1uKxAomtxQNLq8j1uPAqnsqG1bqvY3Jv7c13gyr6dui0zmh/I3+f/JaQ==} + dev: true + + /@nodelib/fs.scandir@2.1.5: + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.1.9 + dev: true + + /@nodelib/fs.stat@2.0.5: + resolution: {integrity: 
sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + dev: true + + /@nodelib/fs.walk@1.2.8: + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.6.0 + dev: true + + /@nomicfoundation/ethereumjs-block@5.0.2: + resolution: {integrity: sha512-hSe6CuHI4SsSiWWjHDIzWhSiAVpzMUcDRpWYzN0T9l8/Rz7xNn3elwVOJ/tAyS0LqL6vitUD78Uk7lQDXZun7Q==} + engines: {node: '>=14'} + dependencies: + '@nomicfoundation/ethereumjs-common': 4.0.2 + '@nomicfoundation/ethereumjs-rlp': 5.0.2 + '@nomicfoundation/ethereumjs-trie': 6.0.2 + '@nomicfoundation/ethereumjs-tx': 5.0.2 + '@nomicfoundation/ethereumjs-util': 9.0.2 + ethereum-cryptography: 0.1.3 + ethers: 5.7.2 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + dev: true + + /@nomicfoundation/ethereumjs-blockchain@7.0.2: + resolution: {integrity: sha512-8UUsSXJs+MFfIIAKdh3cG16iNmWzWC/91P40sazNvrqhhdR/RtGDlFk2iFTGbBAZPs2+klZVzhRX8m2wvuvz3w==} + engines: {node: '>=14'} + dependencies: + '@nomicfoundation/ethereumjs-block': 5.0.2 + '@nomicfoundation/ethereumjs-common': 4.0.2 + '@nomicfoundation/ethereumjs-ethash': 3.0.2 + '@nomicfoundation/ethereumjs-rlp': 5.0.2 + '@nomicfoundation/ethereumjs-trie': 6.0.2 + '@nomicfoundation/ethereumjs-tx': 5.0.2 + '@nomicfoundation/ethereumjs-util': 9.0.2 + abstract-level: 1.0.3 + debug: 4.3.4(supports-color@8.1.1) + ethereum-cryptography: 0.1.3 + level: 8.0.0 + lru-cache: 5.1.1 + memory-level: 1.0.0 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /@nomicfoundation/ethereumjs-common@4.0.2: + resolution: {integrity: sha512-I2WGP3HMGsOoycSdOTSqIaES0ughQTueOsddJ36aYVpI3SN8YSusgRFLwzDJwRFVIYDKx/iJz0sQ5kBHVgdDwg==} + dependencies: + '@nomicfoundation/ethereumjs-util': 9.0.2 + crc-32: 1.2.2 + dev: true + + 
/@nomicfoundation/ethereumjs-ethash@3.0.2: + resolution: {integrity: sha512-8PfoOQCcIcO9Pylq0Buijuq/O73tmMVURK0OqdjhwqcGHYC2PwhbajDh7GZ55ekB0Px197ajK3PQhpKoiI/UPg==} + engines: {node: '>=14'} + dependencies: + '@nomicfoundation/ethereumjs-block': 5.0.2 + '@nomicfoundation/ethereumjs-rlp': 5.0.2 + '@nomicfoundation/ethereumjs-util': 9.0.2 + abstract-level: 1.0.3 + bigint-crypto-utils: 3.1.8 + ethereum-cryptography: 0.1.3 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + dev: true + + /@nomicfoundation/ethereumjs-evm@2.0.2: + resolution: {integrity: sha512-rBLcUaUfANJxyOx9HIdMX6uXGin6lANCulIm/pjMgRqfiCRMZie3WKYxTSd8ZE/d+qT+zTedBF4+VHTdTSePmQ==} + engines: {node: '>=14'} + dependencies: + '@ethersproject/providers': 5.7.2 + '@nomicfoundation/ethereumjs-common': 4.0.2 + '@nomicfoundation/ethereumjs-tx': 5.0.2 + '@nomicfoundation/ethereumjs-util': 9.0.2 + debug: 4.3.4(supports-color@8.1.1) + ethereum-cryptography: 0.1.3 + mcl-wasm: 0.7.9 + rustbn.js: 0.2.0 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /@nomicfoundation/ethereumjs-rlp@5.0.2: + resolution: {integrity: sha512-QwmemBc+MMsHJ1P1QvPl8R8p2aPvvVcKBbvHnQOKBpBztEo0omN0eaob6FeZS/e3y9NSe+mfu3nNFBHszqkjTA==} + engines: {node: '>=14'} + hasBin: true + dev: true + + /@nomicfoundation/ethereumjs-statemanager@2.0.2: + resolution: {integrity: sha512-dlKy5dIXLuDubx8Z74sipciZnJTRSV/uHG48RSijhgm1V7eXYFC567xgKtsKiVZB1ViTP9iFL4B6Je0xD6X2OA==} + dependencies: + '@nomicfoundation/ethereumjs-common': 4.0.2 + '@nomicfoundation/ethereumjs-rlp': 5.0.2 + debug: 4.3.4(supports-color@8.1.1) + ethereum-cryptography: 0.1.3 + ethers: 5.7.2 + js-sdsl: 4.4.2 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /@nomicfoundation/ethereumjs-trie@6.0.2: + resolution: {integrity: sha512-yw8vg9hBeLYk4YNg5MrSJ5H55TLOv2FSWUTROtDtTMMmDGROsAu+0tBjiNGTnKRi400M6cEzoFfa89Fc5k8NTQ==} + engines: {node: '>=14'} + dependencies: + 
'@nomicfoundation/ethereumjs-rlp': 5.0.2 + '@nomicfoundation/ethereumjs-util': 9.0.2 + '@types/readable-stream': 2.3.15 + ethereum-cryptography: 0.1.3 + readable-stream: 3.6.0 + dev: true + + /@nomicfoundation/ethereumjs-tx@5.0.2: + resolution: {integrity: sha512-T+l4/MmTp7VhJeNloMkM+lPU3YMUaXdcXgTGCf8+ZFvV9NYZTRLFekRwlG6/JMmVfIfbrW+dRRJ9A6H5Q/Z64g==} + engines: {node: '>=14'} + dependencies: + '@chainsafe/ssz': 0.9.4 + '@ethersproject/providers': 5.7.2 + '@nomicfoundation/ethereumjs-common': 4.0.2 + '@nomicfoundation/ethereumjs-rlp': 5.0.2 + '@nomicfoundation/ethereumjs-util': 9.0.2 + ethereum-cryptography: 0.1.3 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + dev: true + + /@nomicfoundation/ethereumjs-util@9.0.2: + resolution: {integrity: sha512-4Wu9D3LykbSBWZo8nJCnzVIYGvGCuyiYLIJa9XXNVt1q1jUzHdB+sJvx95VGCpPkCT+IbLecW6yfzy3E1bQrwQ==} + engines: {node: '>=14'} + dependencies: + '@chainsafe/ssz': 0.10.2 + '@nomicfoundation/ethereumjs-rlp': 5.0.2 + ethereum-cryptography: 0.1.3 + dev: true + + /@nomicfoundation/ethereumjs-vm@7.0.2: + resolution: {integrity: sha512-Bj3KZT64j54Tcwr7Qm/0jkeZXJMfdcAtRBedou+Hx0dPOSIgqaIr0vvLwP65TpHbak2DmAq+KJbW2KNtIoFwvA==} + engines: {node: '>=14'} + dependencies: + '@nomicfoundation/ethereumjs-block': 5.0.2 + '@nomicfoundation/ethereumjs-blockchain': 7.0.2 + '@nomicfoundation/ethereumjs-common': 4.0.2 + '@nomicfoundation/ethereumjs-evm': 2.0.2 + '@nomicfoundation/ethereumjs-rlp': 5.0.2 + '@nomicfoundation/ethereumjs-statemanager': 2.0.2 + '@nomicfoundation/ethereumjs-trie': 6.0.2 + '@nomicfoundation/ethereumjs-tx': 5.0.2 + '@nomicfoundation/ethereumjs-util': 9.0.2 + debug: 4.3.4(supports-color@8.1.1) + ethereum-cryptography: 0.1.3 + mcl-wasm: 0.7.9 + rustbn.js: 0.2.0 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /@nomicfoundation/hardhat-network-helpers@1.0.10(hardhat@2.19.2): + resolution: {integrity: 
sha512-R35/BMBlx7tWN5V6d/8/19QCwEmIdbnA4ZrsuXgvs8i2qFx5i7h6mH5pBS4Pwi4WigLH+upl6faYusrNPuzMrQ==} + peerDependencies: + hardhat: ^2.9.5 + dependencies: + ethereumjs-util: 7.1.5 + hardhat: 2.19.2(ts-node@10.9.2)(typescript@5.3.3) + dev: true + + /@nomicfoundation/solidity-analyzer-darwin-arm64@0.1.0: + resolution: {integrity: sha512-vEF3yKuuzfMHsZecHQcnkUrqm8mnTWfJeEVFHpg+cO+le96xQA4lAJYdUan8pXZohQxv1fSReQsn4QGNuBNuCw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@nomicfoundation/solidity-analyzer-darwin-x64@0.1.0: + resolution: {integrity: sha512-dlHeIg0pTL4dB1l9JDwbi/JG6dHQaU1xpDK+ugYO8eJ1kxx9Dh2isEUtA4d02cQAl22cjOHTvifAk96A+ItEHA==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@nomicfoundation/solidity-analyzer-freebsd-x64@0.1.0: + resolution: {integrity: sha512-WFCZYMv86WowDA4GiJKnebMQRt3kCcFqHeIomW6NMyqiKqhK1kIZCxSLDYsxqlx396kKLPN1713Q1S8tu68GKg==} + engines: {node: '>= 10'} + cpu: [x64] + os: [freebsd] + requiresBuild: true + dev: true + optional: true + + /@nomicfoundation/solidity-analyzer-linux-arm64-gnu@0.1.0: + resolution: {integrity: sha512-DTw6MNQWWlCgc71Pq7CEhEqkb7fZnS7oly13pujs4cMH1sR0JzNk90Mp1zpSCsCs4oKan2ClhMlLKtNat/XRKQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@nomicfoundation/solidity-analyzer-linux-arm64-musl@0.1.0: + resolution: {integrity: sha512-wUpUnR/3GV5Da88MhrxXh/lhb9kxh9V3Jya2NpBEhKDIRCDmtXMSqPMXHZmOR9DfCwCvG6vLFPr/+YrPCnUN0w==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@nomicfoundation/solidity-analyzer-linux-x64-gnu@0.1.0: + resolution: {integrity: sha512-lR0AxK1x/MeKQ/3Pt923kPvwigmGX3OxeU5qNtQ9pj9iucgk4PzhbS3ruUeSpYhUxG50jN4RkIGwUMoev5lguw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + 
/@nomicfoundation/solidity-analyzer-linux-x64-musl@0.1.0: + resolution: {integrity: sha512-A1he/8gy/JeBD3FKvmI6WUJrGrI5uWJNr5Xb9WdV+DK0F8msuOqpEByLlnTdLkXMwW7nSl3awvLezOs9xBHJEg==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@nomicfoundation/solidity-analyzer-win32-arm64-msvc@0.1.0: + resolution: {integrity: sha512-7x5SXZ9R9H4SluJZZP8XPN+ju7Mx+XeUMWZw7ZAqkdhP5mK19I4vz3x0zIWygmfE8RT7uQ5xMap0/9NPsO+ykw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@nomicfoundation/solidity-analyzer-win32-ia32-msvc@0.1.0: + resolution: {integrity: sha512-m7w3xf+hnE774YRXu+2mGV7RiF3QJtUoiYU61FascCkQhX3QMQavh7saH/vzb2jN5D24nT/jwvaHYX/MAM9zUw==} + engines: {node: '>= 10'} + cpu: [ia32] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@nomicfoundation/solidity-analyzer-win32-x64-msvc@0.1.0: + resolution: {integrity: sha512-xCuybjY0sLJQnJhupiFAXaek2EqF0AP0eBjgzaalPXSNvCEN6ZYHvUzdA50ENDVeSYFXcUsYf3+FsD3XKaeptA==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@nomicfoundation/solidity-analyzer@0.1.0: + resolution: {integrity: sha512-xGWAiVCGOycvGiP/qrlf9f9eOn7fpNbyJygcB0P21a1MDuVPlKt0Srp7rvtBEutYQ48ouYnRXm33zlRnlTOPHg==} + engines: {node: '>= 12'} + optionalDependencies: + '@nomicfoundation/solidity-analyzer-darwin-arm64': 0.1.0 + '@nomicfoundation/solidity-analyzer-darwin-x64': 0.1.0 + '@nomicfoundation/solidity-analyzer-freebsd-x64': 0.1.0 + '@nomicfoundation/solidity-analyzer-linux-arm64-gnu': 0.1.0 + '@nomicfoundation/solidity-analyzer-linux-arm64-musl': 0.1.0 + '@nomicfoundation/solidity-analyzer-linux-x64-gnu': 0.1.0 + '@nomicfoundation/solidity-analyzer-linux-x64-musl': 0.1.0 + '@nomicfoundation/solidity-analyzer-win32-arm64-msvc': 0.1.0 + '@nomicfoundation/solidity-analyzer-win32-ia32-msvc': 0.1.0 + '@nomicfoundation/solidity-analyzer-win32-x64-msvc': 0.1.0 + dev: 
true + + /@nomiclabs/hardhat-ethers@2.2.3(ethers@5.7.2)(hardhat@2.19.2): + resolution: {integrity: sha512-YhzPdzb612X591FOe68q+qXVXGG2ANZRvDo0RRUtimev85rCrAlv/TLMEZw5c+kq9AbzocLTVX/h2jVIFPL9Xg==} + peerDependencies: + ethers: ^5.0.0 + hardhat: ^2.0.0 + dependencies: + ethers: 5.7.2 + hardhat: 2.19.2(ts-node@10.9.2)(typescript@5.3.3) + dev: true + + /@nomiclabs/hardhat-etherscan@3.1.8(hardhat@2.19.2): + resolution: {integrity: sha512-v5F6IzQhrsjHh6kQz4uNrym49brK9K5bYCq2zQZ729RYRaifI9hHbtmK+KkIVevfhut7huQFEQ77JLRMAzWYjQ==} + deprecated: The @nomiclabs/hardhat-etherscan package is deprecated, please use @nomicfoundation/hardhat-verify instead + peerDependencies: + hardhat: ^2.0.4 + dependencies: + '@ethersproject/abi': 5.7.0 + '@ethersproject/address': 5.7.0 + cbor: 8.1.0 + chalk: 2.4.2 + debug: 4.3.4(supports-color@8.1.1) + fs-extra: 7.0.1 + hardhat: 2.19.2(ts-node@10.9.2)(typescript@5.3.3) + lodash: 4.17.21 + semver: 6.3.0 + table: 6.8.1 + undici: 5.19.1 + transitivePeerDependencies: + - supports-color + dev: true + + /@nomiclabs/hardhat-waffle@2.0.6(@nomiclabs/hardhat-ethers@2.2.3)(@types/sinon-chai@3.2.8)(ethereum-waffle@3.4.4)(ethers@5.7.2)(hardhat@2.19.2): + resolution: {integrity: sha512-+Wz0hwmJGSI17B+BhU/qFRZ1l6/xMW82QGXE/Gi+WTmwgJrQefuBs1lIf7hzQ1hLk6hpkvb/zwcNkpVKRYTQYg==} + peerDependencies: + '@nomiclabs/hardhat-ethers': ^2.0.0 + '@types/sinon-chai': ^3.2.3 + ethereum-waffle: '*' + ethers: ^5.0.0 + hardhat: ^2.0.0 + dependencies: + '@nomiclabs/hardhat-ethers': 2.2.3(ethers@5.7.2)(hardhat@2.19.2) + '@types/sinon-chai': 3.2.8 + ethereum-waffle: 3.4.4(typescript@5.3.3) + ethers: 5.7.2 + hardhat: 2.19.2(ts-node@10.9.2)(typescript@5.3.3) + dev: true + + /@openzeppelin/contract-loader@0.6.3: + resolution: {integrity: sha512-cOFIjBjwbGgZhDZsitNgJl0Ye1rd5yu/Yx5LMgeq3u0ZYzldm4uObzHDFq4gjDdoypvyORjjJa3BlFA7eAnVIg==} + dependencies: + find-up: 4.1.0 + fs-extra: 8.1.0 + dev: true + + /@openzeppelin/contracts-upgradeable@4.9.3: + resolution: {integrity: 
sha512-jjaHAVRMrE4UuZNfDwjlLGDxTHWIOwTJS2ldnc278a0gevfXfPr8hxKEVBGFBE96kl2G3VHDZhUimw/+G3TG2A==} + dev: false + + /@openzeppelin/contracts@4.9.3: + resolution: {integrity: sha512-He3LieZ1pP2TNt5JbkPA4PNT9WC3gOTOlDcFGJW4Le4QKqwmiNJCRt44APfxMxvq7OugU/cqYuPcSBzOw38DAg==} + dev: false + + /@openzeppelin/defender-base-client@1.52.0(debug@4.3.4): + resolution: {integrity: sha512-VFNu/pjVpAnFKIfuKT1cn9dRpbcO8FO8EAmVZ2XrrAsKXEWDZ3TNBtACxmj7fAu0ad/TzRkb66o5rMts7Fv7jw==} + dependencies: + amazon-cognito-identity-js: 6.3.7 + async-retry: 1.3.3 + axios: 1.6.2(debug@4.3.4) + lodash: 4.17.21 + node-fetch: 2.6.7 + transitivePeerDependencies: + - debug + - encoding + dev: true + + /@openzeppelin/hardhat-upgrades@1.28.0(@nomiclabs/hardhat-ethers@2.2.3)(@nomiclabs/hardhat-etherscan@3.1.8)(ethers@5.7.2)(hardhat@2.19.2): + resolution: {integrity: sha512-7sb/Jf+X+uIufOBnmHR0FJVWuxEs2lpxjJnLNN6eCJCP8nD0v+Ot5lTOW2Qb/GFnh+fLvJtEkhkowz4ZQ57+zQ==} + hasBin: true + peerDependencies: + '@nomiclabs/hardhat-ethers': ^2.0.0 + '@nomiclabs/hardhat-etherscan': ^3.1.0 + '@nomiclabs/harhdat-etherscan': '*' + ethers: ^5.0.5 + hardhat: ^2.0.2 + peerDependenciesMeta: + '@nomiclabs/harhdat-etherscan': + optional: true + dependencies: + '@nomiclabs/hardhat-ethers': 2.2.3(ethers@5.7.2)(hardhat@2.19.2) + '@nomiclabs/hardhat-etherscan': 3.1.8(hardhat@2.19.2) + '@openzeppelin/defender-base-client': 1.52.0(debug@4.3.4) + '@openzeppelin/platform-deploy-client': 0.8.0(debug@4.3.4) + '@openzeppelin/upgrades-core': 1.31.3 + chalk: 4.1.2 + debug: 4.3.4(supports-color@8.1.1) + ethers: 5.7.2 + hardhat: 2.19.2(ts-node@10.9.2)(typescript@5.3.3) + proper-lockfile: 4.1.2 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /@openzeppelin/platform-deploy-client@0.8.0(debug@4.3.4): + resolution: {integrity: sha512-POx3AsnKwKSV/ZLOU/gheksj0Lq7Is1q2F3pKmcFjGZiibf+4kjGxr4eSMrT+2qgKYZQH1ZLQZ+SkbguD8fTvA==} + deprecated: '@openzeppelin/platform-deploy-client is deprecated. 
Please use @openzeppelin/defender-sdk-deploy-client' + dependencies: + '@ethersproject/abi': 5.7.0 + '@openzeppelin/defender-base-client': 1.52.0(debug@4.3.4) + axios: 0.21.4(debug@4.3.4) + lodash: 4.17.21 + node-fetch: 2.6.7 + transitivePeerDependencies: + - debug + - encoding + dev: true + + /@openzeppelin/test-helpers@0.5.16(bn.js@4.12.0): + resolution: {integrity: sha512-T1EvspSfH1qQO/sgGlskLfYVBbqzJR23SZzYl/6B2JnT4EhThcI85UpvDk0BkLWKaDScQTabGHt4GzHW+3SfZg==} + dependencies: + '@openzeppelin/contract-loader': 0.6.3 + '@truffle/contract': 4.6.2 + ansi-colors: 3.2.4 + chai: 4.3.10 + chai-bn: 0.2.2(bn.js@4.12.0)(chai@4.3.10) + ethjs-abi: 0.2.1 + lodash.flatten: 4.4.0 + semver: 5.7.1 + web3: 1.8.0 + web3-utils: 1.8.0 + transitivePeerDependencies: + - bn.js + - bufferutil + - encoding + - supports-color + - utf-8-validate + dev: true + + /@openzeppelin/upgrades-core@1.31.3: + resolution: {integrity: sha512-i7q0IuItKS4uO0clJwm4CARmt98aA9dLfKh38HFRbX+aFLGXwF0sOvB2iwr6f87ShH7d3DNuLrVgnnXUrYb7CA==} + hasBin: true + dependencies: + cbor: 9.0.1 + chalk: 4.1.2 + compare-versions: 6.1.0 + debug: 4.3.4(supports-color@8.1.1) + ethereumjs-util: 7.1.5 + minimist: 1.2.8 + proper-lockfile: 4.1.2 + solidity-ast: 0.4.55 + transitivePeerDependencies: + - supports-color + dev: true + + /@pkgr/core@0.1.1: + resolution: {integrity: sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA==} + engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0} + dev: true + + /@pnpm/config.env-replace@1.1.0: + resolution: {integrity: sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==} + engines: {node: '>=12.22.0'} + dev: true + + /@pnpm/network.ca-file@1.0.2: + resolution: {integrity: sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==} + engines: {node: '>=12.22.0'} + dependencies: + graceful-fs: 4.2.10 + dev: true + + /@pnpm/npm-conf@2.2.2: + resolution: {integrity: 
sha512-UA91GwWPhFExt3IizW6bOeY/pQ0BkuNwKjk9iQW9KqxluGCrg4VenZ0/L+2Y0+ZOtme72EVvg6v0zo3AMQRCeA==} + engines: {node: '>=12'} + dependencies: + '@pnpm/config.env-replace': 1.1.0 + '@pnpm/network.ca-file': 1.0.2 + config-chain: 1.1.13 + dev: true + + /@prettier/sync@0.3.0(prettier@3.2.5): + resolution: {integrity: sha512-3dcmCyAxIcxy036h1I7MQU/uEEBq8oLwf1CE3xeze+MPlgkdlb/+w6rGR/1dhp6Hqi17fRS6nvwnOzkESxEkOw==} + peerDependencies: + prettier: ^3.0.0 + dependencies: + prettier: 3.2.5 + dev: true + + /@resolver-engine/core@0.3.3: + resolution: {integrity: sha512-eB8nEbKDJJBi5p5SrvrvILn4a0h42bKtbCTri3ZxCGt6UvoQyp7HnGOfki944bUjBSHKK3RvgfViHn+kqdXtnQ==} + dependencies: + debug: 3.2.7 + is-url: 1.2.4 + request: 2.88.2 + transitivePeerDependencies: + - supports-color + dev: true + + /@resolver-engine/fs@0.3.3: + resolution: {integrity: sha512-wQ9RhPUcny02Wm0IuJwYMyAG8fXVeKdmhm8xizNByD4ryZlx6PP6kRen+t/haF43cMfmaV7T3Cx6ChOdHEhFUQ==} + dependencies: + '@resolver-engine/core': 0.3.3 + debug: 3.2.7 + transitivePeerDependencies: + - supports-color + dev: true + + /@resolver-engine/imports-fs@0.3.3: + resolution: {integrity: sha512-7Pjg/ZAZtxpeyCFlZR5zqYkz+Wdo84ugB5LApwriT8XFeQoLwGUj4tZFFvvCuxaNCcqZzCYbonJgmGObYBzyCA==} + dependencies: + '@resolver-engine/fs': 0.3.3 + '@resolver-engine/imports': 0.3.3 + debug: 3.2.7 + transitivePeerDependencies: + - supports-color + dev: true + + /@resolver-engine/imports@0.3.3: + resolution: {integrity: sha512-anHpS4wN4sRMwsAbMXhMfOD/y4a4Oo0Cw/5+rue7hSwGWsDOQaAU1ClK1OxjUC35/peazxEl8JaSRRS+Xb8t3Q==} + dependencies: + '@resolver-engine/core': 0.3.3 + debug: 3.2.7 + hosted-git-info: 2.8.9 + path-browserify: 1.0.1 + url: 0.11.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@scroll-tech/contracts@0.1.0: + resolution: {integrity: sha512-aBbDOc3WB/WveZdpJYcrfvMYMz7ZTEiW8M9XMJLba8p9FAR5KGYB/cV+8+EUsq3MKt7C1BfR+WnXoTVdvwIY6w==} + dev: false + + /@scure/base@1.1.1: + resolution: {integrity: 
sha512-ZxOhsSyxYwLJj3pLZCefNitxsj093tb2vq90mp2txoYeBqbcjDjqFhyM8eUjq/uFm6zJ+mUuqxlS2FkuSY1MTA==} + dev: true + + /@scure/bip32@1.1.0: + resolution: {integrity: sha512-ftTW3kKX54YXLCxH6BB7oEEoJfoE2pIgw7MINKAs5PsS6nqKPuKk1haTF/EuHmYqG330t5GSrdmtRuHaY1a62Q==} + dependencies: + '@noble/hashes': 1.1.2 + '@noble/secp256k1': 1.6.3 + '@scure/base': 1.1.1 + dev: true + + /@scure/bip39@1.1.0: + resolution: {integrity: sha512-pwrPOS16VeTKg98dYXQyIjJEcWfz7/1YJIwxUEPFfQPtc86Ym/1sVgQ2RLoD43AazMk2l/unK4ITySSpW2+82w==} + dependencies: + '@noble/hashes': 1.1.2 + '@scure/base': 1.1.1 + dev: true + + /@sentry/core@5.30.0: + resolution: {integrity: sha512-TmfrII8w1PQZSZgPpUESqjB+jC6MvZJZdLtE/0hZ+SrnKhW3x5WlYLvTXZpcWePYBku7rl2wn1RZu6uT0qCTeg==} + engines: {node: '>=6'} + dependencies: + '@sentry/hub': 5.30.0 + '@sentry/minimal': 5.30.0 + '@sentry/types': 5.30.0 + '@sentry/utils': 5.30.0 + tslib: 1.14.1 + dev: true + + /@sentry/hub@5.30.0: + resolution: {integrity: sha512-2tYrGnzb1gKz2EkMDQcfLrDTvmGcQPuWxLnJKXJvYTQDGLlEvi2tWz1VIHjunmOvJrB5aIQLhm+dcMRwFZDCqQ==} + engines: {node: '>=6'} + dependencies: + '@sentry/types': 5.30.0 + '@sentry/utils': 5.30.0 + tslib: 1.14.1 + dev: true + + /@sentry/minimal@5.30.0: + resolution: {integrity: sha512-BwWb/owZKtkDX+Sc4zCSTNcvZUq7YcH3uAVlmh/gtR9rmUvbzAA3ewLuB3myi4wWRAMEtny6+J/FN/x+2wn9Xw==} + engines: {node: '>=6'} + dependencies: + '@sentry/hub': 5.30.0 + '@sentry/types': 5.30.0 + tslib: 1.14.1 + dev: true + + /@sentry/node@5.30.0: + resolution: {integrity: sha512-Br5oyVBF0fZo6ZS9bxbJZG4ApAjRqAnqFFurMVJJdunNb80brh7a5Qva2kjhm+U6r9NJAB5OmDyPkA1Qnt+QVg==} + engines: {node: '>=6'} + dependencies: + '@sentry/core': 5.30.0 + '@sentry/hub': 5.30.0 + '@sentry/tracing': 5.30.0 + '@sentry/types': 5.30.0 + '@sentry/utils': 5.30.0 + cookie: 0.4.2 + https-proxy-agent: 5.0.1 + lru_map: 0.3.3 + tslib: 1.14.1 + transitivePeerDependencies: + - supports-color + dev: true + + /@sentry/tracing@5.30.0: + resolution: {integrity: 
sha512-dUFowCr0AIMwiLD7Fs314Mdzcug+gBVo/+NCMyDw8tFxJkwWAKl7Qa2OZxLQ0ZHjakcj1hNKfCQJ9rhyfOl4Aw==} + engines: {node: '>=6'} + dependencies: + '@sentry/hub': 5.30.0 + '@sentry/minimal': 5.30.0 + '@sentry/types': 5.30.0 + '@sentry/utils': 5.30.0 + tslib: 1.14.1 + dev: true + + /@sentry/types@5.30.0: + resolution: {integrity: sha512-R8xOqlSTZ+htqrfteCWU5Nk0CDN5ApUTvrlvBuiH1DyP6czDZ4ktbZB0hAgBlVcK0U+qpD3ag3Tqqpa5Q67rPw==} + engines: {node: '>=6'} + dev: true + + /@sentry/utils@5.30.0: + resolution: {integrity: sha512-zaYmoH0NWWtvnJjC9/CBseXMtKHm/tm40sz3YfJRxeQjyzRqNQPgivpd9R/oDJCYj999mzdW382p/qi2ypjLww==} + engines: {node: '>=6'} + dependencies: + '@sentry/types': 5.30.0 + tslib: 1.14.1 + dev: true + + /@sindresorhus/is@0.14.0: + resolution: {integrity: sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==} + engines: {node: '>=6'} + dev: true + + /@sindresorhus/is@4.6.0: + resolution: {integrity: sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==} + engines: {node: '>=10'} + dev: true + + /@smithy/types@2.7.0: + resolution: {integrity: sha512-1OIFyhK+vOkMbu4aN2HZz/MomREkrAC/HqY5mlJMUJfGrPRwijJDTeiN8Rnj9zUaB8ogXAfIOtZrrgqZ4w7Wnw==} + engines: {node: '>=14.0.0'} + dependencies: + tslib: 2.6.2 + dev: true + + /@solidity-parser/parser@0.14.3: + resolution: {integrity: sha512-29g2SZ29HtsqA58pLCtopI1P/cPy5/UAzlcAXO6T/CNJimG6yA8kx4NaseMyJULiC+TEs02Y9/yeHzClqoA0hw==} + dependencies: + antlr4ts: 0.5.0-alpha.4 + dev: true + + /@solidity-parser/parser@0.16.0: + resolution: {integrity: sha512-ESipEcHyRHg4Np4SqBCfcXwyxxna1DgFVz69bgpLV8vzl/NP1DtcKsJ4dJZXWQhY/Z4J2LeKBiOkOVZn9ct33Q==} + dependencies: + antlr4ts: 0.5.0-alpha.4 + dev: true + + /@solidity-parser/parser@0.16.2: + resolution: {integrity: sha512-PI9NfoA3P8XK2VBkK5oIfRgKDsicwDZfkVq9ZTBCQYGOP1N2owgY2dyLGyU5/J/hQs8KRk55kdmvTLjy3Mu3vg==} + dependencies: + antlr4ts: 0.5.0-alpha.4 + dev: true + + /@solidity-parser/parser@0.17.0: + resolution: 
{integrity: sha512-Nko8R0/kUo391jsEHHxrGM07QFdnPGvlmox4rmH0kNiNAashItAilhy4Mv4pK5gQmW5f4sXAF58fwJbmlkGcVw==} + dev: true + + /@szmarczak/http-timer@1.1.2: + resolution: {integrity: sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==} + engines: {node: '>=6'} + dependencies: + defer-to-connect: 1.1.1 + dev: true + + /@szmarczak/http-timer@5.0.1: + resolution: {integrity: sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==} + engines: {node: '>=14.16'} + dependencies: + defer-to-connect: 2.0.1 + dev: true + + /@truffle/abi-utils@0.3.2: + resolution: {integrity: sha512-32queMD64YKL/tmQgSV4Xs073dIaZ9tp7NP1icjwvFSA3Q9yeu7ApYbSbYMsx9H9zWkkVOsfcoJ2kJEieOCzsA==} + dependencies: + change-case: 3.0.2 + fast-check: 3.1.1 + web3-utils: 1.7.4 + dev: true + + /@truffle/blockchain-utils@0.1.4: + resolution: {integrity: sha512-HegAo5A8UX9vE8dtceBRgCY207gOb9wj54c8mNOOWHcFpkyJz7kZYGo44As6Imh10/0hD2j7vHQ56Jf+uszJ3A==} + dev: true + + /@truffle/codec@0.14.5: + resolution: {integrity: sha512-3FCpTJe6o7LGWUfrSdguMpdpH1PTn3u7bIfbj6Cfdzym2OAVSgxTgdlqC1poepbk0xcOVcUW+EsqNwLMqmBiPA==} + dependencies: + '@truffle/abi-utils': 0.3.2 + '@truffle/compile-common': 0.8.1 + big.js: 6.2.1 + bn.js: 5.2.1 + cbor: 5.2.0 + debug: 4.3.4(supports-color@8.1.1) + lodash: 4.17.21 + semver: 7.3.7 + utf8: 3.0.0 + web3-utils: 1.7.4 + transitivePeerDependencies: + - supports-color + dev: true + + /@truffle/compile-common@0.8.1: + resolution: {integrity: sha512-7mzzG9Cfrn+fDT5Sqi7B6pccvIIV5w/GM8/56YgnjysbDzy5aZ6mv0fe37ZbcznEVQ35NJjBy+lEr/ozOGXwQA==} + dependencies: + '@truffle/error': 0.1.1 + colors: 1.4.0 + dev: true + + /@truffle/contract-schema@3.4.10: + resolution: {integrity: sha512-BhRNRoRvlj2th6E5RNS0BnS0ZxQe01JJz8I7MjkGqdeXSvrn6qDCAnbmvhNgUv0l5h8w5+gBOQhAJhILf1shdQ==} + dependencies: + ajv: 6.12.6 + debug: 4.3.4(supports-color@8.1.1) + transitivePeerDependencies: + - supports-color + dev: true + + 
/@truffle/contract@4.6.2: + resolution: {integrity: sha512-OZZIDmKtHgZS2Q6sCczNe8OfTuMWpRaAo3vwY49LGGs0VXLiwc7nIcCFh+bMg14IRK6vBN4pWE9W9eWSBFy31Q==} + dependencies: + '@ensdomains/ensjs': 2.1.0 + '@truffle/blockchain-utils': 0.1.4 + '@truffle/contract-schema': 3.4.10 + '@truffle/debug-utils': 6.0.35 + '@truffle/error': 0.1.1 + '@truffle/interface-adapter': 0.5.22 + bignumber.js: 7.2.1 + debug: 4.3.4(supports-color@8.1.1) + ethers: 4.0.49 + web3: 1.7.4 + web3-core-helpers: 1.7.4 + web3-core-promievent: 1.7.4 + web3-eth-abi: 1.7.4 + web3-utils: 1.7.4 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /@truffle/debug-utils@6.0.35: + resolution: {integrity: sha512-GuLsc+GFEYiUM683GWh4/ol3jkBts5a601detVWu1Xo5/bSL5gxooOjgOTovjA8dimCjkyi/DnK2yHHC+q+g0g==} + dependencies: + '@truffle/codec': 0.14.5 + '@trufflesuite/chromafi': 3.0.0 + bn.js: 5.2.1 + chalk: 2.4.2 + debug: 4.3.4(supports-color@8.1.1) + highlightjs-solidity: 2.0.5 + transitivePeerDependencies: + - supports-color + dev: true + + /@truffle/error@0.1.1: + resolution: {integrity: sha512-sE7c9IHIGdbK4YayH4BC8i8qMjoAOeg6nUXUDZZp8wlU21/EMpaG+CLx+KqcIPyR+GSWIW3Dm0PXkr2nlggFDA==} + dev: true + + /@truffle/interface-adapter@0.5.22: + resolution: {integrity: sha512-Bgl5Afb1mPVNedI8CJzZQzVIdrZWSXISTBrXPZmppD4Q+6V1RUzlLxiaGGB4gYHOA+U0pBzD8MCcSycPAD9RsA==} + dependencies: + bn.js: 5.2.1 + ethers: 4.0.49 + web3: 1.7.4 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /@trufflesuite/chromafi@3.0.0: + resolution: {integrity: sha512-oqWcOqn8nT1bwlPPfidfzS55vqcIDdpfzo3HbU9EnUmcSTX+I8z0UyUFI3tZQjByVJulbzxHxUGS3ZJPwK/GPQ==} + dependencies: + camelcase: 4.1.0 + chalk: 2.4.2 + cheerio: 1.0.0-rc.12 + detect-indent: 5.0.0 + highlight.js: 10.7.3 + lodash.merge: 4.6.2 + strip-ansi: 4.0.0 + strip-indent: 2.0.0 + dev: true + + /@tsconfig/node10@1.0.9: + resolution: {integrity: 
sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==} + dev: true + + /@tsconfig/node12@1.0.11: + resolution: {integrity: sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==} + dev: true + + /@tsconfig/node14@1.0.3: + resolution: {integrity: sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==} + dev: true + + /@tsconfig/node16@1.0.3: + resolution: {integrity: sha512-yOlFc+7UtL/89t2ZhjPvvB/DeAr3r+Dq58IgzsFkOAvVC6NMJXmCGjbptdXdR9qsX7pKcTL+s87FtYREi2dEEQ==} + dev: true + + /@typechain/ethers-v5@2.0.0(ethers@5.7.2)(typechain@3.0.0): + resolution: {integrity: sha512-0xdCkyGOzdqh4h5JSf+zoWx85IusEjDcPIwNEHP8mrWSnCae4rvrqB+/gtpdNfX7zjlFlZiMeePn2r63EI3Lrw==} + peerDependencies: + ethers: ^5.0.0 + typechain: ^3.0.0 + dependencies: + ethers: 5.7.2 + typechain: 3.0.0(typescript@5.3.3) + dev: true + + /@typechain/ethers-v5@7.2.0(@ethersproject/abi@5.7.0)(@ethersproject/bytes@5.7.0)(@ethersproject/providers@5.7.2)(ethers@5.7.2)(typechain@8.3.2)(typescript@5.3.3): + resolution: {integrity: sha512-jfcmlTvaaJjng63QsT49MT6R1HFhtO/TBMWbyzPFSzMmVIqb2tL6prnKBs4ZJrSvmgIXWy+ttSjpaxCTq8D/Tw==} + peerDependencies: + '@ethersproject/abi': ^5.0.0 + '@ethersproject/bytes': ^5.0.0 + '@ethersproject/providers': ^5.0.0 + ethers: ^5.1.3 + typechain: ^5.0.0 + typescript: '>=4.0.0' + dependencies: + '@ethersproject/abi': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/providers': 5.7.2 + ethers: 5.7.2 + lodash: 4.17.21 + ts-essentials: 7.0.3(typescript@5.3.3) + typechain: 8.3.2(typescript@5.3.3) + typescript: 5.3.3 + dev: true + + /@typechain/hardhat@7.0.0(@ethersproject/abi@5.7.0)(@ethersproject/providers@5.7.2)(@typechain/ethers-v5@7.2.0)(ethers@5.7.2)(hardhat@2.19.2)(typechain@8.3.2): + resolution: {integrity: sha512-XB79i5ewg9Met7gMVGfgVkmypicbnI25T5clJBEooMoW2161p4zvKFpoS2O+lBppQyMrPIZkdvl2M3LMDayVcA==} + peerDependencies: + '@ethersproject/abi': ^5.4.7 + 
'@ethersproject/providers': ^5.4.7 + '@typechain/ethers-v5': ^11.0.0 + ethers: ^5.4.7 + hardhat: ^2.9.9 + typechain: ^8.2.0 + dependencies: + '@ethersproject/abi': 5.7.0 + '@ethersproject/providers': 5.7.2 + '@typechain/ethers-v5': 7.2.0(@ethersproject/abi@5.7.0)(@ethersproject/bytes@5.7.0)(@ethersproject/providers@5.7.2)(ethers@5.7.2)(typechain@8.3.2)(typescript@5.3.3) + ethers: 5.7.2 + fs-extra: 9.1.0 + hardhat: 2.19.2(ts-node@10.9.2)(typescript@5.3.3) + typechain: 8.3.2(typescript@5.3.3) + dev: true + + /@types/bn.js@4.11.6: + resolution: {integrity: sha512-pqr857jrp2kPuO9uRjZ3PwnJTjoQy+fcdxvBTvHm6dkmEL9q+hDD/2j/0ELOBPtPnS8LjCX0gI9nbl8lVkadpg==} + dependencies: + '@types/node': 16.18.80 + dev: true + + /@types/bn.js@5.1.1: + resolution: {integrity: sha512-qNrYbZqMx0uJAfKnKclPh+dTwK33KfLHYqtyODwd5HnXOjnkhc4qgn3BrK6RWyGZm5+sIFE7Q7Vz6QQtJB7w7g==} + dependencies: + '@types/node': 16.18.80 + dev: true + + /@types/cacheable-request@6.0.2: + resolution: {integrity: sha512-B3xVo+dlKM6nnKTcmm5ZtY/OL8bOAOd2Olee9M1zft65ox50OzjEHW91sDiU9j6cvW8Ejg1/Qkf4xd2kugApUA==} + dependencies: + '@types/http-cache-semantics': 4.0.1 + '@types/keyv': 3.1.4 + '@types/node': 16.18.80 + '@types/responselike': 1.0.0 + dev: true + + /@types/cbor@5.0.1: + resolution: {integrity: sha512-zVqJy2KzusZPLOgyGJDnOIbu3DxIGGqxYbEwtEEe4Z+la8jwIhOyb+GMrlHafs5tvKruwf8f8qOYP6zTvse/pw==} + dependencies: + '@types/node': 16.18.80 + dev: true + + /@types/chai@4.3.11: + resolution: {integrity: sha512-qQR1dr2rGIHYlJulmr8Ioq3De0Le9E4MJ5AiaeAETJJpndT1uUNHsGFK3L/UIu+rbkQSdj8J/w2bCsBZc/Y5fQ==} + dev: true + + /@types/concat-stream@1.6.1: + resolution: {integrity: sha512-eHE4cQPoj6ngxBZMvVf6Hw7Mh4jMW4U9lpGmS5GBPB9RYxlFg+CHaVN7ErNY4W9XfLIEn20b4VDYaIrbq0q4uA==} + dependencies: + '@types/node': 16.18.80 + dev: true + + /@types/debug@4.1.12: + resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} + dependencies: + '@types/ms': 0.7.31 + dev: true + + 
/@types/deep-equal-in-any-order@1.0.3: + resolution: {integrity: sha512-jT0O3hAILDKeKbdWJ9FZLD0Xdfhz7hMvfyFlRWpirjiEVr8G+GZ4kVIzPIqM6x6Rpp93TNPgOAed4XmvcuV6Qg==} + dev: true + + /@types/events@3.0.0: + resolution: {integrity: sha512-EaObqwIvayI5a8dCzhFrjKzVwKLxjoG9T6Ppd5CEo07LRKfQ8Yokw54r5+Wq7FaBQ+yXRvQAYPrHwya1/UFt9g==} + dev: true + + /@types/form-data@0.0.33: + resolution: {integrity: sha512-8BSvG1kGm83cyJITQMZSulnl6QV8jqAGreJsc5tPu1Jq0vTSOiY/k24Wx82JRpWwZSqrala6sd5rWi6aNXvqcw==} + dependencies: + '@types/node': 16.18.80 + dev: true + + /@types/glob@7.1.1: + resolution: {integrity: sha512-1Bh06cbWJUHMC97acuD6UMG29nMt0Aqz1vF3guLfG+kHHJhy3AyohZFFxYk2f7Q1SQIrNwvncxAE0N/9s70F2w==} + dependencies: + '@types/events': 3.0.0 + '@types/minimatch': 3.0.3 + '@types/node': 16.18.80 + dev: true + + /@types/http-cache-semantics@4.0.1: + resolution: {integrity: sha512-SZs7ekbP8CN0txVG2xVRH6EgKmEm31BOxA07vkFaETzZz1xh+cbt8BcI0slpymvwhx5dlFnQG2rTlPVQn+iRPQ==} + dev: true + + /@types/json-schema@7.0.13: + resolution: {integrity: sha512-RbSSoHliUbnXj3ny0CNFOoxrIDV6SUGyStHsvDqosw6CkdPV8TtWGlfecuK4ToyMEAql6pzNxgCFKanovUzlgQ==} + dev: true + + /@types/keyv@3.1.4: + resolution: {integrity: sha512-BQ5aZNSCpj7D6K2ksrRCTmKRLEpnPvWDiLPfoGyhZ++8YtiK9d/3DBKPJgry359X/P1PfruyYwvnvwFjuEiEIg==} + dependencies: + '@types/node': 16.18.80 + dev: true + + /@types/lru-cache@5.1.1: + resolution: {integrity: sha512-ssE3Vlrys7sdIzs5LOxCzTVMsU7i9oa/IaW92wF32JFb3CVczqOkru2xspuKczHEbG3nvmPY7IFqVmGGHdNbYw==} + dev: true + + /@types/minimatch@3.0.3: + resolution: {integrity: sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA==} + dev: true + + /@types/mkdirp@0.5.2: + resolution: {integrity: sha512-U5icWpv7YnZYGsN4/cmh3WD2onMY0aJIiTE6+51TwJCttdHvtCYmkBNOobHlXwrJRL0nkH9jH4kD+1FAdMN4Tg==} + dependencies: + '@types/node': 16.18.80 + dev: true + + /@types/mocha@10.0.6: + resolution: {integrity: 
sha512-dJvrYWxP/UcXm36Qn36fxhUKu8A/xMRXVT2cliFF1Z7UA9liG5Psj3ezNSZw+5puH2czDXRLcXQxf8JbJt0ejg==} + dev: true + + /@types/ms@0.7.31: + resolution: {integrity: sha512-iiUgKzV9AuaEkZqkOLDIvlQiL6ltuZd9tGcW3gwpnX8JbuiuhFlEGmmFXEXkN50Cvq7Os88IY2v0dkDqXYWVgA==} + dev: true + + /@types/node-fetch@2.6.2: + resolution: {integrity: sha512-DHqhlq5jeESLy19TYhLakJ07kNumXWjcDdxXsLUMJZ6ue8VZJj4kLPQVE/2mdHh3xZziNF1xppu5lwmS53HR+A==} + dependencies: + '@types/node': 16.18.80 + form-data: 3.0.1 + dev: true + + /@types/node@10.17.60: + resolution: {integrity: sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==} + dev: true + + /@types/node@12.19.16: + resolution: {integrity: sha512-7xHmXm/QJ7cbK2laF+YYD7gb5MggHIIQwqyjin3bpEGiSuvScMQ5JZZXPvRipi1MwckTQbJZROMns/JxdnIL1Q==} + dev: true + + /@types/node@16.18.80: + resolution: {integrity: sha512-vFxJ1Iyl7A0+xB0uW1r1v504yItKZLdqg/VZELUZ4H02U0bXAgBisSQ8Erf0DMruNFz9ggoiEv6T8Ll9bTg8Jw==} + dev: true + + /@types/node@8.10.66: + resolution: {integrity: sha512-tktOkFUA4kXx2hhhrB8bIFb5TbwzS4uOhKEmwiD+NoiL0qtP2OQ9mFldbgD4dV1djrlBYP6eBuQZiWjuHUpqFw==} + dev: true + + /@types/pbkdf2@3.1.0: + resolution: {integrity: sha512-Cf63Rv7jCQ0LaL8tNXmEyqTHuIJxRdlS5vMh1mj5voN4+QFhVZnlZruezqpWYDiJ8UTzhP0VmeLXCmBk66YrMQ==} + dependencies: + '@types/node': 16.18.80 + dev: true + + /@types/prettier@2.7.1: + resolution: {integrity: sha512-ri0UmynRRvZiiUJdiz38MmIblKK+oH30MztdBVR95dv/Ubw6neWSb8u1XpRb72L4qsZOhz+L+z9JD40SJmfWow==} + dev: true + + /@types/qs@6.9.7: + resolution: {integrity: sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==} + dev: true + + /@types/readable-stream@2.3.15: + resolution: {integrity: sha512-oM5JSKQCcICF1wvGgmecmHldZ48OZamtMxcGGVICOJA8o8cahXC1zEVAif8iwoc5j8etxFaRFnf095+CDsuoFQ==} + dependencies: + '@types/node': 16.18.80 + safe-buffer: 5.1.2 + dev: true + + /@types/resolve@0.0.8: + resolution: {integrity: 
sha512-auApPaJf3NPfe18hSoJkp8EbZzer2ISk7o8mCC3M9he/a04+gbMF97NkpD2S8riMGvm4BMRI59/SZQSaLTKpsQ==} + dependencies: + '@types/node': 16.18.80 + dev: true + + /@types/responselike@1.0.0: + resolution: {integrity: sha512-85Y2BjiufFzaMIlvJDvTTB8Fxl2xfLo4HgmHzVBz08w4wDePCTjYw66PdrolO0kzli3yam/YCgRufyo1DdQVTA==} + dependencies: + '@types/node': 16.18.80 + dev: true + + /@types/secp256k1@4.0.3: + resolution: {integrity: sha512-Da66lEIFeIz9ltsdMZcpQvmrmmoqrfju8pm1BH8WbYjZSwUgCwXLb9C+9XYogwBITnbsSaMdVPb2ekf7TV+03w==} + dependencies: + '@types/node': 16.18.80 + dev: true + + /@types/semver@7.5.0: + resolution: {integrity: sha512-G8hZ6XJiHnuhQKR7ZmysCeJWE08o8T0AXtk5darsCaTVsYZhhgUrq53jizaR2FvsoeCwJhlmwTjkXBY5Pn/ZHw==} + dev: true + + /@types/sinon-chai@3.2.8: + resolution: {integrity: sha512-d4ImIQbT/rKMG8+AXpmcan5T2/PNeSjrYhvkwet6z0p8kzYtfgA32xzOBlbU0yqJfq+/0Ml805iFoODO0LP5/g==} + dependencies: + '@types/chai': 4.3.11 + '@types/sinon': 10.0.13 + dev: true + + /@types/sinon@10.0.13: + resolution: {integrity: sha512-UVjDqJblVNQYvVNUsj0PuYYw0ELRmgt1Nt5Vk0pT5f16ROGfcKJY8o1HVuMOJOpD727RrGB9EGvoaTQE5tgxZQ==} + dependencies: + '@types/sinonjs__fake-timers': 8.1.2 + dev: true + + /@types/sinonjs__fake-timers@8.1.2: + resolution: {integrity: sha512-9GcLXF0/v3t80caGs5p2rRfkB+a8VBGLJZVih6CNFkx8IZ994wiKKLSRs9nuFwk1HevWs/1mnUmkApGrSGsShA==} + dev: true + + /@typescript-eslint/eslint-plugin@6.21.0(@typescript-eslint/parser@6.21.0)(eslint@8.56.0)(typescript@5.3.3): + resolution: {integrity: sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + '@typescript-eslint/parser': ^6.0.0 || ^6.0.0-alpha + eslint: ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@eslint-community/regexpp': 4.9.1 + '@typescript-eslint/parser': 6.21.0(eslint@8.56.0)(typescript@5.3.3) + '@typescript-eslint/scope-manager': 6.21.0 + 
'@typescript-eslint/type-utils': 6.21.0(eslint@8.56.0)(typescript@5.3.3) + '@typescript-eslint/utils': 6.21.0(eslint@8.56.0)(typescript@5.3.3) + '@typescript-eslint/visitor-keys': 6.21.0 + debug: 4.3.4(supports-color@8.1.1) + eslint: 8.56.0 + graphemer: 1.4.0 + ignore: 5.2.4 + natural-compare: 1.4.0 + semver: 7.5.4 + ts-api-utils: 1.0.3(typescript@5.3.3) + typescript: 5.3.3 + transitivePeerDependencies: + - supports-color + dev: true + + /@typescript-eslint/parser@6.21.0(eslint@8.56.0)(typescript@5.3.3): + resolution: {integrity: sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@typescript-eslint/scope-manager': 6.21.0 + '@typescript-eslint/types': 6.21.0 + '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.3.3) + '@typescript-eslint/visitor-keys': 6.21.0 + debug: 4.3.4(supports-color@8.1.1) + eslint: 8.56.0 + typescript: 5.3.3 + transitivePeerDependencies: + - supports-color + dev: true + + /@typescript-eslint/scope-manager@6.21.0: + resolution: {integrity: sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==} + engines: {node: ^16.0.0 || >=18.0.0} + dependencies: + '@typescript-eslint/types': 6.21.0 + '@typescript-eslint/visitor-keys': 6.21.0 + dev: true + + /@typescript-eslint/type-utils@6.21.0(eslint@8.56.0)(typescript@5.3.3): + resolution: {integrity: sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.3.3) + '@typescript-eslint/utils': 6.21.0(eslint@8.56.0)(typescript@5.3.3) + debug: 
4.3.4(supports-color@8.1.1) + eslint: 8.56.0 + ts-api-utils: 1.0.3(typescript@5.3.3) + typescript: 5.3.3 + transitivePeerDependencies: + - supports-color + dev: true + + /@typescript-eslint/types@6.21.0: + resolution: {integrity: sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==} + engines: {node: ^16.0.0 || >=18.0.0} + dev: true + + /@typescript-eslint/typescript-estree@6.21.0(typescript@5.3.3): + resolution: {integrity: sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@typescript-eslint/types': 6.21.0 + '@typescript-eslint/visitor-keys': 6.21.0 + debug: 4.3.4(supports-color@8.1.1) + globby: 11.1.0 + is-glob: 4.0.3 + minimatch: 9.0.3 + semver: 7.5.4 + ts-api-utils: 1.0.3(typescript@5.3.3) + typescript: 5.3.3 + transitivePeerDependencies: + - supports-color + dev: true + + /@typescript-eslint/utils@6.21.0(eslint@8.56.0)(typescript@5.3.3): + resolution: {integrity: sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==} + engines: {node: ^16.0.0 || >=18.0.0} + peerDependencies: + eslint: ^7.0.0 || ^8.0.0 + dependencies: + '@eslint-community/eslint-utils': 4.4.0(eslint@8.56.0) + '@types/json-schema': 7.0.13 + '@types/semver': 7.5.0 + '@typescript-eslint/scope-manager': 6.21.0 + '@typescript-eslint/types': 6.21.0 + '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.3.3) + eslint: 8.56.0 + semver: 7.5.4 + transitivePeerDependencies: + - supports-color + - typescript + dev: true + + /@typescript-eslint/visitor-keys@6.21.0: + resolution: {integrity: sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==} + engines: {node: ^16.0.0 || >=18.0.0} + dependencies: + '@typescript-eslint/types': 6.21.0 + eslint-visitor-keys: 3.4.3 + dev: true + + 
/@ungap/structured-clone@1.2.0: + resolution: {integrity: sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==} + dev: true + + /@yarnpkg/lockfile@1.1.0: + resolution: {integrity: sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==} + dev: true + + /abbrev@1.0.9: + resolution: {integrity: sha512-LEyx4aLEC3x6T0UguF6YILf+ntvmOaWsVfENmIW0E9H09vKlLDGelMjjSm0jkDHALj8A8quZ/HapKNigzwge+Q==} + dev: true + + /abbrev@1.1.1: + resolution: {integrity: sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==} + dev: true + + /abi-to-sol@0.6.6: + resolution: {integrity: sha512-PRn81rSpv6NXFPYQSw7ujruqIP6UkwZ/XoFldtiqCX8+2kHVc73xVaUVvdbro06vvBVZiwnxhEIGdI4BRMwGHw==} + hasBin: true + dependencies: + '@truffle/abi-utils': 0.3.2 + '@truffle/contract-schema': 3.4.10 + ajv: 6.12.6 + better-ajv-errors: 0.8.2(ajv@6.12.6) + neodoc: 2.0.2 + semver: 7.3.7 + source-map-support: 0.5.21 + optionalDependencies: + prettier: 2.8.8 + prettier-plugin-solidity: 1.3.1(prettier@2.8.8) + transitivePeerDependencies: + - supports-color + dev: true + + /abortcontroller-polyfill@1.7.3: + resolution: {integrity: sha512-zetDJxd89y3X99Kvo4qFx8GKlt6GsvN3UcRZHwU6iFA/0KiOmhkTVhe8oRoTBiTVPZu09x3vCra47+w8Yz1+2Q==} + dev: true + + /abstract-level@1.0.3: + resolution: {integrity: sha512-t6jv+xHy+VYwc4xqZMn2Pa9DjcdzvzZmQGRjTFc8spIbRGHgBrEKbPq+rYXc7CCo0lxgYvSgKVg9qZAhpVQSjA==} + engines: {node: '>=12'} + dependencies: + buffer: 6.0.3 + catering: 2.1.1 + is-buffer: 2.0.5 + level-supports: 4.0.1 + level-transcoder: 1.0.1 + module-error: 1.0.2 + queue-microtask: 1.2.3 + dev: true + + /abstract-leveldown@2.6.3: + resolution: {integrity: sha512-2++wDf/DYqkPR3o5tbfdhF96EfMApo1GpPfzOsR/ZYXdkSmELlvOOEAl9iKkRsktMPHdGjO4rtkBpf2I7TiTeA==} + dependencies: + xtend: 4.0.2 + dev: true + + /abstract-leveldown@2.7.2: + resolution: {integrity: 
sha512-+OVvxH2rHVEhWLdbudP6p0+dNMXu8JA1CbhP19T8paTYAcX7oJ4OVjT+ZUVpv7mITxXHqDMej+GdqXBmXkw09w==} + dependencies: + xtend: 4.0.2 + dev: true + + /abstract-leveldown@3.0.0: + resolution: {integrity: sha512-KUWx9UWGQD12zsmLNj64/pndaz4iJh/Pj7nopgkfDG6RlCcbMZvT6+9l7dchK4idog2Is8VdC/PvNbFuFmalIQ==} + engines: {node: '>=4'} + dependencies: + xtend: 4.0.2 + dev: true + + /abstract-leveldown@5.0.0: + resolution: {integrity: sha512-5mU5P1gXtsMIXg65/rsYGsi93+MlogXZ9FA8JnwKurHQg64bfXwGYVdVdijNTVNOlAsuIiOwHdvFFD5JqCJQ7A==} + engines: {node: '>=6'} + dependencies: + xtend: 4.0.2 + dev: true + + /accepts@1.3.7: + resolution: {integrity: sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==} + engines: {node: '>= 0.6'} + dependencies: + mime-types: 2.1.27 + negotiator: 0.6.2 + dev: true + + /acorn-jsx@5.3.2(acorn@8.10.0): + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + dependencies: + acorn: 8.10.0 + dev: true + + /acorn-walk@8.2.0: + resolution: {integrity: sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==} + engines: {node: '>=0.4.0'} + dev: true + + /acorn@8.10.0: + resolution: {integrity: sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==} + engines: {node: '>=0.4.0'} + hasBin: true + dev: true + + /address@1.1.2: + resolution: {integrity: sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==} + engines: {node: '>= 0.12.0'} + dev: true + + /adm-zip@0.4.16: + resolution: {integrity: sha512-TFi4HBKSGfIKsK5YCkKaaFG2m4PEDyViZmEwof3MTIgzimHLto6muaHVpbrljdIvIrFZzEq/p4nafOeLcYegrg==} + engines: {node: '>=0.3.0'} + dev: true + + /aes-js@3.0.0: + resolution: {integrity: sha512-H7wUZRn8WpTq9jocdxQ2c8x2sKo9ZVmzfRE13GiNJXfp7NcKYEdvl3vspKjXox6RIG2VtaRe4JFvxG4rqp2Zuw==} + + /aes-js@3.1.2: 
+ resolution: {integrity: sha512-e5pEa2kBnBOgR4Y/p20pskXI74UEz7de8ZGVo58asOtvSVG5YAbJeELPZxOmt+Bnz3rX753YKhfIn4X4l1PPRQ==} + requiresBuild: true + dev: true + optional: true + + /agent-base@6.0.2: + resolution: {integrity: sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==} + engines: {node: '>= 6.0.0'} + dependencies: + debug: 4.3.4(supports-color@8.1.1) + transitivePeerDependencies: + - supports-color + dev: true + + /aggregate-error@3.1.0: + resolution: {integrity: sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==} + engines: {node: '>=8'} + dependencies: + clean-stack: 2.2.0 + indent-string: 4.0.0 + dev: true + + /ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + dev: true + + /ajv@8.11.0: + resolution: {integrity: sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==} + dependencies: + fast-deep-equal: 3.1.3 + json-schema-traverse: 1.0.0 + require-from-string: 2.0.2 + uri-js: 4.4.1 + dev: true + + /amazon-cognito-identity-js@6.3.7: + resolution: {integrity: sha512-tSjnM7KyAeOZ7UMah+oOZ6cW4Gf64FFcc7BE2l7MTcp7ekAPrXaCbpcW2xEpH1EiDS4cPcAouHzmCuc2tr72vQ==} + dependencies: + '@aws-crypto/sha256-js': 1.2.2 + buffer: 4.9.2 + fast-base64-decode: 1.0.0 + isomorphic-unfetch: 3.1.0 + js-cookie: 2.2.1 + transitivePeerDependencies: + - encoding + dev: true + + /amdefine@1.0.1: + resolution: {integrity: sha512-S2Hw0TtNkMJhIabBwIojKL9YHO5T0n5eNqWJ7Lrlel/zDbftQpxpapi8tZs3X1HWa+u+QeydGmzzNU0m09+Rcg==} + engines: {node: '>=0.4.2'} + requiresBuild: true + dev: true + optional: true + + /ansi-colors@3.2.3: + resolution: {integrity: sha512-LEHHyuhlPY3TmuUYMh2oz89lTShfvgbmzaBcxve9t/9Wuy7Dwf4yoAKcND7KFT1HAQfqZ12qtc+DUrBMeKF9nw==} + engines: 
{node: '>=6'} + dev: true + + /ansi-colors@3.2.4: + resolution: {integrity: sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA==} + engines: {node: '>=6'} + dev: true + + /ansi-colors@4.1.1: + resolution: {integrity: sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==} + engines: {node: '>=6'} + dev: true + + /ansi-colors@4.1.3: + resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==} + engines: {node: '>=6'} + dev: true + + /ansi-escapes@4.3.2: + resolution: {integrity: sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==} + engines: {node: '>=8'} + dependencies: + type-fest: 0.21.3 + dev: true + + /ansi-regex@2.1.1: + resolution: {integrity: sha512-TIGnTpdo+E3+pCyAluZvtED5p5wCqLdezCyhPZzKPcxvFplEt4i+W7OONCKgeZFT3+y5NZZfOOS/Bdcanm1MYA==} + engines: {node: '>=0.10.0'} + dev: true + + /ansi-regex@3.0.1: + resolution: {integrity: sha512-+O9Jct8wf++lXxxFc4hc8LsjaSq0HFzzL7cVsw8pRDIPdjKD2mT4ytDZlLuSBZ4cLKZFXIrMGO7DbQCtMJJMKw==} + engines: {node: '>=4'} + dev: true + + /ansi-regex@4.1.1: + resolution: {integrity: sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==} + engines: {node: '>=6'} + dev: true + + /ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + dev: true + + /ansi-styles@2.2.1: + resolution: {integrity: sha512-kmCevFghRiWM7HB5zTPULl4r9bVFSWjz62MhqizDGUrq2NWuNMQyuv4tHHoKJHs69M/MF64lEcHdYIocrdWQYA==} + engines: {node: '>=0.10.0'} + dev: true + + /ansi-styles@3.2.1: + resolution: {integrity: sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==} + engines: {node: '>=4'} + dependencies: + color-convert: 1.9.3 + dev: true + + /ansi-styles@4.3.0: + resolution: {integrity: 
sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + dependencies: + color-convert: 2.0.1 + dev: true + + /antlr4@4.13.0: + resolution: {integrity: sha512-zooUbt+UscjnWyOrsuY/tVFL4rwrAGwOivpQmvmUDE22hy/lUA467Rc1rcixyRwcRUIXFYBwv7+dClDSHdmmew==} + engines: {node: '>=16'} + dev: true + + /antlr4ts@0.5.0-alpha.4: + resolution: {integrity: sha512-WPQDt1B74OfPv/IMS2ekXAKkTZIHl88uMetg6q3OTqgFxZ/dxDXI0EWLyZid/1Pe6hTftyg5N7gel5wNAGxXyQ==} + dev: true + + /anymatch@3.1.2: + resolution: {integrity: sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==} + engines: {node: '>= 8'} + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + dev: true + + /arg@4.1.3: + resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==} + dev: true + + /argparse@1.0.10: + resolution: {integrity: sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==} + dependencies: + sprintf-js: 1.0.3 + dev: true + + /argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + dev: true + + /arr-diff@4.0.0: + resolution: {integrity: sha512-YVIQ82gZPGBebQV/a8dar4AitzCQs0jjXwMPZllpXMaGjXPYVUawSxQrRsjhjupyVxEvbHgUmIhKVlND+j02kA==} + engines: {node: '>=0.10.0'} + dev: true + + /arr-flatten@1.1.0: + resolution: {integrity: sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==} + engines: {node: '>=0.10.0'} + dev: true + + /arr-union@3.1.0: + resolution: {integrity: sha512-sKpyeERZ02v1FeCZT8lrfJq5u6goHCtpTAzPwJYe7c8SPFOboNjNg1vz2L4VTn9T4PQxEx13TbXLmYUcS6Ug7Q==} + engines: {node: '>=0.10.0'} + dev: true + + /array-back@1.0.4: + resolution: {integrity: sha512-1WxbZvrmyhkNoeYcizokbmh5oiOCIfyvGtcqbK3Ls1v1fKcquzxnQSceOx6tzq7jmai2kFLWIpGND2cLhH6TPw==} + engines: {node: '>=0.12.0'} + 
dependencies: + typical: 2.6.1 + dev: true + + /array-back@2.0.0: + resolution: {integrity: sha512-eJv4pLLufP3g5kcZry0j6WXpIbzYw9GUB4mVJZno9wfwiBxbizTnHCw3VJb07cBihbFX48Y7oSrW9y+gt4glyw==} + engines: {node: '>=4'} + dependencies: + typical: 2.6.1 + dev: true + + /array-back@3.1.0: + resolution: {integrity: sha512-TkuxA4UCOvxuDK6NZYXCalszEzj+TLszyASooky+i742l9TqsOdYCMJJupxRic61hwquNtppB3hgcuq9SVSH1Q==} + engines: {node: '>=6'} + dev: true + + /array-back@4.0.2: + resolution: {integrity: sha512-NbdMezxqf94cnNfWLL7V/im0Ub+Anbb0IoZhvzie8+4HJ4nMQuzHuy49FkGYCJK2yAloZ3meiB6AVMClbrI1vg==} + engines: {node: '>=8'} + dev: true + + /array-buffer-byte-length@1.0.0: + resolution: {integrity: sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A==} + dependencies: + call-bind: 1.0.5 + is-array-buffer: 3.0.2 + dev: true + + /array-flatten@1.1.1: + resolution: {integrity: sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==} + dev: true + + /array-union@2.1.0: + resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} + engines: {node: '>=8'} + dev: true + + /array-uniq@1.0.3: + resolution: {integrity: sha512-MNha4BWQ6JbwhFhj03YK552f7cb3AzoE8SzeljgChvL1dl3IcvggXVz1DilzySZkCja+CXuZbdW7yATchWn8/Q==} + engines: {node: '>=0.10.0'} + dev: true + + /array-unique@0.3.2: + resolution: {integrity: sha512-SleRWjh9JUud2wH1hPs9rZBZ33H6T9HOiL0uwGnGx9FpE6wKGyfWugmbkEOIs6qWrZhg0LWeLziLrEwQJhs5mQ==} + engines: {node: '>=0.10.0'} + dev: true + + /array.prototype.findlast@1.2.3: + resolution: {integrity: sha512-kcBubumjciBg4JKp5KTKtI7ec7tRefPk88yjkWJwaVKYd9QfTaxcsOxoMNKd7iBr447zCfDV0z1kOF47umv42g==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.2.1 + es-abstract: 1.22.3 + es-shim-unscopables: 1.0.2 + get-intrinsic: 1.2.2 + dev: true + + /array.prototype.reduce@1.0.4: + resolution: {integrity: 
sha512-WnM+AjG/DvLRLo4DDl+r+SvCzYtD2Jd9oeBYMcEaI7t3fFrHY9M53/wdLcTvmZNQ70IU6Htj0emFkZ5TS+lrdw==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.1.4 + es-abstract: 1.20.3 + es-array-method-boxes-properly: 1.0.0 + is-string: 1.0.7 + dev: true + + /arraybuffer.prototype.slice@1.0.2: + resolution: {integrity: sha512-yMBKppFur/fbHu9/6USUe03bZ4knMYiwFBcyiaXB8Go0qNehwX6inYPzK9U0NeQvGxKthcmHcaR8P5MStSRBAw==} + engines: {node: '>= 0.4'} + dependencies: + array-buffer-byte-length: 1.0.0 + call-bind: 1.0.5 + define-properties: 1.2.1 + es-abstract: 1.22.3 + get-intrinsic: 1.2.2 + is-array-buffer: 3.0.2 + is-shared-array-buffer: 1.0.2 + dev: true + + /asap@2.0.6: + resolution: {integrity: sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==} + dev: true + + /asn1.js@4.10.1: + resolution: {integrity: sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==} + dependencies: + bn.js: 4.12.0 + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + dev: true + + /asn1@0.2.4: + resolution: {integrity: sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==} + dependencies: + safer-buffer: 2.1.2 + dev: true + + /assert-plus@1.0.0: + resolution: {integrity: sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==} + engines: {node: '>=0.8'} + dev: true + + /assertion-error@1.1.0: + resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==} + + /assign-symbols@1.0.0: + resolution: {integrity: sha512-Q+JC7Whu8HhmTdBph/Tq59IoRtoy6KAm5zzPv00WdujX82lbAL8K7WVjne7vdCsAmbF4AYaDOPyO3k0kl8qIrw==} + engines: {node: '>=0.10.0'} + dev: true + + /ast-parents@0.0.1: + resolution: {integrity: sha512-XHusKxKz3zoYk1ic8Un640joHbFMhbqneyoZfoKnEGtf2ey9Uh/IdpcQplODdO/kENaMIWsD0nJm4+wX3UNLHA==} + dev: true + + /astral-regex@2.0.0: + resolution: {integrity: 
sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==} + engines: {node: '>=8'} + dev: true + + /async-eventemitter@0.2.4: + resolution: {integrity: sha512-pd20BwL7Yt1zwDFy+8MX8F1+WCT8aQeKj0kQnTrH9WaeRETlRamVhD0JtRPmrV4GfOJ2F9CvdQkZeZhnh2TuHw==} + dependencies: + async: 2.6.3 + dev: true + + /async-limiter@1.0.1: + resolution: {integrity: sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==} + dev: true + + /async-retry@1.3.3: + resolution: {integrity: sha512-wfr/jstw9xNi/0teMHrRW7dsz3Lt5ARhYNZ2ewpadnhaIp5mbALhOAP+EAdsC7t4Z6wqsDVv9+W6gm1Dk9mEyw==} + dependencies: + retry: 0.13.1 + dev: true + + /async@1.5.2: + resolution: {integrity: sha512-nSVgobk4rv61R9PUSDtYt7mPVB2olxNR5RWJcAsH676/ef11bUZwvu7+RGYrYauVdDPcO519v68wRhXQtxsV9w==} + dev: true + + /async@2.6.2: + resolution: {integrity: sha512-H1qVYh1MYhEEFLsP97cVKqCGo7KfCyTt6uEWqsTBr9SO84oK9Uwbyd/yCW+6rKJLHksBNUVWZDAjfS+Ccx0Bbg==} + dependencies: + lodash: 4.17.21 + dev: true + + /async@2.6.3: + resolution: {integrity: sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==} + dependencies: + lodash: 4.17.21 + dev: true + + /asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + dev: true + + /at-least-node@1.0.0: + resolution: {integrity: sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==} + engines: {node: '>= 4.0.0'} + dev: true + + /atob@2.1.2: + resolution: {integrity: sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==} + engines: {node: '>= 4.5.0'} + hasBin: true + dev: true + + /available-typed-arrays@1.0.5: + resolution: {integrity: sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==} + engines: {node: '>= 0.4'} + dev: true + + /aws-sign2@0.7.0: + resolution: {integrity: 
sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==} + dev: true + + /aws4@1.11.0: + resolution: {integrity: sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==} + dev: true + + /axios@0.21.4(debug@4.3.4): + resolution: {integrity: sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg==} + dependencies: + follow-redirects: 1.15.2(debug@4.3.4) + transitivePeerDependencies: + - debug + dev: true + + /axios@1.6.2(debug@4.3.4): + resolution: {integrity: sha512-7i24Ri4pmDRfJTR7LDBhsOTtcm+9kjX5WiY1X3wIisx6G9So3pfMkEiU7emUBe46oceVImccTEM3k6C5dbVW8A==} + dependencies: + follow-redirects: 1.15.2(debug@4.3.4) + form-data: 4.0.0 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + dev: true + + /babel-code-frame@6.26.0: + resolution: {integrity: sha512-XqYMR2dfdGMW+hd0IUZ2PwK+fGeFkOxZJ0wY+JaQAHzt1Zx8LcvpiZD2NiGkEG8qx0CfkAOr5xt76d1e8vG90g==} + dependencies: + chalk: 1.1.3 + esutils: 2.0.3 + js-tokens: 3.0.2 + dev: true + + /babel-core@6.26.3: + resolution: {integrity: sha512-6jyFLuDmeidKmUEb3NM+/yawG0M2bDZ9Z1qbZP59cyHLz8kYGKYwpJP0UwUKKUiTRNvxfLesJnTedqczP7cTDA==} + dependencies: + babel-code-frame: 6.26.0 + babel-generator: 6.26.1 + babel-helpers: 6.24.1 + babel-messages: 6.23.0 + babel-register: 6.26.0 + babel-runtime: 6.26.0 + babel-template: 6.26.0 + babel-traverse: 6.26.0 + babel-types: 6.26.0 + babylon: 6.18.0 + convert-source-map: 1.8.0 + debug: 2.6.9 + json5: 0.5.1 + lodash: 4.17.21 + minimatch: 3.1.2 + path-is-absolute: 1.0.1 + private: 0.1.8 + slash: 1.0.0 + source-map: 0.5.7 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-generator@6.26.1: + resolution: {integrity: sha512-HyfwY6ApZj7BYTcJURpM5tznulaBvyio7/0d4zFOeMPUmfxkCjHocCuoLa2SAGzBI8AREcH3eP3758F672DppA==} + dependencies: + babel-messages: 6.23.0 + babel-runtime: 6.26.0 + babel-types: 6.26.0 + detect-indent: 4.0.0 + jsesc: 1.3.0 + lodash: 4.17.21 + 
source-map: 0.5.7 + trim-right: 1.0.1 + dev: true + + /babel-helper-builder-binary-assignment-operator-visitor@6.24.1: + resolution: {integrity: sha512-gCtfYORSG1fUMX4kKraymq607FWgMWg+j42IFPc18kFQEsmtaibP4UrqsXt8FlEJle25HUd4tsoDR7H2wDhe9Q==} + dependencies: + babel-helper-explode-assignable-expression: 6.24.1 + babel-runtime: 6.26.0 + babel-types: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-helper-call-delegate@6.24.1: + resolution: {integrity: sha512-RL8n2NiEj+kKztlrVJM9JT1cXzzAdvWFh76xh/H1I4nKwunzE4INBXn8ieCZ+wh4zWszZk7NBS1s/8HR5jDkzQ==} + dependencies: + babel-helper-hoist-variables: 6.24.1 + babel-runtime: 6.26.0 + babel-traverse: 6.26.0 + babel-types: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-helper-define-map@6.26.0: + resolution: {integrity: sha512-bHkmjcC9lM1kmZcVpA5t2om2nzT/xiZpo6TJq7UlZ3wqKfzia4veeXbIhKvJXAMzhhEBd3cR1IElL5AenWEUpA==} + dependencies: + babel-helper-function-name: 6.24.1 + babel-runtime: 6.26.0 + babel-types: 6.26.0 + lodash: 4.17.21 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-helper-explode-assignable-expression@6.24.1: + resolution: {integrity: sha512-qe5csbhbvq6ccry9G7tkXbzNtcDiH4r51rrPUbwwoTzZ18AqxWYRZT6AOmxrpxKnQBW0pYlBI/8vh73Z//78nQ==} + dependencies: + babel-runtime: 6.26.0 + babel-traverse: 6.26.0 + babel-types: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-helper-function-name@6.24.1: + resolution: {integrity: sha512-Oo6+e2iX+o9eVvJ9Y5eKL5iryeRdsIkwRYheCuhYdVHsdEQysbc2z2QkqCLIYnNxkT5Ss3ggrHdXiDI7Dhrn4Q==} + dependencies: + babel-helper-get-function-arity: 6.24.1 + babel-runtime: 6.26.0 + babel-template: 6.26.0 + babel-traverse: 6.26.0 + babel-types: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-helper-get-function-arity@6.24.1: + resolution: {integrity: sha512-WfgKFX6swFB1jS2vo+DwivRN4NB8XUdM3ij0Y1gnC21y1tdBoe6xjVnd7NSI6alv+gZXCtJqvrTeMW3fR/c0ng==} + dependencies: + 
babel-runtime: 6.26.0 + babel-types: 6.26.0 + dev: true + + /babel-helper-hoist-variables@6.24.1: + resolution: {integrity: sha512-zAYl3tqerLItvG5cKYw7f1SpvIxS9zi7ohyGHaI9cgDUjAT6YcY9jIEH5CstetP5wHIVSceXwNS7Z5BpJg+rOw==} + dependencies: + babel-runtime: 6.26.0 + babel-types: 6.26.0 + dev: true + + /babel-helper-optimise-call-expression@6.24.1: + resolution: {integrity: sha512-Op9IhEaxhbRT8MDXx2iNuMgciu2V8lDvYCNQbDGjdBNCjaMvyLf4wl4A3b8IgndCyQF8TwfgsQ8T3VD8aX1/pA==} + dependencies: + babel-runtime: 6.26.0 + babel-types: 6.26.0 + dev: true + + /babel-helper-regex@6.26.0: + resolution: {integrity: sha512-VlPiWmqmGJp0x0oK27Out1D+71nVVCTSdlbhIVoaBAj2lUgrNjBCRR9+llO4lTSb2O4r7PJg+RobRkhBrf6ofg==} + dependencies: + babel-runtime: 6.26.0 + babel-types: 6.26.0 + lodash: 4.17.21 + dev: true + + /babel-helper-remap-async-to-generator@6.24.1: + resolution: {integrity: sha512-RYqaPD0mQyQIFRu7Ho5wE2yvA/5jxqCIj/Lv4BXNq23mHYu/vxikOy2JueLiBxQknwapwrJeNCesvY0ZcfnlHg==} + dependencies: + babel-helper-function-name: 6.24.1 + babel-runtime: 6.26.0 + babel-template: 6.26.0 + babel-traverse: 6.26.0 + babel-types: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-helper-replace-supers@6.24.1: + resolution: {integrity: sha512-sLI+u7sXJh6+ToqDr57Bv973kCepItDhMou0xCP2YPVmR1jkHSCY+p1no8xErbV1Siz5QE8qKT1WIwybSWlqjw==} + dependencies: + babel-helper-optimise-call-expression: 6.24.1 + babel-messages: 6.23.0 + babel-runtime: 6.26.0 + babel-template: 6.26.0 + babel-traverse: 6.26.0 + babel-types: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-helpers@6.24.1: + resolution: {integrity: sha512-n7pFrqQm44TCYvrCDb0MqabAF+JUBq+ijBvNMUxpkLjJaAu32faIexewMumrH5KLLJ1HDyT0PTEqRyAe/GwwuQ==} + dependencies: + babel-runtime: 6.26.0 + babel-template: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-messages@6.23.0: + resolution: {integrity: 
sha512-Bl3ZiA+LjqaMtNYopA9TYE9HP1tQ+E5dLxE0XrAzcIJeK2UqF0/EaqXwBn9esd4UmTfEab+P+UYQ1GnioFIb/w==} + dependencies: + babel-runtime: 6.26.0 + dev: true + + /babel-plugin-check-es2015-constants@6.22.0: + resolution: {integrity: sha512-B1M5KBP29248dViEo1owyY32lk1ZSH2DaNNrXLGt8lyjjHm7pBqAdQ7VKUPR6EEDO323+OvT3MQXbCin8ooWdA==} + dependencies: + babel-runtime: 6.26.0 + dev: true + + /babel-plugin-syntax-async-functions@6.13.0: + resolution: {integrity: sha512-4Zp4unmHgw30A1eWI5EpACji2qMocisdXhAftfhXoSV9j0Tvj6nRFE3tOmRY912E0FMRm/L5xWE7MGVT2FoLnw==} + dev: true + + /babel-plugin-syntax-exponentiation-operator@6.13.0: + resolution: {integrity: sha512-Z/flU+T9ta0aIEKl1tGEmN/pZiI1uXmCiGFRegKacQfEJzp7iNsKloZmyJlQr+75FCJtiFfGIK03SiCvCt9cPQ==} + dev: true + + /babel-plugin-syntax-trailing-function-commas@6.22.0: + resolution: {integrity: sha512-Gx9CH3Q/3GKbhs07Bszw5fPTlU+ygrOGfAhEt7W2JICwufpC4SuO0mG0+4NykPBSYPMJhqvVlDBU17qB1D+hMQ==} + dev: true + + /babel-plugin-transform-async-to-generator@6.24.1: + resolution: {integrity: sha512-7BgYJujNCg0Ti3x0c/DL3tStvnKS6ktIYOmo9wginv/dfZOrbSZ+qG4IRRHMBOzZ5Awb1skTiAsQXg/+IWkZYw==} + dependencies: + babel-helper-remap-async-to-generator: 6.24.1 + babel-plugin-syntax-async-functions: 6.13.0 + babel-runtime: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-transform-es2015-arrow-functions@6.22.0: + resolution: {integrity: sha512-PCqwwzODXW7JMrzu+yZIaYbPQSKjDTAsNNlK2l5Gg9g4rz2VzLnZsStvp/3c46GfXpwkyufb3NCyG9+50FF1Vg==} + dependencies: + babel-runtime: 6.26.0 + dev: true + + /babel-plugin-transform-es2015-block-scoped-functions@6.22.0: + resolution: {integrity: sha512-2+ujAT2UMBzYFm7tidUsYh+ZoIutxJ3pN9IYrF1/H6dCKtECfhmB8UkHVpyxDwkj0CYbQG35ykoz925TUnBc3A==} + dependencies: + babel-runtime: 6.26.0 + dev: true + + /babel-plugin-transform-es2015-block-scoping@6.26.0: + resolution: {integrity: sha512-YiN6sFAQ5lML8JjCmr7uerS5Yc/EMbgg9G8ZNmk2E3nYX4ckHR01wrkeeMijEf5WHNK5TW0Sl0Uu3pv3EdOJWw==} + dependencies: + 
babel-runtime: 6.26.0 + babel-template: 6.26.0 + babel-traverse: 6.26.0 + babel-types: 6.26.0 + lodash: 4.17.21 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-transform-es2015-classes@6.24.1: + resolution: {integrity: sha512-5Dy7ZbRinGrNtmWpquZKZ3EGY8sDgIVB4CU8Om8q8tnMLrD/m94cKglVcHps0BCTdZ0TJeeAWOq2TK9MIY6cag==} + dependencies: + babel-helper-define-map: 6.26.0 + babel-helper-function-name: 6.24.1 + babel-helper-optimise-call-expression: 6.24.1 + babel-helper-replace-supers: 6.24.1 + babel-messages: 6.23.0 + babel-runtime: 6.26.0 + babel-template: 6.26.0 + babel-traverse: 6.26.0 + babel-types: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-transform-es2015-computed-properties@6.24.1: + resolution: {integrity: sha512-C/uAv4ktFP/Hmh01gMTvYvICrKze0XVX9f2PdIXuriCSvUmV9j+u+BB9f5fJK3+878yMK6dkdcq+Ymr9mrcLzw==} + dependencies: + babel-runtime: 6.26.0 + babel-template: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-transform-es2015-destructuring@6.23.0: + resolution: {integrity: sha512-aNv/GDAW0j/f4Uy1OEPZn1mqD+Nfy9viFGBfQ5bZyT35YqOiqx7/tXdyfZkJ1sC21NyEsBdfDY6PYmLHF4r5iA==} + dependencies: + babel-runtime: 6.26.0 + dev: true + + /babel-plugin-transform-es2015-duplicate-keys@6.24.1: + resolution: {integrity: sha512-ossocTuPOssfxO2h+Z3/Ea1Vo1wWx31Uqy9vIiJusOP4TbF7tPs9U0sJ9pX9OJPf4lXRGj5+6Gkl/HHKiAP5ug==} + dependencies: + babel-runtime: 6.26.0 + babel-types: 6.26.0 + dev: true + + /babel-plugin-transform-es2015-for-of@6.23.0: + resolution: {integrity: sha512-DLuRwoygCoXx+YfxHLkVx5/NpeSbVwfoTeBykpJK7JhYWlL/O8hgAK/reforUnZDlxasOrVPPJVI/guE3dCwkw==} + dependencies: + babel-runtime: 6.26.0 + dev: true + + /babel-plugin-transform-es2015-function-name@6.24.1: + resolution: {integrity: sha512-iFp5KIcorf11iBqu/y/a7DK3MN5di3pNCzto61FqCNnUX4qeBwcV1SLqe10oXNnCaxBUImX3SckX2/o1nsrTcg==} + dependencies: + babel-helper-function-name: 6.24.1 + babel-runtime: 6.26.0 + babel-types: 
6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-transform-es2015-literals@6.22.0: + resolution: {integrity: sha512-tjFl0cwMPpDYyoqYA9li1/7mGFit39XiNX5DKC/uCNjBctMxyL1/PT/l4rSlbvBG1pOKI88STRdUsWXB3/Q9hQ==} + dependencies: + babel-runtime: 6.26.0 + dev: true + + /babel-plugin-transform-es2015-modules-amd@6.24.1: + resolution: {integrity: sha512-LnIIdGWIKdw7zwckqx+eGjcS8/cl8D74A3BpJbGjKTFFNJSMrjN4bIh22HY1AlkUbeLG6X6OZj56BDvWD+OeFA==} + dependencies: + babel-plugin-transform-es2015-modules-commonjs: 6.26.2 + babel-runtime: 6.26.0 + babel-template: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-transform-es2015-modules-commonjs@6.26.2: + resolution: {integrity: sha512-CV9ROOHEdrjcwhIaJNBGMBCodN+1cfkwtM1SbUHmvyy35KGT7fohbpOxkE2uLz1o6odKK2Ck/tz47z+VqQfi9Q==} + dependencies: + babel-plugin-transform-strict-mode: 6.24.1 + babel-runtime: 6.26.0 + babel-template: 6.26.0 + babel-types: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-transform-es2015-modules-systemjs@6.24.1: + resolution: {integrity: sha512-ONFIPsq8y4bls5PPsAWYXH/21Hqv64TBxdje0FvU3MhIV6QM2j5YS7KvAzg/nTIVLot2D2fmFQrFWCbgHlFEjg==} + dependencies: + babel-helper-hoist-variables: 6.24.1 + babel-runtime: 6.26.0 + babel-template: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-transform-es2015-modules-umd@6.24.1: + resolution: {integrity: sha512-LpVbiT9CLsuAIp3IG0tfbVo81QIhn6pE8xBJ7XSeCtFlMltuar5VuBV6y6Q45tpui9QWcy5i0vLQfCfrnF7Kiw==} + dependencies: + babel-plugin-transform-es2015-modules-amd: 6.24.1 + babel-runtime: 6.26.0 + babel-template: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-transform-es2015-object-super@6.24.1: + resolution: {integrity: sha512-8G5hpZMecb53vpD3mjs64NhI1au24TAmokQ4B+TBFBjN9cVoGoOvotdrMMRmHvVZUEvqGUPWL514woru1ChZMA==} + dependencies: + babel-helper-replace-supers: 6.24.1 + babel-runtime: 6.26.0 + 
transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-transform-es2015-parameters@6.24.1: + resolution: {integrity: sha512-8HxlW+BB5HqniD+nLkQ4xSAVq3bR/pcYW9IigY+2y0dI+Y7INFeTbfAQr+63T3E4UDsZGjyb+l9txUnABWxlOQ==} + dependencies: + babel-helper-call-delegate: 6.24.1 + babel-helper-get-function-arity: 6.24.1 + babel-runtime: 6.26.0 + babel-template: 6.26.0 + babel-traverse: 6.26.0 + babel-types: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-transform-es2015-shorthand-properties@6.24.1: + resolution: {integrity: sha512-mDdocSfUVm1/7Jw/FIRNw9vPrBQNePy6wZJlR8HAUBLybNp1w/6lr6zZ2pjMShee65t/ybR5pT8ulkLzD1xwiw==} + dependencies: + babel-runtime: 6.26.0 + babel-types: 6.26.0 + dev: true + + /babel-plugin-transform-es2015-spread@6.22.0: + resolution: {integrity: sha512-3Ghhi26r4l3d0Js933E5+IhHwk0A1yiutj9gwvzmFbVV0sPMYk2lekhOufHBswX7NCoSeF4Xrl3sCIuSIa+zOg==} + dependencies: + babel-runtime: 6.26.0 + dev: true + + /babel-plugin-transform-es2015-sticky-regex@6.24.1: + resolution: {integrity: sha512-CYP359ADryTo3pCsH0oxRo/0yn6UsEZLqYohHmvLQdfS9xkf+MbCzE3/Kolw9OYIY4ZMilH25z/5CbQbwDD+lQ==} + dependencies: + babel-helper-regex: 6.26.0 + babel-runtime: 6.26.0 + babel-types: 6.26.0 + dev: true + + /babel-plugin-transform-es2015-template-literals@6.22.0: + resolution: {integrity: sha512-x8b9W0ngnKzDMHimVtTfn5ryimars1ByTqsfBDwAqLibmuuQY6pgBQi5z1ErIsUOWBdw1bW9FSz5RZUojM4apg==} + dependencies: + babel-runtime: 6.26.0 + dev: true + + /babel-plugin-transform-es2015-typeof-symbol@6.23.0: + resolution: {integrity: sha512-fz6J2Sf4gYN6gWgRZaoFXmq93X+Li/8vf+fb0sGDVtdeWvxC9y5/bTD7bvfWMEq6zetGEHpWjtzRGSugt5kNqw==} + dependencies: + babel-runtime: 6.26.0 + dev: true + + /babel-plugin-transform-es2015-unicode-regex@6.24.1: + resolution: {integrity: sha512-v61Dbbihf5XxnYjtBN04B/JBvsScY37R1cZT5r9permN1cp+b70DY3Ib3fIkgn1DI9U3tGgBJZVD8p/mE/4JbQ==} + dependencies: + babel-helper-regex: 6.26.0 + babel-runtime: 6.26.0 + regexpu-core: 2.0.0 + 
dev: true + + /babel-plugin-transform-exponentiation-operator@6.24.1: + resolution: {integrity: sha512-LzXDmbMkklvNhprr20//RStKVcT8Cu+SQtX18eMHLhjHf2yFzwtQ0S2f0jQ+89rokoNdmwoSqYzAhq86FxlLSQ==} + dependencies: + babel-helper-builder-binary-assignment-operator-visitor: 6.24.1 + babel-plugin-syntax-exponentiation-operator: 6.13.0 + babel-runtime: 6.26.0 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-plugin-transform-regenerator@6.26.0: + resolution: {integrity: sha512-LS+dBkUGlNR15/5WHKe/8Neawx663qttS6AGqoOUhICc9d1KciBvtrQSuc0PI+CxQ2Q/S1aKuJ+u64GtLdcEZg==} + dependencies: + regenerator-transform: 0.10.1 + dev: true + + /babel-plugin-transform-strict-mode@6.24.1: + resolution: {integrity: sha512-j3KtSpjyLSJxNoCDrhwiJad8kw0gJ9REGj8/CqL0HeRyLnvUNYV9zcqluL6QJSXh3nfsLEmSLvwRfGzrgR96Pw==} + dependencies: + babel-runtime: 6.26.0 + babel-types: 6.26.0 + dev: true + + /babel-preset-env@1.7.0: + resolution: {integrity: sha512-9OR2afuKDneX2/q2EurSftUYM0xGu4O2D9adAhVfADDhrYDaxXV0rBbevVYoY9n6nyX1PmQW/0jtpJvUNr9CHg==} + dependencies: + babel-plugin-check-es2015-constants: 6.22.0 + babel-plugin-syntax-trailing-function-commas: 6.22.0 + babel-plugin-transform-async-to-generator: 6.24.1 + babel-plugin-transform-es2015-arrow-functions: 6.22.0 + babel-plugin-transform-es2015-block-scoped-functions: 6.22.0 + babel-plugin-transform-es2015-block-scoping: 6.26.0 + babel-plugin-transform-es2015-classes: 6.24.1 + babel-plugin-transform-es2015-computed-properties: 6.24.1 + babel-plugin-transform-es2015-destructuring: 6.23.0 + babel-plugin-transform-es2015-duplicate-keys: 6.24.1 + babel-plugin-transform-es2015-for-of: 6.23.0 + babel-plugin-transform-es2015-function-name: 6.24.1 + babel-plugin-transform-es2015-literals: 6.22.0 + babel-plugin-transform-es2015-modules-amd: 6.24.1 + babel-plugin-transform-es2015-modules-commonjs: 6.26.2 + babel-plugin-transform-es2015-modules-systemjs: 6.24.1 + babel-plugin-transform-es2015-modules-umd: 6.24.1 + 
babel-plugin-transform-es2015-object-super: 6.24.1 + babel-plugin-transform-es2015-parameters: 6.24.1 + babel-plugin-transform-es2015-shorthand-properties: 6.24.1 + babel-plugin-transform-es2015-spread: 6.22.0 + babel-plugin-transform-es2015-sticky-regex: 6.24.1 + babel-plugin-transform-es2015-template-literals: 6.22.0 + babel-plugin-transform-es2015-typeof-symbol: 6.23.0 + babel-plugin-transform-es2015-unicode-regex: 6.24.1 + babel-plugin-transform-exponentiation-operator: 6.24.1 + babel-plugin-transform-regenerator: 6.26.0 + browserslist: 3.2.8 + invariant: 2.2.4 + semver: 5.7.1 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-register@6.26.0: + resolution: {integrity: sha512-veliHlHX06wjaeY8xNITbveXSiI+ASFnOqvne/LaIJIqOWi2Ogmj91KOugEz/hoh/fwMhXNBJPCv8Xaz5CyM4A==} + dependencies: + babel-core: 6.26.3 + babel-runtime: 6.26.0 + core-js: 2.6.12 + home-or-tmp: 2.0.0 + lodash: 4.17.21 + mkdirp: 0.5.6 + source-map-support: 0.4.18 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-runtime@6.26.0: + resolution: {integrity: sha512-ITKNuq2wKlW1fJg9sSW52eepoYgZBggvOAHC0u/CYu/qxQ9EVzThCgR69BnSXLHjy2f7SY5zaQ4yt7H9ZVxY2g==} + dependencies: + core-js: 2.6.12 + regenerator-runtime: 0.11.1 + dev: true + + /babel-template@6.26.0: + resolution: {integrity: sha512-PCOcLFW7/eazGUKIoqH97sO9A2UYMahsn/yRQ7uOk37iutwjq7ODtcTNF+iFDSHNfkctqsLRjLP7URnOx0T1fg==} + dependencies: + babel-runtime: 6.26.0 + babel-traverse: 6.26.0 + babel-types: 6.26.0 + babylon: 6.18.0 + lodash: 4.17.21 + transitivePeerDependencies: + - supports-color + dev: true + + /babel-traverse@6.26.0: + resolution: {integrity: sha512-iSxeXx7apsjCHe9c7n8VtRXGzI2Bk1rBSOJgCCjfyXb6v1aCqE1KSEpq/8SXuVN8Ka/Rh1WDTF0MDzkvTA4MIA==} + dependencies: + babel-code-frame: 6.26.0 + babel-messages: 6.23.0 + babel-runtime: 6.26.0 + babel-types: 6.26.0 + babylon: 6.18.0 + debug: 2.6.9 + globals: 9.18.0 + invariant: 2.2.4 + lodash: 4.17.21 + transitivePeerDependencies: + - supports-color + dev: true 
+ + /babel-types@6.26.0: + resolution: {integrity: sha512-zhe3V/26rCWsEZK8kZN+HaQj5yQ1CilTObixFzKW1UWjqG7618Twz6YEsCnjfg5gBcJh02DrpCkS9h98ZqDY+g==} + dependencies: + babel-runtime: 6.26.0 + esutils: 2.0.3 + lodash: 4.17.21 + to-fast-properties: 1.0.3 + dev: true + + /babelify@7.3.0: + resolution: {integrity: sha512-vID8Fz6pPN5pJMdlUnNFSfrlcx5MUule4k9aKs/zbZPyXxMTcRrB0M4Tarw22L8afr8eYSWxDPYCob3TdrqtlA==} + dependencies: + babel-core: 6.26.3 + object-assign: 4.1.1 + transitivePeerDependencies: + - supports-color + dev: true + + /babylon@6.18.0: + resolution: {integrity: sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==} + hasBin: true + dev: true + + /backoff@2.5.0: + resolution: {integrity: sha512-wC5ihrnUXmR2douXmXLCe5O3zg3GKIyvRi/hi58a/XyRxVI+3/yM0PYueQOZXPXQ9pxBislYkw+sF9b7C/RuMA==} + engines: {node: '>= 0.6'} + dependencies: + precond: 0.2.3 + dev: true + + /balanced-match@1.0.0: + resolution: {integrity: sha512-9Y0g0Q8rmSt+H33DfKv7FOc3v+iRI+o1lbzt8jGcIosYW37IIW/2XVYq5NPdmaD5NQ59Nk26Kl/vZbwW9Fr8vg==} + dev: true + + /base-x@3.0.9: + resolution: {integrity: sha512-H7JU6iBHTal1gp56aKoaa//YUxEaAOUiydvrV/pILqIHXTtqxSkATOnDA2u+jZ/61sD+L/412+7kzXRtWukhpQ==} + dependencies: + safe-buffer: 5.2.1 + dev: true + + /base64-js@1.5.1: + resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} + dev: true + + /base@0.11.2: + resolution: {integrity: sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==} + engines: {node: '>=0.10.0'} + dependencies: + cache-base: 1.0.1 + class-utils: 0.3.6 + component-emitter: 1.3.0 + define-property: 1.0.0 + isobject: 3.0.1 + mixin-deep: 1.3.2 + pascalcase: 0.1.1 + dev: true + + /bcrypt-pbkdf@1.0.2: + resolution: {integrity: sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==} + dependencies: + tweetnacl: 0.14.5 + dev: true + + /bech32@1.1.4: + 
resolution: {integrity: sha512-s0IrSOzLlbvX7yp4WBfPITzpAU8sqQcpsmwXDiKwrG4r491vwCO/XpejasRNl0piBMe/DvP4Tz0mIS/X1DPJBQ==} + + /better-ajv-errors@0.8.2(ajv@6.12.6): + resolution: {integrity: sha512-FnODTBJSQSHmJXDLPiC7ca0dC4S1HSTPv1+Hg2sm/C71i3Dj0l1jcUEaq/3OQ6MmnUveshTsUvUj65pDSr3Qow==} + peerDependencies: + ajv: 4.11.8 - 8 + dependencies: + '@babel/code-frame': 7.18.6 + '@babel/runtime': 7.19.0 + ajv: 6.12.6 + chalk: 2.4.2 + core-js: 3.30.1 + json-to-ast: 2.1.0 + jsonpointer: 5.0.1 + leven: 3.1.0 + dev: true + + /big-integer@1.6.36: + resolution: {integrity: sha512-t70bfa7HYEA1D9idDbmuv7YbsbVkQ+Hp+8KFSul4aE5e/i1bjCNIRYJZlA8Q8p0r9T8cF/RVvwUgRA//FydEyg==} + engines: {node: '>=0.6'} + dev: true + + /big.js@6.2.1: + resolution: {integrity: sha512-bCtHMwL9LeDIozFn+oNhhFoq+yQ3BNdnsLSASUxLciOb1vgvpHsIO1dsENiGMgbb4SkP5TrzWzRiLddn8ahVOQ==} + dev: true + + /bigint-crypto-utils@3.1.8: + resolution: {integrity: sha512-+VMV9Laq8pXLBKKKK49nOoq9bfR3j7NNQAtbA617a4nw9bVLo8rsqkKMBgM2AJWlNX9fEIyYaYX+d0laqYV4tw==} + engines: {node: '>=10.4.0'} + dependencies: + bigint-mod-arith: 3.1.2 + dev: true + + /bigint-mod-arith@3.1.2: + resolution: {integrity: sha512-nx8J8bBeiRR+NlsROFH9jHswW5HO8mgfOSqW0AmjicMMvaONDa8AO+5ViKDUUNytBPWiwfvZP4/Bj4Y3lUfvgQ==} + engines: {node: '>=10.4.0'} + dev: true + + /bignumber.js@7.2.1: + resolution: {integrity: sha512-S4XzBk5sMB+Rcb/LNcpzXr57VRTxgAvaAEDAl1AwRx27j00hT84O6OkteE7u8UB3NuaaygCRrEpqox4uDOrbdQ==} + dev: true + + /bignumber.js@9.1.0: + resolution: {integrity: sha512-4LwHK4nfDOraBCtst+wOWIHbu1vhvAPJK8g8nROd4iuc3PSEjWif/qwbkh8jwCJz6yDBvtU4KPynETgrfh7y3A==} + dev: true + + /binary-extensions@2.2.0: + resolution: {integrity: sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==} + engines: {node: '>=8'} + dev: true + + /bindings@1.5.0: + resolution: {integrity: sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==} + dependencies: + file-uri-to-path: 1.0.0 + dev: true + + 
/bip39@2.5.0: + resolution: {integrity: sha512-xwIx/8JKoT2+IPJpFEfXoWdYwP7UVAoUxxLNfGCfVowaJE7yg1Y5B1BVPqlUNsBq5/nGwmFkwRJ8xDW4sX8OdA==} + dependencies: + create-hash: 1.2.0 + pbkdf2: 3.1.2 + randombytes: 2.1.0 + safe-buffer: 5.2.1 + unorm: 1.6.0 + dev: true + + /bip66@1.1.5: + resolution: {integrity: sha512-nemMHz95EmS38a26XbbdxIYj5csHd3RMP3H5bwQknX0WYHF01qhpufP42mLOwVICuH2JmhIhXiWs89MfUGL7Xw==} + dependencies: + safe-buffer: 5.2.1 + dev: true + + /blakejs@1.2.1: + resolution: {integrity: sha512-QXUSXI3QVc/gJME0dBpXrag1kbzOqCjCX8/b54ntNyW6sjtoqxqRk3LTmXzaJoh71zMsDCjM+47jS7XiwN/+fQ==} + dev: true + + /bluebird@3.7.2: + resolution: {integrity: sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==} + dev: true + + /bn.js@4.11.6: + resolution: {integrity: sha512-XWwnNNFCuuSQ0m3r3C4LE3EiORltHd9M05pq6FOlVeiophzRbMo50Sbz1ehl8K3Z+jw9+vmgnXefY1hz8X+2wA==} + dev: true + + /bn.js@4.12.0: + resolution: {integrity: sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==} + + /bn.js@5.2.1: + resolution: {integrity: sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==} + + /body-parser@1.19.0: + resolution: {integrity: sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==} + engines: {node: '>= 0.8'} + dependencies: + bytes: 3.1.0 + content-type: 1.0.4 + debug: 2.6.9 + depd: 1.1.2 + http-errors: 1.7.2 + iconv-lite: 0.4.24 + on-finished: 2.3.0 + qs: 6.7.0 + raw-body: 2.4.0 + type-is: 1.6.18 + transitivePeerDependencies: + - supports-color + dev: true + + /boolbase@1.0.0: + resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} + dev: true + + /brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + dependencies: + balanced-match: 1.0.0 + concat-map: 0.0.1 + 
dev: true + + /brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + dependencies: + balanced-match: 1.0.0 + dev: true + + /braces@2.3.2: + resolution: {integrity: sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==} + engines: {node: '>=0.10.0'} + dependencies: + arr-flatten: 1.1.0 + array-unique: 0.3.2 + extend-shallow: 2.0.1 + fill-range: 4.0.0 + isobject: 3.0.1 + repeat-element: 1.1.4 + snapdragon: 0.8.2 + snapdragon-node: 2.1.1 + split-string: 3.1.0 + to-regex: 3.0.2 + transitivePeerDependencies: + - supports-color + dev: true + + /braces@3.0.2: + resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==} + engines: {node: '>=8'} + dependencies: + fill-range: 7.0.1 + dev: true + + /brorand@1.1.0: + resolution: {integrity: sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w==} + + /browser-level@1.0.1: + resolution: {integrity: sha512-XECYKJ+Dbzw0lbydyQuJzwNXtOpbMSq737qxJN11sIRTErOMShvDpbzTlgju7orJKvx4epULolZAuJGLzCmWRQ==} + dependencies: + abstract-level: 1.0.3 + catering: 2.1.1 + module-error: 1.0.2 + run-parallel-limit: 1.1.0 + dev: true + + /browser-stdout@1.3.1: + resolution: {integrity: sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==} + dev: true + + /browserify-aes@1.2.0: + resolution: {integrity: sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==} + dependencies: + buffer-xor: 1.0.3 + cipher-base: 1.0.4 + create-hash: 1.2.0 + evp_bytestokey: 1.0.3 + inherits: 2.0.4 + safe-buffer: 5.2.1 + dev: true + + /browserify-cipher@1.0.1: + resolution: {integrity: sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==} + dependencies: + browserify-aes: 1.2.0 + browserify-des: 1.0.2 + 
evp_bytestokey: 1.0.3 + dev: true + + /browserify-des@1.0.2: + resolution: {integrity: sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==} + dependencies: + cipher-base: 1.0.4 + des.js: 1.0.1 + inherits: 2.0.4 + safe-buffer: 5.2.1 + dev: true + + /browserify-rsa@4.0.1: + resolution: {integrity: sha512-+YpEyaLDDvvdzIxQ+cCx73r5YEhS3ANGOkiHdyWqW4t3gdeoNEYjSiQwntbU4Uo2/9yRkpYX3SRFeH+7jc2Duw==} + dependencies: + bn.js: 4.12.0 + randombytes: 2.1.0 + dev: true + + /browserify-sha3@0.0.4: + resolution: {integrity: sha512-WmXX4M8lltqzMnBiPbP9KQdITknmxe4Wp3rhGfpYJst5yOeGwKkHpC0t+Ty22laH4Ltg9YO+p14p93wiipqjxA==} + dependencies: + js-sha3: 0.6.1 + safe-buffer: 5.2.1 + dev: true + + /browserify-sign@4.0.4: + resolution: {integrity: sha512-D2ItxCwNtLcHRrOCuEDZQlIezlFyUV/N5IYz6TY1svu1noyThFuthoEjzT8ChZe3UEctqnwmykcPhet3Eiz58A==} + dependencies: + bn.js: 4.12.0 + browserify-rsa: 4.0.1 + create-hash: 1.2.0 + create-hmac: 1.1.7 + elliptic: 6.5.4 + inherits: 2.0.4 + parse-asn1: 5.1.5 + dev: true + + /browserslist@3.2.8: + resolution: {integrity: sha512-WHVocJYavUwVgVViC0ORikPHQquXwVh939TaelZ4WDqpWgTX/FsGhl/+P4qBUAGcRvtOgDgC+xftNWWp2RUTAQ==} + hasBin: true + dependencies: + caniuse-lite: 1.0.30001414 + electron-to-chromium: 1.4.270 + dev: true + + /bs58@4.0.1: + resolution: {integrity: sha512-Ok3Wdf5vOIlBrgCvTq96gBkJw+JUEzdBgyaza5HLtPm7yTHkjRy8+JzNyHF7BHa0bNWOQIp3m5YF0nnFcOIKLw==} + dependencies: + base-x: 3.0.9 + dev: true + + /bs58check@2.1.2: + resolution: {integrity: sha512-0TS1jicxdU09dwJMNZtVAfzPi6Q6QeN0pM1Fkzrjn+XYHvzMKPU3pHVpva+769iNVSfIYWf7LJ6WR+BuuMf8cA==} + dependencies: + bs58: 4.0.1 + create-hash: 1.2.0 + safe-buffer: 5.2.1 + dev: true + + /buffer-from@1.1.2: + resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} + dev: true + + /buffer-to-arraybuffer@0.0.5: + resolution: {integrity: 
sha512-3dthu5CYiVB1DEJp61FtApNnNndTckcqe4pFcLdvHtrpG+kcyekCJKg4MRiDcFW7A6AODnXB9U4dwQiCW5kzJQ==} + dev: true + + /buffer-xor@1.0.3: + resolution: {integrity: sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ==} + dev: true + + /buffer-xor@2.0.2: + resolution: {integrity: sha512-eHslX0bin3GB+Lx2p7lEYRShRewuNZL3fUl4qlVJGGiwoPGftmt8JQgk2Y9Ji5/01TnVDo33E5b5O3vUB1HdqQ==} + dependencies: + safe-buffer: 5.2.1 + dev: true + + /buffer@4.9.2: + resolution: {integrity: sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==} + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + isarray: 1.0.0 + dev: true + + /buffer@5.7.1: + resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + dev: true + + /buffer@6.0.3: + resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + dev: true + + /bufferutil@4.0.6: + resolution: {integrity: sha512-jduaYOYtnio4aIAyc6UbvPCVcgq7nYpVnucyxr6eCYg/Woad9Hf/oxxBRDnGGjPfjUm6j5O/uBWhIu4iLebFaw==} + engines: {node: '>=6.14.2'} + requiresBuild: true + dependencies: + node-gyp-build: 4.5.0 + dev: true + + /bufio@1.0.7: + resolution: {integrity: sha512-bd1dDQhiC+bEbEfg56IdBv7faWa6OipMs/AFFFvtFnB3wAYjlwQpQRZ0pm6ZkgtfL0pILRXhKxOiQj6UzoMR7A==} + engines: {node: '>=8.0.0'} + dev: false + + /busboy@1.6.0: + resolution: {integrity: sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==} + engines: {node: '>=10.16.0'} + dependencies: + streamsearch: 1.1.0 + dev: true + + /bytes@3.1.0: + resolution: {integrity: sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==} + engines: {node: '>= 0.8'} + dev: true + + /bytes@3.1.2: + resolution: {integrity: 
sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} + engines: {node: '>= 0.8'} + dev: true + + /bytewise-core@1.2.3: + resolution: {integrity: sha512-nZD//kc78OOxeYtRlVk8/zXqTB4gf/nlguL1ggWA8FuchMyOxcyHR4QPQZMUmA7czC+YnaBrPUCubqAWe50DaA==} + dependencies: + typewise-core: 1.2.0 + dev: true + + /bytewise@1.1.0: + resolution: {integrity: sha512-rHuuseJ9iQ0na6UDhnrRVDh8YnWVlU6xM3VH6q/+yHDeUH2zIhUzP+2/h3LIrhLDBtTqzWpE3p3tP/boefskKQ==} + dependencies: + bytewise-core: 1.2.3 + typewise: 1.0.3 + dev: true + + /cache-base@1.0.1: + resolution: {integrity: sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==} + engines: {node: '>=0.10.0'} + dependencies: + collection-visit: 1.0.0 + component-emitter: 1.3.0 + get-value: 2.0.6 + has-value: 1.0.0 + isobject: 3.0.1 + set-value: 2.0.1 + to-object-path: 0.3.0 + union-value: 1.0.1 + unset-value: 1.0.0 + dev: true + + /cacheable-lookup@6.1.0: + resolution: {integrity: sha512-KJ/Dmo1lDDhmW2XDPMo+9oiy/CeqosPguPCrgcVzKyZrL6pM1gU2GmPY/xo6OQPTUaA/c0kwHuywB4E6nmT9ww==} + engines: {node: '>=10.6.0'} + dev: true + + /cacheable-request@6.1.0: + resolution: {integrity: sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==} + engines: {node: '>=8'} + dependencies: + clone-response: 1.0.2 + get-stream: 5.1.0 + http-cache-semantics: 4.0.3 + keyv: 3.1.0 + lowercase-keys: 2.0.0 + normalize-url: 4.5.1 + responselike: 1.0.2 + dev: true + + /cacheable-request@7.0.2: + resolution: {integrity: sha512-pouW8/FmiPQbuGpkXQ9BAPv/Mo5xDGANgSNXzTzJ8DrKGuXOssM4wIQRjfanNRh3Yu5cfYPvcorqbhg2KIJtew==} + engines: {node: '>=8'} + dependencies: + clone-response: 1.0.2 + get-stream: 5.1.0 + http-cache-semantics: 4.0.3 + keyv: 4.5.0 + lowercase-keys: 2.0.0 + normalize-url: 6.1.0 + responselike: 2.0.1 + dev: true + + /cachedown@1.0.0: + resolution: {integrity: 
sha512-t+yVk82vQWCJF3PsWHMld+jhhjkkWjcAzz8NbFx1iULOXWl8Tm/FdM4smZNVw3MRr0X+lVTx9PKzvEn4Ng19RQ==} + dependencies: + abstract-leveldown: 2.7.2 + lru-cache: 3.2.0 + dev: true + + /call-bind@1.0.2: + resolution: {integrity: sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==} + dependencies: + function-bind: 1.1.1 + get-intrinsic: 1.1.3 + dev: true + + /call-bind@1.0.5: + resolution: {integrity: sha512-C3nQxfFZxFRVoJoGKKI8y3MOEo129NQ+FgQ08iye+Mk4zNZZGdjfs06bVTr+DBSlA66Q2VEcMki/cUCP4SercQ==} + dependencies: + function-bind: 1.1.2 + get-intrinsic: 1.2.2 + set-function-length: 1.1.1 + dev: true + + /callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + dev: true + + /camel-case@3.0.0: + resolution: {integrity: sha512-+MbKztAYHXPr1jNTSKQF52VpcFjwY5RkR7fxksV8Doo4KAYc5Fl4UJRgthBbTmEx8C54DqahhbLJkDwjI3PI/w==} + dependencies: + no-case: 2.3.2 + upper-case: 1.1.3 + dev: true + + /camelcase@3.0.0: + resolution: {integrity: sha512-4nhGqUkc4BqbBBB4Q6zLuD7lzzrHYrjKGeYaEji/3tFR5VdJu9v+LilhGIVe8wxEJPPOeWo7eg8dwY13TZ1BNg==} + engines: {node: '>=0.10.0'} + dev: true + + /camelcase@4.1.0: + resolution: {integrity: sha512-FxAv7HpHrXbh3aPo4o2qxHay2lkLY3x5Mw3KeE4KQE8ysVfziWeRZDwcjauvwBSGEC/nXUPzZy8zeh4HokqOnw==} + engines: {node: '>=4'} + dev: true + + /camelcase@5.3.1: + resolution: {integrity: sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==} + engines: {node: '>=6'} + dev: true + + /camelcase@6.3.0: + resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} + engines: {node: '>=10'} + dev: true + + /caniuse-lite@1.0.30001414: + resolution: {integrity: sha512-t55jfSaWjCdocnFdKQoO+d2ct9C59UZg4dY3OnUlSZ447r8pUtIKdp0hpAzrGFultmTC+Us+KpKi4GZl/LXlFg==} + dev: true + + /case@1.6.3: + resolution: {integrity: 
sha512-mzDSXIPaFwVDvZAHqZ9VlbyF4yyXRuX6IvB06WvPYkqJVO24kX1PPhv9bfpKNFZyxYFmmgo03HUiD8iklmJYRQ==} + engines: {node: '>= 0.8.0'} + dev: true + + /caseless@0.12.0: + resolution: {integrity: sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==} + dev: true + + /catering@2.1.1: + resolution: {integrity: sha512-K7Qy8O9p76sL3/3m7/zLKbRkyOlSZAgzEaLhyj2mXS8PsCud2Eo4hAb8aLtZqHh0QGqLcb9dlJSu6lHRVENm1w==} + engines: {node: '>=6'} + dev: true + + /cbor@5.2.0: + resolution: {integrity: sha512-5IMhi9e1QU76ppa5/ajP1BmMWZ2FHkhAhjeVKQ/EFCgYSEaeVaoGtL7cxJskf9oCCk+XjzaIdc3IuU/dbA/o2A==} + engines: {node: '>=6.0.0'} + dependencies: + bignumber.js: 9.1.0 + nofilter: 1.0.4 + dev: true + + /cbor@8.1.0: + resolution: {integrity: sha512-DwGjNW9omn6EwP70aXsn7FQJx5kO12tX0bZkaTjzdVFM6/7nhA4t0EENocKGx6D2Bch9PE2KzCUf5SceBdeijg==} + engines: {node: '>=12.19'} + dependencies: + nofilter: 3.1.0 + dev: true + + /cbor@9.0.1: + resolution: {integrity: sha512-/TQOWyamDxvVIv+DY9cOLNuABkoyz8K/F3QE56539pGVYohx0+MEA1f4lChFTX79dBTBS7R1PF6ovH7G+VtBfQ==} + engines: {node: '>=16'} + dependencies: + nofilter: 3.1.0 + dev: true + + /chai-bn@0.2.2(bn.js@4.12.0)(chai@4.3.10): + resolution: {integrity: sha512-MzjelH0p8vWn65QKmEq/DLBG1Hle4WeyqT79ANhXZhn/UxRWO0OogkAxi5oGGtfzwU9bZR8mvbvYdoqNVWQwFg==} + peerDependencies: + bn.js: ^4.11.0 + chai: ^4.0.0 + dependencies: + bn.js: 4.12.0 + chai: 4.3.10 + dev: true + + /chai@4.3.10: + resolution: {integrity: sha512-0UXG04VuVbruMUYbJ6JctvH0YnC/4q3/AkT18q4NaITo91CUm0liMS9VqzT9vZhVQ/1eqPanMWjBM+Juhfb/9g==} + engines: {node: '>=4'} + dependencies: + assertion-error: 1.1.0 + check-error: 1.0.3 + deep-eql: 4.1.3 + get-func-name: 2.0.2 + loupe: 2.3.7 + pathval: 1.1.1 + type-detect: 4.0.8 + + /chalk@1.1.3: + resolution: {integrity: sha512-U3lRVLMSlsCfjqYPbLyVv11M9CPW4I728d6TCKMAOJueEeB9/8o+eSsMnxPJD+Q+K909sdESg7C+tIkoH6on1A==} + engines: {node: '>=0.10.0'} + dependencies: + ansi-styles: 2.2.1 + escape-string-regexp: 1.0.5 + has-ansi: 2.0.0 
+ strip-ansi: 3.0.1 + supports-color: 2.0.0 + dev: true + + /chalk@2.4.2: + resolution: {integrity: sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==} + engines: {node: '>=4'} + dependencies: + ansi-styles: 3.2.1 + escape-string-regexp: 1.0.5 + supports-color: 5.5.0 + dev: true + + /chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + dev: true + + /change-case@3.0.2: + resolution: {integrity: sha512-Mww+SLF6MZ0U6kdg11algyKd5BARbyM4TbFBepwowYSR5ClfQGCGtxNXgykpN0uF/bstWeaGDT4JWaDh8zWAHA==} + dependencies: + camel-case: 3.0.0 + constant-case: 2.0.0 + dot-case: 2.1.1 + header-case: 1.0.1 + is-lower-case: 1.1.3 + is-upper-case: 1.1.2 + lower-case: 1.1.4 + lower-case-first: 1.0.2 + no-case: 2.3.2 + param-case: 2.1.1 + pascal-case: 2.0.1 + path-case: 2.1.1 + sentence-case: 2.1.1 + snake-case: 2.1.0 + swap-case: 1.1.2 + title-case: 2.1.1 + upper-case: 1.1.3 + upper-case-first: 1.1.2 + dev: true + + /charenc@0.0.2: + resolution: {integrity: sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==} + dev: true + + /check-error@1.0.3: + resolution: {integrity: sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==} + dependencies: + get-func-name: 2.0.2 + + /checkpoint-store@1.1.0: + resolution: {integrity: sha512-J/NdY2WvIx654cc6LWSq/IYFFCUf75fFTgwzFnmbqyORH4MwgiQCgswLLKBGzmsyTI5V7i5bp/So6sMbDWhedg==} + dependencies: + functional-red-black-tree: 1.0.1 + dev: true + + /cheerio-select@2.1.0: + resolution: {integrity: sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==} + dependencies: + boolbase: 1.0.0 + css-select: 5.1.0 + css-what: 6.1.0 + domelementtype: 2.3.0 + domhandler: 5.0.3 + domutils: 3.0.1 + dev: true + + /cheerio@1.0.0-rc.12: + 
resolution: {integrity: sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==} + engines: {node: '>= 6'} + dependencies: + cheerio-select: 2.1.0 + dom-serializer: 2.0.0 + domhandler: 5.0.3 + domutils: 3.0.1 + htmlparser2: 8.0.1 + parse5: 7.1.1 + parse5-htmlparser2-tree-adapter: 7.0.0 + dev: true + + /chokidar@3.3.0: + resolution: {integrity: sha512-dGmKLDdT3Gdl7fBUe8XK+gAtGmzy5Fn0XkkWQuYxGIgWVPPse2CxFA5mtrlD0TOHaHjEUqkWNyP1XdHoJES/4A==} + engines: {node: '>= 8.10.0'} + dependencies: + anymatch: 3.1.2 + braces: 3.0.2 + glob-parent: 5.1.2 + is-binary-path: 2.1.0 + is-glob: 4.0.3 + normalize-path: 3.0.0 + readdirp: 3.2.0 + optionalDependencies: + fsevents: 2.1.3 + dev: true + + /chokidar@3.5.3: + resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==} + engines: {node: '>= 8.10.0'} + dependencies: + anymatch: 3.1.2 + braces: 3.0.2 + glob-parent: 5.1.2 + is-binary-path: 2.1.0 + is-glob: 4.0.3 + normalize-path: 3.0.0 + readdirp: 3.6.0 + optionalDependencies: + fsevents: 2.3.2 + dev: true + + /chownr@1.1.4: + resolution: {integrity: sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==} + dev: true + + /ci-info@2.0.0: + resolution: {integrity: sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==} + dev: true + + /cids@0.7.5: + resolution: {integrity: sha512-zT7mPeghoWAu+ppn8+BS1tQ5qGmbMfB4AregnQjA/qHY3GC1m1ptI9GkWNlgeu38r7CuRdXB47uY2XgAYt6QVA==} + engines: {node: '>=4.0.0', npm: '>=3.0.0'} + deprecated: This module has been superseded by the multiformats module + dependencies: + buffer: 5.7.1 + class-is: 1.1.0 + multibase: 0.6.1 + multicodec: 1.0.4 + multihashes: 0.4.21 + dev: true + + /cipher-base@1.0.4: + resolution: {integrity: sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==} + dependencies: + inherits: 2.0.4 + safe-buffer: 5.2.1 + 
dev: true + + /class-is@1.1.0: + resolution: {integrity: sha512-rhjH9AG1fvabIDoGRVH587413LPjTZgmDF9fOFCbFJQV4yuocX1mHxxvXI4g3cGwbVY9wAYIoKlg1N79frJKQw==} + dev: true + + /class-utils@0.3.6: + resolution: {integrity: sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==} + engines: {node: '>=0.10.0'} + dependencies: + arr-union: 3.1.0 + define-property: 0.2.5 + isobject: 3.0.1 + static-extend: 0.1.2 + dev: true + + /classic-level@1.2.0: + resolution: {integrity: sha512-qw5B31ANxSluWz9xBzklRWTUAJ1SXIdaVKTVS7HcTGKOAmExx65Wo5BUICW+YGORe2FOUaDghoI9ZDxj82QcFg==} + engines: {node: '>=12'} + requiresBuild: true + dependencies: + abstract-level: 1.0.3 + catering: 2.1.1 + module-error: 1.0.2 + napi-macros: 2.0.0 + node-gyp-build: 4.5.0 + dev: true + + /clean-stack@2.2.0: + resolution: {integrity: sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==} + engines: {node: '>=6'} + dev: true + + /cli-table3@0.5.1: + resolution: {integrity: sha512-7Qg2Jrep1S/+Q3EceiZtQcDPWxhAvBw+ERf1162v4sikJrvojMHFqXt8QIVha8UlH9rgU0BeWPytZ9/TzYqlUw==} + engines: {node: '>=6'} + dependencies: + object-assign: 4.1.1 + string-width: 2.1.1 + optionalDependencies: + colors: 1.4.0 + dev: true + + /cli-table3@0.6.3: + resolution: {integrity: sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg==} + engines: {node: 10.* || >= 12.*} + dependencies: + string-width: 4.2.3 + optionalDependencies: + '@colors/colors': 1.5.0 + dev: true + + /cliui@3.2.0: + resolution: {integrity: sha512-0yayqDxWQbqk3ojkYqUKqaAQ6AfNKeKWRNA8kR0WXzAsdHpP4BIaOmMAG87JGuO6qcobyW4GjxHd9PmhEd+T9w==} + dependencies: + string-width: 1.0.2 + strip-ansi: 3.0.1 + wrap-ansi: 2.1.0 + dev: true + + /cliui@5.0.0: + resolution: {integrity: sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==} + dependencies: + string-width: 3.1.0 + strip-ansi: 5.2.0 + wrap-ansi: 5.1.0 + dev: true 
+ + /cliui@7.0.4: + resolution: {integrity: sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==} + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + dev: true + + /clone-response@1.0.2: + resolution: {integrity: sha512-yjLXh88P599UOyPTFX0POsd7WxnbsVsGohcwzHOLspIhhpalPw1BcqED8NblyZLKcGrL8dTgMlcaZxV2jAD41Q==} + dependencies: + mimic-response: 1.0.1 + dev: true + + /clone@2.1.2: + resolution: {integrity: sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w==} + engines: {node: '>=0.8'} + dev: true + + /code-error-fragment@0.0.230: + resolution: {integrity: sha512-cadkfKp6932H8UkhzE/gcUqhRMNf8jHzkAN7+5Myabswaghu4xABTgPHDCjW+dBAJxj/SpkTYokpzDqY4pCzQw==} + engines: {node: '>= 4'} + dev: true + + /code-point-at@1.1.0: + resolution: {integrity: sha512-RpAVKQA5T63xEj6/giIbUEtZwJ4UFIc3ZtvEkiaUERylqe8xb5IvqcgOurZLahv93CLKfxcw5YI+DZcUBRyLXA==} + engines: {node: '>=0.10.0'} + dev: true + + /collection-visit@1.0.0: + resolution: {integrity: sha512-lNkKvzEeMBBjUGHZ+q6z9pSJla0KWAQPvtzhEV9+iGyQYG+pBpl7xKDhxoNSOZH2hhv0v5k0y2yAM4o4SjoSkw==} + engines: {node: '>=0.10.0'} + dependencies: + map-visit: 1.0.0 + object-visit: 1.0.1 + dev: true + + /color-convert@1.9.3: + resolution: {integrity: sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==} + dependencies: + color-name: 1.1.3 + dev: true + + /color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + dependencies: + color-name: 1.1.4 + dev: true + + /color-name@1.1.3: + resolution: {integrity: sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==} + dev: true + + /color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + dev: true + + 
/colors@1.4.0: + resolution: {integrity: sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==} + engines: {node: '>=0.1.90'} + dev: true + + /combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + dependencies: + delayed-stream: 1.0.0 + dev: true + + /command-exists@1.2.9: + resolution: {integrity: sha512-LTQ/SGc+s0Xc0Fu5WaKnR0YiygZkm9eKFvyS+fRsU7/ZWFF8ykFM6Pc9aCVf1+xasOOZpO3BAVgVrKvsqKHV7w==} + dev: true + + /command-line-args@4.0.7: + resolution: {integrity: sha512-aUdPvQRAyBvQd2n7jXcsMDz68ckBJELXNzBybCHOibUWEg0mWTnaYCSRU8h9R+aNRSvDihJtssSRCiDRpLaezA==} + hasBin: true + dependencies: + array-back: 2.0.0 + find-replace: 1.0.3 + typical: 2.6.1 + dev: true + + /command-line-args@5.2.1: + resolution: {integrity: sha512-H4UfQhZyakIjC74I9d34fGYDwk3XpSr17QhEd0Q3I9Xq1CETHo4Hcuo87WyWHpAF1aSLjLRf5lD9ZGX2qStUvg==} + engines: {node: '>=4.0.0'} + dependencies: + array-back: 3.1.0 + find-replace: 3.0.0 + lodash.camelcase: 4.3.0 + typical: 4.0.0 + dev: true + + /command-line-usage@6.1.3: + resolution: {integrity: sha512-sH5ZSPr+7UStsloltmDh7Ce5fb8XPlHyoPzTpyyMuYCtervL65+ubVZ6Q61cFtFl62UyJlc8/JwERRbAFPUqgw==} + engines: {node: '>=8.0.0'} + dependencies: + array-back: 4.0.2 + chalk: 2.4.2 + table-layout: 1.0.2 + typical: 5.2.0 + dev: true + + /commander@10.0.1: + resolution: {integrity: sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==} + engines: {node: '>=14'} + dev: true + + /commander@3.0.2: + resolution: {integrity: sha512-Gar0ASD4BDyKC4hl4DwHqDrmvjoxWKZigVnAbn5H1owvm4CxCPdb0HQDehwNYMJpla5+M2tPmPARzhtYuwpHow==} + dev: true + + /compare-versions@6.1.0: + resolution: {integrity: sha512-LNZQXhqUvqUTotpZ00qLSaify3b4VFD588aRr8MKFw4CMUr98ytzCW5wDH5qx/DEY5kCDXcbcRuCqL0szEf2tg==} + dev: true + + /component-emitter@1.3.0: + resolution: {integrity: 
sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==} + dev: true + + /concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + dev: true + + /concat-stream@1.6.2: + resolution: {integrity: sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==} + engines: {'0': node >= 0.8} + dependencies: + buffer-from: 1.1.2 + inherits: 2.0.4 + readable-stream: 2.3.7 + typedarray: 0.0.6 + dev: true + + /config-chain@1.1.13: + resolution: {integrity: sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==} + dependencies: + ini: 1.3.8 + proto-list: 1.2.4 + dev: true + + /constant-case@2.0.0: + resolution: {integrity: sha512-eS0N9WwmjTqrOmR3o83F5vW8Z+9R1HnVz3xmzT2PMFug9ly+Au/fxRWlEBSb6LcZwspSsEn9Xs1uw9YgzAg1EQ==} + dependencies: + snake-case: 2.1.0 + upper-case: 1.1.3 + dev: true + + /content-disposition@0.5.3: + resolution: {integrity: sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==} + engines: {node: '>= 0.6'} + dependencies: + safe-buffer: 5.1.2 + dev: true + + /content-hash@2.5.2: + resolution: {integrity: sha512-FvIQKy0S1JaWV10sMsA7TRx8bpU+pqPkhbsfvOJAdjRXvYxEckAwQWGwtRjiaJfh+E0DvcWUGqcdjwMGFjsSdw==} + dependencies: + cids: 0.7.5 + multicodec: 0.5.7 + multihashes: 0.4.21 + dev: true + + /content-type@1.0.4: + resolution: {integrity: sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==} + engines: {node: '>= 0.6'} + dev: true + + /convert-source-map@1.8.0: + resolution: {integrity: sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==} + dependencies: + safe-buffer: 5.1.2 + dev: true + + /cookie-signature@1.0.6: + resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} + dev: 
true + + /cookie@0.4.0: + resolution: {integrity: sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg==} + engines: {node: '>= 0.6'} + dev: true + + /cookie@0.4.2: + resolution: {integrity: sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA==} + engines: {node: '>= 0.6'} + dev: true + + /cookiejar@2.1.2: + resolution: {integrity: sha512-Mw+adcfzPxcPeI+0WlvRrr/3lGVO0bD75SxX6811cxSh1Wbxx7xZBGK1eVtDf6si8rg2lhnUjsVLMFMfbRIuwA==} + dev: true + + /copy-descriptor@0.1.1: + resolution: {integrity: sha512-XgZ0pFcakEUlbwQEVNg3+QAis1FyTL3Qel9FYy8pSkQqoG3PNoT0bOCQtOXcOkur21r2Eq2kI+IE+gsmAEVlYw==} + engines: {node: '>=0.10.0'} + dev: true + + /core-js-pure@3.25.3: + resolution: {integrity: sha512-T/7qvgv70MEvRkZ8p6BasLZmOVYKzOaWNBEHAU8FmveCJkl4nko2quqPQOmy6AJIp5MBanhz9no3A94NoRb0XA==} + requiresBuild: true + dev: true + + /core-js@2.6.12: + resolution: {integrity: sha512-Kb2wC0fvsWfQrgk8HU5lW6U/Lcs8+9aaYcy4ZFc6DDlo4nZ7n70dEgE5rtR0oG6ufKDUnrwfWL1mXR5ljDatrQ==} + deprecated: core-js@<3.23.3 is no longer maintained and not recommended for usage due to the number of issues. Because of the V8 engine whims, feature detection in old core-js versions could cause a slowdown up to 100x even if nothing is polyfilled. Some versions have web compatibility issues. Please, upgrade your dependencies to the actual version of core-js. 
+ requiresBuild: true + dev: true + + /core-js@3.30.1: + resolution: {integrity: sha512-ZNS5nbiSwDTq4hFosEDqm65izl2CWmLz0hARJMyNQBgkUZMIF51cQiMvIQKA6hvuaeWxQDP3hEedM1JZIgTldQ==} + requiresBuild: true + dev: true + + /core-util-is@1.0.2: + resolution: {integrity: sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==} + dev: true + + /core-util-is@1.0.3: + resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} + dev: true + + /cors@2.8.5: + resolution: {integrity: sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==} + engines: {node: '>= 0.10'} + dependencies: + object-assign: 4.1.1 + vary: 1.1.2 + dev: true + + /cosmiconfig@8.2.0: + resolution: {integrity: sha512-3rTMnFJA1tCOPwRxtgF4wd7Ab2qvDbL8jX+3smjIbS4HlZBagTlpERbdN7iAbWlrfxE3M8c27kTwTawQ7st+OQ==} + engines: {node: '>=14'} + dependencies: + import-fresh: 3.3.0 + js-yaml: 4.1.0 + parse-json: 5.2.0 + path-type: 4.0.0 + dev: true + + /crc-32@1.2.2: + resolution: {integrity: sha512-ROmzCKrTnOwybPcJApAA6WBWij23HVfGVNKqqrZpuyZOHqK2CwHSvpGuyt/UNNvaIjEd8X5IFGp4Mh+Ie1IHJQ==} + engines: {node: '>=0.8'} + hasBin: true + dev: true + + /create-ecdh@4.0.3: + resolution: {integrity: sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw==} + dependencies: + bn.js: 4.12.0 + elliptic: 6.5.4 + dev: true + + /create-hash@1.2.0: + resolution: {integrity: sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==} + dependencies: + cipher-base: 1.0.4 + inherits: 2.0.4 + md5.js: 1.3.5 + ripemd160: 2.0.2 + sha.js: 2.4.11 + dev: true + + /create-hmac@1.1.7: + resolution: {integrity: sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==} + dependencies: + cipher-base: 1.0.4 + create-hash: 1.2.0 + inherits: 2.0.4 + ripemd160: 2.0.2 + safe-buffer: 5.2.1 + sha.js: 2.4.11 + 
dev: true + + /create-require@1.1.1: + resolution: {integrity: sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==} + dev: true + + /cross-fetch@2.2.6: + resolution: {integrity: sha512-9JZz+vXCmfKUZ68zAptS7k4Nu8e2qcibe7WVZYps7sAgk5R8GYTc+T1WR0v1rlP9HxgARmOX1UTIJZFytajpNA==} + dependencies: + node-fetch: 2.6.7 + whatwg-fetch: 2.0.4 + transitivePeerDependencies: + - encoding + dev: true + + /cross-fetch@3.1.5: + resolution: {integrity: sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw==} + dependencies: + node-fetch: 2.6.7 + transitivePeerDependencies: + - encoding + dev: true + + /cross-spawn@6.0.5: + resolution: {integrity: sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==} + engines: {node: '>=4.8'} + dependencies: + nice-try: 1.0.5 + path-key: 2.0.1 + semver: 5.7.1 + shebang-command: 1.2.0 + which: 1.3.1 + dev: true + + /cross-spawn@7.0.3: + resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} + engines: {node: '>= 8'} + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + dev: true + + /crypt@0.0.2: + resolution: {integrity: sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==} + dev: true + + /crypto-addr-codec@0.1.7: + resolution: {integrity: sha512-X4hzfBzNhy4mAc3UpiXEC/L0jo5E8wAa9unsnA8nNXYzXjCcGk83hfC5avJWCSGT8V91xMnAS9AKMHmjw5+XCg==} + dependencies: + base-x: 3.0.9 + big-integer: 1.6.36 + blakejs: 1.2.1 + bs58: 4.0.1 + ripemd160-min: 0.0.6 + safe-buffer: 5.2.1 + sha3: 2.1.4 + dev: true + + /crypto-browserify@3.12.0: + resolution: {integrity: sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==} + dependencies: + browserify-cipher: 1.0.1 + browserify-sign: 4.0.4 + create-ecdh: 4.0.3 + create-hash: 1.2.0 + create-hmac: 1.1.7 + diffie-hellman: 5.0.3 + 
inherits: 2.0.4 + pbkdf2: 3.1.2 + public-encrypt: 4.0.3 + randombytes: 2.1.0 + randomfill: 1.0.4 + dev: true + + /css-select@5.1.0: + resolution: {integrity: sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==} + dependencies: + boolbase: 1.0.0 + css-what: 6.1.0 + domhandler: 5.0.3 + domutils: 3.0.1 + nth-check: 2.1.1 + dev: true + + /css-what@6.1.0: + resolution: {integrity: sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==} + engines: {node: '>= 6'} + dev: true + + /d@1.0.1: + resolution: {integrity: sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==} + dependencies: + es5-ext: 0.10.62 + type: 1.2.0 + dev: true + + /dashdash@1.14.1: + resolution: {integrity: sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==} + engines: {node: '>=0.10'} + dependencies: + assert-plus: 1.0.0 + dev: true + + /death@1.1.0: + resolution: {integrity: sha512-vsV6S4KVHvTGxbEcij7hkWRv0It+sGGWVOM67dQde/o5Xjnr+KmLjxWJii2uEObIrt1CcM9w0Yaovx+iOlIL+w==} + dev: true + + /debug@2.6.9: + resolution: {integrity: sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.0.0 + dev: true + + /debug@3.2.6(supports-color@6.0.0): + resolution: {integrity: sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==} + deprecated: Debug versions >=3.2.0 <3.2.7 || >=4 <4.3.1 have a low-severity ReDos regression when used in a Node.js environment. It is recommended you upgrade to 3.2.7 or 4.3.1. 
(https://github.com/visionmedia/debug/issues/797) + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.1.3 + supports-color: 6.0.0 + dev: true + + /debug@3.2.7: + resolution: {integrity: sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.1.3 + dev: true + + /debug@4.3.4(supports-color@8.1.1): + resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.1.2 + supports-color: 8.1.1 + dev: true + + /decamelize@1.2.0: + resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} + engines: {node: '>=0.10.0'} + dev: true + + /decamelize@4.0.0: + resolution: {integrity: sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==} + engines: {node: '>=10'} + dev: true + + /decode-uri-component@0.2.0: + resolution: {integrity: sha512-hjf+xovcEn31w/EUYdTXQh/8smFL/dzYjohQGEIgjyNavaJfBY2p5F527Bo1VPATxv0VYTUC2bOcXvqFwk78Og==} + engines: {node: '>=0.10'} + dev: true + + /decompress-response@3.3.0: + resolution: {integrity: sha512-BzRPQuY1ip+qDonAOz42gRm/pg9F768C+npV/4JOsxRC2sq+Rlk+Q4ZCAsOhnIaMrgarILY+RMUIvMmmX1qAEA==} + engines: {node: '>=4'} + dependencies: + mimic-response: 1.0.1 + dev: true + + /decompress-response@6.0.0: + resolution: {integrity: sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==} + engines: {node: '>=10'} + dependencies: + mimic-response: 3.1.0 + dev: true + + /deep-eql@4.1.3: + resolution: {integrity: 
sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==} + engines: {node: '>=6'} + dependencies: + type-detect: 4.0.8 + + /deep-equal-in-any-order@2.0.6: + resolution: {integrity: sha512-RfnWHQzph10YrUjvWwhd15Dne8ciSJcZ3U6OD7owPwiVwsdE5IFSoZGg8rlwJD11ES+9H5y8j3fCofviRHOqLQ==} + dependencies: + lodash.mapvalues: 4.6.0 + sort-any: 2.0.0 + dev: true + + /deep-equal@1.1.1: + resolution: {integrity: sha512-yd9c5AdiqVcR+JjcwUQb9DkhJc8ngNr0MahEBGvDiJw8puWab2yZlh+nkasOnZP+EGTAP6rRp2JzJhJZzvNF8g==} + dependencies: + is-arguments: 1.0.4 + is-date-object: 1.0.2 + is-regex: 1.1.4 + object-is: 1.1.5 + object-keys: 1.1.1 + regexp.prototype.flags: 1.5.1 + dev: true + + /deep-extend@0.6.0: + resolution: {integrity: sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==} + engines: {node: '>=4.0.0'} + dev: true + + /deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + dev: true + + /defer-to-connect@1.1.1: + resolution: {integrity: sha512-J7thop4u3mRTkYRQ+Vpfwy2G5Ehoy82I14+14W4YMDLKdWloI9gSzRbV30s/NckQGVJtPkWNcW4oMAUigTdqiQ==} + dev: true + + /defer-to-connect@2.0.1: + resolution: {integrity: sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==} + engines: {node: '>=10'} + dev: true + + /deferred-leveldown@1.2.2: + resolution: {integrity: sha512-uukrWD2bguRtXilKt6cAWKyoXrTSMo5m7crUdLfWQmu8kIm88w3QZoUL+6nhpfKVmhHANER6Re3sKoNoZ3IKMA==} + dependencies: + abstract-leveldown: 2.6.3 + dev: true + + /deferred-leveldown@4.0.2: + resolution: {integrity: sha512-5fMC8ek8alH16QiV0lTCis610D1Zt1+LA4MS4d63JgS32lrCjTFDUFz2ao09/j2I4Bqb5jL4FZYwu7Jz0XO1ww==} + engines: {node: '>=6'} + dependencies: + abstract-leveldown: 5.0.0 + inherits: 2.0.4 + dev: true + + /define-data-property@1.1.1: + resolution: {integrity: 
sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ==} + engines: {node: '>= 0.4'} + dependencies: + get-intrinsic: 1.2.2 + gopd: 1.0.1 + has-property-descriptors: 1.0.0 + dev: true + + /define-properties@1.1.4: + resolution: {integrity: sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA==} + engines: {node: '>= 0.4'} + dependencies: + has-property-descriptors: 1.0.0 + object-keys: 1.1.1 + dev: true + + /define-properties@1.2.1: + resolution: {integrity: sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==} + engines: {node: '>= 0.4'} + dependencies: + define-data-property: 1.1.1 + has-property-descriptors: 1.0.0 + object-keys: 1.1.1 + dev: true + + /define-property@0.2.5: + resolution: {integrity: sha512-Rr7ADjQZenceVOAKop6ALkkRAmH1A4Gx9hV/7ZujPUN2rkATqFO0JZLZInbAjpZYoJ1gUx8MRMQVkYemcbMSTA==} + engines: {node: '>=0.10.0'} + dependencies: + is-descriptor: 0.1.6 + dev: true + + /define-property@1.0.0: + resolution: {integrity: sha512-cZTYKFWspt9jZsMscWo8sc/5lbPC9Q0N5nBLgb+Yd915iL3udB1uFgS3B8YCx66UVHq018DAVFoee7x+gxggeA==} + engines: {node: '>=0.10.0'} + dependencies: + is-descriptor: 1.0.2 + dev: true + + /define-property@2.0.2: + resolution: {integrity: sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==} + engines: {node: '>=0.10.0'} + dependencies: + is-descriptor: 1.0.2 + isobject: 3.0.1 + dev: true + + /defined@1.0.0: + resolution: {integrity: sha512-Y2caI5+ZwS5c3RiNDJ6u53VhQHv+hHKwhkI1iHvceKUHw9Df6EK2zRLfjejRgMuCuxK7PfSWIMwWecceVvThjQ==} + dev: true + + /delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + dev: true + + /delete-empty@3.0.0: + resolution: {integrity: sha512-ZUyiwo76W+DYnKsL3Kim6M/UOavPdBJgDYWOmuQhYaZvJH0AXAHbUNyEDtRbBra8wqqr686+63/0azfEk1ebUQ==} + 
engines: {node: '>=10'} + hasBin: true + dependencies: + ansi-colors: 4.1.3 + minimist: 1.2.8 + path-starts-with: 2.0.1 + rimraf: 2.7.1 + dev: true + + /depd@1.1.2: + resolution: {integrity: sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==} + engines: {node: '>= 0.6'} + dev: true + + /depd@2.0.0: + resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==} + engines: {node: '>= 0.8'} + dev: true + + /des.js@1.0.1: + resolution: {integrity: sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA==} + dependencies: + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + dev: true + + /destroy@1.0.4: + resolution: {integrity: sha512-3NdhDuEXnfun/z7x9GOElY49LoqVHoGScmOKwmxhsS8N5Y+Z8KyPPDnaSzqWgYt/ji4mqwfTS34Htrk0zPIXVg==} + dev: true + + /detect-indent@4.0.0: + resolution: {integrity: sha512-BDKtmHlOzwI7iRuEkhzsnPoi5ypEhWAJB5RvHWe1kMr06js3uK5B3734i3ui5Yd+wOJV1cpE4JnivPD283GU/A==} + engines: {node: '>=0.10.0'} + dependencies: + repeating: 2.0.1 + dev: true + + /detect-indent@5.0.0: + resolution: {integrity: sha512-rlpvsxUtM0PQvy9iZe640/IWwWYyBsTApREbA1pHOpmOUIl9MkP/U4z7vTtg4Oaojvqhxt7sdufnT0EzGaR31g==} + engines: {node: '>=4'} + dev: true + + /detect-port@1.3.0: + resolution: {integrity: sha512-E+B1gzkl2gqxt1IhUzwjrxBKRqx1UzC3WLONHinn8S3T6lwV/agVCyitiFOsGJ/eYuEUBvD71MZHy3Pv1G9doQ==} + engines: {node: '>= 4.2.1'} + hasBin: true + dependencies: + address: 1.1.2 + debug: 2.6.9 + transitivePeerDependencies: + - supports-color + dev: true + + /diff@3.5.0: + resolution: {integrity: sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==} + engines: {node: '>=0.3.1'} + dev: true + + /diff@4.0.2: + resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} + engines: {node: '>=0.3.1'} + dev: true + + /diff@5.0.0: + resolution: {integrity: 
sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==} + engines: {node: '>=0.3.1'} + dev: true + + /diffie-hellman@5.0.3: + resolution: {integrity: sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==} + dependencies: + bn.js: 4.12.0 + miller-rabin: 4.0.1 + randombytes: 2.1.0 + dev: true + + /difflib@0.2.4: + resolution: {integrity: sha512-9YVwmMb0wQHQNr5J9m6BSj6fk4pfGITGQOOs+D9Fl+INODWFOfvhIU1hNv6GgR1RBoC/9NJcwu77zShxV0kT7w==} + dependencies: + heap: 0.2.6 + dev: true + + /dir-glob@3.0.1: + resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} + engines: {node: '>=8'} + dependencies: + path-type: 4.0.0 + dev: true + + /doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + dependencies: + esutils: 2.0.3 + dev: true + + /dom-serializer@2.0.0: + resolution: {integrity: sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==} + dependencies: + domelementtype: 2.3.0 + domhandler: 5.0.3 + entities: 4.4.0 + dev: true + + /dom-walk@0.1.1: + resolution: {integrity: sha512-8CGZnLAdYN/o0SHjlP3nLvliHpi2f/prVU63/Hc4DTDpBgsNVAJekegjFtxfZ7NTUEDzHUByjX1gT3eYakIKqg==} + dev: true + + /domelementtype@2.3.0: + resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==} + dev: true + + /domhandler@5.0.3: + resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==} + engines: {node: '>= 4'} + dependencies: + domelementtype: 2.3.0 + dev: true + + /domutils@3.0.1: + resolution: {integrity: sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q==} + dependencies: + dom-serializer: 2.0.0 + domelementtype: 2.3.0 + domhandler: 
5.0.3 + dev: true + + /dot-case@2.1.1: + resolution: {integrity: sha512-HnM6ZlFqcajLsyudHq7LeeLDr2rFAVYtDv/hV5qchQEidSck8j9OPUsXY9KwJv/lHMtYlX4DjRQqwFYa+0r8Ug==} + dependencies: + no-case: 2.3.2 + dev: true + + /dotignore@0.1.2: + resolution: {integrity: sha512-UGGGWfSauusaVJC+8fgV+NVvBXkCTmVv7sk6nojDZZvuOUNGUy0Zk4UpHQD6EDjS0jpBwcACvH4eofvyzBcRDw==} + hasBin: true + dependencies: + minimatch: 3.1.2 + dev: true + + /drbg.js@1.0.1: + resolution: {integrity: sha512-F4wZ06PvqxYLFEZKkFxTDcns9oFNk34hvmJSEwdzsxVQ8YI5YaxtACgQatkYgv2VI2CFkUd2Y+xosPQnHv809g==} + engines: {node: '>=0.10'} + dependencies: + browserify-aes: 1.2.0 + create-hash: 1.2.0 + create-hmac: 1.1.7 + dev: true + + /duplexer3@0.1.4: + resolution: {integrity: sha512-CEj8FwwNA4cVH2uFCoHUrmojhYh1vmCdOaneKJXwkeY1i9jnlslVo9dx+hQ5Hl9GnH/Bwy/IjxAyOePyPKYnzA==} + dev: true + + /ecc-jsbn@0.1.2: + resolution: {integrity: sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==} + dependencies: + jsbn: 0.1.1 + safer-buffer: 2.1.2 + dev: true + + /ee-first@1.1.1: + resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} + dev: true + + /electron-to-chromium@1.4.270: + resolution: {integrity: sha512-KNhIzgLiJmDDC444dj9vEOpZEgsV96ult9Iff98Vanumn+ShJHd5se8aX6KeVxdc0YQeqdrezBZv89rleDbvSg==} + dev: true + + /elliptic@6.5.4: + resolution: {integrity: sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==} + dependencies: + bn.js: 4.12.0 + brorand: 1.1.0 + hash.js: 1.1.7 + hmac-drbg: 1.0.1 + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + minimalistic-crypto-utils: 1.0.1 + + /emoji-regex@7.0.3: + resolution: {integrity: sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==} + dev: true + + /emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + dev: true 
+ + /encodeurl@1.0.2: + resolution: {integrity: sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==} + engines: {node: '>= 0.8'} + dev: true + + /encoding-down@5.0.4: + resolution: {integrity: sha512-8CIZLDcSKxgzT+zX8ZVfgNbu8Md2wq/iqa1Y7zyVR18QBEAc0Nmzuvj/N5ykSKpfGzjM8qxbaFntLPwnVoUhZw==} + engines: {node: '>=6'} + dependencies: + abstract-leveldown: 5.0.0 + inherits: 2.0.4 + level-codec: 9.0.2 + level-errors: 2.0.1 + xtend: 4.0.2 + dev: true + + /encoding@0.1.13: + resolution: {integrity: sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==} + dependencies: + iconv-lite: 0.6.3 + dev: true + + /end-of-stream@1.4.4: + resolution: {integrity: sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==} + dependencies: + once: 1.4.0 + dev: true + + /enquirer@2.3.6: + resolution: {integrity: sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==} + engines: {node: '>=8.6'} + dependencies: + ansi-colors: 4.1.3 + dev: true + + /entities@4.4.0: + resolution: {integrity: sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==} + engines: {node: '>=0.12'} + dev: true + + /env-paths@2.2.1: + resolution: {integrity: sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==} + engines: {node: '>=6'} + dev: true + + /errno@0.1.8: + resolution: {integrity: sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==} + hasBin: true + dependencies: + prr: 1.0.1 + dev: true + + /error-ex@1.3.2: + resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} + dependencies: + is-arrayish: 0.2.1 + dev: true + + /es-abstract@1.20.3: + resolution: {integrity: sha512-AyrnaKVpMzljIdwjzrj+LxGmj8ik2LckwXacHqrJJ/jxz6dDDBcZ7I7nlHM0FvEW8MfbWJwOd+yT2XzYW49Frw==} 
+ engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + es-to-primitive: 1.2.1 + function-bind: 1.1.1 + function.prototype.name: 1.1.5 + get-intrinsic: 1.1.3 + get-symbol-description: 1.0.0 + has: 1.0.3 + has-property-descriptors: 1.0.0 + has-symbols: 1.0.3 + internal-slot: 1.0.3 + is-callable: 1.2.7 + is-negative-zero: 2.0.2 + is-regex: 1.1.4 + is-shared-array-buffer: 1.0.2 + is-string: 1.0.7 + is-weakref: 1.0.2 + object-inspect: 1.12.2 + object-keys: 1.1.1 + object.assign: 4.1.4 + regexp.prototype.flags: 1.4.3 + safe-regex-test: 1.0.0 + string.prototype.trimend: 1.0.5 + string.prototype.trimstart: 1.0.5 + unbox-primitive: 1.0.2 + dev: true + + /es-abstract@1.22.3: + resolution: {integrity: sha512-eiiY8HQeYfYH2Con2berK+To6GrK2RxbPawDkGq4UiCQQfZHb6wX9qQqkbpPqaxQFcl8d9QzZqo0tGE0VcrdwA==} + engines: {node: '>= 0.4'} + dependencies: + array-buffer-byte-length: 1.0.0 + arraybuffer.prototype.slice: 1.0.2 + available-typed-arrays: 1.0.5 + call-bind: 1.0.5 + es-set-tostringtag: 2.0.2 + es-to-primitive: 1.2.1 + function.prototype.name: 1.1.6 + get-intrinsic: 1.2.2 + get-symbol-description: 1.0.0 + globalthis: 1.0.3 + gopd: 1.0.1 + has-property-descriptors: 1.0.0 + has-proto: 1.0.1 + has-symbols: 1.0.3 + hasown: 2.0.0 + internal-slot: 1.0.6 + is-array-buffer: 3.0.2 + is-callable: 1.2.7 + is-negative-zero: 2.0.2 + is-regex: 1.1.4 + is-shared-array-buffer: 1.0.2 + is-string: 1.0.7 + is-typed-array: 1.1.12 + is-weakref: 1.0.2 + object-inspect: 1.13.1 + object-keys: 1.1.1 + object.assign: 4.1.4 + regexp.prototype.flags: 1.5.1 + safe-array-concat: 1.0.1 + safe-regex-test: 1.0.0 + string.prototype.trim: 1.2.8 + string.prototype.trimend: 1.0.7 + string.prototype.trimstart: 1.0.7 + typed-array-buffer: 1.0.0 + typed-array-byte-length: 1.0.0 + typed-array-byte-offset: 1.0.0 + typed-array-length: 1.0.4 + unbox-primitive: 1.0.2 + which-typed-array: 1.1.13 + dev: true + + /es-array-method-boxes-properly@1.0.0: + resolution: {integrity: 
sha512-wd6JXUmyHmt8T5a2xreUwKcGPq6f1f+WwIJkijUqiGcJz1qqnZgP6XIK+QyIWU5lT7imeNxUll48bziG+TSYcA==} + dev: true + + /es-set-tostringtag@2.0.2: + resolution: {integrity: sha512-BuDyupZt65P9D2D2vA/zqcI3G5xRsklm5N3xCwuiy+/vKy8i0ifdsQP1sLgO4tZDSCaQUSnmC48khknGMV3D2Q==} + engines: {node: '>= 0.4'} + dependencies: + get-intrinsic: 1.2.2 + has-tostringtag: 1.0.0 + hasown: 2.0.0 + dev: true + + /es-shim-unscopables@1.0.2: + resolution: {integrity: sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==} + dependencies: + hasown: 2.0.0 + dev: true + + /es-to-primitive@1.2.1: + resolution: {integrity: sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==} + engines: {node: '>= 0.4'} + dependencies: + is-callable: 1.2.7 + is-date-object: 1.0.2 + is-symbol: 1.0.3 + dev: true + + /es5-ext@0.10.62: + resolution: {integrity: sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA==} + engines: {node: '>=0.10'} + requiresBuild: true + dependencies: + es6-iterator: 2.0.3 + es6-symbol: 3.1.3 + next-tick: 1.1.0 + dev: true + + /es6-iterator@2.0.3: + resolution: {integrity: sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==} + dependencies: + d: 1.0.1 + es5-ext: 0.10.62 + es6-symbol: 3.1.3 + dev: true + + /es6-promise@4.2.8: + resolution: {integrity: sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==} + dev: true + + /es6-symbol@3.1.3: + resolution: {integrity: sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==} + dependencies: + d: 1.0.1 + ext: 1.4.0 + dev: true + + /escalade@3.1.1: + resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==} + engines: {node: '>=6'} + dev: true + + /escape-html@1.0.3: + resolution: {integrity: 
sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} + dev: true + + /escape-string-regexp@1.0.5: + resolution: {integrity: sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==} + engines: {node: '>=0.8.0'} + dev: true + + /escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + dev: true + + /escodegen@1.8.1: + resolution: {integrity: sha512-yhi5S+mNTOuRvyW4gWlg5W1byMaQGWWSYHXsuFZ7GBo7tpyOwi2EdzMP/QWxh9hwkD2m+wDVHJsxhRIj+v/b/A==} + engines: {node: '>=0.12.0'} + hasBin: true + dependencies: + esprima: 2.7.3 + estraverse: 1.9.3 + esutils: 2.0.3 + optionator: 0.8.3 + optionalDependencies: + source-map: 0.2.0 + dev: true + + /eslint-config-prettier@9.1.0(eslint@8.56.0): + resolution: {integrity: sha512-NSWl5BFQWEPi1j4TjVNItzYV7dZXZ+wP6I6ZhrBGpChQhZRUaElihE9uRRkcbRnNb76UMKDF3r+WTmNcGPKsqw==} + hasBin: true + peerDependencies: + eslint: '>=7.0.0' + dependencies: + eslint: 8.56.0 + dev: true + + /eslint-plugin-prettier@5.1.3(eslint-config-prettier@9.1.0)(eslint@8.56.0)(prettier@3.2.5): + resolution: {integrity: sha512-C9GCVAs4Eq7ZC/XFQHITLiHJxQngdtraXaM+LoUFoFp/lHNl2Zn8f3WQbe9HvTBBQ9YnKFB0/2Ajdqwo5D1EAw==} + engines: {node: ^14.18.0 || >=16.0.0} + peerDependencies: + '@types/eslint': '>=8.0.0' + eslint: '>=8.0.0' + eslint-config-prettier: '*' + prettier: '>=3.0.0' + peerDependenciesMeta: + '@types/eslint': + optional: true + eslint-config-prettier: + optional: true + dependencies: + eslint: 8.56.0 + eslint-config-prettier: 9.1.0(eslint@8.56.0) + prettier: 3.2.5 + prettier-linter-helpers: 1.0.0 + synckit: 0.8.8 + dev: true + + /eslint-scope@7.2.2: + resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + esrecurse: 4.3.0 + 
estraverse: 5.3.0 + dev: true + + /eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: true + + /eslint@8.56.0: + resolution: {integrity: sha512-Go19xM6T9puCOWntie1/P997aXxFsOi37JIHRWI514Hc6ZnaHGKY9xFhrU65RT6CcBEzZoGG1e6Nq+DT04ZtZQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + hasBin: true + dependencies: + '@eslint-community/eslint-utils': 4.4.0(eslint@8.56.0) + '@eslint-community/regexpp': 4.9.1 + '@eslint/eslintrc': 2.1.4 + '@eslint/js': 8.56.0 + '@humanwhocodes/config-array': 0.11.13 + '@humanwhocodes/module-importer': 1.0.1 + '@nodelib/fs.walk': 1.2.8 + '@ungap/structured-clone': 1.2.0 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.3 + debug: 4.3.4(supports-color@8.1.1) + doctrine: 3.0.0 + escape-string-regexp: 4.0.0 + eslint-scope: 7.2.2 + eslint-visitor-keys: 3.4.3 + espree: 9.6.1 + esquery: 1.5.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + find-up: 5.0.0 + glob-parent: 6.0.2 + globals: 13.20.0 + graphemer: 1.4.0 + ignore: 5.2.4 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + is-path-inside: 3.0.3 + js-yaml: 4.1.0 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.3 + strip-ansi: 6.0.1 + text-table: 0.2.0 + transitivePeerDependencies: + - supports-color + dev: true + + /espree@9.6.1: + resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + acorn: 8.10.0 + acorn-jsx: 5.3.2(acorn@8.10.0) + eslint-visitor-keys: 3.4.3 + dev: true + + /esprima@2.7.3: + resolution: {integrity: sha512-OarPfz0lFCiW4/AV2Oy1Rp9qu0iusTKqykwTspGCZtPxmF81JR4MmIebvF1F9+UOKth2ZubLQ4XGGaU+hSn99A==} + engines: {node: '>=0.10.0'} + hasBin: true + dev: true + + /esprima@4.0.1: + 
resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} + engines: {node: '>=4'} + hasBin: true + dev: true + + /esquery@1.5.0: + resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==} + engines: {node: '>=0.10'} + dependencies: + estraverse: 5.3.0 + dev: true + + /esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + dependencies: + estraverse: 5.3.0 + dev: true + + /estraverse@1.9.3: + resolution: {integrity: sha512-25w1fMXQrGdoquWnScXZGckOv+Wes+JDnuN/+7ex3SauFRS72r2lFDec0EKPt2YD1wUJ/IrfEex+9yp4hfSOJA==} + engines: {node: '>=0.10.0'} + dev: true + + /estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + dev: true + + /esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + dev: true + + /etag@1.8.1: + resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} + engines: {node: '>= 0.6'} + dev: true + + /eth-block-tracker@3.0.1: + resolution: {integrity: sha512-WUVxWLuhMmsfenfZvFO5sbl1qFY2IqUlw/FPVmjjdElpqLsZtSG+wPe9Dz7W/sB6e80HgFKknOmKk2eNlznHug==} + dependencies: + eth-query: 2.1.2 + ethereumjs-tx: 1.3.7 + ethereumjs-util: 5.2.1 + ethjs-util: 0.1.6 + json-rpc-engine: 3.8.0 + pify: 2.3.0 + tape: 4.16.1 + transitivePeerDependencies: + - supports-color + dev: true + + /eth-ens-namehash@2.0.8: + resolution: {integrity: sha512-VWEI1+KJfz4Km//dadyvBBoBeSQ0MHTXPvr8UIXiLW6IanxvAV+DmlZAijZwAyggqGUfwQBeHf7tc9wzc1piSw==} + dependencies: + idna-uts46-hx: 2.3.1 + js-sha3: 0.5.7 + dev: true + + /eth-gas-reporter@0.2.25: + resolution: {integrity: 
sha512-1fRgyE4xUB8SoqLgN3eDfpDfwEfRxh2Sz1b7wzFbyQA+9TekMmvSjjoRu9SKcSVyK+vLkLIsVbJDsTWjw195OQ==} + peerDependencies: + '@codechecks/client': ^0.1.0 + peerDependenciesMeta: + '@codechecks/client': + optional: true + dependencies: + '@ethersproject/abi': 5.7.0 + '@solidity-parser/parser': 0.14.3 + cli-table3: 0.5.1 + colors: 1.4.0 + ethereum-cryptography: 1.1.2 + ethers: 4.0.49 + fs-readdir-recursive: 1.1.0 + lodash: 4.17.21 + markdown-table: 1.1.3 + mocha: 7.2.0 + req-cwd: 2.0.0 + request: 2.88.2 + request-promise-native: 1.0.9(request@2.88.2) + sha1: 1.1.1 + sync-request: 6.1.0 + dev: true + + /eth-json-rpc-infura@3.2.1: + resolution: {integrity: sha512-W7zR4DZvyTn23Bxc0EWsq4XGDdD63+XPUCEhV2zQvQGavDVC4ZpFDK4k99qN7bd7/fjj37+rxmuBOBeIqCA5Mw==} + deprecated: Package no longer supported. Contact Support at https://www.npmjs.com/support for more info. + dependencies: + cross-fetch: 2.2.6 + eth-json-rpc-middleware: 1.6.0 + json-rpc-engine: 3.8.0 + json-rpc-error: 2.0.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /eth-json-rpc-middleware@1.6.0: + resolution: {integrity: sha512-tDVCTlrUvdqHKqivYMjtFZsdD7TtpNLBCfKAcOpaVs7orBMS/A8HWro6dIzNtTZIR05FAbJ3bioFOnZpuCew9Q==} + dependencies: + async: 2.6.3 + eth-query: 2.1.2 + eth-tx-summary: 3.2.4 + ethereumjs-block: 1.7.1 + ethereumjs-tx: 1.3.7 + ethereumjs-util: 5.2.1 + ethereumjs-vm: 2.6.0 + fetch-ponyfill: 4.1.0 + json-rpc-engine: 3.8.0 + json-rpc-error: 2.0.0 + json-stable-stringify: 1.0.1 + promise-to-callback: 1.0.0 + tape: 4.16.1 + transitivePeerDependencies: + - supports-color + dev: true + + /eth-lib@0.1.29: + resolution: {integrity: sha512-bfttrr3/7gG4E02HoWTDUcDDslN003OlOoBxk9virpAZQ1ja/jDgwkWB8QfJF7ojuEowrqy+lzp9VcJG7/k5bQ==} + dependencies: + bn.js: 4.12.0 + elliptic: 6.5.4 + nano-json-stream-parser: 0.1.2 + servify: 0.1.12 + ws: 3.3.3 + xhr-request-promise: 0.1.2 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /eth-lib@0.2.8: + 
resolution: {integrity: sha512-ArJ7x1WcWOlSpzdoTBX8vkwlkSQ85CjjifSZtV4co64vWxSV8geWfPI9x4SVYu3DSxnX4yWFVTtGL+j9DUFLNw==} + dependencies: + bn.js: 4.12.0 + elliptic: 6.5.4 + xhr-request-promise: 0.1.2 + dev: true + + /eth-query@2.1.2: + resolution: {integrity: sha512-srES0ZcvwkR/wd5OQBRA1bIJMww1skfGS0s8wlwK3/oNP4+wnds60krvu5R1QbpRQjMmpG5OMIWro5s7gvDPsA==} + dependencies: + json-rpc-random-id: 1.0.1 + xtend: 4.0.2 + dev: true + + /eth-sig-util@1.4.2: + resolution: {integrity: sha512-iNZ576iTOGcfllftB73cPB5AN+XUQAT/T8xzsILsghXC1o8gJUqe3RHlcDqagu+biFpYQ61KQrZZJza8eRSYqw==} + deprecated: Deprecated in favor of '@metamask/eth-sig-util' + dependencies: + ethereumjs-abi: github.com/ethereumjs/ethereumjs-abi/ee3994657fa7a427238e6ba92a84d0b529bbcde0 + ethereumjs-util: 5.2.1 + dev: true + + /eth-sig-util@3.0.0: + resolution: {integrity: sha512-4eFkMOhpGbTxBQ3AMzVf0haUX2uTur7DpWiHzWyTURa28BVJJtOkcb9Ok5TV0YvEPG61DODPW7ZUATbJTslioQ==} + deprecated: Deprecated in favor of '@metamask/eth-sig-util' + dependencies: + buffer: 5.7.1 + elliptic: 6.5.4 + ethereumjs-abi: 0.6.5 + ethereumjs-util: 5.2.1 + tweetnacl: 1.0.3 + tweetnacl-util: 0.15.1 + dev: true + + /eth-tx-summary@3.2.4: + resolution: {integrity: sha512-NtlDnaVZah146Rm8HMRUNMgIwG/ED4jiqk0TME9zFheMl1jOp6jL1m0NKGjJwehXQ6ZKCPr16MTr+qspKpEXNg==} + dependencies: + async: 2.6.3 + clone: 2.1.2 + concat-stream: 1.6.2 + end-of-stream: 1.4.4 + eth-query: 2.1.2 + ethereumjs-block: 1.7.1 + ethereumjs-tx: 1.3.7 + ethereumjs-util: 5.2.1 + ethereumjs-vm: 2.6.0 + through2: 2.0.5 + dev: true + + /ethashjs@0.0.8: + resolution: {integrity: sha512-/MSbf/r2/Ld8o0l15AymjOTlPqpN8Cr4ByUEA9GtR4x0yAh3TdtDzEg29zMjXCNPI7u6E5fOQdj/Cf9Tc7oVNw==} + deprecated: 'New package name format for new versions: @ethereumjs/ethash. Please update.' 
+ dependencies: + async: 2.6.3 + buffer-xor: 2.0.2 + ethereumjs-util: 7.1.5 + miller-rabin: 4.0.1 + dev: true + + /ethereum-bloom-filters@1.0.10: + resolution: {integrity: sha512-rxJ5OFN3RwjQxDcFP2Z5+Q9ho4eIdEmSc2ht0fCu8Se9nbXjZ7/031uXoUYJ87KHCOdVeiUuwSnoS7hmYAGVHA==} + dependencies: + js-sha3: 0.8.0 + dev: true + + /ethereum-common@0.0.18: + resolution: {integrity: sha512-EoltVQTRNg2Uy4o84qpa2aXymXDJhxm7eos/ACOg0DG4baAbMjhbdAEsx9GeE8sC3XCxnYvrrzZDH8D8MtA2iQ==} + dev: true + + /ethereum-common@0.2.0: + resolution: {integrity: sha512-XOnAR/3rntJgbCdGhqdaLIxDLWKLmsZOGhHdBKadEr6gEnJLH52k93Ou+TUdFaPN3hJc3isBZBal3U/XZ15abA==} + dev: true + + /ethereum-cryptography@0.1.3: + resolution: {integrity: sha512-w8/4x1SGGzc+tO97TASLja6SLd3fRIK2tLVcV2Gx4IB21hE19atll5Cq9o3d0ZmAYC/8aw0ipieTSiekAea4SQ==} + dependencies: + '@types/pbkdf2': 3.1.0 + '@types/secp256k1': 4.0.3 + blakejs: 1.2.1 + browserify-aes: 1.2.0 + bs58check: 2.1.2 + create-hash: 1.2.0 + create-hmac: 1.1.7 + hash.js: 1.1.7 + keccak: 3.0.2 + pbkdf2: 3.1.2 + randombytes: 2.1.0 + safe-buffer: 5.2.1 + scrypt-js: 3.0.1 + secp256k1: 4.0.3 + setimmediate: 1.0.5 + dev: true + + /ethereum-cryptography@1.1.2: + resolution: {integrity: sha512-XDSJlg4BD+hq9N2FjvotwUET9Tfxpxc3kWGE2AqUG5vcbeunnbImVk3cj6e/xT3phdW21mE8R5IugU4fspQDcQ==} + dependencies: + '@noble/hashes': 1.1.2 + '@noble/secp256k1': 1.6.3 + '@scure/bip32': 1.1.0 + '@scure/bip39': 1.1.0 + dev: true + + /ethereum-waffle@3.4.4(typescript@5.3.3): + resolution: {integrity: sha512-PA9+jCjw4WC3Oc5ocSMBj5sXvueWQeAbvCA+hUlb6oFgwwKyq5ka3bWQ7QZcjzIX+TdFkxP4IbFmoY2D8Dkj9Q==} + engines: {node: '>=10.0'} + hasBin: true + dependencies: + '@ethereum-waffle/chai': 3.4.4 + '@ethereum-waffle/compiler': 3.4.4(typescript@5.3.3) + '@ethereum-waffle/mock-contract': 3.4.4 + '@ethereum-waffle/provider': 3.4.4 + ethers: 5.7.2 + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - typescript + - utf-8-validate + dev: true + + /ethereumjs-abi@0.6.5: + resolution: 
{integrity: sha512-rCjJZ/AE96c/AAZc6O3kaog4FhOsAViaysBxqJNy2+LHP0ttH0zkZ7nXdVHOAyt6lFwLO0nlCwWszysG/ao1+g==} + dependencies: + bn.js: 4.12.0 + ethereumjs-util: 4.5.0 + dev: true + + /ethereumjs-abi@0.6.8: + resolution: {integrity: sha512-Tx0r/iXI6r+lRsdvkFDlut0N08jWMnKRZ6Gkq+Nmw75lZe4e6o3EkSnkaBP5NF6+m5PTGAr9JP43N3LyeoglsA==} + dependencies: + bn.js: 4.12.0 + ethereumjs-util: 6.2.1 + dev: true + + /ethereumjs-account@2.0.5: + resolution: {integrity: sha512-bgDojnXGjhMwo6eXQC0bY6UK2liSFUSMwwylOmQvZbSl/D7NXQ3+vrGO46ZeOgjGfxXmgIeVNDIiHw7fNZM4VA==} + dependencies: + ethereumjs-util: 5.2.1 + rlp: 2.2.7 + safe-buffer: 5.2.1 + dev: true + + /ethereumjs-account@3.0.0: + resolution: {integrity: sha512-WP6BdscjiiPkQfF9PVfMcwx/rDvfZTjFKY0Uwc09zSQr9JfIVH87dYIJu0gNhBhpmovV4yq295fdllS925fnBA==} + deprecated: Please use Util.Account class found on package ethereumjs-util@^7.0.6 https://github.com/ethereumjs/ethereumjs-util/releases/tag/v7.0.6 + dependencies: + ethereumjs-util: 6.2.1 + rlp: 2.2.7 + safe-buffer: 5.2.1 + dev: true + + /ethereumjs-block@1.7.1: + resolution: {integrity: sha512-B+sSdtqm78fmKkBq78/QLKJbu/4Ts4P2KFISdgcuZUPDm9x+N7qgBPIIFUGbaakQh8bzuquiRVbdmvPKqbILRg==} + deprecated: 'New package name format for new versions: @ethereumjs/block. Please update.' + dependencies: + async: 2.6.3 + ethereum-common: 0.2.0 + ethereumjs-tx: 1.3.7 + ethereumjs-util: 5.2.1 + merkle-patricia-tree: 2.3.2 + dev: true + + /ethereumjs-block@2.2.2: + resolution: {integrity: sha512-2p49ifhek3h2zeg/+da6XpdFR3GlqY3BIEiqxGF8j9aSRIgkb7M1Ky+yULBKJOu8PAZxfhsYA+HxUk2aCQp3vg==} + deprecated: 'New package name format for new versions: @ethereumjs/block. Please update.' 
+ dependencies: + async: 2.6.3 + ethereumjs-common: 1.5.0 + ethereumjs-tx: 2.1.2 + ethereumjs-util: 5.2.1 + merkle-patricia-tree: 2.3.2 + dev: true + + /ethereumjs-blockchain@4.0.4: + resolution: {integrity: sha512-zCxaRMUOzzjvX78DTGiKjA+4h2/sF0OYL1QuPux0DHpyq8XiNoF5GYHtb++GUxVlMsMfZV7AVyzbtgcRdIcEPQ==} + deprecated: 'New package name format for new versions: @ethereumjs/blockchain. Please update.' + dependencies: + async: 2.6.3 + ethashjs: 0.0.8 + ethereumjs-block: 2.2.2 + ethereumjs-common: 1.5.0 + ethereumjs-util: 6.2.1 + flow-stoplight: 1.0.0 + level-mem: 3.0.1 + lru-cache: 5.1.1 + rlp: 2.2.7 + semaphore: 1.1.0 + dev: true + + /ethereumjs-common@1.5.0: + resolution: {integrity: sha512-SZOjgK1356hIY7MRj3/ma5qtfr/4B5BL+G4rP/XSMYr2z1H5el4RX5GReYCKmQmYI/nSBmRnwrZ17IfHuG0viQ==} + deprecated: 'New package name format for new versions: @ethereumjs/common. Please update.' + dev: true + + /ethereumjs-tx@1.3.7: + resolution: {integrity: sha512-wvLMxzt1RPhAQ9Yi3/HKZTn0FZYpnsmQdbKYfUUpi4j1SEIcbkd9tndVjcPrufY3V7j2IebOpC00Zp2P/Ay2kA==} + deprecated: 'New package name format for new versions: @ethereumjs/tx. Please update.' + dependencies: + ethereum-common: 0.0.18 + ethereumjs-util: 5.2.1 + dev: true + + /ethereumjs-tx@2.1.2: + resolution: {integrity: sha512-zZEK1onCeiORb0wyCXUvg94Ve5It/K6GD1K+26KfFKodiBiS6d9lfCXlUKGBBdQ+bv7Day+JK0tj1K+BeNFRAw==} + deprecated: 'New package name format for new versions: @ethereumjs/tx. Please update.' 
+ dependencies: + ethereumjs-common: 1.5.0 + ethereumjs-util: 6.2.1 + dev: true + + /ethereumjs-util@4.5.0: + resolution: {integrity: sha512-gT1zBY8aQKkexYu7XNeBZBnJsRLo+sWD1XWRLJOaDSz49/9kCOs6ERP52Bw/TA4uaVFKpM+O8ebWy44Ib5B6xw==} + dependencies: + bn.js: 4.12.0 + create-hash: 1.2.0 + keccakjs: 0.2.3 + rlp: 2.2.7 + secp256k1: 3.7.1 + dev: true + + /ethereumjs-util@5.2.1: + resolution: {integrity: sha512-v3kT+7zdyCm1HIqWlLNrHGqHGLpGYIhjeHxQjnDXjLT2FyGJDsd3LWMYUo7pAFRrk86CR3nUJfhC81CCoJNNGQ==} + dependencies: + bn.js: 4.12.0 + create-hash: 1.2.0 + elliptic: 6.5.4 + ethereum-cryptography: 0.1.3 + ethjs-util: 0.1.6 + rlp: 2.2.7 + safe-buffer: 5.2.1 + dev: true + + /ethereumjs-util@6.2.1: + resolution: {integrity: sha512-W2Ktez4L01Vexijrm5EB6w7dg4n/TgpoYU4avuT5T3Vmnw/eCRtiBrJfQYS/DCSvDIOLn2k57GcHdeBcgVxAqw==} + dependencies: + '@types/bn.js': 4.11.6 + bn.js: 4.12.0 + create-hash: 1.2.0 + elliptic: 6.5.4 + ethereum-cryptography: 0.1.3 + ethjs-util: 0.1.6 + rlp: 2.2.7 + dev: true + + /ethereumjs-util@7.1.5: + resolution: {integrity: sha512-SDl5kKrQAudFBUe5OJM9Ac6WmMyYmXX/6sTmLZ3ffG2eY6ZIGBes3pEDxNN6V72WyOw4CPD5RomKdsa8DAAwLg==} + engines: {node: '>=10.0.0'} + dependencies: + '@types/bn.js': 5.1.1 + bn.js: 5.2.1 + create-hash: 1.2.0 + ethereum-cryptography: 0.1.3 + rlp: 2.2.7 + dev: true + + /ethereumjs-vm@2.6.0: + resolution: {integrity: sha512-r/XIUik/ynGbxS3y+mvGnbOKnuLo40V5Mj1J25+HEO63aWYREIqvWeRO/hnROlMBE5WoniQmPmhiaN0ctiHaXw==} + deprecated: 'New package name format for new versions: @ethereumjs/vm. Please update.' 
+ dependencies: + async: 2.6.3 + async-eventemitter: 0.2.4 + ethereumjs-account: 2.0.5 + ethereumjs-block: 2.2.2 + ethereumjs-common: 1.5.0 + ethereumjs-util: 6.2.1 + fake-merkle-patricia-tree: 1.0.1 + functional-red-black-tree: 1.0.1 + merkle-patricia-tree: 2.3.2 + rustbn.js: 0.2.0 + safe-buffer: 5.2.1 + dev: true + + /ethereumjs-vm@4.2.0: + resolution: {integrity: sha512-X6qqZbsY33p5FTuZqCnQ4+lo957iUJMM6Mpa6bL4UW0dxM6WmDSHuI4j/zOp1E2TDKImBGCJA9QPfc08PaNubA==} + deprecated: 'New package name format for new versions: @ethereumjs/vm. Please update.' + dependencies: + async: 2.6.3 + async-eventemitter: 0.2.4 + core-js-pure: 3.25.3 + ethereumjs-account: 3.0.0 + ethereumjs-block: 2.2.2 + ethereumjs-blockchain: 4.0.4 + ethereumjs-common: 1.5.0 + ethereumjs-tx: 2.1.2 + ethereumjs-util: 6.2.1 + fake-merkle-patricia-tree: 1.0.1 + functional-red-black-tree: 1.0.1 + merkle-patricia-tree: 2.3.2 + rustbn.js: 0.2.0 + safe-buffer: 5.2.1 + util.promisify: 1.1.1 + dev: true + + /ethereumjs-wallet@0.6.5: + resolution: {integrity: sha512-MDwjwB9VQVnpp/Dc1XzA6J1a3wgHQ4hSvA1uWNatdpOrtCbPVuQSKSyRnjLvS0a+KKMw2pvQ9Ybqpb3+eW8oNA==} + requiresBuild: true + dependencies: + aes-js: 3.1.2 + bs58check: 2.1.2 + ethereum-cryptography: 0.1.3 + ethereumjs-util: 6.2.1 + randombytes: 2.1.0 + safe-buffer: 5.2.1 + scryptsy: 1.2.1 + utf8: 3.0.0 + uuid: 3.4.0 + dev: true + optional: true + + /ethers@4.0.49: + resolution: {integrity: sha512-kPltTvWiyu+OktYy1IStSO16i2e7cS9D9OxZ81q2UUaiNPVrm/RTcbxamCXF9VUSKzJIdJV68EAIhTEVBalRWg==} + dependencies: + aes-js: 3.0.0 + bn.js: 4.12.0 + elliptic: 6.5.4 + hash.js: 1.1.3 + js-sha3: 0.5.7 + scrypt-js: 2.0.4 + setimmediate: 1.0.4 + uuid: 2.0.1 + xmlhttprequest: 1.8.0 + dev: true + + /ethers@5.7.2: + resolution: {integrity: sha512-wswUsmWo1aOK8rR7DIKiWSw9DbLWe6x98Jrn8wcTflTVvaXhAMaB5zGAXy0GYQEQp9iO1iSHWVyARQm11zUtyg==} + dependencies: + '@ethersproject/abi': 5.7.0 + '@ethersproject/abstract-provider': 5.7.0 + '@ethersproject/abstract-signer': 5.7.0 + 
'@ethersproject/address': 5.7.0 + '@ethersproject/base64': 5.7.0 + '@ethersproject/basex': 5.7.0 + '@ethersproject/bignumber': 5.7.0 + '@ethersproject/bytes': 5.7.0 + '@ethersproject/constants': 5.7.0 + '@ethersproject/contracts': 5.7.0 + '@ethersproject/hash': 5.7.0 + '@ethersproject/hdnode': 5.7.0 + '@ethersproject/json-wallets': 5.7.0 + '@ethersproject/keccak256': 5.7.0 + '@ethersproject/logger': 5.0.6 + '@ethersproject/networks': 5.7.1 + '@ethersproject/pbkdf2': 5.7.0 + '@ethersproject/properties': 5.7.0 + '@ethersproject/providers': 5.7.2 + '@ethersproject/random': 5.7.0 + '@ethersproject/rlp': 5.7.0 + '@ethersproject/sha2': 5.7.0 + '@ethersproject/signing-key': 5.7.0 + '@ethersproject/solidity': 5.7.0 + '@ethersproject/strings': 5.7.0 + '@ethersproject/transactions': 5.7.0 + '@ethersproject/units': 5.7.0 + '@ethersproject/wallet': 5.7.0 + '@ethersproject/web': 5.7.1 + '@ethersproject/wordlists': 5.7.0 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + + /ethjs-abi@0.2.1: + resolution: {integrity: sha512-g2AULSDYI6nEJyJaEVEXtTimRY2aPC2fi7ddSy0W+LXvEVL8Fe1y76o43ecbgdUKwZD+xsmEgX1yJr1Ia3r1IA==} + engines: {node: '>=6.5.0', npm: '>=3'} + dependencies: + bn.js: 4.11.6 + js-sha3: 0.5.5 + number-to-bn: 1.7.0 + dev: true + + /ethjs-unit@0.1.6: + resolution: {integrity: sha512-/Sn9Y0oKl0uqQuvgFk/zQgR7aw1g36qX/jzSQ5lSwlO0GigPymk4eGQfeNTD03w1dPOqfz8V77Cy43jH56pagw==} + engines: {node: '>=6.5.0', npm: '>=3'} + dependencies: + bn.js: 4.11.6 + number-to-bn: 1.7.0 + dev: true + + /ethjs-util@0.1.6: + resolution: {integrity: sha512-CUnVOQq7gSpDHZVVrQW8ExxUETWrnrvXYvYz55wOU8Uj4VCgw56XC2B/fVqQN+f7gmrnRHSLVnFAwsCuNwji8w==} + engines: {node: '>=6.5.0', npm: '>=3'} + dependencies: + is-hex-prefixed: 1.0.0 + strip-hex-prefix: 1.0.0 + dev: true + bundledDependencies: false + + /eventemitter3@4.0.4: + resolution: {integrity: sha512-rlaVLnVxtxvoyLsQQFBx53YmXHDxRIzzTLbdfxqi4yocpSjAxXwkU0cScM5JgSKMqEhrZpnvQ2D9gjylR0AimQ==} + dev: true + + /events@3.3.0: + resolution: 
{integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==} + engines: {node: '>=0.8.x'} + dev: true + + /evp_bytestokey@1.0.3: + resolution: {integrity: sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==} + dependencies: + md5.js: 1.3.5 + safe-buffer: 5.2.1 + dev: true + + /expand-brackets@2.1.4: + resolution: {integrity: sha512-w/ozOKR9Obk3qoWeY/WDi6MFta9AoMR+zud60mdnbniMcBxRuFJyDt2LdX/14A1UABeqk+Uk+LDfUpvoGKppZA==} + engines: {node: '>=0.10.0'} + dependencies: + debug: 2.6.9 + define-property: 0.2.5 + extend-shallow: 2.0.1 + posix-character-classes: 0.1.1 + regex-not: 1.0.2 + snapdragon: 0.8.2 + to-regex: 3.0.2 + transitivePeerDependencies: + - supports-color + dev: true + + /express@4.17.1: + resolution: {integrity: sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==} + engines: {node: '>= 0.10.0'} + dependencies: + accepts: 1.3.7 + array-flatten: 1.1.1 + body-parser: 1.19.0 + content-disposition: 0.5.3 + content-type: 1.0.4 + cookie: 0.4.0 + cookie-signature: 1.0.6 + debug: 2.6.9 + depd: 1.1.2 + encodeurl: 1.0.2 + escape-html: 1.0.3 + etag: 1.8.1 + finalhandler: 1.1.2 + fresh: 0.5.2 + merge-descriptors: 1.0.1 + methods: 1.1.2 + on-finished: 2.3.0 + parseurl: 1.3.3 + path-to-regexp: 0.1.7 + proxy-addr: 2.0.5 + qs: 6.7.0 + range-parser: 1.2.1 + safe-buffer: 5.1.2 + send: 0.17.1 + serve-static: 1.14.1 + setprototypeof: 1.1.1 + statuses: 1.5.0 + type-is: 1.6.18 + utils-merge: 1.0.1 + vary: 1.1.2 + transitivePeerDependencies: + - supports-color + dev: true + + /ext@1.4.0: + resolution: {integrity: sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A==} + dependencies: + type: 2.0.0 + dev: true + + /extend-shallow@2.0.1: + resolution: {integrity: sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==} + engines: {node: '>=0.10.0'} + dependencies: + 
is-extendable: 0.1.1 + dev: true + + /extend-shallow@3.0.2: + resolution: {integrity: sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==} + engines: {node: '>=0.10.0'} + dependencies: + assign-symbols: 1.0.0 + is-extendable: 1.0.1 + dev: true + + /extend@3.0.2: + resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} + dev: true + + /extglob@2.0.4: + resolution: {integrity: sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==} + engines: {node: '>=0.10.0'} + dependencies: + array-unique: 0.3.2 + define-property: 1.0.0 + expand-brackets: 2.1.4 + extend-shallow: 2.0.1 + fragment-cache: 0.2.1 + regex-not: 1.0.2 + snapdragon: 0.8.2 + to-regex: 3.0.2 + transitivePeerDependencies: + - supports-color + dev: true + + /extsprintf@1.3.0: + resolution: {integrity: sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==} + engines: {'0': node >=0.6.0} + dev: true + + /extsprintf@1.4.0: + resolution: {integrity: sha512-6NW8DZ8pWBc5NbGYUiqqccj9dXnuSzilZYqprdKJBZsQodGH9IyUoFOGxIWVDcBzHMb8ET24aqx9p66tZEWZkA==} + engines: {'0': node >=0.6.0} + dev: true + + /fake-merkle-patricia-tree@1.0.1: + resolution: {integrity: sha512-Tgq37lkc9pUIgIKw5uitNUKcgcYL3R6JvXtKQbOf/ZSavXbidsksgp/pAY6p//uhw0I4yoMsvTSovvVIsk/qxA==} + dependencies: + checkpoint-store: 1.1.0 + dev: true + + /fast-base64-decode@1.0.0: + resolution: {integrity: sha512-qwaScUgUGBYeDNRnbc/KyllVU88Jk1pRHPStuF/lO7B0/RTRLj7U0lkdTAutlBblY08rwZDff6tNU9cjv6j//Q==} + dev: true + + /fast-check@3.1.1: + resolution: {integrity: sha512-3vtXinVyuUKCKFKYcwXhGE6NtGWkqF8Yh3rvMZNzmwz8EPrgoc/v4pDdLHyLnCyCI5MZpZZkDEwFyXyEONOxpA==} + engines: {node: '>=8.0.0'} + dependencies: + pure-rand: 5.0.3 + dev: true + + /fast-deep-equal@3.1.3: + resolution: {integrity: 
sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + dev: true + + /fast-diff@1.2.0: + resolution: {integrity: sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w==} + dev: true + + /fast-glob@3.3.1: + resolution: {integrity: sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==} + engines: {node: '>=8.6.0'} + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.5 + dev: true + + /fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + dev: true + + /fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + dev: true + + /fastq@1.6.0: + resolution: {integrity: sha512-jmxqQ3Z/nXoeyDmWAzF9kH1aGZSis6e/SbfPmJpUnyZ0ogr6iscHQaml4wsEepEWSdtmpy+eVXmCRIMpxaXqOA==} + dependencies: + reusify: 1.0.4 + dev: true + + /fetch-ponyfill@4.1.0: + resolution: {integrity: sha512-knK9sGskIg2T7OnYLdZ2hZXn0CtDrAIBxYQLpmEf0BqfdWnwmM1weccUl5+4EdA44tzNSFAuxITPbXtPehUB3g==} + dependencies: + node-fetch: 1.7.3 + dev: true + + /file-entry-cache@6.0.1: + resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} + engines: {node: ^10.12.0 || >=12.0.0} + dependencies: + flat-cache: 3.0.4 + dev: true + + /file-uri-to-path@1.0.0: + resolution: {integrity: sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==} + dev: true + + /fill-range@4.0.0: + resolution: {integrity: sha512-VcpLTWqWDiTerugjj8e3+esbg+skS3M9e54UuR3iCeIDMXCLTsAH8hTSzDQU/X6/6t3eYkOKoZSef2PlU6U1XQ==} + engines: {node: '>=0.10.0'} + dependencies: + extend-shallow: 2.0.1 + is-number: 3.0.0 + repeat-string: 1.6.1 + to-regex-range: 2.1.1 + dev: 
true + + /fill-range@7.0.1: + resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} + engines: {node: '>=8'} + dependencies: + to-regex-range: 5.0.1 + dev: true + + /finalhandler@1.1.2: + resolution: {integrity: sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==} + engines: {node: '>= 0.8'} + dependencies: + debug: 2.6.9 + encodeurl: 1.0.2 + escape-html: 1.0.3 + on-finished: 2.3.0 + parseurl: 1.3.3 + statuses: 1.5.0 + unpipe: 1.0.0 + transitivePeerDependencies: + - supports-color + dev: true + + /find-replace@1.0.3: + resolution: {integrity: sha512-KrUnjzDCD9426YnCP56zGYy/eieTnhtK6Vn++j+JJzmlsWWwEkDnsyVF575spT6HJ6Ow9tlbT3TQTDsa+O4UWA==} + engines: {node: '>=4.0.0'} + dependencies: + array-back: 1.0.4 + test-value: 2.1.0 + dev: true + + /find-replace@3.0.0: + resolution: {integrity: sha512-6Tb2myMioCAgv5kfvP5/PkZZ/ntTpVK39fHY7WkWBgvbeE+VHd/tZuZ4mrC+bxh4cfOZeYKVPaJIZtZXV7GNCQ==} + engines: {node: '>=4.0.0'} + dependencies: + array-back: 3.1.0 + dev: true + + /find-up@1.1.2: + resolution: {integrity: sha512-jvElSjyuo4EMQGoTwo1uJU5pQMwTW5lS1x05zzfJuTIyLR3zwO27LYrxNg+dlvKpGOuGy/MzBdXh80g0ve5+HA==} + engines: {node: '>=0.10.0'} + dependencies: + path-exists: 2.1.0 + pinkie-promise: 2.0.1 + dev: true + + /find-up@2.1.0: + resolution: {integrity: sha512-NWzkk0jSJtTt08+FBFMvXoeZnOJD+jTtsRmBYbAIzJdX6l7dLgR7CTubCM5/eDdPUBvLCeVasP1brfVR/9/EZQ==} + engines: {node: '>=4'} + dependencies: + locate-path: 2.0.0 + dev: true + + /find-up@3.0.0: + resolution: {integrity: sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==} + engines: {node: '>=6'} + dependencies: + locate-path: 3.0.0 + dev: true + + /find-up@4.1.0: + resolution: {integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==} + engines: {node: '>=8'} + dependencies: + locate-path: 5.0.0 + path-exists: 4.0.0 + dev: true + 
+ /find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + dev: true + + /find-yarn-workspace-root@1.2.1: + resolution: {integrity: sha512-dVtfb0WuQG+8Ag2uWkbG79hOUzEsRrhBzgfn86g2sJPkzmcpGdghbNTfUKGTxymFrY/tLIodDzLoW9nOJ4FY8Q==} + dependencies: + fs-extra: 4.0.3 + micromatch: 3.1.10 + transitivePeerDependencies: + - supports-color + dev: true + + /find-yarn-workspace-root@2.0.0: + resolution: {integrity: sha512-1IMnbjt4KzsQfnhnzNd8wUEgXZ44IzZaZmnLYx7D5FZlaHt2gW20Cri8Q+E/t5tIj4+epTBub+2Zxu/vNILzqQ==} + dependencies: + micromatch: 4.0.5 + dev: true + + /flat-cache@3.0.4: + resolution: {integrity: sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==} + engines: {node: ^10.12.0 || >=12.0.0} + dependencies: + flatted: 3.2.7 + rimraf: 3.0.2 + dev: true + + /flat@4.1.1: + resolution: {integrity: sha512-FmTtBsHskrU6FJ2VxCnsDb84wu9zhmO3cUX2kGFb5tuwhfXxGciiT0oRY+cck35QmG+NmGh5eLz6lLCpWTqwpA==} + hasBin: true + dependencies: + is-buffer: 2.0.5 + dev: true + + /flat@5.0.2: + resolution: {integrity: sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==} + hasBin: true + dev: true + + /flatted@3.2.7: + resolution: {integrity: sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==} + dev: true + + /flow-stoplight@1.0.0: + resolution: {integrity: sha512-rDjbZUKpN8OYhB0IE/vY/I8UWO/602IIJEU/76Tv4LvYnwHCk0BCsvz4eRr9n+FQcri7L5cyaXOo0+/Kh4HisA==} + dev: true + + /follow-redirects@1.15.2(debug@4.3.4): + resolution: {integrity: sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==} + engines: {node: '>=4.0'} + peerDependencies: + debug: '*' + peerDependenciesMeta: + debug: + optional: true + dependencies: + debug: 4.3.4(supports-color@8.1.1) + dev: true + + 
/for-each@0.3.3: + resolution: {integrity: sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==} + dependencies: + is-callable: 1.2.7 + dev: true + + /for-in@1.0.2: + resolution: {integrity: sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==} + engines: {node: '>=0.10.0'} + dev: true + + /foreach@2.0.5: + resolution: {integrity: sha512-ZBbtRiapkZYLsqoPyZOR+uPfto0GRMNQN1GwzZtZt7iZvPPbDDQV0JF5Hx4o/QFQ5c0vyuoZ98T8RSBbopzWtA==} + dev: true + + /forever-agent@0.6.1: + resolution: {integrity: sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==} + dev: true + + /form-data-encoder@1.7.1: + resolution: {integrity: sha512-EFRDrsMm/kyqbTQocNvRXMLjc7Es2Vk+IQFx/YW7hkUH1eBl4J1fqiP34l74Yt0pFLCNpc06fkbVk00008mzjg==} + dev: true + + /form-data@2.3.3: + resolution: {integrity: sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==} + engines: {node: '>= 0.12'} + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + mime-types: 2.1.27 + dev: true + + /form-data@3.0.1: + resolution: {integrity: sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==} + engines: {node: '>= 6'} + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + mime-types: 2.1.27 + dev: true + + /form-data@4.0.0: + resolution: {integrity: sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==} + engines: {node: '>= 6'} + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + mime-types: 2.1.27 + dev: true + + /forwarded@0.1.2: + resolution: {integrity: sha512-Ua9xNhH0b8pwE3yRbFfXJvfdWF0UHNCdeyb2sbi9Ul/M+r3PTdrz7Cv4SCfZRMjmzEM9PhraqfZFbGTIg3OMyA==} + engines: {node: '>= 0.6'} + dev: true + + /fp-ts@1.19.3: + resolution: {integrity: sha512-H5KQDspykdHuztLTg+ajGN0Z2qUjcEf3Ybxc6hLt0k7/zPkn29XnKnxlBPyW2XIddWrGaJBzBl4VLYOtk39yZg==} + dev: true + + 
/fragment-cache@0.2.1: + resolution: {integrity: sha512-GMBAbW9antB8iZRHLoGw0b3HANt57diZYFO/HL1JGIC1MjKrdmhxvrJbupnVvpys0zsz7yBApXdQyfepKly2kA==} + engines: {node: '>=0.10.0'} + dependencies: + map-cache: 0.2.2 + dev: true + + /fresh@0.5.2: + resolution: {integrity: sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==} + engines: {node: '>= 0.6'} + dev: true + + /fs-extra@0.30.0: + resolution: {integrity: sha512-UvSPKyhMn6LEd/WpUaV9C9t3zATuqoqfWc3QdPhPLb58prN9tqYPlPWi8Krxi44loBoUzlobqZ3+8tGpxxSzwA==} + dependencies: + graceful-fs: 4.2.10 + jsonfile: 2.4.0 + klaw: 1.3.1 + path-is-absolute: 1.0.1 + rimraf: 2.7.1 + dev: true + + /fs-extra@4.0.3: + resolution: {integrity: sha512-q6rbdDd1o2mAnQreO7YADIxf/Whx4AHBiRf6d+/cVT8h44ss+lHgxf1FemcqDnQt9X3ct4McHr+JMGlYSsK7Cg==} + dependencies: + graceful-fs: 4.2.10 + jsonfile: 4.0.0 + universalify: 0.1.2 + dev: true + + /fs-extra@7.0.1: + resolution: {integrity: sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==} + engines: {node: '>=6 <7 || >=8'} + dependencies: + graceful-fs: 4.2.10 + jsonfile: 4.0.0 + universalify: 0.1.2 + dev: true + + /fs-extra@8.1.0: + resolution: {integrity: sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==} + engines: {node: '>=6 <7 || >=8'} + dependencies: + graceful-fs: 4.2.10 + jsonfile: 4.0.0 + universalify: 0.1.2 + dev: true + + /fs-extra@9.1.0: + resolution: {integrity: sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==} + engines: {node: '>=10'} + dependencies: + at-least-node: 1.0.0 + graceful-fs: 4.2.10 + jsonfile: 6.1.0 + universalify: 2.0.0 + dev: true + + /fs-minipass@1.2.7: + resolution: {integrity: sha512-GWSSJGFy4e9GUeCcbIkED+bgAoFyj7XF1mV8rma3QW4NIqX9Kyx79N/PF61H5udOV3aY1IaMLs6pGbH71nlCTA==} + dependencies: + minipass: 2.9.0 + dev: true + + /fs-readdir-recursive@1.1.0: + resolution: {integrity: 
sha512-GNanXlVr2pf02+sPN40XN8HG+ePaNcvM0q5mZBd668Obwb0yD5GiUbZOFgwn8kGMY6I3mdyDJzieUy3PTYyTRA==} + dev: true + + /fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + dev: true + + /fsevents@2.1.3: + resolution: {integrity: sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + deprecated: '"Please update to latest v2.3 or v2.2"' + requiresBuild: true + dev: true + optional: true + + /fsevents@2.3.2: + resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /function-bind@1.1.1: + resolution: {integrity: sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==} + dev: true + + /function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + dev: true + + /function.prototype.name@1.1.5: + resolution: {integrity: sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.1.4 + es-abstract: 1.20.3 + functions-have-names: 1.2.3 + dev: true + + /function.prototype.name@1.1.6: + resolution: {integrity: sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.5 + define-properties: 1.2.1 + es-abstract: 1.22.3 + functions-have-names: 1.2.3 + dev: true + + /functional-red-black-tree@1.0.1: + resolution: {integrity: sha512-dsKNQNdj6xA3T+QlADDA7mOSlX0qiMINjn0cgr+eGHGsbSHzTabcIogz2+p/iqP1Xs6EP/sS2SbqH+brGTbq0g==} + dev: true + + /functions-have-names@1.2.3: + 
resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} + dev: true + + /ganache-core@2.13.2: + resolution: {integrity: sha512-tIF5cR+ANQz0+3pHWxHjIwHqFXcVo0Mb+kcsNhglNFALcYo49aQpnS9dqHartqPfMFjiHh/qFoD3mYK0d/qGgw==} + engines: {node: '>=8.9.0'} + deprecated: ganache-core is now ganache; visit https://trfl.io/g7 for details + dependencies: + abstract-leveldown: 3.0.0 + async: 2.6.2 + bip39: 2.5.0 + cachedown: 1.0.0 + clone: 2.1.2 + debug: 3.2.6(supports-color@6.0.0) + encoding-down: 5.0.4 + eth-sig-util: 3.0.0 + ethereumjs-abi: 0.6.8 + ethereumjs-account: 3.0.0 + ethereumjs-block: 2.2.2 + ethereumjs-common: 1.5.0 + ethereumjs-tx: 2.1.2 + ethereumjs-util: 6.2.1 + ethereumjs-vm: 4.2.0 + heap: 0.2.6 + level-sublevel: 6.6.4 + levelup: 3.1.1 + lodash: 4.17.20 + lru-cache: 5.1.1 + merkle-patricia-tree: 3.0.0 + patch-package: 6.2.2 + seedrandom: 3.0.1 + source-map-support: 0.5.12 + tmp: 0.1.0 + web3-provider-engine: 14.2.1 + websocket: 1.0.32 + optionalDependencies: + ethereumjs-wallet: 0.6.5 + web3: 1.2.11 + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - utf-8-validate + dev: true + bundledDependencies: + - keccak + + /get-caller-file@1.0.3: + resolution: {integrity: sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w==} + dev: true + + /get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + dev: true + + /get-func-name@2.0.2: + resolution: {integrity: sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==} + + /get-intrinsic@1.1.3: + resolution: {integrity: sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==} + dependencies: + function-bind: 1.1.1 + has: 1.0.3 + has-symbols: 1.0.3 + dev: true + + /get-intrinsic@1.2.2: 
+ resolution: {integrity: sha512-0gSo4ml/0j98Y3lngkFEot/zhiCeWsbYIlZ+uZOVgzLyLaUw7wxUL+nCTP0XJvJg1AXulJRI3UJi8GsbDuxdGA==} + dependencies: + function-bind: 1.1.2 + has-proto: 1.0.1 + has-symbols: 1.0.3 + hasown: 2.0.0 + dev: true + + /get-port@3.2.0: + resolution: {integrity: sha512-x5UJKlgeUiNT8nyo/AcnwLnZuZNcSjSw0kogRB+Whd1fjjFq4B1hySFxSFWWSn4mIBzg3sRNUDFYc4g5gjPoLg==} + engines: {node: '>=4'} + dev: true + + /get-stream@3.0.0: + resolution: {integrity: sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ==} + engines: {node: '>=4'} + dev: true + + /get-stream@4.1.0: + resolution: {integrity: sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==} + engines: {node: '>=6'} + dependencies: + pump: 3.0.0 + dev: true + + /get-stream@5.1.0: + resolution: {integrity: sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw==} + engines: {node: '>=8'} + dependencies: + pump: 3.0.0 + dev: true + + /get-stream@6.0.1: + resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==} + engines: {node: '>=10'} + dev: true + + /get-symbol-description@1.0.0: + resolution: {integrity: sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + get-intrinsic: 1.1.3 + dev: true + + /get-value@2.0.6: + resolution: {integrity: sha512-Ln0UQDlxH1BapMu3GPtf7CuYNwRZf2gwCuPqbyG6pB8WfmFpzqcy4xtAaAMUhnNqjMKTiCPZG2oMT3YSx8U2NA==} + engines: {node: '>=0.10.0'} + dev: true + + /getpass@0.1.7: + resolution: {integrity: sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==} + dependencies: + assert-plus: 1.0.0 + dev: true + + /ghost-testrpc@0.0.2: + resolution: {integrity: sha512-i08dAEgJ2g8z5buJIrCTduwPIhih3DP+hOCTyyryikfV8T0bNvHnGXO67i0DD1H4GBDETTclPy9njZbfluQYrQ==} + hasBin: 
true + dependencies: + chalk: 2.4.2 + node-emoji: 1.11.0 + dev: true + + /glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + dependencies: + is-glob: 4.0.3 + dev: true + + /glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + dependencies: + is-glob: 4.0.3 + dev: true + + /glob@5.0.15: + resolution: {integrity: sha512-c9IPMazfRITpmAAKi22dK1VKxGDX9ehhqfABDriL/lzO92xcUKEJPQHrVA/2YHSNFB4iFlykVmWvwo48nr3OxA==} + dependencies: + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + dev: true + + /glob@7.1.3: + resolution: {integrity: sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + dev: true + + /glob@7.1.7: + resolution: {integrity: sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + dev: true + + /glob@7.2.0: + resolution: {integrity: sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + dev: true + + /glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + dev: true + + /glob@8.1.0: + resolution: {integrity: 
sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==} + engines: {node: '>=12'} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 5.1.6 + once: 1.4.0 + dev: true + + /global-modules@2.0.0: + resolution: {integrity: sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==} + engines: {node: '>=6'} + dependencies: + global-prefix: 3.0.0 + dev: true + + /global-prefix@3.0.0: + resolution: {integrity: sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==} + engines: {node: '>=6'} + dependencies: + ini: 1.3.8 + kind-of: 6.0.3 + which: 1.3.1 + dev: true + + /global@4.3.2: + resolution: {integrity: sha512-/4AybdwIDU4HkCUbJkZdWpe4P6vuw/CUtu+0I1YlLIPe7OlUO7KNJ+q/rO70CW2/NW6Jc6I62++Hzsf5Alu6rQ==} + dependencies: + min-document: 2.19.0 + process: 0.5.2 + dev: true + + /globals@13.20.0: + resolution: {integrity: sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==} + engines: {node: '>=8'} + dependencies: + type-fest: 0.20.2 + dev: true + + /globals@9.18.0: + resolution: {integrity: sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ==} + engines: {node: '>=0.10.0'} + dev: true + + /globalthis@1.0.3: + resolution: {integrity: sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==} + engines: {node: '>= 0.4'} + dependencies: + define-properties: 1.2.1 + dev: true + + /globby@10.0.2: + resolution: {integrity: sha512-7dUi7RvCoT/xast/o/dLN53oqND4yk0nsHkhRgn9w65C4PofCLOoJ39iSOg+qVDdWQPIEj+eszMHQ+aLVwwQSg==} + engines: {node: '>=8'} + dependencies: + '@types/glob': 7.1.1 + array-union: 2.1.0 + dir-glob: 3.0.1 + fast-glob: 3.3.1 + glob: 7.2.3 + ignore: 5.2.4 + merge2: 1.4.1 + slash: 3.0.0 + dev: true + + /globby@11.1.0: + resolution: {integrity: 
sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} + engines: {node: '>=10'} + dependencies: + array-union: 2.1.0 + dir-glob: 3.0.1 + fast-glob: 3.3.1 + ignore: 5.2.4 + merge2: 1.4.1 + slash: 3.0.0 + dev: true + + /gopd@1.0.1: + resolution: {integrity: sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==} + dependencies: + get-intrinsic: 1.2.2 + dev: true + + /got@12.1.0: + resolution: {integrity: sha512-hBv2ty9QN2RdbJJMK3hesmSkFTjVIHyIDDbssCKnSmq62edGgImJWD10Eb1k77TiV1bxloxqcFAVK8+9pkhOig==} + engines: {node: '>=14.16'} + dependencies: + '@sindresorhus/is': 4.6.0 + '@szmarczak/http-timer': 5.0.1 + '@types/cacheable-request': 6.0.2 + '@types/responselike': 1.0.0 + cacheable-lookup: 6.1.0 + cacheable-request: 7.0.2 + decompress-response: 6.0.0 + form-data-encoder: 1.7.1 + get-stream: 6.0.1 + http2-wrapper: 2.1.11 + lowercase-keys: 3.0.0 + p-cancelable: 3.0.0 + responselike: 2.0.1 + dev: true + + /got@7.1.0: + resolution: {integrity: sha512-Y5WMo7xKKq1muPsxD+KmrR8DH5auG7fBdDVueZwETwV6VytKyU9OX/ddpq2/1hp1vIPvVb4T81dKQz3BivkNLw==} + engines: {node: '>=4'} + dependencies: + '@types/keyv': 3.1.4 + '@types/responselike': 1.0.0 + decompress-response: 3.3.0 + duplexer3: 0.1.4 + get-stream: 3.0.0 + is-plain-obj: 1.1.0 + is-retry-allowed: 1.2.0 + is-stream: 1.1.0 + isurl: 1.0.0 + lowercase-keys: 1.0.1 + p-cancelable: 0.3.0 + p-timeout: 1.2.1 + safe-buffer: 5.2.1 + timed-out: 4.0.1 + url-parse-lax: 1.0.0 + url-to-options: 1.0.1 + dev: true + + /got@9.6.0: + resolution: {integrity: sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==} + engines: {node: '>=8.6'} + dependencies: + '@sindresorhus/is': 0.14.0 + '@szmarczak/http-timer': 1.1.2 + '@types/keyv': 3.1.4 + '@types/responselike': 1.0.0 + cacheable-request: 6.1.0 + decompress-response: 3.3.0 + duplexer3: 0.1.4 + get-stream: 4.1.0 + lowercase-keys: 1.0.1 + mimic-response: 1.0.1 + p-cancelable: 
1.1.0 + to-readable-stream: 1.0.0 + url-parse-lax: 3.0.0 + dev: true + + /graceful-fs@4.2.10: + resolution: {integrity: sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==} + dev: true + + /grapheme-splitter@1.0.4: + resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==} + dev: true + + /graphemer@1.4.0: + resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + dev: true + + /growl@1.10.5: + resolution: {integrity: sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==} + engines: {node: '>=4.x'} + dev: true + + /handlebars@4.7.7: + resolution: {integrity: sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA==} + engines: {node: '>=0.4.7'} + hasBin: true + dependencies: + minimist: 1.2.6 + neo-async: 2.6.2 + source-map: 0.6.1 + wordwrap: 1.0.0 + optionalDependencies: + uglify-js: 3.17.3 + dev: true + + /har-schema@2.0.0: + resolution: {integrity: sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q==} + engines: {node: '>=4'} + dev: true + + /har-validator@5.1.5: + resolution: {integrity: sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==} + engines: {node: '>=6'} + deprecated: this library is no longer supported + dependencies: + ajv: 6.12.6 + har-schema: 2.0.0 + dev: true + + /hardhat-abi-exporter@2.10.1(hardhat@2.19.2): + resolution: {integrity: sha512-X8GRxUTtebMAd2k4fcPyVnCdPa6dYK4lBsrwzKP5yiSq4i+WadWPIumaLfce53TUf/o2TnLpLOduyO1ylE2NHQ==} + engines: {node: '>=14.14.0'} + peerDependencies: + hardhat: ^2.0.0 + dependencies: + '@ethersproject/abi': 5.7.0 + delete-empty: 3.0.0 + hardhat: 2.19.2(ts-node@10.9.2)(typescript@5.3.3) + dev: true + + /hardhat-contract-sizer@2.10.0(hardhat@2.19.2): + resolution: {integrity: 
sha512-QiinUgBD5MqJZJh1hl1jc9dNnpJg7eE/w4/4GEnrcmZJJTDbVFNe3+/3Ep24XqISSkYxRz36czcPHKHd/a0dwA==} + peerDependencies: + hardhat: ^2.0.0 + dependencies: + chalk: 4.1.2 + cli-table3: 0.6.3 + hardhat: 2.19.2(ts-node@10.9.2)(typescript@5.3.3) + strip-ansi: 6.0.1 + dev: true + + /hardhat-gas-reporter@1.0.9(hardhat@2.19.2): + resolution: {integrity: sha512-INN26G3EW43adGKBNzYWOlI3+rlLnasXTwW79YNnUhXPDa+yHESgt639dJEs37gCjhkbNKcRRJnomXEuMFBXJg==} + peerDependencies: + hardhat: ^2.0.2 + dependencies: + array-uniq: 1.0.3 + eth-gas-reporter: 0.2.25 + hardhat: 2.19.2(ts-node@10.9.2)(typescript@5.3.3) + sha1: 1.1.1 + transitivePeerDependencies: + - '@codechecks/client' + dev: true + + /hardhat-ignore-warnings@0.2.9: + resolution: {integrity: sha512-q1oj6/ixiAx+lgIyGLBajVCSC7qUtAoK7LS9Nr8UVHYo8Iuh5naBiVGo4RDJ6wxbDGYBkeSukUGZrMqzC2DWwA==} + dependencies: + minimatch: 5.1.6 + node-interval-tree: 2.1.2 + solidity-comments: 0.0.2 + dev: true + + /hardhat@2.19.2(ts-node@10.9.2)(typescript@5.3.3): + resolution: {integrity: sha512-CRU3+0Cc8Qh9UpxKd8cLADDPes7ZDtKj4dTK+ERtLBomEzhRPLWklJn4VKOwjre9/k8GNd/e9DYxpfuzcxbXPQ==} + hasBin: true + peerDependencies: + ts-node: '*' + typescript: '*' + peerDependenciesMeta: + ts-node: + optional: true + typescript: + optional: true + dependencies: + '@ethersproject/abi': 5.7.0 + '@metamask/eth-sig-util': 4.0.1 + '@nomicfoundation/ethereumjs-block': 5.0.2 + '@nomicfoundation/ethereumjs-blockchain': 7.0.2 + '@nomicfoundation/ethereumjs-common': 4.0.2 + '@nomicfoundation/ethereumjs-evm': 2.0.2 + '@nomicfoundation/ethereumjs-rlp': 5.0.2 + '@nomicfoundation/ethereumjs-statemanager': 2.0.2 + '@nomicfoundation/ethereumjs-trie': 6.0.2 + '@nomicfoundation/ethereumjs-tx': 5.0.2 + '@nomicfoundation/ethereumjs-util': 9.0.2 + '@nomicfoundation/ethereumjs-vm': 7.0.2 + '@nomicfoundation/solidity-analyzer': 0.1.0 + '@sentry/node': 5.30.0 + '@types/bn.js': 5.1.1 + '@types/lru-cache': 5.1.1 + adm-zip: 0.4.16 + aggregate-error: 3.1.0 + ansi-escapes: 4.3.2 + chalk: 2.4.2 
+ chokidar: 3.5.3 + ci-info: 2.0.0 + debug: 4.3.4(supports-color@8.1.1) + enquirer: 2.3.6 + env-paths: 2.2.1 + ethereum-cryptography: 1.1.2 + ethereumjs-abi: 0.6.8 + find-up: 2.1.0 + fp-ts: 1.19.3 + fs-extra: 7.0.1 + glob: 7.2.0 + immutable: 4.1.0 + io-ts: 1.10.4 + keccak: 3.0.2 + lodash: 4.17.21 + mnemonist: 0.38.5 + mocha: 10.2.0 + p-map: 4.0.0 + raw-body: 2.5.1 + resolve: 1.17.0 + semver: 6.3.0 + solc: 0.7.3(debug@4.3.4) + source-map-support: 0.5.21 + stacktrace-parser: 0.1.10 + ts-node: 10.9.2(@types/node@16.18.80)(typescript@5.3.3) + tsort: 0.0.1 + typescript: 5.3.3 + undici: 5.19.1 + uuid: 8.3.2 + ws: 7.5.9 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /has-ansi@2.0.0: + resolution: {integrity: sha512-C8vBJ8DwUCx19vhm7urhTuUsr4/IyP6l4VzNQDv+ryHQObW3TTTp9yB68WpYgRe2bbaGuZ/se74IqFeVnMnLZg==} + engines: {node: '>=0.10.0'} + dependencies: + ansi-regex: 2.1.1 + dev: true + + /has-bigints@1.0.2: + resolution: {integrity: sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==} + dev: true + + /has-flag@1.0.0: + resolution: {integrity: sha512-DyYHfIYwAJmjAjSSPKANxI8bFY9YtFrgkAfinBojQ8YJTOuOuav64tMUJv584SES4xl74PmuaevIyaLESHdTAA==} + engines: {node: '>=0.10.0'} + dev: true + + /has-flag@3.0.0: + resolution: {integrity: sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==} + engines: {node: '>=4'} + dev: true + + /has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + dev: true + + /has-property-descriptors@1.0.0: + resolution: {integrity: sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==} + dependencies: + get-intrinsic: 1.1.3 + dev: true + + /has-proto@1.0.1: + resolution: {integrity: 
sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==} + engines: {node: '>= 0.4'} + dev: true + + /has-symbol-support-x@1.4.2: + resolution: {integrity: sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw==} + dev: true + + /has-symbols@1.0.3: + resolution: {integrity: sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==} + engines: {node: '>= 0.4'} + dev: true + + /has-to-string-tag-x@1.4.1: + resolution: {integrity: sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw==} + dependencies: + has-symbol-support-x: 1.4.2 + dev: true + + /has-tostringtag@1.0.0: + resolution: {integrity: sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==} + engines: {node: '>= 0.4'} + dependencies: + has-symbols: 1.0.3 + dev: true + + /has-value@0.3.1: + resolution: {integrity: sha512-gpG936j8/MzaeID5Yif+577c17TxaDmhuyVgSwtnL/q8UUTySg8Mecb+8Cf1otgLoD7DDH75axp86ER7LFsf3Q==} + engines: {node: '>=0.10.0'} + dependencies: + get-value: 2.0.6 + has-values: 0.1.4 + isobject: 2.1.0 + dev: true + + /has-value@1.0.0: + resolution: {integrity: sha512-IBXk4GTsLYdQ7Rvt+GRBrFSVEkmuOUy4re0Xjd9kJSUQpnTrWR4/y9RpfexN9vkAPMFuQoeWKwqzPozRTlasGw==} + engines: {node: '>=0.10.0'} + dependencies: + get-value: 2.0.6 + has-values: 1.0.0 + isobject: 3.0.1 + dev: true + + /has-values@0.1.4: + resolution: {integrity: sha512-J8S0cEdWuQbqD9//tlZxiMuMNmxB8PlEwvYwuxsTmR1G5RXUePEX/SJn7aD0GMLieuZYSwNH0cQuJGwnYunXRQ==} + engines: {node: '>=0.10.0'} + dev: true + + /has-values@1.0.0: + resolution: {integrity: sha512-ODYZC64uqzmtfGMEAX/FvZiRyWLpAC3vYnNunURUnkGVTS+mI0smVsWaPydRBsE3g+ok7h960jChO8mFcWlHaQ==} + engines: {node: '>=0.10.0'} + dependencies: + is-number: 3.0.0 + kind-of: 4.0.0 + dev: true + + /has@1.0.3: + resolution: {integrity: 
sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==} + engines: {node: '>= 0.4.0'} + dependencies: + function-bind: 1.1.1 + dev: true + + /hash-base@3.1.0: + resolution: {integrity: sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==} + engines: {node: '>=4'} + dependencies: + inherits: 2.0.4 + readable-stream: 3.6.0 + safe-buffer: 5.2.1 + dev: true + + /hash.js@1.1.3: + resolution: {integrity: sha512-/UETyP0W22QILqS+6HowevwhEFJ3MBJnwTf75Qob9Wz9t0DPuisL8kW8YZMK62dHAKE1c1p+gY1TtOLY+USEHA==} + dependencies: + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + dev: true + + /hash.js@1.1.7: + resolution: {integrity: sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==} + dependencies: + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + + /hasown@2.0.0: + resolution: {integrity: sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==} + engines: {node: '>= 0.4'} + dependencies: + function-bind: 1.1.2 + dev: true + + /he@1.2.0: + resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==} + hasBin: true + dev: true + + /header-case@1.0.1: + resolution: {integrity: sha512-i0q9mkOeSuhXw6bGgiQCCBgY/jlZuV/7dZXyZ9c6LcBrqwvT8eT719E9uxE5LiZftdl+z81Ugbg/VvXV4OJOeQ==} + dependencies: + no-case: 2.3.2 + upper-case: 1.1.3 + dev: true + + /heap@0.2.6: + resolution: {integrity: sha512-MzzWcnfB1e4EG2vHi3dXHoBupmuXNZzx6pY6HldVS55JKKBoq3xOyzfSaZRkJp37HIhEYC78knabHff3zc4dQQ==} + dev: true + + /highlight.js@10.7.3: + resolution: {integrity: sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==} + dev: true + + /highlightjs-solidity@2.0.5: + resolution: {integrity: sha512-ReXxQSGQkODMUgHcWzVSnfDCDrL2HshOYgw3OlIYmfHeRzUPkfJTUIp95pK4CmbiNG2eMTOmNLpfCz9Zq7Cwmg==} + dev: true + + /hmac-drbg@1.0.1: + resolution: {integrity: 
sha512-Tti3gMqLdZfhOQY1Mzf/AanLiqh1WTiJgEj26ZuYQ9fbkLomzGchCws4FyrSd4VkpBfiNhaE1On+lOz894jvXg==} + dependencies: + hash.js: 1.1.7 + minimalistic-assert: 1.0.1 + minimalistic-crypto-utils: 1.0.1 + + /home-or-tmp@2.0.0: + resolution: {integrity: sha512-ycURW7oUxE2sNiPVw1HVEFsW+ecOpJ5zaj7eC0RlwhibhRBod20muUN8qu/gzx956YrLolVvs1MTXwKgC2rVEg==} + engines: {node: '>=0.10.0'} + dependencies: + os-homedir: 1.0.2 + os-tmpdir: 1.0.2 + dev: true + + /hosted-git-info@2.8.9: + resolution: {integrity: sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==} + dev: true + + /htmlparser2@8.0.1: + resolution: {integrity: sha512-4lVbmc1diZC7GUJQtRQ5yBAeUCL1exyMwmForWkRLnwyzWBFxN633SALPMGYaWZvKe9j1pRZJpauvmxENSp/EA==} + dependencies: + domelementtype: 2.3.0 + domhandler: 5.0.3 + domutils: 3.0.1 + entities: 4.4.0 + dev: true + + /http-basic@8.1.3: + resolution: {integrity: sha512-/EcDMwJZh3mABI2NhGfHOGOeOZITqfkEO4p/xK+l3NpyncIHUQBoMvCSF/b5GqvKtySC2srL/GGG3+EtlqlmCw==} + engines: {node: '>=6.0.0'} + dependencies: + caseless: 0.12.0 + concat-stream: 1.6.2 + http-response-object: 3.0.2 + parse-cache-control: 1.0.1 + dev: true + + /http-cache-semantics@4.0.3: + resolution: {integrity: sha512-TcIMG3qeVLgDr1TEd2XvHaTnMPwYQUQMIBLy+5pLSDKYFc7UIqj39w8EGzZkaxoLv/l2K8HaI0t5AVA+YYgUew==} + dev: true + + /http-errors@1.7.2: + resolution: {integrity: sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==} + engines: {node: '>= 0.6'} + dependencies: + depd: 1.1.2 + inherits: 2.0.3 + setprototypeof: 1.1.1 + statuses: 1.5.0 + toidentifier: 1.0.0 + dev: true + + /http-errors@1.7.3: + resolution: {integrity: sha512-ZTTX0MWrsQ2ZAhA1cejAwDLycFsd7I7nVtnkT3Ol0aqodaKW+0CTZDQ1uBv5whptCnc8e8HeRRJxRs0kmm/Qfw==} + engines: {node: '>= 0.6'} + dependencies: + depd: 1.1.2 + inherits: 2.0.4 + setprototypeof: 1.1.1 + statuses: 1.5.0 + toidentifier: 1.0.0 + dev: true + + /http-errors@2.0.0: + resolution: {integrity: 
sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==} + engines: {node: '>= 0.8'} + dependencies: + depd: 2.0.0 + inherits: 2.0.4 + setprototypeof: 1.2.0 + statuses: 2.0.1 + toidentifier: 1.0.1 + dev: true + + /http-https@1.0.0: + resolution: {integrity: sha512-o0PWwVCSp3O0wS6FvNr6xfBCHgt0m1tvPLFOCc2iFDKTRAXhB7m8klDf7ErowFH8POa6dVdGatKU5I1YYwzUyg==} + dev: true + + /http-response-object@3.0.2: + resolution: {integrity: sha512-bqX0XTF6fnXSQcEJ2Iuyr75yVakyjIDCqroJQ/aHfSdlM743Cwqoi2nDYMzLGWUcuTWGWy8AAvOKXTfiv6q9RA==} + dependencies: + '@types/node': 10.17.60 + dev: true + + /http-signature@1.2.0: + resolution: {integrity: sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==} + engines: {node: '>=0.8', npm: '>=1.3.7'} + dependencies: + assert-plus: 1.0.0 + jsprim: 1.4.1 + sshpk: 1.16.1 + dev: true + + /http2-wrapper@2.1.11: + resolution: {integrity: sha512-aNAk5JzLturWEUiuhAN73Jcbq96R7rTitAoXV54FYMatvihnpD2+6PUgU4ce3D/m5VDbw+F5CsyKSF176ptitQ==} + engines: {node: '>=10.19.0'} + dependencies: + quick-lru: 5.1.1 + resolve-alpn: 1.2.1 + dev: true + + /https-proxy-agent@5.0.1: + resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} + engines: {node: '>= 6'} + dependencies: + agent-base: 6.0.2 + debug: 4.3.4(supports-color@8.1.1) + transitivePeerDependencies: + - supports-color + dev: true + + /iconv-lite@0.4.24: + resolution: {integrity: sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==} + engines: {node: '>=0.10.0'} + dependencies: + safer-buffer: 2.1.2 + dev: true + + /iconv-lite@0.6.3: + resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} + engines: {node: '>=0.10.0'} + dependencies: + safer-buffer: 2.1.2 + dev: true + + /idna-uts46-hx@2.3.1: + resolution: {integrity: 
sha512-PWoF9Keq6laYdIRwwCdhTPl60xRqAloYNMQLiyUnG42VjT53oW07BXIRM+NK7eQjzXjAk2gUvX9caRxlnF9TAA==} + engines: {node: '>=4.0.0'} + dependencies: + punycode: 2.1.0 + dev: true + + /ieee754@1.2.1: + resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} + dev: true + + /ignore@5.2.4: + resolution: {integrity: sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==} + engines: {node: '>= 4'} + dev: true + + /immediate@3.2.3: + resolution: {integrity: sha512-RrGCXRm/fRVqMIhqXrGEX9rRADavPiDFSoMb/k64i9XMk8uH4r/Omi5Ctierj6XzNecwDbO4WuFbDD1zmpl3Tg==} + dev: true + + /immediate@3.3.0: + resolution: {integrity: sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==} + dev: true + + /immutable@4.1.0: + resolution: {integrity: sha512-oNkuqVTA8jqG1Q6c+UglTOD1xhC1BtjKI7XkCXRkZHrN5m18/XsnUp8Q89GkQO/z+0WjonSvl0FLhDYftp46nQ==} + dev: true + + /import-fresh@3.3.0: + resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} + engines: {node: '>=6'} + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + dev: true + + /imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + dev: true + + /indent-string@4.0.0: + resolution: {integrity: sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==} + engines: {node: '>=8'} + dev: true + + /inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + dev: true + + /inherits@2.0.3: + resolution: {integrity: sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==} + dev: true + + /inherits@2.0.4: + resolution: 
{integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + /ini@1.3.8: + resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==} + dev: true + + /internal-slot@1.0.3: + resolution: {integrity: sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==} + engines: {node: '>= 0.4'} + dependencies: + get-intrinsic: 1.1.3 + has: 1.0.3 + side-channel: 1.0.4 + dev: true + + /internal-slot@1.0.6: + resolution: {integrity: sha512-Xj6dv+PsbtwyPpEflsejS+oIZxmMlV44zAhG479uYu89MsjcYOhCFnNyKrkJrihbsiasQyY0afoCl/9BLR65bg==} + engines: {node: '>= 0.4'} + dependencies: + get-intrinsic: 1.2.2 + hasown: 2.0.0 + side-channel: 1.0.4 + dev: true + + /interpret@1.2.0: + resolution: {integrity: sha512-mT34yGKMNceBQUoVn7iCDKDntA7SC6gycMAWzGx1z/CMCTV7b2AAtXlo3nRyHZ1FelRkQbQjprHSYGwzLtkVbw==} + engines: {node: '>= 0.10'} + dev: true + + /invariant@2.2.4: + resolution: {integrity: sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==} + dependencies: + loose-envify: 1.4.0 + dev: true + + /invert-kv@1.0.0: + resolution: {integrity: sha512-xgs2NH9AE66ucSq4cNG1nhSFghr5l6tdL15Pk+jl46bmmBapgoaY/AacXyaDznAqmGL99TiLSQgO/XazFSKYeQ==} + engines: {node: '>=0.10.0'} + dev: true + + /io-ts@1.10.4: + resolution: {integrity: sha512-b23PteSnYXSONJ6JQXRAlvJhuw8KOtkqa87W4wDtvMrud/DTJd5X+NpOOI+O/zZwVq6v0VLAaJ+1EDViKEuN9g==} + dependencies: + fp-ts: 1.19.3 + dev: true + + /ipaddr.js@1.9.0: + resolution: {integrity: sha512-M4Sjn6N/+O6/IXSJseKqHoFc+5FdGJ22sXqnjTpdZweHK64MzEPAyQZyEU3R/KRv2GLoa7nNtg/C2Ev6m7z+eA==} + engines: {node: '>= 0.10'} + dev: true + + /is-accessor-descriptor@0.1.6: + resolution: {integrity: sha512-e1BM1qnDbMRG3ll2U9dSK0UMHuWOs3pY3AtcFsmvwPtKL3MML/Q86i+GilLfvqEs4GW+ExB91tQ3Ig9noDIZ+A==} + engines: {node: '>=0.10.0'} + dependencies: + kind-of: 3.2.2 + dev: true + + /is-accessor-descriptor@1.0.0: + 
resolution: {integrity: sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==} + engines: {node: '>=0.10.0'} + dependencies: + kind-of: 6.0.3 + dev: true + + /is-arguments@1.0.4: + resolution: {integrity: sha512-xPh0Rmt8NE65sNzvyUmWgI1tz3mKq74lGA0mL8LYZcoIzKOzDh6HmrYm3d18k60nHerC8A9Km8kYu87zfSFnLA==} + engines: {node: '>= 0.4'} + dev: true + + /is-array-buffer@3.0.2: + resolution: {integrity: sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==} + dependencies: + call-bind: 1.0.5 + get-intrinsic: 1.2.2 + is-typed-array: 1.1.12 + dev: true + + /is-arrayish@0.2.1: + resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} + dev: true + + /is-bigint@1.0.4: + resolution: {integrity: sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==} + dependencies: + has-bigints: 1.0.2 + dev: true + + /is-binary-path@2.1.0: + resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} + engines: {node: '>=8'} + dependencies: + binary-extensions: 2.2.0 + dev: true + + /is-boolean-object@1.1.2: + resolution: {integrity: sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + has-tostringtag: 1.0.0 + dev: true + + /is-buffer@1.1.6: + resolution: {integrity: sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==} + dev: true + + /is-buffer@2.0.5: + resolution: {integrity: sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==} + engines: {node: '>=4'} + dev: true + + /is-callable@1.2.7: + resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==} + engines: {node: '>= 0.4'} + dev: true + + 
/is-ci@2.0.0: + resolution: {integrity: sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==} + hasBin: true + dependencies: + ci-info: 2.0.0 + dev: true + + /is-core-module@2.10.0: + resolution: {integrity: sha512-Erxj2n/LDAZ7H8WNJXd9tw38GYM3dv8rk8Zcs+jJuxYTW7sozH+SS8NtrSjVL1/vpLvWi1hxy96IzjJ3EHTJJg==} + dependencies: + has: 1.0.3 + dev: true + + /is-data-descriptor@0.1.4: + resolution: {integrity: sha512-+w9D5ulSoBNlmw9OHn3U2v51SyoCd0he+bB3xMl62oijhrspxowjU+AIcDY0N3iEJbUEkB15IlMASQsxYigvXg==} + engines: {node: '>=0.10.0'} + dependencies: + kind-of: 3.2.2 + dev: true + + /is-data-descriptor@1.0.0: + resolution: {integrity: sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==} + engines: {node: '>=0.10.0'} + dependencies: + kind-of: 6.0.3 + dev: true + + /is-date-object@1.0.2: + resolution: {integrity: sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==} + engines: {node: '>= 0.4'} + dev: true + + /is-descriptor@0.1.6: + resolution: {integrity: sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==} + engines: {node: '>=0.10.0'} + dependencies: + is-accessor-descriptor: 0.1.6 + is-data-descriptor: 0.1.4 + kind-of: 5.1.0 + dev: true + + /is-descriptor@1.0.2: + resolution: {integrity: sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==} + engines: {node: '>=0.10.0'} + dependencies: + is-accessor-descriptor: 1.0.0 + is-data-descriptor: 1.0.0 + kind-of: 6.0.3 + dev: true + + /is-docker@2.2.1: + resolution: {integrity: sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==} + engines: {node: '>=8'} + hasBin: true + dev: true + + /is-extendable@0.1.1: + resolution: {integrity: sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==} + engines: {node: '>=0.10.0'} + dev: true + + 
/is-extendable@1.0.1: + resolution: {integrity: sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==} + engines: {node: '>=0.10.0'} + dependencies: + is-plain-object: 2.0.4 + dev: true + + /is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + dev: true + + /is-finite@1.1.0: + resolution: {integrity: sha512-cdyMtqX/BOqqNBBiKlIVkytNHm49MtMlYyn1zxzvJKWmFMlGzm+ry5BBfYyeY9YmNKbRSo/o7OX9w9ale0wg3w==} + engines: {node: '>=0.10.0'} + dev: true + + /is-fn@1.0.0: + resolution: {integrity: sha512-XoFPJQmsAShb3jEQRfzf2rqXavq7fIqF/jOekp308JlThqrODnMpweVSGilKTCXELfLhltGP2AGgbQGVP8F1dg==} + engines: {node: '>=0.10.0'} + dev: true + + /is-fullwidth-code-point@1.0.0: + resolution: {integrity: sha512-1pqUqRjkhPJ9miNq9SwMfdvi6lBJcd6eFxvfaivQhaH3SgisfiuudvFntdKOmxuee/77l+FPjKrQjWvmPjWrRw==} + engines: {node: '>=0.10.0'} + dependencies: + number-is-nan: 1.0.1 + dev: true + + /is-fullwidth-code-point@2.0.0: + resolution: {integrity: sha512-VHskAKYM8RfSFXwee5t5cbN5PZeq1Wrh6qd5bkyiXIf6UQcN6w/A0eXM9r6t8d+GYOh+o6ZhiEnb88LN/Y8m2w==} + engines: {node: '>=4'} + dev: true + + /is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + dev: true + + /is-function@1.0.1: + resolution: {integrity: sha512-coTeFCk0VaNTNO/FwMMaI30KOPOIkLp1q5M7dIVDn4Zop70KyGFZqXSgKClBisjrD3S2cVIuD7MD793/lyLGZQ==} + dev: true + + /is-generator-function@1.0.8: + resolution: {integrity: sha512-2Omr/twNtufVZFr1GhxjOMFPAj2sjc/dKaIqBhvo4qciXfJmITGH6ZGd8eZYNHza8t1y0e01AuqRhJwfWp26WQ==} + engines: {node: '>= 0.4'} + dev: true + + /is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + dependencies: + is-extglob: 2.1.1 + dev: true + + 
/is-hex-prefixed@1.0.0: + resolution: {integrity: sha512-WvtOiug1VFrE9v1Cydwm+FnXd3+w9GaeVUss5W4v/SLy3UW00vP+6iNF2SdnfiBoLy4bTqVdkftNGTUeOFVsbA==} + engines: {node: '>=6.5.0', npm: '>=3'} + dev: true + + /is-lower-case@1.1.3: + resolution: {integrity: sha512-+5A1e/WJpLLXZEDlgz4G//WYSHyQBD32qa4Jd3Lw06qQlv3fJHnp3YIHjTQSGzHMgzmVKz2ZP3rBxTHkPw/lxA==} + dependencies: + lower-case: 1.1.4 + dev: true + + /is-negative-zero@2.0.2: + resolution: {integrity: sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==} + engines: {node: '>= 0.4'} + dev: true + + /is-number-object@1.0.7: + resolution: {integrity: sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==} + engines: {node: '>= 0.4'} + dependencies: + has-tostringtag: 1.0.0 + dev: true + + /is-number@3.0.0: + resolution: {integrity: sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==} + engines: {node: '>=0.10.0'} + dependencies: + kind-of: 3.2.2 + dev: true + + /is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + dev: true + + /is-object@1.0.1: + resolution: {integrity: sha512-+XzmTRB/JXoIdK20Ge8K8PRsP5UlthLaVhIRxzIwQ73jRgER8iRw98DilvERx/tSjOHLy9JM4sKUfLRMB5ui0Q==} + dev: true + + /is-path-inside@3.0.3: + resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} + engines: {node: '>=8'} + dev: true + + /is-plain-obj@1.1.0: + resolution: {integrity: sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==} + engines: {node: '>=0.10.0'} + dev: true + + /is-plain-obj@2.1.0: + resolution: {integrity: sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==} + engines: {node: '>=8'} + dev: true + + /is-plain-object@2.0.4: + resolution: 
{integrity: sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==} + engines: {node: '>=0.10.0'} + dependencies: + isobject: 3.0.1 + dev: true + + /is-regex@1.1.4: + resolution: {integrity: sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + has-tostringtag: 1.0.0 + dev: true + + /is-retry-allowed@1.2.0: + resolution: {integrity: sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg==} + engines: {node: '>=0.10.0'} + dev: true + + /is-shared-array-buffer@1.0.2: + resolution: {integrity: sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==} + dependencies: + call-bind: 1.0.2 + dev: true + + /is-stream@1.1.0: + resolution: {integrity: sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==} + engines: {node: '>=0.10.0'} + dev: true + + /is-string@1.0.7: + resolution: {integrity: sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==} + engines: {node: '>= 0.4'} + dependencies: + has-tostringtag: 1.0.0 + dev: true + + /is-symbol@1.0.3: + resolution: {integrity: sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==} + engines: {node: '>= 0.4'} + dependencies: + has-symbols: 1.0.3 + dev: true + + /is-typed-array@1.1.12: + resolution: {integrity: sha512-Z14TF2JNG8Lss5/HMqt0//T9JeHXttXy5pH/DBU4vi98ozO2btxzq9MwYDZYnKwU8nRsz/+GVFVRDq3DkVuSPg==} + engines: {node: '>= 0.4'} + dependencies: + which-typed-array: 1.1.13 + dev: true + + /is-typed-array@1.1.5: + resolution: {integrity: sha512-S+GRDgJlR3PyEbsX/Fobd9cqpZBuvUS+8asRqYDMLCb2qMzt1oz5m5oxQCxOgUDxiWsOVNi4yaF+/uvdlHlYug==} + engines: {node: '>= 0.4'} + dependencies: + available-typed-arrays: 1.0.5 + call-bind: 1.0.5 + es-abstract: 1.22.3 + foreach: 2.0.5 + has-symbols: 
1.0.3 + dev: true + + /is-typedarray@1.0.0: + resolution: {integrity: sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==} + dev: true + + /is-unicode-supported@0.1.0: + resolution: {integrity: sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==} + engines: {node: '>=10'} + dev: true + + /is-upper-case@1.1.2: + resolution: {integrity: sha512-GQYSJMgfeAmVwh9ixyk888l7OIhNAGKtY6QA+IrWlu9MDTCaXmeozOZ2S9Knj7bQwBO/H6J2kb+pbyTUiMNbsw==} + dependencies: + upper-case: 1.1.3 + dev: true + + /is-url@1.2.4: + resolution: {integrity: sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww==} + dev: true + + /is-utf8@0.2.1: + resolution: {integrity: sha512-rMYPYvCzsXywIsldgLaSoPlw5PfoB/ssr7hY4pLfcodrA5M/eArza1a9VmTiNIBNMjOGr1Ow9mTyU2o69U6U9Q==} + dev: true + + /is-weakref@1.0.2: + resolution: {integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==} + dependencies: + call-bind: 1.0.2 + dev: true + + /is-windows@1.0.2: + resolution: {integrity: sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==} + engines: {node: '>=0.10.0'} + dev: true + + /is-wsl@2.2.0: + resolution: {integrity: sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==} + engines: {node: '>=8'} + dependencies: + is-docker: 2.2.1 + dev: true + + /isarray@0.0.1: + resolution: {integrity: sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==} + dev: true + + /isarray@1.0.0: + resolution: {integrity: sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==} + dev: true + + /isarray@2.0.5: + resolution: {integrity: sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==} + dev: true + + /isexe@2.0.0: + resolution: {integrity: 
sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + dev: true + + /isobject@2.1.0: + resolution: {integrity: sha512-+OUdGJlgjOBZDfxnDjYYG6zp487z0JGNQq3cYQYg5f5hKR+syHMsaztzGeml/4kGG55CSpKSpWTY+jYGgsHLgA==} + engines: {node: '>=0.10.0'} + dependencies: + isarray: 1.0.0 + dev: true + + /isobject@3.0.1: + resolution: {integrity: sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==} + engines: {node: '>=0.10.0'} + dev: true + + /isomorphic-unfetch@3.1.0: + resolution: {integrity: sha512-geDJjpoZ8N0kWexiwkX8F9NkTsXhetLPVbZFQ+JTW239QNOwvB0gniuR1Wc6f0AMTn7/mFGyXvHTifrCp/GH8Q==} + dependencies: + node-fetch: 2.6.7 + unfetch: 4.2.0 + transitivePeerDependencies: + - encoding + dev: true + + /isstream@0.1.2: + resolution: {integrity: sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==} + dev: true + + /istanbul@0.4.5: + resolution: {integrity: sha512-nMtdn4hvK0HjUlzr1DrKSUY8ychprt8dzHOgY2KXsIhHu5PuQQEOTM27gV9Xblyon7aUH/TSFIjRHEODF/FRPg==} + deprecated: |- + This module is no longer maintained, try this instead: + npm i nyc + Visit https://istanbul.js.org/integrations for other alternatives. 
+ hasBin: true + dependencies: + abbrev: 1.0.9 + async: 1.5.2 + escodegen: 1.8.1 + esprima: 2.7.3 + glob: 5.0.15 + handlebars: 4.7.7 + js-yaml: 3.14.1 + mkdirp: 0.5.6 + nopt: 3.0.6 + once: 1.4.0 + resolve: 1.1.7 + supports-color: 3.2.3 + which: 1.3.1 + wordwrap: 1.0.0 + dev: true + + /isurl@1.0.0: + resolution: {integrity: sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w==} + engines: {node: '>= 4'} + dependencies: + has-to-string-tag-x: 1.4.1 + is-object: 1.0.1 + dev: true + + /js-cookie@2.2.1: + resolution: {integrity: sha512-HvdH2LzI/EAZcUwA8+0nKNtWHqS+ZmijLA30RwZA0bo7ToCckjK5MkGhjED9KoRcXO6BaGI3I9UIzSA1FKFPOQ==} + dev: true + + /js-sdsl@4.4.2: + resolution: {integrity: sha512-dwXFwByc/ajSV6m5bcKAPwe4yDDF6D614pxmIi5odytzxRlwqF6nwoiCek80Ixc7Cvma5awClxrzFtxCQvcM8w==} + dev: true + + /js-sha3@0.5.5: + resolution: {integrity: sha512-yLLwn44IVeunwjpDVTDZmQeVbB0h+dZpY2eO68B/Zik8hu6dH+rKeLxwua79GGIvW6xr8NBAcrtiUbYrTjEFTA==} + dev: true + + /js-sha3@0.5.7: + resolution: {integrity: sha512-GII20kjaPX0zJ8wzkTbNDYMY7msuZcTWk8S5UOh6806Jq/wz1J8/bnr8uGU0DAUmYDjj2Mr4X1cW8v/GLYnR+g==} + dev: true + + /js-sha3@0.6.1: + resolution: {integrity: sha512-2OHj7sAZ9gnJS4lQsgIsTslmqVrNQdDC99bvwYGQKU1w6k/gwsTLeGBfWt8yHCuTOGqk7DXzuVlK8J+dDXnG7A==} + dev: true + + /js-sha3@0.8.0: + resolution: {integrity: sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q==} + + /js-tokens@3.0.2: + resolution: {integrity: sha512-RjTcuD4xjtthQkaWH7dFlH85L+QaVtSoOyGdZ3g6HFhS9dFNDfLyqgm2NFe2X6cQpeFmt0452FJjFG5UameExg==} + dev: true + + /js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + dev: true + + /js-yaml@3.13.1: + resolution: {integrity: sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==} + hasBin: true + dependencies: + argparse: 1.0.10 + esprima: 4.0.1 + dev: true + + 
/js-yaml@3.14.1: + resolution: {integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==} + hasBin: true + dependencies: + argparse: 1.0.10 + esprima: 4.0.1 + dev: true + + /js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + dependencies: + argparse: 2.0.1 + dev: true + + /jsbn@0.1.1: + resolution: {integrity: sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==} + dev: true + + /jsesc@0.5.0: + resolution: {integrity: sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==} + hasBin: true + dev: true + + /jsesc@1.3.0: + resolution: {integrity: sha512-Mke0DA0QjUWuJlhsE0ZPPhYiJkRap642SmI/4ztCFaUs6V2AiH1sfecc+57NgaryfAA2VR3v6O+CSjC1jZJKOA==} + hasBin: true + dev: true + + /json-buffer@3.0.0: + resolution: {integrity: sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ==} + dev: true + + /json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + dev: true + + /json-parse-even-better-errors@2.3.1: + resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + dev: true + + /json-rpc-engine@3.8.0: + resolution: {integrity: sha512-6QNcvm2gFuuK4TKU1uwfH0Qd/cOSb9c1lls0gbnIhciktIUQJwz6NQNAW4B1KiGPenv7IKu97V222Yo1bNhGuA==} + dependencies: + async: 2.6.3 + babel-preset-env: 1.7.0 + babelify: 7.3.0 + json-rpc-error: 2.0.0 + promise-to-callback: 1.0.0 + safe-event-emitter: 1.0.1 + transitivePeerDependencies: + - supports-color + dev: true + + /json-rpc-error@2.0.0: + resolution: {integrity: sha512-EwUeWP+KgAZ/xqFpaP6YDAXMtCJi+o/QQpCQFIYyxr01AdADi2y413eM8hSqJcoQym9WMePAJWoaODEJufC4Ug==} + dependencies: + inherits: 2.0.4 + dev: true + + 
/json-rpc-random-id@1.0.1: + resolution: {integrity: sha512-RJ9YYNCkhVDBuP4zN5BBtYAzEl03yq/jIIsyif0JY9qyJuQQZNeDK7anAPKKlyEtLSj2s8h6hNh2F8zO5q7ScA==} + dev: true + + /json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + dev: true + + /json-schema-traverse@1.0.0: + resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + dev: true + + /json-schema@0.2.3: + resolution: {integrity: sha512-a3xHnILGMtk+hDOqNwHzF6e2fNbiMrXZvxKQiEv2MlgQP+pjIOzqAmKYD2mDpXYE/44M7g+n9p2bKkYWDUcXCQ==} + dev: true + + /json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + dev: true + + /json-stable-stringify@1.0.1: + resolution: {integrity: sha512-i/J297TW6xyj7sDFa7AmBPkQvLIxWr2kKPWI26tXydnZrzVAocNqn5DMNT1Mzk0vit1V5UkRM7C1KdVNp7Lmcg==} + dependencies: + jsonify: 0.0.0 + dev: true + + /json-stringify-safe@5.0.1: + resolution: {integrity: sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==} + dev: true + + /json-to-ast@2.1.0: + resolution: {integrity: sha512-W9Lq347r8tA1DfMvAGn9QNcgYm4Wm7Yc+k8e6vezpMnRT+NHbtlxgNBXRVjXe9YM6eTn6+p/MKOlV/aABJcSnQ==} + engines: {node: '>= 4'} + dependencies: + code-error-fragment: 0.0.230 + grapheme-splitter: 1.0.4 + dev: true + + /json5@0.5.1: + resolution: {integrity: sha512-4xrs1aW+6N5DalkqSVA8fxh458CXvR99WU8WLKmq4v8eWAL86Xo3BVqyd3SkA9wEVjCMqyvvRRkshAdOnBp5rw==} + hasBin: true + dev: true + + /jsonfile@2.4.0: + resolution: {integrity: sha512-PKllAqbgLgxHaj8TElYymKCAgrASebJrWpTnEkOaTowt23VKXXN0sUeriJ+eh7y6ufb/CC5ap11pz71/cM0hUw==} + optionalDependencies: + graceful-fs: 4.2.10 + dev: true + + /jsonfile@4.0.0: + resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} + 
optionalDependencies: + graceful-fs: 4.2.10 + dev: true + + /jsonfile@6.1.0: + resolution: {integrity: sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==} + dependencies: + universalify: 2.0.0 + optionalDependencies: + graceful-fs: 4.2.10 + dev: true + + /jsonify@0.0.0: + resolution: {integrity: sha512-trvBk1ki43VZptdBI5rIlG4YOzyeH/WefQt5rj1grasPn4iiZWKet8nkgc4GlsAylaztn0qZfUYOiTsASJFdNA==} + dev: true + + /jsonpointer@5.0.1: + resolution: {integrity: sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==} + engines: {node: '>=0.10.0'} + dev: true + + /jsonschema@1.4.0: + resolution: {integrity: sha512-/YgW6pRMr6M7C+4o8kS+B/2myEpHCrxO4PEWnqJNBFMjn7EWXqlQ4tGwL6xTHeRplwuZmcAncdvfOad1nT2yMw==} + dev: true + + /jsprim@1.4.1: + resolution: {integrity: sha512-4Dj8Rf+fQ+/Pn7C5qeEX02op1WfOss3PKTE9Nsop3Dx+6UPxlm1dr/og7o2cRa5hNN07CACr4NFzRLtj/rjWog==} + engines: {'0': node >=0.6.0} + dependencies: + assert-plus: 1.0.0 + extsprintf: 1.3.0 + json-schema: 0.2.3 + verror: 1.10.0 + dev: true + + /keccak@3.0.2: + resolution: {integrity: sha512-PyKKjkH53wDMLGrvmRGSNWgmSxZOUqbnXwKL9tmgbFYA1iAYqW21kfR7mZXV0MlESiefxQQE9X9fTa3X+2MPDQ==} + engines: {node: '>=10.0.0'} + requiresBuild: true + dependencies: + node-addon-api: 2.0.2 + node-gyp-build: 4.5.0 + readable-stream: 3.6.0 + dev: true + + /keccakjs@0.2.3: + resolution: {integrity: sha512-BjLkNDcfaZ6l8HBG9tH0tpmDv3sS2mA7FNQxFHpCdzP3Gb2MVruXBSuoM66SnVxKJpAr5dKGdkHD+bDokt8fTg==} + dependencies: + browserify-sha3: 0.0.4 + sha3: 1.2.6 + dev: true + + /keyv@3.1.0: + resolution: {integrity: sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==} + dependencies: + json-buffer: 3.0.0 + dev: true + + /keyv@4.5.0: + resolution: {integrity: sha512-2YvuMsA+jnFGtBareKqgANOEKe1mk3HKiXu2fRmAfyxG0MJAywNhi5ttWA3PMjl4NmpyjZNbFifR2vNjW1znfA==} + dependencies: + json-buffer: 3.0.1 + dev: true + + /kind-of@3.2.2: + resolution: 
{integrity: sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==} + engines: {node: '>=0.10.0'} + dependencies: + is-buffer: 1.1.6 + dev: true + + /kind-of@4.0.0: + resolution: {integrity: sha512-24XsCxmEbRwEDbz/qz3stgin8TTzZ1ESR56OMCN0ujYg+vRutNSiOj9bHH9u85DKgXguraugV5sFuvbD4FW/hw==} + engines: {node: '>=0.10.0'} + dependencies: + is-buffer: 1.1.6 + dev: true + + /kind-of@5.1.0: + resolution: {integrity: sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==} + engines: {node: '>=0.10.0'} + dev: true + + /kind-of@6.0.3: + resolution: {integrity: sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==} + engines: {node: '>=0.10.0'} + dev: true + + /klaw-sync@6.0.0: + resolution: {integrity: sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==} + dependencies: + graceful-fs: 4.2.10 + dev: true + + /klaw@1.3.1: + resolution: {integrity: sha512-TED5xi9gGQjGpNnvRWknrwAB1eL5GciPfVFOt3Vk1OJCVDQbzuSfrF3hkUQKlsgKrG1F+0t5W0m+Fje1jIt8rw==} + optionalDependencies: + graceful-fs: 4.2.10 + dev: true + + /latest-version@7.0.0: + resolution: {integrity: sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==} + engines: {node: '>=14.16'} + dependencies: + package-json: 8.1.1 + dev: true + + /lcid@1.0.0: + resolution: {integrity: sha512-YiGkH6EnGrDGqLMITnGjXtGmNtjoXw9SVUzcaos8RBi7Ps0VBylkq+vOcY9QE5poLasPCR849ucFUkl0UzUyOw==} + engines: {node: '>=0.10.0'} + dependencies: + invert-kv: 1.0.0 + dev: true + + /level-codec@7.0.1: + resolution: {integrity: sha512-Ua/R9B9r3RasXdRmOtd+t9TCOEIIlts+TN/7XTT2unhDaL6sJn83S3rUyljbr6lVtw49N3/yA0HHjpV6Kzb2aQ==} + dev: true + + /level-codec@9.0.2: + resolution: {integrity: sha512-UyIwNb1lJBChJnGfjmO0OR+ezh2iVu1Kas3nvBS/BzGnx79dv6g7unpKIDNPMhfdTEGoc7mC8uAu51XEtX+FHQ==} + engines: {node: '>=6'} + dependencies: + buffer: 5.7.1 + dev: true + + 
/level-errors@1.0.5: + resolution: {integrity: sha512-/cLUpQduF6bNrWuAC4pwtUKA5t669pCsCi2XbmojG2tFeOr9j6ShtdDCtFFQO1DRt+EVZhx9gPzP9G2bUaG4ig==} + dependencies: + errno: 0.1.8 + dev: true + + /level-errors@2.0.1: + resolution: {integrity: sha512-UVprBJXite4gPS+3VznfgDSU8PTRuVX0NXwoWW50KLxd2yw4Y1t2JUR5In1itQnudZqRMT9DlAM3Q//9NCjCFw==} + engines: {node: '>=6'} + dependencies: + errno: 0.1.8 + dev: true + + /level-iterator-stream@1.3.1: + resolution: {integrity: sha512-1qua0RHNtr4nrZBgYlpV0qHHeHpcRRWTxEZJ8xsemoHAXNL5tbooh4tPEEqIqsbWCAJBmUmkwYK/sW5OrFjWWw==} + dependencies: + inherits: 2.0.4 + level-errors: 1.0.5 + readable-stream: 1.0.34 + xtend: 4.0.2 + dev: true + + /level-iterator-stream@2.0.3: + resolution: {integrity: sha512-I6Heg70nfF+e5Y3/qfthJFexhRw/Gi3bIymCoXAlijZdAcLaPuWSJs3KXyTYf23ID6g0o2QF62Yh+grOXY3Rig==} + engines: {node: '>=4'} + dependencies: + inherits: 2.0.4 + readable-stream: 2.3.7 + xtend: 4.0.2 + dev: true + + /level-iterator-stream@3.0.1: + resolution: {integrity: sha512-nEIQvxEED9yRThxvOrq8Aqziy4EGzrxSZK+QzEFAVuJvQ8glfyZ96GB6BoI4sBbLfjMXm2w4vu3Tkcm9obcY0g==} + engines: {node: '>=6'} + dependencies: + inherits: 2.0.4 + readable-stream: 2.3.7 + xtend: 4.0.2 + dev: true + + /level-mem@3.0.1: + resolution: {integrity: sha512-LbtfK9+3Ug1UmvvhR2DqLqXiPW1OJ5jEh0a3m9ZgAipiwpSxGj/qaVVy54RG5vAQN1nCuXqjvprCuKSCxcJHBg==} + engines: {node: '>=6'} + dependencies: + level-packager: 4.0.1 + memdown: 3.0.0 + dev: true + + /level-packager@4.0.1: + resolution: {integrity: sha512-svCRKfYLn9/4CoFfi+d8krOtrp6RoX8+xm0Na5cgXMqSyRru0AnDYdLl+YI8u1FyS6gGZ94ILLZDE5dh2but3Q==} + engines: {node: '>=6'} + dependencies: + encoding-down: 5.0.4 + levelup: 3.1.1 + dev: true + + /level-post@1.0.7: + resolution: {integrity: sha512-PWYqG4Q00asOrLhX7BejSajByB4EmG2GaKHfj3h5UmmZ2duciXLPGYWIjBzLECFWUGOZWlm5B20h/n3Gs3HKew==} + dependencies: + ltgt: 2.2.1 + dev: true + + /level-sublevel@6.6.4: + resolution: {integrity: 
sha512-pcCrTUOiO48+Kp6F1+UAzF/OtWqLcQVTVF39HLdZ3RO8XBoXt+XVPKZO1vVr1aUoxHZA9OtD2e1v7G+3S5KFDA==} + dependencies: + bytewise: 1.1.0 + level-codec: 9.0.2 + level-errors: 2.0.1 + level-iterator-stream: 2.0.3 + ltgt: 2.1.3 + pull-defer: 0.2.3 + pull-level: 2.0.4 + pull-stream: 3.6.14 + typewiselite: 1.0.0 + xtend: 4.0.2 + dev: true + + /level-supports@4.0.1: + resolution: {integrity: sha512-PbXpve8rKeNcZ9C1mUicC9auIYFyGpkV9/i6g76tLgANwWhtG2v7I4xNBUlkn3lE2/dZF3Pi0ygYGtLc4RXXdA==} + engines: {node: '>=12'} + dev: true + + /level-transcoder@1.0.1: + resolution: {integrity: sha512-t7bFwFtsQeD8cl8NIoQ2iwxA0CL/9IFw7/9gAjOonH0PWTTiRfY7Hq+Ejbsxh86tXobDQ6IOiddjNYIfOBs06w==} + engines: {node: '>=12'} + dependencies: + buffer: 6.0.3 + module-error: 1.0.2 + dev: true + + /level-ws@0.0.0: + resolution: {integrity: sha512-XUTaO/+Db51Uiyp/t7fCMGVFOTdtLS/NIACxE/GHsij15mKzxksZifKVjlXDF41JMUP/oM1Oc4YNGdKnc3dVLw==} + dependencies: + readable-stream: 1.0.34 + xtend: 2.1.2 + dev: true + + /level-ws@1.0.0: + resolution: {integrity: sha512-RXEfCmkd6WWFlArh3X8ONvQPm8jNpfA0s/36M4QzLqrLEIt1iJE9WBHLZ5vZJK6haMjJPJGJCQWfjMNnRcq/9Q==} + engines: {node: '>=6'} + dependencies: + inherits: 2.0.4 + readable-stream: 2.3.7 + xtend: 4.0.2 + dev: true + + /level@8.0.0: + resolution: {integrity: sha512-ypf0jjAk2BWI33yzEaaotpq7fkOPALKAgDBxggO6Q9HGX2MRXn0wbP1Jn/tJv1gtL867+YOjOB49WaUF3UoJNQ==} + engines: {node: '>=12'} + dependencies: + browser-level: 1.0.1 + classic-level: 1.2.0 + dev: true + + /levelup@1.3.9: + resolution: {integrity: sha512-VVGHfKIlmw8w1XqpGOAGwq6sZm2WwWLmlDcULkKWQXEA5EopA8OBNJ2Ck2v6bdk8HeEZSbCSEgzXadyQFm76sQ==} + dependencies: + deferred-leveldown: 1.2.2 + level-codec: 7.0.1 + level-errors: 1.0.5 + level-iterator-stream: 1.3.1 + prr: 1.0.1 + semver: 5.4.1 + xtend: 4.0.2 + dev: true + + /levelup@3.1.1: + resolution: {integrity: sha512-9N10xRkUU4dShSRRFTBdNaBxofz+PGaIZO962ckboJZiNmLuhVT6FZ6ZKAsICKfUBO76ySaYU6fJWX/jnj3Lcg==} + engines: {node: '>=6'} + dependencies: + deferred-leveldown: 4.0.2 
+ level-errors: 2.0.1 + level-iterator-stream: 3.0.1 + xtend: 4.0.2 + dev: true + + /leven@3.1.0: + resolution: {integrity: sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==} + engines: {node: '>=6'} + dev: true + + /levn@0.3.0: + resolution: {integrity: sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA==} + engines: {node: '>= 0.8.0'} + dependencies: + prelude-ls: 1.1.2 + type-check: 0.3.2 + dev: true + + /levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + dev: true + + /lines-and-columns@1.2.4: + resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + dev: true + + /load-json-file@1.1.0: + resolution: {integrity: sha512-cy7ZdNRXdablkXYNI049pthVeXFurRyb9+hA/dZzerZ0pGTx42z+y+ssxBaVV2l70t1muq5IdKhn4UtcoGUY9A==} + engines: {node: '>=0.10.0'} + dependencies: + graceful-fs: 4.2.10 + parse-json: 2.2.0 + pify: 2.3.0 + pinkie-promise: 2.0.1 + strip-bom: 2.0.0 + dev: true + + /locate-path@2.0.0: + resolution: {integrity: sha512-NCI2kiDkyR7VeEKm27Kda/iQHyKJe1Bu0FlTbYp3CqJu+9IFe9bLyAjMxf5ZDDbEg+iMPzB5zYyUTSm8wVTKmA==} + engines: {node: '>=4'} + dependencies: + p-locate: 2.0.0 + path-exists: 3.0.0 + dev: true + + /locate-path@3.0.0: + resolution: {integrity: sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==} + engines: {node: '>=6'} + dependencies: + p-locate: 3.0.0 + path-exists: 3.0.0 + dev: true + + /locate-path@5.0.0: + resolution: {integrity: sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==} + engines: {node: '>=8'} + dependencies: + p-locate: 4.1.0 + dev: true + + /locate-path@6.0.0: + resolution: {integrity: 
sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + dependencies: + p-locate: 5.0.0 + dev: true + + /lodash.assign@4.2.0: + resolution: {integrity: sha512-hFuH8TY+Yji7Eja3mGiuAxBqLagejScbG8GbG0j6o9vzn0YL14My+ktnqtZgFTosKymC9/44wP6s7xyuLfnClw==} + dev: true + + /lodash.camelcase@4.3.0: + resolution: {integrity: sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==} + dev: true + + /lodash.flatten@4.4.0: + resolution: {integrity: sha512-C5N2Z3DgnnKr0LOpv/hKCgKdb7ZZwafIrsesve6lmzvZIRZRGaZ/l6Q8+2W7NaT+ZwO3fFlSCzCzrDCFdJfZ4g==} + dev: true + + /lodash.mapvalues@4.6.0: + resolution: {integrity: sha512-JPFqXFeZQ7BfS00H58kClY7SPVeHertPE0lNuCyZ26/XlN8TvakYD7b9bGyNmXbT/D3BbtPAAmq90gPWqLkxlQ==} + dev: true + + /lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + dev: true + + /lodash.truncate@4.4.2: + resolution: {integrity: sha512-jttmRe7bRse52OsWIMDLaXxWqRAmtIUccAQ3garviCqJjafXOfNMO0yMfNpdD6zbGaTU0P5Nz7e7gAT6cKmJRw==} + dev: true + + /lodash@4.17.20: + resolution: {integrity: sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==} + dev: true + + /lodash@4.17.21: + resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} + dev: true + + /log-symbols@3.0.0: + resolution: {integrity: sha512-dSkNGuI7iG3mfvDzUuYZyvk5dD9ocYCYzNU6CYDE6+Xqd+gwme6Z00NS3dUh8mq/73HaEtT7m6W+yUPtU6BZnQ==} + engines: {node: '>=8'} + dependencies: + chalk: 2.4.2 + dev: true + + /log-symbols@4.1.0: + resolution: {integrity: sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==} + engines: {node: '>=10'} + dependencies: + chalk: 4.1.2 + is-unicode-supported: 0.1.0 + dev: true + + /looper@2.0.0: + resolution: {integrity: 
sha512-6DzMHJcjbQX/UPHc1rRCBfKlLwDkvuGZ715cIR36wSdYqWXFT35uLXq5P/2orl3tz+t+VOVPxw4yPinQlUDGDQ==} + dev: true + + /looper@3.0.0: + resolution: {integrity: sha512-LJ9wplN/uSn72oJRsXTx+snxPet5c8XiZmOKCm906NVYu+ag6SB6vUcnJcWxgnl2NfbIyeobAn7Bwv6xRj2XJg==} + dev: true + + /loose-envify@1.4.0: + resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==} + hasBin: true + dependencies: + js-tokens: 4.0.0 + dev: true + + /loupe@2.3.7: + resolution: {integrity: sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==} + dependencies: + get-func-name: 2.0.2 + + /lower-case-first@1.0.2: + resolution: {integrity: sha512-UuxaYakO7XeONbKrZf5FEgkantPf5DUqDayzP5VXZrtRPdH86s4kN47I8B3TW10S4QKiE3ziHNf3kRN//okHjA==} + dependencies: + lower-case: 1.1.4 + dev: true + + /lower-case@1.1.4: + resolution: {integrity: sha512-2Fgx1Ycm599x+WGpIYwJOvsjmXFzTSc34IwDWALRA/8AopUKAVPwfJ+h5+f85BCp0PWmmJcWzEpxOpoXycMpdA==} + dev: true + + /lowercase-keys@1.0.1: + resolution: {integrity: sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==} + engines: {node: '>=0.10.0'} + dev: true + + /lowercase-keys@2.0.0: + resolution: {integrity: sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==} + engines: {node: '>=8'} + dev: true + + /lowercase-keys@3.0.0: + resolution: {integrity: sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dev: true + + /lru-cache@3.2.0: + resolution: {integrity: sha512-91gyOKTc2k66UG6kHiH4h3S2eltcPwE1STVfMYC/NG+nZwf8IIuiamfmpGZjpbbxzSyEJaLC0tNSmhjlQUTJow==} + dependencies: + pseudomap: 1.0.2 + dev: true + + /lru-cache@5.1.1: + resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} + dependencies: + yallist: 3.1.1 + dev: true + + 
/lru-cache@6.0.0: + resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==} + engines: {node: '>=10'} + dependencies: + yallist: 4.0.0 + dev: true + + /lru_map@0.3.3: + resolution: {integrity: sha512-Pn9cox5CsMYngeDbmChANltQl+5pi6XmTrraMSzhPmMBbmgcxmqWry0U3PGapCU1yB4/LqCcom7qhHZiF/jGfQ==} + dev: true + + /ltgt@2.1.3: + resolution: {integrity: sha512-5VjHC5GsENtIi5rbJd+feEpDKhfr7j0odoUR2Uh978g+2p93nd5o34cTjQWohXsPsCZeqoDnIqEf88mPCe0Pfw==} + dev: true + + /ltgt@2.2.1: + resolution: {integrity: sha512-AI2r85+4MquTw9ZYqabu4nMwy9Oftlfa/e/52t9IjtfG+mGBbTNdAoZ3RQKLHR6r0wQnwZnPIEh/Ya6XTWAKNA==} + dev: true + + /make-error@1.3.6: + resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} + dev: true + + /map-cache@0.2.2: + resolution: {integrity: sha512-8y/eV9QQZCiyn1SprXSrCmqJN0yNRATe+PO8ztwqrvrbdRLA3eYJF0yaR0YayLWkMbsQSKWS9N2gPcGEc4UsZg==} + engines: {node: '>=0.10.0'} + dev: true + + /map-visit@1.0.0: + resolution: {integrity: sha512-4y7uGv8bd2WdM9vpQsiQNo41Ln1NvhvDRuVt0k2JZQ+ezN2uaQes7lZeZ+QQUHOLQAtDaBJ+7wCbi+ab/KFs+w==} + engines: {node: '>=0.10.0'} + dependencies: + object-visit: 1.0.1 + dev: true + + /markdown-table@1.1.3: + resolution: {integrity: sha512-1RUZVgQlpJSPWYbFSpmudq5nHY1doEIv89gBtF0s4gW1GF2XorxcA/70M5vq7rLv0a6mhOUccRsqkwhwLCIQ2Q==} + dev: true + + /mcl-wasm@0.7.9: + resolution: {integrity: sha512-iJIUcQWA88IJB/5L15GnJVnSQJmf/YaxxV6zRavv83HILHaJQb6y0iFyDMdDO0gN8X37tdxmAOrH/P8B6RB8sQ==} + engines: {node: '>=8.9.0'} + dev: true + + /md5.js@1.3.5: + resolution: {integrity: sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==} + dependencies: + hash-base: 3.1.0 + inherits: 2.0.4 + safe-buffer: 5.2.1 + dev: true + + /media-typer@0.3.0: + resolution: {integrity: sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==} + engines: {node: '>= 0.6'} + dev: 
true + + /memdown@1.4.1: + resolution: {integrity: sha512-iVrGHZB8i4OQfM155xx8akvG9FIj+ht14DX5CQkCTG4EHzZ3d3sgckIf/Lm9ivZalEsFuEVnWv2B2WZvbrro2w==} + dependencies: + abstract-leveldown: 2.7.2 + functional-red-black-tree: 1.0.1 + immediate: 3.3.0 + inherits: 2.0.4 + ltgt: 2.2.1 + safe-buffer: 5.1.2 + dev: true + + /memdown@3.0.0: + resolution: {integrity: sha512-tbV02LfZMWLcHcq4tw++NuqMO+FZX8tNJEiD2aNRm48ZZusVg5N8NART+dmBkepJVye986oixErf7jfXboMGMA==} + engines: {node: '>=6'} + dependencies: + abstract-leveldown: 5.0.0 + functional-red-black-tree: 1.0.1 + immediate: 3.2.3 + inherits: 2.0.4 + ltgt: 2.2.1 + safe-buffer: 5.1.2 + dev: true + + /memory-level@1.0.0: + resolution: {integrity: sha512-UXzwewuWeHBz5krr7EvehKcmLFNoXxGcvuYhC41tRnkrTbJohtS7kVn9akmgirtRygg+f7Yjsfi8Uu5SGSQ4Og==} + engines: {node: '>=12'} + dependencies: + abstract-level: 1.0.3 + functional-red-black-tree: 1.0.1 + module-error: 1.0.2 + dev: true + + /memorystream@0.3.1: + resolution: {integrity: sha512-S3UwM3yj5mtUSEfP41UZmt/0SCoVYUcU1rkXv+BQ5Ig8ndL4sPoJNBUJERafdPb5jjHJGuMgytgKvKIf58XNBw==} + engines: {node: '>= 0.10.0'} + dev: true + + /merge-descriptors@1.0.1: + resolution: {integrity: sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==} + dev: true + + /merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + dev: true + + /merkle-patricia-tree@2.3.2: + resolution: {integrity: sha512-81PW5m8oz/pz3GvsAwbauj7Y00rqm81Tzad77tHBwU7pIAtN+TJnMSOJhxBKflSVYhptMMb9RskhqHqrSm1V+g==} + dependencies: + async: 1.5.2 + ethereumjs-util: 5.2.1 + level-ws: 0.0.0 + levelup: 1.3.9 + memdown: 1.4.1 + readable-stream: 2.3.7 + rlp: 2.2.7 + semaphore: 1.1.0 + dev: true + + /merkle-patricia-tree@3.0.0: + resolution: {integrity: sha512-soRaMuNf/ILmw3KWbybaCjhx86EYeBbD8ph0edQCTed0JN/rxDt1EBN52Ajre3VyGo+91f8+/rfPIRQnnGMqmQ==} + dependencies: + async: 2.6.3 + ethereumjs-util: 
5.2.1 + level-mem: 3.0.1 + level-ws: 1.0.0 + readable-stream: 3.6.0 + rlp: 2.2.7 + semaphore: 1.1.0 + dev: true + + /methods@1.1.2: + resolution: {integrity: sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==} + engines: {node: '>= 0.6'} + dev: true + + /micromatch@3.1.10: + resolution: {integrity: sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==} + engines: {node: '>=0.10.0'} + dependencies: + arr-diff: 4.0.0 + array-unique: 0.3.2 + braces: 2.3.2 + define-property: 2.0.2 + extend-shallow: 3.0.2 + extglob: 2.0.4 + fragment-cache: 0.2.1 + kind-of: 6.0.3 + nanomatch: 1.2.13 + object.pick: 1.3.0 + regex-not: 1.0.2 + snapdragon: 0.8.2 + to-regex: 3.0.2 + transitivePeerDependencies: + - supports-color + dev: true + + /micromatch@4.0.5: + resolution: {integrity: sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==} + engines: {node: '>=8.6'} + dependencies: + braces: 3.0.2 + picomatch: 2.3.1 + dev: true + + /miller-rabin@4.0.1: + resolution: {integrity: sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==} + hasBin: true + dependencies: + bn.js: 4.12.0 + brorand: 1.1.0 + dev: true + + /mime-db@1.44.0: + resolution: {integrity: sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==} + engines: {node: '>= 0.6'} + dev: true + + /mime-types@2.1.27: + resolution: {integrity: sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==} + engines: {node: '>= 0.6'} + dependencies: + mime-db: 1.44.0 + dev: true + + /mime@1.6.0: + resolution: {integrity: sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==} + engines: {node: '>=4'} + hasBin: true + dev: true + + /mimic-response@1.0.1: + resolution: {integrity: 
sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==} + engines: {node: '>=4'} + dev: true + + /mimic-response@3.1.0: + resolution: {integrity: sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==} + engines: {node: '>=10'} + dev: true + + /min-document@2.19.0: + resolution: {integrity: sha512-9Wy1B3m3f66bPPmU5hdA4DR4PB2OfDU/+GS3yAB7IQozE3tqXaVv2zOjgla7MEGSRv95+ILmOuvhLkOK6wJtCQ==} + dependencies: + dom-walk: 0.1.1 + dev: true + + /minimalistic-assert@1.0.1: + resolution: {integrity: sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==} + + /minimalistic-crypto-utils@1.0.1: + resolution: {integrity: sha512-JIYlbt6g8i5jKfJ3xz7rF0LXmv2TkDxBLUkiBeZ7bAx4GnnNMr8xFpGnOxn6GhTEHx3SjRrZEoU+j04prX1ktg==} + + /minimatch@3.0.4: + resolution: {integrity: sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==} + dependencies: + brace-expansion: 1.1.11 + dev: true + + /minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + dependencies: + brace-expansion: 1.1.11 + dev: true + + /minimatch@5.0.1: + resolution: {integrity: sha512-nLDxIFRyhDblz3qMuq+SoRZED4+miJ/G+tdDrjkkkRnjAsBexeGpgjLEQ0blJy7rHhR2b93rhQY4SvyWu9v03g==} + engines: {node: '>=10'} + dependencies: + brace-expansion: 2.0.1 + dev: true + + /minimatch@5.1.6: + resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==} + engines: {node: '>=10'} + dependencies: + brace-expansion: 2.0.1 + dev: true + + /minimatch@9.0.3: + resolution: {integrity: sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==} + engines: {node: '>=16 || 14 >=14.17'} + dependencies: + brace-expansion: 2.0.1 + dev: true + + /minimist@1.2.6: + resolution: {integrity: 
sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==} + dev: true + + /minimist@1.2.8: + resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} + dev: true + + /minipass@2.9.0: + resolution: {integrity: sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg==} + dependencies: + safe-buffer: 5.2.1 + yallist: 3.1.1 + dev: true + + /minizlib@1.3.3: + resolution: {integrity: sha512-6ZYMOEnmVsdCeTJVE0W9ZD+pVnE8h9Hma/iOwwRDsdQoePpoX56/8B6z3P9VNwppJuBKNRuFDRNRqRWexT9G9Q==} + dependencies: + minipass: 2.9.0 + dev: true + + /mixin-deep@1.3.2: + resolution: {integrity: sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==} + engines: {node: '>=0.10.0'} + dependencies: + for-in: 1.0.2 + is-extendable: 1.0.1 + dev: true + + /mkdirp-promise@5.0.1: + resolution: {integrity: sha512-Hepn5kb1lJPtVW84RFT40YG1OddBNTOVUZR2bzQUHc+Z03en8/3uX0+060JDhcEzyO08HmipsN9DcnFMxhIL9w==} + engines: {node: '>=4'} + deprecated: This package is broken and no longer maintained. 'mkdirp' itself supports promises now, please switch to that. 
+ dependencies: + mkdirp: 1.0.4 + dev: true + + /mkdirp@0.5.5: + resolution: {integrity: sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==} + hasBin: true + dependencies: + minimist: 1.2.8 + dev: true + + /mkdirp@0.5.6: + resolution: {integrity: sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==} + hasBin: true + dependencies: + minimist: 1.2.6 + dev: true + + /mkdirp@1.0.4: + resolution: {integrity: sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==} + engines: {node: '>=10'} + hasBin: true + dev: true + + /mnemonist@0.38.5: + resolution: {integrity: sha512-bZTFT5rrPKtPJxj8KSV0WkPyNxl72vQepqqVUAW2ARUpUSF2qXMB6jZj7hW5/k7C1rtpzqbD/IIbJwLXUjCHeg==} + dependencies: + obliterator: 2.0.4 + dev: true + + /mocha@10.2.0: + resolution: {integrity: sha512-IDY7fl/BecMwFHzoqF2sg/SHHANeBoMMXFlS9r0OXKDssYE1M5O43wUY/9BVPeIvfH2zmEbBfseqN9gBQZzXkg==} + engines: {node: '>= 14.0.0'} + hasBin: true + dependencies: + ansi-colors: 4.1.1 + browser-stdout: 1.3.1 + chokidar: 3.5.3 + debug: 4.3.4(supports-color@8.1.1) + diff: 5.0.0 + escape-string-regexp: 4.0.0 + find-up: 5.0.0 + glob: 7.2.0 + he: 1.2.0 + js-yaml: 4.1.0 + log-symbols: 4.1.0 + minimatch: 5.0.1 + ms: 2.1.3 + nanoid: 3.3.3 + serialize-javascript: 6.0.0 + strip-json-comments: 3.1.1 + supports-color: 8.1.1 + workerpool: 6.2.1 + yargs: 16.2.0 + yargs-parser: 20.2.4 + yargs-unparser: 2.0.0 + dev: true + + /mocha@7.2.0: + resolution: {integrity: sha512-O9CIypScywTVpNaRrCAgoUnJgozpIofjKUYmJhiCIJMiuYnLI6otcb1/kpW9/n/tJODHGZ7i8aLQoDVsMtOKQQ==} + engines: {node: '>= 8.10.0'} + hasBin: true + dependencies: + ansi-colors: 3.2.3 + browser-stdout: 1.3.1 + chokidar: 3.3.0 + debug: 3.2.6(supports-color@6.0.0) + diff: 3.5.0 + escape-string-regexp: 1.0.5 + find-up: 3.0.0 + glob: 7.1.3 + growl: 1.10.5 + he: 1.2.0 + js-yaml: 3.13.1 + log-symbols: 3.0.0 + minimatch: 3.0.4 + mkdirp: 0.5.5 + ms: 2.1.1 + 
node-environment-flags: 1.0.6 + object.assign: 4.1.0 + strip-json-comments: 2.0.1 + supports-color: 6.0.0 + which: 1.3.1 + wide-align: 1.1.3 + yargs: 13.3.2 + yargs-parser: 13.1.2 + yargs-unparser: 1.6.0 + dev: true + + /mock-fs@4.12.0: + resolution: {integrity: sha512-/P/HtrlvBxY4o/PzXY9cCNBrdylDNxg7gnrv2sMNxj+UJ2m8jSpl0/A6fuJeNAWr99ZvGWH8XCbE0vmnM5KupQ==} + dev: true + + /module-error@1.0.2: + resolution: {integrity: sha512-0yuvsqSCv8LbaOKhnsQ/T5JhyFlCYLPXK3U2sgV10zoKQwzs/MyfuQUOZQ1V/6OCOJsK/TRgNVrPuPDqtdMFtA==} + engines: {node: '>=10'} + dev: true + + /moment@2.29.4: + resolution: {integrity: sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==} + dev: true + + /ms@2.0.0: + resolution: {integrity: sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==} + dev: true + + /ms@2.1.1: + resolution: {integrity: sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==} + dev: true + + /ms@2.1.2: + resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} + dev: true + + /ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + dev: true + + /multibase@0.6.1: + resolution: {integrity: sha512-pFfAwyTjbbQgNc3G7D48JkJxWtoJoBMaR4xQUOuB8RnCgRqaYmWNFeJTTvrJ2w51bjLq2zTby6Rqj9TQ9elSUw==} + deprecated: This module has been superseded by the multiformats module + dependencies: + base-x: 3.0.9 + buffer: 5.7.1 + dev: true + + /multibase@0.7.0: + resolution: {integrity: sha512-TW8q03O0f6PNFTQDvh3xxH03c8CjGaaYrjkl9UQPG6rz53TQzzxJVCIWVjzcbN/Q5Y53Zd0IBQBMVktVgNx4Fg==} + deprecated: This module has been superseded by the multiformats module + dependencies: + base-x: 3.0.9 + buffer: 5.7.1 + dev: true + + /multicodec@0.5.7: + resolution: {integrity: 
sha512-PscoRxm3f+88fAtELwUnZxGDkduE2HD9Q6GHUOywQLjOGT/HAdhjLDYNZ1e7VR0s0TP0EwZ16LNUTFpoBGivOA==} + deprecated: This module has been superseded by the multiformats module + dependencies: + varint: 5.0.2 + dev: true + + /multicodec@1.0.4: + resolution: {integrity: sha512-NDd7FeS3QamVtbgfvu5h7fd1IlbaC4EQ0/pgU4zqE2vdHCmBGsUa0TiM8/TdSeG6BMPC92OOCf8F1ocE/Wkrrg==} + deprecated: This module has been superseded by the multiformats module + dependencies: + buffer: 5.7.1 + varint: 5.0.2 + dev: true + + /multihashes@0.4.21: + resolution: {integrity: sha512-uVSvmeCWf36pU2nB4/1kzYZjsXD9vofZKpgudqkceYY5g2aZZXJ5r9lxuzoRLl1OAp28XljXsEJ/X/85ZsKmKw==} + dependencies: + buffer: 5.7.1 + multibase: 0.7.0 + varint: 5.0.2 + dev: true + + /nan@2.13.2: + resolution: {integrity: sha512-TghvYc72wlMGMVMluVo9WRJc0mB8KxxF/gZ4YYFy7V2ZQX9l7rgbPg7vjS9mt6U5HXODVFVI2bOduCzwOMv/lw==} + dev: true + + /nan@2.16.0: + resolution: {integrity: sha512-UdAqHyFngu7TfQKsCBgAA6pWDkT8MAO7d0jyOecVhN5354xbLqdn8mV9Tat9gepAupm0bt2DbeaSC8vS52MuFA==} + dev: true + + /nano-base32@1.0.1: + resolution: {integrity: sha512-sxEtoTqAPdjWVGv71Q17koMFGsOMSiHsIFEvzOM7cNp8BXB4AnEwmDabm5dorusJf/v1z7QxaZYxUorU9RKaAw==} + dev: true + + /nano-json-stream-parser@0.1.2: + resolution: {integrity: sha512-9MqxMH/BSJC7dnLsEMPyfN5Dvoo49IsPFYMcHw3Bcfc2kN0lpHRBSzlMSVx4HGyJ7s9B31CyBTVehWJoQ8Ctew==} + dev: true + + /nanoid@3.3.3: + resolution: {integrity: sha512-p1sjXuopFs0xg+fPASzQ28agW1oHD7xDsd9Xkf3T15H3c/cifrFHVwrh74PdoklAPi+i7MdRsE47vm2r6JoB+w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + dev: true + + /nanomatch@1.2.13: + resolution: {integrity: sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==} + engines: {node: '>=0.10.0'} + dependencies: + arr-diff: 4.0.0 + array-unique: 0.3.2 + define-property: 2.0.2 + extend-shallow: 3.0.2 + fragment-cache: 0.2.1 + is-windows: 1.0.2 + kind-of: 6.0.3 + object.pick: 1.3.0 + regex-not: 1.0.2 + snapdragon: 0.8.2 + to-regex: 
3.0.2 + transitivePeerDependencies: + - supports-color + dev: true + + /napi-macros@2.0.0: + resolution: {integrity: sha512-A0xLykHtARfueITVDernsAWdtIMbOJgKgcluwENp3AlsKN/PloyO10HtmoqnFAQAcxPkgZN7wdfPfEd0zNGxbg==} + dev: true + + /natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + dev: true + + /negotiator@0.6.2: + resolution: {integrity: sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==} + engines: {node: '>= 0.6'} + dev: true + + /neo-async@2.6.2: + resolution: {integrity: sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==} + dev: true + + /neodoc@2.0.2: + resolution: {integrity: sha512-NAppJ0YecKWdhSXFYCHbo6RutiX8vOt/Jo3l46mUg6pQlpJNaqc5cGxdrW2jITQm5JIYySbFVPDl3RrREXNyPw==} + dependencies: + ansi-regex: 2.1.1 + dev: true + + /next-tick@1.1.0: + resolution: {integrity: sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==} + dev: true + + /nice-try@1.0.5: + resolution: {integrity: sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==} + dev: true + + /no-case@2.3.2: + resolution: {integrity: sha512-rmTZ9kz+f3rCvK2TD1Ue/oZlns7OGoIWP4fc3llxxRXlOkHKoWPPWJOfFYpITabSow43QJbRIoHQXtt10VldyQ==} + dependencies: + lower-case: 1.1.4 + dev: true + + /node-addon-api@2.0.2: + resolution: {integrity: sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA==} + dev: true + + /node-emoji@1.11.0: + resolution: {integrity: sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==} + dependencies: + lodash: 4.17.21 + dev: true + + /node-environment-flags@1.0.6: + resolution: {integrity: sha512-5Evy2epuL+6TM0lCQGpFIj6KwiEsGh1SrHUhTbNX+sLbBtjidPZFAnVK9y5yU1+h//RitLbRHTIMyxQPtxMdHw==} + dependencies: + object.getownpropertydescriptors: 2.1.4 + 
semver: 5.7.1 + dev: true + + /node-fetch@1.7.3: + resolution: {integrity: sha512-NhZ4CsKx7cYm2vSrBAr2PvFOe6sWDf0UYLRqA6svUYg7+/TSfVAu49jYC4BvQ4Sms9SZgdqGBgroqfDhJdTyKQ==} + dependencies: + encoding: 0.1.13 + is-stream: 1.1.0 + dev: true + + /node-fetch@2.6.7: + resolution: {integrity: sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==} + engines: {node: 4.x || >=6.0.0} + peerDependencies: + encoding: ^0.1.0 + peerDependenciesMeta: + encoding: + optional: true + dependencies: + whatwg-url: 5.0.0 + dev: true + + /node-gyp-build@4.5.0: + resolution: {integrity: sha512-2iGbaQBV+ITgCz76ZEjmhUKAKVf7xfY1sRl4UiKQspfZMH2h06SyhNsnSVy50cwkFQDGLyif6m/6uFXHkOZ6rg==} + hasBin: true + dev: true + + /node-interval-tree@2.1.2: + resolution: {integrity: sha512-bJ9zMDuNGzVQg1xv0bCPzyEDxHgbrx7/xGj6CDokvizZZmastPsOh0JJLuY8wA5q2SfX1TLNMk7XNV8WxbGxzA==} + engines: {node: '>= 14.0.0'} + dependencies: + shallowequal: 1.1.0 + dev: true + + /nofilter@1.0.4: + resolution: {integrity: sha512-N8lidFp+fCz+TD51+haYdbDGrcBWwuHX40F5+z0qkUjMJ5Tp+rdSuAkMJ9N9eoolDlEVTf6u5icM+cNKkKW2mA==} + engines: {node: '>=8'} + dev: true + + /nofilter@3.1.0: + resolution: {integrity: sha512-l2NNj07e9afPnhAhvgVrCD/oy2Ai1yfLpuo3EpiO1jFTsB4sFz6oIfAfSZyQzVpkZQ9xS8ZS5g1jCBgq4Hwo0g==} + engines: {node: '>=12.19'} + dev: true + + /nopt@3.0.6: + resolution: {integrity: sha512-4GUt3kSEYmk4ITxzB/b9vaIDfUVWN/Ml1Fwl11IlnIG2iaJ9O6WXZ9SrYM9NLI8OCBieN2Y8SWC2oJV0RQ7qYg==} + hasBin: true + dependencies: + abbrev: 1.1.1 + dev: true + + /normalize-package-data@2.5.0: + resolution: {integrity: sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==} + dependencies: + hosted-git-info: 2.8.9 + resolve: 1.22.1 + semver: 5.7.1 + validate-npm-package-license: 3.0.4 + dev: true + + /normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + 
dev: true + + /normalize-url@4.5.1: + resolution: {integrity: sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==} + engines: {node: '>=8'} + dev: true + + /normalize-url@6.1.0: + resolution: {integrity: sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==} + engines: {node: '>=10'} + dev: true + + /nth-check@2.1.1: + resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==} + dependencies: + boolbase: 1.0.0 + dev: true + + /number-is-nan@1.0.1: + resolution: {integrity: sha512-4jbtZXNAsfZbAHiiqjLPBiCl16dES1zI4Hpzzxw61Tk+loF+sBDBKx1ICKKKwIqQ7M0mFn1TmkN7euSncWgHiQ==} + engines: {node: '>=0.10.0'} + dev: true + + /number-to-bn@1.7.0: + resolution: {integrity: sha512-wsJ9gfSz1/s4ZsJN01lyonwuxA1tml6X1yBDnfpMglypcBRFZZkus26EdPSlqS5GJfYddVZa22p3VNb3z5m5Ig==} + engines: {node: '>=6.5.0', npm: '>=3'} + dependencies: + bn.js: 4.11.6 + strip-hex-prefix: 1.0.0 + dev: true + + /oauth-sign@0.9.0: + resolution: {integrity: sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==} + dev: true + + /object-assign@4.1.1: + resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + dev: true + + /object-copy@0.1.0: + resolution: {integrity: sha512-79LYn6VAb63zgtmAteVOWo9Vdj71ZVBy3Pbse+VqxDpEP83XuujMrGqHIwAXJ5I/aM0zU7dIyIAhifVTPrNItQ==} + engines: {node: '>=0.10.0'} + dependencies: + copy-descriptor: 0.1.1 + define-property: 0.2.5 + kind-of: 3.2.2 + dev: true + + /object-inspect@1.12.2: + resolution: {integrity: sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==} + dev: true + + /object-inspect@1.13.1: + resolution: {integrity: sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==} + dev: true + + /object-is@1.1.5: + 
resolution: {integrity: sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.5 + define-properties: 1.2.1 + dev: true + + /object-keys@0.4.0: + resolution: {integrity: sha512-ncrLw+X55z7bkl5PnUvHwFK9FcGuFYo9gtjws2XtSzL+aZ8tm830P60WJ0dSmFVaSalWieW5MD7kEdnXda9yJw==} + dev: true + + /object-keys@1.1.1: + resolution: {integrity: sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==} + engines: {node: '>= 0.4'} + dev: true + + /object-visit@1.0.1: + resolution: {integrity: sha512-GBaMwwAVK9qbQN3Scdo0OyvgPW7l3lnaVMj84uTOZlswkX0KpF6fyDBJhtTthf7pymztoN36/KEr1DyhF96zEA==} + engines: {node: '>=0.10.0'} + dependencies: + isobject: 3.0.1 + dev: true + + /object.assign@4.1.0: + resolution: {integrity: sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==} + engines: {node: '>= 0.4'} + dependencies: + define-properties: 1.1.4 + function-bind: 1.1.1 + has-symbols: 1.0.3 + object-keys: 1.1.1 + dev: true + + /object.assign@4.1.4: + resolution: {integrity: sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.1.4 + has-symbols: 1.0.3 + object-keys: 1.1.1 + dev: true + + /object.getownpropertydescriptors@2.1.4: + resolution: {integrity: sha512-sccv3L/pMModT6dJAYF3fzGMVcb38ysQ0tEE6ixv2yXJDtEIPph268OlAdJj5/qZMZDq2g/jqvwppt36uS/uQQ==} + engines: {node: '>= 0.8'} + dependencies: + array.prototype.reduce: 1.0.4 + call-bind: 1.0.2 + define-properties: 1.1.4 + es-abstract: 1.20.3 + dev: true + + /object.pick@1.3.0: + resolution: {integrity: sha512-tqa/UMy/CCoYmj+H5qc07qvSL9dqcs/WZENZ1JbtWBlATP+iVOe778gE6MSijnyCnORzDuX6hU+LA4SZ09YjFQ==} + engines: {node: '>=0.10.0'} + dependencies: + isobject: 3.0.1 + dev: true + + /obliterator@2.0.4: + resolution: {integrity: 
sha512-lgHwxlxV1qIg1Eap7LgIeoBWIMFibOjbrYPIPJZcI1mmGAI2m3lNYpK12Y+GBdPQ0U1hRwSord7GIaawz962qQ==} + dev: true + + /oboe@2.1.4: + resolution: {integrity: sha512-ymBJ4xSC6GBXLT9Y7lirj+xbqBLa+jADGJldGEYG7u8sZbS9GyG+u1Xk9c5cbriKwSpCg41qUhPjvU5xOpvIyQ==} + requiresBuild: true + dependencies: + http-https: 1.0.0 + dev: true + optional: true + + /oboe@2.1.5: + resolution: {integrity: sha512-zRFWiF+FoicxEs3jNI/WYUrVEgA7DeET/InK0XQuudGHRg8iIob3cNPrJTKaz4004uaA9Pbe+Dwa8iluhjLZWA==} + dependencies: + http-https: 1.0.0 + dev: true + + /on-finished@2.3.0: + resolution: {integrity: sha512-ikqdkGAAyf/X/gPhXGvfgAytDZtDbr+bkNUJ0N9h5MI/dmdgCs3l6hoHrcUv41sRKew3jIwrp4qQDXiK99Utww==} + engines: {node: '>= 0.8'} + dependencies: + ee-first: 1.1.1 + dev: true + + /once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + dependencies: + wrappy: 1.0.2 + dev: true + + /open@7.4.2: + resolution: {integrity: sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==} + engines: {node: '>=8'} + dependencies: + is-docker: 2.2.1 + is-wsl: 2.2.0 + dev: true + + /optionator@0.8.3: + resolution: {integrity: sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==} + engines: {node: '>= 0.8.0'} + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.3.0 + prelude-ls: 1.1.2 + type-check: 0.3.2 + word-wrap: 1.2.3 + dev: true + + /optionator@0.9.3: + resolution: {integrity: sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==} + engines: {node: '>= 0.8.0'} + dependencies: + '@aashutoshrathi/word-wrap': 1.2.6 + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + dev: true + + /os-homedir@1.0.2: + resolution: {integrity: sha512-B5JU3cabzk8c67mRRd3ECmROafjYMXbuzlwtqdM8IbS8ktlTix8aFGb2bAGKrSRIlnfKwovGUUr72JUPyOb6kQ==} + engines: {node: '>=0.10.0'} + dev: 
true + + /os-locale@1.4.0: + resolution: {integrity: sha512-PRT7ZORmwu2MEFt4/fv3Q+mEfN4zetKxufQrkShY2oGvUms9r8otu5HfdyIFHkYXjO7laNsoVGmM2MANfuTA8g==} + engines: {node: '>=0.10.0'} + dependencies: + lcid: 1.0.0 + dev: true + + /os-tmpdir@1.0.2: + resolution: {integrity: sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==} + engines: {node: '>=0.10.0'} + dev: true + + /p-cancelable@0.3.0: + resolution: {integrity: sha512-RVbZPLso8+jFeq1MfNvgXtCRED2raz/dKpacfTNxsx6pLEpEomM7gah6VeHSYV3+vo0OAi4MkArtQcWWXuQoyw==} + engines: {node: '>=4'} + dev: true + + /p-cancelable@1.1.0: + resolution: {integrity: sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==} + engines: {node: '>=6'} + dev: true + + /p-cancelable@3.0.0: + resolution: {integrity: sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==} + engines: {node: '>=12.20'} + dev: true + + /p-finally@1.0.0: + resolution: {integrity: sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==} + engines: {node: '>=4'} + dev: true + + /p-limit@1.3.0: + resolution: {integrity: sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==} + engines: {node: '>=4'} + dependencies: + p-try: 1.0.0 + dev: true + + /p-limit@2.3.0: + resolution: {integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==} + engines: {node: '>=6'} + dependencies: + p-try: 2.2.0 + dev: true + + /p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + dependencies: + yocto-queue: 0.1.0 + dev: true + + /p-locate@2.0.0: + resolution: {integrity: sha512-nQja7m7gSKuewoVRen45CtVfODR3crN3goVQ0DDZ9N3yHxgpkuBhZqsaiotSQRrADUrne346peY7kT3TSACykg==} + engines: {node: '>=4'} + dependencies: + p-limit: 1.3.0 + dev: 
true + + /p-locate@3.0.0: + resolution: {integrity: sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==} + engines: {node: '>=6'} + dependencies: + p-limit: 2.3.0 + dev: true + + /p-locate@4.1.0: + resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==} + engines: {node: '>=8'} + dependencies: + p-limit: 2.3.0 + dev: true + + /p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + dependencies: + p-limit: 3.1.0 + dev: true + + /p-map@4.0.0: + resolution: {integrity: sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==} + engines: {node: '>=10'} + dependencies: + aggregate-error: 3.1.0 + dev: true + + /p-timeout@1.2.1: + resolution: {integrity: sha512-gb0ryzr+K2qFqFv6qi3khoeqMZF/+ajxQipEF6NteZVnvz9tzdsfAVj3lYtn1gAXvH5lfLwfxEII799gt/mRIA==} + engines: {node: '>=4'} + dependencies: + p-finally: 1.0.0 + dev: true + + /p-try@1.0.0: + resolution: {integrity: sha512-U1etNYuMJoIz3ZXSrrySFjsXQTWOx2/jdi86L+2pRvph/qMKL6sbcCYdH23fqsbm8TH2Gn0OybpT4eSFlCVHww==} + engines: {node: '>=4'} + dev: true + + /p-try@2.2.0: + resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==} + engines: {node: '>=6'} + dev: true + + /package-json@8.1.1: + resolution: {integrity: sha512-cbH9IAIJHNj9uXi196JVsRlt7cHKak6u/e6AkL/bkRelZ7rlL3X1YKxsZwa36xipOEKAsdtmaG6aAJoM1fx2zA==} + engines: {node: '>=14.16'} + dependencies: + got: 12.1.0 + registry-auth-token: 5.0.2 + registry-url: 6.0.1 + semver: 7.5.4 + dev: true + + /param-case@2.1.1: + resolution: {integrity: sha512-eQE845L6ot89sk2N8liD8HAuH4ca6Vvr7VWAWwt7+kvvG5aBcPmmphQ68JsEG2qa9n1TykS2DLeMt363AAH8/w==} + dependencies: + no-case: 2.3.2 + dev: true + + /parent-module@1.0.1: + resolution: {integrity: 
sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + dependencies: + callsites: 3.1.0 + dev: true + + /parse-asn1@5.1.5: + resolution: {integrity: sha512-jkMYn1dcJqF6d5CpU689bq7w/b5ALS9ROVSpQDPrZsqqesUJii9qutvoT5ltGedNXMO2e16YUWIghG9KxaViTQ==} + dependencies: + asn1.js: 4.10.1 + browserify-aes: 1.2.0 + create-hash: 1.2.0 + evp_bytestokey: 1.0.3 + pbkdf2: 3.1.2 + safe-buffer: 5.2.1 + dev: true + + /parse-cache-control@1.0.1: + resolution: {integrity: sha512-60zvsJReQPX5/QP0Kzfd/VrpjScIQ7SHBW6bFCYfEP+fp0Eppr1SHhIO5nd1PjZtvclzSzES9D/p5nFJurwfWg==} + dev: true + + /parse-headers@2.0.3: + resolution: {integrity: sha512-QhhZ+DCCit2Coi2vmAKbq5RGTRcQUOE2+REgv8vdyu7MnYx2eZztegqtTx99TZ86GTIwqiy3+4nQTWZ2tgmdCA==} + dev: true + + /parse-json@2.2.0: + resolution: {integrity: sha512-QR/GGaKCkhwk1ePQNYDRKYZ3mwU9ypsKhB0XyFnLQdomyEqk3e8wpW3V5Jp88zbxK4n5ST1nqo+g9juTpownhQ==} + engines: {node: '>=0.10.0'} + dependencies: + error-ex: 1.3.2 + dev: true + + /parse-json@5.2.0: + resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} + engines: {node: '>=8'} + dependencies: + '@babel/code-frame': 7.18.6 + error-ex: 1.3.2 + json-parse-even-better-errors: 2.3.1 + lines-and-columns: 1.2.4 + dev: true + + /parse5-htmlparser2-tree-adapter@7.0.0: + resolution: {integrity: sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==} + dependencies: + domhandler: 5.0.3 + parse5: 7.1.1 + dev: true + + /parse5@7.1.1: + resolution: {integrity: sha512-kwpuwzB+px5WUg9pyK0IcK/shltJN5/OVhQagxhCQNtT9Y9QRZqNY2e1cmbu/paRh5LMnz/oVTVLBpjFmMZhSg==} + dependencies: + entities: 4.4.0 + dev: true + + /parseurl@1.3.3: + resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==} + engines: {node: '>= 0.8'} + dev: true + + /pascal-case@2.0.1: + resolution: {integrity: 
sha512-qjS4s8rBOJa2Xm0jmxXiyh1+OFf6ekCWOvUaRgAQSktzlTbMotS0nmG9gyYAybCWBcuP4fsBeRCKNwGBnMe2OQ==} + dependencies: + camel-case: 3.0.0 + upper-case-first: 1.1.2 + dev: true + + /pascalcase@0.1.1: + resolution: {integrity: sha512-XHXfu/yOQRy9vYOtUDVMN60OEJjW013GoObG1o+xwQTpB9eYJX/BjXMsdW13ZDPruFhYYn0AG22w0xgQMwl3Nw==} + engines: {node: '>=0.10.0'} + dev: true + + /patch-package@6.2.2: + resolution: {integrity: sha512-YqScVYkVcClUY0v8fF0kWOjDYopzIM8e3bj/RU1DPeEF14+dCGm6UeOYm4jvCyxqIEQ5/eJzmbWfDWnUleFNMg==} + engines: {npm: '>5'} + hasBin: true + dependencies: + '@yarnpkg/lockfile': 1.1.0 + chalk: 2.4.2 + cross-spawn: 6.0.5 + find-yarn-workspace-root: 1.2.1 + fs-extra: 7.0.1 + is-ci: 2.0.0 + klaw-sync: 6.0.0 + minimist: 1.2.8 + rimraf: 2.7.1 + semver: 5.7.1 + slash: 2.0.0 + tmp: 0.0.33 + transitivePeerDependencies: + - supports-color + dev: true + + /patch-package@6.4.7: + resolution: {integrity: sha512-S0vh/ZEafZ17hbhgqdnpunKDfzHQibQizx9g8yEf5dcVk3KOflOfdufRXQX8CSEkyOQwuM/bNz1GwKvFj54kaQ==} + engines: {npm: '>5'} + hasBin: true + dependencies: + '@yarnpkg/lockfile': 1.1.0 + chalk: 2.4.2 + cross-spawn: 6.0.5 + find-yarn-workspace-root: 2.0.0 + fs-extra: 7.0.1 + is-ci: 2.0.0 + klaw-sync: 6.0.0 + minimist: 1.2.8 + open: 7.4.2 + rimraf: 2.7.1 + semver: 5.7.1 + slash: 2.0.0 + tmp: 0.0.33 + dev: true + + /path-browserify@1.0.1: + resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==} + dev: true + + /path-case@2.1.1: + resolution: {integrity: sha512-Ou0N05MioItesaLr9q8TtHVWmJ6fxWdqKB2RohFmNWVyJ+2zeKIeDNWAN6B/Pe7wpzWChhZX6nONYmOnMeJQ/Q==} + dependencies: + no-case: 2.3.2 + dev: true + + /path-exists@2.1.0: + resolution: {integrity: sha512-yTltuKuhtNeFJKa1PiRzfLAU5182q1y4Eb4XCJ3PBqyzEDkAZRzBrKKBct682ls9reBVHf9udYLN5Nd+K1B9BQ==} + engines: {node: '>=0.10.0'} + dependencies: + pinkie-promise: 2.0.1 + dev: true + + /path-exists@3.0.0: + resolution: {integrity: 
sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==} + engines: {node: '>=4'} + dev: true + + /path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + dev: true + + /path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + dev: true + + /path-key@2.0.1: + resolution: {integrity: sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw==} + engines: {node: '>=4'} + dev: true + + /path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + dev: true + + /path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + dev: true + + /path-starts-with@2.0.1: + resolution: {integrity: sha512-wZ3AeiRBRlNwkdUxvBANh0+esnt38DLffHDujZyRHkqkaKHTglnY2EP5UX3b8rdeiSutgO4y9NEJwXezNP5vHg==} + engines: {node: '>=8'} + dev: true + + /path-to-regexp@0.1.7: + resolution: {integrity: sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==} + dev: true + + /path-type@1.1.0: + resolution: {integrity: sha512-S4eENJz1pkiQn9Znv33Q+deTOKmbl+jj1Fl+qiP/vYezj+S8x+J3Uo0ISrx/QoEvIlOaDWJhPaRd1flJ9HXZqg==} + engines: {node: '>=0.10.0'} + dependencies: + graceful-fs: 4.2.10 + pify: 2.3.0 + pinkie-promise: 2.0.1 + dev: true + + /path-type@4.0.0: + resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} + engines: {node: '>=8'} + dev: true + + /pathval@1.1.1: + resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==} + + /pbkdf2@3.1.2: + 
resolution: {integrity: sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==} + engines: {node: '>=0.12'} + dependencies: + create-hash: 1.2.0 + create-hmac: 1.1.7 + ripemd160: 2.0.2 + safe-buffer: 5.2.1 + sha.js: 2.4.11 + dev: true + + /performance-now@2.1.0: + resolution: {integrity: sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==} + dev: true + + /picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + dev: true + + /pify@2.3.0: + resolution: {integrity: sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==} + engines: {node: '>=0.10.0'} + dev: true + + /pify@4.0.1: + resolution: {integrity: sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==} + engines: {node: '>=6'} + dev: true + + /pinkie-promise@2.0.1: + resolution: {integrity: sha512-0Gni6D4UcLTbv9c57DfxDGdr41XfgUjqWZu492f0cIGr16zDU06BWP/RAEvOuo7CQ0CNjHaLlM59YJJFm3NWlw==} + engines: {node: '>=0.10.0'} + dependencies: + pinkie: 2.0.4 + dev: true + + /pinkie@2.0.4: + resolution: {integrity: sha512-MnUuEycAemtSaeFSjXKW/aroV7akBbY+Sv+RkyqFjgAe73F+MR0TBWKBRDkmfWq/HiFmdavfZ1G7h4SPZXaCSg==} + engines: {node: '>=0.10.0'} + dev: true + + /pluralize@8.0.0: + resolution: {integrity: sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==} + engines: {node: '>=4'} + dev: true + + /posix-character-classes@0.1.1: + resolution: {integrity: sha512-xTgYBc3fuo7Yt7JbiuFxSYGToMoz8fLoE6TC9Wx1P/u+LfeThMOAqmuyECnlBaaJb+u1m9hHiXUEtwW4OzfUJg==} + engines: {node: '>=0.10.0'} + dev: true + + /postinstall-postinstall@2.1.0: + resolution: {integrity: sha512-7hQX6ZlZXIoRiWNrbMQaLzUUfH+sSx39u8EJ9HYuDc1kLo9IXKWjM5RSquZN1ad5GnH8CGFM78fsAAQi3OKEEQ==} + requiresBuild: true + dev: true + + /precond@0.2.3: + resolution: 
{integrity: sha512-QCYG84SgGyGzqJ/vlMsxeXd/pgL/I94ixdNFyh1PusWmTCyVfPJjZ1K1jvHtsbfnXQs2TSkEP2fR7QiMZAnKFQ==} + engines: {node: '>= 0.6'} + dev: true + + /prelude-ls@1.1.2: + resolution: {integrity: sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==} + engines: {node: '>= 0.8.0'} + dev: true + + /prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + dev: true + + /prepend-http@1.0.4: + resolution: {integrity: sha512-PhmXi5XmoyKw1Un4E+opM2KcsJInDvKyuOumcjjw3waw86ZNjHwVUOOWLc4bCzLdcKNaWBH9e99sbWzDQsVaYg==} + engines: {node: '>=0.10.0'} + dev: true + + /prepend-http@2.0.0: + resolution: {integrity: sha512-ravE6m9Atw9Z/jjttRUZ+clIXogdghyZAuWJ3qEzjT+jI/dL1ifAqhZeC5VHzQp1MSt1+jxKkFNemj/iO7tVUA==} + engines: {node: '>=4'} + dev: true + + /prettier-linter-helpers@1.0.0: + resolution: {integrity: sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==} + engines: {node: '>=6.0.0'} + dependencies: + fast-diff: 1.2.0 + dev: true + + /prettier-plugin-solidity@1.3.1(prettier@2.8.8): + resolution: {integrity: sha512-MN4OP5I2gHAzHZG1wcuJl0FsLS3c4Cc5494bbg+6oQWBPuEamjwDvmGfFMZ6NFzsh3Efd9UUxeT7ImgjNH4ozA==} + engines: {node: '>=16'} + peerDependencies: + prettier: '>=2.3.0' + dependencies: + '@solidity-parser/parser': 0.17.0 + prettier: 2.8.8 + semver: 7.5.4 + solidity-comments-extractor: 0.0.8 + dev: true + optional: true + + /prettier-plugin-solidity@1.3.1(prettier@3.2.5): + resolution: {integrity: sha512-MN4OP5I2gHAzHZG1wcuJl0FsLS3c4Cc5494bbg+6oQWBPuEamjwDvmGfFMZ6NFzsh3Efd9UUxeT7ImgjNH4ozA==} + engines: {node: '>=16'} + peerDependencies: + prettier: '>=2.3.0' + dependencies: + '@solidity-parser/parser': 0.17.0 + prettier: 3.2.5 + semver: 7.5.4 + solidity-comments-extractor: 0.0.8 + dev: true + + /prettier@2.8.8: + resolution: {integrity: 
sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==} + engines: {node: '>=10.13.0'} + hasBin: true + dev: true + + /prettier@3.2.5: + resolution: {integrity: sha512-3/GWa9aOC0YeD7LUfvOG2NiDyhOWRvt1k+rcKhOuYnMY24iiCphgneUfJDyFXd6rZCAnuLBv6UeAULtrhT/F4A==} + engines: {node: '>=14'} + hasBin: true + dev: true + + /private@0.1.8: + resolution: {integrity: sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==} + engines: {node: '>= 0.6'} + dev: true + + /process-nextick-args@2.0.1: + resolution: {integrity: sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==} + dev: true + + /process@0.5.2: + resolution: {integrity: sha512-oNpcutj+nYX2FjdEW7PGltWhXulAnFlM0My/k48L90hARCOJtvBbQXc/6itV2jDvU5xAAtonP+r6wmQgCcbAUA==} + engines: {node: '>= 0.6.0'} + dev: true + + /promise-to-callback@1.0.0: + resolution: {integrity: sha512-uhMIZmKM5ZteDMfLgJnoSq9GCwsNKrYau73Awf1jIy6/eUcuuZ3P+CD9zUv0kJsIUbU+x6uLNIhXhLHDs1pNPA==} + engines: {node: '>=0.10.0'} + dependencies: + is-fn: 1.0.0 + set-immediate-shim: 1.0.1 + dev: true + + /promise@8.3.0: + resolution: {integrity: sha512-rZPNPKTOYVNEEKFaq1HqTgOwZD+4/YHS5ukLzQCypkj+OkYx7iv0mA91lJlpPPZ8vMau3IIGj5Qlwrx+8iiSmg==} + dependencies: + asap: 2.0.6 + dev: true + + /proper-lockfile@4.1.2: + resolution: {integrity: sha512-TjNPblN4BwAWMXU8s9AEz4JmQxnD1NNL7bNOY/AKUzyamc379FWASUhc/K1pL2noVb+XmZKLL68cjzLsiOAMaA==} + dependencies: + graceful-fs: 4.2.10 + retry: 0.12.0 + signal-exit: 3.0.7 + dev: true + + /proto-list@1.2.4: + resolution: {integrity: sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==} + dev: true + + /proxy-addr@2.0.5: + resolution: {integrity: sha512-t/7RxHXPH6cJtP0pRG6smSr9QJidhB+3kXu0KgXnbGYMgzEnUxRQ4/LDdfOwZEMyIh3/xHb8PX3t+lfL9z+YVQ==} + engines: {node: '>= 0.10'} + dependencies: + forwarded: 0.1.2 + ipaddr.js: 1.9.0 + dev: true + + /proxy-from-env@1.1.0: + 
resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + dev: true + + /prr@1.0.1: + resolution: {integrity: sha512-yPw4Sng1gWghHQWj0B3ZggWUm4qVbPwPFcRG8KyxiU7J2OHFSoEHKS+EZ3fv5l1t9CyCiop6l/ZYeWbrgoQejw==} + dev: true + + /pseudomap@1.0.2: + resolution: {integrity: sha512-b/YwNhb8lk1Zz2+bXXpS/LK9OisiZZ1SNsSLxN1x2OXVEhW2Ckr/7mWE5vrC1ZTiJlD9g19jWszTmJsB+oEpFQ==} + dev: true + + /psl@1.9.0: + resolution: {integrity: sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==} + dev: true + + /public-encrypt@4.0.3: + resolution: {integrity: sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==} + dependencies: + bn.js: 4.12.0 + browserify-rsa: 4.0.1 + create-hash: 1.2.0 + parse-asn1: 5.1.5 + randombytes: 2.1.0 + safe-buffer: 5.2.1 + dev: true + + /pull-cat@1.1.11: + resolution: {integrity: sha512-i3w+xZ3DCtTVz8S62hBOuNLRHqVDsHMNZmgrZsjPnsxXUgbWtXEee84lo1XswE7W2a3WHyqsNuDJTjVLAQR8xg==} + dev: true + + /pull-defer@0.2.3: + resolution: {integrity: sha512-/An3KE7mVjZCqNhZsr22k1Tx8MACnUnHZZNPSJ0S62td8JtYr/AiRG42Vz7Syu31SoTLUzVIe61jtT/pNdjVYA==} + dev: true + + /pull-level@2.0.4: + resolution: {integrity: sha512-fW6pljDeUThpq5KXwKbRG3X7Ogk3vc75d5OQU/TvXXui65ykm+Bn+fiktg+MOx2jJ85cd+sheufPL+rw9QSVZg==} + dependencies: + level-post: 1.0.7 + pull-cat: 1.1.11 + pull-live: 1.0.1 + pull-pushable: 2.2.0 + pull-stream: 3.6.14 + pull-window: 2.1.4 + stream-to-pull-stream: 1.7.3 + dev: true + + /pull-live@1.0.1: + resolution: {integrity: sha512-tkNz1QT5gId8aPhV5+dmwoIiA1nmfDOzJDlOOUpU5DNusj6neNd3EePybJ5+sITr2FwyCs/FVpx74YMCfc8YeA==} + dependencies: + pull-cat: 1.1.11 + pull-stream: 3.6.14 + dev: true + + /pull-pushable@2.2.0: + resolution: {integrity: sha512-M7dp95enQ2kaHvfCt2+DJfyzgCSpWVR2h2kWYnVsW6ZpxQBx5wOu0QWOvQPVoPnBLUZYitYP2y7HyHkLQNeGXg==} + dev: true + + /pull-stream@3.6.14: + resolution: {integrity: 
sha512-KIqdvpqHHaTUA2mCYcLG1ibEbu/LCKoJZsBWyv9lSYtPkJPBq8m3Hxa103xHi6D2thj5YXa0TqK3L3GUkwgnew==} + dev: true + + /pull-window@2.1.4: + resolution: {integrity: sha512-cbDzN76BMlcGG46OImrgpkMf/VkCnupj8JhsrpBw3aWBM9ye345aYnqitmZCgauBkc0HbbRRn9hCnsa3k2FNUg==} + dependencies: + looper: 2.0.0 + dev: true + + /pump@3.0.0: + resolution: {integrity: sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==} + dependencies: + end-of-stream: 1.4.4 + once: 1.4.0 + dev: true + + /punycode@1.3.2: + resolution: {integrity: sha512-RofWgt/7fL5wP1Y7fxE7/EmTLzQVnB0ycyibJ0OOHIlJqTNzglYFxVwETOcIoJqJmpDXJ9xImDv+Fq34F/d4Dw==} + dev: true + + /punycode@2.1.0: + resolution: {integrity: sha512-Yxz2kRwT90aPiWEMHVYnEf4+rhwF1tBmmZ4KepCP+Wkium9JxtWnUm1nqGwpiAHr/tnTSeHqr3wb++jgSkXjhA==} + engines: {node: '>=6'} + dev: true + + /punycode@2.1.1: + resolution: {integrity: sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==} + engines: {node: '>=6'} + dev: true + + /pure-rand@5.0.3: + resolution: {integrity: sha512-9N8x1h8dptBQpHyC7aZMS+iNOAm97WMGY0AFrguU1cpfW3I5jINkWe5BIY5md0ofy+1TCIELsVcm/GJXZSaPbw==} + dev: true + + /qs@6.11.0: + resolution: {integrity: sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==} + engines: {node: '>=0.6'} + dependencies: + side-channel: 1.0.4 + dev: true + + /qs@6.5.3: + resolution: {integrity: sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==} + engines: {node: '>=0.6'} + dev: true + + /qs@6.7.0: + resolution: {integrity: sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==} + engines: {node: '>=0.6'} + dev: true + + /query-string@5.1.1: + resolution: {integrity: sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==} + engines: {node: '>=0.10.0'} + dependencies: + decode-uri-component: 0.2.0 + object-assign: 4.1.1 + 
strict-uri-encode: 1.1.0 + dev: true + + /querystring@0.2.0: + resolution: {integrity: sha512-X/xY82scca2tau62i9mDyU9K+I+djTMUsvwf7xnUX5GLvVzgJybOJf4Y6o9Zx3oJK/LSXg5tTZBjwzqVPaPO2g==} + engines: {node: '>=0.4.x'} + deprecated: The querystring API is considered Legacy. new code should use the URLSearchParams API instead. + dev: true + + /queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + dev: true + + /quick-lru@5.1.1: + resolution: {integrity: sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==} + engines: {node: '>=10'} + dev: true + + /randombytes@2.1.0: + resolution: {integrity: sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==} + dependencies: + safe-buffer: 5.2.1 + dev: true + + /randomfill@1.0.4: + resolution: {integrity: sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==} + dependencies: + randombytes: 2.1.0 + safe-buffer: 5.2.1 + dev: true + + /range-parser@1.2.1: + resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} + engines: {node: '>= 0.6'} + dev: true + + /raw-body@2.4.0: + resolution: {integrity: sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==} + engines: {node: '>= 0.8'} + dependencies: + bytes: 3.1.0 + http-errors: 1.7.2 + iconv-lite: 0.4.24 + unpipe: 1.0.0 + dev: true + + /raw-body@2.5.1: + resolution: {integrity: sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==} + engines: {node: '>= 0.8'} + dependencies: + bytes: 3.1.2 + http-errors: 2.0.0 + iconv-lite: 0.4.24 + unpipe: 1.0.0 + dev: true + + /rc@1.2.8: + resolution: {integrity: sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==} + hasBin: true + dependencies: + 
deep-extend: 0.6.0 + ini: 1.3.8 + minimist: 1.2.8 + strip-json-comments: 2.0.1 + dev: true + + /read-pkg-up@1.0.1: + resolution: {integrity: sha512-WD9MTlNtI55IwYUS27iHh9tK3YoIVhxis8yKhLpTqWtml739uXc9NWTpxoHkfZf3+DkCCsXox94/VWZniuZm6A==} + engines: {node: '>=0.10.0'} + dependencies: + find-up: 1.1.2 + read-pkg: 1.1.0 + dev: true + + /read-pkg@1.1.0: + resolution: {integrity: sha512-7BGwRHqt4s/uVbuyoeejRn4YmFnYZiFl4AuaeXHlgZf3sONF0SOGlxs2Pw8g6hCKupo08RafIO5YXFNOKTfwsQ==} + engines: {node: '>=0.10.0'} + dependencies: + load-json-file: 1.1.0 + normalize-package-data: 2.5.0 + path-type: 1.1.0 + dev: true + + /readable-stream@1.0.34: + resolution: {integrity: sha512-ok1qVCJuRkNmvebYikljxJA/UEsKwLl2nI1OmaqAu4/UE+h0wKCHok4XkL/gvi39OacXvw59RJUOFUkDib2rHg==} + dependencies: + core-util-is: 1.0.3 + inherits: 2.0.4 + isarray: 0.0.1 + string_decoder: 0.10.31 + dev: true + + /readable-stream@2.3.7: + resolution: {integrity: sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==} + dependencies: + core-util-is: 1.0.3 + inherits: 2.0.4 + isarray: 1.0.0 + process-nextick-args: 2.0.1 + safe-buffer: 5.1.2 + string_decoder: 1.1.1 + util-deprecate: 1.0.2 + dev: true + + /readable-stream@3.6.0: + resolution: {integrity: sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==} + engines: {node: '>= 6'} + dependencies: + inherits: 2.0.4 + string_decoder: 1.3.0 + util-deprecate: 1.0.2 + dev: true + + /readdirp@3.2.0: + resolution: {integrity: sha512-crk4Qu3pmXwgxdSgGhgA/eXiJAPQiX4GMOZZMXnqKxHX7TaoL+3gQVo/WeuAiogr07DpnfjIMpXXa+PAIvwPGQ==} + engines: {node: '>= 8'} + dependencies: + picomatch: 2.3.1 + dev: true + + /readdirp@3.6.0: + resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} + engines: {node: '>=8.10.0'} + dependencies: + picomatch: 2.3.1 + dev: true + + /rechoir@0.6.2: + resolution: {integrity: 
sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==} + engines: {node: '>= 0.10'} + dependencies: + resolve: 1.22.1 + dev: true + + /recursive-readdir@2.2.2: + resolution: {integrity: sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg==} + engines: {node: '>=0.10.0'} + dependencies: + minimatch: 3.0.4 + dev: true + + /reduce-flatten@2.0.0: + resolution: {integrity: sha512-EJ4UNY/U1t2P/2k6oqotuX2Cc3T6nxJwsM0N0asT7dhrtH1ltUxDn4NalSYmPE2rCkVpcf/X6R0wDwcFpzhd4w==} + engines: {node: '>=6'} + dev: true + + /regenerate@1.4.2: + resolution: {integrity: sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==} + dev: true + + /regenerator-runtime@0.11.1: + resolution: {integrity: sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==} + dev: true + + /regenerator-runtime@0.13.9: + resolution: {integrity: sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==} + dev: true + + /regenerator-transform@0.10.1: + resolution: {integrity: sha512-PJepbvDbuK1xgIgnau7Y90cwaAmO/LCLMI2mPvaXq2heGMR3aWW5/BQvYrhJ8jgmQjXewXvBjzfqKcVOmhjZ6Q==} + dependencies: + babel-runtime: 6.26.0 + babel-types: 6.26.0 + private: 0.1.8 + dev: true + + /regex-not@1.0.2: + resolution: {integrity: sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==} + engines: {node: '>=0.10.0'} + dependencies: + extend-shallow: 3.0.2 + safe-regex: 1.1.0 + dev: true + + /regexp.prototype.flags@1.4.3: + resolution: {integrity: sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.2 + define-properties: 1.1.4 + functions-have-names: 1.2.3 + dev: true + + /regexp.prototype.flags@1.5.1: + resolution: {integrity: 
sha512-sy6TXMN+hnP/wMy+ISxg3krXx7BAtWVO4UouuCN/ziM9UEne0euamVNafDfvC83bRNr95y0V5iijeDQFUNpvrg==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.5 + define-properties: 1.2.1 + set-function-name: 2.0.1 + dev: true + + /regexpu-core@2.0.0: + resolution: {integrity: sha512-tJ9+S4oKjxY8IZ9jmjnp/mtytu1u3iyIQAfmI51IKWH6bFf7XR1ybtaO6j7INhZKXOTYADk7V5qxaqLkmNxiZQ==} + dependencies: + regenerate: 1.4.2 + regjsgen: 0.2.0 + regjsparser: 0.1.5 + dev: true + + /registry-auth-token@5.0.2: + resolution: {integrity: sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ==} + engines: {node: '>=14'} + dependencies: + '@pnpm/npm-conf': 2.2.2 + dev: true + + /registry-url@6.0.1: + resolution: {integrity: sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q==} + engines: {node: '>=12'} + dependencies: + rc: 1.2.8 + dev: true + + /regjsgen@0.2.0: + resolution: {integrity: sha512-x+Y3yA24uF68m5GA+tBjbGYo64xXVJpbToBaWCoSNSc1hdk6dfctaRWrNFTVJZIIhL5GxW8zwjoixbnifnK59g==} + dev: true + + /regjsparser@0.1.5: + resolution: {integrity: sha512-jlQ9gYLfk2p3V5Ag5fYhA7fv7OHzd1KUH0PRP46xc3TgwjwgROIW572AfYg/X9kaNq/LJnu6oJcFRXlIrGoTRw==} + hasBin: true + dependencies: + jsesc: 0.5.0 + dev: true + + /repeat-element@1.1.4: + resolution: {integrity: sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ==} + engines: {node: '>=0.10.0'} + dev: true + + /repeat-string@1.6.1: + resolution: {integrity: sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==} + engines: {node: '>=0.10'} + dev: true + + /repeating@2.0.1: + resolution: {integrity: sha512-ZqtSMuVybkISo2OWvqvm7iHSWngvdaW3IpsT9/uP8v4gMi591LY6h35wdOfvQdWCKFWZWm2Y1Opp4kV7vQKT6A==} + engines: {node: '>=0.10.0'} + dependencies: + is-finite: 1.1.0 + dev: true + + /req-cwd@2.0.0: + resolution: {integrity: 
sha512-ueoIoLo1OfB6b05COxAA9UpeoscNpYyM+BqYlA7H6LVF4hKGPXQQSSaD2YmvDVJMkk4UDpAHIeU1zG53IqjvlQ==} + engines: {node: '>=4'} + dependencies: + req-from: 2.0.0 + dev: true + + /req-from@2.0.0: + resolution: {integrity: sha512-LzTfEVDVQHBRfjOUMgNBA+V6DWsSnoeKzf42J7l0xa/B4jyPOuuF5MlNSmomLNGemWTnV2TIdjSSLnEn95fOQA==} + engines: {node: '>=4'} + dependencies: + resolve-from: 3.0.0 + dev: true + + /request-promise-core@1.1.4(request@2.88.2): + resolution: {integrity: sha512-TTbAfBBRdWD7aNNOoVOBH4pN/KigV6LyapYNNlAPA8JwbovRti1E88m3sYAwsLi5ryhPKsE9APwnjFTgdUjTpw==} + engines: {node: '>=0.10.0'} + peerDependencies: + request: ^2.34 + dependencies: + lodash: 4.17.21 + request: 2.88.2 + dev: true + + /request-promise-native@1.0.9(request@2.88.2): + resolution: {integrity: sha512-wcW+sIUiWnKgNY0dqCpOZkUbF/I+YPi+f09JZIDa39Ec+q82CpSYniDp+ISgTTbKmnpJWASeJBPZmoxH84wt3g==} + engines: {node: '>=0.12.0'} + deprecated: request-promise-native has been deprecated because it extends the now deprecated request package, see https://github.com/request/request/issues/3142 + peerDependencies: + request: ^2.34 + dependencies: + request: 2.88.2 + request-promise-core: 1.1.4(request@2.88.2) + stealthy-require: 1.1.1 + tough-cookie: 2.5.0 + dev: true + + /request@2.88.2: + resolution: {integrity: sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==} + engines: {node: '>= 6'} + deprecated: request has been deprecated, see https://github.com/request/request/issues/3142 + dependencies: + aws-sign2: 0.7.0 + aws4: 1.11.0 + caseless: 0.12.0 + combined-stream: 1.0.8 + extend: 3.0.2 + forever-agent: 0.6.1 + form-data: 2.3.3 + har-validator: 5.1.5 + http-signature: 1.2.0 + is-typedarray: 1.0.0 + isstream: 0.1.2 + json-stringify-safe: 5.0.1 + mime-types: 2.1.27 + oauth-sign: 0.9.0 + performance-now: 2.1.0 + qs: 6.5.3 + safe-buffer: 5.2.1 + tough-cookie: 2.5.0 + tunnel-agent: 0.6.0 + uuid: 3.4.0 + dev: true + + /require-directory@2.1.1: + resolution: {integrity: 
sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + dev: true + + /require-from-string@1.2.1: + resolution: {integrity: sha512-H7AkJWMobeskkttHyhTVtS0fxpFLjxhbfMa6Bk3wimP7sdPRGL3EyCg3sAQenFfAe+xQ+oAc85Nmtvq0ROM83Q==} + engines: {node: '>=0.10.0'} + dev: true + + /require-from-string@2.0.2: + resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} + engines: {node: '>=0.10.0'} + dev: true + + /require-main-filename@1.0.1: + resolution: {integrity: sha512-IqSUtOVP4ksd1C/ej5zeEh/BIP2ajqpn8c5x+q99gvcIG/Qf0cud5raVnE/Dwd0ua9TXYDoDc0RE5hBSdz22Ug==} + dev: true + + /require-main-filename@2.0.0: + resolution: {integrity: sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==} + dev: true + + /resolve-alpn@1.2.1: + resolution: {integrity: sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==} + dev: true + + /resolve-from@3.0.0: + resolution: {integrity: sha512-GnlH6vxLymXJNMBo7XP1fJIzBFbdYt49CuTwmB/6N53t+kMPRMFKz783LlQ4tv28XoQfMWinAJX6WCGf2IlaIw==} + engines: {node: '>=4'} + dev: true + + /resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + dev: true + + /resolve-url@0.2.1: + resolution: {integrity: sha512-ZuF55hVUQaaczgOIwqWzkEcEidmlD/xl44x1UZnhOXcYuFN2S6+rcxpG+C1N3So0wvNI3DmJICUFfu2SxhBmvg==} + deprecated: https://github.com/lydell/resolve-url#deprecated + dev: true + + /resolve@1.1.7: + resolution: {integrity: sha512-9znBF0vBcaSN3W2j7wKvdERPwqTxSpCq+if5C0WoTCyV9n24rua28jeuQ2pL/HOf+yUe/Mef+H/5p60K0Id3bg==} + dev: true + + /resolve@1.17.0: + resolution: {integrity: sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w==} + dependencies: + path-parse: 1.0.7 + dev: true + + /resolve@1.22.1: + 
resolution: {integrity: sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==} + hasBin: true + dependencies: + is-core-module: 2.10.0 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + dev: true + + /responselike@1.0.2: + resolution: {integrity: sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ==} + dependencies: + lowercase-keys: 1.0.1 + dev: true + + /responselike@2.0.1: + resolution: {integrity: sha512-4gl03wn3hj1HP3yzgdI7d3lCkF95F21Pz4BPGvKHinyQzALR5CapwC8yIi0Rh58DEMQ/SguC03wFj2k0M/mHhw==} + dependencies: + lowercase-keys: 2.0.0 + dev: true + + /resumer@0.0.0: + resolution: {integrity: sha512-Fn9X8rX8yYF4m81rZCK/5VmrmsSbqS/i3rDLl6ZZHAXgC2nTAx3dhwG8q8odP/RmdLa2YrybDJaAMg+X1ajY3w==} + dependencies: + through: 2.3.8 + dev: true + + /ret@0.1.15: + resolution: {integrity: sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==} + engines: {node: '>=0.12'} + dev: true + + /retry@0.12.0: + resolution: {integrity: sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==} + engines: {node: '>= 4'} + dev: true + + /retry@0.13.1: + resolution: {integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==} + engines: {node: '>= 4'} + dev: true + + /reusify@1.0.4: + resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + dev: true + + /rimraf@2.7.1: + resolution: {integrity: sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==} + hasBin: true + dependencies: + glob: 7.2.3 + dev: true + + /rimraf@3.0.2: + resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + hasBin: true + dependencies: + glob: 7.2.3 + dev: true + + 
/ripemd160-min@0.0.6: + resolution: {integrity: sha512-+GcJgQivhs6S9qvLogusiTcS9kQUfgR75whKuy5jIhuiOfQuJ8fjqxV6EGD5duH1Y/FawFUMtMhyeq3Fbnib8A==} + engines: {node: '>=8'} + dev: true + + /ripemd160@2.0.2: + resolution: {integrity: sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==} + dependencies: + hash-base: 3.1.0 + inherits: 2.0.4 + dev: true + + /rlp@2.2.7: + resolution: {integrity: sha512-d5gdPmgQ0Z+AklL2NVXr/IoSjNZFfTVvQWzL/AM2AOcSzYP2xjlb0AC8YyCLc41MSNf6P6QVtjgPdmVtzb+4lQ==} + hasBin: true + dependencies: + bn.js: 5.2.1 + dev: true + + /run-parallel-limit@1.1.0: + resolution: {integrity: sha512-jJA7irRNM91jaKc3Hcl1npHsFLOXOoTkPCUL1JEa1R82O2miplXXRaGdjW/KM/98YQWDhJLiSs793CnXfblJUw==} + dependencies: + queue-microtask: 1.2.3 + dev: true + + /run-parallel@1.1.9: + resolution: {integrity: sha512-DEqnSRTDw/Tc3FXf49zedI638Z9onwUotBMiUFKmrO2sdFKIbXamXGQ3Axd4qgphxKB4kw/qP1w5kTxnfU1B9Q==} + dev: true + + /rustbn.js@0.2.0: + resolution: {integrity: sha512-4VlvkRUuCJvr2J6Y0ImW7NvTCriMi7ErOAqWk1y69vAdoNIzCF3yPmgeNzx+RQTLEDFq5sHfscn1MwHxP9hNfA==} + dev: true + + /safe-array-concat@1.0.1: + resolution: {integrity: sha512-6XbUAseYE2KtOuGueyeobCySj9L4+66Tn6KQMOPQJrAJEowYKW/YR/MGJZl7FdydUdaFu4LYyDZjxf4/Nmo23Q==} + engines: {node: '>=0.4'} + dependencies: + call-bind: 1.0.5 + get-intrinsic: 1.2.2 + has-symbols: 1.0.3 + isarray: 2.0.5 + dev: true + + /safe-buffer@5.1.2: + resolution: {integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==} + dev: true + + /safe-buffer@5.2.1: + resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} + dev: true + + /safe-event-emitter@1.0.1: + resolution: {integrity: sha512-e1wFe99A91XYYxoQbcq2ZJUWurxEyP8vfz7A7vuUe1s95q8r5ebraVaA1BukYJcpM6V16ugWoD9vngi8Ccu5fg==} + deprecated: Renamed to @metamask/safe-event-emitter + dependencies: + events: 3.3.0 + dev: true + + /safe-regex-test@1.0.0: 
+ resolution: {integrity: sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==} + dependencies: + call-bind: 1.0.2 + get-intrinsic: 1.1.3 + is-regex: 1.1.4 + dev: true + + /safe-regex@1.1.0: + resolution: {integrity: sha512-aJXcif4xnaNUzvUuC5gcb46oTS7zvg4jpMTnuqtrEPlR3vFr4pxtdTwaF1Qs3Enjn9HK+ZlwQui+a7z0SywIzg==} + dependencies: + ret: 0.1.15 + dev: true + + /safer-buffer@2.1.2: + resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + dev: true + + /sc-istanbul@0.4.6: + resolution: {integrity: sha512-qJFF/8tW/zJsbyfh/iT/ZM5QNHE3CXxtLJbZsL+CzdJLBsPD7SedJZoUA4d8iAcN2IoMp/Dx80shOOd2x96X/g==} + hasBin: true + dependencies: + abbrev: 1.0.9 + async: 1.5.2 + escodegen: 1.8.1 + esprima: 2.7.3 + glob: 5.0.15 + handlebars: 4.7.7 + js-yaml: 3.14.1 + mkdirp: 0.5.6 + nopt: 3.0.6 + once: 1.4.0 + resolve: 1.1.7 + supports-color: 3.2.3 + which: 1.3.1 + wordwrap: 1.0.0 + dev: true + + /scrypt-js@2.0.4: + resolution: {integrity: sha512-4KsaGcPnuhtCZQCxFxN3GVYIhKFPTdLd8PLC552XwbMndtD0cjRFAhDuuydXQ0h08ZfPgzqe6EKHozpuH74iDw==} + dev: true + + /scrypt-js@3.0.1: + resolution: {integrity: sha512-cdwTTnqPu0Hyvf5in5asVdZocVDTNRmR7XEcJuIzMjJeSHybHl7vpB66AzwTaIg6CLSbtjcxc8fqcySfnTkccA==} + + /scryptsy@1.2.1: + resolution: {integrity: sha512-aldIRgMozSJ/Gl6K6qmJZysRP82lz83Wb42vl4PWN8SaLFHIaOzLPc9nUUW2jQN88CuGm5q5HefJ9jZ3nWSmTw==} + requiresBuild: true + dependencies: + pbkdf2: 3.1.2 + dev: true + optional: true + + /secp256k1@3.7.1: + resolution: {integrity: sha512-1cf8sbnRreXrQFdH6qsg2H71Xw91fCCS9Yp021GnUNJzWJS/py96fS4lHbnTnouLp08Xj6jBoBB6V78Tdbdu5g==} + engines: {node: '>=4.0.0'} + requiresBuild: true + dependencies: + bindings: 1.5.0 + bip66: 1.1.5 + bn.js: 4.12.0 + create-hash: 1.2.0 + drbg.js: 1.0.1 + elliptic: 6.5.4 + nan: 2.16.0 + safe-buffer: 5.2.1 + dev: true + + /secp256k1@4.0.3: + resolution: {integrity: 
sha512-NLZVf+ROMxwtEj3Xa562qgv2BK5e2WNmXPiOdVIPLgs6lyTzMvBq0aWTYMI5XCP9jZMVKOcqZLw/Wc4vDkuxhA==} + engines: {node: '>=10.0.0'} + requiresBuild: true + dependencies: + elliptic: 6.5.4 + node-addon-api: 2.0.2 + node-gyp-build: 4.5.0 + dev: true + + /seedrandom@3.0.1: + resolution: {integrity: sha512-1/02Y/rUeU1CJBAGLebiC5Lbo5FnB22gQbIFFYTLkwvp1xdABZJH1sn4ZT1MzXmPpzv+Rf/Lu2NcsLJiK4rcDg==} + dev: true + + /semaphore@1.1.0: + resolution: {integrity: sha512-O4OZEaNtkMd/K0i6js9SL+gqy0ZCBMgUvlSqHKi4IBdjhe7wB8pwztUk1BbZ1fmrvpwFrPbHzqd2w5pTcJH6LA==} + engines: {node: '>=0.8.0'} + dev: true + + /semver@5.4.1: + resolution: {integrity: sha512-WfG/X9+oATh81XtllIo/I8gOiY9EXRdv1cQdyykeXK17YcUW3EXUAi2To4pcH6nZtJPr7ZOpM5OMyWJZm+8Rsg==} + hasBin: true + dev: true + + /semver@5.7.1: + resolution: {integrity: sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==} + hasBin: true + dev: true + + /semver@6.3.0: + resolution: {integrity: sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==} + hasBin: true + dev: true + + /semver@7.3.7: + resolution: {integrity: sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==} + engines: {node: '>=10'} + hasBin: true + dependencies: + lru-cache: 6.0.0 + dev: true + + /semver@7.5.4: + resolution: {integrity: sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==} + engines: {node: '>=10'} + hasBin: true + dependencies: + lru-cache: 6.0.0 + dev: true + + /send@0.17.1: + resolution: {integrity: sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==} + engines: {node: '>= 0.8.0'} + dependencies: + debug: 2.6.9 + depd: 1.1.2 + destroy: 1.0.4 + encodeurl: 1.0.2 + escape-html: 1.0.3 + etag: 1.8.1 + fresh: 0.5.2 + http-errors: 1.7.3 + mime: 1.6.0 + ms: 2.1.1 + on-finished: 2.3.0 + range-parser: 1.2.1 + statuses: 1.5.0 + transitivePeerDependencies: + - 
supports-color + dev: true + + /sentence-case@2.1.1: + resolution: {integrity: sha512-ENl7cYHaK/Ktwk5OTD+aDbQ3uC8IByu/6Bkg+HDv8Mm+XnBnppVNalcfJTNsp1ibstKh030/JKQQWglDvtKwEQ==} + dependencies: + no-case: 2.3.2 + upper-case-first: 1.1.2 + dev: true + + /serialize-javascript@6.0.0: + resolution: {integrity: sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==} + dependencies: + randombytes: 2.1.0 + dev: true + + /serve-static@1.14.1: + resolution: {integrity: sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==} + engines: {node: '>= 0.8.0'} + dependencies: + encodeurl: 1.0.2 + escape-html: 1.0.3 + parseurl: 1.3.3 + send: 0.17.1 + transitivePeerDependencies: + - supports-color + dev: true + + /servify@0.1.12: + resolution: {integrity: sha512-/xE6GvsKKqyo1BAY+KxOWXcLpPsUUyji7Qg3bVD7hh1eRze5bR1uYiuDA/k3Gof1s9BTzQZEJK8sNcNGFIzeWw==} + engines: {node: '>=6'} + dependencies: + body-parser: 1.19.0 + cors: 2.8.5 + express: 4.17.1 + request: 2.88.2 + xhr: 2.5.0 + transitivePeerDependencies: + - supports-color + dev: true + + /set-blocking@2.0.0: + resolution: {integrity: sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==} + dev: true + + /set-function-length@1.1.1: + resolution: {integrity: sha512-VoaqjbBJKiWtg4yRcKBQ7g7wnGnLV3M8oLvVWwOk2PdYY6PEFegR1vezXR0tw6fZGF9csVakIRjrJiy2veSBFQ==} + engines: {node: '>= 0.4'} + dependencies: + define-data-property: 1.1.1 + get-intrinsic: 1.2.2 + gopd: 1.0.1 + has-property-descriptors: 1.0.0 + dev: true + + /set-function-name@2.0.1: + resolution: {integrity: sha512-tMNCiqYVkXIZgc2Hnoy2IvC/f8ezc5koaRFkCjrpWzGpCd3qbZXPzVy9MAZzK1ch/X0jvSkojys3oqJN0qCmdA==} + engines: {node: '>= 0.4'} + dependencies: + define-data-property: 1.1.1 + functions-have-names: 1.2.3 + has-property-descriptors: 1.0.0 + dev: true + + /set-immediate-shim@1.0.1: + resolution: {integrity: 
sha512-Li5AOqrZWCVA2n5kryzEmqai6bKSIvpz5oUJHPVj6+dsbD3X1ixtsY5tEnsaNpH3pFAHmG8eIHUrtEtohrg+UQ==} + engines: {node: '>=0.10.0'} + dev: true + + /set-value@2.0.1: + resolution: {integrity: sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==} + engines: {node: '>=0.10.0'} + dependencies: + extend-shallow: 2.0.1 + is-extendable: 0.1.1 + is-plain-object: 2.0.4 + split-string: 3.1.0 + dev: true + + /setimmediate@1.0.4: + resolution: {integrity: sha512-/TjEmXQVEzdod/FFskf3o7oOAsGhHf2j1dZqRFbDzq4F3mvvxflIIi4Hd3bLQE9y/CpwqfSQam5JakI/mi3Pog==} + dev: true + + /setimmediate@1.0.5: + resolution: {integrity: sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==} + dev: true + + /setprototypeof@1.1.1: + resolution: {integrity: sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==} + dev: true + + /setprototypeof@1.2.0: + resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} + dev: true + + /sha.js@2.4.11: + resolution: {integrity: sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==} + hasBin: true + dependencies: + inherits: 2.0.4 + safe-buffer: 5.2.1 + dev: true + + /sha1@1.1.1: + resolution: {integrity: sha512-dZBS6OrMjtgVkopB1Gmo4RQCDKiZsqcpAQpkV/aaj+FCrCg8r4I4qMkDPQjBgLIxlmu9k4nUbWq6ohXahOneYA==} + dependencies: + charenc: 0.0.2 + crypt: 0.0.2 + dev: true + + /sha3@1.2.6: + resolution: {integrity: sha512-KgLGmJGrmNB4JWVsAV11Yk6KbvsAiygWJc7t5IebWva/0NukNrjJqhtKhzy3Eiv2AKuGvhZZt7dt1mDo7HkoiQ==} + requiresBuild: true + dependencies: + nan: 2.13.2 + dev: true + + /sha3@2.1.4: + resolution: {integrity: sha512-S8cNxbyb0UGUM2VhRD4Poe5N58gJnJsLJ5vC7FYWGUmGhcsj4++WaIOBFVDxlG0W3To6xBuiRh+i0Qp2oNCOtg==} + dependencies: + buffer: 6.0.3 + dev: true + + /shallowequal@1.1.0: + resolution: {integrity: 
sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==} + dev: true + + /shebang-command@1.2.0: + resolution: {integrity: sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg==} + engines: {node: '>=0.10.0'} + dependencies: + shebang-regex: 1.0.0 + dev: true + + /shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + dependencies: + shebang-regex: 3.0.0 + dev: true + + /shebang-regex@1.0.0: + resolution: {integrity: sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==} + engines: {node: '>=0.10.0'} + dev: true + + /shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + dev: true + + /shelljs@0.8.3: + resolution: {integrity: sha512-fc0BKlAWiLpwZljmOvAOTE/gXawtCoNrP5oaY7KIaQbbyHeQVg01pSEuEGvGh3HEdBU4baCD7wQBwADmM/7f7A==} + engines: {node: '>=4'} + hasBin: true + dependencies: + glob: 7.2.3 + interpret: 1.2.0 + rechoir: 0.6.2 + dev: true + + /side-channel@1.0.4: + resolution: {integrity: sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==} + dependencies: + call-bind: 1.0.2 + get-intrinsic: 1.1.3 + object-inspect: 1.12.2 + dev: true + + /signal-exit@3.0.7: + resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} + dev: true + + /simple-concat@1.0.0: + resolution: {integrity: sha512-pgxq9iGMSS24atefsqEznXW1Te610qB4pwMdrEg6mxczHh7sPtPyiixkP/VaQic8JjZofnIvT7CDeKlHqfbPBg==} + dev: true + + /simple-get@2.8.1: + resolution: {integrity: sha512-lSSHRSw3mQNUGPAYRqo7xy9dhKmxFXIjLjp4KHpf99GEH2VH7C3AM+Qfx6du6jhfUi6Vm7XnbEVEf7Wb6N8jRw==} + dependencies: + decompress-response: 3.3.0 + once: 1.4.0 + simple-concat: 1.0.0 + 
dev: true + + /slash@1.0.0: + resolution: {integrity: sha512-3TYDR7xWt4dIqV2JauJr+EJeW356RXijHeUlO+8djJ+uBXPn8/2dpzBc8yQhh583sVvc9CvFAeQVgijsH+PNNg==} + engines: {node: '>=0.10.0'} + dev: true + + /slash@2.0.0: + resolution: {integrity: sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==} + engines: {node: '>=6'} + dev: true + + /slash@3.0.0: + resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} + engines: {node: '>=8'} + dev: true + + /slice-ansi@4.0.0: + resolution: {integrity: sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==} + engines: {node: '>=10'} + dependencies: + ansi-styles: 4.3.0 + astral-regex: 2.0.0 + is-fullwidth-code-point: 3.0.0 + dev: true + + /snake-case@2.1.0: + resolution: {integrity: sha512-FMR5YoPFwOLuh4rRz92dywJjyKYZNLpMn1R5ujVpIYkbA9p01fq8RMg0FkO4M+Yobt4MjHeLTJVm5xFFBHSV2Q==} + dependencies: + no-case: 2.3.2 + dev: true + + /snapdragon-node@2.1.1: + resolution: {integrity: sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==} + engines: {node: '>=0.10.0'} + dependencies: + define-property: 1.0.0 + isobject: 3.0.1 + snapdragon-util: 3.0.1 + dev: true + + /snapdragon-util@3.0.1: + resolution: {integrity: sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==} + engines: {node: '>=0.10.0'} + dependencies: + kind-of: 3.2.2 + dev: true + + /snapdragon@0.8.2: + resolution: {integrity: sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==} + engines: {node: '>=0.10.0'} + dependencies: + base: 0.11.2 + debug: 2.6.9 + define-property: 0.2.5 + extend-shallow: 2.0.1 + map-cache: 0.2.2 + source-map: 0.5.7 + source-map-resolve: 0.5.3 + use: 3.1.1 + transitivePeerDependencies: + - supports-color + dev: true + + /solc@0.4.26: + resolution: {integrity: 
sha512-o+c6FpkiHd+HPjmjEVpQgH7fqZ14tJpXhho+/bQXlXbliLIS/xjXb42Vxh+qQY1WCSTMQ0+a5vR9vi0MfhU6mA==} + hasBin: true + dependencies: + fs-extra: 0.30.0 + memorystream: 0.3.1 + require-from-string: 1.2.1 + semver: 5.7.1 + yargs: 4.8.1 + dev: true + + /solc@0.6.12: + resolution: {integrity: sha512-Lm0Ql2G9Qc7yPP2Ba+WNmzw2jwsrd3u4PobHYlSOxaut3TtUbj9+5ZrT6f4DUpNPEoBaFUOEg9Op9C0mk7ge9g==} + engines: {node: '>=8.0.0'} + hasBin: true + dependencies: + command-exists: 1.2.9 + commander: 3.0.2 + fs-extra: 0.30.0 + js-sha3: 0.8.0 + memorystream: 0.3.1 + require-from-string: 2.0.2 + semver: 5.7.1 + tmp: 0.0.33 + dev: true + + /solc@0.7.3(debug@4.3.4): + resolution: {integrity: sha512-GAsWNAjGzIDg7VxzP6mPjdurby3IkGCjQcM8GFYZT6RyaoUZKmMU6Y7YwG+tFGhv7dwZ8rmR4iwFDrrD99JwqA==} + engines: {node: '>=8.0.0'} + hasBin: true + dependencies: + command-exists: 1.2.9 + commander: 3.0.2 + follow-redirects: 1.15.2(debug@4.3.4) + fs-extra: 0.30.0 + js-sha3: 0.8.0 + memorystream: 0.3.1 + require-from-string: 2.0.2 + semver: 5.7.1 + tmp: 0.0.33 + transitivePeerDependencies: + - debug + dev: true + + /solhint-plugin-prettier@0.1.0(prettier-plugin-solidity@1.3.1)(prettier@3.2.5): + resolution: {integrity: sha512-SDOTSM6tZxZ6hamrzl3GUgzF77FM6jZplgL2plFBclj/OjKP8Z3eIPojKU73gRr0MvOS8ACZILn8a5g0VTz/Gw==} + peerDependencies: + prettier: ^3.0.0 + prettier-plugin-solidity: ^1.0.0 + dependencies: + '@prettier/sync': 0.3.0(prettier@3.2.5) + prettier: 3.2.5 + prettier-linter-helpers: 1.0.0 + prettier-plugin-solidity: 1.3.1(prettier@3.2.5) + dev: true + + /solhint@4.1.1: + resolution: {integrity: sha512-7G4iF8H5hKHc0tR+/uyZesSKtfppFIMvPSW+Ku6MSL25oVRuyFeqNhOsXHfkex64wYJyXs4fe+pvhB069I19Tw==} + hasBin: true + dependencies: + '@solidity-parser/parser': 0.16.2 + ajv: 6.12.6 + antlr4: 4.13.0 + ast-parents: 0.0.1 + chalk: 4.1.2 + commander: 10.0.1 + cosmiconfig: 8.2.0 + fast-diff: 1.2.0 + glob: 8.1.0 + ignore: 5.2.4 + js-yaml: 4.1.0 + latest-version: 7.0.0 + lodash: 4.17.21 + pluralize: 8.0.0 + semver: 7.5.4 + 
strip-ansi: 6.0.1 + table: 6.8.1 + text-table: 0.2.0 + optionalDependencies: + prettier: 2.8.8 + dev: true + + /solidity-ast@0.4.55: + resolution: {integrity: sha512-qeEU/r/K+V5lrAw8iswf2/yfWAnSGs3WKPHI+zAFKFjX0dIBVXEU/swQ8eJQYHf6PJWUZFO2uWV4V1wEOkeQbA==} + dependencies: + array.prototype.findlast: 1.2.3 + dev: true + + /solidity-comments-darwin-arm64@0.0.2: + resolution: {integrity: sha512-HidWkVLSh7v+Vu0CA7oI21GWP/ZY7ro8g8OmIxE8oTqyMwgMbE8F1yc58Sj682Hj199HCZsjmtn1BE4PCbLiGA==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /solidity-comments-darwin-x64@0.0.2: + resolution: {integrity: sha512-Zjs0Ruz6faBTPT6fBecUt6qh4CdloT8Bwoc0+qxRoTn9UhYscmbPQkUgQEbS0FQPysYqVzzxJB4h1Ofbf4wwtA==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /solidity-comments-extractor@0.0.8: + resolution: {integrity: sha512-htM7Vn6LhHreR+EglVMd2s+sZhcXAirB1Zlyrv5zBuTxieCvjfnRpd7iZk75m/u6NOlEyQ94C6TWbBn2cY7w8g==} + dev: true + + /solidity-comments-freebsd-x64@0.0.2: + resolution: {integrity: sha512-8Qe4mpjuAxFSwZJVk7B8gAoLCdbtS412bQzBwk63L8dmlHogvE39iT70aAk3RHUddAppT5RMBunlPUCFYJ3ZTw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [freebsd] + requiresBuild: true + dev: true + optional: true + + /solidity-comments-linux-arm64-gnu@0.0.2: + resolution: {integrity: sha512-spkb0MZZnmrP+Wtq4UxP+nyPAVRe82idOjqndolcNR0S9Xvu4ebwq+LvF4HiUgjTDmeiqYiFZQ8T9KGdLSIoIg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /solidity-comments-linux-arm64-musl@0.0.2: + resolution: {integrity: sha512-guCDbHArcjE+JDXYkxx5RZzY1YF6OnAKCo+sTC5fstyW/KGKaQJNPyBNWuwYsQiaEHpvhW1ha537IvlGek8GqA==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /solidity-comments-linux-x64-gnu@0.0.2: + resolution: {integrity: 
sha512-zIqLehBK/g7tvrFmQljrfZXfkEeLt2v6wbe+uFu6kH/qAHZa7ybt8Vc0wYcmjo2U0PeBm15d79ee3AkwbIjFdQ==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /solidity-comments-linux-x64-musl@0.0.2: + resolution: {integrity: sha512-R9FeDloVlFGTaVkOlELDVC7+1Tjx5WBPI5L8r0AGOPHK3+jOcRh6sKYpI+VskSPDc3vOO46INkpDgUXrKydlIw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /solidity-comments-win32-arm64-msvc@0.0.2: + resolution: {integrity: sha512-QnWJoCQcJj+rnutULOihN9bixOtYWDdF5Rfz9fpHejL1BtNjdLW1om55XNVHGAHPqBxV4aeQQ6OirKnp9zKsug==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /solidity-comments-win32-ia32-msvc@0.0.2: + resolution: {integrity: sha512-vUg4nADtm/NcOtlIymG23NWJUSuMsvX15nU7ynhGBsdKtt8xhdP3C/zA6vjDk8Jg+FXGQL6IHVQ++g/7rSQi0w==} + engines: {node: '>= 10'} + cpu: [ia32] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /solidity-comments-win32-x64-msvc@0.0.2: + resolution: {integrity: sha512-36j+KUF4V/y0t3qatHm/LF5sCUCBx2UndxE1kq5bOzh/s+nQgatuyB+Pd5BfuPQHdWu2KaExYe20FlAa6NL7+Q==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /solidity-comments@0.0.2: + resolution: {integrity: sha512-G+aK6qtyUfkn1guS8uzqUeua1dURwPlcOjoTYW/TwmXAcE7z/1+oGCfZUdMSe4ZMKklNbVZNiG5ibnF8gkkFfw==} + engines: {node: '>= 12'} + optionalDependencies: + solidity-comments-darwin-arm64: 0.0.2 + solidity-comments-darwin-x64: 0.0.2 + solidity-comments-freebsd-x64: 0.0.2 + solidity-comments-linux-arm64-gnu: 0.0.2 + solidity-comments-linux-arm64-musl: 0.0.2 + solidity-comments-linux-x64-gnu: 0.0.2 + solidity-comments-linux-x64-musl: 0.0.2 + solidity-comments-win32-arm64-msvc: 0.0.2 + solidity-comments-win32-ia32-msvc: 0.0.2 + solidity-comments-win32-x64-msvc: 0.0.2 + dev: true + + /solidity-coverage@0.8.5(hardhat@2.19.2): + resolution: 
{integrity: sha512-6C6N6OV2O8FQA0FWA95FdzVH+L16HU94iFgg5wAFZ29UpLFkgNI/DRR2HotG1bC0F4gAc/OMs2BJI44Q/DYlKQ==} + hasBin: true + peerDependencies: + hardhat: ^2.11.0 + dependencies: + '@ethersproject/abi': 5.7.0 + '@solidity-parser/parser': 0.16.0 + chalk: 2.4.2 + death: 1.1.0 + detect-port: 1.3.0 + difflib: 0.2.4 + fs-extra: 8.1.0 + ghost-testrpc: 0.0.2 + global-modules: 2.0.0 + globby: 10.0.2 + hardhat: 2.19.2(ts-node@10.9.2)(typescript@5.3.3) + jsonschema: 1.4.0 + lodash: 4.17.21 + mocha: 10.2.0 + node-emoji: 1.11.0 + pify: 4.0.1 + recursive-readdir: 2.2.2 + sc-istanbul: 0.4.6 + semver: 7.5.4 + shelljs: 0.8.3 + web3-utils: 1.8.0 + transitivePeerDependencies: + - supports-color + dev: true + + /sort-any@2.0.0: + resolution: {integrity: sha512-T9JoiDewQEmWcnmPn/s9h/PH9t3d/LSWi0RgVmXSuDYeZXTZOZ1/wrK2PHaptuR1VXe3clLLt0pD6sgVOwjNEA==} + dependencies: + lodash: 4.17.21 + dev: true + + /source-map-resolve@0.5.3: + resolution: {integrity: sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==} + deprecated: See https://github.com/lydell/source-map-resolve#deprecated + dependencies: + atob: 2.1.2 + decode-uri-component: 0.2.0 + resolve-url: 0.2.1 + source-map-url: 0.4.1 + urix: 0.1.0 + dev: true + + /source-map-support@0.4.18: + resolution: {integrity: sha512-try0/JqxPLF9nOjvSta7tVondkP5dwgyLDjVoyMDlmjugT2lRZ1OfsrYTkCd2hkDnJTKRbO/Rl3orm8vlsUzbA==} + dependencies: + source-map: 0.5.7 + dev: true + + /source-map-support@0.5.12: + resolution: {integrity: sha512-4h2Pbvyy15EE02G+JOZpUCmqWJuqrs+sEkzewTm++BPi7Hvn/HwcqLAcNxYAyI0x13CpPPn+kMjl+hplXMHITQ==} + dependencies: + buffer-from: 1.1.2 + source-map: 0.6.1 + dev: true + + /source-map-support@0.5.21: + resolution: {integrity: sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==} + dependencies: + buffer-from: 1.1.2 + source-map: 0.6.1 + dev: true + + /source-map-url@0.4.1: + resolution: {integrity: 
sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw==} + deprecated: See https://github.com/lydell/source-map-url#deprecated + dev: true + + /source-map@0.2.0: + resolution: {integrity: sha512-CBdZ2oa/BHhS4xj5DlhjWNHcan57/5YuvfdLf17iVmIpd9KRm+DFLmC6nBNj+6Ua7Kt3TmOjDpQT1aTYOQtoUA==} + engines: {node: '>=0.8.0'} + requiresBuild: true + dependencies: + amdefine: 1.0.1 + dev: true + optional: true + + /source-map@0.5.7: + resolution: {integrity: sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==} + engines: {node: '>=0.10.0'} + dev: true + + /source-map@0.6.1: + resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} + engines: {node: '>=0.10.0'} + dev: true + + /spdx-correct@3.1.1: + resolution: {integrity: sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==} + dependencies: + spdx-expression-parse: 3.0.1 + spdx-license-ids: 3.0.12 + dev: true + + /spdx-exceptions@2.3.0: + resolution: {integrity: sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==} + dev: true + + /spdx-expression-parse@3.0.1: + resolution: {integrity: sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==} + dependencies: + spdx-exceptions: 2.3.0 + spdx-license-ids: 3.0.12 + dev: true + + /spdx-license-ids@3.0.12: + resolution: {integrity: sha512-rr+VVSXtRhO4OHbXUiAF7xW3Bo9DuuF6C5jH+q/x15j2jniycgKbxU09Hr0WqlSLUs4i4ltHGXqTe7VHclYWyA==} + dev: true + + /split-string@3.1.0: + resolution: {integrity: sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==} + engines: {node: '>=0.10.0'} + dependencies: + extend-shallow: 3.0.2 + dev: true + + /sprintf-js@1.0.3: + resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} + dev: 
true + + /sshpk@1.16.1: + resolution: {integrity: sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==} + engines: {node: '>=0.10.0'} + hasBin: true + dependencies: + asn1: 0.2.4 + assert-plus: 1.0.0 + bcrypt-pbkdf: 1.0.2 + dashdash: 1.14.1 + ecc-jsbn: 0.1.2 + getpass: 0.1.7 + jsbn: 0.1.1 + safer-buffer: 2.1.2 + tweetnacl: 0.14.5 + dev: true + + /stacktrace-parser@0.1.10: + resolution: {integrity: sha512-KJP1OCML99+8fhOHxwwzyWrlUuVX5GQ0ZpJTd1DFXhdkrvg1szxfHhawXUZ3g9TkXORQd4/WG68jMlQZ2p8wlg==} + engines: {node: '>=6'} + dependencies: + type-fest: 0.7.1 + dev: true + + /static-extend@0.1.2: + resolution: {integrity: sha512-72E9+uLc27Mt718pMHt9VMNiAL4LMsmDbBva8mxWUCkT07fSzEGMYUCk0XWY6lp0j6RBAG4cJ3mWuZv2OE3s0g==} + engines: {node: '>=0.10.0'} + dependencies: + define-property: 0.2.5 + object-copy: 0.1.0 + dev: true + + /statuses@1.5.0: + resolution: {integrity: sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==} + engines: {node: '>= 0.6'} + dev: true + + /statuses@2.0.1: + resolution: {integrity: sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==} + engines: {node: '>= 0.8'} + dev: true + + /stealthy-require@1.1.1: + resolution: {integrity: sha512-ZnWpYnYugiOVEY5GkcuJK1io5V8QmNYChG62gSit9pQVGErXtrKuPC55ITaVSukmMta5qpMU7vqLt2Lnni4f/g==} + engines: {node: '>=0.10.0'} + dev: true + + /stream-to-pull-stream@1.7.3: + resolution: {integrity: sha512-6sNyqJpr5dIOQdgNy/xcDWwDuzAsAwVzhzrWlAPAQ7Lkjx/rv0wgvxEyKwTq6FmNd5rjTrELt/CLmaSw7crMGg==} + dependencies: + looper: 3.0.0 + pull-stream: 3.6.14 + dev: true + + /streamsearch@1.1.0: + resolution: {integrity: sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==} + engines: {node: '>=10.0.0'} + dev: true + + /strict-uri-encode@1.1.0: + resolution: {integrity: sha512-R3f198pcvnB+5IpnBlRkphuE9n46WyVl8I39W/ZUTZLz4nqSP/oLYUrcnJrw462Ds8he4YKMov2efsTIw1BDGQ==} + 
engines: {node: '>=0.10.0'} + dev: true + + /string-format@2.0.0: + resolution: {integrity: sha512-bbEs3scLeYNXLecRRuk6uJxdXUSj6le/8rNPHChIJTn2V79aXVTR1EH2OH5zLKKoz0V02fOUKZZcw01pLUShZA==} + dev: true + + /string-width@1.0.2: + resolution: {integrity: sha512-0XsVpQLnVCXHJfyEs8tC0zpTVIr5PKKsQtkT29IwupnPTjtPmQ3xT/4yCREF9hYkV/3M3kzcUTSAZT6a6h81tw==} + engines: {node: '>=0.10.0'} + dependencies: + code-point-at: 1.1.0 + is-fullwidth-code-point: 1.0.0 + strip-ansi: 3.0.1 + dev: true + + /string-width@2.1.1: + resolution: {integrity: sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==} + engines: {node: '>=4'} + dependencies: + is-fullwidth-code-point: 2.0.0 + strip-ansi: 4.0.0 + dev: true + + /string-width@3.1.0: + resolution: {integrity: sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==} + engines: {node: '>=6'} + dependencies: + emoji-regex: 7.0.3 + is-fullwidth-code-point: 2.0.0 + strip-ansi: 5.2.0 + dev: true + + /string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + dev: true + + /string.prototype.trim@1.2.8: + resolution: {integrity: sha512-lfjY4HcixfQXOfaqCvcBuOIapyaroTXhbkfJN3gcB1OtyupngWK4sEET9Knd0cXd28kTUqu/kHoV4HKSJdnjiQ==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.5 + define-properties: 1.2.1 + es-abstract: 1.22.3 + dev: true + + /string.prototype.trimend@1.0.5: + resolution: {integrity: sha512-I7RGvmjV4pJ7O3kdf+LXFpVfdNOxtCW/2C8f6jNiW4+PQchwxkCDzlk1/7p+Wl4bqFIZeF47qAHXLuHHWKAxog==} + dependencies: + call-bind: 1.0.2 + define-properties: 1.1.4 + es-abstract: 1.20.3 + dev: true + + /string.prototype.trimend@1.0.7: + resolution: {integrity: sha512-Ni79DqeB72ZFq1uH/L6zJ+DKZTkOtPIHovb3YZHQViE+HDouuU4mBrLOLDn5Dde3RF8qw5qVETEjhu9locMLvA==} + dependencies: 
+ call-bind: 1.0.5 + define-properties: 1.2.1 + es-abstract: 1.22.3 + dev: true + + /string.prototype.trimstart@1.0.5: + resolution: {integrity: sha512-THx16TJCGlsN0o6dl2o6ncWUsdgnLRSA23rRE5pyGBw/mLr3Ej/R2LaqCtgP8VNMGZsvMWnf9ooZPyY2bHvUFg==} + dependencies: + call-bind: 1.0.2 + define-properties: 1.1.4 + es-abstract: 1.20.3 + dev: true + + /string.prototype.trimstart@1.0.7: + resolution: {integrity: sha512-NGhtDFu3jCEm7B4Fy0DpLewdJQOZcQ0rGbwQ/+stjnrp2i+rlKeCvos9hOIeCmqwratM47OBxY7uFZzjxHXmrg==} + dependencies: + call-bind: 1.0.5 + define-properties: 1.2.1 + es-abstract: 1.22.3 + dev: true + + /string_decoder@0.10.31: + resolution: {integrity: sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==} + dev: true + + /string_decoder@1.1.1: + resolution: {integrity: sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==} + dependencies: + safe-buffer: 5.1.2 + dev: true + + /string_decoder@1.3.0: + resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==} + dependencies: + safe-buffer: 5.2.1 + dev: true + + /strip-ansi@3.0.1: + resolution: {integrity: sha512-VhumSSbBqDTP8p2ZLKj40UjBCV4+v8bUSEpUb4KjRgWk9pbqGF4REFj6KEagidb2f/M6AzC0EmFyDNGaw9OCzg==} + engines: {node: '>=0.10.0'} + dependencies: + ansi-regex: 2.1.1 + dev: true + + /strip-ansi@4.0.0: + resolution: {integrity: sha512-4XaJ2zQdCzROZDivEVIDPkcQn8LMFSa8kj8Gxb/Lnwzv9A8VctNZ+lfivC/sV3ivW8ElJTERXZoPBRrZKkNKow==} + engines: {node: '>=4'} + dependencies: + ansi-regex: 3.0.1 + dev: true + + /strip-ansi@5.2.0: + resolution: {integrity: sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==} + engines: {node: '>=6'} + dependencies: + ansi-regex: 4.1.1 + dev: true + + /strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + 
dependencies: + ansi-regex: 5.0.1 + dev: true + + /strip-bom@2.0.0: + resolution: {integrity: sha512-kwrX1y7czp1E69n2ajbG65mIo9dqvJ+8aBQXOGVxqwvNbsXdFM6Lq37dLAY3mknUwru8CfcCbfOLL/gMo+fi3g==} + engines: {node: '>=0.10.0'} + dependencies: + is-utf8: 0.2.1 + dev: true + + /strip-hex-prefix@1.0.0: + resolution: {integrity: sha512-q8d4ue7JGEiVcypji1bALTos+0pWtyGlivAWyPuTkHzuTCJqrK9sWxYQZUq6Nq3cuyv3bm734IhHvHtGGURU6A==} + engines: {node: '>=6.5.0', npm: '>=3'} + dependencies: + is-hex-prefixed: 1.0.0 + dev: true + + /strip-indent@2.0.0: + resolution: {integrity: sha512-RsSNPLpq6YUL7QYy44RnPVTn/lcVZtb48Uof3X5JLbF4zD/Gs7ZFDv2HWol+leoQN2mT86LAzSshGfkTlSOpsA==} + engines: {node: '>=4'} + dev: true + + /strip-json-comments@2.0.1: + resolution: {integrity: sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==} + engines: {node: '>=0.10.0'} + dev: true + + /strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + dev: true + + /supports-color@2.0.0: + resolution: {integrity: sha512-KKNVtd6pCYgPIKU4cp2733HWYCpplQhddZLBUryaAHou723x+FRzQ5Df824Fj+IyyuiQTRoub4SnIFfIcrp70g==} + engines: {node: '>=0.8.0'} + dev: true + + /supports-color@3.2.3: + resolution: {integrity: sha512-Jds2VIYDrlp5ui7t8abHN2bjAu4LV/q4N2KivFPpGH0lrka0BMq/33AmECUXlKPcHigkNaqfXRENFju+rlcy+A==} + engines: {node: '>=0.8.0'} + dependencies: + has-flag: 1.0.0 + dev: true + + /supports-color@5.5.0: + resolution: {integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==} + engines: {node: '>=4'} + dependencies: + has-flag: 3.0.0 + dev: true + + /supports-color@6.0.0: + resolution: {integrity: sha512-on9Kwidc1IUQo+bQdhi8+Tijpo0e1SS6RoGo2guUwn5vdaxw8RXOF9Vb2ws+ihWOmh4JnCJOvaziZWP1VABaLg==} + engines: {node: '>=6'} + dependencies: + has-flag: 3.0.0 + dev: true + + /supports-color@7.2.0: + resolution: {integrity: 
sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + dependencies: + has-flag: 4.0.0 + dev: true + + /supports-color@8.1.1: + resolution: {integrity: sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==} + engines: {node: '>=10'} + dependencies: + has-flag: 4.0.0 + dev: true + + /supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + dev: true + + /swap-case@1.1.2: + resolution: {integrity: sha512-BAmWG6/bx8syfc6qXPprof3Mn5vQgf5dwdUNJhsNqU9WdPt5P+ES/wQ5bxfijy8zwZgZZHslC3iAsxsuQMCzJQ==} + dependencies: + lower-case: 1.1.4 + upper-case: 1.1.3 + dev: true + + /swarm-js@0.1.40: + resolution: {integrity: sha512-yqiOCEoA4/IShXkY3WKwP5PvZhmoOOD8clsKA7EEcRILMkTEYHCQ21HDCAcVpmIxZq4LyZvWeRJ6quIyHk1caA==} + dependencies: + bluebird: 3.7.2 + buffer: 5.7.1 + eth-lib: 0.1.29 + fs-extra: 4.0.3 + got: 7.1.0 + mime-types: 2.1.27 + mkdirp-promise: 5.0.1 + mock-fs: 4.12.0 + setimmediate: 1.0.5 + tar: 4.4.19 + xhr-request: 1.1.0 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /sync-request@6.1.0: + resolution: {integrity: sha512-8fjNkrNlNCrVc/av+Jn+xxqfCjYaBoHqCsDz6mt030UMxJGr+GSfCV1dQt2gRtlL63+VPidwDVLr7V2OcTSdRw==} + engines: {node: '>=8.0.0'} + dependencies: + http-response-object: 3.0.2 + sync-rpc: 1.3.6 + then-request: 6.0.2 + dev: true + + /sync-rpc@1.3.6: + resolution: {integrity: sha512-J8jTXuZzRlvU7HemDgHi3pGnh/rkoqR/OZSjhTyyZrEkkYQbk7Z33AXp37mkPfPpfdOuj7Ex3H/TJM1z48uPQw==} + dependencies: + get-port: 3.2.0 + dev: true + + /synckit@0.8.8: + resolution: {integrity: sha512-HwOKAP7Wc5aRGYdKH+dw0PRRpbO841v2DENBtjnR5HFWoiNByAl7vrx3p0G/rCyYXQsrxqtX48TImFtPcIHSpQ==} + engines: {node: ^14.18.0 || >=16.0.0} + dependencies: + '@pkgr/core': 0.1.1 + tslib: 2.6.2 + dev: true + + 
/table-layout@1.0.2: + resolution: {integrity: sha512-qd/R7n5rQTRFi+Zf2sk5XVVd9UQl6ZkduPFC3S7WEGJAmetDTjY3qPN50eSKzwuzEyQKy5TN2TiZdkIjos2L6A==} + engines: {node: '>=8.0.0'} + dependencies: + array-back: 4.0.2 + deep-extend: 0.6.0 + typical: 5.2.0 + wordwrapjs: 4.0.1 + dev: true + + /table@6.8.1: + resolution: {integrity: sha512-Y4X9zqrCftUhMeH2EptSSERdVKt/nEdijTOacGD/97EKjhQ/Qs8RTlEGABSJNNN8lac9kheH+af7yAkEWlgneA==} + engines: {node: '>=10.0.0'} + dependencies: + ajv: 8.11.0 + lodash.truncate: 4.4.2 + slice-ansi: 4.0.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + dev: true + + /tape@4.16.1: + resolution: {integrity: sha512-U4DWOikL5gBYUrlzx+J0oaRedm2vKLFbtA/+BRAXboGWpXO7bMP8ddxlq3Cse2bvXFQ0jZMOj6kk3546mvCdFg==} + hasBin: true + dependencies: + call-bind: 1.0.5 + deep-equal: 1.1.1 + defined: 1.0.0 + dotignore: 0.1.2 + for-each: 0.3.3 + glob: 7.2.3 + has: 1.0.3 + inherits: 2.0.4 + is-regex: 1.1.4 + minimist: 1.2.8 + object-inspect: 1.12.2 + resolve: 1.22.1 + resumer: 0.0.0 + string.prototype.trim: 1.2.8 + through: 2.3.8 + dev: true + + /tar@4.4.19: + resolution: {integrity: sha512-a20gEsvHnWe0ygBY8JbxoM4w3SJdhc7ZAuxkLqh+nvNQN2IOt0B5lLgM490X5Hl8FF0dl0tOf2ewFYAlIFgzVA==} + engines: {node: '>=4.5'} + dependencies: + chownr: 1.1.4 + fs-minipass: 1.2.7 + minipass: 2.9.0 + minizlib: 1.3.3 + mkdirp: 0.5.6 + safe-buffer: 5.2.1 + yallist: 3.1.1 + dev: true + + /test-value@2.1.0: + resolution: {integrity: sha512-+1epbAxtKeXttkGFMTX9H42oqzOTufR1ceCF+GYA5aOmvaPq9wd4PUS8329fn2RRLGNeUkgRLnVpycjx8DsO2w==} + engines: {node: '>=0.10.0'} + dependencies: + array-back: 1.0.4 + typical: 2.6.1 + dev: true + + /testrpc@0.0.1: + resolution: {integrity: sha512-afH1hO+SQ/VPlmaLUFj2636QMeDvPCeQMc/9RBMW0IfjNe9gFD9Ra3ShqYkB7py0do1ZcCna/9acHyzTJ+GcNA==} + deprecated: testrpc has been renamed to ganache-cli, please use this package from now on. 
+ dev: true + + /text-table@0.2.0: + resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} + dev: true + + /then-request@6.0.2: + resolution: {integrity: sha512-3ZBiG7JvP3wbDzA9iNY5zJQcHL4jn/0BWtXIkagfz7QgOL/LqjCEOBQuJNZfu0XYnv5JhKh+cDxCPM4ILrqruA==} + engines: {node: '>=6.0.0'} + dependencies: + '@types/concat-stream': 1.6.1 + '@types/form-data': 0.0.33 + '@types/node': 8.10.66 + '@types/qs': 6.9.7 + caseless: 0.12.0 + concat-stream: 1.6.2 + form-data: 2.3.3 + http-basic: 8.1.3 + http-response-object: 3.0.2 + promise: 8.3.0 + qs: 6.11.0 + dev: true + + /through2@2.0.5: + resolution: {integrity: sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==} + dependencies: + readable-stream: 2.3.7 + xtend: 4.0.2 + dev: true + + /through@2.3.8: + resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==} + dev: true + + /timed-out@4.0.1: + resolution: {integrity: sha512-G7r3AhovYtr5YKOWQkta8RKAPb+J9IsO4uVmzjl8AZwfhs8UcUwTiD6gcJYSgOtzyjvQKrKYn41syHbUWMkafA==} + engines: {node: '>=0.10.0'} + dev: true + + /title-case@2.1.1: + resolution: {integrity: sha512-EkJoZ2O3zdCz3zJsYCsxyq2OC5hrxR9mfdd5I+w8h/tmFfeOxJ+vvkxsKxdmN0WtS9zLdHEgfgVOiMVgv+Po4Q==} + dependencies: + no-case: 2.3.2 + upper-case: 1.1.3 + dev: true + + /tmp@0.0.33: + resolution: {integrity: sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==} + engines: {node: '>=0.6.0'} + dependencies: + os-tmpdir: 1.0.2 + dev: true + + /tmp@0.1.0: + resolution: {integrity: sha512-J7Z2K08jbGcdA1kkQpJSqLF6T0tdQqpR2pnSUXsIchbPdTI9v3e85cLW0d6WDhwuAleOV71j2xWs8qMPfK7nKw==} + engines: {node: '>=6'} + dependencies: + rimraf: 2.7.1 + dev: true + + /to-fast-properties@1.0.3: + resolution: {integrity: sha512-lxrWP8ejsq+7E3nNjwYmUBMAgjMTZoTI+sdBOpvNyijeDLa29LUn9QaoXAHv4+Z578hbmHHJKZknzxVtvo77og==} + engines: {node: '>=0.10.0'} + 
dev: true + + /to-object-path@0.3.0: + resolution: {integrity: sha512-9mWHdnGRuh3onocaHzukyvCZhzvr6tiflAy/JRFXcJX0TjgfWA9pk9t8CMbzmBE4Jfw58pXbkngtBtqYxzNEyg==} + engines: {node: '>=0.10.0'} + dependencies: + kind-of: 3.2.2 + dev: true + + /to-readable-stream@1.0.0: + resolution: {integrity: sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==} + engines: {node: '>=6'} + dev: true + + /to-regex-range@2.1.1: + resolution: {integrity: sha512-ZZWNfCjUokXXDGXFpZehJIkZqq91BcULFq/Pi7M5i4JnxXdhMKAK682z8bCW3o8Hj1wuuzoKcW3DfVzaP6VuNg==} + engines: {node: '>=0.10.0'} + dependencies: + is-number: 3.0.0 + repeat-string: 1.6.1 + dev: true + + /to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + dependencies: + is-number: 7.0.0 + dev: true + + /to-regex@3.0.2: + resolution: {integrity: sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==} + engines: {node: '>=0.10.0'} + dependencies: + define-property: 2.0.2 + extend-shallow: 3.0.2 + regex-not: 1.0.2 + safe-regex: 1.1.0 + dev: true + + /toidentifier@1.0.0: + resolution: {integrity: sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==} + engines: {node: '>=0.6'} + dev: true + + /toidentifier@1.0.1: + resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} + engines: {node: '>=0.6'} + dev: true + + /tough-cookie@2.5.0: + resolution: {integrity: sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==} + engines: {node: '>=0.8'} + dependencies: + psl: 1.9.0 + punycode: 2.1.1 + dev: true + + /tr46@0.0.3: + resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} + dev: true + + /trim-right@1.0.1: + resolution: {integrity: 
sha512-WZGXGstmCWgeevgTL54hrCuw1dyMQIzWy7ZfqRJfSmJZBwklI15egmQytFP6bPidmw3M8d5yEowl1niq4vmqZw==} + engines: {node: '>=0.10.0'} + dev: true + + /ts-api-utils@1.0.3(typescript@5.3.3): + resolution: {integrity: sha512-wNMeqtMz5NtwpT/UZGY5alT+VoKdSsOOP/kqHFcUW1P/VRhH2wJ48+DN2WwUliNbQ976ETwDL0Ifd2VVvgonvg==} + engines: {node: '>=16.13.0'} + peerDependencies: + typescript: '>=4.2.0' + dependencies: + typescript: 5.3.3 + dev: true + + /ts-command-line-args@2.5.1: + resolution: {integrity: sha512-H69ZwTw3rFHb5WYpQya40YAX2/w7Ut75uUECbgBIsLmM+BNuYnxsltfyyLMxy6sEeKxgijLTnQtLd0nKd6+IYw==} + hasBin: true + dependencies: + chalk: 4.1.2 + command-line-args: 5.2.1 + command-line-usage: 6.1.3 + string-format: 2.0.0 + dev: true + + /ts-essentials@1.0.4: + resolution: {integrity: sha512-q3N1xS4vZpRouhYHDPwO0bDW3EZ6SK9CrrDHxi/D6BPReSjpVgWIOpLS2o0gSBZm+7q/wyKp6RVM1AeeW7uyfQ==} + dev: true + + /ts-essentials@6.0.7(typescript@5.3.3): + resolution: {integrity: sha512-2E4HIIj4tQJlIHuATRHayv0EfMGK3ris/GRk1E3CFnsZzeNV+hUmelbaTZHLtXaZppM5oLhHRtO04gINC4Jusw==} + peerDependencies: + typescript: '>=3.7.0' + dependencies: + typescript: 5.3.3 + dev: true + + /ts-essentials@7.0.3(typescript@5.3.3): + resolution: {integrity: sha512-8+gr5+lqO3G84KdiTSMRLtuyJ+nTBVRKuCrK4lidMPdVeEp0uqC875uE5NMcaA7YYMN7XsNiFQuMvasF8HT/xQ==} + peerDependencies: + typescript: '>=3.7.0' + dependencies: + typescript: 5.3.3 + dev: true + + /ts-generator@0.1.1: + resolution: {integrity: sha512-N+ahhZxTLYu1HNTQetwWcx3so8hcYbkKBHTr4b4/YgObFTIKkOSSsaa+nal12w8mfrJAyzJfETXawbNjSfP2gQ==} + hasBin: true + dependencies: + '@types/mkdirp': 0.5.2 + '@types/prettier': 2.7.1 + '@types/resolve': 0.0.8 + chalk: 2.4.2 + glob: 7.2.3 + mkdirp: 0.5.6 + prettier: 2.8.8 + resolve: 1.22.1 + ts-essentials: 1.0.4 + dev: true + + /ts-node@10.9.2(@types/node@16.18.80)(typescript@5.3.3): + resolution: {integrity: sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==} + hasBin: true + peerDependencies: + 
'@swc/core': '>=1.2.50' + '@swc/wasm': '>=1.2.50' + '@types/node': '*' + typescript: '>=2.7' + peerDependenciesMeta: + '@swc/core': + optional: true + '@swc/wasm': + optional: true + dependencies: + '@cspotcode/source-map-support': 0.8.1 + '@tsconfig/node10': 1.0.9 + '@tsconfig/node12': 1.0.11 + '@tsconfig/node14': 1.0.3 + '@tsconfig/node16': 1.0.3 + '@types/node': 16.18.80 + acorn: 8.10.0 + acorn-walk: 8.2.0 + arg: 4.1.3 + create-require: 1.1.1 + diff: 4.0.2 + make-error: 1.3.6 + typescript: 5.3.3 + v8-compile-cache-lib: 3.0.1 + yn: 3.1.1 + dev: true + + /tslib@1.14.1: + resolution: {integrity: sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==} + dev: true + + /tslib@2.6.2: + resolution: {integrity: sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==} + dev: true + + /tsort@0.0.1: + resolution: {integrity: sha512-Tyrf5mxF8Ofs1tNoxA13lFeZ2Zrbd6cKbuH3V+MQ5sb6DtBj5FjrXVsRWT8YvNAQTqNoz66dz1WsbigI22aEnw==} + dev: true + + /tunnel-agent@0.6.0: + resolution: {integrity: sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==} + dependencies: + safe-buffer: 5.2.1 + dev: true + + /tweetnacl-util@0.15.1: + resolution: {integrity: sha512-RKJBIj8lySrShN4w6i/BonWp2Z/uxwC3h4y7xsRrpP59ZboCd0GpEVsOnMDYLMmKBpYhb5TgHzZXy7wTfYFBRw==} + dev: true + + /tweetnacl@0.14.5: + resolution: {integrity: sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==} + dev: true + + /tweetnacl@1.0.3: + resolution: {integrity: sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw==} + dev: true + + /type-check@0.3.2: + resolution: {integrity: sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==} + engines: {node: '>= 0.8.0'} + dependencies: + prelude-ls: 1.1.2 + dev: true + + /type-check@0.4.0: + resolution: {integrity: 
sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + dependencies: + prelude-ls: 1.2.1 + dev: true + + /type-detect@4.0.8: + resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} + engines: {node: '>=4'} + + /type-fest@0.20.2: + resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} + engines: {node: '>=10'} + dev: true + + /type-fest@0.21.3: + resolution: {integrity: sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==} + engines: {node: '>=10'} + dev: true + + /type-fest@0.7.1: + resolution: {integrity: sha512-Ne2YiiGN8bmrmJJEuTWTLJR32nh/JdL1+PSicowtNb0WFpn59GK8/lfD61bVtzguz7b3PBt74nxpv/Pw5po5Rg==} + engines: {node: '>=8'} + dev: true + + /type-is@1.6.18: + resolution: {integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==} + engines: {node: '>= 0.6'} + dependencies: + media-typer: 0.3.0 + mime-types: 2.1.27 + dev: true + + /type@1.2.0: + resolution: {integrity: sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==} + dev: true + + /type@2.0.0: + resolution: {integrity: sha512-KBt58xCHry4Cejnc2ISQAF7QY+ORngsWfxezO68+12hKV6lQY8P/psIkcbjeHWn7MqcgciWJyCCevFMJdIXpow==} + dev: true + + /typechain@3.0.0(typescript@5.3.3): + resolution: {integrity: sha512-ft4KVmiN3zH4JUFu2WJBrwfHeDf772Tt2d8bssDTo/YcckKW2D+OwFrHXRC6hJvO3mHjFQTihoMV6fJOi0Hngg==} + hasBin: true + dependencies: + command-line-args: 4.0.7 + debug: 4.3.4(supports-color@8.1.1) + fs-extra: 7.0.1 + js-sha3: 0.8.0 + lodash: 4.17.21 + ts-essentials: 6.0.7(typescript@5.3.3) + ts-generator: 0.1.1 + transitivePeerDependencies: + - supports-color + - typescript + dev: true + + /typechain@8.3.2(typescript@5.3.3): + resolution: {integrity: 
sha512-x/sQYr5w9K7yv3es7jo4KTX05CLxOf7TRWwoHlrjRh8H82G64g+k7VuWPJlgMo6qrjfCulOdfBjiaDtmhFYD/Q==} + hasBin: true + peerDependencies: + typescript: '>=4.3.0' + dependencies: + '@types/prettier': 2.7.1 + debug: 4.3.4(supports-color@8.1.1) + fs-extra: 7.0.1 + glob: 7.1.7 + js-sha3: 0.8.0 + lodash: 4.17.21 + mkdirp: 1.0.4 + prettier: 2.8.8 + ts-command-line-args: 2.5.1 + ts-essentials: 7.0.3(typescript@5.3.3) + typescript: 5.3.3 + transitivePeerDependencies: + - supports-color + dev: true + + /typed-array-buffer@1.0.0: + resolution: {integrity: sha512-Y8KTSIglk9OZEr8zywiIHG/kmQ7KWyjseXs1CbSo8vC42w7hg2HgYTxSWwP0+is7bWDc1H+Fo026CpHFwm8tkw==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.5 + get-intrinsic: 1.2.2 + is-typed-array: 1.1.12 + dev: true + + /typed-array-byte-length@1.0.0: + resolution: {integrity: sha512-Or/+kvLxNpeQ9DtSydonMxCx+9ZXOswtwJn17SNLvhptaXYDJvkFFP5zbfU/uLmvnBJlI4yrnXRxpdWH/M5tNA==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.5 + for-each: 0.3.3 + has-proto: 1.0.1 + is-typed-array: 1.1.12 + dev: true + + /typed-array-byte-offset@1.0.0: + resolution: {integrity: sha512-RD97prjEt9EL8YgAgpOkf3O4IF9lhJFr9g0htQkm0rchFp/Vx7LW5Q8fSXXub7BXAODyUQohRMyOc3faCPd0hg==} + engines: {node: '>= 0.4'} + dependencies: + available-typed-arrays: 1.0.5 + call-bind: 1.0.5 + for-each: 0.3.3 + has-proto: 1.0.1 + is-typed-array: 1.1.12 + dev: true + + /typed-array-length@1.0.4: + resolution: {integrity: sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==} + dependencies: + call-bind: 1.0.5 + for-each: 0.3.3 + is-typed-array: 1.1.12 + dev: true + + /typedarray-to-buffer@3.1.5: + resolution: {integrity: sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==} + dependencies: + is-typedarray: 1.0.0 + dev: true + + /typedarray@0.0.6: + resolution: {integrity: sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==} + dev: true 
+ + /typescript@5.3.3: + resolution: {integrity: sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==} + engines: {node: '>=14.17'} + hasBin: true + dev: true + + /typewise-core@1.2.0: + resolution: {integrity: sha512-2SCC/WLzj2SbUwzFOzqMCkz5amXLlxtJqDKTICqg30x+2DZxcfZN2MvQZmGfXWKNWaKK9pBPsvkcwv8bF/gxKg==} + dev: true + + /typewise@1.0.3: + resolution: {integrity: sha512-aXofE06xGhaQSPzt8hlTY+/YWQhm9P0jYUp1f2XtmW/3Bk0qzXcyFWAtPoo2uTGQj1ZwbDuSyuxicq+aDo8lCQ==} + dependencies: + typewise-core: 1.2.0 + dev: true + + /typewiselite@1.0.0: + resolution: {integrity: sha512-J9alhjVHupW3Wfz6qFRGgQw0N3gr8hOkw6zm7FZ6UR1Cse/oD9/JVok7DNE9TT9IbciDHX2Ex9+ksE6cRmtymw==} + dev: true + + /typical@2.6.1: + resolution: {integrity: sha512-ofhi8kjIje6npGozTip9Fr8iecmYfEbS06i0JnIg+rh51KakryWF4+jX8lLKZVhy6N+ID45WYSFCxPOdTWCzNg==} + dev: true + + /typical@4.0.0: + resolution: {integrity: sha512-VAH4IvQ7BDFYglMd7BPRDfLgxZZX4O4TFcRDA6EN5X7erNJJq+McIEp8np9aVtxrCJ6qx4GTYVfOWNjcqwZgRw==} + engines: {node: '>=8'} + dev: true + + /typical@5.2.0: + resolution: {integrity: sha512-dvdQgNDNJo+8B2uBQoqdb11eUCE1JQXhvjC/CZtgvZseVd5TYMXnq0+vuUemXbd/Se29cTaUuPX3YIc2xgbvIg==} + engines: {node: '>=8'} + dev: true + + /uglify-js@3.17.3: + resolution: {integrity: sha512-JmMFDME3iufZnBpyKL+uS78LRiC+mK55zWfM5f/pWBJfpOttXAqYfdDGRukYhJuyRinvPVAtUhvy7rlDybNtFg==} + engines: {node: '>=0.8.0'} + hasBin: true + requiresBuild: true + dev: true + optional: true + + /ultron@1.1.1: + resolution: {integrity: sha512-UIEXBNeYmKptWH6z8ZnqTeS8fV74zG0/eRU9VGkpzz+LIJNs8W/zM/L+7ctCkRrgbNnnR0xxw4bKOr0cW0N0Og==} + dev: true + + /unbox-primitive@1.0.2: + resolution: {integrity: sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==} + dependencies: + call-bind: 1.0.2 + has-bigints: 1.0.2 + has-symbols: 1.0.3 + which-boxed-primitive: 1.0.2 + dev: true + + /underscore@1.9.1: + resolution: {integrity: 
sha512-5/4etnCkd9c8gwgowi5/om/mYO5ajCaOgdzj/oW+0eQV9WxKBDZw5+ycmKmeaTXjInS/W0BzpGLo2xR2aBwZdg==} + requiresBuild: true + dev: true + optional: true + + /undici@5.19.1: + resolution: {integrity: sha512-YiZ61LPIgY73E7syxCDxxa3LV2yl3sN8spnIuTct60boiiRaE1J8mNWHO8Im2Zi/sFrPusjLlmRPrsyraSqX6A==} + engines: {node: '>=12.18'} + dependencies: + busboy: 1.6.0 + dev: true + + /unfetch@4.2.0: + resolution: {integrity: sha512-F9p7yYCn6cIW9El1zi0HI6vqpeIvBsr3dSuRO6Xuppb1u5rXpCPmMvLSyECLhybr9isec8Ohl0hPekMVrEinDA==} + dev: true + + /union-value@1.0.1: + resolution: {integrity: sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==} + engines: {node: '>=0.10.0'} + dependencies: + arr-union: 3.1.0 + get-value: 2.0.6 + is-extendable: 0.1.1 + set-value: 2.0.1 + dev: true + + /universalify@0.1.2: + resolution: {integrity: sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==} + engines: {node: '>= 4.0.0'} + dev: true + + /universalify@2.0.0: + resolution: {integrity: sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==} + engines: {node: '>= 10.0.0'} + dev: true + + /unorm@1.6.0: + resolution: {integrity: sha512-b2/KCUlYZUeA7JFUuRJZPUtr4gZvBh7tavtv4fvk4+KV9pfGiR6CQAQAWl49ZpR3ts2dk4FYkP7EIgDJoiOLDA==} + engines: {node: '>= 0.4.0'} + dev: true + + /unpipe@1.0.0: + resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} + engines: {node: '>= 0.8'} + dev: true + + /unset-value@1.0.0: + resolution: {integrity: sha512-PcA2tsuGSF9cnySLHTLSh2qrQiJ70mn+r+Glzxv2TWZblxsxCC52BDlZoPCsz7STd9pN7EZetkWZBAvk4cgZdQ==} + engines: {node: '>=0.10.0'} + dependencies: + has-value: 0.3.1 + isobject: 3.0.1 + dev: true + + /upper-case-first@1.1.2: + resolution: {integrity: sha512-wINKYvI3Db8dtjikdAqoBbZoP6Q+PZUyfMR7pmwHzjC2quzSkUq5DmPrTtPEqHaz8AGtmsB4TqwapMTM1QAQOQ==} + dependencies: + upper-case: 1.1.3 + dev: true + + 
/upper-case@1.1.3: + resolution: {integrity: sha512-WRbjgmYzgXkCV7zNVpy5YgrHgbBv126rMALQQMrmzOVC4GM2waQ9x7xtm8VU+1yF2kWyPzI9zbZ48n4vSxwfSA==} + dev: true + + /uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + dependencies: + punycode: 2.1.1 + dev: true + + /urix@0.1.0: + resolution: {integrity: sha512-Am1ousAhSLBeB9cG/7k7r2R0zj50uDRlZHPGbazid5s9rlF1F/QKYObEKSIunSjIOkJZqwRRLpvewjEkM7pSqg==} + deprecated: Please see https://github.com/lydell/urix#deprecated + dev: true + + /url-parse-lax@1.0.0: + resolution: {integrity: sha512-BVA4lR5PIviy2PMseNd2jbFQ+jwSwQGdJejf5ctd1rEXt0Ypd7yanUK9+lYechVlN5VaTJGsu2U/3MDDu6KgBA==} + engines: {node: '>=0.10.0'} + dependencies: + prepend-http: 1.0.4 + dev: true + + /url-parse-lax@3.0.0: + resolution: {integrity: sha512-NjFKA0DidqPa5ciFcSrXnAltTtzz84ogy+NebPvfEgAck0+TNg4UJ4IN+fB7zRZfbgUf0syOo9MDxFkDSMuFaQ==} + engines: {node: '>=4'} + dependencies: + prepend-http: 2.0.0 + dev: true + + /url-set-query@1.0.0: + resolution: {integrity: sha512-3AChu4NiXquPfeckE5R5cGdiHCMWJx1dwCWOmWIL4KHAziJNOFIYJlpGFeKDvwLPHovZRCxK3cYlwzqI9Vp+Gg==} + dev: true + + /url-to-options@1.0.1: + resolution: {integrity: sha512-0kQLIzG4fdk/G5NONku64rSH/x32NOA39LVQqlK8Le6lvTF6GGRJpqaQFGgU+CLwySIqBSMdwYM0sYcW9f6P4A==} + engines: {node: '>= 4'} + dev: true + + /url@0.11.0: + resolution: {integrity: sha512-kbailJa29QrtXnxgq+DdCEGlbTeYM2eJUxsz6vjZavrCYPMIFHMKQmSKYAIuUK2i7hgPm28a8piX5NTUtM/LKQ==} + dependencies: + punycode: 1.3.2 + querystring: 0.2.0 + dev: true + + /use@3.1.1: + resolution: {integrity: sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==} + engines: {node: '>=0.10.0'} + dev: true + + /utf-8-validate@5.0.9: + resolution: {integrity: sha512-Yek7dAy0v3Kl0orwMlvi7TPtiCNrdfHNd7Gcc/pLq4BLXqfAmd0J7OWMizUQnTTJsyjKn02mU7anqwfmUP4J8Q==} + engines: {node: '>=6.14.2'} + requiresBuild: true + dependencies: + node-gyp-build: 4.5.0 + dev: 
true + + /utf8@3.0.0: + resolution: {integrity: sha512-E8VjFIQ/TyQgp+TZfS6l8yp/xWppSAHzidGiRrqe4bK4XP9pTRyKFgGJpO3SN7zdX4DeomTrwaseCHovfpFcqQ==} + dev: true + + /util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + dev: true + + /util.promisify@1.1.1: + resolution: {integrity: sha512-/s3UsZUrIfa6xDhr7zZhnE9SLQ5RIXyYfiVnMMyMDzOc8WhWN4Nbh36H842OyurKbCDAesZOJaVyvmSl6fhGQw==} + dependencies: + call-bind: 1.0.5 + define-properties: 1.2.1 + for-each: 0.3.3 + has-symbols: 1.0.3 + object.getownpropertydescriptors: 2.1.4 + dev: true + + /util@0.12.3: + resolution: {integrity: sha512-I8XkoQwE+fPQEhy9v012V+TSdH2kp9ts29i20TaaDUXsg7x/onePbhFJUExBfv/2ay1ZOp/Vsm3nDlmnFGSAog==} + dependencies: + inherits: 2.0.4 + is-arguments: 1.0.4 + is-generator-function: 1.0.8 + is-typed-array: 1.1.5 + safe-buffer: 5.2.1 + which-typed-array: 1.1.4 + dev: true + + /utils-merge@1.0.1: + resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==} + engines: {node: '>= 0.4.0'} + dev: true + + /uuid@2.0.1: + resolution: {integrity: sha512-nWg9+Oa3qD2CQzHIP4qKUqwNfzKn8P0LtFhotaCTFchsV7ZfDhAybeip/HZVeMIpZi9JgY1E3nUlwaCmZT1sEg==} + deprecated: Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details. + dev: true + + /uuid@3.3.2: + resolution: {integrity: sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==} + deprecated: Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details. 
+ hasBin: true + dev: true + + /uuid@3.4.0: + resolution: {integrity: sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==} + deprecated: Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details. + hasBin: true + dev: true + + /uuid@8.3.2: + resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==} + hasBin: true + dev: true + + /v8-compile-cache-lib@3.0.1: + resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} + dev: true + + /validate-npm-package-license@3.0.4: + resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==} + dependencies: + spdx-correct: 3.1.1 + spdx-expression-parse: 3.0.1 + dev: true + + /varint@5.0.2: + resolution: {integrity: sha512-lKxKYG6H03yCZUpAGOPOsMcGxd1RHCu1iKvEHYDPmTyq2HueGhD73ssNBqqQWfvYs04G9iUFRvmAVLW20Jw6ow==} + dev: true + + /vary@1.1.2: + resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} + engines: {node: '>= 0.8'} + dev: true + + /verror@1.10.0: + resolution: {integrity: sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==} + engines: {'0': node >=0.6.0} + dependencies: + assert-plus: 1.0.0 + core-util-is: 1.0.2 + extsprintf: 1.4.0 + dev: true + + /web3-bzz@1.2.11: + resolution: {integrity: sha512-XGpWUEElGypBjeFyUhTkiPXFbDVD6Nr/S5jznE3t8cWUA0FxRf1n3n/NuIZeb0H9RkN2Ctd/jNma/k8XGa3YKg==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + '@types/node': 12.19.16 + got: 9.6.0 + swarm-js: 0.1.40 + underscore: 1.9.1 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + optional: true + + /web3-bzz@1.7.4: + 
resolution: {integrity: sha512-w9zRhyEqTK/yi0LGRHjZMcPCfP24LBjYXI/9YxFw9VqsIZ9/G0CRCnUt12lUx0A56LRAMpF7iQ8eA73aBcO29Q==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + '@types/node': 12.19.16 + got: 9.6.0 + swarm-js: 0.1.40 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /web3-bzz@1.8.0: + resolution: {integrity: sha512-caDtdKeLi7+2Vb+y+cq2yyhkNjnxkFzVW0j1DtemarBg3dycG1iEl75CVQMLNO6Wkg+HH9tZtRnUyFIe5LIUeQ==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + '@types/node': 12.19.16 + got: 12.1.0 + swarm-js: 0.1.40 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /web3-core-helpers@1.2.11: + resolution: {integrity: sha512-PEPoAoZd5ME7UfbnCZBdzIerpe74GEvlwT4AjOmHeCVZoIFk7EqvOZDejJHt+feJA6kMVTdd0xzRNN295UhC1A==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + underscore: 1.9.1 + web3-eth-iban: 1.2.11 + web3-utils: 1.2.11 + dev: true + optional: true + + /web3-core-helpers@1.7.4: + resolution: {integrity: sha512-F8PH11qIkE/LpK4/h1fF/lGYgt4B6doeMi8rukeV/s4ivseZHHslv1L6aaijLX/g/j4PsFmR42byynBI/MIzFg==} + engines: {node: '>=8.0.0'} + dependencies: + web3-eth-iban: 1.7.4 + web3-utils: 1.7.4 + dev: true + + /web3-core-helpers@1.8.0: + resolution: {integrity: sha512-nMAVwZB3rEp/khHI2BvFy0e/xCryf501p5NGjswmJtEM+Zrd3Biaw52JrB1qAZZIzCA8cmLKaOgdfamoDOpWdw==} + engines: {node: '>=8.0.0'} + dependencies: + web3-eth-iban: 1.8.0 + web3-utils: 1.8.0 + dev: true + + /web3-core-method@1.2.11: + resolution: {integrity: sha512-ff0q76Cde94HAxLDZ6DbdmKniYCQVtvuaYh+rtOUMB6kssa5FX0q3vPmixi7NPooFnbKmmZCM6NvXg4IreTPIw==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + '@ethersproject/transactions': 5.7.0 + underscore: 1.9.1 + web3-core-helpers: 1.2.11 + web3-core-promievent: 1.2.11 + web3-core-subscriptions: 1.2.11 + web3-utils: 1.2.11 + dev: true + optional: true + + /web3-core-method@1.7.4: + resolution: 
{integrity: sha512-56K7pq+8lZRkxJyzf5MHQPI9/VL3IJLoy4L/+q8HRdZJ3CkB1DkXYaXGU2PeylG1GosGiSzgIfu1ljqS7CP9xQ==} + engines: {node: '>=8.0.0'} + dependencies: + '@ethersproject/transactions': 5.7.0 + web3-core-helpers: 1.7.4 + web3-core-promievent: 1.7.4 + web3-core-subscriptions: 1.7.4 + web3-utils: 1.7.4 + dev: true + + /web3-core-method@1.8.0: + resolution: {integrity: sha512-c94RAzo3gpXwf2rf8rL8C77jOzNWF4mXUoUfZYYsiY35cJFd46jQDPI00CB5+ZbICTiA5mlVzMj4e7jAsTqiLA==} + engines: {node: '>=8.0.0'} + dependencies: + '@ethersproject/transactions': 5.7.0 + web3-core-helpers: 1.8.0 + web3-core-promievent: 1.8.0 + web3-core-subscriptions: 1.8.0 + web3-utils: 1.8.0 + dev: true + + /web3-core-promievent@1.2.11: + resolution: {integrity: sha512-il4McoDa/Ox9Agh4kyfQ8Ak/9ABYpnF8poBLL33R/EnxLsJOGQG2nZhkJa3I067hocrPSjEdlPt/0bHXsln4qA==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + eventemitter3: 4.0.4 + dev: true + optional: true + + /web3-core-promievent@1.7.4: + resolution: {integrity: sha512-o4uxwXKDldN7ER7VUvDfWsqTx9nQSP1aDssi1XYXeYC2xJbVo0n+z6ryKtmcoWoRdRj7uSpVzal3nEmlr480mA==} + engines: {node: '>=8.0.0'} + dependencies: + eventemitter3: 4.0.4 + dev: true + + /web3-core-promievent@1.8.0: + resolution: {integrity: sha512-FGLyjAuOaAQ+ZhV6iuw9tg/9WvIkSZXKHQ4mdTyQ8MxVraOtFivOCbuLLsGgapfHYX+RPxsc1j1YzQjKoupagQ==} + engines: {node: '>=8.0.0'} + dependencies: + eventemitter3: 4.0.4 + dev: true + + /web3-core-requestmanager@1.2.11: + resolution: {integrity: sha512-oFhBtLfOiIbmfl6T6gYjjj9igOvtyxJ+fjS+byRxiwFJyJ5BQOz4/9/17gWR1Cq74paTlI7vDGxYfuvfE/mKvA==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + underscore: 1.9.1 + web3-core-helpers: 1.2.11 + web3-providers-http: 1.2.11 + web3-providers-ipc: 1.2.11 + web3-providers-ws: 1.2.11 + transitivePeerDependencies: + - supports-color + dev: true + optional: true + + /web3-core-requestmanager@1.7.4: + resolution: {integrity: 
sha512-IuXdAm65BQtPL4aI6LZJJOrKAs0SM5IK2Cqo2/lMNvVMT9Kssq6qOk68Uf7EBDH0rPuINi+ReLP+uH+0g3AnPA==} + engines: {node: '>=8.0.0'} + dependencies: + util: 0.12.3 + web3-core-helpers: 1.7.4 + web3-providers-http: 1.7.4 + web3-providers-ipc: 1.7.4 + web3-providers-ws: 1.7.4 + transitivePeerDependencies: + - supports-color + dev: true + + /web3-core-requestmanager@1.8.0: + resolution: {integrity: sha512-2AoYCs3Owl5foWcf4uKPONyqFygSl9T54L8b581U16nsUirjhoTUGK/PBhMDVcLCmW4QQmcY5A8oPFpkQc1TTg==} + engines: {node: '>=8.0.0'} + dependencies: + util: 0.12.3 + web3-core-helpers: 1.8.0 + web3-providers-http: 1.8.0 + web3-providers-ipc: 1.8.0 + web3-providers-ws: 1.8.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /web3-core-subscriptions@1.2.11: + resolution: {integrity: sha512-qEF/OVqkCvQ7MPs1JylIZCZkin0aKK9lDxpAtQ1F8niEDGFqn7DT8E/vzbIa0GsOjL2fZjDhWJsaW+BSoAW1gg==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + eventemitter3: 4.0.4 + underscore: 1.9.1 + web3-core-helpers: 1.2.11 + dev: true + optional: true + + /web3-core-subscriptions@1.7.4: + resolution: {integrity: sha512-VJvKWaXRyxk2nFWumOR94ut9xvjzMrRtS38c4qj8WBIRSsugrZr5lqUwgndtj0qx4F+50JhnU++QEqUEAtKm3g==} + engines: {node: '>=8.0.0'} + dependencies: + eventemitter3: 4.0.4 + web3-core-helpers: 1.7.4 + dev: true + + /web3-core-subscriptions@1.8.0: + resolution: {integrity: sha512-7lHVRzDdg0+Gcog55lG6Q3D8JV+jN+4Ly6F8cSn9xFUAwOkdbgdWsjknQG7t7CDWy21DQkvdiY2BJF8S68AqOA==} + engines: {node: '>=8.0.0'} + dependencies: + eventemitter3: 4.0.4 + web3-core-helpers: 1.8.0 + dev: true + + /web3-core@1.2.11: + resolution: {integrity: sha512-CN7MEYOY5ryo5iVleIWRE3a3cZqVaLlIbIzDPsvQRUfzYnvzZQRZBm9Mq+ttDi2STOOzc1MKylspz/o3yq/LjQ==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + '@types/bn.js': 4.11.6 + '@types/node': 12.19.16 + bignumber.js: 9.1.0 + web3-core-helpers: 1.2.11 + web3-core-method: 1.2.11 + web3-core-requestmanager: 1.2.11 + web3-utils: 1.2.11 + 
transitivePeerDependencies: + - supports-color + dev: true + optional: true + + /web3-core@1.7.4: + resolution: {integrity: sha512-L0DCPlIh9bgIED37tYbe7bsWrddoXYc897ANGvTJ6MFkSNGiMwDkTLWSgYd9Mf8qu8b4iuPqXZHMwIo4atoh7Q==} + engines: {node: '>=8.0.0'} + dependencies: + '@types/bn.js': 5.1.1 + '@types/node': 12.19.16 + bignumber.js: 9.1.0 + web3-core-helpers: 1.7.4 + web3-core-method: 1.7.4 + web3-core-requestmanager: 1.7.4 + web3-utils: 1.7.4 + transitivePeerDependencies: + - supports-color + dev: true + + /web3-core@1.8.0: + resolution: {integrity: sha512-9sCA+Z02ci6zoY2bAquFiDjujRwmSKHiSGi4B8IstML8okSytnzXk1izHYSynE7ahIkguhjWAuXFvX76F5rAbA==} + engines: {node: '>=8.0.0'} + dependencies: + '@types/bn.js': 5.1.1 + '@types/node': 12.19.16 + bignumber.js: 9.1.0 + web3-core-helpers: 1.8.0 + web3-core-method: 1.8.0 + web3-core-requestmanager: 1.8.0 + web3-utils: 1.8.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /web3-eth-abi@1.2.11: + resolution: {integrity: sha512-PkRYc0+MjuLSgg03QVWqWlQivJqRwKItKtEpRUaxUAeLE7i/uU39gmzm2keHGcQXo3POXAbOnMqkDvOep89Crg==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + '@ethersproject/abi': 5.0.0-beta.153 + underscore: 1.9.1 + web3-utils: 1.2.11 + dev: true + optional: true + + /web3-eth-abi@1.7.4: + resolution: {integrity: sha512-eMZr8zgTbqyL9MCTCAvb67RbVyN5ZX7DvA0jbLOqRWCiw+KlJKTGnymKO6jPE8n5yjk4w01e165Qb11hTDwHgg==} + engines: {node: '>=8.0.0'} + dependencies: + '@ethersproject/abi': 5.7.0 + web3-utils: 1.7.4 + dev: true + + /web3-eth-abi@1.8.0: + resolution: {integrity: sha512-xPeMb2hS9YLQK/Q5YZpkcmzoRGM+/R8bogSrYHhNC3hjZSSU0YRH+1ZKK0f9YF4qDZaPMI8tKWIMSCDIpjG6fg==} + engines: {node: '>=8.0.0'} + dependencies: + '@ethersproject/abi': 5.7.0 + web3-utils: 1.8.0 + dev: true + + /web3-eth-accounts@1.2.11: + resolution: {integrity: sha512-6FwPqEpCfKIh3nSSGeo3uBm2iFSnFJDfwL3oS9pyegRBXNsGRVpgiW63yhNzL0796StsvjHWwQnQHsZNxWAkGw==} + engines: {node: '>=8.0.0'} + requiresBuild: true + 
dependencies: + crypto-browserify: 3.12.0 + eth-lib: 0.2.8 + ethereumjs-common: 1.5.0 + ethereumjs-tx: 2.1.2 + scrypt-js: 3.0.1 + underscore: 1.9.1 + uuid: 3.3.2 + web3-core: 1.2.11 + web3-core-helpers: 1.2.11 + web3-core-method: 1.2.11 + web3-utils: 1.2.11 + transitivePeerDependencies: + - supports-color + dev: true + optional: true + + /web3-eth-accounts@1.7.4: + resolution: {integrity: sha512-Y9vYLRKP7VU7Cgq6wG1jFaG2k3/eIuiTKAG8RAuQnb6Cd9k5BRqTm5uPIiSo0AP/u11jDomZ8j7+WEgkU9+Btw==} + engines: {node: '>=8.0.0'} + dependencies: + '@ethereumjs/common': 2.6.5 + '@ethereumjs/tx': 3.5.2 + crypto-browserify: 3.12.0 + eth-lib: 0.2.8 + ethereumjs-util: 7.1.5 + scrypt-js: 3.0.1 + uuid: 3.3.2 + web3-core: 1.7.4 + web3-core-helpers: 1.7.4 + web3-core-method: 1.7.4 + web3-utils: 1.7.4 + transitivePeerDependencies: + - supports-color + dev: true + + /web3-eth-accounts@1.8.0: + resolution: {integrity: sha512-HQ/MDSv4bexwJLvnqsM6xpGE7c2NVOqyhzOZFyMUKXbIwIq85T3TaLnM9pCN7XqMpDcfxqiZ3q43JqQVkzHdmw==} + engines: {node: '>=8.0.0'} + dependencies: + '@ethereumjs/common': 2.6.5 + '@ethereumjs/tx': 3.5.2 + crypto-browserify: 3.12.0 + eth-lib: 0.2.8 + ethereumjs-util: 7.1.5 + scrypt-js: 3.0.1 + uuid: 3.3.2 + web3-core: 1.8.0 + web3-core-helpers: 1.8.0 + web3-core-method: 1.8.0 + web3-utils: 1.8.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /web3-eth-contract@1.2.11: + resolution: {integrity: sha512-MzYuI/Rq2o6gn7vCGcnQgco63isPNK5lMAan2E51AJLknjSLnOxwNY3gM8BcKoy4Z+v5Dv00a03Xuk78JowFow==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + '@types/bn.js': 4.11.6 + underscore: 1.9.1 + web3-core: 1.2.11 + web3-core-helpers: 1.2.11 + web3-core-method: 1.2.11 + web3-core-promievent: 1.2.11 + web3-core-subscriptions: 1.2.11 + web3-eth-abi: 1.2.11 + web3-utils: 1.2.11 + transitivePeerDependencies: + - supports-color + dev: true + optional: true + + /web3-eth-contract@1.7.4: + resolution: {integrity: 
sha512-ZgSZMDVI1pE9uMQpK0T0HDT2oewHcfTCv0osEqf5qyn5KrcQDg1GT96/+S0dfqZ4HKj4lzS5O0rFyQiLPQ8LzQ==} + engines: {node: '>=8.0.0'} + dependencies: + '@types/bn.js': 5.1.1 + web3-core: 1.7.4 + web3-core-helpers: 1.7.4 + web3-core-method: 1.7.4 + web3-core-promievent: 1.7.4 + web3-core-subscriptions: 1.7.4 + web3-eth-abi: 1.7.4 + web3-utils: 1.7.4 + transitivePeerDependencies: + - supports-color + dev: true + + /web3-eth-contract@1.8.0: + resolution: {integrity: sha512-6xeXhW2YoCrz2Ayf2Vm4srWiMOB6LawkvxWJDnUWJ8SMATg4Pgu42C/j8rz/enXbYWt2IKuj0kk8+QszxQbK+Q==} + engines: {node: '>=8.0.0'} + dependencies: + '@types/bn.js': 5.1.1 + web3-core: 1.8.0 + web3-core-helpers: 1.8.0 + web3-core-method: 1.8.0 + web3-core-promievent: 1.8.0 + web3-core-subscriptions: 1.8.0 + web3-eth-abi: 1.8.0 + web3-utils: 1.8.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /web3-eth-ens@1.2.11: + resolution: {integrity: sha512-dbW7dXP6HqT1EAPvnniZVnmw6TmQEKF6/1KgAxbo8iBBYrVTMDGFQUUnZ+C4VETGrwwaqtX4L9d/FrQhZ6SUiA==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + content-hash: 2.5.2 + eth-ens-namehash: 2.0.8 + underscore: 1.9.1 + web3-core: 1.2.11 + web3-core-helpers: 1.2.11 + web3-core-promievent: 1.2.11 + web3-eth-abi: 1.2.11 + web3-eth-contract: 1.2.11 + web3-utils: 1.2.11 + transitivePeerDependencies: + - supports-color + dev: true + optional: true + + /web3-eth-ens@1.7.4: + resolution: {integrity: sha512-Gw5CVU1+bFXP5RVXTCqJOmHn71X2ghNk9VcEH+9PchLr0PrKbHTA3hySpsPco1WJAyK4t8SNQVlNr3+bJ6/WZA==} + engines: {node: '>=8.0.0'} + dependencies: + content-hash: 2.5.2 + eth-ens-namehash: 2.0.8 + web3-core: 1.7.4 + web3-core-helpers: 1.7.4 + web3-core-promievent: 1.7.4 + web3-eth-abi: 1.7.4 + web3-eth-contract: 1.7.4 + web3-utils: 1.7.4 + transitivePeerDependencies: + - supports-color + dev: true + + /web3-eth-ens@1.8.0: + resolution: {integrity: sha512-/eFbQEwvsMOEiOhw9/iuRXCsPkqAmHHWuFOrThQkozRgcnSTRnvxkkRC/b6koiT5/HaKeUs4yQDg+/ixsIxZxA==} + engines: 
{node: '>=8.0.0'} + dependencies: + content-hash: 2.5.2 + eth-ens-namehash: 2.0.8 + web3-core: 1.8.0 + web3-core-helpers: 1.8.0 + web3-core-promievent: 1.8.0 + web3-eth-abi: 1.8.0 + web3-eth-contract: 1.8.0 + web3-utils: 1.8.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /web3-eth-iban@1.2.11: + resolution: {integrity: sha512-ozuVlZ5jwFC2hJY4+fH9pIcuH1xP0HEFhtWsR69u9uDIANHLPQQtWYmdj7xQ3p2YT4bQLq/axKhZi7EZVetmxQ==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + bn.js: 4.12.0 + web3-utils: 1.2.11 + dev: true + optional: true + + /web3-eth-iban@1.7.4: + resolution: {integrity: sha512-XyrsgWlZQMv5gRcjXMsNvAoCRvV5wN7YCfFV5+tHUCqN8g9T/o4XUS20vDWD0k4HNiAcWGFqT1nrls02MGZ08w==} + engines: {node: '>=8.0.0'} + dependencies: + bn.js: 5.2.1 + web3-utils: 1.7.4 + dev: true + + /web3-eth-iban@1.8.0: + resolution: {integrity: sha512-4RbvUxcMpo/e5811sE3a6inJ2H4+FFqUVmlRYs0RaXaxiHweahSRBNcpO0UWgmlePTolj0rXqPT2oEr0DuC8kg==} + engines: {node: '>=8.0.0'} + dependencies: + bn.js: 5.2.1 + web3-utils: 1.8.0 + dev: true + + /web3-eth-personal@1.2.11: + resolution: {integrity: sha512-42IzUtKq9iHZ8K9VN0vAI50iSU9tOA1V7XU2BhF/tb7We2iKBVdkley2fg26TxlOcKNEHm7o6HRtiiFsVK4Ifw==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + '@types/node': 12.19.16 + web3-core: 1.2.11 + web3-core-helpers: 1.2.11 + web3-core-method: 1.2.11 + web3-net: 1.2.11 + web3-utils: 1.2.11 + transitivePeerDependencies: + - supports-color + dev: true + optional: true + + /web3-eth-personal@1.7.4: + resolution: {integrity: sha512-O10C1Hln5wvLQsDhlhmV58RhXo+GPZ5+W76frSsyIrkJWLtYQTCr5WxHtRC9sMD1idXLqODKKgI2DL+7xeZ0/g==} + engines: {node: '>=8.0.0'} + dependencies: + '@types/node': 12.19.16 + web3-core: 1.7.4 + web3-core-helpers: 1.7.4 + web3-core-method: 1.7.4 + web3-net: 1.7.4 + web3-utils: 1.7.4 + transitivePeerDependencies: + - supports-color + dev: true + + /web3-eth-personal@1.8.0: + resolution: {integrity: 
sha512-L7FT4nR3HmsfZyIAhFpEctKkYGOjRC2h6iFKs9gnFCHZga8yLcYcGaYOBIoYtaKom99MuGBoosayWt/Twh7F5A==} + engines: {node: '>=8.0.0'} + dependencies: + '@types/node': 12.19.16 + web3-core: 1.8.0 + web3-core-helpers: 1.8.0 + web3-core-method: 1.8.0 + web3-net: 1.8.0 + web3-utils: 1.8.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /web3-eth@1.2.11: + resolution: {integrity: sha512-REvxW1wJ58AgHPcXPJOL49d1K/dPmuw4LjPLBPStOVkQjzDTVmJEIsiLwn2YeuNDd4pfakBwT8L3bz1G1/wVsQ==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + underscore: 1.9.1 + web3-core: 1.2.11 + web3-core-helpers: 1.2.11 + web3-core-method: 1.2.11 + web3-core-subscriptions: 1.2.11 + web3-eth-abi: 1.2.11 + web3-eth-accounts: 1.2.11 + web3-eth-contract: 1.2.11 + web3-eth-ens: 1.2.11 + web3-eth-iban: 1.2.11 + web3-eth-personal: 1.2.11 + web3-net: 1.2.11 + web3-utils: 1.2.11 + transitivePeerDependencies: + - supports-color + dev: true + optional: true + + /web3-eth@1.7.4: + resolution: {integrity: sha512-JG0tTMv0Ijj039emXNHi07jLb0OiWSA9O24MRSk5vToTQyDNXihdF2oyq85LfHuF690lXZaAXrjhtLNlYqb7Ug==} + engines: {node: '>=8.0.0'} + dependencies: + web3-core: 1.7.4 + web3-core-helpers: 1.7.4 + web3-core-method: 1.7.4 + web3-core-subscriptions: 1.7.4 + web3-eth-abi: 1.7.4 + web3-eth-accounts: 1.7.4 + web3-eth-contract: 1.7.4 + web3-eth-ens: 1.7.4 + web3-eth-iban: 1.7.4 + web3-eth-personal: 1.7.4 + web3-net: 1.7.4 + web3-utils: 1.7.4 + transitivePeerDependencies: + - supports-color + dev: true + + /web3-eth@1.8.0: + resolution: {integrity: sha512-hist52os3OT4TQFB/GxPSMxTh3995sz6LPvQpPvj7ktSbpg9RNSFaSsPlCT63wUAHA3PZb1FemkAIeQM5t72Lw==} + engines: {node: '>=8.0.0'} + dependencies: + web3-core: 1.8.0 + web3-core-helpers: 1.8.0 + web3-core-method: 1.8.0 + web3-core-subscriptions: 1.8.0 + web3-eth-abi: 1.8.0 + web3-eth-accounts: 1.8.0 + web3-eth-contract: 1.8.0 + web3-eth-ens: 1.8.0 + web3-eth-iban: 1.8.0 + web3-eth-personal: 1.8.0 + web3-net: 1.8.0 + web3-utils: 1.8.0 + 
transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /web3-net@1.2.11: + resolution: {integrity: sha512-sjrSDj0pTfZouR5BSTItCuZ5K/oZPVdVciPQ6981PPPIwJJkCMeVjD7I4zO3qDPCnBjBSbWvVnLdwqUBPtHxyg==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + web3-core: 1.2.11 + web3-core-method: 1.2.11 + web3-utils: 1.2.11 + transitivePeerDependencies: + - supports-color + dev: true + optional: true + + /web3-net@1.7.4: + resolution: {integrity: sha512-d2Gj+DIARHvwIdmxFQ4PwAAXZVxYCR2lET0cxz4KXbE5Og3DNjJi+MoPkX+WqoUXqimu/EOd4Cd+7gefqVAFDg==} + engines: {node: '>=8.0.0'} + dependencies: + web3-core: 1.7.4 + web3-core-method: 1.7.4 + web3-utils: 1.7.4 + transitivePeerDependencies: + - supports-color + dev: true + + /web3-net@1.8.0: + resolution: {integrity: sha512-kX6EAacK7QrOe7DOh0t5yHS5q2kxZmTCxPVwSz9io9xBeE4n4UhmzGJ/VfhP2eM3OPKYeypcR3LEO6zZ8xn2vw==} + engines: {node: '>=8.0.0'} + dependencies: + web3-core: 1.8.0 + web3-core-method: 1.8.0 + web3-utils: 1.8.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /web3-provider-engine@14.2.1: + resolution: {integrity: sha512-iSv31h2qXkr9vrL6UZDm4leZMc32SjWJFGOp/D92JXfcEboCqraZyuExDkpxKw8ziTufXieNM7LSXNHzszYdJw==} + dependencies: + async: 2.6.3 + backoff: 2.5.0 + clone: 2.1.2 + cross-fetch: 2.2.6 + eth-block-tracker: 3.0.1 + eth-json-rpc-infura: 3.2.1 + eth-sig-util: 1.4.2 + ethereumjs-block: 1.7.1 + ethereumjs-tx: 1.3.7 + ethereumjs-util: 5.2.1 + ethereumjs-vm: 2.6.0 + json-rpc-error: 2.0.0 + json-stable-stringify: 1.0.1 + promise-to-callback: 1.0.0 + readable-stream: 2.3.7 + request: 2.88.2 + semaphore: 1.1.0 + ws: 5.2.3 + xhr: 2.5.0 + xtend: 4.0.2 + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - utf-8-validate + dev: true + + /web3-providers-http@1.2.11: + resolution: {integrity: sha512-psh4hYGb1+ijWywfwpB2cvvOIMISlR44F/rJtYkRmQ5jMvG4FOCPlQJPiHQZo+2cc3HbktvvSJzIhkWQJdmvrA==} + engines: {node: '>=8.0.0'} + requiresBuild: true + 
dependencies: + web3-core-helpers: 1.2.11 + xhr2-cookies: 1.1.0 + dev: true + optional: true + + /web3-providers-http@1.7.4: + resolution: {integrity: sha512-AU+/S+49rcogUER99TlhW+UBMk0N2DxvN54CJ2pK7alc2TQ7+cprNPLHJu4KREe8ndV0fT6JtWUfOMyTvl+FRA==} + engines: {node: '>=8.0.0'} + dependencies: + web3-core-helpers: 1.7.4 + xhr2-cookies: 1.1.0 + dev: true + + /web3-providers-http@1.8.0: + resolution: {integrity: sha512-/MqxwRzExohBWW97mqlCSW/+NHydGRyoEDUS1bAIF2YjfKFwyRtHgrEzOojzkC9JvB+8LofMvbXk9CcltpZapw==} + engines: {node: '>=8.0.0'} + dependencies: + abortcontroller-polyfill: 1.7.3 + cross-fetch: 3.1.5 + es6-promise: 4.2.8 + web3-core-helpers: 1.8.0 + transitivePeerDependencies: + - encoding + dev: true + + /web3-providers-ipc@1.2.11: + resolution: {integrity: sha512-yhc7Y/k8hBV/KlELxynWjJDzmgDEDjIjBzXK+e0rHBsYEhdCNdIH5Psa456c+l0qTEU2YzycF8VAjYpWfPnBpQ==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + oboe: 2.1.4 + underscore: 1.9.1 + web3-core-helpers: 1.2.11 + dev: true + optional: true + + /web3-providers-ipc@1.7.4: + resolution: {integrity: sha512-jhArOZ235dZy8fS8090t60nTxbd1ap92ibQw5xIrAQ9m7LcZKNfmLAQUVsD+3dTFvadRMi6z1vCO7zRi84gWHw==} + engines: {node: '>=8.0.0'} + dependencies: + oboe: 2.1.5 + web3-core-helpers: 1.7.4 + dev: true + + /web3-providers-ipc@1.8.0: + resolution: {integrity: sha512-tAXHtVXNUOgehaBU8pzAlB3qhjn/PRpjdzEjzHNFqtRRTwzSEKOJxFeEhaUA4FzHnTlbnrs8ujHWUitcp1elfg==} + engines: {node: '>=8.0.0'} + dependencies: + oboe: 2.1.5 + web3-core-helpers: 1.8.0 + dev: true + + /web3-providers-ws@1.2.11: + resolution: {integrity: sha512-ZxnjIY1Er8Ty+cE4migzr43zA/+72AF1myzsLaU5eVgdsfV7Jqx7Dix1hbevNZDKFlSoEyq/3j/jYalh3So1Zg==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + eventemitter3: 4.0.4 + underscore: 1.9.1 + web3-core-helpers: 1.2.11 + websocket: 1.0.34 + transitivePeerDependencies: + - supports-color + dev: true + optional: true + + /web3-providers-ws@1.7.4: + resolution: {integrity: 
sha512-g72X77nrcHMFU8hRzQJzfgi/072n8dHwRCoTw+WQrGp+XCQ71fsk2qIu3Tp+nlp5BPn8bRudQbPblVm2uT4myQ==} + engines: {node: '>=8.0.0'} + dependencies: + eventemitter3: 4.0.4 + web3-core-helpers: 1.7.4 + websocket: 1.0.34 + transitivePeerDependencies: + - supports-color + dev: true + + /web3-providers-ws@1.8.0: + resolution: {integrity: sha512-bcZtSifsqyJxwkfQYamfdIRp4nhj9eJd7cxHg1uUkfLJK125WP96wyJL1xbPt7qt0MpfnTFn8/UuIqIB6nFENg==} + engines: {node: '>=8.0.0'} + dependencies: + eventemitter3: 4.0.4 + web3-core-helpers: 1.8.0 + websocket: 1.0.34 + transitivePeerDependencies: + - supports-color + dev: true + + /web3-shh@1.2.11: + resolution: {integrity: sha512-B3OrO3oG1L+bv3E1sTwCx66injW1A8hhwpknDUbV+sw3fehFazA06z9SGXUefuFI1kVs4q2vRi0n4oCcI4dZDg==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + web3-core: 1.2.11 + web3-core-method: 1.2.11 + web3-core-subscriptions: 1.2.11 + web3-net: 1.2.11 + transitivePeerDependencies: + - supports-color + dev: true + optional: true + + /web3-shh@1.7.4: + resolution: {integrity: sha512-mlSZxSYcMkuMCxqhTYnZkUdahZ11h+bBv/8TlkXp/IHpEe4/Gg+KAbmfudakq3EzG/04z70XQmPgWcUPrsEJ+A==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + web3-core: 1.7.4 + web3-core-method: 1.7.4 + web3-core-subscriptions: 1.7.4 + web3-net: 1.7.4 + transitivePeerDependencies: + - supports-color + dev: true + + /web3-shh@1.8.0: + resolution: {integrity: sha512-DNRgSa9Jf9xYFUGKSMylrf+zt3MPjhI2qF+UWX07o0y3+uf8zalDGiJOWvIS4upAsdPiKKVJ7co+Neof47OMmg==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + web3-core: 1.8.0 + web3-core-method: 1.8.0 + web3-core-subscriptions: 1.8.0 + web3-net: 1.8.0 + transitivePeerDependencies: + - encoding + - supports-color + dev: true + + /web3-utils@1.2.11: + resolution: {integrity: sha512-3Tq09izhD+ThqHEaWYX4VOT7dNPdZiO+c/1QMA0s5X2lDFKK/xHJb7cyTRRVzN2LvlHbR7baS1tmQhSua51TcQ==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + bn.js: 4.12.0 + eth-lib: 0.2.8 + 
ethereum-bloom-filters: 1.0.10 + ethjs-unit: 0.1.6 + number-to-bn: 1.7.0 + randombytes: 2.1.0 + underscore: 1.9.1 + utf8: 3.0.0 + dev: true + optional: true + + /web3-utils@1.7.4: + resolution: {integrity: sha512-acBdm6Evd0TEZRnChM/MCvGsMwYKmSh7OaUfNf5OKG0CIeGWD/6gqLOWIwmwSnre/2WrA1nKGId5uW2e5EfluA==} + engines: {node: '>=8.0.0'} + dependencies: + bn.js: 5.2.1 + ethereum-bloom-filters: 1.0.10 + ethereumjs-util: 7.1.5 + ethjs-unit: 0.1.6 + number-to-bn: 1.7.0 + randombytes: 2.1.0 + utf8: 3.0.0 + dev: true + + /web3-utils@1.8.0: + resolution: {integrity: sha512-7nUIl7UWpLVka2f09CMbKOSEvorvHnaugIabU4mj7zfMvm0tSByLcEu3eyV9qgS11qxxLuOkzBIwCstTflhmpQ==} + engines: {node: '>=8.0.0'} + dependencies: + bn.js: 5.2.1 + ethereum-bloom-filters: 1.0.10 + ethereumjs-util: 7.1.5 + ethjs-unit: 0.1.6 + number-to-bn: 1.7.0 + randombytes: 2.1.0 + utf8: 3.0.0 + dev: true + + /web3@1.2.11: + resolution: {integrity: sha512-mjQ8HeU41G6hgOYm1pmeH0mRAeNKJGnJEUzDMoerkpw7QUQT4exVREgF1MYPvL/z6vAshOXei25LE/t/Bxl8yQ==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + web3-bzz: 1.2.11 + web3-core: 1.2.11 + web3-eth: 1.2.11 + web3-eth-personal: 1.2.11 + web3-net: 1.2.11 + web3-shh: 1.2.11 + web3-utils: 1.2.11 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + optional: true + + /web3@1.7.4: + resolution: {integrity: sha512-iFGK5jO32vnXM/ASaJBaI0+gVR6uHozvYdxkdhaeOCD6HIQ4iIXadbO2atVpE9oc/H8l2MovJ4LtPhG7lIBN8A==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + web3-bzz: 1.7.4 + web3-core: 1.7.4 + web3-eth: 1.7.4 + web3-eth-personal: 1.7.4 + web3-net: 1.7.4 + web3-shh: 1.7.4 + web3-utils: 1.7.4 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /web3@1.8.0: + resolution: {integrity: sha512-sldr9stK/SALSJTgI/8qpnDuBJNMGjVR84hJ+AcdQ+MLBGLMGsCDNubCoyO6qgk1/Y9SQ7ignegOI/7BPLoiDA==} + engines: {node: '>=8.0.0'} + requiresBuild: true + dependencies: + web3-bzz: 1.8.0 + 
web3-core: 1.8.0 + web3-eth: 1.8.0 + web3-eth-personal: 1.8.0 + web3-net: 1.8.0 + web3-shh: 1.8.0 + web3-utils: 1.8.0 + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - utf-8-validate + dev: true + + /webidl-conversions@3.0.1: + resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} + dev: true + + /websocket@1.0.32: + resolution: {integrity: sha512-i4yhcllSP4wrpoPMU2N0TQ/q0O94LRG/eUQjEAamRltjQ1oT1PFFKOG4i877OlJgCG8rw6LrrowJp+TYCEWF7Q==} + engines: {node: '>=4.0.0'} + dependencies: + bufferutil: 4.0.6 + debug: 2.6.9 + es5-ext: 0.10.62 + typedarray-to-buffer: 3.1.5 + utf-8-validate: 5.0.9 + yaeti: 0.0.6 + transitivePeerDependencies: + - supports-color + dev: true + + /websocket@1.0.34: + resolution: {integrity: sha512-PRDso2sGwF6kM75QykIesBijKSVceR6jL2G8NGYyq2XrItNC2P5/qL5XeR056GhA+Ly7JMFvJb9I312mJfmqnQ==} + engines: {node: '>=4.0.0'} + dependencies: + bufferutil: 4.0.6 + debug: 2.6.9 + es5-ext: 0.10.62 + typedarray-to-buffer: 3.1.5 + utf-8-validate: 5.0.9 + yaeti: 0.0.6 + transitivePeerDependencies: + - supports-color + dev: true + + /whatwg-fetch@2.0.4: + resolution: {integrity: sha512-dcQ1GWpOD/eEQ97k66aiEVpNnapVj90/+R+SXTPYGHpYBBypfKJEQjLrvMZ7YXbKm21gXd4NcuxUTjiv1YtLng==} + dev: true + + /whatwg-url@5.0.0: + resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + dependencies: + tr46: 0.0.3 + webidl-conversions: 3.0.1 + dev: true + + /which-boxed-primitive@1.0.2: + resolution: {integrity: sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==} + dependencies: + is-bigint: 1.0.4 + is-boolean-object: 1.1.2 + is-number-object: 1.0.7 + is-string: 1.0.7 + is-symbol: 1.0.3 + dev: true + + /which-module@1.0.0: + resolution: {integrity: sha512-F6+WgncZi/mJDrammbTuHe1q0R5hOXv/mBaiNA2TCNT/LTHusX0V+CJnj9XT8ki5ln2UZyyddDgHfCzyrOH7MQ==} + dev: true + + /which-module@2.0.0: + 
resolution: {integrity: sha512-B+enWhmw6cjfVC7kS8Pj9pCrKSc5txArRyaYGe088shv/FGWH+0Rjx/xPgtsWfsUtS27FkP697E4DDhgrgoc0Q==} + dev: true + + /which-typed-array@1.1.13: + resolution: {integrity: sha512-P5Nra0qjSncduVPEAr7xhoF5guty49ArDTwzJ/yNuPIbZppyRxFQsRCWrocxIY+CnMVG+qfbU2FmDKyvSGClow==} + engines: {node: '>= 0.4'} + dependencies: + available-typed-arrays: 1.0.5 + call-bind: 1.0.5 + for-each: 0.3.3 + gopd: 1.0.1 + has-tostringtag: 1.0.0 + dev: true + + /which-typed-array@1.1.4: + resolution: {integrity: sha512-49E0SpUe90cjpoc7BOJwyPHRqSAd12c10Qm2amdEZrJPCY2NDxaW01zHITrem+rnETY3dwrbH3UUrUwagfCYDA==} + engines: {node: '>= 0.4'} + dependencies: + available-typed-arrays: 1.0.5 + call-bind: 1.0.5 + es-abstract: 1.22.3 + foreach: 2.0.5 + function-bind: 1.1.2 + has-symbols: 1.0.3 + is-typed-array: 1.1.12 + dev: true + + /which@1.3.1: + resolution: {integrity: sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==} + hasBin: true + dependencies: + isexe: 2.0.0 + dev: true + + /which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + dependencies: + isexe: 2.0.0 + dev: true + + /wide-align@1.1.3: + resolution: {integrity: sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==} + dependencies: + string-width: 2.1.1 + dev: true + + /window-size@0.2.0: + resolution: {integrity: sha512-UD7d8HFA2+PZsbKyaOCEy8gMh1oDtHgJh1LfgjQ4zVXmYjAT/kvz3PueITKuqDiIXQe7yzpPnxX3lNc+AhQMyw==} + engines: {node: '>= 0.10.0'} + hasBin: true + dev: true + + /word-wrap@1.2.3: + resolution: {integrity: sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==} + engines: {node: '>=0.10.0'} + dev: true + + /wordwrap@1.0.0: + resolution: {integrity: sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==} + dev: true + + 
/wordwrapjs@4.0.1: + resolution: {integrity: sha512-kKlNACbvHrkpIw6oPeYDSmdCTu2hdMHoyXLTcUKala++lx5Y+wjJ/e474Jqv5abnVmwxw08DiTuHmw69lJGksA==} + engines: {node: '>=8.0.0'} + dependencies: + reduce-flatten: 2.0.0 + typical: 5.2.0 + dev: true + + /workerpool@6.2.1: + resolution: {integrity: sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw==} + dev: true + + /wrap-ansi@2.1.0: + resolution: {integrity: sha512-vAaEaDM946gbNpH5pLVNR+vX2ht6n0Bt3GXwVB1AuAqZosOvHNF3P7wDnh8KLkSqgUh0uh77le7Owgoz+Z9XBw==} + engines: {node: '>=0.10.0'} + dependencies: + string-width: 1.0.2 + strip-ansi: 3.0.1 + dev: true + + /wrap-ansi@5.1.0: + resolution: {integrity: sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==} + engines: {node: '>=6'} + dependencies: + ansi-styles: 3.2.1 + string-width: 3.1.0 + strip-ansi: 5.2.0 + dev: true + + /wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + dev: true + + /wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + dev: true + + /ws@3.3.3: + resolution: {integrity: sha512-nnWLa/NwZSt4KQJu51MYlCcSQ5g7INpOrOMt4XV8j4dqTXdmlUmSHQ8/oLC069ckre0fRsgfvsKwbTdtKLCDkA==} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: ^5.0.2 + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + dependencies: + async-limiter: 1.0.1 + safe-buffer: 5.1.2 + ultron: 1.1.1 + dev: true + + /ws@5.2.3: + resolution: {integrity: sha512-jZArVERrMsKUatIdnLzqvcfydI85dvd/Fp1u/VOpfdDWQ4c9qWXe+VIeAbQ5FrDwciAkr+lzofXLz3Kuf26AOA==} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: ^5.0.2 + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + 
dependencies: + async-limiter: 1.0.1 + dev: true + + /ws@7.4.6: + resolution: {integrity: sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A==} + engines: {node: '>=8.3.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: ^5.0.2 + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + + /ws@7.5.9: + resolution: {integrity: sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==} + engines: {node: '>=8.3.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: ^5.0.2 + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + dev: true + + /xhr-request-promise@0.1.2: + resolution: {integrity: sha512-yAQlCCNLwPgoGxw4k+IdRp1uZ7MZibS4kFw2boSUOesjnmvcxxj73j5a8KavE5Bzas++8P49OpJ4m8qjWGiDsA==} + dependencies: + xhr-request: 1.1.0 + dev: true + + /xhr-request@1.1.0: + resolution: {integrity: sha512-Y7qzEaR3FDtL3fP30k9wO/e+FBnBByZeybKOhASsGP30NIkRAAkKD/sCnLvgEfAIEC1rcmK7YG8f4oEnIrrWzA==} + dependencies: + buffer-to-arraybuffer: 0.0.5 + object-assign: 4.1.1 + query-string: 5.1.1 + simple-get: 2.8.1 + timed-out: 4.0.1 + url-set-query: 1.0.0 + xhr: 2.5.0 + dev: true + + /xhr2-cookies@1.1.0: + resolution: {integrity: sha512-hjXUA6q+jl/bd8ADHcVfFsSPIf+tyLIjuO9TwJC9WI6JP2zKcS7C+p56I9kCLLsaCiNT035iYvEUUzdEFj/8+g==} + dependencies: + cookiejar: 2.1.2 + dev: true + + /xhr@2.5.0: + resolution: {integrity: sha512-4nlO/14t3BNUZRXIXfXe+3N6w3s1KoxcJUUURctd64BLRe67E4gRwp4PjywtDY72fXpZ1y6Ch0VZQRY/gMPzzQ==} + dependencies: + global: 4.3.2 + is-function: 1.0.1 + parse-headers: 2.0.3 + xtend: 4.0.2 + dev: true + + /xmlhttprequest@1.8.0: + resolution: {integrity: sha512-58Im/U0mlVBLM38NdZjHyhuMtCqa61469k2YP/AaPbvCoV9aQGUpbJBj1QRm2ytRiVQBD/fsw7L2bJGDVQswBA==} + engines: {node: '>=0.4.0'} + dev: true + + /xtend@2.1.2: + resolution: {integrity: 
sha512-vMNKzr2rHP9Dp/e1NQFnLQlwlhp9L/LfvnsVdHxN1f+uggyVI3i08uD14GPvCToPkdsRfyPqIyYGmIk58V98ZQ==} + engines: {node: '>=0.4'} + dependencies: + object-keys: 0.4.0 + dev: true + + /xtend@4.0.2: + resolution: {integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==} + engines: {node: '>=0.4'} + dev: true + + /y18n@3.2.2: + resolution: {integrity: sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ==} + dev: true + + /y18n@4.0.3: + resolution: {integrity: sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==} + dev: true + + /y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: '>=10'} + dev: true + + /yaeti@0.0.6: + resolution: {integrity: sha512-MvQa//+KcZCUkBTIC9blM+CU9J2GzuTytsOUwf2lidtvkx/6gnEp1QvJv34t9vdjhFmha/mUiNDbN0D0mJWdug==} + engines: {node: '>=0.10.32'} + dev: true + + /yallist@3.1.1: + resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} + dev: true + + /yallist@4.0.0: + resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} + dev: true + + /yargs-parser@13.1.2: + resolution: {integrity: sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==} + dependencies: + camelcase: 5.3.1 + decamelize: 1.2.0 + dev: true + + /yargs-parser@2.4.1: + resolution: {integrity: sha512-9pIKIJhnI5tonzG6OnCFlz/yln8xHYcGl+pn3xR0Vzff0vzN1PbNRaelgfgRUwZ3s4i3jvxT9WhmUGL4whnasA==} + dependencies: + camelcase: 3.0.0 + lodash.assign: 4.2.0 + dev: true + + /yargs-parser@20.2.4: + resolution: {integrity: sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==} + engines: {node: '>=10'} + dev: true + + /yargs-unparser@1.6.0: + resolution: 
{integrity: sha512-W9tKgmSn0DpSatfri0nx52Joq5hVXgeLiqR/5G0sZNDoLZFOr/xjBUDcShCOGNsBnEMNo1KAMBkTej1Hm62HTw==} + engines: {node: '>=6'} + dependencies: + flat: 4.1.1 + lodash: 4.17.21 + yargs: 13.3.2 + dev: true + + /yargs-unparser@2.0.0: + resolution: {integrity: sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==} + engines: {node: '>=10'} + dependencies: + camelcase: 6.3.0 + decamelize: 4.0.0 + flat: 5.0.2 + is-plain-obj: 2.1.0 + dev: true + + /yargs@13.3.2: + resolution: {integrity: sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==} + dependencies: + cliui: 5.0.0 + find-up: 3.0.0 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + require-main-filename: 2.0.0 + set-blocking: 2.0.0 + string-width: 3.1.0 + which-module: 2.0.0 + y18n: 4.0.3 + yargs-parser: 13.1.2 + dev: true + + /yargs@16.2.0: + resolution: {integrity: sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==} + engines: {node: '>=10'} + dependencies: + cliui: 7.0.4 + escalade: 3.1.1 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 20.2.4 + dev: true + + /yargs@4.8.1: + resolution: {integrity: sha512-LqodLrnIDM3IFT+Hf/5sxBnEGECrfdC1uIbgZeJmESCSo4HoCAaKEus8MylXHAkdacGc0ye+Qa+dpkuom8uVYA==} + dependencies: + cliui: 3.2.0 + decamelize: 1.2.0 + get-caller-file: 1.0.3 + lodash.assign: 4.2.0 + os-locale: 1.4.0 + read-pkg-up: 1.0.1 + require-directory: 2.1.1 + require-main-filename: 1.0.1 + set-blocking: 2.0.0 + string-width: 1.0.2 + which-module: 1.0.0 + window-size: 0.2.0 + y18n: 3.2.2 + yargs-parser: 2.4.1 + dev: true + + /yn@3.1.1: + resolution: {integrity: sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==} + engines: {node: '>=6'} + dev: true + + /yocto-queue@0.1.0: + resolution: {integrity: 
sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + dev: true + + github.com/ethereumjs/ethereumjs-abi/ee3994657fa7a427238e6ba92a84d0b529bbcde0: + resolution: {tarball: https://codeload.github.com/ethereumjs/ethereumjs-abi/tar.gz/ee3994657fa7a427238e6ba92a84d0b529bbcde0} + name: ethereumjs-abi + version: 0.6.8 + dependencies: + bn.js: 4.12.0 + ethereumjs-util: 6.2.1 + dev: true + + github.com/goplugin/plugin-solhint-rules/1b4c0c2663fcd983589d4f33a2e73908624ed43c: + resolution: {tarball: https://codeload.github.com/goplugin/plugin-solhint-rules/tar.gz/1b4c0c2663fcd983589d4f33a2e73908624ed43c} + name: '@plugin/solhint-plugin-plugin-solidity' + version: 1.2.0 + dev: true diff --git a/contracts/remappings.txt b/contracts/remappings.txt new file mode 100644 index 00000000..a9d24257 --- /dev/null +++ b/contracts/remappings.txt @@ -0,0 +1,7 @@ +ds-test/=foundry-lib/forge-std/lib/ds-test/src/ +forge-std/=foundry-lib/forge-std/src/ + +@openzeppelin/=node_modules/@openzeppelin/ +hardhat/=node_modules/hardhat/ +@eth-optimism/=node_modules/@eth-optimism/ +@scroll-tech/=node_modules/@scroll-tech/ diff --git a/contracts/scripts/generate-automation-master-interface.ts b/contracts/scripts/generate-automation-master-interface.ts new file mode 100644 index 00000000..78c09cf2 --- /dev/null +++ b/contracts/scripts/generate-automation-master-interface.ts @@ -0,0 +1,50 @@ +/** + * @description this script generates a master interface for interacting with the automation registry + * @notice run this script with pnpm ts-node ./scripts/generate-automation-master-interface.ts + */ +import { AutomationRegistry2_2__factory as Registry } from '../typechain/factories/AutomationRegistry2_2__factory' +import { AutomationRegistryLogicA2_2__factory as RegistryLogicA } from '../typechain/factories/AutomationRegistryLogicA2_2__factory' +import { AutomationRegistryLogicB2_2__factory as RegistryLogicB } from 
'../typechain/factories/AutomationRegistryLogicB2_2__factory' +import { utils } from 'ethers' +import fs from 'fs' +import { exec } from 'child_process' + +const dest = 'src/v0.8/automation/dev/interfaces/v2_2' +const srcDest = `${dest}/IAutomationRegistryMaster.sol` +const tmpDest = `${dest}/tmp.txt` + +const combinedABI = [] +const abiSet = new Set() +const abis = [Registry.abi, RegistryLogicA.abi, RegistryLogicB.abi] + +for (const abi of abis) { + for (const entry of abi) { + const id = utils.id(JSON.stringify(entry)) + if (!abiSet.has(id)) { + abiSet.add(id) + if ( + entry.type === 'function' && + (entry.name === 'checkUpkeep' || + entry.name === 'checkCallback' || + entry.name === 'simulatePerformUpkeep') + ) { + entry.stateMutability = 'view' // override stateMutability for check / callback / simulate functions + } + combinedABI.push(entry) + } + } +} + +const checksum = utils.id(abis.join('')) + +fs.writeFileSync(`${tmpDest}`, JSON.stringify(combinedABI)) + +const cmd = ` +cat ${tmpDest} | pnpm abi-to-sol --solidity-version ^0.8.4 --license MIT > ${srcDest} IAutomationRegistryMaster; +echo "// abi-checksum: ${checksum}" | cat - ${srcDest} > ${tmpDest} && mv ${tmpDest} ${srcDest}; +pnpm prettier --write ${srcDest}; +` + +exec(cmd) + +console.log('generated new master interface for automation registry') diff --git a/contracts/scripts/native_solc_compile_all b/contracts/scripts/native_solc_compile_all new file mode 100644 index 00000000..f4cec6ce --- /dev/null +++ b/contracts/scripts/native_solc_compile_all @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling *all* contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt + +# 6 and 7 are legacy contracts, for each other product we have a native_solc_compile_all_$product script +# These scripts can be run individually, or all together with this script. +# To add new CL products, simply write a native_solc_compile_all_$product script and add it to the list below. +for product in 6 7 automation events_mock feeds functions keystone llo-feeds logpoller operatorforwarder shared transmission vrf +do + $SCRIPTPATH/native_solc_compile_all_$product +done diff --git a/contracts/scripts/native_solc_compile_all_6 b/contracts/scripts/native_solc_compile_all_6 new file mode 100644 index 00000000..b56df231 --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_6 @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling legacy Solidity 0.6 contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.6.6" +OPTIMIZE_RUNS=1000000 + + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v0.6/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.6 \ + "$ROOT"/contracts/src/v0.6/"$1" +} + +compileContract Flags.sol +compileContract Oracle.sol +compileContract FluxAggregator.sol +compileContract VRF.sol +compileContract VRFCoordinator.sol +compileContract tests/VRFRequestIDBaseTestHelper.sol +compileContract tests/VRFTestHelper.sol +compileContract Plugin.sol +compileContract VRFRequestIDBase.sol +compileContract tests/VRFConsumer.sol +compileContract PluginClient.sol +compileContract VRFConsumerBase.sol +compileContract BlockhashStore.sol +compileContract tests/TestAPIConsumer.sol +compileContract tests/MockETHPLIAggregator.sol +compileContract tests/MockGASAggregator.sol \ No newline at end of file diff --git a/contracts/scripts/native_solc_compile_all_7 b/contracts/scripts/native_solc_compile_all_7 new file mode 100644 index 00000000..fd64d9ff --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_7 @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling legacy Solidity 0.7 contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.7.6" +OPTIMIZE_RUNS=1000000 + + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v0.7/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.7 \ + "$ROOT"/contracts/src/v0.7/"$1" +} + +compileContract tests/MultiWordConsumer.sol +compileContract Operator.sol +compileContract AuthorizedForwarder.sol +compileContract AuthorizedReceiver.sol +compileContract OperatorFactory.sol +compileContract tests/Consumer.sol +compileContract tests/VRFCoordinatorMock.sol + +# Keeper/Automation +compileContract KeeperRegistry1_1.sol +compileContract KeeperRegistry1_1Mock.sol +compileContract UpkeepRegistrationRequests.sol +compileContract tests/UpkeepPerformCounterRestrictive.sol +compileContract tests/UpkeepCounter.sol \ No newline at end of file diff --git a/contracts/scripts/native_solc_compile_all_automation b/contracts/scripts/native_solc_compile_all_automation new file mode 100644 index 00000000..6194d3cb --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_automation @@ -0,0 +1,94 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling Automation contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.8.6" +OPTIMIZE_RUNS=1000000 + + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc @openzeppelin/="$ROOT"/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8,"$ROOT"/contracts/node_modules\ + "$ROOT"/contracts/src/v0.8/"$1" +} + +compileContract automation/upkeeps/CronUpkeepFactory.sol +compileContract automation/v1_2/KeeperRegistrar1_2.sol +compileContract automation/v1_2/KeeperRegistry1_2.sol +compileContract automation/v1_2/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.sol +compileContract automation/v1_3/KeeperRegistry1_3.sol +compileContract automation/v1_3/KeeperRegistryLogic1_3.sol +compileContract automation/v2_0/KeeperRegistrar2_0.sol +compileContract automation/v2_0/KeeperRegistry2_0.sol +compileContract automation/v2_0/KeeperRegistryLogic2_0.sol +compileContract automation/UpkeepTranscoder.sol +compileContract automation/mocks/MockAggregatorProxy.sol +compileContract automation/testhelpers/LogUpkeepCounter.sol +compileContract automation/testhelpers/SimpleLogUpkeepCounter.sol + +compileContract automation/mocks/KeeperRegistrar1_2Mock.sol +compileContract automation/mocks/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.sol + +SOLC_VERSION="0.8.16" + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + +# v0.8.16 +compileContract automation/v2_1/AutomationRegistrar2_1.sol +compileContract 
automation/v2_1/KeeperRegistry2_1.sol +compileContract automation/v2_1/KeeperRegistryLogicA2_1.sol +compileContract automation/v2_1/KeeperRegistryLogicB2_1.sol +compileContract automation/v2_1/AutomationUtils2_1.sol +compileContract automation/interfaces/v2_1/IKeeperRegistryMaster.sol + +compileContract automation/interfaces/ILogAutomation.sol +compileContract automation/AutomationForwarderLogic.sol +compileContract automation/testhelpers/LogTriggeredStreamsLookup.sol +compileContract automation/testhelpers/DummyProtocol.sol + +compileContract automation/testhelpers/KeeperConsumer.sol +compileContract automation/testhelpers/KeeperConsumerPerformance.sol +compileContract automation/testhelpers/PerformDataChecker.sol +compileContract automation/testhelpers/UpkeepPerformCounterRestrictive.sol +compileContract automation/testhelpers/UpkeepCounter.sol + +compileContract automation/interfaces/StreamsLookupCompatibleInterface.sol + +compileContract tests/VerifiableLoadUpkeep.sol +compileContract tests/VerifiableLoadStreamsLookupUpkeep.sol +compileContract tests/VerifiableLoadLogTriggerUpkeep.sol +compileContract tests/AutomationConsumerBenchmark.sol +compileContract tests/StreamsLookupUpkeep.sol + +SOLC_VERSION="0.8.19" + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + +# v0.8.19 +compileContract automation/dev/v2_2/AutomationRegistrar2_2.sol +compileContract automation/dev/v2_2/AutomationRegistry2_2.sol +compileContract automation/dev/v2_2/AutomationRegistryLogicA2_2.sol +compileContract automation/dev/v2_2/AutomationRegistryLogicB2_2.sol +compileContract automation/dev/v2_2/AutomationUtils2_2.sol +compileContract automation/dev/interfaces/v2_2/IAutomationRegistryMaster.sol \ No newline at end of file diff --git a/contracts/scripts/native_solc_compile_all_events_mock b/contracts/scripts/native_solc_compile_all_events_mock new file mode 100644 index 00000000..68e8bdfa --- /dev/null +++ 
b/contracts/scripts/native_solc_compile_all_events_mock @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling Events Mock contracts... │" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.8.6" +OPTIMIZE_RUNS=1000000 + + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\ + "$ROOT"/contracts/src/v0.8/"$1" +} + +# This script is used to compile the contracts for the Events Mocks used in the tests. +compileContract mocks/FunctionsOracleEventsMock.sol +compileContract mocks/FunctionsBillingRegistryEventsMock.sol \ No newline at end of file diff --git a/contracts/scripts/native_solc_compile_all_feeds b/contracts/scripts/native_solc_compile_all_feeds new file mode 100644 index 00000000..2dbff2d2 --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_feeds @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling Feeds contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.8.6" +OPTIMIZE_RUNS=1000000 + + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\ + "$ROOT"/contracts/src/v0.8/"$1" +} + +# Aggregators +compileContract shared/interfaces/AggregatorV2V3Interface.sol +compileContract Plugin.sol +compileContract PluginClient.sol diff --git a/contracts/scripts/native_solc_compile_all_functions b/contracts/scripts/native_solc_compile_all_functions new file mode 100644 index 00000000..26e4c0c6 --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_functions @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling Functions contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +OPTIMIZE_RUNS=1000000 + + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../ && pwd -P )" +python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt + +compileContract () { + solc @openzeppelin/=$ROOT/node_modules/@openzeppelin/ \ + --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o $ROOT/solc/v$SOLC_VERSION/functions/$1 \ + --abi --bin \ + --allow-paths $ROOT/src/v0.8,$ROOT/node_modules \ + $ROOT/src/v0.8/functions/$2 +} + +############################ +# Version 1 (Mainnet Preview) +############################ + +SOLC_VERSION="0.8.19" +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + +compileContract v1_X dev/v1_X/libraries/FunctionsRequest.sol +compileContract v1_X dev/v1_X/FunctionsRouter.sol +compileContract v1_X dev/v1_X/FunctionsCoordinator.sol +compileContract v1_X dev/v1_X/accessControl/TermsOfServiceAllowList.sol +compileContract v1_X dev/v1_X/example/FunctionsClientExample.sol + +# Test helpers +compileContract v1_X tests/v1_X/testhelpers/FunctionsCoordinatorTestHelper.sol +compileContract v1_X tests/v1_X/testhelpers/FunctionsLoadTestClient.sol + +# Mocks +compileContract v1_X dev/v1_X/mocks/FunctionsV1EventsMock.sol diff --git a/contracts/scripts/native_solc_compile_all_keystone b/contracts/scripts/native_solc_compile_all_keystone new file mode 100644 index 00000000..3f4d33d6 --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_keystone @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling Keystone contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.8.19" +OPTIMIZE_RUNS=1000000 + + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\ + "$ROOT"/contracts/src/v0.8/"$1" +} + +compileContract keystone/KeystoneForwarder.sol diff --git a/contracts/scripts/native_solc_compile_all_l2ep b/contracts/scripts/native_solc_compile_all_l2ep new file mode 100644 index 00000000..1b9f5fb6 --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_l2ep @@ -0,0 +1,70 @@ +#!/usr/bin/env bash + +########### +# Logging # +########### + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling L2EP contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +###################### +# Helper Variable(s) # +###################### + +export SOLC_VERSION="0.8.19" + +SCRIPTPATH="$( + cd "$(dirname "$0")" >/dev/null 2>&1 + pwd -P +)" + +ROOT="$( + cd "$(dirname "$0")" >/dev/null 2>&1 + cd ../ && pwd -P +)" + +###################### +# Helper Function(s) # +###################### + +compileContract() { + local optimize_runs=1000000 + local version="$1" + local srcpath="$2" + solc \ + @openzeppelin/=$ROOT/node_modules/@openzeppelin/ \ + @eth-optimism/=$ROOT/node_modules/@eth-optimism/ \ + @scroll-tech/=$ROOT/node_modules/@scroll-tech/ \ + --overwrite --optimize --optimize-runs $optimize_runs --metadata-hash none \ + -o $ROOT/solc/v$SOLC_VERSION/l2ep/"$version" \ + --abi --bin \ + --allow-paths $ROOT/src/v0.8,$ROOT/node_modules \ + $ROOT/src/v0.8/l2ep/"$srcpath" +} + +################# +# Version 1.0.0 # +################# + +python3 -m pip install --require-hashes -r $SCRIPTPATH/requirements.txt + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION + +compileContract v1_0_0 dev/arbitrum/ArbitrumValidator.sol +compileContract v1_0_0 dev/arbitrum/ArbitrumSequencerUptimeFeed.sol +compileContract v1_0_0 dev/arbitrum/ArbitrumCrossDomainForwarder.sol +compileContract v1_0_0 dev/arbitrum/ArbitrumCrossDomainGovernor.sol + +compileContract v1_0_0 dev/optimism/OptimismValidator.sol +compileContract v1_0_0 dev/optimism/OptimismSequencerUptimeFeed.sol +compileContract v1_0_0 dev/optimism/OptimismCrossDomainForwarder.sol +compileContract v1_0_0 dev/optimism/OptimismCrossDomainGovernor.sol + +compileContract v1_0_0 dev/scroll/ScrollValidator.sol +compileContract v1_0_0 dev/scroll/ScrollSequencerUptimeFeed.sol +compileContract v1_0_0 dev/scroll/ScrollCrossDomainForwarder.sol +compileContract v1_0_0 dev/scroll/ScrollCrossDomainGovernor.sol diff --git a/contracts/scripts/native_solc_compile_all_llo-feeds b/contracts/scripts/native_solc_compile_all_llo-feeds new file 
mode 100644 index 00000000..eb17f93b --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_llo-feeds @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling Low Latency Oracle contracts... │" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.8.19" +OPTIMIZE_RUNS=1000000 + + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\ + "$ROOT"/contracts/src/v0.8/"$1" +} + +compileContract llo-feeds/Verifier.sol +compileContract llo-feeds/VerifierProxy.sol +compileContract llo-feeds/FeeManager.sol +compileContract llo-feeds/RewardManager.sol + + +# Test | Mocks +compileContract llo-feeds/test/mocks/ErroredVerifier.sol +compileContract llo-feeds/test/mocks/ExposedVerifier.sol + +# LLO +compileContract llo-feeds/dev/ChannelConfigStore.sol +compileContract llo-feeds/dev/ChannelVerifier.sol +compileContract llo-feeds/dev/test/mocks/ExposedChannelVerifier.sol diff --git a/contracts/scripts/native_solc_compile_all_logpoller b/contracts/scripts/native_solc_compile_all_logpoller new file mode 100644 index 00000000..e8ea2a2b --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_logpoller @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling LogPoller contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.8.19" +OPTIMIZE_RUNS=1000000 + + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\ + "$ROOT"/contracts/src/v0.8/"$1" +} + + +compileContract tests/LogEmitter.sol +compileContract tests/VRFLogEmitter.sol \ No newline at end of file diff --git a/contracts/scripts/native_solc_compile_all_ocr2vrf b/contracts/scripts/native_solc_compile_all_ocr2vrf new file mode 100644 index 00000000..e03be65e --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_ocr2vrf @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling OCR2 VRF contracts... │" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.8.19" +OPTIMIZE_RUNS=1000000 +# The VRF contracts are not contained in the `plugin` repository. +# Change me. +FOLDER="ocr2vrf-origin" + +echo "Compiling OCR2VRF contracts..." 
+ +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1; cd ../../ && pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc --overwrite --optimize --optimize-runs "$2" --metadata-hash none \ + -o "$ROOT"/contracts/solc/v0.8.19/"$contract" \ + --abi --bin \ + --allow-paths "$ROOT"/../$FOLDER/contracts \ + "$ROOT"/"$1" +} + +# OCR2VRF +compileContract ../$FOLDER/contracts/DKG.sol $OPTIMIZE_RUNS +compileContract ../$FOLDER/contracts/VRFBeacon.sol $OPTIMIZE_RUNS +compileContract ../$FOLDER/contracts/VRFCoordinator.sol 1 +compileContract ../$FOLDER/contracts/test/TestBeaconVRFConsumer.sol $OPTIMIZE_RUNS +compileContract ../$FOLDER/contracts/test/LoadTestBeaconVRFConsumer.sol $OPTIMIZE_RUNS diff --git a/contracts/scripts/native_solc_compile_all_operatorforwarder b/contracts/scripts/native_solc_compile_all_operatorforwarder new file mode 100644 index 00000000..2d455994 --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_operatorforwarder @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling Operator Forwarder contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.8.19" +OPTIMIZE_RUNS=1000000 + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc @openzeppelin/="$ROOT"/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8,"$ROOT"/contracts/node_modules\ + "$ROOT"/contracts/src/v0.8/"$1" +} + +# Contracts +compileContract operatorforwarder/dev/AuthorizedForwarder.sol +compileContract operatorforwarder/dev/AuthorizedReceiver.sol +compileContract operatorforwarder/dev/Operator.sol +compileContract operatorforwarder/dev/OperatorFactory.sol + diff --git a/contracts/scripts/native_solc_compile_all_shared b/contracts/scripts/native_solc_compile_all_shared new file mode 100644 index 00000000..eeaa9902 --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_shared @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling shared contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.8.19" +OPTIMIZE_RUNS=1000000 + + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\ + "$ROOT"/contracts/src/v0.8/"$1" +} + +compileContract shared/token/ERC677/BurnMintERC677.sol +compileContract shared/token/ERC677/LinkToken.sol +compileContract shared/mocks/WERC20Mock.sol +compileContract vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/ERC20.sol +compileContract shared/test/helpers/ChainReaderTestContract.sol diff --git a/contracts/scripts/native_solc_compile_all_transmission b/contracts/scripts/native_solc_compile_all_transmission new file mode 100644 index 00000000..281fa7ae --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_transmission @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling Transmission contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.8.15" +OPTIMIZE_RUNS=1000000 + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8\ + "$ROOT"/contracts/src/v0.8/"$1" +} + +# Contracts +compileContract transmission/dev/ERC-4337/SCA.sol +compileContract transmission/dev/ERC-4337/Paymaster.sol +compileContract transmission/dev/ERC-4337/SmartContractAccountFactory.sol + +# Testhelpers +compileContract transmission/dev/testhelpers/SmartContractAccountHelper.sol +compileContract transmission/dev/testhelpers/Greeter.sol + +# Vendor +compileContract vendor/entrypoint/core/EntryPoint.sol diff --git a/contracts/scripts/native_solc_compile_all_vrf b/contracts/scripts/native_solc_compile_all_vrf new file mode 100644 index 00000000..eda3b991 --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_vrf @@ -0,0 +1,110 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling VRF contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.8.6" +OPTIMIZE_RUNS=1000000 + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc @openzeppelin/="$ROOT"/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8,"$ROOT"/contracts/node_modules\ + "$ROOT"/contracts/src/v0.8/"$1" +} + +compileContractAltOpts () { + local contract + contract=$(basename "$1" ".sol") + + solc @openzeppelin/="$ROOT"/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs "$2" --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8,"$ROOT"/contracts/node_modules\ + "$ROOT"/contracts/src/v0.8/"$1" +} + +# VRF +compileContract vrf/VRFRequestIDBase.sol +compileContract vrf/VRFConsumerBase.sol +compileContract vrf/testhelpers/VRFConsumer.sol +compileContract vrf/testhelpers/VRFRequestIDBaseTestHelper.sol +compileContract vrf/mocks/VRFCoordinatorMock.sol + +# VRF V2 +compileContract vrf/VRFConsumerBaseV2.sol +compileContract vrf/testhelpers/ChainSpecificUtilHelper.sol +compileContract vrf/testhelpers/VRFConsumerV2.sol +compileContract vrf/testhelpers/VRFMaliciousConsumerV2.sol +compileContract vrf/testhelpers/VRFTestHelper.sol +compileContract vrf/testhelpers/VRFV2RevertingExample.sol +compileContract vrf/testhelpers/VRFV2ProxyAdmin.sol +compileContract vrf/testhelpers/VRFV2TransparentUpgradeableProxy.sol +compileContract 
vrf/testhelpers/VRFConsumerV2UpgradeableExample.sol +compileContract vrf/BatchBlockhashStore.sol +compileContract vrf/BatchVRFCoordinatorV2.sol +compileContract vrf/testhelpers/VRFCoordinatorV2TestHelper.sol +compileContractAltOpts vrf/VRFCoordinatorV2.sol 10000 +compileContract vrf/mocks/VRFCoordinatorV2Mock.sol +compileContract vrf/VRFOwner.sol +compileContract vrf/dev/VRFSubscriptionBalanceMonitor.sol +compileContract vrf/KeepersVRFConsumer.sol + +# VRF V2Plus +compileContract vrf/dev/interfaces/IVRFCoordinatorV2PlusInternal.sol +compileContract vrf/dev/testhelpers/VRFV2PlusConsumerExample.sol +compileContractAltOpts vrf/dev/VRFCoordinatorV2_5.sol 50 +compileContract vrf/dev/BatchVRFCoordinatorV2Plus.sol +compileContract vrf/dev/VRFV2PlusWrapper.sol +compileContract vrf/dev/testhelpers/VRFConsumerV2PlusUpgradeableExample.sol +compileContract vrf/dev/testhelpers/VRFMaliciousConsumerV2Plus.sol +compileContract vrf/dev/testhelpers/VRFV2PlusExternalSubOwnerExample.sol +compileContract vrf/dev/testhelpers/VRFV2PlusSingleConsumerExample.sol +compileContract vrf/dev/testhelpers/VRFV2PlusWrapperConsumerExample.sol +compileContract vrf/dev/testhelpers/VRFV2PlusRevertingExample.sol +compileContract vrf/dev/testhelpers/VRFConsumerV2PlusUpgradeableExample.sol +compileContract vrf/dev/testhelpers/VRFV2PlusMaliciousMigrator.sol +compileContract vrf/dev/libraries/VRFV2PlusClient.sol +compileContract vrf/dev/testhelpers/VRFCoordinatorV2Plus_V2Example.sol +compileContract vrf/dev/BlockhashStore.sol +compileContract vrf/dev/TrustedBlockhashStore.sol +compileContract vrf/dev/testhelpers/VRFV2PlusLoadTestWithMetrics.sol +compileContractAltOpts vrf/dev/testhelpers/VRFCoordinatorV2PlusUpgradedVersion.sol 5 +compileContract vrf/dev/testhelpers/VRFV2PlusWrapperLoadTestConsumer.sol + +# VRF V2 Wrapper +compileContract vrf/VRFV2Wrapper.sol +compileContract vrf/interfaces/VRFV2WrapperInterface.sol +compileContract vrf/VRFV2WrapperConsumerBase.sol +compileContract 
vrf/testhelpers/VRFV2WrapperConsumerExample.sol +compileContract vrf/testhelpers/VRFV2WrapperLoadTestConsumer.sol +compileContract vrf/testhelpers/VRFv2Consumer.sol + +# VRF Consumers and Mocks +compileContract vrf/testhelpers/VRFExternalSubOwnerExample.sol +compileContract vrf/testhelpers/VRFSingleConsumerExample.sol +compileContract vrf/testhelpers/VRFOwnerlessConsumerExample.sol +compileContract vrf/testhelpers/VRFLoadTestOwnerlessConsumer.sol +compileContract vrf/testhelpers/VRFLoadTestExternalSubOwner.sol +compileContract vrf/testhelpers/VRFV2LoadTestWithMetrics.sol +compileContract vrf/testhelpers/VRFV2OwnerTestConsumer.sol +compileContractAltOpts vrf/testhelpers/VRFCoordinatorTestV2.sol 10000 +compileContract vrf/testhelpers/VRFMockETHPLIAggregator.sol + +# Helper contracts +compileContract vrf/interfaces/IAuthorizedReceiver.sol +compileContract vrf/interfaces/VRFCoordinatorV2Interface.sol +compileContract vrf/interfaces/VRFV2WrapperInterface.sol diff --git a/contracts/scripts/native_solc_compile_all_vrfv2plus b/contracts/scripts/native_solc_compile_all_vrfv2plus new file mode 100644 index 00000000..f25d851b --- /dev/null +++ b/contracts/scripts/native_solc_compile_all_vrfv2plus @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +set -e + +echo " ┌──────────────────────────────────────────────┐" +echo " │ Compiling VRF contracts... 
│" +echo " └──────────────────────────────────────────────┘" + +SOLC_VERSION="0.8.6" +OPTIMIZE_RUNS=1000000 + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd ../../ && pwd -P )" +python3 -m pip install --require-hashes -r "$SCRIPTPATH"/requirements.txt + +solc-select install $SOLC_VERSION +solc-select use $SOLC_VERSION +export SOLC_VERSION=$SOLC_VERSION + +compileContract () { + local contract + contract=$(basename "$1" ".sol") + + solc @openzeppelin/="$ROOT"/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs $OPTIMIZE_RUNS --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8,"$ROOT"/contracts/node_modules\ + "$ROOT"/contracts/src/v0.8/"$1" +} + +compileContractAltOpts () { + local contract + contract=$(basename "$1" ".sol") + + solc @openzeppelin/="$ROOT"/contracts/node_modules/@openzeppelin/ --overwrite --optimize --optimize-runs "$2" --metadata-hash none \ + -o "$ROOT"/contracts/solc/v$SOLC_VERSION/"$contract" \ + --abi --bin --allow-paths "$ROOT"/contracts/src/v0.8,"$ROOT"/contracts/node_modules\ + "$ROOT"/contracts/src/v0.8/"$1" +} + +# VRF +compileContract vrf/VRFRequestIDBase.sol +compileContract vrf/VRFConsumerBase.sol +compileContract vrf/testhelpers/VRFConsumer.sol +compileContract vrf/testhelpers/VRFRequestIDBaseTestHelper.sol +compileContract vrf/mocks/VRFCoordinatorMock.sol + +# VRF V2Plus +compileContract vrf/dev/interfaces/IVRFCoordinatorV2PlusInternal.sol +compileContract vrf/dev/testhelpers/VRFV2PlusConsumerExample.sol +compileContractAltOpts vrf/dev/VRFCoordinatorV2_5.sol 50 +compileContract vrf/dev/BatchVRFCoordinatorV2Plus.sol +compileContract vrf/dev/VRFV2PlusWrapper.sol +compileContract vrf/dev/testhelpers/VRFConsumerV2PlusUpgradeableExample.sol +compileContract vrf/dev/testhelpers/VRFMaliciousConsumerV2Plus.sol +compileContract 
vrf/dev/testhelpers/VRFV2PlusExternalSubOwnerExample.sol +compileContract vrf/dev/testhelpers/VRFV2PlusSingleConsumerExample.sol +compileContract vrf/dev/testhelpers/VRFV2PlusWrapperConsumerExample.sol +compileContract vrf/dev/testhelpers/VRFV2PlusRevertingExample.sol +compileContract vrf/dev/testhelpers/VRFConsumerV2PlusUpgradeableExample.sol +compileContract vrf/dev/testhelpers/VRFV2PlusMaliciousMigrator.sol +compileContract vrf/dev/libraries/VRFV2PlusClient.sol +compileContract vrf/dev/testhelpers/VRFCoordinatorV2Plus_V2Example.sol +compileContract vrf/dev/BlockhashStore.sol +compileContract vrf/dev/TrustedBlockhashStore.sol +compileContract vrf/dev/testhelpers/VRFV2PlusLoadTestWithMetrics.sol +compileContractAltOpts vrf/dev/testhelpers/VRFCoordinatorV2PlusUpgradedVersion.sol 5 +compileContract vrf/dev/testhelpers/VRFV2PlusWrapperLoadTestConsumer.sol diff --git a/contracts/scripts/prepublish_generate_abi_folder b/contracts/scripts/prepublish_generate_abi_folder new file mode 100644 index 00000000..36e58faf --- /dev/null +++ b/contracts/scripts/prepublish_generate_abi_folder @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# IMPORTANT: Ensure hardhat-abi-exporter package is installed and `hardhat compile` is run before running this script. +# +# This script copies abi json files generated by hardhat-abi-exporter from `../abi/src/v0.x/` to `../abi/v0.x/` +# then converts them from .json to .abi + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; cd .. 
&& pwd -P )" +TARGET="abi" + +# For each version directory in src, copy the json files to the correct location +versions=( v0.4 v0.5 v0.6 v0.7 v0.8 ) +for version in "${versions[@]}" +do + rm -rf $SCRIPTPATH/$TARGET/$version + mkdir $SCRIPTPATH/$TARGET/$version + find $SCRIPTPATH/$TARGET/src/$version -type f -name "*.json" -exec cp {} $SCRIPTPATH/$TARGET/$version/ \; +done + +# Remove the original src abis +rm -rf $SCRIPTPATH/$TARGET/src diff --git a/contracts/scripts/requirements.txt b/contracts/scripts/requirements.txt new file mode 100644 index 00000000..50cda389 --- /dev/null +++ b/contracts/scripts/requirements.txt @@ -0,0 +1,3 @@ +solc-select==1.0.4 --hash=sha256:db7b9de009af6de3a5416b80bbe5b6d636bf314703c016319b8c1231e248a6c7 +pycryptodome==3.18.0 --hash=sha256:c9adee653fc882d98956e33ca2c1fb582e23a8af7ac82fee75bd6113c55a0413 +packaging==23.1 --hash=sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f \ No newline at end of file diff --git a/contracts/src/v0.4/Aggregator.sol b/contracts/src/v0.4/Aggregator.sol new file mode 100644 index 00000000..ec620f67 --- /dev/null +++ b/contracts/src/v0.4/Aggregator.sol @@ -0,0 +1,426 @@ +pragma solidity 0.4.24; + +import "./PluginClient.sol"; +import "./interfaces/AggregatorInterface.sol"; +import "./vendor/SignedSafeMath.sol"; +import "./vendor/Ownable.sol"; +import "./vendor/SafeMathPlugin.sol"; + +/** + * @title An example Plugin contract with aggregation + * @notice Requesters can use this contract as a framework for creating + * requests to multiple Plugin nodes and running aggregation + * as the contract receives answers. 
+ */ +contract Aggregator is AggregatorInterface, PluginClient, Ownable { + using SafeMathPlugin for uint256; + using SignedSafeMath for int256; + + struct Answer { + uint128 minimumResponses; + uint128 maxResponses; + int256[] responses; + } + + event ResponseReceived(int256 indexed response, uint256 indexed answerId, address indexed sender); + + int256 private currentAnswerValue; + uint256 private updatedTimestampValue; + uint256 private latestCompletedAnswer; + uint128 public paymentAmount; + uint128 public minimumResponses; + bytes32[] public jobIds; + address[] public oracles; + + uint256 private answerCounter = 1; + mapping(address => bool) public authorizedRequesters; + mapping(bytes32 => uint256) private requestAnswers; + mapping(uint256 => Answer) private answers; + mapping(uint256 => int256) private currentAnswers; + mapping(uint256 => uint256) private updatedTimestamps; + + uint256 constant private MAX_ORACLE_COUNT = 28; + + /** + * @notice Deploy with the address of the PLI token and arrays of matching + * length containing the addresses of the oracles and their corresponding + * Job IDs. + * @dev Sets the LinkToken address for the network, addresses of the oracles, + * and jobIds in storage. + * @param _link The address of the PLI token + * @param _paymentAmount the amount of PLI to be sent to each oracle for each request + * @param _minimumResponses the minimum number of responses + * before an answer will be calculated + * @param _oracles An array of oracle addresses + * @param _jobIds An array of Job IDs + */ + constructor( + address _link, + uint128 _paymentAmount, + uint128 _minimumResponses, + address[] _oracles, + bytes32[] _jobIds + ) public Ownable() { + setPluginToken(_link); + updateRequestDetails(_paymentAmount, _minimumResponses, _oracles, _jobIds); + } + + /** + * @notice Creates a Plugin request for each oracle in the oracles array. + * @dev This example does not include request parameters. 
Reference any documentation + * associated with the Job IDs used to determine the required parameters per-request. + */ + function requestRateUpdate() + external + ensureAuthorizedRequester() + { + Plugin.Request memory request; + bytes32 requestId; + uint256 oraclePayment = paymentAmount; + + for (uint i = 0; i < oracles.length; i++) { + request = buildPluginRequest(jobIds[i], this, this.pluginCallback.selector); + requestId = sendPluginRequestTo(oracles[i], request, oraclePayment); + requestAnswers[requestId] = answerCounter; + } + answers[answerCounter].minimumResponses = minimumResponses; + answers[answerCounter].maxResponses = uint128(oracles.length); + + emit NewRound(answerCounter, msg.sender, block.timestamp); + + answerCounter = answerCounter.add(1); + } + + /** + * @notice Receives the answer from the Plugin node. + * @dev This function can only be called by the oracle that received the request. + * @param _clRequestId The Plugin request ID associated with the answer + * @param _response The answer provided by the Plugin node + */ + function pluginCallback(bytes32 _clRequestId, int256 _response) + external + { + validatePluginCallback(_clRequestId); + + uint256 answerId = requestAnswers[_clRequestId]; + delete requestAnswers[_clRequestId]; + + answers[answerId].responses.push(_response); + emit ResponseReceived(_response, answerId, msg.sender); + updateLatestAnswer(answerId); + deleteAnswer(answerId); + } + + /** + * @notice Updates the arrays of oracles and jobIds with new values, + * overwriting the old values. + * @dev Arrays are validated to be equal length. 
+ * @param _paymentAmount the amount of PLI to be sent to each oracle for each request + * @param _minimumResponses the minimum number of responses + * before an answer will be calculated + * @param _oracles An array of oracle addresses + * @param _jobIds An array of Job IDs + */ + function updateRequestDetails( + uint128 _paymentAmount, + uint128 _minimumResponses, + address[] _oracles, + bytes32[] _jobIds + ) + public + onlyOwner() + validateAnswerRequirements(_minimumResponses, _oracles, _jobIds) + { + paymentAmount = _paymentAmount; + minimumResponses = _minimumResponses; + jobIds = _jobIds; + oracles = _oracles; + } + + /** + * @notice Allows the owner of the contract to withdraw any PLI balance + * available on the contract. + * @dev The contract will need to have a PLI balance in order to create requests. + * @param _recipient The address to receive the PLI tokens + * @param _amount The amount of PLI to send from the contract + */ + function transferPLI(address _recipient, uint256 _amount) + public + onlyOwner() + { + LinkTokenInterface linkToken = LinkTokenInterface(pluginTokenAddress()); + require(linkToken.transfer(_recipient, _amount), "PLI transfer failed"); + } + + /** + * @notice Called by the owner to permission other addresses to generate new + * requests to oracles. + * @param _requester the address whose permissions are being set + * @param _allowed boolean that determines whether the requester is + * permissioned or not + */ + function setAuthorization(address _requester, bool _allowed) + external + onlyOwner() + { + authorizedRequesters[_requester] = _allowed; + } + + /** + * @notice Cancels an outstanding Plugin request. + * The oracle contract requires the request ID and additional metadata to + * validate the cancellation. Only old answers can be cancelled. 
+ * @param _requestId is the identifier for the plugin request being cancelled + * @param _payment is the amount of PLI paid to the oracle for the request + * @param _expiration is the time when the request expires + */ + function cancelRequest( + bytes32 _requestId, + uint256 _payment, + uint256 _expiration + ) + external + ensureAuthorizedRequester() + { + uint256 answerId = requestAnswers[_requestId]; + require(answerId < latestCompletedAnswer, "Cannot modify an in-progress answer"); + + delete requestAnswers[_requestId]; + answers[answerId].responses.push(0); + deleteAnswer(answerId); + + cancelPluginRequest( + _requestId, + _payment, + this.pluginCallback.selector, + _expiration + ); + } + + /** + * @notice Called by the owner to kill the contract. This transfers all PLI + * balance and ETH balance (if there is any) to the owner. + */ + function destroy() + external + onlyOwner() + { + LinkTokenInterface linkToken = LinkTokenInterface(pluginTokenAddress()); + transferPLI(owner, linkToken.balanceOf(address(this))); + selfdestruct(owner); + } + + /** + * @dev Performs aggregation of the answers received from the Plugin nodes. + * Assumes that at least half the oracles are honest and so can't control the + * middle of the ordered responses. 
+ * @param _answerId The answer ID associated with the group of requests + */ + function updateLatestAnswer(uint256 _answerId) + private + ensureMinResponsesReceived(_answerId) + ensureOnlyLatestAnswer(_answerId) + { + uint256 responseLength = answers[_answerId].responses.length; + uint256 middleIndex = responseLength.div(2); + int256 currentAnswerTemp; + if (responseLength % 2 == 0) { + int256 median1 = quickselect(answers[_answerId].responses, middleIndex); + int256 median2 = quickselect(answers[_answerId].responses, middleIndex.add(1)); // quickselect is 1 indexed + currentAnswerTemp = median1.add(median2) / 2; // signed integers are not supported by SafeMath + } else { + currentAnswerTemp = quickselect(answers[_answerId].responses, middleIndex.add(1)); // quickselect is 1 indexed + } + currentAnswerValue = currentAnswerTemp; + latestCompletedAnswer = _answerId; + updatedTimestampValue = now; + updatedTimestamps[_answerId] = now; + currentAnswers[_answerId] = currentAnswerTemp; + emit AnswerUpdated(currentAnswerTemp, _answerId, now); + } + + /** + * @notice get the most recently reported answer + */ + function latestAnswer() + external + view + returns (int256) + { + return currentAnswers[latestCompletedAnswer]; + } + + /** + * @notice get the last updated at block timestamp + */ + function latestTimestamp() + external + view + returns (uint256) + { + return updatedTimestamps[latestCompletedAnswer]; + } + + /** + * @notice get past rounds answers + * @param _roundId the answer number to retrieve the answer for + */ + function getAnswer(uint256 _roundId) + external + view + returns (int256) + { + return currentAnswers[_roundId]; + } + + /** + * @notice get block timestamp when an answer was last updated + * @param _roundId the answer number to retrieve the updated timestamp for + */ + function getTimestamp(uint256 _roundId) + external + view + returns (uint256) + { + return updatedTimestamps[_roundId]; + } + + /** + * @notice get the latest completed round where 
the answer was updated + */ + function latestRound() + external + view + returns (uint256) + { + return latestCompletedAnswer; + } + + /** + * @dev Returns the kth value of the ordered array + * See: http://www.cs.yale.edu/homes/aspnes/pinewiki/QuickSelect.html + * @param _a The list of elements to pull from + * @param _k The index, 1 based, of the elements you want to pull from when ordered + */ + function quickselect(int256[] memory _a, uint256 _k) + private + pure + returns (int256) + { + int256[] memory a = _a; + uint256 k = _k; + uint256 aLen = a.length; + int256[] memory a1 = new int256[](aLen); + int256[] memory a2 = new int256[](aLen); + uint256 a1Len; + uint256 a2Len; + int256 pivot; + uint256 i; + + while (true) { + pivot = a[aLen.div(2)]; + a1Len = 0; + a2Len = 0; + for (i = 0; i < aLen; i++) { + if (a[i] < pivot) { + a1[a1Len] = a[i]; + a1Len++; + } else if (a[i] > pivot) { + a2[a2Len] = a[i]; + a2Len++; + } + } + if (k <= a1Len) { + aLen = a1Len; + (a, a1) = swap(a, a1); + } else if (k > (aLen.sub(a2Len))) { + k = k.sub(aLen.sub(a2Len)); + aLen = a2Len; + (a, a2) = swap(a, a2); + } else { + return pivot; + } + } + } + + /** + * @dev Swaps the pointers to two uint256 arrays in memory + * @param _a The pointer to the first in memory array + * @param _b The pointer to the second in memory array + */ + function swap(int256[] memory _a, int256[] memory _b) + private + pure + returns(int256[] memory, int256[] memory) + { + return (_b, _a); + } + + /** + * @dev Cleans up the answer record if all responses have been received. + * @param _answerId The identifier of the answer to be deleted + */ + function deleteAnswer(uint256 _answerId) + private + ensureAllResponsesReceived(_answerId) + { + delete answers[_answerId]; + } + + /** + * @dev Prevents taking an action if the minimum number of responses has not + * been received for an answer. + * @param _answerId The the identifier of the answer that keeps track of the responses. 
+   */
+  modifier ensureMinResponsesReceived(uint256 _answerId) {
+    if (answers[_answerId].responses.length >= answers[_answerId].minimumResponses) {
+      _;
+    }
+  }
+
+  /**
+   * @dev Prevents taking an action if not all responses are received for an answer.
+   * @param _answerId The identifier of the answer that keeps track of the responses.
+   */
+  modifier ensureAllResponsesReceived(uint256 _answerId) {
+    if (answers[_answerId].responses.length == answers[_answerId].maxResponses) {
+      _;
+    }
+  }
+
+  /**
+   * @dev Prevents taking an action if a newer answer has been recorded.
+   * @param _answerId The current answer's identifier.
+   * Answer IDs are in ascending order.
+   */
+  modifier ensureOnlyLatestAnswer(uint256 _answerId) {
+    if (latestCompletedAnswer <= _answerId) {
+      _;
+    }
+  }
+
+  /**
+   * @dev Ensures corresponding number of oracles and jobs.
+   * @param _minimumResponses The minimum number of responses required before an answer is calculated.
+   * @param _oracles The list of oracles.
+   * @param _jobIds The list of jobs.
+   */
+  modifier validateAnswerRequirements(
+    uint256 _minimumResponses,
+    address[] _oracles,
+    bytes32[] _jobIds
+  ) {
+    // Revert reason must agree with MAX_ORACLE_COUNT (declared as 28 above);
+    // the previous message incorrectly claimed a limit of 45.
+    require(_oracles.length <= MAX_ORACLE_COUNT, "cannot have more than 28 oracles");
+    require(_oracles.length >= _minimumResponses, "must have at least as many oracles as responses");
+    require(_oracles.length == _jobIds.length, "must have exactly as many oracles as job IDs");
+    _;
+  }
+
+  /**
+   * @dev Reverts if `msg.sender` is not authorized to make requests.
+ */ + modifier ensureAuthorizedRequester() { + require(authorizedRequesters[msg.sender] || msg.sender == owner, "Not an authorized address for creating requests"); + _; + } + +} diff --git a/contracts/src/v0.4/ERC677Token.sol b/contracts/src/v0.4/ERC677Token.sol new file mode 100644 index 00000000..d78ca1f5 --- /dev/null +++ b/contracts/src/v0.4/ERC677Token.sol @@ -0,0 +1,47 @@ +pragma solidity ^0.4.11; + + +import "./interfaces/ERC677.sol"; +import "./interfaces/ERC677Receiver.sol"; + + +contract ERC677Token is ERC677 { + + /** + * @dev transfer token to a contract address with additional data if the recipient is a contact. + * @param _to The address to transfer to. + * @param _value The amount to be transferred. + * @param _data The extra data to be passed to the receiving contract. + */ + function transferAndCall(address _to, uint _value, bytes _data) + public + returns (bool success) + { + super.transfer(_to, _value); + Transfer(msg.sender, _to, _value, _data); + if (isContract(_to)) { + contractFallback(_to, _value, _data); + } + return true; + } + + + // PRIVATE + + function contractFallback(address _to, uint _value, bytes _data) + private + { + ERC677Receiver receiver = ERC677Receiver(_to); + receiver.onTokenTransfer(msg.sender, _value, _data); + } + + function isContract(address _addr) + private + returns (bool hasCode) + { + uint length; + assembly { length := extcodesize(_addr) } + return length > 0; + } + +} diff --git a/contracts/src/v0.4/LinkToken.sol b/contracts/src/v0.4/LinkToken.sol new file mode 100644 index 00000000..9a5582e9 --- /dev/null +++ b/contracts/src/v0.4/LinkToken.sol @@ -0,0 +1,83 @@ +pragma solidity ^0.4.11; + + +import "./ERC677Token.sol"; +import { StandardToken as linkStandardToken } from "./vendor/StandardToken.sol"; + + +contract LinkToken is linkStandardToken, ERC677Token { + + uint public constant totalSupply = 10**27; + string public constant name = "Plugin Token"; + uint8 public constant decimals = 18; + string public 
constant symbol = "PLI"; + + function LinkToken() + public + { + balances[msg.sender] = totalSupply; + } + + /** + * @dev transfer token to a specified address with additional data if the recipient is a contract. + * @param _to The address to transfer to. + * @param _value The amount to be transferred. + * @param _data The extra data to be passed to the receiving contract. + */ + function transferAndCall(address _to, uint _value, bytes _data) + public + validRecipient(_to) + returns (bool success) + { + return super.transferAndCall(_to, _value, _data); + } + + /** + * @dev transfer token to a specified address. + * @param _to The address to transfer to. + * @param _value The amount to be transferred. + */ + function transfer(address _to, uint _value) + public + validRecipient(_to) + returns (bool success) + { + return super.transfer(_to, _value); + } + + /** + * @dev Approve the passed address to spend the specified amount of tokens on behalf of msg.sender. + * @param _spender The address which will spend the funds. + * @param _value The amount of tokens to be spent. 
+ */ + function approve(address _spender, uint256 _value) + public + validRecipient(_spender) + returns (bool) + { + return super.approve(_spender, _value); + } + + /** + * @dev Transfer tokens from one address to another + * @param _from address The address which you want to send tokens from + * @param _to address The address which you want to transfer to + * @param _value uint256 the amount of tokens to be transferred + */ + function transferFrom(address _from, address _to, uint256 _value) + public + validRecipient(_to) + returns (bool) + { + return super.transferFrom(_from, _to, _value); + } + + + // MODIFIERS + + modifier validRecipient(address _recipient) { + require(_recipient != address(0) && _recipient != address(this)); + _; + } + +} diff --git a/contracts/src/v0.4/Migrations.sol b/contracts/src/v0.4/Migrations.sol new file mode 100644 index 00000000..4f01db5c --- /dev/null +++ b/contracts/src/v0.4/Migrations.sol @@ -0,0 +1,23 @@ +pragma solidity ^0.4.24; // solhint-disable-line compiler-fixed + +contract Migrations { + address public owner; + uint public last_completed_migration; + + modifier restricted() { + if (msg.sender == owner) _; + } + + constructor() public { + owner = msg.sender; + } + + function setCompleted(uint completed) public restricted { + last_completed_migration = completed; + } + + function upgrade(address new_address) public restricted { + Migrations upgraded = Migrations(new_address); + upgraded.setCompleted(last_completed_migration); + } +} diff --git a/contracts/src/v0.4/Oracle.sol b/contracts/src/v0.4/Oracle.sol new file mode 100644 index 00000000..f806aee9 --- /dev/null +++ b/contracts/src/v0.4/Oracle.sol @@ -0,0 +1,320 @@ +pragma solidity 0.4.24; + +import "./vendor/Ownable.sol"; +import "./vendor/SafeMathPlugin.sol"; +import "./interfaces/PluginRequestInterface.sol"; +import "./interfaces/OracleInterface.sol"; +import "./interfaces/LinkTokenInterface.sol"; + +/** + * @title The Plugin Oracle contract + * @notice Node operators 
can deploy this contract to fulfill requests sent to them + */ +contract Oracle is PluginRequestInterface, OracleInterface, Ownable { + using SafeMathPlugin for uint256; + + uint256 constant public EXPIRY_TIME = 5 minutes; + uint256 constant private MINIMUM_CONSUMER_GAS_LIMIT = 400000; + // We initialize fields to 1 instead of 0 so that the first invocation + // does not cost more gas. + uint256 constant private ONE_FOR_CONSISTENT_GAS_COST = 1; + uint256 constant private SELECTOR_LENGTH = 4; + uint256 constant private EXPECTED_REQUEST_WORDS = 2; + uint256 constant private MINIMUM_REQUEST_LENGTH = SELECTOR_LENGTH + (32 * EXPECTED_REQUEST_WORDS); + + LinkTokenInterface internal LinkToken; + mapping(bytes32 => bytes32) private commitments; + mapping(address => bool) private authorizedNodes; + uint256 private withdrawableTokens = ONE_FOR_CONSISTENT_GAS_COST; + + event OracleRequest( + bytes32 indexed specId, + address requester, + bytes32 requestId, + uint256 payment, + address callbackAddr, + bytes4 callbackFunctionId, + uint256 cancelExpiration, + uint256 dataVersion, + bytes data + ); + + event CancelOracleRequest( + bytes32 indexed requestId + ); + + /** + * @notice Deploy with the address of the PLI token + * @dev Sets the LinkToken address for the imported LinkTokenInterface + * @param _link The address of the PLI token + */ + constructor(address _link) public Ownable() { + LinkToken = LinkTokenInterface(_link); // external but already deployed and unalterable + } + + /** + * @notice Called when PLI is sent to the contract via `transferAndCall` + * @dev The data payload's first 2 words will be overwritten by the `_sender` and `_amount` + * values to ensure correctness. Calls oracleRequest. 
+ * @param _sender Address of the sender + * @param _amount Amount of PLI sent (specified in wei) + * @param _data Payload of the transaction + */ + function onTokenTransfer( + address _sender, + uint256 _amount, + bytes _data + ) + public + onlyPLI + validRequestLength(_data) + permittedFunctionsForPLI(_data) + { + assembly { // solhint-disable-line no-inline-assembly + mstore(add(_data, 36), _sender) // ensure correct sender is passed + mstore(add(_data, 68), _amount) // ensure correct amount is passed + } + // solhint-disable-next-line avoid-low-level-calls + require(address(this).delegatecall(_data), "Unable to create request"); // calls oracleRequest + } + + /** + * @notice Creates the Plugin request + * @dev Stores the hash of the params as the on-chain commitment for the request. + * Emits OracleRequest event for the Plugin node to detect. + * @param _sender The sender of the request + * @param _payment The amount of payment given (specified in wei) + * @param _specId The Job Specification ID + * @param _callbackAddress The callback address for the response + * @param _callbackFunctionId The callback function ID for the response + * @param _nonce The nonce sent by the requester + * @param _dataVersion The specified data version + * @param _data The CBOR payload of the request + */ + function oracleRequest( + address _sender, + uint256 _payment, + bytes32 _specId, + address _callbackAddress, + bytes4 _callbackFunctionId, + uint256 _nonce, + uint256 _dataVersion, + bytes _data + ) + external + onlyPLI + checkCallbackAddress(_callbackAddress) + { + bytes32 requestId = keccak256(abi.encodePacked(_sender, _nonce)); + require(commitments[requestId] == 0, "Must use a unique ID"); + // solhint-disable-next-line not-rely-on-time + uint256 expiration = now.add(EXPIRY_TIME); + + commitments[requestId] = keccak256( + abi.encodePacked( + _payment, + _callbackAddress, + _callbackFunctionId, + expiration + ) + ); + + emit OracleRequest( + _specId, + _sender, + requestId, + 
_payment, + _callbackAddress, + _callbackFunctionId, + expiration, + _dataVersion, + _data); + } + + /** + * @notice Called by the Plugin node to fulfill requests + * @dev Given params must hash back to the commitment stored from `oracleRequest`. + * Will call the callback address' callback function without bubbling up error + * checking in a `require` so that the node can get paid. + * @param _requestId The fulfillment request ID that must match the requester's + * @param _payment The payment amount that will be released for the oracle (specified in wei) + * @param _callbackAddress The callback address to call for fulfillment + * @param _callbackFunctionId The callback function ID to use for fulfillment + * @param _expiration The expiration that the node should respond by before the requester can cancel + * @param _data The data to return to the consuming contract + * @return Status if the external call was successful + */ + function fulfillOracleRequest( + bytes32 _requestId, + uint256 _payment, + address _callbackAddress, + bytes4 _callbackFunctionId, + uint256 _expiration, + bytes32 _data + ) + external + onlyAuthorizedNode + isValidRequest(_requestId) + returns (bool) + { + bytes32 paramsHash = keccak256( + abi.encodePacked( + _payment, + _callbackAddress, + _callbackFunctionId, + _expiration + ) + ); + require(commitments[_requestId] == paramsHash, "Params do not match request ID"); + withdrawableTokens = withdrawableTokens.add(_payment); + delete commitments[_requestId]; + require(gasleft() >= MINIMUM_CONSUMER_GAS_LIMIT, "Must provide consumer enough gas"); + // All updates to the oracle's fulfillment should come before calling the + // callback(addr+functionId) as it is untrusted. 
+ // See: https://solidity.readthedocs.io/en/develop/security-considerations.html#use-the-checks-effects-interactions-pattern + return _callbackAddress.call(_callbackFunctionId, _requestId, _data); // solhint-disable-line avoid-low-level-calls + } + + /** + * @notice Use this to check if a node is authorized for fulfilling requests + * @param _node The address of the Plugin node + * @return The authorization status of the node + */ + function getAuthorizationStatus(address _node) external view returns (bool) { + return authorizedNodes[_node]; + } + + /** + * @notice Sets the fulfillment permission for a given node. Use `true` to allow, `false` to disallow. + * @param _node The address of the Plugin node + * @param _allowed Bool value to determine if the node can fulfill requests + */ + function setFulfillmentPermission(address _node, bool _allowed) external onlyOwner { + authorizedNodes[_node] = _allowed; + } + + /** + * @notice Allows the node operator to withdraw earned PLI to a given address + * @dev The owner of the contract can be another wallet and does not have to be a Plugin node + * @param _recipient The address to send the PLI token to + * @param _amount The amount to send (specified in wei) + */ + function withdraw(address _recipient, uint256 _amount) + external + onlyOwner + hasAvailableFunds(_amount) + { + withdrawableTokens = withdrawableTokens.sub(_amount); + assert(LinkToken.transfer(_recipient, _amount)); + } + + /** + * @notice Displays the amount of PLI that is available for the node operator to withdraw + * @dev We use `ONE_FOR_CONSISTENT_GAS_COST` in place of 0 in storage + * @return The amount of withdrawable PLI on the contract + */ + function withdrawable() external view onlyOwner returns (uint256) { + return withdrawableTokens.sub(ONE_FOR_CONSISTENT_GAS_COST); + } + + /** + * @notice Allows requesters to cancel requests sent to this oracle contract. Will transfer the PLI + * sent for the request back to the requester's address. 
+ * @dev Given params must hash to a commitment stored on the contract in order for the request to be valid + * Emits CancelOracleRequest event. + * @param _requestId The request ID + * @param _payment The amount of payment given (specified in wei) + * @param _callbackFunc The requester's specified callback function selector + * @param _expiration The time of the expiration for the request + */ + function cancelOracleRequest( + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunc, + uint256 _expiration + ) external { + bytes32 paramsHash = keccak256( + abi.encodePacked( + _payment, + msg.sender, + _callbackFunc, + _expiration) + ); + require(paramsHash == commitments[_requestId], "Params do not match request ID"); + // solhint-disable-next-line not-rely-on-time + require(_expiration <= now, "Request is not expired"); + + delete commitments[_requestId]; + emit CancelOracleRequest(_requestId); + + assert(LinkToken.transfer(msg.sender, _payment)); + } + + // MODIFIERS + + /** + * @dev Reverts if amount requested is greater than withdrawable balance + * @param _amount The given amount to compare to `withdrawableTokens` + */ + modifier hasAvailableFunds(uint256 _amount) { + require(withdrawableTokens >= _amount.add(ONE_FOR_CONSISTENT_GAS_COST), "Amount requested is greater than withdrawable balance"); + _; + } + + /** + * @dev Reverts if request ID does not exist + * @param _requestId The given request ID to check in stored `commitments` + */ + modifier isValidRequest(bytes32 _requestId) { + require(commitments[_requestId] != 0, "Must have a valid requestId"); + _; + } + + /** + * @dev Reverts if `msg.sender` is not authorized to fulfill requests + */ + modifier onlyAuthorizedNode() { + require(authorizedNodes[msg.sender] || msg.sender == owner, "Not an authorized node to fulfill requests"); + _; + } + + /** + * @dev Reverts if not sent from the PLI token + */ + modifier onlyPLI() { + require(msg.sender == address(LinkToken), "Must use PLI token"); + _; + } + + /** + * 
@dev Reverts if the given data does not begin with the `oracleRequest` function selector + * @param _data The data payload of the request + */ + modifier permittedFunctionsForPLI(bytes _data) { + bytes4 funcSelector; + assembly { // solhint-disable-line no-inline-assembly + funcSelector := mload(add(_data, 32)) + } + require(funcSelector == this.oracleRequest.selector, "Must use whitelisted functions"); + _; + } + + /** + * @dev Reverts if the callback address is the PLI token + * @param _to The callback address + */ + modifier checkCallbackAddress(address _to) { + require(_to != address(LinkToken), "Cannot callback to PLI"); + _; + } + + /** + * @dev Reverts if the given payload is less than needed to create a request + * @param _data The request payload + */ + modifier validRequestLength(bytes _data) { + require(_data.length >= MINIMUM_REQUEST_LENGTH, "Invalid request length"); + _; + } + +} diff --git a/contracts/src/v0.4/Plugin.sol b/contracts/src/v0.4/Plugin.sol new file mode 100644 index 00000000..39e06daa --- /dev/null +++ b/contracts/src/v0.4/Plugin.sol @@ -0,0 +1,126 @@ +pragma solidity ^0.4.24; + +import { CBOR as CBOR_Plugin } from "./vendor/CBOR.sol"; +import { Buffer as Buffer_Plugin } from "./vendor/Buffer.sol"; + +/** + * @title Library for common Plugin functions + * @dev Uses imported CBOR library for encoding to buffer + */ +library Plugin { + uint256 internal constant defaultBufferSize = 256; // solhint-disable-line const-name-snakecase + + using CBOR_Plugin for Buffer_Plugin.buffer; + + struct Request { + bytes32 id; + address callbackAddress; + bytes4 callbackFunctionId; + uint256 nonce; + Buffer_Plugin.buffer buf; + } + + /** + * @notice Initializes a Plugin request + * @dev Sets the ID, callback address, and callback function signature on the request + * @param self The uninitialized request + * @param _id The Job Specification ID + * @param _callbackAddress The callback address + * @param _callbackFunction The callback function signature + * 
@return The initialized request + */ + function initialize( + Request memory self, + bytes32 _id, + address _callbackAddress, + bytes4 _callbackFunction + ) internal pure returns (Plugin.Request memory) { + Buffer_Plugin.init(self.buf, defaultBufferSize); + self.id = _id; + self.callbackAddress = _callbackAddress; + self.callbackFunctionId = _callbackFunction; + return self; + } + + /** + * @notice Sets the data for the buffer without encoding CBOR on-chain + * @dev CBOR can be closed with curly-brackets {} or they can be left off + * @param self The initialized request + * @param _data The CBOR data + */ + function setBuffer(Request memory self, bytes _data) + internal pure + { + Buffer_Plugin.init(self.buf, _data.length); + Buffer_Plugin.append(self.buf, _data); + } + + /** + * @notice Adds a string value to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _value The string value to add + */ + function add(Request memory self, string _key, string _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeString(_value); + } + + /** + * @notice Adds a bytes value to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _value The bytes value to add + */ + function addBytes(Request memory self, string _key, bytes _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeBytes(_value); + } + + /** + * @notice Adds a int256 value to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _value The int256 value to add + */ + function addInt(Request memory self, string _key, int256 _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeInt(_value); + } + + /** + * @notice Adds a uint256 value to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * 
@param _value The uint256 value to add + */ + function addUint(Request memory self, string _key, uint256 _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeUInt(_value); + } + + /** + * @notice Adds an array of strings to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _values The array of string values to add + */ + function addStringArray(Request memory self, string _key, string[] memory _values) + internal pure + { + self.buf.encodeString(_key); + self.buf.startArray(); + for (uint256 i = 0; i < _values.length; i++) { + self.buf.encodeString(_values[i]); + } + self.buf.endSequence(); + } +} diff --git a/contracts/src/v0.4/PluginClient.sol b/contracts/src/v0.4/PluginClient.sol new file mode 100644 index 00000000..cf9e83e3 --- /dev/null +++ b/contracts/src/v0.4/PluginClient.sol @@ -0,0 +1,260 @@ +pragma solidity ^0.4.24; + +import "./Plugin.sol"; +import "./interfaces/ENSInterface.sol"; +import "./interfaces/LinkTokenInterface.sol"; +import "./interfaces/PluginRequestInterface.sol"; +import "./interfaces/PointerInterface.sol"; +import { ENSResolver as ENSResolver_Plugin } from "./vendor/ENSResolver.sol"; + +/** + * @title The PluginClient contract + * @notice Contract writers can inherit this contract in order to create requests for the + * Plugin network + */ +contract PluginClient { + using Plugin for Plugin.Request; + + uint256 constant internal PLI = 10**18; + uint256 constant private AMOUNT_OVERRIDE = 0; + address constant private SENDER_OVERRIDE = 0x0; + uint256 constant private ARGS_VERSION = 1; + bytes32 constant private ENS_TOKEN_SUBNAME = keccak256("link"); + bytes32 constant private ENS_ORACLE_SUBNAME = keccak256("oracle"); + address constant private PLI_TOKEN_POINTER = 0xC89bD4E1632D3A43CB03AAAd5262cbe4038Bc571; + + ENSInterface private ens; + bytes32 private ensNode; + LinkTokenInterface private link; + PluginRequestInterface private oracle; + uint256 
private requests = 1; + mapping(bytes32 => address) private pendingRequests; + + event PluginRequested(bytes32 indexed id); + event PluginFulfilled(bytes32 indexed id); + event PluginCancelled(bytes32 indexed id); + + /** + * @notice Creates a request that can hold additional parameters + * @param _specId The Job Specification ID that the request will be created for + * @param _callbackAddress The callback address that the response will be sent to + * @param _callbackFunctionSignature The callback function signature to use for the callback address + * @return A Plugin Request struct in memory + */ + function buildPluginRequest( + bytes32 _specId, + address _callbackAddress, + bytes4 _callbackFunctionSignature + ) internal pure returns (Plugin.Request memory) { + Plugin.Request memory req; + return req.initialize(_specId, _callbackAddress, _callbackFunctionSignature); + } + + /** + * @notice Creates a Plugin request to the stored oracle address + * @dev Calls `sendPluginRequestTo` with the stored oracle address + * @param _req The initialized Plugin Request + * @param _payment The amount of PLI to send for the request + * @return The request ID + */ + function sendPluginRequest(Plugin.Request memory _req, uint256 _payment) + internal + returns (bytes32) + { + return sendPluginRequestTo(oracle, _req, _payment); + } + + /** + * @notice Creates a Plugin request to the specified oracle address + * @dev Generates and stores a request ID, increments the local nonce, and uses `transferAndCall` to + * send PLI which creates a request on the target oracle contract. + * Emits PluginRequested event. 
+ * @param _oracle The address of the oracle for the request + * @param _req The initialized Plugin Request + * @param _payment The amount of PLI to send for the request + * @return The request ID + */ + function sendPluginRequestTo(address _oracle, Plugin.Request memory _req, uint256 _payment) + internal + returns (bytes32 requestId) + { + requestId = keccak256(abi.encodePacked(this, requests)); + _req.nonce = requests; + pendingRequests[requestId] = _oracle; + emit PluginRequested(requestId); + require(link.transferAndCall(_oracle, _payment, encodeRequest(_req)), "unable to transferAndCall to oracle"); + requests += 1; + + return requestId; + } + + /** + * @notice Allows a request to be cancelled if it has not been fulfilled + * @dev Requires keeping track of the expiration value emitted from the oracle contract. + * Deletes the request from the `pendingRequests` mapping. + * Emits PluginCancelled event. + * @param _requestId The request ID + * @param _payment The amount of PLI sent for the request + * @param _callbackFunc The callback function specified for the request + * @param _expiration The time of the expiration for the request + */ + function cancelPluginRequest( + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunc, + uint256 _expiration + ) + internal + { + PluginRequestInterface requested = PluginRequestInterface(pendingRequests[_requestId]); + delete pendingRequests[_requestId]; + emit PluginCancelled(_requestId); + requested.cancelOracleRequest(_requestId, _payment, _callbackFunc, _expiration); + } + + /** + * @notice Sets the stored oracle address + * @param _oracle The address of the oracle contract + */ + function setPluginOracle(address _oracle) internal { + oracle = PluginRequestInterface(_oracle); + } + + /** + * @notice Sets the PLI token address + * @param _link The address of the PLI token contract + */ + function setPluginToken(address _link) internal { + link = LinkTokenInterface(_link); + } + + /** + * @notice Sets the Plugin 
token address for the public + * network as given by the Pointer contract + */ + function setPublicPluginToken() internal { + setPluginToken(PointerInterface(PLI_TOKEN_POINTER).getAddress()); + } + + /** + * @notice Retrieves the stored address of the PLI token + * @return The address of the PLI token + */ + function pluginTokenAddress() + internal + view + returns (address) + { + return address(link); + } + + /** + * @notice Retrieves the stored address of the oracle contract + * @return The address of the oracle contract + */ + function pluginOracleAddress() + internal + view + returns (address) + { + return address(oracle); + } + + /** + * @notice Allows for a request which was created on another contract to be fulfilled + * on this contract + * @param _oracle The address of the oracle contract that will fulfill the request + * @param _requestId The request ID used for the response + */ + function addPluginExternalRequest(address _oracle, bytes32 _requestId) + internal + notPendingRequest(_requestId) + { + pendingRequests[_requestId] = _oracle; + } + + /** + * @notice Sets the stored oracle and PLI token contracts with the addresses resolved by ENS + * @dev Accounts for subnodes having different resolvers + * @param _ens The address of the ENS contract + * @param _node The ENS node hash + */ + function usePluginWithENS(address _ens, bytes32 _node) + internal + { + ens = ENSInterface(_ens); + ensNode = _node; + bytes32 linkSubnode = keccak256(abi.encodePacked(ensNode, ENS_TOKEN_SUBNAME)); + ENSResolver_Plugin resolver = ENSResolver_Plugin(ens.resolver(linkSubnode)); + setPluginToken(resolver.addr(linkSubnode)); + updatePluginOracleWithENS(); + } + + /** + * @notice Sets the stored oracle contract with the address resolved by ENS + * @dev This may be called on its own as long as `usePluginWithENS` has been called previously + */ + function updatePluginOracleWithENS() + internal + { + bytes32 oracleSubnode = keccak256(abi.encodePacked(ensNode, ENS_ORACLE_SUBNAME)); 
+ ENSResolver_Plugin resolver = ENSResolver_Plugin(ens.resolver(oracleSubnode)); + setPluginOracle(resolver.addr(oracleSubnode)); + } + + /** + * @notice Encodes the request to be sent to the oracle contract + * @dev The Plugin node expects values to be in order for the request to be picked up. Order of types + * will be validated in the oracle contract. + * @param _req The initialized Plugin Request + * @return The bytes payload for the `transferAndCall` method + */ + function encodeRequest(Plugin.Request memory _req) + private + view + returns (bytes memory) + { + return abi.encodeWithSelector( + oracle.oracleRequest.selector, + SENDER_OVERRIDE, // Sender value - overridden by onTokenTransfer by the requesting contract's address + AMOUNT_OVERRIDE, // Amount value - overridden by onTokenTransfer by the actual amount of PLI sent + _req.id, + _req.callbackAddress, + _req.callbackFunctionId, + _req.nonce, + ARGS_VERSION, + _req.buf.buf); + } + + /** + * @notice Ensures that the fulfillment is valid for this contract + * @dev Use if the contract developer prefers methods instead of modifiers for validation + * @param _requestId The request ID for fulfillment + */ + function validatePluginCallback(bytes32 _requestId) + internal + recordPluginFulfillment(_requestId) + // solhint-disable-next-line no-empty-blocks + {} + + /** + * @dev Reverts if the sender is not the oracle of the request. + * Emits PluginFulfilled event. 
+ * @param _requestId The request ID for fulfillment + */ + modifier recordPluginFulfillment(bytes32 _requestId) { + require(msg.sender == pendingRequests[_requestId], "Source must be the oracle of the request"); + delete pendingRequests[_requestId]; + emit PluginFulfilled(_requestId); + _; + } + + /** + * @dev Reverts if the request is already pending + * @param _requestId The request ID for fulfillment + */ + modifier notPendingRequest(bytes32 _requestId) { + require(pendingRequests[_requestId] == address(0), "Request is already pending"); + _; + } +} diff --git a/contracts/src/v0.4/Plugined.sol b/contracts/src/v0.4/Plugined.sol new file mode 100644 index 00000000..9d9921b7 --- /dev/null +++ b/contracts/src/v0.4/Plugined.sol @@ -0,0 +1,141 @@ +pragma solidity ^0.4.24; + +import "./PluginClient.sol"; + +/** + * @title The Plugined contract + * @notice Contract writers can inherit this contract in order to create requests for the + * Plugin network. PluginClient is an alias of the Plugined contract. 
+ */ +contract Plugined is PluginClient { + /** + * @notice Creates a request that can hold additional parameters + * @param _specId The Job Specification ID that the request will be created for + * @param _callbackAddress The callback address that the response will be sent to + * @param _callbackFunctionSignature The callback function signature to use for the callback address + * @return A Plugin Request struct in memory + */ + function newRequest( + bytes32 _specId, + address _callbackAddress, + bytes4 _callbackFunctionSignature + ) internal pure returns (Plugin.Request memory) { + return buildPluginRequest(_specId, _callbackAddress, _callbackFunctionSignature); + } + + /** + * @notice Creates a Plugin request to the stored oracle address + * @dev Calls `sendPluginRequestTo` with the stored oracle address + * @param _req The initialized Plugin Request + * @param _payment The amount of PLI to send for the request + * @return The request ID + */ + function pluginRequest(Plugin.Request memory _req, uint256 _payment) + internal + returns (bytes32) + { + return sendPluginRequest(_req, _payment); + } + + /** + * @notice Creates a Plugin request to the specified oracle address + * @dev Generates and stores a request ID, increments the local nonce, and uses `transferAndCall` to + * send PLI which creates a request on the target oracle contract. + * Emits PluginRequested event. 
+ * @param _oracle The address of the oracle for the request + * @param _req The initialized Plugin Request + * @param _payment The amount of PLI to send for the request + * @return The request ID + */ + function pluginRequestTo(address _oracle, Plugin.Request memory _req, uint256 _payment) + internal + returns (bytes32 requestId) + { + return sendPluginRequestTo(_oracle, _req, _payment); + } + + /** + * @notice Sets the stored oracle address + * @param _oracle The address of the oracle contract + */ + function setOracle(address _oracle) internal { + setPluginOracle(_oracle); + } + + /** + * @notice Sets the PLI token address + * @param _link The address of the PLI token contract + */ + function setLinkToken(address _link) internal { + setPluginToken(_link); + } + + /** + * @notice Retrieves the stored address of the PLI token + * @return The address of the PLI token + */ + function pluginToken() + internal + view + returns (address) + { + return pluginTokenAddress(); + } + + /** + * @notice Retrieves the stored address of the oracle contract + * @return The address of the oracle contract + */ + function oracleAddress() + internal + view + returns (address) + { + return pluginOracleAddress(); + } + + /** + * @notice Ensures that the fulfillment is valid for this contract + * @dev Use if the contract developer prefers methods instead of modifiers for validation + * @param _requestId The request ID for fulfillment + */ + function fulfillPluginRequest(bytes32 _requestId) + internal + recordPluginFulfillment(_requestId) + // solhint-disable-next-line no-empty-blocks + {} + + /** + * @notice Sets the stored oracle and PLI token contracts with the addresses resolved by ENS + * @dev Accounts for subnodes having different resolvers + * @param _ens The address of the ENS contract + * @param _node The ENS node hash + */ + function setPluginWithENS(address _ens, bytes32 _node) + internal + { + usePluginWithENS(_ens, _node); + } + + /** + * @notice Sets the stored oracle 
contract with the address resolved by ENS + * @dev This may be called on its own as long as `setPluginWithENS` has been called previously + */ + function setOracleWithENS() + internal + { + updatePluginOracleWithENS(); + } + + /** + * @notice Allows for a request which was created on another contract to be fulfilled + * on this contract + * @param _oracle The address of the oracle contract that will fulfill the request + * @param _requestId The request ID used for the response + */ + function addExternalRequest(address _oracle, bytes32 _requestId) + internal + { + addPluginExternalRequest(_oracle, _requestId); + } +} diff --git a/contracts/src/v0.4/Pointer.sol b/contracts/src/v0.4/Pointer.sol new file mode 100644 index 00000000..40ff35f0 --- /dev/null +++ b/contracts/src/v0.4/Pointer.sol @@ -0,0 +1,9 @@ +pragma solidity 0.4.24; + +contract Pointer { + address public getAddress; + + constructor(address _addr) public { + getAddress = _addr; + } +} diff --git a/contracts/src/v0.4/interfaces/AggregatorInterface.sol b/contracts/src/v0.4/interfaces/AggregatorInterface.sol new file mode 100644 index 00000000..d9eaf171 --- /dev/null +++ b/contracts/src/v0.4/interfaces/AggregatorInterface.sol @@ -0,0 +1,12 @@ +pragma solidity >=0.4.24; + +interface AggregatorInterface { + function latestAnswer() external view returns (int256); + function latestTimestamp() external view returns (uint256); + function latestRound() external view returns (uint256); + function getAnswer(uint256 roundId) external view returns (int256); + function getTimestamp(uint256 roundId) external view returns (uint256); + + event AnswerUpdated(int256 indexed current, uint256 indexed roundId, uint256 timestamp); + event NewRound(uint256 indexed roundId, address indexed startedBy, uint256 startedAt); +} diff --git a/contracts/src/v0.4/interfaces/AggregatorV3Interface.sol b/contracts/src/v0.4/interfaces/AggregatorV3Interface.sol new file mode 100644 index 00000000..f6b4849e --- /dev/null +++ 
b/contracts/src/v0.4/interfaces/AggregatorV3Interface.sol @@ -0,0 +1,33 @@ +pragma solidity >=0.4.24; + +interface AggregatorV3Interface { + + function decimals() external view returns (uint8); + function description() external view returns (string memory); + function version() external view returns (uint256); + + // getRoundData and latestRoundData should both raise "No data present" + // if they do not have data to report, instead of returning unset values + // which could be misinterpreted as actual reported values. + function getRoundData(uint80 _roundId) + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + function latestRoundData() + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + +} diff --git a/contracts/src/v0.4/interfaces/ENSInterface.sol b/contracts/src/v0.4/interfaces/ENSInterface.sol new file mode 100644 index 00000000..f374a46b --- /dev/null +++ b/contracts/src/v0.4/interfaces/ENSInterface.sol @@ -0,0 +1,26 @@ +pragma solidity ^0.4.24; + +interface ENSInterface { + + // Logged when the owner of a node assigns a new owner to a subnode. + event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner); + + // Logged when the owner of a node transfers ownership to a new account. + event Transfer(bytes32 indexed node, address owner); + + // Logged when the resolver for a node changes. 
+ event NewResolver(bytes32 indexed node, address resolver); + + // Logged when the TTL of a node changes + event NewTTL(bytes32 indexed node, uint64 ttl); + + + function setSubnodeOwner(bytes32 node, bytes32 label, address owner) external; + function setResolver(bytes32 node, address resolver) external; + function setOwner(bytes32 node, address owner) external; + function setTTL(bytes32 node, uint64 ttl) external; + function owner(bytes32 node) external view returns (address); + function resolver(bytes32 node) external view returns (address); + function ttl(bytes32 node) external view returns (uint64); + +} diff --git a/contracts/src/v0.4/interfaces/ERC20.sol b/contracts/src/v0.4/interfaces/ERC20.sol new file mode 100644 index 00000000..fd978c33 --- /dev/null +++ b/contracts/src/v0.4/interfaces/ERC20.sol @@ -0,0 +1,16 @@ +pragma solidity ^0.4.11; + + +import { ERC20Basic as linkERC20Basic } from "./ERC20Basic.sol"; + + +/** + * @title ERC20 interface + * @dev see https://github.com/ethereum/EIPs/issues/20 + */ +contract ERC20 is linkERC20Basic { + function allowance(address owner, address spender) constant returns (uint256); + function transferFrom(address from, address to, uint256 value) returns (bool); + function approve(address spender, uint256 value) returns (bool); + event Approval(address indexed owner, address indexed spender, uint256 value); +} diff --git a/contracts/src/v0.4/interfaces/ERC20Basic.sol b/contracts/src/v0.4/interfaces/ERC20Basic.sol new file mode 100644 index 00000000..07ab02f0 --- /dev/null +++ b/contracts/src/v0.4/interfaces/ERC20Basic.sol @@ -0,0 +1,14 @@ +pragma solidity ^0.4.11; + + +/** + * @title ERC20Basic + * @dev Simpler version of ERC20 interface + * @dev see https://github.com/ethereum/EIPs/issues/179 + */ +contract ERC20Basic { + uint256 public totalSupply; + function balanceOf(address who) constant returns (uint256); + function transfer(address to, uint256 value) returns (bool); + event Transfer(address indexed from, address 
indexed to, uint256 value); +} diff --git a/contracts/src/v0.4/interfaces/ERC677.sol b/contracts/src/v0.4/interfaces/ERC677.sol new file mode 100644 index 00000000..1e6714f8 --- /dev/null +++ b/contracts/src/v0.4/interfaces/ERC677.sol @@ -0,0 +1,9 @@ +pragma solidity ^0.4.8; + +import { ERC20 as linkERC20 } from "./ERC20.sol"; + +contract ERC677 is linkERC20 { + function transferAndCall(address to, uint value, bytes data) returns (bool success); + + event Transfer(address indexed from, address indexed to, uint value, bytes data); +} diff --git a/contracts/src/v0.4/interfaces/ERC677Receiver.sol b/contracts/src/v0.4/interfaces/ERC677Receiver.sol new file mode 100644 index 00000000..8a46d0b6 --- /dev/null +++ b/contracts/src/v0.4/interfaces/ERC677Receiver.sol @@ -0,0 +1,6 @@ +pragma solidity ^0.4.8; + + +contract ERC677Receiver { + function onTokenTransfer(address _sender, uint _value, bytes _data); +} diff --git a/contracts/src/v0.4/interfaces/FlagsInterface.sol b/contracts/src/v0.4/interfaces/FlagsInterface.sol new file mode 100644 index 00000000..5a0373e9 --- /dev/null +++ b/contracts/src/v0.4/interfaces/FlagsInterface.sol @@ -0,0 +1,10 @@ +pragma solidity >=0.4.24; + +interface FlagsInterface { + function getFlag(address) external view returns (bool); + function getFlags(address[] calldata) external view returns (bool[] memory); + function raiseFlag(address) external; + function raiseFlags(address[] calldata) external; + function lowerFlags(address[] calldata) external; + function setRaisingAccessController(address) external; +} diff --git a/contracts/src/v0.4/interfaces/LinkTokenInterface.sol b/contracts/src/v0.4/interfaces/LinkTokenInterface.sol new file mode 100644 index 00000000..d4f813c3 --- /dev/null +++ b/contracts/src/v0.4/interfaces/LinkTokenInterface.sol @@ -0,0 +1,16 @@ +pragma solidity ^0.4.24; + +interface LinkTokenInterface { + function allowance(address owner, address spender) external view returns (uint256 remaining); + function approve(address 
spender, uint256 value) external returns (bool success); + function balanceOf(address owner) external view returns (uint256 balance); + function decimals() external view returns (uint8 decimalPlaces); + function decreaseApproval(address spender, uint256 addedValue) external returns (bool success); + function increaseApproval(address spender, uint256 subtractedValue) external; + function name() external view returns (string tokenName); + function symbol() external view returns (string tokenSymbol); + function totalSupply() external view returns (uint256 totalTokensIssued); + function transfer(address to, uint256 value) external returns (bool success); + function transferAndCall(address to, uint256 value, bytes data) external returns (bool success); + function transferFrom(address from, address to, uint256 value) external returns (bool success); +} diff --git a/contracts/src/v0.4/interfaces/OracleInterface.sol b/contracts/src/v0.4/interfaces/OracleInterface.sol new file mode 100644 index 00000000..9a324454 --- /dev/null +++ b/contracts/src/v0.4/interfaces/OracleInterface.sol @@ -0,0 +1,16 @@ +pragma solidity ^0.4.24; + +interface OracleInterface { + function fulfillOracleRequest( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + bytes32 data + ) external returns (bool); + function getAuthorizationStatus(address node) external view returns (bool); + function setFulfillmentPermission(address node, bool allowed) external; + function withdraw(address recipient, uint256 amount) external; + function withdrawable() external view returns (uint256); +} diff --git a/contracts/src/v0.4/interfaces/PluginRequestInterface.sol b/contracts/src/v0.4/interfaces/PluginRequestInterface.sol new file mode 100644 index 00000000..2ac451e9 --- /dev/null +++ b/contracts/src/v0.4/interfaces/PluginRequestInterface.sol @@ -0,0 +1,21 @@ +pragma solidity ^0.4.24; + +interface PluginRequestInterface { + function oracleRequest( + 
address sender, + uint256 payment, + bytes32 id, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 version, + bytes data + ) external; + + function cancelOracleRequest( + bytes32 requestId, + uint256 payment, + bytes4 callbackFunctionId, + uint256 expiration + ) external; +} diff --git a/contracts/src/v0.4/interfaces/PointerInterface.sol b/contracts/src/v0.4/interfaces/PointerInterface.sol new file mode 100644 index 00000000..ba0d224c --- /dev/null +++ b/contracts/src/v0.4/interfaces/PointerInterface.sol @@ -0,0 +1,5 @@ +pragma solidity ^0.4.24; + +interface PointerInterface { + function getAddress() external view returns (address); +} diff --git a/contracts/src/v0.4/tests/BasicConsumer.sol b/contracts/src/v0.4/tests/BasicConsumer.sol new file mode 100644 index 00000000..9346f534 --- /dev/null +++ b/contracts/src/v0.4/tests/BasicConsumer.sol @@ -0,0 +1,13 @@ +pragma solidity 0.4.24; + +import "./Consumer.sol"; + +contract BasicConsumer is Consumer { + + constructor(address _link, address _oracle, bytes32 _specId) public { + setPluginToken(_link); + setPluginOracle(_oracle); + specId = _specId; + } + +} diff --git a/contracts/src/v0.4/tests/ConcreteChainlink.sol b/contracts/src/v0.4/tests/ConcreteChainlink.sol new file mode 100644 index 00000000..c0c1fc65 --- /dev/null +++ b/contracts/src/v0.4/tests/ConcreteChainlink.sol @@ -0,0 +1,77 @@ +pragma solidity 0.4.24; + +import "../Plugin.sol"; +import { CBOR as CBOR_Plugin } from "../vendor/CBOR.sol"; +import { Buffer as Buffer_Plugin } from "../vendor/Buffer.sol"; + +contract ConcretePlugin { + using Plugin for Plugin.Request; + using CBOR_Plugin for Buffer_Plugin.buffer; + + Plugin.Request private req; + + event RequestData(bytes payload); + + function closeEvent() public { + emit RequestData(req.buf.buf); + } + + function setBuffer(bytes data) public { + Plugin.Request memory r2 = req; + r2.setBuffer(data); + req = r2; + } + + function add(string _key, string _value) public { + 
Plugin.Request memory r2 = req; + r2.add(_key, _value); + req = r2; + } + + function addBytes(string _key, bytes _value) public { + Plugin.Request memory r2 = req; + r2.addBytes(_key, _value); + req = r2; + } + + function addInt(string _key, int256 _value) public { + Plugin.Request memory r2 = req; + r2.addInt(_key, _value); + req = r2; + } + + function addUint(string _key, uint256 _value) public { + Plugin.Request memory r2 = req; + r2.addUint(_key, _value); + req = r2; + } + + // Temporarily have method receive bytes32[] memory until experimental + // string[] memory can be invoked from truffle tests. + function addStringArray(string _key, bytes32[] memory _values) public { + string[] memory strings = new string[](_values.length); + for (uint256 i = 0; i < _values.length; i++) { + strings[i] = bytes32ToString(_values[i]); + } + Plugin.Request memory r2 = req; + r2.addStringArray(_key, strings); + req = r2; + } + + function bytes32ToString(bytes32 x) private pure returns (string) { + bytes memory bytesString = new bytes(32); + uint charCount = 0; + for (uint j = 0; j < 32; j++) { + byte char = byte(bytes32(uint(x) * 2 ** (8 * j))); + if (char != 0) { + bytesString[charCount] = char; + charCount++; + } + } + bytes memory bytesStringTrimmed = new bytes(charCount); + for (j = 0; j < charCount; j++) { + bytesStringTrimmed[j] = bytesString[j]; + } + return string(bytesStringTrimmed); + } +} diff --git a/contracts/src/v0.4/tests/ConcreteChainlinked.sol b/contracts/src/v0.4/tests/ConcreteChainlinked.sol new file mode 100644 index 00000000..a61a4839 --- /dev/null +++ b/contracts/src/v0.4/tests/ConcreteChainlinked.sol @@ -0,0 +1,101 @@ +pragma solidity 0.4.24; + +import "../Plugined.sol"; +import "../vendor/SafeMathPlugin.sol"; + +contract ConcretePlugined is Plugined { + using SafeMathPlugin for uint256; + + constructor(address _link, address _oracle) public { + setLinkToken(_link); + setOracle(_oracle); + } + + event Request( + bytes32 id, + address callbackAddress, + 
bytes4 callbackfunctionSelector, + bytes data + ); + + function publicNewRequest( + bytes32 _id, + address _address, + bytes _fulfillmentSignature + ) + public + { + Plugin.Request memory req = newRequest( + _id, _address, bytes4(keccak256(_fulfillmentSignature))); + emit Request( + req.id, + req.callbackAddress, + req.callbackFunctionId, + req.buf.buf + ); + } + + function publicRequest( + bytes32 _id, + address _address, + bytes _fulfillmentSignature, + uint256 _wei + ) + public + { + Plugin.Request memory req = newRequest( + _id, _address, bytes4(keccak256(_fulfillmentSignature))); + pluginRequest(req, _wei); + } + + function publicRequestRunTo( + address _oracle, + bytes32 _id, + address _address, + bytes _fulfillmentSignature, + uint256 _wei + ) + public + { + Plugin.Request memory run = newRequest(_id, _address, bytes4(keccak256(_fulfillmentSignature))); + pluginRequestTo(_oracle, run, _wei); + } + + function publicCancelRequest( + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunctionId, + uint256 _expiration + ) public { + cancelPluginRequest(_requestId, _payment, _callbackFunctionId, _expiration); + } + + function publicPluginToken() public view returns (address) { + return pluginToken(); + } + + function fulfillRequest(bytes32 _requestId, bytes32) + public + recordPluginFulfillment(_requestId) + {} // solhint-disable-line no-empty-blocks + + function publicFulfillPluginRequest(bytes32 _requestId, bytes32) public { + fulfillPluginRequest(_requestId); + } + + event LinkAmount(uint256 amount); + + function publicPLI(uint256 _amount) public { + emit LinkAmount(PLI.mul(_amount)); + } + + function publicOracleAddress() public view returns (address) { + return oracleAddress(); + } + + function publicAddExternalRequest(address _oracle, bytes32 _requestId) + public + { + addExternalRequest(_oracle, _requestId); + } +} diff --git a/contracts/src/v0.4/tests/Consumer.sol b/contracts/src/v0.4/tests/Consumer.sol new file mode 100644 index 00000000..a8c001d1 
--- /dev/null +++ b/contracts/src/v0.4/tests/Consumer.sol @@ -0,0 +1,47 @@ +pragma solidity 0.4.24; + +import "../PluginClient.sol"; + +contract Consumer is PluginClient { + bytes32 internal specId; + bytes32 public currentPrice; + + uint256 constant private ORACLE_PAYMENT = 1 * PLI; + + event RequestFulfilled( + bytes32 indexed requestId, // User-defined ID + bytes32 indexed price + ); + + function requestEthereumPrice(string _currency) public { + Plugin.Request memory req = buildPluginRequest(specId, this, this.fulfill.selector); + req.add("get", "https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR,JPY"); + string[] memory path = new string[](1); + path[0] = _currency; + req.addStringArray("path", path); + sendPluginRequest(req, ORACLE_PAYMENT); + } + + function cancelRequest( + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunctionId, + uint256 _expiration + ) public { + cancelPluginRequest(_requestId, _payment, _callbackFunctionId, _expiration); + } + + function withdrawLink() public { + LinkTokenInterface link = LinkTokenInterface(pluginTokenAddress()); + require(link.transfer(msg.sender, link.balanceOf(address(this))), "Unable to transfer"); + } + + function fulfill(bytes32 _requestId, bytes32 _price) + public + recordPluginFulfillment(_requestId) + { + emit RequestFulfilled(_requestId, _price); + currentPrice = _price; + } + +} diff --git a/contracts/src/v0.4/tests/EmptyOracle.sol b/contracts/src/v0.4/tests/EmptyOracle.sol new file mode 100644 index 00000000..343d87dd --- /dev/null +++ b/contracts/src/v0.4/tests/EmptyOracle.sol @@ -0,0 +1,19 @@ +pragma solidity 0.4.24; + +import "../interfaces/PluginRequestInterface.sol"; +import "../interfaces/OracleInterface.sol"; + +/* solhint-disable no-empty-blocks */ + +contract EmptyOracle is PluginRequestInterface, OracleInterface { + + function cancelOracleRequest(bytes32, uint256, bytes4, uint256) external {} + function fulfillOracleRequest(bytes32, uint256, address, bytes4, uint256, 
bytes32) external returns (bool) {} + function getAuthorizationStatus(address) external view returns (bool) { return false; } + function onTokenTransfer(address, uint256, bytes) external pure {} + function oracleRequest(address, uint256, bytes32, address, bytes4, uint256, uint256, bytes) external {} + function setFulfillmentPermission(address, bool) external {} + function withdraw(address, uint256) external {} + function withdrawable() external view returns (uint256) {} + +} diff --git a/contracts/src/v0.4/tests/GetterSetter.sol b/contracts/src/v0.4/tests/GetterSetter.sol new file mode 100644 index 00000000..fcd86c7d --- /dev/null +++ b/contracts/src/v0.4/tests/GetterSetter.sol @@ -0,0 +1,45 @@ +pragma solidity 0.4.24; + +// GetterSetter is a contract to aid debugging and testing during development. +contract GetterSetter { + bytes32 public getBytes32; + uint256 public getUint256; + bytes32 public requestId; + bytes public getBytes; + + event SetBytes32(address indexed from, bytes32 indexed value); + event SetUint256(address indexed from, uint256 indexed value); + event SetBytes(address indexed from, bytes value); + + event Output(bytes32 b32, uint256 u256, bytes32 b322); + + function setBytes32(bytes32 _value) public { + getBytes32 = _value; + emit SetBytes32(msg.sender, _value); + } + + function requestedBytes32(bytes32 _requestId, bytes32 _value) public { + requestId = _requestId; + setBytes32(_value); + } + + function setBytes(bytes _value) public { + getBytes = _value; + emit SetBytes(msg.sender, _value); + } + + function requestedBytes(bytes32 _requestId, bytes _value) public { + requestId = _requestId; + setBytes(_value); + } + + function setUint256(uint256 _value) public { + getUint256 = _value; + emit SetUint256(msg.sender, _value); + } + + function requestedUint256(bytes32 _requestId, uint256 _value) public { + requestId = _requestId; + setUint256(_value); + } +} diff --git a/contracts/src/v0.4/tests/MaliciousChainlink.sol 
b/contracts/src/v0.4/tests/MaliciousChainlink.sol new file mode 100644 index 00000000..0c7066cf --- /dev/null +++ b/contracts/src/v0.4/tests/MaliciousChainlink.sol @@ -0,0 +1,76 @@ +pragma solidity 0.4.24; + +import { CBOR as CBOR_Plugin } from "../vendor/CBOR.sol"; +import { Buffer as Buffer_Plugin } from "../vendor/Buffer.sol"; + +library MaliciousPlugin { + using CBOR_Plugin for Buffer_Plugin.buffer; + + struct Request { + bytes32 specId; + address callbackAddress; + bytes4 callbackFunctionId; + uint256 nonce; + Buffer_Plugin.buffer buf; + } + + struct WithdrawRequest { + bytes32 specId; + address callbackAddress; + bytes4 callbackFunctionId; + uint256 nonce; + Buffer_Plugin.buffer buf; + } + + function initializeWithdraw( + WithdrawRequest memory self, + bytes32 _specId, + address _callbackAddress, + bytes4 _callbackFunction + ) internal pure returns (MaliciousPlugin.WithdrawRequest memory) { + Buffer_Plugin.init(self.buf, 128); + self.specId = _specId; + self.callbackAddress = _callbackAddress; + self.callbackFunctionId = _callbackFunction; + return self; + } + + function add(Request memory self, string _key, string _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeString(_value); + } + + function addBytes(Request memory self, string _key, bytes _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeBytes(_value); + } + + function addInt(Request memory self, string _key, int256 _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeInt(_value); + } + + function addUint(Request memory self, string _key, uint256 _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeUInt(_value); + } + + function addStringArray(Request memory self, string _key, string[] memory _values) + internal pure + { + self.buf.encodeString(_key); + self.buf.startArray(); + for (uint256 i = 0; i < _values.length; i++) { + self.buf.encodeString(_values[i]); + } + self.buf.endSequence(); + } +} diff --git 
a/contracts/src/v0.4/tests/MaliciousChainlinked.sol b/contracts/src/v0.4/tests/MaliciousChainlinked.sol new file mode 100644 index 00000000..cb741c2b --- /dev/null +++ b/contracts/src/v0.4/tests/MaliciousChainlinked.sol @@ -0,0 +1,109 @@ +pragma solidity 0.4.24; + +import "./MaliciousPlugin.sol"; +import "../Plugined.sol"; +import "../vendor/SafeMathPlugin.sol"; + +contract MaliciousPlugined is Plugined { + using MaliciousPlugin for MaliciousPlugin.Request; + using MaliciousPlugin for MaliciousPlugin.WithdrawRequest; + using Plugin for Plugin.Request; + using SafeMathPlugin for uint256; + + uint256 private maliciousRequests = 1; + mapping(bytes32 => address) private maliciousPendingRequests; + + function newWithdrawRequest( + bytes32 _specId, + address _callbackAddress, + bytes4 _callbackFunction + ) internal pure returns (MaliciousPlugin.WithdrawRequest memory) { + MaliciousPlugin.WithdrawRequest memory req; + return req.initializeWithdraw(_specId, _callbackAddress, _callbackFunction); + } + + function pluginTargetRequest(address _target, Plugin.Request memory _req, uint256 _amount) + internal + returns(bytes32 requestId) + { + requestId = keccak256(abi.encodePacked(_target, maliciousRequests)); + _req.nonce = maliciousRequests; + maliciousPendingRequests[requestId] = oracleAddress(); + emit PluginRequested(requestId); + LinkTokenInterface link = LinkTokenInterface(pluginToken()); + require(link.transferAndCall(oracleAddress(), _amount, encodeTargetRequest(_req)), "Unable to transferAndCall to oracle"); + maliciousRequests += 1; + + return requestId; + } + + function pluginPriceRequest(Plugin.Request memory _req, uint256 _amount) + internal + returns(bytes32 requestId) + { + requestId = keccak256(abi.encodePacked(this, maliciousRequests)); + _req.nonce = maliciousRequests; + maliciousPendingRequests[requestId] = oracleAddress(); + emit PluginRequested(requestId); + LinkTokenInterface link = LinkTokenInterface(pluginToken()); + 
require(link.transferAndCall(oracleAddress(), _amount, encodePriceRequest(_req)), "Unable to transferAndCall to oracle"); + maliciousRequests += 1; + + return requestId; + } + + function pluginWithdrawRequest(MaliciousPlugin.WithdrawRequest memory _req, uint256 _wei) + internal + returns(bytes32 requestId) + { + requestId = keccak256(abi.encodePacked(this, maliciousRequests)); + _req.nonce = maliciousRequests; + maliciousPendingRequests[requestId] = oracleAddress(); + emit PluginRequested(requestId); + LinkTokenInterface link = LinkTokenInterface(pluginToken()); + require(link.transferAndCall(oracleAddress(), _wei, encodeWithdrawRequest(_req)), "Unable to transferAndCall to oracle"); + maliciousRequests += 1; + return requestId; + } + + function encodeWithdrawRequest(MaliciousPlugin.WithdrawRequest memory _req) + internal pure returns (bytes memory) + { + return abi.encodeWithSelector( + bytes4(keccak256("withdraw(address,uint256)")), + _req.callbackAddress, + _req.callbackFunctionId, + _req.nonce, + _req.buf.buf); + } + + function encodeTargetRequest(Plugin.Request memory _req) + internal pure returns (bytes memory) + { + return abi.encodeWithSelector( + bytes4(keccak256("oracleRequest(address,uint256,bytes32,address,bytes4,uint256,uint256,bytes)")), + 0, // overridden by onTokenTransfer + 0, // overridden by onTokenTransfer + _req.id, + _req.callbackAddress, + _req.callbackFunctionId, + _req.nonce, + 1, + _req.buf.buf); + } + + function encodePriceRequest(Plugin.Request memory _req) + internal pure returns (bytes memory) + { + return abi.encodeWithSelector( + bytes4(keccak256("oracleRequest(address,uint256,bytes32,address,bytes4,uint256,uint256,bytes)")), + 0, // overridden by onTokenTransfer + 2000000000000000000, // overridden by onTokenTransfer + _req.id, + _req.callbackAddress, + _req.callbackFunctionId, + _req.nonce, + 1, + _req.buf.buf); + } +} diff --git a/contracts/src/v0.4/tests/MaliciousConsumer.sol b/contracts/src/v0.4/tests/MaliciousConsumer.sol new 
file mode 100644 index 00000000..02c1b44a --- /dev/null +++ b/contracts/src/v0.4/tests/MaliciousConsumer.sol @@ -0,0 +1,57 @@ +pragma solidity 0.4.24; + + +import "../Plugined.sol"; +import "../vendor/SafeMathPlugin.sol"; + + +contract MaliciousConsumer is Plugined { + using SafeMathPlugin for uint256; + + uint256 constant private ORACLE_PAYMENT = 1 * PLI; + uint256 private expiration; + + constructor(address _link, address _oracle) public payable { + setLinkToken(_link); + setOracle(_oracle); + } + + function () public payable {} // solhint-disable-line no-empty-blocks + + function requestData(bytes32 _id, bytes _callbackFunc) public { + Plugin.Request memory req = newRequest(_id, this, bytes4(keccak256(_callbackFunc))); + expiration = now.add(5 minutes); // solhint-disable-line not-rely-on-time + pluginRequest(req, ORACLE_PAYMENT); + } + + function assertFail(bytes32, bytes32) public pure { + assert(1 == 2); + } + + function cancelRequestOnFulfill(bytes32 _requestId, bytes32) public { + cancelPluginRequest( + _requestId, + ORACLE_PAYMENT, + this.cancelRequestOnFulfill.selector, + expiration); + } + + function remove() public { + selfdestruct(address(0)); + } + + function stealEthCall(bytes32 _requestId, bytes32) public recordPluginFulfillment(_requestId) { + require(address(this).call.value(100)(), "Call failed"); // solhint-disable-line avoid-call-value + } + + function stealEthSend(bytes32 _requestId, bytes32) public recordPluginFulfillment(_requestId) { + // solhint-disable-next-line check-send-result + require(address(this).send(100), "Send failed"); // solhint-disable-line multiple-sends + } + + function stealEthTransfer(bytes32 _requestId, bytes32) public recordPluginFulfillment(_requestId) { + address(this).transfer(100); + } + + function doesNothing(bytes32, bytes32) public pure {} // solhint-disable-line no-empty-blocks +} diff --git a/contracts/src/v0.4/tests/MaliciousRequester.sol b/contracts/src/v0.4/tests/MaliciousRequester.sol new file mode 100644 
index 00000000..e5cc8435 --- /dev/null +++ b/contracts/src/v0.4/tests/MaliciousRequester.sol @@ -0,0 +1,52 @@ +pragma solidity 0.4.24; + + +import "./MaliciousPlugined.sol"; + + +contract MaliciousRequester is MaliciousPlugined { + + uint256 constant private ORACLE_PAYMENT = 1 * PLI; + uint256 private expiration; + + constructor(address _link, address _oracle) public { + setLinkToken(_link); + setOracle(_oracle); + } + + function maliciousWithdraw() + public + { + MaliciousPlugin.WithdrawRequest memory req = newWithdrawRequest( + "specId", this, this.doesNothing.selector); + pluginWithdrawRequest(req, ORACLE_PAYMENT); + } + + function request(bytes32 _id, address _target, bytes _callbackFunc) public returns (bytes32 requestId) { + Plugin.Request memory req = newRequest(_id, _target, bytes4(keccak256(_callbackFunc))); + expiration = now.add(5 minutes); // solhint-disable-line not-rely-on-time + requestId = pluginRequest(req, ORACLE_PAYMENT); + } + + function maliciousPrice(bytes32 _id) public returns (bytes32 requestId) { + Plugin.Request memory req = newRequest(_id, this, this.doesNothing.selector); + requestId = pluginPriceRequest(req, ORACLE_PAYMENT); + } + + function maliciousTargetConsumer(address _target) public returns (bytes32 requestId) { + Plugin.Request memory req = newRequest("specId", _target, bytes4(keccak256("fulfill(bytes32,bytes32)"))); + requestId = pluginTargetRequest(_target, req, ORACLE_PAYMENT); + } + + function maliciousRequestCancel(bytes32 _id, bytes _callbackFunc) public { + PluginRequestInterface oracle = PluginRequestInterface(oracleAddress()); + oracle.cancelOracleRequest( + request(_id, this, _callbackFunc), + ORACLE_PAYMENT, + this.maliciousRequestCancel.selector, + expiration + ); + } + + function doesNothing(bytes32, bytes32) public pure {} // solhint-disable-line no-empty-blocks +} diff --git a/contracts/src/v0.4/tests/UpdatableConsumer.sol b/contracts/src/v0.4/tests/UpdatableConsumer.sol new file mode 100644 index 
00000000..7e58314f --- /dev/null +++ b/contracts/src/v0.4/tests/UpdatableConsumer.sol @@ -0,0 +1,24 @@ +pragma solidity 0.4.24; + +import "./Consumer.sol"; + +contract UpdatableConsumer is Consumer { + + constructor(bytes32 _specId, address _ens, bytes32 _node) public { + specId = _specId; + usePluginWithENS(_ens, _node); + } + + function updateOracle() public { + updatePluginOracleWithENS(); + } + + function getPluginToken() public view returns (address) { + return pluginTokenAddress(); + } + + function getOracle() public view returns (address) { + return pluginOracleAddress(); + } + +} diff --git a/contracts/src/v0.4/vendor/BasicToken.sol b/contracts/src/v0.4/vendor/BasicToken.sol new file mode 100644 index 00000000..e32fa87c --- /dev/null +++ b/contracts/src/v0.4/vendor/BasicToken.sol @@ -0,0 +1,38 @@ +pragma solidity ^0.4.24; + + +import { ERC20Basic as linkERC20Basic } from "../interfaces/ERC20Basic.sol"; +import { SafeMathPlugin as linkSafeMath } from "./SafeMathPlugin.sol"; + + +/** + * @title Basic token + * @dev Basic version of StandardToken, with no allowances. + */ +contract BasicToken is linkERC20Basic { + using linkSafeMath for uint256; + + mapping(address => uint256) balances; + + /** + * @dev transfer token for a specified address + * @param _to The address to transfer to. + * @param _value The amount to be transferred. + */ + function transfer(address _to, uint256 _value) returns (bool) { + balances[msg.sender] = balances[msg.sender].sub(_value); + balances[_to] = balances[_to].add(_value); + Transfer(msg.sender, _to, _value); + return true; + } + + /** + * @dev Gets the balance of the specified address. + * @param _owner The address to query the the balance of. + * @return An uint256 representing the amount owned by the passed address. 
+ */ + function balanceOf(address _owner) constant returns (uint256 balance) { + return balances[_owner]; + } + +} diff --git a/contracts/src/v0.4/vendor/Buffer.sol b/contracts/src/v0.4/vendor/Buffer.sol new file mode 100644 index 00000000..d25ae7c7 --- /dev/null +++ b/contracts/src/v0.4/vendor/Buffer.sol @@ -0,0 +1,301 @@ +pragma solidity >0.4.18; + +/** +* @dev A library for working with mutable byte buffers in Solidity. +* +* Byte buffers are mutable and expandable, and provide a variety of primitives +* for writing to them. At any time you can fetch a bytes object containing the +* current contents of the buffer. The bytes object should not be stored between +* operations, as it may change due to resizing of the buffer. +*/ +library Buffer { + /** + * @dev Represents a mutable buffer. Buffers have a current value (buf) and + * a capacity. The capacity may be longer than the current value, in + * which case it can be extended without the need to allocate more memory. + */ + struct buffer { + bytes buf; + uint capacity; + } + + /** + * @dev Initializes a buffer with an initial capacity. + * @param buf The buffer to initialize. + * @param capacity The number of bytes of space to allocate the buffer. + * @return The buffer, for chaining. + */ + function init(buffer memory buf, uint capacity) internal pure returns(buffer memory) { + if (capacity % 32 != 0) { + capacity += 32 - (capacity % 32); + } + // Allocate space for the buffer data + buf.capacity = capacity; + assembly { + let ptr := mload(0x40) + mstore(buf, ptr) + mstore(ptr, 0) + mstore(0x40, add(32, add(ptr, capacity))) + } + return buf; + } + + /** + * @dev Initializes a new buffer from an existing bytes object. + * Changes to the buffer may mutate the original value. + * @param b The bytes object to initialize the buffer with. + * @return A new buffer. 
+ */ + function fromBytes(bytes memory b) internal pure returns(buffer memory) { + buffer memory buf; + buf.buf = b; + buf.capacity = b.length; + return buf; + } + + function resize(buffer memory buf, uint capacity) private pure { + bytes memory oldbuf = buf.buf; + init(buf, capacity); + append(buf, oldbuf); + } + + function max(uint a, uint b) private pure returns(uint) { + if (a > b) { + return a; + } + return b; + } + + /** + * @dev Sets buffer length to 0. + * @param buf The buffer to truncate. + * @return The original buffer, for chaining.. + */ + function truncate(buffer memory buf) internal pure returns (buffer memory) { + assembly { + let bufptr := mload(buf) + mstore(bufptr, 0) + } + return buf; + } + + /** + * @dev Writes a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param off The start offset to write to. + * @param data The data to append. + * @param len The number of bytes to copy. + * @return The original buffer, for chaining. 
+ */ + function write(buffer memory buf, uint off, bytes memory data, uint len) internal pure returns(buffer memory) { + require(len <= data.length); + + if (off + len > buf.capacity) { + resize(buf, max(buf.capacity, len + off) * 2); + } + + uint dest; + uint src; + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Length of existing buffer data + let buflen := mload(bufptr) + // Start address = buffer address + offset + sizeof(buffer length) + dest := add(add(bufptr, 32), off) + // Update buffer length if we're extending it + if gt(add(len, off), buflen) { + mstore(bufptr, add(len, off)) + } + src := add(data, 32) + } + + // Copy word-length chunks while possible + for (; len >= 32; len -= 32) { + assembly { + mstore(dest, mload(src)) + } + dest += 32; + src += 32; + } + + // Copy remaining bytes + uint mask = 256 ** (32 - len) - 1; + assembly { + let srcpart := and(mload(src), not(mask)) + let destpart := and(mload(dest), mask) + mstore(dest, or(destpart, srcpart)) + } + + return buf; + } + + /** + * @dev Appends a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @param len The number of bytes to copy. + * @return The original buffer, for chaining. + */ + function append(buffer memory buf, bytes memory data, uint len) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, len); + } + + /** + * @dev Appends a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function append(buffer memory buf, bytes memory data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, data.length); + } + + /** + * @dev Writes a byte to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. 
+ * @param buf The buffer to append to. + * @param off The offset to write the byte at. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function writeUint8(buffer memory buf, uint off, uint8 data) internal pure returns(buffer memory) { + if (off >= buf.capacity) { + resize(buf, buf.capacity * 2); + } + + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Length of existing buffer data + let buflen := mload(bufptr) + // Address = buffer address + sizeof(buffer length) + off + let dest := add(add(bufptr, off), 32) + mstore8(dest, data) + // Update buffer length if we extended it + if eq(off, buflen) { + mstore(bufptr, add(buflen, 1)) + } + } + return buf; + } + + /** + * @dev Appends a byte to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendUint8(buffer memory buf, uint8 data) internal pure returns(buffer memory) { + return writeUint8(buf, buf.buf.length, data); + } + + /** + * @dev Writes up to 32 bytes to the buffer. Resizes if doing so would + * exceed the capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @param len The number of bytes to write (left-aligned). + * @return The original buffer, for chaining. 
+ */ + function write(buffer memory buf, uint off, bytes32 data, uint len) private pure returns(buffer memory) { + if (len + off > buf.capacity) { + resize(buf, (len + off) * 2); + } + + uint mask = 256 ** len - 1; + // Right-align data + data = data >> (8 * (32 - len)); + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + sizeof(buffer length) + off + len + let dest := add(add(bufptr, off), len) + mstore(dest, or(and(mload(dest), not(mask)), data)) + // Update buffer length if we extended it + if gt(add(off, len), mload(bufptr)) { + mstore(bufptr, add(off, len)) + } + } + return buf; + } + + /** + * @dev Writes a bytes20 to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function writeBytes20(buffer memory buf, uint off, bytes20 data) internal pure returns (buffer memory) { + return write(buf, off, bytes32(data), 20); + } + + /** + * @dev Appends a bytes20 to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chhaining. + */ + function appendBytes20(buffer memory buf, bytes20 data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, bytes32(data), 20); + } + + /** + * @dev Appends a bytes32 to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendBytes32(buffer memory buf, bytes32 data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, 32); + } + + /** + * @dev Writes an integer to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. 
+ * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @param len The number of bytes to write (right-aligned). + * @return The original buffer, for chaining. + */ + function writeInt(buffer memory buf, uint off, uint data, uint len) private pure returns(buffer memory) { + if (len + off > buf.capacity) { + resize(buf, (len + off) * 2); + } + + uint mask = 256 ** len - 1; + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + off + sizeof(buffer length) + len + let dest := add(add(bufptr, off), len) + mstore(dest, or(and(mload(dest), not(mask)), data)) + // Update buffer length if we extended it + if gt(add(off, len), mload(bufptr)) { + mstore(bufptr, add(off, len)) + } + } + return buf; + } + + /** + * @dev Appends a byte to the end of the buffer. Resizes if doing so would + * exceed the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer. 
+ */ + function appendInt(buffer memory buf, uint data, uint len) internal pure returns(buffer memory) { + return writeInt(buf, buf.buf.length, data, len); + } +} diff --git a/contracts/src/v0.4/vendor/CBOR.sol b/contracts/src/v0.4/vendor/CBOR.sol new file mode 100644 index 00000000..27d3e34a --- /dev/null +++ b/contracts/src/v0.4/vendor/CBOR.sol @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: MIT +pragma solidity >= 0.4.19 < 0.7.0; + +import { Buffer as BufferPlugin } from "./Buffer.sol"; + +library CBOR { + using BufferPlugin for BufferPlugin.buffer; + + uint8 private constant MAJOR_TYPE_INT = 0; + uint8 private constant MAJOR_TYPE_NEGATIVE_INT = 1; + uint8 private constant MAJOR_TYPE_BYTES = 2; + uint8 private constant MAJOR_TYPE_STRING = 3; + uint8 private constant MAJOR_TYPE_ARRAY = 4; + uint8 private constant MAJOR_TYPE_MAP = 5; + uint8 private constant MAJOR_TYPE_TAG = 6; + uint8 private constant MAJOR_TYPE_CONTENT_FREE = 7; + + uint8 private constant TAG_TYPE_BIGNUM = 2; + uint8 private constant TAG_TYPE_NEGATIVE_BIGNUM = 3; + + function encodeFixedNumeric(BufferPlugin.buffer memory buf, uint8 major, uint64 value) private pure { + if(value <= 23) { + buf.appendUint8(uint8((major << 5) | value)); + } else if(value <= 0xFF) { + buf.appendUint8(uint8((major << 5) | 24)); + buf.appendInt(value, 1); + } else if(value <= 0xFFFF) { + buf.appendUint8(uint8((major << 5) | 25)); + buf.appendInt(value, 2); + } else if(value <= 0xFFFFFFFF) { + buf.appendUint8(uint8((major << 5) | 26)); + buf.appendInt(value, 4); + } else { + buf.appendUint8(uint8((major << 5) | 27)); + buf.appendInt(value, 8); + } + } + + function encodeIndefiniteLengthType(BufferPlugin.buffer memory buf, uint8 major) private pure { + buf.appendUint8(uint8((major << 5) | 31)); + } + + function encodeUInt(BufferPlugin.buffer memory buf, uint value) internal pure { + if(value > 0xFFFFFFFFFFFFFFFF) { + encodeBigNum(buf, value); + } else { + encodeFixedNumeric(buf, MAJOR_TYPE_INT, uint64(value)); + } + } + + 
function encodeInt(BufferPlugin.buffer memory buf, int value) internal pure { + if(value < -0x10000000000000000) { + encodeSignedBigNum(buf, value); + } else if(value > 0xFFFFFFFFFFFFFFFF) { + encodeBigNum(buf, uint(value)); + } else if(value >= 0) { + encodeFixedNumeric(buf, MAJOR_TYPE_INT, uint64(value)); + } else { + encodeFixedNumeric(buf, MAJOR_TYPE_NEGATIVE_INT, uint64(-1 - value)); + } + } + + function encodeBytes(BufferPlugin.buffer memory buf, bytes memory value) internal pure { + encodeFixedNumeric(buf, MAJOR_TYPE_BYTES, uint64(value.length)); + buf.append(value); + } + + function encodeBigNum(BufferPlugin.buffer memory buf, uint value) internal pure { + buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_BIGNUM)); + encodeBytes(buf, abi.encode(value)); + } + + function encodeSignedBigNum(BufferPlugin.buffer memory buf, int input) internal pure { + buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_NEGATIVE_BIGNUM)); + encodeBytes(buf, abi.encode(uint(-1 - input))); + } + + function encodeString(BufferPlugin.buffer memory buf, string memory value) internal pure { + encodeFixedNumeric(buf, MAJOR_TYPE_STRING, uint64(bytes(value).length)); + buf.append(bytes(value)); + } + + function startArray(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_ARRAY); + } + + function startMap(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_MAP); + } + + function endSequence(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_CONTENT_FREE); + } +} diff --git a/contracts/src/v0.4/vendor/ENS.sol b/contracts/src/v0.4/vendor/ENS.sol new file mode 100644 index 00000000..36e4ad4a --- /dev/null +++ b/contracts/src/v0.4/vendor/ENS.sol @@ -0,0 +1,26 @@ +pragma solidity ^0.4.24; + +interface ENS { + + // Logged when the owner of a node assigns a new owner to a subnode. 
+ event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner); + + // Logged when the owner of a node transfers ownership to a new account. + event Transfer(bytes32 indexed node, address owner); + + // Logged when the resolver for a node changes. + event NewResolver(bytes32 indexed node, address resolver); + + // Logged when the TTL of a node changes + event NewTTL(bytes32 indexed node, uint64 ttl); + + + function setSubnodeOwner(bytes32 node, bytes32 label, address owner) public; + function setResolver(bytes32 node, address resolver) public; + function setOwner(bytes32 node, address owner) public; + function setTTL(bytes32 node, uint64 ttl) public; + function owner(bytes32 node) public view returns (address); + function resolver(bytes32 node) public view returns (address); + function ttl(bytes32 node) public view returns (uint64); + +} diff --git a/contracts/src/v0.4/vendor/ENSRegistry.sol b/contracts/src/v0.4/vendor/ENSRegistry.sol new file mode 100644 index 00000000..95a54cd5 --- /dev/null +++ b/contracts/src/v0.4/vendor/ENSRegistry.sol @@ -0,0 +1,99 @@ +pragma solidity ^0.4.24; + +import "./ENS.sol"; + +/** + * The ENS registry contract. + */ +contract ENSRegistry is ENS { + struct Record { + address owner; + address resolver; + uint64 ttl; + } + + mapping (bytes32 => Record) records; + + // Permits modifications only by the owner of the specified node. + modifier only_owner(bytes32 node) { + require(records[node].owner == msg.sender); + _; + } + + /** + * @dev Constructs a new ENS registrar. + */ + constructor() public { + records[0x0].owner = msg.sender; + } + + /** + * @dev Transfers ownership of a node to a new address. May only be called by the current owner of the node. + * @param node The node to transfer ownership of. + * @param owner The address of the new owner. 
+ */ + function setOwner(bytes32 node, address owner) public only_owner(node) { + emit Transfer(node, owner); + records[node].owner = owner; + } + + /** + * @dev Transfers ownership of a subnode keccak256(node, label) to a new address. May only be called by the owner of the parent node. + * @param node The parent node. + * @param label The hash of the label specifying the subnode. + * @param owner The address of the new owner. + */ + function setSubnodeOwner(bytes32 node, bytes32 label, address owner) public only_owner(node) { + bytes32 subnode = keccak256(abi.encodePacked(node, label)); + emit NewOwner(node, label, owner); + records[subnode].owner = owner; + } + + /** + * @dev Sets the resolver address for the specified node. + * @param node The node to update. + * @param resolver The address of the resolver. + */ + function setResolver(bytes32 node, address resolver) public only_owner(node) { + emit NewResolver(node, resolver); + records[node].resolver = resolver; + } + + /** + * @dev Sets the TTL for the specified node. + * @param node The node to update. + * @param ttl The TTL in seconds. + */ + function setTTL(bytes32 node, uint64 ttl) public only_owner(node) { + emit NewTTL(node, ttl); + records[node].ttl = ttl; + } + + /** + * @dev Returns the address that owns the specified node. + * @param node The specified node. + * @return address of the owner. + */ + function owner(bytes32 node) public view returns (address) { + return records[node].owner; + } + + /** + * @dev Returns the address of the resolver for the specified node. + * @param node The specified node. + * @return address of the resolver. + */ + function resolver(bytes32 node) public view returns (address) { + return records[node].resolver; + } + + /** + * @dev Returns the TTL of a node, and any records associated with it. + * @param node The specified node. + * @return ttl of the node. 
+ */ + function ttl(bytes32 node) public view returns (uint64) { + return records[node].ttl; + } + +} diff --git a/contracts/src/v0.4/vendor/ENSResolver.sol b/contracts/src/v0.4/vendor/ENSResolver.sol new file mode 100644 index 00000000..c5149bf9 --- /dev/null +++ b/contracts/src/v0.4/vendor/ENSResolver.sol @@ -0,0 +1,5 @@ +pragma solidity 0.4.24; + +contract ENSResolver { + function addr(bytes32 node) public view returns (address); +} diff --git a/contracts/src/v0.4/vendor/Ownable.sol b/contracts/src/v0.4/vendor/Ownable.sol new file mode 100644 index 00000000..1fbf571b --- /dev/null +++ b/contracts/src/v0.4/vendor/Ownable.sol @@ -0,0 +1,64 @@ +pragma solidity ^0.4.24; + + +/** + * @title Ownable + * @dev The Ownable contract has an owner address, and provides basic authorization control + * functions, this simplifies the implementation of "user permissions". + */ +contract Ownable { + address public owner; + + + event OwnershipRenounced(address indexed previousOwner); + event OwnershipTransferred( + address indexed previousOwner, + address indexed newOwner + ); + + + /** + * @dev The Ownable constructor sets the original `owner` of the contract to the sender + * account. + */ + constructor() public { + owner = msg.sender; + } + + /** + * @dev Throws if called by any account other than the owner. + */ + modifier onlyOwner() { + require(msg.sender == owner); + _; + } + + /** + * @dev Allows the current owner to relinquish control of the contract. + * @notice Renouncing to ownership will leave the contract without an owner. + * It will not be possible to call the functions with the `onlyOwner` + * modifier anymore. + */ + function renounceOwnership() public onlyOwner { + emit OwnershipRenounced(owner); + owner = address(0); + } + + /** + * @dev Allows the current owner to transfer control of the contract to a newOwner. + * @param _newOwner The address to transfer ownership to. 
+ */ + function transferOwnership(address _newOwner) public onlyOwner { + _transferOwnership(_newOwner); + } + + /** + * @dev Transfers control of the contract to a newOwner. + * @param _newOwner The address to transfer ownership to. + */ + function _transferOwnership(address _newOwner) internal { + require(_newOwner != address(0)); + emit OwnershipTransferred(owner, _newOwner); + owner = _newOwner; + } +} diff --git a/contracts/src/v0.4/vendor/PublicResolver.sol b/contracts/src/v0.4/vendor/PublicResolver.sol new file mode 100644 index 00000000..50a01fdf --- /dev/null +++ b/contracts/src/v0.4/vendor/PublicResolver.sol @@ -0,0 +1,238 @@ +pragma solidity ^0.4.24; + +import "./ENS.sol"; + +/** + * A simple resolver anyone can use; only allows the owner of a node to set its + * address. + */ +contract PublicResolver { + + bytes4 constant INTERFACE_META_ID = 0x01ffc9a7; + bytes4 constant ADDR_INTERFACE_ID = 0x3b3b57de; + bytes4 constant CONTENT_INTERFACE_ID = 0xd8389dc5; + bytes4 constant NAME_INTERFACE_ID = 0x691f3431; + bytes4 constant ABI_INTERFACE_ID = 0x2203ab56; + bytes4 constant PUBKEY_INTERFACE_ID = 0xc8690233; + bytes4 constant TEXT_INTERFACE_ID = 0x59d1d43c; + bytes4 constant MULTIHASH_INTERFACE_ID = 0xe89401a1; + + event AddrChanged(bytes32 indexed node, address a); + event ContentChanged(bytes32 indexed node, bytes32 hash); + event NameChanged(bytes32 indexed node, string name); + event ABIChanged(bytes32 indexed node, uint256 indexed contentType); + event PubkeyChanged(bytes32 indexed node, bytes32 x, bytes32 y); + event TextChanged(bytes32 indexed node, string indexedKey, string key); + event MultihashChanged(bytes32 indexed node, bytes hash); + + struct PublicKey { + bytes32 x; + bytes32 y; + } + + struct Record { + address addr; + bytes32 content; + string name; + PublicKey pubkey; + mapping(string=>string) text; + mapping(uint256=>bytes) abis; + bytes multihash; + } + + ENS ens; + + mapping (bytes32 => Record) records; + + modifier only_owner(bytes32 
node) { + require(ens.owner(node) == msg.sender); + _; + } + + /** + * Constructor. + * @param ensAddr The ENS registrar contract. + */ + constructor(ENS ensAddr) public { + ens = ensAddr; + } + + /** + * Sets the address associated with an ENS node. + * May only be called by the owner of that node in the ENS registry. + * @param node The node to update. + * @param addr The address to set. + */ + function setAddr(bytes32 node, address addr) public only_owner(node) { + records[node].addr = addr; + emit AddrChanged(node, addr); + } + + /** + * Sets the content hash associated with an ENS node. + * May only be called by the owner of that node in the ENS registry. + * Note that this resource type is not standardized, and will likely change + * in future to a resource type based on multihash. + * @param node The node to update. + * @param hash The content hash to set + */ + function setContent(bytes32 node, bytes32 hash) public only_owner(node) { + records[node].content = hash; + emit ContentChanged(node, hash); + } + + /** + * Sets the multihash associated with an ENS node. + * May only be called by the owner of that node in the ENS registry. + * @param node The node to update. + * @param hash The multihash to set + */ + function setMultihash(bytes32 node, bytes hash) public only_owner(node) { + records[node].multihash = hash; + emit MultihashChanged(node, hash); + } + + /** + * Sets the name associated with an ENS node, for reverse records. + * May only be called by the owner of that node in the ENS registry. + * @param node The node to update. + * @param name The name to set. + */ + function setName(bytes32 node, string name) public only_owner(node) { + records[node].name = name; + emit NameChanged(node, name); + } + + /** + * Sets the ABI associated with an ENS node. + * Nodes may have one ABI of each content type. To remove an ABI, set it to + * the empty string. + * @param node The node to update. 
+ * @param contentType The content type of the ABI + * @param data The ABI data. + */ + function setABI(bytes32 node, uint256 contentType, bytes data) public only_owner(node) { + // Content types must be powers of 2 + require(((contentType - 1) & contentType) == 0); + + records[node].abis[contentType] = data; + emit ABIChanged(node, contentType); + } + + /** + * Sets the SECP256k1 public key associated with an ENS node. + * @param node The ENS node to query + * @param x the X coordinate of the curve point for the public key. + * @param y the Y coordinate of the curve point for the public key. + */ + function setPubkey(bytes32 node, bytes32 x, bytes32 y) public only_owner(node) { + records[node].pubkey = PublicKey(x, y); + emit PubkeyChanged(node, x, y); + } + + /** + * Sets the text data associated with an ENS node and key. + * May only be called by the owner of that node in the ENS registry. + * @param node The node to update. + * @param key The key to set. + * @param value The text data value to set. + */ + function setText(bytes32 node, string key, string value) public only_owner(node) { + records[node].text[key] = value; + emit TextChanged(node, key, key); + } + + /** + * Returns the text data associated with an ENS node and key. + * @param node The ENS node to query. + * @param key The text data key to query. + * @return The associated text data. + */ + function text(bytes32 node, string key) public view returns (string) { + return records[node].text[key]; + } + + /** + * Returns the SECP256k1 public key associated with an ENS node. + * Defined in EIP 619. + * @param node The ENS node to query + * @return x, y the X and Y coordinates of the curve point for the public key. + */ + function pubkey(bytes32 node) public view returns (bytes32 x, bytes32 y) { + return (records[node].pubkey.x, records[node].pubkey.y); + } + + /** + * Returns the ABI associated with an ENS node. + * Defined in EIP205. 
+ * @param node The ENS node to query + * @param contentTypes A bitwise OR of the ABI formats accepted by the caller. + * @return contentType The content type of the return value + * @return data The ABI data + */ + function ABI(bytes32 node, uint256 contentTypes) public view returns (uint256 contentType, bytes data) { + Record storage record = records[node]; + for (contentType = 1; contentType <= contentTypes; contentType <<= 1) { + if ((contentType & contentTypes) != 0 && record.abis[contentType].length > 0) { + data = record.abis[contentType]; + return; + } + } + contentType = 0; + } + + /** + * Returns the name associated with an ENS node, for reverse records. + * Defined in EIP181. + * @param node The ENS node to query. + * @return The associated name. + */ + function name(bytes32 node) public view returns (string) { + return records[node].name; + } + + /** + * Returns the content hash associated with an ENS node. + * Note that this resource type is not standardized, and will likely change + * in future to a resource type based on multihash. + * @param node The ENS node to query. + * @return The associated content hash. + */ + function content(bytes32 node) public view returns (bytes32) { + return records[node].content; + } + + /** + * Returns the multihash associated with an ENS node. + * @param node The ENS node to query. + * @return The associated multihash. + */ + function multihash(bytes32 node) public view returns (bytes) { + return records[node].multihash; + } + + /** + * Returns the address associated with an ENS node. + * @param node The ENS node to query. + * @return The associated address. + */ + function addr(bytes32 node) public view returns (address) { + return records[node].addr; + } + + /** + * Returns true if the resolver implements the interface specified by the provided hash. + * @param interfaceID The ID of the interface to check for. + * @return True if the contract implements the requested interface. 
+ */ + function supportsInterface(bytes4 interfaceID) public pure returns (bool) { + return interfaceID == ADDR_INTERFACE_ID || + interfaceID == CONTENT_INTERFACE_ID || + interfaceID == NAME_INTERFACE_ID || + interfaceID == ABI_INTERFACE_ID || + interfaceID == PUBKEY_INTERFACE_ID || + interfaceID == TEXT_INTERFACE_ID || + interfaceID == MULTIHASH_INTERFACE_ID || + interfaceID == INTERFACE_META_ID; + } +} diff --git a/contracts/src/v0.4/vendor/SafeMathChainlink.sol b/contracts/src/v0.4/vendor/SafeMathChainlink.sol new file mode 100644 index 00000000..b61a7f15 --- /dev/null +++ b/contracts/src/v0.4/vendor/SafeMathChainlink.sol @@ -0,0 +1,52 @@ +pragma solidity ^0.4.11; + + +/** + * @title SafeMath + * @dev Math operations with safety checks that throw on error + */ +library SafeMathPlugin { + + /** + * @dev Multiplies two numbers, throws on overflow. + */ + function mul(uint256 _a, uint256 _b) internal pure returns (uint256 c) { + // Gas optimization: this is cheaper than asserting 'a' not being zero, but the + // benefit is lost if 'b' is also tested. + // See: https://github.com/OpenZeppelin/openzeppelin-solidity/pull/522 + if (_a == 0) { + return 0; + } + + c = _a * _b; + assert(c / _a == _b); + return c; + } + + /** + * @dev Integer division of two numbers, truncating the quotient. + */ + function div(uint256 _a, uint256 _b) internal pure returns (uint256) { + // assert(_b > 0); // Solidity automatically throws when dividing by 0 + // uint256 c = _a / _b; + // assert(_a == _b * c + _a % _b); // There is no case in which this doesn't hold + return _a / _b; + } + + /** + * @dev Subtracts two numbers, throws on overflow (i.e. if subtrahend is greater than minuend). + */ + function sub(uint256 _a, uint256 _b) internal pure returns (uint256) { + assert(_b <= _a); + return _a - _b; + } + + /** + * @dev Adds two numbers, throws on overflow. 
+ */ + function add(uint256 _a, uint256 _b) internal pure returns (uint256 c) { + c = _a + _b; + assert(c >= _a); + return c; + } +} diff --git a/contracts/src/v0.4/vendor/SignedSafeMath.sol b/contracts/src/v0.4/vendor/SignedSafeMath.sol new file mode 100644 index 00000000..307463a0 --- /dev/null +++ b/contracts/src/v0.4/vendor/SignedSafeMath.sol @@ -0,0 +1,60 @@ +pragma solidity 0.4.24; + +/** + * @title SignedSafeMath + * @dev Signed math operations with safety checks that revert on error. + */ +library SignedSafeMath { + int256 constant private _INT256_MIN = -2**255; + + /** + * @dev Multiplies two signed integers, reverts on overflow. + */ + function mul(int256 a, int256 b) internal pure returns (int256) { + // Gas optimization: this is cheaper than requiring 'a' not being zero, but the + // benefit is lost if 'b' is also tested. + // See: https://github.com/OpenZeppelin/openzeppelin-contracts/pull/522 + if (a == 0) { + return 0; + } + + require(!(a == -1 && b == _INT256_MIN), "SignedSafeMath: multiplication overflow"); + + int256 c = a * b; + require(c / a == b, "SignedSafeMath: multiplication overflow"); + + return c; + } + + /** + * @dev Integer division of two signed integers truncating the quotient, reverts on division by zero. + */ + function div(int256 a, int256 b) internal pure returns (int256) { + require(b != 0, "SignedSafeMath: division by zero"); + require(!(b == -1 && a == _INT256_MIN), "SignedSafeMath: division overflow"); + + int256 c = a / b; + + return c; + } + + /** + * @dev Subtracts two signed integers, reverts on overflow. + */ + function sub(int256 a, int256 b) internal pure returns (int256) { + int256 c = a - b; + require((b >= 0 && c <= a) || (b < 0 && c > a), "SignedSafeMath: subtraction overflow"); + + return c; + } + + /** + * @dev Adds two signed integers, reverts on overflow. 
+ */ + function add(int256 a, int256 b) internal pure returns (int256) { + int256 c = a + b; + require((b >= 0 && c >= a) || (b < 0 && c < a), "SignedSafeMath: addition overflow"); + + return c; + } +} diff --git a/contracts/src/v0.4/vendor/StandardToken.sol b/contracts/src/v0.4/vendor/StandardToken.sol new file mode 100644 index 00000000..7f2b4134 --- /dev/null +++ b/contracts/src/v0.4/vendor/StandardToken.sol @@ -0,0 +1,85 @@ +pragma solidity ^0.4.11; + + +import { BasicToken as linkBasicToken } from "./BasicToken.sol"; +import { ERC20 as linkERC20 } from "../interfaces/ERC20.sol"; + + +/** + * @title Standard ERC20 token + * + * @dev Implementation of the basic standard token. + * @dev https://github.com/ethereum/EIPs/issues/20 + * @dev Based on code by FirstBlood: https://github.com/Firstbloodio/token/blob/master/smart_contract/FirstBloodToken.sol + */ +contract StandardToken is linkERC20, linkBasicToken { + + mapping (address => mapping (address => uint256)) allowed; + + + /** + * @dev Transfer tokens from one address to another + * @param _from address The address which you want to send tokens from + * @param _to address The address which you want to transfer to + * @param _value uint256 the amount of tokens to be transferred + */ + function transferFrom(address _from, address _to, uint256 _value) returns (bool) { + var _allowance = allowed[_from][msg.sender]; + + // Check is not needed because sub(_allowance, _value) will already throw if this condition is not met + // require (_value <= _allowance); + + balances[_from] = balances[_from].sub(_value); + balances[_to] = balances[_to].add(_value); + allowed[_from][msg.sender] = _allowance.sub(_value); + Transfer(_from, _to, _value); + return true; + } + + /** + * @dev Approve the passed address to spend the specified amount of tokens on behalf of msg.sender. + * @param _spender The address which will spend the funds. + * @param _value The amount of tokens to be spent. 
+ */ + function approve(address _spender, uint256 _value) returns (bool) { + allowed[msg.sender][_spender] = _value; + Approval(msg.sender, _spender, _value); + return true; + } + + /** + * @dev Function to check the amount of tokens that an owner allowed to a spender. + * @param _owner address The address which owns the funds. + * @param _spender address The address which will spend the funds. + * @return A uint256 specifying the amount of tokens still available for the spender. + */ + function allowance(address _owner, address _spender) constant returns (uint256 remaining) { + return allowed[_owner][_spender]; + } + + /* + * approve should be called when allowed[_spender] == 0. To increment + * allowed value is better to use this function to avoid 2 calls (and wait until + * the first transaction is mined) + * From MonolithDAO Token.sol + */ + function increaseApproval (address _spender, uint _addedValue) + returns (bool success) { + allowed[msg.sender][_spender] = allowed[msg.sender][_spender].add(_addedValue); + Approval(msg.sender, _spender, allowed[msg.sender][_spender]); + return true; + } + + function decreaseApproval (address _spender, uint _subtractedValue) + returns (bool success) { + uint oldValue = allowed[msg.sender][_spender]; + if (_subtractedValue > oldValue) { + allowed[msg.sender][_spender] = 0; + } else { + allowed[msg.sender][_spender] = oldValue.sub(_subtractedValue); + } + Approval(msg.sender, _spender, allowed[msg.sender][_spender]); + return true; + } + +} diff --git a/contracts/src/v0.5/LinkTokenReceiver.sol b/contracts/src/v0.5/LinkTokenReceiver.sol new file mode 100644 index 00000000..484ea677 --- /dev/null +++ b/contracts/src/v0.5/LinkTokenReceiver.sol @@ -0,0 +1,70 @@ +pragma solidity ^0.5.0; + +contract LinkTokenReceiver { + + bytes4 constant private ORACLE_REQUEST_SELECTOR = 0x40429946; + uint256 constant private SELECTOR_LENGTH = 4; + uint256 constant private EXPECTED_REQUEST_WORDS = 2; + uint256 constant private 
MINIMUM_REQUEST_LENGTH = SELECTOR_LENGTH + (32 * EXPECTED_REQUEST_WORDS); + /** + * @notice Called when PLI is sent to the contract via `transferAndCall` + * @dev The data payload's first 2 words will be overwritten by the `_sender` and `_amount` + * values to ensure correctness. Calls oracleRequest. + * @param _sender Address of the sender + * @param _amount Amount of PLI sent (specified in wei) + * @param _data Payload of the transaction + */ + function onTokenTransfer( + address _sender, + uint256 _amount, + bytes memory _data + ) + public + onlyPLI + validRequestLength(_data) + permittedFunctionsForPLI(_data) + { + assembly { + // solhint-disable-next-line avoid-low-level-calls + mstore(add(_data, 36), _sender) // ensure correct sender is passed + // solhint-disable-next-line avoid-low-level-calls + mstore(add(_data, 68), _amount) // ensure correct amount is passed + } + // solhint-disable-next-line avoid-low-level-calls + (bool success, ) = address(this).delegatecall(_data); // calls oracleRequest + require(success, "Unable to create request"); + } + + function getPluginToken() public view returns (address); + + /** + * @dev Reverts if not sent from the PLI token + */ + modifier onlyPLI() { + require(msg.sender == getPluginToken(), "Must use PLI token"); + _; + } + + /** + * @dev Reverts if the given data does not begin with the `oracleRequest` function selector + * @param _data The data payload of the request + */ + modifier permittedFunctionsForPLI(bytes memory _data) { + bytes4 funcSelector; + assembly { + // solhint-disable-next-line avoid-low-level-calls + funcSelector := mload(add(_data, 32)) + } + require(funcSelector == ORACLE_REQUEST_SELECTOR, "Must use whitelisted functions"); + _; + } + + /** + * @dev Reverts if the given payload is less than needed to create a request + * @param _data The request payload + */ + modifier validRequestLength(bytes memory _data) { + require(_data.length >= MINIMUM_REQUEST_LENGTH, "Invalid request length"); + _; + } +} 
\ No newline at end of file diff --git a/contracts/src/v0.5/Median.sol b/contracts/src/v0.5/Median.sol new file mode 100644 index 00000000..0f2b1e86 --- /dev/null +++ b/contracts/src/v0.5/Median.sol @@ -0,0 +1,108 @@ +pragma solidity ^0.5.0; + +import "./vendor/SafeMathPlugin.sol"; +import "./vendor/SignedSafeMath.sol"; + +library Median { + using SafeMathPlugin for uint256; + using SignedSafeMath for int256; + + /** + * @dev Returns the sorted middle, or the average of the two middle indexed + * items if the array has an even number of elements + * @param _list The list of elements to compare + */ + function calculate(int256[] memory _list) + internal + pure + returns (int256) + { + uint256 answerLength = _list.length; + uint256 middleIndex = answerLength.div(2); + if (answerLength % 2 == 0) { + int256 median1 = quickselect(copy(_list), middleIndex); + int256 median2 = quickselect(_list, middleIndex.add(1)); // quickselect is 1 indexed + int256 remainder = (median1 % 2 + median2 % 2) / 2; + return (median1 / 2).add(median2 / 2).add(remainder); // signed integers are not supported by SafeMath + } else { + return quickselect(_list, middleIndex.add(1)); // quickselect is 1 indexed + } + } + + /** + * @dev Returns the kth value of the ordered array + * See: http://www.cs.yale.edu/homes/aspnes/pinewiki/QuickSelect.html + * @param _a The list of elements to pull from + * @param _k The index, 1 based, of the elements you want to pull from when ordered + */ + function quickselect(int256[] memory _a, uint256 _k) + private + pure + returns (int256) + { + int256[] memory a = _a; + uint256 k = _k; + uint256 aLen = a.length; + int256[] memory a1 = new int256[](aLen); + int256[] memory a2 = new int256[](aLen); + uint256 a1Len; + uint256 a2Len; + int256 pivot; + uint256 i; + + while (true) { + pivot = a[aLen.div(2)]; + a1Len = 0; + a2Len = 0; + for (i = 0; i < aLen; i++) { + if (a[i] < pivot) { + a1[a1Len] = a[i]; + a1Len++; + } else if (a[i] > pivot) { + a2[a2Len] = a[i]; + 
a2Len++; + } + } + if (k <= a1Len) { + aLen = a1Len; + (a, a1) = swap(a, a1); + } else if (k > (aLen.sub(a2Len))) { + k = k.sub(aLen.sub(a2Len)); + aLen = a2Len; + (a, a2) = swap(a, a2); + } else { + return pivot; + } + } + } + + /** + * @dev Swaps the pointers to two uint256 arrays in memory + * @param _a The pointer to the first in memory array + * @param _b The pointer to the second in memory array + */ + function swap(int256[] memory _a, int256[] memory _b) + private + pure + returns(int256[] memory, int256[] memory) + { + return (_b, _a); + } + + /** + * @dev Makes an in memory copy of the array passed in + * @param _list The pointer to the array to be copied + */ + function copy(int256[] memory _list) + private + pure + returns(int256[] memory) + { + int256[] memory list2 = new int256[](_list.length); + for (uint256 i = 0; i < _list.length; i++) { + list2[i] = _list[i]; + } + return list2; + } + +} diff --git a/contracts/src/v0.5/Migrations.sol b/contracts/src/v0.5/Migrations.sol new file mode 100644 index 00000000..d62541cd --- /dev/null +++ b/contracts/src/v0.5/Migrations.sol @@ -0,0 +1,23 @@ +pragma solidity ^0.5.0; // solhint-disable-line compiler-fixed + +contract Migrations { + address public owner; + uint public last_completed_migration; + + modifier restricted() { + if (msg.sender == owner) _; + } + + constructor() public { + owner = msg.sender; + } + + function setCompleted(uint completed) public restricted { + last_completed_migration = completed; + } + + function upgrade(address new_address) public restricted { + Migrations upgraded = Migrations(new_address); + upgraded.setCompleted(last_completed_migration); + } +} diff --git a/contracts/src/v0.5/Oracle.sol b/contracts/src/v0.5/Oracle.sol new file mode 100644 index 00000000..19578aa5 --- /dev/null +++ b/contracts/src/v0.5/Oracle.sol @@ -0,0 +1,273 @@ +pragma solidity ^0.5.0; + +import "./LinkTokenReceiver.sol"; +import "./interfaces/PluginRequestInterface.sol"; +import 
"./interfaces/OracleInterface.sol"; +import "./interfaces/LinkTokenInterface.sol"; +import "./interfaces/WithdrawalInterface.sol"; +import "./vendor/Ownable.sol"; +import "./vendor/SafeMathPlugin.sol"; + +/** + * @title The Plugin Oracle contract + * @notice Node operators can deploy this contract to fulfill requests sent to them + */ +contract Oracle is PluginRequestInterface, OracleInterface, Ownable, LinkTokenReceiver, WithdrawalInterface { + using SafeMathPlugin for uint256; + + uint256 constant public EXPIRY_TIME = 5 minutes; + uint256 constant private MINIMUM_CONSUMER_GAS_LIMIT = 400000; + // We initialize fields to 1 instead of 0 so that the first invocation + // does not cost more gas. + uint256 constant private ONE_FOR_CONSISTENT_GAS_COST = 1; + + LinkTokenInterface internal LinkToken; + mapping(bytes32 => bytes32) private commitments; + mapping(address => bool) private authorizedNodes; + uint256 private withdrawableTokens = ONE_FOR_CONSISTENT_GAS_COST; + + event OracleRequest( + bytes32 indexed specId, + address requester, + bytes32 requestId, + uint256 payment, + address callbackAddr, + bytes4 callbackFunctionId, + uint256 cancelExpiration, + uint256 dataVersion, + bytes data + ); + + event CancelOracleRequest( + bytes32 indexed requestId + ); + + /** + * @notice Deploy with the address of the PLI token + * @dev Sets the LinkToken address for the imported LinkTokenInterface + * @param _link The address of the PLI token + */ + constructor(address _link) public Ownable() { + LinkToken = LinkTokenInterface(_link); // external but already deployed and unalterable + } + + /** + * @notice Creates the Plugin request + * @dev Stores the hash of the params as the on-chain commitment for the request. + * Emits OracleRequest event for the Plugin node to detect. 
+ * @param _sender The sender of the request + * @param _payment The amount of payment given (specified in wei) + * @param _specId The Job Specification ID + * @param _callbackAddress The callback address for the response + * @param _callbackFunctionId The callback function ID for the response + * @param _nonce The nonce sent by the requester + * @param _dataVersion The specified data version + * @param _data The CBOR payload of the request + */ + function oracleRequest( + address _sender, + uint256 _payment, + bytes32 _specId, + address _callbackAddress, + bytes4 _callbackFunctionId, + uint256 _nonce, + uint256 _dataVersion, + bytes calldata _data + ) + external + onlyPLI + checkCallbackAddress(_callbackAddress) + { + bytes32 requestId = keccak256(abi.encodePacked(_sender, _nonce)); + require(commitments[requestId] == 0, "Must use a unique ID"); + // solhint-disable-next-line not-rely-on-time + uint256 expiration = now.add(EXPIRY_TIME); + + commitments[requestId] = keccak256( + abi.encodePacked( + _payment, + _callbackAddress, + _callbackFunctionId, + expiration + ) + ); + + emit OracleRequest( + _specId, + _sender, + requestId, + _payment, + _callbackAddress, + _callbackFunctionId, + expiration, + _dataVersion, + _data); + } + + /** + * @notice Called by the Plugin node to fulfill requests + * @dev Given params must hash back to the commitment stored from `oracleRequest`. + * Will call the callback address' callback function without bubbling up error + * checking in a `require` so that the node can get paid. 
+ * @param _requestId The fulfillment request ID that must match the requester's + * @param _payment The payment amount that will be released for the oracle (specified in wei) + * @param _callbackAddress The callback address to call for fulfillment + * @param _callbackFunctionId The callback function ID to use for fulfillment + * @param _expiration The expiration that the node should respond by before the requester can cancel + * @param _data The data to return to the consuming contract + * @return Status if the external call was successful + */ + function fulfillOracleRequest( + bytes32 _requestId, + uint256 _payment, + address _callbackAddress, + bytes4 _callbackFunctionId, + uint256 _expiration, + bytes32 _data + ) + external + onlyAuthorizedNode + isValidRequest(_requestId) + returns (bool) + { + bytes32 paramsHash = keccak256( + abi.encodePacked( + _payment, + _callbackAddress, + _callbackFunctionId, + _expiration + ) + ); + require(commitments[_requestId] == paramsHash, "Params do not match request ID"); + withdrawableTokens = withdrawableTokens.add(_payment); + delete commitments[_requestId]; + require(gasleft() >= MINIMUM_CONSUMER_GAS_LIMIT, "Must provide consumer enough gas"); + // All updates to the oracle's fulfillment should come before calling the + // callback(addr+functionId) as it is untrusted. 
+ // See: https://solidity.readthedocs.io/en/develop/security-considerations.html#use-the-checks-effects-interactions-pattern + (bool success, ) = _callbackAddress.call(abi.encodeWithSelector(_callbackFunctionId, _requestId, _data)); // solhint-disable-line avoid-low-level-calls + return success; + } + + /** + * @notice Use this to check if a node is authorized for fulfilling requests + * @param _node The address of the Plugin node + * @return The authorization status of the node + */ + function getAuthorizationStatus(address _node) external view returns (bool) { + return authorizedNodes[_node]; + } + + /** + * @notice Sets the fulfillment permission for a given node. Use `true` to allow, `false` to disallow. + * @param _node The address of the Plugin node + * @param _allowed Bool value to determine if the node can fulfill requests + */ + function setFulfillmentPermission(address _node, bool _allowed) external onlyOwner { + authorizedNodes[_node] = _allowed; + } + + /** + * @notice Allows the node operator to withdraw earned PLI to a given address + * @dev The owner of the contract can be another wallet and does not have to be a Plugin node + * @param _recipient The address to send the PLI token to + * @param _amount The amount to send (specified in wei) + */ + function withdraw(address _recipient, uint256 _amount) + external + onlyOwner + hasAvailableFunds(_amount) + { + withdrawableTokens = withdrawableTokens.sub(_amount); + assert(LinkToken.transfer(_recipient, _amount)); + } + + /** + * @notice Displays the amount of PLI that is available for the node operator to withdraw + * @dev We use `ONE_FOR_CONSISTENT_GAS_COST` in place of 0 in storage + * @return The amount of withdrawable PLI on the contract + */ + function withdrawable() external view onlyOwner returns (uint256) { + return withdrawableTokens.sub(ONE_FOR_CONSISTENT_GAS_COST); + } + + /** + * @notice Allows requesters to cancel requests sent to this oracle contract. 
Will transfer the PLI + * sent for the request back to the requester's address. + * @dev Given params must hash to a commitment stored on the contract in order for the request to be valid + * Emits CancelOracleRequest event. + * @param _requestId The request ID + * @param _payment The amount of payment given (specified in wei) + * @param _callbackFunc The requester's specified callback address + * @param _expiration The time of the expiration for the request + */ + function cancelOracleRequest( + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunc, + uint256 _expiration + ) external { + bytes32 paramsHash = keccak256( + abi.encodePacked( + _payment, + msg.sender, + _callbackFunc, + _expiration) + ); + require(paramsHash == commitments[_requestId], "Params do not match request ID"); + // solhint-disable-next-line not-rely-on-time + require(_expiration <= now, "Request is not expired"); + + delete commitments[_requestId]; + emit CancelOracleRequest(_requestId); + + assert(LinkToken.transfer(msg.sender, _payment)); + } + + /** + * @notice Returns the address of the PLI token + * @dev This is the public implementation for pluginTokenAddress, which is + * an internal method of the PluginClient contract + */ + function getPluginToken() public view returns (address) { + return address(LinkToken); + } + + // MODIFIERS + + /** + * @dev Reverts if amount requested is greater than withdrawable balance + * @param _amount The given amount to compare to `withdrawableTokens` + */ + modifier hasAvailableFunds(uint256 _amount) { + require(withdrawableTokens >= _amount.add(ONE_FOR_CONSISTENT_GAS_COST), "Amount requested is greater than withdrawable balance"); + _; + } + + /** + * @dev Reverts if request ID does not exist + * @param _requestId The given request ID to check in stored `commitments` + */ + modifier isValidRequest(bytes32 _requestId) { + require(commitments[_requestId] != 0, "Must have a valid requestId"); + _; + } + + /** + * @dev Reverts if `msg.sender` is 
not authorized to fulfill requests + */ + modifier onlyAuthorizedNode() { + require(authorizedNodes[msg.sender] || msg.sender == owner(), "Not an authorized node to fulfill requests"); + _; + } + + /** + * @dev Reverts if the callback address is the PLI token + * @param _to The callback address + */ + modifier checkCallbackAddress(address _to) { + require(_to != address(LinkToken), "Cannot callback to PLI"); + _; + } + +} diff --git a/contracts/src/v0.5/Plugin.sol b/contracts/src/v0.5/Plugin.sol new file mode 100644 index 00000000..053e5b44 --- /dev/null +++ b/contracts/src/v0.5/Plugin.sol @@ -0,0 +1,126 @@ +pragma solidity ^0.5.0; + +import { CBOR as CBOR_Plugin } from "./vendor/CBOR.sol"; +import { Buffer as Buffer_Plugin } from "./vendor/Buffer.sol"; + +/** + * @title Library for common Plugin functions + * @dev Uses imported CBOR library for encoding to buffer + */ +library Plugin { + uint256 internal constant defaultBufferSize = 256; // solhint-disable-line const-name-snakecase + + using CBOR_Plugin for Buffer_Plugin.buffer; + + struct Request { + bytes32 id; + address callbackAddress; + bytes4 callbackFunctionId; + uint256 nonce; + Buffer_Plugin.buffer buf; + } + + /** + * @notice Initializes a Plugin request + * @dev Sets the ID, callback address, and callback function signature on the request + * @param self The uninitialized request + * @param _id The Job Specification ID + * @param _callbackAddress The callback address + * @param _callbackFunction The callback function signature + * @return The initialized request + */ + function initialize( + Request memory self, + bytes32 _id, + address _callbackAddress, + bytes4 _callbackFunction + ) internal pure returns (Plugin.Request memory) { + Buffer_Plugin.init(self.buf, defaultBufferSize); + self.id = _id; + self.callbackAddress = _callbackAddress; + self.callbackFunctionId = _callbackFunction; + return self; + } + + /** + * @notice Sets the data for the buffer without encoding CBOR on-chain + * @dev CBOR can 
be closed with curly-brackets {} or they can be left off + * @param self The initialized request + * @param _data The CBOR data + */ + function setBuffer(Request memory self, bytes memory _data) + internal pure + { + Buffer_Plugin.init(self.buf, _data.length); + Buffer_Plugin.append(self.buf, _data); + } + + /** + * @notice Adds a string value to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _value The string value to add + */ + function add(Request memory self, string memory _key, string memory _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeString(_value); + } + + /** + * @notice Adds a bytes value to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _value The bytes value to add + */ + function addBytes(Request memory self, string memory _key, bytes memory _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeBytes(_value); + } + + /** + * @notice Adds a int256 value to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _value The int256 value to add + */ + function addInt(Request memory self, string memory _key, int256 _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeInt(_value); + } + + /** + * @notice Adds a uint256 value to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _value The uint256 value to add + */ + function addUint(Request memory self, string memory _key, uint256 _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeUInt(_value); + } + + /** + * @notice Adds an array of strings to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _values The array of string values to add + */ + function 
addStringArray(Request memory self, string memory _key, string[] memory _values) + internal pure + { + self.buf.encodeString(_key); + self.buf.startArray(); + for (uint256 i = 0; i < _values.length; i++) { + self.buf.encodeString(_values[i]); + } + self.buf.endSequence(); + } +} diff --git a/contracts/src/v0.5/PluginClient.sol b/contracts/src/v0.5/PluginClient.sol new file mode 100644 index 00000000..cf3fd18d --- /dev/null +++ b/contracts/src/v0.5/PluginClient.sol @@ -0,0 +1,261 @@ +pragma solidity ^0.5.0; + +import "./Plugin.sol"; +import "./interfaces/ENSInterface.sol"; +import "./interfaces/LinkTokenInterface.sol"; +import "./interfaces/PluginRequestInterface.sol"; +import "./interfaces/PointerInterface.sol"; +import { ENSResolver as ENSResolver_Plugin } from "./vendor/ENSResolver.sol"; + +/** + * @title The PluginClient contract + * @notice Contract writers can inherit this contract in order to create requests for the + * Plugin network + */ +contract PluginClient { + using Plugin for Plugin.Request; + + uint256 constant internal PLI = 10**18; + uint256 constant private AMOUNT_OVERRIDE = 0; + address constant private SENDER_OVERRIDE = address(0); + uint256 constant private ARGS_VERSION = 1; + bytes32 constant private ENS_TOKEN_SUBNAME = keccak256("link"); + bytes32 constant private ENS_ORACLE_SUBNAME = keccak256("oracle"); + address constant private PLI_TOKEN_POINTER = 0xC89bD4E1632D3A43CB03AAAd5262cbe4038Bc571; + + ENSInterface private ens; + bytes32 private ensNode; + LinkTokenInterface private link; + PluginRequestInterface private oracle; + uint256 private requestCount = 1; + mapping(bytes32 => address) private pendingRequests; + + event PluginRequested(bytes32 indexed id); + event PluginFulfilled(bytes32 indexed id); + event PluginCancelled(bytes32 indexed id); + + /** + * @notice Creates a request that can hold additional parameters + * @param _specId The Job Specification ID that the request will be created for + * @param _callbackAddress The callback 
address that the response will be sent to + * @param _callbackFunctionSignature The callback function signature to use for the callback address + * @return A Plugin Request struct in memory + */ + function buildPluginRequest( + bytes32 _specId, + address _callbackAddress, + bytes4 _callbackFunctionSignature + ) internal pure returns (Plugin.Request memory) { + Plugin.Request memory req; + return req.initialize(_specId, _callbackAddress, _callbackFunctionSignature); + } + + /** + * @notice Creates a Plugin request to the stored oracle address + * @dev Calls `pluginRequestTo` with the stored oracle address + * @param _req The initialized Plugin Request + * @param _payment The amount of PLI to send for the request + * @return The request ID + */ + function sendPluginRequest(Plugin.Request memory _req, uint256 _payment) + internal + returns (bytes32) + { + return sendPluginRequestTo(address(oracle), _req, _payment); + } + + /** + * @notice Creates a Plugin request to the specified oracle address + * @dev Generates and stores a request ID, increments the local nonce, and uses `transferAndCall` to + * send PLI which creates a request on the target oracle contract. + * Emits PluginRequested event. 
+ * @param _oracle The address of the oracle for the request + * @param _req The initialized Plugin Request + * @param _payment The amount of PLI to send for the request + * @return The request ID + */ + function sendPluginRequestTo(address _oracle, Plugin.Request memory _req, uint256 _payment) + internal + returns (bytes32 requestId) + { + requestId = keccak256(abi.encodePacked(this, requestCount)); + _req.nonce = requestCount; + pendingRequests[requestId] = _oracle; + emit PluginRequested(requestId); + require(link.transferAndCall(_oracle, _payment, encodeRequest(_req)), "unable to transferAndCall to oracle"); + requestCount += 1; + + return requestId; + } + + /** + * @notice Allows a request to be cancelled if it has not been fulfilled + * @dev Requires keeping track of the expiration value emitted from the oracle contract. + * Deletes the request from the `pendingRequests` mapping. + * Emits PluginCancelled event. + * @param _requestId The request ID + * @param _payment The amount of PLI sent for the request + * @param _callbackFunc The callback function specified for the request + * @param _expiration The time of the expiration for the request + */ + function cancelPluginRequest( + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunc, + uint256 _expiration + ) + internal + { + PluginRequestInterface requested = PluginRequestInterface(pendingRequests[_requestId]); + delete pendingRequests[_requestId]; + emit PluginCancelled(_requestId); + requested.cancelOracleRequest(_requestId, _payment, _callbackFunc, _expiration); + } + + /** + * @notice Sets the stored oracle address + * @param _oracle The address of the oracle contract + */ + function setPluginOracle(address _oracle) internal { + oracle = PluginRequestInterface(_oracle); + } + + /** + * @notice Sets the PLI token address + * @param _link The address of the PLI token contract + */ + function setPluginToken(address _link) internal { + link = LinkTokenInterface(_link); + } + + /** + * @notice Sets 
the Plugin token address for the public + * network as given by the Pointer contract + */ + function setPublicPluginToken() internal { + setPluginToken(PointerInterface(PLI_TOKEN_POINTER).getAddress()); + } + + /** + * @notice Retrieves the stored address of the PLI token + * @return The address of the PLI token + */ + function pluginTokenAddress() + internal + view + returns (address) + { + return address(link); + } + + /** + * @notice Retrieves the stored address of the oracle contract + * @return The address of the oracle contract + */ + function pluginOracleAddress() + internal + view + returns (address) + { + return address(oracle); + } + + /** + * @notice Allows for a request which was created on another contract to be fulfilled + * on this contract + * @param _oracle The address of the oracle contract that will fulfill the request + * @param _requestId The request ID used for the response + */ + function addPluginExternalRequest(address _oracle, bytes32 _requestId) + internal + notPendingRequest(_requestId) + { + pendingRequests[_requestId] = _oracle; + } + + /** + * @notice Sets the stored oracle and PLI token contracts with the addresses resolved by ENS + * @dev Accounts for subnodes having different resolvers + * @param _ens The address of the ENS contract + * @param _node The ENS node hash + */ + function usePluginWithENS(address _ens, bytes32 _node) + internal + { + ens = ENSInterface(_ens); + ensNode = _node; + bytes32 linkSubnode = keccak256(abi.encodePacked(ensNode, ENS_TOKEN_SUBNAME)); + ENSResolver_Plugin resolver = ENSResolver_Plugin(ens.resolver(linkSubnode)); + setPluginToken(resolver.addr(linkSubnode)); + updatePluginOracleWithENS(); + } + + /** + * @notice Sets the stored oracle contract with the address resolved by ENS + * @dev This may be called on its own as long as `usePluginWithENS` has been called previously + */ + function updatePluginOracleWithENS() + internal + { + bytes32 oracleSubnode = keccak256(abi.encodePacked(ensNode, 
ENS_ORACLE_SUBNAME)); + ENSResolver_Plugin resolver = ENSResolver_Plugin(ens.resolver(oracleSubnode)); + setPluginOracle(resolver.addr(oracleSubnode)); + } + + /** + * @notice Encodes the request to be sent to the oracle contract + * @dev The Plugin node expects values to be in order for the request to be picked up. Order of types + * will be validated in the oracle contract. + * @param _req The initialized Plugin Request + * @return The bytes payload for the `transferAndCall` method + */ + function encodeRequest(Plugin.Request memory _req) + private + view + returns (bytes memory) + { + return abi.encodeWithSelector( + oracle.oracleRequest.selector, + SENDER_OVERRIDE, // Sender value - overridden by onTokenTransfer by the requesting contract's address + AMOUNT_OVERRIDE, // Amount value - overridden by onTokenTransfer by the actual amount of PLI sent + _req.id, + _req.callbackAddress, + _req.callbackFunctionId, + _req.nonce, + ARGS_VERSION, + _req.buf.buf); + } + + /** + * @notice Ensures that the fulfillment is valid for this contract + * @dev Use if the contract developer prefers methods instead of modifiers for validation + * @param _requestId The request ID for fulfillment + */ + function validatePluginCallback(bytes32 _requestId) + internal + recordPluginFulfillment(_requestId) + // solhint-disable-next-line no-empty-blocks + {} + + /** + * @dev Reverts if the sender is not the oracle of the request. + * Emits PluginFulfilled event. 
+ * @param _requestId The request ID for fulfillment + */ + modifier recordPluginFulfillment(bytes32 _requestId) { + require(msg.sender == pendingRequests[_requestId], + "Source must be the oracle of the request"); + delete pendingRequests[_requestId]; + emit PluginFulfilled(_requestId); + _; + } + + /** + * @dev Reverts if the request is already pending + * @param _requestId The request ID for fulfillment + */ + modifier notPendingRequest(bytes32 _requestId) { + require(pendingRequests[_requestId] == address(0), "Request is already pending"); + _; + } +} diff --git a/contracts/src/v0.5/dev/Coordinator.sol b/contracts/src/v0.5/dev/Coordinator.sol new file mode 100644 index 00000000..7ae1f8ec --- /dev/null +++ b/contracts/src/v0.5/dev/Coordinator.sol @@ -0,0 +1,411 @@ +pragma solidity 0.5.0; + +import "./CoordinatorInterface.sol"; +import "../interfaces/PluginRequestInterface.sol"; +import "../interfaces/LinkTokenInterface.sol"; +import "../vendor/SafeMathPlugin.sol"; +import "./ServiceAgreementDecoder.sol"; +import "./OracleSignaturesDecoder.sol"; + + +/** + * @title The Plugin Coordinator handles oracle service agreements between one or more oracles + */ +contract Coordinator is PluginRequestInterface, CoordinatorInterface, ServiceAgreementDecoder, OracleSignaturesDecoder { + using SafeMathPlugin for uint256; + + uint256 constant public EXPIRY_TIME = 5 minutes; + LinkTokenInterface internal PLI; + + struct Callback { + bytes32 sAId; + uint256 amount; + address addr; + bytes4 functionId; + uint64 cancelExpiration; + uint8 responseCount; + mapping(address => uint256) responses; + } + + mapping(bytes32 => Callback) private callbacks; + mapping(bytes32 => mapping(address => bool)) private allowedOracles; + mapping(bytes32 => ServiceAgreement) public serviceAgreements; + mapping(address => uint256) public withdrawableTokens; + + /** + * @notice Deploy with the address of the PLI token + * @dev Sets the LinkToken address for the imported LinkTokenInterface + * @param 
_link The address of the PLI token + */ + constructor(address _link) public { + PLI = LinkTokenInterface(_link); + } + + event OracleRequest( + bytes32 indexed sAId, + address requester, + bytes32 requestId, + uint256 payment, + address callbackAddr, + bytes4 callbackFunctionId, + uint256 cancelExpiration, + uint256 dataVersion, + bytes data + ); + + event NewServiceAgreement( + bytes32 indexed said, + bytes32 indexed requestDigest + ); + + event CancelOracleRequest( + bytes32 internalId + ); + + /** + * @notice Creates the Plugin request + * @dev Stores the params on-chain in a callback for the request. + * Emits OracleRequest event for Plugin nodes to detect. + * @param _sender The sender of the request + * @param _amount The amount of payment given (specified in wei) + * @param _sAId The Service Agreement ID + * @param _callbackAddress The callback address for the response + * @param _callbackFunctionId The callback function ID for the response + * @param _nonce The nonce sent by the requester + * @param _dataVersion The specified data version + * @param _data The CBOR payload of the request + */ + function oracleRequest( + address _sender, + uint256 _amount, + bytes32 _sAId, + address _callbackAddress, + bytes4 _callbackFunctionId, + uint256 _nonce, + uint256 _dataVersion, + bytes calldata _data + ) + external + onlyPLI + sufficientPLI(_amount, _sAId) + checkCallbackAddress(_callbackAddress) + // checkServiceAgreementPresence(_sAId) // TODO: exhausts the stack + { + bytes32 requestId = keccak256(abi.encodePacked(_sender, _nonce)); + require(callbacks[requestId].cancelExpiration == 0, "Must use a unique ID"); + callbacks[requestId].sAId = _sAId; + callbacks[requestId].amount = _amount; + callbacks[requestId].addr = _callbackAddress; + callbacks[requestId].functionId = _callbackFunctionId; + // solhint-disable-next-line not-rely-on-time + callbacks[requestId].cancelExpiration = uint64(now.add(EXPIRY_TIME)); + + emit OracleRequest( + _sAId, + _sender, + requestId, 
+ _amount, + _callbackAddress, + _callbackFunctionId, + now.add(EXPIRY_TIME), // solhint-disable-line not-rely-on-time + _dataVersion, + _data); + } + + /** + * @notice Stores a Service Agreement which has been signed by the given oracles + * @dev Validates that each oracle has a valid signature. + * Emits NewServiceAgreement event. + * @return The Service Agreement ID + */ + function initiateServiceAgreement( + bytes memory _serviceAgreementData, + bytes memory _oracleSignaturesData + ) + public + returns (bytes32 serviceAgreementID) + { + + ServiceAgreement memory _agreement = decodeServiceAgreement(_serviceAgreementData); + OracleSignatures memory _signatures = decodeOracleSignatures(_oracleSignaturesData); + + require( + _agreement.oracles.length == _signatures.vs.length && + _signatures.vs.length == _signatures.rs.length && + _signatures.rs.length == _signatures.ss.length, + "Must pass in as many signatures as oracles" + ); + // solhint-disable-next-line not-rely-on-time + require(_agreement.endAt > block.timestamp, + "ServiceAgreement must end in the future"); + require(serviceAgreements[serviceAgreementID].endAt == 0, + "serviceAgreement already initiated"); + serviceAgreementID = getId(_agreement); + + registerOracleSignatures( + serviceAgreementID, + _agreement.oracles, + _signatures + ); + + serviceAgreements[serviceAgreementID] = _agreement; + emit NewServiceAgreement(serviceAgreementID, _agreement.requestDigest); + // solhint-disable-next-line avoid-low-level-calls + (bool ok, bytes memory response) = _agreement.aggregator.call( + abi.encodeWithSelector( + _agreement.aggInitiateJobSelector, + serviceAgreementID, + _serviceAgreementData + ) + ); + require(ok, "Aggregator failed to initiate Service Agreement"); + require(response.length > 0, "probably wrong address/selector"); + (bool success, bytes memory message) = abi.decode(response, (bool, bytes)); + if ((!success) && message.length == 0) { + // Revert with a non-empty message to give user a hint 
where to look + require(success, "initiation failed; empty message"); + } + require(success, string(message)); + } + + /** + * @dev Validates that each signer address matches for the given oracles + * @param _serviceAgreementID Service agreement ID + * @param _oracles Array of oracle addresses which agreed to the service agreement + * @param _signatures contains the collected parts(v, r, and s) of each oracle's signature. + */ + function registerOracleSignatures( + bytes32 _serviceAgreementID, + address[] memory _oracles, + OracleSignatures memory _signatures + ) + private + { + for (uint i = 0; i < _oracles.length; i++) { + address signer = getOracleAddressFromSASignature( + _serviceAgreementID, + _signatures.vs[i], + _signatures.rs[i], + _signatures.ss[i] + ); + require(_oracles[i] == signer, "Invalid oracle signature specified in SA"); + allowedOracles[_serviceAgreementID][_oracles[i]] = true; + } + + } + + /** + * @dev Recovers the address of the signer for a service agreement + * @param _serviceAgreementID Service agreement ID + * @param _v Recovery ID of the oracle signature + * @param _r First 32 bytes of the oracle signature + * @param _s Second 32 bytes of the oracle signature + * @return The address of the signer + */ + function getOracleAddressFromSASignature( + bytes32 _serviceAgreementID, + uint8 _v, + bytes32 _r, + bytes32 _s + ) + private pure returns (address) + { + bytes32 prefixedHash = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", _serviceAgreementID)); + return ecrecover(prefixedHash, _v, _r, _s); + } + + /** + * @notice Called by the Plugin node to fulfill requests + * @dev Response must have a valid callback, and will delete the associated callback storage + * before calling the external contract. 
+ * @param _requestId The fulfillment request ID that must match the requester's + * @param _data The data to return to the consuming contract + * @return Status if the external call was successful + */ + function fulfillOracleRequest( + bytes32 _requestId, + bytes32 _data + ) external isValidRequest(_requestId) returns (bool) { + Callback memory callback = callbacks[_requestId]; + ServiceAgreement memory sA = serviceAgreements[callback.sAId]; + // solhint-disable-next-line avoid-low-level-calls + (bool ok, bytes memory aggResponse) = sA.aggregator.call( + abi.encodeWithSelector( + sA.aggFulfillSelector, _requestId, callback.sAId, msg.sender, _data)); + require(ok, "aggregator.fulfill failed"); + require(aggResponse.length > 0, "probably wrong address/selector"); + (bool aggSuccess, bool aggComplete, bytes memory response, int256[] memory paymentAmounts) = abi.decode( // solhint-disable-line + aggResponse, (bool, bool, bytes, int256[])); + require(aggSuccess, string(response)); + if (aggComplete) { + require(paymentAmounts.length == sA.oracles.length, "wrong paymentAmounts.length"); + for (uint256 oIdx = 0; oIdx < sA.oracles.length; oIdx++) { // pay oracles + withdrawableTokens[sA.oracles[oIdx]] = uint256(int256( + withdrawableTokens[sA.oracles[oIdx]]) + paymentAmounts[oIdx]); + } // solhint-disable-next-line avoid-low-level-calls + (bool success,) = callback.addr.call(abi.encodeWithSelector( // report final result + callback.functionId, _requestId, abi.decode(response, (bytes32)))); + return success; + } + return true; + } + + /** + * @dev Allows the oracle operator to withdraw their PLI + * @param _recipient is the address the funds will be sent to + * @param _amount is the amount of PLI transferred from the Coordinator contract + */ + function withdraw(address _recipient, uint256 _amount) + external + hasAvailableFunds(_amount) + { + withdrawableTokens[msg.sender] = withdrawableTokens[msg.sender].sub(_amount); + assert(PLI.transfer(_recipient, _amount)); + } + + 
/** + * @dev Necessary to implement PluginRequestInterface + */ + function cancelOracleRequest(bytes32, uint256, bytes4, uint256) + external + {} // solhint-disable-line no-empty-blocks + + /** + * @notice Called when PLI is sent to the contract via `transferAndCall` + * @dev The data payload's first 2 words will be overwritten by the `_sender` and `_amount` + * values to ensure correctness. Calls oracleRequest. + * @param _sender Address of the sender + * @param _amount Amount of PLI sent (specified in wei) + * @param _data Payload of the transaction + */ + function onTokenTransfer( + address _sender, + uint256 _amount, + bytes memory _data + ) + public + onlyPLI + permittedFunctionsForPLI + { + assembly { // solhint-disable-line no-inline-assembly + mstore(add(_data, 36), _sender) // ensure correct sender is passed + mstore(add(_data, 68), _amount) // ensure correct amount is passed + } + // solhint-disable-next-line avoid-low-level-calls + (bool success,) = address(this).delegatecall(_data); // calls oracleRequest or depositFunds + require(success, "Unable to create request"); + } + + /** + * @notice Retrieve the Service Agreement ID for the given parameters + * @param _agreementData contains all of the terms of the service agreement that can be verified on-chain. 
+ * @return The Service Agreement ID, a keccak256 hash of the input params + */ + function getId(bytes memory _agreementData) public pure returns (bytes32) + { + ServiceAgreement memory _agreement = decodeServiceAgreement(_agreementData); + return getId(_agreement); + } + + function getId(ServiceAgreement memory _agreement) internal pure returns (bytes32) + { + return keccak256( + abi.encodePacked( + _agreement.payment, + _agreement.expiration, + _agreement.endAt, + _agreement.oracles, + _agreement.requestDigest, + _agreement.aggregator, + _agreement.aggInitiateJobSelector, + _agreement.aggFulfillSelector + )); + } + + /** + * @notice Called when PLI is sent to the contract via `transferAndCall` + * @param _sender Address of the sender + * @param _amount Amount of PLI sent (specified in wei) + */ + function depositFunds(address _sender, uint256 _amount) external onlyPLI + { + withdrawableTokens[_sender] = withdrawableTokens[_sender].add(_amount); + } + + /** + * @param _account Address to check balance of + * @return Balance of account (specified in wei) + */ + function balanceOf(address _account) public view returns (uint256) + { + return withdrawableTokens[_account]; + } + + /** + * @dev Reverts if the callback address is the PLI token + * @param _to The callback address + */ + modifier checkCallbackAddress(address _to) { + require(_to != address(PLI), "Cannot callback to PLI"); + _; + } + + /** + * @dev Reverts if amount requested is greater than withdrawable balance + * @param _amount The given amount to compare to `withdrawableTokens` + */ + modifier hasAvailableFunds(uint256 _amount) { + require(withdrawableTokens[msg.sender] >= _amount, "Amount requested is greater than withdrawable balance"); + _; + } + + /** + * @dev Reverts if request ID does not exist + * @param _requestId The given request ID to check in stored `callbacks` + */ + modifier isValidRequest(bytes32 _requestId) { + require(callbacks[_requestId].addr != address(0), "Must have a valid 
requestId"); + require(allowedOracles[callbacks[_requestId].sAId][msg.sender], "Oracle not recognized on service agreement"); + _; + } + + /** + * @dev Reverts if amount is not at least what was agreed upon in the service agreement + * @param _amount The payment for the request + * @param _sAId The service agreement ID which the request is for + */ + modifier sufficientPLI(uint256 _amount, bytes32 _sAId) { + require(_amount >= serviceAgreements[_sAId].payment, "Below agreed payment"); + _; + } + + /** + * @dev Reverts if the given data does not begin with the `oracleRequest` or + * `depositFunds` function selector + */ + modifier permittedFunctionsForPLI() { + bytes4[1] memory funcSelector; + assembly { // solhint-disable-line no-inline-assembly + calldatacopy(funcSelector, 132, 4) // grab function selector from calldata + } + require( + funcSelector[0] == this.oracleRequest.selector || funcSelector[0] == this.depositFunds.selector, + "Must use whitelisted functions" + ); + _; + } + + modifier checkServiceAgreementPresence(bytes32 _sAId) { + require(uint256(serviceAgreements[_sAId].requestDigest) != 0, + "Must reference an existing ServiceAgreement"); + _; + } + + /** + * @dev Reverts if not sent from the PLI token + */ + modifier onlyPLI() { + require(msg.sender == address(PLI), "Must use PLI token"); + _; + } +} diff --git a/contracts/src/v0.5/dev/CoordinatorInterface.sol b/contracts/src/v0.5/dev/CoordinatorInterface.sol new file mode 100644 index 00000000..1678a95c --- /dev/null +++ b/contracts/src/v0.5/dev/CoordinatorInterface.sol @@ -0,0 +1,14 @@ +pragma solidity 0.5.0; + +contract CoordinatorInterface { + + function initiateServiceAgreement( + bytes memory _serviceAgreementData, + bytes memory _oracleSignaturesData) + public returns (bytes32); + + function fulfillOracleRequest( + bytes32 _requestId, + bytes32 _aggregatorArgs) + external returns (bool); +} diff --git a/contracts/src/v0.5/dev/OracleSignaturesDecoder.sol 
b/contracts/src/v0.5/dev/OracleSignaturesDecoder.sol new file mode 100644 index 00000000..1c2776b6 --- /dev/null +++ b/contracts/src/v0.5/dev/OracleSignaturesDecoder.sol @@ -0,0 +1,24 @@ +pragma solidity 0.5.0; + +contract OracleSignaturesDecoder { + + struct OracleSignatures { + uint8[] vs; + bytes32[] rs; + bytes32[] ss; + } + + function decodeOracleSignatures( + bytes memory _oracleSignaturesData + ) + internal + pure + returns(OracleSignatures memory) + { + // solhint-disable indent + OracleSignatures memory signatures; + ( signatures.vs, signatures.rs, signatures.ss) = + abi.decode(_oracleSignaturesData, ( uint8[], bytes32[], bytes32[] )); + return signatures; + } +} diff --git a/contracts/src/v0.5/dev/SchnorrSECP256K1.sol b/contracts/src/v0.5/dev/SchnorrSECP256K1.sol new file mode 100644 index 00000000..192c8840 --- /dev/null +++ b/contracts/src/v0.5/dev/SchnorrSECP256K1.sol @@ -0,0 +1,147 @@ +pragma solidity ^0.5.0; + +//////////////////////////////////////////////////////////////////////////////// +// XXX: Do not use in production until this code has been audited. +//////////////////////////////////////////////////////////////////////////////// + +contract SchnorrSECP256K1 { + // See https://en.bitcoin.it/wiki/Secp256k1 for this constant. + uint256 constant public Q = // Group order of secp256k1 + // solium-disable-next-line indentation + 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; + // solium-disable-next-line zeppelin/no-arithmetic-operations + uint256 constant public HALF_Q = (Q >> 1) + 1; + + /** ************************************************************************** + @notice verifySignature returns true iff passed a valid Schnorr signature. + + @dev See https://en.wikipedia.org/wiki/Schnorr_signature for reference. + + @dev In what follows, let d be your secret key, PK be your public key, + PKx be the x ordinate of your public key, and PKyp be the parity bit for + the y ordinate (i.e., 0 if PKy is even, 1 if odd.) 
+ ************************************************************************** + @dev TO CREATE A VALID SIGNATURE FOR THIS METHOD + + @dev First PKx must be less than HALF_Q. Then follow these instructions + (see evm/test/schnorr_test.js, for an example of carrying them out): + @dev 1. Hash the target message to a uint256, called msgHash here, using + keccak256 + + @dev 2. Pick k uniformly and cryptographically securely randomly from + {0,...,Q-1}. It is critical that k remains confidential, as your + private key can be reconstructed from k and the signature. + + @dev 3. Compute k*g in the secp256k1 group, where g is the group + generator. (This is the same as computing the public key from the + secret key k. But it's OK if k*g's x ordinate is greater than + HALF_Q.) + + @dev 4. Compute the ethereum address for k*g. This is the lower 160 bits + of the keccak hash of the concatenated affine coordinates of k*g, + as 32-byte big-endians. (For instance, you could pass k to + ethereumjs-utils's privateToAddress to compute this, though that + should be strictly a development convenience, not for handling + live secrets, unless you've locked your javascript environment + down very carefully.) Call this address + nonceTimesGeneratorAddress. + + @dev 5. Compute e=uint256(keccak256(PKx as a 32-byte big-endian + ‖ PKyp as a single byte + ‖ msgHash + ‖ nonceTimesGeneratorAddress)) + This value e is called "msgChallenge" in verifySignature's source + code below. Here "‖" means concatenation of the listed byte + arrays. + + @dev 6. Let d be your secret key. Compute s = (k - d * e) % Q. Add Q to + it, if it's negative. This is your signature. (d is your secret + key.) + ************************************************************************** + @dev TO VERIFY A SIGNATURE + + @dev Given a signature (s, e) of msgHash, constructed as above, compute + S=e*PK+s*generator in the secp256k1 group law, and then the ethereum + address of S, as described in step 4. 
Call that + nonceTimesGeneratorAddress. Then call the verifySignature method as: + + @dev verifySignature(PKx, PKyp, s, msgHash, + nonceTimesGeneratorAddress) + ************************************************************************** + @dev This signing scheme deviates slightly from the classical Schnorr + signature, in that the address of k*g is used in place of k*g itself, + both when calculating e and when verifying sum S as described in the + verification paragraph above. This reduces the difficulty of + brute-forcing a signature by trying random secp256k1 points in place of + k*g in the signature verification process from 256 bits to 160 bits. + However, the difficulty of cracking the public key using "baby-step, + giant-step" is only 128 bits, so this weakening constitutes no compromise + in the security of the signatures or the key. + + @dev The constraint signingPubKeyX < HALF_Q comes from Eq. (281), p. 24 + of Yellow Paper version 78d7b9a. ecrecover only accepts "s" inputs less + than HALF_Q, to protect against a signature-malleability vulnerability in + ECDSA. Schnorr does not have this vulnerability, but we must account for + ecrecover's defense anyway. And since we are abusing ecrecover by putting + signingPubKeyX in ecrecover's "s" argument the constraint applies to + signingPubKeyX, even though it represents a value in the base field, and + has no natural relationship to the order of the curve's cyclic group. + ************************************************************************** + @param signingPubKeyX is the x ordinate of the public key. This must be + less than HALF_Q. + @param pubKeyYParity is 0 if the y ordinate of the public key is even, 1 + if it's odd. + @param signature is the actual signature, described as s in the above + instructions. + @param msgHash is a 256-bit hash of the message being signed. 
+ @param nonceTimesGeneratorAddress is the ethereum address of k*g in the + above instructions + ************************************************************************** + @return True if passed a valid signature, false otherwise. */ + function verifySignature( + uint256 signingPubKeyX, + uint8 pubKeyYParity, + uint256 signature, + uint256 msgHash, + address nonceTimesGeneratorAddress) external pure returns (bool) { + require(signingPubKeyX < HALF_Q, "Public-key x >= HALF_Q"); + // Avoid signature malleability from multiple representations for ℤ/Qℤ elts + require(signature < Q, "signature must be reduced modulo Q"); + + // Forbid trivial inputs, to avoid ecrecover edge cases. The main thing to + // avoid is something which causes ecrecover to return 0x0: then trivial + // signatures could be constructed with the nonceTimesGeneratorAddress input + // set to 0x0. + // + // solium-disable-next-line indentation + require(nonceTimesGeneratorAddress != address(0) && signingPubKeyX > 0 && + signature > 0 && msgHash > 0, "no zero inputs allowed"); + + // solium-disable-next-line indentation + uint256 msgChallenge = // "e" + // solium-disable-next-line indentation + uint256(keccak256(abi.encodePacked(signingPubKeyX, pubKeyYParity, + msgHash, nonceTimesGeneratorAddress)) + ); + + // Verify msgChallenge * signingPubKey + signature * generator == + // nonce * generator + // + // https://ethresear.ch/t/you-can-kinda-abuse-ecrecover-to-do-ecmul-in-secp256k1-today/2384/9 + // The point corresponding to the address returned by + // ecrecover(-s*r,v,r,e*r) is (r⁻¹ mod Q)*(e*r*R-(-s)*r*g)=e*R+s*g, where R + // is the (v,r) point. See https://crypto.stackexchange.com/a/18106 + // + // solium-disable-next-line indentation + address recoveredAddress = ecrecover( + // solium-disable-next-line zeppelin/no-arithmetic-operations + bytes32(Q - mulmod(signingPubKeyX, signature, Q)), + // https://ethereum.github.io/yellowpaper/paper.pdf p. 
24, "The + // value 27 represents an even y value and 28 represents an odd + // y value." + (pubKeyYParity == 0) ? 27 : 28, + bytes32(signingPubKeyX), + bytes32(mulmod(msgChallenge, signingPubKeyX, Q))); + return nonceTimesGeneratorAddress == recoveredAddress; + } +} diff --git a/contracts/src/v0.5/dev/ServiceAgreementDecoder.sol b/contracts/src/v0.5/dev/ServiceAgreementDecoder.sol new file mode 100644 index 00000000..9267b77d --- /dev/null +++ b/contracts/src/v0.5/dev/ServiceAgreementDecoder.sol @@ -0,0 +1,59 @@ +pragma solidity 0.5.0; + +contract ServiceAgreementDecoder { + + struct ServiceAgreement { + uint256 payment; + uint256 expiration; + uint256 endAt; + address[] oracles; + // This effectively functions as an ID tag for the off-chain job of the + // service agreement. It is calculated as the keccak256 hash of the + // normalized JSON request to create the ServiceAgreement, but that identity + // is unused, and its value is essentially arbitrary. + bytes32 requestDigest; + // Specification of aggregator interface. See ../tests/MeanAggregator.sol + // for example + address aggregator; + // Selectors for the interface methods must be specified, because their + // arguments can vary from aggregator to aggregator. 
+ // + // Function selector for aggregator initiateJob method + bytes4 aggInitiateJobSelector; + // Function selector for aggregator fulfill method + bytes4 aggFulfillSelector; + } + + function decodeServiceAgreement( + bytes memory _serviceAgreementData + ) + internal + pure + returns(ServiceAgreement memory) + { + // solhint-disable indent + ServiceAgreement memory agreement; + + ( agreement.payment, + agreement.expiration, + agreement.endAt, + agreement.oracles, + agreement.requestDigest, + agreement.aggregator, + agreement.aggInitiateJobSelector, + agreement.aggFulfillSelector) = + abi.decode( + _serviceAgreementData, + ( uint256, + uint256, + uint256, + address[], + bytes32, + address, + bytes4, + bytes4 ) + ); + + return agreement; + } +} diff --git a/contracts/src/v0.5/interfaces/AggregatorInterface.sol b/contracts/src/v0.5/interfaces/AggregatorInterface.sol new file mode 100644 index 00000000..d9bd107d --- /dev/null +++ b/contracts/src/v0.5/interfaces/AggregatorInterface.sol @@ -0,0 +1,12 @@ +pragma solidity >=0.5.0; + +interface AggregatorInterface { + function latestAnswer() external view returns (int256); + function latestTimestamp() external view returns (uint256); + function latestRound() external view returns (uint256); + function getAnswer(uint256 roundId) external view returns (int256); + function getTimestamp(uint256 roundId) external view returns (uint256); + + event AnswerUpdated(int256 indexed current, uint256 indexed roundId, uint256 timestamp); + event NewRound(uint256 indexed roundId, address indexed startedBy, uint256 startedAt); +} diff --git a/contracts/src/v0.5/interfaces/AggregatorV2V3Interface.sol b/contracts/src/v0.5/interfaces/AggregatorV2V3Interface.sol new file mode 100644 index 00000000..0024711b --- /dev/null +++ b/contracts/src/v0.5/interfaces/AggregatorV2V3Interface.sol @@ -0,0 +1,56 @@ +pragma solidity >=0.5.0; + +import "./AggregatorInterface.sol"; +import "./AggregatorV3Interface.sol"; + +/** + * @title The V2 & V3 Aggregator 
Interface + * @notice Solidity V0.5 does not allow interfaces to inherit from other + * interfaces so this contract is a combination of v0.5 AggregatorInterface.sol + * and v0.5 AggregatorV3Interface.sol. + */ +interface AggregatorV2V3Interface { + // + // V2 Interface: + // + function latestAnswer() external view returns (int256); + function latestTimestamp() external view returns (uint256); + function latestRound() external view returns (uint256); + function getAnswer(uint256 roundId) external view returns (int256); + function getTimestamp(uint256 roundId) external view returns (uint256); + + event AnswerUpdated(int256 indexed current, uint256 indexed roundId, uint256 timestamp); + event NewRound(uint256 indexed roundId, address indexed startedBy, uint256 startedAt); + + // + // V3 Interface: + // + function decimals() external view returns (uint8); + function description() external view returns (string memory); + function version() external view returns (uint256); + + // getRoundData and latestRoundData should both raise "No data present" + // if they do not have data to report, instead of returning unset values + // which could be misinterpreted as actual reported values. 
+ function getRoundData(uint80 _roundId) + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + function latestRoundData() + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + +} diff --git a/contracts/src/v0.5/interfaces/AggregatorV3Interface.sol b/contracts/src/v0.5/interfaces/AggregatorV3Interface.sol new file mode 100644 index 00000000..af8f83de --- /dev/null +++ b/contracts/src/v0.5/interfaces/AggregatorV3Interface.sol @@ -0,0 +1,33 @@ +pragma solidity >=0.5.0; + +interface AggregatorV3Interface { + + function decimals() external view returns (uint8); + function description() external view returns (string memory); + function version() external view returns (uint256); + + // getRoundData and latestRoundData should both raise "No data present" + // if they do not have data to report, instead of returning unset values + // which could be misinterpreted as actual reported values. + function getRoundData(uint80 _roundId) + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + function latestRoundData() + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + +} diff --git a/contracts/src/v0.5/interfaces/ENSInterface.sol b/contracts/src/v0.5/interfaces/ENSInterface.sol new file mode 100644 index 00000000..1aee391a --- /dev/null +++ b/contracts/src/v0.5/interfaces/ENSInterface.sol @@ -0,0 +1,26 @@ +pragma solidity ^0.5.0; + +interface ENSInterface { + + // Logged when the owner of a node assigns a new owner to a subnode. + event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner); + + // Logged when the owner of a node transfers ownership to a new account. 
+ event Transfer(bytes32 indexed node, address owner); + + // Logged when the resolver for a node changes. + event NewResolver(bytes32 indexed node, address resolver); + + // Logged when the TTL of a node changes + event NewTTL(bytes32 indexed node, uint64 ttl); + + + function setSubnodeOwner(bytes32 node, bytes32 label, address _owner) external; + function setResolver(bytes32 node, address _resolver) external; + function setOwner(bytes32 node, address _owner) external; + function setTTL(bytes32 node, uint64 _ttl) external; + function owner(bytes32 node) external view returns (address); + function resolver(bytes32 node) external view returns (address); + function ttl(bytes32 node) external view returns (uint64); + +} diff --git a/contracts/src/v0.5/interfaces/FlagsInterface.sol b/contracts/src/v0.5/interfaces/FlagsInterface.sol new file mode 100644 index 00000000..88203297 --- /dev/null +++ b/contracts/src/v0.5/interfaces/FlagsInterface.sol @@ -0,0 +1,10 @@ +pragma solidity >=0.5.0; + +interface FlagsInterface { + function getFlag(address) external view returns (bool); + function getFlags(address[] calldata) external view returns (bool[] memory); + function raiseFlag(address) external; + function raiseFlags(address[] calldata) external; + function lowerFlags(address[] calldata) external; + function setRaisingAccessController(address) external; +} diff --git a/contracts/src/v0.5/interfaces/LinkTokenInterface.sol b/contracts/src/v0.5/interfaces/LinkTokenInterface.sol new file mode 100644 index 00000000..6865956f --- /dev/null +++ b/contracts/src/v0.5/interfaces/LinkTokenInterface.sol @@ -0,0 +1,16 @@ +pragma solidity ^0.5.0; + +interface LinkTokenInterface { + function allowance(address owner, address spender) external view returns (uint256 remaining); + function approve(address spender, uint256 value) external returns (bool success); + function balanceOf(address owner) external view returns (uint256 balance); + function decimals() external view returns (uint8 
decimalPlaces); + function decreaseApproval(address spender, uint256 addedValue) external returns (bool success); + function increaseApproval(address spender, uint256 subtractedValue) external; + function name() external view returns (string memory tokenName); + function symbol() external view returns (string memory tokenSymbol); + function totalSupply() external view returns (uint256 totalTokensIssued); + function transfer(address to, uint256 value) external returns (bool success); + function transferAndCall(address to, uint256 value, bytes calldata data) external returns (bool success); + function transferFrom(address from, address to, uint256 value) external returns (bool success); +} diff --git a/contracts/src/v0.5/interfaces/OracleInterface.sol b/contracts/src/v0.5/interfaces/OracleInterface.sol new file mode 100644 index 00000000..b6847e5a --- /dev/null +++ b/contracts/src/v0.5/interfaces/OracleInterface.sol @@ -0,0 +1,16 @@ +pragma solidity ^0.5.0; + +interface OracleInterface { + function fulfillOracleRequest( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + bytes32 data + ) external returns (bool); + function getAuthorizationStatus(address node) external view returns (bool); + function setFulfillmentPermission(address node, bool allowed) external; + function withdraw(address recipient, uint256 amount) external; + function withdrawable() external view returns (uint256); +} diff --git a/contracts/src/v0.5/interfaces/PluginRequestInterface.sol b/contracts/src/v0.5/interfaces/PluginRequestInterface.sol new file mode 100644 index 00000000..8b70d8de --- /dev/null +++ b/contracts/src/v0.5/interfaces/PluginRequestInterface.sol @@ -0,0 +1,21 @@ +pragma solidity ^0.5.0; + +interface PluginRequestInterface { + function oracleRequest( + address sender, + uint256 requestPrice, + bytes32 serviceAgreementID, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 
dataVersion, + bytes calldata data + ) external; + + function cancelOracleRequest( + bytes32 requestId, + uint256 payment, + bytes4 callbackFunctionId, + uint256 expiration + ) external; +} diff --git a/contracts/src/v0.5/interfaces/PointerInterface.sol b/contracts/src/v0.5/interfaces/PointerInterface.sol new file mode 100644 index 00000000..2f3013c6 --- /dev/null +++ b/contracts/src/v0.5/interfaces/PointerInterface.sol @@ -0,0 +1,5 @@ +pragma solidity ^0.5.0; + +interface PointerInterface { + function getAddress() external view returns (address); +} diff --git a/contracts/src/v0.5/interfaces/WithdrawalInterface.sol b/contracts/src/v0.5/interfaces/WithdrawalInterface.sol new file mode 100644 index 00000000..a841ce66 --- /dev/null +++ b/contracts/src/v0.5/interfaces/WithdrawalInterface.sol @@ -0,0 +1,16 @@ +pragma solidity ^0.5.0; + +interface WithdrawalInterface { + /** + * @notice transfer PLI held by the contract belonging to msg.sender to + * another address + * @param recipient is the address to send the PLI to + * @param amount is the amount of PLI to send + */ + function withdraw(address recipient, uint256 amount) external; + + /** + * @notice query the available amount of PLI to withdraw by msg.sender + */ + function withdrawable() external view returns (uint256); +} diff --git a/contracts/src/v0.5/tests/BasicConsumer.sol b/contracts/src/v0.5/tests/BasicConsumer.sol new file mode 100644 index 00000000..71b522b7 --- /dev/null +++ b/contracts/src/v0.5/tests/BasicConsumer.sol @@ -0,0 +1,13 @@ +pragma solidity ^0.5.0; + +import "./Consumer.sol"; + +contract BasicConsumer is Consumer { + + constructor(address _link, address _oracle, bytes32 _specId) public { + setPluginToken(_link); + setPluginOracle(_oracle); + specId = _specId; + } + +} diff --git a/contracts/src/v0.5/tests/Consumer.sol b/contracts/src/v0.5/tests/Consumer.sol new file mode 100644 index 00000000..0dfd5815 --- /dev/null +++ b/contracts/src/v0.5/tests/Consumer.sol @@ -0,0 +1,55 @@ +pragma solidity 
^0.5.0; + +import "../PluginClient.sol"; + +contract Consumer is PluginClient { + bytes32 internal specId; + bytes32 public currentPrice; + + event RequestFulfilled( + bytes32 indexed requestId, // User-defined ID + bytes32 indexed price + ); + + function requestEthereumPrice(string memory _currency, uint256 _payment) public { + requestEthereumPriceByCallback(_currency, _payment, address(this)); + } + + function requestEthereumPriceByCallback(string memory _currency, uint256 _payment, address _callback) public { + Plugin.Request memory req = buildPluginRequest(specId, _callback, this.fulfill.selector); + req.add("get", "https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR,JPY"); + string[] memory path = new string[](1); + path[0] = _currency; + req.addStringArray("path", path); + sendPluginRequest(req, _payment); + } + + function cancelRequest( + address _oracle, + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunctionId, + uint256 _expiration + ) public { + PluginRequestInterface requested = PluginRequestInterface(_oracle); + requested.cancelOracleRequest(_requestId, _payment, _callbackFunctionId, _expiration); + } + + function withdrawLink() public { + LinkTokenInterface _link = LinkTokenInterface(pluginTokenAddress()); + require(_link.transfer(msg.sender, _link.balanceOf(address(this))), "Unable to transfer"); + } + + function addExternalRequest(address _oracle, bytes32 _requestId) external { + addPluginExternalRequest(_oracle, _requestId); + } + + function fulfill(bytes32 _requestId, bytes32 _price) + public + recordPluginFulfillment(_requestId) + { + emit RequestFulfilled(_requestId, _price); + currentPrice = _price; + } + +} diff --git a/contracts/src/v0.5/tests/EmptyAggregator.sol b/contracts/src/v0.5/tests/EmptyAggregator.sol new file mode 100644 index 00000000..02331f31 --- /dev/null +++ b/contracts/src/v0.5/tests/EmptyAggregator.sol @@ -0,0 +1,34 @@ +pragma solidity 0.5.0; + +import "../dev/CoordinatorInterface.sol"; + +/// Used 
to check the basic aggregator/coordinator interactions. It does nothing +/// but emit its messages as certain types of events. +contract EmptyAggregator { + + event InitiatedJob(bytes32 said); + + function initiateJob( + bytes32 _saId, bytes memory _serviceAgreementData) + public returns (bool success, bytes memory _) { + emit InitiatedJob(_saId); + success = true; + } + + event Fulfilled( + bytes32 requestId, + address oracle, + bool success, + bool complete, + bytes fulfillment); + + function fulfill(bytes32 _requestId, bytes32 _saId, address _oracle, + bytes32 _fulfillment) + public returns (bool success, bool complete, bytes memory response, + int256[] memory paymentAmounts) { + success = true; + complete = true; + response = abi.encode(_fulfillment); + emit Fulfilled(_requestId, _oracle, success, complete, response); + } +} diff --git a/contracts/src/v0.5/tests/GetterSetter.sol b/contracts/src/v0.5/tests/GetterSetter.sol new file mode 100644 index 00000000..c765f5a3 --- /dev/null +++ b/contracts/src/v0.5/tests/GetterSetter.sol @@ -0,0 +1,45 @@ +pragma solidity 0.5.0; + +// GetterSetter is a contract to aid debugging and testing during development. 
+contract GetterSetter { + bytes32 public getBytes32; + uint256 public getUint256; + bytes32 public requestId; + bytes public getBytes; + + event SetBytes32(address indexed from, bytes32 indexed value); + event SetUint256(address indexed from, uint256 indexed value); + event SetBytes(address indexed from, bytes value); + + event Output(bytes32 b32, uint256 u256, bytes32 b322); + + function setBytes32(bytes32 _value) public { + getBytes32 = _value; + emit SetBytes32(msg.sender, _value); + } + + function requestedBytes32(bytes32 _requestId, bytes32 _value) public { + requestId = _requestId; + setBytes32(_value); + } + + function setBytes(bytes memory _value) public { + getBytes = _value; + emit SetBytes(msg.sender, _value); + } + + function requestedBytes(bytes32 _requestId, bytes memory _value) public { + requestId = _requestId; + setBytes(_value); + } + + function setUint256(uint256 _value) public { + getUint256 = _value; + emit SetUint256(msg.sender, _value); + } + + function requestedUint256(bytes32 _requestId, uint256 _value) public { + requestId = _requestId; + setUint256(_value); + } +} diff --git a/contracts/src/v0.5/tests/MaliciousChainlink.sol b/contracts/src/v0.5/tests/MaliciousChainlink.sol new file mode 100644 index 00000000..eb7a9827 --- /dev/null +++ b/contracts/src/v0.5/tests/MaliciousChainlink.sol @@ -0,0 +1,76 @@ +pragma solidity 0.5.0; + +import { CBOR as CBOR_Plugin } from "../vendor/CBOR.sol"; +import { Buffer as Buffer_Plugin } from "../vendor/Buffer.sol"; + +library MaliciousPlugin { + using CBOR_Plugin for Buffer_Plugin.buffer; + + struct Request { + bytes32 specId; + address callbackAddress; + bytes4 callbackFunctionId; + uint256 nonce; + Buffer_Plugin.buffer buf; + } + + struct WithdrawRequest { + bytes32 specId; + address callbackAddress; + bytes4 callbackFunctionId; + uint256 nonce; + Buffer_Plugin.buffer buf; + } + + function initializeWithdraw( + WithdrawRequest memory self, + bytes32 _specId, + address _callbackAddress, + bytes4 
_callbackFunction + ) internal pure returns (MaliciousPlugin.WithdrawRequest memory) { + Buffer_Plugin.init(self.buf, 128); + self.specId = _specId; + self.callbackAddress = _callbackAddress; + self.callbackFunctionId = _callbackFunction; + return self; + } + + function add(Request memory self, string memory _key, string memory _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeString(_value); + } + + function addBytes(Request memory self, string memory _key, bytes memory _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeBytes(_value); + } + + function addInt(Request memory self, string memory _key, int256 _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeInt(_value); + } + + function addUint(Request memory self, string memory _key, uint256 _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeUInt(_value); + } + + function addStringArray(Request memory self, string memory _key, string[] memory _values) + internal pure + { + self.buf.encodeString(_key); + self.buf.startArray(); + for (uint256 i = 0; i < _values.length; i++) { + self.buf.encodeString(_values[i]); + } + self.buf.endSequence(); + } +} diff --git a/contracts/src/v0.5/tests/MaliciousChainlinkClient.sol b/contracts/src/v0.5/tests/MaliciousChainlinkClient.sol new file mode 100644 index 00000000..d002e0e8 --- /dev/null +++ b/contracts/src/v0.5/tests/MaliciousChainlinkClient.sol @@ -0,0 +1,109 @@ +pragma solidity 0.5.0; + +import "./MaliciousPlugin.sol"; +import "../PluginClient.sol"; +import "../vendor/SafeMathPlugin.sol"; + +contract MaliciousPluginClient is PluginClient { + using MaliciousPlugin for MaliciousPlugin.Request; + using MaliciousPlugin for MaliciousPlugin.WithdrawRequest; + using Plugin for Plugin.Request; + using SafeMathPlugin for uint256; + + uint256 private maliciousRequests = 1; + mapping(bytes32 => address) private maliciousPendingRequests; + + function newWithdrawRequest( + bytes32 
_specId, + address _callbackAddress, + bytes4 _callbackFunction + ) internal pure returns (MaliciousPlugin.WithdrawRequest memory) { + MaliciousPlugin.WithdrawRequest memory req; + return req.initializeWithdraw(_specId, _callbackAddress, _callbackFunction); + } + + function pluginTargetRequest(address _target, Plugin.Request memory _req, uint256 _amount) + internal + returns(bytes32 requestId) + { + requestId = keccak256(abi.encodePacked(_target, maliciousRequests)); + _req.nonce = maliciousRequests; + maliciousPendingRequests[requestId] = pluginOracleAddress(); + emit PluginRequested(requestId); + LinkTokenInterface _link = LinkTokenInterface(pluginTokenAddress()); + require(_link.transferAndCall(pluginOracleAddress(), _amount, encodeTargetRequest(_req)), "Unable to transferAndCall to oracle"); + maliciousRequests += 1; + + return requestId; + } + + function pluginPriceRequest(Plugin.Request memory _req, uint256 _amount) + internal + returns(bytes32 requestId) + { + requestId = keccak256(abi.encodePacked(this, maliciousRequests)); + _req.nonce = maliciousRequests; + maliciousPendingRequests[requestId] = pluginOracleAddress(); + emit PluginRequested(requestId); + LinkTokenInterface _link = LinkTokenInterface(pluginTokenAddress()); + require(_link.transferAndCall(pluginOracleAddress(), _amount, encodePriceRequest(_req)), "Unable to transferAndCall to oracle"); + maliciousRequests += 1; + + return requestId; + } + + function pluginWithdrawRequest(MaliciousPlugin.WithdrawRequest memory _req, uint256 _wei) + internal + returns(bytes32 requestId) + { + requestId = keccak256(abi.encodePacked(this, maliciousRequests)); + _req.nonce = maliciousRequests; + maliciousPendingRequests[requestId] = pluginOracleAddress(); + emit PluginRequested(requestId); + LinkTokenInterface _link = LinkTokenInterface(pluginTokenAddress()); + require(_link.transferAndCall(pluginOracleAddress(), _wei, encodeWithdrawRequest(_req)), "Unable to transferAndCall to oracle"); + maliciousRequests += 1; 
+ return requestId; + } + + function encodeWithdrawRequest(MaliciousPlugin.WithdrawRequest memory _req) + internal pure returns (bytes memory) + { + return abi.encodeWithSelector( + bytes4(keccak256("withdraw(address,uint256)")), + _req.callbackAddress, + _req.callbackFunctionId, + _req.nonce, + _req.buf.buf); + } + + function encodeTargetRequest(Plugin.Request memory _req) + internal pure returns (bytes memory) + { + return abi.encodeWithSelector( + bytes4(keccak256("oracleRequest(address,uint256,bytes32,address,bytes4,uint256,uint256,bytes)")), + 0, // overridden by onTokenTransfer + 0, // overridden by onTokenTransfer + _req.id, + _req.callbackAddress, + _req.callbackFunctionId, + _req.nonce, + 1, + _req.buf.buf); + } + + function encodePriceRequest(Plugin.Request memory _req) + internal pure returns (bytes memory) + { + return abi.encodeWithSelector( + bytes4(keccak256("oracleRequest(address,uint256,bytes32,address,bytes4,uint256,uint256,bytes)")), + 0, // overridden by onTokenTransfer + 2000000000000000000, // overridden by onTokenTransfer + _req.id, + _req.callbackAddress, + _req.callbackFunctionId, + _req.nonce, + 1, + _req.buf.buf); + } +} diff --git a/contracts/src/v0.5/tests/MaliciousConsumer.sol b/contracts/src/v0.5/tests/MaliciousConsumer.sol new file mode 100644 index 00000000..50e0f8d5 --- /dev/null +++ b/contracts/src/v0.5/tests/MaliciousConsumer.sol @@ -0,0 +1,57 @@ +pragma solidity 0.5.0; + +import "../PluginClient.sol"; +import "../vendor/SafeMathPlugin.sol"; + +contract MaliciousConsumer is PluginClient { + using SafeMathPlugin for uint256; + + uint256 constant private ORACLE_PAYMENT = 1 * PLI; + uint256 private expiration; + + constructor(address _link, address _oracle) public payable { + setPluginToken(_link); + setPluginOracle(_oracle); + } + + function () external payable {} // solhint-disable-line no-empty-blocks + + function requestData(bytes32 _id, bytes memory _callbackFunc) public { + Plugin.Request memory req = buildPluginRequest(_id, 
address(this), bytes4(keccak256(_callbackFunc))); + expiration = now.add(5 minutes); // solhint-disable-line not-rely-on-time + sendPluginRequest(req, ORACLE_PAYMENT); + } + + function assertFail(bytes32, bytes32) public pure { + assert(1 == 2); + } + + function cancelRequestOnFulfill(bytes32 _requestId, bytes32) public { + cancelPluginRequest( + _requestId, + ORACLE_PAYMENT, + this.cancelRequestOnFulfill.selector, + expiration); + } + + function remove() public { + selfdestruct(address(0)); + } + + function stealEthCall(bytes32 _requestId, bytes32) public recordPluginFulfillment(_requestId) { + (bool success,) = address(this).call.value(100)(""); // solhint-disable-line avoid-call-value + require(success, "Call failed"); + } + + function stealEthSend(bytes32 _requestId, bytes32) public recordPluginFulfillment(_requestId) { + // solhint-disable-next-line check-send-result + bool success = address(this).send(100); // solhint-disable-line multiple-sends + require(success, "Send failed"); + } + + function stealEthTransfer(bytes32 _requestId, bytes32) public recordPluginFulfillment(_requestId) { + address(this).transfer(100); + } + + function doesNothing(bytes32, bytes32) public pure {} // solhint-disable-line no-empty-blocks +} diff --git a/contracts/src/v0.5/tests/MaliciousRequester.sol b/contracts/src/v0.5/tests/MaliciousRequester.sol new file mode 100644 index 00000000..569d344d --- /dev/null +++ b/contracts/src/v0.5/tests/MaliciousRequester.sol @@ -0,0 +1,52 @@ +pragma solidity 0.5.0; + + +import "./MaliciousPluginClient.sol"; + + +contract MaliciousRequester is MaliciousPluginClient { + + uint256 constant private ORACLE_PAYMENT = 1 * PLI; + uint256 private expiration; + + constructor(address _link, address _oracle) public { + setPluginToken(_link); + setPluginOracle(_oracle); + } + + function maliciousWithdraw() + public + { + MaliciousPlugin.WithdrawRequest memory req = newWithdrawRequest( + "specId", address(this), this.doesNothing.selector); + 
pluginWithdrawRequest(req, ORACLE_PAYMENT); + } + + function request(bytes32 _id, address _target, bytes memory _callbackFunc) public returns (bytes32 requestId) { + Plugin.Request memory req = buildPluginRequest(_id, _target, bytes4(keccak256(_callbackFunc))); + expiration = now.add(5 minutes); // solhint-disable-line not-rely-on-time + requestId = sendPluginRequest(req, ORACLE_PAYMENT); + } + + function maliciousPrice(bytes32 _id) public returns (bytes32 requestId) { + Plugin.Request memory req = buildPluginRequest(_id, address(this), this.doesNothing.selector); + requestId = pluginPriceRequest(req, ORACLE_PAYMENT); + } + + function maliciousTargetConsumer(address _target) public returns (bytes32 requestId) { + Plugin.Request memory req = buildPluginRequest("specId", _target, bytes4(keccak256("fulfill(bytes32,bytes32)"))); + requestId = pluginTargetRequest(_target, req, ORACLE_PAYMENT); + } + + function maliciousRequestCancel(bytes32 _id, bytes memory _callbackFunc) public { + PluginRequestInterface _oracle = PluginRequestInterface(pluginOracleAddress()); + _oracle.cancelOracleRequest( + request(_id, address(this), _callbackFunc), + ORACLE_PAYMENT, + this.maliciousRequestCancel.selector, + expiration + ); + } + + function doesNothing(bytes32, bytes32) public pure {} // solhint-disable-line no-empty-blocks +} diff --git a/contracts/src/v0.5/tests/MeanAggregator.sol b/contracts/src/v0.5/tests/MeanAggregator.sol new file mode 100644 index 00000000..4c15858d --- /dev/null +++ b/contracts/src/v0.5/tests/MeanAggregator.sol @@ -0,0 +1,75 @@ +pragma solidity 0.5.0; + +import "../dev/CoordinatorInterface.sol"; +import "../dev/ServiceAgreementDecoder.sol"; + +/// Computes the mean of the values the oracles pass it via fulfill method +contract MeanAggregator is ServiceAgreementDecoder { + + // Relies on Coordinator's authorization of the oracles (no need to track + // oracle authorization in this contract.) 
+ + mapping(bytes32 /* service agreement ID */ => uint256) payment; + mapping(bytes32 /* service agreement ID */ => address[]) oracles; + mapping(bytes32 /* request ID */ => uint256) numberReported; + mapping(bytes32 /* request ID */ => mapping(address => uint256)) reportingOrder; + + // Current total for given request, divided by number of oracles reporting + mapping(bytes32 /* request ID */ => uint256) average; + // Remainder of total for given request from division by number of oracles. + mapping(bytes32 /* request ID */ => uint256) remainder; + + function initiateJob( + bytes32 _sAId, bytes memory _serviceAgreementData) + public returns (bool success, bytes memory message) { + ServiceAgreement memory serviceAgreement = decodeServiceAgreement(_serviceAgreementData); + + if (oracles[_sAId].length != 0) { + return (false, bytes("job already initiated")); + } + if (serviceAgreement.oracles.length == 0) { + return (false, bytes("must depend on at least one oracle")); + } + oracles[_sAId] = serviceAgreement.oracles; + payment[_sAId] = serviceAgreement.payment; + success = true; + } + + function fulfill(bytes32 _requestId, bytes32 _sAId, address _oracle, + bytes32 _value) public + returns (bool success, bool complete, bytes memory response, + int256[] memory paymentAmounts) { + if (reportingOrder[_requestId][_oracle] != 0 || + numberReported[_requestId] == oracles[_sAId].length) { + return (false, false, "oracle already reported", paymentAmounts); + } + uint256 oDividend = uint256(_value) / oracles[_sAId].length; + uint256 oRemainder = uint256(_value) % oracles[_sAId].length; + uint256 newRemainder = remainder[_requestId] + oRemainder; + uint256 newAverage = average[_requestId] + oDividend + (newRemainder / oracles[_sAId].length); + assert(newAverage >= average[_requestId]); // No overflow + average[_requestId] = newAverage; + remainder[_requestId] = newRemainder % oracles[_sAId].length; + numberReported[_requestId] += 1; + reportingOrder[_requestId][_oracle] = 
numberReported[_requestId]; + success = true; + complete = (numberReported[_requestId] == oracles[_sAId].length); + if (complete) { + response = abi.encode(average[_requestId]); + paymentAmounts = calculatePayments(_sAId, _requestId); + } + } + + function calculatePayments(bytes32 _sAId, bytes32 _requestId) private returns (int256[] memory paymentAmounts) { + paymentAmounts = new int256[](oracles[_sAId].length); + uint256 numOracles = oracles[_sAId].length; + uint256 totalPayment = payment[_sAId]; + for (uint256 oIdx = 0; oIdx < oracles[_sAId].length; oIdx++) { + // Linearly-decaying payout determined by each oracle's reportingIdx + uint256 reportingIdx = reportingOrder[_requestId][oracles[_sAId][oIdx]] - 1; + paymentAmounts[oIdx] = int256(2*(totalPayment/numOracles) - ( + (totalPayment * ((2*reportingIdx) + 1)) / (numOracles**2))); + delete reportingOrder[_requestId][oracles[_sAId][oIdx]]; + } + } +} diff --git a/contracts/src/v0.5/tests/MedianTestHelper.sol b/contracts/src/v0.5/tests/MedianTestHelper.sol new file mode 100644 index 00000000..07387317 --- /dev/null +++ b/contracts/src/v0.5/tests/MedianTestHelper.sol @@ -0,0 +1,15 @@ +pragma solidity ^0.5.0; + +import "../Median.sol"; + +contract MedianTestHelper { + + function publicGet(int256[] memory _list) + public + pure + returns (int256) + { + return Median.calculate(_list); + } + +} diff --git a/contracts/src/v0.5/tests/PluginTestHelper.sol b/contracts/src/v0.5/tests/PluginTestHelper.sol new file mode 100644 index 00000000..60310f29 --- /dev/null +++ b/contracts/src/v0.5/tests/PluginTestHelper.sol @@ -0,0 +1,75 @@ +pragma solidity ^0.5.0; + +import "../Plugin.sol"; + +contract PluginTestHelper { + using Plugin for Plugin.Request; + using CBOR_Plugin for Buffer_Plugin.buffer; + + Plugin.Request private req; + + event RequestData(bytes payload); + + function closeEvent() public { + emit RequestData(req.buf.buf); + } + + function setBuffer(bytes memory data) public { + Plugin.Request memory r2 = req; + 
r2.setBuffer(data); + req = r2; + } + + function add(string memory _key, string memory _value) public { + Plugin.Request memory r2 = req; + r2.add(_key, _value); + req = r2; + } + + function addBytes(string memory _key, bytes memory _value) public { + Plugin.Request memory r2 = req; + r2.addBytes(_key, _value); + req = r2; + } + + function addInt(string memory _key, int256 _value) public { + Plugin.Request memory r2 = req; + r2.addInt(_key, _value); + req = r2; + } + + function addUint(string memory _key, uint256 _value) public { + Plugin.Request memory r2 = req; + r2.addUint(_key, _value); + req = r2; + } + + // Temporarily have method receive bytes32[] memory until experimental + // string[] memory can be invoked from truffle tests. + function addStringArray(string memory _key, bytes32[] memory _values) public { + string[] memory strings = new string[](_values.length); + for (uint256 i = 0; i < _values.length; i++) { + strings[i] = bytes32ToString(_values[i]); + } + Plugin.Request memory r2 = req; + r2.addStringArray(_key, strings); + req = r2; + } + + function bytes32ToString(bytes32 x) private pure returns (string memory) { + bytes memory bytesString = new bytes(32); + uint charCount = 0; + for (uint j = 0; j < 32; j++) { + byte char = byte(bytes32(uint(x) * 2 ** (8 * j))); + if (char != 0) { + bytesString[charCount] = char; + charCount++; + } + } + bytes memory bytesStringTrimmed = new bytes(charCount); + for (uint j = 0; j < charCount; j++) { + bytesStringTrimmed[j] = bytesString[j]; + } + return string(bytesStringTrimmed); + } +} diff --git a/contracts/src/v0.5/tests/ServiceAgreementConsumer.sol b/contracts/src/v0.5/tests/ServiceAgreementConsumer.sol new file mode 100644 index 00000000..0c6e61d3 --- /dev/null +++ b/contracts/src/v0.5/tests/ServiceAgreementConsumer.sol @@ -0,0 +1,30 @@ +pragma solidity 0.5.0; + +import "../PluginClient.sol"; + +contract ServiceAgreementConsumer is PluginClient { + uint256 constant private ORACLE_PAYMENT = 1 * PLI; + + bytes32 
internal sAId; + bytes32 public currentPrice; + + constructor(address _link, address _coordinator, bytes32 _sAId) public { + setPluginToken(_link); + setPluginOracle(_coordinator); + sAId = _sAId; + } + + function requestEthereumPrice(string memory _currency) public { + Plugin.Request memory req = buildPluginRequest(sAId, address(this), this.fulfill.selector); + req.add("get", "https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR,JPY"); + req.add("path", _currency); + sendPluginRequest(req, ORACLE_PAYMENT); + } + + function fulfill(bytes32 _requestId, bytes32 _price) + public + recordPluginFulfillment(_requestId) + { + currentPrice = _price; + } +} diff --git a/contracts/src/v0.5/vendor/Buffer.sol b/contracts/src/v0.5/vendor/Buffer.sol new file mode 100644 index 00000000..ef740574 --- /dev/null +++ b/contracts/src/v0.5/vendor/Buffer.sol @@ -0,0 +1,301 @@ +pragma solidity ^0.5.0; + +/** +* @dev A library for working with mutable byte buffers in Solidity. +* +* Byte buffers are mutable and expandable, and provide a variety of primitives +* for writing to them. At any time you can fetch a bytes object containing the +* current contents of the buffer. The bytes object should not be stored between +* operations, as it may change due to resizing of the buffer. +*/ +library Buffer { + /** + * @dev Represents a mutable buffer. Buffers have a current value (buf) and + * a capacity. The capacity may be longer than the current value, in + * which case it can be extended without the need to allocate more memory. + */ + struct buffer { + bytes buf; + uint capacity; + } + + /** + * @dev Initializes a buffer with an initial capacity. + * @param buf The buffer to initialize. + * @param capacity The number of bytes of space to allocate the buffer. + * @return The buffer, for chaining. 
+ */ + function init(buffer memory buf, uint capacity) internal pure returns(buffer memory) { + if (capacity % 32 != 0) { + capacity += 32 - (capacity % 32); + } + // Allocate space for the buffer data + buf.capacity = capacity; + assembly { + let ptr := mload(0x40) + mstore(buf, ptr) + mstore(ptr, 0) + mstore(0x40, add(32, add(ptr, capacity))) + } + return buf; + } + + /** + * @dev Initializes a new buffer from an existing bytes object. + * Changes to the buffer may mutate the original value. + * @param b The bytes object to initialize the buffer with. + * @return A new buffer. + */ + function fromBytes(bytes memory b) internal pure returns(buffer memory) { + buffer memory buf; + buf.buf = b; + buf.capacity = b.length; + return buf; + } + + function resize(buffer memory buf, uint capacity) private pure { + bytes memory oldbuf = buf.buf; + init(buf, capacity); + append(buf, oldbuf); + } + + function max(uint a, uint b) private pure returns(uint) { + if (a > b) { + return a; + } + return b; + } + + /** + * @dev Sets buffer length to 0. + * @param buf The buffer to truncate. + * @return The original buffer, for chaining.. + */ + function truncate(buffer memory buf) internal pure returns (buffer memory) { + assembly { + let bufptr := mload(buf) + mstore(bufptr, 0) + } + return buf; + } + + /** + * @dev Writes a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param off The start offset to write to. + * @param data The data to append. + * @param len The number of bytes to copy. + * @return The original buffer, for chaining. 
+ */ + function write(buffer memory buf, uint off, bytes memory data, uint len) internal pure returns(buffer memory) { + require(len <= data.length); + + if (off + len > buf.capacity) { + resize(buf, max(buf.capacity, len + off) * 2); + } + + uint dest; + uint src; + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Length of existing buffer data + let buflen := mload(bufptr) + // Start address = buffer address + offset + sizeof(buffer length) + dest := add(add(bufptr, 32), off) + // Update buffer length if we're extending it + if gt(add(len, off), buflen) { + mstore(bufptr, add(len, off)) + } + src := add(data, 32) + } + + // Copy word-length chunks while possible + for (; len >= 32; len -= 32) { + assembly { + mstore(dest, mload(src)) + } + dest += 32; + src += 32; + } + + // Copy remaining bytes + uint mask = 256 ** (32 - len) - 1; + assembly { + let srcpart := and(mload(src), not(mask)) + let destpart := and(mload(dest), mask) + mstore(dest, or(destpart, srcpart)) + } + + return buf; + } + + /** + * @dev Appends a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @param len The number of bytes to copy. + * @return The original buffer, for chaining. + */ + function append(buffer memory buf, bytes memory data, uint len) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, len); + } + + /** + * @dev Appends a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function append(buffer memory buf, bytes memory data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, data.length); + } + + /** + * @dev Writes a byte to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. 
+ * @param buf The buffer to append to. + * @param off The offset to write the byte at. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function writeUint8(buffer memory buf, uint off, uint8 data) internal pure returns(buffer memory) { + if (off >= buf.capacity) { + resize(buf, buf.capacity * 2); + } + + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Length of existing buffer data + let buflen := mload(bufptr) + // Address = buffer address + sizeof(buffer length) + off + let dest := add(add(bufptr, off), 32) + mstore8(dest, data) + // Update buffer length if we extended it + if eq(off, buflen) { + mstore(bufptr, add(buflen, 1)) + } + } + return buf; + } + + /** + * @dev Appends a byte to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendUint8(buffer memory buf, uint8 data) internal pure returns(buffer memory) { + return writeUint8(buf, buf.buf.length, data); + } + + /** + * @dev Writes up to 32 bytes to the buffer. Resizes if doing so would + * exceed the capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @param len The number of bytes to write (left-aligned). + * @return The original buffer, for chaining. 
+ */ + function write(buffer memory buf, uint off, bytes32 data, uint len) private pure returns(buffer memory) { + if (len + off > buf.capacity) { + resize(buf, (len + off) * 2); + } + + uint mask = 256 ** len - 1; + // Right-align data + data = data >> (8 * (32 - len)); + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + sizeof(buffer length) + off + len + let dest := add(add(bufptr, off), len) + mstore(dest, or(and(mload(dest), not(mask)), data)) + // Update buffer length if we extended it + if gt(add(off, len), mload(bufptr)) { + mstore(bufptr, add(off, len)) + } + } + return buf; + } + + /** + * @dev Writes a bytes20 to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function writeBytes20(buffer memory buf, uint off, bytes20 data) internal pure returns (buffer memory) { + return write(buf, off, bytes32(data), 20); + } + + /** + * @dev Appends a bytes20 to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendBytes20(buffer memory buf, bytes20 data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, bytes32(data), 20); + } + + /** + * @dev Appends a bytes32 to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendBytes32(buffer memory buf, bytes32 data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, 32); + } + + /** + * @dev Writes an integer to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. 
+ * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @param len The number of bytes to write (right-aligned). + * @return The original buffer, for chaining. + */ + function writeInt(buffer memory buf, uint off, uint data, uint len) private pure returns(buffer memory) { + if (len + off > buf.capacity) { + resize(buf, (len + off) * 2); + } + + uint mask = 256 ** len - 1; + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + off + sizeof(buffer length) + len + let dest := add(add(bufptr, off), len) + mstore(dest, or(and(mload(dest), not(mask)), data)) + // Update buffer length if we extended it + if gt(add(off, len), mload(bufptr)) { + mstore(bufptr, add(off, len)) + } + } + return buf; + } + + /** + * @dev Appends a byte to the end of the buffer. Resizes if doing so would + * exceed the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer. 
+ */ + function appendInt(buffer memory buf, uint data, uint len) internal pure returns(buffer memory) { + return writeInt(buf, buf.buf.length, data, len); + } +} \ No newline at end of file diff --git a/contracts/src/v0.5/vendor/CBOR.sol b/contracts/src/v0.5/vendor/CBOR.sol new file mode 100644 index 00000000..27d3e34a --- /dev/null +++ b/contracts/src/v0.5/vendor/CBOR.sol @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: MIT +pragma solidity >= 0.4.19 < 0.7.0; + +import { Buffer as BufferPlugin } from "./Buffer.sol"; + +library CBOR { + using BufferPlugin for BufferPlugin.buffer; + + uint8 private constant MAJOR_TYPE_INT = 0; + uint8 private constant MAJOR_TYPE_NEGATIVE_INT = 1; + uint8 private constant MAJOR_TYPE_BYTES = 2; + uint8 private constant MAJOR_TYPE_STRING = 3; + uint8 private constant MAJOR_TYPE_ARRAY = 4; + uint8 private constant MAJOR_TYPE_MAP = 5; + uint8 private constant MAJOR_TYPE_TAG = 6; + uint8 private constant MAJOR_TYPE_CONTENT_FREE = 7; + + uint8 private constant TAG_TYPE_BIGNUM = 2; + uint8 private constant TAG_TYPE_NEGATIVE_BIGNUM = 3; + + function encodeFixedNumeric(BufferPlugin.buffer memory buf, uint8 major, uint64 value) private pure { + if(value <= 23) { + buf.appendUint8(uint8((major << 5) | value)); + } else if(value <= 0xFF) { + buf.appendUint8(uint8((major << 5) | 24)); + buf.appendInt(value, 1); + } else if(value <= 0xFFFF) { + buf.appendUint8(uint8((major << 5) | 25)); + buf.appendInt(value, 2); + } else if(value <= 0xFFFFFFFF) { + buf.appendUint8(uint8((major << 5) | 26)); + buf.appendInt(value, 4); + } else { + buf.appendUint8(uint8((major << 5) | 27)); + buf.appendInt(value, 8); + } + } + + function encodeIndefiniteLengthType(BufferPlugin.buffer memory buf, uint8 major) private pure { + buf.appendUint8(uint8((major << 5) | 31)); + } + + function encodeUInt(BufferPlugin.buffer memory buf, uint value) internal pure { + if(value > 0xFFFFFFFFFFFFFFFF) { + encodeBigNum(buf, value); + } else { + encodeFixedNumeric(buf, MAJOR_TYPE_INT, 
uint64(value)); + } + } + + function encodeInt(BufferPlugin.buffer memory buf, int value) internal pure { + if(value < -0x10000000000000000) { + encodeSignedBigNum(buf, value); + } else if(value > 0xFFFFFFFFFFFFFFFF) { + encodeBigNum(buf, uint(value)); + } else if(value >= 0) { + encodeFixedNumeric(buf, MAJOR_TYPE_INT, uint64(value)); + } else { + encodeFixedNumeric(buf, MAJOR_TYPE_NEGATIVE_INT, uint64(-1 - value)); + } + } + + function encodeBytes(BufferPlugin.buffer memory buf, bytes memory value) internal pure { + encodeFixedNumeric(buf, MAJOR_TYPE_BYTES, uint64(value.length)); + buf.append(value); + } + + function encodeBigNum(BufferPlugin.buffer memory buf, uint value) internal pure { + buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_BIGNUM)); + encodeBytes(buf, abi.encode(value)); + } + + function encodeSignedBigNum(BufferPlugin.buffer memory buf, int input) internal pure { + buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_NEGATIVE_BIGNUM)); + encodeBytes(buf, abi.encode(uint(-1 - input))); + } + + function encodeString(BufferPlugin.buffer memory buf, string memory value) internal pure { + encodeFixedNumeric(buf, MAJOR_TYPE_STRING, uint64(bytes(value).length)); + buf.append(bytes(value)); + } + + function startArray(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_ARRAY); + } + + function startMap(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_MAP); + } + + function endSequence(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_CONTENT_FREE); + } +} diff --git a/contracts/src/v0.5/vendor/ENSResolver.sol b/contracts/src/v0.5/vendor/ENSResolver.sol new file mode 100644 index 00000000..b80fd6d5 --- /dev/null +++ b/contracts/src/v0.5/vendor/ENSResolver.sol @@ -0,0 +1,5 @@ +pragma solidity ^0.5.0; + +contract ENSResolver { + function addr(bytes32 node) public view returns (address); +} diff --git 
a/contracts/src/v0.5/vendor/Ownable.sol b/contracts/src/v0.5/vendor/Ownable.sol new file mode 100644 index 00000000..4d0929a9 --- /dev/null +++ b/contracts/src/v0.5/vendor/Ownable.sol @@ -0,0 +1,65 @@ +pragma solidity ^0.5.0; + +/** + * @dev Contract module which provides a basic access control mechanism, where + * there is an account (an owner) that can be granted exclusive access to + * specific functions. + * + * This module is used through inheritance. It will make available the modifier + * `onlyOwner`, which can be applied to your functions to restrict their use to + * the owner. + * + * This contract has been modified to remove the revokeOwnership function + */ +contract Ownable { + address private _owner; + + event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); + + /** + * @dev Initializes the contract setting the deployer as the initial owner. + */ + constructor () internal { + _owner = msg.sender; + emit OwnershipTransferred(address(0), _owner); + } + + /** + * @dev Returns the address of the current owner. + */ + function owner() public view returns (address) { + return _owner; + } + + /** + * @dev Throws if called by any account other than the owner. + */ + modifier onlyOwner() { + require(isOwner(), "Ownable: caller is not the owner"); + _; + } + + /** + * @dev Returns true if the caller is the current owner. + */ + function isOwner() public view returns (bool) { + return msg.sender == _owner; + } + + /** + * @dev Transfers ownership of the contract to a new account (`newOwner`). + * Can only be called by the current owner. + */ + function transferOwnership(address newOwner) public onlyOwner { + _transferOwnership(newOwner); + } + + /** + * @dev Transfers ownership of the contract to a new account (`newOwner`). 
+ */ + function _transferOwnership(address newOwner) internal { + require(newOwner != address(0), "Ownable: new owner is the zero address"); + emit OwnershipTransferred(_owner, newOwner); + _owner = newOwner; + } +} diff --git a/contracts/src/v0.5/vendor/SafeMathPlugin.sol b/contracts/src/v0.5/vendor/SafeMathPlugin.sol new file mode 100644 index 00000000..4a36a755 --- /dev/null +++ b/contracts/src/v0.5/vendor/SafeMathPlugin.sol @@ -0,0 +1,107 @@ +pragma solidity ^0.5.0; + +/** + * @dev Wrappers over Solidity's arithmetic operations with added overflow + * checks. + * + * Arithmetic operations in Solidity wrap on overflow. This can easily result + * in bugs, because programmers usually assume that an overflow raises an + * error, which is the standard behavior in high level programming languages. + * `SafeMath` restores this intuition by reverting the transaction when an + * operation overflows. + * + * Using this library instead of the unchecked operations eliminates an entire + * class of bugs, so it's recommended to use it always. + */ +library SafeMathPlugin { + /** + * @dev Returns the addition of two unsigned integers, reverting on + * overflow. + * + * Counterpart to Solidity's `+` operator. + * + * Requirements: + * - Addition cannot overflow. + */ + function add(uint256 a, uint256 b) internal pure returns (uint256) { + uint256 c = a + b; + require(c >= a, "SafeMath: addition overflow"); + + return c; + } + + /** + * @dev Returns the subtraction of two unsigned integers, reverting on + * overflow (when the result is negative). + * + * Counterpart to Solidity's `-` operator. + * + * Requirements: + * - Subtraction cannot overflow. + */ + function sub(uint256 a, uint256 b) internal pure returns (uint256) { + require(b <= a, "SafeMath: subtraction overflow"); + uint256 c = a - b; + + return c; + } + + /** + * @dev Returns the multiplication of two unsigned integers, reverting on + * overflow. + * + * Counterpart to Solidity's `*` operator. 
+ * + * Requirements: + * - Multiplication cannot overflow. + */ + function mul(uint256 a, uint256 b) internal pure returns (uint256) { + // Gas optimization: this is cheaper than requiring 'a' not being zero, but the + // benefit is lost if 'b' is also tested. + // See: https://github.com/OpenZeppelin/openzeppelin-solidity/pull/522 + if (a == 0) { + return 0; + } + + uint256 c = a * b; + require(c / a == b, "SafeMath: multiplication overflow"); + + return c; + } + + /** + * @dev Returns the integer division of two unsigned integers. Reverts on + * division by zero. The result is rounded towards zero. + * + * Counterpart to Solidity's `/` operator. Note: this function uses a + * `revert` opcode (which leaves remaining gas untouched) while Solidity + * uses an invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. + */ + function div(uint256 a, uint256 b) internal pure returns (uint256) { + // Solidity only automatically asserts when dividing by 0 + require(b > 0, "SafeMath: division by zero"); + uint256 c = a / b; + // assert(a == b * c + a % b); // There is no case in which this doesn't hold + + return c; + } + + /** + * @dev Returns the remainder of dividing two unsigned integers. (unsigned integer modulo), + * Reverts when dividing by zero. + * + * Counterpart to Solidity's `%` operator. This function uses a `revert` + * opcode (which leaves remaining gas untouched) while Solidity uses an + * invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. 
+ */ + function mod(uint256 a, uint256 b) internal pure returns (uint256) { + require(b != 0, "SafeMath: modulo by zero"); + return a % b; + } +} diff --git a/contracts/src/v0.5/vendor/SignedSafeMath.sol b/contracts/src/v0.5/vendor/SignedSafeMath.sol new file mode 100644 index 00000000..68889f1f --- /dev/null +++ b/contracts/src/v0.5/vendor/SignedSafeMath.sol @@ -0,0 +1,22 @@ +pragma solidity ^0.5.0; + +library SignedSafeMath { + + /** + * @dev Adds two int256s and makes sure the result doesn't overflow. Signed + * integers aren't supported by the SafeMath library, thus this method + * @param _a The first number to be added + * @param _b The second number to be added + */ + function add(int256 _a, int256 _b) + internal + pure + returns (int256) + { + // solium-disable-next-line zeppelin/no-arithmetic-operations + int256 c = _a + _b; + require((_b >= 0 && c >= _a) || (_b < 0 && c < _a), "SignedSafeMath: addition overflow"); + + return c; + } +} \ No newline at end of file diff --git a/contracts/src/v0.6/AggregatorFacade.sol b/contracts/src/v0.6/AggregatorFacade.sol new file mode 100644 index 00000000..deae4cfb --- /dev/null +++ b/contracts/src/v0.6/AggregatorFacade.sol @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "./interfaces/AggregatorV2V3Interface.sol"; + +/** + * @title A facade for Aggregator versions to conform to the new v0.6 + * Aggregator V3 interface. + */ +contract AggregatorFacade is AggregatorV2V3Interface { + + AggregatorInterface public aggregator; + uint8 public override decimals; + string public override description; + + uint256 constant public override version = 2; + + // An error specific to the Aggregator V3 Interface, to prevent possible + // confusion around accidentally reading unset values as reported values. 
+ string constant private V3_NO_DATA_ERROR = "No data present"; + + constructor( + address _aggregator, + uint8 _decimals, + string memory _description + ) public { + aggregator = AggregatorInterface(_aggregator); + decimals = _decimals; + description = _description; + } + + /** + * @notice get the latest completed round where the answer was updated + * @dev #[deprecated]. Use latestRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended latestRoundData + * instead which includes better verification information. + */ + function latestRound() + external + view + virtual + override + returns (uint256) + { + return aggregator.latestRound(); + } + + /** + * @notice Reads the current answer from aggregator delegated to. + * + * @dev #[deprecated] Use latestRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended latestRoundData + * instead which includes better verification information. + */ + function latestAnswer() + external + view + virtual + override + returns (int256) + { + return aggregator.latestAnswer(); + } + + /** + * @notice Reads the last updated height from aggregator delegated to. + * + * @dev #[deprecated] Use latestRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended latestRoundData + * instead which includes better verification information. + */ + function latestTimestamp() + external + view + virtual + override + returns (uint256) + { + return aggregator.latestTimestamp(); + } + + /** + * @notice get data about the latest round. Consumers are encouraged to check + * that they're receiving fresh data by inspecting the updatedAt value. 
+ * @return roundId is the round ID for which data was retrieved + * @return answer is the answer for the given round + * @return startedAt is always equal to updatedAt because the underlying + * Aggregator contract does not expose this information. + * @return updatedAt is the timestamp when the round last was updated (i.e. + * answer was last computed) + * @return answeredInRound is always equal to roundId because the underlying + * Aggregator contract does not expose this information. + * @dev Note that for rounds that haven't yet received responses from all + * oracles, answer and updatedAt may change between queries. + */ + function latestRoundData() + external + view + virtual + override + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return _getRoundData(uint80(aggregator.latestRound())); + } + + /** + * @notice get past rounds answers + * @param _roundId the answer number to retrieve the answer for + * + * @dev #[deprecated] Use getRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended getRoundData + * instead which includes better verification information. + */ + function getAnswer(uint256 _roundId) + external + view + virtual + override + returns (int256) + { + return aggregator.getAnswer(_roundId); + } + + /** + * @notice get block timestamp when an answer was last updated + * @param _roundId the answer number to retrieve the updated timestamp for + * + * @dev #[deprecated] Use getRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended getRoundData + * instead which includes better verification information. 
+ */ + function getTimestamp(uint256 _roundId) + external + view + virtual + override + returns (uint256) + { + return aggregator.getTimestamp(_roundId); + } + + /** + * @notice get data about a round. Consumers are encouraged to check + * that they're receiving fresh data by inspecting the updatedAt value. + * @param _roundId the round ID to retrieve the round data for + * @return roundId is the round ID for which data was retrieved + * @return answer is the answer for the given round + * @return startedAt is always equal to updatedAt because the underlying + * Aggregator contract does not expose this information. + * @return updatedAt is the timestamp when the round last was updated (i.e. + * answer was last computed) + * @return answeredInRound is always equal to roundId because the underlying + * Aggregator contract does not expose this information. + * @dev Note that for rounds that haven't yet received responses from all + * oracles, answer and updatedAt may change between queries. 
+ */ + function getRoundData(uint80 _roundId) + external + view + virtual + override + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return _getRoundData(_roundId); + } + + + /* + * Internal + */ + + function _getRoundData(uint80 _roundId) + internal + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + answer = aggregator.getAnswer(_roundId); + updatedAt = uint64(aggregator.getTimestamp(_roundId)); + + require(updatedAt > 0, V3_NO_DATA_ERROR); + + return (_roundId, answer, updatedAt, updatedAt, _roundId); + } + +} diff --git a/contracts/src/v0.6/AggregatorProxy.sol b/contracts/src/v0.6/AggregatorProxy.sol new file mode 100644 index 00000000..73de2205 --- /dev/null +++ b/contracts/src/v0.6/AggregatorProxy.sol @@ -0,0 +1,445 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "./Owned.sol"; +import "./interfaces/AggregatorV2V3Interface.sol"; + +/** + * @title A trusted proxy for updating where current answers are read from + * @notice This contract provides a consistent address for the + * CurrentAnwerInterface but delegates where it reads from to the owner, who is + * trusted to update it. + */ +contract AggregatorProxy is AggregatorV2V3Interface, Owned { + + struct Phase { + uint16 id; + AggregatorV2V3Interface aggregator; + } + Phase private currentPhase; + AggregatorV2V3Interface public proposedAggregator; + mapping(uint16 => AggregatorV2V3Interface) public phaseAggregators; + + uint256 constant private PHASE_OFFSET = 64; + uint256 constant private PHASE_SIZE = 16; + uint256 constant private MAX_ID = 2**(PHASE_OFFSET+PHASE_SIZE) - 1; + + constructor(address _aggregator) public Owned() { + setAggregator(_aggregator); + } + + /** + * @notice Reads the current answer from aggregator delegated to. + * + * @dev #[deprecated] Use latestRoundData instead. 
This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended latestRoundData + * instead which includes better verification information. + */ + function latestAnswer() + public + view + virtual + override + returns (int256 answer) + { + return currentPhase.aggregator.latestAnswer(); + } + + /** + * @notice Reads the last updated height from aggregator delegated to. + * + * @dev #[deprecated] Use latestRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended latestRoundData + * instead which includes better verification information. + */ + function latestTimestamp() + public + view + virtual + override + returns (uint256 updatedAt) + { + return currentPhase.aggregator.latestTimestamp(); + } + + /** + * @notice get past rounds answers + * @param _roundId the answer number to retrieve the answer for + * + * @dev #[deprecated] Use getRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended getRoundData + * instead which includes better verification information. + */ + function getAnswer(uint256 _roundId) + public + view + virtual + override + returns (int256 answer) + { + if (_roundId > MAX_ID) return 0; + + (uint16 phaseId, uint64 aggregatorRoundId) = parseIds(_roundId); + AggregatorV2V3Interface aggregator = phaseAggregators[phaseId]; + if (address(aggregator) == address(0)) return 0; + + return aggregator.getAnswer(aggregatorRoundId); + } + + /** + * @notice get block timestamp when an answer was last updated + * @param _roundId the answer number to retrieve the updated timestamp for + * + * @dev #[deprecated] Use getRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. 
Either wait to point to + * an already answered Aggregator or use the recommended getRoundData + * instead which includes better verification information. + */ + function getTimestamp(uint256 _roundId) + public + view + virtual + override + returns (uint256 updatedAt) + { + if (_roundId > MAX_ID) return 0; + + (uint16 phaseId, uint64 aggregatorRoundId) = parseIds(_roundId); + AggregatorV2V3Interface aggregator = phaseAggregators[phaseId]; + if (address(aggregator) == address(0)) return 0; + + return aggregator.getTimestamp(aggregatorRoundId); + } + + /** + * @notice get the latest completed round where the answer was updated. This + * ID includes the proxy's phase, to make sure round IDs increase even when + * switching to a newly deployed aggregator. + * + * @dev #[deprecated] Use latestRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended latestRoundData + * instead which includes better verification information. + */ + function latestRound() + public + view + virtual + override + returns (uint256 roundId) + { + Phase memory phase = currentPhase; // cache storage reads + return addPhase(phase.id, uint64(phase.aggregator.latestRound())); + } + + /** + * @notice get data about a round. Consumers are encouraged to check + * that they're receiving fresh data by inspecting the updatedAt and + * answeredInRound return values. + * Note that different underlying implementations of AggregatorV3Interface + * have slightly different semantics for some of the return values. Consumers + * should determine what implementations they expect to receive + * data from and validate that they can properly handle return data from all + * of them. 
+   * @param _roundId the requested round ID as presented through the proxy, this
+   * is made up of the aggregator's round ID with the phase ID encoded in the
+   * two highest order bytes
+   * @return roundId is the round ID from the aggregator for which the data was
+   * retrieved combined with a phase to ensure that round IDs get larger as
+   * time moves forward.
+   * @return answer is the answer for the given round
+   * @return startedAt is the timestamp when the round was started.
+   * (Only some AggregatorV3Interface implementations return meaningful values)
+   * @return updatedAt is the timestamp when the round last was updated (i.e.
+   * answer was last computed)
+   * @return answeredInRound is the round ID of the round in which the answer
+   * was computed.
+   * (Only some AggregatorV3Interface implementations return meaningful values)
+   * @dev Note that answer and updatedAt may change between queries.
+   */
+  function getRoundData(uint80 _roundId)
+    public
+    view
+    virtual
+    override
+    returns (
+      uint80 roundId,
+      int256 answer,
+      uint256 startedAt,
+      uint256 updatedAt,
+      uint80 answeredInRound
+    )
+  {
+    (uint16 phaseId, uint64 aggregatorRoundId) = parseIds(_roundId);
+
+    (
+      roundId,
+      answer,
+      startedAt,
+      updatedAt,
+      answeredInRound
+    ) = phaseAggregators[phaseId].getRoundData(aggregatorRoundId);
+
+    return addPhaseIds(roundId, answer, startedAt, updatedAt, answeredInRound, phaseId);
+  }
+
+  /**
+   * @notice get data about the latest round. Consumers are encouraged to check
+   * that they're receiving fresh data by inspecting the updatedAt and
+   * answeredInRound return values.
+   * Note that different underlying implementations of AggregatorV3Interface
+   * have slightly different semantics for some of the return values. Consumers
+   * should determine what implementations they expect to receive
+   * data from and validate that they can properly handle return data from all
+   * of them.
+   * @return roundId is the round ID from the aggregator for which the data was
+   * retrieved combined with a phase to ensure that round IDs get larger as
+   * time moves forward.
+   * @return answer is the answer for the given round
+   * @return startedAt is the timestamp when the round was started.
+   * (Only some AggregatorV3Interface implementations return meaningful values)
+   * @return updatedAt is the timestamp when the round last was updated (i.e.
+   * answer was last computed)
+   * @return answeredInRound is the round ID of the round in which the answer
+   * was computed.
+   * (Only some AggregatorV3Interface implementations return meaningful values)
+   * @dev Note that answer and updatedAt may change between queries.
+   */
+  function latestRoundData()
+    public
+    view
+    virtual
+    override
+    returns (
+      uint80 roundId,
+      int256 answer,
+      uint256 startedAt,
+      uint256 updatedAt,
+      uint80 answeredInRound
+    )
+  {
+    Phase memory current = currentPhase; // cache storage reads
+
+    (
+      roundId,
+      answer,
+      startedAt,
+      updatedAt,
+      answeredInRound
+    ) = current.aggregator.latestRoundData();
+
+    return addPhaseIds(roundId, answer, startedAt, updatedAt, answeredInRound, current.id);
+  }
+
+  /**
+   * @notice Used if an aggregator contract has been proposed.
+   * @param _roundId the round ID to retrieve the round data for
+   * @return roundId is the round ID for which data was retrieved
+   * @return answer is the answer for the given round
+   * @return startedAt is the timestamp when the round was started.
+   * (Only some AggregatorV3Interface implementations return meaningful values)
+   * @return updatedAt is the timestamp when the round last was updated (i.e.
+   * answer was last computed)
+   * @return answeredInRound is the round ID of the round in which the answer
+   * was computed.
+ */ + function proposedGetRoundData(uint80 _roundId) + public + view + virtual + hasProposal() + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return proposedAggregator.getRoundData(_roundId); + } + + /** + * @notice Used if an aggregator contract has been proposed. + * @return roundId is the round ID for which data was retrieved + * @return answer is the answer for the given round + * @return startedAt is the timestamp when the round was started. + * (Only some AggregatorV3Interface implementations return meaningful values) + * @return updatedAt is the timestamp when the round last was updated (i.e. + * answer was last computed) + * @return answeredInRound is the round ID of the round in which the answer + * was computed. + */ + function proposedLatestRoundData() + public + view + virtual + hasProposal() + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return proposedAggregator.latestRoundData(); + } + + /** + * @notice returns the current phase's aggregator address. + */ + function aggregator() + external + view + returns (address) + { + return address(currentPhase.aggregator); + } + + /** + * @notice returns the current phase's ID. + */ + function phaseId() + external + view + returns (uint16) + { + return currentPhase.id; + } + + /** + * @notice represents the number of decimals the aggregator responses represent. + */ + function decimals() + external + view + override + returns (uint8) + { + return currentPhase.aggregator.decimals(); + } + + /** + * @notice the version number representing the type of aggregator the proxy + * points to. + */ + function version() + external + view + override + returns (uint256) + { + return currentPhase.aggregator.version(); + } + + /** + * @notice returns the description of the aggregator the proxy points to. 
+ */ + function description() + external + view + override + returns (string memory) + { + return currentPhase.aggregator.description(); + } + + /** + * @notice Allows the owner to propose a new address for the aggregator + * @param _aggregator The new address for the aggregator contract + */ + function proposeAggregator(address _aggregator) + external + onlyOwner() + { + proposedAggregator = AggregatorV2V3Interface(_aggregator); + } + + /** + * @notice Allows the owner to confirm and change the address + * to the proposed aggregator + * @dev Reverts if the given address doesn't match what was previously + * proposed + * @param _aggregator The new address for the aggregator contract + */ + function confirmAggregator(address _aggregator) + external + onlyOwner() + { + require(_aggregator == address(proposedAggregator), "Invalid proposed aggregator"); + delete proposedAggregator; + setAggregator(_aggregator); + } + + + /* + * Internal + */ + + function setAggregator(address _aggregator) + internal + { + uint16 id = currentPhase.id + 1; + currentPhase = Phase(id, AggregatorV2V3Interface(_aggregator)); + phaseAggregators[id] = AggregatorV2V3Interface(_aggregator); + } + + function addPhase( + uint16 _phase, + uint64 _originalId + ) + internal + pure + returns (uint80) + { + return uint80(uint256(_phase) << PHASE_OFFSET | _originalId); + } + + function parseIds( + uint256 _roundId + ) + internal + pure + returns (uint16, uint64) + { + uint16 phaseId = uint16(_roundId >> PHASE_OFFSET); + uint64 aggregatorRoundId = uint64(_roundId); + + return (phaseId, aggregatorRoundId); + } + + function addPhaseIds( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound, + uint16 phaseId + ) + internal + pure + returns (uint80, int256, uint256, uint256, uint80) + { + return ( + addPhase(phaseId, uint64(roundId)), + answer, + startedAt, + updatedAt, + addPhase(phaseId, uint64(answeredInRound)) + ); + } + + /* + * Modifiers + */ + + modifier 
hasProposal() { + require(address(proposedAggregator) != address(0), "No proposed aggregator present"); + _; + } + +} \ No newline at end of file diff --git a/contracts/src/v0.6/BlockhashStore.sol b/contracts/src/v0.6/BlockhashStore.sol new file mode 100644 index 00000000..2da687fb --- /dev/null +++ b/contracts/src/v0.6/BlockhashStore.sol @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "./ChainSpecificUtil.sol"; + +/** + * @title BlockhashStore + * @notice This contract provides a way to access blockhashes older than + * the 256 block limit imposed by the BLOCKHASH opcode. + * You may assume that any blockhash stored by the contract is correct. + * Note that the contract depends on the format of serialized Ethereum + * blocks. If a future hardfork of Ethereum changes that format, the + * logic in this contract may become incorrect and an updated version + * would have to be deployed. + */ +contract BlockhashStore { + + mapping(uint => bytes32) internal s_blockhashes; + + /** + * @notice stores blockhash of a given block, assuming it is available through BLOCKHASH + * @param n the number of the block whose blockhash should be stored + */ + function store(uint256 n) public { + bytes32 h = ChainSpecificUtil.getBlockhash(n); + require(h != 0x0, "blockhash(n) failed"); + s_blockhashes[n] = h; + } + + + /** + * @notice stores blockhash of the earliest block still available through BLOCKHASH. + */ + function storeEarliest() external { + store(ChainSpecificUtil.getBlockNumber() - 256); + } + + /** + * @notice stores blockhash after verifying blockheader of child/subsequent block + * @param n the number of the block whose blockhash should be stored + * @param header the rlp-encoded blockheader of block n+1. We verify its correctness by checking + * that it hashes to a stored blockhash, and then extract parentHash to get the n-th blockhash. 
+ */ + function storeVerifyHeader(uint256 n, bytes memory header) public { + require(keccak256(header) == s_blockhashes[n + 1], "header has unknown blockhash"); + + // At this point, we know that header is the correct blockheader for block n+1. + + // The header is an rlp-encoded list. The head item of that list is the 32-byte blockhash of the parent block. + // Based on how rlp works, we know that blockheaders always have the following form: + // 0xf9____a0PARENTHASH... + // ^ ^ ^ + // | | | + // | | +--- PARENTHASH is 32 bytes. rlpenc(PARENTHASH) is 0xa || PARENTHASH. + // | | + // | +--- 2 bytes containing the sum of the lengths of the encoded list items + // | + // +--- 0xf9 because we have a list and (sum of lengths of encoded list items) fits exactly into two bytes. + // + // As a consequence, the PARENTHASH is always at offset 4 of the rlp-encoded block header. + + bytes32 parentHash; + assembly { + parentHash := mload(add(header, 36)) // 36 = 32 byte offset for length prefix of ABI-encoded array + // + 4 byte offset of PARENTHASH (see above) + } + + s_blockhashes[n] = parentHash; + } + + /** + * @notice gets a blockhash from the store. If no hash is known, this function reverts. + * @param n the number of the block whose blockhash should be returned + */ + function getBlockhash(uint256 n) external view returns (bytes32) { + bytes32 h = s_blockhashes[n]; + require(h != 0x0, "blockhash not found in store"); + return h; + } +} diff --git a/contracts/src/v0.6/ChainSpecificUtil.sol b/contracts/src/v0.6/ChainSpecificUtil.sol new file mode 100644 index 00000000..462531e8 --- /dev/null +++ b/contracts/src/v0.6/ChainSpecificUtil.sol @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import {ArbSys} from "./vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol"; + +//@dev A library that abstracts out opcodes that behave differently across chains. +//@dev The methods below return values that are pertinent to the given chain. 
+//@dev For instance, ChainSpecificUtil.getBlockNumber() returns L2 block number in L2 chains
+library ChainSpecificUtil {
+  address private constant ARBSYS_ADDR =
+    address(0x0000000000000000000000000000000000000064);
+  ArbSys private constant ARBSYS = ArbSys(ARBSYS_ADDR);
+  uint256 private constant ARB_MAINNET_CHAIN_ID = 42161;
+  uint256 private constant ARB_GOERLI_TESTNET_CHAIN_ID = 421613;
+
+  function getBlockhash(uint256 blockNumber) internal view returns (bytes32) {
+    uint256 chainid = getChainID();
+    if (
+      chainid == ARB_MAINNET_CHAIN_ID ||
+      chainid == ARB_GOERLI_TESTNET_CHAIN_ID
+    ) {
+      if ((getBlockNumber() - blockNumber) > 256 || blockNumber >= getBlockNumber()) {
+        return "";
+      }
+      return ARBSYS.arbBlockHash(blockNumber);
+    }
+    return blockhash(blockNumber);
+  }
+
+  function getBlockNumber() internal view returns (uint256) {
+    uint256 chainid = getChainID();
+    if (
+      chainid == ARB_MAINNET_CHAIN_ID ||
+      chainid == ARB_GOERLI_TESTNET_CHAIN_ID
+    ) {
+      return ARBSYS.arbBlockNumber();
+    }
+    return block.number;
+  }
+
+  function getChainID() internal pure returns (uint256) {
+    uint256 id;
+    assembly {
+      id := chainid()
+    }
+    return id;
+  }
+}
diff --git a/contracts/src/v0.6/CheckedMath.sol b/contracts/src/v0.6/CheckedMath.sol
new file mode 100644
index 00000000..3f622797
--- /dev/null
+++ b/contracts/src/v0.6/CheckedMath.sol
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: MIT
+// Adapted from https://github.com/OpenZeppelin/openzeppelin-contracts/blob/97894a140d2a698e5a0f913648a8f56d62277a70/contracts/math/SignedSafeMath.sol
+
+pragma solidity ^0.6.0;
+
+library CheckedMath {
+
+  int256 constant internal INT256_MIN = -2**255;
+
+  /**
+   * @dev Adds two signed integers, returns false 2nd param on overflow.
+ */ + function add( + int256 a, + int256 b + ) + internal + pure + returns (int256 result, bool ok) + { + int256 c = a + b; + if ((b >= 0 && c < a) || (b < 0 && c >= a)) return (0, false); + + return (c, true); + } + + /** + * @dev Subtracts two signed integers, returns false 2nd param on overflow. + */ + function sub( + int256 a, + int256 b + ) + internal + pure + returns (int256 result, bool ok) + { + int256 c = a - b; + if ((b < 0 && c <= a) || (b >= 0 && c > a)) return (0, false); + + return (c, true); + } + + + /** + * @dev Multiplies two signed integers, returns false 2nd param on overflow. + */ + function mul( + int256 a, + int256 b + ) + internal + pure + returns (int256 result, bool ok) + { + // Gas optimization: this is cheaper than requiring 'a' not being zero, but the + // benefit is lost if 'b' is also tested. + // See: https://github.com/OpenZeppelin/openzeppelin-contracts/pull/522 + if (a == 0) return (0, true); + if (a == -1 && b == INT256_MIN) return (0, false); + + int256 c = a * b; + if (!(c / a == b)) return (0, false); + + return (c, true); + } + + /** + * @dev Divides two signed integers, returns false 2nd param on overflow. 
+ */ + function div( + int256 a, + int256 b + ) + internal + pure + returns (int256 result, bool ok) + { + if (b == 0) return (0, false); + if (b == -1 && a == INT256_MIN) return (0, false); + + int256 c = a / b; + + return (c, true); + } + +} diff --git a/contracts/src/v0.6/Denominations.sol b/contracts/src/v0.6/Denominations.sol new file mode 100644 index 00000000..339997d3 --- /dev/null +++ b/contracts/src/v0.6/Denominations.sol @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.6.0; + +library Denominations { + address public constant ETH = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE; + address public constant BTC = 0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB; + + // Fiat currencies follow https://en.wikipedia.org/wiki/ISO_4217 + address public constant USD = address(840); + address public constant GBP = address(826); + address public constant EUR = address(978); + address public constant JPY = address(392); + address public constant KRW = address(410); + address public constant CNY = address(156); + address public constant AUD = address(36); + address public constant CAD = address(124); + address public constant CHF = address(756); + address public constant ARS = address(32); + address public constant PHP = address(608); + address public constant NZD = address(554); + address public constant SGD = address(702); + address public constant NGN = address(566); + address public constant ZAR = address(710); + address public constant RUB = address(643); + address public constant INR = address(356); + address public constant BRL = address(986); +} diff --git a/contracts/src/v0.6/DeviationFlaggingValidator.sol b/contracts/src/v0.6/DeviationFlaggingValidator.sol new file mode 100644 index 00000000..be2b2b1a --- /dev/null +++ b/contracts/src/v0.6/DeviationFlaggingValidator.sol @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "./Owned.sol"; +import "./CheckedMath.sol"; +import 
"./interfaces/AggregatorValidatorInterface.sol"; +import "./interfaces/FlagsInterface.sol"; + +/** + * @title The Deviation Flagging Validator contract + * @notice Checks the current value against the previous value, and makes sure + * that it does not deviate outside of some relative range. If the deviation + * threshold is passed then the validator raises a flag on the designated + * flag contract. + */ +contract DeviationFlaggingValidator is Owned, AggregatorValidatorInterface { + using CheckedMath for int256; + + uint32 constant public THRESHOLD_MULTIPLIER = 100000; + + uint32 public flaggingThreshold; + FlagsInterface public flags; + + event FlaggingThresholdUpdated( + uint24 indexed previous, + uint24 indexed current + ); + event FlagsAddressUpdated( + address indexed previous, + address indexed current + ); + + int256 constant private INT256_MIN = -2**255; + + /** + * @notice sets up the validator with its threshold and flag address. + * @param _flags sets the address of the flags contract + * @param _flaggingThreshold sets the threshold that will trigger a flag to be + * raised. Setting the value of 100,000 is equivalent to tolerating a 100% + * change compared to the previous price. + */ + constructor( + address _flags, + uint24 _flaggingThreshold + ) + public + { + setFlagsAddress(_flags); + setFlaggingThreshold(_flaggingThreshold); + } + + /** + * @notice checks whether the parameters count as valid by comparing the + * difference change to the flagging threshold. + * @param _previousRoundId is ignored. + * @param _previousAnswer is used as the median of the difference with the + * current answer to determine if the deviation threshold has been exceeded. + * @param _roundId is ignored. + * @param _answer is the latest answer which is compared for a ratio of change + * to make sure it has not exceeded the flagging threshold. 
+ */ + function validate( + uint256 _previousRoundId, + int256 _previousAnswer, + uint256 _roundId, + int256 _answer + ) + external + override + returns (bool) + { + if (!isValid(_previousRoundId, _previousAnswer, _roundId, _answer)) { + flags.raiseFlag(msg.sender); + return false; + } + + return true; + } + + /** + * @notice checks whether the parameters count as valid by comparing the + * difference change to the flagging threshold and raises a flag on the + * flagging contract if so. + * @param _previousAnswer is used as the median of the difference with the + * current answer to determine if the deviation threshold has been exceeded. + * @param _answer is the current answer which is compared for a ratio of + * change * to make sure it has not exceeded the flagging threshold. + */ + function isValid( + uint256 , + int256 _previousAnswer, + uint256 , + int256 _answer + ) + public + view + returns (bool) + { + if (_previousAnswer == 0) return true; + + (int256 change, bool changeOk) = _previousAnswer.sub(_answer); + (int256 ratioNumerator, bool numOk) = change.mul(THRESHOLD_MULTIPLIER); + (int256 ratio, bool ratioOk) = ratioNumerator.div(_previousAnswer); + (uint256 absRatio, bool absOk) = abs(ratio); + + return changeOk && numOk && ratioOk && absOk && absRatio <= flaggingThreshold; + } + + /** + * @notice updates the flagging threshold + * @param _flaggingThreshold sets the threshold that will trigger a flag to be + * raised. Setting the value of 100,000 is equivalent to tolerating a 100% + * change compared to the previous price. 
+ */ + function setFlaggingThreshold(uint24 _flaggingThreshold) + public + onlyOwner() + { + uint24 previousFT = uint24(flaggingThreshold); + + if (previousFT != _flaggingThreshold) { + flaggingThreshold = _flaggingThreshold; + + emit FlaggingThresholdUpdated(previousFT, _flaggingThreshold); + } + } + + /** + * @notice updates the flagging contract address for raising flags + * @param _flags sets the address of the flags contract + */ + function setFlagsAddress(address _flags) + public + onlyOwner() + { + address previous = address(flags); + + if (previous != _flags) { + flags = FlagsInterface(_flags); + + emit FlagsAddressUpdated(previous, _flags); + } + } + + + // PRIVATE + + function abs( + int256 value + ) + private + pure + returns (uint256, bool) + { + if (value >= 0) return (uint256(value), true); + if (value == CheckedMath.INT256_MIN) return (0, false); + return (uint256(value * -1), true); + } + +} diff --git a/contracts/src/v0.6/EACAggregatorProxy.sol b/contracts/src/v0.6/EACAggregatorProxy.sol new file mode 100644 index 00000000..f1c0e8bc --- /dev/null +++ b/contracts/src/v0.6/EACAggregatorProxy.sol @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "./AggregatorProxy.sol"; +import "./interfaces/AccessControllerInterface.sol"; + +/** + * @title External Access Controlled Aggregator Proxy + * @notice A trusted proxy for updating where current answers are read from + * @notice This contract provides a consistent address for the + * Aggregator and AggregatorV3Interface but delegates where it reads from to the owner, who is + * trusted to update it. + * @notice Only access enabled addresses are allowed to access getters for + * aggregated answers and round information. 
+ */
+contract EACAggregatorProxy is AggregatorProxy {
+
+  AccessControllerInterface public accessController;
+
+  constructor(
+    address _aggregator,
+    address _accessController
+  )
+    public
+    AggregatorProxy(_aggregator)
+  {
+    setController(_accessController);
+  }
+
+  /**
+   * @notice Allows the owner to update the accessController contract address.
+   * @param _accessController The new address for the accessController contract
+   */
+  function setController(address _accessController)
+    public
+    onlyOwner()
+  {
+    accessController = AccessControllerInterface(_accessController);
+  }
+
+  /**
+   * @notice Reads the current answer from aggregator delegated to.
+   * @dev overridden function to add the checkAccess() modifier
+   *
+   * @dev #[deprecated] Use latestRoundData instead. This does not error if no
+   * answer has been reached, it will simply return 0. Either wait to point to
+   * an already answered Aggregator or use the recommended latestRoundData
+   * instead which includes better verification information.
+   */
+  function latestAnswer()
+    public
+    view
+    override
+    checkAccess()
+    returns (int256)
+  {
+    return super.latestAnswer();
+  }
+
+  /**
+   * @notice Reads the last updated height from aggregator delegated to.
+   * @dev overridden function to add the checkAccess() modifier; note this
+   * returns a timestamp, not a round ID.
+   *
+   * @dev #[deprecated] Use latestRoundData instead. This does not error if no
+   * answer has been reached, it will simply return 0. Either wait to point to
+   * an already answered Aggregator or use the recommended latestRoundData
+   * instead which includes better verification information.
+ */ + function latestTimestamp() + public + view + override + checkAccess() + returns (uint256) + { + return super.latestTimestamp(); + } + + /** + * @notice get past rounds answers + * @param _roundId the answer number to retrieve the answer for + * @dev overridden function to add the checkAccess() modifier + * + * @dev #[deprecated] Use getRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended getRoundData + * instead which includes better verification information. + */ + function getAnswer(uint256 _roundId) + public + view + override + checkAccess() + returns (int256) + { + return super.getAnswer(_roundId); + } + + /** + * @notice get block timestamp when an answer was last updated + * @param _roundId the answer number to retrieve the updated timestamp for + * @dev overridden function to add the checkAccess() modifier + * + * @dev #[deprecated] Use getRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended getRoundData + * instead which includes better verification information. + */ + function getTimestamp(uint256 _roundId) + public + view + override + checkAccess() + returns (uint256) + { + return super.getTimestamp(_roundId); + } + + /** + * @notice get the latest completed round where the answer was updated + * @dev overridden function to add the checkAccess() modifier + * + * @dev #[deprecated] Use latestRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended latestRoundData + * instead which includes better verification information. 
+ */ + function latestRound() + public + view + override + checkAccess() + returns (uint256) + { + return super.latestRound(); + } + + /** + * @notice get data about a round. Consumers are encouraged to check + * that they're receiving fresh data by inspecting the updatedAt and + * answeredInRound return values. + * Note that different underlying implementations of AggregatorV3Interface + * have slightly different semantics for some of the return values. Consumers + * should determine what implementations they expect to receive + * data from and validate that they can properly handle return data from all + * of them. + * @param _roundId the round ID to retrieve the round data for + * @return roundId is the round ID from the aggregator for which the data was + * retrieved combined with a phase to ensure that round IDs get larger as + * time moves forward. + * @return answer is the answer for the given round + * @return startedAt is the timestamp when the round was started. + * (Only some AggregatorV3Interface implementations return meaningful values) + * @return updatedAt is the timestamp when the round last was updated (i.e. + * answer was last computed) + * @return answeredInRound is the round ID of the round in which the answer + * was computed. + * (Only some AggregatorV3Interface implementations return meaningful values) + * @dev Note that answer and updatedAt may change between queries. + */ + function getRoundData(uint80 _roundId) + public + view + checkAccess() + override + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return super.getRoundData(_roundId); + } + + /** + * @notice get data about the latest round. Consumers are encouraged to check + * that they're receiving fresh data by inspecting the updatedAt and + * answeredInRound return values. 
+ * Note that different underlying implementations of AggregatorV3Interface + * have slightly different semantics for some of the return values. Consumers + * should determine what implementations they expect to receive + * data from and validate that they can properly handle return data from all + * of them. + * @return roundId is the round ID from the aggregator for which the data was + * retrieved combined with a phase to ensure that round IDs get larger as + * time moves forward. + * @return answer is the answer for the given round + * @return startedAt is the timestamp when the round was started. + * (Only some AggregatorV3Interface implementations return meaningful values) + * @return updatedAt is the timestamp when the round last was updated (i.e. + * answer was last computed) + * @return answeredInRound is the round ID of the round in which the answer + * was computed. + * (Only some AggregatorV3Interface implementations return meaningful values) + * @dev Note that answer and updatedAt may change between queries. + */ + function latestRoundData() + public + view + checkAccess() + override + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return super.latestRoundData(); + } + + /** + * @notice Used if an aggregator contract has been proposed. + * @param _roundId the round ID to retrieve the round data for + * @return roundId is the round ID for which data was retrieved + * @return answer is the answer for the given round + * @return startedAt is the timestamp when the round was started. + * (Only some AggregatorV3Interface implementations return meaningful values) + * @return updatedAt is the timestamp when the round last was updated (i.e. + * answer was last computed) + * @return answeredInRound is the round ID of the round in which the answer + * was computed. 
+ */ + function proposedGetRoundData(uint80 _roundId) + public + view + checkAccess() + hasProposal() + override + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return super.proposedGetRoundData(_roundId); + } + + /** + * @notice Used if an aggregator contract has been proposed. + * @return roundId is the round ID for which data was retrieved + * @return answer is the answer for the given round + * @return startedAt is the timestamp when the round was started. + * (Only some AggregatorV3Interface implementations return meaningful values) + * @return updatedAt is the timestamp when the round last was updated (i.e. + * answer was last computed) + * @return answeredInRound is the round ID of the round in which the answer + * was computed. + */ + function proposedLatestRoundData() + public + view + checkAccess() + hasProposal() + override + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return super.proposedLatestRoundData(); + } + + /** + * @dev reverts if the caller does not have access by the accessController + * contract or is the contract itself. + */ + modifier checkAccess() { + AccessControllerInterface ac = accessController; + require(address(ac) == address(0) || ac.hasAccess(msg.sender, msg.data), "No access"); + _; + } +} \ No newline at end of file diff --git a/contracts/src/v0.6/Flags.sol b/contracts/src/v0.6/Flags.sol new file mode 100644 index 00000000..ce8c9d5f --- /dev/null +++ b/contracts/src/v0.6/Flags.sol @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + + +import "./SimpleReadAccessController.sol"; +import "./interfaces/AccessControllerInterface.sol"; +import "./interfaces/FlagsInterface.sol"; + + +/** + * @title The Flags contract + * @notice Allows flags to signal to any reader on the access control list. 
+ * The owner can set flags, or designate other addresses to set flags. The + * owner must turn the flags off, other setters cannot. An expected pattern is + * to allow addresses to raise flags on themselves, so if you are subscribing to + * FlagOn events you should filter for addresses you care about. + */ +contract Flags is FlagsInterface, SimpleReadAccessController { + + AccessControllerInterface public raisingAccessController; + + mapping(address => bool) private flags; + + event FlagRaised( + address indexed subject + ); + event FlagLowered( + address indexed subject + ); + event RaisingAccessControllerUpdated( + address indexed previous, + address indexed current + ); + + /** + * @param racAddress address for the raising access controller. + */ + constructor( + address racAddress + ) + public + { + setRaisingAccessController(racAddress); + } + + /** + * @notice read the warning flag status of a contract address. + * @param subject The contract address being checked for a flag. + * @return A true value indicates that a flag was raised and a + * false value indicates that no flag was raised. + */ + function getFlag(address subject) + external + view + override + checkAccess() + returns (bool) + { + return flags[subject]; + } + + /** + * @notice read the warning flag status of a contract address. + * @param subjects An array of addresses being checked for a flag. + * @return An array of bools where a true value for any flag indicates that + * a flag was raised and a false value indicates that no flag was raised. + */ + function getFlags(address[] calldata subjects) + external + view + override + checkAccess() + returns (bool[] memory) + { + bool[] memory responses = new bool[](subjects.length); + for (uint256 i = 0; i < subjects.length; i++) { + responses[i] = flags[subjects[i]]; + } + return responses; + } + + /** + * @notice enable the warning flag for an address. + * Access is controlled by raisingAccessController, except for owner + * who always has access. 
+ * @param subject The contract address whose flag is being raised + */ + function raiseFlag(address subject) + external + override + { + require(allowedToRaiseFlags(), "Not allowed to raise flags"); + + tryToRaiseFlag(subject); + } + + /** + * @notice enable the warning flags for multiple addresses. + * Access is controlled by raisingAccessController, except for owner + * who always has access. + * @param subjects List of the contract addresses whose flag is being raised + */ + function raiseFlags(address[] calldata subjects) + external + override + { + require(allowedToRaiseFlags(), "Not allowed to raise flags"); + + for (uint256 i = 0; i < subjects.length; i++) { + tryToRaiseFlag(subjects[i]); + } + } + + /** + * @notice allows owner to disable the warning flags for multiple addresses. + * @param subjects List of the contract addresses whose flag is being lowered + */ + function lowerFlags(address[] calldata subjects) + external + override + onlyOwner() + { + for (uint256 i = 0; i < subjects.length; i++) { + address subject = subjects[i]; + + if (flags[subject]) { + flags[subject] = false; + emit FlagLowered(subject); + } + } + } + + /** + * @notice allows owner to change the access controller for raising flags. + * @param racAddress new address for the raising access controller. 
+ */ + function setRaisingAccessController( + address racAddress + ) + public + override + onlyOwner() + { + address previous = address(raisingAccessController); + + if (previous != racAddress) { + raisingAccessController = AccessControllerInterface(racAddress); + + emit RaisingAccessControllerUpdated(previous, racAddress); + } + } + + + // PRIVATE + + function allowedToRaiseFlags() + private + view + returns (bool) + { + return msg.sender == owner || + raisingAccessController.hasAccess(msg.sender, msg.data); + } + + function tryToRaiseFlag(address subject) + private + { + if (!flags[subject]) { + flags[subject] = true; + emit FlagRaised(subject); + } + } + +} diff --git a/contracts/src/v0.6/FluxAggregator.sol b/contracts/src/v0.6/FluxAggregator.sol new file mode 100644 index 00000000..f9848c32 --- /dev/null +++ b/contracts/src/v0.6/FluxAggregator.sol @@ -0,0 +1,1053 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "./Median.sol"; +import "./Owned.sol"; +import "./SafeMath128.sol"; +import "./SafeMath32.sol"; +import "./SafeMath64.sol"; +import "./interfaces/AggregatorV2V3Interface.sol"; +import "./interfaces/AggregatorValidatorInterface.sol"; +import "./interfaces/LinkTokenInterface.sol"; +import "./vendor/SafeMathPlugin.sol"; + +/** + * @title The Prepaid Aggregator contract + * @notice Handles aggregating data pushed in from off-chain, and unlocks + * payment for oracles as they report. Oracles' submissions are gathered in + * rounds, with each round aggregating the submissions for each oracle into a + * single answer. The latest aggregated answer is exposed as well as historical + * answers and their updated at timestamp. 
+ */ +contract FluxAggregator is AggregatorV2V3Interface, Owned { + using SafeMathPlugin for uint256; + using SafeMath128 for uint128; + using SafeMath64 for uint64; + using SafeMath32 for uint32; + + struct Round { + int256 answer; + uint64 startedAt; + uint64 updatedAt; + uint32 answeredInRound; + } + + struct RoundDetails { + int256[] submissions; + uint32 maxSubmissions; + uint32 minSubmissions; + uint32 timeout; + uint128 paymentAmount; + } + + struct OracleStatus { + uint128 withdrawable; + uint32 startingRound; + uint32 endingRound; + uint32 lastReportedRound; + uint32 lastStartedRound; + int256 latestSubmission; + uint16 index; + address admin; + address pendingAdmin; + } + + struct Requester { + bool authorized; + uint32 delay; + uint32 lastStartedRound; + } + + struct Funds { + uint128 available; + uint128 allocated; + } + + LinkTokenInterface public linkToken; + AggregatorValidatorInterface public validator; + + // Round related params + uint128 public paymentAmount; + uint32 public maxSubmissionCount; + uint32 public minSubmissionCount; + uint32 public restartDelay; + uint32 public timeout; + uint8 public override decimals; + string public override description; + + int256 immutable public minSubmissionValue; + int256 immutable public maxSubmissionValue; + + uint256 constant public override version = 3; + + /** + * @notice To ensure owner isn't withdrawing required funds as oracles are + * submitting updates, we enforce that the contract maintains a minimum + * reserve of RESERVE_ROUNDS * oracleCount() PLI earmarked for payment to + * oracles. (Of course, this doesn't prevent the contract from running out of + * funds without the owner's intervention.) 
+ */
+ uint256 constant private RESERVE_ROUNDS = 2;
+ uint256 constant private MAX_ORACLE_COUNT = 77;
+ uint32 constant private ROUND_MAX = 2**32-1;
+ uint256 private constant VALIDATOR_GAS_LIMIT = 100000;
+ // An error specific to the Aggregator V3 Interface, to prevent possible
+ // confusion around accidentally reading unset values as reported values.
+ string constant private V3_NO_DATA_ERROR = "No data present";
+
+ uint32 private reportingRoundId;
+ uint32 internal latestRoundId;
+ mapping(address => OracleStatus) private oracles;
+ mapping(uint32 => Round) internal rounds;
+ mapping(uint32 => RoundDetails) internal details;
+ mapping(address => Requester) internal requesters;
+ address[] private oracleAddresses;
+ Funds private recordedFunds;
+
+ event AvailableFundsUpdated(
+ uint256 indexed amount
+ );
+ event RoundDetailsUpdated(
+ uint128 indexed paymentAmount,
+ uint32 indexed minSubmissionCount,
+ uint32 indexed maxSubmissionCount,
+ uint32 restartDelay,
+ uint32 timeout // measured in seconds
+ );
+ event OraclePermissionsUpdated(
+ address indexed oracle,
+ bool indexed whitelisted
+ );
+ event OracleAdminUpdated(
+ address indexed oracle,
+ address indexed newAdmin
+ );
+ event OracleAdminUpdateRequested(
+ address indexed oracle,
+ address admin,
+ address newAdmin
+ );
+ event SubmissionReceived(
+ int256 indexed submission,
+ uint32 indexed round,
+ address indexed oracle
+ );
+ event RequesterPermissionsSet(
+ address indexed requester,
+ bool authorized,
+ uint32 delay
+ );
+ event ValidatorUpdated(
+ address indexed previous,
+ address indexed current
+ );
+
+ /**
+ * @notice set up the aggregator with initial configuration
+ * @param _link The address of the PLI token
+ * @param _paymentAmount The amount of PLI paid to each oracle per submission, in wei (units of 10⁻¹⁸ PLI)
+ * @param _timeout is the number of seconds after the previous round that are
+ * allowed to lapse before allowing an oracle to skip an unfinished round
+ * @param
_validator is an optional contract address for validating + * external validation of answers + * @param _minSubmissionValue is an immutable check for a lower bound of what + * submission values are accepted from an oracle + * @param _maxSubmissionValue is an immutable check for an upper bound of what + * submission values are accepted from an oracle + * @param _decimals represents the number of decimals to offset the answer by + * @param _description a short description of what is being reported + */ + constructor( + address _link, + uint128 _paymentAmount, + uint32 _timeout, + address _validator, + int256 _minSubmissionValue, + int256 _maxSubmissionValue, + uint8 _decimals, + string memory _description + ) public { + linkToken = LinkTokenInterface(_link); + updateFutureRounds(_paymentAmount, 0, 0, 0, _timeout); + setValidator(_validator); + minSubmissionValue = _minSubmissionValue; + maxSubmissionValue = _maxSubmissionValue; + decimals = _decimals; + description = _description; + rounds[0].updatedAt = uint64(block.timestamp.sub(uint256(_timeout))); + } + + /** + * @notice called by oracles when they have witnessed a need to update + * @param _roundId is the ID of the round this submission pertains to + * @param _submission is the updated data that the oracle is submitting + */ + function submit(uint256 _roundId, int256 _submission) + external + { + bytes memory error = validateOracleRound(msg.sender, uint32(_roundId)); + require(_submission >= minSubmissionValue, "value below minSubmissionValue"); + require(_submission <= maxSubmissionValue, "value above maxSubmissionValue"); + require(error.length == 0, string(error)); + + oracleInitializeNewRound(uint32(_roundId)); + recordSubmission(_submission, uint32(_roundId)); + (bool updated, int256 newAnswer) = updateRoundAnswer(uint32(_roundId)); + payOracle(uint32(_roundId)); + deleteRoundDetails(uint32(_roundId)); + if (updated) { + validateAnswer(uint32(_roundId), newAnswer); + } + } + + /** + * @notice called by the 
owner to remove and add new oracles as well as + * update the round related parameters that pertain to total oracle count + * @param _removed is the list of addresses for the new Oracles being removed + * @param _added is the list of addresses for the new Oracles being added + * @param _addedAdmins is the admin addresses for the new respective _added + * list. Only this address is allowed to access the respective oracle's funds + * @param _minSubmissions is the new minimum submission count for each round + * @param _maxSubmissions is the new maximum submission count for each round + * @param _restartDelay is the number of rounds an Oracle has to wait before + * they can initiate a round + */ + function changeOracles( + address[] calldata _removed, + address[] calldata _added, + address[] calldata _addedAdmins, + uint32 _minSubmissions, + uint32 _maxSubmissions, + uint32 _restartDelay + ) + external + onlyOwner() + { + for (uint256 i = 0; i < _removed.length; i++) { + removeOracle(_removed[i]); + } + + require(_added.length == _addedAdmins.length, "need same oracle and admin count"); + require(uint256(oracleCount()).add(_added.length) <= MAX_ORACLE_COUNT, "max oracles allowed"); + + for (uint256 i = 0; i < _added.length; i++) { + addOracle(_added[i], _addedAdmins[i]); + } + + updateFutureRounds(paymentAmount, _minSubmissions, _maxSubmissions, _restartDelay, timeout); + } + + /** + * @notice update the round and payment related parameters for subsequent + * rounds + * @param _paymentAmount is the payment amount for subsequent rounds + * @param _minSubmissions is the new minimum submission count for each round + * @param _maxSubmissions is the new maximum submission count for each round + * @param _restartDelay is the number of rounds an Oracle has to wait before + * they can initiate a round + */ + function updateFutureRounds( + uint128 _paymentAmount, + uint32 _minSubmissions, + uint32 _maxSubmissions, + uint32 _restartDelay, + uint32 _timeout + ) + public + 
onlyOwner() + { + uint32 oracleNum = oracleCount(); // Save on storage reads + require(_maxSubmissions >= _minSubmissions, "max must equal/exceed min"); + require(oracleNum >= _maxSubmissions, "max cannot exceed total"); + require(oracleNum == 0 || oracleNum > _restartDelay, "delay cannot exceed total"); + require(recordedFunds.available >= requiredReserve(_paymentAmount), "insufficient funds for payment"); + if (oracleCount() > 0) { + require(_minSubmissions > 0, "min must be greater than 0"); + } + + paymentAmount = _paymentAmount; + minSubmissionCount = _minSubmissions; + maxSubmissionCount = _maxSubmissions; + restartDelay = _restartDelay; + timeout = _timeout; + + emit RoundDetailsUpdated( + paymentAmount, + _minSubmissions, + _maxSubmissions, + _restartDelay, + _timeout + ); + } + + /** + * @notice the amount of payment yet to be withdrawn by oracles + */ + function allocatedFunds() + external + view + returns (uint128) + { + return recordedFunds.allocated; + } + + /** + * @notice the amount of future funding available to oracles + */ + function availableFunds() + external + view + returns (uint128) + { + return recordedFunds.available; + } + + /** + * @notice recalculate the amount of PLI available for payouts + */ + function updateAvailableFunds() + public + { + Funds memory funds = recordedFunds; + + uint256 nowAvailable = linkToken.balanceOf(address(this)).sub(funds.allocated); + + if (funds.available != nowAvailable) { + recordedFunds.available = uint128(nowAvailable); + emit AvailableFundsUpdated(nowAvailable); + } + } + + /** + * @notice returns the number of oracles + */ + function oracleCount() public view returns (uint8) { + return uint8(oracleAddresses.length); + } + + /** + * @notice returns an array of addresses containing the oracles on contract + */ + function getOracles() external view returns (address[] memory) { + return oracleAddresses; + } + + /** + * @notice get the most recently reported answer + * + * @dev #[deprecated] Use 
latestRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended latestRoundData + * instead which includes better verification information. + */ + function latestAnswer() + public + view + virtual + override + returns (int256) + { + return rounds[latestRoundId].answer; + } + + /** + * @notice get the most recent updated at timestamp + * + * @dev #[deprecated] Use latestRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended latestRoundData + * instead which includes better verification information. + */ + function latestTimestamp() + public + view + virtual + override + returns (uint256) + { + return rounds[latestRoundId].updatedAt; + } + + /** + * @notice get the ID of the last updated round + * + * @dev #[deprecated] Use latestRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended latestRoundData + * instead which includes better verification information. + */ + function latestRound() + public + view + virtual + override + returns (uint256) + { + return latestRoundId; + } + + /** + * @notice get past rounds answers + * @param _roundId the round number to retrieve the answer for + * + * @dev #[deprecated] Use getRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended getRoundData + * instead which includes better verification information. 
+ */ + function getAnswer(uint256 _roundId) + public + view + virtual + override + returns (int256) + { + if (validRoundId(_roundId)) { + return rounds[uint32(_roundId)].answer; + } + return 0; + } + + /** + * @notice get timestamp when an answer was last updated + * @param _roundId the round number to retrieve the updated timestamp for + * + * @dev #[deprecated] Use getRoundData instead. This does not error if no + * answer has been reached, it will simply return 0. Either wait to point to + * an already answered Aggregator or use the recommended getRoundData + * instead which includes better verification information. + */ + function getTimestamp(uint256 _roundId) + public + view + virtual + override + returns (uint256) + { + if (validRoundId(_roundId)) { + return rounds[uint32(_roundId)].updatedAt; + } + return 0; + } + + /** + * @notice get data about a round. Consumers are encouraged to check + * that they're receiving fresh data by inspecting the updatedAt and + * answeredInRound return values. + * @param _roundId the round ID to retrieve the round data for + * @return roundId is the round ID for which data was retrieved + * @return answer is the answer for the given round + * @return startedAt is the timestamp when the round was started. This is 0 + * if the round hasn't been started yet. + * @return updatedAt is the timestamp when the round last was updated (i.e. + * answer was last computed) + * @return answeredInRound is the round ID of the round in which the answer + * was computed. answeredInRound may be smaller than roundId when the round + * timed out. answeredInRound is equal to roundId when the round didn't time out + * and was completed regularly. + * @dev Note that for in-progress rounds (i.e. rounds that haven't yet received + * maxSubmissions) answer and updatedAt may change between queries. 
+ */ + function getRoundData(uint80 _roundId) + public + view + virtual + override + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + Round memory r = rounds[uint32(_roundId)]; + + require(r.answeredInRound > 0 && validRoundId(_roundId), V3_NO_DATA_ERROR); + + return ( + _roundId, + r.answer, + r.startedAt, + r.updatedAt, + r.answeredInRound + ); + } + + /** + * @notice get data about the latest round. Consumers are encouraged to check + * that they're receiving fresh data by inspecting the updatedAt and + * answeredInRound return values. Consumers are encouraged to + * use this more fully featured method over the "legacy" latestRound/ + * latestAnswer/latestTimestamp functions. Consumers are encouraged to check + * that they're receiving fresh data by inspecting the updatedAt and + * answeredInRound return values. + * @return roundId is the round ID for which data was retrieved + * @return answer is the answer for the given round + * @return startedAt is the timestamp when the round was started. This is 0 + * if the round hasn't been started yet. + * @return updatedAt is the timestamp when the round last was updated (i.e. + * answer was last computed) + * @return answeredInRound is the round ID of the round in which the answer + * was computed. answeredInRound may be smaller than roundId when the round + * timed out. answeredInRound is equal to roundId when the round didn't time + * out and was completed regularly. + * @dev Note that for in-progress rounds (i.e. rounds that haven't yet + * received maxSubmissions) answer and updatedAt may change between queries. 
+ */ + function latestRoundData() + public + view + virtual + override + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return getRoundData(latestRoundId); + } + + + /** + * @notice query the available amount of PLI for an oracle to withdraw + */ + function withdrawablePayment(address _oracle) + external + view + returns (uint256) + { + return oracles[_oracle].withdrawable; + } + + /** + * @notice transfers the oracle's PLI to another address. Can only be called + * by the oracle's admin. + * @param _oracle is the oracle whose PLI is transferred + * @param _recipient is the address to send the PLI to + * @param _amount is the amount of PLI to send + */ + function withdrawPayment(address _oracle, address _recipient, uint256 _amount) + external + { + require(oracles[_oracle].admin == msg.sender, "only callable by admin"); + + // Safe to downcast _amount because the total amount of PLI is less than 2^128. + uint128 amount = uint128(_amount); + uint128 available = oracles[_oracle].withdrawable; + require(available >= amount, "insufficient withdrawable funds"); + + oracles[_oracle].withdrawable = available.sub(amount); + recordedFunds.allocated = recordedFunds.allocated.sub(amount); + + assert(linkToken.transfer(_recipient, uint256(amount))); + } + + /** + * @notice transfers the owner's PLI to another address + * @param _recipient is the address to send the PLI to + * @param _amount is the amount of PLI to send + */ + function withdrawFunds(address _recipient, uint256 _amount) + external + onlyOwner() + { + uint256 available = uint256(recordedFunds.available); + require(available.sub(requiredReserve(paymentAmount)) >= _amount, "insufficient reserve funds"); + require(linkToken.transfer(_recipient, _amount), "token transfer failed"); + updateAvailableFunds(); + } + + /** + * @notice get the admin address of an oracle + * @param _oracle is the address of the oracle whose admin is being queried + 
*/ + function getAdmin(address _oracle) + external + view + returns (address) + { + return oracles[_oracle].admin; + } + + /** + * @notice transfer the admin address for an oracle + * @param _oracle is the address of the oracle whose admin is being transferred + * @param _newAdmin is the new admin address + */ + function transferAdmin(address _oracle, address _newAdmin) + external + { + require(oracles[_oracle].admin == msg.sender, "only callable by admin"); + oracles[_oracle].pendingAdmin = _newAdmin; + + emit OracleAdminUpdateRequested(_oracle, msg.sender, _newAdmin); + } + + /** + * @notice accept the admin address transfer for an oracle + * @param _oracle is the address of the oracle whose admin is being transferred + */ + function acceptAdmin(address _oracle) + external + { + require(oracles[_oracle].pendingAdmin == msg.sender, "only callable by pending admin"); + oracles[_oracle].pendingAdmin = address(0); + oracles[_oracle].admin = msg.sender; + + emit OracleAdminUpdated(_oracle, msg.sender); + } + + /** + * @notice allows non-oracles to request a new round + */ + function requestNewRound() + external + returns (uint80) + { + require(requesters[msg.sender].authorized, "not authorized requester"); + + uint32 current = reportingRoundId; + require(rounds[current].updatedAt > 0 || timedOut(current), "prev round must be supersedable"); + + uint32 newRoundId = current.add(1); + requesterInitializeNewRound(newRoundId); + return newRoundId; + } + + /** + * @notice allows the owner to specify new non-oracles to start new rounds + * @param _requester is the address to set permissions for + * @param _authorized is a boolean specifying whether they can start new rounds or not + * @param _delay is the number of rounds the requester must wait before starting another round + */ + function setRequesterPermissions(address _requester, bool _authorized, uint32 _delay) + external + onlyOwner() + { + if (requesters[_requester].authorized == _authorized) return; + + if 
(_authorized) {
+ requesters[_requester].authorized = _authorized;
+ requesters[_requester].delay = _delay;
+ } else {
+ delete requesters[_requester];
+ }
+
+ emit RequesterPermissionsSet(_requester, _authorized, _delay);
+ }
+
+ /**
+ * @notice called through PLI's transferAndCall to update available funds
+ * in the same transaction as the funds were transferred to the aggregator
+ * @param _data is mostly ignored. It is checked for length, to be sure
+ * nothing strange is passed in.
+ */
+ function onTokenTransfer(address, uint256, bytes calldata _data)
+ external
+ {
+ require(_data.length == 0, "transfer doesn't accept calldata");
+ updateAvailableFunds();
+ }
+
+ /**
+ * @notice a method to provide all current info oracles need. Intended
+ * only to be callable by oracles. Not for use by contracts to read state.
+ * @param _oracle the address to look up information for.
+ */
+ function oracleRoundState(address _oracle, uint32 _queriedRoundId)
+ external
+ view
+ returns (
+ bool _eligibleToSubmit,
+ uint32 _roundId,
+ int256 _latestSubmission,
+ uint64 _startedAt,
+ uint64 _timeout,
+ uint128 _availableFunds,
+ uint8 _oracleCount,
+ uint128 _paymentAmount
+ )
+ {
+ require(msg.sender == tx.origin, "off-chain reading only");
+
+ if (_queriedRoundId > 0) {
+ Round storage round = rounds[_queriedRoundId];
+ RoundDetails storage details = details[_queriedRoundId];
+ return (
+ eligibleForSpecificRound(_oracle, _queriedRoundId),
+ _queriedRoundId,
+ oracles[_oracle].latestSubmission,
+ round.startedAt,
+ details.timeout,
+ recordedFunds.available,
+ oracleCount(),
+ (round.startedAt > 0 ? details.paymentAmount : paymentAmount)
+ );
+ } else {
+ return oracleRoundStateSuggestRound(_oracle);
+ }
+ }
+
+ /**
+ * @notice method to update the address which does external data validation.
+ * @param _newValidator designates the address of the new validation contract.
+ */ + function setValidator(address _newValidator) + public + onlyOwner() + { + address previous = address(validator); + + if (previous != _newValidator) { + validator = AggregatorValidatorInterface(_newValidator); + + emit ValidatorUpdated(previous, _newValidator); + } + } + + + /** + * Private + */ + + function initializeNewRound(uint32 _roundId) + private + { + updateTimedOutRoundInfo(_roundId.sub(1)); + + reportingRoundId = _roundId; + RoundDetails memory nextDetails = RoundDetails( + new int256[](0), + maxSubmissionCount, + minSubmissionCount, + timeout, + paymentAmount + ); + details[_roundId] = nextDetails; + rounds[_roundId].startedAt = uint64(block.timestamp); + + emit NewRound(_roundId, msg.sender, rounds[_roundId].startedAt); + } + + function oracleInitializeNewRound(uint32 _roundId) + private + { + if (!newRound(_roundId)) return; + uint256 lastStarted = oracles[msg.sender].lastStartedRound; // cache storage reads + if (_roundId <= lastStarted + restartDelay && lastStarted != 0) return; + + initializeNewRound(_roundId); + + oracles[msg.sender].lastStartedRound = _roundId; + } + + function requesterInitializeNewRound(uint32 _roundId) + private + { + if (!newRound(_roundId)) return; + uint256 lastStarted = requesters[msg.sender].lastStartedRound; // cache storage reads + require(_roundId > lastStarted + requesters[msg.sender].delay || lastStarted == 0, "must delay requests"); + + initializeNewRound(_roundId); + + requesters[msg.sender].lastStartedRound = _roundId; + } + + function updateTimedOutRoundInfo(uint32 _roundId) + private + { + if (!timedOut(_roundId)) return; + + uint32 prevId = _roundId.sub(1); + rounds[_roundId].answer = rounds[prevId].answer; + rounds[_roundId].answeredInRound = rounds[prevId].answeredInRound; + rounds[_roundId].updatedAt = uint64(block.timestamp); + + delete details[_roundId]; + } + + function eligibleForSpecificRound(address _oracle, uint32 _queriedRoundId) + private + view + returns (bool _eligible) + { + if 
(rounds[_queriedRoundId].startedAt > 0) { + return acceptingSubmissions(_queriedRoundId) && validateOracleRound(_oracle, _queriedRoundId).length == 0; + } else { + return delayed(_oracle, _queriedRoundId) && validateOracleRound(_oracle, _queriedRoundId).length == 0; + } + } + + function oracleRoundStateSuggestRound(address _oracle) + private + view + returns ( + bool _eligibleToSubmit, + uint32 _roundId, + int256 _latestSubmission, + uint64 _startedAt, + uint64 _timeout, + uint128 _availableFunds, + uint8 _oracleCount, + uint128 _paymentAmount + ) + { + Round storage round = rounds[0]; + OracleStatus storage oracle = oracles[_oracle]; + + bool shouldSupersede = oracle.lastReportedRound == reportingRoundId || !acceptingSubmissions(reportingRoundId); + // Instead of nudging oracles to submit to the next round, the inclusion of + // the shouldSupersede bool in the if condition pushes them towards + // submitting in a currently open round. + if (supersedable(reportingRoundId) && shouldSupersede) { + _roundId = reportingRoundId.add(1); + round = rounds[_roundId]; + + _paymentAmount = paymentAmount; + _eligibleToSubmit = delayed(_oracle, _roundId); + } else { + _roundId = reportingRoundId; + round = rounds[_roundId]; + + _paymentAmount = details[_roundId].paymentAmount; + _eligibleToSubmit = acceptingSubmissions(_roundId); + } + + if (validateOracleRound(_oracle, _roundId).length != 0) { + _eligibleToSubmit = false; + } + + return ( + _eligibleToSubmit, + _roundId, + oracle.latestSubmission, + round.startedAt, + details[_roundId].timeout, + recordedFunds.available, + oracleCount(), + _paymentAmount + ); + } + + function updateRoundAnswer(uint32 _roundId) + internal + returns (bool, int256) + { + if (details[_roundId].submissions.length < details[_roundId].minSubmissions) { + return (false, 0); + } + + int256 newAnswer = Median.calculateInplace(details[_roundId].submissions); + rounds[_roundId].answer = newAnswer; + rounds[_roundId].updatedAt = uint64(block.timestamp); + 
rounds[_roundId].answeredInRound = _roundId; + latestRoundId = _roundId; + + emit AnswerUpdated(newAnswer, _roundId, now); + + return (true, newAnswer); + } + + function validateAnswer( + uint32 _roundId, + int256 _newAnswer + ) + private + { + AggregatorValidatorInterface av = validator; // cache storage reads + if (address(av) == address(0)) return; + + uint32 prevRound = _roundId.sub(1); + uint32 prevAnswerRoundId = rounds[prevRound].answeredInRound; + int256 prevRoundAnswer = rounds[prevRound].answer; + // We do not want the validator to ever prevent reporting, so we limit its + // gas usage and catch any errors that may arise. + try av.validate{gas: VALIDATOR_GAS_LIMIT}( + prevAnswerRoundId, + prevRoundAnswer, + _roundId, + _newAnswer + ) {} catch {} + } + + function payOracle(uint32 _roundId) + private + { + uint128 payment = details[_roundId].paymentAmount; + Funds memory funds = recordedFunds; + funds.available = funds.available.sub(payment); + funds.allocated = funds.allocated.add(payment); + recordedFunds = funds; + oracles[msg.sender].withdrawable = oracles[msg.sender].withdrawable.add(payment); + + emit AvailableFundsUpdated(funds.available); + } + + function recordSubmission(int256 _submission, uint32 _roundId) + private + { + require(acceptingSubmissions(_roundId), "round not accepting submissions"); + + details[_roundId].submissions.push(_submission); + oracles[msg.sender].lastReportedRound = _roundId; + oracles[msg.sender].latestSubmission = _submission; + + emit SubmissionReceived(_submission, _roundId, msg.sender); + } + + function deleteRoundDetails(uint32 _roundId) + private + { + if (details[_roundId].submissions.length < details[_roundId].maxSubmissions) return; + + delete details[_roundId]; + } + + function timedOut(uint32 _roundId) + private + view + returns (bool) + { + uint64 startedAt = rounds[_roundId].startedAt; + uint32 roundTimeout = details[_roundId].timeout; + return startedAt > 0 && roundTimeout > 0 && startedAt.add(roundTimeout) < 
block.timestamp; + } + + function getStartingRound(address _oracle) + private + view + returns (uint32) + { + uint32 currentRound = reportingRoundId; + if (currentRound != 0 && currentRound == oracles[_oracle].endingRound) { + return currentRound; + } + return currentRound.add(1); + } + + function previousAndCurrentUnanswered(uint32 _roundId, uint32 _rrId) + private + view + returns (bool) + { + return _roundId.add(1) == _rrId && rounds[_rrId].updatedAt == 0; + } + + function requiredReserve(uint256 payment) + private + view + returns (uint256) + { + return payment.mul(oracleCount()).mul(RESERVE_ROUNDS); + } + + function addOracle( + address _oracle, + address _admin + ) + private + { + require(!oracleEnabled(_oracle), "oracle already enabled"); + + require(_admin != address(0), "cannot set admin to 0"); + require(oracles[_oracle].admin == address(0) || oracles[_oracle].admin == _admin, "owner cannot overwrite admin"); + + oracles[_oracle].startingRound = getStartingRound(_oracle); + oracles[_oracle].endingRound = ROUND_MAX; + oracles[_oracle].index = uint16(oracleAddresses.length); + oracleAddresses.push(_oracle); + oracles[_oracle].admin = _admin; + + emit OraclePermissionsUpdated(_oracle, true); + emit OracleAdminUpdated(_oracle, _admin); + } + + function removeOracle( + address _oracle + ) + private + { + require(oracleEnabled(_oracle), "oracle not enabled"); + + oracles[_oracle].endingRound = reportingRoundId.add(1); + address tail = oracleAddresses[uint256(oracleCount()).sub(1)]; + uint16 index = oracles[_oracle].index; + oracles[tail].index = index; + delete oracles[_oracle].index; + oracleAddresses[index] = tail; + oracleAddresses.pop(); + + emit OraclePermissionsUpdated(_oracle, false); + } + + function validateOracleRound(address _oracle, uint32 _roundId) + private + view + returns (bytes memory) + { + // cache storage reads + uint32 startingRound = oracles[_oracle].startingRound; + uint32 rrId = reportingRoundId; + + if (startingRound == 0) return "not 
enabled oracle"; + if (startingRound > _roundId) return "not yet enabled oracle"; + if (oracles[_oracle].endingRound < _roundId) return "no longer allowed oracle"; + if (oracles[_oracle].lastReportedRound >= _roundId) return "cannot report on previous rounds"; + if (_roundId != rrId && _roundId != rrId.add(1) && !previousAndCurrentUnanswered(_roundId, rrId)) return "invalid round to report"; + if (_roundId != 1 && !supersedable(_roundId.sub(1))) return "previous round not supersedable"; + } + + function supersedable(uint32 _roundId) + private + view + returns (bool) + { + return rounds[_roundId].updatedAt > 0 || timedOut(_roundId); + } + + function oracleEnabled(address _oracle) + private + view + returns (bool) + { + return oracles[_oracle].endingRound == ROUND_MAX; + } + + function acceptingSubmissions(uint32 _roundId) + private + view + returns (bool) + { + return details[_roundId].maxSubmissions != 0; + } + + function delayed(address _oracle, uint32 _roundId) + private + view + returns (bool) + { + uint256 lastStarted = oracles[_oracle].lastStartedRound; + return _roundId > lastStarted + restartDelay || lastStarted == 0; + } + + function newRound(uint32 _roundId) + private + view + returns (bool) + { + return _roundId == reportingRoundId.add(1); + } + + function validRoundId(uint256 _roundId) + private + pure + returns (bool) + { + return _roundId <= ROUND_MAX; + } + +} diff --git a/contracts/src/v0.6/KeeperBase.sol b/contracts/src/v0.6/KeeperBase.sol new file mode 100644 index 00000000..ce9eccff --- /dev/null +++ b/contracts/src/v0.6/KeeperBase.sol @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +contract KeeperBase { + /** + * @notice method that allows it to be simulated via eth_call by checking that + * the sender is the zero address. 
+ */ + function preventExecution() internal view { + require(tx.origin == address(0), "only for simulated backend"); + } + + /** + * @notice modifier that allows it to be simulated via eth_call by checking + * that the sender is the zero address. + */ + modifier cannotExecute() { + preventExecution(); + _; + } +} diff --git a/contracts/src/v0.6/KeeperCompatible.sol b/contracts/src/v0.6/KeeperCompatible.sol new file mode 100644 index 00000000..2a7ad60b --- /dev/null +++ b/contracts/src/v0.6/KeeperCompatible.sol @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "./KeeperBase.sol"; +import "./interfaces/KeeperCompatibleInterface.sol"; + +abstract contract KeeperCompatible is KeeperBase, KeeperCompatibleInterface {} diff --git a/contracts/src/v0.6/LinkTokenReceiver.sol b/contracts/src/v0.6/LinkTokenReceiver.sol new file mode 100644 index 00000000..e7dcd531 --- /dev/null +++ b/contracts/src/v0.6/LinkTokenReceiver.sol @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +abstract contract LinkTokenReceiver { + + bytes4 constant private ORACLE_REQUEST_SELECTOR = 0x40429946; + uint256 constant private SELECTOR_LENGTH = 4; + uint256 constant private EXPECTED_REQUEST_WORDS = 2; + uint256 constant private MINIMUM_REQUEST_LENGTH = SELECTOR_LENGTH + (32 * EXPECTED_REQUEST_WORDS); + /** + * @notice Called when PLI is sent to the contract via `transferAndCall` + * @dev The data payload's first 2 words will be overwritten by the `_sender` and `_amount` + * values to ensure correctness. Calls oracleRequest. 
+ * @param _sender Address of the sender + * @param _amount Amount of PLI sent (specified in wei) + * @param _data Payload of the transaction + */ + function onTokenTransfer( + address _sender, + uint256 _amount, + bytes memory _data + ) + public + onlyPLI + validRequestLength(_data) + permittedFunctionsForPLI(_data) + { + assembly { + // solhint-disable-next-line avoid-low-level-calls + mstore(add(_data, 36), _sender) // ensure correct sender is passed + // solhint-disable-next-line avoid-low-level-calls + mstore(add(_data, 68), _amount) // ensure correct amount is passed + } + // solhint-disable-next-line avoid-low-level-calls + (bool success, ) = address(this).delegatecall(_data); // calls oracleRequest + require(success, "Unable to create request"); + } + + function getPluginToken() public view virtual returns (address); + + /** + * @dev Reverts if not sent from the PLI token + */ + modifier onlyPLI() { + require(msg.sender == getPluginToken(), "Must use PLI token"); + _; + } + + /** + * @dev Reverts if the given data does not begin with the `oracleRequest` function selector + * @param _data The data payload of the request + */ + modifier permittedFunctionsForPLI(bytes memory _data) { + bytes4 funcSelector; + assembly { + // solhint-disable-next-line avoid-low-level-calls + funcSelector := mload(add(_data, 32)) + } + require(funcSelector == ORACLE_REQUEST_SELECTOR, "Must use whitelisted functions"); + _; + } + + /** + * @dev Reverts if the given payload is less than needed to create a request + * @param _data The request payload + */ + modifier validRequestLength(bytes memory _data) { + require(_data.length >= MINIMUM_REQUEST_LENGTH, "Invalid request length"); + _; + } +} diff --git a/contracts/src/v0.6/Median.sol b/contracts/src/v0.6/Median.sol new file mode 100644 index 00000000..1991cbc7 --- /dev/null +++ b/contracts/src/v0.6/Median.sol @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "./vendor/SafeMathPlugin.sol"; 
+import "./SignedSafeMath.sol"; + +library Median { + using SignedSafeMath for int256; + + int256 constant INT_MAX = 2**255-1; + + /** + * @notice Returns the sorted middle, or the average of the two middle indexed items if the + * array has an even number of elements. + * @dev The list passed as an argument isn't modified. + * @dev This algorithm has expected runtime O(n), but for adversarially chosen inputs + * the runtime is O(n^2). + * @param list The list of elements to compare + */ + function calculate(int256[] memory list) + internal + pure + returns (int256) + { + return calculateInplace(copy(list)); + } + + /** + * @notice See documentation for function calculate. + * @dev The list passed as an argument may be permuted. + */ + function calculateInplace(int256[] memory list) + internal + pure + returns (int256) + { + require(0 < list.length, "list must not be empty"); + uint256 len = list.length; + uint256 middleIndex = len / 2; + if (len % 2 == 0) { + int256 median1; + int256 median2; + (median1, median2) = quickselectTwo(list, 0, len - 1, middleIndex - 1, middleIndex); + return SignedSafeMath.avg(median1, median2); + } else { + return quickselect(list, 0, len - 1, middleIndex); + } + } + + /** + * @notice Maximum length of list that shortSelectTwo can handle + */ + uint256 constant SHORTSELECTTWO_MAX_LENGTH = 7; + + /** + * @notice Select the k1-th and k2-th element from list of length at most 7 + * @dev Uses an optimal sorting network + */ + function shortSelectTwo( + int256[] memory list, + uint256 lo, + uint256 hi, + uint256 k1, + uint256 k2 + ) + private + pure + returns (int256 k1th, int256 k2th) + { + // Uses an optimal sorting network (https://en.wikipedia.org/wiki/Sorting_network) + // for lists of length 7. Network layout is taken from + // http://jgamble.ripco.net/cgi-bin/nw.cgi?inputs=7&algorithm=hibbard&output=svg + + uint256 len = hi + 1 - lo; + int256 x0 = list[lo + 0]; + int256 x1 = 1 < len ? list[lo + 1] : INT_MAX; + int256 x2 = 2 < len ? 
list[lo + 2] : INT_MAX; + int256 x3 = 3 < len ? list[lo + 3] : INT_MAX; + int256 x4 = 4 < len ? list[lo + 4] : INT_MAX; + int256 x5 = 5 < len ? list[lo + 5] : INT_MAX; + int256 x6 = 6 < len ? list[lo + 6] : INT_MAX; + + if (x0 > x1) {(x0, x1) = (x1, x0);} + if (x2 > x3) {(x2, x3) = (x3, x2);} + if (x4 > x5) {(x4, x5) = (x5, x4);} + if (x0 > x2) {(x0, x2) = (x2, x0);} + if (x1 > x3) {(x1, x3) = (x3, x1);} + if (x4 > x6) {(x4, x6) = (x6, x4);} + if (x1 > x2) {(x1, x2) = (x2, x1);} + if (x5 > x6) {(x5, x6) = (x6, x5);} + if (x0 > x4) {(x0, x4) = (x4, x0);} + if (x1 > x5) {(x1, x5) = (x5, x1);} + if (x2 > x6) {(x2, x6) = (x6, x2);} + if (x1 > x4) {(x1, x4) = (x4, x1);} + if (x3 > x6) {(x3, x6) = (x6, x3);} + if (x2 > x4) {(x2, x4) = (x4, x2);} + if (x3 > x5) {(x3, x5) = (x5, x3);} + if (x3 > x4) {(x3, x4) = (x4, x3);} + + uint256 index1 = k1 - lo; + if (index1 == 0) {k1th = x0;} + else if (index1 == 1) {k1th = x1;} + else if (index1 == 2) {k1th = x2;} + else if (index1 == 3) {k1th = x3;} + else if (index1 == 4) {k1th = x4;} + else if (index1 == 5) {k1th = x5;} + else if (index1 == 6) {k1th = x6;} + else {revert("k1 out of bounds");} + + uint256 index2 = k2 - lo; + if (k1 == k2) {return (k1th, k1th);} + else if (index2 == 0) {return (k1th, x0);} + else if (index2 == 1) {return (k1th, x1);} + else if (index2 == 2) {return (k1th, x2);} + else if (index2 == 3) {return (k1th, x3);} + else if (index2 == 4) {return (k1th, x4);} + else if (index2 == 5) {return (k1th, x5);} + else if (index2 == 6) {return (k1th, x6);} + else {revert("k2 out of bounds");} + } + + /** + * @notice Selects the k-th ranked element from list, looking only at indices between lo and hi + * (inclusive). Modifies list in-place. 
+ */ + function quickselect(int256[] memory list, uint256 lo, uint256 hi, uint256 k) + private + pure + returns (int256 kth) + { + require(lo <= k); + require(k <= hi); + while (lo < hi) { + if (hi - lo < SHORTSELECTTWO_MAX_LENGTH) { + int256 ignore; + (kth, ignore) = shortSelectTwo(list, lo, hi, k, k); + return kth; + } + uint256 pivotIndex = partition(list, lo, hi); + if (k <= pivotIndex) { + // since pivotIndex < (original hi passed to partition), + // termination is guaranteed in this case + hi = pivotIndex; + } else { + // since (original lo passed to partition) <= pivotIndex, + // termination is guaranteed in this case + lo = pivotIndex + 1; + } + } + return list[lo]; + } + + /** + * @notice Selects the k1-th and k2-th ranked elements from list, looking only at indices between + * lo and hi (inclusive). Modifies list in-place. + */ + function quickselectTwo( + int256[] memory list, + uint256 lo, + uint256 hi, + uint256 k1, + uint256 k2 + ) + internal // for testing + pure + returns (int256 k1th, int256 k2th) + { + require(k1 < k2); + require(lo <= k1 && k1 <= hi); + require(lo <= k2 && k2 <= hi); + + while (true) { + if (hi - lo < SHORTSELECTTWO_MAX_LENGTH) { + return shortSelectTwo(list, lo, hi, k1, k2); + } + uint256 pivotIdx = partition(list, lo, hi); + if (k2 <= pivotIdx) { + hi = pivotIdx; + } else if (pivotIdx < k1) { + lo = pivotIdx + 1; + } else { + assert(k1 <= pivotIdx && pivotIdx < k2); + k1th = quickselect(list, lo, pivotIdx, k1); + k2th = quickselect(list, pivotIdx + 1, hi, k2); + return (k1th, k2th); + } + } + } + + /** + * @notice Partitions list in-place using Hoare's partitioning scheme. + * Only elements of list between indices lo and hi (inclusive) will be modified. + * Returns an index i, such that: + * - lo <= i < hi + * - forall j in [lo, i]. list[j] <= list[i] + * - forall j in [i, hi]. 
list[i] <= list[j] + */ + function partition(int256[] memory list, uint256 lo, uint256 hi) + private + pure + returns (uint256) + { + // We don't care about overflow of the addition, because it would require a list + // larger than any feasible computer's memory. + int256 pivot = list[(lo + hi) / 2]; + lo -= 1; // this can underflow. that's intentional. + hi += 1; + while (true) { + do { + lo += 1; + } while (list[lo] < pivot); + do { + hi -= 1; + } while (list[hi] > pivot); + if (lo < hi) { + (list[lo], list[hi]) = (list[hi], list[lo]); + } else { + // Let orig_lo and orig_hi be the original values of lo and hi passed to partition. + // Then, hi < orig_hi, because hi decreases *strictly* monotonically + // in each loop iteration and + // - either list[orig_hi] > pivot, in which case the first loop iteration + // will achieve hi < orig_hi; + // - or list[orig_hi] <= pivot, in which case at least two loop iterations are + // needed: + // - lo will have to stop at least once in the interval + // [orig_lo, (orig_lo + orig_hi)/2] + // - (orig_lo + orig_hi)/2 < orig_hi + return hi; + } + } + } + + /** + * @notice Makes an in-memory copy of the array passed in + * @param list Reference to the array to be copied + */ + function copy(int256[] memory list) + private + pure + returns(int256[] memory) + { + int256[] memory list2 = new int256[](list.length); + for (uint256 i = 0; i < list.length; i++) { + list2[i] = list[i]; + } + return list2; + } +} diff --git a/contracts/src/v0.6/Oracle.sol b/contracts/src/v0.6/Oracle.sol new file mode 100644 index 00000000..1c2c00d2 --- /dev/null +++ b/contracts/src/v0.6/Oracle.sol @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "./LinkTokenReceiver.sol"; +import "./interfaces/PluginRequestInterface.sol"; +import "./interfaces/OracleInterface.sol"; +import "./interfaces/LinkTokenInterface.sol"; +import "./interfaces/WithdrawalInterface.sol"; +import "./vendor/Ownable.sol"; +import 
"./vendor/SafeMathPlugin.sol"; + +/** + * @title The Plugin Oracle contract + * @notice Node operators can deploy this contract to fulfill requests sent to them + */ +contract Oracle is PluginRequestInterface, OracleInterface, Ownable, LinkTokenReceiver, WithdrawalInterface { + using SafeMathPlugin for uint256; + + uint256 constant public EXPIRY_TIME = 5 minutes; + uint256 constant private MINIMUM_CONSUMER_GAS_LIMIT = 400000; + // We initialize fields to 1 instead of 0 so that the first invocation + // does not cost more gas. + uint256 constant private ONE_FOR_CONSISTENT_GAS_COST = 1; + + LinkTokenInterface internal LinkToken; + mapping(bytes32 => bytes32) private commitments; + mapping(address => bool) private authorizedNodes; + uint256 private withdrawableTokens = ONE_FOR_CONSISTENT_GAS_COST; + + event OracleRequest( + bytes32 indexed specId, + address requester, + bytes32 requestId, + uint256 payment, + address callbackAddr, + bytes4 callbackFunctionId, + uint256 cancelExpiration, + uint256 dataVersion, + bytes data + ); + + event CancelOracleRequest( + bytes32 indexed requestId + ); + + /** + * @notice Deploy with the address of the PLI token + * @dev Sets the LinkToken address for the imported LinkTokenInterface + * @param _link The address of the PLI token + */ + constructor(address _link) + public + Ownable() + { + LinkToken = LinkTokenInterface(_link); // external but already deployed and unalterable + } + + /** + * @notice Creates the Plugin request + * @dev Stores the hash of the params as the on-chain commitment for the request. + * Emits OracleRequest event for the Plugin node to detect. 
+ * @param _sender The sender of the request + * @param _payment The amount of payment given (specified in wei) + * @param _specId The Job Specification ID + * @param _callbackAddress The callback address for the response + * @param _callbackFunctionId The callback function ID for the response + * @param _nonce The nonce sent by the requester + * @param _dataVersion The specified data version + * @param _data The CBOR payload of the request + */ + function oracleRequest( + address _sender, + uint256 _payment, + bytes32 _specId, + address _callbackAddress, + bytes4 _callbackFunctionId, + uint256 _nonce, + uint256 _dataVersion, + bytes calldata _data + ) + external + override + onlyPLI() + checkCallbackAddress(_callbackAddress) + { + bytes32 requestId = keccak256(abi.encodePacked(_sender, _nonce)); + require(commitments[requestId] == 0, "Must use a unique ID"); + // solhint-disable-next-line not-rely-on-time + uint256 expiration = now.add(EXPIRY_TIME); + + commitments[requestId] = keccak256( + abi.encodePacked( + _payment, + _callbackAddress, + _callbackFunctionId, + expiration + ) + ); + + emit OracleRequest( + _specId, + _sender, + requestId, + _payment, + _callbackAddress, + _callbackFunctionId, + expiration, + _dataVersion, + _data); + } + + /** + * @notice Called by the Plugin node to fulfill requests + * @dev Given params must hash back to the commitment stored from `oracleRequest`. + * Will call the callback address' callback function without bubbling up error + * checking in a `require` so that the node can get paid. 
+ * @param _requestId The fulfillment request ID that must match the requester's + * @param _payment The payment amount that will be released for the oracle (specified in wei) + * @param _callbackAddress The callback address to call for fulfillment + * @param _callbackFunctionId The callback function ID to use for fulfillment + * @param _expiration The expiration that the node should respond by before the requester can cancel + * @param _data The data to return to the consuming contract + * @return Status if the external call was successful + */ + function fulfillOracleRequest( + bytes32 _requestId, + uint256 _payment, + address _callbackAddress, + bytes4 _callbackFunctionId, + uint256 _expiration, + bytes32 _data + ) + external + onlyAuthorizedNode + override + isValidRequest(_requestId) + returns (bool) + { + bytes32 paramsHash = keccak256( + abi.encodePacked( + _payment, + _callbackAddress, + _callbackFunctionId, + _expiration + ) + ); + require(commitments[_requestId] == paramsHash, "Params do not match request ID"); + withdrawableTokens = withdrawableTokens.add(_payment); + delete commitments[_requestId]; + require(gasleft() >= MINIMUM_CONSUMER_GAS_LIMIT, "Must provide consumer enough gas"); + // All updates to the oracle's fulfillment should come before calling the + // callback(addr+functionId) as it is untrusted. 
+ // See: https://solidity.readthedocs.io/en/develop/security-considerations.html#use-the-checks-effects-interactions-pattern + (bool success, ) = _callbackAddress.call(abi.encodeWithSelector(_callbackFunctionId, _requestId, _data)); // solhint-disable-line avoid-low-level-calls + return success; + } + + /** + * @notice Use this to check if a node is authorized for fulfilling requests + * @param _node The address of the Plugin node + * @return The authorization status of the node + */ + function getAuthorizationStatus(address _node) + external + view + override + returns (bool) + { + return authorizedNodes[_node]; + } + + /** + * @notice Sets the fulfillment permission for a given node. Use `true` to allow, `false` to disallow. + * @param _node The address of the Plugin node + * @param _allowed Bool value to determine if the node can fulfill requests + */ + function setFulfillmentPermission(address _node, bool _allowed) + external + override + onlyOwner() + { + authorizedNodes[_node] = _allowed; + } + + /** + * @notice Allows the node operator to withdraw earned PLI to a given address + * @dev The owner of the contract can be another wallet and does not have to be a Plugin node + * @param _recipient The address to send the PLI token to + * @param _amount The amount to send (specified in wei) + */ + function withdraw(address _recipient, uint256 _amount) + external + override(OracleInterface, WithdrawalInterface) + onlyOwner + hasAvailableFunds(_amount) + { + withdrawableTokens = withdrawableTokens.sub(_amount); + assert(LinkToken.transfer(_recipient, _amount)); + } + + /** + * @notice Displays the amount of PLI that is available for the node operator to withdraw + * @dev We use `ONE_FOR_CONSISTENT_GAS_COST` in place of 0 in storage + * @return The amount of withdrawable PLI on the contract + */ + function withdrawable() + external + view + override(OracleInterface, WithdrawalInterface) + onlyOwner() + returns (uint256) + { + return 
withdrawableTokens.sub(ONE_FOR_CONSISTENT_GAS_COST); + } + + /** + * @notice Allows requesters to cancel requests sent to this oracle contract. Will transfer the PLI + * sent for the request back to the requester's address. + * @dev Given params must hash to a commitment stored on the contract in order for the request to be valid + * Emits CancelOracleRequest event. + * @param _requestId The request ID + * @param _payment The amount of payment given (specified in wei) + * @param _callbackFunc The requester's specified callback address + * @param _expiration The time of the expiration for the request + */ + function cancelOracleRequest( + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunc, + uint256 _expiration + ) + external + override + { + bytes32 paramsHash = keccak256( + abi.encodePacked( + _payment, + msg.sender, + _callbackFunc, + _expiration) + ); + require(paramsHash == commitments[_requestId], "Params do not match request ID"); + // solhint-disable-next-line not-rely-on-time + require(_expiration <= now, "Request is not expired"); + + delete commitments[_requestId]; + emit CancelOracleRequest(_requestId); + + assert(LinkToken.transfer(msg.sender, _payment)); + } + + /** + * @notice Returns the address of the PLI token + * @dev This is the public implementation for pluginTokenAddress, which is + * an internal method of the PluginClient contract + */ + function getPluginToken() + public + view + override + returns (address) + { + return address(LinkToken); + } + + // MODIFIERS + + /** + * @dev Reverts if amount requested is greater than withdrawable balance + * @param _amount The given amount to compare to `withdrawableTokens` + */ + modifier hasAvailableFunds(uint256 _amount) { + require(withdrawableTokens >= _amount.add(ONE_FOR_CONSISTENT_GAS_COST), "Amount requested is greater than withdrawable balance"); + _; + } + + /** + * @dev Reverts if request ID does not exist + * @param _requestId The given request ID to check in stored `commitments` + 
*/ + modifier isValidRequest(bytes32 _requestId) { + require(commitments[_requestId] != 0, "Must have a valid requestId"); + _; + } + + /** + * @dev Reverts if `msg.sender` is not authorized to fulfill requests + */ + modifier onlyAuthorizedNode() { + require(authorizedNodes[msg.sender] || msg.sender == owner(), "Not an authorized node to fulfill requests"); + _; + } + + /** + * @dev Reverts if the callback address is the PLI token + * @param _to The callback address + */ + modifier checkCallbackAddress(address _to) { + require(_to != address(LinkToken), "Cannot callback to PLI"); + _; + } + +} diff --git a/contracts/src/v0.6/Owned.sol b/contracts/src/v0.6/Owned.sol new file mode 100644 index 00000000..0dc7c466 --- /dev/null +++ b/contracts/src/v0.6/Owned.sol @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: MIT +pragma solidity >0.6.0 <0.8.0; + +/** + * @title The Owned contract + * @notice A contract with helpers for basic contract ownership. + */ +contract Owned { + + address public owner; + address private pendingOwner; + + event OwnershipTransferRequested( + address indexed from, + address indexed to + ); + event OwnershipTransferred( + address indexed from, + address indexed to + ); + + constructor() public { + owner = msg.sender; + } + + /** + * @dev Allows an owner to begin transferring ownership to a new address, + * pending. + */ + function transferOwnership(address _to) + external + onlyOwner() + { + pendingOwner = _to; + + emit OwnershipTransferRequested(owner, _to); + } + + /** + * @dev Allows an ownership transfer to be completed by the recipient. + */ + function acceptOwnership() + external + { + require(msg.sender == pendingOwner, "Must be proposed owner"); + + address oldOwner = owner; + owner = msg.sender; + pendingOwner = address(0); + + emit OwnershipTransferred(oldOwner, msg.sender); + } + + /** + * @dev Reverts if called by anyone other than the contract owner. 
+ */ + modifier onlyOwner() { + require(msg.sender == owner, "Only callable by owner"); + _; + } + +} diff --git a/contracts/src/v0.6/Plugin.sol b/contracts/src/v0.6/Plugin.sol new file mode 100644 index 00000000..fbc8de3b --- /dev/null +++ b/contracts/src/v0.6/Plugin.sol @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import { CBORPlugin } from "./vendor/CBORPlugin.sol"; +import { BufferPlugin } from "./vendor/BufferPlugin.sol"; + +/** + * @title Library for common Plugin functions + * @dev Uses imported CBOR library for encoding to buffer + */ +library Plugin { + uint256 internal constant defaultBufferSize = 256; // solhint-disable-line const-name-snakecase + + using CBORPlugin for BufferPlugin.buffer; + + struct Request { + bytes32 id; + address callbackAddress; + bytes4 callbackFunctionId; + uint256 nonce; + BufferPlugin.buffer buf; + } + + /** + * @notice Initializes a Plugin request + * @dev Sets the ID, callback address, and callback function signature on the request + * @param self The uninitialized request + * @param _id The Job Specification ID + * @param _callbackAddress The callback address + * @param _callbackFunction The callback function signature + * @return The initialized request + */ + function initialize( + Request memory self, + bytes32 _id, + address _callbackAddress, + bytes4 _callbackFunction + ) internal pure returns (Plugin.Request memory) { + BufferPlugin.init(self.buf, defaultBufferSize); + self.id = _id; + self.callbackAddress = _callbackAddress; + self.callbackFunctionId = _callbackFunction; + return self; + } + + /** + * @notice Sets the data for the buffer without encoding CBOR on-chain + * @dev CBOR can be closed with curly-brackets {} or they can be left off + * @param self The initialized request + * @param _data The CBOR data + */ + function setBuffer(Request memory self, bytes memory _data) + internal pure + { + BufferPlugin.init(self.buf, _data.length); + BufferPlugin.append(self.buf, _data); + } 
+ + /** + * @notice Adds a string value to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _value The string value to add + */ + function add(Request memory self, string memory _key, string memory _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeString(_value); + } + + /** + * @notice Adds a bytes value to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _value The bytes value to add + */ + function addBytes(Request memory self, string memory _key, bytes memory _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeBytes(_value); + } + + /** + * @notice Adds a int256 value to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _value The int256 value to add + */ + function addInt(Request memory self, string memory _key, int256 _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeInt(_value); + } + + /** + * @notice Adds a uint256 value to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _value The uint256 value to add + */ + function addUint(Request memory self, string memory _key, uint256 _value) + internal pure + { + self.buf.encodeString(_key); + self.buf.encodeUInt(_value); + } + + /** + * @notice Adds an array of strings to the request with a given key name + * @param self The initialized request + * @param _key The name of the key + * @param _values The array of string values to add + */ + function addStringArray(Request memory self, string memory _key, string[] memory _values) + internal pure + { + self.buf.encodeString(_key); + self.buf.startArray(); + for (uint256 i = 0; i < _values.length; i++) { + self.buf.encodeString(_values[i]); + } + self.buf.endSequence(); + } +} diff --git 
a/contracts/src/v0.6/PluginClient.sol b/contracts/src/v0.6/PluginClient.sol new file mode 100644 index 00000000..856660b2 --- /dev/null +++ b/contracts/src/v0.6/PluginClient.sol @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "./Plugin.sol"; +import "./interfaces/ENSInterface.sol"; +import "./interfaces/LinkTokenInterface.sol"; +import "./interfaces/PluginRequestInterface.sol"; +import "./interfaces/PointerInterface.sol"; +import { ENSResolver as ENSResolver_Plugin } from "./vendor/ENSResolver.sol"; + +/** + * @title The PluginClient contract + * @notice Contract writers can inherit this contract in order to create requests for the + * Plugin network + */ +contract PluginClient { + using Plugin for Plugin.Request; + + uint256 constant internal PLI = 10**18; + uint256 constant private AMOUNT_OVERRIDE = 0; + address constant private SENDER_OVERRIDE = address(0); + uint256 constant private ARGS_VERSION = 1; + bytes32 constant private ENS_TOKEN_SUBNAME = keccak256("link"); + bytes32 constant private ENS_ORACLE_SUBNAME = keccak256("oracle"); + address constant private PLI_TOKEN_POINTER = 0xC89bD4E1632D3A43CB03AAAd5262cbe4038Bc571; + + ENSInterface private ens; + bytes32 private ensNode; + LinkTokenInterface private link; + PluginRequestInterface private oracle; + uint256 private requestCount = 1; + mapping(bytes32 => address) private pendingRequests; + + event PluginRequested(bytes32 indexed id); + event PluginFulfilled(bytes32 indexed id); + event PluginCancelled(bytes32 indexed id); + + /** + * @notice Creates a request that can hold additional parameters + * @param _specId The Job Specification ID that the request will be created for + * @param _callbackAddress The callback address that the response will be sent to + * @param _callbackFunctionSignature The callback function signature to use for the callback address + * @return A Plugin Request struct in memory + */ + function buildPluginRequest( + bytes32 _specId, + address 
_callbackAddress, + bytes4 _callbackFunctionSignature + ) internal pure returns (Plugin.Request memory) { + Plugin.Request memory req; + return req.initialize(_specId, _callbackAddress, _callbackFunctionSignature); + } + + /** + * @notice Creates a Plugin request to the stored oracle address + * @dev Calls `pluginRequestTo` with the stored oracle address + * @param _req The initialized Plugin Request + * @param _payment The amount of PLI to send for the request + * @return requestId The request ID + */ + function sendPluginRequest(Plugin.Request memory _req, uint256 _payment) + internal + returns (bytes32) + { + return sendPluginRequestTo(address(oracle), _req, _payment); + } + + /** + * @notice Creates a Plugin request to the specified oracle address + * @dev Generates and stores a request ID, increments the local nonce, and uses `transferAndCall` to + * send PLI which creates a request on the target oracle contract. + * Emits PluginRequested event. + * @param _oracle The address of the oracle for the request + * @param _req The initialized Plugin Request + * @param _payment The amount of PLI to send for the request + * @return requestId The request ID + */ + function sendPluginRequestTo(address _oracle, Plugin.Request memory _req, uint256 _payment) + internal + returns (bytes32 requestId) + { + requestId = keccak256(abi.encodePacked(this, requestCount)); + _req.nonce = requestCount; + pendingRequests[requestId] = _oracle; + emit PluginRequested(requestId); + require(link.transferAndCall(_oracle, _payment, encodeRequest(_req)), "unable to transferAndCall to oracle"); + requestCount += 1; + + return requestId; + } + + /** + * @notice Allows a request to be cancelled if it has not been fulfilled + * @dev Requires keeping track of the expiration value emitted from the oracle contract. + * Deletes the request from the `pendingRequests` mapping. + * Emits PluginCancelled event. 
+ * @param _requestId The request ID + * @param _payment The amount of PLI sent for the request + * @param _callbackFunc The callback function specified for the request + * @param _expiration The time of the expiration for the request + */ + function cancelPluginRequest( + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunc, + uint256 _expiration + ) + internal + { + PluginRequestInterface requested = PluginRequestInterface(pendingRequests[_requestId]); + delete pendingRequests[_requestId]; + emit PluginCancelled(_requestId); + requested.cancelOracleRequest(_requestId, _payment, _callbackFunc, _expiration); + } + + /** + * @notice Sets the stored oracle address + * @param _oracle The address of the oracle contract + */ + function setPluginOracle(address _oracle) internal { + oracle = PluginRequestInterface(_oracle); + } + + /** + * @notice Sets the PLI token address + * @param _link The address of the PLI token contract + */ + function setPluginToken(address _link) internal { + link = LinkTokenInterface(_link); + } + + /** + * @notice Sets the Plugin token address for the public + * network as given by the Pointer contract + */ + function setPublicPluginToken() internal { + setPluginToken(PointerInterface(PLI_TOKEN_POINTER).getAddress()); + } + + /** + * @notice Retrieves the stored address of the PLI token + * @return The address of the PLI token + */ + function pluginTokenAddress() + internal + view + returns (address) + { + return address(link); + } + + /** + * @notice Retrieves the stored address of the oracle contract + * @return The address of the oracle contract + */ + function pluginOracleAddress() + internal + view + returns (address) + { + return address(oracle); + } + + /** + * @notice Allows for a request which was created on another contract to be fulfilled + * on this contract + * @param _oracle The address of the oracle contract that will fulfill the request + * @param _requestId The request ID used for the response + */ + function 
addPluginExternalRequest(address _oracle, bytes32 _requestId) + internal + notPendingRequest(_requestId) + { + pendingRequests[_requestId] = _oracle; + } + + /** + * @notice Sets the stored oracle and PLI token contracts with the addresses resolved by ENS + * @dev Accounts for subnodes having different resolvers + * @param _ens The address of the ENS contract + * @param _node The ENS node hash + */ + function usePluginWithENS(address _ens, bytes32 _node) + internal + { + ens = ENSInterface(_ens); + ensNode = _node; + bytes32 linkSubnode = keccak256(abi.encodePacked(ensNode, ENS_TOKEN_SUBNAME)); + ENSResolver_Plugin resolver = ENSResolver_Plugin(ens.resolver(linkSubnode)); + setPluginToken(resolver.addr(linkSubnode)); + updatePluginOracleWithENS(); + } + + /** + * @notice Sets the stored oracle contract with the address resolved by ENS + * @dev This may be called on its own as long as `usePluginWithENS` has been called previously + */ + function updatePluginOracleWithENS() + internal + { + bytes32 oracleSubnode = keccak256(abi.encodePacked(ensNode, ENS_ORACLE_SUBNAME)); + ENSResolver_Plugin resolver = ENSResolver_Plugin(ens.resolver(oracleSubnode)); + setPluginOracle(resolver.addr(oracleSubnode)); + } + + /** + * @notice Encodes the request to be sent to the oracle contract + * @dev The Plugin node expects values to be in order for the request to be picked up. Order of types + * will be validated in the oracle contract. 
+ * @param _req The initialized Plugin Request + * @return The bytes payload for the `transferAndCall` method + */ + function encodeRequest(Plugin.Request memory _req) + private + view + returns (bytes memory) + { + return abi.encodeWithSelector( + oracle.oracleRequest.selector, + SENDER_OVERRIDE, // Sender value - overridden by onTokenTransfer by the requesting contract's address + AMOUNT_OVERRIDE, // Amount value - overridden by onTokenTransfer by the actual amount of PLI sent + _req.id, + _req.callbackAddress, + _req.callbackFunctionId, + _req.nonce, + ARGS_VERSION, + _req.buf.buf); + } + + /** + * @notice Ensures that the fulfillment is valid for this contract + * @dev Use if the contract developer prefers methods instead of modifiers for validation + * @param _requestId The request ID for fulfillment + */ + function validatePluginCallback(bytes32 _requestId) + internal + recordPluginFulfillment(_requestId) + // solhint-disable-next-line no-empty-blocks + {} + + /** + * @dev Reverts if the sender is not the oracle of the request. + * Emits PluginFulfilled event. 
+ * @param _requestId The request ID for fulfillment + */ + modifier recordPluginFulfillment(bytes32 _requestId) { + require(msg.sender == pendingRequests[_requestId], + "Source must be the oracle of the request"); + delete pendingRequests[_requestId]; + emit PluginFulfilled(_requestId); + _; + } + + /** + * @dev Reverts if the request is already pending + * @param _requestId The request ID for fulfillment + */ + modifier notPendingRequest(bytes32 _requestId) { + require(pendingRequests[_requestId] == address(0), "Request is already pending"); + _; + } +} diff --git a/contracts/src/v0.6/SafeMath128.sol b/contracts/src/v0.6/SafeMath128.sol new file mode 100644 index 00000000..c79665bc --- /dev/null +++ b/contracts/src/v0.6/SafeMath128.sol @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +/** + * @dev Wrappers over Solidity's arithmetic operations with added overflow + * checks. + * + * Arithmetic operations in Solidity wrap on overflow. This can easily result + * in bugs, because programmers usually assume that an overflow raises an + * error, which is the standard behavior in high level programming languages. + * `SafeMath` restores this intuition by reverting the transaction when an + * operation overflows. + * + * Using this library instead of the unchecked operations eliminates an entire + * class of bugs, so it's recommended to use it always. + * + * This library is a version of Open Zeppelin's SafeMath, modified to support + * unsigned 128 bit integers. + */ +library SafeMath128 { + /** + * @dev Returns the addition of two unsigned integers, reverting on + * overflow. + * + * Counterpart to Solidity's `+` operator. + * + * Requirements: + * - Addition cannot overflow. 
+ */ + function add(uint128 a, uint128 b) internal pure returns (uint128) { + uint128 c = a + b; + require(c >= a, "SafeMath: addition overflow"); + + return c; + } + + /** + * @dev Returns the subtraction of two unsigned integers, reverting on + * overflow (when the result is negative). + * + * Counterpart to Solidity's `-` operator. + * + * Requirements: + * - Subtraction cannot overflow. + */ + function sub(uint128 a, uint128 b) internal pure returns (uint128) { + require(b <= a, "SafeMath: subtraction overflow"); + uint128 c = a - b; + + return c; + } + + /** + * @dev Returns the multiplication of two unsigned integers, reverting on + * overflow. + * + * Counterpart to Solidity's `*` operator. + * + * Requirements: + * - Multiplication cannot overflow. + */ + function mul(uint128 a, uint128 b) internal pure returns (uint128) { + // Gas optimization: this is cheaper than requiring 'a' not being zero, but the + // benefit is lost if 'b' is also tested. + // See: https://github.com/OpenZeppelin/openzeppelin-solidity/pull/522 + if (a == 0) { + return 0; + } + + uint128 c = a * b; + require(c / a == b, "SafeMath: multiplication overflow"); + + return c; + } + + /** + * @dev Returns the integer division of two unsigned integers. Reverts on + * division by zero. The result is rounded towards zero. + * + * Counterpart to Solidity's `/` operator. Note: this function uses a + * `revert` opcode (which leaves remaining gas untouched) while Solidity + * uses an invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. + */ + function div(uint128 a, uint128 b) internal pure returns (uint128) { + // Solidity only automatically asserts when dividing by 0 + require(b > 0, "SafeMath: division by zero"); + uint128 c = a / b; + // assert(a == b * c + a % b); // There is no case in which this doesn't hold + + return c; + } + + /** + * @dev Returns the remainder of dividing two unsigned integers. 
(unsigned integer modulo), + * Reverts when dividing by zero. + * + * Counterpart to Solidity's `%` operator. This function uses a `revert` + * opcode (which leaves remaining gas untouched) while Solidity uses an + * invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. + */ + function mod(uint128 a, uint128 b) internal pure returns (uint128) { + require(b != 0, "SafeMath: modulo by zero"); + return a % b; + } +} diff --git a/contracts/src/v0.6/SafeMath32.sol b/contracts/src/v0.6/SafeMath32.sol new file mode 100644 index 00000000..21944bb0 --- /dev/null +++ b/contracts/src/v0.6/SafeMath32.sol @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +/** + * @dev Wrappers over Solidity's arithmetic operations with added overflow + * checks. + * + * Arithmetic operations in Solidity wrap on overflow. This can easily result + * in bugs, because programmers usually assume that an overflow raises an + * error, which is the standard behavior in high level programming languages. + * `SafeMath` restores this intuition by reverting the transaction when an + * operation overflows. + * + * Using this library instead of the unchecked operations eliminates an entire + * class of bugs, so it's recommended to use it always. + * + * This library is a version of Open Zeppelin's SafeMath, modified to support + * unsigned 32 bit integers. + */ +library SafeMath32 { + /** + * @dev Returns the addition of two unsigned integers, reverting on + * overflow. + * + * Counterpart to Solidity's `+` operator. + * + * Requirements: + * - Addition cannot overflow. + */ + function add(uint32 a, uint32 b) internal pure returns (uint32) { + uint32 c = a + b; + require(c >= a, "SafeMath: addition overflow"); + + return c; + } + + /** + * @dev Returns the subtraction of two unsigned integers, reverting on + * overflow (when the result is negative). + * + * Counterpart to Solidity's `-` operator. 
+ * + * Requirements: + * - Subtraction cannot overflow. + */ + function sub(uint32 a, uint32 b) internal pure returns (uint32) { + require(b <= a, "SafeMath: subtraction overflow"); + uint32 c = a - b; + + return c; + } + + /** + * @dev Returns the multiplication of two unsigned integers, reverting on + * overflow. + * + * Counterpart to Solidity's `*` operator. + * + * Requirements: + * - Multiplication cannot overflow. + */ + function mul(uint32 a, uint32 b) internal pure returns (uint32) { + // Gas optimization: this is cheaper than requiring 'a' not being zero, but the + // benefit is lost if 'b' is also tested. + // See: https://github.com/OpenZeppelin/openzeppelin-solidity/pull/522 + if (a == 0) { + return 0; + } + + uint32 c = a * b; + require(c / a == b, "SafeMath: multiplication overflow"); + + return c; + } + + /** + * @dev Returns the integer division of two unsigned integers. Reverts on + * division by zero. The result is rounded towards zero. + * + * Counterpart to Solidity's `/` operator. Note: this function uses a + * `revert` opcode (which leaves remaining gas untouched) while Solidity + * uses an invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. + */ + function div(uint32 a, uint32 b) internal pure returns (uint32) { + // Solidity only automatically asserts when dividing by 0 + require(b > 0, "SafeMath: division by zero"); + uint32 c = a / b; + // assert(a == b * c + a % b); // There is no case in which this doesn't hold + + return c; + } + + /** + * @dev Returns the remainder of dividing two unsigned integers. (unsigned integer modulo), + * Reverts when dividing by zero. + * + * Counterpart to Solidity's `%` operator. This function uses a `revert` + * opcode (which leaves remaining gas untouched) while Solidity uses an + * invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. 
+ */ + function mod(uint32 a, uint32 b) internal pure returns (uint32) { + require(b != 0, "SafeMath: modulo by zero"); + return a % b; + } +} diff --git a/contracts/src/v0.6/SafeMath64.sol b/contracts/src/v0.6/SafeMath64.sol new file mode 100644 index 00000000..2bb3b791 --- /dev/null +++ b/contracts/src/v0.6/SafeMath64.sol @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +/** + * @dev Wrappers over Solidity's arithmetic operations with added overflow + * checks. + * + * Arithmetic operations in Solidity wrap on overflow. This can easily result + * in bugs, because programmers usually assume that an overflow raises an + * error, which is the standard behavior in high level programming languages. + * `SafeMath` restores this intuition by reverting the transaction when an + * operation overflows. + * + * Using this library instead of the unchecked operations eliminates an entire + * class of bugs, so it's recommended to use it always. + * + * This library is a version of Open Zeppelin's SafeMath, modified to support + * unsigned 64 bit integers. + */ +library SafeMath64 { + /** + * @dev Returns the addition of two unsigned integers, reverting on + * overflow. + * + * Counterpart to Solidity's `+` operator. + * + * Requirements: + * - Addition cannot overflow. + */ + function add(uint64 a, uint64 b) internal pure returns (uint64) { + uint64 c = a + b; + require(c >= a, "SafeMath: addition overflow"); + + return c; + } + + /** + * @dev Returns the subtraction of two unsigned integers, reverting on + * overflow (when the result is negative). + * + * Counterpart to Solidity's `-` operator. + * + * Requirements: + * - Subtraction cannot overflow. + */ + function sub(uint64 a, uint64 b) internal pure returns (uint64) { + require(b <= a, "SafeMath: subtraction overflow"); + uint64 c = a - b; + + return c; + } + + /** + * @dev Returns the multiplication of two unsigned integers, reverting on + * overflow. 
+ * + * Counterpart to Solidity's `*` operator. + * + * Requirements: + * - Multiplication cannot overflow. + */ + function mul(uint64 a, uint64 b) internal pure returns (uint64) { + // Gas optimization: this is cheaper than requiring 'a' not being zero, but the + // benefit is lost if 'b' is also tested. + // See: https://github.com/OpenZeppelin/openzeppelin-solidity/pull/522 + if (a == 0) { + return 0; + } + + uint64 c = a * b; + require(c / a == b, "SafeMath: multiplication overflow"); + + return c; + } + + /** + * @dev Returns the integer division of two unsigned integers. Reverts on + * division by zero. The result is rounded towards zero. + * + * Counterpart to Solidity's `/` operator. Note: this function uses a + * `revert` opcode (which leaves remaining gas untouched) while Solidity + * uses an invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. + */ + function div(uint64 a, uint64 b) internal pure returns (uint64) { + // Solidity only automatically asserts when dividing by 0 + require(b > 0, "SafeMath: division by zero"); + uint64 c = a / b; + // assert(a == b * c + a % b); // There is no case in which this doesn't hold + + return c; + } + + /** + * @dev Returns the remainder of dividing two unsigned integers. (unsigned integer modulo), + * Reverts when dividing by zero. + * + * Counterpart to Solidity's `%` operator. This function uses a `revert` + * opcode (which leaves remaining gas untouched) while Solidity uses an + * invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. 
+ */ + function mod(uint64 a, uint64 b) internal pure returns (uint64) { + require(b != 0, "SafeMath: modulo by zero"); + return a % b; + } +} diff --git a/contracts/src/v0.6/SignedSafeMath.sol b/contracts/src/v0.6/SignedSafeMath.sol new file mode 100644 index 00000000..32941de7 --- /dev/null +++ b/contracts/src/v0.6/SignedSafeMath.sol @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +library SignedSafeMath { + int256 constant private _INT256_MIN = -2**255; + + /** + * @dev Multiplies two signed integers, reverts on overflow. + */ + function mul(int256 a, int256 b) internal pure returns (int256) { + // Gas optimization: this is cheaper than requiring 'a' not being zero, but the + // benefit is lost if 'b' is also tested. + // See: https://github.com/OpenZeppelin/openzeppelin-contracts/pull/522 + if (a == 0) { + return 0; + } + + require(!(a == -1 && b == _INT256_MIN), "SignedSafeMath: multiplication overflow"); + + int256 c = a * b; + require(c / a == b, "SignedSafeMath: multiplication overflow"); + + return c; + } + + /** + * @dev Integer division of two signed integers truncating the quotient, reverts on division by zero. + */ + function div(int256 a, int256 b) internal pure returns (int256) { + require(b != 0, "SignedSafeMath: division by zero"); + require(!(b == -1 && a == _INT256_MIN), "SignedSafeMath: division overflow"); + + int256 c = a / b; + + return c; + } + + /** + * @dev Subtracts two signed integers, reverts on overflow. + */ + function sub(int256 a, int256 b) internal pure returns (int256) { + int256 c = a - b; + require((b >= 0 && c <= a) || (b < 0 && c > a), "SignedSafeMath: subtraction overflow"); + + return c; + } + + /** + * @dev Adds two signed integers, reverts on overflow. 
+ */ + function add(int256 a, int256 b) internal pure returns (int256) { + int256 c = a + b; + require((b >= 0 && c >= a) || (b < 0 && c < a), "SignedSafeMath: addition overflow"); + + return c; + } + + /** + * @notice Computes average of two signed integers, ensuring that the computation + * doesn't overflow. + * @dev If the result is not an integer, it is rounded towards zero. For example, + * avg(-3, -4) = -3 + */ + function avg(int256 _a, int256 _b) + internal + pure + returns (int256) + { + if ((_a < 0 && _b > 0) || (_a > 0 && _b < 0)) { + return add(_a, _b) / 2; + } + int256 remainder = (_a % 2 + _b % 2) / 2; + return add(add(_a / 2, _b / 2), remainder); + } +} diff --git a/contracts/src/v0.6/SimpleReadAccessController.sol b/contracts/src/v0.6/SimpleReadAccessController.sol new file mode 100644 index 00000000..fbd714a1 --- /dev/null +++ b/contracts/src/v0.6/SimpleReadAccessController.sol @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "./SimpleWriteAccessController.sol"; + +/** + * @title SimpleReadAccessController + * @notice Gives access to: + * - any externally owned account (note that off-chain actors can always read + * any contract storage regardless of on-chain access control measures, so this + * does not weaken the access control while improving usability) + * - accounts explicitly added to an access list + * @dev SimpleReadAccessController is not suitable for access controlling writes + * since it grants any externally owned account access! See + * SimpleWriteAccessController for that. 
+ */ +contract SimpleReadAccessController is SimpleWriteAccessController { + + /** + * @notice Returns the access of an address + * @param _user The address to query + */ + function hasAccess( + address _user, + bytes memory _calldata + ) + public + view + virtual + override + returns (bool) + { + return super.hasAccess(_user, _calldata) || _user == tx.origin; + } + +} diff --git a/contracts/src/v0.6/SimpleWriteAccessController.sol b/contracts/src/v0.6/SimpleWriteAccessController.sol new file mode 100644 index 00000000..02a6db7b --- /dev/null +++ b/contracts/src/v0.6/SimpleWriteAccessController.sol @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: MIT +pragma solidity >0.6.0 <0.8.0; + +import "./Owned.sol"; +import "./interfaces/AccessControllerInterface.sol"; + +/** + * @title SimpleWriteAccessController + * @notice Gives access to accounts explicitly added to an access list by the + * controller's owner. + * @dev does not grant any special permissions to externally owned accounts; see + * SimpleReadAccessController for that. 
+ */ +contract SimpleWriteAccessController is AccessControllerInterface, Owned { + + bool public checkEnabled; + mapping(address => bool) internal accessList; + + event AddedAccess(address user); + event RemovedAccess(address user); + event CheckAccessEnabled(); + event CheckAccessDisabled(); + + constructor() + public + { + checkEnabled = true; + } + + /** + * @notice Returns the access of an address + * @param _user The address to query + */ + function hasAccess( + address _user, + bytes memory + ) + public + view + virtual + override + returns (bool) + { + return accessList[_user] || !checkEnabled; + } + + /** + * @notice Adds an address to the access list + * @param _user The address to add + */ + function addAccess(address _user) + external + onlyOwner() + { + if (!accessList[_user]) { + accessList[_user] = true; + + emit AddedAccess(_user); + } + } + + /** + * @notice Removes an address from the access list + * @param _user The address to remove + */ + function removeAccess(address _user) + external + onlyOwner() + { + if (accessList[_user]) { + accessList[_user] = false; + + emit RemovedAccess(_user); + } + } + + /** + * @notice makes the access check enforced + */ + function enableAccessCheck() + external + onlyOwner() + { + if (!checkEnabled) { + checkEnabled = true; + + emit CheckAccessEnabled(); + } + } + + /** + * @notice makes the access check unenforced + */ + function disableAccessCheck() + external + onlyOwner() + { + if (checkEnabled) { + checkEnabled = false; + + emit CheckAccessDisabled(); + } + } + + /** + * @dev reverts if the caller does not have access + */ + modifier checkAccess() { + require(hasAccess(msg.sender, msg.data), "No access"); + _; + } +} diff --git a/contracts/src/v0.6/VRF.sol b/contracts/src/v0.6/VRF.sol new file mode 100644 index 00000000..9cbb145e --- /dev/null +++ b/contracts/src/v0.6/VRF.sol @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +/** 
**************************************************************************** + * @notice Verification of verifiable-random-function (VRF) proofs, following + * @notice https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.3 + * @notice See https://eprint.iacr.org/2017/099.pdf for security proofs. + + * @dev Bibliographic references: + + * @dev Goldberg, et al., "Verifiable Random Functions (VRFs)", Internet Draft + * @dev draft-irtf-cfrg-vrf-05, IETF, Aug 11 2019, + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05 + + * @dev Papadopoulos, et al., "Making NSEC5 Practical for DNSSEC", Cryptology + * @dev ePrint Archive, Report 2017/099, https://eprint.iacr.org/2017/099.pdf + * **************************************************************************** + * @dev USAGE + + * @dev The main entry point is randomValueFromVRFProof. See its docstring. + * **************************************************************************** + * @dev PURPOSE + + * @dev Reggie the Random Oracle (not his real job) wants to provide randomness + * @dev to Vera the verifier in such a way that Vera can be sure he's not + * @dev making his output up to suit himself. Reggie provides Vera a public key + * @dev to which he knows the secret key. Each time Vera provides a seed to + * @dev Reggie, he gives back a value which is computed completely + * @dev deterministically from the seed and the secret key. + + * @dev Reggie provides a proof by which Vera can verify that the output was + * @dev correctly computed once Reggie tells it to her, but without that proof, + * @dev the output is computationally indistinguishable to her from a uniform + * @dev random sample from the output space. + + * @dev The purpose of this contract is to perform that verification. 
+ * **************************************************************************** + * @dev DESIGN NOTES + + * @dev The VRF algorithm verified here satisfies the full uniqueness, full + * @dev collision resistance, and full pseudorandomness security properties. + * @dev See "SECURITY PROPERTIES" below, and + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-3 + + * @dev An elliptic curve point is generally represented in the solidity code + * @dev as a uint256[2], corresponding to its affine coordinates in + * @dev GF(FIELD_SIZE). + + * @dev For the sake of efficiency, this implementation deviates from the spec + * @dev in some minor ways: + + * @dev - Keccak hash rather than the SHA256 hash recommended in + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.5 + * @dev Keccak costs much less gas on the EVM, and provides similar security. + + * @dev - Secp256k1 curve instead of the P-256 or ED25519 curves recommended in + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.5 + * @dev For curve-point multiplication, it's much cheaper to abuse ECRECOVER + + * @dev - hashToCurve recursively hashes until it finds a curve x-ordinate. On + * @dev the EVM, this is slightly more efficient than the recommendation in + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.1.1 + * @dev step 5, to concatenate with a nonce then hash, and rehash with the + * @dev nonce updated until a valid x-ordinate is found. + + * @dev - hashToCurve does not include a cipher version string or the byte 0x1 + * @dev in the hash message, as recommended in step 5.B of the draft + * @dev standard. They are unnecessary here because no variation in the + * @dev cipher suite is allowed. 
+ + * @dev - Similarly, the hash input in scalarFromCurvePoints does not include a + * @dev commitment to the cipher suite, either, which differs from step 2 of + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.3 + * @dev . Also, the hash input is the concatenation of the uncompressed + * @dev points, not the compressed points as recommended in step 3. + + * @dev - In the calculation of the challenge value "c", the "u" value (i.e. + * @dev the value computed by Reggie as the nonce times the secp256k1 + * @dev generator point, see steps 5 and 7 of + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.3 + * @dev ) is replaced by its ethereum address, i.e. the lower 160 bits of the + * @dev keccak hash of the original u. This is because we only verify the + * @dev calculation of u up to its address, by abusing ECRECOVER. + * **************************************************************************** + * @dev SECURITY PROPERTIES + + * @dev Here are the security properties for this VRF: + + * @dev Full uniqueness: For any seed and valid VRF public key, there is + * @dev exactly one VRF output which can be proved to come from that seed, in + * @dev the sense that the proof will pass verifyVRFProof. + + * @dev Full collision resistance: It's cryptographically infeasible to find + * @dev two seeds with same VRF output from a fixed, valid VRF key + + * @dev Full pseudorandomness: Absent the proofs that the VRF outputs are + * @dev derived from a given seed, the outputs are computationally + * @dev indistinguishable from randomness. + + * @dev https://eprint.iacr.org/2017/099.pdf, Appendix B contains the proofs + * @dev for these properties. 
+ + * @dev For secp256k1, the key validation described in section + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.6 + * @dev is unnecessary, because secp256k1 has cofactor 1, and the + * @dev representation of the public key used here (affine x- and y-ordinates + * @dev of the secp256k1 point on the standard y^2=x^3+7 curve) cannot refer to + * @dev the point at infinity. + * **************************************************************************** + * @dev OTHER SECURITY CONSIDERATIONS + * + * @dev The seed input to the VRF could in principle force an arbitrary amount + * @dev of work in hashToCurve, by requiring extra rounds of hashing and + * @dev checking whether that's yielded the x ordinate of a secp256k1 point. + * @dev However, under the Random Oracle Model the probability of choosing a + * @dev point which forces n extra rounds in hashToCurve is 2⁻ⁿ. The base cost + * @dev for calling hashToCurve is about 25,000 gas, and each round of checking + * @dev for a valid x ordinate costs about 15,555 gas, so to find a seed for + * @dev which hashToCurve would cost more than 2,017,000 gas, one would have to + * @dev try, in expectation, about 2¹²⁸ seeds, which is infeasible for any + * @dev foreseeable computational resources. (25,000 + 128 * 15,555 < 2,017,000.) + + * @dev Since the gas block limit for the Ethereum main net is 10,000,000 gas, + * @dev this means it is infeasible for an adversary to prevent correct + * @dev operation of this contract by choosing an adverse seed. + + * @dev (See TestMeasureHashToCurveGasCost for verification of the gas cost for + * @dev hashToCurve.) + + * @dev It may be possible to make a secure constant-time hashToCurve function. + * @dev See notes in hashToCurve docstring. +*/ +contract VRF { + + // See https://www.secg.org/sec2-v2.pdf, section 2.4.1, for these constants. 
+ uint256 constant private GROUP_ORDER = // Number of points in Secp256k1 + // solium-disable-next-line indentation + 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; + // Prime characteristic of the galois field over which Secp256k1 is defined + uint256 constant private FIELD_SIZE = + // solium-disable-next-line indentation + 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F; + uint256 constant private WORD_LENGTH_BYTES = 0x20; + + // (base^exponent) % FIELD_SIZE + // Cribbed from https://medium.com/@rbkhmrcr/precompiles-solidity-e5d29bd428c4 + function bigModExp(uint256 base, uint256 exponent) + internal view returns (uint256 exponentiation) { + uint256 callResult; + uint256[6] memory bigModExpContractInputs; + bigModExpContractInputs[0] = WORD_LENGTH_BYTES; // Length of base + bigModExpContractInputs[1] = WORD_LENGTH_BYTES; // Length of exponent + bigModExpContractInputs[2] = WORD_LENGTH_BYTES; // Length of modulus + bigModExpContractInputs[3] = base; + bigModExpContractInputs[4] = exponent; + bigModExpContractInputs[5] = FIELD_SIZE; + uint256[1] memory output; + assembly { // solhint-disable-line no-inline-assembly + callResult := staticcall( + not(0), // Gas cost: no limit + 0x05, // Bigmodexp contract address + bigModExpContractInputs, + 0xc0, // Length of input segment: 6*0x20-bytes + output, + 0x20 // Length of output segment + ) + } + if (callResult == 0) {revert("bigModExp failure!");} + return output[0]; + } + + // Let q=FIELD_SIZE. q % 4 = 3, ∴ x≡r^2 mod q ⇒ x^SQRT_POWER≡±r mod q. See + // https://en.wikipedia.org/wiki/Modular_square_root#Prime_or_prime_power_modulus + uint256 constant private SQRT_POWER = (FIELD_SIZE + 1) >> 2; + + // Computes a s.t. a^2 = x in the field. Assumes a exists + function squareRoot(uint256 x) internal view returns (uint256) { + return bigModExp(x, SQRT_POWER); + } + + // The value of y^2 given that (x,y) is on secp256k1. 
+ function ySquared(uint256 x) internal pure returns (uint256) { + // Curve is y^2=x^3+7. See section 2.4.1 of https://www.secg.org/sec2-v2.pdf + uint256 xCubed = mulmod(x, mulmod(x, x, FIELD_SIZE), FIELD_SIZE); + return addmod(xCubed, 7, FIELD_SIZE); + } + + // True iff p is on secp256k1 + function isOnCurve(uint256[2] memory p) internal pure returns (bool) { + return ySquared(p[0]) == mulmod(p[1], p[1], FIELD_SIZE); + } + + // Hash x uniformly into {0, ..., FIELD_SIZE-1}. + function fieldHash(bytes memory b) internal pure returns (uint256 x_) { + x_ = uint256(keccak256(b)); + // Rejecting if x >= FIELD_SIZE corresponds to step 2.1 in section 2.3.4 of + // http://www.secg.org/sec1-v2.pdf , which is part of the definition of + // string_to_point in the IETF draft + while (x_ >= FIELD_SIZE) { + x_ = uint256(keccak256(abi.encodePacked(x_))); + } + } + + // Hash b to a random point which hopefully lies on secp256k1. The y ordinate + // is always even, due to + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.1.1 + // step 5.C, which references arbitrary_string_to_point, defined in + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.5 as + // returning the point with given x ordinate, and even y ordinate. + function newCandidateSecp256k1Point(bytes memory b) + internal view returns (uint256[2] memory p) { + p[0] = fieldHash(b); + p[1] = squareRoot(ySquared(p[0])); + if (p[1] % 2 == 1) { + p[1] = FIELD_SIZE - p[1]; + } + } + + // Domain-separation tag for initial hash in hashToCurve. Corresponds to + // vrf.go/hashToCurveHashPrefix + uint256 constant HASH_TO_CURVE_HASH_PREFIX = 1; + + // Cryptographic hash function onto the curve. + // + // Corresponds to algorithm in section 5.4.1.1 of the draft standard. (But see + // DESIGN NOTES above for slight differences.) 
+ // + // TODO(alx): Implement a bounded-computation hash-to-curve, as described in + // "Construction of Rational Points on Elliptic Curves over Finite Fields" + // http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.831.5299&rep=rep1&type=pdf + // and suggested by + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-01#section-5.2.2 + // (Though we can't used exactly that because secp256k1's j-invariant is 0.) + // + // This would greatly simplify the analysis in "OTHER SECURITY CONSIDERATIONS" + // https://www.pivotaltracker.com/story/show/171120900 + function hashToCurve(uint256[2] memory pk, uint256 input) + internal view returns (uint256[2] memory rv) { + rv = newCandidateSecp256k1Point(abi.encodePacked(HASH_TO_CURVE_HASH_PREFIX, + pk, input)); + while (!isOnCurve(rv)) { + rv = newCandidateSecp256k1Point(abi.encodePacked(rv[0])); + } + } + + /** ********************************************************************* + * @notice Check that product==scalar*multiplicand + * + * @dev Based on Vitalik Buterin's idea in ethresear.ch post cited below. + * + * @param multiplicand: secp256k1 point + * @param scalar: non-zero GF(GROUP_ORDER) scalar + * @param product: secp256k1 expected to be multiplier * multiplicand + * @return verifies true iff product==scalar*multiplicand, with cryptographically high probability + */ + function ecmulVerify(uint256[2] memory multiplicand, uint256 scalar, + uint256[2] memory product) internal pure returns(bool verifies) + { + require(scalar != 0); // Rules out an ecrecover failure case + uint256 x = multiplicand[0]; // x ordinate of multiplicand + uint8 v = multiplicand[1] % 2 == 0 ? 27 : 28; // parity of y ordinate + // https://ethresear.ch/t/you-can-kinda-abuse-ecrecover-to-do-ecmul-in-secp256k1-today/2384/9 + // Point corresponding to address ecrecover(0, v, x, s=scalar*x) is + // (x⁻¹ mod GROUP_ORDER) * (scalar * x * multiplicand - 0 * g), i.e. + // scalar*multiplicand. 
See https://crypto.stackexchange.com/a/18106 + bytes32 scalarTimesX = bytes32(mulmod(scalar, x, GROUP_ORDER)); + address actual = ecrecover(bytes32(0), v, bytes32(x), scalarTimesX); + // Explicit conversion to address takes bottom 160 bits + address expected = address(uint256(keccak256(abi.encodePacked(product)))); + return (actual == expected); + } + + // Returns x1/z1-x2/z2=(x1z2-x2z1)/(z1z2) in projective coordinates on P¹(𝔽ₙ) + function projectiveSub(uint256 x1, uint256 z1, uint256 x2, uint256 z2) + internal pure returns(uint256 x3, uint256 z3) { + uint256 num1 = mulmod(z2, x1, FIELD_SIZE); + uint256 num2 = mulmod(FIELD_SIZE - x2, z1, FIELD_SIZE); + (x3, z3) = (addmod(num1, num2, FIELD_SIZE), mulmod(z1, z2, FIELD_SIZE)); + } + + // Returns x1/z1*x2/z2=(x1x2)/(z1z2), in projective coordinates on P¹(𝔽ₙ) + function projectiveMul(uint256 x1, uint256 z1, uint256 x2, uint256 z2) + internal pure returns(uint256 x3, uint256 z3) { + (x3, z3) = (mulmod(x1, x2, FIELD_SIZE), mulmod(z1, z2, FIELD_SIZE)); + } + + /** ************************************************************************** + @notice Computes elliptic-curve sum, in projective co-ordinates + + @dev Using projective coordinates avoids costly divisions + + @dev To use this with p and q in affine coordinates, call + @dev projectiveECAdd(px, py, qx, qy). This will return + @dev the addition of (px, py, 1) and (qx, qy, 1), in the + @dev secp256k1 group. + + @dev This can be used to calculate the z which is the inverse to zInv + @dev in isValidVRFOutput. But consider using a faster + @dev re-implementation such as ProjectiveECAdd in the golang vrf package. + + @dev This function assumes [px,py,1],[qx,qy,1] are valid projective + coordinates of secp256k1 points. That is safe in this contract, + because this method is only used by linearCombination, which checks + points are on the curve via ecrecover. 
+ ************************************************************************** + @param px The first affine coordinate of the first summand + @param py The second affine coordinate of the first summand + @param qx The first affine coordinate of the second summand + @param qy The second affine coordinate of the second summand + + (px,py) and (qx,qy) must be distinct, valid secp256k1 points. + ************************************************************************** + Return values are projective coordinates of [px,py,1]+[qx,qy,1] as points + on secp256k1, in P²(𝔽ₙ) + @return sx + @return sy + @return sz + */ + function projectiveECAdd(uint256 px, uint256 py, uint256 qx, uint256 qy) + internal pure returns(uint256 sx, uint256 sy, uint256 sz) { + // See "Group law for E/K : y^2 = x^3 + ax + b", in section 3.1.2, p. 80, + // "Guide to Elliptic Curve Cryptography" by Hankerson, Menezes and Vanstone + // We take the equations there for (sx,sy), and homogenize them to + // projective coordinates. That way, no inverses are required, here, and we + // only need the one inverse in affineECAdd. + + // We only need the "point addition" equations from Hankerson et al. Can + // skip the "point doubling" equations because p1 == p2 is cryptographically + // impossible, and require'd not to be the case in linearCombination. + + // Add extra "projective coordinate" to the two points + (uint256 z1, uint256 z2) = (1, 1); + + // (lx, lz) = (qy-py)/(qx-px), i.e., gradient of secant line. 
+ uint256 lx = addmod(qy, FIELD_SIZE - py, FIELD_SIZE); + uint256 lz = addmod(qx, FIELD_SIZE - px, FIELD_SIZE); + + uint256 dx; // Accumulates denominator from sx calculation + // sx=((qy-py)/(qx-px))^2-px-qx + (sx, dx) = projectiveMul(lx, lz, lx, lz); // ((qy-py)/(qx-px))^2 + (sx, dx) = projectiveSub(sx, dx, px, z1); // ((qy-py)/(qx-px))^2-px + (sx, dx) = projectiveSub(sx, dx, qx, z2); // ((qy-py)/(qx-px))^2-px-qx + + uint256 dy; // Accumulates denominator from sy calculation + // sy=((qy-py)/(qx-px))(px-sx)-py + (sy, dy) = projectiveSub(px, z1, sx, dx); // px-sx + (sy, dy) = projectiveMul(sy, dy, lx, lz); // ((qy-py)/(qx-px))(px-sx) + (sy, dy) = projectiveSub(sy, dy, py, z1); // ((qy-py)/(qx-px))(px-sx)-py + + if (dx != dy) { // Cross-multiply to put everything over a common denominator + sx = mulmod(sx, dy, FIELD_SIZE); + sy = mulmod(sy, dx, FIELD_SIZE); + sz = mulmod(dx, dy, FIELD_SIZE); + } else { // Already over a common denominator, use that for z ordinate + sz = dx; + } + } + + // p1+p2, as affine points on secp256k1. + // + // invZ must be the inverse of the z returned by projectiveECAdd(p1, p2). + // It is computed off-chain to save gas. + // + // p1 and p2 must be distinct, because projectiveECAdd doesn't handle + // point doubling. + function affineECAdd( + uint256[2] memory p1, uint256[2] memory p2, + uint256 invZ) internal pure returns (uint256[2] memory) { + uint256 x; + uint256 y; + uint256 z; + (x, y, z) = projectiveECAdd(p1[0], p1[1], p2[0], p2[1]); + require(mulmod(z, invZ, FIELD_SIZE) == 1, "invZ must be inverse of z"); + // Clear the z ordinate of the projective representation by dividing through + // by it, to obtain the affine representation + return [mulmod(x, invZ, FIELD_SIZE), mulmod(y, invZ, FIELD_SIZE)]; + } + + // True iff address(c*p+s*g) == lcWitness, where g is generator. (With + // cryptographically high probability.) 
+ function verifyLinearCombinationWithGenerator( + uint256 c, uint256[2] memory p, uint256 s, address lcWitness) + internal pure returns (bool) { + // Rule out ecrecover failure modes which return address 0. + require(lcWitness != address(0), "bad witness"); + uint8 v = (p[1] % 2 == 0) ? 27 : 28; // parity of y-ordinate of p + bytes32 pseudoHash = bytes32(GROUP_ORDER - mulmod(p[0], s, GROUP_ORDER)); // -s*p[0] + bytes32 pseudoSignature = bytes32(mulmod(c, p[0], GROUP_ORDER)); // c*p[0] + // https://ethresear.ch/t/you-can-kinda-abuse-ecrecover-to-do-ecmul-in-secp256k1-today/2384/9 + // The point corresponding to the address returned by + // ecrecover(-s*p[0],v,p[0],c*p[0]) is + // (p[0]⁻¹ mod GROUP_ORDER)*(c*p[0]-(-s)*p[0]*g)=c*p+s*g. + // See https://crypto.stackexchange.com/a/18106 + // https://bitcoin.stackexchange.com/questions/38351/ecdsa-v-r-s-what-is-v + address computed = ecrecover(pseudoHash, v, bytes32(p[0]), pseudoSignature); + return computed == lcWitness; + } + + // c*p1 + s*p2. Requires cp1Witness=c*p1 and sp2Witness=s*p2. Also + // requires cp1Witness != sp2Witness (which is fine for this application, + // since it is cryptographically impossible for them to be equal. In the + // (cryptographically impossible) case that a prover accidentally derives + // a proof with equal c*p1 and s*p2, they should retry with a different + // proof nonce.) Assumes that all points are on secp256k1 + // (which is checked in verifyVRFProof below.) 
+ function linearCombination( + uint256 c, uint256[2] memory p1, uint256[2] memory cp1Witness, + uint256 s, uint256[2] memory p2, uint256[2] memory sp2Witness, + uint256 zInv) + internal pure returns (uint256[2] memory) { + require((cp1Witness[0] - sp2Witness[0]) % FIELD_SIZE != 0, + "points in sum must be distinct"); + require(ecmulVerify(p1, c, cp1Witness), "First multiplication check failed"); + require(ecmulVerify(p2, s, sp2Witness), "Second multiplication check failed"); + return affineECAdd(cp1Witness, sp2Witness, zInv); + } + + // Domain-separation tag for the hash taken in scalarFromCurvePoints. + // Corresponds to scalarFromCurveHashPrefix in vrf.go + uint256 constant SCALAR_FROM_CURVE_POINTS_HASH_PREFIX = 2; + + // Pseudo-random number from inputs. Matches vrf.go/scalarFromCurvePoints, and + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.3 + // The draft calls (in step 7, via the definition of string_to_int, in + // https://datatracker.ietf.org/doc/html/rfc8017#section-4.2 ) for taking the + // first hash without checking that it corresponds to a number less than the + // group order, which will lead to a slight bias in the sample. + // + // TODO(alx): We could save a bit of gas by following the standard here and + // using the compressed representation of the points, if we collated the y + // parities into a single bytes32. + // https://www.pivotaltracker.com/story/show/171120588 + function scalarFromCurvePoints( + uint256[2] memory hash, uint256[2] memory pk, uint256[2] memory gamma, + address uWitness, uint256[2] memory v) + internal pure returns (uint256 s) { + return uint256( + keccak256(abi.encodePacked(SCALAR_FROM_CURVE_POINTS_HASH_PREFIX, + hash, pk, gamma, v, uWitness))); + } + + // True if (gamma, c, s) is a correctly constructed randomness proof from pk + // and seed. zInv must be the inverse of the third ordinate from + // projectiveECAdd applied to cGammaWitness and sHashWitness. 
Corresponds to + // section 5.3 of the IETF draft. + // + // TODO(alx): Since I'm only using pk in the ecrecover call, I could only pass + // the x ordinate, and the parity of the y ordinate in the top bit of uWitness + // (which I could make a uint256 without using any extra space.) Would save + // about 2000 gas. https://www.pivotaltracker.com/story/show/170828567 + function verifyVRFProof( + uint256[2] memory pk, uint256[2] memory gamma, uint256 c, uint256 s, + uint256 seed, address uWitness, uint256[2] memory cGammaWitness, + uint256[2] memory sHashWitness, uint256 zInv) + internal view { + require(isOnCurve(pk), "public key is not on curve"); + require(isOnCurve(gamma), "gamma is not on curve"); + require(isOnCurve(cGammaWitness), "cGammaWitness is not on curve"); + require(isOnCurve(sHashWitness), "sHashWitness is not on curve"); + // Step 5. of IETF draft section 5.3 (pk corresponds to 5.3's Y, and here + // we use the address of u instead of u itself. Also, here we add the + // terms instead of taking the difference, and in the proof consruction in + // vrf.GenerateProof, we correspondingly take the difference instead of + // taking the sum as they do in step 7 of section 5.1.) + require( + verifyLinearCombinationWithGenerator(c, pk, s, uWitness), + "addr(c*pk+s*g)≠_uWitness" + ); + // Step 4. of IETF draft section 5.3 (pk corresponds to Y, seed to alpha_string) + uint256[2] memory hash = hashToCurve(pk, seed); + // Step 6. of IETF draft section 5.3, but see note for step 5 about +/- terms + uint256[2] memory v = linearCombination( + c, gamma, cGammaWitness, s, hash, sHashWitness, zInv); + // Steps 7. and 8. of IETF draft section 5.3 + uint256 derivedC = scalarFromCurvePoints(hash, pk, gamma, uWitness, v); + require(c == derivedC, "invalid proof"); + } + + // Domain-separation tag for the hash used as the final VRF output. 
+ // Corresponds to vrfRandomOutputHashPrefix in vrf.go + uint256 constant VRF_RANDOM_OUTPUT_HASH_PREFIX = 3; + + // Length of proof marshaled to bytes array. Shows layout of proof + uint public constant PROOF_LENGTH = 64 + // PublicKey (uncompressed format.) + 64 + // Gamma + 32 + // C + 32 + // S + 32 + // Seed + 0 + // Dummy entry: The following elements are included for gas efficiency: + 32 + // uWitness (gets padded to 256 bits, even though it's only 160) + 64 + // cGammaWitness + 64 + // sHashWitness + 32; // zInv (Leave Output out, because that can be efficiently calculated) + + /* *************************************************************************** + * @notice Returns proof's output, if proof is valid. Otherwise reverts + + * @param proof A binary-encoded proof, as output by vrf.Proof.MarshalForSolidityVerifier + * + * Throws if proof is invalid, otherwise: + * @return output i.e., the random output implied by the proof + * *************************************************************************** + * @dev See the calculation of PROOF_LENGTH for the binary layout of proof. 
+ */ + function randomValueFromVRFProof(bytes memory proof) + internal view returns (uint256 output) { + require(proof.length == PROOF_LENGTH, "wrong proof length"); + + uint256[2] memory pk; // parse proof contents into these variables + uint256[2] memory gamma; + // c, s and seed combined (prevents "stack too deep" compilation error) + uint256[3] memory cSSeed; + address uWitness; + uint256[2] memory cGammaWitness; + uint256[2] memory sHashWitness; + uint256 zInv; + (pk, gamma, cSSeed, uWitness, cGammaWitness, sHashWitness, zInv) = abi.decode( + proof, (uint256[2], uint256[2], uint256[3], address, uint256[2], + uint256[2], uint256)); + verifyVRFProof( + pk, + gamma, + cSSeed[0], // c + cSSeed[1], // s + cSSeed[2], // seed + uWitness, + cGammaWitness, + sHashWitness, + zInv + ); + output = uint256(keccak256(abi.encode(VRF_RANDOM_OUTPUT_HASH_PREFIX, gamma))); + } +} diff --git a/contracts/src/v0.6/VRFConsumerBase.sol b/contracts/src/v0.6/VRFConsumerBase.sol new file mode 100644 index 00000000..71c83f47 --- /dev/null +++ b/contracts/src/v0.6/VRFConsumerBase.sol @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "./vendor/SafeMathPlugin.sol"; + +import "./interfaces/LinkTokenInterface.sol"; + +import "./VRFRequestIDBase.sol"; + +/** **************************************************************************** + * @notice Interface for contracts using VRF randomness + * ***************************************************************************** + * @dev PURPOSE + * + * @dev Reggie the Random Oracle (not his real job) wants to provide randomness + * @dev to Vera the verifier in such a way that Vera can be sure he's not + * @dev making his output up to suit himself. Reggie provides Vera a public key + * @dev to which he knows the secret key. Each time Vera provides a seed to + * @dev Reggie, he gives back a value which is computed completely + * @dev deterministically from the seed and the secret key. 
+ * + * @dev Reggie provides a proof by which Vera can verify that the output was + * @dev correctly computed once Reggie tells it to her, but without that proof, + * @dev the output is indistinguishable to her from a uniform random sample + * @dev from the output space. + * + * @dev The purpose of this contract is to make it easy for unrelated contracts + * @dev to talk to Vera the verifier about the work Reggie is doing, to provide + * @dev simple access to a verifiable source of randomness. + * ***************************************************************************** + * @dev USAGE + * + * @dev Calling contracts must inherit from VRFConsumerBase, and can + * @dev initialize VRFConsumerBase's attributes in their constructor as + * @dev shown: + * + * @dev contract VRFConsumer { + * @dev constructor(, address _vrfCoordinator, address _link) + * @dev VRFConsumerBase(_vrfCoordinator, _link) public { + * @dev + * @dev } + * @dev } + * + * @dev The oracle will have given you an ID for the VRF keypair they have + * @dev committed to (let's call it keyHash), and have told you the minimum PLI + * @dev price for VRF service. Make sure your contract has sufficient PLI, and + * @dev call requestRandomness(keyHash, fee, seed), where seed is the input you + * @dev want to generate randomness from. + * + * @dev Once the VRFCoordinator has received and validated the oracle's response + * @dev to your request, it will call your contract's fulfillRandomness method. + * + * @dev The randomness argument to fulfillRandomness is the actual random value + * @dev generated from your seed. + * + * @dev The requestId argument is generated from the keyHash and the seed by + * @dev makeRequestId(keyHash, seed). If your contract could have concurrent + * @dev requests open, you can use the requestId to track which seed is + * @dev associated with which randomness. See VRFRequestIDBase.sol for more + * @dev details. 
(See "SECURITY CONSIDERATIONS" for principles to keep in mind, + * @dev if your contract could have multiple requests in flight simultaneously.) + * + * @dev Colliding `requestId`s are cryptographically impossible as long as seeds + * @dev differ. (Which is critical to making unpredictable randomness! See the + * @dev next section.) + * + * ***************************************************************************** + * @dev SECURITY CONSIDERATIONS + * + * @dev A method with the ability to call your fulfillRandomness method directly + * @dev could spoof a VRF response with any random value, so it's critical that + * @dev it cannot be directly called by anything other than this base contract + * @dev (specifically, by the VRFConsumerBase.rawFulfillRandomness method). + * + * @dev For your users to trust that your contract's random behavior is free + * @dev from malicious interference, it's best if you can write it so that all + * @dev behaviors implied by a VRF response are executed *during* your + * @dev fulfillRandomness method. If your contract must store the response (or + * @dev anything derived from it) and use it later, you must ensure that any + * @dev user-significant behavior which depends on that stored value cannot be + * @dev manipulated by a subsequent VRF request. + * + * @dev Similarly, both miners and the VRF oracle itself have some influence + * @dev over the order in which VRF responses appear on the blockchain, so if + * @dev your contract could have multiple VRF requests in flight simultaneously, + * @dev you must ensure that the order in which the VRF responses arrive cannot + * @dev be used to manipulate your contract's user-significant behavior. + * + * @dev Since the ultimate input to the VRF is mixed with the block hash of the + * @dev block in which the request is made, user-provided seeds have no impact + * @dev on its economic security properties. 
They are only included for API + * @dev compatability with previous versions of this contract. + * + * @dev Since the block hash of the block which contains the requestRandomness + * @dev call is mixed into the input to the VRF *last*, a sufficiently powerful + * @dev miner could, in principle, fork the blockchain to evict the block + * @dev containing the request, forcing the request to be included in a + * @dev different block with a different hash, and therefore a different input + * @dev to the VRF. However, such an attack would incur a substantial economic + * @dev cost. This cost scales with the number of blocks the VRF oracle waits + * @dev until it calls responds to a request. + */ +abstract contract VRFConsumerBase is VRFRequestIDBase { + + using SafeMathPlugin for uint256; + + /** + * @notice fulfillRandomness handles the VRF response. Your contract must + * @notice implement it. See "SECURITY CONSIDERATIONS" above for important + * @notice principles to keep in mind when implementing your fulfillRandomness + * @notice method. + * + * @dev VRFConsumerBase expects its subcontracts to have a method with this + * @dev signature, and will call it once it has verified the proof + * @dev associated with the randomness. (It is triggered via a call to + * @dev rawFulfillRandomness, below.) + * + * @param requestId The Id initially returned by requestRandomness + * @param randomness the VRF output + */ + function fulfillRandomness(bytes32 requestId, uint256 randomness) + internal virtual; + + /** + * @dev In order to keep backwards compatibility we have kept the user + * seed field around. We remove the use of it because given that the blockhash + * enters later, it overrides whatever randomness the used seed provides. + * Given that it adds no security, and can easily lead to misunderstandings, + * we have removed it from usage and can now provide a simpler API. 
+ */ + uint256 constant private USER_SEED_PLACEHOLDER = 0; + + /** + * @notice requestRandomness initiates a request for VRF output given _seed + * + * @dev The fulfillRandomness method receives the output, once it's provided + * @dev by the Oracle, and verified by the vrfCoordinator. + * + * @dev The _keyHash must already be registered with the VRFCoordinator, and + * @dev the _fee must exceed the fee specified during registration of the + * @dev _keyHash. + * + * @dev The _seed parameter is vestigial, and is kept only for API + * @dev compatibility with older versions. It can't *hurt* to mix in some of + * @dev your own randomness, here, but it's not necessary because the VRF + * @dev oracle will mix the hash of the block containing your request into the + * @dev VRF seed it ultimately uses. + * + * @param _keyHash ID of public key against which randomness is generated + * @param _fee The amount of PLI to send with the request + * + * @return requestId unique ID for this request + * + * @dev The returned requestId can be used to distinguish responses to + * @dev concurrent requests. It is passed as the first argument to + * @dev fulfillRandomness. + */ + function requestRandomness(bytes32 _keyHash, uint256 _fee) + internal returns (bytes32 requestId) + { + PLI.transferAndCall(vrfCoordinator, _fee, abi.encode(_keyHash, USER_SEED_PLACEHOLDER)); + // This is the seed passed to VRFCoordinator. The oracle will mix this with + // the hash of the block containing this request to obtain the seed/input + // which is finally passed to the VRF cryptographic machinery. + uint256 vRFSeed = makeVRFInputSeed(_keyHash, USER_SEED_PLACEHOLDER, address(this), nonces[_keyHash]); + // nonces[_keyHash] must stay in sync with + // VRFCoordinator.nonces[_keyHash][this], which was incremented by the above + // successful PLI.transferAndCall (in VRFCoordinator.randomnessRequest). 
+ // This provides protection against the user repeating their input seed, + // which would result in a predictable/duplicate output, if multiple such + // requests appeared in the same block. + nonces[_keyHash] = nonces[_keyHash].add(1); + return makeRequestId(_keyHash, vRFSeed); + } + + LinkTokenInterface immutable internal PLI; + address immutable private vrfCoordinator; + + // Nonces for each VRF key from which randomness has been requested. + // + // Must stay in sync with VRFCoordinator[_keyHash][this] + mapping(bytes32 /* keyHash */ => uint256 /* nonce */) private nonces; + + /** + * @param _vrfCoordinator address of VRFCoordinator contract + * @param _link address of PLI token contract + * + * @dev https://docs.chain.link/docs/link-token-contracts + */ + constructor(address _vrfCoordinator, address _link) public { + vrfCoordinator = _vrfCoordinator; + PLI = LinkTokenInterface(_link); + } + + // rawFulfillRandomness is called by VRFCoordinator when it receives a valid VRF + // proof. 
rawFulfillRandomness then calls fulfillRandomness, after validating + // the origin of the call + function rawFulfillRandomness(bytes32 requestId, uint256 randomness) external { + require(msg.sender == vrfCoordinator, "Only VRFCoordinator can fulfill"); + fulfillRandomness(requestId, randomness); + } +} diff --git a/contracts/src/v0.6/VRFCoordinator.sol b/contracts/src/v0.6/VRFCoordinator.sol new file mode 100644 index 00000000..c074aa73 --- /dev/null +++ b/contracts/src/v0.6/VRFCoordinator.sol @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "./vendor/SafeMathPlugin.sol"; + +import "./interfaces/LinkTokenInterface.sol"; +import "./interfaces/BlockHashStoreInterface.sol"; + +import "./vendor/Ownable.sol"; + +import "./VRF.sol"; +import "./VRFRequestIDBase.sol"; +import "./VRFConsumerBase.sol"; + +/** + * @title VRFCoordinator coordinates on-chain verifiable-randomness requests + * @title with off-chain responses + */ +contract VRFCoordinator is VRF, VRFRequestIDBase, Ownable { + + using SafeMathPlugin for uint256; + + LinkTokenInterface internal PLI; + BlockHashStoreInterface internal blockHashStore; + + constructor(address _link, address _blockHashStore) public { + PLI = LinkTokenInterface(_link); + blockHashStore = BlockHashStoreInterface(_blockHashStore); + } + + struct Callback { // Tracks an ongoing request + address callbackContract; // Requesting contract, which will receive response + // Amount of PLI paid at request time. Total PLI = 1e9 * 1e18 < 2^96, so + // this representation is adequate, and saves a word of storage when this + // field follows the 160-bit callbackContract address. + uint96 randomnessFee; + // Commitment to seed passed to oracle by this contract, and the number of + // the block in which the request appeared. This is the keccak256 of the + // concatenation of those values. Storing this commitment saves a word of + // storage. 
+ bytes32 seedAndBlockNum; + } + + struct ServiceAgreement { // Tracks oracle commitments to VRF service + address vRFOracle; // Oracle committing to respond with VRF service + uint96 fee; // Minimum payment for oracle response. Total PLI=1e9*1e18<2^96 + bytes32 jobID; // ID of corresponding plugin job in oracle's DB + } + + mapping(bytes32 /* (provingKey, seed) */ => Callback) public callbacks; + mapping(bytes32 /* provingKey */ => ServiceAgreement) + public serviceAgreements; + mapping(address /* oracle */ => uint256 /* PLI balance */) + public withdrawableTokens; + mapping(bytes32 /* provingKey */ => mapping(address /* consumer */ => uint256)) + private nonces; + + // The oracle only needs the jobID to look up the VRF, but specifying public + // key as well prevents a malicious oracle from inducing VRF outputs from + // another oracle by reusing the jobID. + event RandomnessRequest( + bytes32 keyHash, + uint256 seed, + bytes32 indexed jobID, + address sender, + uint256 fee, + bytes32 requestID); + + event NewServiceAgreement(bytes32 keyHash, uint256 fee); + + event RandomnessRequestFulfilled(bytes32 requestId, uint256 output); + + /** + * @notice Commits calling address to serve randomness + * @param _fee minimum PLI payment required to serve randomness + * @param _oracle the address of the Plugin node with the proving key and job + * @param _publicProvingKey public key used to prove randomness + * @param _jobID ID of the corresponding plugin job in the oracle's db + */ + function registerProvingKey( + uint256 _fee, address _oracle, uint256[2] calldata _publicProvingKey, bytes32 _jobID + ) + external + onlyOwner() + { + bytes32 keyHash = hashOfKey(_publicProvingKey); + address oldVRFOracle = serviceAgreements[keyHash].vRFOracle; + require(oldVRFOracle == address(0), "please register a new key"); + require(_oracle != address(0), "_oracle must not be 0x0"); + serviceAgreements[keyHash].vRFOracle = _oracle; + serviceAgreements[keyHash].jobID = _jobID; + // Yes, 
this revert message doesn't fit in a word + require(_fee <= 1e9 ether, + "you can't charge more than all the PLI in the world, greedy"); + serviceAgreements[keyHash].fee = uint96(_fee); + emit NewServiceAgreement(keyHash, _fee); + } + + /** + * @notice Called by PLI.transferAndCall, on successful PLI transfer + * + * @dev To invoke this, use the requestRandomness method in VRFConsumerBase. + * + * @dev The VRFCoordinator will call back to the calling contract when the + * @dev oracle responds, on the method fulfillRandomness. See + * @dev VRFConsumerBase.fulfilRandomness for its signature. Your consuming + * @dev contract should inherit from VRFConsumerBase, and implement + * @dev fulfilRandomness. + * + * @param _sender address: who sent the PLI (must be a contract) + * @param _fee amount of PLI sent + * @param _data abi-encoded call to randomnessRequest + */ + function onTokenTransfer(address _sender, uint256 _fee, bytes memory _data) + public + onlyPLI + { + (bytes32 keyHash, uint256 seed) = abi.decode(_data, (bytes32, uint256)); + randomnessRequest(keyHash, seed, _fee, _sender); + } + + /** + * @notice creates the plugin request for randomness + * + * @param _keyHash ID of the VRF public key against which to generate output + * @param _consumerSeed Input to the VRF, from which randomness is generated + * @param _feePaid Amount of PLI sent with request. Must exceed fee for key + * @param _sender Requesting contract; to be called back with VRF output + * + * @dev _consumerSeed is mixed with key hash, sender address and nonce to + * @dev obtain preSeed, which is passed to VRF oracle, which mixes it with the + * @dev hash of the block containing this request, to compute the final seed. + * + * @dev The requestId used to store the request data is constructed from the + * @dev preSeed and keyHash. 
+ */ + function randomnessRequest( + bytes32 _keyHash, + uint256 _consumerSeed, + uint256 _feePaid, + address _sender + ) + internal + sufficientPLI(_feePaid, _keyHash) + { + uint256 nonce = nonces[_keyHash][_sender]; + uint256 preSeed = makeVRFInputSeed(_keyHash, _consumerSeed, _sender, nonce); + bytes32 requestId = makeRequestId(_keyHash, preSeed); + // Cryptographically guaranteed by preSeed including an increasing nonce + assert(callbacks[requestId].callbackContract == address(0)); + callbacks[requestId].callbackContract = _sender; + assert(_feePaid < 1e27); // Total PLI fits in uint96 + callbacks[requestId].randomnessFee = uint96(_feePaid); + callbacks[requestId].seedAndBlockNum = keccak256(abi.encodePacked( + preSeed, block.number)); + emit RandomnessRequest(_keyHash, preSeed, serviceAgreements[_keyHash].jobID, + _sender, _feePaid, requestId); + nonces[_keyHash][_sender] = nonces[_keyHash][_sender].add(1); + } + + // Offsets into fulfillRandomnessRequest's _proof of various values + // + // Public key. Skips byte array's length prefix. + uint256 public constant PUBLIC_KEY_OFFSET = 0x20; + // Seed is 7th word in proof, plus word for length, (6+1)*0x20=0xe0 + uint256 public constant PRESEED_OFFSET = 0xe0; + + /** + * @notice Called by the plugin node to fulfill requests + * + * @param _proof the proof of randomness. Actual random output built from this + * + * @dev The structure of _proof corresponds to vrf.MarshaledOnChainResponse, + * @dev in the node source code. I.e., it is a vrf.MarshaledProof with the + * @dev seed replaced by the preSeed, followed by the hash of the requesting + * @dev block. 
+ */ + function fulfillRandomnessRequest(bytes memory _proof) public { + (bytes32 currentKeyHash, Callback memory callback, bytes32 requestId, + uint256 randomness) = getRandomnessFromProof(_proof); + + // Pay oracle + address oadd = serviceAgreements[currentKeyHash].vRFOracle; + withdrawableTokens[oadd] = withdrawableTokens[oadd].add( + callback.randomnessFee); + + // Forget request. Must precede callback (prevents reentrancy) + delete callbacks[requestId]; + callBackWithRandomness(requestId, randomness, callback.callbackContract); + + emit RandomnessRequestFulfilled(requestId, randomness); + } + + function callBackWithRandomness(bytes32 requestId, uint256 randomness, + address consumerContract) internal { + // Dummy variable; allows access to method selector in next line. See + // https://github.com/ethereum/solidity/issues/3506#issuecomment-553727797 + VRFConsumerBase v; + bytes memory resp = abi.encodeWithSelector( + v.rawFulfillRandomness.selector, requestId, randomness); + // The bound b here comes from https://eips.ethereum.org/EIPS/eip-150. The + // actual gas available to the consuming contract will be b-floor(b/64). + // This is chosen to leave the consuming contract ~200k gas, after the cost + // of the call itself. + uint256 b = 206000; + require(gasleft() >= b, "not enough gas for consumer"); + // A low-level call is necessary, here, because we don't want the consuming + // contract to be able to revert this execution, and thus deny the oracle + // payment for a valid randomness response. This also necessitates the above + // check on the gasleft, as otherwise there would be no indication if the + // callback method ran out of gas. + // + // solhint-disable-next-line avoid-low-level-calls + (bool success,) = consumerContract.call(resp); + // Avoid unused-local-variable warning. (success is only present to prevent + // a warning that the return value of consumerContract.call is unused.) 
+ (success); + } + + function getRandomnessFromProof(bytes memory _proof) + internal view returns (bytes32 currentKeyHash, Callback memory callback, + bytes32 requestId, uint256 randomness) { + // blockNum follows proof, which follows length word (only direct-number + // constants are allowed in assembly, so have to compute this in code) + uint256 BLOCKNUM_OFFSET = 0x20 + PROOF_LENGTH; + // _proof.length skips the initial length word, so not including the + // blocknum in this length check balances out. + require(_proof.length == BLOCKNUM_OFFSET, "wrong proof length"); + uint256[2] memory publicKey; + uint256 preSeed; + uint256 blockNum; + assembly { // solhint-disable-line no-inline-assembly + publicKey := add(_proof, PUBLIC_KEY_OFFSET) + preSeed := mload(add(_proof, PRESEED_OFFSET)) + blockNum := mload(add(_proof, BLOCKNUM_OFFSET)) + } + currentKeyHash = hashOfKey(publicKey); + requestId = makeRequestId(currentKeyHash, preSeed); + callback = callbacks[requestId]; + require(callback.callbackContract != address(0), "no corresponding request"); + require(callback.seedAndBlockNum == keccak256(abi.encodePacked(preSeed, + blockNum)), "wrong preSeed or block num"); + + bytes32 blockHash = blockhash(blockNum); + if (blockHash == bytes32(0)) { + blockHash = blockHashStore.getBlockhash(blockNum); + require(blockHash != bytes32(0), "please prove blockhash"); + } + // The seed actually used by the VRF machinery, mixing in the blockhash + uint256 actualSeed = uint256(keccak256(abi.encodePacked(preSeed, blockHash))); + // solhint-disable-next-line no-inline-assembly + assembly { // Construct the actual proof from the remains of _proof + mstore(add(_proof, PRESEED_OFFSET), actualSeed) + mstore(_proof, PROOF_LENGTH) + } + randomness = VRF.randomValueFromVRFProof(_proof); // Reverts on failure + } + + /** + * @dev Allows the oracle operator to withdraw their PLI + * @param _recipient is the address the funds will be sent to + * @param _amount is the amount of PLI transferred from 
the Coordinator contract + */ + function withdraw(address _recipient, uint256 _amount) + external + hasAvailableFunds(_amount) + { + withdrawableTokens[msg.sender] = withdrawableTokens[msg.sender].sub(_amount); + assert(PLI.transfer(_recipient, _amount)); + } + + /** + * @notice Returns the serviceAgreements key associated with this public key + * @param _publicKey the key to return the address for + */ + function hashOfKey(uint256[2] memory _publicKey) public pure returns (bytes32) { + return keccak256(abi.encodePacked(_publicKey)); + } + + /** + * @dev Reverts if amount is not at least what was agreed upon in the service agreement + * @param _feePaid The payment for the request + * @param _keyHash The key which the request is for + */ + modifier sufficientPLI(uint256 _feePaid, bytes32 _keyHash) { + require(_feePaid >= serviceAgreements[_keyHash].fee, "Below agreed payment"); + _; + } + +/** + * @dev Reverts if not sent from the PLI token + */ + modifier onlyPLI() { + require(msg.sender == address(PLI), "Must use PLI token"); + _; + } + + /** + * @dev Reverts if amount requested is greater than withdrawable balance + * @param _amount The given amount to compare to `withdrawableTokens` + */ + modifier hasAvailableFunds(uint256 _amount) { + require(withdrawableTokens[msg.sender] >= _amount, "can't withdraw more than balance"); + _; + } + +} diff --git a/contracts/src/v0.6/VRFRequestIDBase.sol b/contracts/src/v0.6/VRFRequestIDBase.sol new file mode 100644 index 00000000..2668ead3 --- /dev/null +++ b/contracts/src/v0.6/VRFRequestIDBase.sol @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +contract VRFRequestIDBase { + + /** + * @notice returns the seed which is actually input to the VRF coordinator + * + * @dev To prevent repetition of VRF output due to repetition of the + * @dev user-supplied seed, that seed is combined in a hash with the + * @dev user-specific nonce, and the address of the consuming contract. 
The + * @dev risk of repetition is mostly mitigated by inclusion of a blockhash in + * @dev the final seed, but the nonce does protect against repetition in + * @dev requests which are included in a single block. + * + * @param _userSeed VRF seed input provided by user + * @param _requester Address of the requesting contract + * @param _nonce User-specific nonce at the time of the request + */ + function makeVRFInputSeed(bytes32 _keyHash, uint256 _userSeed, + address _requester, uint256 _nonce) + internal pure returns (uint256) + { + return uint256(keccak256(abi.encode(_keyHash, _userSeed, _requester, _nonce))); + } + + /** + * @notice Returns the id for this request + * @param _keyHash The serviceAgreement ID to be used for this request + * @param _vRFInputSeed The seed to be passed directly to the VRF + * @return The id for this request + * + * @dev Note that _vRFInputSeed is not the seed passed by the consuming + * @dev contract, but the one generated by makeVRFInputSeed + */ + function makeRequestId( + bytes32 _keyHash, uint256 _vRFInputSeed) internal pure returns (bytes32) { + return keccak256(abi.encodePacked(_keyHash, _vRFInputSeed)); + } +} diff --git a/contracts/src/v0.6/examples/VRFD20.sol b/contracts/src/v0.6/examples/VRFD20.sol new file mode 100644 index 00000000..6a128435 --- /dev/null +++ b/contracts/src/v0.6/examples/VRFD20.sol @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "../VRFConsumerBase.sol"; +import "../Owned.sol"; + +/** + * @notice A Plugin VRF consumer which uses randomness to mimic the rolling + * of a 20 sided die + * @dev This is only an example implementation and not necessarily suitable for mainnet. 
+ */ +contract VRFD20 is VRFConsumerBase, Owned { + using SafeMathPlugin for uint256; + + uint256 private constant ROLL_IN_PROGRESS = 42; + + bytes32 private s_keyHash; + uint256 private s_fee; + mapping(bytes32 => address) private s_rollers; + mapping(address => uint256) private s_results; + + event DiceRolled(bytes32 indexed requestId, address indexed roller); + event DiceLanded(bytes32 indexed requestId, uint256 indexed result); + + /** + * @notice Constructor inherits VRFConsumerBase + * + * @dev NETWORK: KOVAN + * @dev Plugin VRF Coordinator address: 0xdD3782915140c8f3b190B5D67eAc6dc5760C46E9 + * @dev PLI token address: 0xa36085F69e2889c224210F603D836748e7dC0088 + * @dev Key Hash: 0x6c3699283bda56ad74f6b855546325b68d482e983852a7a82979cc4807b641f4 + * @dev Fee: 0.1 PLI (100000000000000000) + * + * @param vrfCoordinator address of the VRF Coordinator + * @param link address of the PLI token + * @param keyHash bytes32 representing the hash of the VRF job + * @param fee uint256 fee to pay the VRF oracle + */ + constructor(address vrfCoordinator, address link, bytes32 keyHash, uint256 fee) + public + VRFConsumerBase(vrfCoordinator, link) + { + s_keyHash = keyHash; + s_fee = fee; + } + + /** + * @notice Requests randomness from a user-provided seed + * @dev Warning: if the VRF response is delayed, avoid calling requestRandomness repeatedly + * as that would give miners/VRF operators latitude about which VRF response arrives first. + * @dev You must review your implementation details with extreme care. 
+ * + * @param roller address of the roller + */ + function rollDice(address roller) public onlyOwner returns (bytes32 requestId) { + require(PLI.balanceOf(address(this)) >= s_fee, "Not enough PLI to pay fee"); + require(s_results[roller] == 0, "Already rolled"); + requestId = requestRandomness(s_keyHash, s_fee); + s_rollers[requestId] = roller; + s_results[roller] = ROLL_IN_PROGRESS; + emit DiceRolled(requestId, roller); + } + + /** + * @notice Callback function used by VRF Coordinator to return the random number + * to this contract. + * @dev Some action on the contract state should be taken here, like storing the result. + * @dev WARNING: take care to avoid having multiple VRF requests in flight if their order of arrival would result + * in contract states with different outcomes. Otherwise miners or the VRF operator would could take advantage + * by controlling the order. + * @dev The VRF Coordinator will only send this function verified responses, and the parent VRFConsumerBase + * contract ensures that this method only receives randomness from the designated VRFCoordinator. + * + * @param requestId bytes32 + * @param randomness The random result returned by the oracle + */ + function fulfillRandomness(bytes32 requestId, uint256 randomness) internal override { + uint256 d20Value = randomness.mod(20).add(1); + s_results[s_rollers[requestId]] = d20Value; + emit DiceLanded(requestId, d20Value); + } + + /** + * @notice Get the house assigned to the player once the address has rolled + * @param player address + * @return house as a string + */ + function house(address player) public view returns (string memory) { + require(s_results[player] != 0, "Dice not rolled"); + require(s_results[player] != ROLL_IN_PROGRESS, "Roll in progress"); + return getHouseName(s_results[player]); + } + + /** + * @notice Withdraw PLI from this contract. 
+ * @dev this is an example only, and in a real contract withdrawals should + * happen according to the established withdrawal pattern: + * https://docs.soliditylang.org/en/v0.4.24/common-patterns.html#withdrawal-from-contracts + * @param to the address to withdraw PLI to + * @param value the amount of PLI to withdraw + */ + function withdrawPLI(address to, uint256 value) public onlyOwner { + require(PLI.transfer(to, value), "Not enough PLI"); + } + + /** + * @notice Set the key hash for the oracle + * + * @param keyHash bytes32 + */ + function setKeyHash(bytes32 keyHash) public onlyOwner { + s_keyHash = keyHash; + } + + /** + * @notice Get the current key hash + * + * @return bytes32 + */ + function keyHash() public view returns (bytes32) { + return s_keyHash; + } + + /** + * @notice Set the oracle fee for requesting randomness + * + * @param fee uint256 + */ + function setFee(uint256 fee) public onlyOwner { + s_fee = fee; + } + + /** + * @notice Get the current fee + * + * @return uint256 + */ + function fee() public view returns (uint256) { + return s_fee; + } + + /** + * @notice Get the house name from the id + * @param id uint256 + * @return house name string + */ + function getHouseName(uint256 id) private pure returns (string memory) { + string[20] memory houseNames = [ + "Targaryen", + "Lannister", + "Stark", + "Tyrell", + "Baratheon", + "Martell", + "Tully", + "Bolton", + "Greyjoy", + "Arryn", + "Frey", + "Mormont", + "Tarley", + "Dayne", + "Umber", + "Valeryon", + "Manderly", + "Clegane", + "Glover", + "Karstark" + ]; + return houseNames[id.sub(1)]; + } +} diff --git a/contracts/src/v0.6/interfaces/AccessControllerInterface.sol b/contracts/src/v0.6/interfaces/AccessControllerInterface.sol new file mode 100644 index 00000000..4bf48bb2 --- /dev/null +++ b/contracts/src/v0.6/interfaces/AccessControllerInterface.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity >0.6.0 <0.8.0; + +interface AccessControllerInterface { + function 
hasAccess(address user, bytes calldata data) external view returns (bool); +} diff --git a/contracts/src/v0.6/interfaces/AggregatorInterface.sol b/contracts/src/v0.6/interfaces/AggregatorInterface.sol new file mode 100644 index 00000000..4f48160b --- /dev/null +++ b/contracts/src/v0.6/interfaces/AggregatorInterface.sol @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +interface AggregatorInterface { + function latestAnswer() + external + view + returns ( + int256 + ); + + function latestTimestamp() + external + view + returns ( + uint256 + ); + + function latestRound() + external + view + returns ( + uint256 + ); + + function getAnswer( + uint256 roundId + ) + external + view + returns ( + int256 + ); + + function getTimestamp( + uint256 roundId + ) + external + view + returns ( + uint256 + ); + + event AnswerUpdated( + int256 indexed current, + uint256 indexed roundId, + uint256 updatedAt + ); + + event NewRound( + uint256 indexed roundId, + address indexed startedBy, + uint256 startedAt + ); +} diff --git a/contracts/src/v0.6/interfaces/AggregatorV2V3Interface.sol b/contracts/src/v0.6/interfaces/AggregatorV2V3Interface.sol new file mode 100644 index 00000000..6b4975ed --- /dev/null +++ b/contracts/src/v0.6/interfaces/AggregatorV2V3Interface.sol @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "./AggregatorInterface.sol"; +import "./AggregatorV3Interface.sol"; + +interface AggregatorV2V3Interface is AggregatorInterface, AggregatorV3Interface +{ +} diff --git a/contracts/src/v0.6/interfaces/AggregatorV3Interface.sol b/contracts/src/v0.6/interfaces/AggregatorV3Interface.sol new file mode 100644 index 00000000..a1af9924 --- /dev/null +++ b/contracts/src/v0.6/interfaces/AggregatorV3Interface.sol @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +interface AggregatorV3Interface { + + function decimals() + external + view + returns ( + uint8 + ); + + function description() + 
external + view + returns ( + string memory + ); + + function version() + external + view + returns ( + uint256 + ); + + function getRoundData( + uint80 _roundId + ) + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + + function latestRoundData() + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + +} diff --git a/contracts/src/v0.6/interfaces/AggregatorValidatorInterface.sol b/contracts/src/v0.6/interfaces/AggregatorValidatorInterface.sol new file mode 100644 index 00000000..50c3226f --- /dev/null +++ b/contracts/src/v0.6/interfaces/AggregatorValidatorInterface.sol @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +interface AggregatorValidatorInterface { + function validate( + uint256 previousRoundId, + int256 previousAnswer, + uint256 currentRoundId, + int256 currentAnswer + ) external returns (bool); +} diff --git a/contracts/src/v0.6/interfaces/BlockHashStoreInterface.sol b/contracts/src/v0.6/interfaces/BlockHashStoreInterface.sol new file mode 100644 index 00000000..18927e64 --- /dev/null +++ b/contracts/src/v0.6/interfaces/BlockHashStoreInterface.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +interface BlockHashStoreInterface { + function getBlockhash(uint256 number) external view returns (bytes32); +} diff --git a/contracts/src/v0.6/interfaces/ENSInterface.sol b/contracts/src/v0.6/interfaces/ENSInterface.sol new file mode 100644 index 00000000..158242cd --- /dev/null +++ b/contracts/src/v0.6/interfaces/ENSInterface.sol @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +interface ENSInterface { + + // Logged when the owner of a node assigns a new owner to a subnode. 
+ event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner); + + // Logged when the owner of a node transfers ownership to a new account. + event Transfer(bytes32 indexed node, address owner); + + // Logged when the resolver for a node changes. + event NewResolver(bytes32 indexed node, address resolver); + + // Logged when the TTL of a node changes + event NewTTL(bytes32 indexed node, uint64 ttl); + + + function setSubnodeOwner(bytes32 node, bytes32 label, address _owner) external; + function setResolver(bytes32 node, address _resolver) external; + function setOwner(bytes32 node, address _owner) external; + function setTTL(bytes32 node, uint64 _ttl) external; + function owner(bytes32 node) external view returns (address); + function resolver(bytes32 node) external view returns (address); + function ttl(bytes32 node) external view returns (uint64); + +} diff --git a/contracts/src/v0.6/interfaces/FeedRegistryInterface.sol b/contracts/src/v0.6/interfaces/FeedRegistryInterface.sol new file mode 100644 index 00000000..0eb2d1d6 --- /dev/null +++ b/contracts/src/v0.6/interfaces/FeedRegistryInterface.sol @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; +pragma experimental ABIEncoderV2; + +import "./AggregatorV2V3Interface.sol"; + +interface FeedRegistryInterface { + struct Phase { + uint16 phaseId; + uint80 startingAggregatorRoundId; + uint80 endingAggregatorRoundId; + } + + event FeedProposed( + address indexed asset, + address indexed denomination, + address indexed proposedAggregator, + address currentAggregator, + address sender + ); + event FeedConfirmed( + address indexed asset, + address indexed denomination, + address indexed latestAggregator, + address previousAggregator, + uint16 nextPhaseId, + address sender + ); + + // V3 AggregatorV3Interface + + function decimals( + address base, + address quote + ) + external + view + returns ( + uint8 + ); + + function description( + address base, + address quote + ) + external + 
view + returns ( + string memory + ); + + function version( + address base, + address quote + ) + external + view + returns ( + uint256 + ); + + function latestRoundData( + address base, + address quote + ) + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + + function getRoundData( + address base, + address quote, + uint80 _roundId + ) + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + + // V2 AggregatorInterface + + function latestAnswer( + address base, + address quote + ) + external + view + returns ( + int256 answer + ); + + function latestTimestamp( + address base, + address quote + ) + external + view + returns ( + uint256 timestamp + ); + + function latestRound( + address base, + address quote + ) + external + view + returns ( + uint256 roundId + ); + + function getAnswer( + address base, + address quote, + uint256 roundId + ) + external + view + returns ( + int256 answer + ); + + function getTimestamp( + address base, + address quote, + uint256 roundId + ) + external + view + returns ( + uint256 timestamp + ); + + // Registry getters + + function getFeed( + address base, + address quote + ) + external + view + returns ( + AggregatorV2V3Interface aggregator + ); + + function getPhaseFeed( + address base, + address quote, + uint16 phaseId + ) + external + view + returns ( + AggregatorV2V3Interface aggregator + ); + + function isFeedEnabled( + address aggregator + ) + external + view + returns ( + bool + ); + + function getPhase( + address base, + address quote, + uint16 phaseId + ) + external + view + returns ( + Phase memory phase + ); + + // Round helpers + + function getRoundFeed( + address base, + address quote, + uint80 roundId + ) + external + view + returns ( + AggregatorV2V3Interface aggregator + ); + + function getPhaseRange( + address base, + address quote, + uint16 phaseId + 
) + external + view + returns ( + uint80 startingRoundId, + uint80 endingRoundId + ); + + function getPreviousRoundId( + address base, + address quote, + uint80 roundId + ) external + view + returns ( + uint80 previousRoundId + ); + + function getNextRoundId( + address base, + address quote, + uint80 roundId + ) external + view + returns ( + uint80 nextRoundId + ); + + // Feed management + + function proposeFeed( + address base, + address quote, + address aggregator + ) external; + + function confirmFeed( + address base, + address quote, + address aggregator + ) external; + + // Proposed aggregator + + function getProposedFeed( + address base, + address quote + ) + external + view + returns ( + AggregatorV2V3Interface proposedAggregator + ); + + function proposedGetRoundData( + address base, + address quote, + uint80 roundId + ) + external + view + returns ( + uint80 id, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + + function proposedLatestRoundData( + address base, + address quote + ) + external + view + returns ( + uint80 id, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + + // Phases + function getCurrentPhaseId( + address base, + address quote + ) + external + view + returns ( + uint16 currentPhaseId + ); +} diff --git a/contracts/src/v0.6/interfaces/FlagsInterface.sol b/contracts/src/v0.6/interfaces/FlagsInterface.sol new file mode 100644 index 00000000..ad39cae3 --- /dev/null +++ b/contracts/src/v0.6/interfaces/FlagsInterface.sol @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +interface FlagsInterface { + function getFlag(address) external view returns (bool); + function getFlags(address[] calldata) external view returns (bool[] memory); + function raiseFlag(address) external; + function raiseFlags(address[] calldata) external; + function lowerFlags(address[] calldata) external; + function setRaisingAccessController(address) external; +} diff 
--git a/contracts/src/v0.6/interfaces/KeeperCompatibleInterface.sol b/contracts/src/v0.6/interfaces/KeeperCompatibleInterface.sol new file mode 100644 index 00000000..f092fc9e --- /dev/null +++ b/contracts/src/v0.6/interfaces/KeeperCompatibleInterface.sol @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +interface KeeperCompatibleInterface { + + /** + * @notice method that is simulated by the keepers to see if any work actually + * needs to be performed. This method does does not actually need to be + * executable, and since it is only ever simulated it can consume lots of gas. + * @dev To ensure that it is never called, you may want to add the + * cannotExecute modifier from KeeperBase to your implementation of this + * method. + * @param checkData specified in the upkeep registration so it is always the + * same for a registered upkeep. This can easily be broken down into specific + * arguments using `abi.decode`, so multiple upkeeps can be registered on the + * same contract and easily differentiated by the contract. + * @return upkeepNeeded boolean to indicate whether the keeper should call + * performUpkeep or not. + * @return performData bytes that the keeper should call performUpkeep with, if + * upkeep is needed. If you would like to encode data to decode later, try + * `abi.encode`. + */ + function checkUpkeep( + bytes calldata checkData + ) + external + returns ( + bool upkeepNeeded, + bytes memory performData + ); + + /** + * @notice method that is actually executed by the keepers, via the registry. + * The data returned by the checkUpkeep simulation will be passed into + * this method to actually be executed. + * @dev The input to this method should not be trusted, and the caller of the + * method should not even be restricted to any single registry. Anyone should + * be able call it, and the input should be validated, there is no guarantee + * that the data passed in is the performData returned from checkUpkeep. 
This + * could happen due to malicious keepers, racing keepers, or simply a state + * change while the performUpkeep transaction is waiting for confirmation. + * Always validate the data passed in. + * @param performData is the data which was passed back from the checkData + * simulation. If it is encoded, it can easily be decoded into other types by + * calling `abi.decode`. This data should not be trusted, and should be + * validated against the contract's current state. + */ + function performUpkeep( + bytes calldata performData + ) external; +} diff --git a/contracts/src/v0.6/interfaces/LinkTokenInterface.sol b/contracts/src/v0.6/interfaces/LinkTokenInterface.sol new file mode 100644 index 00000000..eeb94441 --- /dev/null +++ b/contracts/src/v0.6/interfaces/LinkTokenInterface.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +interface LinkTokenInterface { + function allowance(address owner, address spender) external view returns (uint256 remaining); + function approve(address spender, uint256 value) external returns (bool success); + function balanceOf(address owner) external view returns (uint256 balance); + function decimals() external view returns (uint8 decimalPlaces); + function decreaseApproval(address spender, uint256 addedValue) external returns (bool success); + function increaseApproval(address spender, uint256 subtractedValue) external; + function name() external view returns (string memory tokenName); + function symbol() external view returns (string memory tokenSymbol); + function totalSupply() external view returns (uint256 totalTokensIssued); + function transfer(address to, uint256 value) external returns (bool success); + function transferAndCall(address to, uint256 value, bytes calldata data) external returns (bool success); + function transferFrom(address from, address to, uint256 value) external returns (bool success); +} diff --git a/contracts/src/v0.6/interfaces/OracleInterface.sol 
b/contracts/src/v0.6/interfaces/OracleInterface.sol new file mode 100644 index 00000000..96b49f03 --- /dev/null +++ b/contracts/src/v0.6/interfaces/OracleInterface.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +interface OracleInterface { + function fulfillOracleRequest( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + bytes32 data + ) external returns (bool); + function getAuthorizationStatus(address node) external view returns (bool); + function setFulfillmentPermission(address node, bool allowed) external; + function withdraw(address recipient, uint256 amount) external; + function withdrawable() external view returns (uint256); +} diff --git a/contracts/src/v0.6/interfaces/PluginRequestInterface.sol b/contracts/src/v0.6/interfaces/PluginRequestInterface.sol new file mode 100644 index 00000000..f4435202 --- /dev/null +++ b/contracts/src/v0.6/interfaces/PluginRequestInterface.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +interface PluginRequestInterface { + function oracleRequest( + address sender, + uint256 requestPrice, + bytes32 serviceAgreementID, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 dataVersion, + bytes calldata data + ) external; + + function cancelOracleRequest( + bytes32 requestId, + uint256 payment, + bytes4 callbackFunctionId, + uint256 expiration + ) external; +} diff --git a/contracts/src/v0.6/interfaces/PointerInterface.sol b/contracts/src/v0.6/interfaces/PointerInterface.sol new file mode 100644 index 00000000..e1cac19d --- /dev/null +++ b/contracts/src/v0.6/interfaces/PointerInterface.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +interface PointerInterface { + function getAddress() external view returns (address); +} diff --git a/contracts/src/v0.6/interfaces/WithdrawalInterface.sol 
b/contracts/src/v0.6/interfaces/WithdrawalInterface.sol new file mode 100644 index 00000000..23843b24 --- /dev/null +++ b/contracts/src/v0.6/interfaces/WithdrawalInterface.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +interface WithdrawalInterface { + /** + * @notice transfer PLI held by the contract belonging to msg.sender to + * another address + * @param recipient is the address to send the PLI to + * @param amount is the amount of PLI to send + */ + function withdraw(address recipient, uint256 amount) external; + + /** + * @notice query the available amount of PLI to withdraw by msg.sender + */ + function withdrawable() external view returns (uint256); +} diff --git a/contracts/src/v0.6/tests/AggregatorValidatorMock.sol b/contracts/src/v0.6/tests/AggregatorValidatorMock.sol new file mode 100644 index 00000000..7af4717c --- /dev/null +++ b/contracts/src/v0.6/tests/AggregatorValidatorMock.sol @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "../interfaces/AggregatorValidatorInterface.sol"; + +contract AggregatorValidatorMock is AggregatorValidatorInterface { + uint256 public previousRoundId; + int256 public previousAnswer; + uint256 public currentRoundId; + int256 public currentAnswer; + + event Validated( + uint256 _previousRoundId, + int256 indexed _previousAnswer, + uint256 _currentRoundId, + int256 indexed _currentAnswer + ); + + function validate( + uint256 _previousRoundId, + int256 _previousAnswer, + uint256 _currentRoundId, + int256 _currentAnswer + ) + external + override + returns (bool) + { + emit Validated( + _previousRoundId, + _previousAnswer, + _currentRoundId, + _currentAnswer + ); + return true; + } + +} diff --git a/contracts/src/v0.6/tests/BasicConsumer.sol b/contracts/src/v0.6/tests/BasicConsumer.sol new file mode 100644 index 00000000..f19133b4 --- /dev/null +++ b/contracts/src/v0.6/tests/BasicConsumer.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MIT +pragma solidity 
^0.6.0; + +import "./Consumer.sol"; + +contract BasicConsumer is Consumer { + + constructor(address _link, address _oracle, bytes32 _specId) public { + setPluginToken(_link); + setPluginOracle(_oracle); + specId = _specId; + } + +} diff --git a/contracts/src/v0.6/tests/BlockhashStoreTestHelper.sol b/contracts/src/v0.6/tests/BlockhashStoreTestHelper.sol new file mode 100644 index 00000000..89596254 --- /dev/null +++ b/contracts/src/v0.6/tests/BlockhashStoreTestHelper.sol @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "../BlockhashStore.sol"; + +contract BlockhashStoreTestHelper is BlockhashStore { + function godmodeSetHash(uint256 n, bytes32 h) public { + s_blockhashes[n] = h; + } +} diff --git a/contracts/src/v0.6/tests/CheckedMathTestHelper.sol b/contracts/src/v0.6/tests/CheckedMathTestHelper.sol new file mode 100644 index 00000000..1306c53d --- /dev/null +++ b/contracts/src/v0.6/tests/CheckedMathTestHelper.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../CheckedMath.sol"; + +contract CheckedMathTestHelper { + using CheckedMath for int256; + + function add(int256 a, int256 b) + external + pure + returns (int256 result, bool ok) + { + return a.add(b); + } + + function sub(int256 a, int256 b) + external + pure + returns (int256 result, bool ok) + { + return a.sub(b); + } + + function mul(int256 a, int256 b) + external + pure + returns (int256 result, bool ok) + { + return a.mul(b); + } + + function div(int256 a, int256 b) + external + pure + returns (int256 result, bool ok) + { + return a.div(b); + } + +} diff --git a/contracts/src/v0.6/tests/ConcreteSignedSafeMath.sol b/contracts/src/v0.6/tests/ConcreteSignedSafeMath.sol new file mode 100644 index 00000000..14be5468 --- /dev/null +++ b/contracts/src/v0.6/tests/ConcreteSignedSafeMath.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../SignedSafeMath.sol"; + +contract ConcreteSignedSafeMath { + 
function testAdd(int256 _a, int256 _b) + external + pure + returns (int256) + { + return SignedSafeMath.add(_a, _b); + } + + function testAvg(int256 _a, int256 _b) + external + pure + returns (int256) + { + return SignedSafeMath.avg(_a, _b); + } +} diff --git a/contracts/src/v0.6/tests/Consumer.sol b/contracts/src/v0.6/tests/Consumer.sol new file mode 100644 index 00000000..93359bf6 --- /dev/null +++ b/contracts/src/v0.6/tests/Consumer.sol @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../PluginClient.sol"; + +contract Consumer is PluginClient { + bytes32 internal specId; + bytes32 public currentPrice; + + event RequestFulfilled( + bytes32 indexed requestId, // User-defined ID + bytes32 indexed price + ); + + function requestEthereumPrice(string memory _currency, uint256 _payment) public { + requestEthereumPriceByCallback(_currency, _payment, address(this)); + } + + function requestEthereumPriceByCallback(string memory _currency, uint256 _payment, address _callback) public { + Plugin.Request memory req = buildPluginRequest(specId, _callback, this.fulfill.selector); + req.add("get", "https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR,JPY"); + string[] memory path = new string[](1); + path[0] = _currency; + req.addStringArray("path", path); + sendPluginRequest(req, _payment); + } + + function cancelRequest( + address _oracle, + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunctionId, + uint256 _expiration + ) public { + PluginRequestInterface requested = PluginRequestInterface(_oracle); + requested.cancelOracleRequest(_requestId, _payment, _callbackFunctionId, _expiration); + } + + function withdrawLink() public { + LinkTokenInterface _link = LinkTokenInterface(pluginTokenAddress()); + require(_link.transfer(msg.sender, _link.balanceOf(address(this))), "Unable to transfer"); + } + + function addExternalRequest(address _oracle, bytes32 _requestId) external { + addPluginExternalRequest(_oracle, 
_requestId); + } + + function fulfill(bytes32 _requestId, bytes32 _price) + public + recordPluginFulfillment(_requestId) + { + emit RequestFulfilled(_requestId, _price); + currentPrice = _price; + } + +} diff --git a/contracts/src/v0.6/tests/EmptyOracle.sol b/contracts/src/v0.6/tests/EmptyOracle.sol new file mode 100644 index 00000000..5a7cde8a --- /dev/null +++ b/contracts/src/v0.6/tests/EmptyOracle.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../interfaces/PluginRequestInterface.sol"; +import "../interfaces/OracleInterface.sol"; + +/* solhint-disable no-empty-blocks */ + +contract EmptyOracle is PluginRequestInterface, OracleInterface { + + function cancelOracleRequest(bytes32, uint256, bytes4, uint256) external override {} + function fulfillOracleRequest(bytes32, uint256, address, bytes4, uint256, bytes32) external override returns (bool) {} + function getAuthorizationStatus(address) external override view returns (bool) { return false; } + function onTokenTransfer(address, uint256, bytes calldata) external pure {} + function oracleRequest(address, uint256, bytes32, address, bytes4, uint256, uint256, bytes calldata) external override {} + function setFulfillmentPermission(address, bool) external override {} + function withdraw(address, uint256) external override {} + function withdrawable() external override view returns (uint256) {} + +} diff --git a/contracts/src/v0.6/tests/FlagsTestHelper.sol b/contracts/src/v0.6/tests/FlagsTestHelper.sol new file mode 100644 index 00000000..0ed37b6e --- /dev/null +++ b/contracts/src/v0.6/tests/FlagsTestHelper.sol @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../Flags.sol"; + +contract FlagsTestHelper { + Flags public flags; + + constructor( + address flagsContract + ) + public + { + flags = Flags(flagsContract); + } + + function getFlag( + address subject + ) + external + view + returns(bool) + { + return flags.getFlag(subject); + } + + 
function getFlags( + address[] calldata subjects + ) + external + view + returns(bool[] memory) + { + return flags.getFlags(subjects); + } + +} diff --git a/contracts/src/v0.6/tests/FluxAggregatorTestHelper.sol b/contracts/src/v0.6/tests/FluxAggregatorTestHelper.sol new file mode 100644 index 00000000..24af37f4 --- /dev/null +++ b/contracts/src/v0.6/tests/FluxAggregatorTestHelper.sol @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../FluxAggregator.sol"; + +contract FluxAggregatorTestHelper { + + uint80 public requestedRoundId; + + function readOracleRoundState(address _aggregator, address _oracle) + external + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + FluxAggregator(_aggregator).oracleRoundState(_oracle, 0); + } + + function readGetRoundData(address _aggregator, uint80 _roundID) + external + { + FluxAggregator(_aggregator).getRoundData(_roundID); + } + + function readLatestRoundData(address _aggregator) + external + { + FluxAggregator(_aggregator).latestRoundData(); + } + + function readLatestAnswer(address _aggregator) + external + { + FluxAggregator(_aggregator).latestAnswer(); + } + + function readLatestTimestamp(address _aggregator) + external + { + FluxAggregator(_aggregator).latestTimestamp(); + } + + function readLatestRound(address _aggregator) + external + { + FluxAggregator(_aggregator).latestRound(); + } + + function requestNewRound(address _aggregator) + external + { + requestedRoundId = FluxAggregator(_aggregator).requestNewRound(); + } + + function readGetAnswer(address _aggregator, uint256 _roundID) + external + { + FluxAggregator(_aggregator).getAnswer(_roundID); + } + + function readGetTimestamp(address _aggregator, uint256 _roundID) + external + { + FluxAggregator(_aggregator).getTimestamp(_roundID); + } + +} diff --git a/contracts/src/v0.6/tests/GasGuzzler.sol b/contracts/src/v0.6/tests/GasGuzzler.sol new file mode 100644 index 
00000000..5b30f1be --- /dev/null +++ b/contracts/src/v0.6/tests/GasGuzzler.sol @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +contract GasGuzzler { + fallback() external payable { + while (true) { + } + } +} + diff --git a/contracts/src/v0.6/tests/GasGuzzlingConsumer.sol b/contracts/src/v0.6/tests/GasGuzzlingConsumer.sol new file mode 100644 index 00000000..e46cb40c --- /dev/null +++ b/contracts/src/v0.6/tests/GasGuzzlingConsumer.sol @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "./Consumer.sol"; + +contract GasGuzzlingConsumer is Consumer{ + + constructor(address _link, address _oracle, bytes32 _specId) public { + setPluginToken(_link); + setPluginOracle(_oracle); + specId = _specId; + } + + function gassyRequestEthereumPrice(uint256 _payment) public { + Plugin.Request memory req = buildPluginRequest(specId, address(this), this.gassyFulfill.selector); + req.add("get", "https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR,JPY"); + string[] memory path = new string[](1); + path[0] = "USD"; + req.addStringArray("path", path); + sendPluginRequest(req, _payment); + } + + function gassyFulfill(bytes32 _requestId, bytes32 _price) + public + recordPluginFulfillment(_requestId) + { + while(true){ + } + } + + function gassyMultiWordRequest(uint256 _payment) public { + Plugin.Request memory req = buildPluginRequest(specId, address(this), this.gassyMultiWordFulfill.selector); + req.add("get", "https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR,JPY"); + string[] memory path = new string[](1); + path[0] = "USD"; + req.addStringArray("path", path); + sendPluginRequest(req, _payment); + } + + function gassyMultiWordFulfill(bytes32 _requestId, bytes memory _price) + public + recordPluginFulfillment(_requestId) + { + while(true){ + } + } +} \ No newline at end of file diff --git a/contracts/src/v0.6/tests/KeeperCompatibleTestHelper.sol 
b/contracts/src/v0.6/tests/KeeperCompatibleTestHelper.sol new file mode 100644 index 00000000..5c760bfa --- /dev/null +++ b/contracts/src/v0.6/tests/KeeperCompatibleTestHelper.sol @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../KeeperCompatible.sol"; + +contract KeeperCompatibleTestHelper is KeeperCompatible { + function checkUpkeep(bytes calldata) external override returns (bool, bytes memory) {} + + function performUpkeep(bytes calldata) external override {} + + function verifyCannotExecute() public view cannotExecute {} +} diff --git a/contracts/src/v0.6/tests/MaliciousMultiWordConsumer.sol b/contracts/src/v0.6/tests/MaliciousMultiWordConsumer.sol new file mode 100644 index 00000000..ffaebbb7 --- /dev/null +++ b/contracts/src/v0.6/tests/MaliciousMultiWordConsumer.sol @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../PluginClient.sol"; +import "../vendor/SafeMathPlugin.sol"; + +contract MaliciousMultiWordConsumer is PluginClient { + using SafeMathPlugin for uint256; + + uint256 constant private ORACLE_PAYMENT = 1 * PLI; + uint256 private expiration; + + constructor(address _link, address _oracle) public payable { + setPluginToken(_link); + setPluginOracle(_oracle); + } + + receive() external payable {} // solhint-disable-line no-empty-blocks + + function requestData(bytes32 _id, bytes memory _callbackFunc) public { + Plugin.Request memory req = buildPluginRequest(_id, address(this), bytes4(keccak256(_callbackFunc))); + expiration = now.add(5 minutes); // solhint-disable-line not-rely-on-time + sendPluginRequest(req, ORACLE_PAYMENT); + } + + function assertFail(bytes32, bytes memory) public pure { + assert(1 == 2); + } + + function cancelRequestOnFulfill(bytes32 _requestId, bytes memory) public { + cancelPluginRequest( + _requestId, + ORACLE_PAYMENT, + this.cancelRequestOnFulfill.selector, + expiration); + } + + function remove() public { + selfdestruct(address(0)); + } + + function 
stealEthCall(bytes32 _requestId, bytes memory) public recordPluginFulfillment(_requestId) { + (bool success,) = address(this).call.value(100)(""); // solhint-disable-line avoid-call-value + require(success, "Call failed"); + } + + function stealEthSend(bytes32 _requestId, bytes memory) public recordPluginFulfillment(_requestId) { + // solhint-disable-next-line check-send-result + bool success = address(this).send(100); // solhint-disable-line multiple-sends + require(success, "Send failed"); + } + + function stealEthTransfer(bytes32 _requestId, bytes memory) public recordPluginFulfillment(_requestId) { + address(this).transfer(100); + } + + function doesNothing(bytes32, bytes memory) public pure {} // solhint-disable-line no-empty-blocks +} diff --git a/contracts/src/v0.6/tests/MedianTestHelper.sol b/contracts/src/v0.6/tests/MedianTestHelper.sol new file mode 100644 index 00000000..5386790f --- /dev/null +++ b/contracts/src/v0.6/tests/MedianTestHelper.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../Median.sol"; + +contract MedianTestHelper { + function publicGet(int256[] memory list) + public + pure + returns (int256) + { + return Median.calculate(list); + } + + function publicQuickselectTwo(int256[] memory list, uint256 k1, uint256 k2) + public + pure + returns (int256, int256) + { + return Median.quickselectTwo(list, 0, list.length - 1, k1, k2); + } +} diff --git a/contracts/src/v0.6/tests/MockETHLINKAggregator.sol b/contracts/src/v0.6/tests/MockETHLINKAggregator.sol new file mode 100644 index 00000000..7cc268a5 --- /dev/null +++ b/contracts/src/v0.6/tests/MockETHLINKAggregator.sol @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../interfaces/AggregatorV3Interface.sol"; + +contract MockETHPLIAggregator is AggregatorV3Interface { + int256 public answer; + + constructor(int256 _answer) public { + answer = _answer; + } + + function decimals() external view override returns (uint8) { + 
return 18; + } + + function description() external view override returns (string memory) { + return "MockETHPLIAggregator"; + } + + function version() external view override returns (uint256) { + return 1; + } + + function getRoundData(uint80 _roundId) + external + view + override + returns ( + uint80 roundId, + int256 ans, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return (1, answer, block.timestamp, block.timestamp, 1); + } + + function latestRoundData() + external + view + override + returns ( + uint80 roundId, + int256 ans, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return (1, answer, block.timestamp, block.timestamp, 1); + } +} diff --git a/contracts/src/v0.6/tests/MockGASAggregator.sol b/contracts/src/v0.6/tests/MockGASAggregator.sol new file mode 100644 index 00000000..271e79e1 --- /dev/null +++ b/contracts/src/v0.6/tests/MockGASAggregator.sol @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../interfaces/AggregatorV3Interface.sol"; + +contract MockGASAggregator is AggregatorV3Interface { + int256 public answer; + constructor (int256 _answer) public { + answer = _answer; + } + function decimals() external override view returns (uint8) { + return 18; + } + function description() external override view returns (string memory) { + return "MockGASAggregator"; + } + function version() external override view returns (uint256) { + return 1; + } + function getRoundData(uint80 _roundId) external override view returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) { + return (1, answer, block.timestamp, block.timestamp, 1); + } + function latestRoundData() external override view returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) { + return (1, answer, block.timestamp, block.timestamp, 1); + } +} \ No newline at end of file diff --git 
a/contracts/src/v0.6/tests/MockOracle.sol b/contracts/src/v0.6/tests/MockOracle.sol new file mode 100644 index 00000000..b91f7e31 --- /dev/null +++ b/contracts/src/v0.6/tests/MockOracle.sol @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../LinkTokenReceiver.sol"; +import "../interfaces/PluginRequestInterface.sol"; +import "../interfaces/LinkTokenInterface.sol"; +import "../vendor/SafeMathPlugin.sol"; + +/** + * @title The Plugin Mock Oracle contract + * @notice Plugin smart contract developers can use this to test their contracts + */ +contract MockOracle is PluginRequestInterface, LinkTokenReceiver { + using SafeMathPlugin for uint256; + + uint256 constant public EXPIRY_TIME = 5 minutes; + uint256 constant private MINIMUM_CONSUMER_GAS_LIMIT = 400000; + + struct Request { + address callbackAddr; + bytes4 callbackFunctionId; + } + + LinkTokenInterface internal LinkToken; + mapping(bytes32 => Request) private commitments; + + event OracleRequest( + bytes32 indexed specId, + address requester, + bytes32 requestId, + uint256 payment, + address callbackAddr, + bytes4 callbackFunctionId, + uint256 cancelExpiration, + uint256 dataVersion, + bytes data + ); + + event CancelOracleRequest( + bytes32 indexed requestId + ); + + /** + * @notice Deploy with the address of the PLI token + * @dev Sets the LinkToken address for the imported LinkTokenInterface + * @param _link The address of the PLI token + */ + constructor( + address _link + ) + public + { + LinkToken = LinkTokenInterface(_link); // external but already deployed and unalterable + } + + /** + * @notice Creates the Plugin request + * @dev Stores the hash of the params as the on-chain commitment for the request. + * Emits OracleRequest event for the Plugin node to detect. 
+ * @param _sender The sender of the request + * @param _payment The amount of payment given (specified in wei) + * @param _specId The Job Specification ID + * @param _callbackAddress The callback address for the response + * @param _callbackFunctionId The callback function ID for the response + * @param _nonce The nonce sent by the requester + * @param _dataVersion The specified data version + * @param _data The CBOR payload of the request + */ + function oracleRequest( + address _sender, + uint256 _payment, + bytes32 _specId, + address _callbackAddress, + bytes4 _callbackFunctionId, + uint256 _nonce, + uint256 _dataVersion, + bytes calldata _data + ) + external + override + onlyPLI() + checkCallbackAddress(_callbackAddress) + { + bytes32 requestId = keccak256(abi.encodePacked(_sender, _nonce)); + require(commitments[requestId].callbackAddr == address(0), "Must use a unique ID"); + // solhint-disable-next-line not-rely-on-time + uint256 expiration = now.add(EXPIRY_TIME); + + commitments[requestId] = Request( + _callbackAddress, + _callbackFunctionId + ); + + emit OracleRequest( + _specId, + _sender, + requestId, + _payment, + _callbackAddress, + _callbackFunctionId, + expiration, + _dataVersion, + _data); + } + + /** + * @notice Called by the Plugin node to fulfill requests + * @dev Given params must hash back to the commitment stored from `oracleRequest`. + * Will call the callback address' callback function without bubbling up error + * checking in a `require` so that the node can get paid. 
+ * @param _requestId The fulfillment request ID that must match the requester's + * @param _data The data to return to the consuming contract + * @return Status if the external call was successful + */ + function fulfillOracleRequest( + bytes32 _requestId, + bytes32 _data + ) + external + isValidRequest(_requestId) + returns ( + bool + ) + { + Request memory req = commitments[_requestId]; + delete commitments[_requestId]; + require(gasleft() >= MINIMUM_CONSUMER_GAS_LIMIT, "Must provide consumer enough gas"); + // All updates to the oracle's fulfillment should come before calling the + // callback(addr+functionId) as it is untrusted. + // See: https://solidity.readthedocs.io/en/develop/security-considerations.html#use-the-checks-effects-interactions-pattern + (bool success, ) = req.callbackAddr.call(abi.encodeWithSelector(req.callbackFunctionId, _requestId, _data)); // solhint-disable-line avoid-low-level-calls + return success; + } + + /** + * @notice Allows requesters to cancel requests sent to this oracle contract. Will transfer the PLI + * sent for the request back to the requester's address. + * @dev Given params must hash to a commitment stored on the contract in order for the request to be valid + * Emits CancelOracleRequest event. 
+ * @param _requestId The request ID + * @param _payment The amount of payment given (specified in wei) + * @param _expiration The time of the expiration for the request + */ + function cancelOracleRequest( + bytes32 _requestId, + uint256 _payment, + bytes4, + uint256 _expiration + ) + external + override + { + require(commitments[_requestId].callbackAddr != address(0), "Must use a unique ID"); + // solhint-disable-next-line not-rely-on-time + require(_expiration <= now, "Request is not expired"); + + delete commitments[_requestId]; + emit CancelOracleRequest(_requestId); + + assert(LinkToken.transfer(msg.sender, _payment)); + } + + /** + * @notice Returns the address of the PLI token + * @dev This is the public implementation for pluginTokenAddress, which is + * an internal method of the PluginClient contract + */ + function getPluginToken() + public + view + override + returns ( + address + ) + { + return address(LinkToken); + } + + // MODIFIERS + + /** + * @dev Reverts if request ID does not exist + * @param _requestId The given request ID to check in stored `commitments` + */ + modifier isValidRequest( + bytes32 _requestId + ) { + require(commitments[_requestId].callbackAddr != address(0), "Must have a valid requestId"); + _; + } + + + /** + * @dev Reverts if the callback address is the PLI token + * @param _to The callback address + */ + modifier checkCallbackAddress( + address _to + ) { + require(_to != address(LinkToken), "Cannot callback to PLI"); + _; + } + +} diff --git a/contracts/src/v0.6/tests/MockV3Aggregator.sol b/contracts/src/v0.6/tests/MockV3Aggregator.sol new file mode 100644 index 00000000..b382281e --- /dev/null +++ b/contracts/src/v0.6/tests/MockV3Aggregator.sol @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../interfaces/AggregatorV2V3Interface.sol"; + +/** + * @title MockV3Aggregator + * @notice Based on the FluxAggregator contract + * @notice Use this contract when you need to test + * other 
contract's ability to read data from an + * aggregator contract, but how the aggregator got + * its answer is unimportant + */ +contract MockV3Aggregator is AggregatorV2V3Interface { + uint256 constant public override version = 0; + + uint8 public override decimals; + int256 public override latestAnswer; + uint256 public override latestTimestamp; + uint256 public override latestRound; + + mapping(uint256 => int256) public override getAnswer; + mapping(uint256 => uint256) public override getTimestamp; + mapping(uint256 => uint256) private getStartedAt; + + constructor( + uint8 _decimals, + int256 _initialAnswer + ) public { + decimals = _decimals; + updateAnswer(_initialAnswer); + } + + function updateAnswer( + int256 _answer + ) public { + latestAnswer = _answer; + latestTimestamp = block.timestamp; + latestRound++; + getAnswer[latestRound] = _answer; + getTimestamp[latestRound] = block.timestamp; + getStartedAt[latestRound] = block.timestamp; + } + + function updateRoundData( + uint80 _roundId, + int256 _answer, + uint256 _timestamp, + uint256 _startedAt + ) public { + latestRound = _roundId; + latestAnswer = _answer; + latestTimestamp = _timestamp; + getAnswer[latestRound] = _answer; + getTimestamp[latestRound] = _timestamp; + getStartedAt[latestRound] = _startedAt; + } + + function getRoundData(uint80 _roundId) + external + view + override + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return ( + _roundId, + getAnswer[_roundId], + getStartedAt[_roundId], + getTimestamp[_roundId], + _roundId + ); + } + + function latestRoundData() + external + view + override + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return ( + uint80(latestRound), + getAnswer[latestRound], + getStartedAt[latestRound], + getTimestamp[latestRound], + uint80(latestRound) + ); + } + + function description() + external + view + override + 
returns (string memory) + { + return "v0.6/tests/MockV3Aggregator.sol"; + } +} \ No newline at end of file diff --git a/contracts/src/v0.6/tests/MultiWordConsumer.sol b/contracts/src/v0.6/tests/MultiWordConsumer.sol new file mode 100644 index 00000000..9c7bb2ea --- /dev/null +++ b/contracts/src/v0.6/tests/MultiWordConsumer.sol @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../PluginClient.sol"; + +contract MultiWordConsumer is PluginClient{ + bytes32 internal specId; + bytes public currentPrice; + + bytes32 public first; + bytes32 public second; + + event RequestFulfilled( + bytes32 indexed requestId, // User-defined ID + bytes indexed price + ); + + event RequestMultipleFulfilled( + bytes32 indexed requestId, + bytes32 indexed first, + bytes32 indexed second + ); + + constructor(address _link, address _oracle, bytes32 _specId) public { + setPluginToken(_link); + setPluginOracle(_oracle); + specId = _specId; + } + + function requestEthereumPrice(string memory _currency, uint256 _payment) public { + requestEthereumPriceByCallback(_currency, _payment, address(this)); + } + + function requestEthereumPriceByCallback(string memory _currency, uint256 _payment, address _callback) public { + Plugin.Request memory req = buildPluginRequest(specId, _callback, this.fulfillBytes.selector); + req.add("get", "https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR,JPY"); + string[] memory path = new string[](1); + path[0] = _currency; + req.addStringArray("path", path); + sendPluginRequest(req, _payment); + } + + function requestMultipleParameters(string memory _currency, uint256 _payment) public { + Plugin.Request memory req = buildPluginRequest(specId, address(this), this.fulfillMultipleParameters.selector); + req.add("get", "https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR,JPY"); + string[] memory path = new string[](1); + path[0] = _currency; + req.addStringArray("path", path); + sendPluginRequest(req, 
_payment); + } + + function cancelRequest( + address _oracle, + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunctionId, + uint256 _expiration + ) public { + PluginRequestInterface requested = PluginRequestInterface(_oracle); + requested.cancelOracleRequest(_requestId, _payment, _callbackFunctionId, _expiration); + } + + function withdrawLink() public { + LinkTokenInterface _link = LinkTokenInterface(pluginTokenAddress()); + require(_link.transfer(msg.sender, _link.balanceOf(address(this))), "Unable to transfer"); + } + + function addExternalRequest(address _oracle, bytes32 _requestId) external { + addPluginExternalRequest(_oracle, _requestId); + } + + function fulfillMultipleParameters(bytes32 _requestId, bytes32 _first, bytes32 _second) + public + recordPluginFulfillment(_requestId) + { + emit RequestMultipleFulfilled(_requestId, _first, _second); + first = _first; + second = _second; + } + + function fulfillBytes(bytes32 _requestId, bytes memory _price) + public + recordPluginFulfillment(_requestId) + { + emit RequestFulfilled(_requestId, _price); + currentPrice = _price; + } +} \ No newline at end of file diff --git a/contracts/src/v0.6/tests/PluginClientTestHelper.sol b/contracts/src/v0.6/tests/PluginClientTestHelper.sol new file mode 100644 index 00000000..93c5df41 --- /dev/null +++ b/contracts/src/v0.6/tests/PluginClientTestHelper.sol @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../PluginClient.sol"; +import "../vendor/SafeMathPlugin.sol"; + +contract PluginClientTestHelper is PluginClient { + using SafeMathPlugin for uint256; + + constructor( + address _link, + address _oracle + ) public { + setPluginToken(_link); + setPluginOracle(_oracle); + } + + event Request( + bytes32 id, + address callbackAddress, + bytes4 callbackfunctionSelector, + bytes data + ); + event LinkAmount( + uint256 amount + ); + + function publicNewRequest( + bytes32 _id, + address _address, + bytes memory _fulfillmentSignature + ) 
+ public + { + Plugin.Request memory req = buildPluginRequest( + _id, _address, bytes4(keccak256(_fulfillmentSignature))); + emit Request( + req.id, + req.callbackAddress, + req.callbackFunctionId, + req.buf.buf + ); + } + + function publicRequest( + bytes32 _id, + address _address, + bytes memory _fulfillmentSignature, + uint256 _wei + ) + public + { + Plugin.Request memory req = buildPluginRequest( + _id, _address, bytes4(keccak256(_fulfillmentSignature))); + sendPluginRequest(req, _wei); + } + + function publicRequestRunTo( + address _oracle, + bytes32 _id, + address _address, + bytes memory _fulfillmentSignature, + uint256 _wei + ) + public + { + Plugin.Request memory run = buildPluginRequest(_id, _address, bytes4(keccak256(_fulfillmentSignature))); + sendPluginRequestTo(_oracle, run, _wei); + } + + function publicCancelRequest( + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunctionId, + uint256 _expiration + ) public { + cancelPluginRequest(_requestId, _payment, _callbackFunctionId, _expiration); + } + + function publicPluginToken() public view returns (address) { + return pluginTokenAddress(); + } + + function publicFulfillPluginRequest(bytes32 _requestId, bytes32) public { + fulfillRequest(_requestId, bytes32(0)); + } + + function fulfillRequest(bytes32 _requestId, bytes32) + public + { + validatePluginCallback(_requestId); + } + + function publicPLI( + uint256 _amount + ) + public + { + emit LinkAmount(PLI.mul(_amount)); + } + + function publicOracleAddress() + public + view + returns ( + address + ) + { + return pluginOracleAddress(); + } + + function publicAddExternalRequest( + address _oracle, + bytes32 _requestId + ) + public + { + addPluginExternalRequest(_oracle, _requestId); + } +} diff --git a/contracts/src/v0.6/tests/PluginTestHelper.sol b/contracts/src/v0.6/tests/PluginTestHelper.sol new file mode 100644 index 00000000..9ef53741 --- /dev/null +++ b/contracts/src/v0.6/tests/PluginTestHelper.sol @@ -0,0 +1,78 @@ +// 
SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../Plugin.sol"; +import "../vendor/CBORPlugin.sol"; +import "../vendor/BufferPlugin.sol"; + +contract PluginTestHelper { + using Plugin for Plugin.Request; + using CBORPlugin for BufferPlugin.buffer; + + Plugin.Request private req; + + event RequestData(bytes payload); + + function closeEvent() public { + emit RequestData(req.buf.buf); + } + + function setBuffer(bytes memory data) public { + Plugin.Request memory r2 = req; + r2.setBuffer(data); + req = r2; + } + + function add(string memory _key, string memory _value) public { + Plugin.Request memory r2 = req; + r2.add(_key, _value); + req = r2; + } + + function addBytes(string memory _key, bytes memory _value) public { + Plugin.Request memory r2 = req; + r2.addBytes(_key, _value); + req = r2; + } + + function addInt(string memory _key, int256 _value) public { + Plugin.Request memory r2 = req; + r2.addInt(_key, _value); + req = r2; + } + + function addUint(string memory _key, uint256 _value) public { + Plugin.Request memory r2 = req; + r2.addUint(_key, _value); + req = r2; + } + + // Temporarily have method receive bytes32[] memory until experimental + // string[] memory can be invoked from truffle tests. 
+ function addStringArray(string memory _key, bytes32[] memory _values) public { + string[] memory strings = new string[](_values.length); + for (uint256 i = 0; i < _values.length; i++) { + strings[i] = bytes32ToString(_values[i]); + } + Plugin.Request memory r2 = req; + r2.addStringArray(_key, strings); + req = r2; + } + + function bytes32ToString(bytes32 x) private pure returns (string memory) { + bytes memory bytesString = new bytes(32); + uint charCount = 0; + for (uint j = 0; j < 32; j++) { + byte char = byte(bytes32(uint(x) * 2 ** (8 * j))); + if (char != 0) { + bytesString[charCount] = char; + charCount++; + } + } + bytes memory bytesStringTrimmed = new bytes(charCount); + for (uint j = 0; j < charCount; j++) { + bytesStringTrimmed[j] = bytesString[j]; + } + return string(bytesStringTrimmed); + } +} diff --git a/contracts/src/v0.6/tests/Reverter.sol b/contracts/src/v0.6/tests/Reverter.sol new file mode 100644 index 00000000..a262fbe5 --- /dev/null +++ b/contracts/src/v0.6/tests/Reverter.sol @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +contract Reverter { + + fallback() external payable { + require(false, "Raised by Reverter.sol"); + } + +} diff --git a/contracts/src/v0.6/tests/TestAPIConsumer.sol b/contracts/src/v0.6/tests/TestAPIConsumer.sol new file mode 100644 index 00000000..20e0c06c --- /dev/null +++ b/contracts/src/v0.6/tests/TestAPIConsumer.sol @@ -0,0 +1,123 @@ +/** This example code is designed to quickly deploy an example contract using Remix. + * If you have never used Remix, try our example walkthrough: https://docs.chain.link/docs/example-walkthrough + * You will need testnet ETH and PLI. 
+ * - Kovan ETH faucet: https://faucet.kovan.network/ + * - Kovan PLI faucet: https://kovan.chain.link/ + */ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +import "../interfaces/LinkTokenInterface.sol"; +import "../PluginClient.sol"; +import "../vendor/Ownable.sol"; + +/** + * @title TestAPIConsumer is an example contract which requests data from + * the Plugin network + * @dev This contract is designed to work on multiple networks, including + * local test networks + */ +contract TestAPIConsumer is PluginClient, Ownable { + uint256 public currentRoundID = 0; + uint256 public data; + bytes4 public selector; + + event PerfMetricsEvent(uint256 roundID, bytes32 requestId, uint256 timestamp); + + + /** + * @notice Deploy the contract with a specified address for the PLI + * and Oracle contract addresses + * @dev Sets the storage for the specified addresses + * @param _link The address of the PLI token contract + */ + constructor(address _link) public { + if (_link == address(0)) { + setPublicPluginToken(); + } else { + setPluginToken(_link); + } + } + + /** + * @notice Returns the address of the PLI token + * @dev This is the public implementation for pluginTokenAddress, which is + * an internal method of the PluginClient contract + */ + function getPluginToken() public view returns (address) { + return pluginTokenAddress(); + } + + /** + * @notice Creates a request to the specified Oracle contract address + * @dev This function ignores the stored Oracle contract address and + * will instead send the request to the address specified + * @param _oracle The Oracle contract address to send the request to + * @param _jobId The bytes32 JobID to be executed + * @param _url The URL to fetch data from + * @param _path The dot-delimited path to parse of the response + * @param _times The number to multiply the result by + */ + function createRequestTo( + address _oracle, + bytes32 _jobId, + uint256 _payment, + string memory _url, + string memory _path, + int256 
_times + ) + public + onlyOwner + returns (bytes32 requestId) + { + selector = this.fulfill.selector; + Plugin.Request memory req = buildPluginRequest(_jobId, address(this), this.fulfill.selector); + req.add("get", _url); + req.add("path", _path); + req.addInt("times", _times); + requestId = sendPluginRequestTo(_oracle, req, _payment); + } + + /** + * @notice The fulfill method from requests created by this contract + * @dev The recordPluginFulfillment protects this function from being called + * by anyone other than the oracle address that the request was sent to + * @param _requestId The ID that was generated for the request + * @param _data The answer provided by the oracle + */ + function fulfill(bytes32 _requestId, uint256 _data) + public + { + data = _data; + currentRoundID += 1; + emit PerfMetricsEvent(currentRoundID, _requestId, block.timestamp); + } + + /** + * @notice Allows the owner to withdraw any PLI balance on the contract + */ + function withdrawLink() public onlyOwner { + LinkTokenInterface link = LinkTokenInterface(pluginTokenAddress()); + require(link.transfer(msg.sender, link.balanceOf(address(this))), "Unable to transfer"); + } + + /** + * @notice Call this method if no response is received within 5 minutes + * @param _requestId The ID that was generated for the request to cancel + * @param _payment The payment specified for the request to cancel + * @param _callbackFunctionId The bytes4 callback function ID specified for + * the request to cancel + * @param _expiration The expiration generated for the request to cancel + */ + function cancelRequest( + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunctionId, + uint256 _expiration + ) + public + onlyOwner + { + cancelPluginRequest(_requestId, _payment, _callbackFunctionId, _expiration); + } +} diff --git a/contracts/src/v0.6/tests/VRFConsumer.sol b/contracts/src/v0.6/tests/VRFConsumer.sol new file mode 100644 index 00000000..ff0a09fb --- /dev/null +++ 
b/contracts/src/v0.6/tests/VRFConsumer.sol @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "../interfaces/LinkTokenInterface.sol"; +import "../VRFCoordinator.sol"; +import "../VRFConsumerBase.sol"; + +contract VRFConsumer is VRFConsumerBase { + + uint256 public currentRoundID = 0; + uint256 public randomnessOutput; + bytes32 public requestId; + + constructor(address _vrfCoordinator, address _link) public + // solhint-disable-next-line no-empty-blocks + VRFConsumerBase(_vrfCoordinator, _link) { /* empty */ } + + function fulfillRandomness(bytes32 _requestId, uint256 _randomness) + internal override + { + randomnessOutput = _randomness; + requestId = _requestId; + currentRoundID += 1; + } + + function testRequestRandomness(bytes32 _keyHash, uint256 _fee) + external returns (bytes32 requestId) + { + return requestRandomness(_keyHash, _fee); + } +} diff --git a/contracts/src/v0.6/tests/VRFCoordinatorMock.sol b/contracts/src/v0.6/tests/VRFCoordinatorMock.sol new file mode 100644 index 00000000..22dff886 --- /dev/null +++ b/contracts/src/v0.6/tests/VRFCoordinatorMock.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "../interfaces/LinkTokenInterface.sol"; +import "../VRFConsumerBase.sol"; + +contract VRFCoordinatorMock { + + LinkTokenInterface public PLI; + + event RandomnessRequest(address indexed sender, bytes32 indexed keyHash, uint256 indexed seed); + + constructor(address linkAddress) public { + PLI = LinkTokenInterface(linkAddress); + } + + function onTokenTransfer(address sender, uint256 fee, bytes memory _data) + public + onlyPLI + { + (bytes32 keyHash, uint256 seed) = abi.decode(_data, (bytes32, uint256)); + emit RandomnessRequest(sender, keyHash, seed); + } + + function callBackWithRandomness( + bytes32 requestId, + uint256 randomness, + address consumerContract + ) public { + VRFConsumerBase v; + bytes memory resp = abi.encodeWithSelector(v.rawFulfillRandomness.selector, requestId, 
randomness); + uint256 b = 206000; + require(gasleft() >= b, "not enough gas for consumer"); + (bool success,) = consumerContract.call(resp); + } + + modifier onlyPLI() { + require(msg.sender == address(PLI), "Must use PLI token"); + _; + } +} \ No newline at end of file diff --git a/contracts/src/v0.6/tests/VRFRequestIDBaseTestHelper.sol b/contracts/src/v0.6/tests/VRFRequestIDBaseTestHelper.sol new file mode 100644 index 00000000..f1b81f21 --- /dev/null +++ b/contracts/src/v0.6/tests/VRFRequestIDBaseTestHelper.sol @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "../VRFRequestIDBase.sol"; + +contract VRFRequestIDBaseTestHelper is VRFRequestIDBase { + + function makeVRFInputSeed_(bytes32 _keyHash, uint256 _userSeed, + address _requester, uint256 _nonce) + public pure returns (uint256) { + return makeVRFInputSeed(_keyHash, _userSeed, _requester, _nonce); + } + + function makeRequestId_( + bytes32 _keyHash, uint256 _vRFInputSeed) public pure returns (bytes32) { + return makeRequestId(_keyHash, _vRFInputSeed); + } +} diff --git a/contracts/src/v0.6/tests/VRFTestHelper.sol b/contracts/src/v0.6/tests/VRFTestHelper.sol new file mode 100644 index 00000000..d79e71dc --- /dev/null +++ b/contracts/src/v0.6/tests/VRFTestHelper.sol @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.6.6; + +import "../VRF.sol"; + +/** *********************************************************************** + @notice Testing harness for VRF.sol, exposing its internal methods. Not to + @notice be used for production. 
+*/ +contract VRFTestHelper is VRF { + function bigModExp_(uint256 base, uint256 exponent) public view returns (uint256) { + return super.bigModExp(base, exponent); + } + function squareRoot_(uint256 x) public view returns (uint256) { + return super.squareRoot(x); + } + function ySquared_(uint256 x) public pure returns (uint256) { + return super.ySquared(x); + } + function fieldHash_(bytes memory b) public pure returns (uint256) { + return super.fieldHash(b); + } + function hashToCurve_(uint256[2] memory pk, uint256 x) public view returns(uint256[2] memory) { + return super.hashToCurve(pk, x); + } + function ecmulVerify_(uint256[2] memory x, uint256 scalar, uint256[2] memory q) public pure returns (bool) { + return super.ecmulVerify(x, scalar, q); + } + function projectiveECAdd_(uint256 px, uint256 py, uint256 qx, uint256 qy) public pure returns(uint256, uint256, uint256) { + return super.projectiveECAdd(px, py, qx, qy); + } + function affineECAdd_(uint256[2] memory p1, uint256[2] memory p2, uint256 invZ) public pure returns (uint256[2] memory) { + return super.affineECAdd(p1, p2, invZ); + } + function verifyLinearCombinationWithGenerator_(uint256 c, uint256[2] memory p, uint256 s, address lcWitness) public pure returns (bool) { + return super.verifyLinearCombinationWithGenerator(c, p, s, lcWitness); + } + function linearCombination_(uint256 c, uint256[2] memory p1, uint256[2] memory cp1Witness, uint256 s, uint256[2] memory p2, uint256[2] memory sp2Witness, uint256 zInv) public pure returns (uint256[2] memory) { + return super.linearCombination(c, p1, cp1Witness, s, p2, sp2Witness, zInv); + } + function scalarFromCurvePoints_(uint256[2] memory hash, uint256[2] memory pk, uint256[2] memory gamma, address uWitness, uint256[2] memory v) public pure returns (uint256) { + return super.scalarFromCurvePoints(hash, pk, gamma, uWitness, v); + } + function verifyVRFProof_( + uint256[2] memory pk, uint256[2] memory gamma, uint256 c, uint256 s, + uint256 seed, address 
uWitness, uint256[2] memory cGammaWitness, + uint256[2] memory sHashWitness, uint256 zInv) + public view { + super.verifyVRFProof(pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv); + } + function randomValueFromVRFProof_(bytes memory proof) + public view returns (uint256 output) { + return super.randomValueFromVRFProof(proof); + } +} diff --git a/contracts/src/v0.6/vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol b/contracts/src/v0.6/vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol new file mode 100644 index 00000000..fb48f360 --- /dev/null +++ b/contracts/src/v0.6/vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol @@ -0,0 +1,153 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE +// SPDX-License-Identifier: BUSL-1.1 + +pragma solidity >=0.4.21 <0.9.0; + +/** + * @title System level functionality + * @notice For use by contracts to interact with core L2-specific functionality. + * Precompiled contract that exists in every Arbitrum chain at address(100), 0x0000000000000000000000000000000000000064. 
+ */ +interface ArbSys { + /** + * @notice Get Arbitrum block number (distinct from L1 block number; Arbitrum genesis block has block number 0) + * @return block number as int + */ + function arbBlockNumber() external view returns (uint256); + + /** + * @notice Get Arbitrum block hash (reverts unless currentBlockNum-256 <= arbBlockNum < currentBlockNum) + * @return block hash + */ + function arbBlockHash(uint256 arbBlockNum) external view returns (bytes32); + + /** + * @notice Gets the rollup's unique chain identifier + * @return Chain identifier as int + */ + function arbChainID() external view returns (uint256); + + /** + * @notice Get internal version number identifying an ArbOS build + * @return version number as int + */ + function arbOSVersion() external view returns (uint256); + + /** + * @notice Returns 0 since Nitro has no concept of storage gas + * @return uint 0 + */ + function getStorageGasAvailable() external view returns (uint256); + + /** + * @notice (deprecated) check if current call is top level (meaning it was triggered by an EoA or a L1 contract) + * @dev this call has been deprecated and may be removed in a future release + * @return true if current execution frame is not a call by another L2 contract + */ + function isTopLevelCall() external view returns (bool); + + /** + * @notice map L1 sender contract address to its L2 alias + * @param sender sender address + * @param unused argument no longer used + * @return aliased sender address + */ + function mapL1SenderContractAddressToL2Alias(address sender, address unused) + external + pure + returns (address); + + /** + * @notice check if the caller (of this caller of this) is an aliased L1 contract address + * @return true iff the caller's address is an alias for an L1 contract address + */ + function wasMyCallersAddressAliased() external view returns (bool); + + /** + * @notice return the address of the caller (of this caller of this), without applying L1 contract address aliasing + * @return 
address of the caller's caller, without applying L1 contract address aliasing + */ + function myCallersAddressWithoutAliasing() external view returns (address); + + /** + * @notice Send given amount of Eth to dest from sender. + * This is a convenience function, which is equivalent to calling sendTxToL1 with empty data. + * @param destination recipient address on L1 + * @return unique identifier for this L2-to-L1 transaction. + */ + function withdrawEth(address destination) + external + payable + returns (uint256); + + /** + * @notice Send a transaction to L1 + * @dev it is not possible to execute on the L1 any L2-to-L1 transaction which contains data + * to a contract address without any code (as enforced by the Bridge contract). + * @param destination recipient address on L1 + * @param data (optional) calldata for L1 contract call + * @return a unique identifier for this L2-to-L1 transaction. + */ + function sendTxToL1(address destination, bytes calldata data) + external + payable + returns (uint256); + + /** + * @notice Get send Merkle tree state + * @return size number of sends in the history + * @return root root hash of the send history + * @return partials hashes of partial subtrees in the send history tree + */ + function sendMerkleTreeState() + external + view + returns ( + uint256 size, + bytes32 root, + bytes32[] memory partials + ); + + /** + * @notice creates a send txn from L2 to L1 + * @param position = (level << 192) + leaf = (0 << 192) + leaf = leaf + */ + event L2ToL1Tx( + address caller, + address indexed destination, + uint256 indexed hash, + uint256 indexed position, + uint256 arbBlockNum, + uint256 ethBlockNum, + uint256 timestamp, + uint256 callvalue, + bytes data + ); + + /// @dev DEPRECATED in favour of the new L2ToL1Tx event above after the nitro upgrade + event L2ToL1Transaction( + address caller, + address indexed destination, + uint256 indexed uniqueId, + uint256 indexed batchNumber, + uint256 indexInBatch, + uint256 arbBlockNum, + 
uint256 ethBlockNum, + uint256 timestamp, + uint256 callvalue, + bytes data + ); + + /** + * @notice logs a merkle branch for proof synthesis + * @param reserved an index meant only to align the 4th index with L2ToL1Transaction's 4th event + * @param hash the merkle hash + * @param position = (level << 192) + leaf + */ + event SendMerkleUpdate( + uint256 indexed reserved, + bytes32 indexed hash, + uint256 indexed position + ); +} diff --git a/contracts/src/v0.6/vendor/BufferChainlink.sol b/contracts/src/v0.6/vendor/BufferChainlink.sol new file mode 100644 index 00000000..9a69bf32 --- /dev/null +++ b/contracts/src/v0.6/vendor/BufferChainlink.sol @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +/** +* @dev A library for working with mutable byte buffers in Solidity. +* +* Byte buffers are mutable and expandable, and provide a variety of primitives +* for writing to them. At any time you can fetch a bytes object containing the +* current contents of the buffer. The bytes object should not be stored between +* operations, as it may change due to resizing of the buffer. +*/ +library BufferPlugin { + /** + * @dev Represents a mutable buffer. Buffers have a current value (buf) and + * a capacity. The capacity may be longer than the current value, in + * which case it can be extended without the need to allocate more memory. + */ + struct buffer { + bytes buf; + uint capacity; + } + + /** + * @dev Initializes a buffer with an initial capacity. + * @param buf The buffer to initialize. + * @param capacity The number of bytes of space to allocate the buffer. + * @return The buffer, for chaining. 
+ */ + function init(buffer memory buf, uint capacity) internal pure returns(buffer memory) { + if (capacity % 32 != 0) { + capacity += 32 - (capacity % 32); + } + // Allocate space for the buffer data + buf.capacity = capacity; + assembly { + let ptr := mload(0x40) + mstore(buf, ptr) + mstore(ptr, 0) + mstore(0x40, add(32, add(ptr, capacity))) + } + return buf; + } + + /** + * @dev Initializes a new buffer from an existing bytes object. + * Changes to the buffer may mutate the original value. + * @param b The bytes object to initialize the buffer with. + * @return A new buffer. + */ + function fromBytes(bytes memory b) internal pure returns(buffer memory) { + buffer memory buf; + buf.buf = b; + buf.capacity = b.length; + return buf; + } + + function resize(buffer memory buf, uint capacity) private pure { + bytes memory oldbuf = buf.buf; + init(buf, capacity); + append(buf, oldbuf); + } + + function max(uint a, uint b) private pure returns(uint) { + if (a > b) { + return a; + } + return b; + } + + /** + * @dev Sets buffer length to 0. + * @param buf The buffer to truncate. + * @return The original buffer, for chaining. + */ + function truncate(buffer memory buf) internal pure returns (buffer memory) { + assembly { + let bufptr := mload(buf) + mstore(bufptr, 0) + } + return buf; + } + + /** + * @dev Writes a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param off The start offset to write to. + * @param data The data to append. + * @param len The number of bytes to copy. + * @return The original buffer, for chaining. 
+ */ + function write(buffer memory buf, uint off, bytes memory data, uint len) internal pure returns(buffer memory) { + require(len <= data.length); + + if (off + len > buf.capacity) { + resize(buf, max(buf.capacity, len + off) * 2); + } + + uint dest; + uint src; + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Length of existing buffer data + let buflen := mload(bufptr) + // Start address = buffer address + offset + sizeof(buffer length) + dest := add(add(bufptr, 32), off) + // Update buffer length if we're extending it + if gt(add(len, off), buflen) { + mstore(bufptr, add(len, off)) + } + src := add(data, 32) + } + + // Copy word-length chunks while possible + for (; len >= 32; len -= 32) { + assembly { + mstore(dest, mload(src)) + } + dest += 32; + src += 32; + } + + // Copy remaining bytes + uint mask = 256 ** (32 - len) - 1; + assembly { + let srcpart := and(mload(src), not(mask)) + let destpart := and(mload(dest), mask) + mstore(dest, or(destpart, srcpart)) + } + + return buf; + } + + /** + * @dev Appends a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @param len The number of bytes to copy. + * @return The original buffer, for chaining. + */ + function append(buffer memory buf, bytes memory data, uint len) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, len); + } + + /** + * @dev Appends a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function append(buffer memory buf, bytes memory data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, data.length); + } + + /** + * @dev Writes a byte to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. 
+ * @param buf The buffer to append to. + * @param off The offset to write the byte at. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function writeUint8(buffer memory buf, uint off, uint8 data) internal pure returns(buffer memory) { + if (off >= buf.capacity) { + resize(buf, buf.capacity * 2); + } + + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Length of existing buffer data + let buflen := mload(bufptr) + // Address = buffer address + sizeof(buffer length) + off + let dest := add(add(bufptr, off), 32) + mstore8(dest, data) + // Update buffer length if we extended it + if eq(off, buflen) { + mstore(bufptr, add(buflen, 1)) + } + } + return buf; + } + + /** + * @dev Appends a byte to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendUint8(buffer memory buf, uint8 data) internal pure returns(buffer memory) { + return writeUint8(buf, buf.buf.length, data); + } + + /** + * @dev Writes up to 32 bytes to the buffer. Resizes if doing so would + * exceed the capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @param len The number of bytes to write (left-aligned). + * @return The original buffer, for chaining. 
+ */ + function write(buffer memory buf, uint off, bytes32 data, uint len) private pure returns(buffer memory) { + if (len + off > buf.capacity) { + resize(buf, (len + off) * 2); + } + + uint mask = 256 ** len - 1; + // Right-align data + data = data >> (8 * (32 - len)); + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + sizeof(buffer length) + off + len + let dest := add(add(bufptr, off), len) + mstore(dest, or(and(mload(dest), not(mask)), data)) + // Update buffer length if we extended it + if gt(add(off, len), mload(bufptr)) { + mstore(bufptr, add(off, len)) + } + } + return buf; + } + + /** + * @dev Writes a bytes20 to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function writeBytes20(buffer memory buf, uint off, bytes20 data) internal pure returns (buffer memory) { + return write(buf, off, bytes32(data), 20); + } + + /** + * @dev Appends a bytes20 to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendBytes20(buffer memory buf, bytes20 data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, bytes32(data), 20); + } + + /** + * @dev Appends a bytes32 to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendBytes32(buffer memory buf, bytes32 data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, 32); + } + + /** + * @dev Writes an integer to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. 
+ * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @param len The number of bytes to write (right-aligned). + * @return The original buffer, for chaining. + */ + function writeInt(buffer memory buf, uint off, uint data, uint len) private pure returns(buffer memory) { + if (len + off > buf.capacity) { + resize(buf, (len + off) * 2); + } + + uint mask = 256 ** len - 1; + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + off + sizeof(buffer length) + len + let dest := add(add(bufptr, off), len) + mstore(dest, or(and(mload(dest), not(mask)), data)) + // Update buffer length if we extended it + if gt(add(off, len), mload(bufptr)) { + mstore(bufptr, add(off, len)) + } + } + return buf; + } + + /** + * @dev Appends a byte to the end of the buffer. Resizes if doing so would + * exceed the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer. 
+ */ + function appendInt(buffer memory buf, uint data, uint len) internal pure returns(buffer memory) { + return writeInt(buf, buf.buf.length, data, len); + } +} diff --git a/contracts/src/v0.6/vendor/CBORChainlink.sol b/contracts/src/v0.6/vendor/CBORChainlink.sol new file mode 100644 index 00000000..5fcbd59d --- /dev/null +++ b/contracts/src/v0.6/vendor/CBORChainlink.sol @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: MIT +pragma solidity >= 0.4.19; + +import { BufferPlugin } from "./BufferPlugin.sol"; + +library CBORPlugin { + using BufferPlugin for BufferPlugin.buffer; + + uint8 private constant MAJOR_TYPE_INT = 0; + uint8 private constant MAJOR_TYPE_NEGATIVE_INT = 1; + uint8 private constant MAJOR_TYPE_BYTES = 2; + uint8 private constant MAJOR_TYPE_STRING = 3; + uint8 private constant MAJOR_TYPE_ARRAY = 4; + uint8 private constant MAJOR_TYPE_MAP = 5; + uint8 private constant MAJOR_TYPE_TAG = 6; + uint8 private constant MAJOR_TYPE_CONTENT_FREE = 7; + + uint8 private constant TAG_TYPE_BIGNUM = 2; + uint8 private constant TAG_TYPE_NEGATIVE_BIGNUM = 3; + + function encodeFixedNumeric(BufferPlugin.buffer memory buf, uint8 major, uint64 value) private pure { + if(value <= 23) { + buf.appendUint8(uint8((major << 5) | value)); + } else if(value <= 0xFF) { + buf.appendUint8(uint8((major << 5) | 24)); + buf.appendInt(value, 1); + } else if(value <= 0xFFFF) { + buf.appendUint8(uint8((major << 5) | 25)); + buf.appendInt(value, 2); + } else if(value <= 0xFFFFFFFF) { + buf.appendUint8(uint8((major << 5) | 26)); + buf.appendInt(value, 4); + } else { + buf.appendUint8(uint8((major << 5) | 27)); + buf.appendInt(value, 8); + } + } + + function encodeIndefiniteLengthType(BufferPlugin.buffer memory buf, uint8 major) private pure { + buf.appendUint8(uint8((major << 5) | 31)); + } + + function encodeUInt(BufferPlugin.buffer memory buf, uint value) internal pure { + if(value > 0xFFFFFFFFFFFFFFFF) { + encodeBigNum(buf, value); + } else { + encodeFixedNumeric(buf, MAJOR_TYPE_INT, 
uint64(value)); + } + } + + function encodeInt(BufferPlugin.buffer memory buf, int value) internal pure { + if(value < -0x10000000000000000) { + encodeSignedBigNum(buf, value); + } else if(value > 0xFFFFFFFFFFFFFFFF) { + encodeBigNum(buf, uint(value)); + } else if(value >= 0) { + encodeFixedNumeric(buf, MAJOR_TYPE_INT, uint64(value)); + } else { + encodeFixedNumeric(buf, MAJOR_TYPE_NEGATIVE_INT, uint64(-1 - value)); + } + } + + function encodeBytes(BufferPlugin.buffer memory buf, bytes memory value) internal pure { + encodeFixedNumeric(buf, MAJOR_TYPE_BYTES, uint64(value.length)); + buf.append(value); + } + + function encodeBigNum(BufferPlugin.buffer memory buf, uint value) internal pure { + buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_BIGNUM)); + encodeBytes(buf, abi.encode(value)); + } + + function encodeSignedBigNum(BufferPlugin.buffer memory buf, int input) internal pure { + buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_NEGATIVE_BIGNUM)); + encodeBytes(buf, abi.encode(uint(-1 - input))); + } + + function encodeString(BufferPlugin.buffer memory buf, string memory value) internal pure { + encodeFixedNumeric(buf, MAJOR_TYPE_STRING, uint64(bytes(value).length)); + buf.append(bytes(value)); + } + + function startArray(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_ARRAY); + } + + function startMap(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_MAP); + } + + function endSequence(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_CONTENT_FREE); + } +} diff --git a/contracts/src/v0.6/vendor/ENSResolver.sol b/contracts/src/v0.6/vendor/ENSResolver.sol new file mode 100644 index 00000000..a2aff795 --- /dev/null +++ b/contracts/src/v0.6/vendor/ENSResolver.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +abstract contract ENSResolver { + function addr(bytes32 node) public view virtual returns 
(address); +} diff --git a/contracts/src/v0.6/vendor/Ownable.sol b/contracts/src/v0.6/vendor/Ownable.sol new file mode 100644 index 00000000..f0299db3 --- /dev/null +++ b/contracts/src/v0.6/vendor/Ownable.sol @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +/** + * @dev Contract module which provides a basic access control mechanism, where + * there is an account (an owner) that can be granted exclusive access to + * specific functions. + * + * This module is used through inheritance. It will make available the modifier + * `onlyOwner`, which can be applied to your functions to restrict their use to + * the owner. + * + * This contract has been modified to remove the revokeOwnership function + */ +contract Ownable { + address private _owner; + + event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); + + /** + * @dev Initializes the contract setting the deployer as the initial owner. + */ + constructor () internal { + _owner = msg.sender; + emit OwnershipTransferred(address(0), _owner); + } + + /** + * @dev Returns the address of the current owner. + */ + function owner() public view returns (address) { + return _owner; + } + + /** + * @dev Throws if called by any account other than the owner. + */ + modifier onlyOwner() { + require(isOwner(), "Ownable: caller is not the owner"); + _; + } + + /** + * @dev Returns true if the caller is the current owner. + */ + function isOwner() public view returns (bool) { + return msg.sender == _owner; + } + + /** + * @dev Transfers ownership of the contract to a new account (`newOwner`). + * Can only be called by the current owner. + */ + function transferOwnership(address newOwner) public onlyOwner { + _transferOwnership(newOwner); + } + + /** + * @dev Transfers ownership of the contract to a new account (`newOwner`). 
+ */ + function _transferOwnership(address newOwner) internal { + require(newOwner != address(0), "Ownable: new owner is the zero address"); + emit OwnershipTransferred(_owner, newOwner); + _owner = newOwner; + } +} diff --git a/contracts/src/v0.6/vendor/SafeMathChainlink.sol b/contracts/src/v0.6/vendor/SafeMathChainlink.sol new file mode 100644 index 00000000..2c937266 --- /dev/null +++ b/contracts/src/v0.6/vendor/SafeMathChainlink.sol @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.6.0; + +/** + * @dev Wrappers over Solidity's arithmetic operations with added overflow + * checks. + * + * Arithmetic operations in Solidity wrap on overflow. This can easily result + * in bugs, because programmers usually assume that an overflow raises an + * error, which is the standard behavior in high level programming languages. + * `SafeMath` restores this intuition by reverting the transaction when an + * operation overflows. + * + * Using this library instead of the unchecked operations eliminates an entire + * class of bugs, so it's recommended to use it always. + */ +library SafeMathPlugin { + /** + * @dev Returns the addition of two unsigned integers, reverting on + * overflow. + * + * Counterpart to Solidity's `+` operator. + * + * Requirements: + * - Addition cannot overflow. + */ + function add(uint256 a, uint256 b) internal pure returns (uint256) { + uint256 c = a + b; + require(c >= a, "SafeMath: addition overflow"); + + return c; + } + + /** + * @dev Returns the subtraction of two unsigned integers, reverting on + * overflow (when the result is negative). + * + * Counterpart to Solidity's `-` operator. + * + * Requirements: + * - Subtraction cannot overflow. + */ + function sub(uint256 a, uint256 b) internal pure returns (uint256) { + require(b <= a, "SafeMath: subtraction overflow"); + uint256 c = a - b; + + return c; + } + + /** + * @dev Returns the multiplication of two unsigned integers, reverting on + * overflow. 
+ * + * Counterpart to Solidity's `*` operator. + * + * Requirements: + * - Multiplication cannot overflow. + */ + function mul(uint256 a, uint256 b) internal pure returns (uint256) { + // Gas optimization: this is cheaper than requiring 'a' not being zero, but the + // benefit is lost if 'b' is also tested. + // See: https://github.com/OpenZeppelin/openzeppelin-solidity/pull/522 + if (a == 0) { + return 0; + } + + uint256 c = a * b; + require(c / a == b, "SafeMath: multiplication overflow"); + + return c; + } + + /** + * @dev Returns the integer division of two unsigned integers. Reverts on + * division by zero. The result is rounded towards zero. + * + * Counterpart to Solidity's `/` operator. Note: this function uses a + * `revert` opcode (which leaves remaining gas untouched) while Solidity + * uses an invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. + */ + function div(uint256 a, uint256 b) internal pure returns (uint256) { + // Solidity only automatically asserts when dividing by 0 + require(b > 0, "SafeMath: division by zero"); + uint256 c = a / b; + // assert(a == b * c + a % b); // There is no case in which this doesn't hold + + return c; + } + + /** + * @dev Returns the remainder of dividing two unsigned integers. (unsigned integer modulo), + * Reverts when dividing by zero. + * + * Counterpart to Solidity's `%` operator. This function uses a `revert` + * opcode (which leaves remaining gas untouched) while Solidity uses an + * invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. 
+ */ + function mod(uint256 a, uint256 b) internal pure returns (uint256) { + require(b != 0, "SafeMath: modulo by zero"); + return a % b; + } +} diff --git a/contracts/src/v0.7/AuthorizedForwarder.sol b/contracts/src/v0.7/AuthorizedForwarder.sol new file mode 100644 index 00000000..3091d5ec --- /dev/null +++ b/contracts/src/v0.7/AuthorizedForwarder.sol @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "./interfaces/OperatorInterface.sol"; +import "./ConfirmedOwnerWithProposal.sol"; +import "./AuthorizedReceiver.sol"; +import "./vendor/Address.sol"; + +contract AuthorizedForwarder is ConfirmedOwnerWithProposal, AuthorizedReceiver { + using Address for address; + + address public immutable getPluginToken; + + event OwnershipTransferRequestedWithMessage(address indexed from, address indexed to, bytes message); + + constructor( + address link, + address owner, + address recipient, + bytes memory message + ) ConfirmedOwnerWithProposal(owner, recipient) { + require(link != address(0)); + getPluginToken = link; + if (recipient != address(0)) { + emit OwnershipTransferRequestedWithMessage(owner, recipient, message); + } + } + + /** + * @notice The type and version of this contract + * @return Type and version string + */ + function typeAndVersion() external pure virtual returns (string memory) { + return "AuthorizedForwarder 1.0.0"; + } + + /** + * @notice Forward a call to another contract + * @dev Only callable by an authorized sender + * @param to address + * @param data to forward + */ + function forward(address to, bytes calldata data) external validateAuthorizedSender { + require(to != getPluginToken, "Cannot forward to Link token"); + _forward(to, data); + } + + /** + * @notice Forward a call to another contract + * @dev Only callable by the owner + * @param to address + * @param data to forward + */ + function ownerForward(address to, bytes calldata data) external onlyOwner { + _forward(to, data); + } + + /** + * @notice Transfer 
ownership with instructions for recipient + * @param to address proposed recipient of ownership + * @param message instructions for recipient upon accepting ownership + */ + function transferOwnershipWithMessage(address to, bytes calldata message) external { + transferOwnership(to); + emit OwnershipTransferRequestedWithMessage(msg.sender, to, message); + } + + /** + * @notice concrete implementation of AuthorizedReceiver + * @return bool of whether sender is authorized + */ + function _canSetAuthorizedSenders() internal view override returns (bool) { + return owner() == msg.sender; + } + + /** + * @notice common forwarding functionality and validation + */ + function _forward(address to, bytes calldata data) private { + require(to.isContract(), "Must forward to a contract"); + (bool success, bytes memory result) = to.call(data); + if (!success) { + if (result.length == 0) revert("Forwarded call reverted without reason"); + assembly { + revert(add(32, result), mload(result)) + } + } + } +} diff --git a/contracts/src/v0.7/AuthorizedReceiver.sol b/contracts/src/v0.7/AuthorizedReceiver.sol new file mode 100644 index 00000000..54f332cf --- /dev/null +++ b/contracts/src/v0.7/AuthorizedReceiver.sol @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "./interfaces/AuthorizedReceiverInterface.sol"; + +abstract contract AuthorizedReceiver is AuthorizedReceiverInterface { + mapping(address => bool) private s_authorizedSenders; + address[] private s_authorizedSenderList; + + event AuthorizedSendersChanged(address[] senders, address changedBy); + + /** + * @notice Sets the fulfillment permission for a given node. Use `true` to allow, `false` to disallow. 
+ * @param senders The addresses of the authorized Plugin node + */ + function setAuthorizedSenders(address[] calldata senders) external override validateAuthorizedSenderSetter { + require(senders.length > 0, "Must have at least 1 sender"); + // Set previous authorized senders to false + uint256 authorizedSendersLength = s_authorizedSenderList.length; + for (uint256 i = 0; i < authorizedSendersLength; i++) { + s_authorizedSenders[s_authorizedSenderList[i]] = false; + } + // Set new to true + for (uint256 i = 0; i < senders.length; i++) { + require(s_authorizedSenders[senders[i]] == false, "Must not have duplicate senders"); + s_authorizedSenders[senders[i]] = true; + } + // Replace list + s_authorizedSenderList = senders; + emit AuthorizedSendersChanged(senders, msg.sender); + } + + /** + * @notice Retrieve a list of authorized senders + * @return array of addresses + */ + function getAuthorizedSenders() external view override returns (address[] memory) { + return s_authorizedSenderList; + } + + /** + * @notice Use this to check if a node is authorized for fulfilling requests + * @param sender The address of the Plugin node + * @return The authorization status of the node + */ + function isAuthorizedSender(address sender) public view override returns (bool) { + return s_authorizedSenders[sender]; + } + + /** + * @notice customizable guard of who can update the authorized sender list + * @return bool whether sender can update authorized sender list + */ + function _canSetAuthorizedSenders() internal virtual returns (bool); + + /** + * @notice validates the sender is an authorized sender + */ + function _validateIsAuthorizedSender() internal view { + require(isAuthorizedSender(msg.sender), "Not authorized sender"); + } + + /** + * @notice prevents non-authorized addresses from calling this method + */ + modifier validateAuthorizedSender() { + _validateIsAuthorizedSender(); + _; + } + + /** + * @notice prevents non-authorized addresses from calling this method + */ + 
modifier validateAuthorizedSenderSetter() { + require(_canSetAuthorizedSenders(), "Cannot set authorized senders"); + _; + } +} diff --git a/contracts/src/v0.7/ConfirmedOwner.sol b/contracts/src/v0.7/ConfirmedOwner.sol new file mode 100644 index 00000000..a411ba8e --- /dev/null +++ b/contracts/src/v0.7/ConfirmedOwner.sol @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "./ConfirmedOwnerWithProposal.sol"; + +/** + * @title The ConfirmedOwner contract + * @notice A contract with helpers for basic contract ownership. + */ +contract ConfirmedOwner is ConfirmedOwnerWithProposal { + constructor(address newOwner) ConfirmedOwnerWithProposal(newOwner, address(0)) {} +} diff --git a/contracts/src/v0.7/ConfirmedOwnerWithProposal.sol b/contracts/src/v0.7/ConfirmedOwnerWithProposal.sol new file mode 100644 index 00000000..b95c1711 --- /dev/null +++ b/contracts/src/v0.7/ConfirmedOwnerWithProposal.sol @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "./interfaces/OwnableInterface.sol"; + +/** + * @title The ConfirmedOwner contract + * @notice A contract with helpers for basic contract ownership. + */ +contract ConfirmedOwnerWithProposal is OwnableInterface { + address private s_owner; + address private s_pendingOwner; + + event OwnershipTransferRequested(address indexed from, address indexed to); + event OwnershipTransferred(address indexed from, address indexed to); + + constructor(address newOwner, address pendingOwner) { + require(newOwner != address(0), "Cannot set owner to zero"); + + s_owner = newOwner; + if (pendingOwner != address(0)) { + _transferOwnership(pendingOwner); + } + } + + /** + * @notice Allows an owner to begin transferring ownership to a new address, + * pending. + */ + function transferOwnership(address to) public override onlyOwner { + _transferOwnership(to); + } + + /** + * @notice Allows an ownership transfer to be completed by the recipient. 
+ */ + function acceptOwnership() external override { + require(msg.sender == s_pendingOwner, "Must be proposed owner"); + + address oldOwner = s_owner; + s_owner = msg.sender; + s_pendingOwner = address(0); + + emit OwnershipTransferred(oldOwner, msg.sender); + } + + /** + * @notice Get the current owner + */ + function owner() public view override returns (address) { + return s_owner; + } + + /** + * @notice validate, transfer ownership, and emit relevant events + */ + function _transferOwnership(address to) private { + require(to != msg.sender, "Cannot transfer to self"); + + s_pendingOwner = to; + + emit OwnershipTransferRequested(s_owner, to); + } + + /** + * @notice validate access + */ + function _validateOwnership() internal view { + require(msg.sender == s_owner, "Only callable by owner"); + } + + /** + * @notice Reverts if called by anyone other than the contract owner. + */ + modifier onlyOwner() { + _validateOwnership(); + _; + } +} diff --git a/contracts/src/v0.7/Denominations.sol b/contracts/src/v0.7/Denominations.sol new file mode 100644 index 00000000..54556db8 --- /dev/null +++ b/contracts/src/v0.7/Denominations.sol @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +library Denominations { + address public constant ETH = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE; + address public constant BTC = 0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB; + + // Fiat currencies follow https://en.wikipedia.org/wiki/ISO_4217 + address public constant USD = address(840); + address public constant GBP = address(826); + address public constant EUR = address(978); + address public constant JPY = address(392); + address public constant KRW = address(410); + address public constant CNY = address(156); + address public constant AUD = address(36); + address public constant CAD = address(124); + address public constant CHF = address(756); + address public constant ARS = address(32); + address public constant PHP = address(608); + address public 
constant NZD = address(554); + address public constant SGD = address(702); + address public constant NGN = address(566); + address public constant ZAR = address(710); + address public constant RUB = address(643); + address public constant INR = address(356); + address public constant BRL = address(986); +} diff --git a/contracts/src/v0.7/KeeperBase.sol b/contracts/src/v0.7/KeeperBase.sol new file mode 100644 index 00000000..6af11a8e --- /dev/null +++ b/contracts/src/v0.7/KeeperBase.sol @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +contract KeeperBase { + /** + * @notice method that allows it to be simulated via eth_call by checking that + * the sender is the zero address. + */ + function preventExecution() internal view { + require(tx.origin == address(0), "only for simulated backend"); + } + + /** + * @notice modifier that allows it to be simulated via eth_call by checking + * that the sender is the zero address. + */ + modifier cannotExecute() { + preventExecution(); + _; + } +} diff --git a/contracts/src/v0.7/KeeperCompatible.sol b/contracts/src/v0.7/KeeperCompatible.sol new file mode 100644 index 00000000..9780eb64 --- /dev/null +++ b/contracts/src/v0.7/KeeperCompatible.sol @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "./KeeperBase.sol"; +import "./interfaces/KeeperCompatibleInterface.sol"; + +abstract contract KeeperCompatible is KeeperBase, KeeperCompatibleInterface {} diff --git a/contracts/src/v0.7/KeeperRegistry1_1.sol b/contracts/src/v0.7/KeeperRegistry1_1.sol new file mode 100644 index 00000000..c34659ca --- /dev/null +++ b/contracts/src/v0.7/KeeperRegistry1_1.sol @@ -0,0 +1,834 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "./interfaces/AggregatorV3Interface.sol"; +import "./interfaces/LinkTokenInterface.sol"; +import "./interfaces/KeeperCompatibleInterface.sol"; +import "./interfaces/KeeperRegistryInterface.sol"; +import 
"./interfaces/TypeAndVersionInterface.sol"; +import "./vendor/SafeMathPlugin.sol"; +import "./vendor/Address.sol"; +import "./vendor/Pausable.sol"; +import "./vendor/ReentrancyGuard.sol"; +import "./vendor/SignedSafeMath.sol"; +import "./vendor/SafeMath96.sol"; +import "./KeeperBase.sol"; +import "./ConfirmedOwner.sol"; + +/** + * @notice Registry for adding work for Plugin Keepers to perform on client + * contracts. Clients must support the Upkeep interface. + */ +contract KeeperRegistry1_1 is + TypeAndVersionInterface, + ConfirmedOwner, + KeeperBase, + ReentrancyGuard, + Pausable, + KeeperRegistryExecutableInterface +{ + using Address for address; + using SafeMathPlugin for uint256; + using SafeMath96 for uint96; + using SignedSafeMath for int256; + + address private constant ZERO_ADDRESS = address(0); + address private constant IGNORE_ADDRESS = 0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF; + bytes4 private constant CHECK_SELECTOR = KeeperCompatibleInterface.checkUpkeep.selector; + bytes4 private constant PERFORM_SELECTOR = KeeperCompatibleInterface.performUpkeep.selector; + uint256 private constant CALL_GAS_MAX = 5_000_000; + uint256 private constant CALL_GAS_MIN = 2_300; + uint256 private constant CANCELATION_DELAY = 50; + uint256 private constant CUSHION = 5_000; + uint256 private constant REGISTRY_GAS_OVERHEAD = 80_000; + uint256 private constant PPB_BASE = 1_000_000_000; + uint64 private constant UINT64_MAX = 2**64 - 1; + uint96 private constant PLI_TOTAL_SUPPLY = 1e27; + + uint256 private s_upkeepCount; + uint256[] private s_canceledUpkeepList; + address[] private s_keeperList; + mapping(uint256 => Upkeep) private s_upkeep; + mapping(address => KeeperInfo) private s_keeperInfo; + mapping(address => address) private s_proposedPayee; + mapping(uint256 => bytes) private s_checkData; + Config private s_config; + uint256 private s_fallbackGasPrice; // not in config object for gas savings + uint256 private s_fallbackLinkPrice; // not in config object for gas 
savings + uint256 private s_expectedLinkBalance; + + LinkTokenInterface public immutable PLI; + AggregatorV3Interface public immutable PLI_ETH_FEED; + AggregatorV3Interface public immutable FAST_GAS_FEED; + + address private s_registrar; + + /** + * @notice versions: + * - KeeperRegistry 1.1.0: added flatFeeMicroLink + * - KeeperRegistry 1.0.0: initial release + */ + string public constant override typeAndVersion = "KeeperRegistry 1.1.0"; + + struct Upkeep { + address target; + uint32 executeGas; + uint96 balance; + address admin; + uint64 maxValidBlocknumber; + address lastKeeper; + } + + struct KeeperInfo { + address payee; + uint96 balance; + bool active; + } + + struct Config { + uint32 paymentPremiumPPB; + uint32 flatFeeMicroLink; // min 0.000001 PLI, max 4294 PLI + uint24 blockCountPerTurn; + uint32 checkGasLimit; + uint24 stalenessSeconds; + uint16 gasCeilingMultiplier; + } + + struct PerformParams { + address from; + uint256 id; + bytes performData; + uint256 maxLinkPayment; + uint256 gasLimit; + uint256 adjustedGasWei; + uint256 linkEth; + } + + event UpkeepRegistered(uint256 indexed id, uint32 executeGas, address admin); + event UpkeepPerformed( + uint256 indexed id, + bool indexed success, + address indexed from, + uint96 payment, + bytes performData + ); + event UpkeepCanceled(uint256 indexed id, uint64 indexed atBlockHeight); + event FundsAdded(uint256 indexed id, address indexed from, uint96 amount); + event FundsWithdrawn(uint256 indexed id, uint256 amount, address to); + event ConfigSet( + uint32 paymentPremiumPPB, + uint24 blockCountPerTurn, + uint32 checkGasLimit, + uint24 stalenessSeconds, + uint16 gasCeilingMultiplier, + uint256 fallbackGasPrice, + uint256 fallbackLinkPrice + ); + event FlatFeeSet(uint32 flatFeeMicroLink); + event KeepersUpdated(address[] keepers, address[] payees); + event PaymentWithdrawn(address indexed keeper, uint256 indexed amount, address indexed to, address payee); + event PayeeshipTransferRequested(address indexed 
keeper, address indexed from, address indexed to); + event PayeeshipTransferred(address indexed keeper, address indexed from, address indexed to); + event RegistrarChanged(address indexed from, address indexed to); + + /** + * @param link address of the PLI Token + * @param linkEthFeed address of the PLI/ETH price feed + * @param fastGasFeed address of the Fast Gas price feed + * @param paymentPremiumPPB payment premium rate oracles receive on top of + * being reimbursed for gas, measured in parts per billion + * @param flatFeeMicroLink flat fee paid to oracles for performing upkeeps, + * priced in MicroLink; can be used in conjunction with or independently of + * paymentPremiumPPB + * @param blockCountPerTurn number of blocks each oracle has during their turn to + * perform upkeep before it will be the next keeper's turn to submit + * @param checkGasLimit gas limit when checking for upkeep + * @param stalenessSeconds number of seconds that is allowed for feed data to + * be stale before switching to the fallback pricing + * @param gasCeilingMultiplier multiplier to apply to the fast gas feed price + * when calculating the payment ceiling for keepers + * @param fallbackGasPrice gas price used if the gas price feed is stale + * @param fallbackLinkPrice PLI price used if the PLI price feed is stale + */ + constructor( + address link, + address linkEthFeed, + address fastGasFeed, + uint32 paymentPremiumPPB, + uint32 flatFeeMicroLink, + uint24 blockCountPerTurn, + uint32 checkGasLimit, + uint24 stalenessSeconds, + uint16 gasCeilingMultiplier, + uint256 fallbackGasPrice, + uint256 fallbackLinkPrice + ) ConfirmedOwner(msg.sender) { + PLI = LinkTokenInterface(link); + PLI_ETH_FEED = AggregatorV3Interface(linkEthFeed); + FAST_GAS_FEED = AggregatorV3Interface(fastGasFeed); + + setConfig( + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + fallbackGasPrice, + fallbackLinkPrice + ); + } + + // ACTIONS + 
+ /** + * @notice adds a new upkeep + * @param target address to perform upkeep on + * @param gasLimit amount of gas to provide the target contract when + * performing upkeep + * @param admin address to cancel upkeep and withdraw remaining funds + * @param checkData data passed to the contract when checking for upkeep + */ + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + bytes calldata checkData + ) external override onlyOwnerOrRegistrar returns (uint256 id) { + require(target.isContract(), "target is not a contract"); + require(gasLimit >= CALL_GAS_MIN, "min gas is 2300"); + require(gasLimit <= CALL_GAS_MAX, "max gas is 5000000"); + + id = s_upkeepCount; + s_upkeep[id] = Upkeep({ + target: target, + executeGas: gasLimit, + balance: 0, + admin: admin, + maxValidBlocknumber: UINT64_MAX, + lastKeeper: address(0) + }); + s_checkData[id] = checkData; + s_upkeepCount++; + + emit UpkeepRegistered(id, gasLimit, admin); + + return id; + } + + /** + * @notice simulated by keepers via eth_call to see if the upkeep needs to be + * performed. If upkeep is needed, the call then simulates performUpkeep + * to make sure it succeeds. Finally, it returns the success status along with + * payment information and the perform data payload. 
+ * @param id identifier of the upkeep to check + * @param from the address to simulate performing the upkeep from + */ + function checkUpkeep(uint256 id, address from) + external + override + whenNotPaused + cannotExecute + returns ( + bytes memory performData, + uint256 maxLinkPayment, + uint256 gasLimit, + uint256 adjustedGasWei, + uint256 linkEth + ) + { + Upkeep memory upkeep = s_upkeep[id]; + + bytes memory callData = abi.encodeWithSelector(CHECK_SELECTOR, s_checkData[id]); + (bool success, bytes memory result) = upkeep.target.call{gas: s_config.checkGasLimit}(callData); + + if (!success) { + string memory upkeepRevertReason = getRevertMsg(result); + string memory reason = string(abi.encodePacked("call to check target failed: ", upkeepRevertReason)); + revert(reason); + } + + (success, performData) = abi.decode(result, (bool, bytes)); + require(success, "upkeep not needed"); + + PerformParams memory params = generatePerformParams(from, id, performData, false); + prePerformUpkeep(upkeep, params.from, params.maxLinkPayment); + + return (performData, params.maxLinkPayment, params.gasLimit, params.adjustedGasWei, params.linkEth); + } + + /** + * @notice executes the upkeep with the perform data returned from + * checkUpkeep, validates the keeper's permissions, and pays the keeper. + * @param id identifier of the upkeep to execute the data with. + * @param performData calldata parameter to be passed to the target upkeep. 
+ */ + function performUpkeep(uint256 id, bytes calldata performData) external override returns (bool success) { + return performUpkeepWithParams(generatePerformParams(msg.sender, id, performData, true)); + } + + /** + * @notice prevent an upkeep from being performed in the future + * @param id upkeep to be canceled + */ + function cancelUpkeep(uint256 id) external override { + uint64 maxValid = s_upkeep[id].maxValidBlocknumber; + bool notCanceled = maxValid == UINT64_MAX; + bool isOwner = msg.sender == owner(); + require(notCanceled || (isOwner && maxValid > block.number), "too late to cancel upkeep"); + require(isOwner || msg.sender == s_upkeep[id].admin, "only owner or admin"); + + uint256 height = block.number; + if (!isOwner) { + height = height.add(CANCELATION_DELAY); + } + s_upkeep[id].maxValidBlocknumber = uint64(height); + if (notCanceled) { + s_canceledUpkeepList.push(id); + } + + emit UpkeepCanceled(id, uint64(height)); + } + + /** + * @notice adds PLI funding for an upkeep by transferring from the sender's + * PLI balance + * @param id upkeep to fund + * @param amount number of PLI to transfer + */ + function addFunds(uint256 id, uint96 amount) external override { + require(s_upkeep[id].maxValidBlocknumber == UINT64_MAX, "upkeep must be active"); + s_upkeep[id].balance = s_upkeep[id].balance.add(amount); + s_expectedLinkBalance = s_expectedLinkBalance.add(amount); + PLI.transferFrom(msg.sender, address(this), amount); + emit FundsAdded(id, msg.sender, amount); + } + + /** + * @notice uses PLI's transferAndCall to PLI and add funding to an upkeep + * @dev safe to cast uint256 to uint96 as total PLI supply is under UINT96MAX + * @param sender the account which transferred the funds + * @param amount number of PLI transfer + */ + function onTokenTransfer( + address sender, + uint256 amount, + bytes calldata data + ) external { + require(msg.sender == address(PLI), "only callable through PLI"); + require(data.length == 32, "data must be 32 bytes"); + 
uint256 id = abi.decode(data, (uint256)); + require(s_upkeep[id].maxValidBlocknumber == UINT64_MAX, "upkeep must be active"); + + s_upkeep[id].balance = s_upkeep[id].balance.add(uint96(amount)); + s_expectedLinkBalance = s_expectedLinkBalance.add(amount); + + emit FundsAdded(id, sender, uint96(amount)); + } + + /** + * @notice removes funding from a canceled upkeep + * @param id upkeep to withdraw funds from + * @param to destination address for sending remaining funds + */ + function withdrawFunds(uint256 id, address to) external validateRecipient(to) { + require(s_upkeep[id].admin == msg.sender, "only callable by admin"); + require(s_upkeep[id].maxValidBlocknumber <= block.number, "upkeep must be canceled"); + + uint256 amount = s_upkeep[id].balance; + s_upkeep[id].balance = 0; + s_expectedLinkBalance = s_expectedLinkBalance.sub(amount); + emit FundsWithdrawn(id, amount, to); + + PLI.transfer(to, amount); + } + + /** + * @notice recovers PLI funds improperly transferred to the registry + * @dev In principle this function’s execution cost could exceed block + * gas limit. However, in our anticipated deployment, the number of upkeeps and + * keepers will be low enough to avoid this problem. 
+ */ + function recoverFunds() external onlyOwner { + uint256 total = PLI.balanceOf(address(this)); + PLI.transfer(msg.sender, total.sub(s_expectedLinkBalance)); + } + + /** + * @notice withdraws a keeper's payment, callable only by the keeper's payee + * @param from keeper address + * @param to address to send the payment to + */ + function withdrawPayment(address from, address to) external validateRecipient(to) { + KeeperInfo memory keeper = s_keeperInfo[from]; + require(keeper.payee == msg.sender, "only callable by payee"); + + s_keeperInfo[from].balance = 0; + s_expectedLinkBalance = s_expectedLinkBalance.sub(keeper.balance); + emit PaymentWithdrawn(from, keeper.balance, to, msg.sender); + + PLI.transfer(to, keeper.balance); + } + + /** + * @notice proposes the safe transfer of a keeper's payee to another address + * @param keeper address of the keeper to transfer payee role + * @param proposed address to nominate for next payeeship + */ + function transferPayeeship(address keeper, address proposed) external { + require(s_keeperInfo[keeper].payee == msg.sender, "only callable by payee"); + require(proposed != msg.sender, "cannot transfer to self"); + + if (s_proposedPayee[keeper] != proposed) { + s_proposedPayee[keeper] = proposed; + emit PayeeshipTransferRequested(keeper, msg.sender, proposed); + } + } + + /** + * @notice accepts the safe transfer of payee role for a keeper + * @param keeper address to accept the payee role for + */ + function acceptPayeeship(address keeper) external { + require(s_proposedPayee[keeper] == msg.sender, "only callable by proposed payee"); + address past = s_keeperInfo[keeper].payee; + s_keeperInfo[keeper].payee = msg.sender; + s_proposedPayee[keeper] = ZERO_ADDRESS; + + emit PayeeshipTransferred(keeper, past, msg.sender); + } + + /** + * @notice signals to keepers that they should not perform upkeeps until the + * contract has been unpaused + */ + function pause() external onlyOwner { + _pause(); + } + + /** + * @notice signals 
to keepers that they can perform upkeeps once again after + * having been paused + */ + function unpause() external onlyOwner { + _unpause(); + } + + // SETTERS + + /** + * @notice updates the configuration of the registry + * @param paymentPremiumPPB payment premium rate oracles receive on top of + * being reimbursed for gas, measured in parts per billion + * @param flatFeeMicroLink flat fee paid to oracles for performing upkeeps + * @param blockCountPerTurn number of blocks an oracle should wait before + * checking for upkeep + * @param checkGasLimit gas limit when checking for upkeep + * @param stalenessSeconds number of seconds that is allowed for feed data to + * be stale before switching to the fallback pricing + * @param fallbackGasPrice gas price used if the gas price feed is stale + * @param fallbackLinkPrice PLI price used if the PLI price feed is stale + */ + function setConfig( + uint32 paymentPremiumPPB, + uint32 flatFeeMicroLink, + uint24 blockCountPerTurn, + uint32 checkGasLimit, + uint24 stalenessSeconds, + uint16 gasCeilingMultiplier, + uint256 fallbackGasPrice, + uint256 fallbackLinkPrice + ) public onlyOwner { + s_config = Config({ + paymentPremiumPPB: paymentPremiumPPB, + flatFeeMicroLink: flatFeeMicroLink, + blockCountPerTurn: blockCountPerTurn, + checkGasLimit: checkGasLimit, + stalenessSeconds: stalenessSeconds, + gasCeilingMultiplier: gasCeilingMultiplier + }); + s_fallbackGasPrice = fallbackGasPrice; + s_fallbackLinkPrice = fallbackLinkPrice; + + emit ConfigSet( + paymentPremiumPPB, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + fallbackGasPrice, + fallbackLinkPrice + ); + emit FlatFeeSet(flatFeeMicroLink); + } + + /** + * @notice update the list of keepers allowed to perform upkeep + * @param keepers list of addresses allowed to perform upkeep + * @param payees addresses corresponding to keepers who are allowed to + * move payments which have been accrued + */ + function setKeepers(address[] calldata 
keepers, address[] calldata payees) external onlyOwner { + require(keepers.length == payees.length, "address lists not the same length"); + require(keepers.length >= 2, "not enough keepers"); + for (uint256 i = 0; i < s_keeperList.length; i++) { + address keeper = s_keeperList[i]; + s_keeperInfo[keeper].active = false; + } + for (uint256 i = 0; i < keepers.length; i++) { + address keeper = keepers[i]; + KeeperInfo storage s_keeper = s_keeperInfo[keeper]; + address oldPayee = s_keeper.payee; + address newPayee = payees[i]; + require(newPayee != address(0), "cannot set payee to the zero address"); + require(oldPayee == ZERO_ADDRESS || oldPayee == newPayee || newPayee == IGNORE_ADDRESS, "cannot change payee"); + require(!s_keeper.active, "cannot add keeper twice"); + s_keeper.active = true; + if (newPayee != IGNORE_ADDRESS) { + s_keeper.payee = newPayee; + } + } + s_keeperList = keepers; + emit KeepersUpdated(keepers, payees); + } + + /** + * @notice update registrar + * @param registrar new registrar + */ + function setRegistrar(address registrar) external onlyOwnerOrRegistrar { + address previous = s_registrar; + require(registrar != previous, "Same registrar"); + s_registrar = registrar; + emit RegistrarChanged(previous, registrar); + } + + // GETTERS + + /** + * @notice read all of the details about an upkeep + */ + function getUpkeep(uint256 id) + external + view + override + returns ( + address target, + uint32 executeGas, + bytes memory checkData, + uint96 balance, + address lastKeeper, + address admin, + uint64 maxValidBlocknumber + ) + { + Upkeep memory reg = s_upkeep[id]; + return ( + reg.target, + reg.executeGas, + s_checkData[id], + reg.balance, + reg.lastKeeper, + reg.admin, + reg.maxValidBlocknumber + ); + } + + /** + * @notice read the total number of upkeep's registered + */ + function getUpkeepCount() external view override returns (uint256) { + return s_upkeepCount; + } + + /** + * @notice read the current list canceled upkeep IDs + */ + function 
getCanceledUpkeepList() external view override returns (uint256[] memory) { + return s_canceledUpkeepList; + } + + /** + * @notice read the current list of addresses allowed to perform upkeep + */ + function getKeeperList() external view override returns (address[] memory) { + return s_keeperList; + } + + /** + * @notice read the current registrar + */ + function getRegistrar() external view returns (address) { + return s_registrar; + } + + /** + * @notice read the current info about any keeper address + */ + function getKeeperInfo(address query) + external + view + override + returns ( + address payee, + bool active, + uint96 balance + ) + { + KeeperInfo memory keeper = s_keeperInfo[query]; + return (keeper.payee, keeper.active, keeper.balance); + } + + /** + * @notice read the current configuration of the registry + */ + function getConfig() + external + view + override + returns ( + uint32 paymentPremiumPPB, + uint24 blockCountPerTurn, + uint32 checkGasLimit, + uint24 stalenessSeconds, + uint16 gasCeilingMultiplier, + uint256 fallbackGasPrice, + uint256 fallbackLinkPrice + ) + { + Config memory config = s_config; + return ( + config.paymentPremiumPPB, + config.blockCountPerTurn, + config.checkGasLimit, + config.stalenessSeconds, + config.gasCeilingMultiplier, + s_fallbackGasPrice, + s_fallbackLinkPrice + ); + } + + /** + * @notice getFlatFee gets the flat rate fee charged to customers when performing upkeep, + * in units of micro PLI + */ + function getFlatFee() external view returns (uint32) { + return s_config.flatFeeMicroLink; + } + + /** + * @notice calculates the minimum balance required for an upkeep to remain eligible + */ + function getMinBalanceForUpkeep(uint256 id) external view returns (uint96 minBalance) { + return getMaxPaymentForGas(s_upkeep[id].executeGas); + } + + /** + * @notice calculates the maximum payment for a given gas limit + */ + function getMaxPaymentForGas(uint256 gasLimit) public view returns (uint96 maxPayment) { + (uint256 
gasWei, uint256 linkEth) = getFeedData(); + uint256 adjustedGasWei = adjustGasPrice(gasWei, false); + return calculatePaymentAmount(gasLimit, adjustedGasWei, linkEth); + } + + // PRIVATE + + /** + * @dev retrieves feed data for fast gas/eth and link/eth prices. if the feed + * data is stale it uses the configured fallback price. Once a price is picked + * for gas it takes the min of gas price in the transaction or the fast gas + * price in order to reduce costs for the upkeep clients. + */ + function getFeedData() private view returns (uint256 gasWei, uint256 linkEth) { + uint32 stalenessSeconds = s_config.stalenessSeconds; + bool staleFallback = stalenessSeconds > 0; + uint256 timestamp; + int256 feedValue; + (, feedValue, , timestamp, ) = FAST_GAS_FEED.latestRoundData(); + if ((staleFallback && stalenessSeconds < block.timestamp - timestamp) || feedValue <= 0) { + gasWei = s_fallbackGasPrice; + } else { + gasWei = uint256(feedValue); + } + (, feedValue, , timestamp, ) = PLI_ETH_FEED.latestRoundData(); + if ((staleFallback && stalenessSeconds < block.timestamp - timestamp) || feedValue <= 0) { + linkEth = s_fallbackLinkPrice; + } else { + linkEth = uint256(feedValue); + } + return (gasWei, linkEth); + } + + /** + * @dev calculates PLI paid for gas spent plus a configure premium percentage + */ + function calculatePaymentAmount( + uint256 gasLimit, + uint256 gasWei, + uint256 linkEth + ) private view returns (uint96 payment) { + Config memory config = s_config; + uint256 weiForGas = gasWei.mul(gasLimit.add(REGISTRY_GAS_OVERHEAD)); + uint256 premium = PPB_BASE.add(config.paymentPremiumPPB); + uint256 total = weiForGas.mul(1e9).mul(premium).div(linkEth).add(uint256(config.flatFeeMicroLink).mul(1e12)); + require(total <= PLI_TOTAL_SUPPLY, "payment greater than all PLI"); + return uint96(total); // PLI_TOTAL_SUPPLY < UINT96_MAX + } + + /** + * @dev calls target address with exactly gasAmount gas and data as calldata + * or reverts if at least gasAmount gas is not 
available + */ + function callWithExactGas( + uint256 gasAmount, + address target, + bytes memory data + ) private returns (bool success) { + assembly { + let g := gas() + // Compute g -= CUSHION and check for underflow + if lt(g, CUSHION) { + revert(0, 0) + } + g := sub(g, CUSHION) + // if g - g//64 <= gasAmount, revert + // (we subtract g//64 because of EIP-150) + if iszero(gt(sub(g, div(g, 64)), gasAmount)) { + revert(0, 0) + } + // solidity calls check that a contract actually exists at the destination, so we do the same + if iszero(extcodesize(target)) { + revert(0, 0) + } + // call and return whether we succeeded. ignore return data + success := call(gasAmount, target, 0, add(data, 0x20), mload(data), 0, 0) + } + return success; + } + + /** + * @dev calls the Upkeep target with the performData param passed in by the + * keeper and the exact gas required by the Upkeep + */ + function performUpkeepWithParams(PerformParams memory params) + private + nonReentrant + validUpkeep(params.id) + returns (bool success) + { + Upkeep memory upkeep = s_upkeep[params.id]; + prePerformUpkeep(upkeep, params.from, params.maxLinkPayment); + + uint256 gasUsed = gasleft(); + bytes memory callData = abi.encodeWithSelector(PERFORM_SELECTOR, params.performData); + success = callWithExactGas(params.gasLimit, upkeep.target, callData); + gasUsed = gasUsed - gasleft(); + + uint96 payment = calculatePaymentAmount(gasUsed, params.adjustedGasWei, params.linkEth); + + uint96 newUpkeepBalance = s_upkeep[params.id].balance.sub(payment); + s_upkeep[params.id].balance = newUpkeepBalance; + s_upkeep[params.id].lastKeeper = params.from; + + uint96 newKeeperBalance = s_keeperInfo[params.from].balance.add(payment); + s_keeperInfo[params.from].balance = newKeeperBalance; + + emit UpkeepPerformed(params.id, success, params.from, payment, params.performData); + return success; + } + + /** + * @dev ensures an upkeep is valid + */ + function validateUpkeep(uint256 id) private view { + 
require(s_upkeep[id].maxValidBlocknumber > block.number, "invalid upkeep id"); + } + + /** + * @dev ensures all required checks are passed before an upkeep is performed + */ + function prePerformUpkeep( + Upkeep memory upkeep, + address from, + uint256 maxLinkPayment + ) private view { + require(s_keeperInfo[from].active, "only active keepers"); + require(upkeep.balance >= maxLinkPayment, "insufficient funds"); + require(upkeep.lastKeeper != from, "keepers must take turns"); + } + + /** + * @dev adjusts the gas price to min(ceiling, tx.gasprice) or just uses the ceiling if tx.gasprice is disabled + */ + function adjustGasPrice(uint256 gasWei, bool useTxGasPrice) private view returns (uint256 adjustedPrice) { + adjustedPrice = gasWei.mul(s_config.gasCeilingMultiplier); + if (useTxGasPrice && tx.gasprice < adjustedPrice) { + adjustedPrice = tx.gasprice; + } + } + + /** + * @dev generates a PerformParams struct for use in performUpkeepWithParams() + */ + function generatePerformParams( + address from, + uint256 id, + bytes memory performData, + bool useTxGasPrice + ) private view returns (PerformParams memory) { + uint256 gasLimit = s_upkeep[id].executeGas; + (uint256 gasWei, uint256 linkEth) = getFeedData(); + uint256 adjustedGasWei = adjustGasPrice(gasWei, useTxGasPrice); + uint96 maxLinkPayment = calculatePaymentAmount(gasLimit, adjustedGasWei, linkEth); + + return + PerformParams({ + from: from, + id: id, + performData: performData, + maxLinkPayment: maxLinkPayment, + gasLimit: gasLimit, + adjustedGasWei: adjustedGasWei, + linkEth: linkEth + }); + } + + /** + * @dev extracts a revert reason from a call result payload + */ + function getRevertMsg(bytes memory _payload) private pure returns (string memory) { + if (_payload.length < 68) return "transaction reverted silently"; + assembly { + _payload := add(_payload, 0x04) + } + return abi.decode(_payload, (string)); + } + + // MODIFIERS + + /** + * @dev ensures a upkeep is valid + */ + modifier validUpkeep(uint256 
id) { + validateUpkeep(id); + _; + } + + /** + * @dev ensures that burns don't accidentally happen by sending to the zero + * address + */ + modifier validateRecipient(address to) { + require(to != address(0), "cannot send to zero address"); + _; + } + + /** + * @dev Reverts if called by anyone other than the contract owner or registrar. + */ + modifier onlyOwnerOrRegistrar() { + require(msg.sender == owner() || msg.sender == s_registrar, "Only callable by owner or registrar"); + _; + } +} diff --git a/contracts/src/v0.7/KeeperRegistry1_1Mock.sol b/contracts/src/v0.7/KeeperRegistry1_1Mock.sol new file mode 100644 index 00000000..088fdd0f --- /dev/null +++ b/contracts/src/v0.7/KeeperRegistry1_1Mock.sol @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +contract KeeperRegistry1_1Mock { + event ConfigSet( + uint32 paymentPremiumPPB, + uint24 blockCountPerTurn, + uint32 checkGasLimit, + uint24 stalenessSeconds, + uint16 gasCeilingMultiplier, + uint256 fallbackGasPrice, + uint256 fallbackLinkPrice + ); + event FlatFeeSet(uint32 flatFeeMicroLink); + event FundsAdded(uint256 indexed id, address indexed from, uint96 amount); + event FundsWithdrawn(uint256 indexed id, uint256 amount, address to); + event KeepersUpdated(address[] keepers, address[] payees); + event OwnershipTransferRequested(address indexed from, address indexed to); + event OwnershipTransferred(address indexed from, address indexed to); + event Paused(address account); + event PayeeshipTransferRequested(address indexed keeper, address indexed from, address indexed to); + event PayeeshipTransferred(address indexed keeper, address indexed from, address indexed to); + event PaymentWithdrawn(address indexed keeper, uint256 indexed amount, address indexed to, address payee); + event RegistrarChanged(address indexed from, address indexed to); + event Unpaused(address account); + event UpkeepCanceled(uint256 indexed id, uint64 indexed atBlockHeight); + event UpkeepPerformed( + uint256 
indexed id, + bool indexed success, + address indexed from, + uint96 payment, + bytes performData + ); + event UpkeepRegistered(uint256 indexed id, uint32 executeGas, address admin); + + function emitConfigSet( + uint32 paymentPremiumPPB, + uint24 blockCountPerTurn, + uint32 checkGasLimit, + uint24 stalenessSeconds, + uint16 gasCeilingMultiplier, + uint256 fallbackGasPrice, + uint256 fallbackLinkPrice + ) public { + emit ConfigSet( + paymentPremiumPPB, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + fallbackGasPrice, + fallbackLinkPrice + ); + } + + function emitFlatFeeSet(uint32 flatFeeMicroLink) public { + emit FlatFeeSet(flatFeeMicroLink); + } + + function emitFundsAdded(uint256 id, address from, uint96 amount) public { + emit FundsAdded(id, from, amount); + } + + function emitFundsWithdrawn(uint256 id, uint256 amount, address to) public { + emit FundsWithdrawn(id, amount, to); + } + + function emitKeepersUpdated(address[] memory keepers, address[] memory payees) public { + emit KeepersUpdated(keepers, payees); + } + + function emitOwnershipTransferRequested(address from, address to) public { + emit OwnershipTransferRequested(from, to); + } + + function emitOwnershipTransferred(address from, address to) public { + emit OwnershipTransferred(from, to); + } + + function emitPaused(address account) public { + emit Paused(account); + } + + function emitPayeeshipTransferRequested(address keeper, address from, address to) public { + emit PayeeshipTransferRequested(keeper, from, to); + } + + function emitPayeeshipTransferred(address keeper, address from, address to) public { + emit PayeeshipTransferred(keeper, from, to); + } + + function emitPaymentWithdrawn(address keeper, uint256 amount, address to, address payee) public { + emit PaymentWithdrawn(keeper, amount, to, payee); + } + + function emitRegistrarChanged(address from, address to) public { + emit RegistrarChanged(from, to); + } + + function emitUnpaused(address account) public 
{ + emit Unpaused(account); + } + + function emitUpkeepCanceled(uint256 id, uint64 atBlockHeight) public { + emit UpkeepCanceled(id, atBlockHeight); + } + + function emitUpkeepPerformed( + uint256 id, + bool success, + address from, + uint96 payment, + bytes memory performData + ) public { + emit UpkeepPerformed(id, success, from, payment, performData); + } + + function emitUpkeepRegistered(uint256 id, uint32 executeGas, address admin) public { + emit UpkeepRegistered(id, executeGas, admin); + } + + uint256 private s_upkeepCount; + + // Function to set the current number of registered upkeeps + function setUpkeepCount(uint256 _upkeepCount) external { + s_upkeepCount = _upkeepCount; + } + + // Function to get the current number of registered upkeeps + function getUpkeepCount() external view returns (uint256) { + return s_upkeepCount; + } + + uint256[] private s_canceledUpkeepList; + + // Function to set the current number of canceled upkeeps + function setCanceledUpkeepList(uint256[] memory _canceledUpkeepList) external { + s_canceledUpkeepList = _canceledUpkeepList; + } + + // Function to set the current number of canceled upkeeps + function getCanceledUpkeepList() external view returns (uint256[] memory) { + return s_canceledUpkeepList; + } + + address[] private s_keeperList; + + // Function to set the keeper list for testing purposes + function setKeeperList(address[] memory _keepers) external { + s_keeperList = _keepers; + } + + // Function to get the keeper list + function getKeeperList() external view returns (address[] memory) { + return s_keeperList; + } + + struct Config { + uint32 paymentPremiumPPB; + uint32 flatFeeMicroLink; // min 0.000001 PLI, max 4294 PLI + uint24 blockCountPerTurn; + uint32 checkGasLimit; + uint24 stalenessSeconds; + uint16 gasCeilingMultiplier; + } + + Config private s_config; + uint256 private s_fallbackGasPrice; + uint256 private s_fallbackLinkPrice; + + // Function to set the configuration for testing purposes + function 
setConfig( + uint32 _paymentPremiumPPB, + uint32 _flatFeeMicroLink, + uint24 _blockCountPerTurn, + uint32 _checkGasLimit, + uint24 _stalenessSeconds, + uint16 _gasCeilingMultiplier, + uint256 _fallbackGasPrice, + uint256 _fallbackLinkPrice + ) external { + s_config.paymentPremiumPPB = _paymentPremiumPPB; + s_config.flatFeeMicroLink = _flatFeeMicroLink; + s_config.blockCountPerTurn = _blockCountPerTurn; + s_config.checkGasLimit = _checkGasLimit; + s_config.stalenessSeconds = _stalenessSeconds; + s_config.gasCeilingMultiplier = _gasCeilingMultiplier; + s_fallbackGasPrice = _fallbackGasPrice; + s_fallbackLinkPrice = _fallbackLinkPrice; + } + + // Function to get the configuration + function getConfig() + external + view + returns ( + uint32 paymentPremiumPPB, + uint24 blockCountPerTurn, + uint32 checkGasLimit, + uint24 stalenessSeconds, + uint16 gasCeilingMultiplier, + uint256 fallbackGasPrice, + uint256 fallbackLinkPrice + ) + { + return ( + s_config.paymentPremiumPPB, + s_config.blockCountPerTurn, + s_config.checkGasLimit, + s_config.stalenessSeconds, + s_config.gasCeilingMultiplier, + s_fallbackGasPrice, + s_fallbackLinkPrice + ); + } + + struct Upkeep { + address target; + uint32 executeGas; + uint96 balance; + address admin; + uint64 maxValidBlocknumber; + address lastKeeper; + } + + mapping(uint256 => Upkeep) private s_upkeep; + mapping(uint256 => bytes) private s_checkData; + + // Function to set the upkeep and checkData for testing purposes + function setUpkeep( + uint256 id, + address _target, + uint32 _executeGas, + uint96 _balance, + address _admin, + uint64 _maxValidBlocknumber, + address _lastKeeper, + bytes memory _checkData + ) external { + Upkeep memory upkeep = Upkeep({ + target: _target, + executeGas: _executeGas, + balance: _balance, + admin: _admin, + maxValidBlocknumber: _maxValidBlocknumber, + lastKeeper: _lastKeeper + }); + + s_upkeep[id] = upkeep; + s_checkData[id] = _checkData; + } + + // Function to get the upkeep and checkData + function 
getUpkeep( + uint256 id + ) + external + view + returns ( + address target, + uint32 executeGas, + bytes memory checkData, + uint96 balance, + address lastKeeper, + address admin, + uint64 maxValidBlocknumber + ) + { + Upkeep memory reg = s_upkeep[id]; + return ( + reg.target, + reg.executeGas, + s_checkData[id], + reg.balance, + reg.lastKeeper, + reg.admin, + reg.maxValidBlocknumber + ); + } + + mapping(uint256 => uint96) private s_minBalances; + + // Function to set the minimum balance for a specific upkeep id + function setMinBalance(uint256 id, uint96 minBalance) external { + s_minBalances[id] = minBalance; + } + + // Function to get the minimum balance for a specific upkeep id + function getMinBalanceForUpkeep(uint256 id) external view returns (uint96) { + return s_minBalances[id]; + } + + struct UpkeepData { + bytes performData; + uint256 maxLinkPayment; + uint256 gasLimit; + uint256 adjustedGasWei; + uint256 linkEth; + } + + mapping(uint256 => UpkeepData) private s_upkeepData; + + // Function to set mock data for the checkUpkeep function + function setCheckUpkeepData( + uint256 id, + bytes memory performData, + uint256 maxLinkPayment, + uint256 gasLimit, + uint256 adjustedGasWei, + uint256 linkEth + ) external { + s_upkeepData[id] = UpkeepData({ + performData: performData, + maxLinkPayment: maxLinkPayment, + gasLimit: gasLimit, + adjustedGasWei: adjustedGasWei, + linkEth: linkEth + }); + } + + // Mock checkUpkeep function + function checkUpkeep( + uint256 id, + address from + ) + external + view + returns ( + bytes memory performData, + uint256 maxLinkPayment, + uint256 gasLimit, + uint256 adjustedGasWei, + uint256 linkEth + ) + { + UpkeepData storage data = s_upkeepData[id]; + return (data.performData, data.maxLinkPayment, data.gasLimit, data.adjustedGasWei, data.linkEth); + } + + mapping(uint256 => bool) private s_upkeepSuccess; + + // Function to set mock return data for the performUpkeep function + function setPerformUpkeepSuccess(uint256 id, bool 
success) external { + s_upkeepSuccess[id] = success; + } + + // Mock performUpkeep function + function performUpkeep(uint256 id, bytes calldata performData) external returns (bool success) { + return s_upkeepSuccess[id]; + } +} diff --git a/contracts/src/v0.7/LinkTokenReceiver.sol b/contracts/src/v0.7/LinkTokenReceiver.sol new file mode 100644 index 00000000..f08a4936 --- /dev/null +++ b/contracts/src/v0.7/LinkTokenReceiver.sol @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +abstract contract LinkTokenReceiver { + /** + * @notice Called when PLI is sent to the contract via `transferAndCall` + * @dev The data payload's first 2 words will be overwritten by the `sender` and `amount` + * values to ensure correctness. Calls oracleRequest. + * @param sender Address of the sender + * @param amount Amount of PLI sent (specified in wei) + * @param data Payload of the transaction + */ + function onTokenTransfer( + address sender, + uint256 amount, + bytes memory data + ) public validateFromPLI permittedFunctionsForPLI(data) { + assembly { + // solhint-disable-next-line avoid-low-level-calls + mstore(add(data, 36), sender) // ensure correct sender is passed + // solhint-disable-next-line avoid-low-level-calls + mstore(add(data, 68), amount) // ensure correct amount is passed + } + // solhint-disable-next-line avoid-low-level-calls + (bool success, ) = address(this).delegatecall(data); // calls oracleRequest + require(success, "Unable to create request"); + } + + function getPluginToken() public view virtual returns (address); + + /** + * @notice Validate the function called on token transfer + */ + function _validateTokenTransferAction(bytes4 funcSelector, bytes memory data) internal virtual; + + /** + * @dev Reverts if not sent from the PLI token + */ + modifier validateFromPLI() { + require(msg.sender == getPluginToken(), "Must use PLI token"); + _; + } + + /** + * @dev Reverts if the given data does not begin with the `oracleRequest` function 
selector + * @param data The data payload of the request + */ + modifier permittedFunctionsForPLI(bytes memory data) { + bytes4 funcSelector; + assembly { + // solhint-disable-next-line avoid-low-level-calls + funcSelector := mload(add(data, 32)) + } + _validateTokenTransferAction(funcSelector, data); + _; + } +} diff --git a/contracts/src/v0.7/Operator.sol b/contracts/src/v0.7/Operator.sol new file mode 100644 index 00000000..907d9a3c --- /dev/null +++ b/contracts/src/v0.7/Operator.sol @@ -0,0 +1,580 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "./AuthorizedReceiver.sol"; +import "./LinkTokenReceiver.sol"; +import "./ConfirmedOwner.sol"; +import "./interfaces/LinkTokenInterface.sol"; +import "./interfaces/OperatorInterface.sol"; +import "./interfaces/OwnableInterface.sol"; +import "./interfaces/WithdrawalInterface.sol"; +import "./vendor/Address.sol"; +import "./vendor/SafeMathPlugin.sol"; + +/** + * @title The Plugin Operator contract + * @notice Node operators can deploy this contract to fulfill requests sent to them + */ +contract Operator is AuthorizedReceiver, ConfirmedOwner, LinkTokenReceiver, OperatorInterface, WithdrawalInterface { + using Address for address; + using SafeMathPlugin for uint256; + + struct Commitment { + bytes31 paramsHash; + uint8 dataVersion; + } + + uint256 public constant getExpiryTime = 5 minutes; + uint256 private constant MAXIMUM_DATA_VERSION = 256; + uint256 private constant MINIMUM_CONSUMER_GAS_LIMIT = 400000; + uint256 private constant SELECTOR_LENGTH = 4; + uint256 private constant EXPECTED_REQUEST_WORDS = 2; + uint256 private constant MINIMUM_REQUEST_LENGTH = SELECTOR_LENGTH + (32 * EXPECTED_REQUEST_WORDS); + // We initialize fields to 1 instead of 0 so that the first invocation + // does not cost more gas. 
+ uint256 private constant ONE_FOR_CONSISTENT_GAS_COST = 1; + // oracleRequest is intended for version 1, enabling single word responses + bytes4 private constant ORACLE_REQUEST_SELECTOR = this.oracleRequest.selector; + // operatorRequest is intended for version 2, enabling multi-word responses + bytes4 private constant OPERATOR_REQUEST_SELECTOR = this.operatorRequest.selector; + + LinkTokenInterface internal immutable linkToken; + mapping(bytes32 => Commitment) private s_commitments; + mapping(address => bool) private s_owned; + // Tokens sent for requests that have not been fulfilled yet + uint256 private s_tokensInEscrow = ONE_FOR_CONSISTENT_GAS_COST; + + event OracleRequest( + bytes32 indexed specId, + address requester, + bytes32 requestId, + uint256 payment, + address callbackAddr, + bytes4 callbackFunctionId, + uint256 cancelExpiration, + uint256 dataVersion, + bytes data + ); + + event CancelOracleRequest(bytes32 indexed requestId); + + event OracleResponse(bytes32 indexed requestId); + + event OwnableContractAccepted(address indexed acceptedContract); + + event TargetsUpdatedAuthorizedSenders(address[] targets, address[] senders, address changedBy); + + /** + * @notice Deploy with the address of the PLI token + * @dev Sets the LinkToken address for the imported LinkTokenInterface + * @param link The address of the PLI token + * @param owner The address of the owner + */ + constructor(address link, address owner) ConfirmedOwner(owner) { + linkToken = LinkTokenInterface(link); // external but already deployed and unalterable + } + + /** + * @notice The type and version of this contract + * @return Type and version string + */ + function typeAndVersion() external pure virtual returns (string memory) { + return "Operator 1.0.0"; + } + + /** + * @notice Creates the Plugin request. This is a backwards compatible API + * with the Oracle.sol contract, but the behavior changes because + * callbackAddress is assumed to be the same as the request sender. 
+ * @param sender The sender of the request + * @param payment The amount of payment given (specified in wei) + * @param specId The Job Specification ID + * @param callbackAddress The address the oracle data will be sent to + * @param callbackFunctionId The callback function ID for the response + * @param nonce The nonce sent by the requester + * @param dataVersion The specified data version + * @param data The extra request parameters + */ + function oracleRequest( + address sender, + uint256 payment, + bytes32 specId, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 dataVersion, + bytes calldata data + ) external override validateFromPLI { + (bytes32 requestId, uint256 expiration) = _verifyAndProcessOracleRequest( + sender, + payment, + callbackAddress, + callbackFunctionId, + nonce, + dataVersion + ); + emit OracleRequest(specId, sender, requestId, payment, sender, callbackFunctionId, expiration, dataVersion, data); + } + + /** + * @notice Creates the Plugin request + * @dev Stores the hash of the params as the on-chain commitment for the request. + * Emits OracleRequest event for the Plugin node to detect. 
+ * @param sender The sender of the request + * @param payment The amount of payment given (specified in wei) + * @param specId The Job Specification ID + * @param callbackFunctionId The callback function ID for the response + * @param nonce The nonce sent by the requester + * @param dataVersion The specified data version + * @param data The extra request parameters + */ + function operatorRequest( + address sender, + uint256 payment, + bytes32 specId, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 dataVersion, + bytes calldata data + ) external override validateFromPLI { + (bytes32 requestId, uint256 expiration) = _verifyAndProcessOracleRequest( + sender, + payment, + sender, + callbackFunctionId, + nonce, + dataVersion + ); + emit OracleRequest(specId, sender, requestId, payment, sender, callbackFunctionId, expiration, dataVersion, data); + } + + /** + * @notice Called by the Plugin node to fulfill requests + * @dev Given params must hash back to the commitment stored from `oracleRequest`. + * Will call the callback address' callback function without bubbling up error + * checking in a `require` so that the node can get paid. 
+ * @param requestId The fulfillment request ID that must match the requester's + * @param payment The payment amount that will be released for the oracle (specified in wei) + * @param callbackAddress The callback address to call for fulfillment + * @param callbackFunctionId The callback function ID to use for fulfillment + * @param expiration The expiration that the node should respond by before the requester can cancel + * @param data The data to return to the consuming contract + * @return Status if the external call was successful + */ + function fulfillOracleRequest( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + bytes32 data + ) + external + override + validateAuthorizedSender + validateRequestId(requestId) + validateCallbackAddress(callbackAddress) + returns (bool) + { + _verifyOracleRequestAndProcessPayment(requestId, payment, callbackAddress, callbackFunctionId, expiration, 1); + emit OracleResponse(requestId); + require(gasleft() >= MINIMUM_CONSUMER_GAS_LIMIT, "Must provide consumer enough gas"); + // All updates to the oracle's fulfillment should come before calling the + // callback(addr+functionId) as it is untrusted. + // See: https://solidity.readthedocs.io/en/develop/security-considerations.html#use-the-checks-effects-interactions-pattern + (bool success, ) = callbackAddress.call(abi.encodeWithSelector(callbackFunctionId, requestId, data)); // solhint-disable-line avoid-low-level-calls + return success; + } + + /** + * @notice Called by the Plugin node to fulfill requests with multi-word support + * @dev Given params must hash back to the commitment stored from `oracleRequest`. + * Will call the callback address' callback function without bubbling up error + * checking in a `require` so that the node can get paid. 
+ * @param requestId The fulfillment request ID that must match the requester's + * @param payment The payment amount that will be released for the oracle (specified in wei) + * @param callbackAddress The callback address to call for fulfillment + * @param callbackFunctionId The callback function ID to use for fulfillment + * @param expiration The expiration that the node should respond by before the requester can cancel + * @param data The data to return to the consuming contract + * @return Status if the external call was successful + */ + function fulfillOracleRequest2( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + bytes calldata data + ) + external + override + validateAuthorizedSender + validateRequestId(requestId) + validateCallbackAddress(callbackAddress) + validateMultiWordResponseId(requestId, data) + returns (bool) + { + _verifyOracleRequestAndProcessPayment(requestId, payment, callbackAddress, callbackFunctionId, expiration, 2); + emit OracleResponse(requestId); + require(gasleft() >= MINIMUM_CONSUMER_GAS_LIMIT, "Must provide consumer enough gas"); + // All updates to the oracle's fulfillment should come before calling the + // callback(addr+functionId) as it is untrusted. + // See: https://solidity.readthedocs.io/en/develop/security-considerations.html#use-the-checks-effects-interactions-pattern + (bool success, ) = callbackAddress.call(abi.encodePacked(callbackFunctionId, data)); // solhint-disable-line avoid-low-level-calls + return success; + } + + /** + * @notice Transfer the ownership of ownable contracts. This is primarily + * intended for Authorized Forwarders but could possibly be extended to work + * with future contracts. 
+ * @param ownable list of addresses to transfer + * @param newOwner address to transfer ownership to + */ + function transferOwnableContracts(address[] calldata ownable, address newOwner) external onlyOwner { + for (uint256 i = 0; i < ownable.length; i++) { + s_owned[ownable[i]] = false; + OwnableInterface(ownable[i]).transferOwnership(newOwner); + } + } + + /** + * @notice Accept the ownership of an ownable contract. This is primarily + * intended for Authorized Forwarders but could possibly be extended to work + * with future contracts. + * @dev Must be the pending owner on the contract + * @param ownable list of addresses of Ownable contracts to accept + */ + function acceptOwnableContracts(address[] calldata ownable) public validateAuthorizedSenderSetter { + for (uint256 i = 0; i < ownable.length; i++) { + s_owned[ownable[i]] = true; + emit OwnableContractAccepted(ownable[i]); + OwnableInterface(ownable[i]).acceptOwnership(); + } + } + + /** + * @notice Sets the fulfillment permission for + * @param targets The addresses to set permissions on + * @param senders The addresses that are allowed to send updates + */ + function setAuthorizedSendersOn(address[] calldata targets, address[] calldata senders) + public + validateAuthorizedSenderSetter + { + emit TargetsUpdatedAuthorizedSenders(targets, senders, msg.sender); + + for (uint256 i = 0; i < targets.length; i++) { + AuthorizedReceiverInterface(targets[i]).setAuthorizedSenders(senders); + } + } + + /** + * @notice Accepts ownership of ownable contracts and then immediately sets + * the authorized sender list on each of the newly owned contracts. This is + * primarily intended for Authorized Forwarders but could possibly be + * extended to work with future contracts. 
+ * @param targets The addresses to set permissions on + * @param senders The addresses that are allowed to send updates + */ + function acceptAuthorizedReceivers(address[] calldata targets, address[] calldata senders) + external + validateAuthorizedSenderSetter + { + acceptOwnableContracts(targets); + setAuthorizedSendersOn(targets, senders); + } + + /** + * @notice Allows the node operator to withdraw earned PLI to a given address + * @dev The owner of the contract can be another wallet and does not have to be a Plugin node + * @param recipient The address to send the PLI token to + * @param amount The amount to send (specified in wei) + */ + function withdraw(address recipient, uint256 amount) + external + override(OracleInterface, WithdrawalInterface) + onlyOwner + validateAvailableFunds(amount) + { + assert(linkToken.transfer(recipient, amount)); + } + + /** + * @notice Displays the amount of PLI that is available for the node operator to withdraw + * @dev We use `ONE_FOR_CONSISTENT_GAS_COST` in place of 0 in storage + * @return The amount of withdrawable PLI on the contract + */ + function withdrawable() external view override(OracleInterface, WithdrawalInterface) returns (uint256) { + return _fundsAvailable(); + } + + /** + * @notice Forward a call to another contract + * @dev Only callable by the owner + * @param to address + * @param data to forward + */ + function ownerForward(address to, bytes calldata data) external onlyOwner validateNotToPLI(to) { + require(to.isContract(), "Must forward to a contract"); + (bool status, ) = to.call(data); + require(status, "Forwarded call failed"); + } + + /** + * @notice Interact with other LinkTokenReceiver contracts by calling transferAndCall + * @param to The address to transfer to. + * @param value The amount to be transferred. + * @param data The extra data to be passed to the receiving contract. 
+ * @return success bool + */ + function ownerTransferAndCall( + address to, + uint256 value, + bytes calldata data + ) external override onlyOwner validateAvailableFunds(value) returns (bool success) { + return linkToken.transferAndCall(to, value, data); + } + + /** + * @notice Distribute funds to multiple addresses using ETH send + * to this payable function. + * @dev Array length must be equal, ETH sent must equal the sum of amounts. + * A malicious receiver could cause the distribution to revert, in which case + * it is expected that the address is removed from the list. + * @param receivers list of addresses + * @param amounts list of amounts + */ + function distributeFunds(address payable[] calldata receivers, uint256[] calldata amounts) external payable { + require(receivers.length > 0 && receivers.length == amounts.length, "Invalid array length(s)"); + uint256 valueRemaining = msg.value; + for (uint256 i = 0; i < receivers.length; i++) { + uint256 sendAmount = amounts[i]; + valueRemaining = valueRemaining.sub(sendAmount); + receivers[i].transfer(sendAmount); + } + require(valueRemaining == 0, "Too much ETH sent"); + } + + /** + * @notice Allows recipient to cancel requests sent to this oracle contract. + * Will transfer the PLI sent for the request back to the recipient address. + * @dev Given params must hash to a commitment stored on the contract in order + * for the request to be valid. Emits CancelOracleRequest event. 
+ * @param requestId The request ID + * @param payment The amount of payment given (specified in wei) + * @param callbackFunc The requester's specified callback function selector + * @param expiration The time of the expiration for the request + */ + function cancelOracleRequest( + bytes32 requestId, + uint256 payment, + bytes4 callbackFunc, + uint256 expiration + ) external override { + bytes31 paramsHash = _buildParamsHash(payment, msg.sender, callbackFunc, expiration); + require(s_commitments[requestId].paramsHash == paramsHash, "Params do not match request ID"); + // solhint-disable-next-line not-rely-on-time + require(expiration <= block.timestamp, "Request is not expired"); + + delete s_commitments[requestId]; + emit CancelOracleRequest(requestId); + + linkToken.transfer(msg.sender, payment); + } + + /** + * @notice Allows requester to cancel requests sent to this oracle contract. + * Will transfer the PLI sent for the request back to the recipient address. + * @dev Given params must hash to a commitment stored on the contract in order + * for the request to be valid. Emits CancelOracleRequest event. 
+ * @param nonce The nonce used to generate the request ID + * @param payment The amount of payment given (specified in wei) + * @param callbackFunc The requester's specified callback function selector + * @param expiration The time of the expiration for the request + */ + function cancelOracleRequestByRequester( + uint256 nonce, + uint256 payment, + bytes4 callbackFunc, + uint256 expiration + ) external { + bytes32 requestId = keccak256(abi.encodePacked(msg.sender, nonce)); + bytes31 paramsHash = _buildParamsHash(payment, msg.sender, callbackFunc, expiration); + require(s_commitments[requestId].paramsHash == paramsHash, "Params do not match request ID"); + // solhint-disable-next-line not-rely-on-time + require(expiration <= block.timestamp, "Request is not expired"); + + delete s_commitments[requestId]; + emit CancelOracleRequest(requestId); + + linkToken.transfer(msg.sender, payment); + } + + /** + * @notice Returns the address of the PLI token + * @dev This is the public implementation for pluginTokenAddress, which is + * an internal method of the PluginClient contract + */ + function getPluginToken() public view override returns (address) { + return address(linkToken); + } + + /** + * @notice Require that the token transfer action is valid + * @dev OPERATOR_REQUEST_SELECTOR = multiword, ORACLE_REQUEST_SELECTOR = singleword + */ + function _validateTokenTransferAction(bytes4 funcSelector, bytes memory data) internal pure override { + require(data.length >= MINIMUM_REQUEST_LENGTH, "Invalid request length"); + require( + funcSelector == OPERATOR_REQUEST_SELECTOR || funcSelector == ORACLE_REQUEST_SELECTOR, + "Must use whitelisted functions" + ); + } + + /** + * @notice Verify the Oracle Request and record necessary information + * @param sender The sender of the request + * @param payment The amount of payment given (specified in wei) + * @param callbackAddress The callback address for the response + * @param callbackFunctionId The callback function ID for the 
response + * @param nonce The nonce sent by the requester + */ + function _verifyAndProcessOracleRequest( + address sender, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 dataVersion + ) private validateNotToPLI(callbackAddress) returns (bytes32 requestId, uint256 expiration) { + requestId = keccak256(abi.encodePacked(sender, nonce)); + require(s_commitments[requestId].paramsHash == 0, "Must use a unique ID"); + // solhint-disable-next-line not-rely-on-time + expiration = block.timestamp.add(getExpiryTime); + bytes31 paramsHash = _buildParamsHash(payment, callbackAddress, callbackFunctionId, expiration); + s_commitments[requestId] = Commitment(paramsHash, _safeCastToUint8(dataVersion)); + s_tokensInEscrow = s_tokensInEscrow.add(payment); + return (requestId, expiration); + } + + /** + * @notice Verify the Oracle request and unlock escrowed payment + * @param requestId The fulfillment request ID that must match the requester's + * @param payment The payment amount that will be released for the oracle (specified in wei) + * @param callbackAddress The callback address to call for fulfillment + * @param callbackFunctionId The callback function ID to use for fulfillment + * @param expiration The expiration that the node should respond by before the requester can cancel + */ + function _verifyOracleRequestAndProcessPayment( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + uint256 dataVersion + ) internal { + bytes31 paramsHash = _buildParamsHash(payment, callbackAddress, callbackFunctionId, expiration); + require(s_commitments[requestId].paramsHash == paramsHash, "Params do not match request ID"); + require(s_commitments[requestId].dataVersion <= _safeCastToUint8(dataVersion), "Data versions must match"); + s_tokensInEscrow = s_tokensInEscrow.sub(payment); + delete s_commitments[requestId]; + } + + /** + * @notice Build the bytes31 hash from the 
  /**
   * @notice Returns the PLI available in this contract, not locked in escrow
   * @dev Subtracts ONE_FOR_CONSISTENT_GAS_COST from the escrow total before
   * deducting it from the token balance — presumably s_tokensInEscrow is
   * initialized with that same sentinel value so net escrow stays accurate;
   * TODO confirm at the variable's declaration site.
   * @return uint256 PLI tokens available
   */
  function _fundsAvailable() private view returns (uint256) {
    // SafeMath sub reverts on underflow rather than wrapping
    uint256 inEscrow = s_tokensInEscrow.sub(ONE_FOR_CONSISTENT_GAS_COST);
    return linkToken.balanceOf(address(this)).sub(inEscrow);
  }
  /**
   * @dev Reverts if amount requested is greater than withdrawable balance
   * as computed by `_fundsAvailable()` (token balance minus escrowed tokens)
   * @param amount The given amount to compare to the withdrawable balance
   */
  modifier validateAvailableFunds(uint256 amount) {
    require(_fundsAvailable() >= amount, "Amount requested is greater than withdrawable balance");
    _;
  }
* @return Type and version string + */ + function typeAndVersion() external pure virtual returns (string memory) { + return "OperatorFactory 1.0.0"; + } + + /** + * @notice creates a new Operator contract with the msg.sender as owner + */ + function deployNewOperator() external returns (address) { + Operator operator = new Operator(getPluginToken, msg.sender); + + s_created[address(operator)] = true; + emit OperatorCreated(address(operator), msg.sender, msg.sender); + + return address(operator); + } + + /** + * @notice creates a new Operator contract with the msg.sender as owner and a + * new Operator Forwarder with the Operator as the owner + */ + function deployNewOperatorAndForwarder() external returns (address, address) { + Operator operator = new Operator(getPluginToken, msg.sender); + s_created[address(operator)] = true; + emit OperatorCreated(address(operator), msg.sender, msg.sender); + + bytes memory tmp = new bytes(0); + AuthorizedForwarder forwarder = new AuthorizedForwarder(getPluginToken, address(this), address(operator), tmp); + s_created[address(forwarder)] = true; + emit AuthorizedForwarderCreated(address(forwarder), address(this), msg.sender); + + return (address(operator), address(forwarder)); + } + + /** + * @notice creates a new Forwarder contract with the msg.sender as owner + */ + function deployNewForwarder() external returns (address) { + bytes memory tmp = new bytes(0); + AuthorizedForwarder forwarder = new AuthorizedForwarder(getPluginToken, msg.sender, address(0), tmp); + + s_created[address(forwarder)] = true; + emit AuthorizedForwarderCreated(address(forwarder), msg.sender, msg.sender); + + return address(forwarder); + } + + /** + * @notice creates a new Forwarder contract with the msg.sender as owner + */ + function deployNewForwarderAndTransferOwnership(address to, bytes calldata message) external returns (address) { + AuthorizedForwarder forwarder = new AuthorizedForwarder(getPluginToken, msg.sender, to, message); + + 
s_created[address(forwarder)] = true; + emit AuthorizedForwarderCreated(address(forwarder), msg.sender, msg.sender); + + return address(forwarder); + } + + /** + * @notice indicates whether this factory deployed an address + */ + function created(address query) external view returns (bool) { + return s_created[query]; + } +} diff --git a/contracts/src/v0.7/Plugin.sol b/contracts/src/v0.7/Plugin.sol new file mode 100644 index 00000000..8b7119fe --- /dev/null +++ b/contracts/src/v0.7/Plugin.sol @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import {CBORPlugin} from "./vendor/CBORPlugin.sol"; +import {BufferPlugin} from "./vendor/BufferPlugin.sol"; + +/** + * @title Library for common Plugin functions + * @dev Uses imported CBOR library for encoding to buffer + */ +library Plugin { + uint256 internal constant defaultBufferSize = 256; // solhint-disable-line const-name-snakecase + + using CBORPlugin for BufferPlugin.buffer; + + struct Request { + bytes32 id; + address callbackAddress; + bytes4 callbackFunctionId; + uint256 nonce; + BufferPlugin.buffer buf; + } + + /** + * @notice Initializes a Plugin request + * @dev Sets the ID, callback address, and callback function signature on the request + * @param self The uninitialized request + * @param jobId The Job Specification ID + * @param callbackAddr The callback address + * @param callbackFunc The callback function signature + * @return The initialized request + */ + function initialize( + Request memory self, + bytes32 jobId, + address callbackAddr, + bytes4 callbackFunc + ) internal pure returns (Plugin.Request memory) { + BufferPlugin.init(self.buf, defaultBufferSize); + self.id = jobId; + self.callbackAddress = callbackAddr; + self.callbackFunctionId = callbackFunc; + return self; + } + + /** + * @notice Sets the data for the buffer without encoding CBOR on-chain + * @dev CBOR can be closed with curly-brackets {} or they can be left off + * @param self The initialized request + * 
@param data The CBOR data + */ + function setBuffer(Request memory self, bytes memory data) internal pure { + BufferPlugin.init(self.buf, data.length); + BufferPlugin.append(self.buf, data); + } + + /** + * @notice Adds a string value to the request with a given key name + * @param self The initialized request + * @param key The name of the key + * @param value The string value to add + */ + function add( + Request memory self, + string memory key, + string memory value + ) internal pure { + self.buf.encodeString(key); + self.buf.encodeString(value); + } + + /** + * @notice Adds a bytes value to the request with a given key name + * @param self The initialized request + * @param key The name of the key + * @param value The bytes value to add + */ + function addBytes( + Request memory self, + string memory key, + bytes memory value + ) internal pure { + self.buf.encodeString(key); + self.buf.encodeBytes(value); + } + + /** + * @notice Adds a int256 value to the request with a given key name + * @param self The initialized request + * @param key The name of the key + * @param value The int256 value to add + */ + function addInt( + Request memory self, + string memory key, + int256 value + ) internal pure { + self.buf.encodeString(key); + self.buf.encodeInt(value); + } + + /** + * @notice Adds a uint256 value to the request with a given key name + * @param self The initialized request + * @param key The name of the key + * @param value The uint256 value to add + */ + function addUint( + Request memory self, + string memory key, + uint256 value + ) internal pure { + self.buf.encodeString(key); + self.buf.encodeUInt(value); + } + + /** + * @notice Adds an array of strings to the request with a given key name + * @param self The initialized request + * @param key The name of the key + * @param values The array of string values to add + */ + function addStringArray( + Request memory self, + string memory key, + string[] memory values + ) internal pure { + 
self.buf.encodeString(key); + self.buf.startArray(); + for (uint256 i = 0; i < values.length; i++) { + self.buf.encodeString(values[i]); + } + self.buf.endSequence(); + } +} diff --git a/contracts/src/v0.7/PluginClient.sol b/contracts/src/v0.7/PluginClient.sol new file mode 100644 index 00000000..a622c67e --- /dev/null +++ b/contracts/src/v0.7/PluginClient.sol @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "./Plugin.sol"; +import "./interfaces/ENSInterface.sol"; +import "./interfaces/LinkTokenInterface.sol"; +import "./interfaces/PluginRequestInterface.sol"; +import "./interfaces/OperatorInterface.sol"; +import "./interfaces/PointerInterface.sol"; +import {ENSResolver as ENSResolver_Plugin} from "./vendor/ENSResolver.sol"; + +/** + * @title The PluginClient contract + * @notice Contract writers can inherit this contract in order to create requests for the + * Plugin network + */ +abstract contract PluginClient { + using Plugin for Plugin.Request; + + uint256 internal constant PLI_DIVISIBILITY = 10**18; + uint256 private constant AMOUNT_OVERRIDE = 0; + address private constant SENDER_OVERRIDE = address(0); + uint256 private constant ORACLE_ARGS_VERSION = 1; + uint256 private constant OPERATOR_ARGS_VERSION = 2; + bytes32 private constant ENS_TOKEN_SUBNAME = keccak256("link"); + bytes32 private constant ENS_ORACLE_SUBNAME = keccak256("oracle"); + address private constant PLI_TOKEN_POINTER = 0xC89bD4E1632D3A43CB03AAAd5262cbe4038Bc571; + + ENSInterface private s_ens; + bytes32 private s_ensNode; + LinkTokenInterface private s_link; + OperatorInterface private s_oracle; + uint256 private s_requestCount = 1; + mapping(bytes32 => address) private s_pendingRequests; + + event PluginRequested(bytes32 indexed id); + event PluginFulfilled(bytes32 indexed id); + event PluginCancelled(bytes32 indexed id); + + /** + * @notice Creates a request that can hold additional parameters + * @param specId The Job Specification ID that the request 
will be created for + * @param callbackAddr address to operate the callback on + * @param callbackFunctionSignature function signature to use for the callback + * @return A Plugin Request struct in memory + */ + function buildPluginRequest( + bytes32 specId, + address callbackAddr, + bytes4 callbackFunctionSignature + ) internal pure returns (Plugin.Request memory) { + Plugin.Request memory req; + return req.initialize(specId, callbackAddr, callbackFunctionSignature); + } + + /** + * @notice Creates a request that can hold additional parameters + * @param specId The Job Specification ID that the request will be created for + * @param callbackFunctionSignature function signature to use for the callback + * @return A Plugin Request struct in memory + */ + function buildOperatorRequest(bytes32 specId, bytes4 callbackFunctionSignature) + internal + view + returns (Plugin.Request memory) + { + Plugin.Request memory req; + return req.initialize(specId, address(this), callbackFunctionSignature); + } + + /** + * @notice Creates a Plugin request to the stored oracle address + * @dev Calls `pluginRequestTo` with the stored oracle address + * @param req The initialized Plugin Request + * @param payment The amount of PLI to send for the request + * @return requestId The request ID + */ + function sendPluginRequest(Plugin.Request memory req, uint256 payment) internal returns (bytes32) { + return sendPluginRequestTo(address(s_oracle), req, payment); + } + + /** + * @notice Creates a Plugin request to the specified oracle address + * @dev Generates and stores a request ID, increments the local nonce, and uses `transferAndCall` to + * send PLI which creates a request on the target oracle contract. + * Emits PluginRequested event. 
  /**
   * @notice Creates a Plugin request to the specified oracle address
   * @dev Generates and stores a request ID, increments the local nonce, and uses
   * `transferAndCall` to send PLI which creates a request on the target oracle
   * contract. Emits PluginRequested event. The argument order in the encoded call
   * must match the oracle's `oracleRequest` decoding exactly.
   * @param oracleAddress The address of the oracle for the request
   * @param req The initialized Plugin Request
   * @param payment The amount of PLI to send for the request
   * @return requestId The request ID
   */
  function sendPluginRequestTo(
    address oracleAddress,
    Plugin.Request memory req,
    uint256 payment
  ) internal returns (bytes32 requestId) {
    // Consume and advance the local nonce used for request-ID derivation
    uint256 nonce = s_requestCount;
    s_requestCount = nonce + 1;
    bytes memory encodedRequest = abi.encodeWithSelector(
      PluginRequestInterface.oracleRequest.selector,
      SENDER_OVERRIDE, // Sender value - overridden by onTokenTransfer by the requesting contract's address
      AMOUNT_OVERRIDE, // Amount value - overridden by onTokenTransfer by the actual amount of PLI sent
      req.id,
      address(this),
      req.callbackFunctionId,
      nonce,
      ORACLE_ARGS_VERSION,
      req.buf.buf
    );
    return _rawRequest(oracleAddress, nonce, payment, encodedRequest);
  }
+ * @param oracleAddress The address of the oracle for the request + * @param req The initialized Plugin Request + * @param payment The amount of PLI to send for the request + * @return requestId The request ID + */ + function sendOperatorRequestTo( + address oracleAddress, + Plugin.Request memory req, + uint256 payment + ) internal returns (bytes32 requestId) { + uint256 nonce = s_requestCount; + s_requestCount = nonce + 1; + bytes memory encodedRequest = abi.encodeWithSelector( + OperatorInterface.operatorRequest.selector, + SENDER_OVERRIDE, // Sender value - overridden by onTokenTransfer by the requesting contract's address + AMOUNT_OVERRIDE, // Amount value - overridden by onTokenTransfer by the actual amount of PLI sent + req.id, + req.callbackFunctionId, + nonce, + OPERATOR_ARGS_VERSION, + req.buf.buf + ); + return _rawRequest(oracleAddress, nonce, payment, encodedRequest); + } + + /** + * @notice Make a request to an oracle + * @param oracleAddress The address of the oracle for the request + * @param nonce used to generate the request ID + * @param payment The amount of PLI to send for the request + * @param encodedRequest data encoded for request type specific format + * @return requestId The request ID + */ + function _rawRequest( + address oracleAddress, + uint256 nonce, + uint256 payment, + bytes memory encodedRequest + ) private returns (bytes32 requestId) { + requestId = keccak256(abi.encodePacked(this, nonce)); + s_pendingRequests[requestId] = oracleAddress; + emit PluginRequested(requestId); + require(s_link.transferAndCall(oracleAddress, payment, encodedRequest), "unable to transferAndCall to oracle"); + } + + /** + * @notice Allows a request to be cancelled if it has not been fulfilled + * @dev Requires keeping track of the expiration value emitted from the oracle contract. + * Deletes the request from the `pendingRequests` mapping. + * Emits PluginCancelled event. 
  /**
   * @notice Allows a request to be cancelled if it has not been fulfilled
   * @dev Requires keeping track of the expiration value emitted from the oracle contract.
   * Deletes the request from the `pendingRequests` mapping.
   * Emits PluginCancelled event.
   * NOTE(review): if `requestId` is unknown, the stored address is zero and the
   * cancel call is made on address(0) — presumably callers only pass IDs they
   * created; verify against callers.
   * @param requestId The request ID
   * @param payment The amount of PLI sent for the request
   * @param callbackFunc The callback function specified for the request
   * @param expiration The time of the expiration for the request
   */
  function cancelPluginRequest(
    bytes32 requestId,
    uint256 payment,
    bytes4 callbackFunc,
    uint256 expiration
  ) internal {
    OperatorInterface requested = OperatorInterface(s_pendingRequests[requestId]);
    // Clear local pending state before the external call to the oracle
    delete s_pendingRequests[requestId];
    emit PluginCancelled(requestId);
    requested.cancelOracleRequest(requestId, payment, callbackFunc, expiration);
  }
address(s_oracle); + } + + /** + * @notice Allows for a request which was created on another contract to be fulfilled + * on this contract + * @param oracleAddress The address of the oracle contract that will fulfill the request + * @param requestId The request ID used for the response + */ + function addPluginExternalRequest(address oracleAddress, bytes32 requestId) internal notPendingRequest(requestId) { + s_pendingRequests[requestId] = oracleAddress; + } + + /** + * @notice Sets the stored oracle and PLI token contracts with the addresses resolved by ENS + * @dev Accounts for subnodes having different resolvers + * @param ensAddress The address of the ENS contract + * @param node The ENS node hash + */ + function usePluginWithENS(address ensAddress, bytes32 node) internal { + s_ens = ENSInterface(ensAddress); + s_ensNode = node; + bytes32 linkSubnode = keccak256(abi.encodePacked(s_ensNode, ENS_TOKEN_SUBNAME)); + ENSResolver_Plugin resolver = ENSResolver_Plugin(s_ens.resolver(linkSubnode)); + setPluginToken(resolver.addr(linkSubnode)); + updatePluginOracleWithENS(); + } + + /** + * @notice Sets the stored oracle contract with the address resolved by ENS + * @dev This may be called on its own as long as `usePluginWithENS` has been called previously + */ + function updatePluginOracleWithENS() internal { + bytes32 oracleSubnode = keccak256(abi.encodePacked(s_ensNode, ENS_ORACLE_SUBNAME)); + ENSResolver_Plugin resolver = ENSResolver_Plugin(s_ens.resolver(oracleSubnode)); + setPluginOracle(resolver.addr(oracleSubnode)); + } + + /** + * @notice Ensures that the fulfillment is valid for this contract + * @dev Use if the contract developer prefers methods instead of modifiers for validation + * @param requestId The request ID for fulfillment + */ + function validatePluginCallback(bytes32 requestId) + internal + recordPluginFulfillment(requestId) + // solhint-disable-next-line no-empty-blocks + { + + } + + /** + * @dev Reverts if the sender is not the oracle of the 
  /**
   * @dev Reverts if the sender is not the oracle of the request.
   * Emits PluginFulfilled event.
   * @param requestId The request ID for fulfillment
   */
  modifier recordPluginFulfillment(bytes32 requestId) {
    require(msg.sender == s_pendingRequests[requestId], "Source must be the oracle of the request");
    // Deleting before running the wrapped body prevents a second fulfillment of the same request
    delete s_pendingRequests[requestId];
    emit PluginFulfilled(requestId);
    _;
  }
+ * they can just listen to `RegistrationRequested` & `RegistrationApproved` events and know the status on registrations. + */ +contract UpkeepRegistrationRequests is TypeAndVersionInterface, ConfirmedOwner { + using SafeMath96 for uint96; + + bytes4 private constant REGISTER_REQUEST_SELECTOR = this.register.selector; + + uint256 private s_minPLIJuels; + mapping(bytes32 => PendingRequest) private s_pendingRequests; + + LinkTokenInterface public immutable PLI; + + /** + * @notice versions: + * - UpkeepRegistration 1.0.0: initial release + */ + string public constant override typeAndVersion = "UpkeepRegistrationRequests 1.0.0"; + + struct AutoApprovedConfig { + bool enabled; + uint16 allowedPerWindow; + uint32 windowSizeInBlocks; + uint64 windowStart; + uint16 approvedInCurrentWindow; + } + + struct PendingRequest { + address admin; + uint96 balance; + } + + AutoApprovedConfig private s_config; + KeeperRegistryBaseInterface private s_keeperRegistry; + + event RegistrationRequested( + bytes32 indexed hash, + string name, + bytes encryptedEmail, + address indexed upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes checkData, + uint96 amount, + uint8 indexed source + ); + + event RegistrationApproved(bytes32 indexed hash, string displayName, uint256 indexed upkeepId); + + event RegistrationRejected(bytes32 indexed hash); + + event ConfigChanged( + bool enabled, + uint32 windowSizeInBlocks, + uint16 allowedPerWindow, + address keeperRegistry, + uint256 minPLIJuels + ); + + constructor(address PLIAddress, uint256 minimumPLIJuels) ConfirmedOwner(msg.sender) { + PLI = LinkTokenInterface(PLIAddress); + s_minPLIJuels = minimumPLIJuels; + } + + //EXTERNAL + + /** + * @notice register can only be called through transferAndCall on PLI contract + * @param name string of the upkeep to be registered + * @param encryptedEmail email address of upkeep contact + * @param upkeepContract address to perform upkeep on + * @param gasLimit amount of gas to provide the target 
  /**
   * @notice register can only be called through transferAndCall on PLI contract
   * @dev Always emits RegistrationRequested. If auto-approve is enabled and the
   * current window has remaining quota, the upkeep is registered immediately;
   * otherwise the payment accumulates in a pending request keyed by the params hash
   * until the owner calls `approve`.
   * @param name string of the upkeep to be registered
   * @param encryptedEmail email address of upkeep contact
   * @param upkeepContract address to perform upkeep on
   * @param gasLimit amount of gas to provide the target contract when performing upkeep
   * @param adminAddress address to cancel upkeep and withdraw remaining funds
   * @param checkData data passed to the contract when checking for upkeep
   * @param amount quantity of PLI upkeep is funded with (specified in Juels)
   * @param source application sending this request
   */
  function register(
    string memory name,
    bytes calldata encryptedEmail,
    address upkeepContract,
    uint32 gasLimit,
    address adminAddress,
    bytes calldata checkData,
    uint96 amount,
    uint8 source
  ) external onlyPLI {
    require(adminAddress != address(0), "invalid admin address");
    // Hash identifies a registration by its registry-relevant parameters only
    bytes32 hash = keccak256(abi.encode(upkeepContract, gasLimit, adminAddress, checkData));

    emit RegistrationRequested(
      hash,
      name,
      encryptedEmail,
      upkeepContract,
      gasLimit,
      adminAddress,
      checkData,
      amount,
      source
    );

    AutoApprovedConfig memory config = s_config;
    if (config.enabled && _underApprovalLimit(config)) {
      _incrementApprovedCount(config);

      _approve(name, upkeepContract, gasLimit, adminAddress, checkData, amount, hash);
    } else {
      // Repeat registrations with the same params accumulate balance; SafeMath96 add reverts on overflow
      uint96 newBalance = s_pendingRequests[hash].balance.add(amount);
      s_pendingRequests[hash] = PendingRequest({admin: adminAddress, balance: newBalance});
    }
  }
request and return the refunds to the msg.sender + * @param hash the request hash + */ + function cancel(bytes32 hash) external { + PendingRequest memory request = s_pendingRequests[hash]; + require(msg.sender == request.admin || msg.sender == owner(), "only admin / owner can cancel"); + require(request.admin != address(0), "request not found"); + delete s_pendingRequests[hash]; + require(PLI.transfer(msg.sender, request.balance), "PLI token transfer failed"); + emit RegistrationRejected(hash); + } + + /** + * @notice owner calls this function to set if registration requests should be sent directly to the Keeper Registry + * @param enabled setting for auto-approve registrations + * @param windowSizeInBlocks window size defined in number of blocks + * @param allowedPerWindow number of registrations that can be auto approved in above window + * @param keeperRegistry new keeper registry address + */ + function setRegistrationConfig( + bool enabled, + uint32 windowSizeInBlocks, + uint16 allowedPerWindow, + address keeperRegistry, + uint256 minPLIJuels + ) external onlyOwner { + s_config = AutoApprovedConfig({ + enabled: enabled, + allowedPerWindow: allowedPerWindow, + windowSizeInBlocks: windowSizeInBlocks, + windowStart: 0, + approvedInCurrentWindow: 0 + }); + s_minPLIJuels = minPLIJuels; + s_keeperRegistry = KeeperRegistryBaseInterface(keeperRegistry); + + emit ConfigChanged(enabled, windowSizeInBlocks, allowedPerWindow, keeperRegistry, minPLIJuels); + } + + /** + * @notice read the current registration configuration + */ + function getRegistrationConfig() + external + view + returns ( + bool enabled, + uint32 windowSizeInBlocks, + uint16 allowedPerWindow, + address keeperRegistry, + uint256 minPLIJuels, + uint64 windowStart, + uint16 approvedInCurrentWindow + ) + { + AutoApprovedConfig memory config = s_config; + return ( + config.enabled, + config.windowSizeInBlocks, + config.allowedPerWindow, + address(s_keeperRegistry), + s_minPLIJuels, + config.windowStart, + 
  /**
   * @notice Called when PLI is sent to the contract via `transferAndCall`
   * @dev Guarded by onlyPLI, a selector whitelist on `data`, and a check that the
   * amount encoded in `data` matches the actual amount transferred. The payload is
   * then executed against this contract via delegatecall, which invokes `register`.
   * NOTE(review): self-delegatecall forwards msg.sender (the PLI token) into the
   * called function's onlyPLI check — presumably intentional; verify.
   * @param amount Amount of PLI sent (specified in Juels)
   * @param data Payload of the transaction
   */
  function onTokenTransfer(
    address, /* sender */
    uint256 amount,
    bytes calldata data
  ) external onlyPLI permittedFunctionsForPLI(data) isActualAmount(amount, data) {
    require(amount >= s_minPLIJuels, "Insufficient payment");
    (bool success, ) = address(this).delegatecall(data);
    // calls register
    require(success, "Unable to create request");
  }
approval limits and check if in range + */ + function _underApprovalLimit(AutoApprovedConfig memory config) private returns (bool) { + _resetWindowIfRequired(config); + if (config.approvedInCurrentWindow < config.allowedPerWindow) { + return true; + } + return false; + } + + /** + * @dev record new latest approved count + */ + function _incrementApprovedCount(AutoApprovedConfig memory config) private { + config.approvedInCurrentWindow++; + s_config = config; + } + + //MODIFIERS + + /** + * @dev Reverts if not sent from the PLI token + */ + modifier onlyPLI() { + require(msg.sender == address(PLI), "Must use PLI token"); + _; + } + + /** + * @dev Reverts if the given data does not begin with the `register` function selector + * @param _data The data payload of the request + */ + modifier permittedFunctionsForPLI(bytes memory _data) { + bytes4 funcSelector; + assembly { + // solhint-disable-next-line avoid-low-level-calls + funcSelector := mload(add(_data, 32)) + } + require(funcSelector == REGISTER_REQUEST_SELECTOR, "Must use whitelisted functions"); + _; + } + + /** + * @dev Reverts if the actual amount passed does not match the expected amount + * @param expected amount that should match the actual amount + * @param data bytes + */ + modifier isActualAmount(uint256 expected, bytes memory data) { + uint256 actual; + assembly { + actual := mload(add(data, 228)) + } + require(expected == actual, "Amount mismatch"); + _; + } +} diff --git a/contracts/src/v0.7/VRFConsumerBase.sol b/contracts/src/v0.7/VRFConsumerBase.sol new file mode 100644 index 00000000..ecf6d637 --- /dev/null +++ b/contracts/src/v0.7/VRFConsumerBase.sol @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "./vendor/SafeMathPlugin.sol"; + +import "./interfaces/LinkTokenInterface.sol"; + +import "./VRFRequestIDBase.sol"; + +/** **************************************************************************** + * @notice Interface for contracts using VRF randomness + * 
/** ****************************************************************************
 * @notice Interface for contracts using VRF randomness
 * *****************************************************************************
 * @dev PURPOSE
 *
 * @dev The VRF oracle ("Reggie") holds a secret key whose public half is
 * @dev registered on-chain. For every seed it is given, it returns a value
 * @dev computed deterministically from the seed and the secret key, together
 * @dev with a proof that the value was computed correctly. Without the proof,
 * @dev the output is indistinguishable from a uniform random sample, so
 * @dev consumers get verifiable randomness.
 *
 * @dev USAGE
 *
 * @dev Inherit from VRFConsumerBase, passing the VRFCoordinator and PLI token
 * @dev addresses to its constructor. Call requestRandomness(keyHash, fee) to
 * @dev open a request; the coordinator later delivers the result by calling
 * @dev rawFulfillRandomness, which forwards to your fulfillRandomness
 * @dev override. Use the returned requestId to correlate concurrent requests.
 *
 * @dev SECURITY CONSIDERATIONS
 *
 * @dev Only this base contract's rawFulfillRandomness may trigger your
 * @dev fulfillRandomness — anything else could spoof a VRF response. Execute
 * @dev all response-dependent behavior *during* fulfillRandomness, or ensure
 * @dev stored responses cannot be manipulated by later requests. Miners and
 * @dev the oracle influence the ordering of responses on-chain, so contracts
 * @dev with multiple requests in flight must not let response order affect
 * @dev user-significant behavior. User-supplied seeds add no security (the
 * @dev blockhash of the requesting block is mixed in later) and are therefore
 * @dev replaced by a constant placeholder; a miner could in principle fork to
 * @dev evict a request's block, but at substantial economic cost.
 */
abstract contract VRFConsumerBase is VRFRequestIDBase {
  using SafeMathPlugin for uint256;

  /**
   * @notice fulfillRandomness handles the VRF response. Your contract must
   * @notice implement it. See "SECURITY CONSIDERATIONS" above for important
   * @notice principles to keep in mind when implementing it.
   *
   * @dev Triggered via rawFulfillRandomness once the coordinator has verified
   * @dev the proof associated with the randomness.
   *
   * @param requestId The Id initially returned by requestRandomness
   * @param randomness the VRF output
   */
  function fulfillRandomness(bytes32 requestId, uint256 randomness) internal virtual;

  /**
   * @dev Kept for backwards compatibility: the user-seed field of earlier API
   * versions is vestigial, because the blockhash mixed in later overrides any
   * entropy the seed could contribute. A constant placeholder is sent instead.
   */
  uint256 private constant USER_SEED_PLACEHOLDER = 0;

  /**
   * @notice requestRandomness initiates a request for VRF output
   *
   * @dev The fulfillRandomness method receives the output, once it's provided
   * @dev by the Oracle, and verified by the vrfCoordinator.
   *
   * @dev The _keyHash must already be registered with the VRFCoordinator, and
   * @dev the _fee must exceed the fee specified during registration of the
   * @dev _keyHash.
   *
   * @param _keyHash ID of public key against which randomness is generated
   * @param _fee The amount of PLI to send with the request
   *
   * @return requestId unique ID for this request; passed as the first
   * argument to fulfillRandomness, it distinguishes concurrent requests.
   */
  function requestRandomness(bytes32 _keyHash, uint256 _fee) internal returns (bytes32 requestId) {
    // FIX: the boolean result of transferAndCall was previously discarded.
    // A false return would have produced a request id (and bumped the local
    // nonce) for a request the coordinator never funded, desynchronizing
    // nonces[_keyHash] from VRFCoordinator.nonces[_keyHash][this].
    require(
      PLI.transferAndCall(vrfCoordinator, _fee, abi.encode(_keyHash, USER_SEED_PLACEHOLDER)),
      "unable to transferAndCall to coordinator"
    );
    // This is the seed passed to VRFCoordinator. The oracle will mix this with
    // the hash of the block containing this request to obtain the seed/input
    // which is finally passed to the VRF cryptographic machinery.
    uint256 vRFSeed = makeVRFInputSeed(_keyHash, USER_SEED_PLACEHOLDER, address(this), nonces[_keyHash]);
    // nonces[_keyHash] must stay in sync with
    // VRFCoordinator.nonces[_keyHash][this], which was incremented by the above
    // successful PLI.transferAndCall (in VRFCoordinator.randomnessRequest).
    // This provides protection against the user repeating their input seed,
    // which would result in a predictable/duplicate output, if multiple such
    // requests appeared in the same block.
    nonces[_keyHash] = nonces[_keyHash].add(1);
    return makeRequestId(_keyHash, vRFSeed);
  }

  LinkTokenInterface internal immutable PLI;
  address private immutable vrfCoordinator;

  // Nonces for each VRF key from which randomness has been requested.
  //
  // Must stay in sync with VRFCoordinator[_keyHash][this]
  mapping(bytes32 => uint256) /* keyHash */ /* nonce */
    private nonces;

  /**
   * @param _vrfCoordinator address of VRFCoordinator contract
   * @param _link address of PLI token contract
   *
   * @dev https://docs.chain.link/docs/link-token-contracts
   */
  constructor(address _vrfCoordinator, address _link) {
    vrfCoordinator = _vrfCoordinator;
    PLI = LinkTokenInterface(_link);
  }

  /**
   * @notice Called by VRFCoordinator when it receives a valid VRF proof;
   * validates the origin of the call, then forwards to fulfillRandomness.
   */
  function rawFulfillRandomness(bytes32 requestId, uint256 randomness) external {
    require(msg.sender == vrfCoordinator, "Only VRFCoordinator can fulfill");
    fulfillRandomness(requestId, randomness);
  }
}
contract VRFRequestIDBase {
  /**
   * @notice Computes the seed which is actually input to the VRF coordinator.
   *
   * @dev Mixing the user-supplied seed with a requester-specific nonce and the
   * @dev consuming contract's address prevents repetition of VRF output when a
   * @dev seed is reused. Repetition is mostly mitigated by the blockhash that
   * @dev is included in the final seed, but the nonce specifically protects
   * @dev requests which land in a single block (where that blockhash is the
   * @dev same).
   *
   * @param _keyHash ID of the VRF public key against which randomness is generated
   * @param _userSeed VRF seed input provided by user
   * @param _requester Address of the requesting contract
   * @param _nonce User-specific nonce at the time of the request
   * @return The uint256 seed forwarded to the VRF machinery
   */
  function makeVRFInputSeed(
    bytes32 _keyHash,
    uint256 _userSeed,
    address _requester,
    uint256 _nonce
  ) internal pure returns (uint256) {
    bytes memory material = abi.encode(_keyHash, _userSeed, _requester, _nonce);
    return uint256(keccak256(material));
  }

  /**
   * @notice Returns the id for this request
   *
   * @dev Note that _vRFInputSeed is not the seed passed by the consuming
   * @dev contract, but the one generated by makeVRFInputSeed.
   *
   * @param _keyHash The serviceAgreement ID to be used for this request
   * @param _vRFInputSeed The seed to be passed directly to the VRF
   * @return The id for this request
   */
  function makeRequestId(bytes32 _keyHash, uint256 _vRFInputSeed) internal pure returns (bytes32) {
    bytes memory material = abi.encodePacked(_keyHash, _vRFInputSeed);
    return keccak256(material);
  }
}
contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner {
  struct Phase {
    uint16 id;
    AggregatorProxyInterface aggregator;
  }

  AggregatorProxyInterface private s_proposedAggregator;
  mapping(uint16 => AggregatorProxyInterface) private s_phaseAggregators;
  Phase private s_currentPhase;

  // Proxy round ids pack the 16-bit phase id above the aggregator's own
  // 64-bit round id, so ids keep increasing across aggregator upgrades.
  uint256 private constant PHASE_OFFSET = 64;
  uint256 private constant PHASE_SIZE = 16;
  uint256 private constant MAX_ID = 2**(PHASE_OFFSET + PHASE_SIZE) - 1;

  event AggregatorProposed(address indexed current, address indexed proposed);
  event AggregatorConfirmed(address indexed previous, address indexed latest);

  constructor(address aggregatorAddress) ConfirmedOwner(msg.sender) {
    setAggregator(aggregatorAddress);
  }

  /**
   * @notice Reads the current answer from the aggregator delegated to.
   * @dev #[deprecated] Use latestRoundData instead; this returns 0 rather
   * than erroring when no answer has been reached.
   */
  function latestAnswer() public view virtual override returns (int256 answer) {
    return s_currentPhase.aggregator.latestAnswer();
  }

  /**
   * @notice Reads the last updated height from the aggregator delegated to.
   * @dev #[deprecated] Use latestRoundData instead; this returns 0 rather
   * than erroring when no answer has been reached.
   */
  function latestTimestamp() public view virtual override returns (uint256 updatedAt) {
    return s_currentPhase.aggregator.latestTimestamp();
  }

  /**
   * @notice get past rounds answers
   * @param roundId the answer number to retrieve the answer for
   * @dev #[deprecated] Use getRoundData instead; this returns 0 rather than
   * erroring when the round is unknown.
   */
  function getAnswer(uint256 roundId) public view virtual override returns (int256 answer) {
    if (roundId > MAX_ID) {
      return 0;
    }
    (uint16 phase, uint64 originalId) = parseIds(roundId);
    AggregatorProxyInterface phaseAggregator = s_phaseAggregators[phase];
    if (address(phaseAggregator) == address(0)) {
      return 0;
    }
    return phaseAggregator.getAnswer(originalId);
  }

  /**
   * @notice get block timestamp when an answer was last updated
   * @param roundId the answer number to retrieve the updated timestamp for
   * @dev #[deprecated] Use getRoundData instead; this returns 0 rather than
   * erroring when the round is unknown.
   */
  function getTimestamp(uint256 roundId) public view virtual override returns (uint256 updatedAt) {
    if (roundId > MAX_ID) {
      return 0;
    }
    (uint16 phase, uint64 originalId) = parseIds(roundId);
    AggregatorProxyInterface phaseAggregator = s_phaseAggregators[phase];
    if (address(phaseAggregator) == address(0)) {
      return 0;
    }
    return phaseAggregator.getTimestamp(originalId);
  }

  /**
   * @notice get the latest completed round where the answer was updated. This
   * ID includes the proxy's phase, so round IDs increase even when switching
   * to a newly deployed aggregator.
   * @dev #[deprecated] Use latestRoundData instead; this returns 0 rather
   * than erroring when no answer has been reached.
   */
  function latestRound() public view virtual override returns (uint256 roundId) {
    Phase memory currentPhase = s_currentPhase; // cache storage reads
    uint64 aggregatorRound = uint64(currentPhase.aggregator.latestRound());
    return addPhase(currentPhase.id, aggregatorRound);
  }

  /**
   * @notice get data about a round. Consumers are encouraged to verify
   * freshness via the updatedAt and answeredInRound return values; different
   * AggregatorV3Interface implementations have slightly different semantics
   * for some return values.
   * @param roundId the proxy-level round ID: the aggregator's round ID with
   * the phase ID encoded in the two highest order bytes
   * @return id round ID combined with the phase so IDs grow over time
   * @return answer the answer for the given round
   * @return startedAt timestamp when the round was started
   * (only meaningful for some implementations)
   * @return updatedAt timestamp when the round was last updated
   * @return answeredInRound the round ID in which the answer was computed
   * (only meaningful for some implementations)
   * @dev Note that answer and updatedAt may change between queries.
   */
  function getRoundData(uint80 roundId)
    public
    view
    virtual
    override
    returns (
      uint80 id,
      int256 answer,
      uint256 startedAt,
      uint256 updatedAt,
      uint80 answeredInRound
    )
  {
    (uint16 phase, uint64 originalId) = parseIds(roundId);
    AggregatorProxyInterface phaseAggregator = s_phaseAggregators[phase];
    (id, answer, startedAt, updatedAt, answeredInRound) = phaseAggregator.getRoundData(originalId);
    return addPhaseIds(id, answer, startedAt, updatedAt, answeredInRound, phase);
  }

  /**
   * @notice get data about the latest round. Consumers are encouraged to
   * verify freshness via the updatedAt and answeredInRound return values;
   * different AggregatorV3Interface implementations have slightly different
   * semantics for some return values.
   * @return id round ID combined with the phase so IDs grow over time
   * @return answer the answer for the given round
   * @return startedAt timestamp when the round was started
   * (only meaningful for some implementations)
   * @return updatedAt timestamp when the round was last updated
   * @return answeredInRound the round ID in which the answer was computed
   * (only meaningful for some implementations)
   * @dev Note that answer and updatedAt may change between queries.
   */
  function latestRoundData()
    public
    view
    virtual
    override
    returns (
      uint80 id,
      int256 answer,
      uint256 startedAt,
      uint256 updatedAt,
      uint80 answeredInRound
    )
  {
    Phase memory currentPhase = s_currentPhase; // cache storage reads
    (id, answer, startedAt, updatedAt, answeredInRound) = currentPhase.aggregator.latestRoundData();
    return addPhaseIds(id, answer, startedAt, updatedAt, answeredInRound, currentPhase.id);
  }

  /**
   * @notice Used if an aggregator contract has been proposed.
   * @param roundId the round ID to retrieve the round data for
   * @return id the round ID for which data was retrieved
   * @return answer the answer for the given round
   * @return startedAt timestamp when the round was started
   * @return updatedAt timestamp when the round was last updated
   * @return answeredInRound the round ID in which the answer was computed
   */
  function proposedGetRoundData(uint80 roundId)
    external
    view
    virtual
    override
    hasProposal
    returns (
      uint80 id,
      int256 answer,
      uint256 startedAt,
      uint256 updatedAt,
      uint80 answeredInRound
    )
  {
    return s_proposedAggregator.getRoundData(roundId);
  }

  /**
   * @notice Used if an aggregator contract has been proposed.
   * @return id the round ID for which data was retrieved
   * @return answer the answer for the given round
   * @return startedAt timestamp when the round was started
   * @return updatedAt timestamp when the round was last updated
   * @return answeredInRound the round ID in which the answer was computed
   */
  function proposedLatestRoundData()
    external
    view
    virtual
    override
    hasProposal
    returns (
      uint80 id,
      int256 answer,
      uint256 startedAt,
      uint256 updatedAt,
      uint80 answeredInRound
    )
  {
    return s_proposedAggregator.latestRoundData();
  }

  /// @notice returns the current phase's aggregator address.
  function aggregator() external view override returns (address) {
    return address(s_currentPhase.aggregator);
  }

  /// @notice returns the current phase's ID.
  function phaseId() external view override returns (uint16) {
    return s_currentPhase.id;
  }

  /// @notice the number of decimals the aggregator responses represent.
  function decimals() external view override returns (uint8) {
    return s_currentPhase.aggregator.decimals();
  }

  /// @notice version number representing the type of aggregator the proxy points to.
  function version() external view override returns (uint256) {
    return s_currentPhase.aggregator.version();
  }

  /// @notice returns the description of the aggregator the proxy points to.
  function description() external view override returns (string memory) {
    return s_currentPhase.aggregator.description();
  }

  /// @notice returns the current proposed aggregator
  function proposedAggregator() external view override returns (address) {
    return address(s_proposedAggregator);
  }

  /**
   * @notice return a phase aggregator using the phaseId
   * @param phaseId uint16
   */
  function phaseAggregators(uint16 phaseId) external view override returns (address) {
    return address(s_phaseAggregators[phaseId]);
  }

  /**
   * @notice Allows the owner to propose a new address for the aggregator
   * @param aggregatorAddress The new address for the aggregator contract
   */
  function proposeAggregator(address aggregatorAddress) external onlyOwner {
    s_proposedAggregator = AggregatorProxyInterface(aggregatorAddress);
    emit AggregatorProposed(address(s_currentPhase.aggregator), aggregatorAddress);
  }

  /**
   * @notice Allows the owner to confirm and change the address
   * to the proposed aggregator
   * @dev Reverts if the given address doesn't match what was previously proposed
   * @param aggregatorAddress The new address for the aggregator contract
   */
  function confirmAggregator(address aggregatorAddress) external onlyOwner {
    require(aggregatorAddress == address(s_proposedAggregator), "Invalid proposed aggregator");
    address previousAggregator = address(s_currentPhase.aggregator);
    delete s_proposedAggregator;
    setAggregator(aggregatorAddress);
    emit AggregatorConfirmed(previousAggregator, aggregatorAddress);
  }

  /*
   * Internal
   */

  // Advances to a new phase pointing at the given aggregator and records it
  // in the phase history so past rounds remain queryable.
  function setAggregator(address aggregatorAddress) internal {
    uint16 nextPhaseId = s_currentPhase.id + 1;
    AggregatorProxyInterface nextAggregator = AggregatorProxyInterface(aggregatorAddress);
    s_currentPhase = Phase(nextPhaseId, nextAggregator);
    s_phaseAggregators[nextPhaseId] = nextAggregator;
  }

  // Packs a phase id and an aggregator round id into one proxy round id.
  function addPhase(uint16 phase, uint64 originalId) internal pure returns (uint80) {
    uint256 shifted = uint256(phase) << PHASE_OFFSET;
    return uint80(shifted | originalId);
  }

  // Splits a proxy round id back into its phase id and aggregator round id.
  function parseIds(uint256 roundId) internal pure returns (uint16, uint64) {
    return (uint16(roundId >> PHASE_OFFSET), uint64(roundId));
  }

  // Re-encodes the phase into the round ids of an aggregator's round data.
  function addPhaseIds(
    uint80 roundId,
    int256 answer,
    uint256 startedAt,
    uint256 updatedAt,
    uint80 answeredInRound,
    uint16 phaseId
  )
    internal
    pure
    returns (
      uint80,
      int256,
      uint256,
      uint256,
      uint80
    )
  {
    return (
      addPhase(phaseId, uint64(roundId)),
      answer,
      startedAt,
      updatedAt,
      addPhase(phaseId, uint64(answeredInRound))
    );
  }

  /*
   * Modifiers
   */

  modifier hasProposal() {
    require(address(s_proposedAggregator) != address(0), "No proposed aggregator present");
    _;
  }
}
+ */ +contract CompoundPriceFlaggingValidator is ConfirmedOwner, KeeperCompatibleInterface { + using SafeMathPlugin for uint256; + + struct CompoundFeedDetails { + // Used to call the Compound Open Oracle + string symbol; + // Used to convert price to match aggregator decimals + uint8 decimals; + // The numerator used to determine the threshold percentage + // as parts per billion. + // 1,000,000,000 = 100% + // 500,000,000 = 50% + // 100,000,000 = 10% + // 50,000,000 = 5% + // 10,000,000 = 1% + // 2,000,000 = 0.2% + // etc + uint32 deviationThresholdNumerator; + } + + uint256 private constant BILLION = 1_000_000_000; + + FlagsInterface private s_flags; + UniswapAnchoredView private s_compOpenOracle; + mapping(address => CompoundFeedDetails) private s_feedDetails; + + event CompoundOpenOracleAddressUpdated(address indexed from, address indexed to); + event FlagsAddressUpdated(address indexed from, address indexed to); + event FeedDetailsSet(address indexed aggregator, string symbol, uint8 decimals, uint32 deviationThresholdNumerator); + + /** + * @notice Create a new CompoundPriceFlaggingValidator + * @dev Use this contract to compare Plugin aggregator prices + * against the Compound Open Oracle prices + * @param flagsAddress Address of the flag contract + * @param compoundOracleAddress Address of the Compound Open Oracle UniswapAnchoredView contract + */ + constructor(address flagsAddress, address compoundOracleAddress) ConfirmedOwner(msg.sender) { + setFlagsAddress(flagsAddress); + setCompoundOpenOracleAddress(compoundOracleAddress); + } + + /** + * @notice Set the address of the Compound Open Oracle UniswapAnchoredView contract + * @param oracleAddress Compound Open Oracle UniswapAnchoredView address + */ + function setCompoundOpenOracleAddress(address oracleAddress) public onlyOwner { + address previous = address(s_compOpenOracle); + if (previous != oracleAddress) { + s_compOpenOracle = UniswapAnchoredView(oracleAddress); + emit 
CompoundOpenOracleAddressUpdated(previous, oracleAddress); + } + } + + /** + * @notice Updates the flagging contract address for raising flags + * @param flagsAddress sets the address of the flags contract + */ + function setFlagsAddress(address flagsAddress) public onlyOwner { + address previous = address(s_flags); + if (previous != flagsAddress) { + s_flags = FlagsInterface(flagsAddress); + emit FlagsAddressUpdated(previous, flagsAddress); + } + } + + /** + * @notice Set the threshold details for comparing a Plugin aggregator + * to a Compound Open Oracle feed. + * @param aggregator The Plugin aggregator address + * @param compoundSymbol The symbol used by Compound for this feed + * @param compoundDecimals The number of decimals in the Compound feed + * @param compoundDeviationThresholdNumerator The threshold numerator use to determine + * the percentage with which the difference in prices must reside within. Parts per billion. + * For example: + * If prices are valid within a 5% threshold, assuming x is the compoundDeviationThresholdNumerator: + * x / 1,000,000,000 = 0.05 + * x = 50,000,000 + */ + function setFeedDetails( + address aggregator, + string calldata compoundSymbol, + uint8 compoundDecimals, + uint32 compoundDeviationThresholdNumerator + ) public onlyOwner { + require( + compoundDeviationThresholdNumerator > 0 && compoundDeviationThresholdNumerator <= BILLION, + "Invalid threshold numerator" + ); + require(_compoundPriceOf(compoundSymbol) != 0, "Invalid Compound price"); + string memory currentSymbol = s_feedDetails[aggregator].symbol; + // If symbol is not set, use the new one + if (bytes(currentSymbol).length == 0) { + s_feedDetails[aggregator] = CompoundFeedDetails({ + symbol: compoundSymbol, + decimals: compoundDecimals, + deviationThresholdNumerator: compoundDeviationThresholdNumerator + }); + emit FeedDetailsSet(aggregator, compoundSymbol, compoundDecimals, compoundDeviationThresholdNumerator); + } + // If the symbol is already set, don't change 
it + else { + s_feedDetails[aggregator] = CompoundFeedDetails({ + symbol: currentSymbol, + decimals: compoundDecimals, + deviationThresholdNumerator: compoundDeviationThresholdNumerator + }); + emit FeedDetailsSet(aggregator, currentSymbol, compoundDecimals, compoundDeviationThresholdNumerator); + } + } + + /** + * @notice Check the price deviation of an array of aggregators + * @dev If any of the aggregators provided have an equivalent Compound Oracle feed + * that with a price outside of the configured deviation, this function will return them. + * @param aggregators address[] memory + * @return address[] invalid feeds + */ + function check(address[] memory aggregators) public view returns (address[] memory) { + address[] memory invalidAggregators = new address[](aggregators.length); + uint256 invalidCount = 0; + for (uint256 i = 0; i < aggregators.length; i++) { + address aggregator = aggregators[i]; + if (_isInvalid(aggregator)) { + invalidAggregators[invalidCount] = aggregator; + invalidCount++; + } + } + + if (aggregators.length != invalidCount) { + assembly { + mstore(invalidAggregators, invalidCount) + } + } + return invalidAggregators; + } + + /** + * @notice Check and raise flags for any aggregator that has an equivalent Compound + * Open Oracle feed with a price deviation exceeding the configured setting. + * @dev This contract must have write permissions on the Flags contract + * @param aggregators address[] memory + * @return address[] memory invalid aggregators + */ + function update(address[] memory aggregators) public returns (address[] memory) { + address[] memory invalidAggregators = check(aggregators); + s_flags.raiseFlags(invalidAggregators); + return invalidAggregators; + } + + /** + * @notice Check the price deviation of an array of aggregators + * @dev If any of the aggregators provided have an equivalent Compound Oracle feed + * that with a price outside of the configured deviation, this function will return them. 
+ * @param data bytes encoded address array + * @return needsUpkeep bool indicating whether upkeep needs to be performed + * @return invalid aggregators - bytes encoded address array of invalid aggregator addresses + */ + function checkUpkeep(bytes calldata data) external view override returns (bool, bytes memory) { + address[] memory invalidAggregators = check(abi.decode(data, (address[]))); + bool needsUpkeep = (invalidAggregators.length > 0); + return (needsUpkeep, abi.encode(invalidAggregators)); + } + + /** + * @notice Check and raise flags for any aggregator that has an equivalent Compound + * Open Oracle feed with a price deviation exceeding the configured setting. + * @dev This contract must have write permissions on the Flags contract + * @param data bytes encoded address array + */ + function performUpkeep(bytes calldata data) external override { + update(abi.decode(data, (address[]))); + } + + /** + * @notice Get the threshold of an aggregator + * @param aggregator address + * @return string Compound Oracle Symbol + * @return uint8 Compound Oracle Decimals + * @return uint32 Deviation Threshold Numerator + */ + function getFeedDetails(address aggregator) + public + view + returns ( + string memory, + uint8, + uint32 + ) + { + CompoundFeedDetails memory compDetails = s_feedDetails[aggregator]; + return (compDetails.symbol, compDetails.decimals, compDetails.deviationThresholdNumerator); + } + + /** + * @notice Get the flags address + * @return address + */ + function flags() external view returns (address) { + return address(s_flags); + } + + /** + * @notice Get the Compound Open Oracle address + * @return address + */ + function compoundOpenOracle() external view returns (address) { + return address(s_compOpenOracle); + } + + /** + * @notice Return the Compound oracle price of an asset using its symbol + * @param symbol string + * @return price uint256 + */ + function _compoundPriceOf(string memory symbol) private view returns (uint256) { + return 
s_compOpenOracle.price(symbol); + } + + // VALIDATION FUNCTIONS + + /** + * @notice Check if an aggregator has an equivalent Compound Oracle feed + * that's price is deviated more than the threshold. + * @param aggregator address of the Plugin aggregator + * @return invalid bool. True if the deviation exceeds threshold. + */ + function _isInvalid(address aggregator) private view returns (bool invalid) { + CompoundFeedDetails memory compDetails = s_feedDetails[aggregator]; + if (compDetails.deviationThresholdNumerator == 0) { + return false; + } + // Get both oracle price details + uint256 compPrice = _compoundPriceOf(compDetails.symbol); + (uint256 aggregatorPrice, uint8 aggregatorDecimals) = _aggregatorValues(aggregator); + + // Adjust the prices so the number of decimals in each align + (aggregatorPrice, compPrice) = _adjustPriceDecimals( + aggregatorPrice, + aggregatorDecimals, + compPrice, + compDetails.decimals + ); + + // Check whether the prices deviate beyond the threshold. + return _deviatesBeyondThreshold(aggregatorPrice, compPrice, compDetails.deviationThresholdNumerator); + } + + /** + * @notice Retrieve the price and the decimals from an Aggregator + * @param aggregator address + * @return price uint256 + * @return decimals uint8 + */ + function _aggregatorValues(address aggregator) private view returns (uint256 price, uint8 decimals) { + AggregatorV3Interface priceFeed = AggregatorV3Interface(aggregator); + (, int256 signedPrice, , , ) = priceFeed.latestRoundData(); + price = uint256(signedPrice); + decimals = priceFeed.decimals(); + } + + /** + * @notice Adjust the price values of the Aggregator and Compound feeds so that + * their decimal places align. This enables deviation to be calculated. 
+ * @param aggregatorPrice uint256 + * @param aggregatorDecimals uint8 - decimal places included in the aggregator price + * @param compoundPrice uint256 + * @param compoundDecimals uint8 - decimal places included in the compound price + * @return adjustedAggregatorPrice uint256 + * @return adjustedCompoundPrice uint256 + */ + function _adjustPriceDecimals( + uint256 aggregatorPrice, + uint8 aggregatorDecimals, + uint256 compoundPrice, + uint8 compoundDecimals + ) private pure returns (uint256 adjustedAggregatorPrice, uint256 adjustedCompoundPrice) { + if (aggregatorDecimals > compoundDecimals) { + uint8 diff = aggregatorDecimals - compoundDecimals; + uint256 multiplier = 10**uint256(diff); + compoundPrice = compoundPrice * multiplier; + } else if (aggregatorDecimals < compoundDecimals) { + uint8 diff = compoundDecimals - aggregatorDecimals; + uint256 multiplier = 10**uint256(diff); + aggregatorPrice = aggregatorPrice * multiplier; + } + adjustedAggregatorPrice = aggregatorPrice; + adjustedCompoundPrice = compoundPrice; + } + + /** + * @notice Check whether the compound price deviates from the aggregator price + * beyond the given threshold + * @dev Prices must be adjusted to match decimals prior to calling this function + * @param aggregatorPrice uint256 + * @param compPrice uint256 + * @param deviationThresholdNumerator uint32 + * @return beyondThreshold boolean. Returns true if deviation is beyond threshold. 
+ */ + function _deviatesBeyondThreshold( + uint256 aggregatorPrice, + uint256 compPrice, + uint32 deviationThresholdNumerator + ) private pure returns (bool beyondThreshold) { + // Deviation amount threshold from the aggregator price + uint256 deviationAmountThreshold = aggregatorPrice.mul(deviationThresholdNumerator).div(BILLION); + + // Calculate deviation + uint256 deviation; + if (aggregatorPrice > compPrice) { + deviation = aggregatorPrice.sub(compPrice); + } else if (aggregatorPrice < compPrice) { + deviation = compPrice.sub(aggregatorPrice); + } + beyondThreshold = (deviation >= deviationAmountThreshold); + } +} diff --git a/contracts/src/v0.7/dev/StalenessFlaggingValidator.sol b/contracts/src/v0.7/dev/StalenessFlaggingValidator.sol new file mode 100644 index 00000000..86b2c107 --- /dev/null +++ b/contracts/src/v0.7/dev/StalenessFlaggingValidator.sol @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "../ConfirmedOwner.sol"; +import "../vendor/SafeMathPlugin.sol"; +import "../interfaces/FlagsInterface.sol"; +import "../interfaces/AggregatorV3Interface.sol"; +import "../interfaces/KeeperCompatibleInterface.sol"; + +contract StalenessFlaggingValidator is ConfirmedOwner, KeeperCompatibleInterface { + using SafeMathPlugin for uint256; + + FlagsInterface private s_flags; + mapping(address => uint256) private s_thresholdSeconds; + + event FlagsAddressUpdated(address indexed previous, address indexed current); + event FlaggingThresholdUpdated(address indexed aggregator, uint256 indexed previous, uint256 indexed current); + + /** + * @notice Create a new StalenessFlaggingValidator + * @param flagsAddress Address of the flag contract + * @dev Ensure that this contract has sufficient write permissions + * on the flag contract + */ + constructor(address flagsAddress) ConfirmedOwner(msg.sender) { + setFlagsAddress(flagsAddress); + } + + /** + * @notice Updates the flagging contract address for raising flags + * @param flagsAddress 
sets the address of the flags contract + */ + function setFlagsAddress(address flagsAddress) public onlyOwner { + address previous = address(s_flags); + if (previous != flagsAddress) { + s_flags = FlagsInterface(flagsAddress); + emit FlagsAddressUpdated(previous, flagsAddress); + } + } + + /** + * @notice Set the threshold limits for each aggregator + * @dev parameters must be same length + * @param aggregators address[] memory + * @param flaggingThresholds uint256[] memory + */ + function setThresholds(address[] memory aggregators, uint256[] memory flaggingThresholds) public onlyOwner { + require(aggregators.length == flaggingThresholds.length, "Different sized arrays"); + for (uint256 i = 0; i < aggregators.length; i++) { + address aggregator = aggregators[i]; + uint256 previousThreshold = s_thresholdSeconds[aggregator]; + uint256 newThreshold = flaggingThresholds[i]; + if (previousThreshold != newThreshold) { + s_thresholdSeconds[aggregator] = newThreshold; + emit FlaggingThresholdUpdated(aggregator, previousThreshold, newThreshold); + } + } + } + + /** + * @notice Check for staleness in an array of aggregators + * @dev If any of the aggregators are stale, this function will return true, + * otherwise false + * @param aggregators address[] memory + * @return address[] memory stale aggregators + */ + function check(address[] memory aggregators) public view returns (address[] memory) { + uint256 currentTimestamp = block.timestamp; + address[] memory staleAggregators = new address[](aggregators.length); + uint256 staleCount = 0; + for (uint256 i = 0; i < aggregators.length; i++) { + address aggregator = aggregators[i]; + if (isStale(aggregator, currentTimestamp)) { + staleAggregators[staleCount] = aggregator; + staleCount++; + } + } + + if (aggregators.length != staleCount) { + assembly { + mstore(staleAggregators, staleCount) + } + } + return staleAggregators; + } + + /** + * @notice Check for staleness in an array of aggregators, raise a flag + * on the flags 
contract for each aggregator that is stale + * @dev This contract must have write permissions on the flags contract + * @param aggregators address[] memory + * @return address[] memory stale aggregators + */ + function update(address[] memory aggregators) public returns (address[] memory) { + address[] memory staleAggregators = check(aggregators); + s_flags.raiseFlags(staleAggregators); + return staleAggregators; + } + + /** + * @notice Check for staleness in an array of aggregators + * @dev Overriding KeeperInterface + * @param data bytes encoded address array + * @return needsUpkeep bool indicating whether upkeep needs to be performed + * @return staleAggregators bytes encoded address array of stale aggregator addresses + */ + function checkUpkeep(bytes calldata data) external view override returns (bool, bytes memory) { + address[] memory staleAggregators = check(abi.decode(data, (address[]))); + bool needsUpkeep = (staleAggregators.length > 0); + return (needsUpkeep, abi.encode(staleAggregators)); + } + + /** + * @notice Check for staleness in an array of aggregators, raise a flag + * on the flags contract for each aggregator that is stale + * @dev Overriding KeeperInterface + * @param data bytes encoded address array + */ + function performUpkeep(bytes calldata data) external override { + update(abi.decode(data, (address[]))); + } + + /** + * @notice Get the threshold of an aggregator + * @param aggregator address + * @return uint256 + */ + function threshold(address aggregator) external view returns (uint256) { + return s_thresholdSeconds[aggregator]; + } + + /** + * @notice Get the flags address + * @return address + */ + function flags() external view returns (address) { + return address(s_flags); + } + + /** + * @notice Check if an aggregator is stale. 
+ * @dev Staleness is where an aggregator's `updatedAt` field is older + * than the threshold set for it in this contract + * @param aggregator address + * @param currentTimestamp uint256 + * @return stale bool + */ + function isStale(address aggregator, uint256 currentTimestamp) private view returns (bool stale) { + if (s_thresholdSeconds[aggregator] == 0) { + return false; + } + (, , , uint256 updatedAt, ) = AggregatorV3Interface(aggregator).latestRoundData(); + uint256 diff = currentTimestamp.sub(updatedAt); + if (diff > s_thresholdSeconds[aggregator]) { + return true; + } + return false; + } +} diff --git a/contracts/src/v0.7/interfaces/AggregatorInterface.sol b/contracts/src/v0.7/interfaces/AggregatorInterface.sol new file mode 100644 index 00000000..8eddc4a7 --- /dev/null +++ b/contracts/src/v0.7/interfaces/AggregatorInterface.sol @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +interface AggregatorInterface { + function latestAnswer() external view returns (int256); + + function latestTimestamp() external view returns (uint256); + + function latestRound() external view returns (uint256); + + function getAnswer(uint256 roundId) external view returns (int256); + + function getTimestamp(uint256 roundId) external view returns (uint256); + + event AnswerUpdated(int256 indexed current, uint256 indexed roundId, uint256 updatedAt); + + event NewRound(uint256 indexed roundId, address indexed startedBy, uint256 startedAt); +} diff --git a/contracts/src/v0.7/interfaces/AggregatorProxyInterface.sol b/contracts/src/v0.7/interfaces/AggregatorProxyInterface.sol new file mode 100644 index 00000000..a56f5eb2 --- /dev/null +++ b/contracts/src/v0.7/interfaces/AggregatorProxyInterface.sol @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "./AggregatorV2V3Interface.sol"; + +interface AggregatorProxyInterface is AggregatorV2V3Interface { + function phaseAggregators(uint16 phaseId) external view returns (address); + + 
function phaseId() external view returns (uint16); + + function proposedAggregator() external view returns (address); + + function proposedGetRoundData(uint80 roundId) + external + view + returns ( + uint80 id, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + + function proposedLatestRoundData() + external + view + returns ( + uint80 id, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + + function aggregator() external view returns (address); +} diff --git a/contracts/src/v0.7/interfaces/AggregatorV2V3Interface.sol b/contracts/src/v0.7/interfaces/AggregatorV2V3Interface.sol new file mode 100644 index 00000000..a0353687 --- /dev/null +++ b/contracts/src/v0.7/interfaces/AggregatorV2V3Interface.sol @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "./AggregatorInterface.sol"; +import "./AggregatorV3Interface.sol"; + +interface AggregatorV2V3Interface is AggregatorInterface, AggregatorV3Interface {} diff --git a/contracts/src/v0.7/interfaces/AggregatorV3Interface.sol b/contracts/src/v0.7/interfaces/AggregatorV3Interface.sol new file mode 100644 index 00000000..06b7b1a6 --- /dev/null +++ b/contracts/src/v0.7/interfaces/AggregatorV3Interface.sol @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +interface AggregatorV3Interface { + function decimals() external view returns (uint8); + + function description() external view returns (string memory); + + function version() external view returns (uint256); + + // getRoundData and latestRoundData should both raise "No data present" + // if they do not have data to report, instead of returning unset values + // which could be misinterpreted as actual reported values. 
+ function getRoundData(uint80 _roundId) + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + + function latestRoundData() + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); +} diff --git a/contracts/src/v0.7/interfaces/AuthorizedReceiverInterface.sol b/contracts/src/v0.7/interfaces/AuthorizedReceiverInterface.sol new file mode 100644 index 00000000..aec2c033 --- /dev/null +++ b/contracts/src/v0.7/interfaces/AuthorizedReceiverInterface.sol @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +interface AuthorizedReceiverInterface { + function isAuthorizedSender(address sender) external view returns (bool); + + function getAuthorizedSenders() external returns (address[] memory); + + function setAuthorizedSenders(address[] calldata senders) external; +} diff --git a/contracts/src/v0.7/interfaces/ENSInterface.sol b/contracts/src/v0.7/interfaces/ENSInterface.sol new file mode 100644 index 00000000..84fd654d --- /dev/null +++ b/contracts/src/v0.7/interfaces/ENSInterface.sol @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +interface ENSInterface { + // Logged when the owner of a node assigns a new owner to a subnode. + event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner); + + // Logged when the owner of a node transfers ownership to a new account. + event Transfer(bytes32 indexed node, address owner); + + // Logged when the resolver for a node changes. 
+ event NewResolver(bytes32 indexed node, address resolver); + + // Logged when the TTL of a node changes + event NewTTL(bytes32 indexed node, uint64 ttl); + + function setSubnodeOwner( + bytes32 node, + bytes32 label, + address owner + ) external; + + function setResolver(bytes32 node, address resolver) external; + + function setOwner(bytes32 node, address owner) external; + + function setTTL(bytes32 node, uint64 ttl) external; + + function owner(bytes32 node) external view returns (address); + + function resolver(bytes32 node) external view returns (address); + + function ttl(bytes32 node) external view returns (uint64); +} diff --git a/contracts/src/v0.7/interfaces/FeedRegistryInterface.sol b/contracts/src/v0.7/interfaces/FeedRegistryInterface.sol new file mode 100644 index 00000000..43f5fd7b --- /dev/null +++ b/contracts/src/v0.7/interfaces/FeedRegistryInterface.sol @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; +pragma abicoder v2; + +import "./AggregatorV2V3Interface.sol"; + +interface FeedRegistryInterface { + struct Phase { + uint16 phaseId; + uint80 startingAggregatorRoundId; + uint80 endingAggregatorRoundId; + } + + event FeedProposed( + address indexed asset, + address indexed denomination, + address indexed proposedAggregator, + address currentAggregator, + address sender + ); + event FeedConfirmed( + address indexed asset, + address indexed denomination, + address indexed latestAggregator, + address previousAggregator, + uint16 nextPhaseId, + address sender + ); + + // V3 AggregatorV3Interface + + function decimals(address base, address quote) external view returns (uint8); + + function description(address base, address quote) external view returns (string memory); + + function version(address base, address quote) external view returns (uint256); + + function latestRoundData(address base, address quote) + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 
answeredInRound + ); + + function getRoundData( + address base, + address quote, + uint80 _roundId + ) + external + view + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + + // V2 AggregatorInterface + + function latestAnswer(address base, address quote) external view returns (int256 answer); + + function latestTimestamp(address base, address quote) external view returns (uint256 timestamp); + + function latestRound(address base, address quote) external view returns (uint256 roundId); + + function getAnswer( + address base, + address quote, + uint256 roundId + ) external view returns (int256 answer); + + function getTimestamp( + address base, + address quote, + uint256 roundId + ) external view returns (uint256 timestamp); + + // Registry getters + + function getFeed(address base, address quote) external view returns (AggregatorV2V3Interface aggregator); + + function getPhaseFeed( + address base, + address quote, + uint16 phaseId + ) external view returns (AggregatorV2V3Interface aggregator); + + function isFeedEnabled(address aggregator) external view returns (bool); + + function getPhase( + address base, + address quote, + uint16 phaseId + ) external view returns (Phase memory phase); + + // Round helpers + + function getRoundFeed( + address base, + address quote, + uint80 roundId + ) external view returns (AggregatorV2V3Interface aggregator); + + function getPhaseRange( + address base, + address quote, + uint16 phaseId + ) external view returns (uint80 startingRoundId, uint80 endingRoundId); + + function getPreviousRoundId( + address base, + address quote, + uint80 roundId + ) external view returns (uint80 previousRoundId); + + function getNextRoundId( + address base, + address quote, + uint80 roundId + ) external view returns (uint80 nextRoundId); + + // Feed management + + function proposeFeed( + address base, + address quote, + address aggregator + ) external; + + function confirmFeed( + 
address base, + address quote, + address aggregator + ) external; + + // Proposed aggregator + + function getProposedFeed(address base, address quote) + external + view + returns (AggregatorV2V3Interface proposedAggregator); + + function proposedGetRoundData( + address base, + address quote, + uint80 roundId + ) + external + view + returns ( + uint80 id, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + + function proposedLatestRoundData(address base, address quote) + external + view + returns ( + uint80 id, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + + // Phases + function getCurrentPhaseId(address base, address quote) external view returns (uint16 currentPhaseId); +} diff --git a/contracts/src/v0.7/interfaces/FlagsInterface.sol b/contracts/src/v0.7/interfaces/FlagsInterface.sol new file mode 100644 index 00000000..157d1ed4 --- /dev/null +++ b/contracts/src/v0.7/interfaces/FlagsInterface.sol @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +interface FlagsInterface { + function getFlag(address) external view returns (bool); + + function getFlags(address[] calldata) external view returns (bool[] memory); + + function raiseFlag(address) external; + + function raiseFlags(address[] calldata) external; + + function lowerFlags(address[] calldata) external; + + function setRaisingAccessController(address) external; +} diff --git a/contracts/src/v0.7/interfaces/KeeperCompatibleInterface.sol b/contracts/src/v0.7/interfaces/KeeperCompatibleInterface.sol new file mode 100644 index 00000000..c14de02c --- /dev/null +++ b/contracts/src/v0.7/interfaces/KeeperCompatibleInterface.sol @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.7.0; + +interface KeeperCompatibleInterface { + /** + * @notice method that is simulated by the keepers to see if any work actually + * needs to be performed. 
This method does not actually need to be
+ */ + function performUpkeep(bytes calldata performData) external; +} diff --git a/contracts/src/v0.7/interfaces/KeeperRegistryInterface.sol b/contracts/src/v0.7/interfaces/KeeperRegistryInterface.sol new file mode 100644 index 00000000..df5ea928 --- /dev/null +++ b/contracts/src/v0.7/interfaces/KeeperRegistryInterface.sol @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.7.0; + +interface KeeperRegistryBaseInterface { + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + bytes calldata checkData + ) external returns (uint256 id); + + function performUpkeep(uint256 id, bytes calldata performData) external returns (bool success); + + function cancelUpkeep(uint256 id) external; + + function addFunds(uint256 id, uint96 amount) external; + + function getUpkeep(uint256 id) + external + view + returns ( + address target, + uint32 executeGas, + bytes memory checkData, + uint96 balance, + address lastKeeper, + address admin, + uint64 maxValidBlocknumber + ); + + function getUpkeepCount() external view returns (uint256); + + function getCanceledUpkeepList() external view returns (uint256[] memory); + + function getKeeperList() external view returns (address[] memory); + + function getKeeperInfo(address query) + external + view + returns ( + address payee, + bool active, + uint96 balance + ); + + function getConfig() + external + view + returns ( + uint32 paymentPremiumPPB, + uint24 checkFrequencyBlocks, + uint32 checkGasLimit, + uint24 stalenessSeconds, + uint16 gasCeilingMultiplier, + uint256 fallbackGasPrice, + uint256 fallbackLinkPrice + ); +} + +/** + * @dev The view methods are not actually marked as view in the implementation + * but we want them to be easily queried off-chain. Solidity will not compile + * if we actually inherit from this interface, so we document it here. 
+ */ +interface KeeperRegistryInterface is KeeperRegistryBaseInterface { + function checkUpkeep(uint256 upkeepId, address from) + external + view + returns ( + bytes memory performData, + uint256 maxLinkPayment, + uint256 gasLimit, + int256 gasWei, + int256 linkEth + ); +} + +interface KeeperRegistryExecutableInterface is KeeperRegistryBaseInterface { + function checkUpkeep(uint256 upkeepId, address from) + external + returns ( + bytes memory performData, + uint256 maxLinkPayment, + uint256 gasLimit, + uint256 adjustedGasWei, + uint256 linkEth + ); +} diff --git a/contracts/src/v0.7/interfaces/LinkTokenInterface.sol b/contracts/src/v0.7/interfaces/LinkTokenInterface.sol new file mode 100644 index 00000000..d9b59c7a --- /dev/null +++ b/contracts/src/v0.7/interfaces/LinkTokenInterface.sol @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +interface LinkTokenInterface { + function allowance(address owner, address spender) external view returns (uint256 remaining); + + function approve(address spender, uint256 value) external returns (bool success); + + function balanceOf(address owner) external view returns (uint256 balance); + + function decimals() external view returns (uint8 decimalPlaces); + + function decreaseApproval(address spender, uint256 addedValue) external returns (bool success); + + function increaseApproval(address spender, uint256 subtractedValue) external; + + function name() external view returns (string memory tokenName); + + function symbol() external view returns (string memory tokenSymbol); + + function totalSupply() external view returns (uint256 totalTokensIssued); + + function transfer(address to, uint256 value) external returns (bool success); + + function transferAndCall( + address to, + uint256 value, + bytes calldata data + ) external returns (bool success); + + function transferFrom( + address from, + address to, + uint256 value + ) external returns (bool success); +} diff --git 
a/contracts/src/v0.7/interfaces/OperatorInterface.sol b/contracts/src/v0.7/interfaces/OperatorInterface.sol new file mode 100644 index 00000000..4f2c7a00 --- /dev/null +++ b/contracts/src/v0.7/interfaces/OperatorInterface.sol @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "./PluginRequestInterface.sol"; +import "./OracleInterface.sol"; + +interface OperatorInterface is PluginRequestInterface, OracleInterface { + function operatorRequest( + address sender, + uint256 payment, + bytes32 specId, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 dataVersion, + bytes calldata data + ) external; + + function fulfillOracleRequest2( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + bytes calldata data + ) external returns (bool); + + function ownerTransferAndCall( + address to, + uint256 value, + bytes calldata data + ) external returns (bool success); +} diff --git a/contracts/src/v0.7/interfaces/OracleInterface.sol b/contracts/src/v0.7/interfaces/OracleInterface.sol new file mode 100644 index 00000000..bf54fc05 --- /dev/null +++ b/contracts/src/v0.7/interfaces/OracleInterface.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +interface OracleInterface { + function fulfillOracleRequest( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + bytes32 data + ) external returns (bool); + + function withdraw(address recipient, uint256 amount) external; + + function withdrawable() external view returns (uint256); +} diff --git a/contracts/src/v0.7/interfaces/OwnableInterface.sol b/contracts/src/v0.7/interfaces/OwnableInterface.sol new file mode 100644 index 00000000..94900657 --- /dev/null +++ b/contracts/src/v0.7/interfaces/OwnableInterface.sol @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +interface OwnableInterface { + function owner() 
external returns (address); + + function transferOwnership(address recipient) external; + + function acceptOwnership() external; +} diff --git a/contracts/src/v0.7/interfaces/PluginRequestInterface.sol b/contracts/src/v0.7/interfaces/PluginRequestInterface.sol new file mode 100644 index 00000000..1e6fc7cc --- /dev/null +++ b/contracts/src/v0.7/interfaces/PluginRequestInterface.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +interface PluginRequestInterface { + function oracleRequest( + address sender, + uint256 requestPrice, + bytes32 serviceAgreementID, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 dataVersion, + bytes calldata data + ) external; + + function cancelOracleRequest( + bytes32 requestId, + uint256 payment, + bytes4 callbackFunctionId, + uint256 expiration + ) external; +} diff --git a/contracts/src/v0.7/interfaces/PointerInterface.sol b/contracts/src/v0.7/interfaces/PointerInterface.sol new file mode 100644 index 00000000..ee3d8ae9 --- /dev/null +++ b/contracts/src/v0.7/interfaces/PointerInterface.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +interface PointerInterface { + function getAddress() external view returns (address); +} diff --git a/contracts/src/v0.7/interfaces/TypeAndVersionInterface.sol b/contracts/src/v0.7/interfaces/TypeAndVersionInterface.sol new file mode 100644 index 00000000..6adff620 --- /dev/null +++ b/contracts/src/v0.7/interfaces/TypeAndVersionInterface.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +abstract contract TypeAndVersionInterface { + function typeAndVersion() external pure virtual returns (string memory); +} diff --git a/contracts/src/v0.7/interfaces/UniswapAnchoredView.sol b/contracts/src/v0.7/interfaces/UniswapAnchoredView.sol new file mode 100644 index 00000000..f750cc29 --- /dev/null +++ b/contracts/src/v0.7/interfaces/UniswapAnchoredView.sol @@ -0,0 +1,7 @@ +// 
SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +// Compound Finance's oracle interface +interface UniswapAnchoredView { + function price(string memory symbol) external view returns (uint256); +} diff --git a/contracts/src/v0.7/interfaces/WithdrawalInterface.sol b/contracts/src/v0.7/interfaces/WithdrawalInterface.sol new file mode 100644 index 00000000..7d3f923a --- /dev/null +++ b/contracts/src/v0.7/interfaces/WithdrawalInterface.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +interface WithdrawalInterface { + /** + * @notice transfer PLI held by the contract belonging to msg.sender to + * another address + * @param recipient is the address to send the PLI to + * @param amount is the amount of PLI to send + */ + function withdraw(address recipient, uint256 amount) external; + + /** + * @notice query the available amount of PLI to withdraw by msg.sender + */ + function withdrawable() external view returns (uint256); +} diff --git a/contracts/src/v0.7/tests/ConfirmedOwnerTestHelper.sol b/contracts/src/v0.7/tests/ConfirmedOwnerTestHelper.sol new file mode 100644 index 00000000..99a07707 --- /dev/null +++ b/contracts/src/v0.7/tests/ConfirmedOwnerTestHelper.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "../ConfirmedOwner.sol"; + +contract ConfirmedOwnerTestHelper is ConfirmedOwner { + event Here(); + + constructor() ConfirmedOwner(msg.sender) {} + + function modifierOnlyOwner() public onlyOwner { + emit Here(); + } +} diff --git a/contracts/src/v0.7/tests/Consumer.sol b/contracts/src/v0.7/tests/Consumer.sol new file mode 100644 index 00000000..825894fd --- /dev/null +++ b/contracts/src/v0.7/tests/Consumer.sol @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "../PluginClient.sol"; + +contract Consumer is PluginClient { + using Plugin for Plugin.Request; + + bytes32 internal specId; + bytes32 public currentPrice; + uint256 public currentPriceInt; + + 
event RequestFulfilled( + bytes32 indexed requestId, // User-defined ID + bytes32 indexed price + ); + + constructor( + address _link, + address _oracle, + bytes32 _specId + ) public { + setPluginToken(_link); + setPluginOracle(_oracle); + specId = _specId; + } + + function setSpecID(bytes32 _specId) public { + specId = _specId; + } + + function requestEthereumPrice(string memory _currency, uint256 _payment) public { + Plugin.Request memory req = buildOperatorRequest(specId, this.fulfill.selector); + req.add("get", "https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR,JPY"); + string[] memory path = new string[](1); + path[0] = _currency; + req.addStringArray("path", path); + // version 2 + sendPluginRequest(req, _payment); + } + + function requestMultipleParametersWithCustomURLs( + string memory _urlUSD, + string memory _pathUSD, + uint256 _payment + ) public { + Plugin.Request memory req = buildOperatorRequest(specId, this.fulfillParametersWithCustomURLs.selector); + req.add("urlUSD", _urlUSD); + req.add("pathUSD", _pathUSD); + sendPluginRequest(req, _payment); + } + + function cancelRequest( + address _oracle, + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunctionId, + uint256 _expiration + ) public { + PluginRequestInterface requested = PluginRequestInterface(_oracle); + requested.cancelOracleRequest(_requestId, _payment, _callbackFunctionId, _expiration); + } + + function withdrawLink() public { + LinkTokenInterface _link = LinkTokenInterface(pluginTokenAddress()); + require(_link.transfer(msg.sender, _link.balanceOf(address(this))), "Unable to transfer"); + } + + function addExternalRequest(address _oracle, bytes32 _requestId) external { + addPluginExternalRequest(_oracle, _requestId); + } + + function fulfill(bytes32 _requestId, bytes32 _price) public recordPluginFulfillment(_requestId) { + emit RequestFulfilled(_requestId, _price); + currentPrice = _price; + } + + function fulfillParametersWithCustomURLs(bytes32 _requestId, 
uint256 _price) + public + recordPluginFulfillment(_requestId) + { + emit RequestFulfilled(_requestId, bytes32(_price)); + currentPriceInt = _price; + } +} diff --git a/contracts/src/v0.7/tests/KeeperCompatibleTestHelper.sol b/contracts/src/v0.7/tests/KeeperCompatibleTestHelper.sol new file mode 100644 index 00000000..0cf86848 --- /dev/null +++ b/contracts/src/v0.7/tests/KeeperCompatibleTestHelper.sol @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "../KeeperCompatible.sol"; + +contract KeeperCompatibleTestHelper is KeeperCompatible { + function checkUpkeep(bytes calldata) external override returns (bool, bytes memory) {} + + function performUpkeep(bytes calldata) external override {} + + function verifyCannotExecute() public view cannotExecute {} +} diff --git a/contracts/src/v0.7/tests/MockCompoundOracle.sol b/contracts/src/v0.7/tests/MockCompoundOracle.sol new file mode 100644 index 00000000..0b9ff577 --- /dev/null +++ b/contracts/src/v0.7/tests/MockCompoundOracle.sol @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "../interfaces/UniswapAnchoredView.sol"; + +contract MockCompoundOracle is UniswapAnchoredView { + struct OracleDetails { + uint256 price; + uint256 decimals; + } + + mapping(string => OracleDetails) s_oracleDetails; + + function price(string memory symbol) external view override returns (uint256) { + return s_oracleDetails[symbol].price; + } + + function setPrice( + string memory symbol, + uint256 newPrice, + uint256 newDecimals + ) public { + OracleDetails memory details = s_oracleDetails[symbol]; + details.price = newPrice; + details.decimals = newDecimals; + s_oracleDetails[symbol] = details; + } +} diff --git a/contracts/src/v0.7/tests/MockV2Aggregator.sol b/contracts/src/v0.7/tests/MockV2Aggregator.sol new file mode 100644 index 00000000..2aeb8406 --- /dev/null +++ b/contracts/src/v0.7/tests/MockV2Aggregator.sol @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: MIT +pragma 
solidity ^0.7.0; + +import "../interfaces/AggregatorInterface.sol"; + +/** + * @title MockV2Aggregator + * @notice Based on the HistoricAggregator contract + * @notice Use this contract when you need to test + * other contract's ability to read data from an + * aggregator contract, but how the aggregator got + * its answer is unimportant + */ +contract MockV2Aggregator is AggregatorInterface { + int256 public override latestAnswer; + uint256 public override latestTimestamp; + uint256 public override latestRound; + + mapping(uint256 => int256) public override getAnswer; + mapping(uint256 => uint256) public override getTimestamp; + mapping(uint256 => uint256) private getStartedAt; + + constructor(int256 _initialAnswer) public { + updateAnswer(_initialAnswer); + } + + function updateAnswer(int256 _answer) public { + latestAnswer = _answer; + latestTimestamp = block.timestamp; + latestRound++; + getAnswer[latestRound] = _answer; + getTimestamp[latestRound] = block.timestamp; + } + + function updateRoundData( + uint256 _roundId, + int256 _answer, + uint256 _timestamp, + uint256 _startedAt + ) public { + latestRound = _roundId; + latestAnswer = _answer; + latestTimestamp = _timestamp; + getAnswer[latestRound] = _answer; + getTimestamp[latestRound] = _timestamp; + getStartedAt[latestRound] = _startedAt; + } +} diff --git a/contracts/src/v0.7/tests/MockV3Aggregator.sol b/contracts/src/v0.7/tests/MockV3Aggregator.sol new file mode 100644 index 00000000..d14a67d9 --- /dev/null +++ b/contracts/src/v0.7/tests/MockV3Aggregator.sol @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "../interfaces/AggregatorV2V3Interface.sol"; + +/** + * @title MockV3Aggregator + * @notice Based on the FluxAggregator contract + * @notice Use this contract when you need to test + * other contract's ability to read data from an + * aggregator contract, but how the aggregator got + * its answer is unimportant + */ +contract MockV3Aggregator is 
AggregatorV2V3Interface { + uint256 public constant override version = 0; + + uint8 public override decimals; + int256 public override latestAnswer; + uint256 public override latestTimestamp; + uint256 public override latestRound; + + mapping(uint256 => int256) public override getAnswer; + mapping(uint256 => uint256) public override getTimestamp; + mapping(uint256 => uint256) private getStartedAt; + + constructor(uint8 _decimals, int256 _initialAnswer) { + decimals = _decimals; + updateAnswer(_initialAnswer); + } + + function updateAnswer(int256 _answer) public { + latestAnswer = _answer; + latestTimestamp = block.timestamp; + latestRound++; + getAnswer[latestRound] = _answer; + getTimestamp[latestRound] = block.timestamp; + getStartedAt[latestRound] = block.timestamp; + } + + function updateRoundData( + uint80 _roundId, + int256 _answer, + uint256 _timestamp, + uint256 _startedAt + ) public { + latestRound = _roundId; + latestAnswer = _answer; + latestTimestamp = _timestamp; + getAnswer[latestRound] = _answer; + getTimestamp[latestRound] = _timestamp; + getStartedAt[latestRound] = _startedAt; + } + + function getRoundData(uint80 _roundId) + external + view + override + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return (_roundId, getAnswer[_roundId], getStartedAt[_roundId], getTimestamp[_roundId], _roundId); + } + + function latestRoundData() + external + view + override + returns ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) + { + return ( + uint80(latestRound), + getAnswer[latestRound], + getStartedAt[latestRound], + getTimestamp[latestRound], + uint80(latestRound) + ); + } + + function description() external pure override returns (string memory) { + return "v0.6/tests/MockV3Aggregator.sol"; + } +} diff --git a/contracts/src/v0.7/tests/MultiWordConsumer.sol b/contracts/src/v0.7/tests/MultiWordConsumer.sol new file mode 
100644 index 00000000..2ec2649f --- /dev/null +++ b/contracts/src/v0.7/tests/MultiWordConsumer.sol @@ -0,0 +1,129 @@ +pragma solidity ^0.7.0; + +import "../PluginClient.sol"; +import "../Plugin.sol"; + +contract MultiWordConsumer is PluginClient { + using Plugin for Plugin.Request; + + bytes32 internal specId; + bytes public currentPrice; + + bytes32 public usd; + bytes32 public eur; + bytes32 public jpy; + + uint256 public usdInt; + uint256 public eurInt; + uint256 public jpyInt; + + event RequestFulfilled( + bytes32 indexed requestId, // User-defined ID + bytes indexed price + ); + + event RequestMultipleFulfilled(bytes32 indexed requestId, bytes32 indexed usd, bytes32 indexed eur, bytes32 jpy); + + event RequestMultipleFulfilledWithCustomURLs( + bytes32 indexed requestId, + uint256 indexed usd, + uint256 indexed eur, + uint256 jpy + ); + + constructor( + address _link, + address _oracle, + bytes32 _specId + ) public { + setPluginToken(_link); + setPluginOracle(_oracle); + specId = _specId; + } + + function setSpecID(bytes32 _specId) public { + specId = _specId; + } + + function requestEthereumPrice(string memory _currency, uint256 _payment) public { + Plugin.Request memory req = buildOperatorRequest(specId, this.fulfillBytes.selector); + sendOperatorRequest(req, _payment); + } + + function requestMultipleParameters(string memory _currency, uint256 _payment) public { + Plugin.Request memory req = buildOperatorRequest(specId, this.fulfillMultipleParameters.selector); + sendOperatorRequest(req, _payment); + } + + function requestMultipleParametersWithCustomURLs( + string memory _urlUSD, + string memory _pathUSD, + string memory _urlEUR, + string memory _pathEUR, + string memory _urlJPY, + string memory _pathJPY, + uint256 _payment + ) public { + Plugin.Request memory req = buildOperatorRequest(specId, this.fulfillMultipleParametersWithCustomURLs.selector); + req.add("urlUSD", _urlUSD); + req.add("pathUSD", _pathUSD); + req.add("urlEUR", _urlEUR); + 
req.add("pathEUR", _pathEUR); + req.add("urlJPY", _urlJPY); + req.add("pathJPY", _pathJPY); + sendOperatorRequest(req, _payment); + } + + function cancelRequest( + address _oracle, + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunctionId, + uint256 _expiration + ) public { + PluginRequestInterface requested = PluginRequestInterface(_oracle); + requested.cancelOracleRequest(_requestId, _payment, _callbackFunctionId, _expiration); + } + + function withdrawLink() public { + LinkTokenInterface _link = LinkTokenInterface(pluginTokenAddress()); + require(_link.transfer(msg.sender, _link.balanceOf(address(this))), "Unable to transfer"); + } + + function addExternalRequest(address _oracle, bytes32 _requestId) external { + addPluginExternalRequest(_oracle, _requestId); + } + + function fulfillMultipleParameters( + bytes32 _requestId, + bytes32 _usd, + bytes32 _eur, + bytes32 _jpy + ) public recordPluginFulfillment(_requestId) { + emit RequestMultipleFulfilled(_requestId, _usd, _eur, _jpy); + usd = _usd; + eur = _eur; + jpy = _jpy; + } + + function fulfillMultipleParametersWithCustomURLs( + bytes32 _requestId, + uint256 _usd, + uint256 _eur, + uint256 _jpy + ) public recordPluginFulfillment(_requestId) { + emit RequestMultipleFulfilledWithCustomURLs(_requestId, _usd, _eur, _jpy); + usdInt = _usd; + eurInt = _eur; + jpyInt = _jpy; + } + + function fulfillBytes(bytes32 _requestId, bytes memory _price) public recordPluginFulfillment(_requestId) { + emit RequestFulfilled(_requestId, _price); + currentPrice = _price; + } + + function publicGetNextRequestCount() external view returns (uint256) { + return getNextRequestCount(); + } +} diff --git a/contracts/src/v0.7/tests/PluginClientTestHelper.sol b/contracts/src/v0.7/tests/PluginClientTestHelper.sol new file mode 100644 index 00000000..8c0e14e2 --- /dev/null +++ b/contracts/src/v0.7/tests/PluginClientTestHelper.sol @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import 
"../PluginClient.sol"; +import "../vendor/SafeMathPlugin.sol"; + +contract PluginClientTestHelper is PluginClient { + using SafeMathPlugin for uint256; + + constructor(address _link, address _oracle) { + setPluginToken(_link); + setPluginOracle(_oracle); + } + + event Request(bytes32 id, address callbackAddress, bytes4 callbackfunctionSelector, bytes data); + event LinkAmount(uint256 amount); + + function publicNewRequest( + bytes32 _id, + address _address, + bytes memory _fulfillmentSignature + ) public { + Plugin.Request memory req = buildPluginRequest(_id, _address, bytes4(keccak256(_fulfillmentSignature))); + emit Request(req.id, req.callbackAddress, req.callbackFunctionId, req.buf.buf); + } + + function publicRequest( + bytes32 _id, + address _address, + bytes memory _fulfillmentSignature, + uint256 _wei + ) public { + Plugin.Request memory req = buildPluginRequest(_id, _address, bytes4(keccak256(_fulfillmentSignature))); + sendPluginRequest(req, _wei); + } + + function publicRequestRunTo( + address _oracle, + bytes32 _id, + address _address, + bytes memory _fulfillmentSignature, + uint256 _wei + ) public { + Plugin.Request memory run = buildPluginRequest(_id, _address, bytes4(keccak256(_fulfillmentSignature))); + sendPluginRequestTo(_oracle, run, _wei); + } + + function publicRequestOracleData( + bytes32 _id, + bytes memory _fulfillmentSignature, + uint256 _wei + ) public { + Plugin.Request memory req = buildOperatorRequest(_id, bytes4(keccak256(_fulfillmentSignature))); + sendOperatorRequest(req, _wei); + } + + function publicRequestOracleDataFrom( + address _oracle, + bytes32 _id, + address _address, + bytes memory _fulfillmentSignature, + uint256 _wei + ) public { + Plugin.Request memory run = buildOperatorRequest(_id, bytes4(keccak256(_fulfillmentSignature))); + sendOperatorRequestTo(_oracle, run, _wei); + } + + function publicCancelRequest( + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunctionId, + uint256 _expiration + ) public { + 
cancelPluginRequest(_requestId, _payment, _callbackFunctionId, _expiration); + } + + function publicPluginToken() public view returns (address) { + return pluginTokenAddress(); + } + + function publicFulfillPluginRequest(bytes32 _requestId, bytes32) public { + fulfillRequest(_requestId, bytes32(0)); + } + + function fulfillRequest(bytes32 _requestId, bytes32) public { + validatePluginCallback(_requestId); + } + + function publicPLI(uint256 _amount) public { + emit LinkAmount(PLI_DIVISIBILITY.mul(_amount)); + } + + function publicOracleAddress() public view returns (address) { + return pluginOracleAddress(); + } + + function publicAddExternalRequest(address _oracle, bytes32 _requestId) public { + addPluginExternalRequest(_oracle, _requestId); + } +} diff --git a/contracts/src/v0.7/tests/PluginTestHelper.sol b/contracts/src/v0.7/tests/PluginTestHelper.sol new file mode 100644 index 00000000..4a595708 --- /dev/null +++ b/contracts/src/v0.7/tests/PluginTestHelper.sol @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "../Plugin.sol"; +import "../vendor/CBORPlugin.sol"; +import "../vendor/BufferPlugin.sol"; + +contract PluginTestHelper { + using Plugin for Plugin.Request; + using CBORPlugin for BufferPlugin.buffer; + + Plugin.Request private req; + + event RequestData(bytes payload); + + function closeEvent() public { + emit RequestData(req.buf.buf); + } + + function setBuffer(bytes memory data) public { + Plugin.Request memory r2 = req; + r2.setBuffer(data); + req = r2; + } + + function add(string memory _key, string memory _value) public { + Plugin.Request memory r2 = req; + r2.add(_key, _value); + req = r2; + } + + function addBytes(string memory _key, bytes memory _value) public { + Plugin.Request memory r2 = req; + r2.addBytes(_key, _value); + req = r2; + } + + function addInt(string memory _key, int256 _value) public { + Plugin.Request memory r2 = req; + r2.addInt(_key, _value); + req = r2; + } + + function addUint(string memory 
_key, uint256 _value) public { + Plugin.Request memory r2 = req; + r2.addUint(_key, _value); + req = r2; + } + + // Temporarily have method receive bytes32[] memory until experimental + // string[] memory can be invoked from truffle tests. + function addStringArray(string memory _key, bytes32[] memory _values) public { + string[] memory strings = new string[](_values.length); + for (uint256 i = 0; i < _values.length; i++) { + strings[i] = bytes32ToString(_values[i]); + } + Plugin.Request memory r2 = req; + r2.addStringArray(_key, strings); + req = r2; + } + + function bytes32ToString(bytes32 x) private pure returns (string memory) { + bytes memory bytesString = new bytes(32); + uint256 charCount = 0; + for (uint256 j = 0; j < 32; j++) { + bytes1 char = bytes1(bytes32(uint256(x) * 2**(8 * j))); + if (char != 0) { + bytesString[charCount] = char; + charCount++; + } + } + bytes memory bytesStringTrimmed = new bytes(charCount); + for (uint256 j = 0; j < charCount; j++) { + bytesStringTrimmed[j] = bytesString[j]; + } + return string(bytesStringTrimmed); + } +} diff --git a/contracts/src/v0.7/tests/UpkeepAutoFunder.sol b/contracts/src/v0.7/tests/UpkeepAutoFunder.sol new file mode 100644 index 00000000..c051c835 --- /dev/null +++ b/contracts/src/v0.7/tests/UpkeepAutoFunder.sol @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "../KeeperCompatible.sol"; +import "../interfaces/LinkTokenInterface.sol"; +import "../interfaces/KeeperRegistryInterface.sol"; +import "../ConfirmedOwner.sol"; + +contract UpkeepAutoFunder is KeeperCompatible, ConfirmedOwner { + bool public s_isEligible; + bool public s_shouldCancel; + uint256 public s_upkeepId; + uint96 public s_autoFundLink; + LinkTokenInterface public immutable PLI; + KeeperRegistryBaseInterface public immutable s_keeperRegistry; + + constructor(address linkAddress, address registryAddress) ConfirmedOwner(msg.sender) { + PLI = LinkTokenInterface(linkAddress); + s_keeperRegistry = 
KeeperRegistryBaseInterface(registryAddress); + + s_isEligible = false; + s_shouldCancel = false; + s_upkeepId = 0; + s_autoFundLink = 0; + } + + function setShouldCancel(bool value) external onlyOwner { + s_shouldCancel = value; + } + + function setIsEligible(bool value) external onlyOwner { + s_isEligible = value; + } + + function setAutoFundLink(uint96 value) external onlyOwner { + s_autoFundLink = value; + } + + function setUpkeepId(uint256 value) external onlyOwner { + s_upkeepId = value; + } + + function checkUpkeep(bytes calldata data) + external + override + cannotExecute + returns (bool callable, bytes calldata executedata) + { + return (s_isEligible, data); + } + + function performUpkeep(bytes calldata data) external override { + require(s_isEligible, "Upkeep should be eligible"); + s_isEligible = false; // Allow upkeep only once until it is set again + + // Topup upkeep so it can be called again + PLI.transferAndCall(address(s_keeperRegistry), s_autoFundLink, abi.encode(s_upkeepId)); + + if (s_shouldCancel) { + s_keeperRegistry.cancelUpkeep(s_upkeepId); + } + } +} diff --git a/contracts/src/v0.7/tests/UpkeepCounter.sol b/contracts/src/v0.7/tests/UpkeepCounter.sol new file mode 100644 index 00000000..3c42b582 --- /dev/null +++ b/contracts/src/v0.7/tests/UpkeepCounter.sol @@ -0,0 +1,57 @@ +pragma solidity ^0.7.6; + +contract UpkeepCounter { + event PerformingUpkeep( + address indexed from, + uint256 initialBlock, + uint256 lastBlock, + uint256 previousBlock, + uint256 counter + ); + + uint256 public testRange; + uint256 public interval; + uint256 public lastBlock; + uint256 public previousPerformBlock; + uint256 public initialBlock; + uint256 public counter; + + constructor(uint256 _testRange, uint256 _interval) { + testRange = _testRange; + interval = _interval; + previousPerformBlock = 0; + lastBlock = block.number; + initialBlock = 0; + counter = 0; + } + + function checkUpkeep(bytes calldata data) external view returns (bool, bytes memory) { + return 
(eligible(), data); + } + + function performUpkeep(bytes calldata performData) external { + if (initialBlock == 0) { + initialBlock = block.number; + } + lastBlock = block.number; + counter = counter + 1; + performData; + emit PerformingUpkeep(tx.origin, initialBlock, lastBlock, previousPerformBlock, counter); + previousPerformBlock = lastBlock; + } + + function eligible() public view returns (bool) { + if (initialBlock == 0) { + return true; + } + + return (block.number - initialBlock) < testRange && (block.number - lastBlock) >= interval; + } + + function setSpread(uint256 _testRange, uint256 _interval) external { + testRange = _testRange; + interval = _interval; + initialBlock = 0; + counter = 0; + } +} diff --git a/contracts/src/v0.7/tests/UpkeepMock.sol b/contracts/src/v0.7/tests/UpkeepMock.sol new file mode 100644 index 00000000..a4708eb1 --- /dev/null +++ b/contracts/src/v0.7/tests/UpkeepMock.sol @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "../KeeperCompatible.sol"; + +contract UpkeepMock is KeeperCompatible { + bool public shouldRevertCheck; + bool public canCheck; + bool public canPerform; + bytes public performData; + uint256 public checkGasToBurn; + uint256 public performGasToBurn; + string public checkRevertReason; + + uint256 constant checkGasBuffer = 6000; // use all but this amount in gas burn loops + uint256 constant performGasBuffer = 1000; // use all but this amount in gas burn loops + + event UpkeepPerformedWith(bytes upkeepData); + + function setShouldRevertCheck(bool value) public { + shouldRevertCheck = value; + } + + function setPerformData(bytes calldata data) public { + performData = data; + } + + function setCanCheck(bool value) public { + canCheck = value; + } + + function setCanPerform(bool value) public { + canPerform = value; + } + + function setCheckRevertReason(string calldata value) public { + checkRevertReason = value; + } + + function setCheckGasToBurn(uint256 value) public { + 
require(value > checkGasBuffer || value == 0, "checkGasToBurn must be 0 (disabled) or greater than buffer"); + if (value > 0) { + checkGasToBurn = value - checkGasBuffer; + } else { + checkGasToBurn = 0; + } + } + + function setPerformGasToBurn(uint256 value) public { + require(value > performGasBuffer || value == 0, "performGasToBurn must be 0 (disabled) or greater than buffer"); + if (value > 0) { + performGasToBurn = value - performGasBuffer; + } else { + performGasToBurn = 0; + } + } + + function checkUpkeep( + bytes calldata data + ) external override cannotExecute returns (bool callable, bytes memory executedata) { + require(!shouldRevertCheck, checkRevertReason); + uint256 startGas = gasleft(); + bool couldCheck = canCheck; + + setCanCheck(false); // test that state modifications don't stick + + while (startGas - gasleft() < checkGasToBurn) {} // burn gas + + return (couldCheck, performData); + } + + function performUpkeep(bytes calldata data) external override { + uint256 startGas = gasleft(); + + require(canPerform, "Cannot perform"); + + emit UpkeepPerformedWith(data); + + while (startGas - gasleft() < performGasToBurn) {} // burn gas + } +} diff --git a/contracts/src/v0.7/tests/UpkeepPerformCounterRestrictive.sol b/contracts/src/v0.7/tests/UpkeepPerformCounterRestrictive.sol new file mode 100644 index 00000000..35e28584 --- /dev/null +++ b/contracts/src/v0.7/tests/UpkeepPerformCounterRestrictive.sol @@ -0,0 +1,85 @@ +pragma solidity 0.7.6; + +contract UpkeepPerformCounterRestrictive { + event PerformingUpkeep(bool eligible, address from, uint256 initialCall, uint256 nextEligible, uint256 blockNumber); + + uint256 public initialCall = 0; + uint256 public nextEligible = 0; + uint256 public testRange; + uint256 public averageEligibilityCadence; + uint256 public checkGasToBurn; + uint256 public performGasToBurn; + mapping(bytes32 => bool) public dummyMap; // used to force storage lookup + + uint256 private count = 0; + + constructor(uint256 _testRange, 
uint256 _averageEligibilityCadence) { + testRange = _testRange; + averageEligibilityCadence = _averageEligibilityCadence; + } + + function checkUpkeep(bytes calldata data) external view returns (bool, bytes memory) { + uint256 startGas = gasleft(); + uint256 blockNum = block.number - 1; + bool dummy; + // burn gas + while (startGas - gasleft() < checkGasToBurn) { + dummy = dummy && dummyMap[blockhash(blockNum)]; // arbitrary storage reads + blockNum--; + } + return (eligible(), abi.encode(dummy)); + } + + function performUpkeep(bytes calldata) external { + uint256 startGas = gasleft(); + bool eligible = eligible(); + uint256 blockNum = block.number; + emit PerformingUpkeep(eligible, tx.origin, initialCall, nextEligible, blockNum); + require(eligible); + if (initialCall == 0) { + initialCall = blockNum; + } + nextEligible = (blockNum + (rand() % (averageEligibilityCadence * 2))) + 1; + count++; + // burn gas + blockNum--; + while (startGas - gasleft() < performGasToBurn) { + dummyMap[blockhash(blockNum)] = false; // arbitrary storage writes + blockNum--; + } + } + + function setCheckGasToBurn(uint256 value) public { + checkGasToBurn = value; + } + + function setPerformGasToBurn(uint256 value) public { + performGasToBurn = value; + } + + function getCountPerforms() public view returns (uint256) { + return count; + } + + function eligible() internal view returns (bool) { + return initialCall == 0 || (block.number - initialCall < testRange && block.number > nextEligible); + } + + function checkEligible() public view returns (bool) { + return eligible(); + } + + function reset() external { + initialCall = 0; + count = 0; + } + + function setSpread(uint256 _newTestRange, uint256 _newAverageEligibilityCadence) external { + testRange = _newTestRange; + averageEligibilityCadence = _newAverageEligibilityCadence; + } + + function rand() private view returns (uint256) { + return uint256(keccak256(abi.encode(blockhash(block.number - 1), address(this)))); + } +} diff --git 
a/contracts/src/v0.7/tests/UpkeepReverter.sol b/contracts/src/v0.7/tests/UpkeepReverter.sol new file mode 100644 index 00000000..c39cbf7d --- /dev/null +++ b/contracts/src/v0.7/tests/UpkeepReverter.sol @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "../KeeperCompatible.sol"; + +contract UpkeepReverter is KeeperCompatible { + function checkUpkeep(bytes calldata data) + public + view + override + cannotExecute + returns (bool callable, bytes calldata executedata) + { + require(false, "!working"); + return (true, data); + } + + function performUpkeep(bytes calldata) external pure override { + require(false, "!working"); + } +} diff --git a/contracts/src/v0.7/tests/VRFCoordinatorMock.sol b/contracts/src/v0.7/tests/VRFCoordinatorMock.sol new file mode 100644 index 00000000..4e357ddf --- /dev/null +++ b/contracts/src/v0.7/tests/VRFCoordinatorMock.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +import "../interfaces/LinkTokenInterface.sol"; +import "../VRFConsumerBase.sol"; + +contract VRFCoordinatorMock { + LinkTokenInterface public PLI; + + event RandomnessRequest(address indexed sender, bytes32 indexed keyHash, uint256 indexed seed); + + constructor(address linkAddress) public { + PLI = LinkTokenInterface(linkAddress); + } + + function onTokenTransfer( + address sender, + uint256 fee, + bytes memory _data + ) public onlyPLI { + (bytes32 keyHash, uint256 seed) = abi.decode(_data, (bytes32, uint256)); + emit RandomnessRequest(sender, keyHash, seed); + } + + function callBackWithRandomness( + bytes32 requestId, + uint256 randomness, + address consumerContract + ) public { + VRFConsumerBase v; + bytes memory resp = abi.encodeWithSelector(v.rawFulfillRandomness.selector, requestId, randomness); + uint256 b = 206000; + require(gasleft() >= b, "not enough gas for consumer"); + (bool success, ) = consumerContract.call(resp); + } + + modifier onlyPLI() { + require(msg.sender == address(PLI), "Must use PLI 
token"); + _; + } +} diff --git a/contracts/src/v0.7/vendor/Address.sol b/contracts/src/v0.7/vendor/Address.sol new file mode 100644 index 00000000..eec7c36d --- /dev/null +++ b/contracts/src/v0.7/vendor/Address.sol @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: MIT +// From https://github.com/OpenZeppelin/openzeppelin-contracts v3.4.0(fa64a1ced0b70ab89073d5d0b6e01b0778f7e7d6) + +pragma solidity >=0.6.2 <0.8.0; + +/** + * @dev Collection of functions related to the address type + */ +library Address { + /** + * @dev Returns true if `account` is a contract. + * + * [IMPORTANT] + * ==== + * It is unsafe to assume that an address for which this function returns + * false is an externally-owned account (EOA) and not a contract. + * + * Among others, `isContract` will return false for the following + * types of addresses: + * + * - an externally-owned account + * - a contract in construction + * - an address where a contract will be created + * - an address where a contract lived, but was destroyed + * ==== + */ + function isContract(address account) internal view returns (bool) { + // This method relies on extcodesize, which returns 0 for contracts in + // construction, since the code is only stored at the end of the + // constructor execution. + + uint256 size; + // solhint-disable-next-line no-inline-assembly + assembly { + size := extcodesize(account) + } + return size > 0; + } + + /** + * @dev Replacement for Solidity's `transfer`: sends `amount` wei to + * `recipient`, forwarding all available gas and reverting on errors. + * + * https://eips.ethereum.org/EIPS/eip-1884[EIP1884] increases the gas cost + * of certain opcodes, possibly making contracts go over the 2300 gas limit + * imposed by `transfer`, making them unable to receive funds via + * `transfer`. {sendValue} removes this limitation. + * + * https://diligence.consensys.net/posts/2019/09/stop-using-soliditys-transfer-now/[Learn more]. 
+ * + * IMPORTANT: because control is transferred to `recipient`, care must be + * taken to not create reentrancy vulnerabilities. Consider using + * {ReentrancyGuard} or the + * https://solidity.readthedocs.io/en/v0.5.11/security-considerations.html#use-the-checks-effects-interactions-pattern[checks-effects-interactions pattern]. + */ + function sendValue(address payable recipient, uint256 amount) internal { + require(address(this).balance >= amount, "Address: insufficient balance"); + + // solhint-disable-next-line avoid-low-level-calls, avoid-call-value + (bool success, ) = recipient.call{value: amount}(""); + require(success, "Address: unable to send value, recipient may have reverted"); + } + + /** + * @dev Performs a Solidity function call using a low level `call`. A + * plain`call` is an unsafe replacement for a function call: use this + * function instead. + * + * If `target` reverts with a revert reason, it is bubbled up by this + * function (like regular Solidity function calls). + * + * Returns the raw returned data. To convert to the expected return value, + * use https://solidity.readthedocs.io/en/latest/units-and-global-variables.html?highlight=abi.decode#abi-encoding-and-decoding-functions[`abi.decode`]. + * + * Requirements: + * + * - `target` must be a contract. + * - calling `target` with `data` must not revert. + * + * _Available since v3.1._ + */ + function functionCall(address target, bytes memory data) internal returns (bytes memory) { + return functionCall(target, data, "Address: low-level call failed"); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`], but with + * `errorMessage` as a fallback revert reason when `target` reverts. 
+ * + * _Available since v3.1._ + */ + function functionCall( + address target, + bytes memory data, + string memory errorMessage + ) internal returns (bytes memory) { + return functionCallWithValue(target, data, 0, errorMessage); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`], + * but also transferring `value` wei to `target`. + * + * Requirements: + * + * - the calling contract must have an ETH balance of at least `value`. + * - the called Solidity function must be `payable`. + * + * _Available since v3.1._ + */ + function functionCallWithValue( + address target, + bytes memory data, + uint256 value + ) internal returns (bytes memory) { + return functionCallWithValue(target, data, value, "Address: low-level call with value failed"); + } + + /** + * @dev Same as {xref-Address-functionCallWithValue-address-bytes-uint256-}[`functionCallWithValue`], but + * with `errorMessage` as a fallback revert reason when `target` reverts. + * + * _Available since v3.1._ + */ + function functionCallWithValue( + address target, + bytes memory data, + uint256 value, + string memory errorMessage + ) internal returns (bytes memory) { + require(address(this).balance >= value, "Address: insufficient balance for call"); + require(isContract(target), "Address: call to non-contract"); + + // solhint-disable-next-line avoid-low-level-calls + (bool success, bytes memory returndata) = target.call{value: value}(data); + return _verifyCallResult(success, returndata, errorMessage); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`], + * but performing a static call. + * + * _Available since v3.3._ + */ + function functionStaticCall(address target, bytes memory data) internal view returns (bytes memory) { + return functionStaticCall(target, data, "Address: low-level static call failed"); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-string-}[`functionCall`], + * but performing a static call. 
+ * + * _Available since v3.3._ + */ + function functionStaticCall( + address target, + bytes memory data, + string memory errorMessage + ) internal view returns (bytes memory) { + require(isContract(target), "Address: static call to non-contract"); + + // solhint-disable-next-line avoid-low-level-calls + (bool success, bytes memory returndata) = target.staticcall(data); + return _verifyCallResult(success, returndata, errorMessage); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`], + * but performing a delegate call. + * + * _Available since v3.4._ + */ + function functionDelegateCall(address target, bytes memory data) internal returns (bytes memory) { + return functionDelegateCall(target, data, "Address: low-level delegate call failed"); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-string-}[`functionCall`], + * but performing a delegate call. + * + * _Available since v3.4._ + */ + function functionDelegateCall( + address target, + bytes memory data, + string memory errorMessage + ) internal returns (bytes memory) { + require(isContract(target), "Address: delegate call to non-contract"); + + // solhint-disable-next-line avoid-low-level-calls + (bool success, bytes memory returndata) = target.delegatecall(data); + return _verifyCallResult(success, returndata, errorMessage); + } + + function _verifyCallResult( + bool success, + bytes memory returndata, + string memory errorMessage + ) private pure returns (bytes memory) { + if (success) { + return returndata; + } else { + // Look for revert reason and bubble it up if present + if (returndata.length > 0) { + // The easiest way to bubble the revert reason is using memory via assembly + + // solhint-disable-next-line no-inline-assembly + assembly { + let returndata_size := mload(returndata) + revert(add(32, returndata), returndata_size) + } + } else { + revert(errorMessage); + } + } + } +} diff --git a/contracts/src/v0.7/vendor/BufferChainlink.sol 
b/contracts/src/v0.7/vendor/BufferChainlink.sol new file mode 100644 index 00000000..28e229ad --- /dev/null +++ b/contracts/src/v0.7/vendor/BufferChainlink.sol @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +/** + * @dev A library for working with mutable byte buffers in Solidity. + * + * Byte buffers are mutable and expandable, and provide a variety of primitives + * for writing to them. At any time you can fetch a bytes object containing the + * current contents of the buffer. The bytes object should not be stored between + * operations, as it may change due to resizing of the buffer. + */ +library BufferPlugin { + /** + * @dev Represents a mutable buffer. Buffers have a current value (buf) and + * a capacity. The capacity may be longer than the current value, in + * which case it can be extended without the need to allocate more memory. + */ + struct buffer { + bytes buf; + uint256 capacity; + } + + /** + * @dev Initializes a buffer with an initial capacity. + * @param buf The buffer to initialize. + * @param capacity The number of bytes of space to allocate the buffer. + * @return The buffer, for chaining. + */ + function init(buffer memory buf, uint256 capacity) internal pure returns (buffer memory) { + if (capacity % 32 != 0) { + capacity += 32 - (capacity % 32); + } + // Allocate space for the buffer data + buf.capacity = capacity; + assembly { + let ptr := mload(0x40) + mstore(buf, ptr) + mstore(ptr, 0) + mstore(0x40, add(32, add(ptr, capacity))) + } + return buf; + } + + /** + * @dev Initializes a new buffer from an existing bytes object. + * Changes to the buffer may mutate the original value. + * @param b The bytes object to initialize the buffer with. + * @return A new buffer. 
+ */ + function fromBytes(bytes memory b) internal pure returns (buffer memory) { + buffer memory buf; + buf.buf = b; + buf.capacity = b.length; + return buf; + } + + function resize(buffer memory buf, uint256 capacity) private pure { + bytes memory oldbuf = buf.buf; + init(buf, capacity); + append(buf, oldbuf); + } + + function max(uint256 a, uint256 b) private pure returns (uint256) { + if (a > b) { + return a; + } + return b; + } + + /** + * @dev Sets buffer length to 0. + * @param buf The buffer to truncate. + * @return The original buffer, for chaining. + */ + function truncate(buffer memory buf) internal pure returns (buffer memory) { + assembly { + let bufptr := mload(buf) + mstore(bufptr, 0) + } + return buf; + } + + /** + * @dev Writes a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param off The start offset to write to. + * @param data The data to append. + * @param len The number of bytes to copy. + * @return The original buffer, for chaining. 
+ */ + function write( + buffer memory buf, + uint256 off, + bytes memory data, + uint256 len + ) internal pure returns (buffer memory) { + require(len <= data.length); + + if (off + len > buf.capacity) { + resize(buf, max(buf.capacity, len + off) * 2); + } + + uint256 dest; + uint256 src; + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Length of existing buffer data + let buflen := mload(bufptr) + // Start address = buffer address + offset + sizeof(buffer length) + dest := add(add(bufptr, 32), off) + // Update buffer length if we're extending it + if gt(add(len, off), buflen) { + mstore(bufptr, add(len, off)) + } + src := add(data, 32) + } + + // Copy word-length chunks while possible + for (; len >= 32; len -= 32) { + assembly { + mstore(dest, mload(src)) + } + dest += 32; + src += 32; + } + + // Copy remaining bytes + uint256 mask = 256**(32 - len) - 1; + assembly { + let srcpart := and(mload(src), not(mask)) + let destpart := and(mload(dest), mask) + mstore(dest, or(destpart, srcpart)) + } + + return buf; + } + + /** + * @dev Appends a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @param len The number of bytes to copy. + * @return The original buffer, for chaining. + */ + function append( + buffer memory buf, + bytes memory data, + uint256 len + ) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, len); + } + + /** + * @dev Appends a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function append(buffer memory buf, bytes memory data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, data.length); + } + + /** + * @dev Writes a byte to the buffer. 
Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write the byte at. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function writeUint8( + buffer memory buf, + uint256 off, + uint8 data + ) internal pure returns (buffer memory) { + if (off >= buf.capacity) { + resize(buf, buf.capacity * 2); + } + + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Length of existing buffer data + let buflen := mload(bufptr) + // Address = buffer address + sizeof(buffer length) + off + let dest := add(add(bufptr, off), 32) + mstore8(dest, data) + // Update buffer length if we extended it + if eq(off, buflen) { + mstore(bufptr, add(buflen, 1)) + } + } + return buf; + } + + /** + * @dev Appends a byte to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendUint8(buffer memory buf, uint8 data) internal pure returns (buffer memory) { + return writeUint8(buf, buf.buf.length, data); + } + + /** + * @dev Writes up to 32 bytes to the buffer. Resizes if doing so would + * exceed the capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @param len The number of bytes to write (left-aligned). + * @return The original buffer, for chaining. 
+ */ + function write( + buffer memory buf, + uint256 off, + bytes32 data, + uint256 len + ) private pure returns (buffer memory) { + if (len + off > buf.capacity) { + resize(buf, (len + off) * 2); + } + + uint256 mask = 256**len - 1; + // Right-align data + data = data >> (8 * (32 - len)); + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + sizeof(buffer length) + off + len + let dest := add(add(bufptr, off), len) + mstore(dest, or(and(mload(dest), not(mask)), data)) + // Update buffer length if we extended it + if gt(add(off, len), mload(bufptr)) { + mstore(bufptr, add(off, len)) + } + } + return buf; + } + + /** + * @dev Writes a bytes20 to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function writeBytes20( + buffer memory buf, + uint256 off, + bytes20 data + ) internal pure returns (buffer memory) { + return write(buf, off, bytes32(data), 20); + } + + /** + * @dev Appends a bytes20 to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendBytes20(buffer memory buf, bytes20 data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, bytes32(data), 20); + } + + /** + * @dev Appends a bytes32 to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendBytes32(buffer memory buf, bytes32 data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, 32); + } + + /** + * @dev Writes an integer to the buffer. 
Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @param len The number of bytes to write (right-aligned). + * @return The original buffer, for chaining. + */ + function writeInt( + buffer memory buf, + uint256 off, + uint256 data, + uint256 len + ) private pure returns (buffer memory) { + if (len + off > buf.capacity) { + resize(buf, (len + off) * 2); + } + + uint256 mask = 256**len - 1; + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + off + sizeof(buffer length) + len + let dest := add(add(bufptr, off), len) + mstore(dest, or(and(mload(dest), not(mask)), data)) + // Update buffer length if we extended it + if gt(add(off, len), mload(bufptr)) { + mstore(bufptr, add(off, len)) + } + } + return buf; + } + + /** + * @dev Appends a byte to the end of the buffer. Resizes if doing so would + * exceed the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer. 
+ */ + function appendInt( + buffer memory buf, + uint256 data, + uint256 len + ) internal pure returns (buffer memory) { + return writeInt(buf, buf.buf.length, data, len); + } +} diff --git a/contracts/src/v0.7/vendor/CBORChainlink.sol b/contracts/src/v0.7/vendor/CBORChainlink.sol new file mode 100644 index 00000000..23684e8c --- /dev/null +++ b/contracts/src/v0.7/vendor/CBORChainlink.sol @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: MIT +pragma solidity >=0.4.19; + +// NOTE(review): the BufferPlugin library is declared in BufferChainlink.sol (added by this same +// change set); importing "./BufferPlugin.sol" would fail to resolve because no such file exists. +import {BufferPlugin} from "./BufferChainlink.sol"; + +library CBORPlugin { + using BufferPlugin for BufferPlugin.buffer; + + uint8 private constant MAJOR_TYPE_INT = 0; + uint8 private constant MAJOR_TYPE_NEGATIVE_INT = 1; + uint8 private constant MAJOR_TYPE_BYTES = 2; + uint8 private constant MAJOR_TYPE_STRING = 3; + uint8 private constant MAJOR_TYPE_ARRAY = 4; + uint8 private constant MAJOR_TYPE_MAP = 5; + uint8 private constant MAJOR_TYPE_TAG = 6; + uint8 private constant MAJOR_TYPE_CONTENT_FREE = 7; + + uint8 private constant TAG_TYPE_BIGNUM = 2; + uint8 private constant TAG_TYPE_NEGATIVE_BIGNUM = 3; + + function encodeFixedNumeric(BufferPlugin.buffer memory buf, uint8 major, uint64 value) private pure { + if(value <= 23) { + buf.appendUint8(uint8((major << 5) | value)); + } else if (value <= 0xFF) { + buf.appendUint8(uint8((major << 5) | 24)); + buf.appendInt(value, 1); + } else if (value <= 0xFFFF) { + buf.appendUint8(uint8((major << 5) | 25)); + buf.appendInt(value, 2); + } else if (value <= 0xFFFFFFFF) { + buf.appendUint8(uint8((major << 5) | 26)); + buf.appendInt(value, 4); + } else { + buf.appendUint8(uint8((major << 5) | 27)); + buf.appendInt(value, 8); + } + } + + function encodeIndefiniteLengthType(BufferPlugin.buffer memory buf, uint8 major) private pure { + buf.appendUint8(uint8((major << 5) | 31)); + } + + function encodeUInt(BufferPlugin.buffer memory buf, uint value) internal pure { + if(value > 0xFFFFFFFFFFFFFFFF) { + encodeBigNum(buf, value); + } else { + encodeFixedNumeric(buf, 
MAJOR_TYPE_INT, uint64(value)); + } + } + + function encodeInt(BufferPlugin.buffer memory buf, int value) internal pure { + if(value < -0x10000000000000000) { + encodeSignedBigNum(buf, value); + } else if(value > 0xFFFFFFFFFFFFFFFF) { + encodeBigNum(buf, uint(value)); + } else if(value >= 0) { + encodeFixedNumeric(buf, MAJOR_TYPE_INT, uint64(uint256(value))); + } else { + encodeFixedNumeric(buf, MAJOR_TYPE_NEGATIVE_INT, uint64(uint256(-1 - value))); + } + } + + function encodeBytes(BufferPlugin.buffer memory buf, bytes memory value) internal pure { + encodeFixedNumeric(buf, MAJOR_TYPE_BYTES, uint64(value.length)); + buf.append(value); + } + + function encodeBigNum(BufferPlugin.buffer memory buf, uint value) internal pure { + buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_BIGNUM)); + encodeBytes(buf, abi.encode(value)); + } + + function encodeSignedBigNum(BufferPlugin.buffer memory buf, int input) internal pure { + buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_NEGATIVE_BIGNUM)); + encodeBytes(buf, abi.encode(uint256(-1 - input))); + } + + function encodeString(BufferPlugin.buffer memory buf, string memory value) internal pure { + encodeFixedNumeric(buf, MAJOR_TYPE_STRING, uint64(bytes(value).length)); + buf.append(bytes(value)); + } + + function startArray(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_ARRAY); + } + + function startMap(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_MAP); + } + + function endSequence(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_CONTENT_FREE); + } +} diff --git a/contracts/src/v0.7/vendor/Context.sol b/contracts/src/v0.7/vendor/Context.sol new file mode 100644 index 00000000..aa7b856e --- /dev/null +++ b/contracts/src/v0.7/vendor/Context.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: MIT +// github.com/OpenZeppelin/openzeppelin-contracts@fa64a1ced0b70ab89073d5d0b6e01b0778f7e7d6 
+ +pragma solidity ^0.7.0; + +/* + * @dev Provides information about the current execution context, including the + * sender of the transaction and its data. While these are generally available + * via msg.sender and msg.data, they should not be accessed in such a direct + * manner, since when dealing with GSN meta-transactions the account sending and + * paying for execution may not be the actual sender (as far as an application + * is concerned). + * + * This contract is only required for intermediate, library-like contracts. + */ +abstract contract Context { + function _msgSender() internal view virtual returns (address payable) { + return msg.sender; + } + + function _msgData() internal view virtual returns (bytes memory) { + this; + // silence state mutability warning without generating bytecode - see https://github.com/ethereum/solidity/issues/2691 + return msg.data; + } +} diff --git a/contracts/src/v0.7/vendor/ENSResolver.sol b/contracts/src/v0.7/vendor/ENSResolver.sol new file mode 100644 index 00000000..d5cbc672 --- /dev/null +++ b/contracts/src/v0.7/vendor/ENSResolver.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +abstract contract ENSResolver { + function addr(bytes32 node) public view virtual returns (address); +} diff --git a/contracts/src/v0.7/vendor/Pausable.sol b/contracts/src/v0.7/vendor/Pausable.sol new file mode 100644 index 00000000..63ccdd6c --- /dev/null +++ b/contracts/src/v0.7/vendor/Pausable.sol @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: MIT +// github.com/OpenZeppelin/openzeppelin-contracts@fa64a1ced0b70ab89073d5d0b6e01b0778f7e7d6 + +pragma solidity ^0.7.0; + +import "./Context.sol"; + +/** + * @dev Contract module which allows children to implement an emergency stop + * mechanism that can be triggered by an authorized account. + * + * This module is used through inheritance. 
It will make available the + * modifiers `whenNotPaused` and `whenPaused`, which can be applied to + * the functions of your contract. Note that they will not be pausable by + * simply including this module, only once the modifiers are put in place. + */ +abstract contract Pausable is Context { + /** + * @dev Emitted when the pause is triggered by `account`. + */ + event Paused(address account); + + /** + * @dev Emitted when the pause is lifted by `account`. + */ + event Unpaused(address account); + + bool private _paused; + + /** + * @dev Initializes the contract in unpaused state. + */ + constructor() { + _paused = false; + } + + /** + * @dev Returns true if the contract is paused, and false otherwise. + */ + function paused() public view virtual returns (bool) { + return _paused; + } + + /** + * @dev Modifier to make a function callable only when the contract is not paused. + * + * Requirements: + * + * - The contract must not be paused. + */ + modifier whenNotPaused() { + require(!paused(), "Pausable: paused"); + _; + } + + /** + * @dev Modifier to make a function callable only when the contract is paused. + * + * Requirements: + * + * - The contract must be paused. + */ + modifier whenPaused() { + require(paused(), "Pausable: not paused"); + _; + } + + /** + * @dev Triggers stopped state. + * + * Requirements: + * + * - The contract must not be paused. + */ + function _pause() internal virtual whenNotPaused { + _paused = true; + emit Paused(_msgSender()); + } + + /** + * @dev Returns to normal state. + * + * Requirements: + * + * - The contract must be paused. 
+ */ + function _unpause() internal virtual whenPaused { + _paused = false; + emit Unpaused(_msgSender()); + } +} diff --git a/contracts/src/v0.7/vendor/ReentrancyGuard.sol b/contracts/src/v0.7/vendor/ReentrancyGuard.sol new file mode 100644 index 00000000..aaaee179 --- /dev/null +++ b/contracts/src/v0.7/vendor/ReentrancyGuard.sol @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: MIT +// github.com/OpenZeppelin/openzeppelin-contracts@fa64a1ced0b70ab89073d5d0b6e01b0778f7e7d6 + +pragma solidity ^0.7.0; + +/** + * @dev Contract module that helps prevent reentrant calls to a function. + * + * Inheriting from `ReentrancyGuard` will make the {nonReentrant} modifier + * available, which can be applied to functions to make sure there are no nested + * (reentrant) calls to them. + * + * Note that because there is a single `nonReentrant` guard, functions marked as + * `nonReentrant` may not call one another. This can be worked around by making + * those functions `private`, and then adding `external` `nonReentrant` entry + * points to them. + * + * TIP: If you would like to learn more about reentrancy and alternative ways + * to protect against it, check out our blog post + * https://blog.openzeppelin.com/reentrancy-after-istanbul/[Reentrancy After Istanbul]. + */ +abstract contract ReentrancyGuard { + // Booleans are more expensive than uint256 or any type that takes up a full + // word because each write operation emits an extra SLOAD to first read the + // slot's contents, replace the bits taken up by the boolean, and then write + // back. This is the compiler's defense against contract upgrades and + // pointer aliasing, and it cannot be disabled. + + // The values being non-zero value makes deployment a bit more expensive, + // but in exchange the refund on every call to nonReentrant will be lower in + // amount. 
Since refunds are capped to a percentage of the total + // transaction's gas, it is best to keep them low in cases like this one, to + // increase the likelihood of the full refund coming into effect. + uint256 private constant _NOT_ENTERED = 1; + uint256 private constant _ENTERED = 2; + + uint256 private _status; + + constructor() { + _status = _NOT_ENTERED; + } + + /** + * @dev Prevents a contract from calling itself, directly or indirectly. + * Calling a `nonReentrant` function from another `nonReentrant` + * function is not supported. It is possible to prevent this from happening + * by making the `nonReentrant` function external, and make it call a + * `private` function that does the actual work. + */ + modifier nonReentrant() { + // On the first call to nonReentrant, _notEntered will be true + require(_status != _ENTERED, "ReentrancyGuard: reentrant call"); + + // Any calls to nonReentrant after this point will fail + _status = _ENTERED; + + _; + + // By storing the original value once again, a refund is triggered (see + // https://eips.ethereum.org/EIPS/eip-2200) + _status = _NOT_ENTERED; + } +} diff --git a/contracts/src/v0.7/vendor/SafeMath96.sol b/contracts/src/v0.7/vendor/SafeMath96.sol new file mode 100644 index 00000000..b5184995 --- /dev/null +++ b/contracts/src/v0.7/vendor/SafeMath96.sol @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.7.0; + +/** + * @dev Wrappers over Solidity's arithmetic operations with added overflow + * checks. + * + * Arithmetic operations in Solidity wrap on overflow. This can easily result + * in bugs, because programmers usually assume that an overflow raises an + * error, which is the standard behavior in high level programming languages. + * `SafeMath` restores this intuition by reverting the transaction when an + * operation overflows. + * + * Using this library instead of the unchecked operations eliminates an entire + * class of bugs, so it's recommended to use it always. 
+ * + * This library is a version of Open Zeppelin's SafeMath, modified to support + * unsigned 96 bit integers. + */ +library SafeMath96 { + /** + * @dev Returns the addition of two unsigned integers, reverting on + * overflow. + * + * Counterpart to Solidity's `+` operator. + * + * Requirements: + * - Addition cannot overflow. + */ + function add(uint96 a, uint96 b) internal pure returns (uint96) { + uint96 c = a + b; + require(c >= a, "SafeMath: addition overflow"); + + return c; + } + + /** + * @dev Returns the subtraction of two unsigned integers, reverting on + * overflow (when the result is negative). + * + * Counterpart to Solidity's `-` operator. + * + * Requirements: + * - Subtraction cannot overflow. + */ + function sub(uint96 a, uint96 b) internal pure returns (uint96) { + require(b <= a, "SafeMath: subtraction overflow"); + uint96 c = a - b; + + return c; + } + + /** + * @dev Returns the multiplication of two unsigned integers, reverting on + * overflow. + * + * Counterpart to Solidity's `*` operator. + * + * Requirements: + * - Multiplication cannot overflow. + */ + function mul(uint96 a, uint96 b) internal pure returns (uint96) { + // Gas optimization: this is cheaper than requiring 'a' not being zero, but the + // benefit is lost if 'b' is also tested. + // See: https://github.com/OpenZeppelin/openzeppelin-solidity/pull/522 + if (a == 0) { + return 0; + } + + uint96 c = a * b; + require(c / a == b, "SafeMath: multiplication overflow"); + + return c; + } + + /** + * @dev Returns the integer division of two unsigned integers. Reverts on + * division by zero. The result is rounded towards zero. + * + * Counterpart to Solidity's `/` operator. Note: this function uses a + * `revert` opcode (which leaves remaining gas untouched) while Solidity + * uses an invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. 
+ */ + function div(uint96 a, uint96 b) internal pure returns (uint96) { + // Solidity only automatically asserts when dividing by 0 + require(b > 0, "SafeMath: division by zero"); + uint96 c = a / b; + // assert(a == b * c + a % b); // There is no case in which this doesn't hold + + return c; + } + + /** + * @dev Returns the remainder of dividing two unsigned integers. (unsigned integer modulo), + * Reverts when dividing by zero. + * + * Counterpart to Solidity's `%` operator. This function uses a `revert` + * opcode (which leaves remaining gas untouched) while Solidity uses an + * invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. + */ + function mod(uint96 a, uint96 b) internal pure returns (uint96) { + require(b != 0, "SafeMath: modulo by zero"); + return a % b; + } +} diff --git a/contracts/src/v0.7/vendor/SafeMathChainlink.sol b/contracts/src/v0.7/vendor/SafeMathChainlink.sol new file mode 100644 index 00000000..97c18527 --- /dev/null +++ b/contracts/src/v0.7/vendor/SafeMathChainlink.sol @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.7.0; + +/** + * @dev Wrappers over Solidity's arithmetic operations with added overflow + * checks. + * + * Arithmetic operations in Solidity wrap on overflow. This can easily result + * in bugs, because programmers usually assume that an overflow raises an + * error, which is the standard behavior in high level programming languages. + * `SafeMath` restores this intuition by reverting the transaction when an + * operation overflows. + * + * Using this library instead of the unchecked operations eliminates an entire + * class of bugs, so it's recommended to use it always. + */ +library SafeMathPlugin { + /** + * @dev Returns the addition of two unsigned integers, reverting on + * overflow. + * + * Counterpart to Solidity's `+` operator. + * + * Requirements: + * - Addition cannot overflow. 
+ */ + function add(uint256 a, uint256 b) internal pure returns (uint256) { + uint256 c = a + b; + require(c >= a, "SafeMath: addition overflow"); + + return c; + } + + /** + * @dev Returns the subtraction of two unsigned integers, reverting on + * overflow (when the result is negative). + * + * Counterpart to Solidity's `-` operator. + * + * Requirements: + * - Subtraction cannot overflow. + */ + function sub(uint256 a, uint256 b) internal pure returns (uint256) { + require(b <= a, "SafeMath: subtraction overflow"); + uint256 c = a - b; + + return c; + } + + /** + * @dev Returns the multiplication of two unsigned integers, reverting on + * overflow. + * + * Counterpart to Solidity's `*` operator. + * + * Requirements: + * - Multiplication cannot overflow. + */ + function mul(uint256 a, uint256 b) internal pure returns (uint256) { + // Gas optimization: this is cheaper than requiring 'a' not being zero, but the + // benefit is lost if 'b' is also tested. + // See: https://github.com/OpenZeppelin/openzeppelin-solidity/pull/522 + if (a == 0) { + return 0; + } + + uint256 c = a * b; + require(c / a == b, "SafeMath: multiplication overflow"); + + return c; + } + + /** + * @dev Returns the integer division of two unsigned integers. Reverts on + * division by zero. The result is rounded towards zero. + * + * Counterpart to Solidity's `/` operator. Note: this function uses a + * `revert` opcode (which leaves remaining gas untouched) while Solidity + * uses an invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. + */ + function div(uint256 a, uint256 b) internal pure returns (uint256) { + // Solidity only automatically asserts when dividing by 0 + require(b > 0, "SafeMath: division by zero"); + uint256 c = a / b; + // assert(a == b * c + a % b); // There is no case in which this doesn't hold + + return c; + } + + /** + * @dev Returns the remainder of dividing two unsigned integers. 
(unsigned integer modulo), + * Reverts when dividing by zero. + * + * Counterpart to Solidity's `%` operator. This function uses a `revert` + * opcode (which leaves remaining gas untouched) while Solidity uses an + * invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * - The divisor cannot be zero. + */ + function mod(uint256 a, uint256 b) internal pure returns (uint256) { + require(b != 0, "SafeMath: modulo by zero"); + return a % b; + } +} diff --git a/contracts/src/v0.7/vendor/SignedSafeMath.sol b/contracts/src/v0.7/vendor/SignedSafeMath.sol new file mode 100644 index 00000000..61658cd8 --- /dev/null +++ b/contracts/src/v0.7/vendor/SignedSafeMath.sol @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.7.0; + +/** + * @title SignedSafeMath + * @dev Signed math operations with safety checks that revert on error. + */ +library SignedSafeMath { + int256 private constant _INT256_MIN = -2**255; + + /** + * @dev Returns the multiplication of two signed integers, reverting on + * overflow. + * + * Counterpart to Solidity's `*` operator. + * + * Requirements: + * + * - Multiplication cannot overflow. + */ + function mul(int256 a, int256 b) internal pure returns (int256) { + // Gas optimization: this is cheaper than requiring 'a' not being zero, but the + // benefit is lost if 'b' is also tested. + // See: https://github.com/OpenZeppelin/openzeppelin-contracts/pull/522 + if (a == 0) { + return 0; + } + + require(!(a == -1 && b == _INT256_MIN), "SignedSafeMath: multiplication overflow"); + + int256 c = a * b; + require(c / a == b, "SignedSafeMath: multiplication overflow"); + + return c; + } + + /** + * @dev Returns the integer division of two signed integers. Reverts on + * division by zero. The result is rounded towards zero. + * + * Counterpart to Solidity's `/` operator. 
Note: this function uses a + * `revert` opcode (which leaves remaining gas untouched) while Solidity + * uses an invalid opcode to revert (consuming all remaining gas). + * + * Requirements: + * + * - The divisor cannot be zero. + */ + function div(int256 a, int256 b) internal pure returns (int256) { + require(b != 0, "SignedSafeMath: division by zero"); + require(!(b == -1 && a == _INT256_MIN), "SignedSafeMath: division overflow"); + + int256 c = a / b; + + return c; + } + + /** + * @dev Returns the subtraction of two signed integers, reverting on + * overflow. + * + * Counterpart to Solidity's `-` operator. + * + * Requirements: + * + * - Subtraction cannot overflow. + */ + function sub(int256 a, int256 b) internal pure returns (int256) { + int256 c = a - b; + require((b >= 0 && c <= a) || (b < 0 && c > a), "SignedSafeMath: subtraction overflow"); + + return c; + } + + /** + * @dev Returns the addition of two signed integers, reverting on + * overflow. + * + * Counterpart to Solidity's `+` operator. + * + * Requirements: + * + * - Addition cannot overflow. + */ + function add(int256 a, int256 b) internal pure returns (int256) { + int256 c = a + b; + require((b >= 0 && c >= a) || (b < 0 && c < a), "SignedSafeMath: addition overflow"); + + return c; + } +} diff --git a/contracts/src/v0.8/ChainSpecificUtil.sol b/contracts/src/v0.8/ChainSpecificUtil.sol new file mode 100644 index 00000000..172d8c52 --- /dev/null +++ b/contracts/src/v0.8/ChainSpecificUtil.sol @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import {ArbSys} from "./vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol"; +import {ArbGasInfo} from "./vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol"; +import {OVM_GasPriceOracle} from "./vendor/@eth-optimism/contracts/v0.8.6/contracts/L2/predeploys/OVM_GasPriceOracle.sol"; + +/// @dev A library that abstracts out opcodes that behave differently across chains. 
+/// @dev The methods below return values that are pertinent to the given chain. +/// @dev For instance, ChainSpecificUtil.getBlockNumber() returns L2 block number in L2 chains +library ChainSpecificUtil { + // ------------ Start Arbitrum Constants ------------ + + /// @dev ARBSYS_ADDR is the address of the ArbSys precompile on Arbitrum. + /// @dev reference: https://github.com/OffchainLabs/nitro/blob/v2.0.14/contracts/src/precompiles/ArbSys.sol#L10 + address private constant ARBSYS_ADDR = address(0x0000000000000000000000000000000000000064); + ArbSys private constant ARBSYS = ArbSys(ARBSYS_ADDR); + + /// @dev ARBGAS_ADDR is the address of the ArbGasInfo precompile on Arbitrum. + /// @dev reference: https://github.com/OffchainLabs/nitro/blob/v2.0.14/contracts/src/precompiles/ArbGasInfo.sol#L10 + address private constant ARBGAS_ADDR = address(0x000000000000000000000000000000000000006C); + ArbGasInfo private constant ARBGAS = ArbGasInfo(ARBGAS_ADDR); + + uint256 private constant ARB_MAINNET_CHAIN_ID = 42161; + uint256 private constant ARB_GOERLI_TESTNET_CHAIN_ID = 421613; + uint256 private constant ARB_SEPOLIA_TESTNET_CHAIN_ID = 421614; + + // ------------ End Arbitrum Constants ------------ + + // ------------ Start Optimism Constants ------------ + /// @dev L1_FEE_DATA_PADDING includes 35 bytes for L1 data padding for Optimism + bytes internal constant L1_FEE_DATA_PADDING = + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"; + /// @dev OVM_GASPRICEORACLE_ADDR is the address of the OVM_GasPriceOracle precompile on Optimism. 
+ /// @dev reference: https://community.optimism.io/docs/developers/build/transaction-fees/#estimating-the-l1-data-fee + address private constant OVM_GASPRICEORACLE_ADDR = address(0x420000000000000000000000000000000000000F); + OVM_GasPriceOracle private constant OVM_GASPRICEORACLE = OVM_GasPriceOracle(OVM_GASPRICEORACLE_ADDR); + + uint256 private constant OP_MAINNET_CHAIN_ID = 10; + uint256 private constant OP_GOERLI_CHAIN_ID = 420; + uint256 private constant OP_SEPOLIA_CHAIN_ID = 11155420; + + /// @dev Base is a OP stack based rollup and follows the same L1 pricing logic as Optimism. + uint256 private constant BASE_MAINNET_CHAIN_ID = 8453; + uint256 private constant BASE_GOERLI_CHAIN_ID = 84531; + + // ------------ End Optimism Constants ------------ + + /** + * @notice Returns the blockhash for the given blockNumber. + * @notice If the blockNumber is more than 256 blocks in the past, returns the empty string. + * @notice When on a known Arbitrum chain, it uses ArbSys.arbBlockHash to get the blockhash. + * @notice Otherwise, it uses the blockhash opcode. + * @notice Note that the blockhash opcode will return the L2 blockhash on Optimism. + */ + function _getBlockhash(uint64 blockNumber) internal view returns (bytes32) { + uint256 chainid = block.chainid; + if (_isArbitrumChainId(chainid)) { + if ((_getBlockNumber() - blockNumber) > 256 || blockNumber >= _getBlockNumber()) { + return ""; + } + return ARBSYS.arbBlockHash(blockNumber); + } + return blockhash(blockNumber); + } + + /** + * @notice Returns the block number of the current block. + * @notice When on a known Arbitrum chain, it uses ArbSys.arbBlockNumber to get the block number. + * @notice Otherwise, it uses the block.number opcode. + * @notice Note that the block.number opcode will return the L2 block number on Optimism. 
+ */ + function _getBlockNumber() internal view returns (uint256) { + uint256 chainid = block.chainid; + if (_isArbitrumChainId(chainid)) { + return ARBSYS.arbBlockNumber(); + } + return block.number; + } + + /** + * @notice Returns the L1 fees that will be paid for the current transaction, given any calldata + * @notice for the current transaction. + * @notice When on a known Arbitrum chain, it uses ArbGas.getCurrentTxL1GasFees to get the fees. + * @notice On Arbitrum, the provided calldata is not used to calculate the fees. + * @notice On Optimism, the provided calldata is passed to the OVM_GasPriceOracle predeploy + * @notice and getL1Fee is called to get the fees. + */ + function _getCurrentTxL1GasFees(bytes memory txCallData) internal view returns (uint256) { + uint256 chainid = block.chainid; + if (_isArbitrumChainId(chainid)) { + return ARBGAS.getCurrentTxL1GasFees(); + } else if (_isOptimismChainId(chainid)) { + return OVM_GASPRICEORACLE.getL1Fee(bytes.concat(txCallData, L1_FEE_DATA_PADDING)); + } + return 0; + } + + /** + * @notice Returns the gas cost in wei of calldataSizeBytes of calldata being posted + * @notice to L1. + */ + function _getL1CalldataGasCost(uint256 calldataSizeBytes) internal view returns (uint256) { + uint256 chainid = block.chainid; + if (_isArbitrumChainId(chainid)) { + (, uint256 l1PricePerByte, , , , ) = ARBGAS.getPricesInWei(); + // see https://developer.arbitrum.io/devs-how-tos/how-to-estimate-gas#where-do-we-get-all-this-information-from + // for the justification behind the 140 number. + return l1PricePerByte * (calldataSizeBytes + 140); + } else if (_isOptimismChainId(chainid)) { + return _calculateOptimismL1DataFee(calldataSizeBytes); + } + return 0; + } + + /** + * @notice Return true if and only if the provided chain ID is an Arbitrum chain ID. 
+ */ + function _isArbitrumChainId(uint256 chainId) internal pure returns (bool) { + return + chainId == ARB_MAINNET_CHAIN_ID || + chainId == ARB_GOERLI_TESTNET_CHAIN_ID || + chainId == ARB_SEPOLIA_TESTNET_CHAIN_ID; + } + + /** + * @notice Return true if and only if the provided chain ID is an Optimism chain ID. + * @notice Note that optimism chain id's are also OP stack chain id's. + */ + function _isOptimismChainId(uint256 chainId) internal pure returns (bool) { + return + chainId == OP_MAINNET_CHAIN_ID || + chainId == OP_GOERLI_CHAIN_ID || + chainId == OP_SEPOLIA_CHAIN_ID || + chainId == BASE_MAINNET_CHAIN_ID || + chainId == BASE_GOERLI_CHAIN_ID; + } + + function _calculateOptimismL1DataFee(uint256 calldataSizeBytes) internal view returns (uint256) { + // from: https://community.optimism.io/docs/developers/build/transaction-fees/#the-l1-data-fee + // l1_data_fee = l1_gas_price * (tx_data_gas + fixed_overhead) * dynamic_overhead + // tx_data_gas = count_zero_bytes(tx_data) * 4 + count_non_zero_bytes(tx_data) * 16 + // note we conservatively assume all non-zero bytes. + uint256 l1BaseFeeWei = OVM_GASPRICEORACLE.l1BaseFee(); + uint256 numZeroBytes = 0; + uint256 numNonzeroBytes = calldataSizeBytes - numZeroBytes; + uint256 txDataGas = numZeroBytes * 4 + numNonzeroBytes * 16; + uint256 fixedOverhead = OVM_GASPRICEORACLE.overhead(); + + // The scalar is some value like 0.684, but is represented as + // that times 10 ^ number of scalar decimals. + // e.g scalar = 0.684 * 10^6 + // The divisor is used to divide that and have a net result of the true scalar. 
+ uint256 scalar = OVM_GASPRICEORACLE.scalar();
+ uint256 scalarDecimals = OVM_GASPRICEORACLE.decimals();
+ uint256 divisor = 10 ** scalarDecimals;
+
+ uint256 l1DataFee = (l1BaseFeeWei * (txDataGas + fixedOverhead) * scalar) / divisor;
+ return l1DataFee;
+ }
+} diff --git a/contracts/src/v0.8/Denominations.sol b/contracts/src/v0.8/Denominations.sol new file mode 100644 index 00000000..6e9aa778 --- /dev/null +++ b/contracts/src/v0.8/Denominations.sol @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: MIT
+
+pragma solidity ^0.8.0;
+
+/**
+ * @title Denominations
+ * @notice Canonical asset identifiers, expressed as addresses, for use as
+ * denomination keys.
+ * @dev ETH and BTC use sentinel addresses; fiat currencies are represented by
+ * their ISO 4217 numeric codes cast to the address type (e.g. USD = 840).
+ * NOTE(review): these values appear to be pure identifiers rather than
+ * deployed contracts — confirm callers never invoke code at these addresses.
+ */
+library Denominations {
+ address public constant ETH = 0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE;
+ address public constant BTC = 0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB;
+
+ // Fiat currencies follow https://en.wikipedia.org/wiki/ISO_4217
+ address public constant USD = address(840);
+ address public constant GBP = address(826);
+ address public constant EUR = address(978);
+ address public constant JPY = address(392);
+ address public constant KRW = address(410);
+ address public constant CNY = address(156);
+ address public constant AUD = address(36);
+ address public constant CAD = address(124);
+ address public constant CHF = address(756);
+ address public constant ARS = address(32);
+ address public constant PHP = address(608);
+ address public constant NZD = address(554);
+ address public constant SGD = address(702);
+ address public constant NGN = address(566);
+ address public constant ZAR = address(710);
+ address public constant RUB = address(643);
+ address public constant INR = address(356);
+ address public constant BRL = address(986);
+} diff --git a/contracts/src/v0.8/Flags.sol b/contracts/src/v0.8/Flags.sol new file mode 100644 index 00000000..7cd5a54b --- /dev/null +++ b/contracts/src/v0.8/Flags.sol @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.0;
+
+import {SimpleReadAccessController} from "./shared/access/SimpleReadAccessController.sol";
+import {AccessControllerInterface} from 
"./shared/interfaces/AccessControllerInterface.sol"; +import {FlagsInterface} from "./interfaces/FlagsInterface.sol"; + +/** + * @title The Flags contract + * @notice Allows flags to signal to any reader on the access control list. + * The owner can set flags, or designate other addresses to set flags. The + * owner must turn the flags off, other setters cannot. An expected pattern is + * to allow addresses to raise flags on themselves, so if you are subscribing to + * FlagOn events you should filter for addresses you care about. + */ +// solhint-disable custom-errors +contract Flags is FlagsInterface, SimpleReadAccessController { + AccessControllerInterface public raisingAccessController; + + mapping(address => bool) private s_flags; + + event FlagRaised(address indexed subject); + event FlagLowered(address indexed subject); + event RaisingAccessControllerUpdated(address indexed previous, address indexed current); + + /** + * @param racAddress address for the raising access controller. + */ + constructor(address racAddress) { + setRaisingAccessController(racAddress); + } + + /** + * @notice read the warning flag status of a contract address. + * @param subject The contract address being checked for a flag. + * @return A true value indicates that a flag was raised and a + * false value indicates that no flag was raised. + */ + function getFlag(address subject) external view override checkAccess returns (bool) { + return s_flags[subject]; + } + + /** + * @notice read the warning flag status of a contract address. + * @param subjects An array of addresses being checked for a flag. + * @return An array of bools where a true value for any flag indicates that + * a flag was raised and a false value indicates that no flag was raised. 
+ */ + function getFlags(address[] calldata subjects) external view override checkAccess returns (bool[] memory) { + bool[] memory responses = new bool[](subjects.length); + for (uint256 i = 0; i < subjects.length; i++) { + responses[i] = s_flags[subjects[i]]; + } + return responses; + } + + /** + * @notice enable the warning flag for an address. + * Access is controlled by raisingAccessController, except for owner + * who always has access. + * @param subject The contract address whose flag is being raised + */ + function raiseFlag(address subject) external override { + require(_allowedToRaiseFlags(), "Not allowed to raise flags"); + + _tryToRaiseFlag(subject); + } + + /** + * @notice enable the warning flags for multiple addresses. + * Access is controlled by raisingAccessController, except for owner + * who always has access. + * @param subjects List of the contract addresses whose flag is being raised + */ + function raiseFlags(address[] calldata subjects) external override { + require(_allowedToRaiseFlags(), "Not allowed to raise flags"); + + for (uint256 i = 0; i < subjects.length; i++) { + _tryToRaiseFlag(subjects[i]); + } + } + + /** + * @notice allows owner to disable the warning flags for multiple addresses. + * @param subjects List of the contract addresses whose flag is being lowered + */ + function lowerFlags(address[] calldata subjects) external override onlyOwner { + for (uint256 i = 0; i < subjects.length; i++) { + address subject = subjects[i]; + + if (s_flags[subject]) { + s_flags[subject] = false; + emit FlagLowered(subject); + } + } + } + + /** + * @notice allows owner to change the access controller for raising flags. + * @param racAddress new address for the raising access controller. 
+ */ + function setRaisingAccessController(address racAddress) public override onlyOwner { + address previous = address(raisingAccessController); + + if (previous != racAddress) { + raisingAccessController = AccessControllerInterface(racAddress); + + emit RaisingAccessControllerUpdated(previous, racAddress); + } + } + + // PRIVATE + + function _allowedToRaiseFlags() private view returns (bool) { + return msg.sender == owner() || raisingAccessController.hasAccess(msg.sender, msg.data); + } + + function _tryToRaiseFlag(address subject) private { + if (!s_flags[subject]) { + s_flags[subject] = true; + emit FlagRaised(subject); + } + } +} diff --git a/contracts/src/v0.8/PermissionedForwardProxy.sol b/contracts/src/v0.8/PermissionedForwardProxy.sol new file mode 100644 index 00000000..544f8906 --- /dev/null +++ b/contracts/src/v0.8/PermissionedForwardProxy.sol @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import {Address} from "@openzeppelin/contracts/utils/Address.sol"; +import {ConfirmedOwner} from "./shared/access/ConfirmedOwner.sol"; + +/** + * @title PermissionedForwardProxy + * @notice This proxy is used to forward calls from sender to target. 
It maintains + * a permission list to check which sender is allowed to call which target + */ +contract PermissionedForwardProxy is ConfirmedOwner { + using Address for address; + + error PermissionNotSet(); + + event PermissionSet(address indexed sender, address target); + event PermissionRemoved(address indexed sender); + + mapping(address => address) private s_forwardPermissionList; + + constructor() ConfirmedOwner(msg.sender) {} + + /** + * @notice Verifies if msg.sender has permission to forward to target address and then forwards the handler + * @param target address of the contract to forward the handler to + * @param handler bytes to be passed to target in call data + */ + function forward(address target, bytes calldata handler) external { + if (s_forwardPermissionList[msg.sender] != target) { + revert PermissionNotSet(); + } + target.functionCall(handler); + } + + /** + * @notice Adds permission for sender to forward calls to target via this proxy. + * Note that it allows to overwrite an existing permission + * @param sender The address who will use this proxy to forward calls + * @param target The address where sender will be allowed to forward calls + */ + function setPermission(address sender, address target) external onlyOwner { + s_forwardPermissionList[sender] = target; + + emit PermissionSet(sender, target); + } + + /** + * @notice Removes permission for sender to forward calls via this proxy + * @param sender The address who will use this proxy to forward calls + */ + function removePermission(address sender) external onlyOwner { + delete s_forwardPermissionList[sender]; + + emit PermissionRemoved(sender); + } + + /** + * @notice Returns the target address that the sender can use this proxy for + * @param sender The address to fetch the permissioned target for + */ + function getPermission(address sender) external view returns (address) { + return s_forwardPermissionList[sender]; + } +} diff --git a/contracts/src/v0.8/Plugin.sol 
b/contracts/src/v0.8/Plugin.sol new file mode 100644 index 00000000..e0122af6 --- /dev/null +++ b/contracts/src/v0.8/Plugin.sol @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {CBORPlugin} from "./vendor/CBORPlugin.sol"; +import {BufferPlugin} from "./vendor/BufferPlugin.sol"; + +/** + * @title Library for common Plugin functions + * @dev Uses imported CBOR library for encoding to buffer + */ +library Plugin { + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + uint256 internal constant defaultBufferSize = 256; + + using CBORPlugin for BufferPlugin.buffer; + + struct Request { + bytes32 id; + address callbackAddress; + bytes4 callbackFunctionId; + uint256 nonce; + BufferPlugin.buffer buf; + } + + /** + * @notice Initializes a Plugin request + * @dev Sets the ID, callback address, and callback function signature on the request + * @param self The uninitialized request + * @param jobId The Job Specification ID + * @param callbackAddr The callback address + * @param callbackFunc The callback function signature + * @return The initialized request + */ + function _initialize( + Request memory self, + bytes32 jobId, + address callbackAddr, + bytes4 callbackFunc + ) internal pure returns (Plugin.Request memory) { + BufferPlugin.init(self.buf, defaultBufferSize); + self.id = jobId; + self.callbackAddress = callbackAddr; + self.callbackFunctionId = callbackFunc; + return self; + } + + /** + * @notice Sets the data for the buffer without encoding CBOR on-chain + * @dev CBOR can be closed with curly-brackets {} or they can be left off + * @param self The initialized request + * @param data The CBOR data + */ + function _setBuffer(Request memory self, bytes memory data) internal pure { + BufferPlugin.init(self.buf, data.length); + BufferPlugin.append(self.buf, data); + } + + /** + * @notice Adds a string value to the request with a given key name + * @param self The initialized request + * @param key The 
name of the key + * @param value The string value to add + */ + function _add(Request memory self, string memory key, string memory value) internal pure { + self.buf.encodeString(key); + self.buf.encodeString(value); + } + + /** + * @notice Adds a bytes value to the request with a given key name + * @param self The initialized request + * @param key The name of the key + * @param value The bytes value to add + */ + function _addBytes(Request memory self, string memory key, bytes memory value) internal pure { + self.buf.encodeString(key); + self.buf.encodeBytes(value); + } + + /** + * @notice Adds a int256 value to the request with a given key name + * @param self The initialized request + * @param key The name of the key + * @param value The int256 value to add + */ + function _addInt(Request memory self, string memory key, int256 value) internal pure { + self.buf.encodeString(key); + self.buf.encodeInt(value); + } + + /** + * @notice Adds a uint256 value to the request with a given key name + * @param self The initialized request + * @param key The name of the key + * @param value The uint256 value to add + */ + function _addUint(Request memory self, string memory key, uint256 value) internal pure { + self.buf.encodeString(key); + self.buf.encodeUInt(value); + } + + /** + * @notice Adds an array of strings to the request with a given key name + * @param self The initialized request + * @param key The name of the key + * @param values The array of string values to add + */ + function _addStringArray(Request memory self, string memory key, string[] memory values) internal pure { + self.buf.encodeString(key); + self.buf.startArray(); + for (uint256 i = 0; i < values.length; i++) { + self.buf.encodeString(values[i]); + } + self.buf.endSequence(); + } +} diff --git a/contracts/src/v0.8/PluginClient.sol b/contracts/src/v0.8/PluginClient.sol new file mode 100644 index 00000000..c6111a90 --- /dev/null +++ b/contracts/src/v0.8/PluginClient.sol @@ -0,0 +1,318 @@ +// 
SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {Plugin} from "./Plugin.sol"; +import {ENSInterface} from "./interfaces/ENSInterface.sol"; +import {LinkTokenInterface} from "./shared/interfaces/LinkTokenInterface.sol"; +import {PluginRequestInterface} from "./interfaces/PluginRequestInterface.sol"; +import {OperatorInterface} from "./interfaces/OperatorInterface.sol"; +import {PointerInterface} from "./interfaces/PointerInterface.sol"; +import {ENSResolver as ENSResolver_Plugin} from "./vendor/ENSResolver.sol"; + +/** + * @title The PluginClient contract + * @notice Contract writers can inherit this contract in order to create requests for the + * Plugin network + */ +// solhint-disable custom-errors +abstract contract PluginClient { + using Plugin for Plugin.Request; + + uint256 internal constant PLI_DIVISIBILITY = 10 ** 18; + uint256 private constant AMOUNT_OVERRIDE = 0; + address private constant SENDER_OVERRIDE = address(0); + uint256 private constant ORACLE_ARGS_VERSION = 1; + uint256 private constant OPERATOR_ARGS_VERSION = 2; + bytes32 private constant ENS_TOKEN_SUBNAME = keccak256("link"); + bytes32 private constant ENS_ORACLE_SUBNAME = keccak256("oracle"); + address private constant PLI_TOKEN_POINTER = 0xC89bD4E1632D3A43CB03AAAd5262cbe4038Bc571; + + ENSInterface private s_ens; + bytes32 private s_ensNode; + LinkTokenInterface private s_link; + OperatorInterface private s_oracle; + uint256 private s_requestCount = 1; + mapping(bytes32 => address) private s_pendingRequests; + + event PluginRequested(bytes32 indexed id); + event PluginFulfilled(bytes32 indexed id); + event PluginCancelled(bytes32 indexed id); + + /** + * @notice Creates a request that can hold additional parameters + * @param specId The Job Specification ID that the request will be created for + * @param callbackAddr address to operate the callback on + * @param callbackFunctionSignature function signature to use for the callback + * @return A Plugin Request struct in memory + 
*/ + function _buildPluginRequest( + bytes32 specId, + address callbackAddr, + bytes4 callbackFunctionSignature + ) internal pure returns (Plugin.Request memory) { + Plugin.Request memory req; + return req._initialize(specId, callbackAddr, callbackFunctionSignature); + } + + /** + * @notice Creates a request that can hold additional parameters + * @param specId The Job Specification ID that the request will be created for + * @param callbackFunctionSignature function signature to use for the callback + * @return A Plugin Request struct in memory + */ + function _buildOperatorRequest( + bytes32 specId, + bytes4 callbackFunctionSignature + ) internal view returns (Plugin.Request memory) { + Plugin.Request memory req; + return req._initialize(specId, address(this), callbackFunctionSignature); + } + + /** + * @notice Creates a Plugin request to the stored oracle address + * @dev Calls `pluginRequestTo` with the stored oracle address + * @param req The initialized Plugin Request + * @param payment The amount of PLI to send for the request + * @return requestId The request ID + */ + function _sendPluginRequest(Plugin.Request memory req, uint256 payment) internal returns (bytes32) { + return _sendPluginRequestTo(address(s_oracle), req, payment); + } + + /** + * @notice Creates a Plugin request to the specified oracle address + * @dev Generates and stores a request ID, increments the local nonce, and uses `transferAndCall` to + * send PLI which creates a request on the target oracle contract. + * Emits PluginRequested event. 
+ * @param oracleAddress The address of the oracle for the request + * @param req The initialized Plugin Request + * @param payment The amount of PLI to send for the request + * @return requestId The request ID + */ + function _sendPluginRequestTo( + address oracleAddress, + Plugin.Request memory req, + uint256 payment + ) internal returns (bytes32 requestId) { + uint256 nonce = s_requestCount; + s_requestCount = nonce + 1; + bytes memory encodedRequest = abi.encodeWithSelector( + PluginRequestInterface.oracleRequest.selector, + SENDER_OVERRIDE, // Sender value - overridden by onTokenTransfer by the requesting contract's address + AMOUNT_OVERRIDE, // Amount value - overridden by onTokenTransfer by the actual amount of PLI sent + req.id, + address(this), + req.callbackFunctionId, + nonce, + ORACLE_ARGS_VERSION, + req.buf.buf + ); + return _rawRequest(oracleAddress, nonce, payment, encodedRequest); + } + + /** + * @notice Creates a Plugin request to the stored oracle address + * @dev This function supports multi-word response + * @dev Calls `sendOperatorRequestTo` with the stored oracle address + * @param req The initialized Plugin Request + * @param payment The amount of PLI to send for the request + * @return requestId The request ID + */ + function _sendOperatorRequest(Plugin.Request memory req, uint256 payment) internal returns (bytes32) { + return _sendOperatorRequestTo(address(s_oracle), req, payment); + } + + /** + * @notice Creates a Plugin request to the specified oracle address + * @dev This function supports multi-word response + * @dev Generates and stores a request ID, increments the local nonce, and uses `transferAndCall` to + * send PLI which creates a request on the target oracle contract. + * Emits PluginRequested event. 
+ * @param oracleAddress The address of the oracle for the request + * @param req The initialized Plugin Request + * @param payment The amount of PLI to send for the request + * @return requestId The request ID + */ + function _sendOperatorRequestTo( + address oracleAddress, + Plugin.Request memory req, + uint256 payment + ) internal returns (bytes32 requestId) { + uint256 nonce = s_requestCount; + s_requestCount = nonce + 1; + bytes memory encodedRequest = abi.encodeWithSelector( + OperatorInterface.operatorRequest.selector, + SENDER_OVERRIDE, // Sender value - overridden by onTokenTransfer by the requesting contract's address + AMOUNT_OVERRIDE, // Amount value - overridden by onTokenTransfer by the actual amount of PLI sent + req.id, + req.callbackFunctionId, + nonce, + OPERATOR_ARGS_VERSION, + req.buf.buf + ); + return _rawRequest(oracleAddress, nonce, payment, encodedRequest); + } + + /** + * @notice Make a request to an oracle + * @param oracleAddress The address of the oracle for the request + * @param nonce used to generate the request ID + * @param payment The amount of PLI to send for the request + * @param encodedRequest data encoded for request type specific format + * @return requestId The request ID + */ + function _rawRequest( + address oracleAddress, + uint256 nonce, + uint256 payment, + bytes memory encodedRequest + ) private returns (bytes32 requestId) { + requestId = keccak256(abi.encodePacked(this, nonce)); + s_pendingRequests[requestId] = oracleAddress; + emit PluginRequested(requestId); + require(s_link.transferAndCall(oracleAddress, payment, encodedRequest), "unable to transferAndCall to oracle"); + return requestId; + } + + /** + * @notice Allows a request to be cancelled if it has not been fulfilled + * @dev Requires keeping track of the expiration value emitted from the oracle contract. + * Deletes the request from the `pendingRequests` mapping. + * Emits PluginCancelled event. 
+ * @param requestId The request ID + * @param payment The amount of PLI sent for the request + * @param callbackFunc The callback function specified for the request + * @param expiration The time of the expiration for the request + */ + function _cancelPluginRequest( + bytes32 requestId, + uint256 payment, + bytes4 callbackFunc, + uint256 expiration + ) internal { + OperatorInterface requested = OperatorInterface(s_pendingRequests[requestId]); + delete s_pendingRequests[requestId]; + emit PluginCancelled(requestId); + requested.cancelOracleRequest(requestId, payment, callbackFunc, expiration); + } + + /** + * @notice the next request count to be used in generating a nonce + * @dev starts at 1 in order to ensure consistent gas cost + * @return returns the next request count to be used in a nonce + */ + function _getNextRequestCount() internal view returns (uint256) { + return s_requestCount; + } + + /** + * @notice Sets the stored oracle address + * @param oracleAddress The address of the oracle contract + */ + function _setPluginOracle(address oracleAddress) internal { + s_oracle = OperatorInterface(oracleAddress); + } + + /** + * @notice Sets the PLI token address + * @param linkAddress The address of the PLI token contract + */ + function _setPluginToken(address linkAddress) internal { + s_link = LinkTokenInterface(linkAddress); + } + + /** + * @notice Sets the Plugin token address for the public + * network as given by the Pointer contract + */ + function _setPublicPluginToken() internal { + _setPluginToken(PointerInterface(PLI_TOKEN_POINTER).getAddress()); + } + + /** + * @notice Retrieves the stored address of the PLI token + * @return The address of the PLI token + */ + function _pluginTokenAddress() internal view returns (address) { + return address(s_link); + } + + /** + * @notice Retrieves the stored address of the oracle contract + * @return The address of the oracle contract + */ + function _pluginOracleAddress() internal view returns (address) { + 
return address(s_oracle); + } + + /** + * @notice Allows for a request which was created on another contract to be fulfilled + * on this contract + * @param oracleAddress The address of the oracle contract that will fulfill the request + * @param requestId The request ID used for the response + */ + function _addPluginExternalRequest( + address oracleAddress, + bytes32 requestId + ) internal notPendingRequest(requestId) { + s_pendingRequests[requestId] = oracleAddress; + } + + /** + * @notice Sets the stored oracle and PLI token contracts with the addresses resolved by ENS + * @dev Accounts for subnodes having different resolvers + * @param ensAddress The address of the ENS contract + * @param node The ENS node hash + */ + function _usePluginWithENS(address ensAddress, bytes32 node) internal { + s_ens = ENSInterface(ensAddress); + s_ensNode = node; + bytes32 linkSubnode = keccak256(abi.encodePacked(s_ensNode, ENS_TOKEN_SUBNAME)); + ENSResolver_Plugin resolver = ENSResolver_Plugin(s_ens.resolver(linkSubnode)); + _setPluginToken(resolver.addr(linkSubnode)); + _updatePluginOracleWithENS(); + } + + /** + * @notice Sets the stored oracle contract with the address resolved by ENS + * @dev This may be called on its own as long as `usePluginWithENS` has been called previously + */ + function _updatePluginOracleWithENS() internal { + bytes32 oracleSubnode = keccak256(abi.encodePacked(s_ensNode, ENS_ORACLE_SUBNAME)); + ENSResolver_Plugin resolver = ENSResolver_Plugin(s_ens.resolver(oracleSubnode)); + _setPluginOracle(resolver.addr(oracleSubnode)); + } + + /** + * @notice Ensures that the fulfillment is valid for this contract + * @dev Use if the contract developer prefers methods instead of modifiers for validation + * @param requestId The request ID for fulfillment + */ + function _validatePluginCallback( + bytes32 requestId + ) + internal + recordPluginFulfillment(requestId) // solhint-disable-next-line no-empty-blocks + {} + + /** + * @dev Reverts if the sender is not the 
oracle of the request. + * Emits PluginFulfilled event. + * @param requestId The request ID for fulfillment + */ + modifier recordPluginFulfillment(bytes32 requestId) { + require(msg.sender == s_pendingRequests[requestId], "Source must be the oracle of the request"); + delete s_pendingRequests[requestId]; + emit PluginFulfilled(requestId); + _; + } + + /** + * @dev Reverts if the request is already pending + * @param requestId The request ID for fulfillment + */ + modifier notPendingRequest(bytes32 requestId) { + require(s_pendingRequests[requestId] == address(0), "Request is already pending"); + _; + } +} diff --git a/contracts/src/v0.8/ValidatorProxy.sol b/contracts/src/v0.8/ValidatorProxy.sol new file mode 100644 index 00000000..4584bb02 --- /dev/null +++ b/contracts/src/v0.8/ValidatorProxy.sol @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {ConfirmedOwner} from "./shared/access/ConfirmedOwner.sol"; +import {AggregatorValidatorInterface} from "./shared/interfaces/AggregatorValidatorInterface.sol"; +import {TypeAndVersionInterface} from "./interfaces/TypeAndVersionInterface.sol"; + +// solhint-disable custom-errors +contract ValidatorProxy is AggregatorValidatorInterface, TypeAndVersionInterface, ConfirmedOwner { + /// @notice Uses a single storage slot to store the current address + struct AggregatorConfiguration { + address target; + bool hasNewProposal; + } + + struct ValidatorConfiguration { + AggregatorValidatorInterface target; + bool hasNewProposal; + } + + // Configuration for the current aggregator + AggregatorConfiguration private s_currentAggregator; + // Proposed aggregator address + address private s_proposedAggregator; + + // Configuration for the current validator + ValidatorConfiguration private s_currentValidator; + // Proposed validator address + AggregatorValidatorInterface private s_proposedValidator; + + event AggregatorProposed(address indexed aggregator); + event AggregatorUpgraded(address indexed 
previous, address indexed current); + event ValidatorProposed(AggregatorValidatorInterface indexed validator); + event ValidatorUpgraded(AggregatorValidatorInterface indexed previous, AggregatorValidatorInterface indexed current); + /// @notice The proposed aggregator called validate, but the call was not passed on to any validators + event ProposedAggregatorValidateCall( + address indexed proposed, + uint256 previousRoundId, + int256 previousAnswer, + uint256 currentRoundId, + int256 currentAnswer + ); + + /** + * @notice Construct the ValidatorProxy with an aggregator and a validator + * @param aggregator address + * @param validator address + */ + constructor(address aggregator, AggregatorValidatorInterface validator) ConfirmedOwner(msg.sender) { + s_currentAggregator = AggregatorConfiguration({target: aggregator, hasNewProposal: false}); + s_currentValidator = ValidatorConfiguration({target: validator, hasNewProposal: false}); + } + + /** + * @notice Validate a transmission + * @dev Must be called by either the `s_currentAggregator.target`, or the `s_proposedAggregator`. + * If called by the `s_currentAggregator.target` this function passes the call on to the `s_currentValidator.target` + * and the `s_proposedValidator`, if it is set. + * If called by the `s_proposedAggregator` this function emits a `ProposedAggregatorValidateCall` to signal that + * the call was received. + * @dev To guard against external `validate` calls reverting, we use raw calls here. + * We favour `call` over try-catch to ensure that failures are avoided even if the validator address is incorrectly + * set as a non-contract address. + * @dev If the `aggregator` and `validator` are the same contract or collude, this could exhibit reentrancy behavior. + * However, since that contract would have to be explicitly written for reentrancy and that the `owner` would have + * to configure this contract to use that malicious contract, we refrain from using mutex or check here. 
+ * @dev This does not perform any checks on any roundId, so it is possible that a validator receive different reports + * for the same roundId at different points in time. Validator implementations should be aware of this. + * @param previousRoundId uint256 + * @param previousAnswer int256 + * @param currentRoundId uint256 + * @param currentAnswer int256 + * @return bool + */ + function validate( + uint256 previousRoundId, + int256 previousAnswer, + uint256 currentRoundId, + int256 currentAnswer + ) external override returns (bool) { + address currentAggregator = s_currentAggregator.target; + if (msg.sender != currentAggregator) { + address proposedAggregator = s_proposedAggregator; + require(msg.sender == proposedAggregator, "Not a configured aggregator"); + // If the aggregator is still in proposed state, emit an event and don't push to any validator. + // This is to confirm that `validate` is being called prior to upgrade. + emit ProposedAggregatorValidateCall( + proposedAggregator, + previousRoundId, + previousAnswer, + currentRoundId, + currentAnswer + ); + return true; + } + + // Send the validate call to the current validator + ValidatorConfiguration memory currentValidator = s_currentValidator; + address currentValidatorAddress = address(currentValidator.target); + require(currentValidatorAddress != address(0), "No validator set"); + // solhint-disable-next-line avoid-low-level-calls + currentValidatorAddress.call( + abi.encodeWithSelector( + AggregatorValidatorInterface.validate.selector, + previousRoundId, + previousAnswer, + currentRoundId, + currentAnswer + ) + ); + // If there is a new proposed validator, send the validate call to that validator also + if (currentValidator.hasNewProposal) { + // solhint-disable-next-line avoid-low-level-calls + address(s_proposedValidator).call( + abi.encodeWithSelector( + AggregatorValidatorInterface.validate.selector, + previousRoundId, + previousAnswer, + currentRoundId, + currentAnswer + ) + ); + } + return true; 
+ } + + /** AGGREGATOR CONFIGURATION FUNCTIONS **/ + + /** + * @notice Propose an aggregator + * @dev A zero address can be used to unset the proposed aggregator. Only owner can call. + * @param proposed address + */ + function proposeNewAggregator(address proposed) external onlyOwner { + require(s_proposedAggregator != proposed && s_currentAggregator.target != proposed, "Invalid proposal"); + s_proposedAggregator = proposed; + // If proposed is zero address, hasNewProposal = false + s_currentAggregator.hasNewProposal = (proposed != address(0)); + emit AggregatorProposed(proposed); + } + + /** + * @notice Upgrade the aggregator by setting the current aggregator as the proposed aggregator. + * @dev Must have a proposed aggregator. Only owner can call. + */ + function upgradeAggregator() external onlyOwner { + // Get configuration in memory + AggregatorConfiguration memory current = s_currentAggregator; + address previous = current.target; + address proposed = s_proposedAggregator; + + // Perform the upgrade + require(current.hasNewProposal, "No proposal"); + s_currentAggregator = AggregatorConfiguration({target: proposed, hasNewProposal: false}); + delete s_proposedAggregator; + + emit AggregatorUpgraded(previous, proposed); + } + + /** + * @notice Get aggregator details + * @return current address + * @return hasProposal bool + * @return proposed address + */ + function getAggregators() external view returns (address current, bool hasProposal, address proposed) { + current = s_currentAggregator.target; + hasProposal = s_currentAggregator.hasNewProposal; + proposed = s_proposedAggregator; + return (current, hasProposal, proposed); + } + + /** VALIDATOR CONFIGURATION FUNCTIONS **/ + + /** + * @notice Propose an validator + * @dev A zero address can be used to unset the proposed validator. Only owner can call. 
+ * @param proposed address + */ + function proposeNewValidator(AggregatorValidatorInterface proposed) external onlyOwner { + require(s_proposedValidator != proposed && s_currentValidator.target != proposed, "Invalid proposal"); + s_proposedValidator = proposed; + // If proposed is zero address, hasNewProposal = false + s_currentValidator.hasNewProposal = (address(proposed) != address(0)); + emit ValidatorProposed(proposed); + } + + /** + * @notice Upgrade the validator by setting the current validator as the proposed validator. + * @dev Must have a proposed validator. Only owner can call. + */ + function upgradeValidator() external onlyOwner { + // Get configuration in memory + ValidatorConfiguration memory current = s_currentValidator; + AggregatorValidatorInterface previous = current.target; + AggregatorValidatorInterface proposed = s_proposedValidator; + + // Perform the upgrade + require(current.hasNewProposal, "No proposal"); + s_currentValidator = ValidatorConfiguration({target: proposed, hasNewProposal: false}); + delete s_proposedValidator; + + emit ValidatorUpgraded(previous, proposed); + } + + /** + * @notice Get validator details + * @return current address + * @return hasProposal bool + * @return proposed address + */ + function getValidators() + external + view + returns (AggregatorValidatorInterface current, bool hasProposal, AggregatorValidatorInterface proposed) + { + current = s_currentValidator.target; + hasProposal = s_currentValidator.hasNewProposal; + proposed = s_proposedValidator; + return (current, hasProposal, proposed); + } + + /** + * @notice The type and version of this contract + * @return Type and version string + */ + function typeAndVersion() external pure virtual override returns (string memory) { + return "ValidatorProxy 1.0.0"; + } +} diff --git a/contracts/src/v0.8/automation/AutomationBase.sol b/contracts/src/v0.8/automation/AutomationBase.sol new file mode 100644 index 00000000..8267fbc6 --- /dev/null +++ 
b/contracts/src/v0.8/automation/AutomationBase.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract AutomationBase { + error OnlySimulatedBackend(); + + /** + * @notice method that allows it to be simulated via eth_call by checking that + * the sender is the zero address. + */ + function _preventExecution() internal view { + // solhint-disable-next-line avoid-tx-origin + if (tx.origin != address(0)) { + revert OnlySimulatedBackend(); + } + } + + /** + * @notice modifier that allows it to be simulated via eth_call by checking + * that the sender is the zero address. + */ + modifier cannotExecute() { + _preventExecution(); + _; + } +} diff --git a/contracts/src/v0.8/automation/AutomationCompatible.sol b/contracts/src/v0.8/automation/AutomationCompatible.sol new file mode 100644 index 00000000..65332436 --- /dev/null +++ b/contracts/src/v0.8/automation/AutomationCompatible.sol @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {AutomationBase} from "./AutomationBase.sol"; +import {AutomationCompatibleInterface} from "./interfaces/AutomationCompatibleInterface.sol"; + +abstract contract AutomationCompatible is AutomationBase, AutomationCompatibleInterface {} diff --git a/contracts/src/v0.8/automation/AutomationForwarder.sol b/contracts/src/v0.8/automation/AutomationForwarder.sol new file mode 100644 index 00000000..58707e96 --- /dev/null +++ b/contracts/src/v0.8/automation/AutomationForwarder.sol @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.16; + +import {IAutomationRegistryConsumer} from "./interfaces/IAutomationRegistryConsumer.sol"; + +uint256 constant PERFORM_GAS_CUSHION = 5_000; + +/** + * @title AutomationForwarder is a relayer that sits between the registry and the customer's target contract + * @dev The purpose of the forwarder is to give customers a consistent address to authorize against, + * which stays consistent between migrations. 
The Forwarder also exposes the registry address, so that users who + * want to programatically interact with the registry (ie top up funds) can do so. + */ +contract AutomationForwarder { + /// @notice the user's target contract address + address private immutable i_target; + + /// @notice the shared logic address + address private immutable i_logic; + + IAutomationRegistryConsumer private s_registry; + + constructor(address target, address registry, address logic) { + s_registry = IAutomationRegistryConsumer(registry); + i_target = target; + i_logic = logic; + } + + /** + * @notice forward is called by the registry and forwards the call to the target + * @param gasAmount is the amount of gas to use in the call + * @param data is the 4 bytes function selector + arbitrary function data + * @return success indicating whether the target call succeeded or failed + */ + function forward(uint256 gasAmount, bytes memory data) external returns (bool success, uint256 gasUsed) { + if (msg.sender != address(s_registry)) revert(); + address target = i_target; + gasUsed = gasleft(); + assembly { + let g := gas() + // Compute g -= PERFORM_GAS_CUSHION and check for underflow + if lt(g, PERFORM_GAS_CUSHION) { + revert(0, 0) + } + g := sub(g, PERFORM_GAS_CUSHION) + // if g - g//64 <= gasAmount, revert + // (we subtract g//64 because of EIP-150) + if iszero(gt(sub(g, div(g, 64)), gasAmount)) { + revert(0, 0) + } + // solidity calls check that a contract actually exists at the destination, so we do the same + if iszero(extcodesize(target)) { + revert(0, 0) + } + // call with exact gas + success := call(gasAmount, target, 0, add(data, 0x20), mload(data), 0, 0) + } + gasUsed = gasUsed - gasleft(); + return (success, gasUsed); + } + + function getTarget() external view returns (address) { + return i_target; + } + + fallback() external { + // copy to memory for assembly access + address logic = i_logic; + // copied directly from OZ's Proxy contract + assembly { + // Copy msg.data. 
We take full control of memory in this inline assembly + // block because it will not return to Solidity code. We overwrite the + // Solidity scratch pad at memory position 0. + calldatacopy(0, 0, calldatasize()) + + // out and outsize are 0 because we don't know the size yet. + let result := delegatecall(gas(), logic, 0, calldatasize(), 0, 0) + + // Copy the returned data. + returndatacopy(0, 0, returndatasize()) + + switch result + // delegatecall returns 0 on error. + case 0 { + revert(0, returndatasize()) + } + default { + return(0, returndatasize()) + } + } + } +} diff --git a/contracts/src/v0.8/automation/AutomationForwarderLogic.sol b/contracts/src/v0.8/automation/AutomationForwarderLogic.sol new file mode 100644 index 00000000..d8236f3d --- /dev/null +++ b/contracts/src/v0.8/automation/AutomationForwarderLogic.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.16; + +import {IAutomationRegistryConsumer} from "./interfaces/IAutomationRegistryConsumer.sol"; +import {ITypeAndVersion} from "../shared/interfaces/ITypeAndVersion.sol"; + +contract AutomationForwarderLogic is ITypeAndVersion { + IAutomationRegistryConsumer private s_registry; + + string public constant typeAndVersion = "AutomationForwarder 1.0.0"; + + /** + * @notice updateRegistry is called by the registry during migrations + * @param newRegistry is the registry that this forwarder is being migrated to + */ + function updateRegistry(address newRegistry) external { + if (msg.sender != address(s_registry)) revert(); + s_registry = IAutomationRegistryConsumer(newRegistry); + } + + function getRegistry() external view returns (IAutomationRegistryConsumer) { + return s_registry; + } +} diff --git a/contracts/src/v0.8/automation/Chainable.sol b/contracts/src/v0.8/automation/Chainable.sol new file mode 100644 index 00000000..9ebc8c34 --- /dev/null +++ b/contracts/src/v0.8/automation/Chainable.sol @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.16; + 
+/** + * @title Chainable - the contract size limit nullifier + * @notice Chainable is designed to link together a "chain" of contracts through fallback functions + * and delegatecalls. All code is executed in the context of the head of the chain, the "master" contract. + */ +contract Chainable { + /** + * @dev addresses of the next contract in the chain **have to be immutable/constant** or the system won't work + */ + address private immutable i_FALLBACK_ADDRESS; + + /** + * @param fallbackAddress the address of the next contract in the chain + */ + constructor(address fallbackAddress) { + i_FALLBACK_ADDRESS = fallbackAddress; + } + + /** + * @notice returns the address of the next contract in the chain + */ + function fallbackTo() external view returns (address) { + return i_FALLBACK_ADDRESS; + } + + /** + * @notice the fallback function routes the call to the next contract in the chain + * @dev most of the implementation is copied directly from OZ's Proxy contract + */ + // solhint-disable payable-fallback + // solhint-disable-next-line no-complex-fallback + fallback() external { + // copy to memory for assembly access + address next = i_FALLBACK_ADDRESS; + // copied directly from OZ's Proxy contract + assembly { + // Copy msg.data. We take full control of memory in this inline assembly + // block because it will not return to Solidity code. We overwrite the + // Solidity scratch pad at memory position 0. + calldatacopy(0, 0, calldatasize()) + + // Call the next contract. + // out and outsize are 0 because we don't know the size yet. + let result := delegatecall(gas(), next, 0, calldatasize(), 0, 0) + + // Copy the returned data. + returndatacopy(0, 0, returndatasize()) + + switch result + // delegatecall returns 0 on error. 
+ case 0 { + revert(0, returndatasize()) + } + default { + return(0, returndatasize()) + } + } + } +} diff --git a/contracts/src/v0.8/automation/ExecutionPrevention.sol b/contracts/src/v0.8/automation/ExecutionPrevention.sol new file mode 100644 index 00000000..30a823c4 --- /dev/null +++ b/contracts/src/v0.8/automation/ExecutionPrevention.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +abstract contract ExecutionPrevention { + error OnlySimulatedBackend(); + + /** + * @notice method that allows it to be simulated via eth_call by checking that + * the sender is the zero address. + */ + function _preventExecution() internal view { + // solhint-disable-next-line avoid-tx-origin + if (tx.origin != address(0)) { + revert OnlySimulatedBackend(); + } + } + + /** + * @notice modifier that allows it to be simulated via eth_call by checking + * that the sender is the zero address. + */ + modifier cannotExecute() { + _preventExecution(); + _; + } +} diff --git a/contracts/src/v0.8/automation/HeartbeatRequester.sol b/contracts/src/v0.8/automation/HeartbeatRequester.sol new file mode 100644 index 00000000..e7fa7440 --- /dev/null +++ b/contracts/src/v0.8/automation/HeartbeatRequester.sol @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: MIT +// solhint-disable-next-line one-contract-per-file +pragma solidity 0.8.6; + +import {TypeAndVersionInterface} from "./../interfaces/TypeAndVersionInterface.sol"; +import {ConfirmedOwner} from "../shared/access/ConfirmedOwner.sol"; + +// defines some interfaces for type safety and reduces encoding/decoding +// does not use the full interfaces intentionally because the requester only uses a fraction of them +interface IAggregatorProxy { + function aggregator() external view returns (address); +} + +interface IOffchainAggregator { + function requestNewRound() external returns (uint80); +} + +/** + * @notice The heartbeat requester will maintain a mapping from allowed callers to corresponding proxies. 
When requested + * by eligible caller, it will call a proxy for an aggregator address and request a new round. The aggregator + * is gated by permissions and this requester address needs to be whitelisted. + */ +contract HeartbeatRequester is TypeAndVersionInterface, ConfirmedOwner { + event HeartbeatPermitted(address indexed permittedCaller, address newProxy, address oldProxy); + event HeartbeatRemoved(address indexed permittedCaller, address removedProxy); + + error HeartbeatNotPermitted(); + + mapping(address => IAggregatorProxy) internal s_heartbeatList; + + /** + * @notice versions: + * - HeartbeatRequester 1.0.0: The requester fetches the latest aggregator address from proxy, and request a new round + * using the aggregator address. + */ + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant override typeAndVersion = "HeartbeatRequester 1.0.0"; + + constructor() ConfirmedOwner(msg.sender) {} + + /** + * @notice adds a permitted caller and proxy combination. + * @param permittedCaller the permitted caller + * @param proxy the proxy corresponding to this caller + */ + function permitHeartbeat(address permittedCaller, IAggregatorProxy proxy) external onlyOwner { + address oldProxy = address(s_heartbeatList[permittedCaller]); + s_heartbeatList[permittedCaller] = proxy; + emit HeartbeatPermitted(permittedCaller, address(proxy), oldProxy); + } + + /** + * @notice removes a permitted caller and proxy combination. + * @param permittedCaller the permitted caller + */ + function removeHeartbeat(address permittedCaller) external onlyOwner { + address removedProxy = address(s_heartbeatList[permittedCaller]); + delete s_heartbeatList[permittedCaller]; + emit HeartbeatRemoved(permittedCaller, removedProxy); + } + + /** + * @notice fetches aggregator address from proxy and requests a new round. 
+ * @param proxy the proxy address + */ + function getAggregatorAndRequestHeartbeat(address proxy) external { + IAggregatorProxy proxyInterface = s_heartbeatList[msg.sender]; + if (address(proxyInterface) != proxy) revert HeartbeatNotPermitted(); + + IOffchainAggregator aggregator = IOffchainAggregator(proxyInterface.aggregator()); + aggregator.requestNewRound(); + } +} diff --git a/contracts/src/v0.8/automation/KeeperBase.sol b/contracts/src/v0.8/automation/KeeperBase.sol new file mode 100644 index 00000000..0e050d4a --- /dev/null +++ b/contracts/src/v0.8/automation/KeeperBase.sol @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: MIT +/** + * @notice This is a deprecated interface. Please use AutomationBase directly. + */ +pragma solidity ^0.8.0; +// solhint-disable-next-line no-unused-import +import {AutomationBase as KeeperBase} from "./AutomationBase.sol"; diff --git a/contracts/src/v0.8/automation/KeeperCompatible.sol b/contracts/src/v0.8/automation/KeeperCompatible.sol new file mode 100644 index 00000000..6379fe52 --- /dev/null +++ b/contracts/src/v0.8/automation/KeeperCompatible.sol @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: MIT +/** + * @notice This is a deprecated interface. Please use AutomationCompatible directly. 
+ */ +pragma solidity ^0.8.0; +// solhint-disable-next-line no-unused-import +import {AutomationCompatible as KeeperCompatible} from "./AutomationCompatible.sol"; +// solhint-disable-next-line no-unused-import +import {AutomationBase as KeeperBase} from "./AutomationBase.sol"; +// solhint-disable-next-line no-unused-import +import {AutomationCompatibleInterface as KeeperCompatibleInterface} from "./interfaces/AutomationCompatibleInterface.sol"; diff --git a/contracts/src/v0.8/automation/UpkeepFormat.sol b/contracts/src/v0.8/automation/UpkeepFormat.sol new file mode 100644 index 00000000..e765327b --- /dev/null +++ b/contracts/src/v0.8/automation/UpkeepFormat.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +/** + * @dev this struct is only maintained for backwards compatibility with MigratableKeeperRegistryInterface + * it should be deprecated in the future in favor of MigratableKeeperRegistryInterfaceV2 + */ +enum UpkeepFormat { + V1, + V2, + V3 +} diff --git a/contracts/src/v0.8/automation/UpkeepTranscoder.sol b/contracts/src/v0.8/automation/UpkeepTranscoder.sol new file mode 100644 index 00000000..24f6f2ba --- /dev/null +++ b/contracts/src/v0.8/automation/UpkeepTranscoder.sol @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +import {UpkeepTranscoderInterface} from "./interfaces/UpkeepTranscoderInterface.sol"; +import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol"; +import {UpkeepFormat} from "./UpkeepFormat.sol"; + +/** + * @notice Transcoder for converting upkeep data from one keeper + * registry version to another + */ +contract UpkeepTranscoder is UpkeepTranscoderInterface, TypeAndVersionInterface { + error InvalidTranscoding(); + + /** + * @notice versions: + * - UpkeepTranscoder 1.0.0: placeholder to allow new formats in the future + */ + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant override typeAndVersion 
= "UpkeepTranscoder 1.0.0"; + + /** + * @notice transcodeUpkeeps transforms upkeep data from the format expected by + * one registry to the format expected by another. It future-proofs migrations + * by allowing keepers team to customize migration paths and set sensible defaults + * when new fields are added + * @param fromVersion struct version the upkeep is migrating from + * @param toVersion struct version the upkeep is migrating to + * @param encodedUpkeeps encoded upkeep data + * @dev this contract & function are simple now, but should evolve as new registries + * and migration paths are added + */ + function transcodeUpkeeps( + UpkeepFormat fromVersion, + UpkeepFormat toVersion, + bytes calldata encodedUpkeeps + ) external view override returns (bytes memory) { + if (fromVersion != toVersion) { + revert InvalidTranscoding(); + } + + return encodedUpkeeps; + } +} diff --git a/contracts/src/v0.8/automation/dev/MercuryRegistry.sol b/contracts/src/v0.8/automation/dev/MercuryRegistry.sol new file mode 100644 index 00000000..6a5dafc1 --- /dev/null +++ b/contracts/src/v0.8/automation/dev/MercuryRegistry.sol @@ -0,0 +1,312 @@ +pragma solidity 0.8.6; + +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {AutomationCompatibleInterface} from "../interfaces/AutomationCompatibleInterface.sol"; +import {StreamsLookupCompatibleInterface} from "../interfaces/StreamsLookupCompatibleInterface.sol"; +import {ChainSpecificUtil} from "../../ChainSpecificUtil.sol"; + +/*--------------------------------------------------------------------------------------------------------------------+ +| Mercury + Automation | +| ________________ | +| This implementation allows for an on-chain registry of price feed data to be maintained and updated by Automation | +| nodes. The upkeep provides the following advantages: | +| - Node operator savings. The single committee of automation nodes is able to update all price feed data using | +| off-chain feed data. 
| +| - Fetch batches of price data. All price feed data is held on the same contract, so a contract that needs | +| multiple sets of feed data can fetch them while paying for only one external call. | +| - Scalability. Feeds can be added or removed from the contract with a single contract call, and the number of | +| feeds that the registry can store is unbounded. | +| | +| Key Contracts: | +| - `MercuryRegistry.sol` - stores price feed data and implements core logic. | +| - `MercuryRegistryBatchUpkeep.sol` - enables batching for the registry. | +| - `MercuryRegistry.t.sol` - contains foundry tests to demonstrate various flows. | +| | +| NOTE: This contract uses Mercury v0.2. Automation will likely upgrade to v0.3 eventually, which may change some | +| components such as the Report struct, verification, and the StreamsLookup revert. | +| | +| TODO: | +| - Optimize gas consumption. | +-+---------------------------------------------------------------------------------------------------------------------*/ +contract MercuryRegistry is ConfirmedOwner, AutomationCompatibleInterface, StreamsLookupCompatibleInterface { + error DuplicateFeed(string feedId); + error FeedNotActive(string feedId); + error StaleReport(string feedId, uint32 currentTimestamp, uint32 incomingTimestamp); + error InvalidFeeds(); + + // Feed object used for storing feed data. 
+ // not included but contained in reports:
+ // - blocknumberUpperBound
+ // - upperBlockhash
+ // - blocknumberLowerBound
+ // - currentBlockTimestamp
+ struct Feed {
+ uint32 observationsTimestamp; // the timestamp of the most recent data assigned to this feed
+ int192 price; // the current price of the feed
+ int192 bid; // the current bid price of the feed
+ int192 ask; // the current ask price of the feed
+ string feedName; // the name of the feed
+ string feedId; // the id of the feed (hex encoded)
+ bool active; // true if the feed is being actively updated, otherwise false
+ int192 deviationPercentagePPM; // acceptable deviation threshold - 1.5% = 15_000, 100% = 1_000_000, etc..
+ uint32 stalenessSeconds; // acceptable staleness threshold - 60 = 1 minute, 300 = 5 minutes, etc..
+ }
+
+ // Report object obtained from off-chain Mercury server.
+ struct Report {
+ bytes32 feedId; // the feed Id of the report
+ uint32 observationsTimestamp; // the timestamp of when the data was observed
+ int192 price; // the median value of the OCR round
+ int192 bid; // the median bid of the OCR round
+ int192 ask; // the median ask of the OCR round
+ uint64 blocknumberUpperBound; // the highest block observed at the time the report was generated
+ bytes32 upperBlockhash; // the blockhash of the highest block observed
+ uint64 blocknumberLowerBound; // the lowest block observed at the time the report was generated
+ uint64 currentBlockTimestamp; // the timestamp of the highest block observed
+ }
+
+ event FeedUpdated(uint32 observationsTimestamp, int192 price, int192 bid, int192 ask, string feedId);
+
+ uint32 private constant MIN_GAS_FOR_PERFORM = 200_000;
+
+ string constant c_feedParamKey = "feedIdHex"; // for Mercury v0.2 - format by which feeds are identified
+ string constant c_timeParamKey = "blockNumber"; // for Mercury v0.2 - format by which feeds are filtered to be sufficiently recent
+ IVerifierProxy public s_verifier; // for Mercury v0.2 - verifies off-chain
reports + + int192 constant scale = 1_000_000; // a scalar used for measuring deviation with precision + + string[] public s_feeds; // list of feed Ids + mapping(string => Feed) public s_feedMapping; // mapping of feed Ids to stored feed data + + constructor( + string[] memory feedIds, + string[] memory feedNames, + int192[] memory deviationPercentagePPMs, + uint32[] memory stalenessSeconds, + address verifier + ) ConfirmedOwner(msg.sender) { + s_verifier = IVerifierProxy(verifier); + + // Store desired feeds. + setFeeds(feedIds, feedNames, deviationPercentagePPMs, stalenessSeconds); + } + + // Returns a user-defined batch of feed data, based on the on-chain state. + function getLatestFeedData(string[] memory feedIds) external view returns (Feed[] memory) { + Feed[] memory feeds = new Feed[](feedIds.length); + for (uint256 i = 0; i < feedIds.length; i++) { + feeds[i] = s_feedMapping[feedIds[i]]; + } + + return feeds; + } + + // Invoke a feed lookup through the checkUpkeep function. Expected to run on a cron schedule. + function checkUpkeep(bytes calldata /* data */) external view override returns (bool, bytes memory) { + string[] memory feeds = s_feeds; + return revertForFeedLookup(feeds); + } + + // Extracted from `checkUpkeep` for batching purposes. + function revertForFeedLookup(string[] memory feeds) public view returns (bool, bytes memory) { + uint256 blockNumber = ChainSpecificUtil._getBlockNumber(); + revert StreamsLookup(c_feedParamKey, feeds, c_timeParamKey, blockNumber, ""); + } + + // Filter for feeds that have deviated sufficiently from their respective on-chain values, or where + // the on-chain values are sufficiently stale. 
+ function checkCallback( + bytes[] memory values, + bytes memory lookupData + ) external view override returns (bool, bytes memory) { + bytes[] memory filteredValues = new bytes[](values.length); + uint256 count = 0; + for (uint256 i = 0; i < values.length; i++) { + Report memory report = getReport(values[i]); + string memory feedId = bytes32ToHexString(abi.encodePacked(report.feedId)); + Feed memory feed = s_feedMapping[feedId]; + if ( + (report.observationsTimestamp - feed.observationsTimestamp > feed.stalenessSeconds) || + deviationExceedsThreshold(feed.price, report.price, feed.deviationPercentagePPM) + ) { + filteredValues[count] = values[i]; + count++; + } + } + + // Adjusts the length of the filteredValues array to `count` such that it + // does not have extra empty slots, in case some items were filtered. + assembly { + mstore(filteredValues, count) + } + + bytes memory performData = abi.encode(filteredValues, lookupData); + return (filteredValues.length > 0, performData); + } + + // Use deviated off-chain values to update on-chain state. + function performUpkeep(bytes calldata performData) external override { + (bytes[] memory values /* bytes memory lookupData */, ) = abi.decode(performData, (bytes[], bytes)); + for (uint256 i = 0; i < values.length; i++) { + // Verify and decode the Mercury report. + Report memory report = abi.decode(s_verifier.verify(values[i]), (Report)); + string memory feedId = bytes32ToHexString(abi.encodePacked(report.feedId)); + + // Feeds that have been removed between checkUpkeep and performUpkeep should not be updated. + if (!s_feedMapping[feedId].active) { + revert FeedNotActive(feedId); + } + + // Ensure stale reports do not cause a regression in the registry. + if (s_feedMapping[feedId].observationsTimestamp > report.observationsTimestamp) { + revert StaleReport(feedId, s_feedMapping[feedId].observationsTimestamp, report.observationsTimestamp); + } + + // Assign new values to state. 
+ s_feedMapping[feedId].bid = report.bid;
+ s_feedMapping[feedId].ask = report.ask;
+ s_feedMapping[feedId].price = report.price;
+ s_feedMapping[feedId].observationsTimestamp = report.observationsTimestamp;
+
+ // Emit log.
+ emit FeedUpdated(report.observationsTimestamp, report.price, report.bid, report.ask, feedId);
+
+ // Ensure enough gas remains for the next iteration. Otherwise, stop here.
+ if (gasleft() < MIN_GAS_FOR_PERFORM) {
+ return;
+ }
+ }
+ }
+
+ // Decodes a mercury response into an on-chain object. Thanks @mikestone!!
+ function getReport(bytes memory signedReport) internal pure returns (Report memory) {
+ /*
+ * bytes32[3] memory reportContext,
+ * bytes memory reportData,
+ * bytes32[] memory rs,
+ * bytes32[] memory ss,
+ * bytes32 rawVs
+ **/
+ (, bytes memory reportData, , , ) = abi.decode(signedReport, (bytes32[3], bytes, bytes32[], bytes32[], bytes32));
+
+ Report memory report = abi.decode(reportData, (Report));
+ return report;
+ }
+
+ // Check if the off-chain value has deviated sufficiently from the on-chain value to justify an update.
+ // `scale` is used to ensure precision is not lost.
+ function deviationExceedsThreshold(
+ int192 onChain,
+ int192 offChain,
+ int192 deviationPercentagePPM
+ ) public pure returns (bool) {
+ // Compute absolute difference between the on-chain and off-chain values.
+ int192 scaledDifference = (onChain - offChain) * scale;
+ if (scaledDifference < 0) {
+ scaledDifference = -scaledDifference;
+ }
+
+ // Compare to the allowed deviation from the on-chain value.
+ int192 deviationMax = ((onChain * scale) * deviationPercentagePPM) / scale;
+ return scaledDifference > deviationMax;
+ }
+
+ // Helper function to reconcile a difference in formatting:
+ // - Automation passes feedId into their off-chain lookup function as a string.
+ // - Mercury stores feedId in their reports as a bytes32.
+ function bytes32ToHexString(bytes memory buffer) internal pure returns (string memory) { + bytes memory converted = new bytes(buffer.length * 2); + bytes memory _base = "0123456789abcdef"; + for (uint256 i = 0; i < buffer.length; i++) { + converted[i * 2] = _base[uint8(buffer[i]) / _base.length]; + converted[i * 2 + 1] = _base[uint8(buffer[i]) % _base.length]; + } + return string(abi.encodePacked("0x", converted)); + } + + function addFeeds( + string[] memory feedIds, + string[] memory feedNames, + int192[] memory deviationPercentagePPMs, + uint32[] memory stalenessSeconds + ) external onlyOwner feedsAreValid(feedIds, feedNames, deviationPercentagePPMs, stalenessSeconds) { + for (uint256 i = 0; i < feedIds.length; i++) { + string memory feedId = feedIds[i]; + if (s_feedMapping[feedId].active) { + revert DuplicateFeed(feedId); + } + updateFeed(feedId, feedNames[i], deviationPercentagePPMs[i], stalenessSeconds[i]); + s_feedMapping[feedId].active = true; + + s_feeds.push(feedId); + } + } + + function setFeeds( + string[] memory feedIds, + string[] memory feedNames, + int192[] memory deviationPercentagePPMs, + uint32[] memory stalenessSeconds + ) public onlyOwner feedsAreValid(feedIds, feedNames, deviationPercentagePPMs, stalenessSeconds) { + // Clear prior feeds. + for (uint256 i = 0; i < s_feeds.length; i++) { + s_feedMapping[s_feeds[i]].active = false; + } + + // Assign new feeds. 
+ for (uint256 i = 0; i < feedIds.length; i++) { + string memory feedId = feedIds[i]; + if (s_feedMapping[feedId].active) { + revert DuplicateFeed(feedId); + } + updateFeed(feedId, feedNames[i], deviationPercentagePPMs[i], stalenessSeconds[i]); + s_feedMapping[feedId].active = true; + } + s_feeds = feedIds; + } + + function updateFeed( + string memory feedId, + string memory feedName, + int192 deviationPercentagePPM, + uint32 stalnessSeconds + ) internal { + s_feedMapping[feedId].feedName = feedName; + s_feedMapping[feedId].deviationPercentagePPM = deviationPercentagePPM; + s_feedMapping[feedId].stalenessSeconds = stalnessSeconds; + s_feedMapping[feedId].feedId = feedId; + } + + function setVerifier(address verifier) external onlyOwner { + s_verifier = IVerifierProxy(verifier); + } + + modifier feedsAreValid( + string[] memory feedIds, + string[] memory feedNames, + int192[] memory deviationPercentagePPMs, + uint32[] memory stalenessSeconds + ) { + if (feedIds.length != feedNames.length) { + revert InvalidFeeds(); + } + if (feedIds.length != deviationPercentagePPMs.length) { + revert InvalidFeeds(); + } + if (feedIds.length != stalenessSeconds.length) { + revert InvalidFeeds(); + } + _; + } +} + +interface IVerifierProxy { + /** + * @notice Verifies that the data encoded has been signed + * correctly by routing to the correct verifier, and bills the user if applicable. + * @param payload The encoded data to be verified, including the signed + * report and any metadata for billing. + * @return verifiedReport The encoded report from the verifier. 
+ */ + function verify(bytes calldata payload) external payable returns (bytes memory verifiedReport); +} diff --git a/contracts/src/v0.8/automation/dev/MercuryRegistryBatchUpkeep.sol b/contracts/src/v0.8/automation/dev/MercuryRegistryBatchUpkeep.sol new file mode 100644 index 00000000..8fa32c8a --- /dev/null +++ b/contracts/src/v0.8/automation/dev/MercuryRegistryBatchUpkeep.sol @@ -0,0 +1,80 @@ +pragma solidity 0.8.6; + +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {AutomationCompatibleInterface} from "../interfaces/AutomationCompatibleInterface.sol"; +import {StreamsLookupCompatibleInterface} from "../interfaces/StreamsLookupCompatibleInterface.sol"; +import {MercuryRegistry} from "./MercuryRegistry.sol"; + +contract MercuryRegistryBatchUpkeep is ConfirmedOwner, AutomationCompatibleInterface, StreamsLookupCompatibleInterface { + error BatchSizeTooLarge(uint256 batchsize, uint256 maxBatchSize); + // Use a reasonable maximum batch size. Every Mercury report is ~750 bytes, too many reports + // passed into a single batch could exceed the calldata or transaction size limit for some blockchains. + uint256 public constant MAX_BATCH_SIZE = 50; + + MercuryRegistry public immutable i_registry; // master registry, where feed data is stored + + uint256 public s_batchStart; // starting index of upkeep batch on the MercuryRegistry's s_feeds array, inclusive + uint256 public s_batchEnd; // ending index of upkeep batch on the MercuryRegistry's s_feeds array, exclusive + + constructor(address mercuryRegistry, uint256 batchStart, uint256 batchEnd) ConfirmedOwner(msg.sender) { + i_registry = MercuryRegistry(mercuryRegistry); + + updateBatchingWindow(batchStart, batchEnd); + } + + // Invoke a feed lookup for the feeds this upkeep is responsible for. 
+ function checkUpkeep(bytes calldata /* data */) external view override returns (bool, bytes memory) { + uint256 start = s_batchStart; + uint256 end = s_batchEnd; + string[] memory feeds = new string[](end - start); + uint256 count = 0; + for (uint256 i = start; i < end; i++) { + string memory feedId; + + // If the feed doesn't exist, then the batching window exceeds the underlying registry length. + // So, the batch will be partially empty. + try i_registry.s_feeds(i) returns (string memory f) { + feedId = f; + } catch (bytes memory /* data */) { + break; + } + + // Assign feed. + feeds[i - start] = feedId; + count++; + } + + // Adjusts the length of the batch to `count` such that it does not + // contain any empty feed Ids. + assembly { + mstore(feeds, count) + } + + return i_registry.revertForFeedLookup(feeds); + } + + // Use the master registry to assess deviations. + function checkCallback( + bytes[] memory values, + bytes memory lookupData + ) external view override returns (bool, bytes memory) { + return i_registry.checkCallback(values, lookupData); + } + + // Use the master registry to update state. + function performUpkeep(bytes calldata performData) external override { + i_registry.performUpkeep(performData); + } + + function updateBatchingWindow(uint256 batchStart, uint256 batchEnd) public onlyOwner { + // Do not allow a batched mercury registry to use an excessive batch size, as to avoid + // calldata size limits. If more feeds need to be updated than allowed by the batch size, + // deploy another `MercuryRegistryBatchUpkeep` contract and register another upkeep job. 
+ if (batchEnd - batchStart > MAX_BATCH_SIZE) { + revert BatchSizeTooLarge(batchEnd - batchStart, MAX_BATCH_SIZE); + } + + s_batchStart = batchStart; + s_batchEnd = batchEnd; + } +} diff --git a/contracts/src/v0.8/automation/dev/chains/ArbitrumModule.sol b/contracts/src/v0.8/automation/dev/chains/ArbitrumModule.sol new file mode 100644 index 00000000..1bb4b45e --- /dev/null +++ b/contracts/src/v0.8/automation/dev/chains/ArbitrumModule.sol @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.19; + +import {ArbSys} from "../../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol"; +import {ArbGasInfo} from "../../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol"; +import {ChainModuleBase} from "./ChainModuleBase.sol"; + +contract ArbitrumModule is ChainModuleBase { + /// @dev ARB_SYS_ADDR is the address of the ArbSys precompile on Arbitrum. + /// @dev reference: https://github.com/OffchainLabs/nitro/blob/v2.0.14/contracts/src/precompiles/ArbSys.sol#L10 + address private constant ARB_SYS_ADDR = 0x0000000000000000000000000000000000000064; + ArbSys private constant ARB_SYS = ArbSys(ARB_SYS_ADDR); + + /// @dev ARB_GAS_ADDR is the address of the ArbGasInfo precompile on Arbitrum. 
+ /// @dev reference: https://github.com/OffchainLabs/nitro/blob/v2.0.14/contracts/src/precompiles/ArbGasInfo.sol#L10 + address private constant ARB_GAS_ADDR = 0x000000000000000000000000000000000000006C; + ArbGasInfo private constant ARB_GAS = ArbGasInfo(ARB_GAS_ADDR); + + function blockHash(uint256 n) external view override returns (bytes32) { + uint256 blockNum = ARB_SYS.arbBlockNumber(); + if (n >= blockNum || blockNum - n > 256) { + return ""; + } + return ARB_SYS.arbBlockHash(n); + } + + function blockNumber() external view override returns (uint256) { + return ARB_SYS.arbBlockNumber(); + } + + function getCurrentL1Fee() external view override returns (uint256) { + return ARB_GAS.getCurrentTxL1GasFees(); + } + + function getMaxL1Fee(uint256 dataSize) external view override returns (uint256) { + (, uint256 perL1CalldataUnit, , , , ) = ARB_GAS.getPricesInWei(); + // TODO: Verify this is an accurate estimate + return perL1CalldataUnit * dataSize * 16; + } + + function getGasOverhead() + external + view + override + returns (uint256 chainModuleFixedOverhead, uint256 chainModulePerByteOverhead) + { + // TODO: Calculate + return (0, 0); + } +} diff --git a/contracts/src/v0.8/automation/dev/chains/ChainModuleBase.sol b/contracts/src/v0.8/automation/dev/chains/ChainModuleBase.sol new file mode 100644 index 00000000..f7e5b7b2 --- /dev/null +++ b/contracts/src/v0.8/automation/dev/chains/ChainModuleBase.sol @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.19; + +import {IChainModule} from "../interfaces/v2_2/IChainModule.sol"; + +contract ChainModuleBase is IChainModule { + function blockNumber() external view virtual returns (uint256) { + return block.number; + } + + function blockHash(uint256 n) external view virtual returns (bytes32) { + if (n >= block.number || block.number - n > 256) { + return ""; + } + return blockhash(n); + } + + function getCurrentL1Fee() external view virtual returns (uint256) { + return 0; + } + + function 
getMaxL1Fee(uint256) external view virtual returns (uint256) { + return 0; + } + + function getGasOverhead() + external + view + virtual + returns (uint256 chainModuleFixedOverhead, uint256 chainModulePerByteOverhead) + { + return (0, 0); + } +} diff --git a/contracts/src/v0.8/automation/dev/chains/OptimismModule.sol b/contracts/src/v0.8/automation/dev/chains/OptimismModule.sol new file mode 100644 index 00000000..0d6bc651 --- /dev/null +++ b/contracts/src/v0.8/automation/dev/chains/OptimismModule.sol @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.19; + +import {OVM_GasPriceOracle} from "../../../vendor/@eth-optimism/contracts/v0.8.9/contracts/L2/predeploys/OVM_GasPriceOracle.sol"; +import {ChainModuleBase} from "./ChainModuleBase.sol"; + +contract OptimismModule is ChainModuleBase { + /// @dev OP_L1_DATA_FEE_PADDING includes 35 bytes for L1 data padding for Optimism and BASE + bytes private constant OP_L1_DATA_FEE_PADDING = + hex"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"; + /// @dev OVM_GASPRICEORACLE_ADDR is the address of the OVM_GasPriceOracle precompile on Optimism. + /// @dev reference: https://community.optimism.io/docs/developers/build/transaction-fees/#estimating-the-l1-data-fee + address private constant OVM_GASPRICEORACLE_ADDR = 0x420000000000000000000000000000000000000F; + OVM_GasPriceOracle private constant OVM_GASPRICEORACLE = OVM_GasPriceOracle(OVM_GASPRICEORACLE_ADDR); + + function getCurrentL1Fee() external view override returns (uint256) { + // TODO: Verify this is accurate calculation with appropriate padding + return OVM_GASPRICEORACLE.getL1Fee(bytes.concat(msg.data, OP_L1_DATA_FEE_PADDING)); + } + + function getMaxL1Fee(uint256 dataSize) external view override returns (uint256) { + // fee is 4 per 0 byte, 16 per non-zero byte. Worst case we can have all non zero-bytes. + // Instead of setting bytes to non-zero, we initialize 'new bytes' of length 4*dataSize to cover for zero bytes. 
+ bytes memory txCallData = new bytes(4 * dataSize); + // TODO: Verify this is an accurate estimate + return OVM_GASPRICEORACLE.getL1Fee(bytes.concat(txCallData, OP_L1_DATA_FEE_PADDING)); + } + + function getGasOverhead() + external + view + override + returns (uint256 chainModuleFixedOverhead, uint256 chainModulePerByteOverhead) + { + // TODO: Calculate + return (0, 0); + } +} diff --git a/contracts/src/v0.8/automation/dev/chains/ScrollModule.sol b/contracts/src/v0.8/automation/dev/chains/ScrollModule.sol new file mode 100644 index 00000000..70294486 --- /dev/null +++ b/contracts/src/v0.8/automation/dev/chains/ScrollModule.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.19; + +import {IScrollL1GasPriceOracle} from "../../../vendor/@scroll-tech/contracts/src/L2/predeploys/IScrollL1GasPriceOracle.sol"; +import {ChainModuleBase} from "./ChainModuleBase.sol"; + +contract ScrollModule is ChainModuleBase { + /// @dev SCROLL_L1_FEE_DATA_PADDING includes 120 bytes for L1 data padding for Scroll + /// @dev according to testing, this padding allows automation registry to properly estimate L1 data fee with 3-5% buffer + /// @dev this MAY NOT work for a different product and this may get out of date if transmit function is changed + bytes private constant SCROLL_L1_FEE_DATA_PADDING = + hex"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"; + /// @dev SCROLL_ORACLE_ADDR is the address of the L1GasPriceOracle precompile on Scroll. 
+ /// @dev reference: https://docs.scroll.io/en/developers/transaction-fees-on-scroll/#estimating-the-l1-data-fee + address private constant SCROLL_ORACLE_ADDR = 0x5300000000000000000000000000000000000002; + IScrollL1GasPriceOracle private constant SCROLL_ORACLE = IScrollL1GasPriceOracle(SCROLL_ORACLE_ADDR); + + function getCurrentL1Fee() external view override returns (uint256) { + // TODO: Verify this is accurate calculation with appropriate padding + return SCROLL_ORACLE.getL1Fee(bytes.concat(msg.data, SCROLL_L1_FEE_DATA_PADDING)); + } + + function getMaxL1Fee(uint256 dataSize) external view override returns (uint256) { + // fee is 4 per 0 byte, 16 per non-zero byte. Worst case we can have all non zero-bytes. + // Instead of setting bytes to non-zero, we initialize 'new bytes' of length 4*dataSize to cover for zero bytes. + // this is the same as OP. + // TODO: Verify this is an accurate estimate + bytes memory txCallData = new bytes(4 * dataSize); + return SCROLL_ORACLE.getL1Fee(bytes.concat(txCallData, SCROLL_L1_FEE_DATA_PADDING)); + } + + function getGasOverhead() + external + view + override + returns (uint256 chainModuleFixedOverhead, uint256 chainModulePerByteOverhead) + { + // TODO: Calculate + return (0, 0); + } +} diff --git a/contracts/src/v0.8/automation/dev/interfaces/v2_2/IAutomationRegistryMaster.sol b/contracts/src/v0.8/automation/dev/interfaces/v2_2/IAutomationRegistryMaster.sol new file mode 100644 index 00000000..34b72f60 --- /dev/null +++ b/contracts/src/v0.8/automation/dev/interfaces/v2_2/IAutomationRegistryMaster.sol @@ -0,0 +1,339 @@ +// abi-checksum: 0xfc319f2ddde95d2e0226c913b9e417495effc4c8c847d01fe07e3de68ea8839c +// SPDX-License-Identifier: MIT +// !! THIS FILE WAS AUTOGENERATED BY abi-to-sol v0.6.6. SEE SOURCE BELOW. !! 
+pragma solidity ^0.8.4; + +interface IAutomationRegistryMaster { + error ArrayHasNoEntries(); + error CannotCancel(); + error CheckDataExceedsLimit(); + error ConfigDigestMismatch(); + error DuplicateEntry(); + error DuplicateSigners(); + error GasLimitCanOnlyIncrease(); + error GasLimitOutsideRange(); + error IncorrectNumberOfFaultyOracles(); + error IncorrectNumberOfSignatures(); + error IncorrectNumberOfSigners(); + error IndexOutOfRange(); + error InvalidDataLength(); + error InvalidPayee(); + error InvalidRecipient(); + error InvalidReport(); + error InvalidSigner(); + error InvalidTransmitter(); + error InvalidTrigger(); + error InvalidTriggerType(); + error MaxCheckDataSizeCanOnlyIncrease(); + error MaxPerformDataSizeCanOnlyIncrease(); + error MigrationNotPermitted(); + error NotAContract(); + error OnlyActiveSigners(); + error OnlyActiveTransmitters(); + error OnlyCallableByAdmin(); + error OnlyCallableByPLIToken(); + error OnlyCallableByOwnerOrAdmin(); + error OnlyCallableByOwnerOrRegistrar(); + error OnlyCallableByPayee(); + error OnlyCallableByProposedAdmin(); + error OnlyCallableByProposedPayee(); + error OnlyCallableByUpkeepPrivilegeManager(); + error OnlyPausedUpkeep(); + error OnlySimulatedBackend(); + error OnlyUnpausedUpkeep(); + error ParameterLengthError(); + error PaymentGreaterThanAllPLI(); + error ReentrantCall(); + error RegistryPaused(); + error RepeatedSigner(); + error RepeatedTransmitter(); + error TargetCheckReverted(bytes reason); + error TooManyOracles(); + error TranscoderNotSet(); + error UpkeepAlreadyExists(); + error UpkeepCancelled(); + error UpkeepNotCanceled(); + error UpkeepNotNeeded(); + error ValueNotChanged(); + event AdminPrivilegeConfigSet(address indexed admin, bytes privilegeConfig); + event CancelledUpkeepReport(uint256 indexed id, bytes trigger); + event ChainSpecificModuleUpdated(address newModule); + event ConfigSet( + uint32 previousConfigBlockNumber, + bytes32 configDigest, + uint64 configCount, + address[] 
signers, + address[] transmitters, + uint8 f, + bytes onchainConfig, + uint64 offchainConfigVersion, + bytes offchainConfig + ); + event DedupKeyAdded(bytes32 indexed dedupKey); + event FundsAdded(uint256 indexed id, address indexed from, uint96 amount); + event FundsWithdrawn(uint256 indexed id, uint256 amount, address to); + event InsufficientFundsUpkeepReport(uint256 indexed id, bytes trigger); + event OwnerFundsWithdrawn(uint96 amount); + event OwnershipTransferRequested(address indexed from, address indexed to); + event OwnershipTransferred(address indexed from, address indexed to); + event Paused(address account); + event PayeesUpdated(address[] transmitters, address[] payees); + event PayeeshipTransferRequested(address indexed transmitter, address indexed from, address indexed to); + event PayeeshipTransferred(address indexed transmitter, address indexed from, address indexed to); + event PaymentWithdrawn(address indexed transmitter, uint256 indexed amount, address indexed to, address payee); + event ReorgedUpkeepReport(uint256 indexed id, bytes trigger); + event StaleUpkeepReport(uint256 indexed id, bytes trigger); + event Transmitted(bytes32 configDigest, uint32 epoch); + event Unpaused(address account); + event UpkeepAdminTransferRequested(uint256 indexed id, address indexed from, address indexed to); + event UpkeepAdminTransferred(uint256 indexed id, address indexed from, address indexed to); + event UpkeepCanceled(uint256 indexed id, uint64 indexed atBlockHeight); + event UpkeepCheckDataSet(uint256 indexed id, bytes newCheckData); + event UpkeepGasLimitSet(uint256 indexed id, uint96 gasLimit); + event UpkeepMigrated(uint256 indexed id, uint256 remainingBalance, address destination); + event UpkeepOffchainConfigSet(uint256 indexed id, bytes offchainConfig); + event UpkeepPaused(uint256 indexed id); + event UpkeepPerformed( + uint256 indexed id, + bool indexed success, + uint96 totalPayment, + uint256 gasUsed, + uint256 gasOverhead, + bytes trigger + ); + 
event UpkeepPrivilegeConfigSet(uint256 indexed id, bytes privilegeConfig); + event UpkeepReceived(uint256 indexed id, uint256 startingBalance, address importedFrom); + event UpkeepRegistered(uint256 indexed id, uint32 performGas, address admin); + event UpkeepTriggerConfigSet(uint256 indexed id, bytes triggerConfig); + event UpkeepUnpaused(uint256 indexed id); + fallback() external; + function acceptOwnership() external; + function fallbackTo() external view returns (address); + function latestConfigDetails() external view returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest); + function latestConfigDigestAndEpoch() external view returns (bool scanLogs, bytes32 configDigest, uint32 epoch); + function onTokenTransfer(address sender, uint256 amount, bytes memory data) external; + function owner() external view returns (address); + function setConfig( + address[] memory signers, + address[] memory transmitters, + uint8 f, + bytes memory onchainConfigBytes, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) external; + function setConfigTypeSafe( + address[] memory signers, + address[] memory transmitters, + uint8 f, + AutomationRegistryBase2_2.OnchainConfig memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) external; + function simulatePerformUpkeep( + uint256 id, + bytes memory performData + ) external view returns (bool success, uint256 gasUsed); + function transferOwnership(address to) external; + function transmit( + bytes32[3] memory reportContext, + bytes memory rawReport, + bytes32[] memory rs, + bytes32[] memory ss, + bytes32 rawVs + ) external; + function typeAndVersion() external view returns (string memory); + + function addFunds(uint256 id, uint96 amount) external; + function cancelUpkeep(uint256 id) external; + function checkCallback( + uint256 id, + bytes[] memory values, + bytes memory extraData + ) external view returns (bool upkeepNeeded, bytes memory performData, uint8 
upkeepFailureReason, uint256 gasUsed); + function checkUpkeep( + uint256 id, + bytes memory triggerData + ) + external + view + returns ( + bool upkeepNeeded, + bytes memory performData, + uint8 upkeepFailureReason, + uint256 gasUsed, + uint256 gasLimit, + uint256 fastGasWei, + uint256 linkNative + ); + function checkUpkeep( + uint256 id + ) + external + view + returns ( + bool upkeepNeeded, + bytes memory performData, + uint8 upkeepFailureReason, + uint256 gasUsed, + uint256 gasLimit, + uint256 fastGasWei, + uint256 linkNative + ); + function executeCallback( + uint256 id, + bytes memory payload + ) external returns (bool upkeepNeeded, bytes memory performData, uint8 upkeepFailureReason, uint256 gasUsed); + function migrateUpkeeps(uint256[] memory ids, address destination) external; + function receiveUpkeeps(bytes memory encodedUpkeeps) external; + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + uint8 triggerType, + bytes memory checkData, + bytes memory triggerConfig, + bytes memory offchainConfig + ) external returns (uint256 id); + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + bytes memory checkData, + bytes memory offchainConfig + ) external returns (uint256 id); + function setUpkeepTriggerConfig(uint256 id, bytes memory triggerConfig) external; + + function acceptPayeeship(address transmitter) external; + function acceptUpkeepAdmin(uint256 id) external; + function getActiveUpkeepIDs(uint256 startIndex, uint256 maxCount) external view returns (uint256[] memory); + function getAdminPrivilegeConfig(address admin) external view returns (bytes memory); + function getAllowedReadOnlyAddress() external view returns (address); + function getAutomationForwarderLogic() external view returns (address); + function getBalance(uint256 id) external view returns (uint96 balance); + function getCancellationDelay() external pure returns (uint256); + function getChainModule() external view returns (address 
chainModule); + function getConditionalGasOverhead() external pure returns (uint256); + function getFastGasFeedAddress() external view returns (address); + function getForwarder(uint256 upkeepID) external view returns (address); + function getLinkAddress() external view returns (address); + function getLinkNativeFeedAddress() external view returns (address); + function getLogGasOverhead() external pure returns (uint256); + function getMaxPaymentForGas(uint8 triggerType, uint32 gasLimit) external view returns (uint96 maxPayment); + function getMinBalance(uint256 id) external view returns (uint96); + function getMinBalanceForUpkeep(uint256 id) external view returns (uint96 minBalance); + function getPeerRegistryMigrationPermission(address peer) external view returns (uint8); + function getPerPerformByteGasOverhead() external pure returns (uint256); + function getPerSignerGasOverhead() external pure returns (uint256); + function getReorgProtectionEnabled() external view returns (bool reorgProtectionEnabled); + function getSignerInfo(address query) external view returns (bool active, uint8 index); + function getState() + external + view + returns ( + AutomationRegistryBase2_2.State memory state, + AutomationRegistryBase2_2.OnchainConfigLegacy memory config, + address[] memory signers, + address[] memory transmitters, + uint8 f + ); + function getTransmitCalldataFixedBytesOverhead() external pure returns (uint256); + function getTransmitCalldataPerSignerBytesOverhead() external pure returns (uint256); + function getTransmitterInfo( + address query + ) external view returns (bool active, uint8 index, uint96 balance, uint96 lastCollected, address payee); + function getTriggerType(uint256 upkeepId) external pure returns (uint8); + function getUpkeep(uint256 id) external view returns (AutomationRegistryBase2_2.UpkeepInfo memory upkeepInfo); + function getUpkeepPrivilegeConfig(uint256 upkeepId) external view returns (bytes memory); + function getUpkeepTriggerConfig(uint256 
upkeepId) external view returns (bytes memory); + function hasDedupKey(bytes32 dedupKey) external view returns (bool); + function pause() external; + function pauseUpkeep(uint256 id) external; + function recoverFunds() external; + function setAdminPrivilegeConfig(address admin, bytes memory newPrivilegeConfig) external; + function setPayees(address[] memory payees) external; + function setPeerRegistryMigrationPermission(address peer, uint8 permission) external; + function setUpkeepCheckData(uint256 id, bytes memory newCheckData) external; + function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external; + function setUpkeepOffchainConfig(uint256 id, bytes memory config) external; + function setUpkeepPrivilegeConfig(uint256 upkeepId, bytes memory newPrivilegeConfig) external; + function transferPayeeship(address transmitter, address proposed) external; + function transferUpkeepAdmin(uint256 id, address proposed) external; + function unpause() external; + function unpauseUpkeep(uint256 id) external; + function upkeepTranscoderVersion() external pure returns (uint8); + function upkeepVersion() external pure returns (uint8); + function withdrawFunds(uint256 id, address to) external; + function withdrawOwnerFunds() external; + function withdrawPayment(address from, address to) external; +} + +interface AutomationRegistryBase2_2 { + struct OnchainConfig { + uint32 paymentPremiumPPB; + uint32 flatFeeMicroLink; + uint32 checkGasLimit; + uint24 stalenessSeconds; + uint16 gasCeilingMultiplier; + uint96 minUpkeepSpend; + uint32 maxPerformGas; + uint32 maxCheckDataSize; + uint32 maxPerformDataSize; + uint32 maxRevertDataSize; + uint256 fallbackGasPrice; + uint256 fallbackLinkPrice; + address transcoder; + address[] registrars; + address upkeepPrivilegeManager; + address chainModule; + bool reorgProtectionEnabled; + } + + struct State { + uint32 nonce; + uint96 ownerLinkBalance; + uint256 expectedLinkBalance; + uint96 totalPremium; + uint256 numUpkeeps; + uint32 configCount; 
+ uint32 latestConfigBlockNumber; + bytes32 latestConfigDigest; + uint32 latestEpoch; + bool paused; + } + + struct OnchainConfigLegacy { + uint32 paymentPremiumPPB; + uint32 flatFeeMicroLink; + uint32 checkGasLimit; + uint24 stalenessSeconds; + uint16 gasCeilingMultiplier; + uint96 minUpkeepSpend; + uint32 maxPerformGas; + uint32 maxCheckDataSize; + uint32 maxPerformDataSize; + uint32 maxRevertDataSize; + uint256 fallbackGasPrice; + uint256 fallbackLinkPrice; + address transcoder; + address[] registrars; + address upkeepPrivilegeManager; + } + + struct UpkeepInfo { + address target; + uint32 performGas; + bytes checkData; + uint96 balance; + address admin; + uint64 maxValidBlocknumber; + uint32 lastPerformedBlockNumber; + uint96 amountSpent; + bool paused; + bytes offchainConfig; + } +} + +// THIS FILE WAS AUTOGENERATED FROM THE FOLLOWING ABI JSON: +/* +[{"inputs":[{"internalType":"contract AutomationRegistryLogicB2_2","name":"logicA","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"name":"ArrayHasNoEntries","type":"error"},{"inputs":[],"name":"CannotCancel","type":"error"},{"inputs":[],"name":"CheckDataExceedsLimit","type":"error"},{"inputs":[],"name":"ConfigDigestMismatch","type":"error"},{"inputs":[],"name":"DuplicateEntry","type":"error"},{"inputs":[],"name":"DuplicateSigners","type":"error"},{"inputs":[],"name":"GasLimitCanOnlyIncrease","type":"error"},{"inputs":[],"name":"GasLimitOutsideRange","type":"error"},{"inputs":[],"name":"IncorrectNumberOfFaultyOracles","type":"error"},{"inputs":[],"name":"IncorrectNumberOfSignatures","type":"error"},{"inputs":[],"name":"IncorrectNumberOfSigners","type":"error"},{"inputs":[],"name":"IndexOutOfRange","type":"error"},{"inputs":[],"name":"InvalidDataLength","type":"error"},{"inputs":[],"name":"InvalidPayee","type":"error"},{"inputs":[],"name":"InvalidRecipient","type":"error"},{"inputs":[],"name":"InvalidReport","type":"error"},{"inputs":[],"name":"InvalidSigner","type":"error"},{"inp
uts":[],"name":"InvalidTransmitter","type":"error"},{"inputs":[],"name":"InvalidTrigger","type":"error"},{"inputs":[],"name":"InvalidTriggerType","type":"error"},{"inputs":[],"name":"MaxCheckDataSizeCanOnlyIncrease","type":"error"},{"inputs":[],"name":"MaxPerformDataSizeCanOnlyIncrease","type":"error"},{"inputs":[],"name":"MigrationNotPermitted","type":"error"},{"inputs":[],"name":"NotAContract","type":"error"},{"inputs":[],"name":"OnlyActiveSigners","type":"error"},{"inputs":[],"name":"OnlyActiveTransmitters","type":"error"},{"inputs":[],"name":"OnlyCallableByAdmin","type":"error"},{"inputs":[],"name":"OnlyCallableByPLIToken","type":"error"},{"inputs":[],"name":"OnlyCallableByOwnerOrAdmin","type":"error"},{"inputs":[],"name":"OnlyCallableByOwnerOrRegistrar","type":"error"},{"inputs":[],"name":"OnlyCallableByPayee","type":"error"},{"inputs":[],"name":"OnlyCallableByProposedAdmin","type":"error"},{"inputs":[],"name":"OnlyCallableByProposedPayee","type":"error"},{"inputs":[],"name":"OnlyCallableByUpkeepPrivilegeManager","type":"error"},{"inputs":[],"name":"OnlyPausedUpkeep","type":"error"},{"inputs":[],"name":"OnlySimulatedBackend","type":"error"},{"inputs":[],"name":"OnlyUnpausedUpkeep","type":"error"},{"inputs":[],"name":"ParameterLengthError","type":"error"},{"inputs":[],"name":"PaymentGreaterThanAllPLI","type":"error"},{"inputs":[],"name":"ReentrantCall","type":"error"},{"inputs":[],"name":"RegistryPaused","type":"error"},{"inputs":[],"name":"RepeatedSigner","type":"error"},{"inputs":[],"name":"RepeatedTransmitter","type":"error"},{"inputs":[{"internalType":"bytes","name":"reason","type":"bytes"}],"name":"TargetCheckReverted","type":"error"},{"inputs":[],"name":"TooManyOracles","type":"error"},{"inputs":[],"name":"TranscoderNotSet","type":"error"},{"inputs":[],"name":"UpkeepAlreadyExists","type":"error"},{"inputs":[],"name":"UpkeepCancelled","type":"error"},{"inputs":[],"name":"UpkeepNotCanceled","type":"error"},{"inputs":[],"name":"UpkeepNotNeeded","type":"error"
},{"inputs":[],"name":"ValueNotChanged","type":"error"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"admin","type":"address"},{"indexed":false,"internalType":"bytes","name":"privilegeConfig","type":"bytes"}],"name":"AdminPrivilegeConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"trigger","type":"bytes"}],"name":"CancelledUpkeepReport","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"newModule","type":"address"}],"name":"ChainSpecificModuleUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint32","name":"previousConfigBlockNumber","type":"uint32"},{"indexed":false,"internalType":"bytes32","name":"configDigest","type":"bytes32"},{"indexed":false,"internalType":"uint64","name":"configCount","type":"uint64"},{"indexed":false,"internalType":"address[]","name":"signers","type":"address[]"},{"indexed":false,"internalType":"address[]","name":"transmitters","type":"address[]"},{"indexed":false,"internalType":"uint8","name":"f","type":"uint8"},{"indexed":false,"internalType":"bytes","name":"onchainConfig","type":"bytes"},{"indexed":false,"internalType":"uint64","name":"offchainConfigVersion","type":"uint64"},{"indexed":false,"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"name":"ConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"dedupKey","type":"bytes32"}],"name":"DedupKeyAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":false,"internalType":"uint96","name":"amount","type":"uint96"}],"name":"FundsAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256
"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"},{"indexed":false,"internalType":"address","name":"to","type":"address"}],"name":"FundsWithdrawn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"trigger","type":"bytes"}],"name":"InsufficientFundsUpkeepReport","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint96","name":"amount","type":"uint96"}],"name":"OwnerFundsWithdrawn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"OwnershipTransferRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"account","type":"address"}],"name":"Paused","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address[]","name":"transmitters","type":"address[]"},{"indexed":false,"internalType":"address[]","name":"payees","type":"address[]"}],"name":"PayeesUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transmitter","type":"address"},{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"PayeeshipTransferRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transmitter","type":"address"},{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"PayeeshipTransferred","type":"event"},{"an
onymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transmitter","type":"address"},{"indexed":true,"internalType":"uint256","name":"amount","type":"uint256"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"address","name":"payee","type":"address"}],"name":"PaymentWithdrawn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"trigger","type":"bytes"}],"name":"ReorgedUpkeepReport","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"trigger","type":"bytes"}],"name":"StaleUpkeepReport","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"bytes32","name":"configDigest","type":"bytes32"},{"indexed":false,"internalType":"uint32","name":"epoch","type":"uint32"}],"name":"Transmitted","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"account","type":"address"}],"name":"Unpaused","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"UpkeepAdminTransferRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"UpkeepAdminTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"uint64","name":"atBlockHeight","type":"uint64"}],"name":"UpkeepCanceled","type":"event"},{"anonymous":false,
"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"newCheckData","type":"bytes"}],"name":"UpkeepCheckDataSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"uint96","name":"gasLimit","type":"uint96"}],"name":"UpkeepGasLimitSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"remainingBalance","type":"uint256"},{"indexed":false,"internalType":"address","name":"destination","type":"address"}],"name":"UpkeepMigrated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"name":"UpkeepOffchainConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"}],"name":"UpkeepPaused","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"bool","name":"success","type":"bool"},{"indexed":false,"internalType":"uint96","name":"totalPayment","type":"uint96"},{"indexed":false,"internalType":"uint256","name":"gasUsed","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"gasOverhead","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"trigger","type":"bytes"}],"name":"UpkeepPerformed","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"privilegeConfig","type":"bytes"}],"name":"UpkeepPrivilegeConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"uint256","n
ame":"startingBalance","type":"uint256"},{"indexed":false,"internalType":"address","name":"importedFrom","type":"address"}],"name":"UpkeepReceived","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"uint32","name":"performGas","type":"uint32"},{"indexed":false,"internalType":"address","name":"admin","type":"address"}],"name":"UpkeepRegistered","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"triggerConfig","type":"bytes"}],"name":"UpkeepTriggerConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"}],"name":"UpkeepUnpaused","type":"event"},{"stateMutability":"nonpayable","type":"fallback"},{"inputs":[],"name":"acceptOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"fallbackTo","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestConfigDetails","outputs":[{"internalType":"uint32","name":"configCount","type":"uint32"},{"internalType":"uint32","name":"blockNumber","type":"uint32"},{"internalType":"bytes32","name":"configDigest","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestConfigDigestAndEpoch","outputs":[{"internalType":"bool","name":"scanLogs","type":"bool"},{"internalType":"bytes32","name":"configDigest","type":"bytes32"},{"internalType":"uint32","name":"epoch","type":"uint32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"sender","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"onTokenTransfer","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"owner",
"outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address[]","name":"signers","type":"address[]"},{"internalType":"address[]","name":"transmitters","type":"address[]"},{"internalType":"uint8","name":"f","type":"uint8"},{"internalType":"bytes","name":"onchainConfigBytes","type":"bytes"},{"internalType":"uint64","name":"offchainConfigVersion","type":"uint64"},{"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"name":"setConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address[]","name":"signers","type":"address[]"},{"internalType":"address[]","name":"transmitters","type":"address[]"},{"internalType":"uint8","name":"f","type":"uint8"},{"components":[{"internalType":"uint32","name":"paymentPremiumPPB","type":"uint32"},{"internalType":"uint32","name":"flatFeeMicroLink","type":"uint32"},{"internalType":"uint32","name":"checkGasLimit","type":"uint32"},{"internalType":"uint24","name":"stalenessSeconds","type":"uint24"},{"internalType":"uint16","name":"gasCeilingMultiplier","type":"uint16"},{"internalType":"uint96","name":"minUpkeepSpend","type":"uint96"},{"internalType":"uint32","name":"maxPerformGas","type":"uint32"},{"internalType":"uint32","name":"maxCheckDataSize","type":"uint32"},{"internalType":"uint32","name":"maxPerformDataSize","type":"uint32"},{"internalType":"uint32","name":"maxRevertDataSize","type":"uint32"},{"internalType":"uint256","name":"fallbackGasPrice","type":"uint256"},{"internalType":"uint256","name":"fallbackLinkPrice","type":"uint256"},{"internalType":"address","name":"transcoder","type":"address"},{"internalType":"address[]","name":"registrars","type":"address[]"},{"internalType":"address","name":"upkeepPrivilegeManager","type":"address"},{"internalType":"contract 
IChainModule","name":"chainModule","type":"address"},{"internalType":"bool","name":"reorgProtectionEnabled","type":"bool"}],"internalType":"struct AutomationRegistryBase2_2.OnchainConfig","name":"onchainConfig","type":"tuple"},{"internalType":"uint64","name":"offchainConfigVersion","type":"uint64"},{"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"name":"setConfigTypeSafe","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes","name":"performData","type":"bytes"}],"name":"simulatePerformUpkeep","outputs":[{"internalType":"bool","name":"success","type":"bool"},{"internalType":"uint256","name":"gasUsed","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"transferOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32[3]","name":"reportContext","type":"bytes32[3]"},{"internalType":"bytes","name":"rawReport","type":"bytes"},{"internalType":"bytes32[]","name":"rs","type":"bytes32[]"},{"internalType":"bytes32[]","name":"ss","type":"bytes32[]"},{"internalType":"bytes32","name":"rawVs","type":"bytes32"}],"name":"transmit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"typeAndVersion","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"contract 
AutomationRegistryLogicB2_2","name":"logicB","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint96","name":"amount","type":"uint96"}],"name":"addFunds","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"cancelUpkeep","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes[]","name":"values","type":"bytes[]"},{"internalType":"bytes","name":"extraData","type":"bytes"}],"name":"checkCallback","outputs":[{"internalType":"bool","name":"upkeepNeeded","type":"bool"},{"internalType":"bytes","name":"performData","type":"bytes"},{"internalType":"enum AutomationRegistryBase2_2.UpkeepFailureReason","name":"upkeepFailureReason","type":"uint8"},{"internalType":"uint256","name":"gasUsed","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes","name":"triggerData","type":"bytes"}],"name":"checkUpkeep","outputs":[{"internalType":"bool","name":"upkeepNeeded","type":"bool"},{"internalType":"bytes","name":"performData","type":"bytes"},{"internalType":"enum AutomationRegistryBase2_2.UpkeepFailureReason","name":"upkeepFailureReason","type":"uint8"},{"internalType":"uint256","name":"gasUsed","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"uint256","name":"fastGasWei","type":"uint256"},{"internalType":"uint256","name":"linkNative","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"checkUpkeep","outputs":[{"internalType":"bool","name":"upkeepNeeded","type":"bool"},{"internalType":"bytes","name":"performData","type":"bytes"},{"internalType":"enum 
AutomationRegistryBase2_2.UpkeepFailureReason","name":"upkeepFailureReason","type":"uint8"},{"internalType":"uint256","name":"gasUsed","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"uint256","name":"fastGasWei","type":"uint256"},{"internalType":"uint256","name":"linkNative","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes","name":"payload","type":"bytes"}],"name":"executeCallback","outputs":[{"internalType":"bool","name":"upkeepNeeded","type":"bool"},{"internalType":"bytes","name":"performData","type":"bytes"},{"internalType":"enum AutomationRegistryBase2_2.UpkeepFailureReason","name":"upkeepFailureReason","type":"uint8"},{"internalType":"uint256","name":"gasUsed","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256[]","name":"ids","type":"uint256[]"},{"internalType":"address","name":"destination","type":"address"}],"name":"migrateUpkeeps","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"encodedUpkeeps","type":"bytes"}],"name":"receiveUpkeeps","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"target","type":"address"},{"internalType":"uint32","name":"gasLimit","type":"uint32"},{"internalType":"address","name":"admin","type":"address"},{"internalType":"enum 
AutomationRegistryBase2_2.Trigger","name":"triggerType","type":"uint8"},{"internalType":"bytes","name":"checkData","type":"bytes"},{"internalType":"bytes","name":"triggerConfig","type":"bytes"},{"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"name":"registerUpkeep","outputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"target","type":"address"},{"internalType":"uint32","name":"gasLimit","type":"uint32"},{"internalType":"address","name":"admin","type":"address"},{"internalType":"bytes","name":"checkData","type":"bytes"},{"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"name":"registerUpkeep","outputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes","name":"triggerConfig","type":"bytes"}],"name":"setUpkeepTriggerConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"link","type":"address"},{"internalType":"address","name":"linkNativeFeed","type":"address"},{"internalType":"address","name":"fastGasFeed","type":"address"},{"internalType":"address","name":"automationForwarderLogic","type":"address"},{"internalType":"address","name":"allowedReadOnlyAddress","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"address","name":"transmitter","type":"address"}],"name":"acceptPayeeship","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"acceptUpkeepAdmin","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"startIndex","type":"uint256"},{"internalType":"uint256","name":"maxCount","type":"uint256"}],"name":"getActiveUpkeepIDs","outputs":[{"inter
nalType":"uint256[]","name":"","type":"uint256[]"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"admin","type":"address"}],"name":"getAdminPrivilegeConfig","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getAllowedReadOnlyAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getAutomationForwarderLogic","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"getBalance","outputs":[{"internalType":"uint96","name":"balance","type":"uint96"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getCancellationDelay","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"getChainModule","outputs":[{"internalType":"contract IChainModule","name":"chainModule","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getConditionalGasOverhead","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"getFastGasFeedAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"upkeepID","type":"uint256"}],"name":"getForwarder","outputs":[{"internalType":"contract 
IAutomationForwarder","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getLinkAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getLinkNativeFeedAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getLogGasOverhead","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"enum AutomationRegistryBase2_2.Trigger","name":"triggerType","type":"uint8"},{"internalType":"uint32","name":"gasLimit","type":"uint32"}],"name":"getMaxPaymentForGas","outputs":[{"internalType":"uint96","name":"maxPayment","type":"uint96"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"getMinBalance","outputs":[{"internalType":"uint96","name":"","type":"uint96"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"getMinBalanceForUpkeep","outputs":[{"internalType":"uint96","name":"minBalance","type":"uint96"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"peer","type":"address"}],"name":"getPeerRegistryMigrationPermission","outputs":[{"internalType":"enum 
AutomationRegistryBase2_2.MigrationPermission","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getPerPerformByteGasOverhead","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"getPerSignerGasOverhead","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"getReorgProtectionEnabled","outputs":[{"internalType":"bool","name":"reorgProtectionEnabled","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"query","type":"address"}],"name":"getSignerInfo","outputs":[{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint8","name":"index","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getState","outputs":[{"components":[{"internalType":"uint32","name":"nonce","type":"uint32"},{"internalType":"uint96","name":"ownerLinkBalance","type":"uint96"},{"internalType":"uint256","name":"expectedLinkBalance","type":"uint256"},{"internalType":"uint96","name":"totalPremium","type":"uint96"},{"internalType":"uint256","name":"numUpkeeps","type":"uint256"},{"internalType":"uint32","name":"configCount","type":"uint32"},{"internalType":"uint32","name":"latestConfigBlockNumber","type":"uint32"},{"internalType":"bytes32","name":"latestConfigDigest","type":"bytes32"},{"internalType":"uint32","name":"latestEpoch","type":"uint32"},{"internalType":"bool","name":"paused","type":"bool"}],"internalType":"struct 
AutomationRegistryBase2_2.State","name":"state","type":"tuple"},{"components":[{"internalType":"uint32","name":"paymentPremiumPPB","type":"uint32"},{"internalType":"uint32","name":"flatFeeMicroLink","type":"uint32"},{"internalType":"uint32","name":"checkGasLimit","type":"uint32"},{"internalType":"uint24","name":"stalenessSeconds","type":"uint24"},{"internalType":"uint16","name":"gasCeilingMultiplier","type":"uint16"},{"internalType":"uint96","name":"minUpkeepSpend","type":"uint96"},{"internalType":"uint32","name":"maxPerformGas","type":"uint32"},{"internalType":"uint32","name":"maxCheckDataSize","type":"uint32"},{"internalType":"uint32","name":"maxPerformDataSize","type":"uint32"},{"internalType":"uint32","name":"maxRevertDataSize","type":"uint32"},{"internalType":"uint256","name":"fallbackGasPrice","type":"uint256"},{"internalType":"uint256","name":"fallbackLinkPrice","type":"uint256"},{"internalType":"address","name":"transcoder","type":"address"},{"internalType":"address[]","name":"registrars","type":"address[]"},{"internalType":"address","name":"upkeepPrivilegeManager","type":"address"}],"internalType":"struct 
AutomationRegistryBase2_2.OnchainConfigLegacy","name":"config","type":"tuple"},{"internalType":"address[]","name":"signers","type":"address[]"},{"internalType":"address[]","name":"transmitters","type":"address[]"},{"internalType":"uint8","name":"f","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getTransmitCalldataFixedBytesOverhead","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"getTransmitCalldataPerSignerBytesOverhead","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"address","name":"query","type":"address"}],"name":"getTransmitterInfo","outputs":[{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint8","name":"index","type":"uint8"},{"internalType":"uint96","name":"balance","type":"uint96"},{"internalType":"uint96","name":"lastCollected","type":"uint96"},{"internalType":"address","name":"payee","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"upkeepId","type":"uint256"}],"name":"getTriggerType","outputs":[{"internalType":"enum 
AutomationRegistryBase2_2.Trigger","name":"","type":"uint8"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"getUpkeep","outputs":[{"components":[{"internalType":"address","name":"target","type":"address"},{"internalType":"uint32","name":"performGas","type":"uint32"},{"internalType":"bytes","name":"checkData","type":"bytes"},{"internalType":"uint96","name":"balance","type":"uint96"},{"internalType":"address","name":"admin","type":"address"},{"internalType":"uint64","name":"maxValidBlocknumber","type":"uint64"},{"internalType":"uint32","name":"lastPerformedBlockNumber","type":"uint32"},{"internalType":"uint96","name":"amountSpent","type":"uint96"},{"internalType":"bool","name":"paused","type":"bool"},{"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"internalType":"struct AutomationRegistryBase2_2.UpkeepInfo","name":"upkeepInfo","type":"tuple"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"upkeepId","type":"uint256"}],"name":"getUpkeepPrivilegeConfig","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"upkeepId","type":"uint256"}],"name":"getUpkeepTriggerConfig","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"dedupKey","type":"bytes32"}],"name":"hasDedupKey","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"pause","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"pauseUpkeep","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"recoverFunds","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"
admin","type":"address"},{"internalType":"bytes","name":"newPrivilegeConfig","type":"bytes"}],"name":"setAdminPrivilegeConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address[]","name":"payees","type":"address[]"}],"name":"setPayees","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"peer","type":"address"},{"internalType":"enum AutomationRegistryBase2_2.MigrationPermission","name":"permission","type":"uint8"}],"name":"setPeerRegistryMigrationPermission","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes","name":"newCheckData","type":"bytes"}],"name":"setUpkeepCheckData","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint32","name":"gasLimit","type":"uint32"}],"name":"setUpkeepGasLimit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes","name":"config","type":"bytes"}],"name":"setUpkeepOffchainConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"upkeepId","type":"uint256"},{"internalType":"bytes","name":"newPrivilegeConfig","type":"bytes"}],"name":"setUpkeepPrivilegeConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"transmitter","type":"address"},{"internalType":"address","name":"proposed","type":"address"}],"name":"transferPayeeship","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"address","name":"proposed","type":"address"}],"name":"transferUpkeepAdmin","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"
unpause","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"unpauseUpkeep","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"upkeepTranscoderVersion","outputs":[{"internalType":"enum UpkeepFormat","name":"","type":"uint8"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"upkeepVersion","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"address","name":"to","type":"address"}],"name":"withdrawFunds","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"withdrawOwnerFunds","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"}],"name":"withdrawPayment","outputs":[],"stateMutability":"nonpayable","type":"function"}] +*/ diff --git a/contracts/src/v0.8/automation/dev/interfaces/v2_2/IChainModule.sol b/contracts/src/v0.8/automation/dev/interfaces/v2_2/IChainModule.sol new file mode 100644 index 00000000..e3a4b32c --- /dev/null +++ b/contracts/src/v0.8/automation/dev/interfaces/v2_2/IChainModule.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +interface IChainModule { + // retrieve the native block number of a chain. e.g. L2 block number on Arbitrum + function blockNumber() external view returns (uint256); + + // retrieve the native block hash of a chain. + function blockHash(uint256) external view returns (bytes32); + + // retrieve the L1 data fee for a L2 transaction. it should return 0 for L1 chains and + // L2 chains which don't have L1 fee component. it uses msg.data to estimate L1 data so + // it must be used with a transaction. Return value in wei. 
+ function getCurrentL1Fee() external view returns (uint256); + + // retrieve the L1 data fee for a L2 simulation. it should return 0 for L1 chains and + // L2 chains which don't have L1 fee component. Return value in wei. + function getMaxL1Fee(uint256 dataSize) external view returns (uint256); + + // Returns an upper bound on execution gas cost for one invocation of blockNumber(), + // one invocation of blockHash() and one invocation of getCurrentL1Fee(). + // Returns two values, first value indicates a fixed cost and the second value is + // the cost per msg.data byte (As some chain module's getCurrentL1Fee execution cost + // scales with calldata size) + function getGasOverhead() + external + view + returns (uint256 chainModuleFixedOverhead, uint256 chainModulePerByteOverhead); +} diff --git a/contracts/src/v0.8/automation/dev/test/AutomationRegistry2_2.t.sol b/contracts/src/v0.8/automation/dev/test/AutomationRegistry2_2.t.sol new file mode 100644 index 00000000..4b181df2 --- /dev/null +++ b/contracts/src/v0.8/automation/dev/test/AutomationRegistry2_2.t.sol @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.19; + +import {AutomationForwarderLogic} from "../../AutomationForwarderLogic.sol"; +import {BaseTest} from "./BaseTest.t.sol"; +import {AutomationRegistry2_2} from "../v2_2/AutomationRegistry2_2.sol"; +import {AutomationRegistryBase2_2} from "../v2_2/AutomationRegistryBase2_2.sol"; +import {AutomationRegistryLogicA2_2} from "../v2_2/AutomationRegistryLogicA2_2.sol"; +import {AutomationRegistryLogicB2_2} from "../v2_2/AutomationRegistryLogicB2_2.sol"; +import {IAutomationRegistryMaster} from "../interfaces/v2_2/IAutomationRegistryMaster.sol"; +import {ChainModuleBase} from "../chains/ChainModuleBase.sol"; + +contract AutomationRegistry2_2_SetUp is BaseTest { + address internal constant PLI_ETH_FEED = 0x1111111111111111111111111111111111111110; + address internal constant FAST_GAS_FEED = 0x1111111111111111111111111111111111111112; + 
address internal constant PLI_TOKEN = 0x1111111111111111111111111111111111111113; + address internal constant ZERO_ADDRESS = address(0); + + // Signer private keys used for these test + uint256 internal constant PRIVATE0 = 0x7b2e97fe057e6de99d6872a2ef2abf52c9b4469bc848c2465ac3fcd8d336e81d; + uint256 internal constant PRIVATE1 = 0xab56160806b05ef1796789248e1d7f34a6465c5280899159d645218cd216cee6; + uint256 internal constant PRIVATE2 = 0x6ec7caa8406a49b76736602810e0a2871959fbbb675e23a8590839e4717f1f7f; + uint256 internal constant PRIVATE3 = 0x80f14b11da94ae7f29d9a7713ea13dc838e31960a5c0f2baf45ed458947b730a; + + uint64 internal constant OFFCHAIN_CONFIG_VERSION = 30; // 2 for OCR2 + uint8 internal constant F = 1; + + address[] internal s_valid_signers; + address[] internal s_valid_transmitters; + address[] internal s_registrars; + + IAutomationRegistryMaster internal registryMaster; + + function setUp() public override { + s_valid_transmitters = new address[](4); + for (uint160 i = 0; i < 4; ++i) { + s_valid_transmitters[i] = address(4 + i); + } + + s_valid_signers = new address[](4); + s_valid_signers[0] = vm.addr(PRIVATE0); //0xc110458BE52CaA6bB68E66969C3218A4D9Db0211 + s_valid_signers[1] = vm.addr(PRIVATE1); //0xc110a19c08f1da7F5FfB281dc93630923F8E3719 + s_valid_signers[2] = vm.addr(PRIVATE2); //0xc110fdF6e8fD679C7Cc11602d1cd829211A18e9b + s_valid_signers[3] = vm.addr(PRIVATE3); //0xc11028017c9b445B6bF8aE7da951B5cC28B326C0 + + s_registrars = new address[](1); + s_registrars[0] = 0x3a0eDE26aa188BFE00b9A0C9A431A1a0CA5f7966; + + AutomationForwarderLogic forwarderLogic = new AutomationForwarderLogic(); + AutomationRegistryLogicB2_2 logicB2_2 = new AutomationRegistryLogicB2_2( + PLI_TOKEN, + PLI_ETH_FEED, + FAST_GAS_FEED, + address(forwarderLogic), + ZERO_ADDRESS + ); + AutomationRegistryLogicA2_2 logicA2_2 = new AutomationRegistryLogicA2_2(logicB2_2); + registryMaster = IAutomationRegistryMaster( + address(new 
AutomationRegistry2_2(AutomationRegistryLogicB2_2(address(logicA2_2)))) + ); + } +} + +contract AutomationRegistry2_2_LatestConfigDetails is AutomationRegistry2_2_SetUp { + function testGet() public { + (uint32 configCount, uint32 blockNumber, bytes32 configDigest) = registryMaster.latestConfigDetails(); + assertEq(configCount, 0); + assertEq(blockNumber, 0); + assertEq(configDigest, ""); + } +} + +contract AutomationRegistry2_2_CheckUpkeep is AutomationRegistry2_2_SetUp { + function testPreventExecutionOnCheckUpkeep() public { + uint256 id = 1; + bytes memory triggerData = abi.encodePacked("trigger_data"); + + // The tx.origin is the DEFAULT_SENDER (0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38) of foundry + // Expecting a revert since the tx.origin is not address(0) + vm.expectRevert(abi.encodeWithSelector(IAutomationRegistryMaster.OnlySimulatedBackend.selector)); + registryMaster.checkUpkeep(id, triggerData); + } +} + +contract AutomationRegistry2_2_SetConfig is AutomationRegistry2_2_SetUp { + event ConfigSet( + uint32 previousConfigBlockNumber, + bytes32 configDigest, + uint64 configCount, + address[] signers, + address[] transmitters, + uint8 f, + bytes onchainConfig, + uint64 offchainConfigVersion, + bytes offchainConfig + ); + + function testSetConfigSuccess() public { + (uint32 configCount, , ) = registryMaster.latestConfigDetails(); + assertEq(configCount, 0); + ChainModuleBase module = new ChainModuleBase(); + + AutomationRegistryBase2_2.OnchainConfig memory cfg = AutomationRegistryBase2_2.OnchainConfig({ + paymentPremiumPPB: 10_000, + flatFeeMicroLink: 40_000, + checkGasLimit: 5_000_000, + stalenessSeconds: 90_000, + gasCeilingMultiplier: 0, + minUpkeepSpend: 0, + maxPerformGas: 10_000_000, + maxCheckDataSize: 5_000, + maxPerformDataSize: 5_000, + maxRevertDataSize: 5_000, + fallbackGasPrice: 20_000_000_000, + fallbackLinkPrice: 200_000_000_000, + transcoder: 0xB1e66855FD67f6e85F0f0fA38cd6fBABdf00923c, + registrars: s_registrars, + upkeepPrivilegeManager: 
0xD9c855F08A7e460691F41bBDDe6eC310bc0593D8, + chainModule: module, + reorgProtectionEnabled: true + }); + bytes memory onchainConfigBytes = abi.encode(cfg); + + uint256 a = 1234; + address b = address(0); + bytes memory offchainConfigBytes = abi.encode(a, b); + bytes32 configDigest = _configDigestFromConfigData( + block.chainid, + address(registryMaster), + ++configCount, + s_valid_signers, + s_valid_transmitters, + F, + onchainConfigBytes, + OFFCHAIN_CONFIG_VERSION, + offchainConfigBytes + ); + + vm.expectEmit(); + emit ConfigSet( + 0, + configDigest, + configCount, + s_valid_signers, + s_valid_transmitters, + F, + onchainConfigBytes, + OFFCHAIN_CONFIG_VERSION, + offchainConfigBytes + ); + + registryMaster.setConfig( + s_valid_signers, + s_valid_transmitters, + F, + onchainConfigBytes, + OFFCHAIN_CONFIG_VERSION, + offchainConfigBytes + ); + + (, , address[] memory signers, address[] memory transmitters, uint8 f) = registryMaster.getState(); + + assertEq(signers, s_valid_signers); + assertEq(transmitters, s_valid_transmitters); + assertEq(f, F); + } + + function _configDigestFromConfigData( + uint256 chainId, + address contractAddress, + uint64 configCount, + address[] memory signers, + address[] memory transmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) internal pure returns (bytes32) { + uint256 h = uint256( + keccak256( + abi.encode( + chainId, + contractAddress, + configCount, + signers, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ) + ) + ); + uint256 prefixMask = type(uint256).max << (256 - 16); // 0xFFFF00..00 + uint256 prefix = 0x0001 << (256 - 16); // 0x000100..00 + return bytes32((prefix & prefixMask) | (h & ~prefixMask)); + } +} diff --git a/contracts/src/v0.8/automation/dev/test/BaseTest.t.sol b/contracts/src/v0.8/automation/dev/test/BaseTest.t.sol new file mode 100644 index 00000000..790afcff --- /dev/null +++ 
b/contracts/src/v0.8/automation/dev/test/BaseTest.t.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import "forge-std/Test.sol"; + +contract BaseTest is Test { + address internal OWNER = 0x00007e64E1fB0C487F25dd6D3601ff6aF8d32e4e; + + function setUp() public virtual { + vm.startPrank(OWNER); + deal(OWNER, 1e20); + } +} diff --git a/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistrar2_2.sol b/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistrar2_2.sol new file mode 100644 index 00000000..71cd8944 --- /dev/null +++ b/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistrar2_2.sol @@ -0,0 +1,535 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.19; + +import {LinkTokenInterface} from "../../../shared/interfaces/LinkTokenInterface.sol"; +import {IAutomationRegistryMaster} from "../interfaces/v2_2/IAutomationRegistryMaster.sol"; +import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol"; +import {ConfirmedOwner} from "../../../shared/access/ConfirmedOwner.sol"; +import {IERC677Receiver} from "../../../shared/interfaces/IERC677Receiver.sol"; + +/** + * @notice Contract to accept requests for upkeep registrations + * @dev There are 2 registration workflows in this contract + * Flow 1. auto approve OFF / manual registration - UI calls `register` function on this contract, this contract owner at a later time then manually + * calls `approve` to register upkeep and emit events to inform UI and others interested. + * Flow 2. auto approve ON / real time registration - UI calls `register` function as before, which calls the `registerUpkeep` function directly on + * keeper registry and then emits approved event to finish the flow automatically without manual intervention. + * The idea is to have same interface(functions,events) for UI or anyone using this contract irrespective of auto approve being enabled or not. 
+ * they can just listen to `RegistrationRequested` & `RegistrationApproved` events and know the status on registrations. + */ +contract AutomationRegistrar2_2 is TypeAndVersionInterface, ConfirmedOwner, IERC677Receiver { + /** + * DISABLED: No auto approvals, all new upkeeps should be approved manually. + * ENABLED_SENDER_ALLOWLIST: Auto approvals for allowed senders subject to max allowed. Manual for rest. + * ENABLED_ALL: Auto approvals for all new upkeeps subject to max allowed. + */ + enum AutoApproveType { + DISABLED, + ENABLED_SENDER_ALLOWLIST, + ENABLED_ALL + } + + bytes4 private constant REGISTER_REQUEST_SELECTOR = this.register.selector; + + mapping(bytes32 => PendingRequest) private s_pendingRequests; + mapping(uint8 => TriggerRegistrationStorage) private s_triggerRegistrations; + + LinkTokenInterface public immutable PLI; + + /** + * @notice versions: + * - KeeperRegistrar 2.1.0: Update for compatability with registry 2.1.0 + * Add auto approval levels by type + * - KeeperRegistrar 2.0.0: Remove source from register + * Breaks our example of "Register an Upkeep using your own deployed contract" + * - KeeperRegistrar 1.1.0: Add functionality for sender allowlist in auto approve + * : Remove rate limit and add max allowed for auto approve + * - KeeperRegistrar 1.0.0: initial release + */ + string public constant override typeAndVersion = "AutomationRegistrar 2.1.0"; + + /** + * @notice TriggerRegistrationStorage stores the auto-approval levels for upkeeps by type + * @member autoApproveType the auto approval setting (see enum) + * @member autoApproveMaxAllowed the max number of upkeeps that can be auto approved of this type + * @member approvedCount the count of upkeeps auto approved of this type + */ + struct TriggerRegistrationStorage { + AutoApproveType autoApproveType; + uint32 autoApproveMaxAllowed; + uint32 approvedCount; + } + + /** + * @notice InitialTriggerConfig configures the auto-approval levels for upkeeps by trigger type + * @dev this struct 
is only used in the constructor to set the initial values for various trigger configs + * @member triggerType the upkeep type to configure + * @member autoApproveType the auto approval setting (see enum) + * @member autoApproveMaxAllowed the max number of upkeeps that can be auto approved of this type + */ + struct InitialTriggerConfig { + uint8 triggerType; + AutoApproveType autoApproveType; + uint32 autoApproveMaxAllowed; + } + + struct RegistrarConfig { + IAutomationRegistryMaster AutomationRegistry; + uint96 minPLIJuels; + } + + struct PendingRequest { + address admin; + uint96 balance; + } + + struct RegistrationParams { + string name; + bytes encryptedEmail; + address upkeepContract; + uint32 gasLimit; + address adminAddress; + uint8 triggerType; + bytes checkData; + bytes triggerConfig; + bytes offchainConfig; + uint96 amount; + } + + RegistrarConfig private s_config; + // Only applicable if s_config.configType is ENABLED_SENDER_ALLOWLIST + mapping(address => bool) private s_autoApproveAllowedSenders; + + event RegistrationRequested( + bytes32 indexed hash, + string name, + bytes encryptedEmail, + address indexed upkeepContract, + uint32 gasLimit, + address adminAddress, + uint8 triggerType, + bytes triggerConfig, + bytes offchainConfig, + bytes checkData, + uint96 amount + ); + + event RegistrationApproved(bytes32 indexed hash, string displayName, uint256 indexed upkeepId); + + event RegistrationRejected(bytes32 indexed hash); + + event AutoApproveAllowedSenderSet(address indexed senderAddress, bool allowed); + + event ConfigChanged(address AutomationRegistry, uint96 minPLIJuels); + + event TriggerConfigSet(uint8 triggerType, AutoApproveType autoApproveType, uint32 autoApproveMaxAllowed); + + error InvalidAdminAddress(); + error RequestNotFound(); + error HashMismatch(); + error OnlyAdminOrOwner(); + error InsufficientPayment(); + error RegistrationRequestFailed(); + error OnlyLink(); + error AmountMismatch(); + error SenderMismatch(); + error 
FunctionNotPermitted(); + error LinkTransferFailed(address to); + error InvalidDataLength(); + + /** + * @param PLIAddress Address of Link token + * @param AutomationRegistry keeper registry address + * @param minPLIJuels minimum PLI that new registrations should fund their upkeep with + * @param triggerConfigs the initial config for individual triggers + */ + constructor( + address PLIAddress, + address AutomationRegistry, + uint96 minPLIJuels, + InitialTriggerConfig[] memory triggerConfigs + ) ConfirmedOwner(msg.sender) { + PLI = LinkTokenInterface(PLIAddress); + setConfig(AutomationRegistry, minPLIJuels); + for (uint256 idx = 0; idx < triggerConfigs.length; idx++) { + setTriggerConfig( + triggerConfigs[idx].triggerType, + triggerConfigs[idx].autoApproveType, + triggerConfigs[idx].autoApproveMaxAllowed + ); + } + } + + //EXTERNAL + + /** + * @notice register can only be called through transferAndCall on PLI contract + * @param name string of the upkeep to be registered + * @param encryptedEmail email address of upkeep contact + * @param upkeepContract address to perform upkeep on + * @param gasLimit amount of gas to provide the target contract when performing upkeep + * @param adminAddress address to cancel upkeep and withdraw remaining funds + * @param triggerType the type of trigger for the upkeep + * @param checkData data passed to the contract when checking for upkeep + * @param triggerConfig the config for the trigger + * @param offchainConfig offchainConfig for upkeep in bytes + * @param amount quantity of PLI upkeep is funded with (specified in Juels) + * @param sender address of the sender making the request + */ + function register( + string memory name, + bytes calldata encryptedEmail, + address upkeepContract, + uint32 gasLimit, + address adminAddress, + uint8 triggerType, + bytes memory checkData, + bytes memory triggerConfig, + bytes memory offchainConfig, + uint96 amount, + address sender + ) external onlyPLI { + _register( + RegistrationParams({ + 
name: name, + encryptedEmail: encryptedEmail, + upkeepContract: upkeepContract, + gasLimit: gasLimit, + adminAddress: adminAddress, + triggerType: triggerType, + checkData: checkData, + triggerConfig: triggerConfig, + offchainConfig: offchainConfig, + amount: amount + }), + sender + ); + } + + /** + * @notice Allows external users to register upkeeps; assumes amount is approved for transfer by the contract + * @param requestParams struct of all possible registration parameters + */ + function registerUpkeep(RegistrationParams calldata requestParams) external returns (uint256) { + if (requestParams.amount < s_config.minPLIJuels) { + revert InsufficientPayment(); + } + + PLI.transferFrom(msg.sender, address(this), requestParams.amount); + + return _register(requestParams, msg.sender); + } + + /** + * @dev register upkeep on AutomationRegistry contract and emit RegistrationApproved event + */ + function approve( + string memory name, + address upkeepContract, + uint32 gasLimit, + address adminAddress, + uint8 triggerType, + bytes calldata checkData, + bytes memory triggerConfig, + bytes calldata offchainConfig, + bytes32 hash + ) external onlyOwner { + PendingRequest memory request = s_pendingRequests[hash]; + if (request.admin == address(0)) { + revert RequestNotFound(); + } + bytes32 expectedHash = keccak256( + abi.encode(upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig) + ); + if (hash != expectedHash) { + revert HashMismatch(); + } + delete s_pendingRequests[hash]; + _approve( + RegistrationParams({ + name: name, + encryptedEmail: "", + upkeepContract: upkeepContract, + gasLimit: gasLimit, + adminAddress: adminAddress, + triggerType: triggerType, + checkData: checkData, + triggerConfig: triggerConfig, + offchainConfig: offchainConfig, + amount: request.balance + }), + expectedHash + ); + } + + /** + * @notice cancel will remove a registration request and return the refunds to the request.admin + * @param hash the 
request hash + */ + function cancel(bytes32 hash) external { + PendingRequest memory request = s_pendingRequests[hash]; + if (!(msg.sender == request.admin || msg.sender == owner())) { + revert OnlyAdminOrOwner(); + } + if (request.admin == address(0)) { + revert RequestNotFound(); + } + delete s_pendingRequests[hash]; + bool success = PLI.transfer(request.admin, request.balance); + if (!success) { + revert LinkTransferFailed(request.admin); + } + emit RegistrationRejected(hash); + } + + /** + * @notice owner calls this function to set contract config + * @param AutomationRegistry new keeper registry address + * @param minPLIJuels minimum PLI that new registrations should fund their upkeep with + */ + function setConfig(address AutomationRegistry, uint96 minPLIJuels) public onlyOwner { + s_config = RegistrarConfig({ + minPLIJuels: minPLIJuels, + AutomationRegistry: IAutomationRegistryMaster(AutomationRegistry) + }); + emit ConfigChanged(AutomationRegistry, minPLIJuels); + } + + /** + * @notice owner calls to set the config for this upkeep type + * @param triggerType the upkeep type to configure + * @param autoApproveType the auto approval setting (see enum) + * @param autoApproveMaxAllowed the max number of upkeeps that can be auto approved of this type + */ + function setTriggerConfig( + uint8 triggerType, + AutoApproveType autoApproveType, + uint32 autoApproveMaxAllowed + ) public onlyOwner { + s_triggerRegistrations[triggerType].autoApproveType = autoApproveType; + s_triggerRegistrations[triggerType].autoApproveMaxAllowed = autoApproveMaxAllowed; + emit TriggerConfigSet(triggerType, autoApproveType, autoApproveMaxAllowed); + } + + /** + * @notice owner calls this function to set allowlist status for senderAddress + * @param senderAddress senderAddress to set the allowlist status for + * @param allowed true if senderAddress needs to be added to allowlist, false if needs to be removed + */ + function setAutoApproveAllowedSender(address senderAddress, bool allowed) 
external onlyOwner { + s_autoApproveAllowedSenders[senderAddress] = allowed; + + emit AutoApproveAllowedSenderSet(senderAddress, allowed); + } + + /** + * @notice read the allowlist status of senderAddress + * @param senderAddress address to read the allowlist status for + */ + function getAutoApproveAllowedSender(address senderAddress) external view returns (bool) { + return s_autoApproveAllowedSenders[senderAddress]; + } + + /** + * @notice read the current registration configuration + */ + function getConfig() external view returns (address AutomationRegistry, uint256 minPLIJuels) { + RegistrarConfig memory config = s_config; + return (address(config.AutomationRegistry), config.minPLIJuels); + } + + /** + * @notice read the config for this upkeep type + * @param triggerType upkeep type to read config for + */ + function getTriggerRegistrationDetails(uint8 triggerType) external view returns (TriggerRegistrationStorage memory) { + return s_triggerRegistrations[triggerType]; + } + + /** + * @notice gets the admin address and the current balance of a registration request + */ + function getPendingRequest(bytes32 hash) external view returns (address, uint96) { + PendingRequest memory request = s_pendingRequests[hash]; + return (request.admin, request.balance); + } + + /** + * @notice Called when PLI is sent to the contract via `transferAndCall` + * @param sender Address of the sender transfering PLI + * @param amount Amount of PLI sent (specified in Juels) + * @param data Payload of the transaction + */ + function onTokenTransfer( + address sender, + uint256 amount, + bytes calldata data + ) + external + override + onlyPLI + permittedFunctionsForPLI(data) + isActualAmount(amount, data) + isActualSender(sender, data) + { + if (amount < s_config.minPLIJuels) { + revert InsufficientPayment(); + } + (bool success, ) = address(this).delegatecall(data); + // calls register + if (!success) { + revert RegistrationRequestFailed(); + } + } + + // 
================================================================ + // | PRIVATE | + // ================================================================ + + /** + * @dev verify registration request and emit RegistrationRequested event + */ + function _register(RegistrationParams memory params, address sender) private returns (uint256) { + if (params.adminAddress == address(0)) { + revert InvalidAdminAddress(); + } + bytes32 hash = keccak256( + abi.encode( + params.upkeepContract, + params.gasLimit, + params.adminAddress, + params.triggerType, + params.checkData, + params.triggerConfig, + params.offchainConfig + ) + ); + + emit RegistrationRequested( + hash, + params.name, + params.encryptedEmail, + params.upkeepContract, + params.gasLimit, + params.adminAddress, + params.triggerType, + params.triggerConfig, + params.offchainConfig, + params.checkData, + params.amount + ); + + uint256 upkeepId; + if (_shouldAutoApprove(s_triggerRegistrations[params.triggerType], sender)) { + s_triggerRegistrations[params.triggerType].approvedCount++; + upkeepId = _approve(params, hash); + } else { + uint96 newBalance = s_pendingRequests[hash].balance + params.amount; + s_pendingRequests[hash] = PendingRequest({admin: params.adminAddress, balance: newBalance}); + } + + return upkeepId; + } + + /** + * @dev register upkeep on AutomationRegistry contract and emit RegistrationApproved event + */ + function _approve(RegistrationParams memory params, bytes32 hash) private returns (uint256) { + IAutomationRegistryMaster AutomationRegistry = s_config.AutomationRegistry; + uint256 upkeepId = AutomationRegistry.registerUpkeep( + params.upkeepContract, + params.gasLimit, + params.adminAddress, + params.triggerType, + params.checkData, + params.triggerConfig, + params.offchainConfig + ); + bool success = PLI.transferAndCall(address(AutomationRegistry), params.amount, abi.encode(upkeepId)); + if (!success) { + revert LinkTransferFailed(address(AutomationRegistry)); + } + emit 
RegistrationApproved(hash, params.name, upkeepId); + return upkeepId; + } + + /** + * @dev verify sender allowlist if needed and check max limit + */ + function _shouldAutoApprove(TriggerRegistrationStorage memory config, address sender) private view returns (bool) { + if (config.autoApproveType == AutoApproveType.DISABLED) { + return false; + } + if (config.autoApproveType == AutoApproveType.ENABLED_SENDER_ALLOWLIST && (!s_autoApproveAllowedSenders[sender])) { + return false; + } + if (config.approvedCount < config.autoApproveMaxAllowed) { + return true; + } + return false; + } + + // ================================================================ + // | MODIFIERS | + // ================================================================ + + /** + * @dev Reverts if not sent from the PLI token + */ + modifier onlyPLI() { + if (msg.sender != address(PLI)) { + revert OnlyLink(); + } + _; + } + + /** + * @dev Reverts if the given data does not begin with the `register` function selector + * @param _data The data payload of the request + */ + modifier permittedFunctionsForPLI(bytes memory _data) { + bytes4 funcSelector; + assembly { + // solhint-disable-next-line avoid-low-level-calls + funcSelector := mload(add(_data, 32)) // First 32 bytes contain length of data + } + if (funcSelector != REGISTER_REQUEST_SELECTOR) { + revert FunctionNotPermitted(); + } + _; + } + + /** + * @dev Reverts if the actual amount passed does not match the expected amount + * @param expected amount that should match the actual amount + * @param data bytes + */ + modifier isActualAmount(uint256 expected, bytes calldata data) { + // decode register function arguments to get actual amount + (, , , , , , , , , uint96 amount, ) = abi.decode( + data[4:], + (string, bytes, address, uint32, address, uint8, bytes, bytes, bytes, uint96, address) + ); + if (expected != amount) { + revert AmountMismatch(); + } + _; + } + + /** + * @dev Reverts if the actual sender address does not match the expected 
sender address + * @param expected address that should match the actual sender address + * @param data bytes + */ + modifier isActualSender(address expected, bytes calldata data) { + // decode register function arguments to get actual sender + (, , , , , , , , , , address sender) = abi.decode( + data[4:], + (string, bytes, address, uint32, address, uint8, bytes, bytes, bytes, uint96, address) + ); + if (expected != sender) { + revert SenderMismatch(); + } + _; + } +} diff --git a/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistry2_2.sol b/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistry2_2.sol new file mode 100644 index 00000000..0d7583c9 --- /dev/null +++ b/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistry2_2.sol @@ -0,0 +1,415 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.19; + +import {EnumerableSet} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import {Address} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; +import {AutomationRegistryBase2_2} from "./AutomationRegistryBase2_2.sol"; +import {AutomationRegistryLogicB2_2} from "./AutomationRegistryLogicB2_2.sol"; +import {Chainable} from "../../Chainable.sol"; +import {IERC677Receiver} from "../../../shared/interfaces/IERC677Receiver.sol"; +import {OCR2Abstract} from "../../../shared/ocr2/OCR2Abstract.sol"; + +/** + * @notice Registry for adding work for Plugin nodes to perform on client + * contracts. Clients must support the AutomationCompatibleInterface interface. 
 */
contract AutomationRegistry2_2 is AutomationRegistryBase2_2, OCR2Abstract, Chainable, IERC677Receiver {
  using Address for address;
  using EnumerableSet for EnumerableSet.UintSet;
  using EnumerableSet for EnumerableSet.AddressSet;

  /**
   * @notice versions:
   * AutomationRegistry 2.2.0: moves chain-specific integration code into a separate module
   * KeeperRegistry 2.1.0: introduces support for log triggers
   *                       removes the need for "wrapped perform data"
   * KeeperRegistry 2.0.2: pass revert bytes as performData when target contract reverts
   *                       fixes issue with arbitrum block number
   *                       does an early return in case of stale report instead of revert
   * KeeperRegistry 2.0.1: implements workaround for buggy migrate function in 1.X
   * KeeperRegistry 2.0.0: implement OCR interface
   * KeeperRegistry 1.3.0: split contract into Proxy and Logic
   *                       account for Arbitrum and Optimism L1 gas fee
   *                       allow users to configure upkeeps
   * KeeperRegistry 1.2.0: allow funding within performUpkeep
   *                       allow configurable registry maxPerformGas
   *                       add function to let admin change upkeep gas limit
   *                       add minUpkeepSpend requirement
   *                       upgrade to solidity v0.8
   * KeeperRegistry 1.1.0: added flatFeeMicroLink
   * KeeperRegistry 1.0.0: initial release
   */
  string public constant override typeAndVersion = "AutomationRegistry 2.2.0";

  /**
   * @dev Constructor args are read off the already-deployed logicA contract so that every
   * contract in the delegation chain shares identical immutable configuration; Chainable
   * wires logicA in as the next hop for selectors this contract does not implement.
   * NOTE(review): logicA is deliberately typed as AutomationRegistryLogicB2_2 so the
   * logicB getters can be called on it — see the @param note below.
   * @param logicA the address of the first logic contract, but cast as logicB in order to call logicB functions
   */
  constructor(
    AutomationRegistryLogicB2_2 logicA
  )
    AutomationRegistryBase2_2(
      logicA.getLinkAddress(),
      logicA.getLinkNativeFeedAddress(),
      logicA.getFastGasFeedAddress(),
      logicA.getAutomationForwarderLogic(),
      logicA.getAllowedReadOnlyAddress()
    )
    Chainable(address(logicA))
  {}

  /**
   * @notice holds the variables used in the transmit function, necessary to avoid stack too deep errors
   * @dev numUpkeepsPassedChecks: count of upkeeps in the report that passed pre-perform checks;
   * totalCalldataWeight: sum of per-upkeep calldata weights, used to pro-rate the L1 fee across upkeeps;
   * totalReimbursement: running gas reimbursement credited to the transmitter;
   * totalPremium: running premium accrued to the shared transmitter pool
   */
  struct TransmitVars {
    uint16 numUpkeepsPassedChecks;
    uint256 totalCalldataWeight;
    uint96
totalReimbursement; + uint96 totalPremium; + } + + // ================================================================ + // | ACTIONS | + // ================================================================ + + /** + * @inheritdoc OCR2Abstract + */ + function transmit( + bytes32[3] calldata reportContext, + bytes calldata rawReport, + bytes32[] calldata rs, + bytes32[] calldata ss, + bytes32 rawVs + ) external override { + uint256 gasOverhead = gasleft(); + HotVars memory hotVars = s_hotVars; + + if (hotVars.paused) revert RegistryPaused(); + if (!s_transmitters[msg.sender].active) revert OnlyActiveTransmitters(); + + // Verify signatures + if (s_latestConfigDigest != reportContext[0]) revert ConfigDigestMismatch(); + if (rs.length != hotVars.f + 1 || rs.length != ss.length) revert IncorrectNumberOfSignatures(); + _verifyReportSignature(reportContext, rawReport, rs, ss, rawVs); + + Report memory report = _decodeReport(rawReport); + + uint40 epochAndRound = uint40(uint256(reportContext[1])); + uint32 epoch = uint32(epochAndRound >> 8); + + _handleReport(hotVars, report, gasOverhead); + + if (epoch > hotVars.latestEpoch) { + s_hotVars.latestEpoch = epoch; + } + } + + function _handleReport(HotVars memory hotVars, Report memory report, uint256 gasOverhead) private { + UpkeepTransmitInfo[] memory upkeepTransmitInfo = new UpkeepTransmitInfo[](report.upkeepIds.length); + TransmitVars memory transmitVars = TransmitVars({ + numUpkeepsPassedChecks: 0, + totalCalldataWeight: 0, + totalReimbursement: 0, + totalPremium: 0 + }); + + uint256 blocknumber = hotVars.chainModule.blockNumber(); + uint256 l1Fee = hotVars.chainModule.getCurrentL1Fee(); + + for (uint256 i = 0; i < report.upkeepIds.length; i++) { + upkeepTransmitInfo[i].upkeep = s_upkeep[report.upkeepIds[i]]; + upkeepTransmitInfo[i].triggerType = _getTriggerType(report.upkeepIds[i]); + + (upkeepTransmitInfo[i].earlyChecksPassed, upkeepTransmitInfo[i].dedupID) = _prePerformChecks( + report.upkeepIds[i], + blocknumber, + 
report.triggers[i], + upkeepTransmitInfo[i], + hotVars + ); + + if (upkeepTransmitInfo[i].earlyChecksPassed) { + transmitVars.numUpkeepsPassedChecks += 1; + } else { + continue; + } + + // Actually perform the target upkeep + (upkeepTransmitInfo[i].performSuccess, upkeepTransmitInfo[i].gasUsed) = _performUpkeep( + upkeepTransmitInfo[i].upkeep.forwarder, + report.gasLimits[i], + report.performDatas[i] + ); + + // To split L1 fee across the upkeeps, assign a weight to this upkeep based on the length + // of the perform data and calldata overhead + upkeepTransmitInfo[i].calldataWeight = + report.performDatas[i].length + + TRANSMIT_CALLDATA_FIXED_BYTES_OVERHEAD + + (TRANSMIT_CALLDATA_PER_SIGNER_BYTES_OVERHEAD * (hotVars.f + 1)); + transmitVars.totalCalldataWeight += upkeepTransmitInfo[i].calldataWeight; + + // Deduct that gasUsed by upkeep from our running counter + gasOverhead -= upkeepTransmitInfo[i].gasUsed; + + // Store last perform block number / deduping key for upkeep + _updateTriggerMarker(report.upkeepIds[i], blocknumber, upkeepTransmitInfo[i]); + } + // No upkeeps to be performed in this report + if (transmitVars.numUpkeepsPassedChecks == 0) { + return; + } + + // This is the overall gas overhead that will be split across performed upkeeps + // Take upper bound of 16 gas per callData bytes + gasOverhead = (gasOverhead - gasleft()) + (16 * msg.data.length) + ACCOUNTING_FIXED_GAS_OVERHEAD; + gasOverhead = gasOverhead / transmitVars.numUpkeepsPassedChecks + ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD; + + { + uint96 reimbursement; + uint96 premium; + for (uint256 i = 0; i < report.upkeepIds.length; i++) { + if (upkeepTransmitInfo[i].earlyChecksPassed) { + (reimbursement, premium) = _postPerformPayment( + hotVars, + report.upkeepIds[i], + upkeepTransmitInfo[i].gasUsed, + report.fastGasWei, + report.linkNative, + gasOverhead, + (l1Fee * upkeepTransmitInfo[i].calldataWeight) / transmitVars.totalCalldataWeight + ); + transmitVars.totalPremium += premium; + 
transmitVars.totalReimbursement += reimbursement; + + emit UpkeepPerformed( + report.upkeepIds[i], + upkeepTransmitInfo[i].performSuccess, + reimbursement + premium, + upkeepTransmitInfo[i].gasUsed, + gasOverhead, + report.triggers[i] + ); + } + } + } + // record payments + s_transmitters[msg.sender].balance += transmitVars.totalReimbursement; + s_hotVars.totalPremium += transmitVars.totalPremium; + } + + /** + * @notice simulates the upkeep with the perform data returned from checkUpkeep + * @param id identifier of the upkeep to execute the data with. + * @param performData calldata parameter to be passed to the target upkeep. + * @return success whether the call reverted or not + * @return gasUsed the amount of gas the target contract consumed + */ + function simulatePerformUpkeep( + uint256 id, + bytes calldata performData + ) external returns (bool success, uint256 gasUsed) { + _preventExecution(); + + if (s_hotVars.paused) revert RegistryPaused(); + Upkeep memory upkeep = s_upkeep[id]; + (success, gasUsed) = _performUpkeep(upkeep.forwarder, upkeep.performGas, performData); + return (success, gasUsed); + } + + /** + * @notice uses PLI's transferAndCall to PLI and add funding to an upkeep + * @dev safe to cast uint256 to uint96 as total PLI supply is under UINT96MAX + * @param sender the account which transferred the funds + * @param amount number of PLI transfer + */ + function onTokenTransfer(address sender, uint256 amount, bytes calldata data) external override { + if (msg.sender != address(i_link)) revert OnlyCallableByPLIToken(); + if (data.length != 32) revert InvalidDataLength(); + uint256 id = abi.decode(data, (uint256)); + if (s_upkeep[id].maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + s_upkeep[id].balance = s_upkeep[id].balance + uint96(amount); + s_expectedLinkBalance = s_expectedLinkBalance + amount; + emit FundsAdded(id, sender, uint96(amount)); + } + + // ================================================================ + // | 
SETTERS                            |
+  // ================================================================
+
+  /**
+   * @inheritdoc OCR2Abstract
+   * @dev prefer the type-safe version of setConfig (below) whenever possible. The OnchainConfig could differ between registry versions
+   */
+  function setConfig(
+    address[] memory signers,
+    address[] memory transmitters,
+    uint8 f,
+    bytes memory onchainConfigBytes,
+    uint64 offchainConfigVersion,
+    bytes memory offchainConfig
+  ) external override {
+    setConfigTypeSafe(
+      signers,
+      transmitters,
+      f,
+      abi.decode(onchainConfigBytes, (OnchainConfig)),
+      offchainConfigVersion,
+      offchainConfig
+    );
+  }
+
+  /**
+   * @notice replaces the oracle set and the registry's on/offchain configuration
+   * @dev settles the premium pool for the outgoing oracle set before swapping it out,
+   * then rebuilds all signer/transmitter bookkeeping and the hot/cold config storage
+   */
+  function setConfigTypeSafe(
+    address[] memory signers,
+    address[] memory transmitters,
+    uint8 f,
+    OnchainConfig memory onchainConfig,
+    uint64 offchainConfigVersion,
+    bytes memory offchainConfig
+  ) public onlyOwner {
+    if (signers.length > MAX_NUM_ORACLES) revert TooManyOracles();
+    if (f == 0) revert IncorrectNumberOfFaultyOracles();
+    // OCR fault tolerance: requires strictly more than 3f signers (n >= 3f + 1)
+    if (signers.length != transmitters.length || signers.length <= 3 * f) revert IncorrectNumberOfSigners();
+
+    // move all pooled payments out of the pool to each transmitter's balance
+    uint96 totalPremium = s_hotVars.totalPremium;
+    uint96 oldLength = uint96(s_transmittersList.length);
+    for (uint256 i = 0; i < oldLength; i++) {
+      _updateTransmitterBalanceFromPool(s_transmittersList[i], totalPremium, oldLength);
+    }
+
+    // remove any old signer/transmitter addresses
+    address signerAddress;
+    address transmitterAddress;
+    for (uint256 i = 0; i < oldLength; i++) {
+      signerAddress = s_signersList[i];
+      transmitterAddress = s_transmittersList[i];
+      delete s_signers[signerAddress];
+      // Do not delete the whole transmitter struct as it has balance information stored
+      s_transmitters[transmitterAddress].active = false;
+    }
+    delete s_signersList;
+    delete s_transmittersList;
+
+    // add new signer/transmitter addresses
+    {
+      Transmitter memory transmitter;
+      address temp;
+      for (uint256 i = 0; i < signers.length;
i++) {
+        // signers and transmitters must each be unique and non-zero within the new set
+        if (s_signers[signers[i]].active) revert RepeatedSigner();
+        if (signers[i] == ZERO_ADDRESS) revert InvalidSigner();
+        s_signers[signers[i]] = Signer({active: true, index: uint8(i)});
+
+        temp = transmitters[i];
+        if (temp == ZERO_ADDRESS) revert InvalidTransmitter();
+        transmitter = s_transmitters[temp];
+        if (transmitter.active) revert RepeatedTransmitter();
+        transmitter.active = true;
+        transmitter.index = uint8(i);
+        // new transmitters start afresh from current totalPremium
+        // some spare change of premium from previous pool will be forfeited
+        transmitter.lastCollected = totalPremium;
+        s_transmitters[temp] = transmitter;
+      }
+    }
+    s_signersList = signers;
+    s_transmittersList = transmitters;
+
+    // rebuild the hot (transmit-path) config; paused/reentrancyGuard state is carried over
+    s_hotVars = HotVars({
+      f: f,
+      paymentPremiumPPB: onchainConfig.paymentPremiumPPB,
+      flatFeeMicroLink: onchainConfig.flatFeeMicroLink,
+      stalenessSeconds: onchainConfig.stalenessSeconds,
+      gasCeilingMultiplier: onchainConfig.gasCeilingMultiplier,
+      paused: s_hotVars.paused,
+      reentrancyGuard: s_hotVars.reentrancyGuard,
+      totalPremium: totalPremium,
+      latestEpoch: 0, // DON restarts epoch
+      reorgProtectionEnabled: onchainConfig.reorgProtectionEnabled,
+      chainModule: onchainConfig.chainModule
+    });
+
+    // rebuild the cold config; nonce/configCount/block number/owner balance state is carried over
+    s_storage = Storage({
+      checkGasLimit: onchainConfig.checkGasLimit,
+      minUpkeepSpend: onchainConfig.minUpkeepSpend,
+      maxPerformGas: onchainConfig.maxPerformGas,
+      transcoder: onchainConfig.transcoder,
+      maxCheckDataSize: onchainConfig.maxCheckDataSize,
+      maxPerformDataSize: onchainConfig.maxPerformDataSize,
+      maxRevertDataSize: onchainConfig.maxRevertDataSize,
+      upkeepPrivilegeManager: onchainConfig.upkeepPrivilegeManager,
+      nonce: s_storage.nonce,
+      configCount: s_storage.configCount,
+      latestConfigBlockNumber: s_storage.latestConfigBlockNumber,
+      ownerLinkBalance: s_storage.ownerLinkBalance
+    });
+    s_fallbackGasPrice = onchainConfig.fallbackGasPrice;
+    s_fallbackLinkPrice = onchainConfig.fallbackLinkPrice;
+
+    uint32 previousConfigBlockNumber =
s_storage.latestConfigBlockNumber;
+    s_storage.latestConfigBlockNumber = uint32(onchainConfig.chainModule.blockNumber());
+    s_storage.configCount += 1;
+
+    bytes memory onchainConfigBytes = abi.encode(onchainConfig);
+
+    // domain-separation tag binding this config to this chain and contract instance
+    s_latestConfigDigest = _configDigestFromConfigData(
+      block.chainid,
+      address(this),
+      s_storage.configCount,
+      signers,
+      transmitters,
+      f,
+      onchainConfigBytes,
+      offchainConfigVersion,
+      offchainConfig
+    );
+
+    // clear the previous registrar set before installing the new one.
+    // BUG FIX: the prior `for (idx = 0; idx < length(); idx++) remove(at(idx))` loop
+    // interacts badly with EnumerableSet's swap-and-pop removal: removing at(idx)
+    // moves the last element into slot idx and idx++ then skips it, so whenever the
+    // old set held more than one registrar, stale registrars survived the config
+    // change. Draining from the front until empty removes every element.
+    while (s_registrars.length() > 0) {
+      s_registrars.remove(s_registrars.at(0));
+    }
+
+    for (uint256 idx = 0; idx < onchainConfig.registrars.length; idx++) {
+      s_registrars.add(onchainConfig.registrars[idx]);
+    }
+
+    emit ConfigSet(
+      previousConfigBlockNumber,
+      s_latestConfigDigest,
+      s_storage.configCount,
+      signers,
+      transmitters,
+      f,
+      onchainConfigBytes,
+      offchainConfigVersion,
+      offchainConfig
+    );
+  }
+
+  // ================================================================
+  // |                           GETTERS                            |
+  // ================================================================
+
+  /**
+   * @inheritdoc OCR2Abstract
+   */
+  function latestConfigDetails()
+    external
+    view
+    override
+    returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest)
+  {
+    return (s_storage.configCount, s_storage.latestConfigBlockNumber, s_latestConfigDigest);
+  }
+
+  /**
+   * @inheritdoc OCR2Abstract
+   */
+  function latestConfigDigestAndEpoch()
+    external
+    view
+    override
+    returns (bool scanLogs, bytes32 configDigest, uint32 epoch)
+  {
+    return (false, s_latestConfigDigest, s_hotVars.latestEpoch);
+  }
+}
diff --git a/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistryBase2_2.sol b/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistryBase2_2.sol
new file mode 100644
index 00000000..cbee3757
--- /dev/null
+++ b/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistryBase2_2.sol
@@ -0,0 +1,956 @@
+// SPDX-License-Identifier: BUSL-1.1
+pragma solidity 0.8.19;
+
+import {EnumerableSet} from
"../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import {Address} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; +import {StreamsLookupCompatibleInterface} from "../../interfaces/StreamsLookupCompatibleInterface.sol"; +import {ILogAutomation, Log} from "../../interfaces/ILogAutomation.sol"; +import {IAutomationForwarder} from "../../interfaces/IAutomationForwarder.sol"; +import {ConfirmedOwner} from "../../../shared/access/ConfirmedOwner.sol"; +import {AggregatorV3Interface} from "../../../shared/interfaces/AggregatorV3Interface.sol"; +import {LinkTokenInterface} from "../../../shared/interfaces/LinkTokenInterface.sol"; +import {KeeperCompatibleInterface} from "../../interfaces/KeeperCompatibleInterface.sol"; +import {UpkeepFormat} from "../../interfaces/UpkeepTranscoderInterface.sol"; +import {IChainModule} from "../interfaces/v2_2/IChainModule.sol"; + +/** + * @notice Base Keeper Registry contract, contains shared logic between + * AutomationRegistry and AutomationRegistryLogic + * @dev all errors, events, and internal functions should live here + */ +abstract contract AutomationRegistryBase2_2 is ConfirmedOwner { + using Address for address; + using EnumerableSet for EnumerableSet.UintSet; + using EnumerableSet for EnumerableSet.AddressSet; + + address internal constant ZERO_ADDRESS = address(0); + address internal constant IGNORE_ADDRESS = 0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF; + bytes4 internal constant CHECK_SELECTOR = KeeperCompatibleInterface.checkUpkeep.selector; + bytes4 internal constant PERFORM_SELECTOR = KeeperCompatibleInterface.performUpkeep.selector; + bytes4 internal constant CHECK_CALLBACK_SELECTOR = StreamsLookupCompatibleInterface.checkCallback.selector; + bytes4 internal constant CHECK_LOG_SELECTOR = ILogAutomation.checkLog.selector; + uint256 internal constant PERFORM_GAS_MIN = 2_300; + uint256 internal constant CANCELLATION_DELAY = 50; + uint256 internal constant 
PERFORM_GAS_CUSHION = 5_000; + uint256 internal constant PPB_BASE = 1_000_000_000; + uint32 internal constant UINT32_MAX = type(uint32).max; + uint96 internal constant PLI_TOTAL_SUPPLY = 1e27; + // The first byte of the mask can be 0, because we only ever have 31 oracles + uint256 internal constant ORACLE_MASK = 0x0001010101010101010101010101010101010101010101010101010101010101; + /** + * @dev UPKEEP_TRANSCODER_VERSION_BASE is temporary necessity for backwards compatibility with + * MigratableAutomationRegistryInterfaceV1 - it should be removed in future versions in favor of + * UPKEEP_VERSION_BASE and MigratableAutomationRegistryInterfaceV2 + */ + UpkeepFormat internal constant UPKEEP_TRANSCODER_VERSION_BASE = UpkeepFormat.V1; + uint8 internal constant UPKEEP_VERSION_BASE = 3; + + // Next block of constants are only used in maxPayment estimation during checkUpkeep simulation + // These values are calibrated using hardhat tests which simulates various cases and verifies that + // the variables result in accurate estimation + uint256 internal constant REGISTRY_CONDITIONAL_OVERHEAD = 60_000; // Fixed gas overhead for conditional upkeeps + uint256 internal constant REGISTRY_LOG_OVERHEAD = 85_000; // Fixed gas overhead for log upkeeps + uint256 internal constant REGISTRY_PER_SIGNER_GAS_OVERHEAD = 5_600; // Value scales with f + uint256 internal constant REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD = 24; // Per perform data byte overhead + + // The overhead (in bytes) in addition to perform data for upkeep sent in calldata + // This includes overhead for all struct encoding as well as report signatures + // There is a fixed component and a per signer component. This is calculated exactly by doing abi encoding + uint256 internal constant TRANSMIT_CALLDATA_FIXED_BYTES_OVERHEAD = 932; + uint256 internal constant TRANSMIT_CALLDATA_PER_SIGNER_BYTES_OVERHEAD = 64; + + // Next block of constants are used in actual payment calculation. 
We calculate the exact gas used within the + // tx itself, but since payment processing itself takes gas, and it needs the overhead as input, we use fixed constants + // to account for gas used in payment processing. These values are calibrated using hardhat tests which simulates various cases and verifies that + // the variables result in accurate estimation + uint256 internal constant ACCOUNTING_FIXED_GAS_OVERHEAD = 22_000; // Fixed overhead per tx + uint256 internal constant ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD = 7_000; // Overhead per upkeep performed in batch + + LinkTokenInterface internal immutable i_link; + AggregatorV3Interface internal immutable i_linkNativeFeed; + AggregatorV3Interface internal immutable i_fastGasFeed; + address internal immutable i_automationForwarderLogic; + address internal immutable i_allowedReadOnlyAddress; + + /** + * @dev - The storage is gas optimised for one and only one function - transmit. All the storage accessed in transmit + * is stored compactly. Rest of the storage layout is not of much concern as transmit is the only hot path + */ + + // Upkeep storage + EnumerableSet.UintSet internal s_upkeepIDs; + mapping(uint256 => Upkeep) internal s_upkeep; // accessed during transmit + mapping(uint256 => address) internal s_upkeepAdmin; + mapping(uint256 => address) internal s_proposedAdmin; + mapping(uint256 => bytes) internal s_checkData; + mapping(bytes32 => bool) internal s_dedupKeys; + // Registry config and state + EnumerableSet.AddressSet internal s_registrars; + mapping(address => Transmitter) internal s_transmitters; + mapping(address => Signer) internal s_signers; + address[] internal s_signersList; // s_signersList contains the signing address of each oracle + address[] internal s_transmittersList; // s_transmittersList contains the transmission address of each oracle + mapping(address => address) internal s_transmitterPayees; // s_payees contains the mapping from transmitter to payee. 
+ mapping(address => address) internal s_proposedPayee; // proposed payee for a transmitter + bytes32 internal s_latestConfigDigest; // Read on transmit path in case of signature verification + HotVars internal s_hotVars; // Mixture of config and state, used in transmit + Storage internal s_storage; // Mixture of config and state, not used in transmit + uint256 internal s_fallbackGasPrice; + uint256 internal s_fallbackLinkPrice; + uint256 internal s_expectedLinkBalance; // Used in case of erroneous PLI transfers to contract + mapping(address => MigrationPermission) internal s_peerRegistryMigrationPermission; // Permissions for migration to and fro + mapping(uint256 => bytes) internal s_upkeepTriggerConfig; // upkeep triggers + mapping(uint256 => bytes) internal s_upkeepOffchainConfig; // general config set by users for each upkeep + mapping(uint256 => bytes) internal s_upkeepPrivilegeConfig; // general config set by an administrative role for an upkeep + mapping(address => bytes) internal s_adminPrivilegeConfig; // general config set by an administrative role for an admin + + error ArrayHasNoEntries(); + error CannotCancel(); + error CheckDataExceedsLimit(); + error ConfigDigestMismatch(); + error DuplicateEntry(); + error DuplicateSigners(); + error GasLimitCanOnlyIncrease(); + error GasLimitOutsideRange(); + error IncorrectNumberOfFaultyOracles(); + error IncorrectNumberOfSignatures(); + error IncorrectNumberOfSigners(); + error IndexOutOfRange(); + error InvalidDataLength(); + error InvalidTrigger(); + error InvalidPayee(); + error InvalidRecipient(); + error InvalidReport(); + error InvalidSigner(); + error InvalidTransmitter(); + error InvalidTriggerType(); + error MaxCheckDataSizeCanOnlyIncrease(); + error MaxPerformDataSizeCanOnlyIncrease(); + error MigrationNotPermitted(); + error NotAContract(); + error OnlyActiveSigners(); + error OnlyActiveTransmitters(); + error OnlyCallableByAdmin(); + error OnlyCallableByPLIToken(); + error 
OnlyCallableByOwnerOrAdmin(); + error OnlyCallableByOwnerOrRegistrar(); + error OnlyCallableByPayee(); + error OnlyCallableByProposedAdmin(); + error OnlyCallableByProposedPayee(); + error OnlyCallableByUpkeepPrivilegeManager(); + error OnlyPausedUpkeep(); + error OnlySimulatedBackend(); + error OnlyUnpausedUpkeep(); + error ParameterLengthError(); + error PaymentGreaterThanAllPLI(); + error ReentrantCall(); + error RegistryPaused(); + error RepeatedSigner(); + error RepeatedTransmitter(); + error TargetCheckReverted(bytes reason); + error TooManyOracles(); + error TranscoderNotSet(); + error UpkeepAlreadyExists(); + error UpkeepCancelled(); + error UpkeepNotCanceled(); + error UpkeepNotNeeded(); + error ValueNotChanged(); + + enum MigrationPermission { + NONE, + OUTGOING, + INCOMING, + BIDIRECTIONAL + } + + enum Trigger { + CONDITION, + LOG + } + + enum UpkeepFailureReason { + NONE, + UPKEEP_CANCELLED, + UPKEEP_PAUSED, + TARGET_CHECK_REVERTED, + UPKEEP_NOT_NEEDED, + PERFORM_DATA_EXCEEDS_LIMIT, + INSUFFICIENT_BALANCE, + CALLBACK_REVERTED, + REVERT_DATA_EXCEEDS_LIMIT, + REGISTRY_PAUSED + } + + /** + * @notice OnchainConfigLegacy of the registry + * @dev only used in params and return values + * @member paymentPremiumPPB payment premium rate oracles receive on top of + * being reimbursed for gas, measured in parts per billion + * @member flatFeeMicroLink flat fee paid to oracles for performing upkeeps, + * priced in MicroLink; can be used in conjunction with or independently of + * paymentPremiumPPB + * @member checkGasLimit gas limit when checking for upkeep + * @member stalenessSeconds number of seconds that is allowed for feed data to + * be stale before switching to the fallback pricing + * @member gasCeilingMultiplier multiplier to apply to the fast gas feed price + * when calculating the payment ceiling for keepers + * @member minUpkeepSpend minimum PLI that an upkeep must spend before cancelling + * @member maxPerformGas max performGas allowed for an upkeep on 
this registry + * @member maxCheckDataSize max length of checkData bytes + * @member maxPerformDataSize max length of performData bytes + * @member maxRevertDataSize max length of revertData bytes + * @member fallbackGasPrice gas price used if the gas price feed is stale + * @member fallbackLinkPrice PLI price used if the PLI price feed is stale + * @member transcoder address of the transcoder contract + * @member registrars addresses of the registrar contracts + * @member upkeepPrivilegeManager address which can set privilege for upkeeps + */ + struct OnchainConfigLegacy { + uint32 paymentPremiumPPB; + uint32 flatFeeMicroLink; // min 0.000001 PLI, max 4294 PLI + uint32 checkGasLimit; + uint24 stalenessSeconds; + uint16 gasCeilingMultiplier; + uint96 minUpkeepSpend; + uint32 maxPerformGas; + uint32 maxCheckDataSize; + uint32 maxPerformDataSize; + uint32 maxRevertDataSize; + uint256 fallbackGasPrice; + uint256 fallbackLinkPrice; + address transcoder; + address[] registrars; + address upkeepPrivilegeManager; + } + + /** + * @notice OnchainConfig of the registry + * @dev used only in setConfig() + * @member paymentPremiumPPB payment premium rate oracles receive on top of + * being reimbursed for gas, measured in parts per billion + * @member flatFeeMicroLink flat fee paid to oracles for performing upkeeps, + * priced in MicroLink; can be used in conjunction with or independently of + * paymentPremiumPPB + * @member checkGasLimit gas limit when checking for upkeep + * @member stalenessSeconds number of seconds that is allowed for feed data to + * be stale before switching to the fallback pricing + * @member gasCeilingMultiplier multiplier to apply to the fast gas feed price + * when calculating the payment ceiling for keepers + * @member minUpkeepSpend minimum PLI that an upkeep must spend before cancelling + * @member maxPerformGas max performGas allowed for an upkeep on this registry + * @member maxCheckDataSize max length of checkData bytes + * @member 
maxPerformDataSize max length of performData bytes + * @member maxRevertDataSize max length of revertData bytes + * @member fallbackGasPrice gas price used if the gas price feed is stale + * @member fallbackLinkPrice PLI price used if the PLI price feed is stale + * @member transcoder address of the transcoder contract + * @member registrars addresses of the registrar contracts + * @member upkeepPrivilegeManager address which can set privilege for upkeeps + * @member reorgProtectionEnabled if this registry enables re-org protection checks + * @member chainModule the chain specific module + */ + struct OnchainConfig { + uint32 paymentPremiumPPB; + uint32 flatFeeMicroLink; // min 0.000001 PLI, max 4294 PLI + uint32 checkGasLimit; + uint24 stalenessSeconds; + uint16 gasCeilingMultiplier; + uint96 minUpkeepSpend; + uint32 maxPerformGas; + uint32 maxCheckDataSize; + uint32 maxPerformDataSize; + uint32 maxRevertDataSize; + uint256 fallbackGasPrice; + uint256 fallbackLinkPrice; + address transcoder; + address[] registrars; + address upkeepPrivilegeManager; + IChainModule chainModule; + bool reorgProtectionEnabled; + } + + /** + * @notice state of the registry + * @dev only used in params and return values + * @dev this will likely be deprecated in a future version of the registry in favor of individual getters + * @member nonce used for ID generation + * @member ownerLinkBalance withdrawable balance of PLI by contract owner + * @member expectedLinkBalance the expected balance of PLI of the registry + * @member totalPremium the total premium collected on registry so far + * @member numUpkeeps total number of upkeeps on the registry + * @member configCount ordinal number of current config, out of all configs applied to this contract so far + * @member latestConfigBlockNumber last block at which this config was set + * @member latestConfigDigest domain-separation tag for current config + * @member latestEpoch for which a report was transmitted + * @member paused freeze on 
execution scoped to the entire registry + */ + struct State { + uint32 nonce; + uint96 ownerLinkBalance; + uint256 expectedLinkBalance; + uint96 totalPremium; + uint256 numUpkeeps; + uint32 configCount; + uint32 latestConfigBlockNumber; + bytes32 latestConfigDigest; + uint32 latestEpoch; + bool paused; + } + + /** + * @notice relevant state of an upkeep which is used in transmit function + * @member paused if this upkeep has been paused + * @member performGas the gas limit of upkeep execution + * @member maxValidBlocknumber until which block this upkeep is valid + * @member forwarder the forwarder contract to use for this upkeep + * @member amountSpent the amount this upkeep has spent + * @member balance the balance of this upkeep + * @member lastPerformedBlockNumber the last block number when this upkeep was performed + */ + struct Upkeep { + bool paused; + uint32 performGas; + uint32 maxValidBlocknumber; + IAutomationForwarder forwarder; + // 0 bytes left in 1st EVM word - not written to in transmit + uint96 amountSpent; + uint96 balance; + uint32 lastPerformedBlockNumber; + // 2 bytes left in 2nd EVM word - written in transmit path + } + + /** + * @notice all information about an upkeep + * @dev only used in return values + * @dev this will likely be deprecated in a future version of the registry + * @member target the contract which needs to be serviced + * @member performGas the gas limit of upkeep execution + * @member checkData the checkData bytes for this upkeep + * @member balance the balance of this upkeep + * @member admin for this upkeep + * @member maxValidBlocknumber until which block this upkeep is valid + * @member lastPerformedBlockNumber the last block number when this upkeep was performed + * @member amountSpent the amount this upkeep has spent + * @member paused if this upkeep has been paused + * @member offchainConfig the off-chain config of this upkeep + */ + struct UpkeepInfo { + address target; + uint32 performGas; + bytes checkData; + 
uint96 balance; + address admin; + uint64 maxValidBlocknumber; + uint32 lastPerformedBlockNumber; + uint96 amountSpent; + bool paused; + bytes offchainConfig; + } + + /// @dev Config + State storage struct which is on hot transmit path + struct HotVars { + uint96 totalPremium; // ─────────╮ total historical payment to oracles for premium + uint32 paymentPremiumPPB; // │ premium percentage charged to user over tx cost + uint32 flatFeeMicroLink; // │ flat fee charged to user for every perform + uint32 latestEpoch; // │ latest epoch for which a report was transmitted + uint24 stalenessSeconds; // │ Staleness tolerance for feeds + uint16 gasCeilingMultiplier; // │ multiplier on top of fast gas feed for upper bound + uint8 f; // │ maximum number of faulty oracles + bool paused; // │ pause switch for all upkeeps in the registry + bool reentrancyGuard; // ────────╯ guard against reentrancy + bool reorgProtectionEnabled; // if this registry should enable re-org protection mechanism + IChainModule chainModule; // the interface of chain specific module + } + + /// @dev Config + State storage struct which is not on hot transmit path + struct Storage { + uint96 minUpkeepSpend; // Minimum amount an upkeep must spend + address transcoder; // Address of transcoder contract used in migrations + // 1 EVM word full + uint96 ownerLinkBalance; // Balance of owner, accumulates minUpkeepSpend in case it is not spent + uint32 checkGasLimit; // Gas limit allowed in checkUpkeep + uint32 maxPerformGas; // Max gas an upkeep can use on this registry + uint32 nonce; // Nonce for each upkeep created + uint32 configCount; // incremented each time a new config is posted, The count + // is incorporated into the config digest to prevent replay attacks. 
+ uint32 latestConfigBlockNumber; // makes it easier for offchain systems to extract config from logs + // 2 EVM word full + uint32 maxCheckDataSize; // max length of checkData bytes + uint32 maxPerformDataSize; // max length of performData bytes + uint32 maxRevertDataSize; // max length of revertData bytes + address upkeepPrivilegeManager; // address which can set privilege for upkeeps + // 3 EVM word full + } + + /// @dev Report transmitted by OCR to transmit function + struct Report { + uint256 fastGasWei; + uint256 linkNative; + uint256[] upkeepIds; + uint256[] gasLimits; + bytes[] triggers; + bytes[] performDatas; + } + + /** + * @dev This struct is used to maintain run time information about an upkeep in transmit function + * @member upkeep the upkeep struct + * @member earlyChecksPassed whether the upkeep passed early checks before perform + * @member performSuccess whether the perform was successful + * @member triggerType the type of trigger + * @member gasUsed gasUsed by this upkeep in perform + * @member calldataWeight weight assigned to this upkeep for its contribution to calldata. 
It is used to split L1 fee + * @member dedupID unique ID used to dedup an upkeep/trigger combo + */ + struct UpkeepTransmitInfo { + Upkeep upkeep; + bool earlyChecksPassed; + bool performSuccess; + Trigger triggerType; + uint256 gasUsed; + uint256 calldataWeight; + bytes32 dedupID; + } + + struct Transmitter { + bool active; + uint8 index; // Index of oracle in s_signersList/s_transmittersList + uint96 balance; + uint96 lastCollected; + } + + struct Signer { + bool active; + // Index of oracle in s_signersList/s_transmittersList + uint8 index; + } + + /** + * @notice the trigger structure conditional trigger type + */ + struct ConditionalTrigger { + uint32 blockNum; + bytes32 blockHash; + } + + /** + * @notice the trigger structure of log upkeeps + * @dev NOTE that blockNum / blockHash describe the block used for the callback, + * not necessarily the block number that the log was emitted in!!!! + */ + struct LogTrigger { + bytes32 logBlockHash; + bytes32 txHash; + uint32 logIndex; + uint32 blockNum; + bytes32 blockHash; + } + + event AdminPrivilegeConfigSet(address indexed admin, bytes privilegeConfig); + event CancelledUpkeepReport(uint256 indexed id, bytes trigger); + event ChainSpecificModuleUpdated(address newModule); + event DedupKeyAdded(bytes32 indexed dedupKey); + event FundsAdded(uint256 indexed id, address indexed from, uint96 amount); + event FundsWithdrawn(uint256 indexed id, uint256 amount, address to); + event InsufficientFundsUpkeepReport(uint256 indexed id, bytes trigger); + event OwnerFundsWithdrawn(uint96 amount); + event Paused(address account); + event PayeesUpdated(address[] transmitters, address[] payees); + event PayeeshipTransferRequested(address indexed transmitter, address indexed from, address indexed to); + event PayeeshipTransferred(address indexed transmitter, address indexed from, address indexed to); + event PaymentWithdrawn(address indexed transmitter, uint256 indexed amount, address indexed to, address payee); + event 
ReorgedUpkeepReport(uint256 indexed id, bytes trigger); + event StaleUpkeepReport(uint256 indexed id, bytes trigger); + event UpkeepAdminTransferred(uint256 indexed id, address indexed from, address indexed to); + event UpkeepAdminTransferRequested(uint256 indexed id, address indexed from, address indexed to); + event UpkeepCanceled(uint256 indexed id, uint64 indexed atBlockHeight); + event UpkeepCheckDataSet(uint256 indexed id, bytes newCheckData); + event UpkeepGasLimitSet(uint256 indexed id, uint96 gasLimit); + event UpkeepMigrated(uint256 indexed id, uint256 remainingBalance, address destination); + event UpkeepOffchainConfigSet(uint256 indexed id, bytes offchainConfig); + event UpkeepPaused(uint256 indexed id); + event UpkeepPerformed( + uint256 indexed id, + bool indexed success, + uint96 totalPayment, + uint256 gasUsed, + uint256 gasOverhead, + bytes trigger + ); + event UpkeepPrivilegeConfigSet(uint256 indexed id, bytes privilegeConfig); + event UpkeepReceived(uint256 indexed id, uint256 startingBalance, address importedFrom); + event UpkeepRegistered(uint256 indexed id, uint32 performGas, address admin); + event UpkeepTriggerConfigSet(uint256 indexed id, bytes triggerConfig); + event UpkeepUnpaused(uint256 indexed id); + event Unpaused(address account); + + /** + * @param link address of the PLI Token + * @param linkNativeFeed address of the PLI/Native price feed + * @param fastGasFeed address of the Fast Gas price feed + * @param automationForwarderLogic the address of automation forwarder logic + * @param allowedReadOnlyAddress the address of the allowed read only address + */ + constructor( + address link, + address linkNativeFeed, + address fastGasFeed, + address automationForwarderLogic, + address allowedReadOnlyAddress + ) ConfirmedOwner(msg.sender) { + i_link = LinkTokenInterface(link); + i_linkNativeFeed = AggregatorV3Interface(linkNativeFeed); + i_fastGasFeed = AggregatorV3Interface(fastGasFeed); + i_automationForwarderLogic = 
automationForwarderLogic;
+    i_allowedReadOnlyAddress = allowedReadOnlyAddress;
+  }
+
+  // ================================================================
+  // |                   INTERNAL FUNCTIONS ONLY                    |
+  // ================================================================
+
+  /**
+   * @dev creates a new upkeep with the given fields
+   * @param id the id of the upkeep
+   * @param upkeep the upkeep to create
+   * @param admin address to cancel upkeep and withdraw remaining funds
+   * @param checkData data which is passed to user's checkUpkeep
+   * @param triggerConfig the trigger config for this upkeep
+   * @param offchainConfig the off-chain config of this upkeep
+   */
+  function _createUpkeep(
+    uint256 id,
+    Upkeep memory upkeep,
+    address admin,
+    bytes memory checkData,
+    bytes memory triggerConfig,
+    bytes memory offchainConfig
+  ) internal {
+    if (s_hotVars.paused) revert RegistryPaused();
+    if (checkData.length > s_storage.maxCheckDataSize) revert CheckDataExceedsLimit();
+    if (upkeep.performGas < PERFORM_GAS_MIN || upkeep.performGas > s_storage.maxPerformGas)
+      revert GasLimitOutsideRange();
+    // a non-zero forwarder is what marks an id as already taken
+    if (address(s_upkeep[id].forwarder) != address(0)) revert UpkeepAlreadyExists();
+    s_upkeep[id] = upkeep;
+    s_upkeepAdmin[id] = admin;
+    s_checkData[id] = checkData;
+    // the upkeep's starting balance is expected to already be held by this contract
+    s_expectedLinkBalance = s_expectedLinkBalance + upkeep.balance;
+    s_upkeepTriggerConfig[id] = triggerConfig;
+    s_upkeepOffchainConfig[id] = offchainConfig;
+    s_upkeepIDs.add(id);
+  }
+
+  /**
+   * @dev creates an ID for the upkeep based on the upkeep's type
+   * @dev the format of the ID looks like this:
+   * ****00000000000X****************
+   * 4 bytes of entropy
+   * 11 bytes of zeros
+   * 1 identifying byte for the trigger type
+   * 16 bytes of entropy
+   * @dev this maintains the same level of entropy as eth addresses, so IDs will still be unique
+   * @dev we add the "identifying" part in the middle so that it is mostly hidden from users who usually only
+   * see the first 4 and last 4 hex values ex 0x1234...ABCD
+   */
+  function _createID(Trigger triggerType) internal view returns (uint256) {
+    bytes1 empty;
+    IChainModule chainModule = s_hotVars.chainModule;
+    // entropy seed: previous block hash + this registry's address + creation nonce
+    // NOTE(review): s_storage.nonce is only read here — presumably incremented by the caller; confirm
+    bytes memory idBytes = abi.encodePacked(
+      keccak256(abi.encode(chainModule.blockHash((chainModule.blockNumber() - 1)), address(this), s_storage.nonce))
+    );
+    // zero bytes 4..14, then stamp the trigger type into byte 15 (see format above)
+    for (uint256 idx = 4; idx < 15; idx++) {
+      idBytes[idx] = empty;
+    }
+    idBytes[15] = bytes1(uint8(triggerType));
+    return uint256(bytes32(idBytes));
+  }
+
+  /**
+   * @dev retrieves feed data for fast gas/native and link/native prices. if the feed
+   * data is stale it uses the configured fallback price. Once a price is picked
+   * for gas it takes the min of gas price in the transaction or the fast gas
+   * price in order to reduce costs for the upkeep clients.
+   */
+  function _getFeedData(HotVars memory hotVars) internal view returns (uint256 gasWei, uint256 linkNative) {
+    uint32 stalenessSeconds = hotVars.stalenessSeconds;
+    // stalenessSeconds == 0 disables the staleness fallback entirely
+    bool staleFallback = stalenessSeconds > 0;
+    uint256 timestamp;
+    int256 feedValue;
+    (, feedValue, , timestamp, ) = i_fastGasFeed.latestRoundData();
+    // fall back when the answer is non-positive, timestamped in the future,
+    // or older than the configured staleness window
+    if (
+      feedValue <= 0 || block.timestamp < timestamp || (staleFallback && stalenessSeconds < block.timestamp - timestamp)
+    ) {
+      gasWei = s_fallbackGasPrice;
+    } else {
+      gasWei = uint256(feedValue);
+    }
+    (, feedValue, , timestamp, ) = i_linkNativeFeed.latestRoundData();
+    if (
+      feedValue <= 0 || block.timestamp < timestamp || (staleFallback && stalenessSeconds < block.timestamp - timestamp)
+    ) {
+      linkNative = s_fallbackLinkPrice;
+    } else {
+      linkNative = uint256(feedValue);
+    }
+    return (gasWei, linkNative);
+  }
+
+  /**
+   * @dev calculates PLI paid for gas spent plus a configure premium percentage
+   * @param gasLimit the amount of gas used
+   * @param gasOverhead the amount of gas overhead
+   * @param l1CostWei the amount to be charged for L1 fee in wei
+   * @param fastGasWei the fast gas price
+   * @param linkNative the exchange ratio between PLI and Native token
+   * @param isExecution if this is
triggered by a perform upkeep function
+   */
+  function _calculatePaymentAmount(
+    HotVars memory hotVars,
+    uint256 gasLimit,
+    uint256 gasOverhead,
+    uint256 l1CostWei,
+    uint256 fastGasWei,
+    uint256 linkNative,
+    bool isExecution
+  ) internal view returns (uint96, uint96) {
+    uint256 gasWei = fastGasWei * hotVars.gasCeilingMultiplier;
+    // in case it's actual execution use actual gas price, capped by fastGasWei * gasCeilingMultiplier
+    if (isExecution && tx.gasprice < gasWei) {
+      gasWei = tx.gasprice;
+    }
+    // gas reimbursement: (gas cost in wei + L1 fee) converted to PLI-wei via the PLI/native rate
+    uint256 gasPayment = ((gasWei * (gasLimit + gasOverhead) + l1CostWei) * 1e18) / linkNative;
+    // premium: PPB share of the (overhead-free) gas cost plus the flat fee
+    // (flatFeeMicroLink is micro-PLI, hence the 1e12 scale to PLI-wei)
+    uint256 premium = (((gasWei * gasLimit) + l1CostWei) * 1e9 * hotVars.paymentPremiumPPB) /
+      linkNative +
+      uint256(hotVars.flatFeeMicroLink) *
+      1e12;
+    // PLI_TOTAL_SUPPLY < UINT96_MAX
+    if (gasPayment + premium > PLI_TOTAL_SUPPLY) revert PaymentGreaterThanAllPLI();
+    return (uint96(gasPayment), uint96(premium));
+  }
+
+  /**
+   * @dev calculates the max PLI payment for an upkeep. Called during checkUpkeep simulation and assumes
+   * maximum gas overhead, L1 fee
+   */
+  function _getMaxLinkPayment(
+    HotVars memory hotVars,
+    Trigger triggerType,
+    uint32 performGas,
+    uint256 fastGasWei,
+    uint256 linkNative
+  ) internal view returns (uint96) {
+    uint256 maxGasOverhead;
+    if (triggerType == Trigger.CONDITION) {
+      maxGasOverhead = REGISTRY_CONDITIONAL_OVERHEAD;
+    } else if (triggerType == Trigger.LOG) {
+      maxGasOverhead = REGISTRY_LOG_OVERHEAD;
+    } else {
+      revert InvalidTriggerType();
+    }
+    // worst-case transmit calldata: max perform data + fixed encoding + per-signer signatures
+    uint256 maxCalldataSize = s_storage.maxPerformDataSize +
+      TRANSMIT_CALLDATA_FIXED_BYTES_OVERHEAD +
+      (TRANSMIT_CALLDATA_PER_SIGNER_BYTES_OVERHEAD * (hotVars.f + 1));
+    (uint256 chainModuleFixedOverhead, uint256 chainModulePerByteOverhead) = s_hotVars.chainModule.getGasOverhead();
+    maxGasOverhead +=
+      (REGISTRY_PER_SIGNER_GAS_OVERHEAD * (hotVars.f + 1)) +
+      ((REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD + chainModulePerByteOverhead) * maxCalldataSize) +
+      chainModuleFixedOverhead;
+
+    uint256 maxL1Fee = hotVars.gasCeilingMultiplier * hotVars.chainModule.getMaxL1Fee(maxCalldataSize);
+
+    (uint96 reimbursement, uint96 premium) = _calculatePaymentAmount(
+      hotVars,
+      performGas,
+      maxGasOverhead,
+      maxL1Fee,
+      fastGasWei,
+      linkNative,
+      false //isExecution
+    );
+
+    return reimbursement + premium;
+  }
+
+  /**
+   * @dev move a transmitter's balance from total pool to withdrawable balance
+   */
+  function _updateTransmitterBalanceFromPool(
+    address transmitterAddress,
+    uint96 totalPremium,
+    uint96 payeeCount
+  ) internal returns (uint96) {
+    Transmitter memory transmitter = s_transmitters[transmitterAddress];
+
+    if (transmitter.active) {
+      uint96 uncollected = totalPremium - transmitter.lastCollected;
+      uint96 due = uncollected / payeeCount;
+      transmitter.balance += due;
+      // integer-division remainder (uncollected % payeeCount) stays uncollected in the
+      // pool; advancing lastCollected by due * payeeCount preserves it for a later split
+      transmitter.lastCollected += due * payeeCount;
+      s_transmitters[transmitterAddress] = transmitter;
+    }
+
+    return transmitter.balance;
+  }
+
+  /**
+   * @dev gets the trigger type from an upkeepID (trigger type is encoded in the middle of the ID)
+   */
+  function _getTriggerType(uint256 upkeepId) internal pure returns (Trigger) {
+    bytes32 rawID = bytes32(upkeepId);
+    bytes1 empty = bytes1(0);
+    // IDs minted by _createID have bytes 4..14 zeroed; any other pattern predates the scheme
+    for (uint256 idx = 4; idx < 15; idx++) {
+      if (rawID[idx] != empty) {
+        // old IDs that were created before this standard and migrated to this registry
+        return Trigger.CONDITION;
+      }
+    }
+    return Trigger(uint8(rawID[15]));
+  }
+
+  /**
+   * @dev builds the calldata for the target's check call for the given trigger type:
+   * checkUpkeep(checkData) for conditional upkeeps, checkLog(log, checkData) for log upkeeps
+   */
+  function _checkPayload(
+    uint256 upkeepId,
+    Trigger triggerType,
+    bytes memory triggerData
+  ) internal view returns (bytes memory) {
+    if (triggerType == Trigger.CONDITION) {
+      return abi.encodeWithSelector(CHECK_SELECTOR, s_checkData[upkeepId]);
+    } else if (triggerType == Trigger.LOG) {
+      Log memory log = abi.decode(triggerData, (Log));
+      return abi.encodeWithSelector(CHECK_LOG_SELECTOR, log, s_checkData[upkeepId]);
+    }
+    revert InvalidTriggerType();
+  }
+
+  /**
+   * @dev _decodeReport decodes a serialized report into a Report struct
+   */
+  function
_decodeReport(bytes calldata rawReport) internal pure returns (Report memory) {
    Report memory report = abi.decode(rawReport, (Report));
    // The report carries one upkeep per index across four parallel arrays
    // (upkeepIds, gasLimits, triggers, performDatas); all must have the same
    // length or the report is malformed.
    uint256 expectedLength = report.upkeepIds.length;
    if (
      report.gasLimits.length != expectedLength ||
      report.triggers.length != expectedLength ||
      report.performDatas.length != expectedLength
    ) {
      revert InvalidReport();
    }
    return report;
  }

  /**
   * @dev Does some early sanity checks before actually performing an upkeep
   * @param upkeepId the id of the upkeep being transmitted
   * @param blocknumber the chain block number at transmit time
   * @param rawTrigger abi-encoded trigger payload (ConditionalTrigger or LogTrigger)
   * @param transmitInfo per-upkeep transmit context, including the trigger type and upkeep struct
   * @param hotVars cached hot configuration variables
   * @return bool whether the upkeep should be performed
   * @return bytes32 dedupID for preventing duplicate performances of this trigger
   */
  function _prePerformChecks(
    uint256 upkeepId,
    uint256 blocknumber,
    bytes memory rawTrigger,
    UpkeepTransmitInfo memory transmitInfo,
    HotVars memory hotVars
  ) internal returns (bool, bytes32) {
    bytes32 dedupID;
    if (transmitInfo.triggerType == Trigger.CONDITION) {
      // conditional triggers carry no dedup key; only staleness/reorg validation applies
      if (!_validateConditionalTrigger(upkeepId, blocknumber, rawTrigger, transmitInfo, hotVars))
        return (false, dedupID);
    } else if (transmitInfo.triggerType == Trigger.LOG) {
      bool valid;
      // log triggers produce a dedupID which the caller later persists to block re-performance
      (valid, dedupID) = _validateLogTrigger(upkeepId, blocknumber, rawTrigger, hotVars);
      if (!valid) return (false, dedupID);
    } else {
      revert InvalidTriggerType();
    }
    if (transmitInfo.upkeep.maxValidBlocknumber <= blocknumber) {
      // Can happen when an upkeep got cancelled after report was generated.
      // However we have a CANCELLATION_DELAY of 50 blocks so shouldn't happen in practice
      emit CancelledUpkeepReport(upkeepId, rawTrigger);
      return (false, dedupID);
    }
    return (true, dedupID);
  }

  /**
   * @dev Does some early sanity checks before actually performing an upkeep
   * @return bool true when the conditional trigger is neither stale nor reorged;
   * a false return emits the corresponding Stale/Reorged event
   */
  function _validateConditionalTrigger(
    uint256 upkeepId,
    uint256 blocknumber,
    bytes memory rawTrigger,
    UpkeepTransmitInfo memory transmitInfo,
    HotVars memory hotVars
  ) internal returns (bool) {
    ConditionalTrigger memory trigger = abi.decode(rawTrigger, (ConditionalTrigger));
    if (trigger.blockNum < transmitInfo.upkeep.lastPerformedBlockNumber) {
      // Can happen when another report performed this upkeep after this report was generated
      emit StaleUpkeepReport(upkeepId, rawTrigger);
      return false;
    }
    if (
      (hotVars.reorgProtectionEnabled &&
        (trigger.blockHash != bytes32("") && hotVars.chainModule.blockHash(trigger.blockNum) != trigger.blockHash)) ||
      trigger.blockNum >= blocknumber
    ) {
      // There are two cases of reorged report
      // 1. trigger block number is in future: this is an edge case during extreme deep reorgs of chain
      // which is always protected against
      // 2. blockHash at trigger block number was same as trigger time. This is an optional check which is
      // applied if DON sends non empty trigger.blockHash. Note: It only works for last 256 blocks on chain
      // when it is sent
      emit ReorgedUpkeepReport(upkeepId, rawTrigger);
      return false;
    }
    return true;
  }

  /**
   * @dev validates a log trigger and derives its dedup key
   * @return bool true when the trigger is not reorged and has not already been performed
   * @return bytes32 the dedupID (hash of upkeepId + log block hash + tx hash + log index)
   */
  function _validateLogTrigger(
    uint256 upkeepId,
    uint256 blocknumber,
    bytes memory rawTrigger,
    HotVars memory hotVars
  ) internal returns (bool, bytes32) {
    LogTrigger memory trigger = abi.decode(rawTrigger, (LogTrigger));
    // dedupID uniquely identifies this (upkeep, log) pair so the same log can never pay out twice
    bytes32 dedupID = keccak256(abi.encodePacked(upkeepId, trigger.logBlockHash, trigger.txHash, trigger.logIndex));
    if (
      (hotVars.reorgProtectionEnabled &&
        (trigger.blockHash != bytes32("") && hotVars.chainModule.blockHash(trigger.blockNum) != trigger.blockHash)) ||
      trigger.blockNum >= blocknumber
    ) {
      // Reorg protection is same as conditional trigger upkeeps
      emit ReorgedUpkeepReport(upkeepId, rawTrigger);
      return (false, dedupID);
    }
    if (s_dedupKeys[dedupID]) {
      // this exact log was already performed for this upkeep
      emit StaleUpkeepReport(upkeepId, rawTrigger);
      return (false, dedupID);
    }
    return (true, dedupID);
  }

  /**
   * @dev Verify signatures attached to report
   * @dev reverts if any signer is inactive or if any signer signed more than once
   */
  function _verifyReportSignature(
    bytes32[3] calldata reportContext,
    bytes calldata report,
    bytes32[] calldata rs,
    bytes32[] calldata ss,
    bytes32 rawVs
  ) internal view {
    bytes32 h = keccak256(abi.encode(keccak256(report), reportContext));
    // i-th byte counts number of sigs made by i-th signer
    uint256 signedCount = 0;

    Signer memory signer;
    address signerAddress;
    for (uint256 i = 0; i < rs.length; i++) {
      // rawVs packs one recovery-id byte per signature; +27 converts to the ecrecover v range
      signerAddress = ecrecover(h, uint8(rawVs[i]) + 27, rs[i], ss[i]);
      signer = s_signers[signerAddress];
      if (!signer.active) revert OnlyActiveSigners();
      unchecked {
        // bump the per-signer byte counter within the packed word
        signedCount += 1 << (8 * signer.index);
      }
    }

    // masking with ORACLE_MASK keeps only a legal "one sig per signer" pattern;
    // any byte > 1 (a duplicate signer) survives the mask check and reverts
    if (signedCount & ORACLE_MASK != signedCount) revert DuplicateSigners();
  }

  /**
   * @dev updates a storage marker for this upkeep to prevent duplicate and out of order performances
   * @dev for conditional triggers we set the latest block number, for log triggers we store a dedupID
   */
function _updateTriggerMarker(
    uint256 upkeepID,
    uint256 blocknumber,
    UpkeepTransmitInfo memory upkeepTransmitInfo
  ) internal {
    if (upkeepTransmitInfo.triggerType == Trigger.CONDITION) {
      // conditional: record the block this upkeep was performed at so older reports become stale
      s_upkeep[upkeepID].lastPerformedBlockNumber = uint32(blocknumber);
    } else if (upkeepTransmitInfo.triggerType == Trigger.LOG) {
      // log: persist the dedup key so the same log can never be performed again
      s_dedupKeys[upkeepTransmitInfo.dedupID] = true;
      emit DedupKeyAdded(upkeepTransmitInfo.dedupID);
    }
  }

  /**
   * @dev calls the Upkeep target with the performData param passed in by the
   * transmitter and the exact gas required by the Upkeep
   * @param forwarder the upkeep's dedicated forwarder contract, which relays the call to the target
   * @param performGas gas to forward to the target
   * @param performData raw perform data; wrapped with PERFORM_SELECTOR before forwarding
   * @return success whether the forwarded call succeeded
   * @return gasUsed gas consumed by the forwarded call, as reported by the forwarder
   */
  function _performUpkeep(
    IAutomationForwarder forwarder,
    uint256 performGas,
    bytes memory performData
  ) internal nonReentrant returns (bool success, uint256 gasUsed) {
    performData = abi.encodeWithSelector(PERFORM_SELECTOR, performData);
    return forwarder.forward(performGas, performData);
  }

  /**
   * @dev does postPerform payment processing for an upkeep. Deducts upkeep's balance and increases
   * amount spent.
   */
  function _postPerformPayment(
    HotVars memory hotVars,
    uint256 upkeepId,
    uint256 gasUsed,
    uint256 fastGasWei,
    uint256 linkNative,
    uint256 gasOverhead,
    uint256 l1Fee
  ) internal returns (uint96 gasReimbursement, uint96 premium) {
    (gasReimbursement, premium) = _calculatePaymentAmount(
      hotVars,
      gasUsed,
      gasOverhead,
      l1Fee,
      fastGasWei,
      linkNative,
      true // isExecution
    );

    uint96 balance = s_upkeep[upkeepId].balance;
    uint96 payment = gasReimbursement + premium;

    // this shouldn't happen, but in rare edge cases, we charge the full balance in case the user
    // can't cover the amount owed
    if (balance < gasReimbursement) {
      // balance cannot even cover gas: give everything to reimbursement, zero premium
      payment = balance;
      gasReimbursement = balance;
      premium = 0;
    } else if (balance < payment) {
      // gas covered but premium only partially: shrink premium to the remainder
      payment = balance;
      premium = payment - gasReimbursement;
    }

    s_upkeep[upkeepId].balance -= payment;
    s_upkeep[upkeepId].amountSpent += payment;

    return (gasReimbursement, premium);
  }

  /**
   * @dev ensures the upkeep is not cancelled and the caller is the upkeep admin
   */
  function _requireAdminAndNotCancelled(uint256 upkeepId) internal view {
    if (msg.sender != s_upkeepAdmin[upkeepId]) revert OnlyCallableByAdmin();
    // a cancelled upkeep has maxValidBlocknumber set to its cancellation height (!= UINT32_MAX)
    if (s_upkeep[upkeepId].maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled();
  }

  /**
   * @dev replicates OpenZeppelin's ReentrancyGuard but optimized to fit our storage
   * @dev uses a bool flag packed into s_hotVars instead of a dedicated uint256 slot
   */
  modifier nonReentrant() {
    if (s_hotVars.reentrancyGuard) revert ReentrantCall();
    s_hotVars.reentrancyGuard = true;
    _;
    s_hotVars.reentrancyGuard = false;
  }

  /**
   * @notice only allows a pre-configured address to initiate offchain read
   * @dev tx.origin check: these entry points are meant for eth_call simulation only
   */
  function _preventExecution() internal view {
    if (tx.origin != i_allowedReadOnlyAddress) {
      revert OnlySimulatedBackend();
    }
  }
}
diff --git a/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistryLogicA2_2.sol b/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistryLogicA2_2.sol
new file mode 100644
index 00000000..ec8f8b84
---
/dev/null +++ b/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistryLogicA2_2.sol @@ -0,0 +1,431 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.19; + +import {EnumerableSet} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import {Address} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; +import {AutomationRegistryBase2_2} from "./AutomationRegistryBase2_2.sol"; +import {AutomationRegistryLogicB2_2} from "./AutomationRegistryLogicB2_2.sol"; +import {Chainable} from "../../Chainable.sol"; +import {AutomationForwarder} from "../../AutomationForwarder.sol"; +import {IAutomationForwarder} from "../../interfaces/IAutomationForwarder.sol"; +import {UpkeepTranscoderInterfaceV2} from "../../interfaces/UpkeepTranscoderInterfaceV2.sol"; +import {MigratableKeeperRegistryInterfaceV2} from "../../interfaces/MigratableKeeperRegistryInterfaceV2.sol"; + +/** + * @notice Logic contract, works in tandem with AutomationRegistry as a proxy + */ +contract AutomationRegistryLogicA2_2 is AutomationRegistryBase2_2, Chainable { + using Address for address; + using EnumerableSet for EnumerableSet.UintSet; + using EnumerableSet for EnumerableSet.AddressSet; + + /** + * @param logicB the address of the second logic contract + */ + constructor( + AutomationRegistryLogicB2_2 logicB + ) + AutomationRegistryBase2_2( + logicB.getLinkAddress(), + logicB.getLinkNativeFeedAddress(), + logicB.getFastGasFeedAddress(), + logicB.getAutomationForwarderLogic(), + logicB.getAllowedReadOnlyAddress() + ) + Chainable(address(logicB)) + {} + + /** + * @notice called by the automation DON to check if work is needed + * @param id the upkeep ID to check for work needed + * @param triggerData extra contextual data about the trigger (not used in all code paths) + * @dev this one of the core functions called in the hot path + * @dev there is a 2nd checkUpkeep function (below) that is being maintained for backwards 
compatibility + * @dev there is an incongruency on what gets returned during failure modes + * ex sometimes we include price data, sometimes we omit it depending on the failure + */ + function checkUpkeep( + uint256 id, + bytes memory triggerData + ) + public + returns ( + bool upkeepNeeded, + bytes memory performData, + UpkeepFailureReason upkeepFailureReason, + uint256 gasUsed, + uint256 gasLimit, + uint256 fastGasWei, + uint256 linkNative + ) + { + _preventExecution(); + + Trigger triggerType = _getTriggerType(id); + HotVars memory hotVars = s_hotVars; + Upkeep memory upkeep = s_upkeep[id]; + + if (hotVars.paused) return (false, bytes(""), UpkeepFailureReason.REGISTRY_PAUSED, 0, upkeep.performGas, 0, 0); + if (upkeep.maxValidBlocknumber != UINT32_MAX) + return (false, bytes(""), UpkeepFailureReason.UPKEEP_CANCELLED, 0, upkeep.performGas, 0, 0); + if (upkeep.paused) return (false, bytes(""), UpkeepFailureReason.UPKEEP_PAUSED, 0, upkeep.performGas, 0, 0); + + (fastGasWei, linkNative) = _getFeedData(hotVars); + uint96 maxLinkPayment = _getMaxLinkPayment(hotVars, triggerType, upkeep.performGas, fastGasWei, linkNative); + if (upkeep.balance < maxLinkPayment) { + return (false, bytes(""), UpkeepFailureReason.INSUFFICIENT_BALANCE, 0, upkeep.performGas, 0, 0); + } + + bytes memory callData = _checkPayload(id, triggerType, triggerData); + + gasUsed = gasleft(); + (bool success, bytes memory result) = upkeep.forwarder.getTarget().call{gas: s_storage.checkGasLimit}(callData); + gasUsed = gasUsed - gasleft(); + + if (!success) { + // User's target check reverted. 
We capture the revert data here and pass it within performData + if (result.length > s_storage.maxRevertDataSize) { + return ( + false, + bytes(""), + UpkeepFailureReason.REVERT_DATA_EXCEEDS_LIMIT, + gasUsed, + upkeep.performGas, + fastGasWei, + linkNative + ); + } + return ( + upkeepNeeded, + result, + UpkeepFailureReason.TARGET_CHECK_REVERTED, + gasUsed, + upkeep.performGas, + fastGasWei, + linkNative + ); + } + + (upkeepNeeded, performData) = abi.decode(result, (bool, bytes)); + if (!upkeepNeeded) + return ( + false, + bytes(""), + UpkeepFailureReason.UPKEEP_NOT_NEEDED, + gasUsed, + upkeep.performGas, + fastGasWei, + linkNative + ); + + if (performData.length > s_storage.maxPerformDataSize) + return ( + false, + bytes(""), + UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT, + gasUsed, + upkeep.performGas, + fastGasWei, + linkNative + ); + + return (upkeepNeeded, performData, upkeepFailureReason, gasUsed, upkeep.performGas, fastGasWei, linkNative); + } + + /** + * @notice see other checkUpkeep function for description + * @dev this function may be deprecated in a future version of plugin automation + */ + function checkUpkeep( + uint256 id + ) + external + returns ( + bool upkeepNeeded, + bytes memory performData, + UpkeepFailureReason upkeepFailureReason, + uint256 gasUsed, + uint256 gasLimit, + uint256 fastGasWei, + uint256 linkNative + ) + { + return checkUpkeep(id, bytes("")); + } + + /** + * @dev checkCallback is used specifically for automation data streams lookups (see StreamsLookupCompatibleInterface.sol) + * @param id the upkeepID to execute a callback for + * @param values the values returned from the data streams lookup + * @param extraData the user-provided extra context data + */ + function checkCallback( + uint256 id, + bytes[] memory values, + bytes calldata extraData + ) + external + returns (bool upkeepNeeded, bytes memory performData, UpkeepFailureReason upkeepFailureReason, uint256 gasUsed) + { + bytes memory payload = 
abi.encodeWithSelector(CHECK_CALLBACK_SELECTOR, values, extraData); + return executeCallback(id, payload); + } + + /** + * @notice this is a generic callback executor that forwards a call to a user's contract with the configured + * gas limit + * @param id the upkeepID to execute a callback for + * @param payload the data (including function selector) to call on the upkeep target contract + */ + function executeCallback( + uint256 id, + bytes memory payload + ) + public + returns (bool upkeepNeeded, bytes memory performData, UpkeepFailureReason upkeepFailureReason, uint256 gasUsed) + { + _preventExecution(); + + Upkeep memory upkeep = s_upkeep[id]; + gasUsed = gasleft(); + (bool success, bytes memory result) = upkeep.forwarder.getTarget().call{gas: s_storage.checkGasLimit}(payload); + gasUsed = gasUsed - gasleft(); + if (!success) { + return (false, bytes(""), UpkeepFailureReason.CALLBACK_REVERTED, gasUsed); + } + (upkeepNeeded, performData) = abi.decode(result, (bool, bytes)); + if (!upkeepNeeded) { + return (false, bytes(""), UpkeepFailureReason.UPKEEP_NOT_NEEDED, gasUsed); + } + if (performData.length > s_storage.maxPerformDataSize) { + return (false, bytes(""), UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT, gasUsed); + } + return (upkeepNeeded, performData, upkeepFailureReason, gasUsed); + } + + /** + * @notice adds a new upkeep + * @param target address to perform upkeep on + * @param gasLimit amount of gas to provide the target contract when + * performing upkeep + * @param admin address to cancel upkeep and withdraw remaining funds + * @param triggerType the trigger for the upkeep + * @param checkData data passed to the contract when checking for upkeep + * @param triggerConfig the config for the trigger + * @param offchainConfig arbitrary offchain config for the upkeep + */ + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + Trigger triggerType, + bytes calldata checkData, + bytes memory triggerConfig, + bytes memory 
offchainConfig + ) public returns (uint256 id) { + if (msg.sender != owner() && !s_registrars.contains(msg.sender)) revert OnlyCallableByOwnerOrRegistrar(); + if (!target.isContract()) revert NotAContract(); + id = _createID(triggerType); + IAutomationForwarder forwarder = IAutomationForwarder( + address(new AutomationForwarder(target, address(this), i_automationForwarderLogic)) + ); + _createUpkeep( + id, + Upkeep({ + performGas: gasLimit, + balance: 0, + maxValidBlocknumber: UINT32_MAX, + lastPerformedBlockNumber: 0, + amountSpent: 0, + paused: false, + forwarder: forwarder + }), + admin, + checkData, + triggerConfig, + offchainConfig + ); + s_storage.nonce++; + emit UpkeepRegistered(id, gasLimit, admin); + emit UpkeepCheckDataSet(id, checkData); + emit UpkeepTriggerConfigSet(id, triggerConfig); + emit UpkeepOffchainConfigSet(id, offchainConfig); + return (id); + } + + /** + * @notice this function registers a conditional upkeep, using a backwards compatible function signature + * @dev this function is backwards compatible with versions <=2.0, but may be removed in a future version + */ + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + bytes calldata checkData, + bytes calldata offchainConfig + ) external returns (uint256 id) { + return registerUpkeep(target, gasLimit, admin, Trigger.CONDITION, checkData, bytes(""), offchainConfig); + } + + /** + * @notice cancels an upkeep + * @param id the upkeepID to cancel + * @dev if a user cancels an upkeep, their funds are locked for CANCELLATION_DELAY blocks to + * allow any pending performUpkeep txs time to get confirmed + */ + function cancelUpkeep(uint256 id) external { + Upkeep memory upkeep = s_upkeep[id]; + bool canceled = upkeep.maxValidBlocknumber != UINT32_MAX; + bool isOwner = msg.sender == owner(); + + uint256 height = s_hotVars.chainModule.blockNumber(); + if (canceled && !(isOwner && upkeep.maxValidBlocknumber > height)) revert CannotCancel(); + if (!isOwner && msg.sender != 
s_upkeepAdmin[id]) revert OnlyCallableByOwnerOrAdmin(); + + if (!isOwner) { + height = height + CANCELLATION_DELAY; + } + s_upkeep[id].maxValidBlocknumber = uint32(height); + s_upkeepIDs.remove(id); + + // charge the cancellation fee if the minUpkeepSpend is not met + uint96 minUpkeepSpend = s_storage.minUpkeepSpend; + uint96 cancellationFee = 0; + // cancellationFee is supposed to be min(max(minUpkeepSpend - amountSpent,0), amountLeft) + if (upkeep.amountSpent < minUpkeepSpend) { + cancellationFee = minUpkeepSpend - upkeep.amountSpent; + if (cancellationFee > upkeep.balance) { + cancellationFee = upkeep.balance; + } + } + s_upkeep[id].balance = upkeep.balance - cancellationFee; + s_storage.ownerLinkBalance = s_storage.ownerLinkBalance + cancellationFee; + + emit UpkeepCanceled(id, uint64(height)); + } + + /** + * @notice adds fund to an upkeep + * @param id the upkeepID + * @param amount the amount of PLI to fund, in jules (jules = "wei" of PLI) + */ + function addFunds(uint256 id, uint96 amount) external { + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + s_upkeep[id].balance = upkeep.balance + amount; + s_expectedLinkBalance = s_expectedLinkBalance + amount; + i_link.transferFrom(msg.sender, address(this), amount); + emit FundsAdded(id, msg.sender, amount); + } + + /** + * @notice migrates upkeeps from one registry to another + * @param ids the upkeepIDs to migrate + * @param destination the destination registry address + * @dev a transcoder must be set in order to enable migration + * @dev migration permissions must be set on *both* sending and receiving registries + * @dev only an upkeep admin can migrate their upkeeps + */ + function migrateUpkeeps(uint256[] calldata ids, address destination) external { + if ( + s_peerRegistryMigrationPermission[destination] != MigrationPermission.OUTGOING && + s_peerRegistryMigrationPermission[destination] != MigrationPermission.BIDIRECTIONAL + ) revert 
MigrationNotPermitted();
    if (s_storage.transcoder == ZERO_ADDRESS) revert TranscoderNotSet();
    if (ids.length == 0) revert ArrayHasNoEntries();
    uint256 id;
    Upkeep memory upkeep;
    uint256 totalBalanceRemaining;
    // snapshot each upkeep's state before it is deleted from this registry
    address[] memory admins = new address[](ids.length);
    Upkeep[] memory upkeeps = new Upkeep[](ids.length);
    bytes[] memory checkDatas = new bytes[](ids.length);
    bytes[] memory triggerConfigs = new bytes[](ids.length);
    bytes[] memory offchainConfigs = new bytes[](ids.length);
    for (uint256 idx = 0; idx < ids.length; idx++) {
      id = ids[idx];
      upkeep = s_upkeep[id];
      _requireAdminAndNotCancelled(id);
      // point the upkeep's forwarder at the destination registry so performs route there
      upkeep.forwarder.updateRegistry(destination);
      upkeeps[idx] = upkeep;
      admins[idx] = s_upkeepAdmin[id];
      checkDatas[idx] = s_checkData[id];
      triggerConfigs[idx] = s_upkeepTriggerConfig[id];
      offchainConfigs[idx] = s_upkeepOffchainConfig[id];
      totalBalanceRemaining = totalBalanceRemaining + upkeep.balance;
      delete s_upkeep[id];
      delete s_checkData[id];
      delete s_upkeepTriggerConfig[id];
      delete s_upkeepOffchainConfig[id];
      // nullify existing proposed admin change if an upkeep is being migrated
      delete s_proposedAdmin[id];
      s_upkeepIDs.remove(id);
      emit UpkeepMigrated(id, upkeep.balance, destination);
    }
    s_expectedLinkBalance = s_expectedLinkBalance - totalBalanceRemaining;
    bytes memory encodedUpkeeps = abi.encode(
      ids,
      upkeeps,
      // empty targets array: the forwarders already carry the target addresses
      new address[](ids.length),
      admins,
      checkDatas,
      triggerConfigs,
      offchainConfigs
    );
    // transcode the payload to the destination registry's upkeep version before handing it over
    MigratableKeeperRegistryInterfaceV2(destination).receiveUpkeeps(
      UpkeepTranscoderInterfaceV2(s_storage.transcoder).transcodeUpkeeps(
        UPKEEP_VERSION_BASE,
        MigratableKeeperRegistryInterfaceV2(destination).upkeepVersion(),
        encodedUpkeeps
      )
    );
    // move the migrated upkeeps' PLI balances to the destination registry
    i_link.transfer(destination, totalBalanceRemaining);
  }

  /**
   * @notice received upkeeps migrated from another registry
   * @param encodedUpkeeps the raw upkeep data to import
   * @dev this function is never called directly, it is only called by
another registry's migrate function + */ + function receiveUpkeeps(bytes calldata encodedUpkeeps) external { + if ( + s_peerRegistryMigrationPermission[msg.sender] != MigrationPermission.INCOMING && + s_peerRegistryMigrationPermission[msg.sender] != MigrationPermission.BIDIRECTIONAL + ) revert MigrationNotPermitted(); + ( + uint256[] memory ids, + Upkeep[] memory upkeeps, + address[] memory targets, + address[] memory upkeepAdmins, + bytes[] memory checkDatas, + bytes[] memory triggerConfigs, + bytes[] memory offchainConfigs + ) = abi.decode(encodedUpkeeps, (uint256[], Upkeep[], address[], address[], bytes[], bytes[], bytes[])); + for (uint256 idx = 0; idx < ids.length; idx++) { + if (address(upkeeps[idx].forwarder) == ZERO_ADDRESS) { + upkeeps[idx].forwarder = IAutomationForwarder( + address(new AutomationForwarder(targets[idx], address(this), i_automationForwarderLogic)) + ); + } + _createUpkeep( + ids[idx], + upkeeps[idx], + upkeepAdmins[idx], + checkDatas[idx], + triggerConfigs[idx], + offchainConfigs[idx] + ); + emit UpkeepReceived(ids[idx], upkeeps[idx].balance, msg.sender); + } + } + + /** + * @notice sets the upkeep trigger config + * @param id the upkeepID to change the trigger for + * @param triggerConfig the new trigger config + */ + function setUpkeepTriggerConfig(uint256 id, bytes calldata triggerConfig) external { + _requireAdminAndNotCancelled(id); + s_upkeepTriggerConfig[id] = triggerConfig; + emit UpkeepTriggerConfigSet(id, triggerConfig); + } +} diff --git a/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistryLogicB2_2.sol b/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistryLogicB2_2.sol new file mode 100644 index 00000000..0969f341 --- /dev/null +++ b/contracts/src/v0.8/automation/dev/v2_2/AutomationRegistryLogicB2_2.sol @@ -0,0 +1,539 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.19; + +import {AutomationRegistryBase2_2} from "./AutomationRegistryBase2_2.sol"; +import {EnumerableSet} from 
"../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import {Address} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; +import {UpkeepFormat} from "../../interfaces/UpkeepTranscoderInterface.sol"; +import {IAutomationForwarder} from "../../interfaces/IAutomationForwarder.sol"; +import {IChainModule} from "../interfaces/v2_2/IChainModule.sol"; + +contract AutomationRegistryLogicB2_2 is AutomationRegistryBase2_2 { + using Address for address; + using EnumerableSet for EnumerableSet.UintSet; + using EnumerableSet for EnumerableSet.AddressSet; + + /** + * @dev see AutomationRegistry master contract for constructor description + */ + constructor( + address link, + address linkNativeFeed, + address fastGasFeed, + address automationForwarderLogic, + address allowedReadOnlyAddress + ) AutomationRegistryBase2_2(link, linkNativeFeed, fastGasFeed, automationForwarderLogic, allowedReadOnlyAddress) {} + + // ================================================================ + // | UPKEEP MANAGEMENT | + // ================================================================ + + /** + * @notice transfers the address of an admin for an upkeep + */ + function transferUpkeepAdmin(uint256 id, address proposed) external { + _requireAdminAndNotCancelled(id); + if (proposed == msg.sender) revert ValueNotChanged(); + + if (s_proposedAdmin[id] != proposed) { + s_proposedAdmin[id] = proposed; + emit UpkeepAdminTransferRequested(id, msg.sender, proposed); + } + } + + /** + * @notice accepts the transfer of an upkeep admin + */ + function acceptUpkeepAdmin(uint256 id) external { + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + if (s_proposedAdmin[id] != msg.sender) revert OnlyCallableByProposedAdmin(); + address past = s_upkeepAdmin[id]; + s_upkeepAdmin[id] = msg.sender; + s_proposedAdmin[id] = ZERO_ADDRESS; + + emit UpkeepAdminTransferred(id, past, msg.sender); + 
}

  /**
   * @notice pauses an upkeep - an upkeep will be neither checked nor performed while paused
   */
  function pauseUpkeep(uint256 id) external {
    _requireAdminAndNotCancelled(id);
    Upkeep memory upkeep = s_upkeep[id];
    if (upkeep.paused) revert OnlyUnpausedUpkeep();
    s_upkeep[id].paused = true;
    // remove from the active-id set so the DON stops checking it
    s_upkeepIDs.remove(id);
    emit UpkeepPaused(id);
  }

  /**
   * @notice unpauses an upkeep
   */
  function unpauseUpkeep(uint256 id) external {
    _requireAdminAndNotCancelled(id);
    Upkeep memory upkeep = s_upkeep[id];
    if (!upkeep.paused) revert OnlyPausedUpkeep();
    s_upkeep[id].paused = false;
    // re-add to the active-id set so the DON resumes checking it
    s_upkeepIDs.add(id);
    emit UpkeepUnpaused(id);
  }

  /**
   * @notice updates the checkData for an upkeep
   * @dev reverts if the new checkData exceeds the configured maxCheckDataSize
   */
  function setUpkeepCheckData(uint256 id, bytes calldata newCheckData) external {
    _requireAdminAndNotCancelled(id);
    if (newCheckData.length > s_storage.maxCheckDataSize) revert CheckDataExceedsLimit();
    s_checkData[id] = newCheckData;
    emit UpkeepCheckDataSet(id, newCheckData);
  }

  /**
   * @notice updates the gas limit for an upkeep
   * @dev the new limit must lie within [PERFORM_GAS_MIN, maxPerformGas]
   */
  function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external {
    if (gasLimit < PERFORM_GAS_MIN || gasLimit > s_storage.maxPerformGas) revert GasLimitOutsideRange();
    _requireAdminAndNotCancelled(id);
    s_upkeep[id].performGas = gasLimit;

    emit UpkeepGasLimitSet(id, gasLimit);
  }

  /**
   * @notice updates the offchain config for an upkeep
   */
  function setUpkeepOffchainConfig(uint256 id, bytes calldata config) external {
    _requireAdminAndNotCancelled(id);
    s_upkeepOffchainConfig[id] = config;
    emit UpkeepOffchainConfigSet(id, config);
  }

  /**
   * @notice withdraws PLI funds from an upkeep
   * @dev note that an upkeep must be cancelled first!!
+ */ + function withdrawFunds(uint256 id, address to) external nonReentrant { + if (to == ZERO_ADDRESS) revert InvalidRecipient(); + Upkeep memory upkeep = s_upkeep[id]; + if (s_upkeepAdmin[id] != msg.sender) revert OnlyCallableByAdmin(); + if (upkeep.maxValidBlocknumber > s_hotVars.chainModule.blockNumber()) revert UpkeepNotCanceled(); + uint96 amountToWithdraw = s_upkeep[id].balance; + s_expectedLinkBalance = s_expectedLinkBalance - amountToWithdraw; + s_upkeep[id].balance = 0; + i_link.transfer(to, amountToWithdraw); + emit FundsWithdrawn(id, amountToWithdraw, to); + } + + // ================================================================ + // | NODE MANAGEMENT | + // ================================================================ + + /** + * @notice transfers the address of payee for a transmitter + */ + function transferPayeeship(address transmitter, address proposed) external { + if (s_transmitterPayees[transmitter] != msg.sender) revert OnlyCallableByPayee(); + if (proposed == msg.sender) revert ValueNotChanged(); + + if (s_proposedPayee[transmitter] != proposed) { + s_proposedPayee[transmitter] = proposed; + emit PayeeshipTransferRequested(transmitter, msg.sender, proposed); + } + } + + /** + * @notice accepts the transfer of the payee + */ + function acceptPayeeship(address transmitter) external { + if (s_proposedPayee[transmitter] != msg.sender) revert OnlyCallableByProposedPayee(); + address past = s_transmitterPayees[transmitter]; + s_transmitterPayees[transmitter] = msg.sender; + s_proposedPayee[transmitter] = ZERO_ADDRESS; + + emit PayeeshipTransferred(transmitter, past, msg.sender); + } + + /** + * @notice withdraws PLI received as payment for work performed + */ + function withdrawPayment(address from, address to) external { + if (to == ZERO_ADDRESS) revert InvalidRecipient(); + if (s_transmitterPayees[from] != msg.sender) revert OnlyCallableByPayee(); + uint96 balance = _updateTransmitterBalanceFromPool(from, s_hotVars.totalPremium, 
uint96(s_transmittersList.length)); + s_transmitters[from].balance = 0; + s_expectedLinkBalance = s_expectedLinkBalance - balance; + i_link.transfer(to, balance); + emit PaymentWithdrawn(from, balance, to, msg.sender); + } + + // ================================================================ + // | OWNER / MANAGER ACTIONS | + // ================================================================ + + /** + * @notice sets the privilege config for an upkeep + */ + function setUpkeepPrivilegeConfig(uint256 upkeepId, bytes calldata newPrivilegeConfig) external { + if (msg.sender != s_storage.upkeepPrivilegeManager) { + revert OnlyCallableByUpkeepPrivilegeManager(); + } + s_upkeepPrivilegeConfig[upkeepId] = newPrivilegeConfig; + emit UpkeepPrivilegeConfigSet(upkeepId, newPrivilegeConfig); + } + + /** + * @notice withdraws the owner's PLI balance + */ + function withdrawOwnerFunds() external onlyOwner { + uint96 amount = s_storage.ownerLinkBalance; + s_expectedLinkBalance = s_expectedLinkBalance - amount; + s_storage.ownerLinkBalance = 0; + emit OwnerFundsWithdrawn(amount); + i_link.transfer(msg.sender, amount); + } + + /** + * @notice allows the owner to withdraw any PLI accidentally sent to the contract + */ + function recoverFunds() external onlyOwner { + uint256 total = i_link.balanceOf(address(this)); + i_link.transfer(msg.sender, total - s_expectedLinkBalance); + } + + /** + * @notice sets the payees for the transmitters + */ + function setPayees(address[] calldata payees) external onlyOwner { + if (s_transmittersList.length != payees.length) revert ParameterLengthError(); + for (uint256 i = 0; i < s_transmittersList.length; i++) { + address transmitter = s_transmittersList[i]; + address oldPayee = s_transmitterPayees[transmitter]; + address newPayee = payees[i]; + if ( + (newPayee == ZERO_ADDRESS) || (oldPayee != ZERO_ADDRESS && oldPayee != newPayee && newPayee != IGNORE_ADDRESS) + ) revert InvalidPayee(); + if (newPayee != IGNORE_ADDRESS) { + 
s_transmitterPayees[transmitter] = newPayee; + } + } + emit PayeesUpdated(s_transmittersList, payees); + } + + /** + * @notice sets the migration permission for a peer registry + * @dev this must be done before upkeeps can be migrated to/from another registry + */ + function setPeerRegistryMigrationPermission(address peer, MigrationPermission permission) external onlyOwner { + s_peerRegistryMigrationPermission[peer] = permission; + } + + /** + * @notice pauses the entire registry + */ + function pause() external onlyOwner { + s_hotVars.paused = true; + emit Paused(msg.sender); + } + + /** + * @notice unpauses the entire registry + */ + function unpause() external onlyOwner { + s_hotVars.paused = false; + emit Unpaused(msg.sender); + } + + /** + * @notice sets a generic bytes field used to indicate the privilege that this admin address had + * @param admin the address to set privilege for + * @param newPrivilegeConfig the privileges that this admin has + */ + function setAdminPrivilegeConfig(address admin, bytes calldata newPrivilegeConfig) external { + if (msg.sender != s_storage.upkeepPrivilegeManager) { + revert OnlyCallableByUpkeepPrivilegeManager(); + } + s_adminPrivilegeConfig[admin] = newPrivilegeConfig; + emit AdminPrivilegeConfigSet(admin, newPrivilegeConfig); + } + + // ================================================================ + // | GETTERS | + // ================================================================ + + function getConditionalGasOverhead() external pure returns (uint256) { + return REGISTRY_CONDITIONAL_OVERHEAD; + } + + function getLogGasOverhead() external pure returns (uint256) { + return REGISTRY_LOG_OVERHEAD; + } + + function getPerPerformByteGasOverhead() external pure returns (uint256) { + return REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD; + } + + function getPerSignerGasOverhead() external pure returns (uint256) { + return REGISTRY_PER_SIGNER_GAS_OVERHEAD; + } + + function getTransmitCalldataFixedBytesOverhead() external pure returns 
(uint256) { + return TRANSMIT_CALLDATA_FIXED_BYTES_OVERHEAD; + } + + function getTransmitCalldataPerSignerBytesOverhead() external pure returns (uint256) { + return TRANSMIT_CALLDATA_PER_SIGNER_BYTES_OVERHEAD; + } + + function getCancellationDelay() external pure returns (uint256) { + return CANCELLATION_DELAY; + } + + function getLinkAddress() external view returns (address) { + return address(i_link); + } + + function getLinkNativeFeedAddress() external view returns (address) { + return address(i_linkNativeFeed); + } + + function getFastGasFeedAddress() external view returns (address) { + return address(i_fastGasFeed); + } + + function getAutomationForwarderLogic() external view returns (address) { + return i_automationForwarderLogic; + } + + function getAllowedReadOnlyAddress() external view returns (address) { + return i_allowedReadOnlyAddress; + } + + function upkeepTranscoderVersion() public pure returns (UpkeepFormat) { + return UPKEEP_TRANSCODER_VERSION_BASE; + } + + function upkeepVersion() public pure returns (uint8) { + return UPKEEP_VERSION_BASE; + } + + /** + * @notice read all of the details about an upkeep + * @dev this function may be deprecated in a future version of automation in favor of individual + * getters for each field + */ + function getUpkeep(uint256 id) external view returns (UpkeepInfo memory upkeepInfo) { + Upkeep memory reg = s_upkeep[id]; + address target = address(reg.forwarder) == address(0) ? address(0) : reg.forwarder.getTarget(); + upkeepInfo = UpkeepInfo({ + target: target, + performGas: reg.performGas, + checkData: s_checkData[id], + balance: reg.balance, + admin: s_upkeepAdmin[id], + maxValidBlocknumber: reg.maxValidBlocknumber, + lastPerformedBlockNumber: reg.lastPerformedBlockNumber, + amountSpent: reg.amountSpent, + paused: reg.paused, + offchainConfig: s_upkeepOffchainConfig[id] + }); + return upkeepInfo; + } + + /** + * @notice retrieve active upkeep IDs. 
Active upkeep is defined as an upkeep which is not paused and not canceled. + * @param startIndex starting index in list + * @param maxCount max count to retrieve (0 = unlimited) + * @dev the order of IDs in the list is **not guaranteed**, therefore, if making successive calls, one + * should consider keeping the blockheight constant to ensure a holistic picture of the contract state + */ + function getActiveUpkeepIDs(uint256 startIndex, uint256 maxCount) external view returns (uint256[] memory) { + uint256 numUpkeeps = s_upkeepIDs.length(); + if (startIndex >= numUpkeeps) revert IndexOutOfRange(); + uint256 endIndex = startIndex + maxCount; + endIndex = endIndex > numUpkeeps || maxCount == 0 ? numUpkeeps : endIndex; + uint256[] memory ids = new uint256[](endIndex - startIndex); + for (uint256 idx = 0; idx < ids.length; idx++) { + ids[idx] = s_upkeepIDs.at(idx + startIndex); + } + return ids; + } + + /** + * @notice returns the upkeep's trigger type + */ + function getTriggerType(uint256 upkeepId) external pure returns (Trigger) { + return _getTriggerType(upkeepId); + } + + /** + * @notice returns the trigger config for an upkeeep + */ + function getUpkeepTriggerConfig(uint256 upkeepId) public view returns (bytes memory) { + return s_upkeepTriggerConfig[upkeepId]; + } + + /** + * @notice read the current info about any transmitter address + */ + function getTransmitterInfo( + address query + ) external view returns (bool active, uint8 index, uint96 balance, uint96 lastCollected, address payee) { + Transmitter memory transmitter = s_transmitters[query]; + + uint96 pooledShare = 0; + if (transmitter.active) { + uint96 totalDifference = s_hotVars.totalPremium - transmitter.lastCollected; + pooledShare = totalDifference / uint96(s_transmittersList.length); + } + + return ( + transmitter.active, + transmitter.index, + (transmitter.balance + pooledShare), + transmitter.lastCollected, + s_transmitterPayees[query] + ); + } + + /** + * @notice read the current info about 
any signer address + */ + function getSignerInfo(address query) external view returns (bool active, uint8 index) { + Signer memory signer = s_signers[query]; + return (signer.active, signer.index); + } + + /** + * @notice read the current state of the registry + * @dev this function is deprecated + */ + function getState() + external + view + returns ( + State memory state, + OnchainConfigLegacy memory config, + address[] memory signers, + address[] memory transmitters, + uint8 f + ) + { + state = State({ + nonce: s_storage.nonce, + ownerLinkBalance: s_storage.ownerLinkBalance, + expectedLinkBalance: s_expectedLinkBalance, + totalPremium: s_hotVars.totalPremium, + numUpkeeps: s_upkeepIDs.length(), + configCount: s_storage.configCount, + latestConfigBlockNumber: s_storage.latestConfigBlockNumber, + latestConfigDigest: s_latestConfigDigest, + latestEpoch: s_hotVars.latestEpoch, + paused: s_hotVars.paused + }); + + config = OnchainConfigLegacy({ + paymentPremiumPPB: s_hotVars.paymentPremiumPPB, + flatFeeMicroLink: s_hotVars.flatFeeMicroLink, + checkGasLimit: s_storage.checkGasLimit, + stalenessSeconds: s_hotVars.stalenessSeconds, + gasCeilingMultiplier: s_hotVars.gasCeilingMultiplier, + minUpkeepSpend: s_storage.minUpkeepSpend, + maxPerformGas: s_storage.maxPerformGas, + maxCheckDataSize: s_storage.maxCheckDataSize, + maxPerformDataSize: s_storage.maxPerformDataSize, + maxRevertDataSize: s_storage.maxRevertDataSize, + fallbackGasPrice: s_fallbackGasPrice, + fallbackLinkPrice: s_fallbackLinkPrice, + transcoder: s_storage.transcoder, + registrars: s_registrars.values(), + upkeepPrivilegeManager: s_storage.upkeepPrivilegeManager + }); + + return (state, config, s_signersList, s_transmittersList, s_hotVars.f); + } + + /** + * @notice get the chain module + */ + function getChainModule() external view returns (IChainModule chainModule) { + return s_hotVars.chainModule; + } + + /** + * @notice if this registry has reorg protection enabled + */ + function 
getReorgProtectionEnabled() external view returns (bool reorgProtectionEnabled) { + return s_hotVars.reorgProtectionEnabled; + } + + /** + * @notice calculates the minimum balance required for an upkeep to remain eligible + * @param id the upkeep id to calculate minimum balance for + */ + function getBalance(uint256 id) external view returns (uint96 balance) { + return s_upkeep[id].balance; + } + + /** + * @notice calculates the minimum balance required for an upkeep to remain eligible + * @param id the upkeep id to calculate minimum balance for + */ + function getMinBalance(uint256 id) external view returns (uint96) { + return getMinBalanceForUpkeep(id); + } + + /** + * @notice calculates the minimum balance required for an upkeep to remain eligible + * @param id the upkeep id to calculate minimum balance for + * @dev this will be deprecated in a future version in favor of getMinBalance + */ + function getMinBalanceForUpkeep(uint256 id) public view returns (uint96 minBalance) { + return getMaxPaymentForGas(_getTriggerType(id), s_upkeep[id].performGas); + } + + /** + * @notice calculates the maximum payment for a given gas limit + * @param gasLimit the gas to calculate payment for + */ + function getMaxPaymentForGas(Trigger triggerType, uint32 gasLimit) public view returns (uint96 maxPayment) { + HotVars memory hotVars = s_hotVars; + (uint256 fastGasWei, uint256 linkNative) = _getFeedData(hotVars); + return _getMaxLinkPayment(hotVars, triggerType, gasLimit, fastGasWei, linkNative); + } + + /** + * @notice retrieves the migration permission for a peer registry + */ + function getPeerRegistryMigrationPermission(address peer) external view returns (MigrationPermission) { + return s_peerRegistryMigrationPermission[peer]; + } + + /** + * @notice returns the upkeep privilege config + */ + function getUpkeepPrivilegeConfig(uint256 upkeepId) external view returns (bytes memory) { + return s_upkeepPrivilegeConfig[upkeepId]; + } + + /** + * @notice returns the upkeep 
privilege config + */ + function getAdminPrivilegeConfig(address admin) external view returns (bytes memory) { + return s_adminPrivilegeConfig[admin]; + } + + /** + * @notice returns the upkeep's forwarder contract + */ + function getForwarder(uint256 upkeepID) external view returns (IAutomationForwarder) { + return s_upkeep[upkeepID].forwarder; + } + + /** + * @notice returns the upkeep's forwarder contract + */ + function hasDedupKey(bytes32 dedupKey) external view returns (bool) { + return s_dedupKeys[dedupKey]; + } +} diff --git a/contracts/src/v0.8/automation/dev/v2_2/AutomationUtils2_2.sol b/contracts/src/v0.8/automation/dev/v2_2/AutomationUtils2_2.sol new file mode 100644 index 00000000..25589372 --- /dev/null +++ b/contracts/src/v0.8/automation/dev/v2_2/AutomationUtils2_2.sol @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.19; + +import {AutomationRegistryBase2_2} from "./AutomationRegistryBase2_2.sol"; +import {Log} from "../../interfaces/ILogAutomation.sol"; + +/** + * @notice this file exposes structs that are otherwise internal to the automation registry + * doing this allows those structs to be encoded and decoded with type safety in offchain code + * and tests because generated wrappers are made available + */ + +/** + * @notice structure of trigger for log triggers + */ +struct LogTriggerConfig { + address contractAddress; + uint8 filterSelector; // denotes which topics apply to filter ex 000, 101, 111...only last 3 bits apply + bytes32 topic0; + bytes32 topic1; + bytes32 topic2; + bytes32 topic3; +} + +contract AutomationUtils2_2 { + /** + * @dev this can be removed as OnchainConfig is now exposed directly from the registry + */ + function _onChainConfig(AutomationRegistryBase2_2.OnchainConfig memory) external {} // 0x2ff92a81 + + function _report(AutomationRegistryBase2_2.Report memory) external {} // 0xe65d6546 + + function _logTriggerConfig(LogTriggerConfig memory) external {} // 0x21f373d7 + + function 
_logTrigger(AutomationRegistryBase2_2.LogTrigger memory) external {} // 0x1c8d8260 + + function _conditionalTrigger(AutomationRegistryBase2_2.ConditionalTrigger memory) external {} // 0x4b6df294 + + function _log(Log memory) external {} // 0xe9720a49 +} diff --git a/contracts/src/v0.8/automation/dev/v2_2/LICENSE b/contracts/src/v0.8/automation/dev/v2_2/LICENSE new file mode 100644 index 00000000..87ee5a6b --- /dev/null +++ b/contracts/src/v0.8/automation/dev/v2_2/LICENSE @@ -0,0 +1,57 @@ +Business Source License 1.1 + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +"Business Source License" is a trademark of MariaDB Corporation Ab. + +--- + +Parameters + +Licensor: SmartContract Plugin Limited SEZC + +Licensed Work: Automation v2.2 +The Licensed Work is (c) 2024 SmartContract Plugin Limited SEZC + +Additional Use Grant(s): +You may make use of Automation v2.1, v2.2 (which is available subject to the license here the “Licensed Work”) solely for purposes listed below: +https://github.com/goplugin/plugin-automation/tree/main/Automation_Grants.md + +Change Date: January 22, 2028 + +Change License: MIT + +--- + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. 
+ +If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. + +MariaDB hereby grants you permission to use this License’s text to license your works, and to refer to it using the trademark "Business Source License", as long as you comply with the Covenants of Licensor below. + +--- + +Covenants of Licensor + +In consideration of the right to use this License’s text and the "Business Source License" name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: + +1. 
To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where "compatible" means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. + +2. To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text "None". + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/contracts/src/v0.8/automation/dev/v2_2/README.md b/contracts/src/v0.8/automation/dev/v2_2/README.md new file mode 100644 index 00000000..fbce9e45 --- /dev/null +++ b/contracts/src/v0.8/automation/dev/v2_2/README.md @@ -0,0 +1,40 @@ +# Automation Contract Structure + +The on-chain component of Plugin automation is too large to fit into the [size requirements][size-limit-eip] of a single contract. It is also too large to fit into 2 contracts, a solution that works for most large projects. Therefore, we included this explanation of how the pieces fit together and various tradeoffs incurred. + +### Glossary + +**Master Contract** - also known as the “storage” contract. This is the contract whose state we care about. It is the entry-point into the chain of delegatecalls. (We avoid the term "proxy" because it is commonly associated with upgradability, and this system _is not upgradable_ even though it relies on some of the same mechanics.) + +**Logic Contract** - this a contract whose sole purpose is to hold code. We use the code at this address and execute it in the context of the master contract in order to increase our total capacity for on-chain code. + +### Overview + +We chain multiple logic contracts together using [fallback functions][fallback] and [delegatecall][delegatecall]. 
If a function definition is not found on one contract, we fall back to the next, always executing the function in the scope of the master contract. The actual implementation of this is based off of [OZ's Proxy contract][oz-proxy]. + +### Diagram + +```mermaid +graph LR + Master -- delegatecall --> la[Logic A] + la -- delegatecall --> lb[Logic B] + lb -. delegatecall .-> lx[Logic X] +``` + +### Special Considerations + +- functions on the master contract have the least gas overhead, therefore, our most price-sensitive functions live there +- functions on the master contract have first-class support from tools like etherscan and tenderly - functions that we (or users) call often to debug should live there +- etherscan supports executing logic contract functions that are once removed from the master - therefore we give secondary preference to the first logic contract for user and debugging functions +- functions on logic A through logic X (as of writing) have no support on etherscan and will essentially be "invisible" to everyone but advanced users - we will try to reserve this space for uncommon interactions that are mostly done progamatically +- We use Logic A, B, C... to avoid confusion with the version ex `AutomationRegistryLogicA2_2.sol` --> Logic Contract A verion 2.1 +- Storage locations for logic contract addresses MUST BE BYTECODE (this is done by marking them as "immutable") otherwise the chaining mechanism will break + +### Master Interface + +The Master Interface is a deduped combination of all the interfaces from all contracts in the chain. We generate this interface programatically using the script `generate-automation-master-interface.ts`. This process is not a hardened one. Users of this script should take great care to ensure it's efficacy. 
+ +[size-limit-eip]: https://eips.ethereum.org/EIPS/eip-170 +[fallback]: https://docs.soliditylang.org/en/v0.8.12/contracts.html#fallback-function +[delegatecall]: https://docs.soliditylang.org/en/v0.8.12/introduction-to-smart-contracts.html?highlight=delegatecall#delegatecall-callcode-and-libraries +[oz-proxy]: https://docs.openzeppelin.com/contracts/4.x/api/proxy#Proxy diff --git a/contracts/src/v0.8/automation/interfaces/AutomationCompatibleInterface.sol b/contracts/src/v0.8/automation/interfaces/AutomationCompatibleInterface.sol new file mode 100644 index 00000000..a60e3f91 --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/AutomationCompatibleInterface.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface AutomationCompatibleInterface { + /** + * @notice method that is simulated by the keepers to see if any work actually + * needs to be performed. This method does does not actually need to be + * executable, and since it is only ever simulated it can consume lots of gas. + * @dev To ensure that it is never called, you may want to add the + * cannotExecute modifier from KeeperBase to your implementation of this + * method. + * @param checkData specified in the upkeep registration so it is always the + * same for a registered upkeep. This can easily be broken down into specific + * arguments using `abi.decode`, so multiple upkeeps can be registered on the + * same contract and easily differentiated by the contract. + * @return upkeepNeeded boolean to indicate whether the keeper should call + * performUpkeep or not. + * @return performData bytes that the keeper should call performUpkeep with, if + * upkeep is needed. If you would like to encode data to decode later, try + * `abi.encode`. + */ + function checkUpkeep(bytes calldata checkData) external returns (bool upkeepNeeded, bytes memory performData); + + /** + * @notice method that is actually executed by the keepers, via the registry. 
+ * The data returned by the checkUpkeep simulation will be passed into + * this method to actually be executed. + * @dev The input to this method should not be trusted, and the caller of the + * method should not even be restricted to any single registry. Anyone should + * be able call it, and the input should be validated, there is no guarantee + * that the data passed in is the performData returned from checkUpkeep. This + * could happen due to malicious keepers, racing keepers, or simply a state + * change while the performUpkeep transaction is waiting for confirmation. + * Always validate the data passed in. + * @param performData is the data which was passed back from the checkData + * simulation. If it is encoded, it can easily be decoded into other types by + * calling `abi.decode`. This data should not be trusted, and should be + * validated against the contract's current state. + */ + function performUpkeep(bytes calldata performData) external; +} diff --git a/contracts/src/v0.8/automation/interfaces/IAutomationForwarder.sol b/contracts/src/v0.8/automation/interfaces/IAutomationForwarder.sol new file mode 100644 index 00000000..0a53de69 --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/IAutomationForwarder.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol"; +import {IAutomationRegistryConsumer} from "./IAutomationRegistryConsumer.sol"; + +interface IAutomationForwarder is ITypeAndVersion { + function forward(uint256 gasAmount, bytes memory data) external returns (bool success, uint256 gasUsed); + + function updateRegistry(address newRegistry) external; + + function getRegistry() external view returns (IAutomationRegistryConsumer); + + function getTarget() external view returns (address); +} diff --git a/contracts/src/v0.8/automation/interfaces/IAutomationRegistryConsumer.sol 
b/contracts/src/v0.8/automation/interfaces/IAutomationRegistryConsumer.sol new file mode 100644 index 00000000..bea3e950 --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/IAutomationRegistryConsumer.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +/** + * @notice IAutomationRegistryConsumer defines the LTS user-facing interface that we intend to maintain for + * across upgrades. As long as users use functions from within this interface, their upkeeps will retain + * backwards compatability across migrations. + * @dev Functions can be added to this interface, but not removed. + */ +interface IAutomationRegistryConsumer { + function getBalance(uint256 id) external view returns (uint96 balance); + + function getMinBalance(uint256 id) external view returns (uint96 minBalance); + + function cancelUpkeep(uint256 id) external; + + function pauseUpkeep(uint256 id) external; + + function unpauseUpkeep(uint256 id) external; + + function addFunds(uint256 id, uint96 amount) external; + + function withdrawFunds(uint256 id, address to) external; +} diff --git a/contracts/src/v0.8/automation/interfaces/ILogAutomation.sol b/contracts/src/v0.8/automation/interfaces/ILogAutomation.sol new file mode 100644 index 00000000..20a41f47 --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/ILogAutomation.sol @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/** + * @member index the index of the log in the block. 
0 for the first log + * @member timestamp the timestamp of the block containing the log + * @member txHash the hash of the transaction containing the log + * @member blockNumber the number of the block containing the log + * @member blockHash the hash of the block containing the log + * @member source the address of the contract that emitted the log + * @member topics the indexed topics of the log + * @member data the data of the log + */ +struct Log { + uint256 index; + uint256 timestamp; + bytes32 txHash; + uint256 blockNumber; + bytes32 blockHash; + address source; + bytes32[] topics; + bytes data; +} + +interface ILogAutomation { + /** + * @notice method that is simulated by the keepers to see if any work actually + * needs to be performed. This method does does not actually need to be + * executable, and since it is only ever simulated it can consume lots of gas. + * @dev To ensure that it is never called, you may want to add the + * cannotExecute modifier from KeeperBase to your implementation of this + * method. + * @param log the raw log data matching the filter that this contract has + * registered as a trigger + * @param checkData user-specified extra data to provide context to this upkeep + * @return upkeepNeeded boolean to indicate whether the keeper should call + * performUpkeep or not. + * @return performData bytes that the keeper should call performUpkeep with, if + * upkeep is needed. If you would like to encode data to decode later, try + * `abi.encode`. + */ + function checkLog( + Log calldata log, + bytes memory checkData + ) external returns (bool upkeepNeeded, bytes memory performData); + + /** + * @notice method that is actually executed by the keepers, via the registry. + * The data returned by the checkUpkeep simulation will be passed into + * this method to actually be executed. + * @dev The input to this method should not be trusted, and the caller of the + * method should not even be restricted to any single registry. 
Anyone should + * be able call it, and the input should be validated, there is no guarantee + * that the data passed in is the performData returned from checkUpkeep. This + * could happen due to malicious keepers, racing keepers, or simply a state + * change while the performUpkeep transaction is waiting for confirmation. + * Always validate the data passed in. + * @param performData is the data which was passed back from the checkData + * simulation. If it is encoded, it can easily be decoded into other types by + * calling `abi.decode`. This data should not be trusted, and should be + * validated against the contract's current state. + */ + function performUpkeep(bytes calldata performData) external; +} diff --git a/contracts/src/v0.8/automation/interfaces/KeeperCompatibleInterface.sol b/contracts/src/v0.8/automation/interfaces/KeeperCompatibleInterface.sol new file mode 100644 index 00000000..b5ba8196 --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/KeeperCompatibleInterface.sol @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: MIT +/** + * @notice This is a deprecated interface. Please use AutomationCompatibleInterface directly. + */ +pragma solidity ^0.8.0; +// solhint-disable-next-line no-unused-import +import {AutomationCompatibleInterface as KeeperCompatibleInterface} from "./AutomationCompatibleInterface.sol"; diff --git a/contracts/src/v0.8/automation/interfaces/MigratableKeeperRegistryInterface.sol b/contracts/src/v0.8/automation/interfaces/MigratableKeeperRegistryInterface.sol new file mode 100644 index 00000000..ca575267 --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/MigratableKeeperRegistryInterface.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +import "../UpkeepFormat.sol"; + +interface MigratableKeeperRegistryInterface { + /** + * @notice Migrates upkeeps from one registry to another, including PLI and upkeep params. + * Only callable by the upkeep admin. All upkeeps must have the same admin. 
Can only migrate active upkeeps. + * @param upkeepIDs ids of upkeeps to migrate + * @param destination the address of the registry to migrate to + */ + function migrateUpkeeps(uint256[] calldata upkeepIDs, address destination) external; + + /** + * @notice Called by other registries when migrating upkeeps. Only callable by other registries. + * @param encodedUpkeeps abi encoding of upkeeps to import - decoded by the transcoder + */ + function receiveUpkeeps(bytes calldata encodedUpkeeps) external; + + /** + * @notice Specifies the version of upkeep data that this registry requires in order to import + */ + function upkeepTranscoderVersion() external view returns (UpkeepFormat version); +} diff --git a/contracts/src/v0.8/automation/interfaces/MigratableKeeperRegistryInterfaceV2.sol b/contracts/src/v0.8/automation/interfaces/MigratableKeeperRegistryInterfaceV2.sol new file mode 100644 index 00000000..5813bf11 --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/MigratableKeeperRegistryInterfaceV2.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +import "../UpkeepFormat.sol"; + +interface MigratableKeeperRegistryInterfaceV2 { + /** + * @notice Migrates upkeeps from one registry to another, including PLI and upkeep params. + * Only callable by the upkeep admin. All upkeeps must have the same admin. Can only migrate active upkeeps. + * @param upkeepIDs ids of upkeeps to migrate + * @param destination the address of the registry to migrate to + */ + function migrateUpkeeps(uint256[] calldata upkeepIDs, address destination) external; + + /** + * @notice Called by other registries when migrating upkeeps. Only callable by other registries. 
+ * @param encodedUpkeeps abi encoding of upkeeps to import - decoded by the transcoder + */ + function receiveUpkeeps(bytes calldata encodedUpkeeps) external; + + /** + * @notice Specifies the version of upkeep data that this registry requires in order to import + */ + function upkeepVersion() external view returns (uint8 version); +} diff --git a/contracts/src/v0.8/automation/interfaces/StreamsLookupCompatibleInterface.sol b/contracts/src/v0.8/automation/interfaces/StreamsLookupCompatibleInterface.sol new file mode 100644 index 00000000..cf8526a4 --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/StreamsLookupCompatibleInterface.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface StreamsLookupCompatibleInterface { + error StreamsLookup(string feedParamKey, string[] feeds, string timeParamKey, uint256 time, bytes extraData); + + /** + * @notice any contract which wants to utilize StreamsLookup feature needs to + * implement this interface as well as the automation compatible interface. + * @param values an array of bytes returned from data streams endpoint. + * @param extraData context data from streams lookup process. + * @return upkeepNeeded boolean to indicate whether the keeper should call performUpkeep or not. + * @return performData bytes that the keeper should call performUpkeep with, if + * upkeep is needed. If you would like to encode data to decode later, try `abi.encode`. 
+ */ + function checkCallback( + bytes[] memory values, + bytes memory extraData + ) external view returns (bool upkeepNeeded, bytes memory performData); +} diff --git a/contracts/src/v0.8/automation/interfaces/UpkeepTranscoderInterface.sol b/contracts/src/v0.8/automation/interfaces/UpkeepTranscoderInterface.sol new file mode 100644 index 00000000..aa0c3c6a --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/UpkeepTranscoderInterface.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT + +import "../UpkeepFormat.sol"; + +pragma solidity ^0.8.0; + +interface UpkeepTranscoderInterface { + function transcodeUpkeeps( + UpkeepFormat fromVersion, + UpkeepFormat toVersion, + bytes calldata encodedUpkeeps + ) external view returns (bytes memory); +} diff --git a/contracts/src/v0.8/automation/interfaces/UpkeepTranscoderInterfaceV2.sol b/contracts/src/v0.8/automation/interfaces/UpkeepTranscoderInterfaceV2.sol new file mode 100644 index 00000000..e02d0f67 --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/UpkeepTranscoderInterfaceV2.sol @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +interface UpkeepTranscoderInterfaceV2 { + function transcodeUpkeeps( + uint8 fromVersion, + uint8 toVersion, + bytes calldata encodedUpkeeps + ) external view returns (bytes memory); +} diff --git a/contracts/src/v0.8/automation/interfaces/v1_2/AutomationRegistryInterface1_2.sol b/contracts/src/v0.8/automation/interfaces/v1_2/AutomationRegistryInterface1_2.sol new file mode 100644 index 00000000..a2182869 --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/v1_2/AutomationRegistryInterface1_2.sol @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/** + * @notice config of the registry + * @dev only used in params and return values + * @member paymentPremiumPPB payment premium rate oracles receive on top of + * being reimbursed for gas, measured in parts per billion + * @member flatFeeMicroLink flat fee paid to 
oracles for performing upkeeps, + * priced in MicroLink; can be used in conjunction with or independently of + * paymentPremiumPPB + * @member blockCountPerTurn number of blocks each oracle has during their turn to + * perform upkeep before it will be the next keeper's turn to submit + * @member checkGasLimit gas limit when checking for upkeep + * @member stalenessSeconds number of seconds that is allowed for feed data to + * be stale before switching to the fallback pricing + * @member gasCeilingMultiplier multiplier to apply to the fast gas feed price + * when calculating the payment ceiling for keepers + * @member minUpkeepSpend minimum PLI that an upkeep must spend before cancelling + * @member maxPerformGas max executeGas allowed for an upkeep on this registry + * @member fallbackGasPrice gas price used if the gas price feed is stale + * @member fallbackLinkPrice PLI price used if the PLI price feed is stale + * @member transcoder address of the transcoder contract + * @member registrar address of the registrar contract + */ +struct Config { + uint32 paymentPremiumPPB; + uint32 flatFeeMicroLink; // min 0.000001 PLI, max 4294 PLI + uint24 blockCountPerTurn; + uint32 checkGasLimit; + uint24 stalenessSeconds; + uint16 gasCeilingMultiplier; + uint96 minUpkeepSpend; + uint32 maxPerformGas; + uint256 fallbackGasPrice; + uint256 fallbackLinkPrice; + address transcoder; + address registrar; +} + +/** + * @notice state of the registry + * @dev only used in params and return values + * @member nonce used for ID generation + * @member ownerLinkBalance withdrawable balance of PLI by contract owner + * @member expectedLinkBalance the expected balance of PLI of the registry + * @member numUpkeeps total number of upkeeps on the registry + */ +struct State { + uint32 nonce; + uint96 ownerLinkBalance; + uint256 expectedLinkBalance; + uint256 numUpkeeps; +} + +interface AutomationRegistryBaseInterface { + function registerUpkeep( + address target, + uint32 gasLimit, + address 
admin, + bytes calldata checkData + ) external returns (uint256 id); + + function performUpkeep(uint256 id, bytes calldata performData) external returns (bool success); + + function cancelUpkeep(uint256 id) external; + + function addFunds(uint256 id, uint96 amount) external; + + function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external; + + function getUpkeep( + uint256 id + ) + external + view + returns ( + address target, + uint32 executeGas, + bytes memory checkData, + uint96 balance, + address lastKeeper, + address admin, + uint64 maxValidBlocknumber, + uint96 amountSpent + ); + + function getActiveUpkeepIDs(uint256 startIndex, uint256 maxCount) external view returns (uint256[] memory); + + function getKeeperInfo(address query) external view returns (address payee, bool active, uint96 balance); + + function getState() external view returns (State memory, Config memory, address[] memory); +} + +/** + * @dev The view methods are not actually marked as view in the implementation + * but we want them to be easily queried off-chain. Solidity will not compile + * if we actually inherit from this interface, so we document it here. 
+ */ +interface AutomationRegistryInterface is AutomationRegistryBaseInterface { + function checkUpkeep( + uint256 upkeepId, + address from + ) + external + view + returns (bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, int256 gasWei, int256 linkEth); +} + +interface AutomationRegistryExecutableInterface is AutomationRegistryBaseInterface { + function checkUpkeep( + uint256 upkeepId, + address from + ) + external + returns ( + bytes memory performData, + uint256 maxLinkPayment, + uint256 gasLimit, + uint256 adjustedGasWei, + uint256 linkEth + ); +} diff --git a/contracts/src/v0.8/automation/interfaces/v1_2/KeeperRegistryInterface1_2.sol b/contracts/src/v0.8/automation/interfaces/v1_2/KeeperRegistryInterface1_2.sol new file mode 100644 index 00000000..01f70ae9 --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/v1_2/KeeperRegistryInterface1_2.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +/** + * @notice This is a deprecated interface. Please use AutomationRegistryInterface1_2 directly. 
+ */ +pragma solidity ^0.8.0; +// solhint-disable-next-line no-unused-import +import {Config, State} from "./AutomationRegistryInterface1_2.sol"; +// solhint-disable-next-line no-unused-import +import {AutomationRegistryBaseInterface as KeeperRegistryBaseInterface} from "./AutomationRegistryInterface1_2.sol"; +// solhint-disable-next-line no-unused-import +import {AutomationRegistryInterface as KeeperRegistryInterface} from "./AutomationRegistryInterface1_2.sol"; +// solhint-disable-next-line no-unused-import +import {AutomationRegistryExecutableInterface as KeeperRegistryExecutableInterface} from "./AutomationRegistryInterface1_2.sol"; diff --git a/contracts/src/v0.8/automation/interfaces/v1_3/AutomationRegistryInterface1_3.sol b/contracts/src/v0.8/automation/interfaces/v1_3/AutomationRegistryInterface1_3.sol new file mode 100644 index 00000000..8b6fd76a --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/v1_3/AutomationRegistryInterface1_3.sol @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/** + * @notice config of the registry + * @dev only used in params and return values + * @member paymentPremiumPPB payment premium rate oracles receive on top of + * being reimbursed for gas, measured in parts per billion + * @member flatFeeMicroLink flat fee paid to oracles for performing upkeeps, + * priced in MicroLink; can be used in conjunction with or independently of + * paymentPremiumPPB + * @member blockCountPerTurn number of blocks each oracle has during their turn to + * perform upkeep before it will be the next keeper's turn to submit + * @member checkGasLimit gas limit when checking for upkeep + * @member stalenessSeconds number of seconds that is allowed for feed data to + * be stale before switching to the fallback pricing + * @member gasCeilingMultiplier multiplier to apply to the fast gas feed price + * when calculating the payment ceiling for keepers + * @member minUpkeepSpend minimum PLI that an upkeep must spend 
before cancelling + * @member maxPerformGas max executeGas allowed for an upkeep on this registry + * @member fallbackGasPrice gas price used if the gas price feed is stale + * @member fallbackLinkPrice PLI price used if the PLI price feed is stale + * @member transcoder address of the transcoder contract + * @member registrar address of the registrar contract + */ +struct Config { + uint32 paymentPremiumPPB; + uint32 flatFeeMicroLink; // min 0.000001 PLI, max 4294 PLI + uint24 blockCountPerTurn; + uint32 checkGasLimit; + uint24 stalenessSeconds; + uint16 gasCeilingMultiplier; + uint96 minUpkeepSpend; + uint32 maxPerformGas; + uint256 fallbackGasPrice; + uint256 fallbackLinkPrice; + address transcoder; + address registrar; +} + +/** + * @notice state of the registry + * @dev only used in params and return values + * @member nonce used for ID generation + * @member ownerLinkBalance withdrawable balance of PLI by contract owner + * @member expectedLinkBalance the expected balance of PLI of the registry + * @member numUpkeeps total number of upkeeps on the registry + */ +struct State { + uint32 nonce; + uint96 ownerLinkBalance; + uint256 expectedLinkBalance; + uint256 numUpkeeps; +} + +/** + * @notice relevant state of an upkeep + * @member balance the balance of this upkeep + * @member lastKeeper the keeper which last performs the upkeep + * @member executeGas the gas limit of upkeep execution + * @member maxValidBlocknumber until which block this upkeep is valid + * @member target the contract which needs to be serviced + * @member amountSpent the amount this upkeep has spent + * @member admin the upkeep admin + * @member paused if this upkeep has been paused + */ +struct Upkeep { + uint96 balance; + address lastKeeper; // 1 full evm word + uint96 amountSpent; + address admin; // 2 full evm words + uint32 executeGas; + uint32 maxValidBlocknumber; + address target; + bool paused; // 24 bits to 3 full evm words +} + +interface AutomationRegistryBaseInterface { + 
function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + bytes calldata checkData + ) external returns (uint256 id); + + function performUpkeep(uint256 id, bytes calldata performData) external returns (bool success); + + function cancelUpkeep(uint256 id) external; + + function pauseUpkeep(uint256 id) external; + + function unpauseUpkeep(uint256 id) external; + + function transferUpkeepAdmin(uint256 id, address proposed) external; + + function acceptUpkeepAdmin(uint256 id) external; + + function updateCheckData(uint256 id, bytes calldata newCheckData) external; + + function addFunds(uint256 id, uint96 amount) external; + + function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external; + + function getUpkeep( + uint256 id + ) + external + view + returns ( + address target, + uint32 executeGas, + bytes memory checkData, + uint96 balance, + address lastKeeper, + address admin, + uint64 maxValidBlocknumber, + uint96 amountSpent, + bool paused + ); + + function getActiveUpkeepIDs(uint256 startIndex, uint256 maxCount) external view returns (uint256[] memory); + + function getKeeperInfo(address query) external view returns (address payee, bool active, uint96 balance); + + function getState() external view returns (State memory, Config memory, address[] memory); +} + +/** + * @dev The view methods are not actually marked as view in the implementation + * but we want them to be easily queried off-chain. Solidity will not compile + * if we actually inherit from this interface, so we document it here. 
+ */ +interface AutomationRegistryInterface is AutomationRegistryBaseInterface { + function checkUpkeep( + uint256 upkeepId, + address from + ) + external + view + returns (bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, int256 gasWei, int256 linkEth); +} + +interface AutomationRegistryExecutableInterface is AutomationRegistryBaseInterface { + function checkUpkeep( + uint256 upkeepId, + address from + ) + external + returns ( + bytes memory performData, + uint256 maxLinkPayment, + uint256 gasLimit, + uint256 adjustedGasWei, + uint256 linkEth + ); +} diff --git a/contracts/src/v0.8/automation/interfaces/v2_0/AutomationRegistryInterface2_0.sol b/contracts/src/v0.8/automation/interfaces/v2_0/AutomationRegistryInterface2_0.sol new file mode 100644 index 00000000..5098deef --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/v2_0/AutomationRegistryInterface2_0.sol @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/** + * @notice OnchainConfig of the registry + * @dev only used in params and return values + * @member paymentPremiumPPB payment premium rate oracles receive on top of + * being reimbursed for gas, measured in parts per billion + * @member flatFeeMicroLink flat fee paid to oracles for performing upkeeps, + * priced in MicroLink; can be used in conjunction with or independently of + * paymentPremiumPPB + * @member checkGasLimit gas limit when checking for upkeep + * @member stalenessSeconds number of seconds that is allowed for feed data to + * be stale before switching to the fallback pricing + * @member gasCeilingMultiplier multiplier to apply to the fast gas feed price + * when calculating the payment ceiling for keepers + * @member minUpkeepSpend minimum PLI that an upkeep must spend before cancelling + * @member maxPerformGas max executeGas allowed for an upkeep on this registry + * @member fallbackGasPrice gas price used if the gas price feed is stale + * @member fallbackLinkPrice PLI price used if 
the PLI price feed is stale + * @member transcoder address of the transcoder contract + * @member registrar address of the registrar contract + */ +struct OnchainConfig { + uint32 paymentPremiumPPB; + uint32 flatFeeMicroLink; // min 0.000001 PLI, max 4294 PLI + uint32 checkGasLimit; + uint24 stalenessSeconds; + uint16 gasCeilingMultiplier; + uint96 minUpkeepSpend; + uint32 maxPerformGas; + uint32 maxCheckDataSize; + uint32 maxPerformDataSize; + uint256 fallbackGasPrice; + uint256 fallbackLinkPrice; + address transcoder; + address registrar; +} + +/** + * @notice state of the registry + * @dev only used in params and return values + * @member nonce used for ID generation + * @member ownerLinkBalance withdrawable balance of PLI by contract owner + * @member expectedLinkBalance the expected balance of PLI of the registry + * @member totalPremium the total premium collected on registry so far + * @member numUpkeeps total number of upkeeps on the registry + * @member configCount ordinal number of current config, out of all configs applied to this contract so far + * @member latestConfigBlockNumber last block at which this config was set + * @member latestConfigDigest domain-separation tag for current config + * @member latestEpoch for which a report was transmitted + * @member paused freeze on execution scoped to the entire registry + */ +struct State { + uint32 nonce; + uint96 ownerLinkBalance; + uint256 expectedLinkBalance; + uint96 totalPremium; + uint256 numUpkeeps; + uint32 configCount; + uint32 latestConfigBlockNumber; + bytes32 latestConfigDigest; + uint32 latestEpoch; + bool paused; +} + +/** + * @notice all information about an upkeep + * @dev only used in return values + * @member target the contract which needs to be serviced + * @member executeGas the gas limit of upkeep execution + * @member checkData the checkData bytes for this upkeep + * @member balance the balance of this upkeep + * @member admin for this upkeep + * @member maxValidBlocknumber until 
which block this upkeep is valid + * @member lastPerformBlockNumber the last block number when this upkeep was performed + * @member amountSpent the amount this upkeep has spent + * @member paused if this upkeep has been paused + * @member skipSigVerification skip signature verification in transmit for a low security low cost model + */ +struct UpkeepInfo { + address target; + uint32 executeGas; + bytes checkData; + uint96 balance; + address admin; + uint64 maxValidBlocknumber; + uint32 lastPerformBlockNumber; + uint96 amountSpent; + bool paused; + bytes offchainConfig; +} + +enum UpkeepFailureReason { + NONE, + UPKEEP_CANCELLED, + UPKEEP_PAUSED, + TARGET_CHECK_REVERTED, + UPKEEP_NOT_NEEDED, + PERFORM_DATA_EXCEEDS_LIMIT, + INSUFFICIENT_BALANCE +} + +interface AutomationRegistryBaseInterface { + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + bytes calldata checkData, + bytes calldata offchainConfig + ) external returns (uint256 id); + + function cancelUpkeep(uint256 id) external; + + function pauseUpkeep(uint256 id) external; + + function unpauseUpkeep(uint256 id) external; + + function transferUpkeepAdmin(uint256 id, address proposed) external; + + function acceptUpkeepAdmin(uint256 id) external; + + function updateCheckData(uint256 id, bytes calldata newCheckData) external; + + function addFunds(uint256 id, uint96 amount) external; + + function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external; + + function setUpkeepOffchainConfig(uint256 id, bytes calldata config) external; + + function getUpkeep(uint256 id) external view returns (UpkeepInfo memory upkeepInfo); + + function getActiveUpkeepIDs(uint256 startIndex, uint256 maxCount) external view returns (uint256[] memory); + + function getTransmitterInfo( + address query + ) external view returns (bool active, uint8 index, uint96 balance, uint96 lastCollected, address payee); + + function getState() + external + view + returns ( + State memory state, + OnchainConfig memory 
config, + address[] memory signers, + address[] memory transmitters, + uint8 f + ); +} + +/** + * @dev The view methods are not actually marked as view in the implementation + * but we want them to be easily queried off-chain. Solidity will not compile + * if we actually inherit from this interface, so we document it here. + */ +interface AutomationRegistryInterface is AutomationRegistryBaseInterface { + function checkUpkeep( + uint256 upkeepId + ) + external + view + returns ( + bool upkeepNeeded, + bytes memory performData, + UpkeepFailureReason upkeepFailureReason, + uint256 gasUsed, + uint256 fastGasWei, + uint256 linkNative + ); +} + +interface AutomationRegistryExecutableInterface is AutomationRegistryBaseInterface { + function checkUpkeep( + uint256 upkeepId + ) + external + returns ( + bool upkeepNeeded, + bytes memory performData, + UpkeepFailureReason upkeepFailureReason, + uint256 gasUsed, + uint256 fastGasWei, + uint256 linkNative + ); +} diff --git a/contracts/src/v0.8/automation/interfaces/v2_1/IKeeperRegistryMaster.sol b/contracts/src/v0.8/automation/interfaces/v2_1/IKeeperRegistryMaster.sol new file mode 100644 index 00000000..9f673f76 --- /dev/null +++ b/contracts/src/v0.8/automation/interfaces/v2_1/IKeeperRegistryMaster.sol @@ -0,0 +1,384 @@ +// abi-checksum: 0x0ed34e4b36bd7b4a5447152c2d61491e6ba7ed944b11e4dfef4fea184708975e +// SPDX-License-Identifier: MIT +// !! THIS FILE WAS AUTOGENERATED BY abi-to-sol v0.6.6. SEE SOURCE BELOW. !! 
+pragma solidity ^0.8.4; + +interface IKeeperRegistryMaster { + error ArrayHasNoEntries(); + error CannotCancel(); + error CheckDataExceedsLimit(); + error ConfigDigestMismatch(); + error DuplicateEntry(); + error DuplicateSigners(); + error GasLimitCanOnlyIncrease(); + error GasLimitOutsideRange(); + error IncorrectNumberOfFaultyOracles(); + error IncorrectNumberOfSignatures(); + error IncorrectNumberOfSigners(); + error IndexOutOfRange(); + error InsufficientFunds(); + error InvalidDataLength(); + error InvalidPayee(); + error InvalidRecipient(); + error InvalidReport(); + error InvalidSigner(); + error InvalidTransmitter(); + error InvalidTrigger(); + error InvalidTriggerType(); + error MaxCheckDataSizeCanOnlyIncrease(); + error MaxPerformDataSizeCanOnlyIncrease(); + error MigrationNotPermitted(); + error NotAContract(); + error OnlyActiveSigners(); + error OnlyActiveTransmitters(); + error OnlyCallableByAdmin(); + error OnlyCallableByPLIToken(); + error OnlyCallableByOwnerOrAdmin(); + error OnlyCallableByOwnerOrRegistrar(); + error OnlyCallableByPayee(); + error OnlyCallableByProposedAdmin(); + error OnlyCallableByProposedPayee(); + error OnlyCallableByUpkeepPrivilegeManager(); + error OnlyPausedUpkeep(); + error OnlySimulatedBackend(); + error OnlyUnpausedUpkeep(); + error ParameterLengthError(); + error PaymentGreaterThanAllPLI(); + error ReentrantCall(); + error RegistryPaused(); + error RepeatedSigner(); + error RepeatedTransmitter(); + error TargetCheckReverted(bytes reason); + error TooManyOracles(); + error TranscoderNotSet(); + error UpkeepAlreadyExists(); + error UpkeepCancelled(); + error UpkeepNotCanceled(); + error UpkeepNotNeeded(); + error ValueNotChanged(); + event AdminPrivilegeConfigSet(address indexed admin, bytes privilegeConfig); + event CancelledUpkeepReport(uint256 indexed id, bytes trigger); + event ConfigSet( + uint32 previousConfigBlockNumber, + bytes32 configDigest, + uint64 configCount, + address[] signers, + address[] transmitters, + 
uint8 f, + bytes onchainConfig, + uint64 offchainConfigVersion, + bytes offchainConfig + ); + event DedupKeyAdded(bytes32 indexed dedupKey); + event FundsAdded(uint256 indexed id, address indexed from, uint96 amount); + event FundsWithdrawn(uint256 indexed id, uint256 amount, address to); + event InsufficientFundsUpkeepReport(uint256 indexed id, bytes trigger); + event OwnerFundsWithdrawn(uint96 amount); + event OwnershipTransferRequested(address indexed from, address indexed to); + event OwnershipTransferred(address indexed from, address indexed to); + event Paused(address account); + event PayeesUpdated(address[] transmitters, address[] payees); + event PayeeshipTransferRequested(address indexed transmitter, address indexed from, address indexed to); + event PayeeshipTransferred(address indexed transmitter, address indexed from, address indexed to); + event PaymentWithdrawn(address indexed transmitter, uint256 indexed amount, address indexed to, address payee); + event ReorgedUpkeepReport(uint256 indexed id, bytes trigger); + event StaleUpkeepReport(uint256 indexed id, bytes trigger); + event Transmitted(bytes32 configDigest, uint32 epoch); + event Unpaused(address account); + event UpkeepAdminTransferRequested(uint256 indexed id, address indexed from, address indexed to); + event UpkeepAdminTransferred(uint256 indexed id, address indexed from, address indexed to); + event UpkeepCanceled(uint256 indexed id, uint64 indexed atBlockHeight); + event UpkeepCheckDataSet(uint256 indexed id, bytes newCheckData); + event UpkeepGasLimitSet(uint256 indexed id, uint96 gasLimit); + event UpkeepMigrated(uint256 indexed id, uint256 remainingBalance, address destination); + event UpkeepOffchainConfigSet(uint256 indexed id, bytes offchainConfig); + event UpkeepPaused(uint256 indexed id); + event UpkeepPerformed( + uint256 indexed id, + bool indexed success, + uint96 totalPayment, + uint256 gasUsed, + uint256 gasOverhead, + bytes trigger + ); + event 
UpkeepPrivilegeConfigSet(uint256 indexed id, bytes privilegeConfig); + event UpkeepReceived(uint256 indexed id, uint256 startingBalance, address importedFrom); + event UpkeepRegistered(uint256 indexed id, uint32 performGas, address admin); + event UpkeepTriggerConfigSet(uint256 indexed id, bytes triggerConfig); + event UpkeepUnpaused(uint256 indexed id); + + fallback() external; + + function acceptOwnership() external; + + function fallbackTo() external view returns (address); + + function latestConfigDetails() external view returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest); + + function latestConfigDigestAndEpoch() external view returns (bool scanLogs, bytes32 configDigest, uint32 epoch); + + function onTokenTransfer(address sender, uint256 amount, bytes memory data) external; + + function owner() external view returns (address); + + function setConfig( + address[] memory signers, + address[] memory transmitters, + uint8 f, + bytes memory onchainConfigBytes, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) external; + + function setConfigTypeSafe( + address[] memory signers, + address[] memory transmitters, + uint8 f, + KeeperRegistryBase2_1.OnchainConfig memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) external; + + function simulatePerformUpkeep( + uint256 id, + bytes memory performData + ) external view returns (bool success, uint256 gasUsed); + + function transferOwnership(address to) external; + + function transmit( + bytes32[3] memory reportContext, + bytes memory rawReport, + bytes32[] memory rs, + bytes32[] memory ss, + bytes32 rawVs + ) external; + + function typeAndVersion() external view returns (string memory); + + function addFunds(uint256 id, uint96 amount) external; + + function cancelUpkeep(uint256 id) external; + + function checkCallback( + uint256 id, + bytes[] memory values, + bytes memory extraData + ) external view returns (bool upkeepNeeded, bytes memory performData, 
uint8 upkeepFailureReason, uint256 gasUsed); + + function checkUpkeep( + uint256 id, + bytes memory triggerData + ) + external + view + returns ( + bool upkeepNeeded, + bytes memory performData, + uint8 upkeepFailureReason, + uint256 gasUsed, + uint256 gasLimit, + uint256 fastGasWei, + uint256 linkNative + ); + + function checkUpkeep( + uint256 id + ) + external + view + returns ( + bool upkeepNeeded, + bytes memory performData, + uint8 upkeepFailureReason, + uint256 gasUsed, + uint256 gasLimit, + uint256 fastGasWei, + uint256 linkNative + ); + + function executeCallback( + uint256 id, + bytes memory payload + ) external returns (bool upkeepNeeded, bytes memory performData, uint8 upkeepFailureReason, uint256 gasUsed); + + function migrateUpkeeps(uint256[] memory ids, address destination) external; + + function receiveUpkeeps(bytes memory encodedUpkeeps) external; + + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + uint8 triggerType, + bytes memory checkData, + bytes memory triggerConfig, + bytes memory offchainConfig + ) external returns (uint256 id); + + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + bytes memory checkData, + bytes memory offchainConfig + ) external returns (uint256 id); + + function setUpkeepTriggerConfig(uint256 id, bytes memory triggerConfig) external; + + function acceptPayeeship(address transmitter) external; + + function acceptUpkeepAdmin(uint256 id) external; + + function getActiveUpkeepIDs(uint256 startIndex, uint256 maxCount) external view returns (uint256[] memory); + + function getAdminPrivilegeConfig(address admin) external view returns (bytes memory); + + function getAutomationForwarderLogic() external view returns (address); + + function getBalance(uint256 id) external view returns (uint96 balance); + + function getCancellationDelay() external pure returns (uint256); + + function getConditionalGasOverhead() external pure returns (uint256); + + function 
getFastGasFeedAddress() external view returns (address); + + function getForwarder(uint256 upkeepID) external view returns (address); + + function getLinkAddress() external view returns (address); + + function getLinkNativeFeedAddress() external view returns (address); + + function getLogGasOverhead() external pure returns (uint256); + + function getMaxPaymentForGas(uint8 triggerType, uint32 gasLimit) external view returns (uint96 maxPayment); + + function getMinBalance(uint256 id) external view returns (uint96); + + function getMinBalanceForUpkeep(uint256 id) external view returns (uint96 minBalance); + + function getMode() external view returns (uint8); + + function getPeerRegistryMigrationPermission(address peer) external view returns (uint8); + + function getPerPerformByteGasOverhead() external pure returns (uint256); + + function getPerSignerGasOverhead() external pure returns (uint256); + + function getSignerInfo(address query) external view returns (bool active, uint8 index); + + function getState() + external + view + returns ( + KeeperRegistryBase2_1.State memory state, + KeeperRegistryBase2_1.OnchainConfig memory config, + address[] memory signers, + address[] memory transmitters, + uint8 f + ); + + function getTransmitterInfo( + address query + ) external view returns (bool active, uint8 index, uint96 balance, uint96 lastCollected, address payee); + + function getTriggerType(uint256 upkeepId) external pure returns (uint8); + + function getUpkeep(uint256 id) external view returns (KeeperRegistryBase2_1.UpkeepInfo memory upkeepInfo); + + function getUpkeepPrivilegeConfig(uint256 upkeepId) external view returns (bytes memory); + + function getUpkeepTriggerConfig(uint256 upkeepId) external view returns (bytes memory); + + function hasDedupKey(bytes32 dedupKey) external view returns (bool); + + function pause() external; + + function pauseUpkeep(uint256 id) external; + + function recoverFunds() external; + + function setAdminPrivilegeConfig(address admin, 
bytes memory newPrivilegeConfig) external; + + function setPayees(address[] memory payees) external; + + function setPeerRegistryMigrationPermission(address peer, uint8 permission) external; + + function setUpkeepCheckData(uint256 id, bytes memory newCheckData) external; + + function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external; + + function setUpkeepOffchainConfig(uint256 id, bytes memory config) external; + + function setUpkeepPrivilegeConfig(uint256 upkeepId, bytes memory newPrivilegeConfig) external; + + function transferPayeeship(address transmitter, address proposed) external; + + function transferUpkeepAdmin(uint256 id, address proposed) external; + + function unpause() external; + + function unpauseUpkeep(uint256 id) external; + + function upkeepTranscoderVersion() external pure returns (uint8); + + function upkeepVersion() external pure returns (uint8); + + function withdrawFunds(uint256 id, address to) external; + + function withdrawOwnerFunds() external; + + function withdrawPayment(address from, address to) external; +} + +interface KeeperRegistryBase2_1 { + struct OnchainConfig { + uint32 paymentPremiumPPB; + uint32 flatFeeMicroLink; + uint32 checkGasLimit; + uint24 stalenessSeconds; + uint16 gasCeilingMultiplier; + uint96 minUpkeepSpend; + uint32 maxPerformGas; + uint32 maxCheckDataSize; + uint32 maxPerformDataSize; + uint32 maxRevertDataSize; + uint256 fallbackGasPrice; + uint256 fallbackLinkPrice; + address transcoder; + address[] registrars; + address upkeepPrivilegeManager; + } + + struct State { + uint32 nonce; + uint96 ownerLinkBalance; + uint256 expectedLinkBalance; + uint96 totalPremium; + uint256 numUpkeeps; + uint32 configCount; + uint32 latestConfigBlockNumber; + bytes32 latestConfigDigest; + uint32 latestEpoch; + bool paused; + } + + struct UpkeepInfo { + address target; + uint32 performGas; + bytes checkData; + uint96 balance; + address admin; + uint64 maxValidBlocknumber; + uint32 lastPerformedBlockNumber; + uint96 
amountSpent; + bool paused; + bytes offchainConfig; + } +} + +// THIS FILE WAS AUTOGENERATED FROM THE FOLLOWING ABI JSON: +/* +[{"inputs":[{"internalType":"contract KeeperRegistryLogicB2_1","name":"logicA","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"name":"ArrayHasNoEntries","type":"error"},{"inputs":[],"name":"CannotCancel","type":"error"},{"inputs":[],"name":"CheckDataExceedsLimit","type":"error"},{"inputs":[],"name":"ConfigDigestMismatch","type":"error"},{"inputs":[],"name":"DuplicateEntry","type":"error"},{"inputs":[],"name":"DuplicateSigners","type":"error"},{"inputs":[],"name":"GasLimitCanOnlyIncrease","type":"error"},{"inputs":[],"name":"GasLimitOutsideRange","type":"error"},{"inputs":[],"name":"IncorrectNumberOfFaultyOracles","type":"error"},{"inputs":[],"name":"IncorrectNumberOfSignatures","type":"error"},{"inputs":[],"name":"IncorrectNumberOfSigners","type":"error"},{"inputs":[],"name":"IndexOutOfRange","type":"error"},{"inputs":[],"name":"InsufficientFunds","type":"error"},{"inputs":[],"name":"InvalidDataLength","type":"error"},{"inputs":[],"name":"InvalidPayee","type":"error"},{"inputs":[],"name":"InvalidRecipient","type":"error"},{"inputs":[],"name":"InvalidReport","type":"error"},{"inputs":[],"name":"InvalidSigner","type":"error"},{"inputs":[],"name":"InvalidTransmitter","type":"error"},{"inputs":[],"name":"InvalidTrigger","type":"error"},{"inputs":[],"name":"InvalidTriggerType","type":"error"},{"inputs":[],"name":"MaxCheckDataSizeCanOnlyIncrease","type":"error"},{"inputs":[],"name":"MaxPerformDataSizeCanOnlyIncrease","type":"error"},{"inputs":[],"name":"MigrationNotPermitted","type":"error"},{"inputs":[],"name":"NotAContract","type":"error"},{"inputs":[],"name":"OnlyActiveSigners","type":"error"},{"inputs":[],"name":"OnlyActiveTransmitters","type":"error"},{"inputs":[],"name":"OnlyCallableByAdmin","type":"error"},{"inputs":[],"name":"OnlyCallableByPLIToken","type":"error"},{"inputs":[],"name":"OnlyCallableByOw
nerOrAdmin","type":"error"},{"inputs":[],"name":"OnlyCallableByOwnerOrRegistrar","type":"error"},{"inputs":[],"name":"OnlyCallableByPayee","type":"error"},{"inputs":[],"name":"OnlyCallableByProposedAdmin","type":"error"},{"inputs":[],"name":"OnlyCallableByProposedPayee","type":"error"},{"inputs":[],"name":"OnlyCallableByUpkeepPrivilegeManager","type":"error"},{"inputs":[],"name":"OnlyPausedUpkeep","type":"error"},{"inputs":[],"name":"OnlySimulatedBackend","type":"error"},{"inputs":[],"name":"OnlyUnpausedUpkeep","type":"error"},{"inputs":[],"name":"ParameterLengthError","type":"error"},{"inputs":[],"name":"PaymentGreaterThanAllPLI","type":"error"},{"inputs":[],"name":"ReentrantCall","type":"error"},{"inputs":[],"name":"RegistryPaused","type":"error"},{"inputs":[],"name":"RepeatedSigner","type":"error"},{"inputs":[],"name":"RepeatedTransmitter","type":"error"},{"inputs":[{"internalType":"bytes","name":"reason","type":"bytes"}],"name":"TargetCheckReverted","type":"error"},{"inputs":[],"name":"TooManyOracles","type":"error"},{"inputs":[],"name":"TranscoderNotSet","type":"error"},{"inputs":[],"name":"UpkeepAlreadyExists","type":"error"},{"inputs":[],"name":"UpkeepCancelled","type":"error"},{"inputs":[],"name":"UpkeepNotCanceled","type":"error"},{"inputs":[],"name":"UpkeepNotNeeded","type":"error"},{"inputs":[],"name":"ValueNotChanged","type":"error"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"admin","type":"address"},{"indexed":false,"internalType":"bytes","name":"privilegeConfig","type":"bytes"}],"name":"AdminPrivilegeConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"trigger","type":"bytes"}],"name":"CancelledUpkeepReport","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint32","name":"previousConfigBlockNumber","type":"uint32"},{"indexed":false,"internalType":"bytes32","name":"confi
gDigest","type":"bytes32"},{"indexed":false,"internalType":"uint64","name":"configCount","type":"uint64"},{"indexed":false,"internalType":"address[]","name":"signers","type":"address[]"},{"indexed":false,"internalType":"address[]","name":"transmitters","type":"address[]"},{"indexed":false,"internalType":"uint8","name":"f","type":"uint8"},{"indexed":false,"internalType":"bytes","name":"onchainConfig","type":"bytes"},{"indexed":false,"internalType":"uint64","name":"offchainConfigVersion","type":"uint64"},{"indexed":false,"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"name":"ConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"dedupKey","type":"bytes32"}],"name":"DedupKeyAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":false,"internalType":"uint96","name":"amount","type":"uint96"}],"name":"FundsAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"},{"indexed":false,"internalType":"address","name":"to","type":"address"}],"name":"FundsWithdrawn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"trigger","type":"bytes"}],"name":"InsufficientFundsUpkeepReport","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint96","name":"amount","type":"uint96"}],"name":"OwnerFundsWithdrawn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"OwnershipTransferRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"interna
lType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"account","type":"address"}],"name":"Paused","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address[]","name":"transmitters","type":"address[]"},{"indexed":false,"internalType":"address[]","name":"payees","type":"address[]"}],"name":"PayeesUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transmitter","type":"address"},{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"PayeeshipTransferRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transmitter","type":"address"},{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"PayeeshipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transmitter","type":"address"},{"indexed":true,"internalType":"uint256","name":"amount","type":"uint256"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"address","name":"payee","type":"address"}],"name":"PaymentWithdrawn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"trigger","type":"bytes"}],"name":"ReorgedUpkeepReport","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"trigger","type":"bytes"}],"name":"StaleUpkeepReport","type":"event"},{"anonymous":false,"inputs":[{"indexed":fals
e,"internalType":"bytes32","name":"configDigest","type":"bytes32"},{"indexed":false,"internalType":"uint32","name":"epoch","type":"uint32"}],"name":"Transmitted","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"account","type":"address"}],"name":"Unpaused","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"UpkeepAdminTransferRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"UpkeepAdminTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"uint64","name":"atBlockHeight","type":"uint64"}],"name":"UpkeepCanceled","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"newCheckData","type":"bytes"}],"name":"UpkeepCheckDataSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"uint96","name":"gasLimit","type":"uint96"}],"name":"UpkeepGasLimitSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"remainingBalance","type":"uint256"},{"indexed":false,"internalType":"address","name":"destination","type":"address"}],"name":"UpkeepMigrated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,
"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"name":"UpkeepOffchainConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"}],"name":"UpkeepPaused","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":true,"internalType":"bool","name":"success","type":"bool"},{"indexed":false,"internalType":"uint96","name":"totalPayment","type":"uint96"},{"indexed":false,"internalType":"uint256","name":"gasUsed","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"gasOverhead","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"trigger","type":"bytes"}],"name":"UpkeepPerformed","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"privilegeConfig","type":"bytes"}],"name":"UpkeepPrivilegeConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"startingBalance","type":"uint256"},{"indexed":false,"internalType":"address","name":"importedFrom","type":"address"}],"name":"UpkeepReceived","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"uint32","name":"performGas","type":"uint32"},{"indexed":false,"internalType":"address","name":"admin","type":"address"}],"name":"UpkeepRegistered","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"},{"indexed":false,"internalType":"bytes","name":"triggerConfig","type":"bytes"}],"name":"UpkeepTriggerConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"id","type":"uint256"}],"name":"UpkeepUnpaused","type":"event"},{"stateMutability":"
nonpayable","type":"fallback"},{"inputs":[],"name":"acceptOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"fallbackTo","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestConfigDetails","outputs":[{"internalType":"uint32","name":"configCount","type":"uint32"},{"internalType":"uint32","name":"blockNumber","type":"uint32"},{"internalType":"bytes32","name":"configDigest","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestConfigDigestAndEpoch","outputs":[{"internalType":"bool","name":"scanLogs","type":"bool"},{"internalType":"bytes32","name":"configDigest","type":"bytes32"},{"internalType":"uint32","name":"epoch","type":"uint32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"sender","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"onTokenTransfer","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address[]","name":"signers","type":"address[]"},{"internalType":"address[]","name":"transmitters","type":"address[]"},{"internalType":"uint8","name":"f","type":"uint8"},{"internalType":"bytes","name":"onchainConfigBytes","type":"bytes"},{"internalType":"uint64","name":"offchainConfigVersion","type":"uint64"},{"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"name":"setConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address[]","name":"signers","type":"address[]"},{"internalType":"address[]","name":"transmitters","type":"address[]"},{"internalType":"uint8","name":"f","type":"uint8"},{"components":[{"internalType":"uint32","name":"paymentPr
emiumPPB","type":"uint32"},{"internalType":"uint32","name":"flatFeeMicroLink","type":"uint32"},{"internalType":"uint32","name":"checkGasLimit","type":"uint32"},{"internalType":"uint24","name":"stalenessSeconds","type":"uint24"},{"internalType":"uint16","name":"gasCeilingMultiplier","type":"uint16"},{"internalType":"uint96","name":"minUpkeepSpend","type":"uint96"},{"internalType":"uint32","name":"maxPerformGas","type":"uint32"},{"internalType":"uint32","name":"maxCheckDataSize","type":"uint32"},{"internalType":"uint32","name":"maxPerformDataSize","type":"uint32"},{"internalType":"uint32","name":"maxRevertDataSize","type":"uint32"},{"internalType":"uint256","name":"fallbackGasPrice","type":"uint256"},{"internalType":"uint256","name":"fallbackLinkPrice","type":"uint256"},{"internalType":"address","name":"transcoder","type":"address"},{"internalType":"address[]","name":"registrars","type":"address[]"},{"internalType":"address","name":"upkeepPrivilegeManager","type":"address"}],"internalType":"struct 
KeeperRegistryBase2_1.OnchainConfig","name":"onchainConfig","type":"tuple"},{"internalType":"uint64","name":"offchainConfigVersion","type":"uint64"},{"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"name":"setConfigTypeSafe","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes","name":"performData","type":"bytes"}],"name":"simulatePerformUpkeep","outputs":[{"internalType":"bool","name":"success","type":"bool"},{"internalType":"uint256","name":"gasUsed","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"transferOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32[3]","name":"reportContext","type":"bytes32[3]"},{"internalType":"bytes","name":"rawReport","type":"bytes"},{"internalType":"bytes32[]","name":"rs","type":"bytes32[]"},{"internalType":"bytes32[]","name":"ss","type":"bytes32[]"},{"internalType":"bytes32","name":"rawVs","type":"bytes32"}],"name":"transmit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"typeAndVersion","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"contract 
KeeperRegistryLogicB2_1","name":"logicB","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint96","name":"amount","type":"uint96"}],"name":"addFunds","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"cancelUpkeep","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes[]","name":"values","type":"bytes[]"},{"internalType":"bytes","name":"extraData","type":"bytes"}],"name":"checkCallback","outputs":[{"internalType":"bool","name":"upkeepNeeded","type":"bool"},{"internalType":"bytes","name":"performData","type":"bytes"},{"internalType":"enum KeeperRegistryBase2_1.UpkeepFailureReason","name":"upkeepFailureReason","type":"uint8"},{"internalType":"uint256","name":"gasUsed","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes","name":"triggerData","type":"bytes"}],"name":"checkUpkeep","outputs":[{"internalType":"bool","name":"upkeepNeeded","type":"bool"},{"internalType":"bytes","name":"performData","type":"bytes"},{"internalType":"enum KeeperRegistryBase2_1.UpkeepFailureReason","name":"upkeepFailureReason","type":"uint8"},{"internalType":"uint256","name":"gasUsed","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"uint256","name":"fastGasWei","type":"uint256"},{"internalType":"uint256","name":"linkNative","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"checkUpkeep","outputs":[{"internalType":"bool","name":"upkeepNeeded","type":"bool"},{"internalType":"bytes","name":"performData","type":"bytes"},{"internalType":"enum 
KeeperRegistryBase2_1.UpkeepFailureReason","name":"upkeepFailureReason","type":"uint8"},{"internalType":"uint256","name":"gasUsed","type":"uint256"},{"internalType":"uint256","name":"gasLimit","type":"uint256"},{"internalType":"uint256","name":"fastGasWei","type":"uint256"},{"internalType":"uint256","name":"linkNative","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes","name":"payload","type":"bytes"}],"name":"executeCallback","outputs":[{"internalType":"bool","name":"upkeepNeeded","type":"bool"},{"internalType":"bytes","name":"performData","type":"bytes"},{"internalType":"enum KeeperRegistryBase2_1.UpkeepFailureReason","name":"upkeepFailureReason","type":"uint8"},{"internalType":"uint256","name":"gasUsed","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256[]","name":"ids","type":"uint256[]"},{"internalType":"address","name":"destination","type":"address"}],"name":"migrateUpkeeps","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"encodedUpkeeps","type":"bytes"}],"name":"receiveUpkeeps","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"target","type":"address"},{"internalType":"uint32","name":"gasLimit","type":"uint32"},{"internalType":"address","name":"admin","type":"address"},{"internalType":"enum 
KeeperRegistryBase2_1.Trigger","name":"triggerType","type":"uint8"},{"internalType":"bytes","name":"checkData","type":"bytes"},{"internalType":"bytes","name":"triggerConfig","type":"bytes"},{"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"name":"registerUpkeep","outputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"target","type":"address"},{"internalType":"uint32","name":"gasLimit","type":"uint32"},{"internalType":"address","name":"admin","type":"address"},{"internalType":"bytes","name":"checkData","type":"bytes"},{"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"name":"registerUpkeep","outputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes","name":"triggerConfig","type":"bytes"}],"name":"setUpkeepTriggerConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"enum 
KeeperRegistryBase2_1.Mode","name":"mode","type":"uint8"},{"internalType":"address","name":"link","type":"address"},{"internalType":"address","name":"linkNativeFeed","type":"address"},{"internalType":"address","name":"fastGasFeed","type":"address"},{"internalType":"address","name":"automationForwarderLogic","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"address","name":"transmitter","type":"address"}],"name":"acceptPayeeship","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"acceptUpkeepAdmin","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"startIndex","type":"uint256"},{"internalType":"uint256","name":"maxCount","type":"uint256"}],"name":"getActiveUpkeepIDs","outputs":[{"internalType":"uint256[]","name":"","type":"uint256[]"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"admin","type":"address"}],"name":"getAdminPrivilegeConfig","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getAutomationForwarderLogic","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"getBalance","outputs":[{"internalType":"uint96","name":"balance","type":"uint96"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getCancellationDelay","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"getConditionalGasOverhead","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"getFastGasFeedAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"vie
w","type":"function"},{"inputs":[{"internalType":"uint256","name":"upkeepID","type":"uint256"}],"name":"getForwarder","outputs":[{"internalType":"contract IAutomationForwarder","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getLinkAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getLinkNativeFeedAddress","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getLogGasOverhead","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"enum KeeperRegistryBase2_1.Trigger","name":"triggerType","type":"uint8"},{"internalType":"uint32","name":"gasLimit","type":"uint32"}],"name":"getMaxPaymentForGas","outputs":[{"internalType":"uint96","name":"maxPayment","type":"uint96"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"getMinBalance","outputs":[{"internalType":"uint96","name":"","type":"uint96"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"getMinBalanceForUpkeep","outputs":[{"internalType":"uint96","name":"minBalance","type":"uint96"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getMode","outputs":[{"internalType":"enum KeeperRegistryBase2_1.Mode","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"peer","type":"address"}],"name":"getPeerRegistryMigrationPermission","outputs":[{"internalType":"enum 
KeeperRegistryBase2_1.MigrationPermission","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getPerPerformByteGasOverhead","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"getPerSignerGasOverhead","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"address","name":"query","type":"address"}],"name":"getSignerInfo","outputs":[{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint8","name":"index","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getState","outputs":[{"components":[{"internalType":"uint32","name":"nonce","type":"uint32"},{"internalType":"uint96","name":"ownerLinkBalance","type":"uint96"},{"internalType":"uint256","name":"expectedLinkBalance","type":"uint256"},{"internalType":"uint96","name":"totalPremium","type":"uint96"},{"internalType":"uint256","name":"numUpkeeps","type":"uint256"},{"internalType":"uint32","name":"configCount","type":"uint32"},{"internalType":"uint32","name":"latestConfigBlockNumber","type":"uint32"},{"internalType":"bytes32","name":"latestConfigDigest","type":"bytes32"},{"internalType":"uint32","name":"latestEpoch","type":"uint32"},{"internalType":"bool","name":"paused","type":"bool"}],"internalType":"struct 
KeeperRegistryBase2_1.State","name":"state","type":"tuple"},{"components":[{"internalType":"uint32","name":"paymentPremiumPPB","type":"uint32"},{"internalType":"uint32","name":"flatFeeMicroLink","type":"uint32"},{"internalType":"uint32","name":"checkGasLimit","type":"uint32"},{"internalType":"uint24","name":"stalenessSeconds","type":"uint24"},{"internalType":"uint16","name":"gasCeilingMultiplier","type":"uint16"},{"internalType":"uint96","name":"minUpkeepSpend","type":"uint96"},{"internalType":"uint32","name":"maxPerformGas","type":"uint32"},{"internalType":"uint32","name":"maxCheckDataSize","type":"uint32"},{"internalType":"uint32","name":"maxPerformDataSize","type":"uint32"},{"internalType":"uint32","name":"maxRevertDataSize","type":"uint32"},{"internalType":"uint256","name":"fallbackGasPrice","type":"uint256"},{"internalType":"uint256","name":"fallbackLinkPrice","type":"uint256"},{"internalType":"address","name":"transcoder","type":"address"},{"internalType":"address[]","name":"registrars","type":"address[]"},{"internalType":"address","name":"upkeepPrivilegeManager","type":"address"}],"internalType":"struct 
KeeperRegistryBase2_1.OnchainConfig","name":"config","type":"tuple"},{"internalType":"address[]","name":"signers","type":"address[]"},{"internalType":"address[]","name":"transmitters","type":"address[]"},{"internalType":"uint8","name":"f","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"query","type":"address"}],"name":"getTransmitterInfo","outputs":[{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint8","name":"index","type":"uint8"},{"internalType":"uint96","name":"balance","type":"uint96"},{"internalType":"uint96","name":"lastCollected","type":"uint96"},{"internalType":"address","name":"payee","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"upkeepId","type":"uint256"}],"name":"getTriggerType","outputs":[{"internalType":"enum KeeperRegistryBase2_1.Trigger","name":"","type":"uint8"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"getUpkeep","outputs":[{"components":[{"internalType":"address","name":"target","type":"address"},{"internalType":"uint32","name":"performGas","type":"uint32"},{"internalType":"bytes","name":"checkData","type":"bytes"},{"internalType":"uint96","name":"balance","type":"uint96"},{"internalType":"address","name":"admin","type":"address"},{"internalType":"uint64","name":"maxValidBlocknumber","type":"uint64"},{"internalType":"uint32","name":"lastPerformedBlockNumber","type":"uint32"},{"internalType":"uint96","name":"amountSpent","type":"uint96"},{"internalType":"bool","name":"paused","type":"bool"},{"internalType":"bytes","name":"offchainConfig","type":"bytes"}],"internalType":"struct 
KeeperRegistryBase2_1.UpkeepInfo","name":"upkeepInfo","type":"tuple"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"upkeepId","type":"uint256"}],"name":"getUpkeepPrivilegeConfig","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"upkeepId","type":"uint256"}],"name":"getUpkeepTriggerConfig","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"dedupKey","type":"bytes32"}],"name":"hasDedupKey","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"pause","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"pauseUpkeep","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"recoverFunds","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"admin","type":"address"},{"internalType":"bytes","name":"newPrivilegeConfig","type":"bytes"}],"name":"setAdminPrivilegeConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address[]","name":"payees","type":"address[]"}],"name":"setPayees","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"peer","type":"address"},{"internalType":"enum 
KeeperRegistryBase2_1.MigrationPermission","name":"permission","type":"uint8"}],"name":"setPeerRegistryMigrationPermission","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes","name":"newCheckData","type":"bytes"}],"name":"setUpkeepCheckData","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"uint32","name":"gasLimit","type":"uint32"}],"name":"setUpkeepGasLimit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"bytes","name":"config","type":"bytes"}],"name":"setUpkeepOffchainConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"upkeepId","type":"uint256"},{"internalType":"bytes","name":"newPrivilegeConfig","type":"bytes"}],"name":"setUpkeepPrivilegeConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"transmitter","type":"address"},{"internalType":"address","name":"proposed","type":"address"}],"name":"transferPayeeship","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"address","name":"proposed","type":"address"}],"name":"transferUpkeepAdmin","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"unpause","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"}],"name":"unpauseUpkeep","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"upkeepTranscoderVersion","outputs":[{"internalType":"enum 
UpkeepFormat","name":"","type":"uint8"}],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"upkeepVersion","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"address","name":"to","type":"address"}],"name":"withdrawFunds","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"withdrawOwnerFunds","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"}],"name":"withdrawPayment","outputs":[],"stateMutability":"nonpayable","type":"function"}] +*/ diff --git a/contracts/src/v0.8/automation/libraries/external/Cron.sol b/contracts/src/v0.8/automation/libraries/external/Cron.sol new file mode 100644 index 00000000..0c93c19e --- /dev/null +++ b/contracts/src/v0.8/automation/libraries/external/Cron.sol @@ -0,0 +1,78 @@ +pragma solidity 0.8.6; + +import {Cron as CronInternal, Spec} from "../internal/Cron.sol"; + +/** + * @title The Cron library + * @notice A utility contract for encoding/decoding cron strings (ex: 0 0 * * *) into an + * abstraction called a Spec. The library also includes a spec function, nextTick(), which + * determines the next time a cron job should fire based on the current block timestamp. + * @dev this is the external version of the library, which relies on the internal library + * by the same name. + */ +library Cron { + using CronInternal for Spec; + using CronInternal for string; + + /** + * @notice nextTick calculates the next datetime that a spec "ticks", starting + * from the current block timestamp. This is gas-intensive and therefore should + * only be called off-chain. 
+ * @param spec the spec to evaluate + * @return the next tick + */ + function nextTick(Spec calldata spec) public view returns (uint256) { + return spec.nextTick(); + } + + /** + * @notice lastTick calculates the previous datetime that a spec "ticks", starting + * from the current block timestamp. This is gas-intensive and therefore should + * only be called off-chain. + * @param spec the spec to evaluate + * @return the next tick + */ + function lastTick(Spec calldata spec) public view returns (uint256) { + return spec.lastTick(); + } + + /** + * @notice matches evaluates whether or not a spec "ticks" at a given timestamp + * @param spec the spec to evaluate + * @param timestamp the timestamp to compare against + * @return true / false if they match + */ + function matches(Spec calldata spec, uint256 timestamp) public view returns (bool) { + return spec.matches(timestamp); + } + + /** + * @notice toSpec converts a cron string to a spec struct. This is gas-intensive + * and therefore should only be called off-chain. + * @param cronString the cron string + * @return the spec struct + */ + function toSpec(string calldata cronString) public pure returns (Spec memory) { + return cronString.toSpec(); + } + + /** + * @notice toEncodedSpec converts a cron string to an abi-encoded spec. This is gas-intensive + * and therefore should only be called off-chain. + * @param cronString the cron string + * @return the abi-encoded spec + */ + function toEncodedSpec(string calldata cronString) public pure returns (bytes memory) { + return cronString.toEncodedSpec(); + } + + /** + * @notice toCronString converts a cron spec to a human-readable cron string. This is gas-intensive + * and therefore should only be called off-chain. 
+ * @param spec the cron spec + * @return the corresponding cron string + */ + function toCronString(Spec calldata spec) public pure returns (string memory) { + return spec.toCronString(); + } +} diff --git a/contracts/src/v0.8/automation/libraries/internal/Cron.sol b/contracts/src/v0.8/automation/libraries/internal/Cron.sol new file mode 100644 index 00000000..8804473b --- /dev/null +++ b/contracts/src/v0.8/automation/libraries/internal/Cron.sol @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: MIT + +/* + The Cron contract serves two primary functions: + * parsing cron-formatted strings like "0 0 * * *" into + structs called "Specs" + * computing the "next tick" of a cron spec + + Because manipulating strings is gas-expensive in solidity, + the intended use of this contract is for users to first convert + their cron strings to encoded Spec structs via toEncodedSpec(). + Then, the user stores the Spec on chain. Finally, users use the nextTick(), + function to determine the datetime of the next cron job run. + + Cron jobs are interpreted according to this format: + + ┌───────────── minute (0 - 59) + │ ┌───────────── hour (0 - 23) + │ │ ┌───────────── day of the month (1 - 31) + │ │ │ ┌───────────── month (1 - 12) + │ │ │ │ ┌───────────── day of the week (0 - 6) (Monday to Sunday) + │ │ │ │ │ + │ │ │ │ │ + │ │ │ │ │ + * * * * * + + Special limitations: + * there is no year field + * no special characters: ? 
L W # + * lists can have a max length of 26 + * no words like JAN / FEB or MON / TUES +*/ + +pragma solidity 0.8.6; + +import "../../../vendor/Strings.sol"; +import "../../../vendor/DateTime.sol"; + +// The fields of a cron spec, by name +string constant MINUTE = "minute"; +string constant HOUR = "hour"; +string constant DAY = "day"; +string constant MONTH = "month"; +string constant DAY_OF_WEEK = "day of week"; + +error UnknownFieldType(); +error InvalidSpec(string reason); +error InvalidField(string field, string reason); +error ListTooLarge(); + +// Set of enums representing a cron field type +enum FieldType { + WILD, + EXACT, + INTERVAL, + RANGE, + LIST +} + +// A spec represents a cron job by decomposing it into 5 fields +struct Spec { + Field minute; + Field hour; + Field day; + Field month; + Field dayOfWeek; +} + +// A field represents a single element in a cron spec. There are 5 types +// of fields (see above). Not all properties of this struct are present at once. +struct Field { + FieldType fieldType; + uint8 singleValue; + uint8 interval; + uint8 rangeStart; + uint8 rangeEnd; + uint8 listLength; + uint8[26] list; +} + +/** + * @title The Cron library + * @notice A utility contract for encoding/decoding cron strings (ex: 0 0 * * *) into an + * abstraction called a Spec. The library also includes a spec function, nextTick(), which + * determines the next time a cron job should fire based on the current block timestamp. + */ +// solhint-disable plugin-solidity/prefix-internal-functions-with-underscore, no-global-import +library Cron { + using strings for *; + + /** + * @notice nextTick calculates the next datetime that a spec "ticks", starting + * from the current block timestamp. This is gas-intensive and therefore should + * only be called off-chain. + * @param spec the spec to evaluate + * @return the next tick + * @dev this is the internal version of the library. There is also an external version. 
+ */ + function nextTick(Spec memory spec) internal view returns (uint256) { + uint16 year = DateTime.getYear(block.timestamp); + uint8 month = DateTime.getMonth(block.timestamp); + uint8 day = DateTime.getDay(block.timestamp); + uint8 hour = DateTime.getHour(block.timestamp); + uint8 minute = DateTime.getMinute(block.timestamp); + uint8 dayOfWeek; + for (; true; year++) { + for (; month <= 12; month++) { + if (!matches(spec.month, month)) { + day = 1; + hour = 0; + minute = 0; + continue; + } + uint8 maxDay = DateTime.getDaysInMonth(month, year); + for (; day <= maxDay; day++) { + if (!matches(spec.day, day)) { + hour = 0; + minute = 0; + continue; + } + dayOfWeek = DateTime.getWeekday(DateTime.toTimestamp(year, month, day)); + if (!matches(spec.dayOfWeek, dayOfWeek)) { + hour = 0; + minute = 0; + continue; + } + for (; hour < 24; hour++) { + if (!matches(spec.hour, hour)) { + minute = 0; + continue; + } + for (; minute < 60; minute++) { + if (!matches(spec.minute, minute)) { + continue; + } + return DateTime.toTimestamp(year, month, day, hour, minute); + } + minute = 0; + } + hour = 0; + } + day = 1; + } + month = 1; + } + } + + /** + * @notice lastTick calculates the previous datetime that a spec "ticks", starting + * from the current block timestamp. This is gas-intensive and therefore should + * only be called off-chain. 
+ * @param spec the spec to evaluate + * @return the next tick + */ + function lastTick(Spec memory spec) internal view returns (uint256) { + uint16 year = DateTime.getYear(block.timestamp); + uint8 month = DateTime.getMonth(block.timestamp); + uint8 day = DateTime.getDay(block.timestamp); + uint8 hour = DateTime.getHour(block.timestamp); + uint8 minute = DateTime.getMinute(block.timestamp); + uint8 dayOfWeek; + bool resetDay; + for (; true; year--) { + for (; month > 0; month--) { + if (!matches(spec.month, month)) { + resetDay = true; + hour = 23; + minute = 59; + continue; + } + if (resetDay) { + day = DateTime.getDaysInMonth(month, year); + } + for (; day > 0; day--) { + if (!matches(spec.day, day)) { + hour = 23; + minute = 59; + continue; + } + dayOfWeek = DateTime.getWeekday(DateTime.toTimestamp(year, month, day)); + if (!matches(spec.dayOfWeek, dayOfWeek)) { + hour = 23; + minute = 59; + continue; + } + for (; hour >= 0; hour--) { + if (!matches(spec.hour, hour)) { + minute = 59; + if (hour == 0) { + break; + } + continue; + } + for (; minute >= 0; minute--) { + if (!matches(spec.minute, minute)) { + if (minute == 0) { + break; + } + continue; + } + return DateTime.toTimestamp(year, month, day, hour, minute); + } + minute = 59; + if (hour == 0) { + break; + } + } + hour = 23; + } + resetDay = true; + } + month = 12; + } + } + + /** + * @notice matches evaluates whether or not a spec "ticks" at a given timestamp + * @param spec the spec to evaluate + * @param timestamp the timestamp to compare against + * @return true / false if they match + */ + function matches(Spec memory spec, uint256 timestamp) internal view returns (bool) { + DateTime._DateTime memory dt = DateTime.parseTimestamp(timestamp); + return + matches(spec.month, dt.month) && + matches(spec.day, dt.day) && + matches(spec.hour, dt.hour) && + matches(spec.minute, dt.minute); + } + + /** + * @notice toSpec converts a cron string to a spec struct. 
This is gas-intensive + * and therefore should only be called off-chain. + * @param cronString the cron string + * @return the spec struct + */ + function toSpec(string memory cronString) internal pure returns (Spec memory) { + strings.slice memory space = strings.toSlice(" "); + strings.slice memory cronSlice = strings.toSlice(cronString); + if (cronSlice.count(space) != 4) { + revert InvalidSpec("4 spaces required"); + } + strings.slice memory minuteSlice = cronSlice.split(space); + strings.slice memory hourSlice = cronSlice.split(space); + strings.slice memory daySlice = cronSlice.split(space); + strings.slice memory monthSlice = cronSlice.split(space); + // DEV: dayOfWeekSlice = cronSlice + // The cronSlice now contains the last section of the cron job, + // which corresponds to the day of week + if ( + minuteSlice.len() == 0 || + hourSlice.len() == 0 || + daySlice.len() == 0 || + monthSlice.len() == 0 || + cronSlice.len() == 0 + ) { + revert InvalidSpec("some fields missing"); + } + return + validate( + Spec({ + minute: sliceToField(minuteSlice), + hour: sliceToField(hourSlice), + day: sliceToField(daySlice), + month: sliceToField(monthSlice), + dayOfWeek: sliceToField(cronSlice) + }) + ); + } + + /** + * @notice toEncodedSpec converts a cron string to an abi-encoded spec. This is gas-intensive + * and therefore should only be called off-chain. + * @param cronString the cron string + * @return the abi-encoded spec + */ + function toEncodedSpec(string memory cronString) internal pure returns (bytes memory) { + return abi.encode(toSpec(cronString)); + } + + /** + * @notice toCronString converts a cron spec to a human-readable cron string. This is gas-intensive + * and therefore should only be called off-chain. 
+ * @param spec the cron spec + * @return the corresponding cron string + */ + function toCronString(Spec memory spec) internal pure returns (string memory) { + return + string( + bytes.concat( + fieldToBstring(spec.minute), + " ", + fieldToBstring(spec.hour), + " ", + fieldToBstring(spec.day), + " ", + fieldToBstring(spec.month), + " ", + fieldToBstring(spec.dayOfWeek) + ) + ); + } + + /** + * @notice matches evaluates if a values matches a field. + * ex: 3 matches *, 3 matches 0-5, 3 does not match 0,2,4 + * @param field the field struct to match against + * @param value the value of a field + * @return true / false if they match + */ + function matches(Field memory field, uint8 value) private pure returns (bool) { + if (field.fieldType == FieldType.WILD) { + return true; + } else if (field.fieldType == FieldType.INTERVAL) { + return value % field.interval == 0; + } else if (field.fieldType == FieldType.EXACT) { + return value == field.singleValue; + } else if (field.fieldType == FieldType.RANGE) { + return value >= field.rangeStart && value <= field.rangeEnd; + } else if (field.fieldType == FieldType.LIST) { + for (uint256 idx = 0; idx < field.listLength; idx++) { + if (value == field.list[idx]) { + return true; + } + } + return false; + } + revert UnknownFieldType(); + } + + // VALIDATIONS + + /** + * @notice validate validates a spec, reverting if any errors are found + * @param spec the spec to validate + * @return the original spec + */ + function validate(Spec memory spec) private pure returns (Spec memory) { + validateField(spec.dayOfWeek, DAY_OF_WEEK, 0, 6); + validateField(spec.month, MONTH, 1, 12); + uint8 maxDay = maxDayForMonthField(spec.month); + validateField(spec.day, DAY, 1, maxDay); + validateField(spec.hour, HOUR, 0, 23); + validateField(spec.minute, MINUTE, 0, 59); + return spec; + } + + /** + * @notice validateField validates the value of a field. It reverts if an error is found. 
+ * @param field the field to validate + * @param fieldName the name of the field ex "minute" or "hour" + * @param min the minimum value a field can have (usually 1 or 0) + * @param max the maximum value a field can have (ex minute = 59, hour = 23) + */ + function validateField(Field memory field, string memory fieldName, uint8 min, uint8 max) private pure { + if (field.fieldType == FieldType.WILD) { + return; + } else if (field.fieldType == FieldType.EXACT) { + if (field.singleValue < min || field.singleValue > max) { + string memory reason = string( + bytes.concat("value must be >=,", uintToBString(min), " and <=", uintToBString(max)) + ); + revert InvalidField(fieldName, reason); + } + } else if (field.fieldType == FieldType.INTERVAL) { + if (field.interval < 1 || field.interval > max) { + string memory reason = string( + bytes.concat("inverval must be */(", uintToBString(1), "-", uintToBString(max), ")") + ); + revert InvalidField(fieldName, reason); + } + } else if (field.fieldType == FieldType.RANGE) { + if (field.rangeEnd > max || field.rangeEnd <= field.rangeStart) { + string memory reason = string( + bytes.concat("inverval must be within ", uintToBString(min), "-", uintToBString(max)) + ); + revert InvalidField(fieldName, reason); + } + } else if (field.fieldType == FieldType.LIST) { + if (field.listLength < 2) { + revert InvalidField(fieldName, "lists must have at least 2 items"); + } + string memory reason = string( + bytes.concat("items in list must be within ", uintToBString(min), "-", uintToBString(max)) + ); + uint8 listItem; + for (uint256 idx = 0; idx < field.listLength; idx++) { + listItem = field.list[idx]; + if (listItem < min || listItem > max) { + revert InvalidField(fieldName, reason); + } + } + } else { + revert UnknownFieldType(); + } + } + + /** + * @notice maxDayForMonthField returns the maximum valid day given the month field + * @param month the month field + * @return the max day + */ + function maxDayForMonthField(Field memory month) 
private pure returns (uint8) { + // DEV: ranges are always safe because any two consecutive months will always + // contain a month with 31 days + if (month.fieldType == FieldType.WILD || month.fieldType == FieldType.RANGE) { + return 31; + } else if (month.fieldType == FieldType.EXACT) { + // DEV: assume leap year in order to get max value + return DateTime.getDaysInMonth(month.singleValue, 4); + } else if (month.fieldType == FieldType.INTERVAL) { + if (month.interval == 9 || month.interval == 11) { + return 30; + } else { + return 31; + } + } else if (month.fieldType == FieldType.LIST) { + uint8 result; + for (uint256 idx = 0; idx < month.listLength; idx++) { + // DEV: assume leap year in order to get max value + uint8 daysInMonth = DateTime.getDaysInMonth(month.list[idx], 4); + if (daysInMonth == 31) { + return daysInMonth; + } + if (daysInMonth > result) { + result = daysInMonth; + } + } + return result; + } else { + revert UnknownFieldType(); + } + } + + /** + * @notice sliceToField converts a strings.slice to a field struct + * @param fieldSlice the slice of a string representing the field of a cron job + * @return the field + */ + function sliceToField(strings.slice memory fieldSlice) private pure returns (Field memory) { + strings.slice memory star = strings.toSlice("*"); + strings.slice memory dash = strings.toSlice("-"); + strings.slice memory slash = strings.toSlice("/"); + strings.slice memory comma = strings.toSlice(","); + Field memory field; + if (fieldSlice.equals(star)) { + field.fieldType = FieldType.WILD; + } else if (fieldSlice.contains(dash)) { + field.fieldType = FieldType.RANGE; + strings.slice memory start = fieldSlice.split(dash); + field.rangeStart = sliceToUint8(start); + field.rangeEnd = sliceToUint8(fieldSlice); + } else if (fieldSlice.contains(slash)) { + field.fieldType = FieldType.INTERVAL; + fieldSlice.split(slash); + field.interval = sliceToUint8(fieldSlice); + } else if (fieldSlice.contains(comma)) { + field.fieldType = 
FieldType.LIST; + strings.slice memory token; + while (fieldSlice.len() > 0) { + if (field.listLength > 25) { + revert ListTooLarge(); + } + token = fieldSlice.split(comma); + field.list[field.listLength] = sliceToUint8(token); + field.listLength++; + } + } else { + // needs input validation + field.fieldType = FieldType.EXACT; + field.singleValue = sliceToUint8(fieldSlice); + } + return field; + } + + /** + * @notice fieldToBstring converts a field to the bytes representation of that field string + * @param field the field to stringify + * @return bytes representing the string, ex: bytes("*") + */ + function fieldToBstring(Field memory field) private pure returns (bytes memory) { + if (field.fieldType == FieldType.WILD) { + return "*"; + } else if (field.fieldType == FieldType.EXACT) { + return uintToBString(uint256(field.singleValue)); + } else if (field.fieldType == FieldType.RANGE) { + return bytes.concat(uintToBString(field.rangeStart), "-", uintToBString(field.rangeEnd)); + } else if (field.fieldType == FieldType.INTERVAL) { + return bytes.concat("*/", uintToBString(uint256(field.interval))); + } else if (field.fieldType == FieldType.LIST) { + bytes memory result = uintToBString(field.list[0]); + for (uint256 idx = 1; idx < field.listLength; idx++) { + result = bytes.concat(result, ",", uintToBString(field.list[idx])); + } + return result; + } + revert UnknownFieldType(); + } + + /** + * @notice uintToBString converts a uint256 to a bytes representation of that uint as a string + * @param n the number to stringify + * @return bytes representing the string, ex: bytes("1") + */ + function uintToBString(uint256 n) private pure returns (bytes memory) { + if (n == 0) { + return "0"; + } + uint256 j = n; + uint256 len; + while (j != 0) { + len++; + j /= 10; + } + bytes memory bstr = new bytes(len); + uint256 k = len; + while (n != 0) { + k = k - 1; + uint8 temp = (48 + uint8(n - (n / 10) * 10)); + bytes1 b1 = bytes1(temp); + bstr[k] = b1; + n /= 10; + } + return 
bstr; + } + + /** + * @notice sliceToUint8 converts a strings.slice to uint8 + * @param slice the string slice to convert to a uint8 + * @return the number that the string represents ex: "20" --> 20 + */ + function sliceToUint8(strings.slice memory slice) private pure returns (uint8) { + bytes memory b = bytes(slice.toString()); + uint8 i; + uint8 result = 0; + for (i = 0; i < b.length; i++) { + uint8 c = uint8(b[i]); + if (c >= 48 && c <= 57) { + result = result * 10 + (c - 48); + } + } + return result; + } +} diff --git a/contracts/src/v0.8/automation/mocks/KeeperRegistrar1_2Mock.sol b/contracts/src/v0.8/automation/mocks/KeeperRegistrar1_2Mock.sol new file mode 100644 index 00000000..3f1a4d71 --- /dev/null +++ b/contracts/src/v0.8/automation/mocks/KeeperRegistrar1_2Mock.sol @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +contract KeeperRegistrar1_2Mock { + event AutoApproveAllowedSenderSet(address indexed senderAddress, bool allowed); + event ConfigChanged( + uint8 autoApproveConfigType, + uint32 autoApproveMaxAllowed, + address keeperRegistry, + uint96 minPLIJuels + ); + event OwnershipTransferRequested(address indexed from, address indexed to); + event OwnershipTransferred(address indexed from, address indexed to); + event RegistrationApproved(bytes32 indexed hash, string displayName, uint256 indexed upkeepId); + event RegistrationRejected(bytes32 indexed hash); + event RegistrationRequested( + bytes32 indexed hash, + string name, + bytes encryptedEmail, + address indexed upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes checkData, + uint96 amount, + uint8 indexed source + ); + + function emitAutoApproveAllowedSenderSet(address senderAddress, bool allowed) public { + emit AutoApproveAllowedSenderSet(senderAddress, allowed); + } + + function emitConfigChanged( + uint8 autoApproveConfigType, + uint32 autoApproveMaxAllowed, + address keeperRegistry, + uint96 minPLIJuels + ) public { + emit 
ConfigChanged(autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels); + } + + function emitOwnershipTransferRequested(address from, address to) public { + emit OwnershipTransferRequested(from, to); + } + + function emitOwnershipTransferred(address from, address to) public { + emit OwnershipTransferred(from, to); + } + + function emitRegistrationApproved(bytes32 hash, string memory displayName, uint256 upkeepId) public { + emit RegistrationApproved(hash, displayName, upkeepId); + } + + function emitRegistrationRejected(bytes32 hash) public { + emit RegistrationRejected(hash); + } + + function emitRegistrationRequested( + bytes32 hash, + string memory name, + bytes memory encryptedEmail, + address upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes memory checkData, + uint96 amount, + uint8 source + ) public { + emit RegistrationRequested( + hash, + name, + encryptedEmail, + upkeepContract, + gasLimit, + adminAddress, + checkData, + amount, + source + ); + } + + enum AutoApproveType { + DISABLED, + ENABLED_SENDER_ALLOWLIST, + ENABLED_ALL + } + + AutoApproveType public s_autoApproveConfigType; + uint32 public s_autoApproveMaxAllowed; + uint32 public s_approvedCount; + address public s_keeperRegistry; + uint256 public s_minPLIJuels; + + // Function to set mock return data for the getRegistrationConfig function + function setRegistrationConfig( + AutoApproveType _autoApproveConfigType, + uint32 _autoApproveMaxAllowed, + uint32 _approvedCount, + address _keeperRegistry, + uint256 _minPLIJuels + ) external { + s_autoApproveConfigType = _autoApproveConfigType; + s_autoApproveMaxAllowed = _autoApproveMaxAllowed; + s_approvedCount = _approvedCount; + s_keeperRegistry = _keeperRegistry; + s_minPLIJuels = _minPLIJuels; + } + + // Mock getRegistrationConfig function + function getRegistrationConfig() + external + view + returns ( + AutoApproveType autoApproveConfigType, + uint32 autoApproveMaxAllowed, + uint32 approvedCount, + address 
keeperRegistry, + uint256 minPLIJuels + ) + { + return (s_autoApproveConfigType, s_autoApproveMaxAllowed, s_approvedCount, s_keeperRegistry, s_minPLIJuels); + } +} diff --git a/contracts/src/v0.8/automation/mocks/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.sol b/contracts/src/v0.8/automation/mocks/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.sol new file mode 100644 index 00000000..87e93c63 --- /dev/null +++ b/contracts/src/v0.8/automation/mocks/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.sol @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.6; + +contract KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock { + event OwnershipTransferRequested(address indexed from, address indexed to); + event OwnershipTransferred(address indexed from, address indexed to); + + function emitOwnershipTransferRequested(address from, address to) public { + emit OwnershipTransferRequested(from, to); + } + + function emitOwnershipTransferred(address from, address to) public { + emit OwnershipTransferred(from, to); + } + + bool public s_mockResult; + bytes public s_mockPayload; + uint256 public s_mockGas; + + // Function to set mock return data for the measureCheckGas function + function setMeasureCheckGasResult(bool result, bytes memory payload, uint256 gas) external { + s_mockResult = result; + s_mockPayload = payload; + s_mockGas = gas; + } + + // Mock measureCheckGas function + function measureCheckGas(uint256 id, address from) external returns (bool, bytes memory, uint256) { + return (s_mockResult, s_mockPayload, s_mockGas); + } +} diff --git a/contracts/src/v0.8/automation/mocks/MockAggregator.sol b/contracts/src/v0.8/automation/mocks/MockAggregator.sol new file mode 100644 index 00000000..8efca033 --- /dev/null +++ b/contracts/src/v0.8/automation/mocks/MockAggregator.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import {IOffchainAggregator} from "../HeartbeatRequester.sol"; + +contract MockAggregator is 
IOffchainAggregator { + int256 public s_answer; + bool public newRoundCalled; + + function setLatestAnswer(int256 answer) public { + s_answer = answer; + } + + function latestAnswer() public view returns (int256) { + return s_answer; + } + + function requestNewRound() external override returns (uint80) { + newRoundCalled = true; + return 1; + } +} diff --git a/contracts/src/v0.8/automation/mocks/MockAggregatorProxy.sol b/contracts/src/v0.8/automation/mocks/MockAggregatorProxy.sol new file mode 100644 index 00000000..c48f0285 --- /dev/null +++ b/contracts/src/v0.8/automation/mocks/MockAggregatorProxy.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import {IAggregatorProxy} from "../HeartbeatRequester.sol"; + +contract MockAggregatorProxy is IAggregatorProxy { + address internal s_aggregator; + + constructor(address _aggregator) { + s_aggregator = _aggregator; + } + + function updateAggregator(address _aggregator) external { + s_aggregator = _aggregator; + } + + function aggregator() external view override returns (address) { + return s_aggregator; + } +} diff --git a/contracts/src/v0.8/automation/mocks/MockKeeperRegistry2_1.sol b/contracts/src/v0.8/automation/mocks/MockKeeperRegistry2_1.sol new file mode 100644 index 00000000..2f21780c --- /dev/null +++ b/contracts/src/v0.8/automation/mocks/MockKeeperRegistry2_1.sol @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.16; + +import "../interfaces/IAutomationRegistryConsumer.sol"; + +contract MockKeeperRegistry2_1 is IAutomationRegistryConsumer { + uint96 balance; + uint96 minBalance; + + constructor() {} + + function getBalance(uint256 id) external view override returns (uint96) { + return balance; + } + + function getMinBalance(uint256 id) external view override returns (uint96) { + return minBalance; + } + + function cancelUpkeep(uint256 id) external override {} + + function pauseUpkeep(uint256 id) external override {} + + function unpauseUpkeep(uint256 id) 
external override {} + + function updateCheckData(uint256 id, bytes calldata newCheckData) external {} + + function addFunds(uint256 id, uint96 amount) external override {} + + function withdrawFunds(uint256 id, address to) external override {} +} diff --git a/contracts/src/v0.8/automation/test/AutomationForwarder.t.sol b/contracts/src/v0.8/automation/test/AutomationForwarder.t.sol new file mode 100644 index 00000000..d9a8c3bc --- /dev/null +++ b/contracts/src/v0.8/automation/test/AutomationForwarder.t.sol @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.16; + +import {IAutomationForwarder} from "../interfaces/IAutomationForwarder.sol"; +import {AutomationForwarder} from "../AutomationForwarder.sol"; +import {AutomationForwarderLogic} from "../AutomationForwarderLogic.sol"; +import "forge-std/Test.sol"; + +// in contracts directory, run +// forge test --match-path src/v0.8/automation/test/AutomationForwarder.t.sol + +contract Target { + function handler() external pure {} + + function handlerRevert() external pure { + revert("revert"); + } +} + +contract AutomationForwarderTestSetUp is Test { + address internal constant REGISTRY = 0x3e19ef5Aaa2606655f5A677A97E085cf3811067c; + address internal constant STRANGER = 0x618fae5d04963B2CEf533F247Eb2C46Bf1801D3b; + + IAutomationForwarder internal forwarder; + address internal TARGET; + + function setUp() public { + TARGET = address(new Target()); + AutomationForwarderLogic logicContract = new AutomationForwarderLogic(); + forwarder = IAutomationForwarder(address(new AutomationForwarder(TARGET, REGISTRY, address(logicContract)))); + } +} + +contract AutomationForwarderTest_constructor is AutomationForwarderTestSetUp { + function testInitialValues() external { + assertEq(address(forwarder.getRegistry()), REGISTRY); + assertEq(forwarder.getTarget(), TARGET); + } + + function testTypeAndVersion() external { + assertEq(forwarder.typeAndVersion(), "AutomationForwarder 1.0.0"); + } +} + +contract 
AutomationForwarderTest_forward is AutomationForwarderTestSetUp { + function testOnlyCallableByTheRegistry() external { + vm.prank(REGISTRY); + forwarder.forward(100000, abi.encodeWithSelector(Target.handler.selector)); + vm.prank(STRANGER); + vm.expectRevert(); + forwarder.forward(100000, abi.encodeWithSelector(Target.handler.selector)); + } + + function testReturnsSuccessValueAndGasUsed() external { + vm.startPrank(REGISTRY); + (bool success, uint256 gasUsed) = forwarder.forward(100000, abi.encodeWithSelector(Target.handler.selector)); + assertTrue(success); + assertGt(gasUsed, 0); + (success, gasUsed) = forwarder.forward(100000, abi.encodeWithSelector(Target.handlerRevert.selector)); + assertFalse(success); + assertGt(gasUsed, 0); + } +} + +contract AutomationForwarderTest_updateRegistry is AutomationForwarderTestSetUp { + function testOnlyCallableByTheActiveRegistry() external { + address newRegistry = address(1); + vm.startPrank(REGISTRY); + forwarder.updateRegistry(newRegistry); + assertEq(address(forwarder.getRegistry()), newRegistry); + vm.expectRevert(); + forwarder.updateRegistry(REGISTRY); + } +} diff --git a/contracts/src/v0.8/automation/test/BaseTest.t.sol b/contracts/src/v0.8/automation/test/BaseTest.t.sol new file mode 100644 index 00000000..ecba6521 --- /dev/null +++ b/contracts/src/v0.8/automation/test/BaseTest.t.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "forge-std/Test.sol"; + +contract BaseTest is Test { + address internal OWNER = 0x00007e64E1fB0C487F25dd6D3601ff6aF8d32e4e; + address internal constant STRANGER = address(999); + + function setUp() public virtual { + vm.startPrank(OWNER); + deal(OWNER, 1e20); + } +} diff --git a/contracts/src/v0.8/automation/test/HeartbeatRequester.t.sol b/contracts/src/v0.8/automation/test/HeartbeatRequester.t.sol new file mode 100644 index 00000000..caac639c --- /dev/null +++ b/contracts/src/v0.8/automation/test/HeartbeatRequester.t.sol @@ -0,0 +1,112 @@ +// 
SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import {HeartbeatRequester, IAggregatorProxy} from "../HeartbeatRequester.sol"; +import {MockAggregator} from "../mocks/MockAggregator.sol"; +import {MockAggregatorProxy} from "../mocks/MockAggregatorProxy.sol"; +import {BaseTest} from "./BaseTest.t.sol"; + +// from contracts directory, +// forge test --match-path src/v0.8/automation/test/HeartbeatRequester.t.sol + +contract HeartbeatRequesterSetUp is BaseTest { + HeartbeatRequester internal heartbeatRequester; + MockAggregator internal aggregator; + IAggregatorProxy internal aggregatorProxy; + MockAggregator internal aggregator2; + IAggregatorProxy internal aggregatorProxy2; + + event HeartbeatPermitted(address indexed permittedCaller, address newProxy, address oldProxy); + event HeartbeatRemoved(address indexed permittedCaller, address removedProxy); + error HeartbeatNotPermitted(); + + function setUp() public override { + BaseTest.setUp(); + heartbeatRequester = new HeartbeatRequester(); + aggregator = new MockAggregator(); + aggregatorProxy = IAggregatorProxy(new MockAggregatorProxy(address(aggregator))); + aggregator2 = new MockAggregator(); + aggregatorProxy2 = IAggregatorProxy(new MockAggregatorProxy(address(aggregator2))); + } +} + +contract HeartbeatRequester_permitHeartbeat is HeartbeatRequesterSetUp { + function testBasicSuccess() public { + vm.expectEmit(); + emit HeartbeatPermitted(STRANGER, address(aggregatorProxy), address(0)); + heartbeatRequester.permitHeartbeat(STRANGER, aggregatorProxy); + + vm.expectEmit(); + emit HeartbeatPermitted(STRANGER, address(aggregatorProxy2), address(aggregatorProxy)); + heartbeatRequester.permitHeartbeat(STRANGER, aggregatorProxy2); + } + + function testBasicDeployerSuccess() public { + vm.expectEmit(); + emit HeartbeatPermitted(OWNER, address(aggregatorProxy), address(0)); + heartbeatRequester.permitHeartbeat(OWNER, aggregatorProxy); + + vm.expectEmit(); + emit HeartbeatPermitted(OWNER, 
address(aggregatorProxy2), address(aggregatorProxy)); + heartbeatRequester.permitHeartbeat(OWNER, aggregatorProxy2); + } + + function testOnlyCallableByOwnerReverts() public { + vm.expectRevert(bytes("Only callable by owner")); + changePrank(STRANGER); + heartbeatRequester.permitHeartbeat(STRANGER, aggregatorProxy); + } +} + +contract HeartbeatRequester_removeHeartbeat is HeartbeatRequesterSetUp { + function testBasicSuccess() public { + vm.expectEmit(); + emit HeartbeatPermitted(STRANGER, address(aggregatorProxy), address(0)); + heartbeatRequester.permitHeartbeat(STRANGER, aggregatorProxy); + + vm.expectEmit(); + emit HeartbeatRemoved(STRANGER, address(aggregatorProxy)); + heartbeatRequester.removeHeartbeat(STRANGER); + } + + function testRemoveNoPermitsSuccess() public { + vm.expectEmit(); + emit HeartbeatRemoved(STRANGER, address(0)); + heartbeatRequester.removeHeartbeat(STRANGER); + } + + function testOnlyCallableByOwnerReverts() public { + vm.expectRevert(bytes("Only callable by owner")); + changePrank(STRANGER); + heartbeatRequester.removeHeartbeat(address(this)); + } +} + +contract HeartbeatRequester_getAggregatorRequestHeartbeat is HeartbeatRequesterSetUp { + function testBasicSuccess() public { + vm.expectEmit(); + emit HeartbeatPermitted(OWNER, address(aggregatorProxy), address(0)); + heartbeatRequester.permitHeartbeat(OWNER, aggregatorProxy); + heartbeatRequester.getAggregatorAndRequestHeartbeat(address(aggregatorProxy)); + // getter for newRoundCalled value + bool val = aggregator.newRoundCalled(); + assertEq(val, true); + } + + function testHeartbeatNotPermittedReverts() public { + bytes32 hashedReason = keccak256(abi.encodePacked("HeartbeatNotPermitted()")); + bytes memory revertMessage = bytes32ToBytes(hashedReason); + vm.expectRevert(revertMessage); + heartbeatRequester.getAggregatorAndRequestHeartbeat(address(aggregatorProxy)); + bool val = aggregator.newRoundCalled(); + assertFalse(val); + } + + function bytes32ToBytes(bytes32 _bytes32) public 
pure returns (bytes memory) { + bytes memory bytesArray = new bytes(4); + for (uint256 i; i < 4; ++i) { + bytesArray[i] = _bytes32[i]; + } + return bytesArray; + } +} diff --git a/contracts/src/v0.8/automation/test/MercuryRegistry.t.sol b/contracts/src/v0.8/automation/test/MercuryRegistry.t.sol new file mode 100644 index 00000000..4018c769 --- /dev/null +++ b/contracts/src/v0.8/automation/test/MercuryRegistry.t.sol @@ -0,0 +1,336 @@ +pragma solidity ^0.8.0; + +import {Test} from "forge-std/Test.sol"; +import "../dev/MercuryRegistry.sol"; +import "../dev/MercuryRegistryBatchUpkeep.sol"; +import "../interfaces/StreamsLookupCompatibleInterface.sol"; + +contract MercuryRegistryTest is Test { + address internal constant OWNER = 0x00007e64E1fB0C487F25dd6D3601ff6aF8d32e4e; + int192 internal constant DEVIATION_THRESHOLD = 10_000; // 1% + uint32 internal constant STALENESS_SECONDS = 3600; // 1 hour + + address s_verifier = 0x60448B880c9f3B501af3f343DA9284148BD7D77C; + + string[] feedIds; + string s_BTCUSDFeedId = "0x6962e629c3a0f5b7e3e9294b0c283c9b20f94f1c89c8ba8c1ee4650738f20fb2"; + string s_ETHUSDFeedId = "0xf753e1201d54ac94dfd9334c542562ff7e42993419a661261d010af0cbfd4e34"; + MercuryRegistry s_testRegistry; + + // Feed: BTC/USD + // Date: Tuesday, August 22, 2023 7:29:28 PM + // Price: $25,857.11126720 + bytes s_august22BTCUSDMercuryReport = + 
hex"0006a2f7f9b6c10385739c687064aa1e457812927f59446cccddf7740cc025ad00000000000000000000000000000000000000000000000000000000014cb94e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001206962e629c3a0f5b7e3e9294b0c283c9b20f94f1c89c8ba8c1ee4650738f20fb20000000000000000000000000000000000000000000000000000000064e50c980000000000000000000000000000000000000000000000000000025a0864a8c00000000000000000000000000000000000000000000000000000025a063481720000000000000000000000000000000000000000000000000000025a0a94d00f000000000000000000000000000000000000000000000000000000000226181f4733a6d98892d1821771c041d5d69298210fdca9d643ad74477423b6a3045647000000000000000000000000000000000000000000000000000000000226181f0000000000000000000000000000000000000000000000000000000064e50c9700000000000000000000000000000000000000000000000000000000000000027f3056b1b71dd516037afd2e636f8afb39853f5cb3ccaa4b02d6f9a2a64622534e94aa1f794f6a72478deb7e0eb2942864b7fac76d6e120bd809530b1b74a32b00000000000000000000000000000000000000000000000000000000000000027bd3b385c0812dfcad2652d225410a014a0b836cd9635a6e7fb404f65f7a912f0b193db57e5c4f38ce71f29170f7eadfa94d972338858bacd59ab224245206db"; + + // Feed: BTC/USD + // Date: Wednesday, August 23, 2023 7:55:02 PM + // Price: $26,720.37346975 + bytes s_august23BTCUSDMercuryReport = + 
hex"0006a2f7f9b6c10385739c687064aa1e457812927f59446cccddf7740cc025ad000000000000000000000000000000000000000000000000000000000159a630000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001206962e629c3a0f5b7e3e9294b0c283c9b20f94f1c89c8ba8c1ee4650738f20fb20000000000000000000000000000000000000000000000000000000064e664160000000000000000000000000000000000000000000000000000026e21d63e9f0000000000000000000000000000000000000000000000000000026e2147576a0000000000000000000000000000000000000000000000000000026e226525d30000000000000000000000000000000000000000000000000000000002286ce7c44fa27f67f6dd0a8bb40c12f0f050231845789f022a82aa5f4b3fe5bf2068fb0000000000000000000000000000000000000000000000000000000002286ce70000000000000000000000000000000000000000000000000000000064e664150000000000000000000000000000000000000000000000000000000000000002e9c5857631172082a47a20aa2fd9f580c1c48275d030c17a2dff77da04f88708ce776ef74c04b9ef6ba87c56d8f8c57e80ddd5298b477d60dd49fb8120f1b9ce000000000000000000000000000000000000000000000000000000000000000248624e0e2341cdaf989098f8b3dee2660b792b24e5251d6e48e3abe0a879c0683163a3a199969010e15353a99926d113f6d4cbab9d82ae90a159af9f74f8c157"; + + // Feed: BTC/USD + // Date: Wednesday, August 23, 2023 8:13:28 PM + // Price: $26,559.67100000 + bytes s_august23BTCUSDMercuryReport_2 = + 
hex"0006a2f7f9b6c10385739c687064aa1e457812927f59446cccddf7740cc025ad000000000000000000000000000000000000000000000000000000000159d009000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001206962e629c3a0f5b7e3e9294b0c283c9b20f94f1c89c8ba8c1ee4650738f20fb20000000000000000000000000000000000000000000000000000000064e668690000000000000000000000000000000000000000000000000000026a63f9bc600000000000000000000000000000000000000000000000000000026a635984c00000000000000000000000000000000000000000000000000000026a67bb929d00000000000000000000000000000000000000000000000000000000022873e999d3ff9b644bba530af933dfaa6c59e31c3e232fcaa1e5f7304e2e79d939da1900000000000000000000000000000000000000000000000000000000022873e80000000000000000000000000000000000000000000000000000000064e66868000000000000000000000000000000000000000000000000000000000000000247c21657a6c2795986e95081876bf8b5f24bf72abd2dc4c601e7c96d654bcf543b5bb730e3d4736a308095e4531e7c03f581ac364f0889922ba3ae24b7cf968000000000000000000000000000000000000000000000000000000000000000020d3037d9f55256a001a2aa79ea746526c7cb36747e1deb4c804311394b4027667e5b711bcecfe60632e86cf8e83c28d1465e2d8d90bc0638dad8347f55488e8e"; + + // Feed: ETH/USD + // Date: Wednesday, August 23, 2023 7:55:01 PM + // Price: $1,690.76482169 + bytes s_august23ETHUSDMercuryReport = + 
hex"0006c41ec94138ae62cce3f1a2b852e42fe70359502fa7b6bdbf81207970d88e00000000000000000000000000000000000000000000000000000000016d874d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000220000000000000000000000000000000000000000000000000000000000000028000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120f753e1201d54ac94dfd9334c542562ff7e42993419a661261d010af0cbfd4e340000000000000000000000000000000000000000000000000000000064e66415000000000000000000000000000000000000000000000000000000275dbe6079000000000000000000000000000000000000000000000000000000275c905eba000000000000000000000000000000000000000000000000000000275e5693080000000000000000000000000000000000000000000000000000000002286ce7c44fa27f67f6dd0a8bb40c12f0f050231845789f022a82aa5f4b3fe5bf2068fb0000000000000000000000000000000000000000000000000000000002286ce70000000000000000000000000000000000000000000000000000000064e664150000000000000000000000000000000000000000000000000000000000000002a2b01f7741563cfe305efaec43e56cd85731e3a8e2396f7c625bd16adca7b39c97805b6170adc84d065f9d68c87104c3509aeefef42c0d1711e028ace633888000000000000000000000000000000000000000000000000000000000000000025d984ad476bda9547cf0f90d32732dc5a0d84b0e2fe9795149b786fb05332d4c092e278b4dddeef45c070b818c6e221db2633b573d616ef923c755a145ea099c"; + + // Feed: USDC/USD + // Date: Wednesday, August 30, 2023 5:05:01 PM + // Price: $1.00035464 + bytes s_august30USDCUSDMercuryReport = + 
hex"0006970c13551e2a390246f5eccb62b9be26848e72026830f4688f49201b5a050000000000000000000000000000000000000000000000000000000001c89843000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000220000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120a5b07943b89e2c278fc8a2754e2854316e03cb959f6d323c2d5da218fb6b0ff80000000000000000000000000000000000000000000000000000000064ef69fa0000000000000000000000000000000000000000000000000000000005f5da000000000000000000000000000000000000000000000000000000000005f5b0f80000000000000000000000000000000000000000000000000000000005f5f8b0000000000000000000000000000000000000000000000000000000000240057307d0a0421d25328cb6dcfc5d0e211ff0580baaaf104e9877fc52cf2e8ec0aa7d00000000000000000000000000000000000000000000000000000000024005730000000000000000000000000000000000000000000000000000000064ef69fa0000000000000000000000000000000000000000000000000000000000000002b9e7fb46f1e9d22a1156024dc2bbf2bc6d337e0a2d78aaa3fb6e43b880217e5897732b516e39074ef4dcda488733bfee80c0a10714b94621cd93df6842373cf5000000000000000000000000000000000000000000000000000000000000000205ca5f8da9d6ae01ec6d85c681e536043323405b3b8a15e4d2a288e02dac32f10b2294593e270a4bbf53b0c4978b725293e85e49685f1d3ce915ff670ab6612f"; + + function setUp() public virtual { + // Set owner, and fork Arbitrum Goerli Testnet (chain ID 421613). + // The fork is only used with the `FORK_TEST` flag enabeld, as to not disrupt CI. For CI, a mock verifier is used instead. 
+ vm.startPrank(OWNER); + try vm.envBool("FORK_TEST") returns (bool /* fork testing enabled */) { + vm.selectFork(vm.createFork("https://goerli-rollup.arbitrum.io/rpc")); + } catch { + s_verifier = address(new MockVerifierProxy()); + } + vm.chainId(31337); // restore chain Id + + // Use a BTC feed and ETH feed. + feedIds = new string[](2); + feedIds[0] = s_BTCUSDFeedId; + feedIds[1] = s_ETHUSDFeedId; + + // Deviation threshold and staleness are the same for all feeds. + int192[] memory thresholds = new int192[](1); + thresholds[0] = DEVIATION_THRESHOLD; + uint32[] memory stalenessSeconds = new uint32[](1); + stalenessSeconds[0] = STALENESS_SECONDS; + + // Initialize with BTC feed. + string[] memory initialFeedIds = new string[](1); + initialFeedIds[0] = feedIds[0]; + string[] memory initialFeedNames = new string[](1); + initialFeedNames[0] = "BTC/USD"; + s_testRegistry = new MercuryRegistry( + initialFeedIds, + initialFeedNames, + thresholds, + stalenessSeconds, + address(0) // verifier unset + ); + s_testRegistry.setVerifier(s_verifier); // set verifier + + // Add ETH feed. + string[] memory addedFeedIds = new string[](1); + addedFeedIds[0] = feedIds[1]; + string[] memory addedFeedNames = new string[](1); + addedFeedNames[0] = "ETH/USD"; + s_testRegistry.addFeeds(addedFeedIds, addedFeedNames, thresholds, stalenessSeconds); + } + + function testMercuryRegistry() public { + // Check upkeep, receive Mercury revert. 
+ uint256 blockNumber = block.number; + vm.expectRevert( + abi.encodeWithSelector( + StreamsLookupCompatibleInterface.StreamsLookup.selector, + "feedIdHex", // feedParamKey + feedIds, // feed Ids + "blockNumber", // timeParamKey + blockNumber, // block number on which request is occuring + "" // extra data + ) + ); + s_testRegistry.checkUpkeep(""); + + // Obtain mercury report off-chain (for August 22 BTC/USD price) + bytes[] memory values = new bytes[](1); + values[0] = s_august22BTCUSDMercuryReport; + + // Pass the obtained mercury report into checkCallback, to assert that an update is warranted. + (bool shouldPerformUpkeep, bytes memory performData) = s_testRegistry.checkCallback(values, bytes("")); + assertEq(shouldPerformUpkeep, true); + + // Perform upkeep to update on-chain state. + s_testRegistry.performUpkeep(performData); + + // Check state of BTC/USD feed to ensure update was propagated. + bytes memory oldPerformData; + uint32 oldObservationsTimestamp; + { + // scoped to prevent stack-too-deep error + ( + uint32 observationsTimestamp, + int192 price, + int192 bid, + int192 ask, + string memory feedName, + string memory localFeedId, + bool active, + int192 deviationPercentagePPM, + uint32 stalenessSeconds + ) = s_testRegistry.s_feedMapping(s_BTCUSDFeedId); + assertEq(observationsTimestamp, 1692732568); // Tuesday, August 22, 2023 7:29:28 PM + assertEq(bid, 2585674416498); // $25,856.74416498 + assertEq(price, 2585711126720); // $25,857.11126720 + assertEq(ask, 2585747836943); // $25,857.47836943 + assertEq(feedName, "BTC/USD"); + assertEq(localFeedId, s_BTCUSDFeedId); + assertEq(active, true); + assertEq(deviationPercentagePPM, DEVIATION_THRESHOLD); + assertEq(stalenessSeconds, STALENESS_SECONDS); + + // Save this for later in the test. 
+ oldPerformData = performData; + oldObservationsTimestamp = observationsTimestamp; + } + // Obtain mercury report off-chain (for August 23 BTC/USD price & ETH/USD price) + values = new bytes[](2); + values[0] = s_august23BTCUSDMercuryReport; + values[1] = s_august23ETHUSDMercuryReport; + + // Pass the obtained mercury report into checkCallback, to assert that an update is warranted. + (shouldPerformUpkeep, performData) = s_testRegistry.checkCallback(values, bytes("")); + assertEq(shouldPerformUpkeep, true); + + // Perform upkeep to update on-chain state. + s_testRegistry.performUpkeep(performData); + + // Make a batch request for both the BTC/USD feed data and the ETH/USD feed data. + MercuryRegistry.Feed[] memory feeds = s_testRegistry.getLatestFeedData(feedIds); + + // Check state of BTC/USD feed to ensure update was propagated. + assertEq(feeds[0].observationsTimestamp, 1692820502); // Wednesday, August 23, 2023 7:55:02 PM + assertEq(feeds[0].bid, 2672027981674); // $26,720.27981674 + assertEq(feeds[0].price, 2672037346975); // $26,720.37346975 + assertEq(feeds[0].ask, 2672046712275); // $26,720.46712275 + assertEq(feeds[0].feedName, "BTC/USD"); + assertEq(feeds[0].feedId, s_BTCUSDFeedId); + + // Check state of ETH/USD feed to ensure update was propagated. + assertEq(feeds[1].observationsTimestamp, 1692820501); // Wednesday, August 23, 2023 7:55:01 PM + assertEq(feeds[1].bid, 169056689850); // $1,690.56689850 + assertEq(feeds[1].price, 169076482169); // $1,690.76482169 + assertEq(feeds[1].ask, 169086456584); // $16,90.86456584 + assertEq(feeds[1].feedName, "ETH/USD"); + assertEq(feeds[1].feedId, s_ETHUSDFeedId); + assertEq(feeds[1].active, true); + assertEq(feeds[1].deviationPercentagePPM, DEVIATION_THRESHOLD); + assertEq(feeds[1].stalenessSeconds, STALENESS_SECONDS); + + // Obtain mercury report off-chain for August 23 BTC/USD price (second report of the day). 
+ // The price of this incoming report will not deviate enough from the on-chain value to trigger an update, + // nor is the on-chain data stale enough. + values = new bytes[](1); + values[0] = s_august23BTCUSDMercuryReport_2; + + // Pass the obtained mercury report into checkCallback, to assert that an update is not warranted. + (shouldPerformUpkeep, performData) = s_testRegistry.checkCallback(values, bytes("")); + assertEq(shouldPerformUpkeep, false); + + // Ensure stale reports cannot be included. + vm.expectRevert( + abi.encodeWithSelector( + MercuryRegistry.StaleReport.selector, + feedIds[0], + feeds[0].observationsTimestamp, + oldObservationsTimestamp + ) + ); + s_testRegistry.performUpkeep(oldPerformData); + + // Ensure reports for inactive feeds cannot be included. + bytes[] memory inactiveFeedReports = new bytes[](1); + inactiveFeedReports[0] = s_august30USDCUSDMercuryReport; + bytes memory lookupData = ""; + vm.expectRevert( + abi.encodeWithSelector( + MercuryRegistry.FeedNotActive.selector, + "0xa5b07943b89e2c278fc8a2754e2854316e03cb959f6d323c2d5da218fb6b0ff8" // USDC/USD feed id + ) + ); + s_testRegistry.performUpkeep(abi.encode(inactiveFeedReports, lookupData)); + } + + // Below are the same tests as `testMercuryRegistry`, except done via a batching Mercury registry that + // consumes the test registry. This is to assert that batching can be accomplished by multiple different + // upkeep jobs, which can populate the same + function testMercuryRegistryBatchUpkeep() public { + MercuryRegistryBatchUpkeep batchedRegistry = new MercuryRegistryBatchUpkeep( + address(s_testRegistry), // use the test registry as master registry + 0, // start batch at index 0. + 50 // end batch beyond length of feed Ids (take responsibility for all feeds) + ); + // Check upkeep, receive Mercury revert. 
+ uint256 blockNumber = block.number; + vm.expectRevert( + abi.encodeWithSelector( + StreamsLookupCompatibleInterface.StreamsLookup.selector, + "feedIdHex", // feedParamKey + feedIds, // feed Ids + "blockNumber", // timeParamKey + blockNumber, // block number on which request is occuring + "" // extra data + ) + ); + batchedRegistry.checkUpkeep(""); + + // Obtain mercury report off-chain (for August 22 BTC/USD price) + bytes[] memory values = new bytes[](1); + values[0] = s_august22BTCUSDMercuryReport; + + // Pass the obtained mercury report into checkCallback, to assert that an update is warranted. + (bool shouldPerformUpkeep, bytes memory performData) = batchedRegistry.checkCallback(values, bytes("")); + assertEq(shouldPerformUpkeep, true); + + // Perform upkeep to update on-chain state. + batchedRegistry.performUpkeep(performData); + + // Check state of BTC/USD feed to ensure update was propagated. + ( + uint32 observationsTimestamp, + int192 price, + int192 bid, + int192 ask, + string memory feedName, + string memory localFeedId, + bool active, + int192 deviationPercentagePPM, + uint32 stalenessSeconds + ) = s_testRegistry.s_feedMapping(s_BTCUSDFeedId); + assertEq(observationsTimestamp, 1692732568); // Tuesday, August 22, 2023 7:29:28 PM + assertEq(bid, 2585674416498); // $25,856.74416498 + assertEq(price, 2585711126720); // $25,857.11126720 + assertEq(ask, 2585747836943); // $25,857.47836943 + assertEq(feedName, "BTC/USD"); + assertEq(localFeedId, s_BTCUSDFeedId); + assertEq(active, true); + assertEq(deviationPercentagePPM, DEVIATION_THRESHOLD); + assertEq(stalenessSeconds, STALENESS_SECONDS); + + // Obtain mercury report off-chain (for August 23 BTC/USD price & ETH/USD price) + values = new bytes[](2); + values[0] = s_august23BTCUSDMercuryReport; + values[1] = s_august23ETHUSDMercuryReport; + + // Pass the obtained mercury report into checkCallback, to assert that an update is warranted. 
+ (shouldPerformUpkeep, performData) = batchedRegistry.checkCallback(values, bytes("")); + assertEq(shouldPerformUpkeep, true); + + // Perform upkeep to update on-chain state, but with not enough gas to update both feeds. + batchedRegistry.performUpkeep{gas: 250_000}(performData); + + // Make a batch request for both the BTC/USD feed data and the ETH/USD feed data. + MercuryRegistry.Feed[] memory feeds = s_testRegistry.getLatestFeedData(feedIds); + + // Check state of BTC/USD feed to ensure update was propagated. + assertEq(feeds[0].observationsTimestamp, 1692820502); // Wednesday, August 23, 2023 7:55:02 PM + assertEq(feeds[0].bid, 2672027981674); // $26,720.27981674 + assertEq(feeds[0].price, 2672037346975); // $26,720.37346975 + assertEq(feeds[0].ask, 2672046712275); // $26,720.46712275 + assertEq(feeds[0].feedName, "BTC/USD"); + assertEq(feeds[0].feedId, s_BTCUSDFeedId); + + // Check state of ETH/USD feed to observe that the update was not propagated. + assertEq(feeds[1].observationsTimestamp, 0); + assertEq(feeds[1].bid, 0); + assertEq(feeds[1].price, 0); + assertEq(feeds[1].ask, 0); + assertEq(feeds[1].feedName, "ETH/USD"); + assertEq(feeds[1].feedId, s_ETHUSDFeedId); + assertEq(feeds[1].active, true); + assertEq(feeds[1].deviationPercentagePPM, DEVIATION_THRESHOLD); + assertEq(feeds[1].stalenessSeconds, STALENESS_SECONDS); + + // Try again, with sufficient gas to update both feeds. + batchedRegistry.performUpkeep{gas: 2_500_000}(performData); + feeds = s_testRegistry.getLatestFeedData(feedIds); + + // Check state of ETH/USD feed to ensure update was propagated. 
+ assertEq(feeds[1].observationsTimestamp, 1692820501); // Wednesday, August 23, 2023 7:55:01 PM + assertEq(feeds[1].bid, 169056689850); // $1,690.56689850 + assertEq(feeds[1].price, 169076482169); // $1,690.76482169 + assertEq(feeds[1].ask, 169086456584); // $16,90.86456584 + assertEq(feeds[1].feedName, "ETH/USD"); + assertEq(feeds[1].feedId, s_ETHUSDFeedId); + + // Obtain mercury report off-chain for August 23 BTC/USD price (second report of the day). + // The price of this incoming report will not deviate enough from the on-chain value to trigger an update. + values = new bytes[](1); + values[0] = s_august23BTCUSDMercuryReport_2; + + // Pass the obtained mercury report into checkCallback, to assert that an update is not warranted. + (shouldPerformUpkeep, performData) = batchedRegistry.checkCallback(values, bytes("")); + assertEq(shouldPerformUpkeep, false); + } +} + +contract MockVerifierProxy is IVerifierProxy { + function verify(bytes calldata payload) external payable override returns (bytes memory) { + (, bytes memory reportData, , , ) = abi.decode(payload, (bytes32[3], bytes, bytes32[], bytes32[], bytes32)); + return reportData; + } +} diff --git a/contracts/src/v0.8/automation/test/StructFactory.sol b/contracts/src/v0.8/automation/test/StructFactory.sol new file mode 100644 index 00000000..9317244a --- /dev/null +++ b/contracts/src/v0.8/automation/test/StructFactory.sol @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +contract StructFactory { + address internal OWNER; + address internal constant STRANGER = address(999); +} diff --git a/contracts/src/v0.8/automation/testhelpers/CronTestHelper.sol b/contracts/src/v0.8/automation/testhelpers/CronTestHelper.sol new file mode 100644 index 00000000..f5005471 --- /dev/null +++ b/contracts/src/v0.8/automation/testhelpers/CronTestHelper.sol @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +import {Cron as CronInternal, Spec} from 
"../libraries/internal/Cron.sol"; +import {Cron as CronExternal} from "../libraries/external/Cron.sol"; + +/** + * @title The CronInternalTestHelper contract + * @notice This contract exposes core functionality of the internal/Cron library. + * It is only intended for use in tests. + */ +contract CronInternalTestHelper { + /** + * @notice Converts a cron string to a Spec, validates the spec, and encodes the spec. + * This should only be called off-chain, as it is gas expensive! + * @param cronString the cron string to convert and encode + * @return the abi encoding of the Spec struct representing the cron string + */ + function encodeCronString(string memory cronString) external pure returns (bytes memory) { + return CronInternal.toEncodedSpec(cronString); + } + + /** + * @notice encodedSpecToString is a helper function for turning an + * encoded spec back into a string. There is limited or no use for this outside + * of tests. + */ + function encodedSpecToString(bytes memory encodedSpec) public pure returns (string memory) { + Spec memory spec = abi.decode(encodedSpec, (Spec)); + return CronInternal.toCronString(spec); + } + + /** + * @notice calculateNextTick calculates the next time a cron job should "tick". + * This should only be called off-chain, as it is gas expensive! + * @param cronString the cron string to consider + * @return the timestamp in UTC of the next "tick" + */ + function calculateNextTick(string memory cronString) external view returns (uint256) { + return CronInternal.nextTick(CronInternal.toSpec(cronString)); + } + + /** + * @notice calculateLastTick calculates the last time a cron job "ticked". + * This should only be called off-chain, as it is gas expensive! 
+ * @param cronString the cron string to consider + * @return the timestamp in UTC of the last "tick" + */ + function calculateLastTick(string memory cronString) external view returns (uint256) { + return CronInternal.lastTick(CronInternal.toSpec(cronString)); + } +} + +/** + * @title The CronExternalTestHelper contract + * @notice This contract exposes core functionality of the external/Cron library. + * It is only intended for use in tests. + */ +contract CronExternalTestHelper { + /** + * @notice Converts a cron string to a Spec, validates the spec, and encodes the spec. + * This should only be called off-chain, as it is gas expensive! + * @param cronString the cron string to convert and encode + * @return the abi encoding of the Spec struct representing the cron string + */ + function encodeCronString(string memory cronString) external pure returns (bytes memory) { + return CronExternal.toEncodedSpec(cronString); + } + + /** + * @notice encodedSpecToString is a helper function for turning an + * encoded spec back into a string. There is limited or no use for this outside + * of tests. + */ + function encodedSpecToString(bytes memory encodedSpec) public pure returns (string memory) { + Spec memory spec = abi.decode(encodedSpec, (Spec)); + return CronExternal.toCronString(spec); + } + + /** + * @notice calculateNextTick calculates the next time a cron job should "tick". + * This should only be called off-chain, as it is gas expensive! + * @param cronString the cron string to consider + * @return the timestamp in UTC of the next "tick" + */ + function calculateNextTick(string memory cronString) external view returns (uint256) { + return CronExternal.nextTick(CronExternal.toSpec(cronString)); + } + + /** + * @notice calculateLastTick calculates the last time a cron job "ticked". + * This should only be called off-chain, as it is gas expensive! 
+ * @param cronString the cron string to consider + * @return the timestamp in UTC of the last "tick" + */ + function calculateLastTick(string memory cronString) external view returns (uint256) { + return CronExternal.lastTick(CronExternal.toSpec(cronString)); + } +} diff --git a/contracts/src/v0.8/automation/testhelpers/CronUpkeepTestHelper.sol b/contracts/src/v0.8/automation/testhelpers/CronUpkeepTestHelper.sol new file mode 100644 index 00000000..8ede4ea3 --- /dev/null +++ b/contracts/src/v0.8/automation/testhelpers/CronUpkeepTestHelper.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +import "../upkeeps/CronUpkeep.sol"; +import {Cron, Spec} from "../libraries/internal/Cron.sol"; + +/** + * @title The CronUpkeepTestHelper contract + * @notice This contract exposes core functionality of the CronUpkeep contract. + * It is only intended for use in tests. + */ +contract CronUpkeepTestHelper is CronUpkeep { + using Cron for Spec; + using Cron for string; + + constructor( + address owner, + address delegate, + uint256 maxJobs, + bytes memory firstJob + ) CronUpkeep(owner, delegate, maxJobs, firstJob) {} + + /** + * @notice createCronJobFromString is a helper function for creating cron jobs + * directly from strings. This is gas-intensive and shouldn't be done outside + * of testing environments. + */ + function createCronJobFromString(address target, bytes memory handler, string memory cronString) external { + Spec memory spec = cronString.toSpec(); + createCronJobFromSpec(target, handler, spec); + } + + /** + * @notice txCheckUpkeep is a helper function for sending real txs to the + * checkUpkeep function. This allows us to do gas analysis on it. 
+ */ + function txCheckUpkeep(bytes calldata checkData) external { + address(this).call(abi.encodeWithSelector(bytes4(keccak256("checkUpkeep(bytes)")), checkData)); + } +} diff --git a/contracts/src/v0.8/automation/testhelpers/DummyProtocol.sol b/contracts/src/v0.8/automation/testhelpers/DummyProtocol.sol new file mode 100644 index 00000000..4e86ec23 --- /dev/null +++ b/contracts/src/v0.8/automation/testhelpers/DummyProtocol.sol @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.16; + +// this struct is the same as LogTriggerConfig defined in KeeperRegistryLogicA2_1 contract +struct LogTriggerConfig { + address contractAddress; + uint8 filterSelector; // denotes which topics apply to filter ex 000, 101, 111...only last 3 bits apply + bytes32 topic0; + bytes32 topic1; + bytes32 topic2; + bytes32 topic3; +} + +contract DummyProtocol { + event LimitOrderSent(uint256 indexed amount, uint256 indexed price, address indexed to); // keccak256(LimitOrderSent(uint256,uint256,address)) => 0x3e9c37b3143f2eb7e9a2a0f8091b6de097b62efcfe48e1f68847a832e521750a + event LimitOrderWithdrawn(uint256 indexed amount, uint256 indexed price, address indexed from); // keccak256(LimitOrderWithdrawn(uint256,uint256,address)) => 0x0a71b8ed921ff64d49e4d39449f8a21094f38a0aeae489c3051aedd63f2c229f + event LimitOrderExecuted(uint256 indexed orderId, uint256 indexed amount, address indexed exchange); // keccak(LimitOrderExecuted(uint256,uint256,address)) => 0xd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd + + function sendLimitedOrder(uint256 amount, uint256 price, address to) public { + // send an order to an exchange + emit LimitOrderSent(amount, price, to); + } + + function withdrawLimit(uint256 amount, uint256 price, address from) public { + // withdraw an order from an exchange + emit LimitOrderSent(amount, price, from); + } + + function executeLimitOrder(uint256 orderId, uint256 amount, address exchange) public { + // execute a limit order + emit 
LimitOrderExecuted(orderId, amount, exchange); + } + + /** + * @notice this function generates bytes for a basic log trigger config with no filter selector. + * @param targetContract the address of contract where events will be emitted from + * @param t0 the signature of the event to listen to + */ + function getBasicLogTriggerConfig( + address targetContract, + bytes32 t0 + ) external view returns (bytes memory logTrigger) { + LogTriggerConfig memory cfg = LogTriggerConfig({ + contractAddress: targetContract, + filterSelector: 0, + topic0: t0, + topic1: 0x000000000000000000000000000000000000000000000000000000000000000, + topic2: 0x000000000000000000000000000000000000000000000000000000000000000, + topic3: 0x000000000000000000000000000000000000000000000000000000000000000 + }); + return abi.encode(cfg); + } + + /** + * @notice this function generates bytes for a customizable log trigger config. + * @param targetContract the address of contract where events will be emitted from + * @param selector the filter selector. this denotes which topics apply to filter ex 000, 101, 111....only last 3 bits apply + * if 0, it won't filter based on topic 1, 2, 3. + * if 1, it will filter based on topic 1, + * if 2, it will filter based on topic 2, + * if 3, it will filter based on topic 1 and topic 2, + * if 4, it will filter based on topic 3, + * if 5, it will filter based on topic 1 and topic 3.... + * @param t0 the signature of the event to listen to. + * @param t1 the topic 1 of the event. + * @param t2 the topic 2 of the event. + * @param t3 the topic 2 of the event. 
+ */ + function getAdvancedLogTriggerConfig( + address targetContract, + uint8 selector, + bytes32 t0, + bytes32 t1, + bytes32 t2, + bytes32 t3 + ) external view returns (bytes memory logTrigger) { + LogTriggerConfig memory cfg = LogTriggerConfig({ + contractAddress: targetContract, + filterSelector: selector, + topic0: t0, + topic1: t1, + topic2: t2, + topic3: t3 + }); + return abi.encode(cfg); + } +} diff --git a/contracts/src/v0.8/automation/testhelpers/KeeperConsumer.sol b/contracts/src/v0.8/automation/testhelpers/KeeperConsumer.sol new file mode 100644 index 00000000..fb492f37 --- /dev/null +++ b/contracts/src/v0.8/automation/testhelpers/KeeperConsumer.sol @@ -0,0 +1,26 @@ +pragma solidity 0.8.16; + +import "../interfaces/KeeperCompatibleInterface.sol"; +import "../KeeperBase.sol"; + +contract KeeperConsumer is KeeperCompatibleInterface, KeeperBase { + uint public counter; + uint public immutable interval; + uint public lastTimeStamp; + + constructor(uint updateInterval) public { + interval = updateInterval; + lastTimeStamp = block.timestamp; + counter = 0; + } + + function checkUpkeep( + bytes calldata checkData + ) external view override cannotExecute returns (bool upkeepNeeded, bytes memory performData) { + return (true, checkData); + } + + function performUpkeep(bytes calldata performData) external override { + counter = counter + 1; + } +} diff --git a/contracts/src/v0.8/automation/testhelpers/KeeperConsumerPerformance.sol b/contracts/src/v0.8/automation/testhelpers/KeeperConsumerPerformance.sol new file mode 100644 index 00000000..5b7f57e1 --- /dev/null +++ b/contracts/src/v0.8/automation/testhelpers/KeeperConsumerPerformance.sol @@ -0,0 +1,88 @@ +pragma solidity 0.8.16; + +contract KeeperConsumerPerformance { + event PerformingUpkeep(bool eligible, address from, uint256 initialCall, uint256 nextEligible, uint256 blockNumber); + + uint256 public initialCall = 0; + uint256 public nextEligible = 0; + uint256 public testRange; + uint256 public 
averageEligibilityCadence; + uint256 public checkGasToBurn; + uint256 public performGasToBurn; + mapping(bytes32 => bool) public dummyMap; // used to force storage lookup + + uint256 public count = 0; + + constructor( + uint256 _testRange, + uint256 _averageEligibilityCadence, + uint256 _checkGasToBurn, + uint256 _performGasToBurn + ) { + testRange = _testRange; + averageEligibilityCadence = _averageEligibilityCadence; + checkGasToBurn = _checkGasToBurn; + performGasToBurn = _performGasToBurn; + } + + function checkUpkeep(bytes calldata data) external view returns (bool, bytes memory) { + uint256 startGas = gasleft(); + bool dummy; + // burn gas + while (startGas - gasleft() < checkGasToBurn) { + dummy = dummy && dummyMap[blockhash(block.number)]; // arbitrary state reads + } + return (eligible(), abi.encode(dummy)); + } + + function performUpkeep(bytes calldata data) external { + uint256 startGas = gasleft(); + bool eligible = eligible(); + uint256 blockNum = block.number; + emit PerformingUpkeep(eligible, tx.origin, initialCall, nextEligible, blockNum); + require(eligible); + if (initialCall == 0) { + initialCall = blockNum; + } + nextEligible = (blockNum + (rand() % (averageEligibilityCadence * 2))) + 1; + count++; + // burn gas + while (startGas - gasleft() < performGasToBurn) { + dummyMap[blockhash(block.number)] = false; + } + } + + function setCheckGasToBurn(uint256 value) public { + checkGasToBurn = value; + } + + function setPerformGasToBurn(uint256 value) public { + performGasToBurn = value; + } + + function getCountPerforms() public view returns (uint256) { + return count; + } + + function eligible() internal view returns (bool) { + return initialCall == 0 || (block.number - initialCall < testRange && block.number > nextEligible); + } + + function checkEligible() public view returns (bool) { + return eligible(); + } + + function reset() external { + initialCall = 0; + count = 0; + } + + function setSpread(uint256 _newTestRange, uint256 
_newAverageEligibilityCadence) external { + testRange = _newTestRange; + averageEligibilityCadence = _newAverageEligibilityCadence; + } + + function rand() private view returns (uint256) { + return uint256(keccak256(abi.encode(blockhash(block.number - 1), address(this)))); + } +} diff --git a/contracts/src/v0.8/automation/testhelpers/LogTriggeredStreamsLookup.sol b/contracts/src/v0.8/automation/testhelpers/LogTriggeredStreamsLookup.sol new file mode 100644 index 00000000..ea80c752 --- /dev/null +++ b/contracts/src/v0.8/automation/testhelpers/LogTriggeredStreamsLookup.sol @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.16; + +import {ILogAutomation, Log} from "../interfaces/ILogAutomation.sol"; +import "../interfaces/StreamsLookupCompatibleInterface.sol"; +import {ArbSys} from "../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol"; + +interface IVerifierProxy { + /** + * @notice Verifies that the data encoded has been signed + * correctly by routing to the correct verifier. + * @param signedReport The encoded data to be verified. + * @return verifierResponse The encoded response from the verifier. 
+ */ + function verify(bytes memory signedReport) external returns (bytes memory verifierResponse); +} + +contract LogTriggeredStreamsLookup is ILogAutomation, StreamsLookupCompatibleInterface { + event PerformingLogTriggerUpkeep( + address indexed from, + uint256 orderId, + uint256 amount, + address exchange, + uint256 blockNumber, + bytes blob, + bytes verified + ); + event LimitOrderExecuted(uint256 indexed orderId, uint256 indexed amount, address indexed exchange); // keccak(LimitOrderExecuted(uint256,uint256,address)) => 0xd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd + + ArbSys internal constant ARB_SYS = ArbSys(0x0000000000000000000000000000000000000064); + IVerifierProxy internal constant VERIFIER = IVerifierProxy(0x09DFf56A4fF44e0f4436260A04F5CFa65636A481); + + // for log trigger + bytes32 constant sentSig = 0x3e9c37b3143f2eb7e9a2a0f8091b6de097b62efcfe48e1f68847a832e521750a; + bytes32 constant withdrawnSig = 0x0a71b8ed921ff64d49e4d39449f8a21094f38a0aeae489c3051aedd63f2c229f; + bytes32 constant executedSig = 0xd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd; + + // for mercury config + bool public useArbitrumBlockNum; + bool public verify; + string[] public feedsHex = ["0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"]; + string public feedParamKey = "feedIdHex"; + string public timeParamKey = "blockNumber"; + uint256 public counter; + + constructor(bool _useArbitrumBlockNum, bool _verify) { + useArbitrumBlockNum = _useArbitrumBlockNum; + verify = _verify; + counter = 0; + } + + function start() public { + // need an initial event to begin the cycle + emit LimitOrderExecuted(1, 100, address(0x0)); + } + + function setTimeParamKey(string memory timeParam) external { + timeParamKey = timeParam; + } + + function setFeedParamKey(string memory feedParam) external { + feedParamKey = feedParam; + } + + function setFeedsHex(string[] memory newFeeds) external { + feedsHex = newFeeds; + } + + function checkLog( + 
Log calldata log, + bytes memory + ) external override returns (bool upkeepNeeded, bytes memory performData) { + uint256 blockNum = getBlockNumber(); + + // filter by event signature + if (log.topics[0] == executedSig) { + // filter by indexed parameters + bytes memory t1 = abi.encodePacked(log.topics[1]); // bytes32 to bytes + uint256 orderId = abi.decode(t1, (uint256)); + bytes memory t2 = abi.encodePacked(log.topics[2]); + uint256 amount = abi.decode(t2, (uint256)); + bytes memory t3 = abi.encodePacked(log.topics[3]); + address exchange = abi.decode(t3, (address)); + + revert StreamsLookup( + feedParamKey, + feedsHex, + timeParamKey, + blockNum, + abi.encode(orderId, amount, exchange, executedSig) + ); + } + revert("could not find matching event sig"); + } + + function performUpkeep(bytes calldata performData) external override { + (bytes[] memory values, bytes memory extraData) = abi.decode(performData, (bytes[], bytes)); + (uint256 orderId, uint256 amount, address exchange, bytes32 logTopic0) = abi.decode( + extraData, + (uint256, uint256, address, bytes32) + ); + + bytes memory verifiedResponse = ""; + if (verify) { + verifiedResponse = VERIFIER.verify(values[0]); + } + + counter = counter + 1; + if (logTopic0 == executedSig) { + emit LimitOrderExecuted(1, 100, address(0x0)); + } + + emit PerformingLogTriggerUpkeep( + tx.origin, + orderId, + amount, + exchange, + getBlockNumber(), + values[0], + verifiedResponse + ); + } + + function checkCallback( + bytes[] memory values, + bytes memory extraData + ) external view override returns (bool, bytes memory) { + // do sth about the pluginBlob data in values and extraData + bytes memory performData = abi.encode(values, extraData); + return (true, performData); + } + + function getBlockNumber() internal view returns (uint256) { + if (useArbitrumBlockNum) { + return ARB_SYS.arbBlockNumber(); + } else { + return block.number; + } + } +} diff --git a/contracts/src/v0.8/automation/testhelpers/LogUpkeepCounter.sol 
b/contracts/src/v0.8/automation/testhelpers/LogUpkeepCounter.sol new file mode 100644 index 00000000..a51f2d2a --- /dev/null +++ b/contracts/src/v0.8/automation/testhelpers/LogUpkeepCounter.sol @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +import {ILogAutomation, Log} from "../interfaces/ILogAutomation.sol"; + +contract LogUpkeepCounter is ILogAutomation { + bytes32 sig1 = 0x3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d; + bytes32 sig2 = 0x57b1de35764b0939dde00771c7069cdf8d6a65d6a175623f19aa18784fd4c6da; + bytes32 sig3 = 0x1da9f70fe932e73fba9374396c5c0b02dbd170f951874b7b4afabe4dd029a9c8; + bytes32 sig4 = 0x5121119bad45ca7e58e0bdadf39045f5111e93ba4304a0f6457a3e7bc9791e71; + + event PerformingUpkeep( + address indexed from, + uint256 initialBlock, + uint256 lastBlock, + uint256 previousBlock, + uint256 counter + ); + + /** + * @dev we include multiple event types for testing various filters, signatures, etc + */ + event Trigger(); // 0x3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d + event Trigger(uint256 a); // 0x57b1de35764b0939dde00771c7069cdf8d6a65d6a175623f19aa18784fd4c6da + event Trigger(uint256 a, uint256 b); // 0x1da9f70fe932e73fba9374396c5c0b02dbd170f951874b7b4afabe4dd029a9c8 + event Trigger(uint256 a, uint256 b, uint256 c); // 0x5121119bad45ca7e58e0bdadf39045f5111e93ba4304a0f6457a3e7bc9791e71 + + uint256 public testRange; + uint256 public lastBlock; + uint256 public previousPerformBlock; + uint256 public initialBlock; + uint256 public counter; + + constructor(uint256 _testRange) { + testRange = _testRange; + previousPerformBlock = 0; + lastBlock = block.number; + initialBlock = 0; + counter = 0; + } + + function start() public { + // need an initial event to begin the cycle + emit Trigger(); + emit Trigger(1); + emit Trigger(1, 2); + emit Trigger(1, 2, 3); + } + + function checkLog(Log calldata log, bytes memory) external view override returns (bool, bytes memory) { + require(eligible(), 
"not eligible"); + if (log.topics[0] == sig1 || log.topics[0] == sig2 || log.topics[0] == sig3 || log.topics[0] == sig4) { + return (true, abi.encode(log)); + } else { + revert("could not find matching event sig"); + } + } + + function performUpkeep(bytes calldata performData) external override { + if (initialBlock == 0) { + initialBlock = block.number; + } + lastBlock = block.number; + counter = counter + 1; + previousPerformBlock = lastBlock; + Log memory log = abi.decode(performData, (Log)); + if (log.topics[0] == sig1) { + emit Trigger(); + } else if (log.topics[0] == sig2) { + emit Trigger(1); + } else if (log.topics[0] == sig3) { + emit Trigger(1, 2); + } else if (log.topics[0] == sig4) { + emit Trigger(1, 2, 3); + } else { + revert("could not find matching sig"); + } + emit PerformingUpkeep(tx.origin, initialBlock, lastBlock, previousPerformBlock, counter); + } + + function eligible() public view returns (bool) { + if (initialBlock == 0) { + return true; + } + + return (block.number - initialBlock) < testRange; + } + + function setSpread(uint256 _testRange) external { + testRange = _testRange; + initialBlock = 0; + counter = 0; + } +} diff --git a/contracts/src/v0.8/automation/testhelpers/PerformDataChecker.sol b/contracts/src/v0.8/automation/testhelpers/PerformDataChecker.sol new file mode 100644 index 00000000..268942f9 --- /dev/null +++ b/contracts/src/v0.8/automation/testhelpers/PerformDataChecker.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.16; + +import "../interfaces/KeeperCompatibleInterface.sol"; + +contract PerformDataChecker is KeeperCompatibleInterface { + uint256 public counter; + bytes public s_expectedData; + + constructor(bytes memory expectedData) { + s_expectedData = expectedData; + } + + function setExpectedData(bytes calldata expectedData) external { + s_expectedData = expectedData; + } + + function checkUpkeep( + bytes calldata checkData + ) external view override returns (bool upkeepNeeded, bytes memory 
performData) { + return (keccak256(checkData) == keccak256(s_expectedData), checkData); + } + + function performUpkeep(bytes calldata performData) external override { + if (keccak256(performData) == keccak256(s_expectedData)) { + counter++; + } + } +} diff --git a/contracts/src/v0.8/automation/testhelpers/SimpleLogUpkeepCounter.sol b/contracts/src/v0.8/automation/testhelpers/SimpleLogUpkeepCounter.sol new file mode 100644 index 00000000..979cc613 --- /dev/null +++ b/contracts/src/v0.8/automation/testhelpers/SimpleLogUpkeepCounter.sol @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +import {ILogAutomation, Log} from "../interfaces/ILogAutomation.sol"; + +struct CheckData { + uint256 checkBurnAmount; + uint256 performBurnAmount; + bytes32 eventSig; +} + +contract SimpleLogUpkeepCounter is ILogAutomation { + event PerformingUpkeep( + address indexed from, + uint256 initialBlock, + uint256 lastBlock, + uint256 previousBlock, + uint256 counter, + uint256 timeToPerform, + bool isRecovered + ); + + mapping(bytes32 => bool) public dummyMap; // used to force storage lookup + uint256 public lastBlock; + uint256 public previousPerformBlock; + uint256 public initialBlock; + uint256 public counter; + uint256 public timeToPerform; + bool public isRecovered; + + constructor() { + previousPerformBlock = 0; + lastBlock = block.number; + initialBlock = 0; + counter = 0; + } + + function _checkDataConfig(CheckData memory) external {} + + function checkLog(Log calldata log, bytes calldata checkData) external view override returns (bool, bytes memory) { + (uint256 checkBurnAmount, uint256 performBurnAmount, bytes32 eventSig) = abi.decode( + checkData, + (uint256, uint256, bytes32) + ); + uint256 startGas = gasleft(); + bytes32 dummyIndex = blockhash(block.number - 1); + bool dummy; + // burn gas + if (checkBurnAmount > 0) { + while (startGas - gasleft() < checkBurnAmount) { + dummy = dummy && dummyMap[dummyIndex]; // arbitrary storage reads + dummyIndex = 
keccak256(abi.encode(dummyIndex, address(this))); + } + } + if (log.topics[2] == eventSig) { + return (true, abi.encode(log, block.number, checkData)); + } + return (false, abi.encode(log, block.number, checkData)); + } + + function performUpkeep(bytes calldata performData) external override { + if (initialBlock == 0) { + initialBlock = block.number; + } + lastBlock = block.number; + counter = counter + 1; + previousPerformBlock = lastBlock; + (Log memory log, uint256 checkBlock, bytes memory extraData) = abi.decode(performData, (Log, uint256, bytes)); + timeToPerform = block.timestamp - log.timestamp; + isRecovered = false; + if (checkBlock != log.blockNumber) { + isRecovered = true; + } + (uint256 checkBurnAmount, uint256 performBurnAmount, bytes32 eventSig) = abi.decode( + extraData, + (uint256, uint256, bytes32) + ); + uint256 startGas = gasleft(); + bytes32 dummyIndex = blockhash(block.number - 1); + bool dummy; + // burn gas + if (performBurnAmount > 0) { + while (startGas - gasleft() < performBurnAmount) { + dummy = dummy && dummyMap[dummyIndex]; // arbitrary storage reads + dummyIndex = keccak256(abi.encode(dummyIndex, address(this))); + } + } + emit PerformingUpkeep( + tx.origin, + initialBlock, + lastBlock, + previousPerformBlock, + counter, + timeToPerform, + isRecovered + ); + } +} diff --git a/contracts/src/v0.8/automation/testhelpers/UpkeepCounter.sol b/contracts/src/v0.8/automation/testhelpers/UpkeepCounter.sol new file mode 100644 index 00000000..caeed98e --- /dev/null +++ b/contracts/src/v0.8/automation/testhelpers/UpkeepCounter.sol @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.16; + +contract UpkeepCounter { + event PerformingUpkeep( + address indexed from, + uint256 initialBlock, + uint256 lastBlock, + uint256 previousBlock, + uint256 counter + ); + + uint256 public testRange; + uint256 public interval; + uint256 public lastBlock; + uint256 public previousPerformBlock; + uint256 public initialBlock; + uint256 public 
counter; + + constructor(uint256 _testRange, uint256 _interval) { + testRange = _testRange; + interval = _interval; + previousPerformBlock = 0; + lastBlock = block.number; + initialBlock = 0; + counter = 0; + } + + function checkUpkeep(bytes calldata data) external view returns (bool, bytes memory) { + return (eligible(), data); + } + + function performUpkeep(bytes calldata performData) external { + if (initialBlock == 0) { + initialBlock = block.number; + } + lastBlock = block.number; + counter = counter + 1; + performData; + emit PerformingUpkeep(tx.origin, initialBlock, lastBlock, previousPerformBlock, counter); + previousPerformBlock = lastBlock; + } + + function eligible() public view returns (bool) { + if (initialBlock == 0) { + return true; + } + + return (block.number - initialBlock) < testRange && (block.number - lastBlock) >= interval; + } + + function setSpread(uint256 _testRange, uint256 _interval) external { + testRange = _testRange; + interval = _interval; + initialBlock = 0; + counter = 0; + } +} diff --git a/contracts/src/v0.8/automation/testhelpers/UpkeepPerformCounterRestrictive.sol b/contracts/src/v0.8/automation/testhelpers/UpkeepPerformCounterRestrictive.sol new file mode 100644 index 00000000..3aa345ab --- /dev/null +++ b/contracts/src/v0.8/automation/testhelpers/UpkeepPerformCounterRestrictive.sol @@ -0,0 +1,85 @@ +pragma solidity 0.8.16; + +contract UpkeepPerformCounterRestrictive { + event PerformingUpkeep(bool eligible, address from, uint256 initialCall, uint256 nextEligible, uint256 blockNumber); + + uint256 public initialCall = 0; + uint256 public nextEligible = 0; + uint256 public testRange; + uint256 public averageEligibilityCadence; + uint256 public checkGasToBurn; + uint256 public performGasToBurn; + mapping(bytes32 => bool) public dummyMap; // used to force storage lookup + + uint256 private count = 0; + + constructor(uint256 _testRange, uint256 _averageEligibilityCadence) { + testRange = _testRange; + averageEligibilityCadence = 
_averageEligibilityCadence; + } + + function checkUpkeep(bytes calldata data) external view returns (bool, bytes memory) { + uint256 startGas = gasleft(); + uint256 blockNum = block.number - 1; + bool dummy; + // burn gas + while (startGas - gasleft() < checkGasToBurn) { + dummy = dummy && dummyMap[blockhash(blockNum)]; // arbitrary storage reads + blockNum--; + } + return (eligible(), abi.encode(dummy)); + } + + function performUpkeep(bytes calldata) external { + uint256 startGas = gasleft(); + bool eligible = eligible(); + uint256 blockNum = block.number; + emit PerformingUpkeep(eligible, tx.origin, initialCall, nextEligible, blockNum); + require(eligible); + if (initialCall == 0) { + initialCall = blockNum; + } + nextEligible = (blockNum + (rand() % (averageEligibilityCadence * 2))) + 1; + count++; + // burn gas + blockNum--; + while (startGas - gasleft() < performGasToBurn) { + dummyMap[blockhash(blockNum)] = false; // arbitrary storage writes + blockNum--; + } + } + + function setCheckGasToBurn(uint256 value) public { + checkGasToBurn = value; + } + + function setPerformGasToBurn(uint256 value) public { + performGasToBurn = value; + } + + function getCountPerforms() public view returns (uint256) { + return count; + } + + function eligible() internal view returns (bool) { + return initialCall == 0 || (block.number - initialCall < testRange && block.number > nextEligible); + } + + function checkEligible() public view returns (bool) { + return eligible(); + } + + function reset() external { + initialCall = 0; + count = 0; + } + + function setSpread(uint256 _newTestRange, uint256 _newAverageEligibilityCadence) external { + testRange = _newTestRange; + averageEligibilityCadence = _newAverageEligibilityCadence; + } + + function rand() private view returns (uint256) { + return uint256(keccak256(abi.encode(blockhash(block.number - 1), address(this)))); + } +} diff --git a/contracts/src/v0.8/automation/upkeeps/CronUpkeep.sol 
b/contracts/src/v0.8/automation/upkeeps/CronUpkeep.sol new file mode 100644 index 00000000..d3fdc6fc --- /dev/null +++ b/contracts/src/v0.8/automation/upkeeps/CronUpkeep.sol @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: MIT + +/** + The Cron contract is a plugin keepers-powered cron job runner for smart contracts. + The contract enables developers to trigger actions on various targets using cron + strings to specify the cadence. For example, a user may have 3 tasks that require + regular service in their dapp ecosystem: + 1) 0xAB..CD, update(1), "0 0 * * *" --> runs update(1) on 0xAB..CD daily at midnight + 2) 0xAB..CD, update(2), "30 12 * * 0-4" --> runs update(2) on 0xAB..CD weekdays at 12:30 + 3) 0x12..34, trigger(), "0 * * * *" --> runs trigger() on 0x12..34 hourly + + To use this contract, a user first deploys this contract and registers it on the plugin + keeper registry. Then the user adds cron jobs by following these steps: + 1) Convert a cron string to an encoded cron spec by calling encodeCronString() + 2) Take the encoding, target, and handler, and create a job by sending a tx to createCronJob() + 3) Cron job is running :) +*/ + +pragma solidity 0.8.6; + +import "@openzeppelin/contracts/security/Pausable.sol"; +import "@openzeppelin/contracts/proxy/Proxy.sol"; +import "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; +import "../../shared/access/ConfirmedOwner.sol"; +import "../KeeperBase.sol"; +import "../interfaces/KeeperCompatibleInterface.sol"; +import {Cron as CronInternal, Spec} from "../libraries/internal/Cron.sol"; +import {Cron as CronExternal} from "../libraries/external/Cron.sol"; + +/** + * @title The CronUpkeep contract + * @notice A keeper-compatible contract that runs various tasks on cron schedules. + * Users must use the encodeCronString() function to encode their cron jobs before + * setting them. This keeps all the string manipulation off chain and reduces gas costs. 
+ */ +contract CronUpkeep is KeeperCompatibleInterface, KeeperBase, ConfirmedOwner, Pausable, Proxy { + using EnumerableSet for EnumerableSet.UintSet; + + event CronJobExecuted(uint256 indexed id, bool success); + event CronJobCreated(uint256 indexed id, address target, bytes handler); + event CronJobUpdated(uint256 indexed id, address target, bytes handler); + event CronJobDeleted(uint256 indexed id); + + error CronJobIDNotFound(uint256 id); + error ExceedsMaxJobs(); + error InvalidHandler(); + error TickInFuture(); + error TickTooOld(); + error TickDoesntMatchSpec(); + + address immutable s_delegate; + uint256 public immutable s_maxJobs; + uint256 private s_nextCronJobID = 1; + EnumerableSet.UintSet private s_activeCronJobIDs; + + mapping(uint256 => uint256) private s_lastRuns; + mapping(uint256 => Spec) private s_specs; + mapping(uint256 => address) private s_targets; + mapping(uint256 => bytes) private s_handlers; + mapping(uint256 => bytes32) private s_handlerSignatures; + + /** + * @param owner the initial owner of the contract + * @param delegate the contract to delegate checkUpkeep calls to + * @param maxJobs the max number of cron jobs this contract will support + * @param firstJob an optional encoding of the first cron job + */ + constructor(address owner, address delegate, uint256 maxJobs, bytes memory firstJob) ConfirmedOwner(owner) { + s_delegate = delegate; + s_maxJobs = maxJobs; + if (firstJob.length > 0) { + (address target, bytes memory handler, Spec memory spec) = abi.decode(firstJob, (address, bytes, Spec)); + createCronJobFromSpec(target, handler, spec); + } + } + + /** + * @notice Executes the cron job with id encoded in performData + * @param performData abi encoding of cron job ID and the cron job's next run-at datetime + */ + function performUpkeep(bytes calldata performData) external override whenNotPaused { + (uint256 id, uint256 tickTime, address target, bytes memory handler) = abi.decode( + performData, + (uint256, uint256, address, 
bytes) + ); + validate(id, tickTime, target, handler); + s_lastRuns[id] = block.timestamp; + (bool success, ) = target.call(handler); + emit CronJobExecuted(id, success); + } + + /** + * @notice Creates a cron job from the given encoded spec + * @param target the destination contract of a cron job + * @param handler the function signature on the target contract to call + * @param encodedCronSpec abi encoding of a cron spec + */ + function createCronJobFromEncodedSpec( + address target, + bytes memory handler, + bytes memory encodedCronSpec + ) external onlyOwner { + if (s_activeCronJobIDs.length() >= s_maxJobs) { + revert ExceedsMaxJobs(); + } + Spec memory spec = abi.decode(encodedCronSpec, (Spec)); + createCronJobFromSpec(target, handler, spec); + } + + /** + * @notice Updates a cron job from the given encoded spec + * @param id the id of the cron job to update + * @param newTarget the destination contract of a cron job + * @param newHandler the function signature on the target contract to call + * @param newEncodedCronSpec abi encoding of a cron spec + */ + function updateCronJob( + uint256 id, + address newTarget, + bytes memory newHandler, + bytes memory newEncodedCronSpec + ) external onlyOwner onlyValidCronID(id) { + Spec memory newSpec = abi.decode(newEncodedCronSpec, (Spec)); + s_targets[id] = newTarget; + s_handlers[id] = newHandler; + s_specs[id] = newSpec; + s_handlerSignatures[id] = handlerSig(newTarget, newHandler); + emit CronJobUpdated(id, newTarget, newHandler); + } + + /** + * @notice Deletes the cron job matching the provided id. Reverts if + * the id is not found. 
+ * @param id the id of the cron job to delete + */ + function deleteCronJob(uint256 id) external onlyOwner onlyValidCronID(id) { + delete s_lastRuns[id]; + delete s_specs[id]; + delete s_targets[id]; + delete s_handlers[id]; + delete s_handlerSignatures[id]; + s_activeCronJobIDs.remove(id); + emit CronJobDeleted(id); + } + + /** + * @notice Pauses the contract, which prevents executing performUpkeep + */ + function pause() external onlyOwner { + _pause(); + } + + /** + * @notice Unpauses the contract + */ + function unpause() external onlyOwner { + _unpause(); + } + + /** + * @notice Get the id of an eligible cron job + * @return upkeepNeeded signals if upkeep is needed, performData is an abi encoding + * of the id and "next tick" of the elligible cron job + */ + function checkUpkeep(bytes calldata) external override whenNotPaused cannotExecute returns (bool, bytes memory) { + _delegate(s_delegate); + } + + /** + * @notice gets a list of active cron job IDs + * @return list of active cron job IDs + */ + function getActiveCronJobIDs() external view returns (uint256[] memory) { + uint256 length = s_activeCronJobIDs.length(); + uint256[] memory jobIDs = new uint256[](length); + for (uint256 idx = 0; idx < length; idx++) { + jobIDs[idx] = s_activeCronJobIDs.at(idx); + } + return jobIDs; + } + + /** + * @notice gets a cron job + * @param id the cron job ID + * @return target - the address a cron job forwards the eth tx to + handler - the encoded function sig to execute when forwarding a tx + cronString - the string representing the cron job + nextTick - the timestamp of the next time the cron job will run + */ + function getCronJob( + uint256 id + ) + external + view + onlyValidCronID(id) + returns (address target, bytes memory handler, string memory cronString, uint256 nextTick) + { + Spec memory spec = s_specs[id]; + return (s_targets[id], s_handlers[id], CronExternal.toCronString(spec), CronExternal.nextTick(spec)); + } + + /** + * @notice Adds a cron spec to 
storage and the ID to the list of jobs + * @param target the destination contract of a cron job + * @param handler the function signature on the target contract to call + * @param spec the cron spec to create + */ + function createCronJobFromSpec(address target, bytes memory handler, Spec memory spec) internal { + uint256 newID = s_nextCronJobID; + s_activeCronJobIDs.add(newID); + s_targets[newID] = target; + s_handlers[newID] = handler; + s_specs[newID] = spec; + s_lastRuns[newID] = block.timestamp; + s_handlerSignatures[newID] = handlerSig(target, handler); + s_nextCronJobID++; + emit CronJobCreated(newID, target, handler); + } + + function _implementation() internal view override returns (address) { + return s_delegate; + } + + /** + * @notice validates the input to performUpkeep + * @param id the id of the cron job + * @param tickTime the observed tick time + * @param target the contract to forward the tx to + * @param handler the handler of the contract receiving the forwarded tx + */ + function validate(uint256 id, uint256 tickTime, address target, bytes memory handler) private { + tickTime = tickTime - (tickTime % 60); // remove seconds from tick time + if (block.timestamp < tickTime) { + revert TickInFuture(); + } + if (tickTime <= s_lastRuns[id]) { + revert TickTooOld(); + } + if (!CronInternal.matches(s_specs[id], tickTime)) { + revert TickDoesntMatchSpec(); + } + if (handlerSig(target, handler) != s_handlerSignatures[id]) { + revert InvalidHandler(); + } + } + + /** + * @notice returns a unique identifier for target/handler pairs + * @param target the contract to forward the tx to + * @param handler the handler of the contract receiving the forwarded tx + * @return a hash of the inputs + */ + function handlerSig(address target, bytes memory handler) private pure returns (bytes32) { + return keccak256(abi.encodePacked(target, handler)); + } + + modifier onlyValidCronID(uint256 id) { + if (!s_activeCronJobIDs.contains(id)) { + revert CronJobIDNotFound(id); 
+ } + _; + } +} diff --git a/contracts/src/v0.8/automation/upkeeps/CronUpkeepDelegate.sol b/contracts/src/v0.8/automation/upkeeps/CronUpkeepDelegate.sol new file mode 100644 index 00000000..ec2c2a0f --- /dev/null +++ b/contracts/src/v0.8/automation/upkeeps/CronUpkeepDelegate.sol @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +import "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; +import {Cron, Spec} from "../libraries/internal/Cron.sol"; + +/** + * @title The CronUpkeepDelegate contract + * @notice This contract serves as a delegate for all instances of CronUpkeep. Those contracts + * delegate their checkUpkeep calls onto this contract. Utilizing this pattern reduces the size + * of the CronUpkeep contracts. + */ +contract CronUpkeepDelegate { + using EnumerableSet for EnumerableSet.UintSet; + using Cron for Spec; + + address private s_owner; // from ConfirmedOwner + address private s_delegate; + uint256 private s_nextCronJobID; + EnumerableSet.UintSet private s_activeCronJobIDs; + mapping(uint256 => uint256) private s_lastRuns; + mapping(uint256 => Spec) private s_specs; + mapping(uint256 => address) private s_targets; + mapping(uint256 => bytes) private s_handlers; + + /** + * @notice Get the id of an eligible cron job + * @return upkeepNeeded signals if upkeep is needed, performData is an abi encoding + * of the id and "next tick" of the eligible cron job + */ + function checkUpkeep(bytes calldata) external view returns (bool, bytes memory) { + // DEV: start at a random spot in the list so that checks are + // spread evenly among cron jobs + uint256 numCrons = s_activeCronJobIDs.length(); + if (numCrons == 0) { + return (false, bytes("")); + } + uint256 startIdx = block.number % numCrons; + bool result; + bytes memory payload; + (result, payload) = checkInRange(startIdx, numCrons); + if (result) { + return (result, payload); + } + (result, payload) = checkInRange(0, startIdx); + if (result) { + return (result, payload); 
+ } + return (false, bytes("")); + } + + /** + * @notice checks the cron jobs in a given range + * @param start the starting id to check (inclusive) + * @param end the ending id to check (exclusive) + * @return upkeepNeeded signals if upkeep is needed, performData is an abi encoding + * of the id and "next tick" of the eligible cron job + */ + function checkInRange(uint256 start, uint256 end) private view returns (bool, bytes memory) { + uint256 id; + uint256 lastTick; + for (uint256 idx = start; idx < end; idx++) { + id = s_activeCronJobIDs.at(idx); + lastTick = s_specs[id].lastTick(); + if (lastTick > s_lastRuns[id]) { + return (true, abi.encode(id, lastTick, s_targets[id], s_handlers[id])); + } + } + } +} diff --git a/contracts/src/v0.8/automation/upkeeps/CronUpkeepFactory.sol b/contracts/src/v0.8/automation/upkeeps/CronUpkeepFactory.sol new file mode 100644 index 00000000..cd9ae5d7 --- /dev/null +++ b/contracts/src/v0.8/automation/upkeeps/CronUpkeepFactory.sol @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +import "./CronUpkeep.sol"; +import "./CronUpkeepDelegate.sol"; +import "../../shared/access/ConfirmedOwner.sol"; +import {Spec, Cron as CronExternal} from "../libraries/external/Cron.sol"; + +/** + * @title The CronUpkeepFactory contract + * @notice This contract serves as a delegate for all instances of CronUpkeep. Those contracts + * delegate their checkUpkeep calls onto this contract. Utilizing this pattern reduces the size + * of the CronUpkeep contracts. 
+ */ +contract CronUpkeepFactory is ConfirmedOwner { + event NewCronUpkeepCreated(address upkeep, address owner); + + address private immutable s_cronDelegate; + uint256 public s_maxJobs = 5; + + constructor() ConfirmedOwner(msg.sender) { + s_cronDelegate = address(new CronUpkeepDelegate()); + } + + /** + * @notice Creates a new CronUpkeep contract, with msg.sender as the owner + */ + function newCronUpkeep() external { + newCronUpkeepWithJob(bytes("")); + } + + /** + * @notice Creates a new CronUpkeep contract, with msg.sender as the owner, and registers a cron job + */ + function newCronUpkeepWithJob(bytes memory encodedJob) public { + emit NewCronUpkeepCreated(address(new CronUpkeep(msg.sender, s_cronDelegate, s_maxJobs, encodedJob)), msg.sender); + } + + /** + * @notice Sets the max job limit on new cron upkeeps + */ + function setMaxJobs(uint256 maxJobs) external onlyOwner { + s_maxJobs = maxJobs; + } + + /** + * @notice Gets the address of the delegate contract + * @return the address of the delegate contract + */ + function cronDelegateAddress() external view returns (address) { + return s_cronDelegate; + } + + /** + * @notice Converts a cron string to a Spec, validates the spec, and encodes the spec. + * This should only be called off-chain, as it is gas expensive! + * @param cronString the cron string to convert and encode + * @return the abi encoding of the Spec struct representing the cron string + */ + function encodeCronString(string memory cronString) external pure returns (bytes memory) { + return CronExternal.toEncodedSpec(cronString); + } + + /** + * @notice Converts, validates, and encodes a full cron spec. This payload is then passed to newCronUpkeepWithJob. 
+ * @param target the destination contract of a cron job + * @param handler the function signature on the target contract to call + * @param cronString the cron string to convert and encode + * @return the abi encoding of the entire cron job + */ + function encodeCronJob( + address target, + bytes memory handler, + string memory cronString + ) external pure returns (bytes memory) { + Spec memory spec = CronExternal.toSpec(cronString); + return abi.encode(target, handler, spec); + } +} diff --git a/contracts/src/v0.8/automation/upkeeps/ERC20BalanceMonitor.sol b/contracts/src/v0.8/automation/upkeeps/ERC20BalanceMonitor.sol new file mode 100644 index 00000000..d2a7adc6 --- /dev/null +++ b/contracts/src/v0.8/automation/upkeeps/ERC20BalanceMonitor.sol @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.4; + +import "../../shared/access/ConfirmedOwner.sol"; +import "../interfaces/KeeperCompatibleInterface.sol"; +import "../../vendor/openzeppelin-solidity/v4.8.3/contracts/security/Pausable.sol"; +import "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol"; + +/** + * @title The ERC20BalanceMonitor contract. + * @notice A keeper-compatible contract that monitors and funds ERC20 tokens. 
+ */ +contract ERC20BalanceMonitor is ConfirmedOwner, Pausable, KeeperCompatibleInterface { + uint16 private constant MAX_WATCHLIST_SIZE = 300; + uint256 private constant MIN_GAS_FOR_TRANSFER = 55_000; + + event FundsWithdrawn(uint256 amountWithdrawn, address payee); + event TopUpSucceeded(address indexed topUpAddress); + event WatchlistUpdated(address[] oldWatchlist, address[] newWatchlist); + event KeeperRegistryAddressUpdated(address oldAddress, address newAddress); + event ERC20TokenAddressUpdated(address oldAddress, address newAddress); + event MinWaitPeriodUpdated(uint256 oldMinWaitPeriod, uint256 newMinWaitPeriod); + + error InvalidWatchList(); + error OnlyKeeperRegistry(); + error DuplicateAddress(address duplicate); + + struct Target { + bool isActive; + uint96 minBalance; + uint96 topUpLevel; + uint56 lastTopUpTimestamp; + } + + IERC20 private s_erc20Token; + address private s_keeperRegistryAddress; + uint256 private s_minWaitPeriodSeconds; + address[] private s_watchList; + mapping(address => Target) internal s_targets; + + /** + * @param erc20TokenAddress the ERC20 token address + * @param keeperRegistryAddress the address of the keeper registry contract + * @param minWaitPeriodSeconds the minimum wait period for addresses between funding + */ + constructor( + address erc20TokenAddress, + address keeperRegistryAddress, + uint256 minWaitPeriodSeconds + ) ConfirmedOwner(msg.sender) { + setERC20TokenAddress(erc20TokenAddress); + setKeeperRegistryAddress(keeperRegistryAddress); + setMinWaitPeriodSeconds(minWaitPeriodSeconds); + } + + /** + * @notice Sets the list of subscriptions to watch and their funding parameters. 
+ * @param addresses the list of subscription ids to watch + * @param minBalances the minimum balances for each subscription + * @param topUpLevels the amount to top up to for each subscription + */ + function setWatchList( + address[] calldata addresses, + uint96[] calldata minBalances, + uint96[] calldata topUpLevels + ) external onlyOwner { + if ( + addresses.length != minBalances.length || + addresses.length != topUpLevels.length || + addresses.length > MAX_WATCHLIST_SIZE + ) { + revert InvalidWatchList(); + } + address[] memory oldWatchList = s_watchList; + for (uint256 idx = 0; idx < oldWatchList.length; idx++) { + s_targets[oldWatchList[idx]].isActive = false; + } + for (uint256 idx = 0; idx < addresses.length; idx++) { + if (s_targets[addresses[idx]].isActive) { + revert DuplicateAddress(addresses[idx]); + } + if (addresses[idx] == address(0)) { + revert InvalidWatchList(); + } + if (topUpLevels[idx] <= minBalances[idx]) { + revert InvalidWatchList(); + } + s_targets[addresses[idx]] = Target({ + isActive: true, + minBalance: minBalances[idx], + topUpLevel: topUpLevels[idx], + lastTopUpTimestamp: 0 + }); + } + s_watchList = addresses; + emit WatchlistUpdated(oldWatchList, addresses); + } + + /** + * @notice Gets a list of subscriptions that are underfunded. 
+ * @return list of subscriptions that are underfunded + */ + function getUnderfundedAddresses() public view returns (address[] memory) { + address[] memory watchList = s_watchList; + address[] memory needsFunding = new address[](watchList.length); + uint256 count = 0; + uint256 minWaitPeriodSeconds = s_minWaitPeriodSeconds; + uint256 contractBalance = s_erc20Token.balanceOf(address(this)); + Target memory target; + for (uint256 idx = 0; idx < watchList.length; idx++) { + target = s_targets[watchList[idx]]; + uint256 targetTokenBalance = s_erc20Token.balanceOf(watchList[idx]); + if ( + target.lastTopUpTimestamp + minWaitPeriodSeconds <= block.timestamp && + targetTokenBalance < target.minBalance && + contractBalance >= (target.topUpLevel - targetTokenBalance) + ) { + uint256 topUpAmount = target.topUpLevel - targetTokenBalance; + needsFunding[count] = watchList[idx]; + count++; + contractBalance -= topUpAmount; + } + } + if (count != watchList.length) { + assembly { + mstore(needsFunding, count) // resize array to number of valid targets + } + } + return needsFunding; + } + + /** + * @notice Send funds to the subscriptions provided. 
+ * @param needsFunding the list of subscriptions to fund + */ + function topUp(address[] memory needsFunding) public whenNotPaused { + uint256 minWaitPeriodSeconds = s_minWaitPeriodSeconds; + Target memory target; + uint256 contractBalance = s_erc20Token.balanceOf(address(this)); + for (uint256 idx = 0; idx < needsFunding.length; idx++) { + target = s_targets[needsFunding[idx]]; + uint256 targetTokenBalance = s_erc20Token.balanceOf(needsFunding[idx]); + if ( + target.isActive && + target.lastTopUpTimestamp + minWaitPeriodSeconds <= block.timestamp && + targetTokenBalance < target.minBalance && + contractBalance >= (target.topUpLevel - targetTokenBalance) + ) { + uint256 topUpAmount = target.topUpLevel - targetTokenBalance; + s_targets[needsFunding[idx]].lastTopUpTimestamp = uint56(block.timestamp); + contractBalance -= topUpAmount; + SafeERC20.safeTransfer(s_erc20Token, needsFunding[idx], topUpAmount); + emit TopUpSucceeded(needsFunding[idx]); + } + if (gasleft() < MIN_GAS_FOR_TRANSFER) { + return; + } + } + } + + /** + * @notice Gets list of subscription ids that are underfunded and returns a keeper-compatible payload. + * @return upkeepNeeded signals if upkeep is needed, performData is an abi encoded list of subscription ids that need funds + */ + function checkUpkeep( + bytes calldata + ) external view override whenNotPaused returns (bool upkeepNeeded, bytes memory performData) { + address[] memory needsFunding = getUnderfundedAddresses(); + upkeepNeeded = needsFunding.length > 0; + performData = abi.encode(needsFunding); + return (upkeepNeeded, performData); + } + + /** + * @notice Called by the keeper to send funds to underfunded addresses. 
+ * @param performData the abi encoded list of addresses to fund + */ + function performUpkeep(bytes calldata performData) external override onlyKeeperRegistry whenNotPaused { + address[] memory needsFunding = abi.decode(performData, (address[])); + topUp(needsFunding); + } + + /** + * @notice Withdraws the contract balance in the ERC20 token. + * @param amount the amount of the ERC20 to withdraw + * @param payee the address to pay + */ + function withdraw(uint256 amount, address payable payee) external onlyOwner { + require(payee != address(0)); + SafeERC20.safeTransfer(s_erc20Token, payee, amount); + emit FundsWithdrawn(amount, payee); + } + + /** + * @notice Sets the ERC20 token address. + */ + function setERC20TokenAddress(address erc20TokenAddress) public onlyOwner { + require(erc20TokenAddress != address(0)); + emit ERC20TokenAddressUpdated(address(s_erc20Token), erc20TokenAddress); + s_erc20Token = IERC20(erc20TokenAddress); + } + + /** + * @notice Sets the keeper registry address. + */ + function setKeeperRegistryAddress(address keeperRegistryAddress) public onlyOwner { + require(keeperRegistryAddress != address(0)); + emit KeeperRegistryAddressUpdated(s_keeperRegistryAddress, keeperRegistryAddress); + s_keeperRegistryAddress = keeperRegistryAddress; + } + + /** + * @notice Sets the minimum wait period (in seconds) for subscription ids between funding. + */ + function setMinWaitPeriodSeconds(uint256 period) public onlyOwner { + emit MinWaitPeriodUpdated(s_minWaitPeriodSeconds, period); + s_minWaitPeriodSeconds = period; + } + + /** + * @notice Gets the ERC20 token address. + */ + function getERC20TokenAddress() external view returns (address) { + return address(s_erc20Token); + } + + /** + * @notice Gets the keeper registry address. + */ + function getKeeperRegistryAddress() external view returns (address) { + return s_keeperRegistryAddress; + } + + /** + * @notice Gets the minimum wait period. 
+ */ + function getMinWaitPeriodSeconds() external view returns (uint256) { + return s_minWaitPeriodSeconds; + } + + /** + * @notice Gets the list of subscription ids being watched. + */ + function getWatchList() external view returns (address[] memory) { + return s_watchList; + } + + /** + * @notice Gets configuration information for an address on the watchlist + */ + function getAccountInfo( + address targetAddress + ) external view returns (bool isActive, uint96 minBalance, uint96 topUpLevel, uint56 lastTopUpTimestamp) { + Target memory target = s_targets[targetAddress]; + return (target.isActive, target.minBalance, target.topUpLevel, target.lastTopUpTimestamp); + } + + /** + * @notice Pause the contract, which prevents executing performUpkeep. + */ + function pause() external onlyOwner { + _pause(); + } + + /** + * @notice Unpause the contract. + */ + function unpause() external onlyOwner { + _unpause(); + } + + modifier onlyKeeperRegistry() { + if (msg.sender != s_keeperRegistryAddress) { + revert OnlyKeeperRegistry(); + } + _; + } +} diff --git a/contracts/src/v0.8/automation/upkeeps/EthBalanceMonitor.sol b/contracts/src/v0.8/automation/upkeeps/EthBalanceMonitor.sol new file mode 100644 index 00000000..c7d8a306 --- /dev/null +++ b/contracts/src/v0.8/automation/upkeeps/EthBalanceMonitor.sol @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +import "../../shared/access/ConfirmedOwner.sol"; +import "../interfaces/KeeperCompatibleInterface.sol"; +import "@openzeppelin/contracts/security/Pausable.sol"; + +/** + * @title The EthBalanceMonitor contract + * @notice A keeper-compatible contract that monitors and funds eth addresses + */ +contract EthBalanceMonitor is ConfirmedOwner, Pausable, KeeperCompatibleInterface { + // observed limit of 45K + 10k buffer + uint256 private constant MIN_GAS_FOR_TRANSFER = 55_000; + + event FundsAdded(uint256 amountAdded, uint256 newBalance, address sender); + event FundsWithdrawn(uint256 
amountWithdrawn, address payee); + event TopUpSucceeded(address indexed recipient); + event TopUpFailed(address indexed recipient); + event KeeperRegistryAddressUpdated(address oldAddress, address newAddress); + event MinWaitPeriodUpdated(uint256 oldMinWaitPeriod, uint256 newMinWaitPeriod); + + error InvalidWatchList(); + error OnlyKeeperRegistry(); + error DuplicateAddress(address duplicate); + + struct Target { + bool isActive; + uint96 minBalanceWei; + uint96 topUpAmountWei; + uint56 lastTopUpTimestamp; // enough space for 2 trillion years + } + + address private s_keeperRegistryAddress; + uint256 private s_minWaitPeriodSeconds; + address[] private s_watchList; + mapping(address => Target) internal s_targets; + + /** + * @param keeperRegistryAddress The address of the keeper registry contract + * @param minWaitPeriodSeconds The minimum wait period for addresses between funding + */ + constructor(address keeperRegistryAddress, uint256 minWaitPeriodSeconds) ConfirmedOwner(msg.sender) { + setKeeperRegistryAddress(keeperRegistryAddress); + setMinWaitPeriodSeconds(minWaitPeriodSeconds); + } + + /** + * @notice Sets the list of addresses to watch and their funding parameters + * @param addresses the list of addresses to watch + * @param minBalancesWei the minimum balances for each address + * @param topUpAmountsWei the amount to top up each address + */ + function setWatchList( + address[] calldata addresses, + uint96[] calldata minBalancesWei, + uint96[] calldata topUpAmountsWei + ) external onlyOwner { + if (addresses.length != minBalancesWei.length || addresses.length != topUpAmountsWei.length) { + revert InvalidWatchList(); + } + address[] memory oldWatchList = s_watchList; + for (uint256 idx = 0; idx < oldWatchList.length; idx++) { + s_targets[oldWatchList[idx]].isActive = false; + } + for (uint256 idx = 0; idx < addresses.length; idx++) { + if (s_targets[addresses[idx]].isActive) { + revert DuplicateAddress(addresses[idx]); + } + if (addresses[idx] == 
address(0)) { + revert InvalidWatchList(); + } + if (topUpAmountsWei[idx] == 0) { + revert InvalidWatchList(); + } + s_targets[addresses[idx]] = Target({ + isActive: true, + minBalanceWei: minBalancesWei[idx], + topUpAmountWei: topUpAmountsWei[idx], + lastTopUpTimestamp: 0 + }); + } + s_watchList = addresses; + } + + /** + * @notice Gets a list of addresses that are under funded + * @return list of addresses that are underfunded + */ + function getUnderfundedAddresses() public view returns (address[] memory) { + address[] memory watchList = s_watchList; + address[] memory needsFunding = new address[](watchList.length); + uint256 count = 0; + uint256 minWaitPeriod = s_minWaitPeriodSeconds; + uint256 balance = address(this).balance; + Target memory target; + for (uint256 idx = 0; idx < watchList.length; idx++) { + target = s_targets[watchList[idx]]; + if ( + target.lastTopUpTimestamp + minWaitPeriod <= block.timestamp && + balance >= target.topUpAmountWei && + watchList[idx].balance < target.minBalanceWei + ) { + needsFunding[count] = watchList[idx]; + count++; + balance -= target.topUpAmountWei; + } + } + if (count != watchList.length) { + assembly { + mstore(needsFunding, count) + } + } + return needsFunding; + } + + /** + * @notice Send funds to the addresses provided + * @param needsFunding the list of addresses to fund (addresses must be pre-approved) + */ + function topUp(address[] memory needsFunding) public whenNotPaused { + uint256 minWaitPeriodSeconds = s_minWaitPeriodSeconds; + Target memory target; + for (uint256 idx = 0; idx < needsFunding.length; idx++) { + target = s_targets[needsFunding[idx]]; + if ( + target.isActive && + target.lastTopUpTimestamp + minWaitPeriodSeconds <= block.timestamp && + needsFunding[idx].balance < target.minBalanceWei + ) { + bool success = payable(needsFunding[idx]).send(target.topUpAmountWei); + if (success) { + s_targets[needsFunding[idx]].lastTopUpTimestamp = uint56(block.timestamp); + emit 
TopUpSucceeded(needsFunding[idx]); + } else { + emit TopUpFailed(needsFunding[idx]); + } + } + if (gasleft() < MIN_GAS_FOR_TRANSFER) { + return; + } + } + } + + /** + * @notice Get list of addresses that are underfunded and return keeper-compatible payload + * @return upkeepNeeded signals if upkeep is needed, performData is an abi encoded list of addresses that need funds + */ + function checkUpkeep( + bytes calldata + ) external view override whenNotPaused returns (bool upkeepNeeded, bytes memory performData) { + address[] memory needsFunding = getUnderfundedAddresses(); + upkeepNeeded = needsFunding.length > 0; + performData = abi.encode(needsFunding); + return (upkeepNeeded, performData); + } + + /** + * @notice Called by keeper to send funds to underfunded addresses + * @param performData The abi encoded list of addresses to fund + */ + function performUpkeep(bytes calldata performData) external override onlyKeeperRegistry whenNotPaused { + address[] memory needsFunding = abi.decode(performData, (address[])); + topUp(needsFunding); + } + + /** + * @notice Withdraws the contract balance + * @param amount The amount of eth (in wei) to withdraw + * @param payee The address to pay + */ + function withdraw(uint256 amount, address payable payee) external onlyOwner { + require(payee != address(0)); + emit FundsWithdrawn(amount, payee); + payee.transfer(amount); + } + + /** + * @notice Receive funds + */ + receive() external payable { + emit FundsAdded(msg.value, address(this).balance, msg.sender); + } + + /** + * @notice Sets the keeper registry address + */ + function setKeeperRegistryAddress(address keeperRegistryAddress) public onlyOwner { + require(keeperRegistryAddress != address(0)); + emit KeeperRegistryAddressUpdated(s_keeperRegistryAddress, keeperRegistryAddress); + s_keeperRegistryAddress = keeperRegistryAddress; + } + + /** + * @notice Sets the minimum wait period (in seconds) for addresses between funding + */ + function setMinWaitPeriodSeconds(uint256 
period) public onlyOwner { + emit MinWaitPeriodUpdated(s_minWaitPeriodSeconds, period); + s_minWaitPeriodSeconds = period; + } + + /** + * @notice Gets the keeper registry address + */ + function getKeeperRegistryAddress() external view returns (address keeperRegistryAddress) { + return s_keeperRegistryAddress; + } + + /** + * @notice Gets the minimum wait period + */ + function getMinWaitPeriodSeconds() external view returns (uint256) { + return s_minWaitPeriodSeconds; + } + + /** + * @notice Gets the list of addresses being watched + */ + function getWatchList() external view returns (address[] memory) { + return s_watchList; + } + + /** + * @notice Gets configuration information for an address on the watchlist + */ + function getAccountInfo( + address targetAddress + ) external view returns (bool isActive, uint96 minBalanceWei, uint96 topUpAmountWei, uint56 lastTopUpTimestamp) { + Target memory target = s_targets[targetAddress]; + return (target.isActive, target.minBalanceWei, target.topUpAmountWei, target.lastTopUpTimestamp); + } + + /** + * @notice Pauses the contract, which prevents executing performUpkeep + */ + function pause() external onlyOwner { + _pause(); + } + + /** + * @notice Unpauses the contract + */ + function unpause() external onlyOwner { + _unpause(); + } + + modifier onlyKeeperRegistry() { + if (msg.sender != s_keeperRegistryAddress) { + revert OnlyKeeperRegistry(); + } + _; + } +} diff --git a/contracts/src/v0.8/automation/upkeeps/LinkAvailableBalanceMonitor.sol b/contracts/src/v0.8/automation/upkeeps/LinkAvailableBalanceMonitor.sol new file mode 100644 index 00000000..60973140 --- /dev/null +++ b/contracts/src/v0.8/automation/upkeeps/LinkAvailableBalanceMonitor.sol @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.19; + +import {AutomationCompatibleInterface} from "../interfaces/AutomationCompatibleInterface.sol"; +import {AccessControl} from 
"../../vendor/openzeppelin-solidity/v4.8.3/contracts/access/AccessControl.sol"; +import {EnumerableMap} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableMap.sol"; +import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableSet.sol"; +import {IERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; +import {Pausable} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/security/Pausable.sol"; + +interface IAggregatorProxy { + function aggregator() external view returns (address); +} + +interface ILinkAvailable { + function linkAvailableForPayment() external view returns (int256 availableBalance); +} + +/// @title The LinkAvailableBalanceMonitor contract. +/// @notice A keeper-compatible contract that monitors target contracts for balance from a custom +/// function linkAvailableForPayment() and funds them with PLI if it falls below a defined +/// threshold. Also supports aggregator proxy contracts monitoring which require fetching the actual +/// target contract through a predefined interface. +/// @dev with 30 addresses as the s_maxPerform, the measured max gas usage of performUpkeep is around 2M +/// therefore, we recommend an upkeep gas limit of 3M (this has a 33% margin of safety). Although, nothing +/// prevents us from using 5M gas and increasing s_maxPerform, 30 seems like a reasonable batch size that +/// is probably plenty for most needs. +/// @dev with 130 addresses as the s_maxCheck, the measured max gas usage of checkUpkeep is around 3.5M, +/// which is 30% below the 5M limit. +/// Note that testing conditions DO NOT match live chain gas usage, hence the margins. Change +/// at your own risk!!! 
+/// @dev some areas for improvement / acknowledgement of limitations: +/// validate that all addresses conform to interface when adding them to the watchlist +/// this is a "trustless" upkeep, meaning it does not trust the caller of performUpkeep; +/// we could save a fair amount of gas and re-write this upkeep for use with Automation v2.0+, +/// which has significantly different trust assumptions +contract LinkAvailableBalanceMonitor is AccessControl, AutomationCompatibleInterface, Pausable { + using EnumerableMap for EnumerableMap.UintToAddressMap; + using EnumerableSet for EnumerableSet.AddressSet; + + event BalanceUpdated(address indexed addr, uint256 oldBalance, uint256 newBalance); + event FundsWithdrawn(uint256 amountWithdrawn, address payee); + event UpkeepIntervalSet(uint256 oldUpkeepInterval, uint256 newUpkeepInterval); + event MaxCheckSet(uint256 oldMaxCheck, uint256 newMaxCheck); + event MaxPerformSet(uint256 oldMaxPerform, uint256 newMaxPerform); + event MinWaitPeriodSet(uint256 s_minWaitPeriodSeconds, uint256 minWaitPeriodSeconds); + event TopUpBlocked(address indexed topUpAddress); + event TopUpFailed(address indexed recipient); + event TopUpSucceeded(address indexed topUpAddress); + event TopUpUpdated(address indexed addr, uint256 oldTopUpAmount, uint256 newTopUpAmount); + event WatchlistUpdated(); + + error InvalidAddress(address target); + error InvalidMaxCheck(uint16 maxCheck); + error InvalixMaxPerform(uint16 maxPerform); + error InvalidMinBalance(uint96 minBalance); + error InvalidTopUpAmount(uint96 topUpAmount); + error InvalidUpkeepInterval(uint8 upkeepInterval); + error InvalidLinkTokenAddress(address lt); + error InvalidWatchList(); + error InvalidChainSelector(); + error DuplicateAddress(address duplicate); + + struct MonitoredAddress { + uint96 minBalance; + uint96 topUpAmount; + uint56 lastTopUpTimestamp; + bool isActive; + } + + bytes32 private constant ADMIN_ROLE = keccak256("ADMIN_ROLE"); + bytes32 private constant EXECUTOR_ROLE = 
keccak256("EXECUTOR_ROLE"); + uint96 private constant DEFAULT_TOP_UP_AMOUNT_JULES = 9000000000000000000; + uint96 private constant DEFAULT_MIN_BALANCE_JULES = 1000000000000000000; + IERC20 private immutable i_linkToken; + + uint256 private s_minWaitPeriodSeconds; + uint16 private s_maxPerform; + uint16 private s_maxCheck; + uint8 private s_upkeepInterval; + + /// @notice s_watchList contains all the addresses watched by this monitor + /// @dev It mainly provides the length() function + EnumerableSet.AddressSet private s_watchList; + + /// @notice s_targets contains all the addresses watched by this monitor + /// Each key points to a MonitoredAddress with all the needed metadata + mapping(address targetAddress => MonitoredAddress targetProperties) private s_targets; + + /// @notice s_onRampAddresses represents a list of CCIP onRamp addresses watched on this contract + /// There has to be only one onRamp per dstChainSelector. + /// dstChainSelector is needed as we have to track the live onRamp, and delete the onRamp + /// whenever a new one is deployed with the same dstChainSelector. 
+ EnumerableMap.UintToAddressMap private s_onRampAddresses; + + /// @param admin is the administrator address of this contract + /// @param linkToken the PLI token address + /// @param minWaitPeriodSeconds represents the amount of time that has to wait a contract to be funded + /// @param maxPerform maximum amount of contracts to fund + /// @param maxCheck maximum amount of contracts to check + /// @param upkeepInterval randomizes the check for underfunded contracts + constructor( + address admin, + IERC20 linkToken, + uint256 minWaitPeriodSeconds, + uint16 maxPerform, + uint16 maxCheck, + uint8 upkeepInterval + ) { + _setRoleAdmin(ADMIN_ROLE, ADMIN_ROLE); + _setRoleAdmin(EXECUTOR_ROLE, ADMIN_ROLE); + _grantRole(ADMIN_ROLE, admin); + i_linkToken = linkToken; + setMinWaitPeriodSeconds(minWaitPeriodSeconds); + setMaxPerform(maxPerform); + setMaxCheck(maxCheck); + setUpkeepInterval(upkeepInterval); + } + + /// @notice Sets the list of subscriptions to watch and their funding parameters + /// @param addresses the list of target addresses to watch (could be direct target or IAggregatorProxy) + /// @param minBalances the list of corresponding minBalance for the target address + /// @param topUpAmounts the list of corresponding minTopUp for the target address + function setWatchList( + address[] calldata addresses, + uint96[] calldata minBalances, + uint96[] calldata topUpAmounts, + uint64[] calldata dstChainSelectors + ) external onlyAdminOrExecutor { + if ( + addresses.length != minBalances.length || + addresses.length != topUpAmounts.length || + addresses.length != dstChainSelectors.length + ) { + revert InvalidWatchList(); + } + for (uint256 idx = s_watchList.length(); idx > 0; idx--) { + address member = s_watchList.at(idx - 1); + s_watchList.remove(member); + delete s_targets[member]; + } + // s_onRampAddresses is not the same length as s_watchList, so it has + // to be clean in a separate loop + for (uint256 idx = 0; idx < s_onRampAddresses.length(); idx++) { + 
(uint256 key, ) = s_onRampAddresses.at(idx); + s_onRampAddresses.remove(key); + } + for (uint256 idx = 0; idx < addresses.length; idx++) { + address targetAddress = addresses[idx]; + if (s_targets[targetAddress].isActive) revert DuplicateAddress(targetAddress); + if (targetAddress == address(0)) revert InvalidWatchList(); + if (topUpAmounts[idx] == 0) revert InvalidWatchList(); + s_targets[targetAddress] = MonitoredAddress({ + isActive: true, + minBalance: minBalances[idx], + topUpAmount: topUpAmounts[idx], + lastTopUpTimestamp: 0 + }); + if (dstChainSelectors[idx] > 0) { + s_onRampAddresses.set(dstChainSelectors[idx], targetAddress); + } + s_watchList.add(targetAddress); + } + emit WatchlistUpdated(); + } + + /// @notice Adds a new address to the watchlist + /// @param targetAddress the address to be added to the watchlist + /// @param dstChainSelector carries a non-zero value in case the targetAddress is an onRamp, otherwise it carries a 0 + /// @dev this function has to be compatible with the event onRampSet(address, dstChainSelector) emitted by + /// the CCIP router. 
Important detail to know is this event is also emitted when an onRamp is decommissioned, + /// in which case it will carry the proper dstChainSelector along with the 0x0 address + function addToWatchListOrDecomission(address targetAddress, uint64 dstChainSelector) public onlyAdminOrExecutor { + if (s_targets[targetAddress].isActive) revert DuplicateAddress(targetAddress); + bool onRampExists = s_onRampAddresses.contains(dstChainSelector); + // if targetAddress is an existing onRamp, there's a need of cleaning the previous onRamp associated to this dstChainSelector + // there's no need to remove any other address that's not an onRamp + if (dstChainSelector > 0 && onRampExists) { + address oldAddress = s_onRampAddresses.get(dstChainSelector); + removeFromWatchList(oldAddress); + } + // only add the new address if it's not 0x0 + if (targetAddress != address(0)) { + s_onRampAddresses.set(dstChainSelector, targetAddress); + s_targets[targetAddress] = MonitoredAddress({ + isActive: true, + minBalance: DEFAULT_MIN_BALANCE_JULES, + topUpAmount: DEFAULT_TOP_UP_AMOUNT_JULES, + lastTopUpTimestamp: 0 + }); + s_watchList.add(targetAddress); + } else { + // if the address is 0x0, it means the onRamp has been decommissioned and has to be cleaned + s_onRampAddresses.remove(dstChainSelector); + } + } + + /// @notice Deletes an address from the watchlist and sets the target to inactive + /// @param targetAddress the address to be deleted + function removeFromWatchList(address targetAddress) public onlyAdminOrExecutor returns (bool) { + if (s_watchList.remove(targetAddress)) { + delete s_targets[targetAddress]; + return true; + } + return false; + } + + /// @notice Gets a list of proxies that are underfunded, up to the s_maxPerform size + /// @dev the function starts at a random index in the list to avoid biasing the first + /// addresses in the list over latter ones. 
+ /// @dev the function will check at most s_maxCheck proxies in a single call + /// @dev the function returns a list with a max length of s_maxPerform + /// @return list of target addresses which are underfunded + function sampleUnderfundedAddresses() public view returns (address[] memory) { + uint16 maxPerform = s_maxPerform; + uint16 maxCheck = s_maxCheck; + uint256 numTargets = s_watchList.length(); + uint256 idx = uint256(blockhash(block.number - (block.number % s_upkeepInterval) - 1)) % numTargets; + uint256 numToCheck = numTargets < maxCheck ? numTargets : maxCheck; + uint256 numFound = 0; + address[] memory targetsToFund = new address[](maxPerform); + MonitoredAddress memory target; + for ( + uint256 numChecked = 0; + numChecked < numToCheck; + (idx, numChecked) = ((idx + 1) % numTargets, numChecked + 1) + ) { + address targetAddress = s_watchList.at(idx); + target = s_targets[targetAddress]; + if (_needsFunding(targetAddress, target.minBalance)) { + targetsToFund[numFound] = targetAddress; + numFound++; + if (numFound == maxPerform) { + break; // max number of addresses in batch reached + } + } + } + if (numFound != maxPerform) { + assembly { + mstore(targetsToFund, numFound) // resize array to number of valid targets + } + } + return targetsToFund; + } + + /// @notice tries to fund an array of target addresses, checking if they're underfunded in the process + /// @param targetAddresses is an array of contract addresses to be funded in case they're underfunded + function topUp(address[] memory targetAddresses) public whenNotPaused { + MonitoredAddress memory target; + uint256 localBalance = i_linkToken.balanceOf(address(this)); + for (uint256 idx = 0; idx < targetAddresses.length; idx++) { + address targetAddress = targetAddresses[idx]; + target = s_targets[targetAddress]; + if (localBalance >= target.topUpAmount && _needsFunding(targetAddress, target.minBalance)) { + bool success = i_linkToken.transfer(targetAddress, target.topUpAmount); + if (success) { 
+ localBalance -= target.topUpAmount;
+ s_targets[targetAddress].lastTopUpTimestamp = uint56(block.timestamp);
+ emit TopUpSucceeded(targetAddress);
+ } else {
+ emit TopUpFailed(targetAddress);
+ }
+ } else {
+ emit TopUpBlocked(targetAddress);
+ }
+ }
+ }
+
+ /// @notice checks the target (could be direct target or IAggregatorProxy), and determines
+ /// if it is eligible for funding
+ /// @param targetAddress the target to check
+ /// @param minBalance minimum balance required for the target
+ /// @return bool whether the target needs funding or not
+ function _needsFunding(address targetAddress, uint256 minBalance) private view returns (bool) {
+ // Explicitly check if the targetAddress is the zero address
+ // or if it's not a contract. In both cases return with false,
+ // to prevent target.linkAvailableForPayment from running,
+ // which would revert the operation.
+ if (targetAddress == address(0) || targetAddress.code.length == 0) {
+ return false;
+ }
+ MonitoredAddress memory addressToCheck = s_targets[targetAddress];
+ ILinkAvailable target;
+ IAggregatorProxy proxy = IAggregatorProxy(targetAddress);
+ try proxy.aggregator() returns (address aggregatorAddress) {
+ if (aggregatorAddress == address(0)) return false;
+ target = ILinkAvailable(aggregatorAddress);
+ } catch {
+ target = ILinkAvailable(targetAddress);
+ }
+ try target.linkAvailableForPayment() returns (int256 balance) {
+ if (
+ balance < int256(minBalance) &&
+ addressToCheck.lastTopUpTimestamp + s_minWaitPeriodSeconds <= block.timestamp &&
+ addressToCheck.isActive
+ ) {
+ return true;
+ }
+ } catch {}
+ return false;
+ }
+
+ /// @notice Gets list of target addresses that are underfunded and returns a keeper-compatible payload.
+ /// @return upkeepNeeded signals if upkeep is needed + /// @return performData is an abi encoded list of subscription ids that need funds + function checkUpkeep( + bytes calldata + ) external view override whenNotPaused returns (bool upkeepNeeded, bytes memory performData) { + address[] memory needsFunding = sampleUnderfundedAddresses(); + upkeepNeeded = needsFunding.length > 0; + performData = abi.encode(needsFunding); + return (upkeepNeeded, performData); + } + + /// @notice Called by the keeper to send funds to underfunded addresses. + /// @param performData the abi encoded list of addresses to fund + function performUpkeep(bytes calldata performData) external override { + address[] memory needsFunding = abi.decode(performData, (address[])); + topUp(needsFunding); + } + + /// @notice Withdraws the contract balance in the PLI token. + /// @param amount the amount of the PLI to withdraw + /// @param payee the address to pay + function withdraw(uint256 amount, address payable payee) external onlyAdminOrExecutor { + if (payee == address(0)) revert InvalidAddress(payee); + i_linkToken.transfer(payee, amount); + emit FundsWithdrawn(amount, payee); + } + + /// @notice Sets the minimum balance for the given target address + function setMinBalance(address target, uint96 minBalance) external onlyRole(ADMIN_ROLE) { + if (target == address(0)) revert InvalidAddress(target); + if (minBalance == 0) revert InvalidMinBalance(minBalance); + if (!s_targets[target].isActive) revert InvalidWatchList(); + uint256 oldBalance = s_targets[target].minBalance; + s_targets[target].minBalance = minBalance; + emit BalanceUpdated(target, oldBalance, minBalance); + } + + /// @notice Sets the minimum balance for the given target address + function setTopUpAmount(address target, uint96 topUpAmount) external onlyRole(ADMIN_ROLE) { + if (target == address(0)) revert InvalidAddress(target); + if (topUpAmount == 0) revert InvalidTopUpAmount(topUpAmount); + if (!s_targets[target].isActive) revert 
InvalidWatchList(); + uint256 oldTopUpAmount = s_targets[target].topUpAmount; + s_targets[target].topUpAmount = topUpAmount; + emit BalanceUpdated(target, oldTopUpAmount, topUpAmount); + } + + /// @notice Update s_maxPerform + function setMaxPerform(uint16 maxPerform) public onlyRole(ADMIN_ROLE) { + emit MaxPerformSet(s_maxPerform, maxPerform); + s_maxPerform = maxPerform; + } + + /// @notice Update s_maxCheck + function setMaxCheck(uint16 maxCheck) public onlyRole(ADMIN_ROLE) { + emit MaxCheckSet(s_maxCheck, maxCheck); + s_maxCheck = maxCheck; + } + + /// @notice Sets the minimum wait period (in seconds) for addresses between funding + function setMinWaitPeriodSeconds(uint256 minWaitPeriodSeconds) public onlyRole(ADMIN_ROLE) { + emit MinWaitPeriodSet(s_minWaitPeriodSeconds, minWaitPeriodSeconds); + s_minWaitPeriodSeconds = minWaitPeriodSeconds; + } + + /// @notice Update s_upkeepInterval + function setUpkeepInterval(uint8 upkeepInterval) public onlyRole(ADMIN_ROLE) { + if (upkeepInterval > 255) revert InvalidUpkeepInterval(upkeepInterval); + emit UpkeepIntervalSet(s_upkeepInterval, upkeepInterval); + s_upkeepInterval = upkeepInterval; + } + + /// @notice Gets maxPerform + function getMaxPerform() external view returns (uint16) { + return s_maxPerform; + } + + /// @notice Gets maxCheck + function getMaxCheck() external view returns (uint16) { + return s_maxCheck; + } + + /// @notice Gets the minimum wait period + function getMinWaitPeriodSeconds() external view returns (uint256) { + return s_minWaitPeriodSeconds; + } + + /// @notice Gets upkeepInterval + function getUpkeepInterval() external view returns (uint8) { + return s_upkeepInterval; + } + + /// @notice Gets the list of subscription ids being watched + function getWatchList() external view returns (address[] memory) { + return s_watchList.values(); + } + + /// @notice Gets the onRamp address with the specified dstChainSelector + function getOnRampAddressAtChainSelector(uint64 dstChainSelector) external view 
returns (address) { + if (dstChainSelector == 0) revert InvalidChainSelector(); + return s_onRampAddresses.get(dstChainSelector); + } + + /// @notice Gets configuration information for an address on the watchlist + function getAccountInfo( + address targetAddress + ) external view returns (bool isActive, uint256 minBalance, uint256 topUpAmount) { + MonitoredAddress memory target = s_targets[targetAddress]; + return (target.isActive, target.minBalance, target.topUpAmount); + } + + /// @dev Modifier to make a function callable only by executor role or the + /// admin role. + modifier onlyAdminOrExecutor() { + address sender = _msgSender(); + if (!hasRole(ADMIN_ROLE, sender)) { + _checkRole(EXECUTOR_ROLE, sender); + } + _; + } + + /// @notice Pause the contract, which prevents executing performUpkeep + function pause() external onlyRole(ADMIN_ROLE) { + _pause(); + } + + /// @notice Unpause the contract + function unpause() external onlyRole(ADMIN_ROLE) { + _unpause(); + } +} diff --git a/contracts/src/v0.8/automation/upkeeps/UpkeepBalanceMonitor.sol b/contracts/src/v0.8/automation/upkeeps/UpkeepBalanceMonitor.sol new file mode 100644 index 00000000..466b367f --- /dev/null +++ b/contracts/src/v0.8/automation/upkeeps/UpkeepBalanceMonitor.sol @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.19; + +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {IAutomationRegistryConsumer} from "../interfaces/IAutomationRegistryConsumer.sol"; +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {Pausable} from "@openzeppelin/contracts/security/Pausable.sol"; +import {EnumerableSet} from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; + +/// @title The UpkeepBalanceMonitor contract +/// @notice A keeper-compatible contract that monitors and funds Plugin Automation upkeeps. 
+contract UpkeepBalanceMonitor is ConfirmedOwner, Pausable { + using EnumerableSet for EnumerableSet.AddressSet; + + event ConfigSet(Config config); + event ForwarderSet(address forwarderAddress); + event FundsWithdrawn(uint256 amountWithdrawn, address payee); + event TopUpFailed(uint256 indexed upkeepId); + event TopUpSucceeded(uint256 indexed upkeepId, uint96 amount); + event WatchListSet(address registryAddress); + + error InvalidConfig(); + error InvalidTopUpData(); + error OnlyForwarderOrOwner(); + + /// @member maxBatchSize is the maximum number of upkeeps to fund in a single transaction + /// @member minPercentage is the percentage of the upkeep's minBalance at which top-up occurs + /// @member targetPercentage is the percentage of the upkeep's minBalance to top-up to + /// @member maxTopUpAmount is the maximum amount of PLI to top-up an upkeep with + struct Config { + uint8 maxBatchSize; + uint24 minPercentage; + uint24 targetPercentage; + uint96 maxTopUpAmount; + } + + // ================================================================ + // | STORAGE | + // ================================================================ + + LinkTokenInterface private immutable PLI_TOKEN; + + mapping(address => uint256[]) s_registryWatchLists; + EnumerableSet.AddressSet s_registries; + Config private s_config; + address private s_forwarderAddress; + + // ================================================================ + // | CONSTRUCTOR | + // ================================================================ + + /// @param linkToken the Link token address + /// @param config the initial config for the contract + constructor(LinkTokenInterface linkToken, Config memory config) ConfirmedOwner(msg.sender) { + require(address(linkToken) != address(0)); + PLI_TOKEN = linkToken; + setConfig(config); + } + + // ================================================================ + // | CORE FUNCTIONALITY | + // ================================================================ + + /// 
@notice Gets a list of upkeeps that are underfunded + /// @return needsFunding list of underfunded upkeepIDs + /// @return registryAddresses list of registries that the upkeepIDs belong to + /// @return topUpAmounts amount to top up each upkeep + function getUnderfundedUpkeeps() public view returns (uint256[] memory, address[] memory, uint96[] memory) { + Config memory config = s_config; + uint256[] memory needsFunding = new uint256[](config.maxBatchSize); + address[] memory registryAddresses = new address[](config.maxBatchSize); + uint96[] memory topUpAmounts = new uint96[](config.maxBatchSize); + uint256 availableFunds = PLI_TOKEN.balanceOf(address(this)); + uint256 count; + for (uint256 i = 0; i < s_registries.length(); i++) { + IAutomationRegistryConsumer registry = IAutomationRegistryConsumer(s_registries.at(i)); + for (uint256 j = 0; j < s_registryWatchLists[address(registry)].length; j++) { + uint256 upkeepID = s_registryWatchLists[address(registry)][j]; + uint96 upkeepBalance = registry.getBalance(upkeepID); + uint96 minBalance = registry.getMinBalance(upkeepID); + uint96 topUpThreshold = (minBalance * config.minPercentage) / 100; + uint96 topUpAmount = ((minBalance * config.targetPercentage) / 100) - upkeepBalance; + if (topUpAmount > config.maxTopUpAmount) { + topUpAmount = config.maxTopUpAmount; + } + if (upkeepBalance <= topUpThreshold && availableFunds >= topUpAmount) { + needsFunding[count] = upkeepID; + topUpAmounts[count] = topUpAmount; + registryAddresses[count] = address(registry); + count++; + availableFunds -= topUpAmount; + } + if (count == config.maxBatchSize) { + break; + } + } + if (count == config.maxBatchSize) { + break; + } + } + if (count < config.maxBatchSize) { + assembly { + mstore(needsFunding, count) + mstore(registryAddresses, count) + mstore(topUpAmounts, count) + } + } + return (needsFunding, registryAddresses, topUpAmounts); + } + + /// @notice Called by the keeper/owner to send funds to underfunded upkeeps + /// @param 
upkeepIDs the list of upkeep ids to fund
+ /// @param registryAddresses the list of registries that the upkeepIDs belong to
+ /// @param topUpAmounts the list of amounts to fund each upkeep with
+ /// @dev We explicitly choose not to verify that input upkeepIDs are included in the watchlist. We also
+ /// explicitly permit any amount to be sent via topUpAmounts; it does not have to meet the criteria
+ /// specified in getUnderfundedUpkeeps(). Here, we are relying on the security of automation's OCR to
+ /// secure the output of getUnderfundedUpkeeps() as the input to topUp(), and we are treating the owner
+ /// as a privileged user that can perform arbitrary top-ups to any upkeepID.
+ function topUp(
+ uint256[] memory upkeepIDs,
+ address[] memory registryAddresses,
+ uint96[] memory topUpAmounts
+ ) public whenNotPaused {
+ if (msg.sender != address(s_forwarderAddress) && msg.sender != owner()) revert OnlyForwarderOrOwner();
+ if (upkeepIDs.length != registryAddresses.length || upkeepIDs.length != topUpAmounts.length)
+ revert InvalidTopUpData();
+ for (uint256 i = 0; i < upkeepIDs.length; i++) {
+ try PLI_TOKEN.transferAndCall(registryAddresses[i], topUpAmounts[i], abi.encode(upkeepIDs[i])) returns (
+ bool success
+ ) {
+ if (success) {
+ emit TopUpSucceeded(upkeepIDs[i], topUpAmounts[i]);
+ continue;
+ }
+ } catch {}
+ emit TopUpFailed(upkeepIDs[i]);
+ }
+ }
+
+ // ================================================================
+ // | AUTOMATION COMPATIBLE |
+ // ================================================================
+
+ /// @notice Gets list of upkeep ids that are underfunded and returns a keeper-compatible payload.
+ /// @return upkeepNeeded signals if upkeep is needed, performData is an abi encoded list of subscription ids that need funds + function checkUpkeep(bytes calldata) external view returns (bool upkeepNeeded, bytes memory performData) { + ( + uint256[] memory needsFunding, + address[] memory registryAddresses, + uint96[] memory topUpAmounts + ) = getUnderfundedUpkeeps(); + upkeepNeeded = needsFunding.length > 0; + if (upkeepNeeded) { + performData = abi.encode(needsFunding, registryAddresses, topUpAmounts); + } + return (upkeepNeeded, performData); + } + + /// @notice Called by the keeper to send funds to underfunded addresses. + /// @param performData the abi encoded list of addresses to fund + function performUpkeep(bytes calldata performData) external { + (uint256[] memory upkeepIDs, address[] memory registryAddresses, uint96[] memory topUpAmounts) = abi.decode( + performData, + (uint256[], address[], uint96[]) + ); + topUp(upkeepIDs, registryAddresses, topUpAmounts); + } + + // ================================================================ + // | ADMIN | + // ================================================================ + + /// @notice Withdraws the contract balance in PLI. + /// @param amount the amount of PLI (in juels) to withdraw + /// @param payee the address to pay + function withdraw(uint256 amount, address payee) external onlyOwner { + require(payee != address(0)); + PLI_TOKEN.transfer(payee, amount); + emit FundsWithdrawn(amount, payee); + } + + /// @notice Pause the contract, which prevents executing performUpkeep. + function pause() external onlyOwner { + _pause(); + } + + /// @notice Unpause the contract. 
+ function unpause() external onlyOwner { + _unpause(); + } + + // ================================================================ + // | SETTERS | + // ================================================================ + + /// @notice Sets the list of upkeeps to watch + /// @param registryAddress the registry that this watchlist applies to + /// @param watchlist the list of UpkeepIDs to watch + function setWatchList(address registryAddress, uint256[] calldata watchlist) external onlyOwner { + if (watchlist.length == 0) { + s_registries.remove(registryAddress); + delete s_registryWatchLists[registryAddress]; + } else { + s_registries.add(registryAddress); + s_registryWatchLists[registryAddress] = watchlist; + } + emit WatchListSet(registryAddress); + } + + /// @notice Sets the contract config + /// @param config the new config + function setConfig(Config memory config) public onlyOwner { + if ( + config.maxBatchSize == 0 || + config.minPercentage < 100 || + config.targetPercentage <= config.minPercentage || + config.maxTopUpAmount == 0 + ) { + revert InvalidConfig(); + } + s_config = config; + emit ConfigSet(config); + } + + /// @notice Sets the upkeep's forwarder contract + /// @param forwarderAddress the new forwarder + /// @dev this should only need to be called once, after registering the contract with the registry + function setForwarder(address forwarderAddress) external onlyOwner { + s_forwarderAddress = forwarderAddress; + emit ForwarderSet(forwarderAddress); + } + + // ================================================================ + // | GETTERS | + // ================================================================ + + /// @notice Gets the list of upkeeps ids being monitored + function getWatchList() external view returns (address[] memory, uint256[][] memory) { + address[] memory registryAddresses = s_registries.values(); + uint256[][] memory upkeepIDs = new uint256[][](registryAddresses.length); + for (uint256 i = 0; i < registryAddresses.length; i++) 
{ + upkeepIDs[i] = s_registryWatchLists[registryAddresses[i]]; + } + return (registryAddresses, upkeepIDs); + } + + /// @notice Gets the contract config + function getConfig() external view returns (Config memory) { + return s_config; + } + + /// @notice Gets the upkeep's forwarder contract + function getForwarder() external view returns (address) { + return s_forwarderAddress; + } +} diff --git a/contracts/src/v0.8/automation/v1_2/KeeperRegistrar1_2.sol b/contracts/src/v0.8/automation/v1_2/KeeperRegistrar1_2.sol new file mode 100644 index 00000000..b592de87 --- /dev/null +++ b/contracts/src/v0.8/automation/v1_2/KeeperRegistrar1_2.sol @@ -0,0 +1,425 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import "../interfaces/v1_2/KeeperRegistryInterface1_2.sol"; +import "../../interfaces/TypeAndVersionInterface.sol"; +import "../../shared/interfaces/LinkTokenInterface.sol"; +import "../../shared/access/ConfirmedOwner.sol"; +import "../../shared/interfaces/IERC677Receiver.sol"; + +/** + * @notice Contract to accept requests for upkeep registrations + * @dev There are 2 registration workflows in this contract + * Flow 1. auto approve OFF / manual registration - UI calls `register` function on this contract, this contract owner at a later time then manually + * calls `approve` to register upkeep and emit events to inform UI and others interested. + * Flow 2. auto approve ON / real time registration - UI calls `register` function as before, which calls the `registerUpkeep` function directly on + * keeper registry and then emits approved event to finish the flow automatically without manual intervention. + * The idea is to have same interface(functions,events) for UI or anyone using this contract irrespective of auto approve being enabled or not. + * they can just listen to `RegistrationRequested` & `RegistrationApproved` events and know the status on registrations. 
+ */ +contract KeeperRegistrar is TypeAndVersionInterface, ConfirmedOwner, IERC677Receiver { + /** + * DISABLED: No auto approvals, all new upkeeps should be approved manually. + * ENABLED_SENDER_ALLOWLIST: Auto approvals for allowed senders subject to max allowed. Manual for rest. + * ENABLED_ALL: Auto approvals for all new upkeeps subject to max allowed. + */ + enum AutoApproveType { + DISABLED, + ENABLED_SENDER_ALLOWLIST, + ENABLED_ALL + } + + bytes4 private constant REGISTER_REQUEST_SELECTOR = this.register.selector; + + mapping(bytes32 => PendingRequest) private s_pendingRequests; + + LinkTokenInterface public immutable PLI; + + /** + * @notice versions: + * - KeeperRegistrar 1.1.0: Add functionality for sender allowlist in auto approve + * : Remove rate limit and add max allowed for auto approve + * - KeeperRegistrar 1.0.0: initial release + */ + string public constant override typeAndVersion = "KeeperRegistrar 1.1.0"; + + struct Config { + AutoApproveType autoApproveConfigType; + uint32 autoApproveMaxAllowed; + uint32 approvedCount; + KeeperRegistryBaseInterface keeperRegistry; + uint96 minPLIJuels; + } + + struct PendingRequest { + address admin; + uint96 balance; + } + + Config private s_config; + // Only applicable if s_config.configType is ENABLED_SENDER_ALLOWLIST + mapping(address => bool) private s_autoApproveAllowedSenders; + + event RegistrationRequested( + bytes32 indexed hash, + string name, + bytes encryptedEmail, + address indexed upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes checkData, + uint96 amount, + uint8 indexed source + ); + + event RegistrationApproved(bytes32 indexed hash, string displayName, uint256 indexed upkeepId); + + event RegistrationRejected(bytes32 indexed hash); + + event AutoApproveAllowedSenderSet(address indexed senderAddress, bool allowed); + + event ConfigChanged( + AutoApproveType autoApproveConfigType, + uint32 autoApproveMaxAllowed, + address keeperRegistry, + uint96 minPLIJuels + ); + + error 
InvalidAdminAddress(); + error RequestNotFound(); + error HashMismatch(); + error OnlyAdminOrOwner(); + error InsufficientPayment(); + error RegistrationRequestFailed(); + error OnlyLink(); + error AmountMismatch(); + error SenderMismatch(); + error FunctionNotPermitted(); + error LinkTransferFailed(address to); + error InvalidDataLength(); + + /* + * @param PLIAddress Address of Link token + * @param autoApproveConfigType setting for auto-approve registrations + * @param autoApproveMaxAllowed max number of registrations that can be auto approved + * @param keeperRegistry keeper registry address + * @param minPLIJuels minimum PLI that new registrations should fund their upkeep with + */ + constructor( + address PLIAddress, + AutoApproveType autoApproveConfigType, + uint16 autoApproveMaxAllowed, + address keeperRegistry, + uint96 minPLIJuels + ) ConfirmedOwner(msg.sender) { + PLI = LinkTokenInterface(PLIAddress); + setRegistrationConfig(autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels); + } + + //EXTERNAL + + /** + * @notice register can only be called through transferAndCall on PLI contract + * @param name string of the upkeep to be registered + * @param encryptedEmail email address of upkeep contact + * @param upkeepContract address to perform upkeep on + * @param gasLimit amount of gas to provide the target contract when performing upkeep + * @param adminAddress address to cancel upkeep and withdraw remaining funds + * @param checkData data passed to the contract when checking for upkeep + * @param amount quantity of PLI upkeep is funded with (specified in Juels) + * @param source application sending this request + * @param sender address of the sender making the request + */ + function register( + string memory name, + bytes calldata encryptedEmail, + address upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes calldata checkData, + uint96 amount, + uint8 source, + address sender + ) external onlyPLI { + if (adminAddress == 
address(0)) { + revert InvalidAdminAddress(); + } + bytes32 hash = keccak256(abi.encode(upkeepContract, gasLimit, adminAddress, checkData)); + + emit RegistrationRequested( + hash, + name, + encryptedEmail, + upkeepContract, + gasLimit, + adminAddress, + checkData, + amount, + source + ); + + Config memory config = s_config; + if (_shouldAutoApprove(config, sender)) { + s_config.approvedCount = config.approvedCount + 1; + + _approve(name, upkeepContract, gasLimit, adminAddress, checkData, amount, hash); + } else { + uint96 newBalance = s_pendingRequests[hash].balance + amount; + s_pendingRequests[hash] = PendingRequest({admin: adminAddress, balance: newBalance}); + } + } + + /** + * @dev register upkeep on KeeperRegistry contract and emit RegistrationApproved event + */ + function approve( + string memory name, + address upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes calldata checkData, + bytes32 hash + ) external onlyOwner { + PendingRequest memory request = s_pendingRequests[hash]; + if (request.admin == address(0)) { + revert RequestNotFound(); + } + bytes32 expectedHash = keccak256(abi.encode(upkeepContract, gasLimit, adminAddress, checkData)); + if (hash != expectedHash) { + revert HashMismatch(); + } + delete s_pendingRequests[hash]; + _approve(name, upkeepContract, gasLimit, adminAddress, checkData, request.balance, hash); + } + + /** + * @notice cancel will remove a registration request and return the refunds to the msg.sender + * @param hash the request hash + */ + function cancel(bytes32 hash) external { + PendingRequest memory request = s_pendingRequests[hash]; + if (!(msg.sender == request.admin || msg.sender == owner())) { + revert OnlyAdminOrOwner(); + } + if (request.admin == address(0)) { + revert RequestNotFound(); + } + delete s_pendingRequests[hash]; + bool success = PLI.transfer(msg.sender, request.balance); + if (!success) { + revert LinkTransferFailed(msg.sender); + } + emit RegistrationRejected(hash); + } + + /** + * 
@notice owner calls this function to set if registration requests should be sent directly to the Keeper Registry + * @param autoApproveConfigType setting for auto-approve registrations + * note: autoApproveAllowedSenders list persists across config changes irrespective of type + * @param autoApproveMaxAllowed max number of registrations that can be auto approved + * @param keeperRegistry new keeper registry address + * @param minPLIJuels minimum PLI that new registrations should fund their upkeep with + */ + function setRegistrationConfig( + AutoApproveType autoApproveConfigType, + uint16 autoApproveMaxAllowed, + address keeperRegistry, + uint96 minPLIJuels + ) public onlyOwner { + uint32 approvedCount = s_config.approvedCount; + s_config = Config({ + autoApproveConfigType: autoApproveConfigType, + autoApproveMaxAllowed: autoApproveMaxAllowed, + approvedCount: approvedCount, + minPLIJuels: minPLIJuels, + keeperRegistry: KeeperRegistryBaseInterface(keeperRegistry) + }); + + emit ConfigChanged(autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels); + } + + /** + * @notice owner calls this function to set allowlist status for senderAddress + * @param senderAddress senderAddress to set the allowlist status for + * @param allowed true if senderAddress needs to be added to allowlist, false if needs to be removed + */ + function setAutoApproveAllowedSender(address senderAddress, bool allowed) external onlyOwner { + s_autoApproveAllowedSenders[senderAddress] = allowed; + + emit AutoApproveAllowedSenderSet(senderAddress, allowed); + } + + /** + * @notice read the allowlist status of senderAddress + * @param senderAddress address to read the allowlist status for + */ + function getAutoApproveAllowedSender(address senderAddress) external view returns (bool) { + return s_autoApproveAllowedSenders[senderAddress]; + } + + /** + * @notice read the current registration configuration + */ + function getRegistrationConfig() + external + view + returns ( + 
AutoApproveType autoApproveConfigType,
+ uint32 autoApproveMaxAllowed,
+ uint32 approvedCount,
+ address keeperRegistry,
+ uint256 minPLIJuels
+ )
+ {
+ Config memory config = s_config;
+ return (
+ config.autoApproveConfigType,
+ config.autoApproveMaxAllowed,
+ config.approvedCount,
+ address(config.keeperRegistry),
+ config.minPLIJuels
+ );
+ }
+
+ /**
+ * @notice gets the admin address and the current balance of a registration request
+ */
+ function getPendingRequest(bytes32 hash) external view returns (address, uint96) {
+ PendingRequest memory request = s_pendingRequests[hash];
+ return (request.admin, request.balance);
+ }
+
+ /**
+ * @notice Called when PLI is sent to the contract via `transferAndCall`
+ * @param sender Address of the sender transferring PLI
+ * @param amount Amount of PLI sent (specified in Juels)
+ * @param data Payload of the transaction
+ */
+ function onTokenTransfer(
+ address sender,
+ uint256 amount,
+ bytes calldata data
+ )
+ external
+ override
+ onlyPLI
+ permittedFunctionsForPLI(data)
+ isActualAmount(amount, data)
+ isActualSender(sender, data)
+ {
+ if (data.length < 292) revert InvalidDataLength();
+ if (amount < s_config.minPLIJuels) {
+ revert InsufficientPayment();
+ }
+ (bool success, ) = address(this).delegatecall(data);
+ // calls register
+ if (!success) {
+ revert RegistrationRequestFailed();
+ }
+ }
+
+ //PRIVATE
+
+ /**
+ * @dev register upkeep on KeeperRegistry contract and emit RegistrationApproved event
+ */
+ function _approve(
+ string memory name,
+ address upkeepContract,
+ uint32 gasLimit,
+ address adminAddress,
+ bytes calldata checkData,
+ uint96 amount,
+ bytes32 hash
+ ) private {
+ KeeperRegistryBaseInterface keeperRegistry = s_config.keeperRegistry;
+
+ // register upkeep
+ uint256 upkeepId = keeperRegistry.registerUpkeep(upkeepContract, gasLimit, adminAddress, checkData);
+ // fund upkeep
+ bool success = PLI.transferAndCall(address(keeperRegistry), amount, abi.encode(upkeepId));
+ if (!success) {
+ 
revert LinkTransferFailed(address(keeperRegistry)); + } + + emit RegistrationApproved(hash, name, upkeepId); + } + + /** + * @dev verify sender allowlist if needed and check max limit + */ + function _shouldAutoApprove(Config memory config, address sender) private returns (bool) { + if (config.autoApproveConfigType == AutoApproveType.DISABLED) { + return false; + } + if ( + config.autoApproveConfigType == AutoApproveType.ENABLED_SENDER_ALLOWLIST && (!s_autoApproveAllowedSenders[sender]) + ) { + return false; + } + if (config.approvedCount < config.autoApproveMaxAllowed) { + return true; + } + return false; + } + + //MODIFIERS + + /** + * @dev Reverts if not sent from the PLI token + */ + modifier onlyPLI() { + if (msg.sender != address(PLI)) { + revert OnlyLink(); + } + _; + } + + /** + * @dev Reverts if the given data does not begin with the `register` function selector + * @param _data The data payload of the request + */ + modifier permittedFunctionsForPLI(bytes memory _data) { + bytes4 funcSelector; + assembly { + // solhint-disable-next-line avoid-low-level-calls + funcSelector := mload(add(_data, 32)) // First 32 bytes contain length of data + } + if (funcSelector != REGISTER_REQUEST_SELECTOR) { + revert FunctionNotPermitted(); + } + _; + } + + /** + * @dev Reverts if the actual amount passed does not match the expected amount + * @param expected amount that should match the actual amount + * @param data bytes + */ + modifier isActualAmount(uint256 expected, bytes memory data) { + uint256 actual; + assembly { + actual := mload(add(data, 228)) + } + if (expected != actual) { + revert AmountMismatch(); + } + _; + } + + /** + * @dev Reverts if the actual sender address does not match the expected sender address + * @param expected address that should match the actual sender address + * @param data bytes + */ + modifier isActualSender(address expected, bytes memory data) { + address actual; + assembly { + actual := mload(add(data, 292)) + } + if (expected != 
actual) { + revert SenderMismatch(); + } + _; + } +} diff --git a/contracts/src/v0.8/automation/v1_2/KeeperRegistry1_2.sol b/contracts/src/v0.8/automation/v1_2/KeeperRegistry1_2.sol new file mode 100644 index 00000000..f66ee969 --- /dev/null +++ b/contracts/src/v0.8/automation/v1_2/KeeperRegistry1_2.sol @@ -0,0 +1,909 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; +import "@openzeppelin/contracts/utils/Address.sol"; +import "@openzeppelin/contracts/security/Pausable.sol"; +import "@openzeppelin/contracts/security/ReentrancyGuard.sol"; +import "../KeeperBase.sol"; +import "../../interfaces/TypeAndVersionInterface.sol"; +import "../../shared/interfaces/AggregatorV3Interface.sol"; +import "../interfaces/KeeperCompatibleInterface.sol"; +import "../interfaces/v1_2/KeeperRegistryInterface1_2.sol"; +import "../interfaces/MigratableKeeperRegistryInterface.sol"; +import "../interfaces/UpkeepTranscoderInterface.sol"; +import "../../shared/interfaces/IERC677Receiver.sol"; +import "../../shared/interfaces/LinkTokenInterface.sol"; +import "../../shared/access/ConfirmedOwner.sol"; + +struct Upkeep { + uint96 balance; + address lastKeeper; // 1 storage slot full + uint32 executeGas; + uint64 maxValidBlocknumber; + address target; // 2 storage slots full + uint96 amountSpent; + address admin; // 3 storage slots full +} + +/** + * @notice Registry for adding work for Plugin Keepers to perform on client + * contracts. Clients must support the Upkeep interface. 
+ */ +contract KeeperRegistry1_2 is + TypeAndVersionInterface, + ConfirmedOwner, + KeeperBase, + ReentrancyGuard, + Pausable, + KeeperRegistryExecutableInterface, + MigratableKeeperRegistryInterface, + IERC677Receiver +{ + using Address for address; + using EnumerableSet for EnumerableSet.UintSet; + + address private constant ZERO_ADDRESS = address(0); + address private constant IGNORE_ADDRESS = 0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF; + bytes4 private constant CHECK_SELECTOR = KeeperCompatibleInterface.checkUpkeep.selector; + bytes4 private constant PERFORM_SELECTOR = KeeperCompatibleInterface.performUpkeep.selector; + uint256 private constant PERFORM_GAS_MIN = 2_300; + uint256 private constant CANCELATION_DELAY = 50; + uint256 private constant PERFORM_GAS_CUSHION = 5_000; + uint256 private constant REGISTRY_GAS_OVERHEAD = 80_000; + uint256 private constant PPB_BASE = 1_000_000_000; + uint64 private constant UINT64_MAX = 2 ** 64 - 1; + uint96 private constant PLI_TOTAL_SUPPLY = 1e27; + + address[] private s_keeperList; + EnumerableSet.UintSet private s_upkeepIDs; + mapping(uint256 => Upkeep) private s_upkeep; + mapping(address => KeeperInfo) private s_keeperInfo; + mapping(address => address) private s_proposedPayee; + mapping(uint256 => bytes) private s_checkData; + mapping(address => MigrationPermission) private s_peerRegistryMigrationPermission; + Storage private s_storage; + uint256 private s_fallbackGasPrice; // not in config object for gas savings + uint256 private s_fallbackLinkPrice; // not in config object for gas savings + uint96 private s_ownerLinkBalance; + uint256 private s_expectedLinkBalance; + address private s_transcoder; + address private s_registrar; + + LinkTokenInterface public immutable PLI; + AggregatorV3Interface public immutable PLI_ETH_FEED; + AggregatorV3Interface public immutable FAST_GAS_FEED; + + /** + * @notice versions: + * - KeeperRegistry 1.2.0: allow funding within performUpkeep + * : allow configurable registry maxPerformGas 
+ * : add function to let admin change upkeep gas limit + * : add minUpkeepSpend requirement + * : upgrade to solidity v0.8 + * - KeeperRegistry 1.1.0: added flatFeeMicroLink + * - KeeperRegistry 1.0.0: initial release + */ + string public constant override typeAndVersion = "KeeperRegistry 1.2.0"; + + error CannotCancel(); + error UpkeepNotActive(); + error MigrationNotPermitted(); + error UpkeepNotCanceled(); + error UpkeepNotNeeded(); + error NotAContract(); + error PaymentGreaterThanAllPLI(); + error OnlyActiveKeepers(); + error InsufficientFunds(); + error KeepersMustTakeTurns(); + error ParameterLengthError(); + error OnlyCallableByOwnerOrAdmin(); + error OnlyCallableByPLIToken(); + error InvalidPayee(); + error DuplicateEntry(); + error ValueNotChanged(); + error IndexOutOfRange(); + error TranscoderNotSet(); + error ArrayHasNoEntries(); + error GasLimitOutsideRange(); + error OnlyCallableByPayee(); + error OnlyCallableByProposedPayee(); + error GasLimitCanOnlyIncrease(); + error OnlyCallableByAdmin(); + error OnlyCallableByOwnerOrRegistrar(); + error InvalidRecipient(); + error InvalidDataLength(); + error TargetCheckReverted(bytes reason); + + enum MigrationPermission { + NONE, + OUTGOING, + INCOMING, + BIDIRECTIONAL + } + + /** + * @notice storage of the registry, contains a mix of config and state data + */ + struct Storage { + uint32 paymentPremiumPPB; + uint32 flatFeeMicroLink; + uint24 blockCountPerTurn; + uint32 checkGasLimit; + uint24 stalenessSeconds; + uint16 gasCeilingMultiplier; + uint96 minUpkeepSpend; // 1 evm word + uint32 maxPerformGas; + uint32 nonce; // 2 evm words + } + + struct KeeperInfo { + address payee; + uint96 balance; + bool active; + } + + struct PerformParams { + address from; + uint256 id; + bytes performData; + uint256 maxLinkPayment; + uint256 gasLimit; + uint256 adjustedGasWei; + uint256 linkEth; + } + + event UpkeepRegistered(uint256 indexed id, uint32 executeGas, address admin); + event UpkeepPerformed( + uint256 indexed id, 
+ bool indexed success, + address indexed from, + uint96 payment, + bytes performData + ); + event UpkeepCanceled(uint256 indexed id, uint64 indexed atBlockHeight); + event FundsAdded(uint256 indexed id, address indexed from, uint96 amount); + event FundsWithdrawn(uint256 indexed id, uint256 amount, address to); + event OwnerFundsWithdrawn(uint96 amount); + event UpkeepMigrated(uint256 indexed id, uint256 remainingBalance, address destination); + event UpkeepReceived(uint256 indexed id, uint256 startingBalance, address importedFrom); + event ConfigSet(Config config); + event KeepersUpdated(address[] keepers, address[] payees); + event PaymentWithdrawn(address indexed keeper, uint256 indexed amount, address indexed to, address payee); + event PayeeshipTransferRequested(address indexed keeper, address indexed from, address indexed to); + event PayeeshipTransferred(address indexed keeper, address indexed from, address indexed to); + event UpkeepGasLimitSet(uint256 indexed id, uint96 gasLimit); + + /** + * @param link address of the PLI Token + * @param linkEthFeed address of the PLI/ETH price feed + * @param fastGasFeed address of the Fast Gas price feed + * @param config registry config settings + */ + constructor(address link, address linkEthFeed, address fastGasFeed, Config memory config) ConfirmedOwner(msg.sender) { + PLI = LinkTokenInterface(link); + PLI_ETH_FEED = AggregatorV3Interface(linkEthFeed); + FAST_GAS_FEED = AggregatorV3Interface(fastGasFeed); + setConfig(config); + } + + // ACTIONS + + /** + * @notice adds a new upkeep + * @param target address to perform upkeep on + * @param gasLimit amount of gas to provide the target contract when + * performing upkeep + * @param admin address to cancel upkeep and withdraw remaining funds + * @param checkData data passed to the contract when checking for upkeep + */ + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + bytes calldata checkData + ) external override onlyOwnerOrRegistrar 
returns (uint256 id) { + id = uint256(keccak256(abi.encodePacked(blockhash(block.number - 1), address(this), s_storage.nonce))); + _createUpkeep(id, target, gasLimit, admin, 0, checkData); + s_storage.nonce++; + emit UpkeepRegistered(id, gasLimit, admin); + return id; + } + + /** + * @notice simulated by keepers via eth_call to see if the upkeep needs to be + * performed. If upkeep is needed, the call then simulates performUpkeep + * to make sure it succeeds. Finally, it returns the success status along with + * payment information and the perform data payload. + * @param id identifier of the upkeep to check + * @param from the address to simulate performing the upkeep from + */ + function checkUpkeep( + uint256 id, + address from + ) + external + override + cannotExecute + returns ( + bytes memory performData, + uint256 maxLinkPayment, + uint256 gasLimit, + uint256 adjustedGasWei, + uint256 linkEth + ) + { + Upkeep memory upkeep = s_upkeep[id]; + + bytes memory callData = abi.encodeWithSelector(CHECK_SELECTOR, s_checkData[id]); + (bool success, bytes memory result) = upkeep.target.call{gas: s_storage.checkGasLimit}(callData); + + if (!success) revert TargetCheckReverted(result); + + (success, performData) = abi.decode(result, (bool, bytes)); + if (!success) revert UpkeepNotNeeded(); + + PerformParams memory params = _generatePerformParams(from, id, performData, false); + _prePerformUpkeep(upkeep, params.from, params.maxLinkPayment); + + return (performData, params.maxLinkPayment, params.gasLimit, params.adjustedGasWei, params.linkEth); + } + + /** + * @notice executes the upkeep with the perform data returned from + * checkUpkeep, validates the keeper's permissions, and pays the keeper. + * @param id identifier of the upkeep to execute the data with. + * @param performData calldata parameter to be passed to the target upkeep. 
+ */ + function performUpkeep( + uint256 id, + bytes calldata performData + ) external override whenNotPaused returns (bool success) { + return _performUpkeepWithParams(_generatePerformParams(msg.sender, id, performData, true)); + } + + /** + * @notice prevent an upkeep from being performed in the future + * @param id upkeep to be canceled + */ + function cancelUpkeep(uint256 id) external override { + uint64 maxValid = s_upkeep[id].maxValidBlocknumber; + bool canceled = maxValid != UINT64_MAX; + bool isOwner = msg.sender == owner(); + + if (canceled && !(isOwner && maxValid > block.number)) revert CannotCancel(); + if (!isOwner && msg.sender != s_upkeep[id].admin) revert OnlyCallableByOwnerOrAdmin(); + + uint256 height = block.number; + if (!isOwner) { + height = height + CANCELATION_DELAY; + } + s_upkeep[id].maxValidBlocknumber = uint64(height); + s_upkeepIDs.remove(id); + + emit UpkeepCanceled(id, uint64(height)); + } + + /** + * @notice adds PLI funding for an upkeep by transferring from the sender's + * PLI balance + * @param id upkeep to fund + * @param amount number of PLI to transfer + */ + function addFunds(uint256 id, uint96 amount) external override onlyActiveUpkeep(id) { + s_upkeep[id].balance = s_upkeep[id].balance + amount; + s_expectedLinkBalance = s_expectedLinkBalance + amount; + PLI.transferFrom(msg.sender, address(this), amount); + emit FundsAdded(id, msg.sender, amount); + } + + /** + * @notice uses PLI's transferAndCall to add funding to an upkeep + * @dev safe to cast uint256 to uint96 as total PLI supply is under UINT96MAX + * @param sender the account which transferred the funds + * @param amount number of PLI to transfer + */ + function onTokenTransfer(address sender, uint256 amount, bytes calldata data) external override { + if (msg.sender != address(PLI)) revert OnlyCallableByPLIToken(); + if (data.length != 32) revert InvalidDataLength(); + uint256 id = abi.decode(data, (uint256)); + if (s_upkeep[id].maxValidBlocknumber != 
UINT64_MAX) revert UpkeepNotActive(); + + s_upkeep[id].balance = s_upkeep[id].balance + uint96(amount); + s_expectedLinkBalance = s_expectedLinkBalance + amount; + + emit FundsAdded(id, sender, uint96(amount)); + } + + /** + * @notice removes funding from a canceled upkeep + * @param id upkeep to withdraw funds from + * @param to destination address for sending remaining funds + */ + function withdrawFunds(uint256 id, address to) external validRecipient(to) onlyUpkeepAdmin(id) { + if (s_upkeep[id].maxValidBlocknumber > block.number) revert UpkeepNotCanceled(); + + uint96 minUpkeepSpend = s_storage.minUpkeepSpend; + uint96 amountLeft = s_upkeep[id].balance; + uint96 amountSpent = s_upkeep[id].amountSpent; + + uint96 cancellationFee = 0; + // cancellationFee is supposed to be min(max(minUpkeepSpend - amountSpent,0), amountLeft) + if (amountSpent < minUpkeepSpend) { + cancellationFee = minUpkeepSpend - amountSpent; + if (cancellationFee > amountLeft) { + cancellationFee = amountLeft; + } + } + uint96 amountToWithdraw = amountLeft - cancellationFee; + + s_upkeep[id].balance = 0; + s_ownerLinkBalance = s_ownerLinkBalance + cancellationFee; + + s_expectedLinkBalance = s_expectedLinkBalance - amountToWithdraw; + emit FundsWithdrawn(id, amountToWithdraw, to); + + PLI.transfer(to, amountToWithdraw); + } + + /** + * @notice withdraws PLI funds collected through cancellation fees + */ + function withdrawOwnerFunds() external onlyOwner { + uint96 amount = s_ownerLinkBalance; + + s_expectedLinkBalance = s_expectedLinkBalance - amount; + s_ownerLinkBalance = 0; + + emit OwnerFundsWithdrawn(amount); + PLI.transfer(msg.sender, amount); + } + + /** + * @notice allows the admin of an upkeep to modify gas limit + * @param id upkeep to be change the gas limit for + * @param gasLimit new gas limit for the upkeep + */ + function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external override onlyActiveUpkeep(id) onlyUpkeepAdmin(id) { + if (gasLimit < PERFORM_GAS_MIN || gasLimit > 
s_storage.maxPerformGas) revert GasLimitOutsideRange(); + + s_upkeep[id].executeGas = gasLimit; + + emit UpkeepGasLimitSet(id, gasLimit); + } + + /** + * @notice recovers PLI funds improperly transferred to the registry + * @dev In principle this function’s execution cost could exceed block + * gas limit. However, in our anticipated deployment, the number of upkeeps and + * keepers will be low enough to avoid this problem. + */ + function recoverFunds() external onlyOwner { + uint256 total = PLI.balanceOf(address(this)); + PLI.transfer(msg.sender, total - s_expectedLinkBalance); + } + + /** + * @notice withdraws a keeper's payment, callable only by the keeper's payee + * @param from keeper address + * @param to address to send the payment to + */ + function withdrawPayment(address from, address to) external validRecipient(to) { + KeeperInfo memory keeper = s_keeperInfo[from]; + if (keeper.payee != msg.sender) revert OnlyCallableByPayee(); + + s_keeperInfo[from].balance = 0; + s_expectedLinkBalance = s_expectedLinkBalance - keeper.balance; + emit PaymentWithdrawn(from, keeper.balance, to, msg.sender); + + PLI.transfer(to, keeper.balance); + } + + /** + * @notice proposes the safe transfer of a keeper's payee to another address + * @param keeper address of the keeper to transfer payee role + * @param proposed address to nominate for next payeeship + */ + function transferPayeeship(address keeper, address proposed) external { + if (s_keeperInfo[keeper].payee != msg.sender) revert OnlyCallableByPayee(); + if (proposed == msg.sender) revert ValueNotChanged(); + + if (s_proposedPayee[keeper] != proposed) { + s_proposedPayee[keeper] = proposed; + emit PayeeshipTransferRequested(keeper, msg.sender, proposed); + } + } + + /** + * @notice accepts the safe transfer of payee role for a keeper + * @param keeper address to accept the payee role for + */ + function acceptPayeeship(address keeper) external { + if (s_proposedPayee[keeper] != msg.sender) revert 
OnlyCallableByProposedPayee(); + address past = s_keeperInfo[keeper].payee; + s_keeperInfo[keeper].payee = msg.sender; + s_proposedPayee[keeper] = ZERO_ADDRESS; + + emit PayeeshipTransferred(keeper, past, msg.sender); + } + + /** + * @notice signals to keepers that they should not perform upkeeps until the + * contract has been unpaused + */ + function pause() external onlyOwner { + _pause(); + } + + /** + * @notice signals to keepers that they can perform upkeeps once again after + * having been paused + */ + function unpause() external onlyOwner { + _unpause(); + } + + // SETTERS + + /** + * @notice updates the configuration of the registry + * @param config registry config fields + */ + function setConfig(Config memory config) public onlyOwner { + if (config.maxPerformGas < s_storage.maxPerformGas) revert GasLimitCanOnlyIncrease(); + s_storage = Storage({ + paymentPremiumPPB: config.paymentPremiumPPB, + flatFeeMicroLink: config.flatFeeMicroLink, + blockCountPerTurn: config.blockCountPerTurn, + checkGasLimit: config.checkGasLimit, + stalenessSeconds: config.stalenessSeconds, + gasCeilingMultiplier: config.gasCeilingMultiplier, + minUpkeepSpend: config.minUpkeepSpend, + maxPerformGas: config.maxPerformGas, + nonce: s_storage.nonce + }); + s_fallbackGasPrice = config.fallbackGasPrice; + s_fallbackLinkPrice = config.fallbackLinkPrice; + s_transcoder = config.transcoder; + s_registrar = config.registrar; + emit ConfigSet(config); + } + + /** + * @notice update the list of keepers allowed to perform upkeep + * @param keepers list of addresses allowed to perform upkeep + * @param payees addresses corresponding to keepers who are allowed to + * move payments which have been accrued + */ + function setKeepers(address[] calldata keepers, address[] calldata payees) external onlyOwner { + if (keepers.length != payees.length || keepers.length < 2) revert ParameterLengthError(); + for (uint256 i = 0; i < s_keeperList.length; i++) { + address keeper = s_keeperList[i]; + 
s_keeperInfo[keeper].active = false; + } + for (uint256 i = 0; i < keepers.length; i++) { + address keeper = keepers[i]; + KeeperInfo storage s_keeper = s_keeperInfo[keeper]; + address oldPayee = s_keeper.payee; + address newPayee = payees[i]; + if ( + (newPayee == ZERO_ADDRESS) || (oldPayee != ZERO_ADDRESS && oldPayee != newPayee && newPayee != IGNORE_ADDRESS) + ) revert InvalidPayee(); + if (s_keeper.active) revert DuplicateEntry(); + s_keeper.active = true; + if (newPayee != IGNORE_ADDRESS) { + s_keeper.payee = newPayee; + } + } + s_keeperList = keepers; + emit KeepersUpdated(keepers, payees); + } + + // GETTERS + + /** + * @notice read all of the details about an upkeep + */ + function getUpkeep( + uint256 id + ) + external + view + override + returns ( + address target, + uint32 executeGas, + bytes memory checkData, + uint96 balance, + address lastKeeper, + address admin, + uint64 maxValidBlocknumber, + uint96 amountSpent + ) + { + Upkeep memory reg = s_upkeep[id]; + return ( + reg.target, + reg.executeGas, + s_checkData[id], + reg.balance, + reg.lastKeeper, + reg.admin, + reg.maxValidBlocknumber, + reg.amountSpent + ); + } + + /** + * @notice retrieve active upkeep IDs + * @param startIndex starting index in list + * @param maxCount max count to retrieve (0 = unlimited) + * @dev the order of IDs in the list is **not guaranteed**, therefore, if making successive calls, one + * should consider keeping the blockheight constant to ensure a holistic picture of the contract state + */ + function getActiveUpkeepIDs(uint256 startIndex, uint256 maxCount) external view override returns (uint256[] memory) { + uint256 maxIdx = s_upkeepIDs.length(); + if (startIndex >= maxIdx) revert IndexOutOfRange(); + if (maxCount == 0) { + maxCount = maxIdx - startIndex; + } + uint256[] memory ids = new uint256[](maxCount); + for (uint256 idx = 0; idx < maxCount; idx++) { + ids[idx] = s_upkeepIDs.at(startIndex + idx); + } + return ids; + } + + /** + * @notice read the current info 
about any keeper address + */ + function getKeeperInfo(address query) external view override returns (address payee, bool active, uint96 balance) { + KeeperInfo memory keeper = s_keeperInfo[query]; + return (keeper.payee, keeper.active, keeper.balance); + } + + /** + * @notice read the current state of the registry + */ + function getState() + external + view + override + returns (State memory state, Config memory config, address[] memory keepers) + { + Storage memory store = s_storage; + state.nonce = store.nonce; + state.ownerLinkBalance = s_ownerLinkBalance; + state.expectedLinkBalance = s_expectedLinkBalance; + state.numUpkeeps = s_upkeepIDs.length(); + config.paymentPremiumPPB = store.paymentPremiumPPB; + config.flatFeeMicroLink = store.flatFeeMicroLink; + config.blockCountPerTurn = store.blockCountPerTurn; + config.checkGasLimit = store.checkGasLimit; + config.stalenessSeconds = store.stalenessSeconds; + config.gasCeilingMultiplier = store.gasCeilingMultiplier; + config.minUpkeepSpend = store.minUpkeepSpend; + config.maxPerformGas = store.maxPerformGas; + config.fallbackGasPrice = s_fallbackGasPrice; + config.fallbackLinkPrice = s_fallbackLinkPrice; + config.transcoder = s_transcoder; + config.registrar = s_registrar; + return (state, config, s_keeperList); + } + + /** + * @notice calculates the minimum balance required for an upkeep to remain eligible + * @param id the upkeep id to calculate minimum balance for + */ + function getMinBalanceForUpkeep(uint256 id) external view returns (uint96 minBalance) { + return getMaxPaymentForGas(s_upkeep[id].executeGas); + } + + /** + * @notice calculates the maximum payment for a given gas limit + * @param gasLimit the gas to calculate payment for + */ + function getMaxPaymentForGas(uint256 gasLimit) public view returns (uint96 maxPayment) { + (uint256 gasWei, uint256 linkEth) = _getFeedData(); + uint256 adjustedGasWei = _adjustGasPrice(gasWei, false); + return _calculatePaymentAmount(gasLimit, adjustedGasWei, linkEth); 
+ } + + /** + * @notice retrieves the migration permission for a peer registry + */ + function getPeerRegistryMigrationPermission(address peer) external view returns (MigrationPermission) { + return s_peerRegistryMigrationPermission[peer]; + } + + /** + * @notice sets the peer registry migration permission + */ + function setPeerRegistryMigrationPermission(address peer, MigrationPermission permission) external onlyOwner { + s_peerRegistryMigrationPermission[peer] = permission; + } + + /** + * @inheritdoc MigratableKeeperRegistryInterface + */ + function migrateUpkeeps(uint256[] calldata ids, address destination) external override { + if ( + s_peerRegistryMigrationPermission[destination] != MigrationPermission.OUTGOING && + s_peerRegistryMigrationPermission[destination] != MigrationPermission.BIDIRECTIONAL + ) revert MigrationNotPermitted(); + if (s_transcoder == ZERO_ADDRESS) revert TranscoderNotSet(); + if (ids.length == 0) revert ArrayHasNoEntries(); + uint256 id; + Upkeep memory upkeep; + uint256 totalBalanceRemaining; + bytes[] memory checkDatas = new bytes[](ids.length); + Upkeep[] memory upkeeps = new Upkeep[](ids.length); + for (uint256 idx = 0; idx < ids.length; idx++) { + id = ids[idx]; + upkeep = s_upkeep[id]; + if (upkeep.admin != msg.sender) revert OnlyCallableByAdmin(); + if (upkeep.maxValidBlocknumber != UINT64_MAX) revert UpkeepNotActive(); + upkeeps[idx] = upkeep; + checkDatas[idx] = s_checkData[id]; + totalBalanceRemaining = totalBalanceRemaining + upkeep.balance; + delete s_upkeep[id]; + delete s_checkData[id]; + s_upkeepIDs.remove(id); + emit UpkeepMigrated(id, upkeep.balance, destination); + } + s_expectedLinkBalance = s_expectedLinkBalance - totalBalanceRemaining; + bytes memory encodedUpkeeps = abi.encode(ids, upkeeps, checkDatas); + MigratableKeeperRegistryInterface(destination).receiveUpkeeps( + UpkeepTranscoderInterface(s_transcoder).transcodeUpkeeps( + UpkeepFormat.V1, + 
MigratableKeeperRegistryInterface(destination).upkeepTranscoderVersion(), + encodedUpkeeps + ) + ); + PLI.transfer(destination, totalBalanceRemaining); + } + + /** + * @inheritdoc MigratableKeeperRegistryInterface + */ + UpkeepFormat public constant override upkeepTranscoderVersion = UpkeepFormat.V1; + + /** + * @inheritdoc MigratableKeeperRegistryInterface + */ + function receiveUpkeeps(bytes calldata encodedUpkeeps) external override { + if ( + s_peerRegistryMigrationPermission[msg.sender] != MigrationPermission.INCOMING && + s_peerRegistryMigrationPermission[msg.sender] != MigrationPermission.BIDIRECTIONAL + ) revert MigrationNotPermitted(); + (uint256[] memory ids, Upkeep[] memory upkeeps, bytes[] memory checkDatas) = abi.decode( + encodedUpkeeps, + (uint256[], Upkeep[], bytes[]) + ); + for (uint256 idx = 0; idx < ids.length; idx++) { + _createUpkeep( + ids[idx], + upkeeps[idx].target, + upkeeps[idx].executeGas, + upkeeps[idx].admin, + upkeeps[idx].balance, + checkDatas[idx] + ); + emit UpkeepReceived(ids[idx], upkeeps[idx].balance, msg.sender); + } + } + + /** + * @notice creates a new upkeep with the given fields + * @param target address to perform upkeep on + * @param gasLimit amount of gas to provide the target contract when + * performing upkeep + * @param admin address to cancel upkeep and withdraw remaining funds + * @param checkData data passed to the contract when checking for upkeep + */ + function _createUpkeep( + uint256 id, + address target, + uint32 gasLimit, + address admin, + uint96 balance, + bytes memory checkData + ) internal whenNotPaused { + if (!target.isContract()) revert NotAContract(); + if (gasLimit < PERFORM_GAS_MIN || gasLimit > s_storage.maxPerformGas) revert GasLimitOutsideRange(); + s_upkeep[id] = Upkeep({ + target: target, + executeGas: gasLimit, + balance: balance, + admin: admin, + maxValidBlocknumber: UINT64_MAX, + lastKeeper: ZERO_ADDRESS, + amountSpent: 0 + }); + s_expectedLinkBalance = s_expectedLinkBalance + balance; + 
s_checkData[id] = checkData; + s_upkeepIDs.add(id); + } + + /** + * @dev retrieves feed data for fast gas/eth and link/eth prices. if the feed + * data is stale it uses the configured fallback price. Once a price is picked + * for gas it takes the min of gas price in the transaction or the fast gas + * price in order to reduce costs for the upkeep clients. + */ + function _getFeedData() private view returns (uint256 gasWei, uint256 linkEth) { + uint32 stalenessSeconds = s_storage.stalenessSeconds; + bool staleFallback = stalenessSeconds > 0; + uint256 timestamp; + int256 feedValue; + (, feedValue, , timestamp, ) = FAST_GAS_FEED.latestRoundData(); + if ((staleFallback && stalenessSeconds < block.timestamp - timestamp) || feedValue <= 0) { + gasWei = s_fallbackGasPrice; + } else { + gasWei = uint256(feedValue); + } + (, feedValue, , timestamp, ) = PLI_ETH_FEED.latestRoundData(); + if ((staleFallback && stalenessSeconds < block.timestamp - timestamp) || feedValue <= 0) { + linkEth = s_fallbackLinkPrice; + } else { + linkEth = uint256(feedValue); + } + return (gasWei, linkEth); + } + + /** + * @dev calculates PLI paid for gas spent plus a configure premium percentage + */ + function _calculatePaymentAmount( + uint256 gasLimit, + uint256 gasWei, + uint256 linkEth + ) private view returns (uint96 payment) { + uint256 weiForGas = gasWei * (gasLimit + REGISTRY_GAS_OVERHEAD); + uint256 premium = PPB_BASE + s_storage.paymentPremiumPPB; + uint256 total = ((weiForGas * (1e9) * (premium)) / (linkEth)) + (uint256(s_storage.flatFeeMicroLink) * (1e12)); + if (total > PLI_TOTAL_SUPPLY) revert PaymentGreaterThanAllPLI(); + return uint96(total); // PLI_TOTAL_SUPPLY < UINT96_MAX + } + + /** + * @dev calls target address with exactly gasAmount gas and data as calldata + * or reverts if at least gasAmount gas is not available + */ + function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { + assembly { + let g := gas() + // Compute 
g -= PERFORM_GAS_CUSHION and check for underflow + if lt(g, PERFORM_GAS_CUSHION) { + revert(0, 0) + } + g := sub(g, PERFORM_GAS_CUSHION) + // if g - g//64 <= gasAmount, revert + // (we subtract g//64 because of EIP-150) + if iszero(gt(sub(g, div(g, 64)), gasAmount)) { + revert(0, 0) + } + // solidity calls check that a contract actually exists at the destination, so we do the same + if iszero(extcodesize(target)) { + revert(0, 0) + } + // call and return whether we succeeded. ignore return data + success := call(gasAmount, target, 0, add(data, 0x20), mload(data), 0, 0) + } + return success; + } + + /** + * @dev calls the Upkeep target with the performData param passed in by the + * keeper and the exact gas required by the Upkeep + */ + function _performUpkeepWithParams( + PerformParams memory params + ) private nonReentrant validUpkeep(params.id) returns (bool success) { + Upkeep memory upkeep = s_upkeep[params.id]; + _prePerformUpkeep(upkeep, params.from, params.maxLinkPayment); + + uint256 gasUsed = gasleft(); + bytes memory callData = abi.encodeWithSelector(PERFORM_SELECTOR, params.performData); + success = _callWithExactGas(params.gasLimit, upkeep.target, callData); + gasUsed = gasUsed - gasleft(); + + uint96 payment = _calculatePaymentAmount(gasUsed, params.adjustedGasWei, params.linkEth); + + s_upkeep[params.id].balance = s_upkeep[params.id].balance - payment; + s_upkeep[params.id].amountSpent = s_upkeep[params.id].amountSpent + payment; + s_upkeep[params.id].lastKeeper = params.from; + s_keeperInfo[params.from].balance = s_keeperInfo[params.from].balance + payment; + + emit UpkeepPerformed(params.id, success, params.from, payment, params.performData); + return success; + } + + /** + * @dev ensures all required checks are passed before an upkeep is performed + */ + function _prePerformUpkeep(Upkeep memory upkeep, address from, uint256 maxLinkPayment) private view { + if (!s_keeperInfo[from].active) revert OnlyActiveKeepers(); + if (upkeep.balance < 
maxLinkPayment) revert InsufficientFunds(); + if (upkeep.lastKeeper == from) revert KeepersMustTakeTurns(); + } + + /** + * @dev adjusts the gas price to min(ceiling, tx.gasprice) or just uses the ceiling if tx.gasprice is disabled + */ + function _adjustGasPrice(uint256 gasWei, bool useTxGasPrice) private view returns (uint256 adjustedPrice) { + adjustedPrice = gasWei * s_storage.gasCeilingMultiplier; + if (useTxGasPrice && tx.gasprice < adjustedPrice) { + adjustedPrice = tx.gasprice; + } + } + + /** + * @dev generates a PerformParams struct for use in _performUpkeepWithParams() + */ + function _generatePerformParams( + address from, + uint256 id, + bytes memory performData, + bool useTxGasPrice + ) private view returns (PerformParams memory) { + uint256 gasLimit = s_upkeep[id].executeGas; + (uint256 gasWei, uint256 linkEth) = _getFeedData(); + uint256 adjustedGasWei = _adjustGasPrice(gasWei, useTxGasPrice); + uint96 maxLinkPayment = _calculatePaymentAmount(gasLimit, adjustedGasWei, linkEth); + + return + PerformParams({ + from: from, + id: id, + performData: performData, + maxLinkPayment: maxLinkPayment, + gasLimit: gasLimit, + adjustedGasWei: adjustedGasWei, + linkEth: linkEth + }); + } + + // MODIFIERS + + /** + * @dev ensures a upkeep is valid + */ + modifier validUpkeep(uint256 id) { + if (s_upkeep[id].maxValidBlocknumber <= block.number) revert UpkeepNotActive(); + _; + } + + /** + * @dev Reverts if called by anyone other than the admin of upkeep #id + */ + modifier onlyUpkeepAdmin(uint256 id) { + if (msg.sender != s_upkeep[id].admin) revert OnlyCallableByAdmin(); + _; + } + + /** + * @dev Reverts if called on a cancelled upkeep + */ + modifier onlyActiveUpkeep(uint256 id) { + if (s_upkeep[id].maxValidBlocknumber != UINT64_MAX) revert UpkeepNotActive(); + _; + } + + /** + * @dev ensures that burns don't accidentally happen by sending to the zero + * address + */ + modifier validRecipient(address to) { + if (to == ZERO_ADDRESS) revert InvalidRecipient(); + _; 
+ } + + /** + * @dev Reverts if called by anyone other than the contract owner or registrar. + */ + modifier onlyOwnerOrRegistrar() { + if (msg.sender != owner() && msg.sender != s_registrar) revert OnlyCallableByOwnerOrRegistrar(); + _; + } +} diff --git a/contracts/src/v0.8/automation/v1_2/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.sol b/contracts/src/v0.8/automation/v1_2/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.sol new file mode 100644 index 00000000..253a421a --- /dev/null +++ b/contracts/src/v0.8/automation/v1_2/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.sol @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import {AutomationRegistryExecutableInterface} from "../interfaces/v1_2/AutomationRegistryInterface1_2.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; + +/** + * @notice This contract serves as a wrapper around a keeper registry's checkUpkeep function. + */ +contract KeeperRegistryCheckUpkeepGasUsageWrapper1_2 is ConfirmedOwner { + AutomationRegistryExecutableInterface private immutable i_keeperRegistry; + + /** + * @param keeperRegistry address of a keeper registry + */ + constructor(AutomationRegistryExecutableInterface keeperRegistry) ConfirmedOwner(msg.sender) { + i_keeperRegistry = keeperRegistry; + } + + /** + * @return the keeper registry + */ + function getKeeperRegistry() external view returns (AutomationRegistryExecutableInterface) { + return i_keeperRegistry; + } + + /** + * @notice This function is called by monitoring service to estimate how much gas checkUpkeep functions will consume. 
+ * @param id identifier of the upkeep to check + * @param from the address to simulate performing the upkeep from + */ + function measureCheckGas(uint256 id, address from) external returns (bool, bytes memory, uint256) { + uint256 startGas = gasleft(); + try i_keeperRegistry.checkUpkeep(id, from) returns ( + bytes memory performData, + uint256 maxLinkPayment, + uint256 gasLimit, + uint256 adjustedGasWei, + uint256 linkEth + ) { + uint256 gasUsed = startGas - gasleft(); + return (true, performData, gasUsed); + } catch { + uint256 gasUsed = startGas - gasleft(); + return (false, "", gasUsed); + } + } +} diff --git a/contracts/src/v0.8/automation/v1_3/KeeperRegistry1_3.sol b/contracts/src/v0.8/automation/v1_3/KeeperRegistry1_3.sol new file mode 100644 index 00000000..8d1bba6d --- /dev/null +++ b/contracts/src/v0.8/automation/v1_3/KeeperRegistry1_3.sol @@ -0,0 +1,548 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import "@openzeppelin/contracts/proxy/Proxy.sol"; +import "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; +import "@openzeppelin/contracts/utils/Address.sol"; +import "./KeeperRegistryBase1_3.sol"; +import "./KeeperRegistryLogic1_3.sol"; +import {AutomationRegistryExecutableInterface, State} from "../interfaces/v1_3/AutomationRegistryInterface1_3.sol"; +import "../interfaces/MigratableKeeperRegistryInterface.sol"; +import "../../interfaces/TypeAndVersionInterface.sol"; +import "../../shared/interfaces/IERC677Receiver.sol"; + +/** + * @notice Registry for adding work for Plugin Keepers to perform on client + * contracts. Clients must support the Upkeep interface. 
+ */
+contract KeeperRegistry1_3 is
+  KeeperRegistryBase1_3,
+  Proxy,
+  TypeAndVersionInterface,
+  AutomationRegistryExecutableInterface,
+  MigratableKeeperRegistryInterface,
+  IERC677Receiver
+{
+  using Address for address;
+  using EnumerableSet for EnumerableSet.UintSet;
+
+  address public immutable KEEPER_REGISTRY_LOGIC;
+
+  /**
+   * @notice versions:
+   * - KeeperRegistry 1.3.0: split contract into Proxy and Logic
+   *                       : account for Arbitrum and Optimism L1 gas fee
+   *                       : allow users to configure upkeeps
+   * - KeeperRegistry 1.2.0: allow funding within performUpkeep
+   *                       : allow configurable registry maxPerformGas
+   *                       : add function to let admin change upkeep gas limit
+   *                       : add minUpkeepSpend requirement
+   *                       : upgrade to solidity v0.8
+   * - KeeperRegistry 1.1.0: added flatFeeMicroLink
+   * - KeeperRegistry 1.0.0: initial release
+   */
+  string public constant override typeAndVersion = "KeeperRegistry 1.3.0";
+
+  /**
+   * @param keeperRegistryLogic the address of keeper registry logic
+   * @param config registry config settings
+   */
+  constructor(
+    KeeperRegistryLogic1_3 keeperRegistryLogic,
+    Config memory config
+  )
+    KeeperRegistryBase1_3(
+      keeperRegistryLogic.PAYMENT_MODEL(),
+      keeperRegistryLogic.REGISTRY_GAS_OVERHEAD(),
+      address(keeperRegistryLogic.PLI()),
+      address(keeperRegistryLogic.PLI_ETH_FEED()),
+      address(keeperRegistryLogic.FAST_GAS_FEED())
+    )
+  {
+    KEEPER_REGISTRY_LOGIC = address(keeperRegistryLogic);
+    setConfig(config);
+  }
+
+  // ACTIONS
+
+  /**
+   * @notice adds a new upkeep
+   * @param target address to perform upkeep on
+   * @param gasLimit amount of gas to provide the target contract when
+   * performing upkeep
+   * @param admin address to cancel upkeep and withdraw remaining funds
+   * @param checkData data passed to the contract when checking for upkeep
+   */
+  function registerUpkeep(
+    address target,
+    uint32 gasLimit,
+    address admin,
+    bytes calldata checkData
+  ) external override returns (uint256 id) {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice simulated by keepers via eth_call to see if the upkeep needs to be
+   * performed. If upkeep is needed, the call then simulates performUpkeep
+   * to make sure it succeeds. Finally, it returns the success status along with
+   * payment information and the perform data payload.
+   * @param id identifier of the upkeep to check
+   * @param from the address to simulate performing the upkeep from
+   */
+  function checkUpkeep(
+    uint256 id,
+    address from
+  )
+    external
+    override
+    cannotExecute
+    returns (
+      bytes memory performData,
+      uint256 maxLinkPayment,
+      uint256 gasLimit,
+      uint256 adjustedGasWei,
+      uint256 linkEth
+    )
+  {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice executes the upkeep with the perform data returned from
+   * checkUpkeep, validates the keeper's permissions, and pays the keeper.
+   * @param id identifier of the upkeep to execute the data with.
+   * @param performData calldata parameter to be passed to the target upkeep.
+   */
+  function performUpkeep(
+    uint256 id,
+    bytes calldata performData
+  ) external override whenNotPaused returns (bool success) {
+    return _performUpkeepWithParams(_generatePerformParams(msg.sender, id, performData, true));
+  }
+
+  /**
+   * @notice prevent an upkeep from being performed in the future
+   * @param id upkeep to be canceled
+   */
+  function cancelUpkeep(uint256 id) external override {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice pause an upkeep
+   * @param id upkeep to be paused
+   */
+  function pauseUpkeep(uint256 id) external override {
+    Upkeep memory upkeep = s_upkeep[id];
+    requireAdminAndNotCancelled(upkeep);
+    if (upkeep.paused) revert OnlyUnpausedUpkeep();
+    s_upkeep[id].paused = true;
+    s_upkeepIDs.remove(id);
+    emit UpkeepPaused(id);
+  }
+
+  /**
+   * @notice unpause an upkeep
+   * @param id upkeep to be resumed
+   */
+  function unpauseUpkeep(uint256 id) external override {
+    Upkeep memory upkeep = s_upkeep[id];
+    requireAdminAndNotCancelled(upkeep);
+    if (!upkeep.paused) revert OnlyPausedUpkeep();
+    s_upkeep[id].paused = false;
+    s_upkeepIDs.add(id);
+    emit UpkeepUnpaused(id);
+  }
+
+  /**
+   * @notice update the check data of an upkeep
+   * @param id the id of the upkeep whose check data needs to be updated
+   * @param newCheckData the new check data
+   */
+  function updateCheckData(uint256 id, bytes calldata newCheckData) external override {
+    Upkeep memory upkeep = s_upkeep[id];
+    requireAdminAndNotCancelled(upkeep);
+    s_checkData[id] = newCheckData;
+    emit UpkeepCheckDataUpdated(id, newCheckData);
+  }
+
+  /**
+   * @notice adds PLI funding for an upkeep by transferring from the sender's
+   * PLI balance
+   * @param id upkeep to fund
+   * @param amount number of PLI to transfer
+   */
+  function addFunds(uint256 id, uint96 amount) external override {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice uses PLI's transferAndCall to add funding to an upkeep
+   * @dev safe to cast uint256 to 
uint96 as total PLI supply is under UINT96MAX
+   * @param sender the account which transferred the funds
+   * @param amount number of PLI to transfer
+   */
+  function onTokenTransfer(address sender, uint256 amount, bytes calldata data) external override {
+    if (msg.sender != address(PLI)) revert OnlyCallableByPLIToken();
+    if (data.length != 32) revert InvalidDataLength();
+    uint256 id = abi.decode(data, (uint256));
+    if (s_upkeep[id].maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled();
+
+    s_upkeep[id].balance = s_upkeep[id].balance + uint96(amount);
+    s_expectedLinkBalance = s_expectedLinkBalance + amount;
+
+    emit FundsAdded(id, sender, uint96(amount));
+  }
+
+  /**
+   * @notice removes funding from a canceled upkeep
+   * @param id upkeep to withdraw funds from
+   * @param to destination address for sending remaining funds
+   */
+  function withdrawFunds(uint256 id, address to) external {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice withdraws PLI funds collected through cancellation fees
+   */
+  function withdrawOwnerFunds() external {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice allows the admin of an upkeep to modify gas limit
+   * @param id upkeep to change the gas limit for
+   * @param gasLimit new gas limit for the upkeep
+   */
+  function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external override {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice recovers PLI funds improperly transferred to the registry
+   * @dev In principle this function's execution cost could exceed block
+   * gas limit. However, in our anticipated deployment, the number of upkeeps and
+   * keepers will be low enough to avoid this problem.
+   */
+  function recoverFunds() external {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice withdraws a keeper's payment, callable only by the keeper's payee
+   * @param from keeper address
+   * @param to address to send the payment to
+   */
+  function withdrawPayment(address from, address to) external {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice proposes the safe transfer of a keeper's payee to another address
+   * @param keeper address of the keeper to transfer payee role
+   * @param proposed address to nominate for next payeeship
+   */
+  function transferPayeeship(address keeper, address proposed) external {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice accepts the safe transfer of payee role for a keeper
+   * @param keeper address to accept the payee role for
+   */
+  function acceptPayeeship(address keeper) external {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice proposes the safe transfer of an upkeep's admin role to another address
+   * @param id the upkeep id to transfer admin
+   * @param proposed address to nominate for the new upkeep admin
+   */
+  function transferUpkeepAdmin(uint256 id, address proposed) external override {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice accepts the safe transfer of admin role for an upkeep
+   * @param id the upkeep id
+   */
+  function acceptUpkeepAdmin(uint256 id) external override {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice signals to keepers that they should not perform upkeeps until the
+   * contract has been unpaused
+   */
+  function pause() external {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @notice signals to keepers that they can perform upkeeps once again after
+   * having been paused
+   */
+  function unpause() external {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  // SETTERS
+
+  /**
+   * @notice updates the 
configuration of the registry + * @param config registry config fields + */ + function setConfig(Config memory config) public onlyOwner { + if (config.maxPerformGas < s_storage.maxPerformGas) revert GasLimitCanOnlyIncrease(); + s_storage = Storage({ + paymentPremiumPPB: config.paymentPremiumPPB, + flatFeeMicroLink: config.flatFeeMicroLink, + blockCountPerTurn: config.blockCountPerTurn, + checkGasLimit: config.checkGasLimit, + stalenessSeconds: config.stalenessSeconds, + gasCeilingMultiplier: config.gasCeilingMultiplier, + minUpkeepSpend: config.minUpkeepSpend, + maxPerformGas: config.maxPerformGas, + nonce: s_storage.nonce + }); + s_fallbackGasPrice = config.fallbackGasPrice; + s_fallbackLinkPrice = config.fallbackLinkPrice; + s_transcoder = config.transcoder; + s_registrar = config.registrar; + emit ConfigSet(config); + } + + /** + * @notice update the list of keepers allowed to perform upkeep + * @param keepers list of addresses allowed to perform upkeep + * @param payees addresses corresponding to keepers who are allowed to + * move payments which have been accrued + */ + function setKeepers(address[] calldata keepers, address[] calldata payees) external { + // Executed through logic contract + _fallback(); + } + + // GETTERS + + /** + * @notice read all of the details about an upkeep + */ + function getUpkeep( + uint256 id + ) + external + view + override + returns ( + address target, + uint32 executeGas, + bytes memory checkData, + uint96 balance, + address lastKeeper, + address admin, + uint64 maxValidBlocknumber, + uint96 amountSpent, + bool paused + ) + { + Upkeep memory reg = s_upkeep[id]; + return ( + reg.target, + reg.executeGas, + s_checkData[id], + reg.balance, + reg.lastKeeper, + reg.admin, + reg.maxValidBlocknumber, + reg.amountSpent, + reg.paused + ); + } + + /** + * @notice retrieve active upkeep IDs. Active upkeep is defined as an upkeep which is not paused and not canceled. 
+   * @param startIndex starting index in list
+   * @param maxCount max count to retrieve (0 = unlimited)
+   * @dev the order of IDs in the list is **not guaranteed**, therefore, if making successive calls, one
+   * should consider keeping the blockheight constant to ensure a holistic picture of the contract state
+   */
+  function getActiveUpkeepIDs(uint256 startIndex, uint256 maxCount) external view override returns (uint256[] memory) {
+    uint256 maxIdx = s_upkeepIDs.length();
+    if (startIndex >= maxIdx) revert IndexOutOfRange();
+    if (maxCount == 0 || maxCount > maxIdx - startIndex) { // clamp so the loop never reads past the end of the set
+      maxCount = maxIdx - startIndex;
+    }
+    uint256[] memory ids = new uint256[](maxCount);
+    for (uint256 idx = 0; idx < maxCount; idx++) {
+      ids[idx] = s_upkeepIDs.at(startIndex + idx);
+    }
+    return ids;
+  }
+
+  /**
+   * @notice read the current info about any keeper address
+   */
+  function getKeeperInfo(address query) external view override returns (address payee, bool active, uint96 balance) {
+    KeeperInfo memory keeper = s_keeperInfo[query];
+    return (keeper.payee, keeper.active, keeper.balance);
+  }
+
+  /**
+   * @notice read the current state of the registry
+   */
+  function getState()
+    external
+    view
+    override
+    returns (State memory state, Config memory config, address[] memory keepers)
+  {
+    Storage memory store = s_storage;
+    state.nonce = store.nonce;
+    state.ownerLinkBalance = s_ownerLinkBalance;
+    state.expectedLinkBalance = s_expectedLinkBalance;
+    state.numUpkeeps = s_upkeepIDs.length();
+    config.paymentPremiumPPB = store.paymentPremiumPPB;
+    config.flatFeeMicroLink = store.flatFeeMicroLink;
+    config.blockCountPerTurn = store.blockCountPerTurn;
+    config.checkGasLimit = store.checkGasLimit;
+    config.stalenessSeconds = store.stalenessSeconds;
+    config.gasCeilingMultiplier = store.gasCeilingMultiplier;
+    config.minUpkeepSpend = store.minUpkeepSpend;
+    config.maxPerformGas = store.maxPerformGas;
+    config.fallbackGasPrice = s_fallbackGasPrice;
+    config.fallbackLinkPrice = s_fallbackLinkPrice;
+    config.transcoder = s_transcoder;
+    config.registrar = s_registrar;
+    return (state, config, s_keeperList);
+  }
+
+  /**
+   * @notice calculates the minimum balance required for an upkeep to remain eligible
+   * @param id the upkeep id to calculate minimum balance for
+   */
+  function getMinBalanceForUpkeep(uint256 id) external view returns (uint96 minBalance) {
+    return getMaxPaymentForGas(s_upkeep[id].executeGas);
+  }
+
+  /**
+   * @notice calculates the maximum payment for a given gas limit
+   * @param gasLimit the gas to calculate payment for
+   */
+  function getMaxPaymentForGas(uint256 gasLimit) public view returns (uint96 maxPayment) {
+    (uint256 fastGasWei, uint256 linkEth) = _getFeedData();
+    return _calculatePaymentAmount(gasLimit, fastGasWei, linkEth, false);
+  }
+
+  /**
+   * @notice retrieves the migration permission for a peer registry
+   */
+  function getPeerRegistryMigrationPermission(address peer) external view returns (MigrationPermission) {
+    return s_peerRegistryMigrationPermission[peer];
+  }
+
+  /**
+   * @notice sets the peer registry migration permission
+   */
+  function setPeerRegistryMigrationPermission(address peer, MigrationPermission permission) external {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @inheritdoc MigratableKeeperRegistryInterface
+   */
+  function migrateUpkeeps(uint256[] calldata ids, address destination) external override {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @inheritdoc MigratableKeeperRegistryInterface
+   */
+  UpkeepFormat public constant override upkeepTranscoderVersion = UPKEEP_TRANSCODER_VERSION_BASE;
+
+  /**
+   * @inheritdoc MigratableKeeperRegistryInterface
+   */
+  function receiveUpkeeps(bytes calldata encodedUpkeeps) external override {
+    // Executed through logic contract
+    _fallback();
+  }
+
+  /**
+   * @dev This is the address to which proxy functions are delegated to
+   */
+  function _implementation() internal view override returns (address) {
+    return KEEPER_REGISTRY_LOGIC;
+  }
+
+  /**
+   * @dev 
calls target address with exactly gasAmount gas and data as calldata
+   * or reverts if at least gasAmount gas is not available
+   */
+  function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) {
+    assembly {
+      let g := gas()
+      // Compute g -= PERFORM_GAS_CUSHION and check for underflow
+      if lt(g, PERFORM_GAS_CUSHION) {
+        revert(0, 0)
+      }
+      g := sub(g, PERFORM_GAS_CUSHION)
+      // if g - g//64 <= gasAmount, revert
+      // (we subtract g//64 because of EIP-150)
+      if iszero(gt(sub(g, div(g, 64)), gasAmount)) {
+        revert(0, 0)
+      }
+      // solidity calls check that a contract actually exists at the destination, so we do the same
+      if iszero(extcodesize(target)) {
+        revert(0, 0)
+      }
+      // call and return whether we succeeded. ignore return data
+      success := call(gasAmount, target, 0, add(data, 0x20), mload(data), 0, 0)
+    }
+    return success;
+  }
+
+  /**
+   * @dev calls the Upkeep target with the performData param passed in by the
+   * keeper and the exact gas required by the Upkeep, then settles payment
+   */
+  function _performUpkeepWithParams(PerformParams memory params) private nonReentrant returns (bool success) {
+    Upkeep memory upkeep = s_upkeep[params.id];
+    if (upkeep.maxValidBlocknumber <= block.number) revert UpkeepCancelled();
+    _prePerformUpkeep(upkeep, params.from, params.maxLinkPayment);
+
+    uint256 gasUsed = gasleft();
+    bytes memory callData = abi.encodeWithSelector(PERFORM_SELECTOR, params.performData);
+    success = _callWithExactGas(params.gasLimit, upkeep.target, callData);
+    gasUsed = gasUsed - gasleft();
+    uint96 payment = _calculatePaymentAmount(gasUsed, params.fastGasWei, params.linkEth, true);
+
+    s_upkeep[params.id].balance = s_upkeep[params.id].balance - payment;
+    s_upkeep[params.id].amountSpent = s_upkeep[params.id].amountSpent + payment;
+    s_upkeep[params.id].lastKeeper = params.from;
+    s_keeperInfo[params.from].balance = s_keeperInfo[params.from].balance + payment;
+
+    emit UpkeepPerformed(params.id, success, params.from, payment, params.performData);
+    return success;
+  }
+}
diff --git a/contracts/src/v0.8/automation/v1_3/KeeperRegistryBase1_3.sol b/contracts/src/v0.8/automation/v1_3/KeeperRegistryBase1_3.sol
new file mode 100644
index 00000000..5212ce61
--- /dev/null
+++ b/contracts/src/v0.8/automation/v1_3/KeeperRegistryBase1_3.sol
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: MIT
+pragma solidity 0.8.6;
+
+import "@openzeppelin/contracts/security/Pausable.sol";
+import "@openzeppelin/contracts/security/ReentrancyGuard.sol";
+import "@openzeppelin/contracts/utils/structs/EnumerableSet.sol";
+import "../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol";
+import "../../vendor/@eth-optimism/contracts/v0.8.6/contracts/L2/predeploys/OVM_GasPriceOracle.sol";
+import "../ExecutionPrevention.sol";
+import {Config, Upkeep} from "../interfaces/v1_3/AutomationRegistryInterface1_3.sol";
+import "../../shared/access/ConfirmedOwner.sol";
+import "../../shared/interfaces/AggregatorV3Interface.sol";
+import "../../shared/interfaces/LinkTokenInterface.sol";
+import "../interfaces/KeeperCompatibleInterface.sol";
+import "../interfaces/UpkeepTranscoderInterface.sol";
+
+/**
+ * @notice Base Keeper Registry contract, contains shared logic between
+ * KeeperRegistry and KeeperRegistryLogic
+ */
+abstract contract KeeperRegistryBase1_3 is ConfirmedOwner, ExecutionPrevention, ReentrancyGuard, Pausable {
+  address internal constant ZERO_ADDRESS = address(0);
+  address internal constant IGNORE_ADDRESS = 0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF;
+  bytes4 internal constant CHECK_SELECTOR = KeeperCompatibleInterface.checkUpkeep.selector;
+  bytes4 internal constant PERFORM_SELECTOR = KeeperCompatibleInterface.performUpkeep.selector;
+  uint256 internal constant PERFORM_GAS_MIN = 2_300;
+  uint256 internal constant CANCELLATION_DELAY = 50;
+  uint256 internal constant PERFORM_GAS_CUSHION = 5_000;
+  uint256 internal constant PPB_BASE = 1_000_000_000;
+  uint32 internal constant UINT32_MAX = type(uint32).max;
+  uint96 internal 
constant PLI_TOTAL_SUPPLY = 1e27;
+  UpkeepFormat internal constant UPKEEP_TRANSCODER_VERSION_BASE = UpkeepFormat.V2;
+  // L1_FEE_DATA_PADDING includes 35 bytes for L1 data padding for Optimism
+  bytes internal constant L1_FEE_DATA_PADDING =
+    "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff";
+  // MAX_INPUT_DATA represents the estimated max size of the sum of L1 data padding and msg.data in performUpkeep
+  // function, which includes 4 bytes for function selector, 32 bytes for upkeep id, 35 bytes for data padding, and
+  // 64 bytes for estimated perform data
+  bytes internal constant MAX_INPUT_DATA =
+    "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff";
+
+  address[] internal s_keeperList;
+  EnumerableSet.UintSet internal s_upkeepIDs;
+  mapping(uint256 => Upkeep) internal s_upkeep;
+  mapping(address => KeeperInfo) internal s_keeperInfo;
+  mapping(address => address) internal s_proposedPayee;
+  mapping(uint256 => address) internal s_proposedAdmin;
+  mapping(uint256 => bytes) internal s_checkData;
+  mapping(address => MigrationPermission) internal s_peerRegistryMigrationPermission;
+  Storage internal s_storage;
+  uint256 internal s_fallbackGasPrice; // not in config object for gas savings
+  uint256 internal s_fallbackLinkPrice; // not in config object for gas savings
+  uint96 internal s_ownerLinkBalance;
+  uint256 internal s_expectedLinkBalance;
+  address internal s_transcoder;
+  address internal s_registrar;
+
+  LinkTokenInterface public immutable PLI;
+  AggregatorV3Interface public immutable PLI_ETH_FEED;
+  AggregatorV3Interface public immutable FAST_GAS_FEED;
+  OVM_GasPriceOracle public immutable OPTIMISM_ORACLE = OVM_GasPriceOracle(0x420000000000000000000000000000000000000F);
+  ArbGasInfo public immutable ARB_NITRO_ORACLE = ArbGasInfo(0x000000000000000000000000000000000000006C);
+  PaymentModel public immutable PAYMENT_MODEL;
+  uint256 public immutable REGISTRY_GAS_OVERHEAD;
+
+  error ArrayHasNoEntries();
+  error CannotCancel();
+  error DuplicateEntry();
+  error EmptyAddress();
+  error GasLimitCanOnlyIncrease();
+  error GasLimitOutsideRange();
+  error IndexOutOfRange();
+  error InsufficientFunds();
+  error InvalidDataLength();
+  error InvalidPayee();
+  error InvalidRecipient();
+  error KeepersMustTakeTurns();
+  error MigrationNotPermitted();
+  error NotAContract();
+  error OnlyActiveKeepers();
+  error OnlyCallableByAdmin();
+  error OnlyCallableByPLIToken();
+  error OnlyCallableByOwnerOrAdmin();
+  error OnlyCallableByOwnerOrRegistrar();
+  error OnlyCallableByPayee();
+  error OnlyCallableByProposedAdmin();
+  error OnlyCallableByProposedPayee();
+  error OnlyPausedUpkeep();
+  error OnlyUnpausedUpkeep();
+  error ParameterLengthError();
+  error PaymentGreaterThanAllPLI();
+  error TargetCheckReverted(bytes reason);
+  error TranscoderNotSet();
+  error UpkeepCancelled();
+  error UpkeepNotCanceled();
+  error UpkeepNotNeeded();
+  error ValueNotChanged();
+
+  enum MigrationPermission {
+    NONE,
+    OUTGOING,
+    INCOMING,
+    BIDIRECTIONAL
+  }
+
+  enum PaymentModel {
+    DEFAULT,
+    ARBITRUM,
+    OPTIMISM
+  }
+
+  /**
+   * @notice storage of the registry, contains a mix of config and state data
+   */
+  struct Storage {
+    uint32 paymentPremiumPPB;
+    uint32 flatFeeMicroLink;
+    uint24 blockCountPerTurn;
+    uint32 checkGasLimit;
+    uint24 stalenessSeconds;
+    uint16 gasCeilingMultiplier;
+    uint96 minUpkeepSpend; // 1 full evm word
+    uint32 maxPerformGas;
+    uint32 nonce;
+  }
+
+  struct KeeperInfo {
+    address payee;
+    uint96 balance;
+    bool active;
+  }
+
+  struct PerformParams {
+    address from;
+    uint256 id;
+    bytes performData;
+    uint256 maxLinkPayment;
+    uint256 gasLimit;
+    uint256 fastGasWei;
+    uint256 linkEth;
+  }
+
+  event ConfigSet(Config config);
+  event FundsAdded(uint256 indexed id, address indexed from, uint96 amount);
+  event FundsWithdrawn(uint256 indexed id, uint256 amount, address to);
+  event KeepersUpdated(address[] keepers, address[] payees);
+  event OwnerFundsWithdrawn(uint96 amount);
+  event PayeeshipTransferRequested(address indexed keeper, address indexed from, address indexed to);
+  event PayeeshipTransferred(address indexed keeper, address indexed from, address indexed to);
+  event PaymentWithdrawn(address indexed keeper, uint256 indexed amount, address indexed to, address payee);
+  event UpkeepAdminTransferRequested(uint256 indexed id, address indexed from, address indexed to);
+  event UpkeepAdminTransferred(uint256 indexed id, address indexed from, address indexed to);
+  event UpkeepCanceled(uint256 indexed id, uint64 indexed atBlockHeight);
+  event UpkeepCheckDataUpdated(uint256 indexed id, bytes newCheckData);
+  event UpkeepGasLimitSet(uint256 indexed id, uint96 gasLimit);
+  event UpkeepMigrated(uint256 indexed id, uint256 remainingBalance, address destination);
+  event UpkeepPaused(uint256 indexed id);
+  event UpkeepPerformed(
+    uint256 indexed id,
+    bool indexed success,
+    address indexed from,
+    uint96 payment,
+    bytes performData
+  );
+  event UpkeepReceived(uint256 indexed id, uint256 startingBalance, address importedFrom);
+  event UpkeepUnpaused(uint256 indexed id);
+  event UpkeepRegistered(uint256 indexed id, uint32 executeGas, address admin);
+
+  /**
+   * @param paymentModel the payment model of default, Arbitrum, or Optimism
+   * @param registryGasOverhead the gas overhead used by registry in performUpkeep
+   * @param link address of the PLI Token
+   * @param linkEthFeed address of the PLI/ETH price feed
+   * @param fastGasFeed address of the Fast Gas price feed
+   */
+  constructor(
+    PaymentModel paymentModel,
+    uint256 registryGasOverhead,
+    address link,
+    address linkEthFeed,
+    address fastGasFeed
+  ) ConfirmedOwner(msg.sender) {
+    PAYMENT_MODEL = paymentModel;
+    REGISTRY_GAS_OVERHEAD = registryGasOverhead;
+    if (ZERO_ADDRESS == link || ZERO_ADDRESS == linkEthFeed || ZERO_ADDRESS == fastGasFeed) {
+      revert EmptyAddress();
+    }
+    PLI = LinkTokenInterface(link);
+    PLI_ETH_FEED = AggregatorV3Interface(linkEthFeed);
+    FAST_GAS_FEED = AggregatorV3Interface(fastGasFeed);
+  }
+
+  /**
+   * @dev retrieves feed data for fast gas/eth and link/eth prices. if the feed
+   * data is stale it uses the configured fallback price. Once a price is picked
+   * for gas it takes the min of gas price in the transaction or the fast gas
+   * price in order to reduce costs for the upkeep clients.
+   */
+  function _getFeedData() internal view returns (uint256 gasWei, uint256 linkEth) {
+    uint32 stalenessSeconds = s_storage.stalenessSeconds;
+    bool staleFallback = stalenessSeconds > 0;
+    uint256 timestamp;
+    int256 feedValue;
+    (, feedValue, , timestamp, ) = FAST_GAS_FEED.latestRoundData();
+    if ((staleFallback && stalenessSeconds < block.timestamp - timestamp) || feedValue <= 0) {
+      gasWei = s_fallbackGasPrice;
+    } else {
+      gasWei = uint256(feedValue);
+    }
+    (, feedValue, , timestamp, ) = PLI_ETH_FEED.latestRoundData();
+    if ((staleFallback && stalenessSeconds < block.timestamp - timestamp) || feedValue <= 0) {
+      linkEth = s_fallbackLinkPrice;
+    } else {
+      linkEth = uint256(feedValue);
+    }
+    return (gasWei, linkEth);
+  }
+
+  /**
+   * @dev calculates PLI paid for gas spent plus a configured premium percentage
+   * @param gasLimit the amount of gas used
+   * @param fastGasWei the fast gas price
+   * @param linkEth the exchange ratio between PLI and ETH
+   * @param isExecution if this is triggered by a perform upkeep function
+   */
+  function _calculatePaymentAmount(
+    uint256 gasLimit,
+    uint256 fastGasWei,
+    uint256 linkEth,
+    bool isExecution
+  ) internal view returns (uint96 payment) {
+    Storage memory store = s_storage;
+    uint256 gasWei = fastGasWei * store.gasCeilingMultiplier;
+    // in case it's actual execution use actual gas price, capped by fastGasWei * gasCeilingMultiplier
+    if (isExecution && tx.gasprice < gasWei) {
+      gasWei = tx.gasprice;
+    }
+
+    uint256 weiForGas = gasWei * (gasLimit + REGISTRY_GAS_OVERHEAD);
+    uint256 premium = PPB_BASE + store.paymentPremiumPPB;
+    uint256 l1CostWei = 0;
+    if (PAYMENT_MODEL == PaymentModel.OPTIMISM) {
+      bytes memory txCallData = new bytes(0);
+      if (isExecution) {
+        txCallData = bytes.concat(msg.data, L1_FEE_DATA_PADDING);
+      } else {
+        txCallData = MAX_INPUT_DATA;
+      }
+      l1CostWei = OPTIMISM_ORACLE.getL1Fee(txCallData);
+    } else if (PAYMENT_MODEL == PaymentModel.ARBITRUM) {
+      l1CostWei = ARB_NITRO_ORACLE.getCurrentTxL1GasFees();
+    }
+    // if it's not performing upkeeps, use gas ceiling multiplier to estimate the upper bound
+    if (!isExecution) {
+      l1CostWei = store.gasCeilingMultiplier * l1CostWei;
+    }
+
+    uint256 total = ((weiForGas + l1CostWei) * 1e9 * premium) / linkEth + uint256(store.flatFeeMicroLink) * 1e12;
+    if (total > PLI_TOTAL_SUPPLY) revert PaymentGreaterThanAllPLI();
+    return uint96(total); // PLI_TOTAL_SUPPLY < UINT96_MAX
+  }
+
+  /**
+   * @dev ensures all required checks are passed before an upkeep is performed
+   */
+  function _prePerformUpkeep(Upkeep memory upkeep, address from, uint256 maxLinkPayment) internal view {
+    if (upkeep.paused) revert OnlyUnpausedUpkeep();
+    if (!s_keeperInfo[from].active) revert OnlyActiveKeepers();
+    if (upkeep.balance < maxLinkPayment) revert InsufficientFunds();
+    if (upkeep.lastKeeper == from) revert KeepersMustTakeTurns();
+  }
+
+  /**
+   * @dev ensures the upkeep is not cancelled and the caller is the upkeep admin
+   */
+  function requireAdminAndNotCancelled(Upkeep memory upkeep) internal view {
+    if (msg.sender != upkeep.admin) revert OnlyCallableByAdmin();
+    if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled();
+  }
+
+  /**
+   * @dev generates a PerformParams struct for use in _performUpkeepWithParams()
+   */
+  function _generatePerformParams(
+    address from,
+    uint256 id,
+    bytes memory performData,
+    bool isExecution
+  ) internal view returns (PerformParams memory) {
+    uint256 gasLimit = s_upkeep[id].executeGas;
+    (uint256 fastGasWei, uint256 linkEth) = _getFeedData();
+    uint96 maxLinkPayment = _calculatePaymentAmount(gasLimit, fastGasWei, linkEth, isExecution);
+
+    return
+      PerformParams({
+        from: from,
+        id: id,
+        performData: performData,
+        maxLinkPayment: maxLinkPayment,
+        gasLimit: gasLimit,
+        fastGasWei: fastGasWei,
+        linkEth: linkEth
+      });
+  }
+}
diff --git a/contracts/src/v0.8/automation/v1_3/KeeperRegistryLogic1_3.sol b/contracts/src/v0.8/automation/v1_3/KeeperRegistryLogic1_3.sol
new file mode 100644
index 00000000..5a7c8dd6
--- /dev/null
+++ b/contracts/src/v0.8/automation/v1_3/KeeperRegistryLogic1_3.sol
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: MIT
+pragma solidity 0.8.6;
+
+import "@openzeppelin/contracts/utils/structs/EnumerableSet.sol";
+import "@openzeppelin/contracts/utils/Address.sol";
+import "./KeeperRegistryBase1_3.sol";
+import "../interfaces/MigratableKeeperRegistryInterface.sol";
+import "../interfaces/UpkeepTranscoderInterface.sol";
+
+/**
+ * @notice Logic contract, works in tandem with KeeperRegistry as a proxy
+ */
+contract KeeperRegistryLogic1_3 is KeeperRegistryBase1_3 {
+  using Address for address;
+  using EnumerableSet for EnumerableSet.UintSet;
+
+  /**
+   * @param paymentModel one of Default, Arbitrum, Optimism
+   * @param registryGasOverhead the gas overhead used by registry in performUpkeep
+   * @param link address of the PLI Token
+   * @param linkEthFeed address of the PLI/ETH price feed
+   * @param fastGasFeed address of the Fast Gas price feed
+   */
+  constructor(
+    PaymentModel paymentModel,
+    uint256 registryGasOverhead,
+    address link,
+    address linkEthFeed,
+    address fastGasFeed
+  ) KeeperRegistryBase1_3(paymentModel, registryGasOverhead, link, linkEthFeed, fastGasFeed) {}
+
+  function checkUpkeep(
+    uint256 id,
+    address from
+  )
+    external
+    cannotExecute
+    returns (
+      bytes memory performData,
+      uint256 maxLinkPayment,
+      uint256 gasLimit,
+      uint256 adjustedGasWei,
+      uint256 linkEth
+    )
+  {
+    
Upkeep memory upkeep = s_upkeep[id]; + + bytes memory callData = abi.encodeWithSelector(CHECK_SELECTOR, s_checkData[id]); + (bool success, bytes memory result) = upkeep.target.call{gas: s_storage.checkGasLimit}(callData); + + if (!success) revert TargetCheckReverted(result); + + (success, performData) = abi.decode(result, (bool, bytes)); + if (!success) revert UpkeepNotNeeded(); + + PerformParams memory params = _generatePerformParams(from, id, performData, false); + _prePerformUpkeep(upkeep, params.from, params.maxLinkPayment); + + return ( + performData, + params.maxLinkPayment, + params.gasLimit, + // adjustedGasWei equals fastGasWei multiplies gasCeilingMultiplier in non-execution cases + params.fastGasWei * s_storage.gasCeilingMultiplier, + params.linkEth + ); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function withdrawOwnerFunds() external onlyOwner { + uint96 amount = s_ownerLinkBalance; + + s_expectedLinkBalance = s_expectedLinkBalance - amount; + s_ownerLinkBalance = 0; + + emit OwnerFundsWithdrawn(amount); + PLI.transfer(msg.sender, amount); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function recoverFunds() external onlyOwner { + uint256 total = PLI.balanceOf(address(this)); + PLI.transfer(msg.sender, total - s_expectedLinkBalance); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function setKeepers(address[] calldata keepers, address[] calldata payees) external onlyOwner { + if (keepers.length != payees.length || keepers.length < 2) revert ParameterLengthError(); + for (uint256 i = 0; i < s_keeperList.length; i++) { + address keeper = s_keeperList[i]; + s_keeperInfo[keeper].active = false; + } + for (uint256 i = 0; i < keepers.length; i++) { + address keeper = keepers[i]; + KeeperInfo storage s_keeper = s_keeperInfo[keeper]; + address oldPayee = s_keeper.payee; + address newPayee = payees[i]; + if ( + (newPayee == ZERO_ADDRESS) || (oldPayee != ZERO_ADDRESS && oldPayee != 
newPayee && newPayee != IGNORE_ADDRESS) + ) revert InvalidPayee(); + if (s_keeper.active) revert DuplicateEntry(); + s_keeper.active = true; + if (newPayee != IGNORE_ADDRESS) { + s_keeper.payee = newPayee; + } + } + s_keeperList = keepers; + emit KeepersUpdated(keepers, payees); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function pause() external onlyOwner { + _pause(); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function unpause() external onlyOwner { + _unpause(); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function setPeerRegistryMigrationPermission(address peer, MigrationPermission permission) external onlyOwner { + s_peerRegistryMigrationPermission[peer] = permission; + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + bytes calldata checkData + ) external returns (uint256 id) { + if (msg.sender != owner() && msg.sender != s_registrar) revert OnlyCallableByOwnerOrRegistrar(); + + id = uint256(keccak256(abi.encodePacked(blockhash(block.number - 1), address(this), s_storage.nonce))); + _createUpkeep(id, target, gasLimit, admin, 0, checkData, false); + s_storage.nonce++; + emit UpkeepRegistered(id, gasLimit, admin); + return id; + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function cancelUpkeep(uint256 id) external { + Upkeep memory upkeep = s_upkeep[id]; + bool canceled = upkeep.maxValidBlocknumber != UINT32_MAX; + bool isOwner = msg.sender == owner(); + + if (canceled && !(isOwner && upkeep.maxValidBlocknumber > block.number)) revert CannotCancel(); + if (!isOwner && msg.sender != upkeep.admin) revert OnlyCallableByOwnerOrAdmin(); + + uint256 height = block.number; + if (!isOwner) { + height = height + CANCELLATION_DELAY; + } + s_upkeep[id].maxValidBlocknumber = uint32(height); + s_upkeepIDs.remove(id); + + // charge the cancellation fee if the 
minUpkeepSpend is not met + uint96 minUpkeepSpend = s_storage.minUpkeepSpend; + uint96 cancellationFee = 0; + // cancellationFee is supposed to be min(max(minUpkeepSpend - amountSpent,0), amountLeft) + if (upkeep.amountSpent < minUpkeepSpend) { + cancellationFee = minUpkeepSpend - upkeep.amountSpent; + if (cancellationFee > upkeep.balance) { + cancellationFee = upkeep.balance; + } + } + s_upkeep[id].balance = upkeep.balance - cancellationFee; + s_ownerLinkBalance = s_ownerLinkBalance + cancellationFee; + + emit UpkeepCanceled(id, uint64(height)); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function addFunds(uint256 id, uint96 amount) external { + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + + s_upkeep[id].balance = upkeep.balance + amount; + s_expectedLinkBalance = s_expectedLinkBalance + amount; + PLI.transferFrom(msg.sender, address(this), amount); + emit FundsAdded(id, msg.sender, amount); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function withdrawFunds(uint256 id, address to) external { + if (to == ZERO_ADDRESS) revert InvalidRecipient(); + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.admin != msg.sender) revert OnlyCallableByAdmin(); + if (upkeep.maxValidBlocknumber > block.number) revert UpkeepNotCanceled(); + + uint96 amountToWithdraw = s_upkeep[id].balance; + s_expectedLinkBalance = s_expectedLinkBalance - amountToWithdraw; + s_upkeep[id].balance = 0; + emit FundsWithdrawn(id, amountToWithdraw, to); + + PLI.transfer(to, amountToWithdraw); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external { + if (gasLimit < PERFORM_GAS_MIN || gasLimit > s_storage.maxPerformGas) revert GasLimitOutsideRange(); + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + if (upkeep.admin != msg.sender) revert 
OnlyCallableByAdmin(); + + s_upkeep[id].executeGas = gasLimit; + + emit UpkeepGasLimitSet(id, gasLimit); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function withdrawPayment(address from, address to) external { + if (to == ZERO_ADDRESS) revert InvalidRecipient(); + KeeperInfo memory keeper = s_keeperInfo[from]; + if (keeper.payee != msg.sender) revert OnlyCallableByPayee(); + + s_keeperInfo[from].balance = 0; + s_expectedLinkBalance = s_expectedLinkBalance - keeper.balance; + emit PaymentWithdrawn(from, keeper.balance, to, msg.sender); + + PLI.transfer(to, keeper.balance); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function transferPayeeship(address keeper, address proposed) external { + if (s_keeperInfo[keeper].payee != msg.sender) revert OnlyCallableByPayee(); + if (proposed == msg.sender) revert ValueNotChanged(); + + if (s_proposedPayee[keeper] != proposed) { + s_proposedPayee[keeper] = proposed; + emit PayeeshipTransferRequested(keeper, msg.sender, proposed); + } + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function acceptPayeeship(address keeper) external { + if (s_proposedPayee[keeper] != msg.sender) revert OnlyCallableByProposedPayee(); + address past = s_keeperInfo[keeper].payee; + s_keeperInfo[keeper].payee = msg.sender; + s_proposedPayee[keeper] = ZERO_ADDRESS; + + emit PayeeshipTransferred(keeper, past, msg.sender); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function transferUpkeepAdmin(uint256 id, address proposed) external { + Upkeep memory upkeep = s_upkeep[id]; + requireAdminAndNotCancelled(upkeep); + if (proposed == msg.sender) revert ValueNotChanged(); + if (proposed == ZERO_ADDRESS) revert InvalidRecipient(); + + if (s_proposedAdmin[id] != proposed) { + s_proposedAdmin[id] = proposed; + emit UpkeepAdminTransferRequested(id, msg.sender, proposed); + } + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function 
acceptUpkeepAdmin(uint256 id) external { + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + if (s_proposedAdmin[id] != msg.sender) revert OnlyCallableByProposedAdmin(); + address past = upkeep.admin; + s_upkeep[id].admin = msg.sender; + s_proposedAdmin[id] = ZERO_ADDRESS; + + emit UpkeepAdminTransferred(id, past, msg.sender); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function migrateUpkeeps(uint256[] calldata ids, address destination) external { + if ( + s_peerRegistryMigrationPermission[destination] != MigrationPermission.OUTGOING && + s_peerRegistryMigrationPermission[destination] != MigrationPermission.BIDIRECTIONAL + ) revert MigrationNotPermitted(); + if (s_transcoder == ZERO_ADDRESS) revert TranscoderNotSet(); + if (ids.length == 0) revert ArrayHasNoEntries(); + uint256 id; + Upkeep memory upkeep; + uint256 totalBalanceRemaining; + bytes[] memory checkDatas = new bytes[](ids.length); + Upkeep[] memory upkeeps = new Upkeep[](ids.length); + for (uint256 idx = 0; idx < ids.length; idx++) { + id = ids[idx]; + upkeep = s_upkeep[id]; + requireAdminAndNotCancelled(upkeep); + upkeeps[idx] = upkeep; + checkDatas[idx] = s_checkData[id]; + totalBalanceRemaining = totalBalanceRemaining + upkeep.balance; + delete s_upkeep[id]; + delete s_checkData[id]; + // nullify existing proposed admin change if an upkeep is being migrated + delete s_proposedAdmin[id]; + s_upkeepIDs.remove(id); + emit UpkeepMigrated(id, upkeep.balance, destination); + } + s_expectedLinkBalance = s_expectedLinkBalance - totalBalanceRemaining; + bytes memory encodedUpkeeps = abi.encode(ids, upkeeps, checkDatas); + MigratableKeeperRegistryInterface(destination).receiveUpkeeps( + UpkeepTranscoderInterface(s_transcoder).transcodeUpkeeps( + UPKEEP_TRANSCODER_VERSION_BASE, + MigratableKeeperRegistryInterface(destination).upkeepTranscoderVersion(), + encodedUpkeeps + ) + ); + PLI.transfer(destination, 
totalBalanceRemaining); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function receiveUpkeeps(bytes calldata encodedUpkeeps) external { + if ( + s_peerRegistryMigrationPermission[msg.sender] != MigrationPermission.INCOMING && + s_peerRegistryMigrationPermission[msg.sender] != MigrationPermission.BIDIRECTIONAL + ) revert MigrationNotPermitted(); + (uint256[] memory ids, Upkeep[] memory upkeeps, bytes[] memory checkDatas) = abi.decode( + encodedUpkeeps, + (uint256[], Upkeep[], bytes[]) + ); + for (uint256 idx = 0; idx < ids.length; idx++) { + _createUpkeep( + ids[idx], + upkeeps[idx].target, + upkeeps[idx].executeGas, + upkeeps[idx].admin, + upkeeps[idx].balance, + checkDatas[idx], + upkeeps[idx].paused + ); + emit UpkeepReceived(ids[idx], upkeeps[idx].balance, msg.sender); + } + } + + /** + * @notice creates a new upkeep with the given fields + * @param target address to perform upkeep on + * @param gasLimit amount of gas to provide the target contract when + * performing upkeep + * @param admin address to cancel upkeep and withdraw remaining funds + * @param checkData data passed to the contract when checking for upkeep + * @param paused if this upkeep is paused + */ + function _createUpkeep( + uint256 id, + address target, + uint32 gasLimit, + address admin, + uint96 balance, + bytes memory checkData, + bool paused + ) internal whenNotPaused { + if (!target.isContract()) revert NotAContract(); + if (gasLimit < PERFORM_GAS_MIN || gasLimit > s_storage.maxPerformGas) revert GasLimitOutsideRange(); + s_upkeep[id] = Upkeep({ + target: target, + executeGas: gasLimit, + balance: balance, + admin: admin, + maxValidBlocknumber: UINT32_MAX, + lastKeeper: ZERO_ADDRESS, + amountSpent: 0, + paused: paused + }); + s_expectedLinkBalance = s_expectedLinkBalance + balance; + s_checkData[id] = checkData; + s_upkeepIDs.add(id); + } +} diff --git a/contracts/src/v0.8/automation/v2_0/KeeperRegistrar2_0.sol 
b/contracts/src/v0.8/automation/v2_0/KeeperRegistrar2_0.sol new file mode 100644 index 00000000..82c68f1b --- /dev/null +++ b/contracts/src/v0.8/automation/v2_0/KeeperRegistrar2_0.sol @@ -0,0 +1,489 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import "../../shared/interfaces/LinkTokenInterface.sol"; +import "../interfaces/v2_0/AutomationRegistryInterface2_0.sol"; +import "../../interfaces/TypeAndVersionInterface.sol"; +import "../../shared/access/ConfirmedOwner.sol"; +import "../../shared/interfaces/IERC677Receiver.sol"; + +/** + * @notice Contract to accept requests for upkeep registrations + * @dev There are 2 registration workflows in this contract + * Flow 1. auto approve OFF / manual registration - UI calls `register` function on this contract, this contract owner at a later time then manually + * calls `approve` to register upkeep and emit events to inform UI and others interested. + * Flow 2. auto approve ON / real time registration - UI calls `register` function as before, which calls the `registerUpkeep` function directly on + * keeper registry and then emits approved event to finish the flow automatically without manual intervention. + * The idea is to have same interface(functions,events) for UI or anyone using this contract irrespective of auto approve being enabled or not. + * they can just listen to `RegistrationRequested` & `RegistrationApproved` events and know the status on registrations. + */ +contract KeeperRegistrar2_0 is TypeAndVersionInterface, ConfirmedOwner, IERC677Receiver { + /** + * DISABLED: No auto approvals, all new upkeeps should be approved manually. + * ENABLED_SENDER_ALLOWLIST: Auto approvals for allowed senders subject to max allowed. Manual for rest. + * ENABLED_ALL: Auto approvals for all new upkeeps subject to max allowed. 
+ */ + enum AutoApproveType { + DISABLED, + ENABLED_SENDER_ALLOWLIST, + ENABLED_ALL + } + + bytes4 private constant REGISTER_REQUEST_SELECTOR = this.register.selector; + + mapping(bytes32 => PendingRequest) private s_pendingRequests; + + LinkTokenInterface public immutable PLI; + + /** + * @notice versions: + * - KeeperRegistrar 2.0.0: Remove source from register + * Breaks our example of "Register an Upkeep using your own deployed contract" + * - KeeperRegistrar 1.1.0: Add functionality for sender allowlist in auto approve + * : Remove rate limit and add max allowed for auto approve + * - KeeperRegistrar 1.0.0: initial release + */ + string public constant override typeAndVersion = "KeeperRegistrar 2.0.0"; + + struct RegistrarConfig { + AutoApproveType autoApproveConfigType; + uint32 autoApproveMaxAllowed; + uint32 approvedCount; + AutomationRegistryBaseInterface keeperRegistry; + uint96 minPLIJuels; + } + + struct PendingRequest { + address admin; + uint96 balance; + } + + struct RegistrationParams { + string name; + bytes encryptedEmail; + address upkeepContract; + uint32 gasLimit; + address adminAddress; + bytes checkData; + bytes offchainConfig; + uint96 amount; + } + + RegistrarConfig private s_config; + // Only applicable if s_config.configType is ENABLED_SENDER_ALLOWLIST + mapping(address => bool) private s_autoApproveAllowedSenders; + + event RegistrationRequested( + bytes32 indexed hash, + string name, + bytes encryptedEmail, + address indexed upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes checkData, + uint96 amount + ); + + event RegistrationApproved(bytes32 indexed hash, string displayName, uint256 indexed upkeepId); + + event RegistrationRejected(bytes32 indexed hash); + + event AutoApproveAllowedSenderSet(address indexed senderAddress, bool allowed); + + event ConfigChanged( + AutoApproveType autoApproveConfigType, + uint32 autoApproveMaxAllowed, + address keeperRegistry, + uint96 minPLIJuels + ); + + error InvalidAdminAddress(); + 
error RequestNotFound(); + error HashMismatch(); + error OnlyAdminOrOwner(); + error InsufficientPayment(); + error RegistrationRequestFailed(); + error OnlyLink(); + error AmountMismatch(); + error SenderMismatch(); + error FunctionNotPermitted(); + error LinkTransferFailed(address to); + error InvalidDataLength(); + + /* + * @param PLIAddress Address of Link token + * @param autoApproveConfigType setting for auto-approve registrations + * @param autoApproveMaxAllowed max number of registrations that can be auto approved + * @param keeperRegistry keeper registry address + * @param minPLIJuels minimum PLI that new registrations should fund their upkeep with + */ + constructor( + address PLIAddress, + AutoApproveType autoApproveConfigType, + uint16 autoApproveMaxAllowed, + address keeperRegistry, + uint96 minPLIJuels + ) ConfirmedOwner(msg.sender) { + PLI = LinkTokenInterface(PLIAddress); + setRegistrationConfig(autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels); + } + + //EXTERNAL + + /** + * @notice register can only be called through transferAndCall on PLI contract + * @param name string of the upkeep to be registered + * @param encryptedEmail email address of upkeep contact + * @param upkeepContract address to perform upkeep on + * @param gasLimit amount of gas to provide the target contract when performing upkeep + * @param adminAddress address to cancel upkeep and withdraw remaining funds + * @param checkData data passed to the contract when checking for upkeep + * @param amount quantity of PLI upkeep is funded with (specified in Juels) + * @param offchainConfig offchainConfig for upkeep in bytes + * @param sender address of the sender making the request + */ + function register( + string memory name, + bytes calldata encryptedEmail, + address upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes calldata checkData, + bytes calldata offchainConfig, + uint96 amount, + address sender + ) external onlyPLI { + _register( + 
RegistrationParams({ + name: name, + encryptedEmail: encryptedEmail, + upkeepContract: upkeepContract, + gasLimit: gasLimit, + adminAddress: adminAddress, + checkData: checkData, + offchainConfig: offchainConfig, + amount: amount + }), + sender + ); + } + + /** + * @notice Allows external users to register upkeeps; assumes amount is approved for transfer by the contract + * @param requestParams struct of all possible registration parameters + */ + function registerUpkeep(RegistrationParams calldata requestParams) external returns (uint256) { + if (requestParams.amount < s_config.minPLIJuels) { + revert InsufficientPayment(); + } + + PLI.transferFrom(msg.sender, address(this), requestParams.amount); + + return _register(requestParams, msg.sender); + } + + /** + * @dev register upkeep on KeeperRegistry contract and emit RegistrationApproved event + */ + function approve( + string memory name, + address upkeepContract, + uint32 gasLimit, + address adminAddress, + bytes calldata checkData, + bytes calldata offchainConfig, + bytes32 hash + ) external onlyOwner { + PendingRequest memory request = s_pendingRequests[hash]; + if (request.admin == address(0)) { + revert RequestNotFound(); + } + bytes32 expectedHash = keccak256(abi.encode(upkeepContract, gasLimit, adminAddress, checkData, offchainConfig)); + if (hash != expectedHash) { + revert HashMismatch(); + } + delete s_pendingRequests[hash]; + _approve( + RegistrationParams({ + name: name, + encryptedEmail: "", + upkeepContract: upkeepContract, + gasLimit: gasLimit, + adminAddress: adminAddress, + checkData: checkData, + offchainConfig: offchainConfig, + amount: request.balance + }), + expectedHash + ); + } + + /** + * @notice cancel will remove a registration request and return the refunds to the request.admin + * @param hash the request hash + */ + function cancel(bytes32 hash) external { + PendingRequest memory request = s_pendingRequests[hash]; + if (!(msg.sender == request.admin || msg.sender == owner())) { + 
revert OnlyAdminOrOwner(); + } + if (request.admin == address(0)) { + revert RequestNotFound(); + } + delete s_pendingRequests[hash]; + bool success = PLI.transfer(request.admin, request.balance); + if (!success) { + revert LinkTransferFailed(request.admin); + } + emit RegistrationRejected(hash); + } + + /** + * @notice owner calls this function to set if registration requests should be sent directly to the Keeper Registry + * @param autoApproveConfigType setting for auto-approve registrations + * note: autoApproveAllowedSenders list persists across config changes irrespective of type + * @param autoApproveMaxAllowed max number of registrations that can be auto approved + * @param keeperRegistry new keeper registry address + * @param minPLIJuels minimum PLI that new registrations should fund their upkeep with + */ + function setRegistrationConfig( + AutoApproveType autoApproveConfigType, + uint16 autoApproveMaxAllowed, + address keeperRegistry, + uint96 minPLIJuels + ) public onlyOwner { + uint32 approvedCount = s_config.approvedCount; + s_config = RegistrarConfig({ + autoApproveConfigType: autoApproveConfigType, + autoApproveMaxAllowed: autoApproveMaxAllowed, + approvedCount: approvedCount, + minPLIJuels: minPLIJuels, + keeperRegistry: AutomationRegistryBaseInterface(keeperRegistry) + }); + + emit ConfigChanged(autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels); + } + + /** + * @notice owner calls this function to set allowlist status for senderAddress + * @param senderAddress senderAddress to set the allowlist status for + * @param allowed true if senderAddress needs to be added to allowlist, false if needs to be removed + */ + function setAutoApproveAllowedSender(address senderAddress, bool allowed) external onlyOwner { + s_autoApproveAllowedSenders[senderAddress] = allowed; + + emit AutoApproveAllowedSenderSet(senderAddress, allowed); + } + + /** + * @notice read the allowlist status of senderAddress + * @param senderAddress address to 
read the allowlist status for + */ + function getAutoApproveAllowedSender(address senderAddress) external view returns (bool) { + return s_autoApproveAllowedSenders[senderAddress]; + } + + /** + * @notice read the current registration configuration + */ + function getRegistrationConfig() + external + view + returns ( + AutoApproveType autoApproveConfigType, + uint32 autoApproveMaxAllowed, + uint32 approvedCount, + address keeperRegistry, + uint256 minPLIJuels + ) + { + RegistrarConfig memory config = s_config; + return ( + config.autoApproveConfigType, + config.autoApproveMaxAllowed, + config.approvedCount, + address(config.keeperRegistry), + config.minPLIJuels + ); + } + + /** + * @notice gets the admin address and the current balance of a registration request + */ + function getPendingRequest(bytes32 hash) external view returns (address, uint96) { + PendingRequest memory request = s_pendingRequests[hash]; + return (request.admin, request.balance); + } + + /** + * @notice Called when PLI is sent to the contract via `transferAndCall` + * @param sender Address of the sender transfering PLI + * @param amount Amount of PLI sent (specified in Juels) + * @param data Payload of the transaction + */ + function onTokenTransfer( + address sender, + uint256 amount, + bytes calldata data + ) + external + override + onlyPLI + permittedFunctionsForPLI(data) + isActualAmount(amount, data) + isActualSender(sender, data) + { + if (data.length < 292) revert InvalidDataLength(); + if (amount < s_config.minPLIJuels) { + revert InsufficientPayment(); + } + (bool success, ) = address(this).delegatecall(data); + // calls register + if (!success) { + revert RegistrationRequestFailed(); + } + } + + //PRIVATE + + /** + * @dev verify registration request and emit RegistrationRequested event + */ + function _register(RegistrationParams memory params, address sender) private returns (uint256) { + if (params.adminAddress == address(0)) { + revert InvalidAdminAddress(); + } + bytes32 hash = 
keccak256( + abi.encode(params.upkeepContract, params.gasLimit, params.adminAddress, params.checkData, params.offchainConfig) + ); + + emit RegistrationRequested( + hash, + params.name, + params.encryptedEmail, + params.upkeepContract, + params.gasLimit, + params.adminAddress, + params.checkData, + params.amount + ); + + uint256 upkeepId; + RegistrarConfig memory config = s_config; + if (_shouldAutoApprove(config, sender)) { + s_config.approvedCount = config.approvedCount + 1; + + upkeepId = _approve(params, hash); + } else { + uint96 newBalance = s_pendingRequests[hash].balance + params.amount; + s_pendingRequests[hash] = PendingRequest({admin: params.adminAddress, balance: newBalance}); + } + + return upkeepId; + } + + /** + * @dev register upkeep on KeeperRegistry contract and emit RegistrationApproved event + */ + function _approve(RegistrationParams memory params, bytes32 hash) private returns (uint256) { + AutomationRegistryBaseInterface keeperRegistry = s_config.keeperRegistry; + + // register upkeep + uint256 upkeepId = keeperRegistry.registerUpkeep( + params.upkeepContract, + params.gasLimit, + params.adminAddress, + params.checkData, + params.offchainConfig + ); + // fund upkeep + bool success = PLI.transferAndCall(address(keeperRegistry), params.amount, abi.encode(upkeepId)); + if (!success) { + revert LinkTransferFailed(address(keeperRegistry)); + } + + emit RegistrationApproved(hash, params.name, upkeepId); + + return upkeepId; + } + + /** + * @dev verify sender allowlist if needed and check max limit + */ + function _shouldAutoApprove(RegistrarConfig memory config, address sender) private view returns (bool) { + if (config.autoApproveConfigType == AutoApproveType.DISABLED) { + return false; + } + if ( + config.autoApproveConfigType == AutoApproveType.ENABLED_SENDER_ALLOWLIST && (!s_autoApproveAllowedSenders[sender]) + ) { + return false; + } + if (config.approvedCount < config.autoApproveMaxAllowed) { + return true; + } + return false; + } + + 
//MODIFIERS + + /** + * @dev Reverts if not sent from the PLI token + */ + modifier onlyPLI() { + if (msg.sender != address(PLI)) { + revert OnlyLink(); + } + _; + } + + /** + * @dev Reverts if the given data does not begin with the `register` function selector + * @param _data The data payload of the request + */ + modifier permittedFunctionsForPLI(bytes memory _data) { + bytes4 funcSelector; + assembly { + // solhint-disable-next-line avoid-low-level-calls + funcSelector := mload(add(_data, 32)) // First 32 bytes contain length of data + } + if (funcSelector != REGISTER_REQUEST_SELECTOR) { + revert FunctionNotPermitted(); + } + _; + } + + /** + * @dev Reverts if the actual amount passed does not match the expected amount + * @param expected amount that should match the actual amount + * @param data bytes + */ + modifier isActualAmount(uint256 expected, bytes calldata data) { + // decode register function arguments to get actual amount + (, , , , , , , uint96 amount, ) = abi.decode( + data[4:], + (string, bytes, address, uint32, address, bytes, bytes, uint96, address) + ); + if (expected != amount) { + revert AmountMismatch(); + } + _; + } + + /** + * @dev Reverts if the actual sender address does not match the expected sender address + * @param expected address that should match the actual sender address + * @param data bytes + */ + modifier isActualSender(address expected, bytes calldata data) { + // decode register function arguments to get actual sender + (, , , , , , , , address sender) = abi.decode( + data[4:], + (string, bytes, address, uint32, address, bytes, bytes, uint96, address) + ); + if (expected != sender) { + revert SenderMismatch(); + } + _; + } +} diff --git a/contracts/src/v0.8/automation/v2_0/KeeperRegistry2_0.sol b/contracts/src/v0.8/automation/v2_0/KeeperRegistry2_0.sol new file mode 100644 index 00000000..7f9485c5 --- /dev/null +++ b/contracts/src/v0.8/automation/v2_0/KeeperRegistry2_0.sol @@ -0,0 +1,1012 @@ +// SPDX-License-Identifier: MIT 
+pragma solidity 0.8.6; + +import "../../vendor/openzeppelin-solidity/v4.7.3/contracts/proxy/Proxy.sol"; +import "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; +import "./KeeperRegistryBase2_0.sol"; +import {AutomationRegistryExecutableInterface, UpkeepInfo, State, OnchainConfig, UpkeepFailureReason} from "../interfaces/v2_0/AutomationRegistryInterface2_0.sol"; +import "../interfaces/MigratableKeeperRegistryInterface.sol"; +import "../interfaces/MigratableKeeperRegistryInterfaceV2.sol"; +import "../../shared/interfaces/IERC677Receiver.sol"; +import {OCR2Abstract} from "../../shared/ocr2/OCR2Abstract.sol"; + +/** + _. _|_ _ ._ _ _._|_o _ ._ o _ _ ._ _| _ __|_o._ +(_||_||_(_)| | |(_| |_|(_)| | |_> (_)|_|| (_|(/__> |_|| |\/ + / + */ +/** + * @notice Registry for adding work for Plugin Keepers to perform on client + * contracts. Clients must support the Upkeep interface. + */ +contract KeeperRegistry2_0 is + KeeperRegistryBase2_0, + Proxy, + OCR2Abstract, + AutomationRegistryExecutableInterface, + MigratableKeeperRegistryInterface, + MigratableKeeperRegistryInterfaceV2, + IERC677Receiver +{ + using Address for address; + using EnumerableSet for EnumerableSet.UintSet; + + // Immutable address of logic contract where some functionality is delegated to + address private immutable i_keeperRegistryLogic; + + /** + * @notice versions: + * - KeeperRegistry 2.0.2: pass revert bytes as performData when target contract reverts + * : fixes issue with arbitrum block number + * : does an early return in case of stale report instead of revert + * - KeeperRegistry 2.0.1: implements workaround for buggy migrate function in 1.X + * - KeeperRegistry 2.0.0: implement OCR interface + * - KeeperRegistry 1.3.0: split contract into Proxy and Logic + * : account for Arbitrum and Optimism L1 gas fee + * : allow users to configure upkeeps + * - KeeperRegistry 1.2.0: allow 
funding within performUpkeep + * : allow configurable registry maxPerformGas + * : add function to let admin change upkeep gas limit + * : add minUpkeepSpend requirement + * : upgrade to solidity v0.8 + * - KeeperRegistry 1.1.0: added flatFeeMicroLink + * - KeeperRegistry 1.0.0: initial release + */ + string public constant override typeAndVersion = "KeeperRegistry 2.0.2"; + + /** + * @inheritdoc MigratableKeeperRegistryInterface + */ + + UpkeepFormat public constant override upkeepTranscoderVersion = UPKEEP_TRANSCODER_VERSION_BASE; + + /** + * @inheritdoc MigratableKeeperRegistryInterfaceV2 + */ + uint8 public constant override upkeepVersion = UPKEEP_VERSION_BASE; + + /** + * @param keeperRegistryLogic address of the logic contract + */ + constructor( + KeeperRegistryBase2_0 keeperRegistryLogic + ) + KeeperRegistryBase2_0( + keeperRegistryLogic.getMode(), + keeperRegistryLogic.getLinkAddress(), + keeperRegistryLogic.getLinkNativeFeedAddress(), + keeperRegistryLogic.getFastGasFeedAddress() + ) + { + i_keeperRegistryLogic = address(keeperRegistryLogic); + } + + //////// + // ACTIONS + //////// + + /** + * @dev This struct is used to maintain run time information about an upkeep in transmit function + * @member upkeep the upkeep struct + * @member earlyChecksPassed whether the upkeep passed early checks before perform + * @member paymentParams the paymentParams for this upkeep + * @member performSuccess whether the perform was successful + * @member gasUsed gasUsed by this upkeep in perform + */ + struct UpkeepTransmitInfo { + Upkeep upkeep; + bool earlyChecksPassed; + uint96 maxLinkPayment; + bool performSuccess; + uint256 gasUsed; + uint256 gasOverhead; + } + + /** + * @inheritdoc OCR2Abstract + */ + function transmit( + bytes32[3] calldata reportContext, + bytes calldata rawReport, + bytes32[] calldata rs, + bytes32[] calldata ss, + bytes32 rawVs + ) external override { + uint256 gasOverhead = gasleft(); + HotVars memory hotVars = s_hotVars; + + if 
(hotVars.paused) revert RegistryPaused(); + if (!s_transmitters[msg.sender].active) revert OnlyActiveTransmitters(); + + Report memory report = _decodeReport(rawReport); + UpkeepTransmitInfo[] memory upkeepTransmitInfo = new UpkeepTransmitInfo[](report.upkeepIds.length); + uint16 numUpkeepsPassedChecks; + + for (uint256 i = 0; i < report.upkeepIds.length; i++) { + upkeepTransmitInfo[i].upkeep = s_upkeep[report.upkeepIds[i]]; + + upkeepTransmitInfo[i].maxLinkPayment = _getMaxLinkPayment( + hotVars, + upkeepTransmitInfo[i].upkeep.executeGas, + uint32(report.wrappedPerformDatas[i].performData.length), + report.fastGasWei, + report.linkNative, + true + ); + upkeepTransmitInfo[i].earlyChecksPassed = _prePerformChecks( + report.upkeepIds[i], + report.wrappedPerformDatas[i], + upkeepTransmitInfo[i].upkeep, + upkeepTransmitInfo[i].maxLinkPayment + ); + + if (upkeepTransmitInfo[i].earlyChecksPassed) { + numUpkeepsPassedChecks += 1; + } + } + // No upkeeps to be performed in this report + if (numUpkeepsPassedChecks == 0) { + return; + } + + // Verify signatures + if (s_latestConfigDigest != reportContext[0]) revert ConfigDigestMismatch(); + if (rs.length != hotVars.f + 1 || rs.length != ss.length) revert IncorrectNumberOfSignatures(); + _verifyReportSignature(reportContext, rawReport, rs, ss, rawVs); + + // Actually perform upkeeps + for (uint256 i = 0; i < report.upkeepIds.length; i++) { + if (upkeepTransmitInfo[i].earlyChecksPassed) { + // Check if this upkeep was already performed in this report + if (s_upkeep[report.upkeepIds[i]].lastPerformBlockNumber == uint32(_blockNum())) { + revert InvalidReport(); + } + + // Actually perform the target upkeep + (upkeepTransmitInfo[i].performSuccess, upkeepTransmitInfo[i].gasUsed) = _performUpkeep( + upkeepTransmitInfo[i].upkeep, + report.wrappedPerformDatas[i].performData + ); + + // Deduct that gasUsed by upkeep from our running counter + gasOverhead -= upkeepTransmitInfo[i].gasUsed; + + // Store last perform block number for 
upkeep + s_upkeep[report.upkeepIds[i]].lastPerformBlockNumber = uint32(_blockNum()); + } + } + + // This is the overall gas overhead that will be split across performed upkeeps + // Take upper bound of 16 gas per callData bytes, which is approximated to be reportLength + // Rest of msg.data is accounted for in accounting overheads + gasOverhead = + (gasOverhead - gasleft() + 16 * rawReport.length) + + ACCOUNTING_FIXED_GAS_OVERHEAD + + (ACCOUNTING_PER_SIGNER_GAS_OVERHEAD * (hotVars.f + 1)); + gasOverhead = gasOverhead / numUpkeepsPassedChecks + ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD; + + uint96 totalReimbursement; + uint96 totalPremium; + { + uint96 reimbursement; + uint96 premium; + for (uint256 i = 0; i < report.upkeepIds.length; i++) { + if (upkeepTransmitInfo[i].earlyChecksPassed) { + upkeepTransmitInfo[i].gasOverhead = _getCappedGasOverhead( + gasOverhead, + uint32(report.wrappedPerformDatas[i].performData.length), + hotVars.f + ); + + (reimbursement, premium) = _postPerformPayment( + hotVars, + report.upkeepIds[i], + upkeepTransmitInfo[i], + report.fastGasWei, + report.linkNative, + numUpkeepsPassedChecks + ); + totalPremium += premium; + totalReimbursement += reimbursement; + + emit UpkeepPerformed( + report.upkeepIds[i], + upkeepTransmitInfo[i].performSuccess, + report.wrappedPerformDatas[i].checkBlockNumber, + upkeepTransmitInfo[i].gasUsed, + upkeepTransmitInfo[i].gasOverhead, + reimbursement + premium + ); + } + } + } + // record payments + s_transmitters[msg.sender].balance += totalReimbursement; + s_hotVars.totalPremium += totalPremium; + + uint40 epochAndRound = uint40(uint256(reportContext[1])); + uint32 epoch = uint32(epochAndRound >> 8); + if (epoch > hotVars.latestEpoch) { + s_hotVars.latestEpoch = epoch; + } + } + + /** + * @notice simulates the upkeep with the perform data returned from + * checkUpkeep + * @param id identifier of the upkeep to execute the data with. + * @param performData calldata parameter to be passed to the target upkeep. 
+ */ + function simulatePerformUpkeep( + uint256 id, + bytes calldata performData + ) external cannotExecute returns (bool success, uint256 gasUsed) { + if (s_hotVars.paused) revert RegistryPaused(); + + Upkeep memory upkeep = s_upkeep[id]; + return _performUpkeep(upkeep, performData); + } + + /** + * @notice uses PLI's transferAndCall to PLI and add funding to an upkeep + * @dev safe to cast uint256 to uint96 as total PLI supply is under UINT96MAX + * @param sender the account which transferred the funds + * @param amount number of PLI transfer + */ + function onTokenTransfer(address sender, uint256 amount, bytes calldata data) external override { + if (msg.sender != address(i_link)) revert OnlyCallableByPLIToken(); + if (data.length != 32) revert InvalidDataLength(); + uint256 id = abi.decode(data, (uint256)); + if (s_upkeep[id].maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + + s_upkeep[id].balance = s_upkeep[id].balance + uint96(amount); + s_expectedLinkBalance = s_expectedLinkBalance + amount; + + emit FundsAdded(id, sender, uint96(amount)); + } + + //////// + // SETTERS + //////// + + /** + * @inheritdoc OCR2Abstract + */ + function setConfig( + address[] memory signers, + address[] memory transmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) external override onlyOwner { + if (signers.length > MAX_NUM_ORACLES) revert TooManyOracles(); + if (f == 0) revert IncorrectNumberOfFaultyOracles(); + if (signers.length != transmitters.length || signers.length <= 3 * f) revert IncorrectNumberOfSigners(); + + // move all pooled payments out of the pool to each transmitter's balance + uint96 totalPremium = s_hotVars.totalPremium; + uint96 oldLength = uint96(s_transmittersList.length); + for (uint256 i = 0; i < oldLength; i++) { + _updateTransmitterBalanceFromPool(s_transmittersList[i], totalPremium, oldLength); + } + + // remove any old signer/transmitter addresses + address signerAddress; + 
address transmitterAddress; + for (uint256 i = 0; i < oldLength; i++) { + signerAddress = s_signersList[i]; + transmitterAddress = s_transmittersList[i]; + delete s_signers[signerAddress]; + // Do not delete the whole transmitter struct as it has balance information stored + s_transmitters[transmitterAddress].active = false; + } + delete s_signersList; + delete s_transmittersList; + + // add new signer/transmitter addresses + { + Transmitter memory transmitter; + address temp; + for (uint256 i = 0; i < signers.length; i++) { + if (s_signers[signers[i]].active) revert RepeatedSigner(); + s_signers[signers[i]] = Signer({active: true, index: uint8(i)}); + + temp = transmitters[i]; + transmitter = s_transmitters[temp]; + if (transmitter.active) revert RepeatedTransmitter(); + transmitter.active = true; + transmitter.index = uint8(i); + transmitter.lastCollected = totalPremium; + s_transmitters[temp] = transmitter; + } + } + s_signersList = signers; + s_transmittersList = transmitters; + + // Set the onchain config + OnchainConfig memory onchainConfigStruct = abi.decode(onchainConfig, (OnchainConfig)); + if (onchainConfigStruct.maxPerformGas < s_storage.maxPerformGas) revert GasLimitCanOnlyIncrease(); + if (onchainConfigStruct.maxCheckDataSize < s_storage.maxCheckDataSize) revert MaxCheckDataSizeCanOnlyIncrease(); + if (onchainConfigStruct.maxPerformDataSize < s_storage.maxPerformDataSize) + revert MaxPerformDataSizeCanOnlyIncrease(); + + s_hotVars = HotVars({ + f: f, + paymentPremiumPPB: onchainConfigStruct.paymentPremiumPPB, + flatFeeMicroLink: onchainConfigStruct.flatFeeMicroLink, + stalenessSeconds: onchainConfigStruct.stalenessSeconds, + gasCeilingMultiplier: onchainConfigStruct.gasCeilingMultiplier, + paused: false, + reentrancyGuard: false, + totalPremium: totalPremium, + latestEpoch: 0 + }); + + s_storage = Storage({ + checkGasLimit: onchainConfigStruct.checkGasLimit, + minUpkeepSpend: onchainConfigStruct.minUpkeepSpend, + maxPerformGas: 
onchainConfigStruct.maxPerformGas, + transcoder: onchainConfigStruct.transcoder, + registrar: onchainConfigStruct.registrar, + maxCheckDataSize: onchainConfigStruct.maxCheckDataSize, + maxPerformDataSize: onchainConfigStruct.maxPerformDataSize, + nonce: s_storage.nonce, + configCount: s_storage.configCount, + latestConfigBlockNumber: s_storage.latestConfigBlockNumber, + ownerLinkBalance: s_storage.ownerLinkBalance + }); + s_fallbackGasPrice = onchainConfigStruct.fallbackGasPrice; + s_fallbackLinkPrice = onchainConfigStruct.fallbackLinkPrice; + + uint32 previousConfigBlockNumber = s_storage.latestConfigBlockNumber; + s_storage.latestConfigBlockNumber = uint32(_blockNum()); + s_storage.configCount += 1; + + s_latestConfigDigest = _configDigestFromConfigData( + block.chainid, + address(this), + s_storage.configCount, + signers, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ); + + emit ConfigSet( + previousConfigBlockNumber, + s_latestConfigDigest, + s_storage.configCount, + signers, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ); + } + + //////// + // GETTERS + //////// + + /** + * @notice read all of the details about an upkeep + */ + function getUpkeep(uint256 id) external view override returns (UpkeepInfo memory upkeepInfo) { + Upkeep memory reg = s_upkeep[id]; + upkeepInfo = UpkeepInfo({ + target: reg.target, + executeGas: reg.executeGas, + checkData: s_checkData[id], + balance: reg.balance, + admin: s_upkeepAdmin[id], + maxValidBlocknumber: reg.maxValidBlocknumber, + lastPerformBlockNumber: reg.lastPerformBlockNumber, + amountSpent: reg.amountSpent, + paused: reg.paused, + offchainConfig: s_upkeepOffchainConfig[id] + }); + return upkeepInfo; + } + + /** + * @notice retrieve active upkeep IDs. Active upkeep is defined as an upkeep which is not paused and not canceled. 
+ * @param startIndex starting index in list + * @param maxCount max count to retrieve (0 = unlimited) + * @dev the order of IDs in the list is **not guaranteed**, therefore, if making successive calls, one + * should consider keeping the blockheight constant to ensure a holistic picture of the contract state + */ + function getActiveUpkeepIDs(uint256 startIndex, uint256 maxCount) external view override returns (uint256[] memory) { + uint256 maxIdx = s_upkeepIDs.length(); + if (startIndex >= maxIdx) revert IndexOutOfRange(); + if (maxCount == 0) { + maxCount = maxIdx - startIndex; + } + uint256[] memory ids = new uint256[](maxCount); + for (uint256 idx = 0; idx < maxCount; idx++) { + ids[idx] = s_upkeepIDs.at(startIndex + idx); + } + return ids; + } + + /** + * @notice read the current info about any transmitter address + */ + function getTransmitterInfo( + address query + ) external view override returns (bool active, uint8 index, uint96 balance, uint96 lastCollected, address payee) { + Transmitter memory transmitter = s_transmitters[query]; + uint96 totalDifference = s_hotVars.totalPremium - transmitter.lastCollected; + uint96 pooledShare = totalDifference / uint96(s_transmittersList.length); + + return ( + transmitter.active, + transmitter.index, + (transmitter.balance + pooledShare), + transmitter.lastCollected, + s_transmitterPayees[query] + ); + } + + /** + * @notice read the current info about any signer address + */ + function getSignerInfo(address query) external view returns (bool active, uint8 index) { + Signer memory signer = s_signers[query]; + return (signer.active, signer.index); + } + + /** + * @notice read the current state of the registry + */ + function getState() + external + view + override + returns ( + State memory state, + OnchainConfig memory config, + address[] memory signers, + address[] memory transmitters, + uint8 f + ) + { + state = State({ + nonce: s_storage.nonce, + ownerLinkBalance: s_storage.ownerLinkBalance, + expectedLinkBalance: 
s_expectedLinkBalance, + totalPremium: s_hotVars.totalPremium, + numUpkeeps: s_upkeepIDs.length(), + configCount: s_storage.configCount, + latestConfigBlockNumber: s_storage.latestConfigBlockNumber, + latestConfigDigest: s_latestConfigDigest, + latestEpoch: s_hotVars.latestEpoch, + paused: s_hotVars.paused + }); + + config = OnchainConfig({ + paymentPremiumPPB: s_hotVars.paymentPremiumPPB, + flatFeeMicroLink: s_hotVars.flatFeeMicroLink, + checkGasLimit: s_storage.checkGasLimit, + stalenessSeconds: s_hotVars.stalenessSeconds, + gasCeilingMultiplier: s_hotVars.gasCeilingMultiplier, + minUpkeepSpend: s_storage.minUpkeepSpend, + maxPerformGas: s_storage.maxPerformGas, + maxCheckDataSize: s_storage.maxCheckDataSize, + maxPerformDataSize: s_storage.maxPerformDataSize, + fallbackGasPrice: s_fallbackGasPrice, + fallbackLinkPrice: s_fallbackLinkPrice, + transcoder: s_storage.transcoder, + registrar: s_storage.registrar + }); + + return (state, config, s_signersList, s_transmittersList, s_hotVars.f); + } + + /** + * @notice calculates the minimum balance required for an upkeep to remain eligible + * @param id the upkeep id to calculate minimum balance for + */ + function getMinBalanceForUpkeep(uint256 id) external view returns (uint96 minBalance) { + return getMaxPaymentForGas(s_upkeep[id].executeGas); + } + + /** + * @notice calculates the maximum payment for a given gas limit + * @param gasLimit the gas to calculate payment for + */ + function getMaxPaymentForGas(uint32 gasLimit) public view returns (uint96 maxPayment) { + HotVars memory hotVars = s_hotVars; + (uint256 fastGasWei, uint256 linkNative) = _getFeedData(hotVars); + return _getMaxLinkPayment(hotVars, gasLimit, s_storage.maxPerformDataSize, fastGasWei, linkNative, false); + } + + /** + * @notice retrieves the migration permission for a peer registry + */ + function getPeerRegistryMigrationPermission(address peer) external view returns (MigrationPermission) { + return s_peerRegistryMigrationPermission[peer]; + } + 
+ /** + * @notice retrieves the address of the logic address + */ + function getKeeperRegistryLogicAddress() external view returns (address) { + return i_keeperRegistryLogic; + } + + /** + * @inheritdoc OCR2Abstract + */ + function latestConfigDetails() + external + view + override + returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest) + { + return (s_storage.configCount, s_storage.latestConfigBlockNumber, s_latestConfigDigest); + } + + /** + * @inheritdoc OCR2Abstract + */ + function latestConfigDigestAndEpoch() + external + view + override + returns (bool scanLogs, bytes32 configDigest, uint32 epoch) + { + return (false, s_latestConfigDigest, s_hotVars.latestEpoch); + } + + //////// + // INTERNAL FUNCTIONS + //////// + + /** + * @dev This is the address to which proxy functions are delegated to + */ + function _implementation() internal view override returns (address) { + return i_keeperRegistryLogic; + } + + /** + * @dev calls target address with exactly gasAmount gas and data as calldata + * or reverts if at least gasAmount gas is not available + */ + function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { + assembly { + let g := gas() + // Compute g -= PERFORM_GAS_CUSHION and check for underflow + if lt(g, PERFORM_GAS_CUSHION) { + revert(0, 0) + } + g := sub(g, PERFORM_GAS_CUSHION) + // if g - g//64 <= gasAmount, revert + // (we subtract g//64 because of EIP-150) + if iszero(gt(sub(g, div(g, 64)), gasAmount)) { + revert(0, 0) + } + // solidity calls check that a contract actually exists at the destination, so we do the same + if iszero(extcodesize(target)) { + revert(0, 0) + } + // call and return whether we succeeded. 
ignore return data + success := call(gasAmount, target, 0, add(data, 0x20), mload(data), 0, 0) + } + return success; + } + + /** + * @dev _decodeReport decodes a serialized report into a Report struct + */ + function _decodeReport(bytes memory rawReport) internal pure returns (Report memory) { + ( + uint256 fastGasWei, + uint256 linkNative, + uint256[] memory upkeepIds, + PerformDataWrapper[] memory wrappedPerformDatas + ) = abi.decode(rawReport, (uint256, uint256, uint256[], PerformDataWrapper[])); + if (upkeepIds.length != wrappedPerformDatas.length) revert InvalidReport(); + + return + Report({ + fastGasWei: fastGasWei, + linkNative: linkNative, + upkeepIds: upkeepIds, + wrappedPerformDatas: wrappedPerformDatas + }); + } + + /** + * @dev Does some early sanity checks before actually performing an upkeep + */ + function _prePerformChecks( + uint256 upkeepId, + PerformDataWrapper memory wrappedPerformData, + Upkeep memory upkeep, + uint96 maxLinkPayment + ) internal returns (bool) { + if (wrappedPerformData.checkBlockNumber < upkeep.lastPerformBlockNumber) { + // Can happen when another report performed this upkeep after this report was generated + emit StaleUpkeepReport(upkeepId); + return false; + } + + if (_blockHash(wrappedPerformData.checkBlockNumber) != wrappedPerformData.checkBlockhash) { + // Can happen when the block on which report was generated got reorged + // We will also revert if checkBlockNumber is older than 256 blocks. In this case we rely on a new transmission + // with the latest checkBlockNumber + emit ReorgedUpkeepReport(upkeepId); + return false; + } + + if (upkeep.maxValidBlocknumber <= _blockNum()) { + // Can happen when an upkeep got cancelled after report was generated. 
+ // However we have a CANCELLATION_DELAY of 50 blocks so shouldn't happen in practice + emit CancelledUpkeepReport(upkeepId); + return false; + } + + if (upkeep.balance < maxLinkPayment) { + // Can happen due to flucutations in gas / link prices + emit InsufficientFundsUpkeepReport(upkeepId); + return false; + } + + return true; + } + + /** + * @dev Verify signatures attached to report + */ + function _verifyReportSignature( + bytes32[3] calldata reportContext, + bytes calldata report, + bytes32[] calldata rs, + bytes32[] calldata ss, + bytes32 rawVs + ) internal view { + bytes32 h = keccak256(abi.encode(keccak256(report), reportContext)); + // i-th byte counts number of sigs made by i-th signer + uint256 signedCount = 0; + + Signer memory signer; + address signerAddress; + for (uint256 i = 0; i < rs.length; i++) { + signerAddress = ecrecover(h, uint8(rawVs[i]) + 27, rs[i], ss[i]); + signer = s_signers[signerAddress]; + if (!signer.active) revert OnlyActiveSigners(); + unchecked { + signedCount += 1 << (8 * signer.index); + } + } + + if (signedCount & ORACLE_MASK != signedCount) revert DuplicateSigners(); + } + + /** + * @dev calls the Upkeep target with the performData param passed in by the + * transmitter and the exact gas required by the Upkeep + */ + function _performUpkeep( + Upkeep memory upkeep, + bytes memory performData + ) private nonReentrant returns (bool success, uint256 gasUsed) { + gasUsed = gasleft(); + bytes memory callData = abi.encodeWithSelector(PERFORM_SELECTOR, performData); + success = _callWithExactGas(upkeep.executeGas, upkeep.target, callData); + gasUsed = gasUsed - gasleft(); + + return (success, gasUsed); + } + + /** + * @dev does postPerform payment processing for an upkeep. Deducts upkeep's balance and increases + * amount spent. 
+ */ + function _postPerformPayment( + HotVars memory hotVars, + uint256 upkeepId, + UpkeepTransmitInfo memory upkeepTransmitInfo, + uint256 fastGasWei, + uint256 linkNative, + uint16 numBatchedUpkeeps + ) internal returns (uint96 gasReimbursement, uint96 premium) { + (gasReimbursement, premium) = _calculatePaymentAmount( + hotVars, + upkeepTransmitInfo.gasUsed, + upkeepTransmitInfo.gasOverhead, + fastGasWei, + linkNative, + numBatchedUpkeeps, + true + ); + + uint96 payment = gasReimbursement + premium; + s_upkeep[upkeepId].balance -= payment; + s_upkeep[upkeepId].amountSpent += payment; + + return (gasReimbursement, premium); + } + + /** + * @dev Caps the gas overhead by the constant overhead used within initial payment checks in order to + * prevent a revert in payment processing. + */ + function _getCappedGasOverhead( + uint256 calculatedGasOverhead, + uint32 performDataLength, + uint8 f + ) private pure returns (uint256 cappedGasOverhead) { + cappedGasOverhead = _getMaxGasOverhead(performDataLength, f); + if (calculatedGasOverhead < cappedGasOverhead) { + return calculatedGasOverhead; + } + return cappedGasOverhead; + } + + //////// + // PROXY FUNCTIONS - EXECUTED THROUGH FALLBACK + //////// + + /** + * @notice adds a new upkeep + * @param target address to perform upkeep on + * @param gasLimit amount of gas to provide the target contract when + * performing upkeep + * @param admin address to cancel upkeep and withdraw remaining funds + * @param checkData data passed to the contract when checking for upkeep + */ + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + bytes calldata checkData, + bytes calldata offchainConfig + ) external override returns (uint256 id) { + // Executed through logic contract + _fallback(); + } + + /** + * @notice simulated by keepers via eth_call to see if the upkeep needs to be + * performed. It returns the success status / failure reason along with the perform data payload. 
+ * @param id identifier of the upkeep to check + */ + function checkUpkeep( + uint256 id + ) + external + override + cannotExecute + returns ( + bool upkeepNeeded, + bytes memory performData, + UpkeepFailureReason upkeepFailureReason, + uint256 gasUsed, + uint256 fastGasWei, + uint256 linkNative + ) + { + // Executed through logic contract + _fallback(); + } + + /** + * @notice prevent an upkeep from being performed in the future + * @param id upkeep to be canceled + */ + function cancelUpkeep(uint256 id) external override { + // Executed through logic contract + _fallback(); + } + + /** + * @notice pause an upkeep + * @param id upkeep to be paused + */ + function pauseUpkeep(uint256 id) external override { + // Executed through logic contract + _fallback(); + } + + /** + * @notice unpause an upkeep + * @param id upkeep to be resumed + */ + function unpauseUpkeep(uint256 id) external override { + // Executed through logic contract + _fallback(); + } + + /** + * @notice update the check data of an upkeep + * @param id the id of the upkeep whose check data needs to be updated + * @param newCheckData the new check data + */ + function updateCheckData(uint256 id, bytes calldata newCheckData) external override { + // Executed through logic contract + _fallback(); + } + + /** + * @notice adds PLI funding for an upkeep by transferring from the sender's + * PLI balance + * @param id upkeep to fund + * @param amount number of PLI to transfer + */ + function addFunds(uint256 id, uint96 amount) external override { + // Executed through logic contract + _fallback(); + } + + /** + * @notice removes funding from a canceled upkeep + * @param id upkeep to withdraw funds from + * @param to destination address for sending remaining funds + */ + function withdrawFunds(uint256 id, address to) external { + // Executed through logic contract + // Restricted to nonRentrant in logic contract as this is not callable from a user's performUpkeep + _fallback(); + } + + /** + * @notice allows 
the admin of an upkeep to modify gas limit + * @param id upkeep to be change the gas limit for + * @param gasLimit new gas limit for the upkeep + */ + function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external override { + // Executed through logic contract + _fallback(); + } + + /** + * @notice allows the admin of an upkeep to modify the offchain config + * @param id upkeep to be change the gas limit for + * @param config instructs oracles of offchain config preferences + */ + function setUpkeepOffchainConfig(uint256 id, bytes calldata config) external override { + // Executed through logic contract + _fallback(); + } + + /** + * @notice withdraws a transmitter's payment, callable only by the transmitter's payee + * @param from transmitter address + * @param to address to send the payment to + */ + function withdrawPayment(address from, address to) external { + // Executed through logic contract + _fallback(); + } + + /** + * @notice proposes the safe transfer of a transmitter's payee to another address + * @param transmitter address of the transmitter to transfer payee role + * @param proposed address to nominate for next payeeship + */ + function transferPayeeship(address transmitter, address proposed) external { + // Executed through logic contract + _fallback(); + } + + /** + * @notice accepts the safe transfer of payee role for a transmitter + * @param transmitter address to accept the payee role for + */ + function acceptPayeeship(address transmitter) external { + // Executed through logic contract + _fallback(); + } + + /** + * @notice proposes the safe transfer of an upkeep's admin role to another address + * @param id the upkeep id to transfer admin + * @param proposed address to nominate for the new upkeep admin + */ + function transferUpkeepAdmin(uint256 id, address proposed) external override { + // Executed through logic contract + _fallback(); + } + + /** + * @notice accepts the safe transfer of admin role for an upkeep + * @param id the upkeep 
id + */ + function acceptUpkeepAdmin(uint256 id) external override { + // Executed through logic contract + _fallback(); + } + + /** + * @inheritdoc MigratableKeeperRegistryInterface + */ + function migrateUpkeeps( + uint256[] calldata ids, + address destination + ) external override(MigratableKeeperRegistryInterface, MigratableKeeperRegistryInterfaceV2) { + // Executed through logic contract + _fallback(); + } + + /** + * @inheritdoc MigratableKeeperRegistryInterface + */ + function receiveUpkeeps( + bytes calldata encodedUpkeeps + ) external override(MigratableKeeperRegistryInterface, MigratableKeeperRegistryInterfaceV2) { + // Executed through logic contract + _fallback(); + } + + //////// + // OWNER RESTRICTED FUNCTIONS + //////// + + /** + * @notice recovers PLI funds improperly transferred to the registry + * @dev In principle this function’s execution cost could exceed block + * gas limit. However, in our anticipated deployment, the number of upkeeps and + * transmitters will be low enough to avoid this problem. 
+ */ + function recoverFunds() external { + // Executed through logic contract + // Restricted to onlyOwner in logic contract + _fallback(); + } + + /** + * @notice withdraws PLI funds collected through cancellation fees + */ + function withdrawOwnerFunds() external { + // Executed through logic contract + // Restricted to onlyOwner in logic contract + _fallback(); + } + + /** + * @notice update the list of payees corresponding to the transmitters + * @param payees addresses corresponding to transmitters who are allowed to + * move payments which have been accrued + */ + function setPayees(address[] calldata payees) external { + // Executed through logic contract + // Restricted to onlyOwner in logic contract + _fallback(); + } + + /** + * @notice signals to transmitters that they should not perform upkeeps until the + * contract has been unpaused + */ + function pause() external { + // Executed through logic contract + // Restricted to onlyOwner in logic contract + _fallback(); + } + + /** + * @notice signals to transmitters that they can perform upkeeps once again after + * having been paused + */ + function unpause() external { + // Executed through logic contract + // Restricted to onlyOwner in logic contract + _fallback(); + } + + /** + * @notice sets the peer registry migration permission + */ + function setPeerRegistryMigrationPermission(address peer, MigrationPermission permission) external { + // Executed through logic contract + // Restricted to onlyOwner in logic contract + _fallback(); + } +} diff --git a/contracts/src/v0.8/automation/v2_0/KeeperRegistryBase2_0.sol b/contracts/src/v0.8/automation/v2_0/KeeperRegistryBase2_0.sol new file mode 100644 index 00000000..4b037a94 --- /dev/null +++ b/contracts/src/v0.8/automation/v2_0/KeeperRegistryBase2_0.sol @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import 
"../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol"; +import "../../vendor/@eth-optimism/contracts/v0.8.6/contracts/L2/predeploys/OVM_GasPriceOracle.sol"; +import {ArbSys} from "../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol"; +import "../ExecutionPrevention.sol"; +import "../../shared/access/ConfirmedOwner.sol"; +import "../../shared/interfaces/AggregatorV3Interface.sol"; +import "../../shared/interfaces/LinkTokenInterface.sol"; +import "../interfaces/KeeperCompatibleInterface.sol"; +import "../interfaces/UpkeepTranscoderInterface.sol"; + +/** + * @notice relevant state of an upkeep which is used in transmit function + * @member executeGas the gas limit of upkeep execution + * @member maxValidBlocknumber until which block this upkeep is valid + * @member paused if this upkeep has been paused + * @member target the contract which needs to be serviced + * @member amountSpent the amount this upkeep has spent + * @member balance the balance of this upkeep + * @member lastPerformBlockNumber the last block number when this upkeep was performed + */ +struct Upkeep { + uint32 executeGas; + uint32 maxValidBlocknumber; + bool paused; + address target; + // 3 bytes left in 1st EVM word - not written to in transmit + uint96 amountSpent; + uint96 balance; + uint32 lastPerformBlockNumber; + // 4 bytes left in 2nd EVM word - written in transmit path +} + +/** + * @notice Base Keeper Registry contract, contains shared logic between + * KeeperRegistry and KeeperRegistryLogic + */ +abstract contract KeeperRegistryBase2_0 is ConfirmedOwner, ExecutionPrevention { + address internal constant ZERO_ADDRESS = address(0); + address internal constant IGNORE_ADDRESS = 0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF; + bytes4 internal constant CHECK_SELECTOR = KeeperCompatibleInterface.checkUpkeep.selector; + bytes4 internal constant PERFORM_SELECTOR = KeeperCompatibleInterface.performUpkeep.selector; + uint256 internal constant PERFORM_GAS_MIN = 2_300; + 
uint256 internal constant CANCELLATION_DELAY = 50; + uint256 internal constant PERFORM_GAS_CUSHION = 5_000; + uint256 internal constant PPB_BASE = 1_000_000_000; + uint32 internal constant UINT32_MAX = type(uint32).max; + uint96 internal constant PLI_TOTAL_SUPPLY = 1e27; + // The first byte of the mask can be 0, because we only ever have 31 oracles + uint256 internal constant ORACLE_MASK = 0x0001010101010101010101010101010101010101010101010101010101010101; + /** + * @dev UPKEEP_TRANSCODER_VERSION_BASE is temporary necessity for backwards compatibility with + * MigratableKeeperRegistryInterfaceV1 - it should be removed in future versions in favor of + * UPKEEP_VERSION_BASE and MigratableKeeperRegistryInterfaceV2 + */ + UpkeepFormat internal constant UPKEEP_TRANSCODER_VERSION_BASE = UpkeepFormat.V1; + uint8 internal constant UPKEEP_VERSION_BASE = uint8(UpkeepFormat.V3); + // L1_FEE_DATA_PADDING includes 35 bytes for L1 data padding for Optimism + bytes internal constant L1_FEE_DATA_PADDING = + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"; + + uint256 internal constant REGISTRY_GAS_OVERHEAD = 70_000; // Used only in maxPayment estimation, not in actual payment + uint256 internal constant REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD = 20; // Used only in maxPayment estimation, not in actual payment. Value scales with performData length. + uint256 internal constant REGISTRY_PER_SIGNER_GAS_OVERHEAD = 7_500; // Used only in maxPayment estimation, not in actual payment. Value scales with f. + + uint256 internal constant ACCOUNTING_FIXED_GAS_OVERHEAD = 26_900; // Used in actual payment. Fixed overhead per tx + uint256 internal constant ACCOUNTING_PER_SIGNER_GAS_OVERHEAD = 1_100; // Used in actual payment. overhead per signer + uint256 internal constant ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD = 5_800; // Used in actual payment. 
overhead per upkeep performed + + OVM_GasPriceOracle internal constant OPTIMISM_ORACLE = OVM_GasPriceOracle(0x420000000000000000000000000000000000000F); + ArbGasInfo internal constant ARB_NITRO_ORACLE = ArbGasInfo(0x000000000000000000000000000000000000006C); + ArbSys internal constant ARB_SYS = ArbSys(0x0000000000000000000000000000000000000064); + + LinkTokenInterface internal immutable i_link; + AggregatorV3Interface internal immutable i_linkNativeFeed; + AggregatorV3Interface internal immutable i_fastGasFeed; + Mode internal immutable i_mode; + + // @dev - The storage is gas optimised for one and only function - transmit. All the storage accessed in transmit + // is stored compactly. Rest of the storage layout is not of much concern as transmit is the only hot path + // Upkeep storage + EnumerableSet.UintSet internal s_upkeepIDs; + mapping(uint256 => Upkeep) internal s_upkeep; // accessed during transmit + mapping(uint256 => address) internal s_upkeepAdmin; + mapping(uint256 => address) internal s_proposedAdmin; + mapping(uint256 => bytes) internal s_checkData; + // Registry config and state + mapping(address => Transmitter) internal s_transmitters; + mapping(address => Signer) internal s_signers; + address[] internal s_signersList; // s_signersList contains the signing address of each oracle + address[] internal s_transmittersList; // s_transmittersList contains the transmission address of each oracle + mapping(address => address) internal s_transmitterPayees; // s_payees contains the mapping from transmitter to payee. 
+ mapping(address => address) internal s_proposedPayee; // proposed payee for a transmitter + bytes32 internal s_latestConfigDigest; // Read on transmit path in case of signature verification + HotVars internal s_hotVars; // Mixture of config and state, used in transmit + Storage internal s_storage; // Mixture of config and state, not used in transmit + uint256 internal s_fallbackGasPrice; + uint256 internal s_fallbackLinkPrice; + uint256 internal s_expectedLinkBalance; // Used in case of erroneous PLI transfers to contract + mapping(address => MigrationPermission) internal s_peerRegistryMigrationPermission; // Permissions for migration to and fro + mapping(uint256 => bytes) internal s_upkeepOffchainConfig; // general configuration preferences + + error ArrayHasNoEntries(); + error CannotCancel(); + error DuplicateEntry(); + error GasLimitCanOnlyIncrease(); + error GasLimitOutsideRange(); + error IndexOutOfRange(); + error InsufficientFunds(); + error InvalidDataLength(); + error InvalidPayee(); + error InvalidRecipient(); + error MigrationNotPermitted(); + error NotAContract(); + error OnlyActiveTransmitters(); + error OnlyCallableByAdmin(); + error OnlyCallableByPLIToken(); + error OnlyCallableByOwnerOrAdmin(); + error OnlyCallableByOwnerOrRegistrar(); + error OnlyCallableByPayee(); + error OnlyCallableByProposedAdmin(); + error OnlyCallableByProposedPayee(); + error OnlyPausedUpkeep(); + error OnlyUnpausedUpkeep(); + error ParameterLengthError(); + error PaymentGreaterThanAllPLI(); + error TargetCheckReverted(bytes reason); + error TranscoderNotSet(); + error UpkeepCancelled(); + error UpkeepNotCanceled(); + error UpkeepNotNeeded(); + error ValueNotChanged(); + error ConfigDigestMismatch(); + error IncorrectNumberOfSignatures(); + error OnlyActiveSigners(); + error DuplicateSigners(); + error TooManyOracles(); + error IncorrectNumberOfSigners(); + error IncorrectNumberOfFaultyOracles(); + error RepeatedSigner(); + error RepeatedTransmitter(); + error 
OnchainConfigNonEmpty(); + error CheckDataExceedsLimit(); + error MaxCheckDataSizeCanOnlyIncrease(); + error MaxPerformDataSizeCanOnlyIncrease(); + error InvalidReport(); + error RegistryPaused(); + error ReentrantCall(); + error UpkeepAlreadyExists(); + + enum MigrationPermission { + NONE, + OUTGOING, + INCOMING, + BIDIRECTIONAL + } + + enum Mode { + DEFAULT, + ARBITRUM, + OPTIMISM + } + + // Config + State storage struct which is on hot transmit path + struct HotVars { + uint8 f; // maximum number of faulty oracles + uint32 paymentPremiumPPB; // premium percentage charged to user over tx cost + uint32 flatFeeMicroLink; // flat fee charged to user for every perform + uint24 stalenessSeconds; // Staleness tolerance for feeds + uint16 gasCeilingMultiplier; // multiplier on top of fast gas feed for upper bound + bool paused; // pause switch for all upkeeps in the registry + bool reentrancyGuard; // guard against reentrancy + uint96 totalPremium; // total historical payment to oracles for premium + uint32 latestEpoch; // latest epoch for which a report was transmitted + // 1 EVM word full + } + + // Config + State storage struct which is not on hot transmit path + struct Storage { + uint96 minUpkeepSpend; // Minimum amount an upkeep must spend + address transcoder; // Address of transcoder contract used in migrations + // 1 EVM word full + uint96 ownerLinkBalance; // Balance of owner, accumulates minUpkeepSpend in case it is not spent + address registrar; // Address of registrar used to register upkeeps + // 2 EVM word full + uint32 checkGasLimit; // Gas limit allowed in checkUpkeep + uint32 maxPerformGas; // Max gas an upkeep can use on this registry + uint32 nonce; // Nonce for each upkeep created + uint32 configCount; // incremented each time a new config is posted, The count + // is incorporated into the config digest to prevent replay attacks. 
+ uint32 latestConfigBlockNumber; // makes it easier for offchain systems to extract config from logs + uint32 maxCheckDataSize; // max length of checkData bytes + uint32 maxPerformDataSize; // max length of performData bytes + // 4 bytes to 3rd EVM word + } + + struct Transmitter { + bool active; + uint8 index; // Index of oracle in s_signersList/s_transmittersList + uint96 balance; + uint96 lastCollected; + } + + struct Signer { + bool active; + // Index of oracle in s_signersList/s_transmittersList + uint8 index; + } + + // This struct is used to pack information about the user's check function + struct PerformDataWrapper { + uint32 checkBlockNumber; // Block number-1 on which check was simulated + bytes32 checkBlockhash; // blockhash of checkBlockNumber. Used for reorg protection + bytes performData; // actual performData that user's check returned + } + + // Report transmitted by OCR to transmit function + struct Report { + uint256 fastGasWei; + uint256 linkNative; + uint256[] upkeepIds; // Ids of upkeeps + PerformDataWrapper[] wrappedPerformDatas; // Contains checkInfo and performData for the corresponding upkeeps + } + + event FundsAdded(uint256 indexed id, address indexed from, uint96 amount); + event FundsWithdrawn(uint256 indexed id, uint256 amount, address to); + event OwnerFundsWithdrawn(uint96 amount); + event PayeesUpdated(address[] transmitters, address[] payees); + event PayeeshipTransferRequested(address indexed transmitter, address indexed from, address indexed to); + event PayeeshipTransferred(address indexed transmitter, address indexed from, address indexed to); + event PaymentWithdrawn(address indexed transmitter, uint256 indexed amount, address indexed to, address payee); + event UpkeepAdminTransferRequested(uint256 indexed id, address indexed from, address indexed to); + event UpkeepAdminTransferred(uint256 indexed id, address indexed from, address indexed to); + event UpkeepCanceled(uint256 indexed id, uint64 indexed atBlockHeight); + event 
UpkeepCheckDataUpdated(uint256 indexed id, bytes newCheckData); + event UpkeepGasLimitSet(uint256 indexed id, uint96 gasLimit); + event UpkeepOffchainConfigSet(uint256 indexed id, bytes offchainConfig); + event UpkeepMigrated(uint256 indexed id, uint256 remainingBalance, address destination); + event UpkeepPaused(uint256 indexed id); + event UpkeepPerformed( + uint256 indexed id, + bool indexed success, + uint32 checkBlockNumber, + uint256 gasUsed, + uint256 gasOverhead, + uint96 totalPayment + ); + event UpkeepReceived(uint256 indexed id, uint256 startingBalance, address importedFrom); + event UpkeepUnpaused(uint256 indexed id); + event UpkeepRegistered(uint256 indexed id, uint32 executeGas, address admin); + event StaleUpkeepReport(uint256 indexed id); + event ReorgedUpkeepReport(uint256 indexed id); + event InsufficientFundsUpkeepReport(uint256 indexed id); + event CancelledUpkeepReport(uint256 indexed id); + event Paused(address account); + event Unpaused(address account); + + /** + * @param mode the contract mode of default, Arbitrum, or Optimism + * @param link address of the PLI Token + * @param linkNativeFeed address of the PLI/Native price feed + * @param fastGasFeed address of the Fast Gas price feed + */ + constructor(Mode mode, address link, address linkNativeFeed, address fastGasFeed) ConfirmedOwner(msg.sender) { + i_mode = mode; + i_link = LinkTokenInterface(link); + i_linkNativeFeed = AggregatorV3Interface(linkNativeFeed); + i_fastGasFeed = AggregatorV3Interface(fastGasFeed); + } + + //////// + // GETTERS + //////// + + function getMode() external view returns (Mode) { + return i_mode; + } + + function getLinkAddress() external view returns (address) { + return address(i_link); + } + + function getLinkNativeFeedAddress() external view returns (address) { + return address(i_linkNativeFeed); + } + + function getFastGasFeedAddress() external view returns (address) { + return address(i_fastGasFeed); + } + + //////// + // INTERNAL + //////// + + /** + * 
@dev retrieves feed data for fast gas/native and link/native prices. if the feed + * data is stale it uses the configured fallback price. Once a price is picked + * for gas it takes the min of gas price in the transaction or the fast gas + * price in order to reduce costs for the upkeep clients. + */ + function _getFeedData(HotVars memory hotVars) internal view returns (uint256 gasWei, uint256 linkNative) { + uint32 stalenessSeconds = hotVars.stalenessSeconds; + bool staleFallback = stalenessSeconds > 0; + uint256 timestamp; + int256 feedValue; + (, feedValue, , timestamp, ) = i_fastGasFeed.latestRoundData(); + if ( + feedValue <= 0 || block.timestamp < timestamp || (staleFallback && stalenessSeconds < block.timestamp - timestamp) + ) { + gasWei = s_fallbackGasPrice; + } else { + gasWei = uint256(feedValue); + } + (, feedValue, , timestamp, ) = i_linkNativeFeed.latestRoundData(); + if ( + feedValue <= 0 || block.timestamp < timestamp || (staleFallback && stalenessSeconds < block.timestamp - timestamp) + ) { + linkNative = s_fallbackLinkPrice; + } else { + linkNative = uint256(feedValue); + } + return (gasWei, linkNative); + } + + /** + * @dev calculates PLI paid for gas spent plus a configured premium percentage + * @param gasLimit the amount of gas used + * @param gasOverhead the amount of gas overhead + * @param fastGasWei the fast gas price + * @param linkNative the exchange ratio between PLI and Native token + * @param numBatchedUpkeeps the number of upkeeps in this batch. 
Used to divide the L1 cost + * @param isExecution if this is triggered by a perform upkeep function + */ + function _calculatePaymentAmount( + HotVars memory hotVars, + uint256 gasLimit, + uint256 gasOverhead, + uint256 fastGasWei, + uint256 linkNative, + uint16 numBatchedUpkeeps, + bool isExecution + ) internal view returns (uint96, uint96) { + uint256 gasWei = fastGasWei * hotVars.gasCeilingMultiplier; + // in case it's actual execution use actual gas price, capped by fastGasWei * gasCeilingMultiplier + if (isExecution && tx.gasprice < gasWei) { + gasWei = tx.gasprice; + } + + uint256 l1CostWei = 0; + if (i_mode == Mode.OPTIMISM) { + bytes memory txCallData = new bytes(0); + if (isExecution) { + txCallData = bytes.concat(msg.data, L1_FEE_DATA_PADDING); + } else { + // @dev fee is 4 per 0 byte, 16 per non-zero byte. Worst case we can have + // s_storage.maxPerformDataSize non zero-bytes. Instead of setting bytes to non-zero + // we initialize 'new bytes' of length 4*maxPerformDataSize to cover for zero bytes. + txCallData = new bytes(4 * s_storage.maxPerformDataSize); + } + l1CostWei = OPTIMISM_ORACLE.getL1Fee(txCallData); + } else if (i_mode == Mode.ARBITRUM) { + l1CostWei = ARB_NITRO_ORACLE.getCurrentTxL1GasFees(); + } + // if it's not performing upkeeps, use gas ceiling multiplier to estimate the upper bound + if (!isExecution) { + l1CostWei = hotVars.gasCeilingMultiplier * l1CostWei; + } + // Divide l1CostWei among all batched upkeeps. 
Spare change from division is not charged + l1CostWei = l1CostWei / numBatchedUpkeeps; + + uint256 gasPayment = ((gasWei * (gasLimit + gasOverhead) + l1CostWei) * 1e18) / linkNative; + uint256 premium = (((gasWei * gasLimit) + l1CostWei) * 1e9 * hotVars.paymentPremiumPPB) / + linkNative + + uint256(hotVars.flatFeeMicroLink) * + 1e12; + // PLI_TOTAL_SUPPLY < UINT96_MAX + if (gasPayment + premium > PLI_TOTAL_SUPPLY) revert PaymentGreaterThanAllPLI(); + return (uint96(gasPayment), uint96(premium)); + } + + /** + * @dev generates the max link payment for an upkeep + */ + function _getMaxLinkPayment( + HotVars memory hotVars, + uint32 executeGas, + uint32 performDataLength, + uint256 fastGasWei, + uint256 linkNative, + bool isExecution // Whether this is an actual perform execution or just a simulation + ) internal view returns (uint96) { + uint256 gasOverhead = _getMaxGasOverhead(performDataLength, hotVars.f); + (uint96 reimbursement, uint96 premium) = _calculatePaymentAmount( + hotVars, + executeGas, + gasOverhead, + fastGasWei, + linkNative, + 1, // Consider only 1 upkeep in batch to get maxPayment + isExecution + ); + + return reimbursement + premium; + } + + /** + * @dev returns the max gas overhead that can be charged for an upkeep + */ + function _getMaxGasOverhead(uint32 performDataLength, uint8 f) internal pure returns (uint256) { + // performData causes additional overhead in report length and memory operations + return + REGISTRY_GAS_OVERHEAD + + (REGISTRY_PER_SIGNER_GAS_OVERHEAD * (f + 1)) + + (REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD * performDataLength); + } + + /** + * @dev move a transmitter's balance from total pool to withdrawable balance + */ + function _updateTransmitterBalanceFromPool( + address transmitterAddress, + uint96 totalPremium, + uint96 payeeCount + ) internal returns (uint96) { + Transmitter memory transmitter = s_transmitters[transmitterAddress]; + + uint96 uncollected = totalPremium - transmitter.lastCollected; + uint96 due = uncollected 
/ payeeCount; + transmitter.balance += due; + transmitter.lastCollected = totalPremium; + + // Transfer spare change to owner + s_storage.ownerLinkBalance += (uncollected - due * payeeCount); + s_transmitters[transmitterAddress] = transmitter; + + return transmitter.balance; + } + + /** + * @notice returns the current block number in a chain agnostic manner + */ + function _blockNum() internal view returns (uint256) { + if (i_mode == Mode.ARBITRUM) { + return ARB_SYS.arbBlockNumber(); + } else { + return block.number; + } + } + + /** + * @notice returns the blockhash of the provided block number in a chain agnostic manner + * @param n the blocknumber to retrieve the blockhash for + * @return blockhash the blockhash of block number n, or 0 if n is out of queryable range + */ + function _blockHash(uint256 n) internal view returns (bytes32) { + if (i_mode == Mode.ARBITRUM) { + uint256 blockNum = ARB_SYS.arbBlockNumber(); + if (n >= blockNum || blockNum - n > 256) { + return ""; + } + return ARB_SYS.arbBlockHash(n); + } else { + return blockhash(n); + } + } + + /** + * @notice replicates Open Zeppelin's ReentrancyGuard but optimized to fit our storage + */ + modifier nonReentrant() { + if (s_hotVars.reentrancyGuard) revert ReentrantCall(); + s_hotVars.reentrancyGuard = true; + _; + s_hotVars.reentrancyGuard = false; + } +} diff --git a/contracts/src/v0.8/automation/v2_0/KeeperRegistryLogic2_0.sol b/contracts/src/v0.8/automation/v2_0/KeeperRegistryLogic2_0.sol new file mode 100644 index 00000000..4dd0c7ce --- /dev/null +++ b/contracts/src/v0.8/automation/v2_0/KeeperRegistryLogic2_0.sol @@ -0,0 +1,474 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; +import "./KeeperRegistryBase2_0.sol"; +import {UpkeepFailureReason} from "../interfaces/v2_0/AutomationRegistryInterface2_0.sol"; +import 
"../interfaces/MigratableKeeperRegistryInterfaceV2.sol"; +import "../interfaces/UpkeepTranscoderInterfaceV2.sol"; + +/** + * @notice Logic contract, works in tandem with KeeperRegistry as a proxy + */ +contract KeeperRegistryLogic2_0 is KeeperRegistryBase2_0 { + using Address for address; + using EnumerableSet for EnumerableSet.UintSet; + + /** + * @param mode one of Default, Arbitrum, Optimism + * @param link address of the PLI Token + * @param linkNativeFeed address of the PLI/Native price feed + * @param fastGasFeed address of the Fast Gas price feed + */ + constructor( + Mode mode, + address link, + address linkNativeFeed, + address fastGasFeed + ) KeeperRegistryBase2_0(mode, link, linkNativeFeed, fastGasFeed) {} + + function checkUpkeep( + uint256 id + ) + external + cannotExecute + returns ( + bool upkeepNeeded, + bytes memory performData, + UpkeepFailureReason upkeepFailureReason, + uint256 gasUsed, + uint256 fastGasWei, + uint256 linkNative + ) + { + HotVars memory hotVars = s_hotVars; + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.maxValidBlocknumber != UINT32_MAX) + return (false, bytes(""), UpkeepFailureReason.UPKEEP_CANCELLED, gasUsed, 0, 0); + if (upkeep.paused) return (false, bytes(""), UpkeepFailureReason.UPKEEP_PAUSED, gasUsed, 0, 0); + + (fastGasWei, linkNative) = _getFeedData(hotVars); + uint96 maxLinkPayment = _getMaxLinkPayment( + hotVars, + upkeep.executeGas, + s_storage.maxPerformDataSize, + fastGasWei, + linkNative, + false + ); + if (upkeep.balance < maxLinkPayment) + return (false, bytes(""), UpkeepFailureReason.INSUFFICIENT_BALANCE, gasUsed, fastGasWei, linkNative); + + gasUsed = gasleft(); + bytes memory callData = abi.encodeWithSelector(CHECK_SELECTOR, s_checkData[id]); + (bool success, bytes memory result) = upkeep.target.call{gas: s_storage.checkGasLimit}(callData); + gasUsed = gasUsed - gasleft(); + + if (!success) { + upkeepFailureReason = UpkeepFailureReason.TARGET_CHECK_REVERTED; + } else { + (upkeepNeeded, result) = 
abi.decode(result, (bool, bytes)); + if (!upkeepNeeded) + return (false, bytes(""), UpkeepFailureReason.UPKEEP_NOT_NEEDED, gasUsed, fastGasWei, linkNative); + if (result.length > s_storage.maxPerformDataSize) + return (false, bytes(""), UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT, gasUsed, fastGasWei, linkNative); + } + + performData = abi.encode( + PerformDataWrapper({ + checkBlockNumber: uint32(_blockNum() - 1), + checkBlockhash: _blockHash(_blockNum() - 1), + performData: result + }) + ); + + return (success, performData, upkeepFailureReason, gasUsed, fastGasWei, linkNative); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function withdrawOwnerFunds() external onlyOwner { + uint96 amount = s_storage.ownerLinkBalance; + + s_expectedLinkBalance = s_expectedLinkBalance - amount; + s_storage.ownerLinkBalance = 0; + + emit OwnerFundsWithdrawn(amount); + i_link.transfer(msg.sender, amount); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function recoverFunds() external onlyOwner { + uint256 total = i_link.balanceOf(address(this)); + i_link.transfer(msg.sender, total - s_expectedLinkBalance); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function setPayees(address[] calldata payees) external onlyOwner { + if (s_transmittersList.length != payees.length) revert ParameterLengthError(); + for (uint256 i = 0; i < s_transmittersList.length; i++) { + address transmitter = s_transmittersList[i]; + address oldPayee = s_transmitterPayees[transmitter]; + address newPayee = payees[i]; + if ( + (newPayee == ZERO_ADDRESS) || (oldPayee != ZERO_ADDRESS && oldPayee != newPayee && newPayee != IGNORE_ADDRESS) + ) revert InvalidPayee(); + if (newPayee != IGNORE_ADDRESS) { + s_transmitterPayees[transmitter] = newPayee; + } + } + emit PayeesUpdated(s_transmittersList, payees); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function pause() external onlyOwner { + s_hotVars.paused = true; + 
+ emit Paused(msg.sender); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function unpause() external onlyOwner { + s_hotVars.paused = false; + + emit Unpaused(msg.sender); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function setPeerRegistryMigrationPermission(address peer, MigrationPermission permission) external onlyOwner { + s_peerRegistryMigrationPermission[peer] = permission; + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + bytes calldata checkData, + bytes calldata offchainConfig + ) external returns (uint256 id) { + if (msg.sender != owner() && msg.sender != s_storage.registrar) revert OnlyCallableByOwnerOrRegistrar(); + + id = uint256(keccak256(abi.encode(_blockHash(_blockNum() - 1), address(this), s_storage.nonce))); + _createUpkeep(id, target, gasLimit, admin, 0, checkData, false); + s_storage.nonce++; + s_upkeepOffchainConfig[id] = offchainConfig; + emit UpkeepRegistered(id, gasLimit, admin); + return id; + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function cancelUpkeep(uint256 id) external { + Upkeep memory upkeep = s_upkeep[id]; + bool canceled = upkeep.maxValidBlocknumber != UINT32_MAX; + bool isOwner = msg.sender == owner(); + + if (canceled && !(isOwner && upkeep.maxValidBlocknumber > _blockNum())) revert CannotCancel(); + if (!isOwner && msg.sender != s_upkeepAdmin[id]) revert OnlyCallableByOwnerOrAdmin(); + + uint256 height = _blockNum(); + if (!isOwner) { + height = height + CANCELLATION_DELAY; + } + s_upkeep[id].maxValidBlocknumber = uint32(height); + s_upkeepIDs.remove(id); + + // charge the cancellation fee if the minUpkeepSpend is not met + uint96 minUpkeepSpend = s_storage.minUpkeepSpend; + uint96 cancellationFee = 0; + // cancellationFee is supposed to be min(max(minUpkeepSpend - amountSpent,0), amountLeft) + if (upkeep.amountSpent < minUpkeepSpend) { + 
cancellationFee = minUpkeepSpend - upkeep.amountSpent; + if (cancellationFee > upkeep.balance) { + cancellationFee = upkeep.balance; + } + } + s_upkeep[id].balance = upkeep.balance - cancellationFee; + s_storage.ownerLinkBalance = s_storage.ownerLinkBalance + cancellationFee; + + emit UpkeepCanceled(id, uint64(height)); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function addFunds(uint256 id, uint96 amount) external { + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + + s_upkeep[id].balance = upkeep.balance + amount; + s_expectedLinkBalance = s_expectedLinkBalance + amount; + i_link.transferFrom(msg.sender, address(this), amount); + emit FundsAdded(id, msg.sender, amount); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function withdrawFunds(uint256 id, address to) external nonReentrant { + if (to == ZERO_ADDRESS) revert InvalidRecipient(); + Upkeep memory upkeep = s_upkeep[id]; + if (s_upkeepAdmin[id] != msg.sender) revert OnlyCallableByAdmin(); + if (upkeep.maxValidBlocknumber > _blockNum()) revert UpkeepNotCanceled(); + + uint96 amountToWithdraw = s_upkeep[id].balance; + s_expectedLinkBalance = s_expectedLinkBalance - amountToWithdraw; + s_upkeep[id].balance = 0; + i_link.transfer(to, amountToWithdraw); + emit FundsWithdrawn(id, amountToWithdraw, to); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external { + if (gasLimit < PERFORM_GAS_MIN || gasLimit > s_storage.maxPerformGas) revert GasLimitOutsideRange(); + _requireAdminAndNotCancelled(id); + s_upkeep[id].executeGas = gasLimit; + + emit UpkeepGasLimitSet(id, gasLimit); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function setUpkeepOffchainConfig(uint256 id, bytes calldata config) external { + _requireAdminAndNotCancelled(id); + + s_upkeepOffchainConfig[id] = config; + + emit 
UpkeepOffchainConfigSet(id, config); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function withdrawPayment(address from, address to) external { + if (to == ZERO_ADDRESS) revert InvalidRecipient(); + if (s_transmitterPayees[from] != msg.sender) revert OnlyCallableByPayee(); + + uint96 balance = _updateTransmitterBalanceFromPool(from, s_hotVars.totalPremium, uint96(s_transmittersList.length)); + s_transmitters[from].balance = 0; + s_expectedLinkBalance = s_expectedLinkBalance - balance; + + i_link.transfer(to, balance); + + emit PaymentWithdrawn(from, balance, to, msg.sender); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function transferPayeeship(address transmitter, address proposed) external { + if (s_transmitterPayees[transmitter] != msg.sender) revert OnlyCallableByPayee(); + if (proposed == msg.sender) revert ValueNotChanged(); + + if (s_proposedPayee[transmitter] != proposed) { + s_proposedPayee[transmitter] = proposed; + emit PayeeshipTransferRequested(transmitter, msg.sender, proposed); + } + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function acceptPayeeship(address transmitter) external { + if (s_proposedPayee[transmitter] != msg.sender) revert OnlyCallableByProposedPayee(); + address past = s_transmitterPayees[transmitter]; + s_transmitterPayees[transmitter] = msg.sender; + s_proposedPayee[transmitter] = ZERO_ADDRESS; + + emit PayeeshipTransferred(transmitter, past, msg.sender); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function transferUpkeepAdmin(uint256 id, address proposed) external { + _requireAdminAndNotCancelled(id); + if (proposed == msg.sender) revert ValueNotChanged(); + if (proposed == ZERO_ADDRESS) revert InvalidRecipient(); + + if (s_proposedAdmin[id] != proposed) { + s_proposedAdmin[id] = proposed; + emit UpkeepAdminTransferRequested(id, msg.sender, proposed); + } + } + + /** + * @dev Called through KeeperRegistry main contract + */ 
+ function acceptUpkeepAdmin(uint256 id) external { + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + if (s_proposedAdmin[id] != msg.sender) revert OnlyCallableByProposedAdmin(); + address past = s_upkeepAdmin[id]; + s_upkeepAdmin[id] = msg.sender; + s_proposedAdmin[id] = ZERO_ADDRESS; + + emit UpkeepAdminTransferred(id, past, msg.sender); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function pauseUpkeep(uint256 id) external { + _requireAdminAndNotCancelled(id); + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.paused) revert OnlyUnpausedUpkeep(); + s_upkeep[id].paused = true; + s_upkeepIDs.remove(id); + emit UpkeepPaused(id); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function unpauseUpkeep(uint256 id) external { + _requireAdminAndNotCancelled(id); + Upkeep memory upkeep = s_upkeep[id]; + if (!upkeep.paused) revert OnlyPausedUpkeep(); + s_upkeep[id].paused = false; + s_upkeepIDs.add(id); + emit UpkeepUnpaused(id); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function updateCheckData(uint256 id, bytes calldata newCheckData) external { + _requireAdminAndNotCancelled(id); + if (newCheckData.length > s_storage.maxCheckDataSize) revert CheckDataExceedsLimit(); + s_checkData[id] = newCheckData; + emit UpkeepCheckDataUpdated(id, newCheckData); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function migrateUpkeeps(uint256[] calldata ids, address destination) external { + if ( + s_peerRegistryMigrationPermission[destination] != MigrationPermission.OUTGOING && + s_peerRegistryMigrationPermission[destination] != MigrationPermission.BIDIRECTIONAL + ) revert MigrationNotPermitted(); + if (s_storage.transcoder == ZERO_ADDRESS) revert TranscoderNotSet(); + if (ids.length == 0) revert ArrayHasNoEntries(); + uint256 id; + Upkeep memory upkeep; + uint256 totalBalanceRemaining; + bytes[] memory checkDatas = new 
bytes[](ids.length); + address[] memory admins = new address[](ids.length); + Upkeep[] memory upkeeps = new Upkeep[](ids.length); + for (uint256 idx = 0; idx < ids.length; idx++) { + id = ids[idx]; + upkeep = s_upkeep[id]; + _requireAdminAndNotCancelled(id); + upkeeps[idx] = upkeep; + checkDatas[idx] = s_checkData[id]; + admins[idx] = s_upkeepAdmin[id]; + totalBalanceRemaining = totalBalanceRemaining + upkeep.balance; + delete s_upkeep[id]; + delete s_checkData[id]; + // nullify existing proposed admin change if an upkeep is being migrated + delete s_proposedAdmin[id]; + s_upkeepIDs.remove(id); + emit UpkeepMigrated(id, upkeep.balance, destination); + } + s_expectedLinkBalance = s_expectedLinkBalance - totalBalanceRemaining; + bytes memory encodedUpkeeps = abi.encode(ids, upkeeps, checkDatas, admins); + MigratableKeeperRegistryInterfaceV2(destination).receiveUpkeeps( + UpkeepTranscoderInterfaceV2(s_storage.transcoder).transcodeUpkeeps( + UPKEEP_VERSION_BASE, + MigratableKeeperRegistryInterfaceV2(destination).upkeepVersion(), + encodedUpkeeps + ) + ); + i_link.transfer(destination, totalBalanceRemaining); + } + + /** + * @dev Called through KeeperRegistry main contract + */ + function receiveUpkeeps(bytes calldata encodedUpkeeps) external { + if ( + s_peerRegistryMigrationPermission[msg.sender] != MigrationPermission.INCOMING && + s_peerRegistryMigrationPermission[msg.sender] != MigrationPermission.BIDIRECTIONAL + ) revert MigrationNotPermitted(); + (uint256[] memory ids, Upkeep[] memory upkeeps, bytes[] memory checkDatas, address[] memory upkeepAdmins) = abi + .decode(encodedUpkeeps, (uint256[], Upkeep[], bytes[], address[])); + for (uint256 idx = 0; idx < ids.length; idx++) { + _createUpkeep( + ids[idx], + upkeeps[idx].target, + upkeeps[idx].executeGas, + upkeepAdmins[idx], + upkeeps[idx].balance, + checkDatas[idx], + upkeeps[idx].paused + ); + emit UpkeepReceived(ids[idx], upkeeps[idx].balance, msg.sender); + } + } + + /** + * @notice creates a new upkeep with 
the given fields + * @param target address to perform upkeep on + * @param gasLimit amount of gas to provide the target contract when + * performing upkeep + * @param admin address to cancel upkeep and withdraw remaining funds + * @param checkData data passed to the contract when checking for upkeep + * @param paused if this upkeep is paused + */ + function _createUpkeep( + uint256 id, + address target, + uint32 gasLimit, + address admin, + uint96 balance, + bytes memory checkData, + bool paused + ) internal { + if (s_hotVars.paused) revert RegistryPaused(); + if (!target.isContract()) revert NotAContract(); + if (checkData.length > s_storage.maxCheckDataSize) revert CheckDataExceedsLimit(); + if (gasLimit < PERFORM_GAS_MIN || gasLimit > s_storage.maxPerformGas) revert GasLimitOutsideRange(); + if (s_upkeep[id].target != address(0)) revert UpkeepAlreadyExists(); + s_upkeep[id] = Upkeep({ + target: target, + executeGas: gasLimit, + balance: balance, + maxValidBlocknumber: UINT32_MAX, + lastPerformBlockNumber: 0, + amountSpent: 0, + paused: paused + }); + s_upkeepAdmin[id] = admin; + s_expectedLinkBalance = s_expectedLinkBalance + balance; + s_checkData[id] = checkData; + s_upkeepIDs.add(id); + } + + /** + * @dev ensures the upkeep is not cancelled and the caller is the upkeep admin + */ + function _requireAdminAndNotCancelled(uint256 upkeepId) internal view { + if (msg.sender != s_upkeepAdmin[upkeepId]) revert OnlyCallableByAdmin(); + if (s_upkeep[upkeepId].maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + } +} diff --git a/contracts/src/v0.8/automation/v2_0/UpkeepTranscoder3_0.sol b/contracts/src/v0.8/automation/v2_0/UpkeepTranscoder3_0.sol new file mode 100644 index 00000000..0a56f209 --- /dev/null +++ b/contracts/src/v0.8/automation/v2_0/UpkeepTranscoder3_0.sol @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +import "../../automation/interfaces/UpkeepTranscoderInterface.sol"; +import 
"../../interfaces/TypeAndVersionInterface.sol"; +import {Upkeep as UpkeepV1} from "../../automation/v1_2/KeeperRegistry1_2.sol"; +import {Upkeep as UpkeepV2} from "../../automation/v1_3/KeeperRegistryBase1_3.sol"; +import {Upkeep as UpkeepV3} from "../../automation/v2_0/KeeperRegistryBase2_0.sol"; +import "../../automation/UpkeepFormat.sol"; + +/** + * @notice UpkeepTranscoder 3_0 allows converting upkeep data from previous keeper registry versions 1.2 and 1.3 to + * registry 2.0 + */ +contract UpkeepTranscoder3_0 is UpkeepTranscoderInterface, TypeAndVersionInterface { + error InvalidTranscoding(); + + /** + * @notice versions: + * - UpkeepTranscoder 3.0.0: version 3.0.0 works with registry 2.0; adds temporary workaround for UpkeepFormat enum bug + */ + string public constant override typeAndVersion = "UpkeepTranscoder 3.0.0"; + uint32 internal constant UINT32_MAX = type(uint32).max; + + /** + * @notice transcodeUpkeeps transforms upkeep data from the format expected by + * one registry to the format expected by another. It future-proofs migrations + * by allowing keepers team to customize migration paths and set sensible defaults + * when new fields are added + * @param fromVersion struct version the upkeep is migrating from + * @param encodedUpkeeps encoded upkeep data + * @dev this transcoder should ONLY be used for V1/V2 --> V3 migrations + * @dev this transcoder **ignores** the toVersion param, as it assumes all migrations are + * for the V3 version. Therefore, it is the responsibility of the deployer of this contract + * to ensure it is not used in any other migration paths. + */ + function transcodeUpkeeps( + UpkeepFormat fromVersion, + UpkeepFormat, + bytes calldata encodedUpkeeps + ) external view override returns (bytes memory) { + // this transcoder only handles upkeep V1/V2 to V3, all other formats are invalid. 
+ if (fromVersion == UpkeepFormat.V1) { + (uint256[] memory ids, UpkeepV1[] memory upkeepsV1, bytes[] memory checkDatas) = abi.decode( + encodedUpkeeps, + (uint256[], UpkeepV1[], bytes[]) + ); + + if (ids.length != upkeepsV1.length || ids.length != checkDatas.length) { + revert InvalidTranscoding(); + } + + address[] memory admins = new address[](ids.length); + UpkeepV3[] memory newUpkeeps = new UpkeepV3[](ids.length); + UpkeepV1 memory upkeepV1; + for (uint256 idx = 0; idx < ids.length; idx++) { + upkeepV1 = upkeepsV1[idx]; + newUpkeeps[idx] = UpkeepV3({ + executeGas: upkeepV1.executeGas, + maxValidBlocknumber: UINT32_MAX, // maxValidBlocknumber is uint64 in V1, hence a new default value is provided + paused: false, // migrated upkeeps are not paused by default + target: upkeepV1.target, + amountSpent: upkeepV1.amountSpent, + balance: upkeepV1.balance, + lastPerformBlockNumber: 0 + }); + admins[idx] = upkeepV1.admin; + } + return abi.encode(ids, newUpkeeps, checkDatas, admins); + } + + if (fromVersion == UpkeepFormat.V2) { + (uint256[] memory ids, UpkeepV2[] memory upkeepsV2, bytes[] memory checkDatas) = abi.decode( + encodedUpkeeps, + (uint256[], UpkeepV2[], bytes[]) + ); + + if (ids.length != upkeepsV2.length || ids.length != checkDatas.length) { + revert InvalidTranscoding(); + } + + address[] memory admins = new address[](ids.length); + UpkeepV3[] memory newUpkeeps = new UpkeepV3[](ids.length); + UpkeepV2 memory upkeepV2; + for (uint256 idx = 0; idx < ids.length; idx++) { + upkeepV2 = upkeepsV2[idx]; + newUpkeeps[idx] = UpkeepV3({ + executeGas: upkeepV2.executeGas, + maxValidBlocknumber: upkeepV2.maxValidBlocknumber, + paused: upkeepV2.paused, + target: upkeepV2.target, + amountSpent: upkeepV2.amountSpent, + balance: upkeepV2.balance, + lastPerformBlockNumber: 0 + }); + admins[idx] = upkeepV2.admin; + } + return abi.encode(ids, newUpkeeps, checkDatas, admins); + } + + revert InvalidTranscoding(); + } +} diff --git 
a/contracts/src/v0.8/automation/v2_1/AutomationRegistrar2_1.sol b/contracts/src/v0.8/automation/v2_1/AutomationRegistrar2_1.sol new file mode 100644 index 00000000..66d0d3dc --- /dev/null +++ b/contracts/src/v0.8/automation/v2_1/AutomationRegistrar2_1.sol @@ -0,0 +1,532 @@
// SPDX-License-Identifier: BUSL-1.1
pragma solidity 0.8.16;

import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol";
import {IKeeperRegistryMaster} from "../interfaces/v2_1/IKeeperRegistryMaster.sol";
import {TypeAndVersionInterface} from "../../interfaces/TypeAndVersionInterface.sol";
import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol";
import {IERC677Receiver} from "../../shared/interfaces/IERC677Receiver.sol";

/**
 * @notice Contract to accept requests for upkeep registrations
 * @dev There are 2 registration workflows in this contract
 * Flow 1. auto approve OFF / manual registration - UI calls `register` function on this contract, this contract owner at a later time then manually
 * calls `approve` to register upkeep and emit events to inform UI and others interested.
 * Flow 2. auto approve ON / real time registration - UI calls `register` function as before, which calls the `registerUpkeep` function directly on
 * keeper registry and then emits approved event to finish the flow automatically without manual intervention.
 * The idea is to have same interface(functions,events) for UI or anyone using this contract irrespective of auto approve being enabled or not.
 * they can just listen to `RegistrationRequested` & `RegistrationApproved` events and know the status on registrations.
 */
contract AutomationRegistrar2_1 is TypeAndVersionInterface, ConfirmedOwner, IERC677Receiver {
  /**
   * DISABLED: No auto approvals, all new upkeeps should be approved manually.
   * ENABLED_SENDER_ALLOWLIST: Auto approvals for allowed senders subject to max allowed. Manual for rest.
   * ENABLED_ALL: Auto approvals for all new upkeeps subject to max allowed.
   */
  enum AutoApproveType {
    DISABLED,
    ENABLED_SENDER_ALLOWLIST,
    ENABLED_ALL
  }

  // 4-byte function selector of register(); used by permittedFunctionsForPLI to ensure
  // payloads delivered via the token's transferAndCall can only invoke register.
  bytes4 private constant REGISTER_REQUEST_SELECTOR = this.register.selector;

  // request hash => pending (not yet approved/cancelled) registration request
  mapping(bytes32 => PendingRequest) private s_pendingRequests;
  // trigger type => auto-approval configuration and counters for that type
  mapping(uint8 => TriggerRegistrationStorage) private s_triggerRegistrations;

  // The PLI/LINK token used to fund registrations; set once in the constructor.
  LinkTokenInterface public immutable PLI;

  /**
   * @notice versions:
   * - KeeperRegistrar 2.1.0: Update for compatability with registry 2.1.0
   *                          Add auto approval levels by type
   * - KeeperRegistrar 2.0.0: Remove source from register
   *                          Breaks our example of "Register an Upkeep using your own deployed contract"
   * - KeeperRegistrar 1.1.0: Add functionality for sender allowlist in auto approve
   *                        : Remove rate limit and add max allowed for auto approve
   * - KeeperRegistrar 1.0.0: initial release
   */
  string public constant override typeAndVersion = "AutomationRegistrar 2.1.0";

  /**
   * @notice TriggerRegistrationStorage stores the auto-approval levels for upkeeps by type
   * @member autoApproveType the auto approval setting (see enum)
   * @member autoApproveMaxAllowed the max number of upkeeps that can be auto approved of this type
   * @member approvedCount the count of upkeeps auto approved of this type
   */
  struct TriggerRegistrationStorage {
    AutoApproveType autoApproveType;
    uint32 autoApproveMaxAllowed;
    uint32 approvedCount;
  }

  /**
   * @notice InitialTriggerConfig configures the auto-approval levels for upkeeps by trigger type
   * @dev this struct is only used in the constructor to set the initial values for various trigger configs
   * @member triggerType the upkeep type to configure
   * @member autoApproveType the auto approval setting (see enum)
   * @member autoApproveMaxAllowed the max number of upkeeps that can be auto approved of this type
   */
  struct InitialTriggerConfig {
    uint8 triggerType;
    AutoApproveType autoApproveType;
    uint32 autoApproveMaxAllowed;
  }

  // Registry address and minimum funding, packed together as the registrar's config.
  struct RegistrarConfig {
    IKeeperRegistryMaster keeperRegistry;
    uint96 minPLIJuels;
  }

  // Admin of a pending request plus the PLI balance accumulated for it so far.
  struct PendingRequest {
    address admin;
    uint96 balance;
  }

  // Full set of parameters describing one upkeep registration request.
  struct RegistrationParams {
    string name;
    bytes encryptedEmail;
    address upkeepContract;
    uint32 gasLimit;
    address adminAddress;
    uint8 triggerType;
    bytes checkData;
    bytes triggerConfig;
    bytes offchainConfig;
    uint96 amount;
  }

  RegistrarConfig private s_config;
  // Only applicable if s_config.configType is ENABLED_SENDER_ALLOWLIST
  mapping(address => bool) private s_autoApproveAllowedSenders;

  event RegistrationRequested(
    bytes32 indexed hash,
    string name,
    bytes encryptedEmail,
    address indexed upkeepContract,
    uint32 gasLimit,
    address adminAddress,
    uint8 triggerType,
    bytes triggerConfig,
    bytes offchainConfig,
    bytes checkData,
    uint96 amount
  );

  event RegistrationApproved(bytes32 indexed hash, string displayName, uint256 indexed upkeepId);

  event RegistrationRejected(bytes32 indexed hash);

  event AutoApproveAllowedSenderSet(address indexed senderAddress, bool allowed);

  event ConfigChanged(address keeperRegistry, uint96 minPLIJuels);

  event TriggerConfigSet(uint8 triggerType, AutoApproveType autoApproveType, uint32 autoApproveMaxAllowed);

  error InvalidAdminAddress();
  error RequestNotFound();
  error HashMismatch();
  error OnlyAdminOrOwner();
  error InsufficientPayment();
  error RegistrationRequestFailed();
  error OnlyLink();
  error AmountMismatch();
  error SenderMismatch();
  error FunctionNotPermitted();
  error LinkTransferFailed(address to);
  error InvalidDataLength();

  /**
   * @param PLIAddress Address of Link token
   * @param keeperRegistry keeper registry address
   * @param minPLIJuels minimum PLI that new registrations should fund their upkeep with
   * @param triggerConfigs the initial config for individual triggers
   */
  constructor(
    address PLIAddress,
    address keeperRegistry,
    uint96 minPLIJuels,
    InitialTriggerConfig[] memory triggerConfigs
  ) ConfirmedOwner(msg.sender) {
    PLI = LinkTokenInterface(PLIAddress);
    setConfig(keeperRegistry, minPLIJuels);
    for (uint256 idx = 0; idx < triggerConfigs.length; idx++) {
      setTriggerConfig(
        triggerConfigs[idx].triggerType,
        triggerConfigs[idx].autoApproveType,
        triggerConfigs[idx].autoApproveMaxAllowed
      );
    }
  }

  //EXTERNAL

  /**
   * @notice register can only be called through transferAndCall on PLI contract
   * @dev reached via delegatecall from onTokenTransfer, so msg.sender is still the PLI
   * token (enforced by onlyPLI) and the funds have already arrived at this contract
   * @param name string of the upkeep to be registered
   * @param encryptedEmail email address of upkeep contact
   * @param upkeepContract address to perform upkeep on
   * @param gasLimit amount of gas to provide the target contract when performing upkeep
   * @param adminAddress address to cancel upkeep and withdraw remaining funds
   * @param triggerType the type of trigger for the upkeep
   * @param checkData data passed to the contract when checking for upkeep
   * @param triggerConfig the config for the trigger
   * @param offchainConfig offchainConfig for upkeep in bytes
   * @param amount quantity of PLI upkeep is funded with (specified in Juels)
   * @param sender address of the sender making the request
   */
  function register(
    string memory name,
    bytes calldata encryptedEmail,
    address upkeepContract,
    uint32 gasLimit,
    address adminAddress,
    uint8 triggerType,
    bytes memory checkData,
    bytes memory triggerConfig,
    bytes memory offchainConfig,
    uint96 amount,
    address sender
  ) external onlyPLI {
    _register(
      RegistrationParams({
        name: name,
        encryptedEmail: encryptedEmail,
        upkeepContract: upkeepContract,
        gasLimit: gasLimit,
        adminAddress: adminAddress,
        triggerType: triggerType,
        checkData: checkData,
        triggerConfig: triggerConfig,
        offchainConfig: offchainConfig,
        amount: amount
      }),
      sender
    );
  }

  /**
   * @notice Allows external users to register upkeeps; assumes amount is approved for transfer by the contract
   * @dev NOTE(review): the transferFrom return value is not checked here; this is safe for
   * tokens that revert on failure — confirm the PLI token used always reverts rather than
   * returning false
   * @param requestParams struct of all possible registration parameters
   */
  function registerUpkeep(RegistrationParams calldata requestParams) external returns (uint256) {
    if (requestParams.amount < s_config.minPLIJuels) {
      revert InsufficientPayment();
    }

    PLI.transferFrom(msg.sender, address(this), requestParams.amount);

    return _register(requestParams, msg.sender);
  }

  /**
   * @dev register upkeep on KeeperRegistry contract and emit RegistrationApproved event
   * @dev only the owner can approve; the supplied fields must hash to the stored request
   * hash so the owner cannot approve parameters different from what was requested
   */
  function approve(
    string memory name,
    address upkeepContract,
    uint32 gasLimit,
    address adminAddress,
    uint8 triggerType,
    bytes calldata checkData,
    bytes memory triggerConfig,
    bytes calldata offchainConfig,
    bytes32 hash
  ) external onlyOwner {
    PendingRequest memory request = s_pendingRequests[hash];
    if (request.admin == address(0)) {
      revert RequestNotFound();
    }
    bytes32 expectedHash = keccak256(
      abi.encode(upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig)
    );
    if (hash != expectedHash) {
      revert HashMismatch();
    }
    // delete before the external registry call to prevent double-approval
    delete s_pendingRequests[hash];
    _approve(
      RegistrationParams({
        name: name,
        encryptedEmail: "",
        upkeepContract: upkeepContract,
        gasLimit: gasLimit,
        adminAddress: adminAddress,
        triggerType: triggerType,
        checkData: checkData,
        triggerConfig: triggerConfig,
        offchainConfig: offchainConfig,
        amount: request.balance
      }),
      expectedHash
    );
  }

  /**
   * @notice cancel will remove a registration request and return the refunds to the request.admin
   * @dev NOTE(review): for a nonexistent hash, request.admin is address(0), so a non-owner
   * caller reverts with OnlyAdminOrOwner before reaching the RequestNotFound check; only
   * the owner observes RequestNotFound
   * @param hash the request hash
   */
  function cancel(bytes32 hash) external {
    PendingRequest memory request = s_pendingRequests[hash];
    if (!(msg.sender == request.admin || msg.sender == owner())) {
      revert OnlyAdminOrOwner();
    }
    if (request.admin == address(0)) {
      revert RequestNotFound();
    }
    // delete before the external token transfer (checks-effects-interactions)
    delete s_pendingRequests[hash];
    bool success = PLI.transfer(request.admin, request.balance);
    if (!success) {
      revert LinkTransferFailed(request.admin);
    }
    emit RegistrationRejected(hash);
  }

  /**
   * @notice owner calls this function to set contract config
   * @param keeperRegistry new keeper registry address
   * @param minPLIJuels minimum PLI that new registrations should fund their upkeep with
   */
  function setConfig(address keeperRegistry, uint96 minPLIJuels) public onlyOwner {
    s_config = RegistrarConfig({minPLIJuels: minPLIJuels, keeperRegistry: IKeeperRegistryMaster(keeperRegistry)});
    emit ConfigChanged(keeperRegistry, minPLIJuels);
  }

  /**
   * @notice owner calls to set the config for this upkeep type
   * @dev approvedCount is intentionally left untouched so existing auto-approvals still
   * count against any new maximum
   * @param triggerType the upkeep type to configure
   * @param autoApproveType the auto approval setting (see enum)
   * @param autoApproveMaxAllowed the max number of upkeeps that can be auto approved of this type
   */
  function setTriggerConfig(
    uint8 triggerType,
    AutoApproveType autoApproveType,
    uint32 autoApproveMaxAllowed
  ) public onlyOwner {
    s_triggerRegistrations[triggerType].autoApproveType = autoApproveType;
    s_triggerRegistrations[triggerType].autoApproveMaxAllowed = autoApproveMaxAllowed;
    emit TriggerConfigSet(triggerType, autoApproveType, autoApproveMaxAllowed);
  }

  /**
   * @notice owner calls this function to set allowlist status for senderAddress
   * @param senderAddress senderAddress to set the allowlist status for
   * @param allowed true if senderAddress needs to be added to allowlist, false if needs to be removed
   */
  function setAutoApproveAllowedSender(address senderAddress, bool allowed) external onlyOwner {
    s_autoApproveAllowedSenders[senderAddress] = allowed;

    emit AutoApproveAllowedSenderSet(senderAddress, allowed);
  }

  /**
   * @notice read the allowlist status of senderAddress
   * @param senderAddress address to read the allowlist status for
   */
  function getAutoApproveAllowedSender(address senderAddress) external view returns (bool) {
    return s_autoApproveAllowedSenders[senderAddress];
  }

  /**
   * @notice read the current registration configuration
   */
  function getConfig() external view returns (address keeperRegistry, uint256 minPLIJuels) {
    RegistrarConfig memory config = s_config;
    return (address(config.keeperRegistry), config.minPLIJuels);
  }

  /**
   * @notice read the config for this upkeep type
   * @param triggerType upkeep type to read config for
   */
  function getTriggerRegistrationDetails(uint8 triggerType) external view returns (TriggerRegistrationStorage memory) {
    return s_triggerRegistrations[triggerType];
  }

  /**
   * @notice gets the admin address and the current balance of a registration request
   */
  function getPendingRequest(bytes32 hash) external view returns (address, uint96) {
    PendingRequest memory request = s_pendingRequests[hash];
    return (request.admin, request.balance);
  }

  /**
   * @notice Called when PLI is sent to the contract via `transferAndCall`
   * @dev the payload is forwarded to this contract via delegatecall; the modifiers
   * guarantee it is a register() call whose encoded amount/sender arguments match the
   * actual token transfer, so the data cannot claim funds it did not bring
   * @param sender Address of the sender transfering PLI
   * @param amount Amount of PLI sent (specified in Juels)
   * @param data Payload of the transaction
   */
  function onTokenTransfer(
    address sender,
    uint256 amount,
    bytes calldata data
  )
    external
    override
    onlyPLI
    permittedFunctionsForPLI(data)
    isActualAmount(amount, data)
    isActualSender(sender, data)
  {
    if (amount < s_config.minPLIJuels) {
      revert InsufficientPayment();
    }
    (bool success, ) = address(this).delegatecall(data);
    // calls register
    if (!success) {
      revert RegistrationRequestFailed();
    }
  }

  // ================================================================
  // |                           PRIVATE                            |
  // ================================================================

  /**
   * @dev verify registration request and emit RegistrationRequested event
   * @dev when auto-approval does not apply, the amount is accumulated onto any existing
   * pending request with the same hash (repeat requests top up the balance); the returned
   * upkeep ID is 0 in that case
   */
  function _register(RegistrationParams memory params, address sender) private returns (uint256) {
    if (params.adminAddress == address(0)) {
      revert InvalidAdminAddress();
    }
    bytes32 hash = keccak256(
      abi.encode(
        params.upkeepContract,
        params.gasLimit,
        params.adminAddress,
        params.triggerType,
        params.checkData,
        params.triggerConfig,
        params.offchainConfig
      )
    );

    emit RegistrationRequested(
      hash,
      params.name,
      params.encryptedEmail,
      params.upkeepContract,
      params.gasLimit,
      params.adminAddress,
      params.triggerType,
      params.triggerConfig,
      params.offchainConfig,
      params.checkData,
      params.amount
    );

    uint256 upkeepId;
    if (_shouldAutoApprove(s_triggerRegistrations[params.triggerType], sender)) {
      s_triggerRegistrations[params.triggerType].approvedCount++;
      upkeepId = _approve(params, hash);
    } else {
      uint96 newBalance = s_pendingRequests[hash].balance + params.amount;
      s_pendingRequests[hash] = PendingRequest({admin: params.adminAddress, balance: newBalance});
    }

    return upkeepId;
  }

  /**
   * @dev register upkeep on KeeperRegistry contract and emit RegistrationApproved event
   * @dev funds the newly created upkeep by transferAndCall-ing the PLI to the registry
   * with the upkeep ID as payload
   */
  function _approve(RegistrationParams memory params, bytes32 hash) private returns (uint256) {
    IKeeperRegistryMaster keeperRegistry = s_config.keeperRegistry;
    uint256 upkeepId = keeperRegistry.registerUpkeep(
      params.upkeepContract,
      params.gasLimit,
      params.adminAddress,
      params.triggerType,
      params.checkData,
      params.triggerConfig,
      params.offchainConfig
    );
    bool success = PLI.transferAndCall(address(keeperRegistry), params.amount, abi.encode(upkeepId));
    if (!success) {
      revert LinkTransferFailed(address(keeperRegistry));
    }
    emit RegistrationApproved(hash, params.name, upkeepId);
    return upkeepId;
  }

  /**
   * @dev verify sender allowlist if needed and check max limit
   */
  function _shouldAutoApprove(TriggerRegistrationStorage memory config, address sender) private view returns (bool) {
    if (config.autoApproveType == AutoApproveType.DISABLED) {
      return false;
    }
    if (config.autoApproveType == AutoApproveType.ENABLED_SENDER_ALLOWLIST && (!s_autoApproveAllowedSenders[sender])) {
      return false;
    }
    if (config.approvedCount < config.autoApproveMaxAllowed) {
      return true;
    }
    return false;
  }

  // ================================================================
  // |                          MODIFIERS                           |
  // ================================================================

  /**
   * @dev Reverts if not sent from the PLI token
   */
  modifier onlyPLI() {
    if (msg.sender != address(PLI)) {
      revert OnlyLink();
    }
    _;
  }

  /**
   * @dev Reverts if the given data does not begin with the `register` function selector
   * @param _data The data payload of the request
   */
  modifier permittedFunctionsForPLI(bytes memory _data) {
    bytes4 funcSelector;
    assembly {
      // solhint-disable-next-line avoid-low-level-calls
      funcSelector := mload(add(_data, 32)) // First 32 bytes contain length of data
    }
    if (funcSelector != REGISTER_REQUEST_SELECTOR) {
      revert FunctionNotPermitted();
    }
    _;
  }

  /**
   * @dev Reverts if the actual amount passed does not match the expected amount
   * @dev decodes the register() calldata (skipping the 4-byte selector) and compares its
   * amount argument against the amount actually transferred
   * @param expected amount that should match the actual amount
   * @param data bytes
   */
  modifier isActualAmount(uint256 expected, bytes calldata data) {
    // decode register function arguments to get actual amount
    (, , , , , , , , , uint96 amount, ) = abi.decode(
      data[4:],
      (string, bytes, address, uint32, address, uint8, bytes, bytes, bytes, uint96, address)
    );
    if (expected != amount) {
      revert AmountMismatch();
    }
    _;
  }

  /**
   * @dev Reverts if the actual sender address does not match the expected sender address
   * @dev same decoding as isActualAmount, but comparing the trailing sender argument
   * @param expected address that should match the actual sender address
   * @param data bytes
   */
  modifier isActualSender(address expected, bytes calldata data) {
    // decode register function arguments to get actual sender
    (, , , , , , , , , , address sender) = abi.decode(
      data[4:],
      (string, bytes, address, uint32, address, uint8, bytes, bytes, bytes, uint96, address)
    );
    if (expected != sender) {
      revert SenderMismatch();
    }
    _;
  }
}
diff --git a/contracts/src/v0.8/automation/v2_1/AutomationUtils2_1.sol
b/contracts/src/v0.8/automation/v2_1/AutomationUtils2_1.sol new file mode 100644 index 00000000..f6ba913b --- /dev/null +++ b/contracts/src/v0.8/automation/v2_1/AutomationUtils2_1.sol @@ -0,0 +1,40 @@
// SPDX-License-Identifier: BUSL-1.1
pragma solidity 0.8.16;

import {KeeperRegistryBase2_1} from "./KeeperRegistryBase2_1.sol";
import {Log} from "../interfaces/ILogAutomation.sol";

/**
 * @notice this file exposes structs that are otherwise internal to the automation registry
 * doing this allows those structs to be encoded and decoded with type safety in offchain code
 * and tests because generated wrappers are made available
 */

/**
 * @notice structure of trigger for log triggers
 */
struct LogTriggerConfig {
  address contractAddress;
  uint8 filterSelector; // denotes which topics apply to filter ex 000, 101, 111...only last 3 bits apply
  bytes32 topic0;
  bytes32 topic1;
  bytes32 topic2;
  bytes32 topic3;
}

/**
 * @notice ABI-surface-only contract: every function body is intentionally empty.
 * The functions exist solely so that code generators emit typed encode/decode
 * wrappers for the registry's internal structs; none of them is ever meant to be
 * called for its effect. The hex comments are the generated 4-byte selectors.
 */
contract AutomationUtils2_1 {
  /**
   * @dev this can be removed as OnchainConfig is now exposed directly from the registry
   */
  function _onChainConfig(KeeperRegistryBase2_1.OnchainConfig memory) external {} // 0x2ff92a81

  // exposes the Report struct for typed offchain encoding/decoding
  function _report(KeeperRegistryBase2_1.Report memory) external {} // 0xe65d6546

  // exposes the LogTriggerConfig struct declared above
  function _logTriggerConfig(LogTriggerConfig memory) external {} // 0x21f373d7

  // exposes the LogTrigger struct
  function _logTrigger(KeeperRegistryBase2_1.LogTrigger memory) external {} // 0x1c8d8260

  // exposes the ConditionalTrigger struct
  function _conditionalTrigger(KeeperRegistryBase2_1.ConditionalTrigger memory) external {} // 0x4b6df294

  // exposes the Log struct from ILogAutomation
  function _log(Log memory) external {} // 0xe9720a49
}
diff --git a/contracts/src/v0.8/automation/v2_1/KeeperRegistry2_1.sol b/contracts/src/v0.8/automation/v2_1/KeeperRegistry2_1.sol new file mode 100644 index 00000000..e87d5a9b --- /dev/null +++ b/contracts/src/v0.8/automation/v2_1/KeeperRegistry2_1.sol @@ -0,0 +1,396 @@
// SPDX-License-Identifier: BUSL-1.1
pragma solidity 0.8.16;

import {EnumerableSet} from
"../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import {Address} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; +import {KeeperRegistryBase2_1} from "./KeeperRegistryBase2_1.sol"; +import {KeeperRegistryLogicB2_1} from "./KeeperRegistryLogicB2_1.sol"; +import {Chainable} from "../Chainable.sol"; +import {IERC677Receiver} from "../../shared/interfaces/IERC677Receiver.sol"; +import {OCR2Abstract} from "../../shared/ocr2/OCR2Abstract.sol"; + +/** + * @notice Registry for adding work for Plugin Keepers to perform on client + * contracts. Clients must support the Upkeep interface. + */ +contract KeeperRegistry2_1 is KeeperRegistryBase2_1, OCR2Abstract, Chainable, IERC677Receiver { + using Address for address; + using EnumerableSet for EnumerableSet.UintSet; + using EnumerableSet for EnumerableSet.AddressSet; + + /** + * @notice versions: + * - KeeperRegistry 2.1.0: introduces support for log, cron, and ready triggers + : removes the need for "wrapped perform data" + * - KeeperRegistry 2.0.2: pass revert bytes as performData when target contract reverts + * : fixes issue with arbitrum block number + * : does an early return in case of stale report instead of revert + * - KeeperRegistry 2.0.1: implements workaround for buggy migrate function in 1.X + * - KeeperRegistry 2.0.0: implement OCR interface + * - KeeperRegistry 1.3.0: split contract into Proxy and Logic + * : account for Arbitrum and Optimism L1 gas fee + * : allow users to configure upkeeps + * - KeeperRegistry 1.2.0: allow funding within performUpkeep + * : allow configurable registry maxPerformGas + * : add function to let admin change upkeep gas limit + * : add minUpkeepSpend requirement + * : upgrade to solidity v0.8 + * - KeeperRegistry 1.1.0: added flatFeeMicroLink + * - KeeperRegistry 1.0.0: initial release + */ + string public constant override typeAndVersion = "KeeperRegistry 2.1.0"; + + /** + * @param logicA the address of the first 
logic contract, but cast as logicB in order to call logicB functions + */ + constructor( + KeeperRegistryLogicB2_1 logicA + ) + KeeperRegistryBase2_1( + logicA.getMode(), + logicA.getLinkAddress(), + logicA.getLinkNativeFeedAddress(), + logicA.getFastGasFeedAddress(), + logicA.getAutomationForwarderLogic() + ) + Chainable(address(logicA)) + {} + + // ================================================================ + // | ACTIONS | + // ================================================================ + + /** + * @inheritdoc OCR2Abstract + */ + function transmit( + bytes32[3] calldata reportContext, + bytes calldata rawReport, + bytes32[] calldata rs, + bytes32[] calldata ss, + bytes32 rawVs + ) external override { + uint256 gasOverhead = gasleft(); + HotVars memory hotVars = s_hotVars; + + if (hotVars.paused) revert RegistryPaused(); + if (!s_transmitters[msg.sender].active) revert OnlyActiveTransmitters(); + + // Verify signatures + if (s_latestConfigDigest != reportContext[0]) revert ConfigDigestMismatch(); + if (rs.length != hotVars.f + 1 || rs.length != ss.length) revert IncorrectNumberOfSignatures(); + _verifyReportSignature(reportContext, rawReport, rs, ss, rawVs); + + Report memory report = _decodeReport(rawReport); + UpkeepTransmitInfo[] memory upkeepTransmitInfo = new UpkeepTransmitInfo[](report.upkeepIds.length); + uint16 numUpkeepsPassedChecks; + + for (uint256 i = 0; i < report.upkeepIds.length; i++) { + upkeepTransmitInfo[i].upkeep = s_upkeep[report.upkeepIds[i]]; + upkeepTransmitInfo[i].triggerType = _getTriggerType(report.upkeepIds[i]); + upkeepTransmitInfo[i].maxLinkPayment = _getMaxLinkPayment( + hotVars, + upkeepTransmitInfo[i].triggerType, + uint32(report.gasLimits[i]), + uint32(report.performDatas[i].length), + report.fastGasWei, + report.linkNative, + true + ); + (upkeepTransmitInfo[i].earlyChecksPassed, upkeepTransmitInfo[i].dedupID) = _prePerformChecks( + report.upkeepIds[i], + report.triggers[i], + upkeepTransmitInfo[i] + ); + + if 
(upkeepTransmitInfo[i].earlyChecksPassed) { + numUpkeepsPassedChecks += 1; + } else { + continue; + } + + // Actually perform the target upkeep + (upkeepTransmitInfo[i].performSuccess, upkeepTransmitInfo[i].gasUsed) = _performUpkeep( + upkeepTransmitInfo[i].upkeep.forwarder, + report.gasLimits[i], + report.performDatas[i] + ); + + // Deduct that gasUsed by upkeep from our running counter + gasOverhead -= upkeepTransmitInfo[i].gasUsed; + + // Store last perform block number / deduping key for upkeep + _updateTriggerMarker(report.upkeepIds[i], upkeepTransmitInfo[i]); + } + // No upkeeps to be performed in this report + if (numUpkeepsPassedChecks == 0) { + return; + } + + // This is the overall gas overhead that will be split across performed upkeeps + // Take upper bound of 16 gas per callData bytes, which is approximated to be reportLength + // Rest of msg.data is accounted for in accounting overheads + gasOverhead = + (gasOverhead - gasleft() + 16 * rawReport.length) + + ACCOUNTING_FIXED_GAS_OVERHEAD + + (ACCOUNTING_PER_SIGNER_GAS_OVERHEAD * (hotVars.f + 1)); + gasOverhead = gasOverhead / numUpkeepsPassedChecks + ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD; + + uint96 totalReimbursement; + uint96 totalPremium; + { + uint96 reimbursement; + uint96 premium; + for (uint256 i = 0; i < report.upkeepIds.length; i++) { + if (upkeepTransmitInfo[i].earlyChecksPassed) { + upkeepTransmitInfo[i].gasOverhead = _getCappedGasOverhead( + gasOverhead, + upkeepTransmitInfo[i].triggerType, + uint32(report.performDatas[i].length), + hotVars.f + ); + + (reimbursement, premium) = _postPerformPayment( + hotVars, + report.upkeepIds[i], + upkeepTransmitInfo[i], + report.fastGasWei, + report.linkNative, + numUpkeepsPassedChecks + ); + totalPremium += premium; + totalReimbursement += reimbursement; + + emit UpkeepPerformed( + report.upkeepIds[i], + upkeepTransmitInfo[i].performSuccess, + reimbursement + premium, + upkeepTransmitInfo[i].gasUsed, + upkeepTransmitInfo[i].gasOverhead, + 
report.triggers[i] + ); + } + } + } + // record payments + s_transmitters[msg.sender].balance += totalReimbursement; + s_hotVars.totalPremium += totalPremium; + + uint40 epochAndRound = uint40(uint256(reportContext[1])); + uint32 epoch = uint32(epochAndRound >> 8); + if (epoch > hotVars.latestEpoch) { + s_hotVars.latestEpoch = epoch; + } + } + + /** + * @notice simulates the upkeep with the perform data returned from checkUpkeep + * @param id identifier of the upkeep to execute the data with. + * @param performData calldata parameter to be passed to the target upkeep. + * @return success whether the call reverted or not + * @return gasUsed the amount of gas the target contract consumed + */ + function simulatePerformUpkeep( + uint256 id, + bytes calldata performData + ) external cannotExecute returns (bool success, uint256 gasUsed) { + if (s_hotVars.paused) revert RegistryPaused(); + Upkeep memory upkeep = s_upkeep[id]; + (success, gasUsed) = _performUpkeep(upkeep.forwarder, upkeep.performGas, performData); + return (success, gasUsed); + } + + /** + * @notice uses PLI's transferAndCall to PLI and add funding to an upkeep + * @dev safe to cast uint256 to uint96 as total PLI supply is under UINT96MAX + * @param sender the account which transferred the funds + * @param amount number of PLI transfer + */ + function onTokenTransfer(address sender, uint256 amount, bytes calldata data) external override { + if (msg.sender != address(i_link)) revert OnlyCallableByPLIToken(); + if (data.length != 32) revert InvalidDataLength(); + uint256 id = abi.decode(data, (uint256)); + if (s_upkeep[id].maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + s_upkeep[id].balance = s_upkeep[id].balance + uint96(amount); + s_expectedLinkBalance = s_expectedLinkBalance + amount; + emit FundsAdded(id, sender, uint96(amount)); + } + + // ================================================================ + // | SETTERS | + // 
================================================================ + + /** + * @inheritdoc OCR2Abstract + * @dev prefer the type-safe version of setConfig (below) whenever possible + */ + function setConfig( + address[] memory signers, + address[] memory transmitters, + uint8 f, + bytes memory onchainConfigBytes, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) external override { + setConfigTypeSafe( + signers, + transmitters, + f, + abi.decode(onchainConfigBytes, (OnchainConfig)), + offchainConfigVersion, + offchainConfig + ); + } + + function setConfigTypeSafe( + address[] memory signers, + address[] memory transmitters, + uint8 f, + OnchainConfig memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) public onlyOwner { + if (signers.length > MAX_NUM_ORACLES) revert TooManyOracles(); + if (f == 0) revert IncorrectNumberOfFaultyOracles(); + if (signers.length != transmitters.length || signers.length <= 3 * f) revert IncorrectNumberOfSigners(); + + // move all pooled payments out of the pool to each transmitter's balance + uint96 totalPremium = s_hotVars.totalPremium; + uint96 oldLength = uint96(s_transmittersList.length); + for (uint256 i = 0; i < oldLength; i++) { + _updateTransmitterBalanceFromPool(s_transmittersList[i], totalPremium, oldLength); + } + + // remove any old signer/transmitter addresses + address signerAddress; + address transmitterAddress; + for (uint256 i = 0; i < oldLength; i++) { + signerAddress = s_signersList[i]; + transmitterAddress = s_transmittersList[i]; + delete s_signers[signerAddress]; + // Do not delete the whole transmitter struct as it has balance information stored + s_transmitters[transmitterAddress].active = false; + } + delete s_signersList; + delete s_transmittersList; + + // add new signer/transmitter addresses + { + Transmitter memory transmitter; + address temp; + for (uint256 i = 0; i < signers.length; i++) { + if (s_signers[signers[i]].active) revert RepeatedSigner(); + if 
(signers[i] == ZERO_ADDRESS) revert InvalidSigner(); + s_signers[signers[i]] = Signer({active: true, index: uint8(i)}); + + temp = transmitters[i]; + if (temp == ZERO_ADDRESS) revert InvalidTransmitter(); + transmitter = s_transmitters[temp]; + if (transmitter.active) revert RepeatedTransmitter(); + transmitter.active = true; + transmitter.index = uint8(i); + // new transmitters start afresh from current totalPremium + // some spare change of premium from previous pool will be forfeited + transmitter.lastCollected = totalPremium; + s_transmitters[temp] = transmitter; + } + } + s_signersList = signers; + s_transmittersList = transmitters; + + s_hotVars = HotVars({ + f: f, + paymentPremiumPPB: onchainConfig.paymentPremiumPPB, + flatFeeMicroLink: onchainConfig.flatFeeMicroLink, + stalenessSeconds: onchainConfig.stalenessSeconds, + gasCeilingMultiplier: onchainConfig.gasCeilingMultiplier, + paused: s_hotVars.paused, + reentrancyGuard: s_hotVars.reentrancyGuard, + totalPremium: totalPremium, + latestEpoch: 0 // DON restarts epoch + }); + + s_storage = Storage({ + checkGasLimit: onchainConfig.checkGasLimit, + minUpkeepSpend: onchainConfig.minUpkeepSpend, + maxPerformGas: onchainConfig.maxPerformGas, + transcoder: onchainConfig.transcoder, + maxCheckDataSize: onchainConfig.maxCheckDataSize, + maxPerformDataSize: onchainConfig.maxPerformDataSize, + maxRevertDataSize: onchainConfig.maxRevertDataSize, + upkeepPrivilegeManager: onchainConfig.upkeepPrivilegeManager, + nonce: s_storage.nonce, + configCount: s_storage.configCount, + latestConfigBlockNumber: s_storage.latestConfigBlockNumber, + ownerLinkBalance: s_storage.ownerLinkBalance + }); + s_fallbackGasPrice = onchainConfig.fallbackGasPrice; + s_fallbackLinkPrice = onchainConfig.fallbackLinkPrice; + + uint32 previousConfigBlockNumber = s_storage.latestConfigBlockNumber; + s_storage.latestConfigBlockNumber = uint32(_blockNum()); + s_storage.configCount += 1; + + bytes memory onchainConfigBytes = abi.encode(onchainConfig); + 
+ s_latestConfigDigest = _configDigestFromConfigData( + block.chainid, + address(this), + s_storage.configCount, + signers, + transmitters, + f, + onchainConfigBytes, + offchainConfigVersion, + offchainConfig + ); + + for (uint256 idx = 0; idx < s_registrars.length(); idx++) { + s_registrars.remove(s_registrars.at(idx)); + } + + for (uint256 idx = 0; idx < onchainConfig.registrars.length; idx++) { + s_registrars.add(onchainConfig.registrars[idx]); + } + + emit ConfigSet( + previousConfigBlockNumber, + s_latestConfigDigest, + s_storage.configCount, + signers, + transmitters, + f, + onchainConfigBytes, + offchainConfigVersion, + offchainConfig + ); + } + + // ================================================================ + // | GETTERS | + // ================================================================ + + /** + * @inheritdoc OCR2Abstract + */ + function latestConfigDetails() + external + view + override + returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest) + { + return (s_storage.configCount, s_storage.latestConfigBlockNumber, s_latestConfigDigest); + } + + /** + * @inheritdoc OCR2Abstract + */ + function latestConfigDigestAndEpoch() + external + view + override + returns (bool scanLogs, bytes32 configDigest, uint32 epoch) + { + return (false, s_latestConfigDigest, s_hotVars.latestEpoch); + } +} diff --git a/contracts/src/v0.8/automation/v2_1/KeeperRegistryBase2_1.sol b/contracts/src/v0.8/automation/v2_1/KeeperRegistryBase2_1.sol new file mode 100644 index 00000000..64d63b51 --- /dev/null +++ b/contracts/src/v0.8/automation/v2_1/KeeperRegistryBase2_1.sol @@ -0,0 +1,959 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.16; + +import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import {Address} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; +import {ArbGasInfo} from "../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol"; 
+import {OVM_GasPriceOracle} from "../../vendor/@eth-optimism/contracts/v0.8.9/contracts/L2/predeploys/OVM_GasPriceOracle.sol"; +import {ExecutionPrevention} from "../ExecutionPrevention.sol"; +import {ArbSys} from "../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol"; +import {StreamsLookupCompatibleInterface} from "../interfaces/StreamsLookupCompatibleInterface.sol"; +import {ILogAutomation, Log} from "../interfaces/ILogAutomation.sol"; +import {IAutomationForwarder} from "../interfaces/IAutomationForwarder.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol"; +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {KeeperCompatibleInterface} from "../interfaces/KeeperCompatibleInterface.sol"; +import {UpkeepFormat} from "../interfaces/UpkeepTranscoderInterface.sol"; + +/** + * @notice Base Keeper Registry contract, contains shared logic between + * KeeperRegistry and KeeperRegistryLogic + * @dev all errors, events, and internal functions should live here + */ +abstract contract KeeperRegistryBase2_1 is ConfirmedOwner, ExecutionPrevention { + using Address for address; + using EnumerableSet for EnumerableSet.UintSet; + using EnumerableSet for EnumerableSet.AddressSet; + + address internal constant ZERO_ADDRESS = address(0); + address internal constant IGNORE_ADDRESS = 0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF; + bytes4 internal constant CHECK_SELECTOR = KeeperCompatibleInterface.checkUpkeep.selector; + bytes4 internal constant PERFORM_SELECTOR = KeeperCompatibleInterface.performUpkeep.selector; + bytes4 internal constant CHECK_CALLBACK_SELECTOR = StreamsLookupCompatibleInterface.checkCallback.selector; + bytes4 internal constant CHECK_LOG_SELECTOR = ILogAutomation.checkLog.selector; + uint256 internal constant PERFORM_GAS_MIN = 2_300; + uint256 internal constant CANCELLATION_DELAY = 50; + uint256 
internal constant PERFORM_GAS_CUSHION = 5_000; + uint256 internal constant PPB_BASE = 1_000_000_000; + uint32 internal constant UINT32_MAX = type(uint32).max; + uint96 internal constant PLI_TOTAL_SUPPLY = 1e27; + // The first byte of the mask can be 0, because we only ever have 31 oracles + uint256 internal constant ORACLE_MASK = 0x0001010101010101010101010101010101010101010101010101010101010101; + /** + * @dev UPKEEP_TRANSCODER_VERSION_BASE is temporary necessity for backwards compatibility with + * MigratableKeeperRegistryInterfaceV1 - it should be removed in future versions in favor of + * UPKEEP_VERSION_BASE and MigratableKeeperRegistryInterfaceV2 + */ + UpkeepFormat internal constant UPKEEP_TRANSCODER_VERSION_BASE = UpkeepFormat.V1; + uint8 internal constant UPKEEP_VERSION_BASE = 3; + // L1_FEE_DATA_PADDING includes 35 bytes for L1 data padding for Optimism + bytes internal constant L1_FEE_DATA_PADDING = + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"; + + uint256 internal constant REGISTRY_CONDITIONAL_OVERHEAD = 90_000; // Used in maxPayment estimation, and in capping overheads during actual payment + uint256 internal constant REGISTRY_LOG_OVERHEAD = 110_000; // Used only in maxPayment estimation, and in capping overheads during actual payment. + uint256 internal constant REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD = 20; // Used only in maxPayment estimation, and in capping overheads during actual payment. Value scales with performData length. + uint256 internal constant REGISTRY_PER_SIGNER_GAS_OVERHEAD = 7_500; // Used only in maxPayment estimation, and in capping overheads during actual payment. Value scales with f. + + uint256 internal constant ACCOUNTING_FIXED_GAS_OVERHEAD = 27_500; // Used in actual payment. Fixed overhead per tx + uint256 internal constant ACCOUNTING_PER_SIGNER_GAS_OVERHEAD = 1_100; // Used in actual payment. 
overhead per signer + uint256 internal constant ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD = 7_000; // Used in actual payment. overhead per upkeep performed + + OVM_GasPriceOracle internal constant OPTIMISM_ORACLE = OVM_GasPriceOracle(0x420000000000000000000000000000000000000F); + ArbGasInfo internal constant ARB_NITRO_ORACLE = ArbGasInfo(0x000000000000000000000000000000000000006C); + ArbSys internal constant ARB_SYS = ArbSys(0x0000000000000000000000000000000000000064); + + LinkTokenInterface internal immutable i_link; + AggregatorV3Interface internal immutable i_linkNativeFeed; + AggregatorV3Interface internal immutable i_fastGasFeed; + Mode internal immutable i_mode; + address internal immutable i_automationForwarderLogic; + + /** + * @dev - The storage is gas optimised for one and only one function - transmit. All the storage accessed in transmit + * is stored compactly. Rest of the storage layout is not of much concern as transmit is the only hot path + */ + + // Upkeep storage + EnumerableSet.UintSet internal s_upkeepIDs; + mapping(uint256 => Upkeep) internal s_upkeep; // accessed during transmit + mapping(uint256 => address) internal s_upkeepAdmin; + mapping(uint256 => address) internal s_proposedAdmin; + mapping(uint256 => bytes) internal s_checkData; + mapping(bytes32 => bool) internal s_dedupKeys; + // Registry config and state + EnumerableSet.AddressSet internal s_registrars; + mapping(address => Transmitter) internal s_transmitters; + mapping(address => Signer) internal s_signers; + address[] internal s_signersList; // s_signersList contains the signing address of each oracle + address[] internal s_transmittersList; // s_transmittersList contains the transmission address of each oracle + mapping(address => address) internal s_transmitterPayees; // s_payees contains the mapping from transmitter to payee. 
+ mapping(address => address) internal s_proposedPayee; // proposed payee for a transmitter + bytes32 internal s_latestConfigDigest; // Read on transmit path in case of signature verification + HotVars internal s_hotVars; // Mixture of config and state, used in transmit + Storage internal s_storage; // Mixture of config and state, not used in transmit + uint256 internal s_fallbackGasPrice; + uint256 internal s_fallbackLinkPrice; + uint256 internal s_expectedLinkBalance; // Used in case of erroneous PLI transfers to contract + mapping(address => MigrationPermission) internal s_peerRegistryMigrationPermission; // Permissions for migration to and fro + mapping(uint256 => bytes) internal s_upkeepTriggerConfig; // upkeep triggers + mapping(uint256 => bytes) internal s_upkeepOffchainConfig; // general config set by users for each upkeep + mapping(uint256 => bytes) internal s_upkeepPrivilegeConfig; // general config set by an administrative role for an upkeep + mapping(address => bytes) internal s_adminPrivilegeConfig; // general config set by an administrative role for an admin + + error ArrayHasNoEntries(); + error CannotCancel(); + error CheckDataExceedsLimit(); + error ConfigDigestMismatch(); + error DuplicateEntry(); + error DuplicateSigners(); + error GasLimitCanOnlyIncrease(); + error GasLimitOutsideRange(); + error IncorrectNumberOfFaultyOracles(); + error IncorrectNumberOfSignatures(); + error IncorrectNumberOfSigners(); + error IndexOutOfRange(); + error InsufficientFunds(); + error InvalidDataLength(); + error InvalidTrigger(); + error InvalidPayee(); + error InvalidRecipient(); + error InvalidReport(); + error InvalidSigner(); + error InvalidTransmitter(); + error InvalidTriggerType(); + error MaxCheckDataSizeCanOnlyIncrease(); + error MaxPerformDataSizeCanOnlyIncrease(); + error MigrationNotPermitted(); + error NotAContract(); + error OnlyActiveSigners(); + error OnlyActiveTransmitters(); + error OnlyCallableByAdmin(); + error OnlyCallableByPLIToken(); + 
error OnlyCallableByOwnerOrAdmin(); + error OnlyCallableByOwnerOrRegistrar(); + error OnlyCallableByPayee(); + error OnlyCallableByProposedAdmin(); + error OnlyCallableByProposedPayee(); + error OnlyCallableByUpkeepPrivilegeManager(); + error OnlyPausedUpkeep(); + error OnlyUnpausedUpkeep(); + error ParameterLengthError(); + error PaymentGreaterThanAllPLI(); + error ReentrantCall(); + error RegistryPaused(); + error RepeatedSigner(); + error RepeatedTransmitter(); + error TargetCheckReverted(bytes reason); + error TooManyOracles(); + error TranscoderNotSet(); + error UpkeepAlreadyExists(); + error UpkeepCancelled(); + error UpkeepNotCanceled(); + error UpkeepNotNeeded(); + error ValueNotChanged(); + + enum MigrationPermission { + NONE, + OUTGOING, + INCOMING, + BIDIRECTIONAL + } + + enum Mode { + DEFAULT, + ARBITRUM, + OPTIMISM + } + + enum Trigger { + CONDITION, + LOG + } + + enum UpkeepFailureReason { + NONE, + UPKEEP_CANCELLED, + UPKEEP_PAUSED, + TARGET_CHECK_REVERTED, + UPKEEP_NOT_NEEDED, + PERFORM_DATA_EXCEEDS_LIMIT, + INSUFFICIENT_BALANCE, + CALLBACK_REVERTED, + REVERT_DATA_EXCEEDS_LIMIT, + REGISTRY_PAUSED + } + + /** + * @notice OnchainConfig of the registry + * @dev only used in params and return values + * @member paymentPremiumPPB payment premium rate oracles receive on top of + * being reimbursed for gas, measured in parts per billion + * @member flatFeeMicroLink flat fee paid to oracles for performing upkeeps, + * priced in MicroLink; can be used in conjunction with or independently of + * paymentPremiumPPB + * @member checkGasLimit gas limit when checking for upkeep + * @member stalenessSeconds number of seconds that is allowed for feed data to + * be stale before switching to the fallback pricing + * @member gasCeilingMultiplier multiplier to apply to the fast gas feed price + * when calculating the payment ceiling for keepers + * @member minUpkeepSpend minimum PLI that an upkeep must spend before cancelling + * @member maxPerformGas max performGas 
allowed for an upkeep on this registry + * @member maxCheckDataSize max length of checkData bytes + * @member maxPerformDataSize max length of performData bytes + * @member maxRevertDataSize max length of revertData bytes + * @member fallbackGasPrice gas price used if the gas price feed is stale + * @member fallbackLinkPrice PLI price used if the PLI price feed is stale + * @member transcoder address of the transcoder contract + * @member registrars addresses of the registrar contracts + * @member upkeepPrivilegeManager address which can set privilege for upkeeps + */ + struct OnchainConfig { + uint32 paymentPremiumPPB; + uint32 flatFeeMicroLink; // min 0.000001 PLI, max 4294 PLI + uint32 checkGasLimit; + uint24 stalenessSeconds; + uint16 gasCeilingMultiplier; + uint96 minUpkeepSpend; + uint32 maxPerformGas; + uint32 maxCheckDataSize; + uint32 maxPerformDataSize; + uint32 maxRevertDataSize; + uint256 fallbackGasPrice; + uint256 fallbackLinkPrice; + address transcoder; + address[] registrars; + address upkeepPrivilegeManager; + } + + /** + * @notice state of the registry + * @dev only used in params and return values + * @dev this will likely be deprecated in a future version of the registry in favor of individual getters + * @member nonce used for ID generation + * @member ownerLinkBalance withdrawable balance of PLI by contract owner + * @member expectedLinkBalance the expected balance of PLI of the registry + * @member totalPremium the total premium collected on registry so far + * @member numUpkeeps total number of upkeeps on the registry + * @member configCount ordinal number of current config, out of all configs applied to this contract so far + * @member latestConfigBlockNumber last block at which this config was set + * @member latestConfigDigest domain-separation tag for current config + * @member latestEpoch for which a report was transmitted + * @member paused freeze on execution scoped to the entire registry + */ + struct State { + uint32 nonce; + uint96 
ownerLinkBalance; + uint256 expectedLinkBalance; + uint96 totalPremium; + uint256 numUpkeeps; + uint32 configCount; + uint32 latestConfigBlockNumber; + bytes32 latestConfigDigest; + uint32 latestEpoch; + bool paused; + } + + /** + * @notice relevant state of an upkeep which is used in transmit function + * @member paused if this upkeep has been paused + * @member performGas the gas limit of upkeep execution + * @member maxValidBlocknumber until which block this upkeep is valid + * @member forwarder the forwarder contract to use for this upkeep + * @member amountSpent the amount this upkeep has spent + * @member balance the balance of this upkeep + * @member lastPerformedBlockNumber the last block number when this upkeep was performed + */ + struct Upkeep { + bool paused; + uint32 performGas; + uint32 maxValidBlocknumber; + IAutomationForwarder forwarder; + // 0 bytes left in 1st EVM word - not written to in transmit + uint96 amountSpent; + uint96 balance; + uint32 lastPerformedBlockNumber; + // 2 bytes left in 2nd EVM word - written in transmit path + } + + /** + * @notice all information about an upkeep + * @dev only used in return values + * @dev this will likely be deprecated in a future version of the registry + * @member target the contract which needs to be serviced + * @member performGas the gas limit of upkeep execution + * @member checkData the checkData bytes for this upkeep + * @member balance the balance of this upkeep + * @member admin for this upkeep + * @member maxValidBlocknumber until which block this upkeep is valid + * @member lastPerformedBlockNumber the last block number when this upkeep was performed + * @member amountSpent the amount this upkeep has spent + * @member paused if this upkeep has been paused + * @member offchainConfig the off-chain config of this upkeep + */ + struct UpkeepInfo { + address target; + uint32 performGas; + bytes checkData; + uint96 balance; + address admin; + uint64 maxValidBlocknumber; + uint32 
lastPerformedBlockNumber; + uint96 amountSpent; + bool paused; + bytes offchainConfig; + } + + /// @dev Config + State storage struct which is on hot transmit path + struct HotVars { + uint8 f; // maximum number of faulty oracles + uint32 paymentPremiumPPB; // premium percentage charged to user over tx cost + uint32 flatFeeMicroLink; // flat fee charged to user for every perform + uint24 stalenessSeconds; // Staleness tolerance for feeds + uint16 gasCeilingMultiplier; // multiplier on top of fast gas feed for upper bound + bool paused; // pause switch for all upkeeps in the registry + bool reentrancyGuard; // guard against reentrancy + uint96 totalPremium; // total historical payment to oracles for premium + uint32 latestEpoch; // latest epoch for which a report was transmitted + // 1 EVM word full + } + + /// @dev Config + State storage struct which is not on hot transmit path + struct Storage { + uint96 minUpkeepSpend; // Minimum amount an upkeep must spend + address transcoder; // Address of transcoder contract used in migrations + // 1 EVM word full + uint96 ownerLinkBalance; // Balance of owner, accumulates minUpkeepSpend in case it is not spent + uint32 checkGasLimit; // Gas limit allowed in checkUpkeep + uint32 maxPerformGas; // Max gas an upkeep can use on this registry + uint32 nonce; // Nonce for each upkeep created + uint32 configCount; // incremented each time a new config is posted, The count + // is incorporated into the config digest to prevent replay attacks. 
+ uint32 latestConfigBlockNumber; // makes it easier for offchain systems to extract config from logs + // 2 EVM word full + uint32 maxCheckDataSize; // max length of checkData bytes + uint32 maxPerformDataSize; // max length of performData bytes + uint32 maxRevertDataSize; // max length of revertData bytes + address upkeepPrivilegeManager; // address which can set privilege for upkeeps + // 3 EVM word full + } + + /// @dev Report transmitted by OCR to transmit function + struct Report { + uint256 fastGasWei; + uint256 linkNative; + uint256[] upkeepIds; + uint256[] gasLimits; + bytes[] triggers; + bytes[] performDatas; + } + + /** + * @dev This struct is used to maintain run time information about an upkeep in transmit function + * @member upkeep the upkeep struct + * @member earlyChecksPassed whether the upkeep passed early checks before perform + * @member maxLinkPayment the max amount this upkeep could pay for work + * @member performSuccess whether the perform was successful + * @member triggerType the type of trigger + * @member gasUsed gasUsed by this upkeep in perform + * @member gasOverhead gasOverhead for this upkeep + * @member dedupID unique ID used to dedup an upkeep/trigger combo + */ + struct UpkeepTransmitInfo { + Upkeep upkeep; + bool earlyChecksPassed; + uint96 maxLinkPayment; + bool performSuccess; + Trigger triggerType; + uint256 gasUsed; + uint256 gasOverhead; + bytes32 dedupID; + } + + struct Transmitter { + bool active; + uint8 index; // Index of oracle in s_signersList/s_transmittersList + uint96 balance; + uint96 lastCollected; + } + + struct Signer { + bool active; + // Index of oracle in s_signersList/s_transmittersList + uint8 index; + } + + /** + * @notice the trigger structure conditional trigger type + */ + struct ConditionalTrigger { + uint32 blockNum; + bytes32 blockHash; + } + + /** + * @notice the trigger structure of log upkeeps + * @dev NOTE that blockNum / blockHash describe the block used for the callback, + * not necessarily 
the block number that the log was emitted in!!!! + */ + struct LogTrigger { + bytes32 logBlockHash; + bytes32 txHash; + uint32 logIndex; + uint32 blockNum; + bytes32 blockHash; + } + + event AdminPrivilegeConfigSet(address indexed admin, bytes privilegeConfig); + event CancelledUpkeepReport(uint256 indexed id, bytes trigger); + event DedupKeyAdded(bytes32 indexed dedupKey); + event FundsAdded(uint256 indexed id, address indexed from, uint96 amount); + event FundsWithdrawn(uint256 indexed id, uint256 amount, address to); + event InsufficientFundsUpkeepReport(uint256 indexed id, bytes trigger); + event OwnerFundsWithdrawn(uint96 amount); + event Paused(address account); + event PayeesUpdated(address[] transmitters, address[] payees); + event PayeeshipTransferRequested(address indexed transmitter, address indexed from, address indexed to); + event PayeeshipTransferred(address indexed transmitter, address indexed from, address indexed to); + event PaymentWithdrawn(address indexed transmitter, uint256 indexed amount, address indexed to, address payee); + event ReorgedUpkeepReport(uint256 indexed id, bytes trigger); + event StaleUpkeepReport(uint256 indexed id, bytes trigger); + event UpkeepAdminTransferred(uint256 indexed id, address indexed from, address indexed to); + event UpkeepAdminTransferRequested(uint256 indexed id, address indexed from, address indexed to); + event UpkeepCanceled(uint256 indexed id, uint64 indexed atBlockHeight); + event UpkeepCheckDataSet(uint256 indexed id, bytes newCheckData); + event UpkeepGasLimitSet(uint256 indexed id, uint96 gasLimit); + event UpkeepMigrated(uint256 indexed id, uint256 remainingBalance, address destination); + event UpkeepOffchainConfigSet(uint256 indexed id, bytes offchainConfig); + event UpkeepPaused(uint256 indexed id); + event UpkeepPerformed( + uint256 indexed id, + bool indexed success, + uint96 totalPayment, + uint256 gasUsed, + uint256 gasOverhead, + bytes trigger + ); + event UpkeepPrivilegeConfigSet(uint256 
indexed id, bytes privilegeConfig); + event UpkeepReceived(uint256 indexed id, uint256 startingBalance, address importedFrom); + event UpkeepRegistered(uint256 indexed id, uint32 performGas, address admin); + event UpkeepTriggerConfigSet(uint256 indexed id, bytes triggerConfig); + event UpkeepUnpaused(uint256 indexed id); + event Unpaused(address account); + + /** + * @param mode the contract mode of default, Arbitrum, or Optimism + * @param link address of the PLI Token + * @param linkNativeFeed address of the PLI/Native price feed + * @param fastGasFeed address of the Fast Gas price feed + */ + constructor( + Mode mode, + address link, + address linkNativeFeed, + address fastGasFeed, + address automationForwarderLogic + ) ConfirmedOwner(msg.sender) { + i_mode = mode; + i_link = LinkTokenInterface(link); + i_linkNativeFeed = AggregatorV3Interface(linkNativeFeed); + i_fastGasFeed = AggregatorV3Interface(fastGasFeed); + i_automationForwarderLogic = automationForwarderLogic; + } + + // ================================================================ + // | INTERNAL FUNCTIONS ONLY | + // ================================================================ + + /** + * @dev creates a new upkeep with the given fields + * @param id the id of the upkeep + * @param upkeep the upkeep to create + * @param admin address to cancel upkeep and withdraw remaining funds + * @param checkData data which is passed to user's checkUpkeep + * @param triggerConfig the trigger config for this upkeep + * @param offchainConfig the off-chain config of this upkeep + */ + function _createUpkeep( + uint256 id, + Upkeep memory upkeep, + address admin, + bytes memory checkData, + bytes memory triggerConfig, + bytes memory offchainConfig + ) internal { + if (s_hotVars.paused) revert RegistryPaused(); + if (checkData.length > s_storage.maxCheckDataSize) revert CheckDataExceedsLimit(); + if (upkeep.performGas < PERFORM_GAS_MIN || upkeep.performGas > s_storage.maxPerformGas) + revert 
GasLimitOutsideRange(); + if (address(s_upkeep[id].forwarder) != address(0)) revert UpkeepAlreadyExists(); + s_upkeep[id] = upkeep; + s_upkeepAdmin[id] = admin; + s_checkData[id] = checkData; + s_expectedLinkBalance = s_expectedLinkBalance + upkeep.balance; + s_upkeepTriggerConfig[id] = triggerConfig; + s_upkeepOffchainConfig[id] = offchainConfig; + s_upkeepIDs.add(id); + } + + /** + * @dev creates an ID for the upkeep based on the upkeep's type + * @dev the format of the ID looks like this: + * ****00000000000X**************** + * 4 bytes of entropy + * 11 bytes of zeros + * 1 identifying byte for the trigger type + * 16 bytes of entropy + * @dev this maintains the same level of entropy as eth addresses, so IDs will still be unique + * @dev we add the "identifying" part in the middle so that it is mostly hidden from users who usually only + * see the first 4 and last 4 hex values ex 0x1234...ABCD + */ + function _createID(Trigger triggerType) internal view returns (uint256) { + bytes1 empty; + bytes memory idBytes = abi.encodePacked( + keccak256(abi.encode(_blockHash(_blockNum() - 1), address(this), s_storage.nonce)) + ); + for (uint256 idx = 4; idx < 15; idx++) { + idBytes[idx] = empty; + } + idBytes[15] = bytes1(uint8(triggerType)); + return uint256(bytes32(idBytes)); + } + + /** + * @dev retrieves feed data for fast gas/native and link/native prices. if the feed + * data is stale it uses the configured fallback price. Once a price is picked + * for gas it takes the min of gas price in the transaction or the fast gas + * price in order to reduce costs for the upkeep clients. 
+ */ + function _getFeedData(HotVars memory hotVars) internal view returns (uint256 gasWei, uint256 linkNative) { + uint32 stalenessSeconds = hotVars.stalenessSeconds; + bool staleFallback = stalenessSeconds > 0; + uint256 timestamp; + int256 feedValue; + (, feedValue, , timestamp, ) = i_fastGasFeed.latestRoundData(); + if ( + feedValue <= 0 || block.timestamp < timestamp || (staleFallback && stalenessSeconds < block.timestamp - timestamp) + ) { + gasWei = s_fallbackGasPrice; + } else { + gasWei = uint256(feedValue); + } + (, feedValue, , timestamp, ) = i_linkNativeFeed.latestRoundData(); + if ( + feedValue <= 0 || block.timestamp < timestamp || (staleFallback && stalenessSeconds < block.timestamp - timestamp) + ) { + linkNative = s_fallbackLinkPrice; + } else { + linkNative = uint256(feedValue); + } + return (gasWei, linkNative); + } + + /** + * @dev calculates PLI paid for gas spent plus a configure premium percentage + * @param gasLimit the amount of gas used + * @param gasOverhead the amount of gas overhead + * @param fastGasWei the fast gas price + * @param linkNative the exchange ratio between PLI and Native token + * @param numBatchedUpkeeps the number of upkeeps in this batch. 
Used to divide the L1 cost + * @param isExecution if this is triggered by a perform upkeep function + */ + function _calculatePaymentAmount( + HotVars memory hotVars, + uint256 gasLimit, + uint256 gasOverhead, + uint256 fastGasWei, + uint256 linkNative, + uint16 numBatchedUpkeeps, + bool isExecution + ) internal view returns (uint96, uint96) { + uint256 gasWei = fastGasWei * hotVars.gasCeilingMultiplier; + // in case it's actual execution use actual gas price, capped by fastGasWei * gasCeilingMultiplier + if (isExecution && tx.gasprice < gasWei) { + gasWei = tx.gasprice; + } + + uint256 l1CostWei = 0; + if (i_mode == Mode.OPTIMISM) { + bytes memory txCallData = new bytes(0); + if (isExecution) { + txCallData = bytes.concat(msg.data, L1_FEE_DATA_PADDING); + } else { + // fee is 4 per 0 byte, 16 per non-zero byte. Worst case we can have + // s_storage.maxPerformDataSize non zero-bytes. Instead of setting bytes to non-zero + // we initialize 'new bytes' of length 4*maxPerformDataSize to cover for zero bytes. + txCallData = new bytes(4 * s_storage.maxPerformDataSize); + } + l1CostWei = OPTIMISM_ORACLE.getL1Fee(txCallData); + } else if (i_mode == Mode.ARBITRUM) { + if (isExecution) { + l1CostWei = ARB_NITRO_ORACLE.getCurrentTxL1GasFees(); + } else { + // fee is 4 per 0 byte, 16 per non-zero byte - we assume all non-zero and + // max data size to calculate max payment + (, uint256 perL1CalldataUnit, , , , ) = ARB_NITRO_ORACLE.getPricesInWei(); + l1CostWei = perL1CalldataUnit * s_storage.maxPerformDataSize * 16; + } + } + // if it's not performing upkeeps, use gas ceiling multiplier to estimate the upper bound + if (!isExecution) { + l1CostWei = hotVars.gasCeilingMultiplier * l1CostWei; + } + // Divide l1CostWei among all batched upkeeps. 
Spare change from division is not charged + l1CostWei = l1CostWei / numBatchedUpkeeps; + + uint256 gasPayment = ((gasWei * (gasLimit + gasOverhead) + l1CostWei) * 1e18) / linkNative; + uint256 premium = (((gasWei * gasLimit) + l1CostWei) * 1e9 * hotVars.paymentPremiumPPB) / + linkNative + + uint256(hotVars.flatFeeMicroLink) * + 1e12; + // PLI_TOTAL_SUPPLY < UINT96_MAX + if (gasPayment + premium > PLI_TOTAL_SUPPLY) revert PaymentGreaterThanAllPLI(); + return (uint96(gasPayment), uint96(premium)); + } + + /** + * @dev calculates the max PLI payment for an upkeep + */ + function _getMaxLinkPayment( + HotVars memory hotVars, + Trigger triggerType, + uint32 performGas, + uint32 performDataLength, + uint256 fastGasWei, + uint256 linkNative, + bool isExecution // Whether this is an actual perform execution or just a simulation + ) internal view returns (uint96) { + uint256 gasOverhead = _getMaxGasOverhead(triggerType, performDataLength, hotVars.f); + (uint96 reimbursement, uint96 premium) = _calculatePaymentAmount( + hotVars, + performGas, + gasOverhead, + fastGasWei, + linkNative, + 1, // Consider only 1 upkeep in batch to get maxPayment + isExecution + ); + + return reimbursement + premium; + } + + /** + * @dev returns the max gas overhead that can be charged for an upkeep + */ + function _getMaxGasOverhead(Trigger triggerType, uint32 performDataLength, uint8 f) internal pure returns (uint256) { + // performData causes additional overhead in report length and memory operations + uint256 baseOverhead; + if (triggerType == Trigger.CONDITION) { + baseOverhead = REGISTRY_CONDITIONAL_OVERHEAD; + } else if (triggerType == Trigger.LOG) { + baseOverhead = REGISTRY_LOG_OVERHEAD; + } else { + revert InvalidTriggerType(); + } + return + baseOverhead + + (REGISTRY_PER_SIGNER_GAS_OVERHEAD * (f + 1)) + + (REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD * performDataLength); + } + + /** + * @dev move a transmitter's balance from total pool to withdrawable balance + */ + function 
_updateTransmitterBalanceFromPool( + address transmitterAddress, + uint96 totalPremium, + uint96 payeeCount + ) internal returns (uint96) { + Transmitter memory transmitter = s_transmitters[transmitterAddress]; + + if (transmitter.active) { + uint96 uncollected = totalPremium - transmitter.lastCollected; + uint96 due = uncollected / payeeCount; + transmitter.balance += due; + transmitter.lastCollected += due * payeeCount; + s_transmitters[transmitterAddress] = transmitter; + } + + return transmitter.balance; + } + + /** + * @dev gets the trigger type from an upkeepID (trigger type is encoded in the middle of the ID) + */ + function _getTriggerType(uint256 upkeepId) internal pure returns (Trigger) { + bytes32 rawID = bytes32(upkeepId); + bytes1 empty = bytes1(0); + for (uint256 idx = 4; idx < 15; idx++) { + if (rawID[idx] != empty) { + // old IDs that were created before this standard and migrated to this registry + return Trigger.CONDITION; + } + } + return Trigger(uint8(rawID[15])); + } + + function _checkPayload( + uint256 upkeepId, + Trigger triggerType, + bytes memory triggerData + ) internal view returns (bytes memory) { + if (triggerType == Trigger.CONDITION) { + return abi.encodeWithSelector(CHECK_SELECTOR, s_checkData[upkeepId]); + } else if (triggerType == Trigger.LOG) { + Log memory log = abi.decode(triggerData, (Log)); + return abi.encodeWithSelector(CHECK_LOG_SELECTOR, log, s_checkData[upkeepId]); + } + revert InvalidTriggerType(); + } + + /** + * @dev _decodeReport decodes a serialized report into a Report struct + */ + function _decodeReport(bytes calldata rawReport) internal pure returns (Report memory) { + Report memory report = abi.decode(rawReport, (Report)); + uint256 expectedLength = report.upkeepIds.length; + if ( + report.gasLimits.length != expectedLength || + report.triggers.length != expectedLength || + report.performDatas.length != expectedLength + ) { + revert InvalidReport(); + } + return report; + } + + /** + * @dev Does some early 
sanity checks before actually performing an upkeep + * @return bool whether the upkeep should be performed + * @return bytes32 dedupID for preventing duplicate performances of this trigger + */ + function _prePerformChecks( + uint256 upkeepId, + bytes memory rawTrigger, + UpkeepTransmitInfo memory transmitInfo + ) internal returns (bool, bytes32) { + bytes32 dedupID; + if (transmitInfo.triggerType == Trigger.CONDITION) { + if (!_validateConditionalTrigger(upkeepId, rawTrigger, transmitInfo)) return (false, dedupID); + } else if (transmitInfo.triggerType == Trigger.LOG) { + bool valid; + (valid, dedupID) = _validateLogTrigger(upkeepId, rawTrigger, transmitInfo); + if (!valid) return (false, dedupID); + } else { + revert InvalidTriggerType(); + } + if (transmitInfo.upkeep.maxValidBlocknumber <= _blockNum()) { + // Can happen when an upkeep got cancelled after report was generated. + // However we have a CANCELLATION_DELAY of 50 blocks so shouldn't happen in practice + emit CancelledUpkeepReport(upkeepId, rawTrigger); + return (false, dedupID); + } + if (transmitInfo.upkeep.balance < transmitInfo.maxLinkPayment) { + // Can happen due to fluctuations in gas / link prices + emit InsufficientFundsUpkeepReport(upkeepId, rawTrigger); + return (false, dedupID); + } + return (true, dedupID); + } + + /** + * @dev Does some early sanity checks before actually performing an upkeep + */ + function _validateConditionalTrigger( + uint256 upkeepId, + bytes memory rawTrigger, + UpkeepTransmitInfo memory transmitInfo + ) internal returns (bool) { + ConditionalTrigger memory trigger = abi.decode(rawTrigger, (ConditionalTrigger)); + if (trigger.blockNum < transmitInfo.upkeep.lastPerformedBlockNumber) { + // Can happen when another report performed this upkeep after this report was generated + emit StaleUpkeepReport(upkeepId, rawTrigger); + return false; + } + if ( + (trigger.blockHash != bytes32("") && _blockHash(trigger.blockNum) != trigger.blockHash) || + trigger.blockNum >= 
_blockNum() + ) { + // There are two cases of reorged report + // 1. trigger block number is in future: this is an edge case during extreme deep reorgs of chain + // which is always protected against + // 2. blockHash at trigger block number was same as trigger time. This is an optional check which is + // applied if DON sends non empty trigger.blockHash. Note: It only works for last 256 blocks on chain + // when it is sent + emit ReorgedUpkeepReport(upkeepId, rawTrigger); + return false; + } + return true; + } + + function _validateLogTrigger( + uint256 upkeepId, + bytes memory rawTrigger, + UpkeepTransmitInfo memory transmitInfo + ) internal returns (bool, bytes32) { + LogTrigger memory trigger = abi.decode(rawTrigger, (LogTrigger)); + bytes32 dedupID = keccak256(abi.encodePacked(upkeepId, trigger.logBlockHash, trigger.txHash, trigger.logIndex)); + if ( + (trigger.blockHash != bytes32("") && _blockHash(trigger.blockNum) != trigger.blockHash) || + trigger.blockNum >= _blockNum() + ) { + // Reorg protection is same as conditional trigger upkeeps + emit ReorgedUpkeepReport(upkeepId, rawTrigger); + return (false, dedupID); + } + if (s_dedupKeys[dedupID]) { + emit StaleUpkeepReport(upkeepId, rawTrigger); + return (false, dedupID); + } + return (true, dedupID); + } + + /** + * @dev Verify signatures attached to report + */ + function _verifyReportSignature( + bytes32[3] calldata reportContext, + bytes calldata report, + bytes32[] calldata rs, + bytes32[] calldata ss, + bytes32 rawVs + ) internal view { + bytes32 h = keccak256(abi.encode(keccak256(report), reportContext)); + // i-th byte counts number of sigs made by i-th signer + uint256 signedCount = 0; + + Signer memory signer; + address signerAddress; + for (uint256 i = 0; i < rs.length; i++) { + signerAddress = ecrecover(h, uint8(rawVs[i]) + 27, rs[i], ss[i]); + signer = s_signers[signerAddress]; + if (!signer.active) revert OnlyActiveSigners(); + unchecked { + signedCount += 1 << (8 * signer.index); + } + } + + if 
(signedCount & ORACLE_MASK != signedCount) revert DuplicateSigners(); + } + + /** + * @dev updates a storage marker for this upkeep to prevent duplicate and out of order performances + * @dev for conditional triggers we set the latest block number, for log triggers we store a dedupID + */ + function _updateTriggerMarker(uint256 upkeepID, UpkeepTransmitInfo memory upkeepTransmitInfo) internal { + if (upkeepTransmitInfo.triggerType == Trigger.CONDITION) { + s_upkeep[upkeepID].lastPerformedBlockNumber = uint32(_blockNum()); + } else if (upkeepTransmitInfo.triggerType == Trigger.LOG) { + s_dedupKeys[upkeepTransmitInfo.dedupID] = true; + emit DedupKeyAdded(upkeepTransmitInfo.dedupID); + } + } + + /** + * @dev calls the Upkeep target with the performData param passed in by the + * transmitter and the exact gas required by the Upkeep + */ + function _performUpkeep( + IAutomationForwarder forwarder, + uint256 performGas, + bytes memory performData + ) internal nonReentrant returns (bool success, uint256 gasUsed) { + performData = abi.encodeWithSelector(PERFORM_SELECTOR, performData); + return forwarder.forward(performGas, performData); + } + + /** + * @dev does postPerform payment processing for an upkeep. Deducts upkeep's balance and increases + * amount spent. 
+ */ + function _postPerformPayment( + HotVars memory hotVars, + uint256 upkeepId, + UpkeepTransmitInfo memory upkeepTransmitInfo, + uint256 fastGasWei, + uint256 linkNative, + uint16 numBatchedUpkeeps + ) internal returns (uint96 gasReimbursement, uint96 premium) { + (gasReimbursement, premium) = _calculatePaymentAmount( + hotVars, + upkeepTransmitInfo.gasUsed, + upkeepTransmitInfo.gasOverhead, + fastGasWei, + linkNative, + numBatchedUpkeeps, + true + ); + + uint96 payment = gasReimbursement + premium; + s_upkeep[upkeepId].balance -= payment; + s_upkeep[upkeepId].amountSpent += payment; + + return (gasReimbursement, premium); + } + + /** + * @dev Caps the gas overhead by the constant overhead used within initial payment checks in order to + * prevent a revert in payment processing. + */ + function _getCappedGasOverhead( + uint256 calculatedGasOverhead, + Trigger triggerType, + uint32 performDataLength, + uint8 f + ) internal pure returns (uint256 cappedGasOverhead) { + cappedGasOverhead = _getMaxGasOverhead(triggerType, performDataLength, f); + if (calculatedGasOverhead < cappedGasOverhead) { + return calculatedGasOverhead; + } + return cappedGasOverhead; + } + + /** + * @dev ensures the upkeep is not cancelled and the caller is the upkeep admin + */ + function _requireAdminAndNotCancelled(uint256 upkeepId) internal view { + if (msg.sender != s_upkeepAdmin[upkeepId]) revert OnlyCallableByAdmin(); + if (s_upkeep[upkeepId].maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + } + + /** + * @dev returns the current block number in a chain agnostic manner + */ + function _blockNum() internal view returns (uint256) { + if (i_mode == Mode.ARBITRUM) { + return ARB_SYS.arbBlockNumber(); + } else { + return block.number; + } + } + + /** + * @dev returns the blockhash of the provided block number in a chain agnostic manner + * @param n the blocknumber to retrieve the blockhash for + * @return blockhash the blockhash of block number n, or 0 if n is out of queryable 
range + */ + function _blockHash(uint256 n) internal view returns (bytes32) { + if (i_mode == Mode.ARBITRUM) { + uint256 blockNum = ARB_SYS.arbBlockNumber(); + if (n >= blockNum || blockNum - n > 256) { + return ""; + } + return ARB_SYS.arbBlockHash(n); + } else { + return blockhash(n); + } + } + + /** + * @dev replicates Open Zeppelin's ReentrancyGuard but optimized to fit our storage + */ + modifier nonReentrant() { + if (s_hotVars.reentrancyGuard) revert ReentrantCall(); + s_hotVars.reentrancyGuard = true; + _; + s_hotVars.reentrancyGuard = false; + } +} diff --git a/contracts/src/v0.8/automation/v2_1/KeeperRegistryLogicA2_1.sol b/contracts/src/v0.8/automation/v2_1/KeeperRegistryLogicA2_1.sol new file mode 100644 index 00000000..9588421f --- /dev/null +++ b/contracts/src/v0.8/automation/v2_1/KeeperRegistryLogicA2_1.sol @@ -0,0 +1,438 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.16; + +import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import {Address} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; +import {KeeperRegistryBase2_1} from "./KeeperRegistryBase2_1.sol"; +import {KeeperRegistryLogicB2_1} from "./KeeperRegistryLogicB2_1.sol"; +import {Chainable} from "../Chainable.sol"; +import {AutomationForwarder} from "../AutomationForwarder.sol"; +import {IAutomationForwarder} from "../interfaces/IAutomationForwarder.sol"; +import {UpkeepTranscoderInterfaceV2} from "../interfaces/UpkeepTranscoderInterfaceV2.sol"; +import {MigratableKeeperRegistryInterfaceV2} from "../interfaces/MigratableKeeperRegistryInterfaceV2.sol"; + +/** + * @notice Logic contract, works in tandem with KeeperRegistry as a proxy + */ +contract KeeperRegistryLogicA2_1 is KeeperRegistryBase2_1, Chainable { + using Address for address; + using EnumerableSet for EnumerableSet.UintSet; + using EnumerableSet for EnumerableSet.AddressSet; + + /** + * @param logicB the address of the second 
logic contract + */ + constructor( + KeeperRegistryLogicB2_1 logicB + ) + KeeperRegistryBase2_1( + logicB.getMode(), + logicB.getLinkAddress(), + logicB.getLinkNativeFeedAddress(), + logicB.getFastGasFeedAddress(), + logicB.getAutomationForwarderLogic() + ) + Chainable(address(logicB)) + {} + + /** + * @notice called by the automation DON to check if work is needed + * @param id the upkeep ID to check for work needed + * @param triggerData extra contextual data about the trigger (not used in all code paths) + * @dev this one of the core functions called in the hot path + * @dev there is a 2nd checkUpkeep function (below) that is being maintained for backwards compatibility + * @dev there is an incongruency on what gets returned during failure modes + * ex sometimes we include price data, sometimes we omit it depending on the failure + */ + function checkUpkeep( + uint256 id, + bytes memory triggerData + ) + public + cannotExecute + returns ( + bool upkeepNeeded, + bytes memory performData, + UpkeepFailureReason upkeepFailureReason, + uint256 gasUsed, + uint256 gasLimit, + uint256 fastGasWei, + uint256 linkNative + ) + { + Trigger triggerType = _getTriggerType(id); + HotVars memory hotVars = s_hotVars; + Upkeep memory upkeep = s_upkeep[id]; + + if (hotVars.paused) return (false, bytes(""), UpkeepFailureReason.REGISTRY_PAUSED, 0, upkeep.performGas, 0, 0); + if (upkeep.maxValidBlocknumber != UINT32_MAX) + return (false, bytes(""), UpkeepFailureReason.UPKEEP_CANCELLED, 0, upkeep.performGas, 0, 0); + if (upkeep.paused) return (false, bytes(""), UpkeepFailureReason.UPKEEP_PAUSED, 0, upkeep.performGas, 0, 0); + + (fastGasWei, linkNative) = _getFeedData(hotVars); + uint96 maxLinkPayment = _getMaxLinkPayment( + hotVars, + triggerType, + upkeep.performGas, + s_storage.maxPerformDataSize, + fastGasWei, + linkNative, + false + ); + if (upkeep.balance < maxLinkPayment) { + return (false, bytes(""), UpkeepFailureReason.INSUFFICIENT_BALANCE, 0, upkeep.performGas, 0, 0); + } + + 
bytes memory callData = _checkPayload(id, triggerType, triggerData); + + gasUsed = gasleft(); + (bool success, bytes memory result) = upkeep.forwarder.getTarget().call{gas: s_storage.checkGasLimit}(callData); + gasUsed = gasUsed - gasleft(); + + if (!success) { + // User's target check reverted. We capture the revert data here and pass it within performData + if (result.length > s_storage.maxRevertDataSize) { + return ( + false, + bytes(""), + UpkeepFailureReason.REVERT_DATA_EXCEEDS_LIMIT, + gasUsed, + upkeep.performGas, + fastGasWei, + linkNative + ); + } + return ( + upkeepNeeded, + result, + UpkeepFailureReason.TARGET_CHECK_REVERTED, + gasUsed, + upkeep.performGas, + fastGasWei, + linkNative + ); + } + + (upkeepNeeded, performData) = abi.decode(result, (bool, bytes)); + if (!upkeepNeeded) + return ( + false, + bytes(""), + UpkeepFailureReason.UPKEEP_NOT_NEEDED, + gasUsed, + upkeep.performGas, + fastGasWei, + linkNative + ); + + if (performData.length > s_storage.maxPerformDataSize) + return ( + false, + bytes(""), + UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT, + gasUsed, + upkeep.performGas, + fastGasWei, + linkNative + ); + + return (upkeepNeeded, performData, upkeepFailureReason, gasUsed, upkeep.performGas, fastGasWei, linkNative); + } + + /** + * @notice see other checkUpkeep function for description + * @dev this function may be deprecated in a future version of plugin automation + */ + function checkUpkeep( + uint256 id + ) + external + returns ( + bool upkeepNeeded, + bytes memory performData, + UpkeepFailureReason upkeepFailureReason, + uint256 gasUsed, + uint256 gasLimit, + uint256 fastGasWei, + uint256 linkNative + ) + { + return checkUpkeep(id, bytes("")); + } + + /** + * @dev checkCallback is used specifically for automation data streams lookups (see StreamsLookupCompatibleInterface.sol) + * @param id the upkeepID to execute a callback for + * @param values the values returned from the data streams lookup + * @param extraData the user-provided 
extra context data + */ + function checkCallback( + uint256 id, + bytes[] memory values, + bytes calldata extraData + ) + external + cannotExecute + returns (bool upkeepNeeded, bytes memory performData, UpkeepFailureReason upkeepFailureReason, uint256 gasUsed) + { + bytes memory payload = abi.encodeWithSelector(CHECK_CALLBACK_SELECTOR, values, extraData); + return executeCallback(id, payload); + } + + /** + * @notice this is a generic callback executor that forwards a call to a user's contract with the configured + * gas limit + * @param id the upkeepID to execute a callback for + * @param payload the data (including function selector) to call on the upkeep target contract + */ + function executeCallback( + uint256 id, + bytes memory payload + ) + public + cannotExecute + returns (bool upkeepNeeded, bytes memory performData, UpkeepFailureReason upkeepFailureReason, uint256 gasUsed) + { + Upkeep memory upkeep = s_upkeep[id]; + gasUsed = gasleft(); + (bool success, bytes memory result) = upkeep.forwarder.getTarget().call{gas: s_storage.checkGasLimit}(payload); + gasUsed = gasUsed - gasleft(); + if (!success) { + return (false, bytes(""), UpkeepFailureReason.CALLBACK_REVERTED, gasUsed); + } + (upkeepNeeded, performData) = abi.decode(result, (bool, bytes)); + if (!upkeepNeeded) { + return (false, bytes(""), UpkeepFailureReason.UPKEEP_NOT_NEEDED, gasUsed); + } + if (performData.length > s_storage.maxPerformDataSize) { + return (false, bytes(""), UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT, gasUsed); + } + return (upkeepNeeded, performData, upkeepFailureReason, gasUsed); + } + + /** + * @notice adds a new upkeep + * @param target address to perform upkeep on + * @param gasLimit amount of gas to provide the target contract when + * performing upkeep + * @param admin address to cancel upkeep and withdraw remaining funds + * @param triggerType the trigger for the upkeep + * @param checkData data passed to the contract when checking for upkeep + * @param triggerConfig the 
config for the trigger + * @param offchainConfig arbitrary offchain config for the upkeep + */ + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + Trigger triggerType, + bytes calldata checkData, + bytes memory triggerConfig, + bytes memory offchainConfig + ) public returns (uint256 id) { + if (msg.sender != owner() && !s_registrars.contains(msg.sender)) revert OnlyCallableByOwnerOrRegistrar(); + if (!target.isContract()) revert NotAContract(); + id = _createID(triggerType); + IAutomationForwarder forwarder = IAutomationForwarder( + address(new AutomationForwarder(target, address(this), i_automationForwarderLogic)) + ); + _createUpkeep( + id, + Upkeep({ + performGas: gasLimit, + balance: 0, + maxValidBlocknumber: UINT32_MAX, + lastPerformedBlockNumber: 0, + amountSpent: 0, + paused: false, + forwarder: forwarder + }), + admin, + checkData, + triggerConfig, + offchainConfig + ); + s_storage.nonce++; + emit UpkeepRegistered(id, gasLimit, admin); + emit UpkeepCheckDataSet(id, checkData); + emit UpkeepTriggerConfigSet(id, triggerConfig); + emit UpkeepOffchainConfigSet(id, offchainConfig); + return (id); + } + + /** + * @notice this function registers a conditional upkeep, using a backwards compatible function signature + * @dev this function is backwards compatible with versions <=2.0, but may be removed in a future version + */ + function registerUpkeep( + address target, + uint32 gasLimit, + address admin, + bytes calldata checkData, + bytes calldata offchainConfig + ) external returns (uint256 id) { + return registerUpkeep(target, gasLimit, admin, Trigger.CONDITION, checkData, bytes(""), offchainConfig); + } + + /** + * @notice cancels an upkeep + * @param id the upkeepID to cancel + * @dev if a user cancels an upkeep, their funds are locked for CANCELLATION_DELAY blocks to + * allow any pending performUpkeep txs time to get confirmed + */ + function cancelUpkeep(uint256 id) external { + Upkeep memory upkeep = s_upkeep[id]; + bool 
canceled = upkeep.maxValidBlocknumber != UINT32_MAX; + bool isOwner = msg.sender == owner(); + + if (canceled && !(isOwner && upkeep.maxValidBlocknumber > _blockNum())) revert CannotCancel(); + if (!isOwner && msg.sender != s_upkeepAdmin[id]) revert OnlyCallableByOwnerOrAdmin(); + + uint256 height = _blockNum(); + if (!isOwner) { + height = height + CANCELLATION_DELAY; + } + s_upkeep[id].maxValidBlocknumber = uint32(height); + s_upkeepIDs.remove(id); + + // charge the cancellation fee if the minUpkeepSpend is not met + uint96 minUpkeepSpend = s_storage.minUpkeepSpend; + uint96 cancellationFee = 0; + // cancellationFee is supposed to be min(max(minUpkeepSpend - amountSpent,0), amountLeft) + if (upkeep.amountSpent < minUpkeepSpend) { + cancellationFee = minUpkeepSpend - upkeep.amountSpent; + if (cancellationFee > upkeep.balance) { + cancellationFee = upkeep.balance; + } + } + s_upkeep[id].balance = upkeep.balance - cancellationFee; + s_storage.ownerLinkBalance = s_storage.ownerLinkBalance + cancellationFee; + + emit UpkeepCanceled(id, uint64(height)); + } + + /** + * @notice adds fund to an upkeep + * @param id the upkeepID + * @param amount the amount of PLI to fund, in jules (jules = "wei" of PLI) + */ + function addFunds(uint256 id, uint96 amount) external { + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + s_upkeep[id].balance = upkeep.balance + amount; + s_expectedLinkBalance = s_expectedLinkBalance + amount; + i_link.transferFrom(msg.sender, address(this), amount); + emit FundsAdded(id, msg.sender, amount); + } + + /** + * @notice migrates upkeeps from one registry to another + * @param ids the upkeepIDs to migrate + * @param destination the destination registry address + * @dev a transcoder must be set in order to enable migration + * @dev migration permissions must be set on *both* sending and receiving registries + * @dev only an upkeep admin can migrate their upkeeps + */ + function 
migrateUpkeeps(uint256[] calldata ids, address destination) external { + if ( + s_peerRegistryMigrationPermission[destination] != MigrationPermission.OUTGOING && + s_peerRegistryMigrationPermission[destination] != MigrationPermission.BIDIRECTIONAL + ) revert MigrationNotPermitted(); + if (s_storage.transcoder == ZERO_ADDRESS) revert TranscoderNotSet(); + if (ids.length == 0) revert ArrayHasNoEntries(); + uint256 id; + Upkeep memory upkeep; + uint256 totalBalanceRemaining; + address[] memory admins = new address[](ids.length); + Upkeep[] memory upkeeps = new Upkeep[](ids.length); + bytes[] memory checkDatas = new bytes[](ids.length); + bytes[] memory triggerConfigs = new bytes[](ids.length); + bytes[] memory offchainConfigs = new bytes[](ids.length); + for (uint256 idx = 0; idx < ids.length; idx++) { + id = ids[idx]; + upkeep = s_upkeep[id]; + _requireAdminAndNotCancelled(id); + upkeep.forwarder.updateRegistry(destination); + upkeeps[idx] = upkeep; + admins[idx] = s_upkeepAdmin[id]; + checkDatas[idx] = s_checkData[id]; + triggerConfigs[idx] = s_upkeepTriggerConfig[id]; + offchainConfigs[idx] = s_upkeepOffchainConfig[id]; + totalBalanceRemaining = totalBalanceRemaining + upkeep.balance; + delete s_upkeep[id]; + delete s_checkData[id]; + delete s_upkeepTriggerConfig[id]; + delete s_upkeepOffchainConfig[id]; + // nullify existing proposed admin change if an upkeep is being migrated + delete s_proposedAdmin[id]; + s_upkeepIDs.remove(id); + emit UpkeepMigrated(id, upkeep.balance, destination); + } + s_expectedLinkBalance = s_expectedLinkBalance - totalBalanceRemaining; + bytes memory encodedUpkeeps = abi.encode( + ids, + upkeeps, + new address[](ids.length), + admins, + checkDatas, + triggerConfigs, + offchainConfigs + ); + MigratableKeeperRegistryInterfaceV2(destination).receiveUpkeeps( + UpkeepTranscoderInterfaceV2(s_storage.transcoder).transcodeUpkeeps( + UPKEEP_VERSION_BASE, + MigratableKeeperRegistryInterfaceV2(destination).upkeepVersion(), + encodedUpkeeps + ) + ); 
+ i_link.transfer(destination, totalBalanceRemaining); + } + + /** + * @notice received upkeeps migrated from another registry + * @param encodedUpkeeps the raw upkeep data to import + * @dev this function is never called directly, it is only called by another registry's migrate function + */ + function receiveUpkeeps(bytes calldata encodedUpkeeps) external { + if ( + s_peerRegistryMigrationPermission[msg.sender] != MigrationPermission.INCOMING && + s_peerRegistryMigrationPermission[msg.sender] != MigrationPermission.BIDIRECTIONAL + ) revert MigrationNotPermitted(); + ( + uint256[] memory ids, + Upkeep[] memory upkeeps, + address[] memory targets, + address[] memory upkeepAdmins, + bytes[] memory checkDatas, + bytes[] memory triggerConfigs, + bytes[] memory offchainConfigs + ) = abi.decode(encodedUpkeeps, (uint256[], Upkeep[], address[], address[], bytes[], bytes[], bytes[])); + for (uint256 idx = 0; idx < ids.length; idx++) { + if (address(upkeeps[idx].forwarder) == ZERO_ADDRESS) { + upkeeps[idx].forwarder = IAutomationForwarder( + address(new AutomationForwarder(targets[idx], address(this), i_automationForwarderLogic)) + ); + } + _createUpkeep( + ids[idx], + upkeeps[idx], + upkeepAdmins[idx], + checkDatas[idx], + triggerConfigs[idx], + offchainConfigs[idx] + ); + emit UpkeepReceived(ids[idx], upkeeps[idx].balance, msg.sender); + } + } + + /** + * @notice sets the upkeep trigger config + * @param id the upkeepID to change the trigger for + * @param triggerConfig the new trigger config + */ + function setUpkeepTriggerConfig(uint256 id, bytes calldata triggerConfig) external { + _requireAdminAndNotCancelled(id); + s_upkeepTriggerConfig[id] = triggerConfig; + emit UpkeepTriggerConfigSet(id, triggerConfig); + } +} diff --git a/contracts/src/v0.8/automation/v2_1/KeeperRegistryLogicB2_1.sol b/contracts/src/v0.8/automation/v2_1/KeeperRegistryLogicB2_1.sol new file mode 100644 index 00000000..411dbe85 --- /dev/null +++ 
b/contracts/src/v0.8/automation/v2_1/KeeperRegistryLogicB2_1.sol @@ -0,0 +1,516 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity 0.8.16; + +import {KeeperRegistryBase2_1} from "./KeeperRegistryBase2_1.sol"; +import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import {Address} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; +import {UpkeepFormat} from "../interfaces/UpkeepTranscoderInterface.sol"; +import {IAutomationForwarder} from "../interfaces/IAutomationForwarder.sol"; + +contract KeeperRegistryLogicB2_1 is KeeperRegistryBase2_1 { + using Address for address; + using EnumerableSet for EnumerableSet.UintSet; + using EnumerableSet for EnumerableSet.AddressSet; + + /** + * @dev see KeeperRegistry master contract for constructor description + */ + constructor( + Mode mode, + address link, + address linkNativeFeed, + address fastGasFeed, + address automationForwarderLogic + ) KeeperRegistryBase2_1(mode, link, linkNativeFeed, fastGasFeed, automationForwarderLogic) {} + + // ================================================================ + // | UPKEEP MANAGEMENT | + // ================================================================ + + /** + * @notice transfers the address of an admin for an upkeep + */ + function transferUpkeepAdmin(uint256 id, address proposed) external { + _requireAdminAndNotCancelled(id); + if (proposed == msg.sender) revert ValueNotChanged(); + + if (s_proposedAdmin[id] != proposed) { + s_proposedAdmin[id] = proposed; + emit UpkeepAdminTransferRequested(id, msg.sender, proposed); + } + } + + /** + * @notice accepts the transfer of an upkeep admin + */ + function acceptUpkeepAdmin(uint256 id) external { + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.maxValidBlocknumber != UINT32_MAX) revert UpkeepCancelled(); + if (s_proposedAdmin[id] != msg.sender) revert OnlyCallableByProposedAdmin(); + address past = s_upkeepAdmin[id]; + 
s_upkeepAdmin[id] = msg.sender; + s_proposedAdmin[id] = ZERO_ADDRESS; + + emit UpkeepAdminTransferred(id, past, msg.sender); + } + + /** + * @notice pauses an upkeep - an upkeep will be neither checked nor performed while paused + */ + function pauseUpkeep(uint256 id) external { + _requireAdminAndNotCancelled(id); + Upkeep memory upkeep = s_upkeep[id]; + if (upkeep.paused) revert OnlyUnpausedUpkeep(); + s_upkeep[id].paused = true; + s_upkeepIDs.remove(id); + emit UpkeepPaused(id); + } + + /** + * @notice unpauses an upkeep + */ + function unpauseUpkeep(uint256 id) external { + _requireAdminAndNotCancelled(id); + Upkeep memory upkeep = s_upkeep[id]; + if (!upkeep.paused) revert OnlyPausedUpkeep(); + s_upkeep[id].paused = false; + s_upkeepIDs.add(id); + emit UpkeepUnpaused(id); + } + + /** + * @notice updates the checkData for an upkeep + */ + function setUpkeepCheckData(uint256 id, bytes calldata newCheckData) external { + _requireAdminAndNotCancelled(id); + if (newCheckData.length > s_storage.maxCheckDataSize) revert CheckDataExceedsLimit(); + s_checkData[id] = newCheckData; + emit UpkeepCheckDataSet(id, newCheckData); + } + + /** + * @notice updates the gas limit for an upkeep + */ + function setUpkeepGasLimit(uint256 id, uint32 gasLimit) external { + if (gasLimit < PERFORM_GAS_MIN || gasLimit > s_storage.maxPerformGas) revert GasLimitOutsideRange(); + _requireAdminAndNotCancelled(id); + s_upkeep[id].performGas = gasLimit; + + emit UpkeepGasLimitSet(id, gasLimit); + } + + /** + * @notice updates the offchain config for an upkeep + */ + function setUpkeepOffchainConfig(uint256 id, bytes calldata config) external { + _requireAdminAndNotCancelled(id); + s_upkeepOffchainConfig[id] = config; + emit UpkeepOffchainConfigSet(id, config); + } + + /** + * @notice withdraws PLI funds from an upkeep + * @dev note that an upkeep must be cancelled first!! 
+ */ + function withdrawFunds(uint256 id, address to) external nonReentrant { + if (to == ZERO_ADDRESS) revert InvalidRecipient(); + Upkeep memory upkeep = s_upkeep[id]; + if (s_upkeepAdmin[id] != msg.sender) revert OnlyCallableByAdmin(); + if (upkeep.maxValidBlocknumber > _blockNum()) revert UpkeepNotCanceled(); + uint96 amountToWithdraw = s_upkeep[id].balance; + s_expectedLinkBalance = s_expectedLinkBalance - amountToWithdraw; + s_upkeep[id].balance = 0; + i_link.transfer(to, amountToWithdraw); + emit FundsWithdrawn(id, amountToWithdraw, to); + } + + // ================================================================ + // | NODE MANAGEMENT | + // ================================================================ + + /** + * @notice transfers the address of payee for a transmitter + */ + function transferPayeeship(address transmitter, address proposed) external { + if (s_transmitterPayees[transmitter] != msg.sender) revert OnlyCallableByPayee(); + if (proposed == msg.sender) revert ValueNotChanged(); + + if (s_proposedPayee[transmitter] != proposed) { + s_proposedPayee[transmitter] = proposed; + emit PayeeshipTransferRequested(transmitter, msg.sender, proposed); + } + } + + /** + * @notice accepts the transfer of the payee + */ + function acceptPayeeship(address transmitter) external { + if (s_proposedPayee[transmitter] != msg.sender) revert OnlyCallableByProposedPayee(); + address past = s_transmitterPayees[transmitter]; + s_transmitterPayees[transmitter] = msg.sender; + s_proposedPayee[transmitter] = ZERO_ADDRESS; + + emit PayeeshipTransferred(transmitter, past, msg.sender); + } + + /** + * @notice withdraws PLI received as payment for work performed + */ + function withdrawPayment(address from, address to) external { + if (to == ZERO_ADDRESS) revert InvalidRecipient(); + if (s_transmitterPayees[from] != msg.sender) revert OnlyCallableByPayee(); + uint96 balance = _updateTransmitterBalanceFromPool(from, s_hotVars.totalPremium, uint96(s_transmittersList.length)); + 
s_transmitters[from].balance = 0; + s_expectedLinkBalance = s_expectedLinkBalance - balance; + i_link.transfer(to, balance); + emit PaymentWithdrawn(from, balance, to, msg.sender); + } + + // ================================================================ + // | OWNER / MANAGER ACTIONS | + // ================================================================ + + /** + * @notice sets the privilege config for an upkeep + */ + function setUpkeepPrivilegeConfig(uint256 upkeepId, bytes calldata newPrivilegeConfig) external { + if (msg.sender != s_storage.upkeepPrivilegeManager) { + revert OnlyCallableByUpkeepPrivilegeManager(); + } + s_upkeepPrivilegeConfig[upkeepId] = newPrivilegeConfig; + emit UpkeepPrivilegeConfigSet(upkeepId, newPrivilegeConfig); + } + + /** + * @notice withdraws the owner's PLI balance + */ + function withdrawOwnerFunds() external onlyOwner { + uint96 amount = s_storage.ownerLinkBalance; + s_expectedLinkBalance = s_expectedLinkBalance - amount; + s_storage.ownerLinkBalance = 0; + emit OwnerFundsWithdrawn(amount); + i_link.transfer(msg.sender, amount); + } + + /** + * @notice allows the owner to withdraw any PLI accidentally sent to the contract + */ + function recoverFunds() external onlyOwner { + uint256 total = i_link.balanceOf(address(this)); + i_link.transfer(msg.sender, total - s_expectedLinkBalance); + } + + /** + * @notice sets the payees for the transmitters + */ + function setPayees(address[] calldata payees) external onlyOwner { + if (s_transmittersList.length != payees.length) revert ParameterLengthError(); + for (uint256 i = 0; i < s_transmittersList.length; i++) { + address transmitter = s_transmittersList[i]; + address oldPayee = s_transmitterPayees[transmitter]; + address newPayee = payees[i]; + if ( + (newPayee == ZERO_ADDRESS) || (oldPayee != ZERO_ADDRESS && oldPayee != newPayee && newPayee != IGNORE_ADDRESS) + ) revert InvalidPayee(); + if (newPayee != IGNORE_ADDRESS) { + s_transmitterPayees[transmitter] = newPayee; + } + } + emit 
PayeesUpdated(s_transmittersList, payees); + } + + /** + * @notice sets the migration permission for a peer registry + * @dev this must be done before upkeeps can be migrated to/from another registry + */ + function setPeerRegistryMigrationPermission(address peer, MigrationPermission permission) external onlyOwner { + s_peerRegistryMigrationPermission[peer] = permission; + } + + /** + * @notice pauses the entire registry + */ + function pause() external onlyOwner { + s_hotVars.paused = true; + emit Paused(msg.sender); + } + + /** + * @notice unpauses the entire registry + */ + function unpause() external onlyOwner { + s_hotVars.paused = false; + emit Unpaused(msg.sender); + } + + /** + * @notice sets a generic bytes field used to indicate the privilege that this admin address had + * @param admin the address to set privilege for + * @param newPrivilegeConfig the privileges that this admin has + */ + function setAdminPrivilegeConfig(address admin, bytes calldata newPrivilegeConfig) external { + if (msg.sender != s_storage.upkeepPrivilegeManager) { + revert OnlyCallableByUpkeepPrivilegeManager(); + } + s_adminPrivilegeConfig[admin] = newPrivilegeConfig; + emit AdminPrivilegeConfigSet(admin, newPrivilegeConfig); + } + + // ================================================================ + // | GETTERS | + // ================================================================ + + function getConditionalGasOverhead() external pure returns (uint256) { + return REGISTRY_CONDITIONAL_OVERHEAD; + } + + function getLogGasOverhead() external pure returns (uint256) { + return REGISTRY_LOG_OVERHEAD; + } + + function getPerPerformByteGasOverhead() external pure returns (uint256) { + return REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD; + } + + function getPerSignerGasOverhead() external pure returns (uint256) { + return REGISTRY_PER_SIGNER_GAS_OVERHEAD; + } + + function getCancellationDelay() external pure returns (uint256) { + return CANCELLATION_DELAY; + } + + function getMode() external 
view returns (Mode) { + return i_mode; + } + + function getLinkAddress() external view returns (address) { + return address(i_link); + } + + function getLinkNativeFeedAddress() external view returns (address) { + return address(i_linkNativeFeed); + } + + function getFastGasFeedAddress() external view returns (address) { + return address(i_fastGasFeed); + } + + function getAutomationForwarderLogic() external view returns (address) { + return i_automationForwarderLogic; + } + + function upkeepTranscoderVersion() public pure returns (UpkeepFormat) { + return UPKEEP_TRANSCODER_VERSION_BASE; + } + + function upkeepVersion() public pure returns (uint8) { + return UPKEEP_VERSION_BASE; + } + + /** + * @notice read all of the details about an upkeep + * @dev this function may be deprecated in a future version of automation in favor of individual + * getters for each field + */ + function getUpkeep(uint256 id) external view returns (UpkeepInfo memory upkeepInfo) { + Upkeep memory reg = s_upkeep[id]; + address target = address(reg.forwarder) == address(0) ? address(0) : reg.forwarder.getTarget(); + upkeepInfo = UpkeepInfo({ + target: target, + performGas: reg.performGas, + checkData: s_checkData[id], + balance: reg.balance, + admin: s_upkeepAdmin[id], + maxValidBlocknumber: reg.maxValidBlocknumber, + lastPerformedBlockNumber: reg.lastPerformedBlockNumber, + amountSpent: reg.amountSpent, + paused: reg.paused, + offchainConfig: s_upkeepOffchainConfig[id] + }); + return upkeepInfo; + } + + /** + * @notice retrieve active upkeep IDs. Active upkeep is defined as an upkeep which is not paused and not canceled. 
+ * @param startIndex starting index in list + * @param maxCount max count to retrieve (0 = unlimited) + * @dev the order of IDs in the list is **not guaranteed**, therefore, if making successive calls, one + * should consider keeping the blockheight constant to ensure a holistic picture of the contract state + */ + function getActiveUpkeepIDs(uint256 startIndex, uint256 maxCount) external view returns (uint256[] memory) { + uint256 numUpkeeps = s_upkeepIDs.length(); + if (startIndex >= numUpkeeps) revert IndexOutOfRange(); + uint256 endIndex = startIndex + maxCount; + endIndex = endIndex > numUpkeeps || maxCount == 0 ? numUpkeeps : endIndex; + uint256[] memory ids = new uint256[](endIndex - startIndex); + for (uint256 idx = 0; idx < ids.length; idx++) { + ids[idx] = s_upkeepIDs.at(idx + startIndex); + } + return ids; + } + + /** + * @notice returns the upkeep's trigger type + */ + function getTriggerType(uint256 upkeepId) external pure returns (Trigger) { + return _getTriggerType(upkeepId); + } + + /** + * @notice returns the trigger config for an upkeep + */ + function getUpkeepTriggerConfig(uint256 upkeepId) public view returns (bytes memory) { + return s_upkeepTriggerConfig[upkeepId]; + } + + /** + * @notice read the current info about any transmitter address + */ + function getTransmitterInfo( + address query + ) external view returns (bool active, uint8 index, uint96 balance, uint96 lastCollected, address payee) { + Transmitter memory transmitter = s_transmitters[query]; + + uint96 pooledShare = 0; + if (transmitter.active) { + uint96 totalDifference = s_hotVars.totalPremium - transmitter.lastCollected; + pooledShare = totalDifference / uint96(s_transmittersList.length); + } + + return ( + transmitter.active, + transmitter.index, + (transmitter.balance + pooledShare), + transmitter.lastCollected, + s_transmitterPayees[query] + ); + } + + /** + * @notice read the current info about any signer address + */ + function getSignerInfo(address query) external view 
returns (bool active, uint8 index) { + Signer memory signer = s_signers[query]; + return (signer.active, signer.index); + } + + /** + * @notice read the current state of the registry + */ + function getState() + external + view + returns ( + State memory state, + OnchainConfig memory config, + address[] memory signers, + address[] memory transmitters, + uint8 f + ) + { + state = State({ + nonce: s_storage.nonce, + ownerLinkBalance: s_storage.ownerLinkBalance, + expectedLinkBalance: s_expectedLinkBalance, + totalPremium: s_hotVars.totalPremium, + numUpkeeps: s_upkeepIDs.length(), + configCount: s_storage.configCount, + latestConfigBlockNumber: s_storage.latestConfigBlockNumber, + latestConfigDigest: s_latestConfigDigest, + latestEpoch: s_hotVars.latestEpoch, + paused: s_hotVars.paused + }); + + config = OnchainConfig({ + paymentPremiumPPB: s_hotVars.paymentPremiumPPB, + flatFeeMicroLink: s_hotVars.flatFeeMicroLink, + checkGasLimit: s_storage.checkGasLimit, + stalenessSeconds: s_hotVars.stalenessSeconds, + gasCeilingMultiplier: s_hotVars.gasCeilingMultiplier, + minUpkeepSpend: s_storage.minUpkeepSpend, + maxPerformGas: s_storage.maxPerformGas, + maxCheckDataSize: s_storage.maxCheckDataSize, + maxPerformDataSize: s_storage.maxPerformDataSize, + maxRevertDataSize: s_storage.maxRevertDataSize, + fallbackGasPrice: s_fallbackGasPrice, + fallbackLinkPrice: s_fallbackLinkPrice, + transcoder: s_storage.transcoder, + registrars: s_registrars.values(), + upkeepPrivilegeManager: s_storage.upkeepPrivilegeManager + }); + + return (state, config, s_signersList, s_transmittersList, s_hotVars.f); + } + + /** + * @notice calculates the minimum balance required for an upkeep to remain eligible + * @param id the upkeep id to calculate minimum balance for + */ + function getBalance(uint256 id) external view returns (uint96 balance) { + return s_upkeep[id].balance; + } + + /** + * @notice calculates the minimum balance required for an upkeep to remain eligible + * @param id the upkeep id 
to calculate minimum balance for + */ + function getMinBalance(uint256 id) external view returns (uint96) { + return getMinBalanceForUpkeep(id); + } + + /** + * @notice calculates the minimum balance required for an upkeep to remain eligible + * @param id the upkeep id to calculate minimum balance for + * @dev this will be deprecated in a future version in favor of getMinBalance + */ + function getMinBalanceForUpkeep(uint256 id) public view returns (uint96 minBalance) { + return getMaxPaymentForGas(_getTriggerType(id), s_upkeep[id].performGas); + } + + /** + * @notice calculates the maximum payment for a given gas limit + * @param gasLimit the gas to calculate payment for + */ + function getMaxPaymentForGas(Trigger triggerType, uint32 gasLimit) public view returns (uint96 maxPayment) { + HotVars memory hotVars = s_hotVars; + (uint256 fastGasWei, uint256 linkNative) = _getFeedData(hotVars); + return + _getMaxLinkPayment(hotVars, triggerType, gasLimit, s_storage.maxPerformDataSize, fastGasWei, linkNative, false); + } + + /** + * @notice retrieves the migration permission for a peer registry + */ + function getPeerRegistryMigrationPermission(address peer) external view returns (MigrationPermission) { + return s_peerRegistryMigrationPermission[peer]; + } + + /** + * @notice returns the upkeep privilege config + */ + function getUpkeepPrivilegeConfig(uint256 upkeepId) external view returns (bytes memory) { + return s_upkeepPrivilegeConfig[upkeepId]; + } + + /** + * @notice returns the upkeep privilege config + */ + function getAdminPrivilegeConfig(address admin) external view returns (bytes memory) { + return s_adminPrivilegeConfig[admin]; + } + + /** + * @notice returns the upkeep's forwarder contract + */ + function getForwarder(uint256 upkeepID) external view returns (IAutomationForwarder) { + return s_upkeep[upkeepID].forwarder; + } + + /** + * @notice returns the upkeep's forwarder contract + */ + function hasDedupKey(bytes32 dedupKey) external view returns (bool) 
{ + return s_dedupKeys[dedupKey]; + } +} diff --git a/contracts/src/v0.8/automation/v2_1/LICENSE b/contracts/src/v0.8/automation/v2_1/LICENSE new file mode 100644 index 00000000..8ab52047 --- /dev/null +++ b/contracts/src/v0.8/automation/v2_1/LICENSE @@ -0,0 +1,56 @@ +Business Source License 1.1 + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +"Business Source License" is a trademark of MariaDB Corporation Ab. + +--- + +Parameters + +Licensor: SmartContract Plugin Limited SEZC + +Licensed Work: Automation v2.1 +The Licensed Work is (c) 2023 SmartContract Plugin Limited SEZC + +Additional Use Grant: Any uses listed and defined at https://github.com/goplugin/plugin-automation/tree/main/Automation_Grants.md + + +Change Date: September 12, 2027 + +Change License: MIT + +--- + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. + +If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. 
+ +You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. + +MariaDB hereby grants you permission to use this License’s text to license your works, and to refer to it using the trademark "Business Source License", as long as you comply with the Covenants of Licensor below. + +--- + +Covenants of Licensor + +In consideration of the right to use this License’s text and the "Business Source License" name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where "compatible" means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. + +2. 
To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text "None". + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/contracts/src/v0.8/automation/v2_1/README.md b/contracts/src/v0.8/automation/v2_1/README.md new file mode 100644 index 00000000..6c0e4ce7 --- /dev/null +++ b/contracts/src/v0.8/automation/v2_1/README.md @@ -0,0 +1,40 @@ +# Automation Contract Structure + +The on-chain component of Plugin automation is too large to fit into the [size requirements][size-limit-eip] of a single contract. It is also too large to fit into 2 contracts, a solution that works for most large projects. Therefore, we included this explanation of how the pieces fit together and various tradeoffs incurred. + +### Glossary + +**Master Contract** - also known as the “storage” contract. This is the contract whose state we care about. It is the entry-point into the chain of delegatecalls. (We avoid the term "proxy" because it is commonly associated with upgradability, and this system _is not upgradable_ even though it relies on some of the same mechanics.) + +**Logic Contract** - this is a contract whose sole purpose is to hold code. We use the code at this address and execute it in the context of the master contract in order to increase our total capacity for on-chain code. + +### Overview + +We chain multiple logic contracts together using [fallback functions][fallback] and [delegatecall][delegatecall]. If a function definition is not found on one contract, we fall back to the next, always executing the function in the scope of the master contract. The actual implementation of this is based off of [OZ's Proxy contract][oz-proxy]. + +### Diagram + +```mermaid +graph LR + Master -- delegatecall --> la[Logic A] + la -- delegatecall --> lb[Logic B] + lb -. 
delegatecall .-> lx[Logic X] +``` + +### Special Considerations + +- functions on the master contract have the least gas overhead, therefore, our most price-sensitive functions live there +- functions on the master contract have first-class support from tools like etherscan and tenderly - functions that we (or users) call often to debug should live there +- etherscan supports executing logic contract functions that are once removed from the master - therefore we give secondary preference to the first logic contract for user and debugging functions +- functions on logic A through logic X (as of writing) have no support on etherscan and will essentially be "invisible" to everyone but advanced users - we will try to reserve this space for uncommon interactions that are mostly done programmatically +- We use Logic A, B, C... to avoid confusion with the version ex `KeeperRegistryLogicA2_1.sol` --> Logic Contract A version 2.1 +- Storage locations for logic contract addresses MUST BE BYTECODE (this is done by marking them as "immutable") otherwise the chaining mechanism will break + +### Master Interface + +The Master Interface is a deduped combination of all the interfaces from all contracts in the chain. We generate this interface programmatically using the script `generate-automation-master-interface.ts`. This process is not a hardened one. Users of this script should take great care to ensure its efficacy. 
+ +[size-limit-eip]: https://eips.ethereum.org/EIPS/eip-170 +[fallback]: https://docs.soliditylang.org/en/v0.8.12/contracts.html#fallback-function +[delegatecall]: https://docs.soliditylang.org/en/v0.8.12/introduction-to-smart-contracts.html?highlight=delegatecall#delegatecall-callcode-and-libraries +[oz-proxy]: https://docs.openzeppelin.com/contracts/4.x/api/proxy#Proxy diff --git a/contracts/src/v0.8/automation/v2_1/UpkeepTranscoder4_0.sol b/contracts/src/v0.8/automation/v2_1/UpkeepTranscoder4_0.sol new file mode 100644 index 00000000..53b681d4 --- /dev/null +++ b/contracts/src/v0.8/automation/v2_1/UpkeepTranscoder4_0.sol @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: BUSL-1.1 + +pragma solidity 0.8.16; + +import {UpkeepTranscoderInterfaceV2} from "../interfaces/UpkeepTranscoderInterfaceV2.sol"; +import {TypeAndVersionInterface} from "../../interfaces/TypeAndVersionInterface.sol"; +import {KeeperRegistryBase2_1 as R21} from "./KeeperRegistryBase2_1.sol"; +import {IAutomationForwarder} from "../interfaces/IAutomationForwarder.sol"; + +enum RegistryVersion { + V12, + V13, + V20, + V21 +} + +/** + * @dev structs copied directly from source (can't import without changing the contract version) + */ +struct UpkeepV12 { + uint96 balance; + address lastKeeper; + uint32 executeGas; + uint64 maxValidBlocknumber; + address target; + uint96 amountSpent; + address admin; +} + +struct UpkeepV13 { + uint96 balance; + address lastKeeper; + uint96 amountSpent; + address admin; + uint32 executeGas; + uint32 maxValidBlocknumber; + address target; + bool paused; +} + +struct UpkeepV20 { + uint32 executeGas; + uint32 maxValidBlocknumber; + bool paused; + address target; + uint96 amountSpent; + uint96 balance; + uint32 lastPerformedBlockNumber; +} + +/** + * @notice UpkeepTranscoder allows converting upkeep data from previous keeper registry versions 1.2, 1.3, and + * 2.0 to registry 2.1 + */ +contract UpkeepTranscoder4_0 is UpkeepTranscoderInterfaceV2, TypeAndVersionInterface { + 
error InvalidTranscoding(); + + /** + * @notice versions: + * - UpkeepTranscoder 4.0.0: adds support for registry 2.1; adds support for offchainConfigs + * - UpkeepTranscoder 3.0.0: works with registry 2.0; adds temporary workaround for UpkeepFormat enum bug + */ + string public constant override typeAndVersion = "UpkeepTranscoder 4.0.0"; + uint32 internal constant UINT32_MAX = type(uint32).max; + IAutomationForwarder internal constant ZERO_FORWARDER = IAutomationForwarder(address(0)); + + /** + * @notice transcodeUpkeeps transforms upkeep data from the format expected by + * one registry to the format expected by another. It future-proofs migrations + * by allowing keepers team to customize migration paths and set sensible defaults + * when new fields are added + * @param fromVersion struct version the upkeep is migrating from + * @param encodedUpkeeps encoded upkeep data + * @dev this transcoder should ONLY be use for V1/V2 --> V3 migrations + * @dev this transcoder **ignores** the toVersion param, as it assumes all migrations are + * for the V3 version. Therefore, it is the responsibility of the deployer of this contract + * to ensure it is not used in any other migration paths. 
+ */ + function transcodeUpkeeps( + uint8 fromVersion, + uint8, + bytes calldata encodedUpkeeps + ) external view override returns (bytes memory) { + // v1.2 => v2.1 + if (fromVersion == uint8(RegistryVersion.V12)) { + (uint256[] memory ids, UpkeepV12[] memory upkeepsV12, bytes[] memory checkDatas) = abi.decode( + encodedUpkeeps, + (uint256[], UpkeepV12[], bytes[]) + ); + if (ids.length != upkeepsV12.length || ids.length != checkDatas.length) { + revert InvalidTranscoding(); + } + address[] memory targets = new address[](ids.length); + address[] memory admins = new address[](ids.length); + R21.Upkeep[] memory newUpkeeps = new R21.Upkeep[](ids.length); + UpkeepV12 memory upkeepV12; + for (uint256 idx = 0; idx < ids.length; idx++) { + upkeepV12 = upkeepsV12[idx]; + newUpkeeps[idx] = R21.Upkeep({ + performGas: upkeepV12.executeGas, + maxValidBlocknumber: UINT32_MAX, // maxValidBlocknumber is uint64 in V1, hence a new default value is provided + paused: false, // migrated upkeeps are not paused by default + forwarder: ZERO_FORWARDER, + amountSpent: upkeepV12.amountSpent, + balance: upkeepV12.balance, + lastPerformedBlockNumber: 0 + }); + targets[idx] = upkeepV12.target; + admins[idx] = upkeepV12.admin; + } + return abi.encode(ids, newUpkeeps, targets, admins, checkDatas, new bytes[](ids.length), new bytes[](ids.length)); + } + // v1.3 => v2.1 + if (fromVersion == uint8(RegistryVersion.V13)) { + (uint256[] memory ids, UpkeepV13[] memory upkeepsV13, bytes[] memory checkDatas) = abi.decode( + encodedUpkeeps, + (uint256[], UpkeepV13[], bytes[]) + ); + if (ids.length != upkeepsV13.length || ids.length != checkDatas.length) { + revert InvalidTranscoding(); + } + address[] memory targets = new address[](ids.length); + address[] memory admins = new address[](ids.length); + R21.Upkeep[] memory newUpkeeps = new R21.Upkeep[](ids.length); + UpkeepV13 memory upkeepV13; + for (uint256 idx = 0; idx < ids.length; idx++) { + upkeepV13 = upkeepsV13[idx]; + newUpkeeps[idx] = R21.Upkeep({ 
+ performGas: upkeepV13.executeGas, + maxValidBlocknumber: upkeepV13.maxValidBlocknumber, + paused: upkeepV13.paused, + forwarder: ZERO_FORWARDER, + amountSpent: upkeepV13.amountSpent, + balance: upkeepV13.balance, + lastPerformedBlockNumber: 0 + }); + targets[idx] = upkeepV13.target; + admins[idx] = upkeepV13.admin; + } + return abi.encode(ids, newUpkeeps, targets, admins, checkDatas, new bytes[](ids.length), new bytes[](ids.length)); + } + // v2.0 => v2.1 + if (fromVersion == uint8(RegistryVersion.V20)) { + (uint256[] memory ids, UpkeepV20[] memory upkeepsV20, bytes[] memory checkDatas, address[] memory admins) = abi + .decode(encodedUpkeeps, (uint256[], UpkeepV20[], bytes[], address[])); + if (ids.length != upkeepsV20.length || ids.length != checkDatas.length) { + revert InvalidTranscoding(); + } + // bit of a hack - transcodeUpkeeps should be a pure function + R21.Upkeep[] memory newUpkeeps = new R21.Upkeep[](ids.length); + bytes[] memory emptyBytes = new bytes[](ids.length); + address[] memory targets = new address[](ids.length); + UpkeepV20 memory upkeepV20; + for (uint256 idx = 0; idx < ids.length; idx++) { + upkeepV20 = upkeepsV20[idx]; + newUpkeeps[idx] = R21.Upkeep({ + performGas: upkeepV20.executeGas, + maxValidBlocknumber: upkeepV20.maxValidBlocknumber, + paused: upkeepV20.paused, + forwarder: ZERO_FORWARDER, + amountSpent: upkeepV20.amountSpent, + balance: upkeepV20.balance, + lastPerformedBlockNumber: 0 + }); + targets[idx] = upkeepV20.target; + } + return abi.encode(ids, newUpkeeps, targets, admins, checkDatas, emptyBytes, emptyBytes); + } + // v2.1 => v2.1 + if (fromVersion == uint8(RegistryVersion.V21)) { + return encodedUpkeeps; + } + + revert InvalidTranscoding(); + } +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/FunctionsBilling.sol b/contracts/src/v0.8/functions/dev/v1_X/FunctionsBilling.sol new file mode 100644 index 00000000..85c728ce --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/FunctionsBilling.sol @@ -0,0 +1,380 @@ +// 
SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {IFunctionsSubscriptions} from "./interfaces/IFunctionsSubscriptions.sol"; +import {AggregatorV3Interface} from "../../../shared/interfaces/AggregatorV3Interface.sol"; +import {IFunctionsBilling, FunctionsBillingConfig} from "./interfaces/IFunctionsBilling.sol"; + +import {Routable} from "./Routable.sol"; +import {FunctionsResponse} from "./libraries/FunctionsResponse.sol"; + +import {SafeCast} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SafeCast.sol"; + +import {ChainSpecificUtil} from "./libraries/ChainSpecificUtil.sol"; + +/// @title Functions Billing contract +/// @notice Contract that calculates payment from users to the nodes of the Decentralized Oracle Network (DON). +abstract contract FunctionsBilling is Routable, IFunctionsBilling { + using FunctionsResponse for FunctionsResponse.RequestMeta; + using FunctionsResponse for FunctionsResponse.Commitment; + using FunctionsResponse for FunctionsResponse.FulfillResult; + + uint256 private constant REASONABLE_GAS_PRICE_CEILING = 1_000_000_000_000_000; // 1 million gwei + + event RequestBilled( + bytes32 indexed requestId, + uint96 juelsPerGas, + uint256 l1FeeShareWei, + uint96 callbackCostJuels, + uint96 totalCostJuels + ); + + // ================================================================ + // | Request Commitment state | + // ================================================================ + + mapping(bytes32 requestId => bytes32 commitmentHash) private s_requestCommitments; + + event CommitmentDeleted(bytes32 requestId); + + FunctionsBillingConfig private s_config; + + event ConfigUpdated(FunctionsBillingConfig config); + + error UnsupportedRequestDataVersion(); + error InsufficientBalance(); + error InvalidSubscription(); + error UnauthorizedSender(); + error MustBeSubOwner(address owner); + error InvalidLinkWeiPrice(int256 linkWei); + error PaymentTooLarge(); + error NoTransmittersSet(); + error 
InvalidCalldata(); + + // ================================================================ + // | Balance state | + // ================================================================ + + mapping(address transmitter => uint96 balanceJuelsLink) private s_withdrawableTokens; + // Pool together collected DON fees + // Disperse them on withdrawal or change in OCR configuration + uint96 internal s_feePool; + + AggregatorV3Interface private s_linkToNativeFeed; + + // ================================================================ + // | Initialization | + // ================================================================ + constructor(address router, FunctionsBillingConfig memory config, address linkToNativeFeed) Routable(router) { + s_linkToNativeFeed = AggregatorV3Interface(linkToNativeFeed); + + updateConfig(config); + } + + // ================================================================ + // | Configuration | + // ================================================================ + + /// @notice Gets the Plugin Coordinator's billing configuration + /// @return config + function getConfig() external view returns (FunctionsBillingConfig memory) { + return s_config; + } + + /// @notice Sets the Plugin Coordinator's billing configuration + /// @param config - See the contents of the FunctionsBillingConfig struct in IFunctionsBilling.sol for more information + function updateConfig(FunctionsBillingConfig memory config) public { + _onlyOwner(); + + s_config = config; + emit ConfigUpdated(config); + } + + // ================================================================ + // | Fee Calculation | + // ================================================================ + + /// @inheritdoc IFunctionsBilling + function getDONFee(bytes memory /* requestData */) public view override returns (uint72) { + return s_config.donFee; + } + + /// @inheritdoc IFunctionsBilling + function getAdminFee() public view override returns (uint72) { + return _getRouter().getAdminFee(); + } + + 
/// @inheritdoc IFunctionsBilling + function getWeiPerUnitLink() public view returns (uint256) { + FunctionsBillingConfig memory config = s_config; + (, int256 weiPerUnitLink, , uint256 timestamp, ) = s_linkToNativeFeed.latestRoundData(); + // solhint-disable-next-line not-rely-on-time + if (config.feedStalenessSeconds < block.timestamp - timestamp && config.feedStalenessSeconds > 0) { + return config.fallbackNativePerUnitLink; + } + if (weiPerUnitLink <= 0) { + revert InvalidLinkWeiPrice(weiPerUnitLink); + } + return uint256(weiPerUnitLink); + } + + function _getJuelsFromWei(uint256 amountWei) private view returns (uint96) { + // (1e18 juels/link) * wei / (wei/link) = juels + // There are only 1e9*1e18 = 1e27 juels in existence, should not exceed uint96 (2^96 ~ 7e28) + return SafeCast.toUint96((1e18 * amountWei) / getWeiPerUnitLink()); + } + + // ================================================================ + // | Cost Estimation | + // ================================================================ + + /// @inheritdoc IFunctionsBilling + function estimateCost( + uint64 subscriptionId, + bytes calldata data, + uint32 callbackGasLimit, + uint256 gasPriceWei + ) external view override returns (uint96) { + _getRouter().isValidCallbackGasLimit(subscriptionId, callbackGasLimit); + // Reasonable ceilings to prevent integer overflows + if (gasPriceWei > REASONABLE_GAS_PRICE_CEILING) { + revert InvalidCalldata(); + } + uint72 adminFee = getAdminFee(); + uint72 donFee = getDONFee(data); + return _calculateCostEstimate(callbackGasLimit, gasPriceWei, donFee, adminFee); + } + + /// @notice Estimate the cost in Juels of PLI + // that will be charged to a subscription to fulfill a Functions request + // Gas Price can be overestimated to account for flucuations between request and response time + function _calculateCostEstimate( + uint32 callbackGasLimit, + uint256 gasPriceWei, + uint72 donFee, + uint72 adminFee + ) internal view returns (uint96) { + // If gas price is less 
than the minimum fulfillment gas price, override to using the minimum + if (gasPriceWei < s_config.minimumEstimateGasPriceWei) { + gasPriceWei = s_config.minimumEstimateGasPriceWei; + } + + uint256 gasPriceWithOverestimation = gasPriceWei + + ((gasPriceWei * s_config.fulfillmentGasPriceOverEstimationBP) / 10_000); + /// @NOTE: Basis Points are 1/100th of 1%, divide by 10_000 to bring back to original units + + uint256 executionGas = s_config.gasOverheadBeforeCallback + s_config.gasOverheadAfterCallback + callbackGasLimit; + uint256 l1FeeWei = ChainSpecificUtil._getCurrentTxL1GasFees(msg.data); + uint96 estimatedGasReimbursementJuels = _getJuelsFromWei((gasPriceWithOverestimation * executionGas) + l1FeeWei); + + uint96 feesJuels = uint96(donFee) + uint96(adminFee); + + return estimatedGasReimbursementJuels + feesJuels; + } + + // ================================================================ + // | Billing | + // ================================================================ + + /// @notice Initiate the billing process for an Functions request + /// @dev Only callable by the Functions Router + /// @param request - Plugin Functions request data, see FunctionsResponse.RequestMeta for the structure + /// @return commitment - The parameters of the request that must be held consistent at response time + function _startBilling( + FunctionsResponse.RequestMeta memory request + ) internal returns (FunctionsResponse.Commitment memory commitment) { + FunctionsBillingConfig memory config = s_config; + + // Nodes should support all past versions of the structure + if (request.dataVersion > config.maxSupportedRequestDataVersion) { + revert UnsupportedRequestDataVersion(); + } + + uint72 donFee = getDONFee(request.data); + uint96 estimatedTotalCostJuels = _calculateCostEstimate( + request.callbackGasLimit, + tx.gasprice, + donFee, + request.adminFee + ); + + // Check that subscription can afford the estimated cost + if ((request.availableBalance) < estimatedTotalCostJuels) { 
+ revert InsufficientBalance(); + } + + uint32 timeoutTimestamp = uint32(block.timestamp + config.requestTimeoutSeconds); + bytes32 requestId = keccak256( + abi.encode( + address(this), + request.requestingContract, + request.subscriptionId, + request.initiatedRequests + 1, + keccak256(request.data), + request.dataVersion, + request.callbackGasLimit, + estimatedTotalCostJuels, + timeoutTimestamp, + // solhint-disable-next-line avoid-tx-origin + tx.origin + ) + ); + + commitment = FunctionsResponse.Commitment({ + adminFee: request.adminFee, + coordinator: address(this), + client: request.requestingContract, + subscriptionId: request.subscriptionId, + callbackGasLimit: request.callbackGasLimit, + estimatedTotalCostJuels: estimatedTotalCostJuels, + timeoutTimestamp: timeoutTimestamp, + requestId: requestId, + donFee: donFee, + gasOverheadBeforeCallback: config.gasOverheadBeforeCallback, + gasOverheadAfterCallback: config.gasOverheadAfterCallback + }); + + s_requestCommitments[requestId] = keccak256(abi.encode(commitment)); + + return commitment; + } + + /// @notice Finalize billing process for an Functions request by sending a callback to the Client contract and then charging the subscription + /// @param requestId identifier for the request that was generated by the Registry in the beginBilling commitment + /// @param response response data from DON consensus + /// @param err error from DON consensus + /// @param reportBatchSize the number of fulfillments in the transmitter's report + /// @return result fulfillment result + /// @dev Only callable by a node that has been approved on the Coordinator + /// @dev simulated offchain to determine if sufficient balance is present to fulfill the request + function _fulfillAndBill( + bytes32 requestId, + bytes memory response, + bytes memory err, + bytes memory onchainMetadata, + bytes memory /* offchainMetadata TODO: use in getDonFee() for dynamic billing */, + uint8 reportBatchSize + ) internal returns 
(FunctionsResponse.FulfillResult) { + FunctionsResponse.Commitment memory commitment = abi.decode(onchainMetadata, (FunctionsResponse.Commitment)); + + uint256 gasOverheadWei = (commitment.gasOverheadBeforeCallback + commitment.gasOverheadAfterCallback) * tx.gasprice; + uint256 l1FeeShareWei = ChainSpecificUtil._getCurrentTxL1GasFees(msg.data) / reportBatchSize; + // Gas overhead without callback + uint96 gasOverheadJuels = _getJuelsFromWei(gasOverheadWei + l1FeeShareWei); + uint96 juelsPerGas = _getJuelsFromWei(tx.gasprice); + + // The Functions Router will perform the callback to the client contract + (FunctionsResponse.FulfillResult resultCode, uint96 callbackCostJuels) = _getRouter().fulfill( + response, + err, + juelsPerGas, + gasOverheadJuels + commitment.donFee, // cost without callback or admin fee, those will be added by the Router + msg.sender, + commitment + ); + + // The router will only pay the DON on successfully processing the fulfillment + // In these two fulfillment results the user has been charged + // Otherwise, the Coordinator should hold on to the request commitment + if ( + resultCode == FunctionsResponse.FulfillResult.FULFILLED || + resultCode == FunctionsResponse.FulfillResult.USER_CALLBACK_ERROR + ) { + delete s_requestCommitments[requestId]; + // Reimburse the transmitter for the fulfillment gas cost + s_withdrawableTokens[msg.sender] = gasOverheadJuels + callbackCostJuels; + // Put donFee into the pool of fees, to be split later + // Saves on storage writes that would otherwise be charged to the user + s_feePool += commitment.donFee; + emit RequestBilled({ + requestId: requestId, + juelsPerGas: juelsPerGas, + l1FeeShareWei: l1FeeShareWei, + callbackCostJuels: callbackCostJuels, + totalCostJuels: gasOverheadJuels + callbackCostJuels + commitment.donFee + commitment.adminFee + }); + } + + return resultCode; + } + + // ================================================================ + // | Request Timeout | + // 
================================================================ + + /// @inheritdoc IFunctionsBilling + /// @dev Only callable by the Router + /// @dev Used by FunctionsRouter.sol during timeout of a request + function deleteCommitment(bytes32 requestId) external override onlyRouter { + // Delete commitment + delete s_requestCommitments[requestId]; + emit CommitmentDeleted(requestId); + } + + // ================================================================ + // | Fund withdrawal | + // ================================================================ + + /// @inheritdoc IFunctionsBilling + function oracleWithdraw(address recipient, uint96 amount) external { + _disperseFeePool(); + + if (amount == 0) { + amount = s_withdrawableTokens[msg.sender]; + } else if (s_withdrawableTokens[msg.sender] < amount) { + revert InsufficientBalance(); + } + s_withdrawableTokens[msg.sender] -= amount; + IFunctionsSubscriptions(address(_getRouter())).oracleWithdraw(recipient, amount); + } + + /// @inheritdoc IFunctionsBilling + /// @dev Only callable by the Coordinator owner + function oracleWithdrawAll() external { + _onlyOwner(); + _disperseFeePool(); + + address[] memory transmitters = _getTransmitters(); + + // Bounded by "maxNumOracles" on OCR2Abstract.sol + for (uint256 i = 0; i < transmitters.length; ++i) { + uint96 balance = s_withdrawableTokens[transmitters[i]]; + if (balance > 0) { + s_withdrawableTokens[transmitters[i]] = 0; + IFunctionsSubscriptions(address(_getRouter())).oracleWithdraw(transmitters[i], balance); + } + } + } + + // Overriden in FunctionsCoordinator, which has visibility into transmitters + function _getTransmitters() internal view virtual returns (address[] memory); + + // DON fees are collected into a pool s_feePool + // When OCR configuration changes, or any oracle withdraws, this must be dispersed + function _disperseFeePool() internal { + if (s_feePool == 0) { + return; + } + // All transmitters are assumed to also be observers + // Pay out the DON 
fee to all transmitters + address[] memory transmitters = _getTransmitters(); + uint256 numberOfTransmitters = transmitters.length; + if (numberOfTransmitters == 0) { + revert NoTransmittersSet(); + } + uint96 feePoolShare = s_feePool / uint96(numberOfTransmitters); + // Bounded by "maxNumOracles" on OCR2Abstract.sol + for (uint256 i = 0; i < numberOfTransmitters; ++i) { + s_withdrawableTokens[transmitters[i]] += feePoolShare; + } + s_feePool -= feePoolShare * uint96(numberOfTransmitters); + } + + // Overriden in FunctionsCoordinator.sol + function _onlyOwner() internal view virtual; + + // Used in FunctionsCoordinator.sol + function _isExistingRequest(bytes32 requestId) internal view returns (bool) { + return s_requestCommitments[requestId] != bytes32(0); + } +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/FunctionsClient.sol b/contracts/src/v0.8/functions/dev/v1_X/FunctionsClient.sol new file mode 100644 index 00000000..b59cc3bb --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/FunctionsClient.sol @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {IFunctionsRouter} from "./interfaces/IFunctionsRouter.sol"; +import {IFunctionsClient} from "./interfaces/IFunctionsClient.sol"; + +import {FunctionsRequest} from "./libraries/FunctionsRequest.sol"; + +/// @title The Plugin Functions client contract +/// @notice Contract developers can inherit this contract in order to make Plugin Functions requests +abstract contract FunctionsClient is IFunctionsClient { + using FunctionsRequest for FunctionsRequest.Request; + + IFunctionsRouter internal immutable i_functionsRouter; + + event RequestSent(bytes32 indexed id); + event RequestFulfilled(bytes32 indexed id); + + error OnlyRouterCanFulfill(); + + constructor(address router) { + i_functionsRouter = IFunctionsRouter(router); + } + + /// @notice Sends a Plugin Functions request + /// @param data The CBOR encoded bytes data for a Functions request + /// @param subscriptionId The 
subscription ID that will be charged to service the request + /// @param callbackGasLimit the amount of gas that will be available for the fulfillment callback + /// @return requestId The generated request ID for this request + function _sendRequest( + bytes memory data, + uint64 subscriptionId, + uint32 callbackGasLimit, + bytes32 donId + ) internal returns (bytes32) { + bytes32 requestId = i_functionsRouter.sendRequest( + subscriptionId, + data, + FunctionsRequest.REQUEST_DATA_VERSION, + callbackGasLimit, + donId + ); + emit RequestSent(requestId); + return requestId; + } + + /// @notice User defined function to handle a response from the DON + /// @param requestId The request ID, returned by sendRequest() + /// @param response Aggregated response from the execution of the user's source code + /// @param err Aggregated error from the execution of the user code or from the execution pipeline + /// @dev Either response or error parameter will be set, but never both + function _fulfillRequest(bytes32 requestId, bytes memory response, bytes memory err) internal virtual; + + /// @inheritdoc IFunctionsClient + function handleOracleFulfillment(bytes32 requestId, bytes memory response, bytes memory err) external override { + if (msg.sender != address(i_functionsRouter)) { + revert OnlyRouterCanFulfill(); + } + _fulfillRequest(requestId, response, err); + emit RequestFulfilled(requestId); + } +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/FunctionsCoordinator.sol b/contracts/src/v0.8/functions/dev/v1_X/FunctionsCoordinator.sol new file mode 100644 index 00000000..bbaff43b --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/FunctionsCoordinator.sol @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {IFunctionsCoordinator} from "./interfaces/IFunctionsCoordinator.sol"; +import {ITypeAndVersion} from "../../../shared/interfaces/ITypeAndVersion.sol"; + +import {FunctionsBilling, FunctionsBillingConfig} from 
"./FunctionsBilling.sol"; +import {OCR2Base} from "./ocr/OCR2Base.sol"; +import {FunctionsResponse} from "./libraries/FunctionsResponse.sol"; + +/// @title Functions Coordinator contract +/// @notice Contract that nodes of a Decentralized Oracle Network (DON) interact with +contract FunctionsCoordinator is OCR2Base, IFunctionsCoordinator, FunctionsBilling { + using FunctionsResponse for FunctionsResponse.RequestMeta; + using FunctionsResponse for FunctionsResponse.Commitment; + using FunctionsResponse for FunctionsResponse.FulfillResult; + + /// @inheritdoc ITypeAndVersion + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant override typeAndVersion = "Functions Coordinator v1.2.0"; + + event OracleRequest( + bytes32 indexed requestId, + address indexed requestingContract, + address requestInitiator, + uint64 subscriptionId, + address subscriptionOwner, + bytes data, + uint16 dataVersion, + bytes32 flags, + uint64 callbackGasLimit, + FunctionsResponse.Commitment commitment + ); + event OracleResponse(bytes32 indexed requestId, address transmitter); + + error InconsistentReportData(); + error EmptyPublicKey(); + error UnauthorizedPublicKeyChange(); + + bytes private s_donPublicKey; + bytes private s_thresholdPublicKey; + + constructor( + address router, + FunctionsBillingConfig memory config, + address linkToNativeFeed + ) OCR2Base() FunctionsBilling(router, config, linkToNativeFeed) {} + + /// @inheritdoc IFunctionsCoordinator + function getThresholdPublicKey() external view override returns (bytes memory) { + if (s_thresholdPublicKey.length == 0) { + revert EmptyPublicKey(); + } + return s_thresholdPublicKey; + } + + /// @inheritdoc IFunctionsCoordinator + function setThresholdPublicKey(bytes calldata thresholdPublicKey) external override onlyOwner { + if (thresholdPublicKey.length == 0) { + revert EmptyPublicKey(); + } + s_thresholdPublicKey = thresholdPublicKey; + } + + /// @inheritdoc IFunctionsCoordinator + 
function getDONPublicKey() external view override returns (bytes memory) {
  // Reject reads until a key has been published by the owner.
  bytes memory publicKey = s_donPublicKey;
  if (publicKey.length == 0) {
    revert EmptyPublicKey();
  }
  return publicKey;
}

/// @inheritdoc IFunctionsCoordinator
function setDONPublicKey(bytes calldata donPublicKey) external override onlyOwner {
  // An empty key would brick getDONPublicKey(), so disallow it up front.
  if (donPublicKey.length == 0) {
    revert EmptyPublicKey();
  }
  s_donPublicKey = donPublicKey;
}

/// @dev check if node is in current transmitter list
function _isTransmitter(address node) internal view returns (bool) {
  address[] memory transmitterList = s_transmitters;
  uint256 transmitterCount = transmitterList.length;
  // Bounded by "maxNumOracles" on OCR2Abstract.sol
  for (uint256 idx = 0; idx < transmitterCount; ++idx) {
    if (transmitterList[idx] == node) {
      return true;
    }
  }
  return false;
}

/// @inheritdoc IFunctionsCoordinator
function startRequest(
  FunctionsResponse.RequestMeta calldata request
) external override onlyRouter returns (FunctionsResponse.Commitment memory commitment) {
  // Reserve the subscription funds and build the commitment for this request.
  commitment = _startBilling(request);

  // Surface the full request context to the DON nodes listening for this event.
  emit OracleRequest(
    commitment.requestId,
    request.requestingContract,
    // solhint-disable-next-line avoid-tx-origin
    tx.origin,
    request.subscriptionId,
    request.subscriptionOwner,
    request.data,
    request.dataVersion,
    request.flags,
    request.callbackGasLimit,
    commitment
  );

  // Named return: "commitment" is already populated above.
}

/// @dev DON fees are pooled together. If the OCR configuration is going to change, these need to be distributed.
+ function _beforeSetConfig(uint8 /* _f */, bytes memory /* _onchainConfig */) internal override { + if (_getTransmitters().length > 0) { + _disperseFeePool(); + } + } + + /// @dev Used by FunctionsBilling.sol + function _getTransmitters() internal view override returns (address[] memory) { + return s_transmitters; + } + + function _beforeTransmit( + bytes calldata report + ) internal view override returns (bool shouldStop, DecodedReport memory decodedReport) { + ( + bytes32[] memory requestIds, + bytes[] memory results, + bytes[] memory errors, + bytes[] memory onchainMetadata, + bytes[] memory offchainMetadata + ) = abi.decode(report, (bytes32[], bytes[], bytes[], bytes[], bytes[])); + uint256 numberOfFulfillments = uint8(requestIds.length); + + if ( + numberOfFulfillments == 0 || + numberOfFulfillments != results.length || + numberOfFulfillments != errors.length || + numberOfFulfillments != onchainMetadata.length || + numberOfFulfillments != offchainMetadata.length + ) { + revert ReportInvalid("Fields must be equal length"); + } + + for (uint256 i = 0; i < numberOfFulfillments; ++i) { + if (_isExistingRequest(requestIds[i])) { + // If there is an existing request, validate report + // Leave shouldStop to default, false + break; + } + if (i == numberOfFulfillments - 1) { + // If the last fulfillment on the report does not exist, then all are duplicates + // Indicate that it's safe to stop to save on the gas of validating the report + shouldStop = true; + } + } + + return ( + shouldStop, + DecodedReport({ + requestIds: requestIds, + results: results, + errors: errors, + onchainMetadata: onchainMetadata, + offchainMetadata: offchainMetadata + }) + ); + } + + /// @dev Report hook called within OCR2Base.sol + function _report(DecodedReport memory decodedReport) internal override { + uint256 numberOfFulfillments = uint8(decodedReport.requestIds.length); + + // Bounded by "MaxRequestBatchSize" on the Job's ReportingPluginConfig + for (uint256 i = 0; i < 
numberOfFulfillments; ++i) { + FunctionsResponse.FulfillResult result = FunctionsResponse.FulfillResult( + _fulfillAndBill( + decodedReport.requestIds[i], + decodedReport.results[i], + decodedReport.errors[i], + decodedReport.onchainMetadata[i], + decodedReport.offchainMetadata[i], + uint8(numberOfFulfillments) // will not exceed "MaxRequestBatchSize" on the Job's ReportingPluginConfig + ) + ); + + // Emit on successfully processing the fulfillment + // In these two fulfillment results the user has been charged + // Otherwise, the DON will re-try + if ( + result == FunctionsResponse.FulfillResult.FULFILLED || + result == FunctionsResponse.FulfillResult.USER_CALLBACK_ERROR + ) { + emit OracleResponse(decodedReport.requestIds[i], msg.sender); + } + } + } + + /// @dev Used in FunctionsBilling.sol + function _onlyOwner() internal view override { + _validateOwnership(); + } +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/FunctionsRouter.sol b/contracts/src/v0.8/functions/dev/v1_X/FunctionsRouter.sol new file mode 100644 index 00000000..7def612c --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/FunctionsRouter.sol @@ -0,0 +1,587 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {ITypeAndVersion} from "../../../shared/interfaces/ITypeAndVersion.sol"; +import {IFunctionsRouter} from "./interfaces/IFunctionsRouter.sol"; +import {IFunctionsCoordinator} from "./interfaces/IFunctionsCoordinator.sol"; +import {IAccessController} from "../../../shared/interfaces/IAccessController.sol"; + +import {FunctionsSubscriptions} from "./FunctionsSubscriptions.sol"; +import {FunctionsResponse} from "./libraries/FunctionsResponse.sol"; +import {ConfirmedOwner} from "../../../shared/access/ConfirmedOwner.sol"; + +import {SafeCast} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SafeCast.sol"; +import {Pausable} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/security/Pausable.sol"; + +contract FunctionsRouter is 
IFunctionsRouter, FunctionsSubscriptions, Pausable, ITypeAndVersion, ConfirmedOwner { + using FunctionsResponse for FunctionsResponse.RequestMeta; + using FunctionsResponse for FunctionsResponse.Commitment; + using FunctionsResponse for FunctionsResponse.FulfillResult; + + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant override typeAndVersion = "Functions Router v2.0.0"; + + // We limit return data to a selector plus 4 words. This is to avoid + // malicious contracts from returning large amounts of data and causing + // repeated out-of-gas scenarios. + uint16 public constant MAX_CALLBACK_RETURN_BYTES = 4 + 4 * 32; + uint8 private constant MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX = 0; + + event RequestStart( + bytes32 indexed requestId, + bytes32 indexed donId, + uint64 indexed subscriptionId, + address subscriptionOwner, + address requestingContract, + address requestInitiator, + bytes data, + uint16 dataVersion, + uint32 callbackGasLimit, + uint96 estimatedTotalCostJuels + ); + + event RequestProcessed( + bytes32 indexed requestId, + uint64 indexed subscriptionId, + uint96 totalCostJuels, + address transmitter, + FunctionsResponse.FulfillResult resultCode, + bytes response, + bytes err, + bytes callbackReturnData + ); + + event RequestNotProcessed( + bytes32 indexed requestId, + address coordinator, + address transmitter, + FunctionsResponse.FulfillResult resultCode + ); + + error EmptyRequestData(); + error OnlyCallableFromCoordinator(); + error SenderMustAcceptTermsOfService(address sender); + error InvalidGasFlagValue(uint8 value); + error GasLimitTooBig(uint32 limit); + error DuplicateRequestId(bytes32 requestId); + + struct CallbackResult { + bool success; // ══════╸ Whether the callback succeeded or not + uint256 gasUsed; // ═══╸ The amount of gas consumed during the callback + bytes returnData; // ══╸ The return of the callback function + } + + // 
================================================================ + // | Route state | + // ================================================================ + + mapping(bytes32 id => address routableContract) private s_route; + + error RouteNotFound(bytes32 id); + + // Identifier for the route to the Terms of Service Allow List + bytes32 private s_allowListId; + + // ================================================================ + // | Configuration state | + // ================================================================ + struct Config { + uint16 maxConsumersPerSubscription; // ═════════╗ Maximum number of consumers which can be added to a single subscription. This bound ensures we are able to loop over all subscription consumers as needed, without exceeding gas limits. Should a user require more consumers, they can use multiple subscriptions. + uint72 adminFee; // ║ Flat fee (in Juels of PLI) that will be paid to the Router owner for operation of the network + bytes4 handleOracleFulfillmentSelector; // ║ The function selector that is used when calling back to the Client contract + uint16 gasForCallExactCheck; // ════════════════╝ Used during calling back to the client. Ensures we have at least enough gas to be able to revert if gasAmount > 63//64*gas available. + uint32[] maxCallbackGasLimits; // ══════════════╸ List of max callback gas limits used by flag with MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX + uint16 subscriptionDepositMinimumRequests; //═══╗ Amount of requests that must be completed before the full subscription balance will be released when closing a subscription account. + uint72 subscriptionDepositJuels; // ════════════╝ Amount of subscription funds that are held as a deposit until Config.subscriptionDepositMinimumRequests are made using the subscription. 
+ } + + Config private s_config; + + event ConfigUpdated(Config); + + // ================================================================ + // | Proposal state | + // ================================================================ + + uint8 private constant MAX_PROPOSAL_SET_LENGTH = 8; + + struct ContractProposalSet { + bytes32[] ids; // ══╸ The IDs that key into the routes that will be modified if the update is applied + address[] to; // ═══╸ The address of the contracts that the route will point to if the updated is applied + } + ContractProposalSet private s_proposedContractSet; + + event ContractProposed( + bytes32 proposedContractSetId, + address proposedContractSetFromAddress, + address proposedContractSetToAddress + ); + + event ContractUpdated(bytes32 id, address from, address to); + + error InvalidProposal(); + error IdentifierIsReserved(bytes32 id); + + // ================================================================ + // | Initialization | + // ================================================================ + + constructor( + address linkToken, + Config memory config + ) FunctionsSubscriptions(linkToken) ConfirmedOwner(msg.sender) Pausable() { + // Set the intial configuration + updateConfig(config); + } + + // ================================================================ + // | Configuration | + // ================================================================ + + /// @notice The identifier of the route to retrieve the address of the access control contract + // The access control contract controls which accounts can manage subscriptions + /// @return id - bytes32 id that can be passed to the "getContractById" of the Router + function getConfig() external view returns (Config memory) { + return s_config; + } + + /// @notice The router configuration + function updateConfig(Config memory config) public onlyOwner { + s_config = config; + emit ConfigUpdated(config); + } + + /// @inheritdoc IFunctionsRouter + function isValidCallbackGasLimit(uint64 
subscriptionId, uint32 callbackGasLimit) public view { + uint8 callbackGasLimitsIndexSelector = uint8(getFlags(subscriptionId)[MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX]); + if (callbackGasLimitsIndexSelector >= s_config.maxCallbackGasLimits.length) { + revert InvalidGasFlagValue(callbackGasLimitsIndexSelector); + } + uint32 maxCallbackGasLimit = s_config.maxCallbackGasLimits[callbackGasLimitsIndexSelector]; + if (callbackGasLimit > maxCallbackGasLimit) { + revert GasLimitTooBig(maxCallbackGasLimit); + } + } + + /// @inheritdoc IFunctionsRouter + function getAdminFee() external view override returns (uint72) { + return s_config.adminFee; + } + + /// @inheritdoc IFunctionsRouter + function getAllowListId() external view override returns (bytes32) { + return s_allowListId; + } + + /// @inheritdoc IFunctionsRouter + function setAllowListId(bytes32 allowListId) external override onlyOwner { + s_allowListId = allowListId; + } + + /// @dev Used within FunctionsSubscriptions.sol + function _getMaxConsumers() internal view override returns (uint16) { + return s_config.maxConsumersPerSubscription; + } + + /// @dev Used within FunctionsSubscriptions.sol + function _getSubscriptionDepositDetails() internal view override returns (uint16, uint72) { + return (s_config.subscriptionDepositMinimumRequests, s_config.subscriptionDepositJuels); + } + + // ================================================================ + // | Requests | + // ================================================================ + + /// @inheritdoc IFunctionsRouter + function sendRequest( + uint64 subscriptionId, + bytes calldata data, + uint16 dataVersion, + uint32 callbackGasLimit, + bytes32 donId + ) external override returns (bytes32) { + IFunctionsCoordinator coordinator = IFunctionsCoordinator(getContractById(donId)); + return _sendRequest(donId, coordinator, subscriptionId, data, dataVersion, callbackGasLimit); + } + + /// @inheritdoc IFunctionsRouter + function sendRequestToProposed( + uint64 
subscriptionId, + bytes calldata data, + uint16 dataVersion, + uint32 callbackGasLimit, + bytes32 donId + ) external override returns (bytes32) { + IFunctionsCoordinator coordinator = IFunctionsCoordinator(getProposedContractById(donId)); + return _sendRequest(donId, coordinator, subscriptionId, data, dataVersion, callbackGasLimit); + } + + function _sendRequest( + bytes32 donId, + IFunctionsCoordinator coordinator, + uint64 subscriptionId, + bytes memory data, + uint16 dataVersion, + uint32 callbackGasLimit + ) private returns (bytes32) { + _whenNotPaused(); + _isExistingSubscription(subscriptionId); + _isAllowedConsumer(msg.sender, subscriptionId); + isValidCallbackGasLimit(subscriptionId, callbackGasLimit); + + if (data.length == 0) { + revert EmptyRequestData(); + } + + Subscription memory subscription = getSubscription(subscriptionId); + Consumer memory consumer = getConsumer(msg.sender, subscriptionId); + uint72 adminFee = s_config.adminFee; + + // Forward request to DON + FunctionsResponse.Commitment memory commitment = coordinator.startRequest( + FunctionsResponse.RequestMeta({ + requestingContract: msg.sender, + data: data, + subscriptionId: subscriptionId, + dataVersion: dataVersion, + flags: getFlags(subscriptionId), + callbackGasLimit: callbackGasLimit, + adminFee: adminFee, + initiatedRequests: consumer.initiatedRequests, + completedRequests: consumer.completedRequests, + availableBalance: subscription.balance - subscription.blockedBalance, + subscriptionOwner: subscription.owner + }) + ); + + // Do not allow setting a comittment for a requestId that already exists + if (s_requestCommitments[commitment.requestId] != bytes32(0)) { + revert DuplicateRequestId(commitment.requestId); + } + + // Store a commitment about the request + s_requestCommitments[commitment.requestId] = keccak256( + abi.encode( + FunctionsResponse.Commitment({ + adminFee: adminFee, + coordinator: address(coordinator), + client: msg.sender, + subscriptionId: subscriptionId, + 
callbackGasLimit: callbackGasLimit, + estimatedTotalCostJuels: commitment.estimatedTotalCostJuels, + timeoutTimestamp: commitment.timeoutTimestamp, + requestId: commitment.requestId, + donFee: commitment.donFee, + gasOverheadBeforeCallback: commitment.gasOverheadBeforeCallback, + gasOverheadAfterCallback: commitment.gasOverheadAfterCallback + }) + ) + ); + + _markRequestInFlight(msg.sender, subscriptionId, commitment.estimatedTotalCostJuels); + + emit RequestStart({ + requestId: commitment.requestId, + donId: donId, + subscriptionId: subscriptionId, + subscriptionOwner: subscription.owner, + requestingContract: msg.sender, + // solhint-disable-next-line avoid-tx-origin + requestInitiator: tx.origin, + data: data, + dataVersion: dataVersion, + callbackGasLimit: callbackGasLimit, + estimatedTotalCostJuels: commitment.estimatedTotalCostJuels + }); + + return commitment.requestId; + } + + // ================================================================ + // | Responses | + // ================================================================ + + /// @inheritdoc IFunctionsRouter + function fulfill( + bytes memory response, + bytes memory err, + uint96 juelsPerGas, + uint96 costWithoutFulfillment, + address transmitter, + FunctionsResponse.Commitment memory commitment + ) external override returns (FunctionsResponse.FulfillResult resultCode, uint96) { + _whenNotPaused(); + + if (msg.sender != commitment.coordinator) { + revert OnlyCallableFromCoordinator(); + } + + { + bytes32 commitmentHash = s_requestCommitments[commitment.requestId]; + + if (commitmentHash == bytes32(0)) { + resultCode = FunctionsResponse.FulfillResult.INVALID_REQUEST_ID; + emit RequestNotProcessed(commitment.requestId, commitment.coordinator, transmitter, resultCode); + return (resultCode, 0); + } + + if (keccak256(abi.encode(commitment)) != commitmentHash) { + resultCode = FunctionsResponse.FulfillResult.INVALID_COMMITMENT; + emit RequestNotProcessed(commitment.requestId, commitment.coordinator, 
transmitter, resultCode); + return (resultCode, 0); + } + + // Check that the transmitter has supplied enough gas for the callback to succeed + if (gasleft() < commitment.callbackGasLimit + commitment.gasOverheadAfterCallback) { + resultCode = FunctionsResponse.FulfillResult.INSUFFICIENT_GAS_PROVIDED; + emit RequestNotProcessed(commitment.requestId, commitment.coordinator, transmitter, resultCode); + return (resultCode, 0); + } + } + + { + uint96 callbackCost = juelsPerGas * SafeCast.toUint96(commitment.callbackGasLimit); + uint96 totalCostJuels = commitment.adminFee + costWithoutFulfillment + callbackCost; + + // Check that the subscription can still afford to fulfill the request + if (totalCostJuels > getSubscription(commitment.subscriptionId).balance) { + resultCode = FunctionsResponse.FulfillResult.SUBSCRIPTION_BALANCE_INVARIANT_VIOLATION; + emit RequestNotProcessed(commitment.requestId, commitment.coordinator, transmitter, resultCode); + return (resultCode, 0); + } + + // Check that the cost has not exceeded the quoted cost + if (totalCostJuels > commitment.estimatedTotalCostJuels) { + resultCode = FunctionsResponse.FulfillResult.COST_EXCEEDS_COMMITMENT; + emit RequestNotProcessed(commitment.requestId, commitment.coordinator, transmitter, resultCode); + return (resultCode, 0); + } + } + + delete s_requestCommitments[commitment.requestId]; + + CallbackResult memory result = _callback( + commitment.requestId, + response, + err, + commitment.callbackGasLimit, + commitment.client + ); + + resultCode = result.success + ? 
FunctionsResponse.FulfillResult.FULFILLED + : FunctionsResponse.FulfillResult.USER_CALLBACK_ERROR; + + Receipt memory receipt = _pay( + commitment.subscriptionId, + commitment.estimatedTotalCostJuels, + commitment.client, + commitment.adminFee, + juelsPerGas, + SafeCast.toUint96(result.gasUsed), + costWithoutFulfillment + ); + + emit RequestProcessed({ + requestId: commitment.requestId, + subscriptionId: commitment.subscriptionId, + totalCostJuels: receipt.totalCostJuels, + transmitter: transmitter, + resultCode: resultCode, + response: response, + err: err, + callbackReturnData: result.returnData + }); + + return (resultCode, receipt.callbackGasCostJuels); + } + + function _callback( + bytes32 requestId, + bytes memory response, + bytes memory err, + uint32 callbackGasLimit, + address client + ) private returns (CallbackResult memory) { + bool destinationNoLongerExists; + assembly { + // solidity calls check that a contract actually exists at the destination, so we do the same + destinationNoLongerExists := iszero(extcodesize(client)) + } + if (destinationNoLongerExists) { + // Return without attempting callback + // The subscription will still be charged to reimburse transmitter's gas overhead + return CallbackResult({success: false, gasUsed: 0, returnData: new bytes(0)}); + } + + bytes memory encodedCallback = abi.encodeWithSelector( + s_config.handleOracleFulfillmentSelector, + requestId, + response, + err + ); + + uint16 gasForCallExactCheck = s_config.gasForCallExactCheck; + + // Call with explicitly the amount of callback gas requested + // Important to not let them exhaust the gas budget and avoid payment. + // NOTE: that callWithExactGas will revert if we do not have sufficient gas + // to give the callee their requested amount. 
+ + bool success; + uint256 gasUsed; + // allocate return data memory ahead of time + bytes memory returnData = new bytes(MAX_CALLBACK_RETURN_BYTES); + + assembly { + let g := gas() + // Compute g -= gasForCallExactCheck and check for underflow + // The gas actually passed to the callee is _min(gasAmount, 63//64*gas available). + // We want to ensure that we revert if gasAmount > 63//64*gas available + // as we do not want to provide them with less, however that check itself costs + // gas. gasForCallExactCheck ensures we have at least enough gas to be able + // to revert if gasAmount > 63//64*gas available. + if lt(g, gasForCallExactCheck) { + revert(0, 0) + } + g := sub(g, gasForCallExactCheck) + // if g - g//64 <= gasAmount, revert + // (we subtract g//64 because of EIP-150) + if iszero(gt(sub(g, div(g, 64)), callbackGasLimit)) { + revert(0, 0) + } + // call and report whether we succeeded + // call(gas,addr,value,argsOffset,argsLength,retOffset,retLength) + let gasBeforeCall := gas() + success := call(callbackGasLimit, client, 0, add(encodedCallback, 0x20), mload(encodedCallback), 0, 0) + gasUsed := sub(gasBeforeCall, gas()) + + // limit our copy to MAX_CALLBACK_RETURN_BYTES bytes + let toCopy := returndatasize() + if gt(toCopy, MAX_CALLBACK_RETURN_BYTES) { + toCopy := MAX_CALLBACK_RETURN_BYTES + } + // Store the length of the copied bytes + mstore(returnData, toCopy) + // copy the bytes from returnData[0:_toCopy] + returndatacopy(add(returnData, 0x20), 0, toCopy) + } + + return CallbackResult({success: success, gasUsed: gasUsed, returnData: returnData}); + } + + // ================================================================ + // | Route methods | + // ================================================================ + + /// @inheritdoc IFunctionsRouter + function getContractById(bytes32 id) public view override returns (address) { + address currentImplementation = s_route[id]; + if (currentImplementation == address(0)) { + revert RouteNotFound(id); + } + 
return currentImplementation; + } + + /// @inheritdoc IFunctionsRouter + function getProposedContractById(bytes32 id) public view override returns (address) { + // Iterations will not exceed MAX_PROPOSAL_SET_LENGTH + for (uint8 i = 0; i < s_proposedContractSet.ids.length; ++i) { + if (id == s_proposedContractSet.ids[i]) { + return s_proposedContractSet.to[i]; + } + } + revert RouteNotFound(id); + } + + // ================================================================ + // | Contract Proposal methods | + // ================================================================ + + /// @inheritdoc IFunctionsRouter + function getProposedContractSet() external view override returns (bytes32[] memory, address[] memory) { + return (s_proposedContractSet.ids, s_proposedContractSet.to); + } + + /// @inheritdoc IFunctionsRouter + function proposeContractsUpdate( + bytes32[] memory proposedContractSetIds, + address[] memory proposedContractSetAddresses + ) external override onlyOwner { + // IDs and addresses arrays must be of equal length and must not exceed the max proposal length + uint256 idsArrayLength = proposedContractSetIds.length; + if (idsArrayLength != proposedContractSetAddresses.length || idsArrayLength > MAX_PROPOSAL_SET_LENGTH) { + revert InvalidProposal(); + } + + // NOTE: iterations of this loop will not exceed MAX_PROPOSAL_SET_LENGTH + for (uint256 i = 0; i < idsArrayLength; ++i) { + bytes32 id = proposedContractSetIds[i]; + address proposedContract = proposedContractSetAddresses[i]; + if ( + proposedContract == address(0) || // The Proposed address must be a valid address + s_route[id] == proposedContract // The Proposed address must point to a different address than what is currently set + ) { + revert InvalidProposal(); + } + + emit ContractProposed({ + proposedContractSetId: id, + proposedContractSetFromAddress: s_route[id], + proposedContractSetToAddress: proposedContract + }); + } + + s_proposedContractSet = ContractProposalSet({ids: 
proposedContractSetIds, to: proposedContractSetAddresses}); + } + + /// @inheritdoc IFunctionsRouter + function updateContracts() external override onlyOwner { + // Iterations will not exceed MAX_PROPOSAL_SET_LENGTH + for (uint256 i = 0; i < s_proposedContractSet.ids.length; ++i) { + bytes32 id = s_proposedContractSet.ids[i]; + address to = s_proposedContractSet.to[i]; + emit ContractUpdated({id: id, from: s_route[id], to: to}); + s_route[id] = to; + } + + delete s_proposedContractSet; + } + + // ================================================================ + // | Modifiers | + // ================================================================ + // Favoring internal functions over actual modifiers to reduce contract size + + /// @dev Used within FunctionsSubscriptions.sol + function _whenNotPaused() internal view override { + _requireNotPaused(); + } + + /// @dev Used within FunctionsSubscriptions.sol + function _onlyRouterOwner() internal view override { + _validateOwnership(); + } + + /// @dev Used within FunctionsSubscriptions.sol + function _onlySenderThatAcceptedToS() internal view override { + address currentImplementation = s_route[s_allowListId]; + if (currentImplementation == address(0)) { + // If not set, ignore this check, allow all access + return; + } + if (!IAccessController(currentImplementation).hasAccess(msg.sender, new bytes(0))) { + revert SenderMustAcceptTermsOfService(msg.sender); + } + } + + /// @inheritdoc IFunctionsRouter + function pause() external override onlyOwner { + _pause(); + } + + /// @inheritdoc IFunctionsRouter + function unpause() external override onlyOwner { + _unpause(); + } +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/FunctionsSubscriptions.sol b/contracts/src/v0.8/functions/dev/v1_X/FunctionsSubscriptions.sol new file mode 100644 index 00000000..a1a084e4 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/FunctionsSubscriptions.sol @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: MIT +pragma solidity 
^0.8.19; + +import {IFunctionsSubscriptions} from "./interfaces/IFunctionsSubscriptions.sol"; +import {IERC677Receiver} from "../../../shared/interfaces/IERC677Receiver.sol"; +import {IFunctionsBilling} from "./interfaces/IFunctionsBilling.sol"; + +import {FunctionsResponse} from "./libraries/FunctionsResponse.sol"; + +import {IERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; +import {SafeERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol"; + +/// @title Functions Subscriptions contract +/// @notice Contract that coordinates payment from users to the nodes of the Decentralized Oracle Network (DON). +abstract contract FunctionsSubscriptions is IFunctionsSubscriptions, IERC677Receiver { + using SafeERC20 for IERC20; + using FunctionsResponse for FunctionsResponse.Commitment; + + // ================================================================ + // | Balance state | + // ================================================================ + // link token address + IERC20 internal immutable i_linkToken; + + // s_totalLinkBalance tracks the total PLI sent to/from + // this contract through onTokenTransfer, cancelSubscription and oracleWithdraw. + // A discrepancy with this contract's PLI balance indicates that someone + // sent tokens using transfer and so we may need to use recoverFunds. + uint96 private s_totalLinkBalance; + + /// @dev NOP balances are held as a single amount. The breakdown is held by the Coordinator. + mapping(address coordinator => uint96 balanceJuelsLink) private s_withdrawableTokens; + + // ================================================================ + // | Subscription state | + // ================================================================ + // Keep a count of the number of subscriptions so that its possible to + // loop through all the current subscriptions via .getSubscription(). 
+ uint64 private s_currentSubscriptionId; + + mapping(uint64 subscriptionId => Subscription) private s_subscriptions; + + // Maintains the list of keys in s_consumers. + // We do this for 2 reasons: + // 1. To be able to clean up all keys from s_consumers when canceling a subscription. + // 2. To be able to return the list of all consumers in getSubscription. + // Note that we need the s_consumers map to be able to directly check if a + // consumer is valid without reading all the consumers from storage. + mapping(address consumer => mapping(uint64 subscriptionId => Consumer)) private s_consumers; + + event SubscriptionCreated(uint64 indexed subscriptionId, address owner); + event SubscriptionFunded(uint64 indexed subscriptionId, uint256 oldBalance, uint256 newBalance); + event SubscriptionConsumerAdded(uint64 indexed subscriptionId, address consumer); + event SubscriptionConsumerRemoved(uint64 indexed subscriptionId, address consumer); + event SubscriptionCanceled(uint64 indexed subscriptionId, address fundsRecipient, uint256 fundsAmount); + event SubscriptionOwnerTransferRequested(uint64 indexed subscriptionId, address from, address to); + event SubscriptionOwnerTransferred(uint64 indexed subscriptionId, address from, address to); + + error TooManyConsumers(uint16 maximumConsumers); + error InsufficientBalance(uint96 currentBalanceJuels); + error InvalidConsumer(); + error CannotRemoveWithPendingRequests(); + error InvalidSubscription(); + error OnlyCallableFromLink(); + error InvalidCalldata(); + error MustBeSubscriptionOwner(); + error TimeoutNotExceeded(); + error MustBeProposedOwner(address proposedOwner); + event FundsRecovered(address to, uint256 amount); + + // ================================================================ + // | Request state | + // ================================================================ + + mapping(bytes32 requestId => bytes32 commitmentHash) internal s_requestCommitments; + + struct Receipt { + uint96 callbackGasCostJuels; + 
uint96 totalCostJuels; + } + + event RequestTimedOut(bytes32 indexed requestId); + + // ================================================================ + // | Initialization | + // ================================================================ + constructor(address link) { + i_linkToken = IERC20(link); + } + + // ================================================================ + // | Request/Response | + // ================================================================ + + /// @notice Sets a request as in-flight + /// @dev Only callable within the Router + function _markRequestInFlight(address client, uint64 subscriptionId, uint96 estimatedTotalCostJuels) internal { + // Earmark subscription funds + s_subscriptions[subscriptionId].blockedBalance += estimatedTotalCostJuels; + + // Increment sent requests + s_consumers[client][subscriptionId].initiatedRequests += 1; + } + + /// @notice Moves funds from one subscription account to another. + /// @dev Only callable by the Coordinator contract that is saved in the request commitment + function _pay( + uint64 subscriptionId, + uint96 estimatedTotalCostJuels, + address client, + uint96 adminFee, + uint96 juelsPerGas, + uint96 gasUsed, + uint96 costWithoutCallbackJuels + ) internal returns (Receipt memory) { + uint96 callbackGasCostJuels = juelsPerGas * gasUsed; + uint96 totalCostJuels = costWithoutCallbackJuels + adminFee + callbackGasCostJuels; + + if ( + s_subscriptions[subscriptionId].balance < totalCostJuels || + s_subscriptions[subscriptionId].blockedBalance < estimatedTotalCostJuels + ) { + revert InsufficientBalance(s_subscriptions[subscriptionId].balance); + } + + // Charge the subscription + s_subscriptions[subscriptionId].balance -= totalCostJuels; + + // Unblock earmarked funds + s_subscriptions[subscriptionId].blockedBalance -= estimatedTotalCostJuels; + + // Pay the DON's fees and gas reimbursement + s_withdrawableTokens[msg.sender] += costWithoutCallbackJuels + callbackGasCostJuels; + + // Pay out the 
administration fee + s_withdrawableTokens[address(this)] += adminFee; + + // Increment finished requests + s_consumers[client][subscriptionId].completedRequests += 1; + + return Receipt({callbackGasCostJuels: callbackGasCostJuels, totalCostJuels: totalCostJuels}); + } + + // ================================================================ + // | Owner methods | + // ================================================================ + + /// @inheritdoc IFunctionsSubscriptions + function ownerCancelSubscription(uint64 subscriptionId) external override { + _onlyRouterOwner(); + _isExistingSubscription(subscriptionId); + _cancelSubscriptionHelper(subscriptionId, s_subscriptions[subscriptionId].owner, false); + } + + /// @inheritdoc IFunctionsSubscriptions + function recoverFunds(address to) external override { + _onlyRouterOwner(); + uint256 externalBalance = i_linkToken.balanceOf(address(this)); + uint256 internalBalance = uint256(s_totalLinkBalance); + if (internalBalance < externalBalance) { + uint256 amount = externalBalance - internalBalance; + i_linkToken.safeTransfer(to, amount); + emit FundsRecovered(to, amount); + } + // If the balances are equal, nothing to be done. 
+ } + + // ================================================================ + // | Fund withdrawal | + // ================================================================ + + /// @inheritdoc IFunctionsSubscriptions + function oracleWithdraw(address recipient, uint96 amount) external override { + _whenNotPaused(); + + if (amount == 0) { + revert InvalidCalldata(); + } + uint96 currentBalance = s_withdrawableTokens[msg.sender]; + if (currentBalance < amount) { + revert InsufficientBalance(currentBalance); + } + s_withdrawableTokens[msg.sender] -= amount; + s_totalLinkBalance -= amount; + i_linkToken.safeTransfer(recipient, amount); + } + + /// @notice Owner withdraw PLI earned through admin fees + /// @notice If amount is 0 the full balance will be withdrawn + /// @param recipient where to send the funds + /// @param amount amount to withdraw + function ownerWithdraw(address recipient, uint96 amount) external { + _onlyRouterOwner(); + if (amount == 0) { + amount = s_withdrawableTokens[address(this)]; + } + uint96 currentBalance = s_withdrawableTokens[address(this)]; + if (currentBalance < amount) { + revert InsufficientBalance(currentBalance); + } + s_withdrawableTokens[address(this)] -= amount; + s_totalLinkBalance -= amount; + + i_linkToken.safeTransfer(recipient, amount); + } + + // ================================================================ + // | TransferAndCall Deposit helper | + // ================================================================ + + // This function is to be invoked when using PLI.transferAndCall + /// @dev Note to fund the subscription, use transferAndCall. 
For example + /// @dev PLITOKEN.transferAndCall( + /// @dev address(ROUTER), + /// @dev amount, + /// @dev abi.encode(subscriptionId)); + function onTokenTransfer(address /* sender */, uint256 amount, bytes calldata data) external override { + _whenNotPaused(); + if (msg.sender != address(i_linkToken)) { + revert OnlyCallableFromLink(); + } + if (data.length != 32) { + revert InvalidCalldata(); + } + uint64 subscriptionId = abi.decode(data, (uint64)); + if (s_subscriptions[subscriptionId].owner == address(0)) { + revert InvalidSubscription(); + } + // We do not check that the msg.sender is the subscription owner, + // anyone can fund a subscription. + uint256 oldBalance = s_subscriptions[subscriptionId].balance; + s_subscriptions[subscriptionId].balance += uint96(amount); + s_totalLinkBalance += uint96(amount); + emit SubscriptionFunded(subscriptionId, oldBalance, oldBalance + amount); + } + + // ================================================================ + // | Subscription management | + // ================================================================ + + /// @inheritdoc IFunctionsSubscriptions + function getTotalBalance() external view override returns (uint96) { + return s_totalLinkBalance; + } + + /// @inheritdoc IFunctionsSubscriptions + function getSubscriptionCount() external view override returns (uint64) { + return s_currentSubscriptionId; + } + + /// @inheritdoc IFunctionsSubscriptions + function getSubscription(uint64 subscriptionId) public view override returns (Subscription memory) { + _isExistingSubscription(subscriptionId); + return s_subscriptions[subscriptionId]; + } + + /// @inheritdoc IFunctionsSubscriptions + function getSubscriptionsInRange( + uint64 subscriptionIdStart, + uint64 subscriptionIdEnd + ) external view override returns (Subscription[] memory subscriptions) { + if ( + subscriptionIdStart > subscriptionIdEnd || + subscriptionIdEnd > s_currentSubscriptionId || + s_currentSubscriptionId == 0 + ) { + revert InvalidCalldata(); + 
} + + subscriptions = new Subscription[]((subscriptionIdEnd - subscriptionIdStart) + 1); + for (uint256 i = 0; i <= subscriptionIdEnd - subscriptionIdStart; ++i) { + subscriptions[i] = s_subscriptions[uint64(subscriptionIdStart + i)]; + } + + return subscriptions; + } + + /// @inheritdoc IFunctionsSubscriptions + function getConsumer(address client, uint64 subscriptionId) public view override returns (Consumer memory) { + return s_consumers[client][subscriptionId]; + } + + /// @dev Used within this file & FunctionsRouter.sol + function _isExistingSubscription(uint64 subscriptionId) internal view { + if (s_subscriptions[subscriptionId].owner == address(0)) { + revert InvalidSubscription(); + } + } + + /// @dev Used within FunctionsRouter.sol + function _isAllowedConsumer(address client, uint64 subscriptionId) internal view { + if (!s_consumers[client][subscriptionId].allowed) { + revert InvalidConsumer(); + } + } + + /// @inheritdoc IFunctionsSubscriptions + function createSubscription() external override returns (uint64 subscriptionId) { + _whenNotPaused(); + _onlySenderThatAcceptedToS(); + + subscriptionId = ++s_currentSubscriptionId; + s_subscriptions[subscriptionId] = Subscription({ + balance: 0, + blockedBalance: 0, + owner: msg.sender, + proposedOwner: address(0), + consumers: new address[](0), + flags: bytes32(0) + }); + + emit SubscriptionCreated(subscriptionId, msg.sender); + + return subscriptionId; + } + + /// @inheritdoc IFunctionsSubscriptions + function createSubscriptionWithConsumer(address consumer) external override returns (uint64 subscriptionId) { + _whenNotPaused(); + _onlySenderThatAcceptedToS(); + + subscriptionId = ++s_currentSubscriptionId; + s_subscriptions[subscriptionId] = Subscription({ + balance: 0, + blockedBalance: 0, + owner: msg.sender, + proposedOwner: address(0), + consumers: new address[](0), + flags: bytes32(0) + }); + + s_subscriptions[subscriptionId].consumers.push(consumer); + s_consumers[consumer][subscriptionId].allowed = 
true; + + emit SubscriptionCreated(subscriptionId, msg.sender); + emit SubscriptionConsumerAdded(subscriptionId, consumer); + + return subscriptionId; + } + + /// @inheritdoc IFunctionsSubscriptions + function proposeSubscriptionOwnerTransfer(uint64 subscriptionId, address newOwner) external override { + _whenNotPaused(); + _onlySubscriptionOwner(subscriptionId); + _onlySenderThatAcceptedToS(); + + if (newOwner == address(0) || s_subscriptions[subscriptionId].proposedOwner == newOwner) { + revert InvalidCalldata(); + } + + s_subscriptions[subscriptionId].proposedOwner = newOwner; + emit SubscriptionOwnerTransferRequested(subscriptionId, msg.sender, newOwner); + } + + /// @inheritdoc IFunctionsSubscriptions + function acceptSubscriptionOwnerTransfer(uint64 subscriptionId) external override { + _whenNotPaused(); + _onlySenderThatAcceptedToS(); + + address previousOwner = s_subscriptions[subscriptionId].owner; + address proposedOwner = s_subscriptions[subscriptionId].proposedOwner; + if (proposedOwner != msg.sender) { + revert MustBeProposedOwner(proposedOwner); + } + s_subscriptions[subscriptionId].owner = msg.sender; + s_subscriptions[subscriptionId].proposedOwner = address(0); + emit SubscriptionOwnerTransferred(subscriptionId, previousOwner, msg.sender); + } + + /// @inheritdoc IFunctionsSubscriptions + function removeConsumer(uint64 subscriptionId, address consumer) external override { + _whenNotPaused(); + _onlySubscriptionOwner(subscriptionId); + _onlySenderThatAcceptedToS(); + + Consumer memory consumerData = s_consumers[consumer][subscriptionId]; + _isAllowedConsumer(consumer, subscriptionId); + if (consumerData.initiatedRequests != consumerData.completedRequests) { + revert CannotRemoveWithPendingRequests(); + } + // Note bounded by config.maxConsumers + address[] memory consumers = s_subscriptions[subscriptionId].consumers; + for (uint256 i = 0; i < consumers.length; ++i) { + if (consumers[i] == consumer) { + // Storage write to preserve last element + 
s_subscriptions[subscriptionId].consumers[i] = consumers[consumers.length - 1]; + // Storage remove last element + s_subscriptions[subscriptionId].consumers.pop(); + break; + } + } + delete s_consumers[consumer][subscriptionId]; + emit SubscriptionConsumerRemoved(subscriptionId, consumer); + } + + /// @dev Overriden in FunctionsRouter.sol + function _getMaxConsumers() internal view virtual returns (uint16); + + /// @inheritdoc IFunctionsSubscriptions + function addConsumer(uint64 subscriptionId, address consumer) external override { + _whenNotPaused(); + _onlySubscriptionOwner(subscriptionId); + _onlySenderThatAcceptedToS(); + + // Already maxed, cannot add any more consumers. + uint16 maximumConsumers = _getMaxConsumers(); + if (s_subscriptions[subscriptionId].consumers.length >= maximumConsumers) { + revert TooManyConsumers(maximumConsumers); + } + if (s_consumers[consumer][subscriptionId].allowed) { + // Idempotence - do nothing if already added. + // Ensures uniqueness in s_subscriptions[subscriptionId].consumers. + return; + } + + s_consumers[consumer][subscriptionId].allowed = true; + s_subscriptions[subscriptionId].consumers.push(consumer); + + emit SubscriptionConsumerAdded(subscriptionId, consumer); + } + + /// @dev Overriden in FunctionsRouter.sol + function _getSubscriptionDepositDetails() internal virtual returns (uint16, uint72); + + function _cancelSubscriptionHelper(uint64 subscriptionId, address toAddress, bool checkDepositRefundability) private { + Subscription memory subscription = s_subscriptions[subscriptionId]; + uint96 balance = subscription.balance; + uint64 completedRequests = 0; + + // NOTE: loop iterations are bounded by config.maxConsumers + // If no consumers, does nothing. 
+ for (uint256 i = 0; i < subscription.consumers.length; ++i) { + address consumer = subscription.consumers[i]; + completedRequests += s_consumers[consumer][subscriptionId].completedRequests; + delete s_consumers[consumer][subscriptionId]; + } + delete s_subscriptions[subscriptionId]; + + (uint16 subscriptionDepositMinimumRequests, uint72 subscriptionDepositJuels) = _getSubscriptionDepositDetails(); + + // If subscription has not made enough requests, deposit will be forfeited + if (checkDepositRefundability && completedRequests < subscriptionDepositMinimumRequests) { + uint96 deposit = subscriptionDepositJuels > balance ? balance : subscriptionDepositJuels; + if (deposit > 0) { + s_withdrawableTokens[address(this)] += deposit; + balance -= deposit; + } + } + + if (balance > 0) { + s_totalLinkBalance -= balance; + i_linkToken.safeTransfer(toAddress, uint256(balance)); + } + emit SubscriptionCanceled(subscriptionId, toAddress, balance); + } + + /// @inheritdoc IFunctionsSubscriptions + function cancelSubscription(uint64 subscriptionId, address to) external override { + _whenNotPaused(); + _onlySubscriptionOwner(subscriptionId); + _onlySenderThatAcceptedToS(); + + if (pendingRequestExists(subscriptionId)) { + revert CannotRemoveWithPendingRequests(); + } + + _cancelSubscriptionHelper(subscriptionId, to, true); + } + + /// @inheritdoc IFunctionsSubscriptions + function pendingRequestExists(uint64 subscriptionId) public view override returns (bool) { + address[] memory consumers = s_subscriptions[subscriptionId].consumers; + // NOTE: loop iterations are bounded by config.maxConsumers + for (uint256 i = 0; i < consumers.length; ++i) { + Consumer memory consumer = s_consumers[consumers[i]][subscriptionId]; + if (consumer.initiatedRequests != consumer.completedRequests) { + return true; + } + } + return false; + } + + /// @inheritdoc IFunctionsSubscriptions + function setFlags(uint64 subscriptionId, bytes32 flags) external override { + _onlyRouterOwner(); + 
_isExistingSubscription(subscriptionId); + s_subscriptions[subscriptionId].flags = flags; + } + + /// @inheritdoc IFunctionsSubscriptions + function getFlags(uint64 subscriptionId) public view returns (bytes32) { + return s_subscriptions[subscriptionId].flags; + } + + // ================================================================ + // | Request Timeout | + // ================================================================ + + /// @inheritdoc IFunctionsSubscriptions + function timeoutRequests(FunctionsResponse.Commitment[] calldata requestsToTimeoutByCommitment) external override { + _whenNotPaused(); + + for (uint256 i = 0; i < requestsToTimeoutByCommitment.length; ++i) { + FunctionsResponse.Commitment memory request = requestsToTimeoutByCommitment[i]; + bytes32 requestId = request.requestId; + uint64 subscriptionId = request.subscriptionId; + + // Check that request ID is valid + if (keccak256(abi.encode(request)) != s_requestCommitments[requestId]) { + revert InvalidCalldata(); + } + + // Check that request has exceeded allowed request time + if (block.timestamp < request.timeoutTimestamp) { + revert TimeoutNotExceeded(); + } + + // Notify the Coordinator that the request should no longer be fulfilled + IFunctionsBilling(request.coordinator).deleteCommitment(requestId); + // Release the subscription's balance that had been earmarked for the request + s_subscriptions[subscriptionId].blockedBalance -= request.estimatedTotalCostJuels; + s_consumers[request.client][subscriptionId].completedRequests += 1; + // Delete commitment within Router state + delete s_requestCommitments[requestId]; + + emit RequestTimedOut(requestId); + } + } + + // ================================================================ + // | Modifiers | + // ================================================================ + + function _onlySubscriptionOwner(uint64 subscriptionId) internal view { + address owner = s_subscriptions[subscriptionId].owner; + if (owner == address(0)) { + revert 
InvalidSubscription(); + } + if (msg.sender != owner) { + revert MustBeSubscriptionOwner(); + } + } + + /// @dev Overriden in FunctionsRouter.sol + function _onlySenderThatAcceptedToS() internal virtual; + + /// @dev Overriden in FunctionsRouter.sol + function _onlyRouterOwner() internal virtual; + + /// @dev Overriden in FunctionsRouter.sol + function _whenNotPaused() internal virtual; +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/Routable.sol b/contracts/src/v0.8/functions/dev/v1_X/Routable.sol new file mode 100644 index 00000000..92e23362 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/Routable.sol @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {ITypeAndVersion} from "../../../shared/interfaces/ITypeAndVersion.sol"; +import {IOwnableFunctionsRouter} from "./interfaces/IOwnableFunctionsRouter.sol"; + +/// @title This abstract should be inherited by contracts that will be used +/// as the destinations to a route (id=>contract) on the Router. +/// It provides a Router getter and modifiers. +abstract contract Routable is ITypeAndVersion { + IOwnableFunctionsRouter private immutable i_functionsRouter; + + error RouterMustBeSet(); + error OnlyCallableByRouter(); + error OnlyCallableByRouterOwner(); + + /// @dev Initializes the contract. + constructor(address router) { + if (router == address(0)) { + revert RouterMustBeSet(); + } + i_functionsRouter = IOwnableFunctionsRouter(router); + } + + /// @notice Return the Router + function _getRouter() internal view returns (IOwnableFunctionsRouter router) { + return i_functionsRouter; + } + + /// @notice Reverts if called by anyone other than the router. + modifier onlyRouter() { + if (msg.sender != address(i_functionsRouter)) { + revert OnlyCallableByRouter(); + } + _; + } + + /// @notice Reverts if called by anyone other than the router owner. 
+ modifier onlyRouterOwner() { + if (msg.sender != i_functionsRouter.owner()) { + revert OnlyCallableByRouterOwner(); + } + _; + } +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/accessControl/TermsOfServiceAllowList.sol b/contracts/src/v0.8/functions/dev/v1_X/accessControl/TermsOfServiceAllowList.sol new file mode 100644 index 00000000..103c9c94 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/accessControl/TermsOfServiceAllowList.sol @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {ITermsOfServiceAllowList, TermsOfServiceAllowListConfig} from "./interfaces/ITermsOfServiceAllowList.sol"; +import {IAccessController} from "../../../../shared/interfaces/IAccessController.sol"; +import {ITypeAndVersion} from "../../../../shared/interfaces/ITypeAndVersion.sol"; + +import {ConfirmedOwner} from "../../../../shared/access/ConfirmedOwner.sol"; + +import {Address} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Address.sol"; +import {EnumerableSet} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableSet.sol"; + +/// @notice A contract to handle access control of subscription management dependent on signing a Terms of Service +contract TermsOfServiceAllowList is ITermsOfServiceAllowList, IAccessController, ITypeAndVersion, ConfirmedOwner { + using Address for address; + using EnumerableSet for EnumerableSet.AddressSet; + + /// @inheritdoc ITypeAndVersion + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant override typeAndVersion = "Functions Terms of Service Allow List v1.1.0"; + + EnumerableSet.AddressSet private s_allowedSenders; + EnumerableSet.AddressSet private s_blockedSenders; + + event AddedAccess(address user); + event BlockedAccess(address user); + event UnblockedAccess(address user); + + error InvalidSignature(); + error InvalidUsage(); + error RecipientIsBlocked(); + error InvalidCalldata(); + + 
TermsOfServiceAllowListConfig private s_config; + + event ConfigUpdated(TermsOfServiceAllowListConfig config); + + // ================================================================ + // | Initialization | + // ================================================================ + + constructor( + TermsOfServiceAllowListConfig memory config, + address[] memory initialAllowedSenders, + address[] memory initialBlockedSenders + ) ConfirmedOwner(msg.sender) { + updateConfig(config); + + for (uint256 i = 0; i < initialAllowedSenders.length; ++i) { + s_allowedSenders.add(initialAllowedSenders[i]); + } + + for (uint256 j = 0; j < initialBlockedSenders.length; ++j) { + if (s_allowedSenders.contains(initialBlockedSenders[j])) { + // Allowed senders cannot also be blocked + revert InvalidCalldata(); + } + s_blockedSenders.add(initialBlockedSenders[j]); + } + } + + // ================================================================ + // | Configuration | + // ================================================================ + + /// @notice Gets the contracts's configuration + /// @return config + function getConfig() external view returns (TermsOfServiceAllowListConfig memory) { + return s_config; + } + + /// @notice Sets the contracts's configuration + /// @param config - See the contents of the TermsOfServiceAllowListConfig struct in ITermsOfServiceAllowList.sol for more information + function updateConfig(TermsOfServiceAllowListConfig memory config) public onlyOwner { + s_config = config; + emit ConfigUpdated(config); + } + + // ================================================================ + // | Allow methods | + // ================================================================ + + /// @inheritdoc ITermsOfServiceAllowList + function getMessage(address acceptor, address recipient) public pure override returns (bytes32) { + return keccak256(abi.encodePacked(acceptor, recipient)); + } + + /// @inheritdoc ITermsOfServiceAllowList + function acceptTermsOfService(address 
acceptor, address recipient, bytes32 r, bytes32 s, uint8 v) external override { + if (s_blockedSenders.contains(recipient)) { + revert RecipientIsBlocked(); + } + + // Validate that the signature is correct and the correct data has been signed + bytes32 prefixedMessage = keccak256( + abi.encodePacked("\x19Ethereum Signed Message:\n32", getMessage(acceptor, recipient)) + ); + if (ecrecover(prefixedMessage, v, r, s) != s_config.signerPublicKey) { + revert InvalidSignature(); + } + + // If contract, validate that msg.sender == recipient + // This is to prevent EoAs from claiming contracts that they are not in control of + // If EoA, validate that msg.sender == acceptor == recipient + // This is to prevent EoAs from accepting for other EoAs + if (msg.sender != recipient || (msg.sender != acceptor && !msg.sender.isContract())) { + revert InvalidUsage(); + } + + // Add recipient to the allow list + if (s_allowedSenders.add(recipient)) { + emit AddedAccess(recipient); + } + } + + /// @inheritdoc ITermsOfServiceAllowList + function getAllAllowedSenders() external view override returns (address[] memory) { + return s_allowedSenders.values(); + } + + /// @inheritdoc ITermsOfServiceAllowList + function getAllowedSendersCount() external view override returns (uint64) { + return uint64(s_allowedSenders.length()); + } + + /// @inheritdoc ITermsOfServiceAllowList + function getAllowedSendersInRange( + uint64 allowedSenderIdxStart, + uint64 allowedSenderIdxEnd + ) external view override returns (address[] memory allowedSenders) { + if ( + allowedSenderIdxStart > allowedSenderIdxEnd || + allowedSenderIdxEnd >= s_allowedSenders.length() || + s_allowedSenders.length() == 0 + ) { + revert InvalidCalldata(); + } + + allowedSenders = new address[]((allowedSenderIdxEnd - allowedSenderIdxStart) + 1); + for (uint256 i = 0; i <= allowedSenderIdxEnd - allowedSenderIdxStart; ++i) { + allowedSenders[i] = s_allowedSenders.at(uint256(allowedSenderIdxStart + i)); + } + + return allowedSenders; + 
} + + /// @inheritdoc IAccessController + function hasAccess(address user, bytes calldata /* data */) external view override returns (bool) { + if (!s_config.enabled) { + return true; + } + return s_allowedSenders.contains(user); + } + + // ================================================================ + // | Block methods | + // ================================================================ + + /// @inheritdoc ITermsOfServiceAllowList + function isBlockedSender(address sender) external view override returns (bool) { + if (!s_config.enabled) { + return false; + } + return s_blockedSenders.contains(sender); + } + + /// @inheritdoc ITermsOfServiceAllowList + function blockSender(address sender) external override onlyOwner { + s_allowedSenders.remove(sender); + s_blockedSenders.add(sender); + emit BlockedAccess(sender); + } + + /// @inheritdoc ITermsOfServiceAllowList + function unblockSender(address sender) external override onlyOwner { + s_blockedSenders.remove(sender); + emit UnblockedAccess(sender); + } + + /// @inheritdoc ITermsOfServiceAllowList + function getBlockedSendersCount() external view override returns (uint64) { + return uint64(s_blockedSenders.length()); + } + + /// @inheritdoc ITermsOfServiceAllowList + function getBlockedSendersInRange( + uint64 blockedSenderIdxStart, + uint64 blockedSenderIdxEnd + ) external view override returns (address[] memory blockedSenders) { + if ( + blockedSenderIdxStart > blockedSenderIdxEnd || + blockedSenderIdxEnd >= s_blockedSenders.length() || + s_blockedSenders.length() == 0 + ) { + revert InvalidCalldata(); + } + + blockedSenders = new address[]((blockedSenderIdxEnd - blockedSenderIdxStart) + 1); + for (uint256 i = 0; i <= blockedSenderIdxEnd - blockedSenderIdxStart; ++i) { + blockedSenders[i] = s_blockedSenders.at(uint256(blockedSenderIdxStart + i)); + } + + return blockedSenders; + } +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/accessControl/interfaces/ITermsOfServiceAllowList.sol 
b/contracts/src/v0.8/functions/dev/v1_X/accessControl/interfaces/ITermsOfServiceAllowList.sol new file mode 100644 index 00000000..e781e584 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/accessControl/interfaces/ITermsOfServiceAllowList.sol @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +/// @notice A contract to handle access control of subscription management dependent on signing a Terms of Service +interface ITermsOfServiceAllowList { + /// @notice Return the message data for the proof given to accept the Terms of Service + /// @param acceptor - The wallet address that has accepted the Terms of Service on the UI + /// @param recipient - The recipient address that the acceptor is taking responsibility for + /// @return Hash of the message data + function getMessage(address acceptor, address recipient) external pure returns (bytes32); + + /// @notice Check if the address is blocked for usage + /// @param sender The transaction sender's address + /// @return True or false + function isBlockedSender(address sender) external returns (bool); + + /// @notice Get a list of all allowed senders + /// @dev WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed + /// to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that + /// this function has an unbounded cost, and using it as part of a state-changing function may render the function + /// uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block. 
+ /// @return addresses - all allowed addresses + function getAllAllowedSenders() external view returns (address[] memory); + + /// @notice Get details about the total number of allowed senders + /// @return count - total number of allowed senders in the system + function getAllowedSendersCount() external view returns (uint64); + + /// @notice Retrieve a list of allowed senders using an inclusive range + /// @dev WARNING: getAllowedSendersInRange uses EnumerableSet .length() and .at() methods to iterate over the list + /// without the need for an extra mapping. These method can not guarantee the ordering when new elements are added. + /// Evaluate if eventual consistency will satisfy your usecase before using it. + /// @param allowedSenderIdxStart - index of the allowed sender to start the range at + /// @param allowedSenderIdxEnd - index of the allowed sender to end the range at + /// @return allowedSenders - allowed addresses in the range provided + function getAllowedSendersInRange( + uint64 allowedSenderIdxStart, + uint64 allowedSenderIdxEnd + ) external view returns (address[] memory allowedSenders); + + /// @notice Allows access to the sender based on acceptance of the Terms of Service + /// @param acceptor - The wallet address that has accepted the Terms of Service on the UI + /// @param recipient - The recipient address that the acceptor is taking responsibility for + /// @param r - ECDSA signature r data produced by the Plugin Functions Subscription UI + /// @param s - ECDSA signature s produced by the Plugin Functions Subscription UI + /// @param v - ECDSA signature v produced by the Plugin Functions Subscription UI + function acceptTermsOfService(address acceptor, address recipient, bytes32 r, bytes32 s, uint8 v) external; + + /// @notice Removes a sender's access if already authorized, and disallows re-accepting the Terms of Service + /// @param sender - Address of the sender to block + function blockSender(address sender) external; + + /// @notice 
Re-allows a previously blocked sender to accept the Terms of Service + /// @param sender - Address of the sender to unblock + function unblockSender(address sender) external; + + /// @notice Get details about the total number of blocked senders + /// @return count - total number of blocked senders in the system + function getBlockedSendersCount() external view returns (uint64); + + /// @notice Retrieve a list of blocked senders using an inclusive range + /// @dev WARNING: getBlockedSendersInRange uses EnumerableSet .length() and .at() methods to iterate over the list + /// without the need for an extra mapping. These method can not guarantee the ordering when new elements are added. + /// Evaluate if eventual consistency will satisfy your usecase before using it. + /// @param blockedSenderIdxStart - index of the blocked sender to start the range at + /// @param blockedSenderIdxEnd - index of the blocked sender to end the range at + /// @return blockedSenders - blocked addresses in the range provided + function getBlockedSendersInRange( + uint64 blockedSenderIdxStart, + uint64 blockedSenderIdxEnd + ) external view returns (address[] memory blockedSenders); +} + +// ================================================================ +// | Configuration state | +// ================================================================ +struct TermsOfServiceAllowListConfig { + bool enabled; // ═════════════╗ When enabled, access will be checked against s_allowedSenders. When disabled, all access will be allowed. 
+ address signerPublicKey; // ══╝ The key pair that needs to sign the acceptance data +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/example/FunctionsClientExample.sol b/contracts/src/v0.8/functions/dev/v1_X/example/FunctionsClientExample.sol new file mode 100644 index 00000000..5ca5dbfe --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/example/FunctionsClientExample.sol @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsClient} from "../FunctionsClient.sol"; +import {ConfirmedOwner} from "../../../../shared/access/ConfirmedOwner.sol"; +import {FunctionsRequest} from "../libraries/FunctionsRequest.sol"; + +/// @title Plugin Functions example Client contract implementation +contract FunctionsClientExample is FunctionsClient, ConfirmedOwner { + using FunctionsRequest for FunctionsRequest.Request; + + uint32 public constant MAX_CALLBACK_GAS = 70_000; + + bytes32 public s_lastRequestId; + bytes32 public s_lastResponse; + bytes32 public s_lastError; + uint32 public s_lastResponseLength; + uint32 public s_lastErrorLength; + + error UnexpectedRequestID(bytes32 requestId); + + constructor(address router) FunctionsClient(router) ConfirmedOwner(msg.sender) {} + + /// @notice Send a simple request + /// @param source JavaScript source code + /// @param encryptedSecretsReferences Encrypted secrets payload + /// @param args List of arguments accessible from within the source code + /// @param subscriptionId Billing ID + function sendRequest( + string calldata source, + bytes calldata encryptedSecretsReferences, + string[] calldata args, + uint64 subscriptionId, + bytes32 jobId + ) external onlyOwner { + FunctionsRequest.Request memory req; + req._initializeRequestForInlineJavaScript(source); + if (encryptedSecretsReferences.length > 0) req._addSecretsReference(encryptedSecretsReferences); + if (args.length > 0) req._setArgs(args); + s_lastRequestId = _sendRequest(req._encodeCBOR(), subscriptionId, MAX_CALLBACK_GAS, 
jobId); + } + + /// @notice Store latest result/error + /// @param requestId The request ID, returned by sendRequest() + /// @param response Aggregated response from the user code + /// @param err Aggregated error from the user code or from the execution pipeline + /// @dev Either response or error parameter will be set, but never both + function _fulfillRequest(bytes32 requestId, bytes memory response, bytes memory err) internal override { + if (s_lastRequestId != requestId) { + revert UnexpectedRequestID(requestId); + } + // Save only the first 32 bytes of response/error to always fit within MAX_CALLBACK_GAS + s_lastResponse = _bytesToBytes32(response); + s_lastResponseLength = uint32(response.length); + s_lastError = _bytesToBytes32(err); + s_lastErrorLength = uint32(err.length); + } + + function _bytesToBytes32(bytes memory b) private pure returns (bytes32 out) { + uint256 maxLen = 32; + if (b.length < 32) { + maxLen = b.length; + } + for (uint256 i = 0; i < maxLen; ++i) { + out |= bytes32(b[i]) >> (i * 8); + } + return out; + } +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsBilling.sol b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsBilling.sol new file mode 100644 index 00000000..348c6895 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsBilling.sol @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +/// @title Plugin Functions DON billing interface. 
+interface IFunctionsBilling { + /// @notice Return the current conversion from WEI of ETH to PLI from the configured Plugin data feed + /// @return weiPerUnitLink - The amount of WEI in one PLI + function getWeiPerUnitLink() external view returns (uint256); + + /// @notice Determine the fee that will be split between Node Operators for servicing a request + /// @param requestCBOR - CBOR encoded Plugin Functions request data, use FunctionsRequest library to encode a request + /// @return fee - Cost in Juels (1e18) of PLI + function getDONFee(bytes memory requestCBOR) external view returns (uint72); + + /// @notice Determine the fee that will be paid to the Router owner for operating the network + /// @return fee - Cost in Juels (1e18) of PLI + function getAdminFee() external view returns (uint72); + + /// @notice Estimate the total cost that will be charged to a subscription to make a request: transmitter gas re-reimbursement, plus DON fee, plus Registry fee + /// @param - subscriptionId An identifier of the billing account + /// @param - data Encoded Plugin Functions request data, use FunctionsClient API to encode a request + /// @param - callbackGasLimit Gas limit for the fulfillment callback + /// @param - gasPriceWei The blockchain's gas price to estimate with + /// @return - billedCost Cost in Juels (1e18) of PLI + function estimateCost( + uint64 subscriptionId, + bytes calldata data, + uint32 callbackGasLimit, + uint256 gasPriceWei + ) external view returns (uint96); + + /// @notice Remove a request commitment that the Router has determined to be stale + /// @param requestId - The request ID to remove + function deleteCommitment(bytes32 requestId) external; + + /// @notice Oracle withdraw PLI earned through fulfilling requests + /// @notice If amount is 0 the full balance will be withdrawn + /// @param recipient where to send the funds + /// @param amount amount to withdraw + function oracleWithdraw(address recipient, uint96 amount) external; + + /// @notice 
Withdraw all PLI earned by Oracles through fulfilling requests + /// @dev transmitter addresses must support PLI tokens to avoid tokens from getting stuck as oracleWithdrawAll() calls will forward tokens directly to transmitters + function oracleWithdrawAll() external; +} + +// ================================================================ +// | Configuration state | +// ================================================================ + +struct FunctionsBillingConfig { + uint32 fulfillmentGasPriceOverEstimationBP; // ══╗ Percentage of gas price overestimation to account for changes in gas price between request and response. Held as basis points (one hundredth of 1 percentage point) + uint32 feedStalenessSeconds; // ║ How long before we consider the feed price to be stale and fallback to fallbackNativePerUnitLink. + uint32 gasOverheadBeforeCallback; // ║ Represents the average gas execution cost before the fulfillment callback. This amount is always billed for every request. + uint32 gasOverheadAfterCallback; // ║ Represents the average gas execution cost after the fulfillment callback. This amount is always billed for every request. + uint72 donFee; // ║ Additional flat fee (in Juels of PLI) that will be split between Node Operators. Max value is 2^72 - 1 == ~4.7k PLI. + uint40 minimumEstimateGasPriceWei; // ║ The lowest amount of wei that will be used as the tx.gasprice when estimating the cost to fulfill the request + uint16 maxSupportedRequestDataVersion; // ═══════╝ The highest request data version supported by the node. All lower versions should also be supported.
+ uint224 fallbackNativePerUnitLink; // ═══════════╗ Fallback NATIVE CURRENCY / PLI conversion rate if the data feed is stale + uint32 requestTimeoutSeconds; // ════════════════╝ How many seconds it takes before we consider a request to be timed out +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsClient.sol b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsClient.sol new file mode 100644 index 00000000..7ecb9636 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsClient.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +/// @title Plugin Functions client interface. +interface IFunctionsClient { + /// @notice Plugin Functions response handler called by the Functions Router + /// during fulfillment from the designated transmitter node in an OCR round. + /// @param requestId The requestId returned by FunctionsClient.sendRequest(). + /// @param response Aggregated response from the request's source code. + /// @param err Aggregated error either from the request's source code or from the execution pipeline. + /// @dev Either response or error parameter will be set, but never both. + function handleOracleFulfillment(bytes32 requestId, bytes memory response, bytes memory err) external; +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsCoordinator.sol b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsCoordinator.sol new file mode 100644 index 00000000..b8116171 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsCoordinator.sol @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsResponse} from "../libraries/FunctionsResponse.sol"; + +/// @title Plugin Functions DON Coordinator interface.
+interface IFunctionsCoordinator { + /// @notice Returns the DON's threshold encryption public key used to encrypt secrets + /// @dev All nodes on the DON have separate key shares of the threshold decryption key + /// and nodes must participate in a threshold decryption OCR round to decrypt secrets + /// @return thresholdPublicKey the DON's threshold encryption public key + function getThresholdPublicKey() external view returns (bytes memory); + + /// @notice Sets the DON's threshold encryption public key used to encrypt secrets + /// @dev Used to rotate the key + /// @param thresholdPublicKey The new public key + function setThresholdPublicKey(bytes calldata thresholdPublicKey) external; + + /// @notice Returns the DON's secp256k1 public key that is used to encrypt secrets + /// @dev All nodes on the DON have the corresponding private key + /// needed to decrypt the secrets encrypted with the public key + /// @return publicKey the DON's public key + function getDONPublicKey() external view returns (bytes memory); + + /// @notice Sets DON's secp256k1 public key used to encrypt secrets + /// @dev Used to rotate the key + /// @param donPublicKey The new public key + function setDONPublicKey(bytes calldata donPublicKey) external; + + /// @notice Receives a request to be emitted to the DON for processing + /// @param request The request metadata + /// @dev see the struct for field descriptions + /// @return commitment - The parameters of the request that must be held consistent at response time + function startRequest( + FunctionsResponse.RequestMeta calldata request + ) external returns (FunctionsResponse.Commitment memory commitment); +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsRouter.sol b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsRouter.sol new file mode 100644 index 00000000..1e4019f0 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsRouter.sol @@ -0,0 +1,109 @@ +// 
SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsResponse} from "../libraries/FunctionsResponse.sol"; + +/// @title Plugin Functions Router interface. +interface IFunctionsRouter { + /// @notice The identifier of the route to retrieve the address of the access control contract + /// The access control contract controls which accounts can manage subscriptions + /// @return id - bytes32 id that can be passed to the "getContractById" of the Router + function getAllowListId() external view returns (bytes32); + + /// @notice Set the identifier of the route to retrieve the address of the access control contract + /// The access control contract controls which accounts can manage subscriptions + function setAllowListId(bytes32 allowListId) external; + + /// @notice Get the flat fee (in Juels of PLI) that will be paid to the Router owner for operation of the network + /// @return adminFee + function getAdminFee() external view returns (uint72 adminFee); + + /// @notice Sends a request using the provided subscriptionId + /// @param subscriptionId - A unique subscription ID allocated by billing system, + /// a client can make requests from different contracts referencing the same subscription + /// @param data - CBOR encoded Plugin Functions request data, use FunctionsClient API to encode a request + /// @param dataVersion - Version of the CBOR encoded request data structure + /// @param callbackGasLimit - Gas limit for the fulfillment callback + /// @param donId - An identifier used to determine which route to send the request along + /// @return requestId - A unique request identifier + function sendRequest( + uint64 subscriptionId, + bytes calldata data, + uint16 dataVersion, + uint32 callbackGasLimit, + bytes32 donId + ) external returns (bytes32); + + /// @notice Sends a request to the proposed contracts + /// @param subscriptionId - A unique subscription ID allocated by billing system, + /// a client can make requests from different contracts referencing
the same subscription + /// @param data - CBOR encoded Plugin Functions request data, use FunctionsClient API to encode a request + /// @param dataVersion - Version of the CBOR encoded request data structure + /// @param callbackGasLimit - Gas limit for the fulfillment callback + /// @param donId - An identifier used to determine which route to send the request along + /// @return requestId - A unique request identifier + function sendRequestToProposed( + uint64 subscriptionId, + bytes calldata data, + uint16 dataVersion, + uint32 callbackGasLimit, + bytes32 donId + ) external returns (bytes32); + + /// @notice Fulfill the request by: + /// - calling back the data that the Oracle returned to the client contract + /// - pay the DON for processing the request + /// @dev Only callable by the Coordinator contract that is saved in the commitment + /// @param response response data from DON consensus + /// @param err error from DON consensus + /// @param juelsPerGas - current rate of juels/gas + /// @param costWithoutFulfillment - The cost of processing the request (in Juels of PLI ), without fulfillment + /// @param transmitter - The Node that transmitted the OCR report + /// @param commitment - The parameters of the request that must be held consistent between request and response time + /// @return fulfillResult - + /// @return callbackGasCostJuels - + function fulfill( + bytes memory response, + bytes memory err, + uint96 juelsPerGas, + uint96 costWithoutFulfillment, + address transmitter, + FunctionsResponse.Commitment memory commitment + ) external returns (FunctionsResponse.FulfillResult, uint96); + + /// @notice Validate requested gas limit is below the subscription max.
+ /// @param subscriptionId subscription ID + /// @param callbackGasLimit desired callback gas limit + function isValidCallbackGasLimit(uint64 subscriptionId, uint32 callbackGasLimit) external view; + + /// @notice Get the current contract given an ID + /// @param id A bytes32 identifier for the route + /// @return contract The current contract address + function getContractById(bytes32 id) external view returns (address); + + /// @notice Get the proposed next contract given an ID + /// @param id A bytes32 identifier for the route + /// @return contract The current or proposed contract address + function getProposedContractById(bytes32 id) external view returns (address); + + /// @notice Return the latest proposal set + /// @return ids The identifiers of the contracts to update + /// @return to The addresses of the contracts that will be updated to + function getProposedContractSet() external view returns (bytes32[] memory, address[] memory); + + /// @notice Proposes one or more updates to the contract routes + /// @dev Only callable by owner + function proposeContractsUpdate(bytes32[] memory proposalSetIds, address[] memory proposalSetAddresses) external; + + /// @notice Updates the current contract routes to the proposed contracts + /// @dev Only callable by owner + function updateContracts() external; + + /// @dev Puts the system into an emergency stopped state. + /// @dev Only callable by owner + function pause() external; + + /// @dev Takes the system out of an emergency stopped state.
+ /// @dev Only callable by owner + function unpause() external; +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsSubscriptions.sol b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsSubscriptions.sol new file mode 100644 index 00000000..7d7c3b18 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IFunctionsSubscriptions.sol @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsResponse} from "../libraries/FunctionsResponse.sol"; + +/// @title Plugin Functions Subscription interface. +interface IFunctionsSubscriptions { + struct Subscription { + uint96 balance; // ═════════╗ Common PLI balance that is controlled by the Router to be used for all consumer requests. + address owner; // ══════════╝ The owner can fund/withdraw/cancel the subscription. + uint96 blockedBalance; // ══╗ PLI balance that is reserved to pay for pending consumer requests. + address proposedOwner; // ══╝ For safely transferring sub ownership. + address[] consumers; // ════╸ Client contracts that can use the subscription + bytes32 flags; // ══════════╸ Per-subscription flags + } + + struct Consumer { + bool allowed; // ══════════════╗ Whether the consumer address is allowed to make requests using the subscription. + uint64 initiatedRequests; // ║ The number of requests that have been started + uint64 completedRequests; // ══╝ The number of requests that have successfully completed or timed out + } + + /// @notice Get details about a subscription.
+ /// @param subscriptionId - the ID of the subscription + /// @return subscription - see IFunctionsSubscriptions.Subscription for more information on the structure + function getSubscription(uint64 subscriptionId) external view returns (Subscription memory); + + /// @notice Retrieve details about multiple subscriptions using an inclusive range + /// @param subscriptionIdStart - the ID of the subscription to start the range at + /// @param subscriptionIdEnd - the ID of the subscription to end the range at + /// @return subscriptions - see IFunctionsSubscriptions.Subscription for more information on the structure + function getSubscriptionsInRange( + uint64 subscriptionIdStart, + uint64 subscriptionIdEnd + ) external view returns (Subscription[] memory); + + /// @notice Get details about a consumer of a subscription. + /// @param client - the consumer contract address + /// @param subscriptionId - the ID of the subscription + /// @return consumer - see IFunctionsSubscriptions.Consumer for more information on the structure + function getConsumer(address client, uint64 subscriptionId) external view returns (Consumer memory); + + /// @notice Get details about the total amount of PLI within the system + /// @return totalBalance - total Juels of PLI held by the contract + function getTotalBalance() external view returns (uint96); + + /// @notice Get details about the total number of subscription accounts + /// @return count - total number of subscriptions in the system + function getSubscriptionCount() external view returns (uint64); + + /// @notice Time out all expired requests: unlocks funds and removes the ability for the request to be fulfilled + /// @param requestsToTimeoutByCommitment - A list of request commitments to time out + /// @dev The commitment can be found on the "OracleRequest" event created when sending the request. 
+ function timeoutRequests(FunctionsResponse.Commitment[] calldata requestsToTimeoutByCommitment) external; + + /// @notice Oracle withdraw PLI earned through fulfilling requests + /// @notice If amount is 0 the full balance will be withdrawn + /// @notice Both signing and transmitting wallets will have a balance to withdraw + /// @param recipient where to send the funds + /// @param amount amount to withdraw + function oracleWithdraw(address recipient, uint96 amount) external; + + /// @notice Owner cancel subscription, sends remaining link directly to the subscription owner. + /// @dev Only callable by the Router Owner + /// @param subscriptionId subscription id + /// @dev notably can be called even if there are pending requests, outstanding ones may fail onchain + function ownerCancelSubscription(uint64 subscriptionId) external; + + /// @notice Recover link sent with transfer instead of transferAndCall. + /// @dev Only callable by the Router Owner + /// @param to address to send link to + function recoverFunds(address to) external; + + /// @notice Create a new subscription. + /// @return subscriptionId - A unique subscription id. + /// @dev You can manage the consumer set dynamically with addConsumer/removeConsumer. + /// @dev Note to fund the subscription, use transferAndCall. For example + /// @dev PLITOKEN.transferAndCall( + /// @dev address(ROUTER), + /// @dev amount, + /// @dev abi.encode(subscriptionId)); + function createSubscription() external returns (uint64); + + /// @notice Create a new subscription and add a consumer. + /// @return subscriptionId - A unique subscription id. + /// @dev You can manage the consumer set dynamically with addConsumer/removeConsumer. + /// @dev Note to fund the subscription, use transferAndCall. 
For example + /// @dev PLITOKEN.transferAndCall( + /// @dev address(ROUTER), + /// @dev amount, + /// @dev abi.encode(subscriptionId)); + function createSubscriptionWithConsumer(address consumer) external returns (uint64 subscriptionId); + + /// @notice Propose a new owner for a subscription. + /// @dev Only callable by the Subscription's owner + /// @param subscriptionId - ID of the subscription + /// @param newOwner - proposed new owner of the subscription + function proposeSubscriptionOwnerTransfer(uint64 subscriptionId, address newOwner) external; + + /// @notice Accept an ownership transfer. + /// @param subscriptionId - ID of the subscription + /// @dev will revert if original owner of subscriptionId has not requested that msg.sender become the new owner. + function acceptSubscriptionOwnerTransfer(uint64 subscriptionId) external; + + /// @notice Remove a consumer from a Plugin Functions subscription. + /// @dev Only callable by the Subscription's owner + /// @param subscriptionId - ID of the subscription + /// @param consumer - Consumer to remove from the subscription + function removeConsumer(uint64 subscriptionId, address consumer) external; + + /// @notice Add a consumer to a Plugin Functions subscription. + /// @dev Only callable by the Subscription's owner + /// @param subscriptionId - ID of the subscription + /// @param consumer - New consumer which can use the subscription + function addConsumer(uint64 subscriptionId, address consumer) external; + + /// @notice Cancel a subscription + /// @dev Only callable by the Subscription's owner + /// @param subscriptionId - ID of the subscription + /// @param to - Where to send the remaining PLI to + function cancelSubscription(uint64 subscriptionId, address to) external; + + /// @notice Check to see if there exists a request commitment for all consumers for a given sub. 
+ /// @param subscriptionId - ID of the subscription + /// @return true if there exists at least one unfulfilled request for the subscription, false otherwise. + /// @dev Looping is bounded to MAX_CONSUMERS*(number of DONs). + /// @dev Used to disable subscription canceling while outstanding requests are present. + function pendingRequestExists(uint64 subscriptionId) external view returns (bool); + + /// @notice Set subscription specific flags for a subscription. + /// Each byte of the flag is used to represent a resource tier that the subscription can utilize. + /// @param subscriptionId - ID of the subscription + /// @param flags - desired flag values + function setFlags(uint64 subscriptionId, bytes32 flags) external; + + /// @notice Get flags for a given subscription. + /// @param subscriptionId - ID of the subscription + /// @return flags - current flag values + function getFlags(uint64 subscriptionId) external view returns (bytes32); +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/interfaces/IOwnableFunctionsRouter.sol b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IOwnableFunctionsRouter.sol new file mode 100644 index 00000000..b9cf66f2 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/interfaces/IOwnableFunctionsRouter.sol @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {IFunctionsRouter} from "./IFunctionsRouter.sol"; +import {IOwnable} from "../../../../shared/interfaces/IOwnable.sol"; + +/// @title Plugin Functions Router interface with Ownability.
+interface IOwnableFunctionsRouter is IOwnable, IFunctionsRouter {} diff --git a/contracts/src/v0.8/functions/dev/v1_X/libraries/ChainSpecificUtil.sol b/contracts/src/v0.8/functions/dev/v1_X/libraries/ChainSpecificUtil.sol new file mode 100644 index 00000000..574d1bf1 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/libraries/ChainSpecificUtil.sol @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {ArbGasInfo} from "../../../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol"; +import {GasPriceOracle} from "../../../../vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/L2/GasPriceOracle.sol"; + +/// @dev A library that abstracts out opcodes that behave differently across chains. +/// @dev The methods below return values that are pertinent to the given chain. +library ChainSpecificUtil { + // ------------ Start Arbitrum Constants ------------ + + /// @dev ARBGAS_ADDR is the address of the ArbGasInfo precompile on Arbitrum. + /// @dev reference: https://github.com/OffchainLabs/nitro/blob/v2.0.14/contracts/src/precompiles/ArbGasInfo.sol#L10 + address private constant ARBGAS_ADDR = address(0x000000000000000000000000000000000000006C); + ArbGasInfo private constant ARBGAS = ArbGasInfo(ARBGAS_ADDR); + + uint256 private constant ARB_MAINNET_CHAIN_ID = 42161; + uint256 private constant ARB_GOERLI_TESTNET_CHAIN_ID = 421613; + uint256 private constant ARB_SEPOLIA_TESTNET_CHAIN_ID = 421614; + + // ------------ End Arbitrum Constants ------------ + + // ------------ Start Optimism Constants ------------ + /// @dev L1_FEE_DATA_PADDING includes 35 bytes for L1 data padding for Optimism + bytes internal constant L1_FEE_DATA_PADDING = + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"; + /// @dev OVM_GASPRICEORACLE_ADDR is the address of the GasPriceOracle precompile on Optimism. 
+ /// @dev reference: https://community.optimism.io/docs/developers/build/transaction-fees/#estimating-the-l1-data-fee + address private constant OVM_GASPRICEORACLE_ADDR = address(0x420000000000000000000000000000000000000F); + GasPriceOracle private constant OVM_GASPRICEORACLE = GasPriceOracle(OVM_GASPRICEORACLE_ADDR); + + uint256 private constant OP_MAINNET_CHAIN_ID = 10; + uint256 private constant OP_GOERLI_CHAIN_ID = 420; + uint256 private constant OP_SEPOLIA_CHAIN_ID = 11155420; + + /// @dev Base is a OP stack based rollup and follows the same L1 pricing logic as Optimism. + uint256 private constant BASE_MAINNET_CHAIN_ID = 8453; + uint256 private constant BASE_GOERLI_CHAIN_ID = 84531; + uint256 private constant BASE_SEPOLIA_CHAIN_ID = 84532; + + // ------------ End Optimism Constants ------------ + + /// @notice Returns the L1 fees in wei that will be paid for the current transaction, given any calldata + /// @notice for the current transaction. + /// @notice When on a known Arbitrum chain, it uses ArbGas.getCurrentTxL1GasFees to get the fees. + /// @notice On Arbitrum, the provided calldata is not used to calculate the fees. + /// @notice On Optimism, the provided calldata is passed to the GasPriceOracle predeploy + /// @notice and getL1Fee is called to get the fees. + function _getCurrentTxL1GasFees(bytes memory txCallData) internal view returns (uint256 l1FeeWei) { + uint256 chainid = block.chainid; + if (_isArbitrumChainId(chainid)) { + return ARBGAS.getCurrentTxL1GasFees(); + } else if (_isOptimismChainId(chainid)) { + return OVM_GASPRICEORACLE.getL1Fee(bytes.concat(txCallData, L1_FEE_DATA_PADDING)); + } + return 0; + } + + /// @notice Return true if and only if the provided chain ID is an Arbitrum chain ID. 
+ function _isArbitrumChainId(uint256 chainId) internal pure returns (bool) { + return + chainId == ARB_MAINNET_CHAIN_ID || + chainId == ARB_GOERLI_TESTNET_CHAIN_ID || + chainId == ARB_SEPOLIA_TESTNET_CHAIN_ID; + } + + /// @notice Return true if and only if the provided chain ID is an Optimism (or Base) chain ID. + /// @notice Note that optimism chain id's are also OP stack chain id's. + function _isOptimismChainId(uint256 chainId) internal pure returns (bool) { + return + chainId == OP_MAINNET_CHAIN_ID || + chainId == OP_GOERLI_CHAIN_ID || + chainId == OP_SEPOLIA_CHAIN_ID || + chainId == BASE_MAINNET_CHAIN_ID || + chainId == BASE_GOERLI_CHAIN_ID || + chainId == BASE_SEPOLIA_CHAIN_ID; + } +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/libraries/FunctionsRequest.sol b/contracts/src/v0.8/functions/dev/v1_X/libraries/FunctionsRequest.sol new file mode 100644 index 00000000..b856f5c1 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/libraries/FunctionsRequest.sol @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {CBOR} from "../../../../vendor/solidity-cborutils/v2.0.0/CBOR.sol"; + +/// @title Library for encoding the input data of a Functions request into CBOR +library FunctionsRequest { + using CBOR for CBOR.CBORBuffer; + + uint16 public constant REQUEST_DATA_VERSION = 1; + uint256 internal constant DEFAULT_BUFFER_SIZE = 256; + + enum Location { + Inline, // Provided within the Request + Remote, // Hosted through remote location that can be accessed through a provided URL + DONHosted // Hosted on the DON's storage + } + + enum CodeLanguage { + JavaScript + // In future version we may add other languages + } + + struct Request { + Location codeLocation; // ════════════╸ The location of the source code that will be executed on each node in the DON + Location secretsLocation; // ═════════╸ The location of secrets that will be passed into the source code. 
*Only Remote secrets are supported + CodeLanguage language; // ════════════╸ The coding language that the source code is written in + string source; // ════════════════════╸ Raw source code for Request.codeLocation of Location.Inline, URL for Request.codeLocation of Location.Remote, or slot decimal number for Request.codeLocation of Location.DONHosted + bytes encryptedSecretsReference; // ══╸ Encrypted URLs for Request.secretsLocation of Location.Remote (use addSecretsReference()), or CBOR encoded slotid+version for Request.secretsLocation of Location.DONHosted (use addDONHostedSecrets()) + string[] args; // ════════════════════╸ String arguments that will be passed into the source code + bytes[] bytesArgs; // ════════════════╸ Bytes arguments that will be passed into the source code + } + + error EmptySource(); + error EmptySecrets(); + error EmptyArgs(); + error NoInlineSecrets(); + + /// @notice Encodes a Request to CBOR encoded bytes + /// @param self The request to encode + /// @return CBOR encoded bytes + function _encodeCBOR(Request memory self) internal pure returns (bytes memory) { + CBOR.CBORBuffer memory buffer = CBOR.create(DEFAULT_BUFFER_SIZE); + + buffer.writeString("codeLocation"); + buffer.writeUInt256(uint256(self.codeLocation)); + + buffer.writeString("language"); + buffer.writeUInt256(uint256(self.language)); + + buffer.writeString("source"); + buffer.writeString(self.source); + + if (self.args.length > 0) { + buffer.writeString("args"); + buffer.startArray(); + for (uint256 i = 0; i < self.args.length; ++i) { + buffer.writeString(self.args[i]); + } + buffer.endSequence(); + } + + if (self.encryptedSecretsReference.length > 0) { + if (self.secretsLocation == Location.Inline) { + revert NoInlineSecrets(); + } + buffer.writeString("secretsLocation"); + buffer.writeUInt256(uint256(self.secretsLocation)); + buffer.writeString("secrets"); + buffer.writeBytes(self.encryptedSecretsReference); + } + + if (self.bytesArgs.length > 0) { + 
buffer.writeString("bytesArgs"); + buffer.startArray(); + for (uint256 i = 0; i < self.bytesArgs.length; ++i) { + buffer.writeBytes(self.bytesArgs[i]); + } + buffer.endSequence(); + } + + return buffer.buf.buf; + } + + /// @notice Initializes a Plugin Functions Request + /// @dev Sets the codeLocation and code on the request + /// @param self The uninitialized request + /// @param codeLocation The user provided source code location + /// @param language The programming language of the user code + /// @param source The user provided source code or a url + function _initializeRequest( + Request memory self, + Location codeLocation, + CodeLanguage language, + string memory source + ) internal pure { + if (bytes(source).length == 0) revert EmptySource(); + + self.codeLocation = codeLocation; + self.language = language; + self.source = source; + } + + /// @notice Initializes a Plugin Functions Request + /// @dev Simplified version of initializeRequest for PoC + /// @param self The uninitialized request + /// @param javaScriptSource The user provided JS code (must not be empty) + function _initializeRequestForInlineJavaScript(Request memory self, string memory javaScriptSource) internal pure { + _initializeRequest(self, Location.Inline, CodeLanguage.JavaScript, javaScriptSource); + } + + /// @notice Adds Remote user encrypted secrets to a Request + /// @param self The initialized request + /// @param encryptedSecretsReference Encrypted comma-separated string of URLs pointing to off-chain secrets + function _addSecretsReference(Request memory self, bytes memory encryptedSecretsReference) internal pure { + if (encryptedSecretsReference.length == 0) revert EmptySecrets(); + + self.secretsLocation = Location.Remote; + self.encryptedSecretsReference = encryptedSecretsReference; + } + + /// @notice Adds DON-hosted secrets reference to a Request + /// @param self The initialized request + /// @param slotID Slot ID of the user's secrets hosted on DON + /// @param version User 
data version (for the slotID) + function _addDONHostedSecrets(Request memory self, uint8 slotID, uint64 version) internal pure { + CBOR.CBORBuffer memory buffer = CBOR.create(DEFAULT_BUFFER_SIZE); + + buffer.writeString("slotID"); + buffer.writeUInt64(slotID); + buffer.writeString("version"); + buffer.writeUInt64(version); + + self.secretsLocation = Location.DONHosted; + self.encryptedSecretsReference = buffer.buf.buf; + } + + /// @notice Sets args for the user run function + /// @param self The initialized request + /// @param args The array of string args (must not be empty) + function _setArgs(Request memory self, string[] memory args) internal pure { + if (args.length == 0) revert EmptyArgs(); + + self.args = args; + } + + /// @notice Sets bytes args for the user run function + /// @param self The initialized request + /// @param args The array of bytes args (must not be empty) + function _setBytesArgs(Request memory self, bytes[] memory args) internal pure { + if (args.length == 0) revert EmptyArgs(); + + self.bytesArgs = args; + } +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/libraries/FunctionsResponse.sol b/contracts/src/v0.8/functions/dev/v1_X/libraries/FunctionsResponse.sol new file mode 100644 index 00000000..35069790 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/libraries/FunctionsResponse.sol @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +/// @title Library of types that are used for fulfillment of a Functions request +library FunctionsResponse { + // Used to send request information from the Router to the Coordinator + struct RequestMeta { + bytes data; // ══════════════════╸ CBOR encoded Plugin Functions request data, use FunctionsRequest library to encode a request + bytes32 flags; // ═══════════════╸ Per-subscription flags + address requestingContract; // ══╗ The client contract that is sending the request + uint96 availableBalance; // ═════╝ Common PLI balance of the subscription that is 
controlled by the Router to be used for all consumer requests. + uint72 adminFee; // ═════════════╗ Flat fee (in Juels of PLI) that will be paid to the Router Owner for operation of the network + uint64 subscriptionId; // ║ Identifier of the billing subscription that will be charged for the request + uint64 initiatedRequests; // ║ The number of requests that have been started + uint32 callbackGasLimit; // ║ The amount of gas that the callback to the consuming contract will be given + uint16 dataVersion; // ══════════╝ The version of the structure of the CBOR encoded request data + uint64 completedRequests; // ════╗ The number of requests that have successfully completed or timed out + address subscriptionOwner; // ═══╝ The owner of the billing subscription + } + + enum FulfillResult { + FULFILLED, // 0 + USER_CALLBACK_ERROR, // 1 + INVALID_REQUEST_ID, // 2 + COST_EXCEEDS_COMMITMENT, // 3 + INSUFFICIENT_GAS_PROVIDED, // 4 + SUBSCRIPTION_BALANCE_INVARIANT_VIOLATION, // 5 + INVALID_COMMITMENT // 6 + } + + struct Commitment { + bytes32 requestId; // ═════════════════╸ A unique identifier for a Plugin Functions request + address coordinator; // ═══════════════╗ The Coordinator contract that manages the DON that is servicing a request + uint96 estimatedTotalCostJuels; // ════╝ The maximum cost in Juels (1e18) of PLI that will be charged to fulfill a request + address client; // ════════════════════╗ The client contract that sent the request + uint64 subscriptionId; // ║ Identifier of the billing subscription that will be charged for the request + uint32 callbackGasLimit; // ═══════════╝ The amount of gas that the callback to the consuming contract will be given + uint72 adminFee; // ═══════════════════╗ Flat fee (in Juels of PLI) that will be paid to the Router Owner for operation of the network + uint72 donFee; // ║ Fee (in Juels of PLI) that will be split between Node Operators for servicing a request + uint40 gasOverheadBeforeCallback; // ║ Represents the average gas 
execution cost before the fulfillment callback. + uint40 gasOverheadAfterCallback; // ║ Represents the average gas execution cost after the fulfillment callback. + uint32 timeoutTimestamp; // ═══════════╝ The timestamp at which a request will be eligible to be timed out + } +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/mocks/FunctionsV1EventsMock.sol b/contracts/src/v0.8/functions/dev/v1_X/mocks/FunctionsV1EventsMock.sol new file mode 100644 index 00000000..68b51f89 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/mocks/FunctionsV1EventsMock.sol @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.19; + +contract FunctionsV1EventsMock { + struct Config { + uint16 maxConsumersPerSubscription; + uint72 adminFee; + bytes4 handleOracleFulfillmentSelector; + uint16 gasForCallExactCheck; + uint32[] maxCallbackGasLimits; + } + event ConfigUpdated(Config param1); + event ContractProposed( + bytes32 proposedContractSetId, + address proposedContractSetFromAddress, + address proposedContractSetToAddress + ); + event ContractUpdated(bytes32 id, address from, address to); + event FundsRecovered(address to, uint256 amount); + event OwnershipTransferRequested(address indexed from, address indexed to); + event OwnershipTransferred(address indexed from, address indexed to); + event Paused(address account); + event RequestNotProcessed(bytes32 indexed requestId, address coordinator, address transmitter, uint8 resultCode); + event RequestProcessed( + bytes32 indexed requestId, + uint64 indexed subscriptionId, + uint96 totalCostJuels, + address transmitter, + uint8 resultCode, + bytes response, + bytes err, + bytes callbackReturnData + ); + event RequestStart( + bytes32 indexed requestId, + bytes32 indexed donId, + uint64 indexed subscriptionId, + address subscriptionOwner, + address requestingContract, + address requestInitiator, + bytes data, + uint16 dataVersion, + uint32 callbackGasLimit, + uint96 estimatedTotalCostJuels + ); + event 
RequestTimedOut(bytes32 indexed requestId); + event SubscriptionCanceled(uint64 indexed subscriptionId, address fundsRecipient, uint256 fundsAmount); + event SubscriptionConsumerAdded(uint64 indexed subscriptionId, address consumer); + event SubscriptionConsumerRemoved(uint64 indexed subscriptionId, address consumer); + event SubscriptionCreated(uint64 indexed subscriptionId, address owner); + event SubscriptionFunded(uint64 indexed subscriptionId, uint256 oldBalance, uint256 newBalance); + event SubscriptionOwnerTransferRequested(uint64 indexed subscriptionId, address from, address to); + event SubscriptionOwnerTransferred(uint64 indexed subscriptionId, address from, address to); + event Unpaused(address account); + + function emitConfigUpdated(Config memory param1) public { + emit ConfigUpdated(param1); + } + + function emitContractProposed( + bytes32 proposedContractSetId, + address proposedContractSetFromAddress, + address proposedContractSetToAddress + ) public { + emit ContractProposed(proposedContractSetId, proposedContractSetFromAddress, proposedContractSetToAddress); + } + + function emitContractUpdated(bytes32 id, address from, address to) public { + emit ContractUpdated(id, from, to); + } + + function emitFundsRecovered(address to, uint256 amount) public { + emit FundsRecovered(to, amount); + } + + function emitOwnershipTransferRequested(address from, address to) public { + emit OwnershipTransferRequested(from, to); + } + + function emitOwnershipTransferred(address from, address to) public { + emit OwnershipTransferred(from, to); + } + + function emitPaused(address account) public { + emit Paused(account); + } + + function emitRequestNotProcessed( + bytes32 requestId, + address coordinator, + address transmitter, + uint8 resultCode + ) public { + emit RequestNotProcessed(requestId, coordinator, transmitter, resultCode); + } + + function emitRequestProcessed( + bytes32 requestId, + uint64 subscriptionId, + uint96 totalCostJuels, + address transmitter, + 
uint8 resultCode, + bytes memory response, + bytes memory err, + bytes memory callbackReturnData + ) public { + emit RequestProcessed( + requestId, + subscriptionId, + totalCostJuels, + transmitter, + resultCode, + response, + err, + callbackReturnData + ); + } + + function emitRequestStart( + bytes32 requestId, + bytes32 donId, + uint64 subscriptionId, + address subscriptionOwner, + address requestingContract, + address requestInitiator, + bytes memory data, + uint16 dataVersion, + uint32 callbackGasLimit, + uint96 estimatedTotalCostJuels + ) public { + emit RequestStart( + requestId, + donId, + subscriptionId, + subscriptionOwner, + requestingContract, + requestInitiator, + data, + dataVersion, + callbackGasLimit, + estimatedTotalCostJuels + ); + } + + function emitRequestTimedOut(bytes32 requestId) public { + emit RequestTimedOut(requestId); + } + + function emitSubscriptionCanceled(uint64 subscriptionId, address fundsRecipient, uint256 fundsAmount) public { + emit SubscriptionCanceled(subscriptionId, fundsRecipient, fundsAmount); + } + + function emitSubscriptionConsumerAdded(uint64 subscriptionId, address consumer) public { + emit SubscriptionConsumerAdded(subscriptionId, consumer); + } + + function emitSubscriptionConsumerRemoved(uint64 subscriptionId, address consumer) public { + emit SubscriptionConsumerRemoved(subscriptionId, consumer); + } + + function emitSubscriptionCreated(uint64 subscriptionId, address owner) public { + emit SubscriptionCreated(subscriptionId, owner); + } + + function emitSubscriptionFunded(uint64 subscriptionId, uint256 oldBalance, uint256 newBalance) public { + emit SubscriptionFunded(subscriptionId, oldBalance, newBalance); + } + + function emitSubscriptionOwnerTransferRequested(uint64 subscriptionId, address from, address to) public { + emit SubscriptionOwnerTransferRequested(subscriptionId, from, to); + } + + function emitSubscriptionOwnerTransferred(uint64 subscriptionId, address from, address to) public { + emit 
SubscriptionOwnerTransferred(subscriptionId, from, to); + } + + function emitUnpaused(address account) public { + emit Unpaused(account); + } +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/ocr/OCR2Abstract.sol b/contracts/src/v0.8/functions/dev/v1_X/ocr/OCR2Abstract.sol new file mode 100644 index 00000000..77cc9502 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/ocr/OCR2Abstract.sol @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {ITypeAndVersion} from "../../../../shared/interfaces/ITypeAndVersion.sol"; + +abstract contract OCR2Abstract is ITypeAndVersion { + // Maximum number of oracles the offchain reporting protocol is designed for + uint256 internal constant MAX_NUM_ORACLES = 31; + + /** + * @notice triggers a new run of the offchain reporting protocol + * @param previousConfigBlockNumber block in which the previous config was set, to simplify historic analysis + * @param configDigest configDigest of this configuration + * @param configCount ordinal number of this config setting among all config settings over the life of this contract + * @param signers ith element is address ith oracle uses to sign a report + * @param transmitters ith element is address ith oracle uses to transmit a report via the transmit method + * @param f maximum number of faulty/dishonest oracles the protocol can tolerate while still working correctly + * @param onchainConfig serialized configuration used by the contract (and possibly oracles) + * @param offchainConfigVersion version of the serialization format used for "offchainConfig" parameter + * @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + */ + event ConfigSet( + uint32 previousConfigBlockNumber, + bytes32 configDigest, + uint64 configCount, + address[] signers, + address[] transmitters, + uint8 f, + bytes onchainConfig, + uint64 offchainConfigVersion, + bytes offchainConfig + ); + + /** + * @notice sets 
offchain reporting protocol configuration incl. participating oracles + * @param signers addresses with which oracles sign the reports + * @param transmitters addresses oracles use to transmit the reports + * @param f number of faulty oracles the system can tolerate + * @param onchainConfig serialized configuration used by the contract (and possibly oracles) + * @param offchainConfigVersion version number for offchainEncoding schema + * @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + */ + function setConfig( + address[] memory signers, + address[] memory transmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) external virtual; + + /** + * @notice information about current offchain reporting protocol configuration + * @return configCount ordinal number of current config, out of all configs applied to this contract so far + * @return blockNumber block at which this config was set + * @return configDigest domain-separation tag for current config (see _configDigestFromConfigData) + */ + function latestConfigDetails() + external + view + virtual + returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest); + + /** + * @notice optionally emitted to indicate the latest configDigest and epoch for + which a report was successfully transmitted. Alternatively, the contract may + use latestConfigDigestAndEpoch with scanLogs set to false. + */ + event Transmitted(bytes32 configDigest, uint32 epoch); + + /** + * @notice optionally returns the latest configDigest and epoch for which a + report was successfully transmitted. Alternatively, the contract may return + scanLogs set to true and use Transmitted events to provide this information + to offchain watchers. + * @return scanLogs indicates whether to rely on the configDigest and epoch + returned or whether to scan logs for the Transmitted event instead. 
+ * @return configDigest + * @return epoch + */ + function latestConfigDigestAndEpoch() + external + view + virtual + returns (bool scanLogs, bytes32 configDigest, uint32 epoch); + + /** + * @notice transmit is called to post a new report to the contract + * @param report serialized report, which the signatures are signing. + * @param rs ith element is the R components of the ith signature on report. Must have at most maxNumOracles entries + * @param ss ith element is the S components of the ith signature on report. Must have at most maxNumOracles entries + * @param rawVs ith element is the V component of the ith signature + */ + function transmit( + // NOTE: If these parameters are changed, expectedMsgDataLength and/or + // TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT need to be changed accordingly + bytes32[3] calldata reportContext, + bytes calldata report, + bytes32[] calldata rs, + bytes32[] calldata ss, + bytes32 rawVs // signatures + ) external virtual; +} diff --git a/contracts/src/v0.8/functions/dev/v1_X/ocr/OCR2Base.sol b/contracts/src/v0.8/functions/dev/v1_X/ocr/OCR2Base.sol new file mode 100644 index 00000000..43825c49 --- /dev/null +++ b/contracts/src/v0.8/functions/dev/v1_X/ocr/OCR2Base.sol @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {ConfirmedOwner} from "../../../../shared/access/ConfirmedOwner.sol"; +import {OCR2Abstract} from "./OCR2Abstract.sol"; + +/** + * @notice Onchain verification of reports from the offchain reporting protocol + * @dev For details on its operation, see the offchain reporting protocol design + * doc, which refers to this contract as simply the "contract". + */ +abstract contract OCR2Base is ConfirmedOwner, OCR2Abstract { + error ReportInvalid(string message); + error InvalidConfig(string message); + + constructor() ConfirmedOwner(msg.sender) {} + + // incremented each time a new config is posted. This count is incorporated + // into the config digest, to prevent replay attacks. 
+ uint32 internal s_configCount; + uint32 internal s_latestConfigBlockNumber; // makes it easier for offchain systems + // to extract config from logs. + + // Storing these fields used on the hot path in a ConfigInfo variable reduces the + // retrieval of all of them to a single SLOAD. If any further fields are + // added, make sure that storage of the struct still takes at most 32 bytes. + struct ConfigInfo { + bytes32 latestConfigDigest; + uint8 f; // TODO: could be optimized by squeezing into one slot + uint8 n; + } + ConfigInfo internal s_configInfo; + + // Used for s_oracles[a].role, where a is an address, to track the purpose + // of the address, or to indicate that the address is unset. + enum Role { + // No oracle role has been set for address a + Unset, + // Signing address for the s_oracles[a].index'th oracle. I.e., report + // signatures from this oracle should ecrecover back to address a. + Signer, + // Transmission address for the s_oracles[a].index'th oracle. I.e., if a + // report is received by OCR2Aggregator.transmit in which msg.sender is + // a, it is attributed to the s_oracles[a].index'th oracle. + Transmitter + } + + struct Oracle { + uint8 index; // Index of oracle in s_signers/s_transmitters + Role role; // Role of the address which mapped to this struct + } + + mapping(address signerOrTransmitter => Oracle) internal s_oracles; + + // s_signers contains the signing address of each oracle + address[] internal s_signers; + + // s_transmitters contains the transmission address of each oracle, + // i.e. 
the address the oracle actually sends transactions to the contract from + address[] internal s_transmitters; + + struct DecodedReport { + bytes32[] requestIds; + bytes[] results; + bytes[] errors; + bytes[] onchainMetadata; + bytes[] offchainMetadata; + } + + /* + * Config logic + */ + + // Reverts transaction if config args are invalid + modifier checkConfigValid( + uint256 numSigners, + uint256 numTransmitters, + uint256 f + ) { + if (numSigners > MAX_NUM_ORACLES) revert InvalidConfig("too many signers"); + if (f == 0) revert InvalidConfig("f must be positive"); + if (numSigners != numTransmitters) revert InvalidConfig("oracle addresses out of registration"); + if (numSigners <= 3 * f) revert InvalidConfig("faulty-oracle f too high"); + _; + } + + struct SetConfigArgs { + address[] signers; + address[] transmitters; + uint8 f; + bytes onchainConfig; + uint64 offchainConfigVersion; + bytes offchainConfig; + } + + /// @inheritdoc OCR2Abstract + function latestConfigDigestAndEpoch() + external + view + virtual + override + returns (bool scanLogs, bytes32 configDigest, uint32 epoch) + { + return (true, bytes32(0), uint32(0)); + } + + /** + * @notice sets offchain reporting protocol configuration incl. 
participating oracles + * @param _signers addresses with which oracles sign the reports + * @param _transmitters addresses oracles use to transmit the reports + * @param _f number of faulty oracles the system can tolerate + * @param _onchainConfig encoded on-chain contract configuration + * @param _offchainConfigVersion version number for offchainEncoding schema + * @param _offchainConfig encoded off-chain oracle configuration + */ + function setConfig( + address[] memory _signers, + address[] memory _transmitters, + uint8 _f, + bytes memory _onchainConfig, + uint64 _offchainConfigVersion, + bytes memory _offchainConfig + ) external override checkConfigValid(_signers.length, _transmitters.length, _f) onlyOwner { + SetConfigArgs memory args = SetConfigArgs({ + signers: _signers, + transmitters: _transmitters, + f: _f, + onchainConfig: _onchainConfig, + offchainConfigVersion: _offchainConfigVersion, + offchainConfig: _offchainConfig + }); + + _beforeSetConfig(args.f, args.onchainConfig); + + while (s_signers.length != 0) { + // remove any old signer/transmitter addresses + uint256 lastIdx = s_signers.length - 1; + address signer = s_signers[lastIdx]; + address transmitter = s_transmitters[lastIdx]; + delete s_oracles[signer]; + delete s_oracles[transmitter]; + s_signers.pop(); + s_transmitters.pop(); + } + + // Bounded by MAX_NUM_ORACLES in OCR2Abstract.sol + for (uint256 i = 0; i < args.signers.length; i++) { + if (args.signers[i] == address(0)) revert InvalidConfig("signer must not be empty"); + if (args.transmitters[i] == address(0)) revert InvalidConfig("transmitter must not be empty"); + // add new signer/transmitter addresses + if (s_oracles[args.signers[i]].role != Role.Unset) revert InvalidConfig("repeated signer address"); + s_oracles[args.signers[i]] = Oracle(uint8(i), Role.Signer); + if (s_oracles[args.transmitters[i]].role != Role.Unset) revert InvalidConfig("repeated transmitter address"); + s_oracles[args.transmitters[i]] = Oracle(uint8(i), 
Role.Transmitter); + s_signers.push(args.signers[i]); + s_transmitters.push(args.transmitters[i]); + } + s_configInfo.f = args.f; + uint32 previousConfigBlockNumber = s_latestConfigBlockNumber; + s_latestConfigBlockNumber = uint32(block.number); + s_configCount += 1; + { + s_configInfo.latestConfigDigest = _configDigestFromConfigData( + block.chainid, + address(this), + s_configCount, + args.signers, + args.transmitters, + args.f, + args.onchainConfig, + args.offchainConfigVersion, + args.offchainConfig + ); + } + s_configInfo.n = uint8(args.signers.length); + + emit ConfigSet( + previousConfigBlockNumber, + s_configInfo.latestConfigDigest, + s_configCount, + args.signers, + args.transmitters, + args.f, + args.onchainConfig, + args.offchainConfigVersion, + args.offchainConfig + ); + } + + function _configDigestFromConfigData( + uint256 _chainId, + address _contractAddress, + uint64 _configCount, + address[] memory _signers, + address[] memory _transmitters, + uint8 _f, + bytes memory _onchainConfig, + uint64 _encodedConfigVersion, + bytes memory _encodedConfig + ) internal pure returns (bytes32) { + uint256 h = uint256( + keccak256( + abi.encode( + _chainId, + _contractAddress, + _configCount, + _signers, + _transmitters, + _f, + _onchainConfig, + _encodedConfigVersion, + _encodedConfig + ) + ) + ); + uint256 prefixMask = type(uint256).max << (256 - 16); // 0xFFFF00..00 + uint256 prefix = 0x0001 << (256 - 16); // 0x000100..00 + return bytes32((prefix & prefixMask) | (h & ~prefixMask)); + } + + /** + * @notice information about current offchain reporting protocol configuration + * @return configCount ordinal number of current config, out of all configs applied to this contract so far + * @return blockNumber block at which this config was set + * @return configDigest domain-separation tag for current config (see _configDigestFromConfigData) + */ + function latestConfigDetails() + external + view + override + returns (uint32 configCount, uint32 blockNumber, bytes32 
configDigest) + { + return (s_configCount, s_latestConfigBlockNumber, s_configInfo.latestConfigDigest); + } + + /** + * @return list of addresses permitted to transmit reports to this contract + * @dev The list will match the order used to specify the transmitter during setConfig + */ + function transmitters() external view returns (address[] memory) { + return s_transmitters; + } + + function _beforeSetConfig(uint8 _f, bytes memory _onchainConfig) internal virtual; + + /** + * @dev hook called after the report has been fully validated + * for the extending contract to handle additional logic, such as oracle payment + * @param decodedReport decodedReport + */ + function _report(DecodedReport memory decodedReport) internal virtual; + + // The constant-length components of the msg.data sent to transmit. + // See the "If we wanted to call sam" example on for example reasoning + // https://solidity.readthedocs.io/en/v0.7.2/abi-spec.html + uint16 private constant TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT = + 4 + // function selector + 32 * + 3 + // 3 words containing reportContext + 32 + // word containing start location of abiencoded report value + 32 + // word containing location start of abiencoded rs value + 32 + // word containing start location of abiencoded ss value + 32 + // rawVs value + 32 + // word containing length of report + 32 + // word containing length rs + 32 + // word containing length of ss + 0; // placeholder + + function _requireExpectedMsgDataLength( + bytes calldata report, + bytes32[] calldata rs, + bytes32[] calldata ss + ) private pure { + // calldata will never be big enough to make this overflow + uint256 expected = uint256(TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT) + + report.length + // one byte pure entry in _report + rs.length * + 32 + // 32 bytes per entry in _rs + ss.length * + 32 + // 32 bytes per entry in _ss + 0; // placeholder + if (msg.data.length != expected) revert ReportInvalid("calldata length mismatch"); + } + + function 
_beforeTransmit( + bytes calldata report + ) internal virtual returns (bool shouldStop, DecodedReport memory decodedReport); + + /** + * @notice transmit is called to post a new report to the contract + * @param report serialized report, which the signatures are signing. + * @param rs ith element is the R components of the ith signature on report. Must have at most maxNumOracles entries + * @param ss ith element is the S components of the ith signature on report. Must have at most maxNumOracles entries + * @param rawVs ith element is the V component of the ith signature + */ + function transmit( + // NOTE: If these parameters are changed, expectedMsgDataLength and/or + // TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT need to be changed accordingly + bytes32[3] calldata reportContext, + bytes calldata report, + bytes32[] calldata rs, + bytes32[] calldata ss, + bytes32 rawVs // signatures + ) external override { + (bool shouldStop, DecodedReport memory decodedReport) = _beforeTransmit(report); + + if (shouldStop) { + return; + } + + { + // reportContext consists of: + // reportContext[0]: ConfigDigest + // reportContext[1]: 27 byte padding, 4-byte epoch and 1-byte round + // reportContext[2]: ExtraHash + bytes32 configDigest = reportContext[0]; + uint32 epochAndRound = uint32(uint256(reportContext[1])); + + emit Transmitted(configDigest, uint32(epochAndRound >> 8)); + + // The following check is disabled to allow both current and proposed routes to submit reports using the same OCR config digest + // Plugin Functions uses globally unique request IDs. 
Metadata about the request is stored and checked in the Coordinator and Router + // require(configInfo.latestConfigDigest == configDigest, "configDigest mismatch"); + + _requireExpectedMsgDataLength(report, rs, ss); + + uint256 expectedNumSignatures = (s_configInfo.n + s_configInfo.f) / 2 + 1; + + if (rs.length != expectedNumSignatures) revert ReportInvalid("wrong number of signatures"); + if (rs.length != ss.length) revert ReportInvalid("report rs and ss must be of equal length"); + + Oracle memory transmitter = s_oracles[msg.sender]; + if (transmitter.role != Role.Transmitter && msg.sender != s_transmitters[transmitter.index]) + revert ReportInvalid("unauthorized transmitter"); + } + + address[MAX_NUM_ORACLES] memory signed; + + { + // Verify signatures attached to report + bytes32 h = keccak256(abi.encodePacked(keccak256(report), reportContext)); + + Oracle memory o; + // Bounded by MAX_NUM_ORACLES in OCR2Abstract.sol + for (uint256 i = 0; i < rs.length; ++i) { + address signer = ecrecover(h, uint8(rawVs[i]) + 27, rs[i], ss[i]); + o = s_oracles[signer]; + if (o.role != Role.Signer) revert ReportInvalid("address not authorized to sign"); + if (signed[o.index] != address(0)) revert ReportInvalid("non-unique signature"); + signed[o.index] = signer; + } + } + + _report(decodedReport); + } +} diff --git a/contracts/src/v0.8/functions/interfaces/.gitkeep b/contracts/src/v0.8/functions/interfaces/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/contracts/src/v0.8/functions/tests/v1_X/BaseTest.t.sol b/contracts/src/v0.8/functions/tests/v1_X/BaseTest.t.sol new file mode 100644 index 00000000..eabf7e99 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/BaseTest.t.sol @@ -0,0 +1,25 @@ +pragma solidity ^0.8.19; + +import {Test} from "forge-std/Test.sol"; + +contract BaseTest is Test { + bool private s_baseTestInitialized; + + uint256 internal OWNER_PRIVATE_KEY = 0x1; + address internal OWNER_ADDRESS = vm.addr(OWNER_PRIVATE_KEY); + + uint256 
internal STRANGER_PRIVATE_KEY = 0x2; + address internal STRANGER_ADDRESS = vm.addr(STRANGER_PRIVATE_KEY); + + uint256 TX_GASPRICE_START = 3000000000; // 3 gwei + + uint72 constant JUELS_PER_PLI = 1e18; + + function setUp() public virtual { + // BaseTest.setUp is often called multiple times from tests' setUp due to inheritance. + if (s_baseTestInitialized) return; + s_baseTestInitialized = true; + // Set msg.sender and tx.origin to OWNER until stopPrank is called + vm.startPrank(OWNER_ADDRESS, OWNER_ADDRESS); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/ChainSpecificUtil.t.sol b/contracts/src/v0.8/functions/tests/v1_X/ChainSpecificUtil.t.sol new file mode 100644 index 00000000..b45c8c30 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/ChainSpecificUtil.t.sol @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {BaseTest} from "./BaseTest.t.sol"; +import {FunctionsClient} from "../../dev/v1_X/FunctionsClient.sol"; +import {FunctionsRouter} from "../../dev/v1_X/FunctionsRouter.sol"; +import {FunctionsSubscriptions} from "../../dev/v1_X/FunctionsSubscriptions.sol"; +import {FunctionsRequest} from "../../dev/v1_X/libraries/FunctionsRequest.sol"; +import {FunctionsResponse} from "../../dev/v1_X/libraries/FunctionsResponse.sol"; + +import {FunctionsFulfillmentSetup} from "./Setup.t.sol"; + +import {ArbGasInfo} from "../../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol"; +import {OVM_GasPriceOracle} from "../../../vendor/@eth-optimism/contracts/v0.8.9/contracts/L2/predeploys/OVM_GasPriceOracle.sol"; + +/// @notice #_getCurrentTxL1GasFees Arbitrum +/// @dev Arbitrum gas formula = L2 Gas Price * (Gas used on L2 + Extra Buffer for L1 cost) +/// @dev where Extra Buffer for L1 cost = (L1 Estimated Cost / L2 Gas Price) +contract ChainSpecificUtil__getCurrentTxL1GasFees_Arbitrum is FunctionsFulfillmentSetup { + address private constant ARBGAS_ADDR = address(0x000000000000000000000000000000000000006C); 
+ uint256 private constant L1_FEE_WEI = 15_818_209_764_247; + + uint96 l1FeeJuels = uint96((1e18 * L1_FEE_WEI) / uint256(PLI_ETH_RATE)); + + function setUp() public virtual override { + vm.mockCall(ARBGAS_ADDR, abi.encodeWithSelector(ArbGasInfo.getCurrentTxL1GasFees.selector), abi.encode(L1_FEE_WEI)); + } + + function test__getCurrentTxL1GasFees_SuccessWhenArbitrumMainnet() public { + // Set the chainID + vm.chainId(42161); + + // Setup sends and fulfills request #1 + FunctionsFulfillmentSetup.setUp(); + + // Check request cost estimate + uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) + + l1FeeJuels; + assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels); + + // Check response actual cost + uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels; + assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels); + } + + function test__getCurrentTxL1GasFees_SuccessWhenArbitrumGoerli() public { + // Set the chainID + vm.chainId(421613); + + // Setup sends and fulfills request #1 + FunctionsFulfillmentSetup.setUp(); + + // Check request cost estimate + uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) + + l1FeeJuels; + assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels); + + // Check response actual cost + uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels; + assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels); + } + + function test__getCurrentTxL1GasFees_SuccessWhenArbitrumSepolia() public { + // Set the chainID + vm.chainId(421614); + + // Setup sends and fulfills request #1 + FunctionsFulfillmentSetup.setUp(); + + // Check request cost estimate + uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) + + l1FeeJuels; + 
assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels); + + // Check response actual cost + uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels; + assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels); + } +} + +/// @notice #_getCurrentTxL1GasFees Optimism +/// @dev Optimism gas formula = ((l2_base_fee + l2_priority_fee) * l2_gas_used) + L1 data fee +/// @dev where L1 data fee = l1_gas_price * ((count_zero_bytes(tx_data) * 4 + count_non_zero_bytes(tx_data) * 16) + fixed_overhead + noncalldata_gas) * dynamic_overhead +contract ChainSpecificUtil__getCurrentTxL1GasFees_Optimism is FunctionsFulfillmentSetup { + address private constant OVM_GASPRICEORACLE_ADDR = address(0x420000000000000000000000000000000000000F); + uint256 private constant L1_FEE_WEI = 15_818_209_764_247; + + uint96 l1FeeJuels = uint96((1e18 * L1_FEE_WEI) / uint256(PLI_ETH_RATE)); + + function setUp() public virtual override { + vm.mockCall( + OVM_GASPRICEORACLE_ADDR, + abi.encodeWithSelector(OVM_GasPriceOracle.getL1Fee.selector), + abi.encode(L1_FEE_WEI) + ); + } + + function test__getCurrentTxL1GasFees_SuccessWhenOptimismMainnet() public { + // Set the chainID + vm.chainId(10); + + // Setup sends and fulfills request #1 + FunctionsFulfillmentSetup.setUp(); + + // Check request cost estimate + uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) + + l1FeeJuels; + assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels); + + // Check response actual cost + uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels; + assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels); + } + + function test__getCurrentTxL1GasFees_SuccessWhenOptimismGoerli() public { + // Set the chainID + vm.chainId(420); + + // Setup sends and fulfills request #1 + FunctionsFulfillmentSetup.setUp(); + + // Check request cost estimate + uint96 
expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) + + l1FeeJuels; + assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels); + + // Check response actual cost + uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels; + assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels); + } + + function test__getCurrentTxL1GasFees_SuccessWhenOptimismSepolia() public { + // Set the chainID + vm.chainId(11155420); + + // Setup sends and fulfills request #1 + FunctionsFulfillmentSetup.setUp(); + + // Check request cost estimate + uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) + + l1FeeJuels; + assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels); + + // Check response actual cost + uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels; + assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels); + } +} + +/// @notice #_getCurrentTxL1GasFees Base +/// @dev Base gas formula uses Optimism formula = ((l2_base_fee + l2_priority_fee) * l2_gas_used) + L1 data fee +/// @dev where L1 data fee = l1_gas_price * ((count_zero_bytes(tx_data) * 4 + count_non_zero_bytes(tx_data) * 16) + fixed_overhead + noncalldata_gas) * dynamic_overhead +contract ChainSpecificUtil__getCurrentTxL1GasFees_Base is FunctionsFulfillmentSetup { + address private constant OVM_GASPRICEORACLE_ADDR = address(0x420000000000000000000000000000000000000F); + uint256 private constant L1_FEE_WEI = 15_818_209_764_247; + + uint96 l1FeeJuels = uint96((1e18 * L1_FEE_WEI) / uint256(PLI_ETH_RATE)); + + function setUp() public virtual override { + vm.mockCall( + OVM_GASPRICEORACLE_ADDR, + abi.encodeWithSelector(OVM_GasPriceOracle.getL1Fee.selector), + abi.encode(L1_FEE_WEI) + ); + } + + function test__getCurrentTxL1GasFees_SuccessWhenBaseMainnet() public { + // Set the chainID + vm.chainId(8453); + + // 
Setup sends and fulfills request #1 + FunctionsFulfillmentSetup.setUp(); + + // Check request cost estimate + uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) + + l1FeeJuels; + assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels); + + // Check response actual cost + uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels; + assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels); + } + + function test__getCurrentTxL1GasFees_SuccessWhenBaseGoerli() public { + // Set the chainID + vm.chainId(84531); + + // Setup sends and fulfills request #1 + FunctionsFulfillmentSetup.setUp(); + + // Check request cost estimate + uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) + + l1FeeJuels; + assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels); + + // Check response actual cost + uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels; + assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels); + } + + function test__getCurrentTxL1GasFees_SuccessWhenBaseSepolia() public { + // Set the chainID + vm.chainId(84532); + + // Setup sends and fulfills request #1 + FunctionsFulfillmentSetup.setUp(); + + // Check request cost estimate + uint96 expectedEstimatedTotalCostJuels = _getExpectedCostEstimate(s_requests[1].requestData.callbackGasLimit) + + l1FeeJuels; + assertEq(s_requests[1].commitment.estimatedTotalCostJuels, expectedEstimatedTotalCostJuels); + + // Check response actual cost + uint96 expectedTotalCostJuels = _getExpectedCost(5416) + l1FeeJuels; + assertEq(s_responses[1].totalCostJuels, expectedTotalCostJuels); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/FunctionsBilling.t.sol b/contracts/src/v0.8/functions/tests/v1_X/FunctionsBilling.t.sol new file mode 100644 index 00000000..698d13dc --- /dev/null +++ 
b/contracts/src/v0.8/functions/tests/v1_X/FunctionsBilling.t.sol @@ -0,0 +1,462 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsCoordinator} from "../../dev/v1_X/FunctionsCoordinator.sol"; +import {FunctionsBilling} from "../../dev/v1_X/FunctionsBilling.sol"; +import {FunctionsRequest} from "../../dev/v1_X/libraries/FunctionsRequest.sol"; +import {FunctionsResponse} from "../../dev/v1_X/libraries/FunctionsResponse.sol"; +import {FunctionsSubscriptions} from "../../dev/v1_X/FunctionsSubscriptions.sol"; +import {Routable} from "../../dev/v1_X/Routable.sol"; + +import {FunctionsRouterSetup, FunctionsSubscriptionSetup, FunctionsClientRequestSetup, FunctionsFulfillmentSetup, FunctionsMultipleFulfillmentsSetup} from "./Setup.t.sol"; + +import {FunctionsBillingConfig} from "../../dev/v1_X/interfaces/IFunctionsBilling.sol"; + +/// @notice #constructor +contract FunctionsBilling_Constructor is FunctionsSubscriptionSetup { + function test_Constructor_Success() public { + assertEq(address(s_functionsRouter), s_functionsCoordinator.getRouter_HARNESS()); + assertEq(address(s_linkEthFeed), s_functionsCoordinator.getLinkToNativeFeed_HARNESS()); + } +} + +/// @notice #getConfig +contract FunctionsBilling_GetConfig is FunctionsRouterSetup { + function test_GetConfig_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + FunctionsBillingConfig memory config = s_functionsCoordinator.getConfig(); + assertEq(config.feedStalenessSeconds, getCoordinatorConfig().feedStalenessSeconds); + assertEq(config.gasOverheadBeforeCallback, getCoordinatorConfig().gasOverheadBeforeCallback); + assertEq(config.gasOverheadAfterCallback, getCoordinatorConfig().gasOverheadAfterCallback); + assertEq(config.requestTimeoutSeconds, getCoordinatorConfig().requestTimeoutSeconds); + assertEq(config.donFee, getCoordinatorConfig().donFee); + assertEq(config.maxSupportedRequestDataVersion, 
getCoordinatorConfig().maxSupportedRequestDataVersion); + assertEq(config.fulfillmentGasPriceOverEstimationBP, getCoordinatorConfig().fulfillmentGasPriceOverEstimationBP); + assertEq(config.fallbackNativePerUnitLink, getCoordinatorConfig().fallbackNativePerUnitLink); + } +} + +/// @notice #updateConfig +contract FunctionsBilling_UpdateConfig is FunctionsRouterSetup { + FunctionsBillingConfig internal configToSet; + + function setUp() public virtual override { + FunctionsRouterSetup.setUp(); + + configToSet = FunctionsBillingConfig({ + feedStalenessSeconds: getCoordinatorConfig().feedStalenessSeconds * 2, + gasOverheadAfterCallback: getCoordinatorConfig().gasOverheadAfterCallback * 2, + gasOverheadBeforeCallback: getCoordinatorConfig().gasOverheadBeforeCallback * 2, + requestTimeoutSeconds: getCoordinatorConfig().requestTimeoutSeconds * 2, + donFee: getCoordinatorConfig().donFee * 2, + maxSupportedRequestDataVersion: getCoordinatorConfig().maxSupportedRequestDataVersion * 2, + fulfillmentGasPriceOverEstimationBP: getCoordinatorConfig().fulfillmentGasPriceOverEstimationBP * 2, + fallbackNativePerUnitLink: getCoordinatorConfig().fallbackNativePerUnitLink * 2, + minimumEstimateGasPriceWei: getCoordinatorConfig().minimumEstimateGasPriceWei * 2 + }); + } + + function test_UpdateConfig_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + s_functionsCoordinator.updateConfig(configToSet); + } + + event ConfigUpdated(FunctionsBillingConfig config); + + function test_UpdateConfig_Success() public { + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit ConfigUpdated(configToSet); + + s_functionsCoordinator.updateConfig(configToSet); + + FunctionsBillingConfig memory config = s_functionsCoordinator.getConfig(); + assertEq(config.feedStalenessSeconds, configToSet.feedStalenessSeconds); + assertEq(config.gasOverheadAfterCallback, configToSet.gasOverheadAfterCallback); + assertEq(config.gasOverheadBeforeCallback, configToSet.gasOverheadBeforeCallback); + assertEq(config.requestTimeoutSeconds, configToSet.requestTimeoutSeconds); + assertEq(config.donFee, configToSet.donFee); + assertEq(config.maxSupportedRequestDataVersion, configToSet.maxSupportedRequestDataVersion); + assertEq(config.fulfillmentGasPriceOverEstimationBP, configToSet.fulfillmentGasPriceOverEstimationBP); + assertEq(config.fallbackNativePerUnitLink, configToSet.fallbackNativePerUnitLink); + assertEq(config.minimumEstimateGasPriceWei, configToSet.minimumEstimateGasPriceWei); + } +} + +/// @notice #getDONFee +contract FunctionsBilling_GetDONFee is FunctionsRouterSetup { + function test_GetDONFee_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint72 donFee = s_functionsCoordinator.getDONFee(new bytes(0)); + assertEq(donFee, s_donFee); + } +} + +/// @notice #getAdminFee +contract FunctionsBilling_GetAdminFee is FunctionsRouterSetup { + function test_GetAdminFee_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint72 adminFee = s_functionsCoordinator.getAdminFee(); + assertEq(adminFee, s_adminFee); + } +} + +/// @notice #getWeiPerUnitLink +contract FunctionsBilling_GetWeiPerUnitLink is FunctionsRouterSetup { + function test_GetWeiPerUnitLink_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint256 weiPerUnitLink = 
s_functionsCoordinator.getWeiPerUnitLink(); + assertEq(weiPerUnitLink, uint256(PLI_ETH_RATE)); + } +} + +/// @notice #estimateCost +contract FunctionsBilling_EstimateCost is FunctionsSubscriptionSetup { + function setUp() public virtual override { + FunctionsSubscriptionSetup.setUp(); + + // Get cost estimate as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + } + + uint256 private constant REASONABLE_GAS_PRICE_CEILING = 1_000_000_000_000_000; // 1 million gwei + + function test_EstimateCost_RevertsIfGasPriceAboveCeiling() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + uint32 callbackGasLimit = 5_500; + uint256 gasPriceWei = REASONABLE_GAS_PRICE_CEILING + 1; + + vm.expectRevert(FunctionsBilling.InvalidCalldata.selector); + + s_functionsCoordinator.estimateCost(s_subscriptionId, requestData, callbackGasLimit, gasPriceWei); + } + + function test_EstimateCost_SuccessLowGasPrice() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + uint32 callbackGasLimit = 5_500; + uint256 gasPriceWei = 1; + + uint96 costEstimate = s_functionsCoordinator.estimateCost( + s_subscriptionId, + requestData, + callbackGasLimit, + gasPriceWei + ); + uint96 expectedCostEstimate = 51110500000000200; + assertEq(costEstimate, expectedCostEstimate); + } + + function test_EstimateCost_Success() public { + // Build minimal valid request data + string memory 
sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + uint32 callbackGasLimit = 5_500; + uint256 gasPriceWei = 5000000000; // 5 gwei + + uint96 costEstimate = s_functionsCoordinator.estimateCost( + s_subscriptionId, + requestData, + callbackGasLimit, + gasPriceWei + ); + uint96 expectedCostEstimate = 255552500000000200; + assertEq(costEstimate, expectedCostEstimate); + } +} + +/// @notice #_calculateCostEstimate +contract FunctionsBilling__CalculateCostEstimate { + // TODO: make contract internal function helper +} + +/// @notice #_startBilling +contract FunctionsBilling__StartBilling is FunctionsFulfillmentSetup { + function test__FulfillAndBill_HasUniqueGlobalRequestId() public { + // Variables that go into a requestId: + // - Coordinator address + // - Consumer contract + // - Subscription ID, + // - Consumer initiated requests + // - Request data + // - Request data version + // - Request callback gas limit + // - Estimated total cost in Juels + // - Request timeout timestamp + // - tx.origin + + // Request #1 has already been fulfilled by the test setup + + // Reset the nonce (initiatedRequests) by removing and re-adding the consumer + s_functionsRouter.removeConsumer(s_subscriptionId, address(s_functionsClient)); + assertEq(s_functionsRouter.getSubscription(s_subscriptionId).consumers.length, 0); + s_functionsRouter.addConsumer(s_subscriptionId, address(s_functionsClient)); + assertEq(s_functionsRouter.getSubscription(s_subscriptionId).consumers[0], address(s_functionsClient)); + + // Make Request #2 + _sendAndStoreRequest( + 2, + s_requests[1].requestData.sourceCode, + s_requests[1].requestData.secrets, + s_requests[1].requestData.args, + s_requests[1].requestData.bytesArgs, + 
s_requests[1].requestData.callbackGasLimit + ); + + // Request #1 and #2 should have different request IDs, because the request timeout timestamp has advanced. + // A request cannot be fulfilled in the same block, which prevents removing a consumer in the same block + assertNotEq(s_requests[1].requestId, s_requests[2].requestId); + } +} + +/// @notice #_fulfillAndBill +contract FunctionsBilling__FulfillAndBill is FunctionsClientRequestSetup { + function test__FulfillAndBill_RevertIfInvalidCommitment() public { + vm.expectRevert(); + s_functionsCoordinator.fulfillAndBill_HARNESS( + s_requests[1].requestId, + new bytes(0), + new bytes(0), + new bytes(0), // malformed commitment data + new bytes(0), + 1 + ); + } + + event RequestBilled( + bytes32 indexed requestId, + uint96 juelsPerGas, + uint256 l1FeeShareWei, + uint96 callbackCostJuels, + uint96 totalCostJuels + ); + + function test__FulfillAndBill_Success() public { + uint96 juelsPerGas = uint96((1e18 * TX_GASPRICE_START) / uint256(PLI_ETH_RATE)); + uint96 callbackCostGas = 5072; // Taken manually + uint96 callbackCostJuels = juelsPerGas * callbackCostGas; + uint96 gasOverheadJuels = juelsPerGas * + (getCoordinatorConfig().gasOverheadBeforeCallback + getCoordinatorConfig().gasOverheadAfterCallback); + + uint96 totalCostJuels = gasOverheadJuels + callbackCostJuels + s_donFee + s_adminFee; + + // topic0 (function signature, always checked), check topic1 (true), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = true; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit RequestBilled(s_requests[1].requestId, juelsPerGas, 0, callbackCostJuels, totalCostJuels); + + FunctionsResponse.FulfillResult resultCode = s_functionsCoordinator.fulfillAndBill_HARNESS( + s_requests[1].requestId, + new bytes(0), + new bytes(0), + abi.encode(s_requests[1].commitment), + new bytes(0), + 1 + ); + + assertEq(uint256(resultCode), uint256(FunctionsResponse.FulfillResult.FULFILLED)); + } +} + +/// @notice #deleteCommitment +contract FunctionsBilling_DeleteCommitment is FunctionsClientRequestSetup { + function test_DeleteCommitment_RevertIfNotRouter() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert(Routable.OnlyCallableByRouter.selector); + s_functionsCoordinator.deleteCommitment(s_requests[1].requestId); + } + + event CommitmentDeleted(bytes32 requestId); + + function test_DeleteCommitment_Success() public { + // Send as Router + vm.stopPrank(); + vm.startPrank(address(s_functionsRouter)); + + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit CommitmentDeleted(s_requests[1].requestId); + + s_functionsCoordinator.deleteCommitment(s_requests[1].requestId); + } +} + +/// @notice #oracleWithdraw +contract FunctionsBilling_OracleWithdraw is FunctionsMultipleFulfillmentsSetup { + function test_OracleWithdraw_RevertWithNoBalance() public { + uint256[4] memory transmitterBalancesBefore = _getTransmitterBalances(); + _assertTransmittersAllHaveBalance(transmitterBalancesBefore, 0); + + // Send as stranger, which has no balance + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert(FunctionsSubscriptions.InvalidCalldata.selector); + + // Attempt to withdraw with no amount, which would withdraw the full balance + s_functionsCoordinator.oracleWithdraw(STRANGER_ADDRESS, 0); + + uint256[4] memory transmitterBalancesAfter = _getTransmitterBalances(); + _assertTransmittersAllHaveBalance(transmitterBalancesAfter, 0); + } + + function test_OracleWithdraw_RevertIfInsufficientBalance() public { + // Send as transmitter 1, which has transmitted 1 report + vm.stopPrank(); + vm.startPrank(NOP_TRANSMITTER_ADDRESS_1); + + vm.expectRevert(FunctionsBilling.InsufficientBalance.selector); + + // Attempt to withdraw more than the Coordinator has assigned + s_functionsCoordinator.oracleWithdraw(NOP_TRANSMITTER_ADDRESS_1, s_fulfillmentCoordinatorBalance + 1); + } + + function test_OracleWithdraw_SuccessTransmitterWithBalanceValidAmountGiven() public { + uint256[4] memory transmitterBalancesBefore = _getTransmitterBalances(); + _assertTransmittersAllHaveBalance(transmitterBalancesBefore, 0); + + // Send as transmitter 1, which has transmitted 1 report + vm.stopPrank(); + vm.startPrank(NOP_TRANSMITTER_ADDRESS_1); + + uint96 expectedTransmitterBalance = s_fulfillmentCoordinatorBalance / 3; + + // Attempt to withdraw half of balance + uint96 
halfBalance = expectedTransmitterBalance / 2; + s_functionsCoordinator.oracleWithdraw(NOP_TRANSMITTER_ADDRESS_1, halfBalance); + + uint256[4] memory transmitterBalancesAfter = _getTransmitterBalances(); + assertEq(transmitterBalancesAfter[0], halfBalance); + assertEq(transmitterBalancesAfter[1], 0); + assertEq(transmitterBalancesAfter[2], 0); + assertEq(transmitterBalancesAfter[3], 0); + } + + function test_OracleWithdraw_SuccessTransmitterWithBalanceNoAmountGiven() public { + uint256[4] memory transmitterBalancesBefore = _getTransmitterBalances(); + _assertTransmittersAllHaveBalance(transmitterBalancesBefore, 0); + + // Send as transmitter 1, which has transmitted 1 report + vm.stopPrank(); + vm.startPrank(NOP_TRANSMITTER_ADDRESS_1); + + // Attempt to withdraw with no amount, which will withdraw the full balance + s_functionsCoordinator.oracleWithdraw(NOP_TRANSMITTER_ADDRESS_1, 0); + + // 3 report transmissions have been made + uint96 totalDonFees = s_donFee * 3; + // 4 transmitters will share the DON fees + uint96 donFeeShare = totalDonFees / 4; + uint96 expectedTransmitterBalance = ((s_fulfillmentCoordinatorBalance - totalDonFees) / 3) + donFeeShare; + + uint256[4] memory transmitterBalancesAfter = _getTransmitterBalances(); + assertEq(transmitterBalancesAfter[0], expectedTransmitterBalance); + assertEq(transmitterBalancesAfter[1], 0); + assertEq(transmitterBalancesAfter[2], 0); + assertEq(transmitterBalancesAfter[3], 0); + } +} + +/// @notice #oracleWithdrawAll +contract FunctionsBilling_OracleWithdrawAll is FunctionsMultipleFulfillmentsSetup { + function setUp() public virtual override { + // Use no DON fee so that a transmitter has a balance of 0 + s_donFee = 0; + + FunctionsMultipleFulfillmentsSetup.setUp(); + } + + function test_OracleWithdrawAll_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + s_functionsCoordinator.oracleWithdrawAll(); + } + + function 
test_OracleWithdrawAll_SuccessPaysTransmittersWithBalance() public { + uint256[4] memory transmitterBalancesBefore = _getTransmitterBalances(); + _assertTransmittersAllHaveBalance(transmitterBalancesBefore, 0); + + s_functionsCoordinator.oracleWithdrawAll(); + + uint96 expectedTransmitterBalance = s_fulfillmentCoordinatorBalance / 3; + + uint256[4] memory transmitterBalancesAfter = _getTransmitterBalances(); + assertEq(transmitterBalancesAfter[0], expectedTransmitterBalance); + assertEq(transmitterBalancesAfter[1], expectedTransmitterBalance); + assertEq(transmitterBalancesAfter[2], expectedTransmitterBalance); + // Transmitter 4 has no balance + assertEq(transmitterBalancesAfter[3], 0); + } +} + +/// @notice #_disperseFeePool +contract FunctionsBilling__DisperseFeePool is FunctionsRouterSetup { + function test__DisperseFeePool_RevertIfNotSet() public { + // Manually set s_feePool (at slot 11) to 1 to get past first check in _disperseFeePool + vm.store(address(s_functionsCoordinator), bytes32(uint256(11)), bytes32(uint256(1))); + + vm.expectRevert(FunctionsBilling.NoTransmittersSet.selector); + s_functionsCoordinator.disperseFeePool_HARNESS(); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/FunctionsClient.t.sol b/contracts/src/v0.8/functions/tests/v1_X/FunctionsClient.t.sol new file mode 100644 index 00000000..36382764 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/FunctionsClient.t.sol @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {BaseTest} from "./BaseTest.t.sol"; +import {FunctionsClient} from "../../dev/v1_X/FunctionsClient.sol"; +import {FunctionsRouter} from "../../dev/v1_X/FunctionsRouter.sol"; +import {FunctionsSubscriptions} from "../../dev/v1_X/FunctionsSubscriptions.sol"; +import {FunctionsRequest} from "../../dev/v1_X/libraries/FunctionsRequest.sol"; +import {FunctionsResponse} from "../../dev/v1_X/libraries/FunctionsResponse.sol"; + +import {FunctionsClientSetup, 
FunctionsSubscriptionSetup, FunctionsClientRequestSetup} from "./Setup.t.sol"; + +/// @notice #constructor +contract FunctionsClient_Constructor is FunctionsClientSetup { + function test_Constructor_Success() public { + assertEq(address(s_functionsRouter), s_functionsClient.getRouter_HARNESS()); + } +} + +/// @notice #_sendRequest +contract FunctionsClient__SendRequest is FunctionsSubscriptionSetup { + function test__SendRequest_RevertIfInvalidCallbackGasLimit() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + uint8 MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX = 0; + bytes32 subscriptionFlags = s_functionsRouter.getFlags(s_subscriptionId); + uint8 callbackGasLimitsIndexSelector = uint8(subscriptionFlags[MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX]); + + FunctionsRouter.Config memory config = s_functionsRouter.getConfig(); + uint32[] memory _maxCallbackGasLimits = config.maxCallbackGasLimits; + uint32 maxCallbackGasLimit = _maxCallbackGasLimits[callbackGasLimitsIndexSelector]; + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.GasLimitTooBig.selector, maxCallbackGasLimit)); + s_functionsClient.sendRequestBytes(requestData, s_subscriptionId, 500_000, s_donId); + } +} + +/// @notice #handleOracleFulfillment +contract FunctionsClient_HandleOracleFulfillment is FunctionsClientRequestSetup { + function test_HandleOracleFulfillment_RevertIfNotRouter() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert(FunctionsClient.OnlyRouterCanFulfill.selector); + s_functionsClient.handleOracleFulfillment(s_requests[1].requestId, new bytes(0), new bytes(0)); + } + + event RequestFulfilled(bytes32 indexed id); + event 
ResponseReceived(bytes32 indexed requestId, bytes result, bytes err); + + function test_HandleOracleFulfillment_Success() public { + // Send as Router + vm.stopPrank(); + vm.startPrank(address(s_functionsRouter)); + + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). + bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit ResponseReceived(s_requests[1].requestId, new bytes(0), new bytes(0)); + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit RequestFulfilled(s_requests[1].requestId); + + s_functionsClient.handleOracleFulfillment(s_requests[1].requestId, new bytes(0), new bytes(0)); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/FunctionsCoordinator.t.sol b/contracts/src/v0.8/functions/tests/v1_X/FunctionsCoordinator.t.sol new file mode 100644 index 00000000..c21a2c09 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/FunctionsCoordinator.t.sol @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsCoordinator} from "../../dev/v1_X/FunctionsCoordinator.sol"; +import {FunctionsBilling} from "../../dev/v1_X/FunctionsBilling.sol"; +import {FunctionsRequest} from "../../dev/v1_X/libraries/FunctionsRequest.sol"; +import {FunctionsResponse} from "../../dev/v1_X/libraries/FunctionsResponse.sol"; +import {FunctionsRouter} from "../../dev/v1_X/FunctionsRouter.sol"; +import {Routable} from "../../dev/v1_X/Routable.sol"; + +import {BaseTest} from "./BaseTest.t.sol"; +import {FunctionsRouterSetup, FunctionsDONSetup, FunctionsSubscriptionSetup} from "./Setup.t.sol"; + +/// @notice #constructor +contract FunctionsCoordinator_Constructor is FunctionsRouterSetup { + function test_Constructor_Success() public { + assertEq(s_functionsCoordinator.typeAndVersion(), "Functions Coordinator v1.2.0"); + 
assertEq(s_functionsCoordinator.owner(), OWNER_ADDRESS); + } +} + +/// @notice #getThresholdPublicKey +contract FunctionsCoordinator_GetThresholdPublicKey is FunctionsDONSetup { + function test_GetThresholdPublicKey_RevertIfEmpty() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + // Reverts when empty + vm.expectRevert(FunctionsCoordinator.EmptyPublicKey.selector); + s_functionsCoordinator.getThresholdPublicKey(); + } + + function test_GetThresholdPublicKey_Success() public { + s_functionsCoordinator.setThresholdPublicKey(s_thresholdKey); + + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes memory thresholdKey = s_functionsCoordinator.getThresholdPublicKey(); + assertEq(thresholdKey, s_thresholdKey); + } +} + +/// @notice #setThresholdPublicKey +contract FunctionsCoordinator_SetThresholdPublicKey is FunctionsDONSetup { + function test_SetThresholdPublicKey_RevertNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + bytes memory newThresholdKey = new bytes(0); + s_functionsCoordinator.setThresholdPublicKey(newThresholdKey); + } + + function test_SetThresholdPublicKey_Success() public { + s_functionsCoordinator.setThresholdPublicKey(s_thresholdKey); + + bytes memory thresholdKey = s_functionsCoordinator.getThresholdPublicKey(); + + assertEq(thresholdKey, s_thresholdKey); + } +} + +/// @notice #getDONPublicKey +contract FunctionsCoordinator_GetDONPublicKey is FunctionsDONSetup { + function test_GetDONPublicKey_RevertIfEmpty() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + // Reverts when empty + vm.expectRevert(FunctionsCoordinator.EmptyPublicKey.selector); + s_functionsCoordinator.getDONPublicKey(); + } + + function test_GetDONPublicKey_Success() public { + s_functionsCoordinator.setDONPublicKey(s_donKey); + + // Send as stranger + vm.stopPrank(); + 
vm.startPrank(STRANGER_ADDRESS); + + bytes memory donKey = s_functionsCoordinator.getDONPublicKey(); + assertEq(donKey, s_donKey); + } +} + +/// @notice #setDONPublicKey +contract FunctionsCoordinator_SetDONPublicKey is FunctionsDONSetup { + function test_SetDONPublicKey_RevertNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + s_functionsCoordinator.setDONPublicKey(s_donKey); + } + + function test_SetDONPublicKey_Success() public { + s_functionsCoordinator.setDONPublicKey(s_donKey); + + bytes memory donKey = s_functionsCoordinator.getDONPublicKey(); + assertEq(donKey, s_donKey); + } +} + +/// @notice #_isTransmitter +contract FunctionsCoordinator__IsTransmitter is FunctionsDONSetup { + function test__IsTransmitter_SuccessFound() public { + bool isTransmitter = s_functionsCoordinator.isTransmitter_HARNESS(NOP_TRANSMITTER_ADDRESS_1); + assertEq(isTransmitter, true); + } + + function test__IsTransmitter_SuccessNotFound() public { + bool isTransmitter = s_functionsCoordinator.isTransmitter_HARNESS(STRANGER_ADDRESS); + assertEq(isTransmitter, false); + } +} + +/// @notice #startRequest +contract FunctionsCoordinator_StartRequest is FunctionsSubscriptionSetup { + function test_StartRequest_RevertIfNotRouter() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert(Routable.OnlyCallableByRouter.selector); + + s_functionsCoordinator.startRequest( + FunctionsResponse.RequestMeta({ + requestingContract: address(s_functionsClient), + data: new bytes(0), + subscriptionId: s_subscriptionId, + dataVersion: FunctionsRequest.REQUEST_DATA_VERSION, + flags: bytes32(0), + callbackGasLimit: 5_500, + adminFee: s_adminFee, + initiatedRequests: 0, + completedRequests: 0, + availableBalance: s_subscriptionInitialFunding, + subscriptionOwner: OWNER_ADDRESS + }) + ); + } + + event OracleRequest( + bytes32 indexed requestId, + address indexed 
requestingContract, + address requestInitiator, + uint64 subscriptionId, + address subscriptionOwner, + bytes data, + uint16 dataVersion, + bytes32 flags, + uint64 callbackGasLimit, + FunctionsResponse.Commitment commitment + ); + + function test_StartRequest_Success() public { + // Send as Router + vm.stopPrank(); + vm.startPrank(address(s_functionsRouter)); + (, , address txOrigin) = vm.readCallers(); + + bytes memory _requestData = new bytes(0); + uint32 _callbackGasLimit = 5_500; + uint96 costEstimate = s_functionsCoordinator.estimateCost( + s_subscriptionId, + _requestData, + _callbackGasLimit, + tx.gasprice + ); + uint32 timeoutTimestamp = uint32(block.timestamp + getCoordinatorConfig().requestTimeoutSeconds); + bytes32 expectedRequestId = keccak256( + abi.encode( + address(s_functionsCoordinator), + address(s_functionsClient), + s_subscriptionId, + 1, + keccak256(_requestData), + FunctionsRequest.REQUEST_DATA_VERSION, + _callbackGasLimit, + costEstimate, + timeoutTimestamp, + txOrigin + ) + ); + + FunctionsResponse.Commitment memory expectedComittment = FunctionsResponse.Commitment({ + adminFee: s_adminFee, + coordinator: address(s_functionsCoordinator), + client: address(s_functionsClient), + subscriptionId: s_subscriptionId, + callbackGasLimit: _callbackGasLimit, + estimatedTotalCostJuels: costEstimate, + timeoutTimestamp: timeoutTimestamp, + requestId: expectedRequestId, + donFee: s_donFee, + gasOverheadBeforeCallback: getCoordinatorConfig().gasOverheadBeforeCallback, + gasOverheadAfterCallback: getCoordinatorConfig().gasOverheadAfterCallback + }); + + // topic0 (function signature, always checked), topic1 (true), topic2 (true), NOT topic3 (false), and data (true). 
+ vm.expectEmit(true, true, false, true); + emit OracleRequest({ + requestId: expectedRequestId, + requestingContract: address(s_functionsClient), + requestInitiator: txOrigin, + subscriptionId: s_subscriptionId, + subscriptionOwner: OWNER_ADDRESS, + data: _requestData, + dataVersion: FunctionsRequest.REQUEST_DATA_VERSION, + flags: bytes32(0), + callbackGasLimit: _callbackGasLimit, + commitment: expectedComittment + }); + + s_functionsCoordinator.startRequest( + FunctionsResponse.RequestMeta({ + requestingContract: address(s_functionsClient), + data: _requestData, + subscriptionId: s_subscriptionId, + dataVersion: FunctionsRequest.REQUEST_DATA_VERSION, + flags: bytes32(0), + callbackGasLimit: 5_500, + adminFee: s_adminFee, + initiatedRequests: 0, + completedRequests: 0, + availableBalance: s_subscriptionInitialFunding, + subscriptionOwner: OWNER_ADDRESS + }) + ); + } +} + +/// @notice #_beforeSetConfig +contract FunctionsCoordinator__BeforeSetConfig { + // TODO: make contract internal function helper +} + +/// @notice #_getTransmitters +contract FunctionsCoordinator__GetTransmitters { + // TODO: make contract internal function helper +} + +/// @notice #_report +contract FunctionsCoordinator__Report { + // TODO: make contract internal function helper +} + +/// @notice #_onlyOwner +contract FunctionsCoordinator__OnlyOwner { + // TODO: make contract internal function helper +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/FunctionsRequest.t.sol b/contracts/src/v0.8/functions/tests/v1_X/FunctionsRequest.t.sol new file mode 100644 index 00000000..e9684d9f --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/FunctionsRequest.t.sol @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsRequest} from "../../dev/v1_X/libraries/FunctionsRequest.sol"; + +import {Test} from "forge-std/Test.sol"; + +/// @notice #REQUEST_DATA_VERSION +contract FunctionsRequest_REQUEST_DATA_VERSION is Test { + function 
test_REQUEST_DATA_VERSION() public { + // Exposes REQUEST_DATA_VERSION + assertEq(FunctionsRequest.REQUEST_DATA_VERSION, 1); + } +} + +/// @notice #DEFAULT_BUFFER_SIZE +contract FunctionsRequest_DEFAULT_BUFFER_SIZE is Test { + function test_DEFAULT_BUFFER_SIZE() public { + // Exposes DEFAULT_BUFFER_SIZE + assertEq(FunctionsRequest.DEFAULT_BUFFER_SIZE, 256); + } +} + +/// @notice #encodeCBOR +contract FunctionsRequest_EncodeCBOR is Test { + function test_EncodeCBOR_Success() public { + // TODO: placeholder copied from the DEFAULT_BUFFER_SIZE test; encodeCBOR itself is not yet exercised + assertEq(FunctionsRequest.DEFAULT_BUFFER_SIZE, 256); + } +} + +/// @notice #initializeRequest +contract FunctionsRequest_InitializeRequest is Test {} + +/// @notice #initializeRequestForInlineJavaScript +contract FunctionsRequest_InitializeRequestForInlineJavaScript is Test {} + +/// @notice #addSecretsReference +contract FunctionsRequest_AddSecretsReference is Test {} + +/// @notice #addDONHostedSecrets +contract FunctionsRequest_AddDONHostedSecrets is Test {} + +/// @notice #setArgs +contract FunctionsRequest_SetArgs is Test {} + +/// @notice #setBytesArgs +contract FunctionsRequest_SetBytesArgs is Test {} diff --git a/contracts/src/v0.8/functions/tests/v1_X/FunctionsRouter.t.sol b/contracts/src/v0.8/functions/tests/v1_X/FunctionsRouter.t.sol new file mode 100644 index 00000000..8f12dfda --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/FunctionsRouter.t.sol @@ -0,0 +1,1585 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsRouter} from "../../dev/v1_X/FunctionsRouter.sol"; +import {FunctionsSubscriptions} from "../../dev/v1_X/FunctionsSubscriptions.sol"; +import {FunctionsCoordinator} from "../../dev/v1_X/FunctionsCoordinator.sol"; +import {FunctionsBilling} from "../../dev/v1_X/FunctionsBilling.sol"; +import {FunctionsRequest} from "../../dev/v1_X/libraries/FunctionsRequest.sol"; +import {FunctionsResponse} from "../../dev/v1_X/libraries/FunctionsResponse.sol"; +import {FunctionsCoordinatorTestHelper} from 
"./testhelpers/FunctionsCoordinatorTestHelper.sol"; +import {FunctionsClientTestHelper} from "./testhelpers/FunctionsClientTestHelper.sol"; + +import {FunctionsRouterSetup, FunctionsRoutesSetup, FunctionsSubscriptionSetup, FunctionsClientRequestSetup} from "./Setup.t.sol"; + +import "forge-std/Vm.sol"; + +// ================================================================ +// | Functions Router | +// ================================================================ + +/// @notice #constructor +contract FunctionsRouter_Constructor is FunctionsRouterSetup { + function test_Constructor_Success() public { + assertEq(s_functionsRouter.typeAndVersion(), "Functions Router v2.0.0"); + assertEq(s_functionsRouter.owner(), OWNER_ADDRESS); + } +} + +/// @notice #getConfig +contract FunctionsRouter_GetConfig is FunctionsRouterSetup { + function test_GetConfig_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + FunctionsRouter.Config memory config = s_functionsRouter.getConfig(); + assertEq(config.maxConsumersPerSubscription, getRouterConfig().maxConsumersPerSubscription); + assertEq(config.adminFee, getRouterConfig().adminFee); + assertEq(config.handleOracleFulfillmentSelector, getRouterConfig().handleOracleFulfillmentSelector); + assertEq(config.maxCallbackGasLimits[0], getRouterConfig().maxCallbackGasLimits[0]); + assertEq(config.maxCallbackGasLimits[1], getRouterConfig().maxCallbackGasLimits[1]); + assertEq(config.maxCallbackGasLimits[2], getRouterConfig().maxCallbackGasLimits[2]); + assertEq(config.gasForCallExactCheck, getRouterConfig().gasForCallExactCheck); + assertEq(config.subscriptionDepositMinimumRequests, getRouterConfig().subscriptionDepositMinimumRequests); + assertEq(config.subscriptionDepositJuels, getRouterConfig().subscriptionDepositJuels); + } +} + +/// @notice #updateConfig +contract FunctionsRouter_UpdateConfig is FunctionsRouterSetup { + FunctionsRouter.Config internal configToSet; + + function setUp() public 
virtual override { + FunctionsRouterSetup.setUp(); + + uint32[] memory maxCallbackGasLimits = new uint32[](4); + maxCallbackGasLimits[0] = 300_000; + maxCallbackGasLimits[1] = 500_000; + maxCallbackGasLimits[2] = 1_000_000; + maxCallbackGasLimits[3] = 3_000_000; + + configToSet = FunctionsRouter.Config({ + maxConsumersPerSubscription: s_maxConsumersPerSubscription, + adminFee: s_adminFee, + handleOracleFulfillmentSelector: s_handleOracleFulfillmentSelector, + maxCallbackGasLimits: maxCallbackGasLimits, + gasForCallExactCheck: 5000, + subscriptionDepositMinimumRequests: 10, + subscriptionDepositJuels: 5 * 1e18 + }); + } + + function test_UpdateConfig_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + s_functionsRouter.updateConfig(configToSet); + } + + event ConfigUpdated(FunctionsRouter.Config config); + + function test_UpdateConfig_Success() public { + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit ConfigUpdated(configToSet); + + s_functionsRouter.updateConfig(configToSet); + + FunctionsRouter.Config memory config = s_functionsRouter.getConfig(); + assertEq(config.maxConsumersPerSubscription, configToSet.maxConsumersPerSubscription); + assertEq(config.adminFee, configToSet.adminFee); + assertEq(config.handleOracleFulfillmentSelector, configToSet.handleOracleFulfillmentSelector); + assertEq(config.maxCallbackGasLimits[0], configToSet.maxCallbackGasLimits[0]); + assertEq(config.maxCallbackGasLimits[1], configToSet.maxCallbackGasLimits[1]); + assertEq(config.maxCallbackGasLimits[2], configToSet.maxCallbackGasLimits[2]); + assertEq(config.maxCallbackGasLimits[3], configToSet.maxCallbackGasLimits[3]); + assertEq(config.gasForCallExactCheck, configToSet.gasForCallExactCheck); + } +} + +/// @notice #isValidCallbackGasLimit +contract FunctionsRouter_IsValidCallbackGasLimit is FunctionsSubscriptionSetup { + function test_IsValidCallbackGasLimit_RevertInvalidConfig() public { + // Set an invalid maxCallbackGasLimit flag + bytes32 flagsToSet = 0x5a00000000000000000000000000000000000000000000000000000000000000; + s_functionsRouter.setFlags(s_subscriptionId, flagsToSet); + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.InvalidGasFlagValue.selector, 90)); + s_functionsRouter.isValidCallbackGasLimit(s_subscriptionId, 0); + } + + function test_IsValidCallbackGasLimit_RevertGasLimitTooBig() public { + uint8 MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX = 0; + bytes32 subscriptionFlags = s_functionsRouter.getFlags(s_subscriptionId); + uint8 callbackGasLimitsIndexSelector = uint8(subscriptionFlags[MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX]); + + FunctionsRouter.Config memory config = s_functionsRouter.getConfig(); + uint32[] memory _maxCallbackGasLimits = config.maxCallbackGasLimits; + uint32 
maxCallbackGasLimit = _maxCallbackGasLimits[callbackGasLimitsIndexSelector]; + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.GasLimitTooBig.selector, maxCallbackGasLimit)); + s_functionsRouter.isValidCallbackGasLimit(s_subscriptionId, maxCallbackGasLimit + 1); + } + + function test_IsValidCallbackGasLimit_Success() public view { + uint8 MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX = 0; + bytes32 subscriptionFlags = s_functionsRouter.getFlags(s_subscriptionId); + uint8 callbackGasLimitsIndexSelector = uint8(subscriptionFlags[MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX]); + + FunctionsRouter.Config memory config = s_functionsRouter.getConfig(); + uint32[] memory _maxCallbackGasLimits = config.maxCallbackGasLimits; + uint32 maxCallbackGasLimit = _maxCallbackGasLimits[callbackGasLimitsIndexSelector]; + + s_functionsRouter.isValidCallbackGasLimit(s_subscriptionId, maxCallbackGasLimit); + } +} + +/// @notice #getAdminFee +contract FunctionsRouter_GetAdminFee is FunctionsRouterSetup { + function test_GetAdminFee_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint72 adminFee = s_functionsRouter.getAdminFee(); + assertEq(adminFee, getRouterConfig().adminFee); + } +} + +/// @notice #getAllowListId +contract FunctionsRouter_GetAllowListId is FunctionsRouterSetup { + function test_GetAllowListId_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes32 defaultAllowListId = bytes32(0); + + bytes32 allowListId = s_functionsRouter.getAllowListId(); + assertEq(allowListId, defaultAllowListId); + } +} + +/// @notice #setAllowListId +contract FunctionsRouter_SetAllowListId is FunctionsRouterSetup { + function test_UpdateConfig_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes32 routeIdToSet = bytes32("allowList"); + + vm.expectRevert("Only callable by owner"); + s_functionsRouter.setAllowListId(routeIdToSet); + } + + function 
test_SetAllowListId_Success() public { + bytes32 routeIdToSet = bytes32("allowList"); + s_functionsRouter.setAllowListId(routeIdToSet); + bytes32 allowListId = s_functionsRouter.getAllowListId(); + assertEq(allowListId, routeIdToSet); + } +} + +/// @notice #_getMaxConsumers +contract FunctionsRouter__GetMaxConsumers is FunctionsRouterSetup { + // TODO: make contract internal function helper +} + +/// @notice #sendRequest +contract FunctionsRouter_SendRequest is FunctionsSubscriptionSetup { + function setUp() public virtual override { + FunctionsSubscriptionSetup.setUp(); + + // Add sending wallet as a subscription consumer + s_functionsRouter.addConsumer(s_subscriptionId, OWNER_ADDRESS); + } + + function test_SendRequest_RevertIfInvalidDonId() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + bytes32 invalidDonId = bytes32("this does not exist"); + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.RouteNotFound.selector, invalidDonId)); + s_functionsRouter.sendRequest( + s_subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + 5_000, + invalidDonId + ); + } + + function test_SendRequest_RevertIfIncorrectDonId() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + bytes32 incorrectDonId = s_functionsRouter.getAllowListId(); + + // Low level revert from incorrect call + vm.expectRevert(); + s_functionsRouter.sendRequest( + 
s_subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + 5_000, + incorrectDonId + ); + } + + function test_SendRequest_RevertIfPaused() public { + s_functionsRouter.pause(); + + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + vm.expectRevert("Pausable: paused"); + s_functionsRouter.sendRequest(s_subscriptionId, requestData, FunctionsRequest.REQUEST_DATA_VERSION, 5000, s_donId); + } + + function test_SendRequest_RevertIfNoSubscription() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + uint64 invalidSubscriptionId = 123456789; + + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + s_functionsRouter.sendRequest( + invalidSubscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + 5000, + s_donId + ); + } + + function test_SendRequest_RevertIfConsumerNotAllowed() public { + // Remove sending wallet as a subscription consumer + s_functionsRouter.removeConsumer(s_subscriptionId, OWNER_ADDRESS); + + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + vm.expectRevert(FunctionsSubscriptions.InvalidConsumer.selector); 
+ s_functionsRouter.sendRequest(s_subscriptionId, requestData, FunctionsRequest.REQUEST_DATA_VERSION, 5000, s_donId); + } + + function test_SendRequest_RevertIfInvalidCallbackGasLimit() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + uint8 MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX = 0; + bytes32 subscriptionFlags = s_functionsRouter.getFlags(s_subscriptionId); + uint8 callbackGasLimitsIndexSelector = uint8(subscriptionFlags[MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX]); + + FunctionsRouter.Config memory config = s_functionsRouter.getConfig(); + uint32[] memory _maxCallbackGasLimits = config.maxCallbackGasLimits; + uint32 maxCallbackGasLimit = _maxCallbackGasLimits[callbackGasLimitsIndexSelector]; + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.GasLimitTooBig.selector, maxCallbackGasLimit)); + s_functionsRouter.sendRequest( + s_subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + 500_000, + s_donId + ); + } + + function test_SendRequest_RevertIfEmptyData() public { + // Build invalid request data + bytes memory emptyRequestData = new bytes(0); + + vm.expectRevert(FunctionsRouter.EmptyRequestData.selector); + s_functionsRouter.sendRequest( + s_subscriptionId, + emptyRequestData, + FunctionsRequest.REQUEST_DATA_VERSION, + 5_000, + s_donId + ); + } + + function test_SendRequest_RevertIfInsufficientSubscriptionBalance() public { + // Create new subscription that does not have any funding + uint64 subscriptionId = s_functionsRouter.createSubscription(); + s_functionsRouter.addConsumer(subscriptionId, address(OWNER_ADDRESS)); + + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request 
memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + uint32 callbackGasLimit = 5000; + vm.expectRevert(FunctionsBilling.InsufficientBalance.selector); + + s_functionsRouter.sendRequest( + subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + callbackGasLimit, + s_donId + ); + } + + function test_SendRequest_RevertIfDuplicateRequestId() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + uint32 callbackGasLimit = 5_000; + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + // Send a first request that will remain pending + bytes32 requestId = s_functionsRouter.sendRequest( + s_subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + callbackGasLimit, + s_donId + ); + + // Mock the Coordinator to always give back the first requestId + FunctionsResponse.Commitment memory mockCommitment = FunctionsResponse.Commitment({ + adminFee: s_adminFee, + coordinator: address(s_functionsCoordinator), + client: OWNER_ADDRESS, + subscriptionId: s_subscriptionId, + callbackGasLimit: callbackGasLimit, + estimatedTotalCostJuels: 0, + timeoutTimestamp: uint32(block.timestamp + getCoordinatorConfig().requestTimeoutSeconds), + requestId: requestId, + donFee: s_donFee, + gasOverheadBeforeCallback: getCoordinatorConfig().gasOverheadBeforeCallback, + gasOverheadAfterCallback: getCoordinatorConfig().gasOverheadAfterCallback + }); + + vm.mockCall( + address(s_functionsCoordinator), + abi.encodeWithSelector(FunctionsCoordinator.startRequest.selector), + abi.encode(mockCommitment) + ); + + 
vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.DuplicateRequestId.selector, requestId)); + s_functionsRouter.sendRequest( + s_subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + callbackGasLimit, + s_donId + ); + } + + event RequestStart( + bytes32 indexed requestId, + bytes32 indexed donId, + uint64 indexed subscriptionId, + address subscriptionOwner, + address requestingContract, + address requestInitiator, + bytes data, + uint16 dataVersion, + uint32 callbackGasLimit, + uint96 estimatedTotalCostJuels + ); + + function test_SendRequest_Success() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + uint32 callbackGasLimit = 5000; + + uint96 costEstimate = s_functionsCoordinator.estimateCost( + s_subscriptionId, + requestData, + callbackGasLimit, + tx.gasprice + ); + + vm.recordLogs(); + + bytes32 requestIdFromReturn = s_functionsRouter.sendRequest( + s_subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + callbackGasLimit, + s_donId + ); + + // Get requestId from RequestStart event log topic 1 + Vm.Log[] memory entries = vm.getRecordedLogs(); + bytes32 requestIdFromEvent = entries[1].topics[1]; + bytes32 donIdFromEvent = entries[1].topics[2]; + bytes32 subscriptionIdFromEvent = entries[1].topics[3]; + + bytes memory expectedRequestData = abi.encode( + OWNER_ADDRESS, + OWNER_ADDRESS, + OWNER_ADDRESS, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + callbackGasLimit, + costEstimate + ); + + assertEq(requestIdFromReturn, requestIdFromEvent); + assertEq(donIdFromEvent, s_donId); + assertEq(subscriptionIdFromEvent, bytes32(uint256(s_subscriptionId))); + assertEq(expectedRequestData, entries[1].data); + } 
+} + +/// @notice #sendRequestToProposed +contract FunctionsRouter_SendRequestToProposed is FunctionsSubscriptionSetup { + FunctionsCoordinatorTestHelper internal s_functionsCoordinator2; // TODO: use actual FunctionsCoordinator instead of helper + + function setUp() public virtual override { + FunctionsSubscriptionSetup.setUp(); + + // Add sending wallet as a subscription consumer + s_functionsRouter.addConsumer(s_subscriptionId, OWNER_ADDRESS); + + // Deploy new Coordinator contract + s_functionsCoordinator2 = new FunctionsCoordinatorTestHelper( + address(s_functionsRouter), + getCoordinatorConfig(), + address(s_linkEthFeed) + ); + + // Propose new Coordinator contract + bytes32[] memory proposedContractSetIds = new bytes32[](1); + proposedContractSetIds[0] = s_donId; + address[] memory proposedContractSetAddresses = new address[](1); + proposedContractSetAddresses[0] = address(s_functionsCoordinator2); + + s_functionsRouter.proposeContractsUpdate(proposedContractSetIds, proposedContractSetAddresses); + } + + function test_SendRequestToProposed_RevertIfInvalidDonId() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + bytes32 invalidDonId = bytes32("this does not exist"); + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.RouteNotFound.selector, invalidDonId)); + s_functionsRouter.sendRequestToProposed( + s_subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + 5_000, + invalidDonId + ); + } + + function test_SendRequestToProposed_RevertIfIncorrectDonId() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + 
FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + bytes32 incorrectDonId = s_functionsRouter.getAllowListId(); + + // Low level revert from incorrect call + vm.expectRevert(); + s_functionsRouter.sendRequestToProposed( + s_subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + 5_000, + incorrectDonId + ); + } + + function test_SendRequestToProposed_RevertIfPaused() public { + s_functionsRouter.pause(); + + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + vm.expectRevert("Pausable: paused"); + s_functionsRouter.sendRequestToProposed( + s_subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + 5000, + s_donId + ); + } + + function test_SendRequestToProposed_RevertIfNoSubscription() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + uint64 invalidSubscriptionId = 123456789; + + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + s_functionsRouter.sendRequestToProposed( + invalidSubscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + 5000, + s_donId + ); + } + + function test_SendRequestToProposed_RevertIfConsumerNotAllowed() public { + // Remove sending wallet as a subscription consumer + 
s_functionsRouter.removeConsumer(s_subscriptionId, OWNER_ADDRESS); + + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + vm.expectRevert(FunctionsSubscriptions.InvalidConsumer.selector); + s_functionsRouter.sendRequestToProposed( + s_subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + 5000, + s_donId + ); + } + + function test_SendRequestToProposed_RevertIfInvalidCallbackGasLimit() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + uint8 MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX = 0; + bytes32 subscriptionFlags = s_functionsRouter.getFlags(s_subscriptionId); + uint8 callbackGasLimitsIndexSelector = uint8(subscriptionFlags[MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX]); + + FunctionsRouter.Config memory config = s_functionsRouter.getConfig(); + uint32[] memory _maxCallbackGasLimits = config.maxCallbackGasLimits; + uint32 maxCallbackGasLimit = _maxCallbackGasLimits[callbackGasLimitsIndexSelector]; + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.GasLimitTooBig.selector, maxCallbackGasLimit)); + s_functionsRouter.sendRequestToProposed( + s_subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + 500_000, + s_donId + ); + } + + function test_SendRequestToProposed_RevertIfEmptyData() public { + // Build invalid request data + bytes memory emptyRequestData = new bytes(0); + + vm.expectRevert(FunctionsRouter.EmptyRequestData.selector); 
+ s_functionsRouter.sendRequestToProposed( + s_subscriptionId, + emptyRequestData, + FunctionsRequest.REQUEST_DATA_VERSION, + 5_000, + s_donId + ); + } + + function test_SendRequest_RevertIfInsufficientSubscriptionBalance() public { + // Create new subscription that does not have any funding + uint64 subscriptionId = s_functionsRouter.createSubscription(); + s_functionsRouter.addConsumer(subscriptionId, address(OWNER_ADDRESS)); + + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + uint32 callbackGasLimit = 5000; + vm.expectRevert(FunctionsBilling.InsufficientBalance.selector); + + s_functionsRouter.sendRequestToProposed( + subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + callbackGasLimit, + s_donId + ); + } + + event RequestStart( + bytes32 indexed requestId, + bytes32 indexed donId, + uint64 indexed subscriptionId, + address subscriptionOwner, + address requestingContract, + address requestInitiator, + bytes data, + uint16 dataVersion, + uint32 callbackGasLimit, + uint96 estimatedTotalCostJuels + ); + + function test_SendRequestToProposed_Success() public { + // Build minimal valid request data + string memory sourceCode = "return 'hello world';"; + FunctionsRequest.Request memory request; + FunctionsRequest._initializeRequest( + request, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + sourceCode + ); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + + uint32 callbackGasLimit = 5000; + + uint96 costEstimate = s_functionsCoordinator2.estimateCost( + s_subscriptionId, + requestData, + callbackGasLimit, + tx.gasprice + ); + + vm.recordLogs(); + + bytes32 requestIdFromReturn = 
s_functionsRouter.sendRequestToProposed( + s_subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + callbackGasLimit, + s_donId + ); + + // Get requestId from RequestStart event log topic 1 + Vm.Log[] memory entries = vm.getRecordedLogs(); + bytes32 requestIdFromEvent = entries[1].topics[1]; + bytes32 donIdFromEvent = entries[1].topics[2]; + bytes32 subscriptionIdFromEvent = entries[1].topics[3]; + + bytes memory expectedRequestData = abi.encode( + OWNER_ADDRESS, + OWNER_ADDRESS, + OWNER_ADDRESS, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + callbackGasLimit, + costEstimate + ); + + assertEq(requestIdFromReturn, requestIdFromEvent); + assertEq(donIdFromEvent, s_donId); + assertEq(subscriptionIdFromEvent, bytes32(uint256(s_subscriptionId))); + assertEq(expectedRequestData, entries[1].data); + } +} + +/// @notice #_sendRequest +contract FunctionsRouter__SendRequest is FunctionsRouterSetup { + // TODO: make contract internal function helper +} + +/// @notice #fulfill +contract FunctionsRouter_Fulfill is FunctionsClientRequestSetup { + function test_Fulfill_RevertIfPaused() public { + s_functionsRouter.pause(); + + uint256 requestToFulfill = 1; + + uint256[] memory requestNumberKeys = new uint256[](1); + requestNumberKeys[0] = requestToFulfill; + + string[] memory results = new string[](1); + string memory response = "hello world!"; + results[0] = response; + + bytes[] memory errors = new bytes[](1); + bytes memory err = new bytes(0); + errors[0] = err; + + vm.expectRevert("Pausable: paused"); + _reportAndStore(requestNumberKeys, results, errors, NOP_TRANSMITTER_ADDRESS_1, false); + } + + function test_Fulfill_RevertIfNotCommittedCoordinator() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes memory response = bytes("hello world!"); + bytes memory err = new bytes(0); + uint96 juelsPerGas = 0; + uint96 costWithoutCallback = 0; + address transmitter = NOP_TRANSMITTER_ADDRESS_1; + 
FunctionsResponse.Commitment memory commitment = s_requests[1].commitment; + + vm.expectRevert(FunctionsRouter.OnlyCallableFromCoordinator.selector); + s_functionsRouter.fulfill(response, err, juelsPerGas, costWithoutCallback, transmitter, commitment); + } + + event RequestNotProcessed( + bytes32 indexed requestId, + address coordinator, + address transmitter, + FunctionsResponse.FulfillResult resultCode + ); + + function test_Fulfill_RequestNotProcessedInvalidRequestId() public { + // Send as committed Coordinator + vm.stopPrank(); + vm.startPrank(address(s_functionsCoordinator)); + + bytes memory response = bytes("hello world!"); + bytes memory err = new bytes(0); + uint96 juelsPerGas = 0; + uint96 costWithoutCallback = 0; + address transmitter = NOP_TRANSMITTER_ADDRESS_1; + FunctionsResponse.Commitment memory commitment = s_requests[1].commitment; + // Modify request commitment to have a invalid requestId + bytes32 invalidRequestId = bytes32("this does not exist"); + commitment.requestId = invalidRequestId; + + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit RequestNotProcessed({ + requestId: s_requests[1].requestId, + coordinator: address(s_functionsCoordinator), + transmitter: NOP_TRANSMITTER_ADDRESS_1, + resultCode: FunctionsResponse.FulfillResult.INVALID_REQUEST_ID + }); + + (FunctionsResponse.FulfillResult resultCode, uint96 callbackGasCostJuels) = s_functionsRouter.fulfill( + response, + err, + juelsPerGas, + costWithoutCallback, + transmitter, + commitment + ); + + assertEq(uint(resultCode), uint(FunctionsResponse.FulfillResult.INVALID_REQUEST_ID)); + assertEq(callbackGasCostJuels, 0); + } + + function test_Fulfill_RequestNotProcessedInvalidCommitment() public { + // Send as committed Coordinator + vm.stopPrank(); + vm.startPrank(address(s_functionsCoordinator)); + + bytes memory response = bytes("hello world!"); + bytes memory err = new bytes(0); + uint96 juelsPerGas = 0; + uint96 costWithoutCallback = 0; + address transmitter = NOP_TRANSMITTER_ADDRESS_1; + FunctionsResponse.Commitment memory commitment = s_requests[1].commitment; + // Modify request commitment to have charge more than quoted + commitment.estimatedTotalCostJuels = 10 * JUELS_PER_PLI; // 10 PLI + + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit RequestNotProcessed({ + requestId: s_requests[1].requestId, + coordinator: address(s_functionsCoordinator), + transmitter: NOP_TRANSMITTER_ADDRESS_1, + resultCode: FunctionsResponse.FulfillResult.INVALID_COMMITMENT + }); + + (FunctionsResponse.FulfillResult resultCode, uint96 callbackGasCostJuels) = s_functionsRouter.fulfill( + response, + err, + juelsPerGas, + costWithoutCallback, + transmitter, + commitment + ); + + assertEq(uint(resultCode), uint(FunctionsResponse.FulfillResult.INVALID_COMMITMENT)); + assertEq(callbackGasCostJuels, 0); + } + + function test_Fulfill_RequestNotProcessedInsufficientGas() public { + uint256 requestToFulfill = 1; + + uint256[] memory requestNumberKeys = new uint256[](1); + requestNumberKeys[0] = requestToFulfill; + + string[] memory results = new string[](1); + string memory response = "hello world!"; + results[0] = response; + + bytes[] memory errors = new bytes[](1); + bytes memory err = new bytes(0); + errors[0] = err; + + uint32 callbackGasLimit = s_requests[requestToFulfill].requestData.callbackGasLimit; + // Coordinator sends enough gas that would get through callback and payment, but fail after + uint256 gasToUse = getCoordinatorConfig().gasOverheadBeforeCallback + callbackGasLimit + 10_000; + + // topic0 (function signature, always checked), topic1 (true), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1RequestId = true; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1RequestId, checkTopic2, checkTopic3, checkData); + emit RequestNotProcessed({ + requestId: s_requests[requestToFulfill].requestId, + coordinator: address(s_functionsCoordinator), + transmitter: NOP_TRANSMITTER_ADDRESS_1, + resultCode: FunctionsResponse.FulfillResult.INSUFFICIENT_GAS_PROVIDED + }); + + _reportAndStore(requestNumberKeys, results, errors, NOP_TRANSMITTER_ADDRESS_1, false, 1, gasToUse); + } + + function test_Fulfill_RequestNotProcessedSubscriptionBalanceInvariant() public { + // Find the storage slot that the Subscription is on + vm.record(); + s_functionsRouter.getSubscription(s_subscriptionId); + (bytes32[] memory reads, ) = vm.accesses(address(s_functionsRouter)); + // The first read is from '_isExistingSubscription' which checks Subscription.owner on slot 0 + // Slot 0 is shared with the Subscription.balance + uint256 slot = uint256(reads[0]); + + // The request has already been initiated, forcibly lower the subscription's balance by clearing out slot 0 + uint96 balance = 1; + address owner = address(0); + bytes32 data = bytes32(abi.encodePacked(balance, owner)); // TODO: make this more accurate + vm.store(address(s_functionsRouter), bytes32(uint256(slot)), data); + + uint256 requestToFulfill = 1; + + uint256[] memory requestNumberKeys = new uint256[](1); + requestNumberKeys[0] = requestToFulfill; + + string[] memory results = new string[](1); + string memory response = "hello world!"; + results[0] = response; + + bytes[] memory errors = new bytes[](1); + bytes memory err = new bytes(0); + errors[0] = err; + + // topic0 (function signature, always checked), topic1 (true), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1RequestId = true; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1RequestId, checkTopic2, checkTopic3, checkData); + emit RequestNotProcessed({ + requestId: s_requests[requestToFulfill].requestId, + coordinator: address(s_functionsCoordinator), + transmitter: NOP_TRANSMITTER_ADDRESS_1, + resultCode: FunctionsResponse.FulfillResult.SUBSCRIPTION_BALANCE_INVARIANT_VIOLATION + }); + + _reportAndStore(requestNumberKeys, results, errors, NOP_TRANSMITTER_ADDRESS_1, false); + } + + function test_Fulfill_RequestNotProcessedCostExceedsCommitment() public { + // Use higher juelsPerGas than request time + // 10x the gas price + vm.txGasPrice(TX_GASPRICE_START * 10); + + uint256 requestToFulfill = 1; + + uint256[] memory requestNumberKeys = new uint256[](1); + requestNumberKeys[0] = requestToFulfill; + + string[] memory results = new string[](1); + string memory response = "hello world!"; + results[0] = response; + + bytes[] memory errors = new bytes[](1); + bytes memory err = new bytes(0); + errors[0] = err; + + // topic0 (function signature, always checked), topic1 (true), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1RequestId = true; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1RequestId, checkTopic2, checkTopic3, checkData); + emit RequestNotProcessed({ + requestId: s_requests[requestToFulfill].requestId, + coordinator: address(s_functionsCoordinator), + transmitter: NOP_TRANSMITTER_ADDRESS_1, + resultCode: FunctionsResponse.FulfillResult.COST_EXCEEDS_COMMITMENT + }); + + _reportAndStore(requestNumberKeys, results, errors, NOP_TRANSMITTER_ADDRESS_1, false); + } + + event RequestProcessed( + bytes32 indexed requestId, + uint64 indexed subscriptionId, + uint96 totalCostJuels, + address transmitter, + FunctionsResponse.FulfillResult resultCode, + bytes response, + bytes err, + bytes callbackReturnData + ); + + FunctionsClientTestHelper internal s_clientWithFailingCallback; + + function test_Fulfill_SuccessUserCallbackReverts() public { + // Deploy Client with failing callback + s_clientWithFailingCallback = new FunctionsClientTestHelper(address(s_functionsRouter)); + s_clientWithFailingCallback.setRevertFulfillRequest(true); + + // Add Client as a subscription consumer + s_functionsRouter.addConsumer(s_subscriptionId, address(s_clientWithFailingCallback)); + + // Send a minimal request + uint256 requestKey = 99; + + string memory sourceCode = "return 'hello world';"; + uint32 callbackGasLimit = 5500; + + vm.recordLogs(); + bytes32 requestId = s_clientWithFailingCallback.sendSimpleRequestWithJavaScript( + sourceCode, + s_subscriptionId, + s_donId, + callbackGasLimit + ); + + // Get commitment data from OracleRequest event log + Vm.Log[] memory entries = vm.getRecordedLogs(); + (, , , , , , , FunctionsResponse.Commitment memory _commitment) = abi.decode( + entries[0].data, + (address, uint64, address, bytes, uint16, bytes32, uint64, FunctionsResponse.Commitment) + ); + + s_requests[requestKey] = Request({ + requestData: RequestData({ + sourceCode: sourceCode, + secrets: new bytes(0), + args: new 
string[](0),
        bytesArgs: new bytes[](0),
        callbackGasLimit: callbackGasLimit
      }),
      requestId: requestId,
      commitment: _commitment
    });

    // Fulfill the request; the client's callback is configured to revert, so the
    // router must record USER_CALLBACK_ERROR and surface the revert data.
    uint256 requestToFulfill = requestKey;

    uint256[] memory requestNumberKeys = new uint256[](1);
    requestNumberKeys[0] = requestToFulfill;

    string[] memory results = new string[](1);
    string memory response = "hello world";
    results[0] = response;

    bytes[] memory errors = new bytes[](1);
    bytes memory err = new bytes(0);
    errors[0] = err;

    // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true).
    // NOTE(review): the previous comment claimed topic1 was checked while the flag was false and was
    // misleadingly named checkTopic1RequestId; the flag stays false to preserve existing behavior.
    bool checkTopic1 = false;
    bool checkTopic2 = false;
    bool checkTopic3 = false;
    bool checkData = true;
    vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData);
    emit RequestProcessed({
      requestId: requestId,
      subscriptionId: s_subscriptionId,
      totalCostJuels: _getExpectedCost(1822), // gasUsed is manually taken
      transmitter: NOP_TRANSMITTER_ADDRESS_1,
      resultCode: FunctionsResponse.FulfillResult.USER_CALLBACK_ERROR,
      response: bytes(response),
      err: err,
      callbackReturnData: vm.parseBytes(
        "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000f61736b656420746f207265766572740000000000000000000000000000000000"
      ) // TODO: build this programmatically
    });

    _reportAndStore(requestNumberKeys, results, errors, NOP_TRANSMITTER_ADDRESS_1, true, 1);
  }

  function test_Fulfill_SuccessUserCallbackRunsOutOfGas() public {
    // Send request #2 with no callback gas so the user callback runs out of gas
    string memory sourceCode = "return 'hello world';";
    bytes memory secrets = new bytes(0);
    string[] memory args = new string[](0);
    bytes[] memory bytesArgs = new bytes[](0);
    uint32 callbackGasLimit = 0;
    _sendAndStoreRequest(2, sourceCode, secrets, args, bytesArgs, callbackGasLimit);

    uint256 requestToFulfill = 2;

    uint256[] memory
requestNumberKeys = new uint256[](1); + requestNumberKeys[0] = requestToFulfill; + + string[] memory results = new string[](1); + string memory response = "hello world!"; + results[0] = response; + + bytes[] memory errors = new bytes[](1); + bytes memory err = new bytes(0); + errors[0] = err; + + // topic0 (function signature, always checked), topic1: request ID(true), NOT topic2 (false), NOT topic3 (false), and data (true). + vm.expectEmit(true, false, false, true); + emit RequestProcessed({ + requestId: s_requests[requestToFulfill].requestId, + subscriptionId: s_subscriptionId, + totalCostJuels: _getExpectedCost(137), // gasUsed is manually taken + transmitter: NOP_TRANSMITTER_ADDRESS_1, + resultCode: FunctionsResponse.FulfillResult.USER_CALLBACK_ERROR, + response: bytes(response), + err: err, + callbackReturnData: new bytes(0) + }); + + _reportAndStore(requestNumberKeys, results, errors, NOP_TRANSMITTER_ADDRESS_1, true, 1); + } + + function test_Fulfill_SuccessClientNoLongerExists() public { + // Delete the Client contract in the time between request and fulfillment + vm.etch(address(s_functionsClient), new bytes(0)); + + uint256 requestToFulfill = 1; + + uint256[] memory requestNumberKeys = new uint256[](1); + requestNumberKeys[0] = requestToFulfill; + + string[] memory results = new string[](1); + string memory response = "hello world!"; + results[0] = response; + + bytes[] memory errors = new bytes[](1); + bytes memory err = new bytes(0); + errors[0] = err; + + // topic0 (function signature, always checked), topic1 (true), topic2 (true), NOT topic3 (false), and data (true). 
+ bool checkTopic1RequestId = true; + bool checkTopic2SubscriptionId = true; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1RequestId, checkTopic2SubscriptionId, checkTopic3, checkData); + emit RequestProcessed({ + requestId: s_requests[requestToFulfill].requestId, + subscriptionId: s_subscriptionId, + totalCostJuels: _getExpectedCost(0), // gasUsed is manually taken + transmitter: NOP_TRANSMITTER_ADDRESS_1, + resultCode: FunctionsResponse.FulfillResult.USER_CALLBACK_ERROR, + response: bytes(response), + err: err, + callbackReturnData: new bytes(0) + }); + + _reportAndStore(requestNumberKeys, results, errors, NOP_TRANSMITTER_ADDRESS_1, true, 1); + } + + function test_Fulfill_SuccessFulfilled() public { + // Fulfill request 1 + uint256 requestToFulfill = 1; + + uint256[] memory requestNumberKeys = new uint256[](1); + requestNumberKeys[0] = requestToFulfill; + string[] memory results = new string[](1); + string memory response = "hello world!"; + results[0] = response; + bytes[] memory errors = new bytes[](1); + bytes memory err = new bytes(0); + errors[0] = err; + + // topic0 (function signature, always checked), topic1 (true), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1RequestId = true; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1RequestId, checkTopic2, checkTopic3, checkData); + emit RequestProcessed({ + requestId: s_requests[requestToFulfill].requestId, + subscriptionId: s_subscriptionId, + totalCostJuels: _getExpectedCost(5416), // gasUsed is manually taken + transmitter: NOP_TRANSMITTER_ADDRESS_1, + resultCode: FunctionsResponse.FulfillResult.FULFILLED, + response: bytes(response), + err: err, + callbackReturnData: new bytes(0) + }); + _reportAndStore(requestNumberKeys, results, errors); + } +} + +/// @notice #_callback +contract FunctionsRouter__Callback is FunctionsRouterSetup { + // TODO: make contract internal function helper +} + +/// @notice #getContractById +contract FunctionsRouter_GetContractById is FunctionsRoutesSetup { + function test_GetContractById_RevertIfRouteDoesNotExist() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes32 invalidRouteId = bytes32("this does not exist"); + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.RouteNotFound.selector, invalidRouteId)); + s_functionsRouter.getContractById(invalidRouteId); + } + + function test_GetContractById_SuccessIfRouteExists() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + address routeDestination = s_functionsRouter.getContractById(s_donId); + assertEq(routeDestination, address(s_functionsCoordinator)); + } +} + +/// @notice #getProposedContractById +contract FunctionsRouter_GetProposedContractById is FunctionsRoutesSetup { + FunctionsCoordinatorTestHelper internal s_functionsCoordinator2; // TODO: use actual FunctionsCoordinator instead of helper + + function setUp() public virtual override { + FunctionsRoutesSetup.setUp(); + + // Deploy new Coordinator contract + s_functionsCoordinator2 = new FunctionsCoordinatorTestHelper( + address(s_functionsRouter), + getCoordinatorConfig(), + 
address(s_linkEthFeed) + ); + + // Propose new Coordinator contract + bytes32[] memory proposedContractSetIds = new bytes32[](1); + proposedContractSetIds[0] = s_donId; + address[] memory proposedContractSetAddresses = new address[](1); + proposedContractSetAddresses[0] = address(s_functionsCoordinator2); + + s_functionsRouter.proposeContractsUpdate(proposedContractSetIds, proposedContractSetAddresses); + } + + function test_GetProposedContractById_RevertIfRouteDoesNotExist() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes32 invalidRouteId = bytes32("this does not exist"); + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.RouteNotFound.selector, invalidRouteId)); + s_functionsRouter.getProposedContractById(invalidRouteId); + } + + function test_GetProposedContractById_SuccessIfRouteExists() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + address routeDestination = s_functionsRouter.getProposedContractById(s_donId); + assertEq(routeDestination, address(s_functionsCoordinator2)); + } +} + +/// @notice #getProposedContractSet +contract FunctionsRouter_GetProposedContractSet is FunctionsRoutesSetup { + FunctionsCoordinatorTestHelper internal s_functionsCoordinator2; // TODO: use actual FunctionsCoordinator instead of helper + bytes32[] s_proposedContractSetIds; + address[] s_proposedContractSetAddresses; + + function setUp() public virtual override { + FunctionsRoutesSetup.setUp(); + + // Deploy new Coordinator contract + s_functionsCoordinator2 = new FunctionsCoordinatorTestHelper( + address(s_functionsRouter), + getCoordinatorConfig(), + address(s_linkEthFeed) + ); + + // Propose new Coordinator contract + s_proposedContractSetIds = new bytes32[](1); + s_proposedContractSetIds[0] = s_donId; + s_proposedContractSetAddresses = new address[](1); + s_proposedContractSetAddresses[0] = address(s_functionsCoordinator2); + + 
s_functionsRouter.proposeContractsUpdate(s_proposedContractSetIds, s_proposedContractSetAddresses);
  }

  function test_GetProposedContractSet_Success() public {
    // Send as stranger
    vm.stopPrank();
    vm.startPrank(STRANGER_ADDRESS);

    (bytes32[] memory proposedContractSetIds, address[] memory proposedContractSetAddresses) = s_functionsRouter
      .getProposedContractSet();

    // The proposed set must contain exactly the single route proposed in setUp.
    assertEq(proposedContractSetIds.length, 1);
    assertEq(proposedContractSetIds[0], s_donId);
    // Bug fix: this previously re-asserted proposedContractSetIds.length, leaving
    // the addresses array length unverified.
    assertEq(proposedContractSetAddresses.length, 1);
    assertEq(proposedContractSetAddresses[0], address(s_functionsCoordinator2));
  }
}

/// @notice #proposeContractsUpdate
contract FunctionsRouter_ProposeContractsUpdate is FunctionsRoutesSetup {
  FunctionsCoordinatorTestHelper internal s_functionsCoordinator2; // TODO: use actual FunctionsCoordinator instead of helper
  bytes32[] s_proposedContractSetIds;
  address[] s_proposedContractSetAddresses;

  function setUp() public virtual override {
    FunctionsRoutesSetup.setUp();

    // Deploy new Coordinator contract
    s_functionsCoordinator2 = new FunctionsCoordinatorTestHelper(
      address(s_functionsRouter),
      getCoordinatorConfig(),
      address(s_linkEthFeed)
    );

    // Propose new Coordinator contract
    s_proposedContractSetIds = new bytes32[](1);
    s_proposedContractSetIds[0] = s_donId;
    s_proposedContractSetAddresses = new address[](1);
    s_proposedContractSetAddresses[0] = address(s_functionsCoordinator2);
  }

  function test_ProposeContractsUpdate_RevertIfNotOwner() public {
    // Send as stranger
    vm.stopPrank();
    vm.startPrank(STRANGER_ADDRESS);

    vm.expectRevert("Only callable by owner");
    s_functionsRouter.proposeContractsUpdate(s_proposedContractSetIds, s_proposedContractSetAddresses);
  }

  function test_ProposeContractsUpdate_RevertIfLengthMismatch() public {
    // One id but zero addresses: lengths differ, so the proposal must be rejected.
    // Bug fix: was new address[](1), which made both arrays the same length and
    // exercised the empty-address branch (already covered by RevertIfEmptyAddress)
    // instead of the length-mismatch branch this test is named for.
    bytes32[] memory proposedContractSetIds = new bytes32[](1);
    proposedContractSetIds[0] = s_donId;
    address[] memory proposedContractSetAddresses = new address[](0);
+ vm.expectRevert(FunctionsRouter.InvalidProposal.selector); + s_functionsRouter.proposeContractsUpdate(proposedContractSetIds, proposedContractSetAddresses); + } + + function test_ProposeContractsUpdate_RevertIfExceedsMaxProposal() public { + uint8 MAX_PROPOSAL_SET_LENGTH = 8; + uint8 INVALID_PROPOSAL_SET_LENGTH = MAX_PROPOSAL_SET_LENGTH + 1; + + // Generate some mock data + bytes32[] memory proposedContractSetIds = new bytes32[](INVALID_PROPOSAL_SET_LENGTH); + for (uint256 i = 0; i < INVALID_PROPOSAL_SET_LENGTH; ++i) { + proposedContractSetIds[i] = bytes32(uint256(i + 111)); + } + address[] memory proposedContractSetAddresses = new address[](INVALID_PROPOSAL_SET_LENGTH); + for (uint256 i = 0; i < INVALID_PROPOSAL_SET_LENGTH; ++i) { + proposedContractSetAddresses[i] = address(uint160(uint(keccak256(abi.encodePacked(i + 111))))); + } + + vm.expectRevert(FunctionsRouter.InvalidProposal.selector); + s_functionsRouter.proposeContractsUpdate(proposedContractSetIds, proposedContractSetAddresses); + } + + function test_ProposeContractsUpdate_RevertIfEmptyAddress() public { + bytes32[] memory proposedContractSetIds = new bytes32[](1); + proposedContractSetIds[0] = s_donId; + address[] memory proposedContractSetAddresses = new address[](1); + proposedContractSetAddresses[0] = address(0); + + vm.expectRevert(FunctionsRouter.InvalidProposal.selector); + s_functionsRouter.proposeContractsUpdate(proposedContractSetIds, proposedContractSetAddresses); + } + + function test_ProposeContractsUpdate_RevertIfNotNewContract() public { + bytes32[] memory proposedContractSetIds = new bytes32[](1); + proposedContractSetIds[0] = s_donId; + address[] memory proposedContractSetAddresses = new address[](1); + proposedContractSetAddresses[0] = address(s_functionsCoordinator); + + vm.expectRevert(FunctionsRouter.InvalidProposal.selector); + s_functionsRouter.proposeContractsUpdate(proposedContractSetIds, proposedContractSetAddresses); + } + + event ContractProposed( + bytes32 
proposedContractSetId, + address proposedContractSetFromAddress, + address proposedContractSetToAddress + ); + + function test_ProposeContractsUpdate_Success() public { + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). + bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit ContractProposed({ + proposedContractSetId: s_proposedContractSetIds[0], + proposedContractSetFromAddress: address(s_functionsCoordinator), + proposedContractSetToAddress: s_proposedContractSetAddresses[0] + }); + + s_functionsRouter.proposeContractsUpdate(s_proposedContractSetIds, s_proposedContractSetAddresses); + } +} + +/// @notice #updateContracts +contract FunctionsRouter_UpdateContracts is FunctionsRoutesSetup { + FunctionsCoordinatorTestHelper internal s_functionsCoordinator2; // TODO: use actual FunctionsCoordinator instead of helper + bytes32[] s_proposedContractSetIds; + address[] s_proposedContractSetAddresses; + + function setUp() public virtual override { + FunctionsRoutesSetup.setUp(); + + // Deploy new Coordinator contract + s_functionsCoordinator2 = new FunctionsCoordinatorTestHelper( + address(s_functionsRouter), + getCoordinatorConfig(), + address(s_linkEthFeed) + ); + + // Propose new Coordinator contract + s_proposedContractSetIds = new bytes32[](1); + s_proposedContractSetIds[0] = s_donId; + s_proposedContractSetAddresses = new address[](1); + s_proposedContractSetAddresses[0] = address(s_functionsCoordinator2); + + s_functionsRouter.proposeContractsUpdate(s_proposedContractSetIds, s_proposedContractSetAddresses); + } + + function test_UpdateContracts_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + s_functionsRouter.updateContracts(); + } + + event ContractUpdated(bytes32 
id, address from, address to); + + function test_UpdateContracts_Success() public { + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). + bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit ContractUpdated({ + id: s_proposedContractSetIds[0], + from: address(s_functionsCoordinator), + to: s_proposedContractSetAddresses[0] + }); + + s_functionsRouter.updateContracts(); + + (bytes32[] memory proposedContractSetIds, address[] memory proposedContractSetAddresses) = s_functionsRouter + .getProposedContractSet(); + + assertEq(proposedContractSetIds.length, 0); + assertEq(proposedContractSetAddresses.length, 0); + } +} + +/// @notice #_whenNotPaused +contract FunctionsRouter__WhenNotPaused is FunctionsRouterSetup { + // TODO: make contract internal function helper +} + +/// @notice #_onlyRouterOwner +contract FunctionsRouter__OnlyRouterOwner is FunctionsRouterSetup { + // TODO: make contract internal function helper +} + +/// @notice #_onlySenderThatAcceptedToS +contract FunctionsRouter__OnlySenderThatAcceptedToS is FunctionsRouterSetup { + // TODO: make contract internal function helper +} + +/// @notice #pause +contract FunctionsRouter_Pause is FunctionsRouterSetup { + function setUp() public virtual override { + FunctionsRouterSetup.setUp(); + } + + event Paused(address account); + + function test_Pause_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + s_functionsRouter.pause(); + } + + function test_Pause_Success() public { + // topic0 (always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ vm.expectEmit(false, false, false, true); + emit Paused(OWNER_ADDRESS); + + s_functionsRouter.pause(); + + bool isPaused = s_functionsRouter.paused(); + assertEq(isPaused, true); + + vm.expectRevert("Pausable: paused"); + s_functionsRouter.createSubscription(); + } +} + +/// @notice #unpause +contract FunctionsRouter_Unpause is FunctionsRouterSetup { + function setUp() public virtual override { + FunctionsRouterSetup.setUp(); + s_functionsRouter.pause(); + } + + event Unpaused(address account); + + function test_Unpause_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + s_functionsRouter.unpause(); + } + + function test_Unpause_Success() public { + // topic0 (always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). + vm.expectEmit(false, false, false, true); + emit Unpaused(OWNER_ADDRESS); + + s_functionsRouter.unpause(); + + bool isPaused = s_functionsRouter.paused(); + assertEq(isPaused, false); + + s_functionsRouter.createSubscription(); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/FunctionsSubscriptions.t.sol b/contracts/src/v0.8/functions/tests/v1_X/FunctionsSubscriptions.t.sol new file mode 100644 index 00000000..1cc9c292 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/FunctionsSubscriptions.t.sol @@ -0,0 +1,1291 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {BaseTest} from "./BaseTest.t.sol"; +import {FunctionsRouter} from "../../dev/v1_X/FunctionsRouter.sol"; +import {FunctionsSubscriptions} from "../../dev/v1_X/FunctionsSubscriptions.sol"; +import {FunctionsResponse} from "../../dev/v1_X/libraries/FunctionsResponse.sol"; + +import {IERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; + +import {FunctionsRouterSetup, FunctionsOwnerAcceptTermsOfServiceSetup, FunctionsClientSetup, FunctionsSubscriptionSetup, 
FunctionsClientRequestSetup, FunctionsFulfillmentSetup} from "./Setup.t.sol"; + +import "forge-std/Vm.sol"; + +// ================================================================ +// | Functions Subscriptions | +// ================================================================ + +contract FunctionsSubscriptions_Constructor_Helper is FunctionsSubscriptions { + constructor(address link) FunctionsSubscriptions(link) {} + + function getLinkToken() public view returns (IERC20) { + return IERC20(i_linkToken); + } + + // overrides + function _getMaxConsumers() internal pure override returns (uint16) { + return 0; + } + + function _getSubscriptionDepositDetails() internal pure override returns (uint16, uint72) { + return (0, 0); + } + + function _onlySenderThatAcceptedToS() internal override {} + + function _onlyRouterOwner() internal override {} + + function _whenNotPaused() internal override {} +} + +/// @notice #constructor +contract FunctionsSubscriptions_Constructor is BaseTest { + FunctionsSubscriptions_Constructor_Helper s_subscriptionsHelper; + address internal s_linkToken = 0x01BE23585060835E02B77ef475b0Cc51aA1e0709; + + function setUp() public virtual override { + BaseTest.setUp(); + s_subscriptionsHelper = new FunctionsSubscriptions_Constructor_Helper(s_linkToken); + } + + function test_Constructor_Success() public { + assertEq(address(s_linkToken), address(s_subscriptionsHelper.getLinkToken())); + } +} + +/// @notice #_markRequestInFlight +contract FunctionsSubscriptions__MarkRequestInFlight { + // TODO: make contract internal function helper +} + +/// @notice #_pay +contract FunctionsSubscriptions__Pay { + // TODO: make contract internal function helper +} + +/// @notice #ownerCancelSubscription +contract FunctionsSubscriptions_OwnerCancelSubscription is FunctionsSubscriptionSetup { + function test_OwnerCancelSubscription_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable 
by owner"); + s_functionsRouter.ownerCancelSubscription(s_subscriptionId); + } + + function test_OwnerCancelSubscription_RevertIfNoSubscription() public { + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + uint64 invalidSubscriptionId = 123456789; + s_functionsRouter.ownerCancelSubscription(invalidSubscriptionId); + } + + function test_OwnerCancelSubscription_SuccessSubOwnerRefunded() public { + uint256 subscriptionOwnerBalanceBefore = s_linkToken.balanceOf(OWNER_ADDRESS); + s_functionsRouter.ownerCancelSubscription(s_subscriptionId); + uint256 subscriptionOwnerBalanceAfter = s_linkToken.balanceOf(OWNER_ADDRESS); + assertEq(subscriptionOwnerBalanceBefore + s_subscriptionInitialFunding, subscriptionOwnerBalanceAfter); + } + + function test_OwnerCancelSubscription_SuccessWhenRequestInFlight() public { + // send request + string memory sourceCode = "return 'hello world';"; + bytes memory secrets; + string[] memory args = new string[](0); + bytes[] memory bytesArgs = new bytes[](0); + + s_functionsClient.sendRequest(s_donId, sourceCode, secrets, args, bytesArgs, s_subscriptionId, 5500); + s_functionsRouter.ownerCancelSubscription(s_subscriptionId); + } + + function test_OwnerCancelSubscription_SuccessDeletesSubscription() public { + s_functionsRouter.ownerCancelSubscription(s_subscriptionId); + // Subscription should no longer exist + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + s_functionsRouter.getSubscription(s_subscriptionId); + } + + event SubscriptionCanceled(uint64 indexed subscriptionId, address fundsRecipient, uint256 fundsAmount); + + function test_OwnerCancelSubscription_Success() public { + uint256 subscriptionOwnerBalanceBefore = s_linkToken.balanceOf(OWNER_ADDRESS); + + // topic0 (function signature, always checked), topic1 (true), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1SubscriptionId = true; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1SubscriptionId, checkTopic2, checkTopic3, checkData); + emit SubscriptionCanceled(s_subscriptionId, OWNER_ADDRESS, s_subscriptionInitialFunding); + + s_functionsRouter.ownerCancelSubscription(s_subscriptionId); + + uint256 subscriptionOwnerBalanceAfter = s_linkToken.balanceOf(OWNER_ADDRESS); + assertEq(subscriptionOwnerBalanceBefore + s_subscriptionInitialFunding, subscriptionOwnerBalanceAfter); + } +} + +/// @notice #recoverFunds +contract FunctionsSubscriptions_RecoverFunds is FunctionsRouterSetup { + event FundsRecovered(address to, uint256 amount); + + function test_RecoverFunds_Success() public { + uint256 fundsTransferred = 1 * 1e18; // 1 PLI + s_linkToken.transfer(address(s_functionsRouter), fundsTransferred); + + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit FundsRecovered(OWNER_ADDRESS, fundsTransferred); + + uint256 subscriptionOwnerBalanceBefore = s_linkToken.balanceOf(OWNER_ADDRESS); + s_functionsRouter.recoverFunds(OWNER_ADDRESS); + uint256 subscriptionOwnerBalanceAfter = s_linkToken.balanceOf(OWNER_ADDRESS); + assertEq(subscriptionOwnerBalanceBefore + fundsTransferred, subscriptionOwnerBalanceAfter); + } + + function test_RecoverFunds_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + s_functionsRouter.recoverFunds(OWNER_ADDRESS); + } +} + +/// @notice #oracleWithdraw +contract FunctionsSubscriptions_OracleWithdraw is FunctionsFulfillmentSetup { + function test_OracleWithdraw_RevertIfPaused() public { + s_functionsRouter.pause(); + + // Subscription payable balances are set to the Coordinator + // Send as Coordinator contract + vm.stopPrank(); + vm.startPrank(address(s_functionsCoordinator)); + + vm.expectRevert("Pausable: paused"); + + uint96 amountToWithdraw = 1; // more than 0 + s_functionsRouter.oracleWithdraw(NOP_TRANSMITTER_ADDRESS_1, amountToWithdraw); + } + + function test_OracleWithdraw_RevertIfNoAmount() public { + // Subscription payable balances are set to the Coordinator + // Send as Coordinator contract + vm.stopPrank(); + vm.startPrank(address(s_functionsCoordinator)); + + vm.expectRevert(FunctionsSubscriptions.InvalidCalldata.selector); + + uint96 amountToWithdraw = 0; + s_functionsRouter.oracleWithdraw(NOP_TRANSMITTER_ADDRESS_1, amountToWithdraw); + } + + function test_OracleWithdraw_RevertIfAmountMoreThanBalance() public { + // Subscription payable balances are set to the Coordinator + // Send as Coordinator contract + vm.stopPrank(); + vm.startPrank(address(s_functionsCoordinator)); + + vm.expectRevert( + 
abi.encodeWithSelector(FunctionsSubscriptions.InsufficientBalance.selector, s_fulfillmentCoordinatorBalance) + ); + + uint96 amountToWithdraw = s_fulfillmentCoordinatorBalance + 1; + s_functionsRouter.oracleWithdraw(NOP_TRANSMITTER_ADDRESS_1, amountToWithdraw); + } + + function test_OracleWithdraw_RevertIfBalanceInvariant() public { + // Subscription payable balances are set to the Coordinator + // Send as Coordinator contract + // vm.stopPrank(); + // vm.startPrank(address(s_functionsCoordinator)); + // TODO: Use internal function helper contract to modify s_totalLinkBalance + // uint96 amountToWithdraw = s_fulfillmentCoordinatorBalance; + // vm.expectRevert(abi.encodeWithSelector(FunctionsSubscriptions.TotalBalanceInvariantViolated.selector, 0, amountToWithdraw)); + // s_functionsRouter.oracleWithdraw(NOP_TRANSMITTER_ADDRESS_1, amountToWithdraw); + } + + function test_OracleWithdraw_SuccessPaysRecipient() public { + // Subscription payable balances are set to the Coordinator + // Send as Coordinator contract + vm.stopPrank(); + vm.startPrank(address(s_functionsCoordinator)); + + uint256 transmitterBalanceBefore = s_linkToken.balanceOf(NOP_TRANSMITTER_ADDRESS_1); + + uint96 amountToWithdraw = s_fulfillmentCoordinatorBalance; + s_functionsRouter.oracleWithdraw(NOP_TRANSMITTER_ADDRESS_1, amountToWithdraw); + + uint256 transmitterBalanceAfter = s_linkToken.balanceOf(NOP_TRANSMITTER_ADDRESS_1); + assertEq(transmitterBalanceBefore + s_fulfillmentCoordinatorBalance, transmitterBalanceAfter); + } + + function test_OracleWithdraw_SuccessSetsBalanceToZero() public { + // Subscription payable balances are set to the Coordinator + // Send as Coordinator contract + vm.stopPrank(); + vm.startPrank(address(s_functionsCoordinator)); + + uint96 amountToWithdraw = s_fulfillmentCoordinatorBalance; + s_functionsRouter.oracleWithdraw(NOP_TRANSMITTER_ADDRESS_1, amountToWithdraw); + + // Attempt to withdraw 1 Juel after withdrawing full balance + 
vm.expectRevert(abi.encodeWithSelector(FunctionsSubscriptions.InsufficientBalance.selector, 0)); + s_functionsRouter.oracleWithdraw(NOP_TRANSMITTER_ADDRESS_1, 1); + } +} + +/// @notice #ownerWithdraw +contract FunctionsSubscriptions_OwnerWithdraw is FunctionsFulfillmentSetup { + function test_OwnerWithdraw_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + s_functionsRouter.recoverFunds(OWNER_ADDRESS); + } + + function test_OwnerWithdraw_RevertIfAmountMoreThanBalance() public { + vm.expectRevert( + abi.encodeWithSelector(FunctionsSubscriptions.InsufficientBalance.selector, s_fulfillmentRouterOwnerBalance) + ); + + uint96 amountToWithdraw = s_fulfillmentRouterOwnerBalance + 1; + s_functionsRouter.ownerWithdraw(OWNER_ADDRESS, amountToWithdraw); + } + + function test_OwnerWithdraw_RevertIfBalanceInvariant() public { + // TODO: Use internal function helper contract to modify s_totalLinkBalance + // uint96 amountToWithdraw = s_fulfillmentRouterOwnerBalance; + // vm.expectRevert(abi.encodeWithSelector(FunctionsSubscriptions.TotalBalanceInvariantViolated.selector, 0, amountToWithdraw)); + // s_functionsRouter.ownerWithdraw(OWNER_ADDRESS, amountToWithdraw); + } + + function test_OwnerWithdraw_SuccessIfRecipientAddressZero() public { + uint256 balanceBefore = s_linkToken.balanceOf(address(0)); + uint96 amountToWithdraw = s_fulfillmentRouterOwnerBalance; + s_functionsRouter.ownerWithdraw(address(0), amountToWithdraw); + uint256 balanceAfter = s_linkToken.balanceOf(address(0)); + assertEq(balanceBefore + s_fulfillmentRouterOwnerBalance, balanceAfter); + } + + function test_OwnerWithdraw_SuccessIfNoAmount() public { + uint256 balanceBefore = s_linkToken.balanceOf(OWNER_ADDRESS); + uint96 amountToWithdraw = 0; + s_functionsRouter.ownerWithdraw(OWNER_ADDRESS, amountToWithdraw); + uint256 balanceAfter = s_linkToken.balanceOf(OWNER_ADDRESS); + assertEq(balanceBefore + 
s_fulfillmentRouterOwnerBalance, balanceAfter); + } + + function test_OwnerWithdraw_SuccessPaysRecipient() public { + uint256 balanceBefore = s_linkToken.balanceOf(STRANGER_ADDRESS); + + uint96 amountToWithdraw = s_fulfillmentRouterOwnerBalance; + s_functionsRouter.ownerWithdraw(STRANGER_ADDRESS, amountToWithdraw); + + uint256 balanceAfter = s_linkToken.balanceOf(STRANGER_ADDRESS); + assertEq(balanceBefore + s_fulfillmentRouterOwnerBalance, balanceAfter); + } + + function test_OwnerWithdraw_SuccessSetsBalanceToZero() public { + uint96 amountToWithdraw = s_fulfillmentRouterOwnerBalance; + s_functionsRouter.ownerWithdraw(OWNER_ADDRESS, amountToWithdraw); + + // Attempt to withdraw 1 Juel after withdrawing full balance + vm.expectRevert(abi.encodeWithSelector(FunctionsSubscriptions.InsufficientBalance.selector, 0)); + s_functionsRouter.ownerWithdraw(OWNER_ADDRESS, 1); + } +} + +/// @notice #onTokenTransfer +contract FunctionsSubscriptions_OnTokenTransfer is FunctionsClientSetup { + uint64 s_subscriptionId; + + function setUp() public virtual override { + FunctionsClientSetup.setUp(); + + // Create subscription, but do not fund it + s_subscriptionId = s_functionsRouter.createSubscription(); + s_functionsRouter.addConsumer(s_subscriptionId, address(s_functionsClient)); + } + + function test_OnTokenTransfer_RevertIfPaused() public { + // Funding amount must be less than or equal to PLI total supply + uint256 totalSupplyJuels = 1_000_000_000 * 1e18; + s_functionsRouter.pause(); + vm.expectRevert("Pausable: paused"); + s_linkToken.transferAndCall(address(s_functionsRouter), totalSupplyJuels, abi.encode(s_subscriptionId)); + } + + function test_OnTokenTransfer_RevertIfCallerIsNotLink() public { + // Funding amount must be less than or equal to PLI total supply + uint256 totalSupplyJuels = 1_000_000_000 * 1e18; + vm.expectRevert(FunctionsSubscriptions.OnlyCallableFromLink.selector); + s_functionsRouter.onTokenTransfer(address(s_functionsRouter), totalSupplyJuels, 
abi.encode(s_subscriptionId)); + } + + function test_OnTokenTransfer_RevertIfCallerIsNoCalldata() public { + // Funding amount must be less than or equal to PLI total supply + uint256 totalSupplyJuels = 1_000_000_000 * 1e18; + vm.expectRevert(FunctionsSubscriptions.InvalidCalldata.selector); + s_linkToken.transferAndCall(address(s_functionsRouter), totalSupplyJuels, new bytes(0)); + } + + function test_OnTokenTransfer_RevertIfCallerIsNoSubscription() public { + // Funding amount must be less than or equal to PLI total supply + uint256 totalSupplyJuels = 1_000_000_000 * 1e18; + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + uint64 invalidSubscriptionId = 123456789; + s_linkToken.transferAndCall(address(s_functionsRouter), totalSupplyJuels, abi.encode(invalidSubscriptionId)); + } + + function test_OnTokenTransfer_Success() public { + // Funding amount must be less than PLI total supply + uint256 totalSupplyJuels = 1_000_000_000 * 1e18; + // Some of the total supply is already in the subscription account + s_linkToken.transferAndCall(address(s_functionsRouter), totalSupplyJuels, abi.encode(s_subscriptionId)); + uint96 subscriptionBalanceAfter = s_functionsRouter.getSubscription(s_subscriptionId).balance; + assertEq(totalSupplyJuels, subscriptionBalanceAfter); + } +} + +/// @notice #getTotalBalance +contract FunctionsSubscriptions_GetTotalBalance is FunctionsSubscriptionSetup { + function test_GetTotalBalance_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint96 totalBalance = s_functionsRouter.getTotalBalance(); + assertEq(totalBalance, s_subscriptionInitialFunding); + } +} + +/// @notice #getSubscriptionCount +contract FunctionsSubscriptions_GetSubscriptionCount is FunctionsSubscriptionSetup { + function test_GetSubscriptionCount_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint96 subscriptionCount = 
s_functionsRouter.getSubscriptionCount(); + // One subscription was made during setup + assertEq(subscriptionCount, 1); + } +} + +/// @notice #getSubscriptionsInRange +contract FunctionsSubscriptions_GetSubscriptionsInRange is FunctionsSubscriptionSetup { + function setUp() public virtual override { + FunctionsSubscriptionSetup.setUp(); + + // Create 2 more subscriptions + /* uint64 subscriptionId2 = */ s_functionsRouter.createSubscription(); + uint64 subscriptionId3 = s_functionsRouter.createSubscription(); + + // Give each one unique state + // #1 subscriptionId for requests, #2 empty, #3 proposedOwner of stranger + s_functionsRouter.proposeSubscriptionOwnerTransfer(subscriptionId3, STRANGER_ADDRESS); + } + + function test_GetSubscriptionsInRange_RevertIfStartIsAfterEnd() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert(FunctionsSubscriptions.InvalidCalldata.selector); + + s_functionsRouter.getSubscriptionsInRange(1, 0); + } + + function test_GetSubscriptionsInRange_RevertIfEndIsAfterLastSubscription() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint64 lastSubscriptionId = s_functionsRouter.getSubscriptionCount(); + vm.expectRevert(FunctionsSubscriptions.InvalidCalldata.selector); + s_functionsRouter.getSubscriptionsInRange(1, lastSubscriptionId + 1); + } + + function test_GetSubscriptionsInRange_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint64 lastSubscriptionId = s_functionsRouter.getSubscriptionCount(); + FunctionsSubscriptions.Subscription[] memory subscriptions = s_functionsRouter.getSubscriptionsInRange( + s_subscriptionId, + lastSubscriptionId + ); + + assertEq(subscriptions.length, 3); + + // Check subscription 1 + assertEq(subscriptions[0].balance, s_subscriptionInitialFunding); + assertEq(subscriptions[0].owner, OWNER_ADDRESS); + assertEq(subscriptions[0].blockedBalance, 0); + 
assertEq(subscriptions[0].proposedOwner, address(0)); + assertEq(subscriptions[0].consumers[0], address(s_functionsClient)); + assertEq(subscriptions[0].flags, bytes32(0)); + + // Check subscription 2 + assertEq(subscriptions[1].balance, 0); + assertEq(subscriptions[1].owner, OWNER_ADDRESS); + assertEq(subscriptions[1].blockedBalance, 0); + assertEq(subscriptions[1].proposedOwner, address(0)); + assertEq(subscriptions[1].consumers.length, 0); + assertEq(subscriptions[1].flags, bytes32(0)); + + // Check subscription 3 + assertEq(subscriptions[2].balance, 0); + assertEq(subscriptions[2].owner, OWNER_ADDRESS); + assertEq(subscriptions[2].blockedBalance, 0); + assertEq(subscriptions[2].proposedOwner, address(STRANGER_ADDRESS)); + assertEq(subscriptions[2].consumers.length, 0); + assertEq(subscriptions[2].flags, bytes32(0)); + } +} + +/// @notice #getSubscription +contract FunctionsSubscriptions_GetSubscription is FunctionsSubscriptionSetup { + function test_GetSubscription_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + FunctionsSubscriptions.Subscription memory subscription = s_functionsRouter.getSubscription(s_subscriptionId); + + assertEq(subscription.balance, s_subscriptionInitialFunding); + assertEq(subscription.owner, OWNER_ADDRESS); + assertEq(subscription.blockedBalance, 0); + assertEq(subscription.proposedOwner, address(0)); + assertEq(subscription.consumers[0], address(s_functionsClient)); + assertEq(subscription.flags, bytes32(0)); + } +} + +/// @notice #getConsumer +contract FunctionsSubscriptions_GetConsumer is FunctionsSubscriptionSetup { + function test_GetConsumer_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + FunctionsSubscriptions.Consumer memory consumer = s_functionsRouter.getConsumer( + address(s_functionsClient), + s_subscriptionId + ); + + assertEq(consumer.allowed, true); + assertEq(consumer.initiatedRequests, 0); + 
assertEq(consumer.completedRequests, 0); + } +} + +/// @notice #_isExistingSubscription +contract FunctionsSubscriptions__IsExistingSubscription is FunctionsSubscriptionSetup { + // TODO: make contract internal function helper +} + +/// @notice #_isAllowedConsumer +contract FunctionsSubscriptions__IsAllowedConsumer { + // TODO: make contract internal function helper +} + +/// @notice #createSubscription +contract FunctionsSubscriptions_createSubscription is FunctionsOwnerAcceptTermsOfServiceSetup { + event SubscriptionCreated(uint64 indexed subscriptionId, address owner); + + function test_CreateSubscription_Success() public { + // topic0 (function signature, always checked), topic1 (true), NOT topic2 (false), NOT topic3 (false), and data (true). + bool checkTopic1 = true; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionCreated(1, OWNER_ADDRESS); + uint64 firstCallSubscriptionId = s_functionsRouter.createSubscription(); + assertEq(firstCallSubscriptionId, 1); + + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionCreated(2, OWNER_ADDRESS); + uint64 secondCallSubscriptionId = s_functionsRouter.createSubscription(); + assertEq(secondCallSubscriptionId, 2); + + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionCreated(3, OWNER_ADDRESS); + uint64 thirdCallSubscriptionId = s_functionsRouter.createSubscription(); + assertEq(thirdCallSubscriptionId, 3); + } + + function test_CreateSubscription_RevertIfPaused() public { + s_functionsRouter.pause(); + + vm.expectRevert("Pausable: paused"); + s_functionsRouter.createSubscription(); + } + + function test_CreateSubscription_RevertIfNotAllowedSender() public { + // Send as stranger, who has not accepted Terms of Service + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + 
vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.SenderMustAcceptTermsOfService.selector, STRANGER_ADDRESS)); + s_functionsRouter.createSubscription(); + } +} + +/// @notice #createSubscriptionWithConsumer +contract FunctionsSubscriptions_CreateSubscriptionWithConsumer is FunctionsClientSetup { + event SubscriptionCreated(uint64 indexed subscriptionId, address owner); + event SubscriptionConsumerAdded(uint64 indexed subscriptionId, address consumer); + + function test_CreateSubscriptionWithConsumer_Success() public { + // topic0 (function signature, always checked), topic1 (true), NOT topic2 (false), NOT topic3 (false), and data (true). + bool checkTopic1 = true; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionCreated(1, OWNER_ADDRESS); + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionConsumerAdded(1, address(s_functionsClient)); + uint64 firstCallSubscriptionId = s_functionsRouter.createSubscriptionWithConsumer(address(s_functionsClient)); + assertEq(firstCallSubscriptionId, 1); + assertEq(s_functionsRouter.getSubscription(firstCallSubscriptionId).consumers[0], address(s_functionsClient)); + + // Consumer can be address(0) + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionCreated(2, OWNER_ADDRESS); + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionConsumerAdded(2, address(0)); + uint64 secondCallSubscriptionId = s_functionsRouter.createSubscriptionWithConsumer(address(0)); + assertEq(secondCallSubscriptionId, 2); + assertEq(s_functionsRouter.getSubscription(secondCallSubscriptionId).consumers[0], address(0)); + + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionCreated(3, OWNER_ADDRESS); + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionConsumerAdded(3, 
address(s_functionsClient)); + uint64 thirdCallSubscriptionId = s_functionsRouter.createSubscriptionWithConsumer(address(s_functionsClient)); + assertEq(thirdCallSubscriptionId, 3); + assertEq(s_functionsRouter.getSubscription(thirdCallSubscriptionId).consumers[0], address(s_functionsClient)); + } + + function test_CreateSubscriptionWithConsumer_RevertIfPaused() public { + s_functionsRouter.pause(); + + vm.expectRevert("Pausable: paused"); + s_functionsRouter.createSubscriptionWithConsumer(address(s_functionsClient)); + } + + function test_CreateSubscriptionWithConsumer_RevertIfNotAllowedSender() public { + // Send as stranger, who has not accepted Terms of Service + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.SenderMustAcceptTermsOfService.selector, STRANGER_ADDRESS)); + s_functionsRouter.createSubscriptionWithConsumer(address(s_functionsClient)); + } +} + +/// @notice #proposeSubscriptionOwnerTransfer +contract FunctionsSubscriptions_ProposeSubscriptionOwnerTransfer is FunctionsSubscriptionSetup { + uint256 internal NEW_OWNER_PRIVATE_KEY_WITH_TOS = 0x3; + address internal NEW_OWNER_ADDRESS_WITH_TOS = vm.addr(NEW_OWNER_PRIVATE_KEY_WITH_TOS); + uint256 internal NEW_OWNER_PRIVATE_KEY_WITH_TOS2 = 0x4; + address internal NEW_OWNER_ADDRESS_WITH_TOS2 = vm.addr(NEW_OWNER_PRIVATE_KEY_WITH_TOS2); + uint256 internal NEW_OWNER_PRIVATE_KEY_WITHOUT_TOS = 0x5; + address internal NEW_OWNER_ADDRESS_WITHOUT_TOS = vm.addr(NEW_OWNER_PRIVATE_KEY_WITHOUT_TOS); + + function setUp() public virtual override { + FunctionsSubscriptionSetup.setUp(); + + // Accept ToS as new owner #1 + vm.stopPrank(); + vm.startPrank(NEW_OWNER_ADDRESS_WITH_TOS); + bytes32 message = s_termsOfServiceAllowList.getMessage(NEW_OWNER_ADDRESS_WITH_TOS, NEW_OWNER_ADDRESS_WITH_TOS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, 
prefixedMessage); + s_termsOfServiceAllowList.acceptTermsOfService(NEW_OWNER_ADDRESS_WITH_TOS, NEW_OWNER_ADDRESS_WITH_TOS, r, s, v); + + // Accept ToS as new owner #2 + vm.stopPrank(); + vm.startPrank(NEW_OWNER_ADDRESS_WITH_TOS2); + bytes32 message2 = s_termsOfServiceAllowList.getMessage(NEW_OWNER_ADDRESS_WITH_TOS2, NEW_OWNER_ADDRESS_WITH_TOS2); + bytes32 prefixedMessage2 = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message2)); + (uint8 v2, bytes32 r2, bytes32 s2) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage2); + s_termsOfServiceAllowList.acceptTermsOfService( + NEW_OWNER_ADDRESS_WITH_TOS2, + NEW_OWNER_ADDRESS_WITH_TOS2, + r2, + s2, + v2 + ); + + vm.stopPrank(); + vm.startPrank(OWNER_ADDRESS); + } + + function test_ProposeSubscriptionOwnerTransfer_RevertIfPaused() public { + s_functionsRouter.pause(); + + vm.expectRevert("Pausable: paused"); + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, NEW_OWNER_ADDRESS_WITH_TOS); + } + + function test_ProposeSubscriptionOwnerTransfer_RevertIfNoSubscription() public { + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + uint64 invalidSubscriptionId = 123456789; + s_functionsRouter.proposeSubscriptionOwnerTransfer(invalidSubscriptionId, NEW_OWNER_ADDRESS_WITH_TOS); + } + + function test_ProposeSubscriptionOwnerTransfer_RevertIfNotSubscriptionOwner() public { + // Send as non-owner, who has accepted Terms of Service + vm.stopPrank(); + vm.startPrank(NEW_OWNER_ADDRESS_WITH_TOS); + + vm.expectRevert(FunctionsSubscriptions.MustBeSubscriptionOwner.selector); + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, NEW_OWNER_ADDRESS_WITH_TOS); + } + + function test_ProposeSubscriptionOwnerTransfer_RevertIfNotAllowedSender() public { + // Remove owner from Allow List + s_termsOfServiceAllowList.blockSender(OWNER_ADDRESS); + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.SenderMustAcceptTermsOfService.selector, OWNER_ADDRESS)); + 
s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, NEW_OWNER_ADDRESS_WITH_TOS); + } + + function test_ProposeSubscriptionOwnerTransfer_RevertIfEmptyNewOwner() public { + address EMPTY_ADDRESS = address(0); + vm.expectRevert(FunctionsSubscriptions.InvalidCalldata.selector); + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, EMPTY_ADDRESS); + } + + function test_ProposeSubscriptionOwnerTransfer_RevertIfInvalidNewOwner() public { + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, NEW_OWNER_ADDRESS_WITH_TOS); + vm.expectRevert(FunctionsSubscriptions.InvalidCalldata.selector); + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, NEW_OWNER_ADDRESS_WITH_TOS); + } + + event SubscriptionOwnerTransferRequested(uint64 indexed subscriptionId, address from, address to); + + function test_ProposeSubscriptionOwnerTransfer_Success() public { + // topic0 (function signature, always checked), topic1 (true), NOT topic2 (false), NOT topic3 (false), and data (true). + bool checkTopic1 = true; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionOwnerTransferRequested(s_subscriptionId, OWNER_ADDRESS, NEW_OWNER_ADDRESS_WITH_TOS); + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, NEW_OWNER_ADDRESS_WITH_TOS); + assertEq(s_functionsRouter.getSubscription(s_subscriptionId).proposedOwner, NEW_OWNER_ADDRESS_WITH_TOS); + } + + function test_ProposeSubscriptionOwnerTransfer_SuccessChangeProposedOwner() public { + // topic0 (function signature, always checked), topic1 (true), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = true; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionOwnerTransferRequested(s_subscriptionId, OWNER_ADDRESS, NEW_OWNER_ADDRESS_WITH_TOS); + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, NEW_OWNER_ADDRESS_WITH_TOS); + assertEq(s_functionsRouter.getSubscription(s_subscriptionId).proposedOwner, NEW_OWNER_ADDRESS_WITH_TOS); + + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionOwnerTransferRequested(s_subscriptionId, OWNER_ADDRESS, NEW_OWNER_ADDRESS_WITH_TOS2); + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, NEW_OWNER_ADDRESS_WITH_TOS2); + assertEq(s_functionsRouter.getSubscription(s_subscriptionId).proposedOwner, NEW_OWNER_ADDRESS_WITH_TOS2); + } +} + +/// @notice #acceptSubscriptionOwnerTransfer +contract FunctionsSubscriptions_AcceptSubscriptionOwnerTransfer is FunctionsSubscriptionSetup { + uint256 internal NEW_OWNER_PRIVATE_KEY_WITH_TOS = 0x3; + address internal NEW_OWNER_ADDRESS_WITH_TOS = vm.addr(NEW_OWNER_PRIVATE_KEY_WITH_TOS); + uint256 internal NEW_OWNER_PRIVATE_KEY_WITHOUT_TOS = 0x4; + address internal NEW_OWNER_ADDRESS_WITHOUT_TOS = vm.addr(NEW_OWNER_PRIVATE_KEY_WITHOUT_TOS); + + function setUp() public virtual override { + FunctionsSubscriptionSetup.setUp(); + + // Accept ToS as new owner + vm.stopPrank(); + vm.startPrank(NEW_OWNER_ADDRESS_WITH_TOS); + bytes32 message = s_termsOfServiceAllowList.getMessage(NEW_OWNER_ADDRESS_WITH_TOS, NEW_OWNER_ADDRESS_WITH_TOS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + s_termsOfServiceAllowList.acceptTermsOfService(NEW_OWNER_ADDRESS_WITH_TOS, NEW_OWNER_ADDRESS_WITH_TOS, r, s, v); + + vm.stopPrank(); + vm.startPrank(OWNER_ADDRESS); + } + + function 
test_AcceptSubscriptionOwnerTransfer_RevertIfPaused() public { + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, NEW_OWNER_ADDRESS_WITH_TOS); + s_functionsRouter.pause(); + + // Send as new owner, who has accepted Terms of Service + vm.stopPrank(); + vm.startPrank(NEW_OWNER_ADDRESS_WITH_TOS); + + vm.expectRevert("Pausable: paused"); + s_functionsRouter.acceptSubscriptionOwnerTransfer(s_subscriptionId); + } + + function test_AcceptSubscriptionOwnerTransfer_RevertIfNotAllowedSender() public { + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, NEW_OWNER_ADDRESS_WITHOUT_TOS); + + // Send as new owner, who has NOT accepted Terms of Service + vm.stopPrank(); + vm.startPrank(NEW_OWNER_ADDRESS_WITHOUT_TOS); + + vm.expectRevert( + abi.encodeWithSelector(FunctionsRouter.SenderMustAcceptTermsOfService.selector, NEW_OWNER_ADDRESS_WITHOUT_TOS) + ); + s_functionsRouter.acceptSubscriptionOwnerTransfer(s_subscriptionId); + } + + function test_AcceptSubscriptionOwnerTransfer_RevertIfSenderBecomesBlocked() public { + // Propose an address that is allowed to accept ownership + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, NEW_OWNER_ADDRESS_WITH_TOS); + bool hasAccess = s_termsOfServiceAllowList.hasAccess(NEW_OWNER_ADDRESS_WITH_TOS, new bytes(0)); + assertEq(hasAccess, true); + + // Revoke access + s_termsOfServiceAllowList.blockSender(NEW_OWNER_ADDRESS_WITH_TOS); + + // Send as blocked address + vm.stopPrank(); + vm.startPrank(NEW_OWNER_ADDRESS_WITH_TOS); + + vm.expectRevert( + abi.encodeWithSelector(FunctionsRouter.SenderMustAcceptTermsOfService.selector, NEW_OWNER_ADDRESS_WITH_TOS) + ); + s_functionsRouter.acceptSubscriptionOwnerTransfer(s_subscriptionId); + } + + function test_AcceptSubscriptionOwnerTransfer_RevertIfSenderIsNotNewOwner() public { + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, STRANGER_ADDRESS); + + // Send as someone who is not the proposed new owner + vm.stopPrank(); + 
vm.startPrank(NEW_OWNER_ADDRESS_WITH_TOS); + + vm.expectRevert(abi.encodeWithSelector(FunctionsSubscriptions.MustBeProposedOwner.selector, STRANGER_ADDRESS)); + s_functionsRouter.acceptSubscriptionOwnerTransfer(s_subscriptionId); + } + + event SubscriptionOwnerTransferred(uint64 indexed subscriptionId, address from, address to); + + function test_AcceptSubscriptionOwnerTransfer_Success() public { + // Can transfer ownership with a pending request + string memory sourceCode = "return 'hello world';"; + bytes memory secrets; + string[] memory args = new string[](0); + bytes[] memory bytesArgs = new bytes[](0); + s_functionsClient.sendRequest(s_donId, sourceCode, secrets, args, bytesArgs, s_subscriptionId, 5500); + + s_functionsRouter.proposeSubscriptionOwnerTransfer(s_subscriptionId, NEW_OWNER_ADDRESS_WITH_TOS); + + // Send as new owner, who has accepted Terms of Service + vm.stopPrank(); + vm.startPrank(NEW_OWNER_ADDRESS_WITH_TOS); + + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionOwnerTransferred(s_subscriptionId, OWNER_ADDRESS, NEW_OWNER_ADDRESS_WITH_TOS); + + s_functionsRouter.acceptSubscriptionOwnerTransfer(s_subscriptionId); + + FunctionsSubscriptions.Subscription memory subscription = s_functionsRouter.getSubscription(s_subscriptionId); + assertEq(subscription.owner, NEW_OWNER_ADDRESS_WITH_TOS); + assertEq(subscription.proposedOwner, address(0)); + } +} + +/// @notice #removeConsumer +contract FunctionsSubscriptions_RemoveConsumer is FunctionsSubscriptionSetup { + function test_RemoveConsumer_RevertIfPaused() public { + s_functionsRouter.pause(); + + vm.expectRevert("Pausable: paused"); + s_functionsRouter.removeConsumer(s_subscriptionId, address(s_functionsClient)); + } + + function test_RemoveConsumer_RevertIfNoSubscription() public { + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + uint64 invalidSubscriptionId = 123456789; + s_functionsRouter.removeConsumer(invalidSubscriptionId, address(s_functionsClient)); + } + + function test_RemoveConsumer_RevertIfNotSubscriptionOwner() public { + // Accept ToS as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + bytes32 message = s_termsOfServiceAllowList.getMessage(STRANGER_ADDRESS, STRANGER_ADDRESS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + s_termsOfServiceAllowList.acceptTermsOfService(STRANGER_ADDRESS, STRANGER_ADDRESS, r, s, v); + + // Send as non-subscription owner, who has accepted Terms of Service + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert(FunctionsSubscriptions.MustBeSubscriptionOwner.selector); + s_functionsRouter.removeConsumer(s_subscriptionId, address(s_functionsClient)); + } 
+ + function test_RemoveConsumer_RevertIfNotAllowedSender() public { + // Remove owner from Allow List + s_termsOfServiceAllowList.blockSender(OWNER_ADDRESS); + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.SenderMustAcceptTermsOfService.selector, OWNER_ADDRESS)); + s_functionsRouter.removeConsumer(s_subscriptionId, address(s_functionsClient)); + } + + function test_RemoveConsumer_RevertIfInvalidConsumer() public { + vm.expectRevert(FunctionsSubscriptions.InvalidConsumer.selector); + s_functionsRouter.removeConsumer(s_subscriptionId, address(0)); + } + + function test_RemoveConsumer_RevertIfPendingRequests() public { + // Send a minimal request + string memory sourceCode = "return 'hello world';"; + bytes memory secrets; + string[] memory args = new string[](0); + bytes[] memory bytesArgs = new bytes[](0); + + s_functionsClient.sendRequest(s_donId, sourceCode, secrets, args, bytesArgs, s_subscriptionId, 5000); + + vm.expectRevert(FunctionsSubscriptions.CannotRemoveWithPendingRequests.selector); + s_functionsRouter.removeConsumer(s_subscriptionId, address(s_functionsClient)); + } + + event SubscriptionConsumerRemoved(uint64 indexed subscriptionId, address consumer); + + function test_RemoveConsumer_Success() public { + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionConsumerRemoved(s_subscriptionId, address(s_functionsClient)); + s_functionsRouter.removeConsumer(s_subscriptionId, address(s_functionsClient)); + + FunctionsSubscriptions.Subscription memory subscription = s_functionsRouter.getSubscription(s_subscriptionId); + assertEq(subscription.consumers, new address[](0)); + } +} + +/// @notice #_getMaxConsumers +contract FunctionsSubscriptions__GetMaxConsumers is FunctionsRouterSetup { + // TODO: make contract internal function helper +} + +/// @notice #addConsumer +contract FunctionsSubscriptions_AddConsumer is FunctionsSubscriptionSetup { + function test_AddConsumer_RevertIfPaused() public { + s_functionsRouter.pause(); + + vm.expectRevert("Pausable: paused"); + s_functionsRouter.addConsumer(s_subscriptionId, address(1)); + } + + function test_AddConsumer_RevertIfNoSubscription() public { + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + uint64 invalidSubscriptionId = 123456789; + s_functionsRouter.addConsumer(invalidSubscriptionId, address(1)); + } + + function test_AddConsumer_RevertIfNotSubscriptionOwner() public { + // Accept ToS as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + bytes32 message = s_termsOfServiceAllowList.getMessage(STRANGER_ADDRESS, STRANGER_ADDRESS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + s_termsOfServiceAllowList.acceptTermsOfService(STRANGER_ADDRESS, STRANGER_ADDRESS, r, s, v); + + // Send as non-subscription owner, who has accepted Terms of Service + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert(FunctionsSubscriptions.MustBeSubscriptionOwner.selector); + 
s_functionsRouter.addConsumer(s_subscriptionId, address(1)); + } + + function test_AddConsumer_RevertIfNotAllowedSender() public { + // Remove owner from Allow List + s_termsOfServiceAllowList.blockSender(OWNER_ADDRESS); + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.SenderMustAcceptTermsOfService.selector, OWNER_ADDRESS)); + s_functionsRouter.addConsumer(s_subscriptionId, address(1)); + } + + function test_AddConsumer_RevertIfMaximumConsumers() public { + // Fill Consumers to s_maxConsumersPerSubscription + // Already has one from setup + s_functionsRouter.addConsumer(s_subscriptionId, address(1)); + s_functionsRouter.addConsumer(s_subscriptionId, address(2)); + + vm.expectRevert( + abi.encodeWithSelector(FunctionsSubscriptions.TooManyConsumers.selector, s_maxConsumersPerSubscription) + ); + s_functionsRouter.addConsumer(s_subscriptionId, address(3)); + } + + function test_AddConsumer_RevertIfMaximumConsumersAfterConfigUpdate() public { + // Fill Consumers to s_maxConsumersPerSubscription + // Already has one from setup + s_functionsRouter.addConsumer(s_subscriptionId, address(1)); + s_functionsRouter.addConsumer(s_subscriptionId, address(2)); + + // Lower maxConsumersPerSubscription + s_maxConsumersPerSubscription = 1; + FunctionsRouter.Config memory newRouterConfig = getRouterConfig(); + s_functionsRouter.updateConfig(newRouterConfig); + + // .AddConsumer should still revert + vm.expectRevert( + abi.encodeWithSelector(FunctionsSubscriptions.TooManyConsumers.selector, s_maxConsumersPerSubscription) + ); + s_functionsRouter.addConsumer(s_subscriptionId, address(3)); + } + + event SubscriptionConsumerAdded(uint64 indexed subscriptionId, address consumer); + + function test_AddConsumer_Success() public { + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionConsumerAdded(s_subscriptionId, address(1)); + s_functionsRouter.addConsumer(s_subscriptionId, address(1)); + + FunctionsSubscriptions.Subscription memory subscription = s_functionsRouter.getSubscription(s_subscriptionId); + assertEq(subscription.consumers[1], address(1)); + FunctionsSubscriptions.Consumer memory consumer = s_functionsRouter.getConsumer(address(1), s_subscriptionId); + assertEq(consumer.allowed, true); + } +} + +/// @notice #cancelSubscription +contract FunctionsSubscriptions_CancelSubscription is FunctionsSubscriptionSetup { + function test_CancelSubscription_RevertIfPaused() public { + s_functionsRouter.pause(); + + vm.expectRevert("Pausable: paused"); + s_functionsRouter.cancelSubscription(s_subscriptionId, OWNER_ADDRESS); + } + + function test_CancelSubscription_RevertIfNoSubscription() public { + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + uint64 invalidSubscriptionId = 123456789; + s_functionsRouter.cancelSubscription(invalidSubscriptionId, OWNER_ADDRESS); + } + + function test_CancelSubscription_RevertIfNotSubscriptionOwner() public { + // Accept ToS as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + bytes32 message = s_termsOfServiceAllowList.getMessage(STRANGER_ADDRESS, STRANGER_ADDRESS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + s_termsOfServiceAllowList.acceptTermsOfService(STRANGER_ADDRESS, STRANGER_ADDRESS, r, s, v); + + // Send as non-subscription owner, who has accepted Terms of Service + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert(FunctionsSubscriptions.MustBeSubscriptionOwner.selector); + 
s_functionsRouter.cancelSubscription(s_subscriptionId, OWNER_ADDRESS); + } + + function test_CancelSubscription_RevertIfNotAllowedSender() public { + // Remove owner from Allow List + s_termsOfServiceAllowList.blockSender(OWNER_ADDRESS); + + vm.expectRevert(abi.encodeWithSelector(FunctionsRouter.SenderMustAcceptTermsOfService.selector, OWNER_ADDRESS)); + s_functionsRouter.cancelSubscription(s_subscriptionId, OWNER_ADDRESS); + } + + function test_CancelSubscription_RevertIfPendingRequests() public { + // Send a minimal request + string memory sourceCode = "return 'hello world';"; + bytes memory secrets; + string[] memory args = new string[](0); + bytes[] memory bytesArgs = new bytes[](0); + + s_functionsClient.sendRequest(s_donId, sourceCode, secrets, args, bytesArgs, s_subscriptionId, 5000); + + vm.expectRevert(FunctionsSubscriptions.CannotRemoveWithPendingRequests.selector); + s_functionsRouter.cancelSubscription(s_subscriptionId, OWNER_ADDRESS); + } + + event SubscriptionCanceled(uint64 indexed subscriptionId, address fundsRecipient, uint256 fundsAmount); + + function test_CancelSubscription_SuccessForfeitAllBalanceAsDeposit() public { + // No requests have been completed + assertEq(s_functionsRouter.getConsumer(address(s_functionsClient), s_subscriptionId).completedRequests, 0); + // Subscription balance is less than deposit amount + assertLe(s_functionsRouter.getSubscription(s_subscriptionId).balance, s_subscriptionDepositJuels); + + uint256 subscriptionOwnerBalanceBefore = s_linkToken.balanceOf(OWNER_ADDRESS); + + uint96 expectedRefund = 0; + + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionCanceled(s_subscriptionId, OWNER_ADDRESS, expectedRefund); + + s_functionsRouter.cancelSubscription(s_subscriptionId, OWNER_ADDRESS); + + uint256 subscriptionOwnerBalanceAfter = s_linkToken.balanceOf(OWNER_ADDRESS); + assertEq(subscriptionOwnerBalanceBefore + expectedRefund, subscriptionOwnerBalanceAfter); + + // Subscription should no longer exist + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + s_functionsRouter.getSubscription(s_subscriptionId); + + // Router owner should have expectedDepositWithheld to withdraw + uint96 expectedDepositWithheld = s_subscriptionInitialFunding; + uint256 balanceBeforeWithdraw = s_linkToken.balanceOf(STRANGER_ADDRESS); + s_functionsRouter.ownerWithdraw(STRANGER_ADDRESS, 0); + uint256 balanceAfterWithdraw = s_linkToken.balanceOf(STRANGER_ADDRESS); + assertEq(balanceBeforeWithdraw + expectedDepositWithheld, balanceAfterWithdraw); + } + + function test_CancelSubscription_SuccessForfeitSomeBalanceAsDeposit() public { + // No requests have been completed + assertEq(s_functionsRouter.getConsumer(address(s_functionsClient), s_subscriptionId).completedRequests, 0); + // Subscription balance is more than deposit amount, double fund the subscription + s_linkToken.transferAndCall(address(s_functionsRouter), s_subscriptionInitialFunding, abi.encode(s_subscriptionId)); + assertGe(s_functionsRouter.getSubscription(s_subscriptionId).balance, s_subscriptionDepositJuels); + + uint256 subscriptionOwnerBalanceBefore = s_linkToken.balanceOf(OWNER_ADDRESS); + + uint96 expectedRefund = (s_subscriptionInitialFunding * 2) - s_subscriptionDepositJuels; + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionCanceled(s_subscriptionId, OWNER_ADDRESS, expectedRefund); + + s_functionsRouter.cancelSubscription(s_subscriptionId, OWNER_ADDRESS); + + uint256 subscriptionOwnerBalanceAfter = s_linkToken.balanceOf(OWNER_ADDRESS); + assertEq(subscriptionOwnerBalanceBefore + expectedRefund, subscriptionOwnerBalanceAfter); + + // Subscription should no longer exist + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + s_functionsRouter.getSubscription(s_subscriptionId); + + // Router owner should have expectedDepositWithheld to withdraw + uint96 expectedDepositWithheld = s_subscriptionDepositJuels; + uint256 balanceBeforeWithdraw = s_linkToken.balanceOf(STRANGER_ADDRESS); + s_functionsRouter.ownerWithdraw(STRANGER_ADDRESS, 0); + uint256 balanceAfterWithdraw = s_linkToken.balanceOf(STRANGER_ADDRESS); + assertEq(balanceBeforeWithdraw + expectedDepositWithheld, balanceAfterWithdraw); + } +} + +/// @notice #cancelSubscription +contract FunctionsSubscriptions_CancelSubscription_ReceiveDeposit is FunctionsFulfillmentSetup { + event SubscriptionCanceled(uint64 indexed subscriptionId, address fundsRecipient, uint256 fundsAmount); + + function test_CancelSubscription_SuccessRecieveDeposit() public { + uint96 totalCostJuels = s_fulfillmentRouterOwnerBalance + s_fulfillmentCoordinatorBalance; + + uint256 subscriptionOwnerBalanceBefore = s_linkToken.balanceOf(OWNER_ADDRESS); + + uint96 expectedRefund = s_subscriptionInitialFunding - totalCostJuels; + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit SubscriptionCanceled(s_subscriptionId, OWNER_ADDRESS, expectedRefund); + + s_functionsRouter.cancelSubscription(s_subscriptionId, OWNER_ADDRESS); + + uint256 subscriptionOwnerBalanceAfter = s_linkToken.balanceOf(OWNER_ADDRESS); + assertEq(subscriptionOwnerBalanceBefore + expectedRefund, subscriptionOwnerBalanceAfter); + + // Subscription should no longer exist + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + s_functionsRouter.getSubscription(s_subscriptionId); + } +} + +/// @notice #_cancelSubscriptionHelper +contract FunctionsSubscriptions__CancelSubscriptionHelper { + // TODO: make contract internal function helper +} + +/// @notice #pendingRequestExists +contract FunctionsSubscriptions_PendingRequestExists is FunctionsFulfillmentSetup { + function test_PendingRequestExists_SuccessFalseIfNoPendingRequests() public { + bool hasPendingRequests = s_functionsRouter.pendingRequestExists(s_subscriptionId); + assertEq(hasPendingRequests, false); + } + + function test_PendingRequestExists_SuccessTrueIfPendingRequests() public { + // Send a minimal request + string memory sourceCode = "return 'hello world';"; + bytes memory secrets; + string[] memory args = new string[](0); + bytes[] memory bytesArgs = new bytes[](0); + + s_functionsClient.sendRequest(s_donId, sourceCode, secrets, args, bytesArgs, s_subscriptionId, 5000); + + bool hasPendingRequests = s_functionsRouter.pendingRequestExists(s_subscriptionId); + assertEq(hasPendingRequests, true); + } +} + +/// @notice #setFlags +contract FunctionsSubscriptions_SetFlags is FunctionsSubscriptionSetup { + function test_SetFlags_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + bytes32 flagsToSet = bytes32("1"); + 
s_functionsRouter.setFlags(s_subscriptionId, flagsToSet); + } + + function test_SetFlags_RevertIfNoSubscription() public { + vm.expectRevert(FunctionsSubscriptions.InvalidSubscription.selector); + uint64 invalidSubscriptionId = 123456789; + bytes32 flagsToSet = bytes32("1"); + s_functionsRouter.setFlags(invalidSubscriptionId, flagsToSet); + } + + function test_SetFlags_Success() public { + bytes32 flagsToSet = bytes32("1"); + s_functionsRouter.setFlags(s_subscriptionId, flagsToSet); + bytes32 flags = s_functionsRouter.getFlags(s_subscriptionId); + assertEq(flags, flagsToSet); + } +} + +/// @notice #getFlags +contract FunctionsSubscriptions_GetFlags is FunctionsSubscriptionSetup { + function test_GetFlags_SuccessInvalidSubscription() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint64 invalidSubscriptionId = 999999; + + bytes32 flags = s_functionsRouter.getFlags(invalidSubscriptionId); + assertEq(flags, bytes32(0)); + } + + function test_GetFlags_SuccessValidSubscription() public { + // Set flags + bytes32 flagsToSet = bytes32("1"); + s_functionsRouter.setFlags(s_subscriptionId, flagsToSet); + + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes32 flags = s_functionsRouter.getFlags(s_subscriptionId); + assertEq(flags, flagsToSet); + } +} + +/// @notice #timeoutRequests +contract FunctionsSubscriptions_TimeoutRequests is FunctionsClientRequestSetup { + function test_TimeoutRequests_RevertIfPaused() public { + s_functionsRouter.pause(); + + vm.expectRevert("Pausable: paused"); + FunctionsResponse.Commitment[] memory commitments = new FunctionsResponse.Commitment[](1); + commitments[0] = s_requests[1].commitment; + s_functionsRouter.timeoutRequests(commitments); + } + + function test_TimeoutRequests_RevertInvalidRequest() public { + // Modify the commitment so that it doesn't match + s_requests[1].commitment.donFee = 123456789; + FunctionsResponse.Commitment[] memory commitments = new 
FunctionsResponse.Commitment[](1); + commitments[0] = s_requests[1].commitment; + vm.expectRevert(FunctionsSubscriptions.InvalidCalldata.selector); + s_functionsRouter.timeoutRequests(commitments); + } + + function test_TimeoutRequests_RevertIfTimeoutNotExceeded() public { + vm.expectRevert(FunctionsSubscriptions.TimeoutNotExceeded.selector); + FunctionsResponse.Commitment[] memory commitments = new FunctionsResponse.Commitment[](1); + commitments[0] = s_requests[1].commitment; + s_functionsRouter.timeoutRequests(commitments); + } + + event RequestTimedOut(bytes32 indexed requestId); + + function test_TimeoutRequests_Success() public { + uint64 consumerCompletedRequestsBefore = s_functionsRouter + .getConsumer(address(s_functionsClient), s_subscriptionId) + .completedRequests; + + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). + bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit RequestTimedOut(s_requests[1].requestId); + + // Jump ahead in time past timeout timestamp + vm.warp(s_requests[1].commitment.timeoutTimestamp + 1); + + FunctionsResponse.Commitment[] memory commitments = new FunctionsResponse.Commitment[](1); + commitments[0] = s_requests[1].commitment; + s_functionsRouter.timeoutRequests(commitments); + + // Releases blocked balance and increments completed requests + uint96 subscriptionBlockedBalanceAfter = s_functionsRouter.getSubscription(s_subscriptionId).blockedBalance; + assertEq(0, subscriptionBlockedBalanceAfter); + uint64 consumerCompletedRequestsAfter = s_functionsRouter + .getConsumer(address(s_functionsClient), s_subscriptionId) + .completedRequests; + assertEq(consumerCompletedRequestsBefore + 1, consumerCompletedRequestsAfter); + } +} + +// @notice #_onlySubscriptionOwner +contract FunctionsSubscriptions__OnlySubscriptionOwner { + // 
TODO: make contract internal function helper +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/FunctionsTermsOfServiceAllowList.t.sol b/contracts/src/v0.8/functions/tests/v1_X/FunctionsTermsOfServiceAllowList.t.sol new file mode 100644 index 00000000..e121f7b8 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/FunctionsTermsOfServiceAllowList.t.sol @@ -0,0 +1,513 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {TermsOfServiceAllowList} from "../../dev/v1_X/accessControl/TermsOfServiceAllowList.sol"; +import {TermsOfServiceAllowListConfig} from "../../dev/v1_X/accessControl/interfaces/ITermsOfServiceAllowList.sol"; +import {FunctionsClientTestHelper} from "./testhelpers/FunctionsClientTestHelper.sol"; + +import {FunctionsRoutesSetup, FunctionsOwnerAcceptTermsOfServiceSetup} from "./Setup.t.sol"; +import "forge-std/Vm.sol"; + +/// @notice #constructor +contract FunctionsTermsOfServiceAllowList_Constructor is FunctionsRoutesSetup { + function test_Constructor_Success() public { + assertEq(s_termsOfServiceAllowList.typeAndVersion(), "Functions Terms of Service Allow List v1.1.0"); + assertEq(s_termsOfServiceAllowList.owner(), OWNER_ADDRESS); + } +} + +/// @notice #getConfig +contract FunctionsTermsOfServiceAllowList_GetConfig is FunctionsRoutesSetup { + function test_GetConfig_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + TermsOfServiceAllowListConfig memory config = s_termsOfServiceAllowList.getConfig(); + assertEq(config.enabled, getTermsOfServiceConfig().enabled); + assertEq(config.signerPublicKey, getTermsOfServiceConfig().signerPublicKey); + } +} + +/// @notice #updateConfig +contract FunctionsTermsOfServiceAllowList_UpdateConfig is FunctionsRoutesSetup { + function test_UpdateConfig_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + s_termsOfServiceAllowList.updateConfig( + 
TermsOfServiceAllowListConfig({enabled: true, signerPublicKey: STRANGER_ADDRESS}) + ); + } + + event ConfigUpdated(TermsOfServiceAllowListConfig config); + + function test_UpdateConfig_Success() public { + TermsOfServiceAllowListConfig memory configToSet = TermsOfServiceAllowListConfig({ + enabled: false, + signerPublicKey: TOS_SIGNER + }); + + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). + bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit ConfigUpdated(configToSet); + + s_termsOfServiceAllowList.updateConfig(configToSet); + + TermsOfServiceAllowListConfig memory config = s_termsOfServiceAllowList.getConfig(); + assertEq(config.enabled, configToSet.enabled); + assertEq(config.signerPublicKey, configToSet.signerPublicKey); + } +} + +/// @notice #getMessage +contract FunctionsTermsOfServiceAllowList_GetMessage is FunctionsRoutesSetup { + function test_GetMessage_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes32 message = s_termsOfServiceAllowList.getMessage(STRANGER_ADDRESS, STRANGER_ADDRESS); + + assertEq(message, keccak256(abi.encodePacked(STRANGER_ADDRESS, STRANGER_ADDRESS))); + } +} + +/// @notice #acceptTermsOfService +contract FunctionsTermsOfServiceAllowList_AcceptTermsOfService is FunctionsRoutesSetup { + function test_AcceptTermsOfService_RevertIfBlockedSender() public { + s_termsOfServiceAllowList.blockSender(STRANGER_ADDRESS); + + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes32 message = s_termsOfServiceAllowList.getMessage(STRANGER_ADDRESS, STRANGER_ADDRESS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + + 
vm.expectRevert(TermsOfServiceAllowList.RecipientIsBlocked.selector); + + s_termsOfServiceAllowList.acceptTermsOfService(STRANGER_ADDRESS, STRANGER_ADDRESS, r, s, v); + } + + function test_AcceptTermsOfService_RevertIfInvalidSigner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes32 message = s_termsOfServiceAllowList.getMessage(STRANGER_ADDRESS, STRANGER_ADDRESS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(STRANGER_PRIVATE_KEY, prefixedMessage); + + vm.expectRevert(TermsOfServiceAllowList.InvalidSignature.selector); + + s_termsOfServiceAllowList.acceptTermsOfService(STRANGER_ADDRESS, STRANGER_ADDRESS, r, s, v); + } + + function test_AcceptTermsOfService_RevertIfRecipientIsNotSender() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes32 message = s_termsOfServiceAllowList.getMessage(OWNER_ADDRESS, STRANGER_ADDRESS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + + vm.expectRevert(TermsOfServiceAllowList.InvalidUsage.selector); + + s_termsOfServiceAllowList.acceptTermsOfService(OWNER_ADDRESS, STRANGER_ADDRESS, r, s, v); + } + + function test_AcceptTermsOfService_RevertIfAcceptorIsNotSender() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes32 message = s_termsOfServiceAllowList.getMessage(STRANGER_ADDRESS, OWNER_ADDRESS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + + vm.expectRevert(TermsOfServiceAllowList.InvalidUsage.selector); + + s_termsOfServiceAllowList.acceptTermsOfService(STRANGER_ADDRESS, OWNER_ADDRESS, r, s, v); + } + + function 
test_AcceptTermsOfService_RevertIfRecipientContractIsNotSender() public { + FunctionsClientTestHelper s_functionsClientHelper = new FunctionsClientTestHelper(address(s_functionsRouter)); + + // Send as externally owned account + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + // Attempt to accept for a contract account + bytes32 message = s_termsOfServiceAllowList.getMessage(STRANGER_ADDRESS, address(s_functionsClientHelper)); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + + vm.expectRevert(TermsOfServiceAllowList.InvalidUsage.selector); + + s_termsOfServiceAllowList.acceptTermsOfService(STRANGER_ADDRESS, address(s_functionsClientHelper), r, s, v); + } + + function testAcceptTermsOfService_InvalidSigner_vuln() public { + // Set the signer as the zero address + TermsOfServiceAllowListConfig memory allowListConfig; + allowListConfig.enabled = true; + allowListConfig.signerPublicKey = address(0); + s_termsOfServiceAllowList.updateConfig(allowListConfig); + + // Provide garbage data (v cannot be 29) to generate an invalid signature + uint8 v = 29; + bytes32 r = 0x0101010000000000000000000000000000000000000000000000000000000000; + bytes32 s = 0x0101010000000000000000000000000000000000000000000000000000000000; + + // Expect a revert on invalid signature but the call is successful + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + // vm.expectRevert(TermsOfServiceAllowList.InvalidSignature.selector); + // TODO: Add validation to setConfig to prevent empty signer + s_termsOfServiceAllowList.acceptTermsOfService(STRANGER_ADDRESS, STRANGER_ADDRESS, r, s, v); + } + + event AddedAccess(address user); + + function test_AcceptTermsOfService_SuccessIfAcceptingForSelf() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes32 message = s_termsOfServiceAllowList.getMessage(STRANGER_ADDRESS, 
STRANGER_ADDRESS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). + bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit AddedAccess(STRANGER_ADDRESS); + + s_termsOfServiceAllowList.acceptTermsOfService(STRANGER_ADDRESS, STRANGER_ADDRESS, r, s, v); + + assertTrue(s_termsOfServiceAllowList.hasAccess(STRANGER_ADDRESS, new bytes(0))); + + // Check the addedAccess is not emitted, given the recipient was already in the list + vm.recordLogs(); + s_termsOfServiceAllowList.acceptTermsOfService(STRANGER_ADDRESS, STRANGER_ADDRESS, r, s, v); + Vm.Log[] memory entries = vm.getRecordedLogs(); + assertEq(entries.length, 0); + + assertTrue(s_termsOfServiceAllowList.hasAccess(STRANGER_ADDRESS, new bytes(0))); + } + + function test_AcceptTermsOfService_SuccessIfAcceptingForContract() public { + FunctionsClientTestHelper s_functionsClientHelper = new FunctionsClientTestHelper(address(s_functionsRouter)); + + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + bytes32 message = s_termsOfServiceAllowList.getMessage(STRANGER_ADDRESS, address(s_functionsClientHelper)); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit AddedAccess(address(s_functionsClientHelper)); + + s_functionsClientHelper.acceptTermsOfService(STRANGER_ADDRESS, address(s_functionsClientHelper), r, s, v); + + assertEq(s_termsOfServiceAllowList.hasAccess(address(s_functionsClientHelper), new bytes(0)), true); + } +} + +/// @notice #getAllAllowedSenders +contract FunctionsTermsOfServiceAllowList_GetAllAllowedSenders is FunctionsOwnerAcceptTermsOfServiceSetup { + function test_GetAllAllowedSenders_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + address[] memory expectedSenders = new address[](1); + expectedSenders[0] = OWNER_ADDRESS; + + assertEq(s_termsOfServiceAllowList.getAllAllowedSenders(), expectedSenders); + } +} + +/// @notice #getAllowedSendersCount +contract FunctionsTermsOfServiceAllowList_GetAllowedSendersCount is FunctionsOwnerAcceptTermsOfServiceSetup { + function test_GetAllowedSendersCount_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint96 allowedSendersCount = s_termsOfServiceAllowList.getAllowedSendersCount(); + // One allowed sender was made during setup + assertEq(allowedSendersCount, 1); + } +} + +/// @notice #getAllowedSendersInRange +contract FunctionsTermsOfServiceAllowList_GetAllowedSendersInRange is FunctionsOwnerAcceptTermsOfServiceSetup { + function test_GetAllowedSendersInRange_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + address[] memory expectedSenders = new address[](1); + expectedSenders[0] = OWNER_ADDRESS; + + assertEq(s_termsOfServiceAllowList.getAllowedSendersInRange(0, 0), expectedSenders); + } + + function test_GetAllowedSendersInRange_RevertIfAllowedSendersIsEmpty() public { + // setup a new empty s_termsOfServiceAllowList + 
FunctionsRoutesSetup.setUp(); + + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint64 AllowedSendersCount = s_termsOfServiceAllowList.getAllowedSendersCount(); + uint64 expected = 0; + assertEq(AllowedSendersCount, expected); + + vm.expectRevert(TermsOfServiceAllowList.InvalidCalldata.selector); + s_termsOfServiceAllowList.getAllowedSendersInRange(0, 0); + } + + function test_GetAllowedSendersInRange_RevertIfStartIsAfterEnd() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert(TermsOfServiceAllowList.InvalidCalldata.selector); + + s_termsOfServiceAllowList.getAllowedSendersInRange(1, 0); + } + + function test_GetAllowedSendersInRange_RevertIfEndIsAfterLastAllowedSender() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint64 AllowedSendersCount = s_termsOfServiceAllowList.getAllowedSendersCount(); + vm.expectRevert(TermsOfServiceAllowList.InvalidCalldata.selector); + s_termsOfServiceAllowList.getAllowedSendersInRange(1, AllowedSendersCount + 1); + } +} + +/// @notice #hasAccess +contract FunctionsTermsOfServiceAllowList_HasAccess is FunctionsRoutesSetup { + function test_HasAccess_FalseWhenEnabled() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + // Check access of account that is not on the allow list + assertEq(s_termsOfServiceAllowList.hasAccess(STRANGER_ADDRESS, new bytes(0)), false); + } + + function test_HasAccess_TrueWhenDisabled() public { + // Disable allow list, which opens all access + s_termsOfServiceAllowList.updateConfig( + TermsOfServiceAllowListConfig({enabled: false, signerPublicKey: TOS_SIGNER}) + ); + + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + // Check access of account that is not on the allow list + assertEq(s_termsOfServiceAllowList.hasAccess(STRANGER_ADDRESS, new bytes(0)), true); + } +} + +/// @notice #isBlockedSender +contract 
FunctionsTermsOfServiceAllowList_IsBlockedSender is FunctionsRoutesSetup { + function test_IsBlockedSender_SuccessFalse() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + assertEq(s_termsOfServiceAllowList.isBlockedSender(STRANGER_ADDRESS), false); + } + + function test_IsBlockedSender_SuccessTrue() public { + // Block sender + s_termsOfServiceAllowList.blockSender(STRANGER_ADDRESS); + + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + assertEq(s_termsOfServiceAllowList.isBlockedSender(STRANGER_ADDRESS), true); + } +} + +/// @notice #blockSender +contract FunctionsTermsOfServiceAllowList_BlockSender is FunctionsRoutesSetup { + function test_BlockSender_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + s_termsOfServiceAllowList.blockSender(OWNER_ADDRESS); + } + + event BlockedAccess(address user); + + function test_BlockSender_Success() public { + assertFalse(s_termsOfServiceAllowList.isBlockedSender(STRANGER_ADDRESS)); + + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit BlockedAccess(STRANGER_ADDRESS); + + s_termsOfServiceAllowList.blockSender(STRANGER_ADDRESS); + assertFalse(s_termsOfServiceAllowList.hasAccess(STRANGER_ADDRESS, new bytes(0))); + assertTrue(s_termsOfServiceAllowList.isBlockedSender(STRANGER_ADDRESS)); + + // Account can no longer accept Terms of Service + bytes32 message = s_termsOfServiceAllowList.getMessage(STRANGER_ADDRESS, STRANGER_ADDRESS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + vm.expectRevert(TermsOfServiceAllowList.RecipientIsBlocked.selector); + s_termsOfServiceAllowList.acceptTermsOfService(STRANGER_ADDRESS, STRANGER_ADDRESS, r, s, v); + } +} + +/// @notice #unblockSender +contract FunctionsTermsOfServiceAllowList_UnblockSender is FunctionsRoutesSetup { + function setUp() public virtual override { + FunctionsRoutesSetup.setUp(); + + s_termsOfServiceAllowList.blockSender(STRANGER_ADDRESS); + } + + function test_UnblockSender_RevertIfNotOwner() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert("Only callable by owner"); + s_termsOfServiceAllowList.unblockSender(STRANGER_ADDRESS); + } + + event UnblockedAccess(address user); + + function test_UnblockSender_Success() public { + // topic0 (function signature, always checked), NOT topic1 (false), NOT topic2 (false), NOT topic3 (false), and data (true). 
+ bool checkTopic1 = false; + bool checkTopic2 = false; + bool checkTopic3 = false; + bool checkData = true; + vm.expectEmit(checkTopic1, checkTopic2, checkTopic3, checkData); + emit UnblockedAccess(STRANGER_ADDRESS); + + s_termsOfServiceAllowList.unblockSender(STRANGER_ADDRESS); + + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + // Account can now accept the Terms of Service + bytes32 message = s_termsOfServiceAllowList.getMessage(STRANGER_ADDRESS, STRANGER_ADDRESS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + s_termsOfServiceAllowList.acceptTermsOfService(STRANGER_ADDRESS, STRANGER_ADDRESS, r, s, v); + } +} + +/// @notice #getBlockedSendersCount +contract FunctionsTermsOfServiceAllowList_GetBlockedSendersCount is FunctionsRoutesSetup { + function setUp() public virtual override { + FunctionsRoutesSetup.setUp(); + + s_termsOfServiceAllowList.blockSender(STRANGER_ADDRESS); + } + + function test_GetBlockedSendersCount_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint96 blockedSendersCount = s_termsOfServiceAllowList.getBlockedSendersCount(); + // One blocked sender was made during setup + assertEq(blockedSendersCount, 1); + } +} + +/// @notice #getBlockedSendersInRange +contract FunctionsTermsOfServiceAllowList_GetBlockedSendersInRange is FunctionsRoutesSetup { + function setUp() public virtual override { + FunctionsRoutesSetup.setUp(); + + s_termsOfServiceAllowList.blockSender(STRANGER_ADDRESS); + } + + function test_GetBlockedSendersInRange_Success() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + address[] memory expectedBlockedSenders = new address[](1); + expectedBlockedSenders[0] = STRANGER_ADDRESS; + + assertEq(s_termsOfServiceAllowList.getBlockedSendersInRange(0, 0), expectedBlockedSenders); + 
} + + function test_GetBlockedSendersInRange_RevertIfAllowedSendersIsEmpty() public { + // setup a new empty s_termsOfServiceBlockList + FunctionsRoutesSetup.setUp(); + + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint64 BlockedSendersCount = s_termsOfServiceAllowList.getBlockedSendersCount(); + uint64 expected = 0; + assertEq(BlockedSendersCount, expected); + + vm.expectRevert(TermsOfServiceAllowList.InvalidCalldata.selector); + s_termsOfServiceAllowList.getBlockedSendersInRange(0, 0); + } + + function test_GetBlockedSendersInRange_RevertIfStartIsAfterEnd() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + vm.expectRevert(TermsOfServiceAllowList.InvalidCalldata.selector); + + s_termsOfServiceAllowList.getBlockedSendersInRange(1, 0); + } + + function test_GetBlockedSendersInRange_RevertIfEndIsAfterLastAllowedSender() public { + // Send as stranger + vm.stopPrank(); + vm.startPrank(STRANGER_ADDRESS); + + uint64 BlockedSendersCount = s_termsOfServiceAllowList.getBlockedSendersCount(); + vm.expectRevert(TermsOfServiceAllowList.InvalidCalldata.selector); + s_termsOfServiceAllowList.getBlockedSendersInRange(1, BlockedSendersCount + 1); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/Gas.t.sol b/contracts/src/v0.8/functions/tests/v1_X/Gas.t.sol new file mode 100644 index 00000000..4e46ddfe --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/Gas.t.sol @@ -0,0 +1,377 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {BaseTest} from "./BaseTest.t.sol"; +import {FunctionsRouter} from "../../dev/v1_X/FunctionsRouter.sol"; +import {FunctionsSubscriptions} from "../../dev/v1_X/FunctionsSubscriptions.sol"; +import {FunctionsRequest} from "../../dev/v1_X/libraries/FunctionsRequest.sol"; +import {FunctionsResponse} from "../../dev/v1_X/libraries/FunctionsResponse.sol"; +import {FunctionsClientTestHelper} from "./testhelpers/FunctionsClientTestHelper.sol"; + +import 
{FunctionsRoutesSetup, FunctionsOwnerAcceptTermsOfServiceSetup, FunctionsSubscriptionSetup, FunctionsClientRequestSetup} from "./Setup.t.sol"; + +import "forge-std/Vm.sol"; + +/// @notice #acceptTermsOfService +contract Gas_AcceptTermsOfService is FunctionsRoutesSetup { + bytes32 s_sigR; + bytes32 s_sigS; + uint8 s_sigV; + + function setUp() public virtual override { + vm.pauseGasMetering(); + + FunctionsRoutesSetup.setUp(); + + bytes32 message = s_termsOfServiceAllowList.getMessage(OWNER_ADDRESS, OWNER_ADDRESS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (s_sigV, s_sigR, s_sigS) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + } + + function test_AcceptTermsOfService_Gas() public { + // Pull storage variables into memory + address ownerAddress = OWNER_ADDRESS; + bytes32 sigR = s_sigR; + bytes32 sigS = s_sigS; + uint8 sigV = s_sigV; + vm.resumeGasMetering(); + + s_termsOfServiceAllowList.acceptTermsOfService(ownerAddress, ownerAddress, sigR, sigS, sigV); + } +} + +/// @notice #createSubscription +contract Gas_CreateSubscription is FunctionsOwnerAcceptTermsOfServiceSetup { + function test_CreateSubscription_Gas() public { + s_functionsRouter.createSubscription(); + } +} + +/// @notice #addConsumer +contract Gas_AddConsumer is FunctionsSubscriptionSetup { + function setUp() public virtual override { + vm.pauseGasMetering(); + + FunctionsSubscriptionSetup.setUp(); + } + + function test_AddConsumer_Gas() public { + // Keep input data in memory + uint64 subscriptionId = s_subscriptionId; + address consumerAddress = address(s_functionsCoordinator); // use garbage address + vm.resumeGasMetering(); + + s_functionsRouter.addConsumer(subscriptionId, consumerAddress); + } +} + +/// @notice #fundSubscription +contract Gas_FundSubscription is FunctionsSubscriptionSetup { + function setUp() public virtual override { + vm.pauseGasMetering(); + + FunctionsSubscriptionSetup.setUp(); + } + + function 
test_FundSubscription_Gas() public { + // Keep input data in memory + address routerAddress = address(s_functionsRouter); + uint96 s_subscriptionFunding = 10 * JUELS_PER_PLI; // 10 PLI + bytes memory data = abi.encode(s_subscriptionId); + vm.resumeGasMetering(); + + s_linkToken.transferAndCall(routerAddress, s_subscriptionFunding, data); + } +} + +/// @notice #sendRequest +contract Gas_SendRequest is FunctionsSubscriptionSetup { + bytes s_minimalRequestData; + bytes s_maximalRequestData; + + function _makeStringOfBytesSize(uint16 bytesSize) internal pure returns (string memory) { + return vm.toString(new bytes((bytesSize - 2) / 2)); + } + + function setUp() public virtual override { + vm.pauseGasMetering(); + + FunctionsSubscriptionSetup.setUp(); + + { + // Create minimum viable request data + FunctionsRequest.Request memory minimalRequest; + string memory minimalSourceCode = "return Functions.encodeString('hello world');"; + FunctionsRequest._initializeRequest( + minimalRequest, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + minimalSourceCode + ); + s_minimalRequestData = FunctionsRequest._encodeCBOR(minimalRequest); + } + + { + // Create maximum viable request data - 30 KB encoded data + FunctionsRequest.Request memory maxmimalRequest; + + // Create maximum viable request data - 30 KB encoded data + string memory maximalSourceCode = _makeStringOfBytesSize(29_898); // CBOR size without source code is 102 bytes + FunctionsRequest._initializeRequest( + maxmimalRequest, + FunctionsRequest.Location.Inline, + FunctionsRequest.CodeLanguage.JavaScript, + maximalSourceCode + ); + s_maximalRequestData = FunctionsRequest._encodeCBOR(maxmimalRequest); + assertEq(s_maximalRequestData.length, 30_000); + } + } + + /// @dev The order of these test cases matters as the first test will consume more gas by writing over default values + function test_SendRequest_MaximumGas() public { + // Pull storage variables into memory + bytes memory 
maximalRequestData = s_maximalRequestData; + uint64 subscriptionId = s_subscriptionId; + uint32 callbackGasLimit = 300_000; + bytes32 donId = s_donId; + vm.resumeGasMetering(); + + s_functionsClient.sendRequestBytes(maximalRequestData, subscriptionId, callbackGasLimit, donId); + } + + function test_SendRequest_MinimumGas() public { + // Pull storage variables into memory + bytes memory minimalRequestData = s_minimalRequestData; + uint64 subscriptionId = s_subscriptionId; + uint32 callbackGasLimit = 5_500; + bytes32 donId = s_donId; + vm.resumeGasMetering(); + + s_functionsClient.sendRequestBytes(minimalRequestData, subscriptionId, callbackGasLimit, donId); + } +} + +// Setup Fulfill Gas tests +contract Gas_FulfillRequest_Setup is FunctionsClientRequestSetup { + mapping(uint256 reportNumber => Report) s_reports; + + FunctionsClientTestHelper s_functionsClientWithMaximumReturnData; + + function _makeStringOfBytesSize(uint16 bytesSize) internal pure returns (string memory) { + return vm.toString(new bytes((bytesSize - 2) / 2)); + } + + function setUp() public virtual override { + FunctionsSubscriptionSetup.setUp(); + + { + // Deploy consumer that has large revert return data + s_functionsClientWithMaximumReturnData = new FunctionsClientTestHelper(address(s_functionsRouter)); + s_functionsClientWithMaximumReturnData.setRevertFulfillRequest(true); + string memory revertMessage = _makeStringOfBytesSize(30_000); // 30kb - FunctionsRouter cuts off response at MAX_CALLBACK_RETURN_BYTES = 4 + 4 * 32 = 132bytes, go well above that + s_functionsClientWithMaximumReturnData.setRevertFulfillRequestMessage(revertMessage); + s_functionsRouter.addConsumer(s_subscriptionId, address(s_functionsClientWithMaximumReturnData)); + } + + // Set up maximum gas test + { + // Send request #2 for maximum gas test + uint8 requestNumber = 2; + + bytes memory secrets = new bytes(0); + string[] memory args = new string[](0); + bytes[] memory bytesArgs = new bytes[](0); + uint32 callbackGasLimit = 
300_000; + + // Create maximum viable request data - 30 KB encoded data + string memory maximalSourceCode = _makeStringOfBytesSize(29_898); // CBOR size without source code is 102 bytes + + _sendAndStoreRequest( + requestNumber, + maximalSourceCode, + secrets, + args, + bytesArgs, + callbackGasLimit, + address(s_functionsClientWithMaximumReturnData) + ); + + // Build the report transmission data + uint256[] memory requestNumberKeys = new uint256[](1); + requestNumberKeys[0] = requestNumber; + string[] memory results = new string[](1); + // Build a 256 byte response size + results[0] = _makeStringOfBytesSize(256); + bytes[] memory errors = new bytes[](1); + errors[0] = new bytes(0); // No error + + (bytes memory report, bytes32[3] memory reportContext) = _buildReport(requestNumberKeys, results, errors); + + uint256[] memory signerPrivateKeys = new uint256[](3); + signerPrivateKeys[0] = NOP_SIGNER_PRIVATE_KEY_1; + signerPrivateKeys[1] = NOP_SIGNER_PRIVATE_KEY_2; + signerPrivateKeys[2] = NOP_SIGNER_PRIVATE_KEY_3; + + (bytes32[] memory rawRs, bytes32[] memory rawSs, bytes32 rawVs) = _signReport( + report, + reportContext, + signerPrivateKeys + ); + + // Store the report data + s_reports[1] = Report({rs: rawRs, ss: rawSs, vs: rawVs, report: report, reportContext: reportContext}); + } + + // Set up minimum gas test + { + // Send requests minimum gas test + uint8 requestsToSend = 1; + uint8 requestNumberOffset = 3; // the setup already has request #1 sent, and the previous test case uses request #2, start from request #3 + + string memory sourceCode = "return Functions.encodeString('hello world');"; + bytes memory secrets = new bytes(0); + string[] memory args = new string[](0); + bytes[] memory bytesArgs = new bytes[](0); + uint32 callbackGasLimit = 5_500; + + for (uint256 i = 0; i < requestsToSend; ++i) { + _sendAndStoreRequest(i + requestNumberOffset, sourceCode, secrets, args, bytesArgs, callbackGasLimit); + } + + // Build the report transmission data + uint256[] 
memory requestNumberKeys = new uint256[](requestsToSend); + string[] memory results = new string[](requestsToSend); + bytes[] memory errors = new bytes[](requestsToSend); + for (uint256 i = 0; i < requestsToSend; ++i) { + requestNumberKeys[i] = i + requestNumberOffset; + results[i] = "hello world"; + errors[i] = new bytes(0); // no error + } + + (bytes memory report, bytes32[3] memory reportContext) = _buildReport(requestNumberKeys, results, errors); + + uint256[] memory signerPrivateKeys = new uint256[](3); + signerPrivateKeys[0] = NOP_SIGNER_PRIVATE_KEY_1; + signerPrivateKeys[1] = NOP_SIGNER_PRIVATE_KEY_2; + signerPrivateKeys[2] = NOP_SIGNER_PRIVATE_KEY_3; + + (bytes32[] memory rawRs, bytes32[] memory rawSs, bytes32 rawVs) = _signReport( + report, + reportContext, + signerPrivateKeys + ); + + // Store the report data + s_reports[2] = Report({rs: rawRs, ss: rawSs, vs: rawVs, report: report, reportContext: reportContext}); + } + + vm.stopPrank(); + vm.startPrank(NOP_TRANSMITTER_ADDRESS_1); + } +} + +/// @notice #fulfillRequest +contract Gas_FulfillRequest_Success is Gas_FulfillRequest_Setup { + function setUp() public virtual override { + vm.pauseGasMetering(); + + Gas_FulfillRequest_Setup.setUp(); + } + + /// @dev The order of these test cases matters as the first test will consume more gas by writing over default values + function test_FulfillRequest_Success_MaximumGas() public { + // Pull storage variables into memory + uint8 reportNumber = 1; + bytes32[] memory rs = s_reports[reportNumber].rs; + bytes32[] memory ss = s_reports[reportNumber].ss; + bytes32 vs = s_reports[reportNumber].vs; + bytes memory report = s_reports[reportNumber].report; + bytes32[3] memory reportContext = s_reports[reportNumber].reportContext; + vm.resumeGasMetering(); + + // 1 fulfillment in the report, single request takes on all report validation cost + // maximum request + // maximum NOPs + // maximum return data + // first storage write to change default values + 
s_functionsCoordinator.transmit(reportContext, report, rs, ss, vs); + } + + function test_FulfillRequest_Success_MinimumGas() public { + // Pull storage variables into memory + uint8 reportNumber = 2; + bytes32[] memory rs = s_reports[reportNumber].rs; + bytes32[] memory ss = s_reports[reportNumber].ss; + bytes32 vs = s_reports[reportNumber].vs; + bytes memory report = s_reports[reportNumber].report; + bytes32[3] memory reportContext = s_reports[reportNumber].reportContext; + vm.resumeGasMetering(); + + // max fulfillments in the report, cost of validation split between all + // minimal request + // minimum NOPs + // no return data + // not storage writing default values + s_functionsCoordinator.transmit(reportContext, report, rs, ss, vs); + } +} + +/// @notice #fulfillRequest +contract Gas_FulfillRequest_DuplicateRequestID is Gas_FulfillRequest_Setup { + function setUp() public virtual override { + vm.pauseGasMetering(); + + // Send requests + Gas_FulfillRequest_Setup.setUp(); + // Fulfill request #1 & #2 + for (uint256 i = 1; i < 3; i++) { + uint256 reportNumber = i; + bytes32[] memory rs = s_reports[reportNumber].rs; + bytes32[] memory ss = s_reports[reportNumber].ss; + bytes32 vs = s_reports[reportNumber].vs; + bytes memory report = s_reports[reportNumber].report; + bytes32[3] memory reportContext = s_reports[reportNumber].reportContext; + s_functionsCoordinator.transmit(reportContext, report, rs, ss, vs); + } + + // Now tests will attempt to transmit reports with respones to requests that have already been fulfilled + } + + /// @dev The order of these test cases matters as the first test will consume more gas by writing over default values + function test_FulfillRequest_DuplicateRequestID_MaximumGas() public { + // Pull storage variables into memory + uint8 reportNumber = 1; + bytes32[] memory rs = s_reports[reportNumber].rs; + bytes32[] memory ss = s_reports[reportNumber].ss; + bytes32 vs = s_reports[reportNumber].vs; + bytes memory report = 
s_reports[reportNumber].report; + bytes32[3] memory reportContext = s_reports[reportNumber].reportContext; + vm.resumeGasMetering(); + + // 1 fulfillment in the report, single request takes on all report validation cost + // maximum request + // maximum NOPs + // maximum return data + // first storage write to change default values + s_functionsCoordinator.transmit(reportContext, report, rs, ss, vs); + } + + function test_FulfillRequest_DuplicateRequestID_MinimumGas() public { + // Pull storage variables into memory + uint8 reportNumber = 2; + bytes32[] memory rs = s_reports[reportNumber].rs; + bytes32[] memory ss = s_reports[reportNumber].ss; + bytes32 vs = s_reports[reportNumber].vs; + bytes memory report = s_reports[reportNumber].report; + bytes32[3] memory reportContext = s_reports[reportNumber].reportContext; + vm.resumeGasMetering(); + + // max fulfillments in the report, cost of validation split between all + // minimal request + // minimum NOPs + // no return data + // not storage writing default values + s_functionsCoordinator.transmit(reportContext, report, rs, ss, vs); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/OCR2.t.sol b/contracts/src/v0.8/functions/tests/v1_X/OCR2.t.sol new file mode 100644 index 00000000..3dc0db85 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/OCR2.t.sol @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +// ================================================================ +// | OCR2Base | +// ================================================================ + +/// @notice #constructor +contract OCR2Base_Constructor {} + +/// @notice #checkConfigValid +contract OCR2Base_CheckConfigValid {} + +/// @notice #latestConfigDigestAndEpoch +contract OCR2Base_LatestConfigDigestAndEpoch {} + +/// @notice #setConfig +contract OCR2Base_SetConfig {} + +/// @notice #configDigestFromConfigData +contract OCR2Base_ConfigDigestFromConfigData {} + +/// @notice #latestConfigDetails +contract 
OCR2Base_LatestConfigDetails {} + +/// @notice #transmitters +contract OCR2Base_Transmitters {} + +/// @notice #_report +contract OCR2Base__Report { + // TODO: make contract internal function helper +} + +/// @notice #requireExpectedMsgDataLength +contract OCR2Base_RequireExpectedMsgDataLength {} + +/// @notice #transmit +contract OCR2Base_Transmit {} diff --git a/contracts/src/v0.8/functions/tests/v1_X/README.md b/contracts/src/v0.8/functions/tests/v1_X/README.md new file mode 100644 index 00000000..5f96532f --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/README.md @@ -0,0 +1,25 @@ +## Usage + +First set the foundry profile to Functions: +``` +export FOUNDRY_PROFILE=functions +``` + +**To run tests use**: + +All Functions test files: +``` +forge test -vvv +``` + +To run a specific file use: +``` +forge test -vvv --mp src/v0.8/functions/tests/v1_X/[File Name].t.sol +``` + +**To see coverage**: +First ensure that the correct files are being evaluated. For example, if only v0 contracts are, then temporarily change the Functions profile in `./foundry.toml`. 
+ +``` +forge coverage --ir-minimum +``` \ No newline at end of file diff --git a/contracts/src/v0.8/functions/tests/v1_X/Setup.t.sol b/contracts/src/v0.8/functions/tests/v1_X/Setup.t.sol new file mode 100644 index 00000000..2cfcbebb --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/Setup.t.sol @@ -0,0 +1,666 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {BaseTest} from "./BaseTest.t.sol"; +import {FunctionsClientHarness} from "./testhelpers/FunctionsClientHarness.sol"; +import {FunctionsRouterHarness, FunctionsRouter} from "./testhelpers/FunctionsRouterHarness.sol"; +import {FunctionsCoordinatorHarness} from "./testhelpers/FunctionsCoordinatorHarness.sol"; +import {FunctionsBilling} from "../../dev/v1_X/FunctionsBilling.sol"; +import {FunctionsResponse} from "../../dev/v1_X/libraries/FunctionsResponse.sol"; +import {MockV3Aggregator} from "../../../tests/MockV3Aggregator.sol"; +import {TermsOfServiceAllowList} from "../../dev/v1_X/accessControl/TermsOfServiceAllowList.sol"; +import {TermsOfServiceAllowListConfig} from "../../dev/v1_X/accessControl/interfaces/ITermsOfServiceAllowList.sol"; +import {MockLinkToken} from "../../../mocks/MockLinkToken.sol"; +import {FunctionsBillingConfig} from "../../dev/v1_X/interfaces/IFunctionsBilling.sol"; + +import "forge-std/Vm.sol"; + +/// @notice Set up to deploy the following contracts: FunctionsRouter, FunctionsCoordinator, PLI/ETH Feed, ToS Allow List, and PLI token +contract FunctionsRouterSetup is BaseTest { + FunctionsRouterHarness internal s_functionsRouter; + FunctionsCoordinatorHarness internal s_functionsCoordinator; + MockV3Aggregator internal s_linkEthFeed; + TermsOfServiceAllowList internal s_termsOfServiceAllowList; + MockLinkToken internal s_linkToken; + + uint16 internal s_maxConsumersPerSubscription = 3; + uint72 internal s_adminFee = 100; + uint72 internal s_donFee = 100; + bytes4 internal s_handleOracleFulfillmentSelector = 0x0ca76175; + uint16 
s_subscriptionDepositMinimumRequests = 1; + uint72 s_subscriptionDepositJuels = 11 * JUELS_PER_PLI; + + int256 internal PLI_ETH_RATE = 6000000000000000; + + uint256 internal TOS_SIGNER_PRIVATE_KEY = 0x3; + address internal TOS_SIGNER = vm.addr(TOS_SIGNER_PRIVATE_KEY); + + function setUp() public virtual override { + BaseTest.setUp(); + s_linkToken = new MockLinkToken(); + s_functionsRouter = new FunctionsRouterHarness(address(s_linkToken), getRouterConfig()); + s_linkEthFeed = new MockV3Aggregator(0, PLI_ETH_RATE); + s_functionsCoordinator = new FunctionsCoordinatorHarness( + address(s_functionsRouter), + getCoordinatorConfig(), + address(s_linkEthFeed) + ); + address[] memory initialAllowedSenders; + address[] memory initialBlockedSenders; + s_termsOfServiceAllowList = new TermsOfServiceAllowList( + getTermsOfServiceConfig(), + initialAllowedSenders, + initialBlockedSenders + ); + } + + function getRouterConfig() public view returns (FunctionsRouter.Config memory) { + uint32[] memory maxCallbackGasLimits = new uint32[](3); + maxCallbackGasLimits[0] = 300_000; + maxCallbackGasLimits[1] = 500_000; + maxCallbackGasLimits[2] = 1_000_000; + + return + FunctionsRouter.Config({ + maxConsumersPerSubscription: s_maxConsumersPerSubscription, + adminFee: s_adminFee, + handleOracleFulfillmentSelector: s_handleOracleFulfillmentSelector, + maxCallbackGasLimits: maxCallbackGasLimits, + gasForCallExactCheck: 5000, + subscriptionDepositMinimumRequests: s_subscriptionDepositMinimumRequests, + subscriptionDepositJuels: s_subscriptionDepositJuels + }); + } + + function getCoordinatorConfig() public view returns (FunctionsBillingConfig memory) { + return + FunctionsBillingConfig({ + feedStalenessSeconds: 24 * 60 * 60, // 1 day + gasOverheadAfterCallback: 93_942, + gasOverheadBeforeCallback: 105_000, + requestTimeoutSeconds: 60 * 5, // 5 minutes + donFee: s_donFee, + maxSupportedRequestDataVersion: 1, + fulfillmentGasPriceOverEstimationBP: 5000, + fallbackNativePerUnitLink: 
5000000000000000, + minimumEstimateGasPriceWei: 1000000000 // 1 gwei + }); + } + + function getTermsOfServiceConfig() public view returns (TermsOfServiceAllowListConfig memory) { + return TermsOfServiceAllowListConfig({enabled: true, signerPublicKey: TOS_SIGNER}); + } +} + +/// @notice Set up to set the OCR configuration of the Coordinator contract +contract FunctionsDONSetup is FunctionsRouterSetup { + uint256 internal NOP_SIGNER_PRIVATE_KEY_1 = 0x100; + address internal NOP_SIGNER_ADDRESS_1 = vm.addr(NOP_SIGNER_PRIVATE_KEY_1); + uint256 internal NOP_SIGNER_PRIVATE_KEY_2 = 0x101; + address internal NOP_SIGNER_ADDRESS_2 = vm.addr(NOP_SIGNER_PRIVATE_KEY_2); + uint256 internal NOP_SIGNER_PRIVATE_KEY_3 = 0x102; + address internal NOP_SIGNER_ADDRESS_3 = vm.addr(NOP_SIGNER_PRIVATE_KEY_3); + uint256 internal NOP_SIGNER_PRIVATE_KEY_4 = 0x103; + address internal NOP_SIGNER_ADDRESS_4 = vm.addr(NOP_SIGNER_PRIVATE_KEY_4); + + uint256 internal NOP_TRANSMITTER_PRIVATE_KEY_1 = 0x104; + address internal NOP_TRANSMITTER_ADDRESS_1 = vm.addr(NOP_TRANSMITTER_PRIVATE_KEY_1); + uint256 internal NOP_TRANSMITTER_PRIVATE_KEY_2 = 0x105; + address internal NOP_TRANSMITTER_ADDRESS_2 = vm.addr(NOP_TRANSMITTER_PRIVATE_KEY_2); + uint256 internal NOP_TRANSMITTER_PRIVATE_KEY_3 = 0x106; + address internal NOP_TRANSMITTER_ADDRESS_3 = vm.addr(NOP_TRANSMITTER_PRIVATE_KEY_3); + uint256 internal NOP_TRANSMITTER_PRIVATE_KEY_4 = 0x107; + address internal NOP_TRANSMITTER_ADDRESS_4 = vm.addr(NOP_TRANSMITTER_PRIVATE_KEY_4); + + address[] internal s_signers; + address[] internal s_transmitters; + uint8 s_f = 1; + bytes internal s_onchainConfig = new bytes(0); + uint64 internal s_offchainConfigVersion = 1; + bytes internal s_offchainConfig = new bytes(0); + + bytes s_thresholdKey = + vm.parseBytes( + 
"0x7b2247726f7570223a2250323536222c22475f626172223a22424f2f344358424575792f64547a436a612b614e774d666c2b645a77346d325036533246536b4966472f6633527547327337392b494e79642b4639326a346f586e67433657427561556a752b4a637a32377834484251343d222c2248223a224250532f72485065377941467232416c447a79395549466258776d46384666756632596d514177666e3342373844336f474845643247474536466e616f34552b4c6a4d4d5756792b464f7075686e77554f6a75427a64773d222c22484172726179223a5b22424d75546862414473337768316e67764e56792f6e3841316d42674b5a4b4c475259385937796a39695769337242502f316a32347571695869534531437554384c6f51446a386248466d384345477667517158494e62383d222c224248687974716d6e34314373322f4658416f43737548687151486236382f597930524b2b41354c6647654f645a78466f4e386c442b45656e4b587a544943784f6d3231636d535447364864484a6e336342645663714c673d222c22424d794e7a4534616e596258474d72694f52664c52634e7239766c347878654279316432452f4464335a744630546372386267567435582b2b42355967552b4b7875726e512f4d656b6857335845782b79506e4e4f584d3d222c22424d6a753272375a657a4a45545539413938746a6b6d547966796a79493735345742555835505174724a6578346d6766366130787373426d50325a7472412b55576d504e592b6d4664526b46674f7944694c53614e59453d225d7d" + ); + bytes s_donKey = + vm.parseBytes( + "0xf2f9c47363202d89aa9fa70baf783d70006fe493471ac8cfa82f1426fd09f16a5f6b32b7c4b5d5165cd147a6e513ba4c0efd39d969d6b20a8a21126f0411b9c6" + ); + + function setUp() public virtual override { + FunctionsRouterSetup.setUp(); + + s_signers = new address[](4); + s_signers[0] = NOP_SIGNER_ADDRESS_1; + s_signers[1] = NOP_SIGNER_ADDRESS_2; + s_signers[2] = NOP_SIGNER_ADDRESS_3; + s_signers[3] = NOP_SIGNER_ADDRESS_4; + + s_transmitters = new address[](4); + s_transmitters[0] = NOP_TRANSMITTER_ADDRESS_1; + s_transmitters[1] = NOP_TRANSMITTER_ADDRESS_2; + s_transmitters[2] = NOP_TRANSMITTER_ADDRESS_3; + s_transmitters[3] = NOP_TRANSMITTER_ADDRESS_4; + + // set OCR config + s_functionsCoordinator.setConfig( + s_signers, + s_transmitters, + s_f, + s_onchainConfig, + 
s_offchainConfigVersion, + s_offchainConfig + ); + } + + function _getTransmitterBalances() internal view returns (uint256[4] memory balances) { + return [ + s_linkToken.balanceOf(NOP_TRANSMITTER_ADDRESS_1), + s_linkToken.balanceOf(NOP_TRANSMITTER_ADDRESS_2), + s_linkToken.balanceOf(NOP_TRANSMITTER_ADDRESS_3), + s_linkToken.balanceOf(NOP_TRANSMITTER_ADDRESS_4) + ]; + } + + function _assertTransmittersAllHaveBalance(uint256[4] memory balances, uint256 expectedBalance) internal { + assertEq(balances[0], expectedBalance); + assertEq(balances[1], expectedBalance); + assertEq(balances[2], expectedBalance); + assertEq(balances[3], expectedBalance); + } +} + +/// @notice Set up to add the Coordinator and ToS Allow Contract as routes on the Router contract +contract FunctionsRoutesSetup is FunctionsDONSetup { + bytes32 s_donId = bytes32("1"); + + function setUp() public virtual override { + FunctionsDONSetup.setUp(); + + bytes32 allowListId = s_functionsRouter.getAllowListId(); + bytes32[] memory proposedContractSetIds = new bytes32[](2); + proposedContractSetIds[0] = s_donId; + proposedContractSetIds[1] = allowListId; + address[] memory proposedContractSetAddresses = new address[](2); + proposedContractSetAddresses[0] = address(s_functionsCoordinator); + proposedContractSetAddresses[1] = address(s_termsOfServiceAllowList); + + s_functionsRouter.proposeContractsUpdate(proposedContractSetIds, proposedContractSetAddresses); + s_functionsRouter.updateContracts(); + } +} + +/// @notice Set up for the OWNER_ADDRESS to accept the Terms of Service +contract FunctionsOwnerAcceptTermsOfServiceSetup is FunctionsRoutesSetup { + function setUp() public virtual override { + FunctionsRoutesSetup.setUp(); + + bytes32 message = s_termsOfServiceAllowList.getMessage(OWNER_ADDRESS, OWNER_ADDRESS); + bytes32 prefixedMessage = keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", message)); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(TOS_SIGNER_PRIVATE_KEY, prefixedMessage); + 
s_termsOfServiceAllowList.acceptTermsOfService(OWNER_ADDRESS, OWNER_ADDRESS, r, s, v); + } +} + +/// @notice Set up to deploy a consumer contract +contract FunctionsClientSetup is FunctionsOwnerAcceptTermsOfServiceSetup { + FunctionsClientHarness internal s_functionsClient; + + function setUp() public virtual override { + FunctionsOwnerAcceptTermsOfServiceSetup.setUp(); + + s_functionsClient = new FunctionsClientHarness(address(s_functionsRouter)); + } +} + +/// @notice Set up to create a subscription, add the consumer contract as a consumer of the subscription, and fund the subscription with 's_subscriptionInitialFunding' +contract FunctionsSubscriptionSetup is FunctionsClientSetup { + uint64 s_subscriptionId; + uint96 s_subscriptionInitialFunding = 10 * JUELS_PER_PLI; // 10 PLI + + function setUp() public virtual override { + FunctionsClientSetup.setUp(); + + // Create subscription + s_subscriptionId = s_functionsRouter.createSubscription(); + s_functionsRouter.addConsumer(s_subscriptionId, address(s_functionsClient)); + + // Fund subscription + s_linkToken.transferAndCall(address(s_functionsRouter), s_subscriptionInitialFunding, abi.encode(s_subscriptionId)); + } +} + +/// @notice Set up to initate a minimal request and store it in s_requests[1] +contract FunctionsClientRequestSetup is FunctionsSubscriptionSetup { + struct Report { + bytes32[] rs; + bytes32[] ss; + bytes32 vs; + bytes report; + bytes32[3] reportContext; + } + + struct RequestData { + string sourceCode; + bytes secrets; + string[] args; + bytes[] bytesArgs; + uint32 callbackGasLimit; + } + struct Request { + RequestData requestData; + bytes32 requestId; + FunctionsResponse.Commitment commitment; + } + + mapping(uint256 requestNumber => Request) s_requests; + + struct Response { + uint96 totalCostJuels; + } + + mapping(uint256 requestNumber => Response) s_responses; + + uint96 s_fulfillmentRouterOwnerBalance = 0; + uint96 s_fulfillmentCoordinatorBalance = 0; + + function setUp() public virtual 
override { + FunctionsSubscriptionSetup.setUp(); + + // Send request #1 + string memory sourceCode = "return 'hello world';"; + bytes memory secrets = new bytes(0); + string[] memory args = new string[](0); + bytes[] memory bytesArgs = new bytes[](0); + uint32 callbackGasLimit = 5500; + _sendAndStoreRequest(1, sourceCode, secrets, args, bytesArgs, callbackGasLimit); + } + + /// @notice Predicts the estimated cost (maximum cost) of a request + /// @dev Meant only for Ethereum, does not add L2 chains' L1 fee + function _getExpectedCostEstimate(uint256 callbackGas) internal view returns (uint96) { + uint256 gasPrice = TX_GASPRICE_START < getCoordinatorConfig().minimumEstimateGasPriceWei + ? getCoordinatorConfig().minimumEstimateGasPriceWei + : TX_GASPRICE_START; + uint256 gasPriceWithOverestimation = gasPrice + + ((gasPrice * getCoordinatorConfig().fulfillmentGasPriceOverEstimationBP) / 10_000); + uint96 juelsPerGas = uint96((1e18 * gasPriceWithOverestimation) / uint256(PLI_ETH_RATE)); + uint96 gasOverheadJuels = juelsPerGas * + ((getCoordinatorConfig().gasOverheadBeforeCallback + getCoordinatorConfig().gasOverheadAfterCallback)); + uint96 callbackGasCostJuels = uint96(juelsPerGas * callbackGas); + return gasOverheadJuels + s_donFee + s_adminFee + callbackGasCostJuels; + } + + /// @notice Predicts the actual cost of a request + /// @dev Meant only for Ethereum, does not add L2 chains' L1 fee + function _getExpectedCost(uint256 gasUsed) internal view returns (uint96) { + uint96 juelsPerGas = uint96((1e18 * TX_GASPRICE_START) / uint256(PLI_ETH_RATE)); + uint96 gasOverheadJuels = juelsPerGas * + (getCoordinatorConfig().gasOverheadBeforeCallback + getCoordinatorConfig().gasOverheadAfterCallback); + uint96 callbackGasCostJuels = uint96(juelsPerGas * gasUsed); + return gasOverheadJuels + s_donFee + s_adminFee + callbackGasCostJuels; + } + + /// @notice Send a request and store information about it in s_requests + /// @param requestNumberKey - the key that the request will 
be stored in `s_requests` in + /// @param sourceCode - Raw source code for Request.codeLocation of Location.Inline, URL for Request.codeLocation of Location.Remote, or slot decimal number for Request.codeLocation of Location.DONHosted + /// @param secrets - Encrypted URLs for Request.secretsLocation of Location.Remote (use addSecretsReference()), or CBOR encoded slotid+version for Request.secretsLocation of Location.DONHosted (use addDONHostedSecrets()) + /// @param args - String arguments that will be passed into the source code + /// @param bytesArgs - Bytes arguments that will be passed into the source code + /// @param callbackGasLimit - Gas limit for the fulfillment callback + /// @param client - The consumer contract to send the request from + function _sendAndStoreRequest( + uint256 requestNumberKey, + string memory sourceCode, + bytes memory secrets, + string[] memory args, + bytes[] memory bytesArgs, + uint32 callbackGasLimit, + address client + ) internal { + if (s_requests[requestNumberKey].requestId != bytes32(0)) { + revert("Request already written"); + } + + vm.recordLogs(); + + bytes32 requestId = FunctionsClientHarness(client).sendRequest( + s_donId, + sourceCode, + secrets, + args, + bytesArgs, + s_subscriptionId, + callbackGasLimit + ); + + // Get commitment data from OracleRequest event log + Vm.Log[] memory entries = vm.getRecordedLogs(); + (, , , , , , , FunctionsResponse.Commitment memory commitment) = abi.decode( + entries[0].data, + (address, uint64, address, bytes, uint16, bytes32, uint64, FunctionsResponse.Commitment) + ); + s_requests[requestNumberKey] = Request({ + requestData: RequestData({ + sourceCode: sourceCode, + secrets: secrets, + args: args, + bytesArgs: bytesArgs, + callbackGasLimit: callbackGasLimit + }), + requestId: requestId, + commitment: commitment + }); + } + + /// @notice Send a request and store information about it in s_requests + /// @param requestNumberKey - the key that the request will be stored in `s_requests` in 
+ /// @param sourceCode - Raw source code for Request.codeLocation of Location.Inline, URL for Request.codeLocation of Location.Remote, or slot decimal number for Request.codeLocation of Location.DONHosted + /// @param secrets - Encrypted URLs for Request.secretsLocation of Location.Remote (use addSecretsReference()), or CBOR encoded slotid+version for Request.secretsLocation of Location.DONHosted (use addDONHostedSecrets()) + /// @param args - String arguments that will be passed into the source code + /// @param bytesArgs - Bytes arguments that will be passed into the source code + /// @param callbackGasLimit - Gas limit for the fulfillment callback + /// @dev @param client - The consumer contract to send the request from (overloaded to fill client with s_functionsClient) + function _sendAndStoreRequest( + uint256 requestNumberKey, + string memory sourceCode, + bytes memory secrets, + string[] memory args, + bytes[] memory bytesArgs, + uint32 callbackGasLimit + ) internal { + _sendAndStoreRequest( + requestNumberKey, + sourceCode, + secrets, + args, + bytesArgs, + callbackGasLimit, + address(s_functionsClient) + ); + } + + function _buildReport( + uint256[] memory requestNumberKeys, + string[] memory results, + bytes[] memory errors + ) internal view returns (bytes memory report, bytes32[3] memory reportContext) { + // Build report + bytes32[] memory _requestIds = new bytes32[](requestNumberKeys.length); + bytes[] memory _results = new bytes[](requestNumberKeys.length); + bytes[] memory _errors = new bytes[](requestNumberKeys.length); + bytes[] memory _onchainMetadata = new bytes[](requestNumberKeys.length); + bytes[] memory _offchainMetadata = new bytes[](requestNumberKeys.length); + for (uint256 i = 0; i < requestNumberKeys.length; ++i) { + if (keccak256(bytes(results[i])) != keccak256(new bytes(0)) && keccak256(errors[i]) != keccak256(new bytes(0))) { + revert("Report can only contain a result OR an error, one must remain empty."); + } + _requestIds[i] = 
s_requests[requestNumberKeys[i]].requestId; + _results[i] = bytes(results[i]); + _errors[i] = errors[i]; + _onchainMetadata[i] = abi.encode(s_requests[requestNumberKeys[i]].commitment); + _offchainMetadata[i] = new bytes(0); // No off-chain metadata + } + report = abi.encode(_requestIds, _results, _errors, _onchainMetadata, _offchainMetadata); + + // Build report context + uint256 h = uint256( + keccak256( + abi.encode( + block.chainid, + address(s_functionsCoordinator), + 1, + s_signers, + s_transmitters, + s_f, + s_onchainConfig, + s_offchainConfigVersion, + s_offchainConfig + ) + ) + ); + uint256 prefixMask = type(uint256).max << (256 - 16); // 0xFFFF00..00 + uint256 prefix = 0x0001 << (256 - 16); // 0x000100..00 + bytes32 configDigest = bytes32((prefix & prefixMask) | (h & ~prefixMask)); + reportContext = [configDigest, configDigest, configDigest]; + + return (report, reportContext); + } + + /// @notice Gather signatures on report data + /// @param report - Report bytes generated from `_buildReport` + /// @param reportContext - Report context bytes32 generated from `_buildReport` + /// @param signerPrivateKeys - One or more addresses that will sign the report data + /// @return rawRs - Signature rs + /// @return rawSs - Signature ss + /// @return rawVs - Signature vs + function _signReport( + bytes memory report, + bytes32[3] memory reportContext, + uint256[] memory signerPrivateKeys + ) internal pure returns (bytes32[] memory, bytes32[] memory, bytes32) { + bytes32[] memory rs = new bytes32[](signerPrivateKeys.length); + bytes32[] memory ss = new bytes32[](signerPrivateKeys.length); + bytes memory vs = new bytes(signerPrivateKeys.length); + + bytes32 reportDigest = keccak256(abi.encodePacked(keccak256(report), reportContext)); + + for (uint256 i = 0; i < signerPrivateKeys.length; i++) { + (uint8 v, bytes32 r, bytes32 s) = vm.sign(signerPrivateKeys[i], reportDigest); + rs[i] = r; + ss[i] = s; + vs[i] = bytes1(v - 27); + } + + return (rs, ss, bytes32(vs)); + } + 
+ function _buildAndSignReport( + uint256[] memory requestNumberKeys, + string[] memory results, + bytes[] memory errors + ) internal view returns (Report memory) { + (bytes memory report, bytes32[3] memory reportContext) = _buildReport(requestNumberKeys, results, errors); + + // Sign the report + // Need at least 3 signers to fulfill minimum number of: (configInfo.n + configInfo.f) / 2 + 1 + uint256[] memory signerPrivateKeys = new uint256[](3); + signerPrivateKeys[0] = NOP_SIGNER_PRIVATE_KEY_1; + signerPrivateKeys[1] = NOP_SIGNER_PRIVATE_KEY_2; + signerPrivateKeys[2] = NOP_SIGNER_PRIVATE_KEY_3; + (bytes32[] memory rawRs, bytes32[] memory rawSs, bytes32 rawVs) = _signReport( + report, + reportContext, + signerPrivateKeys + ); + + return Report({report: report, reportContext: reportContext, rs: rawRs, ss: rawSs, vs: rawVs}); + } + + /// @notice Provide a response from the DON to fulfill one or more requests and store the updated balances of the DON & Admin + /// @param requestNumberKeys - One or more requestNumberKeys that were used to store the request in `s_requests` of the requests, that will be added to the report + /// @param results - The result that will be sent to the consumer contract's callback. For each index, e.g. result[index] or errors[index], only one of should be filled. + /// @param errors - The error that will be sent to the consumer contract's callback. For each index, e.g. result[index] or errors[index], only one of should be filled. + /// @param transmitter - The address that will send the `.report` transaction + /// @param expectedToSucceed - Boolean representing if the report transmission is expected to produce a RequestProcessed event for every fulfillment. If not, we ignore retrieving the event log. + /// @param requestProcessedStartIndex - On a successful fulfillment the Router will emit a RequestProcessed event. To grab that event we must know the order at which this event was thrown in the report transmission lifecycle. 
This can change depending on the test setup (e.g. the Client contract gives an extra event during its callback) + /// @param transmitterGasToUse - Override the default amount of gas that the transmitter sends the `.report` transaction with + function _reportAndStore( + uint256[] memory requestNumberKeys, + string[] memory results, + bytes[] memory errors, + address transmitter, + bool expectedToSucceed, + uint8 requestProcessedStartIndex, + uint256 transmitterGasToUse + ) internal { + { + if (requestNumberKeys.length != results.length || requestNumberKeys.length != errors.length) { + revert("_reportAndStore arguments length mismatch"); + } + } + + Report memory r = _buildAndSignReport(requestNumberKeys, results, errors); + + // Send as transmitter + vm.stopPrank(); + vm.startPrank(transmitter); + + // Send report + vm.recordLogs(); + if (transmitterGasToUse > 0) { + s_functionsCoordinator.transmit{gas: transmitterGasToUse}(r.reportContext, r.report, r.rs, r.ss, r.vs); + } else { + s_functionsCoordinator.transmit(r.reportContext, r.report, r.rs, r.ss, r.vs); + } + + if (expectedToSucceed) { + // Get actual cost from RequestProcessed event log + (uint96 totalCostJuels, , , , , ) = abi.decode( + vm.getRecordedLogs()[requestProcessedStartIndex].data, + (uint96, address, FunctionsResponse.FulfillResult, bytes, bytes, bytes) + ); + // Store response of first request + // TODO: handle multiple requests + s_responses[requestNumberKeys[0]] = Response({totalCostJuels: totalCostJuels}); + // Store profit amounts + s_fulfillmentRouterOwnerBalance += s_adminFee * uint96(requestNumberKeys.length); + // totalCostJuels = costWithoutCallbackJuels + adminFee + callbackGasCostJuels + // TODO: handle multiple requests + s_fulfillmentCoordinatorBalance += totalCostJuels - s_adminFee; + } + + // Return prank to Owner + vm.stopPrank(); + vm.startPrank(OWNER_ADDRESS, OWNER_ADDRESS); + } + + /// @notice Provide a response from the DON to fulfill one or more requests and store the updated 
balances of the DON & Admin + /// @param requestNumberKeys - One or more requestNumberKeys that were used to store the request in `s_requests` of the requests, that will be added to the report + /// @param results - The result that will be sent to the consumer contract's callback. For each index, e.g. result[index] or errors[index], only one of should be filled. + /// @param errors - The error that will be sent to the consumer contract's callback. For each index, e.g. result[index] or errors[index], only one of should be filled. + /// @param transmitter - The address that will send the `.report` transaction + /// @param expectedToSucceed - Boolean representing if the report transmission is expected to produce a RequestProcessed event for every fulfillment. If not, we ignore retrieving the event log. + /// @param requestProcessedIndex - On a successful fulfillment the Router will emit a RequestProcessed event. To grab that event we must know the order at which this event was thrown in the report transmission lifecycle. This can change depending on the test setup (e.g. 
the Client contract gives an extra event during its callback) + /// @dev @param transmitterGasToUse is overloaded to give transmitterGasToUse as 0] - Sends the `.report` transaction with the default amount of gas + function _reportAndStore( + uint256[] memory requestNumberKeys, + string[] memory results, + bytes[] memory errors, + address transmitter, + bool expectedToSucceed, + uint8 requestProcessedIndex + ) internal { + _reportAndStore(requestNumberKeys, results, errors, transmitter, expectedToSucceed, requestProcessedIndex, 0); + } + + /// @notice Provide a response from the DON to fulfill one or more requests and store the updated balances of the DON & Admin + /// @param requestNumberKeys - One or more requestNumberKeys that were used to store the request in `s_requests` of the requests, that will be added to the report + /// @param results - The result that will be sent to the consumer contract's callback. For each index, e.g. result[index] or errors[index], only one of should be filled. + /// @param errors - The error that will be sent to the consumer contract's callback. For each index, e.g. result[index] or errors[index], only one of should be filled. + /// @param transmitter - The address that will send the `.report` transaction + /// @param expectedToSucceed - Boolean representing if the report transmission is expected to produce a RequestProcessed event for every fulfillment. If not, we ignore retrieving the event log. + /// @dev @param requestProcessedIndex is overloaded to give requestProcessedIndex as 3 (happy path value)] - On a successful fulfillment the Router will emit a RequestProcessed event. To grab that event we must know the order at which this event was thrown in the report transmission lifecycle. This can change depending on the test setup (e.g. 
the Client contract gives an extra event during its callback) + /// @dev @param transmitterGasToUse is overloaded to give transmitterGasToUse as 0] - Sends the `.report` transaction with the default amount of gas + function _reportAndStore( + uint256[] memory requestNumberKeys, + string[] memory results, + bytes[] memory errors, + address transmitter, + bool expectedToSucceed + ) internal { + _reportAndStore(requestNumberKeys, results, errors, transmitter, expectedToSucceed, 3); + } + + /// @notice Provide a response from the DON to fulfill one or more requests and store the updated balances of the DON & Admin + /// @param requestNumberKeys - One or more requestNumberKeys that were used to store the request in `s_requests` of the requests, that will be added to the report + /// @param results - The result that will be sent to the consumer contract's callback. For each index, e.g. result[index] or errors[index], only one of should be filled. + /// @param errors - The error that will be sent to the consumer contract's callback. For each index, e.g. result[index] or errors[index], only one of should be filled. + /// @param transmitter - The address that will send the `.report` transaction + /// @dev @param expectedToSucceed is overloaded to give the value as true - The report transmission is expected to produce a RequestProcessed event for every fulfillment + /// @dev @param requestProcessedIndex is overloaded to give requestProcessedIndex as 3 (happy path value)] - On a successful fulfillment the Router will emit a RequestProcessed event. To grab that event we must know the order at which this event was thrown in the report transmission lifecycle. This can change depending on the test setup (e.g. 
the Client contract gives an extra event during its callback) + /// @dev @param transmitterGasToUse is overloaded to give transmitterGasToUse as 0] - Sends the `.report` transaction with the default amount of gas + function _reportAndStore( + uint256[] memory requestNumberKeys, + string[] memory results, + bytes[] memory errors, + address transmitter + ) internal { + _reportAndStore(requestNumberKeys, results, errors, transmitter, true); + } + + /// @notice Provide a response from the DON to fulfill one or more requests and store the updated balances of the DON & Admin + /// @param requestNumberKeys - One or more requestNumberKeys that were used to store the request in `s_requests` of the requests, that will be added to the report + /// @param results - The result that will be sent to the consumer contract's callback. For each index, e.g. result[index] or errors[index], only one of should be filled. + /// @param errors - The error that will be sent to the consumer contract's callback. For each index, e.g. result[index] or errors[index], only one of should be filled. + /// @dev @param transmitter is overloaded to give the value of transmitter #1 - The address that will send the `.report` transaction + /// @dev @param expectedToSucceed is overloaded to give the value as true - The report transmission is expected to produce a RequestProcessed event for every fulfillment + /// @dev @param requestProcessedIndex is overloaded to give requestProcessedIndex as 3 (happy path value)] - On a successful fulfillment the Router will emit a RequestProcessed event. To grab that event we must know the order at which this event was thrown in the report transmission lifecycle. This can change depending on the test setup (e.g. 
the Client contract gives an extra event during its callback) + /// @dev @param transmitterGasToUse is overloaded to give transmitterGasToUse as 0] - Sends the `.report` transaction with the default amount of gas + function _reportAndStore( + uint256[] memory requestNumberKeys, + string[] memory results, + bytes[] memory errors + ) internal { + _reportAndStore(requestNumberKeys, results, errors, NOP_TRANSMITTER_ADDRESS_1); + } +} + +/// @notice Set up to have transmitter #1 send a report that fulfills request #1 +contract FunctionsFulfillmentSetup is FunctionsClientRequestSetup { + function setUp() public virtual override { + FunctionsClientRequestSetup.setUp(); + + // Fast forward time by 30 seconds to simulate the DON executing the computation + vm.warp(block.timestamp + 30); + + // Fulfill request 1 + uint256[] memory requestNumberKeys = new uint256[](1); + requestNumberKeys[0] = 1; + string[] memory results = new string[](1); + results[0] = "hello world!"; + bytes[] memory errors = new bytes[](1); + errors[0] = new bytes(0); + + _reportAndStore(requestNumberKeys, results, errors, NOP_TRANSMITTER_ADDRESS_1, true); + } +} + +/// @notice Set up to send and fulfill two more requests, s_request[2] reported by transmitter #2 and s_request[3] reported by transmitter #3 +contract FunctionsMultipleFulfillmentsSetup is FunctionsFulfillmentSetup { + function setUp() public virtual override { + FunctionsFulfillmentSetup.setUp(); + + // Make 2 additional requests (1 already complete) + + // *** Request #2 *** + // Send + string memory sourceCode = "return 'hello world';"; + bytes memory secrets = new bytes(0); + string[] memory args = new string[](0); + bytes[] memory bytesArgs = new bytes[](0); + uint32 callbackGasLimit = 5500; + _sendAndStoreRequest(2, sourceCode, secrets, args, bytesArgs, callbackGasLimit); + // Fulfill as transmitter #2 + uint256[] memory requestNumberKeys1 = new uint256[](1); + requestNumberKeys1[0] = 2; + string[] memory results1 = new string[](1); + 
results1[0] = "hello world!"; + bytes[] memory errors1 = new bytes[](1); + errors1[0] = new bytes(0); + _reportAndStore(requestNumberKeys1, results1, errors1, NOP_TRANSMITTER_ADDRESS_2, true); + + // *** Request #3 *** + // Send + _sendAndStoreRequest(3, sourceCode, secrets, args, bytesArgs, callbackGasLimit); + // Fulfill as transmitter #3 + uint256[] memory requestNumberKeys2 = new uint256[](1); + requestNumberKeys2[0] = 3; + string[] memory results2 = new string[](1); + results2[0] = "hello world!"; + bytes[] memory errors2 = new bytes[](1); + errors2[0] = new bytes(0); + _reportAndStore(requestNumberKeys2, results2, errors2, NOP_TRANSMITTER_ADDRESS_3, true); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientHarness.sol b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientHarness.sol new file mode 100644 index 00000000..f0cb3965 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientHarness.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsClientUpgradeHelper} from "./FunctionsClientUpgradeHelper.sol"; +import {FunctionsResponse} from "../../../dev/v1_X/libraries/FunctionsResponse.sol"; + +/// @title Functions Client Test Harness +/// @notice Contract to expose internal functions for testing purposes +contract FunctionsClientHarness is FunctionsClientUpgradeHelper { + constructor(address router) FunctionsClientUpgradeHelper(router) {} + + function getRouter_HARNESS() external view returns (address) { + return address(i_functionsRouter); + } + + function sendRequest_HARNESS( + bytes memory data, + uint64 subscriptionId, + uint32 callbackGasLimit, + bytes32 donId + ) external returns (bytes32) { + return super._sendRequest(data, subscriptionId, callbackGasLimit, donId); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientTestHelper.sol 
b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientTestHelper.sol new file mode 100644 index 00000000..c300f4d2 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientTestHelper.sol @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {ITermsOfServiceAllowList} from "../../../dev/v1_X/accessControl/interfaces/ITermsOfServiceAllowList.sol"; +import {IFunctionsSubscriptions} from "../../../dev/v1_X/interfaces/IFunctionsSubscriptions.sol"; + +import {FunctionsRequest} from "../../../dev/v1_X/libraries/FunctionsRequest.sol"; +import {FunctionsClient} from "../../../dev/v1_X/FunctionsClient.sol"; + +contract FunctionsClientTestHelper is FunctionsClient { + using FunctionsRequest for FunctionsRequest.Request; + + event SendRequestInvoked(bytes32 requestId, string sourceCode, uint64 subscriptionId); + event FulfillRequestInvoked(bytes32 requestId, bytes response, bytes err); + + bool private s_revertFulfillRequest; + string private s_revertFulfillRequestMessage = "asked to revert"; + bool private s_doInvalidOperation; + bool private s_doInvalidReentrantOperation; + bool private s_doValidReentrantOperation; + + uint64 private s_subscriptionId; + bytes32 private s_donId; + + constructor(address router) FunctionsClient(router) {} + + function sendRequest( + bytes32 donId, + string calldata source, + bytes calldata secrets, + string[] calldata args, + bytes[] memory bytesArgs, + uint64 subscriptionId, + uint32 callbackGasLimit + ) public returns (bytes32 requestId) { + FunctionsRequest.Request memory req; + req._initializeRequestForInlineJavaScript(source); + if (secrets.length > 0) req._addSecretsReference(secrets); + if (args.length > 0) req._setArgs(args); + if (bytesArgs.length > 0) req._setBytesArgs(bytesArgs); + + return _sendRequest(FunctionsRequest._encodeCBOR(req), subscriptionId, callbackGasLimit, donId); + } + + function sendSimpleRequestWithJavaScript( + string memory sourceCode, + 
uint64 subscriptionId, + bytes32 donId, + uint32 callbackGasLimit + ) public returns (bytes32 requestId) { + FunctionsRequest.Request memory request; + request._initializeRequestForInlineJavaScript(sourceCode); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + requestId = _sendRequest(requestData, subscriptionId, callbackGasLimit, donId); + emit SendRequestInvoked(requestId, sourceCode, subscriptionId); + } + + function sendRequestProposed( + string memory sourceCode, + uint64 subscriptionId, + bytes32 donId + ) public returns (bytes32 requestId) { + FunctionsRequest.Request memory request; + uint32 callbackGasLimit = 20_000; + request._initializeRequestForInlineJavaScript(sourceCode); + bytes memory requestData = FunctionsRequest._encodeCBOR(request); + requestId = i_functionsRouter.sendRequestToProposed( + subscriptionId, + requestData, + FunctionsRequest.REQUEST_DATA_VERSION, + callbackGasLimit, + donId + ); + emit RequestSent(requestId); + emit SendRequestInvoked(requestId, sourceCode, subscriptionId); + } + + function acceptTermsOfService(address acceptor, address recipient, bytes32 r, bytes32 s, uint8 v) external { + bytes32 allowListId = i_functionsRouter.getAllowListId(); + ITermsOfServiceAllowList allowList = ITermsOfServiceAllowList(i_functionsRouter.getContractById(allowListId)); + allowList.acceptTermsOfService(acceptor, recipient, r, s, v); + } + + function acceptSubscriptionOwnerTransfer(uint64 subscriptionId) external { + IFunctionsSubscriptions(address(i_functionsRouter)).acceptSubscriptionOwnerTransfer(subscriptionId); + } + + function _fulfillRequest(bytes32 requestId, bytes memory response, bytes memory err) internal override { + if (s_revertFulfillRequest) { + revert(s_revertFulfillRequestMessage); + } + if (s_doInvalidOperation) { + uint256 x = 1; + uint256 y = 0; + x = x / y; + } + if (s_doValidReentrantOperation) { + sendSimpleRequestWithJavaScript("somedata", s_subscriptionId, s_donId, 20_000); + } + if 
(s_doInvalidReentrantOperation) { + IFunctionsSubscriptions(address(i_functionsRouter)).cancelSubscription(s_subscriptionId, msg.sender); + } + emit FulfillRequestInvoked(requestId, response, err); + } + + function setRevertFulfillRequest(bool on) external { + s_revertFulfillRequest = on; + } + + function setRevertFulfillRequestMessage(string memory message) external { + s_revertFulfillRequestMessage = message; + } + + function setDoInvalidOperation(bool on) external { + s_doInvalidOperation = on; + } + + function setDoInvalidReentrantOperation(bool on, uint64 subscriptionId) external { + s_doInvalidReentrantOperation = on; + s_subscriptionId = subscriptionId; + } + + function setDoValidReentrantOperation(bool on, uint64 subscriptionId, bytes32 donId) external { + s_doValidReentrantOperation = on; + s_subscriptionId = subscriptionId; + s_donId = donId; + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientUpgradeHelper.sol b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientUpgradeHelper.sol new file mode 100644 index 00000000..0c9b3c58 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientUpgradeHelper.sol @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsRequest} from "../../../dev/v1_X/libraries/FunctionsRequest.sol"; +import {FunctionsClient} from "../../../dev/v1_X/FunctionsClient.sol"; +import {ConfirmedOwner} from "../../../../shared/access/ConfirmedOwner.sol"; + +contract FunctionsClientUpgradeHelper is FunctionsClient, ConfirmedOwner { + using FunctionsRequest for FunctionsRequest.Request; + + constructor(address router) FunctionsClient(router) ConfirmedOwner(msg.sender) {} + + event ResponseReceived(bytes32 indexed requestId, bytes result, bytes err); + + /** + * @notice Send a simple request + * + * @param donId DON ID + * @param source JavaScript source code + * @param secrets Encrypted secrets payload + * @param args List of 
arguments accessible from within the source code + * @param subscriptionId Funtions billing subscription ID + * @param callbackGasLimit Maximum amount of gas used to call the client contract's `handleOracleFulfillment` function + * @return Functions request ID + */ + function sendRequest( + bytes32 donId, + string calldata source, + bytes calldata secrets, + string[] calldata args, + bytes[] memory bytesArgs, + uint64 subscriptionId, + uint32 callbackGasLimit + ) public onlyOwner returns (bytes32) { + FunctionsRequest.Request memory req; + req._initializeRequestForInlineJavaScript(source); + if (secrets.length > 0) req._addSecretsReference(secrets); + if (args.length > 0) req._setArgs(args); + if (bytesArgs.length > 0) req._setBytesArgs(bytesArgs); + + return _sendRequest(FunctionsRequest._encodeCBOR(req), subscriptionId, callbackGasLimit, donId); + } + + function sendRequestBytes( + bytes memory data, + uint64 subscriptionId, + uint32 callbackGasLimit, + bytes32 donId + ) public returns (bytes32 requestId) { + return _sendRequest(data, subscriptionId, callbackGasLimit, donId); + } + + /** + * @notice Same as sendRequest but for DONHosted secrets + */ + function sendRequestWithDONHostedSecrets( + bytes32 donId, + string calldata source, + uint8 slotId, + uint64 slotVersion, + string[] calldata args, + uint64 subscriptionId, + uint32 callbackGasLimit + ) public onlyOwner returns (bytes32) { + FunctionsRequest.Request memory req; + req._initializeRequestForInlineJavaScript(source); + req._addDONHostedSecrets(slotId, slotVersion); + + if (args.length > 0) req._setArgs(args); + + return _sendRequest(FunctionsRequest._encodeCBOR(req), subscriptionId, callbackGasLimit, donId); + } + + // @notice Sends a Plugin Functions request + // @param data The CBOR encoded bytes data for a Functions request + // @param subscriptionId The subscription ID that will be charged to service the request + // @param callbackGasLimit the amount of gas that will be available for the 
fulfillment callback + // @return requestId The generated request ID for this request + function _sendRequestToProposed( + bytes memory data, + uint64 subscriptionId, + uint32 callbackGasLimit, + bytes32 donId + ) internal returns (bytes32) { + bytes32 requestId = i_functionsRouter.sendRequestToProposed( + subscriptionId, + data, + FunctionsRequest.REQUEST_DATA_VERSION, + callbackGasLimit, + donId + ); + emit RequestSent(requestId); + return requestId; + } + + /** + * @notice Send a simple request to the proposed contract + * + * @param donId DON ID + * @param source JavaScript source code + * @param secrets Encrypted secrets payload + * @param args List of arguments accessible from within the source code + * @param subscriptionId Funtions billing subscription ID + * @param callbackGasLimit Maximum amount of gas used to call the client contract's `handleOracleFulfillment` function + * @return Functions request ID + */ + function sendRequestToProposed( + bytes32 donId, + string calldata source, + bytes calldata secrets, + string[] calldata args, + bytes[] memory bytesArgs, + uint64 subscriptionId, + uint32 callbackGasLimit + ) public onlyOwner returns (bytes32) { + FunctionsRequest.Request memory req; + req._initializeRequestForInlineJavaScript(source); + if (secrets.length > 0) req._addSecretsReference(secrets); + if (args.length > 0) req._setArgs(args); + if (bytesArgs.length > 0) req._setBytesArgs(bytesArgs); + + return _sendRequestToProposed(FunctionsRequest._encodeCBOR(req), subscriptionId, callbackGasLimit, donId); + } + + /** + * @notice Same as sendRequestToProposed but for DONHosted secrets + */ + function sendRequestToProposedWithDONHostedSecrets( + bytes32 donId, + string calldata source, + uint8 slotId, + uint64 slotVersion, + string[] calldata args, + uint64 subscriptionId, + uint32 callbackGasLimit + ) public onlyOwner returns (bytes32) { + FunctionsRequest.Request memory req; + req._initializeRequestForInlineJavaScript(source); + 
req._addDONHostedSecrets(slotId, slotVersion); + + if (args.length > 0) req._setArgs(args); + + return _sendRequestToProposed(FunctionsRequest._encodeCBOR(req), subscriptionId, callbackGasLimit, donId); + } + + /** + * @notice Callback that is invoked once the DON has resolved the request or hit an error + * + * @param requestId The request ID, returned by sendRequest() + * @param response Aggregated response from the user code + * @param err Aggregated error from the user code or from the execution pipeline + * Either response or error parameter will be set, but never both + */ + function _fulfillRequest(bytes32 requestId, bytes memory response, bytes memory err) internal override { + emit ResponseReceived(requestId, response, err); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientWithEmptyCallback.sol b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientWithEmptyCallback.sol new file mode 100644 index 00000000..e5674717 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientWithEmptyCallback.sol @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsRequest} from "../../../dev/v1_X/libraries/FunctionsRequest.sol"; +import {FunctionsClient} from "../../../dev/v1_X/FunctionsClient.sol"; + +contract FunctionsClientWithEmptyCallback is FunctionsClient { + using FunctionsRequest for FunctionsRequest.Request; + + event SendRequestInvoked(bytes32 requestId, string sourceCode, uint64 subscriptionId); + event FulfillRequestInvoked(bytes32 requestId, bytes response, bytes err); + + constructor(address router) FunctionsClient(router) {} + + function sendSimpleRequestWithJavaScript( + string memory sourceCode, + uint64 subscriptionId, + bytes32 donId, + uint32 callbackGasLimit + ) public returns (bytes32 requestId) { + FunctionsRequest.Request memory request; + request._initializeRequestForInlineJavaScript(sourceCode); + bytes memory requestData = 
FunctionsRequest._encodeCBOR(request); + requestId = _sendRequest(requestData, subscriptionId, callbackGasLimit, donId); + emit SendRequestInvoked(requestId, sourceCode, subscriptionId); + } + + function _fulfillRequest(bytes32 /*requestId*/, bytes memory /*response*/, bytes memory /*err*/) internal override { + // Do nothing + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsCoordinatorHarness.sol b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsCoordinatorHarness.sol new file mode 100644 index 00000000..e4e9f727 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsCoordinatorHarness.sol @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsCoordinator} from "../../../dev/v1_X/FunctionsCoordinator.sol"; +import {FunctionsBilling} from "../../../dev/v1_X/FunctionsBilling.sol"; +import {FunctionsResponse} from "../../../dev/v1_X/libraries/FunctionsResponse.sol"; +import {FunctionsBillingConfig} from "../../../dev/v1_X/interfaces/IFunctionsBilling.sol"; + +/// @title Functions Coordinator Test Harness +/// @notice Contract to expose internal functions for testing purposes +contract FunctionsCoordinatorHarness is FunctionsCoordinator { + address s_linkToNativeFeed_HARNESS; + address s_router_HARNESS; + + constructor( + address router, + FunctionsBillingConfig memory config, + address linkToNativeFeed + ) FunctionsCoordinator(router, config, linkToNativeFeed) { + s_linkToNativeFeed_HARNESS = linkToNativeFeed; + s_router_HARNESS = router; + } + + function isTransmitter_HARNESS(address node) external view returns (bool) { + return super._isTransmitter(node); + } + + function beforeSetConfig_HARNESS(uint8 _f, bytes memory _onchainConfig) external { + return super._beforeSetConfig(_f, _onchainConfig); + } + + /// @dev Used by FunctionsBilling.sol + function getTransmitters_HARNESS() external view returns (address[] memory) { + return super._getTransmitters(); 
+ } + + function report_HARNESS(DecodedReport memory decodedReport) external { + return super._report(decodedReport); + } + + function onlyOwner_HARNESS() external view { + return super._onlyOwner(); + } + + // ================================================================ + // | Functions Billing | + // ================================================================ + + function getLinkToNativeFeed_HARNESS() external view returns (address) { + return s_linkToNativeFeed_HARNESS; + } + + function getRouter_HARNESS() external view returns (address) { + return s_router_HARNESS; + } + + function calculateCostEstimate_HARNESS( + uint32 callbackGasLimit, + uint256 gasPriceWei, + uint72 donFee, + uint72 adminFee + ) external view returns (uint96) { + return super._calculateCostEstimate(callbackGasLimit, gasPriceWei, donFee, adminFee); + } + + function startBilling_HARNESS( + FunctionsResponse.RequestMeta memory request + ) external returns (FunctionsResponse.Commitment memory commitment) { + return super._startBilling(request); + } + + function fulfillAndBill_HARNESS( + bytes32 requestId, + bytes memory response, + bytes memory err, + bytes memory onchainMetadata, + bytes memory offchainMetadata, + uint8 reportBatchSize + ) external returns (FunctionsResponse.FulfillResult) { + return super._fulfillAndBill(requestId, response, err, onchainMetadata, offchainMetadata, reportBatchSize); + } + + function disperseFeePool_HARNESS() external { + return super._disperseFeePool(); + } + + // ================================================================ + // | OCR2 | + // ================================================================ + + function configDigestFromConfigData_HARNESS( + uint256 _chainId, + address _contractAddress, + uint64 _configCount, + address[] memory _signers, + address[] memory _transmitters, + uint8 _f, + bytes memory _onchainConfig, + uint64 _encodedConfigVersion, + bytes memory _encodedConfig + ) internal pure returns (bytes32) { + return + 
super._configDigestFromConfigData( + _chainId, + _contractAddress, + _configCount, + _signers, + _transmitters, + _f, + _onchainConfig, + _encodedConfigVersion, + _encodedConfig + ); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsCoordinatorTestHelper.sol b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsCoordinatorTestHelper.sol new file mode 100644 index 00000000..abfec4c2 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsCoordinatorTestHelper.sol @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsCoordinator} from "../../../dev/v1_X/FunctionsCoordinator.sol"; +import {FunctionsBilling} from "../../../dev/v1_X/FunctionsBilling.sol"; +import {FunctionsBillingConfig} from "../../../dev/v1_X/interfaces/IFunctionsBilling.sol"; + +contract FunctionsCoordinatorTestHelper is FunctionsCoordinator { + constructor( + address router, + FunctionsBillingConfig memory config, + address linkToNativeFeed + ) FunctionsCoordinator(router, config, linkToNativeFeed) {} + + function callReport(bytes calldata report) external { + address[MAX_NUM_ORACLES] memory signers; + signers[0] = msg.sender; + ( + bytes32[] memory requestIds, + bytes[] memory results, + bytes[] memory errors, + bytes[] memory onchainMetadata, + bytes[] memory offchainMetadata + ) = abi.decode(report, (bytes32[], bytes[], bytes[], bytes[], bytes[])); + _report( + DecodedReport({ + requestIds: requestIds, + results: results, + errors: errors, + onchainMetadata: onchainMetadata, + offchainMetadata: offchainMetadata + }) + ); + } + + function callReportMultipleSigners(bytes calldata report, address secondSigner) external { + address[MAX_NUM_ORACLES] memory signers; + signers[0] = msg.sender; + signers[1] = secondSigner; + ( + bytes32[] memory requestIds, + bytes[] memory results, + bytes[] memory errors, + bytes[] memory onchainMetadata, + bytes[] memory offchainMetadata + ) = abi.decode(report, 
(bytes32[], bytes[], bytes[], bytes[], bytes[])); + _report( + DecodedReport({ + requestIds: requestIds, + results: results, + errors: errors, + onchainMetadata: onchainMetadata, + offchainMetadata: offchainMetadata + }) + ); + } + + function callReportWithSigners(bytes calldata report, address[MAX_NUM_ORACLES] memory /* signers */) external { + ( + bytes32[] memory requestIds, + bytes[] memory results, + bytes[] memory errors, + bytes[] memory onchainMetadata, + bytes[] memory offchainMetadata + ) = abi.decode(report, (bytes32[], bytes[], bytes[], bytes[], bytes[])); + _report( + DecodedReport({ + requestIds: requestIds, + results: results, + errors: errors, + onchainMetadata: onchainMetadata, + offchainMetadata: offchainMetadata + }) + ); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsLoadTestClient.sol b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsLoadTestClient.sol new file mode 100644 index 00000000..4b112c4e --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsLoadTestClient.sol @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsClient} from "../../../dev/v1_X/FunctionsClient.sol"; +import {ConfirmedOwner} from "../../../../shared/access/ConfirmedOwner.sol"; +import {FunctionsRequest} from "../../../dev/v1_X/libraries/FunctionsRequest.sol"; + +/** + * @title Plugin Functions load test client implementation + */ +contract FunctionsLoadTestClient is FunctionsClient, ConfirmedOwner { + using FunctionsRequest for FunctionsRequest.Request; + + uint32 public constant MAX_CALLBACK_GAS = 250_000; + + bytes32 public lastRequestID; + bytes public lastResponse; + bytes public lastError; + uint32 public totalRequests; + uint32 public totalEmptyResponses; + uint32 public totalSucceededResponses; + uint32 public totalFailedResponses; + + constructor(address router) FunctionsClient(router) ConfirmedOwner(msg.sender) {} + + /** + * @notice Send a 
simple request + * @param times Number of times to send the request + * @param source JavaScript source code + * @param encryptedSecretsReferences Encrypted secrets payload + * @param args List of arguments accessible from within the source code + * @param subscriptionId Billing ID + * @param donId DON ID + */ + function sendRequest( + uint32 times, + string calldata source, + bytes calldata encryptedSecretsReferences, + string[] calldata args, + uint64 subscriptionId, + bytes32 donId + ) external onlyOwner { + FunctionsRequest.Request memory req; + req._initializeRequestForInlineJavaScript(source); + if (encryptedSecretsReferences.length > 0) req._addSecretsReference(encryptedSecretsReferences); + if (args.length > 0) req._setArgs(args); + uint i = 0; + for (i = 0; i < times; i++) { + lastRequestID = _sendRequest(req._encodeCBOR(), subscriptionId, MAX_CALLBACK_GAS, donId); + totalRequests += 1; + } + } + + /** + * @notice Same as sendRequest but for DONHosted secrets + * @param times Number of times to send the request + * @param source JavaScript source code + * @param slotId DON hosted secrets slot ID + * @param slotVersion DON hosted secrets slot version + * @param args List of arguments accessible from within the source code + * @param subscriptionId Billing ID + * @param donId DON ID + */ + function sendRequestWithDONHostedSecrets( + uint32 times, + string calldata source, + uint8 slotId, + uint64 slotVersion, + string[] calldata args, + uint64 subscriptionId, + bytes32 donId + ) public onlyOwner { + FunctionsRequest.Request memory req; + req._initializeRequestForInlineJavaScript(source); + req._addDONHostedSecrets(slotId, slotVersion); + if (args.length > 0) req._setArgs(args); + uint i = 0; + for (i = 0; i < times; i++) { + lastRequestID = _sendRequest(req._encodeCBOR(), subscriptionId, MAX_CALLBACK_GAS, donId); + totalRequests += 1; + } + } + + /** + * @notice Sends a Plugin Functions request that has already been CBOR encoded + * @param times Number of 
times to send the request + * @param cborEncodedRequest The CBOR encoded bytes data for a Functions request + * @param subscriptionId The subscription ID that will be charged to service the request + * @param donId DON ID + */ + function sendEncodedRequest( + uint32 times, + bytes memory cborEncodedRequest, + uint64 subscriptionId, + bytes32 donId + ) public onlyOwner { + uint i = 0; + for (i = 0; i < times; i++) { + lastRequestID = _sendRequest(cborEncodedRequest, subscriptionId, MAX_CALLBACK_GAS, donId); + totalRequests += 1; + } + } + + function resetStats() external onlyOwner { + lastRequestID = ""; + lastResponse = ""; + lastError = ""; + totalRequests = 0; + totalSucceededResponses = 0; + totalFailedResponses = 0; + totalEmptyResponses = 0; + } + + function getStats() + public + view + onlyOwner + returns (bytes32, bytes memory, bytes memory, uint32, uint32, uint32, uint32) + { + return ( + lastRequestID, + lastResponse, + lastError, + totalRequests, + totalSucceededResponses, + totalFailedResponses, + totalEmptyResponses + ); + } + + /** + * @notice Store latest result/error + * @param requestId The request ID, returned by sendRequest() + * @param response Aggregated response from the user code + * @param err Aggregated error from the user code or from the execution pipeline + * Either response or error parameter will be set, but never both + */ + function _fulfillRequest(bytes32 requestId, bytes memory response, bytes memory err) internal override { + lastRequestID = requestId; + lastResponse = response; + lastError = err; + if (response.length == 0) { + totalEmptyResponses += 1; + } + if (err.length != 0) { + totalFailedResponses += 1; + } + if (response.length != 0 && err.length == 0) { + totalSucceededResponses += 1; + } + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsRouterHarness.sol b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsRouterHarness.sol new file mode 100644 index 00000000..7caeff49 --- 
/dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsRouterHarness.sol @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsRouter} from "../../../dev/v1_X/FunctionsRouter.sol"; + +/// @title Functions Router Test Harness +/// @notice Contract to expose internal functions for testing purposes +contract FunctionsRouterHarness is FunctionsRouter { + constructor(address linkToken, Config memory config) FunctionsRouter(linkToken, config) {} + + function getMaxConsumers_HARNESS() external view returns (uint16) { + return super._getMaxConsumers(); + } + + function getSubscriptionDepositDetails_HARNESS() external view returns (uint16, uint72) { + return super._getSubscriptionDepositDetails(); + } + + function whenNotPaused_HARNESS() external view { + return super._whenNotPaused(); + } + + function onlyRouterOwner_HARNESS() external view { + return super._onlyRouterOwner(); + } + + function onlySenderThatAcceptedToS_HARNESS() external view { + return super._onlySenderThatAcceptedToS(); + } +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsSubscriptionsHarness.sol b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsSubscriptionsHarness.sol new file mode 100644 index 00000000..2e2427f6 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsSubscriptionsHarness.sol @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsSubscriptions} from "../../../dev/v1_X/FunctionsSubscriptions.sol"; + +/// @title Functions Subscriptions Test Harness +/// @notice Contract to expose internal functions for testing purposes +contract FunctionsSubscriptionsHarness is FunctionsSubscriptions { + constructor(address link) FunctionsSubscriptions(link) {} + + function markRequestInFlight_HARNESS(address client, uint64 subscriptionId, uint96 estimatedTotalCostJuels) external { + return super._markRequestInFlight(client, 
subscriptionId, estimatedTotalCostJuels); + } + + function pay_HARNESS( + uint64 subscriptionId, + uint96 estimatedTotalCostJuels, + address client, + uint96 adminFee, + uint96 juelsPerGas, + uint96 gasUsed, + uint96 costWithoutCallbackJuels + ) external returns (Receipt memory) { + return + super._pay( + subscriptionId, + estimatedTotalCostJuels, + client, + adminFee, + juelsPerGas, + gasUsed, + costWithoutCallbackJuels + ); + } + + function isExistingSubscription_HARNESS(uint64 subscriptionId) external view { + return super._isExistingSubscription(subscriptionId); + } + + function isAllowedConsumer_HARNESS(address client, uint64 subscriptionId) external view { + return super._isAllowedConsumer(client, subscriptionId); + } + + // Overrides + function _getMaxConsumers() internal view override returns (uint16) {} + + function _getSubscriptionDepositDetails() internal override returns (uint16, uint72) {} + + function _onlySenderThatAcceptedToS() internal override {} + + function _onlyRouterOwner() internal override {} + + function _whenNotPaused() internal override {} +} diff --git a/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsTestHelper.sol b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsTestHelper.sol new file mode 100644 index 00000000..50e90c44 --- /dev/null +++ b/contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsTestHelper.sol @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsRequest} from "../../../dev/v1_X/libraries/FunctionsRequest.sol"; + +contract FunctionsTestHelper { + using FunctionsRequest for FunctionsRequest.Request; + + FunctionsRequest.Request private s_req; + + event RequestData(bytes data); + + function closeEvent() public { + emit RequestData(s_req._encodeCBOR()); + } + + function initializeRequestForInlineJavaScript(string memory sourceCode) public { + FunctionsRequest.Request memory r; + r._initializeRequestForInlineJavaScript(sourceCode); + 
storeRequest(r); + } + + function addSecretsReference(bytes memory secrets) public { + FunctionsRequest.Request memory r = s_req; + r._addSecretsReference(secrets); + storeRequest(r); + } + + function addEmptyArgs() public pure { + FunctionsRequest.Request memory r; + string[] memory args; + r._setArgs(args); + } + + function addTwoArgs(string memory arg1, string memory arg2) public { + string[] memory args = new string[](2); + args[0] = arg1; + args[1] = arg2; + FunctionsRequest.Request memory r = s_req; + r._setArgs(args); + storeRequest(r); + } + + function storeRequest(FunctionsRequest.Request memory r) private { + s_req.codeLocation = r.codeLocation; + s_req.language = r.language; + s_req.source = r.source; + s_req.args = r.args; + s_req.secretsLocation = r.secretsLocation; + s_req.encryptedSecretsReference = r.encryptedSecretsReference; + } +} diff --git a/contracts/src/v0.8/functions/v1_0_0/FunctionsBilling.sol b/contracts/src/v0.8/functions/v1_0_0/FunctionsBilling.sol new file mode 100644 index 00000000..205a5550 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/FunctionsBilling.sol @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {IFunctionsSubscriptions} from "./interfaces/IFunctionsSubscriptions.sol"; +import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol"; +import {IFunctionsBilling} from "./interfaces/IFunctionsBilling.sol"; + +import {Routable} from "./Routable.sol"; +import {FunctionsResponse} from "./libraries/FunctionsResponse.sol"; + +import {SafeCast} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SafeCast.sol"; + +/// @title Functions Billing contract +/// @notice Contract that calculates payment from users to the nodes of the Decentralized Oracle Network (DON). 
+abstract contract FunctionsBilling is Routable, IFunctionsBilling { + using FunctionsResponse for FunctionsResponse.RequestMeta; + using FunctionsResponse for FunctionsResponse.Commitment; + using FunctionsResponse for FunctionsResponse.FulfillResult; + + uint256 private constant REASONABLE_GAS_PRICE_CEILING = 1_000_000_000_000_000; // 1 million gwei + // ================================================================ + // | Request Commitment state | + // ================================================================ + + mapping(bytes32 requestId => bytes32 commitmentHash) private s_requestCommitments; + + event CommitmentDeleted(bytes32 requestId); + + // ================================================================ + // | Configuration state | + // ================================================================ + + struct Config { + uint32 fulfillmentGasPriceOverEstimationBP; // ══╗ Percentage of gas price overestimation to account for changes in gas price between request and response. Held as basis points (one hundredth of 1 percentage point) + uint32 feedStalenessSeconds; // ║ How long before we consider the feed price to be stale and fallback to fallbackNativePerUnitLink. + uint32 gasOverheadBeforeCallback; // ║ Represents the average gas execution cost before the fulfillment callback. This amount is always billed for every request. + uint32 gasOverheadAfterCallback; // ║ Represents the average gas execution cost after the fulfillment callback. This amount is always billed for every request. + uint32 requestTimeoutSeconds; // ║ How many seconds it takes before we consider a request to be timed out + uint72 donFee; // ║ Additional flat fee (in Juels of PLI) that will be split between Node Operators. Max value is 2^80 - 1 == 1.2m PLI. + uint16 maxSupportedRequestDataVersion; // ═══════╝ The highest support request data version supported by the node. All lower versions should also be supported. 
+ uint224 fallbackNativePerUnitLink; // ═══════════╸ fallback NATIVE CURRENCY / PLI conversion rate if the data feed is stale + } + + Config private s_config; + + event ConfigUpdated(Config config); + + error UnsupportedRequestDataVersion(); + error InsufficientBalance(); + error InvalidSubscription(); + error UnauthorizedSender(); + error MustBeSubOwner(address owner); + error InvalidLinkWeiPrice(int256 linkWei); + error PaymentTooLarge(); + error NoTransmittersSet(); + error InvalidCalldata(); + + // ================================================================ + // | Balance state | + // ================================================================ + + mapping(address transmitter => uint96 balanceJuelsLink) private s_withdrawableTokens; + // Pool together collected DON fees + // Disperse them on withdrawal or change in OCR configuration + uint96 internal s_feePool; + + AggregatorV3Interface private s_linkToNativeFeed; + + // ================================================================ + // | Initialization | + // ================================================================ + constructor(address router, Config memory config, address linkToNativeFeed) Routable(router) { + s_linkToNativeFeed = AggregatorV3Interface(linkToNativeFeed); + + updateConfig(config); + } + + // ================================================================ + // | Configuration | + // ================================================================ + + /// @notice Gets the Plugin Coordinator's billing configuration + /// @return config + function getConfig() external view returns (Config memory) { + return s_config; + } + + /// @notice Sets the Plugin Coordinator's billing configuration + /// @param config - See the contents of the Config struct in IFunctionsBilling.Config for more information + function updateConfig(Config memory config) public { + _onlyOwner(); + + s_config = config; + emit ConfigUpdated(config); + } + + // 
================================================================ + // | Fee Calculation | + // ================================================================ + + /// @inheritdoc IFunctionsBilling + function getDONFee(bytes memory /* requestData */) public view override returns (uint72) { + return s_config.donFee; + } + + /// @inheritdoc IFunctionsBilling + function getAdminFee() public view override returns (uint72) { + return _getRouter().getAdminFee(); + } + + /// @inheritdoc IFunctionsBilling + function getWeiPerUnitLink() public view returns (uint256) { + Config memory config = s_config; + (, int256 weiPerUnitLink, , uint256 timestamp, ) = s_linkToNativeFeed.latestRoundData(); + // solhint-disable-next-line not-rely-on-time + if (config.feedStalenessSeconds < block.timestamp - timestamp && config.feedStalenessSeconds > 0) { + return config.fallbackNativePerUnitLink; + } + if (weiPerUnitLink <= 0) { + revert InvalidLinkWeiPrice(weiPerUnitLink); + } + return uint256(weiPerUnitLink); + } + + function _getJuelsPerGas(uint256 gasPriceWei) private view returns (uint96) { + // (1e18 juels/link) * (wei/gas) / (wei/link) = juels per gas + // There are only 1e9*1e18 = 1e27 juels in existence, should not exceed uint96 (2^96 ~ 7e28) + return SafeCast.toUint96((1e18 * gasPriceWei) / getWeiPerUnitLink()); + } + + // ================================================================ + // | Cost Estimation | + // ================================================================ + + /// @inheritdoc IFunctionsBilling + function estimateCost( + uint64 subscriptionId, + bytes calldata data, + uint32 callbackGasLimit, + uint256 gasPriceWei + ) external view override returns (uint96) { + _getRouter().isValidCallbackGasLimit(subscriptionId, callbackGasLimit); + // Reasonable ceilings to prevent integer overflows + if (gasPriceWei > REASONABLE_GAS_PRICE_CEILING) { + revert InvalidCalldata(); + } + uint72 adminFee = getAdminFee(); + uint72 donFee = getDONFee(data); + return 
_calculateCostEstimate(callbackGasLimit, gasPriceWei, donFee, adminFee); + } + + /// @notice Estimate the cost in Juels of PLI + // that will be charged to a subscription to fulfill a Functions request + // Gas Price can be overestimated to account for flucuations between request and response time + function _calculateCostEstimate( + uint32 callbackGasLimit, + uint256 gasPriceWei, + uint72 donFee, + uint72 adminFee + ) internal view returns (uint96) { + uint256 executionGas = s_config.gasOverheadBeforeCallback + s_config.gasOverheadAfterCallback + callbackGasLimit; + + uint256 gasPriceWithOverestimation = gasPriceWei + + ((gasPriceWei * s_config.fulfillmentGasPriceOverEstimationBP) / 10_000); + /// @NOTE: Basis Points are 1/100th of 1%, divide by 10_000 to bring back to original units + + uint96 juelsPerGas = _getJuelsPerGas(gasPriceWithOverestimation); + uint256 estimatedGasReimbursement = juelsPerGas * executionGas; + uint96 fees = uint96(donFee) + uint96(adminFee); + + return SafeCast.toUint96(estimatedGasReimbursement + fees); + } + + // ================================================================ + // | Billing | + // ================================================================ + + /// @notice Initiate the billing process for an Functions request + /// @dev Only callable by the Functions Router + /// @param request - Plugin Functions request data, see FunctionsResponse.RequestMeta for the structure + /// @return commitment - The parameters of the request that must be held consistent at response time + function _startBilling( + FunctionsResponse.RequestMeta memory request + ) internal returns (FunctionsResponse.Commitment memory commitment) { + Config memory config = s_config; + + // Nodes should support all past versions of the structure + if (request.dataVersion > config.maxSupportedRequestDataVersion) { + revert UnsupportedRequestDataVersion(); + } + + uint72 donFee = getDONFee(request.data); + uint96 estimatedTotalCostJuels = _calculateCostEstimate( 
+ request.callbackGasLimit, + tx.gasprice, + donFee, + request.adminFee + ); + + // Check that subscription can afford the estimated cost + if ((request.availableBalance) < estimatedTotalCostJuels) { + revert InsufficientBalance(); + } + + bytes32 requestId = _computeRequestId( + address(this), + request.requestingContract, + request.subscriptionId, + request.initiatedRequests + 1 + ); + + commitment = FunctionsResponse.Commitment({ + adminFee: request.adminFee, + coordinator: address(this), + client: request.requestingContract, + subscriptionId: request.subscriptionId, + callbackGasLimit: request.callbackGasLimit, + estimatedTotalCostJuels: estimatedTotalCostJuels, + timeoutTimestamp: uint32(block.timestamp + config.requestTimeoutSeconds), + requestId: requestId, + donFee: donFee, + gasOverheadBeforeCallback: config.gasOverheadBeforeCallback, + gasOverheadAfterCallback: config.gasOverheadAfterCallback + }); + + s_requestCommitments[requestId] = keccak256(abi.encode(commitment)); + + return commitment; + } + + /// @notice Generate a keccak hash request ID + /// @dev uses the number of requests that the consumer of a subscription has sent as a nonce + function _computeRequestId( + address don, + address client, + uint64 subscriptionId, + uint64 nonce + ) private pure returns (bytes32) { + return keccak256(abi.encode(don, client, subscriptionId, nonce)); + } + + /// @notice Finalize billing process for an Functions request by sending a callback to the Client contract and then charging the subscription + /// @param requestId identifier for the request that was generated by the Registry in the beginBilling commitment + /// @param response response data from DON consensus + /// @param err error from DON consensus + /// @return result fulfillment result + /// @dev Only callable by a node that has been approved on the Coordinator + /// @dev simulated offchain to determine if sufficient balance is present to fulfill the request + function _fulfillAndBill( + bytes32 
requestId, + bytes memory response, + bytes memory err, + bytes memory onchainMetadata, + bytes memory /* offchainMetadata TODO: use in getDonFee() for dynamic billing */ + ) internal returns (FunctionsResponse.FulfillResult) { + FunctionsResponse.Commitment memory commitment = abi.decode(onchainMetadata, (FunctionsResponse.Commitment)); + + if (s_requestCommitments[requestId] == bytes32(0)) { + return FunctionsResponse.FulfillResult.INVALID_REQUEST_ID; + } + + if (s_requestCommitments[requestId] != keccak256(abi.encode(commitment))) { + return FunctionsResponse.FulfillResult.INVALID_COMMITMENT; + } + + uint96 juelsPerGas = _getJuelsPerGas(tx.gasprice); + // Gas overhead without callback + uint96 gasOverheadJuels = juelsPerGas * + (commitment.gasOverheadBeforeCallback + commitment.gasOverheadAfterCallback); + + // The Functions Router will perform the callback to the client contract + (FunctionsResponse.FulfillResult resultCode, uint96 callbackCostJuels) = _getRouter().fulfill( + response, + err, + juelsPerGas, + gasOverheadJuels + commitment.donFee, // costWithoutFulfillment + msg.sender, + commitment + ); + + // The router will only pay the DON on successfully processing the fulfillment + // In these two fulfillment results the user has been charged + // Otherwise, the Coordinator should hold on to the request commitment + if ( + resultCode == FunctionsResponse.FulfillResult.FULFILLED || + resultCode == FunctionsResponse.FulfillResult.USER_CALLBACK_ERROR + ) { + delete s_requestCommitments[requestId]; + // Reimburse the transmitter for the fulfillment gas cost + s_withdrawableTokens[msg.sender] = gasOverheadJuels + callbackCostJuels; + // Put donFee into the pool of fees, to be split later + // Saves on storage writes that would otherwise be charged to the user + s_feePool += commitment.donFee; + } + + return resultCode; + } + + // ================================================================ + // | Request Timeout | + // 
================================================================ + + /// @inheritdoc IFunctionsBilling + /// @dev Only callable by the Router + /// @dev Used by FunctionsRouter.sol during timeout of a request + function deleteCommitment(bytes32 requestId) external override onlyRouter { + // Delete commitment + delete s_requestCommitments[requestId]; + emit CommitmentDeleted(requestId); + } + + // ================================================================ + // | Fund withdrawal | + // ================================================================ + + /// @inheritdoc IFunctionsBilling + function oracleWithdraw(address recipient, uint96 amount) external { + _disperseFeePool(); + + if (amount == 0) { + amount = s_withdrawableTokens[msg.sender]; + } else if (s_withdrawableTokens[msg.sender] < amount) { + revert InsufficientBalance(); + } + s_withdrawableTokens[msg.sender] -= amount; + IFunctionsSubscriptions(address(_getRouter())).oracleWithdraw(recipient, amount); + } + + /// @inheritdoc IFunctionsBilling + /// @dev Only callable by the Coordinator owner + function oracleWithdrawAll() external { + _onlyOwner(); + _disperseFeePool(); + + address[] memory transmitters = _getTransmitters(); + + // Bounded by "maxNumOracles" on OCR2Abstract.sol + for (uint256 i = 0; i < transmitters.length; ++i) { + uint96 balance = s_withdrawableTokens[transmitters[i]]; + if (balance > 0) { + s_withdrawableTokens[transmitters[i]] = 0; + IFunctionsSubscriptions(address(_getRouter())).oracleWithdraw(transmitters[i], balance); + } + } + } + + // Overriden in FunctionsCoordinator, which has visibility into transmitters + function _getTransmitters() internal view virtual returns (address[] memory); + + // DON fees are collected into a pool s_feePool + // When OCR configuration changes, or any oracle withdraws, this must be dispersed + function _disperseFeePool() internal { + if (s_feePool == 0) { + return; + } + // All transmitters are assumed to also be observers + // Pay out the DON 
fee to all transmitters + address[] memory transmitters = _getTransmitters(); + if (transmitters.length == 0) { + revert NoTransmittersSet(); + } + uint96 feePoolShare = s_feePool / uint96(transmitters.length); + // Bounded by "maxNumOracles" on OCR2Abstract.sol + for (uint256 i = 0; i < transmitters.length; ++i) { + s_withdrawableTokens[transmitters[i]] += feePoolShare; + } + s_feePool -= feePoolShare * uint96(transmitters.length); + } + + // Overridden in FunctionsCoordinator.sol + function _onlyOwner() internal view virtual; +} diff --git a/contracts/src/v0.8/functions/v1_0_0/FunctionsClient.sol b/contracts/src/v0.8/functions/v1_0_0/FunctionsClient.sol new file mode 100644 index 00000000..7af9ac95 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/FunctionsClient.sol @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {IFunctionsRouter} from "./interfaces/IFunctionsRouter.sol"; +import {IFunctionsClient} from "./interfaces/IFunctionsClient.sol"; + +import {FunctionsRequest} from "./libraries/FunctionsRequest.sol"; + +/// @title The Plugin Functions client contract +/// @notice Contract developers can inherit this contract in order to make Plugin Functions requests +abstract contract FunctionsClient is IFunctionsClient { + using FunctionsRequest for FunctionsRequest.Request; + + IFunctionsRouter internal immutable i_router; + + event RequestSent(bytes32 indexed id); + event RequestFulfilled(bytes32 indexed id); + + error OnlyRouterCanFulfill(); + + constructor(address router) { + i_router = IFunctionsRouter(router); + } + + /// @notice Sends a Plugin Functions request + /// @param data The CBOR encoded bytes data for a Functions request + /// @param subscriptionId The subscription ID that will be charged to service the request + /// @param callbackGasLimit the amount of gas that will be available for the fulfillment callback + /// @return requestId The generated request ID for this request + function _sendRequest( + bytes 
memory data, + uint64 subscriptionId, + uint32 callbackGasLimit, + bytes32 donId + ) internal returns (bytes32) { + bytes32 requestId = i_router.sendRequest( + subscriptionId, + data, + FunctionsRequest.REQUEST_DATA_VERSION, + callbackGasLimit, + donId + ); + emit RequestSent(requestId); + return requestId; + } + + /// @notice User defined function to handle a response from the DON + /// @param requestId The request ID, returned by sendRequest() + /// @param response Aggregated response from the execution of the user's source code + /// @param err Aggregated error from the execution of the user code or from the execution pipeline + /// @dev Either response or error parameter will be set, but never both + function fulfillRequest(bytes32 requestId, bytes memory response, bytes memory err) internal virtual; + + /// @inheritdoc IFunctionsClient + function handleOracleFulfillment(bytes32 requestId, bytes memory response, bytes memory err) external override { + if (msg.sender != address(i_router)) { + revert OnlyRouterCanFulfill(); + } + fulfillRequest(requestId, response, err); + emit RequestFulfilled(requestId); + } +} diff --git a/contracts/src/v0.8/functions/v1_0_0/FunctionsCoordinator.sol b/contracts/src/v0.8/functions/v1_0_0/FunctionsCoordinator.sol new file mode 100644 index 00000000..1488bc45 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/FunctionsCoordinator.sol @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {IFunctionsCoordinator} from "./interfaces/IFunctionsCoordinator.sol"; +import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol"; + +import {FunctionsBilling} from "./FunctionsBilling.sol"; +import {OCR2Base} from "./ocr/OCR2Base.sol"; +import {FunctionsResponse} from "./libraries/FunctionsResponse.sol"; + +/// @title Functions Coordinator contract +/// @notice Contract that nodes of a Decentralized Oracle Network (DON) interact with +contract FunctionsCoordinator is OCR2Base, 
IFunctionsCoordinator, FunctionsBilling { + using FunctionsResponse for FunctionsResponse.RequestMeta; + using FunctionsResponse for FunctionsResponse.Commitment; + using FunctionsResponse for FunctionsResponse.FulfillResult; + + /// @inheritdoc ITypeAndVersion + string public constant override typeAndVersion = "Functions Coordinator v1.0.0"; + + event OracleRequest( + bytes32 indexed requestId, + address indexed requestingContract, + address requestInitiator, + uint64 subscriptionId, + address subscriptionOwner, + bytes data, + uint16 dataVersion, + bytes32 flags, + uint64 callbackGasLimit, + FunctionsResponse.Commitment commitment + ); + event OracleResponse(bytes32 indexed requestId, address transmitter); + + error InconsistentReportData(); + error EmptyPublicKey(); + error UnauthorizedPublicKeyChange(); + + bytes private s_donPublicKey; + bytes private s_thresholdPublicKey; + + constructor( + address router, + Config memory config, + address linkToNativeFeed + ) OCR2Base(true) FunctionsBilling(router, config, linkToNativeFeed) {} + + /// @inheritdoc IFunctionsCoordinator + function getThresholdPublicKey() external view override returns (bytes memory) { + if (s_thresholdPublicKey.length == 0) { + revert EmptyPublicKey(); + } + return s_thresholdPublicKey; + } + + /// @inheritdoc IFunctionsCoordinator + function setThresholdPublicKey(bytes calldata thresholdPublicKey) external override onlyOwner { + if (thresholdPublicKey.length == 0) { + revert EmptyPublicKey(); + } + s_thresholdPublicKey = thresholdPublicKey; + } + + /// @inheritdoc IFunctionsCoordinator + function getDONPublicKey() external view override returns (bytes memory) { + if (s_donPublicKey.length == 0) { + revert EmptyPublicKey(); + } + return s_donPublicKey; + } + + /// @inheritdoc IFunctionsCoordinator + function setDONPublicKey(bytes calldata donPublicKey) external override onlyOwner { + if (donPublicKey.length == 0) { + revert EmptyPublicKey(); + } + s_donPublicKey = donPublicKey; + } + + /// 
@dev check if node is in current transmitter list + function _isTransmitter(address node) internal view returns (bool) { + address[] memory nodes = s_transmitters; + // Bounded by "maxNumOracles" on OCR2Abstract.sol + for (uint256 i = 0; i < nodes.length; ++i) { + if (nodes[i] == node) { + return true; + } + } + return false; + } + + /// @inheritdoc IFunctionsCoordinator + function startRequest( + FunctionsResponse.RequestMeta calldata request + ) external override onlyRouter returns (FunctionsResponse.Commitment memory commitment) { + commitment = _startBilling(request); + + emit OracleRequest( + commitment.requestId, + request.requestingContract, + tx.origin, + request.subscriptionId, + request.subscriptionOwner, + request.data, + request.dataVersion, + request.flags, + request.callbackGasLimit, + commitment + ); + + return commitment; + } + + /// @dev DON fees are pooled together. If the OCR configuration is going to change, these need to be distributed. + function _beforeSetConfig(uint8 /* _f */, bytes memory /* _onchainConfig */) internal override { + if (_getTransmitters().length > 0) { + _disperseFeePool(); + } + } + + /// @dev Used by FunctionsBilling.sol + function _getTransmitters() internal view override returns (address[] memory) { + return s_transmitters; + } + + /// @dev Report hook called within OCR2Base.sol + function _report( + uint256 /*initialGas*/, + address /*transmitter*/, + uint8 /*signerCount*/, + address[MAX_NUM_ORACLES] memory /*signers*/, + bytes calldata report + ) internal override { + bytes32[] memory requestIds; + bytes[] memory results; + bytes[] memory errors; + bytes[] memory onchainMetadata; + bytes[] memory offchainMetadata; + (requestIds, results, errors, onchainMetadata, offchainMetadata) = abi.decode( + report, + (bytes32[], bytes[], bytes[], bytes[], bytes[]) + ); + + if ( + requestIds.length == 0 || + requestIds.length != results.length || + requestIds.length != errors.length || + requestIds.length != onchainMetadata.length 
|| + requestIds.length != offchainMetadata.length + ) { + revert ReportInvalid(); + } + + // Bounded by "MaxRequestBatchSize" on the Job's ReportingPluginConfig + for (uint256 i = 0; i < requestIds.length; ++i) { + FunctionsResponse.FulfillResult result = FunctionsResponse.FulfillResult( + _fulfillAndBill(requestIds[i], results[i], errors[i], onchainMetadata[i], offchainMetadata[i]) + ); + + // Emit on successfully processing the fulfillment + // In these two fulfillment results the user has been charged + // Otherwise, the DON will re-try + if ( + result == FunctionsResponse.FulfillResult.FULFILLED || + result == FunctionsResponse.FulfillResult.USER_CALLBACK_ERROR + ) { + emit OracleResponse(requestIds[i], msg.sender); + } + } + } + + /// @dev Used in FunctionsBilling.sol + function _onlyOwner() internal view override { + _validateOwnership(); + } +} diff --git a/contracts/src/v0.8/functions/v1_0_0/FunctionsRouter.sol b/contracts/src/v0.8/functions/v1_0_0/FunctionsRouter.sol new file mode 100644 index 00000000..af155c0a --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/FunctionsRouter.sol @@ -0,0 +1,585 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol"; +import {IFunctionsRouter} from "./interfaces/IFunctionsRouter.sol"; +import {IFunctionsCoordinator} from "./interfaces/IFunctionsCoordinator.sol"; +import {IAccessController} from "../../shared/interfaces/IAccessController.sol"; + +import {FunctionsSubscriptions} from "./FunctionsSubscriptions.sol"; +import {FunctionsResponse} from "./libraries/FunctionsResponse.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; + +import {SafeCast} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SafeCast.sol"; +import {Pausable} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/security/Pausable.sol"; + +contract FunctionsRouter is IFunctionsRouter, FunctionsSubscriptions, 
Pausable, ITypeAndVersion, ConfirmedOwner { + using FunctionsResponse for FunctionsResponse.RequestMeta; + using FunctionsResponse for FunctionsResponse.Commitment; + using FunctionsResponse for FunctionsResponse.FulfillResult; + + string public constant override typeAndVersion = "Functions Router v1.0.0"; + + // We limit return data to a selector plus 4 words. This is to avoid + // malicious contracts from returning large amounts of data and causing + // repeated out-of-gas scenarios. + uint16 public constant MAX_CALLBACK_RETURN_BYTES = 4 + 4 * 32; + uint8 private constant MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX = 0; + + event RequestStart( + bytes32 indexed requestId, + bytes32 indexed donId, + uint64 indexed subscriptionId, + address subscriptionOwner, + address requestingContract, + address requestInitiator, + bytes data, + uint16 dataVersion, + uint32 callbackGasLimit, + uint96 estimatedTotalCostJuels + ); + + event RequestProcessed( + bytes32 indexed requestId, + uint64 indexed subscriptionId, + uint96 totalCostJuels, + address transmitter, + FunctionsResponse.FulfillResult resultCode, + bytes response, + bytes err, + bytes callbackReturnData + ); + + event RequestNotProcessed( + bytes32 indexed requestId, + address coordinator, + address transmitter, + FunctionsResponse.FulfillResult resultCode + ); + + error EmptyRequestData(); + error OnlyCallableFromCoordinator(); + error SenderMustAcceptTermsOfService(address sender); + error InvalidGasFlagValue(uint8 value); + error GasLimitTooBig(uint32 limit); + error DuplicateRequestId(bytes32 requestId); + + struct CallbackResult { + bool success; // ══════╸ Whether the callback succeeded or not + uint256 gasUsed; // ═══╸ The amount of gas consumed during the callback + bytes returnData; // ══╸ The return of the callback function + } + + // ================================================================ + // | Route state | + // ================================================================ + + mapping(bytes32 id => 
address routableContract) private s_route; + + error RouteNotFound(bytes32 id); + + // Identifier for the route to the Terms of Service Allow List + bytes32 private s_allowListId; + + // ================================================================ + // | Configuration state | + // ================================================================ + struct Config { + uint16 maxConsumersPerSubscription; // ═════════╗ Maximum number of consumers which can be added to a single subscription. This bound ensures we are able to loop over all subscription consumers as needed, without exceeding gas limits. Should a user require more consumers, they can use multiple subscriptions. + uint72 adminFee; // ║ Flat fee (in Juels of PLI) that will be paid to the Router owner for operation of the network + bytes4 handleOracleFulfillmentSelector; // ║ The function selector that is used when calling back to the Client contract + uint16 gasForCallExactCheck; // ════════════════╝ Used during calling back to the client. Ensures we have at least enough gas to be able to revert if gasAmount > 63//64*gas available. + uint32[] maxCallbackGasLimits; // ══════════════╸ List of max callback gas limits used by flag with GAS_FLAG_INDEX + uint16 subscriptionDepositMinimumRequests; //═══╗ Amount of requests that must be completed before the full subscription balance will be released when closing a subscription account. + uint72 subscriptionDepositJuels; // ════════════╝ Amount of subscription funds that are held as a deposit until Config.subscriptionDepositMinimumRequests are made using the subscription. 
+ } + + Config private s_config; + + event ConfigUpdated(Config); + + // ================================================================ + // | Proposal state | + // ================================================================ + + uint8 private constant MAX_PROPOSAL_SET_LENGTH = 8; + + struct ContractProposalSet { + bytes32[] ids; // ══╸ The IDs that key into the routes that will be modified if the update is applied + address[] to; // ═══╸ The address of the contracts that the route will point to if the update is applied + } + ContractProposalSet private s_proposedContractSet; + + event ContractProposed( + bytes32 proposedContractSetId, + address proposedContractSetFromAddress, + address proposedContractSetToAddress + ); + + event ContractUpdated(bytes32 id, address from, address to); + + error InvalidProposal(); + error IdentifierIsReserved(bytes32 id); + + // ================================================================ + // | Initialization | + // ================================================================ + + constructor( + address linkToken, + Config memory config + ) FunctionsSubscriptions(linkToken) ConfirmedOwner(msg.sender) Pausable() { + // Set the initial configuration + updateConfig(config); + } + + // ================================================================ + // | Configuration | + // ================================================================ + + /// @notice The identifier of the route to retrieve the address of the access control contract + // The access control contract controls which accounts can manage subscriptions + /// @return id - bytes32 id that can be passed to the "getContractById" of the Router + function getConfig() external view returns (Config memory) { + return s_config; + } + + /// @notice The router configuration + function updateConfig(Config memory config) public onlyOwner { + s_config = config; + emit ConfigUpdated(config); + } + + /// @inheritdoc IFunctionsRouter + function isValidCallbackGasLimit(uint64 
subscriptionId, uint32 callbackGasLimit) public view { + uint8 callbackGasLimitsIndexSelector = uint8(getFlags(subscriptionId)[MAX_CALLBACK_GAS_LIMIT_FLAGS_INDEX]); + if (callbackGasLimitsIndexSelector >= s_config.maxCallbackGasLimits.length) { + revert InvalidGasFlagValue(callbackGasLimitsIndexSelector); + } + uint32 maxCallbackGasLimit = s_config.maxCallbackGasLimits[callbackGasLimitsIndexSelector]; + if (callbackGasLimit > maxCallbackGasLimit) { + revert GasLimitTooBig(maxCallbackGasLimit); + } + } + + /// @inheritdoc IFunctionsRouter + function getAdminFee() external view override returns (uint72) { + return s_config.adminFee; + } + + /// @inheritdoc IFunctionsRouter + function getAllowListId() external view override returns (bytes32) { + return s_allowListId; + } + + /// @inheritdoc IFunctionsRouter + function setAllowListId(bytes32 allowListId) external override onlyOwner { + s_allowListId = allowListId; + } + + /// @dev Used within FunctionsSubscriptions.sol + function _getMaxConsumers() internal view override returns (uint16) { + return s_config.maxConsumersPerSubscription; + } + + /// @dev Used within FunctionsSubscriptions.sol + function _getSubscriptionDepositDetails() internal view override returns (uint16, uint72) { + return (s_config.subscriptionDepositMinimumRequests, s_config.subscriptionDepositJuels); + } + + // ================================================================ + // | Requests | + // ================================================================ + + /// @inheritdoc IFunctionsRouter + function sendRequest( + uint64 subscriptionId, + bytes calldata data, + uint16 dataVersion, + uint32 callbackGasLimit, + bytes32 donId + ) external override returns (bytes32) { + IFunctionsCoordinator coordinator = IFunctionsCoordinator(getContractById(donId)); + return _sendRequest(donId, coordinator, subscriptionId, data, dataVersion, callbackGasLimit); + } + + /// @inheritdoc IFunctionsRouter + function sendRequestToProposed( + uint64 
subscriptionId, + bytes calldata data, + uint16 dataVersion, + uint32 callbackGasLimit, + bytes32 donId + ) external override returns (bytes32) { + IFunctionsCoordinator coordinator = IFunctionsCoordinator(getProposedContractById(donId)); + return _sendRequest(donId, coordinator, subscriptionId, data, dataVersion, callbackGasLimit); + } + + function _sendRequest( + bytes32 donId, + IFunctionsCoordinator coordinator, + uint64 subscriptionId, + bytes memory data, + uint16 dataVersion, + uint32 callbackGasLimit + ) private returns (bytes32) { + _whenNotPaused(); + _isExistingSubscription(subscriptionId); + _isAllowedConsumer(msg.sender, subscriptionId); + isValidCallbackGasLimit(subscriptionId, callbackGasLimit); + + if (data.length == 0) { + revert EmptyRequestData(); + } + + Subscription memory subscription = getSubscription(subscriptionId); + Consumer memory consumer = getConsumer(msg.sender, subscriptionId); + uint72 adminFee = s_config.adminFee; + + // Forward request to DON + FunctionsResponse.Commitment memory commitment = coordinator.startRequest( + FunctionsResponse.RequestMeta({ + requestingContract: msg.sender, + data: data, + subscriptionId: subscriptionId, + dataVersion: dataVersion, + flags: getFlags(subscriptionId), + callbackGasLimit: callbackGasLimit, + adminFee: adminFee, + initiatedRequests: consumer.initiatedRequests, + completedRequests: consumer.completedRequests, + availableBalance: subscription.balance - subscription.blockedBalance, + subscriptionOwner: subscription.owner + }) + ); + + // Do not allow setting a commitment for a requestId that already exists + if (s_requestCommitments[commitment.requestId] != bytes32(0)) { + revert DuplicateRequestId(commitment.requestId); + } + + // Store a commitment about the request + s_requestCommitments[commitment.requestId] = keccak256( + abi.encode( + FunctionsResponse.Commitment({ + adminFee: adminFee, + coordinator: address(coordinator), + client: msg.sender, + subscriptionId: subscriptionId, + 
callbackGasLimit: callbackGasLimit, + estimatedTotalCostJuels: commitment.estimatedTotalCostJuels, + timeoutTimestamp: commitment.timeoutTimestamp, + requestId: commitment.requestId, + donFee: commitment.donFee, + gasOverheadBeforeCallback: commitment.gasOverheadBeforeCallback, + gasOverheadAfterCallback: commitment.gasOverheadAfterCallback + }) + ) + ); + + _markRequestInFlight(msg.sender, subscriptionId, commitment.estimatedTotalCostJuels); + + emit RequestStart({ + requestId: commitment.requestId, + donId: donId, + subscriptionId: subscriptionId, + subscriptionOwner: subscription.owner, + requestingContract: msg.sender, + requestInitiator: tx.origin, + data: data, + dataVersion: dataVersion, + callbackGasLimit: callbackGasLimit, + estimatedTotalCostJuels: commitment.estimatedTotalCostJuels + }); + + return commitment.requestId; + } + + // ================================================================ + // | Responses | + // ================================================================ + + /// @inheritdoc IFunctionsRouter + function fulfill( + bytes memory response, + bytes memory err, + uint96 juelsPerGas, + uint96 costWithoutCallback, + address transmitter, + FunctionsResponse.Commitment memory commitment + ) external override returns (FunctionsResponse.FulfillResult resultCode, uint96) { + _whenNotPaused(); + + if (msg.sender != commitment.coordinator) { + revert OnlyCallableFromCoordinator(); + } + + { + bytes32 commitmentHash = s_requestCommitments[commitment.requestId]; + + if (commitmentHash == bytes32(0)) { + resultCode = FunctionsResponse.FulfillResult.INVALID_REQUEST_ID; + emit RequestNotProcessed(commitment.requestId, commitment.coordinator, transmitter, resultCode); + return (resultCode, 0); + } + + if (keccak256(abi.encode(commitment)) != commitmentHash) { + resultCode = FunctionsResponse.FulfillResult.INVALID_COMMITMENT; + emit RequestNotProcessed(commitment.requestId, commitment.coordinator, transmitter, resultCode); + return (resultCode, 0); 
+ } + + // Check that the transmitter has supplied enough gas for the callback to succeed + if (gasleft() < commitment.callbackGasLimit + commitment.gasOverheadAfterCallback) { + resultCode = FunctionsResponse.FulfillResult.INSUFFICIENT_GAS_PROVIDED; + emit RequestNotProcessed(commitment.requestId, commitment.coordinator, transmitter, resultCode); + return (resultCode, 0); + } + } + + { + uint96 callbackCost = juelsPerGas * SafeCast.toUint96(commitment.callbackGasLimit); + uint96 totalCostJuels = commitment.adminFee + costWithoutCallback + callbackCost; + + // Check that the subscription can still afford to fulfill the request + if (totalCostJuels > getSubscription(commitment.subscriptionId).balance) { + resultCode = FunctionsResponse.FulfillResult.SUBSCRIPTION_BALANCE_INVARIANT_VIOLATION; + emit RequestNotProcessed(commitment.requestId, commitment.coordinator, transmitter, resultCode); + return (resultCode, 0); + } + + // Check that the cost has not exceeded the quoted cost + if (totalCostJuels > commitment.estimatedTotalCostJuels) { + resultCode = FunctionsResponse.FulfillResult.COST_EXCEEDS_COMMITMENT; + emit RequestNotProcessed(commitment.requestId, commitment.coordinator, transmitter, resultCode); + return (resultCode, 0); + } + } + + delete s_requestCommitments[commitment.requestId]; + + CallbackResult memory result = _callback( + commitment.requestId, + response, + err, + commitment.callbackGasLimit, + commitment.client + ); + + resultCode = result.success + ? 
FunctionsResponse.FulfillResult.FULFILLED + : FunctionsResponse.FulfillResult.USER_CALLBACK_ERROR; + + Receipt memory receipt = _pay( + commitment.subscriptionId, + commitment.estimatedTotalCostJuels, + commitment.client, + commitment.adminFee, + juelsPerGas, + SafeCast.toUint96(result.gasUsed), + costWithoutCallback + ); + + emit RequestProcessed({ + requestId: commitment.requestId, + subscriptionId: commitment.subscriptionId, + totalCostJuels: receipt.totalCostJuels, + transmitter: transmitter, + resultCode: resultCode, + response: response, + err: err, + callbackReturnData: result.returnData + }); + + return (resultCode, receipt.callbackGasCostJuels); + } + + function _callback( + bytes32 requestId, + bytes memory response, + bytes memory err, + uint32 callbackGasLimit, + address client + ) private returns (CallbackResult memory) { + bool destinationNoLongerExists; + assembly { + // solidity calls check that a contract actually exists at the destination, so we do the same + destinationNoLongerExists := iszero(extcodesize(client)) + } + if (destinationNoLongerExists) { + // Return without attempting callback + // The subscription will still be charged to reimburse transmitter's gas overhead + return CallbackResult({success: false, gasUsed: 0, returnData: new bytes(0)}); + } + + bytes memory encodedCallback = abi.encodeWithSelector( + s_config.handleOracleFulfillmentSelector, + requestId, + response, + err + ); + + uint16 gasForCallExactCheck = s_config.gasForCallExactCheck; + + // Call with explicitly the amount of callback gas requested + // Important to not let them exhaust the gas budget and avoid payment. + // NOTE: that callWithExactGas will revert if we do not have sufficient gas + // to give the callee their requested amount. 
+ + bool success; + uint256 gasUsed; + // allocate return data memory ahead of time + bytes memory returnData = new bytes(MAX_CALLBACK_RETURN_BYTES); + + assembly { + let g := gas() + // Compute g -= gasForCallExactCheck and check for underflow + // The gas actually passed to the callee is _min(gasAmount, 63//64*gas available). + // We want to ensure that we revert if gasAmount > 63//64*gas available + // as we do not want to provide them with less, however that check itself costs + // gas. gasForCallExactCheck ensures we have at least enough gas to be able + // to revert if gasAmount > 63//64*gas available. + if lt(g, gasForCallExactCheck) { + revert(0, 0) + } + g := sub(g, gasForCallExactCheck) + // if g - g//64 <= gasAmount, revert + // (we subtract g//64 because of EIP-150) + if iszero(gt(sub(g, div(g, 64)), callbackGasLimit)) { + revert(0, 0) + } + // call and report whether we succeeded + // call(gas,addr,value,argsOffset,argsLength,retOffset,retLength) + let gasBeforeCall := gas() + success := call(callbackGasLimit, client, 0, add(encodedCallback, 0x20), mload(encodedCallback), 0, 0) + gasUsed := sub(gasBeforeCall, gas()) + + // limit our copy to MAX_CALLBACK_RETURN_BYTES bytes + let toCopy := returndatasize() + if gt(toCopy, MAX_CALLBACK_RETURN_BYTES) { + toCopy := MAX_CALLBACK_RETURN_BYTES + } + // Store the length of the copied bytes + mstore(returnData, toCopy) + // copy the bytes from returnData[0:_toCopy] + returndatacopy(add(returnData, 0x20), 0, toCopy) + } + + return CallbackResult({success: success, gasUsed: gasUsed, returnData: returnData}); + } + + // ================================================================ + // | Route methods | + // ================================================================ + + /// @inheritdoc IFunctionsRouter + function getContractById(bytes32 id) public view override returns (address) { + address currentImplementation = s_route[id]; + if (currentImplementation == address(0)) { + revert RouteNotFound(id); + } + 
return currentImplementation; + } + + /// @inheritdoc IFunctionsRouter + function getProposedContractById(bytes32 id) public view override returns (address) { + // Iterations will not exceed MAX_PROPOSAL_SET_LENGTH + for (uint8 i = 0; i < s_proposedContractSet.ids.length; ++i) { + if (id == s_proposedContractSet.ids[i]) { + return s_proposedContractSet.to[i]; + } + } + revert RouteNotFound(id); + } + + // ================================================================ + // | Contract Proposal methods | + // ================================================================ + + /// @inheritdoc IFunctionsRouter + function getProposedContractSet() external view override returns (bytes32[] memory, address[] memory) { + return (s_proposedContractSet.ids, s_proposedContractSet.to); + } + + /// @inheritdoc IFunctionsRouter + function proposeContractsUpdate( + bytes32[] memory proposedContractSetIds, + address[] memory proposedContractSetAddresses + ) external override onlyOwner { + // IDs and addresses arrays must be of equal length and must not exceed the max proposal length + uint256 idsArrayLength = proposedContractSetIds.length; + if (idsArrayLength != proposedContractSetAddresses.length || idsArrayLength > MAX_PROPOSAL_SET_LENGTH) { + revert InvalidProposal(); + } + + // NOTE: iterations of this loop will not exceed MAX_PROPOSAL_SET_LENGTH + for (uint256 i = 0; i < idsArrayLength; ++i) { + bytes32 id = proposedContractSetIds[i]; + address proposedContract = proposedContractSetAddresses[i]; + if ( + proposedContract == address(0) || // The Proposed address must be a valid address + s_route[id] == proposedContract // The Proposed address must point to a different address than what is currently set + ) { + revert InvalidProposal(); + } + + emit ContractProposed({ + proposedContractSetId: id, + proposedContractSetFromAddress: s_route[id], + proposedContractSetToAddress: proposedContract + }); + } + + s_proposedContractSet = ContractProposalSet({ids: 
proposedContractSetIds, to: proposedContractSetAddresses}); + } + + /// @inheritdoc IFunctionsRouter + function updateContracts() external override onlyOwner { + // Iterations will not exceed MAX_PROPOSAL_SET_LENGTH + for (uint256 i = 0; i < s_proposedContractSet.ids.length; ++i) { + bytes32 id = s_proposedContractSet.ids[i]; + address to = s_proposedContractSet.to[i]; + emit ContractUpdated({id: id, from: s_route[id], to: to}); + s_route[id] = to; + } + + delete s_proposedContractSet; + } + + // ================================================================ + // | Modifiers | + // ================================================================ + // Favoring internal functions over actual modifiers to reduce contract size + + /// @dev Used within FunctionsSubscriptions.sol + function _whenNotPaused() internal view override { + _requireNotPaused(); + } + + /// @dev Used within FunctionsSubscriptions.sol + function _onlyRouterOwner() internal view override { + _validateOwnership(); + } + + /// @dev Used within FunctionsSubscriptions.sol + function _onlySenderThatAcceptedToS() internal view override { + address currentImplementation = s_route[s_allowListId]; + if (currentImplementation == address(0)) { + // If not set, ignore this check, allow all access + return; + } + if (!IAccessController(currentImplementation).hasAccess(msg.sender, new bytes(0))) { + revert SenderMustAcceptTermsOfService(msg.sender); + } + } + + /// @inheritdoc IFunctionsRouter + function pause() external override onlyOwner { + _pause(); + } + + /// @inheritdoc IFunctionsRouter + function unpause() external override onlyOwner { + _unpause(); + } +} diff --git a/contracts/src/v0.8/functions/v1_0_0/FunctionsSubscriptions.sol b/contracts/src/v0.8/functions/v1_0_0/FunctionsSubscriptions.sol new file mode 100644 index 00000000..3a0acf9a --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/FunctionsSubscriptions.sol @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + 
+import {IFunctionsSubscriptions} from "./interfaces/IFunctionsSubscriptions.sol"; +import {IERC677Receiver} from "../../shared/interfaces/IERC677Receiver.sol"; +import {IFunctionsBilling} from "./interfaces/IFunctionsBilling.sol"; + +import {FunctionsResponse} from "./libraries/FunctionsResponse.sol"; + +import {IERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; +import {SafeERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol"; + +/// @title Functions Subscriptions contract +/// @notice Contract that coordinates payment from users to the nodes of the Decentralized Oracle Network (DON). +abstract contract FunctionsSubscriptions is IFunctionsSubscriptions, IERC677Receiver { + using SafeERC20 for IERC20; + using FunctionsResponse for FunctionsResponse.Commitment; + + // ================================================================ + // | Balance state | + // ================================================================ + // link token address + IERC20 internal immutable i_linkToken; + + // s_totalLinkBalance tracks the total PLI sent to/from + // this contract through onTokenTransfer, cancelSubscription and oracleWithdraw. + // A discrepancy with this contract's PLI balance indicates that someone + // sent tokens using transfer and so we may need to use recoverFunds. + uint96 private s_totalLinkBalance; + + /// @dev NOP balances are held as a single amount. The breakdown is held by the Coordinator. + mapping(address coordinator => uint96 balanceJuelsLink) private s_withdrawableTokens; + + // ================================================================ + // | Subscription state | + // ================================================================ + // Keep a count of the number of subscriptions so that its possible to + // loop through all the current subscriptions via .getSubscription(). 
+ uint64 private s_currentSubscriptionId; + + mapping(uint64 subscriptionId => Subscription) private s_subscriptions; + + // Maintains the list of keys in s_consumers. + // We do this for 2 reasons: + // 1. To be able to clean up all keys from s_consumers when canceling a subscription. + // 2. To be able to return the list of all consumers in getSubscription. + // Note that we need the s_consumers map to be able to directly check if a + // consumer is valid without reading all the consumers from storage. + mapping(address consumer => mapping(uint64 subscriptionId => Consumer)) private s_consumers; + + event SubscriptionCreated(uint64 indexed subscriptionId, address owner); + event SubscriptionFunded(uint64 indexed subscriptionId, uint256 oldBalance, uint256 newBalance); + event SubscriptionConsumerAdded(uint64 indexed subscriptionId, address consumer); + event SubscriptionConsumerRemoved(uint64 indexed subscriptionId, address consumer); + event SubscriptionCanceled(uint64 indexed subscriptionId, address fundsRecipient, uint256 fundsAmount); + event SubscriptionOwnerTransferRequested(uint64 indexed subscriptionId, address from, address to); + event SubscriptionOwnerTransferred(uint64 indexed subscriptionId, address from, address to); + + error TooManyConsumers(uint16 maximumConsumers); + error InsufficientBalance(uint96 currentBalanceJuels); + error InvalidConsumer(); + error CannotRemoveWithPendingRequests(); + error InvalidSubscription(); + error OnlyCallableFromLink(); + error InvalidCalldata(); + error MustBeSubscriptionOwner(); + error TimeoutNotExceeded(); + error MustBeProposedOwner(address proposedOwner); + event FundsRecovered(address to, uint256 amount); + + // ================================================================ + // | Request state | + // ================================================================ + + mapping(bytes32 requestId => bytes32 commitmentHash) internal s_requestCommitments; + + struct Receipt { + uint96 callbackGasCostJuels; + 
uint96 totalCostJuels; + } + + event RequestTimedOut(bytes32 indexed requestId); + + // ================================================================ + // | Initialization | + // ================================================================ + constructor(address link) { + i_linkToken = IERC20(link); + } + + // ================================================================ + // | Request/Response | + // ================================================================ + + /// @notice Sets a request as in-flight + /// @dev Only callable within the Router + function _markRequestInFlight(address client, uint64 subscriptionId, uint96 estimatedTotalCostJuels) internal { + // Earmark subscription funds + s_subscriptions[subscriptionId].blockedBalance += estimatedTotalCostJuels; + + // Increment sent requests + s_consumers[client][subscriptionId].initiatedRequests += 1; + } + + /// @notice Moves funds from one subscription account to another. + /// @dev Only callable by the Coordinator contract that is saved in the request commitment + function _pay( + uint64 subscriptionId, + uint96 estimatedTotalCostJuels, + address client, + uint96 adminFee, + uint96 juelsPerGas, + uint96 gasUsed, + uint96 costWithoutCallbackJuels + ) internal returns (Receipt memory) { + uint96 callbackGasCostJuels = juelsPerGas * gasUsed; + uint96 totalCostJuels = costWithoutCallbackJuels + adminFee + callbackGasCostJuels; + + if ( + s_subscriptions[subscriptionId].balance < totalCostJuels || + s_subscriptions[subscriptionId].blockedBalance < estimatedTotalCostJuels + ) { + revert InsufficientBalance(s_subscriptions[subscriptionId].balance); + } + + // Charge the subscription + s_subscriptions[subscriptionId].balance -= totalCostJuels; + + // Unblock earmarked funds + s_subscriptions[subscriptionId].blockedBalance -= estimatedTotalCostJuels; + + // Pay the DON's fees and gas reimbursement + s_withdrawableTokens[msg.sender] += costWithoutCallbackJuels + callbackGasCostJuels; + + // Pay out the 
administration fee + s_withdrawableTokens[address(this)] += adminFee; + + // Increment finished requests + s_consumers[client][subscriptionId].completedRequests += 1; + + return Receipt({callbackGasCostJuels: callbackGasCostJuels, totalCostJuels: totalCostJuels}); + } + + // ================================================================ + // | Owner methods | + // ================================================================ + + /// @inheritdoc IFunctionsSubscriptions + function ownerCancelSubscription(uint64 subscriptionId) external override { + _onlyRouterOwner(); + _isExistingSubscription(subscriptionId); + _cancelSubscriptionHelper(subscriptionId, s_subscriptions[subscriptionId].owner, false); + } + + /// @inheritdoc IFunctionsSubscriptions + function recoverFunds(address to) external override { + _onlyRouterOwner(); + uint256 externalBalance = i_linkToken.balanceOf(address(this)); + uint256 internalBalance = uint256(s_totalLinkBalance); + if (internalBalance < externalBalance) { + uint256 amount = externalBalance - internalBalance; + i_linkToken.safeTransfer(to, amount); + emit FundsRecovered(to, amount); + } + // If the balances are equal, nothing to be done. 
+ } + + // ================================================================ + // | Fund withdrawal | + // ================================================================ + + /// @inheritdoc IFunctionsSubscriptions + function oracleWithdraw(address recipient, uint96 amount) external override { + _whenNotPaused(); + + if (amount == 0) { + revert InvalidCalldata(); + } + uint96 currentBalance = s_withdrawableTokens[msg.sender]; + if (currentBalance < amount) { + revert InsufficientBalance(currentBalance); + } + s_withdrawableTokens[msg.sender] -= amount; + s_totalLinkBalance -= amount; + i_linkToken.safeTransfer(recipient, amount); + } + + /// @notice Owner withdraw PLI earned through admin fees + /// @notice If amount is 0 the full balance will be withdrawn + /// @param recipient where to send the funds + /// @param amount amount to withdraw + function ownerWithdraw(address recipient, uint96 amount) external { + _onlyRouterOwner(); + if (amount == 0) { + amount = s_withdrawableTokens[address(this)]; + } + uint96 currentBalance = s_withdrawableTokens[address(this)]; + if (currentBalance < amount) { + revert InsufficientBalance(currentBalance); + } + s_withdrawableTokens[address(this)] -= amount; + s_totalLinkBalance -= amount; + + i_linkToken.safeTransfer(recipient, amount); + } + + // ================================================================ + // | TransferAndCall Deposit helper | + // ================================================================ + + // This function is to be invoked when using PLI.transferAndCall + /// @dev Note to fund the subscription, use transferAndCall. 
For example + /// @dev PLITOKEN.transferAndCall( + /// @dev address(ROUTER), + /// @dev amount, + /// @dev abi.encode(subscriptionId)); + function onTokenTransfer(address /* sender */, uint256 amount, bytes calldata data) external override { + _whenNotPaused(); + if (msg.sender != address(i_linkToken)) { + revert OnlyCallableFromLink(); + } + if (data.length != 32) { + revert InvalidCalldata(); + } + uint64 subscriptionId = abi.decode(data, (uint64)); + if (s_subscriptions[subscriptionId].owner == address(0)) { + revert InvalidSubscription(); + } + // We do not check that the msg.sender is the subscription owner, + // anyone can fund a subscription. + uint256 oldBalance = s_subscriptions[subscriptionId].balance; + s_subscriptions[subscriptionId].balance += uint96(amount); + s_totalLinkBalance += uint96(amount); + emit SubscriptionFunded(subscriptionId, oldBalance, oldBalance + amount); + } + + // ================================================================ + // | Subscription management | + // ================================================================ + + /// @inheritdoc IFunctionsSubscriptions + function getTotalBalance() external view override returns (uint96) { + return s_totalLinkBalance; + } + + /// @inheritdoc IFunctionsSubscriptions + function getSubscriptionCount() external view override returns (uint64) { + return s_currentSubscriptionId; + } + + /// @inheritdoc IFunctionsSubscriptions + function getSubscription(uint64 subscriptionId) public view override returns (Subscription memory) { + _isExistingSubscription(subscriptionId); + return s_subscriptions[subscriptionId]; + } + + /// @inheritdoc IFunctionsSubscriptions + function getSubscriptionsInRange( + uint64 subscriptionIdStart, + uint64 subscriptionIdEnd + ) external view override returns (Subscription[] memory subscriptions) { + if ( + subscriptionIdStart > subscriptionIdEnd || + subscriptionIdEnd > s_currentSubscriptionId || + s_currentSubscriptionId == 0 + ) { + revert InvalidCalldata(); + 
} + + subscriptions = new Subscription[]((subscriptionIdEnd - subscriptionIdStart) + 1); + for (uint256 i = 0; i <= subscriptionIdEnd - subscriptionIdStart; ++i) { + subscriptions[i] = s_subscriptions[uint64(subscriptionIdStart + i)]; + } + + return subscriptions; + } + + /// @inheritdoc IFunctionsSubscriptions + function getConsumer(address client, uint64 subscriptionId) public view override returns (Consumer memory) { + return s_consumers[client][subscriptionId]; + } + + /// @dev Used within this file & FunctionsRouter.sol + function _isExistingSubscription(uint64 subscriptionId) internal view { + if (s_subscriptions[subscriptionId].owner == address(0)) { + revert InvalidSubscription(); + } + } + + /// @dev Used within FunctionsRouter.sol + function _isAllowedConsumer(address client, uint64 subscriptionId) internal view { + if (!s_consumers[client][subscriptionId].allowed) { + revert InvalidConsumer(); + } + } + + /// @inheritdoc IFunctionsSubscriptions + function createSubscription() external override returns (uint64 subscriptionId) { + _whenNotPaused(); + _onlySenderThatAcceptedToS(); + + subscriptionId = ++s_currentSubscriptionId; + s_subscriptions[subscriptionId] = Subscription({ + balance: 0, + blockedBalance: 0, + owner: msg.sender, + proposedOwner: address(0), + consumers: new address[](0), + flags: bytes32(0) + }); + + emit SubscriptionCreated(subscriptionId, msg.sender); + + return subscriptionId; + } + + /// @inheritdoc IFunctionsSubscriptions + function createSubscriptionWithConsumer(address consumer) external override returns (uint64 subscriptionId) { + _whenNotPaused(); + _onlySenderThatAcceptedToS(); + + subscriptionId = ++s_currentSubscriptionId; + s_subscriptions[subscriptionId] = Subscription({ + balance: 0, + blockedBalance: 0, + owner: msg.sender, + proposedOwner: address(0), + consumers: new address[](0), + flags: bytes32(0) + }); + + s_subscriptions[subscriptionId].consumers.push(consumer); + s_consumers[consumer][subscriptionId].allowed = 
true; + + emit SubscriptionCreated(subscriptionId, msg.sender); + emit SubscriptionConsumerAdded(subscriptionId, consumer); + + return subscriptionId; + } + + /// @inheritdoc IFunctionsSubscriptions + function proposeSubscriptionOwnerTransfer(uint64 subscriptionId, address newOwner) external override { + _whenNotPaused(); + _onlySubscriptionOwner(subscriptionId); + _onlySenderThatAcceptedToS(); + + if (newOwner == address(0) || s_subscriptions[subscriptionId].proposedOwner == newOwner) { + revert InvalidCalldata(); + } + + s_subscriptions[subscriptionId].proposedOwner = newOwner; + emit SubscriptionOwnerTransferRequested(subscriptionId, msg.sender, newOwner); + } + + /// @inheritdoc IFunctionsSubscriptions + function acceptSubscriptionOwnerTransfer(uint64 subscriptionId) external override { + _whenNotPaused(); + _onlySenderThatAcceptedToS(); + + address previousOwner = s_subscriptions[subscriptionId].owner; + address proposedOwner = s_subscriptions[subscriptionId].proposedOwner; + if (proposedOwner != msg.sender) { + revert MustBeProposedOwner(proposedOwner); + } + s_subscriptions[subscriptionId].owner = msg.sender; + s_subscriptions[subscriptionId].proposedOwner = address(0); + emit SubscriptionOwnerTransferred(subscriptionId, previousOwner, msg.sender); + } + + /// @inheritdoc IFunctionsSubscriptions + function removeConsumer(uint64 subscriptionId, address consumer) external override { + _whenNotPaused(); + _onlySubscriptionOwner(subscriptionId); + _onlySenderThatAcceptedToS(); + + Consumer memory consumerData = s_consumers[consumer][subscriptionId]; + _isAllowedConsumer(consumer, subscriptionId); + if (consumerData.initiatedRequests != consumerData.completedRequests) { + revert CannotRemoveWithPendingRequests(); + } + // Note bounded by config.maxConsumers + address[] memory consumers = s_subscriptions[subscriptionId].consumers; + for (uint256 i = 0; i < consumers.length; ++i) { + if (consumers[i] == consumer) { + // Storage write to preserve last element + 
s_subscriptions[subscriptionId].consumers[i] = consumers[consumers.length - 1]; + // Storage remove last element + s_subscriptions[subscriptionId].consumers.pop(); + break; + } + } + delete s_consumers[consumer][subscriptionId]; + emit SubscriptionConsumerRemoved(subscriptionId, consumer); + } + + /// @dev Overridden in FunctionsRouter.sol + function _getMaxConsumers() internal view virtual returns (uint16); + + /// @inheritdoc IFunctionsSubscriptions + function addConsumer(uint64 subscriptionId, address consumer) external override { + _whenNotPaused(); + _onlySubscriptionOwner(subscriptionId); + _onlySenderThatAcceptedToS(); + + // Already maxed, cannot add any more consumers. + uint16 maximumConsumers = _getMaxConsumers(); + if (s_subscriptions[subscriptionId].consumers.length >= maximumConsumers) { + revert TooManyConsumers(maximumConsumers); + } + if (s_consumers[consumer][subscriptionId].allowed) { + // Idempotence - do nothing if already added. + // Ensures uniqueness in s_subscriptions[subscriptionId].consumers. + return; + } + + s_consumers[consumer][subscriptionId].allowed = true; + s_subscriptions[subscriptionId].consumers.push(consumer); + + emit SubscriptionConsumerAdded(subscriptionId, consumer); + } + + /// @dev Overridden in FunctionsRouter.sol + function _getSubscriptionDepositDetails() internal virtual returns (uint16, uint72); + + function _cancelSubscriptionHelper(uint64 subscriptionId, address toAddress, bool checkDepositRefundability) private { + Subscription memory subscription = s_subscriptions[subscriptionId]; + uint96 balance = subscription.balance; + uint64 completedRequests = 0; + + // NOTE: loop iterations are bounded by config.maxConsumers + // If no consumers, does nothing. 
+ for (uint256 i = 0; i < subscription.consumers.length; ++i) { + address consumer = subscription.consumers[i]; + completedRequests += s_consumers[consumer][subscriptionId].completedRequests; + delete s_consumers[consumer][subscriptionId]; + } + delete s_subscriptions[subscriptionId]; + + (uint16 subscriptionDepositMinimumRequests, uint72 subscriptionDepositJuels) = _getSubscriptionDepositDetails(); + + // If subscription has not made enough requests, deposit will be forfeited + if (checkDepositRefundability && completedRequests < subscriptionDepositMinimumRequests) { + uint96 deposit = subscriptionDepositJuels > balance ? balance : subscriptionDepositJuels; + if (deposit > 0) { + s_withdrawableTokens[address(this)] += deposit; + balance -= deposit; + } + } + + if (balance > 0) { + s_totalLinkBalance -= balance; + i_linkToken.safeTransfer(toAddress, uint256(balance)); + } + emit SubscriptionCanceled(subscriptionId, toAddress, balance); + } + + /// @inheritdoc IFunctionsSubscriptions + function cancelSubscription(uint64 subscriptionId, address to) external override { + _whenNotPaused(); + _onlySubscriptionOwner(subscriptionId); + _onlySenderThatAcceptedToS(); + + if (pendingRequestExists(subscriptionId)) { + revert CannotRemoveWithPendingRequests(); + } + + _cancelSubscriptionHelper(subscriptionId, to, true); + } + + /// @inheritdoc IFunctionsSubscriptions + function pendingRequestExists(uint64 subscriptionId) public view override returns (bool) { + address[] memory consumers = s_subscriptions[subscriptionId].consumers; + // NOTE: loop iterations are bounded by config.maxConsumers + for (uint256 i = 0; i < consumers.length; ++i) { + Consumer memory consumer = s_consumers[consumers[i]][subscriptionId]; + if (consumer.initiatedRequests != consumer.completedRequests) { + return true; + } + } + return false; + } + + /// @inheritdoc IFunctionsSubscriptions + function setFlags(uint64 subscriptionId, bytes32 flags) external override { + _onlyRouterOwner(); + 
_isExistingSubscription(subscriptionId); + s_subscriptions[subscriptionId].flags = flags; + } + + /// @inheritdoc IFunctionsSubscriptions + function getFlags(uint64 subscriptionId) public view returns (bytes32) { + return s_subscriptions[subscriptionId].flags; + } + + // ================================================================ + // | Request Timeout | + // ================================================================ + + /// @inheritdoc IFunctionsSubscriptions + function timeoutRequests(FunctionsResponse.Commitment[] calldata requestsToTimeoutByCommitment) external override { + _whenNotPaused(); + + for (uint256 i = 0; i < requestsToTimeoutByCommitment.length; ++i) { + FunctionsResponse.Commitment memory request = requestsToTimeoutByCommitment[i]; + bytes32 requestId = request.requestId; + uint64 subscriptionId = request.subscriptionId; + + // Check that request ID is valid + if (keccak256(abi.encode(request)) != s_requestCommitments[requestId]) { + revert InvalidCalldata(); + } + + // Check that request has exceeded allowed request time + if (block.timestamp < request.timeoutTimestamp) { + revert TimeoutNotExceeded(); + } + + // Notify the Coordinator that the request should no longer be fulfilled + IFunctionsBilling(request.coordinator).deleteCommitment(requestId); + // Release the subscription's balance that had been earmarked for the request + s_subscriptions[subscriptionId].blockedBalance -= request.estimatedTotalCostJuels; + s_consumers[request.client][subscriptionId].completedRequests += 1; + // Delete commitment within Router state + delete s_requestCommitments[requestId]; + + emit RequestTimedOut(requestId); + } + } + + // ================================================================ + // | Modifiers | + // ================================================================ + + function _onlySubscriptionOwner(uint64 subscriptionId) internal view { + address owner = s_subscriptions[subscriptionId].owner; + if (owner == address(0)) { + revert 
InvalidSubscription(); + } + if (msg.sender != owner) { + revert MustBeSubscriptionOwner(); + } + } + + /// @dev Overridden in FunctionsRouter.sol + function _onlySenderThatAcceptedToS() internal virtual; + + /// @dev Overridden in FunctionsRouter.sol + function _onlyRouterOwner() internal virtual; + + /// @dev Overridden in FunctionsRouter.sol + function _whenNotPaused() internal virtual; +} diff --git a/contracts/src/v0.8/functions/v1_0_0/Routable.sol b/contracts/src/v0.8/functions/v1_0_0/Routable.sol new file mode 100644 index 00000000..6c11d4d6 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/Routable.sol @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol"; +import {IOwnableFunctionsRouter} from "./interfaces/IOwnableFunctionsRouter.sol"; + +/// @title This abstract should be inherited by contracts that will be used +/// as the destinations to a route (id=>contract) on the Router. +/// It provides a Router getter and modifiers. +abstract contract Routable is ITypeAndVersion { + IOwnableFunctionsRouter private immutable i_router; + + error RouterMustBeSet(); + error OnlyCallableByRouter(); + error OnlyCallableByRouterOwner(); + + /// @dev Initializes the contract. + constructor(address router) { + if (router == address(0)) { + revert RouterMustBeSet(); + } + i_router = IOwnableFunctionsRouter(router); + } + + /// @notice Return the Router + function _getRouter() internal view returns (IOwnableFunctionsRouter router) { + return i_router; + } + + /// @notice Reverts if called by anyone other than the router. + modifier onlyRouter() { + if (msg.sender != address(i_router)) { + revert OnlyCallableByRouter(); + } + _; + } + + /// @notice Reverts if called by anyone other than the router owner. 
+ modifier onlyRouterOwner() { + if (msg.sender != i_router.owner()) { + revert OnlyCallableByRouterOwner(); + } + _; + } +} diff --git a/contracts/src/v0.8/functions/v1_0_0/accessControl/TermsOfServiceAllowList.sol b/contracts/src/v0.8/functions/v1_0_0/accessControl/TermsOfServiceAllowList.sol new file mode 100644 index 00000000..8a42e34c --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/accessControl/TermsOfServiceAllowList.sol @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {ITermsOfServiceAllowList} from "./interfaces/ITermsOfServiceAllowList.sol"; +import {IAccessController} from "../../../shared/interfaces/IAccessController.sol"; +import {ITypeAndVersion} from "../../../shared/interfaces/ITypeAndVersion.sol"; + +import {ConfirmedOwner} from "../../../shared/access/ConfirmedOwner.sol"; + +import {Address} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Address.sol"; +import {EnumerableSet} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableSet.sol"; + +/// @notice A contract to handle access control of subscription management dependent on signing a Terms of Service +contract TermsOfServiceAllowList is ITermsOfServiceAllowList, IAccessController, ITypeAndVersion, ConfirmedOwner { + using Address for address; + using EnumerableSet for EnumerableSet.AddressSet; + + /// @inheritdoc ITypeAndVersion + string public constant override typeAndVersion = "Functions Terms of Service Allow List v1.0.0"; + + EnumerableSet.AddressSet private s_allowedSenders; + mapping(address => bool) private s_blockedSenders; + + event AddedAccess(address user); + event BlockedAccess(address user); + event UnblockedAccess(address user); + + error InvalidSignature(); + error InvalidUsage(); + error RecipientIsBlocked(); + + // ================================================================ + // | Configuration state | + // ================================================================ + 
struct Config { + bool enabled; // ═════════════╗ When enabled, access will be checked against s_allowedSenders. When disabled, all access will be allowed. + address signerPublicKey; // ══╝ The key pair that needs to sign the acceptance data + } + + Config private s_config; + + event ConfigUpdated(Config config); + + // ================================================================ + // | Initialization | + // ================================================================ + + constructor(Config memory config) ConfirmedOwner(msg.sender) { + updateConfig(config); + } + + // ================================================================ + // | Configuration | + // ================================================================ + + /// @notice Gets the contract's configuration + /// @return config + function getConfig() external view returns (Config memory) { + return s_config; + } + + /// @notice Sets the contract's configuration + /// @param config - See the contents of the TermsOfServiceAllowList.Config struct for more information + function updateConfig(Config memory config) public onlyOwner { + s_config = config; + emit ConfigUpdated(config); + } + + // ================================================================ + // | Allow methods | + // ================================================================ + + /// @inheritdoc ITermsOfServiceAllowList + function getMessage(address acceptor, address recipient) public pure override returns (bytes32) { + return keccak256(abi.encodePacked(acceptor, recipient)); + } + + /// @inheritdoc ITermsOfServiceAllowList + function acceptTermsOfService(address acceptor, address recipient, bytes32 r, bytes32 s, uint8 v) external override { + if (s_blockedSenders[recipient]) { + revert RecipientIsBlocked(); + } + + // Validate that the signature is correct and the correct data has been signed + bytes32 prefixedMessage = keccak256( + abi.encodePacked("\x19Ethereum Signed Message:\n32", getMessage(acceptor, recipient)) + ); 
+ if (ecrecover(prefixedMessage, v, r, s) != s_config.signerPublicKey) { + revert InvalidSignature(); + } + + // If contract, validate that msg.sender == recipient + // This is to prevent EoAs from claiming contracts that they are not in control of + // If EoA, validate that msg.sender == acceptor == recipient + // This is to prevent EoAs from accepting for other EoAs + if (msg.sender != recipient || (msg.sender != acceptor && !msg.sender.isContract())) { + revert InvalidUsage(); + } + + // Add recipient to the allow list + s_allowedSenders.add(recipient); + emit AddedAccess(recipient); + } + + /// @inheritdoc ITermsOfServiceAllowList + function getAllAllowedSenders() external view override returns (address[] memory) { + return s_allowedSenders.values(); + } + + /// @inheritdoc IAccessController + function hasAccess(address user, bytes calldata /* data */) external view override returns (bool) { + if (!s_config.enabled) { + return true; + } + return s_allowedSenders.contains(user); + } + + // ================================================================ + // | Block methods | + // ================================================================ + + /// @inheritdoc ITermsOfServiceAllowList + function isBlockedSender(address sender) external view override returns (bool) { + if (!s_config.enabled) { + return false; + } + return s_blockedSenders[sender]; + } + + /// @inheritdoc ITermsOfServiceAllowList + function blockSender(address sender) external override onlyOwner { + s_allowedSenders.remove(sender); + s_blockedSenders[sender] = true; + emit BlockedAccess(sender); + } + + /// @inheritdoc ITermsOfServiceAllowList + function unblockSender(address sender) external override onlyOwner { + s_blockedSenders[sender] = false; + emit UnblockedAccess(sender); + } +} diff --git a/contracts/src/v0.8/functions/v1_0_0/accessControl/interfaces/ITermsOfServiceAllowList.sol b/contracts/src/v0.8/functions/v1_0_0/accessControl/interfaces/ITermsOfServiceAllowList.sol new file mode 
100644 index 00000000..13ef6147 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/accessControl/interfaces/ITermsOfServiceAllowList.sol @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +/// @notice A contract to handle access control of subscription management dependent on signing a Terms of Service +interface ITermsOfServiceAllowList { + /// @notice Return the message data for the proof given to accept the Terms of Service + /// @param acceptor - The wallet address that has accepted the Terms of Service on the UI + /// @param recipient - The recipient address that the acceptor is taking responsibility for + /// @return Hash of the message data + function getMessage(address acceptor, address recipient) external pure returns (bytes32); + + /// @notice Check if the address is blocked for usage + /// @param sender The transaction sender's address + /// @return True or false + function isBlockedSender(address sender) external returns (bool); + + /// @notice Get a list of all allowed senders + /// @dev WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed + /// to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that + /// this function has an unbounded cost, and using it as part of a state-changing function may render the function + /// uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block. 
+ /// @return addresses - all allowed addresses + function getAllAllowedSenders() external view returns (address[] memory); + + /// @notice Allows access to the sender based on acceptance of the Terms of Service + /// @param acceptor - The wallet address that has accepted the Terms of Service on the UI + /// @param recipient - The recipient address that the acceptor is taking responsibility for + /// @param r - ECDSA signature r data produced by the Plugin Functions Subscription UI + /// @param s - ECDSA signature s produced by the Plugin Functions Subscription UI + /// @param v - ECDSA signature v produced by the Plugin Functions Subscription UI + function acceptTermsOfService(address acceptor, address recipient, bytes32 r, bytes32 s, uint8 v) external; + + /// @notice Removes a sender's access if already authorized, and disallows re-accepting the Terms of Service + /// @param sender - Address of the sender to block + function blockSender(address sender) external; + + /// @notice Re-allows a previously blocked sender to accept the Terms of Service + /// @param sender - Address of the sender to unblock + function unblockSender(address sender) external; +} diff --git a/contracts/src/v0.8/functions/v1_0_0/example/FunctionsClientExample.sol b/contracts/src/v0.8/functions/v1_0_0/example/FunctionsClientExample.sol new file mode 100644 index 00000000..bde0c973 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/example/FunctionsClientExample.sol @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsClient} from "../FunctionsClient.sol"; +import {ConfirmedOwner} from "../../../shared/access/ConfirmedOwner.sol"; +import {FunctionsRequest} from "../libraries/FunctionsRequest.sol"; + +/// @title Plugin Functions example Client contract implementation +contract FunctionsClientExample is FunctionsClient, ConfirmedOwner { + using FunctionsRequest for FunctionsRequest.Request; + + uint32 public constant MAX_CALLBACK_GAS = 70_000; + + 
bytes32 public s_lastRequestId; + bytes32 public s_lastResponse; + bytes32 public s_lastError; + uint32 public s_lastResponseLength; + uint32 public s_lastErrorLength; + + error UnexpectedRequestID(bytes32 requestId); + + constructor(address router) FunctionsClient(router) ConfirmedOwner(msg.sender) {} + + /// @notice Send a simple request + /// @param source JavaScript source code + /// @param encryptedSecretsReferences Encrypted secrets payload + /// @param args List of arguments accessible from within the source code + /// @param subscriptionId Billing ID + function sendRequest( + string calldata source, + bytes calldata encryptedSecretsReferences, + string[] calldata args, + uint64 subscriptionId, + bytes32 jobId + ) external onlyOwner { + FunctionsRequest.Request memory req; + req.initializeRequestForInlineJavaScript(source); + if (encryptedSecretsReferences.length > 0) req.addSecretsReference(encryptedSecretsReferences); + if (args.length > 0) req.setArgs(args); + s_lastRequestId = _sendRequest(req.encodeCBOR(), subscriptionId, MAX_CALLBACK_GAS, jobId); + } + + /// @notice Store latest result/error + /// @param requestId The request ID, returned by sendRequest() + /// @param response Aggregated response from the user code + /// @param err Aggregated error from the user code or from the execution pipeline + /// @dev Either response or error parameter will be set, but never both + function fulfillRequest(bytes32 requestId, bytes memory response, bytes memory err) internal override { + if (s_lastRequestId != requestId) { + revert UnexpectedRequestID(requestId); + } + // Save only the first 32 bytes of response/error to always fit within MAX_CALLBACK_GAS + s_lastResponse = bytesToBytes32(response); + s_lastResponseLength = uint32(response.length); + s_lastError = bytesToBytes32(err); + s_lastErrorLength = uint32(err.length); + } + + function bytesToBytes32(bytes memory b) private pure returns (bytes32 out) { + uint256 maxLen = 32; + if (b.length < 32) { + maxLen 
= b.length; + } + for (uint256 i = 0; i < maxLen; ++i) { + out |= bytes32(b[i]) >> (i * 8); + } + return out; + } +} diff --git a/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsBilling.sol b/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsBilling.sol new file mode 100644 index 00000000..b2b26955 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsBilling.sol @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +/// @title Plugin Functions DON billing interface. +interface IFunctionsBilling { + /// @notice Return the current conversion from WEI of ETH to PLI from the configured Plugin data feed + /// @return weiPerUnitLink - The amount of WEI in one PLI + function getWeiPerUnitLink() external view returns (uint256); + + /// @notice Determine the fee that will be split between Node Operators for servicing a request + /// @param requestCBOR - CBOR encoded Plugin Functions request data, use FunctionsRequest library to encode a request + /// @return fee - Cost in Juels (1e18) of PLI + function getDONFee(bytes memory requestCBOR) external view returns (uint72); + + /// @notice Determine the fee that will be paid to the Router owner for operating the network + /// @return fee - Cost in Juels (1e18) of PLI + function getAdminFee() external view returns (uint72); + + /// @notice Estimate the total cost that will be charged to a subscription to make a request: transmitter gas reimbursement, plus DON fee, plus Registry fee + /// @param - subscriptionId An identifier of the billing account + /// @param - data Encoded Plugin Functions request data, use FunctionsClient API to encode a request + /// @param - callbackGasLimit Gas limit for the fulfillment callback + /// @param - gasPriceWei The blockchain's gas price to estimate with + /// @return - billedCost Cost in Juels (1e18) of PLI + function estimateCost( + uint64 subscriptionId, + bytes calldata data, + uint32 callbackGasLimit, + uint256 gasPriceWei + 
) external view returns (uint96); + + /// @notice Remove a request commitment that the Router has determined to be stale + /// @param requestId - The request ID to remove + function deleteCommitment(bytes32 requestId) external; + + /// @notice Oracle withdraw PLI earned through fulfilling requests + /// @notice If amount is 0 the full balance will be withdrawn + /// @param recipient where to send the funds + /// @param amount amount to withdraw + function oracleWithdraw(address recipient, uint96 amount) external; + + /// @notice Withdraw all PLI earned by Oracles through fulfilling requests + function oracleWithdrawAll() external; +} diff --git a/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsClient.sol b/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsClient.sol new file mode 100644 index 00000000..7ecb9636 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsClient.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +/// @title Plugin Functions client interface. +interface IFunctionsClient { + /// @notice Plugin Functions response handler called by the Functions Router + /// during fulfillment from the designated transmitter node in an OCR round. + /// @param requestId The requestId returned by FunctionsClient.sendRequest(). + /// @param response Aggregated response from the request's source code. + /// @param err Aggregated error either from the request's source code or from the execution pipeline. + /// @dev Either response or error parameter will be set, but never both. 
+ function handleOracleFulfillment(bytes32 requestId, bytes memory response, bytes memory err) external; +} diff --git a/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsCoordinator.sol b/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsCoordinator.sol new file mode 100644 index 00000000..b8116171 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsCoordinator.sol @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsResponse} from "../libraries/FunctionsResponse.sol"; + +/// @title Plugin Functions DON Coordinator interface. +interface IFunctionsCoordinator { + /// @notice Returns the DON's threshold encryption public key used to encrypt secrets + /// @dev All nodes on the DON have separate key shares of the threshold decryption key + /// and nodes must participate in a threshold decryption OCR round to decrypt secrets + /// @return thresholdPublicKey the DON's threshold encryption public key + function getThresholdPublicKey() external view returns (bytes memory); + + /// @notice Sets the DON's threshold encryption public key used to encrypt secrets + /// @dev Used to rotate the key + /// @param thresholdPublicKey The new public key + function setThresholdPublicKey(bytes calldata thresholdPublicKey) external; + + /// @notice Returns the DON's secp256k1 public key that is used to encrypt secrets + /// @dev All nodes on the DON have the corresponding private key + /// needed to decrypt the secrets encrypted with the public key + /// @return publicKey the DON's public key + function getDONPublicKey() external view returns (bytes memory); + + /// @notice Sets DON's secp256k1 public key used to encrypt secrets + /// @dev Used to rotate the key + /// @param donPublicKey The new public key + function setDONPublicKey(bytes calldata donPublicKey) external; + + /// @notice Receives a request to be emitted to the DON for processing + /// @param request The request metadata + /// @dev see the 
struct for field descriptions + /// @return commitment - The parameters of the request that must be held consistent at response time + function startRequest( + FunctionsResponse.RequestMeta calldata request + ) external returns (FunctionsResponse.Commitment memory commitment); +} diff --git a/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsRouter.sol b/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsRouter.sol new file mode 100644 index 00000000..1e4019f0 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsRouter.sol @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsResponse} from "../libraries/FunctionsResponse.sol"; + +/// @title Plugin Functions Router interface. +interface IFunctionsRouter { + /// @notice The identifier of the route to retrieve the address of the access control contract + /// The access control contract controls which accounts can manage subscriptions + /// @return id - bytes32 id that can be passed to the "getContractById" of the Router + function getAllowListId() external view returns (bytes32); + + /// @notice Set the identifier of the route to retrieve the address of the access control contract + /// The access control contract controls which accounts can manage subscriptions + function setAllowListId(bytes32 allowListId) external; + + /// @notice Get the flat fee (in Juels of PLI) that will be paid to the Router owner for operation of the network + /// @return adminFee + function getAdminFee() external view returns (uint72 adminFee); + + /// @notice Sends a request using the provided subscriptionId + /// @param subscriptionId - A unique subscription ID allocated by billing system, + /// a client can make requests from different contracts referencing the same subscription + /// @param data - CBOR encoded Plugin Functions request data, use FunctionsClient API to encode a request + /// @param dataVersion - The version of the structure of the CBOR encoded request data + /// 
@param callbackGasLimit - Gas limit for the fulfillment callback + /// @param donId - An identifier used to determine which route to send the request along + /// @return requestId - A unique request identifier + function sendRequest( + uint64 subscriptionId, + bytes calldata data, + uint16 dataVersion, + uint32 callbackGasLimit, + bytes32 donId + ) external returns (bytes32); + + /// @notice Sends a request to the proposed contracts + /// @param subscriptionId - A unique subscription ID allocated by billing system, + /// a client can make requests from different contracts referencing the same subscription + /// @param data - CBOR encoded Plugin Functions request data, use FunctionsClient API to encode a request + /// @param dataVersion - The version of the structure of the CBOR encoded request data + /// @param callbackGasLimit - Gas limit for the fulfillment callback + /// @param donId - An identifier used to determine which route to send the request along + /// @return requestId - A unique request identifier + function sendRequestToProposed( + uint64 subscriptionId, + bytes calldata data, + uint16 dataVersion, + uint32 callbackGasLimit, + bytes32 donId + ) external returns (bytes32); + + /// @notice Fulfill the request by: + /// - calling back the data that the Oracle returned to the client contract + /// - pay the DON for processing the request + /// @dev Only callable by the Coordinator contract that is saved in the commitment + /// @param response response data from DON consensus + /// @param err error from DON consensus + /// @param juelsPerGas - current rate of juels/gas + /// @param costWithoutFulfillment - The cost of processing the request (in Juels of PLI ), without fulfillment + /// @param transmitter - The Node that transmitted the OCR report + /// @param commitment - The parameters of the request that must be held consistent between request and response time + /// @return fulfillResult - + /// @return callbackGasCostJuels - + function fulfill( + bytes memory response, + 
bytes memory err, + uint96 juelsPerGas, + uint96 costWithoutFulfillment, + address transmitter, + FunctionsResponse.Commitment memory commitment + ) external returns (FunctionsResponse.FulfillResult, uint96); + + /// @notice Validate requested gas limit is below the subscription max. + /// @param subscriptionId subscription ID + /// @param callbackGasLimit desired callback gas limit + function isValidCallbackGasLimit(uint64 subscriptionId, uint32 callbackGasLimit) external view; + + /// @notice Get the current contract given an ID + /// @param id A bytes32 identifier for the route + /// @return contract The current contract address + function getContractById(bytes32 id) external view returns (address); + + /// @notice Get the proposed next contract given an ID + /// @param id A bytes32 identifier for the route + /// @return contract The current or proposed contract address + function getProposedContractById(bytes32 id) external view returns (address); + + /// @notice Return the latest proposal set + /// @return ids The identifiers of the contracts to update + /// @return to The addresses of the contracts that will be updated to + function getProposedContractSet() external view returns (bytes32[] memory, address[] memory); + + /// @notice Proposes one or more updates to the contract routes + /// @dev Only callable by owner + function proposeContractsUpdate(bytes32[] memory proposalSetIds, address[] memory proposalSetAddresses) external; + + /// @notice Updates the current contract routes to the proposed contracts + /// @dev Only callable by owner + function updateContracts() external; + + /// @dev Puts the system into an emergency stopped state. + /// @dev Only callable by owner + function pause() external; + + /// @dev Takes the system out of an emergency stopped state. 
+ /// @dev Only callable by owner + function unpause() external; +} diff --git a/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsSubscriptions.sol b/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsSubscriptions.sol new file mode 100644 index 00000000..7d7c3b18 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/interfaces/IFunctionsSubscriptions.sol @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {FunctionsResponse} from "../libraries/FunctionsResponse.sol"; + +/// @title Plugin Functions Subscription interface. +interface IFunctionsSubscriptions { + struct Subscription { + uint96 balance; // ═════════╗ Common PLI balance that is controlled by the Router to be used for all consumer requests. + address owner; // ══════════╝ The owner can fund/withdraw/cancel the subscription. + uint96 blockedBalance; // ══╗ PLI balance that is reserved to pay for pending consumer requests. + address proposedOwner; // ══╝ For safely transferring sub ownership. + address[] consumers; // ════╸ Client contracts that can use the subscription + bytes32 flags; // ══════════╸ Per-subscription flags + } + + struct Consumer { + bool allowed; // ══════════════╗ Owner can fund/withdraw/cancel the sub. + uint64 initiatedRequests; // ║ The number of requests that have been started + uint64 completedRequests; // ══╝ The number of requests that have successfully completed or timed out + } + + /// @notice Get details about a subscription. 
+ /// @param subscriptionId - the ID of the subscription + /// @return subscription - see IFunctionsSubscriptions.Subscription for more information on the structure + function getSubscription(uint64 subscriptionId) external view returns (Subscription memory); + + /// @notice Retrieve details about multiple subscriptions using an inclusive range + /// @param subscriptionIdStart - the ID of the subscription to start the range at + /// @param subscriptionIdEnd - the ID of the subscription to end the range at + /// @return subscriptions - see IFunctionsSubscriptions.Subscription for more information on the structure + function getSubscriptionsInRange( + uint64 subscriptionIdStart, + uint64 subscriptionIdEnd + ) external view returns (Subscription[] memory); + + /// @notice Get details about a consumer of a subscription. + /// @param client - the consumer contract address + /// @param subscriptionId - the ID of the subscription + /// @return consumer - see IFunctionsSubscriptions.Consumer for more information on the structure + function getConsumer(address client, uint64 subscriptionId) external view returns (Consumer memory); + + /// @notice Get details about the total amount of PLI within the system + /// @return totalBalance - total Juels of PLI held by the contract + function getTotalBalance() external view returns (uint96); + + /// @notice Get details about the total number of subscription accounts + /// @return count - total number of subscriptions in the system + function getSubscriptionCount() external view returns (uint64); + + /// @notice Time out all expired requests: unlocks funds and removes the ability for the request to be fulfilled + /// @param requestsToTimeoutByCommitment - A list of request commitments to time out + /// @dev The commitment can be found on the "OracleRequest" event created when sending the request. 
+ function timeoutRequests(FunctionsResponse.Commitment[] calldata requestsToTimeoutByCommitment) external; + + /// @notice Oracle withdraw PLI earned through fulfilling requests + /// @notice If amount is 0 the full balance will be withdrawn + /// @notice Both signing and transmitting wallets will have a balance to withdraw + /// @param recipient where to send the funds + /// @param amount amount to withdraw + function oracleWithdraw(address recipient, uint96 amount) external; + + /// @notice Owner cancel subscription, sends remaining link directly to the subscription owner. + /// @dev Only callable by the Router Owner + /// @param subscriptionId subscription id + /// @dev notably can be called even if there are pending requests, outstanding ones may fail onchain + function ownerCancelSubscription(uint64 subscriptionId) external; + + /// @notice Recover link sent with transfer instead of transferAndCall. + /// @dev Only callable by the Router Owner + /// @param to address to send link to + function recoverFunds(address to) external; + + /// @notice Create a new subscription. + /// @return subscriptionId - A unique subscription id. + /// @dev You can manage the consumer set dynamically with addConsumer/removeConsumer. + /// @dev Note to fund the subscription, use transferAndCall. For example + /// @dev PLITOKEN.transferAndCall( + /// @dev address(ROUTER), + /// @dev amount, + /// @dev abi.encode(subscriptionId)); + function createSubscription() external returns (uint64); + + /// @notice Create a new subscription and add a consumer. + /// @return subscriptionId - A unique subscription id. + /// @dev You can manage the consumer set dynamically with addConsumer/removeConsumer. + /// @dev Note to fund the subscription, use transferAndCall. 
For example + /// @dev PLITOKEN.transferAndCall( + /// @dev address(ROUTER), + /// @dev amount, + /// @dev abi.encode(subscriptionId)); + function createSubscriptionWithConsumer(address consumer) external returns (uint64 subscriptionId); + + /// @notice Propose a new owner for a subscription. + /// @dev Only callable by the Subscription's owner + /// @param subscriptionId - ID of the subscription + /// @param newOwner - proposed new owner of the subscription + function proposeSubscriptionOwnerTransfer(uint64 subscriptionId, address newOwner) external; + + /// @notice Accept an ownership transfer. + /// @param subscriptionId - ID of the subscription + /// @dev will revert if original owner of subscriptionId has not requested that msg.sender become the new owner. + function acceptSubscriptionOwnerTransfer(uint64 subscriptionId) external; + + /// @notice Remove a consumer from a Plugin Functions subscription. + /// @dev Only callable by the Subscription's owner + /// @param subscriptionId - ID of the subscription + /// @param consumer - Consumer to remove from the subscription + function removeConsumer(uint64 subscriptionId, address consumer) external; + + /// @notice Add a consumer to a Plugin Functions subscription. + /// @dev Only callable by the Subscription's owner + /// @param subscriptionId - ID of the subscription + /// @param consumer - New consumer which can use the subscription + function addConsumer(uint64 subscriptionId, address consumer) external; + + /// @notice Cancel a subscription + /// @dev Only callable by the Subscription's owner + /// @param subscriptionId - ID of the subscription + /// @param to - Where to send the remaining PLI to + function cancelSubscription(uint64 subscriptionId, address to) external; + + /// @notice Check to see if there exists a request commitment for all consumers for a given sub. 
+ /// @param subscriptionId - ID of the subscription + /// @return true if there exists at least one unfulfilled request for the subscription, false otherwise. + /// @dev Looping is bounded to MAX_CONSUMERS*(number of DONs). + /// @dev Used to disable subscription canceling while outstanding requests are present. + function pendingRequestExists(uint64 subscriptionId) external view returns (bool); + + /// @notice Set subscription specific flags for a subscription. + /// Each byte of the flag is used to represent a resource tier that the subscription can utilize. + /// @param subscriptionId - ID of the subscription + /// @param flags - desired flag values + function setFlags(uint64 subscriptionId, bytes32 flags) external; + + /// @notice Get flags for a given subscription. + /// @param subscriptionId - ID of the subscription + /// @return flags - current flag values + function getFlags(uint64 subscriptionId) external view returns (bytes32); +} diff --git a/contracts/src/v0.8/functions/v1_0_0/interfaces/IOwnableFunctionsRouter.sol b/contracts/src/v0.8/functions/v1_0_0/interfaces/IOwnableFunctionsRouter.sol new file mode 100644 index 00000000..315d9d01 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/interfaces/IOwnableFunctionsRouter.sol @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {IFunctionsRouter} from "./IFunctionsRouter.sol"; +import {IOwnable} from "../../../shared/interfaces/IOwnable.sol"; + +/// @title Plugin Functions Router interface with Ownability. 
+interface IOwnableFunctionsRouter is IOwnable, IFunctionsRouter {} diff --git a/contracts/src/v0.8/functions/v1_0_0/libraries/FunctionsRequest.sol b/contracts/src/v0.8/functions/v1_0_0/libraries/FunctionsRequest.sol new file mode 100644 index 00000000..9b8943a6 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/libraries/FunctionsRequest.sol @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {CBOR} from "../../../vendor/solidity-cborutils/v2.0.0/CBOR.sol"; + +/// @title Library for encoding the input data of a Functions request into CBOR +library FunctionsRequest { + using CBOR for CBOR.CBORBuffer; + + uint16 public constant REQUEST_DATA_VERSION = 1; + uint256 internal constant DEFAULT_BUFFER_SIZE = 256; + + enum Location { + Inline, // Provided within the Request + Remote, // Hosted through remote location that can be accessed through a provided URL + DONHosted // Hosted on the DON's storage + } + + enum CodeLanguage { + JavaScript + // In future version we may add other languages + } + + struct Request { + Location codeLocation; // ════════════╸ The location of the source code that will be executed on each node in the DON + Location secretsLocation; // ═════════╸ The location of secrets that will be passed into the source code. 
*Only Remote secrets are supported + CodeLanguage language; // ════════════╸ The coding language that the source code is written in + string source; // ════════════════════╸ Raw source code for Request.codeLocation of Location.Inline, URL for Request.codeLocation of Location.Remote, or slot decimal number for Request.codeLocation of Location.DONHosted + bytes encryptedSecretsReference; // ══╸ Encrypted URLs for Request.secretsLocation of Location.Remote (use addSecretsReference()), or CBOR encoded slotid+version for Request.secretsLocation of Location.DONHosted (use addDONHostedSecrets()) + string[] args; // ════════════════════╸ String arguments that will be passed into the source code + bytes[] bytesArgs; // ════════════════╸ Bytes arguments that will be passed into the source code + } + + error EmptySource(); + error EmptySecrets(); + error EmptyArgs(); + error NoInlineSecrets(); + + /// @notice Encodes a Request to CBOR encoded bytes + /// @param self The request to encode + /// @return CBOR encoded bytes + function encodeCBOR(Request memory self) internal pure returns (bytes memory) { + CBOR.CBORBuffer memory buffer = CBOR.create(DEFAULT_BUFFER_SIZE); + + buffer.writeString("codeLocation"); + buffer.writeUInt256(uint256(self.codeLocation)); + + buffer.writeString("language"); + buffer.writeUInt256(uint256(self.language)); + + buffer.writeString("source"); + buffer.writeString(self.source); + + if (self.args.length > 0) { + buffer.writeString("args"); + buffer.startArray(); + for (uint256 i = 0; i < self.args.length; ++i) { + buffer.writeString(self.args[i]); + } + buffer.endSequence(); + } + + if (self.encryptedSecretsReference.length > 0) { + if (self.secretsLocation == Location.Inline) { + revert NoInlineSecrets(); + } + buffer.writeString("secretsLocation"); + buffer.writeUInt256(uint256(self.secretsLocation)); + buffer.writeString("secrets"); + buffer.writeBytes(self.encryptedSecretsReference); + } + + if (self.bytesArgs.length > 0) { + 
buffer.writeString("bytesArgs"); + buffer.startArray(); + for (uint256 i = 0; i < self.bytesArgs.length; ++i) { + buffer.writeBytes(self.bytesArgs[i]); + } + buffer.endSequence(); + } + + return buffer.buf.buf; + } + + /// @notice Initializes a Plugin Functions Request + /// @dev Sets the codeLocation and code on the request + /// @param self The uninitialized request + /// @param codeLocation The user provided source code location + /// @param language The programming language of the user code + /// @param source The user provided source code or a url + function initializeRequest( + Request memory self, + Location codeLocation, + CodeLanguage language, + string memory source + ) internal pure { + if (bytes(source).length == 0) revert EmptySource(); + + self.codeLocation = codeLocation; + self.language = language; + self.source = source; + } + + /// @notice Initializes a Plugin Functions Request + /// @dev Simplified version of initializeRequest for PoC + /// @param self The uninitialized request + /// @param javaScriptSource The user provided JS code (must not be empty) + function initializeRequestForInlineJavaScript(Request memory self, string memory javaScriptSource) internal pure { + initializeRequest(self, Location.Inline, CodeLanguage.JavaScript, javaScriptSource); + } + + /// @notice Adds Remote user encrypted secrets to a Request + /// @param self The initialized request + /// @param encryptedSecretsReference Encrypted comma-separated string of URLs pointing to off-chain secrets + function addSecretsReference(Request memory self, bytes memory encryptedSecretsReference) internal pure { + if (encryptedSecretsReference.length == 0) revert EmptySecrets(); + + self.secretsLocation = Location.Remote; + self.encryptedSecretsReference = encryptedSecretsReference; + } + + /// @notice Adds DON-hosted secrets reference to a Request + /// @param self The initialized request + /// @param slotID Slot ID of the user's secrets hosted on DON + /// @param version User data 
version (for the slotID) + function addDONHostedSecrets(Request memory self, uint8 slotID, uint64 version) internal pure { + CBOR.CBORBuffer memory buffer = CBOR.create(DEFAULT_BUFFER_SIZE); + + buffer.writeString("slotID"); + buffer.writeUInt64(slotID); + buffer.writeString("version"); + buffer.writeUInt64(version); + + self.secretsLocation = Location.DONHosted; + self.encryptedSecretsReference = buffer.buf.buf; + } + + /// @notice Sets args for the user run function + /// @param self The initialized request + /// @param args The array of string args (must not be empty) + function setArgs(Request memory self, string[] memory args) internal pure { + if (args.length == 0) revert EmptyArgs(); + + self.args = args; + } + + /// @notice Sets bytes args for the user run function + /// @param self The initialized request + /// @param args The array of bytes args (must not be empty) + function setBytesArgs(Request memory self, bytes[] memory args) internal pure { + if (args.length == 0) revert EmptyArgs(); + + self.bytesArgs = args; + } +} diff --git a/contracts/src/v0.8/functions/v1_0_0/libraries/FunctionsResponse.sol b/contracts/src/v0.8/functions/v1_0_0/libraries/FunctionsResponse.sol new file mode 100644 index 00000000..35069790 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/libraries/FunctionsResponse.sol @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +/// @title Library of types that are used for fulfillment of a Functions request +library FunctionsResponse { + // Used to send request information from the Router to the Coordinator + struct RequestMeta { + bytes data; // ══════════════════╸ CBOR encoded Plugin Functions request data, use FunctionsRequest library to encode a request + bytes32 flags; // ═══════════════╸ Per-subscription flags + address requestingContract; // ══╗ The client contract that is sending the request + uint96 availableBalance; // ═════╝ Common PLI balance of the subscription that is controlled by the 
Router to be used for all consumer requests. + uint72 adminFee; // ═════════════╗ Flat fee (in Juels of PLI) that will be paid to the Router Owner for operation of the network + uint64 subscriptionId; // ║ Identifier of the billing subscription that will be charged for the request + uint64 initiatedRequests; // ║ The number of requests that have been started + uint32 callbackGasLimit; // ║ The amount of gas that the callback to the consuming contract will be given + uint16 dataVersion; // ══════════╝ The version of the structure of the CBOR encoded request data + uint64 completedRequests; // ════╗ The number of requests that have successfully completed or timed out + address subscriptionOwner; // ═══╝ The owner of the billing subscription + } + + enum FulfillResult { + FULFILLED, // 0 + USER_CALLBACK_ERROR, // 1 + INVALID_REQUEST_ID, // 2 + COST_EXCEEDS_COMMITMENT, // 3 + INSUFFICIENT_GAS_PROVIDED, // 4 + SUBSCRIPTION_BALANCE_INVARIANT_VIOLATION, // 5 + INVALID_COMMITMENT // 6 + } + + struct Commitment { + bytes32 requestId; // ═════════════════╸ A unique identifier for a Plugin Functions request + address coordinator; // ═══════════════╗ The Coordinator contract that manages the DON that is servicing a request + uint96 estimatedTotalCostJuels; // ════╝ The maximum cost in Juels (1e18) of PLI that will be charged to fulfill a request + address client; // ════════════════════╗ The client contract that sent the request + uint64 subscriptionId; // ║ Identifier of the billing subscription that will be charged for the request + uint32 callbackGasLimit; // ═══════════╝ The amount of gas that the callback to the consuming contract will be given + uint72 adminFee; // ═══════════════════╗ Flat fee (in Juels of PLI) that will be paid to the Router Owner for operation of the network + uint72 donFee; // ║ Fee (in Juels of PLI) that will be split between Node Operators for servicing a request + uint40 gasOverheadBeforeCallback; // ║ Represents the average gas execution cost 
before the fulfillment callback. + uint40 gasOverheadAfterCallback; // ║ Represents the average gas execution cost after the fulfillment callback. + uint32 timeoutTimestamp; // ═══════════╝ The timestamp at which a request will be eligible to be timed out + } +} diff --git a/contracts/src/v0.8/functions/v1_0_0/mocks/FunctionsV1EventsMock.sol b/contracts/src/v0.8/functions/v1_0_0/mocks/FunctionsV1EventsMock.sol new file mode 100644 index 00000000..68b51f89 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/mocks/FunctionsV1EventsMock.sol @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.19; + +contract FunctionsV1EventsMock { + struct Config { + uint16 maxConsumersPerSubscription; + uint72 adminFee; + bytes4 handleOracleFulfillmentSelector; + uint16 gasForCallExactCheck; + uint32[] maxCallbackGasLimits; + } + event ConfigUpdated(Config param1); + event ContractProposed( + bytes32 proposedContractSetId, + address proposedContractSetFromAddress, + address proposedContractSetToAddress + ); + event ContractUpdated(bytes32 id, address from, address to); + event FundsRecovered(address to, uint256 amount); + event OwnershipTransferRequested(address indexed from, address indexed to); + event OwnershipTransferred(address indexed from, address indexed to); + event Paused(address account); + event RequestNotProcessed(bytes32 indexed requestId, address coordinator, address transmitter, uint8 resultCode); + event RequestProcessed( + bytes32 indexed requestId, + uint64 indexed subscriptionId, + uint96 totalCostJuels, + address transmitter, + uint8 resultCode, + bytes response, + bytes err, + bytes callbackReturnData + ); + event RequestStart( + bytes32 indexed requestId, + bytes32 indexed donId, + uint64 indexed subscriptionId, + address subscriptionOwner, + address requestingContract, + address requestInitiator, + bytes data, + uint16 dataVersion, + uint32 callbackGasLimit, + uint96 estimatedTotalCostJuels + ); + event RequestTimedOut(bytes32 
indexed requestId); + event SubscriptionCanceled(uint64 indexed subscriptionId, address fundsRecipient, uint256 fundsAmount); + event SubscriptionConsumerAdded(uint64 indexed subscriptionId, address consumer); + event SubscriptionConsumerRemoved(uint64 indexed subscriptionId, address consumer); + event SubscriptionCreated(uint64 indexed subscriptionId, address owner); + event SubscriptionFunded(uint64 indexed subscriptionId, uint256 oldBalance, uint256 newBalance); + event SubscriptionOwnerTransferRequested(uint64 indexed subscriptionId, address from, address to); + event SubscriptionOwnerTransferred(uint64 indexed subscriptionId, address from, address to); + event Unpaused(address account); + + function emitConfigUpdated(Config memory param1) public { + emit ConfigUpdated(param1); + } + + function emitContractProposed( + bytes32 proposedContractSetId, + address proposedContractSetFromAddress, + address proposedContractSetToAddress + ) public { + emit ContractProposed(proposedContractSetId, proposedContractSetFromAddress, proposedContractSetToAddress); + } + + function emitContractUpdated(bytes32 id, address from, address to) public { + emit ContractUpdated(id, from, to); + } + + function emitFundsRecovered(address to, uint256 amount) public { + emit FundsRecovered(to, amount); + } + + function emitOwnershipTransferRequested(address from, address to) public { + emit OwnershipTransferRequested(from, to); + } + + function emitOwnershipTransferred(address from, address to) public { + emit OwnershipTransferred(from, to); + } + + function emitPaused(address account) public { + emit Paused(account); + } + + function emitRequestNotProcessed( + bytes32 requestId, + address coordinator, + address transmitter, + uint8 resultCode + ) public { + emit RequestNotProcessed(requestId, coordinator, transmitter, resultCode); + } + + function emitRequestProcessed( + bytes32 requestId, + uint64 subscriptionId, + uint96 totalCostJuels, + address transmitter, + uint8 resultCode, + bytes 
memory response, + bytes memory err, + bytes memory callbackReturnData + ) public { + emit RequestProcessed( + requestId, + subscriptionId, + totalCostJuels, + transmitter, + resultCode, + response, + err, + callbackReturnData + ); + } + + function emitRequestStart( + bytes32 requestId, + bytes32 donId, + uint64 subscriptionId, + address subscriptionOwner, + address requestingContract, + address requestInitiator, + bytes memory data, + uint16 dataVersion, + uint32 callbackGasLimit, + uint96 estimatedTotalCostJuels + ) public { + emit RequestStart( + requestId, + donId, + subscriptionId, + subscriptionOwner, + requestingContract, + requestInitiator, + data, + dataVersion, + callbackGasLimit, + estimatedTotalCostJuels + ); + } + + function emitRequestTimedOut(bytes32 requestId) public { + emit RequestTimedOut(requestId); + } + + function emitSubscriptionCanceled(uint64 subscriptionId, address fundsRecipient, uint256 fundsAmount) public { + emit SubscriptionCanceled(subscriptionId, fundsRecipient, fundsAmount); + } + + function emitSubscriptionConsumerAdded(uint64 subscriptionId, address consumer) public { + emit SubscriptionConsumerAdded(subscriptionId, consumer); + } + + function emitSubscriptionConsumerRemoved(uint64 subscriptionId, address consumer) public { + emit SubscriptionConsumerRemoved(subscriptionId, consumer); + } + + function emitSubscriptionCreated(uint64 subscriptionId, address owner) public { + emit SubscriptionCreated(subscriptionId, owner); + } + + function emitSubscriptionFunded(uint64 subscriptionId, uint256 oldBalance, uint256 newBalance) public { + emit SubscriptionFunded(subscriptionId, oldBalance, newBalance); + } + + function emitSubscriptionOwnerTransferRequested(uint64 subscriptionId, address from, address to) public { + emit SubscriptionOwnerTransferRequested(subscriptionId, from, to); + } + + function emitSubscriptionOwnerTransferred(uint64 subscriptionId, address from, address to) public { + emit 
SubscriptionOwnerTransferred(subscriptionId, from, to); + } + + function emitUnpaused(address account) public { + emit Unpaused(account); + } +} diff --git a/contracts/src/v0.8/functions/v1_0_0/ocr/OCR2Abstract.sol b/contracts/src/v0.8/functions/v1_0_0/ocr/OCR2Abstract.sol new file mode 100644 index 00000000..09c4a825 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/ocr/OCR2Abstract.sol @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {ITypeAndVersion} from "../../../shared/interfaces/ITypeAndVersion.sol"; + +abstract contract OCR2Abstract is ITypeAndVersion { + // Maximum number of oracles the offchain reporting protocol is designed for + uint256 internal constant MAX_NUM_ORACLES = 31; + + /** + * @notice triggers a new run of the offchain reporting protocol + * @param previousConfigBlockNumber block in which the previous config was set, to simplify historic analysis + * @param configDigest configDigest of this configuration + * @param configCount ordinal number of this config setting among all config settings over the life of this contract + * @param signers ith element is address ith oracle uses to sign a report + * @param transmitters ith element is address ith oracle uses to transmit a report via the transmit method + * @param f maximum number of faulty/dishonest oracles the protocol can tolerate while still working correctly + * @param onchainConfig serialized configuration used by the contract (and possibly oracles) + * @param offchainConfigVersion version of the serialization format used for "offchainConfig" parameter + * @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + */ + event ConfigSet( + uint32 previousConfigBlockNumber, + bytes32 configDigest, + uint64 configCount, + address[] signers, + address[] transmitters, + uint8 f, + bytes onchainConfig, + uint64 offchainConfigVersion, + bytes offchainConfig + ); + + /** + * @notice sets offchain 
reporting protocol configuration incl. participating oracles + * @param signers addresses with which oracles sign the reports + * @param transmitters addresses oracles use to transmit the reports + * @param f number of faulty oracles the system can tolerate + * @param onchainConfig serialized configuration used by the contract (and possibly oracles) + * @param offchainConfigVersion version number for offchainEncoding schema + * @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + */ + function setConfig( + address[] memory signers, + address[] memory transmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) external virtual; + + /** + * @notice information about current offchain reporting protocol configuration + * @return configCount ordinal number of current config, out of all configs applied to this contract so far + * @return blockNumber block at which this config was set + * @return configDigest domain-separation tag for current config (see _configDigestFromConfigData) + */ + function latestConfigDetails() + external + view + virtual + returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest); + + function _configDigestFromConfigData( + uint256 chainId, + address contractAddress, + uint64 configCount, + address[] memory signers, + address[] memory transmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) internal pure returns (bytes32) { + uint256 h = uint256( + keccak256( + abi.encode( + chainId, + contractAddress, + configCount, + signers, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ) + ) + ); + uint256 prefixMask = type(uint256).max << (256 - 16); // 0xFFFF00..00 + uint256 prefix = 0x0001 << (256 - 16); // 0x000100..00 + return bytes32((prefix & prefixMask) | (h & ~prefixMask)); + } + + /** + * @notice optionally emited 
to indicate the latest configDigest and epoch for + which a report was successfully transmitted. Alternatively, the contract may + use latestConfigDigestAndEpoch with scanLogs set to false. + */ + event Transmitted(bytes32 configDigest, uint32 epoch); + + /** + * @notice optionally returns the latest configDigest and epoch for which a + report was successfully transmitted. Alternatively, the contract may return + scanLogs set to true and use Transmitted events to provide this information + to offchain watchers. + * @return scanLogs indicates whether to rely on the configDigest and epoch + returned or whether to scan logs for the Transmitted event instead. + * @return configDigest + * @return epoch + */ + function latestConfigDigestAndEpoch() + external + view + virtual + returns (bool scanLogs, bytes32 configDigest, uint32 epoch); + + /** + * @notice transmit is called to post a new report to the contract + * @param report serialized report, which the signatures are signing. + * @param rs ith element is the R components of the ith signature on report. Must have at most maxNumOracles entries + * @param ss ith element is the S components of the ith signature on report. 
Must have at most maxNumOracles entries + * @param rawVs ith element is the V component of the ith signature + */ + function transmit( + // NOTE: If these parameters are changed, expectedMsgDataLength and/or + // TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT need to be changed accordingly + bytes32[3] calldata reportContext, + bytes calldata report, + bytes32[] calldata rs, + bytes32[] calldata ss, + bytes32 rawVs // signatures + ) external virtual; +} diff --git a/contracts/src/v0.8/functions/v1_0_0/ocr/OCR2Base.sol b/contracts/src/v0.8/functions/v1_0_0/ocr/OCR2Base.sol new file mode 100644 index 00000000..ba671d44 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_0_0/ocr/OCR2Base.sol @@ -0,0 +1,361 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {ConfirmedOwner} from "../../../shared/access/ConfirmedOwner.sol"; +import {OCR2Abstract} from "./OCR2Abstract.sol"; + +/** + * @notice Onchain verification of reports from the offchain reporting protocol + * @dev For details on its operation, see the offchain reporting protocol design + * doc, which refers to this contract as simply the "contract". + */ +abstract contract OCR2Base is ConfirmedOwner, OCR2Abstract { + error ReportInvalid(); + error InvalidConfig(string message); + + bool internal immutable i_uniqueReports; + + constructor(bool uniqueReports) ConfirmedOwner(msg.sender) { + i_uniqueReports = uniqueReports; + } + + uint256 private constant maxUint32 = (1 << 32) - 1; + + // incremented each time a new config is posted. This count is incorporated + // into the config digest, to prevent replay attacks. + uint32 internal s_configCount; + uint32 internal s_latestConfigBlockNumber; // makes it easier for offchain systems + // to extract config from logs. + + // Storing these fields used on the hot path in a ConfigInfo variable reduces the + // retrieval of all of them to a single SLOAD. If any further fields are + // added, make sure that storage of the struct still takes at most 32 bytes. 
+ struct ConfigInfo { + bytes32 latestConfigDigest; + uint8 f; // TODO: could be optimized by squeezing into one slot + uint8 n; + } + ConfigInfo internal s_configInfo; + + // Used for s_oracles[a].role, where a is an address, to track the purpose + // of the address, or to indicate that the address is unset. + enum Role { + // No oracle role has been set for address a + Unset, + // Signing address for the s_oracles[a].index'th oracle. I.e., report + // signatures from this oracle should ecrecover back to address a. + Signer, + // Transmission address for the s_oracles[a].index'th oracle. I.e., if a + // report is received by OCR2Aggregator.transmit in which msg.sender is + // a, it is attributed to the s_oracles[a].index'th oracle. + Transmitter + } + + struct Oracle { + uint8 index; // Index of oracle in s_signers/s_transmitters + Role role; // Role of the address which mapped to this struct + } + + mapping(address signerOrTransmitter => Oracle) internal s_oracles; + + // s_signers contains the signing address of each oracle + address[] internal s_signers; + + // s_transmitters contains the transmission address of each oracle, + // i.e. 
the address the oracle actually sends transactions to the contract from + address[] internal s_transmitters; + + /* + * Config logic + */ + + // Reverts transaction if config args are invalid + modifier checkConfigValid( + uint256 numSigners, + uint256 numTransmitters, + uint256 f + ) { + if (numSigners > MAX_NUM_ORACLES) revert InvalidConfig("too many signers"); + if (f == 0) revert InvalidConfig("f must be positive"); + if (numSigners != numTransmitters) revert InvalidConfig("oracle addresses out of registration"); + if (numSigners <= 3 * f) revert InvalidConfig("faulty-oracle f too high"); + _; + } + + struct SetConfigArgs { + address[] signers; + address[] transmitters; + uint8 f; + bytes onchainConfig; + uint64 offchainConfigVersion; + bytes offchainConfig; + } + + /// @inheritdoc OCR2Abstract + function latestConfigDigestAndEpoch() + external + view + virtual + override + returns (bool scanLogs, bytes32 configDigest, uint32 epoch) + { + return (true, bytes32(0), uint32(0)); + } + + /** + * @notice sets offchain reporting protocol configuration incl. 
participating oracles + * @param _signers addresses with which oracles sign the reports + * @param _transmitters addresses oracles use to transmit the reports + * @param _f number of faulty oracles the system can tolerate + * @param _onchainConfig encoded on-chain contract configuration + * @param _offchainConfigVersion version number for offchainEncoding schema + * @param _offchainConfig encoded off-chain oracle configuration + */ + function setConfig( + address[] memory _signers, + address[] memory _transmitters, + uint8 _f, + bytes memory _onchainConfig, + uint64 _offchainConfigVersion, + bytes memory _offchainConfig + ) external override checkConfigValid(_signers.length, _transmitters.length, _f) onlyOwner { + SetConfigArgs memory args = SetConfigArgs({ + signers: _signers, + transmitters: _transmitters, + f: _f, + onchainConfig: _onchainConfig, + offchainConfigVersion: _offchainConfigVersion, + offchainConfig: _offchainConfig + }); + + _beforeSetConfig(args.f, args.onchainConfig); + + while (s_signers.length != 0) { + // remove any old signer/transmitter addresses + uint256 lastIdx = s_signers.length - 1; + address signer = s_signers[lastIdx]; + address transmitter = s_transmitters[lastIdx]; + delete s_oracles[signer]; + delete s_oracles[transmitter]; + s_signers.pop(); + s_transmitters.pop(); + } + + // Bounded by MAX_NUM_ORACLES in OCR2Abstract.sol + for (uint256 i = 0; i < args.signers.length; i++) { + // add new signer/transmitter addresses + require(s_oracles[args.signers[i]].role == Role.Unset, "repeated signer address"); + s_oracles[args.signers[i]] = Oracle(uint8(i), Role.Signer); + require(s_oracles[args.transmitters[i]].role == Role.Unset, "repeated transmitter address"); + s_oracles[args.transmitters[i]] = Oracle(uint8(i), Role.Transmitter); + s_signers.push(args.signers[i]); + s_transmitters.push(args.transmitters[i]); + } + s_configInfo.f = args.f; + uint32 previousConfigBlockNumber = s_latestConfigBlockNumber; + s_latestConfigBlockNumber = 
uint32(block.number); + s_configCount += 1; + { + s_configInfo.latestConfigDigest = configDigestFromConfigData( + block.chainid, + address(this), + s_configCount, + args.signers, + args.transmitters, + args.f, + args.onchainConfig, + args.offchainConfigVersion, + args.offchainConfig + ); + } + s_configInfo.n = uint8(args.signers.length); + + emit ConfigSet( + previousConfigBlockNumber, + s_configInfo.latestConfigDigest, + s_configCount, + args.signers, + args.transmitters, + args.f, + args.onchainConfig, + args.offchainConfigVersion, + args.offchainConfig + ); + } + + function configDigestFromConfigData( + uint256 _chainId, + address _contractAddress, + uint64 _configCount, + address[] memory _signers, + address[] memory _transmitters, + uint8 _f, + bytes memory _onchainConfig, + uint64 _encodedConfigVersion, + bytes memory _encodedConfig + ) internal pure returns (bytes32) { + uint256 h = uint256( + keccak256( + abi.encode( + _chainId, + _contractAddress, + _configCount, + _signers, + _transmitters, + _f, + _onchainConfig, + _encodedConfigVersion, + _encodedConfig + ) + ) + ); + uint256 prefixMask = type(uint256).max << (256 - 16); // 0xFFFF00..00 + uint256 prefix = 0x0001 << (256 - 16); // 0x000100..00 + return bytes32((prefix & prefixMask) | (h & ~prefixMask)); + } + + /** + * @notice information about current offchain reporting protocol configuration + * @return configCount ordinal number of current config, out of all configs applied to this contract so far + * @return blockNumber block at which this config was set + * @return configDigest domain-separation tag for current config (see configDigestFromConfigData) + */ + function latestConfigDetails() + external + view + override + returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest) + { + return (s_configCount, s_latestConfigBlockNumber, s_configInfo.latestConfigDigest); + } + + /** + * @return list of addresses permitted to transmit reports to this contract + * @dev The list will match the 
order used to specify the transmitter during setConfig + */ + function transmitters() external view returns (address[] memory) { + return s_transmitters; + } + + function _beforeSetConfig(uint8 _f, bytes memory _onchainConfig) internal virtual; + + /** + * @dev hook called after the report has been fully validated + * for the extending contract to handle additional logic, such as oracle payment + * @param initialGas the amount of gas before validation + * @param transmitter the address of the account that submitted the report + * @param signers the addresses of all signing accounts + * @param report serialized report + */ + function _report( + uint256 initialGas, + address transmitter, + uint8 signerCount, + address[MAX_NUM_ORACLES] memory signers, + bytes calldata report + ) internal virtual; + + // The constant-length components of the msg.data sent to transmit. + // See the "If we wanted to call sam" example on for example reasoning + // https://solidity.readthedocs.io/en/v0.7.2/abi-spec.html + uint16 private constant TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT = + 4 + // function selector + 32 * + 3 + // 3 words containing reportContext + 32 + // word containing start location of abiencoded report value + 32 + // word containing location start of abiencoded rs value + 32 + // word containing start location of abiencoded ss value + 32 + // rawVs value + 32 + // word containing length of report + 32 + // word containing length rs + 32 + // word containing length of ss + 0; // placeholder + + function requireExpectedMsgDataLength( + bytes calldata report, + bytes32[] calldata rs, + bytes32[] calldata ss + ) private pure { + // calldata will never be big enough to make this overflow + uint256 expected = uint256(TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT) + + report.length + // one byte pure entry in _report + rs.length * + 32 + // 32 bytes per entry in _rs + ss.length * + 32 + // 32 bytes per entry in _ss + 0; // placeholder + require(msg.data.length == expected, 
"calldata length mismatch"); + } + + /** + * @notice transmit is called to post a new report to the contract + * @param report serialized report, which the signatures are signing. + * @param rs ith element is the R components of the ith signature on report. Must have at most maxNumOracles entries + * @param ss ith element is the S components of the ith signature on report. Must have at most maxNumOracles entries + * @param rawVs ith element is the the V component of the ith signature + */ + function transmit( + // NOTE: If these parameters are changed, expectedMsgDataLength and/or + // TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT need to be changed accordingly + bytes32[3] calldata reportContext, + bytes calldata report, + bytes32[] calldata rs, + bytes32[] calldata ss, + bytes32 rawVs // signatures + ) external override { + uint256 initialGas = gasleft(); // This line must come first + + { + // reportContext consists of: + // reportContext[0]: ConfigDigest + // reportContext[1]: 27 byte padding, 4-byte epoch and 1-byte round + // reportContext[2]: ExtraHash + bytes32 configDigest = reportContext[0]; + uint32 epochAndRound = uint32(uint256(reportContext[1])); + + emit Transmitted(configDigest, uint32(epochAndRound >> 8)); + + ConfigInfo memory configInfo = s_configInfo; + require(configInfo.latestConfigDigest == configDigest, "configDigest mismatch"); + + requireExpectedMsgDataLength(report, rs, ss); + + uint256 expectedNumSignatures; + if (i_uniqueReports) { + expectedNumSignatures = (configInfo.n + configInfo.f) / 2 + 1; + } else { + expectedNumSignatures = configInfo.f + 1; + } + + require(rs.length == expectedNumSignatures, "wrong number of signatures"); + require(rs.length == ss.length, "signatures out of registration"); + + Oracle memory transmitter = s_oracles[msg.sender]; + require( // Check that sender is authorized to report + transmitter.role == Role.Transmitter && msg.sender == s_transmitters[transmitter.index], + "unauthorized transmitter" + ); + } + + 
address[MAX_NUM_ORACLES] memory signed; + uint8 signerCount = 0; + + { + // Verify signatures attached to report + bytes32 h = keccak256(abi.encodePacked(keccak256(report), reportContext)); + + Oracle memory o; + // Bounded by MAX_NUM_ORACLES in OCR2Abstract.sol + for (uint256 i = 0; i < rs.length; ++i) { + address signer = ecrecover(h, uint8(rawVs[i]) + 27, rs[i], ss[i]); + o = s_oracles[signer]; + require(o.role == Role.Signer, "address not authorized to sign"); + require(signed[o.index] == address(0), "non-unique signature"); + signed[o.index] = signer; + signerCount += 1; + } + } + + _report(initialGas, msg.sender, signerCount, signed, report); + } +} diff --git a/contracts/src/v0.8/functions/v1_1_0/FunctionsBilling.sol b/contracts/src/v0.8/functions/v1_1_0/FunctionsBilling.sol new file mode 100644 index 00000000..94ae429d --- /dev/null +++ b/contracts/src/v0.8/functions/v1_1_0/FunctionsBilling.sol @@ -0,0 +1,391 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {IFunctionsSubscriptions} from "../v1_0_0/interfaces/IFunctionsSubscriptions.sol"; +import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol"; +import {IFunctionsBilling} from "../v1_0_0/interfaces/IFunctionsBilling.sol"; + +import {Routable} from "../v1_0_0/Routable.sol"; +import {FunctionsResponse} from "../v1_0_0/libraries/FunctionsResponse.sol"; + +import {SafeCast} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SafeCast.sol"; + +import {ChainSpecificUtil} from "./libraries/ChainSpecificUtil.sol"; + +/// @title Functions Billing contract +/// @notice Contract that calculates payment from users to the nodes of the Decentralized Oracle Network (DON). 
+abstract contract FunctionsBilling is Routable, IFunctionsBilling { + using FunctionsResponse for FunctionsResponse.RequestMeta; + using FunctionsResponse for FunctionsResponse.Commitment; + using FunctionsResponse for FunctionsResponse.FulfillResult; + + uint256 private constant REASONABLE_GAS_PRICE_CEILING = 1_000_000_000_000_000; // 1 million gwei + + event RequestBilled( + bytes32 indexed requestId, + uint96 juelsPerGas, + uint256 l1FeeShareWei, + uint96 callbackCostJuels, + uint96 totalCostJuels + ); + + // ================================================================ + // | Request Commitment state | + // ================================================================ + + mapping(bytes32 requestId => bytes32 commitmentHash) private s_requestCommitments; + + event CommitmentDeleted(bytes32 requestId); + + // ================================================================ + // | Configuration state | + // ================================================================ + + struct Config { + uint32 fulfillmentGasPriceOverEstimationBP; // ══╗ Percentage of gas price overestimation to account for changes in gas price between request and response. Held as basis points (one hundredth of 1 percentage point) + uint32 feedStalenessSeconds; // ║ How long before we consider the feed price to be stale and fallback to fallbackNativePerUnitLink. + uint32 gasOverheadBeforeCallback; // ║ Represents the average gas execution cost before the fulfillment callback. This amount is always billed for every request. + uint32 gasOverheadAfterCallback; // ║ Represents the average gas execution cost after the fulfillment callback. This amount is always billed for every request. + uint72 donFee; // ║ Additional flat fee (in Juels of PLI) that will be split between Node Operators. Max value is 2^80 - 1 == 1.2m PLI. 
+ uint40 minimumEstimateGasPriceWei; // ║ The lowest amount of wei that will be used as the tx.gasprice when estimating the cost to fulfill the request + uint16 maxSupportedRequestDataVersion; // ═══════╝ The highest support request data version supported by the node. All lower versions should also be supported. + uint224 fallbackNativePerUnitLink; // ═══════════╗ Fallback NATIVE CURRENCY / PLI conversion rate if the data feed is stale + uint32 requestTimeoutSeconds; // ════════════════╝ How many seconds it takes before we consider a request to be timed out + } + + Config private s_config; + + event ConfigUpdated(Config config); + + error UnsupportedRequestDataVersion(); + error InsufficientBalance(); + error InvalidSubscription(); + error UnauthorizedSender(); + error MustBeSubOwner(address owner); + error InvalidLinkWeiPrice(int256 linkWei); + error PaymentTooLarge(); + error NoTransmittersSet(); + error InvalidCalldata(); + + // ================================================================ + // | Balance state | + // ================================================================ + + mapping(address transmitter => uint96 balanceJuelsLink) private s_withdrawableTokens; + // Pool together collected DON fees + // Disperse them on withdrawal or change in OCR configuration + uint96 internal s_feePool; + + AggregatorV3Interface private s_linkToNativeFeed; + + // ================================================================ + // | Initialization | + // ================================================================ + constructor(address router, Config memory config, address linkToNativeFeed) Routable(router) { + s_linkToNativeFeed = AggregatorV3Interface(linkToNativeFeed); + + updateConfig(config); + } + + // ================================================================ + // | Configuration | + // ================================================================ + + /// @notice Gets the Plugin Coordinator's billing configuration + /// @return config + 
function getConfig() external view returns (Config memory) { + return s_config; + } + + /// @notice Sets the Plugin Coordinator's billing configuration + /// @param config - See the contents of the Config struct in IFunctionsBilling.Config for more information + function updateConfig(Config memory config) public { + _onlyOwner(); + + s_config = config; + emit ConfigUpdated(config); + } + + // ================================================================ + // | Fee Calculation | + // ================================================================ + + /// @inheritdoc IFunctionsBilling + function getDONFee(bytes memory /* requestData */) public view override returns (uint72) { + return s_config.donFee; + } + + /// @inheritdoc IFunctionsBilling + function getAdminFee() public view override returns (uint72) { + return _getRouter().getAdminFee(); + } + + /// @inheritdoc IFunctionsBilling + function getWeiPerUnitLink() public view returns (uint256) { + Config memory config = s_config; + (, int256 weiPerUnitLink, , uint256 timestamp, ) = s_linkToNativeFeed.latestRoundData(); + // solhint-disable-next-line not-rely-on-time + if (config.feedStalenessSeconds < block.timestamp - timestamp && config.feedStalenessSeconds > 0) { + return config.fallbackNativePerUnitLink; + } + if (weiPerUnitLink <= 0) { + revert InvalidLinkWeiPrice(weiPerUnitLink); + } + return uint256(weiPerUnitLink); + } + + function _getJuelsFromWei(uint256 amountWei) private view returns (uint96) { + // (1e18 juels/link) * wei / (wei/link) = juels + // There are only 1e9*1e18 = 1e27 juels in existence, should not exceed uint96 (2^96 ~ 7e28) + return SafeCast.toUint96((1e18 * amountWei) / getWeiPerUnitLink()); + } + + // ================================================================ + // | Cost Estimation | + // ================================================================ + + /// @inheritdoc IFunctionsBilling + function estimateCost( + uint64 subscriptionId, + bytes calldata data, + uint32 
callbackGasLimit, + uint256 gasPriceWei + ) external view override returns (uint96) { + _getRouter().isValidCallbackGasLimit(subscriptionId, callbackGasLimit); + // Reasonable ceilings to prevent integer overflows + if (gasPriceWei > REASONABLE_GAS_PRICE_CEILING) { + revert InvalidCalldata(); + } + uint72 adminFee = getAdminFee(); + uint72 donFee = getDONFee(data); + return _calculateCostEstimate(callbackGasLimit, gasPriceWei, donFee, adminFee); + } + + /// @notice Estimate the cost in Juels of PLI + // that will be charged to a subscription to fulfill a Functions request + // Gas Price can be overestimated to account for flucuations between request and response time + function _calculateCostEstimate( + uint32 callbackGasLimit, + uint256 gasPriceWei, + uint72 donFee, + uint72 adminFee + ) internal view returns (uint96) { + // If gas price is less than the minimum fulfillment gas price, override to using the minimum + if (gasPriceWei < s_config.minimumEstimateGasPriceWei) { + gasPriceWei = s_config.minimumEstimateGasPriceWei; + } + + uint256 gasPriceWithOverestimation = gasPriceWei + + ((gasPriceWei * s_config.fulfillmentGasPriceOverEstimationBP) / 10_000); + /// @NOTE: Basis Points are 1/100th of 1%, divide by 10_000 to bring back to original units + + uint256 executionGas = s_config.gasOverheadBeforeCallback + s_config.gasOverheadAfterCallback + callbackGasLimit; + uint256 l1FeeWei = ChainSpecificUtil._getCurrentTxL1GasFees(msg.data); + uint96 estimatedGasReimbursementJuels = _getJuelsFromWei((gasPriceWithOverestimation * executionGas) + l1FeeWei); + + uint96 feesJuels = uint96(donFee) + uint96(adminFee); + + return estimatedGasReimbursementJuels + feesJuels; + } + + // ================================================================ + // | Billing | + // ================================================================ + + /// @notice Initiate the billing process for an Functions request + /// @dev Only callable by the Functions Router + /// @param request - 
Plugin Functions request data, see FunctionsResponse.RequestMeta for the structure + /// @return commitment - The parameters of the request that must be held consistent at response time + function _startBilling( + FunctionsResponse.RequestMeta memory request + ) internal returns (FunctionsResponse.Commitment memory commitment) { + Config memory config = s_config; + + // Nodes should support all past versions of the structure + if (request.dataVersion > config.maxSupportedRequestDataVersion) { + revert UnsupportedRequestDataVersion(); + } + + uint72 donFee = getDONFee(request.data); + uint96 estimatedTotalCostJuels = _calculateCostEstimate( + request.callbackGasLimit, + tx.gasprice, + donFee, + request.adminFee + ); + + // Check that subscription can afford the estimated cost + if ((request.availableBalance) < estimatedTotalCostJuels) { + revert InsufficientBalance(); + } + + uint32 timeoutTimestamp = uint32(block.timestamp + config.requestTimeoutSeconds); + bytes32 requestId = keccak256( + abi.encode( + address(this), + request.requestingContract, + request.subscriptionId, + request.initiatedRequests + 1, + keccak256(request.data), + request.dataVersion, + request.callbackGasLimit, + estimatedTotalCostJuels, + timeoutTimestamp, + // solhint-disable-next-line avoid-tx-origin + tx.origin + ) + ); + + commitment = FunctionsResponse.Commitment({ + adminFee: request.adminFee, + coordinator: address(this), + client: request.requestingContract, + subscriptionId: request.subscriptionId, + callbackGasLimit: request.callbackGasLimit, + estimatedTotalCostJuels: estimatedTotalCostJuels, + timeoutTimestamp: timeoutTimestamp, + requestId: requestId, + donFee: donFee, + gasOverheadBeforeCallback: config.gasOverheadBeforeCallback, + gasOverheadAfterCallback: config.gasOverheadAfterCallback + }); + + s_requestCommitments[requestId] = keccak256(abi.encode(commitment)); + + return commitment; + } + + /// @notice Finalize billing process for an Functions request by sending a callback 
to the Client contract and then charging the subscription + /// @param requestId identifier for the request that was generated by the Registry in the beginBilling commitment + /// @param response response data from DON consensus + /// @param err error from DON consensus + /// @param reportBatchSize the number of fulfillments in the transmitter's report + /// @return result fulfillment result + /// @dev Only callable by a node that has been approved on the Coordinator + /// @dev simulated offchain to determine if sufficient balance is present to fulfill the request + function _fulfillAndBill( + bytes32 requestId, + bytes memory response, + bytes memory err, + bytes memory onchainMetadata, + bytes memory /* offchainMetadata TODO: use in getDonFee() for dynamic billing */, + uint8 reportBatchSize + ) internal returns (FunctionsResponse.FulfillResult) { + FunctionsResponse.Commitment memory commitment = abi.decode(onchainMetadata, (FunctionsResponse.Commitment)); + + uint256 gasOverheadWei = (commitment.gasOverheadBeforeCallback + commitment.gasOverheadAfterCallback) * tx.gasprice; + uint256 l1FeeShareWei = ChainSpecificUtil._getCurrentTxL1GasFees(msg.data) / reportBatchSize; + // Gas overhead without callback + uint96 gasOverheadJuels = _getJuelsFromWei(gasOverheadWei + l1FeeShareWei); + uint96 juelsPerGas = _getJuelsFromWei(tx.gasprice); + + // The Functions Router will perform the callback to the client contract + (FunctionsResponse.FulfillResult resultCode, uint96 callbackCostJuels) = _getRouter().fulfill( + response, + err, + juelsPerGas, + gasOverheadJuels + commitment.donFee, // cost without callback or admin fee, those will be added by the Router + msg.sender, + commitment + ); + + // The router will only pay the DON on successfully processing the fulfillment + // In these two fulfillment results the user has been charged + // Otherwise, the Coordinator should hold on to the request commitment + if ( + resultCode == FunctionsResponse.FulfillResult.FULFILLED || 
+ resultCode == FunctionsResponse.FulfillResult.USER_CALLBACK_ERROR + ) { + delete s_requestCommitments[requestId]; + // Reimburse the transmitter for the fulfillment gas cost + s_withdrawableTokens[msg.sender] = gasOverheadJuels + callbackCostJuels; + // Put donFee into the pool of fees, to be split later + // Saves on storage writes that would otherwise be charged to the user + s_feePool += commitment.donFee; + emit RequestBilled({ + requestId: requestId, + juelsPerGas: juelsPerGas, + l1FeeShareWei: l1FeeShareWei, + callbackCostJuels: callbackCostJuels, + totalCostJuels: gasOverheadJuels + callbackCostJuels + commitment.donFee + commitment.adminFee + }); + } + + return resultCode; + } + + // ================================================================ + // | Request Timeout | + // ================================================================ + + /// @inheritdoc IFunctionsBilling + /// @dev Only callable by the Router + /// @dev Used by FunctionsRouter.sol during timeout of a request + function deleteCommitment(bytes32 requestId) external override onlyRouter { + // Delete commitment + delete s_requestCommitments[requestId]; + emit CommitmentDeleted(requestId); + } + + // ================================================================ + // | Fund withdrawal | + // ================================================================ + + /// @inheritdoc IFunctionsBilling + function oracleWithdraw(address recipient, uint96 amount) external { + _disperseFeePool(); + + if (amount == 0) { + amount = s_withdrawableTokens[msg.sender]; + } else if (s_withdrawableTokens[msg.sender] < amount) { + revert InsufficientBalance(); + } + s_withdrawableTokens[msg.sender] -= amount; + IFunctionsSubscriptions(address(_getRouter())).oracleWithdraw(recipient, amount); + } + + /// @inheritdoc IFunctionsBilling + /// @dev Only callable by the Coordinator owner + function oracleWithdrawAll() external { + _onlyOwner(); + _disperseFeePool(); + + address[] memory transmitters = 
_getTransmitters(); + + // Bounded by "maxNumOracles" on OCR2Abstract.sol + for (uint256 i = 0; i < transmitters.length; ++i) { + uint96 balance = s_withdrawableTokens[transmitters[i]]; + if (balance > 0) { + s_withdrawableTokens[transmitters[i]] = 0; + IFunctionsSubscriptions(address(_getRouter())).oracleWithdraw(transmitters[i], balance); + } + } + } + + // Overriden in FunctionsCoordinator, which has visibility into transmitters + function _getTransmitters() internal view virtual returns (address[] memory); + + // DON fees are collected into a pool s_feePool + // When OCR configuration changes, or any oracle withdraws, this must be dispersed + function _disperseFeePool() internal { + if (s_feePool == 0) { + return; + } + // All transmitters are assumed to also be observers + // Pay out the DON fee to all transmitters + address[] memory transmitters = _getTransmitters(); + uint256 numberOfTransmitters = transmitters.length; + if (numberOfTransmitters == 0) { + revert NoTransmittersSet(); + } + uint96 feePoolShare = s_feePool / uint96(numberOfTransmitters); + // Bounded by "maxNumOracles" on OCR2Abstract.sol + for (uint256 i = 0; i < numberOfTransmitters; ++i) { + s_withdrawableTokens[transmitters[i]] += feePoolShare; + } + s_feePool -= feePoolShare * uint96(numberOfTransmitters); + } + + // Overriden in FunctionsCoordinator.sol + function _onlyOwner() internal view virtual; +} diff --git a/contracts/src/v0.8/functions/v1_1_0/FunctionsCoordinator.sol b/contracts/src/v0.8/functions/v1_1_0/FunctionsCoordinator.sol new file mode 100644 index 00000000..a215c178 --- /dev/null +++ b/contracts/src/v0.8/functions/v1_1_0/FunctionsCoordinator.sol @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {IFunctionsCoordinator} from "../v1_0_0/interfaces/IFunctionsCoordinator.sol"; +import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol"; + +import {FunctionsBilling} from "./FunctionsBilling.sol"; +import {OCR2Base} from 
/// @title Functions Coordinator contract
/// @notice Contract that nodes of a Decentralized Oracle Network (DON) interact with
contract FunctionsCoordinator is OCR2Base, IFunctionsCoordinator, FunctionsBilling {
  using FunctionsResponse for FunctionsResponse.RequestMeta;
  using FunctionsResponse for FunctionsResponse.Commitment;
  using FunctionsResponse for FunctionsResponse.FulfillResult;

  /// @inheritdoc ITypeAndVersion
  // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables
  string public constant override typeAndVersion = "Functions Coordinator v1.1.0";

  /// @notice Emitted when the Router starts a request on this Coordinator; consumed offchain by the DON
  event OracleRequest(
    bytes32 indexed requestId,
    address indexed requestingContract,
    address requestInitiator,
    uint64 subscriptionId,
    address subscriptionOwner,
    bytes data,
    uint16 dataVersion,
    bytes32 flags,
    uint64 callbackGasLimit,
    FunctionsResponse.Commitment commitment
  );
  /// @notice Emitted once a fulfillment in a report has been successfully processed and billed
  event OracleResponse(bytes32 indexed requestId, address transmitter);

  error InconsistentReportData();
  error EmptyPublicKey();
  error UnauthorizedPublicKeyChange();

  // DON public encryption key; settable only by the owner
  bytes private s_donPublicKey;
  // Threshold encryption public key (for secrets); settable only by the owner
  bytes private s_thresholdPublicKey;

  constructor(
    address router,
    Config memory config,
    address linkToNativeFeed
  ) OCR2Base() FunctionsBilling(router, config, linkToNativeFeed) {}

  /// @inheritdoc IFunctionsCoordinator
  /// @dev Reverts with EmptyPublicKey if the key has never been set
  function getThresholdPublicKey() external view override returns (bytes memory) {
    if (s_thresholdPublicKey.length == 0) {
      revert EmptyPublicKey();
    }
    return s_thresholdPublicKey;
  }

  /// @inheritdoc IFunctionsCoordinator
  /// @dev Rejects an empty key so the key can never be "unset" back to zero length
  function setThresholdPublicKey(bytes calldata thresholdPublicKey) external override onlyOwner {
    if (thresholdPublicKey.length == 0) {
      revert EmptyPublicKey();
    }
    s_thresholdPublicKey = thresholdPublicKey;
  }

  /// @inheritdoc IFunctionsCoordinator
  /// @dev Reverts with EmptyPublicKey if the key has never been set
  function getDONPublicKey() external view override returns (bytes memory) {
    if (s_donPublicKey.length == 0) {
      revert EmptyPublicKey();
    }
    return s_donPublicKey;
  }

  /// @inheritdoc IFunctionsCoordinator
  function setDONPublicKey(bytes calldata donPublicKey) external override onlyOwner {
    if (donPublicKey.length == 0) {
      revert EmptyPublicKey();
    }
    s_donPublicKey = donPublicKey;
  }

  /// @dev check if node is in current transmitter list (linear scan of the OCR2Base transmitter set)
  function _isTransmitter(address node) internal view returns (bool) {
    address[] memory nodes = s_transmitters;
    // Bounded by "maxNumOracles" on OCR2Abstract.sol
    for (uint256 i = 0; i < nodes.length; ++i) {
      if (nodes[i] == node) {
        return true;
      }
    }
    return false;
  }

  /// @inheritdoc IFunctionsCoordinator
  /// @dev Begins billing for the request, then emits the event the DON listens for
  function startRequest(
    FunctionsResponse.RequestMeta calldata request
  ) external override onlyRouter returns (FunctionsResponse.Commitment memory commitment) {
    commitment = _startBilling(request);

    emit OracleRequest(
      commitment.requestId,
      request.requestingContract,
      // solhint-disable-next-line avoid-tx-origin
      tx.origin,
      request.subscriptionId,
      request.subscriptionOwner,
      request.data,
      request.dataVersion,
      request.flags,
      request.callbackGasLimit,
      commitment
    );

    return commitment;
  }

  /// @dev DON fees are pooled together. If the OCR configuration is going to change, these need to be distributed.
  function _beforeSetConfig(uint8 /* _f */, bytes memory /* _onchainConfig */) internal override {
    if (_getTransmitters().length > 0) {
      _disperseFeePool();
    }
  }

  /// @dev Used by FunctionsBilling.sol
  function _getTransmitters() internal view override returns (address[] memory) {
    return s_transmitters;
  }

  /// @dev Report hook called within OCR2Base.sol
  /// @dev Decodes the batched report, validates array lengths, then bills each fulfillment
  function _report(
    uint256 /*initialGas*/,
    address /*transmitter*/,
    uint8 /*signerCount*/,
    address[MAX_NUM_ORACLES] memory /*signers*/,
    bytes calldata report
  ) internal override {
    (
      bytes32[] memory requestIds,
      bytes[] memory results,
      bytes[] memory errors,
      bytes[] memory onchainMetadata,
      bytes[] memory offchainMetadata
    ) = abi.decode(report, (bytes32[], bytes[], bytes[], bytes[], bytes[]));
    // NOTE(review): the uint8 cast truncates counts above 255 — presumably safe because the
    // batch is bounded by "MaxRequestBatchSize" offchain, but confirm the cast is intentional
    uint256 numberOfFulfillments = uint8(requestIds.length);

    // All five parallel arrays must be non-empty and the same length
    if (
      numberOfFulfillments == 0 ||
      numberOfFulfillments != results.length ||
      numberOfFulfillments != errors.length ||
      numberOfFulfillments != onchainMetadata.length ||
      numberOfFulfillments != offchainMetadata.length
    ) {
      revert ReportInvalid("Fields must be equal length");
    }

    // Bounded by "MaxRequestBatchSize" on the Job's ReportingPluginConfig
    for (uint256 i = 0; i < numberOfFulfillments; ++i) {
      FunctionsResponse.FulfillResult result = FunctionsResponse.FulfillResult(
        _fulfillAndBill(
          requestIds[i],
          results[i],
          errors[i],
          onchainMetadata[i],
          offchainMetadata[i],
          uint8(numberOfFulfillments) // will not exceed "MaxRequestBatchSize" on the Job's ReportingPluginConfig
        )
      );

      // Emit on successfully processing the fulfillment
      // In these two fulfillment results the user has been charged
      // Otherwise, the DON will re-try
      if (
        result == FunctionsResponse.FulfillResult.FULFILLED ||
        result == FunctionsResponse.FulfillResult.USER_CALLBACK_ERROR
      ) {
        emit OracleResponse(requestIds[i], msg.sender);
      }
    }
  }

  /// @dev Used in FunctionsBilling.sol
  function _onlyOwner() internal view override {
    _validateOwnership();
  }
}
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.19;

import {ArbGasInfo} from "../../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol";
import {GasPriceOracle} from "../../../vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/L2/GasPriceOracle.sol";

/// @dev A library that abstracts out opcodes that behave differently across chains.
/// @dev The methods below return values that are pertinent to the given chain.
library ChainSpecificUtil {
  // ------------ Start Arbitrum Constants ------------

  /// @dev ARBGAS_ADDR is the address of the ArbGasInfo precompile on Arbitrum.
  /// @dev reference: https://github.com/OffchainLabs/nitro/blob/v2.0.14/contracts/src/precompiles/ArbGasInfo.sol#L10
  address private constant ARBGAS_ADDR = address(0x000000000000000000000000000000000000006C);
  ArbGasInfo private constant ARBGAS = ArbGasInfo(ARBGAS_ADDR);

  uint256 private constant ARB_MAINNET_CHAIN_ID = 42161;
  uint256 private constant ARB_GOERLI_TESTNET_CHAIN_ID = 421613;
  uint256 private constant ARB_SEPOLIA_TESTNET_CHAIN_ID = 421614;

  // ------------ End Arbitrum Constants ------------

  // ------------ Start Optimism Constants ------------
  /// @dev L1_FEE_DATA_PADDING includes 35 bytes for L1 data padding for Optimism
  bytes internal constant L1_FEE_DATA_PADDING =
    "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff";
  /// @dev OVM_GASPRICEORACLE_ADDR is the address of the GasPriceOracle precompile on Optimism.
  /// @dev reference: https://community.optimism.io/docs/developers/build/transaction-fees/#estimating-the-l1-data-fee
  address private constant OVM_GASPRICEORACLE_ADDR = address(0x420000000000000000000000000000000000000F);
  GasPriceOracle private constant OVM_GASPRICEORACLE = GasPriceOracle(OVM_GASPRICEORACLE_ADDR);

  uint256 private constant OP_MAINNET_CHAIN_ID = 10;
  uint256 private constant OP_GOERLI_CHAIN_ID = 420;
  uint256 private constant OP_SEPOLIA_CHAIN_ID = 11155420;

  /// @dev Base is a OP stack based rollup and follows the same L1 pricing logic as Optimism.
  uint256 private constant BASE_MAINNET_CHAIN_ID = 8453;
  uint256 private constant BASE_GOERLI_CHAIN_ID = 84531;
  uint256 private constant BASE_SEPOLIA_CHAIN_ID = 84532;

  // ------------ End Optimism Constants ------------

  /// @notice Returns the L1 fee in wei that will be paid for the current transaction.
  /// @notice On a known Arbitrum chain this delegates to ArbGas.getCurrentTxL1GasFees and the
  /// @notice provided calldata is ignored; on a known OP-stack chain the calldata (plus padding)
  /// @notice is priced by the GasPriceOracle predeploy's getL1Fee. Any other chain returns 0.
  function _getCurrentTxL1GasFees(bytes memory txCallData) internal view returns (uint256 l1FeeWei) {
    uint256 chain = block.chainid;
    if (_isArbitrumChainId(chain)) {
      // Arbitrum computes the L1 fee itself; txCallData is not consulted
      return ARBGAS.getCurrentTxL1GasFees();
    }
    if (_isOptimismChainId(chain)) {
      return OVM_GASPRICEORACLE.getL1Fee(bytes.concat(txCallData, L1_FEE_DATA_PADDING));
    }
    return 0;
  }

  /// @notice Return true if and only if the provided chain ID is an Arbitrum chain ID.
  function _isArbitrumChainId(uint256 chainId) internal pure returns (bool) {
    if (chainId == ARB_MAINNET_CHAIN_ID) return true;
    if (chainId == ARB_GOERLI_TESTNET_CHAIN_ID) return true;
    return chainId == ARB_SEPOLIA_TESTNET_CHAIN_ID;
  }

  /// @notice Return true if and only if the provided chain ID is an Optimism (or Base) chain ID.
  /// @notice Note that optimism chain id's are also OP stack chain id's.
  function _isOptimismChainId(uint256 chainId) internal pure returns (bool) {
    if (chainId == OP_MAINNET_CHAIN_ID) return true;
    if (chainId == OP_GOERLI_CHAIN_ID) return true;
    if (chainId == OP_SEPOLIA_CHAIN_ID) return true;
    if (chainId == BASE_MAINNET_CHAIN_ID) return true;
    if (chainId == BASE_GOERLI_CHAIN_ID) return true;
    return chainId == BASE_SEPOLIA_CHAIN_ID;
  }
}
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.19;

import {ITypeAndVersion} from "../../../shared/interfaces/ITypeAndVersion.sol";

abstract contract OCR2Abstract is ITypeAndVersion {
  // Maximum number of oracles the offchain reporting protocol is designed for
  uint256 internal constant MAX_NUM_ORACLES = 31;

  /// @notice triggers a new run of the offchain reporting protocol
  /// @param previousConfigBlockNumber block in which the previous config was set, to simplify historic analysis
  /// @param configDigest configDigest of this configuration
  /// @param configCount ordinal number of this config setting among all config settings over the life of this contract
  /// @param signers ith element is address ith oracle uses to sign a report
  /// @param transmitters ith element is address ith oracle uses to transmit a report via the transmit method
  /// @param f maximum number of faulty/dishonest oracles the protocol can tolerate while still working correctly
  /// @param onchainConfig serialized configuration used by the contract (and possibly oracles)
  /// @param offchainConfigVersion version of the serialization format used for "offchainConfig" parameter
  /// @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract
  event ConfigSet(
    uint32 previousConfigBlockNumber,
    bytes32 configDigest,
    uint64 configCount,
    address[] signers,
    address[] transmitters,
    uint8 f,
    bytes onchainConfig,
    uint64 offchainConfigVersion,
    bytes offchainConfig
  );

  /// @notice sets offchain reporting protocol configuration incl. participating oracles
  /// @param signers addresses with which oracles sign the reports
  /// @param transmitters addresses oracles use to transmit the reports
  /// @param f number of faulty oracles the system can tolerate
  /// @param onchainConfig serialized configuration used by the contract (and possibly oracles)
  /// @param offchainConfigVersion version number for offchainEncoding schema
  /// @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract
  function setConfig(
    address[] memory signers,
    address[] memory transmitters,
    uint8 f,
    bytes memory onchainConfig,
    uint64 offchainConfigVersion,
    bytes memory offchainConfig
  ) external virtual;

  /// @notice information about current offchain reporting protocol configuration
  /// @return configCount ordinal number of current config, out of all configs applied to this contract so far
  /// @return blockNumber block at which this config was set
  /// @return configDigest domain-separation tag for current config (see _configDigestFromConfigData)
  function latestConfigDetails()
    external
    view
    virtual
    returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest);

  /// @notice optionally emitted to indicate the latest configDigest and epoch for
  /// which a report was successfully transmitted. Alternatively, the contract may
  /// use latestConfigDigestAndEpoch with scanLogs set to false.
  event Transmitted(bytes32 configDigest, uint32 epoch);

  /// @notice optionally returns the latest configDigest and epoch for which a
  /// report was successfully transmitted. Alternatively, the contract may return
  /// scanLogs set to true and use Transmitted events to provide this information
  /// to offchain watchers.
  /// @return scanLogs indicates whether to rely on the configDigest and epoch
  /// returned or whether to scan logs for the Transmitted event instead.
  /// @return configDigest
  /// @return epoch
  function latestConfigDigestAndEpoch()
    external
    view
    virtual
    returns (bool scanLogs, bytes32 configDigest, uint32 epoch);

  /// @notice transmit is called to post a new report to the contract
  /// @param report serialized report, which the signatures are signing.
  /// @param rs ith element is the R components of the ith signature on report. Must have at most maxNumOracles entries
  /// @param ss ith element is the S components of the ith signature on report. Must have at most maxNumOracles entries
  /// @param rawVs ith element is the V component of the ith signature
  function transmit(
    // NOTE: If these parameters are changed, expectedMsgDataLength and/or
    // TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT need to be changed accordingly
    bytes32[3] calldata reportContext,
    bytes calldata report,
    bytes32[] calldata rs,
    bytes32[] calldata ss,
    bytes32 rawVs // signatures
  ) external virtual;
}
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

import {ConfirmedOwner} from "../../../shared/access/ConfirmedOwner.sol";
import {OCR2Abstract} from "./OCR2Abstract.sol";

/**
 * @notice Onchain verification of reports from the offchain reporting protocol
 * @dev For details on its operation, see the offchain reporting protocol design
 * doc, which refers to this contract as simply the "contract".
 */
abstract contract OCR2Base is ConfirmedOwner, OCR2Abstract {
  error ReportInvalid(string message);
  error InvalidConfig(string message);

  constructor() ConfirmedOwner(msg.sender) {}

  // incremented each time a new config is posted. This count is incorporated
  // into the config digest, to prevent replay attacks.
  uint32 internal s_configCount;
  uint32 internal s_latestConfigBlockNumber; // makes it easier for offchain systems
  // to extract config from logs.

  // Storing these fields used on the hot path in a ConfigInfo variable reduces the
  // retrieval of all of them to a single SLOAD. If any further fields are
  // added, make sure that storage of the struct still takes at most 32 bytes.
  struct ConfigInfo {
    bytes32 latestConfigDigest;
    uint8 f; // TODO: could be optimized by squeezing into one slot
    uint8 n;
  }
  ConfigInfo internal s_configInfo;

  // Used for s_oracles[a].role, where a is an address, to track the purpose
  // of the address, or to indicate that the address is unset.
  enum Role {
    // No oracle role has been set for address a
    Unset,
    // Signing address for the s_oracles[a].index'th oracle. I.e., report
    // signatures from this oracle should ecrecover back to address a.
    Signer,
    // Transmission address for the s_oracles[a].index'th oracle. I.e., if a
    // report is received by OCR2Aggregator.transmit in which msg.sender is
    // a, it is attributed to the s_oracles[a].index'th oracle.
    Transmitter
  }

  struct Oracle {
    uint8 index; // Index of oracle in s_signers/s_transmitters
    Role role; // Role of the address which mapped to this struct
  }

  mapping(address signerOrTransmitter => Oracle) internal s_oracles;

  // s_signers contains the signing address of each oracle
  address[] internal s_signers;

  // s_transmitters contains the transmission address of each oracle,
  // i.e. the address the oracle actually sends transactions to the contract from
  address[] internal s_transmitters;

  /*
   * Config logic
   */

  // Reverts transaction if config args are invalid
  modifier checkConfigValid(
    uint256 numSigners,
    uint256 numTransmitters,
    uint256 f
  ) {
    if (numSigners > MAX_NUM_ORACLES) revert InvalidConfig("too many signers");
    if (f == 0) revert InvalidConfig("f must be positive");
    if (numSigners != numTransmitters) revert InvalidConfig("oracle addresses out of registration");
    // BFT requirement: need strictly more than 3f oracles to tolerate f faults
    if (numSigners <= 3 * f) revert InvalidConfig("faulty-oracle f too high");
    _;
  }

  // Bundles setConfig arguments into one memory struct to avoid stack-too-deep
  struct SetConfigArgs {
    address[] signers;
    address[] transmitters;
    uint8 f;
    bytes onchainConfig;
    uint64 offchainConfigVersion;
    bytes offchainConfig;
  }

  /// @inheritdoc OCR2Abstract
  function latestConfigDigestAndEpoch()
    external
    view
    virtual
    override
    returns (bool scanLogs, bytes32 configDigest, uint32 epoch)
  {
    // scanLogs == true: offchain watchers should use Transmitted events instead
    return (true, bytes32(0), uint32(0));
  }

  /**
   * @notice sets offchain reporting protocol configuration incl. participating oracles
   * @param _signers addresses with which oracles sign the reports
   * @param _transmitters addresses oracles use to transmit the reports
   * @param _f number of faulty oracles the system can tolerate
   * @param _onchainConfig encoded on-chain contract configuration
   * @param _offchainConfigVersion version number for offchainEncoding schema
   * @param _offchainConfig encoded off-chain oracle configuration
   */
  function setConfig(
    address[] memory _signers,
    address[] memory _transmitters,
    uint8 _f,
    bytes memory _onchainConfig,
    uint64 _offchainConfigVersion,
    bytes memory _offchainConfig
  ) external override checkConfigValid(_signers.length, _transmitters.length, _f) onlyOwner {
    SetConfigArgs memory args = SetConfigArgs({
      signers: _signers,
      transmitters: _transmitters,
      f: _f,
      onchainConfig: _onchainConfig,
      offchainConfigVersion: _offchainConfigVersion,
      offchainConfig: _offchainConfig
    });

    // Give the extending contract a chance to settle state tied to the old config
    _beforeSetConfig(args.f, args.onchainConfig);

    while (s_signers.length != 0) {
      // remove any old signer/transmitter addresses
      uint256 lastIdx = s_signers.length - 1;
      address signer = s_signers[lastIdx];
      address transmitter = s_transmitters[lastIdx];
      delete s_oracles[signer];
      delete s_oracles[transmitter];
      s_signers.pop();
      s_transmitters.pop();
    }

    // Bounded by MAX_NUM_ORACLES in OCR2Abstract.sol
    for (uint256 i = 0; i < args.signers.length; i++) {
      if (args.signers[i] == address(0)) revert InvalidConfig("signer must not be empty");
      if (args.transmitters[i] == address(0)) revert InvalidConfig("transmitter must not be empty");
      // add new signer/transmitter addresses
      if (s_oracles[args.signers[i]].role != Role.Unset) revert InvalidConfig("repeated signer address");
      s_oracles[args.signers[i]] = Oracle(uint8(i), Role.Signer);
      if (s_oracles[args.transmitters[i]].role != Role.Unset) revert InvalidConfig("repeated transmitter address");
      s_oracles[args.transmitters[i]] = Oracle(uint8(i), Role.Transmitter);
      s_signers.push(args.signers[i]);
      s_transmitters.push(args.transmitters[i]);
    }
    s_configInfo.f = args.f;
    uint32 previousConfigBlockNumber = s_latestConfigBlockNumber;
    s_latestConfigBlockNumber = uint32(block.number);
    s_configCount += 1;
    {
      s_configInfo.latestConfigDigest = _configDigestFromConfigData(
        block.chainid,
        address(this),
        s_configCount,
        args.signers,
        args.transmitters,
        args.f,
        args.onchainConfig,
        args.offchainConfigVersion,
        args.offchainConfig
      );
    }
    s_configInfo.n = uint8(args.signers.length);

    emit ConfigSet(
      previousConfigBlockNumber,
      s_configInfo.latestConfigDigest,
      s_configCount,
      args.signers,
      args.transmitters,
      args.f,
      args.onchainConfig,
      args.offchainConfigVersion,
      args.offchainConfig
    );
  }

  // Computes the domain-separating config digest: a hash of all config data,
  // with the top 2 bytes replaced by the 0x0001 digest-type prefix
  function _configDigestFromConfigData(
    uint256 _chainId,
    address _contractAddress,
    uint64 _configCount,
    address[] memory _signers,
    address[] memory _transmitters,
    uint8 _f,
    bytes memory _onchainConfig,
    uint64 _encodedConfigVersion,
    bytes memory _encodedConfig
  ) internal pure returns (bytes32) {
    uint256 h = uint256(
      keccak256(
        abi.encode(
          _chainId,
          _contractAddress,
          _configCount,
          _signers,
          _transmitters,
          _f,
          _onchainConfig,
          _encodedConfigVersion,
          _encodedConfig
        )
      )
    );
    uint256 prefixMask = type(uint256).max << (256 - 16); // 0xFFFF00..00
    uint256 prefix = 0x0001 << (256 - 16); // 0x000100..00
    return bytes32((prefix & prefixMask) | (h & ~prefixMask));
  }

  /**
   * @notice information about current offchain reporting protocol configuration
   * @return configCount ordinal number of current config, out of all configs applied to this contract so far
   * @return blockNumber block at which this config was set
   * @return configDigest domain-separation tag for current config (see __configDigestFromConfigData)
   */
  function latestConfigDetails()
    external
    view
    override
    returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest)
  {
    return (s_configCount, s_latestConfigBlockNumber, s_configInfo.latestConfigDigest);
  }

  /**
   * @return list of addresses permitted to transmit reports to this contract
   * @dev The list will match the order used to specify the transmitter during setConfig
   */
  function transmitters() external view returns (address[] memory) {
    return s_transmitters;
  }

  // Hook invoked before the old config is torn down in setConfig
  function _beforeSetConfig(uint8 _f, bytes memory _onchainConfig) internal virtual;

  /**
   * @dev hook called after the report has been fully validated
   * for the extending contract to handle additional logic, such as oracle payment
   * @param initialGas the amount of gas before validation
   * @param transmitter the address of the account that submitted the report
   * @param signers the addresses of all signing accounts
   * @param report serialized report
   */
  function _report(
    uint256 initialGas,
    address transmitter,
    uint8 signerCount,
    address[MAX_NUM_ORACLES] memory signers,
    bytes calldata report
  ) internal virtual;

  // The constant-length components of the msg.data sent to transmit.
  // See the "If we wanted to call sam" example on for example reasoning
  // https://solidity.readthedocs.io/en/v0.7.2/abi-spec.html
  uint16 private constant TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT =
    4 + // function selector
      32 *
      3 + // 3 words containing reportContext
      32 + // word containing start location of abiencoded report value
      32 + // word containing location start of abiencoded rs value
      32 + // word containing start location of abiencoded ss value
      32 + // rawVs value
      32 + // word containing length of report
      32 + // word containing length rs
      32 + // word containing length of ss
      0; // placeholder

  // Rejects calldata whose length does not exactly match the ABI encoding of
  // transmit's arguments, preventing trailing-garbage payloads
  function _requireExpectedMsgDataLength(
    bytes calldata report,
    bytes32[] calldata rs,
    bytes32[] calldata ss
  ) private pure {
    // calldata will never be big enough to make this overflow
    uint256 expected = uint256(TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT) +
      report.length + // one byte pure entry in _report
      rs.length *
      32 + // 32 bytes per entry in _rs
      ss.length *
      32 + // 32 bytes per entry in _ss
      0; // placeholder
    if (msg.data.length != expected) revert ReportInvalid("calldata length mismatch");
  }

  /**
   * @notice transmit is called to post a new report to the contract
   * @param report serialized report, which the signatures are signing.
   * @param rs ith element is the R components of the ith signature on report. Must have at most maxNumOracles entries
   * @param ss ith element is the S components of the ith signature on report. Must have at most maxNumOracles entries
   * @param rawVs ith element is the V component of the ith signature
   */
  function transmit(
    // NOTE: If these parameters are changed, expectedMsgDataLength and/or
    // TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT need to be changed accordingly
    bytes32[3] calldata reportContext,
    bytes calldata report,
    bytes32[] calldata rs,
    bytes32[] calldata ss,
    bytes32 rawVs // signatures
  ) external override {
    uint256 initialGas = gasleft(); // This line must come first

    {
      // reportContext consists of:
      // reportContext[0]: ConfigDigest
      // reportContext[1]: 27 byte padding, 4-byte epoch and 1-byte round
      // reportContext[2]: ExtraHash
      bytes32 configDigest = reportContext[0];
      uint32 epochAndRound = uint32(uint256(reportContext[1]));

      // epoch is the upper 3 bytes of epochAndRound (round byte is shifted off)
      emit Transmitted(configDigest, uint32(epochAndRound >> 8));

      // The following check is disabled to allow both current and proposed routes to submit reports using the same OCR config digest
      // Plugin Functions uses globally unique request IDs. Metadata about the request is stored and checked in the Coordinator and Router
      // require(configInfo.latestConfigDigest == configDigest, "configDigest mismatch");

      _requireExpectedMsgDataLength(report, rs, ss);

      // Byzantine quorum: more than (n + f) / 2 signatures required
      uint256 expectedNumSignatures = (s_configInfo.n + s_configInfo.f) / 2 + 1;

      if (rs.length != expectedNumSignatures) revert ReportInvalid("wrong number of signatures");
      if (rs.length != ss.length) revert ReportInvalid("report rs and ss must be of equal length");

      Oracle memory transmitter = s_oracles[msg.sender];
      if (transmitter.role != Role.Transmitter && msg.sender != s_transmitters[transmitter.index])
        revert ReportInvalid("unauthorized transmitter");
    }

    address[MAX_NUM_ORACLES] memory signed;
    uint8 signerCount = 0;

    {
      // Verify signatures attached to report
      bytes32 h = keccak256(abi.encodePacked(keccak256(report), reportContext));

      Oracle memory o;
      // Bounded by MAX_NUM_ORACLES in OCR2Abstract.sol
      for (uint256 i = 0; i < rs.length; ++i) {
        address signer = ecrecover(h, uint8(rawVs[i]) + 27, rs[i], ss[i]);
        o = s_oracles[signer];
        if (o.role != Role.Signer) revert ReportInvalid("address not authorized to sign");
        if (signed[o.index] != address(0)) revert ReportInvalid("non-unique signature");
        signed[o.index] = signer;
        signerCount += 1;
      }
    }

    _report(initialGas, msg.sender, signerCount, signed, report);
  }
}
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Minimal interface to the ENS registry (node ownership, resolvers, TTLs)
interface ENSInterface {
  // Logged when the owner of a node assigns a new owner to a subnode.
  event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner);

  // Logged when the owner of a node transfers ownership to a new account.
  event Transfer(bytes32 indexed node, address owner);

  // Logged when the resolver for a node changes.
  event NewResolver(bytes32 indexed node, address resolver);

  // Logged when the TTL of a node changes
  event NewTTL(bytes32 indexed node, uint64 ttl);

  // Assigns ownership of keccak256(node, label) to `owner`
  function setSubnodeOwner(bytes32 node, bytes32 label, address owner) external;

  function setResolver(bytes32 node, address resolver) external;

  function setOwner(bytes32 node, address owner) external;

  function setTTL(bytes32 node, uint64 ttl) external;

  function owner(bytes32 node) external view returns (address);

  function resolver(bytes32 node) external view returns (address);

  function ttl(bytes32 node) external view returns (uint64);
}
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
pragma abicoder v2;

import {AggregatorV2V3Interface} from "../shared/interfaces/AggregatorV2V3Interface.sol";

// Registry of price feeds keyed by (base asset, quote denomination) pairs
interface FeedRegistryInterface {
  // A phase is one aggregator's tenure for a pair, with its round-id range
  struct Phase {
    uint16 phaseId;
    uint80 startingAggregatorRoundId;
    uint80 endingAggregatorRoundId;
  }

  event FeedProposed(
    address indexed asset,
    address indexed denomination,
    address indexed proposedAggregator,
    address currentAggregator,
    address sender
  );
  event FeedConfirmed(
    address indexed asset,
    address indexed denomination,
    address indexed latestAggregator,
    address previousAggregator,
    uint16 nextPhaseId,
    address sender
  );

  // V3 AggregatorV3Interface

  function decimals(address base, address quote) external view returns (uint8);

  function description(address base, address quote) external view returns (string memory);

  function version(address base, address quote) external view returns (uint256);

  function latestRoundData(
    address base,
    address quote
  ) external view returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound);

  function getRoundData(
    address base,
    address quote,
    uint80 _roundId
  ) external view returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound);

  // V2 AggregatorInterface

  function latestAnswer(address base, address quote) external view returns (int256 answer);

  function latestTimestamp(address base, address quote) external view returns (uint256 timestamp);

  function latestRound(address base, address quote) external view returns (uint256 roundId);

  function getAnswer(address base, address quote, uint256 roundId) external view returns (int256 answer);

  function getTimestamp(address base, address quote, uint256 roundId) external view returns (uint256 timestamp);

  // Registry getters

  function getFeed(address base, address quote) external view returns (AggregatorV2V3Interface aggregator);

  function getPhaseFeed(
    address base,
    address quote,
    uint16 phaseId
  ) external view returns (AggregatorV2V3Interface aggregator);

  function isFeedEnabled(address aggregator) external view returns (bool);

  function getPhase(address base, address quote, uint16 phaseId) external view returns (Phase memory phase);

  // Round helpers

  function getRoundFeed(
    address base,
    address quote,
    uint80 roundId
  ) external view returns (AggregatorV2V3Interface aggregator);

  function getPhaseRange(
    address base,
    address quote,
    uint16 phaseId
  ) external view returns (uint80 startingRoundId, uint80 endingRoundId);

  function getPreviousRoundId(
    address base,
    address quote,
    uint80 roundId
  ) external view returns (uint80 previousRoundId);

  function getNextRoundId(address base, address quote, uint80 roundId) external view returns (uint80 nextRoundId);

  // Feed management

  function proposeFeed(address base, address quote, address aggregator) external;

  function confirmFeed(address base, address quote, address aggregator) external;

  // Proposed aggregator

  function getProposedFeed(
    address base,
    address quote
  ) external view returns (AggregatorV2V3Interface proposedAggregator);

  function proposedGetRoundData(
    address base,
    address quote,
    uint80 roundId
  ) external view returns (uint80 id, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound);

  function proposedLatestRoundData(
    address base,
    address quote
  ) external view returns (uint80 id, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound);

  // Phases
  function getCurrentPhaseId(address base, address quote) external view returns (uint16 currentPhaseId);
}

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Boolean flag registry guarded by an access controller
interface FlagsInterface {
  function getFlag(address) external view returns (bool);

  function getFlags(address[] calldata) external view returns (bool[] memory);

  function raiseFlag(address) external;

  function raiseFlags(address[] calldata) external;

  function lowerFlags(address[] calldata) external;

  function setRaisingAccessController(address) external;
}
operatorRequest( + address sender, + uint256 payment, + bytes32 specId, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 dataVersion, + bytes calldata data + ) external; + + function fulfillOracleRequest2( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + bytes calldata data + ) external returns (bool); + + function ownerTransferAndCall(address to, uint256 value, bytes calldata data) external returns (bool success); + + function distributeFunds(address payable[] calldata receivers, uint256[] calldata amounts) external payable; +} diff --git a/contracts/src/v0.8/interfaces/OracleInterface.sol b/contracts/src/v0.8/interfaces/OracleInterface.sol new file mode 100644 index 00000000..40365822 --- /dev/null +++ b/contracts/src/v0.8/interfaces/OracleInterface.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface OracleInterface { + function fulfillOracleRequest( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + bytes32 data + ) external returns (bool); + + function withdraw(address recipient, uint256 amount) external; + + function withdrawable() external view returns (uint256); +} diff --git a/contracts/src/v0.8/interfaces/PluginRequestInterface.sol b/contracts/src/v0.8/interfaces/PluginRequestInterface.sol new file mode 100644 index 00000000..cf4fec3a --- /dev/null +++ b/contracts/src/v0.8/interfaces/PluginRequestInterface.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface PluginRequestInterface { + function oracleRequest( + address sender, + uint256 requestPrice, + bytes32 serviceAgreementID, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 dataVersion, + bytes calldata data + ) external; + + function cancelOracleRequest( + bytes32 requestId, + uint256 payment, + bytes4 callbackFunctionId, + uint256 expiration + ) 
external; +} diff --git a/contracts/src/v0.8/interfaces/PoRAddressList.sol b/contracts/src/v0.8/interfaces/PoRAddressList.sol new file mode 100644 index 00000000..e4898ae8 --- /dev/null +++ b/contracts/src/v0.8/interfaces/PoRAddressList.sol @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/** + * @title Plugin Proof-of-Reserve address list interface. + * @notice This interface enables Plugin nodes to get the list addresses to be used in a PoR feed. A single + * contract that implements this interface can only store an address list for a single PoR feed. + * @dev All functions in this interface are expected to be called off-chain, so gas usage is not a big concern. + * This makes it possible to store addresses in optimized data types and convert them to human-readable strings + * in `getPoRAddressList()`. + */ +interface PoRAddressList { + /// @notice Get total number of addresses in the list. + function getPoRAddressListLength() external view returns (uint256); + + /** + * @notice Get a batch of human-readable addresses from the address list. The requested batch size can be greater + * than the actual address list size, in which the full address list will be returned. + * @dev Due to limitations of gas usage in off-chain calls, we need to support fetching the addresses in batches. + * EVM addresses need to be converted to human-readable strings. The address strings need to be in the same format + * that would be used when querying the balance of that address. + * @param startIndex The index of the first address in the batch. + * @param endIndex The index of the last address in the batch. If `endIndex > getPoRAddressListLength()-1`, + * endIndex need to default to `getPoRAddressListLength()-1`. + * @return Array of addresses as strings. 
+ */ + function getPoRAddressList(uint256 startIndex, uint256 endIndex) external view returns (string[] memory); +} diff --git a/contracts/src/v0.8/interfaces/PointerInterface.sol b/contracts/src/v0.8/interfaces/PointerInterface.sol new file mode 100644 index 00000000..ca2b82af --- /dev/null +++ b/contracts/src/v0.8/interfaces/PointerInterface.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface PointerInterface { + function getAddress() external view returns (address); +} diff --git a/contracts/src/v0.8/interfaces/TypeAndVersionInterface.sol b/contracts/src/v0.8/interfaces/TypeAndVersionInterface.sol new file mode 100644 index 00000000..786f2750 --- /dev/null +++ b/contracts/src/v0.8/interfaces/TypeAndVersionInterface.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +abstract contract TypeAndVersionInterface { + function typeAndVersion() external pure virtual returns (string memory); +} diff --git a/contracts/src/v0.8/keystone/KeystoneForwarder.sol b/contracts/src/v0.8/keystone/KeystoneForwarder.sol new file mode 100644 index 00000000..2fa3304a --- /dev/null +++ b/contracts/src/v0.8/keystone/KeystoneForwarder.sol @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import {IForwarder} from "./interfaces/IForwarder.sol"; +import {ConfirmedOwner} from "../shared/access/ConfirmedOwner.sol"; +import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol"; +import {Utils} from "./libraries/Utils.sol"; + +// solhint-disable custom-errors, no-unused-vars +contract KeystoneForwarder is IForwarder, ConfirmedOwner, TypeAndVersionInterface { + error ReentrantCall(); + + struct HotVars { + bool reentrancyGuard; // guard against reentrancy + } + + HotVars internal s_hotVars; // Mixture of config and state, commonly accessed + + mapping(bytes32 => address) internal s_reports; + + constructor() ConfirmedOwner(msg.sender) {} + + // send a report to targetAddress + 
function report( + address targetAddress, + bytes calldata data, + bytes[] calldata signatures + ) external nonReentrant returns (bool) { + require(data.length > 4 + 64, "invalid data length"); + + // data is an encoded call with the selector prefixed: (bytes4 selector, bytes report, ...) + // we are able to partially decode just the first param, since we don't know the rest + bytes memory rawReport = abi.decode(data[4:], (bytes)); + + // TODO: we probably need some type of f value config? + + bytes32 hash = keccak256(rawReport); + + // validate signatures + for (uint256 i = 0; i < signatures.length; i++) { + // TODO: is libocr-style multiple bytes32 arrays more optimal? + (bytes32 r, bytes32 s, uint8 v) = Utils._splitSignature(signatures[i]); + address signer = ecrecover(hash, v, r, s); + // TODO: we need to store oracle cluster similar to aggregator then, to validate valid signer list + } + + (bytes32 workflowId, bytes32 workflowExecutionId) = Utils._splitReport(rawReport); + + // report was already processed + if (s_reports[workflowExecutionId] != address(0)) { + return false; + } + + // solhint-disable-next-line avoid-low-level-calls + (bool success, bytes memory result) = targetAddress.call(data); + + s_reports[workflowExecutionId] = msg.sender; + return true; + } + + // get transmitter of a given report or 0x0 if it wasn't transmitted yet + function getTransmitter(bytes32 workflowExecutionId) external view returns (address) { + return s_reports[workflowExecutionId]; + } + + /// @inheritdoc TypeAndVersionInterface + function typeAndVersion() external pure override returns (string memory) { + return "KeystoneForwarder 1.0.0"; + } + + /** + * @dev replicates Open Zeppelin's ReentrancyGuard but optimized to fit our storage + */ + modifier nonReentrant() { + if (s_hotVars.reentrancyGuard) revert ReentrantCall(); + s_hotVars.reentrancyGuard = true; + _; + s_hotVars.reentrancyGuard = false; + } +} diff --git a/contracts/src/v0.8/keystone/interfaces/IForwarder.sol 
b/contracts/src/v0.8/keystone/interfaces/IForwarder.sol new file mode 100644 index 00000000..ce9512c6 --- /dev/null +++ b/contracts/src/v0.8/keystone/interfaces/IForwarder.sol @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +/// @title IForwarder - forwards keystone reports to a target +interface IForwarder {} diff --git a/contracts/src/v0.8/keystone/libraries/Utils.sol b/contracts/src/v0.8/keystone/libraries/Utils.sol new file mode 100644 index 00000000..8e108578 --- /dev/null +++ b/contracts/src/v0.8/keystone/libraries/Utils.sol @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +// solhint-disable custom-errors +library Utils { + // solhint-disable avoid-low-level-calls, plugin-solidity/explicit-returns + function _splitSignature(bytes memory sig) internal pure returns (bytes32 r, bytes32 s, uint8 v) { + require(sig.length == 65, "invalid signature length"); + + assembly { + /* + First 32 bytes stores the length of the signature + + add(sig, 32) = pointer of sig + 32 + effectively, skips first 32 bytes of signature + + mload(p) loads next 32 bytes starting at the memory address p into memory + */ + + // first 32 bytes, after the length prefix + r := mload(add(sig, 32)) + // second 32 bytes + s := mload(add(sig, 64)) + // final byte (first byte of the next 32 bytes) + v := byte(0, mload(add(sig, 96))) + } + + // implicitly return (r, s, v) + } + + // solhint-disable avoid-low-level-calls, plugin-solidity/explicit-returns + function _splitReport( + bytes memory rawReport + ) internal pure returns (bytes32 workflowId, bytes32 workflowExecutionId) { + require(rawReport.length > 64, "invalid report length"); + assembly { + // skip first 32 bytes, contains length of the report + workflowId := mload(add(rawReport, 32)) + workflowExecutionId := mload(add(rawReport, 64)) + } + } +} diff --git a/contracts/src/v0.8/keystone/test/KeystoneForwarder.t.sol b/contracts/src/v0.8/keystone/test/KeystoneForwarder.t.sol new 
file mode 100644 index 00000000..fc49523c --- /dev/null +++ b/contracts/src/v0.8/keystone/test/KeystoneForwarder.t.sol @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.19; + +import "forge-std/Test.sol"; + +import "../KeystoneForwarder.sol"; +import {Utils} from "../libraries/Utils.sol"; + +contract Receiver { + event MessageReceived(bytes32 indexed workflowId, bytes32 indexed workflowExecutionId, bytes[] mercuryReports); + + constructor() {} + + function foo(bytes calldata rawReport) external { + // decode metadata + (bytes32 workflowId, bytes32 workflowExecutionId) = Utils._splitReport(rawReport); + // parse actual report + bytes[] memory mercuryReports = abi.decode(rawReport[64:], (bytes[])); + emit MessageReceived(workflowId, workflowExecutionId, mercuryReports); + } +} + +contract KeystoneForwarderTest is Test { + function setUp() public virtual {} + + function test_abi_partial_decoding_works() public { + bytes memory report = hex"0102"; + uint256 amount = 1; + bytes memory payload = abi.encode(report, amount); + bytes memory decodedReport = abi.decode(payload, (bytes)); + assertEq(decodedReport, report, "not equal"); + } + + function test_it_works() public { + KeystoneForwarder forwarder = new KeystoneForwarder(); + Receiver receiver = new Receiver(); + + // taken from https://github.com/goplugin/pluginv3.0/blob/2390ec7f3c56de783ef4e15477e99729f188c524/core/services/relay/evm/cap_encoder_test.go#L42-L55 + bytes + memory report = 
hex"6d795f69640000000000000000000000000000000000000000000000000000006d795f657865637574696f6e5f696400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000301020300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004aabbccdd00000000000000000000000000000000000000000000000000000000"; + bytes memory data = abi.encodeWithSignature("foo(bytes)", report); + bytes[] memory signatures = new bytes[](0); + + vm.expectCall(address(receiver), data); + vm.recordLogs(); + + bool delivered1 = forwarder.report(address(receiver), data, signatures); + assertTrue(delivered1, "report not delivered"); + + Vm.Log[] memory entries = vm.getRecordedLogs(); + assertEq(entries[0].emitter, address(receiver)); + // validate workflow id and workflow execution id + bytes32 workflowId = hex"6d795f6964000000000000000000000000000000000000000000000000000000"; + bytes32 executionId = hex"6d795f657865637574696f6e5f69640000000000000000000000000000000000"; + assertEq(entries[0].topics[1], workflowId); + assertEq(entries[0].topics[2], executionId); + bytes[] memory mercuryReports = abi.decode(entries[0].data, (bytes[])); + assertEq(mercuryReports.length, 2); + assertEq(mercuryReports[0], hex"010203"); + assertEq(mercuryReports[1], hex"aabbccdd"); + + // doesn't deliver the same report more than once + bool delivered2 = forwarder.report(address(receiver), data, signatures); + assertFalse(delivered2, "report redelivered"); + } +} diff --git a/contracts/src/v0.8/l2ep/README.md b/contracts/src/v0.8/l2ep/README.md new file mode 100644 index 00000000..1b2ab5e9 --- /dev/null +++ b/contracts/src/v0.8/l2ep/README.md @@ -0,0 +1,148 @@ +# Overview + +This 
folder contains the source code and tests for the Layer 2 +Emergency Protocol (L2EP) contracts. It is organized as follows: + +```text +. +├─/dev (stores the latest source code for L2EP) +├─/test (stores the Foundry tests for L2EP) +``` + +## The `/dev` Folder + +The `/dev` folder contains subfolders for each chain that +has an L2EP solution implemented for it (e.g. `/scroll`, `/arbitrum`, +`/optimism`). It also contains a subfolder named `/interfaces`, +which stores shared interface types between all the supported +contracts. The top-level contracts (e.g. `CrossDomainOwnable.sol`) +serve as either abstract or parent contracts that are meant +to be reused for each indiviudal chain. + +## The `/test` Folder + +This folder is arranged as follows: + +- `/mocks`: used for both Foundry test cases and Hardhat test cases (NOTE: +Hardhat test cases should be considered deprecated at this point) + +- `/[version]`: test cases for a specific version of the L2EP contracts + +### Testing Conventions and Methodology + +By convention, each testing file should end in `.t.sol` (this is a standard +that other projects have also adopted). Each testing file in this folder +follows a similar structure. + +```text +TestFile.t.sol + | + |--- Base Contract (inherits L2EPTest contract) + | + |--- Child Contract 1 (inherits base contract) + | | + | |--- Test Function + | | + | |--- ... + | + | + |--- Child Contract 2 (inherits base contract) + | | + | |--- Test Function + | | + | |--- ... + | + | + ... +``` + +All test files contain a base contract defined at the top of the file. This +base contract inherits from a contract called `L2EPTest`. The `L2EPTest` +contract and base contracts have no test cases. Instead, the `L2EPTest` +contract is meant to store data/functions that will be reused among all +the base contracts. Similarly, the base contract is meant to store data +and/or functions that will be reused by any contracts that inherit it. 
+As such, each test file will define separate child contracts, and each +will inherit from the base contract + define its own set of tests. + +The base contract defines a `setUp` function which is automatically called +exactly once before ***each*** of the tests are run in an inheriting contract. +The `setUp` function typically deploys a fresh set of test contracts so that +tests can run independently of each other. Alongside the `setUp` function, +the base contract can also define variables, constants, events, etc. that +are meant to be reused per test. + +The name of the base contract follows the following convention: + +```text +Test +``` + +The child contract names follow a similar convention: + +```text +_ +``` + +Each test function within the child contract complies +with the following naming pattern: + +```text +test_ +``` + +### Running Foundry Tests + +#### Usage + +First make sure you are in the contracts directory: + +```sh +# Assuming you are currently in the /plugin directory +cd ./contracts +``` + +If you already have foundry installed, you can use the following command +to run all L2EP tests: + +```sh +FOUNDRY_PROFILE=l2ep forge test -vvv +``` + +To run a specific L2EP test, you can use a variation of the following command: + +```sh +FOUNDRY_PROFILE=l2ep forge test -vvv --match-path ./src/v0.8/l2ep/test/v1_0_0/scroll/ScrollSequencerUptimeFeed.t.sol +``` + +Or alternatively: + +```sh +FOUNDRY_PROFILE=l2ep forge test -vvv --match-contract ScrollSequencerUptimeFeed +``` + +If you prefer, you can also export `FOUNDRY_PROFILE` so that it doesn't need +to be provided before every command: + +```sh +# Export foundry profile +export FOUNDRY_PROFILE=l2ep + +# Run all tests +forge test -vvv + +# Run all tests and generate a gas snapshot +make snapshot +``` + +A full list of flags for `forge test` can be found [here](https://book.getfoundry.sh/reference/forge/forge-test). + +#### Coverage + +First ensure that the correct files are being evaluated. 
For example, if only +v1 contracts are, being evaluated then temporarily change the L2EP profile in +`./foundry.toml`. + +```sh +forge coverage +``` diff --git a/contracts/src/v0.8/l2ep/dev/CrossDomainDelegateForwarder.sol b/contracts/src/v0.8/l2ep/dev/CrossDomainDelegateForwarder.sol new file mode 100644 index 00000000..5dc73619 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/CrossDomainDelegateForwarder.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {CrossDomainOwnable} from "./CrossDomainOwnable.sol"; +import {DelegateForwarderInterface} from "./interfaces/DelegateForwarderInterface.sol"; + +/** + * @title CrossDomainDelegateForwarder - L1 xDomain account representation (with delegatecall support) + * @notice L2 Contract which receives messages from a specific L1 address and transparently forwards them to the destination. + * @dev Any other L2 contract which uses this contract's address as a privileged position, + * can consider that position to be held by the `l1Owner` + */ +abstract contract CrossDomainDelegateForwarder is DelegateForwarderInterface, CrossDomainOwnable {} diff --git a/contracts/src/v0.8/l2ep/dev/CrossDomainForwarder.sol b/contracts/src/v0.8/l2ep/dev/CrossDomainForwarder.sol new file mode 100644 index 00000000..8f218f66 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/CrossDomainForwarder.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {CrossDomainOwnable} from "./CrossDomainOwnable.sol"; +import {ForwarderInterface} from "./interfaces/ForwarderInterface.sol"; + +/** + * @title CrossDomainForwarder - L1 xDomain account representation + * @notice L2 Contract which receives messages from a specific L1 address and transparently forwards them to the destination. 
+ * @dev Any other L2 contract which uses this contract's address as a privileged position, + * can consider that position to be held by the `l1Owner` + */ +abstract contract CrossDomainForwarder is ForwarderInterface, CrossDomainOwnable {} diff --git a/contracts/src/v0.8/l2ep/dev/CrossDomainOwnable.sol b/contracts/src/v0.8/l2ep/dev/CrossDomainOwnable.sol new file mode 100644 index 00000000..b9a435a7 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/CrossDomainOwnable.sol @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {CrossDomainOwnableInterface} from "./interfaces/CrossDomainOwnableInterface.sol"; + +/** + * @title The CrossDomainOwnable contract + * @notice A contract with helpers for cross-domain contract ownership. + */ +contract CrossDomainOwnable is CrossDomainOwnableInterface, ConfirmedOwner { + address internal s_l1Owner; + address internal s_l1PendingOwner; + + constructor(address newl1Owner) ConfirmedOwner(msg.sender) { + _setL1Owner(newl1Owner); + } + + /** + * @notice transfer ownership of this account to a new L1 owner + * @param to new L1 owner that will be allowed to call the forward fn + */ + function transferL1Ownership(address to) public virtual override onlyL1Owner { + _transferL1Ownership(to); + } + + /** + * @notice accept ownership of this account to a new L1 owner + */ + function acceptL1Ownership() public virtual override onlyProposedL1Owner { + _setL1Owner(s_l1PendingOwner); + } + + /** + * @notice Get the current owner + */ + function l1Owner() public view override returns (address) { + return s_l1Owner; + } + + /** + * @notice validate, transfer ownership, and emit relevant events + */ + function _transferL1Ownership(address to) internal { + // solhint-disable-next-line custom-errors + require(to != msg.sender, "Cannot transfer to self"); + + s_l1PendingOwner = to; + + emit L1OwnershipTransferRequested(s_l1Owner, to); + } + + /** + 
* @notice set ownership, emit relevant events. Used in acceptOwnership() + */ + function _setL1Owner(address to) internal { + address oldOwner = s_l1Owner; + s_l1Owner = to; + s_l1PendingOwner = address(0); + + emit L1OwnershipTransferred(oldOwner, to); + } + + /** + * @notice Reverts if called by anyone other than the L1 owner. + */ + modifier onlyL1Owner() virtual { + // solhint-disable-next-line custom-errors + require(msg.sender == s_l1Owner, "Only callable by L1 owner"); + _; + } + + /** + * @notice Reverts if called by anyone other than the L1 owner. + */ + modifier onlyProposedL1Owner() virtual { + // solhint-disable-next-line custom-errors + require(msg.sender == s_l1PendingOwner, "Only callable by proposed L1 owner"); + _; + } +} diff --git a/contracts/src/v0.8/l2ep/dev/Flags.sol b/contracts/src/v0.8/l2ep/dev/Flags.sol new file mode 100644 index 00000000..b943c06d --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/Flags.sol @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +import {SimpleReadAccessController} from "../../shared/access/SimpleReadAccessController.sol"; +import {AccessControllerInterface} from "../../shared/interfaces/AccessControllerInterface.sol"; +import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol"; + +/* dev dependencies - to be re/moved after audit */ +import {FlagsInterface} from "./interfaces/FlagsInterface.sol"; + +/** + * @title The Flags contract + * @notice Allows flags to signal to any reader on the access control list. + * The owner can set flags, or designate other addresses to set flags. + * Raise flag actions are controlled by its own access controller. + * Lower flag actions are controlled by its own access controller. + * An expected pattern is to allow addresses to raise flags on themselves, so if you are subscribing to + * FlagOn events you should filter for addresses you care about. 
+ */ +// solhint-disable custom-errors +contract Flags is ITypeAndVersion, FlagsInterface, SimpleReadAccessController { + AccessControllerInterface public raisingAccessController; + AccessControllerInterface public loweringAccessController; + + mapping(address => bool) private s_flags; + + event FlagRaised(address indexed subject); + event FlagLowered(address indexed subject); + event RaisingAccessControllerUpdated(address indexed previous, address indexed current); + event LoweringAccessControllerUpdated(address indexed previous, address indexed current); + + /** + * @param racAddress address for the raising access controller. + * @param lacAddress address for the lowering access controller. + */ + constructor(address racAddress, address lacAddress) { + setRaisingAccessController(racAddress); + setLoweringAccessController(lacAddress); + } + + /** + * @notice versions: + * + * - Flags 1.1.0: upgraded to solc 0.8, added lowering access controller + * - Flags 1.0.0: initial release + * + * @inheritdoc ITypeAndVersion + */ + function typeAndVersion() external pure virtual override returns (string memory) { + return "Flags 1.1.0"; + } + + /** + * @notice read the warning flag status of a contract address. + * @param subject The contract address being checked for a flag. + * @return A true value indicates that a flag was raised and a + * false value indicates that no flag was raised. + */ + function getFlag(address subject) external view override checkAccess returns (bool) { + return s_flags[subject]; + } + + /** + * @notice read the warning flag status of a contract address. + * @param subjects An array of addresses being checked for a flag. + * @return An array of bools where a true value for any flag indicates that + * a flag was raised and a false value indicates that no flag was raised. 
+ */ + function getFlags(address[] calldata subjects) external view override checkAccess returns (bool[] memory) { + bool[] memory responses = new bool[](subjects.length); + for (uint256 i = 0; i < subjects.length; i++) { + responses[i] = s_flags[subjects[i]]; + } + return responses; + } + + /** + * @notice enable the warning flag for an address. + * Access is controlled by raisingAccessController, except for owner + * who always has access. + * @param subject The contract address whose flag is being raised + */ + function raiseFlag(address subject) external override { + require(_allowedToRaiseFlags(), "Not allowed to raise flags"); + + _tryToRaiseFlag(subject); + } + + /** + * @notice enable the warning flags for multiple addresses. + * Access is controlled by raisingAccessController, except for owner + * who always has access. + * @param subjects List of the contract addresses whose flag is being raised + */ + function raiseFlags(address[] calldata subjects) external override { + require(_allowedToRaiseFlags(), "Not allowed to raise flags"); + + for (uint256 i = 0; i < subjects.length; i++) { + _tryToRaiseFlag(subjects[i]); + } + } + + /** + * @notice allows owner to disable the warning flags for an addresses. + * Access is controlled by loweringAccessController, except for owner + * who always has access. + * @param subject The contract address whose flag is being lowered + */ + function lowerFlag(address subject) external override { + require(_allowedToLowerFlags(), "Not allowed to lower flags"); + + _tryToLowerFlag(subject); + } + + /** + * @notice allows owner to disable the warning flags for multiple addresses. + * Access is controlled by loweringAccessController, except for owner + * who always has access. 
+ * @param subjects List of the contract addresses whose flag is being lowered + */ + function lowerFlags(address[] calldata subjects) external override { + require(_allowedToLowerFlags(), "Not allowed to lower flags"); + + for (uint256 i = 0; i < subjects.length; i++) { + address subject = subjects[i]; + + _tryToLowerFlag(subject); + } + } + + /** + * @notice allows owner to change the access controller for raising flags. + * @param racAddress new address for the raising access controller. + */ + function setRaisingAccessController(address racAddress) public override onlyOwner { + address previous = address(raisingAccessController); + + if (previous != racAddress) { + raisingAccessController = AccessControllerInterface(racAddress); + + emit RaisingAccessControllerUpdated(previous, racAddress); + } + } + + function setLoweringAccessController(address lacAddress) public override onlyOwner { + address previous = address(loweringAccessController); + + if (previous != lacAddress) { + loweringAccessController = AccessControllerInterface(lacAddress); + + emit LoweringAccessControllerUpdated(previous, lacAddress); + } + } + + // PRIVATE + function _allowedToRaiseFlags() private view returns (bool) { + return msg.sender == owner() || raisingAccessController.hasAccess(msg.sender, msg.data); + } + + function _allowedToLowerFlags() private view returns (bool) { + return msg.sender == owner() || loweringAccessController.hasAccess(msg.sender, msg.data); + } + + function _tryToRaiseFlag(address subject) private { + if (!s_flags[subject]) { + s_flags[subject] = true; + emit FlagRaised(subject); + } + } + + function _tryToLowerFlag(address subject) private { + if (s_flags[subject]) { + s_flags[subject] = false; + emit FlagLowered(subject); + } + } +} diff --git a/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumCrossDomainForwarder.sol b/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumCrossDomainForwarder.sol new file mode 100644 index 00000000..cdab6d49 --- /dev/null +++ 
b/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumCrossDomainForwarder.sol @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol"; +// solhint-disable-next-line no-unused-import +import {ForwarderInterface} from "../interfaces/ForwarderInterface.sol"; + +import {CrossDomainForwarder} from "../CrossDomainForwarder.sol"; +import {CrossDomainOwnable} from "../CrossDomainOwnable.sol"; + +import {AddressAliasHelper} from "../../../vendor/arb-bridge-eth/v0.8.0-custom/contracts/libraries/AddressAliasHelper.sol"; +import {Address} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; + +/** + * @title ArbitrumCrossDomainForwarder - L1 xDomain account representation + * @notice L2 Contract which receives messages from a specific L1 address and transparently forwards them to the destination. + * @dev Any other L2 contract which uses this contract's address as a privileged position, + * can be considered to be owned by the `l1Owner` + */ +contract ArbitrumCrossDomainForwarder is TypeAndVersionInterface, CrossDomainForwarder { + /** + * @notice creates a new Arbitrum xDomain Forwarder contract + * @param l1OwnerAddr the L1 owner address that will be allowed to call the forward fn + * @dev Empty constructor required due to inheriting from abstract contract CrossDomainForwarder + */ + constructor(address l1OwnerAddr) CrossDomainOwnable(l1OwnerAddr) {} + + /** + * @notice versions: + * + * - ArbitrumCrossDomainForwarder 0.1.0: initial release + * - ArbitrumCrossDomainForwarder 1.0.0: Use OZ Address, CrossDomainOwnable + * + * @inheritdoc TypeAndVersionInterface + */ + function typeAndVersion() external pure virtual override returns (string memory) { + return "ArbitrumCrossDomainForwarder 1.0.0"; + } + + /** + * @notice The L2 xDomain `msg.sender`, generated from L1 sender address + */ + function crossDomainMessenger() public view returns (address) 
{ + return AddressAliasHelper.applyL1ToL2Alias(l1Owner()); + } + + /** + * @dev forwarded only if L2 Messenger calls with `xDomainMessageSender` being the L1 owner address + * @inheritdoc ForwarderInterface + */ + function forward(address target, bytes memory data) external virtual override onlyL1Owner { + Address.functionCall(target, data, "Forwarder call reverted"); + } + + /** + * @notice The call MUST come from the L1 owner (via cross-chain message.) Reverts otherwise. + */ + modifier onlyL1Owner() override { + // solhint-disable-next-line custom-errors + require(msg.sender == crossDomainMessenger(), "Sender is not the L2 messenger"); + _; + } + + /** + * @notice The call MUST come from the proposed L1 owner (via cross-chain message.) Reverts otherwise. + */ + modifier onlyProposedL1Owner() override { + // solhint-disable-next-line custom-errors + require(msg.sender == AddressAliasHelper.applyL1ToL2Alias(s_l1PendingOwner), "Must be proposed L1 owner"); + _; + } +} diff --git a/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumCrossDomainGovernor.sol b/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumCrossDomainGovernor.sol new file mode 100644 index 00000000..2f1d775e --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumCrossDomainGovernor.sol @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +// solhint-disable-next-line no-unused-import +import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol"; +// solhint-disable-next-line no-unused-import +import {ForwarderInterface} from "../interfaces/ForwarderInterface.sol"; +import {DelegateForwarderInterface} from "../interfaces/DelegateForwarderInterface.sol"; + +import {ArbitrumCrossDomainForwarder} from "./ArbitrumCrossDomainForwarder.sol"; + +import {Address} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; + +/** + * @title ArbitrumCrossDomainGovernor - L1 xDomain account representation (with delegatecall support) for 
Arbitrum + * @notice L2 Contract which receives messages from a specific L1 address and transparently forwards them to the destination. + * @dev Any other L2 contract which uses this contract's address as a privileged position, + * can be considered to be simultaneously owned by the `l1Owner` and L2 `owner` + */ +contract ArbitrumCrossDomainGovernor is DelegateForwarderInterface, ArbitrumCrossDomainForwarder { + /** + * @notice creates a new Arbitrum xDomain Forwarder contract + * @param l1OwnerAddr the L1 owner address that will be allowed to call the forward fn + * @dev Empty constructor required due to inheriting from abstract contract CrossDomainForwarder + */ + constructor(address l1OwnerAddr) ArbitrumCrossDomainForwarder(l1OwnerAddr) {} + + /** + * @notice versions: + * + * - ArbitrumCrossDomainGovernor 1.0.0: initial release + * + * @inheritdoc TypeAndVersionInterface + */ + function typeAndVersion() external pure virtual override returns (string memory) { + return "ArbitrumCrossDomainGovernor 1.0.0"; + } + + /** + * @dev forwarded only if L2 Messenger calls with `msg.sender` being the L1 owner address, or called by the L2 owner + * @inheritdoc ForwarderInterface + */ + function forward(address target, bytes memory data) external override onlyLocalOrCrossDomainOwner { + Address.functionCall(target, data, "Governor call reverted"); + } + + /** + * @dev forwarded only if L2 Messenger calls with `msg.sender` being the L1 owner address, or called by the L2 owner + * @inheritdoc DelegateForwarderInterface + */ + function forwardDelegate(address target, bytes memory data) external override onlyLocalOrCrossDomainOwner { + Address.functionDelegateCall(target, data, "Governor delegatecall reverted"); + } + + /** + * @notice The call MUST come from either the L1 owner (via cross-chain message) or the L2 owner. Reverts otherwise. 
+ */ + modifier onlyLocalOrCrossDomainOwner() { + // solhint-disable-next-line custom-errors + require(msg.sender == crossDomainMessenger() || msg.sender == owner(), "Sender is not the L2 messenger or owner"); + _; + } +} diff --git a/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumSequencerUptimeFeed.sol b/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumSequencerUptimeFeed.sol new file mode 100644 index 00000000..62284199 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumSequencerUptimeFeed.sol @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import {AddressAliasHelper} from "../../../vendor/arb-bridge-eth/v0.8.0-custom/contracts/libraries/AddressAliasHelper.sol"; +import {AggregatorInterface} from "../../../shared/interfaces/AggregatorInterface.sol"; +import {AggregatorV3Interface} from "../../../shared/interfaces/AggregatorV3Interface.sol"; +import {AggregatorV2V3Interface} from "../../../shared/interfaces/AggregatorV2V3Interface.sol"; +import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol"; +import {FlagsInterface} from "../interfaces/FlagsInterface.sol"; +import {ArbitrumSequencerUptimeFeedInterface} from "../interfaces/ArbitrumSequencerUptimeFeedInterface.sol"; +import {SimpleReadAccessController} from "../../../shared/access/SimpleReadAccessController.sol"; + +/** + * @title ArbitrumSequencerUptimeFeed - L2 sequencer uptime status aggregator + * @notice L2 contract that receives status updates from a specific L1 address, + * records a new answer if the status changed, and raises or lowers the flag on the + * stored Flags contract. 
+ */ +contract ArbitrumSequencerUptimeFeed is + AggregatorV2V3Interface, + ArbitrumSequencerUptimeFeedInterface, + TypeAndVersionInterface, + SimpleReadAccessController +{ + /// @dev Round info (for uptime history) + struct Round { + bool status; + uint64 timestamp; + } + + /// @dev Packed state struct to save sloads + struct FeedState { + uint80 latestRoundId; + bool latestStatus; + uint64 latestTimestamp; + } + + /// @notice Contract is not yet initialized + error Uninitialized(); + /// @notice Contract is already initialized + error AlreadyInitialized(); + /// @notice Sender is not the L2 messenger + error InvalidSender(); + /// @notice Replacement for AggregatorV3Interface "No data present" + error NoDataPresent(); + + event Initialized(); + event L1SenderTransferred(address indexed from, address indexed to); + /// @dev Emitted when an `updateStatus` call is ignored due to unchanged status or stale timestamp + event UpdateIgnored(bool latestStatus, uint64 latestTimestamp, bool incomingStatus, uint64 incomingTimestamp); + + /// @dev Follows: https://eips.ethereum.org/EIPS/eip-1967 + address public constant FLAG_L2_SEQ_OFFLINE = + address(bytes20(bytes32(uint256(keccak256("plugin.flags.arbitrum-seq-offline")) - 1))); + + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + uint8 public constant override decimals = 0; + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant override description = "L2 Sequencer Uptime Status Feed"; + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + uint256 public constant override version = 1; + + /// @dev Flags contract to raise/lower flags on, during status transitions + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + FlagsInterface public immutable FLAGS; + /// @dev L1 address + address private s_l1Sender; + /// @dev s_latestRoundId == 0 means this contract is uninitialized. 
+ FeedState private s_feedState = FeedState({latestRoundId: 0, latestStatus: false, latestTimestamp: 0}); + mapping(uint80 => Round) private s_rounds; + + /** + * @param flagsAddress Address of the Flags contract on L2 + * @param l1SenderAddress Address of the L1 contract that is permissioned to call this contract + */ + constructor(address flagsAddress, address l1SenderAddress) { + _setL1Sender(l1SenderAddress); + + FLAGS = FlagsInterface(flagsAddress); + } + + /** + * @notice Check if a roundId is valid in this current contract state + * @dev Mainly used for AggregatorV2V3Interface functions + * @param roundId Round ID to check + */ + function _isValidRound(uint256 roundId) private view returns (bool) { + return roundId > 0 && roundId <= type(uint80).max && s_feedState.latestRoundId >= roundId; + } + + /// @notice Check that this contract is initialized, otherwise throw + function _requireInitialized(uint80 latestRoundId) private pure { + if (latestRoundId == 0) { + revert Uninitialized(); + } + } + + /** + * @notice Initialize the first round. Can't be done in the constructor, + * because this contract's address must be permissioned by the Flags contract + * (The Flags contract itself is a SimpleReadAccessController).
+ */ + function initialize() external onlyOwner { + FeedState memory feedState = s_feedState; + if (feedState.latestRoundId != 0) { + revert AlreadyInitialized(); + } + + uint64 timestamp = uint64(block.timestamp); + bool currentStatus = FLAGS.getFlag(FLAG_L2_SEQ_OFFLINE); + + // Initialise roundId == 1 as the first round + _recordRound(1, currentStatus, timestamp); + + emit Initialized(); + } + + /** + * @notice versions: + * + * - ArbitrumSequencerUptimeFeed 1.0.0: initial release + * + * @inheritdoc TypeAndVersionInterface + */ + function typeAndVersion() external pure virtual override returns (string memory) { + return "ArbitrumSequencerUptimeFeed 1.0.0"; + } + + /// @return L1 sender address + function l1Sender() public view virtual returns (address) { + return s_l1Sender; + } + + /** + * @notice Set the allowed L1 sender for this contract to a new L1 sender + * @dev Can be disabled by setting the L1 sender as `address(0)`. Accessible only by owner. + * @param to new L1 sender that will be allowed to call `updateStatus` on this contract + */ + function transferL1Sender(address to) external virtual onlyOwner { + _setL1Sender(to); + } + + /// @notice internal method that stores the L1 sender + function _setL1Sender(address to) private { + address from = s_l1Sender; + if (from != to) { + s_l1Sender = to; + emit L1SenderTransferred(from, to); + } + } + + /** + * @notice Messages sent by the stored L1 sender will arrive on L2 with this + * address as the `msg.sender` + * @return L2-aliased form of the L1 sender address + */ + function aliasedL1MessageSender() public view returns (address) { + return AddressAliasHelper.applyL1ToL2Alias(l1Sender()); + } + + /** + * @dev Returns an AggregatorV2V3Interface compatible answer from status flag + * + * @param status The status flag to convert to an aggregator-compatible answer + */ + function _getStatusAnswer(bool status) private pure returns (int256) { + return status ? 
int256(1) : int256(0); + } + + /** + * @notice Raise or lower the flag on the stored Flags contract. + */ + function _forwardStatusToFlags(bool status) private { + if (status) { + FLAGS.raiseFlag(FLAG_L2_SEQ_OFFLINE); + } else { + FLAGS.lowerFlag(FLAG_L2_SEQ_OFFLINE); + } + } + + /** + * @notice Helper function to record a round and set the latest feed state. + * + * @param roundId The round ID to record + * @param status Sequencer status + * @param timestamp Block timestamp of status update + */ + function _recordRound(uint80 roundId, bool status, uint64 timestamp) private { + Round memory nextRound = Round(status, timestamp); + FeedState memory feedState = FeedState(roundId, status, timestamp); + + s_rounds[roundId] = nextRound; + s_feedState = feedState; + + emit NewRound(roundId, msg.sender, timestamp); + emit AnswerUpdated(_getStatusAnswer(status), roundId, timestamp); + } + + /** + * @notice Record a new status and timestamp if it has changed since the last round. + * @dev This function will revert if not called from `l1Sender` via the L1->L2 messenger. 
+ * + * @param status Sequencer status + * @param timestamp Block timestamp of status update + */ + function updateStatus(bool status, uint64 timestamp) external override { + FeedState memory feedState = s_feedState; + _requireInitialized(feedState.latestRoundId); + if (msg.sender != aliasedL1MessageSender()) { + revert InvalidSender(); + } + + // Ignore if status did not change or latest recorded timestamp is newer + if (feedState.latestStatus == status || feedState.latestTimestamp > timestamp) { + emit UpdateIgnored(feedState.latestStatus, feedState.latestTimestamp, status, timestamp); + return; + } + + // Prepare a new round with updated status + feedState.latestRoundId += 1; + _recordRound(feedState.latestRoundId, status, timestamp); + + _forwardStatusToFlags(status); + } + + /// @inheritdoc AggregatorInterface + function latestAnswer() external view override checkAccess returns (int256) { + FeedState memory feedState = s_feedState; + _requireInitialized(feedState.latestRoundId); + return _getStatusAnswer(feedState.latestStatus); + } + + /// @inheritdoc AggregatorInterface + function latestTimestamp() external view override checkAccess returns (uint256) { + FeedState memory feedState = s_feedState; + _requireInitialized(feedState.latestRoundId); + return feedState.latestTimestamp; + } + + /// @inheritdoc AggregatorInterface + function latestRound() external view override checkAccess returns (uint256) { + FeedState memory feedState = s_feedState; + _requireInitialized(feedState.latestRoundId); + return feedState.latestRoundId; + } + + /// @inheritdoc AggregatorInterface + function getAnswer(uint256 roundId) external view override checkAccess returns (int256) { + _requireInitialized(s_feedState.latestRoundId); + if (_isValidRound(roundId)) { + return _getStatusAnswer(s_rounds[uint80(roundId)].status); + } + + return 0; + } + + /// @inheritdoc AggregatorInterface + function getTimestamp(uint256 roundId) external view override checkAccess returns (uint256) { + 
_requireInitialized(s_feedState.latestRoundId); + if (_isValidRound(roundId)) { + return s_rounds[uint80(roundId)].timestamp; + } + + return 0; + } + + /// @inheritdoc AggregatorV3Interface + function getRoundData( + uint80 _roundId + ) + public + view + override + checkAccess + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + _requireInitialized(s_feedState.latestRoundId); + + if (_isValidRound(_roundId)) { + Round memory round = s_rounds[_roundId]; + answer = _getStatusAnswer(round.status); + startedAt = uint256(round.timestamp); + } else { + answer = 0; + startedAt = 0; + } + roundId = _roundId; + updatedAt = startedAt; + answeredInRound = roundId; + + return (roundId, answer, startedAt, updatedAt, answeredInRound); + } + + /// @inheritdoc AggregatorV3Interface + function latestRoundData() + external + view + override + checkAccess + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + FeedState memory feedState = s_feedState; + _requireInitialized(feedState.latestRoundId); + + roundId = feedState.latestRoundId; + answer = _getStatusAnswer(feedState.latestStatus); + startedAt = feedState.latestTimestamp; + updatedAt = startedAt; + answeredInRound = roundId; + + return (roundId, answer, startedAt, updatedAt, answeredInRound); + } +} diff --git a/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumValidator.sol b/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumValidator.sol new file mode 100644 index 00000000..78d48f27 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/arbitrum/ArbitrumValidator.sol @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {AggregatorValidatorInterface} from "../../../shared/interfaces/AggregatorValidatorInterface.sol"; +import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol"; +import {AccessControllerInterface} from 
"../../../shared/interfaces/AccessControllerInterface.sol"; +import {SimpleWriteAccessController} from "../../../shared/access/SimpleWriteAccessController.sol"; + +/* ./dev dependencies - to be moved from ./dev after audit */ +import {ArbitrumSequencerUptimeFeedInterface} from "../interfaces/ArbitrumSequencerUptimeFeedInterface.sol"; +import {IArbitrumDelayedInbox} from "../interfaces/IArbitrumDelayedInbox.sol"; +import {AddressAliasHelper} from "../../../vendor/arb-bridge-eth/v0.8.0-custom/contracts/libraries/AddressAliasHelper.sol"; +import {ArbSys} from "../../../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol"; +import {Address} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; + +/** + * @title ArbitrumValidator - makes xDomain L2 Flags contract call (using L2 xDomain Forwarder contract) + * @notice Allows to raise and lower Flags on the Arbitrum L2 network through L1 bridge + * - The internal AccessController controls the access of the validate method + * - Gas configuration is controlled by a configurable external SimpleWriteAccessController + * - Funds on the contract are managed by the owner + */ +contract ArbitrumValidator is TypeAndVersionInterface, AggregatorValidatorInterface, SimpleWriteAccessController { + enum PaymentStrategy { + L1, + L2 + } + // Config for L1 -> L2 Arbitrum retryable ticket message + struct GasConfig { + uint256 maxGas; + uint256 gasPriceBid; + uint256 baseFee; // Will use block.baseFee if set to 0 + address gasPriceL1FeedAddr; + } + + /// @dev Precompiled contract that exists in every Arbitrum chain at address(100). Exposes a variety of system-level functionality. 
+ address internal constant ARBSYS_ADDR = address(0x0000000000000000000000000000000000000064); + + int256 private constant ANSWER_SEQ_OFFLINE = 1; + + /// @notice The address of Arbitrum's DelayedInbox + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + address public immutable CROSS_DOMAIN_MESSENGER; + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + address public immutable L2_SEQ_STATUS_RECORDER; + // L2 xDomain alias address of this contract + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + address public immutable L2_ALIAS = AddressAliasHelper.applyL1ToL2Alias(address(this)); + + PaymentStrategy private s_paymentStrategy; + GasConfig private s_gasConfig; + AccessControllerInterface private s_configAC; + + /** + * @notice emitted when a new payment strategy is set + * @param paymentStrategy strategy describing how the contract pays for xDomain calls + */ + event PaymentStrategySet(PaymentStrategy indexed paymentStrategy); + + /** + * @notice emitted when a new gas configuration is set + * @param maxGas gas limit for immediate L2 execution attempt. 
+ * @param gasPriceBid maximum L2 gas price to pay + * @param gasPriceL1FeedAddr address of the L1 gas price feed (used to approximate Arbitrum retryable ticket submission cost) + */ + event GasConfigSet(uint256 maxGas, uint256 gasPriceBid, address indexed gasPriceL1FeedAddr); + + /** + * @notice emitted when a new gas access-control contract is set + * @param previous the address prior to the current setting + * @param current the address of the new access-control contract + */ + event ConfigACSet(address indexed previous, address indexed current); + + /** + * @notice emitted when a new ETH withdrawal from L2 was requested + * @param id unique id of the published retryable transaction (keccak256(requestID, uint(0))) + * @param amount of funds to withdraw + */ + event L2WithdrawalRequested(uint256 indexed id, uint256 amount, address indexed refundAddr); + + /** + * @param crossDomainMessengerAddr address the xDomain bridge messenger (Arbitrum Inbox L1) contract address + * @param l2ArbitrumSequencerUptimeFeedAddr the L2 Flags contract address + * @param configACAddr address of the access controller for managing gas price on Arbitrum + * @param maxGas gas limit for immediate L2 execution attempt.
A value around 1M should be sufficient + * @param gasPriceBid maximum L2 gas price to pay + * @param gasPriceL1FeedAddr address of the L1 gas price feed (used to approximate Arbitrum retryable ticket submission cost) + * @param _paymentStrategy strategy describing how the contract pays for xDomain calls + */ + constructor( + address crossDomainMessengerAddr, + address l2ArbitrumSequencerUptimeFeedAddr, + address configACAddr, + uint256 maxGas, + uint256 gasPriceBid, + uint256 baseFee, + address gasPriceL1FeedAddr, + PaymentStrategy _paymentStrategy + ) { + // solhint-disable-next-line custom-errors + require(crossDomainMessengerAddr != address(0), "Invalid xDomain Messenger address"); + // solhint-disable-next-line custom-errors + require(l2ArbitrumSequencerUptimeFeedAddr != address(0), "Invalid ArbitrumSequencerUptimeFeed contract address"); + CROSS_DOMAIN_MESSENGER = crossDomainMessengerAddr; + L2_SEQ_STATUS_RECORDER = l2ArbitrumSequencerUptimeFeedAddr; + // Additional L2 payment configuration + _setConfigAC(configACAddr); + _setGasConfig(maxGas, gasPriceBid, baseFee, gasPriceL1FeedAddr); + _setPaymentStrategy(_paymentStrategy); + } + + /** + * @notice versions: + * + * - ArbitrumValidator 0.1.0: initial release + * - ArbitrumValidator 0.2.0: critical Arbitrum network update + * - xDomain `msg.sender` backwards incompatible change (now an alias address) + * - new `withdrawFundsFromL2` fn that withdraws from L2 xDomain alias address + * - approximation of `maxSubmissionCost` using a L1 gas price feed + * - ArbitrumValidator 1.0.0: change target of L2 sequencer status update + * - now calls `updateStatus` on an L2 ArbitrumSequencerUptimeFeed contract instead of + * directly calling the Flags contract + * - ArbitrumValidator 2.0.0: change how maxSubmissionCost is calculated when sending cross chain messages + * - now calls `calculateRetryableSubmissionFee` instead of inlining equation to estimate + * the maxSubmissionCost required to send the message to L2 + * 
@inheritdoc TypeAndVersionInterface + */ + function typeAndVersion() external pure virtual override returns (string memory) { + return "ArbitrumValidator 2.0.0"; + } + + /// @return stored PaymentStrategy + function paymentStrategy() external view virtual returns (PaymentStrategy) { + return s_paymentStrategy; + } + + /// @return stored GasConfig + function gasConfig() external view virtual returns (GasConfig memory) { + return s_gasConfig; + } + + /// @return config AccessControllerInterface contract address + function configAC() external view virtual returns (address) { + return address(s_configAC); + } + + /** + * @notice makes this contract payable + * @dev receives funds: + * - to use them (if configured) to pay for L2 execution on L1 + * - when withdrawing funds from L2 xDomain alias address (pay for L2 execution on L2) + */ + receive() external payable {} + + /** + * @notice withdraws all funds available in this contract to the msg.sender + * @dev only owner can call this + */ + function withdrawFunds() external onlyOwner { + address payable recipient = payable(msg.sender); + uint256 amount = address(this).balance; + Address.sendValue(recipient, amount); + } + + /** + * @notice withdraws all funds available in this contract to the address specified + * @dev only owner can call this + * @param recipient address where to send the funds + */ + function withdrawFundsTo(address payable recipient) external onlyOwner { + uint256 amount = address(this).balance; + Address.sendValue(recipient, amount); + } + + /** + * @notice withdraws funds from L2 xDomain alias address (representing this L1 contract) + * @dev only owner can call this + * @param amount of funds to withdraws + * @param refundAddr address where gas excess on L2 will be sent + * WARNING: `refundAddr` is not aliased! Make sure you can recover the refunded funds on L2. 
+ * @return id unique id of the published retryable transaction (keccak256(requestID, uint(0)) + */ + function withdrawFundsFromL2(uint256 amount, address refundAddr) external onlyOwner returns (uint256 id) { + // Build an xDomain message to trigger the ArbSys precompile, which will create a L2 -> L1 tx transferring `amount` + bytes memory message = abi.encodeWithSelector(ArbSys.withdrawEth.selector, address(this)); + // Make the xDomain call + // NOTICE: We approximate the max submission cost of sending a retryable tx with specific calldata length. + uint256 maxSubmissionCost = _approximateMaxSubmissionCost(message.length); + uint256 maxGas = 120_000; // static `maxGas` for L2 -> L1 transfer + uint256 gasPriceBid = s_gasConfig.gasPriceBid; + uint256 l1PaymentValue = s_paymentStrategy == PaymentStrategy.L1 + ? _maxRetryableTicketCost(maxSubmissionCost, maxGas, gasPriceBid) + : 0; + // NOTICE: In the case of PaymentStrategy.L2 the L2 xDomain alias address needs to be funded, as it will be paying the fee. + id = IArbitrumDelayedInbox(CROSS_DOMAIN_MESSENGER).createRetryableTicketNoRefundAliasRewrite{value: l1PaymentValue}( + ARBSYS_ADDR, // target + amount, // L2 call value (requested) + maxSubmissionCost, + refundAddr, // excessFeeRefundAddress + refundAddr, // callValueRefundAddress + maxGas, + gasPriceBid, + message + ); + emit L2WithdrawalRequested(id, amount, refundAddr); + + return id; + } + + /** + * @notice sets config AccessControllerInterface contract + * @dev only owner can call this + * @param accessController new AccessControllerInterface contract address + */ + function setConfigAC(address accessController) external onlyOwner { + _setConfigAC(accessController); + } + + /** + * @notice sets Arbitrum gas configuration + * @dev access control provided by `configAC` + * @param maxGas gas limit for immediate L2 execution attempt. 
A value around 1M should be sufficient + * @param gasPriceBid maximum L2 gas price to pay + * @param gasPriceL1FeedAddr address of the L1 gas price feed (used to approximate Arbitrum retryable ticket submission cost) + */ + function setGasConfig( + uint256 maxGas, + uint256 gasPriceBid, + uint256 baseFee, + address gasPriceL1FeedAddr + ) external onlyOwnerOrConfigAccess { + _setGasConfig(maxGas, gasPriceBid, baseFee, gasPriceL1FeedAddr); + } + + /** + * @notice sets the payment strategy + * @dev access control provided by `configAC` + * @param _paymentStrategy strategy describing how the contract pays for xDomain calls + */ + function setPaymentStrategy(PaymentStrategy _paymentStrategy) external onlyOwnerOrConfigAccess { + _setPaymentStrategy(_paymentStrategy); + } + + /** + * @notice validate method sends an xDomain L2 tx to update Flags contract, in case of change from `previousAnswer`. + * @dev A retryable ticket is created on the Arbitrum L1 Inbox contract. The tx gas fee can be paid from this + * contract providing a value, or if no L1 value is sent with the xDomain message the gas will be paid by + * the L2 xDomain alias account (generated from `address(this)`). This method is accessed controlled. + * @param previousAnswer previous aggregator answer + * @param currentAnswer new aggregator answer - value of 1 considers the service offline. 
+ */ + function validate( + uint256 /* previousRoundId */, + int256 previousAnswer, + uint256 /* currentRoundId */, + int256 currentAnswer + ) external override checkAccess returns (bool) { + // Avoids resending to L2 the same tx on every call + if (previousAnswer == currentAnswer) { + return true; + } + + // Excess gas on L2 will be sent to the L2 xDomain alias address of this contract + address refundAddr = L2_ALIAS; + // Encode the ArbitrumSequencerUptimeFeed call + bytes4 selector = ArbitrumSequencerUptimeFeedInterface.updateStatus.selector; + bool status = currentAnswer == ANSWER_SEQ_OFFLINE; + uint64 timestamp = uint64(block.timestamp); + // Encode `status` and `timestamp` + bytes memory message = abi.encodeWithSelector(selector, status, timestamp); + // Make the xDomain call + // NOTICE: We approximate the max submission cost of sending a retryable tx with specific calldata length. + uint256 maxSubmissionCost = _approximateMaxSubmissionCost(message.length); + uint256 maxGas = s_gasConfig.maxGas; + uint256 gasPriceBid = s_gasConfig.gasPriceBid; + uint256 l1PaymentValue = s_paymentStrategy == PaymentStrategy.L1 + ? _maxRetryableTicketCost(maxSubmissionCost, maxGas, gasPriceBid) + : 0; + // NOTICE: In the case of PaymentStrategy.L2 the L2 xDomain alias address needs to be funded, as it will be paying the fee. + // We also ignore the returned msg number, that can be queried via the `InboxMessageDelivered` event. 
+ IArbitrumDelayedInbox(CROSS_DOMAIN_MESSENGER).createRetryableTicketNoRefundAliasRewrite{value: l1PaymentValue}( + L2_SEQ_STATUS_RECORDER, // target + 0, // L2 call value + maxSubmissionCost, + refundAddr, // excessFeeRefundAddress + refundAddr, // callValueRefundAddress + maxGas, + gasPriceBid, + message + ); + // return success + return true; + } + + /// @notice internal method that stores the payment strategy + function _setPaymentStrategy(PaymentStrategy _paymentStrategy) internal { + s_paymentStrategy = _paymentStrategy; + emit PaymentStrategySet(_paymentStrategy); + } + + /// @notice internal method that stores the gas configuration + function _setGasConfig(uint256 maxGas, uint256 gasPriceBid, uint256 baseFee, address gasPriceL1FeedAddr) internal { + // solhint-disable-next-line custom-errors + require(maxGas > 0, "Max gas is zero"); + // solhint-disable-next-line custom-errors + require(gasPriceBid > 0, "Gas price bid is zero"); + // solhint-disable-next-line custom-errors + require(gasPriceL1FeedAddr != address(0), "Gas price Aggregator is zero address"); + s_gasConfig = GasConfig(maxGas, gasPriceBid, baseFee, gasPriceL1FeedAddr); + emit GasConfigSet(maxGas, gasPriceBid, gasPriceL1FeedAddr); + } + + /// @notice Internal method that stores the configuration access controller + function _setConfigAC(address accessController) internal { + address previousAccessController = address(s_configAC); + if (accessController != previousAccessController) { + s_configAC = AccessControllerInterface(accessController); + emit ConfigACSet(previousAccessController, accessController); + } + } + + /** + * @notice Internal method that approximates the `maxSubmissionCost` + * @dev This function estimates the max submission cost using the formula + * implemented in Arbitrum DelayedInbox's calculateRetryableSubmissionFee function + * @param calldataSizeInBytes xDomain message size in bytes + */ + function _approximateMaxSubmissionCost(uint256 calldataSizeInBytes) internal view 
returns (uint256) { + return + IArbitrumDelayedInbox(CROSS_DOMAIN_MESSENGER).calculateRetryableSubmissionFee( + calldataSizeInBytes, + s_gasConfig.baseFee + ); + } + + /// @notice Internal helper method that calculates the total cost of the xDomain retryable ticket call + function _maxRetryableTicketCost( + uint256 maxSubmissionCost, + uint256 maxGas, + uint256 gasPriceBid + ) internal pure returns (uint256) { + return maxSubmissionCost + maxGas * gasPriceBid; + } + + /// @dev reverts if the caller does not have access to change the configuration + modifier onlyOwnerOrConfigAccess() { + // solhint-disable-next-line custom-errors + require( + msg.sender == owner() || (address(s_configAC) != address(0) && s_configAC.hasAccess(msg.sender, msg.data)), + "No access" + ); + _; + } +} diff --git a/contracts/src/v0.8/l2ep/dev/interfaces/ArbitrumSequencerUptimeFeedInterface.sol b/contracts/src/v0.8/l2ep/dev/interfaces/ArbitrumSequencerUptimeFeedInterface.sol new file mode 100644 index 00000000..6943b9fb --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/interfaces/ArbitrumSequencerUptimeFeedInterface.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface ArbitrumSequencerUptimeFeedInterface { + function updateStatus(bool status, uint64 timestamp) external; +} diff --git a/contracts/src/v0.8/l2ep/dev/interfaces/CrossDomainOwnableInterface.sol b/contracts/src/v0.8/l2ep/dev/interfaces/CrossDomainOwnableInterface.sol new file mode 100644 index 00000000..a4cc6a9f --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/interfaces/CrossDomainOwnableInterface.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/// @title CrossDomainOwnableInterface - A contract with helpers for cross-domain contract ownership +interface CrossDomainOwnableInterface { + event L1OwnershipTransferRequested(address indexed from, address indexed to); + + event L1OwnershipTransferred(address indexed from, address indexed to); + + function l1Owner() 
external returns (address); + + function transferL1Ownership(address recipient) external; + + function acceptL1Ownership() external; +} diff --git a/contracts/src/v0.8/l2ep/dev/interfaces/DelegateForwarderInterface.sol b/contracts/src/v0.8/l2ep/dev/interfaces/DelegateForwarderInterface.sol new file mode 100644 index 00000000..792e83ed --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/interfaces/DelegateForwarderInterface.sol @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/// @title DelegateForwarderInterface - forwards a delegatecall to a target, under some conditions +interface DelegateForwarderInterface { + /** + * @notice forward delegatecalls the `target` with `data` + * @param target contract address to be delegatecalled + * @param data to send to target contract + */ + function forwardDelegate(address target, bytes memory data) external; +} diff --git a/contracts/src/v0.8/l2ep/dev/interfaces/FlagsInterface.sol b/contracts/src/v0.8/l2ep/dev/interfaces/FlagsInterface.sol new file mode 100644 index 00000000..b5fd70bd --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/interfaces/FlagsInterface.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +interface FlagsInterface { + function getFlag(address) external view returns (bool); + + function getFlags(address[] calldata) external view returns (bool[] memory); + + function raiseFlag(address) external; + + function raiseFlags(address[] calldata) external; + + function lowerFlag(address) external; + + function lowerFlags(address[] calldata) external; + + function setRaisingAccessController(address) external; + + function setLoweringAccessController(address) external; +} diff --git a/contracts/src/v0.8/l2ep/dev/interfaces/ForwarderInterface.sol b/contracts/src/v0.8/l2ep/dev/interfaces/ForwarderInterface.sol new file mode 100644 index 00000000..a3c29e5f --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/interfaces/ForwarderInterface.sol @@ -0,0 +1,12 @@ +// 
SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/// @title ForwarderInterface - forwards a call to a target, under some conditions +interface ForwarderInterface { + /** + * @notice forward calls the `target` with `data` + * @param target contract address to be called + * @param data to send to target contract + */ + function forward(address target, bytes memory data) external; +} diff --git a/contracts/src/v0.8/l2ep/dev/interfaces/IArbitrumDelayedInbox.sol b/contracts/src/v0.8/l2ep/dev/interfaces/IArbitrumDelayedInbox.sol new file mode 100644 index 00000000..e18efd65 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/interfaces/IArbitrumDelayedInbox.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {IInbox} from "../../../vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IInbox.sol"; + +/** + * @notice This interface extends Arbitrum's IInbox interface to include + * the calculateRetryableSubmissionFee. This new function was added as part + * of Arbitrum's Nitro migration but was excluded from the IInbox interface. 
This setup + * works for us as the team has added it as a public function to the IInbox proxy + * contract's implementation + */ +interface IArbitrumDelayedInbox is IInbox { + function calculateRetryableSubmissionFee(uint256 dataLength, uint256 baseFee) external view returns (uint256); +} diff --git a/contracts/src/v0.8/l2ep/dev/interfaces/OptimismSequencerUptimeFeedInterface.sol b/contracts/src/v0.8/l2ep/dev/interfaces/OptimismSequencerUptimeFeedInterface.sol new file mode 100644 index 00000000..281966b7 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/interfaces/OptimismSequencerUptimeFeedInterface.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface OptimismSequencerUptimeFeedInterface { + function updateStatus(bool status, uint64 timestamp) external; +} diff --git a/contracts/src/v0.8/l2ep/dev/interfaces/ScrollSequencerUptimeFeedInterface.sol b/contracts/src/v0.8/l2ep/dev/interfaces/ScrollSequencerUptimeFeedInterface.sol new file mode 100644 index 00000000..f0f716d6 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/interfaces/ScrollSequencerUptimeFeedInterface.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +interface ScrollSequencerUptimeFeedInterface { + function updateStatus(bool status, uint64 timestamp) external; +} diff --git a/contracts/src/v0.8/l2ep/dev/optimism/OptimismCrossDomainForwarder.sol b/contracts/src/v0.8/l2ep/dev/optimism/OptimismCrossDomainForwarder.sol new file mode 100644 index 00000000..9dc310c6 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/optimism/OptimismCrossDomainForwarder.sol @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol"; +// solhint-disable-next-line no-unused-import +import {ForwarderInterface} from "../interfaces/ForwarderInterface.sol"; + +/* ./dev dependencies - to be moved from ./dev after audit */ +import {CrossDomainForwarder} from 
"../CrossDomainForwarder.sol"; +import {CrossDomainOwnable} from "../CrossDomainOwnable.sol"; + +import {iOVM_CrossDomainMessenger} from "../../../vendor/@eth-optimism/contracts/v0.4.7/contracts/optimistic-ethereum/iOVM/bridge/messaging/iOVM_CrossDomainMessenger.sol"; +import {Address} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; + +/** + * @title OptimismCrossDomainForwarder - L1 xDomain account representation + * @notice L2 Contract which receives messages from a specific L1 address and transparently forwards them to the destination. + * @dev Any other L2 contract which uses this contract's address as a privileged position, + * can be considered to be owned by the `l1Owner` + */ +contract OptimismCrossDomainForwarder is TypeAndVersionInterface, CrossDomainForwarder { + // OVM_L2CrossDomainMessenger is a precompile usually deployed to 0x4200000000000000000000000000000000000007 + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + iOVM_CrossDomainMessenger private immutable OVM_CROSS_DOMAIN_MESSENGER; + + /** + * @notice creates a new Optimism xDomain Forwarder contract + * @param crossDomainMessengerAddr the xDomain bridge messenger (Optimism bridge L2) contract address + * @param l1OwnerAddr the L1 owner address that will be allowed to call the forward fn + */ + constructor(iOVM_CrossDomainMessenger crossDomainMessengerAddr, address l1OwnerAddr) CrossDomainOwnable(l1OwnerAddr) { + // solhint-disable-next-line custom-errors + require(address(crossDomainMessengerAddr) != address(0), "Invalid xDomain Messenger address"); + OVM_CROSS_DOMAIN_MESSENGER = crossDomainMessengerAddr; + } + + /** + * @notice versions: + * + * - OptimismCrossDomainForwarder 0.1.0: initial release + * - OptimismCrossDomainForwarder 1.0.0: Use OZ Address, CrossDomainOwnable + * + * @inheritdoc TypeAndVersionInterface + */ + function typeAndVersion() external pure virtual override returns (string memory) { + return 
"OptimismCrossDomainForwarder 1.0.0"; + } + + /** + * @dev forwarded only if L2 Messenger calls with `xDomainMessageSender` being the L1 owner address + * @inheritdoc ForwarderInterface + */ + function forward(address target, bytes memory data) external virtual override onlyL1Owner { + Address.functionCall(target, data, "Forwarder call reverted"); + } + + /** + * @notice This is always the address of the OVM_L2CrossDomainMessenger contract + */ + function crossDomainMessenger() public view returns (address) { + return address(OVM_CROSS_DOMAIN_MESSENGER); + } + + /** + * @notice The call MUST come from the L1 owner (via cross-chain message.) Reverts otherwise. + */ + modifier onlyL1Owner() override { + // solhint-disable-next-line custom-errors + require(msg.sender == crossDomainMessenger(), "Sender is not the L2 messenger"); + // solhint-disable-next-line custom-errors + require( + iOVM_CrossDomainMessenger(crossDomainMessenger()).xDomainMessageSender() == l1Owner(), + "xDomain sender is not the L1 owner" + ); + _; + } + + /** + * @notice The call MUST come from the proposed L1 owner (via cross-chain message.) Reverts otherwise. 
+ */ + modifier onlyProposedL1Owner() override { + address messenger = crossDomainMessenger(); + // solhint-disable-next-line custom-errors + require(msg.sender == messenger, "Sender is not the L2 messenger"); + // solhint-disable-next-line custom-errors + require( + iOVM_CrossDomainMessenger(messenger).xDomainMessageSender() == s_l1PendingOwner, + "Must be proposed L1 owner" + ); + _; + } +} diff --git a/contracts/src/v0.8/l2ep/dev/optimism/OptimismCrossDomainGovernor.sol b/contracts/src/v0.8/l2ep/dev/optimism/OptimismCrossDomainGovernor.sol new file mode 100644 index 00000000..1f630a3f --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/optimism/OptimismCrossDomainGovernor.sol @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {DelegateForwarderInterface} from "../interfaces/DelegateForwarderInterface.sol"; +// solhint-disable-next-line no-unused-import +import {ForwarderInterface} from "../interfaces/ForwarderInterface.sol"; + +import {OptimismCrossDomainForwarder} from "./OptimismCrossDomainForwarder.sol"; + +import {iOVM_CrossDomainMessenger} from "../../../vendor/@eth-optimism/contracts/v0.4.7/contracts/optimistic-ethereum/iOVM/bridge/messaging/iOVM_CrossDomainMessenger.sol"; +import {Address} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; + +/** + * @title OptimismCrossDomainGovernor - L1 xDomain account representation (with delegatecall support) for Optimism + * @notice L2 Contract which receives messages from a specific L1 address and transparently forwards them to the destination. 
+ * @dev Any other L2 contract which uses this contract's address as a privileged position, + * can be considered to be simultaneously owned by the `l1Owner` and L2 `owner` + */ +contract OptimismCrossDomainGovernor is DelegateForwarderInterface, OptimismCrossDomainForwarder { + /** + * @notice creates a new Optimism xDomain Forwarder contract + * @param crossDomainMessengerAddr the xDomain bridge messenger (Optimism bridge L2) contract address + * @param l1OwnerAddr the L1 owner address that will be allowed to call the forward fn + * @dev Empty constructor required due to inheriting from abstract contract CrossDomainForwarder + */ + constructor( + iOVM_CrossDomainMessenger crossDomainMessengerAddr, + address l1OwnerAddr + ) OptimismCrossDomainForwarder(crossDomainMessengerAddr, l1OwnerAddr) {} + + /** + * @notice versions: + * + * - OptimismCrossDomainForwarder 1.0.0: initial release + */ + function typeAndVersion() external pure virtual override returns (string memory) { + return "OptimismCrossDomainGovernor 1.0.0"; + } + + /** + * @dev forwarded only if L2 Messenger calls with `msg.sender` being the L1 owner address, or called by the L2 owner + * @inheritdoc ForwarderInterface + */ + function forward(address target, bytes memory data) external override onlyLocalOrCrossDomainOwner { + Address.functionCall(target, data, "Governor call reverted"); + } + + /** + * @dev forwarded only if L2 Messenger calls with `msg.sender` being the L1 owner address, or called by the L2 owner + * @inheritdoc DelegateForwarderInterface + */ + function forwardDelegate(address target, bytes memory data) external override onlyLocalOrCrossDomainOwner { + Address.functionDelegateCall(target, data, "Governor delegatecall reverted"); + } + + /** + * @notice The call MUST come from either the L1 owner (via cross-chain message) or the L2 owner. Reverts otherwise. + */ + modifier onlyLocalOrCrossDomainOwner() { + address messenger = crossDomainMessenger(); + // 1. 
The delegatecall MUST come from either the L1 owner (via cross-chain message) or the L2 owner + // solhint-disable-next-line custom-errors + require(msg.sender == messenger || msg.sender == owner(), "Sender is not the L2 messenger or owner"); + // 2. The L2 Messenger's caller MUST be the L1 Owner + if (msg.sender == messenger) { + // solhint-disable-next-line custom-errors + require( + iOVM_CrossDomainMessenger(messenger).xDomainMessageSender() == l1Owner(), + "xDomain sender is not the L1 owner" + ); + } + _; + } +} diff --git a/contracts/src/v0.8/l2ep/dev/optimism/OptimismSequencerUptimeFeed.sol b/contracts/src/v0.8/l2ep/dev/optimism/OptimismSequencerUptimeFeed.sol new file mode 100644 index 00000000..624f95a8 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/optimism/OptimismSequencerUptimeFeed.sol @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import {AggregatorInterface} from "../../../shared/interfaces/AggregatorInterface.sol"; +import {AggregatorV3Interface} from "../../../shared/interfaces/AggregatorV3Interface.sol"; +import {AggregatorV2V3Interface} from "../../../shared/interfaces/AggregatorV2V3Interface.sol"; +import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol"; +import {OptimismSequencerUptimeFeedInterface} from "./../interfaces/OptimismSequencerUptimeFeedInterface.sol"; +import {SimpleReadAccessController} from "../../../shared/access/SimpleReadAccessController.sol"; +import {IL2CrossDomainMessenger} from "@eth-optimism/contracts/L2/messaging/IL2CrossDomainMessenger.sol"; + +/** + * @title OptimismSequencerUptimeFeed - L2 sequencer uptime status aggregator + * @notice L2 contract that receives status updates from a specific L1 address, + * records a new answer if the status changed + */ +contract OptimismSequencerUptimeFeed is + AggregatorV2V3Interface, + OptimismSequencerUptimeFeedInterface, + TypeAndVersionInterface, + SimpleReadAccessController +{ + /// @dev Round info (for uptime 
history) + struct Round { + bool status; + uint64 startedAt; + uint64 updatedAt; + } + + /// @dev Packed state struct to save sloads + struct FeedState { + uint80 latestRoundId; + bool latestStatus; + uint64 startedAt; + uint64 updatedAt; + } + + /// @notice Sender is not the L2 messenger + error InvalidSender(); + /// @notice Replacement for AggregatorV3Interface "No data present" + error NoDataPresent(); + + event L1SenderTransferred(address indexed from, address indexed to); + /// @dev Emitted when an `updateStatus` call is ignored due to unchanged status or stale timestamp + event UpdateIgnored(bool latestStatus, uint64 latestTimestamp, bool incomingStatus, uint64 incomingTimestamp); + /// @dev Emitted when a updateStatus is called without the status changing + event RoundUpdated(int256 status, uint64 updatedAt); + + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + uint8 public constant override decimals = 0; + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant override description = "L2 Sequencer Uptime Status Feed"; + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + uint256 public constant override version = 1; + + /// @dev L1 address + address private s_l1Sender; + /// @dev s_latestRoundId == 0 means this contract is uninitialized. 
+ FeedState private s_feedState = FeedState({latestRoundId: 0, latestStatus: false, startedAt: 0, updatedAt: 0}); + mapping(uint80 => Round) private s_rounds; + + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + IL2CrossDomainMessenger private immutable s_l2CrossDomainMessenger; + + /** + * @param l1SenderAddress Address of the L1 contract that is permissioned to call this contract + * @param l2CrossDomainMessengerAddr Address of the L2CrossDomainMessenger contract + * @param initialStatus The initial status of the feed + */ + constructor(address l1SenderAddress, address l2CrossDomainMessengerAddr, bool initialStatus) { + _setL1Sender(l1SenderAddress); + s_l2CrossDomainMessenger = IL2CrossDomainMessenger(l2CrossDomainMessengerAddr); + uint64 timestamp = uint64(block.timestamp); + + // Initialise roundId == 1 as the first round + _recordRound(1, initialStatus, timestamp); + } + + /** + * @notice Check if a roundId is valid in this current contract state + * @dev Mainly used for AggregatorV2V3Interface functions + * @param roundId Round ID to check + */ + function _isValidRound(uint256 roundId) private view returns (bool) { + return roundId > 0 && roundId <= type(uint80).max && s_feedState.latestRoundId >= roundId; + } + + /** + * @notice versions: + * + * - OptimismSequencerUptimeFeed 1.0.0: initial release + * + * @inheritdoc TypeAndVersionInterface + */ + function typeAndVersion() external pure virtual override returns (string memory) { + return "OptimismSequencerUptimeFeed 1.0.0"; + } + + /// @return L1 sender address + function l1Sender() public view virtual returns (address) { + return s_l1Sender; + } + + /** + * @notice Set the allowed L1 sender for this contract to a new L1 sender + * @dev Can be disabled by setting the L1 sender as `address(0)`. Accessible only by owner. 
+ * @param to new L1 sender that will be allowed to call `updateStatus` on this contract + */ + function transferL1Sender(address to) external virtual onlyOwner { + _setL1Sender(to); + } + + /// @notice internal method that stores the L1 sender + function _setL1Sender(address to) private { + address from = s_l1Sender; + if (from != to) { + s_l1Sender = to; + emit L1SenderTransferred(from, to); + } + } + + /** + * @dev Returns an AggregatorV2V3Interface compatible answer from status flag + * + * @param status The status flag to convert to an aggregator-compatible answer + */ + function _getStatusAnswer(bool status) private pure returns (int256) { + return status ? int256(1) : int256(0); + } + + /** + * @notice Helper function to record a round and set the latest feed state. + * + * @param roundId The round ID to record + * @param status Sequencer status + * @param timestamp The L1 block timestamp of status update + */ + function _recordRound(uint80 roundId, bool status, uint64 timestamp) private { + uint64 updatedAt = uint64(block.timestamp); + Round memory nextRound = Round(status, timestamp, updatedAt); + FeedState memory feedState = FeedState(roundId, status, timestamp, updatedAt); + + s_rounds[roundId] = nextRound; + s_feedState = feedState; + + emit NewRound(roundId, msg.sender, timestamp); + emit AnswerUpdated(_getStatusAnswer(status), roundId, timestamp); + } + + /** + * @notice Helper function to update when a round was last updated + * + * @param roundId The round ID to update + * @param status Sequencer status + */ + function _updateRound(uint80 roundId, bool status) private { + uint64 updatedAt = uint64(block.timestamp); + s_rounds[roundId].updatedAt = updatedAt; + s_feedState.updatedAt = updatedAt; + emit RoundUpdated(_getStatusAnswer(status), updatedAt); + } + + /** + * @notice Record a new status and timestamp if it has changed since the last round. + * @dev This function will revert if not called from `l1Sender` via the L1->L2 messenger. 
+ * + * @param status Sequencer status + * @param timestamp Block timestamp of status update + */ + function updateStatus(bool status, uint64 timestamp) external override { + FeedState memory feedState = s_feedState; + if ( + msg.sender != address(s_l2CrossDomainMessenger) || s_l2CrossDomainMessenger.xDomainMessageSender() != s_l1Sender + ) { + revert InvalidSender(); + } + + // Ignore if latest recorded timestamp is newer + if (feedState.startedAt > timestamp) { + emit UpdateIgnored(feedState.latestStatus, feedState.startedAt, status, timestamp); + return; + } + + if (feedState.latestStatus == status) { + _updateRound(feedState.latestRoundId, status); + } else { + feedState.latestRoundId += 1; + _recordRound(feedState.latestRoundId, status, timestamp); + } + } + + /// @inheritdoc AggregatorInterface + function latestAnswer() external view override checkAccess returns (int256) { + FeedState memory feedState = s_feedState; + return _getStatusAnswer(feedState.latestStatus); + } + + /// @inheritdoc AggregatorInterface + function latestTimestamp() external view override checkAccess returns (uint256) { + FeedState memory feedState = s_feedState; + return feedState.startedAt; + } + + /// @inheritdoc AggregatorInterface + function latestRound() external view override checkAccess returns (uint256) { + FeedState memory feedState = s_feedState; + return feedState.latestRoundId; + } + + /// @inheritdoc AggregatorInterface + function getAnswer(uint256 roundId) external view override checkAccess returns (int256) { + if (_isValidRound(roundId)) { + return _getStatusAnswer(s_rounds[uint80(roundId)].status); + } + + revert NoDataPresent(); + } + + /// @inheritdoc AggregatorInterface + function getTimestamp(uint256 roundId) external view override checkAccess returns (uint256) { + if (_isValidRound(roundId)) { + return s_rounds[uint80(roundId)].startedAt; + } + + revert NoDataPresent(); + } + + /// @inheritdoc AggregatorV3Interface + function getRoundData( + uint80 _roundId + ) + 
public + view + override + checkAccess + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + if (_isValidRound(_roundId)) { + Round memory round = s_rounds[_roundId]; + answer = _getStatusAnswer(round.status); + startedAt = uint256(round.startedAt); + roundId = _roundId; + updatedAt = uint256(round.updatedAt); + answeredInRound = roundId; + } else { + revert NoDataPresent(); + } + return (roundId, answer, startedAt, updatedAt, answeredInRound); + } + + /// @inheritdoc AggregatorV3Interface + function latestRoundData() + external + view + override + checkAccess + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + FeedState memory feedState = s_feedState; + + roundId = feedState.latestRoundId; + answer = _getStatusAnswer(feedState.latestStatus); + startedAt = feedState.startedAt; + updatedAt = feedState.updatedAt; + answeredInRound = roundId; + return (roundId, answer, startedAt, updatedAt, answeredInRound); + } +} diff --git a/contracts/src/v0.8/l2ep/dev/optimism/OptimismValidator.sol b/contracts/src/v0.8/l2ep/dev/optimism/OptimismValidator.sol new file mode 100644 index 00000000..cabaefba --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/optimism/OptimismValidator.sol @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {AggregatorValidatorInterface} from "../../../shared/interfaces/AggregatorValidatorInterface.sol"; +import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol"; +import {OptimismSequencerUptimeFeedInterface} from "./../interfaces/OptimismSequencerUptimeFeedInterface.sol"; + +import {SimpleWriteAccessController} from "../../../shared/access/SimpleWriteAccessController.sol"; + +import {IL1CrossDomainMessenger} from "@eth-optimism/contracts/L1/messaging/IL1CrossDomainMessenger.sol"; + +/** + * @title OptimismValidator - makes cross chain call to update the Sequencer Uptime Feed on L2 
+ */ +contract OptimismValidator is TypeAndVersionInterface, AggregatorValidatorInterface, SimpleWriteAccessController { + int256 private constant ANSWER_SEQ_OFFLINE = 1; + uint32 private s_gasLimit; + + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + address public immutable L1_CROSS_DOMAIN_MESSENGER_ADDRESS; + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + address public immutable L2_UPTIME_FEED_ADDR; + + /** + * @notice emitted when gas cost to spend on L2 is updated + * @param gasLimit updated gas cost + */ + event GasLimitUpdated(uint32 gasLimit); + + /** + * @param l1CrossDomainMessengerAddress address the L1CrossDomainMessenger contract address + * @param l2UptimeFeedAddr the address of the OptimismSequencerUptimeFeed contract address + * @param gasLimit the gasLimit to use for sending a message from L1 to L2 + */ + constructor(address l1CrossDomainMessengerAddress, address l2UptimeFeedAddr, uint32 gasLimit) { + // solhint-disable-next-line custom-errors + require(l1CrossDomainMessengerAddress != address(0), "Invalid xDomain Messenger address"); + // solhint-disable-next-line custom-errors + require(l2UptimeFeedAddr != address(0), "Invalid OptimismSequencerUptimeFeed contract address"); + L1_CROSS_DOMAIN_MESSENGER_ADDRESS = l1CrossDomainMessengerAddress; + L2_UPTIME_FEED_ADDR = l2UptimeFeedAddr; + s_gasLimit = gasLimit; + } + + /** + * @notice versions: + * + * - OptimismValidator 0.1.0: initial release + * - OptimismValidator 1.0.0: change target of L2 sequencer status update + * - now calls `updateStatus` on an L2 OptimismSequencerUptimeFeed contract instead of + * directly calling the Flags contract + * + * @inheritdoc TypeAndVersionInterface + */ + function typeAndVersion() external pure virtual override returns (string memory) { + return "OptimismValidator 1.0.0"; + } + + /** + * @notice sets the new gas cost to spend when sending cross chain message + * @param gasLimit the updated gas 
cost + */ + function setGasLimit(uint32 gasLimit) external onlyOwner { + s_gasLimit = gasLimit; + emit GasLimitUpdated(gasLimit); + } + + /** + * @notice fetches the gas cost of sending a cross chain message + */ + function getGasLimit() external view returns (uint32) { + return s_gasLimit; + } + + /** + * @notice validate method sends an xDomain L2 tx to update Uptime Feed contract on L2. + * @dev A message is sent using the L1CrossDomainMessenger. This method is accessed controlled. + * @param currentAnswer new aggregator answer - value of 1 considers the sequencer offline. + */ + function validate( + uint256 /* previousRoundId */, + int256 /* previousAnswer */, + uint256 /* currentRoundId */, + int256 currentAnswer + ) external override checkAccess returns (bool) { + // Encode the OptimismSequencerUptimeFeed call + bytes4 selector = OptimismSequencerUptimeFeedInterface.updateStatus.selector; + bool status = currentAnswer == ANSWER_SEQ_OFFLINE; + uint64 timestamp = uint64(block.timestamp); + // Encode `status` and `timestamp` + bytes memory message = abi.encodeWithSelector(selector, status, timestamp); + // Make the xDomain call + IL1CrossDomainMessenger(L1_CROSS_DOMAIN_MESSENGER_ADDRESS).sendMessage( + L2_UPTIME_FEED_ADDR, // target + message, + s_gasLimit + ); + // return success + return true; + } +} diff --git a/contracts/src/v0.8/l2ep/dev/scroll/ScrollCrossDomainForwarder.sol b/contracts/src/v0.8/l2ep/dev/scroll/ScrollCrossDomainForwarder.sol new file mode 100644 index 00000000..cc9d4c94 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/scroll/ScrollCrossDomainForwarder.sol @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol"; +import {ForwarderInterface} from "../interfaces/ForwarderInterface.sol"; + +import {CrossDomainForwarder} from "../CrossDomainForwarder.sol"; +import {CrossDomainOwnable} from "../CrossDomainOwnable.sol"; + +import 
{IScrollMessenger} from "@scroll-tech/contracts/libraries/IScrollMessenger.sol"; +import {Address} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; + +/// @title ScrollCrossDomainForwarder - L1 xDomain account representation +/// @notice L2 Contract which receives messages from a specific L1 address and transparently forwards them to the destination. +/// @dev Any other L2 contract which uses this contract's address as a privileged position, +/// can be considered to be owned by the `l1Owner` +contract ScrollCrossDomainForwarder is TypeAndVersionInterface, CrossDomainForwarder { + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant override typeAndVersion = "ScrollCrossDomainForwarder 1.0.0"; + + address internal immutable i_scrollCrossDomainMessenger; + + /// @param crossDomainMessengerAddr the xDomain bridge messenger (Scroll bridge L2) contract address + /// @param l1OwnerAddr the L1 owner address that will be allowed to call the forward fn + constructor(IScrollMessenger crossDomainMessengerAddr, address l1OwnerAddr) CrossDomainOwnable(l1OwnerAddr) { + // solhint-disable-next-line custom-errors + require(address(crossDomainMessengerAddr) != address(0), "Invalid xDomain Messenger address"); + i_scrollCrossDomainMessenger = address(crossDomainMessengerAddr); + } + + /// @dev forwarded only if L2 Messenger calls with `xDomainMessageSender` being the L1 owner address + /// @inheritdoc ForwarderInterface + function forward(address target, bytes memory data) external override onlyL1Owner { + Address.functionCall(target, data, "Forwarder call reverted"); + } + + /// @notice This is always the address of the Scroll Cross Domain Messenger contract + function crossDomainMessenger() external view returns (address) { + return address(i_scrollCrossDomainMessenger); + } + + /// @notice The call MUST come from the L1 owner (via cross-chain message.) Reverts otherwise. 
+ modifier onlyL1Owner() override { + // solhint-disable-next-line custom-errors + require(msg.sender == i_scrollCrossDomainMessenger, "Sender is not the L2 messenger"); + // solhint-disable-next-line custom-errors + require( + IScrollMessenger(i_scrollCrossDomainMessenger).xDomainMessageSender() == l1Owner(), + "xDomain sender is not the L1 owner" + ); + _; + } + + /// @notice The call MUST come from the proposed L1 owner (via cross-chain message.) Reverts otherwise. + modifier onlyProposedL1Owner() override { + // solhint-disable-next-line custom-errors + require(msg.sender == i_scrollCrossDomainMessenger, "Sender is not the L2 messenger"); + // solhint-disable-next-line custom-errors + require( + IScrollMessenger(i_scrollCrossDomainMessenger).xDomainMessageSender() == s_l1PendingOwner, + "Must be proposed L1 owner" + ); + _; + } +} diff --git a/contracts/src/v0.8/l2ep/dev/scroll/ScrollCrossDomainGovernor.sol b/contracts/src/v0.8/l2ep/dev/scroll/ScrollCrossDomainGovernor.sol new file mode 100644 index 00000000..45ef52d5 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/scroll/ScrollCrossDomainGovernor.sol @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol"; +import {DelegateForwarderInterface} from "../interfaces/DelegateForwarderInterface.sol"; +// solhint-disable-next-line no-unused-import +import {ForwarderInterface} from "../interfaces/ForwarderInterface.sol"; + +import {CrossDomainForwarder} from "../CrossDomainForwarder.sol"; +import {CrossDomainOwnable} from "../CrossDomainOwnable.sol"; + +import {IScrollMessenger} from "@scroll-tech/contracts/libraries/IScrollMessenger.sol"; +import {Address} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol"; + +/// @title ScrollCrossDomainGovernor - L1 xDomain account representation (with delegatecall support) for Scroll +/// @notice L2 Contract which receives messages from a 
specific L1 address and transparently forwards them to the destination. +/// @dev Any other L2 contract which uses this contract's address as a privileged position, +/// can be considered to be simultaneously owned by the `l1Owner` and L2 `owner` +contract ScrollCrossDomainGovernor is DelegateForwarderInterface, TypeAndVersionInterface, CrossDomainForwarder { + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant override typeAndVersion = "ScrollCrossDomainGovernor 1.0.0"; + + address internal immutable i_scrollCrossDomainMessenger; + + /// @param crossDomainMessengerAddr the xDomain bridge messenger (Scroll bridge L2) contract address + /// @param l1OwnerAddr the L1 owner address that will be allowed to call the forward fn + constructor(IScrollMessenger crossDomainMessengerAddr, address l1OwnerAddr) CrossDomainOwnable(l1OwnerAddr) { + // solhint-disable-next-line custom-errors + require(address(crossDomainMessengerAddr) != address(0), "Invalid xDomain Messenger address"); + i_scrollCrossDomainMessenger = address(crossDomainMessengerAddr); + } + + /// @inheritdoc ForwarderInterface + /// @dev forwarded only if L2 Messenger calls with `msg.sender` being the L1 owner address, or called by the L2 owner + function forward(address target, bytes memory data) external override onlyLocalOrCrossDomainOwner { + Address.functionCall(target, data, "Governor call reverted"); + } + + /// @inheritdoc DelegateForwarderInterface + /// @dev forwarded only if L2 Messenger calls with `msg.sender` being the L1 owner address, or called by the L2 owner + function forwardDelegate(address target, bytes memory data) external override onlyLocalOrCrossDomainOwner { + Address.functionDelegateCall(target, data, "Governor delegatecall reverted"); + } + + /// @notice The address of the Scroll Cross Domain Messenger contract + function crossDomainMessenger() external view returns (address) { + return address(i_scrollCrossDomainMessenger); + } + 
+ /// @notice The call MUST come from the L1 owner (via cross-chain message.) Reverts otherwise. + modifier onlyL1Owner() override { + // solhint-disable-next-line custom-errors + require(msg.sender == i_scrollCrossDomainMessenger, "Sender is not the L2 messenger"); + // solhint-disable-next-line custom-errors + require( + IScrollMessenger(i_scrollCrossDomainMessenger).xDomainMessageSender() == l1Owner(), + "xDomain sender is not the L1 owner" + ); + _; + } + + /// @notice The call MUST come from either the L1 owner (via cross-chain message) or the L2 owner. Reverts otherwise. + modifier onlyLocalOrCrossDomainOwner() { + // 1. The delegatecall MUST come from either the L1 owner (via cross-chain message) or the L2 owner + // solhint-disable-next-line custom-errors + require( + msg.sender == i_scrollCrossDomainMessenger || msg.sender == owner(), + "Sender is not the L2 messenger or owner" + ); + // 2. The L2 Messenger's caller MUST be the L1 Owner + if (msg.sender == i_scrollCrossDomainMessenger) { + // solhint-disable-next-line custom-errors + require( + IScrollMessenger(i_scrollCrossDomainMessenger).xDomainMessageSender() == l1Owner(), + "xDomain sender is not the L1 owner" + ); + } + _; + } + + /// @notice The call MUST come from the proposed L1 owner (via cross-chain message.) Reverts otherwise. 
+ modifier onlyProposedL1Owner() override { + // solhint-disable-next-line custom-errors + require(msg.sender == i_scrollCrossDomainMessenger, "Sender is not the L2 messenger"); + // solhint-disable-next-line custom-errors + require( + IScrollMessenger(i_scrollCrossDomainMessenger).xDomainMessageSender() == s_l1PendingOwner, + "Must be proposed L1 owner" + ); + _; + } +} diff --git a/contracts/src/v0.8/l2ep/dev/scroll/ScrollSequencerUptimeFeed.sol b/contracts/src/v0.8/l2ep/dev/scroll/ScrollSequencerUptimeFeed.sol new file mode 100644 index 00000000..14822d0b --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/scroll/ScrollSequencerUptimeFeed.sol @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {ScrollSequencerUptimeFeedInterface} from "../interfaces/ScrollSequencerUptimeFeedInterface.sol"; +import {AggregatorInterface} from "../../../shared/interfaces/AggregatorInterface.sol"; +import {AggregatorV3Interface} from "../../../shared/interfaces/AggregatorV3Interface.sol"; +import {AggregatorV2V3Interface} from "../../../shared/interfaces/AggregatorV2V3Interface.sol"; +import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol"; + +import {SimpleReadAccessController} from "../../../shared/access/SimpleReadAccessController.sol"; + +import {IL2ScrollMessenger} from "@scroll-tech/contracts/L2/IL2ScrollMessenger.sol"; + +/// @title ScrollSequencerUptimeFeed - L2 sequencer uptime status aggregator +/// @notice L2 contract that receives status updates, and records a new answer if the status changed +contract ScrollSequencerUptimeFeed is + AggregatorV2V3Interface, + ScrollSequencerUptimeFeedInterface, + TypeAndVersionInterface, + SimpleReadAccessController +{ + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant override typeAndVersion = "ScrollSequencerUptimeFeed 1.0.0"; + + /// @dev Round info (for uptime history) + struct Round { + bool status; + uint64 
startedAt; + uint64 updatedAt; + } + + /// @dev Packed state struct to save sloads + struct FeedState { + uint80 latestRoundId; + bool latestStatus; + uint64 startedAt; + uint64 updatedAt; + } + + /// @notice Sender is not the L2 messenger + error InvalidSender(); + /// @notice Replacement for AggregatorV3Interface "No data present" + error NoDataPresent(); + /// @notice Address must not be the zero address + error ZeroAddress(); + + event L1SenderTransferred(address indexed from, address indexed to); + /// @dev Emitted when an `updateStatus` call is ignored due to unchanged status or stale timestamp + event UpdateIgnored(bool latestStatus, uint64 latestTimestamp, bool incomingStatus, uint64 incomingTimestamp); + /// @dev Emitted when an updateStatus is called without the status changing + event RoundUpdated(int256 status, uint64 updatedAt); + + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + uint8 public constant override decimals = 0; + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant override description = "L2 Sequencer Uptime Status Feed"; + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + uint256 public constant override version = 1; + + /// @dev L1 address + address private s_l1Sender; + /// @dev s_latestRoundId == 0 means this contract is uninitialized. 
+ FeedState private s_feedState = FeedState({latestRoundId: 0, latestStatus: false, startedAt: 0, updatedAt: 0}); + mapping(uint80 roundId => Round round) private s_rounds; + + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + IL2ScrollMessenger private immutable s_l2CrossDomainMessenger; + + /// @param l1SenderAddress Address of the L1 contract that is permissioned to call this contract + /// @param l2CrossDomainMessengerAddr Address of the L2CrossDomainMessenger contract + /// @param initialStatus The initial status of the feed + constructor(address l1SenderAddress, address l2CrossDomainMessengerAddr, bool initialStatus) { + if (l2CrossDomainMessengerAddr == address(0)) { + revert ZeroAddress(); + } + + _setL1Sender(l1SenderAddress); + s_l2CrossDomainMessenger = IL2ScrollMessenger(l2CrossDomainMessengerAddr); + + // Initialise roundId == 1 as the first round + _recordRound(1, initialStatus, uint64(block.timestamp)); + } + + /// @notice Check if a roundId is valid in this current contract state + /// @dev Mainly used for AggregatorV2V3Interface functions + /// @param roundId Round ID to check + function _isValidRound(uint256 roundId) private view returns (bool) { + return roundId > 0 && roundId <= type(uint80).max && s_feedState.latestRoundId >= roundId; + } + + /// @return L1 sender address + function l1Sender() public view virtual returns (address) { + return s_l1Sender; + } + + /// @notice Set the allowed L1 sender for this contract to a new L1 sender + /// @dev Can be disabled by setting the L1 sender as `address(0)`. Accessible only by owner. 
+ /// @param to new L1 sender that will be allowed to call `updateStatus` on this contract + function transferL1Sender(address to) external virtual onlyOwner { + _setL1Sender(to); + } + + /// @notice internal method that stores the L1 sender + function _setL1Sender(address to) private { + address from = s_l1Sender; + if (from != to) { + s_l1Sender = to; + emit L1SenderTransferred(from, to); + } + } + + /// @dev Returns an AggregatorV2V3Interface compatible answer from status flag + /// @param status The status flag to convert to an aggregator-compatible answer + function _getStatusAnswer(bool status) private pure returns (int256) { + return status ? int256(1) : int256(0); + } + + /// @notice Helper function to record a round and set the latest feed state. + /// @param roundId The round ID to record + /// @param status Sequencer status + /// @param timestamp The L1 block timestamp of status update + function _recordRound(uint80 roundId, bool status, uint64 timestamp) private { + s_feedState = FeedState(roundId, status, timestamp, uint64(block.timestamp)); + s_rounds[roundId] = Round(status, timestamp, uint64(block.timestamp)); + + emit NewRound(roundId, msg.sender, timestamp); + emit AnswerUpdated(_getStatusAnswer(status), roundId, timestamp); + } + + /// @notice Helper function to update when a round was last updated + /// @param roundId The round ID to update + /// @param status Sequencer status + function _updateRound(uint80 roundId, bool status) private { + s_feedState.updatedAt = uint64(block.timestamp); + s_rounds[roundId].updatedAt = uint64(block.timestamp); + emit RoundUpdated(_getStatusAnswer(status), uint64(block.timestamp)); + } + + /// @notice Record a new status and timestamp if it has changed since the last round. + /// @dev This function will revert if not called from `l1Sender` via the L1->L2 messenger. 
+ /// + /// @param status Sequencer status + /// @param timestamp Block timestamp of status update + function updateStatus(bool status, uint64 timestamp) external override { + FeedState memory feedState = s_feedState; + + if ( + msg.sender != address(s_l2CrossDomainMessenger) || s_l2CrossDomainMessenger.xDomainMessageSender() != s_l1Sender + ) { + revert InvalidSender(); + } + + // Ignore if latest recorded timestamp is newer + if (feedState.startedAt > timestamp) { + emit UpdateIgnored(feedState.latestStatus, feedState.startedAt, status, timestamp); + return; + } + + if (feedState.latestStatus == status) { + _updateRound(feedState.latestRoundId, status); + } else { + feedState.latestRoundId += 1; + _recordRound(feedState.latestRoundId, status, timestamp); + } + } + + /// @inheritdoc AggregatorInterface + function latestAnswer() external view override checkAccess returns (int256) { + return _getStatusAnswer(s_feedState.latestStatus); + } + + /// @inheritdoc AggregatorInterface + function latestTimestamp() external view override checkAccess returns (uint256) { + return s_feedState.startedAt; + } + + /// @inheritdoc AggregatorInterface + function latestRound() external view override checkAccess returns (uint256) { + return s_feedState.latestRoundId; + } + + /// @inheritdoc AggregatorInterface + function getAnswer(uint256 roundId) external view override checkAccess returns (int256) { + if (!_isValidRound(roundId)) { + revert NoDataPresent(); + } + + return _getStatusAnswer(s_rounds[uint80(roundId)].status); + } + + /// @inheritdoc AggregatorInterface + function getTimestamp(uint256 roundId) external view override checkAccess returns (uint256) { + if (!_isValidRound(roundId)) { + revert NoDataPresent(); + } + + return s_rounds[uint80(roundId)].startedAt; + } + + /// @inheritdoc AggregatorV3Interface + function getRoundData( + uint80 _roundId + ) + public + view + override + checkAccess + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, 
uint80 answeredInRound) + { + if (!_isValidRound(_roundId)) { + revert NoDataPresent(); + } + + Round memory round = s_rounds[_roundId]; + + return (_roundId, _getStatusAnswer(round.status), uint256(round.startedAt), uint256(round.updatedAt), _roundId); + } + + /// @inheritdoc AggregatorV3Interface + function latestRoundData() + external + view + override + checkAccess + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + FeedState memory feedState = s_feedState; + + return ( + feedState.latestRoundId, + _getStatusAnswer(feedState.latestStatus), + feedState.startedAt, + feedState.updatedAt, + feedState.latestRoundId + ); + } +} diff --git a/contracts/src/v0.8/l2ep/dev/scroll/ScrollValidator.sol b/contracts/src/v0.8/l2ep/dev/scroll/ScrollValidator.sol new file mode 100644 index 00000000..eefb23c5 --- /dev/null +++ b/contracts/src/v0.8/l2ep/dev/scroll/ScrollValidator.sol @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {AggregatorValidatorInterface} from "../../../shared/interfaces/AggregatorValidatorInterface.sol"; +import {TypeAndVersionInterface} from "../../../interfaces/TypeAndVersionInterface.sol"; +import {ScrollSequencerUptimeFeedInterface} from "../interfaces/ScrollSequencerUptimeFeedInterface.sol"; + +import {SimpleWriteAccessController} from "../../../shared/access/SimpleWriteAccessController.sol"; + +import {IL1ScrollMessenger} from "@scroll-tech/contracts/L1/IL1ScrollMessenger.sol"; + +/// @title ScrollValidator - makes cross chain call to update the Sequencer Uptime Feed on L2 +contract ScrollValidator is TypeAndVersionInterface, AggregatorValidatorInterface, SimpleWriteAccessController { + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + address public immutable L1_CROSS_DOMAIN_MESSENGER_ADDRESS; + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + address public immutable L2_UPTIME_FEED_ADDR; + + // 
solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant override typeAndVersion = "ScrollValidator 1.0.0"; + int256 private constant ANSWER_SEQ_OFFLINE = 1; + uint32 private s_gasLimit; + + /// @notice emitted when gas cost to spend on L2 is updated + /// @param gasLimit updated gas cost + event GasLimitUpdated(uint32 gasLimit); + + /// @param l1CrossDomainMessengerAddress the L1CrossDomainMessenger contract address + /// @param l2UptimeFeedAddr the address of the ScrollSequencerUptimeFeed contract + /// @param gasLimit the gasLimit to use for sending a message from L1 to L2 + constructor(address l1CrossDomainMessengerAddress, address l2UptimeFeedAddr, uint32 gasLimit) { + // solhint-disable-next-line custom-errors + require(l1CrossDomainMessengerAddress != address(0), "Invalid xDomain Messenger address"); + // solhint-disable-next-line custom-errors + require(l2UptimeFeedAddr != address(0), "Invalid ScrollSequencerUptimeFeed contract address"); + L1_CROSS_DOMAIN_MESSENGER_ADDRESS = l1CrossDomainMessengerAddress; + L2_UPTIME_FEED_ADDR = l2UptimeFeedAddr; + s_gasLimit = gasLimit; + } + + /// @notice sets the new gas cost to spend when sending cross chain message + /// @param gasLimit the updated gas cost + function setGasLimit(uint32 gasLimit) external onlyOwner { + s_gasLimit = gasLimit; + emit GasLimitUpdated(gasLimit); + } + + /// @notice fetches the gas cost of sending a cross chain message + function getGasLimit() external view returns (uint32) { + return s_gasLimit; + } + + /// @notice validate method sends an xDomain L2 tx to update Uptime Feed contract on L2. + /// @dev A message is sent using the L1CrossDomainMessenger. This method is access controlled. + /// @param currentAnswer new aggregator answer - value of 1 considers the sequencer offline. 
+ function validate( + uint256 /* previousRoundId */, + int256 /* previousAnswer */, + uint256 /* currentRoundId */, + int256 currentAnswer + ) external override checkAccess returns (bool) { + // Make the xDomain call + IL1ScrollMessenger(L1_CROSS_DOMAIN_MESSENGER_ADDRESS).sendMessage( + L2_UPTIME_FEED_ADDR, + 0, + abi.encodeWithSelector( + ScrollSequencerUptimeFeedInterface.updateStatus.selector, + currentAnswer == ANSWER_SEQ_OFFLINE, + uint64(block.timestamp) + ), + s_gasLimit + ); + + // return success + return true; + } +} diff --git a/contracts/src/v0.8/l2ep/test/mocks/MockAggregatorV2V3.sol b/contracts/src/v0.8/l2ep/test/mocks/MockAggregatorV2V3.sol new file mode 100644 index 00000000..c4e2f710 --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/mocks/MockAggregatorV2V3.sol @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +import {AggregatorV2V3Interface} from "../../../shared/interfaces/AggregatorV2V3Interface.sol"; + +contract MockAggregatorV2V3 is AggregatorV2V3Interface { + function latestAnswer() external pure returns (int256) { + return 0; + } + + function latestTimestamp() external pure returns (uint256) { + return 0; + } + + function latestRound() external pure returns (uint256) { + return 0; + } + + function getAnswer(uint256) external pure returns (int256) { + return 0; + } + + function getTimestamp(uint256 roundId) external pure returns (uint256) { + return roundId; + } + + function decimals() external pure returns (uint8) { + return 0; + } + + function description() external pure returns (string memory) { + return ""; + } + + function version() external pure returns (uint256) { + return 0; + } + + function getRoundData( + uint80 + ) + external + pure + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + return (0, 0, 0, 0, 0); + } + + function latestRoundData() + external + pure + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 
answeredInRound) + { + return (73786976294838220258, 96800000000, 163826896, 1638268960, 73786976294838220258); + } +} diff --git a/contracts/src/v0.8/l2ep/test/mocks/optimism/MockOVMCrossDomainMessenger.sol b/contracts/src/v0.8/l2ep/test/mocks/optimism/MockOVMCrossDomainMessenger.sol new file mode 100644 index 00000000..3a45cba3 --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/mocks/optimism/MockOVMCrossDomainMessenger.sol @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: MIT + +pragma solidity >=0.7.6 <0.9.0; + +import {iOVM_CrossDomainMessenger} from "../../../../vendor/@eth-optimism/contracts/v0.4.7/contracts/optimistic-ethereum/iOVM/bridge/messaging/iOVM_CrossDomainMessenger.sol"; + +import {Address} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Address.sol"; + +contract MockOVMCrossDomainMessenger is iOVM_CrossDomainMessenger { + address internal s_mockMessageSender; + + constructor(address sender) { + s_mockMessageSender = sender; + } + + function xDomainMessageSender() external view override returns (address) { + return s_mockMessageSender; + } + + function _setMockMessageSender(address sender) external { + s_mockMessageSender = sender; + } + + /** + * Sends a cross domain message to the target messenger. + * @param _target Target contract address. + * @param _message Message to send to the target. 
+ */ + function sendMessage(address _target, bytes calldata _message, uint32) external override { + Address.functionCall(_target, _message, "sendMessage reverted"); + } +} diff --git a/contracts/src/v0.8/l2ep/test/mocks/scroll/MockScrollCrossDomainMessenger.sol b/contracts/src/v0.8/l2ep/test/mocks/scroll/MockScrollCrossDomainMessenger.sol new file mode 100644 index 00000000..37244910 --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/mocks/scroll/MockScrollCrossDomainMessenger.sol @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.16; + +import {IScrollMessenger} from "@scroll-tech/contracts/libraries/IScrollMessenger.sol"; + +import {Address} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Address.sol"; + +contract MockScrollCrossDomainMessenger is IScrollMessenger { + address internal s_mockMessageSender; + + constructor(address sender) { + s_mockMessageSender = sender; + } + + function xDomainMessageSender() external view override returns (address) { + return s_mockMessageSender; + } + + function _setMockMessageSender(address sender) external { + s_mockMessageSender = sender; + } + + /// @notice Send cross chain message from L1 to L2 or L2 to L1. + /// @param _target The address of the account that receives the message. + /// @param _message The content of the message. + function sendMessage(address _target, uint256, bytes calldata _message, uint256) external payable override { + Address.functionCall(_target, _message, "sendMessage reverted"); + } + + /// @notice Send cross chain message from L1 to L2 or L2 to L1. + /// @param _target The address of the account that receives the message. + /// @param _message The content of the message. 
+ function sendMessage(address _target, uint256, bytes calldata _message, uint256, address) external payable override { + Address.functionCall(_target, _message, "sendMessage reverted"); + } +} diff --git a/contracts/src/v0.8/l2ep/test/mocks/scroll/MockScrollL1CrossDomainMessenger.sol b/contracts/src/v0.8/l2ep/test/mocks/scroll/MockScrollL1CrossDomainMessenger.sol new file mode 100644 index 00000000..e63847d6 --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/mocks/scroll/MockScrollL1CrossDomainMessenger.sol @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +import {IL1ScrollMessenger} from "@scroll-tech/contracts/L1/IL1ScrollMessenger.sol"; + +contract MockScrollL1CrossDomainMessenger is IL1ScrollMessenger { + uint256 private s_nonce; + + function xDomainMessageSender() public pure returns (address) { + return address(0); + } + + function sendMessage( + address _target, + uint256 _value, + bytes calldata _message, + uint256 _gasLimit + ) external payable override { + emit SentMessage(msg.sender, _target, _value, s_nonce, _gasLimit, _message); + s_nonce++; + } + + function sendMessage( + address _target, + uint256 _value, + bytes calldata _message, + uint256 _gasLimit, + address + ) external payable override { + emit SentMessage(msg.sender, _target, _value, s_nonce, _gasLimit, _message); + s_nonce++; + } + + function relayMessageWithProof( + address from, + address to, + uint256 value, + uint256 nonce, + bytes memory message, + L2MessageProof memory proof + ) external override {} + + function replayMessage( + address from, + address to, + uint256 value, + uint256 messageNonce, + bytes memory message, + uint32 newGasLimit, + address refundAddress + ) external payable override {} + + function dropMessage( + address from, + address to, + uint256 value, + uint256 messageNonce, + bytes memory message + ) external override {} +} diff --git a/contracts/src/v0.8/l2ep/test/mocks/scroll/MockScrollL2CrossDomainMessenger.sol 
b/contracts/src/v0.8/l2ep/test/mocks/scroll/MockScrollL2CrossDomainMessenger.sol new file mode 100644 index 00000000..66400b7d --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/mocks/scroll/MockScrollL2CrossDomainMessenger.sol @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +import {IL2ScrollMessenger} from "@scroll-tech/contracts/L2/IL2ScrollMessenger.sol"; + +contract MockScrollL2CrossDomainMessenger is IL2ScrollMessenger { + uint256 private s_nonce; + address private s_sender; + + function xDomainMessageSender() public view returns (address) { + return s_sender; + } + + function sendMessage( + address _target, + uint256 _value, + bytes calldata _message, + uint256 _gasLimit + ) external payable override { + emit SentMessage(msg.sender, _target, _value, s_nonce, _gasLimit, _message); + s_nonce++; + } + + function sendMessage( + address _target, + uint256 _value, + bytes calldata _message, + uint256 _gasLimit, + address + ) external payable override { + emit SentMessage(msg.sender, _target, _value, s_nonce, _gasLimit, _message); + s_nonce++; + } + + function relayMessage( + address from, + address to, + uint256 value, + uint256 nonce, + bytes calldata message + ) external override {} + + /// Needed for backwards compatibility in Hardhat tests + function setSender(address newSender) external { + s_sender = newSender; + } + + /// Needed for backwards compatibility in Hardhat tests + receive() external payable {} +} diff --git a/contracts/src/v0.8/l2ep/test/v1_0_0/L2EPTest.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/L2EPTest.t.sol new file mode 100644 index 00000000..561e32be --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/L2EPTest.t.sol @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {Greeter} from "../../../tests/Greeter.sol"; + +import {MultiSend} from "../../../vendor/MultiSend.sol"; +import {Test} from "forge-std/Test.sol"; + +contract L2EPTest is Test { + /// Helper variable(s) + address 
internal s_strangerAddr = vm.addr(0x1); + address internal s_l1OwnerAddr = vm.addr(0x2); + address internal s_eoaValidator = vm.addr(0x3); + address internal s_deployerAddr = vm.addr(0x4); + + /// @param expectedGasUsage - the expected gas usage + /// @param startGasUsage - the gas usage before the code of interest is run + /// @param finalGasUsage - the gas usage after the code of interest is run + /// @param deviation - the amount of gas that the actual usage is allowed to deviate by (e.g. (expectedGas - deviation) <= actualGasUsage <= (expectedGas + deviation)) + function assertGasUsageIsCloseTo( + uint256 expectedGasUsage, + uint256 startGasUsage, + uint256 finalGasUsage, + uint256 deviation + ) public { + uint256 gasUsed = (startGasUsage - finalGasUsage) * tx.gasprice; + assertLe(gasUsed, expectedGasUsage + deviation); + assertGe(gasUsed, expectedGasUsage - deviation); + } + + /// @param selector - the function selector + /// @param greeterAddr - the address of the Greeter contract + /// @param message - the new greeting message, which will be passed as an argument to Greeter#setGreeting + /// @return a 2-layer encoding such that decoding the first layer provides the CrossDomainForwarder#forward + /// function selector and the corresponding arguments to the forward function, and decoding the + /// second layer provides the Greeter#setGreeting function selector and the corresponding + /// arguments to the set greeting function (which in this case is the input message) + function encodeCrossDomainSetGreetingMsg( + bytes4 selector, + address greeterAddr, + string memory message + ) public pure returns (bytes memory) { + return abi.encodeWithSelector(selector, greeterAddr, abi.encodeWithSelector(Greeter.setGreeting.selector, message)); + } + + /// @param selector - the function selector + /// @param multiSendAddr - the address of the MultiSend contract + /// @param encodedTxs - an encoded list of transactions (e.g. 
abi.encodePacked(encodeMultiSendTx("some data"), ...)) + /// @return a 2-layer encoding such that decoding the first layer provides the CrossDomainGoverner#forwardDelegate + /// function selector and the corresponding arguments to the forwardDelegate function, and decoding the + /// second layer provides the MultiSend#multiSend function selector and the corresponding + /// arguments to the multiSend function (which in this case is the input encodedTxs) + function encodeCrossDomainMultiSendMsg( + bytes4 selector, + address multiSendAddr, + bytes memory encodedTxs + ) public pure returns (bytes memory) { + return + abi.encodeWithSelector(selector, multiSendAddr, abi.encodeWithSelector(MultiSend.multiSend.selector, encodedTxs)); + } + + /// @param greeterAddr - the address of the greeter contract + /// @param data - the transaction data string + /// @return an encoded transaction structured as specified in the MultiSend#multiSend comments + function encodeMultiSendTx(address greeterAddr, bytes memory data) public pure returns (bytes memory) { + bytes memory txData = abi.encodeWithSelector(Greeter.setGreeting.selector, data); + return + abi.encodePacked( + uint8(0), // operation + greeterAddr, // to + uint256(0), // value + uint256(txData.length), // data length + txData // data as bytes + ); + } + + /// @param l1Address - Address on L1 + /// @return an Arbitrum L2 address + function toArbitrumL2AliasAddress(address l1Address) public pure returns (address) { + return address(uint160(l1Address) + uint160(0x1111000000000000000000000000000000001111)); + } +} diff --git a/contracts/src/v0.8/l2ep/test/v1_0_0/arbitrum/ArbitrumCrossDomainForwarder.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/arbitrum/ArbitrumCrossDomainForwarder.t.sol new file mode 100644 index 00000000..be3851c5 --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/arbitrum/ArbitrumCrossDomainForwarder.t.sol @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import 
{ArbitrumCrossDomainForwarder} from "../../../dev/arbitrum/ArbitrumCrossDomainForwarder.sol"; +import {Greeter} from "../../../../tests/Greeter.sol"; +import {L2EPTest} from "../L2EPTest.t.sol"; + +contract ArbitrumCrossDomainForwarderTest is L2EPTest { + /// Helper variable(s) + address internal s_crossDomainMessengerAddr = toArbitrumL2AliasAddress(s_l1OwnerAddr); + address internal s_newOwnerCrossDomainMessengerAddr = toArbitrumL2AliasAddress(s_strangerAddr); + + /// Contracts + ArbitrumCrossDomainForwarder internal s_arbitrumCrossDomainForwarder; + Greeter internal s_greeter; + + /// Events + event L1OwnershipTransferRequested(address indexed from, address indexed to); + event L1OwnershipTransferred(address indexed from, address indexed to); + + /// Setup + function setUp() public { + // Deploys contracts + vm.startPrank(s_l1OwnerAddr); + s_arbitrumCrossDomainForwarder = new ArbitrumCrossDomainForwarder(s_l1OwnerAddr); + s_greeter = new Greeter(address(s_arbitrumCrossDomainForwarder)); + vm.stopPrank(); + } +} + +contract ArbitrumCrossDomainForwarder_Constructor is ArbitrumCrossDomainForwarderTest { + /// @notice it should have been deployed with the correct initial state + function test_InitialState() public { + // it should set the owner correctly + assertEq(s_arbitrumCrossDomainForwarder.owner(), s_l1OwnerAddr); + + // it should set the l1Owner correctly + assertEq(s_arbitrumCrossDomainForwarder.l1Owner(), s_l1OwnerAddr); + + // it should set the crossdomain messenger correctly + assertEq(s_arbitrumCrossDomainForwarder.crossDomainMessenger(), s_crossDomainMessengerAddr); + + // it should set the typeAndVersion correctly + assertEq(s_arbitrumCrossDomainForwarder.typeAndVersion(), "ArbitrumCrossDomainForwarder 1.0.0"); + } +} + +contract ArbitrumCrossDomainForwarder_Forward is ArbitrumCrossDomainForwarderTest { + /// @notice it should not be callable by unknown address + function test_NotCallableByUnknownAddress() public { + vm.startPrank(s_strangerAddr); + 
vm.expectRevert("Sender is not the L2 messenger"); + s_arbitrumCrossDomainForwarder.forward(address(s_greeter), abi.encode("")); + } + + /// @notice it should be callable by crossdomain messenger address / L1 owner + function test_Forward() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_crossDomainMessengerAddr); + + // Defines the cross domain message to send + string memory greeting = "hello"; + + // Sends the message + s_arbitrumCrossDomainForwarder.forward( + address(s_greeter), + abi.encodeWithSelector(s_greeter.setGreeting.selector, greeting) + ); + + // Checks that the greeter got the message + assertEq(s_greeter.greeting(), greeting); + } + + /// @notice it should revert when contract call reverts + function test_ForwardRevert() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_crossDomainMessengerAddr); + + // Sends an invalid message + vm.expectRevert("Invalid greeting length"); + s_arbitrumCrossDomainForwarder.forward( + address(s_greeter), + abi.encodeWithSelector(s_greeter.setGreeting.selector, "") + ); + } +} + +contract ArbitrumCrossDomainForwarder_TransferL1Ownership is ArbitrumCrossDomainForwarderTest { + /// @notice it should not be callable by non-owners + function test_NotCallableByNonOwners() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_arbitrumCrossDomainForwarder.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should not be callable by L2 owner + function test_NotCallableByL2Owner() public { + vm.startPrank(s_l1OwnerAddr); + assertEq(s_arbitrumCrossDomainForwarder.owner(), s_l1OwnerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_arbitrumCrossDomainForwarder.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should be callable by current L1 owner + function test_CallableByL1Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_crossDomainMessengerAddr); + + // Defines the cross domain message to send + 
vm.expectEmit(); + emit L1OwnershipTransferRequested(s_arbitrumCrossDomainForwarder.l1Owner(), s_strangerAddr); + + // Sends the message + s_arbitrumCrossDomainForwarder.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should be callable by current L1 owner to zero address + function test_CallableByL1OwnerOrZeroAddress() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_crossDomainMessengerAddr); + + // Defines the cross domain message to send + vm.expectEmit(); + emit L1OwnershipTransferRequested(s_arbitrumCrossDomainForwarder.l1Owner(), address(0)); + + // Sends the message + s_arbitrumCrossDomainForwarder.transferL1Ownership(address(0)); + } +} + +contract ArbitrumCrossDomainForwarder_AcceptL1Ownership is ArbitrumCrossDomainForwarderTest { + /// @notice it should not be callable by non pending-owners + function test_NotCallableByNonPendingOwners() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_crossDomainMessengerAddr); + + // Sends the message + vm.expectRevert("Must be proposed L1 owner"); + s_arbitrumCrossDomainForwarder.acceptL1Ownership(); + } + + /// @notice it should be callable by pending L1 owner + function test_CallableByPendingL1Owner() public { + // Request ownership transfer + vm.startPrank(s_crossDomainMessengerAddr); + s_arbitrumCrossDomainForwarder.transferL1Ownership(s_strangerAddr); + + // Prepares expected event payload + vm.expectEmit(); + emit L1OwnershipTransferred(s_l1OwnerAddr, s_strangerAddr); + + // Accepts ownership transfer request + vm.startPrank(s_newOwnerCrossDomainMessengerAddr); + s_arbitrumCrossDomainForwarder.acceptL1Ownership(); + + // Asserts that the ownership was actually transferred + assertEq(s_arbitrumCrossDomainForwarder.l1Owner(), s_strangerAddr); + } +} diff --git a/contracts/src/v0.8/l2ep/test/v1_0_0/arbitrum/ArbitrumCrossDomainGovernor.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/arbitrum/ArbitrumCrossDomainGovernor.t.sol new file mode 100644 index 00000000..c5b8adaf --- 
/dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/arbitrum/ArbitrumCrossDomainGovernor.t.sol @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {ArbitrumCrossDomainGovernor} from "../../../dev/arbitrum/ArbitrumCrossDomainGovernor.sol"; +import {Greeter} from "../../../../tests/Greeter.sol"; +import {L2EPTest} from "../L2EPTest.t.sol"; + +import {MultiSend} from "../../../../vendor/MultiSend.sol"; + +contract ArbitrumCrossDomainGovernorTest is L2EPTest { + /// Helper variable(s) + address internal s_crossDomainMessengerAddr = toArbitrumL2AliasAddress(s_l1OwnerAddr); + address internal s_newOwnerCrossDomainMessengerAddr = toArbitrumL2AliasAddress(s_strangerAddr); + + /// Contracts + ArbitrumCrossDomainGovernor internal s_arbitrumCrossDomainGovernor; + MultiSend internal s_multiSend; + Greeter internal s_greeter; + + /// Events + event L1OwnershipTransferRequested(address indexed from, address indexed to); + event L1OwnershipTransferred(address indexed from, address indexed to); + + /// Setup + function setUp() public { + // Deploys contracts + vm.startPrank(s_l1OwnerAddr); + s_arbitrumCrossDomainGovernor = new ArbitrumCrossDomainGovernor(s_l1OwnerAddr); + s_greeter = new Greeter(address(s_arbitrumCrossDomainGovernor)); + s_multiSend = new MultiSend(); + vm.stopPrank(); + } +} + +contract ArbitrumCrossDomainGovernor_Constructor is ArbitrumCrossDomainGovernorTest { + /// @notice it should have been deployed with the correct initial state + function test_InitialState() public { + // it should set the owner correctly + assertEq(s_arbitrumCrossDomainGovernor.owner(), s_l1OwnerAddr); + + // it should set the l1Owner correctly + assertEq(s_arbitrumCrossDomainGovernor.l1Owner(), s_l1OwnerAddr); + + // it should set the crossdomain messenger correctly + assertEq(s_arbitrumCrossDomainGovernor.crossDomainMessenger(), s_crossDomainMessengerAddr); + + // it should set the typeAndVersion correctly + 
assertEq(s_arbitrumCrossDomainGovernor.typeAndVersion(), "ArbitrumCrossDomainGovernor 1.0.0"); + } +} + +contract ArbitrumCrossDomainGovernor_Forward is ArbitrumCrossDomainGovernorTest { + /// @notice it should not be callable by unknown address + function test_NotCallableByUnknownAddress() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger or owner"); + s_arbitrumCrossDomainGovernor.forward(address(s_greeter), abi.encode("")); + } + + /// @notice it should be callable by crossdomain messenger address / L1 owner + function test_Forward() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_crossDomainMessengerAddr); + + // Defines the cross domain message to send + string memory greeting = "hello"; + + // Sends the message + s_arbitrumCrossDomainGovernor.forward( + address(s_greeter), + abi.encodeWithSelector(s_greeter.setGreeting.selector, greeting) + ); + + // Checks that the greeter got the message + assertEq(s_greeter.greeting(), greeting); + } + + /// @notice it should be callable by L2 owner + function test_CallableByL2Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_l1OwnerAddr); + + // Defines the cross domain message to send + string memory greeting = "hello"; + + // Sends the message + s_arbitrumCrossDomainGovernor.forward( + address(s_greeter), + abi.encodeWithSelector(s_greeter.setGreeting.selector, greeting) + ); + + // Checks that the greeter message was updated + assertEq(s_greeter.greeting(), greeting); + } + + /// @notice it should revert when contract call reverts + function test_ForwardRevert() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_l1OwnerAddr); + + // Sends an invalid message + vm.expectRevert("Invalid greeting length"); + s_arbitrumCrossDomainGovernor.forward( + address(s_greeter), + abi.encodeWithSelector(s_greeter.setGreeting.selector, "") + ); + } +} + +contract ArbitrumCrossDomainGovernor_ForwardDelegate is ArbitrumCrossDomainGovernorTest { + /// 
@notice it should not be callable by unknown address + function test_NotCallableByUnknownAddress() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger or owner"); + s_arbitrumCrossDomainGovernor.forwardDelegate(address(s_multiSend), abi.encode("")); + } + + /// @notice it should be callable by crossdomain messenger address / L1 owner + function test_CallableByCrossDomainMessengerAddressOrL1Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_crossDomainMessengerAddr); + + // Sends the message + s_arbitrumCrossDomainGovernor.forwardDelegate( + address(s_multiSend), + abi.encodeWithSelector( + MultiSend.multiSend.selector, + abi.encodePacked(encodeMultiSendTx(address(s_greeter), "foo"), encodeMultiSendTx(address(s_greeter), "bar")) + ) + ); + + // Checks that the greeter message was updated + assertEq(s_greeter.greeting(), "bar"); + } + + /// @notice it should be callable by L2 owner + function test_CallableByL2Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_l1OwnerAddr); + + // Sends the message + s_arbitrumCrossDomainGovernor.forwardDelegate( + address(s_multiSend), + abi.encodeWithSelector( + MultiSend.multiSend.selector, + abi.encodePacked(encodeMultiSendTx(address(s_greeter), "foo"), encodeMultiSendTx(address(s_greeter), "bar")) + ) + ); + + // Checks that the greeter message was updated + assertEq(s_greeter.greeting(), "bar"); + } + + /// @notice it should revert batch when one call fails + function test_RevertsBatchWhenOneCallFails() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_crossDomainMessengerAddr); + + // Sends an invalid message (empty transaction data is not allowed) + vm.expectRevert("Governor delegatecall reverted"); + s_arbitrumCrossDomainGovernor.forwardDelegate( + address(s_multiSend), + abi.encodeWithSelector( + MultiSend.multiSend.selector, + abi.encodePacked(encodeMultiSendTx(address(s_greeter), "foo"), encodeMultiSendTx(address(s_greeter), "")) + ) 
+ ); + + // Checks that the greeter message is unchanged + assertEq(s_greeter.greeting(), ""); + } + + /// @notice it should bubble up revert when contract call reverts + function test_BubbleUpRevert() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_crossDomainMessengerAddr); + + // Sends an invalid message (empty transaction data is not allowed) + vm.expectRevert("Greeter: revert triggered"); + s_arbitrumCrossDomainGovernor.forwardDelegate( + address(s_greeter), + abi.encodeWithSelector(Greeter.triggerRevert.selector) + ); + } +} + +contract ArbitrumCrossDomainGovernor_TransferL1Ownership is ArbitrumCrossDomainGovernorTest { + /// @notice it should not be callable by non-owners + function test_NotCallableByNonOwners() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_arbitrumCrossDomainGovernor.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should not be callable by L2 owner + function test_NotCallableByL2Owner() public { + vm.startPrank(s_l1OwnerAddr); + assertEq(s_arbitrumCrossDomainGovernor.owner(), s_l1OwnerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_arbitrumCrossDomainGovernor.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should be callable by current L1 owner + function test_CallableByL1Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_crossDomainMessengerAddr); + + // Defines the cross domain message to send + vm.expectEmit(); + emit L1OwnershipTransferRequested(s_arbitrumCrossDomainGovernor.l1Owner(), s_strangerAddr); + + // Sends the message + s_arbitrumCrossDomainGovernor.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should be callable by current L1 owner to zero address + function test_CallableByL1OwnerOrZeroAddress() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_crossDomainMessengerAddr); + + // Defines the cross domain message to send + vm.expectEmit(); + emit 
L1OwnershipTransferRequested(s_arbitrumCrossDomainGovernor.l1Owner(), address(0)); + + // Sends the message + s_arbitrumCrossDomainGovernor.transferL1Ownership(address(0)); + } +} + +contract ArbitrumCrossDomainGovernor_AcceptL1Ownership is ArbitrumCrossDomainGovernorTest { + /// @notice it should not be callable by non pending-owners + function test_NotCallableByNonPendingOwners() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_crossDomainMessengerAddr); + + // Sends the message + vm.expectRevert("Must be proposed L1 owner"); + s_arbitrumCrossDomainGovernor.acceptL1Ownership(); + } + + /// @notice it should be callable by pending L1 owner + function test_CallableByPendingL1Owner() public { + // Request ownership transfer + vm.startPrank(s_crossDomainMessengerAddr); + s_arbitrumCrossDomainGovernor.transferL1Ownership(s_strangerAddr); + + // Prepares expected event payload + vm.expectEmit(); + emit L1OwnershipTransferred(s_l1OwnerAddr, s_strangerAddr); + + // Accepts ownership transfer request + vm.startPrank(s_newOwnerCrossDomainMessengerAddr); + s_arbitrumCrossDomainGovernor.acceptL1Ownership(); + + // Asserts that the ownership was actually transferred + assertEq(s_arbitrumCrossDomainGovernor.l1Owner(), s_strangerAddr); + } +} diff --git a/contracts/src/v0.8/l2ep/test/v1_0_0/arbitrum/ArbitrumSequencerUptimeFeed.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/arbitrum/ArbitrumSequencerUptimeFeed.t.sol new file mode 100644 index 00000000..3b9df3bf --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/arbitrum/ArbitrumSequencerUptimeFeed.t.sol @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {SimpleWriteAccessController} from "../../../../shared/access/SimpleWriteAccessController.sol"; +import {ArbitrumSequencerUptimeFeed} from "../../../dev/arbitrum/ArbitrumSequencerUptimeFeed.sol"; +import {MockAggregatorV2V3} from "../../mocks/MockAggregatorV2V3.sol"; +import {FeedConsumer} from 
"../../../../tests/FeedConsumer.sol"; +import {Flags} from "../../../dev/Flags.sol"; +import {L2EPTest} from "../L2EPTest.t.sol"; + +contract ArbitrumSequencerUptimeFeedTest is L2EPTest { + /// Constants + uint256 internal constant GAS_USED_DEVIATION = 100; + + /// Helper variable(s) + address internal s_l2MessengerAddr = toArbitrumL2AliasAddress(s_l1OwnerAddr); + + /// L2EP contracts + ArbitrumSequencerUptimeFeed internal s_arbitrumSequencerUptimeFeed; + SimpleWriteAccessController internal s_accessController; + MockAggregatorV2V3 internal s_l1GasFeed; + Flags internal s_flags; + + /// Events + event UpdateIgnored(bool latestStatus, uint64 latestTimestamp, bool incomingStatus, uint64 incomingTimestamp); + event AnswerUpdated(int256 indexed current, uint256 indexed roundId, uint256 updatedAt); + event RoundUpdated(int256 status, uint64 updatedAt); + event Initialized(); + + /// Setup + function setUp() public { + vm.startPrank(s_deployerAddr, s_deployerAddr); + + s_accessController = new SimpleWriteAccessController(); + s_flags = new Flags(address(s_accessController), address(s_accessController)); + s_arbitrumSequencerUptimeFeed = new ArbitrumSequencerUptimeFeed(address(s_flags), s_l1OwnerAddr); + + s_accessController.addAccess(address(s_arbitrumSequencerUptimeFeed)); + s_accessController.addAccess(address(s_flags)); + s_accessController.addAccess(s_deployerAddr); + s_flags.addAccess(address(s_arbitrumSequencerUptimeFeed)); + + vm.expectEmit(); + emit Initialized(); + s_arbitrumSequencerUptimeFeed.initialize(); + + vm.stopPrank(); + } +} + +contract ArbitrumSequencerUptimeFeed_Constants is ArbitrumSequencerUptimeFeedTest { + /// @notice it should have the correct value for FLAG_L2_SEQ_OFFLINE + function test_InitialState() public { + assertEq(s_arbitrumSequencerUptimeFeed.FLAG_L2_SEQ_OFFLINE(), 0xa438451D6458044c3c8CD2f6f31c91ac882A6d91); + } +} + +contract ArbitrumSequencerUptimeFeed_UpdateStatus is ArbitrumSequencerUptimeFeedTest { + /// @notice it should revert 
if called by an address that is not the L2 Cross Domain Messenger + function test_RevertIfNotL2CrossDomainMessengerAddr() public { + // Sets msg.sender and tx.origin to an unauthorized address + vm.startPrank(s_strangerAddr, s_strangerAddr); + + // Tries to update the status from an unauthorized account + vm.expectRevert(ArbitrumSequencerUptimeFeed.InvalidSender.selector); + s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(1)); + } + + /// @notice it should update status when status has changed and incoming timestamp is newer than the latest + function test_UpdateStatusWhenStatusChangeAndTimeChange() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l2MessengerAddr, s_l2MessengerAddr); + + // Submits a status update + uint256 timestamp = s_arbitrumSequencerUptimeFeed.latestTimestamp(); + vm.expectEmit(); + emit AnswerUpdated(1, 2, timestamp); + s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + assertEq(s_arbitrumSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_arbitrumSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Submit another status update, different status, newer timestamp should update + timestamp = timestamp + 200; + vm.expectEmit(); + emit AnswerUpdated(0, 3, timestamp); + s_arbitrumSequencerUptimeFeed.updateStatus(false, uint64(timestamp)); + assertEq(s_arbitrumSequencerUptimeFeed.latestAnswer(), 0); + assertEq(s_arbitrumSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + } + + /// @notice it should update status when status has changed and incoming timestamp is the same as latest + function test_UpdateStatusWhenStatusChangeAndNoTimeChange() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l2MessengerAddr, s_l2MessengerAddr); + + // Fetches the latest timestamp + uint256 timestamp = s_arbitrumSequencerUptimeFeed.latestTimestamp(); + + // Submits a status update + vm.expectEmit(); + emit AnswerUpdated(1, 2, timestamp); + 
s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + assertEq(s_arbitrumSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_arbitrumSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Submit another status update, different status, same timestamp should update + vm.expectEmit(); + emit AnswerUpdated(0, 3, timestamp); + s_arbitrumSequencerUptimeFeed.updateStatus(false, uint64(timestamp)); + assertEq(s_arbitrumSequencerUptimeFeed.latestAnswer(), 0); + assertEq(s_arbitrumSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + } + + /// @notice it should ignore out-of-order updates + function test_IgnoreOutOfOrderUpdates() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l2MessengerAddr, s_l2MessengerAddr); + + // Submits a status update + uint256 timestamp = s_arbitrumSequencerUptimeFeed.latestTimestamp() + 10000; + vm.expectEmit(); + emit AnswerUpdated(1, 2, timestamp); + s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + assertEq(s_arbitrumSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_arbitrumSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Update with different status, but stale timestamp, should be ignored + timestamp = timestamp - 1000; + vm.expectEmit(false, false, false, false); + emit UpdateIgnored(true, 0, true, 0); // arguments are dummy values + // TODO: how can we check that an AnswerUpdated event was NOT emitted + s_arbitrumSequencerUptimeFeed.updateStatus(false, uint64(timestamp)); + } +} + +contract ArbitrumSequencerUptimeFeed_AggregatorV3Interface is ArbitrumSequencerUptimeFeedTest { + /// @notice it should return valid answer from getRoundData and latestRoundData + function test_AggregatorV3Interface() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l2MessengerAddr, s_l2MessengerAddr); + + // Defines helper variables + uint80 roundId; + int256 answer; + uint256 startedAt; + uint256 updatedAt; + uint80 
answeredInRound; + + // Checks initial state + (roundId, answer, startedAt, updatedAt, answeredInRound) = s_arbitrumSequencerUptimeFeed.latestRoundData(); + assertEq(roundId, 1); + assertEq(answer, 0); + assertEq(answeredInRound, roundId); + assertEq(startedAt, updatedAt); + + // Submits status update with different status and newer timestamp, should update + uint256 timestamp = startedAt + 1000; + s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + (roundId, answer, startedAt, updatedAt, answeredInRound) = s_arbitrumSequencerUptimeFeed.getRoundData(2); + assertEq(roundId, 2); + assertEq(answer, 1); + assertEq(answeredInRound, roundId); + assertEq(startedAt, timestamp); + assertLe(updatedAt, startedAt); + + // Saves round 2 data + uint80 roundId2 = roundId; + int256 answer2 = answer; + uint256 startedAt2 = startedAt; + uint256 updatedAt2 = updatedAt; + uint80 answeredInRound2 = answeredInRound; + + // Checks that last round is still returning the correct data + (roundId, answer, startedAt, updatedAt, answeredInRound) = s_arbitrumSequencerUptimeFeed.getRoundData(1); + assertEq(roundId, 1); + assertEq(answer, 0); + assertEq(answeredInRound, roundId); + assertEq(startedAt, updatedAt); + + // Assert latestRoundData corresponds to latest round id + (roundId, answer, startedAt, updatedAt, answeredInRound) = s_arbitrumSequencerUptimeFeed.latestRoundData(); + assertEq(roundId2, roundId); + assertEq(answer2, answer); + assertEq(startedAt2, startedAt); + assertEq(updatedAt2, updatedAt); + assertEq(answeredInRound2, answeredInRound); + } + + /// @notice it should return zeroed round data from #getRoundData when round does not yet exist (future roundId) + function test_Return0WhenRoundDoesNotExistYet() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l1OwnerAddr, s_l1OwnerAddr); + + // Gets data from a round that has not happened yet + ( + uint80 roundId, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ) = 
s_arbitrumSequencerUptimeFeed.getRoundData(2); + + // Validates round data + assertEq(roundId, 2); + assertEq(answer, 0); + assertEq(startedAt, 0); + assertEq(updatedAt, 0); + assertEq(answeredInRound, 2); + } +} + +contract ArbitrumSequencerUptimeFeed_ProtectReadsOnAggregatorV2V3InterfaceFunctions is ArbitrumSequencerUptimeFeedTest { + /// @notice it should disallow reads on AggregatorV2V3Interface functions when consuming contract is not whitelisted + function test_AggregatorV2V3InterfaceDisallowReadsIfConsumingContractIsNotWhitelisted() public { + // Deploys a FeedConsumer contract + FeedConsumer feedConsumer = new FeedConsumer(address(s_arbitrumSequencerUptimeFeed)); + + // Sanity - consumer is not whitelisted + assertEq(s_arbitrumSequencerUptimeFeed.checkEnabled(), true); + assertEq(s_arbitrumSequencerUptimeFeed.hasAccess(address(feedConsumer), abi.encode("")), false); + + // Asserts reads are not possible from consuming contract + vm.expectRevert("No access"); + feedConsumer.latestAnswer(); + vm.expectRevert("No access"); + feedConsumer.latestRoundData(); + } + + /// @notice it should allow reads on AggregatorV2V3Interface functions when consuming contract is whitelisted + function test_AggregatorV2V3InterfaceAllowReadsIfConsumingContractIsWhitelisted() public { + // Deploys a FeedConsumer contract + FeedConsumer feedConsumer = new FeedConsumer(address(s_arbitrumSequencerUptimeFeed)); + + // Whitelist consumer + vm.startPrank(s_deployerAddr, s_deployerAddr); + s_arbitrumSequencerUptimeFeed.addAccess(address(feedConsumer)); + + // Sanity - consumer is whitelisted + assertEq(s_arbitrumSequencerUptimeFeed.checkEnabled(), true); + assertEq(s_arbitrumSequencerUptimeFeed.hasAccess(address(feedConsumer), abi.encode("")), true); + + // Asserts reads are possible from consuming contract + (uint80 roundId, int256 answer, , , ) = feedConsumer.latestRoundData(); + assertEq(feedConsumer.latestAnswer(), 0); + assertEq(roundId, 1); + assertEq(answer, 0); + } +} + +contract 
ArbitrumSequencerUptimeFeed_GasCosts is ArbitrumSequencerUptimeFeedTest { + /// @notice it should consume a known amount of gas for updates + function test_GasCosts() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l2MessengerAddr, s_l2MessengerAddr); + + // Assert initial conditions + uint256 timestamp = s_arbitrumSequencerUptimeFeed.latestTimestamp(); + assertEq(s_arbitrumSequencerUptimeFeed.latestAnswer(), 0); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed; + uint256 gasStart; + uint256 gasFinal; + + // measures gas used for no update + expectedGasUsed = 5507; // NOTE: used to be 28300 in hardhat tests + gasStart = gasleft(); + s_arbitrumSequencerUptimeFeed.updateStatus(false, uint64(timestamp + 1000)); + gasFinal = gasleft(); + assertEq(s_arbitrumSequencerUptimeFeed.latestAnswer(), 0); + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + + // measures gas used for update + expectedGasUsed = 68198; // NOTE: used to be 93015 in hardhat tests + gasStart = gasleft(); + s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(timestamp + 1000)); + gasFinal = gasleft(); + assertEq(s_arbitrumSequencerUptimeFeed.latestAnswer(), 1); + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } +} + +contract ArbitrumSequencerUptimeFeed_AggregatorInterfaceGasCosts is ArbitrumSequencerUptimeFeedTest { + /// @notice it should consume a known amount of gas for getRoundData(uint80) + function test_GasUsageForGetRoundData() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l2MessengerAddr, s_l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 4658; // NOTE: used to be 31157 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_arbitrumSequencerUptimeFeed.latestTimestamp() + 1000; + 
s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_arbitrumSequencerUptimeFeed.getRoundData(1); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for latestRoundData() + function test_GasUsageForLatestRoundData() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l2MessengerAddr, s_l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 2154; // NOTE: used to be 28523 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_arbitrumSequencerUptimeFeed.latestTimestamp() + 1000; + s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_arbitrumSequencerUptimeFeed.latestRoundData(); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for latestAnswer() + function test_GasUsageForLatestAnswer() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l2MessengerAddr, s_l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 1722; // NOTE: used to be 28329 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_arbitrumSequencerUptimeFeed.latestTimestamp() + 1000; + s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_arbitrumSequencerUptimeFeed.latestAnswer(); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, 
gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for latestTimestamp() + function test_GasUsageForLatestTimestamp() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l2MessengerAddr, s_l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 1652; // NOTE: used to be 28229 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_arbitrumSequencerUptimeFeed.latestTimestamp() + 1000; + s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_arbitrumSequencerUptimeFeed.latestTimestamp(); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for latestRound() + function test_GasUsageForLatestRound() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l2MessengerAddr, s_l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 1632; // NOTE: used to be 28245 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_arbitrumSequencerUptimeFeed.latestTimestamp() + 1000; + s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_arbitrumSequencerUptimeFeed.latestRound(); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for getAnswer() + function test_GasUsageForGetAnswer() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l2MessengerAddr, s_l2MessengerAddr); + + // Defines helper 
variables for measuring gas usage + uint256 expectedGasUsed = 4059; // NOTE: used to be 30799 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_arbitrumSequencerUptimeFeed.latestTimestamp() + 1000; + s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_arbitrumSequencerUptimeFeed.getAnswer(1); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for getTimestamp() + function test_GasUsageForGetTimestamp() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l2MessengerAddr, s_l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 4024; // NOTE: used to be 30753 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_arbitrumSequencerUptimeFeed.latestTimestamp() + 1000; + s_arbitrumSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_arbitrumSequencerUptimeFeed.getTimestamp(1); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } +} diff --git a/contracts/src/v0.8/l2ep/test/v1_0_0/arbitrum/ArbitrumValidator.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/arbitrum/ArbitrumValidator.t.sol new file mode 100644 index 00000000..50463554 --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/arbitrum/ArbitrumValidator.t.sol @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {AccessControllerInterface} from "../../../../shared/interfaces/AccessControllerInterface.sol"; + +import {SimpleWriteAccessController} from 
"../../../../shared/access/SimpleWriteAccessController.sol"; +import {ArbitrumSequencerUptimeFeed} from "../../../dev/arbitrum/ArbitrumSequencerUptimeFeed.sol"; +import {ArbitrumValidator} from "../../../dev/arbitrum/ArbitrumValidator.sol"; +import {MockArbitrumInbox} from "../../../../tests/MockArbitrumInbox.sol"; +import {MockAggregatorV2V3} from "../../mocks/MockAggregatorV2V3.sol"; +import {L2EPTest} from "../L2EPTest.t.sol"; + +contract ArbitrumValidatorTest is L2EPTest { + /// Helper constants + address internal constant L2_SEQ_STATUS_RECORDER_ADDRESS = 0x491B1dDA0A8fa069bbC1125133A975BF4e85a91b; + uint256 internal constant GAS_PRICE_BID = 1000000; + uint256 internal constant BASE_FEE = 14000000000; + uint256 internal constant MAX_GAS = 1000000; + + /// L2EP contracts + AccessControllerInterface internal s_accessController; + MockArbitrumInbox internal s_mockArbitrumInbox; + ArbitrumValidator internal s_arbitrumValidator; + MockAggregatorV2V3 internal s_l1GasFeed; + + /// Events + event RetryableTicketNoRefundAliasRewriteCreated( + address destAddr, + uint256 arbTxCallValue, + uint256 maxSubmissionCost, + address submissionRefundAddress, + address valueRefundAddress, + uint256 maxGas, + uint256 gasPriceBid, + bytes data + ); + + /// Setup + function setUp() public { + s_accessController = new SimpleWriteAccessController(); + s_mockArbitrumInbox = new MockArbitrumInbox(); + s_l1GasFeed = new MockAggregatorV2V3(); + s_arbitrumValidator = new ArbitrumValidator( + address(s_mockArbitrumInbox), + L2_SEQ_STATUS_RECORDER_ADDRESS, + address(s_accessController), + MAX_GAS, + GAS_PRICE_BID, + BASE_FEE, + address(s_l1GasFeed), + ArbitrumValidator.PaymentStrategy.L1 + ); + } +} + +contract ArbitrumValidator_Validate is ArbitrumValidatorTest { + /// @notice it should post sequencer offline + function test_PostSequencerOffline() public { + // Gives access to the s_eoaValidator + s_arbitrumValidator.addAccess(s_eoaValidator); + + // Gets the ArbitrumValidator L2 address + address 
arbitrumValidatorL2Addr = toArbitrumL2AliasAddress(address(s_arbitrumValidator)); + + // Sets block.timestamp to a later date, funds the ArbitrumValidator contract, and sets msg.sender and tx.origin + uint256 futureTimestampInSeconds = block.timestamp + 5000; + vm.warp(futureTimestampInSeconds); + vm.deal(address(s_arbitrumValidator), 1 ether); + vm.startPrank(s_eoaValidator); + + // Sets up the expected event data + vm.expectEmit(); + emit RetryableTicketNoRefundAliasRewriteCreated( + L2_SEQ_STATUS_RECORDER_ADDRESS, // destAddr + 0, // arbTxCallValue + 25312000000000, // maxSubmissionCost + arbitrumValidatorL2Addr, // submissionRefundAddress + arbitrumValidatorL2Addr, // valueRefundAddress + MAX_GAS, // maxGas + GAS_PRICE_BID, // gasPriceBid + abi.encodeWithSelector(ArbitrumSequencerUptimeFeed.updateStatus.selector, true, futureTimestampInSeconds) // data + ); + + // Runs the function (which produces the event to test) + s_arbitrumValidator.validate(0, 0, 1, 1); + } +} diff --git a/contracts/src/v0.8/l2ep/test/v1_0_0/optimism/OptimismCrossDomainForwarder.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/optimism/OptimismCrossDomainForwarder.t.sol new file mode 100644 index 00000000..d5c482dc --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/optimism/OptimismCrossDomainForwarder.t.sol @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {OptimismCrossDomainForwarder} from "../../../dev/optimism/OptimismCrossDomainForwarder.sol"; +import {MockOVMCrossDomainMessenger} from "../../mocks/optimism/MockOVMCrossDomainMessenger.sol"; +import {Greeter} from "../../../../tests/Greeter.sol"; +import {L2EPTest} from "../L2EPTest.t.sol"; + +contract OptimismCrossDomainForwarderTest is L2EPTest { + /// Contracts + MockOVMCrossDomainMessenger internal s_mockOptimismCrossDomainMessenger; + OptimismCrossDomainForwarder internal s_optimismCrossDomainForwarder; + Greeter internal s_greeter; + + /// Events + event L1OwnershipTransferRequested(address 
indexed from, address indexed to); + event L1OwnershipTransferred(address indexed from, address indexed to); + + /// Setup + function setUp() public { + // Deploys contracts + vm.startPrank(s_l1OwnerAddr); + s_mockOptimismCrossDomainMessenger = new MockOVMCrossDomainMessenger(s_l1OwnerAddr); + s_optimismCrossDomainForwarder = new OptimismCrossDomainForwarder( + s_mockOptimismCrossDomainMessenger, + s_l1OwnerAddr + ); + s_greeter = new Greeter(address(s_optimismCrossDomainForwarder)); + vm.stopPrank(); + } +} + +contract OptimismCrossDomainForwarder_Constructor is OptimismCrossDomainForwarderTest { + /// @notice it should have been deployed with the correct initial state + function test_InitialState() public { + // it should set the owner correctly + assertEq(s_optimismCrossDomainForwarder.owner(), s_l1OwnerAddr); + + // it should set the l1Owner correctly + assertEq(s_optimismCrossDomainForwarder.l1Owner(), s_l1OwnerAddr); + + // it should set the crossdomain messenger correctly + assertEq(s_optimismCrossDomainForwarder.crossDomainMessenger(), address(s_mockOptimismCrossDomainMessenger)); + + // it should set the typeAndVersion correctly + assertEq(s_optimismCrossDomainForwarder.typeAndVersion(), "OptimismCrossDomainForwarder 1.0.0"); + } +} + +contract OptimismCrossDomainForwarder_Forward is OptimismCrossDomainForwarderTest { + /// @notice it should not be callable by unknown address + function test_NotCallableByUnknownAddress() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_optimismCrossDomainForwarder.forward(address(s_greeter), abi.encode("")); + } + + /// @notice it should be callable by crossdomain messenger address / L1 owner + function test_Forward() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Defines the cross domain message to send + string memory greeting = "hello"; + + // Sends the message + s_mockOptimismCrossDomainMessenger.sendMessage( + 
address(s_optimismCrossDomainForwarder), // target + encodeCrossDomainSetGreetingMsg(s_optimismCrossDomainForwarder.forward.selector, address(s_greeter), greeting), // message + 0 // gas limit + ); + + // Checks that the greeter got the message + assertEq(s_greeter.greeting(), greeting); + } + + /// @notice it should revert when contract call reverts + function test_ForwardRevert() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends an invalid message + vm.expectRevert("Invalid greeting length"); + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainForwarder), // target + encodeCrossDomainSetGreetingMsg(s_optimismCrossDomainForwarder.forward.selector, address(s_greeter), ""), // message + 0 // gas limit + ); + } +} + +contract OptimismCrossDomainForwarder_TransferL1Ownership is OptimismCrossDomainForwarderTest { + /// @notice it should not be callable by non-owners + function test_NotCallableByNonOwners() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_optimismCrossDomainForwarder.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should not be callable by L2 owner + function test_NotCallableByL2Owner() public { + vm.startPrank(s_l1OwnerAddr); + assertEq(s_optimismCrossDomainForwarder.owner(), s_l1OwnerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_optimismCrossDomainForwarder.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should be callable by current L1 owner + function test_CallableByL1Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Defines the cross domain message to send + vm.expectEmit(); + emit L1OwnershipTransferRequested(s_optimismCrossDomainForwarder.l1Owner(), s_strangerAddr); + + // Sends the message + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainForwarder), // target + 
abi.encodeWithSelector(s_optimismCrossDomainForwarder.transferL1Ownership.selector, s_strangerAddr), // message + 0 // gas limit + ); + } + + /// @notice it should be callable by current L1 owner to zero address + function test_CallableByL1OwnerOrZeroAddress() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Defines the cross domain message to send + vm.expectEmit(); + emit L1OwnershipTransferRequested(s_optimismCrossDomainForwarder.l1Owner(), address(0)); + + // Sends the message + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainForwarder), // target + abi.encodeWithSelector(s_optimismCrossDomainForwarder.transferL1Ownership.selector, address(0)), // message + 0 // gas limit + ); + } +} + +contract OptimismCrossDomainForwarder_AcceptL1Ownership is OptimismCrossDomainForwarderTest { + /// @notice it should not be callable by non pending-owners + function test_NotCallableByNonPendingOwners() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends the message + vm.expectRevert("Must be proposed L1 owner"); + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainForwarder), // target + abi.encodeWithSelector(s_optimismCrossDomainForwarder.acceptL1Ownership.selector), // message + 0 // gas limit + ); + } + + /// @notice it should be callable by pending L1 owner + function test_CallableByPendingL1Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Request ownership transfer + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainForwarder), // target + abi.encodeWithSelector(s_optimismCrossDomainForwarder.transferL1Ownership.selector, s_strangerAddr), // message + 0 // gas limit + ); + + // Sets a mock message sender + s_mockOptimismCrossDomainMessenger._setMockMessageSender(s_strangerAddr); + + // Prepares expected event payload + vm.expectEmit(); + emit 
L1OwnershipTransferred(s_l1OwnerAddr, s_strangerAddr); + + // Accepts ownership transfer request + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainForwarder), // target + abi.encodeWithSelector(s_optimismCrossDomainForwarder.acceptL1Ownership.selector, s_strangerAddr), // message + 0 // gas limit + ); + + // Asserts that the ownership was actually transferred + assertEq(s_optimismCrossDomainForwarder.l1Owner(), s_strangerAddr); + } +} diff --git a/contracts/src/v0.8/l2ep/test/v1_0_0/optimism/OptimismCrossDomainGovernor.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/optimism/OptimismCrossDomainGovernor.t.sol new file mode 100644 index 00000000..e1a5aef9 --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/optimism/OptimismCrossDomainGovernor.t.sol @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {OptimismCrossDomainGovernor} from "../../../dev/optimism/OptimismCrossDomainGovernor.sol"; +import {MockOVMCrossDomainMessenger} from "../../mocks/optimism/MockOVMCrossDomainMessenger.sol"; +import {Greeter} from "../../../../tests/Greeter.sol"; +import {L2EPTest} from "../L2EPTest.t.sol"; + +import {MultiSend} from "../../../../vendor/MultiSend.sol"; + +contract OptimismCrossDomainGovernorTest is L2EPTest { + /// Contracts + MockOVMCrossDomainMessenger internal s_mockOptimismCrossDomainMessenger; + OptimismCrossDomainGovernor internal s_optimismCrossDomainGovernor; + MultiSend internal s_multiSend; + Greeter internal s_greeter; + + /// Events + event L1OwnershipTransferRequested(address indexed from, address indexed to); + event L1OwnershipTransferred(address indexed from, address indexed to); + + /// Setup + function setUp() public { + // Deploys contracts + vm.startPrank(s_l1OwnerAddr); + s_mockOptimismCrossDomainMessenger = new MockOVMCrossDomainMessenger(s_l1OwnerAddr); + s_optimismCrossDomainGovernor = new OptimismCrossDomainGovernor(s_mockOptimismCrossDomainMessenger, s_l1OwnerAddr); + s_greeter = 
new Greeter(address(s_optimismCrossDomainGovernor)); + s_multiSend = new MultiSend(); + vm.stopPrank(); + } +} + +contract OptimismCrossDomainGovernor_Constructor is OptimismCrossDomainGovernorTest { + /// @notice it should have been deployed with the correct initial state + function test_InitialState() public { + // it should set the owner correctly + assertEq(s_optimismCrossDomainGovernor.owner(), s_l1OwnerAddr); + + // it should set the l1Owner correctly + assertEq(s_optimismCrossDomainGovernor.l1Owner(), s_l1OwnerAddr); + + // it should set the crossdomain messenger correctly + assertEq(s_optimismCrossDomainGovernor.crossDomainMessenger(), address(s_mockOptimismCrossDomainMessenger)); + + // it should set the typeAndVersion correctly + assertEq(s_optimismCrossDomainGovernor.typeAndVersion(), "OptimismCrossDomainGovernor 1.0.0"); + } +} + +contract OptimismCrossDomainGovernor_Forward is OptimismCrossDomainGovernorTest { + /// @notice it should not be callable by unknown address + function test_NotCallableByUnknownAddress() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger or owner"); + s_optimismCrossDomainGovernor.forward(address(s_greeter), abi.encode("")); + } + + /// @notice it should be callable by crossdomain messenger address / L1 owner + function test_Forward() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Defines the cross domain message to send + string memory greeting = "hello"; + + // Sends the message + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainGovernor), // target + encodeCrossDomainSetGreetingMsg(s_optimismCrossDomainGovernor.forward.selector, address(s_greeter), greeting), // message + 0 // gas limit + ); + + // Checks that the greeter got the message + assertEq(s_greeter.greeting(), greeting); + } + + /// @notice it should revert when contract call reverts + function test_ForwardRevert() public { + // Sets msg.sender and tx.origin 
+ vm.startPrank(s_strangerAddr); + + // Sends an invalid message + vm.expectRevert("Invalid greeting length"); + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainGovernor), // target + encodeCrossDomainSetGreetingMsg(s_optimismCrossDomainGovernor.forward.selector, address(s_greeter), ""), // message + 0 // gas limit + ); + } + + /// @notice it should be callable by L2 owner + function test_CallableByL2Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_l1OwnerAddr); + + // Defines the cross domain message to send + string memory greeting = "hello"; + + // Sends the message + s_optimismCrossDomainGovernor.forward( + address(s_greeter), + abi.encodeWithSelector(s_greeter.setGreeting.selector, greeting) + ); + + // Checks that the greeter message was updated + assertEq(s_greeter.greeting(), greeting); + } +} + +contract OptimismCrossDomainGovernor_ForwardDelegate is OptimismCrossDomainGovernorTest { + /// @notice it should not be callable by unknown address + function test_NotCallableByUnknownAddress() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger or owner"); + s_optimismCrossDomainGovernor.forwardDelegate(address(s_greeter), abi.encode("")); + } + + /// @notice it should be callable by crossdomain messenger address / L1 owner + function test_CallableByCrossDomainMessengerAddressOrL1Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends the message + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainGovernor), // target + encodeCrossDomainMultiSendMsg( + s_optimismCrossDomainGovernor.forwardDelegate.selector, + address(s_multiSend), + abi.encodePacked(encodeMultiSendTx(address(s_greeter), "foo"), encodeMultiSendTx(address(s_greeter), "bar")) + ), // message + 0 // gas limit + ); + + // Checks that the greeter message was updated + assertEq(s_greeter.greeting(), "bar"); + } + + /// @notice it should be 
callable by L2 owner + function test_CallableByL2Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_l1OwnerAddr); + + // Sends the message + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainGovernor), // target + encodeCrossDomainMultiSendMsg( + s_optimismCrossDomainGovernor.forwardDelegate.selector, + address(s_multiSend), + abi.encodePacked(encodeMultiSendTx(address(s_greeter), "foo"), encodeMultiSendTx(address(s_greeter), "bar")) + ), // message + 0 // gas limit + ); + + // Checks that the greeter message was updated + assertEq(s_greeter.greeting(), "bar"); + } + + /// @notice it should revert batch when one call fails + function test_RevertsBatchWhenOneCallFails() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends an invalid message (empty transaction data is not allowed) + vm.expectRevert("Governor delegatecall reverted"); + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainGovernor), // target + encodeCrossDomainMultiSendMsg( + s_optimismCrossDomainGovernor.forwardDelegate.selector, + address(s_multiSend), + abi.encodePacked(encodeMultiSendTx(address(s_greeter), "foo"), encodeMultiSendTx(address(s_greeter), "")) + ), // message + 0 // gas limit + ); + + // Checks that the greeter message is unchanged + assertEq(s_greeter.greeting(), ""); + } + + /// @notice it should bubble up revert when contract call reverts + function test_BubbleUpRevert() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends an invalid message (empty transaction data is not allowed) + vm.expectRevert("Greeter: revert triggered"); + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainGovernor), // target + abi.encodeWithSelector( + OptimismCrossDomainGovernor.forwardDelegate.selector, + address(s_greeter), + abi.encodeWithSelector(Greeter.triggerRevert.selector) + ), // message + 0 // gas limit + ); + } +} + 
+contract OptimismCrossDomainGovernor_TransferL1Ownership is OptimismCrossDomainGovernorTest { + /// @notice it should not be callable by non-owners + function test_NotCallableByNonOwners() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_optimismCrossDomainGovernor.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should not be callable by L2 owner + function test_NotCallableByL2Owner() public { + vm.startPrank(s_l1OwnerAddr); + assertEq(s_optimismCrossDomainGovernor.owner(), s_l1OwnerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_optimismCrossDomainGovernor.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should be callable by current L1 owner + function test_CallableByL1Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Defines the cross domain message to send + vm.expectEmit(); + emit L1OwnershipTransferRequested(s_optimismCrossDomainGovernor.l1Owner(), s_strangerAddr); + + // Sends the message + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainGovernor), // target + abi.encodeWithSelector(s_optimismCrossDomainGovernor.transferL1Ownership.selector, s_strangerAddr), // message + 0 // gas limit + ); + } + + /// @notice it should be callable by current L1 owner to zero address + function test_CallableByL1OwnerOrZeroAddress() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Defines the cross domain message to send + vm.expectEmit(); + emit L1OwnershipTransferRequested(s_optimismCrossDomainGovernor.l1Owner(), address(0)); + + // Sends the message + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainGovernor), // target + abi.encodeWithSelector(s_optimismCrossDomainGovernor.transferL1Ownership.selector, address(0)), // message + 0 // gas limit + ); + } +} + +contract OptimismCrossDomainGovernor_AcceptL1Ownership is OptimismCrossDomainGovernorTest { 
+ /// @notice it should not be callable by non pending-owners + function test_NotCallableByNonPendingOwners() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends the message + vm.expectRevert("Must be proposed L1 owner"); + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainGovernor), // target + abi.encodeWithSelector(s_optimismCrossDomainGovernor.acceptL1Ownership.selector), // message + 0 // gas limit + ); + } + + /// @notice it should be callable by pending L1 owner + function test_CallableByPendingL1Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Request ownership transfer + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainGovernor), // target + abi.encodeWithSelector(s_optimismCrossDomainGovernor.transferL1Ownership.selector, s_strangerAddr), // message + 0 // gas limit + ); + + // Sets a mock message sender + s_mockOptimismCrossDomainMessenger._setMockMessageSender(s_strangerAddr); + + // Prepares expected event payload + vm.expectEmit(); + emit L1OwnershipTransferred(s_l1OwnerAddr, s_strangerAddr); + + // Accepts ownership transfer request + s_mockOptimismCrossDomainMessenger.sendMessage( + address(s_optimismCrossDomainGovernor), // target + abi.encodeWithSelector(s_optimismCrossDomainGovernor.acceptL1Ownership.selector, s_strangerAddr), // message + 0 // gas limit + ); + + // Asserts that the ownership was actually transferred + assertEq(s_optimismCrossDomainGovernor.l1Owner(), s_strangerAddr); + } +} diff --git a/contracts/src/v0.8/l2ep/test/v1_0_0/optimism/OptimismSequencerUptimeFeed.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/optimism/OptimismSequencerUptimeFeed.t.sol new file mode 100644 index 00000000..60598b9f --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/optimism/OptimismSequencerUptimeFeed.t.sol @@ -0,0 +1,524 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import 
{MockOptimismL1CrossDomainMessenger} from "../../../../tests/MockOptimismL1CrossDomainMessenger.sol"; +import {MockOptimismL2CrossDomainMessenger} from "../../../../tests/MockOptimismL2CrossDomainMessenger.sol"; +import {OptimismSequencerUptimeFeed} from "../../../dev/optimism/OptimismSequencerUptimeFeed.sol"; +import {FeedConsumer} from "../../../../tests/FeedConsumer.sol"; +import {L2EPTest} from "../L2EPTest.t.sol"; + +contract OptimismSequencerUptimeFeedTest is L2EPTest { + /// Constants + uint256 internal constant GAS_USED_DEVIATION = 100; + + /// L2EP contracts + MockOptimismL1CrossDomainMessenger internal s_mockOptimismL1CrossDomainMessenger; + MockOptimismL2CrossDomainMessenger internal s_mockOptimismL2CrossDomainMessenger; + OptimismSequencerUptimeFeed internal s_optimismSequencerUptimeFeed; + + /// Events + event UpdateIgnored(bool latestStatus, uint64 latestTimestamp, bool incomingStatus, uint64 incomingTimestamp); + event AnswerUpdated(int256 indexed current, uint256 indexed roundId, uint256 updatedAt); + event RoundUpdated(int256 status, uint64 updatedAt); + + /// Setup + function setUp() public { + // Deploys contracts + s_mockOptimismL1CrossDomainMessenger = new MockOptimismL1CrossDomainMessenger(); + s_mockOptimismL2CrossDomainMessenger = new MockOptimismL2CrossDomainMessenger(); + s_optimismSequencerUptimeFeed = new OptimismSequencerUptimeFeed( + s_l1OwnerAddr, + address(s_mockOptimismL2CrossDomainMessenger), + false + ); + + // Sets mock sender in mock L2 messenger contract + s_mockOptimismL2CrossDomainMessenger.setSender(s_l1OwnerAddr); + } +} + +contract OptimismSequencerUptimeFeed_Constructor is OptimismSequencerUptimeFeedTest { + /// @notice it should have been deployed with the correct initial state + function test_InitialState() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l1OwnerAddr, s_l1OwnerAddr); + + // Checks L1 sender + address actualL1Addr = s_optimismSequencerUptimeFeed.l1Sender(); + 
assertEq(actualL1Addr, s_l1OwnerAddr); + + // Checks latest round data + (uint80 roundId, int256 answer, , , ) = s_optimismSequencerUptimeFeed.latestRoundData(); + assertEq(roundId, 1); + assertEq(answer, 0); + } +} + +contract OptimismSequencerUptimeFeed_UpdateStatus is OptimismSequencerUptimeFeedTest { + /// @notice it should revert if called by an address that is not the L2 Cross Domain Messenger + function test_RevertIfNotL2CrossDomainMessengerAddr() public { + // Sets msg.sender and tx.origin to an unauthorized address + vm.startPrank(s_strangerAddr, s_strangerAddr); + + // Tries to update the status from an unauthorized account + vm.expectRevert(OptimismSequencerUptimeFeed.InvalidSender.selector); + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(1)); + } + + /// @notice it should revert if called by an address that is not the L2 Cross Domain Messenger and is not the L1 sender + function test_RevertIfNotL2CrossDomainMessengerAddrAndNotL1SenderAddr() public { + // Sets msg.sender and tx.origin to an unauthorized address + vm.startPrank(s_strangerAddr, s_strangerAddr); + + // Sets mock sender in mock L2 messenger contract + s_mockOptimismL2CrossDomainMessenger.setSender(s_strangerAddr); + + // Tries to update the status from an unauthorized account + vm.expectRevert(OptimismSequencerUptimeFeed.InvalidSender.selector); + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(1)); + } + + /// @notice it should update status when status has not changed and incoming timestamp is the same as latest + function test_UpdateStatusWhenNoChange() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Fetches the latest timestamp + uint256 timestamp = s_optimismSequencerUptimeFeed.latestTimestamp(); + + // Submits a status update + vm.expectEmit(); + emit AnswerUpdated(1, 2, timestamp); + 
s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + assertEq(s_optimismSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_optimismSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Stores the current round data before updating it + ( + uint80 roundIdBeforeUpdate, + int256 answerBeforeUpdate, + uint256 startedAtBeforeUpdate, + , + uint80 answeredInRoundBeforeUpdate + ) = s_optimismSequencerUptimeFeed.latestRoundData(); + + // Submit another status update with the same status + vm.expectEmit(); + emit RoundUpdated(1, uint64(block.timestamp)); + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp + 200)); + assertEq(s_optimismSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_optimismSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Stores the current round data after updating it + ( + uint80 roundIdAfterUpdate, + int256 answerAfterUpdate, + uint256 startedAtAfterUpdate, + uint256 updatedAtAfterUpdate, + uint80 answeredInRoundAfterUpdate + ) = s_optimismSequencerUptimeFeed.latestRoundData(); + + // Verifies the latest round data has been properly updated + assertEq(roundIdAfterUpdate, roundIdBeforeUpdate); + assertEq(answerAfterUpdate, answerBeforeUpdate); + assertEq(startedAtAfterUpdate, startedAtBeforeUpdate); + assertEq(answeredInRoundAfterUpdate, answeredInRoundBeforeUpdate); + assertEq(updatedAtAfterUpdate, block.timestamp); + } + + /// @notice it should update status when status has changed and incoming timestamp is newer than the latest + function test_UpdateStatusWhenStatusChangeAndTimeChange() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Submits a status update + uint256 timestamp = s_optimismSequencerUptimeFeed.latestTimestamp(); + vm.expectEmit(); + emit AnswerUpdated(1, 2, timestamp); + s_optimismSequencerUptimeFeed.updateStatus(true, 
uint64(timestamp)); + assertEq(s_optimismSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_optimismSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Submit another status update, different status, newer timestamp should update + timestamp = timestamp + 200; + vm.expectEmit(); + emit AnswerUpdated(0, 3, timestamp); + s_optimismSequencerUptimeFeed.updateStatus(false, uint64(timestamp)); + assertEq(s_optimismSequencerUptimeFeed.latestAnswer(), 0); + assertEq(s_optimismSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + } + + /// @notice it should update status when status has changed and incoming timestamp is the same as latest + function test_UpdateStatusWhenStatusChangeAndNoTimeChange() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Fetches the latest timestamp + uint256 timestamp = s_optimismSequencerUptimeFeed.latestTimestamp(); + + // Submits a status update + vm.expectEmit(); + emit AnswerUpdated(1, 2, timestamp); + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + assertEq(s_optimismSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_optimismSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Submit another status update, different status, same timestamp should update + vm.expectEmit(); + emit AnswerUpdated(0, 3, timestamp); + s_optimismSequencerUptimeFeed.updateStatus(false, uint64(timestamp)); + assertEq(s_optimismSequencerUptimeFeed.latestAnswer(), 0); + assertEq(s_optimismSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + } + + /// @notice it should ignore out-of-order updates + function test_IgnoreOutOfOrderUpdates() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Submits a status update + 
uint256 timestamp = s_optimismSequencerUptimeFeed.latestTimestamp() + 10000; + vm.expectEmit(); + emit AnswerUpdated(1, 2, timestamp); + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + assertEq(s_optimismSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_optimismSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Update with different status, but stale timestamp, should be ignored + timestamp = timestamp - 1000; + vm.expectEmit(false, false, false, false); + emit UpdateIgnored(true, 0, true, 0); // arguments are dummy values + // TODO: how can we check that an AnswerUpdated event was NOT emitted + s_optimismSequencerUptimeFeed.updateStatus(false, uint64(timestamp)); + } +} + +contract OptimismSequencerUptimeFeed_AggregatorV3Interface is OptimismSequencerUptimeFeedTest { + /// @notice it should return valid answer from getRoundData and latestRoundData + function test_AggregatorV3Interface() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables + uint80 roundId; + int256 answer; + uint256 startedAt; + uint256 updatedAt; + uint80 answeredInRound; + + // Checks initial state + (roundId, answer, startedAt, updatedAt, answeredInRound) = s_optimismSequencerUptimeFeed.latestRoundData(); + assertEq(roundId, 1); + assertEq(answer, 0); + assertEq(answeredInRound, roundId); + assertEq(startedAt, updatedAt); + + // Submits status update with different status and newer timestamp, should update + uint256 timestamp = startedAt + 1000; + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + (roundId, answer, startedAt, updatedAt, answeredInRound) = s_optimismSequencerUptimeFeed.getRoundData(2); + assertEq(roundId, 2); + assertEq(answer, 1); + assertEq(answeredInRound, roundId); + assertEq(startedAt, timestamp); + assertLe(updatedAt, startedAt); + + // Saves round 
2 data + uint80 roundId2 = roundId; + int256 answer2 = answer; + uint256 startedAt2 = startedAt; + uint256 updatedAt2 = updatedAt; + uint80 answeredInRound2 = answeredInRound; + + // Checks that last round is still returning the correct data + (roundId, answer, startedAt, updatedAt, answeredInRound) = s_optimismSequencerUptimeFeed.getRoundData(1); + assertEq(roundId, 1); + assertEq(answer, 0); + assertEq(answeredInRound, roundId); + assertEq(startedAt, updatedAt); + + // Assert latestRoundData corresponds to latest round id + (roundId, answer, startedAt, updatedAt, answeredInRound) = s_optimismSequencerUptimeFeed.latestRoundData(); + assertEq(roundId2, roundId); + assertEq(answer2, answer); + assertEq(startedAt2, startedAt); + assertEq(updatedAt2, updatedAt); + assertEq(answeredInRound2, answeredInRound); + } + + /// @notice it should revert from #getRoundData when round does not yet exist (future roundId) + function test_RevertGetRoundDataWhenRoundDoesNotExistYet() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l1OwnerAddr, s_l1OwnerAddr); + + // Gets data from a round that has not happened yet + vm.expectRevert(OptimismSequencerUptimeFeed.NoDataPresent.selector); + s_optimismSequencerUptimeFeed.getRoundData(2); + } + + /// @notice it should revert from #getAnswer when round does not yet exist (future roundId) + function test_RevertGetAnswerWhenRoundDoesNotExistYet() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l1OwnerAddr, s_l1OwnerAddr); + + // Gets data from a round that has not happened yet + vm.expectRevert(OptimismSequencerUptimeFeed.NoDataPresent.selector); + s_optimismSequencerUptimeFeed.getAnswer(2); + } + + /// @notice it should revert from #getTimestamp when round does not yet exist (future roundId) + function test_RevertGetTimestampWhenRoundDoesNotExistYet() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l1OwnerAddr, s_l1OwnerAddr); + + // Gets data 
from a round that has not happened yet + vm.expectRevert(OptimismSequencerUptimeFeed.NoDataPresent.selector); + s_optimismSequencerUptimeFeed.getTimestamp(2); + } +} + +contract OptimismSequencerUptimeFeed_ProtectReadsOnAggregatorV2V3InterfaceFunctions is OptimismSequencerUptimeFeedTest { + /// @notice it should disallow reads on AggregatorV2V3Interface functions when consuming contract is not whitelisted + function test_AggregatorV2V3InterfaceDisallowReadsIfConsumingContractIsNotWhitelisted() public { + // Deploys a FeedConsumer contract + FeedConsumer feedConsumer = new FeedConsumer(address(s_optimismSequencerUptimeFeed)); + + // Sanity - consumer is not whitelisted + assertEq(s_optimismSequencerUptimeFeed.checkEnabled(), true); + assertEq(s_optimismSequencerUptimeFeed.hasAccess(address(feedConsumer), abi.encode("")), false); + + // Asserts reads are not possible from consuming contract + vm.expectRevert("No access"); + feedConsumer.latestAnswer(); + vm.expectRevert("No access"); + feedConsumer.latestRoundData(); + } + + /// @notice it should allow reads on AggregatorV2V3Interface functions when consuming contract is whitelisted + function test_AggregatorV2V3InterfaceAllowReadsIfConsumingContractIsWhitelisted() public { + // Deploys a FeedConsumer contract + FeedConsumer feedConsumer = new FeedConsumer(address(s_optimismSequencerUptimeFeed)); + + // Whitelist consumer + s_optimismSequencerUptimeFeed.addAccess(address(feedConsumer)); + + // Sanity - consumer is whitelisted + assertEq(s_optimismSequencerUptimeFeed.checkEnabled(), true); + assertEq(s_optimismSequencerUptimeFeed.hasAccess(address(feedConsumer), abi.encode("")), true); + + // Asserts reads are possible from consuming contract + (uint80 roundId, int256 answer, , , ) = feedConsumer.latestRoundData(); + assertEq(feedConsumer.latestAnswer(), 0); + assertEq(roundId, 1); + assertEq(answer, 0); + } +} + +contract OptimismSequencerUptimeFeed_GasCosts is OptimismSequencerUptimeFeedTest { + /// @notice it 
should consume a known amount of gas for updates + function test_GasCosts() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Assert initial conditions + uint256 timestamp = s_optimismSequencerUptimeFeed.latestTimestamp(); + assertEq(s_optimismSequencerUptimeFeed.latestAnswer(), 0); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed; + uint256 gasStart; + uint256 gasFinal; + + // measures gas used for no update + expectedGasUsed = 10197; // NOTE: used to be 38594 in hardhat tests + gasStart = gasleft(); + s_optimismSequencerUptimeFeed.updateStatus(false, uint64(timestamp + 1000)); + gasFinal = gasleft(); + assertEq(s_optimismSequencerUptimeFeed.latestAnswer(), 0); + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + + // measures gas used for update + expectedGasUsed = 33348; // NOTE: used to be 60170 in hardhat tests + gasStart = gasleft(); + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp + 1000)); + gasFinal = gasleft(); + assertEq(s_optimismSequencerUptimeFeed.latestAnswer(), 1); + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } +} + +contract OptimismSequencerUptimeFeed_AggregatorInterfaceGasCosts is OptimismSequencerUptimeFeedTest { + /// @notice it should consume a known amount of gas for getRoundData(uint80) + function test_GasUsageForGetRoundData() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 4504; // NOTE: used to be 30952 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = 
s_optimismSequencerUptimeFeed.latestTimestamp() + 1000; + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_optimismSequencerUptimeFeed.getRoundData(1); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for latestRoundData() + function test_GasUsageForLatestRoundData() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 2154; // NOTE: used to be 28523 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_optimismSequencerUptimeFeed.latestTimestamp() + 1000; + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_optimismSequencerUptimeFeed.latestRoundData(); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for latestAnswer() + function test_GasUsageForLatestAnswer() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 1722; // NOTE: used to be 28329 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_optimismSequencerUptimeFeed.latestTimestamp() + 1000; + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas 
usage + gasStart = gasleft(); + s_optimismSequencerUptimeFeed.latestAnswer(); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for latestTimestamp() + function test_GasUsageForLatestTimestamp() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 1598; // NOTE: used to be 28229 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_optimismSequencerUptimeFeed.latestTimestamp() + 1000; + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_optimismSequencerUptimeFeed.latestTimestamp(); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for latestRound() + function test_GasUsageForLatestRound() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 1632; // NOTE: used to be 28245 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_optimismSequencerUptimeFeed.latestTimestamp() + 1000; + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_optimismSequencerUptimeFeed.latestRound(); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range 
+ assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for getAnswer() + function test_GasUsageForGetAnswer() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 3929; // NOTE: used to be 30682 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_optimismSequencerUptimeFeed.latestTimestamp() + 1000; + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_optimismSequencerUptimeFeed.getAnswer(1); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for getTimestamp() + function test_GasUsageForGetTimestamp() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockOptimismL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 3817; // NOTE: used to be 30570 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_optimismSequencerUptimeFeed.latestTimestamp() + 1000; + s_optimismSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_optimismSequencerUptimeFeed.getTimestamp(1); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } +} diff --git 
a/contracts/src/v0.8/l2ep/test/v1_0_0/optimism/OptimismValidator.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/optimism/OptimismValidator.t.sol new file mode 100644 index 00000000..93643968 --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/optimism/OptimismValidator.t.sol @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {MockOptimismL1CrossDomainMessenger} from "../../../../tests/MockOptimismL1CrossDomainMessenger.sol"; +import {MockOptimismL2CrossDomainMessenger} from "../../../../tests/MockOptimismL2CrossDomainMessenger.sol"; +import {OptimismSequencerUptimeFeed} from "../../../dev/optimism/OptimismSequencerUptimeFeed.sol"; +import {OptimismValidator} from "../../../dev/optimism/OptimismValidator.sol"; +import {L2EPTest} from "../L2EPTest.t.sol"; + +contract OptimismValidatorTest is L2EPTest { + /// Helper constants + address internal constant L2_SEQ_STATUS_RECORDER_ADDRESS = 0x491B1dDA0A8fa069bbC1125133A975BF4e85a91b; + uint32 internal constant INIT_GAS_LIMIT = 1900000; + + /// L2EP contracts + MockOptimismL1CrossDomainMessenger internal s_mockOptimismL1CrossDomainMessenger; + MockOptimismL2CrossDomainMessenger internal s_mockOptimismL2CrossDomainMessenger; + OptimismSequencerUptimeFeed internal s_optimismSequencerUptimeFeed; + OptimismValidator internal s_optimismValidator; + + /// Events + event SentMessage(address indexed target, address sender, bytes message, uint256 messageNonce, uint256 gasLimit); + + /// Setup + function setUp() public { + s_mockOptimismL1CrossDomainMessenger = new MockOptimismL1CrossDomainMessenger(); + s_mockOptimismL2CrossDomainMessenger = new MockOptimismL2CrossDomainMessenger(); + + s_optimismSequencerUptimeFeed = new OptimismSequencerUptimeFeed( + address(s_mockOptimismL1CrossDomainMessenger), + address(s_mockOptimismL2CrossDomainMessenger), + true + ); + + s_optimismValidator = new OptimismValidator( + address(s_mockOptimismL1CrossDomainMessenger), + address(s_optimismSequencerUptimeFeed), + 
INIT_GAS_LIMIT + ); + } +} + +contract OptimismValidator_SetGasLimit is OptimismValidatorTest { + /// @notice it correctly updates the gas limit + function test_CorrectlyUpdatesTheGasLimit() public { + uint32 newGasLimit = 2000000; + assertEq(s_optimismValidator.getGasLimit(), INIT_GAS_LIMIT); + s_optimismValidator.setGasLimit(newGasLimit); + assertEq(s_optimismValidator.getGasLimit(), newGasLimit); + } +} + +contract OptimismValidator_Validate is OptimismValidatorTest { + /// @notice it reverts if called by account with no access + function test_RevertsIfCalledByAnAccountWithNoAccess() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("No access"); + s_optimismValidator.validate(0, 0, 1, 1); + } + + /// @notice it posts sequencer status when there is not status change + function test_PostSequencerStatusWhenThereIsNotStatusChange() public { + // Gives access to the s_eoaValidator + s_optimismValidator.addAccess(s_eoaValidator); + + // Sets block.timestamp to a later date + uint256 futureTimestampInSeconds = block.timestamp + 5000; + vm.startPrank(s_eoaValidator); + vm.warp(futureTimestampInSeconds); + + // Sets up the expected event data + vm.expectEmit(false, false, false, true); + emit SentMessage( + L2_SEQ_STATUS_RECORDER_ADDRESS, // target + address(s_optimismValidator), // sender + abi.encodeWithSelector(OptimismSequencerUptimeFeed.updateStatus.selector, false, futureTimestampInSeconds), // message + 0, // nonce + INIT_GAS_LIMIT // gas limit + ); + + // Runs the function (which produces the event to test) + s_optimismValidator.validate(0, 0, 0, 0); + } + + /// @notice it post sequencer offline + function test_PostSequencerOffline() public { + // Gives access to the s_eoaValidator + s_optimismValidator.addAccess(s_eoaValidator); + + // Sets block.timestamp to a later date + uint256 futureTimestampInSeconds = block.timestamp + 10000; + vm.startPrank(s_eoaValidator); + vm.warp(futureTimestampInSeconds); + + // Sets up the expected event data + 
vm.expectEmit(false, false, false, true); + emit SentMessage( + L2_SEQ_STATUS_RECORDER_ADDRESS, // target + address(s_optimismValidator), // sender + abi.encodeWithSelector(OptimismSequencerUptimeFeed.updateStatus.selector, true, futureTimestampInSeconds), // message + 0, // nonce + INIT_GAS_LIMIT // gas limit + ); + + // Runs the function (which produces the event to test) + s_optimismValidator.validate(0, 0, 1, 1); + } +} diff --git a/contracts/src/v0.8/l2ep/test/v1_0_0/scroll/ScrollCrossDomainForwarder.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/scroll/ScrollCrossDomainForwarder.t.sol new file mode 100644 index 00000000..f921fa92 --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/scroll/ScrollCrossDomainForwarder.t.sol @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {MockScrollCrossDomainMessenger} from "../../mocks/scroll/MockScrollCrossDomainMessenger.sol"; +import {ScrollCrossDomainForwarder} from "../../../dev/scroll/ScrollCrossDomainForwarder.sol"; +import {Greeter} from "../../../../tests/Greeter.sol"; +import {L2EPTest} from "../L2EPTest.t.sol"; + +contract ScrollCrossDomainForwarderTest is L2EPTest { + /// Contracts + MockScrollCrossDomainMessenger internal s_mockScrollCrossDomainMessenger; + ScrollCrossDomainForwarder internal s_scrollCrossDomainForwarder; + Greeter internal s_greeter; + + /// Events + event L1OwnershipTransferRequested(address indexed from, address indexed to); + event L1OwnershipTransferred(address indexed from, address indexed to); + + /// Setup + function setUp() public { + // Deploys contracts + vm.startPrank(s_l1OwnerAddr); + s_mockScrollCrossDomainMessenger = new MockScrollCrossDomainMessenger(s_l1OwnerAddr); + s_scrollCrossDomainForwarder = new ScrollCrossDomainForwarder(s_mockScrollCrossDomainMessenger, s_l1OwnerAddr); + s_greeter = new Greeter(address(s_scrollCrossDomainForwarder)); + vm.stopPrank(); + } +} + +contract ScrollCrossDomainForwarder_Constructor is 
ScrollCrossDomainForwarderTest { + /// @notice it should have been deployed with the correct initial state + function test_InitialState() public { + // it should set the owner correctly + assertEq(s_scrollCrossDomainForwarder.owner(), s_l1OwnerAddr); + + // it should set the l1Owner correctly + assertEq(s_scrollCrossDomainForwarder.l1Owner(), s_l1OwnerAddr); + + // it should set the crossdomain messenger correctly + assertEq(s_scrollCrossDomainForwarder.crossDomainMessenger(), address(s_mockScrollCrossDomainMessenger)); + + // it should set the typeAndVersion correctly + assertEq(s_scrollCrossDomainForwarder.typeAndVersion(), "ScrollCrossDomainForwarder 1.0.0"); + } +} + +contract ScrollCrossDomainForwarder_Forward is ScrollCrossDomainForwarderTest { + /// @notice it should not be callable by unknown address + function test_NotCallableByUnknownAddress() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_scrollCrossDomainForwarder.forward(address(s_greeter), abi.encode("")); + } + + /// @notice it should be callable by crossdomain messenger address / L1 owner + function test_Forward() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Defines the cross domain message to send + string memory greeting = "hello"; + + // Sends the message + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainForwarder), // target + 0, // value + encodeCrossDomainSetGreetingMsg(s_scrollCrossDomainForwarder.forward.selector, address(s_greeter), greeting), // message + 0 // gas limit + ); + + // Checks that the greeter got the message + assertEq(s_greeter.greeting(), greeting); + } + + /// @notice it should revert when contract call reverts + function test_ForwardRevert() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends an invalid message + vm.expectRevert("Invalid greeting length"); + s_mockScrollCrossDomainMessenger.sendMessage( + 
address(s_scrollCrossDomainForwarder), // target + 0, // value + encodeCrossDomainSetGreetingMsg(s_scrollCrossDomainForwarder.forward.selector, address(s_greeter), ""), // message + 0 // gas limit + ); + } +} + +contract ScrollCrossDomainForwarder_TransferL1Ownership is ScrollCrossDomainForwarderTest { + /// @notice it should not be callable by non-owners + function test_NotCallableByNonOwners() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_scrollCrossDomainForwarder.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should not be callable by L2 owner + function test_NotCallableByL2Owner() public { + vm.startPrank(s_l1OwnerAddr); + assertEq(s_scrollCrossDomainForwarder.owner(), s_l1OwnerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_scrollCrossDomainForwarder.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should be callable by current L1 owner + function test_CallableByL1Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Defines the cross domain message to send + vm.expectEmit(); + emit L1OwnershipTransferRequested(s_scrollCrossDomainForwarder.l1Owner(), s_strangerAddr); + + // Sends the message + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainForwarder), // target + 0, // value + abi.encodeWithSelector(s_scrollCrossDomainForwarder.transferL1Ownership.selector, s_strangerAddr), // message + 0 // gas limit + ); + } + + /// @notice it should be callable by current L1 owner to zero address + function test_CallableByL1OwnerOrZeroAddress() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Defines the cross domain message to send + vm.expectEmit(); + emit L1OwnershipTransferRequested(s_scrollCrossDomainForwarder.l1Owner(), address(0)); + + // Sends the message + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainForwarder), // target + 0, // value + 
abi.encodeWithSelector(s_scrollCrossDomainForwarder.transferL1Ownership.selector, address(0)), // message + 0 // gas limit + ); + } +} + +contract ScrollCrossDomainForwarder_AcceptL1Ownership is ScrollCrossDomainForwarderTest { + /// @notice it should not be callable by non pending-owners + function test_NotCallableByNonPendingOwners() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends the message + vm.expectRevert("Must be proposed L1 owner"); + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainForwarder), // target + 0, // value + abi.encodeWithSelector(s_scrollCrossDomainForwarder.acceptL1Ownership.selector), // message + 0 // gas limit + ); + } + + /// @notice it should be callable by pending L1 owner + function test_CallableByPendingL1Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Request ownership transfer + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainForwarder), // target + 0, // value + abi.encodeWithSelector(s_scrollCrossDomainForwarder.transferL1Ownership.selector, s_strangerAddr), // message + 0 // gas limit + ); + + // Sets a mock message sender + s_mockScrollCrossDomainMessenger._setMockMessageSender(s_strangerAddr); + + // Prepares expected event payload + vm.expectEmit(); + emit L1OwnershipTransferred(s_l1OwnerAddr, s_strangerAddr); + + // Accepts ownership transfer request + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainForwarder), // target + 0, // value + abi.encodeWithSelector(s_scrollCrossDomainForwarder.acceptL1Ownership.selector, s_strangerAddr), // message + 0 // gas limit + ); + + // Asserts that the ownership was actually transferred + assertEq(s_scrollCrossDomainForwarder.l1Owner(), s_strangerAddr); + } +} diff --git a/contracts/src/v0.8/l2ep/test/v1_0_0/scroll/ScrollCrossDomainGovernor.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/scroll/ScrollCrossDomainGovernor.t.sol new 
file mode 100644 index 00000000..9c444604 --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/scroll/ScrollCrossDomainGovernor.t.sol @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {MockScrollCrossDomainMessenger} from "../../mocks/scroll/MockScrollCrossDomainMessenger.sol"; +import {ScrollCrossDomainGovernor} from "../../../dev/scroll/ScrollCrossDomainGovernor.sol"; +import {Greeter} from "../../../../tests/Greeter.sol"; +import {L2EPTest} from "../L2EPTest.t.sol"; + +import {MultiSend} from "../../../../vendor/MultiSend.sol"; + +contract ScrollCrossDomainGovernorTest is L2EPTest { + /// Contracts + MockScrollCrossDomainMessenger internal s_mockScrollCrossDomainMessenger; + ScrollCrossDomainGovernor internal s_scrollCrossDomainGovernor; + MultiSend internal s_multiSend; + Greeter internal s_greeter; + + /// Events + event L1OwnershipTransferRequested(address indexed from, address indexed to); + event L1OwnershipTransferred(address indexed from, address indexed to); + + /// Setup + function setUp() public { + // Deploys contracts + vm.startPrank(s_l1OwnerAddr); + s_mockScrollCrossDomainMessenger = new MockScrollCrossDomainMessenger(s_l1OwnerAddr); + s_scrollCrossDomainGovernor = new ScrollCrossDomainGovernor(s_mockScrollCrossDomainMessenger, s_l1OwnerAddr); + s_greeter = new Greeter(address(s_scrollCrossDomainGovernor)); + s_multiSend = new MultiSend(); + vm.stopPrank(); + } +} + +contract ScrollCrossDomainGovernor_Constructor is ScrollCrossDomainGovernorTest { + /// @notice it should have been deployed with the correct initial state + function test_InitialState() public { + // it should set the owner correctly + assertEq(s_scrollCrossDomainGovernor.owner(), s_l1OwnerAddr); + + // it should set the l1Owner correctly + assertEq(s_scrollCrossDomainGovernor.l1Owner(), s_l1OwnerAddr); + + // it should set the crossdomain messenger correctly + assertEq(s_scrollCrossDomainGovernor.crossDomainMessenger(), 
address(s_mockScrollCrossDomainMessenger)); + + // it should set the typeAndVersion correctly + assertEq(s_scrollCrossDomainGovernor.typeAndVersion(), "ScrollCrossDomainGovernor 1.0.0"); + } +} + +contract ScrollCrossDomainGovernor_Forward is ScrollCrossDomainGovernorTest { + /// @notice it should not be callable by unknown address + function test_NotCallableByUnknownAddress() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger or owner"); + s_scrollCrossDomainGovernor.forward(address(s_greeter), abi.encode("")); + } + + /// @notice it should be callable by crossdomain messenger address / L1 owner + function test_Forward() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Defines the cross domain message to send + string memory greeting = "hello"; + + // Sends the message + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainGovernor), // target + 0, // value + encodeCrossDomainSetGreetingMsg(s_scrollCrossDomainGovernor.forward.selector, address(s_greeter), greeting), // message + 0 // gas limit + ); + + // Checks that the greeter got the message + assertEq(s_greeter.greeting(), greeting); + } + + /// @notice it should revert when contract call reverts + function test_ForwardRevert() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends an invalid message + vm.expectRevert("Invalid greeting length"); + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainGovernor), // target + 0, // value + encodeCrossDomainSetGreetingMsg(s_scrollCrossDomainGovernor.forward.selector, address(s_greeter), ""), // message + 0 // gas limit + ); + } + + /// @notice it should be callable by L2 owner + function test_CallableByL2Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_l1OwnerAddr); + + // Defines the cross domain message to send + string memory greeting = "hello"; + + // Sends the message + 
s_scrollCrossDomainGovernor.forward( + address(s_greeter), + abi.encodeWithSelector(s_greeter.setGreeting.selector, greeting) + ); + + // Checks that the greeter message was updated + assertEq(s_greeter.greeting(), greeting); + } +} + +contract ScrollCrossDomainGovernor_ForwardDelegate is ScrollCrossDomainGovernorTest { + /// @notice it should not be callable by unknown address + function test_NotCallableByUnknownAddress() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger or owner"); + s_scrollCrossDomainGovernor.forwardDelegate(address(s_greeter), abi.encode("")); + } + + /// @notice it should be callable by crossdomain messenger address / L1 owner + function test_CallableByCrossDomainMessengerAddressOrL1Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends the message + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainGovernor), // target + 0, // value + encodeCrossDomainMultiSendMsg( + s_scrollCrossDomainGovernor.forwardDelegate.selector, + address(s_multiSend), + abi.encodePacked(encodeMultiSendTx(address(s_greeter), "foo"), encodeMultiSendTx(address(s_greeter), "bar")) + ), // message + 0 // gas limit + ); + + // Checks that the greeter message was updated + assertEq(s_greeter.greeting(), "bar"); + } + + /// @notice it should be callable by L2 owner + function test_CallableByL2Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_l1OwnerAddr); + + // Sends the message + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainGovernor), // target + 0, // value + encodeCrossDomainMultiSendMsg( + s_scrollCrossDomainGovernor.forwardDelegate.selector, + address(s_multiSend), + abi.encodePacked(encodeMultiSendTx(address(s_greeter), "foo"), encodeMultiSendTx(address(s_greeter), "bar")) + ), // message + 0 // gas limit + ); + + // Checks that the greeter message was updated + assertEq(s_greeter.greeting(), "bar"); + } + + 
/// @notice it should revert batch when one call fails + function test_RevertsBatchWhenOneCallFails() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends an invalid message (empty transaction data is not allowed) + vm.expectRevert("Governor delegatecall reverted"); + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainGovernor), // target + 0, // value + encodeCrossDomainMultiSendMsg( + s_scrollCrossDomainGovernor.forwardDelegate.selector, + address(s_multiSend), + abi.encodePacked(encodeMultiSendTx(address(s_greeter), "foo"), encodeMultiSendTx(address(s_greeter), "")) + ), // message + 0 // gas limit + ); + + // Checks that the greeter message is unchanged + assertEq(s_greeter.greeting(), ""); + } + + /// @notice it should bubble up revert when contract call reverts + function test_BubbleUpRevert() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends an invalid message (empty transaction data is not allowed) + vm.expectRevert("Greeter: revert triggered"); + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainGovernor), // target + 0, // value + abi.encodeWithSelector( + ScrollCrossDomainGovernor.forwardDelegate.selector, + address(s_greeter), + abi.encodeWithSelector(Greeter.triggerRevert.selector) + ), // message + 0 // gas limit + ); + } +} + +contract ScrollCrossDomainGovernor_TransferL1Ownership is ScrollCrossDomainGovernorTest { + /// @notice it should not be callable by non-owners + function test_NotCallableByNonOwners() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + s_scrollCrossDomainGovernor.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should not be callable by L2 owner + function test_NotCallableByL2Owner() public { + vm.startPrank(s_l1OwnerAddr); + assertEq(s_scrollCrossDomainGovernor.owner(), s_l1OwnerAddr); + vm.expectRevert("Sender is not the L2 messenger"); + 
s_scrollCrossDomainGovernor.transferL1Ownership(s_strangerAddr); + } + + /// @notice it should be callable by current L1 owner + function test_CallableByL1Owner() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Defines the cross domain message to send + vm.expectEmit(); + emit L1OwnershipTransferRequested(s_scrollCrossDomainGovernor.l1Owner(), s_strangerAddr); + + // Sends the message + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainGovernor), // target + 0, // value + abi.encodeWithSelector(s_scrollCrossDomainGovernor.transferL1Ownership.selector, s_strangerAddr), // message + 0 // gas limit + ); + } + + /// @notice it should be callable by current L1 owner to zero address + function test_CallableByL1OwnerOrZeroAddress() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Defines the cross domain message to send + vm.expectEmit(); + emit L1OwnershipTransferRequested(s_scrollCrossDomainGovernor.l1Owner(), address(0)); + + // Sends the message + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainGovernor), // target + 0, // value + abi.encodeWithSelector(s_scrollCrossDomainGovernor.transferL1Ownership.selector, address(0)), // message + 0 // gas limit + ); + } +} + +contract ScrollCrossDomainGovernor_AcceptL1Ownership is ScrollCrossDomainGovernorTest { + /// @notice it should not be callable by non pending-owners + function test_NotCallableByNonPendingOwners() public { + // Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Sends the message + vm.expectRevert("Must be proposed L1 owner"); + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainGovernor), // target + 0, // value + abi.encodeWithSelector(s_scrollCrossDomainGovernor.acceptL1Ownership.selector), // message + 0 // gas limit + ); + } + + /// @notice it should be callable by pending L1 owner + function test_CallableByPendingL1Owner() public { + // 
Sets msg.sender and tx.origin + vm.startPrank(s_strangerAddr); + + // Request ownership transfer + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainGovernor), // target + 0, // value + abi.encodeWithSelector(s_scrollCrossDomainGovernor.transferL1Ownership.selector, s_strangerAddr), // message + 0 // gas limit + ); + + // Sets a mock message sender + s_mockScrollCrossDomainMessenger._setMockMessageSender(s_strangerAddr); + + // Prepares expected event payload + vm.expectEmit(); + emit L1OwnershipTransferred(s_l1OwnerAddr, s_strangerAddr); + + // Accepts ownership transfer request + s_mockScrollCrossDomainMessenger.sendMessage( + address(s_scrollCrossDomainGovernor), // target + 0, // value + abi.encodeWithSelector(s_scrollCrossDomainGovernor.acceptL1Ownership.selector, s_strangerAddr), // message + 0 // gas limit + ); + + // Asserts that the ownership was actually transferred + assertEq(s_scrollCrossDomainGovernor.l1Owner(), s_strangerAddr); + } +} diff --git a/contracts/src/v0.8/l2ep/test/v1_0_0/scroll/ScrollSequencerUptimeFeed.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/scroll/ScrollSequencerUptimeFeed.t.sol new file mode 100644 index 00000000..520fbf6d --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/scroll/ScrollSequencerUptimeFeed.t.sol @@ -0,0 +1,528 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {MockScrollL1CrossDomainMessenger} from "../../mocks/scroll/MockScrollL1CrossDomainMessenger.sol"; +import {MockScrollL2CrossDomainMessenger} from "../../mocks/scroll/MockScrollL2CrossDomainMessenger.sol"; +import {ScrollSequencerUptimeFeed} from "../../../dev/scroll/ScrollSequencerUptimeFeed.sol"; +import {FeedConsumer} from "../../../../tests/FeedConsumer.sol"; +import {L2EPTest} from "../L2EPTest.t.sol"; + +contract ScrollSequencerUptimeFeedTest is L2EPTest { + /// Constants + uint256 internal constant GAS_USED_DEVIATION = 100; + + /// L2EP contracts + MockScrollL1CrossDomainMessenger internal 
s_mockScrollL1CrossDomainMessenger; + MockScrollL2CrossDomainMessenger internal s_mockScrollL2CrossDomainMessenger; + ScrollSequencerUptimeFeed internal s_scrollSequencerUptimeFeed; + + /// Events + event UpdateIgnored(bool latestStatus, uint64 latestTimestamp, bool incomingStatus, uint64 incomingTimestamp); + event AnswerUpdated(int256 indexed current, uint256 indexed roundId, uint256 updatedAt); + event RoundUpdated(int256 status, uint64 updatedAt); + + /// Setup + function setUp() public { + // Deploys contracts + s_mockScrollL1CrossDomainMessenger = new MockScrollL1CrossDomainMessenger(); + s_mockScrollL2CrossDomainMessenger = new MockScrollL2CrossDomainMessenger(); + s_scrollSequencerUptimeFeed = new ScrollSequencerUptimeFeed( + s_l1OwnerAddr, + address(s_mockScrollL2CrossDomainMessenger), + false + ); + + // Sets mock sender in mock L2 messenger contract + s_mockScrollL2CrossDomainMessenger.setSender(s_l1OwnerAddr); + } +} + +contract ScrollSequencerUptimeFeed_Constructor is ScrollSequencerUptimeFeedTest { + /// @notice it should have been deployed with the correct initial state + function test_InitialState() public { + // L2 cross domain messenger address must not be the zero address + vm.expectRevert(ScrollSequencerUptimeFeed.ZeroAddress.selector); + new ScrollSequencerUptimeFeed(s_l1OwnerAddr, address(0), false); + + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l1OwnerAddr, s_l1OwnerAddr); + + // Checks L1 sender + address actualL1Addr = s_scrollSequencerUptimeFeed.l1Sender(); + assertEq(actualL1Addr, s_l1OwnerAddr); + + // Checks latest round data + (uint80 roundId, int256 answer, , , ) = s_scrollSequencerUptimeFeed.latestRoundData(); + assertEq(roundId, 1); + assertEq(answer, 0); + } +} + +contract ScrollSequencerUptimeFeed_UpdateStatus is ScrollSequencerUptimeFeedTest { + /// @notice it should revert if called by an address that is not the L2 Cross Domain Messenger + function test_RevertIfNotL2CrossDomainMessengerAddr() public { 
+ // Sets msg.sender and tx.origin to an unauthorized address + vm.startPrank(s_strangerAddr, s_strangerAddr); + + // Tries to update the status from an unauthorized account + vm.expectRevert(ScrollSequencerUptimeFeed.InvalidSender.selector); + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(1)); + } + + /// @notice it should revert if called by an address that is not the L2 Cross Domain Messenger and is not the L1 sender + function test_RevertIfNotL2CrossDomainMessengerAddrAndNotL1SenderAddr() public { + // Sets msg.sender and tx.origin to an unauthorized address + vm.startPrank(s_strangerAddr, s_strangerAddr); + + // Sets mock sender in mock L2 messenger contract + s_mockScrollL2CrossDomainMessenger.setSender(s_strangerAddr); + + // Tries to update the status from an unauthorized account + vm.expectRevert(ScrollSequencerUptimeFeed.InvalidSender.selector); + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(1)); + } + + /// @notice it should update status when status has not changed and incoming timestamp is the same as latest + function test_UpdateStatusWhenNoChange() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Fetches the latest timestamp + uint256 timestamp = s_scrollSequencerUptimeFeed.latestTimestamp(); + + // Submits a status update + vm.expectEmit(); + emit AnswerUpdated(1, 2, timestamp); + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + assertEq(s_scrollSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_scrollSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Stores the current round data before updating it + ( + uint80 roundIdBeforeUpdate, + int256 answerBeforeUpdate, + uint256 startedAtBeforeUpdate, + , + uint80 answeredInRoundBeforeUpdate + ) = s_scrollSequencerUptimeFeed.latestRoundData(); + + // Submit another status update with the same status + 
vm.expectEmit(); + emit RoundUpdated(1, uint64(block.timestamp)); + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp + 200)); + assertEq(s_scrollSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_scrollSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Stores the current round data after updating it + ( + uint80 roundIdAfterUpdate, + int256 answerAfterUpdate, + uint256 startedAtAfterUpdate, + uint256 updatedAtAfterUpdate, + uint80 answeredInRoundAfterUpdate + ) = s_scrollSequencerUptimeFeed.latestRoundData(); + + // Verifies the latest round data has been properly updated + assertEq(roundIdAfterUpdate, roundIdBeforeUpdate); + assertEq(answerAfterUpdate, answerBeforeUpdate); + assertEq(startedAtAfterUpdate, startedAtBeforeUpdate); + assertEq(answeredInRoundAfterUpdate, answeredInRoundBeforeUpdate); + assertEq(updatedAtAfterUpdate, block.timestamp); + } + + /// @notice it should update status when status has changed and incoming timestamp is newer than the latest + function test_UpdateStatusWhenStatusChangeAndTimeChange() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Submits a status update + uint256 timestamp = s_scrollSequencerUptimeFeed.latestTimestamp(); + vm.expectEmit(); + emit AnswerUpdated(1, 2, timestamp); + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + assertEq(s_scrollSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_scrollSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Submit another status update, different status, newer timestamp should update + timestamp = timestamp + 200; + vm.expectEmit(); + emit AnswerUpdated(0, 3, timestamp); + s_scrollSequencerUptimeFeed.updateStatus(false, uint64(timestamp)); + assertEq(s_scrollSequencerUptimeFeed.latestAnswer(), 0); + assertEq(s_scrollSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); 
+ } + + /// @notice it should update status when status has changed and incoming timestamp is the same as latest + function test_UpdateStatusWhenStatusChangeAndNoTimeChange() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Fetches the latest timestamp + uint256 timestamp = s_scrollSequencerUptimeFeed.latestTimestamp(); + + // Submits a status update + vm.expectEmit(); + emit AnswerUpdated(1, 2, timestamp); + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + assertEq(s_scrollSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_scrollSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Submit another status update, different status, same timestamp should update + vm.expectEmit(); + emit AnswerUpdated(0, 3, timestamp); + s_scrollSequencerUptimeFeed.updateStatus(false, uint64(timestamp)); + assertEq(s_scrollSequencerUptimeFeed.latestAnswer(), 0); + assertEq(s_scrollSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + } + + /// @notice it should ignore out-of-order updates + function test_IgnoreOutOfOrderUpdates() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Submits a status update + uint256 timestamp = s_scrollSequencerUptimeFeed.latestTimestamp() + 10000; + vm.expectEmit(); + emit AnswerUpdated(1, 2, timestamp); + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + assertEq(s_scrollSequencerUptimeFeed.latestAnswer(), 1); + assertEq(s_scrollSequencerUptimeFeed.latestTimestamp(), uint64(timestamp)); + + // Update with different status, but stale timestamp, should be ignored + timestamp = timestamp - 1000; + vm.expectEmit(false, false, false, false); + emit UpdateIgnored(true, 0, true, 0); // arguments are dummy values + // 
TODO: how can we check that an AnswerUpdated event was NOT emitted + s_scrollSequencerUptimeFeed.updateStatus(false, uint64(timestamp)); + } +} + +contract ScrollSequencerUptimeFeed_AggregatorV3Interface is ScrollSequencerUptimeFeedTest { + /// @notice it should return valid answer from getRoundData and latestRoundData + function test_AggregatorV3Interface() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables + uint80 roundId; + int256 answer; + uint256 startedAt; + uint256 updatedAt; + uint80 answeredInRound; + + // Checks initial state + (roundId, answer, startedAt, updatedAt, answeredInRound) = s_scrollSequencerUptimeFeed.latestRoundData(); + assertEq(roundId, 1); + assertEq(answer, 0); + assertEq(answeredInRound, roundId); + assertEq(startedAt, updatedAt); + + // Submits status update with different status and newer timestamp, should update + uint256 timestamp = startedAt + 1000; + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + (roundId, answer, startedAt, updatedAt, answeredInRound) = s_scrollSequencerUptimeFeed.getRoundData(2); + assertEq(roundId, 2); + assertEq(answer, 1); + assertEq(answeredInRound, roundId); + assertEq(startedAt, timestamp); + assertLe(updatedAt, startedAt); + + // Saves round 2 data + uint80 roundId2 = roundId; + int256 answer2 = answer; + uint256 startedAt2 = startedAt; + uint256 updatedAt2 = updatedAt; + uint80 answeredInRound2 = answeredInRound; + + // Checks that last round is still returning the correct data + (roundId, answer, startedAt, updatedAt, answeredInRound) = s_scrollSequencerUptimeFeed.getRoundData(1); + assertEq(roundId, 1); + assertEq(answer, 0); + assertEq(answeredInRound, roundId); + assertEq(startedAt, updatedAt); + + // Assert latestRoundData corresponds to latest round id + (roundId, answer, startedAt, updatedAt, answeredInRound) = 
s_scrollSequencerUptimeFeed.latestRoundData(); + assertEq(roundId2, roundId); + assertEq(answer2, answer); + assertEq(startedAt2, startedAt); + assertEq(updatedAt2, updatedAt); + assertEq(answeredInRound2, answeredInRound); + } + + /// @notice it should revert from #getRoundData when round does not yet exist (future roundId) + function test_RevertGetRoundDataWhenRoundDoesNotExistYet() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l1OwnerAddr, s_l1OwnerAddr); + + // Gets data from a round that has not happened yet + vm.expectRevert(ScrollSequencerUptimeFeed.NoDataPresent.selector); + s_scrollSequencerUptimeFeed.getRoundData(2); + } + + /// @notice it should revert from #getAnswer when round does not yet exist (future roundId) + function test_RevertGetAnswerWhenRoundDoesNotExistYet() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l1OwnerAddr, s_l1OwnerAddr); + + // Gets data from a round that has not happened yet + vm.expectRevert(ScrollSequencerUptimeFeed.NoDataPresent.selector); + s_scrollSequencerUptimeFeed.getAnswer(2); + } + + /// @notice it should revert from #getTimestamp when round does not yet exist (future roundId) + function test_RevertGetTimestampWhenRoundDoesNotExistYet() public { + // Sets msg.sender and tx.origin to a valid address + vm.startPrank(s_l1OwnerAddr, s_l1OwnerAddr); + + // Gets data from a round that has not happened yet + vm.expectRevert(ScrollSequencerUptimeFeed.NoDataPresent.selector); + s_scrollSequencerUptimeFeed.getTimestamp(2); + } +} + +contract ScrollSequencerUptimeFeed_ProtectReadsOnAggregatorV2V3InterfaceFunctions is ScrollSequencerUptimeFeedTest { + /// @notice it should disallow reads on AggregatorV2V3Interface functions when consuming contract is not whitelisted + function test_AggregatorV2V3InterfaceDisallowReadsIfConsumingContractIsNotWhitelisted() public { + // Deploys a FeedConsumer contract + FeedConsumer feedConsumer = new 
FeedConsumer(address(s_scrollSequencerUptimeFeed)); + + // Sanity - consumer is not whitelisted + assertEq(s_scrollSequencerUptimeFeed.checkEnabled(), true); + assertEq(s_scrollSequencerUptimeFeed.hasAccess(address(feedConsumer), abi.encode("")), false); + + // Asserts reads are not possible from consuming contract + vm.expectRevert("No access"); + feedConsumer.latestAnswer(); + vm.expectRevert("No access"); + feedConsumer.latestRoundData(); + } + + /// @notice it should allow reads on AggregatorV2V3Interface functions when consuming contract is whitelisted + function test_AggregatorV2V3InterfaceAllowReadsIfConsumingContractIsWhitelisted() public { + // Deploys a FeedConsumer contract + FeedConsumer feedConsumer = new FeedConsumer(address(s_scrollSequencerUptimeFeed)); + + // Whitelist consumer + s_scrollSequencerUptimeFeed.addAccess(address(feedConsumer)); + + // Sanity - consumer is whitelisted + assertEq(s_scrollSequencerUptimeFeed.checkEnabled(), true); + assertEq(s_scrollSequencerUptimeFeed.hasAccess(address(feedConsumer), abi.encode("")), true); + + // Asserts reads are possible from consuming contract + (uint80 roundId, int256 answer, , , ) = feedConsumer.latestRoundData(); + assertEq(feedConsumer.latestAnswer(), 0); + assertEq(roundId, 1); + assertEq(answer, 0); + } +} + +contract ScrollSequencerUptimeFeed_GasCosts is ScrollSequencerUptimeFeedTest { + /// @notice it should consume a known amount of gas for updates + function test_GasCosts() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Assert initial conditions + uint256 timestamp = s_scrollSequencerUptimeFeed.latestTimestamp(); + assertEq(s_scrollSequencerUptimeFeed.latestAnswer(), 0); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed; + uint256 gasStart; + uint256 gasFinal; + + // measures gas used for no update + 
expectedGasUsed = 10197; // NOTE: used to be 38594 in hardhat tests + gasStart = gasleft(); + s_scrollSequencerUptimeFeed.updateStatus(false, uint64(timestamp + 1000)); + gasFinal = gasleft(); + assertEq(s_scrollSequencerUptimeFeed.latestAnswer(), 0); + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + + // measures gas used for update + expectedGasUsed = 31644; // NOTE: used to be 58458 in hardhat tests + gasStart = gasleft(); + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp + 1000)); + gasFinal = gasleft(); + assertEq(s_scrollSequencerUptimeFeed.latestAnswer(), 1); + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } +} + +contract ScrollSequencerUptimeFeed_AggregatorInterfaceGasCosts is ScrollSequencerUptimeFeedTest { + /// @notice it should consume a known amount of gas for getRoundData(uint80) + function test_GasUsageForGetRoundData() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 4504; // NOTE: used to be 30952 in hardhat tesst + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_scrollSequencerUptimeFeed.latestTimestamp() + 1000; + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_scrollSequencerUptimeFeed.getRoundData(1); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for latestRoundData() + function test_GasUsageForLatestRoundData() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + 
vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 2154; // NOTE: used to be 28523 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_scrollSequencerUptimeFeed.latestTimestamp() + 1000; + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_scrollSequencerUptimeFeed.latestRoundData(); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for latestAnswer() + function test_GasUsageForLatestAnswer() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 1566; // NOTE: used to be 28229 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_scrollSequencerUptimeFeed.latestTimestamp() + 1000; + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_scrollSequencerUptimeFeed.latestAnswer(); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for latestTimestamp() + function test_GasUsageForLatestTimestamp() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 1459; // NOTE: used to be 28129 in 
hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_scrollSequencerUptimeFeed.latestTimestamp() + 1000; + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_scrollSequencerUptimeFeed.latestTimestamp(); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for latestRound() + function test_GasUsageForLatestRound() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 1470; // NOTE: used to be 28145 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_scrollSequencerUptimeFeed.latestTimestamp() + 1000; + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_scrollSequencerUptimeFeed.latestRound(); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for getAnswer() + function test_GasUsageForGetAnswer() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 3929; // NOTE: used to be 30682 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_scrollSequencerUptimeFeed.latestTimestamp() + 1000; + 
s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_scrollSequencerUptimeFeed.getAnswer(1); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } + + /// @notice it should consume a known amount of gas for getTimestamp() + function test_GasUsageForGetTimestamp() public { + // Sets msg.sender and tx.origin to a valid address + address l2MessengerAddr = address(s_mockScrollL2CrossDomainMessenger); + vm.startPrank(l2MessengerAddr, l2MessengerAddr); + + // Defines helper variables for measuring gas usage + uint256 expectedGasUsed = 3817; // NOTE: used to be 30570 in hardhat tests + uint256 gasStart; + uint256 gasFinal; + + // Initializes a round + uint256 timestamp = s_scrollSequencerUptimeFeed.latestTimestamp() + 1000; + s_scrollSequencerUptimeFeed.updateStatus(true, uint64(timestamp)); + + // Measures gas usage + gasStart = gasleft(); + s_scrollSequencerUptimeFeed.getTimestamp(1); + gasFinal = gasleft(); + + // Checks that gas usage is within expected range + assertGasUsageIsCloseTo(expectedGasUsed, gasStart, gasFinal, GAS_USED_DEVIATION); + } +} diff --git a/contracts/src/v0.8/l2ep/test/v1_0_0/scroll/ScrollValidator.t.sol b/contracts/src/v0.8/l2ep/test/v1_0_0/scroll/ScrollValidator.t.sol new file mode 100644 index 00000000..969c78c7 --- /dev/null +++ b/contracts/src/v0.8/l2ep/test/v1_0_0/scroll/ScrollValidator.t.sol @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {MockScrollL1CrossDomainMessenger} from "../../mocks/scroll/MockScrollL1CrossDomainMessenger.sol"; +import {MockScrollL2CrossDomainMessenger} from "../../mocks/scroll/MockScrollL2CrossDomainMessenger.sol"; +import {ScrollSequencerUptimeFeed} from "../../../dev/scroll/ScrollSequencerUptimeFeed.sol"; +import {ScrollValidator} from "../../../dev/scroll/ScrollValidator.sol"; +import {L2EPTest} 
from "../L2EPTest.t.sol"; + +contract ScrollValidatorTest is L2EPTest { + /// Helper constants + address internal constant L2_SEQ_STATUS_RECORDER_ADDRESS = 0x491B1dDA0A8fa069bbC1125133A975BF4e85a91b; + uint32 internal constant INIT_GAS_LIMIT = 1900000; + + /// L2EP contracts + MockScrollL1CrossDomainMessenger internal s_mockScrollL1CrossDomainMessenger; + MockScrollL2CrossDomainMessenger internal s_mockScrollL2CrossDomainMessenger; + ScrollSequencerUptimeFeed internal s_scrollSequencerUptimeFeed; + ScrollValidator internal s_scrollValidator; + + /// https://github.com/scroll-tech/scroll/blob/03089eaeee1193ff44c532c7038611ae123e7ef3/contracts/src/libraries/IScrollMessenger.sol#L22 + event SentMessage( + address indexed sender, + address indexed target, + uint256 value, + uint256 messageNonce, + uint256 gasLimit, + bytes message + ); + + /// Setup + function setUp() public { + s_mockScrollL1CrossDomainMessenger = new MockScrollL1CrossDomainMessenger(); + s_mockScrollL2CrossDomainMessenger = new MockScrollL2CrossDomainMessenger(); + + s_scrollSequencerUptimeFeed = new ScrollSequencerUptimeFeed( + address(s_mockScrollL1CrossDomainMessenger), + address(s_mockScrollL2CrossDomainMessenger), + true + ); + + s_scrollValidator = new ScrollValidator( + address(s_mockScrollL1CrossDomainMessenger), + address(s_scrollSequencerUptimeFeed), + INIT_GAS_LIMIT + ); + } +} + +contract ScrollValidator_SetGasLimit is ScrollValidatorTest { + /// @notice it correctly updates the gas limit + function test_CorrectlyUpdatesTheGasLimit() public { + uint32 newGasLimit = 2000000; + assertEq(s_scrollValidator.getGasLimit(), INIT_GAS_LIMIT); + s_scrollValidator.setGasLimit(newGasLimit); + assertEq(s_scrollValidator.getGasLimit(), newGasLimit); + } +} + +contract ScrollValidator_Validate is ScrollValidatorTest { + /// @notice it reverts if called by account with no access + function test_RevertsIfCalledByAnAccountWithNoAccess() public { + vm.startPrank(s_strangerAddr); + vm.expectRevert("No 
access"); + s_scrollValidator.validate(0, 0, 1, 1); + } + + /// @notice it posts sequencer status when there is not status change + function test_PostSequencerStatusWhenThereIsNotStatusChange() public { + // Gives access to the s_eoaValidator + s_scrollValidator.addAccess(s_eoaValidator); + + // Sets block.timestamp to a later date + uint256 futureTimestampInSeconds = block.timestamp + 5000; + vm.startPrank(s_eoaValidator); + vm.warp(futureTimestampInSeconds); + + // Sets up the expected event data + vm.expectEmit(false, false, false, true); + emit SentMessage( + address(s_scrollValidator), // sender + L2_SEQ_STATUS_RECORDER_ADDRESS, // target + 0, // value + 0, // nonce + INIT_GAS_LIMIT, // gas limit + abi.encodeWithSelector(ScrollSequencerUptimeFeed.updateStatus.selector, false, futureTimestampInSeconds) // message + ); + + // Runs the function (which produces the event to test) + s_scrollValidator.validate(0, 0, 0, 0); + } + + /// @notice it post sequencer offline + function test_PostSequencerOffline() public { + // Gives access to the s_eoaValidator + s_scrollValidator.addAccess(s_eoaValidator); + + // Sets block.timestamp to a later date + uint256 futureTimestampInSeconds = block.timestamp + 10000; + vm.startPrank(s_eoaValidator); + vm.warp(futureTimestampInSeconds); + + // Sets up the expected event data + vm.expectEmit(false, false, false, true); + emit SentMessage( + address(s_scrollValidator), // sender + L2_SEQ_STATUS_RECORDER_ADDRESS, // target + 0, // value + 0, // nonce + INIT_GAS_LIMIT, // gas limit + abi.encodeWithSelector(ScrollSequencerUptimeFeed.updateStatus.selector, true, futureTimestampInSeconds) // message + ); + + // Runs the function (which produces the event to test) + s_scrollValidator.validate(0, 0, 1, 1); + } +} diff --git a/contracts/src/v0.8/llo-feeds/FeeManager.sol b/contracts/src/v0.8/llo-feeds/FeeManager.sol new file mode 100644 index 00000000..b34695f0 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/FeeManager.sol @@ -0,0 +1,507 
@@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {ConfirmedOwner} from "../shared/access/ConfirmedOwner.sol"; +import {IFeeManager} from "./interfaces/IFeeManager.sol"; +import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol"; +import {IERC165} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; +import {Common} from "./libraries/Common.sol"; +import {IRewardManager} from "./interfaces/IRewardManager.sol"; +import {IWERC20} from "../shared/interfaces/IWERC20.sol"; +import {IERC20} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC20.sol"; +import {Math} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/Math.sol"; +import {SafeERC20} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol"; +import {IVerifierFeeManager} from "./interfaces/IVerifierFeeManager.sol"; + +/** + * @title FeeManager + * @author Michael Fletcher + * @author Austin Born + * @notice This contract is used for the handling of fees required for users verifying reports. + */ +contract FeeManager is IFeeManager, ConfirmedOwner, TypeAndVersionInterface { + using SafeERC20 for IERC20; + + /// @notice list of subscribers and their discounts subscriberDiscounts[subscriber][feedId][token] + mapping(address => mapping(bytes32 => mapping(address => uint256))) public s_subscriberDiscounts; + + /// @notice keep track of any subsidised link that is owed to the reward manager. 
+ mapping(bytes32 => uint256) public s_linkDeficit; + + /// @notice the total discount that can be applied to a fee, 1e18 = 100% discount + uint64 private constant PERCENTAGE_SCALAR = 1e18; + + /// @notice the PLI token address + address public immutable i_linkAddress; + + /// @notice the native token address + address public immutable i_nativeAddress; + + /// @notice the proxy address + address public immutable i_proxyAddress; + + /// @notice the reward manager address + IRewardManager public immutable i_rewardManager; + + // @notice the mask to apply to get the report version + bytes32 private constant REPORT_VERSION_MASK = 0xffff000000000000000000000000000000000000000000000000000000000000; + + // @notice the different report versions + bytes32 private constant REPORT_V1 = 0x0001000000000000000000000000000000000000000000000000000000000000; + + /// @notice the surcharge fee to be paid if paying in native + uint256 public s_nativeSurcharge; + + /// @notice the error thrown if the discount or surcharge is invalid + error InvalidSurcharge(); + + /// @notice the error thrown if the discount is invalid + error InvalidDiscount(); + + /// @notice the error thrown if the address is invalid + error InvalidAddress(); + + /// @notice thrown if msg.value is supplied with a bad quote + error InvalidDeposit(); + + /// @notice thrown if a report has expired + error ExpiredReport(); + + /// @notice thrown if a report has no quote + error InvalidQuote(); + + // @notice thrown when the caller is not authorized + error Unauthorized(); + + // @notice thrown when trying to clear a zero deficit + error ZeroDeficit(); + + /// @notice thrown when trying to pay an address that cannot except funds + error InvalidReceivingAddress(); + + /// @notice Emitted whenever a subscriber's discount is updated + /// @param subscriber address of the subscriber to update discounts for + /// @param feedId Feed ID for the discount + /// @param token Token address for the discount + /// @param discount 
Discount to apply, in relation to the PERCENTAGE_SCALAR + event SubscriberDiscountUpdated(address indexed subscriber, bytes32 indexed feedId, address token, uint64 discount); + + /// @notice Emitted when updating the native surcharge + /// @param newSurcharge Surcharge amount to apply relative to PERCENTAGE_SCALAR + event NativeSurchargeUpdated(uint64 newSurcharge); + + /// @notice Emits when this contract does not have enough PLI to send to the reward manager when paying in native + /// @param rewards Config digest and link fees which could not be subsidised + event InsufficientLink(IRewardManager.FeePayment[] rewards); + + /// @notice Emitted when funds are withdrawn + /// @param adminAddress Address of the admin + /// @param recipient Address of the recipient + /// @param assetAddress Address of the asset withdrawn + /// @param quantity Amount of the asset withdrawn + event Withdraw(address adminAddress, address recipient, address assetAddress, uint192 quantity); + + /// @notice Emits when a deficit has been cleared for a particular config digest + /// @param configDigest Config digest of the deficit cleared + /// @param linkQuantity Amount of PLI required to pay the deficit + event LinkDeficitCleared(bytes32 indexed configDigest, uint256 linkQuantity); + + /// @notice Emits when a fee has been processed + /// @param configDigest Config digest of the fee processed + /// @param subscriber Address of the subscriber who paid the fee + /// @param fee Fee paid + /// @param reward Reward paid + /// @param appliedDiscount Discount applied to the fee + event DiscountApplied( + bytes32 indexed configDigest, + address indexed subscriber, + Common.Asset fee, + Common.Asset reward, + uint256 appliedDiscount + ); + + /** + * @notice Construct the FeeManager contract + * @param _linkAddress The address of the PLI token + * @param _nativeAddress The address of the wrapped ERC-20 version of the native token (represents fee in native or wrapped) + * @param _proxyAddress The 
address of the proxy contract + * @param _rewardManagerAddress The address of the reward manager contract + */ + constructor( + address _linkAddress, + address _nativeAddress, + address _proxyAddress, + address _rewardManagerAddress + ) ConfirmedOwner(msg.sender) { + if ( + _linkAddress == address(0) || + _nativeAddress == address(0) || + _proxyAddress == address(0) || + _rewardManagerAddress == address(0) + ) revert InvalidAddress(); + + i_linkAddress = _linkAddress; + i_nativeAddress = _nativeAddress; + i_proxyAddress = _proxyAddress; + i_rewardManager = IRewardManager(_rewardManagerAddress); + + IERC20(i_linkAddress).approve(address(i_rewardManager), type(uint256).max); + } + + modifier onlyOwnerOrProxy() { + if (msg.sender != i_proxyAddress && msg.sender != owner()) revert Unauthorized(); + _; + } + + modifier onlyProxy() { + if (msg.sender != i_proxyAddress) revert Unauthorized(); + _; + } + + /// @inheritdoc TypeAndVersionInterface + function typeAndVersion() external pure override returns (string memory) { + return "FeeManager 2.0.0"; + } + + /// @inheritdoc IERC165 + function supportsInterface(bytes4 interfaceId) external pure override returns (bool) { + return interfaceId == this.processFee.selector || interfaceId == this.processFeeBulk.selector; + } + + /// @inheritdoc IVerifierFeeManager + function processFee( + bytes calldata payload, + bytes calldata parameterPayload, + address subscriber + ) external payable override onlyProxy { + (Common.Asset memory fee, Common.Asset memory reward, uint256 appliedDiscount) = _processFee( + payload, + parameterPayload, + subscriber + ); + + if (fee.amount == 0) { + _tryReturnChange(subscriber, msg.value); + return; + } + + IFeeManager.FeeAndReward[] memory feeAndReward = new IFeeManager.FeeAndReward[](1); + feeAndReward[0] = IFeeManager.FeeAndReward(bytes32(payload), fee, reward, appliedDiscount); + + if (fee.assetAddress == i_linkAddress) { + _handleFeesAndRewards(subscriber, feeAndReward, 1, 0); + } else { + 
_handleFeesAndRewards(subscriber, feeAndReward, 0, 1); + } + } + + /// @inheritdoc IVerifierFeeManager + function processFeeBulk( + bytes[] calldata payloads, + bytes calldata parameterPayload, + address subscriber + ) external payable override onlyProxy { + FeeAndReward[] memory feesAndRewards = new IFeeManager.FeeAndReward[](payloads.length); + + //keep track of the number of fees to prevent over initialising the FeePayment array within _convertToLinkAndNativeFees + uint256 numberOfLinkFees; + uint256 numberOfNativeFees; + + uint256 feesAndRewardsIndex; + for (uint256 i; i < payloads.length; ++i) { + (Common.Asset memory fee, Common.Asset memory reward, uint256 appliedDiscount) = _processFee( + payloads[i], + parameterPayload, + subscriber + ); + + if (fee.amount != 0) { + feesAndRewards[feesAndRewardsIndex++] = IFeeManager.FeeAndReward( + bytes32(payloads[i]), + fee, + reward, + appliedDiscount + ); + + unchecked { + //keep track of some tallys to make downstream calculations more efficient + if (fee.assetAddress == i_linkAddress) { + ++numberOfLinkFees; + } else { + ++numberOfNativeFees; + } + } + } + } + + if (numberOfLinkFees != 0 || numberOfNativeFees != 0) { + _handleFeesAndRewards(subscriber, feesAndRewards, numberOfLinkFees, numberOfNativeFees); + } else { + _tryReturnChange(subscriber, msg.value); + } + } + + /// @inheritdoc IFeeManager + function getFeeAndReward( + address subscriber, + bytes memory report, + address quoteAddress + ) public view returns (Common.Asset memory, Common.Asset memory, uint256) { + Common.Asset memory fee; + Common.Asset memory reward; + + //get the feedId from the report + bytes32 feedId = bytes32(report); + + //the report needs to be a support version + bytes32 reportVersion = _getReportVersion(feedId); + + //version 1 of the reports don't require quotes, so the fee will be 0 + if (reportVersion == REPORT_V1) { + fee.assetAddress = i_nativeAddress; + reward.assetAddress = i_linkAddress; + return (fee, reward, 0); + } + + 
//verify the quote payload is a supported token + if (quoteAddress != i_nativeAddress && quoteAddress != i_linkAddress) { + revert InvalidQuote(); + } + + //decode the report depending on the version + uint256 linkQuantity; + uint256 nativeQuantity; + uint256 expiresAt; + (, , , nativeQuantity, linkQuantity, expiresAt) = abi.decode( + report, + (bytes32, uint32, uint32, uint192, uint192, uint32) + ); + + //read the timestamp bytes from the report data and verify it has not expired + if (expiresAt < block.timestamp) { + revert ExpiredReport(); + } + + //get the discount being applied + uint256 discount = s_subscriberDiscounts[subscriber][feedId][quoteAddress]; + + //the reward is always set in PLI + reward.assetAddress = i_linkAddress; + reward.amount = Math.ceilDiv(linkQuantity * (PERCENTAGE_SCALAR - discount), PERCENTAGE_SCALAR); + + //calculate either the PLI fee or native fee if it's within the report + if (quoteAddress == i_linkAddress) { + fee.assetAddress = i_linkAddress; + fee.amount = reward.amount; + } else { + uint256 surchargedFee = Math.ceilDiv(nativeQuantity * (PERCENTAGE_SCALAR + s_nativeSurcharge), PERCENTAGE_SCALAR); + + fee.assetAddress = i_nativeAddress; + fee.amount = Math.ceilDiv(surchargedFee * (PERCENTAGE_SCALAR - discount), PERCENTAGE_SCALAR); + } + + //return the fee + return (fee, reward, discount); + } + + /// @inheritdoc IVerifierFeeManager + function setFeeRecipients( + bytes32 configDigest, + Common.AddressAndWeight[] calldata rewardRecipientAndWeights + ) external onlyOwnerOrProxy { + i_rewardManager.setRewardRecipients(configDigest, rewardRecipientAndWeights); + } + + /// @inheritdoc IFeeManager + function setNativeSurcharge(uint64 surcharge) external onlyOwner { + if (surcharge > PERCENTAGE_SCALAR) revert InvalidSurcharge(); + + s_nativeSurcharge = surcharge; + + emit NativeSurchargeUpdated(surcharge); + } + + /// @inheritdoc IFeeManager + function updateSubscriberDiscount( + address subscriber, + bytes32 feedId, + address token, + 
uint64 discount + ) external onlyOwner { + //make sure the discount is not greater than the total discount that can be applied + if (discount > PERCENTAGE_SCALAR) revert InvalidDiscount(); + //make sure the token is either PLI or native + if (token != i_linkAddress && token != i_nativeAddress) revert InvalidAddress(); + + s_subscriberDiscounts[subscriber][feedId][token] = discount; + + emit SubscriberDiscountUpdated(subscriber, feedId, token, discount); + } + + /// @inheritdoc IFeeManager + function withdraw(address assetAddress, address recipient, uint192 quantity) external onlyOwner { + //address 0 is used to withdraw native in the context of withdrawing + if (assetAddress == address(0)) { + (bool success, ) = payable(recipient).call{value: quantity}(""); + + if (!success) revert InvalidReceivingAddress(); + return; + } + + //withdraw the requested asset + IERC20(assetAddress).safeTransfer(recipient, quantity); + + //emit event when funds are withdrawn + emit Withdraw(msg.sender, recipient, assetAddress, uint192(quantity)); + } + + /// @inheritdoc IFeeManager + function linkAvailableForPayment() external view returns (uint256) { + //return the amount of PLI this contact has available to pay rewards + return IERC20(i_linkAddress).balanceOf(address(this)); + } + + /** + * @notice Gets the current version of the report that is encoded as the last two bytes of the feed + * @param feedId feed id to get the report version for + */ + function _getReportVersion(bytes32 feedId) internal pure returns (bytes32) { + return REPORT_VERSION_MASK & feedId; + } + + function _processFee( + bytes calldata payload, + bytes calldata parameterPayload, + address subscriber + ) internal view returns (Common.Asset memory, Common.Asset memory, uint256) { + if (subscriber == address(this)) revert InvalidAddress(); + + //decode the report from the payload + (, bytes memory report) = abi.decode(payload, (bytes32[3], bytes)); + + //get the feedId from the report + bytes32 feedId = 
bytes32(report); + + //v1 doesn't need a quote payload, so skip the decoding + address quote; + if (_getReportVersion(feedId) != REPORT_V1) { + //decode the quote from the bytes + (quote) = abi.decode(parameterPayload, (address)); + } + + //decode the fee, it will always be native or PLI + return getFeeAndReward(subscriber, report, quote); + } + + function _handleFeesAndRewards( + address subscriber, + FeeAndReward[] memory feesAndRewards, + uint256 numberOfLinkFees, + uint256 numberOfNativeFees + ) internal { + IRewardManager.FeePayment[] memory linkRewards = new IRewardManager.FeePayment[](numberOfLinkFees); + IRewardManager.FeePayment[] memory nativeFeeLinkRewards = new IRewardManager.FeePayment[](numberOfNativeFees); + + uint256 totalNativeFee; + uint256 totalNativeFeeLinkValue; + + uint256 linkRewardsIndex; + uint256 nativeFeeLinkRewardsIndex; + + uint256 totalNumberOfFees = numberOfLinkFees + numberOfNativeFees; + for (uint256 i; i < totalNumberOfFees; ++i) { + if (feesAndRewards[i].fee.assetAddress == i_linkAddress) { + linkRewards[linkRewardsIndex++] = IRewardManager.FeePayment( + feesAndRewards[i].configDigest, + uint192(feesAndRewards[i].reward.amount) + ); + } else { + nativeFeeLinkRewards[nativeFeeLinkRewardsIndex++] = IRewardManager.FeePayment( + feesAndRewards[i].configDigest, + uint192(feesAndRewards[i].reward.amount) + ); + totalNativeFee += feesAndRewards[i].fee.amount; + totalNativeFeeLinkValue += feesAndRewards[i].reward.amount; + } + + if (feesAndRewards[i].appliedDiscount != 0) { + emit DiscountApplied( + feesAndRewards[i].configDigest, + subscriber, + feesAndRewards[i].fee, + feesAndRewards[i].reward, + feesAndRewards[i].appliedDiscount + ); + } + } + + //keep track of change in case of any over payment + uint256 change; + + if (msg.value != 0) { + //there must be enough to cover the fee + if (totalNativeFee > msg.value) revert InvalidDeposit(); + + //wrap the amount required to pay the fee & approve as the subscriber paid in wrapped native + 
IWERC20(i_nativeAddress).deposit{value: totalNativeFee}(); + + unchecked { + //msg.value is always >= to fee.amount + change = msg.value - totalNativeFee; + } + } else { + if (totalNativeFee != 0) { + //subscriber has paid in wrapped native, so transfer the native to this contract + IERC20(i_nativeAddress).safeTransferFrom(subscriber, address(this), totalNativeFee); + } + } + + if (linkRewards.length != 0) { + i_rewardManager.onFeePaid(linkRewards, subscriber); + } + + if (nativeFeeLinkRewards.length != 0) { + //distribute subsidised fees paid in Native + if (totalNativeFeeLinkValue > IERC20(i_linkAddress).balanceOf(address(this))) { + // If not enough PLI on this contract to forward for rewards, tally the deficit to be paid by out-of-band PLI + for (uint256 i; i < nativeFeeLinkRewards.length; ++i) { + unchecked { + //we have previously tallied the fees, any overflows would have already reverted + s_linkDeficit[nativeFeeLinkRewards[i].poolId] += nativeFeeLinkRewards[i].amount; + } + } + + emit InsufficientLink(nativeFeeLinkRewards); + } else { + //distribute the fees + i_rewardManager.onFeePaid(nativeFeeLinkRewards, address(this)); + } + } + + // a refund may be needed if the payee has paid in excess of the fee + _tryReturnChange(subscriber, change); + } + + function _tryReturnChange(address subscriber, uint256 quantity) internal { + if (quantity != 0) { + payable(subscriber).transfer(quantity); + } + } + + /// @inheritdoc IFeeManager + function payLinkDeficit(bytes32 configDigest) external onlyOwner { + uint256 deficit = s_linkDeficit[configDigest]; + + if (deficit == 0) revert ZeroDeficit(); + + delete s_linkDeficit[configDigest]; + + IRewardManager.FeePayment[] memory deficitFeePayment = new IRewardManager.FeePayment[](1); + + deficitFeePayment[0] = IRewardManager.FeePayment(configDigest, uint192(deficit)); + + i_rewardManager.onFeePaid(deficitFeePayment, address(this)); + + emit LinkDeficitCleared(configDigest, deficit); + } +} diff --git 
a/contracts/src/v0.8/llo-feeds/RewardManager.sol b/contracts/src/v0.8/llo-feeds/RewardManager.sol new file mode 100644 index 00000000..f1d198f0 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/RewardManager.sol @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {ConfirmedOwner} from "../shared/access/ConfirmedOwner.sol"; +import {IRewardManager} from "./interfaces/IRewardManager.sol"; +import {IERC20} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC20.sol"; +import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol"; +import {Common} from "./libraries/Common.sol"; +import {SafeERC20} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol"; + +/** + * @title RewardManager + * @author Michael Fletcher + * @author Austin Born + * @notice This contract will be used to reward any configured recipients within a pool. Recipients will receive a share of their pool relative to their configured weight. + */ +contract RewardManager is IRewardManager, ConfirmedOwner, TypeAndVersionInterface { + using SafeERC20 for IERC20; + + // @dev The mapping of total fees collected for a particular pot: s_totalRewardRecipientFees[poolId] + mapping(bytes32 => uint256) public s_totalRewardRecipientFees; + + // @dev The mapping of fee balances for each pot last time the recipient claimed: s_totalRewardRecipientFeesLastClaimedAmounts[poolId][recipient] + mapping(bytes32 => mapping(address => uint256)) public s_totalRewardRecipientFeesLastClaimedAmounts; + + // @dev The mapping of RewardRecipient weights for a particular poolId: s_rewardRecipientWeights[poolId][rewardRecipient]. 
+ mapping(bytes32 => mapping(address => uint256)) public s_rewardRecipientWeights; + + // @dev Keep track of the reward recipient weights that have been set to prevent duplicates + mapping(bytes32 => bool) public s_rewardRecipientWeightsSet; + + // @dev Store a list of pool ids that have been registered, to make off chain lookups easier + bytes32[] public s_registeredPoolIds; + + // @dev The address for the PLI contract + address public immutable i_linkAddress; + + // The total weight of all RewardRecipients. 1e18 = 100% of the pool fees + uint64 private constant PERCENTAGE_SCALAR = 1e18; + + // The fee manager address + address public s_feeManagerAddress; + + // @notice Thrown whenever the RewardRecipient weights are invalid + error InvalidWeights(); + + // @notice Thrown when any given address is invalid + error InvalidAddress(); + + // @notice Thrown when the pool id is invalid + error InvalidPoolId(); + + // @notice Thrown when the calling contract is not within the authorized contracts + error Unauthorized(); + + // @notice Thrown when getAvailableRewardPoolIds parameters are incorrectly set + error InvalidPoolLength(); + + // Events emitted upon state change + event RewardRecipientsUpdated(bytes32 indexed poolId, Common.AddressAndWeight[] newRewardRecipients); + event RewardsClaimed(bytes32 indexed poolId, address indexed recipient, uint192 quantity); + event FeeManagerUpdated(address newFeeManagerAddress); + event FeePaid(FeePayment[] payments, address payer); + + /** + * @notice Constructor + * @param linkAddress address of the wrapped PLI token + */ + constructor(address linkAddress) ConfirmedOwner(msg.sender) { + //ensure that the address ia not zero + if (linkAddress == address(0)) revert InvalidAddress(); + + i_linkAddress = linkAddress; + } + + // @inheritdoc TypeAndVersionInterface + function typeAndVersion() external pure override returns (string memory) { + return "RewardManager 1.1.0"; + } + + // @inheritdoc IERC165 + function 
supportsInterface(bytes4 interfaceId) external pure override returns (bool) { + return interfaceId == this.onFeePaid.selector; + } + + modifier onlyOwnerOrFeeManager() { + if (msg.sender != owner() && msg.sender != s_feeManagerAddress) revert Unauthorized(); + _; + } + + modifier onlyOwnerOrRecipientInPool(bytes32 poolId) { + if (msg.sender != owner() && s_rewardRecipientWeights[poolId][msg.sender] == 0) revert Unauthorized(); + _; + } + + modifier onlyFeeManager() { + if (msg.sender != s_feeManagerAddress) revert Unauthorized(); + _; + } + + /// @inheritdoc IRewardManager + function onFeePaid(FeePayment[] calldata payments, address payer) external override onlyFeeManager { + uint256 totalFeeAmount; + for (uint256 i; i < payments.length; ++i) { + unchecked { + //the total amount for any ERC-20 asset cannot exceed 2^256 - 1 + //see https://github.com/OpenZeppelin/openzeppelin-contracts/blob/36bf1e46fa811f0f07d38eb9cfbc69a955f300ce/contracts/token/ERC20/ERC20.sol#L266 + //for example implementation. 
+ s_totalRewardRecipientFees[payments[i].poolId] += payments[i].amount; + + //tally the total payable fees + totalFeeAmount += payments[i].amount; + } + } + + //transfer the fees to this contract + IERC20(i_linkAddress).safeTransferFrom(payer, address(this), totalFeeAmount); + + emit FeePaid(payments, payer); + } + + /// @inheritdoc IRewardManager + function claimRewards(bytes32[] memory poolIds) external override { + _claimRewards(msg.sender, poolIds); + } + + // wrapper impl for claimRewards + function _claimRewards(address recipient, bytes32[] memory poolIds) internal returns (uint256) { + //get the total amount claimable for this recipient + uint256 claimAmount; + + //loop and claim all the rewards in the poolId pot + for (uint256 i; i < poolIds.length; ++i) { + //get the poolId to be claimed + bytes32 poolId = poolIds[i]; + + //get the total fees for the pot + uint256 totalFeesInPot = s_totalRewardRecipientFees[poolId]; + + unchecked { + //avoid unnecessary storage reads if there's no fees in the pot + if (totalFeesInPot == 0) continue; + + //get the claimable amount for this recipient, this calculation will never exceed the amount in the pot + uint256 claimableAmount = totalFeesInPot - s_totalRewardRecipientFeesLastClaimedAmounts[poolId][recipient]; + + //calculate the recipients share of the fees, which is their weighted share of the difference between the last amount they claimed and the current amount in the pot. 
This can never be more than the total amount in existence + uint256 recipientShare = (claimableAmount * s_rewardRecipientWeights[poolId][recipient]) / PERCENTAGE_SCALAR; + + //if there's no fees to claim, continue as there's nothing to update + if (recipientShare == 0) continue; + + //keep track of the total amount claimable, this can never be more than the total amount in existence + claimAmount += recipientShare; + + //set the current total amount of fees in the pot as it's used to calculate future claims + s_totalRewardRecipientFeesLastClaimedAmounts[poolId][recipient] = totalFeesInPot; + + //emit event if the recipient has rewards to claim + emit RewardsClaimed(poolIds[i], recipient, uint192(recipientShare)); + } + } + + //check if there's any rewards to claim in the given poolId + if (claimAmount != 0) { + //transfer the reward to the recipient + IERC20(i_linkAddress).safeTransfer(recipient, claimAmount); + } + + return claimAmount; + } + + /// @inheritdoc IRewardManager + function setRewardRecipients( + bytes32 poolId, + Common.AddressAndWeight[] calldata rewardRecipientAndWeights + ) external override onlyOwnerOrFeeManager { + //revert if there are no recipients to set + if (rewardRecipientAndWeights.length == 0) revert InvalidAddress(); + + //check that the weights have not been previously set + if (s_rewardRecipientWeightsSet[poolId]) revert InvalidPoolId(); + + //keep track of the registered poolIds to make off chain lookups easier + s_registeredPoolIds.push(poolId); + + //keep track of which pools have had their reward recipients set + s_rewardRecipientWeightsSet[poolId] = true; + + //set the reward recipients, this will only be called once and contain the full set of RewardRecipients with a total weight of 100% + _setRewardRecipientWeights(poolId, rewardRecipientAndWeights, PERCENTAGE_SCALAR); + + emit RewardRecipientsUpdated(poolId, rewardRecipientAndWeights); + } + + function _setRewardRecipientWeights( + bytes32 poolId, + Common.AddressAndWeight[] 
calldata rewardRecipientAndWeights, + uint256 expectedWeight + ) internal { + //we can't update the weights if it contains duplicates + if (Common._hasDuplicateAddresses(rewardRecipientAndWeights)) revert InvalidAddress(); + + //loop all the reward recipients and validate the weight and address + uint256 totalWeight; + for (uint256 i; i < rewardRecipientAndWeights.length; ++i) { + //get the weight + uint256 recipientWeight = rewardRecipientAndWeights[i].weight; + //get the address + address recipientAddress = rewardRecipientAndWeights[i].addr; + + //ensure the reward recipient address is not zero + if (recipientAddress == address(0)) revert InvalidAddress(); + + //save/overwrite the weight for the reward recipient + s_rewardRecipientWeights[poolId][recipientAddress] = recipientWeight; + + unchecked { + //keep track of the cumulative weight, this cannot overflow as the total weight is restricted at 1e18 + totalWeight += recipientWeight; + } + } + + //if total weight is not met, the fees will either be under or over distributed + if (totalWeight != expectedWeight) revert InvalidWeights(); + } + + /// @inheritdoc IRewardManager + function updateRewardRecipients( + bytes32 poolId, + Common.AddressAndWeight[] calldata newRewardRecipients + ) external override onlyOwner { + //create an array of poolIds to pass to _claimRewards if required + bytes32[] memory poolIds = new bytes32[](1); + poolIds[0] = poolId; + + //loop all the reward recipients and claim their rewards before updating their weights + uint256 existingTotalWeight; + for (uint256 i; i < newRewardRecipients.length; ++i) { + //get the address + address recipientAddress = newRewardRecipients[i].addr; + //get the existing weight + uint256 existingWeight = s_rewardRecipientWeights[poolId][recipientAddress]; + + //if a recipient is updated, the rewards must be claimed first as they can't claim previous fees at the new weight + _claimRewards(newRewardRecipients[i].addr, poolIds); + + unchecked { + //keep tally of 
the weights so that the expected collective weight is known + existingTotalWeight += existingWeight; + } + } + + //update the reward recipients, if the new collective weight isn't equal to the previous collective weight, the fees will either be under or over distributed + _setRewardRecipientWeights(poolId, newRewardRecipients, existingTotalWeight); + + //emit event + emit RewardRecipientsUpdated(poolId, newRewardRecipients); + } + + /// @inheritdoc IRewardManager + function payRecipients(bytes32 poolId, address[] calldata recipients) external onlyOwnerOrRecipientInPool(poolId) { + //convert poolIds to an array to match the interface of _claimRewards + bytes32[] memory poolIdsArray = new bytes32[](1); + poolIdsArray[0] = poolId; + + //loop each recipient and claim the rewards for each of the pools and assets + for (uint256 i; i < recipients.length; ++i) { + _claimRewards(recipients[i], poolIdsArray); + } + } + + /// @inheritdoc IRewardManager + function setFeeManager(address newFeeManagerAddress) external onlyOwner { + if (newFeeManagerAddress == address(0)) revert InvalidAddress(); + + s_feeManagerAddress = newFeeManagerAddress; + + emit FeeManagerUpdated(newFeeManagerAddress); + } + + /// @inheritdoc IRewardManager + function getAvailableRewardPoolIds( + address recipient, + uint256 startIndex, + uint256 endIndex + ) external view returns (bytes32[] memory) { + //get the length of the pool ids which we will loop through and potentially return + uint256 registeredPoolIdsLength = s_registeredPoolIds.length; + + uint256 lastIndex = endIndex > registeredPoolIdsLength ? 
registeredPoolIdsLength : endIndex; + + if (startIndex > lastIndex) revert InvalidPoolLength(); + + //create a new array with the maximum amount of potential pool ids + bytes32[] memory claimablePoolIds = new bytes32[](lastIndex - startIndex); + //we want the pools which a recipient has funds for to be sequential, so we need to keep track of the index + uint256 poolIdArrayIndex; + + //loop all the pool ids, and check if the recipient has a registered weight and a claimable amount + for (uint256 i = startIndex; i < lastIndex; ++i) { + //get the poolId + bytes32 poolId = s_registeredPoolIds[i]; + + //if the recipient has a weight, they are a recipient of this poolId + if (s_rewardRecipientWeights[poolId][recipient] != 0) { + //get the total in this pool + uint256 totalPoolAmount = s_totalRewardRecipientFees[poolId]; + //if the recipient has any PLI, then add the poolId to the array + unchecked { + //s_totalRewardRecipientFeesLastClaimedAmounts can never exceed total pool amount, and the number of pools can't exceed the max array length + if (totalPoolAmount - s_totalRewardRecipientFeesLastClaimedAmounts[poolId][recipient] != 0) { + claimablePoolIds[poolIdArrayIndex++] = poolId; + } + } + } + } + + return claimablePoolIds; + } +} diff --git a/contracts/src/v0.8/llo-feeds/Verifier.sol b/contracts/src/v0.8/llo-feeds/Verifier.sol new file mode 100644 index 00000000..c8858999 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/Verifier.sol @@ -0,0 +1,559 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {ConfirmedOwner} from "../shared/access/ConfirmedOwner.sol"; +import {IVerifier} from "./interfaces/IVerifier.sol"; +import {IVerifierProxy} from "./interfaces/IVerifierProxy.sol"; +import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol"; +import {IERC165} from "../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; +import {Common} from "./libraries/Common.sol"; + +// OCR2 standard +uint256 constant 
MAX_NUM_ORACLES = 31; + +/* + * The verifier contract is used to verify offchain reports signed + * by DONs. A report consists of a price, block number and feed Id. It + * represents the observed price of an asset at a specified block number for + * a feed. The verifier contract is used to verify that such reports have + * been signed by the correct signers. + **/ +contract Verifier is IVerifier, ConfirmedOwner, TypeAndVersionInterface { + // The first byte of the mask can be 0, because we only ever have 31 oracles + uint256 internal constant ORACLE_MASK = 0x0001010101010101010101010101010101010101010101010101010101010101; + + enum Role { + // Default role for an oracle address. This means that the oracle address + // is not a signer + Unset, + // Role given to an oracle address that is allowed to sign feed data + Signer + } + + struct Signer { + // Index of oracle in a configuration + uint8 index; + // The oracle's role + Role role; + } + + struct Config { + // Fault tolerance + uint8 f; + // Marks whether or not a configuration is active + bool isActive; + // Map of signer addresses to oracles + mapping(address => Signer) oracles; + } + + struct VerifierState { + // The number of times a new configuration + /// has been set + uint32 configCount; + // The block number of the block the last time + /// the configuration was updated. + uint32 latestConfigBlockNumber; + // The latest epoch a report was verified for + uint32 latestEpoch; + // Whether or not the verifier for this feed has been deactivated + bool isDeactivated; + /// The latest config digest set + bytes32 latestConfigDigest; + /// The historical record of all previously set configs by feedId + mapping(bytes32 => Config) s_verificationDataConfigs; + } + + /// @notice This event is emitted when a new report is verified. + /// It is used to keep a historical record of verified reports. 
+ event ReportVerified(bytes32 indexed feedId, address requester); + + /// @notice This event is emitted whenever a new configuration is set for a feed. It triggers a new run of the offchain reporting protocol. + event ConfigSet( + bytes32 indexed feedId, + uint32 previousConfigBlockNumber, + bytes32 configDigest, + uint64 configCount, + address[] signers, + bytes32[] offchainTransmitters, + uint8 f, + bytes onchainConfig, + uint64 offchainConfigVersion, + bytes offchainConfig + ); + + /// @notice This event is emitted whenever a configuration is deactivated + event ConfigDeactivated(bytes32 indexed feedId, bytes32 configDigest); + + /// @notice This event is emitted whenever a configuration is activated + event ConfigActivated(bytes32 indexed feedId, bytes32 configDigest); + + /// @notice This event is emitted whenever a feed is activated + event FeedActivated(bytes32 indexed feedId); + + /// @notice This event is emitted whenever a feed is deactivated + event FeedDeactivated(bytes32 indexed feedId); + + /// @notice This error is thrown whenever an address tries + /// to exeecute a transaction that it is not authorized to do so + error AccessForbidden(); + + /// @notice This error is thrown whenever a zero address is passed + error ZeroAddress(); + + /// @notice This error is thrown whenever the feed ID passed in + /// a signed report is empty + error FeedIdEmpty(); + + /// @notice This error is thrown whenever the config digest + /// is empty + error DigestEmpty(); + + /// @notice This error is thrown whenever the config digest + /// passed in has not been set in this verifier + /// @param feedId The feed ID in the signed report + /// @param configDigest The config digest that has not been set + error DigestNotSet(bytes32 feedId, bytes32 configDigest); + + /// @notice This error is thrown whenever the config digest + /// has been deactivated + /// @param feedId The feed ID in the signed report + /// @param configDigest The config digest that is inactive + error 
DigestInactive(bytes32 feedId, bytes32 configDigest); + + /// @notice This error is thrown whenever trying to set a config + /// with a fault tolerance of 0 + error FaultToleranceMustBePositive(); + + /// @notice This error is thrown whenever a report is signed + /// with more than the max number of signers + /// @param numSigners The number of signers who have signed the report + /// @param maxSigners The maximum number of signers that can sign a report + error ExcessSigners(uint256 numSigners, uint256 maxSigners); + + /// @notice This error is thrown whenever a report is signed + /// with less than the minimum number of signers + /// @param numSigners The number of signers who have signed the report + /// @param minSigners The minimum number of signers that need to sign a report + error InsufficientSigners(uint256 numSigners, uint256 minSigners); + + /// @notice This error is thrown whenever a report is signed + /// with an incorrect number of signers + /// @param numSigners The number of signers who have signed the report + /// @param expectedNumSigners The expected number of signers that need to sign + /// a report + error IncorrectSignatureCount(uint256 numSigners, uint256 expectedNumSigners); + + /// @notice This error is thrown whenever the R and S signer components + /// have different lengths + /// @param rsLength The number of r signature components + /// @param ssLength The number of s signature components + error MismatchedSignatures(uint256 rsLength, uint256 ssLength); + + /// @notice This error is thrown whenever setting a config with duplicate signatures + error NonUniqueSignatures(); + + /// @notice This error is thrown whenever a report fails to verify due to bad or duplicate signatures + error BadVerification(); + + /// @notice This error is thrown whenever the admin tries to deactivate + /// the latest config digest + /// @param feedId The feed ID in the signed report + /// @param configDigest The latest config digest + error 
CannotDeactivateLatestConfig(bytes32 feedId, bytes32 configDigest); + + /// @notice This error is thrown whenever the feed ID passed in is deactivated + /// @param feedId The feed ID + error InactiveFeed(bytes32 feedId); + + /// @notice This error is thrown whenever the feed ID passed in is not found + /// @param feedId The feed ID + error InvalidFeed(bytes32 feedId); + + /// @notice The address of the verifier proxy + address private immutable i_verifierProxyAddr; + + /// @notice Verifier states keyed on Feed ID + mapping(bytes32 => VerifierState) internal s_feedVerifierStates; + + /// @param verifierProxyAddr The address of the VerifierProxy contract + constructor(address verifierProxyAddr) ConfirmedOwner(msg.sender) { + if (verifierProxyAddr == address(0)) revert ZeroAddress(); + i_verifierProxyAddr = verifierProxyAddr; + } + + modifier checkConfigValid(uint256 numSigners, uint256 f) { + if (f == 0) revert FaultToleranceMustBePositive(); + if (numSigners > MAX_NUM_ORACLES) revert ExcessSigners(numSigners, MAX_NUM_ORACLES); + if (numSigners <= 3 * f) revert InsufficientSigners(numSigners, 3 * f + 1); + _; + } + + /// @inheritdoc IERC165 + function supportsInterface(bytes4 interfaceId) external pure override returns (bool isVerifier) { + return interfaceId == this.verify.selector; + } + + /// @inheritdoc TypeAndVersionInterface + function typeAndVersion() external pure override returns (string memory) { + return "Verifier 1.2.0"; + } + + /// @inheritdoc IVerifier + function verify( + bytes calldata signedReport, + address sender + ) external override returns (bytes memory verifierResponse) { + if (msg.sender != i_verifierProxyAddr) revert AccessForbidden(); + ( + bytes32[3] memory reportContext, + bytes memory reportData, + bytes32[] memory rs, + bytes32[] memory ss, + bytes32 rawVs + ) = abi.decode(signedReport, (bytes32[3], bytes, bytes32[], bytes32[], bytes32)); + + // The feed ID is the first 32 bytes of the report data. 
+ bytes32 feedId = bytes32(reportData); + + VerifierState storage feedVerifierState = s_feedVerifierStates[feedId]; + + // If the feed has been deactivated, do not verify the report + if (feedVerifierState.isDeactivated) { + revert InactiveFeed(feedId); + } + + // reportContext consists of: + // reportContext[0]: ConfigDigest + // reportContext[1]: 27 byte padding, 4-byte epoch and 1-byte round + // reportContext[2]: ExtraHash + bytes32 configDigest = reportContext[0]; + Config storage s_config = feedVerifierState.s_verificationDataConfigs[configDigest]; + + _validateReport(feedId, configDigest, rs, ss, s_config); + _updateEpoch(reportContext, feedVerifierState); + + bytes32 hashedReport = keccak256(reportData); + + _verifySignatures(hashedReport, reportContext, rs, ss, rawVs, s_config); + emit ReportVerified(feedId, sender); + + return reportData; + } + + /// @notice Validates parameters of the report + /// @param feedId Feed ID from the report + /// @param configDigest Config digest from the report + /// @param rs R components from the report + /// @param ss S components from the report + /// @param config Config for the given feed ID keyed on the config digest + function _validateReport( + bytes32 feedId, + bytes32 configDigest, + bytes32[] memory rs, + bytes32[] memory ss, + Config storage config + ) private view { + uint8 expectedNumSignatures = config.f + 1; + + if (!config.isActive) revert DigestInactive(feedId, configDigest); + if (rs.length != expectedNumSignatures) revert IncorrectSignatureCount(rs.length, expectedNumSignatures); + if (rs.length != ss.length) revert MismatchedSignatures(rs.length, ss.length); + } + + /** + * @notice Conditionally update the epoch for a feed + * @param reportContext Report context containing the epoch and round + * @param feedVerifierState Feed verifier state to conditionally update + */ + function _updateEpoch(bytes32[3] memory reportContext, VerifierState storage feedVerifierState) private { + uint40 epochAndRound = 
uint40(uint256(reportContext[1])); + uint32 epoch = uint32(epochAndRound >> 8); + if (epoch > feedVerifierState.latestEpoch) { + feedVerifierState.latestEpoch = epoch; + } + } + + /// @notice Verifies that a report has been signed by the correct + /// signers and that enough signers have signed the reports. + /// @param hashedReport The keccak256 hash of the raw report's bytes + /// @param reportContext The context the report was signed in + /// @param rs ith element is the R components of the ith signature on report. Must have at most MAX_NUM_ORACLES entries + /// @param ss ith element is the S components of the ith signature on report. Must have at most MAX_NUM_ORACLES entries + /// @param rawVs ith element is the the V component of the ith signature + /// @param s_config The config digest the report was signed for + function _verifySignatures( + bytes32 hashedReport, + bytes32[3] memory reportContext, + bytes32[] memory rs, + bytes32[] memory ss, + bytes32 rawVs, + Config storage s_config + ) private view { + bytes32 h = keccak256(abi.encodePacked(hashedReport, reportContext)); + // i-th byte counts number of sigs made by i-th signer + uint256 signedCount; + + Signer memory o; + address signerAddress; + uint256 numSigners = rs.length; + for (uint256 i; i < numSigners; ++i) { + signerAddress = ecrecover(h, uint8(rawVs[i]) + 27, rs[i], ss[i]); + o = s_config.oracles[signerAddress]; + if (o.role != Role.Signer) revert BadVerification(); + unchecked { + signedCount += 1 << (8 * o.index); + } + } + + if (signedCount & ORACLE_MASK != signedCount) revert BadVerification(); + } + + /// @inheritdoc IVerifier + function setConfig( + bytes32 feedId, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig, + Common.AddressAndWeight[] memory recipientAddressesAndWeights + ) external override checkConfigValid(signers.length, f) onlyOwner { + _setConfig( + 
feedId, + block.chainid, + address(this), + 0, // 0 defaults to feedConfig.configCount + 1 + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + recipientAddressesAndWeights + ); + } + + /// @inheritdoc IVerifier + function setConfigFromSource( + bytes32 feedId, + uint256 sourceChainId, + address sourceAddress, + uint32 newConfigCount, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig, + Common.AddressAndWeight[] memory recipientAddressesAndWeights + ) external override checkConfigValid(signers.length, f) onlyOwner { + _setConfig( + feedId, + sourceChainId, + sourceAddress, + newConfigCount, + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + recipientAddressesAndWeights + ); + } + + /// @notice Sets config based on the given arguments + /// @param feedId Feed ID to set config for + /// @param sourceChainId Chain ID of source config + /// @param sourceAddress Address of source config Verifier + /// @param newConfigCount Optional param to force the new config count + /// @param signers addresses with which oracles sign the reports + /// @param offchainTransmitters CSA key for the ith Oracle + /// @param f number of faulty oracles the system can tolerate + /// @param onchainConfig serialized configuration used by the contract (and possibly oracles) + /// @param offchainConfigVersion version number for offchainEncoding schema + /// @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + /// @param recipientAddressesAndWeights the addresses and weights of all the recipients to receive rewards + function _setConfig( + bytes32 feedId, + uint256 sourceChainId, + address sourceAddress, + uint32 newConfigCount, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes 
memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig, + Common.AddressAndWeight[] memory recipientAddressesAndWeights + ) internal { + VerifierState storage feedVerifierState = s_feedVerifierStates[feedId]; + + // Increment the number of times a config has been set first + if (newConfigCount > 0) feedVerifierState.configCount = newConfigCount; + else feedVerifierState.configCount++; + + bytes32 configDigest = _configDigestFromConfigData( + feedId, + sourceChainId, + sourceAddress, + feedVerifierState.configCount, + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ); + + feedVerifierState.s_verificationDataConfigs[configDigest].f = f; + feedVerifierState.s_verificationDataConfigs[configDigest].isActive = true; + for (uint8 i; i < signers.length; ++i) { + address signerAddr = signers[i]; + if (signerAddr == address(0)) revert ZeroAddress(); + + // All signer roles are unset by default for a new config digest. + // Here the contract checks to see if a signer's address has already + // been set to ensure that the group of signer addresses that will + // sign reports with the config digest are unique. 
+ bool isSignerAlreadySet = feedVerifierState.s_verificationDataConfigs[configDigest].oracles[signerAddr].role != + Role.Unset; + if (isSignerAlreadySet) revert NonUniqueSignatures(); + feedVerifierState.s_verificationDataConfigs[configDigest].oracles[signerAddr] = Signer({ + role: Role.Signer, + index: i + }); + } + + IVerifierProxy(i_verifierProxyAddr).setVerifier( + feedVerifierState.latestConfigDigest, + configDigest, + recipientAddressesAndWeights + ); + + emit ConfigSet( + feedId, + feedVerifierState.latestConfigBlockNumber, + configDigest, + feedVerifierState.configCount, + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ); + + feedVerifierState.latestEpoch = 0; + feedVerifierState.latestConfigBlockNumber = uint32(block.number); + feedVerifierState.latestConfigDigest = configDigest; + } + + /// @notice Generates the config digest from config data + /// @param feedId Feed ID to set config for + /// @param sourceChainId Chain ID of source config + /// @param sourceAddress Address of source config Verifier + /// @param configCount ordinal number of this config setting among all config settings over the life of this contract + /// @param signers ith element is address ith oracle uses to sign a report + /// @param offchainTransmitters ith element is address ith oracle used to transmit reports (in this case used for flexible additional field, such as CSA pub keys) + /// @param f maximum number of faulty/dishonest oracles the protocol can tolerate while still working correctly + /// @param onchainConfig serialized configuration used by the contract (and possibly oracles) + /// @param offchainConfigVersion version of the serialization format used for "offchainConfig" parameter + /// @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + /// @dev This function is a modified version of the method from OCR2Abstract + function _configDigestFromConfigData( + 
bytes32 feedId, + uint256 sourceChainId, + address sourceAddress, + uint64 configCount, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) internal pure returns (bytes32) { + uint256 h = uint256( + keccak256( + abi.encode( + feedId, + sourceChainId, + sourceAddress, + configCount, + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ) + ) + ); + uint256 prefixMask = type(uint256).max << (256 - 16); // 0xFFFF00..00 + // 0x0006 corresponds to ConfigDigestPrefixMercuryV02 in libocr + uint256 prefix = 0x0006 << (256 - 16); // 0x000600..00 + return bytes32((prefix & prefixMask) | (h & ~prefixMask)); + } + + /// @inheritdoc IVerifier + function activateConfig(bytes32 feedId, bytes32 configDigest) external onlyOwner { + VerifierState storage feedVerifierState = s_feedVerifierStates[feedId]; + + if (configDigest == bytes32("")) revert DigestEmpty(); + if (feedVerifierState.s_verificationDataConfigs[configDigest].f == 0) revert DigestNotSet(feedId, configDigest); + feedVerifierState.s_verificationDataConfigs[configDigest].isActive = true; + emit ConfigActivated(feedId, configDigest); + } + + /// @inheritdoc IVerifier + function deactivateConfig(bytes32 feedId, bytes32 configDigest) external onlyOwner { + VerifierState storage feedVerifierState = s_feedVerifierStates[feedId]; + + if (configDigest == bytes32("")) revert DigestEmpty(); + if (feedVerifierState.s_verificationDataConfigs[configDigest].f == 0) revert DigestNotSet(feedId, configDigest); + if (configDigest == feedVerifierState.latestConfigDigest) { + revert CannotDeactivateLatestConfig(feedId, configDigest); + } + feedVerifierState.s_verificationDataConfigs[configDigest].isActive = false; + emit ConfigDeactivated(feedId, configDigest); + } + + /// @inheritdoc IVerifier + function activateFeed(bytes32 feedId) external onlyOwner { + VerifierState 
storage feedVerifierState = s_feedVerifierStates[feedId]; + + if (feedVerifierState.configCount == 0) revert InvalidFeed(feedId); + feedVerifierState.isDeactivated = false; + emit FeedActivated(feedId); + } + + /// @inheritdoc IVerifier + function deactivateFeed(bytes32 feedId) external onlyOwner { + VerifierState storage feedVerifierState = s_feedVerifierStates[feedId]; + + if (feedVerifierState.configCount == 0) revert InvalidFeed(feedId); + feedVerifierState.isDeactivated = true; + emit FeedDeactivated(feedId); + } + + /// @inheritdoc IVerifier + function latestConfigDigestAndEpoch( + bytes32 feedId + ) external view override returns (bool scanLogs, bytes32 configDigest, uint32 epoch) { + VerifierState storage feedVerifierState = s_feedVerifierStates[feedId]; + return (false, feedVerifierState.latestConfigDigest, feedVerifierState.latestEpoch); + } + + /// @inheritdoc IVerifier + function latestConfigDetails( + bytes32 feedId + ) external view override returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest) { + VerifierState storage feedVerifierState = s_feedVerifierStates[feedId]; + return ( + feedVerifierState.configCount, + feedVerifierState.latestConfigBlockNumber, + feedVerifierState.latestConfigDigest + ); + } +} diff --git a/contracts/src/v0.8/llo-feeds/VerifierProxy.sol b/contracts/src/v0.8/llo-feeds/VerifierProxy.sol new file mode 100644 index 00000000..c32a2717 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/VerifierProxy.sol @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {ConfirmedOwner} from "../shared/access/ConfirmedOwner.sol"; +import {IVerifierProxy} from "./interfaces/IVerifierProxy.sol"; +import {IVerifier} from "./interfaces/IVerifier.sol"; +import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol"; +import {AccessControllerInterface} from "../shared/interfaces/AccessControllerInterface.sol"; +import {IERC165} from 
"../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; +import {IVerifierFeeManager} from "./interfaces/IVerifierFeeManager.sol"; +import {Common} from "./libraries/Common.sol"; + +/** + * The verifier proxy contract is the gateway for all report verification requests + * on a chain. It is responsible for taking in a verification request and routing + * it to the correct verifier contract. + */ +contract VerifierProxy is IVerifierProxy, ConfirmedOwner, TypeAndVersionInterface { + /// @notice This event is emitted whenever a new verifier contract is set + /// @param oldConfigDigest The config digest that was previously the latest config + /// digest of the verifier contract at the verifier address. + /// @param newConfigDigest The latest config digest of the verifier contract + /// at the verifier address. + /// @param verifierAddress The address of the verifier contract that verifies reports for + /// a given digest + event VerifierSet(bytes32 oldConfigDigest, bytes32 newConfigDigest, address verifierAddress); + + /// @notice This event is emitted whenever a new verifier contract is initialized + /// @param verifierAddress The address of the verifier contract that verifies reports + event VerifierInitialized(address verifierAddress); + + /// @notice This event is emitted whenever a verifier is unset + /// @param configDigest The config digest that was unset + /// @param verifierAddress The Verifier contract address unset + event VerifierUnset(bytes32 configDigest, address verifierAddress); + + /// @notice This event is emitted when a new access controller is set + /// @param oldAccessController The old access controller address + /// @param newAccessController The new access controller address + event AccessControllerSet(address oldAccessController, address newAccessController); + + /// @notice This event is emitted when a new fee manager is set + /// @param oldFeeManager The old fee manager address + /// @param newFeeManager The new fee manager
address + event FeeManagerSet(address oldFeeManager, address newFeeManager); + + /// @notice This error is thrown whenever an address tries + /// to execute a transaction that it is not authorized to do so + error AccessForbidden(); + + /// @notice This error is thrown whenever a zero address is passed + error ZeroAddress(); + + /// @notice This error is thrown when trying to set a verifier address + /// for a digest that has already been initialized + /// @param configDigest The digest for the verifier that has + /// already been set + /// @param verifier The address of the verifier the digest was set for + error ConfigDigestAlreadySet(bytes32 configDigest, address verifier); + + /// @notice This error is thrown when trying to set a verifier address that has already been initialized + error VerifierAlreadyInitialized(address verifier); + + /// @notice This error is thrown when the verifier at an address does + /// not conform to the verifier interface + error VerifierInvalid(); + + /// @notice This error is thrown when the fee manager at an address does + /// not conform to the fee manager interface + error FeeManagerInvalid(); + + /// @notice This error is thrown whenever a verifier is not found + /// @param configDigest The digest for which a verifier is not found + error VerifierNotFound(bytes32 configDigest); + + /// @notice This error is thrown whenever billing fails.
+ error BadVerification(); + + /// @notice Mapping of authorized verifiers + mapping(address => bool) private s_initializedVerifiers; + + /// @notice Mapping between config digests and verifiers + mapping(bytes32 => address) private s_verifiersByConfig; + + /// @notice The contract to control addresses that are allowed to verify reports + AccessControllerInterface public s_accessController; + + /// @notice The contract to control fees for report verification + IVerifierFeeManager public s_feeManager; + + constructor(AccessControllerInterface accessController) ConfirmedOwner(msg.sender) { + s_accessController = accessController; + } + + modifier checkAccess() { + AccessControllerInterface ac = s_accessController; + if (address(ac) != address(0) && !ac.hasAccess(msg.sender, msg.data)) revert AccessForbidden(); + _; + } + + modifier onlyInitializedVerifier() { + if (!s_initializedVerifiers[msg.sender]) revert AccessForbidden(); + _; + } + + modifier onlyValidVerifier(address verifierAddress) { + if (verifierAddress == address(0)) revert ZeroAddress(); + if (!IERC165(verifierAddress).supportsInterface(IVerifier.verify.selector)) revert VerifierInvalid(); + _; + } + + modifier onlyUnsetConfigDigest(bytes32 configDigest) { + address configDigestVerifier = s_verifiersByConfig[configDigest]; + if (configDigestVerifier != address(0)) revert ConfigDigestAlreadySet(configDigest, configDigestVerifier); + _; + } + + /// @inheritdoc TypeAndVersionInterface + function typeAndVersion() external pure override returns (string memory) { + return "VerifierProxy 2.0.0"; + } + + /// @inheritdoc IVerifierProxy + function verify( + bytes calldata payload, + bytes calldata parameterPayload + ) external payable checkAccess returns (bytes memory) { + IVerifierFeeManager feeManager = s_feeManager; + + // Bill the verifier + if (address(feeManager) != address(0)) { + feeManager.processFee{value: msg.value}(payload, parameterPayload, msg.sender); + } + + return _verify(payload); + } + + /// 
@inheritdoc IVerifierProxy + function verifyBulk( + bytes[] calldata payloads, + bytes calldata parameterPayload + ) external payable checkAccess returns (bytes[] memory verifiedReports) { + IVerifierFeeManager feeManager = s_feeManager; + + // Bill the verifier + if (address(feeManager) != address(0)) { + feeManager.processFeeBulk{value: msg.value}(payloads, parameterPayload, msg.sender); + } + + //verify the reports + verifiedReports = new bytes[](payloads.length); + for (uint256 i; i < payloads.length; ++i) { + verifiedReports[i] = _verify(payloads[i]); + } + + return verifiedReports; + } + + function _verify(bytes calldata payload) internal returns (bytes memory verifiedReport) { + // First 32 bytes of the signed report is the config digest + bytes32 configDigest = bytes32(payload); + address verifierAddress = s_verifiersByConfig[configDigest]; + if (verifierAddress == address(0)) revert VerifierNotFound(configDigest); + + return IVerifier(verifierAddress).verify(payload, msg.sender); + } + + /// @inheritdoc IVerifierProxy + function initializeVerifier(address verifierAddress) external override onlyOwner onlyValidVerifier(verifierAddress) { + if (s_initializedVerifiers[verifierAddress]) revert VerifierAlreadyInitialized(verifierAddress); + + s_initializedVerifiers[verifierAddress] = true; + emit VerifierInitialized(verifierAddress); + } + + /// @inheritdoc IVerifierProxy + function setVerifier( + bytes32 currentConfigDigest, + bytes32 newConfigDigest, + Common.AddressAndWeight[] calldata addressesAndWeights + ) external override onlyUnsetConfigDigest(newConfigDigest) onlyInitializedVerifier { + s_verifiersByConfig[newConfigDigest] = msg.sender; + + // Empty recipients array will be ignored and must be set off chain + if (addressesAndWeights.length > 0) { + if (address(s_feeManager) == address(0)) { + revert ZeroAddress(); + } + + s_feeManager.setFeeRecipients(newConfigDigest, addressesAndWeights); + } + + emit VerifierSet(currentConfigDigest, newConfigDigest, 
msg.sender); + } + + /// @inheritdoc IVerifierProxy + function unsetVerifier(bytes32 configDigest) external override onlyOwner { + address verifierAddress = s_verifiersByConfig[configDigest]; + if (verifierAddress == address(0)) revert VerifierNotFound(configDigest); + delete s_verifiersByConfig[configDigest]; + emit VerifierUnset(configDigest, verifierAddress); + } + + /// @inheritdoc IVerifierProxy + function getVerifier(bytes32 configDigest) external view override returns (address) { + return s_verifiersByConfig[configDigest]; + } + + /// @inheritdoc IVerifierProxy + function setAccessController(AccessControllerInterface accessController) external onlyOwner { + address oldAccessController = address(s_accessController); + s_accessController = accessController; + emit AccessControllerSet(oldAccessController, address(accessController)); + } + + /// @inheritdoc IVerifierProxy + function setFeeManager(IVerifierFeeManager feeManager) external onlyOwner { + if (address(feeManager) == address(0)) revert ZeroAddress(); + + if ( + !IERC165(feeManager).supportsInterface(IVerifierFeeManager.processFee.selector) || + !IERC165(feeManager).supportsInterface(IVerifierFeeManager.processFeeBulk.selector) + ) revert FeeManagerInvalid(); + + address oldFeeManager = address(s_feeManager); + s_feeManager = IVerifierFeeManager(feeManager); + emit FeeManagerSet(oldFeeManager, address(feeManager)); + } +} diff --git a/contracts/src/v0.8/llo-feeds/dev/ChannelConfigStore.sol b/contracts/src/v0.8/llo-feeds/dev/ChannelConfigStore.sol new file mode 100644 index 00000000..ca24b775 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/dev/ChannelConfigStore.sol @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.8.19; + +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {IChannelConfigStore} from "./interfaces/IChannelConfigStore.sol"; +import {TypeAndVersionInterface} from "../../interfaces/TypeAndVersionInterface.sol"; + +contract 
ChannelConfigStore is ConfirmedOwner, IChannelConfigStore, TypeAndVersionInterface { + mapping(uint32 => ChannelDefinition) private s_channelDefinitions; + + // mapping(bytes32 => ChannelConfiguration) private s_channelProductionConfigurations; + // mapping(bytes32 => ChannelConfiguration) private s_channelStagingConfigurations; + + event NewChannelDefinition(uint32 channelId, ChannelDefinition channelDefinition); + event ChannelDefinitionRemoved(uint32 channelId); + // event NewProductionConfig(ChannelConfiguration channelConfig); + // event NewStagingConfig(ChannelConfiguration channelConfig); + event PromoteStagingConfig(uint32 channelId); + + error OnlyCallableByEOA(); + error StagingConfigAlreadyPromoted(); + error EmptyStreamIDs(); + error ZeroReportFormat(); + error ZeroChainSelector(); + error ChannelDefinitionNotFound(); + + constructor() ConfirmedOwner(msg.sender) {} + + // function setStagingConfig(bytes32 configDigest, ChannelConfiguration calldata channelConfig) external onlyOwner { + // s_channelStagingConfigurations[channelId] = channelConfig; + + // emit NewStagingConfig(channelConfig); + // } + + //// this will trigger the following: + //// - offchain ShouldRetireCache will start returning true for the old (production) + //// protocol instance + //// - once the old production instance retires it will generate a handover + //// retirement report + //// - the staging instance will become the new production instance once + //// any honest oracle that is on both instances forward the retirement + //// report from the old instance to the new instace via the + //// PredecessorRetirementReportCache + //// + //// Note: the promotion flow only works if the previous production instance + //// is working correctly & generating reports. If that's not the case, the + //// owner is expected to "setProductionConfig" directly instead. This will + //// cause "gaps" to be created, but that seems unavoidable in such a scenario. 
+ // function promoteStagingConfig(bytes32 configDigest) external onlyOwner { + // ChannelConfiguration memory stagingConfig = s_channelStagingConfigurations[channelId]; + + // if(stagingConfig.channelConfigId.length == 0) { + // revert StagingConfigAlreadyPromoted(); + // } + + // s_channelProductionConfigurations[channelId] = s_channelStagingConfigurations[channelId]; + + // emit PromoteStagingConfig(channelId); + // } + + function addChannel(uint32 channelId, ChannelDefinition calldata channelDefinition) external onlyOwner { + if (channelDefinition.streamIDs.length == 0) { + revert EmptyStreamIDs(); + } + + if (channelDefinition.chainSelector == 0) { + revert ZeroChainSelector(); + } + + if (channelDefinition.reportFormat == 0) { + revert ZeroReportFormat(); + } + + s_channelDefinitions[channelId] = channelDefinition; + + emit NewChannelDefinition(channelId, channelDefinition); + } + + function removeChannel(uint32 channelId) external onlyOwner { + if (s_channelDefinitions[channelId].streamIDs.length == 0) { + revert ChannelDefinitionNotFound(); + } + + delete s_channelDefinitions[channelId]; + + emit ChannelDefinitionRemoved(channelId); + } + + function getChannelDefinitions(uint32 channelId) external view returns (ChannelDefinition memory) { + if (msg.sender != tx.origin) { + revert OnlyCallableByEOA(); + } + + return s_channelDefinitions[channelId]; + } + + function typeAndVersion() external pure override returns (string memory) { + return "ChannelConfigStore 0.0.0"; + } + + function supportsInterface(bytes4 interfaceId) external pure returns (bool) { + return interfaceId == type(IChannelConfigStore).interfaceId; + } +} diff --git a/contracts/src/v0.8/llo-feeds/dev/ChannelVerifier.sol b/contracts/src/v0.8/llo-feeds/dev/ChannelVerifier.sol new file mode 100644 index 00000000..424022dc --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/dev/ChannelVerifier.sol @@ -0,0 +1,536 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {ConfirmedOwner} 
from "../../shared/access/ConfirmedOwner.sol"; +import {TypeAndVersionInterface} from "../../interfaces/TypeAndVersionInterface.sol"; +import {IERC165} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; +import {Common} from "../libraries/Common.sol"; +import {IChannelVerifier} from "./interfaces/IChannelVerifier.sol"; + +// OCR2 standard +uint256 constant MAX_NUM_ORACLES = 31; + +/* + * The verifier contract is used to verify offchain reports signed + * by DONs. A report consists of a price, block number and feed Id. It + * represents the observed price of an asset at a specified block number for + * a feed. The verifier contract is used to verify that such reports have + * been signed by the correct signers. + **/ +contract ChannelVerifier is IChannelVerifier, ConfirmedOwner, TypeAndVersionInterface { + // The first byte of the mask can be 0, because we only ever have 31 oracles + uint256 internal constant ORACLE_MASK = 0x0001010101010101010101010101010101010101010101010101010101010101; + + enum Role { + // Default role for an oracle address. This means that the oracle address + // is not a signer + Unset, + // Role given to an oracle address that is allowed to sign feed data + Signer + } + + struct Signer { + // Index of oracle in a configuration + uint8 index; + // The oracle's role + Role role; + } + + struct Config { + // Fault tolerance + uint8 f; + // Marks whether or not a configuration is active + bool isActive; + // Map of signer addresses to oracles + mapping(address => Signer) oracles; + } + + struct VerifierState { + // The number of times a new configuration + /// has been set + uint32 configCount; + // The block number of the block the last time + /// the configuration was updated. 
+ uint32 latestConfigBlockNumber; + // The latest epoch a report was verified for + uint32 latestEpoch; + /// The latest config digest set + bytes32 latestConfigDigest; + /// List of deactivated feeds + mapping(bytes32 => bool) deactivatedFeeds; + /// The historical record of all previously set configs by feedId + mapping(bytes32 => Config) s_verificationDataConfigs; + } + + /// @notice This event is emitted when a new report is verified. + /// It is used to keep a historical record of verified reports. + event ReportVerified(bytes32 indexed feedId, address requester); + + /// @notice This event is emitted whenever a new configuration is set. It triggers a new run of the offchain reporting protocol. + event ConfigSet( + uint32 previousConfigBlockNumber, + bytes32 configDigest, + uint64 configCount, + address[] signers, + bytes32[] offchainTransmitters, + uint8 f, + bytes onchainConfig, + uint64 offchainConfigVersion, + bytes offchainConfig + ); + + /// @notice This event is emitted whenever a configuration is deactivated + event ConfigDeactivated(bytes32 configDigest); + + /// @notice This event is emitted whenever a configuration is activated + event ConfigActivated(bytes32 configDigest); + + /// @notice This event is emitted whenever a feed is activated + event FeedActivated(bytes32 indexed feedId); + + /// @notice This event is emitted whenever a feed is deactivated + event FeedDeactivated(bytes32 indexed feedId); + + /// @notice This error is thrown whenever an address tries + /// to exeecute a transaction that it is not authorized to do so + error AccessForbidden(); + + /// @notice This error is thrown whenever a zero address is passed + error ZeroAddress(); + + /// @notice This error is thrown whenever the feed ID passed in + /// a signed report is empty + error FeedIdEmpty(); + + /// @notice This error is thrown whenever the config digest + /// is empty + error DigestEmpty(); + + /// @notice This error is thrown whenever the config digest + /// passed in has 
not been set in this verifier + /// @param configDigest The config digest that has not been set + error DigestNotSet(bytes32 configDigest); + + /// @notice This error is thrown whenever the config digest + /// has been deactivated + /// @param configDigest The config digest that is inactive + error DigestInactive(bytes32 configDigest); + + /// @notice This error is thrown whenever trying to set a config + /// with a fault tolerance of 0 + error FaultToleranceMustBePositive(); + + /// @notice This error is thrown whenever a report is signed + /// with more than the max number of signers + /// @param numSigners The number of signers who have signed the report + /// @param maxSigners The maximum number of signers that can sign a report + error ExcessSigners(uint256 numSigners, uint256 maxSigners); + + /// @notice This error is thrown whenever a report is signed + /// with less than the minimum number of signers + /// @param numSigners The number of signers who have signed the report + /// @param minSigners The minimum number of signers that need to sign a report + error InsufficientSigners(uint256 numSigners, uint256 minSigners); + + /// @notice This error is thrown whenever a report is signed + /// with an incorrect number of signers + /// @param numSigners The number of signers who have signed the report + /// @param expectedNumSigners The expected number of signers that need to sign + /// a report + error IncorrectSignatureCount(uint256 numSigners, uint256 expectedNumSigners); + + /// @notice This error is thrown whenever the R and S signer components + /// have different lengths + /// @param rsLength The number of r signature components + /// @param ssLength The number of s signature components + error MismatchedSignatures(uint256 rsLength, uint256 ssLength); + + /// @notice This error is thrown whenever setting a config with duplicate signatures + error NonUniqueSignatures(); + + /// @notice This error is thrown whenever a report fails to verify due to bad or 
duplicate signatures + error BadVerification(); + + /// @notice This error is thrown whenever the admin tries to deactivate + /// the latest config digest + /// @param configDigest The latest config digest + error CannotDeactivateLatestConfig(bytes32 configDigest); + + /// @notice This error is thrown whenever the feed ID passed in is deactivated + /// @param feedId The feed ID + error InactiveFeed(bytes32 feedId); + + /// @notice This error is thrown whenever the feed ID passed in is not found + /// @param feedId The feed ID + error InvalidFeed(bytes32 feedId); + + /// @notice The address of the verifier proxy + address private immutable i_verifierProxyAddr; + + /// @notice Verifier states keyed on Feed ID + VerifierState internal s_feedVerifierState; + + /// @param verifierProxyAddr The address of the VerifierProxy contract + constructor(address verifierProxyAddr) ConfirmedOwner(msg.sender) { + if (verifierProxyAddr == address(0)) revert ZeroAddress(); + i_verifierProxyAddr = verifierProxyAddr; + } + + modifier checkConfigValid(uint256 numSigners, uint256 f) { + if (f == 0) revert FaultToleranceMustBePositive(); + if (numSigners > MAX_NUM_ORACLES) revert ExcessSigners(numSigners, MAX_NUM_ORACLES); + if (numSigners <= 3 * f) revert InsufficientSigners(numSigners, 3 * f + 1); + _; + } + + /// @inheritdoc IERC165 + function supportsInterface(bytes4 interfaceId) external pure override returns (bool isVerifier) { + return interfaceId == this.verify.selector; + } + + /// @inheritdoc TypeAndVersionInterface + function typeAndVersion() external pure override returns (string memory) { + return "ChannelVerifier 0.0.0"; + } + + /// @inheritdoc IChannelVerifier + function verify( + bytes calldata signedReport, + address sender + ) external override returns (bytes memory verifierResponse) { + if (msg.sender != i_verifierProxyAddr) revert AccessForbidden(); + ( + bytes32[3] memory reportContext, + bytes memory reportData, + bytes32[] memory rs, + bytes32[] memory ss, + bytes32 
rawVs + ) = abi.decode(signedReport, (bytes32[3], bytes, bytes32[], bytes32[], bytes32)); + + // The feed ID is the first 32 bytes of the report data. + bytes32 feedId = bytes32(reportData); + + // If the feed has been deactivated, do not verify the report + if (s_feedVerifierState.deactivatedFeeds[feedId]) { + revert InactiveFeed(feedId); + } + + // reportContext consists of: + // reportContext[0]: ConfigDigest + // reportContext[1]: 27 byte padding, 4-byte epoch and 1-byte round + // reportContext[2]: ExtraHash + bytes32 configDigest = reportContext[0]; + Config storage s_config = s_feedVerifierState.s_verificationDataConfigs[configDigest]; + + _validateReport(configDigest, rs, ss, s_config); + _updateEpoch(reportContext, s_feedVerifierState); + + bytes32 hashedReport = keccak256(reportData); + + _verifySignatures(hashedReport, reportContext, rs, ss, rawVs, s_config); + emit ReportVerified(feedId, sender); + + return reportData; + } + + /// @notice Validates parameters of the report + /// @param configDigest Config digest from the report + /// @param rs R components from the report + /// @param ss S components from the report + /// @param config Config for the given feed ID keyed on the config digest + function _validateReport( + bytes32 configDigest, + bytes32[] memory rs, + bytes32[] memory ss, + Config storage config + ) private view { + uint8 expectedNumSignatures = config.f + 1; + + if (!config.isActive) revert DigestInactive(configDigest); + if (rs.length != expectedNumSignatures) revert IncorrectSignatureCount(rs.length, expectedNumSignatures); + if (rs.length != ss.length) revert MismatchedSignatures(rs.length, ss.length); + } + + /** + * @notice Conditionally update the epoch for a feed + * @param reportContext Report context containing the epoch and round + * @param feedVerifierState Feed verifier state to conditionally update + */ + function _updateEpoch(bytes32[3] memory reportContext, VerifierState storage feedVerifierState) private { + uint40 
epochAndRound = uint40(uint256(reportContext[1])); + uint32 epoch = uint32(epochAndRound >> 8); + if (epoch > feedVerifierState.latestEpoch) { + feedVerifierState.latestEpoch = epoch; + } + } + + /// @notice Verifies that a report has been signed by the correct + /// signers and that enough signers have signed the reports. + /// @param hashedReport The keccak256 hash of the raw report's bytes + /// @param reportContext The context the report was signed in + /// @param rs ith element is the R components of the ith signature on report. Must have at most MAX_NUM_ORACLES entries + /// @param ss ith element is the S components of the ith signature on report. Must have at most MAX_NUM_ORACLES entries + /// @param rawVs ith element is the the V component of the ith signature + /// @param s_config The config digest the report was signed for + function _verifySignatures( + bytes32 hashedReport, + bytes32[3] memory reportContext, + bytes32[] memory rs, + bytes32[] memory ss, + bytes32 rawVs, + Config storage s_config + ) private view { + bytes32 h = keccak256(abi.encodePacked(hashedReport, reportContext)); + // i-th byte counts number of sigs made by i-th signer + uint256 signedCount; + + Signer memory o; + address signerAddress; + uint256 numSigners = rs.length; + for (uint256 i; i < numSigners; ++i) { + signerAddress = ecrecover(h, uint8(rawVs[i]) + 27, rs[i], ss[i]); + o = s_config.oracles[signerAddress]; + if (o.role != Role.Signer) revert BadVerification(); + unchecked { + signedCount += 1 << (8 * o.index); + } + } + + if (signedCount & ORACLE_MASK != signedCount) revert BadVerification(); + } + + /// @inheritdoc IChannelVerifier + function setConfig( + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig, + Common.AddressAndWeight[] memory recipientAddressesAndWeights + ) external override checkConfigValid(signers.length, f) onlyOwner { + _setConfig( + 
block.chainid, + address(this), + 0, // 0 defaults to feedConfig.configCount + 1 + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + recipientAddressesAndWeights + ); + } + + /// @inheritdoc IChannelVerifier + function setConfigFromSource( + uint256 sourceChainId, + address sourceAddress, + uint32 newConfigCount, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig, + Common.AddressAndWeight[] memory recipientAddressesAndWeights + ) external override checkConfigValid(signers.length, f) onlyOwner { + _setConfig( + sourceChainId, + sourceAddress, + newConfigCount, + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + recipientAddressesAndWeights + ); + } + + /// @notice Sets config based on the given arguments + /// @param sourceChainId Chain ID of source config + /// @param sourceAddress Address of source config Verifier + /// @param newConfigCount Optional param to force the new config count + /// @param signers addresses with which oracles sign the reports + /// @param offchainTransmitters CSA key for the ith Oracle + /// @param f number of faulty oracles the system can tolerate + /// @param onchainConfig serialized configuration used by the contract (and possibly oracles) + /// @param offchainConfigVersion version number for offchainEncoding schema + /// @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + /// @param recipientAddressesAndWeights the addresses and weights of all the recipients to receive rewards + function _setConfig( + uint256 sourceChainId, + address sourceAddress, + uint32 newConfigCount, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig, + 
Common.AddressAndWeight[] memory recipientAddressesAndWeights + ) internal { + // Increment the number of times a config has been set first + if (newConfigCount > 0) s_feedVerifierState.configCount = newConfigCount; + else s_feedVerifierState.configCount++; + + bytes32 configDigest = _configDigestFromConfigData( + sourceChainId, + sourceAddress, + s_feedVerifierState.configCount, + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ); + + s_feedVerifierState.s_verificationDataConfigs[configDigest].f = f; + s_feedVerifierState.s_verificationDataConfigs[configDigest].isActive = true; + for (uint8 i; i < signers.length; ++i) { + address signerAddr = signers[i]; + if (signerAddr == address(0)) revert ZeroAddress(); + + // All signer roles are unset by default for a new config digest. + // Here the contract checks to see if a signer's address has already + // been set to ensure that the group of signer addresses that will + // sign reports with the config digest are unique. 
+ bool isSignerAlreadySet = s_feedVerifierState.s_verificationDataConfigs[configDigest].oracles[signerAddr].role != + Role.Unset; + if (isSignerAlreadySet) revert NonUniqueSignatures(); + s_feedVerifierState.s_verificationDataConfigs[configDigest].oracles[signerAddr] = Signer({ + role: Role.Signer, + index: i + }); + } + + recipientAddressesAndWeights; // silence unused var warning + // IVerifierProxy(i_verifierProxyAddr).setVerifier( + // feedVerifierState.latestConfigDigest, + // configDigest, + // recipientAddressesAndWeights + // ); + + emit ConfigSet( + s_feedVerifierState.latestConfigBlockNumber, + configDigest, + s_feedVerifierState.configCount, + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ); + + s_feedVerifierState.latestEpoch = 0; + s_feedVerifierState.latestConfigBlockNumber = uint32(block.number); + s_feedVerifierState.latestConfigDigest = configDigest; + } + + /// @notice Generates the config digest from config data + /// @param sourceChainId Chain ID of source config + /// @param sourceAddress Address of source config Verifier + /// @param configCount ordinal number of this config setting among all config settings over the life of this contract + /// @param signers ith element is address ith oracle uses to sign a report + /// @param offchainTransmitters ith element is address ith oracle used to transmit reports (in this case used for flexible additional field, such as CSA pub keys) + /// @param f maximum number of faulty/dishonest oracles the protocol can tolerate while still working correctly + /// @param onchainConfig serialized configuration used by the contract (and possibly oracles) + /// @param offchainConfigVersion version of the serialization format used for "offchainConfig" parameter + /// @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + /// @dev This function is a modified version of the method from OCR2Abstract + 
function _configDigestFromConfigData( + uint256 sourceChainId, + address sourceAddress, + uint64 configCount, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) internal pure returns (bytes32) { + uint256 h = uint256( + keccak256( + abi.encode( + sourceChainId, + sourceAddress, + configCount, + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ) + ) + ); + uint256 prefixMask = type(uint256).max << (256 - 16); // 0xFFFF00..00 + // 0x0009 corresponds to ConfigDigestPrefixLLO in libocr + uint256 prefix = 0x0009 << (256 - 16); // 0x000900..00 + return bytes32((prefix & prefixMask) | (h & ~prefixMask)); + } + + /// @inheritdoc IChannelVerifier + function activateConfig(bytes32 configDigest) external onlyOwner { + if (configDigest == bytes32("")) revert DigestEmpty(); + if (s_feedVerifierState.s_verificationDataConfigs[configDigest].f == 0) revert DigestNotSet(configDigest); + s_feedVerifierState.s_verificationDataConfigs[configDigest].isActive = true; + emit ConfigActivated(configDigest); + } + + /// @inheritdoc IChannelVerifier + function deactivateConfig(bytes32 configDigest) external onlyOwner { + if (configDigest == bytes32("")) revert DigestEmpty(); + if (s_feedVerifierState.s_verificationDataConfigs[configDigest].f == 0) revert DigestNotSet(configDigest); + if (configDigest == s_feedVerifierState.latestConfigDigest) { + revert CannotDeactivateLatestConfig(configDigest); + } + s_feedVerifierState.s_verificationDataConfigs[configDigest].isActive = false; + emit ConfigDeactivated(configDigest); + } + + /// @inheritdoc IChannelVerifier + function activateFeed(bytes32 feedId) external onlyOwner { + if (s_feedVerifierState.deactivatedFeeds[feedId]) return; + + s_feedVerifierState.deactivatedFeeds[feedId] = false; + emit FeedActivated(feedId); + } + + /// @inheritdoc IChannelVerifier + function 
deactivateFeed(bytes32 feedId) external onlyOwner { + if (s_feedVerifierState.deactivatedFeeds[feedId] == false) return; + + s_feedVerifierState.deactivatedFeeds[feedId] = true; + emit FeedDeactivated(feedId); + } + + /// @inheritdoc IChannelVerifier + function latestConfigDigestAndEpoch() + external + view + override + returns (bool scanLogs, bytes32 configDigest, uint32 epoch) + { + return (false, s_feedVerifierState.latestConfigDigest, s_feedVerifierState.latestEpoch); + } + + /// @inheritdoc IChannelVerifier + function latestConfigDetails() + external + view + override + returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest) + { + return ( + s_feedVerifierState.configCount, + s_feedVerifierState.latestConfigBlockNumber, + s_feedVerifierState.latestConfigDigest + ); + } +} diff --git a/contracts/src/v0.8/llo-feeds/dev/interfaces/IChannelConfigStore.sol b/contracts/src/v0.8/llo-feeds/dev/interfaces/IChannelConfigStore.sol new file mode 100644 index 00000000..b71387a8 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/dev/interfaces/IChannelConfigStore.sol @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {IERC165} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; + +interface IChannelConfigStore is IERC165 { + // function setStagingConfig(bytes32 configDigest, ChannelConfiguration calldata channelConfig) external; + + // function promoteStagingConfig(bytes32 configDigest) external; + + function addChannel(uint32 channelId, ChannelDefinition calldata channelDefinition) external; + + function removeChannel(uint32 channelId) external; + + function getChannelDefinitions(uint32 channelId) external view returns (ChannelDefinition memory); + + // struct ChannelConfiguration { + // bytes32 configDigest; + // } + + struct ChannelDefinition { + // e.g. evm, solana, CosmWasm, kalechain, etc... + bytes8 reportFormat; + // Specifies the chain on which this channel can be verified. 
Currently uses + // CCIP chain selectors, but lots of other schemes are possible as well. + uint64 chainSelector; + // We assume that StreamIDs is always non-empty and that the 0-th stream + // contains the verification price in PLI and the 1-st stream contains the + // verification price in the native coin. + uint32[] streamIDs; + } +} diff --git a/contracts/src/v0.8/llo-feeds/dev/interfaces/IChannelVerifier.sol b/contracts/src/v0.8/llo-feeds/dev/interfaces/IChannelVerifier.sol new file mode 100644 index 00000000..6bab5912 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/dev/interfaces/IChannelVerifier.sol @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {IERC165} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; +import {Common} from "../../libraries/Common.sol"; + +interface IChannelVerifier is IERC165 { + /** + * @notice Verifies that the data encoded has been signed + * correctly by routing to the correct verifier. + * @param signedReport The encoded data to be verified. + * @param sender The address that requested to verify the contract. + * This is only used for logging purposes. + * @dev Verification is typically only done through the proxy contract so + * we can't just use msg.sender to log the requester as the msg.sender + * contract will always be the proxy. + * @return verifierResponse The encoded verified response. + */ + function verify(bytes calldata signedReport, address sender) external returns (bytes memory verifierResponse); + + /** + * @notice sets offchain reporting protocol configuration incl. 
participating oracles + * @param signers addresses with which oracles sign the reports + * @param offchainTransmitters CSA key for the ith Oracle + * @param f number of faulty oracles the system can tolerate + * @param onchainConfig serialized configuration used by the contract (and possibly oracles) + * @param offchainConfigVersion version number for offchainEncoding schema + * @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + * @param recipientAddressesAndWeights the addresses and weights of all the recipients to receive rewards + */ + function setConfig( + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig, + Common.AddressAndWeight[] memory recipientAddressesAndWeights + ) external; + + /** + * @notice identical to `setConfig` except with args for sourceChainId and sourceAddress + * @param sourceChainId Chain ID of source config + * @param sourceAddress Address of source config Verifier + * @param newConfigCount Param to force the new config count + * @param signers addresses with which oracles sign the reports + * @param offchainTransmitters CSA key for the ith Oracle + * @param f number of faulty oracles the system can tolerate + * @param onchainConfig serialized configuration used by the contract (and possibly oracles) + * @param offchainConfigVersion version number for offchainEncoding schema + * @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + * @param recipientAddressesAndWeights the addresses and weights of all the recipients to receive rewards + */ + function setConfigFromSource( + uint256 sourceChainId, + address sourceAddress, + uint32 newConfigCount, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes 
memory offchainConfig, + Common.AddressAndWeight[] memory recipientAddressesAndWeights + ) external; + + /** + * @notice Activates the configuration for a config digest + * @param configDigest The config digest to activate + * @dev This function can be called by the contract admin to activate a configuration. + */ + function activateConfig(bytes32 configDigest) external; + + /** + * @notice Deactivates the configuration for a config digest + * @param configDigest The config digest to deactivate + * @dev This function can be called by the contract admin to deactivate an incorrect configuration. + */ + function deactivateConfig(bytes32 configDigest) external; + + /** + * @notice Activates the given feed + * @param feedId Feed ID to activated + * @dev This function can be called by the contract admin to activate a feed + */ + function activateFeed(bytes32 feedId) external; + + /** + * @notice Deactivates the given feed + * @param feedId Feed ID to deactivated + * @dev This function can be called by the contract admin to deactivate a feed + */ + function deactivateFeed(bytes32 feedId) external; + + /** + * @notice returns the latest config digest and epoch + * @return scanLogs indicates whether to rely on the configDigest and epoch + * returned or whether to scan logs for the Transmitted event instead. 
+ * @return configDigest + * @return epoch + */ + function latestConfigDigestAndEpoch() external view returns (bool scanLogs, bytes32 configDigest, uint32 epoch); + + /** + * @notice information about current offchain reporting protocol configuration + * @return configCount ordinal number of current config, out of all configs applied to this contract so far + * @return blockNumber block at which this config was set + * @return configDigest domain-separation tag for current config + */ + function latestConfigDetails() external view returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest); +} diff --git a/contracts/src/v0.8/llo-feeds/dev/test/mocks/ExposedChannelVerifier.sol b/contracts/src/v0.8/llo-feeds/dev/test/mocks/ExposedChannelVerifier.sol new file mode 100644 index 00000000..650b3b4a --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/dev/test/mocks/ExposedChannelVerifier.sol @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +// ExposedChannelVerifier exposes certain internal Verifier +// methods/structures so that golang code can access them, and we get +// reliable type checking on their usage +contract ExposedChannelVerifier { + constructor() {} + + function _configDigestFromConfigData( + uint256 chainId, + address contractAddress, + uint64 configCount, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) internal pure returns (bytes32) { + uint256 h = uint256( + keccak256( + abi.encode( + chainId, + contractAddress, + configCount, + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ) + ) + ); + uint256 prefixMask = type(uint256).max << (256 - 16); // 0xFFFF00..00 + // 0x0009 corresponds to ConfigDigestPrefixLLO in libocr + uint256 prefix = 0x0009 << (256 - 16); // 0x000900..00 + return bytes32((prefix & prefixMask) | (h & ~prefixMask)); + } + + 
function exposedConfigDigestFromConfigData( + uint256 _chainId, + address _contractAddress, + uint64 _configCount, + address[] memory _signers, + bytes32[] memory _offchainTransmitters, + uint8 _f, + bytes calldata _onchainConfig, + uint64 _encodedConfigVersion, + bytes memory _encodedConfig + ) public pure returns (bytes32) { + return + _configDigestFromConfigData( + _chainId, + _contractAddress, + _configCount, + _signers, + _offchainTransmitters, + _f, + _onchainConfig, + _encodedConfigVersion, + _encodedConfig + ); + } +} diff --git a/contracts/src/v0.8/llo-feeds/dev/test/mocks/ExposedVerifier.sol b/contracts/src/v0.8/llo-feeds/dev/test/mocks/ExposedVerifier.sol new file mode 100644 index 00000000..1c004bf3 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/dev/test/mocks/ExposedVerifier.sol @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +// ExposedVerifier exposes certain internal Verifier +// methods/structures so that golang code can access them, and we get +// reliable type checking on their usage +contract ExposedVerifier { + constructor() {} + + function _configDigestFromConfigData( + bytes32 feedId, + uint256 chainId, + address contractAddress, + uint64 configCount, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) internal pure returns (bytes32) { + uint256 h = uint256( + keccak256( + abi.encode( + feedId, + chainId, + contractAddress, + configCount, + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ) + ) + ); + uint256 prefixMask = type(uint256).max << (256 - 16); // 0xFFFF00..00 + uint256 prefix = 0x0006 << (256 - 16); // 0x000600..00 + return bytes32((prefix & prefixMask) | (h & ~prefixMask)); + } + + function exposedConfigDigestFromConfigData( + bytes32 _feedId, + uint256 _chainId, + address _contractAddress, + uint64 _configCount, + address[] 
memory _signers, + bytes32[] memory _offchainTransmitters, + uint8 _f, + bytes calldata _onchainConfig, + uint64 _encodedConfigVersion, + bytes memory _encodedConfig + ) public pure returns (bytes32) { + return + _configDigestFromConfigData( + _feedId, + _chainId, + _contractAddress, + _configCount, + _signers, + _offchainTransmitters, + _f, + _onchainConfig, + _encodedConfigVersion, + _encodedConfig + ); + } +} diff --git a/contracts/src/v0.8/llo-feeds/interfaces/IFeeManager.sol b/contracts/src/v0.8/llo-feeds/interfaces/IFeeManager.sol new file mode 100644 index 00000000..2706bb8d --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/interfaces/IFeeManager.sol @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {IERC165} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; +import {Common} from "../libraries/Common.sol"; +import {IVerifierFeeManager} from "./IVerifierFeeManager.sol"; + +interface IFeeManager is IERC165, IVerifierFeeManager { + /** + * @notice Calculate the applied fee and the reward from a report. If the sender is a subscriber, they will receive a discount. 
+ * @param subscriber address trying to verify + * @param report report to calculate the fee for + * @param quoteAddress address of the quote payment token + * @return (fee, reward, totalDiscount) fee and the reward data with the discount applied + */ + function getFeeAndReward( + address subscriber, + bytes memory report, + address quoteAddress + ) external returns (Common.Asset memory, Common.Asset memory, uint256); + + /** + * @notice Sets the native surcharge + * @param surcharge surcharge to be paid if paying in native + */ + function setNativeSurcharge(uint64 surcharge) external; + + /** + * @notice Adds a subscriber to the fee manager + * @param subscriber address of the subscriber + * @param feedId feed id to apply the discount to + * @param token token to apply the discount to + * @param discount discount to be applied to the fee + */ + function updateSubscriberDiscount(address subscriber, bytes32 feedId, address token, uint64 discount) external; + + /** + * @notice Withdraws any native or PLI rewards to the owner address + * @param assetAddress address of the asset to withdraw + * @param recipientAddress address to withdraw to + * @param quantity quantity to withdraw + */ + function withdraw(address assetAddress, address recipientAddress, uint192 quantity) external; + + /** + * @notice Returns the link balance of the fee manager + * @return link balance of the fee manager + */ + function linkAvailableForPayment() external returns (uint256); + + /** + * @notice Admin function to pay the PLI deficit for a given config digest + * @param configDigest the config digest to pay the deficit for + */ + function payLinkDeficit(bytes32 configDigest) external; + + /** + * @notice The structure to hold a fee and reward to verify a report + * @param configDigest the digest linked to the fee and reward + * @param fee the fee paid to verify the report + * @param reward the reward paid upon verification + * @param appliedDiscount the discount applied to the reward + */ + struct 
FeeAndReward { + bytes32 configDigest; + Common.Asset fee; + Common.Asset reward; + uint256 appliedDiscount; + } + + /** + * @notice The structure to hold quote metadata + * @param quoteAddress the address of the quote + */ + struct Quote { + address quoteAddress; + } +} diff --git a/contracts/src/v0.8/llo-feeds/interfaces/IRewardManager.sol b/contracts/src/v0.8/llo-feeds/interfaces/IRewardManager.sol new file mode 100644 index 00000000..5a6e03f1 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/interfaces/IRewardManager.sol @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {IERC165} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; +import {Common} from "../libraries/Common.sol"; + +interface IRewardManager is IERC165 { + /** + * @notice Record the fee received for a particular pool + * @param payments array of structs containing pool id and amount + * @param payee the user the funds should be retrieved from + */ + function onFeePaid(FeePayment[] calldata payments, address payee) external; + + /** + * @notice Claims the rewards in a specific pool + * @param poolIds array of poolIds to claim rewards for + */ + function claimRewards(bytes32[] calldata poolIds) external; + + /** + * @notice Set the RewardRecipients and weights for a specific pool. This should only be called once per pool Id. Else updateRewardRecipients should be used. + * @param poolId poolId to set RewardRecipients and weights for + * @param rewardRecipientAndWeights array of each RewardRecipient and associated weight + */ + function setRewardRecipients(bytes32 poolId, Common.AddressAndWeight[] calldata rewardRecipientAndWeights) external; + + /** + * @notice Updates a subset of the reward recipients for a specific poolId. The collective weight of the recipients should add up to the recipients' existing weights. Any recipients with a weight of 0 will be removed. 
+ * @param poolId the poolId to update + * @param newRewardRecipients array of new reward recipients + */ + function updateRewardRecipients(bytes32 poolId, Common.AddressAndWeight[] calldata newRewardRecipients) external; + + /** + * @notice Pays all the recipients for each of the pool ids + * @param poolId the pool id to pay recipients for + * @param recipients array of recipients to pay within the pool + */ + function payRecipients(bytes32 poolId, address[] calldata recipients) external; + + /** + * @notice Sets the fee manager. This needs to be done post construction to prevent a circular dependency. + * @param newFeeManager address of the new fee manager + */ + function setFeeManager(address newFeeManager) external; + + /** + * @notice Gets a list of pool ids which have rewards for a specific recipient. + * @param recipient address of the recipient to get pool ids for + * @param startIndex the index to start from + * @param endIndex the index to stop at + */ + function getAvailableRewardPoolIds( + address recipient, + uint256 startIndex, + uint256 endIndex + ) external view returns (bytes32[] memory); + + /** + * @notice The structure to hold a fee payment notice + * @param poolId the poolId receiving the payment + * @param amount the amount being paid + */ + struct FeePayment { + bytes32 poolId; + uint192 amount; + } +} diff --git a/contracts/src/v0.8/llo-feeds/interfaces/IVerifier.sol b/contracts/src/v0.8/llo-feeds/interfaces/IVerifier.sol new file mode 100644 index 00000000..617d702d --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/interfaces/IVerifier.sol @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {IERC165} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; +import {Common} from "../libraries/Common.sol"; + +interface IVerifier is IERC165 { + /** + * @notice Verifies that the data encoded has been signed + * correctly by routing to the correct verifier. 
+ * @param signedReport The encoded data to be verified. + * @param sender The address that requested to verify the contract. + * This is only used for logging purposes. + * @dev Verification is typically only done through the proxy contract so + * we can't just use msg.sender to log the requester as the msg.sender + * contract will always be the proxy. + * @return verifierResponse The encoded verified response. + */ + function verify(bytes calldata signedReport, address sender) external returns (bytes memory verifierResponse); + + /** + * @notice sets offchain reporting protocol configuration incl. participating oracles + * @param feedId Feed ID to set config for + * @param signers addresses with which oracles sign the reports + * @param offchainTransmitters CSA key for the ith Oracle + * @param f number of faulty oracles the system can tolerate + * @param onchainConfig serialized configuration used by the contract (and possibly oracles) + * @param offchainConfigVersion version number for offchainEncoding schema + * @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + * @param recipientAddressesAndWeights the addresses and weights of all the recipients to receive rewards + */ + function setConfig( + bytes32 feedId, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig, + Common.AddressAndWeight[] memory recipientAddressesAndWeights + ) external; + + /** + * @notice identical to `setConfig` except with args for sourceChainId and sourceAddress + * @param feedId Feed ID to set config for + * @param sourceChainId Chain ID of source config + * @param sourceAddress Address of source config Verifier + * @param newConfigCount Param to force the new config count + * @param signers addresses with which oracles sign the reports + * @param offchainTransmitters CSA key for the ith Oracle + * 
@param f number of faulty oracles the system can tolerate + * @param onchainConfig serialized configuration used by the contract (and possibly oracles) + * @param offchainConfigVersion version number for offchainEncoding schema + * @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + * @param recipientAddressesAndWeights the addresses and weights of all the recipients to receive rewards + */ + function setConfigFromSource( + bytes32 feedId, + uint256 sourceChainId, + address sourceAddress, + uint32 newConfigCount, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig, + Common.AddressAndWeight[] memory recipientAddressesAndWeights + ) external; + + /** + * @notice Activates the configuration for a config digest + * @param feedId Feed ID to activate config for + * @param configDigest The config digest to activate + * @dev This function can be called by the contract admin to activate a configuration. + */ + function activateConfig(bytes32 feedId, bytes32 configDigest) external; + + /** + * @notice Deactivates the configuration for a config digest + * @param feedId Feed ID to deactivate config for + * @param configDigest The config digest to deactivate + * @dev This function can be called by the contract admin to deactivate an incorrect configuration. 
+ */ + function deactivateConfig(bytes32 feedId, bytes32 configDigest) external; + + /** + * @notice Activates the given feed + * @param feedId Feed ID to activated + * @dev This function can be called by the contract admin to activate a feed + */ + function activateFeed(bytes32 feedId) external; + + /** + * @notice Deactivates the given feed + * @param feedId Feed ID to deactivated + * @dev This function can be called by the contract admin to deactivate a feed + */ + function deactivateFeed(bytes32 feedId) external; + + /** + * @notice returns the latest config digest and epoch for a feed + * @param feedId Feed ID to fetch data for + * @return scanLogs indicates whether to rely on the configDigest and epoch + * returned or whether to scan logs for the Transmitted event instead. + * @return configDigest + * @return epoch + */ + function latestConfigDigestAndEpoch( + bytes32 feedId + ) external view returns (bool scanLogs, bytes32 configDigest, uint32 epoch); + + /** + * @notice information about current offchain reporting protocol configuration + * @param feedId Feed ID to fetch data for + * @return configCount ordinal number of current config, out of all configs applied to this contract so far + * @return blockNumber block at which this config was set + * @return configDigest domain-separation tag for current config + */ + function latestConfigDetails( + bytes32 feedId + ) external view returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest); +} diff --git a/contracts/src/v0.8/llo-feeds/interfaces/IVerifierFeeManager.sol b/contracts/src/v0.8/llo-feeds/interfaces/IVerifierFeeManager.sol new file mode 100644 index 00000000..522db952 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/interfaces/IVerifierFeeManager.sol @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {IERC165} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; +import {Common} from "../libraries/Common.sol"; + +interface 
IVerifierFeeManager is IERC165 { + /** + * @notice Handles fees for a report from the subscriber and manages rewards + * @param payload report to process the fee for + * @param parameterPayload fee payload + * @param subscriber address of the fee will be applied + */ + function processFee(bytes calldata payload, bytes calldata parameterPayload, address subscriber) external payable; + + /** + * @notice Processes the fees for each report in the payload, billing the subscriber and paying the reward manager + * @param payloads reports to process + * @param parameterPayload fee payload + * @param subscriber address of the user to process fee for + */ + function processFeeBulk( + bytes[] calldata payloads, + bytes calldata parameterPayload, + address subscriber + ) external payable; + + /** + * @notice Sets the fee recipients according to the fee manager + * @param configDigest digest of the configuration + * @param rewardRecipientAndWeights the address and weights of all the recipients to receive rewards + */ + function setFeeRecipients( + bytes32 configDigest, + Common.AddressAndWeight[] calldata rewardRecipientAndWeights + ) external; +} diff --git a/contracts/src/v0.8/llo-feeds/interfaces/IVerifierProxy.sol b/contracts/src/v0.8/llo-feeds/interfaces/IVerifierProxy.sol new file mode 100644 index 00000000..2eb1b4af --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/interfaces/IVerifierProxy.sol @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {Common} from "../libraries/Common.sol"; +import {AccessControllerInterface} from "../../shared/interfaces/AccessControllerInterface.sol"; +import {IVerifierFeeManager} from "./IVerifierFeeManager.sol"; + +interface IVerifierProxy { + /** + * @notice Verifies that the data encoded has been signed + * correctly by routing to the correct verifier, and bills the user if applicable. + * @param payload The encoded data to be verified, including the signed + * report. 
+ * @param parameterPayload fee metadata for billing + * @return verifierResponse The encoded report from the verifier. + */ + function verify( + bytes calldata payload, + bytes calldata parameterPayload + ) external payable returns (bytes memory verifierResponse); + + /** + * @notice Bulk verifies that the data encoded has been signed + * correctly by routing to the correct verifier, and bills the user if applicable. + * @param payloads The encoded payloads to be verified, including the signed + * report. + * @param parameterPayload fee metadata for billing + * @return verifiedReports The encoded reports from the verifier. + */ + function verifyBulk( + bytes[] calldata payloads, + bytes calldata parameterPayload + ) external payable returns (bytes[] memory verifiedReports); + + /** + * @notice Sets the verifier address initially, allowing `setVerifier` to be set by this Verifier in the future + * @param verifierAddress The address of the verifier contract to initialize + */ + function initializeVerifier(address verifierAddress) external; + + /** + * @notice Sets a new verifier for a config digest + * @param currentConfigDigest The current config digest + * @param newConfigDigest The config digest to set + * @param addressesAndWeights The addresses and weights of reward recipients + * reports for a given config digest. + */ + function setVerifier( + bytes32 currentConfigDigest, + bytes32 newConfigDigest, + Common.AddressAndWeight[] memory addressesAndWeights + ) external; + + /** + * @notice Removes a verifier for a given config digest + * @param configDigest The config digest of the verifier to remove + */ + function unsetVerifier(bytes32 configDigest) external; + + /** + * @notice Retrieves the verifier address that verifies reports + * for a config digest. + * @param configDigest The config digest to query for + * @return verifierAddress The address of the verifier contract that verifies + * reports for a given config digest. 
+ */ + function getVerifier(bytes32 configDigest) external view returns (address verifierAddress); + + /** + * @notice Called by the admin to set an access controller contract + * @param accessController The new access controller to set + */ + function setAccessController(AccessControllerInterface accessController) external; + + /** + * @notice Updates the fee manager + * @param feeManager The new fee manager + */ + function setFeeManager(IVerifierFeeManager feeManager) external; +} diff --git a/contracts/src/v0.8/llo-feeds/libraries/ByteUtil.sol b/contracts/src/v0.8/llo-feeds/libraries/ByteUtil.sol new file mode 100644 index 00000000..39bcaa64 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/libraries/ByteUtil.sol @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +/* + * @title ByteUtil + * @author Michael Fletcher + * @notice Byte utility functions for efficiently parsing and manipulating packed byte data + */ +library ByteUtil { + // Error message when an offset is out of bounds + error MalformedData(); + + /** + * @dev Reads a uint256 from a position within a byte array. + * @param data Byte array to read from. + * @param offset Position to start reading from. + * @return result The uint256 read from the byte array. + */ + // solhint-disable-next-line plugin-solidity/explicit-returns + function _readUint256(bytes memory data, uint256 offset) internal pure returns (uint256 result) { + //bounds check + if (offset + 32 > data.length) revert MalformedData(); + + assembly { + //load 32 byte word accounting for 32 bit length and offset + result := mload(add(add(data, 32), offset)) + } + } + + /** + * @dev Reads a uint192 from a position within a byte array. + * @param data Byte array to read from. + * @param offset Position to start reading from. + * @return result The uint192 read from the byte array. 
+ */ + // solhint-disable-next-line plugin-solidity/explicit-returns + function _readUint192(bytes memory data, uint256 offset) internal pure returns (uint256 result) { + //bounds check + if (offset + 24 > data.length) revert MalformedData(); + + assembly { + //load 32 byte word accounting for 32 bit length and offset + result := mload(add(add(data, 32), offset)) + //shift the result right 64 bits + result := shr(64, result) + } + } + + /** + * @dev Reads a uint32 from a position within a byte array. + * @param data Byte array to read from. + * @param offset Position to start reading from. + * @return result The uint32 read from the byte array. + */ + // solhint-disable-next-line plugin-solidity/explicit-returns + function _readUint32(bytes memory data, uint256 offset) internal pure returns (uint256 result) { + //bounds check + if (offset + 4 > data.length) revert MalformedData(); + + assembly { + //load 32 byte word accounting for 32 bit length and offset + result := mload(add(add(data, 32), offset)) + //shift the result right 224 bits + result := shr(224, result) + } + } + + /** + * @dev Reads an address from a position within a byte array. + * @param data Byte array to read from. + * @param offset Position to start reading from. + * @return result The uint32 read from the byte array. 
+ */ + // solhint-disable-next-line plugin-solidity/explicit-returns + function _readAddress(bytes memory data, uint256 offset) internal pure returns (address result) { + //bounds check + if (offset + 20 > data.length) revert MalformedData(); + + assembly { + //load 32 byte word accounting for 32 bit length and offset + let word := mload(add(add(data, 32), offset)) + //address is the last 20 bytes of the word, so shift right + result := shr(96, word) + } + } +} diff --git a/contracts/src/v0.8/llo-feeds/libraries/Common.sol b/contracts/src/v0.8/llo-feeds/libraries/Common.sol new file mode 100644 index 00000000..f732ced0 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/libraries/Common.sol @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +/* + * @title Common + * @author Michael Fletcher + * @notice Common functions and structs + */ +library Common { + // @notice The asset struct to hold the address of an asset and amount + struct Asset { + address assetAddress; + uint256 amount; + } + + // @notice Struct to hold the address and its associated weight + struct AddressAndWeight { + address addr; + uint64 weight; + } + + /** + * @notice Checks if an array of AddressAndWeight has duplicate addresses + * @param recipients The array of AddressAndWeight to check + * @return bool True if there are duplicates, false otherwise + */ + function _hasDuplicateAddresses(Common.AddressAndWeight[] memory recipients) internal pure returns (bool) { + for (uint256 i = 0; i < recipients.length; ) { + for (uint256 j = i + 1; j < recipients.length; ) { + if (recipients[i].addr == recipients[j].addr) { + return true; + } + unchecked { + ++j; + } + } + unchecked { + ++i; + } + } + return false; + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/ByteUtilTest.t.sol b/contracts/src/v0.8/llo-feeds/test/ByteUtilTest.t.sol new file mode 100644 index 00000000..21bd957e --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/ByteUtilTest.t.sol @@ -0,0 +1,177 @@ +// 
SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {Test} from "forge-std/Test.sol"; +import {ByteUtil} from "../libraries/ByteUtil.sol"; + +contract ByteUtilTest is Test { + using ByteUtil for bytes; + + bytes internal constant B_512 = + hex"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000"; + bytes internal constant B_128 = hex"ffffffffffffffffffffffffffffffff"; + bytes internal constant B_16 = hex"ffff"; + bytes internal constant B_EMPTY = new bytes(0); + + bytes4 internal constant MALFORMED_ERROR_SELECTOR = bytes4(keccak256("MalformedData()")); + + function test_readUint256Max() public { + //read the first 32 bytes + uint256 result = B_512._readUint256(0); + + //the result should be the max value of a uint256 + assertEq(result, type(uint256).max); + } + + function test_readUint192Max() public { + //read the first 24 bytes + uint256 result = B_512._readUint192(0); + + //the result should be the max value of a uint192 + assertEq(result, type(uint192).max); + } + + function test_readUint32Max() public { + //read the first 4 bytes + uint256 result = B_512._readUint32(0); + + //the result should be the max value of a uint32 + assertEq(result, type(uint32).max); + } + + function test_readUint256Min() public { + //read the second 32 bytes + uint256 result = B_512._readUint256(32); + + //the result should be the min value of a uint256 + assertEq(result, type(uint256).min); + } + + function test_readUint192Min() public { + //read the second 24 bytes + uint256 result = B_512._readUint192(32); + + //the result should be the min value of a uint192 + assertEq(result, type(uint192).min); + } + + function test_readUint32Min() public { + //read the second 4 bytes + uint256 result = B_512._readUint32(32); + + //the result should be the min value of a uint32 + assertEq(result, type(uint32).min); + } + + function test_readUint256MultiWord() public { + //read the first 32 bytes + 
uint256 result = B_512._readUint256(31); + + //the result should be the last byte from the first word (ff), and 31 bytes from the second word (0000) (0xFF...0000) + assertEq(result, type(uint256).max << 248); + } + + function test_readUint192MultiWord() public { + //read the first 24 bytes + uint256 result = B_512._readUint192(31); + + //the result should be the last byte from the first word (ff), and 23 bytes from the second word (0000) (0xFF...0000) + assertEq(result, type(uint192).max << 184); + } + + function test_readUint32MultiWord() public { + //read the first 4 bytes + uint256 result = B_512._readUint32(31); + + //the result should be the last byte from the first word (ff), and 3 bytes from the second word (0000) (0xFF...0000) + assertEq(result, type(uint32).max << 24); + } + + function test_readUint256WithNotEnoughBytes() public { + //should revert if there's not enough bytes + vm.expectRevert(MALFORMED_ERROR_SELECTOR); + + //try and read 32 bytes from a 16 byte number + B_128._readUint256(0); + } + + function test_readUint192WithNotEnoughBytes() public { + //should revert if there's not enough bytes + vm.expectRevert(MALFORMED_ERROR_SELECTOR); + + //try and read 24 bytes from a 16 byte number + B_128._readUint192(0); + } + + function test_readUint32WithNotEnoughBytes() public { + //should revert if there's not enough bytes + vm.expectRevert(MALFORMED_ERROR_SELECTOR); + + //try and read 4 bytes from a 2 byte number + B_16._readUint32(0); + } + + function test_readUint256WithEmptyArray() public { + //should revert if there's not enough bytes + vm.expectRevert(MALFORMED_ERROR_SELECTOR); + + //read 20 bytes from an empty array + B_EMPTY._readUint256(0); + } + + function test_readUint192WithEmptyArray() public { + //should revert if there's not enough bytes + vm.expectRevert(MALFORMED_ERROR_SELECTOR); + + //read 20 bytes from an empty array + B_EMPTY._readUint192(0); + } + + function test_readUint32WithEmptyArray() public { + //should revert if there's not 
enough bytes + vm.expectRevert(MALFORMED_ERROR_SELECTOR); + + //read 20 bytes from an empty array + B_EMPTY._readUint32(0); + } + + function test_readAddress() public { + //read the first 20 bytes + address result = B_512._readAddress(0); + + //the result should be the max value of a uint256 + assertEq(result, address(type(uint160).max)); + } + + function test_readZeroAddress() public { + //read the first 32 bytes after the first word + address result = B_512._readAddress(32); + + //the result should be 0x00...0 + assertEq(result, address(type(uint160).min)); + } + + function test_readAddressMultiWord() public { + //read the first 20 bytes after byte 13 + address result = B_512._readAddress(13); + + //the result should be the value last 19 bytes of the first word (ffff..) and the first byte of the second word (00) (0xFFFF..00) + assertEq(result, address(type(uint160).max << 8)); + } + + function test_readAddressWithNotEnoughBytes() public { + //should revert if there's not enough bytes + vm.expectRevert(MALFORMED_ERROR_SELECTOR); + + //read 20 bytes from a 16 byte array + B_128._readAddress(0); + } + + function test_readAddressWithEmptyArray() public { + //should revert if there's not enough bytes + vm.expectRevert(MALFORMED_ERROR_SELECTOR); + + //read the first 20 bytes of an empty array + B_EMPTY._readAddress(0); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/fee-manager/BaseFeeManager.t.sol b/contracts/src/v0.8/llo-feeds/test/fee-manager/BaseFeeManager.t.sol new file mode 100644 index 00000000..561abd84 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/fee-manager/BaseFeeManager.t.sol @@ -0,0 +1,379 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {Test} from "forge-std/Test.sol"; +import {FeeManager} from "../../FeeManager.sol"; +import {RewardManager} from "../../RewardManager.sol"; +import {Common} from "../../libraries/Common.sol"; +import {ERC20Mock} from 
"../../../vendor/openzeppelin-solidity/v4.8.3/contracts/mocks/ERC20Mock.sol"; +import {WERC20Mock} from "../../../shared/mocks/WERC20Mock.sol"; +import {IRewardManager} from "../../interfaces/IRewardManager.sol"; +import {FeeManagerProxy} from "../mocks/FeeManagerProxy.sol"; + +/** + * @title BaseFeeManagerTest + * @author Michael Fletcher + * @notice Base class for all feeManager tests + * @dev This contract is intended to be inherited from and not used directly. It contains functionality to setup the feeManager + */ +contract BaseFeeManagerTest is Test { + //contracts + FeeManager internal feeManager; + RewardManager internal rewardManager; + FeeManagerProxy internal feeManagerProxy; + + ERC20Mock internal link; + WERC20Mock internal native; + + //erc20 config + uint256 internal constant DEFAULT_PLI_MINT_QUANTITY = 100 ether; + uint256 internal constant DEFAULT_NATIVE_MINT_QUANTITY = 100 ether; + + //contract owner + address internal constant INVALID_ADDRESS = address(0); + address internal constant ADMIN = address(uint160(uint256(keccak256("ADMIN")))); + address internal constant USER = address(uint160(uint256(keccak256("USER")))); + address internal constant PROXY = address(uint160(uint256(keccak256("PROXY")))); + + //version masks + bytes32 internal constant V_MASK = 0x0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff; + bytes32 internal constant V1_BITMASK = 0x0001000000000000000000000000000000000000000000000000000000000000; + bytes32 internal constant V2_BITMASK = 0x0002000000000000000000000000000000000000000000000000000000000000; + bytes32 internal constant V3_BITMASK = 0x0003000000000000000000000000000000000000000000000000000000000000; + + //feed ids & config digests + bytes32 internal constant DEFAULT_FEED_1_V1 = (keccak256("ETH-USD") & V_MASK) | V1_BITMASK; + bytes32 internal constant DEFAULT_FEED_1_V2 = (keccak256("ETH-USD") & V_MASK) | V2_BITMASK; + bytes32 internal constant DEFAULT_FEED_1_V3 = (keccak256("ETH-USD") & V_MASK) | 
V3_BITMASK; + + bytes32 internal constant DEFAULT_FEED_2_V3 = (keccak256("PLI-USD") & V_MASK) | V3_BITMASK; + bytes32 internal constant DEFAULT_CONFIG_DIGEST = keccak256("DEFAULT_CONFIG_DIGEST"); + + //report + uint256 internal constant DEFAULT_REPORT_PLI_FEE = 1e10; + uint256 internal constant DEFAULT_REPORT_NATIVE_FEE = 1e12; + + //rewards + uint64 internal constant FEE_SCALAR = 1e18; + + address internal constant NATIVE_WITHDRAW_ADDRESS = address(0); + + //the selector for each error + bytes4 internal immutable INVALID_DISCOUNT_ERROR = FeeManager.InvalidDiscount.selector; + bytes4 internal immutable INVALID_ADDRESS_ERROR = FeeManager.InvalidAddress.selector; + bytes4 internal immutable INVALID_SURCHARGE_ERROR = FeeManager.InvalidSurcharge.selector; + bytes4 internal immutable EXPIRED_REPORT_ERROR = FeeManager.ExpiredReport.selector; + bytes4 internal immutable INVALID_DEPOSIT_ERROR = FeeManager.InvalidDeposit.selector; + bytes4 internal immutable INVALID_QUOTE_ERROR = FeeManager.InvalidQuote.selector; + bytes4 internal immutable UNAUTHORIZED_ERROR = FeeManager.Unauthorized.selector; + bytes internal constant ONLY_CALLABLE_BY_OWNER_ERROR = "Only callable by owner"; + bytes internal constant INSUFFICIENT_ALLOWANCE_ERROR = "ERC20: insufficient allowance"; + bytes4 internal immutable ZERO_DEFICIT = FeeManager.ZeroDeficit.selector; + + //events emitted + event SubscriberDiscountUpdated(address indexed subscriber, bytes32 indexed feedId, address token, uint64 discount); + event NativeSurchargeUpdated(uint64 newSurcharge); + event InsufficientLink(IRewardManager.FeePayment[] feesAndRewards); + event Withdraw(address adminAddress, address recipient, address assetAddress, uint192 quantity); + event LinkDeficitCleared(bytes32 indexed configDigest, uint256 linkQuantity); + event DiscountApplied( + bytes32 indexed configDigest, + address indexed subscriber, + Common.Asset fee, + Common.Asset reward, + uint256 appliedDiscountQuantity + ); + + function setUp() public virtual 
{ + //change to admin user + vm.startPrank(ADMIN); + + //init required contracts + _initializeContracts(); + } + + function _initializeContracts() internal { + link = new ERC20Mock("PLI", "PLI", ADMIN, 0); + native = new WERC20Mock(); + + feeManagerProxy = new FeeManagerProxy(); + rewardManager = new RewardManager(address(link)); + feeManager = new FeeManager(address(link), address(native), address(feeManagerProxy), address(rewardManager)); + + //link the feeManager to the proxy + feeManagerProxy.setFeeManager(feeManager); + + //link the feeManager to the reward manager + rewardManager.setFeeManager(address(feeManager)); + + //mint some tokens to the admin + link.mint(ADMIN, DEFAULT_PLI_MINT_QUANTITY); + native.mint(ADMIN, DEFAULT_NATIVE_MINT_QUANTITY); + vm.deal(ADMIN, DEFAULT_NATIVE_MINT_QUANTITY); + + //mint some tokens to the user + link.mint(USER, DEFAULT_PLI_MINT_QUANTITY); + native.mint(USER, DEFAULT_NATIVE_MINT_QUANTITY); + vm.deal(USER, DEFAULT_NATIVE_MINT_QUANTITY); + + //mint some tokens to the proxy + link.mint(PROXY, DEFAULT_PLI_MINT_QUANTITY); + native.mint(PROXY, DEFAULT_NATIVE_MINT_QUANTITY); + vm.deal(PROXY, DEFAULT_NATIVE_MINT_QUANTITY); + } + + function setSubscriberDiscount( + address subscriber, + bytes32 feedId, + address token, + uint256 discount, + address sender + ) internal { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //set the discount + feeManager.updateSubscriberDiscount(subscriber, feedId, token, uint64(discount)); + + //change back to the original address + changePrank(originalAddr); + } + + function setNativeSurcharge(uint256 surcharge, address sender) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //set the surcharge + feeManager.setNativeSurcharge(uint64(surcharge)); + + //change back to the original address + changePrank(originalAddr); + } + + // 
solium-disable-next-line no-unused-vars + function getFee(bytes memory report, address quote, address subscriber) public view returns (Common.Asset memory) { + //get the fee + (Common.Asset memory fee, , ) = feeManager.getFeeAndReward(subscriber, report, quote); + + return fee; + } + + function getReward(bytes memory report, address quote, address subscriber) public view returns (Common.Asset memory) { + //get the reward + (, Common.Asset memory reward, ) = feeManager.getFeeAndReward(subscriber, report, quote); + + return reward; + } + + function getAppliedDiscount(bytes memory report, address quote, address subscriber) public view returns (uint256) { + //get the reward + (, , uint256 appliedDiscount) = feeManager.getFeeAndReward(subscriber, report, quote); + + return appliedDiscount; + } + + function getV1Report(bytes32 feedId) public pure returns (bytes memory) { + return abi.encode(feedId, uint32(0), int192(0), int192(0), int192(0), uint64(0), bytes32(0), uint64(0), uint64(0)); + } + + function getV2Report(bytes32 feedId) public view returns (bytes memory) { + return + abi.encode( + feedId, + uint32(0), + uint32(0), + uint192(DEFAULT_REPORT_NATIVE_FEE), + uint192(DEFAULT_REPORT_PLI_FEE), + uint32(block.timestamp), + int192(0) + ); + } + + function getV3Report(bytes32 feedId) public view returns (bytes memory) { + return + abi.encode( + feedId, + uint32(0), + uint32(0), + uint192(DEFAULT_REPORT_NATIVE_FEE), + uint192(DEFAULT_REPORT_PLI_FEE), + uint32(block.timestamp), + int192(0), + int192(0), + int192(0) + ); + } + + function getV3ReportWithCustomExpiryAndFee( + bytes32 feedId, + uint256 expiry, + uint256 linkFee, + uint256 nativeFee + ) public pure returns (bytes memory) { + return + abi.encode( + feedId, + uint32(0), + uint32(0), + uint192(nativeFee), + uint192(linkFee), + uint32(expiry), + int192(0), + int192(0), + int192(0) + ); + } + + function getLinkQuote() public view returns (address) { + return address(link); + } + + function getNativeQuote() public 
view returns (address) { + return address(native); + } + + function withdraw(address assetAddress, address recipient, uint256 amount, address sender) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //withdraw the requested amount of the asset to the recipient + feeManager.withdraw(assetAddress, recipient, uint192(amount)); + + //change back to the original address + changePrank(originalAddr); + } + + function getLinkBalance(address balanceAddress) public view returns (uint256) { + return link.balanceOf(balanceAddress); + } + + function getNativeBalance(address balanceAddress) public view returns (uint256) { + return native.balanceOf(balanceAddress); + } + + function getNativeUnwrappedBalance(address balanceAddress) public view returns (uint256) { + return balanceAddress.balance; + } + + function mintLink(address recipient, uint256 amount) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(ADMIN); + + //mint the link to the recipient + link.mint(recipient, amount); + + //change back to the original address + changePrank(originalAddr); + } + + function mintNative(address recipient, uint256 amount, address sender) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //mint the native to the recipient + native.mint(recipient, amount); + + //change back to the original address + changePrank(originalAddr); + } + + function issueUnwrappedNative(address recipient, uint256 quantity) public { + vm.deal(recipient, quantity); + } + + function ProcessFeeAsUser( + bytes memory payload, + address subscriber, + address tokenAddress, + uint256 wrappedNativeValue, + address sender + ) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //process the fee + feeManager.processFee{value: wrappedNativeValue}(payload, 
abi.encode(tokenAddress), subscriber); + + //change back to the original address + changePrank(originalAddr); + } + + function processFee(bytes memory payload, address subscriber, address feeAddress, uint256 wrappedNativeValue) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(subscriber); + + //process the fee + feeManagerProxy.processFee{value: wrappedNativeValue}(payload, abi.encode(feeAddress)); + + //change back to the original address + changePrank(originalAddr); + } + + function processFee( + bytes[] memory payloads, + address subscriber, + address feeAddress, + uint256 wrappedNativeValue + ) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(subscriber); + + //process the fee + feeManagerProxy.processFeeBulk{value: wrappedNativeValue}(payloads, abi.encode(feeAddress)); + + //change back to the original address + changePrank(originalAddr); + } + + function getPayload(bytes memory reportPayload) public pure returns (bytes memory) { + return abi.encode([DEFAULT_CONFIG_DIGEST, 0, 0], reportPayload, new bytes32[](1), new bytes32[](1), bytes32("")); + } + + function approveLink(address spender, uint256 quantity, address sender) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //approve the link to be transferred + link.approve(spender, quantity); + + //change back to the original address + changePrank(originalAddr); + } + + function approveNative(address spender, uint256 quantity, address sender) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //approve the native to be transferred + native.approve(spender, quantity); + + //change back to the original address + changePrank(originalAddr); + } + + function payLinkDeficit(bytes32 configDigest, address 
sender) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //approve the link to be transferred + feeManager.payLinkDeficit(configDigest); + + //change back to the original address + changePrank(originalAddr); + } + + function getLinkDeficit(bytes32 configDigest) public view returns (uint256) { + return feeManager.s_linkDeficit(configDigest); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.general.t.sol b/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.general.t.sol new file mode 100644 index 00000000..940e308c --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.general.t.sol @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import "./BaseFeeManager.t.sol"; + +/** + * @title BaseFeeManagerTest + * @author Michael Fletcher + * @notice This contract will test the setup functionality of the feemanager + */ +contract FeeManagerProcessFeeTest is BaseFeeManagerTest { + function setUp() public override { + super.setUp(); + } + + function test_WithdrawERC20() public { + //simulate a fee + mintLink(address(feeManager), DEFAULT_PLI_MINT_QUANTITY); + + //get the balances to ne used for comparison + uint256 contractBalance = getLinkBalance(address(feeManager)); + uint256 adminBalance = getLinkBalance(ADMIN); + + //the amount to withdraw + uint256 withdrawAmount = contractBalance / 2; + + //withdraw some balance + withdraw(address(link), ADMIN, withdrawAmount, ADMIN); + + //check the balance has been reduced + uint256 newContractBalance = getLinkBalance(address(feeManager)); + uint256 newAdminBalance = getLinkBalance(ADMIN); + + //check the balance is greater than zero + assertGt(newContractBalance, 0); + //check the balance has been reduced by the correct amount + assertEq(newContractBalance, contractBalance - withdrawAmount); + //check the admin balance has increased by the correct amount + 
assertEq(newAdminBalance, adminBalance + withdrawAmount); + } + + function test_WithdrawUnwrappedNative() public { + //issue funds straight to the contract to bypass the lack of fallback function + issueUnwrappedNative(address(feeManager), DEFAULT_NATIVE_MINT_QUANTITY); + + //get the balances to be used for comparison + uint256 contractBalance = getNativeUnwrappedBalance(address(feeManager)); + uint256 adminBalance = getNativeUnwrappedBalance(ADMIN); + + //the amount to withdraw + uint256 withdrawAmount = contractBalance / 2; + + //withdraw some balance + withdraw(NATIVE_WITHDRAW_ADDRESS, ADMIN, withdrawAmount, ADMIN); + + //check the balance has been reduced + uint256 newContractBalance = getNativeUnwrappedBalance(address(feeManager)); + uint256 newAdminBalance = getNativeUnwrappedBalance(ADMIN); + + //check the balance is greater than zero + assertGt(newContractBalance, 0); + //check the balance has been reduced by the correct amount + assertEq(newContractBalance, contractBalance - withdrawAmount); + //check the admin balance has increased by the correct amount + assertEq(newAdminBalance, adminBalance + withdrawAmount); + } + + function test_WithdrawNonAdminAddr() public { + //simulate a fee + mintLink(address(feeManager), DEFAULT_PLI_MINT_QUANTITY); + + //should revert if not admin + vm.expectRevert(ONLY_CALLABLE_BY_OWNER_ERROR); + + //withdraw some balance + withdraw(address(link), ADMIN, DEFAULT_PLI_MINT_QUANTITY, USER); + } + + function test_eventIsEmittedAfterSurchargeIsSet() public { + //native surcharge + uint64 nativeSurcharge = FEE_SCALAR / 5; + + //expect an emit + vm.expectEmit(); + + //emit the event that is expected to be emitted + emit NativeSurchargeUpdated(nativeSurcharge); + + //set the surcharge + setNativeSurcharge(nativeSurcharge, ADMIN); + } + + function test_subscriberDiscountEventIsEmittedOnUpdate() public { + //native surcharge + uint64 discount = FEE_SCALAR / 3; + + //an event should be emitted + vm.expectEmit(); + + //emit the event that 
is expected to be emitted + emit SubscriberDiscountUpdated(USER, DEFAULT_FEED_1_V3, address(native), discount); + + //set the surcharge + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), discount, ADMIN); + } + + function test_eventIsEmittedUponWithdraw() public { + //simulate a fee + mintLink(address(feeManager), DEFAULT_PLI_MINT_QUANTITY); + + //the amount to withdraw + uint192 withdrawAmount = 1; + + //expect an emit + vm.expectEmit(); + + //the event to be emitted + emit Withdraw(ADMIN, ADMIN, address(link), withdrawAmount); + + //withdraw some balance + withdraw(address(link), ADMIN, withdrawAmount, ADMIN); + } + + function test_linkAvailableForPaymentReturnsLinkBalance() public { + //simulate a deposit of link for the conversion pool + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE); + + //check there's a balance + assertGt(getLinkBalance(address(feeManager)), 0); + + //check the link available for payment is the link balance + assertEq(feeManager.linkAvailableForPayment(), getLinkBalance(address(feeManager))); + } + + function test_payLinkDeficit() public { + //get the default payload + bytes memory payload = getPayload(getV2Report(DEFAULT_FEED_1_V3)); + + approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE, USER); + + //not enough funds in the reward pool should trigger an insufficient link event + vm.expectEmit(); + + IRewardManager.FeePayment[] memory contractFees = new IRewardManager.FeePayment[](1); + contractFees[0] = IRewardManager.FeePayment(DEFAULT_CONFIG_DIGEST, uint192(DEFAULT_REPORT_PLI_FEE)); + + emit InsufficientLink(contractFees); + + //process the fee + processFee(payload, USER, address(native), 0); + + //double check the rewardManager balance is 0 + assertEq(getLinkBalance(address(rewardManager)), 0); + + //simulate a deposit of link to cover the deficit + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE); + + vm.expectEmit(); + emit LinkDeficitCleared(DEFAULT_CONFIG_DIGEST, DEFAULT_REPORT_PLI_FEE); + + 
//pay the deficit which will transfer link from the feeManager to the rewardManager + payLinkDeficit(DEFAULT_CONFIG_DIGEST, ADMIN); + + //check the rewardManager received the link + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE); + } + + function test_payLinkDeficitTwice() public { + //get the default payload + bytes memory payload = getPayload(getV2Report(DEFAULT_FEED_1_V3)); + + approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE, USER); + + //not enough funds in the reward pool should trigger an insufficient link event + vm.expectEmit(); + + IRewardManager.FeePayment[] memory contractFees = new IRewardManager.FeePayment[](1); + contractFees[0] = IRewardManager.FeePayment(DEFAULT_CONFIG_DIGEST, uint192(DEFAULT_REPORT_PLI_FEE)); + + //emit the event that is expected to be emitted + emit InsufficientLink(contractFees); + + //process the fee + processFee(payload, USER, address(native), 0); + + //double check the rewardManager balance is 0 + assertEq(getLinkBalance(address(rewardManager)), 0); + + //simulate a deposit of link to cover the deficit + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE); + + vm.expectEmit(); + emit LinkDeficitCleared(DEFAULT_CONFIG_DIGEST, DEFAULT_REPORT_PLI_FEE); + + //pay the deficit which will transfer link from the feeManager to the rewardManager + payLinkDeficit(DEFAULT_CONFIG_DIGEST, ADMIN); + + //check the rewardManager received the link + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE); + + //paying again should revert with 0 + vm.expectRevert(ZERO_DEFICIT); + + payLinkDeficit(DEFAULT_CONFIG_DIGEST, ADMIN); + } + + function test_payLinkDeficitPaysAllFeesProcessed() public { + //get the default payload + bytes memory payload = getPayload(getV2Report(DEFAULT_FEED_1_V3)); + + //approve the native to be transferred from the user + approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE * 2, USER); + + //processing the fee will transfer the native from the user 
to the feeManager + processFee(payload, USER, address(native), 0); + processFee(payload, USER, address(native), 0); + + //check the deficit has been increased twice + assertEq(getLinkDeficit(DEFAULT_CONFIG_DIGEST), DEFAULT_REPORT_PLI_FEE * 2); + + //double check the rewardManager balance is 0 + assertEq(getLinkBalance(address(rewardManager)), 0); + + //simulate a deposit of link to cover the deficit + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE * 2); + + vm.expectEmit(); + emit LinkDeficitCleared(DEFAULT_CONFIG_DIGEST, DEFAULT_REPORT_PLI_FEE * 2); + + //pay the deficit which will transfer link from the rewardManager to the rewardManager + payLinkDeficit(DEFAULT_CONFIG_DIGEST, ADMIN); + + //check the rewardManager received the link + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE * 2); + } + + function test_payLinkDeficitOnlyCallableByAdmin() public { + vm.expectRevert(ONLY_CALLABLE_BY_OWNER_ERROR); + + payLinkDeficit(DEFAULT_CONFIG_DIGEST, USER); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.getFeeAndReward.t.sol b/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.getFeeAndReward.t.sol new file mode 100644 index 00000000..2fd578a5 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.getFeeAndReward.t.sol @@ -0,0 +1,606 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {Common} from "../../libraries/Common.sol"; +import "./BaseFeeManager.t.sol"; + +/** + * @title BaseFeeManagerTest + * @author Michael Fletcher + * @notice This contract will test the functionality of the feeManager's getFeeAndReward + */ +contract FeeManagerProcessFeeTest is BaseFeeManagerTest { + function test_baseFeeIsAppliedForNative() public { + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //fee should be the default + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE); + } + + function 
test_baseFeeIsAppliedForLink() public { + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + + //fee should be the default + assertEq(fee.amount, DEFAULT_REPORT_PLI_FEE); + } + + function test_discountAIsNotAppliedWhenSetForOtherUsers() public { + //set the subscriber discount for another user + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(link), FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), INVALID_ADDRESS); + + //fee should be the default + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE); + } + + function test_discountIsNotAppliedForInvalidTokenAddress() public { + //should revert with invalid address as it's not a configured token + vm.expectRevert(INVALID_ADDRESS_ERROR); + + //set the subscriber discount for another user + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, INVALID_ADDRESS, FEE_SCALAR / 2, ADMIN); + } + + function test_discountIsAppliedForLink() public { + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(link), FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + + //fee should be half the default + assertEq(fee.amount, DEFAULT_REPORT_PLI_FEE / 2); + } + + function test_DiscountIsAppliedForNative() public { + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //fee should be half the default + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE / 2); + } + + function test_discountIsNoLongerAppliedAfterRemoving() public { + //set the subscriber discount to 50% + setSubscriberDiscount(USER, 
DEFAULT_FEED_1_V3, address(link), FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + + //fee should be half the default + assertEq(fee.amount, DEFAULT_REPORT_PLI_FEE / 2); + + //remove the discount + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(link), 0, ADMIN); + + //get the fee required by the feeManager + fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + + //fee should be the default + assertEq(fee.amount, DEFAULT_REPORT_PLI_FEE); + } + + function test_surchargeIsApplied() public { + //native surcharge + uint256 nativeSurcharge = FEE_SCALAR / 5; + + //set the surcharge + setNativeSurcharge(nativeSurcharge, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //calculate the expected surcharge + uint256 expectedSurcharge = ((DEFAULT_REPORT_NATIVE_FEE * nativeSurcharge) / FEE_SCALAR); + + //expected fee should the base fee offset by the surcharge and discount + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE + expectedSurcharge); + } + + function test_surchargeIsNotAppliedForLinkFee() public { + //native surcharge + uint256 nativeSurcharge = FEE_SCALAR / 5; + + //set the surcharge + setNativeSurcharge(nativeSurcharge, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + + //fee should be the default + assertEq(fee.amount, DEFAULT_REPORT_PLI_FEE); + } + + function test_surchargeIsNoLongerAppliedAfterRemoving() public { + //native surcharge + uint256 nativeSurcharge = FEE_SCALAR / 5; + + //set the surcharge + setNativeSurcharge(nativeSurcharge, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //calculate the expected surcharge + uint256 
expectedSurcharge = ((DEFAULT_REPORT_NATIVE_FEE * nativeSurcharge) / FEE_SCALAR); + + //expected fee should be the base fee offset by the surcharge and discount + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE + expectedSurcharge); + + //remove the surcharge + setNativeSurcharge(0, ADMIN); + + //get the fee required by the feeManager + fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //fee should be the default + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE); + } + + function test_feeIsUpdatedAfterNewSurchargeIsApplied() public { + //native surcharge + uint256 nativeSurcharge = FEE_SCALAR / 5; + + //set the surcharge + setNativeSurcharge(nativeSurcharge, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //calculate the expected surcharge + uint256 expectedSurcharge = ((DEFAULT_REPORT_NATIVE_FEE * nativeSurcharge) / FEE_SCALAR); + + //expected fee should the base fee offset by the surcharge and discount + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE + expectedSurcharge); + + //change the surcharge + setNativeSurcharge(nativeSurcharge, ADMIN); + + //get the fee required by the feeManager + fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //calculate the expected surcharge + expectedSurcharge = ((DEFAULT_REPORT_NATIVE_FEE * nativeSurcharge) / FEE_SCALAR); + + //expected fee should the base fee offset by the surcharge and discount + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE + expectedSurcharge); + } + + function test_surchargeIsAppliedForNativeFeeWithDiscount() public { + //native surcharge + uint256 nativeSurcharge = FEE_SCALAR / 5; + + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 2, ADMIN); + + //set the surcharge + setNativeSurcharge(nativeSurcharge, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = 
getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //calculate the expected surcharge quantity + uint256 expectedSurcharge = ((DEFAULT_REPORT_NATIVE_FEE * nativeSurcharge) / FEE_SCALAR); + + //calculate the expected discount quantity + uint256 expectedDiscount = ((DEFAULT_REPORT_NATIVE_FEE + expectedSurcharge) / 2); + + //expected fee should the base fee offset by the surcharge and discount + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE + expectedSurcharge - expectedDiscount); + } + + function test_emptyQuoteRevertsWithError() public { + //expect a revert + vm.expectRevert(INVALID_QUOTE_ERROR); + + //get the fee required by the feeManager + getFee(getV3Report(DEFAULT_FEED_1_V3), address(0), USER); + } + + function test_nativeSurcharge100Percent() public { + //set the surcharge + setNativeSurcharge(FEE_SCALAR, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //fee should be twice the base fee + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE * 2); + } + + function test_nativeSurcharge0Percent() public { + //set the surcharge + setNativeSurcharge(0, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //fee should base fee + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE); + } + + function test_nativeSurchargeCannotExceed100Percent() public { + //should revert if surcharge is greater than 100% + vm.expectRevert(INVALID_SURCHARGE_ERROR); + + //set the surcharge above the max + setNativeSurcharge(FEE_SCALAR + 1, ADMIN); + } + + function test_discountIsAppliedWith100PercentSurcharge() public { + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 2, ADMIN); + + //set the surcharge + setNativeSurcharge(FEE_SCALAR, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = 
getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //calculate the expected discount quantity + uint256 expectedDiscount = DEFAULT_REPORT_NATIVE_FEE; + + //fee should be twice the surcharge minus the discount + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE * 2 - expectedDiscount); + } + + function test_feeIsZeroWith100PercentDiscount() public { + //set the subscriber discount to 100% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //fee should be zero + assertEq(fee.amount, 0); + } + + function test_feeIsUpdatedAfterDiscountIsRemoved() public { + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //calculate the expected discount quantity + uint256 expectedDiscount = DEFAULT_REPORT_NATIVE_FEE / 2; + + //fee should be 50% of the base fee + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE - expectedDiscount); + + //remove the discount + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), 0, ADMIN); + + //get the fee required by the feeManager + fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //fee should be the base fee + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE); + } + + function test_feeIsUpdatedAfterNewDiscountIsApplied() public { + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //calculate the expected discount quantity + uint256 expectedDiscount = DEFAULT_REPORT_NATIVE_FEE / 2; + + //fee should be 
50% of the base fee + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE - expectedDiscount); + + //change the discount to 25% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 4, ADMIN); + + //get the fee required by the feeManager + fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //expected discount is now 25% + expectedDiscount = DEFAULT_REPORT_NATIVE_FEE / 4; + + //fee should be the base fee minus the expected discount + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE - expectedDiscount); + } + + function test_setDiscountOver100Percent() public { + //should revert with invalid discount + vm.expectRevert(INVALID_DISCOUNT_ERROR); + + //set the subscriber discount to over 100% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR + 1, ADMIN); + } + + function test_surchargeIsNotAppliedWith100PercentDiscount() public { + //native surcharge + uint256 nativeSurcharge = FEE_SCALAR / 5; + + //set the subscriber discount to 100% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR, ADMIN); + + //set the surcharge + setNativeSurcharge(nativeSurcharge, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //fee should be zero + assertEq(fee.amount, 0); + } + + function test_nonAdminUserCanNotSetDiscount() public { + //should revert with unauthorized + vm.expectRevert(ONLY_CALLABLE_BY_OWNER_ERROR); + + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR, USER); + } + + function test_surchargeFeeRoundsUpWhenUneven() public { + //native surcharge + uint256 nativeSurcharge = FEE_SCALAR / 3; + + //set the surcharge + setNativeSurcharge(nativeSurcharge, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //calculate the expected 
surcharge quantity + uint256 expectedSurcharge = (DEFAULT_REPORT_NATIVE_FEE * nativeSurcharge) / FEE_SCALAR; + + //expected fee should the base fee offset by the expected surcharge + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE + expectedSurcharge + 1); + } + + function test_discountFeeRoundsDownWhenUneven() public { + //native surcharge + uint256 discount = FEE_SCALAR / 3; + + //set the subscriber discount to 33.333% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), discount, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //calculate the expected quantity + uint256 expectedDiscount = ((DEFAULT_REPORT_NATIVE_FEE * discount) / FEE_SCALAR); + + //expected fee should the base fee offset by the expected surcharge + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE - expectedDiscount); + } + + function test_reportWithNoExpiryOrFeeReturnsZero() public { + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV1Report(DEFAULT_FEED_1_V1), getNativeQuote(), USER); + + //fee should be zero + assertEq(fee.amount, 0); + } + + function test_correctDiscountIsAppliedWhenBothTokensAreDiscounted() public { + //set the subscriber and native discounts + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(link), FEE_SCALAR / 4, ADMIN); + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager for both tokens + Common.Asset memory linkFee = getFee(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + Common.Asset memory nativeFee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //calculate the expected discount quantity for each token + uint256 expectedDiscountLink = (DEFAULT_REPORT_PLI_FEE * FEE_SCALAR) / 4 / FEE_SCALAR; + uint256 expectedDiscountNative = (DEFAULT_REPORT_NATIVE_FEE * FEE_SCALAR) / 2 / FEE_SCALAR; + + //check the fee 
calculation for each token + assertEq(linkFee.amount, DEFAULT_REPORT_PLI_FEE - expectedDiscountLink); + assertEq(nativeFee.amount, DEFAULT_REPORT_NATIVE_FEE - expectedDiscountNative); + } + + function test_discountIsNotAppliedToOtherFeeds() public { + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_2_V3), getNativeQuote(), USER); + + //fee should be the base fee + assertEq(fee.amount, DEFAULT_REPORT_NATIVE_FEE); + } + + function test_noFeeIsAppliedWhenReportHasZeroFee() public { + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee( + getV3ReportWithCustomExpiryAndFee(DEFAULT_FEED_1_V3, uint32(block.timestamp), 0, 0), + getNativeQuote(), + USER + ); + + //fee should be zero + assertEq(fee.amount, 0); + } + + function test_noFeeIsAppliedWhenReportHasZeroFeeAndDiscountAndSurchargeIsSet() public { + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 2, ADMIN); + + //set the surcharge + setNativeSurcharge(FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee( + getV3ReportWithCustomExpiryAndFee(DEFAULT_FEED_1_V3, uint32(block.timestamp), 0, 0), + getNativeQuote(), + USER + ); + + //fee should be zero + assertEq(fee.amount, 0); + } + + function test_nativeSurchargeEventIsEmittedOnUpdate() public { + //native surcharge + uint64 nativeSurcharge = FEE_SCALAR / 3; + + //an event should be emitted + vm.expectEmit(); + + //emit the event that is expected to be emitted + emit NativeSurchargeUpdated(nativeSurcharge); + + //set the surcharge + setNativeSurcharge(nativeSurcharge, ADMIN); + } + + function 
test_getBaseRewardWithLinkQuote() public { + //get the fee required by the feeManager + Common.Asset memory reward = getReward(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + + //the reward should equal the base fee + assertEq(reward.amount, DEFAULT_REPORT_PLI_FEE); + } + + function test_getRewardWithLinkQuoteAndLinkDiscount() public { + //set the link discount + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(link), FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory reward = getReward(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + + //the reward should equal the discounted base fee + assertEq(reward.amount, DEFAULT_REPORT_PLI_FEE / 2); + } + + function test_getRewardWithNativeQuote() public { + //get the fee required by the feeManager + Common.Asset memory reward = getReward(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //the reward should equal the base fee in link + assertEq(reward.amount, DEFAULT_REPORT_PLI_FEE); + } + + function test_getRewardWithNativeQuoteAndSurcharge() public { + //set the native surcharge + setNativeSurcharge(FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory reward = getReward(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //the reward should equal the base fee in link regardless of the surcharge + assertEq(reward.amount, DEFAULT_REPORT_PLI_FEE); + } + + function test_getRewardWithLinkDiscount() public { + //set the link discount + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(link), FEE_SCALAR / 2, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory reward = getReward(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + + //the reward should equal the discounted base fee + assertEq(reward.amount, DEFAULT_REPORT_PLI_FEE / 2); + } + + function test_getLinkFeeIsRoundedUp() public { + //set the link discount + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(link), 
FEE_SCALAR / 3, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + + //the reward should equal .66% + 1 of the base fee due to a 33% discount rounded up + assertEq(fee.amount, (DEFAULT_REPORT_PLI_FEE * 2) / 3 + 1); + } + + function test_getLinkRewardIsSameAsFee() public { + //set the link discount + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(link), FEE_SCALAR / 3, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + Common.Asset memory reward = getReward(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + + //check the reward is in link + assertEq(fee.assetAddress, address(link)); + + //the reward should equal .66% of the base fee due to a 33% discount rounded down + assertEq(reward.amount, fee.amount); + } + + function test_getLinkRewardWithNativeQuoteAndSurchargeWithLinkDiscount() public { + //set the native surcharge + setNativeSurcharge(FEE_SCALAR / 2, ADMIN); + + //set the link discount + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(link), FEE_SCALAR / 3, ADMIN); + + //get the fee required by the feeManager + Common.Asset memory reward = getReward(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //the reward should equal the base fee in link regardless of the surcharge + assertEq(reward.amount, DEFAULT_REPORT_PLI_FEE); + } + + function test_testRevertIfReportHasExpired() public { + //expect a revert + vm.expectRevert(EXPIRED_REPORT_ERROR); + + //get the fee required by the feeManager + getFee( + getV3ReportWithCustomExpiryAndFee( + DEFAULT_FEED_1_V3, + block.timestamp - 1, + DEFAULT_REPORT_PLI_FEE, + DEFAULT_REPORT_NATIVE_FEE + ), + getNativeQuote(), + USER + ); + } + + function test_discountIsReturnedForLink() public { + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(link), FEE_SCALAR / 2, 
ADMIN); + + //get the fee applied + uint256 discount = getAppliedDiscount(getV3Report(DEFAULT_FEED_1_V3), getLinkQuote(), USER); + + //fee should be half the default + assertEq(discount, FEE_SCALAR / 2); + } + + function test_DiscountIsReturnedForNative() public { + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 2, ADMIN); + + //get the discount applied + uint256 discount = getAppliedDiscount(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //fee should be half the default + assertEq(discount, FEE_SCALAR / 2); + } + + function test_DiscountIsReturnedForNativeWithSurcharge() public { + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 2, ADMIN); + + //set the surcharge + setNativeSurcharge(FEE_SCALAR / 5, ADMIN); + + //get the discount applied + uint256 discount = getAppliedDiscount(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + //fee should be half the default + assertEq(discount, FEE_SCALAR / 2); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.processFee.t.sol b/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.processFee.t.sol new file mode 100644 index 00000000..a9830ff4 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.processFee.t.sol @@ -0,0 +1,492 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {Common} from "../../libraries/Common.sol"; +import "./BaseFeeManager.t.sol"; +import {IRewardManager} from "../../interfaces/IRewardManager.sol"; + +/** + * @title BaseFeeManagerTest + * @author Michael Fletcher + * @notice This contract will test the functionality of the feeManager processFee + */ +contract FeeManagerProcessFeeTest is BaseFeeManagerTest { + function setUp() public override { + super.setUp(); + } + + function test_nonAdminProxyUserCannotProcessFee() public { + //get the default payload + bytes memory 
payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //should revert as the user is not the owner + vm.expectRevert(UNAUTHORIZED_ERROR); + + //process the fee + ProcessFeeAsUser(payload, USER, address(link), 0, USER); + } + + function test_processFeeAsProxy() public { + //get the default payload + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //approve the link to be transferred from the from the subscriber to the rewardManager + approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE, USER); + + //processing the fee will transfer the link from the user to the rewardManager + processFee(payload, USER, address(link), 0); + + //check the link has been transferred + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE); + + //check the user has had the link fee deducted + assertEq(getLinkBalance(USER), DEFAULT_PLI_MINT_QUANTITY - DEFAULT_REPORT_PLI_FEE); + } + + function test_processFeeIfSubscriberIsSelf() public { + //get the default payload + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //expect a revert due to the feeManager being the subscriber + vm.expectRevert(INVALID_ADDRESS_ERROR); + + //process the fee will fail due to assertion + processFee(payload, address(feeManager), address(native), 0); + } + + function test_processFeeWithWithEmptyQuotePayload() public { + //get the default payload + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //expect a revert as the quote is invalid + vm.expectRevert(); + + //processing the fee will transfer the link by default + processFee(payload, USER, address(0), 0); + } + + function test_processFeeWithWithZeroQuotePayload() public { + //get the default payload + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //expect a revert as the quote is invalid + vm.expectRevert(INVALID_QUOTE_ERROR); + + //processing the fee will transfer the link by default + processFee(payload, USER, INVALID_ADDRESS, 0); + } + + 
function test_processFeeWithWithCorruptQuotePayload() public { + //get the default payload + bytes memory payload = abi.encode( + [DEFAULT_CONFIG_DIGEST, 0, 0], + getV3Report(DEFAULT_FEED_1_V3), + new bytes32[](1), + new bytes32[](1), + bytes32("") + ); + + //expect an evm revert as the quote is corrupt + vm.expectRevert(); + + //processing the fee will not withdraw anything as there is no fee to collect + processFee(payload, USER, address(link), 0); + } + + function test_processFeeDefaultReportsStillVerifiesWithEmptyQuote() public { + //get the default payload + bytes memory payload = getPayload(getV1Report(DEFAULT_FEED_1_V1)); + + //processing the fee will transfer the link from the user to the rewardManager + processFee(payload, USER, address(0), 0); + } + + function test_processFeeWithDefaultReportPayloadAndQuoteStillVerifies() public { + //get the default payload + bytes memory payload = getPayload(getV1Report(DEFAULT_FEED_1_V1)); + + //processing the fee will not withdraw anything as there is no fee to collect + processFee(payload, USER, address(link), 0); + } + + function test_processFeeNative() public { + //simulate a deposit of link for the conversion pool + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE); + + //get the default payload + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //approve the native to be transferred from the user + approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE, USER); + + //processing the fee will transfer the native from the user to the feeManager + processFee(payload, USER, address(native), 0); + + //check the native has been transferred + assertEq(getNativeBalance(address(feeManager)), DEFAULT_REPORT_NATIVE_FEE); + + //check the link has been transferred to the rewardManager + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE); + + //check the feeManager has had the link deducted, the remaining balance should be 0 + assertEq(getLinkBalance(address(feeManager)), 0); 
+ + //check the subscriber has had the native deducted + assertEq(getNativeBalance(USER), DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE); + } + + function test_processFeeEmitsEventIfNotEnoughLink() public { + //simulate a deposit of half the link required for the fee + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE / 2); + + //get the default payload + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //approve the native to be transferred from the user + approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE, USER); + + //expect an emit as there's not enough link + vm.expectEmit(); + + IRewardManager.FeePayment[] memory contractFees = new IRewardManager.FeePayment[](1); + contractFees[0] = IRewardManager.FeePayment(DEFAULT_CONFIG_DIGEST, uint192(DEFAULT_REPORT_PLI_FEE)); + + //emit the event that is expected to be emitted + emit InsufficientLink(contractFees); + + //processing the fee will transfer the native from the user to the feeManager + processFee(payload, USER, address(native), 0); + + //check the native has been transferred + assertEq(getNativeBalance(address(feeManager)), DEFAULT_REPORT_NATIVE_FEE); + + //check no link has been transferred to the rewardManager + assertEq(getLinkBalance(address(rewardManager)), 0); + assertEq(getLinkBalance(address(feeManager)), DEFAULT_REPORT_PLI_FEE / 2); + + //check the subscriber has had the native deducted + assertEq(getNativeBalance(USER), DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE); + } + + function test_processFeeWithUnwrappedNative() public { + //simulate a deposit of link for the conversion pool + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE); + + //get the default payload + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //only the proxy or admin can call processFee, they will pass in the native value on the users behalf + processFee(payload, USER, address(native), DEFAULT_REPORT_NATIVE_FEE); + + //check the native has been 
transferred and converted to wrapped native + assertEq(getNativeBalance(address(feeManager)), DEFAULT_REPORT_NATIVE_FEE); + assertEq(getNativeUnwrappedBalance(address(feeManager)), 0); + + //check the link has been transferred to the rewardManager + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE); + + //check the feeManager has had the link deducted, the remaining balance should be 0 + assertEq(getLinkBalance(address(feeManager)), 0); + + //check the subscriber has had the native deducted + assertEq(getNativeUnwrappedBalance(USER), DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE); + } + + function test_processFeeWithUnwrappedNativeShortFunds() public { + //simulate a deposit of link for the conversion pool + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE); + + //get the default payload + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //expect a revert as not enough funds + vm.expectRevert(INVALID_DEPOSIT_ERROR); + + //only the proxy or admin can call processFee, they will pass in the native value on the users behalf + processFee(payload, USER, address(native), DEFAULT_REPORT_NATIVE_FEE - 1); + } + + function test_processFeeWithUnwrappedNativeLinkAddress() public { + //simulate a deposit of link for the conversion pool + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE); + + //get the default payload + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //expect a revert as not enough funds + vm.expectRevert(INSUFFICIENT_ALLOWANCE_ERROR); + + //the change will be returned and the user will attempted to be billed in PLI + processFee(payload, USER, address(link), DEFAULT_REPORT_NATIVE_FEE - 1); + } + + function test_processFeeWithUnwrappedNativeLinkAddressExcessiveFee() public { + //approve the link to be transferred from the from the subscriber to the rewardManager + approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE, PROXY); + + //get the default payload + bytes memory 
payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //call processFee from the proxy to test whether the funds are returned to the subscriber. In reality, the funds would be returned to the caller of the proxy. + processFee(payload, PROXY, address(link), DEFAULT_REPORT_NATIVE_FEE); + + //check the native unwrapped is no longer in the account + assertEq(getNativeBalance(address(feeManager)), 0); + assertEq(getNativeUnwrappedBalance(address(feeManager)), 0); + + //check the link has been transferred to the rewardManager + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE); + + //check the feeManager has had the link deducted, the remaining balance should be 0 + assertEq(getLinkBalance(address(feeManager)), 0); + + //native should not be deducted + assertEq(getNativeUnwrappedBalance(PROXY), DEFAULT_NATIVE_MINT_QUANTITY); + } + + function test_processFeeWithUnwrappedNativeWithExcessiveFee() public { + //simulate a deposit of link for the conversion pool + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE); + + //get the default payload + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //call processFee from the proxy to test whether the funds are returned to the subscriber. In reality, the funds would be returned to the caller of the proxy. 
+ processFee(payload, PROXY, address(native), DEFAULT_REPORT_NATIVE_FEE * 2); + + //check the native has been transferred and converted to wrapped native + assertEq(getNativeBalance(address(feeManager)), DEFAULT_REPORT_NATIVE_FEE); + assertEq(getNativeUnwrappedBalance(address(feeManager)), 0); + + //check the link has been transferred to the rewardManager + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE); + + //check the feeManager has had the link deducted, the remaining balance should be 0 + assertEq(getLinkBalance(address(feeManager)), 0); + + //check the subscriber has had the native deducted + assertEq(getNativeUnwrappedBalance(PROXY), DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE); + } + + function test_processFeeUsesCorrectDigest() public { + //get the default payload + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //approve the link to be transferred from the from the subscriber to the rewardManager + approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE, USER); + + //processing the fee will transfer the link from the user to the rewardManager + processFee(payload, USER, address(link), 0); + + //check the link has been transferred + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE); + + //check the user has had the link fee deducted + assertEq(getLinkBalance(USER), DEFAULT_PLI_MINT_QUANTITY - DEFAULT_REPORT_PLI_FEE); + + //check funds have been paid to the reward manager + assertEq(rewardManager.s_totalRewardRecipientFees(DEFAULT_CONFIG_DIGEST), DEFAULT_REPORT_PLI_FEE); + } + + function test_V1PayloadVerifies() public { + //replicate a default payload + bytes memory payload = abi.encode( + [DEFAULT_CONFIG_DIGEST, 0, 0], + getV2Report(DEFAULT_FEED_1_V1), + new bytes32[](1), + new bytes32[](1), + bytes32("") + ); + + //processing the fee will transfer the link from the user to the rewardManager + processFee(payload, USER, address(0), 0); + } + + function 
test_V2PayloadVerifies() public { + //get the default payload + bytes memory payload = getPayload(getV2Report(DEFAULT_FEED_1_V2)); + + //approve the link to be transferred from the from the subscriber to the rewardManager + approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE, USER); + + //processing the fee will transfer the link from the user to the rewardManager + processFee(payload, USER, address(link), 0); + + //check the link has been transferred + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE); + + //check the user has had the link fee deducted + assertEq(getLinkBalance(USER), DEFAULT_PLI_MINT_QUANTITY - DEFAULT_REPORT_PLI_FEE); + } + + function test_V2PayloadWithoutQuoteFails() public { + //get the default payload + bytes memory payload = getPayload(getV2Report(DEFAULT_FEED_1_V2)); + + //expect a revert as the quote is invalid + vm.expectRevert(); + + //processing the fee will transfer the link from the user to the rewardManager + processFee(payload, USER, address(0), 0); + } + + function test_V2PayloadWithoutZeroFee() public { + //get the default payload + bytes memory payload = getPayload(getV2Report(DEFAULT_FEED_1_V2)); + + //expect a revert as the quote is invalid + vm.expectRevert(); + + //processing the fee will transfer the link from the user to the rewardManager + processFee(payload, USER, address(link), 0); + } + + function test_processFeeWithInvalidReportVersionFailsToDecode() public { + bytes memory data = abi.encode(0x0000100000000000000000000000000000000000000000000000000000000000); + + //get the default payload + bytes memory payload = getPayload(data); + + //serialization will fail as there is no report to decode + vm.expectRevert(); + + //processing the fee will not withdraw anything as there is no fee to collect + processFee(payload, USER, address(link), 0); + } + + function test_processFeeWithZeroNativeNonZeroLinkWithNativeQuote() public { + //get the default payload + bytes memory payload = getPayload( + 
getV3ReportWithCustomExpiryAndFee(DEFAULT_FEED_1_V3, block.timestamp, DEFAULT_REPORT_PLI_FEE, 0) + ); + + //call processFee should not revert as the fee is 0 + processFee(payload, PROXY, address(native), 0); + } + + function test_processFeeWithZeroNativeNonZeroLinkWithLinkQuote() public { + //get the default payload + bytes memory payload = getPayload( + getV3ReportWithCustomExpiryAndFee(DEFAULT_FEED_1_V3, block.timestamp, DEFAULT_REPORT_PLI_FEE, 0) + ); + + //approve the link to be transferred from the from the subscriber to the rewardManager + approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE, USER); + + //processing the fee will transfer the link to the rewardManager from the user + processFee(payload, USER, address(link), 0); + + //check the link has been transferred + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE); + + //check the user has had the link fee deducted + assertEq(getLinkBalance(USER), DEFAULT_PLI_MINT_QUANTITY - DEFAULT_REPORT_PLI_FEE); + } + + function test_processFeeWithZeroLinkNonZeroNativeWithNativeQuote() public { + //simulate a deposit of link for the conversion pool + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE); + + //get the default payload + bytes memory payload = getPayload( + getV3ReportWithCustomExpiryAndFee(DEFAULT_FEED_1_V3, block.timestamp, 0, DEFAULT_REPORT_NATIVE_FEE) + ); + + //approve the native to be transferred from the user + approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE, USER); + + //processing the fee will transfer the native from the user to the feeManager + processFee(payload, USER, address(native), 0); + + //check the native has been transferred + assertEq(getNativeBalance(address(feeManager)), DEFAULT_REPORT_NATIVE_FEE); + + //check no link has been transferred to the rewardManager + assertEq(getLinkBalance(address(rewardManager)), 0); + + //check the feeManager has had no link deducted + assertEq(getLinkBalance(address(feeManager)), DEFAULT_REPORT_PLI_FEE); + 
+ //check the subscriber has had the native deducted + assertEq(getNativeBalance(USER), DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE); + } + + function test_processFeeWithZeroLinkNonZeroNativeWithLinkQuote() public { + //get the default payload + bytes memory payload = getPayload( + getV3ReportWithCustomExpiryAndFee(DEFAULT_FEED_1_V3, block.timestamp, 0, DEFAULT_REPORT_NATIVE_FEE) + ); + + //call processFee should not revert as the fee is 0 + processFee(payload, USER, address(link), 0); + } + + function test_processFeeWithZeroNativeNonZeroLinkReturnsChange() public { + //get the default payload + bytes memory payload = getPayload( + getV3ReportWithCustomExpiryAndFee(DEFAULT_FEED_1_V3, block.timestamp, 0, DEFAULT_REPORT_NATIVE_FEE) + ); + + //call processFee should not revert as the fee is 0 + processFee(payload, USER, address(link), DEFAULT_REPORT_NATIVE_FEE); + + //check the change has been returned + assertEq(USER.balance, DEFAULT_NATIVE_MINT_QUANTITY); + } + + function test_V1PayloadVerifiesAndReturnsChange() public { + //emulate a V1 payload with no quote + bytes memory payload = getPayload(getV1Report(DEFAULT_FEED_1_V1)); + + processFee(payload, USER, address(0), DEFAULT_REPORT_NATIVE_FEE); + + //Fee manager should not contain any native + assertEq(address(feeManager).balance, 0); + assertEq(getNativeBalance(address(feeManager)), 0); + + //check the unused native passed in is returned + assertEq(USER.balance, DEFAULT_NATIVE_MINT_QUANTITY); + } + + function test_processFeeWithDiscountEmitsEvent() public { + //simulate a deposit of link for the conversion pool + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE); + + //set the subscriber discount to 50% + setSubscriberDiscount(USER, DEFAULT_FEED_1_V3, address(native), FEE_SCALAR / 2, ADMIN); + + //approve the native to be transferred from the user + approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE / 2, USER); + + //get the default payload + bytes memory payload = 
getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + Common.Asset memory fee = getFee(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + Common.Asset memory reward = getReward(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + uint256 appliedDiscount = getAppliedDiscount(getV3Report(DEFAULT_FEED_1_V3), getNativeQuote(), USER); + + vm.expectEmit(); + + emit DiscountApplied(DEFAULT_CONFIG_DIGEST, USER, fee, reward, appliedDiscount); + + //call processFee should not revert as the fee is 0 + processFee(payload, USER, address(native), 0); + } + + function test_processFeeWithNoDiscountDoesNotEmitEvent() public { + //simulate a deposit of link for the conversion pool + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE); + + //approve the native to be transferred from the user + approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE, USER); + + //get the default payload + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + //call processFee should not revert as the fee is 0 + processFee(payload, USER, address(native), 0); + + //no logs should have been emitted + assertEq(vm.getRecordedLogs().length, 0); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.processFeeBulk.t.sol b/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.processFeeBulk.t.sol new file mode 100644 index 00000000..13a4ecf0 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/fee-manager/FeeManager.processFeeBulk.t.sol @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import "./BaseFeeManager.t.sol"; +import {IRewardManager} from "../../interfaces/IRewardManager.sol"; + +/** + * @title BaseFeeManagerTest + * @author Michael Fletcher + * @notice This contract will test the functionality of the feeManager processFee + */ +contract FeeManagerProcessFeeTest is BaseFeeManagerTest { + uint256 internal constant NUMBER_OF_REPORTS = 5; + + function setUp() public override { + super.setUp(); + } + + function 
test_processMultipleLinkReports() public { + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + bytes[] memory payloads = new bytes[](NUMBER_OF_REPORTS); + for (uint256 i = 0; i < NUMBER_OF_REPORTS; ++i) { + payloads[i] = payload; + } + + approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE * NUMBER_OF_REPORTS, USER); + + processFee(payloads, USER, address(link), DEFAULT_NATIVE_MINT_QUANTITY); + + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE * NUMBER_OF_REPORTS); + assertEq(getLinkBalance(address(feeManager)), 0); + assertEq(getLinkBalance(USER), DEFAULT_PLI_MINT_QUANTITY - DEFAULT_REPORT_PLI_FEE * NUMBER_OF_REPORTS); + + //the subscriber (user) should receive funds back and not the proxy, although when live the proxy will forward the funds sent and not cover it seen here + assertEq(USER.balance, DEFAULT_NATIVE_MINT_QUANTITY); + assertEq(PROXY.balance, DEFAULT_NATIVE_MINT_QUANTITY); + } + + function test_processMultipleWrappedNativeReports() public { + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE * NUMBER_OF_REPORTS + 1); + + bytes memory payload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + bytes[] memory payloads = new bytes[](NUMBER_OF_REPORTS); + for (uint256 i; i < NUMBER_OF_REPORTS; ++i) { + payloads[i] = payload; + } + + approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE * NUMBER_OF_REPORTS, USER); + + processFee(payloads, USER, address(native), 0); + + assertEq(getNativeBalance(address(feeManager)), DEFAULT_REPORT_NATIVE_FEE * NUMBER_OF_REPORTS); + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE * NUMBER_OF_REPORTS); + assertEq(getLinkBalance(address(feeManager)), 1); + assertEq(getNativeBalance(USER), DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE * NUMBER_OF_REPORTS); + } + + function test_processMultipleUnwrappedNativeReports() public { + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE * NUMBER_OF_REPORTS + 1); + + bytes memory payload = 
getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + bytes[] memory payloads = new bytes[](NUMBER_OF_REPORTS); + for (uint256 i; i < NUMBER_OF_REPORTS; ++i) { + payloads[i] = payload; + } + + processFee(payloads, USER, address(native), DEFAULT_REPORT_NATIVE_FEE * NUMBER_OF_REPORTS * 2); + + assertEq(getNativeBalance(address(feeManager)), DEFAULT_REPORT_NATIVE_FEE * NUMBER_OF_REPORTS); + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE * NUMBER_OF_REPORTS); + assertEq(getLinkBalance(address(feeManager)), 1); + + assertEq(PROXY.balance, DEFAULT_NATIVE_MINT_QUANTITY); + assertEq(USER.balance, DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE * NUMBER_OF_REPORTS); + } + + function test_processV1V2V3Reports() public { + mintLink(address(feeManager), 1); + + bytes memory payloadV1 = abi.encode( + [DEFAULT_CONFIG_DIGEST, 0, 0], + getV1Report(DEFAULT_FEED_1_V1), + new bytes32[](1), + new bytes32[](1), + bytes32("") + ); + + bytes memory linkPayloadV2 = getPayload(getV2Report(DEFAULT_FEED_1_V2)); + bytes memory linkPayloadV3 = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + bytes[] memory payloads = new bytes[](5); + payloads[0] = payloadV1; + payloads[1] = linkPayloadV2; + payloads[2] = linkPayloadV2; + payloads[3] = linkPayloadV3; + payloads[4] = linkPayloadV3; + + approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE * 4, USER); + + processFee(payloads, USER, address(link), 0); + + assertEq(getNativeBalance(address(feeManager)), 0); + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE * 4); + assertEq(getLinkBalance(address(feeManager)), 1); + + assertEq(getLinkBalance(USER), DEFAULT_PLI_MINT_QUANTITY - DEFAULT_REPORT_PLI_FEE * 4); + assertEq(getNativeBalance(USER), DEFAULT_NATIVE_MINT_QUANTITY - 0); + } + + function test_processV1V2V3ReportsWithUnwrapped() public { + mintLink(address(feeManager), DEFAULT_REPORT_PLI_FEE * 4 + 1); + + bytes memory payloadV1 = abi.encode( + [DEFAULT_CONFIG_DIGEST, 0, 0], + 
getV1Report(DEFAULT_FEED_1_V1), + new bytes32[](1), + new bytes32[](1), + bytes32("") + ); + + bytes memory nativePayloadV2 = getPayload(getV2Report(DEFAULT_FEED_1_V2)); + bytes memory nativePayloadV3 = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + bytes[] memory payloads = new bytes[](5); + payloads[0] = payloadV1; + payloads[1] = nativePayloadV2; + payloads[2] = nativePayloadV2; + payloads[3] = nativePayloadV3; + payloads[4] = nativePayloadV3; + + processFee(payloads, USER, address(native), DEFAULT_REPORT_NATIVE_FEE * 4); + + assertEq(getNativeBalance(address(feeManager)), DEFAULT_REPORT_NATIVE_FEE * 4); + assertEq(getLinkBalance(address(rewardManager)), DEFAULT_REPORT_PLI_FEE * 4); + assertEq(getLinkBalance(address(feeManager)), 1); + + assertEq(USER.balance, DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE * 4); + assertEq(PROXY.balance, DEFAULT_NATIVE_MINT_QUANTITY); + } + + function test_processMultipleV1Reports() public { + bytes memory payload = abi.encode( + [DEFAULT_CONFIG_DIGEST, 0, 0], + getV1Report(DEFAULT_FEED_1_V1), + new bytes32[](1), + new bytes32[](1), + bytes32("") + ); + + bytes[] memory payloads = new bytes[](NUMBER_OF_REPORTS); + for (uint256 i = 0; i < NUMBER_OF_REPORTS; ++i) { + payloads[i] = payload; + } + + processFee(payloads, USER, address(native), DEFAULT_REPORT_NATIVE_FEE * 5); + + assertEq(USER.balance, DEFAULT_NATIVE_MINT_QUANTITY); + assertEq(PROXY.balance, DEFAULT_NATIVE_MINT_QUANTITY); + } + + function test_eventIsEmittedIfNotEnoughLink() public { + bytes memory nativePayload = getPayload(getV3Report(DEFAULT_FEED_1_V3)); + + bytes[] memory payloads = new bytes[](5); + payloads[0] = nativePayload; + payloads[1] = nativePayload; + payloads[2] = nativePayload; + payloads[3] = nativePayload; + payloads[4] = nativePayload; + + approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE * 5, USER); + + IRewardManager.FeePayment[] memory payments = new IRewardManager.FeePayment[](5); + payments[0] = 
IRewardManager.FeePayment(DEFAULT_CONFIG_DIGEST, uint192(DEFAULT_REPORT_PLI_FEE)); + payments[1] = IRewardManager.FeePayment(DEFAULT_CONFIG_DIGEST, uint192(DEFAULT_REPORT_PLI_FEE)); + payments[2] = IRewardManager.FeePayment(DEFAULT_CONFIG_DIGEST, uint192(DEFAULT_REPORT_PLI_FEE)); + payments[3] = IRewardManager.FeePayment(DEFAULT_CONFIG_DIGEST, uint192(DEFAULT_REPORT_PLI_FEE)); + payments[4] = IRewardManager.FeePayment(DEFAULT_CONFIG_DIGEST, uint192(DEFAULT_REPORT_PLI_FEE)); + + vm.expectEmit(); + + emit InsufficientLink(payments); + + processFee(payloads, USER, address(native), 0); + + assertEq(getNativeBalance(address(feeManager)), DEFAULT_REPORT_NATIVE_FEE * 5); + assertEq(getNativeBalance(USER), DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE * 5); + assertEq(getLinkBalance(USER), DEFAULT_PLI_MINT_QUANTITY); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/gas/Gas_VerifierTest.t.sol b/contracts/src/v0.8/llo-feeds/test/gas/Gas_VerifierTest.t.sol new file mode 100644 index 00000000..bb5596c4 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/gas/Gas_VerifierTest.t.sol @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTest, BaseTestWithConfiguredVerifierAndFeeManager} from "../verifier/BaseVerifierTest.t.sol"; +import {SimpleWriteAccessController} from "../../../shared/access/SimpleWriteAccessController.sol"; +import {Common} from "../../libraries/Common.sol"; +import {IRewardManager} from "../../interfaces/IRewardManager.sol"; + +contract Verifier_setConfig is BaseTest { + address[] internal s_signerAddrs; + + function setUp() public override { + BaseTest.setUp(); + Signer[] memory signers = _getSigners(MAX_ORACLES); + s_signerAddrs = _getSignerAddresses(signers); + s_verifierProxy.initializeVerifier(address(s_verifier)); + } + + function testSetConfigSuccess_gas() public { + s_verifier.setConfig( + FEED_ID, + s_signerAddrs, + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + 
VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + } +} + +contract Verifier_verifyWithFee is BaseTestWithConfiguredVerifierAndFeeManager { + uint256 internal constant DEFAULT_PLI_MINT_QUANTITY = 100 ether; + uint256 internal constant DEFAULT_NATIVE_MINT_QUANTITY = 100 ether; + + function setUp() public virtual override { + super.setUp(); + + //mint some link and eth to warm the storage + link.mint(address(rewardManager), DEFAULT_PLI_MINT_QUANTITY); + native.mint(address(feeManager), DEFAULT_NATIVE_MINT_QUANTITY); + + //warm the rewardManager + link.mint(address(this), DEFAULT_NATIVE_MINT_QUANTITY); + _approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE, address(this)); + (, , bytes32 latestConfigDigest) = s_verifier.latestConfigDetails(FEED_ID); + + //mint some tokens to the user + link.mint(USER, DEFAULT_PLI_MINT_QUANTITY); + native.mint(USER, DEFAULT_NATIVE_MINT_QUANTITY); + vm.deal(USER, DEFAULT_NATIVE_MINT_QUANTITY); + + //mint some link tokens to the feeManager pool + link.mint(address(feeManager), DEFAULT_REPORT_PLI_FEE); + + //approve funds prior to test + _approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE, USER); + _approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE, USER); + + IRewardManager.FeePayment[] memory payments = new IRewardManager.FeePayment[](1); + payments[0] = IRewardManager.FeePayment(latestConfigDigest, uint192(DEFAULT_REPORT_PLI_FEE)); + + changePrank(address(feeManager)); + rewardManager.onFeePaid(payments, address(this)); + + changePrank(USER); + } + + function testVerifyProxyWithLinkFeeSuccess_gas() public { + bytes memory signedLinkPayload = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + s_verifierProxy.verify(signedLinkPayload, abi.encode(link)); + } + + function testVerifyProxyWithNativeFeeSuccess_gas() public { + bytes memory signedNativePayload = _generateV3EncodedBlob( + _generateV3Report(), + 
_generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + s_verifierProxy.verify(signedNativePayload, abi.encode(native)); + } +} + +contract Verifier_bulkVerifyWithFee is BaseTestWithConfiguredVerifierAndFeeManager { + uint256 internal constant DEFAULT_PLI_MINT_QUANTITY = 100 ether; + uint256 internal constant DEFAULT_NATIVE_MINT_QUANTITY = 100 ether; + uint256 internal constant NUMBER_OF_REPORTS_TO_VERIFY = 5; + + function setUp() public virtual override { + super.setUp(); + + //mint some link and eth to warm the storage + link.mint(address(rewardManager), DEFAULT_PLI_MINT_QUANTITY); + native.mint(address(feeManager), DEFAULT_NATIVE_MINT_QUANTITY); + + //warm the rewardManager + link.mint(address(this), DEFAULT_NATIVE_MINT_QUANTITY); + _approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE, address(this)); + (, , bytes32 latestConfigDigest) = s_verifier.latestConfigDetails(FEED_ID); + + //mint some tokens to the user + link.mint(USER, DEFAULT_PLI_MINT_QUANTITY); + native.mint(USER, DEFAULT_NATIVE_MINT_QUANTITY); + vm.deal(USER, DEFAULT_NATIVE_MINT_QUANTITY); + + //mint some link tokens to the feeManager pool + link.mint(address(feeManager), DEFAULT_REPORT_PLI_FEE * NUMBER_OF_REPORTS_TO_VERIFY); + + //approve funds prior to test + _approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE * NUMBER_OF_REPORTS_TO_VERIFY, USER); + _approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE * NUMBER_OF_REPORTS_TO_VERIFY, USER); + + IRewardManager.FeePayment[] memory payments = new IRewardManager.FeePayment[](1); + payments[0] = IRewardManager.FeePayment(latestConfigDigest, uint192(DEFAULT_REPORT_PLI_FEE)); + + changePrank(address(feeManager)); + rewardManager.onFeePaid(payments, address(this)); + + changePrank(USER); + } + + function testBulkVerifyProxyWithLinkFeeSuccess_gas() public { + bytes memory signedLinkPayload = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + 
_getSigners(FAULT_TOLERANCE + 1) + ); + + bytes[] memory signedLinkPayloads = new bytes[](NUMBER_OF_REPORTS_TO_VERIFY); + for (uint256 i = 0; i < NUMBER_OF_REPORTS_TO_VERIFY; i++) { + signedLinkPayloads[i] = signedLinkPayload; + } + + s_verifierProxy.verifyBulk(signedLinkPayloads, abi.encode(link)); + } + + function testBulkVerifyProxyWithNativeFeeSuccess_gas() public { + bytes memory signedNativePayload = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + bytes[] memory signedNativePayloads = new bytes[](NUMBER_OF_REPORTS_TO_VERIFY); + for (uint256 i = 0; i < NUMBER_OF_REPORTS_TO_VERIFY; i++) { + signedNativePayloads[i] = signedNativePayload; + } + + s_verifierProxy.verifyBulk(signedNativePayloads, abi.encode(native)); + } +} + +contract Verifier_verify is BaseTestWithConfiguredVerifierAndFeeManager { + bytes internal s_signedReport; + bytes32 internal s_configDigest; + + function setUp() public override { + BaseTestWithConfiguredVerifierAndFeeManager.setUp(); + BaseTest.V1Report memory s_testReportOne = _createV1Report( + FEED_ID, + OBSERVATIONS_TIMESTAMP, + MEDIAN, + BID, + ASK, + BLOCKNUMBER_UPPER_BOUND, + blockhash(BLOCKNUMBER_UPPER_BOUND), + BLOCKNUMBER_LOWER_BOUND, + uint32(block.timestamp) + ); + (, , s_configDigest) = s_verifier.latestConfigDetails(FEED_ID); + bytes32[3] memory reportContext; + reportContext[0] = s_configDigest; + reportContext[1] = bytes32(abi.encode(uint32(5), uint8(1))); + s_signedReport = _generateV1EncodedBlob(s_testReportOne, reportContext, _getSigners(FAULT_TOLERANCE + 1)); + } + + function testVerifySuccess_gas() public { + changePrank(address(s_verifierProxy)); + + s_verifier.verify(s_signedReport, msg.sender); + } + + function testVerifyProxySuccess_gas() public { + s_verifierProxy.verify(s_signedReport, abi.encode(native)); + } +} + +contract Verifier_accessControlledVerify is BaseTestWithConfiguredVerifierAndFeeManager { + bytes internal 
s_signedReport; + bytes32 internal s_configDigest; + SimpleWriteAccessController s_accessController; + + address internal constant CLIENT = address(9000); + address internal constant ACCESS_CONTROLLER_ADDR = address(10000); + + function setUp() public override { + BaseTestWithConfiguredVerifierAndFeeManager.setUp(); + BaseTest.V1Report memory s_testReportOne = _createV1Report( + FEED_ID, + OBSERVATIONS_TIMESTAMP, + MEDIAN, + BID, + ASK, + BLOCKNUMBER_UPPER_BOUND, + blockhash(BLOCKNUMBER_UPPER_BOUND), + BLOCKNUMBER_LOWER_BOUND, + uint32(block.timestamp) + ); + (, , s_configDigest) = s_verifier.latestConfigDetails(FEED_ID); + bytes32[3] memory reportContext; + reportContext[0] = s_configDigest; + reportContext[1] = bytes32(abi.encode(uint32(5), uint8(1))); + s_signedReport = _generateV1EncodedBlob(s_testReportOne, reportContext, _getSigners(FAULT_TOLERANCE + 1)); + s_accessController = new SimpleWriteAccessController(); + s_verifierProxy.setAccessController(s_accessController); + s_accessController.addAccess(CLIENT); + } + + function testVerifyWithAccessControl_gas() public { + changePrank(CLIENT); + s_verifierProxy.verify(s_signedReport, abi.encode(native)); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/mocks/ErroredVerifier.sol b/contracts/src/v0.8/llo-feeds/test/mocks/ErroredVerifier.sol new file mode 100644 index 00000000..01cb1a50 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/mocks/ErroredVerifier.sol @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {IVerifier} from "../../interfaces/IVerifier.sol"; +import {Common} from "../../libraries/Common.sol"; + +contract ErroredVerifier is IVerifier { + function supportsInterface(bytes4 interfaceId) public pure override returns (bool) { + return interfaceId == this.verify.selector; + } + + function verify( + bytes memory, + /** + * signedReport* + */ + address + ) + external + pure + override + returns ( + /** + * sender* + */ + bytes memory + ) + { + 
revert("Failed to verify"); + } + + function setConfig( + bytes32, + address[] memory, + bytes32[] memory, + uint8, + bytes memory, + uint64, + bytes memory, + Common.AddressAndWeight[] memory + ) external pure override { + revert("Failed to set config"); + } + + function setConfigFromSource( + bytes32, + uint256, + address, + uint32, + address[] memory, + bytes32[] memory, + uint8, + bytes memory, + uint64, + bytes memory, + Common.AddressAndWeight[] memory + ) external pure override { + revert("Failed to set config"); + } + + function activateConfig(bytes32, bytes32) external pure { + revert("Failed to activate config"); + } + + function deactivateConfig(bytes32, bytes32) external pure { + revert("Failed to deactivate config"); + } + + function activateFeed(bytes32) external pure { + revert("Failed to activate feed"); + } + + function deactivateFeed(bytes32) external pure { + revert("Failed to deactivate feed"); + } + + function latestConfigDigestAndEpoch(bytes32) external pure override returns (bool, bytes32, uint32) { + revert("Failed to get latest config digest and epoch"); + } + + function latestConfigDetails(bytes32) external pure override returns (uint32, uint32, bytes32) { + revert("Failed to get latest config details"); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/mocks/ExposedVerifier.sol b/contracts/src/v0.8/llo-feeds/test/mocks/ExposedVerifier.sol new file mode 100644 index 00000000..1c004bf3 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/mocks/ExposedVerifier.sol @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +// ExposedVerifier exposes certain internal Verifier +// methods/structures so that golang code can access them, and we get +// reliable type checking on their usage +contract ExposedVerifier { + constructor() {} + + function _configDigestFromConfigData( + bytes32 feedId, + uint256 chainId, + address contractAddress, + uint64 configCount, + address[] memory signers, + bytes32[] memory 
offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) internal pure returns (bytes32) { + uint256 h = uint256( + keccak256( + abi.encode( + feedId, + chainId, + contractAddress, + configCount, + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ) + ) + ); + uint256 prefixMask = type(uint256).max << (256 - 16); // 0xFFFF00..00 + uint256 prefix = 0x0006 << (256 - 16); // 0x000600..00 + return bytes32((prefix & prefixMask) | (h & ~prefixMask)); + } + + function exposedConfigDigestFromConfigData( + bytes32 _feedId, + uint256 _chainId, + address _contractAddress, + uint64 _configCount, + address[] memory _signers, + bytes32[] memory _offchainTransmitters, + uint8 _f, + bytes calldata _onchainConfig, + uint64 _encodedConfigVersion, + bytes memory _encodedConfig + ) public pure returns (bytes32) { + return + _configDigestFromConfigData( + _feedId, + _chainId, + _contractAddress, + _configCount, + _signers, + _offchainTransmitters, + _f, + _onchainConfig, + _encodedConfigVersion, + _encodedConfig + ); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/mocks/FeeManagerProxy.sol b/contracts/src/v0.8/llo-feeds/test/mocks/FeeManagerProxy.sol new file mode 100644 index 00000000..16935f69 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/mocks/FeeManagerProxy.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import "../../interfaces/IFeeManager.sol"; + +contract FeeManagerProxy { + IFeeManager internal i_feeManager; + + function processFee(bytes calldata payload, bytes calldata parameterPayload) public payable { + i_feeManager.processFee{value: msg.value}(payload, parameterPayload, msg.sender); + } + + function processFeeBulk(bytes[] calldata payloads, bytes calldata parameterPayload) public payable { + i_feeManager.processFeeBulk{value: msg.value}(payloads, parameterPayload, msg.sender); + } + + function 
setFeeManager(IFeeManager feeManager) public { + i_feeManager = feeManager; + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/reward-manager/BaseRewardManager.t.sol b/contracts/src/v0.8/llo-feeds/test/reward-manager/BaseRewardManager.t.sol new file mode 100644 index 00000000..65481513 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/reward-manager/BaseRewardManager.t.sol @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {Test} from "forge-std/Test.sol"; +import {ERC20Mock} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/mocks/ERC20Mock.sol"; +import {RewardManager} from "../../RewardManager.sol"; +import {Common} from "../../libraries/Common.sol"; +import {IRewardManager} from "../../interfaces/IRewardManager.sol"; + +/** + * @title BaseRewardManagerTest + * @author Michael Fletcher + * @notice Base class for all reward manager tests + * @dev This contract is intended to be inherited from and not used directly. It contains functionality to setup a primary and secondary pool + */ +contract BaseRewardManagerTest is Test { + //contracts + ERC20Mock internal asset; + ERC20Mock internal unsupported; + RewardManager internal rewardManager; + + //default address for unregistered recipient + address internal constant INVALID_ADDRESS = address(0); + //contract owner + address internal constant ADMIN = address(uint160(uint256(keccak256("ADMIN")))); + //address to represent verifier contract + address internal constant FEE_MANAGER = address(uint160(uint256(keccak256("FEE_MANAGER")))); + //a general user + address internal constant USER = address(uint160(uint256(keccak256("USER")))); + + //default recipients configured in reward manager + address internal constant DEFAULT_RECIPIENT_1 = address(uint160(uint256(keccak256("DEFAULT_RECIPIENT_1")))); + address internal constant DEFAULT_RECIPIENT_2 = address(uint160(uint256(keccak256("DEFAULT_RECIPIENT_2")))); + address internal constant DEFAULT_RECIPIENT_3 = 
address(uint160(uint256(keccak256("DEFAULT_RECIPIENT_3")))); + address internal constant DEFAULT_RECIPIENT_4 = address(uint160(uint256(keccak256("DEFAULT_RECIPIENT_4")))); + address internal constant DEFAULT_RECIPIENT_5 = address(uint160(uint256(keccak256("DEFAULT_RECIPIENT_5")))); + address internal constant DEFAULT_RECIPIENT_6 = address(uint160(uint256(keccak256("DEFAULT_RECIPIENT_6")))); + address internal constant DEFAULT_RECIPIENT_7 = address(uint160(uint256(keccak256("DEFAULT_RECIPIENT_7")))); + + //additional recipients not in the reward manager + address internal constant DEFAULT_RECIPIENT_8 = address(uint160(uint256(keccak256("DEFAULT_RECIPIENT_8")))); + address internal constant DEFAULT_RECIPIENT_9 = address(uint160(uint256(keccak256("DEFAULT_RECIPIENT_9")))); + + //two pools should be enough to test all edge cases + bytes32 internal constant PRIMARY_POOL_ID = keccak256("primary_pool"); + bytes32 internal constant SECONDARY_POOL_ID = keccak256("secondary_pool"); + bytes32 internal constant INVALID_POOL_ID = keccak256("invalid_pool"); + bytes32 internal constant ZERO_POOL_ID = bytes32(0); + + //convenience arrays of all pool combinations used for testing + bytes32[] internal PRIMARY_POOL_ARRAY = [PRIMARY_POOL_ID]; + bytes32[] internal SECONDARY_POOL_ARRAY = [SECONDARY_POOL_ID]; + bytes32[] internal ALL_POOLS = [PRIMARY_POOL_ID, SECONDARY_POOL_ID]; + + //erc20 config + uint256 internal constant DEFAULT_MINT_QUANTITY = 100 ether; + + //reward scalar (this should match the const in the contract) + uint64 internal constant POOL_SCALAR = 1e18; + uint64 internal constant ONE_PERCENT = POOL_SCALAR / 100; + uint64 internal constant FIFTY_PERCENT = POOL_SCALAR / 2; + uint64 internal constant TEN_PERCENT = POOL_SCALAR / 10; + + //the selector for each error + bytes4 internal immutable UNAUTHORIZED_ERROR_SELECTOR = RewardManager.Unauthorized.selector; + bytes4 internal immutable INVALID_ADDRESS_ERROR_SELECTOR = RewardManager.InvalidAddress.selector; + bytes4 internal 
immutable INVALID_WEIGHT_ERROR_SELECTOR = RewardManager.InvalidWeights.selector; + bytes4 internal immutable INVALID_POOL_ID_ERROR_SELECTOR = RewardManager.InvalidPoolId.selector; + bytes internal constant ONLY_CALLABLE_BY_OWNER_ERROR = "Only callable by owner"; + bytes4 internal immutable INVALID_POOL_LENGTH_SELECTOR = RewardManager.InvalidPoolLength.selector; + + // Events emitted within the reward manager + event RewardRecipientsUpdated(bytes32 indexed poolId, Common.AddressAndWeight[] newRewardRecipients); + event RewardsClaimed(bytes32 indexed poolId, address indexed recipient, uint192 quantity); + event FeeManagerUpdated(address newProxyAddress); + event FeePaid(IRewardManager.FeePayment[] payments, address payee); + + function setUp() public virtual { + //change to admin user + vm.startPrank(ADMIN); + + //init required contracts + _initializeERC20Contracts(); + _initializeRewardManager(); + } + + function _initializeERC20Contracts() internal { + //create the contracts + asset = new ERC20Mock("ASSET", "AST", ADMIN, 0); + unsupported = new ERC20Mock("UNSUPPORTED", "UNS", ADMIN, 0); + + //mint some tokens to the admin + asset.mint(ADMIN, DEFAULT_MINT_QUANTITY); + unsupported.mint(ADMIN, DEFAULT_MINT_QUANTITY); + + //mint some tokens to the user + asset.mint(FEE_MANAGER, DEFAULT_MINT_QUANTITY); + unsupported.mint(FEE_MANAGER, DEFAULT_MINT_QUANTITY); + } + + function _initializeRewardManager() internal { + //create the contract + rewardManager = new RewardManager(address(asset)); + + rewardManager.setFeeManager(FEE_MANAGER); + } + + function createPrimaryPool() public { + rewardManager.setRewardRecipients(PRIMARY_POOL_ID, getPrimaryRecipients()); + } + + function createSecondaryPool() public { + rewardManager.setRewardRecipients(SECONDARY_POOL_ID, getSecondaryRecipients()); + } + + //override this to test variations of different recipients. 
changing this function will require existing tests to be updated as constants are hardcoded to be explicit + function getPrimaryRecipients() public virtual returns (Common.AddressAndWeight[] memory) { + //array of recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](4); + + //init each recipient with even weights. POOL_SCALAR / 4 = 25% of pool + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, POOL_SCALAR / 4); + recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_2, POOL_SCALAR / 4); + recipients[2] = Common.AddressAndWeight(DEFAULT_RECIPIENT_3, POOL_SCALAR / 4); + recipients[3] = Common.AddressAndWeight(DEFAULT_RECIPIENT_4, POOL_SCALAR / 4); + + return recipients; + } + + function getPrimaryRecipientAddresses() public pure returns (address[] memory) { + //array of recipients + address[] memory recipients = new address[](4); + + recipients[0] = DEFAULT_RECIPIENT_1; + recipients[1] = DEFAULT_RECIPIENT_2; + recipients[2] = DEFAULT_RECIPIENT_3; + recipients[3] = DEFAULT_RECIPIENT_4; + + return recipients; + } + + //override this to test variations of different recipients. + function getSecondaryRecipients() public virtual returns (Common.AddressAndWeight[] memory) { + //array of recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](4); + + //init each recipient with even weights. 
2500 = 25% of pool + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, POOL_SCALAR / 4); + recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_5, POOL_SCALAR / 4); + recipients[2] = Common.AddressAndWeight(DEFAULT_RECIPIENT_6, POOL_SCALAR / 4); + recipients[3] = Common.AddressAndWeight(DEFAULT_RECIPIENT_7, POOL_SCALAR / 4); + + return recipients; + } + + function getSecondaryRecipientAddresses() public pure returns (address[] memory) { + //array of recipients + address[] memory recipients = new address[](4); + + recipients[0] = DEFAULT_RECIPIENT_1; + recipients[1] = DEFAULT_RECIPIENT_5; + recipients[2] = DEFAULT_RECIPIENT_6; + recipients[3] = DEFAULT_RECIPIENT_7; + + return recipients; + } + + function addFundsToPool(bytes32 poolId, Common.Asset memory amount, address sender) public { + IRewardManager.FeePayment[] memory payments = new IRewardManager.FeePayment[](1); + payments[0] = IRewardManager.FeePayment(poolId, uint192(amount.amount)); + + addFundsToPool(payments, sender); + } + + function addFundsToPool(IRewardManager.FeePayment[] memory payments, address sender) public { + //record the current address and switch to the sender + address originalAddr = msg.sender; + changePrank(sender); + + uint256 totalPayment; + for (uint256 i; i < payments.length; ++i) { + totalPayment += payments[i].amount; + } + + //approve the amount being paid into the pool + ERC20Mock(address(asset)).approve(address(rewardManager), totalPayment); + + //this represents the verifier adding some funds to the pool + rewardManager.onFeePaid(payments, sender); + + //change back to the original address + changePrank(originalAddr); + } + + function getAsset(uint256 quantity) public view returns (Common.Asset memory) { + return Common.Asset(address(asset), quantity); + } + + function getAssetBalance(address addr) public view returns (uint256) { + return asset.balanceOf(addr); + } + + function claimRewards(bytes32[] memory poolIds, address sender) public { + //record the current 
address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //claim the rewards + rewardManager.claimRewards(poolIds); + + //change back to the original address + changePrank(originalAddr); + } + + function payRecipients(bytes32 poolId, address[] memory recipients, address sender) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //pay the recipients + rewardManager.payRecipients(poolId, recipients); + + //change back to the original address + changePrank(originalAddr); + } + + function setRewardRecipients(bytes32 poolId, Common.AddressAndWeight[] memory recipients, address sender) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //pay the recipients + rewardManager.setRewardRecipients(poolId, recipients); + + //change back to the original address + changePrank(originalAddr); + } + + function setFeeManager(address feeManager, address sender) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //update the proxy + rewardManager.setFeeManager(feeManager); + + //change back to the original address + changePrank(originalAddr); + } + + function updateRewardRecipients(bytes32 poolId, Common.AddressAndWeight[] memory recipients, address sender) public { + //record the current address and switch to the recipient + address originalAddr = msg.sender; + changePrank(sender); + + //pay the recipients + rewardManager.updateRewardRecipients(poolId, recipients); + + //change back to the original address + changePrank(originalAddr); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.claim.t.sol b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.claim.t.sol new file mode 100644 index 00000000..5f07d36c --- /dev/null +++ 
b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.claim.t.sol @@ -0,0 +1,790 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseRewardManagerTest} from "./BaseRewardManager.t.sol"; +import {Common} from "../../libraries/Common.sol"; + +/** + * @title BaseRewardManagerTest + * @author Michael Fletcher + * @notice This contract will test the claim functionality of the RewardManager contract. + */ +contract RewardManagerClaimTest is BaseRewardManagerTest { + uint256 internal constant POOL_DEPOSIT_AMOUNT = 10e18; + + function setUp() public override { + //setup contracts + super.setUp(); + + //create a single pool for these tests + createPrimaryPool(); + + //add funds to the pool to be split among the recipients + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + } + + function test_claimAllRecipients() public { + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //claim funds for each recipient within the pool + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + } + } + + function test_claimRewardsWithDuplicatePoolIdsDoesNotPayoutTwice() public { + //add funds to a different pool to ensure they're not claimed + addFundsToPool(SECONDARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //create an array containing duplicate poolIds + bytes32[] memory poolIds = new bytes32[](2); + poolIds[0] = PRIMARY_POOL_ID; + poolIds[1] = 
PRIMARY_POOL_ID; + + //claim funds for each recipient within the pool + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(poolIds, recipient.addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + } + + //the pool should still have the remaining + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT); + } + + function test_claimSingleRecipient() public { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[0]; + + //claim the individual rewards for this recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //check the recipients balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + + //check the rewardManager has the remaining quantity + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT - expectedRecipientAmount); + } + + function test_claimMultipleRecipients() public { + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, getPrimaryRecipients()[0].addr); + claimRewards(PRIMARY_POOL_ARRAY, getPrimaryRecipients()[1].addr); + + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //check the recipients balance matches the ratio the recipient should have received + assertEq(getAssetBalance(getPrimaryRecipients()[0].addr), expectedRecipientAmount); + assertEq(getAssetBalance(getPrimaryRecipients()[1].addr), expectedRecipientAmount); + + //check the rewardManager has the remaining quantity + 
assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT - (expectedRecipientAmount * 2)); + } + + function test_claimUnregisteredRecipient() public { + //claim the rewards for a recipient who isn't in this pool + claimRewards(PRIMARY_POOL_ARRAY, getSecondaryRecipients()[1].addr); + + //check the recipients didn't receive any fees from this pool + assertEq(getAssetBalance(getSecondaryRecipients()[1].addr), 0); + + //check the rewardManager has the remaining quantity + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT); + } + + function test_claimUnevenAmountRoundsDown() public { + //adding 1 to the pool should leave 1 wei worth of dust, which the contract doesn't handle due to it being economically infeasible + addFundsToPool(PRIMARY_POOL_ID, getAsset(1), FEE_MANAGER); + + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //claim funds for each recipient within the pool + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + } + + //check the rewardManager has the remaining quantity equals 1 wei + assertEq(getAssetBalance(address(rewardManager)), 1); + } + + function test_claimUnregisteredPoolId() public { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[0]; + + //claim the individual rewards for this recipient + claimRewards(SECONDARY_POOL_ARRAY, recipient.addr); + + //check the recipients balance is still 0 as there's no pool to receive fees from + assertEq(getAssetBalance(recipient.addr), 0); + + //check the rewardManager has the full amount 
+ assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT); + } + + function test_singleRecipientClaimMultipleDeposits() public { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[0]; + + //claim the individual rewards for this recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //check the recipients balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + + //check the rewardManager has the remaining quantity, which is 3/4 of the initial deposit + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT - expectedRecipientAmount); + + //add funds to the pool to be split among the recipients + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + + //claim the individual rewards for this recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //check the recipients balance matches the ratio the recipient should have received, which is 1/4 of each deposit + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount * 2); + + //check the rewardManager has the remaining quantity, which is now 3/4 of both deposits + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT * 2 - (expectedRecipientAmount * 2)); + } + + function test_recipientsClaimMultipleDeposits() public { + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //claim funds for each recipient within the pool + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //check 
the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + } + + //the reward manager balance should be 0 as all of the funds have been claimed + assertEq(getAssetBalance(address(rewardManager)), 0); + + //add funds to the pool to be split among the recipients + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + + //claim funds for each recipient within the pool + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //expected recipient amount is 1/4 of the pool deposit + expectedRecipientAmount = (POOL_DEPOSIT_AMOUNT / 4) * 2; + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + } + + //the reward manager balance should again be 0 as all of the funds have been claimed + assertEq(getAssetBalance(address(rewardManager)), 0); + } + + function test_eventIsEmittedUponClaim() public { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[0]; + + //expect an emit + vm.expectEmit(); + + //emit the event that is expected to be emitted + emit RewardsClaimed(PRIMARY_POOL_ID, recipient.addr, uint192(POOL_DEPOSIT_AMOUNT / 4)); + + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + } + + function test_eventIsNotEmittedUponUnsuccessfulClaim() public { + //record logs to check no events were emitted + vm.recordLogs(); + + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[0]; + + //claim the individual rewards for each recipient + claimRewards(SECONDARY_POOL_ARRAY, recipient.addr); + + //no logs should 
have been emitted + assertEq(vm.getRecordedLogs().length, 0); + } +} + +contract RewardManagerRecipientClaimMultiplePoolsTest is BaseRewardManagerTest { + uint256 internal constant POOL_DEPOSIT_AMOUNT = 10e18; + + function setUp() public override { + //setup contracts + super.setUp(); + + //create a two pools + createPrimaryPool(); + createSecondaryPool(); + + //add funds to each of the pools to be split among the recipients + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + addFundsToPool(SECONDARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + } + + function test_claimAllRecipientsSinglePool() public { + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //claim funds for each recipient within the pool + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + } + + //check the pool balance is still equal to DEPOSIT_AMOUNT as the test only claims for one of the pools + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT); + } + + function test_claimMultipleRecipientsSinglePool() public { + //claim the individual rewards for each recipient + claimRewards(SECONDARY_POOL_ARRAY, getSecondaryRecipients()[0].addr); + claimRewards(SECONDARY_POOL_ARRAY, getSecondaryRecipients()[1].addr); + + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //check the recipients balance matches the ratio the recipient should have received + assertEq(getAssetBalance(getSecondaryRecipients()[0].addr), expectedRecipientAmount); + 
assertEq(getAssetBalance(getSecondaryRecipients()[1].addr), expectedRecipientAmount); + + //check the rewardManager has the remaining quantity + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT * 2 - (expectedRecipientAmount * 2)); + } + + function test_claimMultipleRecipientsMultiplePools() public { + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, getPrimaryRecipients()[0].addr); + claimRewards(PRIMARY_POOL_ARRAY, getPrimaryRecipients()[1].addr); + claimRewards(SECONDARY_POOL_ARRAY, getSecondaryRecipients()[0].addr); + claimRewards(SECONDARY_POOL_ARRAY, getSecondaryRecipients()[1].addr); + + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //check the recipients balance matches the ratio the recipient should have received. The first recipient is shared across both pools so should receive 1/4 of each pool + assertEq(getAssetBalance(getPrimaryRecipients()[0].addr), expectedRecipientAmount * 2); + assertEq(getAssetBalance(getPrimaryRecipients()[1].addr), expectedRecipientAmount); + assertEq(getAssetBalance(getSecondaryRecipients()[1].addr), expectedRecipientAmount); + + //check the rewardManager has the remaining quantity + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT); + } + + function test_claimAllRecipientsMultiplePools() public { + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //claim funds for each recipient within the pool + for (uint256 i = 1; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), 
expectedRecipientAmount); + } + + //claim funds for each recipient within the pool + for (uint256 i = 1; i < getSecondaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory secondaryRecipient = getSecondaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(SECONDARY_POOL_ARRAY, secondaryRecipient.addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(secondaryRecipient.addr), expectedRecipientAmount); + } + + //special case to handle the first recipient of each pool as they're the same address + Common.AddressAndWeight memory commonRecipient = getPrimaryRecipients()[0]; + + //claim the individual rewards for each pool + claimRewards(PRIMARY_POOL_ARRAY, commonRecipient.addr); + claimRewards(SECONDARY_POOL_ARRAY, commonRecipient.addr); + + //check the balance matches the ratio the recipient should have received, which is 1/4 of each deposit for each pool + assertEq(getAssetBalance(commonRecipient.addr), expectedRecipientAmount * 2); + } + + function test_claimSingleUniqueRecipient() public { + //the first recipient of the secondary pool is in both pools, so take the second recipient which is unique + Common.AddressAndWeight memory recipient = getSecondaryRecipients()[1]; + + //claim the individual rewards for this recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + claimRewards(SECONDARY_POOL_ARRAY, recipient.addr); + + //the recipient should have received 1/4 of the deposit amount + uint256 recipientExpectedAmount = POOL_DEPOSIT_AMOUNT / 4; + + //the recipient should have received 1/4 of the deposit amount + assertEq(getAssetBalance(recipient.addr), recipientExpectedAmount); + + //check the rewardManager has the remaining quantity + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT * 2 - recipientExpectedAmount); + } + + function test_claimSingleRecipientMultiplePools() public { + //the first 
recipient of the secondary pool is in both pools + Common.AddressAndWeight memory recipient = getSecondaryRecipients()[0]; + + //claim the individual rewards for this recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + claimRewards(SECONDARY_POOL_ARRAY, recipient.addr); + + //the recipient should have received 1/4 of the deposit amount for each pool + uint256 recipientExpectedAmount = (POOL_DEPOSIT_AMOUNT / 4) * 2; + + //this recipient belongs in both pools so should have received 1/4 of each + assertEq(getAssetBalance(recipient.addr), recipientExpectedAmount); + + //check the rewardManager has the remaining quantity + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT * 2 - recipientExpectedAmount); + } + + function test_claimUnregisteredRecipient() public { + //claim the individual rewards for this recipient + claimRewards(PRIMARY_POOL_ARRAY, getSecondaryRecipients()[1].addr); + claimRewards(SECONDARY_POOL_ARRAY, getPrimaryRecipients()[1].addr); + + //check the recipients didn't receive any fees from this pool + assertEq(getAssetBalance(getSecondaryRecipients()[1].addr), 0); + assertEq(getAssetBalance(getPrimaryRecipients()[1].addr), 0); + + //check the rewardManager has the remaining quantity + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT * 2); + } + + function test_claimUnevenAmountRoundsDown() public { + //adding an uneven amount of dust to each pool, this should round down to the nearest whole number with 4 remaining in the contract + addFundsToPool(PRIMARY_POOL_ID, getAsset(3), FEE_MANAGER); + addFundsToPool(SECONDARY_POOL_ID, getAsset(1), FEE_MANAGER); + + //the recipient should have received 1/4 of the deposit amount for each pool + uint256 recipientExpectedAmount = POOL_DEPOSIT_AMOUNT / 4; + + //claim funds for each recipient within the pool + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = 
getPrimaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), recipientExpectedAmount); + } + + //special case to handle the first recipient of each pool as they're the same address + claimRewards(SECONDARY_POOL_ARRAY, getSecondaryRecipients()[0].addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(getSecondaryRecipients()[0].addr), recipientExpectedAmount * 2); + + //claim funds for each recipient of the secondary pool except the first + for (uint256 i = 1; i < getSecondaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getSecondaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(SECONDARY_POOL_ARRAY, recipient.addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), recipientExpectedAmount); + } + + //contract should have 4 remaining + assertEq(getAssetBalance(address(rewardManager)), 4); + } + + function test_singleRecipientClaimMultipleDeposits() public { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getSecondaryRecipients()[0]; + + //claim the individual rewards for this recipient + claimRewards(SECONDARY_POOL_ARRAY, recipient.addr); + + //the recipient should have received 1/4 of the deposit amount + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //check the recipients balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + + //check the rewardManager has the remaining quantity, which is 3/4 of the initial deposit plus the deposit from the second pool + assertEq(getAssetBalance(address(rewardManager)), 
POOL_DEPOSIT_AMOUNT * 2 - expectedRecipientAmount); + + //add funds to the pool to be split among the recipients + addFundsToPool(SECONDARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + + //claim the individual rewards for this recipient + claimRewards(SECONDARY_POOL_ARRAY, recipient.addr); + + //the recipient should have received 1/4 of the next deposit amount + expectedRecipientAmount += POOL_DEPOSIT_AMOUNT / 4; + + //check the recipients balance matches the ratio the recipient should have received, which is 1/4 of each deposit + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + + //check the rewardManager has the remaining quantity, which is now 3/4 of both deposits + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT * 3 - expectedRecipientAmount); + } + + function test_recipientsClaimMultipleDeposits() public { + //the recipient should have received 1/4 of the deposit amount + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //claim funds for each recipient within the pool + for (uint256 i; i < getSecondaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getSecondaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(SECONDARY_POOL_ARRAY, recipient.addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + } + + //the reward manager balance should contain only the funds of the secondary pool + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT); + + //add funds to the pool to be split among the recipients + addFundsToPool(SECONDARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + + //special case to handle the first recipient of each pool as they're the same address + claimRewards(SECONDARY_POOL_ARRAY, getSecondaryRecipients()[0].addr); + + //check the balance matches the ratio the 
recipient should have received + assertEq(getAssetBalance(getSecondaryRecipients()[0].addr), expectedRecipientAmount * 2); + + //claim funds for each recipient within the pool except the first + for (uint256 i = 1; i < getSecondaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getSecondaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(SECONDARY_POOL_ARRAY, recipient.addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount * 2); + } + + //the reward manager balance should again be the balance of the secondary pool as the primary pool has been emptied twice + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT); + } + + function test_claimEmptyPoolWhenSecondPoolContainsFunds() public { + //the recipient should have received 1/4 of the deposit amount + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //claim all rewards for each recipient in the primary pool + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + } + + //claim all the rewards again for the first recipient as that address is a member of both pools + claimRewards(PRIMARY_POOL_ARRAY, getSecondaryRecipients()[0].addr); + + //check the balance + assertEq(getAssetBalance(getSecondaryRecipients()[0].addr), expectedRecipientAmount); + } + + function test_getRewardsAvailableToRecipientInBothPools() public { + //get index 0 as this recipient is in both default pools + bytes32[] memory poolIds = 
rewardManager.getAvailableRewardPoolIds( + getPrimaryRecipients()[0].addr, + 0, + type(uint256).max + ); + + //check the recipient is in both pools + assertEq(poolIds[0], PRIMARY_POOL_ID); + assertEq(poolIds[1], SECONDARY_POOL_ID); + } + + function test_getRewardsAvailableToRecipientInSinglePool() public { + //get index 0 as this recipient is in both default pools + bytes32[] memory poolIds = rewardManager.getAvailableRewardPoolIds( + getPrimaryRecipients()[1].addr, + 0, + type(uint256).max + ); + + //check the recipient is in both pools + assertEq(poolIds[0], PRIMARY_POOL_ID); + assertEq(poolIds[1], ZERO_POOL_ID); + } + + function test_getRewardsAvailableToRecipientInNoPools() public { + //get index 0 as this recipient is in both default pools + bytes32[] memory poolIds = rewardManager.getAvailableRewardPoolIds(FEE_MANAGER, 0, type(uint256).max); + + //check the recipient is in neither pool + assertEq(poolIds[0], ZERO_POOL_ID); + assertEq(poolIds[1], ZERO_POOL_ID); + } + + function test_getRewardsAvailableToRecipientInBothPoolsWhereAlreadyClaimed() public { + //get index 0 as this recipient is in both default pools + bytes32[] memory poolIds = rewardManager.getAvailableRewardPoolIds( + getPrimaryRecipients()[0].addr, + 0, + type(uint256).max + ); + + //check the recipient is in both pools + assertEq(poolIds[0], PRIMARY_POOL_ID); + assertEq(poolIds[1], SECONDARY_POOL_ID); + + //claim the rewards for each pool + claimRewards(PRIMARY_POOL_ARRAY, getPrimaryRecipients()[0].addr); + claimRewards(SECONDARY_POOL_ARRAY, getSecondaryRecipients()[0].addr); + + //get the available pools again + poolIds = rewardManager.getAvailableRewardPoolIds(getPrimaryRecipients()[0].addr, 0, type(uint256).max); + + //user should not be in any pool + assertEq(poolIds[0], ZERO_POOL_ID); + assertEq(poolIds[1], ZERO_POOL_ID); + } + + function test_getAvailableRewardsCursorCannotBeGreaterThanTotalPools() public { + vm.expectRevert(INVALID_POOL_LENGTH_SELECTOR); + + 
rewardManager.getAvailableRewardPoolIds(FEE_MANAGER, type(uint256).max, 0); + } + + function test_getAvailableRewardsCursorAndTotalPoolsEqual() public { + bytes32[] memory poolIds = rewardManager.getAvailableRewardPoolIds(getPrimaryRecipients()[0].addr, 2, 2); + + assertEq(poolIds.length, 0); + } + + function test_getAvailableRewardsCursorSingleResult() public { + bytes32[] memory poolIds = rewardManager.getAvailableRewardPoolIds(getPrimaryRecipients()[0].addr, 0, 1); + + assertEq(poolIds[0], PRIMARY_POOL_ID); + } +} + +contract RewardManagerRecipientClaimDifferentWeightsTest is BaseRewardManagerTest { + uint256 internal constant POOL_DEPOSIT_AMOUNT = 10e18; + + function setUp() public override { + //setup contracts + super.setUp(); + + //create a single pool for these tests + createPrimaryPool(); + + //add funds to the pool to be split among the recipients + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + } + + function getPrimaryRecipients() public virtual override returns (Common.AddressAndWeight[] memory) { + //array of recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](4); + + //init each recipient with uneven weights + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, TEN_PERCENT); + recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_2, TEN_PERCENT * 8); + recipients[2] = Common.AddressAndWeight(DEFAULT_RECIPIENT_3, ONE_PERCENT * 6); + recipients[3] = Common.AddressAndWeight(DEFAULT_RECIPIENT_4, ONE_PERCENT * 4); + + return recipients; + } + + function test_allRecipientsClaimingReceiveExpectedAmount() public { + //loop all the recipients and claim their expected amount + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //the recipient should have 
received a share proportional to their weight + uint256 expectedRecipientAmount = (POOL_DEPOSIT_AMOUNT * recipient.weight) / POOL_SCALAR; + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + } + } +} + +contract RewardManagerRecipientClaimUnevenWeightTest is BaseRewardManagerTest { + uint256 internal constant POOL_DEPOSIT_AMOUNT = 10e18; + + function setUp() public override { + //setup contracts + super.setUp(); + + //create a single pool for these tests + createPrimaryPool(); + } + + function getPrimaryRecipients() public virtual override returns (Common.AddressAndWeight[] memory) { + //array of recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](2); + + uint64 oneThird = POOL_SCALAR / 3; + + //init each recipient with even weights. + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, oneThird); + recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_2, 2 * oneThird + 1); + + return recipients; + } + + function test_allRecipientsClaimingReceiveExpectedAmountWithSmallDeposit() public { + //add a smaller amount of funds to the pool + uint256 smallDeposit = 1e8; + + //add a smaller amount of funds to the pool + addFundsToPool(PRIMARY_POOL_ID, getAsset(smallDeposit), FEE_MANAGER); + + //loop all the recipients and claim their expected amount + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //the recipient should have received a share proportional to their weight + uint256 expectedRecipientAmount = (smallDeposit * recipient.weight) / POOL_SCALAR; + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + } + 
+ //smaller deposits will consequently have less precision and will not be able to be split as evenly, the remaining 1 will be lost due to 333...|... being paid out instead of 333...4| + assertEq(getAssetBalance(address(rewardManager)), 1); + } + + function test_allRecipientsClaimingReceiveExpectedAmount() public { + //add funds to the pool to be split among the recipients + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + + //loop all the recipients and claim their expected amount + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[i]; + + //claim the individual rewards for each recipient + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //the recipient should have received a share proportional to their weight + uint256 expectedRecipientAmount = (POOL_DEPOSIT_AMOUNT * recipient.weight) / POOL_SCALAR; + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + } + + //their should be 0 wei left over indicating a successful split + assertEq(getAssetBalance(address(rewardManager)), 0); + } +} + +contract RewardManagerNoRecipientSet is BaseRewardManagerTest { + uint256 internal constant POOL_DEPOSIT_AMOUNT = 10e18; + + function setUp() public override { + //setup contracts + super.setUp(); + + //add funds to the pool to be split among the recipients once registered + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + } + + function test_claimAllRecipientsAfterRecipientsSet() public { + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //try and claim funds for each recipient within the pool + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = 
getPrimaryRecipients()[i]; + + //there should be no rewards claimed as the recipient is not registered + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //check the recipient received nothing + assertEq(getAssetBalance(recipient.addr), 0); + } + + //Set the recipients after the rewards have been paid into the pool + setRewardRecipients(PRIMARY_POOL_ID, getPrimaryRecipients(), ADMIN); + + //claim funds for each recipient within the pool + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //get the recipient that is claiming + Common.AddressAndWeight memory recipient = getPrimaryRecipients()[i]; + + //there should be no rewards claimed as the recipient is registered + claimRewards(PRIMARY_POOL_ARRAY, recipient.addr); + + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipient.addr), expectedRecipientAmount); + } + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.general.t.sol b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.general.t.sol new file mode 100644 index 00000000..7fde76d5 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.general.t.sol @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseRewardManagerTest} from "./BaseRewardManager.t.sol"; +import {RewardManager} from "../../RewardManager.sol"; +import {ERC20Mock} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/mocks/ERC20Mock.sol"; +import {IRewardManager} from "../../interfaces/IRewardManager.sol"; + +/** + * @title BaseRewardManagerTest + * @author Michael Fletcher + * @notice This contract will test the core functionality of the RewardManager contract + */ +contract RewardManagerSetupTest is BaseRewardManagerTest { + uint256 internal constant POOL_DEPOSIT_AMOUNT = 10e18; + + function setUp() public override { + //setup contracts + super.setUp(); + } + + function 
test_rejectsZeroLinkAddressOnConstruction() public { + //should revert if the contract is a zero address + vm.expectRevert(INVALID_ADDRESS_ERROR_SELECTOR); + + //create a rewardManager with a zero link address + new RewardManager(address(0)); + } + + function test_eventEmittedUponFeeManagerUpdate() public { + //expect the event to be emitted + vm.expectEmit(); + + //emit the event that is expected to be emitted + emit FeeManagerUpdated(FEE_MANAGER); + + //set the verifier proxy + setFeeManager(FEE_MANAGER, ADMIN); + } + + function test_eventEmittedUponFeePaid() public { + //create pool and add funds + createPrimaryPool(); + + //change to the feeManager who is the one who will be paying the fees + changePrank(FEE_MANAGER); + + //approve the amount being paid into the pool + ERC20Mock(getAsset(POOL_DEPOSIT_AMOUNT).assetAddress).approve(address(rewardManager), POOL_DEPOSIT_AMOUNT); + + IRewardManager.FeePayment[] memory payments = new IRewardManager.FeePayment[](1); + payments[0] = IRewardManager.FeePayment(PRIMARY_POOL_ID, uint192(POOL_DEPOSIT_AMOUNT)); + + //event is emitted when funds are added + vm.expectEmit(); + emit FeePaid(payments, FEE_MANAGER); + + //this represents the verifier adding some funds to the pool + rewardManager.onFeePaid(payments, FEE_MANAGER); + } + + function test_setFeeManagerZeroAddress() public { + //should revert if the contract is a zero address + vm.expectRevert(INVALID_ADDRESS_ERROR_SELECTOR); + + //set the verifier proxy + setFeeManager(address(0), ADMIN); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.payRecipients.t.sol b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.payRecipients.t.sol new file mode 100644 index 00000000..89fac663 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.payRecipients.t.sol @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseRewardManagerTest} from "./BaseRewardManager.t.sol"; 
+import {IRewardManager} from "../../interfaces/IRewardManager.sol"; + +/** + * @title BaseRewardManagerTest + * @author Michael Fletcher + * @notice This contract will test the payRecipients functionality of the RewardManager contract + */ +contract RewardManagerPayRecipientsTest is BaseRewardManagerTest { + uint256 internal constant POOL_DEPOSIT_AMOUNT = 10e18; + + function setUp() public override { + //setup contracts + super.setUp(); + + //create a single pool for these tests + createPrimaryPool(); + + //add funds to the pool to be split among the recipients + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + } + + function test_payAllRecipients() public { + //pay all the recipients in the pool + payRecipients(PRIMARY_POOL_ID, getPrimaryRecipientAddresses(), ADMIN); + + //each recipient should receive 1/4 of the pool + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //check each recipient received the correct amount + for (uint256 i = 0; i < getPrimaryRecipientAddresses().length; i++) { + assertEq(getAssetBalance(getPrimaryRecipientAddresses()[i]), expectedRecipientAmount); + } + } + + function test_paySingleRecipient() public { + //get the first individual recipient + address recipient = getPrimaryRecipientAddresses()[0]; + + //get a single recipient as an array + address[] memory recipients = new address[](1); + recipients[0] = recipient; + + //pay a single recipient + payRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + + //the recipient should have received 1/4 of the deposit amount + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + assertEq(getAssetBalance(recipient), expectedRecipientAmount); + } + + function test_payRecipientWithInvalidPool() public { + //get the first individual recipient + address recipient = getPrimaryRecipientAddresses()[0]; + + //get a single recipient as an array + address[] memory recipients = new address[](1); + recipients[0] = recipient; + + //pay a single recipient + 
payRecipients(SECONDARY_POOL_ID, recipients, ADMIN); + + //the recipient should have received nothing + assertEq(getAssetBalance(recipient), 0); + } + + function test_payRecipientsEmptyRecipientList() public { + //get a single recipient + address[] memory recipients = new address[](0); + + //pay a single recipient + payRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + + //rewardManager should have the full balance + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT); + } + + function test_payAllRecipientsWithAdditionalUnregisteredRecipient() public { + //load all the recipients and add an additional one who is not in the pool + address[] memory recipients = new address[](getPrimaryRecipientAddresses().length + 1); + for (uint256 i = 0; i < getPrimaryRecipientAddresses().length; i++) { + recipients[i] = getPrimaryRecipientAddresses()[i]; + } + recipients[recipients.length - 1] = DEFAULT_RECIPIENT_5; + + //pay the recipients + payRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + + //each recipient should receive 1/4 of the pool except the last + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //check each recipient received the correct amount + for (uint256 i = 0; i < getPrimaryRecipientAddresses().length; i++) { + assertEq(getAssetBalance(getPrimaryRecipientAddresses()[i]), expectedRecipientAmount); + } + + //the unregistered recipient should receive nothing + assertEq(getAssetBalance(DEFAULT_RECIPIENT_5), 0); + } + + function test_payAllRecipientsWithAdditionalInvalidRecipient() public { + //load all the recipients and add an additional one which is invalid, that should receive nothing + address[] memory recipients = new address[](getPrimaryRecipientAddresses().length + 1); + for (uint256 i = 0; i < getPrimaryRecipientAddresses().length; i++) { + recipients[i] = getPrimaryRecipientAddresses()[i]; + } + recipients[recipients.length - 1] = INVALID_ADDRESS; + + //pay the recipients + payRecipients(PRIMARY_POOL_ID, recipients, ADMIN); 
+ + //each recipient should receive 1/4 of the pool except the last + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //check each recipient received the correct amount + for (uint256 i = 0; i < getPrimaryRecipientAddresses().length; i++) { + assertEq(getAssetBalance(getPrimaryRecipientAddresses()[i]), expectedRecipientAmount); + } + } + + function test_paySubsetOfRecipientsInPool() public { + //load a subset of the recipients into an array + address[] memory recipients = new address[](getPrimaryRecipientAddresses().length - 1); + for (uint256 i = 0; i < recipients.length; i++) { + recipients[i] = getPrimaryRecipientAddresses()[i]; + } + + //pay the subset of recipients + payRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + + //each recipient should receive 1/4 of the pool except the last + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //check each subset of recipients received the correct amount + for (uint256 i = 0; i < recipients.length - 1; i++) { + assertEq(getAssetBalance(recipients[i]), expectedRecipientAmount); + } + + //check the pool has the remaining balance + assertEq( + getAssetBalance(address(rewardManager)), + POOL_DEPOSIT_AMOUNT - expectedRecipientAmount * recipients.length + ); + } + + function test_payAllRecipientsFromNonAdminUser() public { + //should revert if the caller isn't an admin or recipient within the pool + vm.expectRevert(UNAUTHORIZED_ERROR_SELECTOR); + + //pay all the recipients in the pool + payRecipients(PRIMARY_POOL_ID, getPrimaryRecipientAddresses(), FEE_MANAGER); + } + + function test_payAllRecipientsFromRecipientInPool() public { + //pay all the recipients in the pool + payRecipients(PRIMARY_POOL_ID, getPrimaryRecipientAddresses(), DEFAULT_RECIPIENT_1); + + //each recipient should receive 1/4 of the pool + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //check each recipient received the correct amount + for (uint256 i = 0; i < getPrimaryRecipientAddresses().length; i++) { + 
assertEq(getAssetBalance(getPrimaryRecipientAddresses()[i]), expectedRecipientAmount); + } + } + + function test_payRecipientsWithInvalidPoolId() public { + //pay all the recipients in the pool + payRecipients(INVALID_POOL_ID, getPrimaryRecipientAddresses(), ADMIN); + + //pool should still contain the full balance + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT); + } + + function test_addFundsToPoolAsOwner() public { + //add funds to the pool + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + } + + function test_addFundsToPoolAsNonOwnerOrFeeManager() public { + //should revert if the caller isn't an admin or recipient within the pool + vm.expectRevert(UNAUTHORIZED_ERROR_SELECTOR); + + IRewardManager.FeePayment[] memory payments = new IRewardManager.FeePayment[](1); + payments[0] = IRewardManager.FeePayment(PRIMARY_POOL_ID, uint192(POOL_DEPOSIT_AMOUNT)); + + //add funds to the pool + rewardManager.onFeePaid(payments, USER); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.setRecipients.t.sol b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.setRecipients.t.sol new file mode 100644 index 00000000..1cf5b51f --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.setRecipients.t.sol @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseRewardManagerTest} from "./BaseRewardManager.t.sol"; +import {Common} from "../../libraries/Common.sol"; + +/** + * @title BaseRewardManagerTest + * @author Michael Fletcher + * @notice This contract will test the setRecipient functionality of the RewardManager contract + */ +contract RewardManagerSetRecipientsTest is BaseRewardManagerTest { + function setUp() public override { + //setup contracts + super.setUp(); + } + + function test_setRewardRecipients() public { + //set the recipients + setRewardRecipients(PRIMARY_POOL_ID, getPrimaryRecipients(), ADMIN); + } + + 
function test_setRewardRecipientsIsEmpty() public { + //array of recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](4); + + //should revert as the uninitialized recipients default to the zero address + vm.expectRevert(INVALID_ADDRESS_ERROR_SELECTOR); + + //set the recipients + setRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_setRewardRecipientWithZeroWeight() public { + //array of recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](5); + + //init each recipient with even weights + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, ONE_PERCENT * 25); + recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_2, ONE_PERCENT * 25); + recipients[2] = Common.AddressAndWeight(DEFAULT_RECIPIENT_3, ONE_PERCENT * 25); + recipients[3] = Common.AddressAndWeight(DEFAULT_RECIPIENT_4, ONE_PERCENT * 25); + recipients[4] = Common.AddressAndWeight(DEFAULT_RECIPIENT_5, 0); + + //set the recipients + setRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_setRewardRecipientWithZeroAddress() public { + //array of recipients + Common.AddressAndWeight[] memory recipients = getPrimaryRecipients(); + + //override the first recipient with a zero address + recipients[0].addr = address(0); + + //should revert as the first recipient has been set to the zero address + vm.expectRevert(INVALID_ADDRESS_ERROR_SELECTOR); + + //set the recipients + setRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_setRewardRecipientWeights() public { + //array of recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](4); + + //init each recipient with even weights + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, 25); + recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_2, 25); + recipients[2] = Common.AddressAndWeight(DEFAULT_RECIPIENT_3, 25); + recipients[3] = Common.AddressAndWeight(DEFAULT_RECIPIENT_4, 25); + + //should revert if the 
weights do not total 100% + vm.expectRevert(INVALID_WEIGHT_ERROR_SELECTOR); + + //attempt to set recipients whose combined weight is under 100% + setRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_setSingleRewardRecipient() public { + //array of recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](1); + + //init each recipient with even weights + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, POOL_SCALAR); + + //set the recipients with a recipient with a weight of 100% + setRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_setRewardRecipientTwice() public { + //set the recipients + setRewardRecipients(PRIMARY_POOL_ID, getPrimaryRecipients(), ADMIN); + + //should revert if recipients for this pool have already been set + vm.expectRevert(INVALID_POOL_ID_ERROR_SELECTOR); + + //set the recipients again + setRewardRecipients(PRIMARY_POOL_ID, getPrimaryRecipients(), ADMIN); + } + + function test_setRewardRecipientFromNonOwnerOrFeeManagerAddress() public { + //should revert if the sender is not the owner or proxy + vm.expectRevert(UNAUTHORIZED_ERROR_SELECTOR); + + //set the recipients + setRewardRecipients(PRIMARY_POOL_ID, getPrimaryRecipients(), USER); + } + + function test_setRewardRecipientFromManagerAddress() public { + //update the proxy address + setFeeManager(FEE_MANAGER, ADMIN); + + //set the recipients + setRewardRecipients(PRIMARY_POOL_ID, getPrimaryRecipients(), FEE_MANAGER); + } + + function test_eventIsEmittedUponSetRecipients() public { + //expect an emit + vm.expectEmit(); + + //emit the event that is expected to be emitted + emit RewardRecipientsUpdated(PRIMARY_POOL_ID, getPrimaryRecipients()); + + //set the recipients + setRewardRecipients(PRIMARY_POOL_ID, getPrimaryRecipients(), ADMIN); + } + + function test_setRecipientContainsDuplicateRecipients() public { + //create a new array to hold the existing recipients + Common.AddressAndWeight[] memory 
recipients = new Common.AddressAndWeight[](getPrimaryRecipients().length * 2); + + //add all the existing recipients + for (uint256 i; i < getPrimaryRecipients().length; i++) { + recipients[i] = getPrimaryRecipients()[i]; + } + //add all the existing recipients again + for (uint256 i; i < getPrimaryRecipients().length; i++) { + recipients[i + getPrimaryRecipients().length] = getPrimaryRecipients()[i]; + } + + //should revert as the list contains a duplicate + vm.expectRevert(INVALID_ADDRESS_ERROR_SELECTOR); + + //set the recipients + setRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.updateRewardRecipients.t.sol b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.updateRewardRecipients.t.sol new file mode 100644 index 00000000..6c51a0fb --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/reward-manager/RewardManager.updateRewardRecipients.t.sol @@ -0,0 +1,450 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseRewardManagerTest} from "./BaseRewardManager.t.sol"; +import {Common} from "../../libraries/Common.sol"; + +/** + * @title BaseRewardManagerTest + * @author Michael Fletcher + * @notice This contract will test the updateRecipient functionality of the RewardManager contract + */ +contract RewardManagerUpdateRewardRecipientsTest is BaseRewardManagerTest { + uint256 internal constant POOL_DEPOSIT_AMOUNT = 10e18; + + function setUp() public override { + //setup contracts + super.setUp(); + + //create a single pool for these tests + createPrimaryPool(); + + //add funds to the pool to be split among the recipients + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + } + + function test_onlyAdminCanUpdateRecipients() public { + //should revert if the caller is not the admin + vm.expectRevert(ONLY_CALLABLE_BY_OWNER_ERROR); + + //updating a recipient should force the funds to be paid out + 
updateRewardRecipients(PRIMARY_POOL_ID, getPrimaryRecipients(), FEE_MANAGER); + } + + function test_updateAllRecipientsWithSameAddressAndWeight() public { + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //updating a recipient should force the funds to be paid out + updateRewardRecipients(PRIMARY_POOL_ID, getPrimaryRecipients(), ADMIN); + + //check each recipient received the correct amount + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(getPrimaryRecipients()[i].addr), expectedRecipientAmount); + } + } + + function test_updatePartialRecipientsWithSameAddressAndWeight() public { + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //get a subset of the recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](2); + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, ONE_PERCENT * 25); + recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_2, ONE_PERCENT * 25); + + //updating a recipient should force the funds to be paid out + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + + //check each recipient received the correct amount + for (uint256 i; i < recipients.length; i++) { + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipients[i].addr), expectedRecipientAmount); + } + + //the reward manager should still have half remaining funds + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT / 2); + } + + function test_updateRecipientWithNewZeroAddress() public { + //create a new array to hold the existing recipients plus a new zero address + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](getPrimaryRecipients().length + 1); + + //add all the existing recipients + 
for (uint256 i; i < getPrimaryRecipients().length; i++) { + recipients[i] = getPrimaryRecipients()[i]; + } + //add a new address to the primary recipients + recipients[recipients.length - 1] = Common.AddressAndWeight(address(0), 0); + + //should revert if the recipient is a zero address + vm.expectRevert(INVALID_ADDRESS_ERROR_SELECTOR); + + //update the recipients with invalid address + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_updateRecipientsContainsDuplicateRecipients() public { + //create a new array to hold the existing recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](getPrimaryRecipients().length * 2); + + //add all the existing recipients + for (uint256 i; i < getPrimaryRecipients().length; i++) { + recipients[i] = getPrimaryRecipients()[i]; + } + //add all the existing recipients again + for (uint256 i; i < getPrimaryRecipients().length; i++) { + recipients[i + getPrimaryRecipients().length] = getPrimaryRecipients()[i]; + } + + //should revert as the list contains a duplicate + vm.expectRevert(INVALID_ADDRESS_ERROR_SELECTOR); + + //update the recipients with the duplicate addresses + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_updateRecipientsToDifferentSet() public { + //create a list of containing recipients from the primary configured set, and new recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](getPrimaryRecipients().length + 4); + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //copy the recipient and set the weight to 0 which implies the recipient is being replaced + recipients[i] = Common.AddressAndWeight(getPrimaryRecipients()[i].addr, 0); + } + + //add the new recipients individually + recipients[4] = Common.AddressAndWeight(DEFAULT_RECIPIENT_5, ONE_PERCENT * 25); + recipients[5] = Common.AddressAndWeight(DEFAULT_RECIPIENT_6, ONE_PERCENT * 25); + recipients[6] = 
Common.AddressAndWeight(DEFAULT_RECIPIENT_7, ONE_PERCENT * 25); + recipients[7] = Common.AddressAndWeight(DEFAULT_RECIPIENT_8, ONE_PERCENT * 25); + + //updating a recipient should force the funds to be paid out for the primary recipients + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_updateRecipientsToDifferentPartialSet() public { + //create a list of containing recipients from the primary configured set, and new recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](getPrimaryRecipients().length + 2); + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //copy the recipient and set the weight to 0 which implies the recipient is being replaced + recipients[i] = Common.AddressAndWeight(getPrimaryRecipients()[i].addr, 0); + } + + //add the new recipients individually + recipients[4] = Common.AddressAndWeight(DEFAULT_RECIPIENT_5, FIFTY_PERCENT); + recipients[5] = Common.AddressAndWeight(DEFAULT_RECIPIENT_6, FIFTY_PERCENT); + + //updating a recipient should force the funds to be paid out for the primary recipients + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_updateRecipientsToDifferentLargerSet() public { + //create a list of containing recipients from the primary configured set, and new recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](getPrimaryRecipients().length + 5); + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //copy the recipient and set the weight to 0 which implies the recipient is being replaced + recipients[i] = Common.AddressAndWeight(getPrimaryRecipients()[i].addr, 0); + } + + //add the new recipients individually + recipients[4] = Common.AddressAndWeight(DEFAULT_RECIPIENT_5, TEN_PERCENT * 2); + recipients[5] = Common.AddressAndWeight(DEFAULT_RECIPIENT_6, TEN_PERCENT * 2); + recipients[6] = Common.AddressAndWeight(DEFAULT_RECIPIENT_7, TEN_PERCENT * 2); + recipients[7] = 
Common.AddressAndWeight(DEFAULT_RECIPIENT_8, TEN_PERCENT * 2); + recipients[8] = Common.AddressAndWeight(DEFAULT_RECIPIENT_9, TEN_PERCENT * 2); + + //updating a recipient should force the funds to be paid out for the primary recipients + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_updateRecipientsUpdateAndRemoveExistingForLargerSet() public { + //create a list of containing recipients from the primary configured set, and new recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](9); + + //update the existing recipients + recipients[0] = Common.AddressAndWeight(getPrimaryRecipients()[0].addr, 0); + recipients[1] = Common.AddressAndWeight(getPrimaryRecipients()[1].addr, 0); + recipients[2] = Common.AddressAndWeight(getPrimaryRecipients()[2].addr, TEN_PERCENT * 3); + recipients[3] = Common.AddressAndWeight(getPrimaryRecipients()[3].addr, TEN_PERCENT * 3); + + //add the new recipients individually + recipients[4] = Common.AddressAndWeight(DEFAULT_RECIPIENT_5, TEN_PERCENT); + recipients[5] = Common.AddressAndWeight(DEFAULT_RECIPIENT_6, TEN_PERCENT); + recipients[6] = Common.AddressAndWeight(DEFAULT_RECIPIENT_7, TEN_PERCENT); + recipients[7] = Common.AddressAndWeight(DEFAULT_RECIPIENT_8, TEN_PERCENT); + recipients[8] = Common.AddressAndWeight(DEFAULT_RECIPIENT_9, TEN_PERCENT); + + //should revert as the weight does not equal 100% + vm.expectRevert(INVALID_WEIGHT_ERROR_SELECTOR); + + //updating a recipient should force the funds to be paid out for the primary recipients + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_updateRecipientsUpdateAndRemoveExistingForSmallerSet() public { + //create a list of containing recipients from the primary configured set, and new recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](5); + + //update the existing recipients + recipients[0] = Common.AddressAndWeight(getPrimaryRecipients()[0].addr, 0); 
+ recipients[1] = Common.AddressAndWeight(getPrimaryRecipients()[1].addr, 0); + recipients[2] = Common.AddressAndWeight(getPrimaryRecipients()[2].addr, TEN_PERCENT * 3); + recipients[3] = Common.AddressAndWeight(getPrimaryRecipients()[3].addr, TEN_PERCENT * 2); + + //add the new recipients individually + recipients[4] = Common.AddressAndWeight(DEFAULT_RECIPIENT_5, TEN_PERCENT * 5); + + //updating a recipient should force the funds to be paid out for the primary recipients + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_updateRecipientsToDifferentSetWithInvalidWeights() public { + //create a list of containing recipients from the primary configured set, and new recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](getPrimaryRecipients().length + 2); + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //copy the recipient and set the weight to 0 which implies the recipient is being replaced + recipients[i] = Common.AddressAndWeight(getPrimaryRecipients()[i].addr, 0); + } + + //add the new recipients individually + recipients[4] = Common.AddressAndWeight(DEFAULT_RECIPIENT_5, TEN_PERCENT * 5); + recipients[5] = Common.AddressAndWeight(DEFAULT_RECIPIENT_6, TEN_PERCENT); + + //should revert as the weight will not equal 100% + vm.expectRevert(INVALID_WEIGHT_ERROR_SELECTOR); + + //updating a recipient should force the funds to be paid out for the primary recipients + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_updatePartialRecipientsToSubset() public { + //create a list of containing recipients from the primary configured set, and new recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](4); + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, 0); + recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_2, 0); + recipients[2] = Common.AddressAndWeight(DEFAULT_RECIPIENT_3, TEN_PERCENT * 5); + recipients[3] = 
Common.AddressAndWeight(DEFAULT_RECIPIENT_4, TEN_PERCENT * 5); + + //updating a recipient should force the funds to be paid out for the primary recipients + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_updatePartialRecipientsWithUnderWeightSet() public { + //create a list of containing recipients from the primary configured set, and new recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](4); + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, TEN_PERCENT); + recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_2, TEN_PERCENT); + recipients[2] = Common.AddressAndWeight(DEFAULT_RECIPIENT_3, TEN_PERCENT); + recipients[3] = Common.AddressAndWeight(DEFAULT_RECIPIENT_4, TEN_PERCENT); + + //should revert as the new weights exceed the previous weights being replaced + vm.expectRevert(INVALID_WEIGHT_ERROR_SELECTOR); + + //updating a recipient should force the funds to be paid out for the primary recipients + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_updatePartialRecipientsWithExcessiveWeight() public { + //create a list of containing recipients from the primary configured set, and new recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](4); + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, TEN_PERCENT); + recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_2, TEN_PERCENT); + recipients[2] = Common.AddressAndWeight(DEFAULT_RECIPIENT_3, TEN_PERCENT); + recipients[3] = Common.AddressAndWeight(DEFAULT_RECIPIENT_4, POOL_SCALAR); + + //should revert as the new weights exceed the previous weights being replaced + vm.expectRevert(INVALID_WEIGHT_ERROR_SELECTOR); + + //updating a recipient should force the funds to be paid out for the primary recipients + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + } + + function test_updateRecipientWeights() public { + //expected recipient amount is 
1/4 of the pool deposit for original recipients + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //create a list of containing recipients from the primary configured set with their new weights + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](4); + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, TEN_PERCENT); + recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_2, TEN_PERCENT); + recipients[2] = Common.AddressAndWeight(DEFAULT_RECIPIENT_3, TEN_PERCENT * 3); + recipients[3] = Common.AddressAndWeight(DEFAULT_RECIPIENT_4, TEN_PERCENT * 5); + + //updating a recipient should force the funds to be paid out for the primary recipients + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + + //check each recipient received the correct amount + for (uint256 i; i < recipients.length; i++) { + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipients[i].addr), expectedRecipientAmount); + } + + //the reward manager should have no funds remaining + assertEq(getAssetBalance(address(rewardManager)), 0); + + //add more funds to the pool to check new distribution + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + + //loop each user and claim the rewards + for (uint256 i; i < recipients.length; i++) { + //claim the rewards for this recipient + claimRewards(PRIMARY_POOL_ARRAY, recipients[i].addr); + } + + //manually check the balance of each recipient + assertEq( + getAssetBalance(DEFAULT_RECIPIENT_1), + (POOL_DEPOSIT_AMOUNT * TEN_PERCENT) / POOL_SCALAR + expectedRecipientAmount + ); + assertEq( + getAssetBalance(DEFAULT_RECIPIENT_2), + (POOL_DEPOSIT_AMOUNT * TEN_PERCENT) / POOL_SCALAR + expectedRecipientAmount + ); + assertEq( + getAssetBalance(DEFAULT_RECIPIENT_3), + (POOL_DEPOSIT_AMOUNT * TEN_PERCENT * 3) / POOL_SCALAR + expectedRecipientAmount + ); + assertEq( + getAssetBalance(DEFAULT_RECIPIENT_4), + (POOL_DEPOSIT_AMOUNT * 
TEN_PERCENT * 5) / POOL_SCALAR + expectedRecipientAmount + ); + } + + function test_partialUpdateRecipientWeights() public { + //expected recipient amount is 1/4 of the pool deposit for original recipients + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //create a list of containing recipients from the primary configured set with their new weights + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](2); + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, TEN_PERCENT); + recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_2, TEN_PERCENT * 4); + + //updating a recipient should force the funds to be paid out for the primary recipients + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + + //check each recipient received the correct amount + for (uint256 i; i < recipients.length; i++) { + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipients[i].addr), expectedRecipientAmount); + } + + //the reward manager should have half the funds remaining + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT / 2); + + //add more funds to the pool to check new distribution + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + + //loop each user and claim the rewards + for (uint256 i; i < recipients.length; i++) { + //claim the rewards for this recipient + claimRewards(PRIMARY_POOL_ARRAY, recipients[i].addr); + } + + //manually check the balance of each recipient + assertEq( + getAssetBalance(DEFAULT_RECIPIENT_1), + (POOL_DEPOSIT_AMOUNT * TEN_PERCENT) / POOL_SCALAR + expectedRecipientAmount + ); + assertEq( + getAssetBalance(DEFAULT_RECIPIENT_2), + (POOL_DEPOSIT_AMOUNT * TEN_PERCENT * 4) / POOL_SCALAR + expectedRecipientAmount + ); + + //the reward manager should still hold a full pool deposit in unclaimed rewards + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT); + } + + function 
test_eventIsEmittedUponUpdateRecipients() public { + //expect an emit + vm.expectEmit(); + + //emit the event that is expected to be emitted + emit RewardRecipientsUpdated(PRIMARY_POOL_ID, getPrimaryRecipients()); + + //expected recipient amount is 1/4 of the pool deposit + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //updating a recipient should force the funds to be paid out + updateRewardRecipients(PRIMARY_POOL_ID, getPrimaryRecipients(), ADMIN); + + //check each recipient received the correct amount + for (uint256 i; i < getPrimaryRecipients().length; i++) { + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(getPrimaryRecipients()[i].addr), expectedRecipientAmount); + } + } +} + +contract RewardManagerUpdateRewardRecipientsMultiplePoolsTest is BaseRewardManagerTest { + uint256 internal constant POOL_DEPOSIT_AMOUNT = 10e18; + + function setUp() public override { + //setup contracts + super.setUp(); + + //create a single pool for these tests + createPrimaryPool(); + createSecondaryPool(); + + //add funds to the pool to be split among the recipients + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + addFundsToPool(SECONDARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + } + + function getSecondaryRecipients() public override returns (Common.AddressAndWeight[] memory) { + //for testing purposes, the primary and secondary pool to contain the same recipients + return getPrimaryRecipients(); + } + + function test_updatePrimaryRecipientWeights() public { + //expected recipient amount is 1/4 of the pool deposit for original recipients + uint256 expectedRecipientAmount = POOL_DEPOSIT_AMOUNT / 4; + + //create a list of containing recipients from the primary configured set, and new recipients + Common.AddressAndWeight[] memory recipients = new Common.AddressAndWeight[](4); + recipients[0] = Common.AddressAndWeight(DEFAULT_RECIPIENT_1, TEN_PERCENT * 4); + 
recipients[1] = Common.AddressAndWeight(DEFAULT_RECIPIENT_2, TEN_PERCENT * 4); + recipients[2] = Common.AddressAndWeight(DEFAULT_RECIPIENT_3, TEN_PERCENT); + recipients[3] = Common.AddressAndWeight(DEFAULT_RECIPIENT_4, TEN_PERCENT); + + //updating a recipient should force the funds to be paid out for the primary recipients + updateRewardRecipients(PRIMARY_POOL_ID, recipients, ADMIN); + + //check each recipient received the correct amount + for (uint256 i; i < recipients.length; i++) { + //check the balance matches the ratio the recipient should have received + assertEq(getAssetBalance(recipients[i].addr), expectedRecipientAmount); + } + + //the reward manager should still have the funds for the secondary pool + assertEq(getAssetBalance(address(rewardManager)), POOL_DEPOSIT_AMOUNT); + + //add more funds to the pool to check new distribution + addFundsToPool(PRIMARY_POOL_ID, getAsset(POOL_DEPOSIT_AMOUNT), FEE_MANAGER); + + //claim the rewards for the updated recipients manually + claimRewards(PRIMARY_POOL_ARRAY, recipients[0].addr); + claimRewards(PRIMARY_POOL_ARRAY, recipients[1].addr); + claimRewards(PRIMARY_POOL_ARRAY, recipients[2].addr); + claimRewards(PRIMARY_POOL_ARRAY, recipients[3].addr); + + //check the balance matches the ratio the recipient who were updated should have received + assertEq( + getAssetBalance(recipients[0].addr), + (POOL_DEPOSIT_AMOUNT * TEN_PERCENT * 4) / POOL_SCALAR + expectedRecipientAmount + ); + assertEq( + getAssetBalance(recipients[1].addr), + (POOL_DEPOSIT_AMOUNT * TEN_PERCENT * 4) / POOL_SCALAR + expectedRecipientAmount + ); + assertEq( + getAssetBalance(recipients[2].addr), + (POOL_DEPOSIT_AMOUNT * TEN_PERCENT) / POOL_SCALAR + expectedRecipientAmount + ); + assertEq( + getAssetBalance(recipients[3].addr), + (POOL_DEPOSIT_AMOUNT * TEN_PERCENT) / POOL_SCALAR + expectedRecipientAmount + ); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/BaseVerifierTest.t.sol 
b/contracts/src/v0.8/llo-feeds/test/verifier/BaseVerifierTest.t.sol new file mode 100644 index 00000000..00258937 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/BaseVerifierTest.t.sol @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {Test} from "forge-std/Test.sol"; +import {VerifierProxy} from "../../VerifierProxy.sol"; +import {IERC165} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; +import {IVerifier} from "../../interfaces/IVerifier.sol"; +import {ErroredVerifier} from "../mocks/ErroredVerifier.sol"; +import {Verifier} from "../../Verifier.sol"; +import {Strings} from "@openzeppelin/contracts/utils/Strings.sol"; +import {AccessControllerInterface} from "../../../shared/interfaces/AccessControllerInterface.sol"; +import {FeeManager} from "../../FeeManager.sol"; +import {Common} from "../../libraries/Common.sol"; +import {ERC20Mock} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/mocks/ERC20Mock.sol"; +import {WERC20Mock} from "../../../shared/mocks/WERC20Mock.sol"; +import {FeeManager} from "../../FeeManager.sol"; +import {RewardManager} from "../../RewardManager.sol"; + +contract BaseTest is Test { + uint256 internal constant MAX_ORACLES = 31; + address internal constant ADMIN = address(1); + address internal constant USER = address(2); + address internal constant MOCK_VERIFIER_ADDRESS = address(100); + address internal constant MOCK_VERIFIER_ADDRESS_TWO = address(200); + address internal constant ACCESS_CONTROLLER_ADDRESS = address(300); + + bytes32 internal constant V_MASK = 0x0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff; + bytes32 internal constant V1_BITMASK = 0x0001000000000000000000000000000000000000000000000000000000000000; + bytes32 internal constant V2_BITMASK = 0x0002000000000000000000000000000000000000000000000000000000000000; + bytes32 internal constant V3_BITMASK = 
0x0003000000000000000000000000000000000000000000000000000000000000; + + //version 0 feeds + bytes32 internal constant FEED_ID = (keccak256("ETH-USD") & V_MASK) | V1_BITMASK; + bytes32 internal constant FEED_ID_2 = (keccak256("PLI-USD") & V_MASK) | V1_BITMASK; + bytes32 internal constant FEED_ID_3 = (keccak256("BTC-USD") & V_MASK) | V1_BITMASK; + + //version 3 feeds + bytes32 internal constant FEED_ID_V3 = (keccak256("ETH-USD") & V_MASK) | V3_BITMASK; + + bytes32 internal constant INVALID_FEED = keccak256("INVALID"); + uint32 internal constant OBSERVATIONS_TIMESTAMP = 1000; + uint64 internal constant BLOCKNUMBER_LOWER_BOUND = 1000; + uint64 internal constant BLOCKNUMBER_UPPER_BOUND = BLOCKNUMBER_LOWER_BOUND + 5; + int192 internal constant MEDIAN = 1 ether; + int192 internal constant BID = 500000000 gwei; + int192 internal constant ASK = 2 ether; + + bytes32 internal constant EMPTY_BYTES = bytes32(""); + + uint8 internal constant FAULT_TOLERANCE = 10; + uint64 internal constant VERIFIER_VERSION = 1; + + string internal constant SERVER_URL = "https://mercury.server/client/"; + uint8 internal constant MAX_COMMITMENT_DELAY = 5; + + VerifierProxy internal s_verifierProxy; + Verifier internal s_verifier; + Verifier internal s_verifier_2; + ErroredVerifier internal s_erroredVerifier; + + struct Signer { + uint256 mockPrivateKey; + address signerAddress; + } + + struct V1Report { + // The feed ID the report has data for + bytes32 feedId; + // The time the median value was observed on + uint32 observationsTimestamp; + // The median value agreed in an OCR round + int192 median; + // The best bid value agreed in an OCR round + int192 bid; + // The best ask value agreed in an OCR round + int192 ask; + // The upper bound of the block range the median value was observed within + uint64 blocknumberUpperBound; + // The blockhash for the upper bound of block range (ensures correct blockchain) + bytes32 upperBlockhash; + // The lower bound of the block range the median value was 
observed within + uint64 blocknumberLowerBound; + // The current block timestamp + uint64 currentBlockTimestamp; + } + + Signer[MAX_ORACLES] internal s_signers; + bytes32[] internal s_offchaintransmitters; + bool private s_baseTestInitialized; + + function setUp() public virtual { + // BaseTest.setUp is often called multiple times from tests' setUp due to inheritance. + if (s_baseTestInitialized) return; + s_baseTestInitialized = true; + + vm.startPrank(ADMIN); + vm.mockCall( + MOCK_VERIFIER_ADDRESS, + abi.encodeWithSelector(IERC165.supportsInterface.selector, IVerifier.verify.selector), + abi.encode(true) + ); + s_verifierProxy = new VerifierProxy(AccessControllerInterface(address(0))); + + s_verifier = new Verifier(address(s_verifierProxy)); + s_verifier_2 = new Verifier(address(s_verifierProxy)); + s_erroredVerifier = new ErroredVerifier(); + + for (uint256 i; i < MAX_ORACLES; i++) { + uint256 mockPK = i + 1; + s_signers[i].mockPrivateKey = mockPK; + s_signers[i].signerAddress = vm.addr(mockPK); + } + } + + function _getSigners(uint256 numSigners) internal view returns (Signer[] memory) { + Signer[] memory signers = new Signer[](numSigners); + for (uint256 i; i < numSigners; i++) { + signers[i] = s_signers[i]; + } + return signers; + } + + function _getSignerAddresses(Signer[] memory signers) internal view returns (address[] memory) { + address[] memory signerAddrs = new address[](signers.length); + for (uint256 i = 0; i < signerAddrs.length; i++) { + signerAddrs[i] = s_signers[i].signerAddress; + } + return signerAddrs; + } + + function _generateSignerSignatures( + bytes memory report, + bytes32[3] memory reportContext, + Signer[] memory signers + ) internal pure returns (bytes32[] memory rawRs, bytes32[] memory rawSs, bytes32 rawVs) { + bytes32[] memory rs = new bytes32[](signers.length); + bytes32[] memory ss = new bytes32[](signers.length); + bytes memory vs = new bytes(signers.length); + + bytes32 hash = keccak256(abi.encodePacked(keccak256(report), 
reportContext)); + + for (uint256 i = 0; i < signers.length; i++) { + (uint8 v, bytes32 r, bytes32 s) = vm.sign(signers[i].mockPrivateKey, hash); + rs[i] = r; + ss[i] = s; + vs[i] = bytes1(v - 27); + } + return (rs, ss, bytes32(vs)); + } + + function _encodeReport(V1Report memory report) internal pure returns (bytes memory) { + return + abi.encode( + report.feedId, + report.observationsTimestamp, + report.median, + report.bid, + report.ask, + report.blocknumberUpperBound, + report.upperBlockhash, + report.blocknumberLowerBound, + report.currentBlockTimestamp + ); + } + + function _generateV1EncodedBlob( + V1Report memory report, + bytes32[3] memory reportContext, + Signer[] memory signers + ) internal pure returns (bytes memory) { + bytes memory reportBytes = _encodeReport(report); + (bytes32[] memory rs, bytes32[] memory ss, bytes32 rawVs) = _generateSignerSignatures( + reportBytes, + reportContext, + signers + ); + return abi.encode(reportContext, reportBytes, rs, ss, rawVs); + } + + function _configDigestFromConfigData( + bytes32 feedId, + uint256 chainId, + address verifierAddr, + uint64 configCount, + address[] memory signers, + bytes32[] memory offchainTransmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) internal pure returns (bytes32) { + uint256 h = uint256( + keccak256( + abi.encode( + feedId, + chainId, + verifierAddr, + configCount, + signers, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ) + ) + ); + uint256 prefixMask = type(uint256).max << (256 - 16); // 0xFFFF00..00 + uint256 prefix = 0x0006 << (256 - 16); // 0x000600..00 + return bytes32((prefix & prefixMask) | (h & ~prefixMask)); + } + + function _createV1Report( + bytes32 feedId, + uint32 observationsTimestamp, + int192 median, + int192 bid, + int192 ask, + uint64 blocknumberUpperBound, + bytes32 upperBlockhash, + uint64 blocknumberLowerBound, + uint32 currentBlockTimestamp + ) internal 
pure returns (V1Report memory) { + return + V1Report({ + feedId: feedId, + observationsTimestamp: observationsTimestamp, + median: median, + bid: bid, + ask: ask, + blocknumberUpperBound: blocknumberUpperBound, + upperBlockhash: upperBlockhash, + blocknumberLowerBound: blocknumberLowerBound, + currentBlockTimestamp: currentBlockTimestamp + }); + } + + function _ccipReadURL(bytes32 feedId, uint256 commitmentBlock) internal pure returns (string memory url) { + return + string( + abi.encodePacked( + SERVER_URL, + "?feedIDHex=", + Strings.toHexString(uint256(feedId)), + "&L2Blocknumber=", + Strings.toString(commitmentBlock) + ) + ); + } +} + +contract BaseTestWithConfiguredVerifierAndFeeManager is BaseTest { + FeeManager internal feeManager; + RewardManager internal rewardManager; + ERC20Mock internal link; + WERC20Mock internal native; + + uint256 internal constant DEFAULT_REPORT_PLI_FEE = 1e10; + uint256 internal constant DEFAULT_REPORT_NATIVE_FEE = 1e12; + + bytes32 internal v1ConfigDigest; + bytes32 internal v3ConfigDigest; + + struct V3Report { + // The feed ID the report has data for + bytes32 feedId; + // The time the median value was observed on + uint32 observationsTimestamp; + // The timestamp the report is valid from + uint32 validFromTimestamp; + // The link fee + uint192 linkFee; + // The native fee + uint192 nativeFee; + // The expiry of the report + uint32 expiresAt; + // The median value agreed in an OCR round + int192 benchmarkPrice; + // The best bid value agreed in an OCR round + int192 bid; + // The best ask value agreed in an OCR round + int192 ask; + } + + function setUp() public virtual override { + BaseTest.setUp(); + Signer[] memory signers = _getSigners(MAX_ORACLES); + + s_verifierProxy.initializeVerifier(address(s_verifier)); + s_verifier.setConfig( + FEED_ID, + _getSignerAddresses(signers), + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + (, , v1ConfigDigest) 
= s_verifier.latestConfigDetails(FEED_ID); + + s_verifier.setConfig( + FEED_ID_V3, + _getSignerAddresses(signers), + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + (, , v3ConfigDigest) = s_verifier.latestConfigDetails(FEED_ID_V3); + + link = new ERC20Mock("PLI", "PLI", ADMIN, 0); + native = new WERC20Mock(); + + rewardManager = new RewardManager(address(link)); + feeManager = new FeeManager(address(link), address(native), address(s_verifierProxy), address(rewardManager)); + + s_verifierProxy.setFeeManager(feeManager); + rewardManager.setFeeManager(address(feeManager)); + } + + function _encodeReport(V3Report memory report) internal pure returns (bytes memory) { + return + abi.encode( + report.feedId, + report.observationsTimestamp, + report.validFromTimestamp, + report.nativeFee, + report.linkFee, + report.expiresAt, + report.benchmarkPrice, + report.bid, + report.ask + ); + } + + function _generateV3EncodedBlob( + V3Report memory report, + bytes32[3] memory reportContext, + Signer[] memory signers + ) internal pure returns (bytes memory) { + bytes memory reportBytes = _encodeReport(report); + (bytes32[] memory rs, bytes32[] memory ss, bytes32 rawVs) = _generateSignerSignatures( + reportBytes, + reportContext, + signers + ); + return abi.encode(reportContext, reportBytes, rs, ss, rawVs); + } + + function _generateV1Report() internal view returns (V1Report memory) { + return + _createV1Report( + FEED_ID, + OBSERVATIONS_TIMESTAMP, + MEDIAN, + BID, + ASK, + BLOCKNUMBER_UPPER_BOUND, + bytes32(blockhash(BLOCKNUMBER_UPPER_BOUND)), + BLOCKNUMBER_LOWER_BOUND, + uint32(block.timestamp) + ); + } + + function _generateV3Report() internal view returns (V3Report memory) { + return + V3Report({ + feedId: FEED_ID_V3, + observationsTimestamp: OBSERVATIONS_TIMESTAMP, + validFromTimestamp: uint32(block.timestamp), + nativeFee: uint192(DEFAULT_REPORT_NATIVE_FEE), + linkFee: 
uint192(DEFAULT_REPORT_PLI_FEE), + expiresAt: uint32(block.timestamp), + benchmarkPrice: MEDIAN, + bid: BID, + ask: ASK + }); + } + + function _generateReportContext(bytes32 configDigest) internal pure returns (bytes32[3] memory) { + bytes32[3] memory reportContext; + reportContext[0] = configDigest; + reportContext[1] = bytes32(abi.encode(uint32(5), uint8(1))); + return reportContext; + } + + function _approveLink(address spender, uint256 quantity, address sender) internal { + address originalAddr = msg.sender; + changePrank(sender); + + link.approve(spender, quantity); + changePrank(originalAddr); + } + + function _approveNative(address spender, uint256 quantity, address sender) internal { + address originalAddr = msg.sender; + changePrank(sender); + + native.approve(spender, quantity); + changePrank(originalAddr); + } + + function _verify(bytes memory payload, address feeAddress, uint256 wrappedNativeValue, address sender) internal { + address originalAddr = msg.sender; + changePrank(sender); + + s_verifierProxy.verify{value: wrappedNativeValue}(payload, abi.encode(feeAddress)); + + changePrank(originalAddr); + } + + function _verifyBulk( + bytes[] memory payload, + address feeAddress, + uint256 wrappedNativeValue, + address sender + ) internal { + address originalAddr = msg.sender; + changePrank(sender); + + s_verifierProxy.verifyBulk{value: wrappedNativeValue}(payload, abi.encode(feeAddress)); + + changePrank(originalAddr); + } +} + +contract BaseTestWithMultipleConfiguredDigests is BaseTestWithConfiguredVerifierAndFeeManager { + bytes32 internal s_configDigestOne; + bytes32 internal s_configDigestTwo; + bytes32 internal s_configDigestThree; + bytes32 internal s_configDigestFour; + bytes32 internal s_configDigestFive; + + uint32 internal s_numConfigsSet; + + uint8 internal constant FAULT_TOLERANCE_TWO = 2; + uint8 internal constant FAULT_TOLERANCE_THREE = 1; + + function setUp() public virtual override { + BaseTestWithConfiguredVerifierAndFeeManager.setUp(); + 
Signer[] memory signers = _getSigners(MAX_ORACLES); + + (, , s_configDigestOne) = s_verifier.latestConfigDetails(FEED_ID); + + // Verifier 1, Feed 1, Config 2 + Signer[] memory secondSetOfSigners = _getSigners(8); + s_verifier.setConfig( + FEED_ID, + _getSignerAddresses(secondSetOfSigners), + s_offchaintransmitters, + FAULT_TOLERANCE_TWO, + bytes(""), + 2, + bytes(""), + new Common.AddressAndWeight[](0) + ); + (, , s_configDigestTwo) = s_verifier.latestConfigDetails(FEED_ID); + + // Verifier 1, Feed 1, Config 3 + Signer[] memory thirdSetOfSigners = _getSigners(5); + s_verifier.setConfig( + FEED_ID, + _getSignerAddresses(thirdSetOfSigners), + s_offchaintransmitters, + FAULT_TOLERANCE_THREE, + bytes(""), + 3, + bytes(""), + new Common.AddressAndWeight[](0) + ); + (s_numConfigsSet, , s_configDigestThree) = s_verifier.latestConfigDetails(FEED_ID); + + // Verifier 1, Feed 2, Config 1 + s_verifier.setConfig( + FEED_ID_2, + _getSignerAddresses(signers), + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + 4, + bytes(""), + new Common.AddressAndWeight[](0) + ); + (, , s_configDigestFour) = s_verifier.latestConfigDetails(FEED_ID_2); + + // Verifier 2, Feed 3, Config 1 + s_verifierProxy.initializeVerifier(address(s_verifier_2)); + s_verifier_2.setConfig( + FEED_ID_3, + _getSignerAddresses(signers), + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + (, , s_configDigestFive) = s_verifier_2.latestConfigDetails(FEED_ID_3); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierActivateConfigTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierActivateConfigTest.t.sol new file mode 100644 index 00000000..f53c26ba --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierActivateConfigTest.t.sol @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTestWithConfiguredVerifierAndFeeManager, 
BaseTestWithMultipleConfiguredDigests} from "./BaseVerifierTest.t.sol"; +import {Verifier} from "../../Verifier.sol"; + +contract VerifierActivateConfigTest is BaseTestWithConfiguredVerifierAndFeeManager { + function test_revertsIfNotOwner() public { + vm.expectRevert("Only callable by owner"); + + changePrank(address(s_verifierProxy)); + s_verifier.activateConfig(FEED_ID, bytes32("mock")); + } + + function test_revertsIfDigestIsEmpty() public { + vm.expectRevert(abi.encodeWithSelector(Verifier.DigestEmpty.selector)); + s_verifier.activateConfig(FEED_ID, bytes32("")); + } + + function test_revertsIfDigestNotSet() public { + vm.expectRevert(abi.encodeWithSelector(Verifier.DigestNotSet.selector, FEED_ID, bytes32("non-existent-digest"))); + s_verifier.activateConfig(FEED_ID, bytes32("non-existent-digest")); + } +} + +contract VerifierActivateConfigWithDeactivatedConfigTest is BaseTestWithMultipleConfiguredDigests { + bytes32[3] internal s_reportContext; + + event ConfigActivated(bytes32 configDigest); + + V1Report internal s_testReportOne; + + function setUp() public override { + BaseTestWithMultipleConfiguredDigests.setUp(); + s_reportContext[0] = s_configDigestTwo; + s_reportContext[1] = bytes32(abi.encode(uint32(5), uint8(1))); + s_testReportOne = _createV1Report( + FEED_ID, + uint32(block.timestamp), + MEDIAN, + BID, + ASK, + uint64(block.number), + blockhash(block.number + 3), + uint64(block.number + 3), + uint32(block.timestamp) + ); + + s_verifier.deactivateConfig(FEED_ID, s_configDigestTwo); + } + + function test_allowsVerification() public { + s_verifier.activateConfig(FEED_ID, s_configDigestTwo); + changePrank(address(s_verifierProxy)); + + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE_TWO + 1) + ); + s_verifier.verify(signedReport, msg.sender); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierDeactivateFeedTest.t.sol 
b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierDeactivateFeedTest.t.sol new file mode 100644 index 00000000..97647c88 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierDeactivateFeedTest.t.sol @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTestWithConfiguredVerifierAndFeeManager, BaseTestWithMultipleConfiguredDigests} from "./BaseVerifierTest.t.sol"; +import {Verifier} from "../../Verifier.sol"; + +contract VerifierActivateFeedTest is BaseTestWithConfiguredVerifierAndFeeManager { + function test_revertsIfNotOwnerActivateFeed() public { + changePrank(address(s_verifierProxy)); + vm.expectRevert("Only callable by owner"); + s_verifier.activateFeed(FEED_ID); + } + + function test_revertsIfNotOwnerDeactivateFeed() public { + changePrank(address(s_verifierProxy)); + vm.expectRevert("Only callable by owner"); + s_verifier.deactivateFeed(FEED_ID); + } + + function test_revertsIfNoFeedExistsActivate() public { + vm.expectRevert(abi.encodeWithSelector(Verifier.InvalidFeed.selector, INVALID_FEED)); + s_verifier.activateFeed(INVALID_FEED); + } + + function test_revertsIfNoFeedExistsDeactivate() public { + vm.expectRevert(abi.encodeWithSelector(Verifier.InvalidFeed.selector, INVALID_FEED)); + s_verifier.deactivateFeed(INVALID_FEED); + } +} + +contract VerifierDeactivateFeedWithVerifyTest is BaseTestWithMultipleConfiguredDigests { + bytes32[3] internal s_reportContext; + + event ConfigActivated(bytes32 configDigest); + + V1Report internal s_testReportOne; + + function setUp() public override { + BaseTestWithMultipleConfiguredDigests.setUp(); + s_reportContext[0] = s_configDigestOne; + s_reportContext[1] = bytes32(abi.encode(uint32(5), uint8(1))); + s_testReportOne = _createV1Report( + FEED_ID, + uint32(block.timestamp), + MEDIAN, + BID, + ASK, + uint64(block.number), + blockhash(block.number + 3), + uint64(block.number + 3), + uint32(block.timestamp) + ); + + s_verifier.deactivateFeed(FEED_ID); + } + 
+ function test_currentReportAllowsVerification() public { + s_verifier.activateFeed(FEED_ID); + changePrank(address(s_verifierProxy)); + + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + s_verifier.verify(signedReport, msg.sender); + } + + function test_previousReportAllowsVerification() public { + s_verifier.activateFeed(FEED_ID); + changePrank(address(s_verifierProxy)); + + s_reportContext[0] = s_configDigestTwo; + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE_TWO + 1) + ); + s_verifier.verify(signedReport, msg.sender); + } + + function test_currentReportFailsVerification() public { + changePrank(address(s_verifierProxy)); + + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + + vm.expectRevert(abi.encodeWithSelector(Verifier.InactiveFeed.selector, FEED_ID)); + s_verifier.verify(signedReport, msg.sender); + } + + function test_previousReportFailsVerification() public { + changePrank(address(s_verifierProxy)); + + s_reportContext[0] = s_configDigestTwo; + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE_TWO + 1) + ); + + vm.expectRevert(abi.encodeWithSelector(Verifier.InactiveFeed.selector, FEED_ID)); + s_verifier.verify(signedReport, msg.sender); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxyConstructorTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxyConstructorTest.t.sol new file mode 100644 index 00000000..b085dc8a --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxyConstructorTest.t.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTest} from "./BaseVerifierTest.t.sol"; +import {VerifierProxy} from "../../VerifierProxy.sol"; +import 
{AccessControllerInterface} from "../../../shared/interfaces/AccessControllerInterface.sol"; + +contract VerifierProxyConstructorTest is BaseTest { + function test_correctlySetsTheOwner() public { + VerifierProxy proxy = new VerifierProxy(AccessControllerInterface(address(0))); + assertEq(proxy.owner(), ADMIN); + } + + function test_correctlySetsTheCorrectAccessControllerInterface() public { + address accessControllerAddr = address(1234); + VerifierProxy proxy = new VerifierProxy(AccessControllerInterface(accessControllerAddr)); + assertEq(address(proxy.s_accessController()), accessControllerAddr); + } + + function test_correctlySetsVersion() public { + string memory version = s_verifierProxy.typeAndVersion(); + assertEq(version, "VerifierProxy 2.0.0"); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxyInitializeVerifierTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxyInitializeVerifierTest.t.sol new file mode 100644 index 00000000..e02b14fe --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxyInitializeVerifierTest.t.sol @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTest} from "./BaseVerifierTest.t.sol"; +import {VerifierProxy} from "../../VerifierProxy.sol"; + +contract VerifierProxyInitializeVerifierTest is BaseTest { + bytes32 latestDigest; + + function setUp() public override { + BaseTest.setUp(); + } + + function test_revertsIfNotOwner() public { + changePrank(USER); + vm.expectRevert("Only callable by owner"); + s_verifierProxy.initializeVerifier(address(s_verifier)); + } + + function test_revertsIfZeroAddress() public { + vm.expectRevert(abi.encodeWithSelector(VerifierProxy.ZeroAddress.selector)); + s_verifierProxy.initializeVerifier(address(0)); + } + + function test_revertsIfVerifierAlreadyInitialized() public { + s_verifierProxy.initializeVerifier(address(s_verifier)); + 
vm.expectRevert(abi.encodeWithSelector(VerifierProxy.VerifierAlreadyInitialized.selector, address(s_verifier))); + s_verifierProxy.initializeVerifier(address(s_verifier)); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxySetAccessControllerTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxySetAccessControllerTest.t.sol new file mode 100644 index 00000000..04889e0d --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxySetAccessControllerTest.t.sol @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTest} from "./BaseVerifierTest.t.sol"; +import {AccessControllerInterface} from "../../../shared/interfaces/AccessControllerInterface.sol"; + +contract VerifierProxySetAccessControllerTest is BaseTest { + event AccessControllerSet(address oldAccessController, address newAccessController); + + function test_revertsIfCalledByNonOwner() public { + vm.expectRevert("Only callable by owner"); + + changePrank(USER); + s_verifierProxy.setAccessController(AccessControllerInterface(ACCESS_CONTROLLER_ADDRESS)); + } + + function test_successfullySetsNewAccessController() public { + s_verifierProxy.setAccessController(AccessControllerInterface(ACCESS_CONTROLLER_ADDRESS)); + AccessControllerInterface ac = s_verifierProxy.s_accessController(); + assertEq(address(ac), ACCESS_CONTROLLER_ADDRESS); + } + + function test_successfullySetsNewAccessControllerIsEmpty() public { + s_verifierProxy.setAccessController(AccessControllerInterface(address(0))); + AccessControllerInterface ac = s_verifierProxy.s_accessController(); + assertEq(address(ac), address(0)); + } + + function test_emitsTheCorrectEvent() public { + vm.expectEmit(true, false, false, false); + emit AccessControllerSet(address(0), ACCESS_CONTROLLER_ADDRESS); + s_verifierProxy.setAccessController(AccessControllerInterface(ACCESS_CONTROLLER_ADDRESS)); + } +} diff --git 
a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxySetVerifierTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxySetVerifierTest.t.sol new file mode 100644 index 00000000..ea23f880 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxySetVerifierTest.t.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTestWithConfiguredVerifierAndFeeManager} from "./BaseVerifierTest.t.sol"; +import {IVerifier} from "../../interfaces/IVerifier.sol"; +import {VerifierProxy} from "../../VerifierProxy.sol"; +import {IERC165} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol"; +import {Common} from "../../libraries/Common.sol"; + +contract VerifierProxyInitializeVerifierTest is BaseTestWithConfiguredVerifierAndFeeManager { + function test_revertsIfNotCorrectVerifier() public { + vm.expectRevert(abi.encodeWithSelector(VerifierProxy.AccessForbidden.selector)); + s_verifierProxy.setVerifier(bytes32("prev-config"), bytes32("new-config"), new Common.AddressAndWeight[](0)); + } + + function test_revertsIfDigestAlreadySet() public { + (, , bytes32 takenDigest) = s_verifier.latestConfigDetails(FEED_ID); + + address maliciousVerifier = address(666); + bytes32 maliciousDigest = bytes32("malicious-digest"); + vm.mockCall( + maliciousVerifier, + abi.encodeWithSelector(IERC165.supportsInterface.selector, IVerifier.verify.selector), + abi.encode(true) + ); + s_verifierProxy.initializeVerifier(maliciousVerifier); + vm.expectRevert( + abi.encodeWithSelector(VerifierProxy.ConfigDigestAlreadySet.selector, takenDigest, address(s_verifier)) + ); + changePrank(address(maliciousVerifier)); + s_verifierProxy.setVerifier(maliciousDigest, takenDigest, new Common.AddressAndWeight[](0)); + } + + function test_updatesVerifierIfVerifier() public { + (, , bytes32 prevDigest) = s_verifier.latestConfigDetails(FEED_ID); + changePrank(address(s_verifier)); + 
s_verifierProxy.setVerifier(prevDigest, bytes32("new-config"), new Common.AddressAndWeight[](0)); + assertEq(s_verifierProxy.getVerifier(bytes32("new-config")), address(s_verifier)); + assertEq(s_verifierProxy.getVerifier(prevDigest), address(s_verifier)); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxyTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxyTest.t.sol new file mode 100644 index 00000000..ea7e02d7 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxyTest.t.sol @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTestWithConfiguredVerifierAndFeeManager} from "./BaseVerifierTest.t.sol"; +import {VerifierProxy} from "../../VerifierProxy.sol"; +import {FeeManager} from "../../FeeManager.sol"; + +contract VerifierProxyInitializeVerifierTest is BaseTestWithConfiguredVerifierAndFeeManager { + function test_setFeeManagerZeroAddress() public { + vm.expectRevert(abi.encodeWithSelector(VerifierProxy.ZeroAddress.selector)); + s_verifierProxy.setFeeManager(FeeManager(address(0))); + } + + function test_setFeeManagerWhichDoesntHonourInterface() public { + vm.expectRevert(abi.encodeWithSelector(VerifierProxy.FeeManagerInvalid.selector)); + s_verifierProxy.setFeeManager(FeeManager(address(s_verifier))); + } + + function test_setFeeManagerWhichDoesntHonourIERC165Interface() public { + vm.expectRevert(); + s_verifierProxy.setFeeManager(FeeManager(address(1))); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxyUnsetVerifierTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxyUnsetVerifierTest.t.sol new file mode 100644 index 00000000..746aa955 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierProxyUnsetVerifierTest.t.sol @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTest, BaseTestWithConfiguredVerifierAndFeeManager} from "./BaseVerifierTest.t.sol"; 
+import {VerifierProxy} from "../../VerifierProxy.sol"; + +contract VerifierProxyUnsetVerifierTest is BaseTest { + function test_revertsIfNotAdmin() public { + vm.expectRevert("Only callable by owner"); + + changePrank(USER); + s_verifierProxy.unsetVerifier(bytes32("")); + } + + function test_revertsIfDigestDoesNotExist() public { + vm.expectRevert(abi.encodeWithSelector(VerifierProxy.VerifierNotFound.selector, bytes32(""))); + s_verifierProxy.unsetVerifier(bytes32("")); + } +} + +contract VerifierProxyUnsetVerifierWithPreviouslySetVerifierTest is BaseTestWithConfiguredVerifierAndFeeManager { + bytes32 internal s_configDigest; + + event VerifierUnset(bytes32 configDigest, address verifierAddr); + + function setUp() public override { + BaseTestWithConfiguredVerifierAndFeeManager.setUp(); + (, , s_configDigest) = s_verifier.latestConfigDetails(FEED_ID); + } + + function test_correctlyUnsetsVerifier() public { + s_verifierProxy.unsetVerifier(s_configDigest); + address verifierAddr = s_verifierProxy.getVerifier(s_configDigest); + assertEq(verifierAddr, address(0)); + } + + function test_emitsAnEventAfterUnsettingVerifier() public { + vm.expectEmit(true, false, false, false); + emit VerifierUnset(s_configDigest, address(s_verifier)); + s_verifierProxy.unsetVerifier(s_configDigest); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigFromSourceTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigFromSourceTest.t.sol new file mode 100644 index 00000000..0cd59021 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigFromSourceTest.t.sol @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTest, BaseTestWithMultipleConfiguredDigests} from "./BaseVerifierTest.t.sol"; +import {Common} from "../../libraries/Common.sol"; + +contract VerifierSetConfigFromSourceTest is BaseTest { + function setUp() public virtual override { + BaseTest.setUp(); + } + + function 
test_revertsIfCalledByNonOwner() public { + vm.expectRevert("Only callable by owner"); + Signer[] memory signers = _getSigners(MAX_ORACLES); + + changePrank(USER); + s_verifier.setConfigFromSource( + FEED_ID, + 12345, + address(12345), + 0, + _getSignerAddresses(signers), + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + } +} + +contract VerifierSetConfigFromSourceMultipleDigestsTest is BaseTestWithMultipleConfiguredDigests { + function test_correctlyUpdatesTheDigestInTheProxy() public { + Signer[] memory newSigners = _getSigners(15); + + s_verifier.setConfigFromSource( + FEED_ID, + 12345, + address(12345), + 0, + _getSignerAddresses(newSigners), + s_offchaintransmitters, + 4, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + + (, , bytes32 configDigest) = s_verifier.latestConfigDetails(FEED_ID); + address verifierAddr = s_verifierProxy.getVerifier(configDigest); + assertEq(verifierAddr, address(s_verifier)); + } + + function test_correctlyUpdatesDigestsOnMultipleVerifiersInTheProxy() public { + Signer[] memory newSigners = _getSigners(15); + + s_verifier.setConfigFromSource( + FEED_ID_2, + 12345, + address(12345), + 0, + _getSignerAddresses(newSigners), + s_offchaintransmitters, + 4, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + + (, , bytes32 configDigest) = s_verifier.latestConfigDetails(FEED_ID_2); + address verifierAddr = s_verifierProxy.getVerifier(configDigest); + assertEq(verifierAddr, address(s_verifier)); + + s_verifier_2.setConfigFromSource( + FEED_ID_3, + 12345, + address(12345), + 0, + _getSignerAddresses(newSigners), + s_offchaintransmitters, + 4, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + + (, , bytes32 configDigest2) = s_verifier_2.latestConfigDetails(FEED_ID_3); + address verifierAddr2 = s_verifierProxy.getVerifier(configDigest2); + 
assertEq(verifierAddr2, address(s_verifier_2)); + } + + function test_correctlySetsConfigWhenDigestsAreRemoved() public { + s_verifier.deactivateConfig(FEED_ID, s_configDigestTwo); + + Signer[] memory newSigners = _getSigners(15); + + s_verifier.setConfigFromSource( + FEED_ID, + 12345, + address(s_verifier), + 0, + _getSignerAddresses(newSigners), + s_offchaintransmitters, + 4, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + + bytes32 expectedConfigDigest = _configDigestFromConfigData( + FEED_ID, + 12345, + address(s_verifier), + s_numConfigsSet + 1, + _getSignerAddresses(newSigners), + s_offchaintransmitters, + 4, + bytes(""), + VERIFIER_VERSION, + bytes("") + ); + + (uint32 configCount, uint32 blockNumber, bytes32 configDigest) = s_verifier.latestConfigDetails(FEED_ID); + + assertEq(configCount, s_numConfigsSet + 1); + assertEq(blockNumber, block.number); + assertEq(configDigest, expectedConfigDigest); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigTest.t.sol new file mode 100644 index 00000000..a4e15dcd --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierSetConfigTest.t.sol @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTest, BaseTestWithMultipleConfiguredDigests} from "./BaseVerifierTest.t.sol"; +import {Verifier} from "../../Verifier.sol"; +import {Common} from "../../libraries/Common.sol"; + +contract VerifierSetConfigTest is BaseTest { + function setUp() public virtual override { + BaseTest.setUp(); + } + + function test_revertsIfCalledByNonOwner() public { + vm.expectRevert("Only callable by owner"); + Signer[] memory signers = _getSigners(MAX_ORACLES); + + changePrank(USER); + s_verifier.setConfig( + FEED_ID, + _getSignerAddresses(signers), + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new 
Common.AddressAndWeight[](0) + ); + } + + function test_revertsIfSetWithTooManySigners() public { + address[] memory signers = new address[](MAX_ORACLES + 1); + vm.expectRevert(abi.encodeWithSelector(Verifier.ExcessSigners.selector, signers.length, MAX_ORACLES)); + s_verifier.setConfig( + FEED_ID, + signers, + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + } + + function test_revertsIfFaultToleranceIsZero() public { + vm.expectRevert(abi.encodeWithSelector(Verifier.FaultToleranceMustBePositive.selector)); + Signer[] memory signers = _getSigners(MAX_ORACLES); + s_verifier.setConfig( + FEED_ID, + _getSignerAddresses(signers), + s_offchaintransmitters, + 0, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + } + + function test_revertsIfNotEnoughSigners() public { + address[] memory signers = new address[](2); + signers[0] = address(1000); + signers[1] = address(1001); + + vm.expectRevert( + abi.encodeWithSelector(Verifier.InsufficientSigners.selector, signers.length, FAULT_TOLERANCE * 3 + 1) + ); + s_verifier.setConfig( + FEED_ID, + signers, + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + } + + function test_revertsIfDuplicateSigners() public { + Signer[] memory signers = _getSigners(MAX_ORACLES); + address[] memory signerAddrs = _getSignerAddresses(signers); + signerAddrs[0] = signerAddrs[1]; + vm.expectRevert(abi.encodeWithSelector(Verifier.NonUniqueSignatures.selector)); + s_verifier.setConfig( + FEED_ID, + signerAddrs, + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + } + + function test_revertsIfSignerContainsZeroAddress() public { + Signer[] memory signers = _getSigners(MAX_ORACLES); + address[] memory signerAddrs = _getSignerAddresses(signers); + signerAddrs[0] = address(0); + 
vm.expectRevert(abi.encodeWithSelector(Verifier.ZeroAddress.selector)); + s_verifier.setConfig( + FEED_ID, + signerAddrs, + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + } + + function test_correctlyUpdatesTheConfig() public { + Signer[] memory signers = _getSigners(MAX_ORACLES); + + s_verifierProxy.initializeVerifier(address(s_verifier)); + s_verifier.setConfig( + FEED_ID, + _getSignerAddresses(signers), + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + + bytes32 expectedConfigDigest = _configDigestFromConfigData( + FEED_ID, + block.chainid, + address(s_verifier), + 1, + _getSignerAddresses(signers), + s_offchaintransmitters, + FAULT_TOLERANCE, + bytes(""), + VERIFIER_VERSION, + bytes("") + ); + + (uint32 configCount, uint32 blockNumber, bytes32 configDigest) = s_verifier.latestConfigDetails(FEED_ID); + assertEq(configCount, 1); + assertEq(blockNumber, block.number); + assertEq(configDigest, expectedConfigDigest); + + (bool scanLogs, bytes32 configDigestTwo, uint32 epoch) = s_verifier.latestConfigDigestAndEpoch(FEED_ID); + assertEq(scanLogs, false); + assertEq(configDigestTwo, expectedConfigDigest); + assertEq(epoch, 0); + } +} + +contract VerifierSetConfigWhenThereAreMultipleDigestsTest is BaseTestWithMultipleConfiguredDigests { + function test_correctlyUpdatesTheDigestInTheProxy() public { + Signer[] memory newSigners = _getSigners(15); + + s_verifier.setConfig( + FEED_ID, + _getSignerAddresses(newSigners), + s_offchaintransmitters, + 4, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + + (, , bytes32 configDigest) = s_verifier.latestConfigDetails(FEED_ID); + address verifierAddr = s_verifierProxy.getVerifier(configDigest); + assertEq(verifierAddr, address(s_verifier)); + } + + function test_correctlyUpdatesDigestsOnMultipleVerifiersInTheProxy() public { + 
Signer[] memory newSigners = _getSigners(15); + + s_verifier.setConfig( + FEED_ID_2, + _getSignerAddresses(newSigners), + s_offchaintransmitters, + 4, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + + (, , bytes32 configDigest) = s_verifier.latestConfigDetails(FEED_ID_2); + address verifierAddr = s_verifierProxy.getVerifier(configDigest); + assertEq(verifierAddr, address(s_verifier)); + + s_verifier_2.setConfig( + FEED_ID_3, + _getSignerAddresses(newSigners), + s_offchaintransmitters, + 4, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + + (, , bytes32 configDigest2) = s_verifier_2.latestConfigDetails(FEED_ID_3); + address verifierAddr2 = s_verifierProxy.getVerifier(configDigest2); + assertEq(verifierAddr2, address(s_verifier_2)); + } + + function test_correctlySetsConfigWhenDigestsAreRemoved() public { + s_verifier.deactivateConfig(FEED_ID, s_configDigestTwo); + + Signer[] memory newSigners = _getSigners(15); + + s_verifier.setConfig( + FEED_ID, + _getSignerAddresses(newSigners), + s_offchaintransmitters, + 4, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + + bytes32 expectedConfigDigest = _configDigestFromConfigData( + FEED_ID, + block.chainid, + address(s_verifier), + s_numConfigsSet + 1, + _getSignerAddresses(newSigners), + s_offchaintransmitters, + 4, + bytes(""), + VERIFIER_VERSION, + bytes("") + ); + + (uint32 configCount, uint32 blockNumber, bytes32 configDigest) = s_verifier.latestConfigDetails(FEED_ID); + + assertEq(configCount, s_numConfigsSet + 1); + assertEq(blockNumber, block.number); + assertEq(configDigest, expectedConfigDigest); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierTest.t.sol new file mode 100644 index 00000000..2857b8f4 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierTest.t.sol @@ -0,0 +1,42 @@ +// 
SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTest} from "./BaseVerifierTest.t.sol"; +import {Verifier} from "../../Verifier.sol"; + +contract VerifierConstructorTest is BaseTest { + function test_revertsIfInitializedWithEmptyVerifierProxy() public { + vm.expectRevert(abi.encodeWithSelector(Verifier.ZeroAddress.selector)); + new Verifier(address(0)); + } + + function test_setsTheCorrectProperties() public { + Verifier v = new Verifier(address(s_verifierProxy)); + assertEq(v.owner(), ADMIN); + + (bool scanLogs, bytes32 configDigest, uint32 epoch) = v.latestConfigDigestAndEpoch(FEED_ID); + assertEq(scanLogs, false); + assertEq(configDigest, EMPTY_BYTES); + assertEq(epoch, 0); + + (uint32 configCount, uint32 blockNumber, bytes32 configDigestTwo) = v.latestConfigDetails(FEED_ID); + assertEq(configCount, 0); + assertEq(blockNumber, 0); + assertEq(configDigestTwo, EMPTY_BYTES); + + string memory typeAndVersion = s_verifier.typeAndVersion(); + assertEq(typeAndVersion, "Verifier 1.2.0"); + } +} + +contract VerifierSupportsInterfaceTest is BaseTest { + function test_falseIfIsNotCorrectInterface() public { + bool isInterface = s_verifier.supportsInterface(bytes4("abcd")); + assertEq(isInterface, false); + } + + function test_trueIfIsCorrectInterface() public { + bool isInterface = s_verifier.supportsInterface(Verifier.verify.selector); + assertEq(isInterface, true); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierTestBillingReport.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierTestBillingReport.t.sol new file mode 100644 index 00000000..90c367d9 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierTestBillingReport.t.sol @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTestWithConfiguredVerifierAndFeeManager} from "./BaseVerifierTest.t.sol"; + +contract VerifierTestWithConfiguredVerifierAndFeeManager is 
BaseTestWithConfiguredVerifierAndFeeManager { + uint256 internal constant DEFAULT_PLI_MINT_QUANTITY = 100 ether; + uint256 internal constant DEFAULT_NATIVE_MINT_QUANTITY = 100 ether; + + function setUp() public virtual override { + super.setUp(); + + //mint some tokens to the user + link.mint(USER, DEFAULT_PLI_MINT_QUANTITY); + native.mint(USER, DEFAULT_NATIVE_MINT_QUANTITY); + vm.deal(USER, DEFAULT_NATIVE_MINT_QUANTITY); + + //mint some link tokens to the feeManager pool + link.mint(address(feeManager), DEFAULT_REPORT_PLI_FEE); + } +} + +contract VerifierTestBillingReport is VerifierTestWithConfiguredVerifierAndFeeManager { + function test_verifyWithLink() public { + bytes memory signedReport = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + _approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE, USER); + + _verify(signedReport, address(link), 0, USER); + + assertEq(link.balanceOf(USER), DEFAULT_PLI_MINT_QUANTITY - DEFAULT_REPORT_PLI_FEE); + } + + function test_verifyWithNative() public { + bytes memory signedReport = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + _approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE, USER); + + _verify(signedReport, address(native), 0, USER); + + assertEq(native.balanceOf(USER), DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE); + assertEq(link.balanceOf(address(rewardManager)), DEFAULT_REPORT_PLI_FEE); + } + + function test_verifyWithNativeUnwrapped() public { + bytes memory signedReport = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + _verify(signedReport, address(native), DEFAULT_REPORT_NATIVE_FEE, USER); + + assertEq(USER.balance, DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE); + assertEq(address(feeManager).balance, 0); + } + + function 
test_verifyWithNativeUnwrappedReturnsChange() public { + bytes memory signedReport = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + _verify(signedReport, address(native), DEFAULT_REPORT_NATIVE_FEE * 2, USER); + + assertEq(USER.balance, DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE); + assertEq(address(feeManager).balance, 0); + } +} + +contract VerifierBulkVerifyBillingReport is VerifierTestWithConfiguredVerifierAndFeeManager { + uint256 internal constant NUMBERS_OF_REPORTS = 5; + + function test_verifyWithBulkLink() public { + bytes memory signedReport = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + bytes[] memory signedReports = new bytes[](NUMBERS_OF_REPORTS); + for (uint256 i = 0; i < NUMBERS_OF_REPORTS; i++) { + signedReports[i] = signedReport; + } + + _approveLink(address(rewardManager), DEFAULT_REPORT_PLI_FEE * NUMBERS_OF_REPORTS, USER); + + _verifyBulk(signedReports, address(link), 0, USER); + + assertEq(link.balanceOf(USER), DEFAULT_PLI_MINT_QUANTITY - DEFAULT_REPORT_PLI_FEE * NUMBERS_OF_REPORTS); + assertEq(link.balanceOf(address(rewardManager)), DEFAULT_REPORT_PLI_FEE * NUMBERS_OF_REPORTS); + } + + function test_verifyWithBulkNative() public { + bytes memory signedReport = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + bytes[] memory signedReports = new bytes[](NUMBERS_OF_REPORTS); + for (uint256 i = 0; i < NUMBERS_OF_REPORTS; i++) { + signedReports[i] = signedReport; + } + + _approveNative(address(feeManager), DEFAULT_REPORT_NATIVE_FEE * NUMBERS_OF_REPORTS, USER); + + _verifyBulk(signedReports, address(native), 0, USER); + + assertEq(native.balanceOf(USER), DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE * NUMBERS_OF_REPORTS); + } + + function test_verifyWithBulkNativeUnwrapped() 
public { + bytes memory signedReport = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + bytes[] memory signedReports = new bytes[](NUMBERS_OF_REPORTS); + for (uint256 i; i < NUMBERS_OF_REPORTS; i++) { + signedReports[i] = signedReport; + } + + _verifyBulk(signedReports, address(native), DEFAULT_REPORT_NATIVE_FEE * NUMBERS_OF_REPORTS, USER); + + assertEq(USER.balance, DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE * 5); + assertEq(address(feeManager).balance, 0); + } + + function test_verifyWithBulkNativeUnwrappedReturnsChange() public { + bytes memory signedReport = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + bytes[] memory signedReports = new bytes[](NUMBERS_OF_REPORTS); + for (uint256 i = 0; i < NUMBERS_OF_REPORTS; i++) { + signedReports[i] = signedReport; + } + + _verifyBulk(signedReports, address(native), DEFAULT_REPORT_NATIVE_FEE * (NUMBERS_OF_REPORTS * 2), USER); + + assertEq(USER.balance, DEFAULT_NATIVE_MINT_QUANTITY - DEFAULT_REPORT_NATIVE_FEE * NUMBERS_OF_REPORTS); + assertEq(address(feeManager).balance, 0); + } + + function test_verifyMultiVersions() public { + bytes memory signedReportV1 = _generateV1EncodedBlob( + _generateV1Report(), + _generateReportContext(v1ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + bytes memory signedReportV3 = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + bytes[] memory signedReports = new bytes[](3); + + signedReports[0] = signedReportV1; + signedReports[1] = signedReportV3; + signedReports[2] = signedReportV3; + + _approveLink(address(rewardManager), 2 * DEFAULT_REPORT_PLI_FEE, USER); + + _verifyBulk(signedReports, address(link), 0, USER); + + assertEq(link.balanceOf(USER), DEFAULT_PLI_MINT_QUANTITY - 2 * DEFAULT_REPORT_PLI_FEE); + 
assertEq(native.balanceOf(USER), DEFAULT_NATIVE_MINT_QUANTITY); + assertEq(link.balanceOf(address(rewardManager)), DEFAULT_REPORT_PLI_FEE * 2); + } + + function test_verifyMultiVersionsReturnsVerifiedReports() public { + bytes memory signedReportV1 = _generateV1EncodedBlob( + _generateV1Report(), + _generateReportContext(v1ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + bytes memory signedReportV3 = _generateV3EncodedBlob( + _generateV3Report(), + _generateReportContext(v3ConfigDigest), + _getSigners(FAULT_TOLERANCE + 1) + ); + + bytes[] memory signedReports = new bytes[](3); + + signedReports[0] = signedReportV1; + signedReports[1] = signedReportV3; + signedReports[2] = signedReportV3; + + _approveLink(address(rewardManager), 2 * DEFAULT_REPORT_PLI_FEE, USER); + + address originalAddr = msg.sender; + changePrank(USER); + + bytes[] memory verifierReports = s_verifierProxy.verifyBulk{value: 0}(signedReports, abi.encode(link)); + + changePrank(originalAddr); + + assertEq(verifierReports[0], _encodeReport(_generateV1Report())); + assertEq(verifierReports[1], _encodeReport(_generateV3Report())); + assertEq(verifierReports[2], _encodeReport(_generateV3Report())); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierUnsetConfigTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierUnsetConfigTest.t.sol new file mode 100644 index 00000000..cc3c3333 --- /dev/null +++ b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierUnsetConfigTest.t.sol @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTestWithMultipleConfiguredDigests} from "./BaseVerifierTest.t.sol"; +import {Verifier} from "../../Verifier.sol"; + +contract VerificationdeactivateConfigWhenThereAreMultipleDigestsTest is BaseTestWithMultipleConfiguredDigests { + function test_revertsIfCalledByNonOwner() public { + vm.expectRevert("Only callable by owner"); + + changePrank(USER); + s_verifier.deactivateConfig(FEED_ID, bytes32("")); + } 
+ + function test_revertsIfRemovingAnEmptyDigest() public { + vm.expectRevert(abi.encodeWithSelector(Verifier.DigestEmpty.selector)); + s_verifier.deactivateConfig(FEED_ID, bytes32("")); + } + + function test_revertsIfRemovingAnNonExistentDigest() public { + vm.expectRevert(abi.encodeWithSelector(Verifier.DigestNotSet.selector, FEED_ID, bytes32("mock-digest"))); + s_verifier.deactivateConfig(FEED_ID, bytes32("mock-digest")); + } + + function test_correctlyRemovesAMiddleDigest() public { + s_verifier.deactivateConfig(FEED_ID, s_configDigestTwo); + (, , bytes32 lastConfigDigest) = s_verifier.latestConfigDetails(FEED_ID); + assertEq(lastConfigDigest, s_configDigestThree); + } + + function test_correctlyRemovesTheFirstDigest() public { + s_verifier.deactivateConfig(FEED_ID, s_configDigestOne); + (, , bytes32 lastConfigDigest) = s_verifier.latestConfigDetails(FEED_ID); + assertEq(lastConfigDigest, s_configDigestThree); + } + + function test_correctlyUnsetsDigestsInSequence() public { + // Delete config digest 2 + s_verifier.deactivateConfig(FEED_ID, s_configDigestTwo); + (, , bytes32 lastConfigDigest) = s_verifier.latestConfigDetails(FEED_ID); + assertEq(lastConfigDigest, s_configDigestThree); + + // Delete config digest 1 + s_verifier.deactivateConfig(FEED_ID, s_configDigestOne); + (, , lastConfigDigest) = s_verifier.latestConfigDetails(FEED_ID); + assertEq(lastConfigDigest, s_configDigestThree); + + // Delete config digest 3 + vm.expectRevert( + abi.encodeWithSelector(Verifier.CannotDeactivateLatestConfig.selector, FEED_ID, s_configDigestThree) + ); + s_verifier.deactivateConfig(FEED_ID, s_configDigestThree); + (, , lastConfigDigest) = s_verifier.latestConfigDetails(FEED_ID); + assertEq(lastConfigDigest, s_configDigestThree); + } +} diff --git a/contracts/src/v0.8/llo-feeds/test/verifier/VerifierVerifyTest.t.sol b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierVerifyTest.t.sol new file mode 100644 index 00000000..db7be5ca --- /dev/null +++ 
b/contracts/src/v0.8/llo-feeds/test/verifier/VerifierVerifyTest.t.sol @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity 0.8.19; + +import {BaseTestWithConfiguredVerifierAndFeeManager} from "./BaseVerifierTest.t.sol"; +import {Verifier} from "../../Verifier.sol"; +import {VerifierProxy} from "../../VerifierProxy.sol"; +import {AccessControllerInterface} from "../../../shared/interfaces/AccessControllerInterface.sol"; +import {Common} from "../../libraries/Common.sol"; + +contract VerifierVerifyTest is BaseTestWithConfiguredVerifierAndFeeManager { + bytes32[3] internal s_reportContext; + + event ReportVerified(bytes32 indexed feedId, address requester); + + V1Report internal s_testReportOne; + + function setUp() public virtual override { + BaseTestWithConfiguredVerifierAndFeeManager.setUp(); + (, , bytes32 configDigest) = s_verifier.latestConfigDetails(FEED_ID); + s_reportContext[0] = configDigest; + s_reportContext[1] = bytes32(abi.encode(uint32(5), uint8(1))); + s_testReportOne = _createV1Report( + FEED_ID, + OBSERVATIONS_TIMESTAMP, + MEDIAN, + BID, + ASK, + BLOCKNUMBER_UPPER_BOUND, + blockhash(BLOCKNUMBER_UPPER_BOUND), + BLOCKNUMBER_LOWER_BOUND, + uint32(block.timestamp) + ); + } + + function assertReportsEqual(bytes memory response, V1Report memory testReport) public { + ( + bytes32 feedId, + uint32 timestamp, + int192 median, + int192 bid, + int192 ask, + uint64 blockNumUB, + bytes32 upperBlockhash, + uint64 blockNumLB + ) = abi.decode(response, (bytes32, uint32, int192, int192, int192, uint64, bytes32, uint64)); + assertEq(feedId, testReport.feedId); + assertEq(timestamp, testReport.observationsTimestamp); + assertEq(median, testReport.median); + assertEq(bid, testReport.bid); + assertEq(ask, testReport.ask); + assertEq(blockNumLB, testReport.blocknumberLowerBound); + assertEq(blockNumUB, testReport.blocknumberUpperBound); + assertEq(upperBlockhash, testReport.upperBlockhash); + } +} + +contract VerifierProxyVerifyTest is 
VerifierVerifyTest { + function test_revertsIfNoVerifierConfigured() public { + s_reportContext[0] = bytes32("corrupt-digest"); + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + vm.expectRevert(abi.encodeWithSelector(VerifierProxy.VerifierNotFound.selector, bytes32("corrupt-digest"))); + s_verifierProxy.verify(signedReport, bytes("")); + } + + function test_proxiesToTheCorrectVerifier() public { + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + + bytes memory response = s_verifierProxy.verify(signedReport, abi.encode(native)); + assertReportsEqual(response, s_testReportOne); + } +} + +contract VerifierProxyAccessControlledVerificationTest is VerifierVerifyTest { + function setUp() public override { + VerifierVerifyTest.setUp(); + AccessControllerInterface accessController = AccessControllerInterface(ACCESS_CONTROLLER_ADDRESS); + + s_verifierProxy.setAccessController(accessController); + } + + function test_revertsIfNoAccess() public { + vm.mockCall( + ACCESS_CONTROLLER_ADDRESS, + abi.encodeWithSelector(AccessControllerInterface.hasAccess.selector, USER), + abi.encode(false) + ); + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + vm.expectRevert(abi.encodeWithSelector(VerifierProxy.AccessForbidden.selector)); + + changePrank(USER); + s_verifierProxy.verify(signedReport, abi.encode(native)); + } + + function test_proxiesToTheVerifierIfHasAccess() public { + vm.mockCall( + ACCESS_CONTROLLER_ADDRESS, + abi.encodeWithSelector(AccessControllerInterface.hasAccess.selector, USER), + abi.encode(true) + ); + + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + + changePrank(USER); + bytes memory response = s_verifierProxy.verify(signedReport, 
bytes("")); + assertReportsEqual(response, s_testReportOne); + } +} + +contract VerifierVerifySingleConfigDigestTest is VerifierVerifyTest { + function test_revertsIfVerifiedByNonProxy() public { + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + vm.expectRevert(abi.encodeWithSelector(Verifier.AccessForbidden.selector)); + s_verifier.verify(signedReport, msg.sender); + } + + function test_revertsIfVerifiedWithIncorrectAddresses() public { + Signer[] memory signers = _getSigners(FAULT_TOLERANCE + 1); + signers[10].mockPrivateKey = 1234; + bytes memory signedReport = _generateV1EncodedBlob(s_testReportOne, s_reportContext, signers); + changePrank(address(s_verifierProxy)); + vm.expectRevert(abi.encodeWithSelector(Verifier.BadVerification.selector)); + s_verifier.verify(signedReport, msg.sender); + } + + function test_revertsIfMismatchedSignatureLength() public { + bytes32[] memory rs = new bytes32[](FAULT_TOLERANCE + 1); + bytes32[] memory ss = new bytes32[](FAULT_TOLERANCE + 3); + bytes32 rawVs = bytes32(""); + bytes memory signedReport = abi.encode(s_reportContext, abi.encode(s_testReportOne), rs, ss, rawVs); + changePrank(address(s_verifierProxy)); + vm.expectRevert(abi.encodeWithSelector(Verifier.MismatchedSignatures.selector, rs.length, ss.length)); + s_verifier.verify(signedReport, msg.sender); + } + + function test_revertsIfConfigDigestNotSet() public { + bytes32[3] memory reportContext = s_reportContext; + reportContext[0] = bytes32("wrong-context-digest"); + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + vm.expectRevert(abi.encodeWithSelector(Verifier.DigestInactive.selector, FEED_ID, reportContext[0])); + changePrank(address(s_verifierProxy)); + s_verifier.verify(signedReport, msg.sender); + } + + function test_revertsIfReportHasUnconfiguredFeedID() public { + V1Report memory report = 
_createV1Report( + FEED_ID_2, + OBSERVATIONS_TIMESTAMP, + MEDIAN, + BID, + ASK, + BLOCKNUMBER_UPPER_BOUND, + blockhash(BLOCKNUMBER_UPPER_BOUND), + BLOCKNUMBER_LOWER_BOUND, + uint32(block.timestamp) + ); + bytes memory signedReport = _generateV1EncodedBlob(report, s_reportContext, _getSigners(FAULT_TOLERANCE + 1)); + vm.expectRevert(abi.encodeWithSelector(Verifier.DigestInactive.selector, FEED_ID_2, s_reportContext[0])); + changePrank(address(s_verifierProxy)); + s_verifier.verify(signedReport, msg.sender); + } + + function test_revertsIfWrongNumberOfSigners() public { + bytes memory signedReport = _generateV1EncodedBlob(s_testReportOne, s_reportContext, _getSigners(10)); + vm.expectRevert(abi.encodeWithSelector(Verifier.IncorrectSignatureCount.selector, 10, FAULT_TOLERANCE + 1)); + changePrank(address(s_verifierProxy)); + s_verifier.verify(signedReport, msg.sender); + } + + function test_revertsIfDuplicateSignersHaveSigned() public { + Signer[] memory signers = _getSigners(FAULT_TOLERANCE + 1); + // Duplicate signer at index 1 + signers[0] = signers[1]; + bytes memory signedReport = _generateV1EncodedBlob(s_testReportOne, s_reportContext, signers); + vm.expectRevert(abi.encodeWithSelector(Verifier.BadVerification.selector)); + changePrank(address(s_verifierProxy)); + s_verifier.verify(signedReport, msg.sender); + } + + function test_returnsThePriceAndBlockNumIfReportVerified() public { + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + changePrank(address(s_verifierProxy)); + bytes memory response = s_verifier.verify(signedReport, msg.sender); + + assertReportsEqual(response, s_testReportOne); + } + + function test_setsTheCorrectEpoch() public { + s_reportContext[1] = bytes32(uint256(5 << 8)); + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + changePrank(address(s_verifierProxy)); + 
s_verifier.verify(signedReport, msg.sender); + + (, , uint32 latestEpoch) = s_verifier.latestConfigDigestAndEpoch(FEED_ID); + assertEq(latestEpoch, 5); + } + + function test_emitsAnEventIfReportVerified() public { + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + vm.expectEmit(true, true, true, true, address(s_verifier)); + emit ReportVerified(s_testReportOne.feedId, msg.sender); + changePrank(address(s_verifierProxy)); + s_verifier.verify(signedReport, msg.sender); + } +} + +contract VerifierVerifyMultipleConfigDigestTest is VerifierVerifyTest { + bytes32 internal s_oldConfigDigest; + bytes32 internal s_newConfigDigest; + + uint8 internal constant FAULT_TOLERANCE_TWO = 5; + + function setUp() public override { + VerifierVerifyTest.setUp(); + (, , s_oldConfigDigest) = s_verifier.latestConfigDetails(FEED_ID); + s_verifier.setConfig( + FEED_ID, + _getSignerAddresses(_getSigners(20)), + s_offchaintransmitters, + FAULT_TOLERANCE_TWO, + bytes(""), + VERIFIER_VERSION, + bytes(""), + new Common.AddressAndWeight[](0) + ); + (, , s_newConfigDigest) = s_verifier.latestConfigDetails(FEED_ID); + } + + function test_revertsIfVerifyingWithAnUnsetDigest() public { + s_verifier.deactivateConfig(FEED_ID, (s_oldConfigDigest)); + + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + changePrank(address(s_verifierProxy)); + vm.expectRevert(abi.encodeWithSelector(Verifier.DigestInactive.selector, FEED_ID, s_reportContext[0])); + s_verifier.verify(signedReport, msg.sender); + } + + function test_canVerifyOlderReportsWithOlderConfigs() public { + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE + 1) + ); + changePrank(address(s_verifierProxy)); + bytes memory response = s_verifier.verify(signedReport, msg.sender); + assertReportsEqual(response, 
s_testReportOne); + } + + function test_canVerifyNewerReportsWithNewerConfigs() public { + s_reportContext[0] = s_newConfigDigest; + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE_TWO + 1) + ); + changePrank(address(s_verifierProxy)); + bytes memory response = s_verifier.verify(signedReport, msg.sender); + assertReportsEqual(response, s_testReportOne); + } + + function test_revertsIfAReportIsVerifiedWithAnExistingButIncorrectDigest() public { + // Try sending the older digest signed with the new set of signers + s_reportContext[0] = s_oldConfigDigest; + bytes memory signedReport = _generateV1EncodedBlob( + s_testReportOne, + s_reportContext, + _getSigners(FAULT_TOLERANCE_TWO + 1) + ); + vm.expectRevert( + abi.encodeWithSelector(Verifier.IncorrectSignatureCount.selector, FAULT_TOLERANCE_TWO + 1, FAULT_TOLERANCE + 1) + ); + changePrank(address(s_verifierProxy)); + s_verifier.verify(signedReport, msg.sender); + } +} diff --git a/contracts/src/v0.8/mocks/FunctionsBillingRegistryEventsMock.sol b/contracts/src/v0.8/mocks/FunctionsBillingRegistryEventsMock.sol new file mode 100644 index 00000000..753ba91b --- /dev/null +++ b/contracts/src/v0.8/mocks/FunctionsBillingRegistryEventsMock.sol @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT +// Warning: this is an autogenerated file! DO NOT EDIT. 
+ +pragma solidity ^0.8.6; + +contract FunctionsBillingRegistryEventsMock { + struct Commitment {uint64 subscriptionId;address client;uint32 gasLimit;uint256 gasPrice;address don;uint96 donFee;uint96 registryFee;uint96 estimatedCost;uint256 timestamp; } + event AuthorizedSendersChanged(address[] senders,address changedBy); + event BillingEnd(bytes32 indexed requestId,uint64 subscriptionId,uint96 signerPayment,uint96 transmitterPayment,uint96 totalCost,bool success); + event BillingStart(bytes32 indexed requestId,Commitment commitment); + event ConfigSet(uint32 maxGasLimit,uint32 stalenessSeconds,uint256 gasAfterPaymentCalculation,int256 fallbackWeiPerUnitLink,uint32 gasOverhead); + event FundsRecovered(address to,uint256 amount); + event Initialized(uint8 version); + event OwnershipTransferRequested(address indexed from,address indexed to); + event OwnershipTransferred(address indexed from,address indexed to); + event Paused(address account); + event RequestTimedOut(bytes32 indexed requestId); + event SubscriptionCanceled(uint64 indexed subscriptionId,address to,uint256 amount); + event SubscriptionConsumerAdded(uint64 indexed subscriptionId,address consumer); + event SubscriptionConsumerRemoved(uint64 indexed subscriptionId,address consumer); + event SubscriptionCreated(uint64 indexed subscriptionId,address owner); + event SubscriptionFunded(uint64 indexed subscriptionId,uint256 oldBalance,uint256 newBalance); + event SubscriptionOwnerTransferRequested(uint64 indexed subscriptionId,address from,address to); + event SubscriptionOwnerTransferred(uint64 indexed subscriptionId,address from,address to); + event Unpaused(address account); + function emitAuthorizedSendersChanged(address[] memory senders,address changedBy) public { + emit AuthorizedSendersChanged(senders,changedBy); + } + function emitBillingEnd(bytes32 requestId,uint64 subscriptionId,uint96 signerPayment,uint96 transmitterPayment,uint96 totalCost,bool success) public { + emit 
BillingEnd(requestId,subscriptionId,signerPayment,transmitterPayment,totalCost,success); + } + function emitBillingStart(bytes32 requestId,Commitment memory commitment) public { + emit BillingStart(requestId,commitment); + } + function emitConfigSet(uint32 maxGasLimit,uint32 stalenessSeconds,uint256 gasAfterPaymentCalculation,int256 fallbackWeiPerUnitLink,uint32 gasOverhead) public { + emit ConfigSet(maxGasLimit,stalenessSeconds,gasAfterPaymentCalculation,fallbackWeiPerUnitLink,gasOverhead); + } + function emitFundsRecovered(address to,uint256 amount) public { + emit FundsRecovered(to,amount); + } + function emitInitialized(uint8 version) public { + emit Initialized(version); + } + function emitOwnershipTransferRequested(address from,address to) public { + emit OwnershipTransferRequested(from,to); + } + function emitOwnershipTransferred(address from,address to) public { + emit OwnershipTransferred(from,to); + } + function emitPaused(address account) public { + emit Paused(account); + } + function emitRequestTimedOut(bytes32 requestId) public { + emit RequestTimedOut(requestId); + } + function emitSubscriptionCanceled(uint64 subscriptionId,address to,uint256 amount) public { + emit SubscriptionCanceled(subscriptionId,to,amount); + } + function emitSubscriptionConsumerAdded(uint64 subscriptionId,address consumer) public { + emit SubscriptionConsumerAdded(subscriptionId,consumer); + } + function emitSubscriptionConsumerRemoved(uint64 subscriptionId,address consumer) public { + emit SubscriptionConsumerRemoved(subscriptionId,consumer); + } + function emitSubscriptionCreated(uint64 subscriptionId,address owner) public { + emit SubscriptionCreated(subscriptionId,owner); + } + function emitSubscriptionFunded(uint64 subscriptionId,uint256 oldBalance,uint256 newBalance) public { + emit SubscriptionFunded(subscriptionId,oldBalance,newBalance); + } + function emitSubscriptionOwnerTransferRequested(uint64 subscriptionId,address from,address to) public { + emit 
SubscriptionOwnerTransferRequested(subscriptionId,from,to); + } + function emitSubscriptionOwnerTransferred(uint64 subscriptionId,address from,address to) public { + emit SubscriptionOwnerTransferred(subscriptionId,from,to); + } + function emitUnpaused(address account) public { + emit Unpaused(account); + } +} diff --git a/contracts/src/v0.8/mocks/FunctionsOracleEventsMock.sol b/contracts/src/v0.8/mocks/FunctionsOracleEventsMock.sol new file mode 100644 index 00000000..c68de7fa --- /dev/null +++ b/contracts/src/v0.8/mocks/FunctionsOracleEventsMock.sol @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: MIT +// Warning: this is an autogenerated file! DO NOT EDIT. + +pragma solidity ^0.8.6; + +contract FunctionsOracleEventsMock { + event AuthorizedSendersActive(address account); + event AuthorizedSendersChanged(address[] senders,address changedBy); + event AuthorizedSendersDeactive(address account); + event ConfigSet(uint32 previousConfigBlockNumber,bytes32 configDigest,uint64 configCount,address[] signers,address[] transmitters,uint8 f,bytes onchainConfig,uint64 offchainConfigVersion,bytes offchainConfig); + event Initialized(uint8 version); + event InvalidRequestID(bytes32 indexed requestId); + event OracleRequest(bytes32 indexed requestId,address requestingContract,address requestInitiator,uint64 subscriptionId,address subscriptionOwner,bytes data); + event OracleResponse(bytes32 indexed requestId); + event OwnershipTransferRequested(address indexed from,address indexed to); + event OwnershipTransferred(address indexed from,address indexed to); + event ResponseTransmitted(bytes32 indexed requestId,address transmitter); + event Transmitted(bytes32 configDigest,uint32 epoch); + event UserCallbackError(bytes32 indexed requestId,string reason); + event UserCallbackRawError(bytes32 indexed requestId,bytes lowLevelData); + function emitAuthorizedSendersActive(address account) public { + emit AuthorizedSendersActive(account); + } + function 
emitAuthorizedSendersChanged(address[] memory senders,address changedBy) public { + emit AuthorizedSendersChanged(senders,changedBy); + } + function emitAuthorizedSendersDeactive(address account) public { + emit AuthorizedSendersDeactive(account); + } + function emitConfigSet(uint32 previousConfigBlockNumber,bytes32 configDigest,uint64 configCount,address[] memory signers,address[] memory transmitters,uint8 f,bytes memory onchainConfig,uint64 offchainConfigVersion,bytes memory offchainConfig) public { + emit ConfigSet(previousConfigBlockNumber,configDigest,configCount,signers,transmitters,f,onchainConfig,offchainConfigVersion,offchainConfig); + } + function emitInitialized(uint8 version) public { + emit Initialized(version); + } + function emitInvalidRequestID(bytes32 requestId) public { + emit InvalidRequestID(requestId); + } + function emitOracleRequest(bytes32 requestId,address requestingContract,address requestInitiator,uint64 subscriptionId,address subscriptionOwner,bytes memory data) public { + emit OracleRequest(requestId,requestingContract,requestInitiator,subscriptionId,subscriptionOwner,data); + } + function emitOracleResponse(bytes32 requestId) public { + emit OracleResponse(requestId); + } + function emitOwnershipTransferRequested(address from,address to) public { + emit OwnershipTransferRequested(from,to); + } + function emitOwnershipTransferred(address from,address to) public { + emit OwnershipTransferred(from,to); + } + function emitResponseTransmitted(bytes32 requestId,address transmitter) public { + emit ResponseTransmitted(requestId,transmitter); + } + function emitTransmitted(bytes32 configDigest,uint32 epoch) public { + emit Transmitted(configDigest,epoch); + } + function emitUserCallbackError(bytes32 requestId,string memory reason) public { + emit UserCallbackError(requestId,reason); + } + function emitUserCallbackRawError(bytes32 requestId,bytes memory lowLevelData) public { + emit UserCallbackRawError(requestId,lowLevelData); + } +} diff 
--git a/contracts/src/v0.8/mocks/MockAggregatorValidator.sol b/contracts/src/v0.8/mocks/MockAggregatorValidator.sol new file mode 100644 index 00000000..bdc935cd --- /dev/null +++ b/contracts/src/v0.8/mocks/MockAggregatorValidator.sol @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "../shared/interfaces/AggregatorValidatorInterface.sol"; + +contract MockAggregatorValidator is AggregatorValidatorInterface { + uint8 immutable id; + + constructor(uint8 id_) { + id = id_; + } + + event ValidateCalled( + uint8 id, + uint256 previousRoundId, + int256 previousAnswer, + uint256 currentRoundId, + int256 currentAnswer + ); + + function validate( + uint256 previousRoundId, + int256 previousAnswer, + uint256 currentRoundId, + int256 currentAnswer + ) external override returns (bool) { + emit ValidateCalled(id, previousRoundId, previousAnswer, currentRoundId, currentAnswer); + return true; + } +} diff --git a/contracts/src/v0.8/mocks/MockArbSys.sol b/contracts/src/v0.8/mocks/MockArbSys.sol new file mode 100644 index 00000000..fbfdf0f4 --- /dev/null +++ b/contracts/src/v0.8/mocks/MockArbSys.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +/** + * @dev this contract mocks the arbitrum precompiled ArbSys contract + * https://developer.arbitrum.io/arbos/precompiles#ArbSys + */ +contract MockArbSys { + function arbBlockNumber() public view returns (uint256) { + return block.number; + } + + function arbBlockHash(uint256 arbBlockNum) external view returns (bytes32) { + return blockhash(arbBlockNum); + } +} diff --git a/contracts/src/v0.8/mocks/MockLinkToken.sol b/contracts/src/v0.8/mocks/MockLinkToken.sol new file mode 100644 index 00000000..6b42d716 --- /dev/null +++ b/contracts/src/v0.8/mocks/MockLinkToken.sol @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {IERC677Receiver} from "../shared/interfaces/IERC677Receiver.sol"; + +contract MockLinkToken { + uint256 private constant 
TOTAL_SUPPLY = 1_000_000_000 * 1e18; + + constructor() { + balances[msg.sender] = TOTAL_SUPPLY; + } + + mapping(address => uint256) public balances; + + function totalSupply() external pure returns (uint256 totalTokensIssued) { + return TOTAL_SUPPLY; // 1 billion PLI -> 1e27 Juels + } + + function transfer(address _to, uint256 _value) public returns (bool) { + balances[msg.sender] = balances[msg.sender] - _value; + balances[_to] = balances[_to] + _value; + return true; + } + + function setBalance(address _address, uint256 _value) external returns (bool) { + balances[_address] = _value; + return true; + } + + function balanceOf(address _address) external view returns (uint256) { + return balances[_address]; + } + + function transferAndCall(address _to, uint256 _value, bytes calldata _data) public returns (bool success) { + transfer(_to, _value); + if (isContract(_to)) { + contractFallback(_to, _value, _data); + } + return true; + } + + function isContract(address _addr) private view returns (bool hasCode) { + uint256 length; + assembly { + length := extcodesize(_addr) + } + return length > 0; + } + + function contractFallback(address _to, uint256 _value, bytes calldata _data) private { + IERC677Receiver receiver = IERC677Receiver(_to); + receiver.onTokenTransfer(msg.sender, _value, _data); + } +} diff --git a/contracts/src/v0.8/mocks/MockOffchainAggregator.sol b/contracts/src/v0.8/mocks/MockOffchainAggregator.sol new file mode 100644 index 00000000..5366bbee --- /dev/null +++ b/contracts/src/v0.8/mocks/MockOffchainAggregator.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +contract MockOffchainAggregator { + event RoundIdUpdated(uint80 roundId); + + uint80 public roundId; + + function requestNewRound() external returns (uint80) { + roundId++; + emit RoundIdUpdated(roundId); + return roundId; + } +} diff --git a/contracts/src/v0.8/operatorforwarder/dev/AuthorizedForwarder.sol 
b/contracts/src/v0.8/operatorforwarder/dev/AuthorizedForwarder.sol new file mode 100644 index 00000000..015a725e --- /dev/null +++ b/contracts/src/v0.8/operatorforwarder/dev/AuthorizedForwarder.sol @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {ConfirmedOwnerWithProposal} from "../../shared/access/ConfirmedOwnerWithProposal.sol"; +import {AuthorizedReceiver} from "./AuthorizedReceiver.sol"; +import {Address} from "@openzeppelin/contracts/utils/Address.sol"; + +// solhint-disable custom-errors +contract AuthorizedForwarder is ConfirmedOwnerWithProposal, AuthorizedReceiver { + using Address for address; + + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + address public immutable linkToken; + + event OwnershipTransferRequestedWithMessage(address indexed from, address indexed to, bytes message); + + constructor( + address link, + address owner, + address recipient, + bytes memory message + ) ConfirmedOwnerWithProposal(owner, recipient) { + require(link != address(0), "Link token cannot be a zero address"); + linkToken = link; + if (recipient != address(0)) { + emit OwnershipTransferRequestedWithMessage(owner, recipient, message); + } + } + + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant typeAndVersion = "AuthorizedForwarder 1.1.0"; + + // @notice Forward a call to another contract + // @dev Only callable by an authorized sender + // @param to address + // @param data to forward + function forward(address to, bytes calldata data) external validateAuthorizedSender { + require(to != linkToken, "Cannot forward to Link token"); + _forward(to, data); + } + + // @notice Forward multiple calls to other contracts in a multicall style + // @dev Only callable by an authorized sender + // @param tos An array of addresses to forward the calls to + // @param datas An array of data to forward to each corresponding address + function 
multiForward(address[] calldata tos, bytes[] calldata datas) external validateAuthorizedSender { + require(tos.length == datas.length, "Arrays must have the same length"); + + for (uint256 i = 0; i < tos.length; ++i) { + address to = tos[i]; + require(to != linkToken, "Cannot forward to Link token"); + + // Perform the forward operation + _forward(to, datas[i]); + } + } + + // @notice Forward a call to another contract + // @dev Only callable by the owner + // @param to address + // @param data to forward + function ownerForward(address to, bytes calldata data) external onlyOwner { + _forward(to, data); + } + + // @notice Transfer ownership with instructions for recipient + // @param to address proposed recipient of ownership + // @param message instructions for recipient upon accepting ownership + function transferOwnershipWithMessage(address to, bytes calldata message) external { + transferOwnership(to); + emit OwnershipTransferRequestedWithMessage(msg.sender, to, message); + } + + // @notice concrete implementation of AuthorizedReceiver + // @return bool of whether sender is authorized + function _canSetAuthorizedSenders() internal view override returns (bool) { + return owner() == msg.sender; + } + + // @notice common forwarding functionality and validation + function _forward(address to, bytes calldata data) private { + require(to.isContract(), "Must forward to a contract"); + // solhint-disable-next-line avoid-low-level-calls + (bool success, bytes memory result) = to.call(data); + if (!success) { + if (result.length == 0) revert("Forwarded call reverted without reason"); + assembly { + revert(add(32, result), mload(result)) + } + } + } +} diff --git a/contracts/src/v0.8/operatorforwarder/dev/AuthorizedReceiver.sol b/contracts/src/v0.8/operatorforwarder/dev/AuthorizedReceiver.sol new file mode 100644 index 00000000..ec27ca10 --- /dev/null +++ b/contracts/src/v0.8/operatorforwarder/dev/AuthorizedReceiver.sol @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: MIT 
+pragma solidity 0.8.19; + +import {AuthorizedReceiverInterface} from "./interfaces/AuthorizedReceiverInterface.sol"; + +// solhint-disable custom-errors +abstract contract AuthorizedReceiver is AuthorizedReceiverInterface { + mapping(address sender => bool authorized) private s_authorizedSenders; + address[] private s_authorizedSenderList; + + event AuthorizedSendersChanged(address[] senders, address changedBy); + + // @notice Sets the fulfillment permission for a given node. Use `true` to allow, `false` to disallow. + // @param senders The addresses of the authorized Plugin node + function setAuthorizedSenders(address[] calldata senders) external override validateAuthorizedSenderSetter { + require(senders.length > 0, "Must have at least 1 sender"); + // Set previous authorized senders to false + uint256 authorizedSendersLength = s_authorizedSenderList.length; + for (uint256 i = 0; i < authorizedSendersLength; ++i) { + s_authorizedSenders[s_authorizedSenderList[i]] = false; + } + // Set new to true + for (uint256 i = 0; i < senders.length; ++i) { + require(s_authorizedSenders[senders[i]] == false, "Must not have duplicate senders"); + s_authorizedSenders[senders[i]] = true; + } + // Replace list + s_authorizedSenderList = senders; + emit AuthorizedSendersChanged(senders, msg.sender); + } + + // @notice Retrieve a list of authorized senders + // @return array of addresses + function getAuthorizedSenders() external view override returns (address[] memory) { + return s_authorizedSenderList; + } + + // @notice Use this to check if a node is authorized for fulfilling requests + // @param sender The address of the Plugin node + // @return The authorization status of the node + function isAuthorizedSender(address sender) public view override returns (bool) { + return s_authorizedSenders[sender]; + } + + // @notice customizable guard of who can update the authorized sender list + // @return bool whether sender can update authorized sender list + function 
_canSetAuthorizedSenders() internal virtual returns (bool); + + // @notice validates the sender is an authorized sender + function _validateIsAuthorizedSender() internal view { + require(isAuthorizedSender(msg.sender), "Not authorized sender"); + } + + // @notice prevents non-authorized addresses from calling this method + modifier validateAuthorizedSender() { + _validateIsAuthorizedSender(); + _; + } + + // @notice prevents non-authorized addresses from calling this method + modifier validateAuthorizedSenderSetter() { + require(_canSetAuthorizedSenders(), "Cannot set authorized senders"); + _; + } +} diff --git a/contracts/src/v0.8/operatorforwarder/dev/LinkTokenReceiver.sol b/contracts/src/v0.8/operatorforwarder/dev/LinkTokenReceiver.sol new file mode 100644 index 00000000..ab1e7fae --- /dev/null +++ b/contracts/src/v0.8/operatorforwarder/dev/LinkTokenReceiver.sol @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +// solhint-disable custom-errors +abstract contract LinkTokenReceiver { + // @notice Called when PLI is sent to the contract via `transferAndCall` + // @dev The data payload's first 2 words will be overwritten by the `sender` and `amount` + // values to ensure correctness. Calls oracleRequest. 
+ // @param sender Address of the sender + // @param amount Amount of PLI sent (specified in wei) + // @param data Payload of the transaction + function onTokenTransfer( + address sender, + uint256 amount, + bytes memory data + ) public validateFromPLI permittedFunctionsForPLI(data) { + assembly { + // solhint-disable-next-line avoid-low-level-calls + mstore(add(data, 36), sender) // ensure correct sender is passed + // solhint-disable-next-line avoid-low-level-calls + mstore(add(data, 68), amount) // ensure correct amount is passed + } + // solhint-disable-next-line avoid-low-level-calls + (bool success, ) = address(this).delegatecall(data); // calls oracleRequest + require(success, "Unable to create request"); + } + + function getPluginToken() public view virtual returns (address); + + // @notice Validate the function called on token transfer + function _validateTokenTransferAction(bytes4 funcSelector, bytes memory data) internal virtual; + + // @dev Reverts if not sent from the PLI token + modifier validateFromPLI() { + require(msg.sender == getPluginToken(), "Must use PLI token"); + _; + } + + // @dev Reverts if the given data does not begin with the `oracleRequest` function selector + // @param data The data payload of the request + modifier permittedFunctionsForPLI(bytes memory data) { + bytes4 funcSelector; + assembly { + // solhint-disable-next-line avoid-low-level-calls + funcSelector := mload(add(data, 32)) + } + _validateTokenTransferAction(funcSelector, data); + _; + } +} diff --git a/contracts/src/v0.8/operatorforwarder/dev/Operator.sol b/contracts/src/v0.8/operatorforwarder/dev/Operator.sol new file mode 100644 index 00000000..2c3d7f8a --- /dev/null +++ b/contracts/src/v0.8/operatorforwarder/dev/Operator.sol @@ -0,0 +1,508 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {AuthorizedReceiver} from "./AuthorizedReceiver.sol"; +import {LinkTokenReceiver} from "./LinkTokenReceiver.sol"; +import {ConfirmedOwner} from
"../../shared/access/ConfirmedOwner.sol"; +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {AuthorizedReceiverInterface} from "./interfaces/AuthorizedReceiverInterface.sol"; +import {OperatorInterface} from "../../interfaces/OperatorInterface.sol"; +import {IOwnable} from "../../shared/interfaces/IOwnable.sol"; +import {WithdrawalInterface} from "./interfaces/WithdrawalInterface.sol"; +import {OracleInterface} from "../../interfaces/OracleInterface.sol"; +import {Address} from "@openzeppelin/contracts/utils/Address.sol"; +import {SafeCast} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SafeCast.sol"; + +// @title The Plugin Operator contract +// @notice Node operators can deploy this contract to fulfill requests sent to them +// solhint-disable custom-errors +contract Operator is AuthorizedReceiver, ConfirmedOwner, LinkTokenReceiver, OperatorInterface, WithdrawalInterface { + using Address for address; + + struct Commitment { + bytes31 paramsHash; + uint8 dataVersion; + } + + uint256 public constant EXPIRYTIME = 5 minutes; + uint256 private constant MAXIMUM_DATA_VERSION = 256; + uint256 private constant MINIMUM_CONSUMER_GAS_LIMIT = 400000; + uint256 private constant SELECTOR_LENGTH = 4; + uint256 private constant EXPECTED_REQUEST_WORDS = 2; + uint256 private constant MINIMUM_REQUEST_LENGTH = SELECTOR_LENGTH + (32 * EXPECTED_REQUEST_WORDS); + // We initialize fields to 1 instead of 0 so that the first invocation + // does not cost more gas. 
+ uint256 private constant ONE_FOR_CONSISTENT_GAS_COST = 1; + // oracleRequest is intended for version 1, enabling single word responses + bytes4 private constant ORACLE_REQUEST_SELECTOR = this.oracleRequest.selector; + // operatorRequest is intended for version 2, enabling multi-word responses + bytes4 private constant OPERATOR_REQUEST_SELECTOR = this.operatorRequest.selector; + + LinkTokenInterface internal immutable i_linkToken; + mapping(bytes32 => Commitment) private s_commitments; + mapping(address => bool) private s_owned; + // Tokens sent for requests that have not been fulfilled yet + uint256 private s_tokensInEscrow = ONE_FOR_CONSISTENT_GAS_COST; + + event OracleRequest( + bytes32 indexed specId, + address requester, + bytes32 requestId, + uint256 payment, + address callbackAddr, + bytes4 callbackFunctionId, + uint256 cancelExpiration, + uint256 dataVersion, + bytes data + ); + + event CancelOracleRequest(bytes32 indexed requestId); + + event OracleResponse(bytes32 indexed requestId); + + event OwnableContractAccepted(address indexed acceptedContract); + + event TargetsUpdatedAuthorizedSenders(address[] targets, address[] senders, address changedBy); + + // @notice Deploy with the address of the PLI token + // @dev Sets the LinkToken address for the imported LinkTokenInterface + // @param link The address of the PLI token + // @param owner The address of the owner + constructor(address link, address owner) ConfirmedOwner(owner) { + i_linkToken = LinkTokenInterface(link); // external but already deployed and unalterable + } + + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant typeAndVersion = "Operator 1.0.0"; + + // @notice Creates the Plugin request. This is a backwards compatible API + // with the Oracle.sol contract, but the behavior changes because + // callbackAddress is assumed to be the same as the request sender. 
+ // @param callbackAddress The consumer of the request + // @param payment The amount of payment given (specified in wei) + // @param specId The Job Specification ID + // @param callbackAddress The address the oracle data will be sent to + // @param callbackFunctionId The callback function ID for the response + // @param nonce The nonce sent by the requester + // @param dataVersion The specified data version + // @param data The extra request parameters + function oracleRequest( + address sender, + uint256 payment, + bytes32 specId, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 dataVersion, + bytes calldata data + ) external override validateFromPLI { + (bytes32 requestId, uint256 expiration) = _verifyAndProcessOracleRequest( + sender, + payment, + callbackAddress, + callbackFunctionId, + nonce, + dataVersion + ); + emit OracleRequest(specId, sender, requestId, payment, sender, callbackFunctionId, expiration, dataVersion, data); + } + + // @notice Creates the Plugin request + // @dev Stores the hash of the params as the on-chain commitment for the request. + // Emits OracleRequest event for the Plugin node to detect. 
+ // @param sender The sender of the request + // @param payment The amount of payment given (specified in wei) + // @param specId The Job Specification ID + // @param callbackFunctionId The callback function ID for the response + // @param nonce The nonce sent by the requester + // @param dataVersion The specified data version + // @param data The extra request parameters + function operatorRequest( + address sender, + uint256 payment, + bytes32 specId, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 dataVersion, + bytes calldata data + ) external override validateFromPLI { + (bytes32 requestId, uint256 expiration) = _verifyAndProcessOracleRequest( + sender, + payment, + sender, + callbackFunctionId, + nonce, + dataVersion + ); + emit OracleRequest(specId, sender, requestId, payment, sender, callbackFunctionId, expiration, dataVersion, data); + } + + // @notice Called by the Plugin node to fulfill requests + // @dev Given params must hash back to the commitment stored from `oracleRequest`. + // Will call the callback address' callback function without bubbling up error + // checking in a `require` so that the node can get paid. 
+ // @param requestId The fulfillment request ID that must match the requester's + // @param payment The payment amount that will be released for the oracle (specified in wei) + // @param callbackAddress The callback address to call for fulfillment + // @param callbackFunctionId The callback function ID to use for fulfillment + // @param expiration The expiration that the node should respond by before the requester can cancel + // @param data The data to return to the consuming contract + // @return Status if the external call was successful + function fulfillOracleRequest( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + bytes32 data + ) + external + override + validateAuthorizedSender + validateRequestId(requestId) + validateCallbackAddress(callbackAddress) + returns (bool) + { + _verifyOracleRequestAndProcessPayment(requestId, payment, callbackAddress, callbackFunctionId, expiration, 1); + emit OracleResponse(requestId); + require(gasleft() >= MINIMUM_CONSUMER_GAS_LIMIT, "Must provide consumer enough gas"); + // All updates to the oracle's fulfillment should come before calling the + // callback(addr+functionId) as it is untrusted. + // See: https://solidity.readthedocs.io/en/develop/security-considerations.html#use-the-checks-effects-interactions-pattern + (bool success, ) = callbackAddress.call(abi.encodeWithSelector(callbackFunctionId, requestId, data)); // solhint-disable-line avoid-low-level-calls + return success; + } + + // @notice Called by the Plugin node to fulfill requests with multi-word support + // @dev Given params must hash back to the commitment stored from `oracleRequest`. + // Will call the callback address' callback function without bubbling up error + // checking in a `require` so that the node can get paid. 
+ // @param requestId The fulfillment request ID that must match the requester's + // @param payment The payment amount that will be released for the oracle (specified in wei) + // @param callbackAddress The callback address to call for fulfillment + // @param callbackFunctionId The callback function ID to use for fulfillment + // @param expiration The expiration that the node should respond by before the requester can cancel + // @param data The data to return to the consuming contract + // @return Status if the external call was successful + function fulfillOracleRequest2( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + bytes calldata data + ) + external + override + validateAuthorizedSender + validateRequestId(requestId) + validateCallbackAddress(callbackAddress) + validateMultiWordResponseId(requestId, data) + returns (bool) + { + _verifyOracleRequestAndProcessPayment(requestId, payment, callbackAddress, callbackFunctionId, expiration, 2); + emit OracleResponse(requestId); + require(gasleft() >= MINIMUM_CONSUMER_GAS_LIMIT, "Must provide consumer enough gas"); + // All updates to the oracle's fulfillment should come before calling the + // callback(addr+functionId) as it is untrusted. + // See: https://solidity.readthedocs.io/en/develop/security-considerations.html#use-the-checks-effects-interactions-pattern + (bool success, ) = callbackAddress.call(abi.encodePacked(callbackFunctionId, data)); // solhint-disable-line avoid-low-level-calls + return success; + } + + // @notice Transfer the ownership of ownable contracts. 
This is primarily + // intended for Authorized Forwarders but could possibly be extended to work + // with future contracts. + // @param ownable list of addresses to transfer + // @param newOwner address to transfer ownership to + function transferOwnableContracts(address[] calldata ownable, address newOwner) external onlyOwner { + for (uint256 i = 0; i < ownable.length; ++i) { + s_owned[ownable[i]] = false; + IOwnable(ownable[i]).transferOwnership(newOwner); + } + } + + // @notice Accept the ownership of an ownable contract. This is primarily + // intended for Authorized Forwarders but could possibly be extended to work + // with future contracts. + // @dev Must be the pending owner on the contract + // @param ownable list of addresses of Ownable contracts to accept + function acceptOwnableContracts(address[] calldata ownable) public validateAuthorizedSenderSetter { + for (uint256 i = 0; i < ownable.length; ++i) { + s_owned[ownable[i]] = true; + emit OwnableContractAccepted(ownable[i]); + IOwnable(ownable[i]).acceptOwnership(); + } + } + + // @notice Sets the fulfillment permission for + // @param targets The addresses to set permissions on + // @param senders The addresses that are allowed to send updates + function setAuthorizedSendersOn( + address[] calldata targets, + address[] calldata senders + ) public validateAuthorizedSenderSetter { + emit TargetsUpdatedAuthorizedSenders(targets, senders, msg.sender); + + for (uint256 i = 0; i < targets.length; ++i) { + AuthorizedReceiverInterface(targets[i]).setAuthorizedSenders(senders); + } + } + + // @notice Accepts ownership of ownable contracts and then immediately sets + // the authorized sender list on each of the newly owned contracts. This is + // primarily intended for Authorized Forwarders but could possibly be + // extended to work with future contracts.
+ // @param targets The addresses to set permissions on + // @param senders The addresses that are allowed to send updates + function acceptAuthorizedReceivers( + address[] calldata targets, + address[] calldata senders + ) external validateAuthorizedSenderSetter { + acceptOwnableContracts(targets); + setAuthorizedSendersOn(targets, senders); + } + + // @notice Allows the node operator to withdraw earned PLI to a given address + // @dev The owner of the contract can be another wallet and does not have to be a Plugin node + // @param recipient The address to send the PLI token to + // @param amount The amount to send (specified in wei) + function withdraw( + address recipient, + uint256 amount + ) external override(OracleInterface, WithdrawalInterface) onlyOwner validateAvailableFunds(amount) { + assert(i_linkToken.transfer(recipient, amount)); + } + + // @notice Displays the amount of PLI that is available for the node operator to withdraw + // @dev We use `ONE_FOR_CONSISTENT_GAS_COST` in place of 0 in storage + // @return The amount of withdrawable PLI on the contract + function withdrawable() external view override(OracleInterface, WithdrawalInterface) returns (uint256) { + return _fundsAvailable(); + } + + // @notice Forward a call to another contract + // @dev Only callable by the owner + // @param to address + // @param data to forward + function ownerForward(address to, bytes calldata data) external onlyOwner validateNotToPLI(to) { + require(to.isContract(), "Must forward to a contract"); + // solhint-disable-next-line avoid-low-level-calls + (bool status, ) = to.call(data); + require(status, "Forwarded call failed"); + } + + // @notice Interact with other LinkTokenReceiver contracts by calling transferAndCall + // @param to The address to transfer to. + // @param value The amount to be transferred. + // @param data The extra data to be passed to the receiving contract. 
+ // @return success bool + function ownerTransferAndCall( + address to, + uint256 value, + bytes calldata data + ) external override onlyOwner validateAvailableFunds(value) returns (bool success) { + return i_linkToken.transferAndCall(to, value, data); + } + + // @notice Distribute funds to multiple addresses using ETH send + // to this payable function. + // @dev Array length must be equal, ETH sent must equal the sum of amounts. + // A malicious receiver could cause the distribution to revert, in which case + // it is expected that the address is removed from the list. + // @param receivers list of addresses + // @param amounts list of amounts + function distributeFunds(address payable[] calldata receivers, uint256[] calldata amounts) external payable { + require(receivers.length > 0 && receivers.length == amounts.length, "Invalid array length(s)"); + uint256 valueRemaining = msg.value; + for (uint256 i = 0; i < receivers.length; ++i) { + uint256 sendAmount = amounts[i]; + valueRemaining = valueRemaining - sendAmount; + (bool success, ) = receivers[i].call{value: sendAmount}(""); + require(success, "Address: unable to send value, recipient may have reverted"); + } + require(valueRemaining == 0, "Too much ETH sent"); + } + + // @notice Allows recipient to cancel requests sent to this oracle contract. + // Will transfer the PLI sent for the request back to the recipient address. + // @dev Given params must hash to a commitment stored on the contract in order + // for the request to be valid. Emits CancelOracleRequest event. 
+ // @param requestId The request ID + // @param payment The amount of payment given (specified in wei) + // @param callbackFunc The requester's specified callback function selector + // @param expiration The time of the expiration for the request + function cancelOracleRequest( + bytes32 requestId, + uint256 payment, + bytes4 callbackFunc, + uint256 expiration + ) external override { + bytes31 paramsHash = _buildParamsHash(payment, msg.sender, callbackFunc, expiration); + require(s_commitments[requestId].paramsHash == paramsHash, "Params do not match request ID"); + // solhint-disable-next-line not-rely-on-time + require(expiration <= block.timestamp, "Request is not expired"); + + delete s_commitments[requestId]; + emit CancelOracleRequest(requestId); + + i_linkToken.transfer(msg.sender, payment); + } + + // @notice Allows requester to cancel requests sent to this oracle contract. + // Will transfer the PLI sent for the request back to the recipient address. + // @dev Given params must hash to a commitment stored on the contract in order + // for the request to be valid. Emits CancelOracleRequest event. 
+ // @param nonce The nonce used to generate the request ID + // @param payment The amount of payment given (specified in wei) + // @param callbackFunc The requester's specified callback function selector + // @param expiration The time of the expiration for the request + function cancelOracleRequestByRequester( + uint256 nonce, + uint256 payment, + bytes4 callbackFunc, + uint256 expiration + ) external { + bytes32 requestId = keccak256(abi.encodePacked(msg.sender, nonce)); + bytes31 paramsHash = _buildParamsHash(payment, msg.sender, callbackFunc, expiration); + require(s_commitments[requestId].paramsHash == paramsHash, "Params do not match request ID"); + // solhint-disable-next-line not-rely-on-time + require(expiration <= block.timestamp, "Request is not expired"); + + delete s_commitments[requestId]; + emit CancelOracleRequest(requestId); + + i_linkToken.transfer(msg.sender, payment); + } + + // @notice Returns the address of the PLI token + // @dev This is the public implementation for pluginTokenAddress, which is + // an internal method of the PluginClient contract + function getPluginToken() public view override returns (address) { + return address(i_linkToken); + } + + // @notice Require that the token transfer action is valid + // @dev OPERATOR_REQUEST_SELECTOR = multiword, ORACLE_REQUEST_SELECTOR = singleword + function _validateTokenTransferAction(bytes4 funcSelector, bytes memory data) internal pure override { + require(data.length >= MINIMUM_REQUEST_LENGTH, "Invalid request length"); + require( + funcSelector == OPERATOR_REQUEST_SELECTOR || funcSelector == ORACLE_REQUEST_SELECTOR, + "Must use whitelisted functions" + ); + } + + // @notice Verify the Oracle Request and record necessary information + // @param sender The sender of the request + // @param payment The amount of payment given (specified in wei) + // @param callbackAddress The callback address for the response + // @param callbackFunctionId The callback function ID for the response + // 
@param nonce The nonce sent by the requester + function _verifyAndProcessOracleRequest( + address sender, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 nonce, + uint256 dataVersion + ) private validateNotToPLI(callbackAddress) returns (bytes32 requestId, uint256 expiration) { + requestId = keccak256(abi.encodePacked(sender, nonce)); + require(s_commitments[requestId].paramsHash == 0, "Must use a unique ID"); + // solhint-disable-next-line not-rely-on-time + expiration = block.timestamp + EXPIRYTIME; + bytes31 paramsHash = _buildParamsHash(payment, callbackAddress, callbackFunctionId, expiration); + s_commitments[requestId] = Commitment(paramsHash, SafeCast.toUint8(dataVersion)); + s_tokensInEscrow = s_tokensInEscrow + payment; + return (requestId, expiration); + } + + // @notice Verify the Oracle request and unlock escrowed payment + // @param requestId The fulfillment request ID that must match the requester's + // @param payment The payment amount that will be released for the oracle (specified in wei) + // @param callbackAddress The callback address to call for fulfillment + // @param callbackFunctionId The callback function ID to use for fulfillment + // @param expiration The expiration that the node should respond by before the requester can cancel + function _verifyOracleRequestAndProcessPayment( + bytes32 requestId, + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration, + uint256 dataVersion + ) internal { + bytes31 paramsHash = _buildParamsHash(payment, callbackAddress, callbackFunctionId, expiration); + require(s_commitments[requestId].paramsHash == paramsHash, "Params do not match request ID"); + require(s_commitments[requestId].dataVersion <= SafeCast.toUint8(dataVersion), "Data versions must match"); + s_tokensInEscrow = s_tokensInEscrow - payment; + delete s_commitments[requestId]; + } + + // @notice Build the bytes31 hash from the payment, callback and expiration. 
+ // @param payment The payment amount that will be released for the oracle (specified in wei) + // @param callbackAddress The callback address to call for fulfillment + // @param callbackFunctionId The callback function ID to use for fulfillment + // @param expiration The expiration that the node should respond by before the requester can cancel + // @return hash bytes31 + function _buildParamsHash( + uint256 payment, + address callbackAddress, + bytes4 callbackFunctionId, + uint256 expiration + ) internal pure returns (bytes31) { + return bytes31(keccak256(abi.encodePacked(payment, callbackAddress, callbackFunctionId, expiration))); + } + + // @notice Returns the PLI available in this contract, not locked in escrow + // @return uint256 PLI tokens available + function _fundsAvailable() private view returns (uint256) { + return i_linkToken.balanceOf(address(this)) - (s_tokensInEscrow - ONE_FOR_CONSISTENT_GAS_COST); + } + + // @notice concrete implementation of AuthorizedReceiver + // @return bool of whether sender is authorized + function _canSetAuthorizedSenders() internal view override returns (bool) { + return isAuthorizedSender(msg.sender) || owner() == msg.sender; + } + + // MODIFIERS + + // @dev Reverts if the first 32 bytes of the bytes array is not equal to requestId + // @param requestId bytes32 + // @param data bytes + modifier validateMultiWordResponseId(bytes32 requestId, bytes calldata data) { + require(data.length >= 32, "Response must be > 32 bytes"); + bytes32 firstDataWord; + assembly { + firstDataWord := calldataload(data.offset) + } + require(requestId == firstDataWord, "First word must be requestId"); + _; + } + + // @dev Reverts if amount requested is greater than withdrawable balance + // @param amount The given amount to compare to `s_withdrawableTokens` + modifier validateAvailableFunds(uint256 amount) { + require(_fundsAvailable() >= amount, "Amount requested is greater than withdrawable balance"); + _; + } + + // @dev Reverts if request ID 
does not exist + // @param requestId The given request ID to check in stored `commitments` + modifier validateRequestId(bytes32 requestId) { + require(s_commitments[requestId].paramsHash != 0, "Must have a valid requestId"); + _; + } + + // @dev Reverts if the callback address is the PLI token + // @param to The callback address + modifier validateNotToPLI(address to) { + require(to != address(i_linkToken), "Cannot call to PLI"); + _; + } + + // @dev Reverts if the target address is owned by the operator + modifier validateCallbackAddress(address callbackAddress) { + require(!s_owned[callbackAddress], "Cannot call owned contract"); + _; + } +} diff --git a/contracts/src/v0.8/operatorforwarder/dev/OperatorFactory.sol b/contracts/src/v0.8/operatorforwarder/dev/OperatorFactory.sol new file mode 100644 index 00000000..81d39ea8 --- /dev/null +++ b/contracts/src/v0.8/operatorforwarder/dev/OperatorFactory.sol @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {Operator} from "./Operator.sol"; +import {AuthorizedForwarder} from "./AuthorizedForwarder.sol"; + +// @title Operator Factory +// @notice Creates Operator contracts for node operators +// solhint-disable custom-errors +contract OperatorFactory { + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + address public immutable linkToken; + mapping(address => bool) private s_created; + + event OperatorCreated(address indexed operator, address indexed owner, address indexed sender); + event AuthorizedForwarderCreated(address indexed forwarder, address indexed owner, address indexed sender); + + // @param linkAddress address + constructor(address linkAddress) { + linkToken = linkAddress; + } + + // solhint-disable-next-line plugin-solidity/all-caps-constant-storage-variables + string public constant typeAndVersion = "OperatorFactory 1.0.0"; + + // @notice creates a new Operator contract with the msg.sender as owner + function deployNewOperator() external 
returns (address) { + Operator operator = new Operator(linkToken, msg.sender); + + s_created[address(operator)] = true; + emit OperatorCreated(address(operator), msg.sender, msg.sender); + + return address(operator); + } + + // @notice creates a new Operator contract with the msg.sender as owner and a + // new Operator Forwarder with the OperatorFactory as the owner + function deployNewOperatorAndForwarder() external returns (address, address) { + Operator operator = new Operator(linkToken, msg.sender); + s_created[address(operator)] = true; + emit OperatorCreated(address(operator), msg.sender, msg.sender); + + AuthorizedForwarder forwarder = new AuthorizedForwarder(linkToken, address(this), address(operator), new bytes(0)); + s_created[address(forwarder)] = true; + emit AuthorizedForwarderCreated(address(forwarder), address(this), msg.sender); + + return (address(operator), address(forwarder)); + } + + // @notice creates a new Forwarder contract with the msg.sender as owner + function deployNewForwarder() external returns (address) { + AuthorizedForwarder forwarder = new AuthorizedForwarder(linkToken, msg.sender, address(0), new bytes(0)); + + s_created[address(forwarder)] = true; + emit AuthorizedForwarderCreated(address(forwarder), msg.sender, msg.sender); + + return address(forwarder); + } + + // @notice creates a new Forwarder contract with the msg.sender as owner + function deployNewForwarderAndTransferOwnership(address to, bytes calldata message) external returns (address) { + AuthorizedForwarder forwarder = new AuthorizedForwarder(linkToken, msg.sender, to, message); + + s_created[address(forwarder)] = true; + emit AuthorizedForwarderCreated(address(forwarder), msg.sender, msg.sender); + + return address(forwarder); + } + + // @notice indicates whether this factory deployed an address + function created(address query) external view returns (bool) { + return s_created[query]; + } +} diff --git 
a/contracts/src/v0.8/operatorforwarder/dev/interfaces/AuthorizedReceiverInterface.sol b/contracts/src/v0.8/operatorforwarder/dev/interfaces/AuthorizedReceiverInterface.sol new file mode 100644 index 00000000..28b20b14 --- /dev/null +++ b/contracts/src/v0.8/operatorforwarder/dev/interfaces/AuthorizedReceiverInterface.sol @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface AuthorizedReceiverInterface { + function isAuthorizedSender(address sender) external view returns (bool); + + function getAuthorizedSenders() external returns (address[] memory); + + function setAuthorizedSenders(address[] calldata senders) external; +} diff --git a/contracts/src/v0.8/operatorforwarder/dev/interfaces/WithdrawalInterface.sol b/contracts/src/v0.8/operatorforwarder/dev/interfaces/WithdrawalInterface.sol new file mode 100644 index 00000000..56448b71 --- /dev/null +++ b/contracts/src/v0.8/operatorforwarder/dev/interfaces/WithdrawalInterface.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface WithdrawalInterface { + // @notice transfer PLI held by the contract belonging to msg.sender to + // another address + // @param recipient is the address to send the PLI to + // @param amount is the amount of PLI to send + function withdraw(address recipient, uint256 amount) external; + + // @notice query the available amount of PLI to withdraw by msg.sender + function withdrawable() external view returns (uint256); +} diff --git a/contracts/src/v0.8/shared/access/ConfirmedOwner.sol b/contracts/src/v0.8/shared/access/ConfirmedOwner.sol new file mode 100644 index 00000000..5b0c1593 --- /dev/null +++ b/contracts/src/v0.8/shared/access/ConfirmedOwner.sol @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {ConfirmedOwnerWithProposal} from "./ConfirmedOwnerWithProposal.sol"; + +/// @title The ConfirmedOwner contract +/// @notice A contract with helpers for basic contract ownership. 
+contract ConfirmedOwner is ConfirmedOwnerWithProposal { + constructor(address newOwner) ConfirmedOwnerWithProposal(newOwner, address(0)) {} +} diff --git a/contracts/src/v0.8/shared/access/ConfirmedOwnerWithProposal.sol b/contracts/src/v0.8/shared/access/ConfirmedOwnerWithProposal.sol new file mode 100644 index 00000000..7b684187 --- /dev/null +++ b/contracts/src/v0.8/shared/access/ConfirmedOwnerWithProposal.sol @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {IOwnable} from "../interfaces/IOwnable.sol"; + +/// @title The ConfirmedOwner contract +/// @notice A contract with helpers for basic contract ownership. +contract ConfirmedOwnerWithProposal is IOwnable { + address private s_owner; + address private s_pendingOwner; + + event OwnershipTransferRequested(address indexed from, address indexed to); + event OwnershipTransferred(address indexed from, address indexed to); + + constructor(address newOwner, address pendingOwner) { + // solhint-disable-next-line custom-errors + require(newOwner != address(0), "Cannot set owner to zero"); + + s_owner = newOwner; + if (pendingOwner != address(0)) { + _transferOwnership(pendingOwner); + } + } + + /// @notice Allows an owner to begin transferring ownership to a new address. + function transferOwnership(address to) public override onlyOwner { + _transferOwnership(to); + } + + /// @notice Allows an ownership transfer to be completed by the recipient. 
+ function acceptOwnership() external override { + // solhint-disable-next-line custom-errors + require(msg.sender == s_pendingOwner, "Must be proposed owner"); + + address oldOwner = s_owner; + s_owner = msg.sender; + s_pendingOwner = address(0); + + emit OwnershipTransferred(oldOwner, msg.sender); + } + + /// @notice Get the current owner + function owner() public view override returns (address) { + return s_owner; + } + + /// @notice validate, transfer ownership, and emit relevant events + function _transferOwnership(address to) private { + // solhint-disable-next-line custom-errors + require(to != msg.sender, "Cannot transfer to self"); + + s_pendingOwner = to; + + emit OwnershipTransferRequested(s_owner, to); + } + + /// @notice validate access + function _validateOwnership() internal view { + // solhint-disable-next-line custom-errors + require(msg.sender == s_owner, "Only callable by owner"); + } + + /// @notice Reverts if called by anyone other than the contract owner. + modifier onlyOwner() { + _validateOwnership(); + _; + } +} diff --git a/contracts/src/v0.8/shared/access/OwnerIsCreator.sol b/contracts/src/v0.8/shared/access/OwnerIsCreator.sol new file mode 100644 index 00000000..829c6879 --- /dev/null +++ b/contracts/src/v0.8/shared/access/OwnerIsCreator.sol @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {ConfirmedOwner} from "./ConfirmedOwner.sol"; + +/// @title The OwnerIsCreator contract +/// @notice A contract with helpers for basic contract ownership. 
+contract OwnerIsCreator is ConfirmedOwner { + constructor() ConfirmedOwner(msg.sender) {} +} diff --git a/contracts/src/v0.8/shared/access/SimpleReadAccessController.sol b/contracts/src/v0.8/shared/access/SimpleReadAccessController.sol new file mode 100644 index 00000000..f4ea905b --- /dev/null +++ b/contracts/src/v0.8/shared/access/SimpleReadAccessController.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {SimpleWriteAccessController} from "./SimpleWriteAccessController.sol"; + +/// @title SimpleReadAccessController +/// @notice Gives access to: +/// - any externally owned account (note that off-chain actors can always read +/// any contract storage regardless of on-chain access control measures, so this +/// does not weaken the access control while improving usability) +/// - accounts explicitly added to an access list +/// @dev SimpleReadAccessController is not suitable for access controlling writes +/// since it grants any externally owned account access! See +/// SimpleWriteAccessController for that. 
+contract SimpleReadAccessController is SimpleWriteAccessController { + /// @notice Returns the access of an address + /// @param _user The address to query + function hasAccess(address _user, bytes memory _calldata) public view virtual override returns (bool) { + // solhint-disable-next-line avoid-tx-origin + return super.hasAccess(_user, _calldata) || _user == tx.origin; + } +} diff --git a/contracts/src/v0.8/shared/access/SimpleWriteAccessController.sol b/contracts/src/v0.8/shared/access/SimpleWriteAccessController.sol new file mode 100644 index 00000000..b431331b --- /dev/null +++ b/contracts/src/v0.8/shared/access/SimpleWriteAccessController.sol @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {ConfirmedOwner} from "./ConfirmedOwner.sol"; +import {AccessControllerInterface} from "../interfaces/AccessControllerInterface.sol"; + +/// @title SimpleWriteAccessController +/// @notice Gives access to accounts explicitly added to an access list by the controller's owner. +/// @dev does not make any special permissions for externally owned accounts, see SimpleReadAccessController for that. 
+contract SimpleWriteAccessController is AccessControllerInterface, ConfirmedOwner { + bool public checkEnabled; + mapping(address => bool) internal s_accessList; + + event AddedAccess(address user); + event RemovedAccess(address user); + event CheckAccessEnabled(); + event CheckAccessDisabled(); + + constructor() ConfirmedOwner(msg.sender) { + checkEnabled = true; + } + + /// @notice Returns the access of an address + /// @param _user The address to query + function hasAccess(address _user, bytes memory) public view virtual override returns (bool) { + return s_accessList[_user] || !checkEnabled; + } + + /// @notice Adds an address to the access list + /// @param _user The address to add + function addAccess(address _user) external onlyOwner { + if (!s_accessList[_user]) { + s_accessList[_user] = true; + + emit AddedAccess(_user); + } + } + + /// @notice Removes an address from the access list + /// @param _user The address to remove + function removeAccess(address _user) external onlyOwner { + if (s_accessList[_user]) { + s_accessList[_user] = false; + + emit RemovedAccess(_user); + } + } + + /// @notice makes the access check enforced + function enableAccessCheck() external onlyOwner { + if (!checkEnabled) { + checkEnabled = true; + + emit CheckAccessEnabled(); + } + } + + /// @notice makes the access check unenforced + function disableAccessCheck() external onlyOwner { + if (checkEnabled) { + checkEnabled = false; + + emit CheckAccessDisabled(); + } + } + + /// @dev reverts if the caller does not have access + modifier checkAccess() { + // solhint-disable-next-line custom-errors + require(hasAccess(msg.sender, msg.data), "No access"); + _; + } +} diff --git a/contracts/src/v0.8/shared/call/CallWithExactGas.sol b/contracts/src/v0.8/shared/call/CallWithExactGas.sol new file mode 100644 index 00000000..6716dc15 --- /dev/null +++ b/contracts/src/v0.8/shared/call/CallWithExactGas.sol @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/// 
@notice This library contains various callWithExactGas functions. All of them are +/// safe from gas bomb attacks. +/// @dev There is code duplication in this library. This is done to avoid leaving the assembly +/// blocks. +library CallWithExactGas { + error NoContract(); + error NoGasForCallExactCheck(); + error NotEnoughGasForCall(); + + bytes4 internal constant NO_CONTRACT_SIG = 0x0c3b563c; + bytes4 internal constant NO_GAS_FOR_CALL_EXACT_CHECK_SIG = 0xafa32a2c; + bytes4 internal constant NOT_ENOUGH_GAS_FOR_CALL_SIG = 0x37c3be29; + + /// @notice calls target address with exactly gasAmount gas and payload as calldata. + /// Accounts for gasForCallExactCheck gas that will be used by this function. Will revert + /// if the target is not a contract. Will revert when there is not enough gas to call the + /// target with gasAmount gas. + /// @dev Ignores the return data, which makes it immune to gas bomb attacks. + /// @return success whether the call succeeded + function _callWithExactGas( + bytes memory payload, + address target, + uint256 gasLimit, + uint16 gasForCallExactCheck + ) internal returns (bool success) { + assembly { + // solidity calls check that a contract actually exists at the destination, so we do the same + // Note we do this check prior to measuring gas so gasForCallExactCheck (our "cushion") + // doesn't need to account for it. + if iszero(extcodesize(target)) { + mstore(0x0, NO_CONTRACT_SIG) + revert(0x0, 0x4) + } + + let g := gas() + // Compute g -= gasForCallExactCheck and check for underflow + // The gas actually passed to the callee is _min(gasAmount, 63//64*gas available). + // We want to ensure that we revert if gasAmount > 63//64*gas available + // as we do not want to provide them with less, however that check itself costs + // gas. gasForCallExactCheck ensures we have at least enough gas to be able + // to revert if gasAmount > 63//64*gas available. 
+ if lt(g, gasForCallExactCheck) { + mstore(0x0, NO_GAS_FOR_CALL_EXACT_CHECK_SIG) + revert(0x0, 0x4) + } + g := sub(g, gasForCallExactCheck) + // if g - g//64 <= gasAmount, revert. We subtract g//64 because of EIP-150 + if iszero(gt(sub(g, div(g, 64)), gasLimit)) { + mstore(0x0, NOT_ENOUGH_GAS_FOR_CALL_SIG) + revert(0x0, 0x4) + } + + // call and return whether we succeeded. ignore return data + // call(gas,addr,value,argsOffset,argsLength,retOffset,retLength) + success := call(gasLimit, target, 0, add(payload, 0x20), mload(payload), 0x0, 0x0) + } + return success; + } + + /// @notice calls target address with exactly gasAmount gas and payload as calldata. + /// Accounts for gasForCallExactCheck gas that will be used by this function. Will revert + /// if the target is not a contract. Will revert when there is not enough gas to call the + /// target with gasAmount gas. + /// @dev Caps the return data length, which makes it immune to gas bomb attacks. + /// @dev Return data cap logic borrowed from + /// https://github.com/nomad-xyz/ExcessivelySafeCall/blob/main/src/ExcessivelySafeCall.sol. + /// @return success whether the call succeeded + /// @return retData the return data from the call, capped at maxReturnBytes bytes + /// @return gasUsed the gas used by the external call. Does not include the overhead of this function. + function _callWithExactGasSafeReturnData( + bytes memory payload, + address target, + uint256 gasLimit, + uint16 gasForCallExactCheck, + uint16 maxReturnBytes + ) internal returns (bool success, bytes memory retData, uint256 gasUsed) { + // allocate retData memory ahead of time + retData = new bytes(maxReturnBytes); + + assembly { + // solidity calls check that a contract actually exists at the destination, so we do the same + // Note we do this check prior to measuring gas so gasForCallExactCheck (our "cushion") + // doesn't need to account for it. 
+ if iszero(extcodesize(target)) { + mstore(0x0, NO_CONTRACT_SIG) + revert(0x0, 0x4) + } + + let g := gas() + // Compute g -= gasForCallExactCheck and check for underflow + // The gas actually passed to the callee is _min(gasAmount, 63//64*gas available). + // We want to ensure that we revert if gasAmount > 63//64*gas available + // as we do not want to provide them with less, however that check itself costs + // gas. gasForCallExactCheck ensures we have at least enough gas to be able + // to revert if gasAmount > 63//64*gas available. + if lt(g, gasForCallExactCheck) { + mstore(0x0, NO_GAS_FOR_CALL_EXACT_CHECK_SIG) + revert(0x0, 0x4) + } + g := sub(g, gasForCallExactCheck) + // if g - g//64 <= gasAmount, revert. We subtract g//64 because of EIP-150 + if iszero(gt(sub(g, div(g, 64)), gasLimit)) { + mstore(0x0, NOT_ENOUGH_GAS_FOR_CALL_SIG) + revert(0x0, 0x4) + } + + // We save the gas before the call so we can calculate how much gas the call used + let gasBeforeCall := gas() + // call and return whether we succeeded. ignore return data + // call(gas,addr,value,argsOffset,argsLength,retOffset,retLength) + success := call(gasLimit, target, 0, add(payload, 0x20), mload(payload), 0x0, 0x0) + gasUsed := sub(gasBeforeCall, gas()) + + // limit our copy to maxReturnBytes bytes + let toCopy := returndatasize() + if gt(toCopy, maxReturnBytes) { + toCopy := maxReturnBytes + } + // Store the length of the copied bytes + mstore(retData, toCopy) + // copy the bytes from retData[0:_toCopy] + returndatacopy(add(retData, 0x20), 0x0, toCopy) + } + return (success, retData, gasUsed); + } + + /// @notice Calls target address with exactly gasAmount gas and payload as calldata + /// or reverts if at least gasLimit gas is not available. + /// @dev Does not check if target is a contract. If it is not a contract, the low-level + /// call will still be made and it will succeed. + /// @dev Ignores the return data, which makes it immune to gas bomb attacks. 
+ /// @return success whether the call succeeded + /// @return sufficientGas Whether there was enough gas to make the call + function _callWithExactGasEvenIfTargetIsNoContract( + bytes memory payload, + address target, + uint256 gasLimit, + uint16 gasForCallExactCheck + ) internal returns (bool success, bool sufficientGas) { + assembly { + let g := gas() + // Compute g -= CALL_WITH_EXACT_GAS_CUSHION and check for underflow. We + // need the cushion since the logic following the above call to gas also + // costs gas which we cannot account for exactly. So cushion is a + // conservative upper bound for the cost of this logic. + if iszero(lt(g, gasForCallExactCheck)) { + g := sub(g, gasForCallExactCheck) + // If g - g//64 <= gasAmount, we don't have enough gas. We subtract g//64 because of EIP-150. + if gt(sub(g, div(g, 64)), gasLimit) { + // Call and ignore success/return data. Note that we did not check + // whether a contract actually exists at the target address. + success := call(gasLimit, target, 0, add(payload, 0x20), mload(payload), 0x0, 0x0) + sufficientGas := true + } + } + } + return (success, sufficientGas); + } +} diff --git a/contracts/src/v0.8/shared/interfaces/AccessControllerInterface.sol b/contracts/src/v0.8/shared/interfaces/AccessControllerInterface.sol new file mode 100644 index 00000000..fa3e2a5c --- /dev/null +++ b/contracts/src/v0.8/shared/interfaces/AccessControllerInterface.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface AccessControllerInterface { + function hasAccess(address user, bytes calldata data) external view returns (bool); +} diff --git a/contracts/src/v0.8/shared/interfaces/AggregatorInterface.sol b/contracts/src/v0.8/shared/interfaces/AggregatorInterface.sol new file mode 100644 index 00000000..fe0cef09 --- /dev/null +++ b/contracts/src/v0.8/shared/interfaces/AggregatorInterface.sol @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface AggregatorInterface 
{ + function latestAnswer() external view returns (int256); + + function latestTimestamp() external view returns (uint256); + + function latestRound() external view returns (uint256); + + function getAnswer(uint256 roundId) external view returns (int256); + + function getTimestamp(uint256 roundId) external view returns (uint256); + + event AnswerUpdated(int256 indexed current, uint256 indexed roundId, uint256 updatedAt); + + event NewRound(uint256 indexed roundId, address indexed startedBy, uint256 startedAt); +} diff --git a/contracts/src/v0.8/shared/interfaces/AggregatorV2V3Interface.sol b/contracts/src/v0.8/shared/interfaces/AggregatorV2V3Interface.sol new file mode 100644 index 00000000..c023e3d2 --- /dev/null +++ b/contracts/src/v0.8/shared/interfaces/AggregatorV2V3Interface.sol @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {AggregatorInterface} from "./AggregatorInterface.sol"; +import {AggregatorV3Interface} from "./AggregatorV3Interface.sol"; + +interface AggregatorV2V3Interface is AggregatorInterface, AggregatorV3Interface {} diff --git a/contracts/src/v0.8/shared/interfaces/AggregatorV3Interface.sol b/contracts/src/v0.8/shared/interfaces/AggregatorV3Interface.sol new file mode 100644 index 00000000..d3eab7b3 --- /dev/null +++ b/contracts/src/v0.8/shared/interfaces/AggregatorV3Interface.sol @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface AggregatorV3Interface { + function decimals() external view returns (uint8); + + function description() external view returns (string memory); + + function version() external view returns (uint256); + + function getRoundData( + uint80 _roundId + ) external view returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound); + + function latestRoundData() + external + view + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound); +} diff --git 
a/contracts/src/v0.8/shared/interfaces/AggregatorValidatorInterface.sol b/contracts/src/v0.8/shared/interfaces/AggregatorValidatorInterface.sol new file mode 100644 index 00000000..7e3b7a3e --- /dev/null +++ b/contracts/src/v0.8/shared/interfaces/AggregatorValidatorInterface.sol @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface AggregatorValidatorInterface { + function validate( + uint256 previousRoundId, + int256 previousAnswer, + uint256 currentRoundId, + int256 currentAnswer + ) external returns (bool); +} diff --git a/contracts/src/v0.8/shared/interfaces/IAccessController.sol b/contracts/src/v0.8/shared/interfaces/IAccessController.sol new file mode 100644 index 00000000..07cb7a15 --- /dev/null +++ b/contracts/src/v0.8/shared/interfaces/IAccessController.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface IAccessController { + function hasAccess(address user, bytes calldata data) external view returns (bool); +} diff --git a/contracts/src/v0.8/shared/interfaces/IERC677Receiver.sol b/contracts/src/v0.8/shared/interfaces/IERC677Receiver.sol new file mode 100644 index 00000000..5cb8cfcc --- /dev/null +++ b/contracts/src/v0.8/shared/interfaces/IERC677Receiver.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +interface IERC677Receiver { + function onTokenTransfer(address sender, uint256 amount, bytes calldata data) external; +} diff --git a/contracts/src/v0.8/shared/interfaces/IOwnable.sol b/contracts/src/v0.8/shared/interfaces/IOwnable.sol new file mode 100644 index 00000000..3141fe9a --- /dev/null +++ b/contracts/src/v0.8/shared/interfaces/IOwnable.sol @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface IOwnable { + function owner() external returns (address); + + function transferOwnership(address recipient) external; + + function acceptOwnership() external; +} diff --git 
a/contracts/src/v0.8/shared/interfaces/ITypeAndVersion.sol b/contracts/src/v0.8/shared/interfaces/ITypeAndVersion.sol new file mode 100644 index 00000000..135f6d0a --- /dev/null +++ b/contracts/src/v0.8/shared/interfaces/ITypeAndVersion.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface ITypeAndVersion { + function typeAndVersion() external pure returns (string memory); +} diff --git a/contracts/src/v0.8/shared/interfaces/IWERC20.sol b/contracts/src/v0.8/shared/interfaces/IWERC20.sol new file mode 100644 index 00000000..96073530 --- /dev/null +++ b/contracts/src/v0.8/shared/interfaces/IWERC20.sol @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface IWERC20 { + function deposit() external payable; + + function withdraw(uint256) external; +} diff --git a/contracts/src/v0.8/shared/interfaces/LinkTokenInterface.sol b/contracts/src/v0.8/shared/interfaces/LinkTokenInterface.sol new file mode 100644 index 00000000..c00db9e9 --- /dev/null +++ b/contracts/src/v0.8/shared/interfaces/LinkTokenInterface.sol @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface LinkTokenInterface { + function allowance(address owner, address spender) external view returns (uint256 remaining); + + function approve(address spender, uint256 value) external returns (bool success); + + function balanceOf(address owner) external view returns (uint256 balance); + + function decimals() external view returns (uint8 decimalPlaces); + + function decreaseApproval(address spender, uint256 addedValue) external returns (bool success); + + function increaseApproval(address spender, uint256 subtractedValue) external; + + function name() external view returns (string memory tokenName); + + function symbol() external view returns (string memory tokenSymbol); + + function totalSupply() external view returns (uint256 totalTokensIssued); + + function transfer(address to, uint256 value) external returns (bool 
success); + + function transferAndCall(address to, uint256 value, bytes calldata data) external returns (bool success); + + function transferFrom(address from, address to, uint256 value) external returns (bool success); +} diff --git a/contracts/src/v0.8/shared/mocks/WERC20Mock.sol b/contracts/src/v0.8/shared/mocks/WERC20Mock.sol new file mode 100644 index 00000000..cee7fa7f --- /dev/null +++ b/contracts/src/v0.8/shared/mocks/WERC20Mock.sol @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {ERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/ERC20.sol"; + +contract WERC20Mock is ERC20 { + constructor() ERC20("WERC20Mock", "WERC") {} + + event Deposit(address indexed dst, uint256 wad); + event Withdrawal(address indexed src, uint256 wad); + + receive() external payable { + deposit(); + } + + function deposit() public payable { + _mint(msg.sender, msg.value); + emit Deposit(msg.sender, msg.value); + } + + function withdraw(uint256 wad) public { + // solhint-disable-next-line custom-errors, reason-string + require(balanceOf(msg.sender) >= wad); + _burn(msg.sender, wad); + payable(msg.sender).transfer(wad); + emit Withdrawal(msg.sender, wad); + } + + function mint(address account, uint256 amount) external { + _mint(account, amount); + } + + function burn(address account, uint256 amount) external { + _burn(account, amount); + } +} diff --git a/contracts/src/v0.8/shared/ocr2/OCR2Abstract.sol b/contracts/src/v0.8/shared/ocr2/OCR2Abstract.sol new file mode 100644 index 00000000..cd3f1971 --- /dev/null +++ b/contracts/src/v0.8/shared/ocr2/OCR2Abstract.sol @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {ITypeAndVersion} from "../interfaces/ITypeAndVersion.sol"; + +abstract contract OCR2Abstract is ITypeAndVersion { + uint256 internal constant MAX_NUM_ORACLES = 31; + uint256 private constant PREFIX_MASK = type(uint256).max << (256 - 16); // 0xFFFF00..00 + uint256 private 
constant PREFIX = 0x0001 << (256 - 16); // 0x000100..00 + + /// @notice triggers a new run of the offchain reporting protocol + /// @param previousConfigBlockNumber block in which the previous config was set, to simplify historic analysis + /// @param configDigest configDigest of this configuration + /// @param configCount ordinal number of this config setting among all config settings over the life of this contract + /// @param signers ith element is address ith oracle uses to sign a report + /// @param transmitters ith element is address ith oracle uses to transmit a report via the transmit method + /// @param f maximum number of faulty/dishonest oracles the protocol can tolerate while still working correctly + /// @param onchainConfig serialized configuration used by the contract (and possibly oracles) + /// @param offchainConfigVersion version of the serialization format used for "offchainConfig" parameter + /// @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + event ConfigSet( + uint32 previousConfigBlockNumber, + bytes32 configDigest, + uint64 configCount, + address[] signers, + address[] transmitters, + uint8 f, + bytes onchainConfig, + uint64 offchainConfigVersion, + bytes offchainConfig + ); + + /// @notice sets offchain reporting protocol configuration incl. 
participating oracles + /// @param signers addresses with which oracles sign the reports + /// @param transmitters addresses oracles use to transmit the reports + /// @param f number of faulty oracles the system can tolerate + /// @param onchainConfig serialized configuration used by the contract (and possibly oracles) + /// @param offchainConfigVersion version number for offchainEncoding schema + /// @param offchainConfig serialized configuration used by the oracles exclusively and only passed through the contract + function setConfig( + address[] memory signers, + address[] memory transmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) external virtual; + + /// @notice information about current offchain reporting protocol configuration + /// @return configCount ordinal number of current config, out of all configs applied to this contract so far + /// @return blockNumber block at which this config was set + /// @return configDigest domain-separation tag for current config (see _configDigestFromConfigData) + function latestConfigDetails() + external + view + virtual + returns (uint32 configCount, uint32 blockNumber, bytes32 configDigest); + + function _configDigestFromConfigData( + uint256 chainId, + address contractAddress, + uint64 configCount, + address[] memory signers, + address[] memory transmitters, + uint8 f, + bytes memory onchainConfig, + uint64 offchainConfigVersion, + bytes memory offchainConfig + ) internal pure returns (bytes32) { + uint256 h = uint256( + keccak256( + abi.encode( + chainId, + contractAddress, + configCount, + signers, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig + ) + ) + ); + return bytes32((PREFIX & PREFIX_MASK) | (h & ~PREFIX_MASK)); + } + + /// @notice optionally emitted to indicate the latest configDigest and epoch for + /// which a report was successfully transmitted. 
Alternatively, the contract may + /// use latestConfigDigestAndEpoch with scanLogs set to false. + event Transmitted(bytes32 configDigest, uint32 epoch); + + /// @notice optionally returns the latest configDigest and epoch for which a + /// report was successfully transmitted. Alternatively, the contract may return + /// scanLogs set to true and use Transmitted events to provide this information + /// to offchain watchers. + /// @return scanLogs indicates whether to rely on the configDigest and epoch + /// returned or whether to scan logs for the Transmitted event instead. + /// @return configDigest + /// @return epoch + function latestConfigDigestAndEpoch() + external + view + virtual + returns (bool scanLogs, bytes32 configDigest, uint32 epoch); + + /// @notice transmit is called to post a new report to the contract + /// @param reportContext [0]: ConfigDigest, [1]: 27 byte padding, 4-byte epoch and 1-byte round, [2]: ExtraHash + /// @param report serialized report, which the signatures are signing. + /// @param rs ith element is the R components of the ith signature on report. Must have at most MAX_NUM_ORACLES entries + /// @param ss ith element is the S components of the ith signature on report. 
Must have at most MAX_NUM_ORACLES entries + /// @param rawVs ith element is the the V component of the ith signature + function transmit( + // NOTE: If these parameters are changed, expectedMsgDataLength and/or + // TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT need to be changed accordingly + bytes32[3] calldata reportContext, + bytes calldata report, + bytes32[] calldata rs, + bytes32[] calldata ss, + bytes32 rawVs // signatures + ) external virtual; +} diff --git a/contracts/src/v0.8/shared/ocr2/OCR2Base.sol b/contracts/src/v0.8/shared/ocr2/OCR2Base.sol new file mode 100644 index 00000000..baedac77 --- /dev/null +++ b/contracts/src/v0.8/shared/ocr2/OCR2Base.sol @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {OwnerIsCreator} from "../access/OwnerIsCreator.sol"; +import {OCR2Abstract} from "./OCR2Abstract.sol"; + +/// @notice Onchain verification of reports from the offchain reporting protocol +/// @dev THIS CONTRACT HAS NOT GONE THROUGH ANY SECURITY REVIEW. DO NOT USE IN PROD. +/// @dev For details on its operation, see the offchain reporting protocol design +/// doc, which refers to this contract as simply the "contract". +/// @dev This contract is meant to aid rapid development of new applications based on OCR2. +/// However, for actual production contracts, it is expected that most of the logic of this contract +/// will be folded directly into the application contract. Inheritance prevents us from doing lots +/// of juicy storage layout optimizations, leading to a substantial increase in gas cost. +// solhint-disable custom-errors +abstract contract OCR2Base is OwnerIsCreator, OCR2Abstract { + error ReportInvalid(); + + bool internal immutable i_uniqueReports; + + constructor(bool uniqueReports) OwnerIsCreator() { + i_uniqueReports = uniqueReports; + } + + /// @dev Storing these fields used on the hot path in a ConfigInfo variable reduces + /// the retrieval of all of them to a single SLOAD. 
If any further fields are + /// added, make sure that storage of the struct still takes at most 32 bytes. + struct ConfigInfo { + bytes32 latestConfigDigest; + uint8 f; // ───╮ + uint8 n; // ───╯ + } + ConfigInfo internal s_configInfo; + + /// @dev Incremented each time a new config is posted. This count is incorporated + /// into the config digest, to prevent replay attacks. + uint32 internal s_configCount; + /// @dev Makes it easier for offchain systems to extract config from logs. + uint32 internal s_latestConfigBlockNumber; + + /// @dev Used for s_oracles[a].role, where a is an address, to track the purpose + /// of the address, or to indicate that the address is unset. + enum Role { + // No oracle role has been set for address a + Unset, + // Signing address for the s_oracles[a].index'th oracle. I.e., report + // signatures from this oracle should ecrecover back to address a. + Signer, + // Transmission address for the s_oracles[a].index'th oracle. I.e., if a + // report is received by OCR2Aggregator.transmit in which msg.sender is + // a, it is attributed to the s_oracles[a].index'th oracle. + Transmitter + } + + struct Oracle { + uint8 index; // ───╮ Index of oracle in s_signers/s_transmitters + Role role; // ─────╯ Role of the address which mapped to this struct + } + + mapping(address => Oracle) /* signer OR transmitter address */ internal s_oracles; + + /// @notice Contains the signing address of each oracle + address[] internal s_signers; + + /// @notice Contains the transmission address of each oracle, + /// i.e. 
the address the oracle actually sends transactions to the contract from + address[] internal s_transmitters; + + /// @dev Reverts transaction if config args are invalid + modifier checkConfigValid( + uint256 _numSigners, + uint256 _numTransmitters, + uint256 _f + ) { + require(_numSigners <= MAX_NUM_ORACLES, "too many signers"); + require(_f > 0, "f must be positive"); + require(_numSigners == _numTransmitters, "oracle addresses out of registration"); + require(_numSigners > 3 * _f, "faulty-oracle f too high"); + _; + } + + struct SetConfigArgs { + address[] signers; + address[] transmitters; + uint8 f; + bytes onchainConfig; + uint64 offchainConfigVersion; + bytes offchainConfig; + } + + /// @inheritdoc OCR2Abstract + function latestConfigDigestAndEpoch() + external + view + virtual + override + returns (bool scanLogs, bytes32 configDigest, uint32 epoch) + { + return (true, bytes32(0), uint32(0)); + } + + /// @notice sets offchain reporting protocol configuration incl. participating oracles + /// @param _signers addresses with which oracles sign the reports + /// @param _transmitters addresses oracles use to transmit the reports + /// @param _f number of faulty oracles the system can tolerate + /// @param _onchainConfig encoded on-chain contract configuration + /// @param _offchainConfigVersion version number for offchainEncoding schema + /// @param _offchainConfig encoded off-chain oracle configuration + function setConfig( + address[] memory _signers, + address[] memory _transmitters, + uint8 _f, + bytes memory _onchainConfig, + uint64 _offchainConfigVersion, + bytes memory _offchainConfig + ) external override checkConfigValid(_signers.length, _transmitters.length, _f) onlyOwner { + SetConfigArgs memory args = SetConfigArgs({ + signers: _signers, + transmitters: _transmitters, + f: _f, + onchainConfig: _onchainConfig, + offchainConfigVersion: _offchainConfigVersion, + offchainConfig: _offchainConfig + }); + + _beforeSetConfig(args.f, args.onchainConfig); + + 
while (s_signers.length != 0) { + // remove any old signer/transmitter addresses + uint256 lastIdx = s_signers.length - 1; + address signer = s_signers[lastIdx]; + address transmitter = s_transmitters[lastIdx]; + delete s_oracles[signer]; + delete s_oracles[transmitter]; + s_signers.pop(); + s_transmitters.pop(); + } + + for (uint256 i = 0; i < args.signers.length; ++i) { + // add new signer/transmitter addresses + require(s_oracles[args.signers[i]].role == Role.Unset, "repeated signer address"); + s_oracles[args.signers[i]] = Oracle(uint8(i), Role.Signer); + require(s_oracles[args.transmitters[i]].role == Role.Unset, "repeated transmitter address"); + s_oracles[args.transmitters[i]] = Oracle(uint8(i), Role.Transmitter); + s_signers.push(args.signers[i]); + s_transmitters.push(args.transmitters[i]); + } + s_configInfo.f = args.f; + uint32 previousConfigBlockNumber = s_latestConfigBlockNumber; + s_latestConfigBlockNumber = uint32(block.number); + s_configCount += 1; + { + s_configInfo.latestConfigDigest = _configDigestFromConfigData( + block.chainid, + address(this), + s_configCount, + args.signers, + args.transmitters, + args.f, + args.onchainConfig, + args.offchainConfigVersion, + args.offchainConfig + ); + } + s_configInfo.n = uint8(args.signers.length); + + emit ConfigSet( + previousConfigBlockNumber, + s_configInfo.latestConfigDigest, + s_configCount, + args.signers, + args.transmitters, + args.f, + args.onchainConfig, + args.offchainConfigVersion, + args.offchainConfig + ); + + _afterSetConfig(args.f, args.onchainConfig); + } + + /// @notice information about current offchain reporting protocol configuration + /// @return configCount ordinal number of current config, out of all configs applied to this contract so far + /// @return blockNumber block at which this config was set + /// @return configDigest domain-separation tag for current config (see configDigestFromConfigData) + function latestConfigDetails() + external + view + override + returns (uint32 
configCount, uint32 blockNumber, bytes32 configDigest) + { + return (s_configCount, s_latestConfigBlockNumber, s_configInfo.latestConfigDigest); + } + + /// @return list of addresses permitted to transmit reports to this contract + /// @dev The list will match the order used to specify the transmitter during setConfig + function transmitters() external view returns (address[] memory) { + return s_transmitters; + } + + function _beforeSetConfig(uint8 _f, bytes memory _onchainConfig) internal virtual; + + function _afterSetConfig(uint8 _f, bytes memory _onchainConfig) internal virtual; + + /// @dev hook to allow additional validation of the report by the extending contract + /// @param configDigest separation tag for current config (see configDigestFromConfigData) + /// @param epochAndRound 27 byte padding, 4-byte epoch and 1-byte round + /// @param report serialized report + function _validateReport( + bytes32 configDigest, + uint40 epochAndRound, + bytes memory report + ) internal virtual returns (bool); + + /// @dev hook called after the report has been fully validated + /// for the extending contract to handle additional logic, such as oracle payment + /// @param initialGas the amount of gas before validation + /// @param transmitter the address of the account that submitted the report + /// @param signers the addresses of all signing accounts + /// @param report serialized report + function _report( + uint256 initialGas, + address transmitter, + uint8 signerCount, + address[MAX_NUM_ORACLES] memory signers, + bytes calldata report + ) internal virtual; + + // The constant-length components of the msg.data sent to transmit. 
+ // See the "If we wanted to call sam" example on for example reasoning + // https://solidity.readthedocs.io/en/v0.7.2/abi-spec.html + uint16 private constant TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT = + 4 + // function selector + 32 * + 3 + // 3 words containing reportContext + 32 + // word containing start location of abiencoded report value + 32 + // word containing location start of abiencoded rs value + 32 + // word containing start location of abiencoded ss value + 32 + // rawVs value + 32 + // word containing length of report + 32 + // word containing length rs + 32 + // word containing length of ss + 0; // placeholder + + function _requireExpectedMsgDataLength( + bytes calldata report, + bytes32[] calldata rs, + bytes32[] calldata ss + ) private pure { + // calldata will never be big enough to make this overflow + uint256 expected = uint256(TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT) + + report.length + // one byte pure entry in _report + rs.length * + 32 + // 32 bytes per entry in _rs + ss.length * + 32 + // 32 bytes per entry in _ss + 0; // placeholder + require(msg.data.length == expected, "calldata length mismatch"); + } + + /// @notice transmit is called to post a new report to the contract + /// @param report serialized report, which the signatures are signing. + /// @param rs ith element is the R components of the ith signature on report. Must have at most maxNumOracles entries + /// @param ss ith element is the S components of the ith signature on report. 
Must have at most maxNumOracles entries + /// @param rawVs ith element is the the V component of the ith signature + function transmit( + // NOTE: If these parameters are changed, expectedMsgDataLength and/or + // TRANSMIT_MSGDATA_CONSTANT_LENGTH_COMPONENT need to be changed accordingly + bytes32[3] calldata reportContext, + bytes calldata report, + bytes32[] calldata rs, + bytes32[] calldata ss, + bytes32 rawVs // signatures + ) external override { + uint256 initialGas = gasleft(); // This line must come first + + { + // reportContext consists of: + // reportContext[0]: ConfigDigest + // reportContext[1]: 27 byte padding, 4-byte epoch and 1-byte round + // reportContext[2]: ExtraHash + bytes32 configDigest = reportContext[0]; + uint32 epochAndRound = uint32(uint256(reportContext[1])); + + if (!_validateReport(configDigest, epochAndRound, report)) { + revert ReportInvalid(); + } + + emit Transmitted(configDigest, uint32(epochAndRound >> 8)); + + ConfigInfo memory configInfo = s_configInfo; + require(configInfo.latestConfigDigest == configDigest, "configDigest mismatch"); + + _requireExpectedMsgDataLength(report, rs, ss); + + uint256 expectedNumSignatures; + if (i_uniqueReports) { + expectedNumSignatures = (configInfo.n + configInfo.f) / 2 + 1; + } else { + expectedNumSignatures = configInfo.f + 1; + } + + require(rs.length == expectedNumSignatures, "wrong number of signatures"); + require(rs.length == ss.length, "signatures out of registration"); + + Oracle memory transmitter = s_oracles[msg.sender]; + require( // Check that sender is authorized to report + transmitter.role == Role.Transmitter && msg.sender == s_transmitters[transmitter.index], + "unauthorized transmitter" + ); + } + + address[MAX_NUM_ORACLES] memory signed; + uint8 signerCount = 0; + + { + // Verify signatures attached to report + bytes32 h = keccak256(abi.encodePacked(keccak256(report), reportContext)); + + Oracle memory o; + for (uint256 i = 0; i < rs.length; ++i) { + address signer = 
ecrecover(h, uint8(rawVs[i]) + 27, rs[i], ss[i]); + o = s_oracles[signer]; + require(o.role == Role.Signer, "address not authorized to sign"); + require(signed[o.index] == address(0), "non-unique signature"); + signed[o.index] = signer; + signerCount += 1; + } + } + + _report(initialGas, msg.sender, signerCount, signed, report); + } +} diff --git a/contracts/src/v0.8/shared/ocr2/README.md b/contracts/src/v0.8/shared/ocr2/README.md new file mode 100644 index 00000000..e7ccc512 --- /dev/null +++ b/contracts/src/v0.8/shared/ocr2/README.md @@ -0,0 +1,5 @@ +# OCR2 Rapid Prototype Contracts + +The contracts in this directory are to aid rapid prototyping of OCR2 based products. They abstract OCR2 config and boilerplate code so that specific logic can be implemented and tested quickly. They are not optimized or audited. + +Do not use these contracts in production. For actual production contracts, it is expected that most of the logic of these contracts will be folded directly into the application contract. \ No newline at end of file diff --git a/contracts/src/v0.8/shared/test/BaseTest.t.sol b/contracts/src/v0.8/shared/test/BaseTest.t.sol new file mode 100644 index 00000000..4d8ef60e --- /dev/null +++ b/contracts/src/v0.8/shared/test/BaseTest.t.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import "forge-std/Test.sol"; + +contract BaseTest is Test { + bool private s_baseTestInitialized; + address internal constant OWNER = 0x72da681452Ab957d1020c25fFaCA47B43980b7C3; + address internal constant STRANGER = 0x02e7d5DD1F4dDbC9f512FfA01d30aa190Ae3edBb; + + // Fri May 26 2023 13:49:53 GMT+0000 + uint256 internal constant BLOCK_TIME = 1685108993; + + function setUp() public virtual { + // BaseTest.setUp is often called multiple times from tests' setUp due to inheritance. 
+ if (s_baseTestInitialized) return; + s_baseTestInitialized = true; + + vm.label(OWNER, "Owner"); + vm.label(STRANGER, "Stranger"); + + // Set the sender to OWNER permanently + vm.startPrank(OWNER); + deal(OWNER, 1e20); + + // Set the block time to a constant known value + vm.warp(BLOCK_TIME); + } +} diff --git a/contracts/src/v0.8/shared/test/call/CallWithExactGas.t.sol b/contracts/src/v0.8/shared/test/call/CallWithExactGas.t.sol new file mode 100644 index 00000000..e5c90b17 --- /dev/null +++ b/contracts/src/v0.8/shared/test/call/CallWithExactGas.t.sol @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {CallWithExactGas} from "../../call/CallWithExactGas.sol"; +import {CallWithExactGasHelper} from "./CallWithExactGasHelper.sol"; +import {BaseTest} from "../BaseTest.t.sol"; +import {GenericReceiver} from "../testhelpers/GenericReceiver.sol"; + +contract CallWithExactGasSetup is BaseTest { + GenericReceiver internal s_receiver; + CallWithExactGasHelper internal s_caller; + uint256 internal constant DEFAULT_GAS_LIMIT = 20_000; + uint16 internal constant DEFAULT_GAS_FOR_CALL_EXACT_CHECK = 5000; + uint256 internal constant EXTCODESIZE_GAS_COST = 2600; + + function setUp() public virtual override { + BaseTest.setUp(); + + s_receiver = new GenericReceiver(false); + s_caller = new CallWithExactGasHelper(); + } +} + +contract CallWithExactGas__callWithExactGas is CallWithExactGasSetup { + function test_callWithExactGasSuccess(bytes memory payload, bytes4 funcSelector) public { + vm.pauseGasMetering(); + + bytes memory data = abi.encodeWithSelector(funcSelector, payload); + vm.assume( + funcSelector != GenericReceiver.setRevert.selector && + funcSelector != GenericReceiver.setErr.selector && + funcSelector != 0x5100fc21 // s_toRevert(), which is public and therefore has a function selector + ); + + vm.expectCall(address(s_receiver), data); + vm.resumeGasMetering(); + + bool success = s_caller.callWithExactGas( + data, + 
address(s_receiver), + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + ); + + assertTrue(success); + } + + function test_CallWithExactGasSafeReturnDataExactGas() public { + // The calculated overhead for otherwise unaccounted for gas usage + uint256 overheadForCallWithExactGas = 364; + + bytes memory payload = abi.encodeWithSelector( + s_caller.callWithExactGas.selector, + "", + address(s_receiver), + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + ); + + // Since only 63/64th of the gas gets passed, we compensate + uint256 allowedGas = (DEFAULT_GAS_LIMIT + (DEFAULT_GAS_LIMIT / 64)); + + allowedGas += EXTCODESIZE_GAS_COST + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + overheadForCallWithExactGas; + + // Due to EIP-150 we expect to lose 1/64, so we compensate for this + allowedGas = (allowedGas * 64) / 63; + + (bool success, bytes memory retData) = address(s_caller).call{gas: allowedGas}(payload); + + assertTrue(success); + assertEq(abi.encode(true), retData); + } + + function test_CallWithExactGasReceiverErrorSuccess() public { + bytes memory data = abi.encode("0x52656E73"); + + bytes memory errorData = new bytes(20); + for (uint256 i = 0; i < errorData.length; ++i) { + errorData[i] = 0x01; + } + s_receiver.setErr(errorData); + s_receiver.setRevert(true); + + vm.expectCall(address(s_receiver), data); + + bool success = s_caller.callWithExactGas( + data, + address(s_receiver), + DEFAULT_GAS_LIMIT * 10, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + ); + + assertFalse(success); + } + + function test_NoContractReverts() public { + address addressWithoutContract = address(1337); + + vm.expectRevert(CallWithExactGas.NoContract.selector); + + s_caller.callWithExactGas( + "", // empty payload as it will revert well before needing it + addressWithoutContract, + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + ); + } + + function test_NoGasForCallExactCheckReverts() public { + bytes memory payload = abi.encodeWithSelector( + s_caller.callWithExactGas.selector, + "", // 
empty payload as it will revert well before needing it + address(s_receiver), + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + ); + + (bool success, bytes memory retData) = address(s_caller).call{gas: DEFAULT_GAS_FOR_CALL_EXACT_CHECK - 1}(payload); + assertFalse(success); + assertEq(retData.length, CallWithExactGas.NoGasForCallExactCheck.selector.length); + assertEq(abi.encodeWithSelector(CallWithExactGas.NoGasForCallExactCheck.selector), retData); + } + + function test_NotEnoughGasForCallReverts() public { + bytes memory payload = abi.encodeWithSelector( + s_caller.callWithExactGas.selector, + "", // empty payload as it will revert well before needing it + address(s_receiver), + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + ); + + // Supply enough gas for the final call, the DEFAULT_GAS_FOR_CALL_EXACT_CHECK, + // the extcodesize and account for EIP-150. This doesn't account for any other gas + // usage, and will therefore fail because the checks and memory stored/loads + // also cost gas. 
+ uint256 allowedGas = (DEFAULT_GAS_LIMIT + (DEFAULT_GAS_LIMIT / 64)) + DEFAULT_GAS_FOR_CALL_EXACT_CHECK; + // extcodesize gas cost + allowedGas += EXTCODESIZE_GAS_COST; + // EIP-150 + allowedGas = (allowedGas * 64) / 63; + + // Expect this call to fail due to not having enough gas for the final call + (bool success, bytes memory retData) = address(s_caller).call{gas: allowedGas}(payload); + + assertFalse(success); + assertEq(retData.length, CallWithExactGas.NotEnoughGasForCall.selector.length); + assertEq(abi.encodeWithSelector(CallWithExactGas.NotEnoughGasForCall.selector), retData); + } +} + +contract CallWithExactGas__callWithExactGasSafeReturnData is CallWithExactGasSetup { + function testFuzz_CallWithExactGasSafeReturnDataSuccess(bytes memory payload, bytes4 funcSelector) public { + vm.pauseGasMetering(); + bytes memory data = abi.encodeWithSelector(funcSelector, payload); + vm.assume( + funcSelector != GenericReceiver.setRevert.selector && + funcSelector != GenericReceiver.setErr.selector && + funcSelector != 0x5100fc21 // s_toRevert(), which is public and therefore has a function selector + ); + uint16 maxRetBytes = 0; + + vm.expectCall(address(s_receiver), data); + vm.resumeGasMetering(); + + (bool success, bytes memory retData, uint256 gasUsed) = s_caller.callWithExactGasSafeReturnData( + data, + address(s_receiver), + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK, + maxRetBytes + ); + + assertTrue(success); + assertEq(retData.length, 0); + assertGt(gasUsed, 500); + } + + function test_CallWithExactGasSafeReturnDataExactGas() public { + // The gas cost for `extcodesize` + uint256 extcodesizeGas = EXTCODESIZE_GAS_COST; + // The calculated overhead for retData initialization + uint256 overheadForRetDataInit = 114; + // The calculated overhead for otherwise unaccounted for gas usage + uint256 overheadForCallWithExactGas = 486; + + bytes memory payload = abi.encodeWithSelector( + s_caller.callWithExactGasSafeReturnData.selector, + "", + 
address(s_receiver), + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK, + 0 + ); + + // Since only 63/64th of the gas gets passed, we compensate + uint256 allowedGas = (DEFAULT_GAS_LIMIT + (DEFAULT_GAS_LIMIT / 64)); + + allowedGas += + extcodesizeGas + + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + + overheadForRetDataInit + + overheadForCallWithExactGas; + + // Due to EIP-150 we expect to lose 1/64, so we compensate for this + allowedGas = (allowedGas * 64) / 63; + + vm.expectCall(address(s_receiver), ""); + (bool success, bytes memory retData) = address(s_caller).call{gas: allowedGas}(payload); + + assertTrue(success); + (bool innerSuccess, bytes memory innerRetData, uint256 gasUsed) = abi.decode(retData, (bool, bytes, uint256)); + + assertTrue(innerSuccess); + assertEq(innerRetData.length, 0); + assertGt(gasUsed, 500); + } + + function testFuzz_CallWithExactGasReceiverErrorSuccess(uint16 testRetBytes) public { + uint16 maxReturnBytes = 500; + // Bound with upper limit, otherwise the test runs out of gas. 
+ testRetBytes = uint16(bound(testRetBytes, 0, maxReturnBytes * 10)); + + bytes memory data = abi.encode("0x52656E73"); + + bytes memory errorData = new bytes(testRetBytes); + for (uint256 i = 0; i < errorData.length; ++i) { + errorData[i] = 0x01; + } + s_receiver.setErr(errorData); + s_receiver.setRevert(true); + + vm.expectCall(address(s_receiver), data); + + (bool success, bytes memory retData, uint256 gasUsed) = s_caller.callWithExactGasSafeReturnData( + data, + address(s_receiver), + DEFAULT_GAS_LIMIT * 10, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK, + maxReturnBytes + ); + + assertFalse(success); + + bytes memory expectedReturnData = errorData; + + // If expected return data is longer than MAX_RET_BYTES, truncate it to MAX_RET_BYTES + if (expectedReturnData.length > maxReturnBytes) { + expectedReturnData = new bytes(maxReturnBytes); + for (uint256 i = 0; i < maxReturnBytes; ++i) { + expectedReturnData[i] = errorData[i]; + } + } + assertEq(expectedReturnData, retData); + assertGt(gasUsed, 500); + } + + function test_NoContractReverts() public { + address addressWithoutContract = address(1337); + + vm.expectRevert(CallWithExactGas.NoContract.selector); + + s_caller.callWithExactGasSafeReturnData( + "", // empty payload as it will revert well before needing it + addressWithoutContract, + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK, + 0 + ); + } + + function test_NoGasForCallExactCheckReverts() public { + bytes memory payload = abi.encodeWithSelector( + s_caller.callWithExactGasSafeReturnData.selector, + "", // empty payload as it will revert well before needing it + address(s_receiver), + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK, + 0 + ); + + (bool success, bytes memory retData) = address(s_caller).call{gas: DEFAULT_GAS_FOR_CALL_EXACT_CHECK - 1}(payload); + assertFalse(success); + assertEq(retData.length, CallWithExactGas.NoGasForCallExactCheck.selector.length); + assertEq(abi.encodeWithSelector(CallWithExactGas.NoGasForCallExactCheck.selector), 
retData); + } + + function test_NotEnoughGasForCallReverts() public { + bytes memory payload = abi.encodeWithSelector( + s_caller.callWithExactGasSafeReturnData.selector, + "", // empty payload as it will revert well before needing it + address(s_receiver), + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK, + 0 + ); + + // Supply enough gas for the final call, the DEFAULT_GAS_FOR_CALL_EXACT_CHECK, + // the extcodesize and account for EIP-150. This doesn't account for any other gas + // usage, and will therefore fail because the checks and memory stored/loads + // also cost gas. + uint256 allowedGas = (DEFAULT_GAS_LIMIT + (DEFAULT_GAS_LIMIT / 64)) + DEFAULT_GAS_FOR_CALL_EXACT_CHECK; + // extcodesize gas cost + allowedGas += EXTCODESIZE_GAS_COST; + // EIP-150 + allowedGas = (allowedGas * 64) / 63; + + // Expect this call to fail due to not having enough gas for the final call + (bool success, bytes memory retData) = address(s_caller).call{gas: allowedGas}(payload); + + assertFalse(success); + assertEq(retData.length, CallWithExactGas.NotEnoughGasForCall.selector.length); + assertEq(abi.encodeWithSelector(CallWithExactGas.NotEnoughGasForCall.selector), retData); + } +} + +contract CallWithExactGas__callWithExactGasEvenIfTargetIsNoContract is CallWithExactGasSetup { + function test_CallWithExactGasEvenIfTargetIsNoContractSuccess(bytes memory payload, bytes4 funcSelector) public { + vm.pauseGasMetering(); + bytes memory data = abi.encodeWithSelector(funcSelector, payload); + vm.assume( + funcSelector != GenericReceiver.setRevert.selector && + funcSelector != GenericReceiver.setErr.selector && + funcSelector != 0x5100fc21 // s_toRevert(), which is public and therefore has a function selector + ); + vm.expectCall(address(s_receiver), data); + vm.resumeGasMetering(); + + (bool success, bool sufficientGas) = s_caller.callWithExactGasEvenIfTargetIsNoContract( + data, + address(s_receiver), + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + ); + + 
assertTrue(success); + assertTrue(sufficientGas); + } + + function test_CallWithExactGasEvenIfTargetIsNoContractExactGasSuccess() public { + // The calculated overhead for otherwise unaccounted for gas usage + uint256 overheadForCallWithExactGas = 446; + + bytes memory data = abi.encode("0x52656E73"); + + bytes memory payload = abi.encodeWithSelector( + s_caller.callWithExactGasEvenIfTargetIsNoContract.selector, + data, + address(s_receiver), + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + ); + + // Since only 63/64th of the gas gets passed, we compensate + uint256 allowedGas = (DEFAULT_GAS_LIMIT + (DEFAULT_GAS_LIMIT / 64)); + + allowedGas += DEFAULT_GAS_FOR_CALL_EXACT_CHECK + overheadForCallWithExactGas; + + // Due to EIP-150 we expect to lose 1/64, so we compensate for this + allowedGas = (allowedGas * 64) / 63; + + vm.expectCall(address(s_receiver), data); + (bool outerCallSuccess, bytes memory SuccessAndSufficientGas) = address(s_caller).call{gas: allowedGas}(payload); + + // The call succeeds + assertTrue(outerCallSuccess); + + (bool success, bool sufficientGas) = abi.decode(SuccessAndSufficientGas, (bool, bool)); + assertTrue(success); + assertTrue(sufficientGas); + } + + function test_CallWithExactGasEvenIfTargetIsNoContractReceiverErrorSuccess() public { + bytes memory data = abi.encode("0x52656E73"); + + bytes memory errorData = new bytes(20); + for (uint256 i = 0; i < errorData.length; ++i) { + errorData[i] = 0x01; + } + s_receiver.setErr(errorData); + s_receiver.setRevert(true); + + vm.expectCall(address(s_receiver), data); + + (bool success, bool sufficientGas) = s_caller.callWithExactGasEvenIfTargetIsNoContract( + data, + address(s_receiver), + DEFAULT_GAS_LIMIT * 10, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + ); + + // We don't care if it reverts, we only care if we have enough gas + assertFalse(success); + assertTrue(sufficientGas); + } + + function test_NoContractSuccess() public { + bytes memory data = abi.encode("0x52656E73"); + address 
addressWithoutContract = address(1337); + + (bool success, bool sufficientGas) = s_caller.callWithExactGasEvenIfTargetIsNoContract( + data, + addressWithoutContract, + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + ); + + assertTrue(success); + assertTrue(sufficientGas); + } + + function test_NoGasForCallExactCheckReturnFalseSuccess() public { + bytes memory payload = abi.encodeWithSelector( + s_caller.callWithExactGasEvenIfTargetIsNoContract.selector, + "", // empty payload as it will revert well before needing it + address(s_receiver), + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + ); + + (bool outerCallSuccess, bytes memory SuccessAndSufficientGas) = address(s_caller).call{ + gas: DEFAULT_GAS_FOR_CALL_EXACT_CHECK - 1 + }(payload); + + // The call succeeds + assertTrue(outerCallSuccess); + + (bool success, bool sufficientGas) = abi.decode(SuccessAndSufficientGas, (bool, bool)); + assertFalse(success); + assertFalse(sufficientGas); + } + + function test_NotEnoughGasForCallReturnsFalseSuccess() public { + bytes memory payload = abi.encodeWithSelector( + s_caller.callWithExactGasEvenIfTargetIsNoContract.selector, + "", // empty payload as it will revert well before needing it + address(s_receiver), + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_FOR_CALL_EXACT_CHECK + ); + + // Supply enough gas for the final call, the DEFAULT_GAS_FOR_CALL_EXACT_CHECK, + // and account for EIP-150. This doesn't account for any other gas usage, and + // will therefore fail because the checks and memory stored/loads also cost gas. 
+ uint256 allowedGas = (DEFAULT_GAS_LIMIT + (DEFAULT_GAS_LIMIT / 64)) + DEFAULT_GAS_FOR_CALL_EXACT_CHECK; + // EIP-150 + allowedGas = (allowedGas * 64) / 63; + + // Expect this call to fail due to not having enough gas for the final call + (bool outerCallSuccess, bytes memory SuccessAndSufficientGas) = address(s_caller).call{gas: allowedGas}(payload); + + // The call succeeds + assertTrue(outerCallSuccess); + + (bool success, bool sufficientGas) = abi.decode(SuccessAndSufficientGas, (bool, bool)); + assertFalse(success); + assertFalse(sufficientGas); + } +} diff --git a/contracts/src/v0.8/shared/test/call/CallWithExactGasHelper.sol b/contracts/src/v0.8/shared/test/call/CallWithExactGasHelper.sol new file mode 100644 index 00000000..93231563 --- /dev/null +++ b/contracts/src/v0.8/shared/test/call/CallWithExactGasHelper.sol @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {CallWithExactGas} from "../../call/CallWithExactGas.sol"; + +contract CallWithExactGasHelper { + function callWithExactGas( + bytes memory payload, + address target, + uint256 gasLimit, + uint16 gasForCallExactCheck + ) public returns (bool success) { + return CallWithExactGas._callWithExactGas(payload, target, gasLimit, gasForCallExactCheck); + } + + function callWithExactGasSafeReturnData( + bytes memory payload, + address target, + uint256 gasLimit, + uint16 gasForCallExactCheck, + uint16 maxReturnBytes + ) public returns (bool success, bytes memory retData, uint256 gasUsed) { + return + CallWithExactGas._callWithExactGasSafeReturnData(payload, target, gasLimit, gasForCallExactCheck, maxReturnBytes); + } + + function callWithExactGasEvenIfTargetIsNoContract( + bytes memory payload, + address target, + uint256 gasLimit, + uint16 gasForCallExactCheck + ) public returns (bool success, bool sufficientGas) { + return CallWithExactGas._callWithExactGasEvenIfTargetIsNoContract(payload, target, gasLimit, gasForCallExactCheck); + } +} diff --git 
a/contracts/src/v0.8/shared/test/helpers/ChainReaderTestContract.sol b/contracts/src/v0.8/shared/test/helpers/ChainReaderTestContract.sol new file mode 100644 index 00000000..765513c9 --- /dev/null +++ b/contracts/src/v0.8/shared/test/helpers/ChainReaderTestContract.sol @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8; + +struct TestStruct { + int32 Field; + string DifferentField; + uint8 OracleId; + uint8[32] OracleIds; + address Account; + address[] Accounts; + int192 BigField; + MidLevelTestStruct NestedStruct; +} + +struct MidLevelTestStruct { + bytes2 FixedBytes; + InnerTestStruct Inner; +} + +struct InnerTestStruct { + int64 IntVal; + string S; +} + +contract LatestValueHolder { + event Triggered( + int32 indexed field, + string differentField, + uint8 oracleId, + uint8[32] oracleIds, + address Account, + address[] Accounts, + int192 bigField, + MidLevelTestStruct nestedStruct + ); + + event TriggeredEventWithDynamicTopic(string indexed fieldHash, string field); + + // First topic is event hash + event TriggeredWithFourTopics(int32 indexed field1, int32 indexed field2, int32 indexed field3); + + TestStruct[] private s_seen; + uint64[] private s_arr; + + constructor() { + // See chain_reader_interface_tests.go in plugin-relay + s_arr.push(3); + s_arr.push(4); + } + + function addTestStruct( + int32 field, + string calldata differentField, + uint8 oracleId, + uint8[32] calldata oracleIds, + address account, + address[] calldata accounts, + int192 bigField, + MidLevelTestStruct calldata nestedStruct + ) public { + s_seen.push(TestStruct(field, differentField, oracleId, oracleIds, account, accounts, bigField, nestedStruct)); + } + + function returnSeen( + int32 field, + string calldata differentField, + uint8 oracleId, + uint8[32] calldata oracleIds, + address account, + address[] calldata accounts, + int192 bigField, + MidLevelTestStruct calldata nestedStruct + ) public pure returns (TestStruct memory) { + return TestStruct(field, 
differentField, oracleId, oracleIds, account, accounts, bigField, nestedStruct); + } + + function getElementAtIndex(uint256 i) public view returns (TestStruct memory) { + // See chain_reader_interface_tests.go in plugin-relay + return s_seen[i - 1]; + } + + function getPrimitiveValue() public pure returns (uint64) { + // See chain_reader_interface_tests.go in plugin-relay + return 3; + } + + function getDifferentPrimitiveValue() public pure returns (uint64) { + // See chain_reader_interface_tests.go in plugin-relay + return 1990; + } + + function getSliceValue() public view returns (uint64[] memory) { + return s_arr; + } + + function triggerEvent( + int32 field, + string calldata differentField, + uint8 oracleId, + uint8[32] calldata oracleIds, + address account, + address[] calldata accounts, + int192 bigField, + MidLevelTestStruct calldata nestedStruct + ) public { + emit Triggered(field, differentField, oracleId, oracleIds, account, accounts, bigField, nestedStruct); + } + + function triggerEventWithDynamicTopic(string calldata field) public { + emit TriggeredEventWithDynamicTopic(field, field); + } + + // first topic is the event signature + function triggerWithFourTopics(int32 field1, int32 field2, int32 field3) public { + emit TriggeredWithFourTopics(field1, field2, field3); + } +} diff --git a/contracts/src/v0.8/shared/test/testhelpers/GenericReceiver.sol b/contracts/src/v0.8/shared/test/testhelpers/GenericReceiver.sol new file mode 100644 index 00000000..2c058012 --- /dev/null +++ b/contracts/src/v0.8/shared/test/testhelpers/GenericReceiver.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract GenericReceiver { + bool public s_toRevert; + bytes private s_err; + + constructor(bool toRevert) { + s_toRevert = toRevert; + } + + function setRevert(bool toRevert) external { + s_toRevert = toRevert; + } + + function setErr(bytes memory err) external { + s_err = err; + } + + // solhint-disable-next-line payable-fallback + 
fallback() external { + if (s_toRevert) { + bytes memory reason = s_err; + assembly { + revert(add(32, reason), mload(reason)) + } + } + } +} diff --git a/contracts/src/v0.8/shared/test/token/ERC677/BurnMintERC677.t.sol b/contracts/src/v0.8/shared/test/token/ERC677/BurnMintERC677.t.sol new file mode 100644 index 00000000..2ceb864f --- /dev/null +++ b/contracts/src/v0.8/shared/test/token/ERC677/BurnMintERC677.t.sol @@ -0,0 +1,347 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.19; + +import {IBurnMintERC20} from "../../../token/ERC20/IBurnMintERC20.sol"; +import {IERC677} from "../../../token/ERC677/IERC677.sol"; + +import {BaseTest} from "../../BaseTest.t.sol"; +import {BurnMintERC677} from "../../../token/ERC677/BurnMintERC677.sol"; + +import {IERC20} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; +import {IERC165} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/IERC165.sol"; + +contract BurnMintERC677Setup is BaseTest { + event Transfer(address indexed from, address indexed to, uint256 value); + event MintAccessGranted(address indexed minter); + event BurnAccessGranted(address indexed burner); + event MintAccessRevoked(address indexed minter); + event BurnAccessRevoked(address indexed burner); + + BurnMintERC677 internal s_burnMintERC677; + + address internal s_mockPool = address(6243783892); + uint256 internal s_amount = 1e18; + + function setUp() public virtual override { + BaseTest.setUp(); + s_burnMintERC677 = new BurnMintERC677("Plugin Token", "PLI", 18, 1e27); + + // Set s_mockPool to be a burner and minter + s_burnMintERC677.grantMintAndBurnRoles(s_mockPool); + deal(address(s_burnMintERC677), OWNER, s_amount); + } +} + +contract BurnMintERC677_constructor is BurnMintERC677Setup { + function testConstructorSuccess() public { + string memory name = "Plugin token v2"; + string memory symbol = "PLI2"; + uint8 decimals = 19; + uint256 maxSupply = 1e33; + s_burnMintERC677 = 
new BurnMintERC677(name, symbol, decimals, maxSupply); + + assertEq(name, s_burnMintERC677.name()); + assertEq(symbol, s_burnMintERC677.symbol()); + assertEq(decimals, s_burnMintERC677.decimals()); + assertEq(maxSupply, s_burnMintERC677.maxSupply()); + } +} + +contract BurnMintERC677_approve is BurnMintERC677Setup { + function testApproveSuccess() public { + uint256 balancePre = s_burnMintERC677.balanceOf(STRANGER); + uint256 sendingAmount = s_amount / 2; + + s_burnMintERC677.approve(STRANGER, sendingAmount); + + changePrank(STRANGER); + + s_burnMintERC677.transferFrom(OWNER, STRANGER, sendingAmount); + + assertEq(sendingAmount + balancePre, s_burnMintERC677.balanceOf(STRANGER)); + } + + // Reverts + + function testInvalidAddressReverts() public { + vm.expectRevert(); + + s_burnMintERC677.approve(address(s_burnMintERC677), s_amount); + } +} + +contract BurnMintERC677_transfer is BurnMintERC677Setup { + function testTransferSuccess() public { + uint256 balancePre = s_burnMintERC677.balanceOf(STRANGER); + uint256 sendingAmount = s_amount / 2; + + s_burnMintERC677.transfer(STRANGER, sendingAmount); + + assertEq(sendingAmount + balancePre, s_burnMintERC677.balanceOf(STRANGER)); + } + + // Reverts + + function testInvalidAddressReverts() public { + vm.expectRevert(); + + s_burnMintERC677.transfer(address(s_burnMintERC677), s_amount); + } +} + +contract BurnMintERC677_mint is BurnMintERC677Setup { + function testBasicMintSuccess() public { + uint256 balancePre = s_burnMintERC677.balanceOf(OWNER); + + s_burnMintERC677.grantMintAndBurnRoles(OWNER); + + vm.expectEmit(); + emit Transfer(address(0), OWNER, s_amount); + + s_burnMintERC677.mint(OWNER, s_amount); + + assertEq(balancePre + s_amount, s_burnMintERC677.balanceOf(OWNER)); + } + + // Revert + + function testSenderNotMinterReverts() public { + vm.expectRevert(abi.encodeWithSelector(BurnMintERC677.SenderNotMinter.selector, OWNER)); + s_burnMintERC677.mint(STRANGER, 1e18); + } + + function testMaxSupplyExceededReverts() 
public { + changePrank(s_mockPool); + + // Mint max supply + s_burnMintERC677.mint(OWNER, s_burnMintERC677.maxSupply()); + + vm.expectRevert( + abi.encodeWithSelector(BurnMintERC677.MaxSupplyExceeded.selector, s_burnMintERC677.maxSupply() + 1) + ); + + // Attempt to mint 1 more than max supply + s_burnMintERC677.mint(OWNER, 1); + } +} + +contract BurnMintERC677_burn is BurnMintERC677Setup { + function testBasicBurnSuccess() public { + s_burnMintERC677.grantBurnRole(OWNER); + deal(address(s_burnMintERC677), OWNER, s_amount); + + vm.expectEmit(); + emit Transfer(OWNER, address(0), s_amount); + + s_burnMintERC677.burn(s_amount); + + assertEq(0, s_burnMintERC677.balanceOf(OWNER)); + } + + // Revert + + function testSenderNotBurnerReverts() public { + vm.expectRevert(abi.encodeWithSelector(BurnMintERC677.SenderNotBurner.selector, OWNER)); + + s_burnMintERC677.burnFrom(STRANGER, s_amount); + } + + function testExceedsBalanceReverts() public { + changePrank(s_mockPool); + + vm.expectRevert("ERC20: burn amount exceeds balance"); + + s_burnMintERC677.burn(s_amount * 2); + } + + function testBurnFromZeroAddressReverts() public { + s_burnMintERC677.grantBurnRole(address(0)); + changePrank(address(0)); + + vm.expectRevert("ERC20: burn from the zero address"); + + s_burnMintERC677.burn(0); + } +} + +contract BurnMintERC677_burnFromAlias is BurnMintERC677Setup { + function setUp() public virtual override { + BurnMintERC677Setup.setUp(); + } + + function testBurnFromSuccess() public { + s_burnMintERC677.approve(s_mockPool, s_amount); + + changePrank(s_mockPool); + + s_burnMintERC677.burn(OWNER, s_amount); + + assertEq(0, s_burnMintERC677.balanceOf(OWNER)); + } + + // Reverts + + function testSenderNotBurnerReverts() public { + vm.expectRevert(abi.encodeWithSelector(BurnMintERC677.SenderNotBurner.selector, OWNER)); + + s_burnMintERC677.burn(OWNER, s_amount); + } + + function testInsufficientAllowanceReverts() public { + changePrank(s_mockPool); + + vm.expectRevert("ERC20: 
insufficient allowance"); + + s_burnMintERC677.burn(OWNER, s_amount); + } + + function testExceedsBalanceReverts() public { + s_burnMintERC677.approve(s_mockPool, s_amount * 2); + + changePrank(s_mockPool); + + vm.expectRevert("ERC20: burn amount exceeds balance"); + + s_burnMintERC677.burn(OWNER, s_amount * 2); + } +} + +contract BurnMintERC677_burnFrom is BurnMintERC677Setup { + function setUp() public virtual override { + BurnMintERC677Setup.setUp(); + } + + function testBurnFromSuccess() public { + s_burnMintERC677.approve(s_mockPool, s_amount); + + changePrank(s_mockPool); + + s_burnMintERC677.burnFrom(OWNER, s_amount); + + assertEq(0, s_burnMintERC677.balanceOf(OWNER)); + } + + // Reverts + + function testSenderNotBurnerReverts() public { + vm.expectRevert(abi.encodeWithSelector(BurnMintERC677.SenderNotBurner.selector, OWNER)); + + s_burnMintERC677.burnFrom(OWNER, s_amount); + } + + function testInsufficientAllowanceReverts() public { + changePrank(s_mockPool); + + vm.expectRevert("ERC20: insufficient allowance"); + + s_burnMintERC677.burnFrom(OWNER, s_amount); + } + + function testExceedsBalanceReverts() public { + s_burnMintERC677.approve(s_mockPool, s_amount * 2); + + changePrank(s_mockPool); + + vm.expectRevert("ERC20: burn amount exceeds balance"); + + s_burnMintERC677.burnFrom(OWNER, s_amount * 2); + } +} + +contract BurnMintERC677_grantRole is BurnMintERC677Setup { + function testGrantMintAccessSuccess() public { + assertFalse(s_burnMintERC677.isMinter(STRANGER)); + + vm.expectEmit(); + emit MintAccessGranted(STRANGER); + + s_burnMintERC677.grantMintAndBurnRoles(STRANGER); + + assertTrue(s_burnMintERC677.isMinter(STRANGER)); + + vm.expectEmit(); + emit MintAccessRevoked(STRANGER); + + s_burnMintERC677.revokeMintRole(STRANGER); + + assertFalse(s_burnMintERC677.isMinter(STRANGER)); + } + + function testGrantBurnAccessSuccess() public { + assertFalse(s_burnMintERC677.isBurner(STRANGER)); + + vm.expectEmit(); + emit BurnAccessGranted(STRANGER); + + 
s_burnMintERC677.grantBurnRole(STRANGER); + + assertTrue(s_burnMintERC677.isBurner(STRANGER)); + + vm.expectEmit(); + emit BurnAccessRevoked(STRANGER); + + s_burnMintERC677.revokeBurnRole(STRANGER); + + assertFalse(s_burnMintERC677.isBurner(STRANGER)); + } + + function testGrantManySuccess() public { + uint256 numberOfPools = 10; + address[] memory permissionedAddresses = new address[](numberOfPools + 1); + permissionedAddresses[0] = s_mockPool; + + for (uint160 i = 0; i < numberOfPools; ++i) { + permissionedAddresses[i + 1] = address(i); + s_burnMintERC677.grantMintAndBurnRoles(address(i)); + } + + assertEq(permissionedAddresses, s_burnMintERC677.getBurners()); + assertEq(permissionedAddresses, s_burnMintERC677.getMinters()); + } +} + +contract BurnMintERC677_grantMintAndBurnRoles is BurnMintERC677Setup { + function testGrantMintAndBurnRolesSuccess() public { + assertFalse(s_burnMintERC677.isMinter(STRANGER)); + assertFalse(s_burnMintERC677.isBurner(STRANGER)); + + vm.expectEmit(); + emit MintAccessGranted(STRANGER); + vm.expectEmit(); + emit BurnAccessGranted(STRANGER); + + s_burnMintERC677.grantMintAndBurnRoles(STRANGER); + + assertTrue(s_burnMintERC677.isMinter(STRANGER)); + assertTrue(s_burnMintERC677.isBurner(STRANGER)); + } +} + +contract BurnMintERC677_decreaseApproval is BurnMintERC677Setup { + function testDecreaseApprovalSuccess() public { + s_burnMintERC677.approve(s_mockPool, s_amount); + uint256 allowance = s_burnMintERC677.allowance(OWNER, s_mockPool); + assertEq(allowance, s_amount); + s_burnMintERC677.decreaseApproval(s_mockPool, s_amount); + assertEq(s_burnMintERC677.allowance(OWNER, s_mockPool), allowance - s_amount); + } +} + +contract BurnMintERC677_increaseApproval is BurnMintERC677Setup { + function testIncreaseApprovalSuccess() public { + s_burnMintERC677.approve(s_mockPool, s_amount); + uint256 allowance = s_burnMintERC677.allowance(OWNER, s_mockPool); + assertEq(allowance, s_amount); + s_burnMintERC677.increaseApproval(s_mockPool, 
s_amount); + assertEq(s_burnMintERC677.allowance(OWNER, s_mockPool), allowance + s_amount); + } +} + +contract BurnMintERC677_supportsInterface is BurnMintERC677Setup { + function testConstructorSuccess() public { + assertTrue(s_burnMintERC677.supportsInterface(type(IERC20).interfaceId)); + assertTrue(s_burnMintERC677.supportsInterface(type(IERC677).interfaceId)); + assertTrue(s_burnMintERC677.supportsInterface(type(IBurnMintERC20).interfaceId)); + assertTrue(s_burnMintERC677.supportsInterface(type(IERC165).interfaceId)); + } +} diff --git a/contracts/src/v0.8/shared/test/token/ERC677/OpStackBurnMintERC677.t.sol b/contracts/src/v0.8/shared/test/token/ERC677/OpStackBurnMintERC677.t.sol new file mode 100644 index 00000000..8be6a432 --- /dev/null +++ b/contracts/src/v0.8/shared/test/token/ERC677/OpStackBurnMintERC677.t.sol @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {IBurnMintERC20} from "../../../token/ERC20/IBurnMintERC20.sol"; +import {IOptimismMintableERC20Minimal, IOptimismMintableERC20} from "../../../token/ERC20/IOptimismMintableERC20.sol"; +import {IERC677} from "../../../token/ERC677/IERC677.sol"; + +import {BurnMintERC677} from "../../../token/ERC677/BurnMintERC677.sol"; +import {BaseTest} from "../../BaseTest.t.sol"; +import {OpStackBurnMintERC677} from "../../../token/ERC677/OpStackBurnMintERC677.sol"; + +import {IERC165} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/IERC165.sol"; + +contract OpStackBurnMintERC677Setup is BaseTest { + address internal s_l1Token = address(897352983527); + address internal s_l2Bridge = address(1928235235); + + OpStackBurnMintERC677 internal s_opStackBurnMintERC677; + + function setUp() public virtual override { + BaseTest.setUp(); + s_opStackBurnMintERC677 = new OpStackBurnMintERC677("Plugin Token", "PLI", 18, 1e27, s_l1Token, s_l2Bridge); + } +} + +contract OpStackBurnMintERC677_constructor is OpStackBurnMintERC677Setup { + function 
testConstructorSuccess() public { + string memory name = "Plugin token l2"; + string memory symbol = "PLI L2"; + uint8 decimals = 18; + uint256 maxSupply = 1e33; + s_opStackBurnMintERC677 = new OpStackBurnMintERC677(name, symbol, decimals, maxSupply, s_l1Token, s_l2Bridge); + + assertEq(name, s_opStackBurnMintERC677.name()); + assertEq(symbol, s_opStackBurnMintERC677.symbol()); + assertEq(decimals, s_opStackBurnMintERC677.decimals()); + assertEq(maxSupply, s_opStackBurnMintERC677.maxSupply()); + assertEq(s_l1Token, s_opStackBurnMintERC677.remoteToken()); + assertEq(s_l2Bridge, s_opStackBurnMintERC677.bridge()); + } +} + +contract OpStackBurnMintERC677_supportsInterface is OpStackBurnMintERC677Setup { + function testConstructorSuccess() public { + assertTrue(s_opStackBurnMintERC677.supportsInterface(type(IOptimismMintableERC20Minimal).interfaceId)); + assertTrue(s_opStackBurnMintERC677.supportsInterface(type(IERC677).interfaceId)); + assertTrue(s_opStackBurnMintERC677.supportsInterface(type(IBurnMintERC20).interfaceId)); + assertTrue(s_opStackBurnMintERC677.supportsInterface(type(IERC165).interfaceId)); + } +} + +contract OpStackBurnMintERC677_interfaceCompatibility is OpStackBurnMintERC677Setup { + event Transfer(address indexed from, address indexed to, uint256 value); + + IOptimismMintableERC20 internal s_opStackToken; + + function setUp() public virtual override { + OpStackBurnMintERC677Setup.setUp(); + s_opStackToken = IOptimismMintableERC20(address(s_opStackBurnMintERC677)); + } + + function testStaticFunctionsCompatibility() public { + assertEq(s_l1Token, s_opStackToken.remoteToken()); + assertEq(s_l2Bridge, s_opStackToken.bridge()); + } + + function testMintCompatibility() public { + // Ensure roles work + vm.expectRevert(abi.encodeWithSelector(BurnMintERC677.SenderNotMinter.selector, OWNER)); + s_opStackToken.mint(OWNER, 1); + + // Use the actual contract to grant mint + s_opStackBurnMintERC677.grantMintRole(OWNER); + + // Ensure zero address check works + 
vm.expectRevert("ERC20: mint to the zero address"); + s_opStackToken.mint(address(0x0), 0); + + address mintToAddress = address(0x1); + uint256 mintAmount = 1; + + vm.expectEmit(); + emit Transfer(address(0), mintToAddress, mintAmount); + + s_opStackToken.mint(mintToAddress, mintAmount); + } + + function testBurnCompatibility() public { + // Ensure roles work + vm.expectRevert(abi.encodeWithSelector(BurnMintERC677.SenderNotBurner.selector, OWNER)); + s_opStackToken.burn(address(0x0), 1); + + // Use the actual contract to grant burn + s_opStackBurnMintERC677.grantBurnRole(OWNER); + + // Ensure zero address check works + vm.expectRevert("ERC20: approve from the zero address"); + s_opStackToken.burn(address(0x0), 0); + + address burnFromAddress = address(0x1); + uint256 burnAmount = 1; + + // Ensure `burn(address, amount)` works like burnFrom and requires allowance + vm.expectRevert("ERC20: insufficient allowance"); + s_opStackToken.burn(burnFromAddress, burnAmount); + + changePrank(burnFromAddress); + deal(address(s_opStackToken), burnFromAddress, burnAmount); + s_opStackBurnMintERC677.approve(OWNER, burnAmount); + changePrank(OWNER); + + vm.expectEmit(); + emit Transfer(burnFromAddress, address(0x0), burnAmount); + + s_opStackToken.burn(burnFromAddress, burnAmount); + } +} diff --git a/contracts/src/v0.8/shared/token/ERC20/IBurnMintERC20.sol b/contracts/src/v0.8/shared/token/ERC20/IBurnMintERC20.sol new file mode 100644 index 00000000..b9b3b54b --- /dev/null +++ b/contracts/src/v0.8/shared/token/ERC20/IBurnMintERC20.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {IERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; + +interface IBurnMintERC20 is IERC20 { + /// @notice Mints new tokens for a given address. + /// @param account The address to mint the new tokens to. + /// @param amount The number of tokens to be minted. + /// @dev this function increases the total supply. 
+ function mint(address account, uint256 amount) external; + + /// @notice Burns tokens from the sender. + /// @param amount The number of tokens to be burned. + /// @dev this function decreases the total supply. + function burn(uint256 amount) external; + + /// @notice Burns tokens from a given address.. + /// @param account The address to burn tokens from. + /// @param amount The number of tokens to be burned. + /// @dev this function decreases the total supply. + function burn(address account, uint256 amount) external; + + /// @notice Burns tokens from a given address.. + /// @param account The address to burn tokens from. + /// @param amount The number of tokens to be burned. + /// @dev this function decreases the total supply. + function burnFrom(address account, uint256 amount) external; +} diff --git a/contracts/src/v0.8/shared/token/ERC20/IOptimismMintableERC20.sol b/contracts/src/v0.8/shared/token/ERC20/IOptimismMintableERC20.sol new file mode 100644 index 00000000..4e9d3a24 --- /dev/null +++ b/contracts/src/v0.8/shared/token/ERC20/IOptimismMintableERC20.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: MIT +// solhint-disable one-contract-per-file +pragma solidity ^0.8.0; + +import {IERC165} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/IERC165.sol"; + +/// @title IOptimismMintableERC20Minimal +/// @dev This interface is a subset of the Optimism ERC20 interface that is defined +/// below. This is done to now have to overwrite the burn and mint functions again in +/// the implementation, as that leads to more complicated, error prone code. +interface IOptimismMintableERC20Minimal is IERC165 { + /// @notice Returns the address of the token on L1. + function remoteToken() external view returns (address); + + /// @notice Returns the address of the bridge on L2. 
+ function bridge() external returns (address); +} + +/// @title IOptimismMintableERC20 +/// @notice This is the complete interface for the Optimism mintable ERC20 token as defined in +/// https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/contracts/universal/IOptimismMintableERC20.sol +interface IOptimismMintableERC20 is IERC165, IOptimismMintableERC20Minimal { + function mint(address _to, uint256 _amount) external; + + function burn(address _from, uint256 _amount) external; +} diff --git a/contracts/src/v0.8/shared/token/ERC677/BurnMintERC677.sol b/contracts/src/v0.8/shared/token/ERC677/BurnMintERC677.sol new file mode 100644 index 00000000..556914da --- /dev/null +++ b/contracts/src/v0.8/shared/token/ERC677/BurnMintERC677.sol @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {IBurnMintERC20} from "../ERC20/IBurnMintERC20.sol"; +import {IERC677} from "./IERC677.sol"; + +import {ERC677} from "./ERC677.sol"; +import {OwnerIsCreator} from "../../access/OwnerIsCreator.sol"; + +import {ERC20Burnable} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/ERC20Burnable.sol"; +import {EnumerableSet} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableSet.sol"; +import {IERC165} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/IERC165.sol"; +import {IERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; + +/// @notice A basic ERC677 compatible token contract with burn and minting roles. +/// @dev The total supply can be limited during deployment. 
+contract BurnMintERC677 is IBurnMintERC20, ERC677, IERC165, ERC20Burnable, OwnerIsCreator { + using EnumerableSet for EnumerableSet.AddressSet; + + error SenderNotMinter(address sender); + error SenderNotBurner(address sender); + error MaxSupplyExceeded(uint256 supplyAfterMint); + + event MintAccessGranted(address indexed minter); + event BurnAccessGranted(address indexed burner); + event MintAccessRevoked(address indexed minter); + event BurnAccessRevoked(address indexed burner); + + // @dev the allowed minter addresses + EnumerableSet.AddressSet internal s_minters; + // @dev the allowed burner addresses + EnumerableSet.AddressSet internal s_burners; + + /// @dev The number of decimals for the token + uint8 internal immutable i_decimals; + + /// @dev The maximum supply of the token, 0 if unlimited + uint256 internal immutable i_maxSupply; + + constructor(string memory name, string memory symbol, uint8 decimals_, uint256 maxSupply_) ERC677(name, symbol) { + i_decimals = decimals_; + i_maxSupply = maxSupply_; + } + + function supportsInterface(bytes4 interfaceId) public pure virtual override returns (bool) { + return + interfaceId == type(IERC20).interfaceId || + interfaceId == type(IERC677).interfaceId || + interfaceId == type(IBurnMintERC20).interfaceId || + interfaceId == type(IERC165).interfaceId; + } + + // ================================================================ + // | ERC20 | + // ================================================================ + + /// @dev Returns the number of decimals used in its user representation. + function decimals() public view virtual override returns (uint8) { + return i_decimals; + } + + /// @dev Returns the max supply of the token, 0 if unlimited. + function maxSupply() public view virtual returns (uint256) { + return i_maxSupply; + } + + /// @dev Uses OZ ERC20 _transfer to disallow sending to address(0). 
+ /// @dev Disallows sending to address(this) + function _transfer(address from, address to, uint256 amount) internal virtual override validAddress(to) { + super._transfer(from, to, amount); + } + + /// @dev Uses OZ ERC20 _approve to disallow approving for address(0). + /// @dev Disallows approving for address(this) + function _approve(address owner, address spender, uint256 amount) internal virtual override validAddress(spender) { + super._approve(owner, spender, amount); + } + + /// @dev Exists to be backwards compatible with the older naming convention. + function decreaseApproval(address spender, uint256 subtractedValue) external returns (bool success) { + return decreaseAllowance(spender, subtractedValue); + } + + /// @dev Exists to be backwards compatible with the older naming convention. + function increaseApproval(address spender, uint256 addedValue) external { + increaseAllowance(spender, addedValue); + } + + /// @notice Check if recipient is valid (not this contract address). + /// @param recipient the account we transfer/approve to. + /// @dev Reverts with an empty revert to be compatible with the existing link token when + /// the recipient is this contract address. + modifier validAddress(address recipient) virtual { + // solhint-disable-next-line reason-string, custom-errors + if (recipient == address(this)) revert(); + _; + } + + // ================================================================ + // | Burning & minting | + // ================================================================ + + /// @inheritdoc ERC20Burnable + /// @dev Uses OZ ERC20 _burn to disallow burning from address(0). + /// @dev Decreases the total supply. + function burn(uint256 amount) public override(IBurnMintERC20, ERC20Burnable) onlyBurner { + super.burn(amount); + } + + /// @inheritdoc IBurnMintERC20 + /// @dev Alias for BurnFrom for compatibility with the older naming convention. + /// @dev Uses burnFrom for all validation & logic. 
+ function burn(address account, uint256 amount) public virtual override { + burnFrom(account, amount); + } + + /// @inheritdoc ERC20Burnable + /// @dev Uses OZ ERC20 _burn to disallow burning from address(0). + /// @dev Decreases the total supply. + function burnFrom(address account, uint256 amount) public override(IBurnMintERC20, ERC20Burnable) onlyBurner { + super.burnFrom(account, amount); + } + + /// @inheritdoc IBurnMintERC20 + /// @dev Uses OZ ERC20 _mint to disallow minting to address(0). + /// @dev Disallows minting to address(this) + /// @dev Increases the total supply. + function mint(address account, uint256 amount) external override onlyMinter validAddress(account) { + if (i_maxSupply != 0 && totalSupply() + amount > i_maxSupply) revert MaxSupplyExceeded(totalSupply() + amount); + + _mint(account, amount); + } + + // ================================================================ + // | Roles | + // ================================================================ + + /// @notice grants both mint and burn roles to `burnAndMinter`. + /// @dev calls public functions so this function does not require + /// access controls. This is handled in the inner functions. + function grantMintAndBurnRoles(address burnAndMinter) external { + grantMintRole(burnAndMinter); + grantBurnRole(burnAndMinter); + } + + /// @notice Grants mint role to the given address. + /// @dev only the owner can call this function. + function grantMintRole(address minter) public onlyOwner { + if (s_minters.add(minter)) { + emit MintAccessGranted(minter); + } + } + + /// @notice Grants burn role to the given address. + /// @dev only the owner can call this function. + function grantBurnRole(address burner) public onlyOwner { + if (s_burners.add(burner)) { + emit BurnAccessGranted(burner); + } + } + + /// @notice Revokes mint role for the given address. + /// @dev only the owner can call this function. 
+ function revokeMintRole(address minter) public onlyOwner { + if (s_minters.remove(minter)) { + emit MintAccessRevoked(minter); + } + } + + /// @notice Revokes burn role from the given address. + /// @dev only the owner can call this function + function revokeBurnRole(address burner) public onlyOwner { + if (s_burners.remove(burner)) { + emit BurnAccessRevoked(burner); + } + } + + /// @notice Returns all permissioned minters + function getMinters() public view returns (address[] memory) { + return s_minters.values(); + } + + /// @notice Returns all permissioned burners + function getBurners() public view returns (address[] memory) { + return s_burners.values(); + } + + // ================================================================ + // | Access | + // ================================================================ + + /// @notice Checks whether a given address is a minter for this token. + /// @return true if the address is allowed to mint. + function isMinter(address minter) public view returns (bool) { + return s_minters.contains(minter); + } + + /// @notice Checks whether a given address is a burner for this token. + /// @return true if the address is allowed to burn. 
+ function isBurner(address burner) public view returns (bool) { + return s_burners.contains(burner); + } + + /// @notice Checks whether the msg.sender is a permissioned minter for this token + /// @dev Reverts with a SenderNotMinter if the check fails + modifier onlyMinter() { + if (!isMinter(msg.sender)) revert SenderNotMinter(msg.sender); + _; + } + + /// @notice Checks whether the msg.sender is a permissioned burner for this token + /// @dev Reverts with a SenderNotBurner if the check fails + modifier onlyBurner() { + if (!isBurner(msg.sender)) revert SenderNotBurner(msg.sender); + _; + } +} diff --git a/contracts/src/v0.8/shared/token/ERC677/ERC677.sol b/contracts/src/v0.8/shared/token/ERC677/ERC677.sol new file mode 100644 index 00000000..aa75a117 --- /dev/null +++ b/contracts/src/v0.8/shared/token/ERC677/ERC677.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.8.0; + +import {IERC677} from "./IERC677.sol"; +import {IERC677Receiver} from "../../interfaces/IERC677Receiver.sol"; + +import {Address} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Address.sol"; +import {ERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/ERC20.sol"; + +contract ERC677 is IERC677, ERC20 { + using Address for address; + + constructor(string memory name, string memory symbol) ERC20(name, symbol) {} + + /// @inheritdoc IERC677 + function transferAndCall(address to, uint256 amount, bytes memory data) public returns (bool success) { + super.transfer(to, amount); + emit Transfer(msg.sender, to, amount, data); + if (to.isContract()) { + IERC677Receiver(to).onTokenTransfer(msg.sender, amount, data); + } + return true; + } +} diff --git a/contracts/src/v0.8/shared/token/ERC677/IERC677.sol b/contracts/src/v0.8/shared/token/ERC677/IERC677.sol new file mode 100644 index 00000000..7e303a42 --- /dev/null +++ b/contracts/src/v0.8/shared/token/ERC677/IERC677.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma 
solidity ^0.8.0; + +interface IERC677 { + event Transfer(address indexed from, address indexed to, uint256 value, bytes data); + + /// @notice Transfer tokens from `msg.sender` to another address and then call `onTokenTransfer` on receiver + /// @param to The address which you want to transfer to + /// @param amount The amount of tokens to be transferred + /// @param data bytes Additional data with no specified format, sent in call to `to` + /// @return true unless throwing + function transferAndCall(address to, uint256 amount, bytes memory data) external returns (bool); +} diff --git a/contracts/src/v0.8/shared/token/ERC677/LinkToken.sol b/contracts/src/v0.8/shared/token/ERC677/LinkToken.sol new file mode 100644 index 00000000..6a90d4b3 --- /dev/null +++ b/contracts/src/v0.8/shared/token/ERC677/LinkToken.sol @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {BurnMintERC677} from "./BurnMintERC677.sol"; + +contract LinkToken is BurnMintERC677 { + constructor() BurnMintERC677("Plugin Token", "PLI", 18, 1e27) {} +} diff --git a/contracts/src/v0.8/shared/token/ERC677/OpStackBurnMintERC677.sol b/contracts/src/v0.8/shared/token/ERC677/OpStackBurnMintERC677.sol new file mode 100644 index 00000000..95c64c9c --- /dev/null +++ b/contracts/src/v0.8/shared/token/ERC677/OpStackBurnMintERC677.sol @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {IOptimismMintableERC20Minimal, IOptimismMintableERC20} from "../ERC20/IOptimismMintableERC20.sol"; + +import {BurnMintERC677} from "./BurnMintERC677.sol"; + +import {IERC165} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/IERC165.sol"; + +/// @notice A basic ERC677 compatible token contract with burn and minting roles that supports +/// the native L2 bridging requirements of the Optimism Stack. 
+/// @dev Note: the L2 bridge contract needs to be given burn and mint privileges manually, +/// since this contract does not automatically grant them. This allows the owner to revoke +/// the bridge's privileges if necessary. +contract OpStackBurnMintERC677 is BurnMintERC677, IOptimismMintableERC20Minimal { + /// @dev The address of the L1 token. + address internal immutable i_l1Token; + /// @dev The address of the L2 bridge. + address internal immutable i_l2Bridge; + + constructor( + string memory name, + string memory symbol, + uint8 decimals_, + uint256 maxSupply_, + address l1Token, + address l2Bridge + ) BurnMintERC677(name, symbol, decimals_, maxSupply_) { + i_l1Token = l1Token; + i_l2Bridge = l2Bridge; + } + + function supportsInterface(bytes4 interfaceId) public pure virtual override(IERC165, BurnMintERC677) returns (bool) { + return + interfaceId == type(IOptimismMintableERC20).interfaceId || + interfaceId == type(IOptimismMintableERC20Minimal).interfaceId || + super.supportsInterface(interfaceId); + } + + /// @notice Returns the address of the L1 token. + function remoteToken() public view override returns (address) { + return i_l1Token; + } + + /// @notice Returns the address of the L2 bridge. 
+ function bridge() public view override returns (address) { + return i_l2Bridge; + } +} diff --git a/contracts/src/v0.8/tests/AutomationConsumerBenchmark.sol b/contracts/src/v0.8/tests/AutomationConsumerBenchmark.sol new file mode 100644 index 00000000..e85521a6 --- /dev/null +++ b/contracts/src/v0.8/tests/AutomationConsumerBenchmark.sol @@ -0,0 +1,82 @@ +pragma solidity 0.8.16; + +contract AutomationConsumerBenchmark { + event PerformingUpkeep(uint256 id, address from, uint256 initialCall, uint256 nextEligible, uint256 blockNumber); + + mapping(uint256 => uint256) public initialCall; + mapping(uint256 => uint256) public nextEligible; + mapping(bytes32 => bool) public dummyMap; // used to force storage lookup + mapping(uint256 => uint256) public count; + uint256 deployedAt; + + constructor() { + deployedAt = block.number; + } + + function checkUpkeep(bytes calldata checkData) external view returns (bool, bytes memory) { + ( + uint256 id, + uint256 interval, + uint256 range, + uint256 checkBurnAmount, + uint256 performBurnAmount, + uint256 firstEligibleBuffer + ) = abi.decode(checkData, (uint256, uint256, uint256, uint256, uint256, uint256)); + uint256 startGas = gasleft(); + bytes32 dummyIndex = blockhash(block.number - 1); + bool dummy; + // burn gas + if (checkBurnAmount > 0 && eligible(id, range, firstEligibleBuffer)) { + while (startGas - gasleft() < checkBurnAmount) { + dummy = dummy && dummyMap[dummyIndex]; // arbitrary storage reads + dummyIndex = keccak256(abi.encode(dummyIndex, address(this))); + } + } + return (eligible(id, range, firstEligibleBuffer), checkData); + } + + function performUpkeep(bytes calldata performData) external { + ( + uint256 id, + uint256 interval, + uint256 range, + uint256 checkBurnAmount, + uint256 performBurnAmount, + uint256 firstEligibleBuffer + ) = abi.decode(performData, (uint256, uint256, uint256, uint256, uint256, uint256)); + require(eligible(id, range, firstEligibleBuffer)); + uint256 startGas = gasleft(); + if 
(initialCall[id] == 0) { + initialCall[id] = block.number; + } + nextEligible[id] = block.number + interval; + count[id]++; + emit PerformingUpkeep(id, tx.origin, initialCall[id], nextEligible[id], block.number); + // burn gas + bytes32 dummyIndex = blockhash(block.number - 1); + bool dummy; + while (startGas - gasleft() < performBurnAmount) { + dummy = dummy && dummyMap[dummyIndex]; // arbitrary storage reads + dummyIndex = keccak256(abi.encode(dummyIndex, address(this))); + } + } + + function getCountPerforms(uint256 id) public view returns (uint256) { + return count[id]; + } + + function eligible(uint256 id, uint256 range, uint256 firstEligibleBuffer) internal view returns (bool) { + return + initialCall[id] == 0 + ? block.number >= firstEligibleBuffer + deployedAt + : (block.number - initialCall[id] < range && block.number > nextEligible[id]); + } + + function checkEligible(uint256 id, uint256 range, uint256 firstEligibleBuffer) public view returns (bool) { + return eligible(id, range, firstEligibleBuffer); + } + + function reset() external { + deployedAt = block.number; + } +} diff --git a/contracts/src/v0.8/tests/Broken.sol b/contracts/src/v0.8/tests/Broken.sol new file mode 100644 index 00000000..21fa9b01 --- /dev/null +++ b/contracts/src/v0.8/tests/Broken.sol @@ -0,0 +1,18 @@ +pragma solidity ^0.8.0; + +// Broken is a contract to aid debugging and testing reverting calls during development. 
+contract Broken { + error Unauthorized(string reason, int256 reason2); + + function revertWithCustomError() public pure { + revert Unauthorized("param", 121); + } + + function revertWithMessage(string memory message) public pure { + require(false, message); + } + + function revertSilently() public pure { + require(false); + } +} diff --git a/contracts/src/v0.8/tests/Counter.sol b/contracts/src/v0.8/tests/Counter.sol new file mode 100644 index 00000000..1ceb7891 --- /dev/null +++ b/contracts/src/v0.8/tests/Counter.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +contract Counter { + error AlwaysRevert(); + + uint256 public count = 0; + + function increment() public returns (uint256) { + count += 1; + return count; + } + + function reset() public { + count = 0; + } + + function alwaysRevert() public pure { + revert AlwaysRevert(); + } + + function alwaysRevertWithString() public pure { + revert("always revert"); + } +} diff --git a/contracts/src/v0.8/tests/CronReceiver.sol b/contracts/src/v0.8/tests/CronReceiver.sol new file mode 100644 index 00000000..cee50c13 --- /dev/null +++ b/contracts/src/v0.8/tests/CronReceiver.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +contract CronReceiver { + event Received1(); + event Received2(); + + function handler1() external { + emit Received1(); + } + + function handler2() external { + emit Received2(); + } + + function revertHandler() external { + revert("revert!"); + } +} diff --git a/contracts/src/v0.8/tests/ERC20BalanceMonitorExposed.sol b/contracts/src/v0.8/tests/ERC20BalanceMonitorExposed.sol new file mode 100644 index 00000000..a29ba36e --- /dev/null +++ b/contracts/src/v0.8/tests/ERC20BalanceMonitorExposed.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +import "../automation/upkeeps/ERC20BalanceMonitor.sol"; + +contract ERC20BalanceMonitorExposed is ERC20BalanceMonitor { + constructor( + address erc20TokenAddress, + 
address keeperRegistryAddress, + uint256 minWaitPeriod + ) ERC20BalanceMonitor(erc20TokenAddress, keeperRegistryAddress, minWaitPeriod) {} + + function setLastTopUpXXXTestOnly(address target, uint56 lastTopUpTimestamp) external { + s_targets[target].lastTopUpTimestamp = lastTopUpTimestamp; + } +} diff --git a/contracts/src/v0.8/tests/EthBalanceMonitorExposed.sol b/contracts/src/v0.8/tests/EthBalanceMonitorExposed.sol new file mode 100644 index 00000000..74cc682d --- /dev/null +++ b/contracts/src/v0.8/tests/EthBalanceMonitorExposed.sol @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +import "../automation/upkeeps/EthBalanceMonitor.sol"; + +contract EthBalanceMonitorExposed is EthBalanceMonitor { + constructor( + address keeperRegistryAddress, + uint256 minWaitPeriod + ) EthBalanceMonitor(keeperRegistryAddress, minWaitPeriod) {} + + function setLastTopUpXXXTestOnly(address target, uint56 lastTopUpTimestamp) external { + s_targets[target].lastTopUpTimestamp = lastTopUpTimestamp; + } +} diff --git a/contracts/src/v0.8/tests/FeedConsumer.sol b/contracts/src/v0.8/tests/FeedConsumer.sol new file mode 100644 index 00000000..c9fc6235 --- /dev/null +++ b/contracts/src/v0.8/tests/FeedConsumer.sol @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {AggregatorV2V3Interface} from "../shared/interfaces/AggregatorV2V3Interface.sol"; + +contract FeedConsumer { + AggregatorV2V3Interface public immutable AGGREGATOR; + + constructor(address feedAddress) { + AGGREGATOR = AggregatorV2V3Interface(feedAddress); + } + + function latestAnswer() external view returns (int256 answer) { + return AGGREGATOR.latestAnswer(); + } + + function latestTimestamp() external view returns (uint256) { + return AGGREGATOR.latestTimestamp(); + } + + function latestRound() external view returns (uint256) { + return AGGREGATOR.latestRound(); + } + + function getAnswer(uint256 roundId) external view returns (int256) { + return 
AGGREGATOR.getAnswer(roundId); + } + + function getTimestamp(uint256 roundId) external view returns (uint256) { + return AGGREGATOR.getTimestamp(roundId); + } + + function decimals() external view returns (uint8) { + return AGGREGATOR.decimals(); + } + + function description() external view returns (string memory) { + return AGGREGATOR.description(); + } + + function version() external view returns (uint256) { + return AGGREGATOR.version(); + } + + function getRoundData( + uint80 _roundId + ) + external + view + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + return AGGREGATOR.getRoundData(_roundId); + } + + function latestRoundData() + external + view + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + return AGGREGATOR.latestRoundData(); + } +} diff --git a/contracts/src/v0.8/tests/FlagsTestHelper.sol b/contracts/src/v0.8/tests/FlagsTestHelper.sol new file mode 100644 index 00000000..3e35cae8 --- /dev/null +++ b/contracts/src/v0.8/tests/FlagsTestHelper.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "../Flags.sol"; + +contract FlagsTestHelper { + Flags public flags; + + constructor(address flagsContract) { + flags = Flags(flagsContract); + } + + function getFlag(address subject) external view returns (bool) { + return flags.getFlag(subject); + } + + function getFlags(address[] calldata subjects) external view returns (bool[] memory) { + return flags.getFlags(subjects); + } +} diff --git a/contracts/src/v0.8/tests/Greeter.sol b/contracts/src/v0.8/tests/Greeter.sol new file mode 100644 index 00000000..88ccca56 --- /dev/null +++ b/contracts/src/v0.8/tests/Greeter.sol @@ -0,0 +1,18 @@ +pragma solidity ^0.8.0; + +import "../shared/access/ConfirmedOwner.sol"; + +contract Greeter is ConfirmedOwner { + string public greeting; + + constructor(address owner) ConfirmedOwner(owner) {} + + function setGreeting(string 
calldata _greeting) external onlyOwner { + require(bytes(_greeting).length > 0, "Invalid greeting length"); + greeting = _greeting; + } + + function triggerRevert() external pure { + require(false, "Greeter: revert triggered"); + } +} diff --git a/contracts/src/v0.8/tests/KeeperCompatibleTestHelper.sol b/contracts/src/v0.8/tests/KeeperCompatibleTestHelper.sol new file mode 100644 index 00000000..2e931c4f --- /dev/null +++ b/contracts/src/v0.8/tests/KeeperCompatibleTestHelper.sol @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "../automation/KeeperCompatible.sol"; + +contract KeeperCompatibleTestHelper is KeeperCompatible { + function checkUpkeep(bytes calldata) external override returns (bool, bytes memory) {} + + function performUpkeep(bytes calldata) external override {} + + function verifyCannotExecute() public view cannotExecute {} +} diff --git a/contracts/src/v0.8/tests/LogEmitter.sol b/contracts/src/v0.8/tests/LogEmitter.sol new file mode 100644 index 00000000..37306cc2 --- /dev/null +++ b/contracts/src/v0.8/tests/LogEmitter.sol @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract LogEmitter { + event Log1(uint256); + event Log2(uint256 indexed); + event Log3(string); + event Log4(uint256 indexed, uint256 indexed); + + function EmitLog1(uint256[] memory v) public { + for (uint256 i = 0; i < v.length; i++) { + emit Log1(v[i]); + } + } + + function EmitLog2(uint256[] memory v) public { + for (uint256 i = 0; i < v.length; i++) { + emit Log2(v[i]); + } + } + + function EmitLog3(string[] memory v) public { + for (uint256 i = 0; i < v.length; i++) { + emit Log3(v[i]); + } + } + + function EmitLog4(uint256 v, uint256 w, uint256 c) public { + for (uint256 i = 0; i < c; i++) { + emit Log4(v, w); + } + } +} diff --git a/contracts/src/v0.8/tests/MockArbGasInfo.sol b/contracts/src/v0.8/tests/MockArbGasInfo.sol new file mode 100644 index 00000000..f85e0284 --- /dev/null +++ 
b/contracts/src/v0.8/tests/MockArbGasInfo.sol @@ -0,0 +1,11 @@ +pragma solidity 0.8.6; + +contract MockArbGasInfo { + function getCurrentTxL1GasFees() external view returns (uint256) { + return 1000000; + } + + function getPricesInWei() external view returns (uint256, uint256, uint256, uint256, uint256, uint256) { + return (0, 1000, 0, 0, 0, 0); + } +} diff --git a/contracts/src/v0.8/tests/MockArbitrumInbox.sol b/contracts/src/v0.8/tests/MockArbitrumInbox.sol new file mode 100644 index 00000000..445a361b --- /dev/null +++ b/contracts/src/v0.8/tests/MockArbitrumInbox.sol @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +import {IInbox} from "../vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IInbox.sol"; +import {IBridge} from "../vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IBridge.sol"; + +contract MockArbitrumInbox is IInbox { + event RetryableTicketNoRefundAliasRewriteCreated( + address destAddr, + uint256 arbTxCallValue, + uint256 maxSubmissionCost, + address submissionRefundAddress, + address valueRefundAddress, + uint256 maxGas, + uint256 gasPriceBid, + bytes data + ); + + function sendL2Message(bytes calldata /* messageData */) external pure override returns (uint256) { + return 0; + } + + function sendUnsignedTransaction( + uint256 /* maxGas */, + uint256 /* gasPriceBid */, + uint256 /* nonce */, + address /* destAddr */, + uint256 /* amount */, + bytes calldata /* data */ + ) external pure override returns (uint256) { + return 0; + } + + function sendContractTransaction( + uint256 /* maxGas */, + uint256 /* gasPriceBid */, + address /* destAddr */, + uint256 /* amount */, + bytes calldata /* data */ + ) external pure override returns (uint256) { + return 0; + } + + function sendL1FundedUnsignedTransaction( + uint256 /* maxGas */, + uint256 /* gasPriceBid */, + uint256 /* nonce */, + address /* destAddr */, + bytes calldata /* data */ + ) external payable override returns (uint256) { + return 
0; + } + + function sendL1FundedContractTransaction( + uint256 /* maxGas */, + uint256 /* gasPriceBid */, + address /* destAddr */, + bytes calldata /* data */ + ) external payable override returns (uint256) { + return 0; + } + + function createRetryableTicketNoRefundAliasRewrite( + address destAddr, + uint256 arbTxCallValue, + uint256 maxSubmissionCost, + address submissionRefundAddress, + address valueRefundAddress, + uint256 maxGas, + uint256 gasPriceBid, + bytes calldata data + ) external payable override returns (uint256) { + emit RetryableTicketNoRefundAliasRewriteCreated( + destAddr, + arbTxCallValue, + maxSubmissionCost, + submissionRefundAddress, + valueRefundAddress, + maxGas, + gasPriceBid, + data + ); + return 42; + } + + function createRetryableTicket( + address /* destAddr */, + uint256 /* arbTxCallValue */, + uint256 /* maxSubmissionCost */, + address /* submissionRefundAddress */, + address /* valueRefundAddress */, + uint256 /* maxGas */, + uint256 /* gasPriceBid */, + bytes calldata /* data */ + ) external payable override returns (uint256) { + return 0; + } + + function depositEth(address /* destAddr */) external payable override returns (uint256) { + return 0; + } + + function depositEthRetryable( + address /* destAddr */, + uint256 /* maxSubmissionCost */, + uint256 /* maxGas */, + uint256 /* maxGasPrice */ + ) external payable override returns (uint256) { + return 0; + } + + function bridge() external pure override returns (IBridge) { + return IBridge(address(0)); + } + + /// @notice This mock function simply replicates the formula used by Arbitrum's + /// DelayedInbox in the Nitro upgrade. The function has been copied here from the Arbitrum + /// team's repository. + /// @param dataLength The length of the calldata that will be executed in L2 + /// @param baseFee The base fee to pay for the transaction. + /// @dev The calculation will use the L1 base fee if it is passed 0. 
+ function calculateRetryableSubmissionFee(uint256 dataLength, uint256 baseFee) public view returns (uint256) { + return (1400 + 6 * dataLength) * (baseFee == 0 ? block.basefee : baseFee); + } +} diff --git a/contracts/src/v0.8/tests/MockETHLINKAggregator.sol b/contracts/src/v0.8/tests/MockETHLINKAggregator.sol new file mode 100644 index 00000000..a0da0363 --- /dev/null +++ b/contracts/src/v0.8/tests/MockETHLINKAggregator.sol @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "../shared/interfaces/AggregatorV3Interface.sol"; + +contract MockETHPLIAggregator is AggregatorV3Interface { + int256 public answer; + + constructor(int256 _answer) public { + answer = _answer; + } + + function decimals() external view override returns (uint8) { + return 18; + } + + function description() external view override returns (string memory) { + return "MockETHPLIAggregator"; + } + + function version() external view override returns (uint256) { + return 1; + } + + function getRoundData( + uint80 _roundId + ) + external + view + override + returns (uint80 roundId, int256 ans, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + return (1, answer, block.timestamp, block.timestamp, 1); + } + + function latestRoundData() + external + view + override + returns (uint80 roundId, int256 ans, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + return (1, answer, block.timestamp, block.timestamp, 1); + } +} diff --git a/contracts/src/v0.8/tests/MockOVMGasPriceOracle.sol b/contracts/src/v0.8/tests/MockOVMGasPriceOracle.sol new file mode 100644 index 00000000..29790b0e --- /dev/null +++ b/contracts/src/v0.8/tests/MockOVMGasPriceOracle.sol @@ -0,0 +1,7 @@ +pragma solidity 0.8.6; + +contract MockOVMGasPriceOracle { + function getL1Fee(bytes memory _data) public view returns (uint256) { + return 2000000; + } +} diff --git a/contracts/src/v0.8/tests/MockOptimismL1CrossDomainMessenger.sol 
b/contracts/src/v0.8/tests/MockOptimismL1CrossDomainMessenger.sol new file mode 100644 index 00000000..a92ff8fb --- /dev/null +++ b/contracts/src/v0.8/tests/MockOptimismL1CrossDomainMessenger.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +/* Interface Imports */ +import {IL1CrossDomainMessenger} from "@eth-optimism/contracts/L1/messaging/IL1CrossDomainMessenger.sol"; + +contract MockOptimismL1CrossDomainMessenger is IL1CrossDomainMessenger { + uint256 private s_nonce; + + // slither-disable-next-line external-function + function xDomainMessageSender() public pure returns (address) { + return address(0); + } + + function sendMessage(address _target, bytes memory _message, uint32 _gasLimit) public { + emit SentMessage(_target, msg.sender, _message, s_nonce, _gasLimit); + s_nonce++; + } + + /** + * Relays a cross domain message to a contract. + * @inheritdoc IL1CrossDomainMessenger + */ + // slither-disable-next-line external-function + function relayMessage( + address _target, + address _sender, + bytes memory _message, + uint256 _messageNonce, + L2MessageInclusionProof memory _proof + ) public {} + + function replayMessage( + address _target, + address _sender, + bytes memory _message, + uint256 _queueIndex, + uint32 _oldGasLimit, + uint32 _newGasLimit + ) public {} +} diff --git a/contracts/src/v0.8/tests/MockOptimismL2CrossDomainMessenger.sol b/contracts/src/v0.8/tests/MockOptimismL2CrossDomainMessenger.sol new file mode 100644 index 00000000..38ec3378 --- /dev/null +++ b/contracts/src/v0.8/tests/MockOptimismL2CrossDomainMessenger.sol @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +/* Interface Imports */ +import {IL2CrossDomainMessenger} from "@eth-optimism/contracts/L2/messaging/IL2CrossDomainMessenger.sol"; + +contract MockOptimismL2CrossDomainMessenger is IL2CrossDomainMessenger { + uint256 private s_nonce; + address private s_sender; + + // slither-disable-next-line external-function + function 
xDomainMessageSender() public view returns (address) { + return s_sender; + } + + function setSender(address newSender) external { + s_sender = newSender; + } + + function sendMessage(address _target, bytes memory _message, uint32 _gasLimit) public { + emit SentMessage(_target, msg.sender, _message, s_nonce, _gasLimit); + s_nonce++; + } + + function relayMessage(address _target, address _sender, bytes memory _message, uint256 _messageNonce) external {} + + receive() external payable {} +} diff --git a/contracts/src/v0.8/tests/MockV3Aggregator.sol b/contracts/src/v0.8/tests/MockV3Aggregator.sol new file mode 100644 index 00000000..9822d23e --- /dev/null +++ b/contracts/src/v0.8/tests/MockV3Aggregator.sol @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "../shared/interfaces/AggregatorV2V3Interface.sol"; + +/** + * @title MockV3Aggregator + * @notice Based on the FluxAggregator contract + * @notice Use this contract when you need to test + * other contract's ability to read data from an + * aggregator contract, but how the aggregator got + * its answer is unimportant + */ +contract MockV3Aggregator is AggregatorV2V3Interface { + uint256 public constant override version = 0; + + uint8 public override decimals; + int256 public override latestAnswer; + uint256 public override latestTimestamp; + uint256 public override latestRound; + + mapping(uint256 => int256) public override getAnswer; + mapping(uint256 => uint256) public override getTimestamp; + mapping(uint256 => uint256) private getStartedAt; + + constructor(uint8 _decimals, int256 _initialAnswer) { + decimals = _decimals; + updateAnswer(_initialAnswer); + } + + function updateAnswer(int256 _answer) public { + latestAnswer = _answer; + latestTimestamp = block.timestamp; + latestRound++; + getAnswer[latestRound] = _answer; + getTimestamp[latestRound] = block.timestamp; + getStartedAt[latestRound] = block.timestamp; + } + + function updateRoundData(uint80 _roundId, int256 _answer, 
uint256 _timestamp, uint256 _startedAt) public { + latestRound = _roundId; + latestAnswer = _answer; + latestTimestamp = _timestamp; + getAnswer[latestRound] = _answer; + getTimestamp[latestRound] = _timestamp; + getStartedAt[latestRound] = _startedAt; + } + + function getRoundData( + uint80 _roundId + ) + external + view + override + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + return (_roundId, getAnswer[_roundId], getStartedAt[_roundId], getTimestamp[_roundId], _roundId); + } + + function latestRoundData() + external + view + override + returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + return ( + uint80(latestRound), + getAnswer[latestRound], + getStartedAt[latestRound], + getTimestamp[latestRound], + uint80(latestRound) + ); + } + + function description() external pure override returns (string memory) { + return "v0.8/tests/MockV3Aggregator.sol"; + } +} diff --git a/contracts/src/v0.8/tests/PluginClientTestHelper.sol b/contracts/src/v0.8/tests/PluginClientTestHelper.sol new file mode 100644 index 00000000..b1e4b302 --- /dev/null +++ b/contracts/src/v0.8/tests/PluginClientTestHelper.sol @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "../PluginClient.sol"; + +contract PluginClientTestHelper is PluginClient { + constructor(address _link, address _oracle) { + _setPluginToken(_link); + _setPluginOracle(_oracle); + } + + event Request(bytes32 id, address callbackAddress, bytes4 callbackfunctionSelector, bytes data); + event LinkAmount(uint256 amount); + + function publicNewRequest(bytes32 _id, address _address, bytes memory _fulfillmentSignature) public { + Plugin.Request memory req = _buildPluginRequest(_id, _address, bytes4(keccak256(_fulfillmentSignature))); + emit Request(req.id, req.callbackAddress, req.callbackFunctionId, req.buf.buf); + } + + function publicRequest(bytes32 _id, address _address, bytes 
memory _fulfillmentSignature, uint256 _wei) public { + Plugin.Request memory req = _buildPluginRequest(_id, _address, bytes4(keccak256(_fulfillmentSignature))); + _sendPluginRequest(req, _wei); + } + + function publicRequestRunTo( + address _oracle, + bytes32 _id, + address _address, + bytes memory _fulfillmentSignature, + uint256 _wei + ) public { + Plugin.Request memory run = _buildPluginRequest(_id, _address, bytes4(keccak256(_fulfillmentSignature))); + _sendPluginRequestTo(_oracle, run, _wei); + } + + function publicRequestOracleData(bytes32 _id, bytes memory _fulfillmentSignature, uint256 _wei) public { + Plugin.Request memory req = _buildOperatorRequest(_id, bytes4(keccak256(_fulfillmentSignature))); + _sendOperatorRequest(req, _wei); + } + + function publicRequestOracleDataFrom( + address _oracle, + bytes32 _id, + bytes memory _fulfillmentSignature, + uint256 _wei + ) public { + Plugin.Request memory run = _buildOperatorRequest(_id, bytes4(keccak256(_fulfillmentSignature))); + _sendOperatorRequestTo(_oracle, run, _wei); + } + + function publicCancelRequest( + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunctionId, + uint256 _expiration + ) public { + _cancelPluginRequest(_requestId, _payment, _callbackFunctionId, _expiration); + } + + function publicPluginToken() public view returns (address) { + return _pluginTokenAddress(); + } + + function publicFulfillPluginRequest(bytes32 _requestId, bytes32) public { + fulfillRequest(_requestId, bytes32(0)); + } + + function fulfillRequest(bytes32 _requestId, bytes32) public { + _validatePluginCallback(_requestId); + } + + function publicPLI(uint256 _amount) public { + emit LinkAmount(PLI_DIVISIBILITY * _amount); + } + + function publicOracleAddress() public view returns (address) { + return _pluginOracleAddress(); + } + + function publicAddExternalRequest(address _oracle, bytes32 _requestId) public { + _addPluginExternalRequest(_oracle, _requestId); + } +} diff --git 
a/contracts/src/v0.8/tests/PluginTestHelper.sol b/contracts/src/v0.8/tests/PluginTestHelper.sol new file mode 100644 index 00000000..3ecce1fe --- /dev/null +++ b/contracts/src/v0.8/tests/PluginTestHelper.sol @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "../Plugin.sol"; +import "../vendor/CBORPlugin.sol"; +import "../vendor/BufferPlugin.sol"; + +contract PluginTestHelper { + using Plugin for Plugin.Request; + using CBORPlugin for BufferPlugin.buffer; + + Plugin.Request private req; + + event RequestData(bytes payload); + + function closeEvent() public { + emit RequestData(req.buf.buf); + } + + function setBuffer(bytes memory data) public { + Plugin.Request memory r2 = req; + r2._setBuffer(data); + req = r2; + } + + function add(string memory _key, string memory _value) public { + Plugin.Request memory r2 = req; + r2._add(_key, _value); + req = r2; + } + + function addBytes(string memory _key, bytes memory _value) public { + Plugin.Request memory r2 = req; + r2._addBytes(_key, _value); + req = r2; + } + + function addInt(string memory _key, int256 _value) public { + Plugin.Request memory r2 = req; + r2._addInt(_key, _value); + req = r2; + } + + function addUint(string memory _key, uint256 _value) public { + Plugin.Request memory r2 = req; + r2._addUint(_key, _value); + req = r2; + } + + // Temporarily have method receive bytes32[] memory until experimental + // string[] memory can be invoked from truffle tests. 
+ function addStringArray(string memory _key, string[] memory _values) public { + Plugin.Request memory r2 = req; + r2._addStringArray(_key, _values); + req = r2; + } +} diff --git a/contracts/src/v0.8/tests/ReceiveEmitter.sol b/contracts/src/v0.8/tests/ReceiveEmitter.sol new file mode 100644 index 00000000..641bf756 --- /dev/null +++ b/contracts/src/v0.8/tests/ReceiveEmitter.sol @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +contract ReceiveEmitter { + event FundsReceived(uint256 amount, uint256 newBalance); + + receive() external payable { + emit FundsReceived(msg.value, address(this).balance); + } +} diff --git a/contracts/src/v0.8/tests/ReceiveFallbackEmitter.sol b/contracts/src/v0.8/tests/ReceiveFallbackEmitter.sol new file mode 100644 index 00000000..618787e9 --- /dev/null +++ b/contracts/src/v0.8/tests/ReceiveFallbackEmitter.sol @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +contract ReceiveFallbackEmitter { + event FundsReceived(uint256 amount, uint256 newBalance); + + fallback() external payable { + emit FundsReceived(msg.value, address(this).balance); + } +} diff --git a/contracts/src/v0.8/tests/ReceiveReverter.sol b/contracts/src/v0.8/tests/ReceiveReverter.sol new file mode 100644 index 00000000..d9a2bdcc --- /dev/null +++ b/contracts/src/v0.8/tests/ReceiveReverter.sol @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +contract ReceiveReverter { + receive() external payable { + revert("Can't send funds"); + } +} diff --git a/contracts/src/v0.8/tests/StreamsLookupUpkeep.sol b/contracts/src/v0.8/tests/StreamsLookupUpkeep.sol new file mode 100644 index 00000000..05882377 --- /dev/null +++ b/contracts/src/v0.8/tests/StreamsLookupUpkeep.sol @@ -0,0 +1,151 @@ +pragma solidity 0.8.16; + +import "../automation/interfaces/AutomationCompatibleInterface.sol"; +import "../automation/interfaces/StreamsLookupCompatibleInterface.sol"; +import {ArbSys} from 
"../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol"; + +interface IVerifierProxy { + /** + * @notice Verifies that the data encoded has been signed + * correctly by routing to the correct verifier. + * @param signedReport The encoded data to be verified. + * @return verifierResponse The encoded response from the verifier. + */ + function verify(bytes memory signedReport) external returns (bytes memory verifierResponse); +} + +contract StreamsLookupUpkeep is AutomationCompatibleInterface, StreamsLookupCompatibleInterface { + event MercuryPerformEvent(address indexed sender, uint256 indexed blockNumber, bytes v0, bytes verifiedV0, bytes ed); + + ArbSys internal constant ARB_SYS = ArbSys(0x0000000000000000000000000000000000000064); + // keep these in sync with verifier proxy in RDD + IVerifierProxy internal constant PRODUCTION_TESTNET_VERIFIER_PROXY = + IVerifierProxy(0x09DFf56A4fF44e0f4436260A04F5CFa65636A481); + IVerifierProxy internal constant STAGING_TESTNET_VERIFIER_PROXY = + IVerifierProxy(0x60448B880c9f3B501af3f343DA9284148BD7D77C); + + uint256 public testRange; + uint256 public interval; + uint256 public previousPerformBlock; + uint256 public initialBlock; + uint256 public counter; + string[] public feeds; + string public feedParamKey; + string public timeParamKey; + bool public immutable useArbBlock; + bool public staging; + bool public verify; + bool public shouldRevertCallback; + bool public callbackReturnBool; + + constructor(uint256 _testRange, uint256 _interval, bool _useArbBlock, bool _staging, bool _verify) { + testRange = _testRange; + interval = _interval; + previousPerformBlock = 0; + initialBlock = 0; + counter = 0; + useArbBlock = _useArbBlock; + feedParamKey = "feedIDs"; // feedIDs for v0.3 + timeParamKey = "timestamp"; // timestamp + // search feeds in notion: "Schema and Feed ID Registry" + feeds = [ + //"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", // ETH / USD in production testnet v0.2 + 
//"0x4254432d5553442d415242495452554d2d544553544e45540000000000000000" // BTC / USD in production testnet v0.2 + "0x00028c915d6af0fd66bba2d0fc9405226bca8d6806333121a7d9832103d1563c" // ETH / USD in staging testnet v0.3 + ]; + staging = _staging; + verify = _verify; + callbackReturnBool = true; + } + + function setParamKeys(string memory _feedParamKey, string memory _timeParamKey) external { + feedParamKey = _feedParamKey; + timeParamKey = _timeParamKey; + } + + function setFeeds(string[] memory _feeds) external { + feeds = _feeds; + } + + function setShouldRevertCallback(bool value) public { + shouldRevertCallback = value; + } + + function setCallbackReturnBool(bool value) public { + callbackReturnBool = value; + } + + function reset() public { + previousPerformBlock = 0; + initialBlock = 0; + counter = 0; + } + + function checkCallback(bytes[] memory values, bytes memory extraData) external view returns (bool, bytes memory) { + require(!shouldRevertCallback, "shouldRevertCallback is true"); + // do sth about the pluginBlob data in values and extraData + bytes memory performData = abi.encode(values, extraData); + return (callbackReturnBool, performData); + } + + function checkUpkeep(bytes calldata data) external view returns (bool, bytes memory) { + if (!eligible()) { + return (false, data); + } + uint256 timeParam; + if (keccak256(abi.encodePacked(feedParamKey)) == keccak256(abi.encodePacked("feedIdHex"))) { + if (useArbBlock) { + timeParam = ARB_SYS.arbBlockNumber(); + } else { + timeParam = block.number; + } + } else { + // assume this will be feedIDs for v0.3 + timeParam = block.timestamp; + } + + // encode ARB_SYS as extraData to verify that it is provided to checkCallback correctly. 
+ // in reality, this can be any data or empty + revert StreamsLookup(feedParamKey, feeds, timeParamKey, timeParam, abi.encodePacked(address(ARB_SYS))); + } + + function performUpkeep(bytes calldata performData) external { + uint256 blockNumber; + if (useArbBlock) { + blockNumber = ARB_SYS.arbBlockNumber(); + } else { + blockNumber = block.number; + } + if (initialBlock == 0) { + initialBlock = blockNumber; + } + (bytes[] memory values, bytes memory extraData) = abi.decode(performData, (bytes[], bytes)); + previousPerformBlock = blockNumber; + counter = counter + 1; + + bytes memory v0 = ""; + bytes memory v1 = ""; + if (verify) { + if (staging) { + v0 = STAGING_TESTNET_VERIFIER_PROXY.verify(values[0]); + } else { + v0 = PRODUCTION_TESTNET_VERIFIER_PROXY.verify(values[0]); + } + } + emit MercuryPerformEvent(msg.sender, blockNumber, values[0], v0, extraData); + } + + function eligible() public view returns (bool) { + if (initialBlock == 0) { + return true; + } + + uint256 blockNumber; + if (useArbBlock) { + blockNumber = ARB_SYS.arbBlockNumber(); + } else { + blockNumber = block.number; + } + return (blockNumber - initialBlock) < testRange && (blockNumber - previousPerformBlock) >= interval; + } +} diff --git a/contracts/src/v0.8/tests/VRFLogEmitter.sol b/contracts/src/v0.8/tests/VRFLogEmitter.sol new file mode 100644 index 00000000..18b99605 --- /dev/null +++ b/contracts/src/v0.8/tests/VRFLogEmitter.sol @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract VRFLogEmitter { + event RandomWordsRequested( + bytes32 indexed keyHash, + uint256 requestId, + uint256 preSeed, + uint64 indexed subId, + uint16 minimumRequestConfirmations, + uint32 callbackGasLimit, + uint32 numWords, + address indexed sender + ); + event RandomWordsFulfilled(uint256 indexed requestId, uint256 outputSeed, uint96 payment, bool success); + + function emitRandomWordsRequested( + bytes32 keyHash, + uint256 requestId, + uint256 preSeed, + uint64 subId, + uint16 
minimumRequestConfirmations, + uint32 callbackGasLimit, + uint32 numWords, + address sender + ) public { + emit RandomWordsRequested( + keyHash, + requestId, + preSeed, + subId, + minimumRequestConfirmations, + callbackGasLimit, + numWords, + sender + ); + } + + function emitRandomWordsFulfilled(uint256 requestId, uint256 outputSeed, uint96 payment, bool success) public { + emit RandomWordsFulfilled(requestId, outputSeed, payment, success); + } +} diff --git a/contracts/src/v0.8/tests/VerifiableLoadBase.sol b/contracts/src/v0.8/tests/VerifiableLoadBase.sol new file mode 100644 index 00000000..6725650d --- /dev/null +++ b/contracts/src/v0.8/tests/VerifiableLoadBase.sol @@ -0,0 +1,572 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.16; + +import "../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import "../automation/interfaces/v2_1/IKeeperRegistryMaster.sol"; +import {ArbSys} from "../vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol"; +import "../automation/v2_1/AutomationRegistrar2_1.sol"; +import {LogTriggerConfig} from "../automation/v2_1/AutomationUtils2_1.sol"; + +abstract contract VerifiableLoadBase is ConfirmedOwner { + error IndexOutOfRange(); + event LogEmitted(uint256 indexed upkeepId, uint256 indexed blockNum, address indexed addr); + event LogEmittedAgain(uint256 indexed upkeepId, uint256 indexed blockNum, address indexed addr); + event UpkeepTopUp(uint256 upkeepId, uint96 amount, uint256 blockNum); + + using EnumerableSet for EnumerableSet.UintSet; + ArbSys internal constant ARB_SYS = ArbSys(0x0000000000000000000000000000000000000064); + //bytes32 public constant emittedSig = 0x97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf08; //keccak256(LogEmitted(uint256,uint256,address)) + bytes32 public immutable emittedSig = LogEmitted.selector; + // bytes32 public constant emittedAgainSig = 0xc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d; 
//keccak256(LogEmittedAgain(uint256,uint256,address)) + bytes32 public immutable emittedAgainSig = LogEmittedAgain.selector; + + mapping(uint256 => uint256) public lastTopUpBlocks; + mapping(uint256 => uint256) public intervals; + mapping(uint256 => uint256) public previousPerformBlocks; + mapping(uint256 => uint256) public firstPerformBlocks; + mapping(uint256 => uint256) public counters; + mapping(uint256 => uint256) public performGasToBurns; + mapping(uint256 => uint256) public checkGasToBurns; + mapping(uint256 => uint256) public performDataSizes; + mapping(uint256 => uint256) public gasLimits; + mapping(bytes32 => bool) public dummyMap; // used to force storage lookup + mapping(uint256 => uint256[]) public delays; // how to query for delays for a certain past period: calendar day and/or past 24 hours + + mapping(uint256 => mapping(uint16 => uint256[])) public bucketedDelays; + mapping(uint256 => uint16) public buckets; + EnumerableSet.UintSet internal s_upkeepIDs; + AutomationRegistrar2_1 public registrar; + LinkTokenInterface public linkToken; + IKeeperRegistryMaster public registry; + // check if an upkeep is eligible for adding funds at this interval + uint256 public upkeepTopUpCheckInterval = 5; + // an upkeep will get this amount of PLI for every top up + uint96 public addLinkAmount = 200000000000000000; // 0.2 PLI + // if an upkeep's balance is less than this threshold * min balance, this upkeep is eligible for adding funds + uint8 public minBalanceThresholdMultiplier = 20; + // if this contract is using arbitrum block number + bool public immutable useArbitrumBlockNum; + + // the following fields are immutable bc if they are adjusted, the existing upkeeps' delays will be stored in + // different sizes of buckets. it's better to redeploy this contract with new values. 
+ uint16 public immutable BUCKET_SIZE = 100; + + // search feeds in notion: "Schema and Feed ID Registry" + string[] public feedsHex = [ + "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", + "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000" + ]; + string public feedParamKey = "feedIdHex"; // feedIDs for v0.3 + string public timeParamKey = "blockNumber"; // timestamp for v0.3 + + /** + * @param _registrar a automation registrar 2.1 address + * @param _useArb if this contract will use arbitrum block number + */ + constructor(AutomationRegistrar2_1 _registrar, bool _useArb) ConfirmedOwner(msg.sender) { + registrar = _registrar; + (address registryAddress, ) = registrar.getConfig(); + registry = IKeeperRegistryMaster(payable(address(registryAddress))); + linkToken = registrar.PLI(); + useArbitrumBlockNum = _useArb; + } + + receive() external payable {} + + function setParamKeys(string memory _feedParamKey, string memory _timeParamKey) external { + feedParamKey = _feedParamKey; + timeParamKey = _timeParamKey; + } + + function setFeeds(string[] memory _feeds) external { + feedsHex = _feeds; + } + + /** + * @notice withdraws PLIs from this contract to msg sender when testing is finished. + */ + function withdrawLinks() external onlyOwner { + uint256 balance = linkToken.balanceOf(address(this)); + linkToken.transfer(msg.sender, balance); + } + + function getBlockNumber() internal view returns (uint256) { + if (useArbitrumBlockNum) { + return ARB_SYS.arbBlockNumber(); + } else { + return block.number; + } + } + + /** + * @notice sets registrar, registry, and link token address. 
+ * @param newRegistrar the new registrar address + */ + function setConfig(AutomationRegistrar2_1 newRegistrar) external { + registrar = newRegistrar; + (address registryAddress, ) = registrar.getConfig(); + registry = IKeeperRegistryMaster(payable(address(registryAddress))); + linkToken = registrar.PLI(); + } + + /** + * @notice gets an array of active upkeep IDs. + * @param startIndex the start index of upkeep IDs + * @param maxCount the max number of upkeep IDs requested + * @return an array of active upkeep IDs + */ + function getActiveUpkeepIDsDeployedByThisContract( + uint256 startIndex, + uint256 maxCount + ) external view returns (uint256[] memory) { + uint256 maxIdx = s_upkeepIDs.length(); + if (startIndex >= maxIdx) revert IndexOutOfRange(); + if (maxCount == 0 || maxCount > maxIdx - startIndex) { + maxCount = maxIdx - startIndex; // 0 means "all remaining"; also clamp so s_upkeepIDs.at never reads out of range + } + uint256[] memory ids = new uint256[](maxCount); + for (uint256 idx = 0; idx < maxCount; idx++) { + ids[idx] = s_upkeepIDs.at(startIndex + idx); + } + return ids; + } + + /** + * @notice gets an array of active upkeep IDs. + * @param startIndex the start index of upkeep IDs + * @param maxCount the max number of upkeep IDs requested + * @return an array of active upkeep IDs + */ + function getAllActiveUpkeepIDsOnRegistry( + uint256 startIndex, + uint256 maxCount + ) external view returns (uint256[] memory) { + return registry.getActiveUpkeepIDs(startIndex, maxCount); + } + + /** + * @notice register an upkeep via the registrar. 
+ * @param params a registration params struct + * @return an upkeep ID + */ + function _registerUpkeep(AutomationRegistrar2_1.RegistrationParams memory params) private returns (uint256) { + uint256 upkeepId = registrar.registerUpkeep(params); + s_upkeepIDs.add(upkeepId); + gasLimits[upkeepId] = params.gasLimit; + return upkeepId; + } + + /** + * @notice returns a log trigger config + */ + function getLogTriggerConfig( + address addr, + uint8 selector, + bytes32 topic0, + bytes32 topic1, + bytes32 topic2, + bytes32 topic3 + ) external view returns (bytes memory logTrigger) { + LogTriggerConfig memory cfg = LogTriggerConfig({ + contractAddress: addr, + filterSelector: selector, + topic0: topic0, + topic1: topic1, + topic2: topic2, + topic3: topic3 + }); + return abi.encode(cfg); + } + + // this function sets pipeline data and trigger config for log trigger upkeeps + function batchPreparingUpkeeps( + uint256[] calldata upkeepIds, + uint8 selector, + bytes32 topic0, + bytes32 topic1, + bytes32 topic2, + bytes32 topic3 + ) external { + uint256 len = upkeepIds.length; + for (uint256 i = 0; i < len; i++) { + uint256 upkeepId = upkeepIds[i]; + + this.updateUpkeepPipelineData(upkeepId, abi.encode(upkeepId)); + + uint8 triggerType = registry.getTriggerType(upkeepId); + if (triggerType == 1) { + // currently not using a filter selector + bytes memory triggerCfg = this.getLogTriggerConfig(address(this), selector, topic0, topic1, topic2, topic3); + registry.setUpkeepTriggerConfig(upkeepId, triggerCfg); + } + } + } + + // this function sets pipeline data and trigger config for log trigger upkeeps + function batchPreparingUpkeepsSimple(uint256[] calldata upkeepIds, uint8 log, uint8 selector) external { + uint256 len = upkeepIds.length; + for (uint256 i = 0; i < len; i++) { + uint256 upkeepId = upkeepIds[i]; + + this.updateUpkeepPipelineData(upkeepId, abi.encode(upkeepId)); + + uint8 triggerType = registry.getTriggerType(upkeepId); + if (triggerType == 1) { + // currently not using 
a filter selector + bytes32 sig = emittedSig; + if (log != 0) { + sig = emittedAgainSig; + } + bytes memory triggerCfg = this.getLogTriggerConfig( + address(this), + selector, + sig, + bytes32(abi.encode(upkeepId)), + bytes32(0), + bytes32(0) + ); + registry.setUpkeepTriggerConfig(upkeepId, triggerCfg); + } + } + } + + function updateLogTriggerConfig1( + uint256 upkeepId, + address addr, + uint8 selector, + bytes32 topic0, + bytes32 topic1, + bytes32 topic2, + bytes32 topic3 + ) external { + registry.setUpkeepTriggerConfig(upkeepId, this.getLogTriggerConfig(addr, selector, topic0, topic1, topic2, topic3)); + } + + function updateLogTriggerConfig2(uint256 upkeepId, bytes calldata cfg) external { + registry.setUpkeepTriggerConfig(upkeepId, cfg); + } + + /** + * @notice batch registering upkeeps. + * @param number the number of upkeeps to be registered + * @param gasLimit the gas limit of each upkeep + * @param triggerType the trigger type of this upkeep, 0 for conditional, 1 for log trigger + * @param triggerConfig the trigger config of this upkeep + * @param amount the amount of PLI to fund each upkeep + * @param checkGasToBurn the amount of check gas to burn + * @param performGasToBurn the amount of perform gas to burn + */ + function batchRegisterUpkeeps( + uint8 number, + uint32 gasLimit, + uint8 triggerType, + bytes memory triggerConfig, + uint96 amount, + uint256 checkGasToBurn, + uint256 performGasToBurn + ) external { + AutomationRegistrar2_1.RegistrationParams memory params = AutomationRegistrar2_1.RegistrationParams({ + name: "test", + encryptedEmail: bytes(""), + upkeepContract: address(this), + gasLimit: gasLimit, + adminAddress: address(this), // use address of this contract as the admin + triggerType: triggerType, + checkData: bytes(""), // update pipeline data later bc upkeep id is not available now + triggerConfig: triggerConfig, + offchainConfig: bytes(""), + amount: amount + }); + + linkToken.approve(address(registrar), amount * number); + + 
uint256[] memory upkeepIds = new uint256[](number); + for (uint8 i = 0; i < number; i++) { + uint256 upkeepId = _registerUpkeep(params); + upkeepIds[i] = upkeepId; + checkGasToBurns[upkeepId] = checkGasToBurn; + performGasToBurns[upkeepId] = performGasToBurn; + } + } + + function topUpFund(uint256 upkeepId, uint256 blockNum) public { + if (blockNum - lastTopUpBlocks[upkeepId] > upkeepTopUpCheckInterval) { + KeeperRegistryBase2_1.UpkeepInfo memory info = registry.getUpkeep(upkeepId); + uint96 minBalance = registry.getMinBalanceForUpkeep(upkeepId); + if (info.balance < minBalanceThresholdMultiplier * minBalance) { + addFunds(upkeepId, addLinkAmount); + lastTopUpBlocks[upkeepId] = blockNum; + emit UpkeepTopUp(upkeepId, addLinkAmount, blockNum); + } + } + } + + function getMinBalanceForUpkeep(uint256 upkeepId) external view returns (uint96) { + return registry.getMinBalanceForUpkeep(upkeepId); + } + + function getForwarder(uint256 upkeepID) external view returns (address) { + return registry.getForwarder(upkeepID); + } + + function getBalance(uint256 id) external view returns (uint96 balance) { + return registry.getBalance(id); + } + + function getTriggerType(uint256 upkeepId) external view returns (uint8) { + return registry.getTriggerType(upkeepId); + } + + function burnPerformGas(uint256 upkeepId, uint256 startGas, uint256 blockNum) public { + uint256 performGasToBurn = performGasToBurns[upkeepId]; + while (startGas - gasleft() + 10000 < performGasToBurn) { + dummyMap[blockhash(blockNum)] = false; + } + } + + /** + * @notice adds fund for an upkeep. + * @param upkeepId the upkeep ID + * @param amount the amount of PLI to be funded for the upkeep + */ + function addFunds(uint256 upkeepId, uint96 amount) public { + linkToken.approve(address(registry), amount); + registry.addFunds(upkeepId, amount); + } + + /** + * @notice updates pipeline data for an upkeep. In order for the upkeep to be performed, the pipeline data must be the abi encoded upkeep ID. 
+ * @param upkeepId the upkeep ID + * @param pipelineData the new pipeline data for the upkeep + */ + function updateUpkeepPipelineData(uint256 upkeepId, bytes calldata pipelineData) external { + registry.setUpkeepCheckData(upkeepId, pipelineData); + } + + function withdrawLinks(uint256 upkeepId) external { + registry.withdrawFunds(upkeepId, address(this)); + } + + function batchWithdrawLinks(uint256[] calldata upkeepIds) external { + uint256 len = upkeepIds.length; + for (uint32 i = 0; i < len; i++) { + this.withdrawLinks(upkeepIds[i]); + } + } + + /** + * @notice batch canceling upkeeps. + * @param upkeepIds an array of upkeep IDs + */ + function batchCancelUpkeeps(uint256[] calldata upkeepIds) external { + uint256 len = upkeepIds.length; + for (uint8 i = 0; i < len; i++) { + registry.cancelUpkeep(upkeepIds[i]); + s_upkeepIDs.remove(upkeepIds[i]); + } + } + + function eligible(uint256 upkeepId) public view returns (bool) { + if (firstPerformBlocks[upkeepId] == 0) { + return true; + } + return (getBlockNumber() - previousPerformBlocks[upkeepId]) >= intervals[upkeepId]; + } + + // /** + // * @notice set a new add PLI amount. 
+ // * @param amount the new value + // */ + // function setAddLinkAmount(uint96 amount) external { + // addLinkAmount = amount; + // } + // + // function setUpkeepTopUpCheckInterval(uint256 newInterval) external { + // upkeepTopUpCheckInterval = newInterval; + // } + // + // function setMinBalanceThresholdMultiplier(uint8 newMinBalanceThresholdMultiplier) external { + // minBalanceThresholdMultiplier = newMinBalanceThresholdMultiplier; + // } + + // function setPerformGasToBurn(uint256 upkeepId, uint256 value) public { + // performGasToBurns[upkeepId] = value; + // } + // + // function setCheckGasToBurn(uint256 upkeepId, uint256 value) public { + // checkGasToBurns[upkeepId] = value; + // } + + function setPerformDataSize(uint256 upkeepId, uint256 value) public { + performDataSizes[upkeepId] = value; + } + + function setUpkeepGasLimit(uint256 upkeepId, uint32 gasLimit) public { + registry.setUpkeepGasLimit(upkeepId, gasLimit); + gasLimits[upkeepId] = gasLimit; + } + + function setInterval(uint256 upkeepId, uint256 _interval) external { + intervals[upkeepId] = _interval; + firstPerformBlocks[upkeepId] = 0; + counters[upkeepId] = 0; + + delete delays[upkeepId]; + uint16 currentBucket = buckets[upkeepId]; + for (uint16 i = 0; i <= currentBucket; i++) { + delete bucketedDelays[upkeepId][i]; + } + delete buckets[upkeepId]; + } + + /** + * @notice batch setting intervals for an array of upkeeps. + * @param upkeepIds an array of upkeep IDs + * @param interval a new interval + */ + function batchSetIntervals(uint256[] calldata upkeepIds, uint32 interval) external { + uint256 len = upkeepIds.length; + for (uint256 i = 0; i < len; i++) { + this.setInterval(upkeepIds[i], interval); + } + } + + /** + * @notice batch updating pipeline data for all upkeeps. 
+ * @param upkeepIds an array of upkeep IDs + */ + function batchUpdatePipelineData(uint256[] calldata upkeepIds) external { + uint256 len = upkeepIds.length; + for (uint256 i = 0; i < len; i++) { + uint256 upkeepId = upkeepIds[i]; + this.updateUpkeepPipelineData(upkeepId, abi.encode(upkeepId)); + } + } + + /** + * @notice finds all log trigger upkeeps and emits logs to serve as the initial trigger for upkeeps + */ + function batchSendLogs(uint8 log) external { + uint256[] memory upkeepIds = this.getActiveUpkeepIDsDeployedByThisContract(0, 0); + uint256 len = upkeepIds.length; + uint256 blockNum = getBlockNumber(); + for (uint256 i = 0; i < len; i++) { + uint256 upkeepId = upkeepIds[i]; + uint8 triggerType = registry.getTriggerType(upkeepId); + if (triggerType == 1) { + if (log == 0) { + emit LogEmitted(upkeepId, blockNum, address(this)); + } else { + emit LogEmittedAgain(upkeepId, blockNum, address(this)); + } + } + } + } + + function getUpkeepInfo(uint256 upkeepId) public view returns (KeeperRegistryBase2_1.UpkeepInfo memory) { + return registry.getUpkeep(upkeepId); + } + + function getUpkeepTriggerConfig(uint256 upkeepId) public view returns (bytes memory) { + return registry.getUpkeepTriggerConfig(upkeepId); + } + + function getUpkeepPrivilegeConfig(uint256 upkeepId) public view returns (bytes memory) { + return registry.getUpkeepPrivilegeConfig(upkeepId); + } + + function setUpkeepPrivilegeConfig(uint256 upkeepId, bytes memory cfg) external { + registry.setUpkeepPrivilegeConfig(upkeepId, cfg); + } + + function sendLog(uint256 upkeepId, uint8 log) external { + uint256 blockNum = getBlockNumber(); + if (log == 0) { + emit LogEmitted(upkeepId, blockNum, address(this)); + } else { + emit LogEmittedAgain(upkeepId, blockNum, address(this)); + } + } + + function getDelaysLength(uint256 upkeepId) public view returns (uint256) { + return delays[upkeepId].length; + } + + function getBucketedDelaysLength(uint256 upkeepId) public view returns (uint256) { + uint16 
currentBucket = buckets[upkeepId]; + uint256 len = 0; + for (uint16 i = 0; i <= currentBucket; i++) { + len += bucketedDelays[upkeepId][i].length; + } + return len; + } + + function getDelays(uint256 upkeepId) public view returns (uint256[] memory) { + return delays[upkeepId]; + } + + function getBucketedDelays(uint256 upkeepId, uint16 bucket) public view returns (uint256[] memory) { + return bucketedDelays[upkeepId][bucket]; + } + + function getSumDelayLastNPerforms(uint256 upkeepId, uint256 n) public view returns (uint256, uint256) { + uint256[] memory delays = delays[upkeepId]; + return getSumDelayLastNPerforms(delays, n); + } + + function getSumDelayInBucket(uint256 upkeepId, uint16 bucket) public view returns (uint256, uint256) { + uint256[] memory delays = bucketedDelays[upkeepId][bucket]; + return getSumDelayLastNPerforms(delays, delays.length); + } + + function getSumDelayLastNPerforms(uint256[] memory delays, uint256 n) internal view returns (uint256, uint256) { + uint256 i; + uint256 len = delays.length; + if (n == 0 || n >= len) { + n = len; + } + uint256 sum = 0; + + for (i = 0; i < n; i++) sum = sum + delays[len - i - 1]; + return (sum, n); + } + + function getPxDelayLastNPerforms(uint256 upkeepId, uint256 p, uint256 n) public view returns (uint256) { + return getPxDelayLastNPerforms(delays[upkeepId], p, n); + } + + function getPxDelayLastNPerforms(uint256[] memory delays, uint256 p, uint256 n) internal view returns (uint256) { + uint256 i; + uint256 len = delays.length; + if (n == 0 || n >= len) { + n = len; + } + uint256[] memory subArr = new uint256[](n); + + for (i = 0; i < n; i++) subArr[i] = (delays[len - i - 1]); + quickSort(subArr, int256(0), int256(subArr.length - 1)); + + if (p == 100) { + return subArr[subArr.length - 1]; + } + return subArr[(p * subArr.length) / 100]; + } + + function quickSort(uint256[] memory arr, int256 left, int256 right) private pure { + int256 i = left; + int256 j = right; + if (i == j) return; + uint256 pivot = 
arr[uint256(left + (right - left) / 2)]; + while (i <= j) { + while (arr[uint256(i)] < pivot) i++; + while (pivot < arr[uint256(j)]) j--; + if (i <= j) { + (arr[uint256(i)], arr[uint256(j)]) = (arr[uint256(j)], arr[uint256(i)]); + i++; + j--; + } + } + if (left < j) quickSort(arr, left, j); + if (i < right) quickSort(arr, i, right); + } +} diff --git a/contracts/src/v0.8/tests/VerifiableLoadLogTriggerUpkeep.sol b/contracts/src/v0.8/tests/VerifiableLoadLogTriggerUpkeep.sol new file mode 100644 index 00000000..45630fca --- /dev/null +++ b/contracts/src/v0.8/tests/VerifiableLoadLogTriggerUpkeep.sol @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.16; + +import "./VerifiableLoadBase.sol"; +import "../automation/interfaces/ILogAutomation.sol"; +import "../automation/interfaces/StreamsLookupCompatibleInterface.sol"; + +contract VerifiableLoadLogTriggerUpkeep is VerifiableLoadBase, StreamsLookupCompatibleInterface, ILogAutomation { + bool public useMercury; + uint8 public logNum; + + /** + * @param _registrar a automation registrar 2.1 address + * @param _useArb if this contract will use arbitrum block number + * @param _useMercury if the log trigger upkeeps will use mercury lookup + */ + constructor( + AutomationRegistrar2_1 _registrar, + bool _useArb, + bool _useMercury + ) VerifiableLoadBase(_registrar, _useArb) { + useMercury = _useMercury; + logNum = 0; + } + + function setLog(uint8 _log) external { + logNum = _log; + } + + function checkLog(Log calldata log, bytes memory checkData) external returns (bool, bytes memory) { + uint256 startGas = gasleft(); + uint256 blockNum = getBlockNumber(); + uint256 uid = abi.decode(checkData, (uint256)); + + bytes32 sig = emittedSig; + if (logNum != 0) { + sig = emittedAgainSig; + } + // filter by event signature + if (log.topics[0] == sig) { + bytes memory t1 = abi.encodePacked(log.topics[1]); // bytes32 to bytes + uint256 upkeepId = abi.decode(t1, (uint256)); + if (upkeepId != uid) { + revert("upkeep ids 
don't match"); + } + bytes memory t2 = abi.encodePacked(log.topics[2]); + uint256 blockNum = abi.decode(t2, (uint256)); + + bytes memory t3 = abi.encodePacked(log.topics[3]); + address addr = abi.decode(t3, (address)); + + uint256 checkGasToBurn = checkGasToBurns[upkeepId]; + while (startGas - gasleft() + 15000 < checkGasToBurn) { + dummyMap[blockhash(blockNum)] = false; + } + + uint256 timeParam; + if (keccak256(abi.encodePacked(feedParamKey)) == keccak256(abi.encodePacked("feedIdHex"))) { + timeParam = blockNum; + } else { + // assume this will be feedIDs for v0.3 + timeParam = block.timestamp; + } + + if (useMercury) { + revert StreamsLookup(feedParamKey, feedsHex, timeParamKey, timeParam, abi.encode(upkeepId, blockNum, addr)); + } + + // if we don't use mercury, create a perform data which resembles the output of checkCallback + bytes[] memory values = new bytes[](feedsHex.length); + bytes memory extraData = abi.encode(upkeepId, blockNum, addr); + return (true, abi.encode(values, extraData)); + } + revert("unexpected event sig"); + } + + function performUpkeep(bytes calldata performData) external { + uint256 startGas = gasleft(); + (bytes[] memory values, bytes memory extraData) = abi.decode(performData, (bytes[], bytes)); + (uint256 upkeepId, uint256 logBlockNumber, address addr) = abi.decode(extraData, (uint256, uint256, address)); + + uint256 firstPerformBlock = firstPerformBlocks[upkeepId]; + uint256 previousPerformBlock = previousPerformBlocks[upkeepId]; + uint256 currentBlockNum = getBlockNumber(); + + if (firstPerformBlock == 0) { + firstPerformBlocks[upkeepId] = currentBlockNum; + } else { + uint256 delay = currentBlockNum - logBlockNumber; + uint16 bucket = buckets[upkeepId]; + uint256[] memory bucketDelays = bucketedDelays[upkeepId][bucket]; + if (bucketDelays.length == BUCKET_SIZE) { + bucket++; + buckets[upkeepId] = bucket; + } + bucketedDelays[upkeepId][bucket].push(delay); + delays[upkeepId].push(delay); + } + + uint256 counter = 
counters[upkeepId] + 1; + counters[upkeepId] = counter; + previousPerformBlocks[upkeepId] = currentBlockNum; + + // for every upkeepTopUpCheckInterval (5), check if the upkeep balance is at least + // minBalanceThresholdMultiplier (20) * min balance. If not, add addLinkAmount (0.2) to the upkeep + // upkeepTopUpCheckInterval, minBalanceThresholdMultiplier, and addLinkAmount are configurable + topUpFund(upkeepId, currentBlockNum); + emit LogEmitted(upkeepId, currentBlockNum, address(this)); + burnPerformGas(upkeepId, startGas, currentBlockNum); + } + + function checkCallback( + bytes[] memory values, + bytes memory extraData + ) external pure override returns (bool, bytes memory) { + bytes memory performData = abi.encode(values, extraData); + return (true, performData); + } +} diff --git a/contracts/src/v0.8/tests/VerifiableLoadStreamsLookupUpkeep.sol b/contracts/src/v0.8/tests/VerifiableLoadStreamsLookupUpkeep.sol new file mode 100644 index 00000000..47d0aafa --- /dev/null +++ b/contracts/src/v0.8/tests/VerifiableLoadStreamsLookupUpkeep.sol @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.16; + +import "./VerifiableLoadBase.sol"; +import "../automation/interfaces/StreamsLookupCompatibleInterface.sol"; + +contract VerifiableLoadStreamsLookupUpkeep is VerifiableLoadBase, StreamsLookupCompatibleInterface { + constructor(AutomationRegistrar2_1 _registrar, bool _useArb) VerifiableLoadBase(_registrar, _useArb) {} + + function checkCallback( + bytes[] memory values, + bytes memory extraData + ) external pure override returns (bool, bytes memory) { + // do sth about the pluginBlob data in values and extraData + bytes memory performData = abi.encode(values, extraData); + return (true, performData); + } + + function checkUpkeep(bytes calldata checkData) external returns (bool, bytes memory) { + uint256 startGas = gasleft(); + uint256 upkeepId = abi.decode(checkData, (uint256)); + + uint256 performDataSize = performDataSizes[upkeepId]; + uint256 
checkGasToBurn = checkGasToBurns[upkeepId]; + bytes memory pData = abi.encode(upkeepId, new bytes(performDataSize)); + uint256 blockNum = getBlockNumber(); + bool needed = eligible(upkeepId); + while (startGas - gasleft() + 10000 < checkGasToBurn) { + // 10K margin over gas to burn + // Hard coded check gas to burn + dummyMap[blockhash(blockNum)] = false; // arbitrary storage writes + } + if (!needed) { + return (false, pData); + } + + uint256 timeParam; + if (keccak256(abi.encodePacked(feedParamKey)) == keccak256(abi.encodePacked("feedIdHex"))) { + timeParam = blockNum; + } else { + // assume this will be feedIDs for v0.3 + timeParam = block.timestamp; + } + + revert StreamsLookup(feedParamKey, feedsHex, timeParamKey, timeParam, abi.encode(upkeepId)); + } + + function performUpkeep(bytes calldata performData) external { + uint256 startGas = gasleft(); + (bytes[] memory values, bytes memory extraData) = abi.decode(performData, (bytes[], bytes)); + uint256 upkeepId = abi.decode(extraData, (uint256)); + uint256 firstPerformBlock = firstPerformBlocks[upkeepId]; + uint256 previousPerformBlock = previousPerformBlocks[upkeepId]; + uint256 blockNum = getBlockNumber(); + + if (firstPerformBlock == 0) { + firstPerformBlocks[upkeepId] = blockNum; + } else { + uint256 delay = blockNum - previousPerformBlock - intervals[upkeepId]; + uint16 bucket = buckets[upkeepId]; + uint256[] memory bucketDelays = bucketedDelays[upkeepId][bucket]; + if (bucketDelays.length == BUCKET_SIZE) { + bucket++; + buckets[upkeepId] = bucket; + } + bucketedDelays[upkeepId][bucket].push(delay); + delays[upkeepId].push(delay); + } + + uint256 counter = counters[upkeepId] + 1; + counters[upkeepId] = counter; + previousPerformBlocks[upkeepId] = blockNum; + + topUpFund(upkeepId, blockNum); + burnPerformGas(upkeepId, startGas, blockNum); + } +} diff --git a/contracts/src/v0.8/tests/VerifiableLoadUpkeep.sol b/contracts/src/v0.8/tests/VerifiableLoadUpkeep.sol new file mode 100644 index 00000000..bab75e96 --- 
/dev/null +++ b/contracts/src/v0.8/tests/VerifiableLoadUpkeep.sol @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.16; + +import "./VerifiableLoadBase.sol"; + +contract VerifiableLoadUpkeep is VerifiableLoadBase { + constructor(AutomationRegistrar2_1 _registrar, bool _useArb) VerifiableLoadBase(_registrar, _useArb) {} + + function checkUpkeep(bytes calldata checkData) external returns (bool, bytes memory) { + uint256 startGas = gasleft(); + uint256 upkeepId = abi.decode(checkData, (uint256)); + + uint256 performDataSize = performDataSizes[upkeepId]; + uint256 checkGasToBurn = checkGasToBurns[upkeepId]; + bytes memory pData = abi.encode(upkeepId, new bytes(performDataSize)); + uint256 blockNum = getBlockNumber(); + bool needed = eligible(upkeepId); + while (startGas - gasleft() + 10000 < checkGasToBurn) { + dummyMap[blockhash(blockNum)] = false; + blockNum--; + } + return (needed, pData); + } + + function performUpkeep(bytes calldata performData) external { + uint256 startGas = gasleft(); + (uint256 upkeepId, ) = abi.decode(performData, (uint256, bytes)); + uint256 firstPerformBlock = firstPerformBlocks[upkeepId]; + uint256 previousPerformBlock = previousPerformBlocks[upkeepId]; + uint256 blockNum = getBlockNumber(); + if (firstPerformBlock == 0) { + firstPerformBlocks[upkeepId] = blockNum; + } else { + uint256 delay = blockNum - previousPerformBlock - intervals[upkeepId]; + uint16 bucket = buckets[upkeepId]; + uint256[] memory bucketDelays = bucketedDelays[upkeepId][bucket]; + if (bucketDelays.length == BUCKET_SIZE) { + bucket++; + buckets[upkeepId] = bucket; + } + bucketedDelays[upkeepId][bucket].push(delay); + delays[upkeepId].push(delay); + } + + uint256 counter = counters[upkeepId] + 1; + counters[upkeepId] = counter; + previousPerformBlocks[upkeepId] = blockNum; + + topUpFund(upkeepId, blockNum); + burnPerformGas(upkeepId, startGas, blockNum); + } +} diff --git a/contracts/src/v0.8/transmission/dev/ERC-4337/Paymaster.sol 
b/contracts/src/v0.8/transmission/dev/ERC-4337/Paymaster.sol new file mode 100644 index 00000000..873a3010 --- /dev/null +++ b/contracts/src/v0.8/transmission/dev/ERC-4337/Paymaster.sol @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +import {IPaymaster} from "../../../vendor/entrypoint/interfaces/IPaymaster.sol"; +import {SCALibrary} from "./SCALibrary.sol"; +import {LinkTokenInterface} from "../../../shared/interfaces/LinkTokenInterface.sol"; +import {AggregatorV3Interface} from "../../../shared/interfaces/AggregatorV3Interface.sol"; +import {ConfirmedOwner} from "../../../shared/access/ConfirmedOwner.sol"; +import {UserOperation} from "../../../vendor/entrypoint/interfaces/UserOperation.sol"; +import {_packValidationData} from "../../../vendor/entrypoint/core/Helpers.sol"; + +/// @dev PLI token paymaster implementation. +/// TODO: more documentation. +contract Paymaster is IPaymaster, ConfirmedOwner { + error OnlyCallableFromLink(); + error InvalidCalldata(); + error Unauthorized(address sender, address validator); + error UserOperationAlreadyTried(bytes32 userOpHash); + error InsufficientFunds(uint256 juelsNeeded, uint256 subscriptionBalance); + + LinkTokenInterface public immutable i_linkToken; + AggregatorV3Interface public immutable i_linkEthFeed; + address public immutable i_entryPoint; + + struct Config { + uint32 stalenessSeconds; + int256 fallbackWeiPerUnitLink; + } + Config public s_config; + + mapping(bytes32 => bool) internal s_userOpHashMapping; + mapping(address => uint256) internal s_subscriptions; + + constructor( + LinkTokenInterface linkToken, + AggregatorV3Interface linkEthFeed, + address entryPoint + ) ConfirmedOwner(msg.sender) { + i_linkToken = linkToken; + i_linkEthFeed = linkEthFeed; + i_entryPoint = entryPoint; + } + + function setConfig(uint32 stalenessSeconds, int256 fallbackWeiPerUnitLink) external onlyOwner { + s_config = Config({stalenessSeconds: stalenessSeconds, fallbackWeiPerUnitLink: 
fallbackWeiPerUnitLink}); + } + + function onTokenTransfer(address /* _sender */, uint256 _amount, bytes calldata _data) external { + if (msg.sender != address(i_linkToken)) { + revert OnlyCallableFromLink(); + } + if (_data.length != 32) { + revert InvalidCalldata(); + } + + address subscription = abi.decode(_data, (address)); + s_subscriptions[subscription] += _amount; + } + + function validatePaymasterUserOp( + UserOperation calldata userOp, + bytes32 userOpHash, + uint256 maxCost + ) external returns (bytes memory context, uint256 validationData) { + if (msg.sender != i_entryPoint) { + revert Unauthorized(msg.sender, i_entryPoint); + } + if (s_userOpHashMapping[userOpHash]) { + revert UserOperationAlreadyTried(userOpHash); + } + + uint256 extraCostJuels = _handleExtraCostJuels(userOp); + uint256 costJuels = _getCostJuels(maxCost) + extraCostJuels; + if (s_subscriptions[userOp.sender] < costJuels) { + revert InsufficientFunds(costJuels, s_subscriptions[userOp.sender]); + } + + s_userOpHashMapping[userOpHash] = true; + return (abi.encode(userOp.sender, extraCostJuels), _packValidationData(false, 0, 0)); // success + } + + /// @dev Calculates any extra PLI cost for the user operation, based on the funding type passed to the + /// @dev paymaster. Handles funding the PLI token funding described in the user operation. + /// TODO: add logic for subscription top-up. + function _handleExtraCostJuels(UserOperation calldata userOp) internal returns (uint256 extraCost) { + if (userOp.paymasterAndData.length == 20) { + return 0; // no extra data, stop here + } + + uint8 paymentType = uint8(userOp.paymasterAndData[20]); + + // For direct funding, use top-up logic. 
+ if (paymentType == uint8(SCALibrary.LinkPaymentType.DIRECT_FUNDING)) { + SCALibrary.DirectFundingData memory directFundingData = abi.decode( + userOp.paymasterAndData[21:], + (SCALibrary.DirectFundingData) + ); + if ( + directFundingData.topupThreshold != 0 && + i_linkToken.balanceOf(directFundingData.recipient) < directFundingData.topupThreshold + ) { + i_linkToken.transfer(directFundingData.recipient, directFundingData.topupAmount); + extraCost = directFundingData.topupAmount; + } + } + return extraCost; + } + + /// @dev Deducts user subscription balance after execution. + function postOp(PostOpMode /* mode */, bytes calldata context, uint256 actualGasCost) external { + if (msg.sender != i_entryPoint) { + revert Unauthorized(msg.sender, i_entryPoint); + } + (address sender, uint256 extraCostJuels) = abi.decode(context, (address, uint256)); + s_subscriptions[sender] -= (_getCostJuels(actualGasCost) + extraCostJuels); + } + + function _getCostJuels(uint256 costWei) internal view returns (uint256 costJuels) { + costJuels = (1e18 * costWei) / uint256(_getFeedData()); + return costJuels; + } + + function _getFeedData() internal view returns (int256) { + uint32 stalenessSeconds = s_config.stalenessSeconds; + bool staleFallback = stalenessSeconds > 0; + uint256 timestamp; + int256 weiPerUnitLink; + (, weiPerUnitLink, , timestamp, ) = i_linkEthFeed.latestRoundData(); + if (staleFallback && stalenessSeconds < block.timestamp - timestamp) { + weiPerUnitLink = s_config.fallbackWeiPerUnitLink; + } + return weiPerUnitLink; + } +} diff --git a/contracts/src/v0.8/transmission/dev/ERC-4337/SCA.sol b/contracts/src/v0.8/transmission/dev/ERC-4337/SCA.sol new file mode 100644 index 00000000..6a11eecf --- /dev/null +++ b/contracts/src/v0.8/transmission/dev/ERC-4337/SCA.sol @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: MIT +/// TODO: decide on a compiler version. Must not be dynamic, and must be > 0.8.12. 
+pragma solidity 0.8.15; + +import {IAccount} from "../../../vendor/entrypoint/interfaces/IAccount.sol"; +import {SCALibrary} from "./SCALibrary.sol"; +import {UserOperation} from "../../../vendor/entrypoint/interfaces/UserOperation.sol"; +import {_packValidationData} from "../../../vendor/entrypoint/core/Helpers.sol"; + +/// @dev Smart Contract Account, a contract deployed for a single user and that allows +/// @dev them to invoke meta-transactions. +/// TODO: Consider making the Smart Contract Account upgradeable. +contract SCA is IAccount { + uint256 public s_nonce; + address public immutable i_owner; + address public immutable i_entryPoint; + + error IncorrectNonce(uint256 currentNonce, uint256 nonceGiven); + error NotAuthorized(address sender); + error BadFormatOrOOG(); + error TransactionExpired(uint256 deadline, uint256 currentTimestamp); + error InvalidSignature(bytes32 operationHash, address owner); + + // Assign the owner of this contract upon deployment. + constructor(address owner, address entryPoint) { + i_owner = owner; + i_entryPoint = entryPoint; + } + + /// @dev Validates the user operation via a signature check. + /// TODO: Utilize a "validAfter" for a tx to be only valid _after_ a certain time. + function validateUserOp( + UserOperation calldata userOp, + bytes32 userOpHash, + uint256 /* missingAccountFunds - unused in favor of paymaster */ + ) external returns (uint256 validationData) { + if (userOp.nonce != s_nonce) { + // Revert for non-signature errors. + revert IncorrectNonce(s_nonce, userOp.nonce); + } + + // Verify signature on hash. + bytes32 fullHash = SCALibrary._getUserOpFullHash(userOpHash, address(this)); + bytes memory signature = userOp.signature; + if (SCALibrary._recoverSignature(signature, fullHash) != i_owner) { + return _packValidationData(true, 0, 0); // signature error + } + s_nonce++; + + // Unpack deadline, return successful signature. 
+ (, , uint48 deadline, ) = abi.decode(userOp.callData[4:], (address, uint256, uint48, bytes)); + return _packValidationData(false, deadline, 0); + } + + /// @dev Execute a transaction on behalf of the owner. This function can only + /// @dev be called by the EntryPoint contract, and assumes that `validateUserOp` has succeeded. + function executeTransactionFromEntryPoint(address to, uint256 value, uint48 deadline, bytes calldata data) external { + if (msg.sender != i_entryPoint) { + revert NotAuthorized(msg.sender); + } + if (deadline != 0 && block.timestamp > deadline) { + revert TransactionExpired(deadline, block.timestamp); + } + + // Execute transaction. Bubble up an error if found. + (bool success, bytes memory returnData) = to.call{value: value}(data); + if (!success) { + if (returnData.length == 0) revert BadFormatOrOOG(); + assembly { + revert(add(32, returnData), mload(returnData)) + } + } + } +} diff --git a/contracts/src/v0.8/transmission/dev/ERC-4337/SCALibrary.sol b/contracts/src/v0.8/transmission/dev/ERC-4337/SCALibrary.sol new file mode 100644 index 00000000..35d666a2 --- /dev/null +++ b/contracts/src/v0.8/transmission/dev/ERC-4337/SCALibrary.sol @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +library SCALibrary { + // keccak256("EIP712Domain(uint256 chainId, address verifyingContract)"); + bytes32 internal constant DOMAIN_SEPARATOR = hex"1c7d3b72b37a35523e273aaadd7b4cd66f618bb81429ab053412d51f50ccea61"; + + // keccak256("executeTransactionFromEntryPoint(address to, uint256 value, bytes calldata data)"); + bytes32 internal constant TYPEHASH = hex"4750045d47fce615521b32cee713ff8db50147e98aec5ca94926b52651ca3fa0"; + + enum LinkPaymentType { + DIRECT_FUNDING, + SUBSCRIPTION // TODO: implement + } + + struct DirectFundingData { + address recipient; // recipient of the top-up + uint256 topupThreshold; // set to zero to disable auto-topup + uint256 topupAmount; + } + + function _getUserOpFullHash(bytes32 userOpHash, address 
scaAddress) internal view returns (bytes32 fullHash) { + bytes32 hashOfEncoding = keccak256(abi.encode(SCALibrary.TYPEHASH, userOpHash)); + fullHash = keccak256( + abi.encodePacked( + bytes1(0x19), + bytes1(0x01), + SCALibrary.DOMAIN_SEPARATOR, + block.chainid, + scaAddress, + hashOfEncoding + ) + ); + return fullHash; + } + + function _recoverSignature(bytes memory signature, bytes32 fullHash) internal pure returns (address) { + bytes32 r; + bytes32 s; + assembly { + r := mload(add(signature, 0x20)) + s := mload(add(signature, 0x40)) + } + uint8 v = uint8(signature[64]); + + return ecrecover(fullHash, v + 27, r, s); + } +} diff --git a/contracts/src/v0.8/transmission/dev/ERC-4337/SmartContractAccountFactory.sol b/contracts/src/v0.8/transmission/dev/ERC-4337/SmartContractAccountFactory.sol new file mode 100644 index 00000000..bb0f2dbd --- /dev/null +++ b/contracts/src/v0.8/transmission/dev/ERC-4337/SmartContractAccountFactory.sol @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +contract SmartContractAccountFactory { + event ContractCreated(address scaAddress); + + error DeploymentFailed(); + + /// @dev Use create2 to deploy a new Smart Contract Account. + /// @dev See EIP-1014 for more on CREATE2. + /// TODO: Return the address of the Smart Contract Account even if it is already + /// deployed. 
+ function deploySmartContractAccount( + bytes32 abiEncodedOwnerAddress, + bytes memory initCode + ) external payable returns (address scaAddress) { + assembly { + scaAddress := create2( + 0, // value - left at zero here + add(0x20, initCode), // initialization bytecode + mload(initCode), // length of initialization bytecode + abiEncodedOwnerAddress // user-defined nonce to ensure unique SCA addresses + ) + } + if (scaAddress == address(0)) { + revert DeploymentFailed(); + } + + emit ContractCreated(scaAddress); + + return scaAddress; + } +} diff --git a/contracts/src/v0.8/transmission/dev/testhelpers/Greeter.sol b/contracts/src/v0.8/transmission/dev/testhelpers/Greeter.sol new file mode 100644 index 00000000..92e50b80 --- /dev/null +++ b/contracts/src/v0.8/transmission/dev/testhelpers/Greeter.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +/// @dev Ownerless greeter contract. +contract Greeter { + string private s_greeting; + + function setGreeting(string memory greeting) external { + s_greeting = greeting; + } + + function getGreeting() external view returns (string memory) { + return s_greeting; + } +} diff --git a/contracts/src/v0.8/transmission/dev/testhelpers/SmartContractAccountHelper.sol b/contracts/src/v0.8/transmission/dev/testhelpers/SmartContractAccountHelper.sol new file mode 100644 index 00000000..014f296f --- /dev/null +++ b/contracts/src/v0.8/transmission/dev/testhelpers/SmartContractAccountHelper.sol @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +import {SCA} from "../ERC-4337/SCA.sol"; +import {SmartContractAccountFactory} from "../ERC-4337/SmartContractAccountFactory.sol"; +import {SCALibrary} from "../ERC-4337/SCALibrary.sol"; + +library SmartContractAccountHelper { + bytes internal constant INITIALIZE_CODE = type(SCA).creationCode; + + function getFullEndTxEncoding( + address endContract, + uint256 value, + uint256 deadline, + bytes memory data + ) public view returns (bytes 
memory encoding) { + encoding = bytes.concat( + SCA.executeTransactionFromEntryPoint.selector, + abi.encode(endContract, value, block.timestamp + deadline, data) + ); + return encoding; + } + + function getFullHashForSigning(bytes32 userOpHash, address scaAddress) public view returns (bytes32) { + return SCALibrary._getUserOpFullHash(userOpHash, scaAddress); + } + + function getSCAInitCodeWithConstructor( + address owner, + address entryPoint + ) public pure returns (bytes memory initCode) { + initCode = bytes.concat(INITIALIZE_CODE, abi.encode(owner, entryPoint)); + return initCode; + } + + function getInitCode( + address factory, + address owner, + address entryPoint + ) external pure returns (bytes memory initCode) { + bytes32 salt = bytes32(uint256(uint160(owner)) << 96); + bytes memory initializeCodeWithConstructor = bytes.concat(INITIALIZE_CODE, abi.encode(owner, entryPoint)); + initCode = bytes.concat( + bytes20(address(factory)), + abi.encodeWithSelector( + SmartContractAccountFactory.deploySmartContractAccount.selector, + salt, + initializeCodeWithConstructor + ) + ); + return initCode; + } + + /// @dev Computes the smart contract address that results from a CREATE2 operation, per EIP-1014. 
+ function calculateSmartContractAccountAddress( + address owner, + address entryPoint, + address factory + ) external pure returns (address) { + bytes32 salt = bytes32(uint256(uint160(owner)) << 96); + bytes memory initializeCodeWithConstructor = bytes.concat(INITIALIZE_CODE, abi.encode(owner, entryPoint)); + bytes32 initializeCodeHash = keccak256(initializeCodeWithConstructor); + return address(uint160(uint256(keccak256(abi.encodePacked(hex"ff", address(factory), salt, initializeCodeHash))))); + } + + function getAbiEncodedDirectRequestData( + address recipient, + uint256 topupThreshold, + uint256 topupAmount + ) external pure returns (bytes memory) { + SCALibrary.DirectFundingData memory data = SCALibrary.DirectFundingData({ + recipient: recipient, + topupThreshold: topupThreshold, + topupAmount: topupAmount + }); + return abi.encode(data); + } +} diff --git a/contracts/src/v0.8/vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol b/contracts/src/v0.8/vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol new file mode 100644 index 00000000..19d29c88 --- /dev/null +++ b/contracts/src/v0.8/vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol @@ -0,0 +1,43 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE +// SPDX-License-Identifier: BUSL-1.1 + +pragma solidity >=0.4.21 <0.9.0; + +interface ArbGasInfo { + // return gas prices in wei, assuming the specified aggregator is used + // ( + // per L2 tx, + // per L1 calldata unit, (zero byte = 4 units, nonzero byte = 16 units) + // per storage allocation, + // per ArbGas base, + // per ArbGas congestion, + // per ArbGas total + // ) + function getPricesInWeiWithAggregator(address aggregator) external view returns (uint, uint, uint, uint, uint, uint); + + // return gas prices in wei, as described above, assuming the caller's preferred aggregator is used + // if the caller hasn't specified a preferred aggregator, the default aggregator is assumed + function getPricesInWei() external view returns (uint, uint, uint, uint, uint, uint); + + // return prices in ArbGas (per L2 tx, per L1 calldata unit, per storage allocation), + // assuming the specified aggregator is used + function getPricesInArbGasWithAggregator(address aggregator) external view returns (uint, uint, uint); + + // return gas prices in ArbGas, as described above, assuming the caller's preferred aggregator is used + // if the caller hasn't specified a preferred aggregator, the default aggregator is assumed + function getPricesInArbGas() external view returns (uint, uint, uint); + + // return gas accounting parameters (speedLimitPerSecond, gasPoolMax, maxTxGasLimit) + function getGasAccountingParams() external view returns (uint, uint, uint); + + // get ArbOS's estimate of the L1 gas price in wei + function getL1GasPriceEstimate() external view returns(uint); + + // set ArbOS's estimate of the L1 gas price in wei + // reverts unless called by chain owner or designated gas oracle (if any) + function setL1GasPriceEstimate(uint priceInWei) external; + + // get L1 gas fees paid by the current transaction (txBaseFeeWei, calldataFeeWei) + function getCurrentTxL1GasFees() external view 
returns(uint); +} diff --git a/contracts/src/v0.8/vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol b/contracts/src/v0.8/vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol new file mode 100644 index 00000000..fb48f360 --- /dev/null +++ b/contracts/src/v0.8/vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol @@ -0,0 +1,153 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE +// SPDX-License-Identifier: BUSL-1.1 + +pragma solidity >=0.4.21 <0.9.0; + +/** + * @title System level functionality + * @notice For use by contracts to interact with core L2-specific functionality. + * Precompiled contract that exists in every Arbitrum chain at address(100), 0x0000000000000000000000000000000000000064. + */ +interface ArbSys { + /** + * @notice Get Arbitrum block number (distinct from L1 block number; Arbitrum genesis block has block number 0) + * @return block number as int + */ + function arbBlockNumber() external view returns (uint256); + + /** + * @notice Get Arbitrum block hash (reverts unless currentBlockNum-256 <= arbBlockNum < currentBlockNum) + * @return block hash + */ + function arbBlockHash(uint256 arbBlockNum) external view returns (bytes32); + + /** + * @notice Gets the rollup's unique chain identifier + * @return Chain identifier as int + */ + function arbChainID() external view returns (uint256); + + /** + * @notice Get internal version number identifying an ArbOS build + * @return version number as int + */ + function arbOSVersion() external view returns (uint256); + + /** + * @notice Returns 0 since Nitro has no concept of storage gas + * @return uint 0 + */ + function getStorageGasAvailable() external view returns (uint256); + + /** + * @notice (deprecated) check if current call is top level (meaning it was triggered by an EoA or a L1 contract) + * @dev this call has been deprecated and may be removed in a future release + * @return true if current execution 
frame is not a call by another L2 contract + */ + function isTopLevelCall() external view returns (bool); + + /** + * @notice map L1 sender contract address to its L2 alias + * @param sender sender address + * @param unused argument no longer used + * @return aliased sender address + */ + function mapL1SenderContractAddressToL2Alias(address sender, address unused) + external + pure + returns (address); + + /** + * @notice check if the caller (of this caller of this) is an aliased L1 contract address + * @return true iff the caller's address is an alias for an L1 contract address + */ + function wasMyCallersAddressAliased() external view returns (bool); + + /** + * @notice return the address of the caller (of this caller of this), without applying L1 contract address aliasing + * @return address of the caller's caller, without applying L1 contract address aliasing + */ + function myCallersAddressWithoutAliasing() external view returns (address); + + /** + * @notice Send given amount of Eth to dest from sender. + * This is a convenience function, which is equivalent to calling sendTxToL1 with empty data. + * @param destination recipient address on L1 + * @return unique identifier for this L2-to-L1 transaction. + */ + function withdrawEth(address destination) + external + payable + returns (uint256); + + /** + * @notice Send a transaction to L1 + * @dev it is not possible to execute on the L1 any L2-to-L1 transaction which contains data + * to a contract address without any code (as enforced by the Bridge contract). + * @param destination recipient address on L1 + * @param data (optional) calldata for L1 contract call + * @return a unique identifier for this L2-to-L1 transaction. 
+ */ + function sendTxToL1(address destination, bytes calldata data) + external + payable + returns (uint256); + + /** + * @notice Get send Merkle tree state + * @return size number of sends in the history + * @return root root hash of the send history + * @return partials hashes of partial subtrees in the send history tree + */ + function sendMerkleTreeState() + external + view + returns ( + uint256 size, + bytes32 root, + bytes32[] memory partials + ); + + /** + * @notice creates a send txn from L2 to L1 + * @param position = (level << 192) + leaf = (0 << 192) + leaf = leaf + */ + event L2ToL1Tx( + address caller, + address indexed destination, + uint256 indexed hash, + uint256 indexed position, + uint256 arbBlockNum, + uint256 ethBlockNum, + uint256 timestamp, + uint256 callvalue, + bytes data + ); + + /// @dev DEPRECATED in favour of the new L2ToL1Tx event above after the nitro upgrade + event L2ToL1Transaction( + address caller, + address indexed destination, + uint256 indexed uniqueId, + uint256 indexed batchNumber, + uint256 indexInBatch, + uint256 arbBlockNum, + uint256 ethBlockNum, + uint256 timestamp, + uint256 callvalue, + bytes data + ); + + /** + * @notice logs a merkle branch for proof synthesis + * @param reserved an index meant only to align the 4th index with L2ToL1Transaction's 4th event + * @param hash the merkle hash + * @param position = (level << 192) + leaf + */ + event SendMerkleUpdate( + uint256 indexed reserved, + bytes32 indexed hash, + uint256 indexed position + ); +} diff --git a/contracts/src/v0.8/vendor/@ensdomains/buffer/v0.1.0/Buffer.sol b/contracts/src/v0.8/vendor/@ensdomains/buffer/v0.1.0/Buffer.sol new file mode 100644 index 00000000..a57b4dd1 --- /dev/null +++ b/contracts/src/v0.8/vendor/@ensdomains/buffer/v0.1.0/Buffer.sol @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: BSD-2-Clause +pragma solidity ^0.8.4; + +/** +* @dev A library for working with mutable byte buffers in Solidity. 
+* +* Byte buffers are mutable and expandable, and provide a variety of primitives +* for appending to them. At any time you can fetch a bytes object containing the +* current contents of the buffer. The bytes object should not be stored between +* operations, as it may change due to resizing of the buffer. +*/ +library Buffer { + /** + * @dev Represents a mutable buffer. Buffers have a current value (buf) and + * a capacity. The capacity may be longer than the current value, in + * which case it can be extended without the need to allocate more memory. + */ + struct buffer { + bytes buf; + uint capacity; + } + + /** + * @dev Initializes a buffer with an initial capacity. + * @param buf The buffer to initialize. + * @param capacity The number of bytes of space to allocate the buffer. + * @return The buffer, for chaining. + */ + function init(buffer memory buf, uint capacity) internal pure returns(buffer memory) { + if (capacity % 32 != 0) { + capacity += 32 - (capacity % 32); + } + // Allocate space for the buffer data + buf.capacity = capacity; + assembly { + let ptr := mload(0x40) + mstore(buf, ptr) + mstore(ptr, 0) + let fpm := add(32, add(ptr, capacity)) + if lt(fpm, ptr) { + revert(0, 0) + } + mstore(0x40, fpm) + } + return buf; + } + + /** + * @dev Initializes a new buffer from an existing bytes object. + * Changes to the buffer may mutate the original value. + * @param b The bytes object to initialize the buffer with. + * @return A new buffer. + */ + function fromBytes(bytes memory b) internal pure returns(buffer memory) { + buffer memory buf; + buf.buf = b; + buf.capacity = b.length; + return buf; + } + + function resize(buffer memory buf, uint capacity) private pure { + bytes memory oldbuf = buf.buf; + init(buf, capacity); + append(buf, oldbuf); + } + + /** + * @dev Sets buffer length to 0. + * @param buf The buffer to truncate. + * @return The original buffer, for chaining. 
+ */ + function truncate(buffer memory buf) internal pure returns (buffer memory) { + assembly { + let bufptr := mload(buf) + mstore(bufptr, 0) + } + return buf; + } + + /** + * @dev Appends len bytes of a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @param len The number of bytes to copy. + * @return The original buffer, for chaining. + */ + function append(buffer memory buf, bytes memory data, uint len) internal pure returns(buffer memory) { + require(len <= data.length); + + uint off = buf.buf.length; + uint newCapacity = off + len; + if (newCapacity > buf.capacity) { + resize(buf, newCapacity * 2); + } + + uint dest; + uint src; + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Length of existing buffer data + let buflen := mload(bufptr) + // Start address = buffer address + offset + sizeof(buffer length) + dest := add(add(bufptr, 32), off) + // Update buffer length if we're extending it + if gt(newCapacity, buflen) { + mstore(bufptr, newCapacity) + } + src := add(data, 32) + } + + // Copy word-length chunks while possible + for (; len >= 32; len -= 32) { + assembly { + mstore(dest, mload(src)) + } + dest += 32; + src += 32; + } + + // Copy remaining bytes + unchecked { + uint mask = (256 ** (32 - len)) - 1; + assembly { + let srcpart := and(mload(src), not(mask)) + let destpart := and(mload(dest), mask) + mstore(dest, or(destpart, srcpart)) + } + } + + return buf; + } + + /** + * @dev Appends a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function append(buffer memory buf, bytes memory data) internal pure returns (buffer memory) { + return append(buf, data, data.length); + } + + /** + * @dev Appends a byte to the buffer. 
Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendUint8(buffer memory buf, uint8 data) internal pure returns(buffer memory) { + uint off = buf.buf.length; + uint offPlusOne = off + 1; + if (off >= buf.capacity) { + resize(buf, offPlusOne * 2); + } + + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + sizeof(buffer length) + off + let dest := add(add(bufptr, off), 32) + mstore8(dest, data) + // Update buffer length if we extended it + if gt(offPlusOne, mload(bufptr)) { + mstore(bufptr, offPlusOne) + } + } + + return buf; + } + + /** + * @dev Appends len bytes of bytes32 to a buffer. Resizes if doing so would + * exceed the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @param len The number of bytes to write (left-aligned). + * @return The original buffer, for chaining. + */ + function append(buffer memory buf, bytes32 data, uint len) private pure returns(buffer memory) { + uint off = buf.buf.length; + uint newCapacity = len + off; + if (newCapacity > buf.capacity) { + resize(buf, newCapacity * 2); + } + + unchecked { + uint mask = (256 ** len) - 1; + // Right-align data + data = data >> (8 * (32 - len)); + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + sizeof(buffer length) + newCapacity + let dest := add(bufptr, newCapacity) + mstore(dest, or(and(mload(dest), not(mask)), data)) + // Update buffer length if we extended it + if gt(newCapacity, mload(bufptr)) { + mstore(bufptr, newCapacity) + } + } + } + return buf; + } + + /** + * @dev Appends a bytes20 to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. 
+ * @return The original buffer, for chhaining. + */ + function appendBytes20(buffer memory buf, bytes20 data) internal pure returns (buffer memory) { + return append(buf, bytes32(data), 20); + } + + /** + * @dev Appends a bytes32 to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendBytes32(buffer memory buf, bytes32 data) internal pure returns (buffer memory) { + return append(buf, data, 32); + } + + /** + * @dev Appends a byte to the end of the buffer. Resizes if doing so would + * exceed the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @param len The number of bytes to write (right-aligned). + * @return The original buffer. + */ + function appendInt(buffer memory buf, uint data, uint len) internal pure returns(buffer memory) { + uint off = buf.buf.length; + uint newCapacity = len + off; + if (newCapacity > buf.capacity) { + resize(buf, newCapacity * 2); + } + + uint mask = (256 ** len) - 1; + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + sizeof(buffer length) + newCapacity + let dest := add(bufptr, newCapacity) + mstore(dest, or(and(mload(dest), not(mask)), data)) + // Update buffer length if we extended it + if gt(newCapacity, mload(bufptr)) { + mstore(bufptr, newCapacity) + } + } + return buf; + } +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/L2/GasPriceOracle.sol b/contracts/src/v0.8/vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/L2/GasPriceOracle.sol new file mode 100644 index 00000000..aebc1f74 --- /dev/null +++ b/contracts/src/v0.8/vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/L2/GasPriceOracle.sol @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +import { ISemver 
} from "../universal/ISemver.sol"; +import { Predeploys } from "../libraries/Predeploys.sol"; +import { L1Block } from "./L1Block.sol"; + +/// @custom:proxied +/// @custom:predeploy 0x420000000000000000000000000000000000000F +/// @title GasPriceOracle +/// @notice This contract maintains the variables responsible for computing the L1 portion of the +/// total fee charged on L2. Before Bedrock, this contract held variables in state that were +/// read during the state transition function to compute the L1 portion of the transaction +/// fee. After Bedrock, this contract now simply proxies the L1Block contract, which has +/// the values used to compute the L1 portion of the fee in its state. +/// +/// The contract exposes an API that is useful for knowing how large the L1 portion of the +/// transaction fee will be. The following events were deprecated with Bedrock: +/// - event OverheadUpdated(uint256 overhead); +/// - event ScalarUpdated(uint256 scalar); +/// - event DecimalsUpdated(uint256 decimals); +contract GasPriceOracle is ISemver { + /// @notice Number of decimals used in the scalar. + uint256 public constant DECIMALS = 6; + + /// @notice Semantic version. + /// @custom:semver 1.1.0 + string public constant version = "1.1.0"; + + /// @notice Computes the L1 portion of the fee based on the size of the rlp encoded input + /// transaction, the current L1 base fee, and the various dynamic parameters. + /// @param _data Unsigned fully RLP-encoded transaction to get the L1 fee for. + /// @return L1 fee that should be paid for the tx + function getL1Fee(bytes memory _data) external view returns (uint256) { + uint256 l1GasUsed = getL1GasUsed(_data); + uint256 l1Fee = l1GasUsed * l1BaseFee(); + uint256 divisor = 10 ** DECIMALS; + uint256 unscaled = l1Fee * scalar(); + uint256 scaled = unscaled / divisor; + return scaled; + } + + /// @notice Retrieves the current gas price (base fee). + /// @return Current L2 gas price (base fee). 
+ function gasPrice() public view returns (uint256) { + return block.basefee; + } + + /// @notice Retrieves the current base fee. + /// @return Current L2 base fee. + function baseFee() public view returns (uint256) { + return block.basefee; + } + + /// @notice Retrieves the current fee overhead. + /// @return Current fee overhead. + function overhead() public view returns (uint256) { + return L1Block(Predeploys.L1_BLOCK_ATTRIBUTES).l1FeeOverhead(); + } + + /// @notice Retrieves the current fee scalar. + /// @return Current fee scalar. + function scalar() public view returns (uint256) { + return L1Block(Predeploys.L1_BLOCK_ATTRIBUTES).l1FeeScalar(); + } + + /// @notice Retrieves the latest known L1 base fee. + /// @return Latest known L1 base fee. + function l1BaseFee() public view returns (uint256) { + return L1Block(Predeploys.L1_BLOCK_ATTRIBUTES).basefee(); + } + + /// @custom:legacy + /// @notice Retrieves the number of decimals used in the scalar. + /// @return Number of decimals used in the scalar. + function decimals() public pure returns (uint256) { + return DECIMALS; + } + + /// @notice Computes the amount of L1 gas used for a transaction. Adds the overhead which + /// represents the per-transaction gas overhead of posting the transaction and state + /// roots to L1. Adds 68 bytes of padding to account for the fact that the input does + /// not have a signature. + /// @param _data Unsigned fully RLP-encoded transaction to get the L1 gas for. + /// @return Amount of L1 gas used to publish the transaction. 
+ function getL1GasUsed(bytes memory _data) public view returns (uint256) { + uint256 total = 0; + uint256 length = _data.length; + for (uint256 i = 0; i < length; i++) { + if (_data[i] == 0) { + total += 4; + } else { + total += 16; + } + } + uint256 unsigned = total + overhead(); + return unsigned + (68 * 16); + } +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/L2/L1Block.sol b/contracts/src/v0.8/vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/L2/L1Block.sol new file mode 100644 index 00000000..7722b53b --- /dev/null +++ b/contracts/src/v0.8/vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/L2/L1Block.sol @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +import { ISemver } from "../universal/ISemver.sol"; + +/// @custom:proxied +/// @custom:predeploy 0x4200000000000000000000000000000000000015 +/// @title L1Block +/// @notice The L1Block predeploy gives users access to information about the last known L1 block. +/// Values within this contract are updated once per epoch (every L1 block) and can only be +/// set by the "depositor" account, a special system address. Depositor account transactions +/// are created by the protocol whenever we move to a new epoch. +contract L1Block is ISemver { + /// @notice Address of the special depositor account. + address public constant DEPOSITOR_ACCOUNT = 0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001; + + /// @notice The latest L1 block number known by the L2 system. + uint64 public number; + + /// @notice The latest L1 timestamp known by the L2 system. + uint64 public timestamp; + + /// @notice The latest L1 basefee. + uint256 public basefee; + + /// @notice The latest L1 blockhash. + bytes32 public hash; + + /// @notice The number of L2 blocks in the same epoch. + uint64 public sequenceNumber; + + /// @notice The versioned hash to authenticate the batcher by. 
+ bytes32 public batcherHash; + + /// @notice The overhead value applied to the L1 portion of the transaction fee. + uint256 public l1FeeOverhead; + + /// @notice The scalar value applied to the L1 portion of the transaction fee. + uint256 public l1FeeScalar; + + /// @custom:semver 1.1.0 + string public constant version = "1.1.0"; + + /// @notice Updates the L1 block values. + /// @param _number L1 blocknumber. + /// @param _timestamp L1 timestamp. + /// @param _basefee L1 basefee. + /// @param _hash L1 blockhash. + /// @param _sequenceNumber Number of L2 blocks since epoch start. + /// @param _batcherHash Versioned hash to authenticate batcher by. + /// @param _l1FeeOverhead L1 fee overhead. + /// @param _l1FeeScalar L1 fee scalar. + function setL1BlockValues( + uint64 _number, + uint64 _timestamp, + uint256 _basefee, + bytes32 _hash, + uint64 _sequenceNumber, + bytes32 _batcherHash, + uint256 _l1FeeOverhead, + uint256 _l1FeeScalar + ) + external + { + require(msg.sender == DEPOSITOR_ACCOUNT, "L1Block: only the depositor account can set L1 block values"); + + number = _number; + timestamp = _timestamp; + basefee = _basefee; + hash = _hash; + sequenceNumber = _sequenceNumber; + batcherHash = _batcherHash; + l1FeeOverhead = _l1FeeOverhead; + l1FeeScalar = _l1FeeScalar; + } +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/libraries/Predeploys.sol b/contracts/src/v0.8/vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/libraries/Predeploys.sol new file mode 100644 index 00000000..4a0d399c --- /dev/null +++ b/contracts/src/v0.8/vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/libraries/Predeploys.sol @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/// @title Predeploys +/// @notice Contains constant addresses for contracts that are pre-deployed to the L2 system. +library Predeploys { + /// @notice Address of the L2ToL1MessagePasser predeploy. 
+ address internal constant L2_TO_L1_MESSAGE_PASSER = 0x4200000000000000000000000000000000000016; + + /// @notice Address of the L2CrossDomainMessenger predeploy. + address internal constant L2_CROSS_DOMAIN_MESSENGER = 0x4200000000000000000000000000000000000007; + + /// @notice Address of the L2StandardBridge predeploy. + address internal constant L2_STANDARD_BRIDGE = 0x4200000000000000000000000000000000000010; + + /// @notice Address of the L2ERC721Bridge predeploy. + address internal constant L2_ERC721_BRIDGE = 0x4200000000000000000000000000000000000014; + + //// @notice Address of the SequencerFeeWallet predeploy. + address internal constant SEQUENCER_FEE_WALLET = 0x4200000000000000000000000000000000000011; + + /// @notice Address of the OptimismMintableERC20Factory predeploy. + address internal constant OPTIMISM_MINTABLE_ERC20_FACTORY = 0x4200000000000000000000000000000000000012; + + /// @notice Address of the OptimismMintableERC721Factory predeploy. + address internal constant OPTIMISM_MINTABLE_ERC721_FACTORY = 0x4200000000000000000000000000000000000017; + + /// @notice Address of the L1Block predeploy. + address internal constant L1_BLOCK_ATTRIBUTES = 0x4200000000000000000000000000000000000015; + + /// @notice Address of the GasPriceOracle predeploy. Includes fee information + /// and helpers for computing the L1 portion of the transaction fee. + address internal constant GAS_PRICE_ORACLE = 0x420000000000000000000000000000000000000F; + + /// @custom:legacy + /// @notice Address of the L1MessageSender predeploy. Deprecated. Use L2CrossDomainMessenger + /// or access tx.origin (or msg.sender) in a L1 to L2 transaction instead. + address internal constant L1_MESSAGE_SENDER = 0x4200000000000000000000000000000000000001; + + /// @custom:legacy + /// @notice Address of the DeployerWhitelist predeploy. No longer active. 
+ address internal constant DEPLOYER_WHITELIST = 0x4200000000000000000000000000000000000002; + + /// @custom:legacy + /// @notice Address of the LegacyERC20ETH predeploy. Deprecated. Balances are migrated to the + /// state trie as of the Bedrock upgrade. Contract has been locked and write functions + /// can no longer be accessed. + address internal constant LEGACY_ERC20_ETH = 0xDeadDeAddeAddEAddeadDEaDDEAdDeaDDeAD0000; + + /// @custom:legacy + /// @notice Address of the L1BlockNumber predeploy. Deprecated. Use the L1Block predeploy + /// instead, which exposes more information about the L1 state. + address internal constant L1_BLOCK_NUMBER = 0x4200000000000000000000000000000000000013; + + /// @custom:legacy + /// @notice Address of the LegacyMessagePasser predeploy. Deprecate. Use the updated + /// L2ToL1MessagePasser contract instead. + address internal constant LEGACY_MESSAGE_PASSER = 0x4200000000000000000000000000000000000000; + + /// @notice Address of the ProxyAdmin predeploy. + address internal constant PROXY_ADMIN = 0x4200000000000000000000000000000000000018; + + /// @notice Address of the BaseFeeVault predeploy. + address internal constant BASE_FEE_VAULT = 0x4200000000000000000000000000000000000019; + + /// @notice Address of the L1FeeVault predeploy. + address internal constant L1_FEE_VAULT = 0x420000000000000000000000000000000000001A; + + /// @notice Address of the GovernanceToken predeploy. + address internal constant GOVERNANCE_TOKEN = 0x4200000000000000000000000000000000000042; + + /// @notice Address of the SchemaRegistry predeploy. + address internal constant SCHEMA_REGISTRY = 0x4200000000000000000000000000000000000020; + + /// @notice Address of the EAS predeploy. 
+ address internal constant EAS = 0x4200000000000000000000000000000000000021; +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/universal/ISemver.sol b/contracts/src/v0.8/vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/universal/ISemver.sol new file mode 100644 index 00000000..ae9569a0 --- /dev/null +++ b/contracts/src/v0.8/vendor/@eth-optimism/contracts-bedrock/v0.16.2/src/universal/ISemver.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/// @title ISemver +/// @notice ISemver is a simple contract for ensuring that contracts are +/// versioned using semantic versioning. +interface ISemver { + /// @notice Getter for the semantic version of the contract. This is not + /// meant to be used onchain but instead meant to be used by offchain + /// tooling. + /// @return Semver contract version as a string. + function version() external view returns (string memory); +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/@eth-optimism/contracts/v0.4.7/contracts/optimistic-ethereum/iOVM/bridge/messaging/iOVM_CrossDomainMessenger.sol b/contracts/src/v0.8/vendor/@eth-optimism/contracts/v0.4.7/contracts/optimistic-ethereum/iOVM/bridge/messaging/iOVM_CrossDomainMessenger.sol new file mode 100644 index 00000000..8b5aad82 --- /dev/null +++ b/contracts/src/v0.8/vendor/@eth-optimism/contracts/v0.4.7/contracts/optimistic-ethereum/iOVM/bridge/messaging/iOVM_CrossDomainMessenger.sol @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: MIT +pragma solidity >=0.7.6 <0.9.0; + +/** + * @title iOVM_CrossDomainMessenger + */ +interface iOVM_CrossDomainMessenger { + /********** + * Events * + **********/ + + event SentMessage(bytes message); + event RelayedMessage(bytes32 msgHash); + event FailedRelayedMessage(bytes32 msgHash); + + /************* + * Variables * + *************/ + + function xDomainMessageSender() external view returns (address); + + /******************** + * Public 
Functions * + ********************/ + + /** + * Sends a cross domain message to the target messenger. + * @param _target Target contract address. + * @param _message Message to send to the target. + * @param _gasLimit Gas limit for the provided message. + */ + function sendMessage(address _target, bytes calldata _message, uint32 _gasLimit) external; +} diff --git a/contracts/src/v0.8/vendor/@eth-optimism/contracts/v0.8.6/contracts/L2/predeploys/OVM_GasPriceOracle.sol b/contracts/src/v0.8/vendor/@eth-optimism/contracts/v0.8.6/contracts/L2/predeploys/OVM_GasPriceOracle.sol new file mode 100644 index 00000000..a3e137a0 --- /dev/null +++ b/contracts/src/v0.8/vendor/@eth-optimism/contracts/v0.8.6/contracts/L2/predeploys/OVM_GasPriceOracle.sol @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +/* External Imports */ +import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; + +/** + * @title OVM_GasPriceOracle + * @dev This contract exposes the current l2 gas price, a measure of how congested the network + * currently is. This measure is used by the Sequencer to determine what fee to charge for + * transactions. When the system is more congested, the l2 gas price will increase and fees + * will also increase as a result. + * + * All public variables are set while generating the initial L2 state. The + * constructor doesn't run in practice as the L2 state generation script uses + * the deployed bytecode instead of running the initcode. 
+ */ +contract OVM_GasPriceOracle is Ownable { + /************* + * Variables * + *************/ + + // Current L2 gas price + uint256 public gasPrice; + // Current L1 base fee + uint256 public l1BaseFee; + // Amortized cost of batch submission per transaction + uint256 public overhead; + // Value to scale the fee up by + uint256 public scalar; + // Number of decimals of the scalar + uint256 public decimals; + + /*************** + * Constructor * + ***************/ + + /** + * @param _owner Address that will initially own this contract. + */ + constructor(address _owner) Ownable() { + transferOwnership(_owner); + } + + /********** + * Events * + **********/ + + event GasPriceUpdated(uint256); + event L1BaseFeeUpdated(uint256); + event OverheadUpdated(uint256); + event ScalarUpdated(uint256); + event DecimalsUpdated(uint256); + + /******************** + * Public Functions * + ********************/ + + /** + * Allows the owner to modify the l2 gas price. + * @param _gasPrice New l2 gas price. + */ + // slither-disable-next-line external-function + function setGasPrice(uint256 _gasPrice) public onlyOwner { + gasPrice = _gasPrice; + emit GasPriceUpdated(_gasPrice); + } + + /** + * Allows the owner to modify the l1 base fee. + * @param _baseFee New l1 base fee + */ + // slither-disable-next-line external-function + function setL1BaseFee(uint256 _baseFee) public onlyOwner { + l1BaseFee = _baseFee; + emit L1BaseFeeUpdated(_baseFee); + } + + /** + * Allows the owner to modify the overhead. + * @param _overhead New overhead + */ + // slither-disable-next-line external-function + function setOverhead(uint256 _overhead) public onlyOwner { + overhead = _overhead; + emit OverheadUpdated(_overhead); + } + + /** + * Allows the owner to modify the scalar. 
+ * @param _scalar New scalar + */ + // slither-disable-next-line external-function + function setScalar(uint256 _scalar) public onlyOwner { + scalar = _scalar; + emit ScalarUpdated(_scalar); + } + + /** + * Allows the owner to modify the decimals. + * @param _decimals New decimals + */ + // slither-disable-next-line external-function + function setDecimals(uint256 _decimals) public onlyOwner { + decimals = _decimals; + emit DecimalsUpdated(_decimals); + } + + /** + * Computes the L1 portion of the fee + * based on the size of the RLP encoded tx + * and the current l1BaseFee + * @param _data Unsigned RLP encoded tx, 6 elements + * @return L1 fee that should be paid for the tx + */ + // slither-disable-next-line external-function + function getL1Fee(bytes memory _data) public view returns (uint256) { + uint256 l1GasUsed = getL1GasUsed(_data); + uint256 l1Fee = l1GasUsed * l1BaseFee; + uint256 divisor = 10**decimals; + uint256 unscaled = l1Fee * scalar; + uint256 scaled = unscaled / divisor; + return scaled; + } + + // solhint-disable max-line-length + /** + * Computes the amount of L1 gas used for a transaction + * The overhead represents the per batch gas overhead of + * posting both transaction and state roots to L1 given larger + * batch sizes. 
+ * 4 gas for 0 byte + * https://github.com/ethereum/go-ethereum/blob/9ada4a2e2c415e6b0b51c50e901336872e028872/params/protocol_params.go#L33 + * 16 gas for non zero byte + * https://github.com/ethereum/go-ethereum/blob/9ada4a2e2c415e6b0b51c50e901336872e028872/params/protocol_params.go#L87 + * This will need to be updated if calldata gas prices change + * Account for the transaction being unsigned + * Padding is added to account for lack of signature on transaction + * 1 byte for RLP V prefix + * 1 byte for V + * 1 byte for RLP R prefix + * 32 bytes for R + * 1 byte for RLP S prefix + * 32 bytes for S + * Total: 68 bytes of padding + * @param _data Unsigned RLP encoded tx, 6 elements + * @return Amount of L1 gas used for a transaction + */ + // solhint-enable max-line-length + function getL1GasUsed(bytes memory _data) public view returns (uint256) { + uint256 total = 0; + for (uint256 i = 0; i < _data.length; i++) { + if (_data[i] == 0) { + total += 4; + } else { + total += 16; + } + } + uint256 unsigned = total + overhead; + return unsigned + (68 * 16); + } +} diff --git a/contracts/src/v0.8/vendor/@eth-optimism/contracts/v0.8.9/contracts/L2/predeploys/OVM_GasPriceOracle.sol b/contracts/src/v0.8/vendor/@eth-optimism/contracts/v0.8.9/contracts/L2/predeploys/OVM_GasPriceOracle.sol new file mode 100644 index 00000000..b2a6c787 --- /dev/null +++ b/contracts/src/v0.8/vendor/@eth-optimism/contracts/v0.8.9/contracts/L2/predeploys/OVM_GasPriceOracle.sol @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +/* External Imports */ +import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol"; + +/** + * @title OVM_GasPriceOracle + * @dev This contract exposes the current l2 gas price, a measure of how congested the network + * currently is. This measure is used by the Sequencer to determine what fee to charge for + * transactions. When the system is more congested, the l2 gas price will increase and fees + * will also increase as a result. 
+ * + * All public variables are set while generating the initial L2 state. The + * constructor doesn't run in practice as the L2 state generation script uses + * the deployed bytecode instead of running the initcode. + */ +contract OVM_GasPriceOracle is Ownable { + /************* + * Variables * + *************/ + + // Current L2 gas price + uint256 public gasPrice; + // Current L1 base fee + uint256 public l1BaseFee; + // Amortized cost of batch submission per transaction + uint256 public overhead; + // Value to scale the fee up by + uint256 public scalar; + // Number of decimals of the scalar + uint256 public decimals; + + /*************** + * Constructor * + ***************/ + + /** + * @param _owner Address that will initially own this contract. + */ + constructor(address _owner) Ownable() { + transferOwnership(_owner); + } + + /********** + * Events * + **********/ + + event GasPriceUpdated(uint256); + event L1BaseFeeUpdated(uint256); + event OverheadUpdated(uint256); + event ScalarUpdated(uint256); + event DecimalsUpdated(uint256); + + /******************** + * Public Functions * + ********************/ + + /** + * Allows the owner to modify the l2 gas price. + * @param _gasPrice New l2 gas price. + */ + // slither-disable-next-line external-function + function setGasPrice(uint256 _gasPrice) public onlyOwner { + gasPrice = _gasPrice; + emit GasPriceUpdated(_gasPrice); + } + + /** + * Allows the owner to modify the l1 base fee. + * @param _baseFee New l1 base fee + */ + // slither-disable-next-line external-function + function setL1BaseFee(uint256 _baseFee) public onlyOwner { + l1BaseFee = _baseFee; + emit L1BaseFeeUpdated(_baseFee); + } + + /** + * Allows the owner to modify the overhead. + * @param _overhead New overhead + */ + // slither-disable-next-line external-function + function setOverhead(uint256 _overhead) public onlyOwner { + overhead = _overhead; + emit OverheadUpdated(_overhead); + } + + /** + * Allows the owner to modify the scalar. 
+ * @param _scalar New scalar + */ + // slither-disable-next-line external-function + function setScalar(uint256 _scalar) public onlyOwner { + scalar = _scalar; + emit ScalarUpdated(_scalar); + } + + /** + * Allows the owner to modify the decimals. + * @param _decimals New decimals + */ + // slither-disable-next-line external-function + function setDecimals(uint256 _decimals) public onlyOwner { + decimals = _decimals; + emit DecimalsUpdated(_decimals); + } + + /** + * Computes the L1 portion of the fee + * based on the size of the RLP encoded tx + * and the current l1BaseFee + * @param _data Unsigned RLP encoded tx, 6 elements + * @return L1 fee that should be paid for the tx + */ + // slither-disable-next-line external-function + function getL1Fee(bytes memory _data) public view returns (uint256) { + uint256 l1GasUsed = getL1GasUsed(_data); + uint256 l1Fee = l1GasUsed * l1BaseFee; + uint256 divisor = 10 ** decimals; + uint256 unscaled = l1Fee * scalar; + uint256 scaled = unscaled / divisor; + return scaled; + } + + // solhint-disable max-line-length + /** + * Computes the amount of L1 gas used for a transaction + * The overhead represents the per batch gas overhead of + * posting both transaction and state roots to L1 given larger + * batch sizes. 
+ * 4 gas for 0 byte + * https://github.com/ethereum/go-ethereum/blob/9ada4a2e2c415e6b0b51c50e901336872e028872/params/protocol_params.go#L33 + * 16 gas for non zero byte + * https://github.com/ethereum/go-ethereum/blob/9ada4a2e2c415e6b0b51c50e901336872e028872/params/protocol_params.go#L87 + * This will need to be updated if calldata gas prices change + * Account for the transaction being unsigned + * Padding is added to account for lack of signature on transaction + * 1 byte for RLP V prefix + * 1 byte for V + * 1 byte for RLP R prefix + * 32 bytes for R + * 1 byte for RLP S prefix + * 32 bytes for S + * Total: 68 bytes of padding + * @param _data Unsigned RLP encoded tx, 6 elements + * @return Amount of L1 gas used for a transaction + */ + // solhint-enable max-line-length + function getL1GasUsed(bytes memory _data) public view returns (uint256) { + uint256 total = 0; + for (uint256 i = 0; i < _data.length; i++) { + if (_data[i] == 0) { + total += 4; + } else { + total += 16; + } + } + uint256 unsigned = total + overhead; + return unsigned + (68 * 16); + } +} diff --git a/contracts/src/v0.8/vendor/@scroll-tech/contracts/src/L2/predeploys/IScrollL1GasPriceOracle.sol b/contracts/src/v0.8/vendor/@scroll-tech/contracts/src/L2/predeploys/IScrollL1GasPriceOracle.sol new file mode 100644 index 00000000..95b88e98 --- /dev/null +++ b/contracts/src/v0.8/vendor/@scroll-tech/contracts/src/L2/predeploys/IScrollL1GasPriceOracle.sol @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.16; + +interface IScrollL1GasPriceOracle { + /********** + * Events * + **********/ + + /// @notice Emitted when current fee overhead is updated. + /// @param overhead The current fee overhead updated. + event OverheadUpdated(uint256 overhead); + + /// @notice Emitted when current fee scalar is updated. + /// @param scalar The current fee scalar updated. + event ScalarUpdated(uint256 scalar); + + /// @notice Emitted when current l1 base fee is updated. 
+ /// @param l1BaseFee The current l1 base fee updated. + event L1BaseFeeUpdated(uint256 l1BaseFee); + + /************************* + * Public View Functions * + *************************/ + + /// @notice Return the current l1 fee overhead. + function overhead() external view returns (uint256); + + /// @notice Return the current l1 fee scalar. + function scalar() external view returns (uint256); + + /// @notice Return the latest known l1 base fee. + function l1BaseFee() external view returns (uint256); + + /// @notice Computes the L1 portion of the fee based on the size of the rlp encoded input + /// transaction, the current L1 base fee, and the various dynamic parameters. + /// @param data Unsigned fully RLP-encoded transaction to get the L1 fee for. + /// @return L1 fee that should be paid for the tx + function getL1Fee(bytes memory data) external view returns (uint256); + + /// @notice Computes the amount of L1 gas used for a transaction. Adds the overhead which + /// represents the per-transaction gas overhead of posting the transaction and state + /// roots to L1. Adds 74 bytes of padding to account for the fact that the input does + /// not have a signature. + /// @param data Unsigned fully RLP-encoded transaction to get the L1 gas for. + /// @return Amount of L1 gas used to publish the transaction. + function getL1GasUsed(bytes memory data) external view returns (uint256); + + /***************************** + * Public Mutating Functions * + *****************************/ + + /// @notice Allows whitelisted caller to modify the l1 base fee. + /// @param _l1BaseFee New l1 base fee. 
+ function setL1BaseFee(uint256 _l1BaseFee) external; +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/BufferChainlink.sol b/contracts/src/v0.8/vendor/BufferChainlink.sol new file mode 100644 index 00000000..1c13621d --- /dev/null +++ b/contracts/src/v0.8/vendor/BufferChainlink.sol @@ -0,0 +1,337 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/** + * @dev A library for working with mutable byte buffers in Solidity. + * + * Byte buffers are mutable and expandable, and provide a variety of primitives + * for writing to them. At any time you can fetch a bytes object containing the + * current contents of the buffer. The bytes object should not be stored between + * operations, as it may change due to resizing of the buffer. + */ +library BufferPlugin { + /** + * @dev Represents a mutable buffer. Buffers have a current value (buf) and + * a capacity. The capacity may be longer than the current value, in + * which case it can be extended without the need to allocate more memory. + */ + struct buffer { + bytes buf; + uint256 capacity; + } + + /** + * @dev Initializes a buffer with an initial capacity. + * @param buf The buffer to initialize. + * @param capacity The number of bytes of space to allocate the buffer. + * @return The buffer, for chaining. + */ + function init(buffer memory buf, uint256 capacity) internal pure returns (buffer memory) { + if (capacity % 32 != 0) { + capacity += 32 - (capacity % 32); + } + // Allocate space for the buffer data + buf.capacity = capacity; + assembly { + let ptr := mload(0x40) + mstore(buf, ptr) + mstore(ptr, 0) + mstore(0x40, add(32, add(ptr, capacity))) + } + return buf; + } + + /** + * @dev Initializes a new buffer from an existing bytes object. + * Changes to the buffer may mutate the original value. + * @param b The bytes object to initialize the buffer with. + * @return A new buffer. 
+ */ + function fromBytes(bytes memory b) internal pure returns (buffer memory) { + buffer memory buf; + buf.buf = b; + buf.capacity = b.length; + return buf; + } + + function resize(buffer memory buf, uint256 capacity) private pure { + bytes memory oldbuf = buf.buf; + init(buf, capacity); + append(buf, oldbuf); + } + + function max(uint256 a, uint256 b) private pure returns (uint256) { + if (a > b) { + return a; + } + return b; + } + + /** + * @dev Sets buffer length to 0. + * @param buf The buffer to truncate. + * @return The original buffer, for chaining.. + */ + function truncate(buffer memory buf) internal pure returns (buffer memory) { + assembly { + let bufptr := mload(buf) + mstore(bufptr, 0) + } + return buf; + } + + /** + * @dev Writes a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param off The start offset to write to. + * @param data The data to append. + * @param len The number of bytes to copy. + * @return The original buffer, for chaining. 
+ */ + function write( + buffer memory buf, + uint256 off, + bytes memory data, + uint256 len + ) internal pure returns (buffer memory) { + require(len <= data.length); + + if (off + len > buf.capacity) { + resize(buf, max(buf.capacity, len + off) * 2); + } + + uint256 dest; + uint256 src; + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Length of existing buffer data + let buflen := mload(bufptr) + // Start address = buffer address + offset + sizeof(buffer length) + dest := add(add(bufptr, 32), off) + // Update buffer length if we're extending it + if gt(add(len, off), buflen) { + mstore(bufptr, add(len, off)) + } + src := add(data, 32) + } + + // Copy word-length chunks while possible + for (; len >= 32; len -= 32) { + assembly { + mstore(dest, mload(src)) + } + dest += 32; + src += 32; + } + + // Copy remaining bytes + unchecked { + uint256 mask = (256**(32 - len)) - 1; + assembly { + let srcpart := and(mload(src), not(mask)) + let destpart := and(mload(dest), mask) + mstore(dest, or(destpart, srcpart)) + } + } + + return buf; + } + + /** + * @dev Appends a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @param len The number of bytes to copy. + * @return The original buffer, for chaining. + */ + function append( + buffer memory buf, + bytes memory data, + uint256 len + ) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, len); + } + + /** + * @dev Appends a byte string to a buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function append(buffer memory buf, bytes memory data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, data.length); + } + + /** + * @dev Writes a byte to the buffer. 
Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write the byte at. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function writeUint8( + buffer memory buf, + uint256 off, + uint8 data + ) internal pure returns (buffer memory) { + if (off >= buf.capacity) { + resize(buf, buf.capacity * 2); + } + + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Length of existing buffer data + let buflen := mload(bufptr) + // Address = buffer address + sizeof(buffer length) + off + let dest := add(add(bufptr, off), 32) + mstore8(dest, data) + // Update buffer length if we extended it + if eq(off, buflen) { + mstore(bufptr, add(buflen, 1)) + } + } + return buf; + } + + /** + * @dev Appends a byte to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendUint8(buffer memory buf, uint8 data) internal pure returns (buffer memory) { + return writeUint8(buf, buf.buf.length, data); + } + + /** + * @dev Writes up to 32 bytes to the buffer. Resizes if doing so would + * exceed the capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @param len The number of bytes to write (left-aligned). + * @return The original buffer, for chaining. 
+ */ + function write( + buffer memory buf, + uint256 off, + bytes32 data, + uint256 len + ) private pure returns (buffer memory) { + if (len + off > buf.capacity) { + resize(buf, (len + off) * 2); + } + + unchecked { + uint256 mask = (256**len) - 1; + // Right-align data + data = data >> (8 * (32 - len)); + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + sizeof(buffer length) + off + len + let dest := add(add(bufptr, off), len) + mstore(dest, or(and(mload(dest), not(mask)), data)) + // Update buffer length if we extended it + if gt(add(off, len), mload(bufptr)) { + mstore(bufptr, add(off, len)) + } + } + } + return buf; + } + + /** + * @dev Writes a bytes20 to the buffer. Resizes if doing so would exceed the + * capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function writeBytes20( + buffer memory buf, + uint256 off, + bytes20 data + ) internal pure returns (buffer memory) { + return write(buf, off, bytes32(data), 20); + } + + /** + * @dev Appends a bytes20 to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendBytes20(buffer memory buf, bytes20 data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, bytes32(data), 20); + } + + /** + * @dev Appends a bytes32 to the buffer. Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer, for chaining. + */ + function appendBytes32(buffer memory buf, bytes32 data) internal pure returns (buffer memory) { + return write(buf, buf.buf.length, data, 32); + } + + /** + * @dev Writes an integer to the buffer. 
Resizes if doing so would exceed + * the capacity of the buffer. + * @param buf The buffer to append to. + * @param off The offset to write at. + * @param data The data to append. + * @param len The number of bytes to write (right-aligned). + * @return The original buffer, for chaining. + */ + function writeInt( + buffer memory buf, + uint256 off, + uint256 data, + uint256 len + ) private pure returns (buffer memory) { + if (len + off > buf.capacity) { + resize(buf, (len + off) * 2); + } + + uint256 mask = (256**len) - 1; + assembly { + // Memory address of the buffer data + let bufptr := mload(buf) + // Address = buffer address + off + sizeof(buffer length) + len + let dest := add(add(bufptr, off), len) + mstore(dest, or(and(mload(dest), not(mask)), data)) + // Update buffer length if we extended it + if gt(add(off, len), mload(bufptr)) { + mstore(bufptr, add(off, len)) + } + } + return buf; + } + + /** + * @dev Appends a byte to the end of the buffer. Resizes if doing so would + * exceed the capacity of the buffer. + * @param buf The buffer to append to. + * @param data The data to append. + * @return The original buffer. 
+ */ + function appendInt( + buffer memory buf, + uint256 data, + uint256 len + ) internal pure returns (buffer memory) { + return writeInt(buf, buf.buf.length, data, len); + } +} diff --git a/contracts/src/v0.8/vendor/CBORChainlink.sol b/contracts/src/v0.8/vendor/CBORChainlink.sol new file mode 100644 index 00000000..23684e8c --- /dev/null +++ b/contracts/src/v0.8/vendor/CBORChainlink.sol @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: MIT +pragma solidity >=0.4.19; + +import {BufferPlugin} from "./BufferPlugin.sol"; + +library CBORPlugin { + using BufferPlugin for BufferPlugin.buffer; + + uint8 private constant MAJOR_TYPE_INT = 0; + uint8 private constant MAJOR_TYPE_NEGATIVE_INT = 1; + uint8 private constant MAJOR_TYPE_BYTES = 2; + uint8 private constant MAJOR_TYPE_STRING = 3; + uint8 private constant MAJOR_TYPE_ARRAY = 4; + uint8 private constant MAJOR_TYPE_MAP = 5; + uint8 private constant MAJOR_TYPE_TAG = 6; + uint8 private constant MAJOR_TYPE_CONTENT_FREE = 7; + + uint8 private constant TAG_TYPE_BIGNUM = 2; + uint8 private constant TAG_TYPE_NEGATIVE_BIGNUM = 3; + + function encodeFixedNumeric(BufferPlugin.buffer memory buf, uint8 major, uint64 value) private pure { + if(value <= 23) { + buf.appendUint8(uint8((major << 5) | value)); + } else if (value <= 0xFF) { + buf.appendUint8(uint8((major << 5) | 24)); + buf.appendInt(value, 1); + } else if (value <= 0xFFFF) { + buf.appendUint8(uint8((major << 5) | 25)); + buf.appendInt(value, 2); + } else if (value <= 0xFFFFFFFF) { + buf.appendUint8(uint8((major << 5) | 26)); + buf.appendInt(value, 4); + } else { + buf.appendUint8(uint8((major << 5) | 27)); + buf.appendInt(value, 8); + } + } + + function encodeIndefiniteLengthType(BufferPlugin.buffer memory buf, uint8 major) private pure { + buf.appendUint8(uint8((major << 5) | 31)); + } + + function encodeUInt(BufferPlugin.buffer memory buf, uint value) internal pure { + if(value > 0xFFFFFFFFFFFFFFFF) { + encodeBigNum(buf, value); + } else { + encodeFixedNumeric(buf, 
MAJOR_TYPE_INT, uint64(value)); + } + } + + function encodeInt(BufferPlugin.buffer memory buf, int value) internal pure { + if(value < -0x10000000000000000) { + encodeSignedBigNum(buf, value); + } else if(value > 0xFFFFFFFFFFFFFFFF) { + encodeBigNum(buf, uint(value)); + } else if(value >= 0) { + encodeFixedNumeric(buf, MAJOR_TYPE_INT, uint64(uint256(value))); + } else { + encodeFixedNumeric(buf, MAJOR_TYPE_NEGATIVE_INT, uint64(uint256(-1 - value))); + } + } + + function encodeBytes(BufferPlugin.buffer memory buf, bytes memory value) internal pure { + encodeFixedNumeric(buf, MAJOR_TYPE_BYTES, uint64(value.length)); + buf.append(value); + } + + function encodeBigNum(BufferPlugin.buffer memory buf, uint value) internal pure { + buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_BIGNUM)); + encodeBytes(buf, abi.encode(value)); + } + + function encodeSignedBigNum(BufferPlugin.buffer memory buf, int input) internal pure { + buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_NEGATIVE_BIGNUM)); + encodeBytes(buf, abi.encode(uint256(-1 - input))); + } + + function encodeString(BufferPlugin.buffer memory buf, string memory value) internal pure { + encodeFixedNumeric(buf, MAJOR_TYPE_STRING, uint64(bytes(value).length)); + buf.append(bytes(value)); + } + + function startArray(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_ARRAY); + } + + function startMap(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_MAP); + } + + function endSequence(BufferPlugin.buffer memory buf) internal pure { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_CONTENT_FREE); + } +} diff --git a/contracts/src/v0.8/vendor/DateTime.sol b/contracts/src/v0.8/vendor/DateTime.sol new file mode 100644 index 00000000..d375ca4b --- /dev/null +++ b/contracts/src/v0.8/vendor/DateTime.sol @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: MIT + +// sourced from https://github.com/pipermerriam/ethereum-datetime + +pragma solidity 
^0.8.0; + +library DateTime { + /* + * Date and Time utilities for ethereum contracts + * + */ + struct _DateTime { + uint16 year; + uint8 month; + uint8 day; + uint8 hour; + uint8 minute; + uint8 second; + uint8 weekday; + } + + uint256 constant DAY_IN_SECONDS = 86400; + uint256 constant YEAR_IN_SECONDS = 31536000; + uint256 constant LEAP_YEAR_IN_SECONDS = 31622400; + + uint256 constant HOUR_IN_SECONDS = 3600; + uint256 constant MINUTE_IN_SECONDS = 60; + + uint16 constant ORIGIN_YEAR = 1970; + + function isLeapYear(uint16 year) internal pure returns (bool) { + if (year % 4 != 0) { + return false; + } + if (year % 100 != 0) { + return true; + } + if (year % 400 != 0) { + return false; + } + return true; + } + + function leapYearsBefore(uint256 year) internal pure returns (uint256) { + year -= 1; + return year / 4 - year / 100 + year / 400; + } + + function getDaysInMonth(uint8 month, uint16 year) + internal + pure + returns (uint8) + { + if ( + month == 1 || + month == 3 || + month == 5 || + month == 7 || + month == 8 || + month == 10 || + month == 12 + ) { + return 31; + } else if (month == 4 || month == 6 || month == 9 || month == 11) { + return 30; + } else if (isLeapYear(year)) { + return 29; + } else { + return 28; + } + } + + function parseTimestamp(uint256 timestamp) + internal + pure + returns (_DateTime memory dt) + { + uint256 secondsAccountedFor = 0; + uint256 buf; + uint8 i; + + // Year + dt.year = getYear(timestamp); + buf = leapYearsBefore(dt.year) - leapYearsBefore(ORIGIN_YEAR); + + secondsAccountedFor += LEAP_YEAR_IN_SECONDS * buf; + secondsAccountedFor += YEAR_IN_SECONDS * (dt.year - ORIGIN_YEAR - buf); + + // Month + uint256 secondsInMonth; + for (i = 1; i <= 12; i++) { + secondsInMonth = DAY_IN_SECONDS * getDaysInMonth(i, dt.year); + if (secondsInMonth + secondsAccountedFor > timestamp) { + dt.month = i; + break; + } + secondsAccountedFor += secondsInMonth; + } + + // Day + for (i = 1; i <= getDaysInMonth(dt.month, dt.year); i++) { + if 
(DAY_IN_SECONDS + secondsAccountedFor > timestamp) { + dt.day = i; + break; + } + secondsAccountedFor += DAY_IN_SECONDS; + } + + // Hour + dt.hour = getHour(timestamp); + + // Minute + dt.minute = getMinute(timestamp); + + // Second + dt.second = getSecond(timestamp); + + // Day of week. + dt.weekday = getWeekday(timestamp); + } + + function getYear(uint256 timestamp) internal pure returns (uint16) { + uint256 secondsAccountedFor = 0; + uint16 year; + uint256 numLeapYears; + + // Year + year = uint16(ORIGIN_YEAR + timestamp / YEAR_IN_SECONDS); + numLeapYears = leapYearsBefore(year) - leapYearsBefore(ORIGIN_YEAR); + + secondsAccountedFor += LEAP_YEAR_IN_SECONDS * numLeapYears; + secondsAccountedFor += + YEAR_IN_SECONDS * + (year - ORIGIN_YEAR - numLeapYears); + + while (secondsAccountedFor > timestamp) { + if (isLeapYear(uint16(year - 1))) { + secondsAccountedFor -= LEAP_YEAR_IN_SECONDS; + } else { + secondsAccountedFor -= YEAR_IN_SECONDS; + } + year -= 1; + } + return year; + } + + function getMonth(uint256 timestamp) internal pure returns (uint8) { + return parseTimestamp(timestamp).month; + } + + function getDay(uint256 timestamp) internal pure returns (uint8) { + return parseTimestamp(timestamp).day; + } + + function getHour(uint256 timestamp) internal pure returns (uint8) { + return uint8((timestamp / 60 / 60) % 24); + } + + function getMinute(uint256 timestamp) internal pure returns (uint8) { + return uint8((timestamp / 60) % 60); + } + + function getSecond(uint256 timestamp) internal pure returns (uint8) { + return uint8(timestamp % 60); + } + + function getWeekday(uint256 timestamp) internal pure returns (uint8) { + return uint8((timestamp / DAY_IN_SECONDS + 4) % 7); + } + + function toTimestamp( + uint16 year, + uint8 month, + uint8 day + ) internal pure returns (uint256 timestamp) { + return toTimestamp(year, month, day, 0, 0, 0); + } + + function toTimestamp( + uint16 year, + uint8 month, + uint8 day, + uint8 hour + ) internal pure returns (uint256 
timestamp) { + return toTimestamp(year, month, day, hour, 0, 0); + } + + function toTimestamp( + uint16 year, + uint8 month, + uint8 day, + uint8 hour, + uint8 minute + ) internal pure returns (uint256 timestamp) { + return toTimestamp(year, month, day, hour, minute, 0); + } + + function toTimestamp( + uint16 year, + uint8 month, + uint8 day, + uint8 hour, + uint8 minute, + uint8 second + ) internal pure returns (uint256 timestamp) { + uint16 i; + + // Year + for (i = ORIGIN_YEAR; i < year; i++) { + if (isLeapYear(i)) { + timestamp += LEAP_YEAR_IN_SECONDS; + } else { + timestamp += YEAR_IN_SECONDS; + } + } + + // Month + uint8[12] memory monthDayCounts; + monthDayCounts[0] = 31; + if (isLeapYear(year)) { + monthDayCounts[1] = 29; + } else { + monthDayCounts[1] = 28; + } + monthDayCounts[2] = 31; + monthDayCounts[3] = 30; + monthDayCounts[4] = 31; + monthDayCounts[5] = 30; + monthDayCounts[6] = 31; + monthDayCounts[7] = 31; + monthDayCounts[8] = 30; + monthDayCounts[9] = 31; + monthDayCounts[10] = 30; + monthDayCounts[11] = 31; + + for (i = 1; i < month; i++) { + timestamp += DAY_IN_SECONDS * monthDayCounts[i - 1]; + } + + // Day + timestamp += DAY_IN_SECONDS * (day - 1); + + // Hour + timestamp += HOUR_IN_SECONDS * (hour); + + // Minute + timestamp += MINUTE_IN_SECONDS * (minute); + + // Second + timestamp += second; + + return timestamp; + } +} diff --git a/contracts/src/v0.8/vendor/ENSResolver.sol b/contracts/src/v0.8/vendor/ENSResolver.sol new file mode 100644 index 00000000..eb92cedf --- /dev/null +++ b/contracts/src/v0.8/vendor/ENSResolver.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +abstract contract ENSResolver { + function addr(bytes32 node) public view virtual returns (address); +} diff --git a/contracts/src/v0.8/vendor/IERC165.sol b/contracts/src/v0.8/vendor/IERC165.sol new file mode 100644 index 00000000..9af4bf80 --- /dev/null +++ b/contracts/src/v0.8/vendor/IERC165.sol @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: 
MIT +pragma solidity >=0.6.2; + +interface IERC165 { + /// @notice Query if a contract implements an interface + /// @param interfaceID The interface identifier, as specified in ERC-165 + /// @dev Interface identification is specified in ERC-165. This function + /// uses less than 30,000 gas. + /// @return `true` if the contract implements `interfaceID` and + /// `interfaceID` is not 0xffffffff, `false` otherwise + function supportsInterface(bytes4 interfaceID) external view returns (bool); +} diff --git a/contracts/src/v0.8/vendor/MockOVMCrossDomainMessenger.sol b/contracts/src/v0.8/vendor/MockOVMCrossDomainMessenger.sol new file mode 100644 index 00000000..ae8b6af1 --- /dev/null +++ b/contracts/src/v0.8/vendor/MockOVMCrossDomainMessenger.sol @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: MIT + +pragma solidity >=0.7.6 <0.9.0; + +import "./openzeppelin-solidity/v4.8.3/contracts/utils/Address.sol"; + +/** + * @title iOVM_CrossDomainMessenger + */ +interface iOVM_CrossDomainMessenger { + /********** + * Events * + **********/ + + event SentMessage(bytes message); + event RelayedMessage(bytes32 msgHash); + event FailedRelayedMessage(bytes32 msgHash); + + /************* + * Variables * + *************/ + + function xDomainMessageSender() external view returns (address); + + /******************** + * Public Functions * + ********************/ + + /** + * Sends a cross domain message to the target messenger. + * @param _target Target contract address. + * @param _message Message to send to the target. + * @param _gasLimit Gas limit for the provided message. 
+ */ + function sendMessage( + address _target, + bytes calldata _message, + uint32 _gasLimit + ) external; +} + +contract MockOVMCrossDomainMessenger is iOVM_CrossDomainMessenger{ + address internal mockMessageSender; + + constructor(address sender) { + mockMessageSender = sender; + } + + function xDomainMessageSender() external view override returns (address) { + return mockMessageSender; + } + + function _setMockMessageSender(address sender) external { + mockMessageSender = sender; + } + + /******************** + * Public Functions * + ********************/ + + /** + * Sends a cross domain message to the target messenger. + * @param _target Target contract address. + * @param _message Message to send to the target. + * @param _gasLimit Gas limit for the provided message. + */ + function sendMessage( + address _target, + bytes calldata _message, + uint32 _gasLimit + ) external override { + Address.functionCall(_target, _message, "sendMessage reverted"); + } +} diff --git a/contracts/src/v0.8/vendor/MockScrollCrossDomainMessenger.sol b/contracts/src/v0.8/vendor/MockScrollCrossDomainMessenger.sol new file mode 100644 index 00000000..bb5390b9 --- /dev/null +++ b/contracts/src/v0.8/vendor/MockScrollCrossDomainMessenger.sol @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.16; + +import "./openzeppelin-solidity/v4.8.3/contracts/utils/Address.sol"; + +/// sourced from: https://github.com/scroll-tech/scroll/blob/develop/contracts/src/libraries/IScrollMessenger.sol +interface IScrollMessenger { + /// ********** + /// * Events * + /// ********** + + /// @notice Emitted when a cross domain message is sent. + /// @param sender The address of the sender who initiates the message. + /// @param target The address of target contract to call. + /// @param value The amount of value passed to the target contract. + /// @param messageNonce The nonce of the message. + /// @param gasLimit The optional gas limit passed to L1 or L2. 
+ /// @param message The calldata passed to the target contract. + event SentMessage( + address indexed sender, + address indexed target, + uint256 value, + uint256 messageNonce, + uint256 gasLimit, + bytes message + ); + + /// @notice Emitted when a cross domain message is relayed successfully. + /// @param messageHash The hash of the message. + event RelayedMessage(bytes32 indexed messageHash); + + /// @notice Emitted when a cross domain message is failed to relay. + /// @param messageHash The hash of the message. + event FailedRelayedMessage(bytes32 indexed messageHash); + + /// ************************* + /// * Public View Functions * + /// ************************* + + /// @notice Return the sender of a cross domain message. + function xDomainMessageSender() external view returns (address); + + /// ***************************** + /// * Public Mutating Functions * + /// ***************************** + + /// @notice Send cross chain message from L1 to L2 or L2 to L1. + /// @param target The address of account who receive the message. + /// @param value The amount of ether passed when call target contract. + /// @param message The content of the message. + /// @param gasLimit Gas limit required to complete the message relay on corresponding chain. + function sendMessage(address target, uint256 value, bytes calldata message, uint256 gasLimit) external payable; + + /// @notice Send cross chain message from L1 to L2 or L2 to L1. + /// @param target The address of account who receive the message. + /// @param value The amount of ether passed when call target contract. + /// @param message The content of the message. + /// @param gasLimit Gas limit required to complete the message relay on corresponding chain. + /// @param refundAddress The address of account who will receive the refunded fee. 
+ function sendMessage( + address target, + uint256 value, + bytes calldata message, + uint256 gasLimit, + address refundAddress + ) external payable; +} + +contract MockScrollCrossDomainMessenger is IScrollMessenger { + address internal mockMessageSender; + + constructor(address sender) { + mockMessageSender = sender; + } + + function xDomainMessageSender() external view override returns (address) { + return mockMessageSender; + } + + function _setMockMessageSender(address sender) external { + mockMessageSender = sender; + } + + /// ***************************** + /// * Public Mutating Functions * + /// ***************************** + + /// @notice Send cross chain message from L1 to L2 or L2 to L1. + /// @param _target The address of account who receive the message. + /// @param _message The content of the message. + function sendMessage(address _target, uint256, bytes calldata _message, uint256) external payable override { + Address.functionCall(_target, _message, "sendMessage reverted"); + } + + /// @notice Send cross chain message from L1 to L2 or L2 to L1. + /// @param _target The address of account who receive the message. + /// @param _message The content of the message. + function sendMessage(address _target, uint256, bytes calldata _message, uint256, address) external payable override { + Address.functionCall(_target, _message, "sendMessage reverted"); + } +} diff --git a/contracts/src/v0.8/vendor/MultiSend.sol b/contracts/src/v0.8/vendor/MultiSend.sol new file mode 100644 index 00000000..15a7d2f5 --- /dev/null +++ b/contracts/src/v0.8/vendor/MultiSend.sol @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: LGPL-3.0-only +pragma solidity >=0.7.0 <0.9.0; + +/// @title Multi Send - Allows to batch multiple transactions into one. 
+/// @author Nick Dodson - +/// @author Gonçalo Sá - +/// @author Stefan George - +/// @author Richard Meissner - +contract MultiSend { + address private immutable multisendSingleton; + + constructor() { + multisendSingleton = address(this); + } + + /// @dev Sends multiple transactions and reverts all if one fails. + /// @param transactions Encoded transactions. Each transaction is encoded as a packed bytes of + /// operation as a uint8 with 0 for a call or 1 for a delegatecall (=> 1 byte), + /// to as a address (=> 20 bytes), + /// value as a uint256 (=> 32 bytes), + /// data length as a uint256 (=> 32 bytes), + /// data as bytes. + /// see abi.encodePacked for more information on packed encoding + /// @notice This method is payable as delegatecalls keep the msg.value from the previous call + /// If the calling method (e.g. execTransaction) received ETH this would revert otherwise + function multiSend(bytes memory transactions) public payable { + require(address(this) != multisendSingleton, "MultiSend should only be called via delegatecall"); + // solhint-disable-next-line no-inline-assembly + assembly { + let length := mload(transactions) + let i := 0x20 + for { + // Pre block is not used in "while mode" + } lt(i, length) { + // Post block is not used in "while mode" + } { + // First byte of the data is the operation. + // We shift by 248 bits (256 - 8 [operation byte]) it right since mload will always load 32 bytes (a word). + // This will also zero out unused data. + let operation := shr(0xf8, mload(add(transactions, i))) + // We offset the load address by 1 byte (operation byte) + // We shift it right by 96 bits (256 - 160 [20 address bytes]) to right-align the data and zero out unused data. 
+ let to := shr(0x60, mload(add(transactions, add(i, 0x01)))) + // We offset the load address by 21 byte (operation byte + 20 address bytes) + let value := mload(add(transactions, add(i, 0x15))) + // We offset the load address by 53 byte (operation byte + 20 address bytes + 32 value bytes) + let dataLength := mload(add(transactions, add(i, 0x35))) + // We offset the load address by 85 byte (operation byte + 20 address bytes + 32 value bytes + 32 data length bytes) + let data := add(transactions, add(i, 0x55)) + let success := 0 + switch operation + case 0 { + success := call(gas(), to, value, data, dataLength, 0, 0) + } + case 1 { + success := delegatecall(gas(), to, data, dataLength, 0, 0) + } + if eq(success, 0) { + revert(0, 0) + } + // Next entry starts at 85 byte + data length + i := add(i, add(0x55, dataLength)) + } + } + } +} diff --git a/contracts/src/v0.8/vendor/Strings.sol b/contracts/src/v0.8/vendor/Strings.sol new file mode 100644 index 00000000..4a17bd99 --- /dev/null +++ b/contracts/src/v0.8/vendor/Strings.sol @@ -0,0 +1,842 @@ +// SPDX-License-Identifier: Apache 2.0 + +/* + * @title String & slice utility library for Solidity contracts. + * @author Nick Johnson + * + * @dev Functionality in this library is largely implemented using an + * abstraction called a 'slice'. A slice represents a part of a string - + * anything from the entire string to a single character, or even no + * characters at all (a 0-length slice). Since a slice only has to specify + * an offset and a length, copying and manipulating slices is a lot less + * expensive than copying and manipulating the strings they reference. + * + * To further reduce gas costs, most functions on slice that need to return + * a slice modify the original one instead of allocating a new one; for + * instance, `s.split(".")` will return the text up to the first '.', + * modifying s to only contain the remainder of the string after the '.'. 
+ * In situations where you do not want to modify the original slice, you + * can make a copy first with `.copy()`, for example: + * `s.copy().split(".")`. Try and avoid using this idiom in loops; since + * Solidity has no memory management, it will result in allocating many + * short-lived slices that are later discarded. + * + * Functions that return two slices come in two versions: a non-allocating + * version that takes the second slice as an argument, modifying it in + * place, and an allocating version that allocates and returns the second + * slice; see `nextRune` for example. + * + * Functions that have to copy string data will return strings rather than + * slices; these can be cast back to slices for further processing if + * required. + * + * For convenience, some functions are provided with non-modifying + * variants that create a new slice and return both; for instance, + * `s.splitNew('.')` leaves s unmodified, and returns two values + * corresponding to the left and right parts of the string. + */ + +pragma solidity ^0.8.0; + +library strings { + struct slice { + uint256 _len; + uint256 _ptr; + } + + function memcpy( + uint256 dest, + uint256 src, + uint256 len + ) private pure { + // Copy word-length chunks while possible + for (; len >= 32; len -= 32) { + assembly { + mstore(dest, mload(src)) + } + dest += 32; + src += 32; + } + + // Copy remaining bytes + uint256 mask = type(uint256).max; + if (len > 0) { + mask = 256**(32 - len) - 1; + } + assembly { + let srcpart := and(mload(src), not(mask)) + let destpart := and(mload(dest), mask) + mstore(dest, or(destpart, srcpart)) + } + } + + /* + * @dev Returns a slice containing the entire string. + * @param self The string to make a slice from. + * @return A newly allocated slice containing the entire string. 
+ */ + function toSlice(string memory self) internal pure returns (slice memory) { + uint256 ptr; + assembly { + ptr := add(self, 0x20) + } + return slice(bytes(self).length, ptr); + } + + /* + * @dev Returns the length of a null-terminated bytes32 string. + * @param self The value to find the length of. + * @return The length of the string, from 0 to 32. + */ + function len(bytes32 self) internal pure returns (uint256) { + uint256 ret; + if (self == 0) return 0; + if (uint256(self) & type(uint128).max == 0) { + ret += 16; + self = bytes32(uint256(self) / 0x100000000000000000000000000000000); + } + if (uint256(self) & type(uint64).max == 0) { + ret += 8; + self = bytes32(uint256(self) / 0x10000000000000000); + } + if (uint256(self) & type(uint32).max == 0) { + ret += 4; + self = bytes32(uint256(self) / 0x100000000); + } + if (uint256(self) & type(uint16).max == 0) { + ret += 2; + self = bytes32(uint256(self) / 0x10000); + } + if (uint256(self) & type(uint8).max == 0) { + ret += 1; + } + return 32 - ret; + } + + /* + * @dev Returns a slice containing the entire bytes32, interpreted as a + * null-terminated utf-8 string. + * @param self The bytes32 value to convert to a slice. + * @return A new slice containing the value of the input argument up to the + * first null. + */ + function toSliceB32(bytes32 self) internal pure returns (slice memory ret) { + // Allocate space for `self` in memory, copy it there, and point ret at it + assembly { + let ptr := mload(0x40) + mstore(0x40, add(ptr, 0x20)) + mstore(ptr, self) + mstore(add(ret, 0x20), ptr) + } + ret._len = len(self); + } + + /* + * @dev Returns a new slice containing the same data as the current slice. + * @param self The slice to copy. + * @return A new slice containing the same data as `self`. + */ + function copy(slice memory self) internal pure returns (slice memory) { + return slice(self._len, self._ptr); + } + + /* + * @dev Copies a slice to a new string. + * @param self The slice to copy. 
+ * @return A newly allocated string containing the slice's text. + */ + function toString(slice memory self) internal pure returns (string memory) { + string memory ret = new string(self._len); + uint256 retptr; + assembly { + retptr := add(ret, 32) + } + + memcpy(retptr, self._ptr, self._len); + return ret; + } + + /* + * @dev Returns the length in runes of the slice. Note that this operation + * takes time proportional to the length of the slice; avoid using it + * in loops, and call `slice.empty()` if you only need to know whether + * the slice is empty or not. + * @param self The slice to operate on. + * @return The length of the slice in runes. + */ + function len(slice memory self) internal pure returns (uint256 l) { + // Starting at ptr-31 means the LSB will be the byte we care about + uint256 ptr = self._ptr - 31; + uint256 end = ptr + self._len; + for (l = 0; ptr < end; l++) { + uint8 b; + assembly { + b := and(mload(ptr), 0xFF) + } + if (b < 0x80) { + ptr += 1; + } else if (b < 0xE0) { + ptr += 2; + } else if (b < 0xF0) { + ptr += 3; + } else if (b < 0xF8) { + ptr += 4; + } else if (b < 0xFC) { + ptr += 5; + } else { + ptr += 6; + } + } + } + + /* + * @dev Returns true if the slice is empty (has a length of 0). + * @param self The slice to operate on. + * @return True if the slice is empty, False otherwise. + */ + function empty(slice memory self) internal pure returns (bool) { + return self._len == 0; + } + + /* + * @dev Returns a positive number if `other` comes lexicographically after + * `self`, a negative number if it comes before, or zero if the + * contents of the two slices are equal. Comparison is done per-rune, + * on unicode codepoints. + * @param self The first slice to compare. + * @param other The second slice to compare. + * @return The result of the comparison. 
+ */ + function compare(slice memory self, slice memory other) + internal + pure + returns (int256) + { + uint256 shortest = self._len; + if (other._len < self._len) shortest = other._len; + + uint256 selfptr = self._ptr; + uint256 otherptr = other._ptr; + for (uint256 idx = 0; idx < shortest; idx += 32) { + uint256 a; + uint256 b; + assembly { + a := mload(selfptr) + b := mload(otherptr) + } + if (a != b) { + // Mask out irrelevant bytes and check again + uint256 mask = type(uint256).max; // 0xffff... + if (shortest < 32) { + mask = ~(2**(8 * (32 - shortest + idx)) - 1); + } + unchecked { + uint256 diff = (a & mask) - (b & mask); + if (diff != 0) return int256(diff); + } + } + selfptr += 32; + otherptr += 32; + } + return int256(self._len) - int256(other._len); + } + + /* + * @dev Returns true if the two slices contain the same text. + * @param self The first slice to compare. + * @param self The second slice to compare. + * @return True if the slices are equal, false otherwise. + */ + function equals(slice memory self, slice memory other) + internal + pure + returns (bool) + { + return compare(self, other) == 0; + } + + /* + * @dev Extracts the first rune in the slice into `rune`, advancing the + * slice to point to the next rune and returning `self`. + * @param self The slice to operate on. + * @param rune The slice that will contain the first rune. + * @return `rune`. 
+ */ + function nextRune(slice memory self, slice memory rune) + internal + pure + returns (slice memory) + { + rune._ptr = self._ptr; + + if (self._len == 0) { + rune._len = 0; + return rune; + } + + uint256 l; + uint256 b; + // Load the first byte of the rune into the LSBs of b + assembly { + b := and(mload(sub(mload(add(self, 32)), 31)), 0xFF) + } + if (b < 0x80) { + l = 1; + } else if (b < 0xE0) { + l = 2; + } else if (b < 0xF0) { + l = 3; + } else { + l = 4; + } + + // Check for truncated codepoints + if (l > self._len) { + rune._len = self._len; + self._ptr += self._len; + self._len = 0; + return rune; + } + + self._ptr += l; + self._len -= l; + rune._len = l; + return rune; + } + + /* + * @dev Returns the first rune in the slice, advancing the slice to point + * to the next rune. + * @param self The slice to operate on. + * @return A slice containing only the first rune from `self`. + */ + function nextRune(slice memory self) + internal + pure + returns (slice memory ret) + { + nextRune(self, ret); + } + + /* + * @dev Returns the number of the first codepoint in the slice. + * @param self The slice to operate on. + * @return The number of the first codepoint in the slice. 
+ */ + function ord(slice memory self) internal pure returns (uint256 ret) { + if (self._len == 0) { + return 0; + } + + uint256 word; + uint256 length; + uint256 divisor = 2**248; + + // Load the rune into the MSBs of b + assembly { + word := mload(mload(add(self, 32))) + } + uint256 b = word / divisor; + if (b < 0x80) { + ret = b; + length = 1; + } else if (b < 0xE0) { + ret = b & 0x1F; + length = 2; + } else if (b < 0xF0) { + ret = b & 0x0F; + length = 3; + } else { + ret = b & 0x07; + length = 4; + } + + // Check for truncated codepoints + if (length > self._len) { + return 0; + } + + for (uint256 i = 1; i < length; i++) { + divisor = divisor / 256; + b = (word / divisor) & 0xFF; + if (b & 0xC0 != 0x80) { + // Invalid UTF-8 sequence + return 0; + } + ret = (ret * 64) | (b & 0x3F); + } + + return ret; + } + + /* + * @dev Returns the keccak-256 hash of the slice. + * @param self The slice to hash. + * @return The hash of the slice. + */ + function keccak(slice memory self) internal pure returns (bytes32 ret) { + assembly { + ret := keccak256(mload(add(self, 32)), mload(self)) + } + } + + /* + * @dev Returns true if `self` starts with `needle`. + * @param self The slice to operate on. + * @param needle The slice to search for. + * @return True if the slice starts with the provided text, false otherwise. + */ + function startsWith(slice memory self, slice memory needle) + internal + pure + returns (bool) + { + if (self._len < needle._len) { + return false; + } + + if (self._ptr == needle._ptr) { + return true; + } + + bool equal; + assembly { + let length := mload(needle) + let selfptr := mload(add(self, 0x20)) + let needleptr := mload(add(needle, 0x20)) + equal := eq(keccak256(selfptr, length), keccak256(needleptr, length)) + } + return equal; + } + + /* + * @dev If `self` starts with `needle`, `needle` is removed from the + * beginning of `self`. Otherwise, `self` is unmodified. + * @param self The slice to operate on. + * @param needle The slice to search for. 
+ * @return `self`
+ */
+ function beyond(slice memory self, slice memory needle)
+ internal
+ pure
+ returns (slice memory)
+ {
+ if (self._len < needle._len) {
+ return self;
+ }
+
+ bool equal = true;
+ if (self._ptr != needle._ptr) {
+ assembly {
+ let length := mload(needle)
+ let selfptr := mload(add(self, 0x20))
+ let needleptr := mload(add(needle, 0x20))
+ equal := eq(keccak256(selfptr, length), keccak256(needleptr, length))
+ }
+ }
+
+ if (equal) {
+ self._len -= needle._len;
+ self._ptr += needle._len;
+ }
+
+ return self;
+ }
+
+ /*
+ * @dev Returns true if the slice ends with `needle`.
+ * @param self The slice to operate on.
+ * @param needle The slice to search for.
+ * @return True if the slice ends with the provided text, false otherwise.
+ */
+ function endsWith(slice memory self, slice memory needle)
+ internal
+ pure
+ returns (bool)
+ {
+ if (self._len < needle._len) {
+ return false;
+ }
+
+ uint256 selfptr = self._ptr + self._len - needle._len;
+
+ if (selfptr == needle._ptr) {
+ return true;
+ }
+
+ bool equal;
+ assembly {
+ let length := mload(needle)
+ let needleptr := mload(add(needle, 0x20))
+ equal := eq(keccak256(selfptr, length), keccak256(needleptr, length))
+ }
+
+ return equal;
+ }
+
+ /*
+ * @dev If `self` ends with `needle`, `needle` is removed from the
+ * end of `self`. Otherwise, `self` is unmodified.
+ * @param self The slice to operate on.
+ * @param needle The slice to search for.
+ * @return `self` + */ + function until(slice memory self, slice memory needle) + internal + pure + returns (slice memory) + { + if (self._len < needle._len) { + return self; + } + + uint256 selfptr = self._ptr + self._len - needle._len; + bool equal = true; + if (selfptr != needle._ptr) { + assembly { + let length := mload(needle) + let needleptr := mload(add(needle, 0x20)) + equal := eq(keccak256(selfptr, length), keccak256(needleptr, length)) + } + } + + if (equal) { + self._len -= needle._len; + } + + return self; + } + + // Returns the memory address of the first byte of the first occurrence of + // `needle` in `self`, or the first byte after `self` if not found. + function findPtr( + uint256 selflen, + uint256 selfptr, + uint256 needlelen, + uint256 needleptr + ) private pure returns (uint256) { + uint256 ptr = selfptr; + uint256 idx; + + if (needlelen <= selflen) { + if (needlelen <= 32) { + bytes32 mask; + if (needlelen > 0) { + mask = bytes32(~(2**(8 * (32 - needlelen)) - 1)); + } + + bytes32 needledata; + assembly { + needledata := and(mload(needleptr), mask) + } + + uint256 end = selfptr + selflen - needlelen; + bytes32 ptrdata; + assembly { + ptrdata := and(mload(ptr), mask) + } + + while (ptrdata != needledata) { + if (ptr >= end) return selfptr + selflen; + ptr++; + assembly { + ptrdata := and(mload(ptr), mask) + } + } + return ptr; + } else { + // For long needles, use hashing + bytes32 hash; + assembly { + hash := keccak256(needleptr, needlelen) + } + + for (idx = 0; idx <= selflen - needlelen; idx++) { + bytes32 testHash; + assembly { + testHash := keccak256(ptr, needlelen) + } + if (hash == testHash) return ptr; + ptr += 1; + } + } + } + return selfptr + selflen; + } + + // Returns the memory address of the first byte after the last occurrence of + // `needle` in `self`, or the address of `self` if not found. 
+ function rfindPtr( + uint256 selflen, + uint256 selfptr, + uint256 needlelen, + uint256 needleptr + ) private pure returns (uint256) { + uint256 ptr; + + if (needlelen <= selflen) { + if (needlelen <= 32) { + bytes32 mask; + if (needlelen > 0) { + mask = bytes32(~(2**(8 * (32 - needlelen)) - 1)); + } + + bytes32 needledata; + assembly { + needledata := and(mload(needleptr), mask) + } + + ptr = selfptr + selflen - needlelen; + bytes32 ptrdata; + assembly { + ptrdata := and(mload(ptr), mask) + } + + while (ptrdata != needledata) { + if (ptr <= selfptr) return selfptr; + ptr--; + assembly { + ptrdata := and(mload(ptr), mask) + } + } + return ptr + needlelen; + } else { + // For long needles, use hashing + bytes32 hash; + assembly { + hash := keccak256(needleptr, needlelen) + } + ptr = selfptr + (selflen - needlelen); + while (ptr >= selfptr) { + bytes32 testHash; + assembly { + testHash := keccak256(ptr, needlelen) + } + if (hash == testHash) return ptr + needlelen; + ptr -= 1; + } + } + } + return selfptr; + } + + /* + * @dev Modifies `self` to contain everything from the first occurrence of + * `needle` to the end of the slice. `self` is set to the empty slice + * if `needle` is not found. + * @param self The slice to search and modify. + * @param needle The text to search for. + * @return `self`. + */ + function find(slice memory self, slice memory needle) + internal + pure + returns (slice memory) + { + uint256 ptr = findPtr(self._len, self._ptr, needle._len, needle._ptr); + self._len -= ptr - self._ptr; + self._ptr = ptr; + return self; + } + + /* + * @dev Modifies `self` to contain the part of the string from the start of + * `self` to the end of the first occurrence of `needle`. If `needle` + * is not found, `self` is set to the empty slice. + * @param self The slice to search and modify. + * @param needle The text to search for. + * @return `self`. 
+ */ + function rfind(slice memory self, slice memory needle) + internal + pure + returns (slice memory) + { + uint256 ptr = rfindPtr(self._len, self._ptr, needle._len, needle._ptr); + self._len = ptr - self._ptr; + return self; + } + + /* + * @dev Splits the slice, setting `self` to everything after the first + * occurrence of `needle`, and `token` to everything before it. If + * `needle` does not occur in `self`, `self` is set to the empty slice, + * and `token` is set to the entirety of `self`. + * @param self The slice to split. + * @param needle The text to search for in `self`. + * @param token An output parameter to which the first token is written. + * @return `token`. + */ + function split( + slice memory self, + slice memory needle, + slice memory token + ) internal pure returns (slice memory) { + uint256 ptr = findPtr(self._len, self._ptr, needle._len, needle._ptr); + token._ptr = self._ptr; + token._len = ptr - self._ptr; + if (ptr == self._ptr + self._len) { + // Not found + self._len = 0; + } else { + self._len -= token._len + needle._len; + self._ptr = ptr + needle._len; + } + return token; + } + + /* + * @dev Splits the slice, setting `self` to everything after the first + * occurrence of `needle`, and returning everything before it. If + * `needle` does not occur in `self`, `self` is set to the empty slice, + * and the entirety of `self` is returned. + * @param self The slice to split. + * @param needle The text to search for in `self`. + * @return The part of `self` up to the first occurrence of `delim`. + */ + function split(slice memory self, slice memory needle) + internal + pure + returns (slice memory token) + { + split(self, needle, token); + } + + /* + * @dev Splits the slice, setting `self` to everything before the last + * occurrence of `needle`, and `token` to everything after it. If + * `needle` does not occur in `self`, `self` is set to the empty slice, + * and `token` is set to the entirety of `self`. 
+ * @param self The slice to split. + * @param needle The text to search for in `self`. + * @param token An output parameter to which the first token is written. + * @return `token`. + */ + function rsplit( + slice memory self, + slice memory needle, + slice memory token + ) internal pure returns (slice memory) { + uint256 ptr = rfindPtr(self._len, self._ptr, needle._len, needle._ptr); + token._ptr = ptr; + token._len = self._len - (ptr - self._ptr); + if (ptr == self._ptr) { + // Not found + self._len = 0; + } else { + self._len -= token._len + needle._len; + } + return token; + } + + /* + * @dev Splits the slice, setting `self` to everything before the last + * occurrence of `needle`, and returning everything after it. If + * `needle` does not occur in `self`, `self` is set to the empty slice, + * and the entirety of `self` is returned. + * @param self The slice to split. + * @param needle The text to search for in `self`. + * @return The part of `self` after the last occurrence of `delim`. + */ + function rsplit(slice memory self, slice memory needle) + internal + pure + returns (slice memory token) + { + rsplit(self, needle, token); + } + + /* + * @dev Counts the number of nonoverlapping occurrences of `needle` in `self`. + * @param self The slice to search. + * @param needle The text to search for in `self`. + * @return The number of occurrences of `needle` found in `self`. + */ + function count(slice memory self, slice memory needle) + internal + pure + returns (uint256 cnt) + { + uint256 ptr = findPtr(self._len, self._ptr, needle._len, needle._ptr) + + needle._len; + while (ptr <= self._ptr + self._len) { + cnt++; + ptr = + findPtr(self._len - (ptr - self._ptr), ptr, needle._len, needle._ptr) + + needle._len; + } + } + + /* + * @dev Returns True if `self` contains `needle`. + * @param self The slice to search. + * @param needle The text to search for in `self`. + * @return True if `needle` is found in `self`, false otherwise. 
+ */ + function contains(slice memory self, slice memory needle) + internal + pure + returns (bool) + { + return + rfindPtr(self._len, self._ptr, needle._len, needle._ptr) != self._ptr; + } + + /* + * @dev Returns a newly allocated string containing the concatenation of + * `self` and `other`. + * @param self The first slice to concatenate. + * @param other The second slice to concatenate. + * @return The concatenation of the two strings. + */ + function concat(slice memory self, slice memory other) + internal + pure + returns (string memory) + { + string memory ret = new string(self._len + other._len); + uint256 retptr; + assembly { + retptr := add(ret, 32) + } + memcpy(retptr, self._ptr, self._len); + memcpy(retptr + self._len, other._ptr, other._len); + return ret; + } + + /* + * @dev Joins an array of slices, using `self` as a delimiter, returning a + * newly allocated string. + * @param self The delimiter to use. + * @param parts A list of slices to join. + * @return A newly allocated string containing all the slices in `parts`, + * joined with `self`. 
+ */ + function join(slice memory self, slice[] memory parts) + internal + pure + returns (string memory) + { + if (parts.length == 0) return ""; + + uint256 length = self._len * (parts.length - 1); + for (uint256 i = 0; i < parts.length; i++) length += parts[i]._len; + + string memory ret = new string(length); + uint256 retptr; + assembly { + retptr := add(ret, 32) + } + + for (uint256 i = 0; i < parts.length; i++) { + memcpy(retptr, parts[i]._ptr, parts[i]._len); + retptr += parts[i]._len; + if (i < parts.length - 1) { + memcpy(retptr, self._ptr, self._len); + retptr += self._len; + } + } + + return ret; + } +} diff --git a/contracts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IBridge.sol b/contracts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IBridge.sol new file mode 100644 index 00000000..3d9e7577 --- /dev/null +++ b/contracts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IBridge.sol @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: Apache-2.0 + +/* + * Copyright 2021, Offchain Labs, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// NOTICE: pragma change from original (^0.6.11) +pragma solidity ^0.8.0; + +interface IBridge { + event MessageDelivered( + uint256 indexed messageIndex, + bytes32 indexed beforeInboxAcc, + address inbox, + uint8 kind, + address sender, + bytes32 messageDataHash + ); + + function deliverMessageToInbox( + uint8 kind, + address sender, + bytes32 messageDataHash + ) external payable returns (uint256); + + function executeCall( + address destAddr, + uint256 amount, + bytes calldata data + ) external returns (bool success, bytes memory returnData); + + // These are only callable by the admin + function setInbox(address inbox, bool enabled) external; + + function setOutbox(address inbox, bool enabled) external; + + // View functions + + function activeOutbox() external view returns (address); + + function allowedInboxes(address inbox) external view returns (bool); + + function allowedOutboxes(address outbox) external view returns (bool); + + function inboxAccs(uint256 index) external view returns (bytes32); + + function messageCount() external view returns (uint256); +} diff --git a/contracts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IInbox.sol b/contracts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IInbox.sol new file mode 100644 index 00000000..b83fcec3 --- /dev/null +++ b/contracts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IInbox.sol @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: Apache-2.0 + +/* + * Copyright 2021, Offchain Labs, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// NOTICE: pragma change from original (^0.6.11) +pragma solidity ^0.8.0; + +import "./IBridge.sol"; +import "./IMessageProvider.sol"; + +interface IInbox is IMessageProvider { + function sendL2Message(bytes calldata messageData) external returns (uint256); + + function sendUnsignedTransaction( + uint256 maxGas, + uint256 gasPriceBid, + uint256 nonce, + address destAddr, + uint256 amount, + bytes calldata data + ) external returns (uint256); + + function sendContractTransaction( + uint256 maxGas, + uint256 gasPriceBid, + address destAddr, + uint256 amount, + bytes calldata data + ) external returns (uint256); + + function sendL1FundedUnsignedTransaction( + uint256 maxGas, + uint256 gasPriceBid, + uint256 nonce, + address destAddr, + bytes calldata data + ) external payable returns (uint256); + + function sendL1FundedContractTransaction( + uint256 maxGas, + uint256 gasPriceBid, + address destAddr, + bytes calldata data + ) external payable returns (uint256); + + function createRetryableTicketNoRefundAliasRewrite( + address destAddr, + uint256 arbTxCallValue, + uint256 maxSubmissionCost, + address submissionRefundAddress, + address valueRefundAddress, + uint256 maxGas, + uint256 gasPriceBid, + bytes calldata data + ) external payable returns (uint256); + + function createRetryableTicket( + address destAddr, + uint256 arbTxCallValue, + uint256 maxSubmissionCost, + address submissionRefundAddress, + address valueRefundAddress, + uint256 maxGas, + uint256 gasPriceBid, + bytes calldata data + ) external payable returns (uint256); + + function 
depositEth(address destAddr) external payable returns (uint256); + + function depositEthRetryable( + address destAddr, + uint256 maxSubmissionCost, + uint256 maxGas, + uint256 maxGasPrice + ) external payable returns (uint256); + + function bridge() external view returns (IBridge); +} diff --git a/contracts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IMessageProvider.sol b/contracts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IMessageProvider.sol new file mode 100644 index 00000000..a29dc65d --- /dev/null +++ b/contracts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IMessageProvider.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: Apache-2.0 + +/* + * Copyright 2021, Offchain Labs, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// NOTICE: pragma change from original (^0.6.11) +pragma solidity ^0.8.0; + +interface IMessageProvider { + event InboxMessageDelivered(uint256 indexed messageNum, bytes data); + + event InboxMessageDeliveredFromOrigin(uint256 indexed messageNum); +} diff --git a/contracts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/libraries/AddressAliasHelper.sol b/contracts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/libraries/AddressAliasHelper.sol new file mode 100644 index 00000000..dd3d5442 --- /dev/null +++ b/contracts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/libraries/AddressAliasHelper.sol @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: Apache-2.0 + +/* + * Copyright 2019-2021, Offchain Labs, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+// NOTICE: pragma change from original (^0.6.11)
+pragma solidity ^0.8.0;
+
+library AddressAliasHelper {
+ uint160 constant offset = uint160(0x1111000000000000000000000000000000001111);
+
+ /// @notice Utility function that converts the address in the L1 that submitted
+ /// a tx to the inbox to the msg.sender viewed in the L2
+ /// @param l1Address the address in the L1 that triggered the tx to L2
+ /// @return l2Address L2 address as viewed in msg.sender
+ function applyL1ToL2Alias(address l1Address) internal pure returns (address l2Address) {
+ unchecked {
+ l2Address = address(uint160(l1Address) + offset);
+ }
+ }
+
+ /// @notice Utility function that converts the msg.sender viewed in the L2 to the
+ /// address in the L1 that submitted a tx to the inbox
+ /// @param l2Address L2 address as viewed in msg.sender
+ /// @return l1Address the address in the L1 that triggered the tx to L2
+ function undoL1ToL2Alias(address l2Address) internal pure returns (address l1Address) {
+ unchecked {
+ l1Address = address(uint160(l2Address) - offset);
+ }
+ }
+}
diff --git a/contracts/src/v0.8/vendor/entrypoint/core/EntryPoint.sol b/contracts/src/v0.8/vendor/entrypoint/core/EntryPoint.sol
new file mode 100644
index 00000000..86a34b07
--- /dev/null
+++ b/contracts/src/v0.8/vendor/entrypoint/core/EntryPoint.sol
+/**
+ ** Account-Abstraction (EIP-4337) singleton EntryPoint implementation.
+ ** Only one instance required on each chain.
+ **/ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity ^0.8.12; + +/* solhint-disable avoid-low-level-calls */ +/* solhint-disable no-inline-assembly */ + +import "../interfaces/IAccount.sol"; +import "../interfaces/IPaymaster.sol"; +import "../interfaces/IEntryPoint.sol"; + +import "../utils/Exec.sol"; +import "./StakeManager.sol"; +import "./SenderCreator.sol"; +import "./Helpers.sol"; + +contract EntryPoint is IEntryPoint, StakeManager { + using UserOperationLib for UserOperation; + + SenderCreator private immutable senderCreator = new SenderCreator(); + + // internal value used during simulation: need to query aggregator. + address private constant SIMULATE_FIND_AGGREGATOR = address(1); + + // marker for inner call revert on out of gas + bytes32 private constant INNER_OUT_OF_GAS = hex"deaddead"; + + uint256 private constant REVERT_REASON_MAX_LEN = 2048; + + /** + * for simulation purposes, validateUserOp (and validatePaymasterUserOp) must return this value + * in case of signature failure, instead of revert. + */ + uint256 public constant SIG_VALIDATION_FAILED = 1; + + /** + * compensate the caller's beneficiary address with the collected fees of all UserOperations. + * @param beneficiary the address to receive the fees + * @param amount amount to transfer. + */ + function _compensate(address payable beneficiary, uint256 amount) internal { + require(beneficiary != address(0), "AA90 invalid beneficiary"); + (bool success, ) = beneficiary.call{value: amount}(""); + require(success, "AA91 failed send to beneficiary"); + } + + /** + * execute a user op + * @param opIndex index into the opInfo array + * @param userOp the userOp to execute + * @param opInfo the opInfo filled by validatePrepayment for this userOp. + * @return collected the total amount this userOp paid. 
+ */ + function _executeUserOp( + uint256 opIndex, + UserOperation calldata userOp, + UserOpInfo memory opInfo + ) private returns (uint256 collected) { + uint256 preGas = gasleft(); + bytes memory context = getMemoryBytesFromOffset(opInfo.contextOffset); + + try this.innerHandleOp(userOp.callData, opInfo, context) returns ( + uint256 _actualGasCost + ) { + collected = _actualGasCost; + } catch { + bytes32 innerRevertCode; + assembly { + returndatacopy(0, 0, 32) + innerRevertCode := mload(0) + } + // handleOps was called with gas limit too low. abort entire bundle. + if (innerRevertCode == INNER_OUT_OF_GAS) { + //report paymaster, since if it is not deliberately caused by the bundler, + // it must be a revert caused by paymaster. + revert FailedOp(opIndex, "AA95 out of gas"); + } + + uint256 actualGas = preGas - gasleft() + opInfo.preOpGas; + collected = _handlePostOp( + opIndex, + IPaymaster.PostOpMode.postOpReverted, + opInfo, + context, + actualGas + ); + } + } + + /** + * Execute a batch of UserOperations. + * no signature aggregator is used. + * if any account requires an aggregator (that is, it returned an aggregator when + * performing simulateValidation), then handleAggregatedOps() must be used instead. 
+ * @param ops the operations to execute + * @param beneficiary the address to receive the fees + */ + function handleOps( + UserOperation[] calldata ops, + address payable beneficiary + ) public { + uint256 opslen = ops.length; + UserOpInfo[] memory opInfos = new UserOpInfo[](opslen); + + unchecked { + for (uint256 i = 0; i < opslen; i++) { + UserOpInfo memory opInfo = opInfos[i]; + ( + uint256 validationData, + uint256 pmValidationData + ) = _validatePrepayment(i, ops[i], opInfo); + _validateAccountAndPaymasterValidationData( + i, + validationData, + pmValidationData, + address(0) + ); + } + + uint256 collected = 0; + + for (uint256 i = 0; i < opslen; i++) { + collected += _executeUserOp(i, ops[i], opInfos[i]); + } + + _compensate(beneficiary, collected); + } //unchecked + } + + /** + * Execute a batch of UserOperation with Aggregators + * @param opsPerAggregator the operations to execute, grouped by aggregator (or address(0) for no-aggregator accounts) + * @param beneficiary the address to receive the fees + */ + function handleAggregatedOps( + UserOpsPerAggregator[] calldata opsPerAggregator, + address payable beneficiary + ) public { + uint256 opasLen = opsPerAggregator.length; + uint256 totalOps = 0; + for (uint256 i = 0; i < opasLen; i++) { + UserOpsPerAggregator calldata opa = opsPerAggregator[i]; + UserOperation[] calldata ops = opa.userOps; + IAggregator aggregator = opa.aggregator; + + //address(1) is special marker of "signature error" + require( + address(aggregator) != address(1), + "AA96 invalid aggregator" + ); + + if (address(aggregator) != address(0)) { + // solhint-disable-next-line no-empty-blocks + try aggregator.validateSignatures(ops, opa.signature) {} catch { + revert SignatureValidationFailed(address(aggregator)); + } + } + + totalOps += ops.length; + } + + UserOpInfo[] memory opInfos = new UserOpInfo[](totalOps); + + uint256 opIndex = 0; + for (uint256 a = 0; a < opasLen; a++) { + UserOpsPerAggregator calldata opa = opsPerAggregator[a]; + 
UserOperation[] calldata ops = opa.userOps; + IAggregator aggregator = opa.aggregator; + + uint256 opslen = ops.length; + for (uint256 i = 0; i < opslen; i++) { + UserOpInfo memory opInfo = opInfos[opIndex]; + ( + uint256 validationData, + uint256 paymasterValidationData + ) = _validatePrepayment(opIndex, ops[i], opInfo); + _validateAccountAndPaymasterValidationData( + i, + validationData, + paymasterValidationData, + address(aggregator) + ); + opIndex++; + } + } + + uint256 collected = 0; + opIndex = 0; + for (uint256 a = 0; a < opasLen; a++) { + UserOpsPerAggregator calldata opa = opsPerAggregator[a]; + emit SignatureAggregatorChanged(address(opa.aggregator)); + UserOperation[] calldata ops = opa.userOps; + uint256 opslen = ops.length; + + for (uint256 i = 0; i < opslen; i++) { + collected += _executeUserOp(opIndex, ops[i], opInfos[opIndex]); + opIndex++; + } + } + emit SignatureAggregatorChanged(address(0)); + + _compensate(beneficiary, collected); + } + + /// @inheritdoc IEntryPoint + function simulateHandleOp( + UserOperation calldata op, + address target, + bytes calldata targetCallData + ) external override { + UserOpInfo memory opInfo; + _simulationOnlyValidations(op); + ( + uint256 validationData, + uint256 paymasterValidationData + ) = _validatePrepayment(0, op, opInfo); + ValidationData memory data = _intersectTimeRange( + validationData, + paymasterValidationData + ); + + numberMarker(); + uint256 paid = _executeUserOp(0, op, opInfo); + numberMarker(); + bool targetSuccess; + bytes memory targetResult; + if (target != address(0)) { + (targetSuccess, targetResult) = target.call(targetCallData); + } + revert ExecutionResult( + opInfo.preOpGas, + paid, + data.validAfter, + data.validUntil, + targetSuccess, + targetResult + ); + } + + // A memory copy of UserOp static fields only. + // Excluding: callData, initCode and signature. Replacing paymasterAndData with paymaster. 
+ struct MemoryUserOp { + address sender; + uint256 nonce; + uint256 callGasLimit; + uint256 verificationGasLimit; + uint256 preVerificationGas; + address paymaster; + uint256 maxFeePerGas; + uint256 maxPriorityFeePerGas; + } + + struct UserOpInfo { + MemoryUserOp mUserOp; + bytes32 userOpHash; + uint256 prefund; + uint256 contextOffset; + uint256 preOpGas; + } + + /** + * inner function to handle a UserOperation. + * Must be declared "external" to open a call context, but it can only be called by handleOps. + */ + function innerHandleOp( + bytes memory callData, + UserOpInfo memory opInfo, + bytes calldata context + ) external returns (uint256 actualGasCost) { + uint256 preGas = gasleft(); + require(msg.sender == address(this), "AA92 internal call only"); + MemoryUserOp memory mUserOp = opInfo.mUserOp; + + uint256 callGasLimit = mUserOp.callGasLimit; + unchecked { + // handleOps was called with gas limit too low. abort entire bundle. + if ( + gasleft() < callGasLimit + mUserOp.verificationGasLimit + 5000 + ) { + assembly { + mstore(0, INNER_OUT_OF_GAS) + revert(0, 32) + } + } + } + + IPaymaster.PostOpMode mode = IPaymaster.PostOpMode.opSucceeded; + if (callData.length > 0) { + bool success = Exec.call(mUserOp.sender, 0, callData, callGasLimit); + if (!success) { + bytes memory result = Exec.getReturnData(REVERT_REASON_MAX_LEN); + if (result.length > 0) { + emit UserOperationRevertReason( + opInfo.userOpHash, + mUserOp.sender, + mUserOp.nonce, + result + ); + } + mode = IPaymaster.PostOpMode.opReverted; + } + } + + unchecked { + uint256 actualGas = preGas - gasleft() + opInfo.preOpGas; + //note: opIndex is ignored (relevant only if mode==postOpReverted, which is only possible outside of innerHandleOp) + return _handlePostOp(0, mode, opInfo, context, actualGas); + } + } + + /** + * generate a request Id - unique identifier for this request. + * the request ID is a hash over the content of the userOp (except the signature), the entrypoint and the chainid. 
+ */ + function getUserOpHash(UserOperation calldata userOp) + public + view + returns (bytes32) + { + return + keccak256(abi.encode(userOp.hash(), address(this), block.chainid)); + } + + /** + * copy general fields from userOp into the memory opInfo structure. + */ + function _copyUserOpToMemory( + UserOperation calldata userOp, + MemoryUserOp memory mUserOp + ) internal pure { + mUserOp.sender = userOp.sender; + mUserOp.nonce = userOp.nonce; + mUserOp.callGasLimit = userOp.callGasLimit; + mUserOp.verificationGasLimit = userOp.verificationGasLimit; + mUserOp.preVerificationGas = userOp.preVerificationGas; + mUserOp.maxFeePerGas = userOp.maxFeePerGas; + mUserOp.maxPriorityFeePerGas = userOp.maxPriorityFeePerGas; + bytes calldata paymasterAndData = userOp.paymasterAndData; + if (paymasterAndData.length > 0) { + require( + paymasterAndData.length >= 20, + "AA93 invalid paymasterAndData" + ); + mUserOp.paymaster = address(bytes20(paymasterAndData[:20])); + } else { + mUserOp.paymaster = address(0); + } + } + + /** + * Simulate a call to account.validateUserOp and paymaster.validatePaymasterUserOp. + * @dev this method always revert. Successful result is ValidationResult error. other errors are failures. + * @dev The node must also verify it doesn't use banned opcodes, and that it doesn't reference storage outside the account's data. + * @param userOp the user operation to validate. + */ + function simulateValidation(UserOperation calldata userOp) external { + UserOpInfo memory outOpInfo; + + _simulationOnlyValidations(userOp); + ( + uint256 validationData, + uint256 paymasterValidationData + ) = _validatePrepayment(0, userOp, outOpInfo); + StakeInfo memory paymasterInfo = _getStakeInfo( + outOpInfo.mUserOp.paymaster + ); + StakeInfo memory senderInfo = _getStakeInfo(outOpInfo.mUserOp.sender); + StakeInfo memory factoryInfo; + { + bytes calldata initCode = userOp.initCode; + address factory = initCode.length >= 20 + ? 
address(bytes20(initCode[0:20])) + : address(0); + factoryInfo = _getStakeInfo(factory); + } + + ValidationData memory data = _intersectTimeRange( + validationData, + paymasterValidationData + ); + address aggregator = data.aggregator; + bool sigFailed = aggregator == address(1); + ReturnInfo memory returnInfo = ReturnInfo( + outOpInfo.preOpGas, + outOpInfo.prefund, + sigFailed, + data.validAfter, + data.validUntil, + getMemoryBytesFromOffset(outOpInfo.contextOffset) + ); + + if (aggregator != address(0) && aggregator != address(1)) { + AggregatorStakeInfo memory aggregatorInfo = AggregatorStakeInfo( + aggregator, + _getStakeInfo(aggregator) + ); + revert ValidationResultWithAggregation( + returnInfo, + senderInfo, + factoryInfo, + paymasterInfo, + aggregatorInfo + ); + } + revert ValidationResult( + returnInfo, + senderInfo, + factoryInfo, + paymasterInfo + ); + } + + function _getRequiredPrefund(MemoryUserOp memory mUserOp) + internal + pure + returns (uint256 requiredPrefund) + { + unchecked { + //when using a Paymaster, the verificationGasLimit is used also to as a limit for the postOp call. + // our security model might call postOp eventually twice + uint256 mul = mUserOp.paymaster != address(0) ? 3 : 1; + uint256 requiredGas = mUserOp.callGasLimit + + mUserOp.verificationGasLimit * + mul + + mUserOp.preVerificationGas; + + requiredPrefund = requiredGas * mUserOp.maxFeePerGas; + } + } + + // create the sender's contract if needed. 
+ function _createSenderIfNeeded( + uint256 opIndex, + UserOpInfo memory opInfo, + bytes calldata initCode + ) internal { + if (initCode.length != 0) { + address sender = opInfo.mUserOp.sender; + if (sender.code.length != 0) + revert FailedOp(opIndex, "AA10 sender already constructed"); + address sender1 = senderCreator.createSender{ + gas: opInfo.mUserOp.verificationGasLimit + }(initCode); + if (sender1 == address(0)) + revert FailedOp(opIndex, "AA13 initCode failed or OOG"); + if (sender1 != sender) + revert FailedOp(opIndex, "AA14 initCode must return sender"); + if (sender1.code.length == 0) + revert FailedOp(opIndex, "AA15 initCode must create sender"); + address factory = address(bytes20(initCode[0:20])); + emit AccountDeployed( + opInfo.userOpHash, + sender, + factory, + opInfo.mUserOp.paymaster + ); + } + } + + /** + * Get counterfactual sender address. + * Calculate the sender contract address that will be generated by the initCode and salt in the UserOperation. + * this method always revert, and returns the address in SenderAddressResult error + * @param initCode the constructor code to be passed into the UserOperation. + */ + function getSenderAddress(bytes calldata initCode) public { + revert SenderAddressResult(senderCreator.createSender(initCode)); + } + + function _simulationOnlyValidations(UserOperation calldata userOp) + internal + view + { + // solhint-disable-next-line no-empty-blocks + try + this._validateSenderAndPaymaster( + userOp.initCode, + userOp.sender, + userOp.paymasterAndData + ) + {} catch Error(string memory revertReason) { + if (bytes(revertReason).length != 0) { + revert FailedOp(0, revertReason); + } + } + } + + /** + * Called only during simulation. + * This function always reverts to prevent warm/cold storage differentiation in simulation vs execution. 
+ */ + function _validateSenderAndPaymaster( + bytes calldata initCode, + address sender, + bytes calldata paymasterAndData + ) external view { + if (initCode.length == 0 && sender.code.length == 0) { + // it would revert anyway. but give a meaningful message + revert("AA20 account not deployed"); + } + if (paymasterAndData.length >= 20) { + address paymaster = address(bytes20(paymasterAndData[0:20])); + if (paymaster.code.length == 0) { + // it would revert anyway. but give a meaningful message + revert("AA30 paymaster not deployed"); + } + } + // always revert + revert(""); + } + + /** + * call account.validateUserOp. + * revert (with FailedOp) in case validateUserOp reverts, or account didn't send required prefund. + * decrement account's deposit if needed + */ + function _validateAccountPrepayment( + uint256 opIndex, + UserOperation calldata op, + UserOpInfo memory opInfo, + uint256 requiredPrefund + ) + internal + returns ( + uint256 gasUsedByValidateAccountPrepayment, + uint256 validationData + ) + { + unchecked { + uint256 preGas = gasleft(); + MemoryUserOp memory mUserOp = opInfo.mUserOp; + address sender = mUserOp.sender; + _createSenderIfNeeded(opIndex, opInfo, op.initCode); + address paymaster = mUserOp.paymaster; + numberMarker(); + uint256 missingAccountFunds = 0; + if (paymaster == address(0)) { + uint256 bal = balanceOf(sender); + missingAccountFunds = bal > requiredPrefund + ? 
0 + : requiredPrefund - bal; + } + try + IAccount(sender).validateUserOp{ + gas: mUserOp.verificationGasLimit + }(op, opInfo.userOpHash, missingAccountFunds) + returns (uint256 _validationData) { + validationData = _validationData; + } catch Error(string memory revertReason) { + revert FailedOp( + opIndex, + string.concat("AA23 reverted: ", revertReason) + ); + } catch { + revert FailedOp(opIndex, "AA23 reverted (or OOG)"); + } + if (paymaster == address(0)) { + DepositInfo storage senderInfo = deposits[sender]; + uint256 deposit = senderInfo.deposit; + if (requiredPrefund > deposit) { + revert FailedOp(opIndex, "AA21 didn't pay prefund"); + } + senderInfo.deposit = uint112(deposit - requiredPrefund); + } + gasUsedByValidateAccountPrepayment = preGas - gasleft(); + } + } + + /** + * In case the request has a paymaster: + * Validate paymaster has enough deposit. + * Call paymaster.validatePaymasterUserOp. + * Revert with proper FailedOp in case paymaster reverts. + * Decrement paymaster's deposit + */ + function _validatePaymasterPrepayment( + uint256 opIndex, + UserOperation calldata op, + UserOpInfo memory opInfo, + uint256 requiredPreFund, + uint256 gasUsedByValidateAccountPrepayment + ) internal returns (bytes memory context, uint256 validationData) { + unchecked { + MemoryUserOp memory mUserOp = opInfo.mUserOp; + uint256 verificationGasLimit = mUserOp.verificationGasLimit; + require( + verificationGasLimit > gasUsedByValidateAccountPrepayment, + "AA41 too little verificationGas" + ); + uint256 gas = verificationGasLimit - + gasUsedByValidateAccountPrepayment; + + address paymaster = mUserOp.paymaster; + DepositInfo storage paymasterInfo = deposits[paymaster]; + uint256 deposit = paymasterInfo.deposit; + if (deposit < requiredPreFund) { + revert FailedOp(opIndex, "AA31 paymaster deposit too low"); + } + paymasterInfo.deposit = uint112(deposit - requiredPreFund); + try + IPaymaster(paymaster).validatePaymasterUserOp{gas: gas}( + op, + opInfo.userOpHash, + 
requiredPreFund + ) + returns (bytes memory _context, uint256 _validationData) { + context = _context; + validationData = _validationData; + } catch Error(string memory revertReason) { + revert FailedOp( + opIndex, + string.concat("AA33 reverted: ", revertReason) + ); + } catch { + revert FailedOp(opIndex, "AA33 reverted (or OOG)"); + } + } + } + + /** + * revert if either account validationData or paymaster validationData is expired + */ + function _validateAccountAndPaymasterValidationData( + uint256 opIndex, + uint256 validationData, + uint256 paymasterValidationData, + address expectedAggregator + ) internal view { + (address aggregator, bool outOfTimeRange) = _getValidationData( + validationData + ); + if (expectedAggregator != aggregator) { + revert FailedOp(opIndex, "AA24 signature error"); + } + if (outOfTimeRange) { + revert FailedOp(opIndex, "AA22 expired or not due"); + } + //pmAggregator is not a real signature aggregator: we don't have logic to handle it as address. + // non-zero address means that the paymaster fails due to some signature check (which is ok only during estimation) + address pmAggregator; + (pmAggregator, outOfTimeRange) = _getValidationData( + paymasterValidationData + ); + if (pmAggregator != address(0)) { + revert FailedOp(opIndex, "AA34 signature error"); + } + if (outOfTimeRange) { + revert FailedOp(opIndex, "AA32 paymaster expired or not due"); + } + } + + function _getValidationData(uint256 validationData) + internal + view + returns (address aggregator, bool outOfTimeRange) + { + if (validationData == 0) { + return (address(0), false); + } + ValidationData memory data = _parseValidationData(validationData); + // solhint-disable-next-line not-rely-on-time + outOfTimeRange = + block.timestamp > data.validUntil || + block.timestamp < data.validAfter; + aggregator = data.aggregator; + } + + /** + * validate account and paymaster (if defined). 
+ * also make sure total validation doesn't exceed verificationGasLimit + * this method is called off-chain (simulateValidation()) and on-chain (from handleOps) + * @param opIndex the index of this userOp into the "opInfos" array + * @param userOp the userOp to validate + */ + function _validatePrepayment( + uint256 opIndex, + UserOperation calldata userOp, + UserOpInfo memory outOpInfo + ) + private + returns (uint256 validationData, uint256 paymasterValidationData) + { + uint256 preGas = gasleft(); + MemoryUserOp memory mUserOp = outOpInfo.mUserOp; + _copyUserOpToMemory(userOp, mUserOp); + outOpInfo.userOpHash = getUserOpHash(userOp); + + // validate all numeric values in userOp are well below 128 bit, so they can safely be added + // and multiplied without causing overflow + uint256 maxGasValues = mUserOp.preVerificationGas | + mUserOp.verificationGasLimit | + mUserOp.callGasLimit | + userOp.maxFeePerGas | + userOp.maxPriorityFeePerGas; + require(maxGasValues <= type(uint120).max, "AA94 gas values overflow"); + + uint256 gasUsedByValidateAccountPrepayment; + uint256 requiredPreFund = _getRequiredPrefund(mUserOp); + ( + gasUsedByValidateAccountPrepayment, + validationData + ) = _validateAccountPrepayment( + opIndex, + userOp, + outOpInfo, + requiredPreFund + ); + //a "marker" where account opcode validation is done and paymaster opcode validation is about to start + // (used only by off-chain simulateValidation) + numberMarker(); + + bytes memory context; + if (mUserOp.paymaster != address(0)) { + (context, paymasterValidationData) = _validatePaymasterPrepayment( + opIndex, + userOp, + outOpInfo, + requiredPreFund, + gasUsedByValidateAccountPrepayment + ); + } + unchecked { + uint256 gasUsed = preGas - gasleft(); + + if (userOp.verificationGasLimit < gasUsed) { + revert FailedOp(opIndex, "AA40 over verificationGasLimit"); + } + outOpInfo.prefund = requiredPreFund; + outOpInfo.contextOffset = getOffsetOfMemoryBytes(context); + outOpInfo.preOpGas = preGas - 
gasleft() + userOp.preVerificationGas; + } + } + + /** + * process post-operation. + * called just after the callData is executed. + * if a paymaster is defined and its validation returned a non-empty context, its postOp is called. + * the excess amount is refunded to the account (or paymaster - if it was used in the request) + * @param opIndex index in the batch + * @param mode - whether is called from innerHandleOp, or outside (postOpReverted) + * @param opInfo userOp fields and info collected during validation + * @param context the context returned in validatePaymasterUserOp + * @param actualGas the gas used so far by this user operation + */ + function _handlePostOp( + uint256 opIndex, + IPaymaster.PostOpMode mode, + UserOpInfo memory opInfo, + bytes memory context, + uint256 actualGas + ) private returns (uint256 actualGasCost) { + uint256 preGas = gasleft(); + unchecked { + address refundAddress; + MemoryUserOp memory mUserOp = opInfo.mUserOp; + uint256 gasPrice = getUserOpGasPrice(mUserOp); + + address paymaster = mUserOp.paymaster; + if (paymaster == address(0)) { + refundAddress = mUserOp.sender; + } else { + refundAddress = paymaster; + if (context.length > 0) { + actualGasCost = actualGas * gasPrice; + if (mode != IPaymaster.PostOpMode.postOpReverted) { + IPaymaster(paymaster).postOp{ + gas: mUserOp.verificationGasLimit + }(mode, context, actualGasCost); + } else { + // solhint-disable-next-line no-empty-blocks + try + IPaymaster(paymaster).postOp{ + gas: mUserOp.verificationGasLimit + }(mode, context, actualGasCost) + {} catch Error(string memory reason) { + revert FailedOp( + opIndex, + string.concat("AA50 postOp reverted: ", reason) + ); + } catch { + revert FailedOp(opIndex, "AA50 postOp revert"); + } + } + } + } + actualGas += preGas - gasleft(); + actualGasCost = actualGas * gasPrice; + if (opInfo.prefund < actualGasCost) { + revert FailedOp(opIndex, "AA51 prefund below actualGasCost"); + } + uint256 refund = opInfo.prefund - actualGasCost; + 
_incrementDeposit(refundAddress, refund); + bool success = mode == IPaymaster.PostOpMode.opSucceeded; + emit UserOperationEvent( + opInfo.userOpHash, + mUserOp.sender, + mUserOp.paymaster, + mUserOp.nonce, + success, + actualGasCost, + actualGas + ); + } // unchecked + } + + /** + * the gas price this UserOp agrees to pay. + * relayer/block builder might submit the TX with higher priorityFee, but the user should not + */ + function getUserOpGasPrice(MemoryUserOp memory mUserOp) + internal + view + returns (uint256) + { + unchecked { + uint256 maxFeePerGas = mUserOp.maxFeePerGas; + uint256 maxPriorityFeePerGas = mUserOp.maxPriorityFeePerGas; + if (maxFeePerGas == maxPriorityFeePerGas) { + //legacy mode (for networks that don't support basefee opcode) + return maxFeePerGas; + } + return min(maxFeePerGas, maxPriorityFeePerGas + block.basefee); + } + } + + function min(uint256 a, uint256 b) internal pure returns (uint256) { + return a < b ? a : b; + } + + function getOffsetOfMemoryBytes(bytes memory data) + internal + pure + returns (uint256 offset) + { + assembly { + offset := data + } + } + + function getMemoryBytesFromOffset(uint256 offset) + internal + pure + returns (bytes memory data) + { + assembly { + data := offset + } + } + + //place the NUMBER opcode in the code. + // this is used as a marker during simulation, as this OP is completely banned from the simulated code of the + // account and paymaster. + function numberMarker() internal view { + assembly { + mstore(0, number()) + } + } +} diff --git a/contracts/src/v0.8/vendor/entrypoint/core/Helpers.sol b/contracts/src/v0.8/vendor/entrypoint/core/Helpers.sol new file mode 100644 index 00000000..71a6dc3d --- /dev/null +++ b/contracts/src/v0.8/vendor/entrypoint/core/Helpers.sol @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity ^0.8.12; + +/** + * returned data from validateUserOp. 
+ * validateUserOp returns a uint256, which is created by `_packedValidationData` and parsed by `_parseValidationData`
+ * @param aggregator - address(0) - the account validated the signature by itself.
+ *              address(1) - the account failed to validate the signature.
+ *              otherwise - this is an address of a signature aggregator that must be used to validate the signature.
+ * @param validAfter - this UserOp is valid only after this timestamp.
+ * @param validUntil - this UserOp is valid only up to this timestamp.
+ */
+struct ValidationData {
+    address aggregator;
+    uint48 validAfter;
+    uint48 validUntil;
+}
+
+//extract sigFailed, validAfter, validUntil.
+// also convert zero validUntil to type(uint48).max
+function _parseValidationData(uint validationData) pure returns (ValidationData memory data) {
+    address aggregator = address(uint160(validationData));
+    uint48 validUntil = uint48(validationData >> 160);
+    if (validUntil == 0) {
+        validUntil = type(uint48).max;
+    }
+    uint48 validAfter = uint48(validationData >> (48 + 160));
+    return ValidationData(aggregator, validAfter, validUntil);
+}
+
+// intersect account and paymaster ranges.
+function _intersectTimeRange( + uint256 validationData, + uint256 paymasterValidationData +) pure returns (ValidationData memory) { + ValidationData memory accountValidationData = _parseValidationData(validationData); + ValidationData memory pmValidationData = _parseValidationData(paymasterValidationData); + address aggregator = accountValidationData.aggregator; + if (aggregator == address(0)) { + aggregator = pmValidationData.aggregator; + } + uint48 validAfter = accountValidationData.validAfter; + uint48 validUntil = accountValidationData.validUntil; + uint48 pmValidAfter = pmValidationData.validAfter; + uint48 pmValidUntil = pmValidationData.validUntil; + + if (validAfter < pmValidAfter) validAfter = pmValidAfter; + if (validUntil > pmValidUntil) validUntil = pmValidUntil; + return ValidationData(aggregator, validAfter, validUntil); +} + +/** + * helper to pack the return value for validateUserOp + * @param data - the ValidationData to pack + */ +function _packValidationData(ValidationData memory data) pure returns (uint256) { + return uint160(data.aggregator) | (uint256(data.validUntil) << 160) | (uint256(data.validAfter) << (160 + 48)); +} + +/** + * helper to pack the return value for validateUserOp, when not using an aggregator + * @param sigFailed - true for signature failure, false for success + * @param validUntil last timestamp this UserOperation is valid (or zero for infinite) + * @param validAfter first timestamp this UserOperation is valid + */ +function _packValidationData(bool sigFailed, uint48 validUntil, uint48 validAfter) pure returns (uint256) { + return (sigFailed ? 
1 : 0) | (uint256(validUntil) << 160) | (uint256(validAfter) << (160 + 48)); +} diff --git a/contracts/src/v0.8/vendor/entrypoint/core/SenderCreator.sol b/contracts/src/v0.8/vendor/entrypoint/core/SenderCreator.sol new file mode 100644 index 00000000..36fad7b9 --- /dev/null +++ b/contracts/src/v0.8/vendor/entrypoint/core/SenderCreator.sol @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity ^0.8.12; + +/** + * helper contract for EntryPoint, to call userOp.initCode from a "neutral" address, + * which is explicitly not the entryPoint itself. + */ +contract SenderCreator { + + /** + * call the "initCode" factory to create and return the sender account address + * @param initCode the initCode value from a UserOp. contains 20 bytes of factory address, followed by calldata + * @return sender the returned address of the created account, or zero address on failure. + */ + function createSender(bytes calldata initCode) external returns (address sender) { + address factory = address(bytes20(initCode[0 : 20])); + bytes memory initCallData = initCode[20 :]; + bool success; + /* solhint-disable no-inline-assembly */ + assembly { + success := call(gas(), factory, 0, add(initCallData, 0x20), mload(initCallData), 0, 32) + sender := mload(0) + } + if (!success) { + sender = address(0); + } + } +} diff --git a/contracts/src/v0.8/vendor/entrypoint/core/StakeManager.sol b/contracts/src/v0.8/vendor/entrypoint/core/StakeManager.sol new file mode 100644 index 00000000..e5ca2b97 --- /dev/null +++ b/contracts/src/v0.8/vendor/entrypoint/core/StakeManager.sol @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-3.0-only +pragma solidity ^0.8.12; + +import "../interfaces/IStakeManager.sol"; + +/* solhint-disable avoid-low-level-calls */ +/* solhint-disable not-rely-on-time */ +/** + * manage deposits and stakes. 
+ * deposit is just a balance used to pay for UserOperations (either by a paymaster or an account) + * stake is value locked for at least "unstakeDelay" by a paymaster. + */ +abstract contract StakeManager is IStakeManager { + + /// maps paymaster to their deposits and stakes + mapping(address => DepositInfo) public deposits; + + /// @inheritdoc IStakeManager + function getDepositInfo(address account) public view returns (DepositInfo memory info) { + return deposits[account]; + } + + // internal method to return just the stake info + function _getStakeInfo(address addr) internal view returns (StakeInfo memory info) { + DepositInfo storage depositInfo = deposits[addr]; + info.stake = depositInfo.stake; + info.unstakeDelaySec = depositInfo.unstakeDelaySec; + } + + /// return the deposit (for gas payment) of the account + function balanceOf(address account) public view returns (uint256) { + return deposits[account].deposit; + } + + receive() external payable { + depositTo(msg.sender); + } + + function _incrementDeposit(address account, uint256 amount) internal { + DepositInfo storage info = deposits[account]; + uint256 newAmount = info.deposit + amount; + require(newAmount <= type(uint112).max, "deposit overflow"); + info.deposit = uint112(newAmount); + } + + /** + * add to the deposit of the given account + */ + function depositTo(address account) public payable { + _incrementDeposit(account, msg.value); + DepositInfo storage info = deposits[account]; + emit Deposited(account, info.deposit); + } + + /** + * add to the account's stake - amount and delay + * any pending unstake is first cancelled. + * @param unstakeDelaySec the new lock duration before the deposit can be withdrawn. 
+ */ + function addStake(uint32 unstakeDelaySec) public payable { + DepositInfo storage info = deposits[msg.sender]; + require(unstakeDelaySec > 0, "must specify unstake delay"); + require(unstakeDelaySec >= info.unstakeDelaySec, "cannot decrease unstake time"); + uint256 stake = info.stake + msg.value; + require(stake > 0, "no stake specified"); + require(stake <= type(uint112).max, "stake overflow"); + deposits[msg.sender] = DepositInfo( + info.deposit, + true, + uint112(stake), + unstakeDelaySec, + 0 + ); + emit StakeLocked(msg.sender, stake, unstakeDelaySec); + } + + /** + * attempt to unlock the stake. + * the value can be withdrawn (using withdrawStake) after the unstake delay. + */ + function unlockStake() external { + DepositInfo storage info = deposits[msg.sender]; + require(info.unstakeDelaySec != 0, "not staked"); + require(info.staked, "already unstaking"); + uint48 withdrawTime = uint48(block.timestamp) + info.unstakeDelaySec; + info.withdrawTime = withdrawTime; + info.staked = false; + emit StakeUnlocked(msg.sender, withdrawTime); + } + + + /** + * withdraw from the (unlocked) stake. + * must first call unlockStake and wait for the unstakeDelay to pass + * @param withdrawAddress the address to send withdrawn value. + */ + function withdrawStake(address payable withdrawAddress) external { + DepositInfo storage info = deposits[msg.sender]; + uint256 stake = info.stake; + require(stake > 0, "No stake to withdraw"); + require(info.withdrawTime > 0, "must call unlockStake() first"); + require(info.withdrawTime <= block.timestamp, "Stake withdrawal is not due"); + info.unstakeDelaySec = 0; + info.withdrawTime = 0; + info.stake = 0; + emit StakeWithdrawn(msg.sender, withdrawAddress, stake); + (bool success,) = withdrawAddress.call{value : stake}(""); + require(success, "failed to withdraw stake"); + } + + /** + * withdraw from the deposit. + * @param withdrawAddress the address to send withdrawn value. + * @param withdrawAmount the amount to withdraw. 
+ */ + function withdrawTo(address payable withdrawAddress, uint256 withdrawAmount) external { + DepositInfo storage info = deposits[msg.sender]; + require(withdrawAmount <= info.deposit, "Withdraw amount too large"); + info.deposit = uint112(info.deposit - withdrawAmount); + emit Withdrawn(msg.sender, withdrawAddress, withdrawAmount); + (bool success,) = withdrawAddress.call{value : withdrawAmount}(""); + require(success, "failed to withdraw"); + } +} diff --git a/contracts/src/v0.8/vendor/entrypoint/interfaces/IAccount.sol b/contracts/src/v0.8/vendor/entrypoint/interfaces/IAccount.sol new file mode 100644 index 00000000..1600de3d --- /dev/null +++ b/contracts/src/v0.8/vendor/entrypoint/interfaces/IAccount.sol @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity ^0.8.12; + +import "./UserOperation.sol"; + +interface IAccount { + + /** + * Validate user's signature and nonce + * the entryPoint will make the call to the recipient only if this validation call returns successfully. + * signature failure should be reported by returning SIG_VALIDATION_FAILED (1). + * This allows making a "simulation call" without a valid signature + * Other failures (e.g. nonce mismatch, or invalid signature format) should still revert to signal failure. + * + * @dev Must validate caller is the entryPoint. + * Must validate the signature and nonce + * @param userOp the operation that is about to be executed. + * @param userOpHash hash of the user's request data. can be used as the basis for signature. + * @param missingAccountFunds missing funds on the account's deposit in the entrypoint. + * This is the minimum amount to transfer to the sender(entryPoint) to be able to make the call. + * The excess is left as a deposit in the entrypoint, for future calls. + * can be withdrawn anytime using "entryPoint.withdrawTo()" + * In case there is a paymaster in the request (or the current deposit is high enough), this value will be zero. 
+ * @return validationData packaged ValidationData structure. use `_packValidationData` and `_unpackValidationData` to encode and decode + * <20-byte> sigAuthorizer - 0 for valid signature, 1 to mark signature failure, + * otherwise, an address of an "authorizer" contract. + * <6-byte> validUntil - last timestamp this operation is valid. 0 for "indefinite" + * <6-byte> validAfter - first timestamp this operation is valid + * If an account doesn't use time-range, it is enough to return SIG_VALIDATION_FAILED value (1) for signature failure. + * Note that the validation code cannot use block.timestamp (or block.number) directly. + */ + function validateUserOp(UserOperation calldata userOp, bytes32 userOpHash, uint256 missingAccountFunds) + external returns (uint256 validationData); +} diff --git a/contracts/src/v0.8/vendor/entrypoint/interfaces/IAggregator.sol b/contracts/src/v0.8/vendor/entrypoint/interfaces/IAggregator.sol new file mode 100644 index 00000000..086c6f32 --- /dev/null +++ b/contracts/src/v0.8/vendor/entrypoint/interfaces/IAggregator.sol @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity ^0.8.12; + +import "./UserOperation.sol"; + +/** + * Aggregated Signatures validator. + */ +interface IAggregator { + + /** + * validate aggregated signature. + * revert if the aggregated signature does not match the given list of operations. + */ + function validateSignatures(UserOperation[] calldata userOps, bytes calldata signature) external view; + + /** + * validate signature of a single userOp + * This method is should be called by bundler after EntryPoint.simulateValidation() returns (reverts) with ValidationResultWithAggregation + * First it validates the signature over the userOp. Then it returns data to be used when creating the handleOps. + * @param userOp the userOperation received from the user. + * @return sigForUserOp the value to put into the signature field of the userOp when calling handleOps. 
+ * (usually empty, unless account and aggregator support some kind of "multisig" + */ + function validateUserOpSignature(UserOperation calldata userOp) + external view returns (bytes memory sigForUserOp); + + /** + * aggregate multiple signatures into a single value. + * This method is called off-chain to calculate the signature to pass with handleOps() + * bundler MAY use optimized custom code perform this aggregation + * @param userOps array of UserOperations to collect the signatures from. + * @return aggregatedSignature the aggregated signature + */ + function aggregateSignatures(UserOperation[] calldata userOps) external view returns (bytes memory aggregatedSignature); +} diff --git a/contracts/src/v0.8/vendor/entrypoint/interfaces/IEntryPoint.sol b/contracts/src/v0.8/vendor/entrypoint/interfaces/IEntryPoint.sol new file mode 100644 index 00000000..22bb1b7a --- /dev/null +++ b/contracts/src/v0.8/vendor/entrypoint/interfaces/IEntryPoint.sol @@ -0,0 +1,197 @@ +/** + ** Account-Abstraction (EIP-4337) singleton EntryPoint implementation. + ** Only one instance required on each chain. + **/ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity ^0.8.12; + +/* solhint-disable avoid-low-level-calls */ +/* solhint-disable no-inline-assembly */ +/* solhint-disable reason-string */ + +import "./UserOperation.sol"; +import "./IStakeManager.sol"; +import "./IAggregator.sol"; + +interface IEntryPoint is IStakeManager { + + /*** + * An event emitted after each successful request + * @param userOpHash - unique identifier for the request (hash its entire content, except signature). + * @param sender - the account that generates this request. + * @param paymaster - if non-null, the paymaster that pays for this request. + * @param nonce - the nonce value from the request. + * @param success - true if the sender transaction succeeded, false if reverted. + * @param actualGasCost - actual amount paid (by account or paymaster) for this UserOperation. 
+ * @param actualGasUsed - total gas used by this UserOperation (including preVerification, creation, validation and execution). + */ + event UserOperationEvent(bytes32 indexed userOpHash, address indexed sender, address indexed paymaster, uint256 nonce, bool success, uint256 actualGasCost, uint256 actualGasUsed); + + /** + * account "sender" was deployed. + * @param userOpHash the userOp that deployed this account. UserOperationEvent will follow. + * @param sender the account that is deployed + * @param factory the factory used to deploy this account (in the initCode) + * @param paymaster the paymaster used by this UserOp + */ + event AccountDeployed(bytes32 indexed userOpHash, address indexed sender, address factory, address paymaster); + + /** + * An event emitted if the UserOperation "callData" reverted with non-zero length + * @param userOpHash the request unique identifier. + * @param sender the sender of this request + * @param nonce the nonce used in the request + * @param revertReason - the return bytes from the (reverted) call to "callData". + */ + event UserOperationRevertReason(bytes32 indexed userOpHash, address indexed sender, uint256 nonce, bytes revertReason); + + /** + * signature aggregator used by the following UserOperationEvents within this bundle. + */ + event SignatureAggregatorChanged(address indexed aggregator); + + /** + * a custom revert error of handleOps, to identify the offending op. + * NOTE: if simulateValidation passes successfully, there should be no reason for handleOps to fail on it. + * @param opIndex - index into the array of ops to the failed one (in simulateValidation, this is always zero) + * @param reason - revert reason + * The string starts with a unique code "AAmn", where "m" is "1" for factory, "2" for account and "3" for paymaster issues, + * so a failure can be attributed to the correct entity. + * Should be caught in off-chain handleOps simulation and not happen on-chain. 
+ * Useful for mitigating DoS attempts against batchers or for troubleshooting of factory/account/paymaster reverts. + */ + error FailedOp(uint256 opIndex, string reason); + + /** + * error case when a signature aggregator fails to verify the aggregated signature it had created. + */ + error SignatureValidationFailed(address aggregator); + + /** + * Successful result from simulateValidation. + * @param returnInfo gas and time-range returned values + * @param senderInfo stake information about the sender + * @param factoryInfo stake information about the factory (if any) + * @param paymasterInfo stake information about the paymaster (if any) + */ + error ValidationResult(ReturnInfo returnInfo, + StakeInfo senderInfo, StakeInfo factoryInfo, StakeInfo paymasterInfo); + + /** + * Successful result from simulateValidation, if the account returns a signature aggregator + * @param returnInfo gas and time-range returned values + * @param senderInfo stake information about the sender + * @param factoryInfo stake information about the factory (if any) + * @param paymasterInfo stake information about the paymaster (if any) + * @param aggregatorInfo signature aggregation info (if the account requires signature aggregator) + * bundler MUST use it to verify the signature, or reject the UserOperation + */ + error ValidationResultWithAggregation(ReturnInfo returnInfo, + StakeInfo senderInfo, StakeInfo factoryInfo, StakeInfo paymasterInfo, + AggregatorStakeInfo aggregatorInfo); + + /** + * return value of getSenderAddress + */ + error SenderAddressResult(address sender); + + /** + * return value of simulateHandleOp + */ + error ExecutionResult(uint256 preOpGas, uint256 paid, uint48 validAfter, uint48 validUntil, bool targetSuccess, bytes targetResult); + + //UserOps handled, per aggregator + struct UserOpsPerAggregator { + UserOperation[] userOps; + + // aggregator address + IAggregator aggregator; + // aggregated signature + bytes signature; + } + + /** + * Execute a batch of 
UserOperation. + * no signature aggregator is used. + * if any account requires an aggregator (that is, it returned an aggregator when + * performing simulateValidation), then handleAggregatedOps() must be used instead. + * @param ops the operations to execute + * @param beneficiary the address to receive the fees + */ + function handleOps(UserOperation[] calldata ops, address payable beneficiary) external; + + /** + * Execute a batch of UserOperation with Aggregators + * @param opsPerAggregator the operations to execute, grouped by aggregator (or address(0) for no-aggregator accounts) + * @param beneficiary the address to receive the fees + */ + function handleAggregatedOps( + UserOpsPerAggregator[] calldata opsPerAggregator, + address payable beneficiary + ) external; + + /** + * generate a request Id - unique identifier for this request. + * the request ID is a hash over the content of the userOp (except the signature), the entrypoint and the chainid. + */ + function getUserOpHash(UserOperation calldata userOp) external view returns (bytes32); + + /** + * Simulate a call to account.validateUserOp and paymaster.validatePaymasterUserOp. + * @dev this method always revert. Successful result is ValidationResult error. other errors are failures. + * @dev The node must also verify it doesn't use banned opcodes, and that it doesn't reference storage outside the account's data. + * @param userOp the user operation to validate. 
+ */ + function simulateValidation(UserOperation calldata userOp) external; + + /** + * gas and return values during simulation + * @param preOpGas the gas used for validation (including preValidationGas) + * @param prefund the required prefund for this operation + * @param sigFailed validateUserOp's (or paymaster's) signature check failed + * @param validAfter - first timestamp this UserOp is valid (merging account and paymaster time-range) + * @param validUntil - last timestamp this UserOp is valid (merging account and paymaster time-range) + * @param paymasterContext returned by validatePaymasterUserOp (to be passed into postOp) + */ + struct ReturnInfo { + uint256 preOpGas; + uint256 prefund; + bool sigFailed; + uint48 validAfter; + uint48 validUntil; + bytes paymasterContext; + } + + /** + * returned aggregated signature info. + * the aggregator returned by the account, and its current stake. + */ + struct AggregatorStakeInfo { + address aggregator; + StakeInfo stakeInfo; + } + + /** + * Get counterfactual sender address. + * Calculate the sender contract address that will be generated by the initCode and salt in the UserOperation. + * this method always reverts, and returns the address in SenderAddressResult error + * @param initCode the constructor code to be passed into the UserOperation. + */ + function getSenderAddress(bytes memory initCode) external; + + + /** + * simulate full execution of a UserOperation (including both validation and target execution) + * this method will always revert with "ExecutionResult". + * it performs full validation of the UserOperation, but ignores signature error. + * an optional target address is called after the userop succeeds, and its value is returned + * (before the entire call is reverted) + * Note that in order to collect the success/failure of the target call, it must be executed + * with trace enabled to track the emitted events. 
+ * @param op the UserOperation to simulate + * @param target if nonzero, a target address to call after userop simulation. If called, the targetSuccess and targetResult + * are set to the return from that call. + * @param targetCallData callData to pass to target address + */ + function simulateHandleOp(UserOperation calldata op, address target, bytes calldata targetCallData) external; +} diff --git a/contracts/src/v0.8/vendor/entrypoint/interfaces/IPaymaster.sol b/contracts/src/v0.8/vendor/entrypoint/interfaces/IPaymaster.sol new file mode 100644 index 00000000..af50367a --- /dev/null +++ b/contracts/src/v0.8/vendor/entrypoint/interfaces/IPaymaster.sol @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity ^0.8.12; + +import "./UserOperation.sol"; + +/** + * the interface exposed by a paymaster contract, who agrees to pay the gas for user's operations. + * a paymaster must hold a stake to cover the required entrypoint stake and also the gas for the transaction. + */ +interface IPaymaster { + + enum PostOpMode { + opSucceeded, // user op succeeded + opReverted, // user op reverted. still has to pay for gas. + postOpReverted //user op succeeded, but caused postOp to revert. Now it's a 2nd call, after user's op was deliberately reverted. + } + + /** + * payment validation: check if paymaster agrees to pay. + * Must verify sender is the entryPoint. + * Revert to reject this request. + * Note that bundlers will reject this method if it changes the state, unless the paymaster is trusted (whitelisted) + * The paymaster pre-pays using its deposit, and receive back a refund after the postOp method returns. + * @param userOp the user operation + * @param userOpHash hash of the user's request data. + * @param maxCost the maximum cost of this transaction (based on maximum gas and gas price from userOp) + * @return context value to send to a postOp + * zero length to signify postOp is not required. 
+ * @return validationData signature and time-range of this operation, encoded the same as the return value of validateUserOperation + * <20-byte> sigAuthorizer - 0 for valid signature, 1 to mark signature failure, + * otherwise, an address of an "authorizer" contract. + * <6-byte> validUntil - last timestamp this operation is valid. 0 for "indefinite" + * <6-byte> validAfter - first timestamp this operation is valid + * Note that the validation code cannot use block.timestamp (or block.number) directly. + */ + function validatePaymasterUserOp(UserOperation calldata userOp, bytes32 userOpHash, uint256 maxCost) + external returns (bytes memory context, uint256 validationData); + + /** + * post-operation handler. + * Must verify sender is the entryPoint + * @param mode enum with the following options: + * opSucceeded - user operation succeeded. + * opReverted - user op reverted. still has to pay for gas. + * postOpReverted - user op succeeded, but caused postOp (in mode=opSucceeded) to revert. + * Now this is the 2nd call, after user's op was deliberately reverted. + * @param context - the context value returned by validatePaymasterUserOp + * @param actualGasCost - actual gas used so far (without this postOp call). + */ + function postOp(PostOpMode mode, bytes calldata context, uint256 actualGasCost) external; +} diff --git a/contracts/src/v0.8/vendor/entrypoint/interfaces/IStakeManager.sol b/contracts/src/v0.8/vendor/entrypoint/interfaces/IStakeManager.sol new file mode 100644 index 00000000..c19c1bab --- /dev/null +++ b/contracts/src/v0.8/vendor/entrypoint/interfaces/IStakeManager.sol @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-3.0-only +pragma solidity ^0.8.12; + +/** + * manage deposits and stakes. + * deposit is just a balance used to pay for UserOperations (either by a paymaster or an account) + * stake is value locked for at least "unstakeDelay" by the staked entity. 
+ */ +interface IStakeManager { + + event Deposited( + address indexed account, + uint256 totalDeposit + ); + + event Withdrawn( + address indexed account, + address withdrawAddress, + uint256 amount + ); + + /// Emitted when stake or unstake delay are modified + event StakeLocked( + address indexed account, + uint256 totalStaked, + uint256 unstakeDelaySec + ); + + /// Emitted once a stake is scheduled for withdrawal + event StakeUnlocked( + address indexed account, + uint256 withdrawTime + ); + + event StakeWithdrawn( + address indexed account, + address withdrawAddress, + uint256 amount + ); + + /** + * @param deposit the entity's deposit + * @param staked true if this entity is staked. + * @param stake actual amount of ether staked for this entity. + * @param unstakeDelaySec minimum delay to withdraw the stake. + * @param withdrawTime - first block timestamp where 'withdrawStake' will be callable, or zero if already locked + * @dev sizes were chosen so that (deposit,staked, stake) fit into one cell (used during handleOps) + * and the rest fit into a 2nd cell. + * 112 bit allows for 10^15 eth + * 48 bit for full timestamp + * 32 bit allows 150 years for unstake delay + */ + struct DepositInfo { + uint112 deposit; + bool staked; + uint112 stake; + uint32 unstakeDelaySec; + uint48 withdrawTime; + } + + //API struct used by getStakeInfo and simulateValidation + struct StakeInfo { + uint256 stake; + uint256 unstakeDelaySec; + } + + /// @return info - full deposit information of given account + function getDepositInfo(address account) external view returns (DepositInfo memory info); + + /// @return the deposit (for gas payment) of the account + function balanceOf(address account) external view returns (uint256); + + /** + * add to the deposit of the given account + */ + function depositTo(address account) external payable; + + /** + * add to the account's stake - amount and delay + * any pending unstake is first cancelled. 
+ * @param _unstakeDelaySec the new lock duration before the deposit can be withdrawn. + */ + function addStake(uint32 _unstakeDelaySec) external payable; + + /** + * attempt to unlock the stake. + * the value can be withdrawn (using withdrawStake) after the unstake delay. + */ + function unlockStake() external; + + /** + * withdraw from the (unlocked) stake. + * must first call unlockStake and wait for the unstakeDelay to pass + * @param withdrawAddress the address to send withdrawn value. + */ + function withdrawStake(address payable withdrawAddress) external; + + /** + * withdraw from the deposit. + * @param withdrawAddress the address to send withdrawn value. + * @param withdrawAmount the amount to withdraw. + */ + function withdrawTo(address payable withdrawAddress, uint256 withdrawAmount) external; +} diff --git a/contracts/src/v0.8/vendor/entrypoint/interfaces/UserOperation.sol b/contracts/src/v0.8/vendor/entrypoint/interfaces/UserOperation.sol new file mode 100644 index 00000000..dfff4279 --- /dev/null +++ b/contracts/src/v0.8/vendor/entrypoint/interfaces/UserOperation.sol @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity ^0.8.12; + +/* solhint-disable no-inline-assembly */ + + /** + * User Operation struct + * @param sender the sender account of this request. + * @param nonce unique value the sender uses to verify it is not a replay. + * @param initCode if set, the account contract will be created by this constructor/ + * @param callData the method call to execute on this account. + * @param callGasLimit the gas limit passed to the callData method call. + * @param verificationGasLimit gas used for validateUserOp and validatePaymasterUserOp. + * @param preVerificationGas gas not calculated by the handleOps method, but added to the gas paid. Covers batch overhead. + * @param maxFeePerGas same as EIP-1559 gas parameter. + * @param maxPriorityFeePerGas same as EIP-1559 gas parameter. 
+ * @param paymasterAndData if set, this field holds the paymaster address and paymaster-specific data. the paymaster will pay for the transaction instead of the sender. + * @param signature sender-verified signature over the entire request, the EntryPoint address and the chain ID. + */ + struct UserOperation { + + address sender; + uint256 nonce; + bytes initCode; + bytes callData; + uint256 callGasLimit; + uint256 verificationGasLimit; + uint256 preVerificationGas; + uint256 maxFeePerGas; + uint256 maxPriorityFeePerGas; + bytes paymasterAndData; + bytes signature; + } + +/** + * Utility functions helpful when working with UserOperation structs. + */ +library UserOperationLib { + + function getSender(UserOperation calldata userOp) internal pure returns (address) { + address data; + //read sender from userOp, which is first userOp member (saves 800 gas...) + assembly {data := calldataload(userOp)} + return address(uint160(data)); + } + + //relayer/block builder might submit the TX with higher priorityFee, but the user should not + // pay above what he signed for. + function gasPrice(UserOperation calldata userOp) internal view returns (uint256) { + unchecked { + uint256 maxFeePerGas = userOp.maxFeePerGas; + uint256 maxPriorityFeePerGas = userOp.maxPriorityFeePerGas; + if (maxFeePerGas == maxPriorityFeePerGas) { + //legacy mode (for networks that don't support basefee opcode) + return maxFeePerGas; + } + return min(maxFeePerGas, maxPriorityFeePerGas + block.basefee); + } + } + + function pack(UserOperation calldata userOp) internal pure returns (bytes memory ret) { + //lighter signature scheme. must match UserOp.ts#packUserOp + bytes calldata sig = userOp.signature; + // copy directly the userOp from calldata up to (but not including) the signature. + // this encoding depends on the ABI encoding of calldata, but is much lighter to copy + // than referencing each field separately. 
+ assembly { + let ofs := userOp + let len := sub(sub(sig.offset, ofs), 32) + ret := mload(0x40) + mstore(0x40, add(ret, add(len, 32))) + mstore(ret, len) + calldatacopy(add(ret, 32), ofs, len) + } + } + + function hash(UserOperation calldata userOp) internal pure returns (bytes32) { + return keccak256(pack(userOp)); + } + + function min(uint256 a, uint256 b) internal pure returns (uint256) { + return a < b ? a : b; + } +} diff --git a/contracts/src/v0.8/vendor/entrypoint/utils/Exec.sol b/contracts/src/v0.8/vendor/entrypoint/utils/Exec.sol new file mode 100644 index 00000000..69d653d9 --- /dev/null +++ b/contracts/src/v0.8/vendor/entrypoint/utils/Exec.sol @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: LGPL-3.0-only +pragma solidity >=0.7.5 <0.9.0; + +// solhint-disable no-inline-assembly + +/** + * Utility functions helpful when making different kinds of contract calls in Solidity. + */ +library Exec { + + function call( + address to, + uint256 value, + bytes memory data, + uint256 txGas + ) internal returns (bool success) { + assembly { + success := call(txGas, to, value, add(data, 0x20), mload(data), 0, 0) + } + } + + function staticcall( + address to, + bytes memory data, + uint256 txGas + ) internal view returns (bool success) { + assembly { + success := staticcall(txGas, to, add(data, 0x20), mload(data), 0, 0) + } + } + + function delegateCall( + address to, + bytes memory data, + uint256 txGas + ) internal returns (bool success) { + assembly { + success := delegatecall(txGas, to, add(data, 0x20), mload(data), 0, 0) + } + } + + // get returned data from last call or delegatecall + function getReturnData(uint256 maxLen) internal pure returns (bytes memory returnData) { + assembly { + let len := returndatasize() + if gt(len, maxLen) { + len := maxLen + } + let ptr := mload(0x40) + mstore(0x40, add(ptr, add(len, 0x20))) + mstore(ptr, len) + returndatacopy(add(ptr, 0x20), 0, len) + returnData := ptr + } + } + + // revert with explicit byte array (probably reverted 
info from call) + function revertWithData(bytes memory returnData) internal pure { + assembly { + revert(add(returnData, 32), mload(returnData)) + } + } + + function callAndRevert(address to, bytes memory data, uint256 maxLen) internal { + bool success = call(to,0,data,gasleft()); + if (!success) { + revertWithData(getReturnData(maxLen)); + } + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/proxy/Proxy.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/proxy/Proxy.sol new file mode 100644 index 00000000..988cf72a --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/proxy/Proxy.sol @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.6.0) (proxy/Proxy.sol) + +pragma solidity ^0.8.0; + +/** + * @dev This abstract contract provides a fallback function that delegates all calls to another contract using the EVM + * instruction `delegatecall`. We refer to the second contract as the _implementation_ behind the proxy, and it has to + * be specified by overriding the virtual {_implementation} function. + * + * Additionally, delegation to the implementation can be triggered manually through the {_fallback} function, or to a + * different contract through the {_delegate} function. + * + * The success and return data of the delegated call will be returned back to the caller of the proxy. + */ +abstract contract Proxy { + /** + * @dev Delegates the current call to `implementation`. + * + * This function does not return to its internal call site, it will return directly to the external caller. + */ + function _delegate(address implementation) internal virtual { + assembly { + // Copy msg.data. We take full control of memory in this inline assembly + // block because it will not return to Solidity code. We overwrite the + // Solidity scratch pad at memory position 0. + calldatacopy(0, 0, calldatasize()) + + // Call the implementation. 
+ // out and outsize are 0 because we don't know the size yet. + let result := delegatecall(gas(), implementation, 0, calldatasize(), 0, 0) + + // Copy the returned data. + returndatacopy(0, 0, returndatasize()) + + switch result + // delegatecall returns 0 on error. + case 0 { + revert(0, returndatasize()) + } + default { + return(0, returndatasize()) + } + } + } + + /** + * @dev This is a virtual function that should be overridden so it returns the address to which the fallback function + * and {_fallback} should delegate. + */ + function _implementation() internal view virtual returns (address); + + /** + * @dev Delegates the current call to the address returned by `_implementation()`. + * + * This function does not return to its internal call site, it will return directly to the external caller. + */ + function _fallback() internal virtual { + _beforeFallback(); + _delegate(_implementation()); + } + + /** + * @dev Fallback function that delegates calls to the address returned by `_implementation()`. Will run if no other + * function in the contract matches the call data. + */ + fallback() external payable virtual { + _fallback(); + } + + /** + * @dev Fallback function that delegates calls to the address returned by `_implementation()`. Will run if call data + * is empty. + */ + receive() external payable virtual { + _fallback(); + } + + /** + * @dev Hook that is called before falling back to the implementation. Can happen as part of a manual `_fallback` + * call, or as part of the Solidity `fallback` or `receive` functions. + * + * If overridden should call `super._beforeFallback()`. 
+ */ + function _beforeFallback() internal virtual {} +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol new file mode 100644 index 00000000..69f3bf87 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Address.sol @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.7.0) (utils/Address.sol) + +pragma solidity ^0.8.1; + +/** + * @dev Collection of functions related to the address type + */ +library Address { + /** + * @dev Returns true if `account` is a contract. + * + * [IMPORTANT] + * ==== + * It is unsafe to assume that an address for which this function returns + * false is an externally-owned account (EOA) and not a contract. + * + * Among others, `isContract` will return false for the following + * types of addresses: + * + * - an externally-owned account + * - a contract in construction + * - an address where a contract will be created + * - an address where a contract lived, but was destroyed + * ==== + * + * [IMPORTANT] + * ==== + * You shouldn't rely on `isContract` to protect against flash loan attacks! + * + * Preventing calls from contracts is highly discouraged. It breaks composability, breaks support for smart wallets + * like Gnosis Safe, and does not provide security since it can be circumvented by calling from a contract + * constructor. + * ==== + */ + function isContract(address account) internal view returns (bool) { + // This method relies on extcodesize/address.code.length, which returns 0 + // for contracts in construction, since the code is only stored at the end + // of the constructor execution. + + return account.code.length > 0; + } + + /** + * @dev Replacement for Solidity's `transfer`: sends `amount` wei to + * `recipient`, forwarding all available gas and reverting on errors. 
+ * + * https://eips.ethereum.org/EIPS/eip-1884[EIP1884] increases the gas cost + * of certain opcodes, possibly making contracts go over the 2300 gas limit + * imposed by `transfer`, making them unable to receive funds via + * `transfer`. {sendValue} removes this limitation. + * + * https://diligence.consensys.net/posts/2019/09/stop-using-soliditys-transfer-now/[Learn more]. + * + * IMPORTANT: because control is transferred to `recipient`, care must be + * taken to not create reentrancy vulnerabilities. Consider using + * {ReentrancyGuard} or the + * https://solidity.readthedocs.io/en/v0.5.11/security-considerations.html#use-the-checks-effects-interactions-pattern[checks-effects-interactions pattern]. + */ + function sendValue(address payable recipient, uint256 amount) internal { + require(address(this).balance >= amount, "Address: insufficient balance"); + + (bool success, ) = recipient.call{value: amount}(""); + require(success, "Address: unable to send value, recipient may have reverted"); + } + + /** + * @dev Performs a Solidity function call using a low level `call`. A + * plain `call` is an unsafe replacement for a function call: use this + * function instead. + * + * If `target` reverts with a revert reason, it is bubbled up by this + * function (like regular Solidity function calls). + * + * Returns the raw returned data. To convert to the expected return value, + * use https://solidity.readthedocs.io/en/latest/units-and-global-variables.html?highlight=abi.decode#abi-encoding-and-decoding-functions[`abi.decode`]. + * + * Requirements: + * + * - `target` must be a contract. + * - calling `target` with `data` must not revert. 
+ * + * _Available since v3.1._ + */ + function functionCall(address target, bytes memory data) internal returns (bytes memory) { + return functionCallWithValue(target, data, 0, "Address: low-level call failed"); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`], but with + * `errorMessage` as a fallback revert reason when `target` reverts. + * + * _Available since v3.1._ + */ + function functionCall( + address target, + bytes memory data, + string memory errorMessage + ) internal returns (bytes memory) { + return functionCallWithValue(target, data, 0, errorMessage); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`], + * but also transferring `value` wei to `target`. + * + * Requirements: + * + * - the calling contract must have an ETH balance of at least `value`. + * - the called Solidity function must be `payable`. + * + * _Available since v3.1._ + */ + function functionCallWithValue( + address target, + bytes memory data, + uint256 value + ) internal returns (bytes memory) { + return functionCallWithValue(target, data, value, "Address: low-level call with value failed"); + } + + /** + * @dev Same as {xref-Address-functionCallWithValue-address-bytes-uint256-}[`functionCallWithValue`], but + * with `errorMessage` as a fallback revert reason when `target` reverts. + * + * _Available since v3.1._ + */ + function functionCallWithValue( + address target, + bytes memory data, + uint256 value, + string memory errorMessage + ) internal returns (bytes memory) { + require(address(this).balance >= value, "Address: insufficient balance for call"); + (bool success, bytes memory returndata) = target.call{value: value}(data); + return verifyCallResultFromTarget(target, success, returndata, errorMessage); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`], + * but performing a static call. 
+ * + * _Available since v3.3._ + */ + function functionStaticCall(address target, bytes memory data) internal view returns (bytes memory) { + return functionStaticCall(target, data, "Address: low-level static call failed"); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-string-}[`functionCall`], + * but performing a static call. + * + * _Available since v3.3._ + */ + function functionStaticCall( + address target, + bytes memory data, + string memory errorMessage + ) internal view returns (bytes memory) { + (bool success, bytes memory returndata) = target.staticcall(data); + return verifyCallResultFromTarget(target, success, returndata, errorMessage); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`], + * but performing a delegate call. + * + * _Available since v3.4._ + */ + function functionDelegateCall(address target, bytes memory data) internal returns (bytes memory) { + return functionDelegateCall(target, data, "Address: low-level delegate call failed"); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-string-}[`functionCall`], + * but performing a delegate call. + * + * _Available since v3.4._ + */ + function functionDelegateCall( + address target, + bytes memory data, + string memory errorMessage + ) internal returns (bytes memory) { + (bool success, bytes memory returndata) = target.delegatecall(data); + return verifyCallResultFromTarget(target, success, returndata, errorMessage); + } + + /** + * @dev Tool to verify that a low level call to smart-contract was successful, and revert (either by bubbling + * the revert reason or using the provided one) in case of unsuccessful call or if target was not a contract. 
+ * + * _Available since v4.8._ + */ + function verifyCallResultFromTarget( + address target, + bool success, + bytes memory returndata, + string memory errorMessage + ) internal view returns (bytes memory) { + if (success) { + if (returndata.length == 0) { + // only check isContract if the call was successful and the return data is empty + // otherwise we already know that it was a contract + require(isContract(target), "Address: call to non-contract"); + } + return returndata; + } else { + _revert(returndata, errorMessage); + } + } + + /** + * @dev Tool to verify that a low level call was successful, and revert if it wasn't, either by bubbling the + * revert reason or using the provided one. + * + * _Available since v4.3._ + */ + function verifyCallResult( + bool success, + bytes memory returndata, + string memory errorMessage + ) internal pure returns (bytes memory) { + if (success) { + return returndata; + } else { + _revert(returndata, errorMessage); + } + } + + function _revert(bytes memory returndata, string memory errorMessage) private pure { + // Look for revert reason and bubble it up if present + if (returndata.length > 0) { + // The easiest way to bubble the revert reason is using memory via assembly + /// @solidity memory-safe-assembly + assembly { + let returndata_size := mload(returndata) + revert(add(32, returndata), returndata_size) + } + } else { + revert(errorMessage); + } + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Context.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Context.sol new file mode 100644 index 00000000..f304065b --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/utils/Context.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts v4.4.1 (utils/Context.sol) + +pragma solidity ^0.8.0; + +/** + * @dev Provides information about the current execution context, including the + * sender of the 
transaction and its data. While these are generally available + * via msg.sender and msg.data, they should not be accessed in such a direct + * manner, since when dealing with meta-transactions the account sending and + * paying for execution may not be the actual sender (as far as an application + * is concerned). + * + * This contract is only required for intermediate, library-like contracts. + */ +abstract contract Context { + function _msgSender() internal view virtual returns (address) { + return msg.sender; + } + + function _msgData() internal view virtual returns (bytes calldata) { + return msg.data; + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableMap.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableMap.sol new file mode 100644 index 00000000..3557f367 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableMap.sol @@ -0,0 +1,529 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.7.0) (utils/structs/EnumerableMap.sol) + +pragma solidity ^0.8.0; + +import "./EnumerableSet.sol"; + +/** + * @dev Library for managing an enumerable variant of Solidity's + * https://solidity.readthedocs.io/en/latest/types.html#mapping-types[`mapping`] + * type. + * + * Maps have the following properties: + * + * - Entries are added, removed, and checked for existence in constant time + * (O(1)). + * - Entries are enumerated in O(n). No guarantees are made on the ordering. 
+ * + * ``` + * contract Example { + * // Add the library methods + * using EnumerableMap for EnumerableMap.UintToAddressMap; + * + * // Declare a set state variable + * EnumerableMap.UintToAddressMap private myMap; + * } + * ``` + * + * The following map types are supported: + * + * - `uint256 -> address` (`UintToAddressMap`) since v3.0.0 + * - `address -> uint256` (`AddressToUintMap`) since v4.6.0 + * - `bytes32 -> bytes32` (`Bytes32ToBytes32`) since v4.6.0 + * - `uint256 -> uint256` (`UintToUintMap`) since v4.7.0 + * - `bytes32 -> uint256` (`Bytes32ToUintMap`) since v4.7.0 + * + * [WARNING] + * ==== + * Trying to delete such a structure from storage will likely result in data corruption, rendering the structure unusable. + * See https://github.com/ethereum/solidity/pull/11843[ethereum/solidity#11843] for more info. + * + * In order to clean an EnumerableMap, you can either remove all elements one by one or create a fresh instance using an array of EnumerableMap. + * ==== + */ +library EnumerableMap { + using EnumerableSet for EnumerableSet.Bytes32Set; + + // To implement this library for multiple types with as little code + // repetition as possible, we write it in terms of a generic Map type with + // bytes32 keys and values. + // The Map implementation uses private functions, and user-facing + // implementations (such as Uint256ToAddressMap) are just wrappers around + // the underlying Map. + // This means that we can only create new EnumerableMaps for types that fit + // in bytes32. + + struct Bytes32ToBytes32Map { + // Storage of keys + EnumerableSet.Bytes32Set _keys; + mapping(bytes32 => bytes32) _values; + } + + /** + * @dev Adds a key-value pair to a map, or updates the value for an existing + * key. O(1). + * + * Returns true if the key was added to the map, that is if it was not + * already present. 
+ */ + function set( + Bytes32ToBytes32Map storage map, + bytes32 key, + bytes32 value + ) internal returns (bool) { + map._values[key] = value; + return map._keys.add(key); + } + + /** + * @dev Removes a key-value pair from a map. O(1). + * + * Returns true if the key was removed from the map, that is if it was present. + */ + function remove(Bytes32ToBytes32Map storage map, bytes32 key) internal returns (bool) { + delete map._values[key]; + return map._keys.remove(key); + } + + /** + * @dev Returns true if the key is in the map. O(1). + */ + function contains(Bytes32ToBytes32Map storage map, bytes32 key) internal view returns (bool) { + return map._keys.contains(key); + } + + /** + * @dev Returns the number of key-value pairs in the map. O(1). + */ + function length(Bytes32ToBytes32Map storage map) internal view returns (uint256) { + return map._keys.length(); + } + + /** + * @dev Returns the key-value pair stored at position `index` in the map. O(1). + * + * Note that there are no guarantees on the ordering of entries inside the + * array, and it may change when more entries are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(Bytes32ToBytes32Map storage map, uint256 index) internal view returns (bytes32, bytes32) { + bytes32 key = map._keys.at(index); + return (key, map._values[key]); + } + + /** + * @dev Tries to returns the value associated with `key`. O(1). + * Does not revert if `key` is not in the map. + */ + function tryGet(Bytes32ToBytes32Map storage map, bytes32 key) internal view returns (bool, bytes32) { + bytes32 value = map._values[key]; + if (value == bytes32(0)) { + return (contains(map, key), bytes32(0)); + } else { + return (true, value); + } + } + + /** + * @dev Returns the value associated with `key`. O(1). + * + * Requirements: + * + * - `key` must be in the map. 
+ */ + function get(Bytes32ToBytes32Map storage map, bytes32 key) internal view returns (bytes32) { + bytes32 value = map._values[key]; + require(value != 0 || contains(map, key), "EnumerableMap: nonexistent key"); + return value; + } + + /** + * @dev Same as {_get}, with a custom error message when `key` is not in the map. + * + * CAUTION: This function is deprecated because it requires allocating memory for the error + * message unnecessarily. For custom revert reasons use {_tryGet}. + */ + function get( + Bytes32ToBytes32Map storage map, + bytes32 key, + string memory errorMessage + ) internal view returns (bytes32) { + bytes32 value = map._values[key]; + require(value != 0 || contains(map, key), errorMessage); + return value; + } + + // UintToUintMap + + struct UintToUintMap { + Bytes32ToBytes32Map _inner; + } + + /** + * @dev Adds a key-value pair to a map, or updates the value for an existing + * key. O(1). + * + * Returns true if the key was added to the map, that is if it was not + * already present. + */ + function set( + UintToUintMap storage map, + uint256 key, + uint256 value + ) internal returns (bool) { + return set(map._inner, bytes32(key), bytes32(value)); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the key was removed from the map, that is if it was present. + */ + function remove(UintToUintMap storage map, uint256 key) internal returns (bool) { + return remove(map._inner, bytes32(key)); + } + + /** + * @dev Returns true if the key is in the map. O(1). + */ + function contains(UintToUintMap storage map, uint256 key) internal view returns (bool) { + return contains(map._inner, bytes32(key)); + } + + /** + * @dev Returns the number of elements in the map. O(1). + */ + function length(UintToUintMap storage map) internal view returns (uint256) { + return length(map._inner); + } + + /** + * @dev Returns the element stored at position `index` in the set. O(1). 
+ * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(UintToUintMap storage map, uint256 index) internal view returns (uint256, uint256) { + (bytes32 key, bytes32 value) = at(map._inner, index); + return (uint256(key), uint256(value)); + } + + /** + * @dev Tries to returns the value associated with `key`. O(1). + * Does not revert if `key` is not in the map. + */ + function tryGet(UintToUintMap storage map, uint256 key) internal view returns (bool, uint256) { + (bool success, bytes32 value) = tryGet(map._inner, bytes32(key)); + return (success, uint256(value)); + } + + /** + * @dev Returns the value associated with `key`. O(1). + * + * Requirements: + * + * - `key` must be in the map. + */ + function get(UintToUintMap storage map, uint256 key) internal view returns (uint256) { + return uint256(get(map._inner, bytes32(key))); + } + + /** + * @dev Same as {get}, with a custom error message when `key` is not in the map. + * + * CAUTION: This function is deprecated because it requires allocating memory for the error + * message unnecessarily. For custom revert reasons use {tryGet}. + */ + function get( + UintToUintMap storage map, + uint256 key, + string memory errorMessage + ) internal view returns (uint256) { + return uint256(get(map._inner, bytes32(key), errorMessage)); + } + + // UintToAddressMap + + struct UintToAddressMap { + Bytes32ToBytes32Map _inner; + } + + /** + * @dev Adds a key-value pair to a map, or updates the value for an existing + * key. O(1). + * + * Returns true if the key was added to the map, that is if it was not + * already present. 
+ */ + function set( + UintToAddressMap storage map, + uint256 key, + address value + ) internal returns (bool) { + return set(map._inner, bytes32(key), bytes32(uint256(uint160(value)))); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the key was removed from the map, that is if it was present. + */ + function remove(UintToAddressMap storage map, uint256 key) internal returns (bool) { + return remove(map._inner, bytes32(key)); + } + + /** + * @dev Returns true if the key is in the map. O(1). + */ + function contains(UintToAddressMap storage map, uint256 key) internal view returns (bool) { + return contains(map._inner, bytes32(key)); + } + + /** + * @dev Returns the number of elements in the map. O(1). + */ + function length(UintToAddressMap storage map) internal view returns (uint256) { + return length(map._inner); + } + + /** + * @dev Returns the element stored at position `index` in the set. O(1). + * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(UintToAddressMap storage map, uint256 index) internal view returns (uint256, address) { + (bytes32 key, bytes32 value) = at(map._inner, index); + return (uint256(key), address(uint160(uint256(value)))); + } + + /** + * @dev Tries to returns the value associated with `key`. O(1). + * Does not revert if `key` is not in the map. + * + * _Available since v3.4._ + */ + function tryGet(UintToAddressMap storage map, uint256 key) internal view returns (bool, address) { + (bool success, bytes32 value) = tryGet(map._inner, bytes32(key)); + return (success, address(uint160(uint256(value)))); + } + + /** + * @dev Returns the value associated with `key`. O(1). + * + * Requirements: + * + * - `key` must be in the map. 
+ */ + function get(UintToAddressMap storage map, uint256 key) internal view returns (address) { + return address(uint160(uint256(get(map._inner, bytes32(key))))); + } + + /** + * @dev Same as {get}, with a custom error message when `key` is not in the map. + * + * CAUTION: This function is deprecated because it requires allocating memory for the error + * message unnecessarily. For custom revert reasons use {tryGet}. + */ + function get( + UintToAddressMap storage map, + uint256 key, + string memory errorMessage + ) internal view returns (address) { + return address(uint160(uint256(get(map._inner, bytes32(key), errorMessage)))); + } + + // AddressToUintMap + + struct AddressToUintMap { + Bytes32ToBytes32Map _inner; + } + + /** + * @dev Adds a key-value pair to a map, or updates the value for an existing + * key. O(1). + * + * Returns true if the key was added to the map, that is if it was not + * already present. + */ + function set( + AddressToUintMap storage map, + address key, + uint256 value + ) internal returns (bool) { + return set(map._inner, bytes32(uint256(uint160(key))), bytes32(value)); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the key was removed from the map, that is if it was present. + */ + function remove(AddressToUintMap storage map, address key) internal returns (bool) { + return remove(map._inner, bytes32(uint256(uint160(key)))); + } + + /** + * @dev Returns true if the key is in the map. O(1). + */ + function contains(AddressToUintMap storage map, address key) internal view returns (bool) { + return contains(map._inner, bytes32(uint256(uint160(key)))); + } + + /** + * @dev Returns the number of elements in the map. O(1). + */ + function length(AddressToUintMap storage map) internal view returns (uint256) { + return length(map._inner); + } + + /** + * @dev Returns the element stored at position `index` in the set. O(1). 
+ * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(AddressToUintMap storage map, uint256 index) internal view returns (address, uint256) { + (bytes32 key, bytes32 value) = at(map._inner, index); + return (address(uint160(uint256(key))), uint256(value)); + } + + /** + * @dev Tries to returns the value associated with `key`. O(1). + * Does not revert if `key` is not in the map. + */ + function tryGet(AddressToUintMap storage map, address key) internal view returns (bool, uint256) { + (bool success, bytes32 value) = tryGet(map._inner, bytes32(uint256(uint160(key)))); + return (success, uint256(value)); + } + + /** + * @dev Returns the value associated with `key`. O(1). + * + * Requirements: + * + * - `key` must be in the map. + */ + function get(AddressToUintMap storage map, address key) internal view returns (uint256) { + return uint256(get(map._inner, bytes32(uint256(uint160(key))))); + } + + /** + * @dev Same as {get}, with a custom error message when `key` is not in the map. + * + * CAUTION: This function is deprecated because it requires allocating memory for the error + * message unnecessarily. For custom revert reasons use {tryGet}. + */ + function get( + AddressToUintMap storage map, + address key, + string memory errorMessage + ) internal view returns (uint256) { + return uint256(get(map._inner, bytes32(uint256(uint160(key))), errorMessage)); + } + + // Bytes32ToUintMap + + struct Bytes32ToUintMap { + Bytes32ToBytes32Map _inner; + } + + /** + * @dev Adds a key-value pair to a map, or updates the value for an existing + * key. O(1). + * + * Returns true if the key was added to the map, that is if it was not + * already present. 
+ */ + function set( + Bytes32ToUintMap storage map, + bytes32 key, + uint256 value + ) internal returns (bool) { + return set(map._inner, key, bytes32(value)); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the key was removed from the map, that is if it was present. + */ + function remove(Bytes32ToUintMap storage map, bytes32 key) internal returns (bool) { + return remove(map._inner, key); + } + + /** + * @dev Returns true if the key is in the map. O(1). + */ + function contains(Bytes32ToUintMap storage map, bytes32 key) internal view returns (bool) { + return contains(map._inner, key); + } + + /** + * @dev Returns the number of elements in the map. O(1). + */ + function length(Bytes32ToUintMap storage map) internal view returns (uint256) { + return length(map._inner); + } + + /** + * @dev Returns the element stored at position `index` in the set. O(1). + * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(Bytes32ToUintMap storage map, uint256 index) internal view returns (bytes32, uint256) { + (bytes32 key, bytes32 value) = at(map._inner, index); + return (key, uint256(value)); + } + + /** + * @dev Tries to returns the value associated with `key`. O(1). + * Does not revert if `key` is not in the map. + */ + function tryGet(Bytes32ToUintMap storage map, bytes32 key) internal view returns (bool, uint256) { + (bool success, bytes32 value) = tryGet(map._inner, key); + return (success, uint256(value)); + } + + /** + * @dev Returns the value associated with `key`. O(1). + * + * Requirements: + * + * - `key` must be in the map. + */ + function get(Bytes32ToUintMap storage map, bytes32 key) internal view returns (uint256) { + return uint256(get(map._inner, key)); + } + + /** + * @dev Same as {get}, with a custom error message when `key` is not in the map. 
+ * + * CAUTION: This function is deprecated because it requires allocating memory for the error + * message unnecessarily. For custom revert reasons use {tryGet}. + */ + function get( + Bytes32ToUintMap storage map, + bytes32 key, + string memory errorMessage + ) internal view returns (uint256) { + return uint256(get(map._inner, key, errorMessage)); + } +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol new file mode 100644 index 00000000..1d8029e3 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.7.0) (utils/structs/EnumerableSet.sol) +// This file was procedurally generated from scripts/generate/templates/EnumerableSet.js. + +pragma solidity ^0.8.0; + +/** + * @dev Library for managing + * https://en.wikipedia.org/wiki/Set_(abstract_data_type)[sets] of primitive + * types. + * + * Sets have the following properties: + * + * - Elements are added, removed, and checked for existence in constant time + * (O(1)). + * - Elements are enumerated in O(n). No guarantees are made on the ordering. + * + * ``` + * contract Example { + * // Add the library methods + * using EnumerableSet for EnumerableSet.AddressSet; + * + * // Declare a set state variable + * EnumerableSet.AddressSet private mySet; + * } + * ``` + * + * As of v3.3.0, sets of type `bytes32` (`Bytes32Set`), `address` (`AddressSet`) + * and `uint256` (`UintSet`) are supported. + * + * [WARNING] + * ==== + * Trying to delete such a structure from storage will likely result in data corruption, rendering the structure unusable. + * See https://github.com/ethereum/solidity/pull/11843[ethereum/solidity#11843] for more info. 
+ * + * In order to clean an EnumerableSet, you can either remove all elements one by one or create a fresh instance using an array of EnumerableSet. + * ==== + */ +library EnumerableSet { + // To implement this library for multiple types with as little code + // repetition as possible, we write it in terms of a generic Set type with + // bytes32 values. + // The Set implementation uses private functions, and user-facing + // implementations (such as AddressSet) are just wrappers around the + // underlying Set. + // This means that we can only create new EnumerableSets for types that fit + // in bytes32. + + struct Set { + // Storage of set values + bytes32[] _values; + // Position of the value in the `values` array, plus 1 because index 0 + // means a value is not in the set. + mapping(bytes32 => uint256) _indexes; + } + + /** + * @dev Add a value to a set. O(1). + * + * Returns true if the value was added to the set, that is if it was not + * already present. + */ + function _add(Set storage set, bytes32 value) private returns (bool) { + if (!_contains(set, value)) { + set._values.push(value); + // The value is stored at length-1, but we add 1 to all indexes + // and use 0 as a sentinel value + set._indexes[value] = set._values.length; + return true; + } else { + return false; + } + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the value was removed from the set, that is if it was + * present. + */ + function _remove(Set storage set, bytes32 value) private returns (bool) { + // We read and store the value's index to prevent multiple reads from the same storage slot + uint256 valueIndex = set._indexes[value]; + + if (valueIndex != 0) { + // Equivalent to contains(set, value) + // To delete an element from the _values array in O(1), we swap the element to delete with the last one in + // the array, and then remove the last element (sometimes called as 'swap and pop'). + // This modifies the order of the array, as noted in {at}. 
+ + uint256 toDeleteIndex = valueIndex - 1; + uint256 lastIndex = set._values.length - 1; + + if (lastIndex != toDeleteIndex) { + bytes32 lastValue = set._values[lastIndex]; + + // Move the last value to the index where the value to delete is + set._values[toDeleteIndex] = lastValue; + // Update the index for the moved value + set._indexes[lastValue] = valueIndex; // Replace lastValue's index to valueIndex + } + + // Delete the slot where the moved value was stored + set._values.pop(); + + // Delete the index for the deleted slot + delete set._indexes[value]; + + return true; + } else { + return false; + } + } + + /** + * @dev Returns true if the value is in the set. O(1). + */ + function _contains(Set storage set, bytes32 value) private view returns (bool) { + return set._indexes[value] != 0; + } + + /** + * @dev Returns the number of values on the set. O(1). + */ + function _length(Set storage set) private view returns (uint256) { + return set._values.length; + } + + /** + * @dev Returns the value stored at position `index` in the set. O(1). + * + * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function _at(Set storage set, uint256 index) private view returns (bytes32) { + return set._values[index]; + } + + /** + * @dev Return the entire set in an array + * + * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed + * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that + * this function has an unbounded cost, and using it as part of a state-changing function may render the function + * uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block. 
+ */ + function _values(Set storage set) private view returns (bytes32[] memory) { + return set._values; + } + + // Bytes32Set + + struct Bytes32Set { + Set _inner; + } + + /** + * @dev Add a value to a set. O(1). + * + * Returns true if the value was added to the set, that is if it was not + * already present. + */ + function add(Bytes32Set storage set, bytes32 value) internal returns (bool) { + return _add(set._inner, value); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the value was removed from the set, that is if it was + * present. + */ + function remove(Bytes32Set storage set, bytes32 value) internal returns (bool) { + return _remove(set._inner, value); + } + + /** + * @dev Returns true if the value is in the set. O(1). + */ + function contains(Bytes32Set storage set, bytes32 value) internal view returns (bool) { + return _contains(set._inner, value); + } + + /** + * @dev Returns the number of values in the set. O(1). + */ + function length(Bytes32Set storage set) internal view returns (uint256) { + return _length(set._inner); + } + + /** + * @dev Returns the value stored at position `index` in the set. O(1). + * + * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(Bytes32Set storage set, uint256 index) internal view returns (bytes32) { + return _at(set._inner, index); + } + + /** + * @dev Return the entire set in an array + * + * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed + * to mostly be used by view accessors that are queried without any gas fees. 
Developers should keep in mind that + * this function has an unbounded cost, and using it as part of a state-changing function may render the function + * uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block. + */ + function values(Bytes32Set storage set) internal view returns (bytes32[] memory) { + bytes32[] memory store = _values(set._inner); + bytes32[] memory result; + + /// @solidity memory-safe-assembly + assembly { + result := store + } + + return result; + } + + // AddressSet + + struct AddressSet { + Set _inner; + } + + /** + * @dev Add a value to a set. O(1). + * + * Returns true if the value was added to the set, that is if it was not + * already present. + */ + function add(AddressSet storage set, address value) internal returns (bool) { + return _add(set._inner, bytes32(uint256(uint160(value)))); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the value was removed from the set, that is if it was + * present. + */ + function remove(AddressSet storage set, address value) internal returns (bool) { + return _remove(set._inner, bytes32(uint256(uint160(value)))); + } + + /** + * @dev Returns true if the value is in the set. O(1). + */ + function contains(AddressSet storage set, address value) internal view returns (bool) { + return _contains(set._inner, bytes32(uint256(uint160(value)))); + } + + /** + * @dev Returns the number of values in the set. O(1). + */ + function length(AddressSet storage set) internal view returns (uint256) { + return _length(set._inner); + } + + /** + * @dev Returns the value stored at position `index` in the set. O(1). + * + * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. 
+ */ + function at(AddressSet storage set, uint256 index) internal view returns (address) { + return address(uint160(uint256(_at(set._inner, index)))); + } + + /** + * @dev Return the entire set in an array + * + * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed + * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that + * this function has an unbounded cost, and using it as part of a state-changing function may render the function + * uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block. + */ + function values(AddressSet storage set) internal view returns (address[] memory) { + bytes32[] memory store = _values(set._inner); + address[] memory result; + + /// @solidity memory-safe-assembly + assembly { + result := store + } + + return result; + } + + // UintSet + + struct UintSet { + Set _inner; + } + + /** + * @dev Add a value to a set. O(1). + * + * Returns true if the value was added to the set, that is if it was not + * already present. + */ + function add(UintSet storage set, uint256 value) internal returns (bool) { + return _add(set._inner, bytes32(value)); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the value was removed from the set, that is if it was + * present. + */ + function remove(UintSet storage set, uint256 value) internal returns (bool) { + return _remove(set._inner, bytes32(value)); + } + + /** + * @dev Returns true if the value is in the set. O(1). + */ + function contains(UintSet storage set, uint256 value) internal view returns (bool) { + return _contains(set._inner, bytes32(value)); + } + + /** + * @dev Returns the number of values in the set. O(1). + */ + function length(UintSet storage set) internal view returns (uint256) { + return _length(set._inner); + } + + /** + * @dev Returns the value stored at position `index` in the set. O(1). 
+ * + * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(UintSet storage set, uint256 index) internal view returns (uint256) { + return uint256(_at(set._inner, index)); + } + + /** + * @dev Return the entire set in an array + * + * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed + * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that + * this function has an unbounded cost, and using it as part of a state-changing function may render the function + * uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block. + */ + function values(UintSet storage set) internal view returns (uint256[] memory) { + bytes32[] memory store = _values(set._inner); + uint256[] memory result; + + /// @solidity memory-safe-assembly + assembly { + result := store + } + + return result; + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/access/AccessControl.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/access/AccessControl.sol new file mode 100644 index 00000000..4e388f9d --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/access/AccessControl.sol @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (access/AccessControl.sol) + +pragma solidity ^0.8.0; + +import "./IAccessControl.sol"; +import "../utils/Context.sol"; +import "../utils/Strings.sol"; +import "../utils/introspection/ERC165.sol"; + +/** + * @dev Contract module that allows children to implement role-based access + * control mechanisms. 
This is a lightweight version that doesn't allow enumerating role + * members except through off-chain means by accessing the contract event logs. Some + * applications may benefit from on-chain enumerability, for those cases see + * {AccessControlEnumerable}. + * + * Roles are referred to by their `bytes32` identifier. These should be exposed + * in the external API and be unique. The best way to achieve this is by + * using `public constant` hash digests: + * + * ``` + * bytes32 public constant MY_ROLE = keccak256("MY_ROLE"); + * ``` + * + * Roles can be used to represent a set of permissions. To restrict access to a + * function call, use {hasRole}: + * + * ``` + * function foo() public { + * require(hasRole(MY_ROLE, msg.sender)); + * ... + * } + * ``` + * + * Roles can be granted and revoked dynamically via the {grantRole} and + * {revokeRole} functions. Each role has an associated admin role, and only + * accounts that have a role's admin role can call {grantRole} and {revokeRole}. + * + * By default, the admin role for all roles is `DEFAULT_ADMIN_ROLE`, which means + * that only accounts with this role will be able to grant or revoke other + * roles. More complex role relationships can be created by using + * {_setRoleAdmin}. + * + * WARNING: The `DEFAULT_ADMIN_ROLE` is also its own admin: it has permission to + * grant and revoke this role. Extra precautions should be taken to secure + * accounts that have been granted it. + */ +abstract contract AccessControl is Context, IAccessControl, ERC165 { + struct RoleData { + mapping(address => bool) members; + bytes32 adminRole; + } + + mapping(bytes32 => RoleData) private _roles; + + bytes32 public constant DEFAULT_ADMIN_ROLE = 0x00; + + /** + * @dev Modifier that checks that an account has a specific role. Reverts + * with a standardized message including the required role. 
+ * + * The format of the revert reason is given by the following regular expression: + * + * /^AccessControl: account (0x[0-9a-f]{40}) is missing role (0x[0-9a-f]{64})$/ + * + * _Available since v4.1._ + */ + modifier onlyRole(bytes32 role) { + _checkRole(role); + _; + } + + /** + * @dev See {IERC165-supportsInterface}. + */ + function supportsInterface(bytes4 interfaceId) public view virtual override returns (bool) { + return interfaceId == type(IAccessControl).interfaceId || super.supportsInterface(interfaceId); + } + + /** + * @dev Returns `true` if `account` has been granted `role`. + */ + function hasRole(bytes32 role, address account) public view virtual override returns (bool) { + return _roles[role].members[account]; + } + + /** + * @dev Revert with a standard message if `_msgSender()` is missing `role`. + * Overriding this function changes the behavior of the {onlyRole} modifier. + * + * Format of the revert message is described in {_checkRole}. + * + * _Available since v4.6._ + */ + function _checkRole(bytes32 role) internal view virtual { + _checkRole(role, _msgSender()); + } + + /** + * @dev Revert with a standard message if `account` is missing `role`. + * + * The format of the revert reason is given by the following regular expression: + * + * /^AccessControl: account (0x[0-9a-f]{40}) is missing role (0x[0-9a-f]{64})$/ + */ + function _checkRole(bytes32 role, address account) internal view virtual { + if (!hasRole(role, account)) { + revert( + string( + abi.encodePacked( + "AccessControl: account ", + Strings.toHexString(account), + " is missing role ", + Strings.toHexString(uint256(role), 32) + ) + ) + ); + } + } + + /** + * @dev Returns the admin role that controls `role`. See {grantRole} and + * {revokeRole}. + * + * To change a role's admin, use {_setRoleAdmin}. + */ + function getRoleAdmin(bytes32 role) public view virtual override returns (bytes32) { + return _roles[role].adminRole; + } + + /** + * @dev Grants `role` to `account`. 
+ * + * If `account` had not been already granted `role`, emits a {RoleGranted} + * event. + * + * Requirements: + * + * - the caller must have ``role``'s admin role. + * + * May emit a {RoleGranted} event. + */ + function grantRole(bytes32 role, address account) public virtual override onlyRole(getRoleAdmin(role)) { + _grantRole(role, account); + } + + /** + * @dev Revokes `role` from `account`. + * + * If `account` had been granted `role`, emits a {RoleRevoked} event. + * + * Requirements: + * + * - the caller must have ``role``'s admin role. + * + * May emit a {RoleRevoked} event. + */ + function revokeRole(bytes32 role, address account) public virtual override onlyRole(getRoleAdmin(role)) { + _revokeRole(role, account); + } + + /** + * @dev Revokes `role` from the calling account. + * + * Roles are often managed via {grantRole} and {revokeRole}: this function's + * purpose is to provide a mechanism for accounts to lose their privileges + * if they are compromised (such as when a trusted device is misplaced). + * + * If the calling account had been revoked `role`, emits a {RoleRevoked} + * event. + * + * Requirements: + * + * - the caller must be `account`. + * + * May emit a {RoleRevoked} event. + */ + function renounceRole(bytes32 role, address account) public virtual override { + require(account == _msgSender(), "AccessControl: can only renounce roles for self"); + + _revokeRole(role, account); + } + + /** + * @dev Grants `role` to `account`. + * + * If `account` had not been already granted `role`, emits a {RoleGranted} + * event. Note that unlike {grantRole}, this function doesn't perform any + * checks on the calling account. + * + * May emit a {RoleGranted} event. + * + * [WARNING] + * ==== + * This function should only be called from the constructor when setting + * up the initial roles for the system. + * + * Using this function in any other way is effectively circumventing the admin + * system imposed by {AccessControl}. 
+ * ==== + * + * NOTE: This function is deprecated in favor of {_grantRole}. + */ + function _setupRole(bytes32 role, address account) internal virtual { + _grantRole(role, account); + } + + /** + * @dev Sets `adminRole` as ``role``'s admin role. + * + * Emits a {RoleAdminChanged} event. + */ + function _setRoleAdmin(bytes32 role, bytes32 adminRole) internal virtual { + bytes32 previousAdminRole = getRoleAdmin(role); + _roles[role].adminRole = adminRole; + emit RoleAdminChanged(role, previousAdminRole, adminRole); + } + + /** + * @dev Grants `role` to `account`. + * + * Internal function without access restriction. + * + * May emit a {RoleGranted} event. + */ + function _grantRole(bytes32 role, address account) internal virtual { + if (!hasRole(role, account)) { + _roles[role].members[account] = true; + emit RoleGranted(role, account, _msgSender()); + } + } + + /** + * @dev Revokes `role` from `account`. + * + * Internal function without access restriction. + * + * May emit a {RoleRevoked} event. + */ + function _revokeRole(bytes32 role, address account) internal virtual { + if (hasRole(role, account)) { + _roles[role].members[account] = false; + emit RoleRevoked(role, account, _msgSender()); + } + } +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/access/IAccessControl.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/access/IAccessControl.sol new file mode 100644 index 00000000..efb82a3c --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/access/IAccessControl.sol @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts v4.4.1 (access/IAccessControl.sol) + +pragma solidity ^0.8.0; + +/** + * @dev External interface of AccessControl declared to support ERC165 detection. 
+ */ +interface IAccessControl { + /** + * @dev Emitted when `newAdminRole` is set as ``role``'s admin role, replacing `previousAdminRole` + * + * `DEFAULT_ADMIN_ROLE` is the starting admin for all roles, despite + * {RoleAdminChanged} not being emitted signaling this. + * + * _Available since v3.1._ + */ + event RoleAdminChanged(bytes32 indexed role, bytes32 indexed previousAdminRole, bytes32 indexed newAdminRole); + + /** + * @dev Emitted when `account` is granted `role`. + * + * `sender` is the account that originated the contract call, an admin role + * bearer except when using {AccessControl-_setupRole}. + */ + event RoleGranted(bytes32 indexed role, address indexed account, address indexed sender); + + /** + * @dev Emitted when `account` is revoked `role`. + * + * `sender` is the account that originated the contract call: + * - if using `revokeRole`, it is the admin role bearer + * - if using `renounceRole`, it is the role bearer (i.e. `account`) + */ + event RoleRevoked(bytes32 indexed role, address indexed account, address indexed sender); + + /** + * @dev Returns `true` if `account` has been granted `role`. + */ + function hasRole(bytes32 role, address account) external view returns (bool); + + /** + * @dev Returns the admin role that controls `role`. See {grantRole} and + * {revokeRole}. + * + * To change a role's admin, use {AccessControl-_setRoleAdmin}. + */ + function getRoleAdmin(bytes32 role) external view returns (bytes32); + + /** + * @dev Grants `role` to `account`. + * + * If `account` had not been already granted `role`, emits a {RoleGranted} + * event. + * + * Requirements: + * + * - the caller must have ``role``'s admin role. + */ + function grantRole(bytes32 role, address account) external; + + /** + * @dev Revokes `role` from `account`. + * + * If `account` had been granted `role`, emits a {RoleRevoked} event. + * + * Requirements: + * + * - the caller must have ``role``'s admin role. 
+ */ + function revokeRole(bytes32 role, address account) external; + + /** + * @dev Revokes `role` from the calling account. + * + * Roles are often managed via {grantRole} and {revokeRole}: this function's + * purpose is to provide a mechanism for accounts to lose their privileges + * if they are compromised (such as when a trusted device is misplaced). + * + * If the calling account had been granted `role`, emits a {RoleRevoked} + * event. + * + * Requirements: + * + * - the caller must be `account`. + */ + function renounceRole(bytes32 role, address account) external; +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol new file mode 100644 index 00000000..91bb8f82 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC165.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts v4.4.1 (interfaces/IERC165.sol) + +pragma solidity ^0.8.0; + +import "../utils/introspection/IERC165.sol"; \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC20.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC20.sol new file mode 100644 index 00000000..7c95dfca --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/IERC20.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts v4.4.1 (interfaces/IERC20.sol) + +pragma solidity ^0.8.0; + +import "../token/ERC20/IERC20.sol"; \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/draft-IERC20Permit.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/draft-IERC20Permit.sol new file mode 100644 index 00000000..84ac72c7 --- /dev/null +++ 
b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/interfaces/draft-IERC20Permit.sol @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/** + * @dev Standard ERC20 Errors + * Interface of the https://eips.ethereum.org/EIPS/eip-6093[ERC-6093] custom errors for ERC20 tokens. + */ +interface IERC20Errors { + /** + * @dev Indicates an error related to the current `balance` of a `sender`. Used in transfers. + * @param sender Address whose tokens are being transferred. + * @param balance Current balance for the interacting account. + * @param needed Minimum amount required to perform a transfer. + */ + error ERC20InsufficientBalance(address sender, uint256 balance, uint256 needed); + + /** + * @dev Indicates a failure with the token `sender`. Used in transfers. + * @param sender Address whose tokens are being transferred. + */ + error ERC20InvalidSender(address sender); + + /** + * @dev Indicates a failure with the token `receiver`. Used in transfers. + * @param receiver Address to which tokens are being transferred. + */ + error ERC20InvalidReceiver(address receiver); + + /** + * @dev Indicates a failure with the `spender`’s `allowance`. Used in transfers. + * @param spender Address that may be allowed to operate on tokens without being their owner. + * @param allowance Amount of tokens a `spender` is allowed to operate with. + * @param needed Minimum amount required to perform a transfer. + */ + error ERC20InsufficientAllowance(address spender, uint256 allowance, uint256 needed); + + /** + * @dev Indicates a failure with the `approver` of a token to be approved. Used in approvals. + * @param approver Address initiating an approval operation. + */ + error ERC20InvalidApprover(address approver); + + /** + * @dev Indicates a failure with the `spender` to be approved. Used in approvals. + * @param spender Address that may be allowed to operate on tokens without being their owner. 
+ */ + error ERC20InvalidSpender(address spender); +} + +/** + * @dev Standard ERC721 Errors + * Interface of the https://eips.ethereum.org/EIPS/eip-6093[ERC-6093] custom errors for ERC721 tokens. + */ +interface IERC721Errors { + /** + * @dev Indicates that an address can't be an owner. For example, `address(0)` is a forbidden owner in EIP-20. + * Used in balance queries. + * @param owner Address of the current owner of a token. + */ + error ERC721InvalidOwner(address owner); + + /** + * @dev Indicates a `tokenId` whose `owner` is the zero address. + * @param tokenId Identifier number of a token. + */ + error ERC721NonexistentToken(uint256 tokenId); + + /** + * @dev Indicates an error related to the ownership over a particular token. Used in transfers. + * @param sender Address whose tokens are being transferred. + * @param tokenId Identifier number of a token. + * @param owner Address of the current owner of a token. + */ + error ERC721IncorrectOwner(address sender, uint256 tokenId, address owner); + + /** + * @dev Indicates a failure with the token `sender`. Used in transfers. + * @param sender Address whose tokens are being transferred. + */ + error ERC721InvalidSender(address sender); + + /** + * @dev Indicates a failure with the token `receiver`. Used in transfers. + * @param receiver Address to which tokens are being transferred. + */ + error ERC721InvalidReceiver(address receiver); + + /** + * @dev Indicates a failure with the `operator`’s approval. Used in transfers. + * @param operator Address that may be allowed to operate on tokens without being their owner. + * @param tokenId Identifier number of a token. + */ + error ERC721InsufficientApproval(address operator, uint256 tokenId); + + /** + * @dev Indicates a failure with the `approver` of a token to be approved. Used in approvals. + * @param approver Address initiating an approval operation. 
+ */ + error ERC721InvalidApprover(address approver); + + /** + * @dev Indicates a failure with the `operator` to be approved. Used in approvals. + * @param operator Address that may be allowed to operate on tokens without being their owner. + */ + error ERC721InvalidOperator(address operator); +} + +/** + * @dev Standard ERC1155 Errors + * Interface of the https://eips.ethereum.org/EIPS/eip-6093[ERC-6093] custom errors for ERC1155 tokens. + */ +interface IERC1155Errors { + /** + * @dev Indicates an error related to the current `balance` of a `sender`. Used in transfers. + * @param sender Address whose tokens are being transferred. + * @param balance Current balance for the interacting account. + * @param needed Minimum amount required to perform a transfer. + * @param tokenId Identifier number of a token. + */ + error ERC1155InsufficientBalance(address sender, uint256 balance, uint256 needed, uint256 tokenId); + + /** + * @dev Indicates a failure with the token `sender`. Used in transfers. + * @param sender Address whose tokens are being transferred. + */ + error ERC1155InvalidSender(address sender); + + /** + * @dev Indicates a failure with the token `receiver`. Used in transfers. + * @param receiver Address to which tokens are being transferred. + */ + error ERC1155InvalidReceiver(address receiver); + + /** + * @dev Indicates a failure with the `operator`’s approval. Used in transfers. + * @param operator Address that may be allowed to operate on tokens without being their owner. + * @param owner Address of the current owner of a token. + */ + error ERC1155MissingApprovalForAll(address operator, address owner); + + /** + * @dev Indicates a failure with the `approver` of a token to be approved. Used in approvals. + * @param approver Address initiating an approval operation. + */ + error ERC1155InvalidApprover(address approver); + + /** + * @dev Indicates a failure with the `operator` to be approved. Used in approvals. 
+ * @param operator Address that may be allowed to operate on tokens without being their owner. + */ + error ERC1155InvalidOperator(address operator); + + /** + * @dev Indicates an array length mismatch between ids and values in a safeBatchTransferFrom operation. + * Used in batch transfers. + * @param idsLength Length of the array of token identifiers + * @param valuesLength Length of the array of token amounts + */ + error ERC1155InvalidArrayLength(uint256 idsLength, uint256 valuesLength); +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/mocks/ERC20Mock.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/mocks/ERC20Mock.sol new file mode 100644 index 00000000..4db5eb2e --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/mocks/ERC20Mock.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +import "../token/ERC20/ERC20.sol"; + +// mock class using ERC20 +contract ERC20Mock is ERC20 { + constructor( + string memory name, + string memory symbol, + address initialAccount, + uint256 initialBalance + ) payable ERC20(name, symbol) { + _mint(initialAccount, initialBalance); + } + + function mint(address account, uint256 amount) public { + _mint(account, amount); + } + + function burn(address account, uint256 amount) public { + _burn(account, amount); + } + + function transferInternal( + address from, + address to, + uint256 value + ) public { + _transfer(from, to, value); + } + + function approveInternal( + address owner, + address spender, + uint256 value + ) public { + _approve(owner, spender, value); + } +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/security/Pausable.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/security/Pausable.sol new file mode 100644 index 00000000..8cc49ec1 --- /dev/null +++ 
b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/security/Pausable.sol @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.7.0) (security/Pausable.sol) + +pragma solidity ^0.8.0; + +import "../utils/Context.sol"; + +/** + * @dev Contract module which allows children to implement an emergency stop + * mechanism that can be triggered by an authorized account. + * + * This module is used through inheritance. It will make available the + * modifiers `whenNotPaused` and `whenPaused`, which can be applied to + * the functions of your contract. Note that they will not be pausable by + * simply including this module, only once the modifiers are put in place. + */ +abstract contract Pausable is Context { + /** + * @dev Emitted when the pause is triggered by `account`. + */ + event Paused(address account); + + /** + * @dev Emitted when the pause is lifted by `account`. + */ + event Unpaused(address account); + + bool private _paused; + + /** + * @dev Initializes the contract in unpaused state. + */ + constructor() { + _paused = false; + } + + /** + * @dev Modifier to make a function callable only when the contract is not paused. + * + * Requirements: + * + * - The contract must not be paused. + */ + modifier whenNotPaused() { + _requireNotPaused(); + _; + } + + /** + * @dev Modifier to make a function callable only when the contract is paused. + * + * Requirements: + * + * - The contract must be paused. + */ + modifier whenPaused() { + _requirePaused(); + _; + } + + /** + * @dev Returns true if the contract is paused, and false otherwise. + */ + function paused() public view virtual returns (bool) { + return _paused; + } + + /** + * @dev Throws if the contract is paused. + */ + function _requireNotPaused() internal view virtual { + require(!paused(), "Pausable: paused"); + } + + /** + * @dev Throws if the contract is not paused. 
+ */ + function _requirePaused() internal view virtual { + require(paused(), "Pausable: not paused"); + } + + /** + * @dev Triggers stopped state. + * + * Requirements: + * + * - The contract must not be paused. + */ + function _pause() internal virtual whenNotPaused { + _paused = true; + emit Paused(_msgSender()); + } + + /** + * @dev Returns to normal state. + * + * Requirements: + * + * - The contract must be paused. + */ + function _unpause() internal virtual whenPaused { + _paused = false; + emit Unpaused(_msgSender()); + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/ERC20.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/ERC20.sol new file mode 100644 index 00000000..0e9b0776 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/ERC20.sol @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (token/ERC20/ERC20.sol) + +pragma solidity ^0.8.0; + +import "./IERC20.sol"; +import "./extensions/IERC20Metadata.sol"; +import "../../utils/Context.sol"; + +/** + * @dev Implementation of the {IERC20} interface. + * + * This implementation is agnostic to the way tokens are created. This means + * that a supply mechanism has to be added in a derived contract using {_mint}. + * For a generic mechanism see {ERC20PresetMinterPauser}. + * + * TIP: For a detailed writeup see our guide + * https://forum.openzeppelin.com/t/how-to-implement-erc20-supply-mechanisms/226[How + * to implement supply mechanisms]. + * + * We have followed general OpenZeppelin Contracts guidelines: functions revert + * instead returning `false` on failure. This behavior is nonetheless + * conventional and does not conflict with the expectations of ERC20 + * applications. + * + * Additionally, an {Approval} event is emitted on calls to {transferFrom}. 
+ * This allows applications to reconstruct the allowance for all accounts just + * by listening to said events. Other implementations of the EIP may not emit + * these events, as it isn't required by the specification. + * + * Finally, the non-standard {decreaseAllowance} and {increaseAllowance} + * functions have been added to mitigate the well-known issues around setting + * allowances. See {IERC20-approve}. + */ +contract ERC20 is Context, IERC20, IERC20Metadata { + mapping(address => uint256) private _balances; + + mapping(address => mapping(address => uint256)) private _allowances; + + uint256 private _totalSupply; + + string private _name; + string private _symbol; + + /** + * @dev Sets the values for {name} and {symbol}. + * + * The default value of {decimals} is 18. To select a different value for + * {decimals} you should overload it. + * + * All two of these values are immutable: they can only be set once during + * construction. + */ + constructor(string memory name_, string memory symbol_) { + _name = name_; + _symbol = symbol_; + } + + /** + * @dev Returns the name of the token. + */ + function name() public view virtual override returns (string memory) { + return _name; + } + + /** + * @dev Returns the symbol of the token, usually a shorter version of the + * name. + */ + function symbol() public view virtual override returns (string memory) { + return _symbol; + } + + /** + * @dev Returns the number of decimals used to get its user representation. + * For example, if `decimals` equals `2`, a balance of `505` tokens should + * be displayed to a user as `5.05` (`505 / 10 ** 2`). + * + * Tokens usually opt for a value of 18, imitating the relationship between + * Ether and Wei. This is the value {ERC20} uses, unless this function is + * overridden; + * + * NOTE: This information is only used for _display_ purposes: it in + * no way affects any of the arithmetic of the contract, including + * {IERC20-balanceOf} and {IERC20-transfer}. 
+ */ + function decimals() public view virtual override returns (uint8) { + return 18; + } + + /** + * @dev See {IERC20-totalSupply}. + */ + function totalSupply() public view virtual override returns (uint256) { + return _totalSupply; + } + + /** + * @dev See {IERC20-balanceOf}. + */ + function balanceOf(address account) public view virtual override returns (uint256) { + return _balances[account]; + } + + /** + * @dev See {IERC20-transfer}. + * + * Requirements: + * + * - `to` cannot be the zero address. + * - the caller must have a balance of at least `amount`. + */ + function transfer(address to, uint256 amount) public virtual override returns (bool) { + address owner = _msgSender(); + _transfer(owner, to, amount); + return true; + } + + /** + * @dev See {IERC20-allowance}. + */ + function allowance(address owner, address spender) public view virtual override returns (uint256) { + return _allowances[owner][spender]; + } + + /** + * @dev See {IERC20-approve}. + * + * NOTE: If `amount` is the maximum `uint256`, the allowance is not updated on + * `transferFrom`. This is semantically equivalent to an infinite approval. + * + * Requirements: + * + * - `spender` cannot be the zero address. + */ + function approve(address spender, uint256 amount) public virtual override returns (bool) { + address owner = _msgSender(); + _approve(owner, spender, amount); + return true; + } + + /** + * @dev See {IERC20-transferFrom}. + * + * Emits an {Approval} event indicating the updated allowance. This is not + * required by the EIP. See the note at the beginning of {ERC20}. + * + * NOTE: Does not update the allowance if the current allowance + * is the maximum `uint256`. + * + * Requirements: + * + * - `from` and `to` cannot be the zero address. + * - `from` must have a balance of at least `amount`. + * - the caller must have allowance for ``from``'s tokens of at least + * `amount`. 
+ */ + function transferFrom(address from, address to, uint256 amount) public virtual override returns (bool) { + address spender = _msgSender(); + _spendAllowance(from, spender, amount); + _transfer(from, to, amount); + return true; + } + + /** + * @dev Atomically increases the allowance granted to `spender` by the caller. + * + * This is an alternative to {approve} that can be used as a mitigation for + * problems described in {IERC20-approve}. + * + * Emits an {Approval} event indicating the updated allowance. + * + * Requirements: + * + * - `spender` cannot be the zero address. + */ + function increaseAllowance(address spender, uint256 addedValue) public virtual returns (bool) { + address owner = _msgSender(); + _approve(owner, spender, allowance(owner, spender) + addedValue); + return true; + } + + /** + * @dev Atomically decreases the allowance granted to `spender` by the caller. + * + * This is an alternative to {approve} that can be used as a mitigation for + * problems described in {IERC20-approve}. + * + * Emits an {Approval} event indicating the updated allowance. + * + * Requirements: + * + * - `spender` cannot be the zero address. + * - `spender` must have allowance for the caller of at least + * `subtractedValue`. + */ + function decreaseAllowance(address spender, uint256 subtractedValue) public virtual returns (bool) { + address owner = _msgSender(); + uint256 currentAllowance = allowance(owner, spender); + require(currentAllowance >= subtractedValue, "ERC20: decreased allowance below zero"); + unchecked { + _approve(owner, spender, currentAllowance - subtractedValue); + } + + return true; + } + + /** + * @dev Moves `amount` of tokens from `from` to `to`. + * + * This internal function is equivalent to {transfer}, and can be used to + * e.g. implement automatic token fees, slashing mechanisms, etc. + * + * Emits a {Transfer} event. + * + * Requirements: + * + * - `from` cannot be the zero address. + * - `to` cannot be the zero address. 
+ * - `from` must have a balance of at least `amount`. + */ + function _transfer(address from, address to, uint256 amount) internal virtual { + require(from != address(0), "ERC20: transfer from the zero address"); + require(to != address(0), "ERC20: transfer to the zero address"); + + _beforeTokenTransfer(from, to, amount); + + uint256 fromBalance = _balances[from]; + require(fromBalance >= amount, "ERC20: transfer amount exceeds balance"); + unchecked { + _balances[from] = fromBalance - amount; + // Overflow not possible: the sum of all balances is capped by totalSupply, and the sum is preserved by + // decrementing then incrementing. + _balances[to] += amount; + } + + emit Transfer(from, to, amount); + + _afterTokenTransfer(from, to, amount); + } + + /** @dev Creates `amount` tokens and assigns them to `account`, increasing + * the total supply. + * + * Emits a {Transfer} event with `from` set to the zero address. + * + * Requirements: + * + * - `account` cannot be the zero address. + */ + function _mint(address account, uint256 amount) internal virtual { + require(account != address(0), "ERC20: mint to the zero address"); + + _beforeTokenTransfer(address(0), account, amount); + + _totalSupply += amount; + unchecked { + // Overflow not possible: balance + amount is at most totalSupply + amount, which is checked above. + _balances[account] += amount; + } + emit Transfer(address(0), account, amount); + + _afterTokenTransfer(address(0), account, amount); + } + + /** + * @dev Destroys `amount` tokens from `account`, reducing the + * total supply. + * + * Emits a {Transfer} event with `to` set to the zero address. + * + * Requirements: + * + * - `account` cannot be the zero address. + * - `account` must have at least `amount` tokens. 
+ */ + function _burn(address account, uint256 amount) internal virtual { + require(account != address(0), "ERC20: burn from the zero address"); + + _beforeTokenTransfer(account, address(0), amount); + + uint256 accountBalance = _balances[account]; + require(accountBalance >= amount, "ERC20: burn amount exceeds balance"); + unchecked { + _balances[account] = accountBalance - amount; + // Overflow not possible: amount <= accountBalance <= totalSupply. + _totalSupply -= amount; + } + + emit Transfer(account, address(0), amount); + + _afterTokenTransfer(account, address(0), amount); + } + + /** + * @dev Sets `amount` as the allowance of `spender` over the `owner` s tokens. + * + * This internal function is equivalent to `approve`, and can be used to + * e.g. set automatic allowances for certain subsystems, etc. + * + * Emits an {Approval} event. + * + * Requirements: + * + * - `owner` cannot be the zero address. + * - `spender` cannot be the zero address. + */ + function _approve(address owner, address spender, uint256 amount) internal virtual { + require(owner != address(0), "ERC20: approve from the zero address"); + require(spender != address(0), "ERC20: approve to the zero address"); + + _allowances[owner][spender] = amount; + emit Approval(owner, spender, amount); + } + + /** + * @dev Updates `owner` s allowance for `spender` based on spent `amount`. + * + * Does not update the allowance amount in case of infinite allowance. + * Revert if not enough allowance is available. + * + * Might emit an {Approval} event. + */ + function _spendAllowance(address owner, address spender, uint256 amount) internal virtual { + uint256 currentAllowance = allowance(owner, spender); + if (currentAllowance != type(uint256).max) { + require(currentAllowance >= amount, "ERC20: insufficient allowance"); + unchecked { + _approve(owner, spender, currentAllowance - amount); + } + } + } + + /** + * @dev Hook that is called before any transfer of tokens. 
This includes + * minting and burning. + * + * Calling conditions: + * + * - when `from` and `to` are both non-zero, `amount` of ``from``'s tokens + * will be transferred to `to`. + * - when `from` is zero, `amount` tokens will be minted for `to`. + * - when `to` is zero, `amount` of ``from``'s tokens will be burned. + * - `from` and `to` are never both zero. + * + * To learn more about hooks, head to xref:ROOT:extending-contracts.adoc#using-hooks[Using Hooks]. + */ + function _beforeTokenTransfer(address from, address to, uint256 amount) internal virtual {} + + /** + * @dev Hook that is called after any transfer of tokens. This includes + * minting and burning. + * + * Calling conditions: + * + * - when `from` and `to` are both non-zero, `amount` of ``from``'s tokens + * has been transferred to `to`. + * - when `from` is zero, `amount` tokens have been minted for `to`. + * - when `to` is zero, `amount` of ``from``'s tokens have been burned. + * - `from` and `to` are never both zero. + * + * To learn more about hooks, head to xref:ROOT:extending-contracts.adoc#using-hooks[Using Hooks]. + */ + function _afterTokenTransfer(address from, address to, uint256 amount) internal virtual {} +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol new file mode 100644 index 00000000..536ba0b9 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.6.0) (token/ERC20/IERC20.sol) + +pragma solidity ^0.8.0; + +/** + * @dev Interface of the ERC20 standard as defined in the EIP. + */ +interface IERC20 { + /** + * @dev Emitted when `value` tokens are moved from one account (`from`) to + * another (`to`). + * + * Note that `value` may be zero. 
+ */ + event Transfer(address indexed from, address indexed to, uint256 value); + + /** + * @dev Emitted when the allowance of a `spender` for an `owner` is set by + * a call to {approve}. `value` is the new allowance. + */ + event Approval(address indexed owner, address indexed spender, uint256 value); + + /** + * @dev Returns the amount of tokens in existence. + */ + function totalSupply() external view returns (uint256); + + /** + * @dev Returns the amount of tokens owned by `account`. + */ + function balanceOf(address account) external view returns (uint256); + + /** + * @dev Moves `amount` tokens from the caller's account to `to`. + * + * Returns a boolean value indicating whether the operation succeeded. + * + * Emits a {Transfer} event. + */ + function transfer(address to, uint256 amount) external returns (bool); + + /** + * @dev Returns the remaining number of tokens that `spender` will be + * allowed to spend on behalf of `owner` through {transferFrom}. This is + * zero by default. + * + * This value changes when {approve} or {transferFrom} are called. + */ + function allowance(address owner, address spender) external view returns (uint256); + + /** + * @dev Sets `amount` as the allowance of `spender` over the caller's tokens. + * + * Returns a boolean value indicating whether the operation succeeded. + * + * IMPORTANT: Beware that changing an allowance with this method brings the risk + * that someone may use both the old and the new allowance by unfortunate + * transaction ordering. One possible solution to mitigate this race + * condition is to first reduce the spender's allowance to 0 and set the + * desired value afterwards: + * https://github.com/ethereum/EIPs/issues/20#issuecomment-263524729 + * + * Emits an {Approval} event. + */ + function approve(address spender, uint256 amount) external returns (bool); + + /** + * @dev Moves `amount` tokens from `from` to `to` using the + * allowance mechanism. 
`amount` is then deducted from the caller's + * allowance. + * + * Returns a boolean value indicating whether the operation succeeded. + * + * Emits a {Transfer} event. + */ + function transferFrom(address from, address to, uint256 amount) external returns (bool); +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/ERC20Burnable.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/ERC20Burnable.sol new file mode 100644 index 00000000..e7752729 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/ERC20Burnable.sol @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.5.0) (token/ERC20/extensions/ERC20Burnable.sol) + +pragma solidity ^0.8.0; + +import "../ERC20.sol"; +import "../../../utils/Context.sol"; + +/** + * @dev Extension of {ERC20} that allows token holders to destroy both their own + * tokens and those that they have an allowance for, in a way that can be + * recognized off-chain (via event analysis). + */ +abstract contract ERC20Burnable is Context, ERC20 { + /** + * @dev Destroys `amount` tokens from the caller. + * + * See {ERC20-_burn}. + */ + function burn(uint256 amount) public virtual { + _burn(_msgSender(), amount); + } + + /** + * @dev Destroys `amount` tokens from `account`, deducting from the caller's + * allowance. + * + * See {ERC20-_burn} and {ERC20-allowance}. + * + * Requirements: + * + * - the caller must have allowance for ``accounts``'s tokens of at least + * `amount`. 
+ */ + function burnFrom(address account, uint256 amount) public virtual { + _spendAllowance(account, _msgSender(), amount); + _burn(account, amount); + } +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/IERC20Metadata.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/IERC20Metadata.sol new file mode 100644 index 00000000..6e29892a --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/IERC20Metadata.sol @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts v4.4.1 (token/ERC20/extensions/IERC20Metadata.sol) + +pragma solidity ^0.8.0; + +import "../IERC20.sol"; + +/** + * @dev Interface for the optional metadata functions from the ERC20 standard. + * + * _Available since v4.1._ + */ +interface IERC20Metadata is IERC20 { + /** + * @dev Returns the name of the token. + */ + function name() external view returns (string memory); + + /** + * @dev Returns the symbol of the token. + */ + function symbol() external view returns (string memory); + + /** + * @dev Returns the decimals places of the token. 
+ */ + function decimals() external view returns (uint8); +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/draft-ERC20Permit.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/draft-ERC20Permit.sol new file mode 100644 index 00000000..3c29ed05 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/draft-ERC20Permit.sol @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (token/ERC20/extensions/draft-ERC20Permit.sol) + +pragma solidity ^0.8.0; + +import "./draft-IERC20Permit.sol"; +import "../ERC20.sol"; +import "../../../utils/cryptography/ECDSA.sol"; +import "../../../utils/cryptography/EIP712.sol"; +import "../../../utils/Counters.sol"; + +/** + * @dev Implementation of the ERC20 Permit extension allowing approvals to be made via signatures, as defined in + * https://eips.ethereum.org/EIPS/eip-2612[EIP-2612]. + * + * Adds the {permit} method, which can be used to change an account's ERC20 allowance (see {IERC20-allowance}) by + * presenting a message signed by the account. By not relying on `{IERC20-approve}`, the token holder account doesn't + * need to send a transaction, and thus is not required to hold Ether at all. + * + * _Available since v3.4._ + */ +abstract contract ERC20Permit is ERC20, IERC20Permit, EIP712 { + using Counters for Counters.Counter; + + mapping(address => Counters.Counter) private _nonces; + + // solhint-disable-next-line var-name-mixedcase + bytes32 private constant _PERMIT_TYPEHASH = + keccak256("Permit(address owner,address spender,uint256 value,uint256 nonce,uint256 deadline)"); + /** + * @dev In previous versions `_PERMIT_TYPEHASH` was declared as `immutable`. + * However, to ensure consistency with the upgradeable transpiler, we will continue + * to reserve a slot. 
+ * @custom:oz-renamed-from _PERMIT_TYPEHASH + */ + // solhint-disable-next-line var-name-mixedcase + bytes32 private _PERMIT_TYPEHASH_DEPRECATED_SLOT; + + /** + * @dev Initializes the {EIP712} domain separator using the `name` parameter, and setting `version` to `"1"`. + * + * It's a good idea to use the same `name` that is defined as the ERC20 token name. + */ + constructor(string memory name) EIP712(name, "1") {} + + /** + * @dev See {IERC20Permit-permit}. + */ + function permit( + address owner, + address spender, + uint256 value, + uint256 deadline, + uint8 v, + bytes32 r, + bytes32 s + ) public virtual override { + require(block.timestamp <= deadline, "ERC20Permit: expired deadline"); + + bytes32 structHash = keccak256(abi.encode(_PERMIT_TYPEHASH, owner, spender, value, _useNonce(owner), deadline)); + + bytes32 hash = _hashTypedDataV4(structHash); + + address signer = ECDSA.recover(hash, v, r, s); + require(signer == owner, "ERC20Permit: invalid signature"); + + _approve(owner, spender, value); + } + + /** + * @dev See {IERC20Permit-nonces}. + */ + function nonces(address owner) public view virtual override returns (uint256) { + return _nonces[owner].current(); + } + + /** + * @dev See {IERC20Permit-DOMAIN_SEPARATOR}. + */ + // solhint-disable-next-line func-name-mixedcase + function DOMAIN_SEPARATOR() external view override returns (bytes32) { + return _domainSeparatorV4(); + } + + /** + * @dev "Consume a nonce": return the current value and increment. 
+ * + * _Available since v4.1._ + */ + function _useNonce(address owner) internal virtual returns (uint256 current) { + Counters.Counter storage nonce = _nonces[owner]; + current = nonce.current(); + nonce.increment(); + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/draft-IERC20Permit.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/draft-IERC20Permit.sol new file mode 100644 index 00000000..b14bbfe2 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/draft-IERC20Permit.sol @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts v4.4.1 (token/ERC20/extensions/draft-IERC20Permit.sol) + +pragma solidity ^0.8.0; + +/** + * @dev Interface of the ERC20 Permit extension allowing approvals to be made via signatures, as defined in + * https://eips.ethereum.org/EIPS/eip-2612[EIP-2612]. + * + * Adds the {permit} method, which can be used to change an account's ERC20 allowance (see {IERC20-allowance}) by + * presenting a message signed by the account. By not relying on {IERC20-approve}, the token holder account doesn't + * need to send a transaction, and thus is not required to hold Ether at all. + */ +interface IERC20Permit { + /** + * @dev Sets `value` as the allowance of `spender` over ``owner``'s tokens, + * given ``owner``'s signed approval. + * + * IMPORTANT: The same issues {IERC20-approve} has related to transaction + * ordering also apply here. + * + * Emits an {Approval} event. + * + * Requirements: + * + * - `spender` cannot be the zero address. + * - `deadline` must be a timestamp in the future. + * - `v`, `r` and `s` must be a valid `secp256k1` signature from `owner` + * over the EIP712-formatted function arguments. + * - the signature must use ``owner``'s current nonce (see {nonces}). 
+ * + * For more information on the signature format, see the + * https://eips.ethereum.org/EIPS/eip-2612#specification[relevant EIP + * section]. + */ + function permit( + address owner, + address spender, + uint256 value, + uint256 deadline, + uint8 v, + bytes32 r, + bytes32 s + ) external; + + /** + * @dev Returns the current nonce for `owner`. This value must be + * included whenever a signature is generated for {permit}. + * + * Every successful call to {permit} increases ``owner``'s nonce by one. This + * prevents a signature from being used multiple times. + */ + function nonces(address owner) external view returns (uint256); + + /** + * @dev Returns the domain separator used in the encoding of the signature for {permit}, as defined by {EIP712}. + */ + // solhint-disable-next-line func-name-mixedcase + function DOMAIN_SEPARATOR() external view returns (bytes32); +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol new file mode 100644 index 00000000..2a6939ed --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (token/ERC20/utils/SafeERC20.sol) + +pragma solidity ^0.8.0; + +import "../IERC20.sol"; +import "../extensions/draft-IERC20Permit.sol"; +import "../../../utils/Address.sol"; + +/** + * @title SafeERC20 + * @dev Wrappers around ERC20 operations that throw on failure (when the token + * contract returns false). Tokens that return no value (and instead revert or + * throw on failure) are also supported, non-reverting calls are assumed to be + * successful. + * To use this library you can add a `using SafeERC20 for IERC20;` statement to your contract, + * which allows you to call the safe operations as `token.safeTransfer(...)`, etc. 
+ */ +library SafeERC20 { + using Address for address; + + function safeTransfer(IERC20 token, address to, uint256 value) internal { + _callOptionalReturn(token, abi.encodeWithSelector(token.transfer.selector, to, value)); + } + + function safeTransferFrom(IERC20 token, address from, address to, uint256 value) internal { + _callOptionalReturn(token, abi.encodeWithSelector(token.transferFrom.selector, from, to, value)); + } + + /** + * @dev Deprecated. This function has issues similar to the ones found in + * {IERC20-approve}, and its usage is discouraged. + * + * Whenever possible, use {safeIncreaseAllowance} and + * {safeDecreaseAllowance} instead. + */ + function safeApprove(IERC20 token, address spender, uint256 value) internal { + // safeApprove should only be called when setting an initial allowance, + // or when resetting it to zero. To increase and decrease it, use + // 'safeIncreaseAllowance' and 'safeDecreaseAllowance' + require( + (value == 0) || (token.allowance(address(this), spender) == 0), + "SafeERC20: approve from non-zero to non-zero allowance" + ); + _callOptionalReturn(token, abi.encodeWithSelector(token.approve.selector, spender, value)); + } + + function safeIncreaseAllowance(IERC20 token, address spender, uint256 value) internal { + uint256 newAllowance = token.allowance(address(this), spender) + value; + _callOptionalReturn(token, abi.encodeWithSelector(token.approve.selector, spender, newAllowance)); + } + + function safeDecreaseAllowance(IERC20 token, address spender, uint256 value) internal { + unchecked { + uint256 oldAllowance = token.allowance(address(this), spender); + require(oldAllowance >= value, "SafeERC20: decreased allowance below zero"); + uint256 newAllowance = oldAllowance - value; + _callOptionalReturn(token, abi.encodeWithSelector(token.approve.selector, spender, newAllowance)); + } + } + + function safePermit( + IERC20Permit token, + address owner, + address spender, + uint256 value, + uint256 deadline, + uint8 v, + bytes32 
r, + bytes32 s + ) internal { + uint256 nonceBefore = token.nonces(owner); + token.permit(owner, spender, value, deadline, v, r, s); + uint256 nonceAfter = token.nonces(owner); + require(nonceAfter == nonceBefore + 1, "SafeERC20: permit did not succeed"); + } + + /** + * @dev Imitates a Solidity high-level call (i.e. a regular function call to a contract), relaxing the requirement + * on the return value: the return value is optional (but if data is returned, it must not be false). + * @param token The token targeted by the call. + * @param data The call data (encoded using abi.encode or one of its variants). + */ + function _callOptionalReturn(IERC20 token, bytes memory data) private { + // We need to perform a low level call here, to bypass Solidity's return data size checking mechanism, since + // we're implementing it ourselves. We use {Address-functionCall} to perform this call, which verifies that + // the target address contains contract code and also asserts for success in the low-level call. + + bytes memory returndata = address(token).functionCall(data, "SafeERC20: low-level call failed"); + if (returndata.length > 0) { + // Return data is optional + require(abi.decode(returndata, (bool)), "SafeERC20: ERC20 operation did not succeed"); + } + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Address.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Address.sol new file mode 100644 index 00000000..d966c06d --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Address.sol @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (utils/Address.sol) + +pragma solidity ^0.8.1; + +/** + * @dev Collection of functions related to the address type + */ +library Address { + /** + * @dev Returns true if `account` is a contract. 
+ * + * [IMPORTANT] + * ==== + * It is unsafe to assume that an address for which this function returns + * false is an externally-owned account (EOA) and not a contract. + * + * Among others, `isContract` will return false for the following + * types of addresses: + * + * - an externally-owned account + * - a contract in construction + * - an address where a contract will be created + * - an address where a contract lived, but was destroyed + * ==== + * + * [IMPORTANT] + * ==== + * You shouldn't rely on `isContract` to protect against flash loan attacks! + * + * Preventing calls from contracts is highly discouraged. It breaks composability, breaks support for smart wallets + * like Gnosis Safe, and does not provide security since it can be circumvented by calling from a contract + * constructor. + * ==== + */ + function isContract(address account) internal view returns (bool) { + // This method relies on extcodesize/address.code.length, which returns 0 + // for contracts in construction, since the code is only stored at the end + // of the constructor execution. + + return account.code.length > 0; + } + + /** + * @dev Replacement for Solidity's `transfer`: sends `amount` wei to + * `recipient`, forwarding all available gas and reverting on errors. + * + * https://eips.ethereum.org/EIPS/eip-1884[EIP1884] increases the gas cost + * of certain opcodes, possibly making contracts go over the 2300 gas limit + * imposed by `transfer`, making them unable to receive funds via + * `transfer`. {sendValue} removes this limitation. + * + * https://diligence.consensys.net/posts/2019/09/stop-using-soliditys-transfer-now/[Learn more]. + * + * IMPORTANT: because control is transferred to `recipient`, care must be + * taken to not create reentrancy vulnerabilities. Consider using + * {ReentrancyGuard} or the + * https://solidity.readthedocs.io/en/v0.5.11/security-considerations.html#use-the-checks-effects-interactions-pattern[checks-effects-interactions pattern]. 
+ */ + function sendValue(address payable recipient, uint256 amount) internal { + require(address(this).balance >= amount, "Address: insufficient balance"); + + (bool success, ) = recipient.call{value: amount}(""); + require(success, "Address: unable to send value, recipient may have reverted"); + } + + /** + * @dev Performs a Solidity function call using a low level `call`. A + * plain `call` is an unsafe replacement for a function call: use this + * function instead. + * + * If `target` reverts with a revert reason, it is bubbled up by this + * function (like regular Solidity function calls). + * + * Returns the raw returned data. To convert to the expected return value, + * use https://solidity.readthedocs.io/en/latest/units-and-global-variables.html?highlight=abi.decode#abi-encoding-and-decoding-functions[`abi.decode`]. + * + * Requirements: + * + * - `target` must be a contract. + * - calling `target` with `data` must not revert. + * + * _Available since v3.1._ + */ + function functionCall(address target, bytes memory data) internal returns (bytes memory) { + return functionCallWithValue(target, data, 0, "Address: low-level call failed"); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`], but with + * `errorMessage` as a fallback revert reason when `target` reverts. + * + * _Available since v3.1._ + */ + function functionCall(address target, bytes memory data, string memory errorMessage) internal returns (bytes memory) { + return functionCallWithValue(target, data, 0, errorMessage); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`], + * but also transferring `value` wei to `target`. + * + * Requirements: + * + * - the calling contract must have an ETH balance of at least `value`. + * - the called Solidity function must be `payable`. 
+ * + * _Available since v3.1._ + */ + function functionCallWithValue(address target, bytes memory data, uint256 value) internal returns (bytes memory) { + return functionCallWithValue(target, data, value, "Address: low-level call with value failed"); + } + + /** + * @dev Same as {xref-Address-functionCallWithValue-address-bytes-uint256-}[`functionCallWithValue`], but + * with `errorMessage` as a fallback revert reason when `target` reverts. + * + * _Available since v3.1._ + */ + function functionCallWithValue( + address target, + bytes memory data, + uint256 value, + string memory errorMessage + ) internal returns (bytes memory) { + require(address(this).balance >= value, "Address: insufficient balance for call"); + (bool success, bytes memory returndata) = target.call{value: value}(data); + return verifyCallResultFromTarget(target, success, returndata, errorMessage); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`], + * but performing a static call. + * + * _Available since v3.3._ + */ + function functionStaticCall(address target, bytes memory data) internal view returns (bytes memory) { + return functionStaticCall(target, data, "Address: low-level static call failed"); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-string-}[`functionCall`], + * but performing a static call. + * + * _Available since v3.3._ + */ + function functionStaticCall( + address target, + bytes memory data, + string memory errorMessage + ) internal view returns (bytes memory) { + (bool success, bytes memory returndata) = target.staticcall(data); + return verifyCallResultFromTarget(target, success, returndata, errorMessage); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-}[`functionCall`], + * but performing a delegate call. 
+ * + * _Available since v3.4._ + */ + function functionDelegateCall(address target, bytes memory data) internal returns (bytes memory) { + return functionDelegateCall(target, data, "Address: low-level delegate call failed"); + } + + /** + * @dev Same as {xref-Address-functionCall-address-bytes-string-}[`functionCall`], + * but performing a delegate call. + * + * _Available since v3.4._ + */ + function functionDelegateCall( + address target, + bytes memory data, + string memory errorMessage + ) internal returns (bytes memory) { + (bool success, bytes memory returndata) = target.delegatecall(data); + return verifyCallResultFromTarget(target, success, returndata, errorMessage); + } + + /** + * @dev Tool to verify that a low level call to smart-contract was successful, and revert (either by bubbling + * the revert reason or using the provided one) in case of unsuccessful call or if target was not a contract. + * + * _Available since v4.8._ + */ + function verifyCallResultFromTarget( + address target, + bool success, + bytes memory returndata, + string memory errorMessage + ) internal view returns (bytes memory) { + if (success) { + if (returndata.length == 0) { + // only check isContract if the call was successful and the return data is empty + // otherwise we already know that it was a contract + require(isContract(target), "Address: call to non-contract"); + } + return returndata; + } else { + _revert(returndata, errorMessage); + } + } + + /** + * @dev Tool to verify that a low level call was successful, and revert if it wasn't, either by bubbling the + * revert reason or using the provided one. 
+ * + * _Available since v4.3._ + */ + function verifyCallResult( + bool success, + bytes memory returndata, + string memory errorMessage + ) internal pure returns (bytes memory) { + if (success) { + return returndata; + } else { + _revert(returndata, errorMessage); + } + } + + function _revert(bytes memory returndata, string memory errorMessage) private pure { + // Look for revert reason and bubble it up if present + if (returndata.length > 0) { + // The easiest way to bubble the revert reason is using memory via assembly + /// @solidity memory-safe-assembly + assembly { + let returndata_size := mload(returndata) + revert(add(32, returndata), returndata_size) + } + } else { + revert(errorMessage); + } + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Context.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Context.sol new file mode 100644 index 00000000..357980f2 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Context.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts v4.4.1 (utils/Context.sol) + +pragma solidity ^0.8.0; + +/** + * @dev Provides information about the current execution context, including the + * sender of the transaction and its data. While these are generally available + * via msg.sender and msg.data, they should not be accessed in such a direct + * manner, since when dealing with meta-transactions the account sending and + * paying for execution may not be the actual sender (as far as an application + * is concerned). + * + * This contract is only required for intermediate, library-like contracts. 
+ */ +abstract contract Context { + function _msgSender() internal view virtual returns (address) { + return msg.sender; + } + + function _msgData() internal view virtual returns (bytes calldata) { + return msg.data; + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Counters.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Counters.sol new file mode 100644 index 00000000..ea330e9d --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Counters.sol @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts v4.4.1 (utils/Counters.sol) + +pragma solidity ^0.8.0; + +/** + * @title Counters + * @author Matt Condon (@shrugs) + * @dev Provides counters that can only be incremented, decremented or reset. This can be used e.g. to track the number + * of elements in a mapping, issuing ERC721 ids, or counting request ids. + * + * Include with `using Counters for Counters.Counter;` + */ +library Counters { + struct Counter { + // This variable should never be directly accessed by users of the library: interactions must be restricted to + // the library's function. 
As of Solidity v0.5.2, this cannot be enforced, though there is a proposal to add + // this feature: see https://github.com/ethereum/solidity/issues/4637 + uint256 _value; // default: 0 + } + + function current(Counter storage counter) internal view returns (uint256) { + return counter._value; + } + + function increment(Counter storage counter) internal { + unchecked { + counter._value += 1; + } + } + + function decrement(Counter storage counter) internal { + uint256 value = counter._value; + require(value > 0, "Counter: decrement overflow"); + unchecked { + counter._value = value - 1; + } + } + + function reset(Counter storage counter) internal { + counter._value = 0; + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/StorageSlot.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/StorageSlot.sol new file mode 100644 index 00000000..7e91bc62 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/StorageSlot.sol @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.7.0) (utils/StorageSlot.sol) + +pragma solidity ^0.8.0; + +/** + * @dev Library for reading and writing primitive types to specific storage slots. + * + * Storage slots are often used to avoid storage conflict when dealing with upgradeable contracts. + * This library helps with reading and writing to such slots without the need for inline assembly. + * + * The functions in this library return Slot structs that contain a `value` member that can be used to read or write. 
+ * + * Example usage to set ERC1967 implementation slot: + * ``` + * contract ERC1967 { + * bytes32 internal constant _IMPLEMENTATION_SLOT = 0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc; + * + * function _getImplementation() internal view returns (address) { + * return StorageSlot.getAddressSlot(_IMPLEMENTATION_SLOT).value; + * } + * + * function _setImplementation(address newImplementation) internal { + * require(Address.isContract(newImplementation), "ERC1967: new implementation is not a contract"); + * StorageSlot.getAddressSlot(_IMPLEMENTATION_SLOT).value = newImplementation; + * } + * } + * ``` + * + * _Available since v4.1 for `address`, `bool`, `bytes32`, and `uint256`._ + */ +library StorageSlot { + struct AddressSlot { + address value; + } + + struct BooleanSlot { + bool value; + } + + struct Bytes32Slot { + bytes32 value; + } + + struct Uint256Slot { + uint256 value; + } + + /** + * @dev Returns an `AddressSlot` with member `value` located at `slot`. + */ + function getAddressSlot(bytes32 slot) internal pure returns (AddressSlot storage r) { + /// @solidity memory-safe-assembly + assembly { + r.slot := slot + } + } + + /** + * @dev Returns an `BooleanSlot` with member `value` located at `slot`. + */ + function getBooleanSlot(bytes32 slot) internal pure returns (BooleanSlot storage r) { + /// @solidity memory-safe-assembly + assembly { + r.slot := slot + } + } + + /** + * @dev Returns an `Bytes32Slot` with member `value` located at `slot`. + */ + function getBytes32Slot(bytes32 slot) internal pure returns (Bytes32Slot storage r) { + /// @solidity memory-safe-assembly + assembly { + r.slot := slot + } + } + + /** + * @dev Returns an `Uint256Slot` with member `value` located at `slot`. 
+ */ + function getUint256Slot(bytes32 slot) internal pure returns (Uint256Slot storage r) { + /// @solidity memory-safe-assembly + assembly { + r.slot := slot + } + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Strings.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Strings.sol new file mode 100644 index 00000000..377ba46a --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/Strings.sol @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (utils/Strings.sol) + +pragma solidity ^0.8.0; + +import "./math/Math.sol"; + +/** + * @dev String operations. + */ +library Strings { + bytes16 private constant _SYMBOLS = "0123456789abcdef"; + uint8 private constant _ADDRESS_LENGTH = 20; + + /** + * @dev Converts a `uint256` to its ASCII `string` decimal representation. + */ + function toString(uint256 value) internal pure returns (string memory) { + unchecked { + uint256 length = Math.log10(value) + 1; + string memory buffer = new string(length); + uint256 ptr; + /// @solidity memory-safe-assembly + assembly { + ptr := add(buffer, add(32, length)) + } + while (true) { + ptr--; + /// @solidity memory-safe-assembly + assembly { + mstore8(ptr, byte(mod(value, 10), _SYMBOLS)) + } + value /= 10; + if (value == 0) break; + } + return buffer; + } + } + + /** + * @dev Converts a `uint256` to its ASCII `string` hexadecimal representation. + */ + function toHexString(uint256 value) internal pure returns (string memory) { + unchecked { + return toHexString(value, Math.log256(value) + 1); + } + } + + /** + * @dev Converts a `uint256` to its ASCII `string` hexadecimal representation with fixed length. 
+ */ + function toHexString(uint256 value, uint256 length) internal pure returns (string memory) { + bytes memory buffer = new bytes(2 * length + 2); + buffer[0] = "0"; + buffer[1] = "x"; + for (uint256 i = 2 * length + 1; i > 1; --i) { + buffer[i] = _SYMBOLS[value & 0xf]; + value >>= 4; + } + require(value == 0, "Strings: hex length insufficient"); + return string(buffer); + } + + /** + * @dev Converts an `address` with fixed length of 20 bytes to its not checksummed ASCII `string` hexadecimal representation. + */ + function toHexString(address addr) internal pure returns (string memory) { + return toHexString(uint256(uint160(addr)), _ADDRESS_LENGTH); + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/cryptography/ECDSA.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/cryptography/ECDSA.sol new file mode 100644 index 00000000..65d4b81b --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/cryptography/ECDSA.sol @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (utils/cryptography/ECDSA.sol) + +pragma solidity ^0.8.0; + +import "../Strings.sol"; + +/** + * @dev Elliptic Curve Digital Signature Algorithm (ECDSA) operations. + * + * These functions can be used to verify that a message was signed by the holder + * of the private keys of a given address. 
+ */ +library ECDSA { + enum RecoverError { + NoError, + InvalidSignature, + InvalidSignatureLength, + InvalidSignatureS, + InvalidSignatureV // Deprecated in v4.8 + } + + function _throwError(RecoverError error) private pure { + if (error == RecoverError.NoError) { + return; // no error: do nothing + } else if (error == RecoverError.InvalidSignature) { + revert("ECDSA: invalid signature"); + } else if (error == RecoverError.InvalidSignatureLength) { + revert("ECDSA: invalid signature length"); + } else if (error == RecoverError.InvalidSignatureS) { + revert("ECDSA: invalid signature 's' value"); + } + } + + /** + * @dev Returns the address that signed a hashed message (`hash`) with + * `signature` or error string. This address can then be used for verification purposes. + * + * The `ecrecover` EVM opcode allows for malleable (non-unique) signatures: + * this function rejects them by requiring the `s` value to be in the lower + * half order, and the `v` value to be either 27 or 28. + * + * IMPORTANT: `hash` _must_ be the result of a hash operation for the + * verification to be secure: it is possible to craft signatures that + * recover to arbitrary addresses for non-hashed data. A safe way to ensure + * this is by receiving a hash of the original message (which may otherwise + * be too long), and then calling {toEthSignedMessageHash} on it. + * + * Documentation for signature generation: + * - with https://web3js.readthedocs.io/en/v1.3.4/web3-eth-accounts.html#sign[Web3.js] + * - with https://docs.ethers.io/v5/api/signer/#Signer-signMessage[ethers] + * + * _Available since v4.3._ + */ + function tryRecover(bytes32 hash, bytes memory signature) internal pure returns (address, RecoverError) { + if (signature.length == 65) { + bytes32 r; + bytes32 s; + uint8 v; + // ecrecover takes the signature parameters, and the only way to get them + // currently is to use assembly. 
+ /// @solidity memory-safe-assembly + assembly { + r := mload(add(signature, 0x20)) + s := mload(add(signature, 0x40)) + v := byte(0, mload(add(signature, 0x60))) + } + return tryRecover(hash, v, r, s); + } else { + return (address(0), RecoverError.InvalidSignatureLength); + } + } + + /** + * @dev Returns the address that signed a hashed message (`hash`) with + * `signature`. This address can then be used for verification purposes. + * + * The `ecrecover` EVM opcode allows for malleable (non-unique) signatures: + * this function rejects them by requiring the `s` value to be in the lower + * half order, and the `v` value to be either 27 or 28. + * + * IMPORTANT: `hash` _must_ be the result of a hash operation for the + * verification to be secure: it is possible to craft signatures that + * recover to arbitrary addresses for non-hashed data. A safe way to ensure + * this is by receiving a hash of the original message (which may otherwise + * be too long), and then calling {toEthSignedMessageHash} on it. + */ + function recover(bytes32 hash, bytes memory signature) internal pure returns (address) { + (address recovered, RecoverError error) = tryRecover(hash, signature); + _throwError(error); + return recovered; + } + + /** + * @dev Overload of {ECDSA-tryRecover} that receives the `r` and `vs` short-signature fields separately. + * + * See https://eips.ethereum.org/EIPS/eip-2098[EIP-2098 short signatures] + * + * _Available since v4.3._ + */ + function tryRecover(bytes32 hash, bytes32 r, bytes32 vs) internal pure returns (address, RecoverError) { + bytes32 s = vs & bytes32(0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff); + uint8 v = uint8((uint256(vs) >> 255) + 27); + return tryRecover(hash, v, r, s); + } + + /** + * @dev Overload of {ECDSA-recover} that receives the `r and `vs` short-signature fields separately. 
+ * + * _Available since v4.2._ + */ + function recover(bytes32 hash, bytes32 r, bytes32 vs) internal pure returns (address) { + (address recovered, RecoverError error) = tryRecover(hash, r, vs); + _throwError(error); + return recovered; + } + + /** + * @dev Overload of {ECDSA-tryRecover} that receives the `v`, + * `r` and `s` signature fields separately. + * + * _Available since v4.3._ + */ + function tryRecover(bytes32 hash, uint8 v, bytes32 r, bytes32 s) internal pure returns (address, RecoverError) { + // EIP-2 still allows signature malleability for ecrecover(). Remove this possibility and make the signature + // unique. Appendix F in the Ethereum Yellow paper (https://ethereum.github.io/yellowpaper/paper.pdf), defines + // the valid range for s in (301): 0 < s < secp256k1n ÷ 2 + 1, and for v in (302): v ∈ {27, 28}. Most + // signatures from current libraries generate a unique signature with an s-value in the lower half order. + // + // If your library generates malleable signatures, such as s-values in the upper range, calculate a new s-value + // with 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 - s1 and flip v from 27 to 28 or + // vice versa. If your library also generates signatures with 0/1 for v instead 27/28, add 27 to v to accept + // these malleable signatures as well. + if (uint256(s) > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0) { + return (address(0), RecoverError.InvalidSignatureS); + } + + // If the signature is valid (and not malleable), return the signer address + address signer = ecrecover(hash, v, r, s); + if (signer == address(0)) { + return (address(0), RecoverError.InvalidSignature); + } + + return (signer, RecoverError.NoError); + } + + /** + * @dev Overload of {ECDSA-recover} that receives the `v`, + * `r` and `s` signature fields separately. 
+ */ + function recover(bytes32 hash, uint8 v, bytes32 r, bytes32 s) internal pure returns (address) { + (address recovered, RecoverError error) = tryRecover(hash, v, r, s); + _throwError(error); + return recovered; + } + + /** + * @dev Returns an Ethereum Signed Message, created from a `hash`. This + * produces hash corresponding to the one signed with the + * https://eth.wiki/json-rpc/API#eth_sign[`eth_sign`] + * JSON-RPC method as part of EIP-191. + * + * See {recover}. + */ + function toEthSignedMessageHash(bytes32 hash) internal pure returns (bytes32) { + // 32 is the length in bytes of hash, + // enforced by the type signature above + return keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n32", hash)); + } + + /** + * @dev Returns an Ethereum Signed Message, created from `s`. This + * produces hash corresponding to the one signed with the + * https://eth.wiki/json-rpc/API#eth_sign[`eth_sign`] + * JSON-RPC method as part of EIP-191. + * + * See {recover}. + */ + function toEthSignedMessageHash(bytes memory s) internal pure returns (bytes32) { + return keccak256(abi.encodePacked("\x19Ethereum Signed Message:\n", Strings.toString(s.length), s)); + } + + /** + * @dev Returns an Ethereum Signed Typed Data, created from a + * `domainSeparator` and a `structHash`. This produces hash corresponding + * to the one signed with the + * https://eips.ethereum.org/EIPS/eip-712[`eth_signTypedData`] + * JSON-RPC method as part of EIP-712. + * + * See {recover}. 
+ */ + function toTypedDataHash(bytes32 domainSeparator, bytes32 structHash) internal pure returns (bytes32) { + return keccak256(abi.encodePacked("\x19\x01", domainSeparator, structHash)); + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/cryptography/EIP712.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/cryptography/EIP712.sol new file mode 100644 index 00000000..6924570e --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/cryptography/EIP712.sol @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (utils/cryptography/EIP712.sol) + +pragma solidity ^0.8.0; + +import "./ECDSA.sol"; + +/** + * @dev https://eips.ethereum.org/EIPS/eip-712[EIP 712] is a standard for hashing and signing of typed structured data. + * + * The encoding specified in the EIP is very generic, and such a generic implementation in Solidity is not feasible, + * thus this contract does not implement the encoding itself. Protocols need to implement the type-specific encoding + * they need in their contracts using a combination of `abi.encode` and `keccak256`. + * + * This contract implements the EIP 712 domain separator ({_domainSeparatorV4}) that is used as part of the encoding + * scheme, and the final step of the encoding to obtain the message digest that is then signed via ECDSA + * ({_hashTypedDataV4}). + * + * The implementation of the domain separator was designed to be as efficient as possible while still properly updating + * the chain id to protect against replay attacks on an eventual fork of the chain. + * + * NOTE: This contract implements the version of the encoding known as "v4", as implemented by the JSON RPC method + * https://docs.metamask.io/guide/signing-data.html[`eth_signTypedDataV4` in MetaMask]. 
+ * + * _Available since v3.4._ + */ +abstract contract EIP712 { + /* solhint-disable var-name-mixedcase */ + // Cache the domain separator as an immutable value, but also store the chain id that it corresponds to, in order to + // invalidate the cached domain separator if the chain id changes. + bytes32 private immutable _CACHED_DOMAIN_SEPARATOR; + uint256 private immutable _CACHED_CHAIN_ID; + address private immutable _CACHED_THIS; + + bytes32 private immutable _HASHED_NAME; + bytes32 private immutable _HASHED_VERSION; + bytes32 private immutable _TYPE_HASH; + + /* solhint-enable var-name-mixedcase */ + + /** + * @dev Initializes the domain separator and parameter caches. + * + * The meaning of `name` and `version` is specified in + * https://eips.ethereum.org/EIPS/eip-712#definition-of-domainseparator[EIP 712]: + * + * - `name`: the user readable name of the signing domain, i.e. the name of the DApp or the protocol. + * - `version`: the current major version of the signing domain. + * + * NOTE: These parameters cannot be changed except through a xref:learn::upgrading-smart-contracts.adoc[smart + * contract upgrade]. + */ + constructor(string memory name, string memory version) { + bytes32 hashedName = keccak256(bytes(name)); + bytes32 hashedVersion = keccak256(bytes(version)); + bytes32 typeHash = keccak256("EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)"); + _HASHED_NAME = hashedName; + _HASHED_VERSION = hashedVersion; + _CACHED_CHAIN_ID = block.chainid; + _CACHED_DOMAIN_SEPARATOR = _buildDomainSeparator(typeHash, hashedName, hashedVersion); + _CACHED_THIS = address(this); + _TYPE_HASH = typeHash; + } + + /** + * @dev Returns the domain separator for the current chain. 
+ */ + function _domainSeparatorV4() internal view returns (bytes32) { + if (address(this) == _CACHED_THIS && block.chainid == _CACHED_CHAIN_ID) { + return _CACHED_DOMAIN_SEPARATOR; + } else { + return _buildDomainSeparator(_TYPE_HASH, _HASHED_NAME, _HASHED_VERSION); + } + } + + function _buildDomainSeparator( + bytes32 typeHash, + bytes32 nameHash, + bytes32 versionHash + ) private view returns (bytes32) { + return keccak256(abi.encode(typeHash, nameHash, versionHash, block.chainid, address(this))); + } + + /** + * @dev Given an already https://eips.ethereum.org/EIPS/eip-712#definition-of-hashstruct[hashed struct], this + * function returns the hash of the fully encoded EIP712 message for this domain. + * + * This hash can be used together with {ECDSA-recover} to obtain the signer of a message. For example: + * + * ```solidity + * bytes32 digest = _hashTypedDataV4(keccak256(abi.encode( + * keccak256("Mail(address to,string contents)"), + * mailTo, + * keccak256(bytes(mailContents)) + * ))); + * address signer = ECDSA.recover(digest, signature); + * ``` + */ + function _hashTypedDataV4(bytes32 structHash) internal view virtual returns (bytes32) { + return ECDSA.toTypedDataHash(_domainSeparatorV4(), structHash); + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/ERC165.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/ERC165.sol new file mode 100644 index 00000000..c682b07a --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/ERC165.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts v4.4.1 (utils/introspection/ERC165.sol) + +pragma solidity ^0.8.0; + +import "./IERC165.sol"; + +/** + * @dev Implementation of the {IERC165} interface. 
+ * + * Contracts that want to implement ERC165 should inherit from this contract and override {supportsInterface} to check + * for the additional interface id that will be supported. For example: + * + * ```solidity + * function supportsInterface(bytes4 interfaceId) public view virtual override returns (bool) { + * return interfaceId == type(MyInterface).interfaceId || super.supportsInterface(interfaceId); + * } + * ``` + * + * Alternatively, {ERC165Storage} provides an easier to use but more expensive implementation. + */ +abstract contract ERC165 is IERC165 { + /** + * @dev See {IERC165-supportsInterface}. + */ + function supportsInterface(bytes4 interfaceId) public view virtual override returns (bool) { + return interfaceId == type(IERC165).interfaceId; + } +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/IERC165.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/IERC165.sol new file mode 100644 index 00000000..5fa65516 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/IERC165.sol @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts v4.4.1 (utils/introspection/IERC165.sol) + +pragma solidity ^0.8.0; + +/** + * @dev Interface of the ERC165 standard, as defined in the + * https://eips.ethereum.org/EIPS/eip-165[EIP]. + * + * Implementers can declare support of contract interfaces, which can then be + * queried by others ({ERC165Checker}). + * + * For an implementation, see {ERC165}. + */ +interface IERC165 { + /** + * @dev Returns true if this contract implements the interface defined by + * `interfaceId`. See the corresponding + * https://eips.ethereum.org/EIPS/eip-165#how-interfaces-are-identified[EIP section] + * to learn more about how these ids are created. + * + * This function call must use less than 30 000 gas. 
+ */ + function supportsInterface(bytes4 interfaceId) external view returns (bool); +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/Math.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/Math.sol new file mode 100644 index 00000000..7a793295 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/Math.sol @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (utils/math/Math.sol) + +pragma solidity ^0.8.0; + +/** + * @dev Standard math utilities missing in the Solidity language. + */ +library Math { + enum Rounding { + Down, // Toward negative infinity + Up, // Toward infinity + Zero // Toward zero + } + + /** + * @dev Returns the largest of two numbers. + */ + function max(uint256 a, uint256 b) internal pure returns (uint256) { + return a > b ? a : b; + } + + /** + * @dev Returns the smallest of two numbers. + */ + function min(uint256 a, uint256 b) internal pure returns (uint256) { + return a < b ? a : b; + } + + /** + * @dev Returns the average of two numbers. The result is rounded towards + * zero. + */ + function average(uint256 a, uint256 b) internal pure returns (uint256) { + // (a + b) / 2 can overflow. + return (a & b) + (a ^ b) / 2; + } + + /** + * @dev Returns the ceiling of the division of two numbers. + * + * This differs from standard division with `/` in that it rounds up instead + * of rounding down. + */ + function ceilDiv(uint256 a, uint256 b) internal pure returns (uint256) { + // (a + b - 1) / b can overflow on addition, so we distribute. + return a == 0 ? 0 : (a - 1) / b + 1; + } + + /** + * @notice Calculates floor(x * y / denominator) with full precision. 
Throws if result overflows a uint256 or denominator == 0 + * @dev Original credit to Remco Bloemen under MIT license (https://xn--2-umb.com/21/muldiv) + * with further edits by Uniswap Labs also under MIT license. + */ + function mulDiv(uint256 x, uint256 y, uint256 denominator) internal pure returns (uint256 result) { + unchecked { + // 512-bit multiply [prod1 prod0] = x * y. Compute the product mod 2^256 and mod 2^256 - 1, then use + // use the Chinese Remainder Theorem to reconstruct the 512 bit result. The result is stored in two 256 + // variables such that product = prod1 * 2^256 + prod0. + uint256 prod0; // Least significant 256 bits of the product + uint256 prod1; // Most significant 256 bits of the product + assembly { + let mm := mulmod(x, y, not(0)) + prod0 := mul(x, y) + prod1 := sub(sub(mm, prod0), lt(mm, prod0)) + } + + // Handle non-overflow cases, 256 by 256 division. + if (prod1 == 0) { + return prod0 / denominator; + } + + // Make sure the result is less than 2^256. Also prevents denominator == 0. + require(denominator > prod1); + + /////////////////////////////////////////////// + // 512 by 256 division. + /////////////////////////////////////////////// + + // Make division exact by subtracting the remainder from [prod1 prod0]. + uint256 remainder; + assembly { + // Compute remainder using mulmod. + remainder := mulmod(x, y, denominator) + + // Subtract 256 bit number from 512 bit number. + prod1 := sub(prod1, gt(remainder, prod0)) + prod0 := sub(prod0, remainder) + } + + // Factor powers of two out of denominator and compute largest power of two divisor of denominator. Always >= 1. + // See https://cs.stackexchange.com/q/138556/92363. + + // Does not overflow because the denominator cannot be zero at this stage in the function. + uint256 twos = denominator & (~denominator + 1); + assembly { + // Divide denominator by twos. + denominator := div(denominator, twos) + + // Divide [prod1 prod0] by twos. 
+ prod0 := div(prod0, twos) + + // Flip twos such that it is 2^256 / twos. If twos is zero, then it becomes one. + twos := add(div(sub(0, twos), twos), 1) + } + + // Shift in bits from prod1 into prod0. + prod0 |= prod1 * twos; + + // Invert denominator mod 2^256. Now that denominator is an odd number, it has an inverse modulo 2^256 such + // that denominator * inv = 1 mod 2^256. Compute the inverse by starting with a seed that is correct for + // four bits. That is, denominator * inv = 1 mod 2^4. + uint256 inverse = (3 * denominator) ^ 2; + + // Use the Newton-Raphson iteration to improve the precision. Thanks to Hensel's lifting lemma, this also works + // in modular arithmetic, doubling the correct bits in each step. + inverse *= 2 - denominator * inverse; // inverse mod 2^8 + inverse *= 2 - denominator * inverse; // inverse mod 2^16 + inverse *= 2 - denominator * inverse; // inverse mod 2^32 + inverse *= 2 - denominator * inverse; // inverse mod 2^64 + inverse *= 2 - denominator * inverse; // inverse mod 2^128 + inverse *= 2 - denominator * inverse; // inverse mod 2^256 + + // Because the division is now exact we can divide by multiplying with the modular inverse of denominator. + // This will give us the correct result modulo 2^256. Since the preconditions guarantee that the outcome is + // less than 2^256, this is the final result. We don't need to compute the high bits of the result and prod1 + // is no longer required. + result = prod0 * inverse; + return result; + } + } + + /** + * @notice Calculates x * y / denominator with full precision, following the selected rounding direction. + */ + function mulDiv(uint256 x, uint256 y, uint256 denominator, Rounding rounding) internal pure returns (uint256) { + uint256 result = mulDiv(x, y, denominator); + if (rounding == Rounding.Up && mulmod(x, y, denominator) > 0) { + result += 1; + } + return result; + } + + /** + * @dev Returns the square root of a number. 
If the number is not a perfect square, the value is rounded down. + * + * Inspired by Henry S. Warren, Jr.'s "Hacker's Delight" (Chapter 11). + */ + function sqrt(uint256 a) internal pure returns (uint256) { + if (a == 0) { + return 0; + } + + // For our first guess, we get the biggest power of 2 which is smaller than the square root of the target. + // + // We know that the "msb" (most significant bit) of our target number `a` is a power of 2 such that we have + // `msb(a) <= a < 2*msb(a)`. This value can be written `msb(a)=2**k` with `k=log2(a)`. + // + // This can be rewritten `2**log2(a) <= a < 2**(log2(a) + 1)` + // → `sqrt(2**k) <= sqrt(a) < sqrt(2**(k+1))` + // → `2**(k/2) <= sqrt(a) < 2**((k+1)/2) <= 2**(k/2 + 1)` + // + // Consequently, `2**(log2(a) / 2)` is a good first approximation of `sqrt(a)` with at least 1 correct bit. + uint256 result = 1 << (log2(a) >> 1); + + // At this point `result` is an estimation with one bit of precision. We know the true value is a uint128, + // since it is the square root of a uint256. Newton's method converges quadratically (precision doubles at + // every iteration). We thus need at most 7 iteration to turn our partial result with one bit of precision + // into the expected uint128 result. + unchecked { + result = (result + a / result) >> 1; + result = (result + a / result) >> 1; + result = (result + a / result) >> 1; + result = (result + a / result) >> 1; + result = (result + a / result) >> 1; + result = (result + a / result) >> 1; + result = (result + a / result) >> 1; + return min(result, a / result); + } + } + + /** + * @notice Calculates sqrt(a), following the selected rounding direction. + */ + function sqrt(uint256 a, Rounding rounding) internal pure returns (uint256) { + unchecked { + uint256 result = sqrt(a); + return result + (rounding == Rounding.Up && result * result < a ? 1 : 0); + } + } + + /** + * @dev Return the log in base 2, rounded down, of a positive value. + * Returns 0 if given 0. 
+ */ + function log2(uint256 value) internal pure returns (uint256) { + uint256 result = 0; + unchecked { + if (value >> 128 > 0) { + value >>= 128; + result += 128; + } + if (value >> 64 > 0) { + value >>= 64; + result += 64; + } + if (value >> 32 > 0) { + value >>= 32; + result += 32; + } + if (value >> 16 > 0) { + value >>= 16; + result += 16; + } + if (value >> 8 > 0) { + value >>= 8; + result += 8; + } + if (value >> 4 > 0) { + value >>= 4; + result += 4; + } + if (value >> 2 > 0) { + value >>= 2; + result += 2; + } + if (value >> 1 > 0) { + result += 1; + } + } + return result; + } + + /** + * @dev Return the log in base 2, following the selected rounding direction, of a positive value. + * Returns 0 if given 0. + */ + function log2(uint256 value, Rounding rounding) internal pure returns (uint256) { + unchecked { + uint256 result = log2(value); + return result + (rounding == Rounding.Up && 1 << result < value ? 1 : 0); + } + } + + /** + * @dev Return the log in base 10, rounded down, of a positive value. + * Returns 0 if given 0. + */ + function log10(uint256 value) internal pure returns (uint256) { + uint256 result = 0; + unchecked { + if (value >= 10 ** 64) { + value /= 10 ** 64; + result += 64; + } + if (value >= 10 ** 32) { + value /= 10 ** 32; + result += 32; + } + if (value >= 10 ** 16) { + value /= 10 ** 16; + result += 16; + } + if (value >= 10 ** 8) { + value /= 10 ** 8; + result += 8; + } + if (value >= 10 ** 4) { + value /= 10 ** 4; + result += 4; + } + if (value >= 10 ** 2) { + value /= 10 ** 2; + result += 2; + } + if (value >= 10 ** 1) { + result += 1; + } + } + return result; + } + + /** + * @dev Return the log in base 10, following the selected rounding direction, of a positive value. + * Returns 0 if given 0. + */ + function log10(uint256 value, Rounding rounding) internal pure returns (uint256) { + unchecked { + uint256 result = log10(value); + return result + (rounding == Rounding.Up && 10 ** result < value ? 
1 : 0); + } + } + + /** + * @dev Return the log in base 256, rounded down, of a positive value. + * Returns 0 if given 0. + * + * Adding one to the result gives the number of pairs of hex symbols needed to represent `value` as a hex string. + */ + function log256(uint256 value) internal pure returns (uint256) { + uint256 result = 0; + unchecked { + if (value >> 128 > 0) { + value >>= 128; + result += 16; + } + if (value >> 64 > 0) { + value >>= 64; + result += 8; + } + if (value >> 32 > 0) { + value >>= 32; + result += 4; + } + if (value >> 16 > 0) { + value >>= 16; + result += 2; + } + if (value >> 8 > 0) { + result += 1; + } + } + return result; + } + + /** + * @dev Return the log in base 10, following the selected rounding direction, of a positive value. + * Returns 0 if given 0. + */ + function log256(uint256 value, Rounding rounding) internal pure returns (uint256) { + unchecked { + uint256 result = log256(value); + return result + (rounding == Rounding.Up && 1 << (result * 8) < value ? 1 : 0); + } + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SafeCast.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SafeCast.sol new file mode 100644 index 00000000..28c11118 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SafeCast.sol @@ -0,0 +1,1136 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (utils/math/SafeCast.sol) +// This file was procedurally generated from scripts/generate/templates/SafeCast.js. + +pragma solidity ^0.8.0; + +/** + * @dev Wrappers over Solidity's uintXX/intXX casting operators with added overflow + * checks. + * + * Downcasting from uint256/int256 in Solidity does not revert on overflow. This can + * easily result in undesired exploitation or bugs, since developers usually + * assume that overflows raise errors. 
`SafeCast` restores this intuition by + * reverting the transaction when such an operation overflows. + * + * Using this library instead of the unchecked operations eliminates an entire + * class of bugs, so it's recommended to use it always. + * + * Can be combined with {SafeMath} and {SignedSafeMath} to extend it to smaller types, by performing + * all math on `uint256` and `int256` and then downcasting. + */ +library SafeCast { + /** + * @dev Returns the downcasted uint248 from uint256, reverting on + * overflow (when the input is greater than largest uint248). + * + * Counterpart to Solidity's `uint248` operator. + * + * Requirements: + * + * - input must fit into 248 bits + * + * _Available since v4.7._ + */ + function toUint248(uint256 value) internal pure returns (uint248) { + require(value <= type(uint248).max, "SafeCast: value doesn't fit in 248 bits"); + return uint248(value); + } + + /** + * @dev Returns the downcasted uint240 from uint256, reverting on + * overflow (when the input is greater than largest uint240). + * + * Counterpart to Solidity's `uint240` operator. + * + * Requirements: + * + * - input must fit into 240 bits + * + * _Available since v4.7._ + */ + function toUint240(uint256 value) internal pure returns (uint240) { + require(value <= type(uint240).max, "SafeCast: value doesn't fit in 240 bits"); + return uint240(value); + } + + /** + * @dev Returns the downcasted uint232 from uint256, reverting on + * overflow (when the input is greater than largest uint232). + * + * Counterpart to Solidity's `uint232` operator. + * + * Requirements: + * + * - input must fit into 232 bits + * + * _Available since v4.7._ + */ + function toUint232(uint256 value) internal pure returns (uint232) { + require(value <= type(uint232).max, "SafeCast: value doesn't fit in 232 bits"); + return uint232(value); + } + + /** + * @dev Returns the downcasted uint224 from uint256, reverting on + * overflow (when the input is greater than largest uint224). 
+ * + * Counterpart to Solidity's `uint224` operator. + * + * Requirements: + * + * - input must fit into 224 bits + * + * _Available since v4.2._ + */ + function toUint224(uint256 value) internal pure returns (uint224) { + require(value <= type(uint224).max, "SafeCast: value doesn't fit in 224 bits"); + return uint224(value); + } + + /** + * @dev Returns the downcasted uint216 from uint256, reverting on + * overflow (when the input is greater than largest uint216). + * + * Counterpart to Solidity's `uint216` operator. + * + * Requirements: + * + * - input must fit into 216 bits + * + * _Available since v4.7._ + */ + function toUint216(uint256 value) internal pure returns (uint216) { + require(value <= type(uint216).max, "SafeCast: value doesn't fit in 216 bits"); + return uint216(value); + } + + /** + * @dev Returns the downcasted uint208 from uint256, reverting on + * overflow (when the input is greater than largest uint208). + * + * Counterpart to Solidity's `uint208` operator. + * + * Requirements: + * + * - input must fit into 208 bits + * + * _Available since v4.7._ + */ + function toUint208(uint256 value) internal pure returns (uint208) { + require(value <= type(uint208).max, "SafeCast: value doesn't fit in 208 bits"); + return uint208(value); + } + + /** + * @dev Returns the downcasted uint200 from uint256, reverting on + * overflow (when the input is greater than largest uint200). + * + * Counterpart to Solidity's `uint200` operator. + * + * Requirements: + * + * - input must fit into 200 bits + * + * _Available since v4.7._ + */ + function toUint200(uint256 value) internal pure returns (uint200) { + require(value <= type(uint200).max, "SafeCast: value doesn't fit in 200 bits"); + return uint200(value); + } + + /** + * @dev Returns the downcasted uint192 from uint256, reverting on + * overflow (when the input is greater than largest uint192). + * + * Counterpart to Solidity's `uint192` operator. 
+ * + * Requirements: + * + * - input must fit into 192 bits + * + * _Available since v4.7._ + */ + function toUint192(uint256 value) internal pure returns (uint192) { + require(value <= type(uint192).max, "SafeCast: value doesn't fit in 192 bits"); + return uint192(value); + } + + /** + * @dev Returns the downcasted uint184 from uint256, reverting on + * overflow (when the input is greater than largest uint184). + * + * Counterpart to Solidity's `uint184` operator. + * + * Requirements: + * + * - input must fit into 184 bits + * + * _Available since v4.7._ + */ + function toUint184(uint256 value) internal pure returns (uint184) { + require(value <= type(uint184).max, "SafeCast: value doesn't fit in 184 bits"); + return uint184(value); + } + + /** + * @dev Returns the downcasted uint176 from uint256, reverting on + * overflow (when the input is greater than largest uint176). + * + * Counterpart to Solidity's `uint176` operator. + * + * Requirements: + * + * - input must fit into 176 bits + * + * _Available since v4.7._ + */ + function toUint176(uint256 value) internal pure returns (uint176) { + require(value <= type(uint176).max, "SafeCast: value doesn't fit in 176 bits"); + return uint176(value); + } + + /** + * @dev Returns the downcasted uint168 from uint256, reverting on + * overflow (when the input is greater than largest uint168). + * + * Counterpart to Solidity's `uint168` operator. + * + * Requirements: + * + * - input must fit into 168 bits + * + * _Available since v4.7._ + */ + function toUint168(uint256 value) internal pure returns (uint168) { + require(value <= type(uint168).max, "SafeCast: value doesn't fit in 168 bits"); + return uint168(value); + } + + /** + * @dev Returns the downcasted uint160 from uint256, reverting on + * overflow (when the input is greater than largest uint160). + * + * Counterpart to Solidity's `uint160` operator. 
+ * + * Requirements: + * + * - input must fit into 160 bits + * + * _Available since v4.7._ + */ + function toUint160(uint256 value) internal pure returns (uint160) { + require(value <= type(uint160).max, "SafeCast: value doesn't fit in 160 bits"); + return uint160(value); + } + + /** + * @dev Returns the downcasted uint152 from uint256, reverting on + * overflow (when the input is greater than largest uint152). + * + * Counterpart to Solidity's `uint152` operator. + * + * Requirements: + * + * - input must fit into 152 bits + * + * _Available since v4.7._ + */ + function toUint152(uint256 value) internal pure returns (uint152) { + require(value <= type(uint152).max, "SafeCast: value doesn't fit in 152 bits"); + return uint152(value); + } + + /** + * @dev Returns the downcasted uint144 from uint256, reverting on + * overflow (when the input is greater than largest uint144). + * + * Counterpart to Solidity's `uint144` operator. + * + * Requirements: + * + * - input must fit into 144 bits + * + * _Available since v4.7._ + */ + function toUint144(uint256 value) internal pure returns (uint144) { + require(value <= type(uint144).max, "SafeCast: value doesn't fit in 144 bits"); + return uint144(value); + } + + /** + * @dev Returns the downcasted uint136 from uint256, reverting on + * overflow (when the input is greater than largest uint136). + * + * Counterpart to Solidity's `uint136` operator. + * + * Requirements: + * + * - input must fit into 136 bits + * + * _Available since v4.7._ + */ + function toUint136(uint256 value) internal pure returns (uint136) { + require(value <= type(uint136).max, "SafeCast: value doesn't fit in 136 bits"); + return uint136(value); + } + + /** + * @dev Returns the downcasted uint128 from uint256, reverting on + * overflow (when the input is greater than largest uint128). + * + * Counterpart to Solidity's `uint128` operator. 
+ * + * Requirements: + * + * - input must fit into 128 bits + * + * _Available since v2.5._ + */ + function toUint128(uint256 value) internal pure returns (uint128) { + require(value <= type(uint128).max, "SafeCast: value doesn't fit in 128 bits"); + return uint128(value); + } + + /** + * @dev Returns the downcasted uint120 from uint256, reverting on + * overflow (when the input is greater than largest uint120). + * + * Counterpart to Solidity's `uint120` operator. + * + * Requirements: + * + * - input must fit into 120 bits + * + * _Available since v4.7._ + */ + function toUint120(uint256 value) internal pure returns (uint120) { + require(value <= type(uint120).max, "SafeCast: value doesn't fit in 120 bits"); + return uint120(value); + } + + /** + * @dev Returns the downcasted uint112 from uint256, reverting on + * overflow (when the input is greater than largest uint112). + * + * Counterpart to Solidity's `uint112` operator. + * + * Requirements: + * + * - input must fit into 112 bits + * + * _Available since v4.7._ + */ + function toUint112(uint256 value) internal pure returns (uint112) { + require(value <= type(uint112).max, "SafeCast: value doesn't fit in 112 bits"); + return uint112(value); + } + + /** + * @dev Returns the downcasted uint104 from uint256, reverting on + * overflow (when the input is greater than largest uint104). + * + * Counterpart to Solidity's `uint104` operator. + * + * Requirements: + * + * - input must fit into 104 bits + * + * _Available since v4.7._ + */ + function toUint104(uint256 value) internal pure returns (uint104) { + require(value <= type(uint104).max, "SafeCast: value doesn't fit in 104 bits"); + return uint104(value); + } + + /** + * @dev Returns the downcasted uint96 from uint256, reverting on + * overflow (when the input is greater than largest uint96). + * + * Counterpart to Solidity's `uint96` operator. 
+ * + * Requirements: + * + * - input must fit into 96 bits + * + * _Available since v4.2._ + */ + function toUint96(uint256 value) internal pure returns (uint96) { + require(value <= type(uint96).max, "SafeCast: value doesn't fit in 96 bits"); + return uint96(value); + } + + /** + * @dev Returns the downcasted uint88 from uint256, reverting on + * overflow (when the input is greater than largest uint88). + * + * Counterpart to Solidity's `uint88` operator. + * + * Requirements: + * + * - input must fit into 88 bits + * + * _Available since v4.7._ + */ + function toUint88(uint256 value) internal pure returns (uint88) { + require(value <= type(uint88).max, "SafeCast: value doesn't fit in 88 bits"); + return uint88(value); + } + + /** + * @dev Returns the downcasted uint80 from uint256, reverting on + * overflow (when the input is greater than largest uint80). + * + * Counterpart to Solidity's `uint80` operator. + * + * Requirements: + * + * - input must fit into 80 bits + * + * _Available since v4.7._ + */ + function toUint80(uint256 value) internal pure returns (uint80) { + require(value <= type(uint80).max, "SafeCast: value doesn't fit in 80 bits"); + return uint80(value); + } + + /** + * @dev Returns the downcasted uint72 from uint256, reverting on + * overflow (when the input is greater than largest uint72). + * + * Counterpart to Solidity's `uint72` operator. + * + * Requirements: + * + * - input must fit into 72 bits + * + * _Available since v4.7._ + */ + function toUint72(uint256 value) internal pure returns (uint72) { + require(value <= type(uint72).max, "SafeCast: value doesn't fit in 72 bits"); + return uint72(value); + } + + /** + * @dev Returns the downcasted uint64 from uint256, reverting on + * overflow (when the input is greater than largest uint64). + * + * Counterpart to Solidity's `uint64` operator. 
+ * + * Requirements: + * + * - input must fit into 64 bits + * + * _Available since v2.5._ + */ + function toUint64(uint256 value) internal pure returns (uint64) { + require(value <= type(uint64).max, "SafeCast: value doesn't fit in 64 bits"); + return uint64(value); + } + + /** + * @dev Returns the downcasted uint56 from uint256, reverting on + * overflow (when the input is greater than largest uint56). + * + * Counterpart to Solidity's `uint56` operator. + * + * Requirements: + * + * - input must fit into 56 bits + * + * _Available since v4.7._ + */ + function toUint56(uint256 value) internal pure returns (uint56) { + require(value <= type(uint56).max, "SafeCast: value doesn't fit in 56 bits"); + return uint56(value); + } + + /** + * @dev Returns the downcasted uint48 from uint256, reverting on + * overflow (when the input is greater than largest uint48). + * + * Counterpart to Solidity's `uint48` operator. + * + * Requirements: + * + * - input must fit into 48 bits + * + * _Available since v4.7._ + */ + function toUint48(uint256 value) internal pure returns (uint48) { + require(value <= type(uint48).max, "SafeCast: value doesn't fit in 48 bits"); + return uint48(value); + } + + /** + * @dev Returns the downcasted uint40 from uint256, reverting on + * overflow (when the input is greater than largest uint40). + * + * Counterpart to Solidity's `uint40` operator. + * + * Requirements: + * + * - input must fit into 40 bits + * + * _Available since v4.7._ + */ + function toUint40(uint256 value) internal pure returns (uint40) { + require(value <= type(uint40).max, "SafeCast: value doesn't fit in 40 bits"); + return uint40(value); + } + + /** + * @dev Returns the downcasted uint32 from uint256, reverting on + * overflow (when the input is greater than largest uint32). + * + * Counterpart to Solidity's `uint32` operator. 
+ * + * Requirements: + * + * - input must fit into 32 bits + * + * _Available since v2.5._ + */ + function toUint32(uint256 value) internal pure returns (uint32) { + require(value <= type(uint32).max, "SafeCast: value doesn't fit in 32 bits"); + return uint32(value); + } + + /** + * @dev Returns the downcasted uint24 from uint256, reverting on + * overflow (when the input is greater than largest uint24). + * + * Counterpart to Solidity's `uint24` operator. + * + * Requirements: + * + * - input must fit into 24 bits + * + * _Available since v4.7._ + */ + function toUint24(uint256 value) internal pure returns (uint24) { + require(value <= type(uint24).max, "SafeCast: value doesn't fit in 24 bits"); + return uint24(value); + } + + /** + * @dev Returns the downcasted uint16 from uint256, reverting on + * overflow (when the input is greater than largest uint16). + * + * Counterpart to Solidity's `uint16` operator. + * + * Requirements: + * + * - input must fit into 16 bits + * + * _Available since v2.5._ + */ + function toUint16(uint256 value) internal pure returns (uint16) { + require(value <= type(uint16).max, "SafeCast: value doesn't fit in 16 bits"); + return uint16(value); + } + + /** + * @dev Returns the downcasted uint8 from uint256, reverting on + * overflow (when the input is greater than largest uint8). + * + * Counterpart to Solidity's `uint8` operator. + * + * Requirements: + * + * - input must fit into 8 bits + * + * _Available since v2.5._ + */ + function toUint8(uint256 value) internal pure returns (uint8) { + require(value <= type(uint8).max, "SafeCast: value doesn't fit in 8 bits"); + return uint8(value); + } + + /** + * @dev Converts a signed int256 into an unsigned uint256. + * + * Requirements: + * + * - input must be greater than or equal to 0. 
+ * + * _Available since v3.0._ + */ + function toUint256(int256 value) internal pure returns (uint256) { + require(value >= 0, "SafeCast: value must be positive"); + return uint256(value); + } + + /** + * @dev Returns the downcasted int248 from int256, reverting on + * overflow (when the input is less than smallest int248 or + * greater than largest int248). + * + * Counterpart to Solidity's `int248` operator. + * + * Requirements: + * + * - input must fit into 248 bits + * + * _Available since v4.7._ + */ + function toInt248(int256 value) internal pure returns (int248 downcasted) { + downcasted = int248(value); + require(downcasted == value, "SafeCast: value doesn't fit in 248 bits"); + } + + /** + * @dev Returns the downcasted int240 from int256, reverting on + * overflow (when the input is less than smallest int240 or + * greater than largest int240). + * + * Counterpart to Solidity's `int240` operator. + * + * Requirements: + * + * - input must fit into 240 bits + * + * _Available since v4.7._ + */ + function toInt240(int256 value) internal pure returns (int240 downcasted) { + downcasted = int240(value); + require(downcasted == value, "SafeCast: value doesn't fit in 240 bits"); + } + + /** + * @dev Returns the downcasted int232 from int256, reverting on + * overflow (when the input is less than smallest int232 or + * greater than largest int232). + * + * Counterpart to Solidity's `int232` operator. + * + * Requirements: + * + * - input must fit into 232 bits + * + * _Available since v4.7._ + */ + function toInt232(int256 value) internal pure returns (int232 downcasted) { + downcasted = int232(value); + require(downcasted == value, "SafeCast: value doesn't fit in 232 bits"); + } + + /** + * @dev Returns the downcasted int224 from int256, reverting on + * overflow (when the input is less than smallest int224 or + * greater than largest int224). + * + * Counterpart to Solidity's `int224` operator. 
+ * + * Requirements: + * + * - input must fit into 224 bits + * + * _Available since v4.7._ + */ + function toInt224(int256 value) internal pure returns (int224 downcasted) { + downcasted = int224(value); + require(downcasted == value, "SafeCast: value doesn't fit in 224 bits"); + } + + /** + * @dev Returns the downcasted int216 from int256, reverting on + * overflow (when the input is less than smallest int216 or + * greater than largest int216). + * + * Counterpart to Solidity's `int216` operator. + * + * Requirements: + * + * - input must fit into 216 bits + * + * _Available since v4.7._ + */ + function toInt216(int256 value) internal pure returns (int216 downcasted) { + downcasted = int216(value); + require(downcasted == value, "SafeCast: value doesn't fit in 216 bits"); + } + + /** + * @dev Returns the downcasted int208 from int256, reverting on + * overflow (when the input is less than smallest int208 or + * greater than largest int208). + * + * Counterpart to Solidity's `int208` operator. + * + * Requirements: + * + * - input must fit into 208 bits + * + * _Available since v4.7._ + */ + function toInt208(int256 value) internal pure returns (int208 downcasted) { + downcasted = int208(value); + require(downcasted == value, "SafeCast: value doesn't fit in 208 bits"); + } + + /** + * @dev Returns the downcasted int200 from int256, reverting on + * overflow (when the input is less than smallest int200 or + * greater than largest int200). + * + * Counterpart to Solidity's `int200` operator. + * + * Requirements: + * + * - input must fit into 200 bits + * + * _Available since v4.7._ + */ + function toInt200(int256 value) internal pure returns (int200 downcasted) { + downcasted = int200(value); + require(downcasted == value, "SafeCast: value doesn't fit in 200 bits"); + } + + /** + * @dev Returns the downcasted int192 from int256, reverting on + * overflow (when the input is less than smallest int192 or + * greater than largest int192). 
+ * + * Counterpart to Solidity's `int192` operator. + * + * Requirements: + * + * - input must fit into 192 bits + * + * _Available since v4.7._ + */ + function toInt192(int256 value) internal pure returns (int192 downcasted) { + downcasted = int192(value); + require(downcasted == value, "SafeCast: value doesn't fit in 192 bits"); + } + + /** + * @dev Returns the downcasted int184 from int256, reverting on + * overflow (when the input is less than smallest int184 or + * greater than largest int184). + * + * Counterpart to Solidity's `int184` operator. + * + * Requirements: + * + * - input must fit into 184 bits + * + * _Available since v4.7._ + */ + function toInt184(int256 value) internal pure returns (int184 downcasted) { + downcasted = int184(value); + require(downcasted == value, "SafeCast: value doesn't fit in 184 bits"); + } + + /** + * @dev Returns the downcasted int176 from int256, reverting on + * overflow (when the input is less than smallest int176 or + * greater than largest int176). + * + * Counterpart to Solidity's `int176` operator. + * + * Requirements: + * + * - input must fit into 176 bits + * + * _Available since v4.7._ + */ + function toInt176(int256 value) internal pure returns (int176 downcasted) { + downcasted = int176(value); + require(downcasted == value, "SafeCast: value doesn't fit in 176 bits"); + } + + /** + * @dev Returns the downcasted int168 from int256, reverting on + * overflow (when the input is less than smallest int168 or + * greater than largest int168). + * + * Counterpart to Solidity's `int168` operator. 
+ * + * Requirements: + * + * - input must fit into 168 bits + * + * _Available since v4.7._ + */ + function toInt168(int256 value) internal pure returns (int168 downcasted) { + downcasted = int168(value); + require(downcasted == value, "SafeCast: value doesn't fit in 168 bits"); + } + + /** + * @dev Returns the downcasted int160 from int256, reverting on + * overflow (when the input is less than smallest int160 or + * greater than largest int160). + * + * Counterpart to Solidity's `int160` operator. + * + * Requirements: + * + * - input must fit into 160 bits + * + * _Available since v4.7._ + */ + function toInt160(int256 value) internal pure returns (int160 downcasted) { + downcasted = int160(value); + require(downcasted == value, "SafeCast: value doesn't fit in 160 bits"); + } + + /** + * @dev Returns the downcasted int152 from int256, reverting on + * overflow (when the input is less than smallest int152 or + * greater than largest int152). + * + * Counterpart to Solidity's `int152` operator. + * + * Requirements: + * + * - input must fit into 152 bits + * + * _Available since v4.7._ + */ + function toInt152(int256 value) internal pure returns (int152 downcasted) { + downcasted = int152(value); + require(downcasted == value, "SafeCast: value doesn't fit in 152 bits"); + } + + /** + * @dev Returns the downcasted int144 from int256, reverting on + * overflow (when the input is less than smallest int144 or + * greater than largest int144). + * + * Counterpart to Solidity's `int144` operator. + * + * Requirements: + * + * - input must fit into 144 bits + * + * _Available since v4.7._ + */ + function toInt144(int256 value) internal pure returns (int144 downcasted) { + downcasted = int144(value); + require(downcasted == value, "SafeCast: value doesn't fit in 144 bits"); + } + + /** + * @dev Returns the downcasted int136 from int256, reverting on + * overflow (when the input is less than smallest int136 or + * greater than largest int136). 
+ * + * Counterpart to Solidity's `int136` operator. + * + * Requirements: + * + * - input must fit into 136 bits + * + * _Available since v4.7._ + */ + function toInt136(int256 value) internal pure returns (int136 downcasted) { + downcasted = int136(value); + require(downcasted == value, "SafeCast: value doesn't fit in 136 bits"); + } + + /** + * @dev Returns the downcasted int128 from int256, reverting on + * overflow (when the input is less than smallest int128 or + * greater than largest int128). + * + * Counterpart to Solidity's `int128` operator. + * + * Requirements: + * + * - input must fit into 128 bits + * + * _Available since v3.1._ + */ + function toInt128(int256 value) internal pure returns (int128 downcasted) { + downcasted = int128(value); + require(downcasted == value, "SafeCast: value doesn't fit in 128 bits"); + } + + /** + * @dev Returns the downcasted int120 from int256, reverting on + * overflow (when the input is less than smallest int120 or + * greater than largest int120). + * + * Counterpart to Solidity's `int120` operator. + * + * Requirements: + * + * - input must fit into 120 bits + * + * _Available since v4.7._ + */ + function toInt120(int256 value) internal pure returns (int120 downcasted) { + downcasted = int120(value); + require(downcasted == value, "SafeCast: value doesn't fit in 120 bits"); + } + + /** + * @dev Returns the downcasted int112 from int256, reverting on + * overflow (when the input is less than smallest int112 or + * greater than largest int112). + * + * Counterpart to Solidity's `int112` operator. 
+ * + * Requirements: + * + * - input must fit into 112 bits + * + * _Available since v4.7._ + */ + function toInt112(int256 value) internal pure returns (int112 downcasted) { + downcasted = int112(value); + require(downcasted == value, "SafeCast: value doesn't fit in 112 bits"); + } + + /** + * @dev Returns the downcasted int104 from int256, reverting on + * overflow (when the input is less than smallest int104 or + * greater than largest int104). + * + * Counterpart to Solidity's `int104` operator. + * + * Requirements: + * + * - input must fit into 104 bits + * + * _Available since v4.7._ + */ + function toInt104(int256 value) internal pure returns (int104 downcasted) { + downcasted = int104(value); + require(downcasted == value, "SafeCast: value doesn't fit in 104 bits"); + } + + /** + * @dev Returns the downcasted int96 from int256, reverting on + * overflow (when the input is less than smallest int96 or + * greater than largest int96). + * + * Counterpart to Solidity's `int96` operator. + * + * Requirements: + * + * - input must fit into 96 bits + * + * _Available since v4.7._ + */ + function toInt96(int256 value) internal pure returns (int96 downcasted) { + downcasted = int96(value); + require(downcasted == value, "SafeCast: value doesn't fit in 96 bits"); + } + + /** + * @dev Returns the downcasted int88 from int256, reverting on + * overflow (when the input is less than smallest int88 or + * greater than largest int88). + * + * Counterpart to Solidity's `int88` operator. + * + * Requirements: + * + * - input must fit into 88 bits + * + * _Available since v4.7._ + */ + function toInt88(int256 value) internal pure returns (int88 downcasted) { + downcasted = int88(value); + require(downcasted == value, "SafeCast: value doesn't fit in 88 bits"); + } + + /** + * @dev Returns the downcasted int80 from int256, reverting on + * overflow (when the input is less than smallest int80 or + * greater than largest int80). 
+ * + * Counterpart to Solidity's `int80` operator. + * + * Requirements: + * + * - input must fit into 80 bits + * + * _Available since v4.7._ + */ + function toInt80(int256 value) internal pure returns (int80 downcasted) { + downcasted = int80(value); + require(downcasted == value, "SafeCast: value doesn't fit in 80 bits"); + } + + /** + * @dev Returns the downcasted int72 from int256, reverting on + * overflow (when the input is less than smallest int72 or + * greater than largest int72). + * + * Counterpart to Solidity's `int72` operator. + * + * Requirements: + * + * - input must fit into 72 bits + * + * _Available since v4.7._ + */ + function toInt72(int256 value) internal pure returns (int72 downcasted) { + downcasted = int72(value); + require(downcasted == value, "SafeCast: value doesn't fit in 72 bits"); + } + + /** + * @dev Returns the downcasted int64 from int256, reverting on + * overflow (when the input is less than smallest int64 or + * greater than largest int64). + * + * Counterpart to Solidity's `int64` operator. + * + * Requirements: + * + * - input must fit into 64 bits + * + * _Available since v3.1._ + */ + function toInt64(int256 value) internal pure returns (int64 downcasted) { + downcasted = int64(value); + require(downcasted == value, "SafeCast: value doesn't fit in 64 bits"); + } + + /** + * @dev Returns the downcasted int56 from int256, reverting on + * overflow (when the input is less than smallest int56 or + * greater than largest int56). + * + * Counterpart to Solidity's `int56` operator. + * + * Requirements: + * + * - input must fit into 56 bits + * + * _Available since v4.7._ + */ + function toInt56(int256 value) internal pure returns (int56 downcasted) { + downcasted = int56(value); + require(downcasted == value, "SafeCast: value doesn't fit in 56 bits"); + } + + /** + * @dev Returns the downcasted int48 from int256, reverting on + * overflow (when the input is less than smallest int48 or + * greater than largest int48). 
+ * + * Counterpart to Solidity's `int48` operator. + * + * Requirements: + * + * - input must fit into 48 bits + * + * _Available since v4.7._ + */ + function toInt48(int256 value) internal pure returns (int48 downcasted) { + downcasted = int48(value); + require(downcasted == value, "SafeCast: value doesn't fit in 48 bits"); + } + + /** + * @dev Returns the downcasted int40 from int256, reverting on + * overflow (when the input is less than smallest int40 or + * greater than largest int40). + * + * Counterpart to Solidity's `int40` operator. + * + * Requirements: + * + * - input must fit into 40 bits + * + * _Available since v4.7._ + */ + function toInt40(int256 value) internal pure returns (int40 downcasted) { + downcasted = int40(value); + require(downcasted == value, "SafeCast: value doesn't fit in 40 bits"); + } + + /** + * @dev Returns the downcasted int32 from int256, reverting on + * overflow (when the input is less than smallest int32 or + * greater than largest int32). + * + * Counterpart to Solidity's `int32` operator. + * + * Requirements: + * + * - input must fit into 32 bits + * + * _Available since v3.1._ + */ + function toInt32(int256 value) internal pure returns (int32 downcasted) { + downcasted = int32(value); + require(downcasted == value, "SafeCast: value doesn't fit in 32 bits"); + } + + /** + * @dev Returns the downcasted int24 from int256, reverting on + * overflow (when the input is less than smallest int24 or + * greater than largest int24). + * + * Counterpart to Solidity's `int24` operator. + * + * Requirements: + * + * - input must fit into 24 bits + * + * _Available since v4.7._ + */ + function toInt24(int256 value) internal pure returns (int24 downcasted) { + downcasted = int24(value); + require(downcasted == value, "SafeCast: value doesn't fit in 24 bits"); + } + + /** + * @dev Returns the downcasted int16 from int256, reverting on + * overflow (when the input is less than smallest int16 or + * greater than largest int16). 
+ * + * Counterpart to Solidity's `int16` operator. + * + * Requirements: + * + * - input must fit into 16 bits + * + * _Available since v3.1._ + */ + function toInt16(int256 value) internal pure returns (int16 downcasted) { + downcasted = int16(value); + require(downcasted == value, "SafeCast: value doesn't fit in 16 bits"); + } + + /** + * @dev Returns the downcasted int8 from int256, reverting on + * overflow (when the input is less than smallest int8 or + * greater than largest int8). + * + * Counterpart to Solidity's `int8` operator. + * + * Requirements: + * + * - input must fit into 8 bits + * + * _Available since v3.1._ + */ + function toInt8(int256 value) internal pure returns (int8 downcasted) { + downcasted = int8(value); + require(downcasted == value, "SafeCast: value doesn't fit in 8 bits"); + } + + /** + * @dev Converts an unsigned uint256 into a signed int256. + * + * Requirements: + * + * - input must be less than or equal to maxInt256. + * + * _Available since v3.0._ + */ + function toInt256(uint256 value) internal pure returns (int256) { + // Note: Unsafe cast below is okay because `type(int256).max` is guaranteed to be positive + require(value <= uint256(type(int256).max), "SafeCast: value doesn't fit in an int256"); + return int256(value); + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SignedMath.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SignedMath.sol new file mode 100644 index 00000000..93524bb7 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/math/SignedMath.sol @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (utils/math/SignedMath.sol) + +pragma solidity ^0.8.0; + +/** + * @dev Standard signed math utilities missing in the Solidity language. + */ +library SignedMath { + /** + * @dev Returns the largest of two signed numbers. 
+ */ + function max(int256 a, int256 b) internal pure returns (int256) { + return a > b ? a : b; + } + + /** + * @dev Returns the smallest of two signed numbers. + */ + function min(int256 a, int256 b) internal pure returns (int256) { + return a < b ? a : b; + } + + /** + * @dev Returns the average of two signed numbers without overflow. + * The result is rounded towards zero. + */ + function average(int256 a, int256 b) internal pure returns (int256) { + // Formula from the book "Hacker's Delight" + int256 x = (a & b) + ((a ^ b) >> 1); + return x + (int256(uint256(x) >> 255) & (a ^ b)); + } + + /** + * @dev Returns the absolute unsigned value of a signed value. + */ + function abs(int256 n) internal pure returns (uint256) { + unchecked { + // must be unchecked in order to support `n = type(int256).min` + return uint256(n >= 0 ? n : -n); + } + } +} diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableMap.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableMap.sol new file mode 100644 index 00000000..7f4e9115 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableMap.sol @@ -0,0 +1,530 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (utils/structs/EnumerableMap.sol) +// This file was procedurally generated from scripts/generate/templates/EnumerableMap.js. + +pragma solidity ^0.8.0; + +import "./EnumerableSet.sol"; + +/** + * @dev Library for managing an enumerable variant of Solidity's + * https://solidity.readthedocs.io/en/latest/types.html#mapping-types[`mapping`] + * type. + * + * Maps have the following properties: + * + * - Entries are added, removed, and checked for existence in constant time + * (O(1)). + * - Entries are enumerated in O(n). No guarantees are made on the ordering. 
+ * + * ``` + * contract Example { + * // Add the library methods + * using EnumerableMap for EnumerableMap.UintToAddressMap; + * + * // Declare a set state variable + * EnumerableMap.UintToAddressMap private myMap; + * } + * ``` + * + * The following map types are supported: + * + * - `uint256 -> address` (`UintToAddressMap`) since v3.0.0 + * - `address -> uint256` (`AddressToUintMap`) since v4.6.0 + * - `bytes32 -> bytes32` (`Bytes32ToBytes32Map`) since v4.6.0 + * - `uint256 -> uint256` (`UintToUintMap`) since v4.7.0 + * - `bytes32 -> uint256` (`Bytes32ToUintMap`) since v4.7.0 + * + * [WARNING] + * ==== + * Trying to delete such a structure from storage will likely result in data corruption, rendering the structure + * unusable. + * See https://github.com/ethereum/solidity/pull/11843[ethereum/solidity#11843] for more info. + * + * In order to clean an EnumerableMap, you can either remove all elements one by one or create a fresh instance using an + * array of EnumerableMap. + * ==== + */ +library EnumerableMap { + using EnumerableSet for EnumerableSet.Bytes32Set; + + // To implement this library for multiple types with as little code + // repetition as possible, we write it in terms of a generic Map type with + // bytes32 keys and values. + // The Map implementation uses private functions, and user-facing + // implementations (such as Uint256ToAddressMap) are just wrappers around + // the underlying Map. + // This means that we can only create new EnumerableMaps for types that fit + // in bytes32. + + struct Bytes32ToBytes32Map { + // Storage of keys + EnumerableSet.Bytes32Set _keys; + mapping(bytes32 => bytes32) _values; + } + + /** + * @dev Adds a key-value pair to a map, or updates the value for an existing + * key. O(1). + * + * Returns true if the key was added to the map, that is if it was not + * already present. 
+ */ + function set( + Bytes32ToBytes32Map storage map, + bytes32 key, + bytes32 value + ) internal returns (bool) { + map._values[key] = value; + return map._keys.add(key); + } + + /** + * @dev Removes a key-value pair from a map. O(1). + * + * Returns true if the key was removed from the map, that is if it was present. + */ + function remove(Bytes32ToBytes32Map storage map, bytes32 key) internal returns (bool) { + delete map._values[key]; + return map._keys.remove(key); + } + + /** + * @dev Returns true if the key is in the map. O(1). + */ + function contains(Bytes32ToBytes32Map storage map, bytes32 key) internal view returns (bool) { + return map._keys.contains(key); + } + + /** + * @dev Returns the number of key-value pairs in the map. O(1). + */ + function length(Bytes32ToBytes32Map storage map) internal view returns (uint256) { + return map._keys.length(); + } + + /** + * @dev Returns the key-value pair stored at position `index` in the map. O(1). + * + * Note that there are no guarantees on the ordering of entries inside the + * array, and it may change when more entries are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(Bytes32ToBytes32Map storage map, uint256 index) internal view returns (bytes32, bytes32) { + bytes32 key = map._keys.at(index); + return (key, map._values[key]); + } + + /** + * @dev Tries to returns the value associated with `key`. O(1). + * Does not revert if `key` is not in the map. + */ + function tryGet(Bytes32ToBytes32Map storage map, bytes32 key) internal view returns (bool, bytes32) { + bytes32 value = map._values[key]; + if (value == bytes32(0)) { + return (contains(map, key), bytes32(0)); + } else { + return (true, value); + } + } + + /** + * @dev Returns the value associated with `key`. O(1). + * + * Requirements: + * + * - `key` must be in the map. 
+ */ + function get(Bytes32ToBytes32Map storage map, bytes32 key) internal view returns (bytes32) { + bytes32 value = map._values[key]; + require(value != 0 || contains(map, key), "EnumerableMap: nonexistent key"); + return value; + } + + /** + * @dev Same as {get}, with a custom error message when `key` is not in the map. + * + * CAUTION: This function is deprecated because it requires allocating memory for the error + * message unnecessarily. For custom revert reasons use {tryGet}. + */ + function get( + Bytes32ToBytes32Map storage map, + bytes32 key, + string memory errorMessage + ) internal view returns (bytes32) { + bytes32 value = map._values[key]; + require(value != 0 || contains(map, key), errorMessage); + return value; + } + + // UintToUintMap + + struct UintToUintMap { + Bytes32ToBytes32Map _inner; + } + + /** + * @dev Adds a key-value pair to a map, or updates the value for an existing + * key. O(1). + * + * Returns true if the key was added to the map, that is if it was not + * already present. + */ + function set( + UintToUintMap storage map, + uint256 key, + uint256 value + ) internal returns (bool) { + return set(map._inner, bytes32(key), bytes32(value)); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the key was removed from the map, that is if it was present. + */ + function remove(UintToUintMap storage map, uint256 key) internal returns (bool) { + return remove(map._inner, bytes32(key)); + } + + /** + * @dev Returns true if the key is in the map. O(1). + */ + function contains(UintToUintMap storage map, uint256 key) internal view returns (bool) { + return contains(map._inner, bytes32(key)); + } + + /** + * @dev Returns the number of elements in the map. O(1). + */ + function length(UintToUintMap storage map) internal view returns (uint256) { + return length(map._inner); + } + + /** + * @dev Returns the element stored at position `index` in the set. O(1). 
+ * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(UintToUintMap storage map, uint256 index) internal view returns (uint256, uint256) { + (bytes32 key, bytes32 value) = at(map._inner, index); + return (uint256(key), uint256(value)); + } + + /** + * @dev Tries to returns the value associated with `key`. O(1). + * Does not revert if `key` is not in the map. + */ + function tryGet(UintToUintMap storage map, uint256 key) internal view returns (bool, uint256) { + (bool success, bytes32 value) = tryGet(map._inner, bytes32(key)); + return (success, uint256(value)); + } + + /** + * @dev Returns the value associated with `key`. O(1). + * + * Requirements: + * + * - `key` must be in the map. + */ + function get(UintToUintMap storage map, uint256 key) internal view returns (uint256) { + return uint256(get(map._inner, bytes32(key))); + } + + /** + * @dev Same as {get}, with a custom error message when `key` is not in the map. + * + * CAUTION: This function is deprecated because it requires allocating memory for the error + * message unnecessarily. For custom revert reasons use {tryGet}. + */ + function get( + UintToUintMap storage map, + uint256 key, + string memory errorMessage + ) internal view returns (uint256) { + return uint256(get(map._inner, bytes32(key), errorMessage)); + } + + // UintToAddressMap + + struct UintToAddressMap { + Bytes32ToBytes32Map _inner; + } + + /** + * @dev Adds a key-value pair to a map, or updates the value for an existing + * key. O(1). + * + * Returns true if the key was added to the map, that is if it was not + * already present. 
+ */ + function set( + UintToAddressMap storage map, + uint256 key, + address value + ) internal returns (bool) { + return set(map._inner, bytes32(key), bytes32(uint256(uint160(value)))); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the key was removed from the map, that is if it was present. + */ + function remove(UintToAddressMap storage map, uint256 key) internal returns (bool) { + return remove(map._inner, bytes32(key)); + } + + /** + * @dev Returns true if the key is in the map. O(1). + */ + function contains(UintToAddressMap storage map, uint256 key) internal view returns (bool) { + return contains(map._inner, bytes32(key)); + } + + /** + * @dev Returns the number of elements in the map. O(1). + */ + function length(UintToAddressMap storage map) internal view returns (uint256) { + return length(map._inner); + } + + /** + * @dev Returns the element stored at position `index` in the set. O(1). + * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(UintToAddressMap storage map, uint256 index) internal view returns (uint256, address) { + (bytes32 key, bytes32 value) = at(map._inner, index); + return (uint256(key), address(uint160(uint256(value)))); + } + + /** + * @dev Tries to returns the value associated with `key`. O(1). + * Does not revert if `key` is not in the map. + */ + function tryGet(UintToAddressMap storage map, uint256 key) internal view returns (bool, address) { + (bool success, bytes32 value) = tryGet(map._inner, bytes32(key)); + return (success, address(uint160(uint256(value)))); + } + + /** + * @dev Returns the value associated with `key`. O(1). + * + * Requirements: + * + * - `key` must be in the map. 
+ */ + function get(UintToAddressMap storage map, uint256 key) internal view returns (address) { + return address(uint160(uint256(get(map._inner, bytes32(key))))); + } + + /** + * @dev Same as {get}, with a custom error message when `key` is not in the map. + * + * CAUTION: This function is deprecated because it requires allocating memory for the error + * message unnecessarily. For custom revert reasons use {tryGet}. + */ + function get( + UintToAddressMap storage map, + uint256 key, + string memory errorMessage + ) internal view returns (address) { + return address(uint160(uint256(get(map._inner, bytes32(key), errorMessage)))); + } + + // AddressToUintMap + + struct AddressToUintMap { + Bytes32ToBytes32Map _inner; + } + + /** + * @dev Adds a key-value pair to a map, or updates the value for an existing + * key. O(1). + * + * Returns true if the key was added to the map, that is if it was not + * already present. + */ + function set( + AddressToUintMap storage map, + address key, + uint256 value + ) internal returns (bool) { + return set(map._inner, bytes32(uint256(uint160(key))), bytes32(value)); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the key was removed from the map, that is if it was present. + */ + function remove(AddressToUintMap storage map, address key) internal returns (bool) { + return remove(map._inner, bytes32(uint256(uint160(key)))); + } + + /** + * @dev Returns true if the key is in the map. O(1). + */ + function contains(AddressToUintMap storage map, address key) internal view returns (bool) { + return contains(map._inner, bytes32(uint256(uint160(key)))); + } + + /** + * @dev Returns the number of elements in the map. O(1). + */ + function length(AddressToUintMap storage map) internal view returns (uint256) { + return length(map._inner); + } + + /** + * @dev Returns the element stored at position `index` in the set. O(1). 
+ * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(AddressToUintMap storage map, uint256 index) internal view returns (address, uint256) { + (bytes32 key, bytes32 value) = at(map._inner, index); + return (address(uint160(uint256(key))), uint256(value)); + } + + /** + * @dev Tries to returns the value associated with `key`. O(1). + * Does not revert if `key` is not in the map. + */ + function tryGet(AddressToUintMap storage map, address key) internal view returns (bool, uint256) { + (bool success, bytes32 value) = tryGet(map._inner, bytes32(uint256(uint160(key)))); + return (success, uint256(value)); + } + + /** + * @dev Returns the value associated with `key`. O(1). + * + * Requirements: + * + * - `key` must be in the map. + */ + function get(AddressToUintMap storage map, address key) internal view returns (uint256) { + return uint256(get(map._inner, bytes32(uint256(uint160(key))))); + } + + /** + * @dev Same as {get}, with a custom error message when `key` is not in the map. + * + * CAUTION: This function is deprecated because it requires allocating memory for the error + * message unnecessarily. For custom revert reasons use {tryGet}. + */ + function get( + AddressToUintMap storage map, + address key, + string memory errorMessage + ) internal view returns (uint256) { + return uint256(get(map._inner, bytes32(uint256(uint160(key))), errorMessage)); + } + + // Bytes32ToUintMap + + struct Bytes32ToUintMap { + Bytes32ToBytes32Map _inner; + } + + /** + * @dev Adds a key-value pair to a map, or updates the value for an existing + * key. O(1). + * + * Returns true if the key was added to the map, that is if it was not + * already present. 
+ */ + function set( + Bytes32ToUintMap storage map, + bytes32 key, + uint256 value + ) internal returns (bool) { + return set(map._inner, key, bytes32(value)); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the key was removed from the map, that is if it was present. + */ + function remove(Bytes32ToUintMap storage map, bytes32 key) internal returns (bool) { + return remove(map._inner, key); + } + + /** + * @dev Returns true if the key is in the map. O(1). + */ + function contains(Bytes32ToUintMap storage map, bytes32 key) internal view returns (bool) { + return contains(map._inner, key); + } + + /** + * @dev Returns the number of elements in the map. O(1). + */ + function length(Bytes32ToUintMap storage map) internal view returns (uint256) { + return length(map._inner); + } + + /** + * @dev Returns the element stored at position `index` in the set. O(1). + * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(Bytes32ToUintMap storage map, uint256 index) internal view returns (bytes32, uint256) { + (bytes32 key, bytes32 value) = at(map._inner, index); + return (key, uint256(value)); + } + + /** + * @dev Tries to returns the value associated with `key`. O(1). + * Does not revert if `key` is not in the map. + */ + function tryGet(Bytes32ToUintMap storage map, bytes32 key) internal view returns (bool, uint256) { + (bool success, bytes32 value) = tryGet(map._inner, key); + return (success, uint256(value)); + } + + /** + * @dev Returns the value associated with `key`. O(1). + * + * Requirements: + * + * - `key` must be in the map. + */ + function get(Bytes32ToUintMap storage map, bytes32 key) internal view returns (uint256) { + return uint256(get(map._inner, key)); + } + + /** + * @dev Same as {get}, with a custom error message when `key` is not in the map. 
+ * + * CAUTION: This function is deprecated because it requires allocating memory for the error + * message unnecessarily. For custom revert reasons use {tryGet}. + */ + function get( + Bytes32ToUintMap storage map, + bytes32 key, + string memory errorMessage + ) internal view returns (uint256) { + return uint256(get(map._inner, key, errorMessage)); + } +} \ No newline at end of file diff --git a/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableSet.sol b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableSet.sol new file mode 100644 index 00000000..0bcbefe7 --- /dev/null +++ b/contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/utils/structs/EnumerableSet.sol @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: MIT +// OpenZeppelin Contracts (last updated v4.8.0) (utils/structs/EnumerableSet.sol) +// This file was procedurally generated from scripts/generate/templates/EnumerableSet.js. + +pragma solidity ^0.8.0; + +/** + * @dev Library for managing + * https://en.wikipedia.org/wiki/Set_(abstract_data_type)[sets] of primitive + * types. + * + * Sets have the following properties: + * + * - Elements are added, removed, and checked for existence in constant time + * (O(1)). + * - Elements are enumerated in O(n). No guarantees are made on the ordering. + * + * ``` + * contract Example { + * // Add the library methods + * using EnumerableSet for EnumerableSet.AddressSet; + * + * // Declare a set state variable + * EnumerableSet.AddressSet private mySet; + * } + * ``` + * + * As of v3.3.0, sets of type `bytes32` (`Bytes32Set`), `address` (`AddressSet`) + * and `uint256` (`UintSet`) are supported. + * + * [WARNING] + * ==== + * Trying to delete such a structure from storage will likely result in data corruption, rendering the structure + * unusable. + * See https://github.com/ethereum/solidity/pull/11843[ethereum/solidity#11843] for more info. 
+ * + * In order to clean an EnumerableSet, you can either remove all elements one by one or create a fresh instance using an + * array of EnumerableSet. + * ==== + */ +library EnumerableSet { + // To implement this library for multiple types with as little code + // repetition as possible, we write it in terms of a generic Set type with + // bytes32 values. + // The Set implementation uses private functions, and user-facing + // implementations (such as AddressSet) are just wrappers around the + // underlying Set. + // This means that we can only create new EnumerableSets for types that fit + // in bytes32. + + struct Set { + // Storage of set values + bytes32[] _values; + // Position of the value in the `values` array, plus 1 because index 0 + // means a value is not in the set. + mapping(bytes32 => uint256) _indexes; + } + + /** + * @dev Add a value to a set. O(1). + * + * Returns true if the value was added to the set, that is if it was not + * already present. + */ + function _add(Set storage set, bytes32 value) private returns (bool) { + if (!_contains(set, value)) { + set._values.push(value); + // The value is stored at length-1, but we add 1 to all indexes + // and use 0 as a sentinel value + set._indexes[value] = set._values.length; + return true; + } else { + return false; + } + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the value was removed from the set, that is if it was + * present. + */ + function _remove(Set storage set, bytes32 value) private returns (bool) { + // We read and store the value's index to prevent multiple reads from the same storage slot + uint256 valueIndex = set._indexes[value]; + + if (valueIndex != 0) { + // Equivalent to contains(set, value) + // To delete an element from the _values array in O(1), we swap the element to delete with the last one in + // the array, and then remove the last element (sometimes called as 'swap and pop'). + // This modifies the order of the array, as noted in {at}. 
+ + uint256 toDeleteIndex = valueIndex - 1; + uint256 lastIndex = set._values.length - 1; + + if (lastIndex != toDeleteIndex) { + bytes32 lastValue = set._values[lastIndex]; + + // Move the last value to the index where the value to delete is + set._values[toDeleteIndex] = lastValue; + // Update the index for the moved value + set._indexes[lastValue] = valueIndex; // Replace lastValue's index to valueIndex + } + + // Delete the slot where the moved value was stored + set._values.pop(); + + // Delete the index for the deleted slot + delete set._indexes[value]; + + return true; + } else { + return false; + } + } + + /** + * @dev Returns true if the value is in the set. O(1). + */ + function _contains(Set storage set, bytes32 value) private view returns (bool) { + return set._indexes[value] != 0; + } + + /** + * @dev Returns the number of values on the set. O(1). + */ + function _length(Set storage set) private view returns (uint256) { + return set._values.length; + } + + /** + * @dev Returns the value stored at position `index` in the set. O(1). + * + * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function _at(Set storage set, uint256 index) private view returns (bytes32) { + return set._values[index]; + } + + /** + * @dev Return the entire set in an array + * + * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed + * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that + * this function has an unbounded cost, and using it as part of a state-changing function may render the function + * uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block. 
+ */ + function _values(Set storage set) private view returns (bytes32[] memory) { + return set._values; + } + + // Bytes32Set + + struct Bytes32Set { + Set _inner; + } + + /** + * @dev Add a value to a set. O(1). + * + * Returns true if the value was added to the set, that is if it was not + * already present. + */ + function add(Bytes32Set storage set, bytes32 value) internal returns (bool) { + return _add(set._inner, value); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the value was removed from the set, that is if it was + * present. + */ + function remove(Bytes32Set storage set, bytes32 value) internal returns (bool) { + return _remove(set._inner, value); + } + + /** + * @dev Returns true if the value is in the set. O(1). + */ + function contains(Bytes32Set storage set, bytes32 value) internal view returns (bool) { + return _contains(set._inner, value); + } + + /** + * @dev Returns the number of values in the set. O(1). + */ + function length(Bytes32Set storage set) internal view returns (uint256) { + return _length(set._inner); + } + + /** + * @dev Returns the value stored at position `index` in the set. O(1). + * + * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(Bytes32Set storage set, uint256 index) internal view returns (bytes32) { + return _at(set._inner, index); + } + + /** + * @dev Return the entire set in an array + * + * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed + * to mostly be used by view accessors that are queried without any gas fees. 
Developers should keep in mind that + * this function has an unbounded cost, and using it as part of a state-changing function may render the function + * uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block. + */ + function values(Bytes32Set storage set) internal view returns (bytes32[] memory) { + bytes32[] memory store = _values(set._inner); + bytes32[] memory result; + + /// @solidity memory-safe-assembly + assembly { + result := store + } + + return result; + } + + // AddressSet + + struct AddressSet { + Set _inner; + } + + /** + * @dev Add a value to a set. O(1). + * + * Returns true if the value was added to the set, that is if it was not + * already present. + */ + function add(AddressSet storage set, address value) internal returns (bool) { + return _add(set._inner, bytes32(uint256(uint160(value)))); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the value was removed from the set, that is if it was + * present. + */ + function remove(AddressSet storage set, address value) internal returns (bool) { + return _remove(set._inner, bytes32(uint256(uint160(value)))); + } + + /** + * @dev Returns true if the value is in the set. O(1). + */ + function contains(AddressSet storage set, address value) internal view returns (bool) { + return _contains(set._inner, bytes32(uint256(uint160(value)))); + } + + /** + * @dev Returns the number of values in the set. O(1). + */ + function length(AddressSet storage set) internal view returns (uint256) { + return _length(set._inner); + } + + /** + * @dev Returns the value stored at position `index` in the set. O(1). + * + * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. 
+ */ + function at(AddressSet storage set, uint256 index) internal view returns (address) { + return address(uint160(uint256(_at(set._inner, index)))); + } + + /** + * @dev Return the entire set in an array + * + * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed + * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that + * this function has an unbounded cost, and using it as part of a state-changing function may render the function + * uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block. + */ + function values(AddressSet storage set) internal view returns (address[] memory) { + bytes32[] memory store = _values(set._inner); + address[] memory result; + + /// @solidity memory-safe-assembly + assembly { + result := store + } + + return result; + } + + // UintSet + + struct UintSet { + Set _inner; + } + + /** + * @dev Add a value to a set. O(1). + * + * Returns true if the value was added to the set, that is if it was not + * already present. + */ + function add(UintSet storage set, uint256 value) internal returns (bool) { + return _add(set._inner, bytes32(value)); + } + + /** + * @dev Removes a value from a set. O(1). + * + * Returns true if the value was removed from the set, that is if it was + * present. + */ + function remove(UintSet storage set, uint256 value) internal returns (bool) { + return _remove(set._inner, bytes32(value)); + } + + /** + * @dev Returns true if the value is in the set. O(1). + */ + function contains(UintSet storage set, uint256 value) internal view returns (bool) { + return _contains(set._inner, bytes32(value)); + } + + /** + * @dev Returns the number of values in the set. O(1). + */ + function length(UintSet storage set) internal view returns (uint256) { + return _length(set._inner); + } + + /** + * @dev Returns the value stored at position `index` in the set. O(1). 
+ * + * Note that there are no guarantees on the ordering of values inside the + * array, and it may change when more values are added or removed. + * + * Requirements: + * + * - `index` must be strictly less than {length}. + */ + function at(UintSet storage set, uint256 index) internal view returns (uint256) { + return uint256(_at(set._inner, index)); + } + + /** + * @dev Return the entire set in an array + * + * WARNING: This operation will copy the entire storage to memory, which can be quite expensive. This is designed + * to mostly be used by view accessors that are queried without any gas fees. Developers should keep in mind that + * this function has an unbounded cost, and using it as part of a state-changing function may render the function + * uncallable if the set grows to a point where copying to memory consumes too much gas to fit in a block. + */ + function values(UintSet storage set) internal view returns (uint256[] memory) { + bytes32[] memory store = _values(set._inner); + uint256[] memory result; + + /// @solidity memory-safe-assembly + assembly { + result := store + } + + return result; + } +} diff --git a/contracts/src/v0.8/vendor/solidity-cborutils/v2.0.0/CBOR.sol b/contracts/src/v0.8/vendor/solidity-cborutils/v2.0.0/CBOR.sol new file mode 100644 index 00000000..5c111370 --- /dev/null +++ b/contracts/src/v0.8/vendor/solidity-cborutils/v2.0.0/CBOR.sol @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import "../../@ensdomains/buffer/v0.1.0/Buffer.sol"; + +/** +* @dev A library for populating CBOR encoded payload in Solidity. +* +* https://datatracker.ietf.org/doc/html/rfc7049 +* +* The library offers various write* and start* methods to encode values of different types. +* The resulted buffer can be obtained with data() method. +* Encoding of primitive types is staightforward, whereas encoding of sequences can result +* in an invalid CBOR if start/write/end flow is violated. 
+* For the purpose of gas saving, the library does not verify start/write/end flow internally, +* except for nested start/end pairs. +*/ + +library CBOR { + using Buffer for Buffer.buffer; + + struct CBORBuffer { + Buffer.buffer buf; + uint256 depth; + } + + uint8 private constant MAJOR_TYPE_INT = 0; + uint8 private constant MAJOR_TYPE_NEGATIVE_INT = 1; + uint8 private constant MAJOR_TYPE_BYTES = 2; + uint8 private constant MAJOR_TYPE_STRING = 3; + uint8 private constant MAJOR_TYPE_ARRAY = 4; + uint8 private constant MAJOR_TYPE_MAP = 5; + uint8 private constant MAJOR_TYPE_TAG = 6; + uint8 private constant MAJOR_TYPE_CONTENT_FREE = 7; + + uint8 private constant TAG_TYPE_BIGNUM = 2; + uint8 private constant TAG_TYPE_NEGATIVE_BIGNUM = 3; + + uint8 private constant CBOR_FALSE = 20; + uint8 private constant CBOR_TRUE = 21; + uint8 private constant CBOR_NULL = 22; + uint8 private constant CBOR_UNDEFINED = 23; + + function create(uint256 capacity) internal pure returns(CBORBuffer memory cbor) { + Buffer.init(cbor.buf, capacity); + cbor.depth = 0; + return cbor; + } + + function data(CBORBuffer memory buf) internal pure returns(bytes memory) { + require(buf.depth == 0, "Invalid CBOR"); + return buf.buf.buf; + } + + function writeUInt256(CBORBuffer memory buf, uint256 value) internal pure { + buf.buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_BIGNUM)); + writeBytes(buf, abi.encode(value)); + } + + function writeInt256(CBORBuffer memory buf, int256 value) internal pure { + if (value < 0) { + buf.buf.appendUint8( + uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_NEGATIVE_BIGNUM) + ); + writeBytes(buf, abi.encode(uint256(-1 - value))); + } else { + writeUInt256(buf, uint256(value)); + } + } + + function writeUInt64(CBORBuffer memory buf, uint64 value) internal pure { + writeFixedNumeric(buf, MAJOR_TYPE_INT, value); + } + + function writeInt64(CBORBuffer memory buf, int64 value) internal pure { + if(value >= 0) { + writeFixedNumeric(buf, MAJOR_TYPE_INT, uint64(value)); + } else{ 
+ writeFixedNumeric(buf, MAJOR_TYPE_NEGATIVE_INT, uint64(-1 - value)); + } + } + + function writeBytes(CBORBuffer memory buf, bytes memory value) internal pure { + writeFixedNumeric(buf, MAJOR_TYPE_BYTES, uint64(value.length)); + buf.buf.append(value); + } + + function writeString(CBORBuffer memory buf, string memory value) internal pure { + writeFixedNumeric(buf, MAJOR_TYPE_STRING, uint64(bytes(value).length)); + buf.buf.append(bytes(value)); + } + + function writeBool(CBORBuffer memory buf, bool value) internal pure { + writeContentFree(buf, value ? CBOR_TRUE : CBOR_FALSE); + } + + function writeNull(CBORBuffer memory buf) internal pure { + writeContentFree(buf, CBOR_NULL); + } + + function writeUndefined(CBORBuffer memory buf) internal pure { + writeContentFree(buf, CBOR_UNDEFINED); + } + + function startArray(CBORBuffer memory buf) internal pure { + writeIndefiniteLengthType(buf, MAJOR_TYPE_ARRAY); + buf.depth += 1; + } + + function startFixedArray(CBORBuffer memory buf, uint64 length) internal pure { + writeDefiniteLengthType(buf, MAJOR_TYPE_ARRAY, length); + } + + function startMap(CBORBuffer memory buf) internal pure { + writeIndefiniteLengthType(buf, MAJOR_TYPE_MAP); + buf.depth += 1; + } + + function startFixedMap(CBORBuffer memory buf, uint64 length) internal pure { + writeDefiniteLengthType(buf, MAJOR_TYPE_MAP, length); + } + + function endSequence(CBORBuffer memory buf) internal pure { + writeIndefiniteLengthType(buf, MAJOR_TYPE_CONTENT_FREE); + buf.depth -= 1; + } + + function writeKVString(CBORBuffer memory buf, string memory key, string memory value) internal pure { + writeString(buf, key); + writeString(buf, value); + } + + function writeKVBytes(CBORBuffer memory buf, string memory key, bytes memory value) internal pure { + writeString(buf, key); + writeBytes(buf, value); + } + + function writeKVUInt256(CBORBuffer memory buf, string memory key, uint256 value) internal pure { + writeString(buf, key); + writeUInt256(buf, value); + } + + function 
writeKVInt256(CBORBuffer memory buf, string memory key, int256 value) internal pure { + writeString(buf, key); + writeInt256(buf, value); + } + + function writeKVUInt64(CBORBuffer memory buf, string memory key, uint64 value) internal pure { + writeString(buf, key); + writeUInt64(buf, value); + } + + function writeKVInt64(CBORBuffer memory buf, string memory key, int64 value) internal pure { + writeString(buf, key); + writeInt64(buf, value); + } + + function writeKVBool(CBORBuffer memory buf, string memory key, bool value) internal pure { + writeString(buf, key); + writeBool(buf, value); + } + + function writeKVNull(CBORBuffer memory buf, string memory key) internal pure { + writeString(buf, key); + writeNull(buf); + } + + function writeKVUndefined(CBORBuffer memory buf, string memory key) internal pure { + writeString(buf, key); + writeUndefined(buf); + } + + function writeKVMap(CBORBuffer memory buf, string memory key) internal pure { + writeString(buf, key); + startMap(buf); + } + + function writeKVArray(CBORBuffer memory buf, string memory key) internal pure { + writeString(buf, key); + startArray(buf); + } + + function writeFixedNumeric( + CBORBuffer memory buf, + uint8 major, + uint64 value + ) private pure { + if (value <= 23) { + buf.buf.appendUint8(uint8((major << 5) | value)); + } else if (value <= 0xFF) { + buf.buf.appendUint8(uint8((major << 5) | 24)); + buf.buf.appendInt(value, 1); + } else if (value <= 0xFFFF) { + buf.buf.appendUint8(uint8((major << 5) | 25)); + buf.buf.appendInt(value, 2); + } else if (value <= 0xFFFFFFFF) { + buf.buf.appendUint8(uint8((major << 5) | 26)); + buf.buf.appendInt(value, 4); + } else { + buf.buf.appendUint8(uint8((major << 5) | 27)); + buf.buf.appendInt(value, 8); + } + } + + function writeIndefiniteLengthType(CBORBuffer memory buf, uint8 major) + private + pure + { + buf.buf.appendUint8(uint8((major << 5) | 31)); + } + + function writeDefiniteLengthType(CBORBuffer memory buf, uint8 major, uint64 length) + private + pure + 
{ + writeFixedNumeric(buf, major, length); + } + + function writeContentFree(CBORBuffer memory buf, uint8 value) private pure { + buf.buf.appendUint8(uint8((MAJOR_TYPE_CONTENT_FREE << 5) | value)); + } +} \ No newline at end of file diff --git a/contracts/src/v0.8/vrf/AuthorizedReceiver.sol b/contracts/src/v0.8/vrf/AuthorizedReceiver.sol new file mode 100644 index 00000000..a067fbf4 --- /dev/null +++ b/contracts/src/v0.8/vrf/AuthorizedReceiver.sol @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +import {EnumerableSet} from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; +import {IAuthorizedReceiver} from "./interfaces/IAuthorizedReceiver.sol"; + +abstract contract AuthorizedReceiver is IAuthorizedReceiver { + using EnumerableSet for EnumerableSet.AddressSet; + + event AuthorizedSendersChanged(address[] senders, address changedBy); + + error EmptySendersList(); + error UnauthorizedSender(); + error NotAllowedToSetSenders(); + + EnumerableSet.AddressSet private s_authorizedSenders; + address[] private s_authorizedSendersList; + + /** + * @notice Sets the fulfillment permission for a given node. Use `true` to allow, `false` to disallow. 
+ * @param senders The addresses of the authorized Plugin node + */ + function setAuthorizedSenders(address[] calldata senders) external override validateAuthorizedSenderSetter { + if (senders.length == 0) { + revert EmptySendersList(); + } + for (uint256 i = 0; i < s_authorizedSendersList.length; i++) { + s_authorizedSenders.remove(s_authorizedSendersList[i]); + } + for (uint256 i = 0; i < senders.length; i++) { + s_authorizedSenders.add(senders[i]); + } + s_authorizedSendersList = senders; + emit AuthorizedSendersChanged(senders, msg.sender); + } + + /** + * @notice Retrieve a list of authorized senders + * @return array of addresses + */ + function getAuthorizedSenders() public view override returns (address[] memory) { + return s_authorizedSendersList; + } + + /** + * @notice Use this to check if a node is authorized for fulfilling requests + * @param sender The address of the Plugin node + * @return The authorization status of the node + */ + function isAuthorizedSender(address sender) public view override returns (bool) { + return s_authorizedSenders.contains(sender); + } + + /** + * @notice customizable guard of who can update the authorized sender list + * @return bool whether sender can update authorized sender list + */ + function _canSetAuthorizedSenders() internal virtual returns (bool); + + /** + * @notice validates the sender is an authorized sender + */ + function _validateIsAuthorizedSender() internal view { + if (!isAuthorizedSender(msg.sender)) { + revert UnauthorizedSender(); + } + } + + /** + * @notice prevents non-authorized addresses from calling this method + */ + modifier validateAuthorizedSender() { + _validateIsAuthorizedSender(); + _; + } + + /** + * @notice prevents non-authorized addresses from calling this method + */ + modifier validateAuthorizedSenderSetter() { + if (!_canSetAuthorizedSenders()) { + revert NotAllowedToSetSenders(); + } + _; + } +} diff --git a/contracts/src/v0.8/vrf/BatchBlockhashStore.sol 
b/contracts/src/v0.8/vrf/BatchBlockhashStore.sol new file mode 100644 index 00000000..93b892ab --- /dev/null +++ b/contracts/src/v0.8/vrf/BatchBlockhashStore.sol @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: MIT +// solhint-disable-next-line one-contract-per-file +pragma solidity 0.8.6; + +import {ChainSpecificUtil} from "../ChainSpecificUtil.sol"; + +/** + * @title BatchBlockhashStore + * @notice The BatchBlockhashStore contract acts as a proxy to write many blockhashes to the + * provided BlockhashStore contract efficiently in a single transaction. This results + * in plenty of gas savings and higher throughput of blockhash storage, which is desirable + * in times of high network congestion. + */ +contract BatchBlockhashStore { + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + BlockhashStore public immutable BHS; + + constructor(address blockhashStoreAddr) { + BHS = BlockhashStore(blockhashStoreAddr); + } + + /** + * @notice stores blockhashes of the given block numbers in the configured blockhash store, assuming + * they are availble though the blockhash() instruction. + * @param blockNumbers the block numbers to store the blockhashes of. Must be available via the + * blockhash() instruction, otherwise this function call will revert. + */ + function store(uint256[] memory blockNumbers) public { + for (uint256 i = 0; i < blockNumbers.length; i++) { + // skip the block if it's not storeable, the caller will have to check + // after the transaction is mined to see if the blockhash was truly stored. + if (!_storeableBlock(blockNumbers[i])) { + continue; + } + BHS.store(blockNumbers[i]); + } + } + + /** + * @notice stores blockhashes after verifying blockheader of child/subsequent block + * @param blockNumbers the block numbers whose blockhashes should be stored, in decreasing order + * @param headers the rlp-encoded block headers of blockNumbers[i] + 1. 
+ */ + function storeVerifyHeader(uint256[] memory blockNumbers, bytes[] memory headers) public { + // solhint-disable-next-line custom-errors + require(blockNumbers.length == headers.length, "input array arg lengths mismatch"); + for (uint256 i = 0; i < blockNumbers.length; i++) { + BHS.storeVerifyHeader(blockNumbers[i], headers[i]); + } + } + + /** + * @notice retrieves blockhashes of all the given block numbers from the blockhash store, if available. + * @param blockNumbers array of block numbers to fetch blockhashes for + * @return blockhashes array of block hashes corresponding to each block number provided in the `blockNumbers` + * param. If the blockhash is not found, 0x0 is returned instead of the real blockhash, indicating + * that it is not in the blockhash store. + */ + function getBlockhashes(uint256[] memory blockNumbers) external view returns (bytes32[] memory) { + bytes32[] memory blockHashes = new bytes32[](blockNumbers.length); + for (uint256 i = 0; i < blockNumbers.length; i++) { + try BHS.getBlockhash(blockNumbers[i]) returns (bytes32 bh) { + blockHashes[i] = bh; + } catch Error(string memory /* reason */) { + blockHashes[i] = 0x0; + } + } + return blockHashes; + } + + /** + * @notice returns true if and only if the given block number's blockhash can be retrieved + * using the blockhash() instruction. + * @param blockNumber the block number to check if it's storeable with blockhash() + */ + function _storeableBlock(uint256 blockNumber) private view returns (bool) { + // handle edge case on simulated chains which possibly have < 256 blocks total. + return + ChainSpecificUtil._getBlockNumber() <= 256 ? 
true : blockNumber >= (ChainSpecificUtil._getBlockNumber() - 256); + } +} + +interface BlockhashStore { + function storeVerifyHeader(uint256 n, bytes memory header) external; + + function store(uint256 n) external; + + function getBlockhash(uint256 n) external view returns (bytes32); +} diff --git a/contracts/src/v0.8/vrf/BatchVRFCoordinatorV2.sol b/contracts/src/v0.8/vrf/BatchVRFCoordinatorV2.sol new file mode 100644 index 00000000..1ad5083e --- /dev/null +++ b/contracts/src/v0.8/vrf/BatchVRFCoordinatorV2.sol @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: MIT +// solhint-disable-next-line one-contract-per-file +pragma solidity 0.8.6; + +import {VRFTypes} from "./VRFTypes.sol"; + +/** + * @title BatchVRFCoordinatorV2 + * @notice The BatchVRFCoordinatorV2 contract acts as a proxy to write many random responses to the + * provided VRFCoordinatorV2 contract efficiently in a single transaction. + */ +contract BatchVRFCoordinatorV2 { + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + VRFCoordinatorV2 public immutable COORDINATOR; + + event ErrorReturned(uint256 indexed requestId, string reason); + event RawErrorReturned(uint256 indexed requestId, bytes lowLevelData); + + constructor(address coordinatorAddr) { + COORDINATOR = VRFCoordinatorV2(coordinatorAddr); + } + + /** + * @notice fulfills multiple randomness requests with the provided proofs and commitments. + * @param proofs the randomness proofs generated by the VRF provider. + * @param rcs the request commitments corresponding to the randomness proofs. 
+ */ + function fulfillRandomWords(VRFTypes.Proof[] memory proofs, VRFTypes.RequestCommitment[] memory rcs) external { + // solhint-disable-next-line custom-errors + require(proofs.length == rcs.length, "input array arg lengths mismatch"); + for (uint256 i = 0; i < proofs.length; i++) { + try COORDINATOR.fulfillRandomWords(proofs[i], rcs[i]) returns (uint96 /* payment */) { + continue; + } catch Error(string memory reason) { + uint256 requestId = _getRequestIdFromProof(proofs[i]); + emit ErrorReturned(requestId, reason); + } catch (bytes memory lowLevelData) { + uint256 requestId = _getRequestIdFromProof(proofs[i]); + emit RawErrorReturned(requestId, lowLevelData); + } + } + } + + /** + * @notice Returns the proving key hash associated with this public key. + * @param publicKey the key to return the hash of. + */ + function _hashOfKey(uint256[2] memory publicKey) internal pure returns (bytes32) { + return keccak256(abi.encode(publicKey)); + } + + /** + * @notice Returns the request ID of the request associated with the given proof. + * @param proof the VRF proof provided by the VRF oracle. 
+ */ + function _getRequestIdFromProof(VRFTypes.Proof memory proof) internal pure returns (uint256) { + bytes32 keyHash = _hashOfKey(proof.pk); + return uint256(keccak256(abi.encode(keyHash, proof.seed))); + } +} + +interface VRFCoordinatorV2 { + function fulfillRandomWords( + VRFTypes.Proof memory proof, + VRFTypes.RequestCommitment memory rc + ) external returns (uint96); +} diff --git a/contracts/src/v0.8/vrf/KeepersVRFConsumer.sol b/contracts/src/v0.8/vrf/KeepersVRFConsumer.sol new file mode 100644 index 00000000..c5dcc2ca --- /dev/null +++ b/contracts/src/v0.8/vrf/KeepersVRFConsumer.sol @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +import {AutomationCompatibleInterface as KeeperCompatibleInterface} from "../automation/interfaces/AutomationCompatibleInterface.sol"; +import {VRFConsumerBaseV2} from "./VRFConsumerBaseV2.sol"; +import {VRFCoordinatorV2Interface} from "./interfaces/VRFCoordinatorV2Interface.sol"; + +// solhint-disable plugin-solidity/prefix-immutable-variables-with-i + +/** + * @title KeepersVRFConsumer + * @notice KeepersVRFConsumer is a Plugin Keepers compatible contract that also acts as a + * VRF V2 requester and consumer. In particular, a random words request is made when `performUpkeep` + * is called in a cadence provided by the upkeep interval. + */ +contract KeepersVRFConsumer is KeeperCompatibleInterface, VRFConsumerBaseV2 { + // Upkeep interval in seconds. This contract's performUpkeep method will + // be called by the Keepers network roughly every UPKEEP_INTERVAL seconds. + uint256 public immutable UPKEEP_INTERVAL; + + // VRF V2 information, provided upon contract construction. + VRFCoordinatorV2Interface public immutable COORDINATOR; + uint64 public immutable SUBSCRIPTION_ID; + uint16 public immutable REQUEST_CONFIRMATIONS; + bytes32 public immutable KEY_HASH; + + // Contract state, updated in performUpkeep and fulfillRandomWords. 
+ uint256 public s_lastTimeStamp; + uint256 public s_vrfRequestCounter; + uint256 public s_vrfResponseCounter; + + struct RequestRecord { + uint256 requestId; + bool fulfilled; + uint32 callbackGasLimit; + uint256 randomness; + } + mapping(uint256 => RequestRecord) public s_requests; /* request ID */ /* request record */ + + constructor( + address vrfCoordinator, + uint64 subscriptionId, + bytes32 keyHash, + uint16 requestConfirmations, + uint256 upkeepInterval + ) VRFConsumerBaseV2(vrfCoordinator) { + COORDINATOR = VRFCoordinatorV2Interface(vrfCoordinator); + SUBSCRIPTION_ID = subscriptionId; + REQUEST_CONFIRMATIONS = requestConfirmations; + KEY_HASH = keyHash; + UPKEEP_INTERVAL = upkeepInterval; + + s_lastTimeStamp = block.timestamp; + s_vrfRequestCounter = 0; + s_vrfResponseCounter = 0; + } + + /** + * @notice Returns true if and only if at least UPKEEP_INTERVAL seconds have elapsed + * since the last upkeep or since construction of the contract. + * @return upkeepNeeded true if and only if at least UPKEEP_INTERVAL seconds have elapsed since the last upkeep or since construction + * of the contract. + */ + // solhint-disable-next-line plugin-solidity/explicit-returns + function checkUpkeep( + bytes calldata /* checkData */ + ) external view override returns (bool upkeepNeeded, bytes memory /* performData */) { + upkeepNeeded = (block.timestamp - s_lastTimeStamp) > UPKEEP_INTERVAL; + } + + /** + * @notice Requests random words from the VRF coordinator if UPKEEP_INTERVAL seconds have elapsed + * since the last upkeep or since construction of the contract. + */ + function performUpkeep(bytes calldata /* performData */) external override { + if ((block.timestamp - s_lastTimeStamp) > UPKEEP_INTERVAL) { + s_lastTimeStamp = block.timestamp; + + _requestRandomWords(); + } + } + + /** + * @notice VRF callback implementation + * @param requestId the VRF V2 request ID, provided at request time. + * @param randomWords the randomness provided by Plugin VRF. 
+ */ + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal override { + // Check that the request exists. If not, revert. + RequestRecord memory record = s_requests[requestId]; + // solhint-disable-next-line custom-errors + require(record.requestId == requestId, "request ID not found in map"); + + // Update the randomness in the record, and increment the response counter. + s_requests[requestId].randomness = randomWords[0]; + s_vrfResponseCounter++; + } + + /** + * @notice Requests random words from Plugin VRF. + */ + function _requestRandomWords() internal { + uint256 requestId = COORDINATOR.requestRandomWords( + KEY_HASH, + SUBSCRIPTION_ID, + REQUEST_CONFIRMATIONS, + 150000, // callback gas limit + 1 // num words + ); + s_requests[requestId] = RequestRecord({ + requestId: requestId, + fulfilled: false, + callbackGasLimit: 150000, + randomness: 0 + }); + s_vrfRequestCounter++; + } +} diff --git a/contracts/src/v0.8/vrf/VRF.sol b/contracts/src/v0.8/vrf/VRF.sol new file mode 100644 index 00000000..f7d62a27 --- /dev/null +++ b/contracts/src/v0.8/vrf/VRF.sol @@ -0,0 +1,588 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/** **************************************************************************** + * @notice Verification of verifiable-random-function (VRF) proofs, following + * @notice https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.3 + * @notice See https://eprint.iacr.org/2017/099.pdf for security proofs. 
+ + * @dev Bibliographic references: + + * @dev Goldberg, et al., "Verifiable Random Functions (VRFs)", Internet Draft + * @dev draft-irtf-cfrg-vrf-05, IETF, Aug 11 2019, + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05 + + * @dev Papadopoulos, et al., "Making NSEC5 Practical for DNSSEC", Cryptology + * @dev ePrint Archive, Report 2017/099, https://eprint.iacr.org/2017/099.pdf + * **************************************************************************** + * @dev USAGE + + * @dev The main entry point is _randomValueFromVRFProof. See its docstring. + * **************************************************************************** + * @dev PURPOSE + + * @dev Reggie the Random Oracle (not his real job) wants to provide randomness + * @dev to Vera the verifier in such a way that Vera can be sure he's not + * @dev making his output up to suit himself. Reggie provides Vera a public key + * @dev to which he knows the secret key. Each time Vera provides a seed to + * @dev Reggie, he gives back a value which is computed completely + * @dev deterministically from the seed and the secret key. + + * @dev Reggie provides a proof by which Vera can verify that the output was + * @dev correctly computed once Reggie tells it to her, but without that proof, + * @dev the output is computationally indistinguishable to her from a uniform + * @dev random sample from the output space. + + * @dev The purpose of this contract is to perform that verification. + * **************************************************************************** + * @dev DESIGN NOTES + + * @dev The VRF algorithm verified here satisfies the full uniqueness, full + * @dev collision resistance, and full pseudo-randomness security properties. 
+ * @dev See "SECURITY PROPERTIES" below, and + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-3 + + * @dev An elliptic curve point is generally represented in the solidity code + * @dev as a uint256[2], corresponding to its affine coordinates in + * @dev GF(FIELD_SIZE). + + * @dev For the sake of efficiency, this implementation deviates from the spec + * @dev in some minor ways: + + * @dev - Keccak hash rather than the SHA256 hash recommended in + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.5 + * @dev Keccak costs much less gas on the EVM, and provides similar security. + + * @dev - Secp256k1 curve instead of the P-256 or ED25519 curves recommended in + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.5 + * @dev For curve-point multiplication, it's much cheaper to abuse ECRECOVER + + * @dev - _hashToCurve recursively hashes until it finds a curve x-ordinate. On + * @dev the EVM, this is slightly more efficient than the recommendation in + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.1.1 + * @dev step 5, to concatenate with a nonce then hash, and rehash with the + * @dev nonce updated until a valid x-ordinate is found. + + * @dev - _hashToCurve does not include a cipher version string or the byte 0x1 + * @dev in the hash message, as recommended in step 5.B of the draft + * @dev standard. They are unnecessary here because no variation in the + * @dev cipher suite is allowed. + + * @dev - Similarly, the hash input in _scalarFromCurvePoints does not include a + * @dev commitment to the cipher suite, either, which differs from step 2 of + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.3 + * @dev . Also, the hash input is the concatenation of the uncompressed + * @dev points, not the compressed points as recommended in step 3. + + * @dev - In the calculation of the challenge value "c", the "u" value (i.e. 
+ * @dev the value computed by Reggie as the nonce times the secp256k1 + * @dev generator point, see steps 5 and 7 of + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.3 + * @dev ) is replaced by its ethereum address, i.e. the lower 160 bits of the + * @dev keccak hash of the original u. This is because we only verify the + * @dev calculation of u up to its address, by abusing ECRECOVER. + * **************************************************************************** + * @dev SECURITY PROPERTIES + + * @dev Here are the security properties for this VRF: + + * @dev Full uniqueness: For any seed and valid VRF public key, there is + * @dev exactly one VRF output which can be proved to come from that seed, in + * @dev the sense that the proof will pass _verifyVRFProof. + + * @dev Full collision resistance: It's cryptographically infeasible to find + * @dev two seeds with same VRF output from a fixed, valid VRF key + + * @dev Full pseudorandomness: Absent the proofs that the VRF outputs are + * @dev derived from a given seed, the outputs are computationally + * @dev indistinguishable from randomness. + + * @dev https://eprint.iacr.org/2017/099.pdf, Appendix B contains the proofs + * @dev for these properties. + + * @dev For secp256k1, the key validation described in section + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.6 + * @dev is unnecessary, because secp256k1 has cofactor 1, and the + * @dev representation of the public key used here (affine x- and y-ordinates + * @dev of the secp256k1 point on the standard y^2=x^3+7 curve) cannot refer to + * @dev the point at infinity. 
+ * **************************************************************************** + * @dev OTHER SECURITY CONSIDERATIONS + * + * @dev The seed input to the VRF could in principle force an arbitrary amount + * @dev of work in _hashToCurve, by requiring extra rounds of hashing and + * @dev checking whether that's yielded the x ordinate of a secp256k1 point. + * @dev However, under the Random Oracle Model the probability of choosing a + * @dev point which forces n extra rounds in _hashToCurve is 2⁻ⁿ. The base cost + * @dev for calling _hashToCurve is about 25,000 gas, and each round of checking + * @dev for a valid x ordinate costs about 15,555 gas, so to find a seed for + * @dev which _hashToCurve would cost more than 2,017,000 gas, one would have to + * @dev try, in expectation, about 2¹²⁸ seeds, which is infeasible for any + * @dev foreseeable computational resources. (25,000 + 128 * 15,555 < 2,017,000.) + + * @dev Since the gas block limit for the Ethereum main net is 10,000,000 gas, + * @dev this means it is infeasible for an adversary to prevent correct + * @dev operation of this contract by choosing an adverse seed. + + * @dev (See TestMeasureHashToCurveGasCost for verification of the gas cost for + * @dev _hashToCurve.) + + * @dev It may be possible to make a secure constant-time _hashToCurve function. + * @dev See notes in _hashToCurve docstring. +*/ +contract VRF { + // See https://www.secg.org/sec2-v2.pdf, section 2.4.1, for these constants. 
+ // Number of points in Secp256k1 + uint256 private constant GROUP_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; + // Prime characteristic of the galois field over which Secp256k1 is defined + uint256 private constant FIELD_SIZE = + // solium-disable-next-line indentation + 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F; + uint256 private constant WORD_LENGTH_BYTES = 0x20; + + // (base^exponent) % FIELD_SIZE + // Cribbed from https://medium.com/@rbkhmrcr/precompiles-solidity-e5d29bd428c4 + function _bigModExp(uint256 base, uint256 exponent) internal view returns (uint256 exponentiation) { + uint256 callResult; + uint256[6] memory bigModExpContractInputs; + bigModExpContractInputs[0] = WORD_LENGTH_BYTES; // Length of base + bigModExpContractInputs[1] = WORD_LENGTH_BYTES; // Length of exponent + bigModExpContractInputs[2] = WORD_LENGTH_BYTES; // Length of modulus + bigModExpContractInputs[3] = base; + bigModExpContractInputs[4] = exponent; + bigModExpContractInputs[5] = FIELD_SIZE; + uint256[1] memory output; + assembly { + callResult := staticcall( + not(0), // Gas cost: no limit + 0x05, // Bigmodexp contract address + bigModExpContractInputs, + 0xc0, // Length of input segment: 6*0x20-bytes + output, + 0x20 // Length of output segment + ) + } + if (callResult == 0) { + // solhint-disable-next-line custom-errors + revert("bigModExp failure!"); + } + return output[0]; + } + + // Let q=FIELD_SIZE. q % 4 = 3, ∴ x≡r^2 mod q ⇒ x^SQRT_POWER≡±r mod q. See + // https://en.wikipedia.org/wiki/Modular_square_root#Prime_or_prime_power_modulus + uint256 private constant SQRT_POWER = (FIELD_SIZE + 1) >> 2; + + // Computes a s.t. a^2 = x in the field. Assumes a exists + function _squareRoot(uint256 x) internal view returns (uint256) { + return _bigModExp(x, SQRT_POWER); + } + + // The value of y^2 given that (x,y) is on secp256k1. + function _ySquared(uint256 x) internal pure returns (uint256) { + // Curve is y^2=x^3+7. 
See section 2.4.1 of https://www.secg.org/sec2-v2.pdf + uint256 xCubed = mulmod(x, mulmod(x, x, FIELD_SIZE), FIELD_SIZE); + return addmod(xCubed, 7, FIELD_SIZE); + } + + // True iff p is on secp256k1 + function _isOnCurve(uint256[2] memory p) internal pure returns (bool) { + // Section 2.3.6. in https://www.secg.org/sec1-v2.pdf + // requires each ordinate to be in [0, ..., FIELD_SIZE-1] + // solhint-disable-next-line custom-errors + require(p[0] < FIELD_SIZE, "invalid x-ordinate"); + // solhint-disable-next-line custom-errors + require(p[1] < FIELD_SIZE, "invalid y-ordinate"); + return _ySquared(p[0]) == mulmod(p[1], p[1], FIELD_SIZE); + } + + // Hash x uniformly into {0, ..., FIELD_SIZE-1}. + function _fieldHash(bytes memory b) internal pure returns (uint256 x_) { + x_ = uint256(keccak256(b)); + // Rejecting if x >= FIELD_SIZE corresponds to step 2.1 in section 2.3.4 of + // http://www.secg.org/sec1-v2.pdf , which is part of the definition of + // string_to_point in the IETF draft + while (x_ >= FIELD_SIZE) { + x_ = uint256(keccak256(abi.encodePacked(x_))); + } + return x_; + } + + // Hash b to a random point which hopefully lies on secp256k1. The y ordinate + // is always even, due to + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.1.1 + // step 5.C, which references arbitrary_string_to_point, defined in + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.5 as + // returning the point with given x ordinate, and even y ordinate. + function _newCandidateSecp256k1Point(bytes memory b) internal view returns (uint256[2] memory p) { + unchecked { + p[0] = _fieldHash(b); + p[1] = _squareRoot(_ySquared(p[0])); + if (p[1] % 2 == 1) { + // Note that 0 <= p[1] < FIELD_SIZE + // so this cannot wrap, we use unchecked to save gas. + p[1] = FIELD_SIZE - p[1]; + } + } + return p; + } + + // Domain-separation tag for initial hash in _hashToCurve. 
Corresponds to + // vrf.go/hashToCurveHashPrefix + uint256 internal constant HASH_TO_CURVE_HASH_PREFIX = 1; + + // Cryptographic hash function onto the curve. + // + // Corresponds to algorithm in section 5.4.1.1 of the draft standard. (But see + // DESIGN NOTES above for slight differences.) + // + // TODO(alx): Implement a bounded-computation hash-to-curve, as described in + // "Construction of Rational Points on Elliptic Curves over Finite Fields" + // http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.831.5299&rep=rep1&type=pdf + // and suggested by + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-01#section-5.2.2 + // (Though we can't use exactly that because secp256k1's j-invariant is 0.) + // + // This would greatly simplify the analysis in "OTHER SECURITY CONSIDERATIONS" + // https://www.pivotaltracker.com/story/show/171120900 + function _hashToCurve(uint256[2] memory pk, uint256 input) internal view returns (uint256[2] memory rv) { + rv = _newCandidateSecp256k1Point(abi.encodePacked(HASH_TO_CURVE_HASH_PREFIX, pk, input)); + while (!_isOnCurve(rv)) { + rv = _newCandidateSecp256k1Point(abi.encodePacked(rv[0])); + } + return rv; + } + + /** ********************************************************************* + * @notice Check that product==scalar*multiplicand + * + * @dev Based on Vitalik Buterin's idea in ethresear.ch post cited below. 
+ * + * @param multiplicand: secp256k1 point + * @param scalar: non-zero GF(GROUP_ORDER) scalar + * @param product: secp256k1 expected to be multiplier * multiplicand + * @return verifies true iff product==scalar*multiplicand, with cryptographically high probability + */ + function _ecmulVerify( + uint256[2] memory multiplicand, + uint256 scalar, + uint256[2] memory product + ) internal pure returns (bool verifies) { + // solhint-disable-next-line custom-errors + require(scalar != 0, "zero scalar"); // Rules out an ecrecover failure case + uint256 x = multiplicand[0]; // x ordinate of multiplicand + uint8 v = multiplicand[1] % 2 == 0 ? 27 : 28; // parity of y ordinate + // https://ethresear.ch/t/you-can-kinda-abuse-ecrecover-to-do-ecmul-in-secp256k1-today/2384/9 + // Point corresponding to address ecrecover(0, v, x, s=scalar*x) is + // (x⁻¹ mod GROUP_ORDER) * (scalar * x * multiplicand - 0 * g), i.e. + // scalar*multiplicand. See https://crypto.stackexchange.com/a/18106 + bytes32 scalarTimesX = bytes32(mulmod(scalar, x, GROUP_ORDER)); + address actual = ecrecover(bytes32(0), v, bytes32(x), scalarTimesX); + // Explicit conversion to address takes bottom 160 bits + address expected = address(uint160(uint256(keccak256(abi.encodePacked(product))))); + return (actual == expected); + } + + // Returns x1/z1-x2/z2=(x1z2-x2z1)/(z1z2) in projective coordinates on P¹(𝔽ₙ) + function _projectiveSub( + uint256 x1, + uint256 z1, + uint256 x2, + uint256 z2 + ) internal pure returns (uint256 x3, uint256 z3) { + unchecked { + uint256 num1 = mulmod(z2, x1, FIELD_SIZE); + // Note this cannot wrap since x2 is a point in [0, FIELD_SIZE-1] + // we use unchecked to save gas. 
+ uint256 num2 = mulmod(FIELD_SIZE - x2, z1, FIELD_SIZE); + (x3, z3) = (addmod(num1, num2, FIELD_SIZE), mulmod(z1, z2, FIELD_SIZE)); + } + return (x3, z3); + } + + // Returns x1/z1*x2/z2=(x1x2)/(z1z2), in projective coordinates on P¹(𝔽ₙ) + function _projectiveMul( + uint256 x1, + uint256 z1, + uint256 x2, + uint256 z2 + ) internal pure returns (uint256 x3, uint256 z3) { + (x3, z3) = (mulmod(x1, x2, FIELD_SIZE), mulmod(z1, z2, FIELD_SIZE)); + return (x3, z3); + } + + /** ************************************************************************** + @notice Computes elliptic-curve sum, in projective co-ordinates + + @dev Using projective coordinates avoids costly divisions + + @dev To use this with p and q in affine coordinates, call + @dev _projectiveECAdd(px, py, qx, qy). This will return + @dev the addition of (px, py, 1) and (qx, qy, 1), in the + @dev secp256k1 group. + + @dev This can be used to calculate the z which is the inverse to zInv + @dev in isValidVRFOutput. But consider using a faster + @dev re-implementation such as ProjectiveECAdd in the golang vrf package. + + @dev This function assumes [px,py,1],[qx,qy,1] are valid projective + coordinates of secp256k1 points. That is safe in this contract, + because this method is only used by _linearCombination, which checks + points are on the curve via ecrecover. + ************************************************************************** + @param px The first affine coordinate of the first summand + @param py The second affine coordinate of the first summand + @param qx The first affine coordinate of the second summand + @param qy The second affine coordinate of the second summand + + (px,py) and (qx,qy) must be distinct, valid secp256k1 points. 
+ ************************************************************************** + Return values are projective coordinates of [px,py,1]+[qx,qy,1] as points + on secp256k1, in P²(𝔽ₙ) + @return sx + @return sy + @return sz + */ + function _projectiveECAdd( + uint256 px, + uint256 py, + uint256 qx, + uint256 qy + ) internal pure returns (uint256 sx, uint256 sy, uint256 sz) { + unchecked { + // See "Group law for E/K : y^2 = x^3 + ax + b", in section 3.1.2, p. 80, + // "Guide to Elliptic Curve Cryptography" by Hankerson, Menezes and Vanstone + // We take the equations there for (sx,sy), and homogenize them to + // projective coordinates. That way, no inverses are required, here, and we + // only need the one inverse in _affineECAdd. + + // We only need the "point addition" equations from Hankerson et al. Can + // skip the "point doubling" equations because p1 == p2 is cryptographically + // impossible, and required not to be the case in _linearCombination. + + // Add extra "projective coordinate" to the two points + (uint256 z1, uint256 z2) = (1, 1); + + // (lx, lz) = (qy-py)/(qx-px), i.e., gradient of secant line. 
+ // Cannot wrap since px and py are in [0, FIELD_SIZE-1] + uint256 lx = addmod(qy, FIELD_SIZE - py, FIELD_SIZE); + uint256 lz = addmod(qx, FIELD_SIZE - px, FIELD_SIZE); + + uint256 dx; // Accumulates denominator from sx calculation + // sx=((qy-py)/(qx-px))^2-px-qx + (sx, dx) = _projectiveMul(lx, lz, lx, lz); // ((qy-py)/(qx-px))^2 + (sx, dx) = _projectiveSub(sx, dx, px, z1); // ((qy-py)/(qx-px))^2-px + (sx, dx) = _projectiveSub(sx, dx, qx, z2); // ((qy-py)/(qx-px))^2-px-qx + + uint256 dy; // Accumulates denominator from sy calculation + // sy=((qy-py)/(qx-px))(px-sx)-py + (sy, dy) = _projectiveSub(px, z1, sx, dx); // px-sx + (sy, dy) = _projectiveMul(sy, dy, lx, lz); // ((qy-py)/(qx-px))(px-sx) + (sy, dy) = _projectiveSub(sy, dy, py, z1); // ((qy-py)/(qx-px))(px-sx)-py + + if (dx != dy) { + // Cross-multiply to put everything over a common denominator + sx = mulmod(sx, dy, FIELD_SIZE); + sy = mulmod(sy, dx, FIELD_SIZE); + sz = mulmod(dx, dy, FIELD_SIZE); + } else { + // Already over a common denominator, use that for z ordinate + sz = dx; + } + } + return (sx, sy, sz); + } + + // p1+p2, as affine points on secp256k1. + // + // invZ must be the inverse of the z returned by _projectiveECAdd(p1, p2). + // It is computed off-chain to save gas. + // + // p1 and p2 must be distinct, because _projectiveECAdd doesn't handle + // point doubling. + function _affineECAdd( + uint256[2] memory p1, + uint256[2] memory p2, + uint256 invZ + ) internal pure returns (uint256[2] memory) { + uint256 x; + uint256 y; + uint256 z; + (x, y, z) = _projectiveECAdd(p1[0], p1[1], p2[0], p2[1]); + // solhint-disable-next-line custom-errors + require(mulmod(z, invZ, FIELD_SIZE) == 1, "invZ must be inverse of z"); + // Clear the z ordinate of the projective representation by dividing through + // by it, to obtain the affine representation + return [mulmod(x, invZ, FIELD_SIZE), mulmod(y, invZ, FIELD_SIZE)]; + } + + // True iff address(c*p+s*g) == lcWitness, where g is generator. 
(With + // cryptographically high probability.) + function _verifyLinearCombinationWithGenerator( + uint256 c, + uint256[2] memory p, + uint256 s, + address lcWitness + ) internal pure returns (bool) { + // Rule out ecrecover failure modes which return address 0. + unchecked { + // solhint-disable-next-line custom-errors + require(lcWitness != address(0), "bad witness"); + uint8 v = (p[1] % 2 == 0) ? 27 : 28; // parity of y-ordinate of p + // Note this cannot wrap (X - Y % X), but we use unchecked to save + // gas. + bytes32 pseudoHash = bytes32(GROUP_ORDER - mulmod(p[0], s, GROUP_ORDER)); // -s*p[0] + bytes32 pseudoSignature = bytes32(mulmod(c, p[0], GROUP_ORDER)); // c*p[0] + // https://ethresear.ch/t/you-can-kinda-abuse-ecrecover-to-do-ecmul-in-secp256k1-today/2384/9 + // The point corresponding to the address returned by + // ecrecover(-s*p[0],v,p[0],c*p[0]) is + // (p[0]⁻¹ mod GROUP_ORDER)*(c*p[0]-(-s)*p[0]*g)=c*p+s*g. + // See https://crypto.stackexchange.com/a/18106 + // https://bitcoin.stackexchange.com/questions/38351/ecdsa-v-r-s-what-is-v + address computed = ecrecover(pseudoHash, v, bytes32(p[0]), pseudoSignature); + return computed == lcWitness; + } + } + + // c*p1 + s*p2. Requires cp1Witness=c*p1 and sp2Witness=s*p2. Also + // requires cp1Witness != sp2Witness (which is fine for this application, + // since it is cryptographically impossible for them to be equal. In the + // (cryptographically impossible) case that a prover accidentally derives + // a proof with equal c*p1 and s*p2, they should retry with a different + // proof nonce.) Assumes that all points are on secp256k1 + // (which is checked in _verifyVRFProof below.) 
+ function _linearCombination( + uint256 c, + uint256[2] memory p1, + uint256[2] memory cp1Witness, + uint256 s, + uint256[2] memory p2, + uint256[2] memory sp2Witness, + uint256 zInv + ) internal pure returns (uint256[2] memory) { + unchecked { + // Note we are relying on the wrap around here + // solhint-disable-next-line custom-errors + require((cp1Witness[0] % FIELD_SIZE) != (sp2Witness[0] % FIELD_SIZE), "points in sum must be distinct"); + // solhint-disable-next-line custom-errors + require(_ecmulVerify(p1, c, cp1Witness), "First mul check failed"); + // solhint-disable-next-line custom-errors + require(_ecmulVerify(p2, s, sp2Witness), "Second mul check failed"); + return _affineECAdd(cp1Witness, sp2Witness, zInv); + } + } + + // Domain-separation tag for the hash taken in _scalarFromCurvePoints. + // Corresponds to scalarFromCurveHashPrefix in vrf.go + uint256 internal constant SCALAR_FROM_CURVE_POINTS_HASH_PREFIX = 2; + + // Pseudo-random number from inputs. Matches vrf.go/_scalarFromCurvePoints, and + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.3 + // The draft calls (in step 7, via the definition of string_to_int, in + // https://datatracker.ietf.org/doc/html/rfc8017#section-4.2 ) for taking the + // first hash without checking that it corresponds to a number less than the + // group order, which will lead to a slight bias in the sample. + // + // TODO(alx): We could save a bit of gas by following the standard here and + // using the compressed representation of the points, if we collated the y + // parities into a single bytes32. 
+ // https://www.pivotaltracker.com/story/show/171120588 + function _scalarFromCurvePoints( + uint256[2] memory hash, + uint256[2] memory pk, + uint256[2] memory gamma, + address uWitness, + uint256[2] memory v + ) internal pure returns (uint256 s) { + return uint256(keccak256(abi.encodePacked(SCALAR_FROM_CURVE_POINTS_HASH_PREFIX, hash, pk, gamma, v, uWitness))); + } + + // True if (gamma, c, s) is a correctly constructed randomness proof from pk + // and seed. zInv must be the inverse of the third ordinate from + // _projectiveECAdd applied to cGammaWitness and sHashWitness. Corresponds to + // section 5.3 of the IETF draft. + // + // TODO(alx): Since I'm only using pk in the ecrecover call, I could only pass + // the x ordinate, and the parity of the y ordinate in the top bit of uWitness + // (which I could make a uint256 without using any extra space.) Would save + // about 2000 gas. https://www.pivotaltracker.com/story/show/170828567 + function _verifyVRFProof( + uint256[2] memory pk, + uint256[2] memory gamma, + uint256 c, + uint256 s, + uint256 seed, + address uWitness, + uint256[2] memory cGammaWitness, + uint256[2] memory sHashWitness, + uint256 zInv + ) internal view { + unchecked { + // solhint-disable-next-line custom-errors + require(_isOnCurve(pk), "public key is not on curve"); + // solhint-disable-next-line custom-errors + require(_isOnCurve(gamma), "gamma is not on curve"); + // solhint-disable-next-line custom-errors + require(_isOnCurve(cGammaWitness), "cGammaWitness is not on curve"); + // solhint-disable-next-line custom-errors + require(_isOnCurve(sHashWitness), "sHashWitness is not on curve"); + // Step 5. of IETF draft section 5.3 (pk corresponds to 5.3's Y, and here + // we use the address of u instead of u itself. 
Also, here we add the + // terms instead of taking the difference, and in the proof construction in + // vrf.GenerateProof, we correspondingly take the difference instead of + // taking the sum as they do in step 7 of section 5.1.) + // solhint-disable-next-line custom-errors + require(_verifyLinearCombinationWithGenerator(c, pk, s, uWitness), "addr(c*pk+s*g)!=_uWitness"); + // Step 4. of IETF draft section 5.3 (pk corresponds to Y, seed to alpha_string) + uint256[2] memory hash = _hashToCurve(pk, seed); + // Step 6. of IETF draft section 5.3, but see note for step 5 about +/- terms + uint256[2] memory v = _linearCombination(c, gamma, cGammaWitness, s, hash, sHashWitness, zInv); + // Steps 7. and 8. of IETF draft section 5.3 + uint256 derivedC = _scalarFromCurvePoints(hash, pk, gamma, uWitness, v); + // solhint-disable-next-line custom-errors + require(c == derivedC, "invalid proof"); + } + } + + // Domain-separation tag for the hash used as the final VRF output. + // Corresponds to vrfRandomOutputHashPrefix in vrf.go + uint256 internal constant VRF_RANDOM_OUTPUT_HASH_PREFIX = 3; + + struct Proof { + uint256[2] pk; + uint256[2] gamma; + uint256 c; + uint256 s; + uint256 seed; + address uWitness; + uint256[2] cGammaWitness; + uint256[2] sHashWitness; + uint256 zInv; + } + + /* *************************************************************************** + * @notice Returns proof's output, if proof is valid. 
Otherwise reverts + + * @param proof vrf proof components + * @param seed seed used to generate the vrf output + * + * Throws if proof is invalid, otherwise: + * @return output i.e., the random output implied by the proof + * *************************************************************************** + */ + function _randomValueFromVRFProof(Proof memory proof, uint256 seed) internal view returns (uint256 output) { + _verifyVRFProof( + proof.pk, + proof.gamma, + proof.c, + proof.s, + seed, + proof.uWitness, + proof.cGammaWitness, + proof.sHashWitness, + proof.zInv + ); + output = uint256(keccak256(abi.encode(VRF_RANDOM_OUTPUT_HASH_PREFIX, proof.gamma))); + return output; + } +} diff --git a/contracts/src/v0.8/vrf/VRFConsumerBase.sol b/contracts/src/v0.8/vrf/VRFConsumerBase.sol new file mode 100644 index 00000000..d30bac7f --- /dev/null +++ b/contracts/src/v0.8/vrf/VRFConsumerBase.sol @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../shared/interfaces/LinkTokenInterface.sol"; + +import {VRFRequestIDBase} from "./VRFRequestIDBase.sol"; + +/** **************************************************************************** + * @notice Interface for contracts using VRF randomness + * ***************************************************************************** + * @dev PURPOSE + * + * @dev Reggie the Random Oracle (not his real job) wants to provide randomness + * @dev to Vera the verifier in such a way that Vera can be sure he's not + * @dev making his output up to suit himself. Reggie provides Vera a public key + * @dev to which he knows the secret key. Each time Vera provides a seed to + * @dev Reggie, he gives back a value which is computed completely + * @dev deterministically from the seed and the secret key. 
+ * + * @dev Reggie provides a proof by which Vera can verify that the output was + * @dev correctly computed once Reggie tells it to her, but without that proof, + * @dev the output is indistinguishable to her from a uniform random sample + * @dev from the output space. + * + * @dev The purpose of this contract is to make it easy for unrelated contracts + * @dev to talk to Vera the verifier about the work Reggie is doing, to provide + * @dev simple access to a verifiable source of randomness. + * ***************************************************************************** + * @dev USAGE + * + * @dev Calling contracts must inherit from VRFConsumerBase, and can + * @dev initialize VRFConsumerBase's attributes in their constructor as + * @dev shown: + * + * @dev contract VRFConsumer { + * @dev constructor(, address _vrfCoordinator, address _link) + * @dev VRFConsumerBase(_vrfCoordinator, _link) public { + * @dev + * @dev } + * @dev } + * + * @dev The oracle will have given you an ID for the VRF keypair they have + * @dev committed to (let's call it keyHash), and have told you the minimum PLI + * @dev price for VRF service. Make sure your contract has sufficient PLI, and + * @dev call requestRandomness(keyHash, fee, seed), where seed is the input you + * @dev want to generate randomness from. + * + * @dev Once the VRFCoordinator has received and validated the oracle's response + * @dev to your request, it will call your contract's fulfillRandomness method. + * + * @dev The randomness argument to fulfillRandomness is the actual random value + * @dev generated from your seed. + * + * @dev The requestId argument is generated from the keyHash and the seed by + * @dev makeRequestId(keyHash, seed). If your contract could have concurrent + * @dev requests open, you can use the requestId to track which seed is + * @dev associated with which randomness. See VRFRequestIDBase.sol for more + * @dev details. 
(See "SECURITY CONSIDERATIONS" for principles to keep in mind, + * @dev if your contract could have multiple requests in flight simultaneously.) + * + * @dev Colliding `requestId`s are cryptographically impossible as long as seeds + * @dev differ. (Which is critical to making unpredictable randomness! See the + * @dev next section.) + * + * ***************************************************************************** + * @dev SECURITY CONSIDERATIONS + * + * @dev A method with the ability to call your fulfillRandomness method directly + * @dev could spoof a VRF response with any random value, so it's critical that + * @dev it cannot be directly called by anything other than this base contract + * @dev (specifically, by the VRFConsumerBase.rawFulfillRandomness method). + * + * @dev For your users to trust that your contract's random behavior is free + * @dev from malicious interference, it's best if you can write it so that all + * @dev behaviors implied by a VRF response are executed *during* your + * @dev fulfillRandomness method. If your contract must store the response (or + * @dev anything derived from it) and use it later, you must ensure that any + * @dev user-significant behavior which depends on that stored value cannot be + * @dev manipulated by a subsequent VRF request. + * + * @dev Similarly, both miners and the VRF oracle itself have some influence + * @dev over the order in which VRF responses appear on the blockchain, so if + * @dev your contract could have multiple VRF requests in flight simultaneously, + * @dev you must ensure that the order in which the VRF responses arrive cannot + * @dev be used to manipulate your contract's user-significant behavior. + * + * @dev Since the ultimate input to the VRF is mixed with the block hash of the + * @dev block in which the request is made, user-provided seeds have no impact + * @dev on its economic security properties. 
They are only included for API
 * @dev compatibility with previous versions of this contract.
+ */ + uint256 private constant USER_SEED_PLACEHOLDER = 0; + + /** + * @notice requestRandomness initiates a request for VRF output given _seed + * + * @dev The fulfillRandomness method receives the output, once it's provided + * @dev by the Oracle, and verified by the vrfCoordinator. + * + * @dev The _keyHash must already be registered with the VRFCoordinator, and + * @dev the _fee must exceed the fee specified during registration of the + * @dev _keyHash. + * + * @dev The _seed parameter is vestigial, and is kept only for API + * @dev compatibility with older versions. It can't *hurt* to mix in some of + * @dev your own randomness, here, but it's not necessary because the VRF + * @dev oracle will mix the hash of the block containing your request into the + * @dev VRF seed it ultimately uses. + * + * @param _keyHash ID of public key against which randomness is generated + * @param _fee The amount of PLI to send with the request + * + * @return requestId unique ID for this request + * + * @dev The returned requestId can be used to distinguish responses to + * @dev concurrent requests. It is passed as the first argument to + * @dev fulfillRandomness. + */ + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function requestRandomness(bytes32 _keyHash, uint256 _fee) internal returns (bytes32 requestId) { + PLI.transferAndCall(vrfCoordinator, _fee, abi.encode(_keyHash, USER_SEED_PLACEHOLDER)); + // This is the seed passed to VRFCoordinator. The oracle will mix this with + // the hash of the block containing this request to obtain the seed/input + // which is finally passed to the VRF cryptographic machinery. + uint256 vRFSeed = makeVRFInputSeed(_keyHash, USER_SEED_PLACEHOLDER, address(this), nonces[_keyHash]); + // nonces[_keyHash] must stay in sync with + // VRFCoordinator.nonces[_keyHash][this], which was incremented by the above + // successful PLI.transferAndCall (in VRFCoordinator.randomnessRequest). 
+ // This provides protection against the user repeating their input seed, + // which would result in a predictable/duplicate output, if multiple such + // requests appeared in the same block. + nonces[_keyHash] = nonces[_keyHash] + 1; + return makeRequestId(_keyHash, vRFSeed); + } + + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + LinkTokenInterface internal immutable PLI; + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + address private immutable vrfCoordinator; + + // Nonces for each VRF key from which randomness has been requested. + // + // Must stay in sync with VRFCoordinator[_keyHash][this] + // solhint-disable-next-line plugin-solidity/prefix-storage-variables-with-s-underscore + mapping(bytes32 => uint256) /* keyHash */ /* nonce */ private nonces; + + /** + * @param _vrfCoordinator address of VRFCoordinator contract + * @param _link address of PLI token contract + * + * @dev https://docs.chain.link/docs/link-token-contracts + */ + constructor(address _vrfCoordinator, address _link) { + vrfCoordinator = _vrfCoordinator; + PLI = LinkTokenInterface(_link); + } + + // rawFulfillRandomness is called by VRFCoordinator when it receives a valid VRF + // proof. 
rawFulfillRandomness then calls fulfillRandomness, after validating + // the origin of the call + function rawFulfillRandomness(bytes32 requestId, uint256 randomness) external { + // solhint-disable-next-line custom-errors + require(msg.sender == vrfCoordinator, "Only VRFCoordinator can fulfill"); + fulfillRandomness(requestId, randomness); + } +} diff --git a/contracts/src/v0.8/vrf/VRFConsumerBaseV2.sol b/contracts/src/v0.8/vrf/VRFConsumerBaseV2.sol new file mode 100644 index 00000000..83c7f5fa --- /dev/null +++ b/contracts/src/v0.8/vrf/VRFConsumerBaseV2.sol @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +/** **************************************************************************** + * @notice Interface for contracts using VRF randomness + * ***************************************************************************** + * @dev PURPOSE + * + * @dev Reggie the Random Oracle (not his real job) wants to provide randomness + * @dev to Vera the verifier in such a way that Vera can be sure he's not + * @dev making his output up to suit himself. Reggie provides Vera a public key + * @dev to which he knows the secret key. Each time Vera provides a seed to + * @dev Reggie, he gives back a value which is computed completely + * @dev deterministically from the seed and the secret key. + * + * @dev Reggie provides a proof by which Vera can verify that the output was + * @dev correctly computed once Reggie tells it to her, but without that proof, + * @dev the output is indistinguishable to her from a uniform random sample + * @dev from the output space. + * + * @dev The purpose of this contract is to make it easy for unrelated contracts + * @dev to talk to Vera the verifier about the work Reggie is doing, to provide + * @dev simple access to a verifiable source of randomness. It ensures 2 things: + * @dev 1. The fulfillment came from the VRFCoordinator + * @dev 2. The consumer contract implements fulfillRandomWords. 
 * @dev committed to (let's call it keyHash). Create a subscription, fund it,
 * @dev and add your consumer contract as a consumer of it (see
 * @dev VRFCoordinatorInterface subscription management functions).
 * @dev cost. This cost scales with the number of blocks the VRF oracle waits
 * @dev until it responds to a request.
It is for this reason that + * @dev that you can signal to an oracle you'd like them to wait longer before + * @dev responding to the request (however this is not enforced in the contract + * @dev and so remains effective only in the case of unmodified oracle software). + */ +abstract contract VRFConsumerBaseV2 { + error OnlyCoordinatorCanFulfill(address have, address want); + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + address private immutable vrfCoordinator; + + /** + * @param _vrfCoordinator address of VRFCoordinator contract + */ + constructor(address _vrfCoordinator) { + vrfCoordinator = _vrfCoordinator; + } + + /** + * @notice fulfillRandomness handles the VRF response. Your contract must + * @notice implement it. See "SECURITY CONSIDERATIONS" above for important + * @notice principles to keep in mind when implementing your fulfillRandomness + * @notice method. + * + * @dev VRFConsumerBaseV2 expects its subcontracts to have a method with this + * @dev signature, and will call it once it has verified the proof + * @dev associated with the randomness. (It is triggered via a call to + * @dev rawFulfillRandomness, below.) + * + * @param requestId The Id initially returned by requestRandomness + * @param randomWords the VRF output expanded to the requested number of words + */ + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal virtual; + + // rawFulfillRandomness is called by VRFCoordinator when it receives a valid VRF + // proof. 
rawFulfillRandomness then calls fulfillRandomness, after validating + // the origin of the call + function rawFulfillRandomWords(uint256 requestId, uint256[] memory randomWords) external { + if (msg.sender != vrfCoordinator) { + revert OnlyCoordinatorCanFulfill(msg.sender, vrfCoordinator); + } + fulfillRandomWords(requestId, randomWords); + } +} diff --git a/contracts/src/v0.8/vrf/VRFCoordinatorV2.sol b/contracts/src/v0.8/vrf/VRFCoordinatorV2.sol new file mode 100644 index 00000000..fba5a2ff --- /dev/null +++ b/contracts/src/v0.8/vrf/VRFCoordinatorV2.sol @@ -0,0 +1,838 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import {LinkTokenInterface} from "../shared/interfaces/LinkTokenInterface.sol"; +import {BlockhashStoreInterface} from "./interfaces/BlockhashStoreInterface.sol"; +import {AggregatorV3Interface} from "../shared/interfaces/AggregatorV3Interface.sol"; +import {VRFCoordinatorV2Interface} from "./interfaces/VRFCoordinatorV2Interface.sol"; +import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol"; +import {IERC677Receiver} from "../shared/interfaces/IERC677Receiver.sol"; +import {VRF} from "./VRF.sol"; +import {ConfirmedOwner} from "../shared/access/ConfirmedOwner.sol"; +import {VRFConsumerBaseV2} from "./VRFConsumerBaseV2.sol"; +import {ChainSpecificUtil} from "../ChainSpecificUtil.sol"; + +contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCoordinatorV2Interface, IERC677Receiver { + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + LinkTokenInterface public immutable PLI; + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + AggregatorV3Interface public immutable PLI_ETH_FEED; + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + BlockhashStoreInterface public immutable BLOCKHASH_STORE; + + // We need to maintain a list of consuming addresses. 
  // This bound ensures we are able to loop over them as needed.
  // Should a user require more consumers, they can use multiple subscriptions.
  uint16 public constant MAX_CONSUMERS = 100;
  error TooManyConsumers();
  error InsufficientBalance();
  error InvalidConsumer(uint64 subId, address consumer);
  error InvalidSubscription();
  error OnlyCallableFromLink();
  error InvalidCalldata();
  error MustBeSubOwner(address owner);
  error PendingRequestExists();
  error MustBeRequestedOwner(address proposedOwner);
  error BalanceInvariantViolated(uint256 internalBalance, uint256 externalBalance); // Should never happen
  event FundsRecovered(address to, uint256 amount);
  // We use the subscription struct (1 word)
  // at fulfillment time.
  struct Subscription {
    // There are only 1e9*1e18 = 1e27 juels in existence, so the balance can fit in uint96 (2^96 ~ 7e28)
    uint96 balance; // Common link balance used for all consumer requests.
    uint64 reqCount; // For fee tiers
  }
  // We use the config for the mgmt APIs
  struct SubscriptionConfig {
    address owner; // Owner can fund/withdraw/cancel the sub.
    address requestedOwner; // For safely transferring sub ownership.
    // Maintains the list of keys in s_consumers.
    // We do this for 2 reasons:
    // 1. To be able to clean up all keys from s_consumers when canceling a subscription.
    // 2. To be able to return the list of all consumers in getSubscription.
    // Note that we need the s_consumers map to be able to directly check if a
    // consumer is valid without reading all the consumers from storage.
    address[] consumers;
  }
  // Note a nonce of 0 indicates the consumer is not assigned to that subscription.
  mapping(address => mapping(uint64 => uint64)) /* consumer */ /* subId */ /* nonce */ private s_consumers;
  mapping(uint64 => SubscriptionConfig) /* subId */ /* subscriptionConfig */ private s_subscriptionConfigs;
  mapping(uint64 => Subscription) /* subId */ /* subscription */ private s_subscriptions;
  // We make the sub count public so that it's possible to
  // get all the current subscriptions via getSubscription.
  uint64 private s_currentSubId;
  // s_totalBalance tracks the total link sent to/from
  // this contract through onTokenTransfer, cancelSubscription and oracleWithdraw.
  // A discrepancy with this contract's link balance indicates someone
  // sent tokens using transfer and so we may need to use recoverFunds.
  uint96 private s_totalBalance;
  event SubscriptionCreated(uint64 indexed subId, address owner);
  event SubscriptionFunded(uint64 indexed subId, uint256 oldBalance, uint256 newBalance);
  event SubscriptionConsumerAdded(uint64 indexed subId, address consumer);
  event SubscriptionConsumerRemoved(uint64 indexed subId, address consumer);
  event SubscriptionCanceled(uint64 indexed subId, address to, uint256 amount);
  event SubscriptionOwnerTransferRequested(uint64 indexed subId, address from, address to);
  event SubscriptionOwnerTransferred(uint64 indexed subId, address from, address to);

  // Set this maximum to 200 to give us a 56 block window to fulfill
  // the request before requiring the block hash feeder.
  uint16 public constant MAX_REQUEST_CONFIRMATIONS = 200;
  uint32 public constant MAX_NUM_WORDS = 500;
  // 5k is plenty for an EXTCODESIZE call (2600) + warm CALL (100)
  // and some arithmetic operations.
  uint256 private constant GAS_FOR_CALL_EXACT_CHECK = 5_000;
  error InvalidRequestConfirmations(uint16 have, uint16 min, uint16 max);
  error GasLimitTooBig(uint32 have, uint32 want);
  error NumWordsTooBig(uint32 have, uint32 want);
  error ProvingKeyAlreadyRegistered(bytes32 keyHash);
  error NoSuchProvingKey(bytes32 keyHash);
  error InvalidLinkWeiPrice(int256 linkWei);
  error InsufficientGasForConsumer(uint256 have, uint256 want);
  error NoCorrespondingRequest();
  error IncorrectCommitment();
  error BlockhashNotInStore(uint256 blockNum);
  error PaymentTooLarge();
  error Reentrant();
  // Pre-image of the commitment stored per in-flight request; the oracle
  // supplies it again at fulfillment time and it is re-hashed for validation.
  struct RequestCommitment {
    uint64 blockNum;
    uint64 subId;
    uint32 callbackGasLimit;
    uint32 numWords;
    address sender;
  }
  mapping(bytes32 => address) /* keyHash */ /* oracle */ private s_provingKeys;
  bytes32[] private s_provingKeyHashes;
  mapping(address => uint96) /* oracle */ /* PLI balance */ private s_withdrawableTokens;
  mapping(uint256 => bytes32) /* requestID */ /* commitment */ private s_requestCommitments;
  event ProvingKeyRegistered(bytes32 keyHash, address indexed oracle);
  event ProvingKeyDeregistered(bytes32 keyHash, address indexed oracle);
  event RandomWordsRequested(
    bytes32 indexed keyHash,
    uint256 requestId,
    uint256 preSeed,
    uint64 indexed subId,
    uint16 minimumRequestConfirmations,
    uint32 callbackGasLimit,
    uint32 numWords,
    address indexed sender
  );
  event RandomWordsFulfilled(uint256 indexed requestId, uint256 outputSeed, uint96 payment, bool success);

  struct Config {
    uint16 minimumRequestConfirmations;
    uint32 maxGasLimit;
    // Reentrancy protection.
    bool reentrancyLock;
    // stalenessSeconds is how long before we consider the feed price to be stale
    // and fallback to fallbackWeiPerUnitLink.
    uint32 stalenessSeconds;
    // Gas to cover oracle payment after we calculate the payment.
    // We make it configurable in case those operations are repriced.
    uint32 gasAfterPaymentCalculation;
  }
  int256 private s_fallbackWeiPerUnitLink;
  Config private s_config;
  FeeConfig private s_feeConfig;
  struct FeeConfig {
    // Flat fee charged per fulfillment in millionths of link
    // So fee range is [0, 2^32/10^6].
    uint32 fulfillmentFlatFeeLinkPPMTier1;
    uint32 fulfillmentFlatFeeLinkPPMTier2;
    uint32 fulfillmentFlatFeeLinkPPMTier3;
    uint32 fulfillmentFlatFeeLinkPPMTier4;
    uint32 fulfillmentFlatFeeLinkPPMTier5;
    uint24 reqsForTier2;
    uint24 reqsForTier3;
    uint24 reqsForTier4;
    uint24 reqsForTier5;
  }
  event ConfigSet(
    uint16 minimumRequestConfirmations,
    uint32 maxGasLimit,
    uint32 stalenessSeconds,
    uint32 gasAfterPaymentCalculation,
    int256 fallbackWeiPerUnitLink,
    FeeConfig feeConfig
  );

  /**
   * @param link address of the PLI token contract
   * @param blockhashStore address of the BlockhashStore contract, the fallback
   *        source for block hashes no longer directly available on-chain
   * @param linkEthFeed address of the PLI/ETH price feed
   */
  constructor(address link, address blockhashStore, address linkEthFeed) ConfirmedOwner(msg.sender) {
    PLI = LinkTokenInterface(link);
    PLI_ETH_FEED = AggregatorV3Interface(linkEthFeed);
    BLOCKHASH_STORE = BlockhashStoreInterface(blockhashStore);
  }

  /**
   * @notice Registers a proving key to an oracle.
   * @param oracle address of the oracle
   * @param publicProvingKey key that oracle can use to submit vrf fulfillments
   */
  function registerProvingKey(address oracle, uint256[2] calldata publicProvingKey) external onlyOwner {
    bytes32 kh = hashOfKey(publicProvingKey);
    // Reject duplicates so each key hash appears at most once in
    // s_provingKeyHashes and maps to exactly one oracle.
    if (s_provingKeys[kh] != address(0)) {
      revert ProvingKeyAlreadyRegistered(kh);
    }
    s_provingKeys[kh] = oracle;
    s_provingKeyHashes.push(kh);
    emit ProvingKeyRegistered(kh, oracle);
  }

  /**
   * @notice Deregisters a proving key from an oracle.
+ * @param publicProvingKey key that oracle can use to submit vrf fulfillments + */ + function deregisterProvingKey(uint256[2] calldata publicProvingKey) external onlyOwner { + bytes32 kh = hashOfKey(publicProvingKey); + address oracle = s_provingKeys[kh]; + if (oracle == address(0)) { + revert NoSuchProvingKey(kh); + } + delete s_provingKeys[kh]; + for (uint256 i = 0; i < s_provingKeyHashes.length; i++) { + if (s_provingKeyHashes[i] == kh) { + bytes32 last = s_provingKeyHashes[s_provingKeyHashes.length - 1]; + // Copy last element and overwrite kh to be deleted with it + s_provingKeyHashes[i] = last; + s_provingKeyHashes.pop(); + } + } + emit ProvingKeyDeregistered(kh, oracle); + } + + /** + * @notice Returns the proving key hash key associated with this public key + * @param publicKey the key to return the hash of + */ + function hashOfKey(uint256[2] memory publicKey) public pure returns (bytes32) { + return keccak256(abi.encode(publicKey)); + } + + /** + * @notice Sets the configuration of the vrfv2 coordinator + * @param minimumRequestConfirmations global min for request confirmations + * @param maxGasLimit global max for request gas limit + * @param stalenessSeconds if the eth/link feed is more stale then this, use the fallback price + * @param gasAfterPaymentCalculation gas used in doing accounting after completing the gas measurement + * @param fallbackWeiPerUnitLink fallback eth/link price in the case of a stale feed + * @param feeConfig fee tier configuration + */ + function setConfig( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation, + int256 fallbackWeiPerUnitLink, + FeeConfig memory feeConfig + ) external onlyOwner { + if (minimumRequestConfirmations > MAX_REQUEST_CONFIRMATIONS) { + revert InvalidRequestConfirmations( + minimumRequestConfirmations, + minimumRequestConfirmations, + MAX_REQUEST_CONFIRMATIONS + ); + } + if (fallbackWeiPerUnitLink <= 0) { + revert 
InvalidLinkWeiPrice(fallbackWeiPerUnitLink); + } + s_config = Config({ + minimumRequestConfirmations: minimumRequestConfirmations, + maxGasLimit: maxGasLimit, + stalenessSeconds: stalenessSeconds, + gasAfterPaymentCalculation: gasAfterPaymentCalculation, + reentrancyLock: false + }); + s_feeConfig = feeConfig; + s_fallbackWeiPerUnitLink = fallbackWeiPerUnitLink; + emit ConfigSet( + minimumRequestConfirmations, + maxGasLimit, + stalenessSeconds, + gasAfterPaymentCalculation, + fallbackWeiPerUnitLink, + s_feeConfig + ); + } + + function getConfig() + external + view + returns ( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation + ) + { + return ( + s_config.minimumRequestConfirmations, + s_config.maxGasLimit, + s_config.stalenessSeconds, + s_config.gasAfterPaymentCalculation + ); + } + + function getFeeConfig() + external + view + returns ( + uint32 fulfillmentFlatFeeLinkPPMTier1, + uint32 fulfillmentFlatFeeLinkPPMTier2, + uint32 fulfillmentFlatFeeLinkPPMTier3, + uint32 fulfillmentFlatFeeLinkPPMTier4, + uint32 fulfillmentFlatFeeLinkPPMTier5, + uint24 reqsForTier2, + uint24 reqsForTier3, + uint24 reqsForTier4, + uint24 reqsForTier5 + ) + { + return ( + s_feeConfig.fulfillmentFlatFeeLinkPPMTier1, + s_feeConfig.fulfillmentFlatFeeLinkPPMTier2, + s_feeConfig.fulfillmentFlatFeeLinkPPMTier3, + s_feeConfig.fulfillmentFlatFeeLinkPPMTier4, + s_feeConfig.fulfillmentFlatFeeLinkPPMTier5, + s_feeConfig.reqsForTier2, + s_feeConfig.reqsForTier3, + s_feeConfig.reqsForTier4, + s_feeConfig.reqsForTier5 + ); + } + + function getTotalBalance() external view returns (uint256) { + return s_totalBalance; + } + + function getFallbackWeiPerUnitLink() external view returns (int256) { + return s_fallbackWeiPerUnitLink; + } + + /** + * @notice Owner cancel subscription, sends remaining link directly to the subscription owner. 
   * @param subId subscription id
   * @dev notably can be called even if there are pending requests, outstanding ones may fail onchain
   */
  function ownerCancelSubscription(uint64 subId) external onlyOwner {
    if (s_subscriptionConfigs[subId].owner == address(0)) {
      revert InvalidSubscription();
    }
    _cancelSubscriptionHelper(subId, s_subscriptionConfigs[subId].owner);
  }

  /**
   * @notice Recover link sent with transfer instead of transferAndCall.
   * @param to address to send link to
   */
  function recoverFunds(address to) external onlyOwner {
    uint256 externalBalance = PLI.balanceOf(address(this));
    uint256 internalBalance = uint256(s_totalBalance);
    if (internalBalance > externalBalance) {
      // Accounted balance exceeding actual token holdings should never happen.
      revert BalanceInvariantViolated(internalBalance, externalBalance);
    }
    if (internalBalance < externalBalance) {
      // Surplus tokens arrived via plain transfer; sweep them out.
      uint256 amount = externalBalance - internalBalance;
      PLI.transfer(to, amount);
      emit FundsRecovered(to, amount);
    }
    // If the balances are equal, nothing to be done.
  }

  /**
   * @inheritdoc VRFCoordinatorV2Interface
   */
  function getRequestConfig() external view override returns (uint16, uint32, bytes32[] memory) {
    return (s_config.minimumRequestConfirmations, s_config.maxGasLimit, s_provingKeyHashes);
  }

  /**
   * @inheritdoc VRFCoordinatorV2Interface
   */
  function requestRandomWords(
    bytes32 keyHash,
    uint64 subId,
    uint16 requestConfirmations,
    uint32 callbackGasLimit,
    uint32 numWords
  ) external override nonReentrant returns (uint256) {
    // Input validation using the subscription storage.
    if (s_subscriptionConfigs[subId].owner == address(0)) {
      revert InvalidSubscription();
    }
    // It's important to ensure that the consumer is in fact who they say they
    // are, otherwise they could use someone else's subscription balance.
    // A nonce of 0 indicates consumer is not allocated to the sub.
    uint64 currentNonce = s_consumers[msg.sender][subId];
    if (currentNonce == 0) {
      revert InvalidConsumer(subId, msg.sender);
    }
    // Input validation using the config storage word.
    if (
      requestConfirmations < s_config.minimumRequestConfirmations || requestConfirmations > MAX_REQUEST_CONFIRMATIONS
    ) {
      revert InvalidRequestConfirmations(
        requestConfirmations,
        s_config.minimumRequestConfirmations,
        MAX_REQUEST_CONFIRMATIONS
      );
    }
    // No lower bound on the requested gas limit. A user could request 0
    // and they would simply be billed for the proof verification and wouldn't be
    // able to do anything with the random value.
    if (callbackGasLimit > s_config.maxGasLimit) {
      revert GasLimitTooBig(callbackGasLimit, s_config.maxGasLimit);
    }
    if (numWords > MAX_NUM_WORDS) {
      revert NumWordsTooBig(numWords, MAX_NUM_WORDS);
    }
    // Note we do not check whether the keyHash is valid to save gas.
    // The consequence for users is that they can send requests
    // for invalid keyHashes which will simply not be fulfilled.
    uint64 nonce = currentNonce + 1;
    (uint256 requestId, uint256 preSeed) = _computeRequestId(keyHash, msg.sender, subId, nonce);

    // Commit to the request parameters; the same pre-image is supplied by the
    // oracle at fulfillment time and re-hashed for validation.
    s_requestCommitments[requestId] = keccak256(
      abi.encode(requestId, ChainSpecificUtil._getBlockNumber(), subId, callbackGasLimit, numWords, msg.sender)
    );
    emit RandomWordsRequested(
      keyHash,
      requestId,
      preSeed,
      subId,
      requestConfirmations,
      callbackGasLimit,
      numWords,
      msg.sender
    );
    s_consumers[msg.sender][subId] = nonce;

    return requestId;
  }

  /**
   * @notice Get request commitment
   * @param requestId id of request
   * @dev used to determine if a request is fulfilled or not
   */
  function getCommitment(uint256 requestId) external view returns (bytes32) {
    return s_requestCommitments[requestId];
  }

  // Deterministically derives (requestId, preSeed) from the proving key hash,
  // the consumer address, the subscription and the per-consumer nonce.
  function _computeRequestId(
    bytes32 keyHash,
    address sender,
    uint64 subId,
    uint64 nonce
  ) private pure returns (uint256, uint256) {
    uint256 preSeed = uint256(keccak256(abi.encode(keyHash, sender, subId, nonce)));
    return (uint256(keccak256(abi.encode(keyHash, preSeed))), preSeed);
  }

  /**
   * @dev calls target address with exactly gasAmount gas and data as calldata
   * or reverts if at least gasAmount gas is not available.
   */
  function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) {
    assembly {
      let g := gas()
      // Compute g -= GAS_FOR_CALL_EXACT_CHECK and check for underflow
      // The gas actually passed to the callee is min(gasAmount, 63//64*gas available).
      // We want to ensure that we revert if gasAmount > 63//64*gas available
      // as we do not want to provide them with less, however that check itself costs
      // gas. GAS_FOR_CALL_EXACT_CHECK ensures we have at least enough gas to be able
      // to revert if gasAmount > 63//64*gas available.
      if lt(g, GAS_FOR_CALL_EXACT_CHECK) {
        revert(0, 0)
      }
      g := sub(g, GAS_FOR_CALL_EXACT_CHECK)
      // if g - g//64 <= gasAmount, revert
      // (we subtract g//64 because of EIP-150)
      if iszero(gt(sub(g, div(g, 64)), gasAmount)) {
        revert(0, 0)
      }
      // solidity calls check that a contract actually exists at the destination, so we do the same
      if iszero(extcodesize(target)) {
        revert(0, 0)
      }
      // call and return whether we succeeded. ignore return data
      // call(gas,addr,value,argsOffset,argsLength,retOffset,retLength)
      success := call(gasAmount, target, 0, add(data, 0x20), mload(data), 0, 0)
    }
    return success;
  }

  // Validates a fulfillment proof against the stored commitment and derives
  // the randomness. Reverts on an unknown proving key, a missing or
  // mismatched commitment, or an unavailable request block hash.
  function _getRandomnessFromProof(
    Proof memory proof,
    RequestCommitment memory rc
  ) private view returns (bytes32 keyHash, uint256 requestId, uint256 randomness) {
    keyHash = hashOfKey(proof.pk);
    // Only registered proving keys are permitted.
    address oracle = s_provingKeys[keyHash];
    if (oracle == address(0)) {
      revert NoSuchProvingKey(keyHash);
    }
    requestId = uint256(keccak256(abi.encode(keyHash, proof.seed)));
    bytes32 commitment = s_requestCommitments[requestId];
    if (commitment == 0) {
      revert NoCorrespondingRequest();
    }
    if (
      commitment != keccak256(abi.encode(requestId, rc.blockNum, rc.subId, rc.callbackGasLimit, rc.numWords, rc.sender))
    ) {
      revert IncorrectCommitment();
    }

    bytes32 blockHash = ChainSpecificUtil._getBlockhash(rc.blockNum);
    if (blockHash == bytes32(0)) {
      // Block hash no longer directly available; fall back to the store.
      blockHash = BLOCKHASH_STORE.getBlockhash(rc.blockNum);
      if (blockHash == bytes32(0)) {
        revert BlockhashNotInStore(rc.blockNum);
      }
    }

    // The seed actually used by the VRF machinery, mixing in the blockhash
    uint256 actualSeed = uint256(keccak256(abi.encodePacked(proof.seed, blockHash)));
    randomness = VRF._randomValueFromVRFProof(proof, actualSeed); // Reverts on failure
    return (keyHash, requestId, randomness);
  }

  /*
   * @notice Compute fee based on the request count
   * @param reqCount number of requests
   * @return
feePPM fee in PLI PPM + */ + function getFeeTier(uint64 reqCount) public view returns (uint32) { + FeeConfig memory fc = s_feeConfig; + if (0 <= reqCount && reqCount <= fc.reqsForTier2) { + return fc.fulfillmentFlatFeeLinkPPMTier1; + } + if (fc.reqsForTier2 < reqCount && reqCount <= fc.reqsForTier3) { + return fc.fulfillmentFlatFeeLinkPPMTier2; + } + if (fc.reqsForTier3 < reqCount && reqCount <= fc.reqsForTier4) { + return fc.fulfillmentFlatFeeLinkPPMTier3; + } + if (fc.reqsForTier4 < reqCount && reqCount <= fc.reqsForTier5) { + return fc.fulfillmentFlatFeeLinkPPMTier4; + } + return fc.fulfillmentFlatFeeLinkPPMTier5; + } + + /* + * @notice Fulfill a randomness request + * @param proof contains the proof and randomness + * @param rc request commitment pre-image, committed to at request time + * @return payment amount billed to the subscription + * @dev simulated offchain to determine if sufficient balance is present to fulfill the request + */ + function fulfillRandomWords(Proof memory proof, RequestCommitment memory rc) external nonReentrant returns (uint96) { + uint256 startGas = gasleft(); + (bytes32 keyHash, uint256 requestId, uint256 randomness) = _getRandomnessFromProof(proof, rc); + + uint256[] memory randomWords = new uint256[](rc.numWords); + for (uint256 i = 0; i < rc.numWords; i++) { + randomWords[i] = uint256(keccak256(abi.encode(randomness, i))); + } + + delete s_requestCommitments[requestId]; + VRFConsumerBaseV2 v; + bytes memory resp = abi.encodeWithSelector(v.rawFulfillRandomWords.selector, requestId, randomWords); + // Call with explicitly the amount of callback gas requested + // Important to not let them exhaust the gas budget and avoid oracle payment. + // Do not allow any non-view/non-pure coordinator functions to be called + // during the consumers callback code via reentrancyLock. + // Note that _callWithExactGas will revert if we do not have sufficient gas + // to give the callee their requested amount. 
+ s_config.reentrancyLock = true; + bool success = _callWithExactGas(rc.callbackGasLimit, rc.sender, resp); + s_config.reentrancyLock = false; + + // Increment the req count for fee tier selection. + uint64 reqCount = s_subscriptions[rc.subId].reqCount; + s_subscriptions[rc.subId].reqCount += 1; + + // We want to charge users exactly for how much gas they use in their callback. + // The gasAfterPaymentCalculation is meant to cover these additional operations where we + // decrement the subscription balance and increment the oracles withdrawable balance. + // We also add the flat link fee to the payment amount. + // Its specified in millionths of link, if s_config.fulfillmentFlatFeeLinkPPM = 1 + // 1 pli / 1e6 = 1e18 juels / 1e6 = 1e12 juels. + uint96 payment = _calculatePaymentAmount( + startGas, + s_config.gasAfterPaymentCalculation, + getFeeTier(reqCount), + tx.gasprice + ); + if (s_subscriptions[rc.subId].balance < payment) { + revert InsufficientBalance(); + } + s_subscriptions[rc.subId].balance -= payment; + s_withdrawableTokens[s_provingKeys[keyHash]] += payment; + // Include payment in the event for tracking costs. 
+ emit RandomWordsFulfilled(requestId, randomness, payment, success); + return payment; + } + + // Get the amount of gas used for fulfillment + function _calculatePaymentAmount( + uint256 startGas, + uint256 gasAfterPaymentCalculation, + uint32 fulfillmentFlatFeeLinkPPM, + uint256 weiPerUnitGas + ) internal view returns (uint96) { + int256 weiPerUnitLink; + weiPerUnitLink = _getFeedData(); + if (weiPerUnitLink <= 0) { + revert InvalidLinkWeiPrice(weiPerUnitLink); + } + // Will return non-zero on chains that have this enabled + uint256 l1CostWei = ChainSpecificUtil._getCurrentTxL1GasFees(msg.data); + // (1e18 juels/link) ((wei/gas * gas) + l1wei) / (wei/link) = juels + uint256 paymentNoFee = (1e18 * (weiPerUnitGas * (gasAfterPaymentCalculation + startGas - gasleft()) + l1CostWei)) / + uint256(weiPerUnitLink); + uint256 fee = 1e12 * uint256(fulfillmentFlatFeeLinkPPM); + if (paymentNoFee > (1e27 - fee)) { + revert PaymentTooLarge(); // Payment + fee cannot be more than all of the link in existence. 
+ } + return uint96(paymentNoFee + fee); + } + + function _getFeedData() private view returns (int256) { + uint32 stalenessSeconds = s_config.stalenessSeconds; + bool staleFallback = stalenessSeconds > 0; + uint256 timestamp; + int256 weiPerUnitLink; + (, weiPerUnitLink, , timestamp, ) = PLI_ETH_FEED.latestRoundData(); + // solhint-disable-next-line not-rely-on-time + if (staleFallback && stalenessSeconds < block.timestamp - timestamp) { + weiPerUnitLink = s_fallbackWeiPerUnitLink; + } + return weiPerUnitLink; + } + + /* + * @notice Oracle withdraw PLI earned through fulfilling requests + * @param recipient where to send the funds + * @param amount amount to withdraw + */ + function oracleWithdraw(address recipient, uint96 amount) external nonReentrant { + if (s_withdrawableTokens[msg.sender] < amount) { + revert InsufficientBalance(); + } + s_withdrawableTokens[msg.sender] -= amount; + s_totalBalance -= amount; + if (!PLI.transfer(recipient, amount)) { + revert InsufficientBalance(); + } + } + + function onTokenTransfer(address /* sender */, uint256 amount, bytes calldata data) external override nonReentrant { + if (msg.sender != address(PLI)) { + revert OnlyCallableFromLink(); + } + if (data.length != 32) { + revert InvalidCalldata(); + } + uint64 subId = abi.decode(data, (uint64)); + if (s_subscriptionConfigs[subId].owner == address(0)) { + revert InvalidSubscription(); + } + // We do not check that the msg.sender is the subscription owner, + // anyone can fund a subscription. 
+ uint256 oldBalance = s_subscriptions[subId].balance; + s_subscriptions[subId].balance += uint96(amount); + s_totalBalance += uint96(amount); + emit SubscriptionFunded(subId, oldBalance, oldBalance + amount); + } + + function getCurrentSubId() external view returns (uint64) { + return s_currentSubId; + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function getSubscription( + uint64 subId + ) external view override returns (uint96 balance, uint64 reqCount, address owner, address[] memory consumers) { + if (s_subscriptionConfigs[subId].owner == address(0)) { + revert InvalidSubscription(); + } + return ( + s_subscriptions[subId].balance, + s_subscriptions[subId].reqCount, + s_subscriptionConfigs[subId].owner, + s_subscriptionConfigs[subId].consumers + ); + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function createSubscription() external override nonReentrant returns (uint64) { + s_currentSubId++; + uint64 currentSubId = s_currentSubId; + address[] memory consumers = new address[](0); + s_subscriptions[currentSubId] = Subscription({balance: 0, reqCount: 0}); + s_subscriptionConfigs[currentSubId] = SubscriptionConfig({ + owner: msg.sender, + requestedOwner: address(0), + consumers: consumers + }); + + emit SubscriptionCreated(currentSubId, msg.sender); + return currentSubId; + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function requestSubscriptionOwnerTransfer( + uint64 subId, + address newOwner + ) external override onlySubOwner(subId) nonReentrant { + // Proposing to address(0) would never be claimable so don't need to check. 
+ if (s_subscriptionConfigs[subId].requestedOwner != newOwner) { + s_subscriptionConfigs[subId].requestedOwner = newOwner; + emit SubscriptionOwnerTransferRequested(subId, msg.sender, newOwner); + } + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function acceptSubscriptionOwnerTransfer(uint64 subId) external override nonReentrant { + if (s_subscriptionConfigs[subId].owner == address(0)) { + revert InvalidSubscription(); + } + if (s_subscriptionConfigs[subId].requestedOwner != msg.sender) { + revert MustBeRequestedOwner(s_subscriptionConfigs[subId].requestedOwner); + } + address oldOwner = s_subscriptionConfigs[subId].owner; + s_subscriptionConfigs[subId].owner = msg.sender; + s_subscriptionConfigs[subId].requestedOwner = address(0); + emit SubscriptionOwnerTransferred(subId, oldOwner, msg.sender); + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function removeConsumer(uint64 subId, address consumer) external override onlySubOwner(subId) nonReentrant { + if (pendingRequestExists(subId)) { + revert PendingRequestExists(); + } + if (s_consumers[consumer][subId] == 0) { + revert InvalidConsumer(subId, consumer); + } + // Note bounded by MAX_CONSUMERS + address[] memory consumers = s_subscriptionConfigs[subId].consumers; + uint256 lastConsumerIndex = consumers.length - 1; + for (uint256 i = 0; i < consumers.length; i++) { + if (consumers[i] == consumer) { + address last = consumers[lastConsumerIndex]; + // Storage write to preserve last element + s_subscriptionConfigs[subId].consumers[i] = last; + // Storage remove last element + s_subscriptionConfigs[subId].consumers.pop(); + break; + } + } + delete s_consumers[consumer][subId]; + emit SubscriptionConsumerRemoved(subId, consumer); + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function addConsumer(uint64 subId, address consumer) external override onlySubOwner(subId) nonReentrant { + // Already maxed, cannot add any more consumers. 
+ if (s_subscriptionConfigs[subId].consumers.length == MAX_CONSUMERS) { + revert TooManyConsumers(); + } + if (s_consumers[consumer][subId] != 0) { + // Idempotence - do nothing if already added. + // Ensures uniqueness in s_subscriptions[subId].consumers. + return; + } + // Initialize the nonce to 1, indicating the consumer is allocated. + s_consumers[consumer][subId] = 1; + s_subscriptionConfigs[subId].consumers.push(consumer); + + emit SubscriptionConsumerAdded(subId, consumer); + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function cancelSubscription(uint64 subId, address to) external override onlySubOwner(subId) nonReentrant { + if (pendingRequestExists(subId)) { + revert PendingRequestExists(); + } + _cancelSubscriptionHelper(subId, to); + } + + function _cancelSubscriptionHelper(uint64 subId, address to) private nonReentrant { + SubscriptionConfig memory subConfig = s_subscriptionConfigs[subId]; + Subscription memory sub = s_subscriptions[subId]; + uint96 balance = sub.balance; + // Note bounded by MAX_CONSUMERS; + // If no consumers, does nothing. + for (uint256 i = 0; i < subConfig.consumers.length; i++) { + delete s_consumers[subConfig.consumers[i]][subId]; + } + delete s_subscriptionConfigs[subId]; + delete s_subscriptions[subId]; + s_totalBalance -= balance; + if (!PLI.transfer(to, uint256(balance))) { + revert InsufficientBalance(); + } + emit SubscriptionCanceled(subId, to, balance); + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + * @dev Looping is bounded to MAX_CONSUMERS*(number of keyhashes). + * @dev Used to disable subscription canceling while outstanding request are present. 
+ */ + function pendingRequestExists(uint64 subId) public view override returns (bool) { + SubscriptionConfig memory subConfig = s_subscriptionConfigs[subId]; + for (uint256 i = 0; i < subConfig.consumers.length; i++) { + for (uint256 j = 0; j < s_provingKeyHashes.length; j++) { + (uint256 reqId, ) = _computeRequestId( + s_provingKeyHashes[j], + subConfig.consumers[i], + subId, + s_consumers[subConfig.consumers[i]][subId] + ); + if (s_requestCommitments[reqId] != 0) { + return true; + } + } + } + return false; + } + + modifier onlySubOwner(uint64 subId) { + address owner = s_subscriptionConfigs[subId].owner; + if (owner == address(0)) { + revert InvalidSubscription(); + } + if (msg.sender != owner) { + revert MustBeSubOwner(owner); + } + _; + } + + modifier nonReentrant() { + if (s_config.reentrancyLock) { + revert Reentrant(); + } + _; + } + + /** + * @notice The type and version of this contract + * @return Type and version string + */ + function typeAndVersion() external pure virtual override returns (string memory) { + return "VRFCoordinatorV2 1.0.0"; + } +} diff --git a/contracts/src/v0.8/vrf/VRFOwner.sol b/contracts/src/v0.8/vrf/VRFOwner.sol new file mode 100644 index 00000000..0f0c50a9 --- /dev/null +++ b/contracts/src/v0.8/vrf/VRFOwner.sol @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: MIT +// solhint-disable-next-line one-contract-per-file +pragma solidity ^0.8.6; + +import {ConfirmedOwner} from "../shared/access/ConfirmedOwner.sol"; +import {AuthorizedReceiver} from "./AuthorizedReceiver.sol"; +import {VRFTypes} from "./VRFTypes.sol"; + +// Taken from VRFCoordinatorV2.sol +// Must be abi-compatible with what's there +struct FeeConfig { + // Flat fee charged per fulfillment in millionths of link + // So fee range is [0, 2^32/10^6]. 
+ uint32 fulfillmentFlatFeeLinkPPMTier1; + uint32 fulfillmentFlatFeeLinkPPMTier2; + uint32 fulfillmentFlatFeeLinkPPMTier3; + uint32 fulfillmentFlatFeeLinkPPMTier4; + uint32 fulfillmentFlatFeeLinkPPMTier5; + uint24 reqsForTier2; + uint24 reqsForTier3; + uint24 reqsForTier4; + uint24 reqsForTier5; +} + +// Taken from VRFCoordinatorV2.sol +// Must be abi-compatible with what's there +struct Config { + uint16 minimumRequestConfirmations; + uint32 maxGasLimit; + // stalenessSeconds is how long before we consider the feed price to be stale + // and fallback to fallbackWeiPerUnitLink. + uint32 stalenessSeconds; + // Gas to cover oracle payment after we calculate the payment. + // We make it configurable in case those operations are repriced. + uint32 gasAfterPaymentCalculation; + int256 fallbackWeiPerUnitLink; + FeeConfig feeConfig; +} + +/// @dev IVRFCoordinatorV2 is the set of functions on the VRF coordinator V2 +/// @dev that are used in the VRF Owner contract below. +interface IVRFCoordinatorV2 { + function acceptOwnership() external; + + function transferOwnership(address to) external; + + function registerProvingKey(address oracle, uint256[2] calldata publicProvingKey) external; + + function deregisterProvingKey(uint256[2] calldata publicProvingKey) external; + + function setConfig( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation, + int256 fallbackWeiPerUnitLink, + FeeConfig memory feeConfig + ) external; + + function getConfig() + external + view + returns ( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation + ); + + function getFeeConfig() + external + view + returns ( + uint32 fulfillmentFlatFeeLinkPPMTier1, + uint32 fulfillmentFlatFeeLinkPPMTier2, + uint32 fulfillmentFlatFeeLinkPPMTier3, + uint32 fulfillmentFlatFeeLinkPPMTier4, + uint32 fulfillmentFlatFeeLinkPPMTier5, + uint24 reqsForTier2, + uint24 reqsForTier3, + 
uint24 reqsForTier4, + uint24 reqsForTier5 + ); + + function getFallbackWeiPerUnitLink() external view returns (int256); + + function ownerCancelSubscription(uint64 subId) external; + + function recoverFunds(address to) external; + + function hashOfKey(uint256[2] memory publicKey) external pure returns (bytes32); + + function fulfillRandomWords( + VRFTypes.Proof memory proof, + VRFTypes.RequestCommitment memory rc + ) external returns (uint96); +} + +/** + * @notice VRFOwner is a contract that acts as the owner of the VRF + * @notice coordinator, with some useful utilities in the event extraordinary + * @notice things happen on-chain (i.e ETH/PLI price wildly fluctuates, and + * @notice a VRF fulfillment reverts on-chain). + */ +contract VRFOwner is ConfirmedOwner, AuthorizedReceiver { + int256 private constant MAX_INT256 = 0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff; + + IVRFCoordinatorV2 internal s_vrfCoordinator; + + event RandomWordsForced(uint256 indexed requestId, uint64 indexed subId, address indexed sender); + + constructor(address _vrfCoordinator) ConfirmedOwner(msg.sender) { + // solhint-disable-next-line custom-errors + require(_vrfCoordinator != address(0), "vrf coordinator address must be non-zero"); + s_vrfCoordinator = IVRFCoordinatorV2(_vrfCoordinator); + } + + /** + * @notice Accepts ownership of the VRF coordinator if transferred to us. + */ + function acceptVRFOwnership() external onlyOwner { + s_vrfCoordinator.acceptOwnership(); + } + + /** + * @notice Transfers ownership of the VRF coordinator to the specified address. + * @param to the address to transfer ownership of the VRF Coordinator to. + */ + function transferVRFOwnership(address to) external onlyOwner { + s_vrfCoordinator.transferOwnership(to); + } + + /** + * @notice Returns the address of the VRF coordinator reference in this contract. + * @return The address of the VRF coordinator reference in this contract. 
+ */ + function getVRFCoordinator() public view returns (address) { + return address(s_vrfCoordinator); + } + + /** + * @notice Registers a proving key to an oracle. + * @param oracle address of the oracle + * @param publicProvingKey key that oracle can use to submit vrf fulfillments + */ + function registerProvingKey(address oracle, uint256[2] calldata publicProvingKey) external onlyOwner { + s_vrfCoordinator.registerProvingKey(oracle, publicProvingKey); + } + + /** + * @notice Deregisters a proving key to an oracle. + * @param publicProvingKey key that oracle can use to submit vrf fulfillments + */ + function deregisterProvingKey(uint256[2] calldata publicProvingKey) external onlyOwner { + s_vrfCoordinator.deregisterProvingKey(publicProvingKey); + } + + /** + * @notice Sets the configuration of the vrfv2 coordinator + * @param minimumRequestConfirmations global min for request confirmations + * @param maxGasLimit global max for request gas limit + * @param stalenessSeconds if the eth/link feed is more stale then this, use the fallback price + * @param gasAfterPaymentCalculation gas used in doing accounting after completing the gas measurement + * @param fallbackWeiPerUnitLink fallback eth/link price in the case of a stale feed + * @param feeConfig fee tier configuration + */ + function setConfig( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation, + int256 fallbackWeiPerUnitLink, + FeeConfig memory feeConfig + ) public onlyOwner { + s_vrfCoordinator.setConfig( + minimumRequestConfirmations, + maxGasLimit, + stalenessSeconds, + gasAfterPaymentCalculation, + fallbackWeiPerUnitLink, + feeConfig + ); + } + + /** + * @notice Sets the configuration of the vrfv2 coordinator - only to be used from within fulfillRandomWords. + * @dev The reason plain setConfig cannot be used is that it is marked as onlyOwner. 
Since fulfillRandomWords + * @dev is gated by authorized senders, and the authorized senders are not necessarily owners, the call will + * @dev always fail if the caller of fulfillRandomWords is not the owner, which is not what we want. + * @param minimumRequestConfirmations global min for request confirmations + * @param maxGasLimit global max for request gas limit + * @param stalenessSeconds if the eth/link feed is more stale then this, use the fallback price + * @param gasAfterPaymentCalculation gas used in doing accounting after completing the gas measurement + * @param fallbackWeiPerUnitLink fallback eth/link price in the case of a stale feed + * @param feeConfig fee tier configuration + */ + function _setConfig( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation, + int256 fallbackWeiPerUnitLink, + FeeConfig memory feeConfig + ) private { + s_vrfCoordinator.setConfig( + minimumRequestConfirmations, + maxGasLimit, + stalenessSeconds, + gasAfterPaymentCalculation, + fallbackWeiPerUnitLink, + feeConfig + ); + } + + /** + * @notice Owner cancel subscription, sends remaining link directly to the subscription owner. + * @param subId subscription id + * @dev notably can be called even if there are pending requests, outstanding ones may fail onchain + */ + function ownerCancelSubscription(uint64 subId) external onlyOwner { + s_vrfCoordinator.ownerCancelSubscription(subId); + } + + /** + * @notice Recover link sent with transfer instead of transferAndCall. + * @param to address to send link to + */ + function recoverFunds(address to) external onlyOwner { + s_vrfCoordinator.recoverFunds(to); + } + + /** + * @notice Get all relevant configs from the VRF coordinator. + * @dev This is done in a separate function to avoid the "stack too deep" issue + * @dev when too many local variables are in the same scope. + * @return Config struct containing all relevant configs from the VRF coordinator. 
+ */ + function _getConfigs() private view returns (Config memory) { + ( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation + ) = s_vrfCoordinator.getConfig(); + ( + uint32 fulfillmentFlatFeeLinkPPMTier1, + uint32 fulfillmentFlatFeeLinkPPMTier2, + uint32 fulfillmentFlatFeeLinkPPMTier3, + uint32 fulfillmentFlatFeeLinkPPMTier4, + uint32 fulfillmentFlatFeeLinkPPMTier5, + uint24 reqsForTier2, + uint24 reqsForTier3, + uint24 reqsForTier4, + uint24 reqsForTier5 + ) = s_vrfCoordinator.getFeeConfig(); + int256 fallbackWeiPerUnitLink = s_vrfCoordinator.getFallbackWeiPerUnitLink(); + return + Config({ + minimumRequestConfirmations: minimumRequestConfirmations, + maxGasLimit: maxGasLimit, + stalenessSeconds: stalenessSeconds, + gasAfterPaymentCalculation: gasAfterPaymentCalculation, + fallbackWeiPerUnitLink: fallbackWeiPerUnitLink, + feeConfig: FeeConfig({ + fulfillmentFlatFeeLinkPPMTier1: fulfillmentFlatFeeLinkPPMTier1, + fulfillmentFlatFeeLinkPPMTier2: fulfillmentFlatFeeLinkPPMTier2, + fulfillmentFlatFeeLinkPPMTier3: fulfillmentFlatFeeLinkPPMTier3, + fulfillmentFlatFeeLinkPPMTier4: fulfillmentFlatFeeLinkPPMTier4, + fulfillmentFlatFeeLinkPPMTier5: fulfillmentFlatFeeLinkPPMTier5, + reqsForTier2: reqsForTier2, + reqsForTier3: reqsForTier3, + reqsForTier4: reqsForTier4, + reqsForTier5: reqsForTier5 + }) + }); + } + + /** + * @notice Fulfill a randomness request + * @param proof contains the proof and randomness + * @param rc request commitment pre-image, committed to at request time + */ + function fulfillRandomWords( + VRFTypes.Proof memory proof, + VRFTypes.RequestCommitment memory rc + ) external validateAuthorizedSender { + uint256 requestId = _requestIdFromProof(proof.pk, proof.seed); + + // Get current configs to restore them to original values after + // calling _setConfig. 
+ Config memory cfg = _getConfigs(); + + // call _setConfig with the appropriate params in order to fulfill + // an accidentally-underfunded request. + _setConfig( + cfg.minimumRequestConfirmations, + cfg.maxGasLimit, + 1, // stalenessSeconds + 0, // gasAfterPaymentCalculation + MAX_INT256, // fallbackWeiPerUnitLink + FeeConfig({ + fulfillmentFlatFeeLinkPPMTier1: 0, + fulfillmentFlatFeeLinkPPMTier2: 0, + fulfillmentFlatFeeLinkPPMTier3: 0, + fulfillmentFlatFeeLinkPPMTier4: 0, + fulfillmentFlatFeeLinkPPMTier5: 0, + reqsForTier2: 0, + reqsForTier3: 0, + reqsForTier4: 0, + reqsForTier5: 0 + }) + ); + + s_vrfCoordinator.fulfillRandomWords(proof, rc); + + // reset configuration back to old values. + _setConfig( + cfg.minimumRequestConfirmations, + cfg.maxGasLimit, + cfg.stalenessSeconds, + cfg.gasAfterPaymentCalculation, + cfg.fallbackWeiPerUnitLink, + cfg.feeConfig + ); + + emit RandomWordsForced(requestId, rc.subId, rc.sender); + } + + /** + * @notice Concrete implementation of AuthorizedReceiver + * @return bool of whether sender is authorized + */ + function _canSetAuthorizedSenders() internal view override returns (bool) { + return owner() == msg.sender; + } + + /** + * @notice Returns the request for corresponding to the given public key and proof seed. + * @param publicKey the VRF public key associated with the proof + * @param proofSeed the proof seed + * @dev Refer to VRFCoordinatorV2.getRandomnessFromProof for original implementation. 
+ */ + function _requestIdFromProof(uint256[2] memory publicKey, uint256 proofSeed) private view returns (uint256) { + bytes32 keyHash = s_vrfCoordinator.hashOfKey(publicKey); + uint256 requestId = uint256(keccak256(abi.encode(keyHash, proofSeed))); + return requestId; + } +} diff --git a/contracts/src/v0.8/vrf/VRFRequestIDBase.sol b/contracts/src/v0.8/vrf/VRFRequestIDBase.sol new file mode 100644 index 00000000..c068201e --- /dev/null +++ b/contracts/src/v0.8/vrf/VRFRequestIDBase.sol @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract VRFRequestIDBase { + /** + * @notice returns the seed which is actually input to the VRF coordinator + * + * @dev To prevent repetition of VRF output due to repetition of the + * @dev user-supplied seed, that seed is combined in a hash with the + * @dev user-specific nonce, and the address of the consuming contract. The + * @dev risk of repetition is mostly mitigated by inclusion of a blockhash in + * @dev the final seed, but the nonce does protect against repetition in + * @dev requests which are included in a single block. 
+ * + * @param _userSeed VRF seed input provided by user + * @param _requester Address of the requesting contract + * @param _nonce User-specific nonce at the time of the request + */ + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function makeVRFInputSeed( + bytes32 _keyHash, + uint256 _userSeed, + address _requester, + uint256 _nonce + ) internal pure returns (uint256) { + return uint256(keccak256(abi.encode(_keyHash, _userSeed, _requester, _nonce))); + } + + /** + * @notice Returns the id for this request + * @param _keyHash The serviceAgreement ID to be used for this request + * @param _vRFInputSeed The seed to be passed directly to the VRF + * @return The id for this request + * + * @dev Note that _vRFInputSeed is not the seed passed by the consuming + * @dev contract, but the one generated by makeVRFInputSeed + */ + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function makeRequestId(bytes32 _keyHash, uint256 _vRFInputSeed) internal pure returns (bytes32) { + return keccak256(abi.encodePacked(_keyHash, _vRFInputSeed)); + } +} diff --git a/contracts/src/v0.8/vrf/VRFTypes.sol b/contracts/src/v0.8/vrf/VRFTypes.sol new file mode 100644 index 00000000..be26051f --- /dev/null +++ b/contracts/src/v0.8/vrf/VRFTypes.sol @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +/** + * @title VRFTypes + * @notice The VRFTypes library is a collection of types that is required to fulfill VRF requests + * on-chain. They must be ABI-compatible with the types used by the coordinator contracts. + */ +library VRFTypes { + // ABI-compatible with VRF.Proof. + // This proof is used for VRF V2 and V2Plus. + struct Proof { + uint256[2] pk; + uint256[2] gamma; + uint256 c; + uint256 s; + uint256 seed; + address uWitness; + uint256[2] cGammaWitness; + uint256[2] sHashWitness; + uint256 zInv; + } + + // ABI-compatible with VRFCoordinatorV2.RequestCommitment. 
+ // This is only used for VRF V2. + struct RequestCommitment { + uint64 blockNum; + uint64 subId; + uint32 callbackGasLimit; + uint32 numWords; + address sender; + } + + // ABI-compatible with VRFCoordinatorV2Plus.RequestCommitment. + // This is only used for VRF V2Plus. + struct RequestCommitmentV2Plus { + uint64 blockNum; + uint256 subId; + uint32 callbackGasLimit; + uint32 numWords; + address sender; + bytes extraArgs; + } +} diff --git a/contracts/src/v0.8/vrf/VRFV2Wrapper.sol b/contracts/src/v0.8/vrf/VRFV2Wrapper.sol new file mode 100644 index 00000000..19422199 --- /dev/null +++ b/contracts/src/v0.8/vrf/VRFV2Wrapper.sol @@ -0,0 +1,449 @@ +// SPDX-License-Identifier: MIT +// solhint-disable-next-line one-contract-per-file +pragma solidity ^0.8.6; + +import {ConfirmedOwner} from "../shared/access/ConfirmedOwner.sol"; +import {TypeAndVersionInterface} from "../interfaces/TypeAndVersionInterface.sol"; +import {VRFConsumerBaseV2} from "./VRFConsumerBaseV2.sol"; +import {LinkTokenInterface} from "../shared/interfaces/LinkTokenInterface.sol"; +import {AggregatorV3Interface} from "../shared/interfaces/AggregatorV3Interface.sol"; +import {VRFCoordinatorV2Interface} from "./interfaces/VRFCoordinatorV2Interface.sol"; +import {VRFV2WrapperInterface} from "./interfaces/VRFV2WrapperInterface.sol"; +import {VRFV2WrapperConsumerBase} from "./VRFV2WrapperConsumerBase.sol"; +import {ChainSpecificUtil} from "../ChainSpecificUtil.sol"; + +/** + * @notice A wrapper for VRFCoordinatorV2 that provides an interface better suited to one-off + * @notice requests for randomness. 
+ */ +contract VRFV2Wrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsumerBaseV2, VRFV2WrapperInterface { + event WrapperFulfillmentFailed(uint256 indexed requestId, address indexed consumer); + + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + LinkTokenInterface public immutable PLI; + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + AggregatorV3Interface public immutable PLI_ETH_FEED; + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + ExtendedVRFCoordinatorV2Interface public immutable COORDINATOR; + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + uint64 public immutable SUBSCRIPTION_ID; + /// @dev this is the size of a VRF v2 fulfillment's calldata abi-encoded in bytes. + /// @dev proofSize = 13 words = 13 * 256 = 3328 bits + /// @dev commitmentSize = 5 words = 5 * 256 = 1280 bits + /// @dev dataSize = proofSize + commitmentSize = 4608 bits + /// @dev selector = 32 bits + /// @dev total data size = 4608 bits + 32 bits = 4640 bits = 580 bytes + uint32 public s_fulfillmentTxSizeBytes = 580; + + // 5k is plenty for an EXTCODESIZE call (2600) + warm CALL (100) + // and some arithmetic operations. + uint256 private constant GAS_FOR_CALL_EXACT_CHECK = 5_000; + + // lastRequestId is the request ID of the most recent VRF V2 request made by this wrapper. This + // should only be relied on within the same transaction the request was made. + uint256 public override lastRequestId; + + // Configuration fetched from VRFCoordinatorV2 + + // s_configured tracks whether this contract has been configured. If not configured, randomness + // requests cannot be made. + bool public s_configured; + + // s_disabled disables the contract when true. When disabled, new VRF requests cannot be made + // but existing ones can still be fulfilled. 
+ bool public s_disabled; + + // s_fallbackWeiPerUnitLink is the backup PLI exchange rate used when the PLI/NATIVE feed is + // stale. + int256 private s_fallbackWeiPerUnitLink; + + // s_stalenessSeconds is the number of seconds before we consider the feed price to be stale and + // fallback to fallbackWeiPerUnitLink. + uint32 private s_stalenessSeconds; + + // s_fulfillmentFlatFeeLinkPPM is the flat fee in millionths of PLI that VRFCoordinatorV2 + // charges. + uint32 private s_fulfillmentFlatFeeLinkPPM; + + // Other configuration + + // s_wrapperGasOverhead reflects the gas overhead of the wrapper's fulfillRandomWords + // function. The cost for this gas is passed to the user. + uint32 private s_wrapperGasOverhead; + + // s_coordinatorGasOverhead reflects the gas overhead of the coordinator's fulfillRandomWords + // function. The cost for this gas is billed to the subscription, and must therefor be included + // in the pricing for wrapped requests. This includes the gas costs of proof verification and + // payment calculation in the coordinator. + uint32 private s_coordinatorGasOverhead; + + // s_wrapperPremiumPercentage is the premium ratio in percentage. For example, a value of 0 + // indicates no premium. A value of 15 indicates a 15 percent premium. + uint8 private s_wrapperPremiumPercentage; + + // s_keyHash is the key hash to use when requesting randomness. Fees are paid based on current gas + // fees, so this should be set to the highest gas lane on the network. + bytes32 internal s_keyHash; + + // s_maxNumWords is the max number of words that can be requested in a single wrapped VRF request. 
+ uint8 internal s_maxNumWords; + + struct Callback { + address callbackAddress; + uint32 callbackGasLimit; + uint256 requestGasPrice; + int256 requestWeiPerUnitLink; + uint256 juelsPaid; + } + mapping(uint256 => Callback) /* requestID */ /* callback */ public s_callbacks; + + constructor( + address _link, + address _linkEthFeed, + address _coordinator + ) ConfirmedOwner(msg.sender) VRFConsumerBaseV2(_coordinator) { + PLI = LinkTokenInterface(_link); + PLI_ETH_FEED = AggregatorV3Interface(_linkEthFeed); + COORDINATOR = ExtendedVRFCoordinatorV2Interface(_coordinator); + + // Create this wrapper's subscription and add itself as a consumer. + uint64 subId = ExtendedVRFCoordinatorV2Interface(_coordinator).createSubscription(); + SUBSCRIPTION_ID = subId; + ExtendedVRFCoordinatorV2Interface(_coordinator).addConsumer(subId, address(this)); + } + + /** + * @notice setFulfillmentTxSize sets the size of the fulfillment transaction in bytes. + * @param size is the size of the fulfillment transaction in bytes. + */ + function setFulfillmentTxSize(uint32 size) external onlyOwner { + s_fulfillmentTxSizeBytes = size; + } + + /** + * @notice setConfig configures VRFV2Wrapper. + * + * @dev Sets wrapper-specific configuration based on the given parameters, and fetches any needed + * @dev VRFCoordinatorV2 configuration from the coordinator. + * + * @param _wrapperGasOverhead reflects the gas overhead of the wrapper's fulfillRandomWords + * function. + * + * @param _coordinatorGasOverhead reflects the gas overhead of the coordinator's + * fulfillRandomWords function. + * + * @param _wrapperPremiumPercentage is the premium ratio in percentage for wrapper requests. + * + * @param _keyHash to use for requesting randomness. 
+ */ + function setConfig( + uint32 _wrapperGasOverhead, + uint32 _coordinatorGasOverhead, + uint8 _wrapperPremiumPercentage, + bytes32 _keyHash, + uint8 _maxNumWords + ) external onlyOwner { + s_wrapperGasOverhead = _wrapperGasOverhead; + s_coordinatorGasOverhead = _coordinatorGasOverhead; + s_wrapperPremiumPercentage = _wrapperPremiumPercentage; + s_keyHash = _keyHash; + s_maxNumWords = _maxNumWords; + s_configured = true; + + // Get other configuration from coordinator + (, , s_stalenessSeconds, ) = COORDINATOR.getConfig(); + s_fallbackWeiPerUnitLink = COORDINATOR.getFallbackWeiPerUnitLink(); + (s_fulfillmentFlatFeeLinkPPM, , , , , , , , ) = COORDINATOR.getFeeConfig(); + } + + /** + * @notice getConfig returns the current VRFV2Wrapper configuration. + * + * @return fallbackWeiPerUnitLink is the backup PLI exchange rate used when the PLI/NATIVE feed + * is stale. + * + * @return stalenessSeconds is the number of seconds before we consider the feed price to be stale + * and fallback to fallbackWeiPerUnitLink. + * + * @return fulfillmentFlatFeeLinkPPM is the flat fee in millionths of PLI that VRFCoordinatorV2 + * charges. + * + * @return wrapperGasOverhead reflects the gas overhead of the wrapper's fulfillRandomWords + * function. The cost for this gas is passed to the user. + * + * @return coordinatorGasOverhead reflects the gas overhead of the coordinator's + * fulfillRandomWords function. + * + * @return wrapperPremiumPercentage is the premium ratio in percentage. For example, a value of 0 + * indicates no premium. A value of 15 indicates a 15 percent premium. + * + * @return keyHash is the key hash to use when requesting randomness. Fees are paid based on + * current gas fees, so this should be set to the highest gas lane on the network. + * + * @return maxNumWords is the max number of words that can be requested in a single wrapped VRF + * request. 
+ */ + function getConfig() + external + view + returns ( + int256 fallbackWeiPerUnitLink, + uint32 stalenessSeconds, + uint32 fulfillmentFlatFeeLinkPPM, + uint32 wrapperGasOverhead, + uint32 coordinatorGasOverhead, + uint8 wrapperPremiumPercentage, + bytes32 keyHash, + uint8 maxNumWords + ) + { + return ( + s_fallbackWeiPerUnitLink, + s_stalenessSeconds, + s_fulfillmentFlatFeeLinkPPM, + s_wrapperGasOverhead, + s_coordinatorGasOverhead, + s_wrapperPremiumPercentage, + s_keyHash, + s_maxNumWords + ); + } + + /** + * @notice Calculates the price of a VRF request with the given callbackGasLimit at the current + * @notice block. + * + * @dev This function relies on the transaction gas price which is not automatically set during + * @dev simulation. To estimate the price at a specific gas price, use the estimatePrice function. + * + * @param _callbackGasLimit is the gas limit used to estimate the price. + */ + function calculateRequestPrice( + uint32 _callbackGasLimit + ) external view override onlyConfiguredNotDisabled returns (uint256) { + int256 weiPerUnitLink = _getFeedData(); + return _calculateRequestPrice(_callbackGasLimit, tx.gasprice, weiPerUnitLink); + } + + /** + * @notice Estimates the price of a VRF request with a specific gas limit and gas price. + * + * @dev This is a convenience function that can be called in simulation to better understand + * @dev pricing. + * + * @param _callbackGasLimit is the gas limit used to estimate the price. + * @param _requestGasPriceWei is the gas price in wei used for the estimation. 
+ */ + function estimateRequestPrice( + uint32 _callbackGasLimit, + uint256 _requestGasPriceWei + ) external view override onlyConfiguredNotDisabled returns (uint256) { + int256 weiPerUnitLink = _getFeedData(); + return _calculateRequestPrice(_callbackGasLimit, _requestGasPriceWei, weiPerUnitLink); + } + + function _calculateRequestPrice( + uint256 _gas, + uint256 _requestGasPrice, + int256 _weiPerUnitLink + ) internal view returns (uint256) { + // costWei is the base fee denominated in wei (native) + // costWei takes into account the L1 posting costs of the VRF fulfillment + // transaction, if we are on an L2. + uint256 costWei = (_requestGasPrice * + (_gas + s_wrapperGasOverhead + s_coordinatorGasOverhead) + + ChainSpecificUtil._getL1CalldataGasCost(s_fulfillmentTxSizeBytes)); + // (1e18 juels/link) * ((wei/gas * (gas)) + l1wei) / (wei/link) == 1e18 juels * wei/link / (wei/link) == 1e18 juels * wei/link * link/wei == juels + // baseFee is the base fee denominated in juels (link) + uint256 baseFee = (1e18 * costWei) / uint256(_weiPerUnitLink); + // feeWithPremium is the fee after the percentage premium is applied + uint256 feeWithPremium = (baseFee * (s_wrapperPremiumPercentage + 100)) / 100; + // feeWithFlatFee is the fee after the flat fee is applied on top of the premium + uint256 feeWithFlatFee = feeWithPremium + (1e12 * uint256(s_fulfillmentFlatFeeLinkPPM)); + + return feeWithFlatFee; + } + + /** + * @notice onTokenTransfer is called by LinkToken upon payment for a VRF request. + * + * @dev Reverts if payment is too low. + * + * @param _sender is the sender of the payment, and the address that will receive a VRF callback + * upon fulfillment. + * + * @param _amount is the amount of PLI paid in Juels. + * + * @param _data is the abi-encoded VRF request parameters: uint32 callbackGasLimit, + * uint16 requestConfirmations, and uint32 numWords. 
+ */ + function onTokenTransfer(address _sender, uint256 _amount, bytes calldata _data) external onlyConfiguredNotDisabled { + // solhint-disable-next-line custom-errors + require(msg.sender == address(PLI), "only callable from PLI"); + + (uint32 callbackGasLimit, uint16 requestConfirmations, uint32 numWords) = abi.decode( + _data, + (uint32, uint16, uint32) + ); + uint32 eip150Overhead = _getEIP150Overhead(callbackGasLimit); + int256 weiPerUnitLink = _getFeedData(); + uint256 price = _calculateRequestPrice(callbackGasLimit, tx.gasprice, weiPerUnitLink); + // solhint-disable-next-line custom-errors + require(_amount >= price, "fee too low"); + // solhint-disable-next-line custom-errors + require(numWords <= s_maxNumWords, "numWords too high"); + + uint256 requestId = COORDINATOR.requestRandomWords( + s_keyHash, + SUBSCRIPTION_ID, + requestConfirmations, + callbackGasLimit + eip150Overhead + s_wrapperGasOverhead, + numWords + ); + s_callbacks[requestId] = Callback({ + callbackAddress: _sender, + callbackGasLimit: callbackGasLimit, + requestGasPrice: tx.gasprice, + requestWeiPerUnitLink: weiPerUnitLink, + juelsPaid: _amount + }); + lastRequestId = requestId; + } + + /** + * @notice withdraw is used by the VRFV2Wrapper's owner to withdraw PLI revenue. + * + * @param _recipient is the address that should receive the PLI funds. + * + * @param _amount is the amount of PLI in Juels that should be withdrawn. + */ + function withdraw(address _recipient, uint256 _amount) external onlyOwner { + PLI.transfer(_recipient, _amount); + } + + /** + * @notice enable this contract so that new requests can be accepted. + */ + function enable() external onlyOwner { + s_disabled = false; + } + + /** + * @notice disable this contract so that new requests will be rejected. When disabled, new requests + * @notice will revert but existing requests can still be fulfilled. 
+ */ + function disable() external onlyOwner { + s_disabled = true; + } + + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) internal override { + Callback memory callback = s_callbacks[_requestId]; + delete s_callbacks[_requestId]; + // solhint-disable-next-line custom-errors + require(callback.callbackAddress != address(0), "request not found"); // This should never happen + + VRFV2WrapperConsumerBase c; + bytes memory resp = abi.encodeWithSelector(c.rawFulfillRandomWords.selector, _requestId, _randomWords); + + bool success = _callWithExactGas(callback.callbackGasLimit, callback.callbackAddress, resp); + if (!success) { + emit WrapperFulfillmentFailed(_requestId, callback.callbackAddress); + } + } + + function _getFeedData() private view returns (int256) { + bool staleFallback = s_stalenessSeconds > 0; + uint256 timestamp; + int256 weiPerUnitLink; + (, weiPerUnitLink, , timestamp, ) = PLI_ETH_FEED.latestRoundData(); + // solhint-disable-next-line not-rely-on-time + if (staleFallback && s_stalenessSeconds < block.timestamp - timestamp) { + weiPerUnitLink = s_fallbackWeiPerUnitLink; + } + // solhint-disable-next-line custom-errors + require(weiPerUnitLink >= 0, "Invalid PLI wei price"); + return weiPerUnitLink; + } + + /** + * @dev Calculates extra amount of gas required for running an assembly call() post-EIP150. + */ + function _getEIP150Overhead(uint32 gas) private pure returns (uint32) { + return gas / 63 + 1; + } + + /** + * @dev calls target address with exactly gasAmount gas and data as calldata + * or reverts if at least gasAmount gas is not available. 
+ */ + function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { + assembly { + let g := gas() + // Compute g -= GAS_FOR_CALL_EXACT_CHECK and check for underflow + // The gas actually passed to the callee is min(gasAmount, 63//64*gas available). + // We want to ensure that we revert if gasAmount > 63//64*gas available + // as we do not want to provide them with less, however that check itself costs + // gas. GAS_FOR_CALL_EXACT_CHECK ensures we have at least enough gas to be able + // to revert if gasAmount > 63//64*gas available. + if lt(g, GAS_FOR_CALL_EXACT_CHECK) { + revert(0, 0) + } + g := sub(g, GAS_FOR_CALL_EXACT_CHECK) + // if g - g//64 <= gasAmount, revert + // (we subtract g//64 because of EIP-150) + if iszero(gt(sub(g, div(g, 64)), gasAmount)) { + revert(0, 0) + } + // solidity calls check that a contract actually exists at the destination, so we do the same + if iszero(extcodesize(target)) { + revert(0, 0) + } + // call and return whether we succeeded. 
ignore return data + // call(gas,addr,value,argsOffset,argsLength,retOffset,retLength) + success := call(gasAmount, target, 0, add(data, 0x20), mload(data), 0, 0) + } + return success; + } + + function typeAndVersion() external pure virtual override returns (string memory) { + return "VRFV2Wrapper 1.0.0"; + } + + modifier onlyConfiguredNotDisabled() { + // solhint-disable-next-line custom-errors + require(s_configured, "wrapper is not configured"); + // solhint-disable-next-line custom-errors + require(!s_disabled, "wrapper is disabled"); + _; + } +} + +interface ExtendedVRFCoordinatorV2Interface is VRFCoordinatorV2Interface { + function getConfig() + external + view + returns ( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation + ); + + function getFallbackWeiPerUnitLink() external view returns (int256); + + function getFeeConfig() + external + view + returns ( + uint32 fulfillmentFlatFeeLinkPPMTier1, + uint32 fulfillmentFlatFeeLinkPPMTier2, + uint32 fulfillmentFlatFeeLinkPPMTier3, + uint32 fulfillmentFlatFeeLinkPPMTier4, + uint32 fulfillmentFlatFeeLinkPPMTier5, + uint24 reqsForTier2, + uint24 reqsForTier3, + uint24 reqsForTier4, + uint24 reqsForTier5 + ); +} diff --git a/contracts/src/v0.8/vrf/VRFV2WrapperConsumerBase.sol b/contracts/src/v0.8/vrf/VRFV2WrapperConsumerBase.sol new file mode 100644 index 00000000..2cb96cec --- /dev/null +++ b/contracts/src/v0.8/vrf/VRFV2WrapperConsumerBase.sol @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../shared/interfaces/LinkTokenInterface.sol"; +import {VRFV2WrapperInterface} from "./interfaces/VRFV2WrapperInterface.sol"; + +/** ******************************************************************************* + * @notice Interface for contracts using VRF randomness through the VRF V2 wrapper + * ******************************************************************************** + * @dev PURPOSE + 
 * + * @dev Create VRF V2 requests without the need for subscription management. Rather than creating + * @dev and funding a VRF V2 subscription, a user can use this wrapper to create one off requests, + * @dev paying up front rather than at fulfillment. + * + * @dev Since the price is determined using the gas price of the request transaction rather than + * @dev the fulfillment transaction, the wrapper charges an additional premium on callback gas + * @dev usage, in addition to some extra overhead costs associated with the VRFV2Wrapper contract. + * ***************************************************************************** + * @dev USAGE + * + * @dev Calling contracts must inherit from VRFV2WrapperConsumerBase. The consumer must be funded + * @dev with enough PLI to make the request, otherwise requests will revert. To request randomness, + * @dev call the 'requestRandomness' function with the desired VRF parameters. This function handles + * @dev paying for the request based on the current pricing. + * + * @dev Consumers must implement the fulfillRandomWords function, which will be called during + * @dev fulfillment with the randomness result. + */ +abstract contract VRFV2WrapperConsumerBase { + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + LinkTokenInterface internal immutable PLI; + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + VRFV2WrapperInterface internal immutable VRF_V2_WRAPPER; + + /** + * @param _link is the address of LinkToken + * @param _vrfV2Wrapper is the address of the VRFV2Wrapper contract + */ + constructor(address _link, address _vrfV2Wrapper) { + PLI = LinkTokenInterface(_link); + VRF_V2_WRAPPER = VRFV2WrapperInterface(_vrfV2Wrapper); + } + + /** + * @dev Requests randomness from the VRF V2 wrapper. + * + * @param _callbackGasLimit is the gas limit that should be used when calling the consumer's + * fulfillRandomWords function. 
+ * @param _requestConfirmations is the number of confirmations to wait before fulfilling the + * request. A higher number of confirmations increases security by reducing the likelihood + * that a chain re-org changes a published randomness outcome. + * @param _numWords is the number of random words to request. + * + * @return requestId is the VRF V2 request ID of the newly created randomness request. + */ + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function requestRandomness( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords + ) internal returns (uint256 requestId) { + PLI.transferAndCall( + address(VRF_V2_WRAPPER), + VRF_V2_WRAPPER.calculateRequestPrice(_callbackGasLimit), + abi.encode(_callbackGasLimit, _requestConfirmations, _numWords) + ); + return VRF_V2_WRAPPER.lastRequestId(); + } + + /** + * @notice fulfillRandomWords handles the VRF V2 wrapper response. The consuming contract must + * @notice implement it. + * + * @param _requestId is the VRF V2 request ID. + * @param _randomWords is the randomness result. 
+ */ + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) internal virtual; + + function rawFulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) external { + // solhint-disable-next-line custom-errors + require(msg.sender == address(VRF_V2_WRAPPER), "only VRF V2 wrapper can fulfill"); + fulfillRandomWords(_requestId, _randomWords); + } +} diff --git a/contracts/src/v0.8/vrf/dev/BatchVRFCoordinatorV2Plus.sol b/contracts/src/v0.8/vrf/dev/BatchVRFCoordinatorV2Plus.sol new file mode 100644 index 00000000..d6976a7f --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/BatchVRFCoordinatorV2Plus.sol @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: MIT +// solhint-disable-next-line one-contract-per-file +pragma solidity 0.8.6; + +import {VRFTypes} from "../VRFTypes.sol"; + +/** + * @title BatchVRFCoordinatorV2Plus + * @notice The BatchVRFCoordinatorV2Plus contract acts as a proxy to write many random responses to the + * @notice provided VRFCoordinatorV2Plus contract efficiently in a single transaction. + */ +contract BatchVRFCoordinatorV2Plus { + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + IVRFCoordinatorV2Plus public immutable COORDINATOR; + + event ErrorReturned(uint256 indexed requestId, string reason); + event RawErrorReturned(uint256 indexed requestId, bytes lowLevelData); + + constructor(address coordinatorAddr) { + COORDINATOR = IVRFCoordinatorV2Plus(coordinatorAddr); + } + + /** + * @notice fulfills multiple randomness requests with the provided proofs and commitments. + * @param proofs the randomness proofs generated by the VRF provider. + * @param rcs the request commitments corresponding to the randomness proofs. 
+ */ + function fulfillRandomWords(VRFTypes.Proof[] memory proofs, VRFTypes.RequestCommitmentV2Plus[] memory rcs) external { + // solhint-disable-next-line custom-errors + require(proofs.length == rcs.length, "input array arg lengths mismatch"); + for (uint256 i = 0; i < proofs.length; i++) { + try COORDINATOR.fulfillRandomWords(proofs[i], rcs[i], false) returns (uint96 /* payment */) { + continue; + } catch Error(string memory reason) { + uint256 requestId = _getRequestIdFromProof(proofs[i]); + emit ErrorReturned(requestId, reason); + } catch (bytes memory lowLevelData) { + uint256 requestId = _getRequestIdFromProof(proofs[i]); + emit RawErrorReturned(requestId, lowLevelData); + } + } + } + + /** + * @notice Returns the proving key hash associated with this public key. + * @param publicKey the key to return the hash of. + */ + function _hashOfKey(uint256[2] memory publicKey) internal pure returns (bytes32) { + return keccak256(abi.encode(publicKey)); + } + + /** + * @notice Returns the request ID of the request associated with the given proof. + * @param proof the VRF proof provided by the VRF oracle. 
+ */ + function _getRequestIdFromProof(VRFTypes.Proof memory proof) internal pure returns (uint256) { + bytes32 keyHash = _hashOfKey(proof.pk); + return uint256(keccak256(abi.encode(keyHash, proof.seed))); + } +} + +interface IVRFCoordinatorV2Plus { + function fulfillRandomWords( + VRFTypes.Proof memory proof, + VRFTypes.RequestCommitmentV2Plus memory rc, + bool onlyPremium + ) external returns (uint96); +} diff --git a/contracts/src/v0.8/vrf/dev/BlockhashStore.sol b/contracts/src/v0.8/vrf/dev/BlockhashStore.sol new file mode 100644 index 00000000..b6389c9b --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/BlockhashStore.sol @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import {ChainSpecificUtil} from "../../ChainSpecificUtil.sol"; + +/** + * @title BlockhashStore + * @notice This contract provides a way to access blockhashes older than + * the 256 block limit imposed by the BLOCKHASH opcode. + * You may assume that any blockhash stored by the contract is correct. + * Note that the contract depends on the format of serialized Ethereum + * blocks. If a future hardfork of Ethereum changes that format, the + * logic in this contract may become incorrect and an updated version + * would have to be deployed. + */ +contract BlockhashStore { + mapping(uint256 => bytes32) internal s_blockhashes; + + /** + * @notice stores blockhash of a given block, assuming it is available through BLOCKHASH + * @param n the number of the block whose blockhash should be stored + */ + function store(uint256 n) public { + bytes32 h = ChainSpecificUtil._getBlockhash(uint64(n)); + // solhint-disable-next-line custom-errors + require(h != 0x0, "blockhash(n) failed"); + s_blockhashes[n] = h; + } + + /** + * @notice stores blockhash of the earliest block still available through BLOCKHASH. 
+ */ + function storeEarliest() external { + store(ChainSpecificUtil._getBlockNumber() - 256); + } + + /** + * @notice stores blockhash after verifying blockheader of child/subsequent block + * @param n the number of the block whose blockhash should be stored + * @param header the rlp-encoded blockheader of block n+1. We verify its correctness by checking + * that it hashes to a stored blockhash, and then extract parentHash to get the n-th blockhash. + */ + function storeVerifyHeader(uint256 n, bytes memory header) public { + // solhint-disable-next-line custom-errors + require(keccak256(header) == s_blockhashes[n + 1], "header has unknown blockhash"); + + // At this point, we know that header is the correct blockheader for block n+1. + + // The header is an rlp-encoded list. The head item of that list is the 32-byte blockhash of the parent block. + // Based on how rlp works, we know that blockheaders always have the following form: + // 0xf9____a0PARENTHASH... + // ^ ^ ^ + // | | | + // | | +--- PARENTHASH is 32 bytes. rlpenc(PARENTHASH) is 0xa || PARENTHASH. + // | | + // | +--- 2 bytes containing the sum of the lengths of the encoded list items + // | + // +--- 0xf9 because we have a list and (sum of lengths of encoded list items) fits exactly into two bytes. + // + // As a consequence, the PARENTHASH is always at offset 4 of the rlp-encoded block header. + + bytes32 parentHash; + assembly { + parentHash := mload(add(header, 36)) // 36 = 32 byte offset for length prefix of ABI-encoded array + // + 4 byte offset of PARENTHASH (see above) + } + + s_blockhashes[n] = parentHash; + } + + /** + * @notice gets a blockhash from the store. If no hash is known, this function reverts. 
+ * @param n the number of the block whose blockhash should be returned + */ + function getBlockhash(uint256 n) external view returns (bytes32) { + bytes32 h = s_blockhashes[n]; + // solhint-disable-next-line custom-errors + require(h != 0x0, "blockhash not found in store"); + return h; + } +} diff --git a/contracts/src/v0.8/vrf/dev/SubscriptionAPI.sol b/contracts/src/v0.8/vrf/dev/SubscriptionAPI.sol new file mode 100644 index 00000000..7a339dc4 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/SubscriptionAPI.sol @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol"; +import {IERC677Receiver} from "../../shared/interfaces/IERC677Receiver.sol"; +import {IVRFSubscriptionV2Plus} from "./interfaces/IVRFSubscriptionV2Plus.sol"; + +abstract contract SubscriptionAPI is ConfirmedOwner, IERC677Receiver, IVRFSubscriptionV2Plus { + using EnumerableSet for EnumerableSet.UintSet; + + /// @dev may not be provided upon construction on some chains due to lack of availability + LinkTokenInterface public PLI; + /// @dev may not be provided upon construction on some chains due to lack of availability + AggregatorV3Interface public PLI_NATIVE_FEED; + + // We need to maintain a list of consuming addresses. + // This bound ensures we are able to loop over them as needed. + // Should a user require more consumers, they can use multiple subscriptions. 
+ uint16 public constant MAX_CONSUMERS = 100; + error TooManyConsumers(); + error InsufficientBalance(); + error InvalidConsumer(uint256 subId, address consumer); + error InvalidSubscription(); + error OnlyCallableFromLink(); + error InvalidCalldata(); + error MustBeSubOwner(address owner); + error PendingRequestExists(); + error MustBeRequestedOwner(address proposedOwner); + error BalanceInvariantViolated(uint256 internalBalance, uint256 externalBalance); // Should never happen + event FundsRecovered(address to, uint256 amount); + event NativeFundsRecovered(address to, uint256 amount); + error LinkAlreadySet(); + error FailedToSendNative(); + error FailedToTransferLink(); + error IndexOutOfRange(); + error LinkNotSet(); + + // We use the subscription struct (1 word) + // at fulfillment time. + struct Subscription { + // There are only 1e9*1e18 = 1e27 juels in existence, so the balance can fit in uint96 (2^96 ~ 7e28) + uint96 balance; // Common link balance used for all consumer requests. + // a uint96 is large enough to hold around ~8e28 wei, or 80 billion ether. + // That should be enough to cover most (if not all) subscriptions. + uint96 nativeBalance; // Common native balance used for all consumer requests. + uint64 reqCount; + } + // We use the config for the mgmt APIs + struct SubscriptionConfig { + address owner; // Owner can fund/withdraw/cancel the sub. + address requestedOwner; // For safely transferring sub ownership. + // Maintains the list of keys in s_consumers. + // We do this for 2 reasons: + // 1. To be able to clean up all keys from s_consumers when canceling a subscription. + // 2. To be able to return the list of all consumers in getSubscription. + // Note that we need the s_consumers map to be able to directly check if a + // consumer is valid without reading all the consumers from storage. + address[] consumers; + } + // Note a nonce of 0 indicates the consumer is not assigned to that subscription. 
+ mapping(address => mapping(uint256 => uint64)) /* consumer */ /* subId */ /* nonce */ internal s_consumers; + mapping(uint256 => SubscriptionConfig) /* subId */ /* subscriptionConfig */ internal s_subscriptionConfigs; + mapping(uint256 => Subscription) /* subId */ /* subscription */ internal s_subscriptions; + // subscription nonce used to construct subId. Rises monotonically + uint64 public s_currentSubNonce; + // track all subscription id's that were created by this contract + // note: access should be through the getActiveSubscriptionIds() view function + // which takes a starting index and a max number to fetch in order to allow + // "pagination" of the subscription ids. in the event a very large number of + // subscription id's are stored in this set, they cannot be retrieved in a + // single RPC call without violating various size limits. + EnumerableSet.UintSet internal s_subIds; + // s_totalBalance tracks the total link sent to/from + // this contract through onTokenTransfer, cancelSubscription and oracleWithdraw. + // A discrepancy with this contract's link balance indicates someone + // sent tokens using transfer and so we may need to use recoverFunds. + uint96 public s_totalBalance; + // s_totalNativeBalance tracks the total native sent to/from + // this contract through fundSubscription, cancelSubscription and oracleWithdrawNative. + // A discrepancy with this contract's native balance indicates someone + // sent native using transfer and so we may need to use recoverNativeFunds. 
+ uint96 public s_totalNativeBalance; + uint96 internal s_withdrawableTokens; + uint96 internal s_withdrawableNative; + + event SubscriptionCreated(uint256 indexed subId, address owner); + event SubscriptionFunded(uint256 indexed subId, uint256 oldBalance, uint256 newBalance); + event SubscriptionFundedWithNative(uint256 indexed subId, uint256 oldNativeBalance, uint256 newNativeBalance); + event SubscriptionConsumerAdded(uint256 indexed subId, address consumer); + event SubscriptionConsumerRemoved(uint256 indexed subId, address consumer); + event SubscriptionCanceled(uint256 indexed subId, address to, uint256 amountLink, uint256 amountNative); + event SubscriptionOwnerTransferRequested(uint256 indexed subId, address from, address to); + event SubscriptionOwnerTransferred(uint256 indexed subId, address from, address to); + + struct Config { + uint16 minimumRequestConfirmations; + uint32 maxGasLimit; + // Reentrancy protection. + bool reentrancyLock; + // stalenessSeconds is how long before we consider the feed price to be stale + // and fallback to fallbackWeiPerUnitLink. + uint32 stalenessSeconds; + // Gas to cover oracle payment after we calculate the payment. + // We make it configurable in case those operations are repriced. + // The recommended number is below, though it may vary slightly + // if certain chains do not implement certain EIP's. + // 21000 + // base cost of the transaction + // 100 + 5000 + // warm subscription balance read and update. See https://eips.ethereum.org/EIPS/eip-2929 + // 2*2100 + 5000 - // cold read oracle address and oracle balance and first time oracle balance update, note first time will be 20k, but 5k subsequently + // 4800 + // request delete refund (refunds happen after execution), note pre-london fork was 15k. See https://eips.ethereum.org/EIPS/eip-3529 + // 6685 + // Positive static costs of argument encoding etc. note that it varies by +/- x*12 for every x bytes of non-zero data in the proof. + // Total: 37,185 gas. 
+ uint32 gasAfterPaymentCalculation; + // Flat fee charged per fulfillment in millionths of native. + // So fee range is [0, 2^32/10^6]. + uint32 fulfillmentFlatFeeNativePPM; + // Discount relative to fulfillmentFlatFeeNativePPM for link payment in millionths of native + // Should not exceed fulfillmentFlatFeeNativePPM + // So fee range is [0, 2^32/10^6]. + uint32 fulfillmentFlatFeeLinkDiscountPPM; + // nativePremiumPercentage is the percentage of the total gas costs that is added to the final premium for native payment + // nativePremiumPercentage = 10 means 10% of the total gas costs is added. only integral percentage is allowed + uint8 nativePremiumPercentage; + // linkPremiumPercentage is the percentage of total gas costs that is added to the final premium for link payment + // linkPremiumPercentage = 10 means 10% of the total gas costs is added. only integral percentage is allowed + uint8 linkPremiumPercentage; + } + Config public s_config; + + error Reentrant(); + modifier nonReentrant() { + _nonReentrant(); + _; + } + + function _nonReentrant() internal view { + if (s_config.reentrancyLock) { + revert Reentrant(); + } + } + + constructor() ConfirmedOwner(msg.sender) {} + + /** + * @notice set the PLI token contract and link native feed to be + * used by this coordinator + * @param link - address of link token + * @param linkNativeFeed address of the link native feed + */ + function setPLIAndPLINativeFeed(address link, address linkNativeFeed) external onlyOwner { + // Disallow re-setting link token because the logic wouldn't really make sense + if (address(PLI) != address(0)) { + revert LinkAlreadySet(); + } + PLI = LinkTokenInterface(link); + PLI_NATIVE_FEED = AggregatorV3Interface(linkNativeFeed); + } + + /** + * @notice Owner cancel subscription, sends remaining link directly to the subscription owner. 
+ * @param subId subscription id + * @dev notably can be called even if there are pending requests, outstanding ones may fail onchain + */ + function ownerCancelSubscription(uint256 subId) external onlyOwner { + address owner = s_subscriptionConfigs[subId].owner; + if (owner == address(0)) { + revert InvalidSubscription(); + } + _cancelSubscriptionHelper(subId, owner); + } + + /** + * @notice Recover link sent with transfer instead of transferAndCall. + * @param to address to send link to + */ + function recoverFunds(address to) external onlyOwner { + // If PLI is not set, we cannot recover funds. + // It is possible that this coordinator address was funded with PLI + // by accident by a user but the PLI token needs to be set first + // before we can recover it. + if (address(PLI) == address(0)) { + revert LinkNotSet(); + } + + uint256 externalBalance = PLI.balanceOf(address(this)); + uint256 internalBalance = uint256(s_totalBalance); + if (internalBalance > externalBalance) { + revert BalanceInvariantViolated(internalBalance, externalBalance); + } + if (internalBalance < externalBalance) { + uint256 amount = externalBalance - internalBalance; + if (!PLI.transfer(to, amount)) { + revert FailedToTransferLink(); + } + emit FundsRecovered(to, amount); + } + // If the balances are equal, nothing to be done. + } + + /** + * @notice Recover native sent with transfer/call/send instead of fundSubscription. 
+ * @param to address to send native to + */ + function recoverNativeFunds(address payable to) external onlyOwner { + uint256 externalBalance = address(this).balance; + uint256 internalBalance = uint256(s_totalNativeBalance); + if (internalBalance > externalBalance) { + revert BalanceInvariantViolated(internalBalance, externalBalance); + } + if (internalBalance < externalBalance) { + uint256 amount = externalBalance - internalBalance; + (bool sent, ) = to.call{value: amount}(""); + if (!sent) { + revert FailedToSendNative(); + } + emit NativeFundsRecovered(to, amount); + } + // If the balances are equal, nothing to be done. + } + + /* + * @notice withdraw PLI earned through fulfilling requests + * @param recipient where to send the funds + * @param amount amount to withdraw + */ + function withdraw(address recipient) external nonReentrant onlyOwner { + if (address(PLI) == address(0)) { + revert LinkNotSet(); + } + if (s_withdrawableTokens == 0) { + revert InsufficientBalance(); + } + uint96 amount = s_withdrawableTokens; + s_withdrawableTokens -= amount; + s_totalBalance -= amount; + if (!PLI.transfer(recipient, amount)) { + revert InsufficientBalance(); + } + } + + /* + * @notice withdraw native earned through fulfilling requests + * @param recipient where to send the funds + * @param amount amount to withdraw + */ + function withdrawNative(address payable recipient) external nonReentrant onlyOwner { + if (s_withdrawableNative == 0) { + revert InsufficientBalance(); + } + // Prevent re-entrancy by updating state before transfer. 
+ uint96 amount = s_withdrawableNative; + s_withdrawableNative -= amount; + s_totalNativeBalance -= amount; + (bool sent, ) = recipient.call{value: amount}(""); + if (!sent) { + revert FailedToSendNative(); + } + } + + function onTokenTransfer(address /* sender */, uint256 amount, bytes calldata data) external override nonReentrant { + if (msg.sender != address(PLI)) { + revert OnlyCallableFromLink(); + } + if (data.length != 32) { + revert InvalidCalldata(); + } + uint256 subId = abi.decode(data, (uint256)); + if (s_subscriptionConfigs[subId].owner == address(0)) { + revert InvalidSubscription(); + } + // We do not check that the sender is the subscription owner, + // anyone can fund a subscription. + uint256 oldBalance = s_subscriptions[subId].balance; + s_subscriptions[subId].balance += uint96(amount); + s_totalBalance += uint96(amount); + emit SubscriptionFunded(subId, oldBalance, oldBalance + amount); + } + + /** + * @inheritdoc IVRFSubscriptionV2Plus + */ + function fundSubscriptionWithNative(uint256 subId) external payable override nonReentrant { + if (s_subscriptionConfigs[subId].owner == address(0)) { + revert InvalidSubscription(); + } + // We do not check that the msg.sender is the subscription owner, + // anyone can fund a subscription. + // We also do not check that msg.value > 0, since that's just a no-op + // and would be a waste of gas on the caller's part. 
+ uint256 oldNativeBalance = s_subscriptions[subId].nativeBalance; + s_subscriptions[subId].nativeBalance += uint96(msg.value); + s_totalNativeBalance += uint96(msg.value); + emit SubscriptionFundedWithNative(subId, oldNativeBalance, oldNativeBalance + msg.value); + } + + /** + * @inheritdoc IVRFSubscriptionV2Plus + */ + function getSubscription( + uint256 subId + ) + public + view + override + returns (uint96 balance, uint96 nativeBalance, uint64 reqCount, address owner, address[] memory consumers) + { + owner = s_subscriptionConfigs[subId].owner; + if (owner == address(0)) { + revert InvalidSubscription(); + } + return ( + s_subscriptions[subId].balance, + s_subscriptions[subId].nativeBalance, + s_subscriptions[subId].reqCount, + owner, + s_subscriptionConfigs[subId].consumers + ); + } + + /** + * @inheritdoc IVRFSubscriptionV2Plus + */ + function getActiveSubscriptionIds( + uint256 startIndex, + uint256 maxCount + ) external view override returns (uint256[] memory ids) { + uint256 numSubs = s_subIds.length(); + if (startIndex >= numSubs) revert IndexOutOfRange(); + uint256 endIndex = startIndex + maxCount; + endIndex = endIndex > numSubs || maxCount == 0 ? numSubs : endIndex; + uint256 idsLength = endIndex - startIndex; + ids = new uint256[](idsLength); + for (uint256 idx = 0; idx < idsLength; ++idx) { + ids[idx] = s_subIds.at(idx + startIndex); + } + return ids; + } + + /** + * @inheritdoc IVRFSubscriptionV2Plus + */ + function createSubscription() external override nonReentrant returns (uint256 subId) { + // Generate a subscription id that is globally unique. + uint64 currentSubNonce = s_currentSubNonce; + subId = uint256( + keccak256(abi.encodePacked(msg.sender, blockhash(block.number - 1), address(this), currentSubNonce)) + ); + // Increment the subscription nonce counter. + s_currentSubNonce = currentSubNonce + 1; + // Initialize storage variables. 
+ address[] memory consumers = new address[](0); + s_subscriptions[subId] = Subscription({balance: 0, nativeBalance: 0, reqCount: 0}); + s_subscriptionConfigs[subId] = SubscriptionConfig({ + owner: msg.sender, + requestedOwner: address(0), + consumers: consumers + }); + // Update the s_subIds set, which tracks all subscription ids created in this contract. + s_subIds.add(subId); + + emit SubscriptionCreated(subId, msg.sender); + return subId; + } + + /** + * @inheritdoc IVRFSubscriptionV2Plus + */ + function requestSubscriptionOwnerTransfer( + uint256 subId, + address newOwner + ) external override onlySubOwner(subId) nonReentrant { + // Proposing to address(0) would never be claimable so don't need to check. + SubscriptionConfig storage subscriptionConfig = s_subscriptionConfigs[subId]; + if (subscriptionConfig.requestedOwner != newOwner) { + subscriptionConfig.requestedOwner = newOwner; + emit SubscriptionOwnerTransferRequested(subId, msg.sender, newOwner); + } + } + + /** + * @inheritdoc IVRFSubscriptionV2Plus + */ + function acceptSubscriptionOwnerTransfer(uint256 subId) external override nonReentrant { + address oldOwner = s_subscriptionConfigs[subId].owner; + if (oldOwner == address(0)) { + revert InvalidSubscription(); + } + if (s_subscriptionConfigs[subId].requestedOwner != msg.sender) { + revert MustBeRequestedOwner(s_subscriptionConfigs[subId].requestedOwner); + } + s_subscriptionConfigs[subId].owner = msg.sender; + s_subscriptionConfigs[subId].requestedOwner = address(0); + emit SubscriptionOwnerTransferred(subId, oldOwner, msg.sender); + } + + /** + * @inheritdoc IVRFSubscriptionV2Plus + */ + function addConsumer(uint256 subId, address consumer) external override onlySubOwner(subId) nonReentrant { + // Already maxed, cannot add any more consumers. 
+ address[] storage consumers = s_subscriptionConfigs[subId].consumers; + if (consumers.length == MAX_CONSUMERS) { + revert TooManyConsumers(); + } + mapping(uint256 => uint64) storage nonces = s_consumers[consumer]; + if (nonces[subId] != 0) { + // Idempotence - do nothing if already added. + // Ensures uniqueness in s_subscriptions[subId].consumers. + return; + } + // Initialize the nonce to 1, indicating the consumer is allocated. + nonces[subId] = 1; + consumers.push(consumer); + + emit SubscriptionConsumerAdded(subId, consumer); + } + + function _deleteSubscription(uint256 subId) internal returns (uint96 balance, uint96 nativeBalance) { + address[] storage consumers = s_subscriptionConfigs[subId].consumers; + balance = s_subscriptions[subId].balance; + nativeBalance = s_subscriptions[subId].nativeBalance; + // Note bounded by MAX_CONSUMERS; + // If no consumers, does nothing. + uint256 consumersLength = consumers.length; + for (uint256 i = 0; i < consumersLength; ++i) { + delete s_consumers[consumers[i]][subId]; + } + delete s_subscriptionConfigs[subId]; + delete s_subscriptions[subId]; + s_subIds.remove(subId); + if (balance != 0) { + s_totalBalance -= balance; + } + if (nativeBalance != 0) { + s_totalNativeBalance -= nativeBalance; + } + return (balance, nativeBalance); + } + + function _cancelSubscriptionHelper(uint256 subId, address to) internal { + (uint96 balance, uint96 nativeBalance) = _deleteSubscription(subId); + + // Only withdraw PLI if the token is active and there is a balance. 
+ if (address(PLI) != address(0) && balance != 0) { + if (!PLI.transfer(to, uint256(balance))) { + revert InsufficientBalance(); + } + } + + // send native to the "to" address using call + (bool success, ) = to.call{value: uint256(nativeBalance)}(""); + if (!success) { + revert FailedToSendNative(); + } + emit SubscriptionCanceled(subId, to, balance, nativeBalance); + } + + modifier onlySubOwner(uint256 subId) { + _onlySubOwner(subId); + _; + } + + function _onlySubOwner(uint256 subId) internal view { + address owner = s_subscriptionConfigs[subId].owner; + if (owner == address(0)) { + revert InvalidSubscription(); + } + if (msg.sender != owner) { + revert MustBeSubOwner(owner); + } + } +} diff --git a/contracts/src/v0.8/vrf/dev/TrustedBlockhashStore.sol b/contracts/src/v0.8/vrf/dev/TrustedBlockhashStore.sol new file mode 100644 index 00000000..b1a53b57 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/TrustedBlockhashStore.sol @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.6; + +import {ChainSpecificUtil} from "../../ChainSpecificUtil.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {BlockhashStore} from "./BlockhashStore.sol"; + +contract TrustedBlockhashStore is ConfirmedOwner, BlockhashStore { + error NotInWhitelist(); + error InvalidTrustedBlockhashes(); + error InvalidRecentBlockhash(); + + mapping(address => bool) public s_whitelistStatus; + address[] public s_whitelist; + + constructor(address[] memory whitelist) ConfirmedOwner(msg.sender) { + setWhitelist(whitelist); + } + + /** + * @notice sets the whitelist of addresses that can store blockhashes + * @param whitelist the whitelist of addresses that can store blockhashes + */ + function setWhitelist(address[] memory whitelist) public onlyOwner { + address[] memory previousWhitelist = s_whitelist; + s_whitelist = whitelist; + + // Unset whitelist status for all addresses in the previous whitelist, + // and set whitelist status for all addresses in 
the new whitelist. + for (uint256 i = 0; i < previousWhitelist.length; i++) { + s_whitelistStatus[previousWhitelist[i]] = false; + } + for (uint256 i = 0; i < whitelist.length; i++) { + s_whitelistStatus[whitelist[i]] = true; + } + } + + /** + * @notice stores a list of blockhashes and their respective blocks, only callable + * by a whitelisted address + * @param blockhashes the list of blockhashes and their respective blocks + */ + function storeTrusted( + uint256[] calldata blockNums, + bytes32[] calldata blockhashes, + uint256 recentBlockNumber, + bytes32 recentBlockhash + ) external { + bytes32 onChainHash = ChainSpecificUtil._getBlockhash(uint64(recentBlockNumber)); + if (onChainHash != recentBlockhash) { + revert InvalidRecentBlockhash(); + } + + if (!s_whitelistStatus[msg.sender]) { + revert NotInWhitelist(); + } + + if (blockNums.length != blockhashes.length) { + revert InvalidTrustedBlockhashes(); + } + + for (uint256 i = 0; i < blockNums.length; i++) { + s_blockhashes[blockNums[i]] = blockhashes[i]; + } + } +} diff --git a/contracts/src/v0.8/vrf/dev/VRFConsumerBaseV2Plus.sol b/contracts/src/v0.8/vrf/dev/VRFConsumerBaseV2Plus.sol new file mode 100644 index 00000000..dc7c4e0e --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/VRFConsumerBaseV2Plus.sol @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import {IVRFCoordinatorV2Plus} from "./interfaces/IVRFCoordinatorV2Plus.sol"; +import {IVRFMigratableConsumerV2Plus} from "./interfaces/IVRFMigratableConsumerV2Plus.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; + +/** **************************************************************************** + * @notice Interface for contracts using VRF randomness + * ***************************************************************************** + * @dev PURPOSE + * + * @dev Reggie the Random Oracle (not his real job) wants to provide randomness + * @dev to Vera the verifier in such a way that Vera can be sure he's not + 
* @dev making his output up to suit himself. Reggie provides Vera a public key + * @dev to which he knows the secret key. Each time Vera provides a seed to + * @dev Reggie, he gives back a value which is computed completely + * @dev deterministically from the seed and the secret key. + * + * @dev Reggie provides a proof by which Vera can verify that the output was + * @dev correctly computed once Reggie tells it to her, but without that proof, + * @dev the output is indistinguishable to her from a uniform random sample + * @dev from the output space. + * + * @dev The purpose of this contract is to make it easy for unrelated contracts + * @dev to talk to Vera the verifier about the work Reggie is doing, to provide + * @dev simple access to a verifiable source of randomness. It ensures 2 things: + * @dev 1. The fulfillment came from the VRFCoordinatorV2Plus. + * @dev 2. The consumer contract implements fulfillRandomWords. + * ***************************************************************************** + * @dev USAGE + * + * @dev Calling contracts must inherit from VRFConsumerBaseV2Plus, and can + * @dev initialize VRFConsumerBaseV2Plus's attributes in their constructor as + * @dev shown: + * + * @dev contract VRFConsumerV2Plus is VRFConsumerBaseV2Plus { + * @dev constructor(, address _vrfCoordinator, address _subOwner) + * @dev VRFConsumerBaseV2Plus(_vrfCoordinator, _subOwner) public { + * @dev + * @dev } + * @dev } + * + * @dev The oracle will have given you an ID for the VRF keypair they have + * @dev committed to (let's call it keyHash). Create a subscription, fund it + * @dev and your consumer contract as a consumer of it (see VRFCoordinatorInterface + * @dev subscription management functions). + * @dev Call requestRandomWords(keyHash, subId, minimumRequestConfirmations, + * @dev callbackGasLimit, numWords, extraArgs), + * @dev see (IVRFCoordinatorV2Plus for a description of the arguments). 
+ * + * @dev Once the VRFCoordinatorV2Plus has received and validated the oracle's response + * @dev to your request, it will call your contract's fulfillRandomWords method. + * + * @dev The randomness argument to fulfillRandomWords is a set of random words + * @dev generated from your requestId and the blockHash of the request. + * + * @dev If your contract could have concurrent requests open, you can use the + * @dev requestId returned from requestRandomWords to track which response is associated + * @dev with which randomness request. + * @dev See "SECURITY CONSIDERATIONS" for principles to keep in mind, + * @dev if your contract could have multiple requests in flight simultaneously. + * + * @dev Colliding `requestId`s are cryptographically impossible as long as seeds + * @dev differ. + * + * ***************************************************************************** + * @dev SECURITY CONSIDERATIONS + * + * @dev A method with the ability to call your fulfillRandomness method directly + * @dev could spoof a VRF response with any random value, so it's critical that + * @dev it cannot be directly called by anything other than this base contract + * @dev (specifically, by the VRFConsumerBaseV2Plus.rawFulfillRandomness method). + * + * @dev For your users to trust that your contract's random behavior is free + * @dev from malicious interference, it's best if you can write it so that all + * @dev behaviors implied by a VRF response are executed *during* your + * @dev fulfillRandomness method. If your contract must store the response (or + * @dev anything derived from it) and use it later, you must ensure that any + * @dev user-significant behavior which depends on that stored value cannot be + * @dev manipulated by a subsequent VRF request. 
+ *
+ * @dev Similarly, both miners and the VRF oracle itself have some influence
+ * @dev over the order in which VRF responses appear on the blockchain, so if
+ * @dev your contract could have multiple VRF requests in flight simultaneously,
+ * @dev you must ensure that the order in which the VRF responses arrive cannot
+ * @dev be used to manipulate your contract's user-significant behavior.
+ *
+ * @dev Since the block hash of the block which contains the requestRandomness
+ * @dev call is mixed into the input to the VRF *last*, a sufficiently powerful
+ * @dev miner could, in principle, fork the blockchain to evict the block
+ * @dev containing the request, forcing the request to be included in a
+ * @dev different block with a different hash, and therefore a different input
+ * @dev to the VRF. However, such an attack would incur a substantial economic
+ * @dev cost. This cost scales with the number of blocks the VRF oracle waits
+ * @dev until it responds to a request. It is for this reason
+ * @dev that you can signal to an oracle you'd like them to wait longer before
+ * @dev responding to the request (however this is not enforced in the contract
+ * @dev and so remains effective only in the case of unmodified oracle software). 
+ */ +abstract contract VRFConsumerBaseV2Plus is IVRFMigratableConsumerV2Plus, ConfirmedOwner { + error OnlyCoordinatorCanFulfill(address have, address want); + error OnlyOwnerOrCoordinator(address have, address owner, address coordinator); + error ZeroAddress(); + + // s_vrfCoordinator should be used by consumers to make requests to vrfCoordinator + // so that coordinator reference is updated after migration + IVRFCoordinatorV2Plus public s_vrfCoordinator; + + /** + * @param _vrfCoordinator address of VRFCoordinator contract + */ + constructor(address _vrfCoordinator) ConfirmedOwner(msg.sender) { + s_vrfCoordinator = IVRFCoordinatorV2Plus(_vrfCoordinator); + } + + /** + * @notice fulfillRandomness handles the VRF response. Your contract must + * @notice implement it. See "SECURITY CONSIDERATIONS" above for important + * @notice principles to keep in mind when implementing your fulfillRandomness + * @notice method. + * + * @dev VRFConsumerBaseV2Plus expects its subcontracts to have a method with this + * @dev signature, and will call it once it has verified the proof + * @dev associated with the randomness. (It is triggered via a call to + * @dev rawFulfillRandomness, below.) + * + * @param requestId The Id initially returned by requestRandomness + * @param randomWords the VRF output expanded to the requested number of words + */ + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal virtual; + + // rawFulfillRandomness is called by VRFCoordinator when it receives a valid VRF + // proof. 
rawFulfillRandomness then calls fulfillRandomness, after validating + // the origin of the call + function rawFulfillRandomWords(uint256 requestId, uint256[] memory randomWords) external { + if (msg.sender != address(s_vrfCoordinator)) { + revert OnlyCoordinatorCanFulfill(msg.sender, address(s_vrfCoordinator)); + } + fulfillRandomWords(requestId, randomWords); + } + + /** + * @inheritdoc IVRFMigratableConsumerV2Plus + */ + function setCoordinator(address _vrfCoordinator) public override onlyOwnerOrCoordinator { + s_vrfCoordinator = IVRFCoordinatorV2Plus(_vrfCoordinator); + } + + modifier onlyOwnerOrCoordinator() { + if (msg.sender != owner() && msg.sender != address(s_vrfCoordinator)) { + revert OnlyOwnerOrCoordinator(msg.sender, owner(), address(s_vrfCoordinator)); + } + _; + } +} diff --git a/contracts/src/v0.8/vrf/dev/VRFConsumerBaseV2Upgradeable.sol b/contracts/src/v0.8/vrf/dev/VRFConsumerBaseV2Upgradeable.sol new file mode 100644 index 00000000..479cd91e --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/VRFConsumerBaseV2Upgradeable.sol @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +/** **************************************************************************** + * @notice Interface for contracts using VRF randomness + * ***************************************************************************** + * @dev PURPOSE + * + * @dev Reggie the Random Oracle (not his real job) wants to provide randomness + * @dev to Vera the verifier in such a way that Vera can be sure he's not + * @dev making his output up to suit himself. Reggie provides Vera a public key + * @dev to which he knows the secret key. Each time Vera provides a seed to + * @dev Reggie, he gives back a value which is computed completely + * @dev deterministically from the seed and the secret key. 
+ * + * @dev Reggie provides a proof by which Vera can verify that the output was + * @dev correctly computed once Reggie tells it to her, but without that proof, + * @dev the output is indistinguishable to her from a uniform random sample + * @dev from the output space. + * + * @dev The purpose of this contract is to make it easy for unrelated contracts + * @dev to talk to Vera the verifier about the work Reggie is doing, to provide + * @dev simple access to a verifiable source of randomness. It ensures 2 things: + * @dev 1. The fulfillment came from the VRFCoordinator + * @dev 2. The consumer contract implements fulfillRandomWords. + * ***************************************************************************** + * @dev USAGE + * + * @dev Calling contracts must inherit from VRFConsumerBase, and can + * @dev initialize VRFConsumerBase's attributes in their respective initializer as + * @dev shown: + * + * @dev import "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; + * @dev contract VRFConsumer is Initializable, VRFConsumerBaseV2Upgradeable { + * @dev initialize(, address _vrfCoordinator) public initializer { + * @dev __VRFConsumerBaseV2_init(_vrfCoordinator); + * @dev + * @dev } + * @dev } + * + * @dev The oracle will have given you an ID for the VRF keypair they have + * @dev committed to (let's call it keyHash). Create subscription, fund it + * @dev and your consumer contract as a consumer of it (see VRFCoordinatorInterface + * @dev subscription management functions). + * @dev Call requestRandomWords(keyHash, subId, minimumRequestConfirmations, + * @dev callbackGasLimit, numWords), + * @dev see (VRFCoordinatorInterface for a description of the arguments). + * + * @dev Once the VRFCoordinator has received and validated the oracle's response + * @dev to your request, it will call your contract's fulfillRandomWords method. 
+ * + * @dev The randomness argument to fulfillRandomWords is a set of random words + * @dev generated from your requestId and the blockHash of the request. + * + * @dev If your contract could have concurrent requests open, you can use the + * @dev requestId returned from requestRandomWords to track which response is associated + * @dev with which randomness request. + * @dev See "SECURITY CONSIDERATIONS" for principles to keep in mind, + * @dev if your contract could have multiple requests in flight simultaneously. + * + * @dev Colliding `requestId`s are cryptographically impossible as long as seeds + * @dev differ. + * + * ***************************************************************************** + * @dev SECURITY CONSIDERATIONS + * + * @dev A method with the ability to call your fulfillRandomness method directly + * @dev could spoof a VRF response with any random value, so it's critical that + * @dev it cannot be directly called by anything other than this base contract + * @dev (specifically, by the VRFConsumerBase.rawFulfillRandomness method). + * + * @dev For your users to trust that your contract's random behavior is free + * @dev from malicious interference, it's best if you can write it so that all + * @dev behaviors implied by a VRF response are executed *during* your + * @dev fulfillRandomness method. If your contract must store the response (or + * @dev anything derived from it) and use it later, you must ensure that any + * @dev user-significant behavior which depends on that stored value cannot be + * @dev manipulated by a subsequent VRF request. + * + * @dev Similarly, both miners and the VRF oracle itself have some influence + * @dev over the order in which VRF responses appear on the blockchain, so if + * @dev your contract could have multiple VRF requests in flight simultaneously, + * @dev you must ensure that the order in which the VRF responses arrive cannot + * @dev be used to manipulate your contract's user-significant behavior. 
+ * + * @dev Since the block hash of the block which contains the requestRandomness + * @dev call is mixed into the input to the VRF *last*, a sufficiently powerful + * @dev miner could, in principle, fork the blockchain to evict the block + * @dev containing the request, forcing the request to be included in a + * @dev different block with a different hash, and therefore a different input + * @dev to the VRF. However, such an attack would incur a substantial economic + * @dev cost. This cost scales with the number of blocks the VRF oracle waits + * @dev until it calls responds to a request. It is for this reason that + * @dev that you can signal to an oracle you'd like them to wait longer before + * @dev responding to the request (however this is not enforced in the contract + * @dev and so remains effective only in the case of unmodified oracle software). + */ + +import {Initializable} from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; + +/** + * @dev The VRFConsumerBaseV2Upgradable is an upgradable variant of VRFConsumerBaseV2 + * @dev (see https://docs.openzeppelin.com/upgrades-plugins/1.x/writing-upgradeable). + * @dev It's semantics are identical to VRFConsumerBaseV2 and can be inherited from + * @dev to create an upgradeable VRF consumer contract. + */ +abstract contract VRFConsumerBaseV2Upgradeable is Initializable { + error OnlyCoordinatorCanFulfill(address have, address want); + // solhint-disable-next-line plugin-solidity/prefix-storage-variables-with-s-underscore + address private vrfCoordinator; + + // See https://github.com/OpenZeppelin/openzeppelin-sdk/issues/37. + // Each uint256 covers a single storage slot, see https://docs.soliditylang.org/en/latest/internals/layout_in_storage.html. + // solhint-disable-next-line plugin-solidity/prefix-storage-variables-with-s-underscore + uint256[49] private __gap; + + /** + * @param _vrfCoordinator the VRFCoordinatorV2 address. 
+ * @dev See https://docs.chain.link/docs/vrf/v2/supported-networks/ for coordinator + * @dev addresses on your preferred network. + */ + // solhint-disable-next-line func-name-mixedcase + function __VRFConsumerBaseV2_init(address _vrfCoordinator) internal onlyInitializing { + if (_vrfCoordinator == address(0)) { + // solhint-disable-next-line custom-errors + revert("must give valid coordinator address"); + } + + vrfCoordinator = _vrfCoordinator; + } + + /** + * @notice fulfillRandomness handles the VRF response. Your contract must + * @notice implement it. See "SECURITY CONSIDERATIONS" above for important + * @notice principles to keep in mind when implementing your fulfillRandomness + * @notice method. + * + * @dev VRFConsumerBaseV2 expects its subcontracts to have a method with this + * @dev signature, and will call it once it has verified the proof + * @dev associated with the randomness. (It is triggered via a call to + * @dev rawFulfillRandomness, below.) + * + * @param requestId The Id initially returned by requestRandomness + * @param randomWords the VRF output expanded to the requested number of words + */ + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal virtual; + + // rawFulfillRandomness is called by VRFCoordinator when it receives a valid VRF + // proof. 
rawFulfillRandomness then calls fulfillRandomness, after validating + // the origin of the call + function rawFulfillRandomWords(uint256 requestId, uint256[] memory randomWords) external { + if (msg.sender != vrfCoordinator) { + revert OnlyCoordinatorCanFulfill(msg.sender, vrfCoordinator); + } + fulfillRandomWords(requestId, randomWords); + } +} diff --git a/contracts/src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol b/contracts/src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol new file mode 100644 index 00000000..f02e8504 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol @@ -0,0 +1,740 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import {BlockhashStoreInterface} from "../interfaces/BlockhashStoreInterface.sol"; +import {VRF} from "../../vrf/VRF.sol"; +import {VRFConsumerBaseV2Plus, IVRFMigratableConsumerV2Plus} from "./VRFConsumerBaseV2Plus.sol"; +import {ChainSpecificUtil} from "../../ChainSpecificUtil.sol"; +import {SubscriptionAPI} from "./SubscriptionAPI.sol"; +import {VRFV2PlusClient} from "./libraries/VRFV2PlusClient.sol"; +import {IVRFCoordinatorV2PlusMigration} from "./interfaces/IVRFCoordinatorV2PlusMigration.sol"; +// solhint-disable-next-line no-unused-import +import {IVRFCoordinatorV2Plus, IVRFSubscriptionV2Plus} from "./interfaces/IVRFCoordinatorV2Plus.sol"; + +// solhint-disable-next-line contract-name-camelcase +contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { + /// @dev should always be available + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + BlockhashStoreInterface public immutable BLOCKHASH_STORE; + + // Set this maximum to 200 to give us a 56 block window to fulfill + // the request before requiring the block hash feeder. + uint16 public constant MAX_REQUEST_CONFIRMATIONS = 200; + uint32 public constant MAX_NUM_WORDS = 500; + // 5k is plenty for an EXTCODESIZE call (2600) + warm CALL (100) + // and some arithmetic operations. 
+ uint256 private constant GAS_FOR_CALL_EXACT_CHECK = 5_000; + error InvalidRequestConfirmations(uint16 have, uint16 min, uint16 max); + error GasLimitTooBig(uint32 have, uint32 want); + error NumWordsTooBig(uint32 have, uint32 want); + error ProvingKeyAlreadyRegistered(bytes32 keyHash); + error NoSuchProvingKey(bytes32 keyHash); + error InvalidLinkWeiPrice(int256 linkWei); + error LinkDiscountTooHigh(uint32 flatFeeLinkDiscountPPM, uint32 flatFeeNativePPM); + error InsufficientGasForConsumer(uint256 have, uint256 want); + error NoCorrespondingRequest(); + error IncorrectCommitment(); + error BlockhashNotInStore(uint256 blockNum); + error PaymentTooLarge(); + error InvalidExtraArgsTag(); + error GasPriceExceeded(uint256 gasPrice, uint256 maxGas); + struct RequestCommitment { + uint64 blockNum; + uint256 subId; + uint32 callbackGasLimit; + uint32 numWords; + address sender; + bytes extraArgs; + } + + struct ProvingKey { + bool exists; // proving key exists + uint64 maxGas; // gas lane max gas price for fulfilling requests + } + + mapping(bytes32 => ProvingKey) /* keyHash */ /* provingKey */ public s_provingKeys; + bytes32[] public s_provingKeyHashes; + mapping(uint256 => bytes32) /* requestID */ /* commitment */ public s_requestCommitments; + event ProvingKeyRegistered(bytes32 keyHash, uint64 maxGas); + event ProvingKeyDeregistered(bytes32 keyHash, uint64 maxGas); + + event RandomWordsRequested( + bytes32 indexed keyHash, + uint256 requestId, + uint256 preSeed, + uint256 indexed subId, + uint16 minimumRequestConfirmations, + uint32 callbackGasLimit, + uint32 numWords, + bytes extraArgs, + address indexed sender + ); + + event RandomWordsFulfilled( + uint256 indexed requestId, + uint256 outputSeed, + uint256 indexed subId, + uint96 payment, + bool success, + bool onlyPremium + ); + + int256 public s_fallbackWeiPerUnitLink; + + event ConfigSet( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation, + 
int256 fallbackWeiPerUnitLink, + uint32 fulfillmentFlatFeeNativePPM, + uint32 fulfillmentFlatFeeLinkDiscountPPM, + uint8 nativePremiumPercentage, + uint8 linkPremiumPercentage + ); + + constructor(address blockhashStore) SubscriptionAPI() { + BLOCKHASH_STORE = BlockhashStoreInterface(blockhashStore); + } + + /** + * @notice Registers a proving key to. + * @param publicProvingKey key that oracle can use to submit vrf fulfillments + */ + function registerProvingKey(uint256[2] calldata publicProvingKey, uint64 maxGas) external onlyOwner { + bytes32 kh = hashOfKey(publicProvingKey); + if (s_provingKeys[kh].exists) { + revert ProvingKeyAlreadyRegistered(kh); + } + s_provingKeys[kh] = ProvingKey({exists: true, maxGas: maxGas}); + s_provingKeyHashes.push(kh); + emit ProvingKeyRegistered(kh, maxGas); + } + + /** + * @notice Deregisters a proving key. + * @param publicProvingKey key that oracle can use to submit vrf fulfillments + */ + function deregisterProvingKey(uint256[2] calldata publicProvingKey) external onlyOwner { + bytes32 kh = hashOfKey(publicProvingKey); + ProvingKey memory key = s_provingKeys[kh]; + if (!key.exists) { + revert NoSuchProvingKey(kh); + } + delete s_provingKeys[kh]; + uint256 s_provingKeyHashesLength = s_provingKeyHashes.length; + for (uint256 i = 0; i < s_provingKeyHashesLength; ++i) { + if (s_provingKeyHashes[i] == kh) { + // Copy last element and overwrite kh to be deleted with it + s_provingKeyHashes[i] = s_provingKeyHashes[s_provingKeyHashesLength - 1]; + s_provingKeyHashes.pop(); + break; + } + } + emit ProvingKeyDeregistered(kh, key.maxGas); + } + + /** + * @notice Returns the proving key hash key associated with this public key + * @param publicKey the key to return the hash of + */ + function hashOfKey(uint256[2] memory publicKey) public pure returns (bytes32) { + return keccak256(abi.encode(publicKey)); + } + + /** + * @notice Sets the configuration of the vrfv2 coordinator + * @param minimumRequestConfirmations global min for request 
confirmations + * @param maxGasLimit global max for request gas limit + * @param stalenessSeconds if the native/link feed is more stale than this, use the fallback price + * @param gasAfterPaymentCalculation gas used in doing accounting after completing the gas measurement + * @param fallbackWeiPerUnitLink fallback native/link price in the case of a stale feed + * @param fulfillmentFlatFeeNativePPM flat fee in native for native payment + * @param fulfillmentFlatFeeLinkDiscountPPM flat fee discount for link payment in native + * @param nativePremiumPercentage native premium percentage + * @param linkPremiumPercentage link premium percentage + */ + function setConfig( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation, + int256 fallbackWeiPerUnitLink, + uint32 fulfillmentFlatFeeNativePPM, + uint32 fulfillmentFlatFeeLinkDiscountPPM, + uint8 nativePremiumPercentage, + uint8 linkPremiumPercentage + ) external onlyOwner { + if (minimumRequestConfirmations > MAX_REQUEST_CONFIRMATIONS) { + revert InvalidRequestConfirmations( + minimumRequestConfirmations, + minimumRequestConfirmations, + MAX_REQUEST_CONFIRMATIONS + ); + } + if (fallbackWeiPerUnitLink <= 0) { + revert InvalidLinkWeiPrice(fallbackWeiPerUnitLink); + } + if (fulfillmentFlatFeeNativePPM > 0 && fulfillmentFlatFeeLinkDiscountPPM >= fulfillmentFlatFeeNativePPM) { + revert LinkDiscountTooHigh(fulfillmentFlatFeeLinkDiscountPPM, fulfillmentFlatFeeNativePPM); + } + s_config = Config({ + minimumRequestConfirmations: minimumRequestConfirmations, + maxGasLimit: maxGasLimit, + stalenessSeconds: stalenessSeconds, + gasAfterPaymentCalculation: gasAfterPaymentCalculation, + reentrancyLock: false, + fulfillmentFlatFeeNativePPM: fulfillmentFlatFeeNativePPM, + fulfillmentFlatFeeLinkDiscountPPM: fulfillmentFlatFeeLinkDiscountPPM, + nativePremiumPercentage: nativePremiumPercentage, + linkPremiumPercentage: linkPremiumPercentage + }); + 
s_fallbackWeiPerUnitLink = fallbackWeiPerUnitLink; + emit ConfigSet( + minimumRequestConfirmations, + maxGasLimit, + stalenessSeconds, + gasAfterPaymentCalculation, + fallbackWeiPerUnitLink, + fulfillmentFlatFeeNativePPM, + fulfillmentFlatFeeLinkDiscountPPM, + nativePremiumPercentage, + linkPremiumPercentage + ); + } + + /// @dev Convert the extra args bytes into a struct + /// @param extraArgs The extra args bytes + /// @return The extra args struct + function _fromBytes(bytes calldata extraArgs) internal pure returns (VRFV2PlusClient.ExtraArgsV1 memory) { + if (extraArgs.length == 0) { + return VRFV2PlusClient.ExtraArgsV1({nativePayment: false}); + } + if (bytes4(extraArgs) != VRFV2PlusClient.EXTRA_ARGS_V1_TAG) revert InvalidExtraArgsTag(); + return abi.decode(extraArgs[4:], (VRFV2PlusClient.ExtraArgsV1)); + } + + /** + * @notice Request a set of random words. + * @param req - a struct containing the following fields for randomness request: + * keyHash - Corresponds to a particular oracle job which uses + * that key for generating the VRF proof. Different keyHash's have different gas price + * ceilings, so you can select a specific one to bound your maximum per request cost. + * subId - The ID of the VRF subscription. Must be funded + * with the minimum subscription balance required for the selected keyHash. + * requestConfirmations - How many blocks you'd like the + * oracle to wait before responding to the request. See SECURITY CONSIDERATIONS + * for why you may want to request more. The acceptable range is + * [minimumRequestBlockConfirmations, 200]. + * callbackGasLimit - How much gas you'd like to receive in your + * fulfillRandomWords callback. Note that gasleft() inside fulfillRandomWords + * may be slightly less than this amount because of gas used calling the function + * (argument decoding etc.), so you may need to request slightly more than you expect + * to have inside fulfillRandomWords. 
The acceptable range is + * [0, maxGasLimit] + * numWords - The number of uint256 random values you'd like to receive + * in your fulfillRandomWords callback. Note these numbers are expanded in a + * secure way by the VRFCoordinator from a single random value supplied by the oracle. + * extraArgs - Encoded extra arguments that has a boolean flag for whether payment + * should be made in native or PLI. Payment in PLI is only available if the PLI token is available to this contract. + * @return requestId - A unique identifier of the request. Can be used to match + * a request to a response in fulfillRandomWords. + */ + function requestRandomWords( + VRFV2PlusClient.RandomWordsRequest calldata req + ) external override nonReentrant returns (uint256 requestId) { + // Input validation using the subscription storage. + uint256 subId = req.subId; + if (s_subscriptionConfigs[subId].owner == address(0)) { + revert InvalidSubscription(); + } + // Its important to ensure that the consumer is in fact who they say they + // are, otherwise they could use someone else's subscription balance. + // A nonce of 0 indicates consumer is not allocated to the sub. + mapping(uint256 => uint64) storage nonces = s_consumers[msg.sender]; + uint64 nonce = nonces[subId]; + if (nonce == 0) { + revert InvalidConsumer(subId, msg.sender); + } + // Input validation using the config storage word. + if ( + req.requestConfirmations < s_config.minimumRequestConfirmations || + req.requestConfirmations > MAX_REQUEST_CONFIRMATIONS + ) { + revert InvalidRequestConfirmations( + req.requestConfirmations, + s_config.minimumRequestConfirmations, + MAX_REQUEST_CONFIRMATIONS + ); + } + // No lower bound on the requested gas limit. A user could request 0 + // and they would simply be billed for the proof verification and wouldn't be + // able to do anything with the random value. 
+ if (req.callbackGasLimit > s_config.maxGasLimit) { + revert GasLimitTooBig(req.callbackGasLimit, s_config.maxGasLimit); + } + if (req.numWords > MAX_NUM_WORDS) { + revert NumWordsTooBig(req.numWords, MAX_NUM_WORDS); + } + + // Note we do not check whether the keyHash is valid to save gas. + // The consequence for users is that they can send requests + // for invalid keyHashes which will simply not be fulfilled. + ++nonce; + uint256 preSeed; + (requestId, preSeed) = _computeRequestId(req.keyHash, msg.sender, subId, nonce); + + bytes memory extraArgsBytes = VRFV2PlusClient._argsToBytes(_fromBytes(req.extraArgs)); + s_requestCommitments[requestId] = keccak256( + abi.encode( + requestId, + ChainSpecificUtil._getBlockNumber(), + subId, + req.callbackGasLimit, + req.numWords, + msg.sender, + extraArgsBytes + ) + ); + emit RandomWordsRequested( + req.keyHash, + requestId, + preSeed, + subId, + req.requestConfirmations, + req.callbackGasLimit, + req.numWords, + extraArgsBytes, + msg.sender + ); + nonces[subId] = nonce; + + return requestId; + } + + function _computeRequestId( + bytes32 keyHash, + address sender, + uint256 subId, + uint64 nonce + ) internal pure returns (uint256, uint256) { + uint256 preSeed = uint256(keccak256(abi.encode(keyHash, sender, subId, nonce))); + return (uint256(keccak256(abi.encode(keyHash, preSeed))), preSeed); + } + + /** + * @dev calls target address with exactly gasAmount gas and data as calldata + * or reverts if at least gasAmount gas is not available. + */ + function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { + assembly { + let g := gas() + // Compute g -= GAS_FOR_CALL_EXACT_CHECK and check for underflow + // The gas actually passed to the callee is min(gasAmount, 63//64*gas available). + // We want to ensure that we revert if gasAmount > 63//64*gas available + // as we do not want to provide them with less, however that check itself costs + // gas. 
GAS_FOR_CALL_EXACT_CHECK ensures we have at least enough gas to be able + // to revert if gasAmount > 63//64*gas available. + if lt(g, GAS_FOR_CALL_EXACT_CHECK) { + revert(0, 0) + } + g := sub(g, GAS_FOR_CALL_EXACT_CHECK) + // if g - g//64 <= gasAmount, revert + // (we subtract g//64 because of EIP-150) + if iszero(gt(sub(g, div(g, 64)), gasAmount)) { + revert(0, 0) + } + // solidity calls check that a contract actually exists at the destination, so we do the same + if iszero(extcodesize(target)) { + revert(0, 0) + } + // call and return whether we succeeded. ignore return data + // call(gas,addr,value,argsOffset,argsLength,retOffset,retLength) + success := call(gasAmount, target, 0, add(data, 0x20), mload(data), 0, 0) + } + return success; + } + + struct Output { + ProvingKey provingKey; + uint256 requestId; + uint256 randomness; + } + + function _getRandomnessFromProof( + Proof memory proof, + RequestCommitment memory rc + ) internal view returns (Output memory) { + bytes32 keyHash = hashOfKey(proof.pk); + ProvingKey memory key = s_provingKeys[keyHash]; + // Only registered proving keys are permitted. 
+ if (!key.exists) { + revert NoSuchProvingKey(keyHash); + } + uint256 requestId = uint256(keccak256(abi.encode(keyHash, proof.seed))); + bytes32 commitment = s_requestCommitments[requestId]; + if (commitment == 0) { + revert NoCorrespondingRequest(); + } + if ( + commitment != + keccak256(abi.encode(requestId, rc.blockNum, rc.subId, rc.callbackGasLimit, rc.numWords, rc.sender, rc.extraArgs)) + ) { + revert IncorrectCommitment(); + } + + bytes32 blockHash = ChainSpecificUtil._getBlockhash(rc.blockNum); + if (blockHash == bytes32(0)) { + blockHash = BLOCKHASH_STORE.getBlockhash(rc.blockNum); + if (blockHash == bytes32(0)) { + revert BlockhashNotInStore(rc.blockNum); + } + } + + // The seed actually used by the VRF machinery, mixing in the blockhash + uint256 actualSeed = uint256(keccak256(abi.encodePacked(proof.seed, blockHash))); + uint256 randomness = VRF._randomValueFromVRFProof(proof, actualSeed); // Reverts on failure + return Output(key, requestId, randomness); + } + + function _getValidatedGasPrice(bool onlyPremium, uint64 gasLaneMaxGas) internal view returns (uint256 gasPrice) { + if (tx.gasprice > gasLaneMaxGas) { + if (onlyPremium) { + // if only the premium amount needs to be billed, then the premium is capped by the gas lane max + return uint256(gasLaneMaxGas); + } else { + // Ensure gas price does not exceed the gas lane max gas price + revert GasPriceExceeded(tx.gasprice, gasLaneMaxGas); + } + } + return tx.gasprice; + } + + function _deliverRandomness( + uint256 requestId, + RequestCommitment memory rc, + uint256[] memory randomWords + ) internal returns (bool success) { + VRFConsumerBaseV2Plus v; + bytes memory resp = abi.encodeWithSelector(v.rawFulfillRandomWords.selector, requestId, randomWords); + // Call with explicitly the amount of callback gas requested + // Important to not let them exhaust the gas budget and avoid oracle payment. 
+ // Do not allow any non-view/non-pure coordinator functions to be called + // during the consumers callback code via reentrancyLock. + // Note that _callWithExactGas will revert if we do not have sufficient gas + // to give the callee their requested amount. + s_config.reentrancyLock = true; + success = _callWithExactGas(rc.callbackGasLimit, rc.sender, resp); + s_config.reentrancyLock = false; + return success; + } + + /* + * @notice Fulfill a randomness request. + * @param proof contains the proof and randomness + * @param rc request commitment pre-image, committed to at request time + * @param onlyPremium only charge premium + * @return payment amount billed to the subscription + * @dev simulated offchain to determine if sufficient balance is present to fulfill the request + */ + function fulfillRandomWords( + Proof memory proof, + RequestCommitment memory rc, + bool onlyPremium + ) external nonReentrant returns (uint96 payment) { + uint256 startGas = gasleft(); + Output memory output = _getRandomnessFromProof(proof, rc); + uint256 gasPrice = _getValidatedGasPrice(onlyPremium, output.provingKey.maxGas); + + uint256[] memory randomWords; + uint256 randomness = output.randomness; + // stack too deep error + { + uint256 numWords = rc.numWords; + randomWords = new uint256[](numWords); + for (uint256 i = 0; i < numWords; ++i) { + randomWords[i] = uint256(keccak256(abi.encode(randomness, i))); + } + } + + uint256 requestId = output.requestId; + delete s_requestCommitments[requestId]; + bool success = _deliverRandomness(requestId, rc, randomWords); + + // Increment the req count for the subscription. + uint256 subId = rc.subId; + ++s_subscriptions[subId].reqCount; + + // stack too deep error + { + bool nativePayment = uint8(rc.extraArgs[rc.extraArgs.length - 1]) == 1; + + // We want to charge users exactly for how much gas they use in their callback. 
+ // The gasAfterPaymentCalculation is meant to cover these additional operations where we + // decrement the subscription balance and increment the oracles withdrawable balance. + payment = _calculatePaymentAmount(startGas, gasPrice, nativePayment, onlyPremium); + + _chargePayment(payment, nativePayment, subId); + } + + // Include payment in the event for tracking costs. + emit RandomWordsFulfilled(requestId, randomness, subId, payment, success, onlyPremium); + + return payment; + } + + function _chargePayment(uint96 payment, bool nativePayment, uint256 subId) internal { + Subscription storage subcription = s_subscriptions[subId]; + if (nativePayment) { + uint96 prevBal = subcription.nativeBalance; + if (prevBal < payment) { + revert InsufficientBalance(); + } + subcription.nativeBalance = prevBal - payment; + s_withdrawableNative += payment; + } else { + uint96 prevBal = subcription.balance; + if (prevBal < payment) { + revert InsufficientBalance(); + } + subcription.balance = prevBal - payment; + s_withdrawableTokens += payment; + } + } + + function _calculatePaymentAmount( + uint256 startGas, + uint256 weiPerUnitGas, + bool nativePayment, + bool onlyPremium + ) internal returns (uint96) { + if (nativePayment) { + return _calculatePaymentAmountNative(startGas, weiPerUnitGas, onlyPremium); + } + return _calculatePaymentAmountLink(startGas, weiPerUnitGas, onlyPremium); + } + + function _calculatePaymentAmountNative( + uint256 startGas, + uint256 weiPerUnitGas, + bool onlyPremium + ) internal returns (uint96) { + // Will return non-zero on chains that have this enabled + uint256 l1CostWei = ChainSpecificUtil._getCurrentTxL1GasFees(msg.data); + // calculate the payment without the premium + uint256 baseFeeWei = weiPerUnitGas * (s_config.gasAfterPaymentCalculation + startGas - gasleft()); + // calculate flat fee in native + uint256 flatFeeWei = 1e12 * uint256(s_config.fulfillmentFlatFeeNativePPM); + if (onlyPremium) { + return uint96((((l1CostWei + baseFeeWei) * 
(s_config.nativePremiumPercentage)) / 100) + flatFeeWei); + } else { + return uint96((((l1CostWei + baseFeeWei) * (100 + s_config.nativePremiumPercentage)) / 100) + flatFeeWei); + } + } + + // Get the amount of gas used for fulfillment + function _calculatePaymentAmountLink( + uint256 startGas, + uint256 weiPerUnitGas, + bool onlyPremium + ) internal returns (uint96) { + int256 weiPerUnitLink; + weiPerUnitLink = _getFeedData(); + if (weiPerUnitLink <= 0) { + revert InvalidLinkWeiPrice(weiPerUnitLink); + } + // Will return non-zero on chains that have this enabled + uint256 l1CostWei = ChainSpecificUtil._getCurrentTxL1GasFees(msg.data); + // (1e18 juels/link) ((wei/gas * gas) + l1wei) / (wei/link) = juels + uint256 paymentNoFee = (1e18 * + (weiPerUnitGas * (s_config.gasAfterPaymentCalculation + startGas - gasleft()) + l1CostWei)) / + uint256(weiPerUnitLink); + // calculate the flat fee in wei + uint256 flatFeeWei = 1e12 * + uint256(s_config.fulfillmentFlatFeeNativePPM - s_config.fulfillmentFlatFeeLinkDiscountPPM); + uint256 flatFeeJuels = (1e18 * flatFeeWei) / uint256(weiPerUnitLink); + uint256 payment; + if (onlyPremium) { + payment = ((paymentNoFee * (s_config.linkPremiumPercentage)) / 100 + flatFeeJuels); + } else { + payment = ((paymentNoFee * (100 + s_config.linkPremiumPercentage)) / 100 + flatFeeJuels); + } + if (payment > 1e27) { + revert PaymentTooLarge(); // Payment + fee cannot be more than all of the link in existence. 
+ } + return uint96(payment); + } + + function _getFeedData() private view returns (int256 weiPerUnitLink) { + uint32 stalenessSeconds = s_config.stalenessSeconds; + uint256 timestamp; + (, weiPerUnitLink, , timestamp, ) = PLI_NATIVE_FEED.latestRoundData(); + // solhint-disable-next-line not-rely-on-time + if (stalenessSeconds > 0 && stalenessSeconds < block.timestamp - timestamp) { + weiPerUnitLink = s_fallbackWeiPerUnitLink; + } + return weiPerUnitLink; + } + + /** + * @inheritdoc IVRFSubscriptionV2Plus + */ + function pendingRequestExists(uint256 subId) public view override returns (bool) { + address[] storage consumers = s_subscriptionConfigs[subId].consumers; + uint256 consumersLength = consumers.length; + if (consumersLength == 0) { + return false; + } + uint256 provingKeyHashesLength = s_provingKeyHashes.length; + for (uint256 i = 0; i < consumersLength; ++i) { + address consumer = consumers[i]; + for (uint256 j = 0; j < provingKeyHashesLength; ++j) { + (uint256 reqId, ) = _computeRequestId(s_provingKeyHashes[j], consumer, subId, s_consumers[consumer][subId]); + if (s_requestCommitments[reqId] != 0) { + return true; + } + } + } + return false; + } + + /** + * @inheritdoc IVRFSubscriptionV2Plus + */ + function removeConsumer(uint256 subId, address consumer) external override onlySubOwner(subId) nonReentrant { + if (pendingRequestExists(subId)) { + revert PendingRequestExists(); + } + if (s_consumers[consumer][subId] == 0) { + revert InvalidConsumer(subId, consumer); + } + // Note bounded by MAX_CONSUMERS + address[] memory consumers = s_subscriptionConfigs[subId].consumers; + uint256 lastConsumerIndex = consumers.length - 1; + for (uint256 i = 0; i < consumers.length; ++i) { + if (consumers[i] == consumer) { + address last = consumers[lastConsumerIndex]; + // Storage write to preserve last element + s_subscriptionConfigs[subId].consumers[i] = last; + // Storage remove last element + s_subscriptionConfigs[subId].consumers.pop(); + break; + } + } + delete 
s_consumers[consumer][subId]; + emit SubscriptionConsumerRemoved(subId, consumer); + } + + /** + * @inheritdoc IVRFSubscriptionV2Plus + */ + function cancelSubscription(uint256 subId, address to) external override onlySubOwner(subId) nonReentrant { + if (pendingRequestExists(subId)) { + revert PendingRequestExists(); + } + _cancelSubscriptionHelper(subId, to); + } + + /*************************************************************************** + * Section: Migration + ***************************************************************************/ + + address[] internal s_migrationTargets; + + /// @dev Emitted when new coordinator is registered as migratable target + event CoordinatorRegistered(address coordinatorAddress); + + /// @dev Emitted when new coordinator is deregistered + event CoordinatorDeregistered(address coordinatorAddress); + + /// @notice emitted when migration to new coordinator completes successfully + /// @param newCoordinator coordinator address after migration + /// @param subId subscription ID + event MigrationCompleted(address newCoordinator, uint256 subId); + + /// @notice emitted when migrate() is called and given coordinator is not registered as migratable target + error CoordinatorNotRegistered(address coordinatorAddress); + + /// @notice emitted when migrate() is called and given coordinator is registered as migratable target + error CoordinatorAlreadyRegistered(address coordinatorAddress); + + /// @dev encapsulates data to be migrated from current coordinator + struct V1MigrationData { + uint8 fromVersion; + uint256 subId; + address subOwner; + address[] consumers; + uint96 linkBalance; + uint96 nativeBalance; + } + + function _isTargetRegistered(address target) internal view returns (bool) { + uint256 migrationTargetsLength = s_migrationTargets.length; + for (uint256 i = 0; i < migrationTargetsLength; ++i) { + if (s_migrationTargets[i] == target) { + return true; + } + } + return false; + } + + function 
registerMigratableCoordinator(address target) external onlyOwner { + if (_isTargetRegistered(target)) { + revert CoordinatorAlreadyRegistered(target); + } + s_migrationTargets.push(target); + emit CoordinatorRegistered(target); + } + + function deregisterMigratableCoordinator(address target) external onlyOwner { + uint256 nTargets = s_migrationTargets.length; + for (uint256 i = 0; i < nTargets; ++i) { + if (s_migrationTargets[i] == target) { + s_migrationTargets[i] = s_migrationTargets[nTargets - 1]; + s_migrationTargets.pop(); + emit CoordinatorDeregistered(target); + return; + } + } + revert CoordinatorNotRegistered(target); + } + + function migrate(uint256 subId, address newCoordinator) external nonReentrant { + if (!_isTargetRegistered(newCoordinator)) { + revert CoordinatorNotRegistered(newCoordinator); + } + (uint96 balance, uint96 nativeBalance, , address owner, address[] memory consumers) = getSubscription(subId); + // solhint-disable-next-line custom-errors + require(owner == msg.sender, "Not subscription owner"); + // solhint-disable-next-line custom-errors + require(!pendingRequestExists(subId), "Pending request exists"); + + V1MigrationData memory migrationData = V1MigrationData({ + fromVersion: 1, + subId: subId, + subOwner: owner, + consumers: consumers, + linkBalance: balance, + nativeBalance: nativeBalance + }); + bytes memory encodedData = abi.encode(migrationData); + _deleteSubscription(subId); + IVRFCoordinatorV2PlusMigration(newCoordinator).onMigration{value: nativeBalance}(encodedData); + + // Only transfer PLI if the token is active and there is a balance. + if (address(PLI) != address(0) && balance != 0) { + // solhint-disable-next-line custom-errors + require(PLI.transfer(address(newCoordinator), balance), "insufficient funds"); + } + + // despite the fact that we follow best practices this is still probably safest + // to prevent any re-entrancy possibilities. 
+ s_config.reentrancyLock = true; + for (uint256 i = 0; i < consumers.length; ++i) { + IVRFMigratableConsumerV2Plus(consumers[i]).setCoordinator(newCoordinator); + } + s_config.reentrancyLock = false; + + emit MigrationCompleted(newCoordinator, subId); + } +} diff --git a/contracts/src/v0.8/vrf/dev/VRFSubscriptionBalanceMonitor.sol b/contracts/src/v0.8/vrf/dev/VRFSubscriptionBalanceMonitor.sol new file mode 100644 index 00000000..5562120f --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/VRFSubscriptionBalanceMonitor.sol @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {AutomationCompatibleInterface as KeeperCompatibleInterface} from "../../automation/interfaces/AutomationCompatibleInterface.sol"; +import {VRFCoordinatorV2Interface} from "../interfaces/VRFCoordinatorV2Interface.sol"; +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {Pausable} from "@openzeppelin/contracts/security/Pausable.sol"; + +/** + * @title The VRFSubscriptionBalanceMonitor contract. + * @notice A keeper-compatible contract that monitors and funds VRF subscriptions. 
+ */ +contract VRFSubscriptionBalanceMonitor is ConfirmedOwner, Pausable, KeeperCompatibleInterface { + VRFCoordinatorV2Interface public COORDINATOR; + LinkTokenInterface public PLITOKEN; + + uint256 private constant MIN_GAS_FOR_TRANSFER = 55_000; + + event FundsAdded(uint256 amountAdded, uint256 newBalance, address sender); + event FundsWithdrawn(uint256 amountWithdrawn, address payee); + event TopUpSucceeded(uint64 indexed subscriptionId); + event TopUpFailed(uint64 indexed subscriptionId); + event KeeperRegistryAddressUpdated(address oldAddress, address newAddress); + event VRFCoordinatorV2AddressUpdated(address oldAddress, address newAddress); + event LinkTokenAddressUpdated(address oldAddress, address newAddress); + event MinWaitPeriodUpdated(uint256 oldMinWaitPeriod, uint256 newMinWaitPeriod); + event OutOfGas(uint256 lastId); + + error InvalidWatchList(); + error OnlyKeeperRegistry(); + error DuplicateSubcriptionId(uint64 duplicate); + + struct Target { + bool isActive; + uint96 minBalanceJuels; + uint96 topUpAmountJuels; + uint56 lastTopUpTimestamp; + } + + address public s_keeperRegistryAddress; // the address of the keeper registry + uint256 public s_minWaitPeriodSeconds; // minimum time to wait between top-ups + uint64[] public s_watchList; // the watchlist on which subscriptions are stored + mapping(uint64 => Target) internal s_targets; + + /** + * @param linkTokenAddress the Link token address + * @param coordinatorAddress the address of the vrf coordinator contract + * @param keeperRegistryAddress the address of the keeper registry contract + * @param minWaitPeriodSeconds the minimum wait period for addresses between funding + */ + constructor( + address linkTokenAddress, + address coordinatorAddress, + address keeperRegistryAddress, + uint256 minWaitPeriodSeconds + ) ConfirmedOwner(msg.sender) { + setLinkTokenAddress(linkTokenAddress); + setVRFCoordinatorV2Address(coordinatorAddress); + setKeeperRegistryAddress(keeperRegistryAddress); + 
setMinWaitPeriodSeconds(minWaitPeriodSeconds); + } + + /** + * @notice Sets the list of subscriptions to watch and their funding parameters. + * @param subscriptionIds the list of subscription ids to watch + * @param minBalancesJuels the minimum balances for each subscription + * @param topUpAmountsJuels the amount to top up each subscription + */ + function setWatchList( + uint64[] calldata subscriptionIds, + uint96[] calldata minBalancesJuels, + uint96[] calldata topUpAmountsJuels + ) external onlyOwner { + if (subscriptionIds.length != minBalancesJuels.length || subscriptionIds.length != topUpAmountsJuels.length) { + revert InvalidWatchList(); + } + uint64[] memory oldWatchList = s_watchList; + for (uint256 idx = 0; idx < oldWatchList.length; idx++) { + s_targets[oldWatchList[idx]].isActive = false; + } + for (uint256 idx = 0; idx < subscriptionIds.length; idx++) { + if (s_targets[subscriptionIds[idx]].isActive) { + revert DuplicateSubcriptionId(subscriptionIds[idx]); + } + if (subscriptionIds[idx] == 0) { + revert InvalidWatchList(); + } + if (topUpAmountsJuels[idx] <= minBalancesJuels[idx]) { + revert InvalidWatchList(); + } + s_targets[subscriptionIds[idx]] = Target({ + isActive: true, + minBalanceJuels: minBalancesJuels[idx], + topUpAmountJuels: topUpAmountsJuels[idx], + lastTopUpTimestamp: 0 + }); + } + s_watchList = subscriptionIds; + } + + /** + * @notice Gets a list of subscriptions that are underfunded. 
+ * @return list of subscriptions that are underfunded + */ + function getUnderfundedSubscriptions() public view returns (uint64[] memory) { + uint64[] memory watchList = s_watchList; + uint64[] memory needsFunding = new uint64[](watchList.length); + uint256 count = 0; + uint256 minWaitPeriod = s_minWaitPeriodSeconds; + uint256 contractBalance = PLITOKEN.balanceOf(address(this)); + Target memory target; + for (uint256 idx = 0; idx < watchList.length; idx++) { + target = s_targets[watchList[idx]]; + (uint96 subscriptionBalance, , , ) = COORDINATOR.getSubscription(watchList[idx]); + if ( + target.lastTopUpTimestamp + minWaitPeriod <= block.timestamp && + contractBalance >= target.topUpAmountJuels && + subscriptionBalance < target.minBalanceJuels + ) { + needsFunding[count] = watchList[idx]; + count++; + contractBalance -= target.topUpAmountJuels; + } + } + if (count < watchList.length) { + assembly { + mstore(needsFunding, count) + } + } + return needsFunding; + } + + /** + * @notice Send funds to the subscriptions provided. 
+ * @param needsFunding the list of subscriptions to fund + */ + function topUp(uint64[] memory needsFunding) public whenNotPaused { + uint256 minWaitPeriodSeconds = s_minWaitPeriodSeconds; + uint256 contractBalance = PLITOKEN.balanceOf(address(this)); + Target memory target; + for (uint256 idx = 0; idx < needsFunding.length; idx++) { + target = s_targets[needsFunding[idx]]; + (uint96 subscriptionBalance, , , ) = COORDINATOR.getSubscription(needsFunding[idx]); + if ( + target.isActive && + target.lastTopUpTimestamp + minWaitPeriodSeconds <= block.timestamp && + subscriptionBalance < target.minBalanceJuels && + contractBalance >= target.topUpAmountJuels + ) { + bool success = PLITOKEN.transferAndCall( + address(COORDINATOR), + target.topUpAmountJuels, + abi.encode(needsFunding[idx]) + ); + if (success) { + s_targets[needsFunding[idx]].lastTopUpTimestamp = uint56(block.timestamp); + contractBalance -= target.topUpAmountJuels; + emit TopUpSucceeded(needsFunding[idx]); + } else { + emit TopUpFailed(needsFunding[idx]); + } + } + if (gasleft() < MIN_GAS_FOR_TRANSFER) { + emit OutOfGas(idx); + return; + } + } + } + + /** + * @notice Gets list of subscription ids that are underfunded and returns a keeper-compatible payload. + * @return upkeepNeeded signals if upkeep is needed, performData is an abi encoded list of subscription ids that need funds + */ + function checkUpkeep( + bytes calldata + ) external view override whenNotPaused returns (bool upkeepNeeded, bytes memory performData) { + uint64[] memory needsFunding = getUnderfundedSubscriptions(); + upkeepNeeded = needsFunding.length > 0; + performData = abi.encode(needsFunding); + return (upkeepNeeded, performData); + } + + /** + * @notice Called by the keeper to send funds to underfunded addresses. 
+ * @param performData the abi encoded list of addresses to fund + */ + function performUpkeep(bytes calldata performData) external override onlyKeeperRegistry whenNotPaused { + uint64[] memory needsFunding = abi.decode(performData, (uint64[])); + topUp(needsFunding); + } + + /** + * @notice Withdraws the contract balance in PLI. + * @param amount the amount of PLI (in juels) to withdraw + * @param payee the address to pay + */ + function withdraw(uint256 amount, address payable payee) external onlyOwner { + // solhint-disable-next-line custom-errors, reason-string + require(payee != address(0)); + emit FundsWithdrawn(amount, payee); + PLITOKEN.transfer(payee, amount); + } + + /** + * @notice Sets the PLI token address. + */ + function setLinkTokenAddress(address linkTokenAddress) public onlyOwner { + // solhint-disable-next-line custom-errors, reason-string + require(linkTokenAddress != address(0)); + emit LinkTokenAddressUpdated(address(PLITOKEN), linkTokenAddress); + PLITOKEN = LinkTokenInterface(linkTokenAddress); + } + + /** + * @notice Sets the VRF coordinator address. + */ + function setVRFCoordinatorV2Address(address coordinatorAddress) public onlyOwner { + // solhint-disable-next-line custom-errors, reason-string + require(coordinatorAddress != address(0)); + emit VRFCoordinatorV2AddressUpdated(address(COORDINATOR), coordinatorAddress); + COORDINATOR = VRFCoordinatorV2Interface(coordinatorAddress); + } + + /** + * @notice Sets the keeper registry address. + */ + function setKeeperRegistryAddress(address keeperRegistryAddress) public onlyOwner { + // solhint-disable-next-line custom-errors, reason-string + require(keeperRegistryAddress != address(0)); + emit KeeperRegistryAddressUpdated(s_keeperRegistryAddress, keeperRegistryAddress); + s_keeperRegistryAddress = keeperRegistryAddress; + } + + /** + * @notice Sets the minimum wait period (in seconds) for subscription ids between funding. 
+ */ + function setMinWaitPeriodSeconds(uint256 period) public onlyOwner { + emit MinWaitPeriodUpdated(s_minWaitPeriodSeconds, period); + s_minWaitPeriodSeconds = period; + } + + /** + * @notice Gets configuration information for a subscription on the watchlist. + */ + function getSubscriptionInfo( + uint64 subscriptionId + ) external view returns (bool isActive, uint96 minBalanceJuels, uint96 topUpAmountJuels, uint56 lastTopUpTimestamp) { + Target memory target = s_targets[subscriptionId]; + return (target.isActive, target.minBalanceJuels, target.topUpAmountJuels, target.lastTopUpTimestamp); + } + + /** + * @notice Gets the list of subscription ids being watched. + */ + function getWatchList() external view returns (uint64[] memory) { + return s_watchList; + } + + /** + * @notice Pause the contract, which prevents executing performUpkeep. + */ + function pause() external onlyOwner { + _pause(); + } + + /** + * @notice Unpause the contract. + */ + function unpause() external onlyOwner { + _unpause(); + } + + modifier onlyKeeperRegistry() { + if (msg.sender != s_keeperRegistryAddress) { + revert OnlyKeeperRegistry(); + } + _; + } +} diff --git a/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapper.sol b/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapper.sol new file mode 100644 index 00000000..ffabbda4 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapper.sol @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {TypeAndVersionInterface} from "../../interfaces/TypeAndVersionInterface.sol"; +import {IVRFV2PlusMigrate} from "./interfaces/IVRFV2PlusMigrate.sol"; +import {VRFConsumerBaseV2Plus} from "./VRFConsumerBaseV2Plus.sol"; +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol"; +import {VRFV2PlusClient} from "./libraries/VRFV2PlusClient.sol"; +import 
{IVRFV2PlusWrapper} from "./interfaces/IVRFV2PlusWrapper.sol"; +import {VRFV2PlusWrapperConsumerBase} from "./VRFV2PlusWrapperConsumerBase.sol"; +import {ChainSpecificUtil} from "../../ChainSpecificUtil.sol"; + +/** + * @notice A wrapper for VRFCoordinatorV2 that provides an interface better suited to one-off + * @notice requests for randomness. + */ +// solhint-disable-next-line max-states-count +contract VRFV2PlusWrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsumerBaseV2Plus, IVRFV2PlusWrapper { + event WrapperFulfillmentFailed(uint256 indexed requestId, address indexed consumer); + + error LinkAlreadySet(); + error FailedToTransferLink(); + error IncorrectExtraArgsLength(uint16 expectedMinimumLength, uint16 actualLength); + error NativePaymentInOnTokenTransfer(); + error PLIPaymentInRequestRandomWordsInNative(); + + /* Storage Slot 1: BEGIN */ + // s_keyHash is the key hash to use when requesting randomness. Fees are paid based on current gas + // fees, so this should be set to the highest gas lane on the network. + bytes32 internal s_keyHash; + /* Storage Slot 1: END */ + + /* Storage Slot 2: BEGIN */ + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + uint256 public immutable SUBSCRIPTION_ID; + /* Storage Slot 2: END */ + + /* Storage Slot 3: BEGIN */ + // 5k is plenty for an EXTCODESIZE call (2600) + warm CALL (100) + // and some arithmetic operations. + uint256 private constant GAS_FOR_CALL_EXACT_CHECK = 5_000; + /* Storage Slot 3: END */ + + /* Storage Slot 4: BEGIN */ + // lastRequestId is the request ID of the most recent VRF V2 request made by this wrapper. This + // should only be relied on within the same transaction the request was made. + uint256 public override lastRequestId; + /* Storage Slot 4: END */ + + /* Storage Slot 5: BEGIN */ + // s_fallbackWeiPerUnitLink is the backup PLI exchange rate used when the PLI/NATIVE feed is + // stale. 
+ int256 private s_fallbackWeiPerUnitLink; + /* Storage Slot 5: END */ + + /* Storage Slot 6: BEGIN */ + // s_stalenessSeconds is the number of seconds before we consider the feed price to be stale and + // fallback to fallbackWeiPerUnitLink. + uint32 private s_stalenessSeconds; + + // s_fulfillmentFlatFeeLinkPPM is the flat fee in millionths of PLI that VRFCoordinatorV2 + // charges. + uint32 private s_fulfillmentFlatFeeLinkPPM; + + // s_fulfillmentFlatFeeLinkPPM is the flat fee in millionths of PLI that VRFCoordinatorV2 + // charges. + uint32 private s_fulfillmentFlatFeeNativePPM; + + LinkTokenInterface public s_link; + /* Storage Slot 6: END */ + + /* Storage Slot 7: BEGIN */ + // s_wrapperGasOverhead reflects the gas overhead of the wrapper's fulfillRandomWords + // function. The cost for this gas is passed to the user. + uint32 private s_wrapperGasOverhead; + + // Configuration fetched from VRFCoordinatorV2 + + /// @dev this is the size of a VRF v2 fulfillment's calldata abi-encoded in bytes. + /// @dev proofSize = 13 words = 13 * 256 = 3328 bits + /// @dev commitmentSize = 5 words = 5 * 256 = 1280 bits + /// @dev dataSize = proofSize + commitmentSize = 4608 bits + /// @dev selector = 32 bits + /// @dev total data size = 4608 bits + 32 bits = 4640 bits = 580 bytes + uint32 public s_fulfillmentTxSizeBytes = 580; + + // s_coordinatorGasOverhead reflects the gas overhead of the coordinator's fulfillRandomWords + // function. The cost for this gas is billed to the subscription, and must therefor be included + // in the pricing for wrapped requests. This includes the gas costs of proof verification and + // payment calculation in the coordinator. + uint32 private s_coordinatorGasOverhead; + + AggregatorV3Interface public s_linkNativeFeed; + /* Storage Slot 7: END */ + + /* Storage Slot 8: BEGIN */ + // s_configured tracks whether this contract has been configured. If not configured, randomness + // requests cannot be made. 
+ bool public s_configured; + + // s_disabled disables the contract when true. When disabled, new VRF requests cannot be made + // but existing ones can still be fulfilled. + bool public s_disabled; + + // s_wrapperPremiumPercentage is the premium ratio in percentage. For example, a value of 0 + // indicates no premium. A value of 15 indicates a 15 percent premium. + uint8 private s_wrapperPremiumPercentage; + + // s_maxNumWords is the max number of words that can be requested in a single wrapped VRF request. + uint8 internal s_maxNumWords; + + uint16 private constant EXPECTED_MIN_LENGTH = 36; + /* Storage Slot 8: END */ + + struct Callback { + address callbackAddress; + uint32 callbackGasLimit; + // Reducing requestGasPrice from uint256 to uint64 slots Callback struct + // into a single word, thus saving an entire SSTORE and leading to 21K + // gas cost saving. 18 ETH would be the max gas price we can process. + // GasPrice is unlikely to be more than 14 ETH on most chains + uint64 requestGasPrice; + } + /* Storage Slot 9: BEGIN */ + mapping(uint256 => Callback) /* requestID */ /* callback */ public s_callbacks; + + /* Storage Slot 9: END */ + + constructor(address _link, address _linkNativeFeed, address _coordinator) VRFConsumerBaseV2Plus(_coordinator) { + if (_link != address(0)) { + s_link = LinkTokenInterface(_link); + } + if (_linkNativeFeed != address(0)) { + s_linkNativeFeed = AggregatorV3Interface(_linkNativeFeed); + } + + // Create this wrapper's subscription and add itself as a consumer. 
+ uint256 subId = s_vrfCoordinator.createSubscription(); + SUBSCRIPTION_ID = subId; + s_vrfCoordinator.addConsumer(subId, address(this)); + } + + /** + * @notice set the link token to be used by this wrapper + * @param link address of the link token + */ + function setPLI(address link) external onlyOwner { + // Disallow re-setting link token because the logic wouldn't really make sense + if (address(s_link) != address(0)) { + revert LinkAlreadySet(); + } + s_link = LinkTokenInterface(link); + } + + /** + * @notice set the link native feed to be used by this wrapper + * @param linkNativeFeed address of the link native feed + */ + function setLinkNativeFeed(address linkNativeFeed) external onlyOwner { + s_linkNativeFeed = AggregatorV3Interface(linkNativeFeed); + } + + /** + * @notice setFulfillmentTxSize sets the size of the fulfillment transaction in bytes. + * @param size is the size of the fulfillment transaction in bytes. + */ + function setFulfillmentTxSize(uint32 size) external onlyOwner { + s_fulfillmentTxSizeBytes = size; + } + + /** + * @notice setConfig configures VRFV2Wrapper. + * + * @dev Sets wrapper-specific configuration based on the given parameters, and fetches any needed + * @dev VRFCoordinatorV2 configuration from the coordinator. + * + * @param _wrapperGasOverhead reflects the gas overhead of the wrapper's fulfillRandomWords + * function. + * + * @param _coordinatorGasOverhead reflects the gas overhead of the coordinator's + * fulfillRandomWords function. + * + * @param _wrapperPremiumPercentage is the premium ratio in percentage for wrapper requests. + * + * @param _keyHash to use for requesting randomness. + * @param _maxNumWords is the max number of words that can be requested in a single wrapped VRF request + * @param _stalenessSeconds is the number of seconds before we consider the feed price to be stale + * and fallback to fallbackWeiPerUnitLink. 
+ * + * @param _fallbackWeiPerUnitLink is the backup PLI exchange rate used when the PLI/NATIVE feed + * is stale. + * + * @param _fulfillmentFlatFeeLinkPPM is the flat fee in millionths of PLI that VRFCoordinatorV2Plus + * charges. + * + * @param _fulfillmentFlatFeeNativePPM is the flat fee in millionths of native that VRFCoordinatorV2Plus + * charges. + */ + function setConfig( + uint32 _wrapperGasOverhead, + uint32 _coordinatorGasOverhead, + uint8 _wrapperPremiumPercentage, + bytes32 _keyHash, + uint8 _maxNumWords, + uint32 _stalenessSeconds, + int256 _fallbackWeiPerUnitLink, + uint32 _fulfillmentFlatFeeLinkPPM, + uint32 _fulfillmentFlatFeeNativePPM + ) external onlyOwner { + s_wrapperGasOverhead = _wrapperGasOverhead; + s_coordinatorGasOverhead = _coordinatorGasOverhead; + s_wrapperPremiumPercentage = _wrapperPremiumPercentage; + s_keyHash = _keyHash; + s_maxNumWords = _maxNumWords; + s_configured = true; + + // Get other configuration from coordinator + s_stalenessSeconds = _stalenessSeconds; + s_fallbackWeiPerUnitLink = _fallbackWeiPerUnitLink; + s_fulfillmentFlatFeeLinkPPM = _fulfillmentFlatFeeLinkPPM; + s_fulfillmentFlatFeeNativePPM = _fulfillmentFlatFeeNativePPM; + } + + /** + * @notice getConfig returns the current VRFV2Wrapper configuration. + * + * @return fallbackWeiPerUnitLink is the backup PLI exchange rate used when the PLI/NATIVE feed + * is stale. + * + * @return stalenessSeconds is the number of seconds before we consider the feed price to be stale + * and fallback to fallbackWeiPerUnitLink. + * + * @return fulfillmentFlatFeeLinkPPM is the flat fee in millionths of PLI that VRFCoordinatorV2Plus + * charges. + * + * @return fulfillmentFlatFeeNativePPM is the flat fee in millionths of native that VRFCoordinatorV2Plus + * charges. + * + * @return wrapperGasOverhead reflects the gas overhead of the wrapper's fulfillRandomWords + * function. The cost for this gas is passed to the user. 
+ * + * @return coordinatorGasOverhead reflects the gas overhead of the coordinator's + * fulfillRandomWords function. + * + * @return wrapperPremiumPercentage is the premium ratio in percentage. For example, a value of 0 + * indicates no premium. A value of 15 indicates a 15 percent premium. + * + * @return keyHash is the key hash to use when requesting randomness. Fees are paid based on + * current gas fees, so this should be set to the highest gas lane on the network. + * + * @return maxNumWords is the max number of words that can be requested in a single wrapped VRF + * request. + */ + function getConfig() + external + view + returns ( + int256 fallbackWeiPerUnitLink, + uint32 stalenessSeconds, + uint32 fulfillmentFlatFeeLinkPPM, + uint32 fulfillmentFlatFeeNativePPM, + uint32 wrapperGasOverhead, + uint32 coordinatorGasOverhead, + uint8 wrapperPremiumPercentage, + bytes32 keyHash, + uint8 maxNumWords + ) + { + return ( + s_fallbackWeiPerUnitLink, + s_stalenessSeconds, + s_fulfillmentFlatFeeLinkPPM, + s_fulfillmentFlatFeeNativePPM, + s_wrapperGasOverhead, + s_coordinatorGasOverhead, + s_wrapperPremiumPercentage, + s_keyHash, + s_maxNumWords + ); + } + + /** + * @notice Calculates the price of a VRF request with the given callbackGasLimit at the current + * @notice block. + * + * @dev This function relies on the transaction gas price which is not automatically set during + * @dev simulation. To estimate the price at a specific gas price, use the estimatePrice function. + * + * @param _callbackGasLimit is the gas limit used to estimate the price. 
+ */ + function calculateRequestPrice( + uint32 _callbackGasLimit + ) external view override onlyConfiguredNotDisabled returns (uint256) { + int256 weiPerUnitLink = _getFeedData(); + return _calculateRequestPrice(_callbackGasLimit, tx.gasprice, weiPerUnitLink); + } + + function calculateRequestPriceNative( + uint32 _callbackGasLimit + ) external view override onlyConfiguredNotDisabled returns (uint256) { + return _calculateRequestPriceNative(_callbackGasLimit, tx.gasprice); + } + + /** + * @notice Estimates the price of a VRF request with a specific gas limit and gas price. + * + * @dev This is a convenience function that can be called in simulation to better understand + * @dev pricing. + * + * @param _callbackGasLimit is the gas limit used to estimate the price. + * @param _requestGasPriceWei is the gas price in wei used for the estimation. + */ + function estimateRequestPrice( + uint32 _callbackGasLimit, + uint256 _requestGasPriceWei + ) external view override onlyConfiguredNotDisabled returns (uint256) { + int256 weiPerUnitLink = _getFeedData(); + return _calculateRequestPrice(_callbackGasLimit, _requestGasPriceWei, weiPerUnitLink); + } + + function estimateRequestPriceNative( + uint32 _callbackGasLimit, + uint256 _requestGasPriceWei + ) external view override onlyConfiguredNotDisabled returns (uint256) { + return _calculateRequestPriceNative(_callbackGasLimit, _requestGasPriceWei); + } + + function _calculateRequestPriceNative(uint256 _gas, uint256 _requestGasPrice) internal view returns (uint256) { + // costWei is the base fee denominated in wei (native) + // costWei takes into account the L1 posting costs of the VRF fulfillment + // transaction, if we are on an L2. 
+ uint256 costWei = (_requestGasPrice * + (_gas + s_wrapperGasOverhead + s_coordinatorGasOverhead) + + ChainSpecificUtil._getL1CalldataGasCost(s_fulfillmentTxSizeBytes)); + // ((wei/gas * (gas)) + l1wei) + // baseFee is the base fee denominated in wei + uint256 baseFee = costWei; + // feeWithPremium is the fee after the percentage premium is applied + uint256 feeWithPremium = (baseFee * (s_wrapperPremiumPercentage + 100)) / 100; + // feeWithFlatFee is the fee after the flat fee is applied on top of the premium + uint256 feeWithFlatFee = feeWithPremium + (1e12 * uint256(s_fulfillmentFlatFeeNativePPM)); + + return feeWithFlatFee; + } + + function _calculateRequestPrice( + uint256 _gas, + uint256 _requestGasPrice, + int256 _weiPerUnitLink + ) internal view returns (uint256) { + // costWei is the base fee denominated in wei (native) + // costWei takes into account the L1 posting costs of the VRF fulfillment + // transaction, if we are on an L2. + uint256 costWei = (_requestGasPrice * + (_gas + s_wrapperGasOverhead + s_coordinatorGasOverhead) + + ChainSpecificUtil._getL1CalldataGasCost(s_fulfillmentTxSizeBytes)); + // (1e18 juels/link) * ((wei/gas * (gas)) + l1wei) / (wei/link) == 1e18 juels * wei/link / (wei/link) == 1e18 juels * wei/link * link/wei == juels + // baseFee is the base fee denominated in juels (link) + uint256 baseFee = (1e18 * costWei) / uint256(_weiPerUnitLink); + // feeWithPremium is the fee after the percentage premium is applied + uint256 feeWithPremium = (baseFee * (s_wrapperPremiumPercentage + 100)) / 100; + // feeWithFlatFee is the fee after the flat fee is applied on top of the premium + uint256 feeWithFlatFee = feeWithPremium + (1e12 * uint256(s_fulfillmentFlatFeeLinkPPM)); + + return feeWithFlatFee; + } + + /** + * @notice onTokenTransfer is called by LinkToken upon payment for a VRF request. + * + * @dev Reverts if payment is too low. 
+ * + * @param _sender is the sender of the payment, and the address that will receive a VRF callback + * upon fulfillment. + * + * @param _amount is the amount of PLI paid in Juels. + * + * @param _data is the abi-encoded VRF request parameters: uint32 callbackGasLimit, + * uint16 requestConfirmations, and uint32 numWords. + */ + function onTokenTransfer(address _sender, uint256 _amount, bytes calldata _data) external onlyConfiguredNotDisabled { + // solhint-disable-next-line custom-errors + require(msg.sender == address(s_link), "only callable from PLI"); + + (uint32 callbackGasLimit, uint16 requestConfirmations, uint32 numWords, bytes memory extraArgs) = abi.decode( + _data, + (uint32, uint16, uint32, bytes) + ); + checkPaymentMode(extraArgs, true); + uint32 eip150Overhead = _getEIP150Overhead(callbackGasLimit); + int256 weiPerUnitLink = _getFeedData(); + uint256 price = _calculateRequestPrice(callbackGasLimit, tx.gasprice, weiPerUnitLink); + // solhint-disable-next-line custom-errors + require(_amount >= price, "fee too low"); + // solhint-disable-next-line custom-errors + require(numWords <= s_maxNumWords, "numWords too high"); + VRFV2PlusClient.RandomWordsRequest memory req = VRFV2PlusClient.RandomWordsRequest({ + keyHash: s_keyHash, + subId: SUBSCRIPTION_ID, + requestConfirmations: requestConfirmations, + callbackGasLimit: callbackGasLimit + eip150Overhead + s_wrapperGasOverhead, + numWords: numWords, + extraArgs: extraArgs // empty extraArgs defaults to link payment + }); + uint256 requestId = s_vrfCoordinator.requestRandomWords(req); + s_callbacks[requestId] = Callback({ + callbackAddress: _sender, + callbackGasLimit: callbackGasLimit, + requestGasPrice: uint64(tx.gasprice) + }); + lastRequestId = requestId; + } + + function checkPaymentMode(bytes memory extraArgs, bool isLinkMode) public pure { + // If extraArgs is empty, payment mode is PLI by default + if (extraArgs.length == 0) { + if (!isLinkMode) { + revert PLIPaymentInRequestRandomWordsInNative(); + 
} + return; + } + if (extraArgs.length < EXPECTED_MIN_LENGTH) { + revert IncorrectExtraArgsLength(EXPECTED_MIN_LENGTH, uint16(extraArgs.length)); + } + // ExtraArgsV1 only has struct {bool nativePayment} as of now + // The following condition checks if nativePayment in abi.encode of + // ExtraArgsV1 matches the appropriate function call (onTokenTransfer + // for PLI and requestRandomWordsInNative for Native payment) + bool nativePayment = extraArgs[35] == hex"01"; + if (nativePayment && isLinkMode) { + revert NativePaymentInOnTokenTransfer(); + } + if (!nativePayment && !isLinkMode) { + revert PLIPaymentInRequestRandomWordsInNative(); + } + } + + function requestRandomWordsInNative( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords, + bytes calldata extraArgs + ) external payable override returns (uint256 requestId) { + checkPaymentMode(extraArgs, false); + + uint32 eip150Overhead = _getEIP150Overhead(_callbackGasLimit); + uint256 price = _calculateRequestPriceNative(_callbackGasLimit, tx.gasprice); + // solhint-disable-next-line custom-errors + require(msg.value >= price, "fee too low"); + // solhint-disable-next-line custom-errors + require(_numWords <= s_maxNumWords, "numWords too high"); + VRFV2PlusClient.RandomWordsRequest memory req = VRFV2PlusClient.RandomWordsRequest({ + keyHash: s_keyHash, + subId: SUBSCRIPTION_ID, + requestConfirmations: _requestConfirmations, + callbackGasLimit: _callbackGasLimit + eip150Overhead + s_wrapperGasOverhead, + numWords: _numWords, + extraArgs: extraArgs + }); + requestId = s_vrfCoordinator.requestRandomWords(req); + s_callbacks[requestId] = Callback({ + callbackAddress: msg.sender, + callbackGasLimit: _callbackGasLimit, + requestGasPrice: uint64(tx.gasprice) + }); + + return requestId; + } + + /** + * @notice withdraw is used by the VRFV2Wrapper's owner to withdraw PLI revenue. + * + * @param _recipient is the address that should receive the PLI funds. 
+ * + * @param _amount is the amount of PLI in Juels that should be withdrawn. + */ + function withdraw(address _recipient, uint256 _amount) external onlyOwner { + if (!s_link.transfer(_recipient, _amount)) { + revert FailedToTransferLink(); + } + } + + /** + * @notice withdraw is used by the VRFV2Wrapper's owner to withdraw native revenue. + * + * @param _recipient is the address that should receive the native funds. + * + * @param _amount is the amount of native in Wei that should be withdrawn. + */ + function withdrawNative(address _recipient, uint256 _amount) external onlyOwner { + (bool success, ) = payable(_recipient).call{value: _amount}(""); + // solhint-disable-next-line custom-errors + require(success, "failed to withdraw native"); + } + + /** + * @notice enable this contract so that new requests can be accepted. + */ + function enable() external onlyOwner { + s_disabled = false; + } + + /** + * @notice disable this contract so that new requests will be rejected. When disabled, new requests + * @notice will revert but existing requests can still be fulfilled. 
+ */ + function disable() external onlyOwner { + s_disabled = true; + } + + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) internal override { + Callback memory callback = s_callbacks[_requestId]; + delete s_callbacks[_requestId]; + // solhint-disable-next-line custom-errors + require(callback.callbackAddress != address(0), "request not found"); // This should never happen + + VRFV2PlusWrapperConsumerBase c; + bytes memory resp = abi.encodeWithSelector(c.rawFulfillRandomWords.selector, _requestId, _randomWords); + + bool success = _callWithExactGas(callback.callbackGasLimit, callback.callbackAddress, resp); + if (!success) { + emit WrapperFulfillmentFailed(_requestId, callback.callbackAddress); + } + } + + function _getFeedData() private view returns (int256) { + bool staleFallback = s_stalenessSeconds > 0; + uint256 timestamp; + int256 weiPerUnitLink; + (, weiPerUnitLink, , timestamp, ) = s_linkNativeFeed.latestRoundData(); + // solhint-disable-next-line not-rely-on-time + if (staleFallback && s_stalenessSeconds < block.timestamp - timestamp) { + weiPerUnitLink = s_fallbackWeiPerUnitLink; + } + // solhint-disable-next-line custom-errors + require(weiPerUnitLink >= 0, "Invalid PLI wei price"); + return weiPerUnitLink; + } + + /** + * @dev Calculates extra amount of gas required for running an assembly call() post-EIP150. + */ + function _getEIP150Overhead(uint32 gas) private pure returns (uint32) { + return gas / 63 + 1; + } + + /** + * @dev calls target address with exactly gasAmount gas and data as calldata + * or reverts if at least gasAmount gas is not available. 
+ */ + function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { + assembly { + let g := gas() + // Compute g -= GAS_FOR_CALL_EXACT_CHECK and check for underflow + // The gas actually passed to the callee is min(gasAmount, 63//64*gas available). + // We want to ensure that we revert if gasAmount > 63//64*gas available + // as we do not want to provide them with less, however that check itself costs + // gas. GAS_FOR_CALL_EXACT_CHECK ensures we have at least enough gas to be able + // to revert if gasAmount > 63//64*gas available. + if lt(g, GAS_FOR_CALL_EXACT_CHECK) { + revert(0, 0) + } + g := sub(g, GAS_FOR_CALL_EXACT_CHECK) + // if g - g//64 <= gasAmount, revert + // (we subtract g//64 because of EIP-150) + if iszero(gt(sub(g, div(g, 64)), gasAmount)) { + revert(0, 0) + } + // solidity calls check that a contract actually exists at the destination, so we do the same + if iszero(extcodesize(target)) { + revert(0, 0) + } + // call and return whether we succeeded. 
ignore return data + // call(gas,addr,value,argsOffset,argsLength,retOffset,retLength) + success := call(gasAmount, target, 0, add(data, 0x20), mload(data), 0, 0) + } + return success; + } + + function typeAndVersion() external pure virtual override returns (string memory) { + return "VRFV2Wrapper 1.0.0"; + } + + modifier onlyConfiguredNotDisabled() { + // solhint-disable-next-line custom-errors + require(s_configured, "wrapper is not configured"); + // solhint-disable-next-line custom-errors + require(!s_disabled, "wrapper is disabled"); + _; + } + + /*************************************************************************** + * Section: Migration of VRFV2PlusWrapper to latest VRFV2PlusCoordinator + ***************************************************************************/ + + function migrate(address newCoordinator) external onlyOwner { + IVRFV2PlusMigrate(address(s_vrfCoordinator)).migrate(SUBSCRIPTION_ID, newCoordinator); + } +} diff --git a/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapperConsumerBase.sol b/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapperConsumerBase.sol new file mode 100644 index 00000000..9751213f --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapperConsumerBase.sol @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {IVRFV2PlusWrapper} from "./interfaces/IVRFV2PlusWrapper.sol"; + +/** + * + * @notice Interface for contracts using VRF randomness through the VRF V2 wrapper + * ******************************************************************************** + * @dev PURPOSE + * + * @dev Create VRF V2+ requests without the need for subscription management. Rather than creating + * @dev and funding a VRF V2+ subscription, a user can use this wrapper to create one off requests, + * @dev paying up front rather than at fulfillment. 
+ * + * @dev Since the price is determined using the gas price of the request transaction rather than + * @dev the fulfillment transaction, the wrapper charges an additional premium on callback gas + * @dev usage, in addition to some extra overhead costs associated with the VRFV2Wrapper contract. + * ***************************************************************************** + * @dev USAGE + * + * @dev Calling contracts must inherit from VRFV2PlusWrapperConsumerBase. The consumer must be funded + * @dev with enough PLI or ether to make the request, otherwise requests will revert. To request randomness, + * @dev call the 'requestRandomWords' function with the desired VRF parameters. This function handles + * @dev paying for the request based on the current pricing. + * + * @dev Consumers must implement the fullfillRandomWords function, which will be called during + * @dev fulfillment with the randomness result. + */ +abstract contract VRFV2PlusWrapperConsumerBase { + error PLIAlreadySet(); + error OnlyVRFWrapperCanFulfill(address have, address want); + + LinkTokenInterface internal s_linkToken; + IVRFV2PlusWrapper public immutable i_vrfV2PlusWrapper; + + /** + * @param _link is the address of LinkToken + * @param _vrfV2PlusWrapper is the address of the VRFV2Wrapper contract + */ + constructor(address _link, address _vrfV2PlusWrapper) { + if (_link != address(0)) { + s_linkToken = LinkTokenInterface(_link); + } + + i_vrfV2PlusWrapper = IVRFV2PlusWrapper(_vrfV2PlusWrapper); + } + + /** + * @notice setLinkToken changes the PLI token address. + * @param _link is the address of the new PLI token contract + */ + function setLinkToken(address _link) external { + if (address(s_linkToken) != address(0)) { + revert PLIAlreadySet(); + } + + s_linkToken = LinkTokenInterface(_link); + } + + /** + * @dev Requests randomness from the VRF V2+ wrapper. + * + * @param _callbackGasLimit is the gas limit that should be used when calling the consumer's + * fulfillRandomWords function. 
+ * @param _requestConfirmations is the number of confirmations to wait before fulfilling the + * request. A higher number of confirmations increases security by reducing the likelihood + * that a chain re-org changes a published randomness outcome. + * @param _numWords is the number of random words to request. + * + * @return requestId is the VRF V2+ request ID of the newly created randomness request. + */ + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function requestRandomness( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords, + bytes memory extraArgs + ) internal returns (uint256 requestId, uint256 reqPrice) { + reqPrice = i_vrfV2PlusWrapper.calculateRequestPrice(_callbackGasLimit); + s_linkToken.transferAndCall( + address(i_vrfV2PlusWrapper), + reqPrice, + abi.encode(_callbackGasLimit, _requestConfirmations, _numWords, extraArgs) + ); + return (i_vrfV2PlusWrapper.lastRequestId(), reqPrice); + } + + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function requestRandomnessPayInNative( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords, + bytes memory extraArgs + ) internal returns (uint256 requestId, uint256 requestPrice) { + requestPrice = i_vrfV2PlusWrapper.calculateRequestPriceNative(_callbackGasLimit); + return ( + i_vrfV2PlusWrapper.requestRandomWordsInNative{value: requestPrice}( + _callbackGasLimit, + _requestConfirmations, + _numWords, + extraArgs + ), + requestPrice + ); + } + + /** + * @notice fulfillRandomWords handles the VRF V2 wrapper response. The consuming contract must + * @notice implement it. + * + * @param _requestId is the VRF V2 request ID. + * @param _randomWords is the randomness result. 
+ */ + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) internal virtual; + + function rawFulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) external { + address vrfWrapperAddr = address(i_vrfV2PlusWrapper); + if (msg.sender != vrfWrapperAddr) { + revert OnlyVRFWrapperCanFulfill(msg.sender, vrfWrapperAddr); + } + fulfillRandomWords(_requestId, _randomWords); + } + + /// @notice getBalance returns the native balance of the consumer contract + function getBalance() public view returns (uint256) { + return address(this).balance; + } + + /// @notice getLinkToken returns the link token contract + function getLinkToken() public view returns (LinkTokenInterface) { + return s_linkToken; + } +} diff --git a/contracts/src/v0.8/vrf/dev/interfaces/IVRFCoordinatorV2Plus.sol b/contracts/src/v0.8/vrf/dev/interfaces/IVRFCoordinatorV2Plus.sol new file mode 100644 index 00000000..846da0b1 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/interfaces/IVRFCoordinatorV2Plus.sol @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {VRFV2PlusClient} from "../libraries/VRFV2PlusClient.sol"; +import {IVRFSubscriptionV2Plus} from "./IVRFSubscriptionV2Plus.sol"; + +// Interface that enables consumers of VRFCoordinatorV2Plus to be future-proof for upgrades +// This interface is supported by subsequent versions of VRFCoordinatorV2Plus +interface IVRFCoordinatorV2Plus is IVRFSubscriptionV2Plus { + /** + * @notice Request a set of random words. + * @param req - a struct containing following fiels for randomness request: + * keyHash - Corresponds to a particular oracle job which uses + * that key for generating the VRF proof. Different keyHash's have different gas price + * ceilings, so you can select a specific one to bound your maximum per request cost. + * subId - The ID of the VRF subscription. 
Must be funded + * with the minimum subscription balance required for the selected keyHash. + * requestConfirmations - How many blocks you'd like the + * oracle to wait before responding to the request. See SECURITY CONSIDERATIONS + * for why you may want to request more. The acceptable range is + * [minimumRequestBlockConfirmations, 200]. + * callbackGasLimit - How much gas you'd like to receive in your + * fulfillRandomWords callback. Note that gasleft() inside fulfillRandomWords + * may be slightly less than this amount because of gas used calling the function + * (argument decoding etc.), so you may need to request slightly more than you expect + * to have inside fulfillRandomWords. The acceptable range is + * [0, maxGasLimit] + * numWords - The number of uint256 random values you'd like to receive + * in your fulfillRandomWords callback. Note these numbers are expanded in a + * secure way by the VRFCoordinator from a single random value supplied by the oracle. + * extraArgs - abi-encoded extra args + * @return requestId - A unique identifier of the request. Can be used to match + * a request to a response in fulfillRandomWords. 
+   */
+  function requestRandomWords(VRFV2PlusClient.RandomWordsRequest calldata req) external returns (uint256 requestId);
+}
diff --git a/contracts/src/v0.8/vrf/dev/interfaces/IVRFCoordinatorV2PlusInternal.sol b/contracts/src/v0.8/vrf/dev/interfaces/IVRFCoordinatorV2PlusInternal.sol
new file mode 100644
index 00000000..90208b9b
--- /dev/null
+++ b/contracts/src/v0.8/vrf/dev/interfaces/IVRFCoordinatorV2PlusInternal.sol
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.0;
+
+import {IVRFCoordinatorV2Plus} from "./IVRFCoordinatorV2Plus.sol";
+
+// IVRFCoordinatorV2PlusInternal is the interface used by plugin core and should
+// not be used by consumer contracts
+// Future versions of VRF V2plus must conform to this interface
+// VRF coordinator doesn't directly inherit from this interface because solidity
+// imposes interface methods be external, whereas methods implemented in the VRF coordinator
+// are public. This is OK because IVRFCoordinatorV2PlusInternal doesn't have any solidity
+// use case.
It is only used to generate gethwrappers +interface IVRFCoordinatorV2PlusInternal is IVRFCoordinatorV2Plus { + event RandomWordsRequested( + bytes32 indexed keyHash, + uint256 requestId, + uint256 preSeed, + uint256 indexed subId, + uint16 minimumRequestConfirmations, + uint32 callbackGasLimit, + uint32 numWords, + bytes extraArgs, + address indexed sender + ); + + event RandomWordsFulfilled( + uint256 indexed requestId, + uint256 outputSeed, + uint256 indexed subId, + uint96 payment, + bool success, + bool onlyPremium + ); + + struct RequestCommitment { + uint64 blockNum; + uint256 subId; + uint32 callbackGasLimit; + uint32 numWords; + address sender; + bytes extraArgs; + } + + struct Proof { + uint256[2] pk; + uint256[2] gamma; + uint256 c; + uint256 s; + uint256 seed; + address uWitness; + uint256[2] cGammaWitness; + uint256[2] sHashWitness; + uint256 zInv; + } + + // solhint-disable-next-line func-name-mixedcase + function s_requestCommitments(uint256 requestID) external view returns (bytes32); + + function fulfillRandomWords( + Proof memory proof, + RequestCommitment memory rc, + bool onlyPremium + ) external returns (uint96); + + // solhint-disable-next-line func-name-mixedcase + function PLI_NATIVE_FEED() external view returns (address); +} diff --git a/contracts/src/v0.8/vrf/dev/interfaces/IVRFCoordinatorV2PlusMigration.sol b/contracts/src/v0.8/vrf/dev/interfaces/IVRFCoordinatorV2PlusMigration.sol new file mode 100644 index 00000000..f77aaa76 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/interfaces/IVRFCoordinatorV2PlusMigration.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +// Future versions of VRFCoordinatorV2Plus must implement IVRFCoordinatorV2PlusMigration +// to support migrations from previous versions +interface IVRFCoordinatorV2PlusMigration { + /** + * @notice called by older versions of coordinator for migration. 
+ * @notice only callable by older versions of coordinator + * @notice supports transfer of native currency + * @param encodedData - user data from older version of coordinator + */ + function onMigration(bytes calldata encodedData) external payable; +} diff --git a/contracts/src/v0.8/vrf/dev/interfaces/IVRFMigratableConsumerV2Plus.sol b/contracts/src/v0.8/vrf/dev/interfaces/IVRFMigratableConsumerV2Plus.sol new file mode 100644 index 00000000..ed61fb6a --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/interfaces/IVRFMigratableConsumerV2Plus.sol @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/// @notice The IVRFMigratableConsumerV2Plus interface defines the +/// @notice method required to be implemented by all V2Plus consumers. +/// @dev This interface is designed to be used in VRFConsumerBaseV2Plus. +interface IVRFMigratableConsumerV2Plus {  + /// @notice Sets the VRF Coordinator address +  /// @notice This method should only be callable by the coordinator or contract owner + function setCoordinator(address vrfCoordinator) external; +} diff --git a/contracts/src/v0.8/vrf/dev/interfaces/IVRFSubscriptionV2Plus.sol b/contracts/src/v0.8/vrf/dev/interfaces/IVRFSubscriptionV2Plus.sol new file mode 100644 index 00000000..e6051cdf --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/interfaces/IVRFSubscriptionV2Plus.sol @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/// @notice The IVRFSubscriptionV2Plus interface defines the subscription +/// @notice related methods implemented by the V2Plus coordinator. +interface IVRFSubscriptionV2Plus { + /** + * @notice Add a consumer to a VRF subscription. + * @param subId - ID of the subscription + * @param consumer - New consumer which can use the subscription + */ + function addConsumer(uint256 subId, address consumer) external; + + /** + * @notice Remove a consumer from a VRF subscription. 
+ * @param subId - ID of the subscription + * @param consumer - Consumer to remove from the subscription + */ + function removeConsumer(uint256 subId, address consumer) external; + + /** + * @notice Cancel a subscription + * @param subId - ID of the subscription + * @param to - Where to send the remaining PLI to + */ + function cancelSubscription(uint256 subId, address to) external; + + /** + * @notice Request subscription owner transfer. + * @param subId - ID of the subscription + * @dev will revert if original owner of subId has + * not requested that msg.sender become the new owner. + */ + function acceptSubscriptionOwnerTransfer(uint256 subId) external; + + /** + * @notice Request subscription owner transfer. + * @param subId - ID of the subscription + * @param newOwner - proposed new owner of the subscription + */ + function requestSubscriptionOwnerTransfer(uint256 subId, address newOwner) external; + + /** + * @notice Create a VRF subscription. + * @return subId - A unique subscription id. + * @dev You can manage the consumer set dynamically with addConsumer/removeConsumer. + * @dev Note to fund the subscription with PLI, use transferAndCall. For example + * @dev PLITOKEN.transferAndCall( + * @dev address(COORDINATOR), + * @dev amount, + * @dev abi.encode(subId)); + * @dev Note to fund the subscription with Native, use fundSubscriptionWithNative. Be sure + * @dev to send Native with the call, for example: + * @dev COORDINATOR.fundSubscriptionWithNative{value: amount}(subId); + */ + function createSubscription() external returns (uint256 subId); + + /** + * @notice Get a VRF subscription. + * @param subId - ID of the subscription + * @return balance - PLI balance of the subscription in juels. + * @return nativeBalance - native balance of the subscription in wei. + * @return reqCount - Requests count of subscription. + * @return owner - owner of the subscription. + * @return consumers - list of consumer address which are able to use this subscription. 
+ */ + function getSubscription( + uint256 subId + ) + external + view + returns (uint96 balance, uint96 nativeBalance, uint64 reqCount, address owner, address[] memory consumers); + + /* + * @notice Check to see if there exists a request commitment consumers + * for all consumers and keyhashes for a given sub. + * @param subId - ID of the subscription + * @return true if there exists at least one unfulfilled request for the subscription, false + * otherwise. + */ + function pendingRequestExists(uint256 subId) external view returns (bool); + + /** + * @notice Paginate through all active VRF subscriptions. + * @param startIndex index of the subscription to start from + * @param maxCount maximum number of subscriptions to return, 0 to return all + * @dev the order of IDs in the list is **not guaranteed**, therefore, if making successive calls, one + * @dev should consider keeping the blockheight constant to ensure a holistic picture of the contract state + */ + function getActiveSubscriptionIds(uint256 startIndex, uint256 maxCount) external view returns (uint256[] memory); + + /** + * @notice Fund a subscription with native. + * @param subId - ID of the subscription + * @notice This method expects msg.value to be greater than 0. + */ + function fundSubscriptionWithNative(uint256 subId) external payable; +} diff --git a/contracts/src/v0.8/vrf/dev/interfaces/IVRFV2PlusMigrate.sol b/contracts/src/v0.8/vrf/dev/interfaces/IVRFV2PlusMigrate.sol new file mode 100644 index 00000000..e1a755ff --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/interfaces/IVRFV2PlusMigrate.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/// @notice This interface is implemented by all VRF V2+ coordinators that can +/// @notice migrate subscription data to new coordinators. 
+interface IVRFV2PlusMigrate { + /** + * @notice migrate the provided subscription ID to the provided VRF coordinator + * @notice msg.sender must be the subscription owner and newCoordinator must + * @notice implement IVRFCoordinatorV2PlusMigration. + * @param subId the subscription ID to migrate + * @param newCoordinator the vrf coordinator to migrate to + */ + function migrate(uint256 subId, address newCoordinator) external; +} diff --git a/contracts/src/v0.8/vrf/dev/interfaces/IVRFV2PlusWrapper.sol b/contracts/src/v0.8/vrf/dev/interfaces/IVRFV2PlusWrapper.sol new file mode 100644 index 00000000..aa3de0b6 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/interfaces/IVRFV2PlusWrapper.sol @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface IVRFV2PlusWrapper { + /** + * @return the request ID of the most recent VRF V2 request made by this wrapper. This should only + * be relied upon within the same transaction that the request was made. + */ + function lastRequestId() external view returns (uint256); + + /** + * @notice Calculates the price of a VRF request with the given callbackGasLimit at the current + * @notice block. + * + * @dev This function relies on the transaction gas price which is not automatically set during + * @dev simulation. To estimate the price at a specific gas price, use the estimatePrice function. + * + * @param _callbackGasLimit is the gas limit used to estimate the price. + */ + function calculateRequestPrice(uint32 _callbackGasLimit) external view returns (uint256); + + /** + * @notice Calculates the price of a VRF request in native with the given callbackGasLimit at the current + * @notice block. + * + * @dev This function relies on the transaction gas price which is not automatically set during + * @dev simulation. To estimate the price at a specific gas price, use the estimatePrice function. + * + * @param _callbackGasLimit is the gas limit used to estimate the price. 
+ */ + function calculateRequestPriceNative(uint32 _callbackGasLimit) external view returns (uint256); + + /** + * @notice Estimates the price of a VRF request with a specific gas limit and gas price. + * + * @dev This is a convenience function that can be called in simulation to better understand + * @dev pricing. + * + * @param _callbackGasLimit is the gas limit used to estimate the price. + * @param _requestGasPriceWei is the gas price in wei used for the estimation. + */ + function estimateRequestPrice(uint32 _callbackGasLimit, uint256 _requestGasPriceWei) external view returns (uint256); + + /** + * @notice Estimates the price of a VRF request in native with a specific gas limit and gas price. + * + * @dev This is a convenience function that can be called in simulation to better understand + * @dev pricing. + * + * @param _callbackGasLimit is the gas limit used to estimate the price. + * @param _requestGasPriceWei is the gas price in wei used for the estimation. + */ + function estimateRequestPriceNative( + uint32 _callbackGasLimit, + uint256 _requestGasPriceWei + ) external view returns (uint256); + + /** + * @notice Requests randomness from the VRF V2 wrapper, paying in native token. + * + * @param _callbackGasLimit is the gas limit for the request. + * @param _requestConfirmations number of request confirmations to wait before serving a request. + * @param _numWords is the number of words to request. + */ + function requestRandomWordsInNative( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords, + bytes memory extraArgs + ) external payable returns (uint256 requestId); +} diff --git a/contracts/src/v0.8/vrf/dev/libraries/VRFV2PlusClient.sol b/contracts/src/v0.8/vrf/dev/libraries/VRFV2PlusClient.sol new file mode 100644 index 00000000..31ed4de8 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/libraries/VRFV2PlusClient.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +// End consumer library. 
+library VRFV2PlusClient { + // extraArgs will evolve to support new features + bytes4 public constant EXTRA_ARGS_V1_TAG = bytes4(keccak256("VRF ExtraArgsV1")); + struct ExtraArgsV1 { + bool nativePayment; + } + + struct RandomWordsRequest { + bytes32 keyHash; + uint256 subId; + uint16 requestConfirmations; + uint32 callbackGasLimit; + uint32 numWords; + bytes extraArgs; + } + + function _argsToBytes(ExtraArgsV1 memory extraArgs) internal pure returns (bytes memory bts) { + return abi.encodeWithSelector(EXTRA_ARGS_V1_TAG, extraArgs); + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/ExposedVRFCoordinatorV2_5.sol b/contracts/src/v0.8/vrf/dev/testhelpers/ExposedVRFCoordinatorV2_5.sol new file mode 100644 index 00000000..80f74372 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/ExposedVRFCoordinatorV2_5.sol @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import {VRFCoordinatorV2_5} from "../VRFCoordinatorV2_5.sol"; +import {EnumerableSet} from "../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; + +// solhint-disable-next-line contract-name-camelcase +contract ExposedVRFCoordinatorV2_5 is VRFCoordinatorV2_5 { + using EnumerableSet for EnumerableSet.UintSet; + + constructor(address blockhashStore) VRFCoordinatorV2_5(blockhashStore) {} + + function computeRequestIdExternal( + bytes32 keyHash, + address sender, + uint256 subId, + uint64 nonce + ) external pure returns (uint256, uint256) { + return _computeRequestId(keyHash, sender, subId, nonce); + } + + function isTargetRegisteredExternal(address target) external view returns (bool) { + return _isTargetRegistered(target); + } + + function getRandomnessFromProofExternal( + Proof calldata proof, + RequestCommitment calldata rc + ) external view returns (Output memory) { + return _getRandomnessFromProof(proof, rc); + } + + function getActiveSubscriptionIdsLength() external view returns (uint256) { + return s_subIds.length(); + } + + 
function getSubscriptionConfig(uint256 subId) external view returns (SubscriptionConfig memory) { + return s_subscriptionConfigs[subId]; + } + + function getSubscriptionStruct(uint256 subId) external view returns (Subscription memory) { + return s_subscriptions[subId]; + } + + function setTotalBalanceTestingOnlyXXX(uint96 newBalance) external { + s_totalBalance = newBalance; + } + + function setTotalNativeBalanceTestingOnlyXXX(uint96 newBalance) external { + s_totalNativeBalance = newBalance; + } + + function setWithdrawableTokensTestingOnlyXXX(uint96 newBalance) external { + s_withdrawableTokens = newBalance; + } + + function getWithdrawableTokensTestingOnlyXXX() external view returns (uint96) { + return s_withdrawableTokens; + } + + function setWithdrawableNativeTestingOnlyXXX(uint96 newBalance) external { + s_withdrawableNative = newBalance; + } + + function getWithdrawableNativeTestingOnlyXXX() external view returns (uint96) { + return s_withdrawableNative; + } + + function calculatePaymentAmount( + uint256 startGas, + uint256 weiPerUnitGas, + bool nativePayment, + bool onlyPremium + ) external returns (uint96) { + return _calculatePaymentAmount(startGas, weiPerUnitGas, nativePayment, onlyPremium); + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFConsumerV2PlusUpgradeableExample.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFConsumerV2PlusUpgradeableExample.sol new file mode 100644 index 00000000..4926464a --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFConsumerV2PlusUpgradeableExample.sol @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../../shared/interfaces/LinkTokenInterface.sol"; +import {IVRFCoordinatorV2Plus} from "../interfaces/IVRFCoordinatorV2Plus.sol"; +import {VRFConsumerBaseV2Upgradeable} from "../VRFConsumerBaseV2Upgradeable.sol"; +import {Initializable} from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; +import {VRFV2PlusClient} 
from "../libraries/VRFV2PlusClient.sol"; + +contract VRFConsumerV2PlusUpgradeableExample is Initializable, VRFConsumerBaseV2Upgradeable { + uint256[] public s_randomWords; + uint256 public s_requestId; + IVRFCoordinatorV2Plus public COORDINATOR; + LinkTokenInterface public PLITOKEN; + uint256 public s_subId; + uint256 public s_gasAvailable; + + function initialize(address _vrfCoordinator, address _link) public initializer { + __VRFConsumerBaseV2_init(_vrfCoordinator); + COORDINATOR = IVRFCoordinatorV2Plus(_vrfCoordinator); + PLITOKEN = LinkTokenInterface(_link); + } + + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal override { + // solhint-disable-next-line custom-errors + require(requestId == s_requestId, "request ID is incorrect"); + + s_gasAvailable = gasleft(); + s_randomWords = randomWords; + } + + function createSubscriptionAndFund(uint96 amount) external { + if (s_subId == 0) { + s_subId = COORDINATOR.createSubscription(); + COORDINATOR.addConsumer(s_subId, address(this)); + } + // Approve the link transfer. + PLITOKEN.transferAndCall(address(COORDINATOR), amount, abi.encode(s_subId)); + } + + function topUpSubscription(uint96 amount) external { + // solhint-disable-next-line custom-errors + require(s_subId != 0, "sub not set"); + // Approve the link transfer. 
+ PLITOKEN.transferAndCall(address(COORDINATOR), amount, abi.encode(s_subId)); + } + + function updateSubscription(address[] memory consumers) external { + // solhint-disable-next-line custom-errors + require(s_subId != 0, "subID not set"); + for (uint256 i = 0; i < consumers.length; i++) { + COORDINATOR.addConsumer(s_subId, consumers[i]); + } + } + + function requestRandomness( + bytes32 keyHash, + uint256 subId, + uint16 minReqConfs, + uint32 callbackGasLimit, + uint32 numWords + ) external returns (uint256) { + VRFV2PlusClient.RandomWordsRequest memory req = VRFV2PlusClient.RandomWordsRequest({ + keyHash: keyHash, + subId: subId, + requestConfirmations: minReqConfs, + callbackGasLimit: callbackGasLimit, + numWords: numWords, + extraArgs: "" // empty extraArgs defaults to link payment + }); + s_requestId = COORDINATOR.requestRandomWords(req); + return s_requestId; + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2PlusUpgradedVersion.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2PlusUpgradedVersion.sol new file mode 100644 index 00000000..46d76e32 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2PlusUpgradedVersion.sol @@ -0,0 +1,734 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import {BlockhashStoreInterface} from "../../interfaces/BlockhashStoreInterface.sol"; +// solhint-disable-next-line no-unused-import +import {IVRFCoordinatorV2Plus, IVRFSubscriptionV2Plus} from "../interfaces/IVRFCoordinatorV2Plus.sol"; +import {VRF} from "../../../vrf/VRF.sol"; +import {VRFConsumerBaseV2Plus, IVRFMigratableConsumerV2Plus} from "../VRFConsumerBaseV2Plus.sol"; +import {ChainSpecificUtil} from "../../../ChainSpecificUtil.sol"; +import {SubscriptionAPI} from "../SubscriptionAPI.sol"; +import {VRFV2PlusClient} from "../libraries/VRFV2PlusClient.sol"; +import {IVRFCoordinatorV2PlusMigration} from "../interfaces/IVRFCoordinatorV2PlusMigration.sol"; +import {EnumerableSet} from 
"../../../vendor/openzeppelin-solidity/v4.7.3/contracts/utils/structs/EnumerableSet.sol"; + +contract VRFCoordinatorV2PlusUpgradedVersion is + VRF, + SubscriptionAPI, + IVRFCoordinatorV2PlusMigration, + IVRFCoordinatorV2Plus +{ + using EnumerableSet for EnumerableSet.UintSet; + /// @dev should always be available + // solhint-disable-next-line plugin-solidity/prefix-immutable-variables-with-i + BlockhashStoreInterface public immutable BLOCKHASH_STORE; + + // Set this maximum to 200 to give us a 56 block window to fulfill + // the request before requiring the block hash feeder. + uint16 public constant MAX_REQUEST_CONFIRMATIONS = 200; + uint32 public constant MAX_NUM_WORDS = 500; + // 5k is plenty for an EXTCODESIZE call (2600) + warm CALL (100) + // and some arithmetic operations. + uint256 private constant GAS_FOR_CALL_EXACT_CHECK = 5_000; + error InvalidRequestConfirmations(uint16 have, uint16 min, uint16 max); + error GasLimitTooBig(uint32 have, uint32 want); + error NumWordsTooBig(uint32 have, uint32 want); + error ProvingKeyAlreadyRegistered(bytes32 keyHash); + error NoSuchProvingKey(bytes32 keyHash); + error InvalidLinkWeiPrice(int256 linkWei); + error NoCorrespondingRequest(); + error IncorrectCommitment(); + error BlockhashNotInStore(uint256 blockNum); + error PaymentTooLarge(); + error InvalidExtraArgsTag(); + /// @notice emitted when version in the request doesn't match expected version + error InvalidVersion(uint8 requestVersion, uint8 expectedVersion); + /// @notice emitted when transferred balance (msg.value) does not match the metadata in V1MigrationData + error InvalidNativeBalance(uint256 transferredValue, uint96 expectedValue); + error SubscriptionIDCollisionFound(); + + struct RequestCommitment { + uint64 blockNum; + uint256 subId; + uint32 callbackGasLimit; + uint32 numWords; + address sender; + bytes extraArgs; + } + + mapping(bytes32 => bool) /* keyHash */ /* exists */ internal s_provingKeys; + bytes32[] public s_provingKeyHashes; + 
mapping(uint256 => bytes32) /* requestID */ /* commitment */ public s_requestCommitments; + + event ProvingKeyRegistered(bytes32 keyHash); + event RandomWordsRequested( + bytes32 indexed keyHash, + uint256 requestId, + uint256 preSeed, + uint256 indexed subId, + uint16 minimumRequestConfirmations, + uint32 callbackGasLimit, + uint32 numWords, + bytes extraArgs, + address indexed sender + ); + event RandomWordsFulfilled( + uint256 indexed requestId, + uint256 outputSeed, + uint256 indexed subID, + uint96 payment, + bool success + ); + + int256 internal s_fallbackWeiPerUnitLink; + + FeeConfig internal s_feeConfig; + + struct FeeConfig { + // Flat fee charged per fulfillment in millionths of link + // So fee range is [0, 2^32/10^6]. + uint32 fulfillmentFlatFeeLinkPPM; + // Flat fee charged per fulfillment in millionths of native. + // So fee range is [0, 2^32/10^6]. + uint32 fulfillmentFlatFeeNativePPM; + } + + event ConfigSet( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation, + int256 fallbackWeiPerUnitLink, + uint8 nativePremiumPercentage, + uint8 linkPremiumPercentage + ); + + constructor(address blockhashStore) SubscriptionAPI() { + BLOCKHASH_STORE = BlockhashStoreInterface(blockhashStore); + } + + /** + * @notice Registers a proving key to an oracle. 
+ * @param publicProvingKey key that oracle can use to submit vrf fulfillments + */ + function registerProvingKey(uint256[2] calldata publicProvingKey) external onlyOwner { + bytes32 kh = hashOfKey(publicProvingKey); + if (s_provingKeys[kh]) { + revert ProvingKeyAlreadyRegistered(kh); + } + s_provingKeys[kh] = true; + s_provingKeyHashes.push(kh); + emit ProvingKeyRegistered(kh); + } + + /** + * @notice Returns the proving key hash key associated with this public key + * @param publicKey the key to return the hash of + */ + function hashOfKey(uint256[2] memory publicKey) public pure returns (bytes32) { + return keccak256(abi.encode(publicKey)); + } + + /** + * @notice Sets the configuration of the vrfv2 coordinator + * @param minimumRequestConfirmations global min for request confirmations + * @param maxGasLimit global max for request gas limit + * @param stalenessSeconds if the native/link feed is more stale then this, use the fallback price + * @param gasAfterPaymentCalculation gas used in doing accounting after completing the gas measurement + * @param fallbackWeiPerUnitLink fallback native/link price in the case of a stale feed + * @param nativePremiumPercentage native premium percentage + * @param linkPremiumPercentage link premium percentage + */ + function setConfig( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation, + int256 fallbackWeiPerUnitLink, + uint32 fulfillmentFlatFeeNativePPM, + uint32 fulfillmentFlatFeeLinkDiscountPPM, + uint8 nativePremiumPercentage, + uint8 linkPremiumPercentage + ) external onlyOwner { + if (minimumRequestConfirmations > MAX_REQUEST_CONFIRMATIONS) { + revert InvalidRequestConfirmations( + minimumRequestConfirmations, + minimumRequestConfirmations, + MAX_REQUEST_CONFIRMATIONS + ); + } + if (fallbackWeiPerUnitLink <= 0) { + revert InvalidLinkWeiPrice(fallbackWeiPerUnitLink); + } + s_config = Config({ + minimumRequestConfirmations: minimumRequestConfirmations, 
+ maxGasLimit: maxGasLimit, + stalenessSeconds: stalenessSeconds, + gasAfterPaymentCalculation: gasAfterPaymentCalculation, + reentrancyLock: false, + fulfillmentFlatFeeNativePPM: fulfillmentFlatFeeNativePPM, + fulfillmentFlatFeeLinkDiscountPPM: fulfillmentFlatFeeLinkDiscountPPM, + nativePremiumPercentage: nativePremiumPercentage, + linkPremiumPercentage: linkPremiumPercentage + }); + s_fallbackWeiPerUnitLink = fallbackWeiPerUnitLink; + emit ConfigSet( + minimumRequestConfirmations, + maxGasLimit, + stalenessSeconds, + gasAfterPaymentCalculation, + fallbackWeiPerUnitLink, + nativePremiumPercentage, + linkPremiumPercentage + ); + } + + /** + * @notice Get configuration relevant for making requests + * @return minimumRequestConfirmations global min for request confirmations + * @return maxGasLimit global max for request gas limit + * @return s_provingKeyHashes list of registered key hashes + */ + function getRequestConfig() external view returns (uint16, uint32, bytes32[] memory) { + return (s_config.minimumRequestConfirmations, s_config.maxGasLimit, s_provingKeyHashes); + } + + /// @dev Convert the extra args bytes into a struct + /// @param extraArgs The extra args bytes + /// @return The extra args struct + function _fromBytes(bytes calldata extraArgs) internal pure returns (VRFV2PlusClient.ExtraArgsV1 memory) { + if (extraArgs.length == 0) { + return VRFV2PlusClient.ExtraArgsV1({nativePayment: false}); + } + if (bytes4(extraArgs) != VRFV2PlusClient.EXTRA_ARGS_V1_TAG) revert InvalidExtraArgsTag(); + return abi.decode(extraArgs[4:], (VRFV2PlusClient.ExtraArgsV1)); + } + + /** + * @notice Request a set of random words. + * @param req - a struct containing following fields for randomness request: + * keyHash - Corresponds to a particular oracle job which uses + * that key for generating the VRF proof. Different keyHash's have different gas price + * ceilings, so you can select a specific one to bound your maximum per request cost. 
+ * subId - The ID of the VRF subscription. Must be funded + * with the minimum subscription balance required for the selected keyHash. + * requestConfirmations - How many blocks you'd like the + * oracle to wait before responding to the request. See SECURITY CONSIDERATIONS + * for why you may want to request more. The acceptable range is + * [minimumRequestBlockConfirmations, 200]. + * callbackGasLimit - How much gas you'd like to receive in your + * fulfillRandomWords callback. Note that gasleft() inside fulfillRandomWords + * may be slightly less than this amount because of gas used calling the function + * (argument decoding etc.), so you may need to request slightly more than you expect + * to have inside fulfillRandomWords. The acceptable range is + * [0, maxGasLimit] + * numWords - The number of uint256 random values you'd like to receive + * in your fulfillRandomWords callback. Note these numbers are expanded in a + * secure way by the VRFCoordinator from a single random value supplied by the oracle. + * extraArgs - Encoded extra arguments that has a boolean flag for whether payment + * should be made in native or PLI. Payment in PLI is only available if the PLI token is available to this contract. + * @return requestId - A unique identifier of the request. Can be used to match + * a request to a response in fulfillRandomWords. + */ + function requestRandomWords( + VRFV2PlusClient.RandomWordsRequest calldata req + ) external override nonReentrant returns (uint256) { + // Input validation using the subscription storage. + if (s_subscriptionConfigs[req.subId].owner == address(0)) { + revert InvalidSubscription(); + } + // Its important to ensure that the consumer is in fact who they say they + // are, otherwise they could use someone else's subscription balance. + // A nonce of 0 indicates consumer is not allocated to the sub. 
+ uint64 currentNonce = s_consumers[msg.sender][req.subId]; + if (currentNonce == 0) { + revert InvalidConsumer(req.subId, msg.sender); + } + // Input validation using the config storage word. + if ( + req.requestConfirmations < s_config.minimumRequestConfirmations || + req.requestConfirmations > MAX_REQUEST_CONFIRMATIONS + ) { + revert InvalidRequestConfirmations( + req.requestConfirmations, + s_config.minimumRequestConfirmations, + MAX_REQUEST_CONFIRMATIONS + ); + } + // No lower bound on the requested gas limit. A user could request 0 + // and they would simply be billed for the proof verification and wouldn't be + // able to do anything with the random value. + if (req.callbackGasLimit > s_config.maxGasLimit) { + revert GasLimitTooBig(req.callbackGasLimit, s_config.maxGasLimit); + } + if (req.numWords > MAX_NUM_WORDS) { + revert NumWordsTooBig(req.numWords, MAX_NUM_WORDS); + } + // Note we do not check whether the keyHash is valid to save gas. + // The consequence for users is that they can send requests + // for invalid keyHashes which will simply not be fulfilled. 
+ uint64 nonce = currentNonce + 1; + (uint256 requestId, uint256 preSeed) = _computeRequestId(req.keyHash, msg.sender, req.subId, nonce); + + VRFV2PlusClient.ExtraArgsV1 memory extraArgs = _fromBytes(req.extraArgs); + bytes memory extraArgsBytes = VRFV2PlusClient._argsToBytes(extraArgs); + s_requestCommitments[requestId] = keccak256( + abi.encode( + requestId, + ChainSpecificUtil._getBlockNumber(), + req.subId, + req.callbackGasLimit, + req.numWords, + msg.sender, + extraArgsBytes + ) + ); + emit RandomWordsRequested( + req.keyHash, + requestId, + preSeed, + req.subId, + req.requestConfirmations, + req.callbackGasLimit, + req.numWords, + extraArgsBytes, + msg.sender + ); + s_consumers[msg.sender][req.subId] = nonce; + + return requestId; + } + + function _computeRequestId( + bytes32 keyHash, + address sender, + uint256 subId, + uint64 nonce + ) internal pure returns (uint256, uint256) { + uint256 preSeed = uint256(keccak256(abi.encode(keyHash, sender, subId, nonce))); + return (uint256(keccak256(abi.encode(keyHash, preSeed))), preSeed); + } + + /** + * @dev calls target address with exactly gasAmount gas and data as calldata + * or reverts if at least gasAmount gas is not available. + */ + function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { + assembly { + let g := gas() + // Compute g -= GAS_FOR_CALL_EXACT_CHECK and check for underflow + // The gas actually passed to the callee is min(gasAmount, 63//64*gas available). + // We want to ensure that we revert if gasAmount > 63//64*gas available + // as we do not want to provide them with less, however that check itself costs + // gas. GAS_FOR_CALL_EXACT_CHECK ensures we have at least enough gas to be able + // to revert if gasAmount > 63//64*gas available. 
+ if lt(g, GAS_FOR_CALL_EXACT_CHECK) { + revert(0, 0) + } + g := sub(g, GAS_FOR_CALL_EXACT_CHECK) + // if g - g//64 <= gasAmount, revert + // (we subtract g//64 because of EIP-150) + if iszero(gt(sub(g, div(g, 64)), gasAmount)) { + revert(0, 0) + } + // solidity calls check that a contract actually exists at the destination, so we do the same + if iszero(extcodesize(target)) { + revert(0, 0) + } + // call and return whether we succeeded. ignore return data + // call(gas,addr,value,argsOffset,argsLength,retOffset,retLength) + success := call(gasAmount, target, 0, add(data, 0x20), mload(data), 0, 0) + } + return success; + } + + struct Output { + bytes32 keyHash; + uint256 requestId; + uint256 randomness; + } + + function _getRandomnessFromProof( + Proof memory proof, + RequestCommitment memory rc + ) internal view returns (Output memory) { + bytes32 keyHash = hashOfKey(proof.pk); + // Only registered proving keys are permitted. + if (!s_provingKeys[keyHash]) { + revert NoSuchProvingKey(keyHash); + } + uint256 requestId = uint256(keccak256(abi.encode(keyHash, proof.seed))); + bytes32 commitment = s_requestCommitments[requestId]; + if (commitment == 0) { + revert NoCorrespondingRequest(); + } + if ( + commitment != + keccak256(abi.encode(requestId, rc.blockNum, rc.subId, rc.callbackGasLimit, rc.numWords, rc.sender, rc.extraArgs)) + ) { + revert IncorrectCommitment(); + } + + bytes32 blockHash = ChainSpecificUtil._getBlockhash(rc.blockNum); + if (blockHash == bytes32(0)) { + blockHash = BLOCKHASH_STORE.getBlockhash(rc.blockNum); + if (blockHash == bytes32(0)) { + revert BlockhashNotInStore(rc.blockNum); + } + } + + // The seed actually used by the VRF machinery, mixing in the blockhash + uint256 actualSeed = uint256(keccak256(abi.encodePacked(proof.seed, blockHash))); + uint256 randomness = VRF._randomValueFromVRFProof(proof, actualSeed); // Reverts on failure + return Output(keyHash, requestId, randomness); + } + + /* + * @notice Fulfill a randomness request + * 
@param proof contains the proof and randomness + * @param rc request commitment pre-image, committed to at request time + * @return payment amount billed to the subscription + * @dev simulated offchain to determine if sufficient balance is present to fulfill the request + */ + function fulfillRandomWords( + Proof memory proof, + RequestCommitment memory rc, + bool + ) external nonReentrant returns (uint96) { + uint256 startGas = gasleft(); + Output memory output = _getRandomnessFromProof(proof, rc); + + uint256[] memory randomWords = new uint256[](rc.numWords); + for (uint256 i = 0; i < rc.numWords; i++) { + randomWords[i] = uint256(keccak256(abi.encode(output.randomness, i))); + } + + delete s_requestCommitments[output.requestId]; + VRFConsumerBaseV2Plus v; + bytes memory resp = abi.encodeWithSelector(v.rawFulfillRandomWords.selector, output.requestId, randomWords); + // Call with explicitly the amount of callback gas requested + // Important to not let them exhaust the gas budget and avoid oracle payment. + // Do not allow any non-view/non-pure coordinator functions to be called + // during the consumers callback code via reentrancyLock. + // Note that _callWithExactGas will revert if we do not have sufficient gas + // to give the callee their requested amount. + s_config.reentrancyLock = true; + bool success = _callWithExactGas(rc.callbackGasLimit, rc.sender, resp); + s_config.reentrancyLock = false; + + // Increment the req count for the subscription. + uint64 reqCount = s_subscriptions[rc.subId].reqCount; + s_subscriptions[rc.subId].reqCount = reqCount + 1; + + // stack too deep error + { + bool nativePayment = uint8(rc.extraArgs[rc.extraArgs.length - 1]) == 1; + // We want to charge users exactly for how much gas they use in their callback. + // The gasAfterPaymentCalculation is meant to cover these additional operations where we + // decrement the subscription balance and increment the oracles withdrawable balance. 
+ uint96 payment = _calculatePaymentAmount( + startGas, + s_config.gasAfterPaymentCalculation, + tx.gasprice, + nativePayment + ); + if (nativePayment) { + if (s_subscriptions[rc.subId].nativeBalance < payment) { + revert InsufficientBalance(); + } + s_subscriptions[rc.subId].nativeBalance -= payment; + s_withdrawableNative += payment; + } else { + if (s_subscriptions[rc.subId].balance < payment) { + revert InsufficientBalance(); + } + s_subscriptions[rc.subId].balance -= payment; + s_withdrawableTokens += payment; + } + + // Include payment in the event for tracking costs. + // event RandomWordsFulfilled(uint256 indexed requestId, uint256 outputSeed, uint96 payment, bytes extraArgs, bool success); + emit RandomWordsFulfilled(output.requestId, output.randomness, rc.subId, payment, success); + + return payment; + } + } + + function _calculatePaymentAmount( + uint256 startGas, + uint256 gasAfterPaymentCalculation, + uint256 weiPerUnitGas, + bool nativePayment + ) internal view returns (uint96) { + if (nativePayment) { + return + _calculatePaymentAmountNative( + startGas, + gasAfterPaymentCalculation, + s_feeConfig.fulfillmentFlatFeeNativePPM, + weiPerUnitGas + ); + } + return + _calculatePaymentAmountLink( + startGas, + gasAfterPaymentCalculation, + s_feeConfig.fulfillmentFlatFeeLinkPPM, + weiPerUnitGas + ); + } + + function _calculatePaymentAmountNative( + uint256 startGas, + uint256 gasAfterPaymentCalculation, + uint32 fulfillmentFlatFeePPM, + uint256 weiPerUnitGas + ) internal view returns (uint96) { + // Will return non-zero on chains that have this enabled + uint256 l1CostWei = ChainSpecificUtil._getCurrentTxL1GasFees(msg.data); + // calculate the payment without the premium + uint256 baseFeeWei = weiPerUnitGas * (gasAfterPaymentCalculation + startGas - gasleft()); + // calculate the flat fee in wei + uint256 flatFeeWei = 1e12 * uint256(fulfillmentFlatFeePPM); + // return the final fee with the flat fee and l1 cost (if applicable) added + return 
uint96(baseFeeWei + flatFeeWei + l1CostWei); + } + + // Get the amount of gas used for fulfillment + function _calculatePaymentAmountLink( + uint256 startGas, + uint256 gasAfterPaymentCalculation, + uint32 fulfillmentFlatFeeLinkPPM, + uint256 weiPerUnitGas + ) internal view returns (uint96) { + int256 weiPerUnitLink; + weiPerUnitLink = _getFeedData(); + if (weiPerUnitLink <= 0) { + revert InvalidLinkWeiPrice(weiPerUnitLink); + } + // Will return non-zero on chains that have this enabled + uint256 l1CostWei = ChainSpecificUtil._getCurrentTxL1GasFees(msg.data); + // (1e18 juels/link) ((wei/gas * gas) + l1wei) / (wei/link) = juels + uint256 paymentNoFee = (1e18 * (weiPerUnitGas * (gasAfterPaymentCalculation + startGas - gasleft()) + l1CostWei)) / + uint256(weiPerUnitLink); + uint256 fee = 1e12 * uint256(fulfillmentFlatFeeLinkPPM); + if (paymentNoFee > (1e27 - fee)) { + revert PaymentTooLarge(); // Payment + fee cannot be more than all of the link in existence. + } + return uint96(paymentNoFee + fee); + } + + function _getFeedData() private view returns (int256) { + uint32 stalenessSeconds = s_config.stalenessSeconds; + bool staleFallback = stalenessSeconds > 0; + uint256 timestamp; + int256 weiPerUnitLink; + (, weiPerUnitLink, , timestamp, ) = PLI_NATIVE_FEED.latestRoundData(); + // solhint-disable-next-line not-rely-on-time + if (staleFallback && stalenessSeconds < block.timestamp - timestamp) { + weiPerUnitLink = s_fallbackWeiPerUnitLink; + } + return weiPerUnitLink; + } + + /* + * @notice Check to see if there exists a request commitment consumers + * for all consumers and keyhashes for a given sub. + * @param subId - ID of the subscription + * @return true if there exists at least one unfulfilled request for the subscription, false + * otherwise. + * @dev Looping is bounded to MAX_CONSUMERS*(number of keyhashes). + * @dev Used to disable subscription canceling while outstanding request are present. 
+ */ + function pendingRequestExists(uint256 subId) public view override returns (bool) { + SubscriptionConfig memory subConfig = s_subscriptionConfigs[subId]; + for (uint256 i = 0; i < subConfig.consumers.length; i++) { + for (uint256 j = 0; j < s_provingKeyHashes.length; j++) { + (uint256 reqId, ) = _computeRequestId( + s_provingKeyHashes[j], + subConfig.consumers[i], + subId, + s_consumers[subConfig.consumers[i]][subId] + ); + if (s_requestCommitments[reqId] != 0) { + return true; + } + } + } + return false; + } + + /** + * @inheritdoc IVRFSubscriptionV2Plus + */ + function removeConsumer(uint256 subId, address consumer) external override onlySubOwner(subId) nonReentrant { + if (pendingRequestExists(subId)) { + revert PendingRequestExists(); + } + if (s_consumers[consumer][subId] == 0) { + revert InvalidConsumer(subId, consumer); + } + // Note bounded by MAX_CONSUMERS + address[] memory consumers = s_subscriptionConfigs[subId].consumers; + uint256 lastConsumerIndex = consumers.length - 1; + for (uint256 i = 0; i < consumers.length; i++) { + if (consumers[i] == consumer) { + address last = consumers[lastConsumerIndex]; + // Storage write to preserve last element + s_subscriptionConfigs[subId].consumers[i] = last; + // Storage remove last element + s_subscriptionConfigs[subId].consumers.pop(); + break; + } + } + delete s_consumers[consumer][subId]; + emit SubscriptionConsumerRemoved(subId, consumer); + } + + /** + * @inheritdoc IVRFSubscriptionV2Plus + */ + function cancelSubscription(uint256 subId, address to) external override onlySubOwner(subId) nonReentrant { + if (pendingRequestExists(subId)) { + revert PendingRequestExists(); + } + _cancelSubscriptionHelper(subId, to); + } + + /*************************************************************************** + * Section: Migration + ***************************************************************************/ + + address[] internal s_migrationTargets; + + /// @dev Emitted when new coordinator is registered as 
migratable target + event CoordinatorRegistered(address coordinatorAddress); + + /// @notice emitted when migration to new coordinator completes successfully + /// @param newCoordinator coordinator address after migration + /// @param subId subscription ID + event MigrationCompleted(address newCoordinator, uint256 subId); + + /// @notice emitted when migrate() is called and given coordinator is not registered as migratable target + error CoordinatorNotRegistered(address coordinatorAddress); + + /// @notice emitted when migrate() is called and given coordinator is registered as migratable target + error CoordinatorAlreadyRegistered(address coordinatorAddress); + + /// @dev encapsulates data to be migrated from current coordinator + struct V1MigrationData { + uint8 fromVersion; + uint256 subId; + address subOwner; + address[] consumers; + uint96 linkBalance; + uint96 nativeBalance; + } + + function _isTargetRegistered(address target) internal view returns (bool) { + for (uint256 i = 0; i < s_migrationTargets.length; i++) { + if (s_migrationTargets[i] == target) { + return true; + } + } + return false; + } + + function registerMigratableCoordinator(address target) external onlyOwner { + if (_isTargetRegistered(target)) { + revert CoordinatorAlreadyRegistered(target); + } + s_migrationTargets.push(target); + emit CoordinatorRegistered(target); + } + + function migrate(uint256 subId, address newCoordinator) external nonReentrant { + if (!_isTargetRegistered(newCoordinator)) { + revert CoordinatorNotRegistered(newCoordinator); + } + (uint96 balance, uint96 nativeBalance, , address owner, address[] memory consumers) = getSubscription(subId); + // solhint-disable-next-line custom-errors + require(owner == msg.sender, "Not subscription owner"); + // solhint-disable-next-line custom-errors + require(!pendingRequestExists(subId), "Pending request exists"); + + V1MigrationData memory migrationData = V1MigrationData({ + fromVersion: migrationVersion(), + subId: subId, + 
subOwner: owner, + consumers: consumers, + linkBalance: balance, + nativeBalance: nativeBalance + }); + bytes memory encodedData = abi.encode(migrationData); + _deleteSubscription(subId); + IVRFCoordinatorV2PlusMigration(newCoordinator).onMigration{value: nativeBalance}(encodedData); + + // Only transfer PLI if the token is active and there is a balance. + if (address(PLI) != address(0) && balance != 0) { + // solhint-disable-next-line custom-errors + require(PLI.transfer(address(newCoordinator), balance), "insufficient funds"); + } + + // despite the fact that we follow best practices this is still probably safest + // to prevent any re-entrancy possibilities. + s_config.reentrancyLock = true; + for (uint256 i = 0; i < consumers.length; i++) { + IVRFMigratableConsumerV2Plus(consumers[i]).setCoordinator(newCoordinator); + } + s_config.reentrancyLock = false; + + emit MigrationCompleted(newCoordinator, subId); + } + + function migrationVersion() public pure returns (uint8 version) { + return 2; + } + + /** + * @inheritdoc IVRFCoordinatorV2PlusMigration + */ + function onMigration(bytes calldata encodedData) external payable override { + V1MigrationData memory migrationData = abi.decode(encodedData, (V1MigrationData)); + + if (migrationData.fromVersion != 1) { + revert InvalidVersion(migrationData.fromVersion, 1); + } + + if (msg.value != uint256(migrationData.nativeBalance)) { + revert InvalidNativeBalance(msg.value, migrationData.nativeBalance); + } + + // it should be impossible to have a subscription id collision, for two reasons: + // 1. the subscription ID is calculated using inputs that cannot be replicated under different + // conditions. + // 2. once a subscription is migrated it is deleted from the previous coordinator, so it cannot + // be migrated again. + // however, we should have this check here in case the `migrate` function on + // future coordinators "forgets" to delete subscription data allowing re-migration of the same + // subscription. 
+ if (s_subscriptionConfigs[migrationData.subId].owner != address(0)) { + revert SubscriptionIDCollisionFound(); + } + + for (uint256 i = 0; i < migrationData.consumers.length; i++) { + s_consumers[migrationData.consumers[i]][migrationData.subId] = 1; + } + + s_subscriptions[migrationData.subId] = Subscription({ + nativeBalance: migrationData.nativeBalance, + balance: migrationData.linkBalance, + reqCount: 0 + }); + s_subscriptionConfigs[migrationData.subId] = SubscriptionConfig({ + owner: migrationData.subOwner, + consumers: migrationData.consumers, + requestedOwner: address(0) + }); + + s_totalBalance += uint96(migrationData.linkBalance); + s_totalNativeBalance += uint96(migrationData.nativeBalance); + + s_subIds.add(migrationData.subId); + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2Plus_V2Example.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2Plus_V2Example.sol new file mode 100644 index 00000000..0204be80 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2Plus_V2Example.sol @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import {IVRFCoordinatorV2PlusMigration} from "../interfaces/IVRFCoordinatorV2PlusMigration.sol"; +import {VRFConsumerBaseV2Plus} from "../VRFConsumerBaseV2Plus.sol"; +import {VRFV2PlusClient} from "../libraries/VRFV2PlusClient.sol"; + +/// @dev this contract is only meant for testing migration +/// @dev it is a simplified example of future version (V2) of VRFCoordinatorV2Plus +// solhint-disable-next-line contract-name-camelcase +contract VRFCoordinatorV2Plus_V2Example is IVRFCoordinatorV2PlusMigration { + error SubscriptionIDCollisionFound(); + + struct Subscription { + uint96 linkBalance; + uint96 nativeBalance; + uint64 reqCount; + address owner; + address[] consumers; + } + + mapping(uint256 => Subscription) public s_subscriptions; /* subId */ /* subscription */ + mapping(uint256 => address) public s_requestConsumerMapping; /* RequestId */ /* 
consumer address */ + + uint96 public s_totalLinkBalance; + uint96 public s_totalNativeBalance; + // request ID nonce + uint256 public s_requestId = 0; + + // older version of coordinator, from which migration is supported + address public s_prevCoordinator; + address public s_link; + + constructor(address link, address prevCoordinator) { + s_link = link; + s_prevCoordinator = prevCoordinator; + } + + /*************************************************************************** + * Section: Subscription + **************************************************************************/ + + /// @dev Emitted when a subscription for a given ID cannot be found + error InvalidSubscription(); + + function getSubscription( + uint256 subId + ) + public + view + returns (uint96 linkBalance, uint96 nativeBalance, uint64 reqCount, address owner, address[] memory consumers) + { + if (s_subscriptions[subId].owner == address(0)) { + revert InvalidSubscription(); + } + return ( + s_subscriptions[subId].linkBalance, + s_subscriptions[subId].nativeBalance, + s_subscriptions[subId].reqCount, + s_subscriptions[subId].owner, + s_subscriptions[subId].consumers + ); + } + + /*************************************************************************** + * Section: Migration + **************************************************************************/ + + /// @notice emitted when caller is not a previous version of VRF coordinator + /// @param sender caller + /// @param previousCoordinator expected coordinator address + error MustBePreviousCoordinator(address sender, address previousCoordinator); + + /// @notice emitted when version in the request doesn't match expected version + error InvalidVersion(uint8 requestVersion, uint8 expectedVersion); + + /// @notice emitted when transferred balance (msg.value) does not match the metadata in V1MigrationData + error InvalidNativeBalance(uint256 transferredValue, uint96 expectedValue); + + /// @dev encapsulates data migrated over from previous 
coordinator + struct V1MigrationData { + uint8 fromVersion; + uint256 subId; + address subOwner; + address[] consumers; + uint96 linkBalance; + uint96 nativeBalance; + } + + /** + * @inheritdoc IVRFCoordinatorV2PlusMigration + */ + function onMigration(bytes calldata encodedData) external payable override { + if (msg.sender != s_prevCoordinator) { + revert MustBePreviousCoordinator(msg.sender, s_prevCoordinator); + } + + V1MigrationData memory migrationData = abi.decode(encodedData, (V1MigrationData)); + + if (migrationData.fromVersion != 1) { + revert InvalidVersion(migrationData.fromVersion, 1); + } + + if (msg.value != uint256(migrationData.nativeBalance)) { + revert InvalidNativeBalance(msg.value, migrationData.nativeBalance); + } + + // it should be impossible to have a subscription id collision, for two reasons: + // 1. the subscription ID is calculated using inputs that cannot be replicated under different + // conditions. + // 2. once a subscription is migrated it is deleted from the previous coordinator, so it cannot + // be migrated again. + // however, we should have this check here in case the `migrate` function on + // future coordinators "forgets" to delete subscription data allowing re-migration of the same + // subscription. 
+ if (s_subscriptions[migrationData.subId].owner != address(0)) { + revert SubscriptionIDCollisionFound(); + } + + s_subscriptions[migrationData.subId] = Subscription({ + nativeBalance: migrationData.nativeBalance, + linkBalance: migrationData.linkBalance, + reqCount: 0, + owner: migrationData.subOwner, + consumers: migrationData.consumers + }); + s_totalNativeBalance += migrationData.nativeBalance; + s_totalLinkBalance += migrationData.linkBalance; + } + + /*************************************************************************** + * Section: Request/Response + **************************************************************************/ + + function requestRandomWords(VRFV2PlusClient.RandomWordsRequest calldata req) external returns (uint256 requestId) { + Subscription memory sub = s_subscriptions[req.subId]; + sub.reqCount = sub.reqCount + 1; + return _handleRequest(msg.sender); + } + + function _handleRequest(address requester) private returns (uint256) { + s_requestId = s_requestId + 1; + uint256 requestId = s_requestId; + s_requestConsumerMapping[s_requestId] = requester; + return requestId; + } + + function generateFakeRandomness(uint256 requestID) public pure returns (uint256[] memory) { + uint256[] memory randomness = new uint256[](1); + randomness[0] = uint256(keccak256(abi.encode(requestID, "not random"))); + return randomness; + } + + function fulfillRandomWords(uint256 requestId) external { + VRFConsumerBaseV2Plus consumer = VRFConsumerBaseV2Plus(s_requestConsumerMapping[requestId]); + consumer.rawFulfillRandomWords(requestId, generateFakeRandomness(requestId)); + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFMaliciousConsumerV2Plus.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFMaliciousConsumerV2Plus.sol new file mode 100644 index 00000000..18b6b0ff --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFMaliciousConsumerV2Plus.sol @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import 
{LinkTokenInterface} from "../../../shared/interfaces/LinkTokenInterface.sol"; +import {VRFConsumerBaseV2Plus} from "../VRFConsumerBaseV2Plus.sol"; +import {VRFV2PlusClient} from "../libraries/VRFV2PlusClient.sol"; + +contract VRFMaliciousConsumerV2Plus is VRFConsumerBaseV2Plus { + uint256[] public s_randomWords; + uint256 public s_requestId; + // solhint-disable-next-line plugin-solidity/prefix-storage-variables-with-s-underscore + LinkTokenInterface internal PLITOKEN; + uint256 public s_gasAvailable; + uint256 internal s_subId; + bytes32 internal s_keyHash; + + constructor(address vrfCoordinator, address link) VRFConsumerBaseV2Plus(vrfCoordinator) { + PLITOKEN = LinkTokenInterface(link); + } + + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal override { + s_gasAvailable = gasleft(); + s_randomWords = randomWords; + s_requestId = requestId; + VRFV2PlusClient.RandomWordsRequest memory req = VRFV2PlusClient.RandomWordsRequest({ + keyHash: s_keyHash, + subId: s_subId, + requestConfirmations: 1, + callbackGasLimit: 200000, + numWords: 1, + extraArgs: "" // empty extraArgs defaults to link payment + }); + // Should revert + s_vrfCoordinator.requestRandomWords(req); + } + + function createSubscriptionAndFund(uint96 amount) external { + if (s_subId == 0) { + s_subId = s_vrfCoordinator.createSubscription(); + s_vrfCoordinator.addConsumer(s_subId, address(this)); + } + // Approve the link transfer. 
+ PLITOKEN.transferAndCall(address(s_vrfCoordinator), amount, abi.encode(s_subId)); + } + + function updateSubscription(address[] memory consumers) external { + // solhint-disable-next-line custom-errors + require(s_subId != 0, "subID not set"); + for (uint256 i = 0; i < consumers.length; i++) { + s_vrfCoordinator.addConsumer(s_subId, consumers[i]); + } + } + + function requestRandomness(bytes32 keyHash) external returns (uint256) { + s_keyHash = keyHash; + VRFV2PlusClient.RandomWordsRequest memory req = VRFV2PlusClient.RandomWordsRequest({ + keyHash: keyHash, + subId: s_subId, + requestConfirmations: 1, + callbackGasLimit: 500000, + numWords: 1, + extraArgs: "" // empty extraArgs defaults to link payment + }); + return s_vrfCoordinator.requestRandomWords(req); + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusConsumerExample.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusConsumerExample.sol new file mode 100644 index 00000000..5f170fd8 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusConsumerExample.sol @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../../shared/interfaces/LinkTokenInterface.sol"; +import {IVRFCoordinatorV2Plus} from "../interfaces/IVRFCoordinatorV2Plus.sol"; +import {VRFConsumerBaseV2Plus} from "../VRFConsumerBaseV2Plus.sol"; +import {ConfirmedOwner} from "../../../shared/access/ConfirmedOwner.sol"; +import {VRFV2PlusClient} from "../libraries/VRFV2PlusClient.sol"; + +/// @notice This contract is used for testing only and should not be used for production. 
+contract VRFV2PlusConsumerExample is ConfirmedOwner, VRFConsumerBaseV2Plus { + LinkTokenInterface public s_linkToken; + uint256 public s_recentRequestId; + IVRFCoordinatorV2Plus public s_vrfCoordinatorApiV1; + uint256 public s_subId; + + struct Response { + bool fulfilled; + address requester; + uint256 requestId; + uint256[] randomWords; + } + mapping(uint256 /* request id */ => Response /* response */) public s_requests; + + constructor(address vrfCoordinator, address link) VRFConsumerBaseV2Plus(vrfCoordinator) { + s_vrfCoordinatorApiV1 = IVRFCoordinatorV2Plus(vrfCoordinator); + s_linkToken = LinkTokenInterface(link); + } + + function getRandomness(uint256 requestId, uint256 idx) public view returns (uint256 randomWord) { + Response memory resp = s_requests[requestId]; + // solhint-disable-next-line custom-errors + require(resp.requestId != 0, "request ID is incorrect"); + return resp.randomWords[idx]; + } + + function _subscribe() internal returns (uint256) { + if (s_subId == 0) { + s_subId = s_vrfCoordinatorApiV1.createSubscription(); + s_vrfCoordinatorApiV1.addConsumer(s_subId, address(this)); + } + return s_subId; + } + + function createSubscriptionAndFundNative() external payable { + _subscribe(); + s_vrfCoordinatorApiV1.fundSubscriptionWithNative{value: msg.value}(s_subId); + } + + function createSubscriptionAndFund(uint96 amount) external { + _subscribe(); + // Approve the link transfer. 
+ s_linkToken.transferAndCall(address(s_vrfCoordinator), amount, abi.encode(s_subId)); + } + + function topUpSubscription(uint96 amount) external { + // solhint-disable-next-line custom-errors + require(s_subId != 0, "sub not set"); + s_linkToken.transferAndCall(address(s_vrfCoordinator), amount, abi.encode(s_subId)); + } + + function topUpSubscriptionNative() external payable { + // solhint-disable-next-line custom-errors + require(s_subId != 0, "sub not set"); + s_vrfCoordinatorApiV1.fundSubscriptionWithNative{value: msg.value}(s_subId); + } + + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal override { + // solhint-disable-next-line custom-errors + require(requestId == s_recentRequestId, "request ID is incorrect"); + s_requests[requestId].randomWords = randomWords; + s_requests[requestId].fulfilled = true; + } + + function requestRandomWords( + uint32 callbackGasLimit, + uint16 requestConfirmations, + uint32 numWords, + bytes32 keyHash, + bool nativePayment + ) external { + VRFV2PlusClient.RandomWordsRequest memory req = VRFV2PlusClient.RandomWordsRequest({ + keyHash: keyHash, + subId: s_subId, + requestConfirmations: requestConfirmations, + callbackGasLimit: callbackGasLimit, + numWords: numWords, + extraArgs: VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: nativePayment})) + }); + uint256 requestId = s_vrfCoordinator.requestRandomWords(req); + Response memory resp = Response({ + requestId: requestId, + randomWords: new uint256[](0), + fulfilled: false, + requester: msg.sender + }); + s_requests[requestId] = resp; + s_recentRequestId = requestId; + } + + function updateSubscription(address[] memory consumers) external { + // solhint-disable-next-line custom-errors + require(s_subId != 0, "subID not set"); + for (uint256 i = 0; i < consumers.length; i++) { + s_vrfCoordinatorApiV1.addConsumer(s_subId, 
consumers[i]); + } + } + + function setSubId(uint256 subId) external { + s_subId = subId; + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusExternalSubOwnerExample.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusExternalSubOwnerExample.sol new file mode 100644 index 00000000..e93f4d0f --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusExternalSubOwnerExample.sol @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../../shared/interfaces/LinkTokenInterface.sol"; +import {VRFConsumerBaseV2Plus} from "../VRFConsumerBaseV2Plus.sol"; +import {VRFV2PlusClient} from "../libraries/VRFV2PlusClient.sol"; + +/// @notice This contract is used for testing only and should not be used for production. +contract VRFV2PlusExternalSubOwnerExample is VRFConsumerBaseV2Plus { + // solhint-disable-next-line plugin-solidity/prefix-storage-variables-with-s-underscore + LinkTokenInterface internal PLITOKEN; + + uint256[] public s_randomWords; + uint256 public s_requestId; + address internal s_owner; + + constructor(address vrfCoordinator, address link) VRFConsumerBaseV2Plus(vrfCoordinator) { + PLITOKEN = LinkTokenInterface(link); + s_owner = msg.sender; + } + + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal override { + // solhint-disable-next-line custom-errors + require(requestId == s_requestId, "request ID is incorrect"); + s_randomWords = randomWords; + } + + function requestRandomWords( + uint256 subId, + uint32 callbackGasLimit, + uint16 requestConfirmations, + uint32 numWords, + bytes32 keyHash, + bool nativePayment + ) external onlyOwner { + VRFV2PlusClient.RandomWordsRequest memory req = VRFV2PlusClient.RandomWordsRequest({ + keyHash: keyHash, + subId: subId, + requestConfirmations: requestConfirmations, + callbackGasLimit: callbackGasLimit, + numWords: 
numWords, + extraArgs: VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: nativePayment})) + }); + // Will revert if subscription is not funded. + s_requestId = s_vrfCoordinator.requestRandomWords(req); + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusLoadTestWithMetrics.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusLoadTestWithMetrics.sol new file mode 100644 index 00000000..361ef69f --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusLoadTestWithMetrics.sol @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {ChainSpecificUtil} from "../../../ChainSpecificUtil.sol"; +import {VRFConsumerBaseV2Plus} from "../VRFConsumerBaseV2Plus.sol"; +import {VRFV2PlusClient} from "../libraries/VRFV2PlusClient.sol"; + +/** + * @title The VRFLoadTestExternalSubOwner contract. + * @notice Allows making many VRF V2 randomness requests in a single transaction for load testing. + */ +contract VRFV2PlusLoadTestWithMetrics is VRFConsumerBaseV2Plus { + uint256 public s_responseCount; + uint256 public s_requestCount; + uint256 public s_averageFulfillmentInMillions = 0; // in millions for better precision + uint256 public s_slowestFulfillment = 0; + uint256 public s_fastestFulfillment = 999; + uint256 public s_lastRequestId; + // solhint-disable-next-line plugin-solidity/prefix-storage-variables-with-s-underscore + mapping(uint256 => uint256) internal requestHeights; // requestIds to block number when rand request was made + + struct RequestStatus { + bool fulfilled; + uint256[] randomWords; + uint256 requestTimestamp; + uint256 fulfilmentTimestamp; + uint256 requestBlockNumber; + uint256 fulfilmentBlockNumber; + } + + mapping(uint256 => RequestStatus) /* requestId */ /* requestStatus */ public s_requests; + + constructor(address _vrfCoordinator) VRFConsumerBaseV2Plus(_vrfCoordinator) {} + + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function 
fulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) internal override { + uint256 fulfilmentBlockNumber = ChainSpecificUtil._getBlockNumber(); + uint256 requestDelay = fulfilmentBlockNumber - requestHeights[_requestId]; + uint256 requestDelayInMillions = requestDelay * 1_000_000; + + if (requestDelay > s_slowestFulfillment) { + s_slowestFulfillment = requestDelay; + } + s_fastestFulfillment = requestDelay < s_fastestFulfillment ? requestDelay : s_fastestFulfillment; + s_averageFulfillmentInMillions = s_responseCount > 0 + ? (s_averageFulfillmentInMillions * s_responseCount + requestDelayInMillions) / (s_responseCount + 1) + : requestDelayInMillions; + + s_requests[_requestId].fulfilled = true; + s_requests[_requestId].randomWords = _randomWords; + s_requests[_requestId].fulfilmentTimestamp = block.timestamp; + s_requests[_requestId].fulfilmentBlockNumber = fulfilmentBlockNumber; + + s_responseCount++; + } + + function requestRandomWords( + uint256 _subId, + uint16 _requestConfirmations, + bytes32 _keyHash, + uint32 _callbackGasLimit, + bool _nativePayment, + uint32 _numWords, + uint16 _requestCount + ) external onlyOwner { + for (uint16 i = 0; i < _requestCount; i++) { + VRFV2PlusClient.RandomWordsRequest memory req = VRFV2PlusClient.RandomWordsRequest({ + keyHash: _keyHash, + subId: _subId, + requestConfirmations: _requestConfirmations, + callbackGasLimit: _callbackGasLimit, + numWords: _numWords, + extraArgs: VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: _nativePayment})) + }); + // Will revert if subscription is not funded. 
+ uint256 requestId = s_vrfCoordinator.requestRandomWords(req); + + s_lastRequestId = requestId; + uint256 requestBlockNumber = ChainSpecificUtil._getBlockNumber(); + s_requests[requestId] = RequestStatus({ + randomWords: new uint256[](0), + fulfilled: false, + requestTimestamp: block.timestamp, + fulfilmentTimestamp: 0, + requestBlockNumber: requestBlockNumber, + fulfilmentBlockNumber: 0 + }); + s_requestCount++; + requestHeights[requestId] = requestBlockNumber; + } + } + + function reset() external { + s_averageFulfillmentInMillions = 0; // in millions for better precision + s_slowestFulfillment = 0; + s_fastestFulfillment = 999; + s_requestCount = 0; + s_responseCount = 0; + } + + function getRequestStatus( + uint256 _requestId + ) + external + view + returns ( + bool fulfilled, + uint256[] memory randomWords, + uint256 requestTimestamp, + uint256 fulfilmentTimestamp, + uint256 requestBlockNumber, + uint256 fulfilmentBlockNumber + ) + { + RequestStatus memory request = s_requests[_requestId]; + return ( + request.fulfilled, + request.randomWords, + request.requestTimestamp, + request.fulfilmentTimestamp, + request.requestBlockNumber, + request.fulfilmentBlockNumber + ); + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusMaliciousMigrator.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusMaliciousMigrator.sol new file mode 100644 index 00000000..16797bb9 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusMaliciousMigrator.sol @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.8.0; + +import {IVRFMigratableConsumerV2Plus} from "../interfaces/IVRFMigratableConsumerV2Plus.sol"; +import {IVRFCoordinatorV2Plus} from "../interfaces/IVRFCoordinatorV2Plus.sol"; +import {VRFV2PlusClient} from "../libraries/VRFV2PlusClient.sol"; + +contract VRFV2PlusMaliciousMigrator is IVRFMigratableConsumerV2Plus { + IVRFCoordinatorV2Plus internal s_vrfCoordinator; + + constructor(address _vrfCoordinator) { + 
s_vrfCoordinator = IVRFCoordinatorV2Plus(_vrfCoordinator); + } + + /** + * @inheritdoc IVRFMigratableConsumerV2Plus + */ + function setCoordinator(address /* _vrfCoordinator */) public override { + // try to re-enter, should revert + // args don't really matter + s_vrfCoordinator.requestRandomWords( + VRFV2PlusClient.RandomWordsRequest({ + keyHash: bytes32(0), + subId: 0, + requestConfirmations: 0, + callbackGasLimit: 0, + numWords: 0, + extraArgs: "" + }) + ); + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusRevertingExample.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusRevertingExample.sol new file mode 100644 index 00000000..fd2ef375 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusRevertingExample.sol @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../../shared/interfaces/LinkTokenInterface.sol"; +import {VRFConsumerBaseV2Plus} from "../VRFConsumerBaseV2Plus.sol"; +import {VRFV2PlusClient} from "../libraries/VRFV2PlusClient.sol"; + +// VRFV2RevertingExample will always revert. Used for testing only, useless in prod. 
+contract VRFV2PlusRevertingExample is VRFConsumerBaseV2Plus { + uint256[] public s_randomWords; + uint256 public s_requestId; + // solhint-disable-next-line plugin-solidity/prefix-storage-variables-with-s-underscore + LinkTokenInterface internal PLITOKEN; + uint256 public s_subId; + uint256 public s_gasAvailable; + + constructor(address vrfCoordinator, address link) VRFConsumerBaseV2Plus(vrfCoordinator) { + PLITOKEN = LinkTokenInterface(link); + } + + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256, uint256[] memory) internal pure override { + // solhint-disable-next-line custom-errors, reason-string + revert(); + } + + function createSubscriptionAndFund(uint96 amount) external { + if (s_subId == 0) { + s_subId = s_vrfCoordinator.createSubscription(); + s_vrfCoordinator.addConsumer(s_subId, address(this)); + } + // Approve the link transfer. + PLITOKEN.transferAndCall(address(s_vrfCoordinator), amount, abi.encode(s_subId)); + } + + function topUpSubscription(uint96 amount) external { + // solhint-disable-next-line custom-errors + require(s_subId != 0, "sub not set"); + // Approve the link transfer. 
+ PLITOKEN.transferAndCall(address(s_vrfCoordinator), amount, abi.encode(s_subId)); + } + + function updateSubscription(address[] memory consumers) external { + // solhint-disable-next-line custom-errors + require(s_subId != 0, "subID not set"); + for (uint256 i = 0; i < consumers.length; i++) { + s_vrfCoordinator.addConsumer(s_subId, consumers[i]); + } + } + + function requestRandomness( + bytes32 keyHash, + uint256 subId, + uint16 minReqConfs, + uint32 callbackGasLimit, + uint32 numWords + ) external returns (uint256) { + VRFV2PlusClient.RandomWordsRequest memory req = VRFV2PlusClient.RandomWordsRequest({ + keyHash: keyHash, + subId: subId, + requestConfirmations: minReqConfs, + callbackGasLimit: callbackGasLimit, + numWords: numWords, + extraArgs: "" // empty extraArgs defaults to link payment + }); + s_requestId = s_vrfCoordinator.requestRandomWords(req); + return s_requestId; + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusSingleConsumerExample.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusSingleConsumerExample.sol new file mode 100644 index 00000000..d1881670 --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusSingleConsumerExample.sol @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: MIT +// Example of a single consumer contract which owns the subscription. +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../../shared/interfaces/LinkTokenInterface.sol"; +import {VRFConsumerBaseV2Plus} from "../VRFConsumerBaseV2Plus.sol"; +import {VRFV2PlusClient} from "../libraries/VRFV2PlusClient.sol"; + +/// @notice This contract is used for testing only and should not be used for production. 
+contract VRFV2PlusSingleConsumerExample is VRFConsumerBaseV2Plus { + // solhint-disable-next-line plugin-solidity/prefix-storage-variables-with-s-underscore + LinkTokenInterface internal PLITOKEN; + + struct RequestConfig { + uint256 subId; + uint32 callbackGasLimit; + uint16 requestConfirmations; + uint32 numWords; + bytes32 keyHash; + bool nativePayment; + } + RequestConfig public s_requestConfig; + uint256[] public s_randomWords; + uint256 public s_requestId; + address internal s_owner; + + constructor( + address vrfCoordinator, + address link, + uint32 callbackGasLimit, + uint16 requestConfirmations, + uint32 numWords, + bytes32 keyHash, + bool nativePayment + ) VRFConsumerBaseV2Plus(vrfCoordinator) { + PLITOKEN = LinkTokenInterface(link); + s_owner = msg.sender; + s_requestConfig = RequestConfig({ + subId: 0, // Unset initially + callbackGasLimit: callbackGasLimit, + requestConfirmations: requestConfirmations, + numWords: numWords, + keyHash: keyHash, + nativePayment: nativePayment + }); + subscribe(); + } + + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal override { + // solhint-disable-next-line custom-errors + require(requestId == s_requestId, "request ID is incorrect"); + s_randomWords = randomWords; + } + + // Assumes the subscription is funded sufficiently. + function requestRandomWords() external onlyOwner { + RequestConfig memory rc = s_requestConfig; + VRFV2PlusClient.RandomWordsRequest memory req = VRFV2PlusClient.RandomWordsRequest({ + keyHash: rc.keyHash, + subId: rc.subId, + requestConfirmations: rc.requestConfirmations, + callbackGasLimit: rc.callbackGasLimit, + numWords: rc.numWords, + extraArgs: VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: rc.nativePayment})) + }); + // Will revert if subscription is not set and funded. 
+ s_requestId = s_vrfCoordinator.requestRandomWords(req); + } + + // Assumes this contract owns link + // This method is analogous to VRFv1, except the amount + // should be selected based on the keyHash (each keyHash functions like a "gas lane" + // with different link costs). + function fundAndRequestRandomWords(uint256 amount) external onlyOwner { + RequestConfig memory rc = s_requestConfig; + PLITOKEN.transferAndCall(address(s_vrfCoordinator), amount, abi.encode(s_requestConfig.subId)); + VRFV2PlusClient.RandomWordsRequest memory req = VRFV2PlusClient.RandomWordsRequest({ + keyHash: rc.keyHash, + subId: rc.subId, + requestConfirmations: rc.requestConfirmations, + callbackGasLimit: rc.callbackGasLimit, + numWords: rc.numWords, + extraArgs: VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: rc.nativePayment})) + }); + // Will revert if subscription is not set and funded. + s_requestId = s_vrfCoordinator.requestRandomWords(req); + } + + // Assumes this contract owns link + function topUpSubscription(uint256 amount) external onlyOwner { + PLITOKEN.transferAndCall(address(s_vrfCoordinator), amount, abi.encode(s_requestConfig.subId)); + } + + function withdraw(uint256 amount, address to) external onlyOwner { + PLITOKEN.transfer(to, amount); + } + + function unsubscribe(address to) external onlyOwner { + // Returns funds to this address + s_vrfCoordinator.cancelSubscription(s_requestConfig.subId, to); + s_requestConfig.subId = 0; + } + + // Keep this separate in case the contract want to unsubscribe and then + // resubscribe. 
+ function subscribe() public onlyOwner { + // Create a subscription, current subId + address[] memory consumers = new address[](1); + consumers[0] = address(this); + s_requestConfig.subId = s_vrfCoordinator.createSubscription(); + s_vrfCoordinator.addConsumer(s_requestConfig.subId, consumers[0]); + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusWrapperConsumerExample.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusWrapperConsumerExample.sol new file mode 100644 index 00000000..b64edf3b --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusWrapperConsumerExample.sol @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +import {VRFV2PlusWrapperConsumerBase} from "../VRFV2PlusWrapperConsumerBase.sol"; +import {ConfirmedOwner} from "../../../shared/access/ConfirmedOwner.sol"; +import {VRFV2PlusClient} from "../libraries/VRFV2PlusClient.sol"; + +contract VRFV2PlusWrapperConsumerExample is VRFV2PlusWrapperConsumerBase, ConfirmedOwner { + event WrappedRequestFulfilled(uint256 requestId, uint256[] randomWords, uint256 payment); + event WrapperRequestMade(uint256 indexed requestId, uint256 paid); + + struct RequestStatus { + uint256 paid; + bool fulfilled; + uint256[] randomWords; + bool native; + } + + mapping(uint256 => RequestStatus) /* requestId */ /* requestStatus */ public s_requests; + + constructor( + address _link, + address _vrfV2Wrapper + ) ConfirmedOwner(msg.sender) VRFV2PlusWrapperConsumerBase(_link, _vrfV2Wrapper) {} + + function makeRequest( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords + ) external onlyOwner returns (uint256 requestId) { + bytes memory extraArgs = VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: false})); + uint256 paid; + (requestId, paid) = requestRandomness(_callbackGasLimit, _requestConfirmations, _numWords, extraArgs); + s_requests[requestId] = RequestStatus({paid: paid, randomWords: new uint256[](0), fulfilled: 
false, native: false}); + emit WrapperRequestMade(requestId, paid); + return requestId; + } + + function makeRequestNative( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords + ) external onlyOwner returns (uint256 requestId) { + bytes memory extraArgs = VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: true})); + uint256 paid; + (requestId, paid) = requestRandomnessPayInNative(_callbackGasLimit, _requestConfirmations, _numWords, extraArgs); + s_requests[requestId] = RequestStatus({paid: paid, randomWords: new uint256[](0), fulfilled: false, native: true}); + emit WrapperRequestMade(requestId, paid); + return requestId; + } + + // solhint-disable-next-line plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) internal override { + // solhint-disable-next-line custom-errors + require(s_requests[_requestId].paid > 0, "request not found"); + s_requests[_requestId].fulfilled = true; + s_requests[_requestId].randomWords = _randomWords; + emit WrappedRequestFulfilled(_requestId, _randomWords, s_requests[_requestId].paid); + } + + function getRequestStatus( + uint256 _requestId + ) external view returns (uint256 paid, bool fulfilled, uint256[] memory randomWords) { + // solhint-disable-next-line custom-errors + require(s_requests[_requestId].paid > 0, "request not found"); + RequestStatus memory request = s_requests[_requestId]; + return (request.paid, request.fulfilled, request.randomWords); + } + + /// @notice withdrawLink withdraws the amount specified in amount to the owner + /// @param amount the amount to withdraw, in juels + function withdrawLink(uint256 amount) external onlyOwner { + s_linkToken.transfer(owner(), amount); + } + + /// @notice withdrawNative withdraws the amount specified in amount to the owner + /// @param amount the amount to withdraw, in wei + function withdrawNative(uint256 amount) external onlyOwner { + (bool 
success, ) = payable(owner()).call{value: amount}(""); + // solhint-disable-next-line custom-errors + require(success, "withdrawNative failed"); + } +} diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusWrapperLoadTestConsumer.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusWrapperLoadTestConsumer.sol new file mode 100644 index 00000000..891640ad --- /dev/null +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusWrapperLoadTestConsumer.sol @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +import {VRFV2PlusWrapperConsumerBase} from "../VRFV2PlusWrapperConsumerBase.sol"; +import {ConfirmedOwner} from "../../../shared/access/ConfirmedOwner.sol"; +import {ChainSpecificUtil} from "../../../ChainSpecificUtil.sol"; +import {VRFV2PlusClient} from "../libraries/VRFV2PlusClient.sol"; + +contract VRFV2PlusWrapperLoadTestConsumer is VRFV2PlusWrapperConsumerBase, ConfirmedOwner { + uint256 public s_responseCount; + uint256 public s_requestCount; + uint256 public s_averageFulfillmentInMillions = 0; // in millions for better precision + uint256 public s_slowestFulfillment = 0; + uint256 public s_fastestFulfillment = 999; + uint256 public s_lastRequestId; + // solhint-disable-next-line plugin-solidity/prefix-storage-variables-with-s-underscore + mapping(uint256 => uint256) internal requestHeights; // requestIds to block number when rand request was made + + event WrappedRequestFulfilled(uint256 requestId, uint256[] randomWords, uint256 payment); + event WrapperRequestMade(uint256 indexed requestId, uint256 paid); + + struct RequestStatus { + uint256 paid; + bool fulfilled; + uint256[] randomWords; + uint256 requestTimestamp; + uint256 fulfilmentTimestamp; + uint256 requestBlockNumber; + uint256 fulfilmentBlockNumber; + bool native; + } + + mapping(uint256 => RequestStatus) /* requestId */ /* requestStatus */ public s_requests; + + constructor( + address _link, + address _vrfV2PlusWrapper + ) ConfirmedOwner(msg.sender) 
VRFV2PlusWrapperConsumerBase(_link, _vrfV2PlusWrapper) {} + + function makeRequests( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords, + uint16 _requestCount + ) external onlyOwner { + for (uint16 i = 0; i < _requestCount; i++) { + bytes memory extraArgs = VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: false})); + (uint256 requestId, uint256 paid) = requestRandomness( + _callbackGasLimit, + _requestConfirmations, + _numWords, + extraArgs + ); + s_lastRequestId = requestId; + + uint256 requestBlockNumber = ChainSpecificUtil._getBlockNumber(); + s_requests[requestId] = RequestStatus({ + paid: paid, + fulfilled: false, + randomWords: new uint256[](0), + requestTimestamp: block.timestamp, + requestBlockNumber: requestBlockNumber, + fulfilmentTimestamp: 0, + fulfilmentBlockNumber: 0, + native: false + }); + s_requestCount++; + requestHeights[requestId] = requestBlockNumber; + emit WrapperRequestMade(requestId, paid); + } + } + + function makeRequestsNative( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords, + uint16 _requestCount + ) external onlyOwner { + for (uint16 i = 0; i < _requestCount; i++) { + bytes memory extraArgs = VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: true})); + (uint256 requestId, uint256 paid) = requestRandomnessPayInNative( + _callbackGasLimit, + _requestConfirmations, + _numWords, + extraArgs + ); + s_lastRequestId = requestId; + + uint256 requestBlockNumber = ChainSpecificUtil._getBlockNumber(); + s_requests[requestId] = RequestStatus({ + paid: paid, + fulfilled: false, + randomWords: new uint256[](0), + requestTimestamp: block.timestamp, + requestBlockNumber: requestBlockNumber, + fulfilmentTimestamp: 0, + fulfilmentBlockNumber: 0, + native: true + }); + s_requestCount++; + requestHeights[requestId] = requestBlockNumber; + emit WrapperRequestMade(requestId, paid); + } + } + + // solhint-disable-next-line 
plugin-solidity/prefix-internal-functions-with-underscore + function fulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) internal override { + // solhint-disable-next-line custom-errors + require(s_requests[_requestId].paid > 0, "request not found"); + uint256 fulfilmentBlockNumber = ChainSpecificUtil._getBlockNumber(); + uint256 requestDelay = fulfilmentBlockNumber - requestHeights[_requestId]; + uint256 requestDelayInMillions = requestDelay * 1_000_000; + + if (requestDelay > s_slowestFulfillment) { + s_slowestFulfillment = requestDelay; + } + s_fastestFulfillment = requestDelay < s_fastestFulfillment ? requestDelay : s_fastestFulfillment; + s_averageFulfillmentInMillions = s_responseCount > 0 + ? (s_averageFulfillmentInMillions * s_responseCount + requestDelayInMillions) / (s_responseCount + 1) + : requestDelayInMillions; + + s_responseCount++; + s_requests[_requestId].fulfilled = true; + s_requests[_requestId].randomWords = _randomWords; + s_requests[_requestId].fulfilmentTimestamp = block.timestamp; + s_requests[_requestId].fulfilmentBlockNumber = fulfilmentBlockNumber; + + emit WrappedRequestFulfilled(_requestId, _randomWords, s_requests[_requestId].paid); + } + + function getRequestStatus( + uint256 _requestId + ) + external + view + returns ( + uint256 paid, + bool fulfilled, + uint256[] memory randomWords, + uint256 requestTimestamp, + uint256 fulfilmentTimestamp, + uint256 requestBlockNumber, + uint256 fulfilmentBlockNumber + ) + { + // solhint-disable-next-line custom-errors + require(s_requests[_requestId].paid > 0, "request not found"); + RequestStatus memory request = s_requests[_requestId]; + return ( + request.paid, + request.fulfilled, + request.randomWords, + request.requestTimestamp, + request.fulfilmentTimestamp, + request.requestBlockNumber, + request.fulfilmentBlockNumber + ); + } + + function reset() external { + s_averageFulfillmentInMillions = 0; // in millions for better precision + s_slowestFulfillment = 0; + 
s_fastestFulfillment = 999; + s_requestCount = 0; + s_responseCount = 0; + } + + /// @notice withdrawLink withdraws the amount specified in amount to the owner + /// @param amount the amount to withdraw, in juels + function withdrawLink(uint256 amount) external onlyOwner { + s_linkToken.transfer(owner(), amount); + } + + /// @notice withdrawNative withdraws the amount specified in amount to the owner + /// @param amount the amount to withdraw, in wei + function withdrawNative(uint256 amount) external onlyOwner { + (bool success, ) = payable(owner()).call{value: amount}(""); + // solhint-disable-next-line custom-errors + require(success, "withdrawNative failed"); + } + + receive() external payable {} +} diff --git a/contracts/src/v0.8/vrf/interfaces/BlockhashStoreInterface.sol b/contracts/src/v0.8/vrf/interfaces/BlockhashStoreInterface.sol new file mode 100644 index 00000000..81775570 --- /dev/null +++ b/contracts/src/v0.8/vrf/interfaces/BlockhashStoreInterface.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface BlockhashStoreInterface { + function getBlockhash(uint256 number) external view returns (bytes32); +} diff --git a/contracts/src/v0.8/vrf/interfaces/IAuthorizedReceiver.sol b/contracts/src/v0.8/vrf/interfaces/IAuthorizedReceiver.sol new file mode 100644 index 00000000..78140d58 --- /dev/null +++ b/contracts/src/v0.8/vrf/interfaces/IAuthorizedReceiver.sol @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface IAuthorizedReceiver { + function isAuthorizedSender(address sender) external view returns (bool); + + function getAuthorizedSenders() external returns (address[] memory); + + function setAuthorizedSenders(address[] calldata senders) external; +} diff --git a/contracts/src/v0.8/vrf/interfaces/VRFCoordinatorV2Interface.sol b/contracts/src/v0.8/vrf/interfaces/VRFCoordinatorV2Interface.sol new file mode 100644 index 00000000..299b4e18 --- /dev/null +++ 
b/contracts/src/v0.8/vrf/interfaces/VRFCoordinatorV2Interface.sol @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface VRFCoordinatorV2Interface { + /** + * @notice Get configuration relevant for making requests + * @return minimumRequestConfirmations global min for request confirmations + * @return maxGasLimit global max for request gas limit + * @return s_provingKeyHashes list of registered key hashes + */ + function getRequestConfig() external view returns (uint16, uint32, bytes32[] memory); + + /** + * @notice Request a set of random words. + * @param keyHash - Corresponds to a particular oracle job which uses + * that key for generating the VRF proof. Different keyHash's have different gas price + * ceilings, so you can select a specific one to bound your maximum per request cost. + * @param subId - The ID of the VRF subscription. Must be funded + * with the minimum subscription balance required for the selected keyHash. + * @param minimumRequestConfirmations - How many blocks you'd like the + * oracle to wait before responding to the request. See SECURITY CONSIDERATIONS + * for why you may want to request more. The acceptable range is + * [minimumRequestBlockConfirmations, 200]. + * @param callbackGasLimit - How much gas you'd like to receive in your + * fulfillRandomWords callback. Note that gasleft() inside fulfillRandomWords + * may be slightly less than this amount because of gas used calling the function + * (argument decoding etc.), so you may need to request slightly more than you expect + * to have inside fulfillRandomWords. The acceptable range is + * [0, maxGasLimit] + * @param numWords - The number of uint256 random values you'd like to receive + * in your fulfillRandomWords callback. Note these numbers are expanded in a + * secure way by the VRFCoordinator from a single random value supplied by the oracle. + * @return requestId - A unique identifier of the request. 
Can be used to match + * a request to a response in fulfillRandomWords. + */ + function requestRandomWords( + bytes32 keyHash, + uint64 subId, + uint16 minimumRequestConfirmations, + uint32 callbackGasLimit, + uint32 numWords + ) external returns (uint256 requestId); + + /** + * @notice Create a VRF subscription. + * @return subId - A unique subscription id. + * @dev You can manage the consumer set dynamically with addConsumer/removeConsumer. + * @dev Note to fund the subscription, use transferAndCall. For example + * @dev PLITOKEN.transferAndCall( + * @dev address(COORDINATOR), + * @dev amount, + * @dev abi.encode(subId)); + */ + function createSubscription() external returns (uint64 subId); + + /** + * @notice Get a VRF subscription. + * @param subId - ID of the subscription + * @return balance - PLI balance of the subscription in juels. + * @return reqCount - number of requests for this subscription, determines fee tier. + * @return owner - owner of the subscription. + * @return consumers - list of consumer address which are able to use this subscription. + */ + function getSubscription( + uint64 subId + ) external view returns (uint96 balance, uint64 reqCount, address owner, address[] memory consumers); + + /** + * @notice Request subscription owner transfer. + * @param subId - ID of the subscription + * @param newOwner - proposed new owner of the subscription + */ + function requestSubscriptionOwnerTransfer(uint64 subId, address newOwner) external; + + /** + * @notice Request subscription owner transfer. + * @param subId - ID of the subscription + * @dev will revert if original owner of subId has + * not requested that msg.sender become the new owner. + */ + function acceptSubscriptionOwnerTransfer(uint64 subId) external; + + /** + * @notice Add a consumer to a VRF subscription. 
+ * @param subId - ID of the subscription + * @param consumer - New consumer which can use the subscription + */ + function addConsumer(uint64 subId, address consumer) external; + + /** + * @notice Remove a consumer from a VRF subscription. + * @param subId - ID of the subscription + * @param consumer - Consumer to remove from the subscription + */ + function removeConsumer(uint64 subId, address consumer) external; + + /** + * @notice Cancel a subscription + * @param subId - ID of the subscription + * @param to - Where to send the remaining PLI to + */ + function cancelSubscription(uint64 subId, address to) external; + + /* + * @notice Check to see if there exists a request commitment + * for all consumers and keyhashes for a given sub. + * @param subId - ID of the subscription + * @return true if there exists at least one unfulfilled request for the subscription, false + * otherwise. + */ + function pendingRequestExists(uint64 subId) external view returns (bool); +} diff --git a/contracts/src/v0.8/vrf/interfaces/VRFV2WrapperInterface.sol b/contracts/src/v0.8/vrf/interfaces/VRFV2WrapperInterface.sol new file mode 100644 index 00000000..71dbfb6b --- /dev/null +++ b/contracts/src/v0.8/vrf/interfaces/VRFV2WrapperInterface.sol @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface VRFV2WrapperInterface { + /** + * @return the request ID of the most recent VRF V2 request made by this wrapper. This should only + * be relied upon within the same transaction that the request was made. + */ + function lastRequestId() external view returns (uint256); + + /** + * @notice Calculates the price of a VRF request with the given callbackGasLimit at the current + * @notice block. + * + * @dev This function relies on the transaction gas price which is not automatically set during + * @dev simulation. To estimate the price at a specific gas price, use the estimateRequestPrice function. 
+ * + * @param _callbackGasLimit is the gas limit used to estimate the price. + */ + function calculateRequestPrice(uint32 _callbackGasLimit) external view returns (uint256); + + /** + * @notice Estimates the price of a VRF request with a specific gas limit and gas price. + * + * @dev This is a convenience function that can be called in simulation to better understand + * @dev pricing. + * + * @param _callbackGasLimit is the gas limit used to estimate the price. + * @param _requestGasPriceWei is the gas price in wei used for the estimation. + */ + function estimateRequestPrice(uint32 _callbackGasLimit, uint256 _requestGasPriceWei) external view returns (uint256); +} diff --git a/contracts/src/v0.8/vrf/mocks/VRFCoordinatorMock.sol b/contracts/src/v0.8/vrf/mocks/VRFCoordinatorMock.sol new file mode 100644 index 00000000..cde17df9 --- /dev/null +++ b/contracts/src/v0.8/vrf/mocks/VRFCoordinatorMock.sol @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {VRFConsumerBase} from "../../vrf/VRFConsumerBase.sol"; + +// solhint-disable custom-errors + +contract VRFCoordinatorMock { + LinkTokenInterface public PLI; + + event RandomnessRequest(address indexed sender, bytes32 indexed keyHash, uint256 indexed seed, uint256 fee); + + constructor(address linkAddress) { + PLI = LinkTokenInterface(linkAddress); + } + + function onTokenTransfer(address sender, uint256 fee, bytes memory _data) public onlyPLI { + (bytes32 keyHash, uint256 seed) = abi.decode(_data, (bytes32, uint256)); + emit RandomnessRequest(sender, keyHash, seed, fee); + } + + function callBackWithRandomness(bytes32 requestId, uint256 randomness, address consumerContract) public { + VRFConsumerBase v; + bytes memory resp = abi.encodeWithSelector(v.rawFulfillRandomness.selector, requestId, randomness); + uint256 b = 206000; + require(gasleft() >= b, "not enough gas for consumer"); + // 
solhint-disable-next-line avoid-low-level-calls, no-unused-vars + (bool success, ) = consumerContract.call(resp); + } + + modifier onlyPLI() { + require(msg.sender == address(PLI), "Must use PLI token"); + _; + } +} diff --git a/contracts/src/v0.8/vrf/mocks/VRFCoordinatorV2Mock.sol b/contracts/src/v0.8/vrf/mocks/VRFCoordinatorV2Mock.sol new file mode 100644 index 00000000..c2fb1b94 --- /dev/null +++ b/contracts/src/v0.8/vrf/mocks/VRFCoordinatorV2Mock.sol @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: MIT +// A mock for testing code that relies on VRFCoordinatorV2. +pragma solidity ^0.8.4; + +import {VRFCoordinatorV2Interface} from "../interfaces/VRFCoordinatorV2Interface.sol"; +import {VRFConsumerBaseV2} from "../VRFConsumerBaseV2.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; + +// solhint-disable plugin-solidity/prefix-immutable-variables-with-i +// solhint-disable custom-errors +// solhint-disable avoid-low-level-calls + +contract VRFCoordinatorV2Mock is VRFCoordinatorV2Interface, ConfirmedOwner { + uint96 public immutable BASE_FEE; + uint96 public immutable GAS_PRICE_PLI; + uint16 public immutable MAX_CONSUMERS = 100; + + error InvalidSubscription(); + error InsufficientBalance(); + error MustBeSubOwner(address owner); + error TooManyConsumers(); + error InvalidConsumer(); + error InvalidRandomWords(); + error Reentrant(); + + event RandomWordsRequested( + bytes32 indexed keyHash, + uint256 requestId, + uint256 preSeed, + uint64 indexed subId, + uint16 minimumRequestConfirmations, + uint32 callbackGasLimit, + uint32 numWords, + address indexed sender + ); + event RandomWordsFulfilled(uint256 indexed requestId, uint256 outputSeed, uint96 payment, bool success); + event SubscriptionCreated(uint64 indexed subId, address owner); + event SubscriptionFunded(uint64 indexed subId, uint256 oldBalance, uint256 newBalance); + event SubscriptionCanceled(uint64 indexed subId, address to, uint256 amount); + event ConsumerAdded(uint64 indexed 
subId, address consumer); + event ConsumerRemoved(uint64 indexed subId, address consumer); + event ConfigSet(); + + struct Config { + // Reentrancy protection. + bool reentrancyLock; + } + Config private s_config; + uint64 internal s_currentSubId; + uint256 internal s_nextRequestId = 1; + uint256 internal s_nextPreSeed = 100; + struct Subscription { + address owner; + uint96 balance; + } + mapping(uint64 => Subscription) internal s_subscriptions; /* subId */ /* subscription */ + mapping(uint64 => address[]) internal s_consumers; /* subId */ /* consumers */ + + struct Request { + uint64 subId; + uint32 callbackGasLimit; + uint32 numWords; + } + mapping(uint256 => Request) internal s_requests; /* requestId */ /* request */ + + constructor(uint96 _baseFee, uint96 _gasPriceLink) ConfirmedOwner(msg.sender) { + BASE_FEE = _baseFee; + GAS_PRICE_PLI = _gasPriceLink; + setConfig(); + } + + /** + * @notice Sets the configuration of the vrfv2 mock coordinator + */ + function setConfig() public onlyOwner { + s_config = Config({reentrancyLock: false}); + emit ConfigSet(); + } + + function consumerIsAdded(uint64 _subId, address _consumer) public view returns (bool) { + address[] memory consumers = s_consumers[_subId]; + for (uint256 i = 0; i < consumers.length; i++) { + if (consumers[i] == _consumer) { + return true; + } + } + return false; + } + + modifier onlyValidConsumer(uint64 _subId, address _consumer) { + if (!consumerIsAdded(_subId, _consumer)) { + revert InvalidConsumer(); + } + _; + } + + /** + * @notice fulfillRandomWords fulfills the given request, sending the random words to the supplied + * @notice consumer. + * + * @dev This mock uses a simplified formula for calculating payment amount and gas usage, and does + * @dev not account for all edge cases handled in the real VRF coordinator. When making requests + * @dev against the real coordinator a small amount of additional PLI is required. 
+ * + * @param _requestId the request to fulfill + * @param _consumer the VRF randomness consumer to send the result to + */ + function fulfillRandomWords(uint256 _requestId, address _consumer) external nonReentrant { + fulfillRandomWordsWithOverride(_requestId, _consumer, new uint256[](0)); + } + + /** + * @notice fulfillRandomWordsWithOverride allows the user to pass in their own random words. + * + * @param _requestId the request to fulfill + * @param _consumer the VRF randomness consumer to send the result to + * @param _words user-provided random words + */ + function fulfillRandomWordsWithOverride(uint256 _requestId, address _consumer, uint256[] memory _words) public { + uint256 startGas = gasleft(); + if (s_requests[_requestId].subId == 0) { + revert("nonexistent request"); + } + Request memory req = s_requests[_requestId]; + + if (_words.length == 0) { + _words = new uint256[](req.numWords); + for (uint256 i = 0; i < req.numWords; i++) { + _words[i] = uint256(keccak256(abi.encode(_requestId, i))); + } + } else if (_words.length != req.numWords) { + revert InvalidRandomWords(); + } + + VRFConsumerBaseV2 v; + bytes memory callReq = abi.encodeWithSelector(v.rawFulfillRandomWords.selector, _requestId, _words); + s_config.reentrancyLock = true; + (bool success, ) = _consumer.call{gas: req.callbackGasLimit}(callReq); + s_config.reentrancyLock = false; + + uint96 payment = uint96(BASE_FEE + ((startGas - gasleft()) * GAS_PRICE_PLI)); + if (s_subscriptions[req.subId].balance < payment) { + revert InsufficientBalance(); + } + s_subscriptions[req.subId].balance -= payment; + delete (s_requests[_requestId]); + emit RandomWordsFulfilled(_requestId, _requestId, payment, success); + } + + /** + * @notice fundSubscription allows funding a subscription with an arbitrary amount for testing. 
+ * + * @param _subId the subscription to fund + * @param _amount the amount to fund + */ + function fundSubscription(uint64 _subId, uint96 _amount) public { + if (s_subscriptions[_subId].owner == address(0)) { + revert InvalidSubscription(); + } + uint96 oldBalance = s_subscriptions[_subId].balance; + s_subscriptions[_subId].balance += _amount; + emit SubscriptionFunded(_subId, oldBalance, oldBalance + _amount); + } + + function requestRandomWords( + bytes32 _keyHash, + uint64 _subId, + uint16 _minimumRequestConfirmations, + uint32 _callbackGasLimit, + uint32 _numWords + ) external override nonReentrant onlyValidConsumer(_subId, msg.sender) returns (uint256) { + if (s_subscriptions[_subId].owner == address(0)) { + revert InvalidSubscription(); + } + + uint256 requestId = s_nextRequestId++; + uint256 preSeed = s_nextPreSeed++; + + s_requests[requestId] = Request({subId: _subId, callbackGasLimit: _callbackGasLimit, numWords: _numWords}); + + emit RandomWordsRequested( + _keyHash, + requestId, + preSeed, + _subId, + _minimumRequestConfirmations, + _callbackGasLimit, + _numWords, + msg.sender + ); + return requestId; + } + + function createSubscription() external override returns (uint64 _subId) { + s_currentSubId++; + s_subscriptions[s_currentSubId] = Subscription({owner: msg.sender, balance: 0}); + emit SubscriptionCreated(s_currentSubId, msg.sender); + return s_currentSubId; + } + + function getSubscription( + uint64 _subId + ) external view override returns (uint96 balance, uint64 reqCount, address owner, address[] memory consumers) { + if (s_subscriptions[_subId].owner == address(0)) { + revert InvalidSubscription(); + } + return (s_subscriptions[_subId].balance, 0, s_subscriptions[_subId].owner, s_consumers[_subId]); + } + + function cancelSubscription(uint64 _subId, address _to) external override onlySubOwner(_subId) nonReentrant { + emit SubscriptionCanceled(_subId, _to, s_subscriptions[_subId].balance); + delete (s_subscriptions[_subId]); + } + + modifier 
onlySubOwner(uint64 _subId) { + address owner = s_subscriptions[_subId].owner; + if (owner == address(0)) { + revert InvalidSubscription(); + } + if (msg.sender != owner) { + revert MustBeSubOwner(owner); + } + _; + } + + function getRequestConfig() external pure override returns (uint16, uint32, bytes32[] memory) { + return (3, 2000000, new bytes32[](0)); + } + + function addConsumer(uint64 _subId, address _consumer) external override onlySubOwner(_subId) { + if (s_consumers[_subId].length == MAX_CONSUMERS) { + revert TooManyConsumers(); + } + + if (consumerIsAdded(_subId, _consumer)) { + return; + } + + s_consumers[_subId].push(_consumer); + emit ConsumerAdded(_subId, _consumer); + } + + function removeConsumer( + uint64 _subId, + address _consumer + ) external override onlySubOwner(_subId) onlyValidConsumer(_subId, _consumer) nonReentrant { + address[] storage consumers = s_consumers[_subId]; + for (uint256 i = 0; i < consumers.length; i++) { + if (consumers[i] == _consumer) { + address last = consumers[consumers.length - 1]; + consumers[i] = last; + consumers.pop(); + break; + } + } + + emit ConsumerRemoved(_subId, _consumer); + } + + function getConfig() + external + pure + returns ( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation + ) + { + return (4, 2_500_000, 2_700, 33285); + } + + function getFeeConfig() + external + pure + returns ( + uint32 fulfillmentFlatFeeLinkPPMTier1, + uint32 fulfillmentFlatFeeLinkPPMTier2, + uint32 fulfillmentFlatFeeLinkPPMTier3, + uint32 fulfillmentFlatFeeLinkPPMTier4, + uint32 fulfillmentFlatFeeLinkPPMTier5, + uint24 reqsForTier2, + uint24 reqsForTier3, + uint24 reqsForTier4, + uint24 reqsForTier5 + ) + { + return ( + 100000, // 0.1 PLI + 100000, // 0.1 PLI + 100000, // 0.1 PLI + 100000, // 0.1 PLI + 100000, // 0.1 PLI + 0, + 0, + 0, + 0 + ); + } + + modifier nonReentrant() { + if (s_config.reentrancyLock) { + revert Reentrant(); + } + _; + } + + function 
getFallbackWeiPerUnitLink() external pure returns (int256) { + return 4000000000000000; // 0.004 Ether + } + + function requestSubscriptionOwnerTransfer(uint64 /*_subId*/, address /*_newOwner*/) external pure override { + revert("not implemented"); + } + + function acceptSubscriptionOwnerTransfer(uint64 /*_subId*/) external pure override { + revert("not implemented"); + } + + function pendingRequestExists(uint64 /*subId*/) public pure override returns (bool) { + revert("not implemented"); + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/ChainSpecificUtilHelper.sol b/contracts/src/v0.8/vrf/testhelpers/ChainSpecificUtilHelper.sol new file mode 100644 index 00000000..a594e026 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/ChainSpecificUtilHelper.sol @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "../../ChainSpecificUtil.sol"; + +/// @dev A helper contract that exposes ChainSpecificUtil methods for testing +contract ChainSpecificUtilHelper { + function getBlockhash(uint64 blockNumber) external view returns (bytes32) { + return ChainSpecificUtil._getBlockhash(blockNumber); + } + + function getBlockNumber() external view returns (uint256) { + return ChainSpecificUtil._getBlockNumber(); + } + + function getCurrentTxL1GasFees(string memory txCallData) external view returns (uint256) { + return ChainSpecificUtil._getCurrentTxL1GasFees(bytes(txCallData)); + } + + function getL1CalldataGasCost(uint256 calldataSize) external view returns (uint256) { + return ChainSpecificUtil._getL1CalldataGasCost(calldataSize); + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFConsumer.sol b/contracts/src/v0.8/vrf/testhelpers/VRFConsumer.sol new file mode 100644 index 00000000..eaac0be1 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFConsumer.sol @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {VRFConsumerBase} from "../VRFConsumerBase.sol"; + +contract VRFConsumer is VRFConsumerBase { + 
uint256 public randomnessOutput;
+  bytes32 public requestId;
+
+  constructor(
+    address vrfCoordinator,
+    address link
+  )
+    // solhint-disable-next-line no-empty-blocks
+    VRFConsumerBase(vrfCoordinator, link)
+  {
+    /* empty */
+  }
+
+  function fulfillRandomness(bytes32 _requestId, uint256 randomness) internal override {
+    randomnessOutput = randomness;
+    requestId = _requestId;
+  }
+
+  function doRequestRandomness(bytes32 keyHash, uint256 fee) external returns (bytes32) {
+    return requestRandomness(keyHash, fee);
+  }
+}
diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFConsumerV2.sol b/contracts/src/v0.8/vrf/testhelpers/VRFConsumerV2.sol
new file mode 100644
index 00000000..0abb46c5
--- /dev/null
+++ b/contracts/src/v0.8/vrf/testhelpers/VRFConsumerV2.sol
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.0;
+
+import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol";
+import {VRFCoordinatorV2Interface} from "../interfaces/VRFCoordinatorV2Interface.sol";
+import {VRFConsumerBaseV2} from "../VRFConsumerBaseV2.sol";
+
+contract VRFConsumerV2 is VRFConsumerBaseV2 {
+  uint256[] public s_randomWords;
+  uint256 public s_requestId;
+  VRFCoordinatorV2Interface internal COORDINATOR;
+  LinkTokenInterface internal PLITOKEN;
+  uint64 public s_subId;
+  uint256 public s_gasAvailable;
+
+  constructor(address vrfCoordinator, address link) VRFConsumerBaseV2(vrfCoordinator) {
+    COORDINATOR = VRFCoordinatorV2Interface(vrfCoordinator);
+    PLITOKEN = LinkTokenInterface(link);
+  }
+
+  function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal override {
+    require(requestId == s_requestId, "request ID is incorrect");
+
+    s_gasAvailable = gasleft();
+    s_randomWords = randomWords;
+  }
+
+  function createSubscriptionAndFund(uint96 amount) external {
+    if (s_subId == 0) {
+      s_subId = COORDINATOR.createSubscription();
+      COORDINATOR.addConsumer(s_subId, address(this));
+    }
+    // Approve the link transfer.
+ PLITOKEN.transferAndCall(address(COORDINATOR), amount, abi.encode(s_subId)); + } + + function topUpSubscription(uint96 amount) external { + require(s_subId != 0, "sub not set"); + // Approve the link transfer. + PLITOKEN.transferAndCall(address(COORDINATOR), amount, abi.encode(s_subId)); + } + + function updateSubscription(address[] memory consumers) external { + require(s_subId != 0, "subID not set"); + for (uint256 i = 0; i < consumers.length; i++) { + COORDINATOR.addConsumer(s_subId, consumers[i]); + } + } + + function requestRandomness( + bytes32 keyHash, + uint64 subId, + uint16 minReqConfs, + uint32 callbackGasLimit, + uint32 numWords + ) external returns (uint256) { + s_requestId = COORDINATOR.requestRandomWords(keyHash, subId, minReqConfs, callbackGasLimit, numWords); + return s_requestId; + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFConsumerV2UpgradeableExample.sol b/contracts/src/v0.8/vrf/testhelpers/VRFConsumerV2UpgradeableExample.sol new file mode 100644 index 00000000..e893fb45 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFConsumerV2UpgradeableExample.sol @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {VRFCoordinatorV2Interface} from "../interfaces/VRFCoordinatorV2Interface.sol"; +import {VRFConsumerBaseV2Upgradeable} from "../dev/VRFConsumerBaseV2Upgradeable.sol"; +import {Initializable} from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; + +contract VRFConsumerV2UpgradeableExample is Initializable, VRFConsumerBaseV2Upgradeable { + uint256[] public s_randomWords; + uint256 public s_requestId; + VRFCoordinatorV2Interface public COORDINATOR; + LinkTokenInterface public PLITOKEN; + uint64 public s_subId; + uint256 public s_gasAvailable; + + function initialize(address _vrfCoordinator, address _link) public initializer { + __VRFConsumerBaseV2_init(_vrfCoordinator); + COORDINATOR = 
VRFCoordinatorV2Interface(_vrfCoordinator); + PLITOKEN = LinkTokenInterface(_link); + } + + function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal override { + require(requestId == s_requestId, "request ID is incorrect"); + + s_gasAvailable = gasleft(); + s_randomWords = randomWords; + } + + function createSubscriptionAndFund(uint96 amount) external { + if (s_subId == 0) { + s_subId = COORDINATOR.createSubscription(); + COORDINATOR.addConsumer(s_subId, address(this)); + } + // Approve the link transfer. + PLITOKEN.transferAndCall(address(COORDINATOR), amount, abi.encode(s_subId)); + } + + function topUpSubscription(uint96 amount) external { + require(s_subId != 0, "sub not set"); + // Approve the link transfer. + PLITOKEN.transferAndCall(address(COORDINATOR), amount, abi.encode(s_subId)); + } + + function updateSubscription(address[] memory consumers) external { + require(s_subId != 0, "subID not set"); + for (uint256 i = 0; i < consumers.length; i++) { + COORDINATOR.addConsumer(s_subId, consumers[i]); + } + } + + function requestRandomness( + bytes32 keyHash, + uint64 subId, + uint16 minReqConfs, + uint32 callbackGasLimit, + uint32 numWords + ) external returns (uint256) { + s_requestId = COORDINATOR.requestRandomWords(keyHash, subId, minReqConfs, callbackGasLimit, numWords); + return s_requestId; + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFCoordinatorTestV2.sol b/contracts/src/v0.8/vrf/testhelpers/VRFCoordinatorTestV2.sol new file mode 100644 index 00000000..cee16dd0 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFCoordinatorTestV2.sol @@ -0,0 +1,837 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {BlockhashStoreInterface} from "../interfaces/BlockhashStoreInterface.sol"; +import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol"; +import {VRFCoordinatorV2Interface} from 
"../interfaces/VRFCoordinatorV2Interface.sol"; +import {TypeAndVersionInterface} from "../../interfaces/TypeAndVersionInterface.sol"; +import {IERC677Receiver} from "../../shared/interfaces/IERC677Receiver.sol"; +import {VRF} from "../VRF.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {VRFConsumerBaseV2} from "../VRFConsumerBaseV2.sol"; + +contract VRFCoordinatorTestV2 is + VRF, + ConfirmedOwner, + TypeAndVersionInterface, + VRFCoordinatorV2Interface, + IERC677Receiver +{ + LinkTokenInterface public immutable PLI; + AggregatorV3Interface public immutable PLI_ETH_FEED; + BlockhashStoreInterface public immutable BLOCKHASH_STORE; + + // We need to maintain a list of consuming addresses. + // This bound ensures we are able to loop over them as needed. + // Should a user require more consumers, they can use multiple subscriptions. + uint16 public constant MAX_CONSUMERS = 100; + + error TooManyConsumers(); + error InsufficientBalance(); + error InvalidConsumer(uint64 subId, address consumer); + error InvalidSubscription(); + error OnlyCallableFromLink(); + error InvalidCalldata(); + error MustBeSubOwner(address owner); + error PendingRequestExists(); + error MustBeRequestedOwner(address proposedOwner); + error BalanceInvariantViolated(uint256 internalBalance, uint256 externalBalance); // Should never happen + event FundsRecovered(address to, uint256 amount); + // We use the subscription struct (1 word) + // at fulfillment time. + struct Subscription { + // There are only 1e9*1e18 = 1e27 juels in existence, so the balance can fit in uint96 (2^96 ~ 7e28) + uint96 balance; // Common link balance used for all consumer requests. + uint64 reqCount; // For fee tiers + } + // We use the config for the mgmt APIs + struct SubscriptionConfig { + address owner; // Owner can fund/withdraw/cancel the sub. + address requestedOwner; // For safely transferring sub ownership. + // Maintains the list of keys in s_consumers. 
+ // We do this for 2 reasons: + // 1. To be able to clean up all keys from s_consumers when canceling a subscription. + // 2. To be able to return the list of all consumers in getSubscription. + // Note that we need the s_consumers map to be able to directly check if a + // consumer is valid without reading all the consumers from storage. + address[] consumers; + } + // Note a nonce of 0 indicates an the consumer is not assigned to that subscription. + mapping(address => mapping(uint64 => uint64)) /* consumer */ /* subId */ /* nonce */ private s_consumers; + mapping(uint64 => SubscriptionConfig) /* subId */ /* subscriptionConfig */ private s_subscriptionConfigs; + mapping(uint64 => Subscription) /* subId */ /* subscription */ private s_subscriptions; + // We make the sub count public so that its possible to + // get all the current subscriptions via getSubscription. + uint64 private s_currentSubId; + // s_totalBalance tracks the total link sent to/from + // this contract through onTokenTransfer, cancelSubscription and oracleWithdraw. + // A discrepancy with this contract's link balance indicates someone + // sent tokens using transfer and so we may need to use recoverFunds. + uint96 private s_totalBalance; + event SubscriptionCreated(uint64 indexed subId, address owner); + event SubscriptionFunded(uint64 indexed subId, uint256 oldBalance, uint256 newBalance); + event SubscriptionConsumerAdded(uint64 indexed subId, address consumer); + event SubscriptionConsumerRemoved(uint64 indexed subId, address consumer); + event SubscriptionCanceled(uint64 indexed subId, address to, uint256 amount); + event SubscriptionOwnerTransferRequested(uint64 indexed subId, address from, address to); + event SubscriptionOwnerTransferred(uint64 indexed subId, address from, address to); + + // Set this maximum to 200 to give us a 56 block window to fulfill + // the request before requiring the block hash feeder. 
+ uint16 public constant MAX_REQUEST_CONFIRMATIONS = 200; + uint32 public constant MAX_NUM_WORDS = 500; + // 5k is plenty for an EXTCODESIZE call (2600) + warm CALL (100) + // and some arithmetic operations. + uint256 private constant GAS_FOR_CALL_EXACT_CHECK = 5_000; + error InvalidRequestConfirmations(uint16 have, uint16 min, uint16 max); + error GasLimitTooBig(uint32 have, uint32 want); + error NumWordsTooBig(uint32 have, uint32 want); + error ProvingKeyAlreadyRegistered(bytes32 keyHash); + error NoSuchProvingKey(bytes32 keyHash); + error InvalidLinkWeiPrice(int256 linkWei); + error InsufficientGasForConsumer(uint256 have, uint256 want); + error NoCorrespondingRequest(); + error IncorrectCommitment(); + error BlockhashNotInStore(uint256 blockNum); + error PaymentTooLarge(); + error Reentrant(); + + struct RequestCommitment { + uint64 blockNum; + uint64 subId; + uint32 callbackGasLimit; + uint32 numWords; + address sender; + } + mapping(bytes32 => address) /* keyHash */ /* oracle */ private s_provingKeys; + bytes32[] private s_provingKeyHashes; + mapping(address => uint96) /* oracle */ /* PLI balance */ private s_withdrawableTokens; + mapping(uint256 => bytes32) /* requestID */ /* commitment */ private s_requestCommitments; + event ProvingKeyRegistered(bytes32 keyHash, address indexed oracle); + event ProvingKeyDeregistered(bytes32 keyHash, address indexed oracle); + event RandomWordsRequested( + bytes32 indexed keyHash, + uint256 requestId, + uint256 preSeed, + uint64 indexed subId, + uint16 minimumRequestConfirmations, + uint32 callbackGasLimit, + uint32 numWords, + address indexed sender + ); + event RandomWordsFulfilled(uint256 indexed requestId, uint256 outputSeed, uint96 payment, bool success); + + struct Config { + uint16 minimumRequestConfirmations; + uint32 maxGasLimit; + // Reentrancy protection. + bool reentrancyLock; + // stalenessSeconds is how long before we consider the feed price to be stale + // and fallback to fallbackWeiPerUnitLink. 
+ uint32 stalenessSeconds; + // Gas to cover oracle payment after we calculate the payment. + // We make it configurable in case those operations are repriced. + uint32 gasAfterPaymentCalculation; + } + int256 private s_fallbackWeiPerUnitLink; + Config private s_config; + FeeConfig private s_feeConfig; + struct FeeConfig { + // Flat fee charged per fulfillment in millionths of link + // So fee range is [0, 2^32/10^6]. + uint32 fulfillmentFlatFeeLinkPPMTier1; + uint32 fulfillmentFlatFeeLinkPPMTier2; + uint32 fulfillmentFlatFeeLinkPPMTier3; + uint32 fulfillmentFlatFeeLinkPPMTier4; + uint32 fulfillmentFlatFeeLinkPPMTier5; + uint24 reqsForTier2; + uint24 reqsForTier3; + uint24 reqsForTier4; + uint24 reqsForTier5; + } + event ConfigSet( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation, + int256 fallbackWeiPerUnitLink, + FeeConfig feeConfig + ); + + constructor(address link, address blockhashStore, address linkEthFeed) ConfirmedOwner(msg.sender) { + PLI = LinkTokenInterface(link); + PLI_ETH_FEED = AggregatorV3Interface(linkEthFeed); + BLOCKHASH_STORE = BlockhashStoreInterface(blockhashStore); + } + + /** + * @notice Registers a proving key to an oracle. + * @param oracle address of the oracle + * @param publicProvingKey key that oracle can use to submit vrf fulfillments + */ + function registerProvingKey(address oracle, uint256[2] calldata publicProvingKey) external onlyOwner { + bytes32 kh = hashOfKey(publicProvingKey); + if (s_provingKeys[kh] != address(0)) { + revert ProvingKeyAlreadyRegistered(kh); + } + s_provingKeys[kh] = oracle; + s_provingKeyHashes.push(kh); + emit ProvingKeyRegistered(kh, oracle); + } + + /** + * @notice Deregisters a proving key to an oracle. 
+ * @param publicProvingKey key that oracle can use to submit vrf fulfillments + */ + function deregisterProvingKey(uint256[2] calldata publicProvingKey) external onlyOwner { + bytes32 kh = hashOfKey(publicProvingKey); + address oracle = s_provingKeys[kh]; + if (oracle == address(0)) { + revert NoSuchProvingKey(kh); + } + delete s_provingKeys[kh]; + for (uint256 i = 0; i < s_provingKeyHashes.length; i++) { + if (s_provingKeyHashes[i] == kh) { + bytes32 last = s_provingKeyHashes[s_provingKeyHashes.length - 1]; + // Copy last element and overwrite kh to be deleted with it + s_provingKeyHashes[i] = last; + s_provingKeyHashes.pop(); + } + } + emit ProvingKeyDeregistered(kh, oracle); + } + + /** + * @notice Returns the proving key hash key associated with this public key + * @param publicKey the key to return the hash of + */ + function hashOfKey(uint256[2] memory publicKey) public pure returns (bytes32) { + return keccak256(abi.encode(publicKey)); + } + + /** + * @notice Sets the configuration of the vrfv2 coordinator + * @param minimumRequestConfirmations global min for request confirmations + * @param maxGasLimit global max for request gas limit + * @param stalenessSeconds if the eth/link feed is more stale then this, use the fallback price + * @param gasAfterPaymentCalculation gas used in doing accounting after completing the gas measurement + * @param fallbackWeiPerUnitLink fallback eth/link price in the case of a stale feed + * @param feeConfig fee tier configuration + */ + function setConfig( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation, + int256 fallbackWeiPerUnitLink, + FeeConfig memory feeConfig + ) external onlyOwner { + if (minimumRequestConfirmations > MAX_REQUEST_CONFIRMATIONS) { + revert InvalidRequestConfirmations( + minimumRequestConfirmations, + minimumRequestConfirmations, + MAX_REQUEST_CONFIRMATIONS + ); + } + if (fallbackWeiPerUnitLink <= 0) { + revert 
InvalidLinkWeiPrice(fallbackWeiPerUnitLink); + } + s_config = Config({ + minimumRequestConfirmations: minimumRequestConfirmations, + maxGasLimit: maxGasLimit, + stalenessSeconds: stalenessSeconds, + gasAfterPaymentCalculation: gasAfterPaymentCalculation, + reentrancyLock: false + }); + s_feeConfig = feeConfig; + s_fallbackWeiPerUnitLink = fallbackWeiPerUnitLink; + emit ConfigSet( + minimumRequestConfirmations, + maxGasLimit, + stalenessSeconds, + gasAfterPaymentCalculation, + fallbackWeiPerUnitLink, + s_feeConfig + ); + } + + function getConfig() + external + view + returns ( + uint16 minimumRequestConfirmations, + uint32 maxGasLimit, + uint32 stalenessSeconds, + uint32 gasAfterPaymentCalculation + ) + { + return ( + s_config.minimumRequestConfirmations, + s_config.maxGasLimit, + s_config.stalenessSeconds, + s_config.gasAfterPaymentCalculation + ); + } + + function getFeeConfig() + external + view + returns ( + uint32 fulfillmentFlatFeeLinkPPMTier1, + uint32 fulfillmentFlatFeeLinkPPMTier2, + uint32 fulfillmentFlatFeeLinkPPMTier3, + uint32 fulfillmentFlatFeeLinkPPMTier4, + uint32 fulfillmentFlatFeeLinkPPMTier5, + uint24 reqsForTier2, + uint24 reqsForTier3, + uint24 reqsForTier4, + uint24 reqsForTier5 + ) + { + return ( + s_feeConfig.fulfillmentFlatFeeLinkPPMTier1, + s_feeConfig.fulfillmentFlatFeeLinkPPMTier2, + s_feeConfig.fulfillmentFlatFeeLinkPPMTier3, + s_feeConfig.fulfillmentFlatFeeLinkPPMTier4, + s_feeConfig.fulfillmentFlatFeeLinkPPMTier5, + s_feeConfig.reqsForTier2, + s_feeConfig.reqsForTier3, + s_feeConfig.reqsForTier4, + s_feeConfig.reqsForTier5 + ); + } + + function getTotalBalance() external view returns (uint256) { + return s_totalBalance; + } + + function getFallbackWeiPerUnitLink() external view returns (int256) { + return s_fallbackWeiPerUnitLink; + } + + /** + * @notice Owner cancel subscription, sends remaining link directly to the subscription owner. 
+ * @param subId subscription id + * @dev notably can be called even if there are pending requests, outstanding ones may fail onchain + */ + function ownerCancelSubscription(uint64 subId) external onlyOwner { + if (s_subscriptionConfigs[subId].owner == address(0)) { + revert InvalidSubscription(); + } + cancelSubscriptionHelper(subId, s_subscriptionConfigs[subId].owner); + } + + /** + * @notice Recover link sent with transfer instead of transferAndCall. + * @param to address to send link to + */ + function recoverFunds(address to) external onlyOwner { + uint256 externalBalance = PLI.balanceOf(address(this)); + uint256 internalBalance = uint256(s_totalBalance); + if (internalBalance > externalBalance) { + revert BalanceInvariantViolated(internalBalance, externalBalance); + } + if (internalBalance < externalBalance) { + uint256 amount = externalBalance - internalBalance; + PLI.transfer(to, amount); + emit FundsRecovered(to, amount); + } + // If the balances are equal, nothing to be done. + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function getRequestConfig() external view override returns (uint16, uint32, bytes32[] memory) { + return (s_config.minimumRequestConfirmations, s_config.maxGasLimit, s_provingKeyHashes); + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function requestRandomWords( + bytes32 keyHash, + uint64 subId, + uint16 requestConfirmations, + uint32 callbackGasLimit, + uint32 numWords + ) external override nonReentrant returns (uint256) { + // Input validation using the subscription storage. + if (s_subscriptionConfigs[subId].owner == address(0)) { + revert InvalidSubscription(); + } + // Its important to ensure that the consumer is in fact who they say they + // are, otherwise they could use someone else's subscription balance. + // A nonce of 0 indicates consumer is not allocated to the sub. 
+ uint64 currentNonce = s_consumers[msg.sender][subId]; + if (currentNonce == 0) { + revert InvalidConsumer(subId, msg.sender); + } + // Input validation using the config storage word. + if ( + requestConfirmations < s_config.minimumRequestConfirmations || requestConfirmations > MAX_REQUEST_CONFIRMATIONS + ) { + revert InvalidRequestConfirmations( + requestConfirmations, + s_config.minimumRequestConfirmations, + MAX_REQUEST_CONFIRMATIONS + ); + } + // No lower bound on the requested gas limit. A user could request 0 + // and they would simply be billed for the proof verification and wouldn't be + // able to do anything with the random value. + if (callbackGasLimit > s_config.maxGasLimit) { + revert GasLimitTooBig(callbackGasLimit, s_config.maxGasLimit); + } + if (numWords > MAX_NUM_WORDS) { + revert NumWordsTooBig(numWords, MAX_NUM_WORDS); + } + // Note we do not check whether the keyHash is valid to save gas. + // The consequence for users is that they can send requests + // for invalid keyHashes which will simply not be fulfilled. 
+ uint64 nonce = currentNonce + 1; + (uint256 requestId, uint256 preSeed) = computeRequestId(keyHash, msg.sender, subId, nonce); + + s_requestCommitments[requestId] = keccak256( + abi.encode(requestId, block.number, subId, callbackGasLimit, numWords, msg.sender) + ); + emit RandomWordsRequested( + keyHash, + requestId, + preSeed, + subId, + requestConfirmations, + callbackGasLimit, + numWords, + msg.sender + ); + s_consumers[msg.sender][subId] = nonce; + + return requestId; + } + + /** + * @notice Get request commitment + * @param requestId id of request + * @dev used to determine if a request is fulfilled or not + */ + function getCommitment(uint256 requestId) external view returns (bytes32) { + return s_requestCommitments[requestId]; + } + + function computeRequestId( + bytes32 keyHash, + address sender, + uint64 subId, + uint64 nonce + ) private pure returns (uint256, uint256) { + uint256 preSeed = uint256(keccak256(abi.encode(keyHash, sender, subId, nonce))); + return (uint256(keccak256(abi.encode(keyHash, preSeed))), preSeed); + } + + /** + * @dev calls target address with exactly gasAmount gas and data as calldata + * or reverts if at least gasAmount gas is not available. + */ + function callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { + // solhint-disable-next-line no-inline-assembly + assembly { + let g := gas() + // Compute g -= GAS_FOR_CALL_EXACT_CHECK and check for underflow + // The gas actually passed to the callee is min(gasAmount, 63//64*gas available). + // We want to ensure that we revert if gasAmount > 63//64*gas available + // as we do not want to provide them with less, however that check itself costs + // gas. GAS_FOR_CALL_EXACT_CHECK ensures we have at least enough gas to be able + // to revert if gasAmount > 63//64*gas available. 
+ if lt(g, GAS_FOR_CALL_EXACT_CHECK) { + revert(0, 0) + } + g := sub(g, GAS_FOR_CALL_EXACT_CHECK) + // if g - g//64 <= gasAmount, revert + // (we subtract g//64 because of EIP-150) + if iszero(gt(sub(g, div(g, 64)), gasAmount)) { + revert(0, 0) + } + // solidity calls check that a contract actually exists at the destination, so we do the same + if iszero(extcodesize(target)) { + revert(0, 0) + } + // call and return whether we succeeded. ignore return data + // call(gas,addr,value,argsOffset,argsLength,retOffset,retLength) + success := call(gasAmount, target, 0, add(data, 0x20), mload(data), 0, 0) + } + return success; + } + + function getRandomnessFromProof( + Proof memory proof, + RequestCommitment memory rc + ) private view returns (bytes32 keyHash, uint256 requestId, uint256 randomness) { + keyHash = hashOfKey(proof.pk); + // Only registered proving keys are permitted. + address oracle = s_provingKeys[keyHash]; + if (oracle == address(0)) { + revert NoSuchProvingKey(keyHash); + } + requestId = uint256(keccak256(abi.encode(keyHash, proof.seed))); + bytes32 commitment = s_requestCommitments[requestId]; + if (commitment == 0) { + revert NoCorrespondingRequest(); + } + if ( + commitment != keccak256(abi.encode(requestId, rc.blockNum, rc.subId, rc.callbackGasLimit, rc.numWords, rc.sender)) + ) { + revert IncorrectCommitment(); + } + + bytes32 blockHash = blockhash(rc.blockNum); + if (blockHash == bytes32(0)) { + blockHash = BLOCKHASH_STORE.getBlockhash(rc.blockNum); + if (blockHash == bytes32(0)) { + revert BlockhashNotInStore(rc.blockNum); + } + } + + // The seed actually used by the VRF machinery, mixing in the blockhash + uint256 actualSeed = uint256(keccak256(abi.encodePacked(proof.seed, blockHash))); + randomness = VRF._randomValueFromVRFProof(proof, actualSeed); // Reverts on failure + } + + /* + * @notice Compute fee based on the request count + * @param reqCount number of requests + * @return feePPM fee in PLI PPM + */ + function getFeeTier(uint64 reqCount) 
public view returns (uint32) { + FeeConfig memory fc = s_feeConfig; + if (reqCount <= fc.reqsForTier2) { + return fc.fulfillmentFlatFeeLinkPPMTier1; + } + if (fc.reqsForTier2 < reqCount && reqCount <= fc.reqsForTier3) { + return fc.fulfillmentFlatFeeLinkPPMTier2; + } + if (fc.reqsForTier3 < reqCount && reqCount <= fc.reqsForTier4) { + return fc.fulfillmentFlatFeeLinkPPMTier3; + } + if (fc.reqsForTier4 < reqCount && reqCount <= fc.reqsForTier5) { + return fc.fulfillmentFlatFeeLinkPPMTier4; + } + return fc.fulfillmentFlatFeeLinkPPMTier5; + } + + /* + * @notice Fulfill a randomness request + * @param proof contains the proof and randomness + * @param rc request commitment pre-image, committed to at request time + * @return payment amount billed to the subscription + * @dev simulated offchain to determine if sufficient balance is present to fulfill the request + */ + function fulfillRandomWords(Proof memory proof, RequestCommitment memory rc) external nonReentrant returns (uint96) { + uint256 startGas = gasleft(); + (bytes32 keyHash, uint256 requestId, uint256 randomness) = getRandomnessFromProof(proof, rc); + + uint256[] memory randomWords = new uint256[](rc.numWords); + for (uint256 i = 0; i < rc.numWords; i++) { + randomWords[i] = uint256(keccak256(abi.encode(randomness, i))); + } + + delete s_requestCommitments[requestId]; + VRFConsumerBaseV2 v; + bytes memory resp = abi.encodeWithSelector(v.rawFulfillRandomWords.selector, requestId, randomWords); + // Call with explicitly the amount of callback gas requested + // Important to not let them exhaust the gas budget and avoid oracle payment. + // Do not allow any non-view/non-pure coordinator functions to be called + // during the consumers callback code via reentrancyLock. + // Note that callWithExactGas will revert if we do not have sufficient gas + // to give the callee their requested amount. 
+ s_config.reentrancyLock = true; + bool success = callWithExactGas(rc.callbackGasLimit, rc.sender, resp); + s_config.reentrancyLock = false; + + // Increment the req count for fee tier selection. + uint64 reqCount = s_subscriptions[rc.subId].reqCount; + s_subscriptions[rc.subId].reqCount += 1; + + // We want to charge users exactly for how much gas they use in their callback. + // The gasAfterPaymentCalculation is meant to cover these additional operations where we + // decrement the subscription balance and increment the oracles withdrawable balance. + // We also add the flat link fee to the payment amount. + // Its specified in millionths of link, if s_config.fulfillmentFlatFeeLinkPPM = 1 + // 1 pli / 1e6 = 1e18 juels / 1e6 = 1e12 juels. + uint96 payment = calculatePaymentAmount( + startGas, + s_config.gasAfterPaymentCalculation, + getFeeTier(reqCount), + tx.gasprice + ); + if (s_subscriptions[rc.subId].balance < payment) { + revert InsufficientBalance(); + } + s_subscriptions[rc.subId].balance -= payment; + s_withdrawableTokens[s_provingKeys[keyHash]] += payment; + // Include payment in the event for tracking costs. + emit RandomWordsFulfilled(requestId, randomness, payment, success); + return payment; + } + + // Get the amount of gas used for fulfillment + function calculatePaymentAmount( + uint256 startGas, + uint256 gasAfterPaymentCalculation, + uint32 fulfillmentFlatFeeLinkPPM, + uint256 weiPerUnitGas + ) internal view returns (uint96) { + int256 weiPerUnitLink; + weiPerUnitLink = getFeedData(); + if (weiPerUnitLink <= 0) { + revert InvalidLinkWeiPrice(weiPerUnitLink); + } + // (1e18 juels/link) (wei/gas * gas) / (wei/link) = juels + uint256 paymentNoFee = (1e18 * weiPerUnitGas * (gasAfterPaymentCalculation + startGas - gasleft())) / + uint256(weiPerUnitLink); + uint256 fee = 1e12 * uint256(fulfillmentFlatFeeLinkPPM); + if (paymentNoFee > (1e27 - fee)) { + revert PaymentTooLarge(); // Payment + fee cannot be more than all of the link in existence. 
+ } + return uint96(paymentNoFee + fee); + } + + function getFeedData() private view returns (int256) { + uint32 stalenessSeconds = s_config.stalenessSeconds; + bool staleFallback = stalenessSeconds > 0; + uint256 timestamp; + int256 weiPerUnitLink; + (, weiPerUnitLink, , timestamp, ) = PLI_ETH_FEED.latestRoundData(); + // solhint-disable-next-line not-rely-on-time + if (staleFallback && stalenessSeconds < block.timestamp - timestamp) { + weiPerUnitLink = s_fallbackWeiPerUnitLink; + } + return weiPerUnitLink; + } + + /* + * @notice Oracle withdraw PLI earned through fulfilling requests + * @param recipient where to send the funds + * @param amount amount to withdraw + */ + function oracleWithdraw(address recipient, uint96 amount) external nonReentrant { + if (s_withdrawableTokens[msg.sender] < amount) { + revert InsufficientBalance(); + } + s_withdrawableTokens[msg.sender] -= amount; + s_totalBalance -= amount; + if (!PLI.transfer(recipient, amount)) { + revert InsufficientBalance(); + } + } + + function onTokenTransfer(address /* sender */, uint256 amount, bytes calldata data) external override nonReentrant { + if (msg.sender != address(PLI)) { + revert OnlyCallableFromLink(); + } + if (data.length != 32) { + revert InvalidCalldata(); + } + uint64 subId = abi.decode(data, (uint64)); + if (s_subscriptionConfigs[subId].owner == address(0)) { + revert InvalidSubscription(); + } + // We do not check that the msg.sender is the subscription owner, + // anyone can fund a subscription. 
+ uint256 oldBalance = s_subscriptions[subId].balance; + s_subscriptions[subId].balance += uint96(amount); + s_totalBalance += uint96(amount); + emit SubscriptionFunded(subId, oldBalance, oldBalance + amount); + } + + function getCurrentSubId() external view returns (uint64) { + return s_currentSubId; + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function getSubscription( + uint64 subId + ) external view override returns (uint96 balance, uint64 reqCount, address owner, address[] memory consumers) { + if (s_subscriptionConfigs[subId].owner == address(0)) { + revert InvalidSubscription(); + } + return ( + s_subscriptions[subId].balance, + s_subscriptions[subId].reqCount, + s_subscriptionConfigs[subId].owner, + s_subscriptionConfigs[subId].consumers + ); + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function createSubscription() external override nonReentrant returns (uint64) { + s_currentSubId++; + uint64 currentSubId = s_currentSubId; + address[] memory consumers = new address[](0); + s_subscriptions[currentSubId] = Subscription({balance: 0, reqCount: 0}); + s_subscriptionConfigs[currentSubId] = SubscriptionConfig({ + owner: msg.sender, + requestedOwner: address(0), + consumers: consumers + }); + + emit SubscriptionCreated(currentSubId, msg.sender); + return currentSubId; + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function requestSubscriptionOwnerTransfer( + uint64 subId, + address newOwner + ) external override onlySubOwner(subId) nonReentrant { + // Proposing to address(0) would never be claimable so don't need to check. 
+ if (s_subscriptionConfigs[subId].requestedOwner != newOwner) { + s_subscriptionConfigs[subId].requestedOwner = newOwner; + emit SubscriptionOwnerTransferRequested(subId, msg.sender, newOwner); + } + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function acceptSubscriptionOwnerTransfer(uint64 subId) external override nonReentrant { + if (s_subscriptionConfigs[subId].owner == address(0)) { + revert InvalidSubscription(); + } + if (s_subscriptionConfigs[subId].requestedOwner != msg.sender) { + revert MustBeRequestedOwner(s_subscriptionConfigs[subId].requestedOwner); + } + address oldOwner = s_subscriptionConfigs[subId].owner; + s_subscriptionConfigs[subId].owner = msg.sender; + s_subscriptionConfigs[subId].requestedOwner = address(0); + emit SubscriptionOwnerTransferred(subId, oldOwner, msg.sender); + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function removeConsumer(uint64 subId, address consumer) external override onlySubOwner(subId) nonReentrant { + if (s_consumers[consumer][subId] == 0) { + revert InvalidConsumer(subId, consumer); + } + // Note bounded by MAX_CONSUMERS + address[] memory consumers = s_subscriptionConfigs[subId].consumers; + uint256 lastConsumerIndex = consumers.length - 1; + for (uint256 i = 0; i < consumers.length; i++) { + if (consumers[i] == consumer) { + address last = consumers[lastConsumerIndex]; + // Storage write to preserve last element + s_subscriptionConfigs[subId].consumers[i] = last; + // Storage remove last element + s_subscriptionConfigs[subId].consumers.pop(); + break; + } + } + delete s_consumers[consumer][subId]; + emit SubscriptionConsumerRemoved(subId, consumer); + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function addConsumer(uint64 subId, address consumer) external override onlySubOwner(subId) nonReentrant { + // Already maxed, cannot add any more consumers. 
+ if (s_subscriptionConfigs[subId].consumers.length == MAX_CONSUMERS) { + revert TooManyConsumers(); + } + if (s_consumers[consumer][subId] != 0) { + // Idempotence - do nothing if already added. + // Ensures uniqueness in s_subscriptions[subId].consumers. + return; + } + // Initialize the nonce to 1, indicating the consumer is allocated. + s_consumers[consumer][subId] = 1; + s_subscriptionConfigs[subId].consumers.push(consumer); + + emit SubscriptionConsumerAdded(subId, consumer); + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + */ + function cancelSubscription(uint64 subId, address to) external override onlySubOwner(subId) nonReentrant { + if (pendingRequestExists(subId)) { + revert PendingRequestExists(); + } + cancelSubscriptionHelper(subId, to); + } + + function cancelSubscriptionHelper(uint64 subId, address to) private nonReentrant { + SubscriptionConfig memory subConfig = s_subscriptionConfigs[subId]; + Subscription memory sub = s_subscriptions[subId]; + uint96 balance = sub.balance; + // Note bounded by MAX_CONSUMERS; + // If no consumers, does nothing. + for (uint256 i = 0; i < subConfig.consumers.length; i++) { + delete s_consumers[subConfig.consumers[i]][subId]; + } + delete s_subscriptionConfigs[subId]; + delete s_subscriptions[subId]; + s_totalBalance -= balance; + if (!PLI.transfer(to, uint256(balance))) { + revert InsufficientBalance(); + } + emit SubscriptionCanceled(subId, to, balance); + } + + /** + * @inheritdoc VRFCoordinatorV2Interface + * @dev Looping is bounded to MAX_CONSUMERS*(number of keyhashes). + * @dev Used to disable subscription canceling while outstanding request are present. 
+ */ + function pendingRequestExists(uint64 subId) public view override returns (bool) { + SubscriptionConfig memory subConfig = s_subscriptionConfigs[subId]; + for (uint256 i = 0; i < subConfig.consumers.length; i++) { + for (uint256 j = 0; j < s_provingKeyHashes.length; j++) { + (uint256 reqId, ) = computeRequestId( + s_provingKeyHashes[j], + subConfig.consumers[i], + subId, + s_consumers[subConfig.consumers[i]][subId] + ); + if (s_requestCommitments[reqId] != 0) { + return true; + } + } + } + return false; + } + + modifier onlySubOwner(uint64 subId) { + address owner = s_subscriptionConfigs[subId].owner; + if (owner == address(0)) { + revert InvalidSubscription(); + } + if (msg.sender != owner) { + revert MustBeSubOwner(owner); + } + _; + } + + modifier nonReentrant() { + if (s_config.reentrancyLock) { + revert Reentrant(); + } + _; + } + + /** + * @notice The type and version of this contract + * @return Type and version string + */ + function typeAndVersion() external pure virtual override returns (string memory) { + return "VRFCoordinatorV2 1.0.0"; + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFCoordinatorV2TestHelper.sol b/contracts/src/v0.8/vrf/testhelpers/VRFCoordinatorV2TestHelper.sol new file mode 100644 index 00000000..3022e83e --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFCoordinatorV2TestHelper.sol @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {AggregatorV3Interface} from "../../shared/interfaces/AggregatorV3Interface.sol"; + +// Ideally this contract should inherit from VRFCoordinatorV2 and delegate calls to VRFCoordinatorV2 +// However, due to exceeding contract size limit, the logic from VRFCoordinatorV2 is ported over to this contract +contract VRFCoordinatorV2TestHelper { + uint96 internal s_paymentAmount; + + AggregatorV3Interface public immutable PLI_ETH_FEED; + + struct Config { + uint16 minimumRequestConfirmations; + uint32 maxGasLimit; + // Reentrancy protection. 
+ bool reentrancyLock; + // stalenessSeconds is how long before we consider the feed price to be stale + // and fallback to fallbackWeiPerUnitLink. + uint32 stalenessSeconds; + // Gas to cover oracle payment after we calculate the payment. + // We make it configurable in case those operations are repriced. + uint32 gasAfterPaymentCalculation; + } + int256 private s_fallbackWeiPerUnitLink; + Config private s_config; + + constructor( + address linkEthFeed // solhint-disable-next-line no-empty-blocks + ) { + PLI_ETH_FEED = AggregatorV3Interface(linkEthFeed); + } + + function calculatePaymentAmountTest( + uint256 gasAfterPaymentCalculation, + uint32 fulfillmentFlatFeeLinkPPM, + uint256 weiPerUnitGas + ) external { + s_paymentAmount = calculatePaymentAmount( + gasleft(), + gasAfterPaymentCalculation, + fulfillmentFlatFeeLinkPPM, + weiPerUnitGas + ); + } + + error InvalidLinkWeiPrice(int256 linkWei); + error PaymentTooLarge(); + + function getFeedData() private view returns (int256) { + uint32 stalenessSeconds = s_config.stalenessSeconds; + bool staleFallback = stalenessSeconds > 0; + uint256 timestamp; + int256 weiPerUnitLink; + (, weiPerUnitLink, , timestamp, ) = PLI_ETH_FEED.latestRoundData(); + // solhint-disable-next-line not-rely-on-time + if (staleFallback && stalenessSeconds < block.timestamp - timestamp) { + weiPerUnitLink = s_fallbackWeiPerUnitLink; + } + return weiPerUnitLink; + } + + // Get the amount of gas used for fulfillment + function calculatePaymentAmount( + uint256 startGas, + uint256 gasAfterPaymentCalculation, + uint32 fulfillmentFlatFeeLinkPPM, + uint256 weiPerUnitGas + ) internal view returns (uint96) { + int256 weiPerUnitLink; + weiPerUnitLink = getFeedData(); + if (weiPerUnitLink <= 0) { + revert InvalidLinkWeiPrice(weiPerUnitLink); + } + // (1e18 juels/link) (wei/gas * gas) / (wei/link) = juels + uint256 paymentNoFee = (1e18 * weiPerUnitGas * (gasAfterPaymentCalculation + startGas - gasleft())) / + uint256(weiPerUnitLink); + uint256 fee = 1e12 
* uint256(fulfillmentFlatFeeLinkPPM); + if (paymentNoFee > (1e27 - fee)) { + revert PaymentTooLarge(); // Payment + fee cannot be more than all of the link in existence. + } + return uint96(paymentNoFee + fee); + } + + function getPaymentAmount() public view returns (uint96) { + return s_paymentAmount; + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFExternalSubOwnerExample.sol b/contracts/src/v0.8/vrf/testhelpers/VRFExternalSubOwnerExample.sol new file mode 100644 index 00000000..6a4822b7 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFExternalSubOwnerExample.sol @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {VRFCoordinatorV2Interface} from "../interfaces/VRFCoordinatorV2Interface.sol"; +import {VRFConsumerBaseV2} from "../VRFConsumerBaseV2.sol"; + +contract VRFExternalSubOwnerExample is VRFConsumerBaseV2 { + VRFCoordinatorV2Interface internal COORDINATOR; + LinkTokenInterface internal PLITOKEN; + + uint256[] public s_randomWords; + uint256 public s_requestId; + address internal s_owner; + + constructor(address vrfCoordinator, address link) VRFConsumerBaseV2(vrfCoordinator) { + COORDINATOR = VRFCoordinatorV2Interface(vrfCoordinator); + PLITOKEN = LinkTokenInterface(link); + s_owner = msg.sender; + } + + function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal override { + require(requestId == s_requestId, "request ID is incorrect"); + s_randomWords = randomWords; + } + + function requestRandomWords( + uint64 subId, + uint32 callbackGasLimit, + uint16 requestConfirmations, + uint32 numWords, + bytes32 keyHash + ) external onlyOwner { + // Will revert if subscription is not funded. 
+ s_requestId = COORDINATOR.requestRandomWords(keyHash, subId, requestConfirmations, callbackGasLimit, numWords); + } + + function transferOwnership(address newOwner) external onlyOwner { + s_owner = newOwner; + } + + modifier onlyOwner() { + require(msg.sender == s_owner); + _; + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFLoadTestExternalSubOwner.sol b/contracts/src/v0.8/vrf/testhelpers/VRFLoadTestExternalSubOwner.sol new file mode 100644 index 00000000..0a94f3a7 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFLoadTestExternalSubOwner.sol @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {VRFCoordinatorV2Interface} from "../interfaces/VRFCoordinatorV2Interface.sol"; +import {VRFConsumerBaseV2} from "../VRFConsumerBaseV2.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; + +/** + * @title The VRFLoadTestExternalSubOwner contract. + * @notice Allows making many VRF V2 randomness requests in a single transaction for load testing. 
+ */ +contract VRFLoadTestExternalSubOwner is VRFConsumerBaseV2, ConfirmedOwner { + VRFCoordinatorV2Interface public immutable COORDINATOR; + LinkTokenInterface public immutable PLI; + + uint256 public s_responseCount; + + constructor(address _vrfCoordinator, address _link) VRFConsumerBaseV2(_vrfCoordinator) ConfirmedOwner(msg.sender) { + COORDINATOR = VRFCoordinatorV2Interface(_vrfCoordinator); + PLI = LinkTokenInterface(_link); + } + + function fulfillRandomWords(uint256, uint256[] memory) internal override { + s_responseCount++; + } + + function requestRandomWords( + uint64 _subId, + uint16 _requestConfirmations, + bytes32 _keyHash, + uint16 _requestCount + ) external onlyOwner { + for (uint16 i = 0; i < _requestCount; i++) { + COORDINATOR.requestRandomWords(_keyHash, _subId, _requestConfirmations, 50_000, 1); + } + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFLoadTestOwnerlessConsumer.sol b/contracts/src/v0.8/vrf/testhelpers/VRFLoadTestOwnerlessConsumer.sol new file mode 100644 index 00000000..edadd0f2 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFLoadTestOwnerlessConsumer.sol @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {VRFConsumerBase} from "../VRFConsumerBase.sol"; +import {IERC677Receiver} from "../../shared/interfaces/IERC677Receiver.sol"; + +/** + * @title The VRFLoadTestOwnerlessConsumer contract. + * @notice Allows making many VRF V1 randomness requests in a single transaction for load testing. + */ +contract VRFLoadTestOwnerlessConsumer is VRFConsumerBase, IERC677Receiver { + // The price of each VRF request in Juels. 1 PLI = 1e18 Juels. 
+ uint256 public immutable PRICE; + + uint256 public s_responseCount; + + constructor(address _vrfCoordinator, address _link, uint256 _price) VRFConsumerBase(_vrfCoordinator, _link) { + PRICE = _price; + } + + function fulfillRandomness(bytes32, uint256) internal override { + s_responseCount++; + } + + /** + * @dev Creates as many randomness requests as can be made with the funds transferred. + * @param _amount The amount of PLI transferred to pay for these requests. + * @param _data The data passed to transferAndCall on LinkToken. Must be an abi-encoded key hash. + */ + function onTokenTransfer(address, uint256 _amount, bytes calldata _data) external override { + if (msg.sender != address(PLI)) { + revert("only callable from PLI"); + } + bytes32 keyHash = abi.decode(_data, (bytes32)); + + uint256 spent = 0; + while (spent + PRICE <= _amount) { + requestRandomness(keyHash, PRICE); + spent += PRICE; + } + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFMaliciousConsumerV2.sol b/contracts/src/v0.8/vrf/testhelpers/VRFMaliciousConsumerV2.sol new file mode 100644 index 00000000..7b203f21 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFMaliciousConsumerV2.sol @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {VRFCoordinatorV2Interface} from "../interfaces/VRFCoordinatorV2Interface.sol"; +import {VRFConsumerBaseV2} from "../VRFConsumerBaseV2.sol"; + +contract VRFMaliciousConsumerV2 is VRFConsumerBaseV2 { + uint256[] public s_randomWords; + uint256 public s_requestId; + VRFCoordinatorV2Interface internal COORDINATOR; + LinkTokenInterface internal PLITOKEN; + uint64 public s_subId; + uint256 public s_gasAvailable; + bytes32 internal s_keyHash; + + constructor(address vrfCoordinator, address link) VRFConsumerBaseV2(vrfCoordinator) { + COORDINATOR = VRFCoordinatorV2Interface(vrfCoordinator); + PLITOKEN = LinkTokenInterface(link); + } + + 
 function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal override { + s_gasAvailable = gasleft(); + s_randomWords = randomWords; + s_requestId = requestId; + // Should revert + COORDINATOR.requestRandomWords(s_keyHash, s_subId, 1, 200000, 1); + } + + function createSubscriptionAndFund(uint96 amount) external { + if (s_subId == 0) { + s_subId = COORDINATOR.createSubscription(); + COORDINATOR.addConsumer(s_subId, address(this)); + } + // Approve the link transfer. + PLITOKEN.transferAndCall(address(COORDINATOR), amount, abi.encode(s_subId)); + } + + function updateSubscription(address[] memory consumers) external { + require(s_subId != 0, "subID not set"); + for (uint256 i = 0; i < consumers.length; i++) { + COORDINATOR.addConsumer(s_subId, consumers[i]); + } + } + + function requestRandomness(bytes32 keyHash) external returns (uint256) { + s_keyHash = keyHash; + return COORDINATOR.requestRandomWords(keyHash, s_subId, 1, 500000, 1); + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFMockETHLINKAggregator.sol b/contracts/src/v0.8/vrf/testhelpers/VRFMockETHLINKAggregator.sol new file mode 100644 index 00000000..bd8b9f2e --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFMockETHLINKAggregator.sol @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "../../shared/interfaces/AggregatorV3Interface.sol"; + +contract VRFMockETHPLIAggregator is AggregatorV3Interface { + int256 public answer; + uint256 private blockTimestampDeduction = 0; + + constructor(int256 _answer) { + answer = _answer; + } + + function decimals() external view override returns (uint8) { + return 18; + } + + function description() external view override returns (string memory) { + return "VRFMockETHPLIAggregator"; + } + + function version() external view override returns (uint256) { + return 1; + } + + function getRoundData( + uint80 _roundId + ) + external + view + override + returns (uint80 roundId, int256 ans, uint256 
startedAt, uint256 updatedAt, uint80 answeredInRound) + { + return (1, answer, getDeductedBlockTimestamp(), getDeductedBlockTimestamp(), 1); + } + + function latestRoundData() + external + view + override + returns (uint80 roundId, int256 ans, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) + { + return (1, answer, getDeductedBlockTimestamp(), getDeductedBlockTimestamp(), 1); + } + + function getDeductedBlockTimestamp() internal view returns (uint256) { + return block.timestamp - blockTimestampDeduction; + } + + function setBlockTimestampDeduction(uint256 _blockTimestampDeduction) external { + blockTimestampDeduction = _blockTimestampDeduction; + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFOwnerlessConsumerExample.sol b/contracts/src/v0.8/vrf/testhelpers/VRFOwnerlessConsumerExample.sol new file mode 100644 index 00000000..0140dd24 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFOwnerlessConsumerExample.sol @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: MIT +// An example VRF V1 consumer contract that can be triggered using a transferAndCall from the link +// contract. +pragma solidity ^0.8.4; + +import {VRFConsumerBase} from "../VRFConsumerBase.sol"; +import {IERC677Receiver} from "../../shared/interfaces/IERC677Receiver.sol"; + +contract VRFOwnerlessConsumerExample is VRFConsumerBase, IERC677Receiver { + uint256 public s_randomnessOutput; + bytes32 public s_requestId; + + error OnlyCallableFromLink(); + + constructor(address _vrfCoordinator, address _link) VRFConsumerBase(_vrfCoordinator, _link) { + /* empty */ + } + + function fulfillRandomness(bytes32 requestId, uint256 _randomness) internal override { + require(requestId == s_requestId, "request ID is incorrect"); + s_randomnessOutput = _randomness; + } + + /** + * @dev Creates a new randomness request. This function can only be used by calling + * transferAndCall on the LinkToken contract. + * @param _amount The amount of PLI transferred to pay for this request. 
+ * @param _data The data passed to transferAndCall on LinkToken. Must be an abi-encoded key hash. + */ + function onTokenTransfer(address /* sender */, uint256 _amount, bytes calldata _data) external override { + if (msg.sender != address(PLI)) { + revert OnlyCallableFromLink(); + } + + bytes32 keyHash = abi.decode(_data, (bytes32)); + s_requestId = requestRandomness(keyHash, _amount); + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFRequestIDBaseTestHelper.sol b/contracts/src/v0.8/vrf/testhelpers/VRFRequestIDBaseTestHelper.sol new file mode 100644 index 00000000..344797f0 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFRequestIDBaseTestHelper.sol @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {VRFRequestIDBase} from "../VRFRequestIDBase.sol"; + +contract VRFRequestIDBaseTestHelper is VRFRequestIDBase { + function makeVRFInputSeed_( + bytes32 _keyHash, + uint256 _userSeed, + address _requester, + uint256 _nonce + ) public pure returns (uint256) { + return makeVRFInputSeed(_keyHash, _userSeed, _requester, _nonce); + } + + function makeRequestId_(bytes32 _keyHash, uint256 _vRFInputSeed) public pure returns (bytes32) { + return makeRequestId(_keyHash, _vRFInputSeed); + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFSingleConsumerExample.sol b/contracts/src/v0.8/vrf/testhelpers/VRFSingleConsumerExample.sol new file mode 100644 index 00000000..c5215fe3 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFSingleConsumerExample.sol @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: MIT +// Example of a single consumer contract which owns the subscription. 
+pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {VRFCoordinatorV2Interface} from "../interfaces/VRFCoordinatorV2Interface.sol"; +import {VRFConsumerBaseV2} from "../VRFConsumerBaseV2.sol"; + +contract VRFSingleConsumerExample is VRFConsumerBaseV2 { + VRFCoordinatorV2Interface internal COORDINATOR; + LinkTokenInterface internal PLITOKEN; + + struct RequestConfig { + uint64 subId; + uint32 callbackGasLimit; + uint16 requestConfirmations; + uint32 numWords; + bytes32 keyHash; + } + RequestConfig public s_requestConfig; + uint256[] public s_randomWords; + uint256 public s_requestId; + address s_owner; + + constructor( + address vrfCoordinator, + address link, + uint32 callbackGasLimit, + uint16 requestConfirmations, + uint32 numWords, + bytes32 keyHash + ) VRFConsumerBaseV2(vrfCoordinator) { + COORDINATOR = VRFCoordinatorV2Interface(vrfCoordinator); + PLITOKEN = LinkTokenInterface(link); + s_owner = msg.sender; + s_requestConfig = RequestConfig({ + subId: 0, // Unset initially + callbackGasLimit: callbackGasLimit, + requestConfirmations: requestConfirmations, + numWords: numWords, + keyHash: keyHash + }); + subscribe(); + } + + function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal override { + require(requestId == s_requestId, "request ID is incorrect"); + s_randomWords = randomWords; + } + + // Assumes the subscription is funded sufficiently. + function requestRandomWords() external onlyOwner { + RequestConfig memory rc = s_requestConfig; + // Will revert if subscription is not set and funded. + s_requestId = COORDINATOR.requestRandomWords( + rc.keyHash, + rc.subId, + rc.requestConfirmations, + rc.callbackGasLimit, + rc.numWords + ); + } + + // Assumes this contract owns link + // This method is analogous to VRFv1, except the amount + // should be selected based on the keyHash (each keyHash functions like a "gas lane" + // with different link costs). 
+ function fundAndRequestRandomWords(uint256 amount) external onlyOwner { + RequestConfig memory rc = s_requestConfig; + PLITOKEN.transferAndCall(address(COORDINATOR), amount, abi.encode(s_requestConfig.subId)); + // Will revert if subscription is not set and funded. + s_requestId = COORDINATOR.requestRandomWords( + rc.keyHash, + rc.subId, + rc.requestConfirmations, + rc.callbackGasLimit, + rc.numWords + ); + } + + // Assumes this contract owns link + function topUpSubscription(uint256 amount) external onlyOwner { + PLITOKEN.transferAndCall(address(COORDINATOR), amount, abi.encode(s_requestConfig.subId)); + } + + function withdraw(uint256 amount, address to) external onlyOwner { + PLITOKEN.transfer(to, amount); + } + + function unsubscribe(address to) external onlyOwner { + // Returns funds to this address + COORDINATOR.cancelSubscription(s_requestConfig.subId, to); + s_requestConfig.subId = 0; + } + + // Keep this separate in case the contract want to unsubscribe and then + // resubscribe. 
+ function subscribe() public onlyOwner { + // Create a subscription, current subId + address[] memory consumers = new address[](1); + consumers[0] = address(this); + s_requestConfig.subId = COORDINATOR.createSubscription(); + COORDINATOR.addConsumer(s_requestConfig.subId, consumers[0]); + } + + modifier onlyOwner() { + require(msg.sender == s_owner); + _; + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFSubscriptionBalanceMonitorExposed.sol b/contracts/src/v0.8/vrf/testhelpers/VRFSubscriptionBalanceMonitorExposed.sol new file mode 100644 index 00000000..471b6f99 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFSubscriptionBalanceMonitorExposed.sol @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: MIT + +pragma solidity 0.8.6; + +import {VRFSubscriptionBalanceMonitor} from "../dev/VRFSubscriptionBalanceMonitor.sol"; + +contract VRFSubscriptionBalanceMonitorExposed is VRFSubscriptionBalanceMonitor { + constructor( + address linkTokenAddress, + address coordinatorAddress, + address keeperRegistryAddress, + uint256 minWaitPeriodSeconds + ) VRFSubscriptionBalanceMonitor(linkTokenAddress, coordinatorAddress, keeperRegistryAddress, minWaitPeriodSeconds) {} + + function setLastTopUpXXXTestOnly(uint64 target, uint56 lastTopUpTimestamp) external { + s_targets[target].lastTopUpTimestamp = lastTopUpTimestamp; + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFTestHelper.sol b/contracts/src/v0.8/vrf/testhelpers/VRFTestHelper.sol new file mode 100644 index 00000000..bcead3f0 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFTestHelper.sol @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {VRF} from "../VRF.sol"; + +/** *********************************************************************** + @notice Testing harness for VRF.sol, exposing its internal methods. Not to + @notice be used for production. 
+*/ +contract VRFTestHelper is VRF { + function bigModExp_(uint256 base, uint256 exponent) public view returns (uint256) { + return super._bigModExp(base, exponent); + } + + function squareRoot_(uint256 x) public view returns (uint256) { + return super._squareRoot(x); + } + + function ySquared_(uint256 x) public pure returns (uint256) { + return super._ySquared(x); + } + + function fieldHash_(bytes memory b) public pure returns (uint256) { + return super._fieldHash(b); + } + + function hashToCurve_(uint256[2] memory pk, uint256 x) public view returns (uint256[2] memory) { + return super._hashToCurve(pk, x); + } + + function ecmulVerify_(uint256[2] memory x, uint256 scalar, uint256[2] memory q) public pure returns (bool) { + return super._ecmulVerify(x, scalar, q); + } + + function projectiveECAdd_( + uint256 px, + uint256 py, + uint256 qx, + uint256 qy + ) public pure returns (uint256, uint256, uint256) { + return super._projectiveECAdd(px, py, qx, qy); + } + + function affineECAdd_( + uint256[2] memory p1, + uint256[2] memory p2, + uint256 invZ + ) public pure returns (uint256[2] memory) { + return super._affineECAdd(p1, p2, invZ); + } + + function verifyLinearCombinationWithGenerator_( + uint256 c, + uint256[2] memory p, + uint256 s, + address lcWitness + ) public pure returns (bool) { + return super._verifyLinearCombinationWithGenerator(c, p, s, lcWitness); + } + + function linearCombination_( + uint256 c, + uint256[2] memory p1, + uint256[2] memory cp1Witness, + uint256 s, + uint256[2] memory p2, + uint256[2] memory sp2Witness, + uint256 zInv + ) public pure returns (uint256[2] memory) { + return super._linearCombination(c, p1, cp1Witness, s, p2, sp2Witness, zInv); + } + + function scalarFromCurvePoints_( + uint256[2] memory hash, + uint256[2] memory pk, + uint256[2] memory gamma, + address uWitness, + uint256[2] memory v + ) public pure returns (uint256) { + return super._scalarFromCurvePoints(hash, pk, gamma, uWitness, v); + } + + function 
isOnCurve_(uint256[2] memory p) public pure returns (bool) { + return super._isOnCurve(p); + } + + function verifyVRFProof_( + uint256[2] memory pk, + uint256[2] memory gamma, + uint256 c, + uint256 s, + uint256 seed, + address uWitness, + uint256[2] memory cGammaWitness, + uint256[2] memory sHashWitness, + uint256 zInv + ) public view { + super._verifyVRFProof(pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv); + } + + function randomValueFromVRFProof_(Proof memory proof, uint256 seed) public view returns (uint256 output) { + return super._randomValueFromVRFProof(proof, seed); + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFV2LoadTestWithMetrics.sol b/contracts/src/v0.8/vrf/testhelpers/VRFV2LoadTestWithMetrics.sol new file mode 100644 index 00000000..a8c71000 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFV2LoadTestWithMetrics.sol @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {VRFCoordinatorV2Interface} from "../interfaces/VRFCoordinatorV2Interface.sol"; +import {VRFConsumerBaseV2} from "../VRFConsumerBaseV2.sol"; +import {ChainSpecificUtil} from "../../ChainSpecificUtil.sol"; +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; + +/** + * @title The VRFLoadTestExternalSubOwner contract. + * @notice Allows making many VRF V2 randomness requests in a single transaction for load testing. 
+ */ +contract VRFV2LoadTestWithMetrics is VRFConsumerBaseV2 { + VRFCoordinatorV2Interface public immutable COORDINATOR; + LinkTokenInterface public PLITOKEN; + uint256 public s_responseCount; + uint256 public s_requestCount; + uint256 public s_averageFulfillmentInMillions = 0; // in millions for better precision + uint256 public s_slowestFulfillment = 0; + uint256 public s_fastestFulfillment = 999; + uint256 public s_lastRequestId; + mapping(uint256 => uint256) internal requestHeights; // requestIds to block number when rand request was made + + event SubscriptionCreatedFundedAndConsumerAdded(uint64 subId, address consumer, uint256 amount); + + struct RequestStatus { + bool fulfilled; + uint256[] randomWords; + uint requestTimestamp; + uint fulfilmentTimestamp; + uint256 requestBlockNumber; + uint256 fulfilmentBlockNumber; + } + + mapping(uint256 => RequestStatus) /* requestId */ /* requestStatus */ public s_requests; + + constructor(address _vrfCoordinator) VRFConsumerBaseV2(_vrfCoordinator) { + COORDINATOR = VRFCoordinatorV2Interface(_vrfCoordinator); + } + + function fulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) internal override { + uint256 fulfilmentBlockNumber = ChainSpecificUtil._getBlockNumber(); + uint256 requestDelay = fulfilmentBlockNumber - requestHeights[_requestId]; + uint256 requestDelayInMillions = requestDelay * 1_000_000; + + if (requestDelay > s_slowestFulfillment) { + s_slowestFulfillment = requestDelay; + } + s_fastestFulfillment = requestDelay < s_fastestFulfillment ? requestDelay : s_fastestFulfillment; + s_averageFulfillmentInMillions = s_responseCount > 0 + ? 
(s_averageFulfillmentInMillions * s_responseCount + requestDelayInMillions) / (s_responseCount + 1) + : requestDelayInMillions; + + s_requests[_requestId].fulfilled = true; + s_requests[_requestId].randomWords = _randomWords; + s_requests[_requestId].fulfilmentTimestamp = block.timestamp; + s_requests[_requestId].fulfilmentBlockNumber = fulfilmentBlockNumber; + + s_responseCount++; + } + + function requestRandomWords( + uint64 _subId, + uint16 _requestConfirmations, + bytes32 _keyHash, + uint32 _callbackGasLimit, + uint32 _numWords, + uint16 _requestCount + ) external { + _makeLoadTestRequests(_subId, _requestConfirmations, _keyHash, _callbackGasLimit, _numWords, _requestCount); + } + + function requestRandomWordsWithForceFulfill( + uint16 _requestConfirmations, + bytes32 _keyHash, + uint32 _callbackGasLimit, + uint32 _numWords, + uint16 _requestCount, + uint256 _subTopUpAmount, + address _link + ) external { + // create a subscription, address(this) will be the owner + uint64 _subId = COORDINATOR.createSubscription(); + // add address(this) as a consumer on the subscription + COORDINATOR.addConsumer(_subId, address(this)); + topUpSubscription(_subId, _subTopUpAmount, _link); + emit SubscriptionCreatedFundedAndConsumerAdded(_subId, address(this), _subTopUpAmount); + + _makeLoadTestRequests(_subId, _requestConfirmations, _keyHash, _callbackGasLimit, _numWords, _requestCount); + + COORDINATOR.removeConsumer(_subId, address(this)); + COORDINATOR.cancelSubscription(_subId, msg.sender); + } + + function reset() external { + s_averageFulfillmentInMillions = 0; // in millions for better precision + s_slowestFulfillment = 0; + s_fastestFulfillment = 999; + s_requestCount = 0; + s_responseCount = 0; + } + + function getRequestStatus( + uint256 _requestId + ) + external + view + returns ( + bool fulfilled, + uint256[] memory randomWords, + uint requestTimestamp, + uint fulfilmentTimestamp, + uint256 requestBlockNumber, + uint256 fulfilmentBlockNumber + ) + { + RequestStatus 
memory request = s_requests[_requestId]; + return ( + request.fulfilled, + request.randomWords, + request.requestTimestamp, + request.fulfilmentTimestamp, + request.requestBlockNumber, + request.fulfilmentBlockNumber + ); + } + + function _makeLoadTestRequests( + uint64 _subId, + uint16 _requestConfirmations, + bytes32 _keyHash, + uint32 _callbackGasLimit, + uint32 _numWords, + uint16 _requestCount + ) internal { + for (uint16 i = 0; i < _requestCount; i++) { + uint256 requestId = COORDINATOR.requestRandomWords( + _keyHash, + _subId, + _requestConfirmations, + _callbackGasLimit, + _numWords + ); + s_lastRequestId = requestId; + uint256 requestBlockNumber = ChainSpecificUtil._getBlockNumber(); + s_requests[requestId] = RequestStatus({ + randomWords: new uint256[](0), + fulfilled: false, + requestTimestamp: block.timestamp, + fulfilmentTimestamp: 0, + requestBlockNumber: requestBlockNumber, + fulfilmentBlockNumber: 0 + }); + s_requestCount++; + requestHeights[requestId] = requestBlockNumber; + } + } + + function topUpSubscription(uint64 _subId, uint256 _amount, address _link) public { + PLITOKEN = LinkTokenInterface(_link); + PLITOKEN.transferAndCall(address(COORDINATOR), _amount, abi.encode(_subId)); + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFV2OwnerTestConsumer.sol b/contracts/src/v0.8/vrf/testhelpers/VRFV2OwnerTestConsumer.sol new file mode 100644 index 00000000..69f60551 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFV2OwnerTestConsumer.sol @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {VRFCoordinatorV2Interface} from "../interfaces/VRFCoordinatorV2Interface.sol"; +import {VRFConsumerBaseV2} from "../VRFConsumerBaseV2.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {ChainSpecificUtil} from "../../ChainSpecificUtil.sol"; +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; + +contract VRFV2OwnerTestConsumer is VRFConsumerBaseV2, 
ConfirmedOwner { + VRFCoordinatorV2Interface public COORDINATOR; + LinkTokenInterface public PLITOKEN; + uint64 public subId; + uint256 public s_responseCount; + uint256 public s_requestCount; + uint256 public s_averageFulfillmentInMillions = 0; // in millions for better precision + uint256 public s_slowestFulfillment = 0; + uint256 public s_fastestFulfillment = 999; + uint256 public s_lastRequestId; + mapping(uint256 => uint256) internal requestHeights; // requestIds to block number when rand request was made + + event SubscriptionCreatedFundedAndConsumerAdded(uint64 subId, address consumer, uint256 amount); + + struct RequestStatus { + bool fulfilled; + uint256[] randomWords; + uint requestTimestamp; + uint fulfilmentTimestamp; + uint256 requestBlockNumber; + uint256 fulfilmentBlockNumber; + } + + mapping(uint256 => RequestStatus) /* requestId */ /* requestStatus */ public s_requests; + + constructor(address _vrfCoordinator, address _link) VRFConsumerBaseV2(_vrfCoordinator) ConfirmedOwner(msg.sender) { + COORDINATOR = VRFCoordinatorV2Interface(_vrfCoordinator); + PLITOKEN = LinkTokenInterface(_link); + } + + function fulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) internal override { + uint256 fulfilmentBlockNumber = ChainSpecificUtil._getBlockNumber(); + uint256 requestDelay = fulfilmentBlockNumber - requestHeights[_requestId]; + uint256 requestDelayInMillions = requestDelay * 1_000_000; + + if (requestDelay > s_slowestFulfillment) { + s_slowestFulfillment = requestDelay; + } + s_fastestFulfillment = requestDelay < s_fastestFulfillment ? requestDelay : s_fastestFulfillment; + s_averageFulfillmentInMillions = s_responseCount > 0 + ? 
(s_averageFulfillmentInMillions * s_responseCount + requestDelayInMillions) / (s_responseCount + 1) + : requestDelayInMillions; + + s_requests[_requestId].fulfilled = true; + s_requests[_requestId].randomWords = _randomWords; + s_requests[_requestId].fulfilmentTimestamp = block.timestamp; + s_requests[_requestId].fulfilmentBlockNumber = fulfilmentBlockNumber; + + s_responseCount++; + } + + function requestRandomWords( + uint16 _requestConfirmations, + bytes32 _keyHash, + uint32 _callbackGasLimit, + uint32 _numWords, + uint16 _requestCount, + uint256 _subTopUpAmount + ) external onlyOwner { + // create a subscription, address(this) will be the owner + subId = COORDINATOR.createSubscription(); + // add address(this) as a consumer on the subscription + COORDINATOR.addConsumer(subId, address(this)); + topUpSubscription(_subTopUpAmount); + emit SubscriptionCreatedFundedAndConsumerAdded(subId, address(this), _subTopUpAmount); + + for (uint16 i = 0; i < _requestCount; i++) { + uint256 requestId = COORDINATOR.requestRandomWords( + _keyHash, + subId, + _requestConfirmations, + _callbackGasLimit, + _numWords + ); + s_lastRequestId = requestId; + uint256 requestBlockNumber = ChainSpecificUtil._getBlockNumber(); + s_requests[requestId] = RequestStatus({ + randomWords: new uint256[](0), + fulfilled: false, + requestTimestamp: block.timestamp, + fulfilmentTimestamp: 0, + requestBlockNumber: requestBlockNumber, + fulfilmentBlockNumber: 0 + }); + s_requestCount++; + requestHeights[requestId] = requestBlockNumber; + } + + COORDINATOR.removeConsumer(subId, address(this)); + COORDINATOR.cancelSubscription(subId, msg.sender); + } + + function reset() external { + s_averageFulfillmentInMillions = 0; // in millions for better precision + s_slowestFulfillment = 0; + s_fastestFulfillment = 999; + s_requestCount = 0; + s_responseCount = 0; + } + + function getRequestStatus( + uint256 _requestId + ) + external + view + returns ( + bool fulfilled, + uint256[] memory randomWords, + uint 
requestTimestamp, + uint fulfilmentTimestamp, + uint256 requestBlockNumber, + uint256 fulfilmentBlockNumber + ) + { + RequestStatus memory request = s_requests[_requestId]; + return ( + request.fulfilled, + request.randomWords, + request.requestTimestamp, + request.fulfilmentTimestamp, + request.requestBlockNumber, + request.fulfilmentBlockNumber + ); + } + + function topUpSubscription(uint256 amount) public onlyOwner { + PLITOKEN.transferAndCall(address(COORDINATOR), amount, abi.encode(subId)); + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFV2ProxyAdmin.sol b/contracts/src/v0.8/vrf/testhelpers/VRFV2ProxyAdmin.sol new file mode 100644 index 00000000..4b0a7fd6 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFV2ProxyAdmin.sol @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol"; + +contract VRFV2ProxyAdmin is ProxyAdmin { + // Nothing here, this is just to generate the gethwrapper for tests. +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFV2RevertingExample.sol b/contracts/src/v0.8/vrf/testhelpers/VRFV2RevertingExample.sol new file mode 100644 index 00000000..649fb3fa --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFV2RevertingExample.sol @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {VRFCoordinatorV2Interface} from "../interfaces/VRFCoordinatorV2Interface.sol"; +import {VRFConsumerBaseV2} from "../VRFConsumerBaseV2.sol"; + +// VRFV2RevertingExample will always revert. Used for testing only, useless in prod. 
+contract VRFV2RevertingExample is VRFConsumerBaseV2 { + uint256[] public s_randomWords; + uint256 public s_requestId; + VRFCoordinatorV2Interface internal COORDINATOR; + LinkTokenInterface internal PLITOKEN; + uint64 public s_subId; + uint256 public s_gasAvailable; + + constructor(address vrfCoordinator, address link) VRFConsumerBaseV2(vrfCoordinator) { + COORDINATOR = VRFCoordinatorV2Interface(vrfCoordinator); + PLITOKEN = LinkTokenInterface(link); + } + + function fulfillRandomWords(uint256, uint256[] memory) internal pure override { + // solhint-disable-next-line custom-errors, reason-string + revert(); + } + + function createSubscriptionAndFund(uint96 amount) external { + if (s_subId == 0) { + s_subId = COORDINATOR.createSubscription(); + COORDINATOR.addConsumer(s_subId, address(this)); + } + // Approve the link transfer. + PLITOKEN.transferAndCall(address(COORDINATOR), amount, abi.encode(s_subId)); + } + + function topUpSubscription(uint96 amount) external { + // solhint-disable-next-line custom-errors + require(s_subId != 0, "sub not set"); + // Approve the link transfer. 
+ PLITOKEN.transferAndCall(address(COORDINATOR), amount, abi.encode(s_subId)); + } + + function updateSubscription(address[] memory consumers) external { + // solhint-disable-next-line custom-errors + require(s_subId != 0, "subID not set"); + for (uint256 i = 0; i < consumers.length; i++) { + COORDINATOR.addConsumer(s_subId, consumers[i]); + } + } + + function requestRandomness( + bytes32 keyHash, + uint64 subId, + uint16 minReqConfs, + uint32 callbackGasLimit, + uint32 numWords + ) external returns (uint256) { + s_requestId = COORDINATOR.requestRandomWords(keyHash, subId, minReqConfs, callbackGasLimit, numWords); + return s_requestId; + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFV2TransparentUpgradeableProxy.sol b/contracts/src/v0.8/vrf/testhelpers/VRFV2TransparentUpgradeableProxy.sol new file mode 100644 index 00000000..f3364d02 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFV2TransparentUpgradeableProxy.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; + +contract VRFV2TransparentUpgradeableProxy is TransparentUpgradeableProxy { + // Nothing special here, this is just to generate the gethwrapper for tests. 
+ constructor( + address _logic, + address admin_, + bytes memory _data + ) payable TransparentUpgradeableProxy(_logic, admin_, _data) {} +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperConsumerExample.sol b/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperConsumerExample.sol new file mode 100644 index 00000000..eedecfd1 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperConsumerExample.sol @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +import {VRFV2WrapperConsumerBase} from "../VRFV2WrapperConsumerBase.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; + +contract VRFV2WrapperConsumerExample is VRFV2WrapperConsumerBase, ConfirmedOwner { + event WrappedRequestFulfilled(uint256 requestId, uint256[] randomWords, uint256 payment); + event WrapperRequestMade(uint256 indexed requestId, uint256 paid); + + struct RequestStatus { + uint256 paid; + bool fulfilled; + uint256[] randomWords; + } + mapping(uint256 => RequestStatus) /* requestId */ /* requestStatus */ public s_requests; + + constructor( + address _link, + address _vrfV2Wrapper + ) ConfirmedOwner(msg.sender) VRFV2WrapperConsumerBase(_link, _vrfV2Wrapper) {} + + function makeRequest( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords + ) external onlyOwner returns (uint256 requestId) { + requestId = requestRandomness(_callbackGasLimit, _requestConfirmations, _numWords); + uint256 paid = VRF_V2_WRAPPER.calculateRequestPrice(_callbackGasLimit); + s_requests[requestId] = RequestStatus({paid: paid, randomWords: new uint256[](0), fulfilled: false}); + emit WrapperRequestMade(requestId, paid); + return requestId; + } + + function fulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) internal override { + // solhint-disable-next-line custom-errors + require(s_requests[_requestId].paid > 0, "request not found"); + s_requests[_requestId].fulfilled = true; + s_requests[_requestId].randomWords = 
_randomWords; + emit WrappedRequestFulfilled(_requestId, _randomWords, s_requests[_requestId].paid); + } + + function getRequestStatus( + uint256 _requestId + ) external view returns (uint256 paid, bool fulfilled, uint256[] memory randomWords) { + // solhint-disable-next-line custom-errors + require(s_requests[_requestId].paid > 0, "request not found"); + RequestStatus memory request = s_requests[_requestId]; + return (request.paid, request.fulfilled, request.randomWords); + } + + /// @notice withdrawLink withdraws the amount specified in amount to the owner + /// @param amount the amount to withdraw, in juels + function withdrawLink(uint256 amount) external onlyOwner { + PLI.transfer(owner(), amount); + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperLoadTestConsumer.sol b/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperLoadTestConsumer.sol new file mode 100644 index 00000000..1576fc50 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperLoadTestConsumer.sol @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +import {VRFV2WrapperConsumerBase} from "../VRFV2WrapperConsumerBase.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {ChainSpecificUtil} from "../../ChainSpecificUtil.sol"; +import {VRFV2WrapperInterface} from "../interfaces/VRFV2WrapperInterface.sol"; + +contract VRFV2WrapperLoadTestConsumer is VRFV2WrapperConsumerBase, ConfirmedOwner { + VRFV2WrapperInterface public immutable i_vrfV2Wrapper; + uint256 public s_responseCount; + uint256 public s_requestCount; + uint256 public s_averageFulfillmentInMillions = 0; // in millions for better precision + uint256 public s_slowestFulfillment = 0; + uint256 public s_fastestFulfillment = 999; + uint256 public s_lastRequestId; + // solhint-disable-next-line plugin-solidity/prefix-storage-variables-with-s-underscore + mapping(uint256 => uint256) internal requestHeights; // requestIds to block number when rand request was made + 
mapping(uint256 => RequestStatus) /* requestId */ /* requestStatus */ public s_requests; + + event WrappedRequestFulfilled(uint256 requestId, uint256[] randomWords, uint256 payment); + event WrapperRequestMade(uint256 indexed requestId, uint256 paid); + + struct RequestStatus { + uint256 paid; + bool fulfilled; + uint256[] randomWords; + uint256 requestTimestamp; + uint256 fulfilmentTimestamp; + uint256 requestBlockNumber; + uint256 fulfilmentBlockNumber; + } + + constructor( + address _link, + address _vrfV2Wrapper + ) ConfirmedOwner(msg.sender) VRFV2WrapperConsumerBase(_link, _vrfV2Wrapper) { + i_vrfV2Wrapper = VRFV2WrapperInterface(_vrfV2Wrapper); + } + + function makeRequests( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords, + uint16 _requestCount + ) external onlyOwner { + for (uint16 i = 0; i < _requestCount; i++) { + uint256 requestId = requestRandomness(_callbackGasLimit, _requestConfirmations, _numWords); + s_lastRequestId = requestId; + uint256 requestBlockNumber = ChainSpecificUtil._getBlockNumber(); + uint256 paid = VRF_V2_WRAPPER.calculateRequestPrice(_callbackGasLimit); + s_requests[requestId] = RequestStatus({ + paid: paid, + fulfilled: false, + randomWords: new uint256[](0), + requestTimestamp: block.timestamp, + fulfilmentTimestamp: 0, + requestBlockNumber: requestBlockNumber, + fulfilmentBlockNumber: 0 + }); + s_requestCount++; + requestHeights[requestId] = requestBlockNumber; + emit WrapperRequestMade(requestId, paid); + } + } + + function fulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) internal override { + // solhint-disable-next-line custom-errors + require(s_requests[_requestId].paid > 0, "request not found"); + uint256 fulfilmentBlockNumber = ChainSpecificUtil._getBlockNumber(); + uint256 requestDelay = fulfilmentBlockNumber - requestHeights[_requestId]; + uint256 requestDelayInMillions = requestDelay * 1_000_000; + + if (requestDelay > s_slowestFulfillment) { + s_slowestFulfillment = 
requestDelay; + } + if (requestDelay < s_fastestFulfillment) { + s_fastestFulfillment = requestDelay; + } + s_averageFulfillmentInMillions = s_responseCount > 0 + ? (s_averageFulfillmentInMillions * s_responseCount + requestDelayInMillions) / (s_responseCount + 1) + : requestDelayInMillions; + + s_responseCount++; + s_requests[_requestId].fulfilled = true; + s_requests[_requestId].randomWords = _randomWords; + s_requests[_requestId].fulfilmentTimestamp = block.timestamp; + s_requests[_requestId].fulfilmentBlockNumber = fulfilmentBlockNumber; + + emit WrappedRequestFulfilled(_requestId, _randomWords, s_requests[_requestId].paid); + } + + function getRequestStatus( + uint256 _requestId + ) + external + view + returns ( + uint256 paid, + bool fulfilled, + uint256[] memory randomWords, + uint256 requestTimestamp, + uint256 fulfilmentTimestamp, + uint256 requestBlockNumber, + uint256 fulfilmentBlockNumber + ) + { + // solhint-disable-next-line custom-errors + require(s_requests[_requestId].paid > 0, "request not found"); + RequestStatus memory request = s_requests[_requestId]; + return ( + request.paid, + request.fulfilled, + request.randomWords, + request.requestTimestamp, + request.fulfilmentTimestamp, + request.requestBlockNumber, + request.fulfilmentBlockNumber + ); + } + + /// @notice withdrawLink withdraws the amount specified in amount to the owner + /// @param amount the amount to withdraw, in juels + function withdrawLink(uint256 amount) external onlyOwner { + PLI.transfer(owner(), amount); + } + + function reset() external { + s_averageFulfillmentInMillions = 0; + s_slowestFulfillment = 0; + s_fastestFulfillment = 999; + s_requestCount = 0; + s_responseCount = 0; + } + + receive() external payable {} +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperOutOfGasConsumerExample.sol b/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperOutOfGasConsumerExample.sol new file mode 100644 index 00000000..353027d5 --- /dev/null +++ 
b/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperOutOfGasConsumerExample.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +import {VRFV2WrapperConsumerBase} from "../VRFV2WrapperConsumerBase.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; + +contract VRFV2WrapperOutOfGasConsumerExample is VRFV2WrapperConsumerBase, ConfirmedOwner { + constructor( + address _link, + address _vrfV2Wrapper + ) ConfirmedOwner(msg.sender) VRFV2WrapperConsumerBase(_link, _vrfV2Wrapper) {} + + function makeRequest( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords + ) external onlyOwner returns (uint256 requestId) { + return requestRandomness(_callbackGasLimit, _requestConfirmations, _numWords); + } + + function fulfillRandomWords(uint256 /* _requestId */, uint256[] memory /* _randomWords */) internal view override { + while (gasleft() > 0) {} + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperRevertingConsumerExample.sol b/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperRevertingConsumerExample.sol new file mode 100644 index 00000000..d78992ac --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperRevertingConsumerExample.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.6; + +import {VRFV2WrapperConsumerBase} from "../VRFV2WrapperConsumerBase.sol"; +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; + +contract VRFV2WrapperRevertingConsumerExample is VRFV2WrapperConsumerBase, ConfirmedOwner { + constructor( + address _link, + address _vrfV2Wrapper + ) ConfirmedOwner(msg.sender) VRFV2WrapperConsumerBase(_link, _vrfV2Wrapper) {} + + function makeRequest( + uint32 _callbackGasLimit, + uint16 _requestConfirmations, + uint32 _numWords + ) external onlyOwner returns (uint256 requestId) { + return requestRandomness(_callbackGasLimit, _requestConfirmations, _numWords); + } + + function fulfillRandomWords(uint256 /* _requestId */, 
uint256[] memory /* _randomWords */) internal pure override { + revert("reverting example"); + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperUnderFundingConsumer.sol b/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperUnderFundingConsumer.sol new file mode 100644 index 00000000..17036d4d --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFV2WrapperUnderFundingConsumer.sol @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {ConfirmedOwner} from "../../shared/access/ConfirmedOwner.sol"; +import {LinkTokenInterface} from "../../shared/interfaces/LinkTokenInterface.sol"; +import {VRFV2WrapperInterface} from "../interfaces/VRFV2WrapperInterface.sol"; + +contract VRFV2WrapperUnderFundingConsumer is ConfirmedOwner { + LinkTokenInterface internal immutable PLI; + VRFV2WrapperInterface internal immutable VRF_V2_WRAPPER; + + constructor(address _link, address _vrfV2Wrapper) ConfirmedOwner(msg.sender) { + PLI = LinkTokenInterface(_link); + VRF_V2_WRAPPER = VRFV2WrapperInterface(_vrfV2Wrapper); + } + + function makeRequest(uint32 _callbackGasLimit, uint16 _requestConfirmations, uint32 _numWords) external onlyOwner { + PLI.transferAndCall( + address(VRF_V2_WRAPPER), + // Pay less than the needed amount + VRF_V2_WRAPPER.calculateRequestPrice(_callbackGasLimit) - 1, + abi.encode(_callbackGasLimit, _requestConfirmations, _numWords) + ); + } +} diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFv2Consumer.sol b/contracts/src/v0.8/vrf/testhelpers/VRFv2Consumer.sol new file mode 100644 index 00000000..4ec7ad08 --- /dev/null +++ b/contracts/src/v0.8/vrf/testhelpers/VRFv2Consumer.sol @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: MIT +// An example of a consumer contract that relies on a subscription for funding. 
+pragma solidity 0.8.6; + +import "../interfaces/VRFCoordinatorV2Interface.sol"; +import "../VRFConsumerBaseV2.sol"; +import "../../shared/access/ConfirmedOwner.sol"; + +/** + * THIS IS AN EXAMPLE CONTRACT THAT USES HARDCODED VALUES FOR CLARITY. + * THIS IS AN EXAMPLE CONTRACT THAT USES UN-AUDITED CODE. + * DO NOT USE THIS CODE IN PRODUCTION. + */ + +contract VRFv2Consumer is VRFConsumerBaseV2, ConfirmedOwner { + event RequestSent(uint256 requestId, uint32 numWords); + event RequestFulfilled(uint256 requestId, uint256[] randomWords); + + struct RequestStatus { + bool fulfilled; // whether the request has been successfully fulfilled + bool exists; // whether a requestId exists + uint256[] randomWords; + } + mapping(uint256 => RequestStatus) public s_requests; /* requestId --> requestStatus */ + VRFCoordinatorV2Interface COORDINATOR; + + // past requests Id. + uint256[] public requestIds; + uint256 public lastRequestId; + + constructor(address vrfCoordinator) VRFConsumerBaseV2(vrfCoordinator) ConfirmedOwner(msg.sender) { + COORDINATOR = VRFCoordinatorV2Interface(vrfCoordinator); + } + + // Assumes the subscription is funded sufficiently. + function requestRandomWords( + uint64 subId, + uint32 callbackGasLimit, + uint16 requestConfirmations, + uint32 numWords, + bytes32 keyHash + ) external onlyOwner returns (uint256 requestId) { + // Will revert if subscription is not set and funded. 
+ requestId = COORDINATOR.requestRandomWords(keyHash, subId, requestConfirmations, callbackGasLimit, numWords); + s_requests[requestId] = RequestStatus({randomWords: new uint256[](0), exists: true, fulfilled: false}); + requestIds.push(requestId); + lastRequestId = requestId; + emit RequestSent(requestId, numWords); + return requestId; + } + + function fulfillRandomWords(uint256 _requestId, uint256[] memory _randomWords) internal override { + require(s_requests[_requestId].exists, "request not found"); + s_requests[_requestId].fulfilled = true; + s_requests[_requestId].randomWords = _randomWords; + emit RequestFulfilled(_requestId, _randomWords); + } + + function getRequestStatus(uint256 _requestId) external view returns (bool fulfilled, uint256[] memory randomWords) { + require(s_requests[_requestId].exists, "request not found"); + RequestStatus memory request = s_requests[_requestId]; + return (request.fulfilled, request.randomWords); + } +} diff --git a/contracts/test/cross-version/KeeperCompatible.test.ts b/contracts/test/cross-version/KeeperCompatible.test.ts new file mode 100644 index 00000000..968ce65f --- /dev/null +++ b/contracts/test/cross-version/KeeperCompatible.test.ts @@ -0,0 +1,34 @@ +import { ethers } from 'hardhat' +import { Contract } from 'ethers' +import { expect } from 'chai' +import { publicAbi } from '../test-helpers/helpers' + +describe('KeeperCompatible', () => { + for (let version = 6; version <= 8; version++) { + describe(`version v0.${version}`, () => { + let contract: Contract + + before(async () => { + const factory = await ethers.getContractFactory( + `src/v0.${version}/tests/KeeperCompatibleTestHelper.sol:KeeperCompatibleTestHelper`, + ) + contract = await factory.deploy() + }) + + it('has a keeper compatible interface [ @skip-coverage ]', async () => { + publicAbi(contract, [ + 'checkUpkeep', + 'performUpkeep', + 'verifyCannotExecute', + ]) + }) + + it('prevents execution of protected functions', async () => { + await contract + 
.connect(ethers.constants.AddressZero) + .verifyCannotExecute() // succeeds + await expect(contract.verifyCannotExecute()).to.be.reverted + }) + }) + } +}) diff --git a/contracts/test/cross-version/directory.test.ts b/contracts/test/cross-version/directory.test.ts new file mode 100644 index 00000000..fde5e592 --- /dev/null +++ b/contracts/test/cross-version/directory.test.ts @@ -0,0 +1,31 @@ +import fs from 'fs' +import path from 'path' +import { expect } from 'chai' + +// Directories that start with a number do not currently work with typechain (https://github.com/dethcrypto/TypeChain/issues/794) +describe('Directory', () => { + it('Should not have a file or directory starting with a number in contracts/src', () => { + const srcPath = path.join(__dirname, '..', '..', 'src') + + const noNumbersAsFirstChar = (dirPath: string): boolean => { + const entries = fs.readdirSync(dirPath, { withFileTypes: true }) + + for (const entry of entries) { + if (/^\d/.test(entry.name)) { + throw new Error( + `${path.join(dirPath, entry.name)} starts with a number`, + ) + } + + if (entry.isDirectory()) { + const newPath = path.join(dirPath, entry.name) + noNumbersAsFirstChar(newPath) + } + } + + return true + } + + expect(noNumbersAsFirstChar(srcPath)).to.be.true + }) +}) diff --git a/contracts/test/test-helpers/debug.ts b/contracts/test/test-helpers/debug.ts new file mode 100644 index 00000000..9f23efe6 --- /dev/null +++ b/contracts/test/test-helpers/debug.ts @@ -0,0 +1,17 @@ +/** + * @packageDocumentation + * + * This file contains functionality for debugging tests, like creating loggers. + */ +import debug from 'debug' + +/** + * This creates a debug logger instance to be used within our internal code. + * + * @see https://www.npmjs.com/package/debug to see how to use the logger at runtime + * @see wallet.ts makes extensive use of this function. 
+ * @param name The root namespace to assign to the log messages + */ +export function makeDebug(name: string): debug.Debugger { + return debug(name) +} diff --git a/contracts/test/test-helpers/fixtures.ts b/contracts/test/test-helpers/fixtures.ts new file mode 100644 index 00000000..6b813bb5 --- /dev/null +++ b/contracts/test/test-helpers/fixtures.ts @@ -0,0 +1,49 @@ +export const validCrons = [ + '* * * * *', // every minute + '*/2 * * * *', // every even minute + '0 * * * *', // every hour + '0 0 * * *', // every day at midnight + '0 12 * * *', // every day at noon + '0 12 * * 0-4', // week days at noon + '0 0 1 * *', // every month on the first at midnight + '0 0 1 7 *', // first of July at midnight + '0 0 * * 1', // every monday at midnight + '*/5 * * * *', // every 5 min + '0 0 * * 2-4', // wed - friday at midnight + '0 * 31 * 0', // 31st day of the month, mondays, at midnight + '59 23 29 2 1', // last minute of tuesday leap days + '0 12 1,3,5,7,11,13,17,19,23,27,29,31 * *', // prime days at noon + '*/20 3,7,20 10-20 */2 5-6', // every 20 min b/t hours 3:4, 7:8, and 20:21 on the 10-20th days, even months, weekends + '0 0 29 2 *', // every leap day + '0 0 */2 2 *', // every even day in february +] + +export const invalidCrons = [ + '60 * * * *', // invalid minute + '0 24 * * *', // invalid hour + '0 * 32 * *', // invalid day + '0 * 0 * *', // invalid day + '0 * * 13 *', // invalid month + '0 * * 0 *', // invalid month + '* * 30 2 *', // invalid day/month + '* * 31 2,4,6,9,11 *', // invalid day/month + '* * 31 */9 *', // invalid day/month + '* * 20-31 2 *', // invalid day/month + '* * 28,29,30 2 *', // invalid day/month + '0 * * * 7', // invalid day of week + '0 12-24 * * 7', // invalid hour range + '0 * * * 5-10', // invalid day of week range + '0 * * * 1-1', // invalid range + '0 * * * 2-1', // invalid range + '0 0,3,5,30 * * *', // invalid hour list + '*/100 * * * *', // invalid interval + '*/0 * * * *', // invalid interval + '0****', // no spaces + '0 * * 
**', // too few spaces + '0 * * * *', // too many spaces + ' 0 * * * *', // leading whitespace + '0 * * * * ', // trailing whitespace + '0 * * * ', // field missing + '0 1, * * *', // invalid list + '0 * 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27 * *', // list too big +] diff --git a/contracts/test/test-helpers/helpers.ts b/contracts/test/test-helpers/helpers.ts new file mode 100644 index 00000000..e65cd57e --- /dev/null +++ b/contracts/test/test-helpers/helpers.ts @@ -0,0 +1,342 @@ +import { BigNumber, BigNumberish, Contract, ContractTransaction } from 'ethers' +import { providers } from 'ethers' +import { assert, expect } from 'chai' +import hre, { ethers, network } from 'hardhat' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import cbor from 'cbor' +import { LinkToken } from '../../typechain' + +/** + * Convert string to hex bytes + * @param data string to convert to hex bytes + */ +export function stringToBytes(data: string): string { + return ethers.utils.hexlify(ethers.utils.toUtf8Bytes(data)) +} + +/** + * Add a hex prefix to a hex string + * + * @param hex The hex string to prepend the hex prefix to + */ +export function addHexPrefix(hex: string): string { + return hex.startsWith('0x') ? 
hex : `0x${hex}` +} + +/** + * Convert a number value to bytes32 format + * + * @param num The number value to convert to bytes32 format + */ +export function numToBytes32( + num: Parameters[0], +): string { + const hexNum = ethers.utils.hexlify(num) + const strippedNum = stripHexPrefix(hexNum) + if (strippedNum.length > 32 * 2) { + throw Error( + 'Cannot convert number to bytes32 format, value is greater than maximum bytes32 value', + ) + } + return addHexPrefix(strippedNum.padStart(32 * 2, '0')) +} + +/** + * Retrieve single log from transaction + * + * @param tx The transaction to wait for, then extract logs from + * @param index The index of the log to retrieve + */ +export async function getLog( + tx: ContractTransaction, + index: number, +): Promise { + const logs = await getLogs(tx) + if (!logs[index]) { + throw Error('unable to extract log from transaction receipt') + } + return logs[index] +} + +/** + * Extract array of logs from a transaction + * + * @param tx The transaction to wait for, then extract logs from + */ +export async function getLogs( + tx: ContractTransaction, +): Promise { + const receipt = await tx.wait() + if (!receipt.logs) { + throw Error('unable to extract logs from transaction receipt') + } + return receipt.logs +} + +/** + * Convert a UTF-8 string into a bytes32 hex string representation + * + * The inverse function of [[parseBytes32String]] + * + * @param args The UTF-8 string representation to convert to a bytes32 hex string representation + */ +export function toBytes32String( + ...args: Parameters +): ReturnType { + return ethers.utils.formatBytes32String(...args) +} + +/** + * Strip the leading 0x hex prefix from a hex string + * + * @param hex The hex string to strip the leading hex prefix out of + */ +export function stripHexPrefix(hex: string): string { + if (!ethers.utils.isHexString(hex)) { + throw Error(`Expected valid hex string, got: "${hex}"`) + } + + return hex.replace('0x', '') +} + +/** + * Create a buffer from a hex 
string + * + * @param hexstr The hex string to convert to a buffer + */ +export function hexToBuf(hexstr: string): Buffer { + return Buffer.from(stripHexPrefix(hexstr), 'hex') +} + +/** + * Decodes a CBOR hex string, and adds opening and closing brackets to the CBOR if they are not present. + * + * @param hexstr The hex string to decode + */ +export function decodeDietCBOR(hexstr: string) { + const buf = hexToBuf(hexstr) + + return cbor.decodeFirstSync(addCBORMapDelimiters(buf)) +} + +/** + * Add a starting and closing map characters to a CBOR encoding if they are not already present. + */ +export function addCBORMapDelimiters(buffer: Buffer): Buffer { + if (buffer[0] >> 5 === 5) { + return buffer + } + + /** + * This is the opening character of a CBOR map. + * @see https://en.wikipedia.org/wiki/CBOR#CBOR_data_item_header + */ + const startIndefiniteLengthMap = Buffer.from([0xbf]) + /** + * This is the closing character in a CBOR map. + * @see https://en.wikipedia.org/wiki/CBOR#CBOR_data_item_header + */ + const endIndefiniteLengthMap = Buffer.from([0xff]) + return Buffer.concat( + [startIndefiniteLengthMap, buffer, endIndefiniteLengthMap], + buffer.length + 2, + ) +} + +/** + * Convert an Ether value to a wei amount + * + * @param args Ether value to convert to an Ether amount + */ +export function toWei( + ...args: Parameters +): ReturnType { + return ethers.utils.parseEther(...args) +} + +/** + * Converts any number, BigNumber, hex string or Arrayish to a hex string. 
+ * + * @param args Value to convert to a hex string + */ +export function toHex( + ...args: Parameters +): ReturnType { + return ethers.utils.hexlify(...args) +} + +/** + * Increase the current time within the evm to 5 minutes past the current time + * + * @param provider The ethers provider to send the time increase request to + */ +export async function increaseTime5Minutes( + provider: providers.JsonRpcProvider, +): Promise { + await increaseTimeBy(5 * 60, provider) +} + +/** + * Increase the current time within the evm to "n" seconds past the current time + * + * @param seconds The number of seconds to increase to the current time by + * @param provider The ethers provider to send the time increase request to + */ +export async function increaseTimeBy( + seconds: number, + provider: providers.JsonRpcProvider, +) { + await provider.send('evm_increaseTime', [seconds]) +} + +/** + * Instruct the provider to mine an additional block + * + * @param provider The ethers provider to instruct to mine an additional block + */ +export async function mineBlock(provider: providers.JsonRpcProvider) { + await provider.send('evm_mine', []) +} + +/** + * Parse out an evm word (32 bytes) into an address (20 bytes) representation + * + * @param hex The evm word in hex string format to parse the address + * out of. + */ +export function evmWordToAddress(hex?: string): string { + if (!hex) { + throw Error('Input not defined') + } + + assert.equal(hex.slice(0, 26), '0x000000000000000000000000') + return ethers.utils.getAddress(hex.slice(26)) +} + +/** + * Check that a contract's abi exposes the expected interface. + * + * @param contract The contract with the actual abi to check the expected exposed methods and getters against. + * @param expectedPublic The expected public exposed methods and getters to match against the actual abi. 
+ */ +export function publicAbi( + contract: Contract, + expectedPublic: string[], +): boolean { + const actualPublic = [] + for (const m in contract.functions) { + if (!m.includes('(')) { + actualPublic.push(m) + } + } + + for (const method of actualPublic) { + const index = expectedPublic.indexOf(method) + assert.isAtLeast(index, 0, `#${method} is NOT expected to be public`) + } + + for (const method of expectedPublic) { + const index = actualPublic.indexOf(method) + assert.isAtLeast(index, 0, `#${method} is expected to be public`) + } + + return true +} + +/** + * Converts an L1 address to an Arbitrum L2 address + * + * @param l1Address Address on L1 + */ +export function toArbitrumL2AliasAddress(l1Address: string): string { + return ethers.utils.getAddress( + BigNumber.from(l1Address) + .add('0x1111000000000000000000000000000000001111') + .toHexString() + .replace('0x01', '0x'), + ) +} + +/** + * Lets you impersonate and sign transactions from any account. + * + * @param address Address to impersonate + */ +export async function impersonateAs( + address: string, +): Promise { + await hre.network.provider.request({ + method: 'hardhat_impersonateAccount', + params: [address], + }) + return await ethers.getSigner(address) +} + +export async function stopImpersonateAs(address: string): Promise { + await hre.network.provider.request({ + method: 'hardhat_stopImpersonatingAccount', + params: [address], + }) +} + +export async function assertBalance( + address: string, + balance: BigNumberish, + msg?: string, +) { + expect(await ethers.provider.getBalance(address)).equal(balance, msg) +} + +export async function assertLinkTokenBalance( + lt: LinkToken, + address: string, + balance: BigNumberish, + msg?: string, +) { + expect(await lt.balanceOf(address)).equal(balance, msg) +} + +export async function assertSubscriptionBalance( + coordinator: Contract, + subID: BigNumberish, + balance: BigNumberish, + msg?: string, +) { + expect((await 
coordinator.getSubscription(subID)).balance).deep.equal( + balance, + msg, + ) +} + +export async function setTimestamp(timestamp: number) { + await network.provider.request({ + method: 'evm_setNextBlockTimestamp', + params: [timestamp], + }) + await network.provider.request({ + method: 'evm_mine', + params: [], + }) +} + +export async function fastForward(duration: number) { + await network.provider.request({ + method: 'evm_increaseTime', + params: [duration], + }) + await network.provider.request({ + method: 'evm_mine', + params: [], + }) +} + +export async function reset() { + await network.provider.request({ + method: 'hardhat_reset', + params: [], + }) +} + +export function randomAddress() { + return ethers.Wallet.createRandom().address +} diff --git a/contracts/test/test-helpers/matchers.ts b/contracts/test/test-helpers/matchers.ts new file mode 100644 index 00000000..8daa6731 --- /dev/null +++ b/contracts/test/test-helpers/matchers.ts @@ -0,0 +1,57 @@ +import { BigNumber, BigNumberish } from '@ethersproject/bignumber' +import { ContractReceipt } from '@ethersproject/contracts' +import { assert, expect } from 'chai' + +/** + * Check that two big numbers are the same value. + * + * @param expected The expected value to match against + * @param actual The actual value to match against the expected value + * @param failureMessage Failure message to display if the actual value does not match the expected value. + */ +export function bigNumEquals( + expected: BigNumberish, + actual: BigNumberish, + failureMessage?: string, +): void { + const msg = failureMessage ? ': ' + failureMessage : '' + assert( + BigNumber.from(expected).eq(BigNumber.from(actual)), + `BigNum (expected)${expected} is not (actual)${actual} ${msg}`, + ) +} + +/** + * Check that an evm operation reverts + * + * @param action The asynchronous action to execute, which should cause an evm revert. 
+ * @param msg The failure message to display if the action __does not__ throw + */ +export async function evmRevert( + action: (() => Promise) | Promise, + msg?: string, +) { + if (msg) { + await expect(action).to.be.revertedWith(msg) + } else { + await expect(action).to.be.reverted + } +} + +/** + * Assert that an event doesnt exist + * + * @param max The maximum allowable gas difference + * @param receipt1 The contract receipt to compare to + * @param receipt2 The contract receipt with a gas difference + */ +export function gasDiffLessThan( + max: number, + receipt1: ContractReceipt, + receipt2: ContractReceipt, +) { + assert(receipt1, 'receipt1 is not present for gas comparison') + assert(receipt2, 'receipt2 is not present for gas comparison') + const diff = receipt2.gasUsed?.sub(receipt1.gasUsed || 0) + assert.isAbove(max, diff?.toNumber() || Infinity) +} diff --git a/contracts/test/test-helpers/oracle.ts b/contracts/test/test-helpers/oracle.ts new file mode 100644 index 00000000..07947b26 --- /dev/null +++ b/contracts/test/test-helpers/oracle.ts @@ -0,0 +1,465 @@ +/** + * @packageDocumentation + * + * This file provides convenience functions to interact with existing solidity contract abstraction libraries, such as + * @truffle/contract and ethers.js specifically for our `Oracle.sol` solidity smart contract. + */ +import { BigNumberish } from '@ethersproject/bignumber/lib/bignumber' +import { ethers } from 'ethers' +import { makeDebug } from './debug' +import { addCBORMapDelimiters, stripHexPrefix, toHex } from './helpers' +const debug = makeDebug('oracle') + +/** + * Transaction options such as gasLimit, gasPrice, data, ... + */ +type TxOptions = Omit + +/** + * A run request is an event emitted by `Oracle.sol` which triggers a job run + * on a receiving plugin node watching for RunRequests coming from that + * specId + optionally requester. 
+ */ +export interface RunRequest { + /** + * The ID of the job spec this request is targeting + * + * @solformat bytes32 + */ + specId: string + /** + * The requester of the run + * + * @solformat address + */ + requester: string + /** + * The ID of the request, check Oracle.sol#oracleRequest to see how its computed + * + * @solformat bytes32 + */ + requestId: string + /** + * The amount of PLI used for payment + * + * @solformat uint256 + */ + payment: string + /** + * The address of the contract instance to callback with the fulfillment result + * + * @solformat address + */ + callbackAddr: string + /** + * The function selector of the method that the oracle should call after fulfillment + * + * @solformat bytes4 + */ + callbackFunc: string + /** + * The expiration that the node should respond by before the requester can cancel + * + * @solformat uint256 + */ + expiration: string + /** + * The specified data version + * + * @solformat uint256 + */ + dataVersion: number + /** + * The CBOR encoded payload of the request + * + * @solformat bytes + */ + data: Buffer + + /** + * The hash of the signature of the OracleRequest event. + * ```solidity + * event OracleRequest( + * bytes32 indexed specId, + * address requester, + * bytes32 requestId, + * uint256 payment, + * address callbackAddr, + * bytes4 callbackFunctionId, + * uint256 cancelExpiration, + * uint256 dataVersion, + * bytes data + * ); + * ``` + * Note: this is a property used for testing purposes only. + * It is not part of the actual run request. + * + * @solformat bytes32 + */ + topic: string +} + +/** + * Convert the javascript format of the parameters needed to call the + * ```solidity + * function fulfillOracleRequest( + * bytes32 _requestId, + * uint256 _payment, + * address _callbackAddress, + * bytes4 _callbackFunctionId, + * uint256 _expiration, + * bytes32 _data + * ) + * ``` + * method on an Oracle.sol contract. 
+ * + * @param runRequest The run request to flatten into the correct order to perform the `fulfillOracleRequest` function + * @param response The response to fulfill the run request with, if it is an ascii string, it is converted to bytes32 string + * @param txOpts Additional ethereum tx options + */ +export function convertFufillParams( + runRequest: RunRequest, + response: string, + txOpts: TxOptions = {}, +): [string, string, string, string, string, string, TxOptions] { + const d = debug.extend('fulfillOracleRequestParams') + d('Response param: %s', response) + + const bytes32Len = 32 * 2 + 2 + const convertedResponse = + response.length < bytes32Len + ? ethers.utils.formatBytes32String(response) + : response + d('Converted Response param: %s', convertedResponse) + + return [ + runRequest.requestId, + runRequest.payment, + runRequest.callbackAddr, + runRequest.callbackFunc, + runRequest.expiration, + convertedResponse, + txOpts, + ] +} + +/** + * Convert the javascript format of the parameters needed to call the + * ```solidity + * function fulfillOracleRequest2( + * bytes32 _requestId, + * uint256 _payment, + * address _callbackAddress, + * bytes4 _callbackFunctionId, + * uint256 _expiration, + * bytes memory _data + * ) + * ``` + * method on an Oracle.sol contract. 
+ * + * @param runRequest The run request to flatten into the correct order to perform the `fulfillOracleRequest` function + * @param response The response to fulfill the run request with, if it is an ascii string, it is converted to bytes32 string + * @param txOpts Additional ethereum tx options + */ +export function convertFulfill2Params( + runRequest: RunRequest, + responseTypes: string[], + responseValues: string[], + txOpts: TxOptions = {}, +): [string, string, string, string, string, string, TxOptions] { + const d = debug.extend('fulfillOracleRequestParams') + d('Response param: %s', responseValues) + const types = [...responseTypes] + const values = [...responseValues] + types.unshift('bytes32') + values.unshift(runRequest.requestId) + const convertedResponse = ethers.utils.defaultAbiCoder.encode(types, values) + d('Encoded Response param: %s', convertedResponse) + return [ + runRequest.requestId, + runRequest.payment, + runRequest.callbackAddr, + runRequest.callbackFunc, + runRequest.expiration, + convertedResponse, + txOpts, + ] +} + +/** + * Convert the javascript format of the parameters needed to call the + * ```solidity + * function cancelOracleRequest( + * bytes32 _requestId, + * uint256 _payment, + * bytes4 _callbackFunc, + * uint256 _expiration + * ) + * ``` + * method on an Oracle.sol contract. 
+ * + * @param runRequest The run request to flatten into the correct order to perform the `cancelOracleRequest` function + * @param txOpts Additional ethereum tx options + */ +export function convertCancelParams( + runRequest: RunRequest, + txOpts: TxOptions = {}, +): [string, string, string, string, TxOptions] { + return [ + runRequest.requestId, + runRequest.payment, + runRequest.callbackFunc, + runRequest.expiration, + txOpts, + ] +} + +/** + * Convert the javascript format of the parameters needed to call the + * ```solidity + * function cancelOracleRequestByRequester( + * uint256 nonce, + * uint256 _payment, + * bytes4 _callbackFunc, + * uint256 _expiration + * ) + * ``` + * method on an Oracle.sol contract. + * + * @param nonce The nonce used to generate the request ID + * @param runRequest The run request to flatten into the correct order to perform the `cancelOracleRequest` function + * @param txOpts Additional ethereum tx options + */ +export function convertCancelByRequesterParams( + runRequest: RunRequest, + nonce: number, + txOpts: TxOptions = {}, +): [number, string, string, string, TxOptions] { + return [ + nonce, + runRequest.payment, + runRequest.callbackFunc, + runRequest.expiration, + txOpts, + ] +} + +/** + * Abi encode parameters to call the `oracleRequest` method on the Oracle.sol contract. 
+ * ```solidity + * function oracleRequest( + * address _sender, + * uint256 _payment, + * bytes32 _specId, + * address _callbackAddress, + * bytes4 _callbackFunctionId, + * uint256 _nonce, + * uint256 _dataVersion, + * bytes _data + * ) + * ``` + * + * @param specId The Job Specification ID + * @param callbackAddr The callback contract address for the response + * @param callbackFunctionId The callback function id for the response + * @param nonce The nonce sent by the requester + * @param data The CBOR payload of the request + */ +export function encodeOracleRequest( + specId: string, + callbackAddr: string, + callbackFunctionId: string, + nonce: number, + data: BigNumberish, + dataVersion: BigNumberish = 1, +): string { + const oracleRequestSighash = '0x40429946' + return encodeRequest( + oracleRequestSighash, + specId, + callbackAddr, + callbackFunctionId, + nonce, + data, + dataVersion, + ) +} + +/** + * Abi encode parameters to call the `operatorRequest` method on the Operator.sol contract. 
+ * ```solidity + * function operatorRequest( + * address _sender, + * uint256 _payment, + * bytes32 _specId, + * address _callbackAddress, + * bytes4 _callbackFunctionId, + * uint256 _nonce, + * uint256 _dataVersion, + * bytes _data + * ) + * ``` + * + * @param specId The Job Specification ID + * @param callbackAddr The callback contract address for the response + * @param callbackFunctionId The callback function id for the response + * @param nonce The nonce sent by the requester + * @param data The CBOR payload of the request + */ +export function encodeRequestOracleData( + specId: string, + callbackFunctionId: string, + nonce: number, + data: BigNumberish, + dataVersion: BigNumberish = 2, +): string { + const sendOperatorRequestSigHash = '0x3c6d41b9' + const requestInputs = [ + { name: '_sender', type: 'address' }, + { name: '_payment', type: 'uint256' }, + { name: '_specId', type: 'bytes32' }, + { name: '_callbackFunctionId', type: 'bytes4' }, + { name: '_nonce', type: 'uint256' }, + { name: '_dataVersion', type: 'uint256' }, + { name: '_data', type: 'bytes' }, + ] + const encodedParams = ethers.utils.defaultAbiCoder.encode( + requestInputs.map((i) => i.type), + [ + ethers.constants.AddressZero, + 0, + specId, + callbackFunctionId, + nonce, + dataVersion, + data, + ], + ) + return `${sendOperatorRequestSigHash}${stripHexPrefix(encodedParams)}` +} + +function encodeRequest( + oracleRequestSighash: string, + specId: string, + callbackAddr: string, + callbackFunctionId: string, + nonce: number, + data: BigNumberish, + dataVersion: BigNumberish = 1, +): string { + const oracleRequestInputs = [ + { name: '_sender', type: 'address' }, + { name: '_payment', type: 'uint256' }, + { name: '_specId', type: 'bytes32' }, + { name: '_callbackAddress', type: 'address' }, + { name: '_callbackFunctionId', type: 'bytes4' }, + { name: '_nonce', type: 'uint256' }, + { name: '_dataVersion', type: 'uint256' }, + { name: '_data', type: 'bytes' }, + ] + const encodedParams = 
ethers.utils.defaultAbiCoder.encode( + oracleRequestInputs.map((i) => i.type), + [ + ethers.constants.AddressZero, + 0, + specId, + callbackAddr, + callbackFunctionId, + nonce, + dataVersion, + data, + ], + ) + return `${oracleRequestSighash}${stripHexPrefix(encodedParams)}` +} + +/** + * Extract a javascript representation of a run request from the data + * contained within a EVM log. + * ```solidity + * event OracleRequest( + * bytes32 indexed specId, + * address requester, + * bytes32 requestId, + * uint256 payment, + * address callbackAddr, + * bytes4 callbackFunctionId, + * uint256 cancelExpiration, + * uint256 dataVersion, + * bytes data + * ); + * ``` + * + * @param log The log to extract the run request from + */ +export function decodeRunRequest(log?: ethers.providers.Log): RunRequest { + if (!log) { + throw Error('No logs found to decode') + } + + const ORACLE_REQUEST_TYPES = [ + 'address', + 'bytes32', + 'uint256', + 'address', + 'bytes4', + 'uint256', + 'uint256', + 'bytes', + ] + const [ + requester, + requestId, + payment, + callbackAddress, + callbackFunc, + expiration, + version, + data, + ] = ethers.utils.defaultAbiCoder.decode(ORACLE_REQUEST_TYPES, log.data) + + return { + specId: log.topics[1], + requester, + requestId: toHex(requestId), + payment: toHex(payment), + callbackAddr: callbackAddress, + callbackFunc: toHex(callbackFunc), + expiration: toHex(expiration), + data: addCBORMapDelimiters(Buffer.from(stripHexPrefix(data), 'hex')), + dataVersion: version.toNumber(), + + topic: log.topics[0], + } +} + +/** + * Extract a javascript representation of a ConcretePlugined#Request event + * from an EVM log. + * ```solidity + * event Request( + * bytes32 id, + * address callbackAddress, + * bytes4 callbackfunctionSelector, + * bytes data + * ); + * ``` + * The request event is emitted from the `ConcretePlugined.sol` testing contract. 
+ * + * @param log The log to decode + */ +export function decodeCCRequest( + log: ethers.providers.Log, +): ethers.utils.Result { + const d = debug.extend('decodeRunABI') + d('params %o', log) + + const REQUEST_TYPES = ['bytes32', 'address', 'bytes4', 'bytes'] + const decodedValue = ethers.utils.defaultAbiCoder.decode( + REQUEST_TYPES, + log.data, + ) + d('decoded value %o', decodedValue) + + return decodedValue +} diff --git a/contracts/test/test-helpers/setup.ts b/contracts/test/test-helpers/setup.ts new file mode 100644 index 00000000..0c741d7e --- /dev/null +++ b/contracts/test/test-helpers/setup.ts @@ -0,0 +1,92 @@ +import { ethers } from 'hardhat' +// Suppress "Duplicate definition" error logs +ethers.utils.Logger.setLogLevel(ethers.utils.Logger.levels.ERROR) + +import { Signer } from 'ethers' + +export interface Contracts { + contract1: Signer + contract2: Signer + contract3: Signer + contract4: Signer + contract5: Signer + contract6: Signer + contract7: Signer + contract8: Signer +} + +export interface Roles { + defaultAccount: Signer + oracleNode: Signer + oracleNode1: Signer + oracleNode2: Signer + oracleNode3: Signer + oracleNode4: Signer + stranger: Signer + consumer: Signer + consumer2: Signer +} + +export interface Personas { + Default: Signer + Carol: Signer + Eddy: Signer + Nancy: Signer + Ned: Signer + Neil: Signer + Nelly: Signer + Norbert: Signer + Nick: Signer +} + +export interface Users { + contracts: Contracts + roles: Roles + personas: Personas +} + +export async function getUsers() { + const accounts = await ethers.getSigners() + + const personas: Personas = { + Default: accounts[0], + Neil: accounts[1], + Ned: accounts[2], + Nelly: accounts[3], + Nancy: accounts[4], + Norbert: accounts[5], + Carol: accounts[6], + Eddy: accounts[7], + Nick: accounts[8], + } + + const contracts: Contracts = { + contract1: accounts[0], + contract2: accounts[1], + contract3: accounts[2], + contract4: accounts[3], + contract5: accounts[4], + contract6: 
accounts[5], + contract7: accounts[6], + contract8: accounts[7], + } + + const roles: Roles = { + defaultAccount: accounts[0], + oracleNode: accounts[1], + oracleNode1: accounts[2], + oracleNode2: accounts[3], + oracleNode3: accounts[4], + oracleNode4: accounts[5], + stranger: accounts[6], + consumer: accounts[7], + consumer2: accounts[8], + } + + const users: Users = { + personas, + roles, + contracts, + } + return users +} diff --git a/contracts/test/v0.8/Cron.test.ts b/contracts/test/v0.8/Cron.test.ts new file mode 100644 index 00000000..2cdc7c24 --- /dev/null +++ b/contracts/test/v0.8/Cron.test.ts @@ -0,0 +1,127 @@ +import moment from 'moment' +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { CronInternalTestHelper } from '../../typechain/CronInternalTestHelper' +import { CronExternalTestHelper } from '../../typechain/CronExternalTestHelper' +import { invalidCrons, validCrons } from '../test-helpers/fixtures' +import { reset, setTimestamp } from '../test-helpers/helpers' + +let cron: CronInternalTestHelper | CronExternalTestHelper +let cronInternal: CronInternalTestHelper +let cronExternal: CronExternalTestHelper + +const timeStamp = 32503680000 // Jan 1, 3000 12:00AM + +describe('Cron', () => { + beforeEach(async () => { + const accounts = await ethers.getSigners() + const admin = accounts[1] + const cronInternalTestHelperFactory = await ethers.getContractFactory( + 'CronInternalTestHelper', + ) + cronInternal = await cronInternalTestHelperFactory.deploy() + const cronExternalFactory = await ethers.getContractFactory( + 'src/v0.8/automation/libraries/external/Cron.sol:Cron', + admin, + ) + const cronExternalLib = await cronExternalFactory.deploy() + const cronExternalTestHelperFactory = await ethers.getContractFactory( + 'CronExternalTestHelper', + { + libraries: { + Cron: cronExternalLib.address, + }, + }, + ) + cronExternal = await cronExternalTestHelperFactory.deploy() + }) + + afterEach(async () => { + await reset() + }) + 
+ for (let libType of ['Internal', 'External']) { + describe(libType, () => { + beforeEach(() => { + cron = libType === 'Internal' ? cronInternal : cronExternal + }) + + describe('encodeCronString() / encodedSpecToString()', () => { + it('converts all valid cron strings to encoded structs and back', async () => { + const tests = validCrons.map(async (input) => { + const spec = await cron.encodeCronString(input) + const output = await cron.encodedSpecToString(spec) + assert.equal(output, input) + }) + await Promise.all(tests) + }) + + it('errors while parsing invalid cron strings', async () => { + for (let idx = 0; idx < invalidCrons.length; idx++) { + const input = invalidCrons[idx] + await expect( + cron.encodeCronString(input), + `expected ${input} to be invalid`, + ).to.be.revertedWith('') + } + }) + }) + + describe('calculateNextTick() / calculateLastTick()', () => { + it('correctly identifies the next & last ticks for cron jobs', async () => { + await setTimestamp(timeStamp) + const now = () => moment.unix(timeStamp) + const tests = [ + { + cron: '0 0 31 * *', // every 31st day at midnight + nextTick: now().add(30, 'days').unix(), + lastTick: now().subtract(1, 'day').unix(), + }, + { + cron: '0 12 * * *', // every day at noon + nextTick: now().add(12, 'hours').unix(), + lastTick: now().subtract(12, 'hours').unix(), + }, + { + cron: '10 2,4,6 * * *', // at 2:10, 4:10 and 6:10 + nextTick: now().add(2, 'hours').add(10, 'minutes').unix(), + lastTick: now() + .subtract(17, 'hours') + .subtract(50, 'minutes') + .unix(), + }, + { + cron: '0 0 1 */3 *', // every 3rd month at midnight + nextTick: now().add(2, 'months').unix(), + lastTick: now().subtract(1, 'months').unix(), + }, + { + cron: '30 12 29 2 *', // 12:30 on leap days + nextTick: 32634966600, // February 29, 3004 12:30 PM + lastTick: 32382592200, // February 29, 2996 12:30 PM + }, + ] + for (let idx = 0; idx < tests.length; idx++) { + const test = tests[idx] + const nextTick = ( + await 
cron.calculateNextTick(test.cron) + ).toNumber() + const lastTick = ( + await cron.calculateLastTick(test.cron) + ).toNumber() + assert.equal( + nextTick, + test.nextTick, + `got wrong next tick for "${test.cron}"`, + ) + assert.equal( + lastTick, + test.lastTick, + `got wrong next tick for "${test.cron}"`, + ) + } + }) + }) + }) + } +}) diff --git a/contracts/test/v0.8/Flags.test.ts b/contracts/test/v0.8/Flags.test.ts new file mode 100644 index 00000000..eff0912c --- /dev/null +++ b/contracts/test/v0.8/Flags.test.ts @@ -0,0 +1,405 @@ +import { ethers } from 'hardhat' +import { publicAbi } from '../test-helpers/helpers' +import { assert, expect } from 'chai' +import { Contract, ContractFactory } from 'ethers' +import { Personas, getUsers } from '../test-helpers/setup' + +let personas: Personas + +let controllerFactory: ContractFactory +let flagsFactory: ContractFactory +let consumerFactory: ContractFactory + +let controller: Contract +let flags: Contract +let consumer: Contract + +before(async () => { + personas = (await getUsers()).personas + controllerFactory = await ethers.getContractFactory( + 'src/v0.8/shared/access/SimpleWriteAccessController.sol:SimpleWriteAccessController', + personas.Nelly, + ) + consumerFactory = await ethers.getContractFactory( + 'src/v0.8/tests/FlagsTestHelper.sol:FlagsTestHelper', + personas.Nelly, + ) + flagsFactory = await ethers.getContractFactory( + 'src/v0.8/Flags.sol:Flags', + personas.Nelly, + ) +}) + +describe('Flags', () => { + beforeEach(async () => { + controller = await controllerFactory.deploy() + flags = await flagsFactory.deploy(controller.address) + await flags.disableAccessCheck() + consumer = await consumerFactory.deploy(flags.address) + }) + + it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(flags, [ + 'getFlag', + 'getFlags', + 'lowerFlags', + 'raiseFlag', + 'raiseFlags', + 'raisingAccessController', + 'setRaisingAccessController', + // Ownable methods: + 'acceptOwnership', + 
'owner', + 'transferOwnership', + // AccessControl methods: + 'addAccess', + 'disableAccessCheck', + 'enableAccessCheck', + 'removeAccess', + 'checkEnabled', + 'hasAccess', + ]) + }) + + describe('#raiseFlag', () => { + describe('when called by the owner', () => { + it('updates the warning flag', async () => { + assert.equal(false, await flags.getFlag(consumer.address)) + + await flags.connect(personas.Nelly).raiseFlag(consumer.address) + + assert.equal(true, await flags.getFlag(consumer.address)) + }) + + it('emits an event log', async () => { + await expect(flags.connect(personas.Nelly).raiseFlag(consumer.address)) + .to.emit(flags, 'FlagRaised') + .withArgs(consumer.address) + }) + + describe('if a flag has already been raised', () => { + beforeEach(async () => { + await flags.connect(personas.Nelly).raiseFlag(consumer.address) + }) + + it('emits an event log', async () => { + const tx = await flags + .connect(personas.Nelly) + .raiseFlag(consumer.address) + const receipt = await tx.wait() + assert.equal(0, receipt.events?.length) + }) + }) + }) + + describe('when called by an enabled setter', () => { + beforeEach(async () => { + await controller + .connect(personas.Nelly) + .addAccess(await personas.Neil.getAddress()) + }) + + it('sets the flags', async () => { + await flags.connect(personas.Neil).raiseFlag(consumer.address), + assert.equal(true, await flags.getFlag(consumer.address)) + }) + }) + + describe('when called by a non-enabled setter', () => { + it('reverts', async () => { + await expect( + flags.connect(personas.Neil).raiseFlag(consumer.address), + ).to.be.revertedWith('Not allowed to raise flags') + }) + }) + + describe('when called when there is no raisingAccessController', () => { + beforeEach(async () => { + await expect( + flags + .connect(personas.Nelly) + .setRaisingAccessController( + '0x0000000000000000000000000000000000000000', + ), + ).to.emit(flags, 'RaisingAccessControllerUpdated') + assert.equal( + 
'0x0000000000000000000000000000000000000000', + await flags.raisingAccessController(), + ) + }) + + it('succeeds for the owner', async () => { + await flags.connect(personas.Nelly).raiseFlag(consumer.address) + assert.equal(true, await flags.getFlag(consumer.address)) + }) + + it('reverts for non-owner', async () => { + await expect(flags.connect(personas.Neil).raiseFlag(consumer.address)) + .to.be.reverted + }) + }) + }) + + describe('#raiseFlags', () => { + describe('when called by the owner', () => { + it('updates the warning flag', async () => { + assert.equal(false, await flags.getFlag(consumer.address)) + + await flags.connect(personas.Nelly).raiseFlags([consumer.address]) + + assert.equal(true, await flags.getFlag(consumer.address)) + }) + + it('emits an event log', async () => { + await expect( + flags.connect(personas.Nelly).raiseFlags([consumer.address]), + ) + .to.emit(flags, 'FlagRaised') + .withArgs(consumer.address) + }) + + describe('if a flag has already been raised', () => { + beforeEach(async () => { + await flags.connect(personas.Nelly).raiseFlags([consumer.address]) + }) + + it('emits an event log', async () => { + const tx = await flags + .connect(personas.Nelly) + .raiseFlags([consumer.address]) + const receipt = await tx.wait() + assert.equal(0, receipt.events?.length) + }) + }) + }) + + describe('when called by an enabled setter', () => { + beforeEach(async () => { + await controller + .connect(personas.Nelly) + .addAccess(await personas.Neil.getAddress()) + }) + + it('sets the flags', async () => { + await flags.connect(personas.Neil).raiseFlags([consumer.address]), + assert.equal(true, await flags.getFlag(consumer.address)) + }) + }) + + describe('when called by a non-enabled setter', () => { + it('reverts', async () => { + await expect( + flags.connect(personas.Neil).raiseFlags([consumer.address]), + ).to.be.revertedWith('Not allowed to raise flags') + }) + }) + + describe('when called when there is no raisingAccessController', () => { + 
beforeEach(async () => { + await expect( + flags + .connect(personas.Nelly) + .setRaisingAccessController( + '0x0000000000000000000000000000000000000000', + ), + ).to.emit(flags, 'RaisingAccessControllerUpdated') + + assert.equal( + '0x0000000000000000000000000000000000000000', + await flags.raisingAccessController(), + ) + }) + + it('succeeds for the owner', async () => { + await flags.connect(personas.Nelly).raiseFlags([consumer.address]) + assert.equal(true, await flags.getFlag(consumer.address)) + }) + + it('reverts for non-owners', async () => { + await expect( + flags.connect(personas.Neil).raiseFlags([consumer.address]), + ).to.be.reverted + }) + }) + }) + + describe('#lowerFlags', () => { + beforeEach(async () => { + await flags.connect(personas.Nelly).raiseFlags([consumer.address]) + }) + + describe('when called by the owner', () => { + it('updates the warning flag', async () => { + assert.equal(true, await flags.getFlag(consumer.address)) + + await flags.connect(personas.Nelly).lowerFlags([consumer.address]) + + assert.equal(false, await flags.getFlag(consumer.address)) + }) + + it('emits an event log', async () => { + await expect( + flags.connect(personas.Nelly).lowerFlags([consumer.address]), + ) + .to.emit(flags, 'FlagLowered') + .withArgs(consumer.address) + }) + + describe('if a flag has already been raised', () => { + beforeEach(async () => { + await flags.connect(personas.Nelly).lowerFlags([consumer.address]) + }) + + it('emits an event log', async () => { + const tx = await flags + .connect(personas.Nelly) + .lowerFlags([consumer.address]) + const receipt = await tx.wait() + assert.equal(0, receipt.events?.length) + }) + }) + }) + + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + flags.connect(personas.Neil).lowerFlags([consumer.address]), + ).to.be.revertedWith('Only callable by owner') + }) + }) + }) + + describe('#getFlag', () => { + describe('if the access control is turned on', () => { + 
beforeEach(async () => { + await flags.connect(personas.Nelly).enableAccessCheck() + }) + + it('reverts', async () => { + await expect(consumer.getFlag(consumer.address)).to.be.revertedWith( + 'No access', + ) + }) + + describe('if access is granted to the address', () => { + beforeEach(async () => { + await flags.connect(personas.Nelly).addAccess(consumer.address) + }) + + it('does not revert', async () => { + await consumer.getFlag(consumer.address) + }) + }) + }) + + describe('if the access control is turned off', () => { + beforeEach(async () => { + await flags.connect(personas.Nelly).disableAccessCheck() + }) + + it('does not revert', async () => { + await consumer.getFlag(consumer.address) + }) + + describe('if access is granted to the address', () => { + beforeEach(async () => { + await flags.connect(personas.Nelly).addAccess(consumer.address) + }) + + it('does not revert', async () => { + await consumer.getFlag(consumer.address) + }) + }) + }) + }) + + describe('#getFlags', () => { + beforeEach(async () => { + await flags.connect(personas.Nelly).disableAccessCheck() + await flags + .connect(personas.Nelly) + .raiseFlags([ + await personas.Neil.getAddress(), + await personas.Norbert.getAddress(), + ]) + }) + + it('respects the access controls of #getFlag', async () => { + await flags.connect(personas.Nelly).enableAccessCheck() + + await expect(consumer.getFlag(consumer.address)).to.be.revertedWith( + 'No access', + ) + + await flags.connect(personas.Nelly).addAccess(consumer.address) + + await consumer.getFlag(consumer.address) + }) + + it('returns the flags in the order they are requested', async () => { + const response = await consumer.getFlags([ + await personas.Nelly.getAddress(), + await personas.Neil.getAddress(), + await personas.Ned.getAddress(), + await personas.Norbert.getAddress(), + ]) + + assert.deepEqual([false, true, false, true], response) + }) + }) + + describe('#setRaisingAccessController', () => { + let controller2: Contract + + 
beforeEach(async () => { + controller2 = await controllerFactory.connect(personas.Nelly).deploy() + await controller2.connect(personas.Nelly).enableAccessCheck() + }) + + it('updates access control rules', async () => { + const neilAddress = await personas.Neil.getAddress() + await controller.connect(personas.Nelly).addAccess(neilAddress) + await flags.connect(personas.Neil).raiseFlags([consumer.address]) // doesn't raise + + await flags + .connect(personas.Nelly) + .setRaisingAccessController(controller2.address) + + await expect( + flags.connect(personas.Neil).raiseFlags([consumer.address]), + ).to.be.revertedWith('Not allowed to raise flags') + }) + + it('emits a log announcing the change', async () => { + await expect( + flags + .connect(personas.Nelly) + .setRaisingAccessController(controller2.address), + ) + .to.emit(flags, 'RaisingAccessControllerUpdated') + .withArgs(controller.address, controller2.address) + }) + + it('does not emit a log when there is no change', async () => { + await flags + .connect(personas.Nelly) + .setRaisingAccessController(controller2.address) + + await expect( + flags + .connect(personas.Nelly) + .setRaisingAccessController(controller2.address), + ).to.not.emit(flags, 'RaisingAccessControllerUpdated') + }) + + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + flags + .connect(personas.Neil) + .setRaisingAccessController(controller2.address), + ).to.be.revertedWith('Only callable by owner') + }) + }) + }) +}) diff --git a/contracts/test/v0.8/HeartbeatRequester.test.ts b/contracts/test/v0.8/HeartbeatRequester.test.ts new file mode 100644 index 00000000..31425e29 --- /dev/null +++ b/contracts/test/v0.8/HeartbeatRequester.test.ts @@ -0,0 +1,142 @@ +import { getUsers, Personas } from '../test-helpers/setup' +import { ethers } from 'hardhat' +import { Signer } from 'ethers' +import { + HeartbeatRequester, + MockAggregatorProxy, + MockOffchainAggregator, +} from '../../typechain' +import { 
HeartbeatRequester__factory as HeartbeatRequesterFactory } from '../../typechain/factories/HeartbeatRequester__factory' +import { MockAggregatorProxy__factory as MockAggregatorProxyFactory } from '../../typechain/factories/MockAggregatorProxy__factory' +import { MockOffchainAggregator__factory as MockOffchainAggregatorFactory } from '../../typechain/factories/MockOffchainAggregator__factory' +import { assert, expect } from 'chai' + +let personas: Personas +let owner: Signer +let caller1: Signer +let proxy1: Signer +let proxy2: Signer +let aggregator: MockOffchainAggregator +let aggregatorFactory: MockOffchainAggregatorFactory +let aggregatorProxy: MockAggregatorProxy +let aggregatorProxyFactory: MockAggregatorProxyFactory +let requester: HeartbeatRequester +let requesterFactory: HeartbeatRequesterFactory + +describe('HeartbeatRequester', () => { + beforeEach(async () => { + personas = (await getUsers()).personas + owner = personas.Default + caller1 = personas.Carol + proxy1 = personas.Nelly + proxy2 = personas.Eddy + + // deploy heartbeat requester + requesterFactory = await ethers.getContractFactory('HeartbeatRequester') + requester = await requesterFactory.connect(owner).deploy() + await requester.deployed() + }) + + describe('#permitHeartbeat', () => { + it('adds a heartbeat and emits an event', async () => { + const callerAddress = await caller1.getAddress() + const proxyAddress1 = await proxy1.getAddress() + const proxyAddress2 = await proxy2.getAddress() + const tx1 = await requester + .connect(owner) + .permitHeartbeat(callerAddress, proxyAddress1) + await expect(tx1) + .to.emit(requester, 'HeartbeatPermitted') + .withArgs(callerAddress, proxyAddress1, ethers.constants.AddressZero) + + const tx2 = await requester + .connect(owner) + .permitHeartbeat(callerAddress, proxyAddress2) + await expect(tx2) + .to.emit(requester, 'HeartbeatPermitted') + .withArgs(callerAddress, proxyAddress2, proxyAddress1) + }) + + it('reverts when not called by its owner', async () 
=> { + const callerAddress = await caller1.getAddress() + const proxyAddress = await proxy1.getAddress() + await expect( + requester.connect(caller1).permitHeartbeat(callerAddress, proxyAddress), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('#removeHeartbeat', () => { + it('removes a heartbeat and emits an event', async () => { + const callerAddress = await caller1.getAddress() + const proxyAddress = await proxy1.getAddress() + const tx1 = await requester + .connect(owner) + .permitHeartbeat(callerAddress, proxyAddress) + await expect(tx1) + .to.emit(requester, 'HeartbeatPermitted') + .withArgs(callerAddress, proxyAddress, ethers.constants.AddressZero) + + const tx2 = await requester.connect(owner).removeHeartbeat(callerAddress) + await expect(tx2) + .to.emit(requester, 'HeartbeatRemoved') + .withArgs(callerAddress, proxyAddress) + }) + + it('reverts when not called by its owner', async () => { + await expect( + requester.connect(caller1).removeHeartbeat(await caller1.getAddress()), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('#getAggregatorAndRequestHeartbeat', () => { + it('reverts if caller and proxy combination is not allowed', async () => { + const callerAddress = await caller1.getAddress() + const proxyAddress = await proxy1.getAddress() + await requester + .connect(owner) + .permitHeartbeat(callerAddress, proxyAddress) + + await expect( + requester + .connect(caller1) + .getAggregatorAndRequestHeartbeat(await owner.getAddress()), + ).to.be.revertedWith('HeartbeatNotPermitted()') + }) + + it('calls corresponding aggregator to request a new round', async () => { + aggregatorFactory = await ethers.getContractFactory( + 'MockOffchainAggregator', + ) + aggregator = await aggregatorFactory.connect(owner).deploy() + await aggregator.deployed() + + aggregatorProxyFactory = await ethers.getContractFactory( + 'MockAggregatorProxy', + ) + aggregatorProxy = await aggregatorProxyFactory + .connect(owner) + 
.deploy(aggregator.address) + await aggregatorProxy.deployed() + + await requester + .connect(owner) + .permitHeartbeat(await caller1.getAddress(), aggregatorProxy.address) + + const tx1 = await requester + .connect(caller1) + .getAggregatorAndRequestHeartbeat(aggregatorProxy.address) + + await expect(tx1).to.emit(aggregator, 'RoundIdUpdated').withArgs(1) + assert.equal((await aggregator.roundId()).toNumber(), 1) + + const tx2 = await requester + .connect(caller1) + .getAggregatorAndRequestHeartbeat(aggregatorProxy.address) + + await expect(tx2).to.emit(aggregator, 'RoundIdUpdated').withArgs(2) + assert.equal((await aggregator.roundId()).toNumber(), 2) + }) + }) +}) diff --git a/contracts/test/v0.8/KeeperRegistrar.test.ts b/contracts/test/v0.8/KeeperRegistrar.test.ts new file mode 100644 index 00000000..b41ae27a --- /dev/null +++ b/contracts/test/v0.8/KeeperRegistrar.test.ts @@ -0,0 +1,808 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { evmRevert } from '../test-helpers/matchers' +import { getUsers, Personas } from '../test-helpers/setup' +import { BigNumber, Signer } from 'ethers' +import { LinkToken__factory as LinkTokenFactory } from '../../typechain/factories/LinkToken__factory' + +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../typechain/factories/MockV3Aggregator__factory' +import { UpkeepMock__factory as UpkeepMockFactory } from '../../typechain/factories/UpkeepMock__factory' +import { KeeperRegistry1_2 as KeeperRegistry } from '../../typechain/KeeperRegistry1_2' +import { KeeperRegistry1_2__factory as KeeperRegistryFactory } from '../../typechain/factories/KeeperRegistry1_2__factory' +import { KeeperRegistrar } from '../../typechain/KeeperRegistrar' +import { KeeperRegistrar__factory as KeeperRegistrarFactory } from '../../typechain/factories/KeeperRegistrar__factory' + +import { MockV3Aggregator } from '../../typechain/MockV3Aggregator' +import { LinkToken } from '../../typechain/LinkToken' 
+import { UpkeepMock } from '../../typechain/UpkeepMock' +import { toWei } from '../test-helpers/helpers' + +let linkTokenFactory: LinkTokenFactory +let mockV3AggregatorFactory: MockV3AggregatorFactory +let keeperRegistryFactory: KeeperRegistryFactory +let keeperRegistrar: KeeperRegistrarFactory +let upkeepMockFactory: UpkeepMockFactory + +let personas: Personas + +before(async () => { + personas = (await getUsers()).personas + + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + ) + mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as unknown as MockV3AggregatorFactory + // @ts-ignore bug in autogen file + keeperRegistryFactory = await ethers.getContractFactory('KeeperRegistry1_2') + keeperRegistrar = await ethers.getContractFactory('KeeperRegistrar') + upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') +}) + +const errorMsgs = { + onlyOwner: 'revert Only callable by owner', + onlyAdmin: 'OnlyAdminOrOwner()', + hashPayload: 'HashMismatch()', + requestNotFound: 'RequestNotFound()', +} + +describe('KeeperRegistrar', () => { + const upkeepName = 'SampleUpkeep' + + const linkEth = BigNumber.from(300000000) + const gasWei = BigNumber.from(100) + const executeGas = BigNumber.from(100000) + const source = BigNumber.from(100) + const paymentPremiumPPB = BigNumber.from(250000000) + const flatFeeMicroLink = BigNumber.from(0) + const maxAllowedAutoApprove = 5 + + const blockCountPerTurn = BigNumber.from(3) + const emptyBytes = '0x00' + const stalenessSeconds = BigNumber.from(43820) + const gasCeilingMultiplier = BigNumber.from(1) + const checkGasLimit = BigNumber.from(20000000) + const fallbackGasPrice = BigNumber.from(200) + const fallbackLinkPrice = BigNumber.from(200000000) + const maxPerformGas = BigNumber.from(5000000) + const minUpkeepSpend = BigNumber.from('1000000000000000000') + const amount = BigNumber.from('5000000000000000000') + const 
amount1 = BigNumber.from('6000000000000000000') + const transcoder = ethers.constants.AddressZero + + // Enum values are not auto exported in ABI so have to manually declare + const autoApproveType_DISABLED = 0 + const autoApproveType_ENABLED_SENDER_ALLOWLIST = 1 + const autoApproveType_ENABLED_ALL = 2 + + let owner: Signer + let admin: Signer + let someAddress: Signer + let registrarOwner: Signer + let stranger: Signer + let requestSender: Signer + + let linkToken: LinkToken + let linkEthFeed: MockV3Aggregator + let gasPriceFeed: MockV3Aggregator + let registry: KeeperRegistry + let mock: UpkeepMock + let registrar: KeeperRegistrar + + beforeEach(async () => { + owner = personas.Default + admin = personas.Neil + someAddress = personas.Ned + registrarOwner = personas.Nelly + stranger = personas.Nancy + requestSender = personas.Norbert + + const config = { + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder, + registrar: ethers.constants.AddressZero, + } + + linkToken = await linkTokenFactory.connect(owner).deploy() + gasPriceFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(0, gasWei) + linkEthFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(9, linkEth) + registry = await keeperRegistryFactory + .connect(owner) + .deploy( + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + config, + ) + + mock = await upkeepMockFactory.deploy() + + registrar = await keeperRegistrar + .connect(registrarOwner) + .deploy( + linkToken.address, + autoApproveType_DISABLED, + BigNumber.from('0'), + registry.address, + minUpkeepSpend, + ) + + await linkToken + .connect(owner) + .transfer(await requestSender.getAddress(), toWei('1000')) + + config.registrar = registrar.address + await registry.setConfig(config) + }) + + describe('#typeAndVersion', () => { + it('uses the correct type 
and version', async () => { + const typeAndVersion = await registrar.typeAndVersion() + assert.equal(typeAndVersion, 'KeeperRegistrar 1.1.0') + }) + }) + + describe('#register', () => { + it('reverts if not called by the PLI token', async () => { + await evmRevert( + registrar + .connect(someAddress) + .register( + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + await requestSender.getAddress(), + ), + 'OnlyLink()', + ) + }) + + it('reverts if the amount passed in data mismatches actual amount sent', async () => { + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount1, + source, + await requestSender.getAddress(), + ], + ) + + await evmRevert( + linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes), + 'AmountMismatch()', + ) + }) + + it('reverts if the sender passed in data mismatches actual sender', async () => { + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + await admin.getAddress(), // Should have been requestSender.getAddress() + ], + ) + await evmRevert( + linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes), + 'SenderMismatch()', + ) + }) + + it('reverts if the admin address is 0x0000...', async () => { + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + '0x0000000000000000000000000000000000000000', + emptyBytes, + amount, + source, + await 
requestSender.getAddress(), + ], + ) + + await evmRevert( + linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes), + 'RegistrationRequestFailed()', + ) + }) + + it('Auto Approve ON - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () => { + //set auto approve ON with high threshold limits + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + //register with auto approve ON + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + + const [id] = await registry.getActiveUpkeepIDs(0, 1) + + //confirm if a new upkeep has been registered and the details are the same as the one just registered + const newupkeep = await registry.getUpkeep(id) + assert.equal(newupkeep.target, mock.address) + assert.equal(newupkeep.admin, await admin.getAddress()) + assert.equal(newupkeep.checkData, emptyBytes) + assert.equal(newupkeep.balance.toString(), amount.toString()) + assert.equal(newupkeep.executeGas, executeGas.toNumber()) + + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('Auto Approve OFF - does not registers an upkeep on KeeperRegistry, emits only RegistrationRequested event', async () => { + //get upkeep count before attempting registration + const beforeCount = (await registry.getState()).state.numUpkeeps + + //set auto approve OFF, threshold limits dont matter in this case + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + 
autoApproveType_DISABLED, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + + //get upkeep count after attempting registration + const afterCount = (await registry.getState()).state.numUpkeeps + //confirm that a new upkeep has NOT been registered and upkeep count is still the same + assert.deepEqual(beforeCount, afterCount) + + //confirm that only RegistrationRequested event is emitted and RegistrationApproved event is not + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).not.to.emit(registrar, 'RegistrationApproved') + + const hash = receipt.logs[2].topics[1] + const pendingRequest = await registrar.getPendingRequest(hash) + assert.equal(await admin.getAddress(), pendingRequest[0]) + assert.ok(amount.eq(pendingRequest[1])) + }) + + it('Auto Approve ON - Throttle max approvals - does not register an upkeep on KeeperRegistry beyond the max limit, emits only RegistrationRequested event after limit is hit', async () => { + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 0) + + //set auto approve on, with max 1 allowed + await registrar.connect(registrarOwner).setRegistrationConfig( + autoApproveType_ENABLED_ALL, + 1, // maxAllowedAutoApprove + registry.address, + minUpkeepSpend, + ) + + //register within threshold, new upkeep should be registered + let abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + await 
requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 1) // 0 -> 1 + + //try registering another one, new upkeep should not be registered + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + executeGas.toNumber() + 1, // make unique hash + await admin.getAddress(), + emptyBytes, + amount, + source, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 1) // Still 1 + + // Now set new max limit to 2. One more upkeep should get auto approved + await registrar.connect(registrarOwner).setRegistrationConfig( + autoApproveType_ENABLED_ALL, + 2, // maxAllowedAutoApprove + registry.address, + minUpkeepSpend, + ) + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + executeGas.toNumber() + 2, // make unique hash + await admin.getAddress(), + emptyBytes, + amount, + source, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 2) // 1 -> 2 + + // One more upkeep should not get registered + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + executeGas.toNumber() + 3, // make unique hash + await admin.getAddress(), + emptyBytes, + amount, + source, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 2) // Still 2 + }) + + it('Auto Approve 
Sender Allowlist - sender in allowlist - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () => { + const senderAddress = await requestSender.getAddress() + + //set auto approve to ENABLED_SENDER_ALLOWLIST type with high threshold limits + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_ENABLED_SENDER_ALLOWLIST, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + // Add sender to allowlist + await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, true) + + //register with auto approve ON + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + + const [id] = await registry.getActiveUpkeepIDs(0, 1) + + //confirm if a new upkeep has been registered and the details are the same as the one just registered + const newupkeep = await registry.getUpkeep(id) + assert.equal(newupkeep.target, mock.address) + assert.equal(newupkeep.admin, await admin.getAddress()) + assert.equal(newupkeep.checkData, emptyBytes) + assert.equal(newupkeep.balance.toString(), amount.toString()) + assert.equal(newupkeep.executeGas, executeGas.toNumber()) + + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('Auto Approve Sender Allowlist - sender NOT in allowlist - does not registers an upkeep on KeeperRegistry, emits only RegistrationRequested event', async () => { + const beforeCount = (await registry.getState()).state.numUpkeeps + const senderAddress = await requestSender.getAddress() + + //set auto approve to ENABLED_SENDER_ALLOWLIST type 
with high threshold limits + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_ENABLED_SENDER_ALLOWLIST, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + // Explicitly remove sender from allowlist + await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, false) + + //register. auto approve shouldn't happen + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + + //get upkeep count after attempting registration + const afterCount = (await registry.getState()).state.numUpkeeps + //confirm that a new upkeep has NOT been registered and upkeep count is still the same + assert.deepEqual(beforeCount, afterCount) + + //confirm that only RegistrationRequested event is emitted and RegistrationApproved event is not + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).not.to.emit(registrar, 'RegistrationApproved') + + const hash = receipt.logs[2].topics[1] + const pendingRequest = await registrar.getPendingRequest(hash) + assert.equal(await admin.getAddress(), pendingRequest[0]) + assert.ok(amount.eq(pendingRequest[1])) + }) + }) + + describe('#setAutoApproveAllowedSender', () => { + it('reverts if not called by the owner', async () => { + const tx = registrar + .connect(stranger) + .setAutoApproveAllowedSender(await admin.getAddress(), false) + await evmRevert(tx, 'Only callable by owner') + }) + + it('sets the allowed status correctly and emits log', async () => { + const senderAddress = await stranger.getAddress() + let tx = await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, 
true) + await expect(tx) + .to.emit(registrar, 'AutoApproveAllowedSenderSet') + .withArgs(senderAddress, true) + + let senderAllowedStatus = await registrar + .connect(owner) + .getAutoApproveAllowedSender(senderAddress) + assert.isTrue(senderAllowedStatus) + + tx = await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, false) + await expect(tx) + .to.emit(registrar, 'AutoApproveAllowedSenderSet') + .withArgs(senderAddress, false) + + senderAllowedStatus = await registrar + .connect(owner) + .getAutoApproveAllowedSender(senderAddress) + assert.isFalse(senderAllowedStatus) + }) + }) + + describe('#approve', () => { + let hash: string + + beforeEach(async () => { + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_DISABLED, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + await requestSender.getAddress(), + ], + ) + + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + hash = receipt.logs[2].topics[1] + }) + + it('reverts if not called by the owner', async () => { + const tx = registrar + .connect(stranger) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + hash, + ) + await evmRevert(tx, 'Only callable by owner') + }) + + it('reverts if the hash does not exist', async () => { + const tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + '0x000000000000000000000000322813fd9a801c5507c9de605d63cea4f2ce6c44', + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + + it('reverts if any member of the payload 
changes', async () => { + let tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + ethers.Wallet.createRandom().address, + executeGas, + await admin.getAddress(), + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + 10000, + await admin.getAddress(), + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + ethers.Wallet.createRandom().address, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + '0x1234', + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + }) + + it('approves an existing registration request', async () => { + const tx = await registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + hash, + ) + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('deletes the request afterwards / reverts if the request DNE', async () => { + await registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + hash, + ) + const tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + }) + + describe('#cancel', () => { + let hash: string + + beforeEach(async () => { + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_DISABLED, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + 
emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + amount, + source, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + hash = receipt.logs[2].topics[1] + // submit duplicate request (increase balance) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + }) + + it('reverts if not called by the admin / owner', async () => { + const tx = registrar.connect(stranger).cancel(hash) + await evmRevert(tx, errorMsgs.onlyAdmin) + }) + + it('reverts if the hash does not exist', async () => { + const tx = registrar + .connect(registrarOwner) + .cancel( + '0x000000000000000000000000322813fd9a801c5507c9de605d63cea4f2ce6c44', + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + + it('refunds the total request balance to the admin address', async () => { + const before = await linkToken.balanceOf(await admin.getAddress()) + const tx = await registrar.connect(admin).cancel(hash) + const after = await linkToken.balanceOf(await admin.getAddress()) + assert.isTrue(after.sub(before).eq(amount.mul(BigNumber.from(2)))) + await expect(tx).to.emit(registrar, 'RegistrationRejected') + }) + + it('deletes the request hash', async () => { + await registrar.connect(registrarOwner).cancel(hash) + let tx = registrar.connect(registrarOwner).cancel(hash) + await evmRevert(tx, errorMsgs.requestNotFound) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + }) +}) diff --git a/contracts/test/v0.8/PermissionedForwardProxy.test.ts b/contracts/test/v0.8/PermissionedForwardProxy.test.ts new file mode 100644 index 00000000..ef9129d7 --- /dev/null +++ b/contracts/test/v0.8/PermissionedForwardProxy.test.ts @@ 
-0,0 +1,176 @@ +import { ethers } from 'hardhat' +import { publicAbi } from '../test-helpers/helpers' +import { expect, assert } from 'chai' +import { Contract, ContractFactory } from 'ethers' +import { Personas, getUsers } from '../test-helpers/setup' + +const PERMISSION_NOT_SET = 'PermissionNotSet' + +let personas: Personas + +let controllerFactory: ContractFactory +let counterFactory: ContractFactory +let controller: Contract +let counter: Contract + +before(async () => { + personas = (await getUsers()).personas + controllerFactory = await ethers.getContractFactory( + 'src/v0.8/PermissionedForwardProxy.sol:PermissionedForwardProxy', + personas.Carol, + ) + counterFactory = await ethers.getContractFactory( + 'src/v0.8/tests/Counter.sol:Counter', + personas.Carol, + ) +}) + +describe('PermissionedForwardProxy', () => { + beforeEach(async () => { + controller = await controllerFactory.connect(personas.Carol).deploy() + counter = await counterFactory.connect(personas.Carol).deploy() + }) + + it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(controller, [ + 'forward', + 'setPermission', + 'removePermission', + 'getPermission', + // Owned + 'acceptOwnership', + 'owner', + 'transferOwnership', + ]) + }) + + describe('#setPermission', () => { + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + controller + .connect(personas.Eddy) + .setPermission( + await personas.Carol.getAddress(), + await personas.Eddy.getAddress(), + ), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('when called by the owner', () => { + it('adds the permission to the proxy', async () => { + const tx = await controller + .connect(personas.Carol) + .setPermission( + await personas.Carol.getAddress(), + await personas.Eddy.getAddress(), + ) + const receipt = await tx.wait() + const eventLog = receipt?.events + + assert.equal(eventLog?.length, 1) + assert.equal(eventLog?.[0].event, 'PermissionSet') + 
assert.equal(eventLog?.[0].args?.[0], await personas.Carol.getAddress()) + assert.equal(eventLog?.[0].args?.[1], await personas.Eddy.getAddress()) + + expect( + await controller.getPermission(await personas.Carol.getAddress()), + ).to.be.equal(await personas.Eddy.getAddress()) + }) + }) + }) + + describe('#removePermission', () => { + beforeEach(async () => { + // Add permission before testing + await controller + .connect(personas.Carol) + .setPermission( + await personas.Carol.getAddress(), + await personas.Eddy.getAddress(), + ) + }) + + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + controller + .connect(personas.Eddy) + .removePermission(await personas.Carol.getAddress()), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('when called by the owner', () => { + it('removes the permission to the proxy', async () => { + const tx = await controller + .connect(personas.Carol) + .removePermission(await personas.Carol.getAddress()) + + const receipt = await tx.wait() + const eventLog = receipt?.events + + assert.equal(eventLog?.length, 1) + assert.equal(eventLog?.[0].event, 'PermissionRemoved') + assert.equal(eventLog?.[0].args?.[0], await personas.Carol.getAddress()) + + expect( + await controller.getPermission(await personas.Carol.getAddress()), + ).to.be.equal(ethers.constants.AddressZero) + }) + }) + }) + + describe('#forward', () => { + describe('when permission does not exist', () => { + it('reverts', async () => { + await expect( + controller + .connect(personas.Carol) + .forward(await personas.Eddy.getAddress(), '0x'), + ).to.be.revertedWith(PERMISSION_NOT_SET) + }) + }) + + describe('when permission exists', () => { + beforeEach(async () => { + // Add permission before testing + await controller + .connect(personas.Carol) + .setPermission(await personas.Carol.getAddress(), counter.address) + }) + + it('calls target successfully', async () => { + await controller + .connect(personas.Carol) + 
.forward( + counter.address, + counter.interface.encodeFunctionData('increment'), + ) + + expect(await counter.count()).to.be.equal(1) + }) + + it('reverts when target reverts and bubbles up error', async () => { + await expect( + controller + .connect(personas.Carol) + .forward( + counter.address, + counter.interface.encodeFunctionData('alwaysRevertWithString'), + ), + ).to.be.revertedWith('always revert') // Revert strings should be bubbled up + + await expect( + controller + .connect(personas.Carol) + .forward( + counter.address, + counter.interface.encodeFunctionData('alwaysRevert'), + ), + ).to.be.reverted // Javascript VM not able to parse custom errors defined on another contract + }) + }) + }) +}) diff --git a/contracts/test/v0.8/Plugin.test.ts b/contracts/test/v0.8/Plugin.test.ts new file mode 100644 index 00000000..7176a4fb --- /dev/null +++ b/contracts/test/v0.8/Plugin.test.ts @@ -0,0 +1,182 @@ +import { ethers } from 'hardhat' +import { publicAbi, decodeDietCBOR, hexToBuf } from '../test-helpers/helpers' +import { assert } from 'chai' +import { Contract, ContractFactory, providers, Signer } from 'ethers' +import { Roles, getUsers } from '../test-helpers/setup' +import { makeDebug } from '../test-helpers/debug' + +const debug = makeDebug('PluginTestHelper') +let concretePluginFactory: ContractFactory + +let roles: Roles + +before(async () => { + roles = (await getUsers()).roles + concretePluginFactory = await ethers.getContractFactory( + 'src/v0.8/tests/PluginTestHelper.sol:PluginTestHelper', + roles.defaultAccount, + ) +}) + +describe('PluginTestHelper', () => { + let ccl: Contract + let defaultAccount: Signer + + beforeEach(async () => { + defaultAccount = roles.defaultAccount + ccl = await concretePluginFactory.connect(defaultAccount).deploy() + }) + + it('has a limited public interface [ @skip-coverage ]', () => { + publicAbi(ccl, [ + 'add', + 'addBytes', + 'addInt', + 'addStringArray', + 'addUint', + 'closeEvent', + 'setBuffer', + ]) + }) + + async 
function parseCCLEvent(tx: providers.TransactionResponse) { + const receipt = await tx.wait() + const data = receipt.logs?.[0].data + const d = debug.extend('parseCCLEvent') + d('data %s', data) + return ethers.utils.defaultAbiCoder.decode(['bytes'], data ?? '') + } + + describe('#close', () => { + it('handles empty payloads', async () => { + const tx = await ccl.closeEvent() + const [payload] = await parseCCLEvent(tx) + const decoded = await decodeDietCBOR(payload) + assert.deepEqual(decoded, {}) + }) + }) + + describe('#setBuffer', () => { + it('emits the buffer', async () => { + await ccl.setBuffer('0xA161616162') + const tx = await ccl.closeEvent() + const [payload] = await parseCCLEvent(tx) + const decoded = await decodeDietCBOR(payload) + assert.deepEqual(decoded, { a: 'b' }) + }) + }) + + describe('#add', () => { + it('stores and logs keys and values', async () => { + await ccl.add('first', 'word!!') + const tx = await ccl.closeEvent() + const [payload] = await parseCCLEvent(tx) + const decoded = await decodeDietCBOR(payload) + assert.deepEqual(decoded, { first: 'word!!' 
}) + }) + + it('handles two entries', async () => { + await ccl.add('first', 'uno') + await ccl.add('second', 'dos') + const tx = await ccl.closeEvent() + const [payload] = await parseCCLEvent(tx) + const decoded = await decodeDietCBOR(payload) + + assert.deepEqual(decoded, { + first: 'uno', + second: 'dos', + }) + }) + }) + + describe('#addBytes', () => { + it('stores and logs keys and values', async () => { + await ccl.addBytes('first', '0xaabbccddeeff') + const tx = await ccl.closeEvent() + const [payload] = await parseCCLEvent(tx) + const decoded = await decodeDietCBOR(payload) + const expected = hexToBuf('0xaabbccddeeff') + assert.deepEqual(decoded, { first: expected }) + }) + + it('handles two entries', async () => { + await ccl.addBytes('first', '0x756E6F') + await ccl.addBytes('second', '0x646F73') + const tx = await ccl.closeEvent() + const [payload] = await parseCCLEvent(tx) + const decoded = await decodeDietCBOR(payload) + + const expectedFirst = hexToBuf('0x756E6F') + const expectedSecond = hexToBuf('0x646F73') + assert.deepEqual(decoded, { + first: expectedFirst, + second: expectedSecond, + }) + }) + + it('handles strings', async () => { + await ccl.addBytes('first', ethers.utils.toUtf8Bytes('apple')) + const tx = await ccl.closeEvent() + const [payload] = await parseCCLEvent(tx) + const decoded = await decodeDietCBOR(payload) + const expected = ethers.utils.toUtf8Bytes('apple') + assert.deepEqual(decoded, { first: expected }) + }) + }) + + describe('#addInt', () => { + it('stores and logs keys and values', async () => { + await ccl.addInt('first', 1) + const tx = await ccl.closeEvent() + const [payload] = await parseCCLEvent(tx) + const decoded = await decodeDietCBOR(payload) + assert.deepEqual(decoded, { first: 1 }) + }) + + it('handles two entries', async () => { + await ccl.addInt('first', 1) + await ccl.addInt('second', 2) + const tx = await ccl.closeEvent() + const [payload] = await parseCCLEvent(tx) + const decoded = await 
decodeDietCBOR(payload) + + assert.deepEqual(decoded, { + first: 1, + second: 2, + }) + }) + }) + + describe('#addUint', () => { + it('stores and logs keys and values', async () => { + await ccl.addUint('first', 1) + const tx = await ccl.closeEvent() + const [payload] = await parseCCLEvent(tx) + const decoded = await decodeDietCBOR(payload) + assert.deepEqual(decoded, { first: 1 }) + }) + + it('handles two entries', async () => { + await ccl.addUint('first', 1) + await ccl.addUint('second', 2) + const tx = await ccl.closeEvent() + const [payload] = await parseCCLEvent(tx) + const decoded = await decodeDietCBOR(payload) + + assert.deepEqual(decoded, { + first: 1, + second: 2, + }) + }) + }) + + describe('#addStringArray', () => { + it('stores and logs keys and values', async () => { + await ccl.addStringArray('word', ['seinfeld', '"4"', 'LIFE']) + const tx = await ccl.closeEvent() + const [payload] = await parseCCLEvent(tx) + const decoded = await decodeDietCBOR(payload) + assert.deepEqual(decoded, { word: ['seinfeld', '"4"', 'LIFE'] }) + }) + }) +}) diff --git a/contracts/test/v0.8/PluginClient.test.ts b/contracts/test/v0.8/PluginClient.test.ts new file mode 100644 index 00000000..79dbb357 --- /dev/null +++ b/contracts/test/v0.8/PluginClient.test.ts @@ -0,0 +1,452 @@ +import { ethers } from 'hardhat' +import { assert } from 'chai' +import { Contract, ContractFactory } from 'ethers' +import { Roles, getUsers } from '../test-helpers/setup' +import { + convertFufillParams, + decodeCCRequest, + decodeRunRequest, + RunRequest, +} from '../test-helpers/oracle' +import { decodeDietCBOR } from '../test-helpers/helpers' +import { evmRevert } from '../test-helpers/matchers' + +let concretePluginClientFactory: ContractFactory +let emptyOracleFactory: ContractFactory +let getterSetterFactory: ContractFactory +let operatorFactory: ContractFactory +let linkTokenFactory: ContractFactory + +let roles: Roles + +before(async () => { + roles = (await getUsers()).roles + + 
concretePluginClientFactory = await ethers.getContractFactory( + 'src/v0.8/tests/PluginClientTestHelper.sol:PluginClientTestHelper', + roles.defaultAccount, + ) + emptyOracleFactory = await ethers.getContractFactory( + 'src/v0.6/tests/EmptyOracle.sol:EmptyOracle', + roles.defaultAccount, + ) + getterSetterFactory = await ethers.getContractFactory( + 'src/v0.5/tests/GetterSetter.sol:GetterSetter', + roles.defaultAccount, + ) + operatorFactory = await ethers.getContractFactory( + 'src/v0.7/Operator.sol:Operator', + roles.defaultAccount, + ) + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + roles.defaultAccount, + ) +}) + +describe('PluginClientTestHelper', () => { + const specId = + '0x4c7b7ffb66b344fbaa64995af81e355a00000000000000000000000000000000' + let cc: Contract + let gs: Contract + let oc: Contract + let newoc: Contract + let link: Contract + + beforeEach(async () => { + link = await linkTokenFactory.connect(roles.defaultAccount).deploy() + oc = await operatorFactory + .connect(roles.defaultAccount) + .deploy(link.address, await roles.defaultAccount.getAddress()) + newoc = await operatorFactory + .connect(roles.defaultAccount) + .deploy(link.address, await roles.defaultAccount.getAddress()) + gs = await getterSetterFactory.connect(roles.defaultAccount).deploy() + cc = await concretePluginClientFactory + .connect(roles.defaultAccount) + .deploy(link.address, oc.address) + }) + + describe('#newRequest', () => { + it('forwards the information to the oracle contract through the link token', async () => { + const tx = await cc.publicNewRequest( + specId, + gs.address, + ethers.utils.toUtf8Bytes('requestedBytes32(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + + assert.equal(1, receipt.logs?.length) + const [jId, cbAddr, cbFId, cborData] = receipt.logs + ? decodeCCRequest(receipt.logs[0]) + : [] + const params = decodeDietCBOR(cborData ?? 
'') + + assert.equal(specId, jId) + assert.equal(gs.address, cbAddr) + assert.equal('0xed53e511', cbFId) + assert.deepEqual({}, params) + }) + }) + + describe('#pluginRequest(Request)', () => { + it('emits an event from the contract showing the run ID', async () => { + const tx = await cc.publicRequest( + specId, + cc.address, + ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'), + 0, + ) + + const { events, logs } = await tx.wait() + + assert.equal(4, events?.length) + + assert.equal(logs?.[0].address, cc.address) + assert.equal(events?.[0].event, 'PluginRequested') + }) + }) + + describe('#pluginRequestTo(Request)', () => { + it('emits an event from the contract showing the run ID', async () => { + const tx = await cc.publicRequestRunTo( + newoc.address, + specId, + cc.address, + ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'), + 0, + ) + const { events } = await tx.wait() + + assert.equal(4, events?.length) + assert.equal(events?.[0].event, 'PluginRequested') + }) + + it('emits an event on the target oracle contract', async () => { + const tx = await cc.publicRequestRunTo( + newoc.address, + specId, + cc.address, + ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'), + 0, + ) + const { logs } = await tx.wait() + const event = logs && newoc.interface.parseLog(logs[3]) + + assert.equal(4, logs?.length) + assert.equal(event?.name, 'OracleRequest') + }) + + it('does not modify the stored oracle address', async () => { + await cc.publicRequestRunTo( + newoc.address, + specId, + cc.address, + ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'), + 0, + ) + + const actualOracleAddress = await cc.publicOracleAddress() + assert.equal(oc.address, actualOracleAddress) + }) + }) + + describe('#requestOracleData', () => { + it('emits an event from the contract showing the run ID', async () => { + const tx = await cc.publicRequestOracleData( + specId, + ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'), + 0, + ) + + const { 
events, logs } = await tx.wait() + + assert.equal(4, events?.length) + + assert.equal(logs?.[0].address, cc.address) + assert.equal(events?.[0].event, 'PluginRequested') + }) + }) + + describe('#requestOracleDataFrom', () => { + it('emits an event from the contract showing the run ID', async () => { + const tx = await cc.publicRequestOracleDataFrom( + newoc.address, + specId, + ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'), + 0, + ) + const { events } = await tx.wait() + + assert.equal(4, events?.length) + assert.equal(events?.[0].event, 'PluginRequested') + }) + + it('emits an event on the target oracle contract', async () => { + const tx = await cc.publicRequestOracleDataFrom( + newoc.address, + specId, + ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'), + 0, + ) + const { logs } = await tx.wait() + const event = logs && newoc.interface.parseLog(logs[3]) + + assert.equal(4, logs?.length) + assert.equal(event?.name, 'OracleRequest') + }) + + it('does not modify the stored oracle address', async () => { + await cc.publicRequestOracleDataFrom( + newoc.address, + specId, + ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'), + 0, + ) + + const actualOracleAddress = await cc.publicOracleAddress() + assert.equal(oc.address, actualOracleAddress) + }) + }) + + describe('#cancelPluginRequest', () => { + let requestId: string + // a concrete plugin attached to an empty oracle + let ecc: Contract + + beforeEach(async () => { + const emptyOracle = await emptyOracleFactory + .connect(roles.defaultAccount) + .deploy() + ecc = await concretePluginClientFactory + .connect(roles.defaultAccount) + .deploy(link.address, emptyOracle.address) + + const tx = await ecc.publicRequest( + specId, + ecc.address, + ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'), + 0, + ) + const { events } = await tx.wait() + requestId = (events?.[0]?.args as any).id + }) + + it('emits an event from the contract showing the run was cancelled', async () => { + 
const tx = await ecc.publicCancelRequest( + requestId, + 0, + ethers.utils.hexZeroPad('0x', 4), + 0, + ) + const { events } = await tx.wait() + + assert.equal(1, events?.length) + assert.equal(events?.[0].event, 'PluginCancelled') + assert.equal(requestId, (events?.[0].args as any).id) + }) + + it('throws if given a bogus event ID', async () => { + await evmRevert( + ecc.publicCancelRequest( + ethers.utils.formatBytes32String('bogusId'), + 0, + ethers.utils.hexZeroPad('0x', 4), + 0, + ), + ) + }) + }) + + describe('#recordPluginFulfillment(modifier)', () => { + let request: RunRequest + + beforeEach(async () => { + await oc.setAuthorizedSenders([await roles.defaultAccount.getAddress()]) + const tx = await cc.publicRequest( + specId, + cc.address, + ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'), + 0, + ) + const { logs } = await tx.wait() + + request = decodeRunRequest(logs?.[3]) + }) + + it('emits an event marking the request fulfilled', async () => { + const tx = await oc + .connect(roles.defaultAccount) + .fulfillOracleRequest( + ...convertFufillParams( + request, + ethers.utils.formatBytes32String('hi mom!'), + ), + ) + const { logs } = await tx.wait() + + const event = logs && cc.interface.parseLog(logs[1]) + + assert.equal(2, logs?.length) + assert.equal(event?.name, 'PluginFulfilled') + assert.equal(request.requestId, event?.args.id) + }) + + it('should only allow one fulfillment per id', async () => { + await oc + .connect(roles.defaultAccount) + .fulfillOracleRequest( + ...convertFufillParams( + request, + ethers.utils.formatBytes32String('hi mom!'), + ), + ) + + await evmRevert( + oc + .connect(roles.defaultAccount) + .fulfillOracleRequest( + ...convertFufillParams( + request, + ethers.utils.formatBytes32String('hi mom!'), + ), + ), + 'Must have a valid requestId', + ) + }) + + it('should only allow the oracle to fulfill the request', async () => { + await evmRevert( + oc + .connect(roles.stranger) + .fulfillOracleRequest( + 
...convertFufillParams( + request, + ethers.utils.formatBytes32String('hi mom!'), + ), + ), + 'Not authorized sender', + ) + }) + }) + + describe('#fulfillPluginRequest(function)', () => { + let request: RunRequest + + beforeEach(async () => { + await oc.setAuthorizedSenders([await roles.defaultAccount.getAddress()]) + const tx = await cc.publicRequest( + specId, + cc.address, + ethers.utils.toUtf8Bytes( + 'publicFulfillPluginRequest(bytes32,bytes32)', + ), + 0, + ) + const { logs } = await tx.wait() + + request = decodeRunRequest(logs?.[3]) + }) + + it('emits an event marking the request fulfilled', async () => { + await oc.setAuthorizedSenders([await roles.defaultAccount.getAddress()]) + const tx = await oc + .connect(roles.defaultAccount) + .fulfillOracleRequest( + ...convertFufillParams( + request, + ethers.utils.formatBytes32String('hi mom!'), + ), + ) + + const { logs } = await tx.wait() + const event = logs && cc.interface.parseLog(logs[1]) + + assert.equal(2, logs?.length) + assert.equal(event?.name, 'PluginFulfilled') + assert.equal(request.requestId, event?.args?.id) + }) + + it('should only allow one fulfillment per id', async () => { + await oc + .connect(roles.defaultAccount) + .fulfillOracleRequest( + ...convertFufillParams( + request, + ethers.utils.formatBytes32String('hi mom!'), + ), + ) + + await evmRevert( + oc + .connect(roles.defaultAccount) + .fulfillOracleRequest( + ...convertFufillParams( + request, + ethers.utils.formatBytes32String('hi mom!'), + ), + ), + 'Must have a valid requestId', + ) + }) + + it('should only allow the oracle to fulfill the request', async () => { + await evmRevert( + oc + .connect(roles.stranger) + .fulfillOracleRequest( + ...convertFufillParams( + request, + ethers.utils.formatBytes32String('hi mom!'), + ), + ), + 'Not authorized sender', + ) + }) + }) + + describe('#pluginToken', () => { + it('returns the Link Token address', async () => { + const addr = await cc.publicPluginToken() + assert.equal(addr, 
link.address) + }) + }) + + describe('#addExternalRequest', () => { + let mock: Contract + let request: RunRequest + + beforeEach(async () => { + mock = await concretePluginClientFactory + .connect(roles.defaultAccount) + .deploy(link.address, oc.address) + + const tx = await cc.publicRequest( + specId, + mock.address, + ethers.utils.toUtf8Bytes('fulfillRequest(bytes32,bytes32)'), + 0, + ) + const receipt = await tx.wait() + + request = decodeRunRequest(receipt.logs?.[3]) + await mock.publicAddExternalRequest(oc.address, request.requestId) + }) + + it('allows the external request to be fulfilled', async () => { + await oc.setAuthorizedSenders([await roles.defaultAccount.getAddress()]) + await oc.fulfillOracleRequest( + ...convertFufillParams( + request, + ethers.utils.formatBytes32String('hi mom!'), + ), + ) + }) + + it('does not allow the same requestId to be used', async () => { + await evmRevert( + cc.publicAddExternalRequest(newoc.address, request.requestId), + ) + }) + }) +}) diff --git a/contracts/test/v0.8/SimpleReadAccessController.test.ts b/contracts/test/v0.8/SimpleReadAccessController.test.ts new file mode 100644 index 00000000..32e2e743 --- /dev/null +++ b/contracts/test/v0.8/SimpleReadAccessController.test.ts @@ -0,0 +1,250 @@ +import { ethers } from 'hardhat' +import { publicAbi } from '../test-helpers/helpers' +import { assert, expect } from 'chai' +import { Contract, ContractFactory, Transaction } from 'ethers' +import { Personas, getUsers } from '../test-helpers/setup' + +let personas: Personas + +let controllerFactory: ContractFactory +let controller: Contract + +before(async () => { + personas = (await getUsers()).personas + controllerFactory = await ethers.getContractFactory( + 'src/v0.8/shared/access/SimpleReadAccessController.sol:SimpleReadAccessController', + personas.Carol, + ) +}) + +describe('SimpleReadAccessController', () => { + beforeEach(async () => { + controller = await controllerFactory.connect(personas.Carol).deploy() + }) + + 
it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(controller, [ + 'hasAccess', + 'addAccess', + 'disableAccessCheck', + 'enableAccessCheck', + 'removeAccess', + 'checkEnabled', + // Owned + 'acceptOwnership', + 'owner', + 'transferOwnership', + ]) + }) + + describe('#constructor', () => { + it('defaults checkEnabled to true', async () => { + assert(await controller.checkEnabled()) + }) + }) + + describe('#hasAccess', () => { + it('allows unauthorized calls originating from the same account', async () => { + assert.isTrue( + await controller + .connect(personas.Eddy) + .hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + }) + + it('blocks unauthorized calls originating from different accounts', async () => { + assert.isFalse( + await controller + .connect(personas.Carol) + .hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + assert.isFalse( + await controller + .connect(personas.Eddy) + .hasAccess(await personas.Carol.getAddress(), '0x00'), + ) + }) + }) + + describe('#addAccess', () => { + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + controller + .connect(personas.Eddy) + .addAccess(await personas.Eddy.getAddress()), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('when called by the owner', () => { + let tx: Transaction + beforeEach(async () => { + assert.isFalse( + await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + tx = await controller.addAccess(await personas.Eddy.getAddress()) + }) + + it('adds the address to the controller', async () => { + assert.isTrue( + await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + }) + + it('announces the change via a log', async () => { + await expect(tx) + .to.emit(controller, 'AddedAccess') + .withArgs(await personas.Eddy.getAddress()) + }) + + describe('when called twice', () => { + it('does not emit a log', async () => { + const tx2 = await 
controller.addAccess( + await personas.Eddy.getAddress(), + ) + const receipt = await tx2.wait() + assert.equal(receipt.events?.length, 0) + }) + }) + }) + }) + + describe('#removeAccess', () => { + beforeEach(async () => { + await controller.addAccess(await personas.Eddy.getAddress()) + assert.isTrue( + await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + }) + + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + controller + .connect(personas.Eddy) + .removeAccess(await personas.Eddy.getAddress()), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('when called by the owner', () => { + let tx: Transaction + beforeEach(async () => { + tx = await controller.removeAccess(await personas.Eddy.getAddress()) + }) + + it('removes the address from the controller', async () => { + assert.isFalse( + await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + }) + + it('announces the change via a log', async () => { + await expect(tx) + .to.emit(controller, 'RemovedAccess') + .withArgs(await personas.Eddy.getAddress()) + }) + + describe('when called twice', () => { + it('does not emit a log', async () => { + const tx2 = await controller.removeAccess( + await personas.Eddy.getAddress(), + ) + const receipt = await tx2.wait() + assert.equal(receipt.events?.length, 0) + }) + }) + }) + }) + + describe('#disableAccessCheck', () => { + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + controller.connect(personas.Eddy).disableAccessCheck(), + ).to.be.revertedWith('Only callable by owner') + assert.isTrue(await controller.checkEnabled()) + }) + }) + + describe('when called by the owner', () => { + let tx: Transaction + beforeEach(async () => { + await controller.addAccess(await personas.Eddy.getAddress()) + tx = await controller.disableAccessCheck() + }) + + it('sets checkEnabled to false', async () => { + assert.isFalse(await 
controller.checkEnabled()) + }) + + it('allows users with access', async () => { + assert.isTrue( + await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + }) + + it('allows users without access', async () => { + assert.isTrue( + await controller.hasAccess(await personas.Ned.getAddress(), '0x00'), + ) + }) + + it('announces the change via a log', async () => { + await expect(tx).to.emit(controller, 'CheckAccessDisabled') + }) + + describe('when called twice', () => { + it('does not emit a log', async () => { + const tx2 = await controller.disableAccessCheck() + const receipt = await tx2.wait() + assert.equal(receipt.events?.length, 0) + }) + }) + }) + }) + + describe('#enableAccessCheck', () => { + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + controller.connect(personas.Eddy).enableAccessCheck(), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('when called by the owner', () => { + let tx: Transaction + beforeEach(async () => { + await controller.disableAccessCheck() + await controller.addAccess(await personas.Eddy.getAddress()) + tx = await controller.enableAccessCheck() + }) + + it('allows users with access', async () => { + assert.isTrue( + await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + }) + + it('does not allow users without access', async () => { + assert.isFalse( + await controller.hasAccess(await personas.Ned.getAddress(), '0x00'), + ) + }) + + it('announces the change via a log', async () => { + expect(tx).to.emit(controller, 'CheckAccessEnabled') + }) + + describe('when called twice', () => { + it('does not emit a log', async () => { + const tx2 = await controller.enableAccessCheck() + const receipt = await tx2.wait() + assert.equal(receipt.events?.length, 0) + }) + }) + }) + }) +}) diff --git a/contracts/test/v0.8/SimpleWriteAccessController.test.ts b/contracts/test/v0.8/SimpleWriteAccessController.test.ts new file mode 100644 index 
00000000..f4d7f219 --- /dev/null +++ b/contracts/test/v0.8/SimpleWriteAccessController.test.ts @@ -0,0 +1,214 @@ +import { ethers } from 'hardhat' +import { publicAbi } from '../test-helpers/helpers' +import { assert, expect } from 'chai' +import { Contract, ContractFactory, Transaction } from 'ethers' +import { Personas, getUsers } from '../test-helpers/setup' + +let personas: Personas + +let controllerFactory: ContractFactory +let controller: Contract + +before(async () => { + personas = (await getUsers()).personas + controllerFactory = await ethers.getContractFactory( + 'src/v0.8/shared/access/SimpleWriteAccessController.sol:SimpleWriteAccessController', + personas.Carol, + ) +}) + +describe('SimpleWriteAccessController', () => { + beforeEach(async () => { + controller = await controllerFactory.connect(personas.Carol).deploy() + }) + + it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(controller, [ + 'hasAccess', + 'addAccess', + 'disableAccessCheck', + 'enableAccessCheck', + 'removeAccess', + 'checkEnabled', + // Owned + 'acceptOwnership', + 'owner', + 'transferOwnership', + ]) + }) + + describe('#constructor', () => { + it('defaults checkEnabled to true', async () => { + assert(await controller.checkEnabled()) + }) + }) + + describe('#hasAccess', () => { + it('allows unauthorized calls originating from the same account', async () => { + assert.isFalse( + await controller + .connect(personas.Eddy) + .hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + }) + + it('blocks unauthorized calls originating from different accounts', async () => { + assert.isFalse( + await controller + .connect(personas.Carol) + .hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + assert.isFalse( + await controller + .connect(personas.Eddy) + .hasAccess(await personas.Carol.getAddress(), '0x00'), + ) + }) + }) + + describe('#addAccess', () => { + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + 
controller + .connect(personas.Eddy) + .addAccess(await personas.Eddy.getAddress()), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('when called by the owner', () => { + let tx: Transaction + beforeEach(async () => { + assert.isFalse( + await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + tx = await controller.addAccess(await personas.Eddy.getAddress()) + }) + + it('adds the address to the controller', async () => { + assert.isTrue( + await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + }) + + it('announces the change via a log', async () => { + expect(tx) + .to.emit(controller, 'AddedAccess') + .withArgs(await personas.Eddy.getAddress()) + }) + }) + }) + + describe('#removeAccess', () => { + beforeEach(async () => { + await controller.addAccess(await personas.Eddy.getAddress()) + assert.isTrue( + await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + }) + + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + controller + .connect(personas.Eddy) + .removeAccess(await personas.Eddy.getAddress()), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('when called by the owner', () => { + let tx: Transaction + beforeEach(async () => { + tx = await controller.removeAccess(await personas.Eddy.getAddress()) + }) + + it('removes the address from the controller', async () => { + assert.isFalse( + await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + }) + + it('announces the change via a log', async () => { + expect(tx) + .to.emit(controller, 'RemovedAccess') + .withArgs(await personas.Eddy.getAddress()) + }) + }) + }) + + describe('#disableAccessCheck', () => { + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + controller.connect(personas.Eddy).disableAccessCheck(), + ).to.be.revertedWith('Only callable by owner') + assert.isTrue(await 
controller.checkEnabled()) + }) + }) + + describe('when called by the owner', () => { + let tx: Transaction + beforeEach(async () => { + await controller.addAccess(await personas.Eddy.getAddress()) + tx = await controller.disableAccessCheck() + }) + + it('sets checkEnabled to false', async () => { + assert.isFalse(await controller.checkEnabled()) + }) + + it('allows users with access', async () => { + assert.isTrue( + await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + }) + + it('allows users without access', async () => { + assert.isTrue( + await controller.hasAccess(await personas.Ned.getAddress(), '0x00'), + ) + }) + + it('announces the change via a log', async () => { + await expect(tx).to.emit(controller, 'CheckAccessDisabled') + }) + }) + }) + + describe('#enableAccessCheck', () => { + describe('when called by a non-owner', () => { + it('reverts', async () => { + await expect( + controller.connect(personas.Eddy).enableAccessCheck(), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('when called by the owner', () => { + let tx: Transaction + beforeEach(async () => { + await controller.disableAccessCheck() + await controller.addAccess(await personas.Eddy.getAddress()) + tx = await controller.enableAccessCheck() + }) + + it('allows users with access', async () => { + assert.isTrue( + await controller.hasAccess(await personas.Eddy.getAddress(), '0x00'), + ) + }) + + it('does not allow users without access', async () => { + assert.isFalse( + await controller.hasAccess(await personas.Ned.getAddress(), '0x00'), + ) + }) + + it('announces the change via a log', async () => { + await expect(tx).to.emit(controller, 'CheckAccessEnabled') + }) + }) + }) +}) diff --git a/contracts/test/v0.8/VRFD20.test.ts b/contracts/test/v0.8/VRFD20.test.ts new file mode 100644 index 00000000..88466160 --- /dev/null +++ b/contracts/test/v0.8/VRFD20.test.ts @@ -0,0 +1,303 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 
'chai' +import { + BigNumber, + constants, + Contract, + ContractFactory, + ContractTransaction, +} from 'ethers' +import { getUsers, Personas, Roles } from '../test-helpers/setup' +import { + evmWordToAddress, + getLog, + publicAbi, + toBytes32String, + toWei, + numToBytes32, + getLogs, +} from '../test-helpers/helpers' + +let roles: Roles +let personas: Personas +let linkTokenFactory: ContractFactory +let vrfCoordinatorMockFactory: ContractFactory +let vrfD20Factory: ContractFactory + +before(async () => { + const users = await getUsers() + + roles = users.roles + personas = users.personas + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + roles.defaultAccount, + ) + vrfCoordinatorMockFactory = await ethers.getContractFactory( + 'src/v0.8/vrf/mocks/VRFCoordinatorMock.sol:VRFCoordinatorMock', + roles.defaultAccount, + ) + vrfD20Factory = await ethers.getContractFactory( + 'src/v0.6/examples/VRFD20.sol:VRFD20', + roles.defaultAccount, + ) +}) + +describe('VRFD20', () => { + const deposit = toWei('1') + const fee = toWei('0.1') + const keyHash = toBytes32String('keyHash') + + let link: Contract + let vrfCoordinator: Contract + let vrfD20: Contract + + beforeEach(async () => { + link = await linkTokenFactory.connect(roles.defaultAccount).deploy() + vrfCoordinator = await vrfCoordinatorMockFactory + .connect(roles.defaultAccount) + .deploy(link.address) + vrfD20 = await vrfD20Factory + .connect(roles.defaultAccount) + .deploy(vrfCoordinator.address, link.address, keyHash, fee) + await link.transfer(vrfD20.address, deposit) + }) + + it('has a limited public interface [ @skip-coverage ]', () => { + publicAbi(vrfD20, [ + // Owned + 'acceptOwnership', + 'owner', + 'transferOwnership', + //VRFConsumerBase + 'rawFulfillRandomness', + // VRFD20 + 'rollDice', + 'house', + 'withdrawPLI', + 'keyHash', + 'fee', + 'setKeyHash', + 'setFee', + ]) + }) + + describe('#withdrawPLI', () => { + describe('failure', () => { + it('reverts when 
called by a non-owner', async () => { + await expect( + vrfD20 + .connect(roles.stranger) + .withdrawPLI(await roles.stranger.getAddress(), deposit), + ).to.be.revertedWith('Only callable by owner') + }) + + it('reverts when not enough PLI in the contract', async () => { + const withdrawAmount = deposit.mul(2) + await expect( + vrfD20 + .connect(roles.defaultAccount) + .withdrawPLI( + await roles.defaultAccount.getAddress(), + withdrawAmount, + ), + ).to.be.reverted + }) + }) + + describe('success', () => { + it('withdraws PLI', async () => { + const startingAmount = await link.balanceOf( + await roles.defaultAccount.getAddress(), + ) + const expectedAmount = BigNumber.from(startingAmount).add(deposit) + await vrfD20 + .connect(roles.defaultAccount) + .withdrawPLI(await roles.defaultAccount.getAddress(), deposit) + const actualAmount = await link.balanceOf( + await roles.defaultAccount.getAddress(), + ) + assert.equal(actualAmount.toString(), expectedAmount.toString()) + }) + }) + }) + + describe('#setKeyHash', () => { + const newHash = toBytes32String('newhash') + + describe('failure', () => { + it('reverts when called by a non-owner', async () => { + await expect( + vrfD20.connect(roles.stranger).setKeyHash(newHash), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('success', () => { + it('sets the key hash', async () => { + await vrfD20.setKeyHash(newHash) + const actualHash = await vrfD20.keyHash() + assert.equal(actualHash, newHash) + }) + }) + }) + + describe('#setFee', () => { + const newFee = 1234 + + describe('failure', () => { + it('reverts when called by a non-owner', async () => { + await expect( + vrfD20.connect(roles.stranger).setFee(newFee), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('success', () => { + it('sets the fee', async () => { + await vrfD20.setFee(newFee) + const actualFee = await vrfD20.fee() + assert.equal(actualFee.toString(), newFee.toString()) + }) + }) + }) + + describe('#house', () 
=> { + describe('failure', () => { + it('reverts when dice not rolled', async () => { + await expect( + vrfD20.house(await personas.Nancy.getAddress()), + ).to.be.revertedWith('Dice not rolled') + }) + + it('reverts when dice roll is in progress', async () => { + await vrfD20.rollDice(await personas.Nancy.getAddress()) + await expect( + vrfD20.house(await personas.Nancy.getAddress()), + ).to.be.revertedWith('Roll in progress') + }) + }) + + describe('success', () => { + it('returns the correct house', async () => { + const randomness = 98765 + const expectedHouse = 'Martell' + const tx = await vrfD20.rollDice(await personas.Nancy.getAddress()) + const log = await getLog(tx, 3) + const eventRequestId = log?.topics?.[1] + await vrfCoordinator.callBackWithRandomness( + eventRequestId, + randomness, + vrfD20.address, + ) + const response = await vrfD20.house(await personas.Nancy.getAddress()) + assert.equal(response.toString(), expectedHouse) + }) + }) + }) + + describe('#rollDice', () => { + describe('success', () => { + let tx: ContractTransaction + beforeEach(async () => { + tx = await vrfD20.rollDice(await personas.Nancy.getAddress()) + }) + + it('emits a RandomnessRequest event from the VRFCoordinator', async () => { + const log = await getLog(tx, 2) + const topics = log?.topics + assert.equal(evmWordToAddress(topics?.[1]), vrfD20.address) + assert.equal(topics?.[2], keyHash) + assert.equal(topics?.[3], constants.HashZero) + }) + }) + + describe('failure', () => { + it('reverts when PLI balance is zero', async () => { + const vrfD202 = await vrfD20Factory + .connect(roles.defaultAccount) + .deploy(vrfCoordinator.address, link.address, keyHash, fee) + await expect( + vrfD202.rollDice(await personas.Nancy.getAddress()), + ).to.be.revertedWith('Not enough PLI to pay fee') + }) + + it('reverts when called by a non-owner', async () => { + await expect( + vrfD20 + .connect(roles.stranger) + .rollDice(await personas.Nancy.getAddress()), + ).to.be.revertedWith('Only 
callable by owner') + }) + + it('reverts when the roller rolls more than once', async () => { + await vrfD20.rollDice(await personas.Nancy.getAddress()) + await expect( + vrfD20.rollDice(await personas.Nancy.getAddress()), + ).to.be.revertedWith('Already rolled') + }) + }) + }) + + describe('#fulfillRandomness', () => { + const randomness = 98765 + const expectedModResult = (randomness % 20) + 1 + const expectedHouse = 'Martell' + let eventRequestId: string + beforeEach(async () => { + const tx = await vrfD20.rollDice(await personas.Nancy.getAddress()) + const log = await getLog(tx, 3) + eventRequestId = log?.topics?.[1] + }) + + describe('success', () => { + let tx: ContractTransaction + beforeEach(async () => { + tx = await vrfCoordinator.callBackWithRandomness( + eventRequestId, + randomness, + vrfD20.address, + ) + }) + + it('emits a DiceLanded event', async () => { + const log = await getLog(tx, 0) + assert.equal(log?.topics[1], eventRequestId) + assert.equal(log?.topics[2], numToBytes32(expectedModResult)) + }) + + it('sets the correct dice roll result', async () => { + const response = await vrfD20.house(await personas.Nancy.getAddress()) + assert.equal(response.toString(), expectedHouse) + }) + + it('allows someone else to roll', async () => { + const secondRandomness = 55555 + tx = await vrfD20.rollDice(await personas.Ned.getAddress()) + const log = await getLog(tx, 3) + eventRequestId = log?.topics?.[1] + tx = await vrfCoordinator.callBackWithRandomness( + eventRequestId, + secondRandomness, + vrfD20.address, + ) + }) + }) + + describe('failure', () => { + it('does not fulfill when fulfilled by the wrong VRFcoordinator', async () => { + const vrfCoordinator2 = await vrfCoordinatorMockFactory + .connect(roles.defaultAccount) + .deploy(link.address) + + const tx = await vrfCoordinator2.callBackWithRandomness( + eventRequestId, + randomness, + vrfD20.address, + ) + const logs = await getLogs(tx) + assert.equal(logs.length, 0) + }) + }) + }) +}) diff --git 
a/contracts/test/v0.8/VRFSubscriptionBalanceMonitor.test.ts b/contracts/test/v0.8/VRFSubscriptionBalanceMonitor.test.ts new file mode 100644 index 00000000..6f910bc0 --- /dev/null +++ b/contracts/test/v0.8/VRFSubscriptionBalanceMonitor.test.ts @@ -0,0 +1,627 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import type { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { + LinkToken, + VRFSubscriptionBalanceMonitorExposed, +} from '../../typechain' +import * as h from '../test-helpers/helpers' +import { BigNumber, Contract } from 'ethers' + +const OWNABLE_ERR = 'Only callable by owner' +const INVALID_WATCHLIST_ERR = `InvalidWatchList()` +const PAUSED_ERR = 'Pausable: paused' +const ONLY_KEEPER_ERR = `OnlyKeeperRegistry()` + +const zeroPLI = ethers.utils.parseEther('0') +const onePLI = ethers.utils.parseEther('1') +const twoPLI = ethers.utils.parseEther('2') +const threePLI = ethers.utils.parseEther('3') +const fivePLI = ethers.utils.parseEther('5') +const sixPLI = ethers.utils.parseEther('6') +const tenPLI = ethers.utils.parseEther('10') +const oneHundredPLI = ethers.utils.parseEther('100') + +let lt: LinkToken +let coordinator: Contract +let bm: VRFSubscriptionBalanceMonitorExposed +let owner: SignerWithAddress +let stranger: SignerWithAddress +let keeperRegistry: SignerWithAddress + +const sub1 = BigNumber.from(1) +const sub2 = BigNumber.from(2) +const sub3 = BigNumber.from(3) +const sub4 = BigNumber.from(4) +const sub5 = BigNumber.from(5) +const sub6 = BigNumber.from(6) + +const toNums = (bigNums: BigNumber[]) => bigNums.map((n) => n.toNumber()) + +async function assertWatchlistBalances( + balance1: BigNumber, + balance2: BigNumber, + balance3: BigNumber, + balance4: BigNumber, + balance5: BigNumber, + balance6: BigNumber, +) { + await h.assertSubscriptionBalance(coordinator, sub1, balance1, 'sub 1') + await h.assertSubscriptionBalance(coordinator, sub2, balance2, 'sub 2') + await 
h.assertSubscriptionBalance(coordinator, sub3, balance3, 'sub 3') + await h.assertSubscriptionBalance(coordinator, sub4, balance4, 'sub 4') + await h.assertSubscriptionBalance(coordinator, sub5, balance5, 'sub 5') + await h.assertSubscriptionBalance(coordinator, sub6, balance6, 'sub 6') +} + +describe('VRFSubscriptionBalanceMonitor', () => { + beforeEach(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + stranger = accounts[1] + keeperRegistry = accounts[2] + + const bmFactory = await ethers.getContractFactory( + 'VRFSubscriptionBalanceMonitorExposed', + owner, + ) + const ltFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + owner, + ) + + const coordinatorFactory = await ethers.getContractFactory( + 'src/v0.8/vrf/VRFCoordinatorV2.sol:VRFCoordinatorV2', + owner, + ) + + lt = await ltFactory.deploy() + coordinator = await coordinatorFactory.deploy( + lt.address, + lt.address, + lt.address, + ) // we don't use BHS or LinkEthFeed + bm = await bmFactory.deploy( + lt.address, + coordinator.address, + keeperRegistry.address, + 0, + ) + + for (let i = 0; i <= 5; i++) { + await coordinator.connect(owner).createSubscription() + } + + // Transfer PLI to stranger. + await lt.transfer(stranger.address, oneHundredPLI) + + // Fund sub 5. + await lt + .connect(owner) + .transferAndCall( + coordinator.address, + oneHundredPLI, + ethers.utils.defaultAbiCoder.encode(['uint256'], ['5']), + ) + + // Fund sub 6. 
+ await lt + .connect(owner) + .transferAndCall( + coordinator.address, + oneHundredPLI, + ethers.utils.defaultAbiCoder.encode(['uint256'], ['6']), + ) + + await Promise.all([bm.deployed(), coordinator.deployed(), lt.deployed()]) + }) + + afterEach(async () => { + await h.reset() + }) + + describe('add funds', () => { + it('Should allow anyone to add funds', async () => { + await lt.transfer(bm.address, onePLI) + await lt.connect(stranger).transfer(bm.address, onePLI) + }) + }) + + describe('withdraw()', () => { + beforeEach(async () => { + const tx = await lt.connect(owner).transfer(bm.address, onePLI) + await tx.wait() + }) + + it('Should allow the owner to withdraw', async () => { + const beforeBalance = await lt.balanceOf(owner.address) + const tx = await bm.connect(owner).withdraw(onePLI, owner.address) + await tx.wait() + const afterBalance = await lt.balanceOf(owner.address) + assert.isTrue( + afterBalance.gt(beforeBalance), + 'balance did not increase after withdraw', + ) + }) + + it('Should emit an event', async () => { + const tx = await bm.connect(owner).withdraw(onePLI, owner.address) + await expect(tx) + .to.emit(bm, 'FundsWithdrawn') + .withArgs(onePLI, owner.address) + }) + + it('Should allow the owner to withdraw to anyone', async () => { + const beforeBalance = await lt.balanceOf(stranger.address) + const tx = await bm.connect(owner).withdraw(onePLI, stranger.address) + await tx.wait() + const afterBalance = await lt.balanceOf(stranger.address) + assert.isTrue( + beforeBalance.add(onePLI).eq(afterBalance), + 'balance did not increase after withdraw', + ) + }) + + it('Should not allow strangers to withdraw', async () => { + const tx = bm.connect(stranger).withdraw(onePLI, owner.address) + await expect(tx).to.be.revertedWith(OWNABLE_ERR) + }) + }) + + describe('pause() / unpause()', () => { + it('Should allow owner to pause / unpause', async () => { + const pauseTx = await bm.connect(owner).pause() + await pauseTx.wait() + const unpauseTx = await 
bm.connect(owner).unpause() + await unpauseTx.wait() + }) + + it('Should not allow strangers to pause / unpause', async () => { + const pauseTxStranger = bm.connect(stranger).pause() + await expect(pauseTxStranger).to.be.revertedWith(OWNABLE_ERR) + const pauseTxOwner = await bm.connect(owner).pause() + await pauseTxOwner.wait() + const unpauseTxStranger = bm.connect(stranger).unpause() + await expect(unpauseTxStranger).to.be.revertedWith(OWNABLE_ERR) + }) + }) + + describe('setWatchList() / getWatchList() / getAccountInfo()', () => { + it('Should allow owner to set the watchlist', async () => { + // should start inactive + assert.isFalse((await bm.getSubscriptionInfo(sub1)).isActive) + // add first watchlist + let setTx = await bm + .connect(owner) + .setWatchList([sub1], [onePLI], [twoPLI]) + await setTx.wait() + let watchList = await bm.getWatchList() + assert.deepEqual(toNums(watchList), toNums([sub1])) + const subInfo = await bm.getSubscriptionInfo(1) + assert.isTrue(subInfo.isActive) + expect(subInfo.minBalanceJuels).to.equal(onePLI) + expect(subInfo.topUpAmountJuels).to.equal(twoPLI) + // add more to watchlist + setTx = await bm + .connect(owner) + .setWatchList( + [1, 2, 3], + [onePLI, twoPLI, threePLI], + [twoPLI, threePLI, fivePLI], + ) + await setTx.wait() + watchList = await bm.getWatchList() + assert.deepEqual(toNums(watchList), toNums([sub1, sub2, sub3])) + let subInfo1 = await bm.getSubscriptionInfo(sub1) + let subInfo2 = await bm.getSubscriptionInfo(sub2) + let subInfo3 = await bm.getSubscriptionInfo(sub3) + expect(subInfo1.isActive).to.be.true + expect(subInfo1.minBalanceJuels).to.equal(onePLI) + expect(subInfo1.topUpAmountJuels).to.equal(twoPLI) + expect(subInfo2.isActive).to.be.true + expect(subInfo2.minBalanceJuels).to.equal(twoPLI) + expect(subInfo2.topUpAmountJuels).to.equal(threePLI) + expect(subInfo3.isActive).to.be.true + expect(subInfo3.minBalanceJuels).to.equal(threePLI) + expect(subInfo3.topUpAmountJuels).to.equal(fivePLI) + // remove 
some from watchlist + setTx = await bm + .connect(owner) + .setWatchList([sub3, sub1], [threePLI, onePLI], [fivePLI, twoPLI]) + await setTx.wait() + watchList = await bm.getWatchList() + assert.deepEqual(toNums(watchList), toNums([sub3, sub1])) + subInfo1 = await bm.getSubscriptionInfo(sub1) + subInfo2 = await bm.getSubscriptionInfo(sub2) + subInfo3 = await bm.getSubscriptionInfo(sub3) + expect(subInfo1.isActive).to.be.true + expect(subInfo2.isActive).to.be.false + expect(subInfo3.isActive).to.be.true + }) + + it('Should not allow duplicates in the watchlist', async () => { + const errMsg = `DuplicateSubcriptionId(${sub1})` + const setTx = bm + .connect(owner) + .setWatchList( + [sub1, sub2, sub1], + [onePLI, twoPLI, threePLI], + [twoPLI, threePLI, fivePLI], + ) + await expect(setTx).to.be.revertedWith(errMsg) + }) + + it('Should not allow a topUpAmountJuels les than or equal to minBalance in the watchlist', async () => { + const setTx = bm + .connect(owner) + .setWatchList( + [sub1, sub2, sub1], + [onePLI, twoPLI, threePLI], + [zeroPLI, twoPLI, threePLI], + ) + await expect(setTx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + + it('Should not allow strangers to set the watchlist', async () => { + const setTxStranger = bm + .connect(stranger) + .setWatchList([sub1], [onePLI], [twoPLI]) + await expect(setTxStranger).to.be.revertedWith(OWNABLE_ERR) + }) + + it('Should revert if the list lengths differ', async () => { + let tx = bm.connect(owner).setWatchList([sub1], [], [twoPLI]) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + tx = bm.connect(owner).setWatchList([sub1], [onePLI], []) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + tx = bm.connect(owner).setWatchList([], [onePLI], [twoPLI]) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + + it('Should revert if any of the subIDs are zero', async () => { + let tx = bm + .connect(owner) + .setWatchList([sub1, 0], [onePLI, onePLI], [twoPLI, twoPLI]) + await 
expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + + it('Should revert if any of the top up amounts are 0', async () => { + const tx = bm + .connect(owner) + .setWatchList([sub1, sub2], [onePLI, onePLI], [twoPLI, zeroPLI]) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + }) + + describe('getKeeperRegistryAddress() / setKeeperRegistryAddress()', () => { + const newAddress = ethers.Wallet.createRandom().address + + it('Should initialize with the registry address provided to the constructor', async () => { + const address = await bm.s_keeperRegistryAddress() + assert.equal(address, keeperRegistry.address) + }) + + it('Should allow the owner to set the registry address', async () => { + const setTx = await bm.connect(owner).setKeeperRegistryAddress(newAddress) + await setTx.wait() + const address = await bm.s_keeperRegistryAddress() + assert.equal(address, newAddress) + }) + + it('Should not allow strangers to set the registry address', async () => { + const setTx = bm.connect(stranger).setKeeperRegistryAddress(newAddress) + await expect(setTx).to.be.revertedWith(OWNABLE_ERR) + }) + + it('Should emit an event', async () => { + const setTx = await bm.connect(owner).setKeeperRegistryAddress(newAddress) + await expect(setTx) + .to.emit(bm, 'KeeperRegistryAddressUpdated') + .withArgs(keeperRegistry.address, newAddress) + }) + }) + + describe('getMinWaitPeriodSeconds / setMinWaitPeriodSeconds()', () => { + const newWaitPeriod = BigNumber.from(1) + + it('Should initialize with the wait period provided to the constructor', async () => { + const minWaitPeriod = await bm.s_minWaitPeriodSeconds() + expect(minWaitPeriod).to.equal(0) + }) + + it('Should allow owner to set the wait period', async () => { + const setTx = await bm + .connect(owner) + .setMinWaitPeriodSeconds(newWaitPeriod) + await setTx.wait() + const minWaitPeriod = await bm.s_minWaitPeriodSeconds() + expect(minWaitPeriod).to.equal(newWaitPeriod) + }) + + it('Should not allow strangers to 
set the wait period', async () => { + const setTx = bm.connect(stranger).setMinWaitPeriodSeconds(newWaitPeriod) + await expect(setTx).to.be.revertedWith(OWNABLE_ERR) + }) + + it('Should emit an event', async () => { + const setTx = await bm + .connect(owner) + .setMinWaitPeriodSeconds(newWaitPeriod) + await expect(setTx) + .to.emit(bm, 'MinWaitPeriodUpdated') + .withArgs(0, newWaitPeriod) + }) + }) + + describe('checkUpkeep() / getUnderfundedSubscriptions()', () => { + beforeEach(async () => { + const setTx = await bm.connect(owner).setWatchList( + [ + sub1, // needs funds + sub5, // funded + sub2, // needs funds + sub6, // funded + sub3, // needs funds + ], + new Array(5).fill(onePLI), + new Array(5).fill(twoPLI), + ) + await setTx.wait() + }) + + it('Should return list of subscriptions that are underfunded', async () => { + const fundTx = await lt.connect(owner).transfer( + bm.address, + sixPLI, // needs 6 total + ) + await fundTx.wait() + const [should, payload] = await bm.checkUpkeep('0x') + assert.isTrue(should) + let [subs] = ethers.utils.defaultAbiCoder.decode(['uint64[]'], payload) + assert.deepEqual(toNums(subs), toNums([sub1, sub2, sub3])) + // checkUpkeep payload should match getUnderfundedSubscriptions() + subs = await bm.getUnderfundedSubscriptions() + assert.deepEqual(toNums(subs), toNums([sub1, sub2, sub3])) + }) + + it('Should return some results even if contract cannot fund all eligible targets', async () => { + const fundTx = await lt.connect(owner).transfer( + bm.address, + fivePLI, // needs 6 total + ) + await fundTx.wait() + const [should, payload] = await bm.checkUpkeep('0x') + assert.isTrue(should) + const [subs] = ethers.utils.defaultAbiCoder.decode(['uint64[]'], payload) + assert.deepEqual(toNums(subs), toNums([sub1, sub2])) + }) + + it('Should omit subscriptions that have been funded recently', async () => { + const setWaitPdTx = await bm.setMinWaitPeriodSeconds(3600) // 1 hour + const fundTx = await lt.connect(owner).transfer(bm.address, 
sixPLI) + await Promise.all([setWaitPdTx.wait(), fundTx.wait()]) + const block = await ethers.provider.getBlock('latest') + const setTopUpTx = await bm.setLastTopUpXXXTestOnly( + sub2, + block.timestamp - 100, + ) + await setTopUpTx.wait() + const [should, payload] = await bm.checkUpkeep('0x') + assert.isTrue(should) + const [subs] = ethers.utils.defaultAbiCoder.decode(['uint64[]'], payload) + assert.deepEqual(toNums(subs), toNums([sub1, sub3])) + }) + + it('Should revert when paused', async () => { + const tx = await bm.connect(owner).pause() + await tx.wait() + const ethCall = bm.checkUpkeep('0x') + await expect(ethCall).to.be.revertedWith(PAUSED_ERR) + }) + }) + + describe('performUpkeep()', () => { + let validPayload: string + let invalidPayload: string + + beforeEach(async () => { + validPayload = ethers.utils.defaultAbiCoder.encode( + ['uint64[]'], + [[sub1, sub2, sub3]], + ) + invalidPayload = ethers.utils.defaultAbiCoder.encode( + ['uint64[]'], + [[sub1, sub2, sub4, sub5]], + ) + const setTx = await bm.connect(owner).setWatchList( + [ + sub1, // needs funds + sub5, // funded + sub2, // needs funds + sub6, // funded + sub3, // needs funds + // sub4 - omitted + ], + new Array(5).fill(onePLI), + new Array(5).fill(twoPLI), + ) + await setTx.wait() + }) + + it('Should revert when paused', async () => { + const pauseTx = await bm.connect(owner).pause() + await pauseTx.wait() + const performTx = bm.connect(keeperRegistry).performUpkeep(validPayload) + await expect(performTx).to.be.revertedWith(PAUSED_ERR) + }) + + context('when partially funded', () => { + it('Should fund as many subscriptions as possible', async () => { + const fundTx = await lt.connect(owner).transfer( + bm.address, + fivePLI, // only enough PLI to fund 2 subscriptions + ) + await fundTx.wait() + console.log((await lt.balanceOf(bm.address)).toString()) + await assertWatchlistBalances( + zeroPLI, + zeroPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + const performTx = await bm + 
.connect(keeperRegistry) + .performUpkeep(validPayload, { gasLimit: 2_500_000 }) + + await assertWatchlistBalances( + twoPLI, + twoPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + await expect(performTx).to.emit(bm, 'TopUpSucceeded').withArgs(sub1) + await expect(performTx).to.emit(bm, 'TopUpSucceeded').withArgs(sub1) + }) + }) + + context('when fully funded', () => { + beforeEach(async () => { + const fundTx = await lt.connect(owner).transfer(bm.address, tenPLI) + await fundTx.wait() + }) + + it('Should fund the appropriate subscriptions', async () => { + await assertWatchlistBalances( + zeroPLI, + zeroPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(validPayload, { gasLimit: 2_500_000 }) + await performTx.wait() + await assertWatchlistBalances( + twoPLI, + twoPLI, + twoPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + }) + + it('Should only fund active, underfunded subscriptions', async () => { + await assertWatchlistBalances( + zeroPLI, + zeroPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(invalidPayload, { gasLimit: 2_500_000 }) + await performTx.wait() + await assertWatchlistBalances( + twoPLI, + twoPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + }) + + it('Should not fund subscriptions that have been funded recently', async () => { + const setWaitPdTx = await bm.setMinWaitPeriodSeconds(3600) // 1 hour + await setWaitPdTx.wait() + const block = await ethers.provider.getBlock('latest') + const setTopUpTx = await bm.setLastTopUpXXXTestOnly( + sub2, + block.timestamp - 100, + ) + await setTopUpTx.wait() + await assertWatchlistBalances( + zeroPLI, + zeroPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(validPayload, { gasLimit: 2_500_000 }) + await 
performTx.wait() + await assertWatchlistBalances( + twoPLI, + zeroPLI, + twoPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + }) + + it('Should only be callable by the keeper registry contract', async () => { + let performTx = bm.connect(owner).performUpkeep(validPayload) + await expect(performTx).to.be.revertedWith(ONLY_KEEPER_ERR) + performTx = bm.connect(stranger).performUpkeep(validPayload) + await expect(performTx).to.be.revertedWith(ONLY_KEEPER_ERR) + }) + + it('Should protect against running out of gas', async () => { + await assertWatchlistBalances( + zeroPLI, + zeroPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(validPayload, { gasLimit: 130_000 }) // too little for all 3 transfers + await performTx.wait() + const balance1 = (await coordinator.getSubscription(sub1)).balance + const balance2 = (await coordinator.getSubscription(sub2)).balance + const balance3 = (await coordinator.getSubscription(sub3)).balance + const balances = [balance1, balance2, balance3].map((n) => n.toString()) + expect(balances) + .to.include(twoPLI.toString()) // expect at least 1 transfer + .to.include(zeroPLI.toString()) // expect at least 1 out of funds + }) + }) + }) + + describe('topUp()', () => { + context('when not paused', () => { + it('Should be callable by anyone', async () => { + const users = [owner, keeperRegistry, stranger] + for (let idx = 0; idx < users.length; idx++) { + const user = users[idx] + await bm.connect(user).topUp([]) + } + }) + }) + context('when paused', () => { + it('Should be callable by no one', async () => { + await bm.connect(owner).pause() + const users = [owner, keeperRegistry, stranger] + for (let idx = 0; idx < users.length; idx++) { + const user = users[idx] + const tx = bm.connect(user).topUp([]) + await expect(tx).to.be.revertedWith(PAUSED_ERR) + } + }) + }) + }) +}) diff --git a/contracts/test/v0.8/ValidatorProxy.test.ts 
b/contracts/test/v0.8/ValidatorProxy.test.ts new file mode 100644 index 00000000..2d274245 --- /dev/null +++ b/contracts/test/v0.8/ValidatorProxy.test.ts @@ -0,0 +1,403 @@ +import { ethers } from 'hardhat' +import { publicAbi } from '../test-helpers/helpers' +import { assert, expect } from 'chai' +import { Signer, Contract, constants } from 'ethers' +import { Users, getUsers } from '../test-helpers/setup' + +let users: Users + +let owner: Signer +let ownerAddress: string +let aggregator: Signer +let aggregatorAddress: string +let validator: Signer +let validatorAddress: string +let validatorProxy: Contract + +before(async () => { + users = await getUsers() + owner = users.personas.Default + aggregator = users.contracts.contract1 + validator = users.contracts.contract2 + ownerAddress = await owner.getAddress() + aggregatorAddress = await aggregator.getAddress() + validatorAddress = await validator.getAddress() +}) + +describe('ValidatorProxy', () => { + beforeEach(async () => { + const vpf = await ethers.getContractFactory( + 'src/v0.8/ValidatorProxy.sol:ValidatorProxy', + owner, + ) + validatorProxy = await vpf.deploy(aggregatorAddress, validatorAddress) + validatorProxy = await validatorProxy.deployed() + }) + + it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(validatorProxy, [ + // ConfirmedOwner functions + 'acceptOwnership', + 'owner', + 'transferOwnership', + // ValidatorProxy functions + 'validate', + 'proposeNewAggregator', + 'upgradeAggregator', + 'getAggregators', + 'proposeNewValidator', + 'upgradeValidator', + 'getValidators', + 'typeAndVersion', + ]) + }) + + describe('#constructor', () => { + it('should set the aggregator addresses correctly', async () => { + const response = await validatorProxy.getAggregators() + assert.equal(response.current, aggregatorAddress) + assert.equal(response.hasProposal, false) + assert.equal(response.proposed, constants.AddressZero) + }) + + it('should set the validator addresses 
correctly', async () => { + const response = await validatorProxy.getValidators() + assert.equal(response.current, validatorAddress) + assert.equal(response.hasProposal, false) + assert.equal(response.proposed, constants.AddressZero) + }) + + it('should set the owner correctly', async () => { + const response = await validatorProxy.owner() + assert.equal(response, ownerAddress) + }) + }) + + describe('#proposeNewAggregator', () => { + let newAggregator: Signer + let newAggregatorAddress: string + beforeEach(async () => { + newAggregator = users.contracts.contract3 + newAggregatorAddress = await newAggregator.getAddress() + }) + + describe('failure', () => { + it('should only be called by the owner', async () => { + const stranger = users.contracts.contract4 + await expect( + validatorProxy + .connect(stranger) + .proposeNewAggregator(newAggregatorAddress), + ).to.be.revertedWith('Only callable by owner') + }) + + it('should revert if no change in proposal', async () => { + await validatorProxy.proposeNewAggregator(newAggregatorAddress) + await expect( + validatorProxy.proposeNewAggregator(newAggregatorAddress), + ).to.be.revertedWith('Invalid proposal') + }) + + it('should revert if the proposal is the same as the current', async () => { + await expect( + validatorProxy.proposeNewAggregator(aggregatorAddress), + ).to.be.revertedWith('Invalid proposal') + }) + }) + + describe('success', () => { + it('should emit an event', async () => { + await expect(validatorProxy.proposeNewAggregator(newAggregatorAddress)) + .to.emit(validatorProxy, 'AggregatorProposed') + .withArgs(newAggregatorAddress) + }) + + it('should set the correct address and hasProposal is true', async () => { + await validatorProxy.proposeNewAggregator(newAggregatorAddress) + const response = await validatorProxy.getAggregators() + assert.equal(response.current, aggregatorAddress) + assert.equal(response.hasProposal, true) + assert.equal(response.proposed, newAggregatorAddress) + }) + + it('should set 
a zero address and hasProposal is false', async () => { + await validatorProxy.proposeNewAggregator(newAggregatorAddress) + await validatorProxy.proposeNewAggregator(constants.AddressZero) + const response = await validatorProxy.getAggregators() + assert.equal(response.current, aggregatorAddress) + assert.equal(response.hasProposal, false) + assert.equal(response.proposed, constants.AddressZero) + }) + }) + }) + + describe('#upgradeAggregator', () => { + describe('failure', () => { + it('should only be called by the owner', async () => { + const stranger = users.contracts.contract4 + await expect( + validatorProxy.connect(stranger).upgradeAggregator(), + ).to.be.revertedWith('Only callable by owner') + }) + + it('should revert if there is no proposal', async () => { + await expect(validatorProxy.upgradeAggregator()).to.be.revertedWith( + 'No proposal', + ) + }) + }) + + describe('success', () => { + let newAggregator: Signer + let newAggregatorAddress: string + beforeEach(async () => { + newAggregator = users.contracts.contract3 + newAggregatorAddress = await newAggregator.getAddress() + await validatorProxy.proposeNewAggregator(newAggregatorAddress) + }) + + it('should emit an event', async () => { + await expect(validatorProxy.upgradeAggregator()) + .to.emit(validatorProxy, 'AggregatorUpgraded') + .withArgs(aggregatorAddress, newAggregatorAddress) + }) + + it('should upgrade the addresses', async () => { + await validatorProxy.upgradeAggregator() + const response = await validatorProxy.getAggregators() + assert.equal(response.current, newAggregatorAddress) + assert.equal(response.hasProposal, false) + assert.equal(response.proposed, constants.AddressZero) + }) + }) + }) + + describe('#proposeNewValidator', () => { + let newValidator: Signer + let newValidatorAddress: string + + beforeEach(async () => { + newValidator = users.contracts.contract3 + newValidatorAddress = await newValidator.getAddress() + }) + + describe('failure', () => { + it('should only be called 
by the owner', async () => { + const stranger = users.contracts.contract4 + await expect( + validatorProxy + .connect(stranger) + .proposeNewValidator(newValidatorAddress), + ).to.be.revertedWith('Only callable by owner') + }) + + it('should revert if no change in proposal', async () => { + await validatorProxy.proposeNewValidator(newValidatorAddress) + await expect( + validatorProxy.proposeNewValidator(newValidatorAddress), + ).to.be.revertedWith('Invalid proposal') + }) + + it('should revert if the proposal is the same as the current', async () => { + await expect( + validatorProxy.proposeNewValidator(validatorAddress), + ).to.be.revertedWith('Invalid proposal') + }) + }) + + describe('success', () => { + it('should emit an event', async () => { + await expect(validatorProxy.proposeNewValidator(newValidatorAddress)) + .to.emit(validatorProxy, 'ValidatorProposed') + .withArgs(newValidatorAddress) + }) + + it('should set the correct address and hasProposal is true', async () => { + await validatorProxy.proposeNewValidator(newValidatorAddress) + const response = await validatorProxy.getValidators() + assert.equal(response.current, validatorAddress) + assert.equal(response.hasProposal, true) + assert.equal(response.proposed, newValidatorAddress) + }) + + it('should set a zero address and hasProposal is false', async () => { + await validatorProxy.proposeNewValidator(newValidatorAddress) + await validatorProxy.proposeNewValidator(constants.AddressZero) + const response = await validatorProxy.getValidators() + assert.equal(response.current, validatorAddress) + assert.equal(response.hasProposal, false) + assert.equal(response.proposed, constants.AddressZero) + }) + }) + }) + + describe('#upgradeValidator', () => { + describe('failure', () => { + it('should only be called by the owner', async () => { + const stranger = users.contracts.contract4 + await expect( + validatorProxy.connect(stranger).upgradeValidator(), + ).to.be.revertedWith('Only callable by owner') + }) + 
+ it('should revert if there is no proposal', async () => { + await expect(validatorProxy.upgradeValidator()).to.be.revertedWith( + 'No proposal', + ) + }) + }) + + describe('success', () => { + let newValidator: Signer + let newValidatorAddress: string + beforeEach(async () => { + newValidator = users.contracts.contract3 + newValidatorAddress = await newValidator.getAddress() + await validatorProxy.proposeNewValidator(newValidatorAddress) + }) + + it('should emit an event', async () => { + await expect(validatorProxy.upgradeValidator()) + .to.emit(validatorProxy, 'ValidatorUpgraded') + .withArgs(validatorAddress, newValidatorAddress) + }) + + it('should upgrade the addresses', async () => { + await validatorProxy.upgradeValidator() + const response = await validatorProxy.getValidators() + assert.equal(response.current, newValidatorAddress) + assert.equal(response.hasProposal, false) + assert.equal(response.proposed, constants.AddressZero) + }) + }) + }) + + describe('#validate', () => { + describe('failure', () => { + it('reverts when not called by aggregator or proposed aggregator', async () => { + const stranger = users.contracts.contract5 + await expect( + validatorProxy.connect(stranger).validate(99, 88, 77, 66), + ).to.be.revertedWith('Not a configured aggregator') + }) + + it('reverts when there is no validator set', async () => { + const vpf = await ethers.getContractFactory( + 'src/v0.8/ValidatorProxy.sol:ValidatorProxy', + owner, + ) + validatorProxy = await vpf.deploy( + aggregatorAddress, + constants.AddressZero, + ) + await validatorProxy.deployed() + await expect( + validatorProxy.connect(aggregator).validate(99, 88, 77, 66), + ).to.be.revertedWith('No validator set') + }) + }) + + describe('success', () => { + describe('from the aggregator', () => { + let mockValidator1: Contract + beforeEach(async () => { + const mvf = await ethers.getContractFactory( + 'src/v0.8/mocks/MockAggregatorValidator.sol:MockAggregatorValidator', + owner, + ) + 
mockValidator1 = await mvf.deploy(1) + mockValidator1 = await mockValidator1.deployed() + const vpf = await ethers.getContractFactory( + 'src/v0.8/ValidatorProxy.sol:ValidatorProxy', + owner, + ) + validatorProxy = await vpf.deploy( + aggregatorAddress, + mockValidator1.address, + ) + validatorProxy = await validatorProxy.deployed() + }) + + describe('for a single validator', () => { + it('calls validate on the validator', async () => { + await expect( + validatorProxy.connect(aggregator).validate(200, 300, 400, 500), + ) + .to.emit(mockValidator1, 'ValidateCalled') + .withArgs(1, 200, 300, 400, 500) + }) + + it('uses a specific amount of gas [ @skip-coverage ]', async () => { + const resp = await validatorProxy + .connect(aggregator) + .validate(200, 300, 400, 500) + const receipt = await resp.wait() + assert.equal(receipt.gasUsed.toString(), '32373') + }) + }) + + describe('for a validator and a proposed validator', () => { + let mockValidator2: Contract + + beforeEach(async () => { + const mvf = await ethers.getContractFactory( + 'src/v0.8/mocks/MockAggregatorValidator.sol:MockAggregatorValidator', + owner, + ) + mockValidator2 = await mvf.deploy(2) + mockValidator2 = await mockValidator2.deployed() + await validatorProxy.proposeNewValidator(mockValidator2.address) + }) + + it('calls validate on the validator', async () => { + await expect( + validatorProxy + .connect(aggregator) + .validate(2000, 3000, 4000, 5000), + ) + .to.emit(mockValidator1, 'ValidateCalled') + .withArgs(1, 2000, 3000, 4000, 5000) + }) + + it('also calls validate on the proposed validator', async () => { + await expect( + validatorProxy + .connect(aggregator) + .validate(2000, 3000, 4000, 5000), + ) + .to.emit(mockValidator2, 'ValidateCalled') + .withArgs(2, 2000, 3000, 4000, 5000) + }) + + it('uses a specific amount of gas [ @skip-coverage ]', async () => { + const resp = await validatorProxy + .connect(aggregator) + .validate(2000, 3000, 4000, 5000) + const receipt = await resp.wait() + 
assert.equal(receipt.gasUsed.toString(), '40429') + }) + }) + }) + + describe('from the proposed aggregator', () => { + let newAggregator: Signer + let newAggregatorAddress: string + beforeEach(async () => { + newAggregator = users.contracts.contract3 + newAggregatorAddress = await newAggregator.getAddress() + await validatorProxy + .connect(owner) + .proposeNewAggregator(newAggregatorAddress) + }) + + it('emits an event', async () => { + await expect( + validatorProxy.connect(newAggregator).validate(555, 666, 777, 888), + ) + .to.emit(validatorProxy, 'ProposedAggregatorValidateCall') + .withArgs(newAggregatorAddress, 555, 666, 777, 888) + }) + }) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/AutomationGasAnalysis.test.ts b/contracts/test/v0.8/automation/AutomationGasAnalysis.test.ts new file mode 100644 index 00000000..99a9fb9c --- /dev/null +++ b/contracts/test/v0.8/automation/AutomationGasAnalysis.test.ts @@ -0,0 +1,258 @@ +import { ethers } from 'hardhat' +import { BigNumber } from 'ethers' +import { expect, assert } from 'chai' +import { getUsers } from '../../test-helpers/setup' +import { randomAddress, toWei } from '../../test-helpers/helpers' +import { deployRegistry21 } from './helpers' + +// don't run these tests in CI +const describeMaybe = process.env.CI ? 
describe.skip : describe + +// registry settings +const f = 1 +const linkEth = BigNumber.from(300000000) +const gasWei = BigNumber.from(100) +const minUpkeepSpend = BigNumber.from('1000000000000000000') +const paymentPremiumPPB = BigNumber.from(250000000) +const flatFeeMicroLink = BigNumber.from(0) +const blockCountPerTurn = 20 +const checkGasLimit = BigNumber.from(20000000) +const fallbackGasPrice = BigNumber.from(200) +const fallbackLinkPrice = BigNumber.from(200000000) +const maxCheckDataSize = BigNumber.from(10000) +const maxPerformDataSize = BigNumber.from(10000) +const maxRevertDataSize = BigNumber.from(1000) +const maxPerformGas = BigNumber.from(5000000) +const stalenessSeconds = BigNumber.from(43820) +const gasCeilingMultiplier = BigNumber.from(1) +const signers = [ + randomAddress(), + randomAddress(), + randomAddress(), + randomAddress(), +] +const transmitters = [ + randomAddress(), + randomAddress(), + randomAddress(), + randomAddress(), +] +const transcoder = ethers.constants.AddressZero + +// registrar settings +const triggerType = 0 // conditional +const autoApproveType = 2 // auto-approve enabled +const autoApproveMaxAllowed = 100 // auto-approve enabled + +// upkeep settings +const name = 'test upkeep' +const encryptedEmail = '0xabcd1234' +const gasLimit = 100_000 +const checkData = '0xdeadbeef' +const amount = toWei('5') +const source = 5 +const triggerConfig = '0x' +const offchainConfig = '0x' + +describeMaybe('Automation Gas Analysis', () => { + it('Compares gas usage amongst registries / registrars', async () => { + assert( + Boolean(process.env.REPORT_GAS), + 'this test must be run with REPORT_GAS=true', + ) + + const personas = (await getUsers()).personas + const owner = personas.Default + const ownerAddress = await owner.getAddress() + + // factories + const getFact = ethers.getContractFactory + const linkTokenFactory = await getFact('LinkToken') + const mockV3AggregatorFactory = await getFact( + 
'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + ) + const upkeepMockFactory = await getFact('UpkeepMock') + const registry12Factory = await getFact('KeeperRegistry1_2') + const registrar12Factory = await getFact('KeeperRegistrar') + const registry20Factory = await getFact('KeeperRegistry2_0') + const registryLogic20Factory = await getFact('KeeperRegistryLogic2_0') + const registrar20Factory = await getFact('KeeperRegistrar2_0') + const registrar21Factory = await getFact('AutomationRegistrar2_1') + const forwarderLogicFactory = await getFact('AutomationForwarderLogic') + + // deploy dependency contracts + const linkToken = await linkTokenFactory.connect(owner).deploy() + const gasPriceFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(0, gasWei) + const linkEthFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(9, linkEth) + const upkeep = await upkeepMockFactory.connect(owner).deploy() + + // deploy v1.2 + const registrar12 = await registrar12Factory.connect(owner).deploy( + linkToken.address, + autoApproveType, + autoApproveMaxAllowed, + ethers.constants.AddressZero, // set later + minUpkeepSpend, + ) + const registry12 = await registry12Factory + .connect(owner) + .deploy(linkToken.address, linkEthFeed.address, gasPriceFeed.address, { + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder, + registrar: registrar12.address, + }) + await registrar12.setRegistrationConfig( + autoApproveType, + autoApproveMaxAllowed, + registry12.address, + minUpkeepSpend, + ) + + // deploy v2.0 + const registryLogic20 = await registryLogic20Factory + .connect(owner) + .deploy(0, linkToken.address, linkEthFeed.address, gasPriceFeed.address) + const registry20 = await registry20Factory + .connect(owner) + .deploy(registryLogic20.address) + const registrar20 = await registrar20Factory + 
.connect(owner) + .deploy( + linkToken.address, + autoApproveType, + autoApproveMaxAllowed, + registry20.address, + minUpkeepSpend, + ) + const config20 = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder, + registrar: registrar20.address, + } + const onchainConfig20 = ethers.utils.defaultAbiCoder.encode( + [ + 'tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds\ + ,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,\ + uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,\ + address registrar)', + ], + [config20], + ) + await registry20 + .connect(owner) + .setConfig(signers, transmitters, f, onchainConfig20, 1, '0x') + + // deploy v2.1 + const forwarderLogic = await forwarderLogicFactory.connect(owner).deploy() + const registry21 = await deployRegistry21( + owner, + 0, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + forwarderLogic.address, + ) + const registrar21 = await registrar21Factory + .connect(owner) + .deploy(linkToken.address, registry21.address, minUpkeepSpend, [ + { + triggerType, + autoApproveType, + autoApproveMaxAllowed, + }, + ]) + const onchainConfig21 = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder, + registrars: [registrar21.address], + upkeepPrivilegeManager: randomAddress(), + } + await registry21 + .connect(owner) + .setConfigTypeSafe(signers, transmitters, f, onchainConfig21, 1, '0x') + + // approve PLI + await linkToken.connect(owner).approve(registrar20.address, amount) + await 
linkToken.connect(owner).approve(registrar21.address, amount) + + const abiEncodedBytes = registrar12.interface.encodeFunctionData( + 'register', + [ + name, + encryptedEmail, + upkeep.address, + gasLimit, + ownerAddress, + checkData, + amount, + source, + ownerAddress, + ], + ) + + let tx = await linkToken + .connect(owner) + .transferAndCall(registrar12.address, amount, abiEncodedBytes) + await expect(tx).to.emit(registry12, 'UpkeepRegistered') + + tx = await registrar20.connect(owner).registerUpkeep({ + name, + encryptedEmail, + upkeepContract: upkeep.address, + gasLimit, + adminAddress: ownerAddress, + checkData, + amount, + offchainConfig, + }) + await expect(tx).to.emit(registry20, 'UpkeepRegistered') + + tx = await registrar21.connect(owner).registerUpkeep({ + name, + encryptedEmail, + upkeepContract: upkeep.address, + gasLimit, + adminAddress: ownerAddress, + triggerType, + checkData, + amount, + triggerConfig, + offchainConfig, + }) + await expect(tx).to.emit(registry21, 'UpkeepRegistered') + }) +}) diff --git a/contracts/test/v0.8/automation/AutomationRegistrar2_1.test.ts b/contracts/test/v0.8/automation/AutomationRegistrar2_1.test.ts new file mode 100644 index 00000000..cf1188b0 --- /dev/null +++ b/contracts/test/v0.8/automation/AutomationRegistrar2_1.test.ts @@ -0,0 +1,1035 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { evmRevert } from '../../test-helpers/matchers' +import { getUsers, Personas } from '../../test-helpers/setup' +import { BigNumber, Signer } from 'ethers' +import { LinkToken__factory as LinkTokenFactory } from '../../../typechain/factories/LinkToken__factory' +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../../typechain/factories/MockV3Aggregator__factory' +import { UpkeepMock__factory as UpkeepMockFactory } from '../../../typechain/factories/UpkeepMock__factory' +import { AutomationRegistrar2_1__factory as AutomationRegistrarFactory } from 
'../../../typechain/factories/AutomationRegistrar2_1__factory' +import { MockV3Aggregator } from '../../../typechain/MockV3Aggregator' +import { LinkToken } from '../../../typechain/LinkToken' +import { UpkeepMock } from '../../../typechain/UpkeepMock' +import { toWei } from '../../test-helpers/helpers' +import { IKeeperRegistryMaster as IKeeperRegistry } from '../../../typechain/IKeeperRegistryMaster' +import { AutomationRegistrar2_1 as Registrar } from '../../../typechain/AutomationRegistrar2_1' +import { deployRegistry21 } from './helpers' + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +/*********************************** REGISTRAR v2.1 IS FROZEN ************************************/ + +// We are leaving the original tests enabled, however as 2.1 is still actively being deployed + +describe('AutomationRegistrar2_1 - Frozen [ @skip-coverage ]', () => { + it('has not changed', () => { + assert.equal( + ethers.utils.id(AutomationRegistrarFactory.bytecode), + '0x9633058bd81e8479f88baaee9bda533406295c80ccbc43d4509701001bbea6e3', + 'KeeperRegistry bytecode has changed', + ) + }) +}) + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +// copied from KeeperRegistryBase2_1.sol +enum Trigger { + CONDITION, + LOG, +} + +let linkTokenFactory: LinkTokenFactory +let mockV3AggregatorFactory: MockV3AggregatorFactory +let upkeepMockFactory: UpkeepMockFactory + +let personas: Personas + +before(async () => { + personas = (await getUsers()).personas + + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + ) + mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as 
unknown as MockV3AggregatorFactory + upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') +}) + +const errorMsgs = { + onlyOwner: 'revert Only callable by owner', + onlyAdmin: 'OnlyAdminOrOwner()', + hashPayload: 'HashMismatch()', + requestNotFound: 'RequestNotFound()', +} + +describe('AutomationRegistrar2_1', () => { + const upkeepName = 'SampleUpkeep' + + const linkEth = BigNumber.from(300000000) + const gasWei = BigNumber.from(100) + const performGas = BigNumber.from(100000) + const paymentPremiumPPB = BigNumber.from(250000000) + const flatFeeMicroLink = BigNumber.from(0) + const maxAllowedAutoApprove = 5 + const trigger = '0xdeadbeef' + const offchainConfig = '0x01234567' + + const emptyBytes = '0x00' + const stalenessSeconds = BigNumber.from(43820) + const gasCeilingMultiplier = BigNumber.from(1) + const checkGasLimit = BigNumber.from(20000000) + const fallbackGasPrice = BigNumber.from(200) + const fallbackLinkPrice = BigNumber.from(200000000) + const maxCheckDataSize = BigNumber.from(10000) + const maxPerformDataSize = BigNumber.from(10000) + const maxRevertDataSize = BigNumber.from(1000) + const maxPerformGas = BigNumber.from(5000000) + const minUpkeepSpend = BigNumber.from('1000000000000000000') + const amount = BigNumber.from('5000000000000000000') + const amount1 = BigNumber.from('6000000000000000000') + const transcoder = ethers.constants.AddressZero + const upkeepManager = ethers.Wallet.createRandom().address + + // Enum values are not auto exported in ABI so have to manually declare + const autoApproveType_DISABLED = 0 + const autoApproveType_ENABLED_SENDER_ALLOWLIST = 1 + const autoApproveType_ENABLED_ALL = 2 + + let owner: Signer + let admin: Signer + let someAddress: Signer + let registrarOwner: Signer + let stranger: Signer + let requestSender: Signer + + let linkToken: LinkToken + let linkEthFeed: MockV3Aggregator + let gasPriceFeed: MockV3Aggregator + let mock: UpkeepMock + let registry: IKeeperRegistry + let registrar: Registrar + + 
beforeEach(async () => { + owner = personas.Default + admin = personas.Neil + someAddress = personas.Ned + registrarOwner = personas.Nelly + stranger = personas.Nancy + requestSender = personas.Norbert + + linkToken = await linkTokenFactory.connect(owner).deploy() + gasPriceFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(0, gasWei) + linkEthFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(9, linkEth) + + registry = await deployRegistry21( + owner, + 0, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + mock = await upkeepMockFactory.deploy() + + const registrarFactory = await ethers.getContractFactory( + 'AutomationRegistrar2_1', + ) + registrar = await registrarFactory + .connect(registrarOwner) + .deploy(linkToken.address, registry.address, minUpkeepSpend, [ + { + triggerType: Trigger.CONDITION, + autoApproveType: autoApproveType_DISABLED, + autoApproveMaxAllowed: 0, + }, + { + triggerType: Trigger.LOG, + autoApproveType: autoApproveType_DISABLED, + autoApproveMaxAllowed: 0, + }, + ]) + + await linkToken + .connect(owner) + .transfer(await requestSender.getAddress(), toWei('1000')) + + const keepers = [ + await personas.Carol.getAddress(), + await personas.Nancy.getAddress(), + await personas.Ned.getAddress(), + await personas.Neil.getAddress(), + ] + const onchainConfig = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder, + registrars: [registrar.address], + upkeepPrivilegeManager: upkeepManager, + } + await registry + .connect(owner) + .setConfigTypeSafe(keepers, keepers, 1, onchainConfig, 1, '0x') + }) + + describe('#typeAndVersion', () => { + it('uses the correct type and version', async () => { + const typeAndVersion = await registrar.typeAndVersion() + assert.equal(typeAndVersion, 
'AutomationRegistrar 2.1.0') + }) + }) + + describe('#register', () => { + it('reverts if not called by the PLI token', async () => { + await evmRevert( + registrar + .connect(someAddress) + .register( + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ), + 'OnlyLink()', + ) + }) + + it('reverts if the amount passed in data mismatches actual amount sent', async () => { + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + ) + + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount1, + await requestSender.getAddress(), + ], + ) + + await evmRevert( + linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes), + 'AmountMismatch()', + ) + }) + + it('reverts if the sender passed in data mismatches actual sender', async () => { + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await admin.getAddress(), // Should have been requestSender.getAddress() + ], + ) + await evmRevert( + linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes), + 'SenderMismatch()', + ) + }) + + it('reverts if the admin address is 0x0000...', async () => { + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + '0x0000000000000000000000000000000000000000', + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + + await 
evmRevert( + linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes), + 'RegistrationRequestFailed()', + ) + }) + + it('Auto Approve ON - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () => { + //set auto approve ON with high threshold limits + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + ) + + //register with auto approve ON + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + + const [id] = await registry.getActiveUpkeepIDs(0, 1) + + //confirm if a new upkeep has been registered and the details are the same as the one just registered + const newupkeep = await registry.getUpkeep(id) + assert.equal(newupkeep.target, mock.address) + assert.equal(newupkeep.admin, await admin.getAddress()) + assert.equal(newupkeep.checkData, emptyBytes) + assert.equal(newupkeep.balance.toString(), amount.toString()) + assert.equal(newupkeep.performGas, performGas.toNumber()) + assert.equal(newupkeep.offchainConfig, offchainConfig) + + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('Auto Approve OFF - does not registers an upkeep on KeeperRegistry, emits only RegistrationRequested event', async () => { + //get upkeep count before attempting registration + const beforeCount = (await registry.getState()).state.numUpkeeps + + //set auto approve OFF, threshold limits dont matter in this case + await registrar + .connect(registrarOwner) + 
.setTriggerConfig( + Trigger.CONDITION, + autoApproveType_DISABLED, + maxAllowedAutoApprove, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + + //get upkeep count after attempting registration + const afterCount = (await registry.getState()).state.numUpkeeps + //confirm that a new upkeep has NOT been registered and upkeep count is still the same + assert.deepEqual(beforeCount, afterCount) + + //confirm that only RegistrationRequested event is emitted and RegistrationApproved event is not + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).not.to.emit(registrar, 'RegistrationApproved') + + const hash = receipt.logs[2].topics[1] + const pendingRequest = await registrar.getPendingRequest(hash) + assert.equal(await admin.getAddress(), pendingRequest[0]) + assert.ok(amount.eq(pendingRequest[1])) + }) + + it('Auto Approve ON - Throttle max approvals - does not register an upkeep on KeeperRegistry beyond the max limit, emits only RegistrationRequested event after limit is hit', async () => { + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 0) + + //set auto approve on, with max 1 allowed + await registrar + .connect(registrarOwner) + .setTriggerConfig(Trigger.CONDITION, autoApproveType_ENABLED_ALL, 1) + + //set auto approve on, with max 1 allowed + await registrar + .connect(registrarOwner) + .setTriggerConfig(Trigger.LOG, autoApproveType_ENABLED_ALL, 1) + + // register within threshold, new upkeep should be registered + let abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, 
+ emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 1) // 0 -> 1 + + // try registering another one, new upkeep should not be registered + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + performGas.toNumber() + 1, // make unique hash + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 1) // Still 1 + + // register a second type of upkeep, different limit + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + Trigger.LOG, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 2) // 1 -> 2 + + // Now set new max limit to 2. 
One more upkeep should get auto approved + await registrar + .connect(registrarOwner) + .setTriggerConfig(Trigger.CONDITION, autoApproveType_ENABLED_ALL, 2) + + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + performGas.toNumber() + 2, // make unique hash + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 3) // 2 -> 3 + + // One more upkeep should not get registered + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + performGas.toNumber() + 3, // make unique hash + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 3) // Still 3 + }) + + it('Auto Approve Sender Allowlist - sender in allowlist - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () => { + const senderAddress = await requestSender.getAddress() + + //set auto approve to ENABLED_SENDER_ALLOWLIST type with high threshold limits + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_ENABLED_SENDER_ALLOWLIST, + maxAllowedAutoApprove, + ) + + // Add sender to allowlist + await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, true) + + //register with auto approve ON + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await 
admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + + const [id] = await registry.getActiveUpkeepIDs(0, 1) + + //confirm if a new upkeep has been registered and the details are the same as the one just registered + const newupkeep = await registry.getUpkeep(id) + assert.equal(newupkeep.target, mock.address) + assert.equal(newupkeep.admin, await admin.getAddress()) + assert.equal(newupkeep.checkData, emptyBytes) + assert.equal(newupkeep.balance.toString(), amount.toString()) + assert.equal(newupkeep.performGas, performGas.toNumber()) + + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('Auto Approve Sender Allowlist - sender NOT in allowlist - does not registers an upkeep on KeeperRegistry, emits only RegistrationRequested event', async () => { + const beforeCount = (await registry.getState()).state.numUpkeeps + const senderAddress = await requestSender.getAddress() + + //set auto approve to ENABLED_SENDER_ALLOWLIST type with high threshold limits + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_ENABLED_SENDER_ALLOWLIST, + maxAllowedAutoApprove, + ) + + // Explicitly remove sender from allowlist + await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, false) + + //register. 
auto approve shouldn't happen + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + + //get upkeep count after attempting registration + const afterCount = (await registry.getState()).state.numUpkeeps + //confirm that a new upkeep has NOT been registered and upkeep count is still the same + assert.deepEqual(beforeCount, afterCount) + + //confirm that only RegistrationRequested event is emitted and RegistrationApproved event is not + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).not.to.emit(registrar, 'RegistrationApproved') + + const hash = receipt.logs[2].topics[1] + const pendingRequest = await registrar.getPendingRequest(hash) + assert.equal(await admin.getAddress(), pendingRequest[0]) + assert.ok(amount.eq(pendingRequest[1])) + }) + }) + + describe('#registerUpkeep', () => { + it('reverts with empty message if amount sent is not available in PLI allowance', async () => { + await evmRevert( + registrar.connect(someAddress).registerUpkeep({ + name: upkeepName, + upkeepContract: mock.address, + gasLimit: performGas, + adminAddress: await admin.getAddress(), + triggerType: 0, + checkData: emptyBytes, + triggerConfig: trigger, + offchainConfig: emptyBytes, + amount, + encryptedEmail: emptyBytes, + }), + '', + ) + }) + + it('reverts if the amount passed in data is less than configured minimum', async () => { + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + ) + + // amt is one order of magnitude less than minUpkeepSpend + const amt = BigNumber.from('100000000000000000') + + 
await evmRevert( + registrar.connect(someAddress).registerUpkeep({ + name: upkeepName, + upkeepContract: mock.address, + gasLimit: performGas, + adminAddress: await admin.getAddress(), + triggerType: 0, + checkData: emptyBytes, + triggerConfig: trigger, + offchainConfig: emptyBytes, + amount: amt, + encryptedEmail: emptyBytes, + }), + 'InsufficientPayment()', + ) + }) + + it('Auto Approve ON - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () => { + //set auto approve ON with high threshold limits + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + ) + + await linkToken.connect(requestSender).approve(registrar.address, amount) + + const tx = await registrar.connect(requestSender).registerUpkeep({ + name: upkeepName, + upkeepContract: mock.address, + gasLimit: performGas, + adminAddress: await admin.getAddress(), + triggerType: 0, + checkData: emptyBytes, + triggerConfig: trigger, + offchainConfig, + amount, + encryptedEmail: emptyBytes, + }) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 1) // 0 -> 1 + + //confirm if a new upkeep has been registered and the details are the same as the one just registered + const [id] = await registry.getActiveUpkeepIDs(0, 1) + const newupkeep = await registry.getUpkeep(id) + assert.equal(newupkeep.target, mock.address) + assert.equal(newupkeep.admin, await admin.getAddress()) + assert.equal(newupkeep.checkData, emptyBytes) + assert.equal(newupkeep.balance.toString(), amount.toString()) + assert.equal(newupkeep.performGas, performGas.toNumber()) + assert.equal(newupkeep.offchainConfig, offchainConfig) + + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + }) + + describe('#setAutoApproveAllowedSender', () => { + it('reverts if not called by the owner', async () 
=> { + const tx = registrar + .connect(stranger) + .setAutoApproveAllowedSender(await admin.getAddress(), false) + await evmRevert(tx, 'Only callable by owner') + }) + + it('sets the allowed status correctly and emits log', async () => { + const senderAddress = await stranger.getAddress() + let tx = await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, true) + await expect(tx) + .to.emit(registrar, 'AutoApproveAllowedSenderSet') + .withArgs(senderAddress, true) + + let senderAllowedStatus = await registrar + .connect(owner) + .getAutoApproveAllowedSender(senderAddress) + assert.isTrue(senderAllowedStatus) + + tx = await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, false) + await expect(tx) + .to.emit(registrar, 'AutoApproveAllowedSenderSet') + .withArgs(senderAddress, false) + + senderAllowedStatus = await registrar + .connect(owner) + .getAutoApproveAllowedSender(senderAddress) + assert.isFalse(senderAllowedStatus) + }) + }) + + describe('#setTriggerConfig', () => { + it('reverts if not called by the owner', async () => { + const tx = registrar + .connect(stranger) + .setTriggerConfig(Trigger.LOG, autoApproveType_ENABLED_ALL, 100) + await evmRevert(tx, 'Only callable by owner') + }) + + it('changes the config', async () => { + const tx = await registrar + .connect(registrarOwner) + .setTriggerConfig(Trigger.LOG, autoApproveType_ENABLED_ALL, 100) + await registrar.getTriggerRegistrationDetails(Trigger.LOG) + await expect(tx) + .to.emit(registrar, 'TriggerConfigSet') + .withArgs(Trigger.LOG, autoApproveType_ENABLED_ALL, 100) + }) + }) + + describe('#approve', () => { + let hash: string + + beforeEach(async () => { + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_DISABLED, + maxAllowedAutoApprove, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + 
emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + hash = receipt.logs[2].topics[1] + }) + + it('reverts if not called by the owner', async () => { + const tx = registrar + .connect(stranger) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + emptyBytes, + hash, + ) + await evmRevert(tx, 'Only callable by owner') + }) + + it('reverts if the hash does not exist', async () => { + const tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + emptyBytes, + '0x000000000000000000000000322813fd9a801c5507c9de605d63cea4f2ce6c44', + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + + it('reverts if any member of the payload changes', async () => { + let tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + ethers.Wallet.createRandom().address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + 10000, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + ethers.Wallet.createRandom().address, + 0, + emptyBytes, + trigger, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + '0x1234', + trigger, + emptyBytes, + hash, 
+ ) + await evmRevert(tx, errorMsgs.hashPayload) + }) + + it('approves an existing registration request', async () => { + const tx = await registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + hash, + ) + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('deletes the request afterwards / reverts if the request DNE', async () => { + await registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + hash, + ) + const tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + hash, + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + }) + + describe('#cancel', () => { + let hash: string + + beforeEach(async () => { + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_DISABLED, + maxAllowedAutoApprove, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + hash = receipt.logs[2].topics[1] + // submit duplicate request (increase balance) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + }) + + it('reverts if not called by the admin / owner', async () => { + const tx = registrar.connect(stranger).cancel(hash) + await evmRevert(tx, errorMsgs.onlyAdmin) + }) + + it('reverts if the hash does 
not exist', async () => { + const tx = registrar + .connect(registrarOwner) + .cancel( + '0x000000000000000000000000322813fd9a801c5507c9de605d63cea4f2ce6c44', + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + + it('refunds the total request balance to the admin address if owner cancels', async () => { + const before = await linkToken.balanceOf(await admin.getAddress()) + const tx = await registrar.connect(registrarOwner).cancel(hash) + const after = await linkToken.balanceOf(await admin.getAddress()) + assert.isTrue(after.sub(before).eq(amount.mul(BigNumber.from(2)))) + await expect(tx).to.emit(registrar, 'RegistrationRejected') + }) + + it('refunds the total request balance to the admin address if admin cancels', async () => { + const before = await linkToken.balanceOf(await admin.getAddress()) + const tx = await registrar.connect(admin).cancel(hash) + const after = await linkToken.balanceOf(await admin.getAddress()) + assert.isTrue(after.sub(before).eq(amount.mul(BigNumber.from(2)))) + await expect(tx).to.emit(registrar, 'RegistrationRejected') + }) + + it('deletes the request hash', async () => { + await registrar.connect(registrarOwner).cancel(hash) + let tx = registrar.connect(registrarOwner).cancel(hash) + await evmRevert(tx, errorMsgs.requestNotFound) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/AutomationRegistrar2_2.test.ts b/contracts/test/v0.8/automation/AutomationRegistrar2_2.test.ts new file mode 100644 index 00000000..33e34080 --- /dev/null +++ b/contracts/test/v0.8/automation/AutomationRegistrar2_2.test.ts @@ -0,0 +1,1013 @@ +import { ethers } from 'hardhat' +import { ContractFactory, Contract } from 'ethers' +import { assert, expect } from 'chai' +import { evmRevert } from 
'../../test-helpers/matchers' +import { getUsers, Personas } from '../../test-helpers/setup' +import { BigNumber, Signer } from 'ethers' +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../../typechain/factories/MockV3Aggregator__factory' +import { UpkeepMock__factory as UpkeepMockFactory } from '../../../typechain/factories/UpkeepMock__factory' +import { MockV3Aggregator } from '../../../typechain/MockV3Aggregator' +import { UpkeepMock } from '../../../typechain/UpkeepMock' +import { toWei } from '../../test-helpers/helpers' +import { IKeeperRegistryMaster as IKeeperRegistry } from '../../../typechain/IKeeperRegistryMaster' +import { AutomationRegistrar2_2 as Registrar } from '../../../typechain/AutomationRegistrar2_2' +import { deployRegistry21 } from './helpers' + +// copied from KeeperRegistryBase2_2.sol +enum Trigger { + CONDITION, + LOG, +} + +let linkTokenFactory: ContractFactory +let mockV3AggregatorFactory: MockV3AggregatorFactory +let upkeepMockFactory: UpkeepMockFactory + +let personas: Personas + +before(async () => { + personas = (await getUsers()).personas + + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + ) + mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as unknown as MockV3AggregatorFactory + upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') +}) + +const errorMsgs = { + onlyOwner: 'revert Only callable by owner', + onlyAdmin: 'OnlyAdminOrOwner()', + hashPayload: 'HashMismatch()', + requestNotFound: 'RequestNotFound()', +} + +describe('AutomationRegistrar2_2', () => { + const upkeepName = 'SampleUpkeep' + + const linkEth = BigNumber.from(300000000) + const gasWei = BigNumber.from(100) + const performGas = BigNumber.from(100000) + const paymentPremiumPPB = BigNumber.from(250000000) + const flatFeeMicroLink = BigNumber.from(0) + const maxAllowedAutoApprove = 5 + const trigger = '0xdeadbeef' + 
const offchainConfig = '0x01234567' + + const emptyBytes = '0x00' + const stalenessSeconds = BigNumber.from(43820) + const gasCeilingMultiplier = BigNumber.from(1) + const checkGasLimit = BigNumber.from(20000000) + const fallbackGasPrice = BigNumber.from(200) + const fallbackLinkPrice = BigNumber.from(200000000) + const maxCheckDataSize = BigNumber.from(10000) + const maxPerformDataSize = BigNumber.from(10000) + const maxRevertDataSize = BigNumber.from(1000) + const maxPerformGas = BigNumber.from(5000000) + const minUpkeepSpend = BigNumber.from('1000000000000000000') + const amount = BigNumber.from('5000000000000000000') + const amount1 = BigNumber.from('6000000000000000000') + const transcoder = ethers.constants.AddressZero + const upkeepManager = ethers.Wallet.createRandom().address + + // Enum values are not auto exported in ABI so have to manually declare + const autoApproveType_DISABLED = 0 + const autoApproveType_ENABLED_SENDER_ALLOWLIST = 1 + const autoApproveType_ENABLED_ALL = 2 + + let owner: Signer + let admin: Signer + let someAddress: Signer + let registrarOwner: Signer + let stranger: Signer + let requestSender: Signer + + let linkToken: Contract + let linkEthFeed: MockV3Aggregator + let gasPriceFeed: MockV3Aggregator + let mock: UpkeepMock + let registry: IKeeperRegistry + let registrar: Registrar + + beforeEach(async () => { + owner = personas.Default + admin = personas.Neil + someAddress = personas.Ned + registrarOwner = personas.Nelly + stranger = personas.Nancy + requestSender = personas.Norbert + + linkToken = await linkTokenFactory.connect(owner).deploy() + gasPriceFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(0, gasWei) + linkEthFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(9, linkEth) + + registry = await deployRegistry21( + owner, + 0, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + mock = await upkeepMockFactory.deploy() + + const registrarFactory = await 
ethers.getContractFactory( + 'AutomationRegistrar2_2', + ) + registrar = await registrarFactory + .connect(registrarOwner) + .deploy(linkToken.address, registry.address, minUpkeepSpend, [ + { + triggerType: Trigger.CONDITION, + autoApproveType: autoApproveType_DISABLED, + autoApproveMaxAllowed: 0, + }, + { + triggerType: Trigger.LOG, + autoApproveType: autoApproveType_DISABLED, + autoApproveMaxAllowed: 0, + }, + ]) + + await linkToken + .connect(owner) + .transfer(await requestSender.getAddress(), toWei('1000')) + + const keepers = [ + await personas.Carol.getAddress(), + await personas.Nancy.getAddress(), + await personas.Ned.getAddress(), + await personas.Neil.getAddress(), + ] + const onchainConfig = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder, + registrars: [registrar.address], + upkeepPrivilegeManager: upkeepManager, + } + await registry + .connect(owner) + .setConfigTypeSafe(keepers, keepers, 1, onchainConfig, 1, '0x') + }) + + describe('#typeAndVersion', () => { + it('uses the correct type and version', async () => { + const typeAndVersion = await registrar.typeAndVersion() + assert.equal(typeAndVersion, 'AutomationRegistrar 2.1.0') + }) + }) + + describe('#register', () => { + it('reverts if not called by the PLI token', async () => { + await evmRevert( + registrar + .connect(someAddress) + .register( + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ), + 'OnlyLink()', + ) + }) + + it('reverts if the amount passed in data mismatches actual amount sent', async () => { + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + ) + + 
const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount1, + await requestSender.getAddress(), + ], + ) + + await evmRevert( + linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes), + 'AmountMismatch()', + ) + }) + + it('reverts if the sender passed in data mismatches actual sender', async () => { + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await admin.getAddress(), // Should have been requestSender.getAddress() + ], + ) + await evmRevert( + linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes), + 'SenderMismatch()', + ) + }) + + it('reverts if the admin address is 0x0000...', async () => { + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + '0x0000000000000000000000000000000000000000', + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + + await evmRevert( + linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes), + 'RegistrationRequestFailed()', + ) + }) + + it('Auto Approve ON - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () => { + //set auto approve ON with high threshold limits + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + ) + + //register with auto approve ON + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + 
emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + + const [id] = await registry.getActiveUpkeepIDs(0, 1) + + //confirm if a new upkeep has been registered and the details are the same as the one just registered + const newupkeep = await registry.getUpkeep(id) + assert.equal(newupkeep.target, mock.address) + assert.equal(newupkeep.admin, await admin.getAddress()) + assert.equal(newupkeep.checkData, emptyBytes) + assert.equal(newupkeep.balance.toString(), amount.toString()) + assert.equal(newupkeep.performGas, performGas.toNumber()) + assert.equal(newupkeep.offchainConfig, offchainConfig) + + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('Auto Approve OFF - does not registers an upkeep on KeeperRegistry, emits only RegistrationRequested event', async () => { + //get upkeep count before attempting registration + const beforeCount = (await registry.getState()).state.numUpkeeps + + //set auto approve OFF, threshold limits dont matter in this case + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_DISABLED, + maxAllowedAutoApprove, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + + //get upkeep count after attempting registration + const afterCount = (await 
registry.getState()).state.numUpkeeps + //confirm that a new upkeep has NOT been registered and upkeep count is still the same + assert.deepEqual(beforeCount, afterCount) + + //confirm that only RegistrationRequested event is emitted and RegistrationApproved event is not + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).not.to.emit(registrar, 'RegistrationApproved') + + const hash = receipt.logs[2].topics[1] + const pendingRequest = await registrar.getPendingRequest(hash) + assert.equal(await admin.getAddress(), pendingRequest[0]) + assert.ok(amount.eq(pendingRequest[1])) + }) + + it('Auto Approve ON - Throttle max approvals - does not register an upkeep on KeeperRegistry beyond the max limit, emits only RegistrationRequested event after limit is hit', async () => { + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 0) + + //set auto approve on, with max 1 allowed + await registrar + .connect(registrarOwner) + .setTriggerConfig(Trigger.CONDITION, autoApproveType_ENABLED_ALL, 1) + + //set auto approve on, with max 1 allowed + await registrar + .connect(registrarOwner) + .setTriggerConfig(Trigger.LOG, autoApproveType_ENABLED_ALL, 1) + + // register within threshold, new upkeep should be registered + let abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 1) // 0 -> 1 + + // try registering another one, new upkeep should not be registered + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + performGas.toNumber() + 1, // make unique hash + await admin.getAddress(), + 0, + 
emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 1) // Still 1 + + // register a second type of upkeep, different limit + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + Trigger.LOG, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 2) // 1 -> 2 + + // Now set new max limit to 2. One more upkeep should get auto approved + await registrar + .connect(registrarOwner) + .setTriggerConfig(Trigger.CONDITION, autoApproveType_ENABLED_ALL, 2) + + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + performGas.toNumber() + 2, // make unique hash + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 3) // 2 -> 3 + + // One more upkeep should not get registered + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + performGas.toNumber() + 3, // make unique hash + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await 
registry.getState()).state.numUpkeeps.toNumber(), 3) // Still 3 + }) + + it('Auto Approve Sender Allowlist - sender in allowlist - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () => { + const senderAddress = await requestSender.getAddress() + + //set auto approve to ENABLED_SENDER_ALLOWLIST type with high threshold limits + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_ENABLED_SENDER_ALLOWLIST, + maxAllowedAutoApprove, + ) + + // Add sender to allowlist + await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, true) + + //register with auto approve ON + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + + const [id] = await registry.getActiveUpkeepIDs(0, 1) + + //confirm if a new upkeep has been registered and the details are the same as the one just registered + const newupkeep = await registry.getUpkeep(id) + assert.equal(newupkeep.target, mock.address) + assert.equal(newupkeep.admin, await admin.getAddress()) + assert.equal(newupkeep.checkData, emptyBytes) + assert.equal(newupkeep.balance.toString(), amount.toString()) + assert.equal(newupkeep.performGas, performGas.toNumber()) + + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('Auto Approve Sender Allowlist - sender NOT in allowlist - does not registers an upkeep on KeeperRegistry, emits only RegistrationRequested event', async () => { + const beforeCount = (await registry.getState()).state.numUpkeeps + const senderAddress = 
await requestSender.getAddress() + + //set auto approve to ENABLED_SENDER_ALLOWLIST type with high threshold limits + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_ENABLED_SENDER_ALLOWLIST, + maxAllowedAutoApprove, + ) + + // Explicitly remove sender from allowlist + await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, false) + + //register. auto approve shouldn't happen + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + + //get upkeep count after attempting registration + const afterCount = (await registry.getState()).state.numUpkeeps + //confirm that a new upkeep has NOT been registered and upkeep count is still the same + assert.deepEqual(beforeCount, afterCount) + + //confirm that only RegistrationRequested event is emitted and RegistrationApproved event is not + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).not.to.emit(registrar, 'RegistrationApproved') + + const hash = receipt.logs[2].topics[1] + const pendingRequest = await registrar.getPendingRequest(hash) + assert.equal(await admin.getAddress(), pendingRequest[0]) + assert.ok(amount.eq(pendingRequest[1])) + }) + }) + + describe('#registerUpkeep', () => { + it('reverts with empty message if amount sent is not available in PLI allowance', async () => { + await evmRevert( + registrar.connect(someAddress).registerUpkeep({ + name: upkeepName, + upkeepContract: mock.address, + gasLimit: performGas, + adminAddress: await admin.getAddress(), + triggerType: 0, + checkData: emptyBytes, + triggerConfig: trigger, + 
offchainConfig: emptyBytes, + amount, + encryptedEmail: emptyBytes, + }), + '', + ) + }) + + it('reverts if the amount passed in data is less than configured minimum', async () => { + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + ) + + // amt is one order of magnitude less than minUpkeepSpend + const amt = BigNumber.from('100000000000000000') + + await evmRevert( + registrar.connect(someAddress).registerUpkeep({ + name: upkeepName, + upkeepContract: mock.address, + gasLimit: performGas, + adminAddress: await admin.getAddress(), + triggerType: 0, + checkData: emptyBytes, + triggerConfig: trigger, + offchainConfig: emptyBytes, + amount: amt, + encryptedEmail: emptyBytes, + }), + 'InsufficientPayment()', + ) + }) + + it('Auto Approve ON - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () => { + //set auto approve ON with high threshold limits + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + ) + + await linkToken.connect(requestSender).approve(registrar.address, amount) + + const tx = await registrar.connect(requestSender).registerUpkeep({ + name: upkeepName, + upkeepContract: mock.address, + gasLimit: performGas, + adminAddress: await admin.getAddress(), + triggerType: 0, + checkData: emptyBytes, + triggerConfig: trigger, + offchainConfig, + amount, + encryptedEmail: emptyBytes, + }) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 1) // 0 -> 1 + + //confirm if a new upkeep has been registered and the details are the same as the one just registered + const [id] = await registry.getActiveUpkeepIDs(0, 1) + const newupkeep = await registry.getUpkeep(id) + assert.equal(newupkeep.target, mock.address) + assert.equal(newupkeep.admin, await admin.getAddress()) + 
assert.equal(newupkeep.checkData, emptyBytes) + assert.equal(newupkeep.balance.toString(), amount.toString()) + assert.equal(newupkeep.performGas, performGas.toNumber()) + assert.equal(newupkeep.offchainConfig, offchainConfig) + + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + }) + + describe('#setAutoApproveAllowedSender', () => { + it('reverts if not called by the owner', async () => { + const tx = registrar + .connect(stranger) + .setAutoApproveAllowedSender(await admin.getAddress(), false) + await evmRevert(tx, 'Only callable by owner') + }) + + it('sets the allowed status correctly and emits log', async () => { + const senderAddress = await stranger.getAddress() + let tx = await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, true) + await expect(tx) + .to.emit(registrar, 'AutoApproveAllowedSenderSet') + .withArgs(senderAddress, true) + + let senderAllowedStatus = await registrar + .connect(owner) + .getAutoApproveAllowedSender(senderAddress) + assert.isTrue(senderAllowedStatus) + + tx = await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, false) + await expect(tx) + .to.emit(registrar, 'AutoApproveAllowedSenderSet') + .withArgs(senderAddress, false) + + senderAllowedStatus = await registrar + .connect(owner) + .getAutoApproveAllowedSender(senderAddress) + assert.isFalse(senderAllowedStatus) + }) + }) + + describe('#setTriggerConfig', () => { + it('reverts if not called by the owner', async () => { + const tx = registrar + .connect(stranger) + .setTriggerConfig(Trigger.LOG, autoApproveType_ENABLED_ALL, 100) + await evmRevert(tx, 'Only callable by owner') + }) + + it('changes the config', async () => { + const tx = await registrar + .connect(registrarOwner) + .setTriggerConfig(Trigger.LOG, autoApproveType_ENABLED_ALL, 100) + await registrar.getTriggerRegistrationDetails(Trigger.LOG) + await expect(tx) + 
.to.emit(registrar, 'TriggerConfigSet') + .withArgs(Trigger.LOG, autoApproveType_ENABLED_ALL, 100) + }) + }) + + describe('#approve', () => { + let hash: string + + beforeEach(async () => { + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_DISABLED, + maxAllowedAutoApprove, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + hash = receipt.logs[2].topics[1] + }) + + it('reverts if not called by the owner', async () => { + const tx = registrar + .connect(stranger) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + emptyBytes, + hash, + ) + await evmRevert(tx, 'Only callable by owner') + }) + + it('reverts if the hash does not exist', async () => { + const tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + emptyBytes, + '0x000000000000000000000000322813fd9a801c5507c9de605d63cea4f2ce6c44', + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + + it('reverts if any member of the payload changes', async () => { + let tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + ethers.Wallet.createRandom().address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + 10000, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + emptyBytes, + 
hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + ethers.Wallet.createRandom().address, + 0, + emptyBytes, + trigger, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + '0x1234', + trigger, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + }) + + it('approves an existing registration request', async () => { + const tx = await registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + hash, + ) + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('deletes the request afterwards / reverts if the request DNE', async () => { + await registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + hash, + ) + const tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + hash, + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + }) + + describe('#cancel', () => { + let hash: string + + beforeEach(async () => { + await registrar + .connect(registrarOwner) + .setTriggerConfig( + Trigger.CONDITION, + autoApproveType_DISABLED, + maxAllowedAutoApprove, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + 
.transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + hash = receipt.logs[2].topics[1] + // submit duplicate request (increase balance) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + }) + + it('reverts if not called by the admin / owner', async () => { + const tx = registrar.connect(stranger).cancel(hash) + await evmRevert(tx, errorMsgs.onlyAdmin) + }) + + it('reverts if the hash does not exist', async () => { + const tx = registrar + .connect(registrarOwner) + .cancel( + '0x000000000000000000000000322813fd9a801c5507c9de605d63cea4f2ce6c44', + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + + it('refunds the total request balance to the admin address if owner cancels', async () => { + const before = await linkToken.balanceOf(await admin.getAddress()) + const tx = await registrar.connect(registrarOwner).cancel(hash) + const after = await linkToken.balanceOf(await admin.getAddress()) + assert.isTrue(after.sub(before).eq(amount.mul(BigNumber.from(2)))) + await expect(tx).to.emit(registrar, 'RegistrationRejected') + }) + + it('refunds the total request balance to the admin address if admin cancels', async () => { + const before = await linkToken.balanceOf(await admin.getAddress()) + const tx = await registrar.connect(admin).cancel(hash) + const after = await linkToken.balanceOf(await admin.getAddress()) + assert.isTrue(after.sub(before).eq(amount.mul(BigNumber.from(2)))) + await expect(tx).to.emit(registrar, 'RegistrationRejected') + }) + + it('deletes the request hash', async () => { + await registrar.connect(registrarOwner).cancel(hash) + let tx = registrar.connect(registrarOwner).cancel(hash) + await evmRevert(tx, errorMsgs.requestNotFound) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + performGas, + await admin.getAddress(), + 0, + emptyBytes, + trigger, + emptyBytes, + hash, + ) + await evmRevert(tx, 
errorMsgs.requestNotFound) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/AutomationRegistry2_2.test.ts b/contracts/test/v0.8/automation/AutomationRegistry2_2.test.ts new file mode 100644 index 00000000..aedd7582 --- /dev/null +++ b/contracts/test/v0.8/automation/AutomationRegistry2_2.test.ts @@ -0,0 +1,5885 @@ +import { ethers } from 'hardhat' +import { loadFixture } from '@nomicfoundation/hardhat-network-helpers' +import { assert, expect } from 'chai' +import { + BigNumber, + BigNumberish, + BytesLike, + Contract, + ContractFactory, + ContractReceipt, + ContractTransaction, + Signer, + Wallet, +} from 'ethers' +import { evmRevert } from '../../test-helpers/matchers' +import { getUsers, Personas } from '../../test-helpers/setup' +import { randomAddress, toWei } from '../../test-helpers/helpers' +import { StreamsLookupUpkeep__factory as StreamsLookupUpkeepFactory } from '../../../typechain/factories/StreamsLookupUpkeep__factory' +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../../typechain/factories/MockV3Aggregator__factory' +import { UpkeepMock__factory as UpkeepMockFactory } from '../../../typechain/factories/UpkeepMock__factory' +import { UpkeepAutoFunder__factory as UpkeepAutoFunderFactory } from '../../../typechain/factories/UpkeepAutoFunder__factory' +import { MockArbGasInfo__factory as MockArbGasInfoFactory } from '../../../typechain/factories/MockArbGasInfo__factory' +import { MockOVMGasPriceOracle__factory as MockOVMGasPriceOracleFactory } from '../../../typechain/factories/MockOVMGasPriceOracle__factory' +import { ChainModuleBase__factory as ChainModuleBaseFactory } from '../../../typechain/factories/ChainModuleBase__factory' +import { ArbitrumModule__factory as ArbitrumModuleFactory } from '../../../typechain/factories/ArbitrumModule__factory' +import { OptimismModule__factory as OptimismModuleFactory } from '../../../typechain/factories/OptimismModule__factory' +import { ILogAutomation__factory as 
ILogAutomationactory } from '../../../typechain/factories/ILogAutomation__factory' +import { IAutomationForwarder__factory as IAutomationForwarderFactory } from '../../../typechain/factories/IAutomationForwarder__factory' +import { MockArbSys__factory as MockArbSysFactory } from '../../../typechain/factories/MockArbSys__factory' +import { AutomationUtils2_2 as AutomationUtils } from '../../../typechain/AutomationUtils2_2' +import { MockArbGasInfo } from '../../../typechain/MockArbGasInfo' +import { MockOVMGasPriceOracle } from '../../../typechain/MockOVMGasPriceOracle' +import { StreamsLookupUpkeep } from '../../../typechain/StreamsLookupUpkeep' +import { MockV3Aggregator } from '../../../typechain/MockV3Aggregator' +import { UpkeepMock } from '../../../typechain/UpkeepMock' +import { ChainModuleBase } from '../../../typechain/ChainModuleBase' +import { ArbitrumModule } from '../../../typechain/ArbitrumModule' +import { OptimismModule } from '../../../typechain/OptimismModule' +import { UpkeepTranscoder } from '../../../typechain/UpkeepTranscoder' +import { IChainModule, UpkeepAutoFunder } from '../../../typechain' +import { + CancelledUpkeepReportEvent, + IAutomationRegistryMaster as IAutomationRegistry, + ReorgedUpkeepReportEvent, + StaleUpkeepReportEvent, + UpkeepPerformedEvent, +} from '../../../typechain/IAutomationRegistryMaster' +import { + deployMockContract, + MockContract, +} from '@ethereum-waffle/mock-contract' +import { deployRegistry22 } from './helpers' + +const describeMaybe = process.env.SKIP_SLOW ? describe.skip : describe +const itMaybe = process.env.SKIP_SLOW ? 
it.skip : it + +// copied from AutomationRegistryInterface2_2.sol +enum UpkeepFailureReason { + NONE, + UPKEEP_CANCELLED, + UPKEEP_PAUSED, + TARGET_CHECK_REVERTED, + UPKEEP_NOT_NEEDED, + PERFORM_DATA_EXCEEDS_LIMIT, + INSUFFICIENT_BALANCE, + CHECK_CALLBACK_REVERTED, + REVERT_DATA_EXCEEDS_LIMIT, + REGISTRY_PAUSED, +} + +// copied from AutomationRegistryBase2_2.sol +enum Trigger { + CONDITION, + LOG, +} + +// un-exported types that must be extracted from the utils contract +type Report = Parameters[0] +type OnChainConfig = Parameters[0] +type LogTrigger = Parameters[0] +type ConditionalTrigger = Parameters[0] +type Log = Parameters[0] + +// ----------------------------------------------------------------------------------------------- + +// These values should match the constants declared in registry +let registryConditionalOverhead: BigNumber +let registryLogOverhead: BigNumber +let registryPerSignerGasOverhead: BigNumber +let registryPerPerformByteGasOverhead: BigNumber +let registryTransmitCalldataFixedBytesOverhead: BigNumber +let registryTransmitCalldataPerSignerBytesOverhead: BigNumber +let cancellationDelay: number + +// This is the margin for gas that we test for. Gas charged should always be greater +// than total gas used in tx but should not increase beyond this margin +const gasCalculationMargin = BigNumber.from(5000) +// This is the margin for gas overhead estimation in checkUpkeep. 
The estimated gas +// overhead should be larger than actual gas overhead but should not increase beyond this margin +const gasEstimationMargin = BigNumber.from(5000) + +const linkEth = BigNumber.from(5000000000000000) // 1 Link = 0.005 Eth +const gasWei = BigNumber.from(1000000000) // 1 gwei +// ----------------------------------------------------------------------------------------------- +// test-wide configs for upkeeps +const linkDivisibility = BigNumber.from('1000000000000000000') +const performGas = BigNumber.from('1000000') +const paymentPremiumBase = BigNumber.from('1000000000') +const paymentPremiumPPB = BigNumber.from('250000000') +const flatFeeMicroLink = BigNumber.from(0) + +const randomBytes = '0x1234abcd' +const emptyBytes = '0x' +const emptyBytes32 = + '0x0000000000000000000000000000000000000000000000000000000000000000' + +const transmitGasOverhead = 1_000_000 +const checkGasOverhead = 500_000 + +const stalenessSeconds = BigNumber.from(43820) +const gasCeilingMultiplier = BigNumber.from(2) +const checkGasLimit = BigNumber.from(10000000) +const fallbackGasPrice = gasWei.mul(BigNumber.from('2')) +const fallbackLinkPrice = linkEth.div(BigNumber.from('2')) +const maxCheckDataSize = BigNumber.from(1000) +const maxPerformDataSize = BigNumber.from(1000) +const maxRevertDataSize = BigNumber.from(1000) +const maxPerformGas = BigNumber.from(5000000) +const minUpkeepSpend = BigNumber.from(0) +const f = 1 +const offchainVersion = 1 +const offchainBytes = '0x' +const zeroAddress = ethers.constants.AddressZero +const epochAndRound5_1 = + '0x0000000000000000000000000000000000000000000000000000000000000501' + +let logTriggerConfig: string + +// ----------------------------------------------------------------------------------------------- + +// Smart contract factories +let linkTokenFactory: ContractFactory +let mockArbGasInfoFactory: MockArbGasInfoFactory +let mockOVMGasPriceOracleFactory: MockOVMGasPriceOracleFactory +let mockV3AggregatorFactory: 
MockV3AggregatorFactory +let upkeepMockFactory: UpkeepMockFactory +let upkeepAutoFunderFactory: UpkeepAutoFunderFactory +let chainModuleBaseFactory: ChainModuleBaseFactory +let arbitrumModuleFactory: ArbitrumModuleFactory +let optimismModuleFactory: OptimismModuleFactory +let streamsLookupUpkeepFactory: StreamsLookupUpkeepFactory +let personas: Personas + +// contracts +let linkToken: Contract +let linkEthFeed: MockV3Aggregator +let gasPriceFeed: MockV3Aggregator +let registry: IAutomationRegistry // default registry, used for most tests +let arbRegistry: IAutomationRegistry // arbitrum registry +let opRegistry: IAutomationRegistry // optimism registry +let mgRegistry: IAutomationRegistry // "migrate registry" used in migration tests +let blankRegistry: IAutomationRegistry // used to test initial configurations +let mockArbGasInfo: MockArbGasInfo +let mockOVMGasPriceOracle: MockOVMGasPriceOracle +let mock: UpkeepMock +let autoFunderUpkeep: UpkeepAutoFunder +let ltUpkeep: MockContract +let transcoder: UpkeepTranscoder +let chainModuleBase: ChainModuleBase +let arbitrumModule: ArbitrumModule +let optimismModule: OptimismModule +let streamsLookupUpkeep: StreamsLookupUpkeep +let automationUtils: AutomationUtils + +function now() { + return Math.floor(Date.now() / 1000) +} + +async function getUpkeepID(tx: ContractTransaction): Promise { + const receipt = await tx.wait() + for (const event of receipt.events || []) { + if ( + event.args && + event.eventSignature == 'UpkeepRegistered(uint256,uint32,address)' + ) { + return event.args[0] + } + } + throw new Error('could not find upkeep ID in tx event logs') +} + +const getTriggerType = (upkeepId: BigNumber): Trigger => { + const hexBytes = ethers.utils.defaultAbiCoder.encode(['uint256'], [upkeepId]) + const bytes = ethers.utils.arrayify(hexBytes) + for (let idx = 4; idx < 15; idx++) { + if (bytes[idx] != 0) { + return Trigger.CONDITION + } + } + return bytes[15] as Trigger +} + +const encodeConfig = (onchainConfig: 
OnChainConfig) => { + return ( + '0x' + + automationUtils.interface + .encodeFunctionData('_onChainConfig', [onchainConfig]) + .slice(10) + ) +} + +const encodeBlockTrigger = (conditionalTrigger: ConditionalTrigger) => { + return ( + '0x' + + automationUtils.interface + .encodeFunctionData('_conditionalTrigger', [conditionalTrigger]) + .slice(10) + ) +} + +const encodeLogTrigger = (logTrigger: LogTrigger) => { + return ( + '0x' + + automationUtils.interface + .encodeFunctionData('_logTrigger', [logTrigger]) + .slice(10) + ) +} + +const encodeLog = (log: Log) => { + return ( + '0x' + automationUtils.interface.encodeFunctionData('_log', [log]).slice(10) + ) +} + +const encodeReport = (report: Report) => { + return ( + '0x' + + automationUtils.interface.encodeFunctionData('_report', [report]).slice(10) + ) +} + +type UpkeepData = { + Id: BigNumberish + performGas: BigNumberish + performData: BytesLike + trigger: BytesLike +} + +const makeReport = (upkeeps: UpkeepData[]) => { + const upkeepIds = upkeeps.map((u) => u.Id) + const performGases = upkeeps.map((u) => u.performGas) + const triggers = upkeeps.map((u) => u.trigger) + const performDatas = upkeeps.map((u) => u.performData) + return encodeReport({ + fastGasWei: gasWei, + linkNative: linkEth, + upkeepIds, + gasLimits: performGases, + triggers, + performDatas, + }) +} + +const makeLatestBlockReport = async (upkeepsIDs: BigNumberish[]) => { + const latestBlock = await ethers.provider.getBlock('latest') + const upkeeps: UpkeepData[] = [] + for (let i = 0; i < upkeepsIDs.length; i++) { + upkeeps.push({ + Id: upkeepsIDs[i], + performGas, + trigger: encodeBlockTrigger({ + blockNum: latestBlock.number, + blockHash: latestBlock.hash, + }), + performData: '0x', + }) + } + return makeReport(upkeeps) +} + +const signReport = ( + reportContext: string[], + report: any, + signers: Wallet[], +) => { + const reportDigest = ethers.utils.keccak256(report) + const packedArgs = ethers.utils.solidityPack( + ['bytes32', 'bytes32[3]'], 
+ [reportDigest, reportContext], + ) + const packedDigest = ethers.utils.keccak256(packedArgs) + + const signatures = [] + for (const signer of signers) { + signatures.push(signer._signingKey().signDigest(packedDigest)) + } + const vs = signatures.map((i) => '0' + (i.v - 27).toString(16)).join('') + return { + vs: '0x' + vs.padEnd(64, '0'), + rs: signatures.map((i) => i.r), + ss: signatures.map((i) => i.s), + } +} + +const parseUpkeepPerformedLogs = (receipt: ContractReceipt) => { + const parsedLogs = [] + for (const rawLog of receipt.logs) { + try { + const log = registry.interface.parseLog(rawLog) + if ( + log.name == + registry.interface.events[ + 'UpkeepPerformed(uint256,bool,uint96,uint256,uint256,bytes)' + ].name + ) { + parsedLogs.push(log as unknown as UpkeepPerformedEvent) + } + } catch { + continue + } + } + return parsedLogs +} + +const parseReorgedUpkeepReportLogs = (receipt: ContractReceipt) => { + const parsedLogs = [] + for (const rawLog of receipt.logs) { + try { + const log = registry.interface.parseLog(rawLog) + if ( + log.name == + registry.interface.events['ReorgedUpkeepReport(uint256,bytes)'].name + ) { + parsedLogs.push(log as unknown as ReorgedUpkeepReportEvent) + } + } catch { + continue + } + } + return parsedLogs +} + +const parseStaleUpkeepReportLogs = (receipt: ContractReceipt) => { + const parsedLogs = [] + for (const rawLog of receipt.logs) { + try { + const log = registry.interface.parseLog(rawLog) + if ( + log.name == + registry.interface.events['StaleUpkeepReport(uint256,bytes)'].name + ) { + parsedLogs.push(log as unknown as StaleUpkeepReportEvent) + } + } catch { + continue + } + } + return parsedLogs +} + +const parseCancelledUpkeepReportLogs = (receipt: ContractReceipt) => { + const parsedLogs = [] + for (const rawLog of receipt.logs) { + try { + const log = registry.interface.parseLog(rawLog) + if ( + log.name == + registry.interface.events['CancelledUpkeepReport(uint256,bytes)'].name + ) { + parsedLogs.push(log as unknown as 
CancelledUpkeepReportEvent) + } + } catch { + continue + } + } + return parsedLogs +} + +describe('AutomationRegistry2_2', () => { + let owner: Signer + let keeper1: Signer + let keeper2: Signer + let keeper3: Signer + let keeper4: Signer + let keeper5: Signer + let nonkeeper: Signer + let signer1: Wallet + let signer2: Wallet + let signer3: Wallet + let signer4: Wallet + let signer5: Wallet + let admin: Signer + let payee1: Signer + let payee2: Signer + let payee3: Signer + let payee4: Signer + let payee5: Signer + + let upkeepId: BigNumber // conditional upkeep + let afUpkeepId: BigNumber // auto funding upkeep + let logUpkeepId: BigNumber // log trigger upkeepID + let streamsLookupUpkeepId: BigNumber // streams lookup upkeep + const numUpkeeps = 4 // see above + let keeperAddresses: string[] + let payees: string[] + let signers: Wallet[] + let signerAddresses: string[] + let config: any + let arbConfig: any + let opConfig: any + let baseConfig: Parameters + let arbConfigParams: Parameters + let opConfigParams: Parameters + let upkeepManager: string + + before(async () => { + personas = (await getUsers()).personas + + const utilsFactory = await ethers.getContractFactory('AutomationUtils2_2') + automationUtils = await utilsFactory.deploy() + + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + ) + // need full path because there are two contracts with name MockV3Aggregator + mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as unknown as MockV3AggregatorFactory + mockArbGasInfoFactory = await ethers.getContractFactory('MockArbGasInfo') + mockOVMGasPriceOracleFactory = await ethers.getContractFactory( + 'MockOVMGasPriceOracle', + ) + upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') + upkeepAutoFunderFactory = + await ethers.getContractFactory('UpkeepAutoFunder') + chainModuleBaseFactory = await 
ethers.getContractFactory('ChainModuleBase') + arbitrumModuleFactory = await ethers.getContractFactory('ArbitrumModule') + optimismModuleFactory = await ethers.getContractFactory('OptimismModule') + streamsLookupUpkeepFactory = await ethers.getContractFactory( + 'StreamsLookupUpkeep', + ) + + owner = personas.Default + keeper1 = personas.Carol + keeper2 = personas.Eddy + keeper3 = personas.Nancy + keeper4 = personas.Norbert + keeper5 = personas.Nick + nonkeeper = personas.Ned + admin = personas.Neil + payee1 = personas.Nelly + payee2 = personas.Norbert + payee3 = personas.Nick + payee4 = personas.Eddy + payee5 = personas.Carol + upkeepManager = await personas.Norbert.getAddress() + // signers + signer1 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000001', + ) + signer2 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000002', + ) + signer3 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000003', + ) + signer4 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000004', + ) + signer5 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000005', + ) + + keeperAddresses = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + await keeper3.getAddress(), + await keeper4.getAddress(), + await keeper5.getAddress(), + ] + payees = [ + await payee1.getAddress(), + await payee2.getAddress(), + await payee3.getAddress(), + await payee4.getAddress(), + await payee5.getAddress(), + ] + signers = [signer1, signer2, signer3, signer4, signer5] + + // We append 26 random addresses to keepers, payees and signers to get a system of 31 oracles + // This allows f value of 1 - 10 + for (let i = 0; i < 26; i++) { + keeperAddresses.push(randomAddress()) + payees.push(randomAddress()) + signers.push(ethers.Wallet.createRandom()) + } + signerAddresses = [] + for (const signer of signers) { + 
signerAddresses.push(await signer.getAddress()) + } + + logTriggerConfig = + '0x' + + automationUtils.interface + .encodeFunctionData('_logTriggerConfig', [ + { + contractAddress: randomAddress(), + filterSelector: 0, + topic0: ethers.utils.randomBytes(32), + topic1: ethers.utils.randomBytes(32), + topic2: ethers.utils.randomBytes(32), + topic3: ethers.utils.randomBytes(32), + }, + ]) + .slice(10) + }) + + // This function is similar to registry's _calculatePaymentAmount + // It uses global fastGasWei, linkEth, and assumes isExecution = false (gasFee = fastGasWei*multiplier) + // rest of the parameters are the same + const linkForGas = ( + upkeepGasSpent: BigNumber, + gasOverhead: BigNumber, + gasMultiplier: BigNumber, + premiumPPB: BigNumber, + flatFee: BigNumber, + l1CostWei?: BigNumber, + ) => { + l1CostWei = l1CostWei === undefined ? BigNumber.from(0) : l1CostWei + + const gasSpent = gasOverhead.add(BigNumber.from(upkeepGasSpent)) + const base = gasWei + .mul(gasMultiplier) + .mul(gasSpent) + .mul(linkDivisibility) + .div(linkEth) + const l1Fee = l1CostWei.mul(linkDivisibility).div(linkEth) + const gasPayment = base.add(l1Fee) + + const premium = gasWei + .mul(gasMultiplier) + .mul(upkeepGasSpent) + .add(l1CostWei) + .mul(linkDivisibility) + .div(linkEth) + .mul(premiumPPB) + .div(paymentPremiumBase) + .add(BigNumber.from(flatFee).mul('1000000000000')) + + return { + total: gasPayment.add(premium), + gasPayment, + premium, + } + } + + const verifyMaxPayment = async ( + registry: IAutomationRegistry, + chainModule: IChainModule, + maxl1CostWeWithoutMultiplier?: BigNumber, + ) => { + type TestCase = { + name: string + multiplier: number + gas: number + premium: number + flatFee: number + } + + const tests: TestCase[] = [ + { + name: 'no fees', + multiplier: 1, + gas: 100000, + premium: 0, + flatFee: 0, + }, + { + name: 'basic fees', + multiplier: 1, + gas: 100000, + premium: 250000000, + flatFee: 1000000, + }, + { + name: 'max fees', + multiplier: 3, + gas: 
10000000, + premium: 250000000, + flatFee: 1000000, + }, + ] + + const fPlusOne = BigNumber.from(f + 1) + const chainModuleOverheads = await chainModule.getGasOverhead() + const totalConditionalOverhead = registryConditionalOverhead + .add(registryPerSignerGasOverhead.mul(fPlusOne)) + .add( + registryPerPerformByteGasOverhead + .add(chainModuleOverheads.chainModulePerByteOverhead) + .mul( + maxPerformDataSize + .add(registryTransmitCalldataFixedBytesOverhead) + .add( + registryTransmitCalldataPerSignerBytesOverhead.mul(fPlusOne), + ), + ), + ) + .add(chainModuleOverheads.chainModuleFixedOverhead) + + const totalLogOverhead = registryLogOverhead + .add(registryPerSignerGasOverhead.mul(fPlusOne)) + .add( + registryPerPerformByteGasOverhead + .add(chainModuleOverheads.chainModulePerByteOverhead) + .mul( + maxPerformDataSize + .add(registryTransmitCalldataFixedBytesOverhead) + .add( + registryTransmitCalldataPerSignerBytesOverhead.mul(fPlusOne), + ), + ), + ) + .add(chainModuleOverheads.chainModuleFixedOverhead) + + for (const test of tests) { + await registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB: test.premium, + flatFeeMicroLink: test.flatFee, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier: test.multiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrars: [], + upkeepPrivilegeManager: upkeepManager, + chainModule: chainModule.address, + reorgProtectionEnabled: true, + }), + offchainVersion, + offchainBytes, + ) + + const conditionalPrice = await registry.getMaxPaymentForGas( + Trigger.CONDITION, + test.gas, + ) + expect(conditionalPrice).to.equal( + linkForGas( + BigNumber.from(test.gas), + totalConditionalOverhead, + BigNumber.from(test.multiplier), + BigNumber.from(test.premium), + BigNumber.from(test.flatFee), + 
maxl1CostWeWithoutMultiplier?.mul(BigNumber.from(test.multiplier)), + ).total, + ) + + const logPrice = await registry.getMaxPaymentForGas(Trigger.LOG, test.gas) + expect(logPrice).to.equal( + linkForGas( + BigNumber.from(test.gas), + totalLogOverhead, + BigNumber.from(test.multiplier), + BigNumber.from(test.premium), + BigNumber.from(test.flatFee), + maxl1CostWeWithoutMultiplier?.mul(BigNumber.from(test.multiplier)), + ).total, + ) + } + } + + const verifyConsistentAccounting = async ( + maxAllowedSpareChange: BigNumber, + ) => { + const expectedLinkBalance = (await registry.getState()).state + .expectedLinkBalance + const linkTokenBalance = await linkToken.balanceOf(registry.address) + const upkeepIdBalance = (await registry.getUpkeep(upkeepId)).balance + let totalKeeperBalance = BigNumber.from(0) + for (let i = 0; i < keeperAddresses.length; i++) { + totalKeeperBalance = totalKeeperBalance.add( + (await registry.getTransmitterInfo(keeperAddresses[i])).balance, + ) + } + const ownerBalance = (await registry.getState()).state.ownerLinkBalance + assert.isTrue(expectedLinkBalance.eq(linkTokenBalance)) + assert.isTrue( + upkeepIdBalance + .add(totalKeeperBalance) + .add(ownerBalance) + .lte(expectedLinkBalance), + ) + assert.isTrue( + expectedLinkBalance + .sub(upkeepIdBalance) + .sub(totalKeeperBalance) + .sub(ownerBalance) + .lte(maxAllowedSpareChange), + ) + } + + interface GetTransmitTXOptions { + numSigners?: number + startingSignerIndex?: number + gasLimit?: BigNumberish + gasPrice?: BigNumberish + performGas?: BigNumberish + performDatas?: string[] + checkBlockNum?: number + checkBlockHash?: string + logBlockHash?: BytesLike + txHash?: BytesLike + logIndex?: number + timestamp?: number + } + + const getTransmitTx = async ( + registry: IAutomationRegistry, + transmitter: Signer, + upkeepIds: BigNumber[], + overrides: GetTransmitTXOptions = {}, + ) => { + const latestBlock = await ethers.provider.getBlock('latest') + const configDigest = (await 
registry.getState()).state.latestConfigDigest + const config = { + numSigners: f + 1, + startingSignerIndex: 0, + performDatas: undefined, + performGas, + checkBlockNum: latestBlock.number, + checkBlockHash: latestBlock.hash, + logIndex: 0, + txHash: undefined, // assigned uniquely below + logBlockHash: undefined, // assigned uniquely below + timestamp: now(), + gasLimit: undefined, + gasPrice: undefined, + } + Object.assign(config, overrides) + const upkeeps: UpkeepData[] = [] + for (let i = 0; i < upkeepIds.length; i++) { + let trigger: string + switch (getTriggerType(upkeepIds[i])) { + case Trigger.CONDITION: + trigger = encodeBlockTrigger({ + blockNum: config.checkBlockNum, + blockHash: config.checkBlockHash, + }) + break + case Trigger.LOG: + trigger = encodeLogTrigger({ + logBlockHash: config.logBlockHash || ethers.utils.randomBytes(32), + txHash: config.txHash || ethers.utils.randomBytes(32), + logIndex: config.logIndex, + blockNum: config.checkBlockNum, + blockHash: config.checkBlockHash, + }) + break + } + upkeeps.push({ + Id: upkeepIds[i], + performGas: config.performGas, + trigger, + performData: config.performDatas ? 
config.performDatas[i] : '0x', + }) + } + + const report = makeReport(upkeeps) + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] + const sigs = signReport( + reportContext, + report, + signers.slice( + config.startingSignerIndex, + config.startingSignerIndex + config.numSigners, + ), + ) + + type txOverride = { + gasLimit?: BigNumberish | Promise + gasPrice?: BigNumberish | Promise + } + const txOverrides: txOverride = {} + if (config.gasLimit) { + txOverrides.gasLimit = config.gasLimit + } + if (config.gasPrice) { + txOverrides.gasPrice = config.gasPrice + } + + return registry + .connect(transmitter) + .transmit( + [configDigest, epochAndRound5_1, emptyBytes32], + report, + sigs.rs, + sigs.ss, + sigs.vs, + txOverrides, + ) + } + + const getTransmitTxWithReport = async ( + registry: IAutomationRegistry, + transmitter: Signer, + report: BytesLike, + ) => { + const configDigest = (await registry.getState()).state.latestConfigDigest + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] + const sigs = signReport(reportContext, report, signers.slice(0, f + 1)) + + return registry + .connect(transmitter) + .transmit( + [configDigest, epochAndRound5_1, emptyBytes32], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ) + } + + const setup = async () => { + linkToken = await linkTokenFactory.connect(owner).deploy() + gasPriceFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(0, gasWei) + linkEthFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(9, linkEth) + const upkeepTranscoderFactory = await ethers.getContractFactory( + 'UpkeepTranscoder4_0', + ) + transcoder = await upkeepTranscoderFactory.connect(owner).deploy() + mockArbGasInfo = await mockArbGasInfoFactory.connect(owner).deploy() + mockOVMGasPriceOracle = await mockOVMGasPriceOracleFactory + .connect(owner) + .deploy() + chainModuleBase = await chainModuleBaseFactory.connect(owner).deploy() + arbitrumModule = await 
arbitrumModuleFactory.connect(owner).deploy() + optimismModule = await optimismModuleFactory.connect(owner).deploy() + streamsLookupUpkeep = await streamsLookupUpkeepFactory + .connect(owner) + .deploy( + BigNumber.from('10000'), + BigNumber.from('100'), + false /* useArbBlock */, + true /* staging */, + false /* verify mercury response */, + ) + + const arbOracleCode = await ethers.provider.send('eth_getCode', [ + mockArbGasInfo.address, + ]) + await ethers.provider.send('hardhat_setCode', [ + '0x000000000000000000000000000000000000006C', + arbOracleCode, + ]) + + const optOracleCode = await ethers.provider.send('eth_getCode', [ + mockOVMGasPriceOracle.address, + ]) + await ethers.provider.send('hardhat_setCode', [ + '0x420000000000000000000000000000000000000F', + optOracleCode, + ]) + + const mockArbSys = await new MockArbSysFactory(owner).deploy() + const arbSysCode = await ethers.provider.send('eth_getCode', [ + mockArbSys.address, + ]) + await ethers.provider.send('hardhat_setCode', [ + '0x0000000000000000000000000000000000000064', + arbSysCode, + ]) + + config = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrars: [], + upkeepPrivilegeManager: upkeepManager, + chainModule: chainModuleBase.address, + reorgProtectionEnabled: true, + } + + arbConfig = { ...config } + arbConfig.chainModule = arbitrumModule.address + opConfig = { ...config } + opConfig.chainModule = optimismModule.address + + baseConfig = [ + signerAddresses, + keeperAddresses, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ] + arbConfigParams = [ + signerAddresses, + keeperAddresses, + f, + encodeConfig(arbConfig), + offchainVersion, + offchainBytes, + ] + opConfigParams = [ + signerAddresses, + keeperAddresses, + f, + encodeConfig(opConfig), 
+ offchainVersion, + offchainBytes, + ] + + registry = await deployRegistry22( + owner, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + zeroAddress, + ) + + arbRegistry = await deployRegistry22( + owner, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + zeroAddress, + ) + + opRegistry = await deployRegistry22( + owner, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + zeroAddress, + ) + + mgRegistry = await deployRegistry22( + owner, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + zeroAddress, + ) + + blankRegistry = await deployRegistry22( + owner, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + zeroAddress, + ) + + registryConditionalOverhead = await registry.getConditionalGasOverhead() + registryLogOverhead = await registry.getLogGasOverhead() + registryPerSignerGasOverhead = await registry.getPerSignerGasOverhead() + registryPerPerformByteGasOverhead = + await registry.getPerPerformByteGasOverhead() + registryTransmitCalldataFixedBytesOverhead = + await registry.getTransmitCalldataFixedBytesOverhead() + registryTransmitCalldataPerSignerBytesOverhead = + await registry.getTransmitCalldataPerSignerBytesOverhead() + cancellationDelay = (await registry.getCancellationDelay()).toNumber() + + await registry.connect(owner).setConfig(...baseConfig) + await mgRegistry.connect(owner).setConfig(...baseConfig) + await arbRegistry.connect(owner).setConfig(...arbConfigParams) + await opRegistry.connect(owner).setConfig(...opConfigParams) + for (const reg of [registry, arbRegistry, opRegistry, mgRegistry]) { + await reg.connect(owner).setPayees(payees) + await linkToken.connect(admin).approve(reg.address, toWei('1000')) + await linkToken.connect(owner).approve(reg.address, toWei('1000')) + } + + mock = await upkeepMockFactory.deploy() + await linkToken + .connect(owner) + .transfer(await admin.getAddress(), toWei('1000')) + let tx = await registry + .connect(owner) + [ + 
'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + upkeepId = await getUpkeepID(tx) + + autoFunderUpkeep = await upkeepAutoFunderFactory + .connect(owner) + .deploy(linkToken.address, registry.address) + tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](autoFunderUpkeep.address, performGas, autoFunderUpkeep.address, randomBytes, '0x') + afUpkeepId = await getUpkeepID(tx) + + ltUpkeep = await deployMockContract(owner, ILogAutomationactory.abi) + tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,uint8,bytes,bytes,bytes)' + ](ltUpkeep.address, performGas, await admin.getAddress(), Trigger.LOG, '0x', logTriggerConfig, emptyBytes) + logUpkeepId = await getUpkeepID(tx) + + await autoFunderUpkeep.setUpkeepId(afUpkeepId) + // Give enough funds for upkeep as well as to the upkeep contract + await linkToken + .connect(owner) + .transfer(autoFunderUpkeep.address, toWei('1000')) + + tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](streamsLookupUpkeep.address, performGas, await admin.getAddress(), randomBytes, '0x') + streamsLookupUpkeepId = await getUpkeepID(tx) + } + + const getMultipleUpkeepsDeployedAndFunded = async ( + numPassingConditionalUpkeeps: number, + numPassingLogUpkeeps: number, + numFailingUpkeeps: number, + ) => { + const passingConditionalUpkeepIds = [] + const passingLogUpkeepIds = [] + const failingUpkeepIds = [] + for (let i = 0; i < numPassingConditionalUpkeeps; i++) { + const mock = await upkeepMockFactory.deploy() + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(BigNumber.from('0')) + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + const condUpkeepId = await getUpkeepID(tx) + 
passingConditionalUpkeepIds.push(condUpkeepId) + + // Add funds to passing upkeeps + await registry.connect(admin).addFunds(condUpkeepId, toWei('100')) + } + for (let i = 0; i < numPassingLogUpkeeps; i++) { + const mock = await upkeepMockFactory.deploy() + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(BigNumber.from('0')) + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,uint8,bytes,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), Trigger.LOG, '0x', logTriggerConfig, emptyBytes) + const logUpkeepId = await getUpkeepID(tx) + passingLogUpkeepIds.push(logUpkeepId) + + // Add funds to passing upkeeps + await registry.connect(admin).addFunds(logUpkeepId, toWei('100')) + } + for (let i = 0; i < numFailingUpkeeps; i++) { + const mock = await upkeepMockFactory.deploy() + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(BigNumber.from('0')) + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + const failingUpkeepId = await getUpkeepID(tx) + failingUpkeepIds.push(failingUpkeepId) + } + return { + passingConditionalUpkeepIds, + passingLogUpkeepIds, + failingUpkeepIds, + } + } + + beforeEach(async () => { + await loadFixture(setup) + }) + + describe('#transmit', () => { + const fArray = [1, 5, 10] + + it('reverts when registry is paused', async () => { + await registry.connect(owner).pause() + await evmRevert( + getTransmitTx(registry, keeper1, [upkeepId]), + 'RegistryPaused()', + ) + }) + + it('reverts when called by non active transmitter', async () => { + await evmRevert( + getTransmitTx(registry, payee1, [upkeepId]), + 'OnlyActiveTransmitters()', + ) + }) + + it('reverts when report data lengths mismatches', async () => { + const upkeepIds = [] + const gasLimits: BigNumber[] = [] + const triggers: string[] = [] + const performDatas = [] + + 
upkeepIds.push(upkeepId) + gasLimits.push(performGas) + triggers.push('0x') + performDatas.push('0x') + // Push an extra perform data + performDatas.push('0x') + + const report = encodeReport({ + fastGasWei: 0, + linkNative: 0, + upkeepIds, + gasLimits, + triggers, + performDatas, + }) + + await evmRevert( + getTransmitTxWithReport(registry, keeper1, report), + 'InvalidReport()', + ) + }) + + it('returns early when invalid upkeepIds are included in report', async () => { + const tx = await getTransmitTx(registry, keeper1, [ + upkeepId.add(BigNumber.from('1')), + ]) + + const receipt = await tx.wait() + const cancelledUpkeepReportLogs = parseCancelledUpkeepReportLogs(receipt) + // exactly 1 CancelledUpkeepReport log should be emitted + assert.equal(cancelledUpkeepReportLogs.length, 1) + }) + + it('performs even when the upkeep has insufficient funds and the upkeep pays out all the remaining balance', async () => { + // add very little fund to this upkeep + await registry.connect(admin).addFunds(upkeepId, BigNumber.from(10)) + const tx = await getTransmitTx(registry, keeper1, [upkeepId]) + const receipt = await tx.wait() + // the upkeep is underfunded in transmit but still performed + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal(upkeepPerformedLogs.length, 1) + const balance = (await registry.getUpkeep(upkeepId)).balance + assert.equal(balance.toNumber(), 0) + }) + + context('When the upkeep is funded', async () => { + beforeEach(async () => { + // Fund the upkeep + await Promise.all([ + registry.connect(admin).addFunds(upkeepId, toWei('100')), + registry.connect(admin).addFunds(logUpkeepId, toWei('100')), + ]) + }) + + it('handles duplicate upkeepIDs', async () => { + const tests: [string, BigNumber, number, number][] = [ + // [name, upkeep, num stale, num performed] + ['conditional', upkeepId, 1, 1], // checkBlocks must be sequential + ['log-trigger', logUpkeepId, 0, 2], // logs are deduped based on the "trigger ID" + ] + for (const 
[type, id, nStale, nPerformed] of tests) { + const tx = await getTransmitTx(registry, keeper1, [id, id]) + const receipt = await tx.wait() + const staleUpkeepReport = parseStaleUpkeepReportLogs(receipt) + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal( + staleUpkeepReport.length, + nStale, + `wrong log count for ${type} upkeep`, + ) + assert.equal( + upkeepPerformedLogs.length, + nPerformed, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('handles duplicate log triggers', async () => { + const logBlockHash = ethers.utils.randomBytes(32) + const txHash = ethers.utils.randomBytes(32) + const logIndex = 0 + const expectedDedupKey = ethers.utils.solidityKeccak256( + ['uint256', 'bytes32', 'bytes32', 'uint32'], + [logUpkeepId, logBlockHash, txHash, logIndex], + ) + assert.isFalse(await registry.hasDedupKey(expectedDedupKey)) + const tx = await getTransmitTx( + registry, + keeper1, + [logUpkeepId, logUpkeepId], + { logBlockHash, txHash, logIndex }, // will result in the same dedup key + ) + const receipt = await tx.wait() + const staleUpkeepReport = parseStaleUpkeepReportLogs(receipt) + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal(staleUpkeepReport.length, 1) + assert.equal(upkeepPerformedLogs.length, 1) + assert.isTrue(await registry.hasDedupKey(expectedDedupKey)) + await expect(tx) + .to.emit(registry, 'DedupKeyAdded') + .withArgs(expectedDedupKey) + }) + + it('returns early when check block number is less than last perform (block)', async () => { + // First perform an upkeep to put last perform block number on upkeep state + const tx = await getTransmitTx(registry, keeper1, [upkeepId]) + await tx.wait() + const lastPerformed = (await registry.getUpkeep(upkeepId)) + .lastPerformedBlockNumber + const lastPerformBlock = await ethers.provider.getBlock(lastPerformed) + assert.equal(lastPerformed.toString(), tx.blockNumber?.toString()) + // Try to transmit a report which has checkBlockNumber = 
lastPerformed-1, should result in stale report + const transmitTx = await getTransmitTx(registry, keeper1, [upkeepId], { + checkBlockNum: lastPerformBlock.number - 1, + checkBlockHash: lastPerformBlock.parentHash, + }) + const receipt = await transmitTx.wait() + const staleUpkeepReportLogs = parseStaleUpkeepReportLogs(receipt) + // exactly 1 StaleUpkeepReportLogs log should be emitted + assert.equal(staleUpkeepReportLogs.length, 1) + }) + + it('handles case when check block hash does not match', async () => { + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', logUpkeepId], + ] + for (const [type, id] of tests) { + const latestBlock = await ethers.provider.getBlock('latest') + // Try to transmit a report which has incorrect checkBlockHash + const tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: latestBlock.number - 1, + checkBlockHash: latestBlock.hash, // should be latestBlock.parentHash + }) + + const receipt = await tx.wait() + const reorgedUpkeepReportLogs = parseReorgedUpkeepReportLogs(receipt) + // exactly 1 ReorgedUpkeepReportLogs log should be emitted + assert.equal( + reorgedUpkeepReportLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('handles case when check block number is older than 256 blocks', async () => { + for (let i = 0; i < 256; i++) { + await ethers.provider.send('evm_mine', []) + } + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', logUpkeepId], + ] + for (const [type, id] of tests) { + const latestBlock = await ethers.provider.getBlock('latest') + const old = await ethers.provider.getBlock(latestBlock.number - 256) + // Try to transmit a report which has incorrect checkBlockHash + const tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: old.number, + checkBlockHash: old.hash, + }) + + const receipt = await tx.wait() + const reorgedUpkeepReportLogs = parseReorgedUpkeepReportLogs(receipt) + // exactly 1 
ReorgedUpkeepReportLogs log should be emitted + assert.equal( + reorgedUpkeepReportLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('allows bypassing reorg protection with empty blockhash', async () => { + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', logUpkeepId], + ] + for (const [type, id] of tests) { + const latestBlock = await ethers.provider.getBlock('latest') + const tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: latestBlock.number, + checkBlockHash: emptyBytes32, + }) + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal( + upkeepPerformedLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('allows bypassing reorg protection with reorgProtectionEnabled false config', async () => { + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', logUpkeepId], + ] + let newConfig = config + newConfig.reorgProtectionEnabled = false + await registry // used to test initial configurations + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + newConfig, + offchainVersion, + offchainBytes, + ) + + for (const [type, id] of tests) { + const latestBlock = await ethers.provider.getBlock('latest') + // Try to transmit a report which has incorrect checkBlockHash + const tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: latestBlock.number - 1, + checkBlockHash: latestBlock.hash, // should be latestBlock.parentHash + }) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal( + upkeepPerformedLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('allows very old trigger block numbers when bypassing reorg protection with reorgProtectionEnabled config', async () => { + let newConfig = config + newConfig.reorgProtectionEnabled = false + await 
registry // used to test initial configurations + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + newConfig, + offchainVersion, + offchainBytes, + ) + for (let i = 0; i < 256; i++) { + await ethers.provider.send('evm_mine', []) + } + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', logUpkeepId], + ] + for (const [type, id] of tests) { + const latestBlock = await ethers.provider.getBlock('latest') + const old = await ethers.provider.getBlock(latestBlock.number - 256) + // Try to transmit a report which has incorrect checkBlockHash + const tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: old.number, + checkBlockHash: old.hash, + }) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal( + upkeepPerformedLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('allows very old trigger block numbers when bypassing reorg protection with empty blockhash', async () => { + // mine enough blocks so that blockhash(1) is unavailable + for (let i = 0; i <= 256; i++) { + await ethers.provider.send('evm_mine', []) + } + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', logUpkeepId], + ] + for (const [type, id] of tests) { + const tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: 1, + checkBlockHash: emptyBytes32, + }) + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal( + upkeepPerformedLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('returns early when future block number is provided as trigger, irrespective of blockhash being present', async () => { + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', logUpkeepId], + ] + for (const [type, id] of tests) { + const latestBlock = await ethers.provider.getBlock('latest') + + // Should 
fail when blockhash is empty + let tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: latestBlock.number + 100, + checkBlockHash: emptyBytes32, + }) + let receipt = await tx.wait() + let reorgedUpkeepReportLogs = parseReorgedUpkeepReportLogs(receipt) + // exactly 1 ReorgedUpkeepReportLogs log should be emitted + assert.equal( + reorgedUpkeepReportLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + + // Should also fail when blockhash is not empty + tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: latestBlock.number + 100, + checkBlockHash: latestBlock.hash, + }) + receipt = await tx.wait() + reorgedUpkeepReportLogs = parseReorgedUpkeepReportLogs(receipt) + // exactly 1 ReorgedUpkeepReportLogs log should be emitted + assert.equal( + reorgedUpkeepReportLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('returns early when future block number is provided as trigger, irrespective of reorgProtectionEnabled config', async () => { + let newConfig = config + newConfig.reorgProtectionEnabled = false + await registry // used to test initial configurations + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + newConfig, + offchainVersion, + offchainBytes, + ) + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', logUpkeepId], + ] + for (const [type, id] of tests) { + const latestBlock = await ethers.provider.getBlock('latest') + + // Should fail when blockhash is empty + let tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: latestBlock.number + 100, + checkBlockHash: emptyBytes32, + }) + let receipt = await tx.wait() + let reorgedUpkeepReportLogs = parseReorgedUpkeepReportLogs(receipt) + // exactly 1 ReorgedUpkeepReportLogs log should be emitted + assert.equal( + reorgedUpkeepReportLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + + // Should also fail when blockhash is not empty + tx = await 
getTransmitTx(registry, keeper1, [id], { + checkBlockNum: latestBlock.number + 100, + checkBlockHash: latestBlock.hash, + }) + receipt = await tx.wait() + reorgedUpkeepReportLogs = parseReorgedUpkeepReportLogs(receipt) + // exactly 1 ReorgedUpkeepReportLogs log should be emitted + assert.equal( + reorgedUpkeepReportLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('returns early when upkeep is cancelled and cancellation delay has gone', async () => { + const latestBlockReport = await makeLatestBlockReport([upkeepId]) + await registry.connect(admin).cancelUpkeep(upkeepId) + + for (let i = 0; i < cancellationDelay; i++) { + await ethers.provider.send('evm_mine', []) + } + + const tx = await getTransmitTxWithReport( + registry, + keeper1, + latestBlockReport, + ) + + const receipt = await tx.wait() + const cancelledUpkeepReportLogs = + parseCancelledUpkeepReportLogs(receipt) + // exactly 1 CancelledUpkeepReport log should be emitted + assert.equal(cancelledUpkeepReportLogs.length, 1) + }) + + it('does not revert if the target cannot execute', async () => { + await mock.setCanPerform(false) + const tx = await getTransmitTx(registry, keeper1, [upkeepId]) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const success = upkeepPerformedLog.args.success + assert.equal(success, false) + }) + + it('does not revert if the target runs out of gas', async () => { + await mock.setCanPerform(false) + + const tx = await getTransmitTx(registry, keeper1, [upkeepId], { + performGas: 10, // too little gas + }) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + 
const success = upkeepPerformedLog.args.success + assert.equal(success, false) + }) + + it('reverts if not enough gas supplied', async () => { + await evmRevert( + getTransmitTx(registry, keeper1, [upkeepId], { + gasLimit: performGas, + }), + ) + }) + + it('executes the data passed to the registry', async () => { + await mock.setCanPerform(true) + + const tx = await getTransmitTx(registry, keeper1, [upkeepId], { + performDatas: [randomBytes], + }) + const receipt = await tx.wait() + + const upkeepPerformedWithABI = [ + 'event UpkeepPerformedWith(bytes upkeepData)', + ] + const iface = new ethers.utils.Interface(upkeepPerformedWithABI) + const parsedLogs = [] + for (let i = 0; i < receipt.logs.length; i++) { + const log = receipt.logs[i] + try { + parsedLogs.push(iface.parseLog(log)) + } catch (e) { + // ignore log + } + } + assert.equal(parsedLogs.length, 1) + assert.equal(parsedLogs[0].args.upkeepData, randomBytes) + }) + + it('uses actual execution price for payment and premium calculation', async () => { + // Actual multiplier is 2, but we set gasPrice to be 1x gasWei + const gasPrice = gasWei.mul(BigNumber.from('1')) + await mock.setCanPerform(true) + const registryPremiumBefore = (await registry.getState()).state + .totalPremium + const tx = await getTransmitTx(registry, keeper1, [upkeepId], { + gasPrice, + }) + const receipt = await tx.wait() + const registryPremiumAfter = (await registry.getState()).state + .totalPremium + const premium = registryPremiumAfter.sub(registryPremiumBefore) + + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + BigNumber.from('1'), // Not the 
config multiplier, but the actual gas used + paymentPremiumPPB, + flatFeeMicroLink, + ).total.toString(), + totalPayment.toString(), + ) + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + BigNumber.from('1'), // Not the config multiplier, but the actual gas used + paymentPremiumPPB, + flatFeeMicroLink, + ).premium.toString(), + premium.toString(), + ) + }) + + it('only pays at a rate up to the gas ceiling [ @skip-coverage ]', async () => { + // Actual multiplier is 2, but we set gasPrice to be 10x + const gasPrice = gasWei.mul(BigNumber.from('10')) + await mock.setCanPerform(true) + + const tx = await getTransmitTx(registry, keeper1, [upkeepId], { + gasPrice, + }) + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + gasCeilingMultiplier, // Should be same with exisitng multiplier + paymentPremiumPPB, + flatFeeMicroLink, + ).total.toString(), + totalPayment.toString(), + ) + }) + + it('correctly accounts for l payment', async () => { + await mock.setCanPerform(true) + // Same as MockArbGasInfo.sol + const l1CostWeiArb = BigNumber.from(1000000) + + let tx = await arbRegistry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + const testUpkeepId = await getUpkeepID(tx) + await arbRegistry.connect(owner).addFunds(testUpkeepId, toWei('100')) + + // Do the thing + tx = await getTransmitTx( + arbRegistry, + keeper1, + [testUpkeepId], + + { gasPrice: gasWei.mul('5') }, // High gas price so that it gets capped + ) + const receipt = await tx.wait() + 
const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + gasCeilingMultiplier, + paymentPremiumPPB, + flatFeeMicroLink, + l1CostWeiArb, + ).total.toString(), + totalPayment.toString(), + ) + }) + + itMaybe('can self fund', async () => { + const maxPayment = await registry.getMaxPaymentForGas( + Trigger.CONDITION, + performGas, + ) + + // First set auto funding amount to 0 and verify that balance is deducted upon performUpkeep + let initialBalance = toWei('100') + await registry.connect(owner).addFunds(afUpkeepId, initialBalance) + await autoFunderUpkeep.setAutoFundLink(0) + await autoFunderUpkeep.setIsEligible(true) + await getTransmitTx(registry, keeper1, [afUpkeepId]) + + let postUpkeepBalance = (await registry.getUpkeep(afUpkeepId)).balance + assert.isTrue(postUpkeepBalance.lt(initialBalance)) // Balance should be deducted + assert.isTrue(postUpkeepBalance.gte(initialBalance.sub(maxPayment))) // Balance should not be deducted more than maxPayment + + // Now set auto funding amount to 100 wei and verify that the balance increases + initialBalance = postUpkeepBalance + const autoTopupAmount = toWei('100') + await autoFunderUpkeep.setAutoFundLink(autoTopupAmount) + await autoFunderUpkeep.setIsEligible(true) + await getTransmitTx(registry, keeper1, [afUpkeepId]) + + postUpkeepBalance = (await registry.getUpkeep(afUpkeepId)).balance + // Balance should increase by autoTopupAmount and decrease by max maxPayment + assert.isTrue( + postUpkeepBalance.gte( + initialBalance.add(autoTopupAmount).sub(maxPayment), + ), + ) + }) + + it('can self cancel', async () => { + await 
registry.connect(owner).addFunds(afUpkeepId, toWei('100')) + + await autoFunderUpkeep.setIsEligible(true) + await autoFunderUpkeep.setShouldCancel(true) + + let registration = await registry.getUpkeep(afUpkeepId) + const oldExpiration = registration.maxValidBlocknumber + + // Do the thing + await getTransmitTx(registry, keeper1, [afUpkeepId]) + + // Verify upkeep gets cancelled + registration = await registry.getUpkeep(afUpkeepId) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) + + it('reverts when configDigest mismatches', async () => { + const report = await makeLatestBlockReport([upkeepId]) + const reportContext = [emptyBytes32, epochAndRound5_1, emptyBytes32] // wrong config digest + const sigs = signReport(reportContext, report, signers.slice(0, f + 1)) + await evmRevert( + registry + .connect(keeper1) + .transmit( + [reportContext[0], reportContext[1], reportContext[2]], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ), + 'ConfigDigestMismatch()', + ) + }) + + it('reverts with incorrect number of signatures', async () => { + const configDigest = (await registry.getState()).state + .latestConfigDigest + const report = await makeLatestBlockReport([upkeepId]) + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] // wrong config digest + const sigs = signReport(reportContext, report, signers.slice(0, f + 2)) + await evmRevert( + registry + .connect(keeper1) + .transmit( + [reportContext[0], reportContext[1], reportContext[2]], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ), + 'IncorrectNumberOfSignatures()', + ) + }) + + it('reverts with invalid signature for inactive signers', async () => { + const configDigest = (await registry.getState()).state + .latestConfigDigest + const report = await makeLatestBlockReport([upkeepId]) + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] // wrong config digest + const sigs = signReport(reportContext, report, [ + new 
ethers.Wallet(ethers.Wallet.createRandom()), + new ethers.Wallet(ethers.Wallet.createRandom()), + ]) + await evmRevert( + registry + .connect(keeper1) + .transmit( + [reportContext[0], reportContext[1], reportContext[2]], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ), + 'OnlyActiveSigners()', + ) + }) + + it('reverts with invalid signature for duplicated signers', async () => { + const configDigest = (await registry.getState()).state + .latestConfigDigest + const report = await makeLatestBlockReport([upkeepId]) + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] // wrong config digest + const sigs = signReport(reportContext, report, [signer1, signer1]) + await evmRevert( + registry + .connect(keeper1) + .transmit( + [reportContext[0], reportContext[1], reportContext[2]], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ), + 'DuplicateSigners()', + ) + }) + + itMaybe( + 'has a large enough gas overhead to cover upkeep that use all its gas [ @skip-coverage ]', + async () => { + await registry.connect(owner).setConfigTypeSafe( + signerAddresses, + keeperAddresses, + 10, // maximise f to maximise overhead + config, + offchainVersion, + offchainBytes, + ) + const tx = await registry + .connect(owner) + ['registerUpkeep(address,uint32,address,bytes,bytes)']( + mock.address, + maxPerformGas, // max allowed gas + await admin.getAddress(), + randomBytes, + '0x', + ) + const testUpkeepId = await getUpkeepID(tx) + await registry.connect(admin).addFunds(testUpkeepId, toWei('100')) + + let performData = '0x' + for (let i = 0; i < maxPerformDataSize.toNumber(); i++) { + performData += '11' + } // max allowed performData + + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(maxPerformGas) + + await getTransmitTx(registry, keeper1, [testUpkeepId], { + gasLimit: maxPerformGas.add(transmitGasOverhead), + numSigners: 11, + performDatas: [performData], + }) // Should not revert + }, + ) + + itMaybe( + 'performs upkeep, deducts payment, updates 
lastPerformed and emits events', + async () => { + await mock.setCanPerform(true) + + for (const i in fArray) { + const newF = fArray[i] + await registry + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + newF, + config, + offchainVersion, + offchainBytes, + ) + const checkBlock = await ethers.provider.getBlock('latest') + + const keeperBefore = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const registrationBefore = await registry.getUpkeep(upkeepId) + const registryPremiumBefore = (await registry.getState()).state + .totalPremium + const keeperLinkBefore = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkBefore = await linkToken.balanceOf( + registry.address, + ) + + // Do the thing + const tx = await getTransmitTx(registry, keeper1, [upkeepId], { + checkBlockNum: checkBlock.number, + checkBlockHash: checkBlock.hash, + numSigners: newF + 1, + }) + + const receipt = await tx.wait() + + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const id = upkeepPerformedLog.args.id + const success = upkeepPerformedLog.args.success + const trigger = upkeepPerformedLog.args.trigger + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + assert.equal(id.toString(), upkeepId.toString()) + assert.equal(success, true) + assert.equal( + trigger, + encodeBlockTrigger({ + blockNum: checkBlock.number, + blockHash: checkBlock.hash, + }), + ) + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(gasOverhead.gt(BigNumber.from('0'))) + assert.isTrue(totalPayment.gt(BigNumber.from('0'))) + + const keeperAfter = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const registrationAfter = await 
registry.getUpkeep(upkeepId) + const keeperLinkAfter = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkAfter = await linkToken.balanceOf( + registry.address, + ) + const registryPremiumAfter = (await registry.getState()).state + .totalPremium + const premium = registryPremiumAfter.sub(registryPremiumBefore) + // Keeper payment is gasPayment + premium / num keepers + const keeperPayment = totalPayment + .sub(premium) + .add(premium.div(BigNumber.from(keeperAddresses.length))) + + assert.equal( + keeperAfter.balance.sub(keeperPayment).toString(), + keeperBefore.balance.toString(), + ) + assert.equal( + registrationBefore.balance.sub(totalPayment).toString(), + registrationAfter.balance.toString(), + ) + assert.isTrue(keeperLinkAfter.eq(keeperLinkBefore)) + assert.isTrue(registryLinkBefore.eq(registryLinkAfter)) + + // Amount spent should be updated correctly + assert.equal( + registrationAfter.amountSpent.sub(totalPayment).toString(), + registrationBefore.amountSpent.toString(), + ) + assert.isTrue( + registrationAfter.amountSpent + .sub(registrationBefore.amountSpent) + .eq(registrationBefore.balance.sub(registrationAfter.balance)), + ) + // Last perform block number should be updated + assert.equal( + registrationAfter.lastPerformedBlockNumber.toString(), + tx.blockNumber?.toString(), + ) + + // Latest epoch should be 5 + assert.equal((await registry.getState()).state.latestEpoch, 5) + } + }, + ) + + describe('Gas benchmarking conditional upkeeps [ @skip-coverage ]', function () { + const fs = [1, 10] + fs.forEach(function (newF) { + it( + 'When f=' + + newF + + ' calculates gas overhead appropriately within a margin for different scenarios', + async () => { + // Perform the upkeep once to remove non-zero storage slots and have predictable gas measurement + let tx = await getTransmitTx(registry, keeper1, [upkeepId]) + await tx.wait() + + // Different test scenarios + let longBytes = '0x' + for (let i = 0; i < 
maxPerformDataSize.toNumber(); i++) { + longBytes += '11' + } + const upkeepSuccessArray = [true, false] + const performGasArray = [5000, performGas] + const performDataArray = ['0x', longBytes] + const chainModuleOverheads = + await chainModuleBase.getGasOverhead() + + for (const i in upkeepSuccessArray) { + for (const j in performGasArray) { + for (const k in performDataArray) { + const upkeepSuccess = upkeepSuccessArray[i] + const performGas = performGasArray[j] + const performData = performDataArray[k] + + await mock.setCanPerform(upkeepSuccess) + await mock.setPerformGasToBurn(performGas) + await registry + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + newF, + config, + offchainVersion, + offchainBytes, + ) + tx = await getTransmitTx(registry, keeper1, [upkeepId], { + numSigners: newF + 1, + performDatas: [performData], + }) + const receipt = await tx.wait() + const upkeepPerformedLogs = + parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const upkeepGasUsed = upkeepPerformedLog.args.gasUsed + const chargedGasOverhead = + upkeepPerformedLog.args.gasOverhead + const actualGasOverhead = receipt.gasUsed.sub(upkeepGasUsed) + const estimatedGasOverhead = registryConditionalOverhead + .add( + registryPerSignerGasOverhead.mul( + BigNumber.from(newF + 1), + ), + ) + .add( + registryPerPerformByteGasOverhead + .add(chainModuleOverheads.chainModulePerByteOverhead) + .mul( + BigNumber.from(performData.length / 2 - 1) + .add(registryTransmitCalldataFixedBytesOverhead) + .add( + registryTransmitCalldataPerSignerBytesOverhead.mul( + BigNumber.from(newF + 1), + ), + ), + ), + ) + .add(chainModuleOverheads.chainModuleFixedOverhead) + + assert.isTrue(upkeepGasUsed.gt(BigNumber.from('0'))) + assert.isTrue(chargedGasOverhead.gt(BigNumber.from('0'))) + assert.isTrue(actualGasOverhead.gt(BigNumber.from('0'))) + + 
console.log( + 'Gas Benchmarking conditional upkeeps:', + 'upkeepSuccess=', + upkeepSuccess, + 'performGas=', + performGas.toString(), + 'performData length=', + performData.length / 2 - 1, + 'sig verification ( f =', + newF, + '): estimated overhead: ', + estimatedGasOverhead.toString(), + ' charged overhead: ', + chargedGasOverhead.toString(), + ' actual overhead: ', + actualGasOverhead.toString(), + ' calculation margin over gasUsed: ', + chargedGasOverhead.sub(actualGasOverhead).toString(), + ' estimation margin over gasUsed: ', + estimatedGasOverhead.sub(actualGasOverhead).toString(), + ) + + // The actual gas overhead should be less than charged gas overhead, but not by a lot + // The charged gas overhead is controlled by ACCOUNTING_FIXED_GAS_OVERHEAD and + // ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD, and their correct values should be set to + // satisfy constraints in multiple places + assert.isTrue( + chargedGasOverhead.gt(actualGasOverhead), + 'Gas overhead calculated is too low, increase account gas variables (ACCOUNTING_FIXED_GAS_OVERHEAD/ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD) by at least ' + + actualGasOverhead.sub(chargedGasOverhead).toString(), + ) + assert.isTrue( + chargedGasOverhead + .sub(actualGasOverhead) + .lt(gasCalculationMargin), + 'Gas overhead calculated is too high, decrease account gas variables (ACCOUNTING_FIXED_GAS_OVERHEAD/ACCOUNTING_PER_SIGNER_GAS_OVERHEAD) by at least ' + + chargedGasOverhead + .sub(actualGasOverhead) + .sub(gasCalculationMargin) + .toString(), + ) + + // The estimated overhead during checkUpkeep should be close to the actual overhead in transaction + // It should be greater than the actual overhead but not by a lot + // The estimated overhead is controlled by variables + // REGISTRY_CONDITIONAL_OVERHEAD, REGISTRY_LOG_OVERHEAD, REGISTRY_PER_SIGNER_GAS_OVERHEAD + // REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD + assert.isTrue( + estimatedGasOverhead.gt(actualGasOverhead), + 'Gas overhead estimated in check upkeep is too low, 
increase estimation gas variables (REGISTRY_CONDITIONAL_OVERHEAD/REGISTRY_LOG_OVERHEAD/REGISTRY_PER_SIGNER_GAS_OVERHEAD/REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD) by at least ' + + estimatedGasOverhead.sub(chargedGasOverhead).toString(), + ) + assert.isTrue( + estimatedGasOverhead + .sub(actualGasOverhead) + .lt(gasEstimationMargin), + 'Gas overhead estimated is too high, decrease estimation gas variables (REGISTRY_CONDITIONAL_OVERHEAD/REGISTRY_LOG_OVERHEAD/REGISTRY_PER_SIGNER_GAS_OVERHEAD/REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD) by at least ' + + estimatedGasOverhead + .sub(actualGasOverhead) + .sub(gasEstimationMargin) + .toString(), + ) + } + } + } + }, + ) + }) + }) + + describe('Gas benchmarking log upkeeps [ @skip-coverage ]', function () { + const fs = [1, 10] + fs.forEach(function (newF) { + it( + 'When f=' + + newF + + ' calculates gas overhead appropriately within a margin', + async () => { + // Perform the upkeep once to remove non-zero storage slots and have predictable gas measurement + let tx = await getTransmitTx(registry, keeper1, [logUpkeepId]) + await tx.wait() + const performData = '0x' + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(performGas) + await registry.setConfigTypeSafe( + signerAddresses, + keeperAddresses, + newF, + config, + offchainVersion, + offchainBytes, + ) + tx = await getTransmitTx(registry, keeper1, [logUpkeepId], { + numSigners: newF + 1, + performDatas: [performData], + }) + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + const chainModuleOverheads = + await chainModuleBase.getGasOverhead() + + const upkeepGasUsed = upkeepPerformedLog.args.gasUsed + const chargedGasOverhead = upkeepPerformedLog.args.gasOverhead + const actualGasOverhead = receipt.gasUsed.sub(upkeepGasUsed) + const estimatedGasOverhead = 
registryLogOverhead + .add(registryPerSignerGasOverhead.mul(BigNumber.from(newF + 1))) + .add( + registryPerPerformByteGasOverhead + .add(chainModuleOverheads.chainModulePerByteOverhead) + .mul( + BigNumber.from(performData.length / 2 - 1) + .add(registryTransmitCalldataFixedBytesOverhead) + .add( + registryTransmitCalldataPerSignerBytesOverhead.mul( + BigNumber.from(newF + 1), + ), + ), + ), + ) + .add(chainModuleOverheads.chainModuleFixedOverhead) + + assert.isTrue(upkeepGasUsed.gt(BigNumber.from('0'))) + assert.isTrue(chargedGasOverhead.gt(BigNumber.from('0'))) + assert.isTrue(actualGasOverhead.gt(BigNumber.from('0'))) + + console.log( + 'Gas Benchmarking log upkeeps:', + 'upkeepSuccess=', + true, + 'performGas=', + performGas.toString(), + 'performData length=', + performData.length / 2 - 1, + 'sig verification ( f =', + newF, + '): estimated overhead: ', + estimatedGasOverhead.toString(), + ' charged overhead: ', + chargedGasOverhead.toString(), + ' actual overhead: ', + actualGasOverhead.toString(), + ' calculation margin over gasUsed: ', + chargedGasOverhead.sub(actualGasOverhead).toString(), + ' estimation margin over gasUsed: ', + estimatedGasOverhead.sub(actualGasOverhead).toString(), + ) + + assert.isTrue( + chargedGasOverhead.gt(actualGasOverhead), + 'Gas overhead calculated is too low, increase account gas variables (ACCOUNTING_FIXED_GAS_OVERHEAD/ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD) by at least ' + + actualGasOverhead.sub(chargedGasOverhead).toString(), + ) + assert.isTrue( + chargedGasOverhead + .sub(actualGasOverhead) + .lt(gasCalculationMargin), + 'Gas overhead calculated is too high, decrease account gas variables (ACCOUNTING_FIXED_GAS_OVERHEAD/ACCOUNTING_PER_SIGNER_GAS_OVERHEAD) by at least ' + + chargedGasOverhead + .sub(actualGasOverhead) + .sub(gasCalculationMargin) + .toString(), + ) + + assert.isTrue( + estimatedGasOverhead.gt(actualGasOverhead), + 'Gas overhead estimated in check upkeep is too low, increase estimation gas variables 
(REGISTRY_CONDITIONAL_OVERHEAD/REGISTRY_LOG_OVERHEAD/REGISTRY_PER_SIGNER_GAS_OVERHEAD/REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD) by at least ' + + estimatedGasOverhead.sub(chargedGasOverhead).toString(), + ) + assert.isTrue( + estimatedGasOverhead + .sub(actualGasOverhead) + .lt(gasEstimationMargin), + 'Gas overhead estimated is too high, decrease estimation gas variables (REGISTRY_CONDITIONAL_OVERHEAD/REGISTRY_LOG_OVERHEAD/REGISTRY_PER_SIGNER_GAS_OVERHEAD/REGISTRY_PER_PERFORM_BYTE_GAS_OVERHEAD) by at least ' + + estimatedGasOverhead + .sub(actualGasOverhead) + .sub(gasEstimationMargin) + .toString(), + ) + }, + ) + }) + }) + }) + }) + + describe('#transmit with upkeep batches [ @skip-coverage ]', function () { + const numPassingConditionalUpkeepsArray = [0, 1, 5] + const numPassingLogUpkeepsArray = [0, 1, 5] + const numFailingUpkeepsArray = [0, 3] + + for (let idx = 0; idx < numPassingConditionalUpkeepsArray.length; idx++) { + for (let jdx = 0; jdx < numPassingLogUpkeepsArray.length; jdx++) { + for (let kdx = 0; kdx < numFailingUpkeepsArray.length; kdx++) { + const numPassingConditionalUpkeeps = + numPassingConditionalUpkeepsArray[idx] + const numPassingLogUpkeeps = numPassingLogUpkeepsArray[jdx] + const numFailingUpkeeps = numFailingUpkeepsArray[kdx] + if (numPassingConditionalUpkeeps == 0 && numPassingLogUpkeeps == 0) { + continue + } + it( + '[Conditional:' + + numPassingConditionalUpkeeps + + ',Log:' + + numPassingLogUpkeeps + + ',Failures:' + + numFailingUpkeeps + + '] performs successful upkeeps and does not charge failing upkeeps', + async () => { + const allUpkeeps = await getMultipleUpkeepsDeployedAndFunded( + numPassingConditionalUpkeeps, + numPassingLogUpkeeps, + numFailingUpkeeps, + ) + const passingConditionalUpkeepIds = + allUpkeeps.passingConditionalUpkeepIds + const passingLogUpkeepIds = allUpkeeps.passingLogUpkeepIds + const failingUpkeepIds = allUpkeeps.failingUpkeepIds + + const keeperBefore = await registry.getTransmitterInfo( + await 
keeper1.getAddress(), + ) + const keeperLinkBefore = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkBefore = await linkToken.balanceOf( + registry.address, + ) + const registryPremiumBefore = (await registry.getState()).state + .totalPremium + const registrationConditionalPassingBefore = await Promise.all( + passingConditionalUpkeepIds.map(async (id) => { + const reg = await registry.getUpkeep(BigNumber.from(id)) + assert.equal(reg.lastPerformedBlockNumber.toString(), '0') + return reg + }), + ) + const registrationLogPassingBefore = await Promise.all( + passingLogUpkeepIds.map(async (id) => { + const reg = await registry.getUpkeep(BigNumber.from(id)) + assert.equal(reg.lastPerformedBlockNumber.toString(), '0') + return reg + }), + ) + const registrationFailingBefore = await Promise.all( + failingUpkeepIds.map(async (id) => { + const reg = await registry.getUpkeep(BigNumber.from(id)) + assert.equal(reg.lastPerformedBlockNumber.toString(), '0') + return reg + }), + ) + + // cancel upkeeps so they will fail in the transmit process + // must call the cancel upkeep as the owner to avoid the CANCELLATION_DELAY + for (let ldx = 0; ldx < failingUpkeepIds.length; ldx++) { + await registry + .connect(owner) + .cancelUpkeep(failingUpkeepIds[ldx]) + } + + const tx = await getTransmitTx( + registry, + keeper1, + passingConditionalUpkeepIds.concat( + passingLogUpkeepIds.concat(failingUpkeepIds), + ), + ) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly numPassingUpkeeps Upkeep Performed should be emitted + assert.equal( + upkeepPerformedLogs.length, + numPassingConditionalUpkeeps + numPassingLogUpkeeps, + ) + const cancelledUpkeepReportLogs = + parseCancelledUpkeepReportLogs(receipt) + // exactly numFailingUpkeeps Upkeep Performed should be emitted + assert.equal(cancelledUpkeepReportLogs.length, numFailingUpkeeps) + + const keeperAfter = await registry.getTransmitterInfo( + await 
keeper1.getAddress(), + ) + const keeperLinkAfter = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkAfter = await linkToken.balanceOf( + registry.address, + ) + const registrationConditionalPassingAfter = await Promise.all( + passingConditionalUpkeepIds.map(async (id) => { + return await registry.getUpkeep(BigNumber.from(id)) + }), + ) + const registrationLogPassingAfter = await Promise.all( + passingLogUpkeepIds.map(async (id) => { + return await registry.getUpkeep(BigNumber.from(id)) + }), + ) + const registrationFailingAfter = await Promise.all( + failingUpkeepIds.map(async (id) => { + return await registry.getUpkeep(BigNumber.from(id)) + }), + ) + const registryPremiumAfter = (await registry.getState()).state + .totalPremium + const premium = registryPremiumAfter.sub(registryPremiumBefore) + + let netPayment = BigNumber.from('0') + for (let i = 0; i < numPassingConditionalUpkeeps; i++) { + const id = upkeepPerformedLogs[i].args.id + const gasUsed = upkeepPerformedLogs[i].args.gasUsed + const gasOverhead = upkeepPerformedLogs[i].args.gasOverhead + const totalPayment = upkeepPerformedLogs[i].args.totalPayment + + expect(id).to.equal(passingConditionalUpkeepIds[i]) + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(gasOverhead.gt(BigNumber.from('0'))) + assert.isTrue(totalPayment.gt(BigNumber.from('0'))) + + // Balance should be deducted + assert.equal( + registrationConditionalPassingBefore[i].balance + .sub(totalPayment) + .toString(), + registrationConditionalPassingAfter[i].balance.toString(), + ) + + // Amount spent should be updated correctly + assert.equal( + registrationConditionalPassingAfter[i].amountSpent + .sub(totalPayment) + .toString(), + registrationConditionalPassingBefore[ + i + ].amountSpent.toString(), + ) + + // Last perform block number should be updated + assert.equal( + registrationConditionalPassingAfter[ + i + ].lastPerformedBlockNumber.toString(), + tx.blockNumber?.toString(), + ) + + 
netPayment = netPayment.add(totalPayment) + } + + for (let i = 0; i < numPassingLogUpkeeps; i++) { + const id = + upkeepPerformedLogs[numPassingConditionalUpkeeps + i].args.id + const gasUsed = + upkeepPerformedLogs[numPassingConditionalUpkeeps + i].args + .gasUsed + const gasOverhead = + upkeepPerformedLogs[numPassingConditionalUpkeeps + i].args + .gasOverhead + const totalPayment = + upkeepPerformedLogs[numPassingConditionalUpkeeps + i].args + .totalPayment + + expect(id).to.equal(passingLogUpkeepIds[i]) + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(gasOverhead.gt(BigNumber.from('0'))) + assert.isTrue(totalPayment.gt(BigNumber.from('0'))) + + // Balance should be deducted + assert.equal( + registrationLogPassingBefore[i].balance + .sub(totalPayment) + .toString(), + registrationLogPassingAfter[i].balance.toString(), + ) + + // Amount spent should be updated correctly + assert.equal( + registrationLogPassingAfter[i].amountSpent + .sub(totalPayment) + .toString(), + registrationLogPassingBefore[i].amountSpent.toString(), + ) + + // Last perform block number should not be updated for log triggers + assert.equal( + registrationLogPassingAfter[ + i + ].lastPerformedBlockNumber.toString(), + '0', + ) + + netPayment = netPayment.add(totalPayment) + } + + for (let i = 0; i < numFailingUpkeeps; i++) { + // CancelledUpkeep log should be emitted + const id = cancelledUpkeepReportLogs[i].args.id + expect(id).to.equal(failingUpkeepIds[i]) + + // Balance and amount spent should be same + assert.equal( + registrationFailingBefore[i].balance.toString(), + registrationFailingAfter[i].balance.toString(), + ) + assert.equal( + registrationFailingBefore[i].amountSpent.toString(), + registrationFailingAfter[i].amountSpent.toString(), + ) + + // Last perform block number should not be updated + assert.equal( + registrationFailingAfter[ + i + ].lastPerformedBlockNumber.toString(), + '0', + ) + } + + // Keeper payment is gasPayment + premium / num keepers + const 
keeperPayment = netPayment + .sub(premium) + .add(premium.div(BigNumber.from(keeperAddresses.length))) + + // Keeper should be paid net payment for all passed upkeeps + assert.equal( + keeperAfter.balance.sub(keeperPayment).toString(), + keeperBefore.balance.toString(), + ) + + assert.isTrue(keeperLinkAfter.eq(keeperLinkBefore)) + assert.isTrue(registryLinkBefore.eq(registryLinkAfter)) + }, + ) + + it( + '[Conditional:' + + numPassingConditionalUpkeeps + + ',Log' + + numPassingLogUpkeeps + + ',Failures:' + + numFailingUpkeeps + + '] splits gas overhead appropriately among performed upkeeps [ @skip-coverage ]', + async () => { + const allUpkeeps = await getMultipleUpkeepsDeployedAndFunded( + numPassingConditionalUpkeeps, + numPassingLogUpkeeps, + numFailingUpkeeps, + ) + const passingConditionalUpkeepIds = + allUpkeeps.passingConditionalUpkeepIds + const passingLogUpkeepIds = allUpkeeps.passingLogUpkeepIds + const failingUpkeepIds = allUpkeeps.failingUpkeepIds + + // Perform the upkeeps once to remove non-zero storage slots and have predictable gas measurement + let tx = await getTransmitTx( + registry, + keeper1, + passingConditionalUpkeepIds.concat( + passingLogUpkeepIds.concat(failingUpkeepIds), + ), + ) + + await tx.wait() + + // cancel upkeeps so they will fail in the transmit process + // must call the cancel upkeep as the owner to avoid the CANCELLATION_DELAY + for (let ldx = 0; ldx < failingUpkeepIds.length; ldx++) { + await registry + .connect(owner) + .cancelUpkeep(failingUpkeepIds[ldx]) + } + + // Do the actual thing + + tx = await getTransmitTx( + registry, + keeper1, + passingConditionalUpkeepIds.concat( + passingLogUpkeepIds.concat(failingUpkeepIds), + ), + ) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly numPassingUpkeeps Upkeep Performed should be emitted + assert.equal( + upkeepPerformedLogs.length, + numPassingConditionalUpkeeps + numPassingLogUpkeeps, + ) + + let 
netGasUsedPlusChargedOverhead = BigNumber.from('0') + for (let i = 0; i < numPassingConditionalUpkeeps; i++) { + const gasUsed = upkeepPerformedLogs[i].args.gasUsed + const chargedGasOverhead = + upkeepPerformedLogs[i].args.gasOverhead + + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(chargedGasOverhead.gt(BigNumber.from('0'))) + + // Overhead should be same for every upkeep + assert.isTrue( + chargedGasOverhead.eq( + upkeepPerformedLogs[0].args.gasOverhead, + ), + ) + netGasUsedPlusChargedOverhead = netGasUsedPlusChargedOverhead + .add(gasUsed) + .add(chargedGasOverhead) + } + + for (let i = 0; i < numPassingLogUpkeeps; i++) { + const gasUsed = + upkeepPerformedLogs[numPassingConditionalUpkeeps + i].args + .gasUsed + const chargedGasOverhead = + upkeepPerformedLogs[numPassingConditionalUpkeeps + i].args + .gasOverhead + + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(chargedGasOverhead.gt(BigNumber.from('0'))) + + // Overhead should be same for every upkeep + assert.isTrue( + chargedGasOverhead.eq( + upkeepPerformedLogs[numPassingConditionalUpkeeps].args + .gasOverhead, + ), + ) + netGasUsedPlusChargedOverhead = netGasUsedPlusChargedOverhead + .add(gasUsed) + .add(chargedGasOverhead) + } + + console.log( + 'Gas Benchmarking - batching (passedConditionalUpkeeps: ', + numPassingConditionalUpkeeps, + 'passedLogUpkeeps:', + numPassingLogUpkeeps, + 'failedUpkeeps:', + numFailingUpkeeps, + '): ', + numPassingConditionalUpkeeps > 0 + ? 'charged conditional overhead' + : '', + numPassingConditionalUpkeeps > 0 + ? upkeepPerformedLogs[0].args.gasOverhead.toString() + : '', + numPassingLogUpkeeps > 0 ? 'charged log overhead' : '', + numPassingLogUpkeeps > 0 + ? 
upkeepPerformedLogs[ + numPassingConditionalUpkeeps + ].args.gasOverhead.toString() + : '', + ' margin over gasUsed', + netGasUsedPlusChargedOverhead.sub(receipt.gasUsed).toString(), + ) + + // The total gas charged should be greater than tx gas + assert.isTrue( + netGasUsedPlusChargedOverhead.gt(receipt.gasUsed), + 'Charged gas overhead is too low for batch upkeeps, increase ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD', + ) + }, + ) + } + } + } + + it('has enough perform gas overhead for large batches [ @skip-coverage ]', async () => { + const numUpkeeps = 20 + const upkeepIds: BigNumber[] = [] + let totalPerformGas = BigNumber.from('0') + for (let i = 0; i < numUpkeeps; i++) { + const mock = await upkeepMockFactory.deploy() + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + const testUpkeepId = await getUpkeepID(tx) + upkeepIds.push(testUpkeepId) + + // Add funds to passing upkeeps + await registry.connect(owner).addFunds(testUpkeepId, toWei('10')) + + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(performGas) + + totalPerformGas = totalPerformGas.add(performGas) + } + + // Should revert with no overhead added + await evmRevert( + getTransmitTx(registry, keeper1, upkeepIds, { + gasLimit: totalPerformGas, + }), + ) + // Should not revert with overhead added + await getTransmitTx(registry, keeper1, upkeepIds, { + gasLimit: totalPerformGas.add(transmitGasOverhead), + }) + }) + + it('splits l2 payment among performed upkeeps according to perform data weight', async () => { + const numUpkeeps = 7 + const upkeepIds: BigNumber[] = [] + const performDataSizes = [0, 10, 1000, 50, 33, 69, 420] + const performDatas: string[] = [] + const upkeepCalldataWeights: BigNumber[] = [] + let totalCalldataWeight = BigNumber.from('0') + // Same as MockArbGasInfo.sol + const l1CostWeiArb = BigNumber.from(1000000) + + for (let i = 0; i < numUpkeeps; 
i++) { + const mock = await upkeepMockFactory.deploy() + const tx = await arbRegistry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + const testUpkeepId = await getUpkeepID(tx) + upkeepIds.push(testUpkeepId) + + // Add funds to passing upkeeps + await arbRegistry.connect(owner).addFunds(testUpkeepId, toWei('100')) + + // Generate performData + let pd = '0x' + for (let j = 0; j < performDataSizes[i]; j++) { + pd += '11' + } + performDatas.push(pd) + const w = BigNumber.from(performDataSizes[i]) + .add(registryTransmitCalldataFixedBytesOverhead) + .add( + registryTransmitCalldataPerSignerBytesOverhead.mul( + BigNumber.from(f + 1), + ), + ) + upkeepCalldataWeights.push(w) + totalCalldataWeight = totalCalldataWeight.add(w) + } + + // Do the thing + const tx = await getTransmitTx(arbRegistry, keeper1, upkeepIds, { + gasPrice: gasWei.mul('5'), // High gas price so that it gets capped + performDatas, + }) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly numPassingUpkeeps Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, numUpkeeps) + + for (let i = 0; i < numUpkeeps; i++) { + const upkeepPerformedLog = upkeepPerformedLogs[i] + + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + gasCeilingMultiplier, + paymentPremiumPPB, + flatFeeMicroLink, + l1CostWeiArb.mul(upkeepCalldataWeights[i]).div(totalCalldataWeight), + ).total.toString(), + totalPayment.toString(), + ) + } + }) + }) + + describe('#recoverFunds', () => { + const sent = toWei('7') + + beforeEach(async () => { + await linkToken.connect(admin).approve(registry.address, toWei('100')) + await linkToken + .connect(owner) + .transfer(await 
keeper1.getAddress(), toWei('1000')) + + // add funds to upkeep 1 and perform and withdraw some payment + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), emptyBytes, emptyBytes) + + const id1 = await getUpkeepID(tx) + await registry.connect(admin).addFunds(id1, toWei('5')) + + await getTransmitTx(registry, keeper1, [id1]) + await getTransmitTx(registry, keeper2, [id1]) + await getTransmitTx(registry, keeper3, [id1]) + + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + + // transfer funds directly to the registry + await linkToken.connect(keeper1).transfer(registry.address, sent) + + // add funds to upkeep 2 and perform and withdraw some payment + const tx2 = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), emptyBytes, emptyBytes) + const id2 = await getUpkeepID(tx2) + await registry.connect(admin).addFunds(id2, toWei('5')) + + await getTransmitTx(registry, keeper1, [id2]) + await getTransmitTx(registry, keeper2, [id2]) + await getTransmitTx(registry, keeper3, [id2]) + + await registry + .connect(payee2) + .withdrawPayment( + await keeper2.getAddress(), + await nonkeeper.getAddress(), + ) + + // transfer funds using onTokenTransfer + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [id2]) + await linkToken + .connect(owner) + .transferAndCall(registry.address, toWei('1'), data) + + // withdraw some funds + await registry.connect(owner).cancelUpkeep(id1) + await registry + .connect(admin) + .withdrawFunds(id1, await nonkeeper.getAddress()) + }) + + it('reverts if not called by owner', async () => { + await evmRevert( + registry.connect(keeper1).recoverFunds(), + 'Only callable by owner', + ) + }) + + it('allows any funds that have been accidentally transfered to be moved', 
async () => { + const balanceBefore = await linkToken.balanceOf(registry.address) + const ownerBefore = await linkToken.balanceOf(await owner.getAddress()) + + await registry.connect(owner).recoverFunds() + + const balanceAfter = await linkToken.balanceOf(registry.address) + const ownerAfter = await linkToken.balanceOf(await owner.getAddress()) + + assert.isTrue(balanceBefore.eq(balanceAfter.add(sent))) + assert.isTrue(ownerAfter.eq(ownerBefore.add(sent))) + }) + }) + + describe('#getMinBalanceForUpkeep / #checkUpkeep / #transmit', () => { + it('calculates the minimum balance appropriately', async () => { + await mock.setCanCheck(true) + + const oneWei = BigNumber.from(1) + const minBalance = await registry.getMinBalanceForUpkeep(upkeepId) + const tooLow = minBalance.sub(oneWei) + + await registry.connect(admin).addFunds(upkeepId, tooLow) + let checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.INSUFFICIENT_BALANCE, + ) + + await registry.connect(admin).addFunds(upkeepId, oneWei) + checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + assert.equal(checkUpkeepResult.upkeepNeeded, true) + }) + + it('uses maxPerformData size in checkUpkeep but actual performDataSize in transmit', async () => { + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + const upkeepID = await getUpkeepID(tx) + await mock.setCanCheck(true) + await mock.setCanPerform(true) + + // upkeep is underfunded by 1 wei + const minBalance1 = (await registry.getMinBalanceForUpkeep(upkeepID)).sub( + 1, + ) + await registry.connect(owner).addFunds(upkeepID, minBalance1) + + // upkeep check should return false, 2 should return true + let 
checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepID) + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.INSUFFICIENT_BALANCE, + ) + + // however upkeep should perform and pay all the remaining balance + let maxPerformData = '0x' + for (let i = 0; i < maxPerformDataSize.toNumber(); i++) { + maxPerformData += '11' + } + + const tx2 = await getTransmitTx(registry, keeper1, [upkeepID], { + gasPrice: gasWei.mul(gasCeilingMultiplier), + performDatas: [maxPerformData], + }) + + const receipt = await tx2.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal(upkeepPerformedLogs.length, 1) + }) + }) + + describe('#withdrawFunds', () => { + let upkeepId2: BigNumber + + beforeEach(async () => { + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + upkeepId2 = await getUpkeepID(tx) + + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + await registry.connect(admin).addFunds(upkeepId2, toWei('100')) + + // Do a perform so that upkeep is charged some amount + await getTransmitTx(registry, keeper1, [upkeepId]) + await getTransmitTx(registry, keeper1, [upkeepId2]) + }) + + it('reverts if called on a non existing ID', async () => { + await evmRevert( + registry + .connect(admin) + .withdrawFunds(upkeepId.add(1), await payee1.getAddress()), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry + .connect(owner) + .withdrawFunds(upkeepId, await payee1.getAddress()), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if called on an uncanceled upkeep', async () => { + await evmRevert( + registry + .connect(admin) + .withdrawFunds(upkeepId, await payee1.getAddress()), + 'UpkeepNotCanceled()', + ) + }) + + 
it('reverts if called with the 0 address', async () => { + await evmRevert( + registry.connect(admin).withdrawFunds(upkeepId, zeroAddress), + 'InvalidRecipient()', + ) + }) + + describe('after the registration is paused, then cancelled', () => { + it('allows the admin to withdraw', async () => { + const balance = await registry.getBalance(upkeepId) + const payee = await payee1.getAddress() + await registry.connect(admin).pauseUpkeep(upkeepId) + await registry.connect(owner).cancelUpkeep(upkeepId) + await expect(() => + registry.connect(admin).withdrawFunds(upkeepId, payee), + ).to.changeTokenBalance(linkToken, payee1, balance) + }) + }) + + describe('after the registration is cancelled', () => { + beforeEach(async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + await registry.connect(owner).cancelUpkeep(upkeepId2) + }) + + it('can be called successively on two upkeeps', async () => { + await registry + .connect(admin) + .withdrawFunds(upkeepId, await payee1.getAddress()) + await registry + .connect(admin) + .withdrawFunds(upkeepId2, await payee1.getAddress()) + }) + + it('moves the funds out and updates the balance and emits an event', async () => { + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const registryBefore = await linkToken.balanceOf(registry.address) + + let registration = await registry.getUpkeep(upkeepId) + const previousBalance = registration.balance + + const tx = await registry + .connect(admin) + .withdrawFunds(upkeepId, await payee1.getAddress()) + await expect(tx) + .to.emit(registry, 'FundsWithdrawn') + .withArgs(upkeepId, previousBalance, await payee1.getAddress()) + + const payee1After = await linkToken.balanceOf(await payee1.getAddress()) + const registryAfter = await linkToken.balanceOf(registry.address) + + assert.isTrue(payee1Before.add(previousBalance).eq(payee1After)) + assert.isTrue(registryBefore.sub(previousBalance).eq(registryAfter)) + + registration = await 
registry.getUpkeep(upkeepId) + assert.equal(0, registration.balance.toNumber()) + }) + }) + }) + + describe('#simulatePerformUpkeep', () => { + it('reverts if called by non zero address', async () => { + await evmRevert( + registry + .connect(await owner.getAddress()) + .callStatic.simulatePerformUpkeep(upkeepId, '0x'), + 'OnlySimulatedBackend()', + ) + }) + + it('reverts when registry is paused', async () => { + await registry.connect(owner).pause() + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.simulatePerformUpkeep(upkeepId, '0x'), + 'RegistryPaused()', + ) + }) + + it('returns false and gasUsed when perform fails', async () => { + await mock.setCanPerform(false) + + const simulatePerformResult = await registry + .connect(zeroAddress) + .callStatic.simulatePerformUpkeep(upkeepId, '0x') + + assert.equal(simulatePerformResult.success, false) + assert.isTrue(simulatePerformResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('returns true, gasUsed, and performGas when perform succeeds', async () => { + await mock.setCanPerform(true) + + const simulatePerformResult = await registry + .connect(zeroAddress) + .callStatic.simulatePerformUpkeep(upkeepId, '0x') + + assert.equal(simulatePerformResult.success, true) + assert.isTrue(simulatePerformResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('returns correct amount of gasUsed when perform succeeds', async () => { + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(performGas) + + const simulatePerformResult = await registry + .connect(zeroAddress) + .callStatic.simulatePerformUpkeep(upkeepId, '0x') + + assert.equal(simulatePerformResult.success, true) + // Full execute gas should be used, with some performGasBuffer(1000) + assert.isTrue( + simulatePerformResult.gasUsed.gt( + performGas.sub(BigNumber.from('1000')), + ), + ) + }) + }) + + describe('#checkUpkeep', () => { + it('reverts if called by non zero address', async () => { + 
await evmRevert( + registry + .connect(await owner.getAddress()) + .callStatic['checkUpkeep(uint256)'](upkeepId), + 'OnlySimulatedBackend()', + ) + }) + + it('returns false and error code if the upkeep is cancelled by admin', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_CANCELLED, + ) + expect(checkUpkeepResult.gasUsed).to.equal(0) + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns false and error code if the upkeep is cancelled by owner', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_CANCELLED, + ) + expect(checkUpkeepResult.gasUsed).to.equal(0) + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns false and error code if the registry is paused', async () => { + await registry.connect(owner).pause() + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.REGISTRY_PAUSED, + ) + expect(checkUpkeepResult.gasUsed).to.equal(0) + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns false and error code if the upkeep is paused', async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) 
+ + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_PAUSED, + ) + expect(checkUpkeepResult.gasUsed).to.equal(0) + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns false and error code if user is out of funds', async () => { + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.INSUFFICIENT_BALANCE, + ) + expect(checkUpkeepResult.gasUsed).to.equal(0) + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + context('when the registration is funded', () => { + beforeEach(async () => { + await linkToken.connect(admin).approve(registry.address, toWei('200')) + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + await registry.connect(admin).addFunds(logUpkeepId, toWei('100')) + }) + + it('returns false, error code, and revert data if the target check reverts', async () => { + await mock.setShouldRevertCheck(true) + await mock.setCheckRevertReason( + 'custom revert error, clever way to insert offchain data', + ) + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + assert.equal(checkUpkeepResult.upkeepNeeded, false) + + const revertReasonBytes = `0x${checkUpkeepResult.performData.slice(10)}` // remove sighash + assert.equal( + ethers.utils.defaultAbiCoder.decode(['string'], revertReasonBytes)[0], + 'custom revert error, clever way to insert offchain data', + ) + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.TARGET_CHECK_REVERTED, + 
) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + // Feed data should be returned here + assert.isTrue(checkUpkeepResult.fastGasWei.gt(BigNumber.from('0'))) + assert.isTrue(checkUpkeepResult.linkNative.gt(BigNumber.from('0'))) + }) + + it('returns false, error code, and no revert data if the target check revert data exceeds maxRevertDataSize', async () => { + await mock.setShouldRevertCheck(true) + let longRevertReason = '' + for (let i = 0; i <= maxRevertDataSize.toNumber(); i++) { + longRevertReason += 'x' + } + await mock.setCheckRevertReason(longRevertReason) + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + assert.equal(checkUpkeepResult.upkeepNeeded, false) + + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.REVERT_DATA_EXCEEDS_LIMIT, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns false and error code if the upkeep is not needed', async () => { + await mock.setCanCheck(false) + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_NOT_NEEDED, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns false and error code if the performData exceeds limit', async () => { + let longBytes = '0x' + for (let i = 0; i < 5000; i++) { + longBytes += '1' + } + await mock.setCanCheck(true) + await mock.setPerformData(longBytes) + 
+ const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns true with gas used if the target can execute', async () => { + await mock.setCanCheck(true) + await mock.setPerformData(randomBytes) + + const latestBlock = await ethers.provider.getBlock('latest') + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId, { + blockTag: latestBlock.number, + }) + + assert.equal(checkUpkeepResult.upkeepNeeded, true) + assert.equal(checkUpkeepResult.performData, randomBytes) + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.NONE, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + assert.isTrue(checkUpkeepResult.fastGasWei.eq(gasWei)) + assert.isTrue(checkUpkeepResult.linkNative.eq(linkEth)) + }) + + it('calls checkLog for log-trigger upkeeps', async () => { + const log: Log = { + index: 0, + timestamp: 0, + txHash: ethers.utils.randomBytes(32), + blockNumber: 100, + blockHash: ethers.utils.randomBytes(32), + source: randomAddress(), + topics: [ethers.utils.randomBytes(32), ethers.utils.randomBytes(32)], + data: ethers.utils.randomBytes(1000), + } + + await ltUpkeep.mock.checkLog.withArgs(log, '0x').returns(true, '0x1234') + + const checkData = encodeLog(log) + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256,bytes)'](logUpkeepId, checkData) + + expect(checkUpkeepResult.upkeepNeeded).to.be.true 
+ expect(checkUpkeepResult.performData).to.equal('0x1234') + }) + + itMaybe( + 'has a large enough gas overhead to cover upkeeps that use all their gas [ @skip-coverage ]', + async () => { + await mock.setCanCheck(true) + await mock.setCheckGasToBurn(checkGasLimit) + const gas = checkGasLimit.add(checkGasOverhead) + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId, { + gasLimit: gas, + }) + + assert.equal(checkUpkeepResult.upkeepNeeded, true) + }, + ) + }) + }) + + describe('#addFunds', () => { + const amount = toWei('1') + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).addFunds(upkeepId.add(1), amount), + 'UpkeepCancelled()', + ) + }) + + it('adds to the balance of the registration', async () => { + await registry.connect(admin).addFunds(upkeepId, amount) + const registration = await registry.getUpkeep(upkeepId) + assert.isTrue(amount.eq(registration.balance)) + }) + + it('lets anyone add funds to an upkeep not just admin', async () => { + await linkToken.connect(owner).transfer(await payee1.getAddress(), amount) + await linkToken.connect(payee1).approve(registry.address, amount) + + await registry.connect(payee1).addFunds(upkeepId, amount) + const registration = await registry.getUpkeep(upkeepId) + assert.isTrue(amount.eq(registration.balance)) + }) + + it('emits a log', async () => { + const tx = await registry.connect(admin).addFunds(upkeepId, amount) + await expect(tx) + .to.emit(registry, 'FundsAdded') + .withArgs(upkeepId, await admin.getAddress(), amount) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(keeper1).addFunds(upkeepId, amount), + 'UpkeepCancelled()', + ) + }) + }) + + describe('#getActiveUpkeepIDs', () => { + it('reverts if startIndex is out of bounds ', async () => { + await evmRevert( + 
registry.getActiveUpkeepIDs(numUpkeeps, 0), + 'IndexOutOfRange()', + ) + await evmRevert( + registry.getActiveUpkeepIDs(numUpkeeps + 1, 0), + 'IndexOutOfRange()', + ) + }) + + it('returns upkeep IDs bounded by maxCount', async () => { + let upkeepIds = await registry.getActiveUpkeepIDs(0, 1) + assert(upkeepIds.length == 1) + assert(upkeepIds[0].eq(upkeepId)) + upkeepIds = await registry.getActiveUpkeepIDs(1, 3) + assert(upkeepIds.length == 3) + expect(upkeepIds).to.deep.equal([ + afUpkeepId, + logUpkeepId, + streamsLookupUpkeepId, + ]) + }) + + it('returns as many ids as possible if maxCount > num available', async () => { + const upkeepIds = await registry.getActiveUpkeepIDs(1, numUpkeeps + 100) + assert(upkeepIds.length == numUpkeeps - 1) + }) + + it('returns all upkeep IDs if maxCount is 0', async () => { + let upkeepIds = await registry.getActiveUpkeepIDs(0, 0) + assert(upkeepIds.length == numUpkeeps) + upkeepIds = await registry.getActiveUpkeepIDs(2, 0) + assert(upkeepIds.length == numUpkeeps - 2) + }) + }) + + describe('#getMaxPaymentForGas', () => { + let maxl1CostWeiArbWithoutMultiplier: BigNumber + let maxl1CostWeiOptWithoutMultiplier: BigNumber + + beforeEach(async () => { + const arbL1PriceinWei = BigNumber.from(1000) // Same as MockArbGasInfo.sol + maxl1CostWeiArbWithoutMultiplier = arbL1PriceinWei + .mul(16) + .mul( + maxPerformDataSize + .add(registryTransmitCalldataFixedBytesOverhead) + .add( + registryTransmitCalldataPerSignerBytesOverhead.mul( + BigNumber.from(f + 1), + ), + ), + ) + maxl1CostWeiOptWithoutMultiplier = BigNumber.from(2000000) // Same as MockOVMGasPriceOracle.sol + }) + + itMaybe('calculates the max fee appropriately', async () => { + await verifyMaxPayment(registry, chainModuleBase) + }) + + itMaybe('calculates the max fee appropriately for Arbitrum', async () => { + await verifyMaxPayment( + arbRegistry, + arbitrumModule, + maxl1CostWeiArbWithoutMultiplier, + ) + }) + + itMaybe('calculates the max fee appropriately for Optimism', 
async () => { + await verifyMaxPayment( + opRegistry, + optimismModule, + maxl1CostWeiOptWithoutMultiplier, + ) + }) + + it('uses the fallback gas price if the feed has issues', async () => { + const chainModuleOverheads = await chainModuleBase.getGasOverhead() + const expectedFallbackMaxPayment = linkForGas( + performGas, + registryConditionalOverhead + .add(registryPerSignerGasOverhead.mul(f + 1)) + .add( + maxPerformDataSize + .add(registryTransmitCalldataFixedBytesOverhead) + .add( + registryTransmitCalldataPerSignerBytesOverhead.mul( + BigNumber.from(f + 1), + ), + ) + .mul( + registryPerPerformByteGasOverhead.add( + chainModuleOverheads.chainModulePerByteOverhead, + ), + ), + ) + .add(chainModuleOverheads.chainModuleFixedOverhead), + gasCeilingMultiplier.mul('2'), // fallbackGasPrice is 2x gas price + paymentPremiumPPB, + flatFeeMicroLink, + ).total + + // Stale feed + let roundId = 99 + const answer = 100 + let updatedAt = 946684800 // New Years 2000 🥳 + let startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, answer, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + ( + await registry.getMaxPaymentForGas(Trigger.CONDITION, performGas) + ).toString(), + ) + + // Negative feed price + roundId = 100 + updatedAt = now() + startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, -100, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + ( + await registry.getMaxPaymentForGas(Trigger.CONDITION, performGas) + ).toString(), + ) + + // Zero feed price + roundId = 101 + updatedAt = now() + startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, 0, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + ( + await registry.getMaxPaymentForGas(Trigger.CONDITION, performGas) + ).toString(), + ) + }) + + it('uses the fallback link price if the feed has issues', async () => { + const 
chainModuleOverheads = await chainModuleBase.getGasOverhead() + const expectedFallbackMaxPayment = linkForGas( + performGas, + registryConditionalOverhead + .add(registryPerSignerGasOverhead.mul(f + 1)) + .add( + maxPerformDataSize + .add(registryTransmitCalldataFixedBytesOverhead) + .add( + registryTransmitCalldataPerSignerBytesOverhead.mul( + BigNumber.from(f + 1), + ), + ) + .mul( + registryPerPerformByteGasOverhead.add( + chainModuleOverheads.chainModulePerByteOverhead, + ), + ), + ) + .add(chainModuleOverheads.chainModuleFixedOverhead), + gasCeilingMultiplier.mul('2'), // fallbackLinkPrice is 1/2 pli price, so multiply by 2 + paymentPremiumPPB, + flatFeeMicroLink, + ).total + + // Stale feed + let roundId = 99 + const answer = 100 + let updatedAt = 946684800 // New Years 2000 🥳 + let startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, answer, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + ( + await registry.getMaxPaymentForGas(Trigger.CONDITION, performGas) + ).toString(), + ) + + // Negative feed price + roundId = 100 + updatedAt = now() + startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, -100, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + ( + await registry.getMaxPaymentForGas(Trigger.CONDITION, performGas) + ).toString(), + ) + + // Zero feed price + roundId = 101 + updatedAt = now() + startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, 0, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + ( + await registry.getMaxPaymentForGas(Trigger.CONDITION, performGas) + ).toString(), + ) + }) + }) + + describe('#typeAndVersion', () => { + it('uses the correct type and version', async () => { + const typeAndVersion = await registry.typeAndVersion() + assert.equal(typeAndVersion, 'AutomationRegistry 2.2.0') + }) + }) + + describe('#onTokenTransfer', () => { 
+ const amount = toWei('1') + + it('reverts if not called by the PLI token', async () => { + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [upkeepId]) + + await evmRevert( + registry + .connect(keeper1) + .onTokenTransfer(await keeper1.getAddress(), amount, data), + 'OnlyCallableByPLIToken()', + ) + }) + + it('reverts if not called with more or less than 32 bytes', async () => { + const longData = ethers.utils.defaultAbiCoder.encode( + ['uint256', 'uint256'], + ['33', '34'], + ) + const shortData = '0x12345678' + + await evmRevert( + linkToken + .connect(owner) + .transferAndCall(registry.address, amount, longData), + ) + await evmRevert( + linkToken + .connect(owner) + .transferAndCall(registry.address, amount, shortData), + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(keeper1).addFunds(upkeepId, amount), + 'UpkeepCancelled()', + ) + }) + + it('updates the funds of the job id passed', async () => { + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [upkeepId]) + + const before = (await registry.getUpkeep(upkeepId)).balance + await linkToken + .connect(owner) + .transferAndCall(registry.address, amount, data) + const after = (await registry.getUpkeep(upkeepId)).balance + + assert.isTrue(before.add(amount).eq(after)) + }) + }) + + describeMaybe('#setConfig - onchain', async () => { + const payment = BigNumber.from(1) + const flatFee = BigNumber.from(2) + const maxGas = BigNumber.from(6) + const staleness = BigNumber.from(4) + const ceiling = BigNumber.from(5) + const newMinUpkeepSpend = BigNumber.from(9) + const newMaxCheckDataSize = BigNumber.from(10000) + const newMaxPerformDataSize = BigNumber.from(10000) + const newMaxRevertDataSize = BigNumber.from(10000) + const newMaxPerformGas = BigNumber.from(10000000) + const fbGasEth = BigNumber.from(7) + const fbLinkEth = BigNumber.from(8) + const newTranscoder = randomAddress() + 
const newRegistrars = [randomAddress(), randomAddress()] + const upkeepManager = randomAddress() + + const newConfig: OnChainConfig = { + paymentPremiumPPB: payment, + flatFeeMicroLink: flatFee, + checkGasLimit: maxGas, + stalenessSeconds: staleness, + gasCeilingMultiplier: ceiling, + minUpkeepSpend: newMinUpkeepSpend, + maxCheckDataSize: newMaxCheckDataSize, + maxPerformDataSize: newMaxPerformDataSize, + maxRevertDataSize: newMaxRevertDataSize, + maxPerformGas: newMaxPerformGas, + fallbackGasPrice: fbGasEth, + fallbackLinkPrice: fbLinkEth, + transcoder: newTranscoder, + registrars: newRegistrars, + upkeepPrivilegeManager: upkeepManager, + chainModule: chainModuleBase.address, + reorgProtectionEnabled: true, + } + + it('reverts when called by anyone but the proposed owner', async () => { + await evmRevert( + registry + .connect(payee1) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + newConfig, + offchainVersion, + offchainBytes, + ), + 'Only callable by owner', + ) + }) + + it('reverts if signers or transmitters are the zero address', async () => { + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + [randomAddress(), randomAddress(), randomAddress(), zeroAddress], + [ + randomAddress(), + randomAddress(), + randomAddress(), + randomAddress(), + ], + f, + newConfig, + offchainVersion, + offchainBytes, + ), + 'InvalidSigner()', + ) + + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + [ + randomAddress(), + randomAddress(), + randomAddress(), + randomAddress(), + ], + [randomAddress(), randomAddress(), randomAddress(), zeroAddress], + f, + newConfig, + offchainVersion, + offchainBytes, + ), + 'InvalidTransmitter()', + ) + }) + + it('updates the onchainConfig and configDigest', async () => { + const old = await registry.getState() + const oldConfig = old.config + const oldState = old.state + assert.isTrue(paymentPremiumPPB.eq(oldConfig.paymentPremiumPPB)) + 
assert.isTrue(flatFeeMicroLink.eq(oldConfig.flatFeeMicroLink)) + assert.isTrue(stalenessSeconds.eq(oldConfig.stalenessSeconds)) + assert.isTrue(gasCeilingMultiplier.eq(oldConfig.gasCeilingMultiplier)) + + await registry + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + newConfig, + offchainVersion, + offchainBytes, + ) + + const updated = await registry.getState() + const updatedConfig = updated.config + const updatedState = updated.state + assert.equal(updatedConfig.paymentPremiumPPB, payment.toNumber()) + assert.equal(updatedConfig.flatFeeMicroLink, flatFee.toNumber()) + assert.equal(updatedConfig.stalenessSeconds, staleness.toNumber()) + assert.equal(updatedConfig.gasCeilingMultiplier, ceiling.toNumber()) + assert.equal( + updatedConfig.minUpkeepSpend.toString(), + newMinUpkeepSpend.toString(), + ) + assert.equal( + updatedConfig.maxCheckDataSize, + newMaxCheckDataSize.toNumber(), + ) + assert.equal( + updatedConfig.maxPerformDataSize, + newMaxPerformDataSize.toNumber(), + ) + assert.equal( + updatedConfig.maxRevertDataSize, + newMaxRevertDataSize.toNumber(), + ) + assert.equal(updatedConfig.maxPerformGas, newMaxPerformGas.toNumber()) + assert.equal(updatedConfig.checkGasLimit, maxGas.toNumber()) + assert.equal( + updatedConfig.fallbackGasPrice.toNumber(), + fbGasEth.toNumber(), + ) + assert.equal( + updatedConfig.fallbackLinkPrice.toNumber(), + fbLinkEth.toNumber(), + ) + assert.equal(updatedState.latestEpoch, 0) + + assert(oldState.configCount + 1 == updatedState.configCount) + assert( + oldState.latestConfigBlockNumber != + updatedState.latestConfigBlockNumber, + ) + assert(oldState.latestConfigDigest != updatedState.latestConfigDigest) + + assert.equal(updatedConfig.transcoder, newTranscoder) + assert.deepEqual(updatedConfig.registrars, newRegistrars) + assert.equal(updatedConfig.upkeepPrivilegeManager, upkeepManager) + }) + + it('maintains paused state when config is changed', async () => { + await registry.pause() + const 
old = await registry.getState() + assert.isTrue(old.state.paused) + + await registry + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + newConfig, + offchainVersion, + offchainBytes, + ) + + const updated = await registry.getState() + assert.isTrue(updated.state.paused) + }) + + it('emits an event', async () => { + const tx = await registry + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + newConfig, + offchainVersion, + offchainBytes, + ) + await expect(tx).to.emit(registry, 'ConfigSet') + }) + }) + + describe('#setConfig - offchain', () => { + let newKeepers: string[] + + beforeEach(async () => { + newKeepers = [ + await personas.Eddy.getAddress(), + await personas.Nick.getAddress(), + await personas.Neil.getAddress(), + await personas.Carol.getAddress(), + ] + }) + + it('reverts when called by anyone but the owner', async () => { + await evmRevert( + registry + .connect(payee1) + .setConfigTypeSafe( + newKeepers, + newKeepers, + f, + config, + offchainVersion, + offchainBytes, + ), + 'Only callable by owner', + ) + }) + + it('reverts if too many keeperAddresses set', async () => { + for (let i = 0; i < 40; i++) { + newKeepers.push(randomAddress()) + } + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + newKeepers, + newKeepers, + f, + config, + offchainVersion, + offchainBytes, + ), + 'TooManyOracles()', + ) + }) + + it('reverts if f=0', async () => { + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + newKeepers, + newKeepers, + 0, + config, + offchainVersion, + offchainBytes, + ), + 'IncorrectNumberOfFaultyOracles()', + ) + }) + + it('reverts if signers != transmitters length', async () => { + const signers = [randomAddress()] + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + signers, + newKeepers, + f, + config, + offchainVersion, + offchainBytes, + ), + 'IncorrectNumberOfSigners()', + ) + }) + + it('reverts if signers <= 3f', 
async () => { + newKeepers.pop() + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + newKeepers, + newKeepers, + f, + config, + offchainVersion, + offchainBytes, + ), + 'IncorrectNumberOfSigners()', + ) + }) + + it('reverts on repeated signers', async () => { + const newSigners = [ + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + ] + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + newSigners, + newKeepers, + f, + config, + offchainVersion, + offchainBytes, + ), + 'RepeatedSigner()', + ) + }) + + it('reverts on repeated transmitters', async () => { + const newTransmitters = [ + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + ] + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + newKeepers, + newTransmitters, + f, + config, + offchainVersion, + offchainBytes, + ), + 'RepeatedTransmitter()', + ) + }) + + itMaybe('stores new config and emits event', async () => { + // Perform an upkeep so that totalPremium is updated + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + let tx = await getTransmitTx(registry, keeper1, [upkeepId]) + await tx.wait() + + const newOffChainVersion = BigNumber.from('2') + const newOffChainConfig = '0x1122' + + const old = await registry.getState() + const oldState = old.state + assert(oldState.totalPremium.gt(BigNumber.from('0'))) + + const newSigners = newKeepers + tx = await registry + .connect(owner) + .setConfigTypeSafe( + newSigners, + newKeepers, + f, + config, + newOffChainVersion, + newOffChainConfig, + ) + + const updated = await registry.getState() + const updatedState = updated.state + assert(oldState.totalPremium.eq(updatedState.totalPremium)) + + // Old signer addresses which are not in new signers should be non active + for (let i = 0; i < signerAddresses.length; 
i++) { + const signer = signerAddresses[i] + if (!newSigners.includes(signer)) { + assert(!(await registry.getSignerInfo(signer)).active) + assert((await registry.getSignerInfo(signer)).index == 0) + } + } + // New signer addresses should be active + for (let i = 0; i < newSigners.length; i++) { + const signer = newSigners[i] + assert((await registry.getSignerInfo(signer)).active) + assert((await registry.getSignerInfo(signer)).index == i) + } + // Old transmitter addresses which are not in new transmitter should be non active, update lastCollected but retain other info + for (let i = 0; i < keeperAddresses.length; i++) { + const transmitter = keeperAddresses[i] + if (!newKeepers.includes(transmitter)) { + assert(!(await registry.getTransmitterInfo(transmitter)).active) + assert((await registry.getTransmitterInfo(transmitter)).index == i) + assert( + (await registry.getTransmitterInfo(transmitter)).lastCollected.eq( + oldState.totalPremium.sub( + oldState.totalPremium.mod(keeperAddresses.length), + ), + ), + ) + } + } + // New transmitter addresses should be active + for (let i = 0; i < newKeepers.length; i++) { + const transmitter = newKeepers[i] + assert((await registry.getTransmitterInfo(transmitter)).active) + assert((await registry.getTransmitterInfo(transmitter)).index == i) + assert( + (await registry.getTransmitterInfo(transmitter)).lastCollected.eq( + oldState.totalPremium, + ), + ) + } + + // config digest should be updated + assert(oldState.configCount + 1 == updatedState.configCount) + assert( + oldState.latestConfigBlockNumber != + updatedState.latestConfigBlockNumber, + ) + assert(oldState.latestConfigDigest != updatedState.latestConfigDigest) + + //New config should be updated + assert.deepEqual(updated.signers, newKeepers) + assert.deepEqual(updated.transmitters, newKeepers) + + // Event should have been emitted + await expect(tx).to.emit(registry, 'ConfigSet') + }) + }) + + describe('#setPeerRegistryMigrationPermission() / 
#getPeerRegistryMigrationPermission()', () => { + const peer = randomAddress() + it('allows the owner to set the peer registries', async () => { + let permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(0) + await registry.setPeerRegistryMigrationPermission(peer, 1) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(1) + await registry.setPeerRegistryMigrationPermission(peer, 2) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(2) + await registry.setPeerRegistryMigrationPermission(peer, 0) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(0) + }) + it('reverts if passed an unsupported permission', async () => { + await expect( + registry.connect(admin).setPeerRegistryMigrationPermission(peer, 10), + ).to.be.reverted + }) + it('reverts if not called by the owner', async () => { + await expect( + registry.connect(admin).setPeerRegistryMigrationPermission(peer, 1), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('#registerUpkeep', () => { + it('reverts when registry is paused', async () => { + await registry.connect(owner).pause() + await evmRevert( + registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), emptyBytes, '0x'), + 'RegistryPaused()', + ) + }) + + it('reverts if the target is not a contract', async () => { + await evmRevert( + registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](zeroAddress, performGas, await admin.getAddress(), emptyBytes, '0x'), + 'NotAContract()', + ) + }) + + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry + .connect(keeper1) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), emptyBytes, '0x'), + 
'OnlyCallableByOwnerOrRegistrar()', + ) + }) + + it('reverts if execute gas is too low', async () => { + await evmRevert( + registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, 2299, await admin.getAddress(), emptyBytes, '0x'), + 'GasLimitOutsideRange()', + ) + }) + + it('reverts if execute gas is too high', async () => { + await evmRevert( + registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, 5000001, await admin.getAddress(), emptyBytes, '0x'), + 'GasLimitOutsideRange()', + ) + }) + + it('reverts if checkData is too long', async () => { + let longBytes = '0x' + for (let i = 0; i < 10000; i++) { + longBytes += '1' + } + await evmRevert( + registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), longBytes, '0x'), + 'CheckDataExceedsLimit()', + ) + }) + + it('creates a record of the registration', async () => { + const performGases = [100000, 500000] + const checkDatas = [emptyBytes, '0x12'] + + for (let jdx = 0; jdx < performGases.length; jdx++) { + const performGas = performGases[jdx] + for (let kdx = 0; kdx < checkDatas.length; kdx++) { + const checkData = checkDatas[kdx] + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), checkData, '0x') + + //confirm the upkeep details and verify emitted events + const testUpkeepId = await getUpkeepID(tx) + await expect(tx) + .to.emit(registry, 'UpkeepRegistered') + .withArgs(testUpkeepId, performGas, await admin.getAddress()) + + await expect(tx) + .to.emit(registry, 'UpkeepCheckDataSet') + .withArgs(testUpkeepId, checkData) + await expect(tx) + .to.emit(registry, 'UpkeepTriggerConfigSet') + .withArgs(testUpkeepId, '0x') + + const registration = await registry.getUpkeep(testUpkeepId) + + assert.equal(mock.address, registration.target) 
+ assert.notEqual( + ethers.constants.AddressZero, + await registry.getForwarder(testUpkeepId), + ) + assert.equal( + performGas.toString(), + registration.performGas.toString(), + ) + assert.equal(await admin.getAddress(), registration.admin) + assert.equal(0, registration.balance.toNumber()) + assert.equal(0, registration.amountSpent.toNumber()) + assert.equal(0, registration.lastPerformedBlockNumber) + assert.equal(checkData, registration.checkData) + assert.equal(registration.paused, false) + assert.equal(registration.offchainConfig, '0x') + assert(registration.maxValidBlocknumber.eq('0xffffffff')) + } + } + }) + }) + + describe('#pauseUpkeep', () => { + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).pauseUpkeep(upkeepId.add(1)), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is already canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).pauseUpkeep(upkeepId), + 'UpkeepCancelled()', + ) + }) + + it('reverts if the upkeep is already paused', async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).pauseUpkeep(upkeepId), + 'OnlyUnpausedUpkeep()', + ) + }) + + it('reverts if the caller is not the upkeep admin', async () => { + await evmRevert( + registry.connect(keeper1).pauseUpkeep(upkeepId), + 'OnlyCallableByAdmin()', + ) + }) + + it('pauses the upkeep and emits an event', async () => { + const tx = await registry.connect(admin).pauseUpkeep(upkeepId) + await expect(tx).to.emit(registry, 'UpkeepPaused').withArgs(upkeepId) + + const registration = await registry.getUpkeep(upkeepId) + assert.equal(registration.paused, true) + }) + }) + + describe('#unpauseUpkeep', () => { + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).unpauseUpkeep(upkeepId.add(1)), + 'OnlyCallableByAdmin()', + ) + }) + + 
it('reverts if the upkeep is already canceled', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).unpauseUpkeep(upkeepId), + 'UpkeepCancelled()', + ) + }) + + it('marks the contract as paused', async () => { + assert.isFalse((await registry.getState()).state.paused) + + await registry.connect(owner).pause() + + assert.isTrue((await registry.getState()).state.paused) + }) + + it('reverts if the upkeep is not paused', async () => { + await evmRevert( + registry.connect(admin).unpauseUpkeep(upkeepId), + 'OnlyPausedUpkeep()', + ) + }) + + it('reverts if the caller is not the upkeep admin', async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) + + const registration = await registry.getUpkeep(upkeepId) + + assert.equal(registration.paused, true) + + await evmRevert( + registry.connect(keeper1).unpauseUpkeep(upkeepId), + 'OnlyCallableByAdmin()', + ) + }) + + it('unpauses the upkeep and emits an event', async () => { + const originalCount = (await registry.getActiveUpkeepIDs(0, 0)).length + + await registry.connect(admin).pauseUpkeep(upkeepId) + + const tx = await registry.connect(admin).unpauseUpkeep(upkeepId) + + await expect(tx).to.emit(registry, 'UpkeepUnpaused').withArgs(upkeepId) + + const registration = await registry.getUpkeep(upkeepId) + assert.equal(registration.paused, false) + + const upkeepIds = await registry.getActiveUpkeepIDs(0, 0) + assert.equal(upkeepIds.length, originalCount) + }) + }) + + describe('#setUpkeepCheckData', () => { + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry + .connect(keeper1) + .setUpkeepCheckData(upkeepId.add(1), randomBytes), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the caller is not upkeep admin', async () => { + await evmRevert( + registry.connect(keeper1).setUpkeepCheckData(upkeepId, randomBytes), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is cancelled', async () => { 
+ await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).setUpkeepCheckData(upkeepId, randomBytes), + 'UpkeepCancelled()', + ) + }) + + it('is allowed to update on paused upkeep', async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) + await registry.connect(admin).setUpkeepCheckData(upkeepId, randomBytes) + + const registration = await registry.getUpkeep(upkeepId) + assert.equal(randomBytes, registration.checkData) + }) + + it('reverts if new data exceeds limit', async () => { + let longBytes = '0x' + for (let i = 0; i < 10000; i++) { + longBytes += '1' + } + + await evmRevert( + registry.connect(admin).setUpkeepCheckData(upkeepId, longBytes), + 'CheckDataExceedsLimit()', + ) + }) + + it('updates the upkeep check data and emits an event', async () => { + const tx = await registry + .connect(admin) + .setUpkeepCheckData(upkeepId, randomBytes) + await expect(tx) + .to.emit(registry, 'UpkeepCheckDataSet') + .withArgs(upkeepId, randomBytes) + + const registration = await registry.getUpkeep(upkeepId) + assert.equal(randomBytes, registration.checkData) + }) + }) + + describe('#setUpkeepGasLimit', () => { + const newGasLimit = BigNumber.from('300000') + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(admin).setUpkeepGasLimit(upkeepId.add(1), newGasLimit), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(admin).setUpkeepGasLimit(upkeepId, newGasLimit), + 'UpkeepCancelled()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry.connect(owner).setUpkeepGasLimit(upkeepId, newGasLimit), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if new gas limit is out of bounds', async () => { + await evmRevert( + registry + .connect(admin) + .setUpkeepGasLimit(upkeepId, 
BigNumber.from('100')), + 'GasLimitOutsideRange()', + ) + await evmRevert( + registry + .connect(admin) + .setUpkeepGasLimit(upkeepId, BigNumber.from('6000000')), + 'GasLimitOutsideRange()', + ) + }) + + it('updates the gas limit successfully', async () => { + const initialGasLimit = (await registry.getUpkeep(upkeepId)).performGas + assert.equal(initialGasLimit, performGas.toNumber()) + await registry.connect(admin).setUpkeepGasLimit(upkeepId, newGasLimit) + const updatedGasLimit = (await registry.getUpkeep(upkeepId)).performGas + assert.equal(updatedGasLimit, newGasLimit.toNumber()) + }) + + it('emits a log', async () => { + const tx = await registry + .connect(admin) + .setUpkeepGasLimit(upkeepId, newGasLimit) + await expect(tx) + .to.emit(registry, 'UpkeepGasLimitSet') + .withArgs(upkeepId, newGasLimit) + }) + }) + + describe('#setUpkeepOffchainConfig', () => { + const newConfig = '0xc0ffeec0ffee' + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry + .connect(admin) + .setUpkeepOffchainConfig(upkeepId.add(1), newConfig), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(admin).setUpkeepOffchainConfig(upkeepId, newConfig), + 'UpkeepCancelled()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry.connect(owner).setUpkeepOffchainConfig(upkeepId, newConfig), + 'OnlyCallableByAdmin()', + ) + }) + + it('updates the config successfully', async () => { + const initialConfig = (await registry.getUpkeep(upkeepId)).offchainConfig + assert.equal(initialConfig, '0x') + await registry.connect(admin).setUpkeepOffchainConfig(upkeepId, newConfig) + const updatedConfig = (await registry.getUpkeep(upkeepId)).offchainConfig + assert.equal(newConfig, updatedConfig) + }) + + it('emits a log', async () => { + const tx = await registry + .connect(admin) 
+ .setUpkeepOffchainConfig(upkeepId, newConfig) + await expect(tx) + .to.emit(registry, 'UpkeepOffchainConfigSet') + .withArgs(upkeepId, newConfig) + }) + }) + + describe('#setUpkeepTriggerConfig', () => { + const newConfig = '0xdeadbeef' + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry + .connect(admin) + .setUpkeepTriggerConfig(upkeepId.add(1), newConfig), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(admin).setUpkeepTriggerConfig(upkeepId, newConfig), + 'UpkeepCancelled()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry.connect(owner).setUpkeepTriggerConfig(upkeepId, newConfig), + 'OnlyCallableByAdmin()', + ) + }) + + it('emits a log', async () => { + const tx = await registry + .connect(admin) + .setUpkeepTriggerConfig(upkeepId, newConfig) + await expect(tx) + .to.emit(registry, 'UpkeepTriggerConfigSet') + .withArgs(upkeepId, newConfig) + }) + }) + + describe('#transferUpkeepAdmin', () => { + it('reverts when called by anyone but the current upkeep admin', async () => { + await evmRevert( + registry + .connect(payee1) + .transferUpkeepAdmin(upkeepId, await payee2.getAddress()), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts when transferring to self', async () => { + await evmRevert( + registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await admin.getAddress()), + 'ValueNotChanged()', + ) + }) + + it('reverts when the upkeep is cancelled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await keeper1.getAddress()), + 'UpkeepCancelled()', + ) + }) + + it('allows cancelling transfer by reverting to zero address', async () => { + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await 
payee1.getAddress()) + const tx = await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, ethers.constants.AddressZero) + + await expect(tx) + .to.emit(registry, 'UpkeepAdminTransferRequested') + .withArgs( + upkeepId, + await admin.getAddress(), + ethers.constants.AddressZero, + ) + }) + + it('does not change the upkeep admin', async () => { + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + + const upkeep = await registry.getUpkeep(upkeepId) + assert.equal(await admin.getAddress(), upkeep.admin) + }) + + it('emits an event announcing the new upkeep admin', async () => { + const tx = await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + + await expect(tx) + .to.emit(registry, 'UpkeepAdminTransferRequested') + .withArgs(upkeepId, await admin.getAddress(), await payee1.getAddress()) + }) + + it('does not emit an event when called with the same proposed upkeep admin', async () => { + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + + const tx = await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + const receipt = await tx.wait() + assert.equal(0, receipt.logs.length) + }) + }) + + describe('#acceptUpkeepAdmin', () => { + beforeEach(async () => { + // Start admin transfer to payee1 + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + }) + + it('reverts when not called by the proposed upkeep admin', async () => { + await evmRevert( + registry.connect(payee2).acceptUpkeepAdmin(upkeepId), + 'OnlyCallableByProposedAdmin()', + ) + }) + + it('reverts when the upkeep is cancelled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(payee1).acceptUpkeepAdmin(upkeepId), + 'UpkeepCancelled()', + ) + }) + + it('does change the admin', async () => { + await 
registry.connect(payee1).acceptUpkeepAdmin(upkeepId) + + const upkeep = await registry.getUpkeep(upkeepId) + assert.equal(await payee1.getAddress(), upkeep.admin) + }) + + it('emits an event announcing the new upkeep admin', async () => { + const tx = await registry.connect(payee1).acceptUpkeepAdmin(upkeepId) + await expect(tx) + .to.emit(registry, 'UpkeepAdminTransferred') + .withArgs(upkeepId, await admin.getAddress(), await payee1.getAddress()) + }) + }) + + describe('#withdrawOwnerFunds', () => { + it('can only be called by owner', async () => { + await evmRevert( + registry.connect(keeper1).withdrawOwnerFunds(), + 'Only callable by owner', + ) + }) + + itMaybe('withdraws the collected fees to owner', async () => { + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + // Very high min spend, whole balance as cancellation fees + const minUpkeepSpend = toWei('1000') + await registry.connect(owner).setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrars: [], + upkeepPrivilegeManager: upkeepManager, + chainModule: chainModuleBase.address, + reorgProtectionEnabled: true, + }, + offchainVersion, + offchainBytes, + ) + const upkeepBalance = (await registry.getUpkeep(upkeepId)).balance + const ownerBefore = await linkToken.balanceOf(await owner.getAddress()) + + await registry.connect(owner).cancelUpkeep(upkeepId) + + // Transfered to owner balance on registry + let ownerRegistryBalance = (await registry.getState()).state + .ownerLinkBalance + assert.isTrue(ownerRegistryBalance.eq(upkeepBalance)) + + // Now withdraw + await registry.connect(owner).withdrawOwnerFunds() + + ownerRegistryBalance = (await registry.getState()).state.ownerLinkBalance + const ownerAfter 
= await linkToken.balanceOf(await owner.getAddress()) + + // Owner registry balance should be changed to 0 + assert.isTrue(ownerRegistryBalance.eq(BigNumber.from('0'))) + + // Owner should be credited with the balance + assert.isTrue(ownerBefore.add(upkeepBalance).eq(ownerAfter)) + }) + }) + + describe('#transferPayeeship', () => { + it('reverts when called by anyone but the current payee', async () => { + await evmRevert( + registry + .connect(payee2) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ), + 'OnlyCallableByPayee()', + ) + }) + + it('reverts when transferring to self', async () => { + await evmRevert( + registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee1.getAddress(), + ), + 'ValueNotChanged()', + ) + }) + + it('does not change the payee', async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + + const info = await registry.getTransmitterInfo(await keeper1.getAddress()) + assert.equal(await payee1.getAddress(), info.payee) + }) + + it('emits an event announcing the new payee', async () => { + const tx = await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + await expect(tx) + .to.emit(registry, 'PayeeshipTransferRequested') + .withArgs( + await keeper1.getAddress(), + await payee1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('does not emit an event when called with the same proposal', async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + + const tx = await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + const receipt = await tx.wait() + assert.equal(0, receipt.logs.length) + }) + }) + + describe('#acceptPayeeship', () => { + beforeEach(async () => { + await registry + 
.connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('reverts when called by anyone but the proposed payee', async () => { + await evmRevert( + registry.connect(payee1).acceptPayeeship(await keeper1.getAddress()), + 'OnlyCallableByProposedPayee()', + ) + }) + + it('emits an event announcing the new payee', async () => { + const tx = await registry + .connect(payee2) + .acceptPayeeship(await keeper1.getAddress()) + await expect(tx) + .to.emit(registry, 'PayeeshipTransferred') + .withArgs( + await keeper1.getAddress(), + await payee1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('does change the payee', async () => { + await registry.connect(payee2).acceptPayeeship(await keeper1.getAddress()) + + const info = await registry.getTransmitterInfo(await keeper1.getAddress()) + assert.equal(await payee2.getAddress(), info.payee) + }) + }) + + describe('#pause', () => { + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry.connect(keeper1).pause(), + 'Only callable by owner', + ) + }) + + it('marks the contract as paused', async () => { + assert.isFalse((await registry.getState()).state.paused) + + await registry.connect(owner).pause() + + assert.isTrue((await registry.getState()).state.paused) + }) + + it('Does not allow transmits when paused', async () => { + await registry.connect(owner).pause() + + await evmRevert( + getTransmitTx(registry, keeper1, [upkeepId]), + 'RegistryPaused()', + ) + }) + + it('Does not allow creation of new upkeeps when paused', async () => { + await registry.connect(owner).pause() + + await evmRevert( + registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), emptyBytes, '0x'), + 'RegistryPaused()', + ) + }) + }) + + describe('#unpause', () => { + beforeEach(async () => { + await registry.connect(owner).pause() + }) + + it('reverts if called by a non-owner', 
async () => { + await evmRevert( + registry.connect(keeper1).unpause(), + 'Only callable by owner', + ) + }) + + it('marks the contract as not paused', async () => { + assert.isTrue((await registry.getState()).state.paused) + + await registry.connect(owner).unpause() + + assert.isFalse((await registry.getState()).state.paused) + }) + }) + + describe('#migrateUpkeeps() / #receiveUpkeeps()', async () => { + context('when permissions are set', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + await registry.setPeerRegistryMigrationPermission(mgRegistry.address, 1) + await mgRegistry.setPeerRegistryMigrationPermission(registry.address, 2) + }) + + it('migrates an upkeep', async () => { + const offchainBytes = '0x987654abcd' + await registry + .connect(admin) + .setUpkeepOffchainConfig(upkeepId, offchainBytes) + const reg1Upkeep = await registry.getUpkeep(upkeepId) + const forwarderAddress = await registry.getForwarder(upkeepId) + expect(reg1Upkeep.balance).to.equal(toWei('100')) + expect(reg1Upkeep.checkData).to.equal(randomBytes) + expect(forwarderAddress).to.not.equal(ethers.constants.AddressZero) + expect(reg1Upkeep.offchainConfig).to.equal(offchainBytes) + expect((await registry.getState()).state.numUpkeeps).to.equal( + numUpkeeps, + ) + const forwarder = IAutomationForwarderFactory.connect( + forwarderAddress, + owner, + ) + expect(await forwarder.getRegistry()).to.equal(registry.address) + // Set an upkeep admin transfer in progress too + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + + // migrate + await registry + .connect(admin) + .migrateUpkeeps([upkeepId], mgRegistry.address) + expect((await registry.getState()).state.numUpkeeps).to.equal( + numUpkeeps - 1, + ) + expect((await mgRegistry.getState()).state.numUpkeeps).to.equal(1) + expect((await 
registry.getUpkeep(upkeepId)).balance).to.equal(0) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal('0x') + expect((await mgRegistry.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect( + (await mgRegistry.getState()).state.expectedLinkBalance, + ).to.equal(toWei('100')) + expect((await mgRegistry.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + expect((await mgRegistry.getUpkeep(upkeepId)).offchainConfig).to.equal( + offchainBytes, + ) + expect(await mgRegistry.getForwarder(upkeepId)).to.equal( + forwarderAddress, + ) + // test that registry is updated on forwarder + expect(await forwarder.getRegistry()).to.equal(mgRegistry.address) + // migration will delete the upkeep and nullify admin transfer + await expect( + registry.connect(payee1).acceptUpkeepAdmin(upkeepId), + ).to.be.revertedWith('UpkeepCancelled()') + await expect( + mgRegistry.connect(payee1).acceptUpkeepAdmin(upkeepId), + ).to.be.revertedWith('OnlyCallableByProposedAdmin()') + }) + + it('migrates a paused upkeep', async () => { + expect((await registry.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + expect((await registry.getState()).state.numUpkeeps).to.equal( + numUpkeeps, + ) + await registry.connect(admin).pauseUpkeep(upkeepId) + // verify the upkeep is paused + expect((await registry.getUpkeep(upkeepId)).paused).to.equal(true) + // migrate + await registry + .connect(admin) + .migrateUpkeeps([upkeepId], mgRegistry.address) + expect((await registry.getState()).state.numUpkeeps).to.equal( + numUpkeeps - 1, + ) + expect((await mgRegistry.getState()).state.numUpkeeps).to.equal(1) + expect((await registry.getUpkeep(upkeepId)).balance).to.equal(0) + expect((await mgRegistry.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal('0x') + expect((await 
mgRegistry.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + expect( + (await mgRegistry.getState()).state.expectedLinkBalance, + ).to.equal(toWei('100')) + // verify the upkeep is still paused after migration + expect((await mgRegistry.getUpkeep(upkeepId)).paused).to.equal(true) + }) + + it('emits an event on both contracts', async () => { + expect((await registry.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + expect((await registry.getState()).state.numUpkeeps).to.equal( + numUpkeeps, + ) + const tx = registry + .connect(admin) + .migrateUpkeeps([upkeepId], mgRegistry.address) + await expect(tx) + .to.emit(registry, 'UpkeepMigrated') + .withArgs(upkeepId, toWei('100'), mgRegistry.address) + await expect(tx) + .to.emit(mgRegistry, 'UpkeepReceived') + .withArgs(upkeepId, toWei('100'), registry.address) + }) + + it('is only migratable by the admin', async () => { + await expect( + registry + .connect(owner) + .migrateUpkeeps([upkeepId], mgRegistry.address), + ).to.be.revertedWith('OnlyCallableByAdmin()') + await registry + .connect(admin) + .migrateUpkeeps([upkeepId], mgRegistry.address) + }) + }) + + context('when permissions are not set', () => { + it('reverts', async () => { + // no permissions + await registry.setPeerRegistryMigrationPermission(mgRegistry.address, 0) + await mgRegistry.setPeerRegistryMigrationPermission(registry.address, 0) + await expect(registry.migrateUpkeeps([upkeepId], mgRegistry.address)).to + .be.reverted + // only outgoing permissions + await registry.setPeerRegistryMigrationPermission(mgRegistry.address, 1) + await mgRegistry.setPeerRegistryMigrationPermission(registry.address, 0) + await expect(registry.migrateUpkeeps([upkeepId], mgRegistry.address)).to + .be.reverted + // only incoming permissions + await registry.setPeerRegistryMigrationPermission(mgRegistry.address, 0) + await 
mgRegistry.setPeerRegistryMigrationPermission(registry.address, 2) + await expect(registry.migrateUpkeeps([upkeepId], mgRegistry.address)).to + .be.reverted + // permissions opposite direction + await registry.setPeerRegistryMigrationPermission(mgRegistry.address, 2) + await mgRegistry.setPeerRegistryMigrationPermission(registry.address, 1) + await expect(registry.migrateUpkeeps([upkeepId], mgRegistry.address)).to + .be.reverted + }) + }) + }) + + describe('#setPayees', () => { + const IGNORE_ADDRESS = '0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF' + + it('reverts when not called by the owner', async () => { + await evmRevert( + registry.connect(keeper1).setPayees(payees), + 'Only callable by owner', + ) + }) + + it('reverts with different numbers of payees than transmitters', async () => { + await evmRevert( + registry.connect(owner).setPayees([...payees, randomAddress()]), + 'ParameterLengthError()', + ) + }) + + it('reverts if the payee is the zero address', async () => { + await blankRegistry.connect(owner).setConfig(...baseConfig) // used to test initial config + + await evmRevert( + blankRegistry // used to test initial config + .connect(owner) + .setPayees([ethers.constants.AddressZero, ...payees.slice(1)]), + 'InvalidPayee()', + ) + }) + + itMaybe( + 'sets the payees when exisitng payees are zero address', + async () => { + //Initial payees should be zero address + await blankRegistry.connect(owner).setConfig(...baseConfig) // used to test initial config + + for (let i = 0; i < keeperAddresses.length; i++) { + const payee = ( + await blankRegistry.getTransmitterInfo(keeperAddresses[i]) + ).payee // used to test initial config + assert.equal(payee, zeroAddress) + } + + await blankRegistry.connect(owner).setPayees(payees) // used to test initial config + + for (let i = 0; i < keeperAddresses.length; i++) { + const payee = ( + await blankRegistry.getTransmitterInfo(keeperAddresses[i]) + ).payee + assert.equal(payee, payees[i]) + } + }, + ) + + it('does not 
change the payee if IGNORE_ADDRESS is used as payee', async () => { + const signers = Array.from({ length: 5 }, randomAddress) + const keepers = Array.from({ length: 5 }, randomAddress) + const payees = Array.from({ length: 5 }, randomAddress) + const newTransmitter = randomAddress() + const newPayee = randomAddress() + const ignoreAddresses = new Array(payees.length).fill(IGNORE_ADDRESS) + const newPayees = [...ignoreAddresses, newPayee] + // arbitrum registry + // configure registry with 5 keepers // optimism registry + await blankRegistry // used to test initial configurations + .connect(owner) + .setConfigTypeSafe( + signers, + keepers, + f, + config, + offchainVersion, + offchainBytes, + ) + // arbitrum registry + // set initial payees // optimism registry + await blankRegistry.connect(owner).setPayees(payees) // used to test initial configurations + // arbitrum registry + // add another keeper // optimism registry + await blankRegistry // used to test initial configurations + .connect(owner) + .setConfigTypeSafe( + [...signers, randomAddress()], + [...keepers, newTransmitter], + f, + config, + offchainVersion, + offchainBytes, + ) + // arbitrum registry + // update payee list // optimism registry // arbitrum registry + await blankRegistry.connect(owner).setPayees(newPayees) // used to test initial configurations // optimism registry + const ignored = await blankRegistry.getTransmitterInfo(newTransmitter) // used to test initial configurations + assert.equal(newPayee, ignored.payee) + assert.equal(true, ignored.active) + }) + + it('reverts if payee is non zero and owner tries to change payee', async () => { + const newPayees = [randomAddress(), ...payees.slice(1)] + + await evmRevert( + registry.connect(owner).setPayees(newPayees), + 'InvalidPayee()', + ) + }) + + it('emits events for every payee added and removed', async () => { + const tx = await registry.connect(owner).setPayees(payees) + await expect(tx) + .to.emit(registry, 'PayeesUpdated') + 
.withArgs(keeperAddresses, payees) + }) + }) + + describe('#cancelUpkeep', () => { + it('reverts if the ID is not valid', async () => { + await evmRevert( + registry.connect(owner).cancelUpkeep(upkeepId.add(1)), + 'CannotCancel()', + ) + }) + + it('reverts if called by a non-owner/non-admin', async () => { + await evmRevert( + registry.connect(keeper1).cancelUpkeep(upkeepId), + 'OnlyCallableByOwnerOrAdmin()', + ) + }) + + describe('when called by the owner', async () => { + it('sets the registration to invalid immediately', async () => { + const tx = await registry.connect(owner).cancelUpkeep(upkeepId) + const receipt = await tx.wait() + const registration = await registry.getUpkeep(upkeepId) + assert.equal( + registration.maxValidBlocknumber.toNumber(), + receipt.blockNumber, + ) + }) + + it('emits an event', async () => { + const tx = await registry.connect(owner).cancelUpkeep(upkeepId) + const receipt = await tx.wait() + await expect(tx) + .to.emit(registry, 'UpkeepCanceled') + .withArgs(upkeepId, BigNumber.from(receipt.blockNumber)) + }) + + it('immediately prevents upkeep', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + + const tx = await getTransmitTx(registry, keeper1, [upkeepId]) + const receipt = await tx.wait() + const cancelledUpkeepReportLogs = + parseCancelledUpkeepReportLogs(receipt) + // exactly 1 CancelledUpkeepReport log should be emitted + assert.equal(cancelledUpkeepReportLogs.length, 1) + }) + + it('does not revert if reverts if called multiple times', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(owner).cancelUpkeep(upkeepId), + 'CannotCancel()', + ) + }) + + describe('when called by the owner when the admin has just canceled', () => { + let oldExpiration: BigNumber + + beforeEach(async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + const registration = await registry.getUpkeep(upkeepId) + oldExpiration = registration.maxValidBlocknumber + }) 
+ + it('allows the owner to cancel it more quickly', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + + const registration = await registry.getUpkeep(upkeepId) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) + }) + }) + + describe('when called by the admin', async () => { + it('reverts if called again by the admin', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).cancelUpkeep(upkeepId), + 'CannotCancel()', + ) + }) + + it('reverts if called by the owner after the timeout', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + for (let i = 0; i < cancellationDelay; i++) { + await ethers.provider.send('evm_mine', []) + } + + await evmRevert( + registry.connect(owner).cancelUpkeep(upkeepId), + 'CannotCancel()', + ) + }) + + it('sets the registration to invalid in 50 blocks', async () => { + const tx = await registry.connect(admin).cancelUpkeep(upkeepId) + const receipt = await tx.wait() + const registration = await registry.getUpkeep(upkeepId) + assert.equal( + registration.maxValidBlocknumber.toNumber(), + receipt.blockNumber + 50, + ) + }) + + it('emits an event', async () => { + const tx = await registry.connect(admin).cancelUpkeep(upkeepId) + const receipt = await tx.wait() + await expect(tx) + .to.emit(registry, 'UpkeepCanceled') + .withArgs( + upkeepId, + BigNumber.from(receipt.blockNumber + cancellationDelay), + ) + }) + + it('immediately prevents upkeep', async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + await registry.connect(admin).cancelUpkeep(upkeepId) + + await getTransmitTx(registry, keeper1, [upkeepId]) + + for (let i = 0; i < cancellationDelay; i++) { + await ethers.provider.send('evm_mine', []) + } + + const tx = await getTransmitTx(registry, keeper1, [upkeepId]) + + const receipt 
= await tx.wait() + const cancelledUpkeepReportLogs = + parseCancelledUpkeepReportLogs(receipt) + // exactly 1 CancelledUpkeepReport log should be emitted + assert.equal(cancelledUpkeepReportLogs.length, 1) + }) + + describeMaybe('when an upkeep has been performed', async () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + await getTransmitTx(registry, keeper1, [upkeepId]) + }) + + it('deducts a cancellation fee from the upkeep and gives to owner', async () => { + const minUpkeepSpend = toWei('10') + + await registry.connect(owner).setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrars: [], + upkeepPrivilegeManager: upkeepManager, + chainModule: chainModuleBase.address, + reorgProtectionEnabled: true, + }, + offchainVersion, + offchainBytes, + ) + + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const upkeepBefore = (await registry.getUpkeep(upkeepId)).balance + const ownerBefore = (await registry.getState()).state.ownerLinkBalance + + const amountSpent = toWei('100').sub(upkeepBefore) + const cancellationFee = minUpkeepSpend.sub(amountSpent) + + await registry.connect(admin).cancelUpkeep(upkeepId) + + const payee1After = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const upkeepAfter = (await registry.getUpkeep(upkeepId)).balance + const ownerAfter = (await registry.getState()).state.ownerLinkBalance + + // post upkeep balance should be previous balance minus cancellation fee + assert.isTrue(upkeepBefore.sub(cancellationFee).eq(upkeepAfter)) + // payee balance should not change + 
assert.isTrue(payee1Before.eq(payee1After)) + // owner should receive the cancellation fee + assert.isTrue(ownerAfter.sub(ownerBefore).eq(cancellationFee)) + }) + + it('deducts up to balance as cancellation fee', async () => { + // Very high min spend, should deduct whole balance as cancellation fees + const minUpkeepSpend = toWei('1000') + await registry.connect(owner).setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrars: [], + upkeepPrivilegeManager: upkeepManager, + chainModule: chainModuleBase.address, + reorgProtectionEnabled: true, + }, + offchainVersion, + offchainBytes, + ) + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const upkeepBefore = (await registry.getUpkeep(upkeepId)).balance + const ownerBefore = (await registry.getState()).state.ownerLinkBalance + + await registry.connect(admin).cancelUpkeep(upkeepId) + const payee1After = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const ownerAfter = (await registry.getState()).state.ownerLinkBalance + const upkeepAfter = (await registry.getUpkeep(upkeepId)).balance + + // all upkeep balance is deducted for cancellation fee + assert.equal(0, upkeepAfter.toNumber()) + // payee balance should not change + assert.isTrue(payee1After.eq(payee1Before)) + // all upkeep balance is transferred to the owner + assert.isTrue(ownerAfter.sub(ownerBefore).eq(upkeepBefore)) + }) + + it('does not deduct cancellation fee if more than minUpkeepSpend is spent', async () => { + // Very low min spend, already spent in one perform upkeep + const minUpkeepSpend = BigNumber.from(420) + await registry.connect(owner).setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + { + 
paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrars: [], + upkeepPrivilegeManager: upkeepManager, + chainModule: chainModuleBase.address, + reorgProtectionEnabled: true, + }, + offchainVersion, + offchainBytes, + ) + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const upkeepBefore = (await registry.getUpkeep(upkeepId)).balance + const ownerBefore = (await registry.getState()).state.ownerLinkBalance + + await registry.connect(admin).cancelUpkeep(upkeepId) + const payee1After = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const ownerAfter = (await registry.getState()).state.ownerLinkBalance + const upkeepAfter = (await registry.getUpkeep(upkeepId)).balance + + // upkeep does not pay cancellation fee after cancellation because minimum upkeep spent is met + assert.isTrue(upkeepBefore.eq(upkeepAfter)) + // owner balance does not change + assert.isTrue(ownerAfter.eq(ownerBefore)) + // payee balance does not change + assert.isTrue(payee1Before.eq(payee1After)) + }) + }) + }) + }) + + describe('#withdrawPayment', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + await getTransmitTx(registry, keeper1, [upkeepId]) + }) + + it('reverts if called by anyone but the payee', async () => { + await evmRevert( + registry + .connect(payee2) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ), + 'OnlyCallableByPayee()', + ) + }) + + it('reverts if called with the 0 address', async () => { + await evmRevert( + registry + .connect(payee2) + .withdrawPayment(await keeper1.getAddress(), zeroAddress), + 'InvalidRecipient()', + ) + }) + + it('updates 
the balances', async () => { + const to = await nonkeeper.getAddress() + const keeperBefore = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const registrationBefore = (await registry.getUpkeep(upkeepId)).balance + const toLinkBefore = await linkToken.balanceOf(to) + const registryLinkBefore = await linkToken.balanceOf(registry.address) + const registryPremiumBefore = (await registry.getState()).state + .totalPremium + const ownerBefore = (await registry.getState()).state.ownerLinkBalance + + // Withdrawing for first time, last collected = 0 + assert.equal(keeperBefore.lastCollected.toString(), '0') + + //// Do the thing + await registry + .connect(payee1) + .withdrawPayment(await keeper1.getAddress(), to) + + const keeperAfter = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const registrationAfter = (await registry.getUpkeep(upkeepId)).balance + const toLinkAfter = await linkToken.balanceOf(to) + const registryLinkAfter = await linkToken.balanceOf(registry.address) + const registryPremiumAfter = (await registry.getState()).state + .totalPremium + const ownerAfter = (await registry.getState()).state.ownerLinkBalance + + // registry total premium should not change + assert.isTrue(registryPremiumBefore.eq(registryPremiumAfter)) + + // Last collected should be updated to premium-change + assert.isTrue( + keeperAfter.lastCollected.eq( + registryPremiumBefore.sub( + registryPremiumBefore.mod(keeperAddresses.length), + ), + ), + ) + + // owner balance should remain unchanged + assert.isTrue(ownerAfter.eq(ownerBefore)) + + assert.isTrue(keeperAfter.balance.eq(BigNumber.from(0))) + assert.isTrue(registrationBefore.eq(registrationAfter)) + assert.isTrue(toLinkBefore.add(keeperBefore.balance).eq(toLinkAfter)) + assert.isTrue( + registryLinkBefore.sub(keeperBefore.balance).eq(registryLinkAfter), + ) + }) + + it('emits a log announcing the withdrawal', async () => { + const balance = ( + await registry.getTransmitterInfo(await 
keeper1.getAddress()) + ).balance + const tx = await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + await expect(tx) + .to.emit(registry, 'PaymentWithdrawn') + .withArgs( + await keeper1.getAddress(), + balance, + await nonkeeper.getAddress(), + await payee1.getAddress(), + ) + }) + }) + + describe('#checkCallback', () => { + it('returns false with appropriate failure reason when target callback reverts', async () => { + await streamsLookupUpkeep.setShouldRevertCallback(true) + + const values: any[] = ['0x1234', '0xabcd'] + const res = await registry + .connect(zeroAddress) + .callStatic.checkCallback(streamsLookupUpkeepId, values, '0x') + + assert.isFalse(res.upkeepNeeded) + assert.equal(res.performData, '0x') + assert.equal( + res.upkeepFailureReason, + UpkeepFailureReason.CHECK_CALLBACK_REVERTED, + ) + assert.isTrue(res.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('returns false with appropriate failure reason when target callback returns big performData', async () => { + let longBytes = '0x' + for (let i = 0; i <= maxPerformDataSize.toNumber(); i++) { + longBytes += '11' + } + const values: any[] = [longBytes, longBytes] + const res = await registry + .connect(zeroAddress) + .callStatic.checkCallback(streamsLookupUpkeepId, values, '0x') + + assert.isFalse(res.upkeepNeeded) + assert.equal(res.performData, '0x') + assert.equal( + res.upkeepFailureReason, + UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT, + ) + assert.isTrue(res.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('returns false with appropriate failure reason when target callback returns false', async () => { + await streamsLookupUpkeep.setCallbackReturnBool(false) + const values: any[] = ['0x1234', '0xabcd'] + const res = await registry + .connect(zeroAddress) + .callStatic.checkCallback(streamsLookupUpkeepId, values, '0x') + + assert.isFalse(res.upkeepNeeded) + 
assert.equal(res.performData, '0x') + assert.equal( + res.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_NOT_NEEDED, + ) + assert.isTrue(res.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('succeeds with upkeep needed', async () => { + const values: any[] = ['0x1234', '0xabcd'] + + const res = await registry + .connect(zeroAddress) + .callStatic.checkCallback(streamsLookupUpkeepId, values, '0x') + const expectedPerformData = ethers.utils.defaultAbiCoder.encode( + ['bytes[]', 'bytes'], + [values, '0x'], + ) + + assert.isTrue(res.upkeepNeeded) + assert.equal(res.performData, expectedPerformData) + assert.equal(res.upkeepFailureReason, UpkeepFailureReason.NONE) + assert.isTrue(res.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + }) + + describe('#setUpkeepPrivilegeConfig() / #getUpkeepPrivilegeConfig()', () => { + it('reverts when non manager tries to set privilege config', async () => { + await evmRevert( + registry.connect(payee3).setUpkeepPrivilegeConfig(upkeepId, '0x1234'), + 'OnlyCallableByUpkeepPrivilegeManager()', + ) + }) + + it('returns empty bytes for upkeep privilege config before setting', async () => { + const cfg = await registry.getUpkeepPrivilegeConfig(upkeepId) + assert.equal(cfg, '0x') + }) + + it('allows upkeep manager to set privilege config', async () => { + const tx = await registry + .connect(personas.Norbert) + .setUpkeepPrivilegeConfig(upkeepId, '0x1234') + await expect(tx) + .to.emit(registry, 'UpkeepPrivilegeConfigSet') + .withArgs(upkeepId, '0x1234') + + const cfg = await registry.getUpkeepPrivilegeConfig(upkeepId) + assert.equal(cfg, '0x1234') + }) + }) + + describe('#setAdminPrivilegeConfig() / #getAdminPrivilegeConfig()', () => { + const admin = randomAddress() + + it('reverts when non manager tries to set privilege config', async () => { + await evmRevert( + registry.connect(payee3).setAdminPrivilegeConfig(admin, '0x1234'), + 'OnlyCallableByUpkeepPrivilegeManager()', + ) + }) + + it('returns 
empty bytes for upkeep privilege config before setting', async () => { + const cfg = await registry.getAdminPrivilegeConfig(admin) + assert.equal(cfg, '0x') + }) + + it('allows upkeep manager to set privilege config', async () => { + const tx = await registry + .connect(personas.Norbert) + .setAdminPrivilegeConfig(admin, '0x1234') + await expect(tx) + .to.emit(registry, 'AdminPrivilegeConfigSet') + .withArgs(admin, '0x1234') + + const cfg = await registry.getAdminPrivilegeConfig(admin) + assert.equal(cfg, '0x1234') + }) + }) + + describe('transmitterPremiumSplit [ @skip-coverage ]', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + }) + + it('splits premium evenly across transmitters', async () => { + // Do a transmit from keeper1 + await getTransmitTx(registry, keeper1, [upkeepId]) + + const registryPremium = (await registry.getState()).state.totalPremium + assert.isTrue(registryPremium.gt(BigNumber.from(0))) + + const premiumPerTransmitter = registryPremium.div( + BigNumber.from(keeperAddresses.length), + ) + const k1Balance = ( + await registry.getTransmitterInfo(await keeper1.getAddress()) + ).balance + // transmitter should be reimbursed for gas and get the premium + assert.isTrue(k1Balance.gt(premiumPerTransmitter)) + const k1GasReimbursement = k1Balance.sub(premiumPerTransmitter) + + const k2Balance = ( + await registry.getTransmitterInfo(await keeper2.getAddress()) + ).balance + // non transmitter should get its share of premium + assert.isTrue(k2Balance.eq(premiumPerTransmitter)) + + // Now do a transmit from keeper 2 + await getTransmitTx(registry, keeper2, [upkeepId]) + const registryPremiumNew = (await registry.getState()).state.totalPremium + assert.isTrue(registryPremiumNew.gt(registryPremium)) + const premiumPerTransmitterNew = registryPremiumNew.div( + BigNumber.from(keeperAddresses.length), + ) + const additionalPremium = 
premiumPerTransmitterNew.sub( + premiumPerTransmitter, + ) + + const k1BalanceNew = ( + await registry.getTransmitterInfo(await keeper1.getAddress()) + ).balance + // k1 should get the new premium + assert.isTrue( + k1BalanceNew.eq(k1GasReimbursement.add(premiumPerTransmitterNew)), + ) + + const k2BalanceNew = ( + await registry.getTransmitterInfo(await keeper2.getAddress()) + ).balance + // k2 should get gas reimbursement in addition to new premium + assert.isTrue(k2BalanceNew.gt(k2Balance.add(additionalPremium))) + }) + + it('updates last collected upon payment withdrawn', async () => { + // Do a transmit from keeper1 + await getTransmitTx(registry, keeper1, [upkeepId]) + + const registryPremium = (await registry.getState()).state.totalPremium + const k1 = await registry.getTransmitterInfo(await keeper1.getAddress()) + const k2 = await registry.getTransmitterInfo(await keeper2.getAddress()) + + // Withdrawing for first time, last collected = 0 + assert.isTrue(k1.lastCollected.eq(BigNumber.from(0))) + assert.isTrue(k2.lastCollected.eq(BigNumber.from(0))) + + //// Do the thing + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + + const k1New = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const k2New = await registry.getTransmitterInfo( + await keeper2.getAddress(), + ) + + // transmitter info lastCollected should be updated for k1, not for k2 + assert.isTrue( + k1New.lastCollected.eq( + registryPremium.sub(registryPremium.mod(keeperAddresses.length)), + ), + ) + assert.isTrue(k2New.lastCollected.eq(BigNumber.from(0))) + }) + + itMaybe( + 'maintains consistent balance information across all parties', + async () => { + // throughout transmits, withdrawals, setConfigs total claim on balances should remain less than expected balance + // some spare change can get lost but it should be less than maxAllowedSpareChange + + let maxAllowedSpareChange = BigNumber.from('0') + 
await verifyConsistentAccounting(maxAllowedSpareChange) + + await getTransmitTx(registry, keeper1, [upkeepId]) + maxAllowedSpareChange = maxAllowedSpareChange.add(BigNumber.from('31')) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry + .connect(payee2) + .withdrawPayment( + await keeper2.getAddress(), + await nonkeeper.getAddress(), + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await getTransmitTx(registry, keeper1, [upkeepId]) + maxAllowedSpareChange = maxAllowedSpareChange.add(BigNumber.from('31')) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry.connect(owner).setConfigTypeSafe( + signerAddresses.slice(2, 15), // only use 2-14th index keepers + keeperAddresses.slice(2, 15), + f, + config, + offchainVersion, + offchainBytes, + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await getTransmitTx(registry, keeper3, [upkeepId], { + startingSignerIndex: 2, + }) + maxAllowedSpareChange = maxAllowedSpareChange.add(BigNumber.from('13')) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry + .connect(payee3) + .withdrawPayment( + await keeper3.getAddress(), + await nonkeeper.getAddress(), + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry.connect(owner).setConfigTypeSafe( + signerAddresses.slice(0, 4), // only use 0-3rd index keepers + keeperAddresses.slice(0, 4), + f, + config, + offchainVersion, + offchainBytes, + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + await getTransmitTx(registry, keeper1, [upkeepId]) + maxAllowedSpareChange = 
maxAllowedSpareChange.add(BigNumber.from('4')) + await getTransmitTx(registry, keeper3, [upkeepId]) + maxAllowedSpareChange = maxAllowedSpareChange.add(BigNumber.from('4')) + + await verifyConsistentAccounting(maxAllowedSpareChange) + await registry + .connect(payee5) + .withdrawPayment( + await keeper5.getAddress(), + await nonkeeper.getAddress(), + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + }, + ) + }) +}) diff --git a/contracts/test/v0.8/automation/CronUpkeep.test.ts b/contracts/test/v0.8/automation/CronUpkeep.test.ts new file mode 100644 index 00000000..649cec98 --- /dev/null +++ b/contracts/test/v0.8/automation/CronUpkeep.test.ts @@ -0,0 +1,561 @@ +import moment from 'moment' +import { ethers } from 'hardhat' +import { Contract } from 'ethers' +import { assert, expect } from 'chai' +import { CronUpkeepTestHelper } from '../../../typechain/CronUpkeepTestHelper' +import { CronUpkeepDelegate } from '../../../typechain/CronUpkeepDelegate' +import { CronUpkeepFactory } from '../../../typechain/CronUpkeepFactory' +import { CronUpkeepTestHelper__factory as CronUpkeepTestHelperFactory } from '../../../typechain/factories/CronUpkeepTestHelper__factory' +import { CronInternalTestHelper } from '../../../typechain/CronInternalTestHelper' +import { CronReceiver } from '../../../typechain/CronReceiver' +import { BigNumber, BigNumberish } from '@ethersproject/bignumber' +import type { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { validCrons } from '../../test-helpers/fixtures' +import * as h from '../../test-helpers/helpers' + +const { utils } = ethers +const { AddressZero } = ethers.constants + +const OWNABLE_ERR = 'Only callable by owner' +const CRON_NOT_FOUND_ERR = 'CronJobIDNotFound' + +let cron: CronUpkeepTestHelper +let cronFactory: 
CronUpkeepTestHelperFactory // the typechain factory that deploys cron upkeep contracts +let cronFactoryContract: CronUpkeepFactory // the cron factory contract +let cronDelegate: CronUpkeepDelegate +let cronTestHelper: CronInternalTestHelper +let cronReceiver1: CronReceiver +let cronReceiver2: CronReceiver + +let admin: SignerWithAddress +let owner: SignerWithAddress +let stranger: SignerWithAddress + +const timeStamp = 32503680000 // Jan 1, 3000 12:00AM +const basicCronString = '0 * * * *' + +let handler1Sig: string +let handler2Sig: string +let revertHandlerSig: string +let basicSpec: string + +async function assertJobIDsEqual(expected: number[]) { + const ids = (await cron.getActiveCronJobIDs()).map((n) => n.toNumber()) + assert.deepEqual(ids.sort(), expected.sort()) +} + +function decodePayload(payload: string) { + return utils.defaultAbiCoder.decode( + ['uint256', 'uint256', 'address', 'bytes'], + payload, + ) as [BigNumber, BigNumber, string, string] +} + +function encodePayload(payload: [BigNumberish, BigNumberish, string, string]) { + return utils.defaultAbiCoder.encode( + ['uint256', 'uint256', 'address', 'bytes'], + payload, + ) +} + +async function createBasicCron() { + return await cron.createCronJobFromEncodedSpec( + cronReceiver1.address, + handler1Sig, + basicSpec, + ) +} + +describe('CronUpkeep', () => { + beforeEach(async () => { + const accounts = await ethers.getSigners() + admin = accounts[0] + owner = accounts[1] + stranger = accounts[2] + const crFactory = await ethers.getContractFactory('CronReceiver', owner) + cronReceiver1 = await crFactory.deploy() + cronReceiver2 = await crFactory.deploy() + const cronDelegateFactory = await ethers.getContractFactory( + 'CronUpkeepDelegate', + admin, + ) + cronDelegate = await cronDelegateFactory.deploy() + const cronExternalFactory = await ethers.getContractFactory( + 'src/v0.8/automation/libraries/external/Cron.sol:Cron', + admin, + ) + const cronExternalLib = await cronExternalFactory.deploy() + 
cronFactory = await ethers.getContractFactory('CronUpkeepTestHelper', { + signer: admin, + libraries: { Cron: cronExternalLib.address }, + }) + cron = ( + await cronFactory.deploy(owner.address, cronDelegate.address, 5, []) + ).connect(owner) + const cronFactoryContractFactory = await ethers.getContractFactory( + 'CronUpkeepFactory', + { signer: admin, libraries: { Cron: cronExternalLib.address } }, + ) // the typechain factory that creates the cron factory contract + cronFactoryContract = await cronFactoryContractFactory.deploy() + const fs = cronReceiver1.interface.functions + handler1Sig = utils.id(fs['handler1()'].format('sighash')).slice(0, 10) + handler2Sig = utils.id(fs['handler2()'].format('sighash')).slice(0, 10) + revertHandlerSig = utils + .id(fs['revertHandler()'].format('sighash')) + .slice(0, 10) + const cronTHFactory = await ethers.getContractFactory( + 'CronInternalTestHelper', + ) + cronTestHelper = await cronTHFactory.deploy() + basicSpec = await cronFactoryContract.encodeCronString(basicCronString) + }) + + afterEach(async () => { + await h.reset() + }) + + it('has a limited public ABI [ @skip-coverage ]', () => { + // Casting cron is necessary due to a tricky versioning mismatch issue, likely between ethers + // and typechain. Remove once the version issue is resolved. 
+ // https://app.shortcut.com/pluginlabs/story/21905/remove-contract-cast-in-cronupkeep-test-ts + h.publicAbi(cron as unknown as Contract, [ + 's_maxJobs', + 'performUpkeep', + 'createCronJobFromEncodedSpec', + 'updateCronJob', + 'deleteCronJob', + 'checkUpkeep', + 'getActiveCronJobIDs', + 'getCronJob', + // Ownable methods: + 'acceptOwnership', + 'owner', + 'transferOwnership', + // Pausable methods + 'paused', + 'pause', + 'unpause', + // Cron helper methods + 'createCronJobFromString', + 'txCheckUpkeep', + ]) + }) + + describe('constructor()', () => { + it('sets the initial values', async () => { + expect(await cron.owner()).to.equal(owner.address) + expect(await cron.s_maxJobs()).to.equal(5) + }) + + it('optionally creates a first job', async () => { + const payload = await cronFactoryContract.encodeCronJob( + cronReceiver1.address, + handler1Sig, + basicCronString, + ) + cron = ( + await cronFactory.deploy( + owner.address, + cronDelegate.address, + 5, + payload, + ) + ).connect(owner) + const job = await cron.getCronJob(1) + assert.equal(job.target, cronReceiver1.address) + assert.equal(job.handler, handler1Sig) + assert.equal(job.cronString, basicCronString) + }) + }) + + describe('checkUpkeep() / performUpkeep()', () => { + beforeEach(async () => { + await h.setTimestamp(timeStamp) + // id 1 + await cron.createCronJobFromString( + cronReceiver1.address, + handler1Sig, + '0 0 31 * *', // 31st day of every month + ) + // id 2 + await cron.createCronJobFromString( + cronReceiver1.address, + handler2Sig, + '10 * * * *', // on the 10 min mark + ) + // id 3 + await cron.createCronJobFromString( + cronReceiver2.address, + handler1Sig, + '0 0 * 7 *', // every day in July + ) + // id 4 + await cron.createCronJobFromString( + cronReceiver2.address, + revertHandlerSig, + '20 * * * *', // on the 20 min mark + ) + }) + + describe('checkUpkeep()', () => { + it('returns false if no one is elligible', async () => { + const [needsUpkeep] = await cron + .connect(AddressZero) 
+ .callStatic.checkUpkeep('0x') + assert.isFalse(needsUpkeep) + }) + + it('returns the id of eligible cron jobs', async () => { + await h.fastForward(moment.duration(11, 'minutes').asSeconds()) + const [needsUpkeep, payload] = await cron + .connect(AddressZero) + .callStatic.checkUpkeep('0x') + assert.isTrue(needsUpkeep) + const [id, ..._] = decodePayload(payload) + assert.equal(id.toNumber(), 2) + }) + + describe('when mutiple crons are elligible', () => { + it('cycles through the cron IDs based on block number', async () => { + await h.fastForward(moment.duration(1, 'year').asSeconds()) + let [_, payload] = await cron + .connect(AddressZero) + .callStatic.checkUpkeep('0x') + const [id1] = decodePayload(payload) + await h.mineBlock(ethers.provider) + ;[_, payload] = await cron + .connect(AddressZero) + .callStatic.checkUpkeep('0x') + const [id2] = decodePayload(payload) + await h.mineBlock(ethers.provider) + ;[_, payload] = await cron + .connect(AddressZero) + .callStatic.checkUpkeep('0x') + const [id3] = decodePayload(payload) + await h.mineBlock(ethers.provider) + ;[_, payload] = await cron + .connect(AddressZero) + .callStatic.checkUpkeep('0x') + const [id4] = decodePayload(payload) + assert.deepEqual( + [id1, id2, id3, id4].map((n) => n.toNumber()).sort(), + [1, 2, 3, 4], + ) + }) + }) + }) + + describe('performUpkeep()', () => { + it('forwards the call to the appropriate target/handler', async () => { + await h.fastForward(moment.duration(11, 'minutes').asSeconds()) + const [needsUpkeep, payload] = await cron + .connect(AddressZero) + .callStatic.checkUpkeep('0x') + assert.isTrue(needsUpkeep) + await expect(cron.performUpkeep(payload)).to.emit( + cronReceiver1, + 'Received2', + ) + }) + + it('emits an event', async () => { + await h.fastForward(moment.duration(11, 'minutes').asSeconds()) + const [needsUpkeep, payload] = await cron + .connect(AddressZero) + .callStatic.checkUpkeep('0x') + assert.isTrue(needsUpkeep) + await expect(cron.performUpkeep(payload)) + 
.to.emit(cron, 'CronJobExecuted') + .withArgs(2, true) + }) + + it('succeeds even if the call to the target fails', async () => { + await cron.deleteCronJob(2) + await h.fastForward(moment.duration(21, 'minutes').asSeconds()) + const payload = encodePayload([ + 4, + moment.unix(timeStamp).add(20, 'minutes').unix(), + cronReceiver2.address, + revertHandlerSig, + ]) + await expect(cron.performUpkeep(payload)) + .to.emit(cron, 'CronJobExecuted') + .withArgs(4, false) + }) + + it('is only callable by anyone', async () => { + await h.fastForward(moment.duration(11, 'minutes').asSeconds()) + const [needsUpkeep, payload] = await cron + .connect(AddressZero) + .callStatic.checkUpkeep('0x') + assert.isTrue(needsUpkeep) + await cron.connect(stranger).performUpkeep(payload) + }) + + it('is only callable once for a given tick', async () => { + await h.fastForward(moment.duration(10, 'minutes').asSeconds()) + const [needsUpkeep, payload] = await cron + .connect(AddressZero) + .callStatic.checkUpkeep('0x') + assert.isTrue(needsUpkeep) + const maliciousPayload = encodePayload([ + 2, + moment.unix(timeStamp).add(10, 'minutes').add(59, 'seconds').unix(), + cronReceiver1.address, + handler2Sig, + ]) + await cron.performUpkeep(payload) + await expect(cron.performUpkeep(payload)).to.be.reverted + await expect(cron.performUpkeep(maliciousPayload)).to.be.reverted + await h.fastForward(moment.duration(1, 'minute').asSeconds()) + await expect(cron.performUpkeep(payload)).to.be.reverted + await expect(cron.performUpkeep(maliciousPayload)).to.be.reverted + await h.fastForward(moment.duration(10, 'minute').asSeconds()) + await expect(cron.performUpkeep(payload)).to.be.reverted + await expect(cron.performUpkeep(maliciousPayload)).to.be.reverted + }) + }) + }) + + describe('createCronJobFromEncodedSpec()', () => { + it('creates jobs with sequential IDs', async () => { + const cronString1 = '0 * * * *' + const cronString2 = '0 1,2,3 */4 5-6 1-2' + const encodedSpec1 = + await 
cronFactoryContract.encodeCronString(cronString1) + const encodedSpec2 = + await cronFactoryContract.encodeCronString(cronString2) + const nextTick1 = ( + await cronTestHelper.calculateNextTick(cronString1) + ).toNumber() + const nextTick2 = ( + await cronTestHelper.calculateNextTick(cronString2) + ).toNumber() + await cron.createCronJobFromEncodedSpec( + cronReceiver1.address, + handler1Sig, + encodedSpec1, + ) + await assertJobIDsEqual([1]) + await cron.createCronJobFromEncodedSpec( + cronReceiver1.address, + handler2Sig, + encodedSpec1, + ) + await assertJobIDsEqual([1, 2]) + await cron.createCronJobFromEncodedSpec( + cronReceiver2.address, + handler1Sig, + encodedSpec2, + ) + await assertJobIDsEqual([1, 2, 3]) + await cron.createCronJobFromEncodedSpec( + cronReceiver2.address, + handler2Sig, + encodedSpec2, + ) + await assertJobIDsEqual([1, 2, 3, 4]) + const cron1 = await cron.getCronJob(1) + const cron2 = await cron.getCronJob(2) + const cron3 = await cron.getCronJob(3) + const cron4 = await cron.getCronJob(4) + assert.equal(cron1.target, cronReceiver1.address) + assert.equal(cron1.handler, handler1Sig) + assert.equal(cron1.cronString, cronString1) + assert.equal(cron1.nextTick.toNumber(), nextTick1) + assert.equal(cron2.target, cronReceiver1.address) + assert.equal(cron2.handler, handler2Sig) + assert.equal(cron2.cronString, cronString1) + assert.equal(cron2.nextTick.toNumber(), nextTick1) + assert.equal(cron3.target, cronReceiver2.address) + assert.equal(cron3.handler, handler1Sig) + assert.equal(cron3.cronString, cronString2) + assert.equal(cron3.nextTick.toNumber(), nextTick2) + assert.equal(cron4.target, cronReceiver2.address) + assert.equal(cron4.handler, handler2Sig) + assert.equal(cron4.cronString, cronString2) + assert.equal(cron4.nextTick.toNumber(), nextTick2) + }) + + it('emits an event', async () => { + await expect(createBasicCron()).to.emit(cron, 'CronJobCreated') + }) + + it('is only callable by the owner', async () => { + await expect( + cron 
+ .connect(stranger) + .createCronJobFromEncodedSpec( + cronReceiver1.address, + handler1Sig, + basicSpec, + ), + ).to.be.revertedWith(OWNABLE_ERR) + }) + + it('errors if trying to create more jobs than allowed', async () => { + for (let idx = 0; idx < 5; idx++) { + await createBasicCron() + } + await expect(createBasicCron()).to.be.revertedWith('ExceedsMaxJobs') + }) + }) + + describe('updateCronJob()', () => { + const newCronString = '0 0 1 1 1' + let newEncodedSpec: string + beforeEach(async () => { + await createBasicCron() + newEncodedSpec = await cronFactoryContract.encodeCronString(newCronString) + }) + + it('updates a cron job', async () => { + let cron1 = await cron.getCronJob(1) + assert.equal(cron1.target, cronReceiver1.address) + assert.equal(cron1.handler, handler1Sig) + assert.equal(cron1.cronString, basicCronString) + await cron.updateCronJob( + 1, + cronReceiver2.address, + handler2Sig, + newEncodedSpec, + ) + cron1 = await cron.getCronJob(1) + assert.equal(cron1.target, cronReceiver2.address) + assert.equal(cron1.handler, handler2Sig) + assert.equal(cron1.cronString, newCronString) + }) + + it('emits an event', async () => { + await expect( + await cron.updateCronJob( + 1, + cronReceiver2.address, + handler2Sig, + newEncodedSpec, + ), + ).to.emit(cron, 'CronJobUpdated') + }) + + it('is only callable by the owner', async () => { + await expect( + cron + .connect(stranger) + .updateCronJob(1, cronReceiver2.address, handler2Sig, newEncodedSpec), + ).to.be.revertedWith(OWNABLE_ERR) + }) + + it('reverts if trying to update a non-existent ID', async () => { + await expect( + cron.updateCronJob( + 2, + cronReceiver2.address, + handler2Sig, + newEncodedSpec, + ), + ).to.be.revertedWith(CRON_NOT_FOUND_ERR) + }) + }) + + describe('deleteCronJob()', () => { + it("deletes a jobs by it's ID", async () => { + await createBasicCron() + await createBasicCron() + await createBasicCron() + await createBasicCron() + await assertJobIDsEqual([1, 2, 3, 4]) + await 
cron.deleteCronJob(2) + await expect(cron.getCronJob(2)).to.be.revertedWith(CRON_NOT_FOUND_ERR) + await expect(cron.deleteCronJob(2)).to.be.revertedWith(CRON_NOT_FOUND_ERR) + await assertJobIDsEqual([1, 3, 4]) + await cron.deleteCronJob(1) + await assertJobIDsEqual([3, 4]) + await cron.deleteCronJob(4) + await assertJobIDsEqual([3]) + await cron.deleteCronJob(3) + await assertJobIDsEqual([]) + }) + + it('emits an event', async () => { + await createBasicCron() + await expect(cron.deleteCronJob(1)).to.emit(cron, 'CronJobDeleted') + }) + + it('reverts if trying to delete a non-existent ID', async () => { + await createBasicCron() + await createBasicCron() + await expect(cron.deleteCronJob(0)).to.be.revertedWith(CRON_NOT_FOUND_ERR) + await expect(cron.deleteCronJob(3)).to.be.revertedWith(CRON_NOT_FOUND_ERR) + }) + }) + + describe('pause() / unpause()', () => { + it('is only callable by the owner', async () => { + await expect(cron.connect(stranger).pause()).to.be.reverted + await expect(cron.connect(stranger).unpause()).to.be.reverted + }) + + it('pauses / unpauses the contract', async () => { + expect(await cron.paused()).to.be.false + await cron.pause() + expect(await cron.paused()).to.be.true + await cron.unpause() + expect(await cron.paused()).to.be.false + }) + }) +}) + +// only run during pnpm test:gas +describe.skip('Cron Gas Usage', () => { + before(async () => { + const accounts = await ethers.getSigners() + admin = accounts[0] + owner = accounts[1] + const crFactory = await ethers.getContractFactory('CronReceiver', owner) + cronReceiver1 = await crFactory.deploy() + const cronDelegateFactory = await ethers.getContractFactory( + 'CronUpkeepDelegate', + owner, + ) + const cronDelegate = await cronDelegateFactory.deploy() + const cronExternalFactory = await ethers.getContractFactory( + 'src/v0.8/automation/libraries/external/Cron.sol:Cron', + admin, + ) + const cronExternalLib = await cronExternalFactory.deploy() + const cronFactory = await 
ethers.getContractFactory( + 'CronUpkeepTestHelper', + { + signer: owner, + libraries: { Cron: cronExternalLib.address }, + }, + ) + cron = await cronFactory.deploy(owner.address, cronDelegate.address, 5, []) + const fs = cronReceiver1.interface.functions + handler1Sig = utils + .id(fs['handler1()'].format('sighash')) // TODO this seems like an ethers bug + .slice(0, 10) + }) + + describe('checkUpkeep() / performUpkeep()', () => { + it('uses gas', async () => { + for (let idx = 0; idx < validCrons.length; idx++) { + const cronString = validCrons[idx] + const cronID = idx + 1 + await cron.createCronJobFromString( + cronReceiver1.address, + handler1Sig, + cronString, + ) + await h.fastForward(moment.duration(100, 'years').asSeconds()) // long enough that at least 1 tick occurs + const [needsUpkeep, data] = await cron + .connect(AddressZero) + .callStatic.checkUpkeep('0x') + assert.isTrue(needsUpkeep, `failed for cron string ${cronString}`) + await cron.txCheckUpkeep('0x') + await cron.performUpkeep(data) + await cron.deleteCronJob(cronID) + } + }) + }) +}) diff --git a/contracts/test/v0.8/automation/CronUpkeepFactory.test.ts b/contracts/test/v0.8/automation/CronUpkeepFactory.test.ts new file mode 100644 index 00000000..e9a7de83 --- /dev/null +++ b/contracts/test/v0.8/automation/CronUpkeepFactory.test.ts @@ -0,0 +1,107 @@ +import { ethers } from 'hardhat' +import { Contract } from 'ethers' +import { assert, expect } from 'chai' +import { CronUpkeepFactory } from '../../../typechain/CronUpkeepFactory' +import type { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import * as h from '../../test-helpers/helpers' +import { reset } from '../../test-helpers/helpers' + +const OWNABLE_ERR = 'Only callable by owner' + +let cronExternalLib: Contract +let factory: CronUpkeepFactory + +let admin: SignerWithAddress +let owner: SignerWithAddress +let stranger: SignerWithAddress + +describe('CronUpkeepFactory', () => { + beforeEach(async () => { + const accounts = 
await ethers.getSigners() + admin = accounts[0] + owner = accounts[1] + stranger = accounts[2] + const cronExternalFactory = await ethers.getContractFactory( + 'src/v0.8/automation/libraries/external/Cron.sol:Cron', + admin, + ) + cronExternalLib = await cronExternalFactory.deploy() + const cronUpkeepFactoryFactory = await ethers.getContractFactory( + 'CronUpkeepFactory', + { + signer: admin, + libraries: { + Cron: cronExternalLib.address, + }, + }, + ) + factory = await cronUpkeepFactoryFactory.deploy() + }) + + afterEach(async () => { + await reset() + }) + + it('has a limited public ABI [ @skip-coverage ]', () => { + h.publicAbi(factory as unknown as Contract, [ + 's_maxJobs', + 'newCronUpkeep', + 'newCronUpkeepWithJob', + 'setMaxJobs', + 'cronDelegateAddress', + 'encodeCronString', + 'encodeCronJob', + // Ownable methods: + 'acceptOwnership', + 'owner', + 'transferOwnership', + ]) + }) + + describe('constructor()', () => { + it('deploys a delegate contract', async () => { + assert.notEqual( + await factory.cronDelegateAddress(), + ethers.constants.AddressZero, + ) + }) + }) + + describe('newCronUpkeep()', () => { + it('emits an event', async () => { + await expect(factory.connect(owner).newCronUpkeep()).to.emit( + factory, + 'NewCronUpkeepCreated', + ) + }) + it('sets the deployer as the owner', async () => { + const response = await factory.connect(owner).newCronUpkeep() + const { events } = await response.wait() + if (!events) { + assert.fail('no events emitted') + } + const upkeepAddress = events[0].args?.upkeep + const cronUpkeepFactory = await ethers.getContractFactory('CronUpkeep', { + libraries: { Cron: cronExternalLib.address }, + }) + assert( + await cronUpkeepFactory.attach(upkeepAddress).owner(), + owner.address, + ) + }) + }) + + describe('setMaxJobs()', () => { + it('sets the max jobs value', async () => { + expect(await factory.s_maxJobs()).to.equal(5) + await factory.setMaxJobs(6) + expect(await factory.s_maxJobs()).to.equal(6) + }) + + it('is 
only callable by the owner', async () => { + await expect(factory.connect(stranger).setMaxJobs(6)).to.be.revertedWith( + OWNABLE_ERR, + ) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/ERC20BalanceMonitor.test.ts b/contracts/test/v0.8/automation/ERC20BalanceMonitor.test.ts new file mode 100644 index 00000000..be497362 --- /dev/null +++ b/contracts/test/v0.8/automation/ERC20BalanceMonitor.test.ts @@ -0,0 +1,684 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import type { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { ReceiveEmitter } from '../../../typechain/ReceiveEmitter' +import { ReceiveFallbackEmitter } from '../../../typechain/ReceiveFallbackEmitter' +import * as h from '../../test-helpers/helpers' +import { ERC20BalanceMonitorExposed, LinkToken } from '../../../typechain' +import { BigNumber } from 'ethers' + +const OWNABLE_ERR = 'Only callable by owner' +const INVALID_WATCHLIST_ERR = `InvalidWatchList()` +const PAUSED_ERR = 'Pausable: paused' +const ONLY_KEEPER_ERR = `OnlyKeeperRegistry()` + +const zeroPLI = ethers.utils.parseEther('0') +const onePLI = ethers.utils.parseEther('1') +const twoPLI = ethers.utils.parseEther('2') +const threePLI = ethers.utils.parseEther('3') +const fivePLI = ethers.utils.parseEther('5') +const sixPLI = ethers.utils.parseEther('6') +const tenPLI = ethers.utils.parseEther('10') + +const oneHundredPLI = ethers.utils.parseEther('100') + +const watchAddress1 = ethers.Wallet.createRandom().address +const watchAddress2 = ethers.Wallet.createRandom().address +const watchAddress3 = ethers.Wallet.createRandom().address +const watchAddress4 = ethers.Wallet.createRandom().address +let watchAddress5: string +let watchAddress6: string + +let bm: ERC20BalanceMonitorExposed +let lt: LinkToken +let receiveEmitter: ReceiveEmitter +let receiveFallbackEmitter: ReceiveFallbackEmitter +let owner: SignerWithAddress +let stranger: SignerWithAddress +let keeperRegistry: SignerWithAddress + 
+async function assertWatchlistBalances( + balance1: BigNumber, + balance2: BigNumber, + balance3: BigNumber, + balance4: BigNumber, + balance5: BigNumber, + balance6: BigNumber, +) { + await h.assertLinkTokenBalance(lt, watchAddress1, balance1, 'address 1') + await h.assertLinkTokenBalance(lt, watchAddress2, balance2, 'address 2') + await h.assertLinkTokenBalance(lt, watchAddress3, balance3, 'address 3') + await h.assertLinkTokenBalance(lt, watchAddress4, balance4, 'address 4') + await h.assertLinkTokenBalance(lt, watchAddress5, balance5, 'address 5') + await h.assertLinkTokenBalance(lt, watchAddress6, balance6, 'address 6') +} + +describe('ERC20BalanceMonitor', () => { + beforeEach(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + stranger = accounts[1] + keeperRegistry = accounts[2] + watchAddress5 = accounts[3].address + watchAddress6 = accounts[4].address + + const bmFactory = await ethers.getContractFactory( + 'ERC20BalanceMonitorExposed', + owner, + ) + const ltFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + owner, + ) + const reFactory = await ethers.getContractFactory('ReceiveEmitter', owner) + const rfeFactory = await ethers.getContractFactory( + 'ReceiveFallbackEmitter', + owner, + ) + + lt = await ltFactory.deploy() + bm = await bmFactory.deploy(lt.address, keeperRegistry.address, 0) + + for (let i = 1; i <= 4; i++) { + const recipient = await accounts[i].getAddress() + await lt.connect(owner).transfer(recipient, oneHundredPLI) + } + + receiveEmitter = await reFactory.deploy() + receiveFallbackEmitter = await rfeFactory.deploy() + await Promise.all([ + bm.deployed(), + receiveEmitter.deployed(), + receiveFallbackEmitter.deployed(), + ]) + }) + + afterEach(async () => { + await h.reset() + }) + + describe('add funds', () => { + it('Should allow anyone to add funds', async () => { + await lt.transfer(bm.address, onePLI) + await lt.connect(stranger).transfer(bm.address, onePLI) + }) + }) + 
+ describe('withdraw()', () => { + beforeEach(async () => { + const tx = await lt.connect(owner).transfer(bm.address, onePLI) + await tx.wait() + }) + + it('Should allow the owner to withdraw', async () => { + const beforeBalance = await lt.balanceOf(owner.address) + const tx = await bm.connect(owner).withdraw(onePLI, owner.address) + await tx.wait() + const afterBalance = await lt.balanceOf(owner.address) + assert.isTrue( + afterBalance.gt(beforeBalance), + 'balance did not increase after withdraw', + ) + }) + + it('Should emit an event', async () => { + const tx = await bm.connect(owner).withdraw(onePLI, owner.address) + await expect(tx) + .to.emit(bm, 'FundsWithdrawn') + .withArgs(onePLI, owner.address) + }) + + it('Should allow the owner to withdraw to anyone', async () => { + const beforeBalance = await lt.balanceOf(stranger.address) + const tx = await bm.connect(owner).withdraw(onePLI, stranger.address) + await tx.wait() + const afterBalance = await lt.balanceOf(stranger.address) + assert.isTrue( + beforeBalance.add(onePLI).eq(afterBalance), + 'balance did not increase after withdraw', + ) + }) + + it('Should not allow strangers to withdraw', async () => { + const tx = bm.connect(stranger).withdraw(onePLI, owner.address) + await expect(tx).to.be.revertedWith(OWNABLE_ERR) + }) + }) + + describe('pause() / unpause()', () => { + it('Should allow owner to pause / unpause', async () => { + const pauseTx = await bm.connect(owner).pause() + await pauseTx.wait() + const unpauseTx = await bm.connect(owner).unpause() + await unpauseTx.wait() + }) + + it('Should not allow strangers to pause / unpause', async () => { + const pauseTxStranger = bm.connect(stranger).pause() + await expect(pauseTxStranger).to.be.revertedWith(OWNABLE_ERR) + const pauseTxOwner = await bm.connect(owner).pause() + await pauseTxOwner.wait() + const unpauseTxStranger = bm.connect(stranger).unpause() + await expect(unpauseTxStranger).to.be.revertedWith(OWNABLE_ERR) + }) + }) + + 
describe('setWatchList() / getWatchList() / getAccountInfo()', () => { + it('Should allow owner to set the watchlist', async () => { + // should start unactive + assert.isFalse((await bm.getAccountInfo(watchAddress1)).isActive) + // add first watchlist + let setTx = await bm + .connect(owner) + .setWatchList([watchAddress1], [onePLI], [twoPLI]) + await setTx.wait() + let watchList = await bm.getWatchList() + assert.deepEqual(watchList, [watchAddress1]) + const accountInfo = await bm.getAccountInfo(watchAddress1) + assert.isTrue(accountInfo.isActive) + expect(accountInfo.minBalance).to.equal(onePLI) + expect(accountInfo.topUpLevel).to.equal(twoPLI) + // add more to watchlist + setTx = await bm + .connect(owner) + .setWatchList( + [watchAddress1, watchAddress2, watchAddress3], + [onePLI, twoPLI, threePLI], + [twoPLI, threePLI, fivePLI], + ) + await setTx.wait() + watchList = await bm.getWatchList() + assert.deepEqual(watchList, [watchAddress1, watchAddress2, watchAddress3]) + let accountInfo1 = await bm.getAccountInfo(watchAddress1) + let accountInfo2 = await bm.getAccountInfo(watchAddress2) + let accountInfo3 = await bm.getAccountInfo(watchAddress3) + expect(accountInfo1.isActive).to.be.true + expect(accountInfo1.minBalance).to.equal(onePLI) + expect(accountInfo1.topUpLevel).to.equal(twoPLI) + expect(accountInfo2.isActive).to.be.true + expect(accountInfo2.minBalance).to.equal(twoPLI) + expect(accountInfo2.topUpLevel).to.equal(threePLI) + expect(accountInfo3.isActive).to.be.true + expect(accountInfo3.minBalance).to.equal(threePLI) + expect(accountInfo3.topUpLevel).to.equal(fivePLI) + // remove some from watchlist + setTx = await bm + .connect(owner) + .setWatchList( + [watchAddress3, watchAddress1], + [threePLI, onePLI], + [fivePLI, twoPLI], + ) + await setTx.wait() + watchList = await bm.getWatchList() + assert.deepEqual(watchList, [watchAddress3, watchAddress1]) + accountInfo1 = await bm.getAccountInfo(watchAddress1) + accountInfo2 = await 
bm.getAccountInfo(watchAddress2) + accountInfo3 = await bm.getAccountInfo(watchAddress3) + expect(accountInfo1.isActive).to.be.true + expect(accountInfo2.isActive).to.be.false + expect(accountInfo3.isActive).to.be.true + }) + + it('Should not allow duplicates in the watchlist', async () => { + const errMsg = `DuplicateAddress("${watchAddress1}")` + const setTx = bm + .connect(owner) + .setWatchList( + [watchAddress1, watchAddress2, watchAddress1], + [onePLI, twoPLI, threePLI], + [twoPLI, threePLI, fivePLI], + ) + await expect(setTx).to.be.revertedWith(errMsg) + }) + + it('Should not allow a topUpLevel les than or equal to minBalance in the watchlist', async () => { + const setTx = bm + .connect(owner) + .setWatchList( + [watchAddress1, watchAddress2, watchAddress1], + [onePLI, twoPLI, threePLI], + [zeroPLI, twoPLI, threePLI], + ) + await expect(setTx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + + it('Should not allow larger than maximum watchlist size', async () => { + const watchlist: any[][] = [[], [], []] + Array.from(Array(301).keys()).forEach(() => { + watchlist[0].push(owner.address) + watchlist[1].push(onePLI) + watchlist[2].push(twoPLI) + }) + const tx = bm + .connect(owner) + .setWatchList(watchlist[0], watchlist[1], watchlist[2]) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + + it('Should not allow strangers to set the watchlist', async () => { + const setTxStranger = bm + .connect(stranger) + .setWatchList([watchAddress1], [onePLI], [twoPLI]) + await expect(setTxStranger).to.be.revertedWith(OWNABLE_ERR) + }) + + it('Should revert if the list lengths differ', async () => { + let tx = bm.connect(owner).setWatchList([watchAddress1], [], [twoPLI]) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + tx = bm.connect(owner).setWatchList([watchAddress1], [onePLI], []) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + tx = bm.connect(owner).setWatchList([], [onePLI], [twoPLI]) + await 
expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + + it('Should revert if any of the addresses are empty', async () => { + let tx = bm + .connect(owner) + .setWatchList( + [watchAddress1, ethers.constants.AddressZero], + [onePLI, onePLI], + [twoPLI, twoPLI], + ) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + + it('Should revert if any of the top up amounts are 0', async () => { + const tx = bm + .connect(owner) + .setWatchList( + [watchAddress1, watchAddress2], + [onePLI, onePLI], + [twoPLI, zeroPLI], + ) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + }) + + describe('getKeeperRegistryAddress() / setKeeperRegistryAddress()', () => { + const newAddress = ethers.Wallet.createRandom().address + + it('Should initialize with the registry address provided to the constructor', async () => { + const address = await bm.getKeeperRegistryAddress() + assert.equal(address, keeperRegistry.address) + }) + + it('Should allow the owner to set the registry address', async () => { + const setTx = await bm.connect(owner).setKeeperRegistryAddress(newAddress) + await setTx.wait() + const address = await bm.getKeeperRegistryAddress() + assert.equal(address, newAddress) + }) + + it('Should not allow strangers to set the registry address', async () => { + const setTx = bm.connect(stranger).setKeeperRegistryAddress(newAddress) + await expect(setTx).to.be.revertedWith(OWNABLE_ERR) + }) + + it('Should emit an event', async () => { + const setTx = await bm.connect(owner).setKeeperRegistryAddress(newAddress) + await expect(setTx) + .to.emit(bm, 'KeeperRegistryAddressUpdated') + .withArgs(keeperRegistry.address, newAddress) + }) + }) + + describe('getMinWaitPeriodSeconds / setMinWaitPeriodSeconds()', () => { + const newWaitPeriod = BigNumber.from(1) + + it('Should initialize with the wait period provided to the constructor', async () => { + const minWaitPeriod = await bm.getMinWaitPeriodSeconds() + expect(minWaitPeriod).to.equal(0) + }) + + 
it('Should allow owner to set the wait period', async () => { + const setTx = await bm + .connect(owner) + .setMinWaitPeriodSeconds(newWaitPeriod) + await setTx.wait() + const minWaitPeriod = await bm.getMinWaitPeriodSeconds() + expect(minWaitPeriod).to.equal(newWaitPeriod) + }) + + it('Should not allow strangers to set the wait period', async () => { + const setTx = bm.connect(stranger).setMinWaitPeriodSeconds(newWaitPeriod) + await expect(setTx).to.be.revertedWith(OWNABLE_ERR) + }) + + it('Should emit an event', async () => { + const setTx = await bm + .connect(owner) + .setMinWaitPeriodSeconds(newWaitPeriod) + await expect(setTx) + .to.emit(bm, 'MinWaitPeriodUpdated') + .withArgs(0, newWaitPeriod) + }) + }) + + describe('checkUpkeep() / getUnderfundedAddresses()', () => { + beforeEach(async () => { + const setTx = await bm.connect(owner).setWatchList( + [ + watchAddress1, // needs funds + watchAddress5, // funded + watchAddress2, // needs funds + watchAddress6, // funded + watchAddress3, // needs funds + ], + new Array(5).fill(onePLI), + new Array(5).fill(twoPLI), + ) + await setTx.wait() + }) + + it('Should return list of address that are underfunded', async () => { + const fundTx = await lt.connect(owner).transfer( + bm.address, + sixPLI, // needs 6 total + ) + await fundTx.wait() + const [should, payload] = await bm.checkUpkeep('0x') + assert.isTrue(should) + let [addresses] = ethers.utils.defaultAbiCoder.decode( + ['address[]'], + payload, + ) + assert.deepEqual(addresses, [watchAddress1, watchAddress2, watchAddress3]) + // checkUpkeep payload should match getUnderfundedAddresses() + addresses = await bm.getUnderfundedAddresses() + assert.deepEqual(addresses, [watchAddress1, watchAddress2, watchAddress3]) + }) + + it('Should return some results even if contract cannot fund all eligible targets', async () => { + const fundTx = await lt.connect(owner).transfer( + bm.address, + fivePLI, // needs 6 total + ) + await fundTx.wait() + const [should, payload] = 
await bm.checkUpkeep('0x') + assert.isTrue(should) + const [addresses] = ethers.utils.defaultAbiCoder.decode( + ['address[]'], + payload, + ) + assert.deepEqual(addresses, [watchAddress1, watchAddress2]) + }) + + it('Should omit addresses that have been funded recently', async () => { + const setWaitPdTx = await bm.setMinWaitPeriodSeconds(3600) // 1 hour + const fundTx = await lt.connect(owner).transfer(bm.address, sixPLI) + await Promise.all([setWaitPdTx.wait(), fundTx.wait()]) + const block = await ethers.provider.getBlock('latest') + const setTopUpTx = await bm.setLastTopUpXXXTestOnly( + watchAddress2, + block.timestamp - 100, + ) + await setTopUpTx.wait() + const [should, payload] = await bm.checkUpkeep('0x') + assert.isTrue(should) + const [addresses] = ethers.utils.defaultAbiCoder.decode( + ['address[]'], + payload, + ) + assert.deepEqual(addresses, [watchAddress1, watchAddress3]) + }) + + it('Should revert when paused', async () => { + const tx = await bm.connect(owner).pause() + await tx.wait() + const ethCall = bm.checkUpkeep('0x') + await expect(ethCall).to.be.revertedWith(PAUSED_ERR) + }) + }) + + describe('performUpkeep()', () => { + let validPayload: string + let invalidPayload: string + + beforeEach(async () => { + validPayload = ethers.utils.defaultAbiCoder.encode( + ['address[]'], + [[watchAddress1, watchAddress2, watchAddress3]], + ) + invalidPayload = ethers.utils.defaultAbiCoder.encode( + ['address[]'], + [[watchAddress1, watchAddress2, watchAddress4, watchAddress5]], + ) + const setTx = await bm.connect(owner).setWatchList( + [ + watchAddress1, // needs funds + watchAddress5, // funded + watchAddress2, // needs funds + watchAddress6, // funded + watchAddress3, // needs funds + // watchAddress4 - omitted + ], + new Array(5).fill(onePLI), + new Array(5).fill(twoPLI), + ) + await setTx.wait() + }) + + it('Should revert when paused', async () => { + const pauseTx = await bm.connect(owner).pause() + await pauseTx.wait() + const performTx = 
bm.connect(keeperRegistry).performUpkeep(validPayload) + await expect(performTx).to.be.revertedWith(PAUSED_ERR) + }) + + context('when partially funded', () => { + it('Should fund as many addresses as possible', async () => { + const fundTx = await lt.connect(owner).transfer( + bm.address, + fivePLI, // only enough PLI to fund 2 addresses + ) + await fundTx.wait() + await assertWatchlistBalances( + zeroPLI, + zeroPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(validPayload) + await assertWatchlistBalances( + twoPLI, + twoPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + await expect(performTx) + .to.emit(bm, 'TopUpSucceeded') + .withArgs(watchAddress1) + await expect(performTx) + .to.emit(bm, 'TopUpSucceeded') + .withArgs(watchAddress2) + }) + }) + + context('when fully funded', () => { + beforeEach(async () => { + const fundTx = await lt.connect(owner).transfer(bm.address, tenPLI) + await fundTx.wait() + }) + + it('Should fund the appropriate addresses', async () => { + await assertWatchlistBalances( + zeroPLI, + zeroPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(validPayload, { gasLimit: 2_500_000 }) + await performTx.wait() + await assertWatchlistBalances( + twoPLI, + twoPLI, + twoPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + }) + + it('Should only fund active, underfunded addresses', async () => { + await assertWatchlistBalances( + zeroPLI, + zeroPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(invalidPayload, { gasLimit: 2_500_000 }) + await performTx.wait() + await assertWatchlistBalances( + twoPLI, + twoPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + }) + + it('Should not fund addresses that have been funded recently', async () => { + const 
setWaitPdTx = await bm.setMinWaitPeriodSeconds(3600) // 1 hour + await setWaitPdTx.wait() + const block = await ethers.provider.getBlock('latest') + const setTopUpTx = await bm.setLastTopUpXXXTestOnly( + watchAddress2, + block.timestamp - 100, + ) + await setTopUpTx.wait() + await assertWatchlistBalances( + zeroPLI, + zeroPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(validPayload, { gasLimit: 2_500_000 }) + await performTx.wait() + await assertWatchlistBalances( + twoPLI, + zeroPLI, + twoPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + }) + + it('Should only be callable by the keeper registry contract', async () => { + let performTx = bm.connect(owner).performUpkeep(validPayload) + await expect(performTx).to.be.revertedWith(ONLY_KEEPER_ERR) + performTx = bm.connect(stranger).performUpkeep(validPayload) + await expect(performTx).to.be.revertedWith(ONLY_KEEPER_ERR) + }) + + it('Should protect against running out of gas', async () => { + await assertWatchlistBalances( + zeroPLI, + zeroPLI, + zeroPLI, + zeroPLI, + oneHundredPLI, + oneHundredPLI, + ) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(validPayload, { gasLimit: 130_000 }) // too little for all 3 transfers + await performTx.wait() + const balance1 = await lt.balanceOf(watchAddress1) + const balance2 = await lt.balanceOf(watchAddress2) + const balance3 = await lt.balanceOf(watchAddress3) + const balances = [balance1, balance2, balance3].map((n) => n.toString()) + expect(balances) + .to.include(twoPLI.toString()) // expect at least 1 transfer + .to.include(zeroPLI.toString()) // expect at least 1 out of funds + }) + + it('Should provide enough gas to support receive and fallback functions', async () => { + const addresses = [ + receiveEmitter.address, + receiveFallbackEmitter.address, + ] + const payload = ethers.utils.defaultAbiCoder.encode( + ['address[]'], + [addresses], + ) + const 
setTx = await bm + .connect(owner) + .setWatchList( + addresses, + new Array(2).fill(onePLI), + new Array(2).fill(twoPLI), + ) + await setTx.wait() + + const reBalanceBefore = await lt.balanceOf(receiveEmitter.address) + const rfeBalanceBefore = await lt.balanceOf( + receiveFallbackEmitter.address, + ) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(payload, { gasLimit: 2_500_000 }) + await h.assertLinkTokenBalance( + lt, + receiveEmitter.address, + reBalanceBefore.add(twoPLI), + ) + await h.assertLinkTokenBalance( + lt, + receiveFallbackEmitter.address, + rfeBalanceBefore.add(twoPLI), + ) + + await expect(performTx) + .to.emit(bm, 'TopUpSucceeded') + .withArgs(receiveEmitter.address) + await expect(performTx) + .to.emit(bm, 'TopUpSucceeded') + .withArgs(receiveFallbackEmitter.address) + }) + }) + }) + + describe('topUp()', () => { + context('when not paused', () => { + it('Should be callable by anyone', async () => { + const users = [owner, keeperRegistry, stranger] + for (let idx = 0; idx < users.length; idx++) { + const user = users[idx] + await bm.connect(user).topUp([]) + } + }) + }) + context('when paused', () => { + it('Should be callable by no one', async () => { + await bm.connect(owner).pause() + const users = [owner, keeperRegistry, stranger] + for (let idx = 0; idx < users.length; idx++) { + const user = users[idx] + const tx = bm.connect(user).topUp([]) + await expect(tx).to.be.revertedWith(PAUSED_ERR) + } + }) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/EthBalanceMonitor.test.ts b/contracts/test/v0.8/automation/EthBalanceMonitor.test.ts new file mode 100644 index 00000000..1f7163b0 --- /dev/null +++ b/contracts/test/v0.8/automation/EthBalanceMonitor.test.ts @@ -0,0 +1,655 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import type { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { EthBalanceMonitorExposed } from '../../../typechain/EthBalanceMonitorExposed' 
+import { ReceiveReverter } from '../../../typechain/ReceiveReverter' +import { ReceiveEmitter } from '../../../typechain/ReceiveEmitter' +import { ReceiveFallbackEmitter } from '../../../typechain/ReceiveFallbackEmitter' +import { BigNumber } from 'ethers' +import * as h from '../../test-helpers/helpers' + +const OWNABLE_ERR = 'Only callable by owner' +const INVALID_WATCHLIST_ERR = `InvalidWatchList()` +const PAUSED_ERR = 'Pausable: paused' +const ONLY_KEEPER_ERR = `OnlyKeeperRegistry()` + +const zeroEth = ethers.utils.parseEther('0') +const oneEth = ethers.utils.parseEther('1') +const twoEth = ethers.utils.parseEther('2') +const threeEth = ethers.utils.parseEther('3') +const fiveEth = ethers.utils.parseEther('5') +const sixEth = ethers.utils.parseEther('6') +const tenEth = ethers.utils.parseEther('10') + +const watchAddress1 = ethers.Wallet.createRandom().address +const watchAddress2 = ethers.Wallet.createRandom().address +const watchAddress3 = ethers.Wallet.createRandom().address +const watchAddress4 = ethers.Wallet.createRandom().address +let watchAddress5: string +let watchAddress6: string + +async function assertWatchlistBalances( + balance1: number, + balance2: number, + balance3: number, + balance4: number, + balance5: number, + balance6: number, +) { + const toEth = (n: number) => ethers.utils.parseUnits(n.toString(), 'ether') + await h.assertBalance(watchAddress1, toEth(balance1), 'address 1') + await h.assertBalance(watchAddress2, toEth(balance2), 'address 2') + await h.assertBalance(watchAddress3, toEth(balance3), 'address 3') + await h.assertBalance(watchAddress4, toEth(balance4), 'address 4') + await h.assertBalance(watchAddress5, toEth(balance5), 'address 5') + await h.assertBalance(watchAddress6, toEth(balance6), 'address 6') +} + +let bm: EthBalanceMonitorExposed +let receiveReverter: ReceiveReverter +let receiveEmitter: ReceiveEmitter +let receiveFallbackEmitter: ReceiveFallbackEmitter +let owner: SignerWithAddress +let stranger: SignerWithAddress 
+let keeperRegistry: SignerWithAddress + +describe('EthBalanceMonitor', () => { + beforeEach(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + stranger = accounts[1] + keeperRegistry = accounts[2] + watchAddress5 = accounts[3].address + watchAddress6 = accounts[4].address + + const bmFactory = await ethers.getContractFactory( + 'EthBalanceMonitorExposed', + owner, + ) + const rrFactory = await ethers.getContractFactory('ReceiveReverter', owner) + const reFactory = await ethers.getContractFactory('ReceiveEmitter', owner) + const rfeFactory = await ethers.getContractFactory( + 'ReceiveFallbackEmitter', + owner, + ) + + bm = await bmFactory.deploy(keeperRegistry.address, 0) + receiveReverter = await rrFactory.deploy() + receiveEmitter = await reFactory.deploy() + receiveFallbackEmitter = await rfeFactory.deploy() + await Promise.all([ + bm.deployed(), + receiveReverter.deployed(), + receiveEmitter.deployed(), + receiveFallbackEmitter.deployed(), + ]) + }) + + afterEach(async () => { + await h.reset() + }) + + describe('receive()', () => { + it('Should allow anyone to add funds', async () => { + await owner.sendTransaction({ + to: bm.address, + value: oneEth, + }) + await stranger.sendTransaction({ + to: bm.address, + value: oneEth, + }) + }) + + it('Should emit an event', async () => { + await owner.sendTransaction({ + to: bm.address, + value: oneEth, + }) + const tx = stranger.sendTransaction({ + to: bm.address, + value: oneEth, + }) + await expect(tx) + .to.emit(bm, 'FundsAdded') + .withArgs(oneEth, twoEth, stranger.address) + }) + }) + + describe('withdraw()', () => { + beforeEach(async () => { + const tx = await owner.sendTransaction({ + to: bm.address, + value: oneEth, + }) + await tx.wait() + }) + + it('Should allow the owner to withdraw', async () => { + const beforeBalance = await owner.getBalance() + const tx = await bm.connect(owner).withdraw(oneEth, owner.address) + await tx.wait() + const afterBalance = await 
owner.getBalance() + assert.isTrue( + afterBalance.gt(beforeBalance), + 'balance did not increase after withdraw', + ) + }) + + it('Should emit an event', async () => { + const tx = await bm.connect(owner).withdraw(oneEth, owner.address) + await expect(tx) + .to.emit(bm, 'FundsWithdrawn') + .withArgs(oneEth, owner.address) + }) + + it('Should allow the owner to withdraw to anyone', async () => { + const beforeBalance = await stranger.getBalance() + const tx = await bm.connect(owner).withdraw(oneEth, stranger.address) + await tx.wait() + const afterBalance = await stranger.getBalance() + assert.isTrue( + beforeBalance.add(oneEth).eq(afterBalance), + 'balance did not increase after withdraw', + ) + }) + + it('Should not allow strangers to withdraw', async () => { + const tx = bm.connect(stranger).withdraw(oneEth, owner.address) + await expect(tx).to.be.revertedWith(OWNABLE_ERR) + }) + }) + + describe('pause() / unpause()', () => { + it('Should allow owner to pause / unpause', async () => { + const pauseTx = await bm.connect(owner).pause() + await pauseTx.wait() + const unpauseTx = await bm.connect(owner).unpause() + await unpauseTx.wait() + }) + + it('Should not allow strangers to pause / unpause', async () => { + const pauseTxStranger = bm.connect(stranger).pause() + await expect(pauseTxStranger).to.be.revertedWith(OWNABLE_ERR) + const pauseTxOwner = await bm.connect(owner).pause() + await pauseTxOwner.wait() + const unpauseTxStranger = bm.connect(stranger).unpause() + await expect(unpauseTxStranger).to.be.revertedWith(OWNABLE_ERR) + }) + }) + + describe('setWatchList() / getWatchList() / getAccountInfo()', () => { + it('Should allow owner to set the watchlist', async () => { + // should start unactive + assert.isFalse((await bm.getAccountInfo(watchAddress1)).isActive) + // add first watchlist + let setTx = await bm + .connect(owner) + .setWatchList([watchAddress1], [oneEth], [twoEth]) + await setTx.wait() + let watchList = await bm.getWatchList() + 
assert.deepEqual(watchList, [watchAddress1]) + const accountInfo = await bm.getAccountInfo(watchAddress1) + assert.isTrue(accountInfo.isActive) + expect(accountInfo.minBalanceWei).to.equal(oneEth) + expect(accountInfo.topUpAmountWei).to.equal(twoEth) + // add more to watchlist + setTx = await bm + .connect(owner) + .setWatchList( + [watchAddress1, watchAddress2, watchAddress3], + [oneEth, twoEth, threeEth], + [oneEth, twoEth, threeEth], + ) + await setTx.wait() + watchList = await bm.getWatchList() + assert.deepEqual(watchList, [watchAddress1, watchAddress2, watchAddress3]) + let accountInfo1 = await bm.getAccountInfo(watchAddress1) + let accountInfo2 = await bm.getAccountInfo(watchAddress2) + let accountInfo3 = await bm.getAccountInfo(watchAddress3) + expect(accountInfo1.isActive).to.be.true + expect(accountInfo1.minBalanceWei).to.equal(oneEth) + expect(accountInfo1.topUpAmountWei).to.equal(oneEth) + expect(accountInfo2.isActive).to.be.true + expect(accountInfo2.minBalanceWei).to.equal(twoEth) + expect(accountInfo2.topUpAmountWei).to.equal(twoEth) + expect(accountInfo3.isActive).to.be.true + expect(accountInfo3.minBalanceWei).to.equal(threeEth) + expect(accountInfo3.topUpAmountWei).to.equal(threeEth) + // remove some from watchlist + setTx = await bm + .connect(owner) + .setWatchList( + [watchAddress3, watchAddress1], + [threeEth, oneEth], + [threeEth, oneEth], + ) + await setTx.wait() + watchList = await bm.getWatchList() + assert.deepEqual(watchList, [watchAddress3, watchAddress1]) + accountInfo1 = await bm.getAccountInfo(watchAddress1) + accountInfo2 = await bm.getAccountInfo(watchAddress2) + accountInfo3 = await bm.getAccountInfo(watchAddress3) + expect(accountInfo1.isActive).to.be.true + expect(accountInfo2.isActive).to.be.false + expect(accountInfo3.isActive).to.be.true + }) + + it('Should not allow duplicates in the watchlist', async () => { + const errMsg = `DuplicateAddress("${watchAddress1}")` + const setTx = bm + .connect(owner) + .setWatchList( + 
[watchAddress1, watchAddress2, watchAddress1], + [oneEth, twoEth, threeEth], + [oneEth, twoEth, threeEth], + ) + await expect(setTx).to.be.revertedWith(errMsg) + }) + + it('Should not allow strangers to set the watchlist', async () => { + const setTxStranger = bm + .connect(stranger) + .setWatchList([watchAddress1], [oneEth], [twoEth]) + await expect(setTxStranger).to.be.revertedWith(OWNABLE_ERR) + }) + + it('Should revert if the list lengths differ', async () => { + let tx = bm.connect(owner).setWatchList([watchAddress1], [], [twoEth]) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + tx = bm.connect(owner).setWatchList([watchAddress1], [oneEth], []) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + tx = bm.connect(owner).setWatchList([], [oneEth], [twoEth]) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + + it('Should revert if any of the addresses are empty', async () => { + let tx = bm + .connect(owner) + .setWatchList( + [watchAddress1, ethers.constants.AddressZero], + [oneEth, oneEth], + [twoEth, twoEth], + ) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + + it('Should revert if any of the top up amounts are 0', async () => { + const tx = bm + .connect(owner) + .setWatchList( + [watchAddress1, watchAddress2], + [oneEth, oneEth], + [twoEth, zeroEth], + ) + await expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + }) + + describe('getKeeperRegistryAddress() / setKeeperRegistryAddress()', () => { + const newAddress = ethers.Wallet.createRandom().address + + it('Should initialize with the registry address provided to the constructor', async () => { + const address = await bm.getKeeperRegistryAddress() + assert.equal(address, keeperRegistry.address) + }) + + it('Should allow the owner to set the registry address', async () => { + const setTx = await bm.connect(owner).setKeeperRegistryAddress(newAddress) + await setTx.wait() + const address = await bm.getKeeperRegistryAddress() + 
assert.equal(address, newAddress) + }) + + it('Should not allow strangers to set the registry address', async () => { + const setTx = bm.connect(stranger).setKeeperRegistryAddress(newAddress) + await expect(setTx).to.be.revertedWith(OWNABLE_ERR) + }) + + it('Should emit an event', async () => { + const setTx = await bm.connect(owner).setKeeperRegistryAddress(newAddress) + await expect(setTx) + .to.emit(bm, 'KeeperRegistryAddressUpdated') + .withArgs(keeperRegistry.address, newAddress) + }) + }) + + describe('getMinWaitPeriodSeconds / setMinWaitPeriodSeconds()', () => { + const newWaitPeriod = BigNumber.from(1) + + it('Should initialize with the wait period provided to the constructor', async () => { + const minWaitPeriod = await bm.getMinWaitPeriodSeconds() + expect(minWaitPeriod).to.equal(0) + }) + + it('Should allow owner to set the wait period', async () => { + const setTx = await bm + .connect(owner) + .setMinWaitPeriodSeconds(newWaitPeriod) + await setTx.wait() + const minWaitPeriod = await bm.getMinWaitPeriodSeconds() + expect(minWaitPeriod).to.equal(newWaitPeriod) + }) + + it('Should not allow strangers to set the wait period', async () => { + const setTx = bm.connect(stranger).setMinWaitPeriodSeconds(newWaitPeriod) + await expect(setTx).to.be.revertedWith(OWNABLE_ERR) + }) + + it('Should emit an event', async () => { + const setTx = await bm + .connect(owner) + .setMinWaitPeriodSeconds(newWaitPeriod) + await expect(setTx) + .to.emit(bm, 'MinWaitPeriodUpdated') + .withArgs(0, newWaitPeriod) + }) + }) + + describe('checkUpkeep() / getUnderfundedAddresses()', () => { + beforeEach(async () => { + const setTx = await bm.connect(owner).setWatchList( + [ + watchAddress1, // needs funds + watchAddress5, // funded + watchAddress2, // needs funds + watchAddress6, // funded + watchAddress3, // needs funds + ], + new Array(5).fill(oneEth), + new Array(5).fill(twoEth), + ) + await setTx.wait() + }) + + it('Should return list of address that are underfunded', async () => 
{ + const fundTx = await owner.sendTransaction({ + to: bm.address, + value: sixEth, // needs 6 total + }) + await fundTx.wait() + const [should, payload] = await bm.checkUpkeep('0x') + assert.isTrue(should) + let [addresses] = ethers.utils.defaultAbiCoder.decode( + ['address[]'], + payload, + ) + assert.deepEqual(addresses, [watchAddress1, watchAddress2, watchAddress3]) + // checkUpkeep payload should match getUnderfundedAddresses() + addresses = await bm.getUnderfundedAddresses() + assert.deepEqual(addresses, [watchAddress1, watchAddress2, watchAddress3]) + }) + + it('Should return some results even if contract cannot fund all eligible targets', async () => { + const fundTx = await owner.sendTransaction({ + to: bm.address, + value: fiveEth, // needs 6 total + }) + await fundTx.wait() + const [should, payload] = await bm.checkUpkeep('0x') + assert.isTrue(should) + const [addresses] = ethers.utils.defaultAbiCoder.decode( + ['address[]'], + payload, + ) + assert.deepEqual(addresses, [watchAddress1, watchAddress2]) + }) + + it('Should omit addresses that have been funded recently', async () => { + const setWaitPdTx = await bm.setMinWaitPeriodSeconds(3600) // 1 hour + const fundTx = await owner.sendTransaction({ + to: bm.address, + value: sixEth, + }) + await Promise.all([setWaitPdTx.wait(), fundTx.wait()]) + const block = await ethers.provider.getBlock('latest') + const setTopUpTx = await bm.setLastTopUpXXXTestOnly( + watchAddress2, + block.timestamp - 100, + ) + await setTopUpTx.wait() + const [should, payload] = await bm.checkUpkeep('0x') + assert.isTrue(should) + const [addresses] = ethers.utils.defaultAbiCoder.decode( + ['address[]'], + payload, + ) + assert.deepEqual(addresses, [watchAddress1, watchAddress3]) + }) + + it('Should revert when paused', async () => { + const tx = await bm.connect(owner).pause() + await tx.wait() + const ethCall = bm.checkUpkeep('0x') + await expect(ethCall).to.be.revertedWith(PAUSED_ERR) + }) + }) + + describe('performUpkeep()', () 
=> { + let validPayload: string + let invalidPayload: string + + beforeEach(async () => { + validPayload = ethers.utils.defaultAbiCoder.encode( + ['address[]'], + [[watchAddress1, watchAddress2, watchAddress3]], + ) + invalidPayload = ethers.utils.defaultAbiCoder.encode( + ['address[]'], + [[watchAddress1, watchAddress2, watchAddress4, watchAddress5]], + ) + const setTx = await bm.connect(owner).setWatchList( + [ + watchAddress1, // needs funds + watchAddress5, // funded + watchAddress2, // needs funds + watchAddress6, // funded + watchAddress3, // needs funds + // watchAddress4 - omitted + ], + new Array(5).fill(oneEth), + new Array(5).fill(twoEth), + ) + await setTx.wait() + }) + + it('Should revert when paused', async () => { + const pauseTx = await bm.connect(owner).pause() + await pauseTx.wait() + const performTx = bm.connect(keeperRegistry).performUpkeep(validPayload) + await expect(performTx).to.be.revertedWith(PAUSED_ERR) + }) + + context('when partially funded', () => { + it('Should fund as many addresses as possible', async () => { + const fundTx = await owner.sendTransaction({ + to: bm.address, + value: fiveEth, // only enough eth to fund 2 addresses + }) + await fundTx.wait() + await assertWatchlistBalances(0, 0, 0, 0, 10_000, 10_000) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(validPayload) + await assertWatchlistBalances(2, 2, 0, 0, 10_000, 10_000) + await expect(performTx) + .to.emit(bm, 'TopUpSucceeded') + .withArgs(watchAddress1) + await expect(performTx) + .to.emit(bm, 'TopUpSucceeded') + .withArgs(watchAddress2) + }) + }) + + context('when fully funded', () => { + beforeEach(async () => { + const fundTx = await owner.sendTransaction({ + to: bm.address, + value: tenEth, + }) + await fundTx.wait() + }) + + it('Should fund the appropriate addresses', async () => { + await assertWatchlistBalances(0, 0, 0, 0, 10_000, 10_000) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(validPayload, { gasLimit: 
2_500_000 }) + await performTx.wait() + await assertWatchlistBalances(2, 2, 2, 0, 10_000, 10_000) + }) + + it('Should only fund active, underfunded addresses', async () => { + await assertWatchlistBalances(0, 0, 0, 0, 10_000, 10_000) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(invalidPayload, { gasLimit: 2_500_000 }) + await performTx.wait() + await assertWatchlistBalances(2, 2, 0, 0, 10_000, 10_000) + }) + + it('Should continue funding addresses even if one reverts', async () => { + await assertWatchlistBalances(0, 0, 0, 0, 10_000, 10_000) + const addresses = [ + watchAddress1, + receiveReverter.address, + watchAddress2, + ] + const setTx = await bm + .connect(owner) + .setWatchList( + addresses, + new Array(3).fill(oneEth), + new Array(3).fill(twoEth), + ) + await setTx.wait() + const payload = ethers.utils.defaultAbiCoder.encode( + ['address[]'], + [addresses], + ) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(payload, { gasLimit: 2_500_000 }) + await performTx.wait() + await assertWatchlistBalances(2, 2, 0, 0, 10_000, 10_000) + await h.assertBalance(receiveReverter.address, 0) + await expect(performTx) + .to.emit(bm, 'TopUpSucceeded') + .withArgs(watchAddress1) + await expect(performTx) + .to.emit(bm, 'TopUpSucceeded') + .withArgs(watchAddress2) + await expect(performTx) + .to.emit(bm, 'TopUpFailed') + .withArgs(receiveReverter.address) + }) + + it('Should not fund addresses that have been funded recently', async () => { + const setWaitPdTx = await bm.setMinWaitPeriodSeconds(3600) // 1 hour + await setWaitPdTx.wait() + const block = await ethers.provider.getBlock('latest') + const setTopUpTx = await bm.setLastTopUpXXXTestOnly( + watchAddress2, + block.timestamp - 100, + ) + await setTopUpTx.wait() + await assertWatchlistBalances(0, 0, 0, 0, 10_000, 10_000) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(validPayload, { gasLimit: 2_500_000 }) + await performTx.wait() + await 
assertWatchlistBalances(2, 0, 2, 0, 10_000, 10_000) + }) + + it('Should only be callable by the keeper registry contract', async () => { + let performTx = bm.connect(owner).performUpkeep(validPayload) + await expect(performTx).to.be.revertedWith(ONLY_KEEPER_ERR) + performTx = bm.connect(stranger).performUpkeep(validPayload) + await expect(performTx).to.be.revertedWith(ONLY_KEEPER_ERR) + }) + + it('Should protect against running out of gas', async () => { + await assertWatchlistBalances(0, 0, 0, 0, 10_000, 10_000) + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(validPayload, { gasLimit: 130_000 }) // too little for all 3 transfers + await performTx.wait() + const balance1 = await ethers.provider.getBalance(watchAddress1) + const balance2 = await ethers.provider.getBalance(watchAddress2) + const balance3 = await ethers.provider.getBalance(watchAddress3) + const balances = [balance1, balance2, balance3].map((n) => n.toString()) + expect(balances) + .to.include(twoEth.toString()) // expect at least 1 transfer + .to.include(zeroEth.toString()) // expect at least 1 out of funds + }) + + it('Should provide enough gas to support receive and fallback functions', async () => { + const addresses = [ + receiveEmitter.address, + receiveFallbackEmitter.address, + ] + const payload = ethers.utils.defaultAbiCoder.encode( + ['address[]'], + [addresses], + ) + const setTx = await bm + .connect(owner) + .setWatchList( + addresses, + new Array(2).fill(oneEth), + new Array(2).fill(twoEth), + ) + await setTx.wait() + + const reBalanceBefore = await ethers.provider.getBalance( + receiveEmitter.address, + ) + const rfeBalanceBefore = await ethers.provider.getBalance( + receiveFallbackEmitter.address, + ) + + const performTx = await bm + .connect(keeperRegistry) + .performUpkeep(payload, { gasLimit: 2_500_000 }) + await h.assertBalance( + receiveEmitter.address, + reBalanceBefore.add(twoEth), + ) + await h.assertBalance( + receiveFallbackEmitter.address, + 
rfeBalanceBefore.add(twoEth), + ) + + await expect(performTx) + .to.emit(bm, 'TopUpSucceeded') + .withArgs(receiveEmitter.address) + await expect(performTx) + .to.emit(bm, 'TopUpSucceeded') + .withArgs(receiveFallbackEmitter.address) + }) + }) + }) + + describe('topUp()', () => { + context('when not paused', () => { + it('Should be callable by anyone', async () => { + const users = [owner, keeperRegistry, stranger] + for (let idx = 0; idx < users.length; idx++) { + const user = users[idx] + await bm.connect(user).topUp([]) + } + }) + }) + context('when paused', () => { + it('Should be callable by no one', async () => { + await bm.connect(owner).pause() + const users = [owner, keeperRegistry, stranger] + for (let idx = 0; idx < users.length; idx++) { + const user = users[idx] + const tx = bm.connect(user).topUp([]) + await expect(tx).to.be.revertedWith(PAUSED_ERR) + } + }) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/IKeeperRegistryMaster.test.ts b/contracts/test/v0.8/automation/IKeeperRegistryMaster.test.ts new file mode 100644 index 00000000..bd4b24e5 --- /dev/null +++ b/contracts/test/v0.8/automation/IKeeperRegistryMaster.test.ts @@ -0,0 +1,154 @@ +import fs from 'fs' +import { ethers } from 'hardhat' +import { assert } from 'chai' +import { FunctionFragment } from '@ethersproject/abi' +import { KeeperRegistry2_1__factory as KeeperRegistryFactory } from '../../../typechain/factories/KeeperRegistry2_1__factory' +import { KeeperRegistryLogicA2_1__factory as KeeperRegistryLogicAFactory } from '../../../typechain/factories/KeeperRegistryLogicA2_1__factory' +import { KeeperRegistryLogicB2_1__factory as KeeperRegistryLogicBFactory } from '../../../typechain/factories/KeeperRegistryLogicB2_1__factory' +import { KeeperRegistryBase2_1__factory as KeeperRegistryBaseFactory } from '../../../typechain/factories/KeeperRegistryBase2_1__factory' +import { Chainable__factory as ChainableFactory } from '../../../typechain/factories/Chainable__factory' +import { 
IKeeperRegistryMaster__factory as IKeeperRegistryMasterFactory } from '../../../typechain/factories/IKeeperRegistryMaster__factory'
+import { IAutomationRegistryConsumer__factory as IAutomationRegistryConsumerFactory } from '../../../typechain/factories/IAutomationRegistryConsumer__factory'
+import { MigratableKeeperRegistryInterface__factory as MigratableKeeperRegistryInterfaceFactory } from '../../../typechain/factories/MigratableKeeperRegistryInterface__factory'
+import { MigratableKeeperRegistryInterfaceV2__factory as MigratableKeeperRegistryInterfaceV2Factory } from '../../../typechain/factories/MigratableKeeperRegistryInterfaceV2__factory'
+import { OCR2Abstract__factory as OCR2AbstractFactory } from '../../../typechain/factories/OCR2Abstract__factory'
+
+type Entry = {
+  inputs?: any[]
+  outputs?: any[]
+  name?: string
+  type: string
+}
+
+type InterfaceABI = ConstructorParameters[0]
+
+const compositeABIs = [
+  KeeperRegistryFactory.abi,
+  KeeperRegistryLogicAFactory.abi,
+  KeeperRegistryLogicBFactory.abi,
+]
+
+function entryID(entry: Entry) {
+  // remove "internal type" and "name" since they don't affect the ability
+  // of a contract to satisfy an interface
+  const preimage = Object.assign({}, entry)
+  if (entry.inputs) {
+    preimage.inputs = entry.inputs.map(({ type }) => ({
+      type,
+    }))
+  }
+  if (entry.outputs) {
+    preimage.outputs = entry.outputs.map(({ type }) => ({
+      type,
+    }))
+  }
+  return ethers.utils.id(JSON.stringify(preimage))
+}
+
+/**
+ * @dev because the keeper master interface is a composite of several different contracts,
+ * it is possible that an interface could be satisfied by functions across different
+ * contracts, and therefore not enforceable by the compiler directly. 
Instead, we use this
+ * test to assert that the master interface satisfies the constraints of an individual interface
+ */
+function assertSatisfiesInterface(
+  contractABI: InterfaceABI,
+  expectedABI: InterfaceABI,
+) {
+  const implementer = new ethers.utils.Interface(contractABI)
+  const expected = new ethers.utils.Interface(expectedABI)
+  for (const functionName in expected.functions) {
+    if (
+      Object.prototype.hasOwnProperty.call(expected, functionName) &&
+      functionName.match('^.+(.*)$') // only match typed function sigs
+    ) {
+      assert.isDefined(
+        implementer.functions[functionName],
+        `missing function ${functionName}`,
+      )
+      const propertiesToMatch: (keyof FunctionFragment)[] = [
+        'constant',
+        'stateMutability',
+        'payable',
+      ]
+      for (const property of propertiesToMatch) {
+        assert.equal(
+          implementer.functions[functionName][property],
+          expected.functions[functionName][property],
+          `property ${property} does not match for function ${functionName}`,
+        )
+      }
+    }
+  }
+}
+
+describe('IKeeperRegistryMaster', () => {
+  it('is up to date', async () => {
+    const checksum = ethers.utils.id(compositeABIs.join(''))
+    const knownChecksum = fs
+      .readFileSync(
+        'src/v0.8/automation/interfaces/v2_1/IKeeperRegistryMaster.sol',
+      )
+      .toString()
+      .slice(17, 83) // checksum located at top of file
+    assert.equal(
+      checksum,
+      knownChecksum,
+      'master interface is out of date - regenerate using "pnpm ts-node ./scripts/generate-automation-master-interface.ts"',
+    )
+  })
+
+  it('is generated from composite contracts without competing definitions', async () => {
+    const sharedEntries = [
+      ...ChainableFactory.abi,
+      ...KeeperRegistryBaseFactory.abi,
+    ]
+    const abiSet = new Set()
+    const sharedSet = new Set()
+    for (const entry of sharedEntries) {
+      sharedSet.add(entryID(entry))
+    }
+    for (const abi of compositeABIs) {
+      for (const entry of abi) {
+        const id = entryID(entry)
+        if (!abiSet.has(id)) {
+          abiSet.add(id)
+        } else if (!sharedSet.has(id)) {
+          assert.fail(
`composite contracts contain duplicate entry: ${JSON.stringify( + entry, + )}`, + ) + } + } + } + }) + + it('satisfies the IAutomationRegistryConsumer interface', async () => { + assertSatisfiesInterface( + IKeeperRegistryMasterFactory.abi, + IAutomationRegistryConsumerFactory.abi, + ) + }) + + it('satisfies the MigratableKeeperRegistryInterface interface', async () => { + assertSatisfiesInterface( + IKeeperRegistryMasterFactory.abi, + MigratableKeeperRegistryInterfaceFactory.abi, + ) + }) + + it('satisfies the MigratableKeeperRegistryInterfaceV2 interface', async () => { + assertSatisfiesInterface( + IKeeperRegistryMasterFactory.abi, + MigratableKeeperRegistryInterfaceV2Factory.abi, + ) + }) + + it('satisfies the OCR2Abstract interface', async () => { + assertSatisfiesInterface( + IKeeperRegistryMasterFactory.abi, + OCR2AbstractFactory.abi, + ) + }) +}) diff --git a/contracts/test/v0.8/automation/KeeperRegistrar2_0.test.ts b/contracts/test/v0.8/automation/KeeperRegistrar2_0.test.ts new file mode 100644 index 00000000..19c6942f --- /dev/null +++ b/contracts/test/v0.8/automation/KeeperRegistrar2_0.test.ts @@ -0,0 +1,937 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { evmRevert } from '../../test-helpers/matchers' +import { getUsers, Personas } from '../../test-helpers/setup' +import { BigNumber, Signer } from 'ethers' +import { LinkToken__factory as LinkTokenFactory } from '../../../typechain/factories/LinkToken__factory' + +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../../typechain/factories/MockV3Aggregator__factory' +import { UpkeepMock__factory as UpkeepMockFactory } from '../../../typechain/factories/UpkeepMock__factory' +import { KeeperRegistry2_0 as KeeperRegistry } from '../../../typechain/KeeperRegistry2_0' +import { KeeperRegistryLogic20 as KeeperRegistryLogic } from '../../../typechain/KeeperRegistryLogic20' +import { KeeperRegistrar20 as KeeperRegistrar } from 
'../../../typechain/KeeperRegistrar20' +import { KeeperRegistry2_0__factory as KeeperRegistryFactory } from '../../../typechain/factories/KeeperRegistry2_0__factory' +import { KeeperRegistryLogic2_0__factory as KeeperRegistryLogicFactory } from '../../../typechain/factories/KeeperRegistryLogic2_0__factory' +import { KeeperRegistrar2_0__factory as KeeperRegistrarFactory } from '../../../typechain/factories/KeeperRegistrar2_0__factory' + +import { MockV3Aggregator } from '../../../typechain/MockV3Aggregator' +import { LinkToken } from '../../../typechain/LinkToken' +import { UpkeepMock } from '../../../typechain/UpkeepMock' +import { toWei } from '../../test-helpers/helpers' + +let linkTokenFactory: LinkTokenFactory +let mockV3AggregatorFactory: MockV3AggregatorFactory +let keeperRegistryFactory: KeeperRegistryFactory +let keeperRegistryLogicFactory: KeeperRegistryLogicFactory +let keeperRegistrar: KeeperRegistrarFactory +let upkeepMockFactory: UpkeepMockFactory + +let personas: Personas + +before(async () => { + personas = (await getUsers()).personas + + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + ) + mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as unknown as MockV3AggregatorFactory + // @ts-ignore bug in autogen file + keeperRegistryFactory = await ethers.getContractFactory('KeeperRegistry2_0') + // @ts-ignore bug in autogen file + keeperRegistryLogicFactory = await ethers.getContractFactory( + 'KeeperRegistryLogic2_0', + ) + // @ts-ignore bug in autogen file + keeperRegistrar = await ethers.getContractFactory('KeeperRegistrar2_0') + upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') +}) + +const errorMsgs = { + onlyOwner: 'revert Only callable by owner', + onlyAdmin: 'OnlyAdminOrOwner()', + hashPayload: 'HashMismatch()', + requestNotFound: 'RequestNotFound()', +} + +describe('KeeperRegistrar2_0', () => { + const upkeepName = 
'SampleUpkeep' + + const linkEth = BigNumber.from(300000000) + const gasWei = BigNumber.from(100) + const executeGas = BigNumber.from(100000) + const paymentPremiumPPB = BigNumber.from(250000000) + const flatFeeMicroLink = BigNumber.from(0) + const maxAllowedAutoApprove = 5 + const offchainConfig = '0x01234567' + + const emptyBytes = '0x00' + const stalenessSeconds = BigNumber.from(43820) + const gasCeilingMultiplier = BigNumber.from(1) + const checkGasLimit = BigNumber.from(20000000) + const fallbackGasPrice = BigNumber.from(200) + const fallbackLinkPrice = BigNumber.from(200000000) + const maxCheckDataSize = BigNumber.from(10000) + const maxPerformDataSize = BigNumber.from(10000) + const maxPerformGas = BigNumber.from(5000000) + const minUpkeepSpend = BigNumber.from('1000000000000000000') + const amount = BigNumber.from('5000000000000000000') + const amount1 = BigNumber.from('6000000000000000000') + const transcoder = ethers.constants.AddressZero + + // Enum values are not auto exported in ABI so have to manually declare + const autoApproveType_DISABLED = 0 + const autoApproveType_ENABLED_SENDER_ALLOWLIST = 1 + const autoApproveType_ENABLED_ALL = 2 + + let owner: Signer + let admin: Signer + let someAddress: Signer + let registrarOwner: Signer + let stranger: Signer + let requestSender: Signer + + let linkToken: LinkToken + let linkEthFeed: MockV3Aggregator + let gasPriceFeed: MockV3Aggregator + let registry: KeeperRegistry + let registryLogic: KeeperRegistryLogic + let mock: UpkeepMock + let registrar: KeeperRegistrar + + beforeEach(async () => { + owner = personas.Default + admin = personas.Neil + someAddress = personas.Ned + registrarOwner = personas.Nelly + stranger = personas.Nancy + requestSender = personas.Norbert + + linkToken = await linkTokenFactory.connect(owner).deploy() + gasPriceFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(0, gasWei) + linkEthFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(9, linkEth) + 
registryLogic = await keeperRegistryLogicFactory + .connect(owner) + .deploy(0, linkToken.address, linkEthFeed.address, gasPriceFeed.address) + + registry = await keeperRegistryFactory + .connect(owner) + .deploy(registryLogic.address) + + mock = await upkeepMockFactory.deploy() + + registrar = await keeperRegistrar + .connect(registrarOwner) + .deploy( + linkToken.address, + autoApproveType_DISABLED, + BigNumber.from('0'), + registry.address, + minUpkeepSpend, + ) + + await linkToken + .connect(owner) + .transfer(await requestSender.getAddress(), toWei('1000')) + + const keepers = [ + await personas.Carol.getAddress(), + await personas.Nancy.getAddress(), + await personas.Ned.getAddress(), + await personas.Neil.getAddress(), + ] + const config = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder, + registrar: registrar.address, + } + const onchainConfig = ethers.utils.defaultAbiCoder.encode( + [ + 'tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds\ + ,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,\ + uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,\ + address registrar)', + ], + [config], + ) + await registry + .connect(owner) + .setConfig(keepers, keepers, 1, onchainConfig, 1, '0x') + }) + + describe('#typeAndVersion', () => { + it('uses the correct type and version', async () => { + const typeAndVersion = await registrar.typeAndVersion() + assert.equal(typeAndVersion, 'KeeperRegistrar 2.0.0') + }) + }) + + describe('#register', () => { + it('reverts if not called by the PLI token', async () => { + await evmRevert( + registrar + .connect(someAddress) + .register( + upkeepName, + emptyBytes, + mock.address, + executeGas, + await 
admin.getAddress(), + emptyBytes, + offchainConfig, + amount, + await requestSender.getAddress(), + ), + 'OnlyLink()', + ) + }) + + it('reverts if the amount passed in data mismatches actual amount sent', async () => { + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + offchainConfig, + amount1, + await requestSender.getAddress(), + ], + ) + + await evmRevert( + linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes), + 'AmountMismatch()', + ) + }) + + it('reverts if the sender passed in data mismatches actual sender', async () => { + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + offchainConfig, + amount, + await admin.getAddress(), // Should have been requestSender.getAddress() + ], + ) + await evmRevert( + linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes), + 'SenderMismatch()', + ) + }) + + it('reverts if the admin address is 0x0000...', async () => { + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + '0x0000000000000000000000000000000000000000', + emptyBytes, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + + await evmRevert( + linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes), + 'RegistrationRequestFailed()', + ) + }) + + it('Auto Approve ON - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () 
=> { + //set auto approve ON with high threshold limits + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + //register with auto approve ON + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + + const [id] = await registry.getActiveUpkeepIDs(0, 1) + + //confirm if a new upkeep has been registered and the details are the same as the one just registered + const newupkeep = await registry.getUpkeep(id) + assert.equal(newupkeep.target, mock.address) + assert.equal(newupkeep.admin, await admin.getAddress()) + assert.equal(newupkeep.checkData, emptyBytes) + assert.equal(newupkeep.balance.toString(), amount.toString()) + assert.equal(newupkeep.executeGas, executeGas.toNumber()) + assert.equal(newupkeep.offchainConfig, offchainConfig) + + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('Auto Approve OFF - does not registers an upkeep on KeeperRegistry, emits only RegistrationRequested event', async () => { + //get upkeep count before attempting registration + const beforeCount = (await registry.getState()).state.numUpkeeps + + //set auto approve OFF, threshold limits dont matter in this case + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_DISABLED, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await 
admin.getAddress(), + emptyBytes, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + + //get upkeep count after attempting registration + const afterCount = (await registry.getState()).state.numUpkeeps + //confirm that a new upkeep has NOT been registered and upkeep count is still the same + assert.deepEqual(beforeCount, afterCount) + + //confirm that only RegistrationRequested event is emitted and RegistrationApproved event is not + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).not.to.emit(registrar, 'RegistrationApproved') + + const hash = receipt.logs[2].topics[1] + const pendingRequest = await registrar.getPendingRequest(hash) + assert.equal(await admin.getAddress(), pendingRequest[0]) + assert.ok(amount.eq(pendingRequest[1])) + }) + + it('Auto Approve ON - Throttle max approvals - does not register an upkeep on KeeperRegistry beyond the max limit, emits only RegistrationRequested event after limit is hit', async () => { + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 0) + + //set auto approve on, with max 1 allowed + await registrar.connect(registrarOwner).setRegistrationConfig( + autoApproveType_ENABLED_ALL, + 1, // maxAllowedAutoApprove + registry.address, + minUpkeepSpend, + ) + + //register within threshold, new upkeep should be registered + let abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 1) // 0 -> 1 + + //try registering another one, new upkeep should not 
be registered + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + executeGas.toNumber() + 1, // make unique hash + await admin.getAddress(), + emptyBytes, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 1) // Still 1 + + // Now set new max limit to 2. One more upkeep should get auto approved + await registrar.connect(registrarOwner).setRegistrationConfig( + autoApproveType_ENABLED_ALL, + 2, // maxAllowedAutoApprove + registry.address, + minUpkeepSpend, + ) + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + executeGas.toNumber() + 2, // make unique hash + await admin.getAddress(), + emptyBytes, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 2) // 1 -> 2 + + // One more upkeep should not get registered + abiEncodedBytes = registrar.interface.encodeFunctionData('register', [ + upkeepName, + emptyBytes, + mock.address, + executeGas.toNumber() + 3, // make unique hash + await admin.getAddress(), + emptyBytes, + offchainConfig, + amount, + await requestSender.getAddress(), + ]) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 2) // Still 2 + }) + + it('Auto Approve Sender Allowlist - sender in allowlist - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () => { + const senderAddress = await requestSender.getAddress() + + //set auto approve to 
ENABLED_SENDER_ALLOWLIST type with high threshold limits + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_ENABLED_SENDER_ALLOWLIST, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + // Add sender to allowlist + await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, true) + + //register with auto approve ON + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + + const [id] = await registry.getActiveUpkeepIDs(0, 1) + + //confirm if a new upkeep has been registered and the details are the same as the one just registered + const newupkeep = await registry.getUpkeep(id) + assert.equal(newupkeep.target, mock.address) + assert.equal(newupkeep.admin, await admin.getAddress()) + assert.equal(newupkeep.checkData, emptyBytes) + assert.equal(newupkeep.balance.toString(), amount.toString()) + assert.equal(newupkeep.executeGas, executeGas.toNumber()) + + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('Auto Approve Sender Allowlist - sender NOT in allowlist - does not registers an upkeep on KeeperRegistry, emits only RegistrationRequested event', async () => { + const beforeCount = (await registry.getState()).state.numUpkeeps + const senderAddress = await requestSender.getAddress() + + //set auto approve to ENABLED_SENDER_ALLOWLIST type with high threshold limits + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_ENABLED_SENDER_ALLOWLIST, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + // Explicitly remove sender from 
allowlist + await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, false) + + //register. auto approve shouldn't happen + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + + //get upkeep count after attempting registration + const afterCount = (await registry.getState()).state.numUpkeeps + //confirm that a new upkeep has NOT been registered and upkeep count is still the same + assert.deepEqual(beforeCount, afterCount) + + //confirm that only RegistrationRequested event is emitted and RegistrationApproved event is not + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).not.to.emit(registrar, 'RegistrationApproved') + + const hash = receipt.logs[2].topics[1] + const pendingRequest = await registrar.getPendingRequest(hash) + assert.equal(await admin.getAddress(), pendingRequest[0]) + assert.ok(amount.eq(pendingRequest[1])) + }) + }) + + describe('#registerUpkeep', () => { + it('reverts with empty message if amount sent is not available in PLI allowance', async () => { + await evmRevert( + registrar.connect(someAddress).registerUpkeep({ + name: upkeepName, + upkeepContract: mock.address, + gasLimit: executeGas, + adminAddress: await admin.getAddress(), + checkData: emptyBytes, + offchainConfig: emptyBytes, + amount, + encryptedEmail: emptyBytes, + }), + '', + ) + }) + + it('reverts if the amount passed in data is less than configured minimum', async () => { + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + // amt is one order of 
magnitude less than minUpkeepSpend + const amt = BigNumber.from('100000000000000000') + + await evmRevert( + registrar.connect(someAddress).registerUpkeep({ + name: upkeepName, + upkeepContract: mock.address, + gasLimit: executeGas, + adminAddress: await admin.getAddress(), + checkData: emptyBytes, + offchainConfig: emptyBytes, + amount: amt, + encryptedEmail: emptyBytes, + }), + 'InsufficientPayment()', + ) + }) + + it('Auto Approve ON - registers an upkeep on KeeperRegistry instantly and emits both RegistrationRequested and RegistrationApproved events', async () => { + //set auto approve ON with high threshold limits + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_ENABLED_ALL, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + await linkToken.connect(requestSender).approve(registrar.address, amount) + + const tx = await registrar.connect(requestSender).registerUpkeep({ + name: upkeepName, + upkeepContract: mock.address, + gasLimit: executeGas, + adminAddress: await admin.getAddress(), + checkData: emptyBytes, + offchainConfig, + amount, + encryptedEmail: emptyBytes, + }) + assert.equal((await registry.getState()).state.numUpkeeps.toNumber(), 1) // 0 -> 1 + + //confirm if a new upkeep has been registered and the details are the same as the one just registered + const [id] = await registry.getActiveUpkeepIDs(0, 1) + const newupkeep = await registry.getUpkeep(id) + assert.equal(newupkeep.target, mock.address) + assert.equal(newupkeep.admin, await admin.getAddress()) + assert.equal(newupkeep.checkData, emptyBytes) + assert.equal(newupkeep.balance.toString(), amount.toString()) + assert.equal(newupkeep.executeGas, executeGas.toNumber()) + assert.equal(newupkeep.offchainConfig, offchainConfig) + + await expect(tx).to.emit(registrar, 'RegistrationRequested') + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + }) + + describe('#setAutoApproveAllowedSender', () => { + it('reverts if not called 
by the owner', async () => { + const tx = registrar + .connect(stranger) + .setAutoApproveAllowedSender(await admin.getAddress(), false) + await evmRevert(tx, 'Only callable by owner') + }) + + it('sets the allowed status correctly and emits log', async () => { + const senderAddress = await stranger.getAddress() + let tx = await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, true) + await expect(tx) + .to.emit(registrar, 'AutoApproveAllowedSenderSet') + .withArgs(senderAddress, true) + + let senderAllowedStatus = await registrar + .connect(owner) + .getAutoApproveAllowedSender(senderAddress) + assert.isTrue(senderAllowedStatus) + + tx = await registrar + .connect(registrarOwner) + .setAutoApproveAllowedSender(senderAddress, false) + await expect(tx) + .to.emit(registrar, 'AutoApproveAllowedSenderSet') + .withArgs(senderAddress, false) + + senderAllowedStatus = await registrar + .connect(owner) + .getAutoApproveAllowedSender(senderAddress) + assert.isFalse(senderAllowedStatus) + }) + }) + + describe('#approve', () => { + let hash: string + + beforeEach(async () => { + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_DISABLED, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + hash = receipt.logs[2].topics[1] + }) + + it('reverts if not called by the owner', async () => { + const tx = registrar + .connect(stranger) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + emptyBytes, + hash, + ) 
+ await evmRevert(tx, 'Only callable by owner') + }) + + it('reverts if the hash does not exist', async () => { + const tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + emptyBytes, + '0x000000000000000000000000322813fd9a801c5507c9de605d63cea4f2ce6c44', + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + + it('reverts if any member of the payload changes', async () => { + let tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + ethers.Wallet.createRandom().address, + executeGas, + await admin.getAddress(), + emptyBytes, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + 10000, + await admin.getAddress(), + emptyBytes, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + ethers.Wallet.createRandom().address, + emptyBytes, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + '0x1234', + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.hashPayload) + }) + + it('approves an existing registration request', async () => { + const tx = await registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + offchainConfig, + hash, + ) + await expect(tx).to.emit(registrar, 'RegistrationApproved') + }) + + it('deletes the request afterwards / reverts if the request DNE', async () => { + await registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + offchainConfig, + hash, + ) + const tx = registrar + .connect(registrarOwner) + .approve( 
+ upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + offchainConfig, + hash, + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + }) + + describe('#cancel', () => { + let hash: string + + beforeEach(async () => { + await registrar + .connect(registrarOwner) + .setRegistrationConfig( + autoApproveType_DISABLED, + maxAllowedAutoApprove, + registry.address, + minUpkeepSpend, + ) + + //register with auto approve OFF + const abiEncodedBytes = registrar.interface.encodeFunctionData( + 'register', + [ + upkeepName, + emptyBytes, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + offchainConfig, + amount, + await requestSender.getAddress(), + ], + ) + const tx = await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + const receipt = await tx.wait() + hash = receipt.logs[2].topics[1] + // submit duplicate request (increase balance) + await linkToken + .connect(requestSender) + .transferAndCall(registrar.address, amount, abiEncodedBytes) + }) + + it('reverts if not called by the admin / owner', async () => { + const tx = registrar.connect(stranger).cancel(hash) + await evmRevert(tx, errorMsgs.onlyAdmin) + }) + + it('reverts if the hash does not exist', async () => { + const tx = registrar + .connect(registrarOwner) + .cancel( + '0x000000000000000000000000322813fd9a801c5507c9de605d63cea4f2ce6c44', + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + + it('refunds the total request balance to the admin address if owner cancels', async () => { + const before = await linkToken.balanceOf(await admin.getAddress()) + const tx = await registrar.connect(registrarOwner).cancel(hash) + const after = await linkToken.balanceOf(await admin.getAddress()) + assert.isTrue(after.sub(before).eq(amount.mul(BigNumber.from(2)))) + await expect(tx).to.emit(registrar, 'RegistrationRejected') + }) + + it('refunds the total request balance to the admin address if admin cancels', 
async () => { + const before = await linkToken.balanceOf(await admin.getAddress()) + const tx = await registrar.connect(admin).cancel(hash) + const after = await linkToken.balanceOf(await admin.getAddress()) + assert.isTrue(after.sub(before).eq(amount.mul(BigNumber.from(2)))) + await expect(tx).to.emit(registrar, 'RegistrationRejected') + }) + + it('deletes the request hash', async () => { + await registrar.connect(registrarOwner).cancel(hash) + let tx = registrar.connect(registrarOwner).cancel(hash) + await evmRevert(tx, errorMsgs.requestNotFound) + tx = registrar + .connect(registrarOwner) + .approve( + upkeepName, + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + emptyBytes, + hash, + ) + await evmRevert(tx, errorMsgs.requestNotFound) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/KeeperRegistry1_2.test.ts b/contracts/test/v0.8/automation/KeeperRegistry1_2.test.ts new file mode 100644 index 00000000..2c8440bf --- /dev/null +++ b/contracts/test/v0.8/automation/KeeperRegistry1_2.test.ts @@ -0,0 +1,2228 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { evmRevert } from '../../test-helpers/matchers' +import { getUsers, Personas } from '../../test-helpers/setup' +import { BigNumber, BigNumberish, Signer } from 'ethers' +import { LinkToken__factory as LinkTokenFactory } from '../../../typechain/factories/LinkToken__factory' +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../../typechain/factories/MockV3Aggregator__factory' +import { UpkeepMock__factory as UpkeepMockFactory } from '../../../typechain/factories/UpkeepMock__factory' +import { UpkeepReverter__factory as UpkeepReverterFactory } from '../../../typechain/factories/UpkeepReverter__factory' +import { UpkeepAutoFunder__factory as UpkeepAutoFunderFactory } from '../../../typechain/factories/UpkeepAutoFunder__factory' +import { UpkeepTranscoder__factory as UpkeepTranscoderFactory } from 
'../../../typechain/factories/UpkeepTranscoder__factory' +import { KeeperRegistry1_2__factory as KeeperRegistryFactory } from '../../../typechain/factories/KeeperRegistry1_2__factory' +import { KeeperRegistry1_2 as KeeperRegistry } from '../../../typechain/KeeperRegistry1_2' +import { MockV3Aggregator } from '../../../typechain/MockV3Aggregator' +import { LinkToken } from '../../../typechain/LinkToken' +import { UpkeepMock } from '../../../typechain/UpkeepMock' +import { UpkeepTranscoder } from '../../../typechain/UpkeepTranscoder' +import { toWei } from '../../test-helpers/helpers' + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +/*********************************** REGISTRY v1.2 IS FROZEN ************************************/ + +// All tests are disabled for this contract, as we expect it to never change in the future. +// Instead, we test that the bytecode for the contract has not changed. +// If this test ever fails, you should remove it and then re-run the original test suite. 
+ +const BYTECODE = KeeperRegistryFactory.bytecode +const BYTECODE_CHECKSUM = + '0x8e465b93eae52724b7edbef5bc133c96520dad33f959373e5d026549ca40158c' + +describe('KeeperRegistry1_2 - Frozen [ @skip-coverage ]', () => { + it('has not changed', () => { + assert.equal(ethers.utils.id(BYTECODE), BYTECODE_CHECKSUM) + }) +}) + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +async function getUpkeepID(tx: any) { + const receipt = await tx.wait() + return receipt.events[0].args.id +} + +function randomAddress() { + return ethers.Wallet.createRandom().address +} + +// ----------------------------------------------------------------------------------------------- +// DEV: these *should* match the perform/check gas overhead values in the contract and on the node +const PERFORM_GAS_OVERHEAD = BigNumber.from(160000) +const CHECK_GAS_OVERHEAD = BigNumber.from(170000) +// ----------------------------------------------------------------------------------------------- + +// Smart contract factories +let linkTokenFactory: LinkTokenFactory +let mockV3AggregatorFactory: MockV3AggregatorFactory +let keeperRegistryFactory: KeeperRegistryFactory +let upkeepMockFactory: UpkeepMockFactory +let upkeepReverterFactory: UpkeepReverterFactory +let upkeepAutoFunderFactory: UpkeepAutoFunderFactory +let upkeepTranscoderFactory: UpkeepTranscoderFactory +let personas: Personas + +before(async () => { + personas = (await getUsers()).personas + + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + ) + // need full path because there are two contracts with name MockV3Aggregator + mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as unknown as MockV3AggregatorFactory + // @ts-ignore bug in autogen file + keeperRegistryFactory = await 
ethers.getContractFactory('KeeperRegistry1_2') + upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') + upkeepReverterFactory = await ethers.getContractFactory('UpkeepReverter') + upkeepAutoFunderFactory = await ethers.getContractFactory('UpkeepAutoFunder') + upkeepTranscoderFactory = await ethers.getContractFactory('UpkeepTranscoder') +}) + +describe.skip('KeeperRegistry1_2', () => { + const linkEth = BigNumber.from(300000000) + const gasWei = BigNumber.from(100) + const linkDivisibility = BigNumber.from('1000000000000000000') + const executeGas = BigNumber.from('100000') + const paymentPremiumBase = BigNumber.from('1000000000') + const paymentPremiumPPB = BigNumber.from('250000000') + const flatFeeMicroLink = BigNumber.from(0) + const blockCountPerTurn = BigNumber.from(3) + const emptyBytes = '0x00' + const randomBytes = '0x1234abcd' + const zeroAddress = ethers.constants.AddressZero + const extraGas = BigNumber.from('250000') + const registryGasOverhead = BigNumber.from('80000') + const stalenessSeconds = BigNumber.from(43820) + const gasCeilingMultiplier = BigNumber.from(1) + const checkGasLimit = BigNumber.from(20000000) + const fallbackGasPrice = BigNumber.from(200) + const fallbackLinkPrice = BigNumber.from(200000000) + const maxPerformGas = BigNumber.from(5000000) + const minUpkeepSpend = BigNumber.from(0) + + let owner: Signer + let keeper1: Signer + let keeper2: Signer + let keeper3: Signer + let nonkeeper: Signer + let admin: Signer + let payee1: Signer + let payee2: Signer + let payee3: Signer + + let linkToken: LinkToken + let linkEthFeed: MockV3Aggregator + let gasPriceFeed: MockV3Aggregator + let registry: KeeperRegistry + let registry2: KeeperRegistry + let mock: UpkeepMock + let transcoder: UpkeepTranscoder + + let id: BigNumber + let keepers: string[] + let payees: string[] + + beforeEach(async () => { + owner = personas.Default + keeper1 = personas.Carol + keeper2 = personas.Eddy + keeper3 = personas.Nancy + nonkeeper = personas.Ned 
+ admin = personas.Neil + payee1 = personas.Nelly + payee2 = personas.Norbert + payee3 = personas.Nick + + keepers = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + await keeper3.getAddress(), + ] + payees = [ + await payee1.getAddress(), + await payee2.getAddress(), + await payee3.getAddress(), + ] + + linkToken = await linkTokenFactory.connect(owner).deploy() + gasPriceFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(0, gasWei) + linkEthFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(9, linkEth) + transcoder = await upkeepTranscoderFactory.connect(owner).deploy() + registry = await keeperRegistryFactory + .connect(owner) + .deploy(linkToken.address, linkEthFeed.address, gasPriceFeed.address, { + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + registry2 = await keeperRegistryFactory + .connect(owner) + .deploy(linkToken.address, linkEthFeed.address, gasPriceFeed.address, { + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + mock = await upkeepMockFactory.deploy() + await linkToken + .connect(owner) + .transfer(await keeper1.getAddress(), toWei('1000')) + await linkToken + .connect(owner) + .transfer(await keeper2.getAddress(), toWei('1000')) + await linkToken + .connect(owner) + .transfer(await keeper3.getAddress(), toWei('1000')) + + await registry.connect(owner).setKeepers(keepers, payees) + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + randomBytes, + ) + id = await getUpkeepID(tx) + 
}) + + const linkForGas = ( + upkeepGasSpent: BigNumberish, + premiumPPB?: BigNumberish, + flatFee?: BigNumberish, + ) => { + premiumPPB = premiumPPB === undefined ? paymentPremiumPPB : premiumPPB + flatFee = flatFee === undefined ? flatFeeMicroLink : flatFee + const gasSpent = registryGasOverhead.add(BigNumber.from(upkeepGasSpent)) + const base = gasWei.mul(gasSpent).mul(linkDivisibility).div(linkEth) + const premium = base.mul(premiumPPB).div(paymentPremiumBase) + const flatFeeJules = BigNumber.from(flatFee).mul('1000000000000') + return base.add(premium).add(flatFeeJules) + } + + describe('#typeAndVersion', () => { + it('uses the correct type and version', async () => { + const typeAndVersion = await registry.typeAndVersion() + assert.equal(typeAndVersion, 'KeeperRegistry 1.2.0') + }) + }) + + describe('#setKeepers', () => { + const IGNORE_ADDRESS = '0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF' + it('reverts when not called by the owner', async () => { + await evmRevert( + registry.connect(keeper1).setKeepers([], []), + 'Only callable by owner', + ) + }) + + it('reverts when adding the same keeper twice', async () => { + await evmRevert( + registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper1.getAddress()], + [await payee1.getAddress(), await payee1.getAddress()], + ), + 'DuplicateEntry()', + ) + }) + + it('reverts with different numbers of keepers/payees', async () => { + await evmRevert( + registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper2.getAddress()], + [await payee1.getAddress()], + ), + 'ParameterLengthError()', + ) + await evmRevert( + registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress()], + [await payee1.getAddress(), await payee2.getAddress()], + ), + 'ParameterLengthError()', + ) + }) + + it('reverts if the payee is the zero address', async () => { + await evmRevert( + registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await 
keeper2.getAddress()], + [ + await payee1.getAddress(), + '0x0000000000000000000000000000000000000000', + ], + ), + 'InvalidPayee()', + ) + }) + + it('emits events for every keeper added and removed', async () => { + const oldKeepers = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + ] + const oldPayees = [await payee1.getAddress(), await payee2.getAddress()] + await registry.connect(owner).setKeepers(oldKeepers, oldPayees) + assert.deepEqual(oldKeepers, (await registry.getState()).keepers) + + // remove keepers + const newKeepers = [ + await keeper2.getAddress(), + await keeper3.getAddress(), + ] + const newPayees = [await payee2.getAddress(), await payee3.getAddress()] + const tx = await registry.connect(owner).setKeepers(newKeepers, newPayees) + assert.deepEqual(newKeepers, (await registry.getState()).keepers) + + await expect(tx) + .to.emit(registry, 'KeepersUpdated') + .withArgs(newKeepers, newPayees) + }) + + it('updates the keeper to inactive when removed', async () => { + await registry.connect(owner).setKeepers(keepers, payees) + await registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper3.getAddress()], + [await payee1.getAddress(), await payee3.getAddress()], + ) + const added = await registry.getKeeperInfo(await keeper1.getAddress()) + assert.isTrue(added.active) + const removed = await registry.getKeeperInfo(await keeper2.getAddress()) + assert.isFalse(removed.active) + }) + + it('does not change the payee if IGNORE_ADDRESS is used as payee', async () => { + const oldKeepers = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + ] + const oldPayees = [await payee1.getAddress(), await payee2.getAddress()] + await registry.connect(owner).setKeepers(oldKeepers, oldPayees) + assert.deepEqual(oldKeepers, (await registry.getState()).keepers) + + const newKeepers = [ + await keeper2.getAddress(), + await keeper3.getAddress(), + ] + const newPayees = [IGNORE_ADDRESS, await payee3.getAddress()] + const 
tx = await registry.connect(owner).setKeepers(newKeepers, newPayees) + assert.deepEqual(newKeepers, (await registry.getState()).keepers) + + const ignored = await registry.getKeeperInfo(await keeper2.getAddress()) + assert.equal(await payee2.getAddress(), ignored.payee) + assert.equal(true, ignored.active) + + await expect(tx) + .to.emit(registry, 'KeepersUpdated') + .withArgs(newKeepers, newPayees) + }) + + it('reverts if the owner changes the payee', async () => { + await registry.connect(owner).setKeepers(keepers, payees) + await evmRevert( + registry + .connect(owner) + .setKeepers(keepers, [ + await payee1.getAddress(), + await payee2.getAddress(), + await owner.getAddress(), + ]), + 'InvalidPayee()', + ) + }) + }) + + describe('#registerUpkeep', () => { + context('and the registry is paused', () => { + beforeEach(async () => { + await registry.connect(owner).pause() + }) + it('reverts', async () => { + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + zeroAddress, + executeGas, + await admin.getAddress(), + emptyBytes, + ), + 'Pausable: paused', + ) + }) + }) + + it('reverts if the target is not a contract', async () => { + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + zeroAddress, + executeGas, + await admin.getAddress(), + emptyBytes, + ), + 'NotAContract()', + ) + }) + + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry + .connect(keeper1) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ), + 'OnlyCallableByOwnerOrRegistrar()', + ) + }) + + it('reverts if execute gas is too low', async () => { + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + mock.address, + 2299, + await admin.getAddress(), + emptyBytes, + ), + 'GasLimitOutsideRange()', + ) + }) + + it('reverts if execute gas is too high', async () => { + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + mock.address, + 5000001, + await admin.getAddress(), 
+ emptyBytes, + ), + 'GasLimitOutsideRange()', + ) + }) + + it('creates a record of the registration', async () => { + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + id = await getUpkeepID(tx) + await expect(tx) + .to.emit(registry, 'UpkeepRegistered') + .withArgs(id, executeGas, await admin.getAddress()) + const registration = await registry.getUpkeep(id) + assert.equal(mock.address, registration.target) + assert.equal(0, registration.balance.toNumber()) + assert.equal(emptyBytes, registration.checkData) + assert(registration.maxValidBlocknumber.eq('0xffffffffffffffff')) + }) + }) + + describe('#addFunds', () => { + const amount = toWei('1') + + beforeEach(async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + }) + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).addFunds(id.add(1), amount), + 'UpkeepNotActive()', + ) + }) + + it('adds to the balance of the registration', async () => { + await registry.connect(keeper1).addFunds(id, amount) + const registration = await registry.getUpkeep(id) + assert.isTrue(amount.eq(registration.balance)) + }) + + it('emits a log', async () => { + const tx = await registry.connect(keeper1).addFunds(id, amount) + await expect(tx) + .to.emit(registry, 'FundsAdded') + .withArgs(id, await keeper1.getAddress(), amount) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(id) + await evmRevert( + registry.connect(keeper1).addFunds(id, amount), + 'UpkeepNotActive()', + ) + }) + }) + + describe('#setUpkeepGasLimit', () => { + const newGasLimit = BigNumber.from('500000') + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).setUpkeepGasLimit(id.add(1), newGasLimit), + 'UpkeepNotActive()', + ) + }) + + it('reverts if the upkeep is 
canceled', async () => { + await registry.connect(admin).cancelUpkeep(id) + await evmRevert( + registry.connect(keeper1).setUpkeepGasLimit(id, newGasLimit), + 'UpkeepNotActive()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry.connect(owner).setUpkeepGasLimit(id, newGasLimit), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if new gas limit is out of bounds', async () => { + await evmRevert( + registry.connect(admin).setUpkeepGasLimit(id, BigNumber.from('100')), + 'GasLimitOutsideRange()', + ) + await evmRevert( + registry + .connect(admin) + .setUpkeepGasLimit(id, BigNumber.from('6000000')), + 'GasLimitOutsideRange()', + ) + }) + + it('updates the gas limit successfully', async () => { + const initialGasLimit = (await registry.getUpkeep(id)).executeGas + assert.equal(initialGasLimit, executeGas.toNumber()) + await registry.connect(admin).setUpkeepGasLimit(id, newGasLimit) + const updatedGasLimit = (await registry.getUpkeep(id)).executeGas + assert.equal(updatedGasLimit, newGasLimit.toNumber()) + }) + + it('emits a log', async () => { + const tx = await registry + .connect(admin) + .setUpkeepGasLimit(id, newGasLimit) + await expect(tx) + .to.emit(registry, 'UpkeepGasLimitSet') + .withArgs(id, newGasLimit) + }) + }) + + describe('#checkUpkeep', () => { + it('reverts if the upkeep is not funded', async () => { + await mock.setCanPerform(true) + await mock.setCanCheck(true) + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'InsufficientFunds()', + ) + }) + + context('when the registration is funded', () => { + beforeEach(async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + await registry.connect(keeper1).addFunds(id, toWei('100')) + }) + + it('reverts if executed', async () => { + await mock.setCanPerform(true) + await mock.setCanCheck(true) + await evmRevert( + registry.checkUpkeep(id, await 
keeper1.getAddress()), + 'OnlySimulatedBackend()', + ) + }) + + it('reverts if the specified keeper is not valid', async () => { + await mock.setCanPerform(true) + await mock.setCanCheck(true) + await evmRevert( + registry.checkUpkeep(id, await owner.getAddress()), + 'OnlySimulatedBackend()', + ) + }) + + context('and upkeep is not needed', () => { + beforeEach(async () => { + await mock.setCanCheck(false) + }) + + it('reverts', async () => { + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'UpkeepNotNeeded()', + ) + }) + }) + + context('and the upkeep check fails', () => { + beforeEach(async () => { + const reverter = await upkeepReverterFactory.deploy() + const tx = await registry + .connect(owner) + .registerUpkeep( + reverter.address, + 2500000, + await admin.getAddress(), + emptyBytes, + ) + id = await getUpkeepID(tx) + await linkToken + .connect(keeper1) + .approve(registry.address, toWei('100')) + await registry.connect(keeper1).addFunds(id, toWei('100')) + }) + + it('reverts', async () => { + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'TargetCheckReverted', + ) + }) + }) + + context('and upkeep check simulations succeeds', () => { + beforeEach(async () => { + await mock.setCanCheck(true) + await mock.setCanPerform(true) + }) + + it('returns true with pricing info if the target can execute', async () => { + const newGasMultiplier = BigNumber.from(10) + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier: newGasMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + const response = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()) + 
assert.isTrue(response.gasLimit.eq(executeGas)) + assert.isTrue(response.linkEth.eq(linkEth)) + assert.isTrue( + response.adjustedGasWei.eq(gasWei.mul(newGasMultiplier)), + ) + assert.isTrue( + response.maxLinkPayment.eq( + linkForGas(executeGas.toNumber()).mul(newGasMultiplier), + ), + ) + }) + + it('has a large enough gas overhead to cover upkeeps that use all their gas [ @skip-coverage ]', async () => { + await mock.setCheckGasToBurn(checkGasLimit) + await mock.setPerformGasToBurn(executeGas) + const gas = checkGasLimit + .add(executeGas) + .add(PERFORM_GAS_OVERHEAD) + .add(CHECK_GAS_OVERHEAD) + await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress(), { + gasLimit: gas, + }) + }) + }) + }) + }) + + describe('#performUpkeep', () => { + let _lastKeeper = keeper1 + + async function getPerformPaymentAmount() { + _lastKeeper = _lastKeeper === keeper1 ? keeper2 : keeper1 + const before = ( + await registry.getKeeperInfo(await _lastKeeper.getAddress()) + ).balance + await registry.connect(_lastKeeper).performUpkeep(id, '0x') + const after = ( + await registry.getKeeperInfo(await _lastKeeper.getAddress()) + ).balance + const difference = after.sub(before) + return difference + } + + it('reverts if the registration is not funded', async () => { + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'InsufficientFunds()', + ) + }) + + context('and the registry is paused', () => { + beforeEach(async () => { + await registry.connect(owner).pause() + }) + + it('reverts', async () => { + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'Pausable: paused', + ) + }) + }) + + context('when the registration is funded', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + }) + + it('does not revert if the target cannot execute', async () => { + const mockResponse = await mock + 
.connect(zeroAddress) + .callStatic.checkUpkeep('0x') + assert.isFalse(mockResponse.callable) + + await registry.connect(keeper3).performUpkeep(id, '0x') + }) + + it('returns false if the target cannot execute', async () => { + const mockResponse = await mock + .connect(zeroAddress) + .callStatic.checkUpkeep('0x') + assert.isFalse(mockResponse.callable) + + assert.isFalse( + await registry.connect(keeper1).callStatic.performUpkeep(id, '0x'), + ) + }) + + it('returns true if called', async () => { + await mock.setCanPerform(true) + + const response = await registry + .connect(keeper1) + .callStatic.performUpkeep(id, '0x') + assert.isTrue(response) + }) + + it('reverts if not enough gas supplied', async () => { + await mock.setCanPerform(true) + + await evmRevert( + registry + .connect(keeper1) + .performUpkeep(id, '0x', { gasLimit: BigNumber.from('120000') }), + ) + }) + + it('executes the data passed to the registry', async () => { + await mock.setCanPerform(true) + + const performData = '0xc0ffeec0ffee' + const tx = await registry + .connect(keeper1) + .performUpkeep(id, performData, { gasLimit: extraGas }) + const receipt = await tx.wait() + const eventLog = receipt?.events + + assert.equal(eventLog?.length, 2) + assert.equal(eventLog?.[1].event, 'UpkeepPerformed') + expect(eventLog?.[1].args?.[0]).to.equal(id) + assert.equal(eventLog?.[1].args?.[1], true) + assert.equal(eventLog?.[1].args?.[2], await keeper1.getAddress()) + assert.isNotEmpty(eventLog?.[1].args?.[3]) + assert.equal(eventLog?.[1].args?.[4], performData) + }) + + it('updates payment balances', async () => { + const keeperBefore = await registry.getKeeperInfo( + await keeper1.getAddress(), + ) + const registrationBefore = await registry.getUpkeep(id) + const keeperLinkBefore = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkBefore = await linkToken.balanceOf(registry.address) + + // Do the thing + await registry.connect(keeper1).performUpkeep(id, '0x') + + const 
keeperAfter = await registry.getKeeperInfo( + await keeper1.getAddress(), + ) + const registrationAfter = await registry.getUpkeep(id) + const keeperLinkAfter = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkAfter = await linkToken.balanceOf(registry.address) + + assert.isTrue(keeperAfter.balance.gt(keeperBefore.balance)) + assert.isTrue(registrationBefore.balance.gt(registrationAfter.balance)) + assert.isTrue(keeperLinkAfter.eq(keeperLinkBefore)) + assert.isTrue(registryLinkBefore.eq(registryLinkAfter)) + }) + + it('updates amount spent correctly', async () => { + const registrationBefore = await registry.getUpkeep(id) + const balanceBefore = registrationBefore.balance + const amountSpentBefore = registrationBefore.amountSpent + + // Do the thing + await registry.connect(keeper1).performUpkeep(id, '0x') + + const registrationAfter = await registry.getUpkeep(id) + const balanceAfter = registrationAfter.balance + const amountSpentAfter = registrationAfter.amountSpent + + assert.isTrue(balanceAfter.lt(balanceBefore)) + assert.isTrue(amountSpentAfter.gt(amountSpentBefore)) + assert.isTrue( + amountSpentAfter + .sub(amountSpentBefore) + .eq(balanceBefore.sub(balanceAfter)), + ) + }) + + it('only pays for gas used [ @skip-coverage ]', async () => { + const before = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const tx = await registry.connect(keeper1).performUpkeep(id, '0x') + const receipt = await tx.wait() + const after = (await registry.getKeeperInfo(await keeper1.getAddress())) + .balance + + const max = linkForGas(executeGas.toNumber()) + const totalTx = linkForGas(receipt.gasUsed.toNumber()) + const difference = after.sub(before) + assert.isTrue(max.gt(totalTx)) + assert.isTrue(totalTx.gt(difference)) + assert.isTrue(linkForGas(5700).lt(difference)) // exact number is flaky + assert.isTrue(linkForGas(6000).gt(difference)) // instead test a range + }) + + it('only pays at a rate up to the gas ceiling [ 
@skip-coverage ]', async () => { + const multiplier = BigNumber.from(10) + const gasPrice = BigNumber.from('1000000000') // 10M x the gas feed's rate + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier: multiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + + const before = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const tx = await registry + .connect(keeper1) + .performUpkeep(id, '0x', { gasPrice }) + const receipt = await tx.wait() + const after = (await registry.getKeeperInfo(await keeper1.getAddress())) + .balance + + const max = linkForGas(executeGas).mul(multiplier) + const totalTx = linkForGas(receipt.gasUsed).mul(multiplier) + const difference = after.sub(before) + assert.isTrue(max.gt(totalTx)) + assert.isTrue(totalTx.gt(difference)) + assert.isTrue(linkForGas(5700).mul(multiplier).lt(difference)) + assert.isTrue(linkForGas(6000).mul(multiplier).gt(difference)) + }) + + it('only pays as much as the node spent [ @skip-coverage ]', async () => { + const multiplier = BigNumber.from(10) + const gasPrice = BigNumber.from(200) // 2X the gas feed's rate + const effectiveMultiplier = BigNumber.from(2) + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier: multiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + + const before = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const tx = await registry + .connect(keeper1) + .performUpkeep(id, '0x', { gasPrice }) + const receipt = await tx.wait() + const after = (await registry.getKeeperInfo(await 
keeper1.getAddress())) + .balance + + const max = linkForGas(executeGas.toNumber()).mul(effectiveMultiplier) + const totalTx = linkForGas(receipt.gasUsed).mul(effectiveMultiplier) + const difference = after.sub(before) + assert.isTrue(max.gt(totalTx)) + assert.isTrue(totalTx.gt(difference)) + assert.isTrue(linkForGas(5700).mul(effectiveMultiplier).lt(difference)) + assert.isTrue(linkForGas(6000).mul(effectiveMultiplier).gt(difference)) + }) + + it('pays the caller even if the target function fails', async () => { + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const id = await getUpkeepID(tx) + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + const keeperBalanceBefore = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + + // Do the thing + await registry.connect(keeper1).performUpkeep(id, '0x') + + const keeperBalanceAfter = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + assert.isTrue(keeperBalanceAfter.gt(keeperBalanceBefore)) + }) + + it('reverts if called by a non-keeper', async () => { + await evmRevert( + registry.connect(nonkeeper).performUpkeep(id, '0x'), + 'OnlyActiveKeepers()', + ) + }) + + it('reverts if the upkeep has been canceled', async () => { + await mock.setCanPerform(true) + + await registry.connect(owner).cancelUpkeep(id) + + await evmRevert( + registry.connect(keeper1).performUpkeep(id, '0x'), + 'UpkeepNotActive()', + ) + }) + + it('uses the fallback gas price if the feed price is stale [ @skip-coverage ]', async () => { + const normalAmount = await getPerformPaymentAmount() + const roundId = 99 + const answer = 100 + const updatedAt = 946684800 // New Years 2000 🥳 + const startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, answer, updatedAt, startedAt) + const amountWithStaleFeed = 
await getPerformPaymentAmount() + assert.isTrue(normalAmount.lt(amountWithStaleFeed)) + }) + + it('uses the fallback gas price if the feed price is non-sensical [ @skip-coverage ]', async () => { + const normalAmount = await getPerformPaymentAmount() + const roundId = 99 + const currentBlockNum = await ethers.provider.getBlockNumber() + const currentBlock = await ethers.provider.getBlock(currentBlockNum) + const updatedAt = currentBlock.timestamp + const startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, -100, updatedAt, startedAt) + const amountWithNegativeFeed = await getPerformPaymentAmount() + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, 0, updatedAt, startedAt) + const amountWithZeroFeed = await getPerformPaymentAmount() + assert.isTrue(normalAmount.lt(amountWithNegativeFeed)) + assert.isTrue(normalAmount.lt(amountWithZeroFeed)) + }) + + it('uses the fallback if the link price feed is stale', async () => { + const normalAmount = await getPerformPaymentAmount() + const roundId = 99 + const answer = 100 + const updatedAt = 946684800 // New Years 2000 🥳 + const startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, answer, updatedAt, startedAt) + const amountWithStaleFeed = await getPerformPaymentAmount() + assert.isTrue(normalAmount.lt(amountWithStaleFeed)) + }) + + it('uses the fallback link price if the feed price is non-sensical [ @skip-coverage ]', async () => { + const normalAmount = await getPerformPaymentAmount() + const roundId = 99 + const currentBlockNum = await ethers.provider.getBlockNumber() + const currentBlock = await ethers.provider.getBlock(currentBlockNum) + const updatedAt = currentBlock.timestamp + const startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, -100, updatedAt, startedAt) + const amountWithNegativeFeed = await getPerformPaymentAmount() + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, 0, 
updatedAt, startedAt) + const amountWithZeroFeed = await getPerformPaymentAmount() + assert.isTrue(normalAmount.lt(amountWithNegativeFeed)) + assert.isTrue(normalAmount.lt(amountWithZeroFeed)) + }) + + it('reverts if the same caller calls twice in a row', async () => { + await registry.connect(keeper1).performUpkeep(id, '0x') + await evmRevert( + registry.connect(keeper1).performUpkeep(id, '0x'), + 'KeepersMustTakeTurns()', + ) + await registry.connect(keeper2).performUpkeep(id, '0x') + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'KeepersMustTakeTurns()', + ) + await registry.connect(keeper1).performUpkeep(id, '0x') + }) + + it('has a large enough gas overhead to cover upkeeps that use all their gas [ @skip-coverage ]', async () => { + await mock.setPerformGasToBurn(executeGas) + await mock.setCanPerform(true) + const gas = executeGas.add(PERFORM_GAS_OVERHEAD) + const performData = '0xc0ffeec0ffee' + const tx = await registry + .connect(keeper1) + .performUpkeep(id, performData, { gasLimit: gas }) + const receipt = await tx.wait() + const eventLog = receipt?.events + + assert.equal(eventLog?.length, 2) + assert.equal(eventLog?.[1].event, 'UpkeepPerformed') + expect(eventLog?.[1].args?.[0]).to.equal(id) + assert.equal(eventLog?.[1].args?.[1], true) + assert.equal(eventLog?.[1].args?.[2], await keeper1.getAddress()) + assert.isNotEmpty(eventLog?.[1].args?.[3]) + assert.equal(eventLog?.[1].args?.[4], performData) + }) + + it('can self fund', async () => { + const autoFunderUpkeep = await upkeepAutoFunderFactory + .connect(owner) + .deploy(linkToken.address, registry.address) + const tx = await registry + .connect(owner) + .registerUpkeep( + autoFunderUpkeep.address, + executeGas, + autoFunderUpkeep.address, + emptyBytes, + ) + const upkeepID = await getUpkeepID(tx) + await autoFunderUpkeep.setUpkeepId(upkeepID) + // Give enough funds for upkeep as well as to the upkeep contract + await linkToken.connect(owner).approve(registry.address, 
toWei('1000')) + await linkToken + .connect(owner) + .transfer(autoFunderUpkeep.address, toWei('1000')) + let maxPayment = await registry.getMaxPaymentForGas(executeGas) + + // First set auto funding amount to 0 and verify that balance is deducted upon performUpkeep + let initialBalance = toWei('100') + await registry.connect(owner).addFunds(upkeepID, initialBalance) + await autoFunderUpkeep.setAutoFundLink(0) + await autoFunderUpkeep.setIsEligible(true) + await registry.connect(keeper1).performUpkeep(upkeepID, '0x') + + let postUpkeepBalance = (await registry.getUpkeep(upkeepID)).balance + assert.isTrue(postUpkeepBalance.lt(initialBalance)) // Balance should be deducted + assert.isTrue(postUpkeepBalance.gte(initialBalance.sub(maxPayment))) // Balance should not be deducted more than maxPayment + + // Now set auto funding amount to 100 wei and verify that the balance increases + initialBalance = postUpkeepBalance + let autoTopupAmount = toWei('100') + await autoFunderUpkeep.setAutoFundLink(autoTopupAmount) + await autoFunderUpkeep.setIsEligible(true) + await registry.connect(keeper2).performUpkeep(upkeepID, '0x') + + postUpkeepBalance = (await registry.getUpkeep(upkeepID)).balance + // Balance should increase by autoTopupAmount and decrease by max maxPayment + assert.isTrue( + postUpkeepBalance.gte( + initialBalance.add(autoTopupAmount).sub(maxPayment), + ), + ) + }) + + it('can self cancel', async () => { + const autoFunderUpkeep = await upkeepAutoFunderFactory + .connect(owner) + .deploy(linkToken.address, registry.address) + const tx = await registry + .connect(owner) + .registerUpkeep( + autoFunderUpkeep.address, + executeGas, + autoFunderUpkeep.address, + emptyBytes, + ) + const upkeepID = await getUpkeepID(tx) + await autoFunderUpkeep.setUpkeepId(upkeepID) + + await linkToken.connect(owner).approve(registry.address, toWei('1000')) + await registry.connect(owner).addFunds(upkeepID, toWei('100')) + await autoFunderUpkeep.setIsEligible(true) + await 
autoFunderUpkeep.setShouldCancel(true) + + let registration = await registry.getUpkeep(upkeepID) + const oldExpiration = registration.maxValidBlocknumber + + // Do the thing + await registry.connect(keeper1).performUpkeep(upkeepID, '0x') + + // Verify upkeep gets cancelled + registration = await registry.getUpkeep(upkeepID) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) + }) + }) + + describe('#withdrawFunds', () => { + beforeEach(async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + await registry.connect(keeper1).addFunds(id, toWei('100')) + await registry.connect(keeper1).performUpkeep(id, '0x') + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry + .connect(owner) + .withdrawFunds(id.add(1), await payee1.getAddress()), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if called on an uncanceled upkeep', async () => { + await evmRevert( + registry.connect(admin).withdrawFunds(id, await payee1.getAddress()), + 'UpkeepNotCanceled()', + ) + }) + + it('reverts if called with the 0 address', async () => { + await evmRevert( + registry.connect(admin).withdrawFunds(id, zeroAddress), + 'InvalidRecipient()', + ) + }) + + describe('after the registration is cancelled', () => { + beforeEach(async () => { + await registry.connect(owner).cancelUpkeep(id) + }) + + it('moves the funds out and updates the balance', async () => { + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const registryBefore = await linkToken.balanceOf(registry.address) + + let registration = await registry.getUpkeep(id) + let previousBalance = registration.balance + + await registry + .connect(admin) + .withdrawFunds(id, await payee1.getAddress()) + + const payee1After = await linkToken.balanceOf(await payee1.getAddress()) + const registryAfter = await linkToken.balanceOf(registry.address) + + 
assert.isTrue(payee1Before.add(previousBalance).eq(payee1After)) + assert.isTrue(registryBefore.sub(previousBalance).eq(registryAfter)) + + registration = await registry.getUpkeep(id) + assert.equal(0, registration.balance.toNumber()) + }) + + it('deducts cancellation fees from upkeep and gives to owner', async () => { + let minUpkeepSpend = toWei('10') + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + let upkeepBefore = (await registry.getUpkeep(id)).balance + let ownerBefore = (await registry.getState()).state.ownerLinkBalance + assert.equal(0, ownerBefore.toNumber()) + + let amountSpent = toWei('100').sub(upkeepBefore) + let cancellationFee = minUpkeepSpend.sub(amountSpent) + + await registry + .connect(admin) + .withdrawFunds(id, await payee1.getAddress()) + + const payee1After = await linkToken.balanceOf(await payee1.getAddress()) + let upkeepAfter = (await registry.getUpkeep(id)).balance + let ownerAfter = (await registry.getState()).state.ownerLinkBalance + + // Post upkeep balance should be 0 + assert.equal(0, upkeepAfter.toNumber()) + // balance - cancellationFee should be transferred to payee + assert.isTrue( + payee1Before.add(upkeepBefore.sub(cancellationFee)).eq(payee1After), + ) + assert.isTrue(ownerAfter.eq(cancellationFee)) + }) + + it('deducts max upto balance as cancellation fees', async () => { + // Very high min spend, should deduct whole balance as cancellation fees + let minUpkeepSpend = toWei('1000') + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + 
maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + let upkeepBefore = (await registry.getUpkeep(id)).balance + let ownerBefore = (await registry.getState()).state.ownerLinkBalance + assert.equal(0, ownerBefore.toNumber()) + + await registry + .connect(admin) + .withdrawFunds(id, await payee1.getAddress()) + const payee1After = await linkToken.balanceOf(await payee1.getAddress()) + let ownerAfter = (await registry.getState()).state.ownerLinkBalance + let upkeepAfter = (await registry.getUpkeep(id)).balance + + assert.equal(0, upkeepAfter.toNumber()) + // No funds should be transferred, all of upkeepBefore should be given to owner + assert.isTrue(payee1After.eq(payee1Before)) + assert.isTrue(ownerAfter.eq(upkeepBefore)) + }) + + it('does not deduct cancellation fees if enough is spent', async () => { + // Very low min spend, already spent in one upkeep + let minUpkeepSpend = BigNumber.from(420) + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + let upkeepBefore = (await registry.getUpkeep(id)).balance + let ownerBefore = (await registry.getState()).state.ownerLinkBalance + assert.equal(0, ownerBefore.toNumber()) + + await registry + .connect(admin) + .withdrawFunds(id, await payee1.getAddress()) + const payee1After = await linkToken.balanceOf(await payee1.getAddress()) + let ownerAfter = (await registry.getState()).state.ownerLinkBalance + let upkeepAfter = (await registry.getUpkeep(id)).balance + + assert.equal(0, upkeepAfter.toNumber()) + // No 
cancellation fees for owner + assert.equal(0, ownerAfter.toNumber()) + // Whole balance transferred to payee + assert.isTrue(payee1Before.add(upkeepBefore).eq(payee1After)) + }) + }) + }) + + describe('#withdrawOwnerFunds', () => { + it('can only be called by owner', async () => { + await evmRevert( + registry.connect(keeper1).withdrawOwnerFunds(), + 'Only callable by owner', + ) + }) + + it('withdraws the collected fees to owner', async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + await registry.connect(keeper1).addFunds(id, toWei('100')) + // Very high min spend, whole balance as cancellation fees + let minUpkeepSpend = toWei('1000') + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + let upkeepBalance = (await registry.getUpkeep(id)).balance + const ownerBefore = await linkToken.balanceOf(await owner.getAddress()) + + await registry.connect(owner).cancelUpkeep(id) + await registry.connect(admin).withdrawFunds(id, await payee1.getAddress()) + // Transfered to owner balance on registry + let ownerRegistryBalance = (await registry.getState()).state + .ownerLinkBalance + assert.isTrue(ownerRegistryBalance.eq(upkeepBalance)) + + // Now withdraw + await registry.connect(owner).withdrawOwnerFunds() + + ownerRegistryBalance = (await registry.getState()).state.ownerLinkBalance + const ownerAfter = await linkToken.balanceOf(await owner.getAddress()) + + // Owner registry balance should be changed to 0 + assert.isTrue(ownerRegistryBalance.eq(BigNumber.from('0'))) + + // Owner should be credited with the balance + assert.isTrue(ownerBefore.add(upkeepBalance).eq(ownerAfter)) + }) + }) + + describe('#cancelUpkeep', () => { + it('reverts if the ID is not valid', async 
() => { + await evmRevert( + registry.connect(owner).cancelUpkeep(id.add(1)), + 'CannotCancel()', + ) + }) + + it('reverts if called by a non-owner/non-admin', async () => { + await evmRevert( + registry.connect(keeper1).cancelUpkeep(id), + 'OnlyCallableByOwnerOrAdmin()', + ) + }) + + describe('when called by the owner', async () => { + it('sets the registration to invalid immediately', async () => { + const tx = await registry.connect(owner).cancelUpkeep(id) + const receipt = await tx.wait() + const registration = await registry.getUpkeep(id) + assert.equal( + registration.maxValidBlocknumber.toNumber(), + receipt.blockNumber, + ) + }) + + it('emits an event', async () => { + const tx = await registry.connect(owner).cancelUpkeep(id) + const receipt = await tx.wait() + await expect(tx) + .to.emit(registry, 'UpkeepCanceled') + .withArgs(id, BigNumber.from(receipt.blockNumber)) + }) + + it('immediately prevents upkeep', async () => { + await registry.connect(owner).cancelUpkeep(id) + + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'UpkeepNotActive()', + ) + }) + + it('does not revert if reverts if called multiple times', async () => { + await registry.connect(owner).cancelUpkeep(id) + await evmRevert( + registry.connect(owner).cancelUpkeep(id), + 'CannotCancel()', + ) + }) + + describe('when called by the owner when the admin has just canceled', () => { + let oldExpiration: BigNumber + + beforeEach(async () => { + await registry.connect(admin).cancelUpkeep(id) + const registration = await registry.getUpkeep(id) + oldExpiration = registration.maxValidBlocknumber + }) + + it('allows the owner to cancel it more quickly', async () => { + await registry.connect(owner).cancelUpkeep(id) + + const registration = await registry.getUpkeep(id) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) + }) + }) + + describe('when called by the admin', async () => { + const delay = 50 + + it('sets the 
registration to invalid in 50 blocks', async () => { + const tx = await registry.connect(admin).cancelUpkeep(id) + const receipt = await tx.wait() + const registration = await registry.getUpkeep(id) + assert.equal( + registration.maxValidBlocknumber.toNumber(), + receipt.blockNumber + 50, + ) + }) + + it('emits an event', async () => { + const tx = await registry.connect(admin).cancelUpkeep(id) + const receipt = await tx.wait() + await expect(tx) + .to.emit(registry, 'UpkeepCanceled') + .withArgs(id, BigNumber.from(receipt.blockNumber + delay)) + }) + + // it('updates the canceled registrations list', async () => { + // let canceled = await registry.callStatic.getCanceledUpkeepList() + // assert.deepEqual([], canceled) + + // await registry.connect(admin).cancelUpkeep(id) + + // canceled = await registry.callStatic.getCanceledUpkeepList() + // assert.deepEqual([id], canceled) + // }) + + it('immediately prevents upkeep', async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + await registry.connect(admin).cancelUpkeep(id) + await registry.connect(keeper2).performUpkeep(id, '0x') // still works + + for (let i = 0; i < delay; i++) { + await ethers.provider.send('evm_mine', []) + } + + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'UpkeepNotActive()', + ) + }) + + it('reverts if called again by the admin', async () => { + await registry.connect(admin).cancelUpkeep(id) + + await evmRevert( + registry.connect(admin).cancelUpkeep(id), + 'CannotCancel()', + ) + }) + + // it('does not revert or double add the cancellation record if called by the owner immediately after', async () => { + // await registry.connect(admin).cancelUpkeep(id) + + // await registry.connect(owner).cancelUpkeep(id) + + // const canceled = await registry.callStatic.getCanceledUpkeepList() + // assert.deepEqual([id], canceled) + // }) + + it('reverts if called by the owner after the 
timeout', async () => { + await registry.connect(admin).cancelUpkeep(id) + + for (let i = 0; i < delay; i++) { + await ethers.provider.send('evm_mine', []) + } + + await evmRevert( + registry.connect(owner).cancelUpkeep(id), + 'CannotCancel()', + ) + }) + }) + }) + + describe('#withdrawPayment', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + await registry.connect(keeper1).performUpkeep(id, '0x') + }) + + it('reverts if called by anyone but the payee', async () => { + await evmRevert( + registry + .connect(payee2) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ), + 'OnlyCallableByPayee()', + ) + }) + + it('reverts if called with the 0 address', async () => { + await evmRevert( + registry + .connect(payee2) + .withdrawPayment(await keeper1.getAddress(), zeroAddress), + 'InvalidRecipient()', + ) + }) + + it('updates the balances', async () => { + const to = await nonkeeper.getAddress() + const keeperBefore = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const registrationBefore = (await registry.getUpkeep(id)).balance + const toLinkBefore = await linkToken.balanceOf(to) + const registryLinkBefore = await linkToken.balanceOf(registry.address) + + //// Do the thing + await registry + .connect(payee1) + .withdrawPayment(await keeper1.getAddress(), to) + + const keeperAfter = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const registrationAfter = (await registry.getUpkeep(id)).balance + const toLinkAfter = await linkToken.balanceOf(to) + const registryLinkAfter = await linkToken.balanceOf(registry.address) + + assert.isTrue(keeperAfter.eq(BigNumber.from(0))) + assert.isTrue(registrationBefore.eq(registrationAfter)) + assert.isTrue(toLinkBefore.add(keeperBefore).eq(toLinkAfter)) + assert.isTrue(registryLinkBefore.sub(keeperBefore).eq(registryLinkAfter)) + }) 
it('emits a log announcing the withdrawal', async () => {
  const balance = (await registry.getKeeperInfo(await keeper1.getAddress()))
    .balance
  const tx = await registry
    .connect(payee1)
    .withdrawPayment(
      await keeper1.getAddress(),
      await nonkeeper.getAddress(),
    )
  await expect(tx)
    .to.emit(registry, 'PaymentWithdrawn')
    .withArgs(
      await keeper1.getAddress(),
      balance,
      await nonkeeper.getAddress(),
      await payee1.getAddress(),
    )
})
})

describe('#transferPayeeship', () => {
  it('reverts when called by anyone but the current payee', async () => {
    await evmRevert(
      registry
        .connect(payee2)
        .transferPayeeship(
          await keeper1.getAddress(),
          await payee2.getAddress(),
        ),
      'OnlyCallableByPayee()',
    )
  })

  it('reverts when transferring to self', async () => {
    await evmRevert(
      registry
        .connect(payee1)
        .transferPayeeship(
          await keeper1.getAddress(),
          await payee1.getAddress(),
        ),
      'ValueNotChanged()',
    )
  })

  it('does not change the payee', async () => {
    // transferPayeeship only proposes — the payee changes on acceptPayeeship
    await registry
      .connect(payee1)
      .transferPayeeship(
        await keeper1.getAddress(),
        await payee2.getAddress(),
      )

    const info = await registry.getKeeperInfo(await keeper1.getAddress())
    assert.equal(await payee1.getAddress(), info.payee)
  })

  it('emits an event announcing the new payee', async () => {
    const tx = await registry
      .connect(payee1)
      .transferPayeeship(
        await keeper1.getAddress(),
        await payee2.getAddress(),
      )
    await expect(tx)
      .to.emit(registry, 'PayeeshipTransferRequested')
      .withArgs(
        await keeper1.getAddress(),
        await payee1.getAddress(),
        await payee2.getAddress(),
      )
  })

  it('does not emit an event when called with the same proposal', async () => {
    await registry
      .connect(payee1)
      .transferPayeeship(
        await keeper1.getAddress(),
        await payee2.getAddress(),
      )

    const tx = await registry
      .connect(payee1)
      .transferPayeeship(
        await keeper1.getAddress(),
        await payee2.getAddress(),
      )
    const receipt = await tx.wait()
    assert.equal(0, receipt.logs.length)
  })
})

describe('#acceptPayeeship', () => {
  beforeEach(async () => {
    // propose payee2 as the new payee for keeper1
    await registry
      .connect(payee1)
      .transferPayeeship(
        await keeper1.getAddress(),
        await payee2.getAddress(),
      )
  })

  it('reverts when called by anyone but the proposed payee', async () => {
    await evmRevert(
      registry.connect(payee1).acceptPayeeship(await keeper1.getAddress()),
      'OnlyCallableByProposedPayee()',
    )
  })

  it('emits an event announcing the new payee', async () => {
    const tx = await registry
      .connect(payee2)
      .acceptPayeeship(await keeper1.getAddress())
    await expect(tx)
      .to.emit(registry, 'PayeeshipTransferred')
      .withArgs(
        await keeper1.getAddress(),
        await payee1.getAddress(),
        await payee2.getAddress(),
      )
  })

  it('does change the payee', async () => {
    await registry.connect(payee2).acceptPayeeship(await keeper1.getAddress())

    const info = await registry.getKeeperInfo(await keeper1.getAddress())
    assert.equal(await payee2.getAddress(), info.payee)
  })
})

describe('#setConfig', () => {
  // distinct sentinel values so each updated field is individually checkable
  const payment = BigNumber.from(1)
  const flatFee = BigNumber.from(2)
  const checks = BigNumber.from(3)
  const staleness = BigNumber.from(4)
  const ceiling = BigNumber.from(5)
  const maxGas = BigNumber.from(6)
  const fbGasEth = BigNumber.from(7)
  const fbLinkEth = BigNumber.from(8)

  // FIX(review): title previously said 'proposed owner' — setConfig has no
  // proposed-owner concept; the assertion is the plain owner check.
  it('reverts when called by anyone but the owner', async () => {
    await evmRevert(
      registry.connect(payee1).setConfig({
        paymentPremiumPPB: payment,
        flatFeeMicroLink: flatFee,
        blockCountPerTurn: checks,
        checkGasLimit: maxGas,
        stalenessSeconds: staleness,
        gasCeilingMultiplier,
        minUpkeepSpend,
        maxPerformGas,
        fallbackGasPrice: fbGasEth,
        fallbackLinkPrice: fbLinkEth,
        transcoder: transcoder.address,
        registrar: ethers.constants.AddressZero,
      }),
      'Only callable by owner',
    )
  })

  it('updates the config', async () => {
    const old = (await registry.getState()).config
    // sanity-check the starting config is the deployment config
    assert.isTrue(paymentPremiumPPB.eq(old.paymentPremiumPPB))
    assert.isTrue(flatFeeMicroLink.eq(old.flatFeeMicroLink))
    assert.isTrue(blockCountPerTurn.eq(old.blockCountPerTurn))
    assert.isTrue(stalenessSeconds.eq(old.stalenessSeconds))
    assert.isTrue(gasCeilingMultiplier.eq(old.gasCeilingMultiplier))

    await registry.connect(owner).setConfig({
      paymentPremiumPPB: payment,
      flatFeeMicroLink: flatFee,
      blockCountPerTurn: checks,
      checkGasLimit: maxGas,
      stalenessSeconds: staleness,
      gasCeilingMultiplier: ceiling,
      minUpkeepSpend,
      maxPerformGas,
      fallbackGasPrice: fbGasEth,
      fallbackLinkPrice: fbLinkEth,
      transcoder: transcoder.address,
      registrar: ethers.constants.AddressZero,
    })

    const updated = (await registry.getState()).config
    assert.equal(updated.paymentPremiumPPB, payment.toNumber())
    assert.equal(updated.flatFeeMicroLink, flatFee.toNumber())
    assert.equal(updated.blockCountPerTurn, checks.toNumber())
    assert.equal(updated.stalenessSeconds, staleness.toNumber())
    assert.equal(updated.gasCeilingMultiplier, ceiling.toNumber())
    assert.equal(updated.checkGasLimit, maxGas.toNumber())
    assert.equal(updated.fallbackGasPrice.toNumber(), fbGasEth.toNumber())
    assert.equal(updated.fallbackLinkPrice.toNumber(), fbLinkEth.toNumber())
  })

  it('emits an event', async () => {
    const tx = await registry.connect(owner).setConfig({
      paymentPremiumPPB: payment,
      flatFeeMicroLink: flatFee,
      blockCountPerTurn: checks,
      checkGasLimit: maxGas,
      stalenessSeconds: staleness,
      gasCeilingMultiplier: ceiling,
      minUpkeepSpend,
      maxPerformGas,
      fallbackGasPrice: fbGasEth,
      fallbackLinkPrice: fbLinkEth,
      transcoder: transcoder.address,
      registrar: ethers.constants.AddressZero,
    })
    await expect(tx)
      .to.emit(registry, 'ConfigSet')
      .withArgs([
        payment,
        flatFee,
        checks,
        maxGas,
        staleness,
        ceiling,
        minUpkeepSpend,
        maxPerformGas,
        fbGasEth,
        fbLinkEth,
      ])
  })
})
describe('#onTokenTransfer', () => { + const amount = toWei('1') + + it('reverts if not called by the PLI token', async () => { + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [id]) + + await evmRevert( + registry + .connect(keeper1) + .onTokenTransfer(await keeper1.getAddress(), amount, data), + 'OnlyCallableByPLIToken()', + ) + }) + + it('reverts if not called with more or less than 32 bytes', async () => { + const longData = ethers.utils.defaultAbiCoder.encode( + ['uint256', 'uint256'], + ['33', '34'], + ) + const shortData = '0x12345678' + + await evmRevert( + linkToken + .connect(owner) + .transferAndCall(registry.address, amount, longData), + ) + await evmRevert( + linkToken + .connect(owner) + .transferAndCall(registry.address, amount, shortData), + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(id) + await evmRevert( + registry.connect(keeper1).addFunds(id, amount), + 'UpkeepNotActive()', + ) + }) + + it('updates the funds of the job id passed', async () => { + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [id]) + + const before = (await registry.getUpkeep(id)).balance + await linkToken + .connect(owner) + .transferAndCall(registry.address, amount, data) + const after = (await registry.getUpkeep(id)).balance + + assert.isTrue(before.add(amount).eq(after)) + }) + }) + + describe('#recoverFunds', () => { + const sent = toWei('7') + + beforeEach(async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + + // add funds to upkeep 1 and perform and withdraw some payment + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const id1 = await getUpkeepID(tx) + await registry.connect(keeper1).addFunds(id1, toWei('5')) + await registry.connect(keeper1).performUpkeep(id1, '0x') + await registry.connect(keeper2).performUpkeep(id1, '0x') + await 
registry.connect(keeper3).performUpkeep(id1, '0x') + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + + // transfer funds directly to the registry + await linkToken.connect(keeper1).transfer(registry.address, sent) + + // add funds to upkeep 2 and perform and withdraw some payment + const tx2 = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const id2 = await getUpkeepID(tx2) + await registry.connect(keeper1).addFunds(id2, toWei('5')) + await registry.connect(keeper1).performUpkeep(id2, '0x') + await registry.connect(keeper2).performUpkeep(id2, '0x') + await registry.connect(keeper3).performUpkeep(id2, '0x') + await registry + .connect(payee2) + .withdrawPayment( + await keeper2.getAddress(), + await nonkeeper.getAddress(), + ) + + // transfer funds using onTokenTransfer + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [id2]) + await linkToken + .connect(owner) + .transferAndCall(registry.address, toWei('1'), data) + + // remove a keeper + await registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper2.getAddress()], + [await payee1.getAddress(), await payee2.getAddress()], + ) + + // withdraw some funds + await registry.connect(owner).cancelUpkeep(id1) + await registry.connect(admin).withdrawFunds(id1, await admin.getAddress()) + }) + + it('reverts if not called by owner', async () => { + await evmRevert( + registry.connect(keeper1).recoverFunds(), + 'Only callable by owner', + ) + }) + + it('allows any funds that have been accidentally transfered to be moved', async () => { + const balanceBefore = await linkToken.balanceOf(registry.address) + + await linkToken.balanceOf(registry.address) + + await registry.connect(owner).recoverFunds() + const balanceAfter = await linkToken.balanceOf(registry.address) + assert.isTrue(balanceBefore.eq(balanceAfter.add(sent))) + }) + }) 
+ + describe('#pause', () => { + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry.connect(keeper1).pause(), + 'Only callable by owner', + ) + }) + + it('marks the contract as paused', async () => { + assert.isFalse(await registry.paused()) + + await registry.connect(owner).pause() + + assert.isTrue(await registry.paused()) + }) + }) + + describe('#unpause', () => { + beforeEach(async () => { + await registry.connect(owner).pause() + }) + + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry.connect(keeper1).unpause(), + 'Only callable by owner', + ) + }) + + it('marks the contract as not paused', async () => { + assert.isTrue(await registry.paused()) + + await registry.connect(owner).unpause() + + assert.isFalse(await registry.paused()) + }) + }) + + describe('#getMaxPaymentForGas', () => { + const gasAmounts = [100000, 10000000] + const premiums = [0, 250000000] + const flatFees = [0, 1000000] + it('calculates the max fee approptiately', async () => { + for (let idx = 0; idx < gasAmounts.length; idx++) { + const gas = gasAmounts[idx] + for (let jdx = 0; jdx < premiums.length; jdx++) { + const premium = premiums[jdx] + for (let kdx = 0; kdx < flatFees.length; kdx++) { + const flatFee = flatFees[kdx] + await registry.connect(owner).setConfig({ + paymentPremiumPPB: premium, + flatFeeMicroLink: flatFee, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + const price = await registry.getMaxPaymentForGas(gas) + expect(price).to.equal(linkForGas(gas, premium, flatFee)) + } + } + } + }) + }) + + describe('#setPeerRegistryMigrationPermission() / #getPeerRegistryMigrationPermission()', () => { + const peer = randomAddress() + it('allows the owner to set the peer registries', async () => { + let permission = await 
registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(0) + await registry.setPeerRegistryMigrationPermission(peer, 1) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(1) + await registry.setPeerRegistryMigrationPermission(peer, 2) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(2) + await registry.setPeerRegistryMigrationPermission(peer, 0) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(0) + }) + it('reverts if passed an unsupported permission', async () => { + await expect( + registry.connect(admin).setPeerRegistryMigrationPermission(peer, 10), + ).to.be.reverted + }) + it('reverts if not called by the owner', async () => { + await expect( + registry.connect(admin).setPeerRegistryMigrationPermission(peer, 1), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('migrateUpkeeps() / #receiveUpkeeps()', async () => { + context('when permissions are set', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + await registry.setPeerRegistryMigrationPermission(registry2.address, 1) + await registry2.setPeerRegistryMigrationPermission(registry.address, 2) + }) + + it('migrates an upkeep', async () => { + expect((await registry.getUpkeep(id)).balance).to.equal(toWei('100')) + expect((await registry.getUpkeep(id)).checkData).to.equal(randomBytes) + expect((await registry.getState()).state.numUpkeeps).to.equal(1) + // migrate + await registry.connect(admin).migrateUpkeeps([id], registry2.address) + expect((await registry.getState()).state.numUpkeeps).to.equal(0) + expect((await registry2.getState()).state.numUpkeeps).to.equal(1) + expect((await registry.getUpkeep(id)).balance).to.equal(0) + expect((await registry.getUpkeep(id)).checkData).to.equal('0x') + 
expect((await registry2.getUpkeep(id)).balance).to.equal(toWei('100')) + expect((await registry2.getState()).state.expectedLinkBalance).to.equal( + toWei('100'), + ) + expect((await registry2.getUpkeep(id)).checkData).to.equal(randomBytes) + }) + it('emits an event on both contracts', async () => { + expect((await registry.getUpkeep(id)).balance).to.equal(toWei('100')) + expect((await registry.getUpkeep(id)).checkData).to.equal(randomBytes) + expect((await registry.getState()).state.numUpkeeps).to.equal(1) + const tx = registry + .connect(admin) + .migrateUpkeeps([id], registry2.address) + await expect(tx) + .to.emit(registry, 'UpkeepMigrated') + .withArgs(id, toWei('100'), registry2.address) + await expect(tx) + .to.emit(registry2, 'UpkeepReceived') + .withArgs(id, toWei('100'), registry.address) + }) + it('is only migratable by the admin', async () => { + await expect( + registry.connect(owner).migrateUpkeeps([id], registry2.address), + ).to.be.revertedWith('OnlyCallableByAdmin()') + await registry.connect(admin).migrateUpkeeps([id], registry2.address) + }) + }) + + context('when permissions are not set', () => { + it('reverts', async () => { + // no permissions + await registry.setPeerRegistryMigrationPermission(registry2.address, 0) + await registry2.setPeerRegistryMigrationPermission(registry.address, 0) + await expect(registry.migrateUpkeeps([id], registry2.address)).to.be + .reverted + // only outgoing permissions + await registry.setPeerRegistryMigrationPermission(registry2.address, 1) + await registry2.setPeerRegistryMigrationPermission(registry.address, 0) + await expect(registry.migrateUpkeeps([id], registry2.address)).to.be + .reverted + // only incoming permissions + await registry.setPeerRegistryMigrationPermission(registry2.address, 0) + await registry2.setPeerRegistryMigrationPermission(registry.address, 2) + await expect(registry.migrateUpkeeps([id], registry2.address)).to.be + .reverted + // permissions opposite direction + await 
registry.setPeerRegistryMigrationPermission(registry2.address, 2) + await registry2.setPeerRegistryMigrationPermission(registry.address, 1) + await expect(registry.migrateUpkeeps([id], registry2.address)).to.be + .reverted + }) + }) + }) + + describe('#checkUpkeep / #performUpkeep', () => { + const performData = '0xc0ffeec0ffee' + const multiplier = BigNumber.from(10) + const flatFee = BigNumber.from('100000') //0.1 PLI + const callGasPrice = 1 + + it('uses the same minimum balance calculation [ @skip-coverage ]', async () => { + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink: flatFee, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier: multiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + await linkToken.connect(owner).approve(registry.address, toWei('100')) + + const tx1 = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const upkeepID1 = await getUpkeepID(tx1) + const tx2 = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const upkeepID2 = await getUpkeepID(tx2) + await mock.setCanCheck(true) + await mock.setCanPerform(true) + // upkeep 1 is underfunded, 2 is funded + const minBalance1 = (await registry.getMaxPaymentForGas(executeGas)).sub( + 1, + ) + const minBalance2 = await registry.getMaxPaymentForGas(executeGas) + await registry.connect(owner).addFunds(upkeepID1, minBalance1) + await registry.connect(owner).addFunds(upkeepID2, minBalance2) + // upkeep 1 check should revert, 2 should succeed + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepID1, await keeper1.getAddress(), { + gasPrice: callGasPrice, + }), + ) + await registry + .connect(zeroAddress) + 
.callStatic.checkUpkeep(upkeepID2, await keeper1.getAddress(), { + gasPrice: callGasPrice, + }) + // upkeep 1 perform should revert, 2 should succeed + await evmRevert( + registry + .connect(keeper1) + .performUpkeep(upkeepID1, performData, { gasLimit: extraGas }), + 'InsufficientFunds()', + ) + await registry + .connect(keeper1) + .performUpkeep(upkeepID2, performData, { gasLimit: extraGas }) + }) + }) + + describe('#getMinBalanceForUpkeep / #checkUpkeep', () => { + it('calculates the minimum balance appropriately', async () => { + const oneWei = BigNumber.from('1') + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + await mock.setCanCheck(true) + await mock.setCanPerform(true) + const minBalance = await registry.getMinBalanceForUpkeep(id) + const tooLow = minBalance.sub(oneWei) + await registry.connect(keeper1).addFunds(id, tooLow) + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'InsufficientFunds()', + ) + await registry.connect(keeper1).addFunds(id, oneWei) + await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/KeeperRegistry1_3.test.ts b/contracts/test/v0.8/automation/KeeperRegistry1_3.test.ts new file mode 100644 index 00000000..9f3e17cc --- /dev/null +++ b/contracts/test/v0.8/automation/KeeperRegistry1_3.test.ts @@ -0,0 +1,2641 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { BigNumber, BigNumberish, Signer } from 'ethers' +import { evmRevert } from '../../test-helpers/matchers' +import { getUsers, Personas } from '../../test-helpers/setup' +import { toWei } from '../../test-helpers/helpers' +import { LinkToken__factory as LinkTokenFactory } from '../../../typechain/factories/LinkToken__factory' +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../../typechain/factories/MockV3Aggregator__factory' +import { 
UpkeepMock__factory as UpkeepMockFactory } from '../../../typechain/factories/UpkeepMock__factory' +import { UpkeepReverter__factory as UpkeepReverterFactory } from '../../../typechain/factories/UpkeepReverter__factory' +import { UpkeepAutoFunder__factory as UpkeepAutoFunderFactory } from '../../../typechain/factories/UpkeepAutoFunder__factory' +import { UpkeepTranscoder__factory as UpkeepTranscoderFactory } from '../../../typechain/factories/UpkeepTranscoder__factory' +import { KeeperRegistry1_3__factory as KeeperRegistryFactory } from '../../../typechain/factories/KeeperRegistry1_3__factory' +import { MockArbGasInfo__factory as MockArbGasInfoFactory } from '../../../typechain/factories/MockArbGasInfo__factory' +import { MockOVMGasPriceOracle__factory as MockOVMGasPriceOracleFactory } from '../../../typechain/factories/MockOVMGasPriceOracle__factory' +import { KeeperRegistryLogic1_3__factory as KeeperRegistryLogicFactory } from '../../../typechain/factories/KeeperRegistryLogic1_3__factory' +import { KeeperRegistry1_3 as KeeperRegistry } from '../../../typechain/KeeperRegistry1_3' +import { KeeperRegistryLogic13 as KeeperRegistryLogic } from '../../../typechain/KeeperRegistryLogic13' +import { MockV3Aggregator } from '../../../typechain/MockV3Aggregator' +import { LinkToken } from '../../../typechain/LinkToken' +import { UpkeepMock } from '../../../typechain/UpkeepMock' +import { MockArbGasInfo } from '../../../typechain/MockArbGasInfo' +import { MockOVMGasPriceOracle } from '../../../typechain/MockOVMGasPriceOracle' +import { UpkeepTranscoder } from '../../../typechain/UpkeepTranscoder' + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +/*********************************** REGISTRY v1.3 IS FROZEN ************************************/ + +// All tests are disabled for this contract, as we expect it to never change 
in the future. +// Instead, we test that the bytecode for the contract has not changed. +// If this test ever fails, you should remove it and then re-run the original test suite. + +const BYTECODE = KeeperRegistryFactory.bytecode +const BYTECODE_CHECKSUM = + '0x7e831ebc4e043fc2946449e11f0d170ba5b6085b213591973c437bc5109b1582' + +describe('KeeperRegistry1_3 - Frozen [ @skip-coverage ]', () => { + it('has not changed', () => { + assert.equal(ethers.utils.id(BYTECODE), BYTECODE_CHECKSUM) + }) +}) + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +async function getUpkeepID(tx: any) { + const receipt = await tx.wait() + return receipt.events[0].args.id +} + +function randomAddress() { + return ethers.Wallet.createRandom().address +} + +// ----------------------------------------------------------------------------------------------- +// DEV: these *should* match the perform/check gas overhead values in the contract and on the node +const PERFORM_GAS_OVERHEAD = BigNumber.from(160000) +const CHECK_GAS_OVERHEAD = BigNumber.from(362287) +// ----------------------------------------------------------------------------------------------- + +// Smart contract factories +let linkTokenFactory: LinkTokenFactory +let mockV3AggregatorFactory: MockV3AggregatorFactory +let keeperRegistryFactory: KeeperRegistryFactory +let keeperRegistryLogicFactory: KeeperRegistryLogicFactory +let upkeepMockFactory: UpkeepMockFactory +let upkeepReverterFactory: UpkeepReverterFactory +let upkeepAutoFunderFactory: UpkeepAutoFunderFactory +let upkeepTranscoderFactory: UpkeepTranscoderFactory +let mockArbGasInfoFactory: MockArbGasInfoFactory +let mockOVMGasPriceOracleFactory: MockOVMGasPriceOracleFactory +let personas: Personas + +before(async () => { + personas = (await getUsers()).personas + + linkTokenFactory = await ethers.getContractFactory( + 
'src/v0.4/LinkToken.sol:LinkToken', + ) + // need full path because there are two contracts with name MockV3Aggregator + mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as unknown as MockV3AggregatorFactory + // @ts-ignore bug in autogen file + keeperRegistryFactory = await ethers.getContractFactory('KeeperRegistry1_3') + // @ts-ignore bug in autogen file + keeperRegistryLogicFactory = await ethers.getContractFactory( + 'KeeperRegistryLogic1_3', + ) + upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') + upkeepReverterFactory = await ethers.getContractFactory('UpkeepReverter') + upkeepAutoFunderFactory = await ethers.getContractFactory('UpkeepAutoFunder') + upkeepTranscoderFactory = await ethers.getContractFactory('UpkeepTranscoder') + mockArbGasInfoFactory = await ethers.getContractFactory('MockArbGasInfo') + mockOVMGasPriceOracleFactory = await ethers.getContractFactory( + 'MockOVMGasPriceOracle', + ) +}) + +describe.skip('KeeperRegistry1_3', () => { + const linkEth = BigNumber.from(300000000) + const gasWei = BigNumber.from(100) + const linkDivisibility = BigNumber.from('1000000000000000000') + const executeGas = BigNumber.from('100000') + const paymentPremiumBase = BigNumber.from('1000000000') + const paymentPremiumPPB = BigNumber.from('250000000') + const premiumMultiplier = BigNumber.from('1000000000') + const flatFeeMicroLink = BigNumber.from(0) + const blockCountPerTurn = BigNumber.from(3) + const emptyBytes = '0x00' + const randomBytes = '0x1234abcd' + const zeroAddress = ethers.constants.AddressZero + const extraGas = BigNumber.from('250000') + const registryGasOverhead = BigNumber.from('80000') + const stalenessSeconds = BigNumber.from(43820) + const gasCeilingMultiplier = BigNumber.from(1) + const checkGasLimit = BigNumber.from(10000000) + const fallbackGasPrice = BigNumber.from(200) + const fallbackLinkPrice = BigNumber.from(200000000) + const maxPerformGas = 
BigNumber.from(5000000) + const minUpkeepSpend = BigNumber.from(0) + const l1CostWeiArb = BigNumber.from(1000000) + const l1CostWeiOpt = BigNumber.from(2000000) + + let owner: Signer + let keeper1: Signer + let keeper2: Signer + let keeper3: Signer + let nonkeeper: Signer + let admin: Signer + let payee1: Signer + let payee2: Signer + let payee3: Signer + + let linkToken: LinkToken + let linkEthFeed: MockV3Aggregator + let gasPriceFeed: MockV3Aggregator + let registry: KeeperRegistry + let registryLogic: KeeperRegistryLogic + let registry2: KeeperRegistry + let registryLogic2: KeeperRegistryLogic + let mock: UpkeepMock + let transcoder: UpkeepTranscoder + let mockArbGasInfo: MockArbGasInfo + let mockOVMGasPriceOracle: MockOVMGasPriceOracle + + let id: BigNumber + let keepers: string[] + let payees: string[] + + beforeEach(async () => { + owner = personas.Default + keeper1 = personas.Carol + keeper2 = personas.Eddy + keeper3 = personas.Nancy + nonkeeper = personas.Ned + admin = personas.Neil + payee1 = personas.Nelly + payee2 = personas.Norbert + payee3 = personas.Nick + + keepers = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + await keeper3.getAddress(), + ] + payees = [ + await payee1.getAddress(), + await payee2.getAddress(), + await payee3.getAddress(), + ] + + linkToken = await linkTokenFactory.connect(owner).deploy() + gasPriceFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(0, gasWei) + linkEthFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(9, linkEth) + transcoder = await upkeepTranscoderFactory.connect(owner).deploy() + mockArbGasInfo = await mockArbGasInfoFactory.connect(owner).deploy() + mockOVMGasPriceOracle = await mockOVMGasPriceOracleFactory + .connect(owner) + .deploy() + + const arbOracleCode = await ethers.provider.send('eth_getCode', [ + mockArbGasInfo.address, + ]) + await ethers.provider.send('hardhat_setCode', [ + '0x000000000000000000000000000000000000006C', + arbOracleCode, + ]) + + const 
optOracleCode = await ethers.provider.send('eth_getCode', [ + mockOVMGasPriceOracle.address, + ]) + await ethers.provider.send('hardhat_setCode', [ + '0x420000000000000000000000000000000000000F', + optOracleCode, + ]) + + registryLogic = await keeperRegistryLogicFactory + .connect(owner) + .deploy( + 0, + registryGasOverhead, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + const config = { + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + } + registry = await keeperRegistryFactory + .connect(owner) + .deploy(registryLogic.address, config) + registryLogic2 = await keeperRegistryLogicFactory + .connect(owner) + .deploy( + 0, + registryGasOverhead, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + registry2 = await keeperRegistryFactory + .connect(owner) + .deploy(registryLogic2.address, config) + mock = await upkeepMockFactory.deploy() + await linkToken + .connect(owner) + .transfer(await keeper1.getAddress(), toWei('1000')) + await linkToken + .connect(owner) + .transfer(await keeper2.getAddress(), toWei('1000')) + await linkToken + .connect(owner) + .transfer(await keeper3.getAddress(), toWei('1000')) + + await registry.connect(owner).setKeepers(keepers, payees) + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + randomBytes, + ) + id = await getUpkeepID(tx) + }) + + const linkForGas = ( + upkeepGasSpent: BigNumberish, + premiumPPB?: BigNumberish, + flatFee?: BigNumberish, + l1CostWei?: BigNumber, + ) => { + premiumPPB = premiumPPB === undefined ? paymentPremiumPPB : premiumPPB + flatFee = flatFee === undefined ? flatFeeMicroLink : flatFee + l1CostWei = l1CostWei === undefined ? 
BigNumber.from(0) : l1CostWei
    // Mirror the registry's on-chain payment math off-chain:
    // base gas cost converted to LINK, premium (parts-per-billion) on top,
    // flat fee scaled from micro-LINK to juels, plus any L2 (L1-data) fee share.
    const gasSpent = registryGasOverhead.add(BigNumber.from(upkeepGasSpent))
    const base = gasWei.mul(gasSpent).mul(linkDivisibility).div(linkEth)
    const l1Fee = l1CostWei
      .mul(premiumMultiplier)
      .mul(paymentPremiumBase.add(premiumPPB))
      .div(linkEth)
    const premium = base.mul(premiumPPB).div(paymentPremiumBase)
    // 1 micro-LINK = 1e12 juels.
    const flatFeeJules = BigNumber.from(flatFee).mul('1000000000000')
    return base.add(premium).add(flatFeeJules).add(l1Fee)
  }

  // Deploys a fresh KeeperRegistry over the supplied logic contract, then sweeps
  // every (gasAmount, premium, flatFee) combination: re-applies the config with
  // that pair and asserts getMaxPaymentForGas matches the locally computed
  // linkForGas() value. Optional l1CostWei exercises the L2 fee path.
  const verifyMaxPayment = async (
    keeperRegistryLogic: KeeperRegistryLogic,
    gasAmounts: number[],
    premiums: number[],
    flatFees: number[],
    l1CostWei?: BigNumber,
  ) => {
    const config = {
      paymentPremiumPPB,
      flatFeeMicroLink,
      blockCountPerTurn,
      checkGasLimit,
      stalenessSeconds,
      gasCeilingMultiplier,
      minUpkeepSpend,
      maxPerformGas,
      fallbackGasPrice,
      fallbackLinkPrice,
      transcoder: transcoder.address,
      registrar: ethers.constants.AddressZero,
    }

    let registry = await keeperRegistryFactory
      .connect(owner)
      .deploy(keeperRegistryLogic.address, config)

    for (let idx = 0; idx < gasAmounts.length; idx++) {
      const gas = gasAmounts[idx]
      for (let jdx = 0; jdx < premiums.length; jdx++) {
        const premium = premiums[jdx]
        for (let kdx = 0; kdx < flatFees.length; kdx++) {
          const flatFee = flatFees[kdx]
          // Point the registry at this premium / flat-fee pair before querying.
          await registry.connect(owner).setConfig({
            paymentPremiumPPB: premium,
            flatFeeMicroLink: flatFee,
            blockCountPerTurn,
            checkGasLimit,
            stalenessSeconds,
            gasCeilingMultiplier,
            minUpkeepSpend,
            maxPerformGas,
            fallbackGasPrice,
            fallbackLinkPrice,
            transcoder: transcoder.address,
            registrar: ethers.constants.AddressZero,
          })
          const price = await registry.getMaxPaymentForGas(gas)
          expect(price).to.equal(linkForGas(gas, premium, flatFee, l1CostWei))
        }
      }
    }
  }

  describe('#typeAndVersion', () => {
    it('uses the correct type and version', async () => {
      const typeAndVersion = await registry.typeAndVersion()
      assert.equal(typeAndVersion,
'KeeperRegistry 1.3.0') + }) + }) + + describe('#setKeepers', () => { + const IGNORE_ADDRESS = '0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF' + it('reverts when not called by the owner', async () => { + await evmRevert( + registry.connect(keeper1).setKeepers([], []), + 'Only callable by owner', + ) + }) + + it('reverts when adding the same keeper twice', async () => { + await evmRevert( + registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper1.getAddress()], + [await payee1.getAddress(), await payee1.getAddress()], + ), + 'DuplicateEntry()', + ) + }) + + it('reverts with different numbers of keepers/payees', async () => { + await evmRevert( + registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper2.getAddress()], + [await payee1.getAddress()], + ), + 'ParameterLengthError()', + ) + await evmRevert( + registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress()], + [await payee1.getAddress(), await payee2.getAddress()], + ), + 'ParameterLengthError()', + ) + }) + + it('reverts if the payee is the zero address', async () => { + await evmRevert( + registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper2.getAddress()], + [ + await payee1.getAddress(), + '0x0000000000000000000000000000000000000000', + ], + ), + 'InvalidPayee()', + ) + }) + + it('emits events for every keeper added and removed', async () => { + const oldKeepers = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + ] + const oldPayees = [await payee1.getAddress(), await payee2.getAddress()] + await registry.connect(owner).setKeepers(oldKeepers, oldPayees) + assert.deepEqual(oldKeepers, (await registry.getState()).keepers) + + // remove keepers + const newKeepers = [ + await keeper2.getAddress(), + await keeper3.getAddress(), + ] + const newPayees = [await payee2.getAddress(), await payee3.getAddress()] + const tx = await registry.connect(owner).setKeepers(newKeepers, newPayees) + 
assert.deepEqual(newKeepers, (await registry.getState()).keepers) + + await expect(tx) + .to.emit(registry, 'KeepersUpdated') + .withArgs(newKeepers, newPayees) + }) + + it('updates the keeper to inactive when removed', async () => { + await registry.connect(owner).setKeepers(keepers, payees) + await registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper3.getAddress()], + [await payee1.getAddress(), await payee3.getAddress()], + ) + const added = await registry.getKeeperInfo(await keeper1.getAddress()) + assert.isTrue(added.active) + const removed = await registry.getKeeperInfo(await keeper2.getAddress()) + assert.isFalse(removed.active) + }) + + it('does not change the payee if IGNORE_ADDRESS is used as payee', async () => { + const oldKeepers = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + ] + const oldPayees = [await payee1.getAddress(), await payee2.getAddress()] + await registry.connect(owner).setKeepers(oldKeepers, oldPayees) + assert.deepEqual(oldKeepers, (await registry.getState()).keepers) + + const newKeepers = [ + await keeper2.getAddress(), + await keeper3.getAddress(), + ] + const newPayees = [IGNORE_ADDRESS, await payee3.getAddress()] + const tx = await registry.connect(owner).setKeepers(newKeepers, newPayees) + assert.deepEqual(newKeepers, (await registry.getState()).keepers) + + const ignored = await registry.getKeeperInfo(await keeper2.getAddress()) + assert.equal(await payee2.getAddress(), ignored.payee) + assert.equal(true, ignored.active) + + await expect(tx) + .to.emit(registry, 'KeepersUpdated') + .withArgs(newKeepers, newPayees) + }) + + it('reverts if the owner changes the payee', async () => { + await registry.connect(owner).setKeepers(keepers, payees) + await evmRevert( + registry + .connect(owner) + .setKeepers(keepers, [ + await payee1.getAddress(), + await payee2.getAddress(), + await owner.getAddress(), + ]), + 'InvalidPayee()', + ) + }) + }) + + describe('#pauseUpkeep', () => { + 
it('reverts if the upkeep is already canceled', async () => {
      await registry.connect(admin).cancelUpkeep(id)

      await evmRevert(
        registry.connect(admin).pauseUpkeep(id),
        'UpkeepCancelled()',
      )
    })

    it('reverts if the upkeep is already paused', async () => {
      await registry.connect(admin).pauseUpkeep(id)

      await evmRevert(
        registry.connect(admin).pauseUpkeep(id),
        'OnlyUnpausedUpkeep()',
      )
    })

    it('reverts if the caller is not the upkeep admin', async () => {
      // keeper1 is a registered keeper but not the upkeep's admin.
      await evmRevert(
        registry.connect(keeper1).pauseUpkeep(id),
        'OnlyCallableByAdmin()',
      )
    })

    it('pauses the upkeep and emits an event', async () => {
      const tx = await registry.connect(admin).pauseUpkeep(id)
      await expect(tx).to.emit(registry, 'UpkeepPaused').withArgs(id)

      const registration = await registry.getUpkeep(id)
      assert.equal(registration.paused, true)
    })
  })

  // Unpausing is admin-only and valid only while the upkeep is paused
  // and not yet canceled.
  describe('#unpauseUpkeep', () => {
    it('reverts if the upkeep is already canceled', async () => {
      await registry.connect(owner).cancelUpkeep(id)

      await evmRevert(
        registry.connect(admin).unpauseUpkeep(id),
        'UpkeepCancelled()',
      )
    })

    it('reverts if the upkeep is not paused', async () => {
      await evmRevert(
        registry.connect(admin).unpauseUpkeep(id),
        'OnlyPausedUpkeep()',
      )
    })

    it('reverts if the caller is not the upkeep admin', async () => {
      await registry.connect(admin).pauseUpkeep(id)

      const registration = await registry.getUpkeep(id)

      // Sanity check the pause landed before exercising the revert path.
      assert.equal(registration.paused, true)

      await evmRevert(
        registry.connect(keeper1).unpauseUpkeep(id),
        'OnlyCallableByAdmin()',
      )
    })

    it('unpauses the upkeep and emits an event', async () => {
      await registry.connect(admin).pauseUpkeep(id)

      const tx = await registry.connect(admin).unpauseUpkeep(id)

      await expect(tx).to.emit(registry, 'UpkeepUnpaused').withArgs(id)

      const registration = await registry.getUpkeep(id)
      assert.equal(registration.paused, false)

      const upkeepIds = await
registry.getActiveUpkeepIDs(0, 0)
      // NOTE(review): (0, 0) appears to request the full active list — confirm
      // against the contract's getActiveUpkeepIDs pagination semantics.
      assert.equal(upkeepIds.length, 1)
    })
  })

  // updateCheckData: admin-only, rejected after cancellation, but allowed
  // while the upkeep is paused.
  describe('#updateCheckData', () => {
    it('reverts if the caller is not upkeep admin', async () => {
      await evmRevert(
        registry.connect(keeper1).updateCheckData(id, randomBytes),
        'OnlyCallableByAdmin()',
      )
    })

    it('reverts if the upkeep is cancelled', async () => {
      await registry.connect(admin).cancelUpkeep(id)

      await evmRevert(
        registry.connect(admin).updateCheckData(id, randomBytes),
        'UpkeepCancelled()',
      )
    })

    it('updates the paused upkeep check data', async () => {
      await registry.connect(admin).pauseUpkeep(id)
      await registry.connect(admin).updateCheckData(id, randomBytes)

      const registration = await registry.getUpkeep(id)
      assert.equal(randomBytes, registration.checkData)
    })

    it('updates the upkeep check data and emits an event', async () => {
      const tx = await registry.connect(admin).updateCheckData(id, randomBytes)
      await expect(tx)
        .to.emit(registry, 'UpkeepCheckDataUpdated')
        .withArgs(id, randomBytes)

      const registration = await registry.getUpkeep(id)
      assert.equal(randomBytes, registration.checkData)
    })
  })

  describe('#registerUpkeep', () => {
    context('and the registry is paused', () => {
      beforeEach(async () => {
        await registry.connect(owner).pause()
      })
      it('reverts', async () => {
        await evmRevert(
          registry
            .connect(owner)
            .registerUpkeep(
              zeroAddress,
              executeGas,
              await admin.getAddress(),
              emptyBytes,
            ),
          'Pausable: paused',
        )
      })
    })

    it('reverts if the target is not a contract', async () => {
      await evmRevert(
        registry
          .connect(owner)
          .registerUpkeep(
            zeroAddress,
            executeGas,
            await admin.getAddress(),
            emptyBytes,
          ),
        'NotAContract()',
      )
    })

    it('reverts if called by a non-owner', async () => {
      await evmRevert(
        registry
          .connect(keeper1)
          .registerUpkeep(
            mock.address,
            executeGas,
            await admin.getAddress(),
            emptyBytes,
'OnlyCallableByOwnerOrRegistrar()', + ) + }) + + it('reverts if execute gas is too low', async () => { + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + mock.address, + 2299, + await admin.getAddress(), + emptyBytes, + ), + 'GasLimitOutsideRange()', + ) + }) + + it('reverts if execute gas is too high', async () => { + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + mock.address, + 5000001, + await admin.getAddress(), + emptyBytes, + ), + 'GasLimitOutsideRange()', + ) + }) + + it('creates a record of the registration', async () => { + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + id = await getUpkeepID(tx) + await expect(tx) + .to.emit(registry, 'UpkeepRegistered') + .withArgs(id, executeGas, await admin.getAddress()) + const registration = await registry.getUpkeep(id) + assert.equal(mock.address, registration.target) + assert.equal(0, registration.balance.toNumber()) + assert.equal(emptyBytes, registration.checkData) + assert.equal(registration.paused, false) + assert(registration.maxValidBlocknumber.eq('0xffffffff')) + }) + }) + + describe('#addFunds', () => { + const amount = toWei('1') + + beforeEach(async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + }) + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).addFunds(id.add(1), amount), + 'UpkeepCancelled()', + ) + }) + + it('adds to the balance of the registration', async () => { + await registry.connect(keeper1).addFunds(id, amount) + const registration = await registry.getUpkeep(id) + assert.isTrue(amount.eq(registration.balance)) + }) + + it('emits a log', async () => { + const tx = await registry.connect(keeper1).addFunds(id, amount) + await expect(tx) + .to.emit(registry, 'FundsAdded') + .withArgs(id, await keeper1.getAddress(), amount) + }) + + it('reverts if the upkeep is 
canceled', async () => { + await registry.connect(admin).cancelUpkeep(id) + await evmRevert( + registry.connect(keeper1).addFunds(id, amount), + 'UpkeepCancelled()', + ) + }) + }) + + describe('#setUpkeepGasLimit', () => { + const newGasLimit = BigNumber.from('500000') + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).setUpkeepGasLimit(id.add(1), newGasLimit), + 'UpkeepCancelled()', + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(id) + await evmRevert( + registry.connect(keeper1).setUpkeepGasLimit(id, newGasLimit), + 'UpkeepCancelled()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry.connect(owner).setUpkeepGasLimit(id, newGasLimit), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if new gas limit is out of bounds', async () => { + await evmRevert( + registry.connect(admin).setUpkeepGasLimit(id, BigNumber.from('100')), + 'GasLimitOutsideRange()', + ) + await evmRevert( + registry + .connect(admin) + .setUpkeepGasLimit(id, BigNumber.from('6000000')), + 'GasLimitOutsideRange()', + ) + }) + + it('updates the gas limit successfully', async () => { + const initialGasLimit = (await registry.getUpkeep(id)).executeGas + assert.equal(initialGasLimit, executeGas.toNumber()) + await registry.connect(admin).setUpkeepGasLimit(id, newGasLimit) + const updatedGasLimit = (await registry.getUpkeep(id)).executeGas + assert.equal(updatedGasLimit, newGasLimit.toNumber()) + }) + + it('emits a log', async () => { + const tx = await registry + .connect(admin) + .setUpkeepGasLimit(id, newGasLimit) + await expect(tx) + .to.emit(registry, 'UpkeepGasLimitSet') + .withArgs(id, newGasLimit) + }) + }) + + describe('#checkUpkeep', () => { + it('reverts if the upkeep is not funded', async () => { + await mock.setCanPerform(true) + await mock.setCanCheck(true) + await evmRevert( + registry + 
.connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'InsufficientFunds()', + ) + }) + + context('when the registration is funded', () => { + beforeEach(async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + await registry.connect(keeper1).addFunds(id, toWei('100')) + }) + + it('reverts if executed', async () => { + await mock.setCanPerform(true) + await mock.setCanCheck(true) + await evmRevert( + registry.checkUpkeep(id, await keeper1.getAddress()), + 'OnlySimulatedBackend()', + ) + }) + + it('reverts if the specified keeper is not valid', async () => { + await mock.setCanPerform(true) + await mock.setCanCheck(true) + await evmRevert( + registry.checkUpkeep(id, await owner.getAddress()), + 'OnlySimulatedBackend()', + ) + }) + + context('and upkeep is not needed', () => { + beforeEach(async () => { + await mock.setCanCheck(false) + }) + + it('reverts', async () => { + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'UpkeepNotNeeded()', + ) + }) + }) + + context('and the upkeep check fails', () => { + beforeEach(async () => { + const reverter = await upkeepReverterFactory.deploy() + const tx = await registry + .connect(owner) + .registerUpkeep( + reverter.address, + 2500000, + await admin.getAddress(), + emptyBytes, + ) + id = await getUpkeepID(tx) + await linkToken + .connect(keeper1) + .approve(registry.address, toWei('100')) + await registry.connect(keeper1).addFunds(id, toWei('100')) + }) + + it('reverts', async () => { + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'TargetCheckReverted', + ) + }) + }) + + context('and upkeep check simulations succeeds', () => { + beforeEach(async () => { + await mock.setCanCheck(true) + await mock.setCanPerform(true) + }) + + it('reverts if the upkeep is paused', async () => { + await registry.connect(admin).pauseUpkeep(id) + + 
await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'OnlyUnpausedUpkeep()', + ) + }) + + it('returns true with pricing info if the target can execute', async () => { + const newGasMultiplier = BigNumber.from(10) + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier: newGasMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + const response = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()) + assert.isTrue(response.gasLimit.eq(executeGas)) + assert.isTrue(response.linkEth.eq(linkEth)) + assert.isTrue( + response.adjustedGasWei.eq(gasWei.mul(newGasMultiplier)), + ) + assert.isTrue( + response.maxLinkPayment.eq( + linkForGas(executeGas.toNumber()).mul(newGasMultiplier), + ), + ) + }) + + it('has a large enough gas overhead to cover upkeeps that use all their gas [ @skip-coverage ]', async () => { + await mock.setCheckGasToBurn(checkGasLimit) + const gas = checkGasLimit.add(CHECK_GAS_OVERHEAD) + await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress(), { + gasLimit: gas, + }) + }) + }) + }) + }) + + describe('#performUpkeep', () => { + let _lastKeeper = keeper1 + + async function getPerformPaymentAmount() { + _lastKeeper = _lastKeeper === keeper1 ? 
keeper2 : keeper1 + const before = ( + await registry.getKeeperInfo(await _lastKeeper.getAddress()) + ).balance + await registry.connect(_lastKeeper).performUpkeep(id, '0x') + const after = ( + await registry.getKeeperInfo(await _lastKeeper.getAddress()) + ).balance + const difference = after.sub(before) + return difference + } + + it('reverts if the registration is not funded', async () => { + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'InsufficientFunds()', + ) + }) + + context('and the registry is paused', () => { + beforeEach(async () => { + await registry.connect(owner).pause() + }) + + it('reverts', async () => { + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'Pausable: paused', + ) + }) + }) + + context('when the registration is funded', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + }) + + it('does not revert if the target cannot execute', async () => { + const mockResponse = await mock + .connect(zeroAddress) + .callStatic.checkUpkeep('0x') + assert.isFalse(mockResponse.callable) + + await registry.connect(keeper3).performUpkeep(id, '0x') + }) + + it('returns false if the target cannot execute', async () => { + const mockResponse = await mock + .connect(zeroAddress) + .callStatic.checkUpkeep('0x') + assert.isFalse(mockResponse.callable) + + assert.isFalse( + await registry.connect(keeper1).callStatic.performUpkeep(id, '0x'), + ) + }) + + it('returns true if called', async () => { + await mock.setCanPerform(true) + + const response = await registry + .connect(keeper1) + .callStatic.performUpkeep(id, '0x') + assert.isTrue(response) + }) + + it('reverts if not enough gas supplied', async () => { + await mock.setCanPerform(true) + + await evmRevert( + registry + .connect(keeper1) + .performUpkeep(id, '0x', { gasLimit: BigNumber.from('120000') }), + ) + }) + + it('executes the data 
passed to the registry', async () => { + await mock.setCanPerform(true) + + const performData = '0xc0ffeec0ffee' + const tx = await registry + .connect(keeper1) + .performUpkeep(id, performData, { gasLimit: extraGas }) + const receipt = await tx.wait() + const eventLog = receipt?.events + + assert.equal(eventLog?.length, 2) + assert.equal(eventLog?.[1].event, 'UpkeepPerformed') + expect(eventLog?.[1].args?.[0]).to.equal(id) + assert.equal(eventLog?.[1].args?.[1], true) + assert.equal(eventLog?.[1].args?.[2], await keeper1.getAddress()) + assert.isNotEmpty(eventLog?.[1].args?.[3]) + assert.equal(eventLog?.[1].args?.[4], performData) + }) + + it('updates payment balances', async () => { + const keeperBefore = await registry.getKeeperInfo( + await keeper1.getAddress(), + ) + const registrationBefore = await registry.getUpkeep(id) + const keeperLinkBefore = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkBefore = await linkToken.balanceOf(registry.address) + + // Do the thing + await registry.connect(keeper1).performUpkeep(id, '0x') + + const keeperAfter = await registry.getKeeperInfo( + await keeper1.getAddress(), + ) + const registrationAfter = await registry.getUpkeep(id) + const keeperLinkAfter = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkAfter = await linkToken.balanceOf(registry.address) + + assert.isTrue(keeperAfter.balance.gt(keeperBefore.balance)) + assert.isTrue(registrationBefore.balance.gt(registrationAfter.balance)) + assert.isTrue(keeperLinkAfter.eq(keeperLinkBefore)) + assert.isTrue(registryLinkBefore.eq(registryLinkAfter)) + }) + + it('updates amount spent correctly', async () => { + const registrationBefore = await registry.getUpkeep(id) + const balanceBefore = registrationBefore.balance + const amountSpentBefore = registrationBefore.amountSpent + + // Do the thing + await registry.connect(keeper1).performUpkeep(id, '0x') + + const registrationAfter = await registry.getUpkeep(id) + 
const balanceAfter = registrationAfter.balance + const amountSpentAfter = registrationAfter.amountSpent + + assert.isTrue(balanceAfter.lt(balanceBefore)) + assert.isTrue(amountSpentAfter.gt(amountSpentBefore)) + assert.isTrue( + amountSpentAfter + .sub(amountSpentBefore) + .eq(balanceBefore.sub(balanceAfter)), + ) + }) + + it('only pays for gas used [ @skip-coverage ]', async () => { + const before = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const tx = await registry.connect(keeper1).performUpkeep(id, '0x') + const receipt = await tx.wait() + const after = (await registry.getKeeperInfo(await keeper1.getAddress())) + .balance + + const max = linkForGas(executeGas.toNumber()) + const totalTx = linkForGas(receipt.gasUsed.toNumber()) + const difference = after.sub(before) + assert.isTrue(max.gt(totalTx)) + assert.isTrue(totalTx.gt(difference)) + assert.isTrue(linkForGas(5700).lt(difference)) // exact number is flaky + assert.isTrue(linkForGas(6000).gt(difference)) // instead test a range + }) + + it('only pays at a rate up to the gas ceiling [ @skip-coverage ]', async () => { + const multiplier = BigNumber.from(10) + const gasPrice = BigNumber.from('1000000000') // 10M x the gas feed's rate + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier: multiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + + const before = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const tx = await registry + .connect(keeper1) + .performUpkeep(id, '0x', { gasPrice }) + const receipt = await tx.wait() + const after = (await registry.getKeeperInfo(await keeper1.getAddress())) + .balance + + const max = linkForGas(executeGas).mul(multiplier) + const totalTx = linkForGas(receipt.gasUsed).mul(multiplier) + 
const difference = after.sub(before) + assert.isTrue(max.gt(totalTx)) + assert.isTrue(totalTx.gt(difference)) + assert.isTrue(linkForGas(5700).mul(multiplier).lt(difference)) + assert.isTrue(linkForGas(6000).mul(multiplier).gt(difference)) + }) + + it('only pays as much as the node spent [ @skip-coverage ]', async () => { + const multiplier = BigNumber.from(10) + const gasPrice = BigNumber.from(200) // 2X the gas feed's rate + const effectiveMultiplier = BigNumber.from(2) + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier: multiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + + const before = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const tx = await registry + .connect(keeper1) + .performUpkeep(id, '0x', { gasPrice }) + const receipt = await tx.wait() + const after = (await registry.getKeeperInfo(await keeper1.getAddress())) + .balance + + const max = linkForGas(executeGas.toNumber()).mul(effectiveMultiplier) + const totalTx = linkForGas(receipt.gasUsed).mul(effectiveMultiplier) + const difference = after.sub(before) + assert.isTrue(max.gt(totalTx)) + assert.isTrue(totalTx.gt(difference)) + assert.isTrue(linkForGas(5700).mul(effectiveMultiplier).lt(difference)) + assert.isTrue(linkForGas(6000).mul(effectiveMultiplier).gt(difference)) + }) + + it('pays the caller even if the target function fails', async () => { + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const id = await getUpkeepID(tx) + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + const keeperBalanceBefore = ( + await registry.getKeeperInfo(await keeper1.getAddress()) 
+ ).balance + + // Do the thing + await registry.connect(keeper1).performUpkeep(id, '0x') + + const keeperBalanceAfter = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + assert.isTrue(keeperBalanceAfter.gt(keeperBalanceBefore)) + }) + + it('reverts if called by a non-keeper', async () => { + await evmRevert( + registry.connect(nonkeeper).performUpkeep(id, '0x'), + 'OnlyActiveKeepers()', + ) + }) + + it('reverts if the upkeep has been canceled', async () => { + await mock.setCanPerform(true) + + await registry.connect(owner).cancelUpkeep(id) + + await evmRevert( + registry.connect(keeper1).performUpkeep(id, '0x'), + 'UpkeepCancelled()', + ) + }) + + it('reverts if the upkeep is paused', async () => { + await registry.connect(admin).pauseUpkeep(id) + + await evmRevert( + registry.connect(keeper1).performUpkeep(id, '0x'), + 'OnlyUnpausedUpkeep()', + ) + }) + + it('uses the fallback gas price if the feed price is stale [ @skip-coverage ]', async () => { + const normalAmount = await getPerformPaymentAmount() + const roundId = 99 + const answer = 100 + const updatedAt = 946684800 // New Years 2000 🥳 + const startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, answer, updatedAt, startedAt) + const amountWithStaleFeed = await getPerformPaymentAmount() + assert.isTrue(normalAmount.lt(amountWithStaleFeed)) + }) + + it('uses the fallback gas price if the feed price is non-sensical [ @skip-coverage ]', async () => { + const normalAmount = await getPerformPaymentAmount() + const roundId = 99 + const updatedAt = Math.floor(Date.now() / 1000) + const startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, -100, updatedAt, startedAt) + const amountWithNegativeFeed = await getPerformPaymentAmount() + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, 0, updatedAt, startedAt) + const amountWithZeroFeed = await getPerformPaymentAmount() + 
assert.isTrue(normalAmount.lt(amountWithNegativeFeed)) + assert.isTrue(normalAmount.lt(amountWithZeroFeed)) + }) + + it('uses the fallback if the link price feed is stale', async () => { + const normalAmount = await getPerformPaymentAmount() + const roundId = 99 + const answer = 100 + const updatedAt = 946684800 // New Years 2000 🥳 + const startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, answer, updatedAt, startedAt) + const amountWithStaleFeed = await getPerformPaymentAmount() + assert.isTrue(normalAmount.lt(amountWithStaleFeed)) + }) + + it('uses the fallback link price if the feed price is non-sensical [ @skip-coverage ]', async () => { + const normalAmount = await getPerformPaymentAmount() + const roundId = 99 + const updatedAt = Math.floor(Date.now() / 1000) + const startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, -100, updatedAt, startedAt) + const amountWithNegativeFeed = await getPerformPaymentAmount() + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, 0, updatedAt, startedAt) + const amountWithZeroFeed = await getPerformPaymentAmount() + assert.isTrue(normalAmount.lt(amountWithNegativeFeed)) + assert.isTrue(normalAmount.lt(amountWithZeroFeed)) + }) + + it('reverts if the same caller calls twice in a row', async () => { + await registry.connect(keeper1).performUpkeep(id, '0x') + await evmRevert( + registry.connect(keeper1).performUpkeep(id, '0x'), + 'KeepersMustTakeTurns()', + ) + await registry.connect(keeper2).performUpkeep(id, '0x') + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'KeepersMustTakeTurns()', + ) + await registry.connect(keeper1).performUpkeep(id, '0x') + }) + + it('has a large enough gas overhead to cover upkeeps that use all their gas [ @skip-coverage ]', async () => { + await registry.connect(admin).setUpkeepGasLimit(id, maxPerformGas) + await mock.setPerformGasToBurn(maxPerformGas) + await mock.setCanPerform(true) + 
const gas = maxPerformGas.add(PERFORM_GAS_OVERHEAD) + const performData = '0xc0ffeec0ffee' + const tx = await registry + .connect(keeper1) + .performUpkeep(id, performData, { gasLimit: gas }) + const receipt = await tx.wait() + const eventLog = receipt?.events + + assert.equal(eventLog?.length, 2) + assert.equal(eventLog?.[1].event, 'UpkeepPerformed') + expect(eventLog?.[1].args?.[0]).to.equal(id) + assert.equal(eventLog?.[1].args?.[1], true) + assert.equal(eventLog?.[1].args?.[2], await keeper1.getAddress()) + assert.isNotEmpty(eventLog?.[1].args?.[3]) + assert.equal(eventLog?.[1].args?.[4], performData) + }) + + it('can self fund', async () => { + const autoFunderUpkeep = await upkeepAutoFunderFactory + .connect(owner) + .deploy(linkToken.address, registry.address) + const tx = await registry + .connect(owner) + .registerUpkeep( + autoFunderUpkeep.address, + executeGas, + autoFunderUpkeep.address, + emptyBytes, + ) + const upkeepID = await getUpkeepID(tx) + await autoFunderUpkeep.setUpkeepId(upkeepID) + // Give enough funds for upkeep as well as to the upkeep contract + await linkToken.connect(owner).approve(registry.address, toWei('1000')) + await linkToken + .connect(owner) + .transfer(autoFunderUpkeep.address, toWei('1000')) + let maxPayment = await registry.getMaxPaymentForGas(executeGas) + + // First set auto funding amount to 0 and verify that balance is deducted upon performUpkeep + let initialBalance = toWei('100') + await registry.connect(owner).addFunds(upkeepID, initialBalance) + await autoFunderUpkeep.setAutoFundLink(0) + await autoFunderUpkeep.setIsEligible(true) + await registry.connect(keeper1).performUpkeep(upkeepID, '0x') + + let postUpkeepBalance = (await registry.getUpkeep(upkeepID)).balance + assert.isTrue(postUpkeepBalance.lt(initialBalance)) // Balance should be deducted + assert.isTrue(postUpkeepBalance.gte(initialBalance.sub(maxPayment))) // Balance should not be deducted more than maxPayment + + // Now set auto funding amount to 100 wei 
and verify that the balance increases + initialBalance = postUpkeepBalance + let autoTopupAmount = toWei('100') + await autoFunderUpkeep.setAutoFundLink(autoTopupAmount) + await autoFunderUpkeep.setIsEligible(true) + await registry.connect(keeper2).performUpkeep(upkeepID, '0x') + + postUpkeepBalance = (await registry.getUpkeep(upkeepID)).balance + // Balance should increase by autoTopupAmount and decrease by max maxPayment + assert.isTrue( + postUpkeepBalance.gte( + initialBalance.add(autoTopupAmount).sub(maxPayment), + ), + ) + }) + + it('can self cancel', async () => { + const autoFunderUpkeep = await upkeepAutoFunderFactory + .connect(owner) + .deploy(linkToken.address, registry.address) + const tx = await registry + .connect(owner) + .registerUpkeep( + autoFunderUpkeep.address, + executeGas, + autoFunderUpkeep.address, + emptyBytes, + ) + const upkeepID = await getUpkeepID(tx) + await autoFunderUpkeep.setUpkeepId(upkeepID) + + await linkToken.connect(owner).approve(registry.address, toWei('1000')) + await registry.connect(owner).addFunds(upkeepID, toWei('100')) + await autoFunderUpkeep.setIsEligible(true) + await autoFunderUpkeep.setShouldCancel(true) + + let registration = await registry.getUpkeep(upkeepID) + const oldExpiration = registration.maxValidBlocknumber + + // Do the thing + await registry.connect(keeper1).performUpkeep(upkeepID, '0x') + + // Verify upkeep gets cancelled + registration = await registry.getUpkeep(upkeepID) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) + }) + }) + + describe('#withdrawFunds', () => { + beforeEach(async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + await registry.connect(keeper1).addFunds(id, toWei('100')) + await registry.connect(keeper1).performUpkeep(id, '0x') + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry + .connect(owner) + .withdrawFunds(id.add(1), await 
payee1.getAddress()), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if called on an uncanceled upkeep', async () => { + await evmRevert( + registry.connect(admin).withdrawFunds(id, await payee1.getAddress()), + 'UpkeepNotCanceled()', + ) + }) + + it('reverts if called with the 0 address', async () => { + await evmRevert( + registry.connect(admin).withdrawFunds(id, zeroAddress), + 'InvalidRecipient()', + ) + }) + + describe('after the registration is cancelled', () => { + beforeEach(async () => { + await registry.connect(owner).cancelUpkeep(id) + }) + + it('moves the funds out and updates the balance and emits an event', async () => { + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const registryBefore = await linkToken.balanceOf(registry.address) + + let registration = await registry.getUpkeep(id) + let previousBalance = registration.balance + + const tx = await registry + .connect(admin) + .withdrawFunds(id, await payee1.getAddress()) + await expect(tx) + .to.emit(registry, 'FundsWithdrawn') + .withArgs(id, previousBalance, await payee1.getAddress()) + + const payee1After = await linkToken.balanceOf(await payee1.getAddress()) + const registryAfter = await linkToken.balanceOf(registry.address) + + assert.isTrue(payee1Before.add(previousBalance).eq(payee1After)) + assert.isTrue(registryBefore.sub(previousBalance).eq(registryAfter)) + + registration = await registry.getUpkeep(id) + assert.equal(0, registration.balance.toNumber()) + }) + }) + }) + + describe('#withdrawOwnerFunds', () => { + it('can only be called by owner', async () => { + await evmRevert( + registry.connect(keeper1).withdrawOwnerFunds(), + 'Only callable by owner', + ) + }) + + it('withdraws the collected fees to owner', async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + await registry.connect(keeper1).addFunds(id, toWei('100')) + // Very high min spend, whole balance as cancellation fees + let minUpkeepSpend = 
toWei('1000') + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + let upkeepBalance = (await registry.getUpkeep(id)).balance + const ownerBefore = await linkToken.balanceOf(await owner.getAddress()) + + await registry.connect(owner).cancelUpkeep(id) + await registry.connect(admin).withdrawFunds(id, await payee1.getAddress()) + // Transfered to owner balance on registry + let ownerRegistryBalance = (await registry.getState()).state + .ownerLinkBalance + assert.isTrue(ownerRegistryBalance.eq(upkeepBalance)) + + // Now withdraw + await registry.connect(owner).withdrawOwnerFunds() + + ownerRegistryBalance = (await registry.getState()).state.ownerLinkBalance + const ownerAfter = await linkToken.balanceOf(await owner.getAddress()) + + // Owner registry balance should be changed to 0 + assert.isTrue(ownerRegistryBalance.eq(BigNumber.from('0'))) + + // Owner should be credited with the balance + assert.isTrue(ownerBefore.add(upkeepBalance).eq(ownerAfter)) + }) + }) + + describe('#cancelUpkeep', () => { + it('reverts if the ID is not valid', async () => { + await evmRevert( + registry.connect(owner).cancelUpkeep(id.add(1)), + 'CannotCancel()', + ) + }) + + it('reverts if called by a non-owner/non-admin', async () => { + await evmRevert( + registry.connect(keeper1).cancelUpkeep(id), + 'OnlyCallableByOwnerOrAdmin()', + ) + }) + + describe('when called by the owner', async () => { + it('sets the registration to invalid immediately', async () => { + const tx = await registry.connect(owner).cancelUpkeep(id) + const receipt = await tx.wait() + const registration = await registry.getUpkeep(id) + assert.equal( + registration.maxValidBlocknumber.toNumber(), + receipt.blockNumber, + ) + }) + + it('emits an event', 
async () => { + const tx = await registry.connect(owner).cancelUpkeep(id) + const receipt = await tx.wait() + await expect(tx) + .to.emit(registry, 'UpkeepCanceled') + .withArgs(id, BigNumber.from(receipt.blockNumber)) + }) + + it('immediately prevents upkeep', async () => { + await registry.connect(owner).cancelUpkeep(id) + + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'UpkeepCancelled()', + ) + }) + + it('does not revert if reverts if called multiple times', async () => { + await registry.connect(owner).cancelUpkeep(id) + await evmRevert( + registry.connect(owner).cancelUpkeep(id), + 'CannotCancel()', + ) + }) + + describe('when called by the owner when the admin has just canceled', () => { + let oldExpiration: BigNumber + + beforeEach(async () => { + await registry.connect(admin).cancelUpkeep(id) + const registration = await registry.getUpkeep(id) + oldExpiration = registration.maxValidBlocknumber + }) + + it('allows the owner to cancel it more quickly', async () => { + await registry.connect(owner).cancelUpkeep(id) + + const registration = await registry.getUpkeep(id) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) + }) + }) + + describe('when called by the admin', async () => { + const delay = 50 + + it('reverts if called again by the admin', async () => { + await registry.connect(admin).cancelUpkeep(id) + + await evmRevert( + registry.connect(admin).cancelUpkeep(id), + 'CannotCancel()', + ) + }) + + it('reverts if called by the owner after the timeout', async () => { + await registry.connect(admin).cancelUpkeep(id) + + for (let i = 0; i < delay; i++) { + await ethers.provider.send('evm_mine', []) + } + + await evmRevert( + registry.connect(owner).cancelUpkeep(id), + 'CannotCancel()', + ) + }) + + it('sets the registration to invalid in 50 blocks', async () => { + const tx = await registry.connect(admin).cancelUpkeep(id) + const receipt = await tx.wait() + const 
registration = await registry.getUpkeep(id) + assert.equal( + registration.maxValidBlocknumber.toNumber(), + receipt.blockNumber + 50, + ) + }) + + it('emits an event', async () => { + const tx = await registry.connect(admin).cancelUpkeep(id) + const receipt = await tx.wait() + await expect(tx) + .to.emit(registry, 'UpkeepCanceled') + .withArgs(id, BigNumber.from(receipt.blockNumber + delay)) + }) + + it('immediately prevents upkeep', async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + await registry.connect(admin).cancelUpkeep(id) + await registry.connect(keeper2).performUpkeep(id, '0x') // still works + + for (let i = 0; i < delay; i++) { + await ethers.provider.send('evm_mine', []) + } + + await evmRevert( + registry.connect(keeper2).performUpkeep(id, '0x'), + 'UpkeepCancelled()', + ) + }) + + describe('when an upkeep has been performed', async () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + await registry.connect(keeper1).performUpkeep(id, '0x') + }) + + it('deducts a cancellation fee from the upkeep and gives to owner', async () => { + let minUpkeepSpend = toWei('10') + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + let upkeepBefore = (await registry.getUpkeep(id)).balance + let ownerBefore = (await registry.getState()).state.ownerLinkBalance + assert.equal(0, ownerBefore.toNumber()) + + let amountSpent = toWei('100').sub(upkeepBefore) + let cancellationFee = minUpkeepSpend.sub(amountSpent) 
+ + await registry.connect(admin).cancelUpkeep(id) + + const payee1After = await linkToken.balanceOf( + await payee1.getAddress(), + ) + let upkeepAfter = (await registry.getUpkeep(id)).balance + let ownerAfter = (await registry.getState()).state.ownerLinkBalance + + // post upkeep balance should be previous balance minus cancellation fee + assert.isTrue(upkeepBefore.sub(cancellationFee).eq(upkeepAfter)) + // payee balance should not change + assert.isTrue(payee1Before.eq(payee1After)) + // owner should receive the cancellation fee + assert.isTrue(ownerAfter.eq(cancellationFee)) + }) + + it('deducts up to balance as cancellation fee', async () => { + // Very high min spend, should deduct whole balance as cancellation fees + let minUpkeepSpend = toWei('1000') + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + let upkeepBefore = (await registry.getUpkeep(id)).balance + let ownerBefore = (await registry.getState()).state.ownerLinkBalance + assert.equal(0, ownerBefore.toNumber()) + + await registry.connect(admin).cancelUpkeep(id) + const payee1After = await linkToken.balanceOf( + await payee1.getAddress(), + ) + let ownerAfter = (await registry.getState()).state.ownerLinkBalance + let upkeepAfter = (await registry.getUpkeep(id)).balance + + // all upkeep balance is deducted for cancellation fee + assert.equal(0, upkeepAfter.toNumber()) + // payee balance should not change + assert.isTrue(payee1After.eq(payee1Before)) + // all upkeep balance is transferred to the owner + assert.isTrue(ownerAfter.eq(upkeepBefore)) + }) + + it('does not deduct cancellation fee if more than minUpkeepSpend is spent', async () => { + // 
Very low min spend, already spent in one perform upkeep + let minUpkeepSpend = BigNumber.from(420) + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + let upkeepBefore = (await registry.getUpkeep(id)).balance + let ownerBefore = (await registry.getState()).state.ownerLinkBalance + assert.equal(0, ownerBefore.toNumber()) + + await registry.connect(admin).cancelUpkeep(id) + const payee1After = await linkToken.balanceOf( + await payee1.getAddress(), + ) + let ownerAfter = (await registry.getState()).state.ownerLinkBalance + let upkeepAfter = (await registry.getUpkeep(id)).balance + + // upkeep does not pay cancellation fee after cancellation because minimum upkeep spent is met + assert.isTrue(upkeepBefore.eq(upkeepAfter)) + // owner balance does not change + assert.equal(0, ownerAfter.toNumber()) + // payee balance does not change + assert.isTrue(payee1Before.eq(payee1After)) + }) + }) + }) + }) + + describe('#withdrawPayment', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + await registry.connect(keeper1).performUpkeep(id, '0x') + }) + + it('reverts if called by anyone but the payee', async () => { + await evmRevert( + registry + .connect(payee2) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ), + 'OnlyCallableByPayee()', + ) + }) + + it('reverts if called with the 0 address', async () => { + await evmRevert( + registry + .connect(payee2) + .withdrawPayment(await keeper1.getAddress(), zeroAddress), + 'InvalidRecipient()', + ) + }) + + it('updates the balances', 
async () => { + const to = await nonkeeper.getAddress() + const keeperBefore = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const registrationBefore = (await registry.getUpkeep(id)).balance + const toLinkBefore = await linkToken.balanceOf(to) + const registryLinkBefore = await linkToken.balanceOf(registry.address) + + //// Do the thing + await registry + .connect(payee1) + .withdrawPayment(await keeper1.getAddress(), to) + + const keeperAfter = ( + await registry.getKeeperInfo(await keeper1.getAddress()) + ).balance + const registrationAfter = (await registry.getUpkeep(id)).balance + const toLinkAfter = await linkToken.balanceOf(to) + const registryLinkAfter = await linkToken.balanceOf(registry.address) + + assert.isTrue(keeperAfter.eq(BigNumber.from(0))) + assert.isTrue(registrationBefore.eq(registrationAfter)) + assert.isTrue(toLinkBefore.add(keeperBefore).eq(toLinkAfter)) + assert.isTrue(registryLinkBefore.sub(keeperBefore).eq(registryLinkAfter)) + }) + + it('emits a log announcing the withdrawal', async () => { + const balance = (await registry.getKeeperInfo(await keeper1.getAddress())) + .balance + const tx = await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + await expect(tx) + .to.emit(registry, 'PaymentWithdrawn') + .withArgs( + await keeper1.getAddress(), + balance, + await nonkeeper.getAddress(), + await payee1.getAddress(), + ) + }) + }) + + describe('#transferPayeeship', () => { + it('reverts when called by anyone but the current payee', async () => { + await evmRevert( + registry + .connect(payee2) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ), + 'OnlyCallableByPayee()', + ) + }) + + it('reverts when transferring to self', async () => { + await evmRevert( + registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee1.getAddress(), + ), + 'ValueNotChanged()', + ) + }) + + it('does 
not change the payee', async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + + const info = await registry.getKeeperInfo(await keeper1.getAddress()) + assert.equal(await payee1.getAddress(), info.payee) + }) + + it('emits an event announcing the new payee', async () => { + const tx = await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + await expect(tx) + .to.emit(registry, 'PayeeshipTransferRequested') + .withArgs( + await keeper1.getAddress(), + await payee1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('does not emit an event when called with the same proposal', async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + + const tx = await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + const receipt = await tx.wait() + assert.equal(0, receipt.logs.length) + }) + }) + + describe('#acceptPayeeship', () => { + beforeEach(async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('reverts when called by anyone but the proposed payee', async () => { + await evmRevert( + registry.connect(payee1).acceptPayeeship(await keeper1.getAddress()), + 'OnlyCallableByProposedPayee()', + ) + }) + + it('emits an event announcing the new payee', async () => { + const tx = await registry + .connect(payee2) + .acceptPayeeship(await keeper1.getAddress()) + await expect(tx) + .to.emit(registry, 'PayeeshipTransferred') + .withArgs( + await keeper1.getAddress(), + await payee1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('does change the payee', async () => { + await registry.connect(payee2).acceptPayeeship(await keeper1.getAddress()) + + const info = await registry.getKeeperInfo(await 
keeper1.getAddress()) + assert.equal(await payee2.getAddress(), info.payee) + }) + }) + + describe('#transferUpkeepAdmin', () => { + beforeEach(async () => { + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + id = await getUpkeepID(tx) + }) + + it('reverts when called by anyone but the current upkeep admin', async () => { + await evmRevert( + registry + .connect(payee1) + .transferUpkeepAdmin(id, await payee2.getAddress()), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts when transferring to self', async () => { + await evmRevert( + registry + .connect(admin) + .transferUpkeepAdmin(id, await admin.getAddress()), + 'ValueNotChanged()', + ) + }) + + it('reverts when the upkeep is cancelled', async () => { + await registry.connect(admin).cancelUpkeep(id) + + await evmRevert( + registry + .connect(admin) + .transferUpkeepAdmin(id, await keeper1.getAddress()), + 'UpkeepCancelled()', + ) + }) + + it('reverts when transferring to zero address', async () => { + await evmRevert( + registry + .connect(admin) + .transferUpkeepAdmin(id, ethers.constants.AddressZero), + 'InvalidRecipient()', + ) + }) + + it('does not change the upkeep admin', async () => { + await registry + .connect(admin) + .transferUpkeepAdmin(id, await payee1.getAddress()) + + const upkeep = await registry.getUpkeep(id) + assert.equal(await admin.getAddress(), upkeep.admin) + }) + + it('emits an event announcing the new upkeep admin', async () => { + const tx = await registry + .connect(admin) + .transferUpkeepAdmin(id, await payee1.getAddress()) + + await expect(tx) + .to.emit(registry, 'UpkeepAdminTransferRequested') + .withArgs(id, await admin.getAddress(), await payee1.getAddress()) + }) + + it('does not emit an event when called with the same proposed upkeep admin', async () => { + await registry + .connect(admin) + .transferUpkeepAdmin(id, await payee1.getAddress()) + + const tx = await registry + 
.connect(admin) + .transferUpkeepAdmin(id, await payee1.getAddress()) + const receipt = await tx.wait() + assert.equal(0, receipt.logs.length) + }) + }) + + describe('#acceptUpkeepAdmin', () => { + beforeEach(async () => { + await registry + .connect(admin) + .transferUpkeepAdmin(id, await payee1.getAddress()) + }) + + it('reverts when not called by the proposed upkeep admin', async () => { + await evmRevert( + registry.connect(payee2).acceptUpkeepAdmin(id), + 'OnlyCallableByProposedAdmin()', + ) + }) + + it('reverts when the upkeep is cancelled', async () => { + await registry.connect(admin).cancelUpkeep(id) + + await evmRevert( + registry.connect(payee1).acceptUpkeepAdmin(id), + 'UpkeepCancelled()', + ) + }) + + it('emits an event announcing the new upkeep admin', async () => { + const tx = await registry.connect(payee1).acceptUpkeepAdmin(id) + await expect(tx) + .to.emit(registry, 'UpkeepAdminTransferred') + .withArgs(id, await admin.getAddress(), await payee1.getAddress()) + }) + + it('does change the payee', async () => { + await registry.connect(payee1).acceptUpkeepAdmin(id) + + const upkeep = await registry.getUpkeep(id) + assert.equal(await payee1.getAddress(), upkeep.admin) + }) + }) + + describe('#setConfig', () => { + const payment = BigNumber.from(1) + const flatFee = BigNumber.from(2) + const checks = BigNumber.from(3) + const staleness = BigNumber.from(4) + const ceiling = BigNumber.from(5) + const maxGas = BigNumber.from(6) + const fbGasEth = BigNumber.from(7) + const fbLinkEth = BigNumber.from(8) + + it('reverts when called by anyone but the proposed owner', async () => { + await evmRevert( + registry.connect(payee1).setConfig({ + paymentPremiumPPB: payment, + flatFeeMicroLink: flatFee, + blockCountPerTurn: checks, + checkGasLimit: maxGas, + stalenessSeconds: staleness, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice: fbGasEth, + fallbackLinkPrice: fbLinkEth, + transcoder: transcoder.address, + registrar: 
ethers.constants.AddressZero, + }), + 'Only callable by owner', + ) + }) + + it('updates the config', async () => { + const old = (await registry.getState()).config + assert.isTrue(paymentPremiumPPB.eq(old.paymentPremiumPPB)) + assert.isTrue(flatFeeMicroLink.eq(old.flatFeeMicroLink)) + assert.isTrue(blockCountPerTurn.eq(old.blockCountPerTurn)) + assert.isTrue(stalenessSeconds.eq(old.stalenessSeconds)) + assert.isTrue(gasCeilingMultiplier.eq(old.gasCeilingMultiplier)) + + await registry.connect(owner).setConfig({ + paymentPremiumPPB: payment, + flatFeeMicroLink: flatFee, + blockCountPerTurn: checks, + checkGasLimit: maxGas, + stalenessSeconds: staleness, + gasCeilingMultiplier: ceiling, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice: fbGasEth, + fallbackLinkPrice: fbLinkEth, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + + const updated = (await registry.getState()).config + assert.equal(updated.paymentPremiumPPB, payment.toNumber()) + assert.equal(updated.flatFeeMicroLink, flatFee.toNumber()) + assert.equal(updated.blockCountPerTurn, checks.toNumber()) + assert.equal(updated.stalenessSeconds, staleness.toNumber()) + assert.equal(updated.gasCeilingMultiplier, ceiling.toNumber()) + assert.equal(updated.checkGasLimit, maxGas.toNumber()) + assert.equal(updated.fallbackGasPrice.toNumber(), fbGasEth.toNumber()) + assert.equal(updated.fallbackLinkPrice.toNumber(), fbLinkEth.toNumber()) + }) + + it('emits an event', async () => { + const tx = await registry.connect(owner).setConfig({ + paymentPremiumPPB: payment, + flatFeeMicroLink: flatFee, + blockCountPerTurn: checks, + checkGasLimit: maxGas, + stalenessSeconds: staleness, + gasCeilingMultiplier: ceiling, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice: fbGasEth, + fallbackLinkPrice: fbLinkEth, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + await expect(tx) + .to.emit(registry, 'ConfigSet') + .withArgs([ + payment, + flatFee, + checks, 
+ maxGas, + staleness, + ceiling, + minUpkeepSpend, + maxPerformGas, + fbGasEth, + fbLinkEth, + ]) + }) + }) + + describe('#onTokenTransfer', () => { + const amount = toWei('1') + + it('reverts if not called by the PLI token', async () => { + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [id]) + + await evmRevert( + registry + .connect(keeper1) + .onTokenTransfer(await keeper1.getAddress(), amount, data), + 'OnlyCallableByPLIToken()', + ) + }) + + it('reverts if not called with more or less than 32 bytes', async () => { + const longData = ethers.utils.defaultAbiCoder.encode( + ['uint256', 'uint256'], + ['33', '34'], + ) + const shortData = '0x12345678' + + await evmRevert( + linkToken + .connect(owner) + .transferAndCall(registry.address, amount, longData), + ) + await evmRevert( + linkToken + .connect(owner) + .transferAndCall(registry.address, amount, shortData), + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(id) + await evmRevert( + registry.connect(keeper1).addFunds(id, amount), + 'UpkeepCancelled()', + ) + }) + + it('updates the funds of the job id passed', async () => { + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [id]) + + const before = (await registry.getUpkeep(id)).balance + await linkToken + .connect(owner) + .transferAndCall(registry.address, amount, data) + const after = (await registry.getUpkeep(id)).balance + + assert.isTrue(before.add(amount).eq(after)) + }) + }) + + describe('#recoverFunds', () => { + const sent = toWei('7') + + beforeEach(async () => { + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + + // add funds to upkeep 1 and perform and withdraw some payment + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const id1 = await getUpkeepID(tx) + await registry.connect(keeper1).addFunds(id1, toWei('5')) + await 
registry.connect(keeper1).performUpkeep(id1, '0x') + await registry.connect(keeper2).performUpkeep(id1, '0x') + await registry.connect(keeper3).performUpkeep(id1, '0x') + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + + // transfer funds directly to the registry + await linkToken.connect(keeper1).transfer(registry.address, sent) + + // add funds to upkeep 2 and perform and withdraw some payment + const tx2 = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const id2 = await getUpkeepID(tx2) + await registry.connect(keeper1).addFunds(id2, toWei('5')) + await registry.connect(keeper1).performUpkeep(id2, '0x') + await registry.connect(keeper2).performUpkeep(id2, '0x') + await registry.connect(keeper3).performUpkeep(id2, '0x') + await registry + .connect(payee2) + .withdrawPayment( + await keeper2.getAddress(), + await nonkeeper.getAddress(), + ) + + // transfer funds using onTokenTransfer + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [id2]) + await linkToken + .connect(owner) + .transferAndCall(registry.address, toWei('1'), data) + + // remove a keeper + await registry + .connect(owner) + .setKeepers( + [await keeper1.getAddress(), await keeper2.getAddress()], + [await payee1.getAddress(), await payee2.getAddress()], + ) + + // withdraw some funds + await registry.connect(owner).cancelUpkeep(id1) + await registry.connect(admin).withdrawFunds(id1, await admin.getAddress()) + }) + + it('reverts if not called by owner', async () => { + await evmRevert( + registry.connect(keeper1).recoverFunds(), + 'Only callable by owner', + ) + }) + + it('allows any funds that have been accidentally transfered to be moved', async () => { + const balanceBefore = await linkToken.balanceOf(registry.address) + + await linkToken.balanceOf(registry.address) + + await registry.connect(owner).recoverFunds() + const 
balanceAfter = await linkToken.balanceOf(registry.address) + assert.isTrue(balanceBefore.eq(balanceAfter.add(sent))) + }) + }) + + describe('#pause', () => { + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry.connect(keeper1).pause(), + 'Only callable by owner', + ) + }) + + it('marks the contract as paused', async () => { + assert.isFalse(await registry.paused()) + + await registry.connect(owner).pause() + + assert.isTrue(await registry.paused()) + }) + }) + + describe('#unpause', () => { + beforeEach(async () => { + await registry.connect(owner).pause() + }) + + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry.connect(keeper1).unpause(), + 'Only callable by owner', + ) + }) + + it('marks the contract as not paused', async () => { + assert.isTrue(await registry.paused()) + + await registry.connect(owner).unpause() + + assert.isFalse(await registry.paused()) + }) + }) + + describe('#getMaxPaymentForGas', () => { + const gasAmounts = [100000, 10000000] + const premiums = [0, 250000000] + const flatFees = [0, 1000000] + it('calculates the max fee appropriately', async () => { + const registryLogicL1 = await keeperRegistryLogicFactory + .connect(owner) + .deploy( + 0, + registryGasOverhead, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + await verifyMaxPayment(registryLogicL1, gasAmounts, premiums, flatFees) + }) + + it('calculates the max fee appropriately for Arbitrum', async () => { + const registryLogicArb = await keeperRegistryLogicFactory + .connect(owner) + .deploy( + 1, + registryGasOverhead, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + await verifyMaxPayment( + registryLogicArb, + gasAmounts, + premiums, + flatFees, + l1CostWeiArb, + ) + }) + + it('calculates the max fee appropriately for Optimism', async () => { + const registryLogicOpt = await keeperRegistryLogicFactory + .connect(owner) + .deploy( + 2, + registryGasOverhead, + 
linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + await verifyMaxPayment( + registryLogicOpt, + gasAmounts, + premiums, + flatFees, + l1CostWeiOpt, + ) + }) + }) + + describe('#setPeerRegistryMigrationPermission() / #getPeerRegistryMigrationPermission()', () => { + const peer = randomAddress() + it('allows the owner to set the peer registries', async () => { + let permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(0) + await registry.setPeerRegistryMigrationPermission(peer, 1) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(1) + await registry.setPeerRegistryMigrationPermission(peer, 2) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(2) + await registry.setPeerRegistryMigrationPermission(peer, 0) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(0) + }) + it('reverts if passed an unsupported permission', async () => { + await expect( + registry.connect(admin).setPeerRegistryMigrationPermission(peer, 10), + ).to.be.reverted + }) + it('reverts if not called by the owner', async () => { + await expect( + registry.connect(admin).setPeerRegistryMigrationPermission(peer, 1), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('migrateUpkeeps() / #receiveUpkeeps()', async () => { + context('when permissions are set', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(id, toWei('100')) + await registry.setPeerRegistryMigrationPermission(registry2.address, 1) + await registry2.setPeerRegistryMigrationPermission(registry.address, 2) + }) + + it('migrates an upkeep', async () => { + expect((await registry.getUpkeep(id)).balance).to.equal(toWei('100')) + expect((await registry.getUpkeep(id)).checkData).to.equal(randomBytes) + 
expect((await registry.getState()).state.numUpkeeps).to.equal(1) + await registry + .connect(admin) + .transferUpkeepAdmin(id, await payee1.getAddress()) + + // migrate + await registry.connect(admin).migrateUpkeeps([id], registry2.address) + expect((await registry.getState()).state.numUpkeeps).to.equal(0) + expect((await registry2.getState()).state.numUpkeeps).to.equal(1) + expect((await registry.getUpkeep(id)).balance).to.equal(0) + expect((await registry.getUpkeep(id)).checkData).to.equal('0x') + expect((await registry2.getUpkeep(id)).balance).to.equal(toWei('100')) + expect((await registry2.getState()).state.expectedLinkBalance).to.equal( + toWei('100'), + ) + expect((await registry2.getUpkeep(id)).checkData).to.equal(randomBytes) + // migration will delete the upkeep and nullify admin transfer + await expect( + registry.connect(payee1).acceptUpkeepAdmin(id), + ).to.be.revertedWith('UpkeepCancelled()') + await expect( + registry2.connect(payee1).acceptUpkeepAdmin(id), + ).to.be.revertedWith('OnlyCallableByProposedAdmin()') + }) + + it('migrates a paused upkeep', async () => { + expect((await registry.getUpkeep(id)).balance).to.equal(toWei('100')) + expect((await registry.getUpkeep(id)).checkData).to.equal(randomBytes) + expect((await registry.getState()).state.numUpkeeps).to.equal(1) + await registry.connect(admin).pauseUpkeep(id) + // verify the upkeep is paused + expect((await registry.getUpkeep(id)).paused).to.equal(true) + // migrate + await registry.connect(admin).migrateUpkeeps([id], registry2.address) + expect((await registry.getState()).state.numUpkeeps).to.equal(0) + expect((await registry2.getState()).state.numUpkeeps).to.equal(1) + expect((await registry.getUpkeep(id)).balance).to.equal(0) + expect((await registry2.getUpkeep(id)).balance).to.equal(toWei('100')) + expect((await registry.getUpkeep(id)).checkData).to.equal('0x') + expect((await registry2.getUpkeep(id)).checkData).to.equal(randomBytes) + expect((await 
registry2.getState()).state.expectedLinkBalance).to.equal( + toWei('100'), + ) + // verify the upkeep is still paused after migration + expect((await registry2.getUpkeep(id)).paused).to.equal(true) + }) + + it('emits an event on both contracts', async () => { + expect((await registry.getUpkeep(id)).balance).to.equal(toWei('100')) + expect((await registry.getUpkeep(id)).checkData).to.equal(randomBytes) + expect((await registry.getState()).state.numUpkeeps).to.equal(1) + const tx = registry + .connect(admin) + .migrateUpkeeps([id], registry2.address) + await expect(tx) + .to.emit(registry, 'UpkeepMigrated') + .withArgs(id, toWei('100'), registry2.address) + await expect(tx) + .to.emit(registry2, 'UpkeepReceived') + .withArgs(id, toWei('100'), registry.address) + }) + it('is only migratable by the admin', async () => { + await expect( + registry.connect(owner).migrateUpkeeps([id], registry2.address), + ).to.be.revertedWith('OnlyCallableByAdmin()') + await registry.connect(admin).migrateUpkeeps([id], registry2.address) + }) + }) + + context('when permissions are not set', () => { + it('reverts', async () => { + // no permissions + await registry.setPeerRegistryMigrationPermission(registry2.address, 0) + await registry2.setPeerRegistryMigrationPermission(registry.address, 0) + await expect(registry.migrateUpkeeps([id], registry2.address)).to.be + .reverted + // only outgoing permissions + await registry.setPeerRegistryMigrationPermission(registry2.address, 1) + await registry2.setPeerRegistryMigrationPermission(registry.address, 0) + await expect(registry.migrateUpkeeps([id], registry2.address)).to.be + .reverted + // only incoming permissions + await registry.setPeerRegistryMigrationPermission(registry2.address, 0) + await registry2.setPeerRegistryMigrationPermission(registry.address, 2) + await expect(registry.migrateUpkeeps([id], registry2.address)).to.be + .reverted + // permissions opposite direction + await 
registry.setPeerRegistryMigrationPermission(registry2.address, 2) + await registry2.setPeerRegistryMigrationPermission(registry.address, 1) + await expect(registry.migrateUpkeeps([id], registry2.address)).to.be + .reverted + }) + }) + }) + + describe('#checkUpkeep / #performUpkeep', () => { + const performData = '0xc0ffeec0ffee' + const multiplier = BigNumber.from(10) + const flatFee = BigNumber.from('100000') //0.1 PLI + const callGasPrice = 1 + + it('uses the same minimum balance calculation [ @skip-coverage ]', async () => { + await registry.connect(owner).setConfig({ + paymentPremiumPPB, + flatFeeMicroLink: flatFee, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier: multiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + await linkToken.connect(owner).approve(registry.address, toWei('100')) + + const tx1 = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const upkeepID1 = await getUpkeepID(tx1) + const tx2 = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + ) + const upkeepID2 = await getUpkeepID(tx2) + await mock.setCanCheck(true) + await mock.setCanPerform(true) + // upkeep 1 is underfunded, 2 is funded + const minBalance1 = (await registry.getMaxPaymentForGas(executeGas)).sub( + 1, + ) + const minBalance2 = await registry.getMaxPaymentForGas(executeGas) + await registry.connect(owner).addFunds(upkeepID1, minBalance1) + await registry.connect(owner).addFunds(upkeepID2, minBalance2) + // upkeep 1 check should revert, 2 should succeed + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepID1, await keeper1.getAddress(), { + gasPrice: callGasPrice, + }), + ) + await registry + .connect(zeroAddress) + 
.callStatic.checkUpkeep(upkeepID2, await keeper1.getAddress(), { + gasPrice: callGasPrice, + }) + // upkeep 1 perform should revert, 2 should succeed + await evmRevert( + registry + .connect(keeper1) + .performUpkeep(upkeepID1, performData, { gasLimit: extraGas }), + 'InsufficientFunds()', + ) + await registry + .connect(keeper1) + .performUpkeep(upkeepID2, performData, { gasLimit: extraGas }) + }) + }) + + describe('#getMinBalanceForUpkeep / #checkUpkeep', () => { + it('calculates the minimum balance appropriately', async () => { + const oneWei = BigNumber.from('1') + await linkToken.connect(keeper1).approve(registry.address, toWei('100')) + await mock.setCanCheck(true) + await mock.setCanPerform(true) + const minBalance = await registry.getMinBalanceForUpkeep(id) + const tooLow = minBalance.sub(oneWei) + await registry.connect(keeper1).addFunds(id, tooLow) + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()), + 'InsufficientFunds()', + ) + await registry.connect(keeper1).addFunds(id, oneWei) + await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(id, await keeper1.getAddress()) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/KeeperRegistry2_0.test.ts b/contracts/test/v0.8/automation/KeeperRegistry2_0.test.ts new file mode 100644 index 00000000..078ebd36 --- /dev/null +++ b/contracts/test/v0.8/automation/KeeperRegistry2_0.test.ts @@ -0,0 +1,4802 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { BigNumber, Signer, Wallet } from 'ethers' +import { evmRevert } from '../../test-helpers/matchers' +import { getUsers, Personas } from '../../test-helpers/setup' +import { toWei } from '../../test-helpers/helpers' +import { LinkToken__factory as LinkTokenFactory } from '../../../typechain/factories/LinkToken__factory' +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../../typechain/factories/MockV3Aggregator__factory' +import { 
UpkeepMock__factory as UpkeepMockFactory } from '../../../typechain/factories/UpkeepMock__factory' +import { UpkeepAutoFunder__factory as UpkeepAutoFunderFactory } from '../../../typechain/factories/UpkeepAutoFunder__factory' +import { UpkeepTranscoder__factory as UpkeepTranscoderFactory } from '../../../typechain/factories/UpkeepTranscoder__factory' +import { KeeperRegistry2_0__factory as KeeperRegistryFactory } from '../../../typechain/factories/KeeperRegistry2_0__factory' +import { MockArbGasInfo__factory as MockArbGasInfoFactory } from '../../../typechain/factories/MockArbGasInfo__factory' +import { MockOVMGasPriceOracle__factory as MockOVMGasPriceOracleFactory } from '../../../typechain/factories/MockOVMGasPriceOracle__factory' +import { KeeperRegistryLogic2_0__factory as KeeperRegistryLogicFactory } from '../../../typechain/factories/KeeperRegistryLogic2_0__factory' +import { MockArbSys__factory as MockArbSysFactory } from '../../../typechain/factories/MockArbSys__factory' +import { KeeperRegistry2_0 as KeeperRegistry } from '../../../typechain/KeeperRegistry2_0' +import { KeeperRegistryLogic20 as KeeperRegistryLogic } from '../../../typechain/KeeperRegistryLogic20' +import { MockV3Aggregator } from '../../../typechain/MockV3Aggregator' +import { LinkToken } from '../../../typechain/LinkToken' +import { UpkeepMock } from '../../../typechain/UpkeepMock' +import { MockArbGasInfo } from '../../../typechain/MockArbGasInfo' +import { MockOVMGasPriceOracle } from '../../../typechain/MockOVMGasPriceOracle' +import { UpkeepTranscoder } from '../../../typechain/UpkeepTranscoder' + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +/*********************************** REGISTRY v2.0 IS FROZEN ************************************/ + +// All tests are disabled for this contract, as we expect it to never change in the 
future. +// Instead, we test that the bytecode for the contract has not changed. +// If this test ever fails, you should remove it and then re-run the original test suite. + +const BYTECODE = KeeperRegistryFactory.bytecode +const BYTECODE_CHECKSUM = + '0x60660453a335cdcd42b5aa64e58a8c04517e8a8645d2618b51a7552df6e2973b' + +describe('KeeperRegistry2_0 - Frozen [ @skip-coverage ]', () => { + it('has not changed', () => { + assert.equal(ethers.utils.id(BYTECODE), BYTECODE_CHECKSUM) + }) +}) + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +// copied from AutomationRegistryInterface2_0.sol +enum UpkeepFailureReason { + NONE, + UPKEEP_CANCELLED, + UPKEEP_PAUSED, + TARGET_CHECK_REVERTED, + UPKEEP_NOT_NEEDED, + PERFORM_DATA_EXCEEDS_LIMIT, + INSUFFICIENT_BALANCE, +} + +// copied from AutomationRegistryInterface2_0.sol +enum Mode { + DEFAULT, + ARBITRUM, + OPTIMISM, +} + +async function getUpkeepID(tx: any) { + const receipt = await tx.wait() + return receipt.events[0].args.id +} + +function randomAddress() { + return ethers.Wallet.createRandom().address +} + +// ----------------------------------------------------------------------------------------------- +// These are the gas overheads that off chain systems should provide to check upkeep / transmit +// These overheads are not actually charged for +const transmitGasOverhead = BigNumber.from(800000) +const checkGasOverhead = BigNumber.from(400000) + +// These values should match the constants declared in registry +const registryGasOverhead = BigNumber.from(70_000) +const registryPerSignerGasOverhead = BigNumber.from(7500) +const registryPerPerformByteGasOverhead = BigNumber.from(20) +const cancellationDelay = 50 + +// This is the margin for gas that we test for. 
Gas charged should always be greater +// than total gas used in tx but should not increase beyond this margin +const gasCalculationMargin = BigNumber.from(4000) +// ----------------------------------------------------------------------------------------------- + +// Smart contract factories +let linkTokenFactory: LinkTokenFactory +let mockV3AggregatorFactory: MockV3AggregatorFactory +let keeperRegistryFactory: KeeperRegistryFactory +let keeperRegistryLogicFactory: KeeperRegistryLogicFactory +let upkeepMockFactory: UpkeepMockFactory +let upkeepAutoFunderFactory: UpkeepAutoFunderFactory +let upkeepTranscoderFactory: UpkeepTranscoderFactory +let mockArbGasInfoFactory: MockArbGasInfoFactory +let mockOVMGasPriceOracleFactory: MockOVMGasPriceOracleFactory +let personas: Personas + +const encodeConfig = (config: any) => { + return ethers.utils.defaultAbiCoder.encode( + [ + 'tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds\ + ,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,\ + uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,\ + address registrar)', + ], + [config], + ) +} + +const linkEth = BigNumber.from(5000000000000000) // 1 Link = 0.005 Eth +const gasWei = BigNumber.from(1000000000) // 1 gwei +const encodeReport = ( + upkeeps: any, + gasWeiReport = gasWei, + linkEthReport = linkEth, +) => { + const upkeepIds = upkeeps.map((u: any) => u.Id) + const performDataTuples = upkeeps.map((u: any) => [ + u.checkBlockNum, + u.checkBlockHash, + u.performData, + ]) + return ethers.utils.defaultAbiCoder.encode( + ['uint256', 'uint256', 'uint256[]', 'tuple(uint32,bytes32,bytes)[]'], + [gasWeiReport, linkEthReport, upkeepIds, performDataTuples], + ) +} + +const encodeLatestBlockReport = async (upkeeps: any) => { + const latestBlock = await ethers.provider.getBlock('latest') + for (let i = 0; i < upkeeps.length; i++) { + 
upkeeps[i].checkBlockNum = latestBlock.number + upkeeps[i].checkBlockHash = latestBlock.hash + upkeeps[i].performData = '0x' + } + return encodeReport(upkeeps) +} + +const signReport = ( + reportContext: string[], + report: any, + signers: Wallet[], +) => { + const reportDigest = ethers.utils.keccak256(report) + const packedArgs = ethers.utils.solidityPack( + ['bytes32', 'bytes32[3]'], + [reportDigest, reportContext], + ) + const packedDigest = ethers.utils.keccak256(packedArgs) + + const signatures = [] + for (const signer of signers) { + signatures.push(signer._signingKey().signDigest(packedDigest)) + } + const vs = signatures.map((i) => '0' + (i.v - 27).toString(16)).join('') + return { + vs: '0x' + vs.padEnd(64, '0'), + rs: signatures.map((i) => i.r), + ss: signatures.map((i) => i.s), + } +} + +const parseUpkeepPerformedLogs = (receipt: any) => { + const upkeepPerformedABI = [ + 'event UpkeepPerformed(uint256 indexed id,bool indexed success, \ + uint32 checkBlockNumber,uint256 gasUsed,uint256 gasOverhead,uint96 totalPayment)', + ] + const iface = new ethers.utils.Interface(upkeepPerformedABI) + + const parsedLogs = [] + for (let i = 0; i < receipt.logs.length; i++) { + const log = receipt.logs[i] + try { + parsedLogs.push(iface.parseLog(log)) + } catch (e) { + // ignore log + } + } + return parsedLogs +} + +const parseReorgedUpkeepReportLogs = (receipt: any) => { + const logABI = [' event ReorgedUpkeepReport(uint256 indexed id)'] + const iface = new ethers.utils.Interface(logABI) + + const parsedLogs = [] + for (let i = 0; i < receipt.logs.length; i++) { + const log = receipt.logs[i] + try { + parsedLogs.push(iface.parseLog(log)) + } catch (e) { + // ignore log + } + } + return parsedLogs +} + +const parseStaleUpkeepReportLogs = (receipt: any) => { + const logABI = [' event StaleUpkeepReport(uint256 indexed id)'] + const iface = new ethers.utils.Interface(logABI) + + const parsedLogs = [] + for (let i = 0; i < receipt.logs.length; i++) { + const log = 
receipt.logs[i] + try { + parsedLogs.push(iface.parseLog(log)) + } catch (e) { + // ignore log + } + } + return parsedLogs +} + +const parseInsufficientFundsUpkeepReportLogs = (receipt: any) => { + const logABI = [' event InsufficientFundsUpkeepReport(uint256 indexed id)'] + const iface = new ethers.utils.Interface(logABI) + + const parsedLogs = [] + for (let i = 0; i < receipt.logs.length; i++) { + const log = receipt.logs[i] + try { + parsedLogs.push(iface.parseLog(log)) + } catch (e) { + // ignore log + } + } + return parsedLogs +} + +const parseCancelledUpkeepReportLogs = (receipt: any) => { + const logABI = [' event CancelledUpkeepReport(uint256 indexed id)'] + const iface = new ethers.utils.Interface(logABI) + + const parsedLogs = [] + for (let i = 0; i < receipt.logs.length; i++) { + const log = receipt.logs[i] + try { + parsedLogs.push(iface.parseLog(log)) + } catch (e) { + // ignore log + } + } + return parsedLogs +} + +before(async () => { + personas = (await getUsers()).personas + + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + ) + // need full path because there are two contracts with name MockV3Aggregator + mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as unknown as MockV3AggregatorFactory + keeperRegistryFactory = (await ethers.getContractFactory( + 'KeeperRegistry2_0', + )) as unknown as KeeperRegistryFactory // bug in typechain requires force casting + keeperRegistryLogicFactory = (await ethers.getContractFactory( + 'KeeperRegistryLogic2_0', + )) as unknown as KeeperRegistryLogicFactory // bug in typechain requires force casting + upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') + upkeepAutoFunderFactory = await ethers.getContractFactory('UpkeepAutoFunder') + upkeepTranscoderFactory = await ethers.getContractFactory('UpkeepTranscoder') + mockArbGasInfoFactory = await ethers.getContractFactory('MockArbGasInfo') + 
mockOVMGasPriceOracleFactory = await ethers.getContractFactory( + 'MockOVMGasPriceOracle', + ) +}) + +describe.skip('KeeperRegistry2_0', () => { + const linkDivisibility = BigNumber.from('1000000000000000000') + const executeGas = BigNumber.from('1000000') + const paymentPremiumBase = BigNumber.from('1000000000') + const paymentPremiumPPB = BigNumber.from('250000000') + const flatFeeMicroLink = BigNumber.from(0) + + const randomBytes = '0x1234abcd' + const emptyBytes = '0x' + const emptyBytes32 = + '0x0000000000000000000000000000000000000000000000000000000000000000' + + const stalenessSeconds = BigNumber.from(43820) + const gasCeilingMultiplier = BigNumber.from(2) + const checkGasLimit = BigNumber.from(10000000) + const fallbackGasPrice = gasWei.mul(BigNumber.from('2')) + const fallbackLinkPrice = linkEth.div(BigNumber.from('2')) + const maxCheckDataSize = BigNumber.from(1000) + const maxPerformDataSize = BigNumber.from(1000) + const maxPerformGas = BigNumber.from(5000000) + const minUpkeepSpend = BigNumber.from(0) + const f = 1 + const offchainVersion = 1 + const offchainBytes = '0x' + const zeroAddress = ethers.constants.AddressZero + const epochAndRound5_1 = + '0x0000000000000000000000000000000000000000000000000000000000000501' + + let owner: Signer + let keeper1: Signer + let keeper2: Signer + let keeper3: Signer + let keeper4: Signer + let keeper5: Signer + let nonkeeper: Signer + let signer1: Wallet + let signer2: Wallet + let signer3: Wallet + let signer4: Wallet + let signer5: Wallet + let admin: Signer + let payee1: Signer + let payee2: Signer + let payee3: Signer + let payee4: Signer + let payee5: Signer + + let linkToken: LinkToken + let linkEthFeed: MockV3Aggregator + let gasPriceFeed: MockV3Aggregator + let registry: KeeperRegistry + let registryLogic: KeeperRegistryLogic + let mock: UpkeepMock + let transcoder: UpkeepTranscoder + let mockArbGasInfo: MockArbGasInfo + let mockOVMGasPriceOracle: MockOVMGasPriceOracle + + let upkeepId: BigNumber + let 
keeperAddresses: string[] + let payees: string[] + let signers: Wallet[] + let signerAddresses: string[] + let config: any + + const linkForGas = ( + upkeepGasSpent: BigNumber, + gasOverhead: BigNumber, + gasMultiplier: BigNumber, + premiumPPB: BigNumber, + flatFee: BigNumber, + l1CostWei?: BigNumber, + numUpkeepsBatch?: BigNumber, + ) => { + l1CostWei = l1CostWei === undefined ? BigNumber.from(0) : l1CostWei + numUpkeepsBatch = + numUpkeepsBatch === undefined ? BigNumber.from(1) : numUpkeepsBatch + + const gasSpent = gasOverhead.add(BigNumber.from(upkeepGasSpent)) + const base = gasWei + .mul(gasMultiplier) + .mul(gasSpent) + .mul(linkDivisibility) + .div(linkEth) + const l1Fee = l1CostWei + .mul(gasMultiplier) + .div(numUpkeepsBatch) + .mul(linkDivisibility) + .div(linkEth) + const gasPayment = base.add(l1Fee) + + const premium = gasWei + .mul(gasMultiplier) + .mul(upkeepGasSpent) + .add(l1CostWei.mul(gasMultiplier).div(numUpkeepsBatch)) + .mul(linkDivisibility) + .div(linkEth) + .mul(premiumPPB) + .div(paymentPremiumBase) + .add(BigNumber.from(flatFee).mul('1000000000000')) + + return { + total: gasPayment.add(premium), + gasPaymemnt: gasPayment, + premium, + } + } + + const verifyMaxPayment = async ( + mode: number, + multipliers: BigNumber[], + gasAmounts: number[], + premiums: number[], + flatFees: number[], + l1CostWei?: BigNumber, + ) => { + const config = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + } + + // Deploy a new registry since we change payment model + const registryLogic = await keeperRegistryLogicFactory + .connect(owner) + .deploy( + mode, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + // Deploy a new registry since we change payment model + const registry = await 
keeperRegistryFactory + .connect(owner) + .deploy(registryLogic.address) + await registry + .connect(owner) + .setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ) + + const fPlusOne = BigNumber.from(f + 1) + const totalGasOverhead = registryGasOverhead + .add(registryPerSignerGasOverhead.mul(fPlusOne)) + .add(registryPerPerformByteGasOverhead.mul(maxPerformDataSize)) + + for (let idx = 0; idx < gasAmounts.length; idx++) { + const gas = gasAmounts[idx] + for (let jdx = 0; jdx < premiums.length; jdx++) { + const premium = premiums[jdx] + for (let kdx = 0; kdx < flatFees.length; kdx++) { + const flatFee = flatFees[kdx] + for (let ldx = 0; ldx < multipliers.length; ldx++) { + const multiplier = multipliers[ldx] + + await registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB: premium, + flatFeeMicroLink: flatFee, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier: multiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }), + offchainVersion, + offchainBytes, + ) + + const price = await registry.getMaxPaymentForGas(gas) + expect(price).to.equal( + linkForGas( + BigNumber.from(gas), + totalGasOverhead, + multiplier, + BigNumber.from(premium), + BigNumber.from(flatFee), + l1CostWei, + ).total, + ) + } + } + } + } + } + + const getTransmitTx = async ( + registry: KeeperRegistry, + transmitter: any, + upkeepIds: any, + numSigners: any, + extraParams?: any, + performData?: any, + checkBlockNum?: any, + checkBlockHash?: any, + ) => { + const latestBlock = await ethers.provider.getBlock('latest') + const configDigest = (await registry.getState()).state.latestConfigDigest + + const upkeeps = [] + for (let i = 0; i < upkeepIds.length; i++) { + upkeeps.push({ + Id: upkeepIds[i], + checkBlockNum: 
checkBlockNum ? checkBlockNum : latestBlock.number, + checkBlockHash: checkBlockHash ? checkBlockHash : latestBlock.hash, + performData: performData ? performData : '0x', + }) + } + + const report = encodeReport(upkeeps) + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] + const sigs = signReport(reportContext, report, signers.slice(0, numSigners)) + + return registry + .connect(transmitter) + .transmit( + [configDigest, epochAndRound5_1, emptyBytes32], + report, + sigs.rs, + sigs.ss, + sigs.vs, + { gasLimit: extraParams?.gasLimit, gasPrice: extraParams?.gasPrice }, + ) + } + + const getTransmitTxWithReport = async ( + registry: KeeperRegistry, + transmitter: any, + report: any, + numSigners: any, + ) => { + const configDigest = (await registry.getState()).state.latestConfigDigest + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] + const sigs = signReport(reportContext, report, signers.slice(0, numSigners)) + + return registry + .connect(transmitter) + .transmit( + [configDigest, epochAndRound5_1, emptyBytes32], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ) + } + + beforeEach(async () => { + // Deploys a registry, setups of initial configuration + // Registers an upkeep which is unfunded to start with + owner = personas.Default + keeper1 = personas.Carol + keeper2 = personas.Eddy + keeper3 = personas.Nancy + keeper4 = personas.Norbert + keeper5 = personas.Nick + nonkeeper = personas.Ned + admin = personas.Neil + payee1 = personas.Nelly + payee2 = personas.Norbert + payee3 = personas.Nick + payee4 = personas.Eddy + payee5 = personas.Carol + // signers + signer1 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000001', + ) + signer2 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000002', + ) + signer3 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000003', + ) + signer4 = new ethers.Wallet( + 
'0x7777777000000000000000000000000000000000000000000000000000000004', + ) + signer5 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000005', + ) + + keeperAddresses = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + await keeper3.getAddress(), + await keeper4.getAddress(), + await keeper5.getAddress(), + ] + payees = [ + await payee1.getAddress(), + await payee2.getAddress(), + await payee3.getAddress(), + await payee4.getAddress(), + await payee5.getAddress(), + ] + signers = [signer1, signer2, signer3, signer4, signer5] + + // We append 26 random addresses to keepers, payees and signers to get a system of 31 oracles + // This allows f value of 1 - 10 + for (let i = 0; i < 26; i++) { + keeperAddresses.push(randomAddress()) + payees.push(randomAddress()) + signers.push(ethers.Wallet.createRandom()) + } + signerAddresses = [] + for (const signer of signers) { + signerAddresses.push(await signer.getAddress()) + } + + linkToken = await linkTokenFactory.connect(owner).deploy() + gasPriceFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(0, gasWei) + linkEthFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(9, linkEth) + transcoder = await upkeepTranscoderFactory.connect(owner).deploy() + mockArbGasInfo = await mockArbGasInfoFactory.connect(owner).deploy() + mockOVMGasPriceOracle = await mockOVMGasPriceOracleFactory + .connect(owner) + .deploy() + + const arbOracleCode = await ethers.provider.send('eth_getCode', [ + mockArbGasInfo.address, + ]) + await ethers.provider.send('hardhat_setCode', [ + '0x000000000000000000000000000000000000006C', + arbOracleCode, + ]) + + const optOracleCode = await ethers.provider.send('eth_getCode', [ + mockOVMGasPriceOracle.address, + ]) + await ethers.provider.send('hardhat_setCode', [ + '0x420000000000000000000000000000000000000F', + optOracleCode, + ]) + + const mockArbSys = await new MockArbSysFactory(owner).deploy() + const arbSysCode = await 
ethers.provider.send('eth_getCode', [ + mockArbSys.address, + ]) + await ethers.provider.send('hardhat_setCode', [ + '0x0000000000000000000000000000000000000064', + arbSysCode, + ]) + + registryLogic = await keeperRegistryLogicFactory + .connect(owner) + .deploy( + Mode.DEFAULT, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + config = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + } + registry = await keeperRegistryFactory + .connect(owner) + .deploy(registryLogic.address) + + await registry + .connect(owner) + .setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ) + await registry.connect(owner).setPayees(payees) + + mock = await upkeepMockFactory.deploy() + await linkToken + .connect(owner) + .transfer(await admin.getAddress(), toWei('1000')) + await linkToken.connect(admin).approve(registry.address, toWei('1000')) + await linkToken.connect(owner).approve(registry.address, toWei('1000')) + + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + randomBytes, + emptyBytes, + ) + upkeepId = await getUpkeepID(tx) + }) + + describe('#transmit', () => { + const fArray = [1, 5, 10] + + it('reverts when registry is paused', async () => { + await registry.connect(owner).pause() + await evmRevert( + getTransmitTx(registry, keeper1, [upkeepId.toString()], f + 1), + 'RegistryPaused()', + ) + }) + + it('reverts when called by non active transmitter', async () => { + await evmRevert( + getTransmitTx(registry, payee1, [upkeepId.toString()], f + 1), + 'OnlyActiveTransmitters()', + ) + }) + + it('reverts when upkeeps and performData length mismatches', async () => { + 
const upkeepIds = [] + const performDataTuples = [] + const latestBlock = await ethers.provider.getBlock('latest') + + upkeepIds.push(upkeepId) + performDataTuples.push([latestBlock.number + 1, latestBlock.hash, '0x']) + // Push an extra perform data + performDataTuples.push([latestBlock.number + 1, latestBlock.hash, '0x']) + + const report = ethers.utils.defaultAbiCoder.encode( + ['uint256', 'uint256', 'uint256[]', 'tuple(uint32,bytes32,bytes)[]'], + [0, 0, upkeepIds, performDataTuples], + ) + + await evmRevert( + getTransmitTxWithReport(registry, keeper1, report, f + 1), + 'InvalidReport()', + ) + }) + + it('reverts when wrappedPerformData is incorrectly encoded', async () => { + const upkeepIds = [] + const wrappedPerformDatas = [] + const latestBlock = await ethers.provider.getBlock('latest') + + upkeepIds.push(upkeepId) + wrappedPerformDatas.push( + ethers.utils.defaultAbiCoder.encode( + ['tuple(uint32,bytes32)'], // missing performData + [[latestBlock.number + 1, latestBlock.hash]], + ), + ) + + const report = ethers.utils.defaultAbiCoder.encode( + ['uint256[]', 'bytes[]'], + [upkeepIds, wrappedPerformDatas], + ) + + await evmRevert(getTransmitTxWithReport(registry, keeper1, report, f + 1)) + }) + + it('returns early when no upkeeps are included in report', async () => { + const upkeepIds: string[] = [] + const wrappedPerformDatas: string[] = [] + const report = ethers.utils.defaultAbiCoder.encode( + ['uint256', 'uint256', 'uint256[]', 'bytes[]'], + [0, 0, upkeepIds, wrappedPerformDatas], + ) + + await getTransmitTxWithReport(registry, keeper1, report, f + 1) + }) + + it('returns early when invalid upkeepIds are included in report', async () => { + const tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.add(BigNumber.from('1')).toString()], + f + 1, + ) + + const receipt = await tx.wait() + const cancelledUpkeepReportLogs = parseCancelledUpkeepReportLogs(receipt) + // exactly 1 CancelledUpkeepReport log should be emitted + 
assert.equal(cancelledUpkeepReportLogs.length, 1) + }) + + it('reverts when duplicated upkeepIds are included in report', async () => { + // Fund the upkeep so that pre-checks pass + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + await evmRevert( + getTransmitTx( + registry, + keeper1, + [upkeepId.toString(), upkeepId.toString()], + f + 1, + ), + 'InvalidReport()', + ) + }) + + it('returns early when upkeep has insufficient funds', async () => { + const tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + ) + + const receipt = await tx.wait() + const insufficientFundsUpkeepReportLogs = + parseInsufficientFundsUpkeepReportLogs(receipt) + // exactly 1 InsufficientFundsUpkeepReportLogs log should be emitted + assert.equal(insufficientFundsUpkeepReportLogs.length, 1) + }) + + context('When the upkeep is funded', async () => { + beforeEach(async () => { + // Fund the upkeep + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + }) + + it('returns early when check block number is less than last perform', async () => { + // First perform an upkeep to put last perform block number on upkeep state + + const tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + ) + await tx.wait() + + const lastPerformBlockNumber = (await registry.getUpkeep(upkeepId)) + .lastPerformBlockNumber + const lastPerformBlock = await ethers.provider.getBlock( + lastPerformBlockNumber, + ) + assert.equal( + lastPerformBlockNumber.toString(), + tx.blockNumber?.toString(), + ) + + // Try to transmit a report which has checkBlockNumber = lastPerformBlockNumber-1, should result in stale report + const transmitTx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + {}, + '0x', + lastPerformBlock.number - 1, + lastPerformBlock.parentHash, + ) + + const receipt = await transmitTx.wait() + const staleUpkeepReportLogs = parseStaleUpkeepReportLogs(receipt) + // exactly 1 
StaleUpkeepReportLogs log should be emitted + assert.equal(staleUpkeepReportLogs.length, 1) + }) + + it('returns early when check block hash does not match', async () => { + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + const latestBlock = await ethers.provider.getBlock('latest') + // Try to transmit a report which has incorrect checkBlockHash + const tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + {}, + '0x', + latestBlock.number - 1, + latestBlock.hash, + ) // should be latestBlock.parentHash + + const receipt = await tx.wait() + const reorgedUpkeepReportLogs = parseReorgedUpkeepReportLogs(receipt) + // exactly 1 ReorgedUpkeepReportLogs log should be emitted + assert.equal(reorgedUpkeepReportLogs.length, 1) + }) + + it('returns early when check block number is older than 256 blocks', async () => { + const latestBlockReport = await encodeLatestBlockReport([ + { Id: upkeepId.toString() }, + ]) + + for (let i = 0; i < 256; i++) { + await ethers.provider.send('evm_mine', []) + } + + // Try to transmit a report which is older than 256 blocks so block hash cannot be matched + const tx = await registry + .connect(keeper1) + .transmit( + [emptyBytes32, emptyBytes32, emptyBytes32], + latestBlockReport, + [], + [], + emptyBytes32, + ) + + const receipt = await tx.wait() + const reorgedUpkeepReportLogs = parseReorgedUpkeepReportLogs(receipt) + // exactly 1 ReorgedUpkeepReportLogs log should be emitted + assert.equal(reorgedUpkeepReportLogs.length, 1) + }) + + it('returns early when upkeep is cancelled and cancellation delay has gone', async () => { + const latestBlockReport = await encodeLatestBlockReport([ + { Id: upkeepId.toString() }, + ]) + await registry.connect(admin).cancelUpkeep(upkeepId) + + for (let i = 0; i < cancellationDelay; i++) { + await ethers.provider.send('evm_mine', []) + } + + const tx = await getTransmitTxWithReport( + registry, + keeper1, + latestBlockReport, + f + 1, + ) + + const receipt = 
await tx.wait() + const cancelledUpkeepReportLogs = + parseCancelledUpkeepReportLogs(receipt) + // exactly 1 CancelledUpkeepReport log should be emitted + assert.equal(cancelledUpkeepReportLogs.length, 1) + }) + + it('does not revert if the target cannot execute', async () => { + mock.setCanPerform(false) + const tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + ) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const success = upkeepPerformedLog.args.success + assert.equal(success, false) + }) + + it('reverts if not enough gas supplied', async () => { + mock.setPerformGasToBurn(executeGas) + await evmRevert( + getTransmitTx(registry, keeper1, [upkeepId.toString()], f + 1, { + gasLimit: executeGas, + }), + ) + }) + + it('executes the data passed to the registry', async () => { + mock.setCanPerform(true) + + const tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + {}, + randomBytes, + ) + const receipt = await tx.wait() + + const upkeepPerformedWithABI = [ + 'event UpkeepPerformedWith(bytes upkeepData)', + ] + const iface = new ethers.utils.Interface(upkeepPerformedWithABI) + const parsedLogs = [] + for (let i = 0; i < receipt.logs.length; i++) { + const log = receipt.logs[i] + try { + parsedLogs.push(iface.parseLog(log)) + } catch (e) { + // ignore log + } + } + assert.equal(parsedLogs.length, 1) + assert.equal(parsedLogs[0].args.upkeepData, randomBytes) + }) + + it('uses actual execution price for payment and premium calculation', async () => { + // Actual multiplier is 2, but we set gasPrice to be 1x gasWei + const gasPrice = gasWei.mul(BigNumber.from('1')) + mock.setCanPerform(true) + const registryPremiumBefore = (await registry.getState()).state + .totalPremium + const tx = await 
getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + { gasPrice }, + ) + const receipt = await tx.wait() + const registryPremiumAfter = (await registry.getState()).state + .totalPremium + const premium = registryPremiumAfter.sub(registryPremiumBefore) + + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + BigNumber.from('1'), // Not the config multiplier, but the actual gas used + paymentPremiumPPB, + flatFeeMicroLink, + ).total.toString(), + totalPayment.toString(), + ) + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + BigNumber.from('1'), // Not the config multiplier, but the actual gas used + paymentPremiumPPB, + flatFeeMicroLink, + ).premium.toString(), + premium.toString(), + ) + }) + + it('only pays at a rate up to the gas ceiling [ @skip-coverage ]', async () => { + // Actual multiplier is 2, but we set gasPrice to be 10x + const gasPrice = gasWei.mul(BigNumber.from('10')) + mock.setCanPerform(true) + + const tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + { gasPrice }, + ) + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + gasCeilingMultiplier, // Should be same with exisitng multiplier + 
paymentPremiumPPB, + flatFeeMicroLink, + ).total.toString(), + totalPayment.toString(), + ) + }) + + it('correctly accounts for l1 payment', async () => { + mock.setCanPerform(true) + // Same as MockArbGasInfo.sol + const l1CostWeiArb = BigNumber.from(1000000) + + // Deploy a new registry since we change payment model + const registryLogic = await keeperRegistryLogicFactory + .connect(owner) + .deploy( + Mode.ARBITRUM, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + // Deploy a new registry since we change payment model + const registry = await keeperRegistryFactory + .connect(owner) + .deploy(registryLogic.address) + await registry + .connect(owner) + .setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ) + let tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + randomBytes, + emptyBytes, + ) + upkeepId = await getUpkeepID(tx) + await linkToken.connect(owner).approve(registry.address, toWei('1000')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + + // Do the thing + tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + { gasPrice: gasWei.mul('5') }, // High gas price so that it gets capped + ) + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + gasCeilingMultiplier, + paymentPremiumPPB, + flatFeeMicroLink, + l1CostWeiArb.div(gasCeilingMultiplier), // Dividing by gasCeilingMultiplier as it gets multiplied later + ).total.toString(), + 
totalPayment.toString(), + ) + }) + + it('can self fund', async () => { + const autoFunderUpkeep = await upkeepAutoFunderFactory + .connect(owner) + .deploy(linkToken.address, registry.address) + const tx = await registry + .connect(owner) + .registerUpkeep( + autoFunderUpkeep.address, + executeGas, + autoFunderUpkeep.address, + randomBytes, + emptyBytes, + ) + upkeepId = await getUpkeepID(tx) + + await autoFunderUpkeep.setUpkeepId(upkeepId) + // Give enough funds for upkeep as well as to the upkeep contract + await linkToken + .connect(owner) + .transfer(autoFunderUpkeep.address, toWei('1000')) + const maxPayment = await registry.getMaxPaymentForGas(executeGas) + + // First set auto funding amount to 0 and verify that balance is deducted upon performUpkeep + let initialBalance = toWei('100') + await registry.connect(owner).addFunds(upkeepId, initialBalance) + await autoFunderUpkeep.setAutoFundLink(0) + await autoFunderUpkeep.setIsEligible(true) + await getTransmitTx(registry, keeper1, [upkeepId.toString()], f + 1) + + let postUpkeepBalance = (await registry.getUpkeep(upkeepId)).balance + assert.isTrue(postUpkeepBalance.lt(initialBalance)) // Balance should be deducted + assert.isTrue(postUpkeepBalance.gte(initialBalance.sub(maxPayment))) // Balance should not be deducted more than maxPayment + + // Now set auto funding amount to 100 wei and verify that the balance increases + initialBalance = postUpkeepBalance + const autoTopupAmount = toWei('100') + await autoFunderUpkeep.setAutoFundLink(autoTopupAmount) + await autoFunderUpkeep.setIsEligible(true) + await getTransmitTx(registry, keeper1, [upkeepId.toString()], f + 1) + + postUpkeepBalance = (await registry.getUpkeep(upkeepId)).balance + // Balance should increase by autoTopupAmount and decrease by max maxPayment + assert.isTrue( + postUpkeepBalance.gte( + initialBalance.add(autoTopupAmount).sub(maxPayment), + ), + ) + }) + + it('can self cancel', async () => { + const autoFunderUpkeep = await 
upkeepAutoFunderFactory + .connect(owner) + .deploy(linkToken.address, registry.address) + const tx = await registry + .connect(owner) + .registerUpkeep( + autoFunderUpkeep.address, + executeGas, + autoFunderUpkeep.address, + randomBytes, + emptyBytes, + ) + upkeepId = await getUpkeepID(tx) + + await autoFunderUpkeep.setUpkeepId(upkeepId) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + + await autoFunderUpkeep.setIsEligible(true) + await autoFunderUpkeep.setShouldCancel(true) + + let registration = await registry.getUpkeep(upkeepId) + const oldExpiration = registration.maxValidBlocknumber + + // Do the thing + await getTransmitTx(registry, keeper1, [upkeepId.toString()], f + 1) + + // Verify upkeep gets cancelled + registration = await registry.getUpkeep(upkeepId) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) + + it('reverts when configDigest mismatches', async () => { + const report = await encodeLatestBlockReport([ + { + Id: upkeepId.toString(), + }, + ]) + const reportContext = [emptyBytes32, epochAndRound5_1, emptyBytes32] // wrong config digest + const sigs = signReport(reportContext, report, signers.slice(0, f + 1)) + await evmRevert( + registry + .connect(keeper1) + .transmit( + [reportContext[0], reportContext[1], reportContext[2]], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ), + 'ConfigDigestMismatch()', + ) + }) + + it('reverts with incorrect number of signatures', async () => { + const configDigest = (await registry.getState()).state + .latestConfigDigest + const report = await encodeLatestBlockReport([ + { + Id: upkeepId.toString(), + }, + ]) + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] // wrong config digest + const sigs = signReport(reportContext, report, signers.slice(0, f + 2)) + await evmRevert( + registry + .connect(keeper1) + .transmit( + [reportContext[0], reportContext[1], reportContext[2]], + report, + sigs.rs, + sigs.ss, + sigs.vs, 
+ ), + 'IncorrectNumberOfSignatures()', + ) + }) + + it('reverts with invalid signature for inactive signers', async () => { + const configDigest = (await registry.getState()).state + .latestConfigDigest + const report = await encodeLatestBlockReport([ + { + Id: upkeepId.toString(), + }, + ]) + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] // wrong config digest + const sigs = signReport(reportContext, report, [ + new ethers.Wallet(ethers.Wallet.createRandom()), + new ethers.Wallet(ethers.Wallet.createRandom()), + ]) + await evmRevert( + registry + .connect(keeper1) + .transmit( + [reportContext[0], reportContext[1], reportContext[2]], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ), + 'OnlyActiveSigners()', + ) + }) + + it('reverts with invalid signature for duplicated signers', async () => { + const configDigest = (await registry.getState()).state + .latestConfigDigest + const report = await encodeLatestBlockReport([ + { + Id: upkeepId.toString(), + }, + ]) + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] // wrong config digest + const sigs = signReport(reportContext, report, [signer1, signer1]) + await evmRevert( + registry + .connect(keeper1) + .transmit( + [reportContext[0], reportContext[1], reportContext[2]], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ), + 'DuplicateSigners()', + ) + }) + + it('has a large enough gas overhead to cover upkeep that use all its gas [ @skip-coverage ]', async () => { + await registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + 10, // maximise f to maximise overhead + encodeConfig(config), + offchainVersion, + offchainBytes, + ) + const tx = await registry.connect(owner).registerUpkeep( + mock.address, + maxPerformGas, // max allowed gas + await admin.getAddress(), + randomBytes, + emptyBytes, + ) + upkeepId = await getUpkeepID(tx) + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + + let performData = '0x' + for (let i = 0; i < 
maxPerformDataSize.toNumber(); i++) { + performData += '11' + } // max allowed performData + + mock.setCanPerform(true) + mock.setPerformGasToBurn(maxPerformGas) + + await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + 11, + { gasLimit: maxPerformGas.add(transmitGasOverhead) }, + performData, + ) // Should not revert + }) + + it('performs upkeep, deducts payment, updates lastPerformBlockNumber and emits events', async () => { + for (const i in fArray) { + const newF = fArray[i] + await registry + .connect(owner) + .setConfig( + signerAddresses, + keeperAddresses, + newF, + encodeConfig(config), + offchainVersion, + offchainBytes, + ) + mock.setCanPerform(true) + const checkBlock = await ethers.provider.getBlock('latest') + + const keeperBefore = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const registrationBefore = await registry.getUpkeep(upkeepId) + const registryPremiumBefore = (await registry.getState()).state + .totalPremium + const keeperLinkBefore = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkBefore = await linkToken.balanceOf(registry.address) + + // Do the thing + const tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + newF + 1, + {}, + '0x', + checkBlock.number - 1, + checkBlock.parentHash, + ) + + const receipt = await tx.wait() + + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const id = upkeepPerformedLog.args.id + const success = upkeepPerformedLog.args.success + const checkBlockNumber = upkeepPerformedLog.args.checkBlockNumber + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal(id.toString(), upkeepId.toString()) + assert.equal(success, true) + 
assert.equal( + checkBlockNumber.toString(), + (checkBlock.number - 1).toString(), + ) + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(gasOverhead.gt(BigNumber.from('0'))) + assert.isTrue(totalPayment.gt(BigNumber.from('0'))) + + const keeperAfter = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const registrationAfter = await registry.getUpkeep(upkeepId) + const keeperLinkAfter = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkAfter = await linkToken.balanceOf(registry.address) + const registryPremiumAfter = (await registry.getState()).state + .totalPremium + const premium = registryPremiumAfter.sub(registryPremiumBefore) + // Keeper payment is gasPayment + premium / num keepers + const keeperPayment = totalPayment + .sub(premium) + .add(premium.div(BigNumber.from(keeperAddresses.length))) + + assert.equal( + keeperAfter.balance.sub(keeperPayment).toString(), + keeperBefore.balance.toString(), + ) + assert.equal( + registrationBefore.balance.sub(totalPayment).toString(), + registrationAfter.balance.toString(), + ) + assert.isTrue(keeperLinkAfter.eq(keeperLinkBefore)) + assert.isTrue(registryLinkBefore.eq(registryLinkAfter)) + + // Amount spent should be updated correctly + assert.equal( + registrationAfter.amountSpent.sub(totalPayment).toString(), + registrationBefore.amountSpent.toString(), + ) + assert.isTrue( + registrationAfter.amountSpent + .sub(registrationBefore.amountSpent) + .eq(registrationBefore.balance.sub(registrationAfter.balance)), + ) + // Last perform block number should be updated + assert.equal( + registrationAfter.lastPerformBlockNumber.toString(), + tx.blockNumber?.toString(), + ) + + // Latest epoch should be 5 + assert.equal((await registry.getState()).state.latestEpoch, 5) + } + }) + + it('calculates gas overhead appropriately within a margin for different scenarios [ @skip-coverage ]', async () => { + // Perform the upkeep once to remove non-zero storage slots and 
have predictable gas measurement + + let tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + ) + + await tx.wait() + + // Different test scenarios + let longBytes = '0x' + for (let i = 0; i < maxPerformDataSize.toNumber(); i++) { + longBytes += '11' + } + const upkeepSuccessArray = [true, false] + const performGasArray = [5000, 100000, executeGas] + const performDataArray = ['0x', randomBytes, longBytes] + + for (const i in upkeepSuccessArray) { + for (const j in performGasArray) { + for (const k in performDataArray) { + for (const l in fArray) { + const upkeepSuccess = upkeepSuccessArray[i] + const performGas = performGasArray[j] + const performData = performDataArray[k] + const newF = fArray[l] + + mock.setCanPerform(upkeepSuccess) + mock.setPerformGasToBurn(performGas) + await registry + .connect(owner) + .setConfig( + signerAddresses, + keeperAddresses, + newF, + encodeConfig(config), + offchainVersion, + offchainBytes, + ) + tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + newF + 1, + {}, + performData, + ) + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const upkeepGasUsed = upkeepPerformedLog.args.gasUsed + const chargedGasOverhead = upkeepPerformedLog.args.gasOverhead + const actualGasOverhead = receipt.gasUsed.sub(upkeepGasUsed) + + assert.isTrue(upkeepGasUsed.gt(BigNumber.from('0'))) + assert.isTrue(chargedGasOverhead.gt(BigNumber.from('0'))) + + if (i == '0' && j == '0' && k == '0') { + console.log( + 'Gas Benchmarking - sig verification ( f =', + newF, + '): calculated overhead: ', + chargedGasOverhead.toString(), + ' actual overhead: ', + actualGasOverhead.toString(), + ' margin over gasUsed: ', + chargedGasOverhead.sub(actualGasOverhead).toString(), + ) + } + + // Overhead should not get capped 
+ const gasOverheadCap = registryGasOverhead + .add( + registryPerSignerGasOverhead.mul(BigNumber.from(newF + 1)), + ) + .add( + BigNumber.from( + registryPerPerformByteGasOverhead.toNumber() * + performData.length, + ), + ) + const gasCapMinusOverhead = + gasOverheadCap.sub(chargedGasOverhead) + assert.isTrue( + gasCapMinusOverhead.gt(BigNumber.from(0)), + 'Gas overhead got capped. Verify gas overhead variables in test match those in the registry. To not have the overheads capped increase REGISTRY_GAS_OVERHEAD by atleast ' + + gasCapMinusOverhead.toString(), + ) + // total gas charged should be greater than tx gas but within gasCalculationMargin + assert.isTrue( + chargedGasOverhead.gt(actualGasOverhead), + 'Gas overhead calculated is too low, increase account gas variables (ACCOUNTING_FIXED_GAS_OVERHEAD/ACCOUNTING_PER_SIGNER_GAS_OVERHEAD) by atleast ' + + actualGasOverhead.sub(chargedGasOverhead).toString(), + ) + + assert.isTrue( + chargedGasOverhead + .sub(actualGasOverhead) + .lt(BigNumber.from(gasCalculationMargin)), + ), + 'Gas overhead calculated is too high, decrease account gas variables (ACCOUNTING_FIXED_GAS_OVERHEAD/ACCOUNTING_PER_SIGNER_GAS_OVERHEAD) by atleast ' + + chargedGasOverhead + .sub(chargedGasOverhead) + .sub(BigNumber.from(gasCalculationMargin)) + .toString() + } + } + } + } + }) + }) + + describe('When upkeeps are batched', () => { + const numPassingUpkeepsArray = [1, 2, 10] + const numFailingUpkeepsArray = [0, 1, 3] + + numPassingUpkeepsArray.forEach(function (numPassingUpkeeps) { + numFailingUpkeepsArray.forEach(function (numFailingUpkeeps) { + describe( + 'passing upkeeps ' + + numPassingUpkeeps.toString() + + ', failing upkeeps ' + + numFailingUpkeeps.toString(), + () => { + let passingUpkeepIds: string[] + let failingUpkeepIds: string[] + + beforeEach(async () => { + passingUpkeepIds = [] + failingUpkeepIds = [] + for (let i = 0; i < numPassingUpkeeps; i++) { + mock = await upkeepMockFactory.deploy() + const tx = await registry + 
.connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + randomBytes, + emptyBytes, + ) + upkeepId = await getUpkeepID(tx) + passingUpkeepIds.push(upkeepId.toString()) + + // Add funds to passing upkeeps + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + } + for (let i = 0; i < numFailingUpkeeps; i++) { + mock = await upkeepMockFactory.deploy() + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + randomBytes, + emptyBytes, + ) + upkeepId = await getUpkeepID(tx) + failingUpkeepIds.push(upkeepId.toString()) + } + }) + + it('performs successful upkeeps and does not change failing upkeeps', async () => { + const keeperBefore = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const keeperLinkBefore = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkBefore = await linkToken.balanceOf( + registry.address, + ) + const registryPremiumBefore = (await registry.getState()).state + .totalPremium + const registrationPassingBefore = await Promise.all( + passingUpkeepIds.map(async (id) => { + const reg = await registry.getUpkeep(BigNumber.from(id)) + assert.equal(reg.lastPerformBlockNumber.toString(), '0') + return reg + }), + ) + const registrationFailingBefore = await await Promise.all( + failingUpkeepIds.map(async (id) => { + const reg = await registry.getUpkeep(BigNumber.from(id)) + assert.equal(reg.lastPerformBlockNumber.toString(), '0') + return reg + }), + ) + + const tx = await getTransmitTx( + registry, + keeper1, + passingUpkeepIds.concat(failingUpkeepIds), + f + 1, + ) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly numPassingUpkeeps Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, numPassingUpkeeps) + const insufficientFundsLogs = + parseInsufficientFundsUpkeepReportLogs(receipt) + // exactly 
numFailingUpkeeps Upkeep Performed should be emitted + assert.equal(insufficientFundsLogs.length, numFailingUpkeeps) + + const keeperAfter = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const keeperLinkAfter = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkAfter = await linkToken.balanceOf( + registry.address, + ) + const registrationPassingAfter = await Promise.all( + passingUpkeepIds.map(async (id) => { + return await registry.getUpkeep(BigNumber.from(id)) + }), + ) + const registrationFailingAfter = await await Promise.all( + failingUpkeepIds.map(async (id) => { + return await registry.getUpkeep(BigNumber.from(id)) + }), + ) + const registryPremiumAfter = (await registry.getState()).state + .totalPremium + const premium = registryPremiumAfter.sub(registryPremiumBefore) + + let netPayment = BigNumber.from('0') + for (let i = 0; i < numPassingUpkeeps; i++) { + const id = upkeepPerformedLogs[i].args.id + const gasUsed = upkeepPerformedLogs[i].args.gasUsed + const gasOverhead = upkeepPerformedLogs[i].args.gasOverhead + const totalPayment = upkeepPerformedLogs[i].args.totalPayment + + assert.equal(id.toString(), passingUpkeepIds[i]) + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(gasOverhead.gt(BigNumber.from('0'))) + assert.isTrue(totalPayment.gt(BigNumber.from('0'))) + + // Balance should be deducted + assert.equal( + registrationPassingBefore[i].balance + .sub(totalPayment) + .toString(), + registrationPassingAfter[i].balance.toString(), + ) + + // Amount spent should be updated correctly + assert.equal( + registrationPassingAfter[i].amountSpent + .sub(totalPayment) + .toString(), + registrationPassingBefore[i].amountSpent.toString(), + ) + + // Last perform block number should be updated + assert.equal( + registrationPassingAfter[ + i + ].lastPerformBlockNumber.toString(), + tx.blockNumber?.toString(), + ) + + netPayment = netPayment.add(totalPayment) + } + + for (let i = 0; i < 
numFailingUpkeeps; i++) { + // InsufficientFunds log should be emitted + const id = insufficientFundsLogs[i].args.id + assert.equal(id.toString(), failingUpkeepIds[i]) + + // Balance and amount spent should be same + assert.equal( + registrationFailingBefore[i].balance.toString(), + registrationFailingAfter[i].balance.toString(), + ) + assert.equal( + registrationFailingBefore[i].amountSpent.toString(), + registrationFailingAfter[i].amountSpent.toString(), + ) + + // Last perform block number should not be updated + assert.equal( + registrationFailingAfter[ + i + ].lastPerformBlockNumber.toString(), + '0', + ) + } + + // Keeper payment is gasPayment + premium / num keepers + const keeperPayment = netPayment + .sub(premium) + .add(premium.div(BigNumber.from(keeperAddresses.length))) + + // Keeper should be paid net payment for all passed upkeeps + assert.equal( + keeperAfter.balance.sub(keeperPayment).toString(), + keeperBefore.balance.toString(), + ) + + assert.isTrue(keeperLinkAfter.eq(keeperLinkBefore)) + assert.isTrue(registryLinkBefore.eq(registryLinkAfter)) + }) + + it('splits gas overhead appropriately among performed upkeeps [ @skip-coverage ]', async () => { + // Perform the upkeeps once to remove non-zero storage slots and have predictable gas measurement + let tx = await getTransmitTx( + registry, + keeper1, + passingUpkeepIds.concat(failingUpkeepIds), + f + 1, + ) + + await tx.wait() + + // Do the actual thing + + tx = await getTransmitTx( + registry, + keeper1, + passingUpkeepIds.concat(failingUpkeepIds), + f + 1, + ) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly numPassingUpkeeps Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, numPassingUpkeeps) + + const gasOverheadCap = registryGasOverhead.add( + registryPerSignerGasOverhead.mul(BigNumber.from(f + 1)), + ) + + const overheadCanGetCapped = + numPassingUpkeeps == 1 && numFailingUpkeeps > 0 + // Should only 
happen with 1 successful upkeep and some failing upkeeps. + // With 2 successful upkeeps and upto 3 failing upkeeps, overhead should be small enough to not get capped + let netGasUsedPlusOverhead = BigNumber.from('0') + + for (let i = 0; i < numPassingUpkeeps; i++) { + const gasUsed = upkeepPerformedLogs[i].args.gasUsed + const gasOverhead = upkeepPerformedLogs[i].args.gasOverhead + + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(gasOverhead.gt(BigNumber.from('0'))) + + // Overhead should not exceed capped + assert.isTrue(gasOverhead.lte(gasOverheadCap)) + + // Overhead should be same for every upkeep since they have equal performData, hence same caps + assert.isTrue( + gasOverhead.eq(upkeepPerformedLogs[0].args.gasOverhead), + ) + + netGasUsedPlusOverhead = netGasUsedPlusOverhead + .add(gasUsed) + .add(gasOverhead) + } + + const overheadsGotCapped = + upkeepPerformedLogs[0].args.gasOverhead.eq(gasOverheadCap) + // Should only get capped in certain scenarios + if (overheadsGotCapped) { + assert.isTrue( + overheadCanGetCapped, + 'Gas overhead got capped. Verify gas overhead variables in test match those in the registry. 
To not have the overheads capped increase REGISTRY_GAS_OVERHEAD', + ) + } + + console.log( + 'Gas Benchmarking - batching (passedUpkeeps: ', + numPassingUpkeeps, + 'failedUpkeeps:', + numFailingUpkeeps, + '): ', + 'overheadsGotCapped', + overheadsGotCapped, + 'calculated overhead', + upkeepPerformedLogs[0].args.gasOverhead.toString(), + ' margin over gasUsed', + netGasUsedPlusOverhead.sub(receipt.gasUsed).toString(), + ) + + // If overheads dont get capped then total gas charged should be greater than tx gas + // We don't check whether the net is within gasMargin as the margin changes with numFailedUpkeeps + // Which is ok, as long as individual gas overhead is capped + if (!overheadsGotCapped) { + assert.isTrue( + netGasUsedPlusOverhead.gt(receipt.gasUsed), + 'Gas overhead is too low, increase ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD', + ) + } + }) + }, + ) + }) + }) + + it('has enough perform gas overhead for large batches [ @skip-coverage ]', async () => { + const numUpkeeps = 20 + const upkeepIds: string[] = [] + let totalExecuteGas = BigNumber.from('0') + for (let i = 0; i < numUpkeeps; i++) { + mock = await upkeepMockFactory.deploy() + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + randomBytes, + emptyBytes, + ) + upkeepId = await getUpkeepID(tx) + upkeepIds.push(upkeepId.toString()) + + // Add funds to passing upkeeps + await registry.connect(owner).addFunds(upkeepId, toWei('10')) + + mock.setCanPerform(true) + mock.setPerformGasToBurn(executeGas) + + totalExecuteGas = totalExecuteGas.add(executeGas) + } + + // Should revert with no overhead added + await evmRevert( + getTransmitTx(registry, keeper1, upkeepIds, f + 1, { + gasLimit: totalExecuteGas, + }), + ) + // Should not revert with overhead added + await getTransmitTx(registry, keeper1, upkeepIds, f + 1, { + gasLimit: totalExecuteGas.add(transmitGasOverhead), + }) + }) + + it('splits l2 payment among performed upkeeps', async () => { + 
const numUpkeeps = 7 + const upkeepIds: string[] = [] + // Same as MockArbGasInfo.sol + const l1CostWeiArb = BigNumber.from(1000000) + + // Deploy a new registry since we change payment model + const registryLogic = await keeperRegistryLogicFactory + .connect(owner) + .deploy( + Mode.ARBITRUM, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + // Deploy a new registry since we change payment model + const registry = await keeperRegistryFactory + .connect(owner) + .deploy(registryLogic.address) + await registry + .connect(owner) + .setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ) + await linkToken.connect(owner).approve(registry.address, toWei('10000')) + for (let i = 0; i < numUpkeeps; i++) { + mock = await upkeepMockFactory.deploy() + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + randomBytes, + emptyBytes, + ) + upkeepId = await getUpkeepID(tx) + upkeepIds.push(upkeepId.toString()) + + // Add funds to passing upkeeps + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + } + + // Do the thing + const tx = await getTransmitTx( + registry, + keeper1, + upkeepIds, + f + 1, + { gasPrice: gasWei.mul('5') }, // High gas price so that it gets capped + ) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly numPassingUpkeeps Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, numUpkeeps) + + // Verify the payment calculation in upkeepPerformed[0] + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + gasCeilingMultiplier, + paymentPremiumPPB, + flatFeeMicroLink, + 
l1CostWeiArb.div(gasCeilingMultiplier), // Dividing by gasCeilingMultiplier as it gets multiplied later + BigNumber.from(numUpkeeps), + ).total.toString(), + totalPayment.toString(), + ) + }) + }) + }) + + describe('#recoverFunds', () => { + const sent = toWei('7') + + beforeEach(async () => { + await linkToken.connect(admin).approve(registry.address, toWei('100')) + await linkToken + .connect(owner) + .transfer(await keeper1.getAddress(), toWei('1000')) + + // add funds to upkeep 1 and perform and withdraw some payment + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + emptyBytes, + ) + + const id1 = await getUpkeepID(tx) + await registry.connect(admin).addFunds(id1, toWei('5')) + + await getTransmitTx(registry, keeper1, [id1.toString()], f + 1) + await getTransmitTx(registry, keeper2, [id1.toString()], f + 1) + await getTransmitTx(registry, keeper3, [id1.toString()], f + 1) + + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + + // transfer funds directly to the registry + await linkToken.connect(keeper1).transfer(registry.address, sent) + + // add funds to upkeep 2 and perform and withdraw some payment + const tx2 = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + emptyBytes, + ) + const id2 = await getUpkeepID(tx2) + await registry.connect(admin).addFunds(id2, toWei('5')) + + await getTransmitTx(registry, keeper1, [id2.toString()], f + 1) + await getTransmitTx(registry, keeper2, [id2.toString()], f + 1) + await getTransmitTx(registry, keeper3, [id2.toString()], f + 1) + + await registry + .connect(payee2) + .withdrawPayment( + await keeper2.getAddress(), + await nonkeeper.getAddress(), + ) + + // transfer funds using onTokenTransfer + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [id2]) + await linkToken + 
.connect(owner) + .transferAndCall(registry.address, toWei('1'), data) + + // withdraw some funds + await registry.connect(owner).cancelUpkeep(id1) + await registry + .connect(admin) + .withdrawFunds(id1, await nonkeeper.getAddress()) + }) + + it('reverts if not called by owner', async () => { + await evmRevert( + registry.connect(keeper1).recoverFunds(), + 'Only callable by owner', + ) + }) + + it('allows any funds that have been accidentally transfered to be moved', async () => { + const balanceBefore = await linkToken.balanceOf(registry.address) + const ownerBefore = await linkToken.balanceOf(await owner.getAddress()) + + await registry.connect(owner).recoverFunds() + + const balanceAfter = await linkToken.balanceOf(registry.address) + const ownerAfter = await linkToken.balanceOf(await owner.getAddress()) + + assert.isTrue(balanceBefore.eq(balanceAfter.add(sent))) + assert.isTrue(ownerAfter.eq(ownerBefore.add(sent))) + }) + }) + + describe('#getMinBalanceForUpkeep / #checkUpkeep / #transmit', () => { + it('calculates the minimum balance appropriately', async () => { + await mock.setCanCheck(true) + + const oneWei = BigNumber.from(1) + const minBalance = await registry.getMinBalanceForUpkeep(upkeepId) + const tooLow = minBalance.sub(oneWei) + + await registry.connect(admin).addFunds(upkeepId, tooLow) + let checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.INSUFFICIENT_BALANCE, + ) + + await registry.connect(admin).addFunds(upkeepId, oneWei) + checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepId) + assert.equal(checkUpkeepResult.upkeepNeeded, true) + }) + + it('uses maxPerformData size in checkUpkeep but actual performDataSize in transmit', async () => { + const tx1 = await registry + .connect(owner) + .registerUpkeep( + mock.address, + 
executeGas, + await admin.getAddress(), + randomBytes, + emptyBytes, + ) + const upkeepID1 = await getUpkeepID(tx1) + const tx2 = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + randomBytes, + emptyBytes, + ) + const upkeepID2 = await getUpkeepID(tx2) + await mock.setCanCheck(true) + await mock.setCanPerform(true) + + // upkeep 1 is underfunded, 2 is fully funded + const minBalance1 = ( + await registry.getMinBalanceForUpkeep(upkeepID1) + ).sub(1) + const minBalance2 = await registry.getMinBalanceForUpkeep(upkeepID2) + await registry.connect(owner).addFunds(upkeepID1, minBalance1) + await registry.connect(owner).addFunds(upkeepID2, minBalance2) + + // upkeep 1 check should return false, 2 should return true + let checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepID1) + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.INSUFFICIENT_BALANCE, + ) + + checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepID2) + assert.equal(checkUpkeepResult.upkeepNeeded, true) + + // upkeep 1 perform should return with insufficient balance using max performData size + let maxPerformData = '0x' + for (let i = 0; i < maxPerformDataSize.toNumber(); i++) { + maxPerformData += '11' + } + + const tx = await getTransmitTx( + registry, + keeper1, + [upkeepID1.toString()], + f + 1, + { gasPrice: gasWei.mul(gasCeilingMultiplier) }, + maxPerformData, + ) + + const receipt = await tx.wait() + const insufficientFundsUpkeepReportLogs = + parseInsufficientFundsUpkeepReportLogs(receipt) + // exactly 1 InsufficientFundsUpkeepReportLogs log should be emitted + assert.equal(insufficientFundsUpkeepReportLogs.length, 1) + + // upkeep 1 perform should succeed with empty performData + await getTransmitTx( + registry, + keeper1, + [upkeepID1.toString()], + f + 1, + { gasPrice: 
gasWei.mul(gasCeilingMultiplier) }, + '0x', + ), + // upkeep 2 perform should succeed with max performData size + await getTransmitTx( + registry, + keeper1, + [upkeepID2.toString()], + f + 1, + { gasPrice: gasWei.mul(gasCeilingMultiplier) }, + maxPerformData, + ) + }) + }) + + describe('#withdrawFunds', () => { + let upkeepId2: BigNumber + + beforeEach(async () => { + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + randomBytes, + emptyBytes, + ) + upkeepId2 = await getUpkeepID(tx) + + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + await registry.connect(admin).addFunds(upkeepId2, toWei('100')) + + // Do a perform so that upkeep is charged some amount + await getTransmitTx(registry, keeper1, [upkeepId.toString()], f + 1) + await getTransmitTx(registry, keeper1, [upkeepId2.toString()], f + 1) + }) + + it('reverts if called on a non existing ID', async () => { + await evmRevert( + registry + .connect(admin) + .withdrawFunds(upkeepId.add(1), await payee1.getAddress()), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry + .connect(owner) + .withdrawFunds(upkeepId, await payee1.getAddress()), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if called on an uncanceled upkeep', async () => { + await evmRevert( + registry + .connect(admin) + .withdrawFunds(upkeepId, await payee1.getAddress()), + 'UpkeepNotCanceled()', + ) + }) + + it('reverts if called with the 0 address', async () => { + await evmRevert( + registry.connect(admin).withdrawFunds(upkeepId, zeroAddress), + 'InvalidRecipient()', + ) + }) + + describe('after the registration is cancelled', () => { + beforeEach(async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + await registry.connect(owner).cancelUpkeep(upkeepId2) + }) + + it('can be called successively on two upkeeps', async () => { + await registry + .connect(admin) 
+ .withdrawFunds(upkeepId, await payee1.getAddress()) + await registry + .connect(admin) + .withdrawFunds(upkeepId2, await payee1.getAddress()) + }) + + it('moves the funds out and updates the balance and emits an event', async () => { + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const registryBefore = await linkToken.balanceOf(registry.address) + + let registration = await registry.getUpkeep(upkeepId) + const previousBalance = registration.balance + + const tx = await registry + .connect(admin) + .withdrawFunds(upkeepId, await payee1.getAddress()) + await expect(tx) + .to.emit(registry, 'FundsWithdrawn') + .withArgs(upkeepId, previousBalance, await payee1.getAddress()) + + const payee1After = await linkToken.balanceOf(await payee1.getAddress()) + const registryAfter = await linkToken.balanceOf(registry.address) + + assert.isTrue(payee1Before.add(previousBalance).eq(payee1After)) + assert.isTrue(registryBefore.sub(previousBalance).eq(registryAfter)) + + registration = await registry.getUpkeep(upkeepId) + assert.equal(0, registration.balance.toNumber()) + }) + }) + }) + + describe('#simulatePerformUpkeep', () => { + it('reverts if called by non zero address', async () => { + await evmRevert( + registry + .connect(await owner.getAddress()) + .callStatic.simulatePerformUpkeep(upkeepId, '0x'), + 'OnlySimulatedBackend()', + ) + }) + + it('reverts when registry is paused', async () => { + await registry.connect(owner).pause() + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.simulatePerformUpkeep(upkeepId, '0x'), + 'RegistryPaused()', + ) + }) + + it('returns false and gasUsed when perform fails', async () => { + await mock.setCanPerform(false) + + const simulatePerformResult = await registry + .connect(zeroAddress) + .callStatic.simulatePerformUpkeep(upkeepId, '0x') + + assert.equal(simulatePerformResult.success, false) + assert.isTrue(simulatePerformResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be 
used + }) + + it('returns true and gasUsed when perform succeeds', async () => { + await mock.setCanPerform(true) + + const simulatePerformResult = await registry + .connect(zeroAddress) + .callStatic.simulatePerformUpkeep(upkeepId, '0x') + + assert.equal(simulatePerformResult.success, true) + assert.isTrue(simulatePerformResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('returns correct amount of gasUsed when perform succeeds', async () => { + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(executeGas) + + const simulatePerformResult = await registry + .connect(zeroAddress) + .callStatic.simulatePerformUpkeep(upkeepId, '0x') + + assert.equal(simulatePerformResult.success, true) + // Full execute gas should be used, with some performGasBuffer(1000) + assert.isTrue( + simulatePerformResult.gasUsed.gt( + executeGas.sub(BigNumber.from('1000')), + ), + ) + }) + }) + + describe('#checkUpkeep', () => { + it('reverts if called by non zero address', async () => { + await evmRevert( + registry + .connect(await owner.getAddress()) + .callStatic.checkUpkeep(upkeepId), + 'OnlySimulatedBackend()', + ) + }) + + it('returns false and error code if the upkeep is cancelled by admin', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_CANCELLED, + ) + assert.equal(checkUpkeepResult.gasUsed.toString(), '0') + }) + + it('returns false and error code if the upkeep is cancelled by owner', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + 
assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_CANCELLED, + ) + assert.equal(checkUpkeepResult.gasUsed.toString(), '0') + }) + + it('returns false and error code if the upkeep is paused', async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_PAUSED, + ) + assert.equal(checkUpkeepResult.gasUsed.toString(), '0') + }) + + it('returns false and error code if user is out of funds', async () => { + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.INSUFFICIENT_BALANCE, + ) + assert.equal(checkUpkeepResult.gasUsed.toString(), '0') + }) + + context('when the registration is funded', () => { + beforeEach(async () => { + await linkToken.connect(admin).approve(registry.address, toWei('100')) + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + }) + + it('returns false, error code, and revert data if the target check reverts', async () => { + await mock.setShouldRevertCheck(true) + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepId) + assert.equal(checkUpkeepResult.upkeepNeeded, false) + + const wrappedPerfromData = ethers.utils.defaultAbiCoder.decode( + [ + 'tuple(uint32 checkBlockNum, bytes32 checkBlockHash, bytes performData)', + ], + checkUpkeepResult.performData, + ) + const revertReasonBytes = `0x${wrappedPerfromData[0][2].slice(10)}` // remove sighash + assert.equal( + 
ethers.utils.defaultAbiCoder.decode(['string'], revertReasonBytes)[0], + 'shouldRevertCheck should be false', + ) + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.TARGET_CHECK_REVERTED, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('returns false and error code if the upkeep is not needed', async () => { + await mock.setCanCheck(false) + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_NOT_NEEDED, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('returns false and error code if the performData exceeds limit', async () => { + let longBytes = '0x' + for (let i = 0; i < 5000; i++) { + longBytes += '1' + } + await mock.setCanCheck(true) + await mock.setPerformData(longBytes) + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('returns true with wrapped perform data and gas used if the target can execute', async () => { + await mock.setCanCheck(true) + await mock.setPerformData(randomBytes) + + const latestBlock = await ethers.provider.getBlock('latest') + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepId, { + blockTag: latestBlock.number, + }) + + const wrappedPerfromData = ethers.utils.defaultAbiCoder.decode( + [ + 'tuple(uint32 checkBlockNum, bytes32 
checkBlockHash, bytes performData)', + ], + checkUpkeepResult.performData, + ) + + assert.equal(checkUpkeepResult.upkeepNeeded, true) + assert.equal( + wrappedPerfromData[0].checkBlockNum, + latestBlock.number - 1, + ) + assert.equal( + wrappedPerfromData[0].checkBlockHash, + latestBlock.parentHash, + ) + assert.equal(wrappedPerfromData[0].performData, randomBytes) + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.NONE, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + assert.isTrue(checkUpkeepResult.fastGasWei.eq(gasWei)) + assert.isTrue(checkUpkeepResult.linkNative.eq(linkEth)) + }) + + it('has a large enough gas overhead to cover upkeeps that use all their gas [ @skip-coverage ]', async () => { + await mock.setCanCheck(true) + await mock.setCheckGasToBurn(checkGasLimit) + const gas = checkGasLimit.add(checkGasOverhead) + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic.checkUpkeep(upkeepId, { + gasLimit: gas, + }) + + assert.equal(checkUpkeepResult.upkeepNeeded, true) + }) + }) + }) + + describe('#addFunds', () => { + const amount = toWei('1') + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).addFunds(upkeepId.add(1), amount), + 'UpkeepCancelled()', + ) + }) + + it('adds to the balance of the registration', async () => { + await registry.connect(admin).addFunds(upkeepId, amount) + const registration = await registry.getUpkeep(upkeepId) + assert.isTrue(amount.eq(registration.balance)) + }) + + it('lets anyone add funds to an upkeep not just admin', async () => { + await linkToken.connect(owner).transfer(await payee1.getAddress(), amount) + await linkToken.connect(payee1).approve(registry.address, amount) + + await registry.connect(payee1).addFunds(upkeepId, amount) + const registration = await registry.getUpkeep(upkeepId) + assert.isTrue(amount.eq(registration.balance)) + }) + + it('emits a 
log', async () => { + const tx = await registry.connect(admin).addFunds(upkeepId, amount) + await expect(tx) + .to.emit(registry, 'FundsAdded') + .withArgs(upkeepId, await admin.getAddress(), amount) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(keeper1).addFunds(upkeepId, amount), + 'UpkeepCancelled()', + ) + }) + }) + + describe('#getActiveUpkeepIDs', () => { + let upkeepId2: BigNumber + + beforeEach(async () => { + // Register another upkeep so that we have 2 + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + randomBytes, + emptyBytes, + ) + upkeepId2 = await getUpkeepID(tx) + }) + + it('reverts if startIndex is out of bounds ', async () => { + await evmRevert(registry.getActiveUpkeepIDs(4, 0), 'IndexOutOfRange()') + }) + + it('reverts if startIndex + maxCount is out of bounds', async () => { + await evmRevert(registry.getActiveUpkeepIDs(0, 4)) + }) + + it('returns upkeep IDs bounded by maxCount', async () => { + let upkeepIds = await registry.getActiveUpkeepIDs(0, 1) + assert( + upkeepIds.length == 1, + 'Only maxCount number of upkeeps should be returned', + ) + assert( + upkeepIds[0].toString() == upkeepId.toString(), + 'Correct upkeep ID should be returned', + ) + + upkeepIds = await registry.getActiveUpkeepIDs(1, 1) + assert( + upkeepIds.length == 1, + 'Only maxCount number of upkeeps should be returned', + ) + assert( + upkeepIds[0].toString() == upkeepId2.toString(), + 'Correct upkeep ID should be returned', + ) + }) + + it('returns all upkeep IDs if maxCount is 0', async () => { + const upkeepIds = await registry.getActiveUpkeepIDs(0, 0) + assert(upkeepIds.length == 2, 'All upkeeps should be returned') + assert( + upkeepIds[0].toString() == upkeepId.toString(), + 'Correct upkeep ID should be returned', + ) + assert( + upkeepIds[1].toString() == upkeepId2.toString(), + 
'Correct upkeep ID should be returned', + ) + }) + }) + + describe('#getMaxPaymentForGas', () => { + const multipliers = [BigNumber.from(1), BigNumber.from(3)] + const gasAmounts = [100000, 10000000] + const premiums = [0, 250000000] + const flatFees = [0, 1000000] + // Same as MockArbGasInfo.sol + const l1CostWeiArb = BigNumber.from(1000000) + // Same as MockOVMGasPriceOracle.sol + const l1CostWeiOpt = BigNumber.from(2000000) + + it('calculates the max fee appropriately', async () => { + await verifyMaxPayment( + Mode.DEFAULT, + multipliers, + gasAmounts, + premiums, + flatFees, + ) + }) + + it('calculates the max fee appropriately for Arbitrum', async () => { + await verifyMaxPayment( + Mode.ARBITRUM, + multipliers, + gasAmounts, + premiums, + flatFees, + l1CostWeiArb, + ) + }) + + it('calculates the max fee appropriately for Optimism', async () => { + await verifyMaxPayment( + Mode.OPTIMISM, + multipliers, + gasAmounts, + premiums, + flatFees, + l1CostWeiOpt, + ) + }) + + it('uses the fallback gas price if the feed has issues', async () => { + const expectedFallbackMaxPayment = linkForGas( + executeGas, + registryGasOverhead + .add(registryPerSignerGasOverhead.mul(f + 1)) + .add(maxPerformDataSize.mul(registryPerPerformByteGasOverhead)), + gasCeilingMultiplier.mul('2'), // fallbackGasPrice is 2x gas price + paymentPremiumPPB, + flatFeeMicroLink, + ).total + + // Stale feed + let roundId = 99 + const answer = 100 + let updatedAt = 946684800 // New Years 2000 🥳 + let startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, answer, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + (await registry.getMaxPaymentForGas(executeGas)).toString(), + ) + + // Negative feed price + roundId = 100 + updatedAt = Math.floor(Date.now() / 1000) + startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, -100, updatedAt, startedAt) + + assert.equal( + 
expectedFallbackMaxPayment.toString(), + (await registry.getMaxPaymentForGas(executeGas)).toString(), + ) + + // Zero feed price + roundId = 101 + updatedAt = Math.floor(Date.now() / 1000) + startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, 0, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + (await registry.getMaxPaymentForGas(executeGas)).toString(), + ) + }) + + it('uses the fallback link price if the feed has issues', async () => { + const expectedFallbackMaxPayment = linkForGas( + executeGas, + registryGasOverhead + .add(registryPerSignerGasOverhead.mul(f + 1)) + .add(maxPerformDataSize.mul(registryPerPerformByteGasOverhead)), + gasCeilingMultiplier.mul('2'), // fallbackLinkPrice is 1/2 pli price, so multiply by 2 + paymentPremiumPPB, + flatFeeMicroLink, + ).total + + // Stale feed + let roundId = 99 + const answer = 100 + let updatedAt = 946684800 // New Years 2000 🥳 + let startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, answer, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + (await registry.getMaxPaymentForGas(executeGas)).toString(), + ) + + // Negative feed price + roundId = 100 + updatedAt = Math.floor(Date.now() / 1000) + startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, -100, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + (await registry.getMaxPaymentForGas(executeGas)).toString(), + ) + + // Zero feed price + roundId = 101 + updatedAt = Math.floor(Date.now() / 1000) + startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, 0, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + (await registry.getMaxPaymentForGas(executeGas)).toString(), + ) + }) + }) + + describe('#typeAndVersion', () => { + it('uses the correct type and version', async () => { + const typeAndVersion = await 
registry.typeAndVersion() + assert.equal(typeAndVersion, 'KeeperRegistry 2.0.2') + }) + }) + + describe('#onTokenTransfer', () => { + const amount = toWei('1') + + it('reverts if not called by the PLI token', async () => { + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [upkeepId]) + + await evmRevert( + registry + .connect(keeper1) + .onTokenTransfer(await keeper1.getAddress(), amount, data), + 'OnlyCallableByPLIToken()', + ) + }) + + it('reverts if not called with more or less than 32 bytes', async () => { + const longData = ethers.utils.defaultAbiCoder.encode( + ['uint256', 'uint256'], + ['33', '34'], + ) + const shortData = '0x12345678' + + await evmRevert( + linkToken + .connect(owner) + .transferAndCall(registry.address, amount, longData), + ) + await evmRevert( + linkToken + .connect(owner) + .transferAndCall(registry.address, amount, shortData), + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(keeper1).addFunds(upkeepId, amount), + 'UpkeepCancelled()', + ) + }) + + it('updates the funds of the job id passed', async () => { + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [upkeepId]) + + const before = (await registry.getUpkeep(upkeepId)).balance + await linkToken + .connect(owner) + .transferAndCall(registry.address, amount, data) + const after = (await registry.getUpkeep(upkeepId)).balance + + assert.isTrue(before.add(amount).eq(after)) + }) + }) + + describe('#setConfig - onchain', () => { + const payment = BigNumber.from(1) + const flatFee = BigNumber.from(2) + const staleness = BigNumber.from(4) + const ceiling = BigNumber.from(5) + const maxGas = BigNumber.from(6) + const fbGasEth = BigNumber.from(7) + const fbLinkEth = BigNumber.from(8) + const newMinUpkeepSpend = BigNumber.from(9) + const newMaxCheckDataSize = BigNumber.from(10000) + const newMaxPerformDataSize = BigNumber.from(10000) + const newMaxPerformGas = 
BigNumber.from(10000000) + + it('reverts when called by anyone but the proposed owner', async () => { + await evmRevert( + registry.connect(payee1).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB: payment, + flatFeeMicroLink: flatFee, + checkGasLimit: maxGas, + stalenessSeconds: staleness, + gasCeilingMultiplier: ceiling, + minUpkeepSpend: newMinUpkeepSpend, + maxCheckDataSize: newMaxCheckDataSize, + maxPerformDataSize: newMaxPerformDataSize, + maxPerformGas: newMaxPerformGas, + fallbackGasPrice: fbGasEth, + fallbackLinkPrice: fbLinkEth, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }), + offchainVersion, + offchainBytes, + ), + 'Only callable by owner', + ) + }) + + it('updates the onchainConfig and configDigest', async () => { + const old = await registry.getState() + const oldConfig = old.config + const oldState = old.state + assert.isTrue(paymentPremiumPPB.eq(oldConfig.paymentPremiumPPB)) + assert.isTrue(flatFeeMicroLink.eq(oldConfig.flatFeeMicroLink)) + assert.isTrue(stalenessSeconds.eq(oldConfig.stalenessSeconds)) + assert.isTrue(gasCeilingMultiplier.eq(oldConfig.gasCeilingMultiplier)) + + await registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB: payment, + flatFeeMicroLink: flatFee, + checkGasLimit: maxGas, + stalenessSeconds: staleness, + gasCeilingMultiplier: ceiling, + minUpkeepSpend: newMinUpkeepSpend, + maxCheckDataSize: newMaxCheckDataSize, + maxPerformDataSize: newMaxPerformDataSize, + maxPerformGas: newMaxPerformGas, + fallbackGasPrice: fbGasEth, + fallbackLinkPrice: fbLinkEth, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }), + offchainVersion, + offchainBytes, + ) + + const updated = await registry.getState() + const updatedConfig = updated.config + const updatedState = updated.state + assert.equal(updatedConfig.paymentPremiumPPB, payment.toNumber()) + 
assert.equal(updatedConfig.flatFeeMicroLink, flatFee.toNumber()) + assert.equal(updatedConfig.stalenessSeconds, staleness.toNumber()) + assert.equal(updatedConfig.gasCeilingMultiplier, ceiling.toNumber()) + assert.equal( + updatedConfig.minUpkeepSpend.toString(), + newMinUpkeepSpend.toString(), + ) + assert.equal( + updatedConfig.maxCheckDataSize, + newMaxCheckDataSize.toNumber(), + ) + assert.equal( + updatedConfig.maxPerformDataSize, + newMaxPerformDataSize.toNumber(), + ) + assert.equal(updatedConfig.maxPerformGas, newMaxPerformGas.toNumber()) + assert.equal(updatedConfig.checkGasLimit, maxGas.toNumber()) + assert.equal( + updatedConfig.fallbackGasPrice.toNumber(), + fbGasEth.toNumber(), + ) + assert.equal( + updatedConfig.fallbackLinkPrice.toNumber(), + fbLinkEth.toNumber(), + ) + assert.equal(updatedState.latestEpoch, 0) + + assert(oldState.configCount + 1 == updatedState.configCount) + assert( + oldState.latestConfigBlockNumber != + updatedState.latestConfigBlockNumber, + ) + assert(oldState.latestConfigDigest != updatedState.latestConfigDigest) + }) + + it('emits an event', async () => { + const tx = await registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB: payment, + flatFeeMicroLink: flatFee, + checkGasLimit: maxGas, + stalenessSeconds: staleness, + gasCeilingMultiplier: ceiling, + minUpkeepSpend: newMinUpkeepSpend, + maxCheckDataSize: newMaxCheckDataSize, + maxPerformDataSize: newMaxPerformDataSize, + maxPerformGas: newMaxPerformGas, + fallbackGasPrice: fbGasEth, + fallbackLinkPrice: fbLinkEth, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }), + offchainVersion, + offchainBytes, + ) + await expect(tx).to.emit(registry, 'ConfigSet') + }) + + it('reverts upon decreasing max limits', async () => { + await evmRevert( + registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB: payment, + flatFeeMicroLink: 
flatFee, + checkGasLimit: maxGas, + stalenessSeconds: staleness, + gasCeilingMultiplier: ceiling, + minUpkeepSpend: newMinUpkeepSpend, + maxCheckDataSize: BigNumber.from(1), + maxPerformDataSize: newMaxPerformDataSize, + maxPerformGas: newMaxPerformGas, + fallbackGasPrice: fbGasEth, + fallbackLinkPrice: fbLinkEth, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }), + offchainVersion, + offchainBytes, + ), + 'MaxCheckDataSizeCanOnlyIncrease()', + ) + await evmRevert( + registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB: payment, + flatFeeMicroLink: flatFee, + checkGasLimit: maxGas, + stalenessSeconds: staleness, + gasCeilingMultiplier: ceiling, + minUpkeepSpend: newMinUpkeepSpend, + maxCheckDataSize: newMaxCheckDataSize, + maxPerformDataSize: BigNumber.from(1), + maxPerformGas: newMaxPerformGas, + fallbackGasPrice: fbGasEth, + fallbackLinkPrice: fbLinkEth, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }), + offchainVersion, + offchainBytes, + ), + 'MaxPerformDataSizeCanOnlyIncrease()', + ) + await evmRevert( + registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB: payment, + flatFeeMicroLink: flatFee, + checkGasLimit: maxGas, + stalenessSeconds: staleness, + gasCeilingMultiplier: ceiling, + minUpkeepSpend: newMinUpkeepSpend, + maxCheckDataSize: newMaxCheckDataSize, + maxPerformDataSize: newMaxPerformDataSize, + maxPerformGas: BigNumber.from(1), + fallbackGasPrice: fbGasEth, + fallbackLinkPrice: fbLinkEth, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }), + offchainVersion, + offchainBytes, + ), + 'GasLimitCanOnlyIncrease()', + ) + }) + }) + + describe('#setConfig - offchain', () => { + let newKeepers: string[] + + beforeEach(async () => { + newKeepers = [ + await personas.Eddy.getAddress(), + await personas.Nick.getAddress(), + await 
personas.Neil.getAddress(), + await personas.Carol.getAddress(), + ] + }) + + it('reverts when called by anyone but the owner', async () => { + await evmRevert( + registry + .connect(payee1) + .setConfig( + newKeepers, + newKeepers, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ), + 'Only callable by owner', + ) + }) + + it('reverts if too many keeperAddresses set', async () => { + for (let i = 0; i < 40; i++) { + newKeepers.push(randomAddress()) + } + await evmRevert( + registry + .connect(owner) + .setConfig( + newKeepers, + newKeepers, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ), + 'TooManyOracles()', + ) + }) + + it('reverts if f=0', async () => { + await evmRevert( + registry + .connect(owner) + .setConfig( + newKeepers, + newKeepers, + 0, + encodeConfig(config), + offchainVersion, + offchainBytes, + ), + 'IncorrectNumberOfFaultyOracles()', + ) + }) + + it('reverts if signers != transmitters length', async () => { + const signers = [randomAddress()] + await evmRevert( + registry + .connect(owner) + .setConfig( + signers, + newKeepers, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ), + 'IncorrectNumberOfSigners()', + ) + }) + + it('reverts if signers <= 3f', async () => { + newKeepers.pop() + await evmRevert( + registry + .connect(owner) + .setConfig( + newKeepers, + newKeepers, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ), + 'IncorrectNumberOfSigners()', + ) + }) + + it('reverts on repeated signers', async () => { + const newSigners = [ + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + ] + await evmRevert( + registry + .connect(owner) + .setConfig( + newSigners, + newKeepers, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ), + 'RepeatedSigner()', + ) + }) + + it('reverts on repeated transmitters', async () => { + const newTransmitters = [ + await 
personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + ] + await evmRevert( + registry + .connect(owner) + .setConfig( + newKeepers, + newTransmitters, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ), + 'RepeatedTransmitter()', + ) + }) + + it('stores new config and emits event', async () => { + // Perform an upkeep so that totalPremium is updated + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + let tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + ) + await tx.wait() + + const newOffChainVersion = BigNumber.from('2') + const newOffChainConfig = '0x1122' + + const old = await registry.getState() + const oldState = old.state + assert(oldState.totalPremium.gt(BigNumber.from('0'))) + + const newSigners = newKeepers + tx = await registry + .connect(owner) + .setConfig( + newSigners, + newKeepers, + f, + encodeConfig(config), + newOffChainVersion, + newOffChainConfig, + ) + + const updated = await registry.getState() + const updatedState = updated.state + assert(oldState.totalPremium.eq(updatedState.totalPremium)) + + // Old signer addresses which are not in new signers should be non active + for (let i = 0; i < signerAddresses.length; i++) { + const signer = signerAddresses[i] + if (!newSigners.includes(signer)) { + assert((await registry.getSignerInfo(signer)).active == false) + assert((await registry.getSignerInfo(signer)).index == 0) + } + } + // New signer addresses should be active + for (let i = 0; i < newSigners.length; i++) { + const signer = newSigners[i] + assert((await registry.getSignerInfo(signer)).active == true) + assert((await registry.getSignerInfo(signer)).index == i) + } + // Old transmitter addresses which are not in new transmitter should be non active, update lastCollected but retain other info + for (let i = 0; i < keeperAddresses.length; i++) { + const transmitter = keeperAddresses[i] + if 
(!newKeepers.includes(transmitter)) { + assert( + (await registry.getTransmitterInfo(transmitter)).active == false, + ) + assert((await registry.getTransmitterInfo(transmitter)).index == i) + assert( + ( + await registry.getTransmitterInfo(transmitter) + ).lastCollected.toString() == oldState.totalPremium.toString(), + ) + } + } + // New transmitter addresses should be active + for (let i = 0; i < newKeepers.length; i++) { + const transmitter = newKeepers[i] + assert((await registry.getTransmitterInfo(transmitter)).active == true) + assert((await registry.getTransmitterInfo(transmitter)).index == i) + assert( + ( + await registry.getTransmitterInfo(transmitter) + ).lastCollected.toString() == oldState.totalPremium.toString(), + ) + } + + // config digest should be updated + assert(oldState.configCount + 1 == updatedState.configCount) + assert( + oldState.latestConfigBlockNumber != + updatedState.latestConfigBlockNumber, + ) + assert(oldState.latestConfigDigest != updatedState.latestConfigDigest) + + //New config should be updated + assert.deepEqual(updated.signers, newKeepers) + assert.deepEqual(updated.transmitters, newKeepers) + + // Event should have been emitted + await expect(tx).to.emit(registry, 'ConfigSet') + }) + }) + + describe('#setPeerRegistryMigrationPermission() / #getPeerRegistryMigrationPermission()', () => { + const peer = randomAddress() + it('allows the owner to set the peer registries', async () => { + let permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(0) + await registry.setPeerRegistryMigrationPermission(peer, 1) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(1) + await registry.setPeerRegistryMigrationPermission(peer, 2) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(2) + await registry.setPeerRegistryMigrationPermission(peer, 0) + permission = await 
registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(0) + }) + it('reverts if passed an unsupported permission', async () => { + await expect( + registry.connect(admin).setPeerRegistryMigrationPermission(peer, 10), + ).to.be.reverted + }) + it('reverts if not called by the owner', async () => { + await expect( + registry.connect(admin).setPeerRegistryMigrationPermission(peer, 1), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('#registerUpkeep', () => { + it('reverts when registry is paused', async () => { + await registry.connect(owner).pause() + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + emptyBytes, + ), + 'RegistryPaused()', + ) + }) + + it('reverts if the target is not a contract', async () => { + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + zeroAddress, + executeGas, + await admin.getAddress(), + emptyBytes, + emptyBytes, + ), + 'NotAContract()', + ) + }) + + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry + .connect(keeper1) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + emptyBytes, + ), + 'OnlyCallableByOwnerOrRegistrar()', + ) + }) + + it('reverts if execute gas is too low', async () => { + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + mock.address, + 2299, + await admin.getAddress(), + emptyBytes, + emptyBytes, + ), + 'GasLimitOutsideRange()', + ) + }) + + it('reverts if execute gas is too high', async () => { + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + mock.address, + 5000001, + await admin.getAddress(), + emptyBytes, + emptyBytes, + ), + 'GasLimitOutsideRange()', + ) + }) + + it('reverts if checkData is too long', async () => { + let longBytes = '0x' + for (let i = 0; i < 10000; i++) { + longBytes += '1' + } + await evmRevert( + registry + .connect(owner) + 
.registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + longBytes, + emptyBytes, + ), + 'CheckDataExceedsLimit()', + ) + }) + + it('creates a record of the registration', async () => { + const executeGases = [100000, 500000] + const checkDatas = [emptyBytes, '0x12'] + const offchainConfig = '0x1234567890' + + for (let jdx = 0; jdx < executeGases.length; jdx++) { + const executeGas = executeGases[jdx] + for (let kdx = 0; kdx < checkDatas.length; kdx++) { + const checkData = checkDatas[kdx] + const tx = await registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + checkData, + offchainConfig, + ) + + //confirm the upkeep details + upkeepId = await getUpkeepID(tx) + await expect(tx) + .to.emit(registry, 'UpkeepRegistered') + .withArgs(upkeepId, executeGas, await admin.getAddress()) + const registration = await registry.getUpkeep(upkeepId) + + assert.equal(mock.address, registration.target) + assert.equal( + executeGas.toString(), + registration.executeGas.toString(), + ) + assert.equal(await admin.getAddress(), registration.admin) + assert.equal(0, registration.balance.toNumber()) + assert.equal(0, registration.amountSpent.toNumber()) + assert.equal(0, registration.lastPerformBlockNumber) + assert.equal(checkData, registration.checkData) + assert.equal(registration.paused, false) + assert.equal(registration.offchainConfig, offchainConfig) + assert(registration.maxValidBlocknumber.eq('0xffffffff')) + } + } + }) + }) + + describe('#pauseUpkeep', () => { + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).pauseUpkeep(upkeepId.add(1)), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is already canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).pauseUpkeep(upkeepId), + 'UpkeepCancelled()', + ) + }) + + it('reverts if the upkeep is already paused', 
async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).pauseUpkeep(upkeepId), + 'OnlyUnpausedUpkeep()', + ) + }) + + it('reverts if the caller is not the upkeep admin', async () => { + await evmRevert( + registry.connect(keeper1).pauseUpkeep(upkeepId), + 'OnlyCallableByAdmin()', + ) + }) + + it('pauses the upkeep and emits an event', async () => { + const tx = await registry.connect(admin).pauseUpkeep(upkeepId) + await expect(tx).to.emit(registry, 'UpkeepPaused').withArgs(upkeepId) + + const registration = await registry.getUpkeep(upkeepId) + assert.equal(registration.paused, true) + }) + }) + + describe('#unpauseUpkeep', () => { + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).unpauseUpkeep(upkeepId.add(1)), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is already canceled', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).unpauseUpkeep(upkeepId), + 'UpkeepCancelled()', + ) + }) + + it('marks the contract as paused', async () => { + assert.isFalse((await registry.getState()).state.paused) + + await registry.connect(owner).pause() + + assert.isTrue((await registry.getState()).state.paused) + }) + + it('reverts if the upkeep is not paused', async () => { + await evmRevert( + registry.connect(admin).unpauseUpkeep(upkeepId), + 'OnlyPausedUpkeep()', + ) + }) + + it('reverts if the caller is not the upkeep admin', async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) + + const registration = await registry.getUpkeep(upkeepId) + + assert.equal(registration.paused, true) + + await evmRevert( + registry.connect(keeper1).unpauseUpkeep(upkeepId), + 'OnlyCallableByAdmin()', + ) + }) + + it('unpauses the upkeep and emits an event', async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) + + const tx = await 
registry.connect(admin).unpauseUpkeep(upkeepId) + + await expect(tx).to.emit(registry, 'UpkeepUnpaused').withArgs(upkeepId) + + const registration = await registry.getUpkeep(upkeepId) + assert.equal(registration.paused, false) + + const upkeepIds = await registry.getActiveUpkeepIDs(0, 0) + assert.equal(upkeepIds.length, 1) + }) + }) + + describe('#updateCheckData', () => { + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).updateCheckData(upkeepId.add(1), randomBytes), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the caller is not upkeep admin', async () => { + await evmRevert( + registry.connect(keeper1).updateCheckData(upkeepId, randomBytes), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is cancelled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).updateCheckData(upkeepId, randomBytes), + 'UpkeepCancelled()', + ) + }) + + it('is allowed to update on paused upkeep', async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) + await registry.connect(admin).updateCheckData(upkeepId, randomBytes) + + const registration = await registry.getUpkeep(upkeepId) + assert.equal(randomBytes, registration.checkData) + }) + + it('reverts if newCheckData exceeds limit', async () => { + let longBytes = '0x' + for (let i = 0; i < 10000; i++) { + longBytes += '1' + } + + await evmRevert( + registry.connect(admin).updateCheckData(upkeepId, longBytes), + 'CheckDataExceedsLimit()', + ) + }) + + it('updates the upkeep check data and emits an event', async () => { + const tx = await registry + .connect(admin) + .updateCheckData(upkeepId, randomBytes) + await expect(tx) + .to.emit(registry, 'UpkeepCheckDataUpdated') + .withArgs(upkeepId, randomBytes) + + const registration = await registry.getUpkeep(upkeepId) + assert.equal(randomBytes, registration.checkData) + }) + }) + + describe('#setUpkeepGasLimit', () => { + const 
newGasLimit = BigNumber.from('300000') + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(admin).setUpkeepGasLimit(upkeepId.add(1), newGasLimit), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(admin).setUpkeepGasLimit(upkeepId, newGasLimit), + 'UpkeepCancelled()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry.connect(owner).setUpkeepGasLimit(upkeepId, newGasLimit), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if new gas limit is out of bounds', async () => { + await evmRevert( + registry + .connect(admin) + .setUpkeepGasLimit(upkeepId, BigNumber.from('100')), + 'GasLimitOutsideRange()', + ) + await evmRevert( + registry + .connect(admin) + .setUpkeepGasLimit(upkeepId, BigNumber.from('6000000')), + 'GasLimitOutsideRange()', + ) + }) + + it('updates the gas limit successfully', async () => { + const initialGasLimit = (await registry.getUpkeep(upkeepId)).executeGas + assert.equal(initialGasLimit, executeGas.toNumber()) + await registry.connect(admin).setUpkeepGasLimit(upkeepId, newGasLimit) + const updatedGasLimit = (await registry.getUpkeep(upkeepId)).executeGas + assert.equal(updatedGasLimit, newGasLimit.toNumber()) + }) + + it('emits a log', async () => { + const tx = await registry + .connect(admin) + .setUpkeepGasLimit(upkeepId, newGasLimit) + await expect(tx) + .to.emit(registry, 'UpkeepGasLimitSet') + .withArgs(upkeepId, newGasLimit) + }) + }) + + describe('#setUpkeepOffchainConfig', () => { + const newConfig = '0xc0ffeec0ffee' + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry + .connect(admin) + .setUpkeepOffchainConfig(upkeepId.add(1), newConfig), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await 
registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(admin).setUpkeepOffchainConfig(upkeepId, newConfig), + 'UpkeepCancelled()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry.connect(owner).setUpkeepOffchainConfig(upkeepId, newConfig), + 'OnlyCallableByAdmin()', + ) + }) + + it('updates the config successfully', async () => { + const initialConfig = (await registry.getUpkeep(upkeepId)).offchainConfig + assert.equal(initialConfig, '0x') + await registry.connect(admin).setUpkeepOffchainConfig(upkeepId, newConfig) + const updatedConfig = (await registry.getUpkeep(upkeepId)).offchainConfig + assert.equal(newConfig, updatedConfig) + }) + + it('emits a log', async () => { + const tx = await registry + .connect(admin) + .setUpkeepOffchainConfig(upkeepId, newConfig) + await expect(tx) + .to.emit(registry, 'UpkeepOffchainConfigSet') + .withArgs(upkeepId, newConfig) + }) + }) + + describe('#transferUpkeepAdmin', () => { + it('reverts when called by anyone but the current upkeep admin', async () => { + await evmRevert( + registry + .connect(payee1) + .transferUpkeepAdmin(upkeepId, await payee2.getAddress()), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts when transferring to self', async () => { + await evmRevert( + registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await admin.getAddress()), + 'ValueNotChanged()', + ) + }) + + it('reverts when the upkeep is cancelled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await keeper1.getAddress()), + 'UpkeepCancelled()', + ) + }) + + it('reverts when transferring to zero address', async () => { + await evmRevert( + registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, ethers.constants.AddressZero), + 'InvalidRecipient()', + ) + }) + + it('does not change the upkeep admin', async () => { + await registry + 
.connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + + const upkeep = await registry.getUpkeep(upkeepId) + assert.equal(await admin.getAddress(), upkeep.admin) + }) + + it('emits an event announcing the new upkeep admin', async () => { + const tx = await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + + await expect(tx) + .to.emit(registry, 'UpkeepAdminTransferRequested') + .withArgs(upkeepId, await admin.getAddress(), await payee1.getAddress()) + }) + + it('does not emit an event when called with the same proposed upkeep admin', async () => { + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + + const tx = await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + const receipt = await tx.wait() + assert.equal(0, receipt.logs.length) + }) + }) + + describe('#acceptUpkeepAdmin', () => { + beforeEach(async () => { + // Start admin transfer to payee1 + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + }) + + it('reverts when not called by the proposed upkeep admin', async () => { + await evmRevert( + registry.connect(payee2).acceptUpkeepAdmin(upkeepId), + 'OnlyCallableByProposedAdmin()', + ) + }) + + it('reverts when the upkeep is cancelled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(payee1).acceptUpkeepAdmin(upkeepId), + 'UpkeepCancelled()', + ) + }) + + it('does change the admin', async () => { + await registry.connect(payee1).acceptUpkeepAdmin(upkeepId) + + const upkeep = await registry.getUpkeep(upkeepId) + assert.equal(await payee1.getAddress(), upkeep.admin) + }) + + it('emits an event announcing the new upkeep admin', async () => { + const tx = await registry.connect(payee1).acceptUpkeepAdmin(upkeepId) + await expect(tx) + .to.emit(registry, 'UpkeepAdminTransferred') + .withArgs(upkeepId, await 
admin.getAddress(), await payee1.getAddress()) + }) + }) + + describe('#withdrawOwnerFunds', () => { + it('can only be called by owner', async () => { + await evmRevert( + registry.connect(keeper1).withdrawOwnerFunds(), + 'Only callable by owner', + ) + }) + + it('withdraws the collected fees to owner', async () => { + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + // Very high min spend, whole balance as cancellation fees + const minUpkeepSpend = toWei('1000') + await registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }), + offchainVersion, + offchainBytes, + ) + const upkeepBalance = (await registry.getUpkeep(upkeepId)).balance + const ownerBefore = await linkToken.balanceOf(await owner.getAddress()) + + await registry.connect(owner).cancelUpkeep(upkeepId) + + // Transfered to owner balance on registry + let ownerRegistryBalance = (await registry.getState()).state + .ownerLinkBalance + assert.isTrue(ownerRegistryBalance.eq(upkeepBalance)) + + // Now withdraw + await registry.connect(owner).withdrawOwnerFunds() + + ownerRegistryBalance = (await registry.getState()).state.ownerLinkBalance + const ownerAfter = await linkToken.balanceOf(await owner.getAddress()) + + // Owner registry balance should be changed to 0 + assert.isTrue(ownerRegistryBalance.eq(BigNumber.from('0'))) + + // Owner should be credited with the balance + assert.isTrue(ownerBefore.add(upkeepBalance).eq(ownerAfter)) + }) + }) + + describe('#transferPayeeship', () => { + it('reverts when called by anyone but the current payee', async () => { + await evmRevert( + registry + .connect(payee2) + .transferPayeeship( + await keeper1.getAddress(), + 
await payee2.getAddress(), + ), + 'OnlyCallableByPayee()', + ) + }) + + it('reverts when transferring to self', async () => { + await evmRevert( + registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee1.getAddress(), + ), + 'ValueNotChanged()', + ) + }) + + it('does not change the payee', async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + + const info = await registry.getTransmitterInfo(await keeper1.getAddress()) + assert.equal(await payee1.getAddress(), info.payee) + }) + + it('emits an event announcing the new payee', async () => { + const tx = await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + await expect(tx) + .to.emit(registry, 'PayeeshipTransferRequested') + .withArgs( + await keeper1.getAddress(), + await payee1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('does not emit an event when called with the same proposal', async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + + const tx = await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + const receipt = await tx.wait() + assert.equal(0, receipt.logs.length) + }) + }) + + describe('#acceptPayeeship', () => { + beforeEach(async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('reverts when called by anyone but the proposed payee', async () => { + await evmRevert( + registry.connect(payee1).acceptPayeeship(await keeper1.getAddress()), + 'OnlyCallableByProposedPayee()', + ) + }) + + it('emits an event announcing the new payee', async () => { + const tx = await registry + .connect(payee2) + .acceptPayeeship(await keeper1.getAddress()) + await expect(tx) + 
.to.emit(registry, 'PayeeshipTransferred') + .withArgs( + await keeper1.getAddress(), + await payee1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('does change the payee', async () => { + await registry.connect(payee2).acceptPayeeship(await keeper1.getAddress()) + + const info = await registry.getTransmitterInfo(await keeper1.getAddress()) + assert.equal(await payee2.getAddress(), info.payee) + }) + }) + + describe('#pause', () => { + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry.connect(keeper1).pause(), + 'Only callable by owner', + ) + }) + + it('marks the contract as paused', async () => { + assert.isFalse((await registry.getState()).state.paused) + + await registry.connect(owner).pause() + + assert.isTrue((await registry.getState()).state.paused) + }) + + it('Does not allow transmits when paused', async () => { + await registry.connect(owner).pause() + + await evmRevert( + getTransmitTx(registry, keeper1, [upkeepId.toString()], f + 1), + 'RegistryPaused()', + ) + }) + + it('Does not allow creation of new upkeeps when paused', async () => { + await registry.connect(owner).pause() + + await evmRevert( + registry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin.getAddress(), + emptyBytes, + emptyBytes, + ), + 'RegistryPaused()', + ) + }) + }) + + describe('#unpause', () => { + beforeEach(async () => { + await registry.connect(owner).pause() + }) + + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry.connect(keeper1).unpause(), + 'Only callable by owner', + ) + }) + + it('marks the contract as not paused', async () => { + assert.isTrue((await registry.getState()).state.paused) + + await registry.connect(owner).unpause() + + assert.isFalse((await registry.getState()).state.paused) + }) + }) + + describe('migrateUpkeeps() / #receiveUpkeeps()', async () => { + let registry2: KeeperRegistry + let registryLogic2: KeeperRegistryLogic + + beforeEach(async () 
=> { + registryLogic2 = await keeperRegistryLogicFactory + .connect(owner) + .deploy( + Mode.DEFAULT, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + const config = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + } + registry2 = await keeperRegistryFactory + .connect(owner) + .deploy(registryLogic2.address) + await registry2 + .connect(owner) + .setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig(config), + 1, + '0x', + ) + }) + + context('when permissions are set', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + await registry.setPeerRegistryMigrationPermission(registry2.address, 1) + await registry2.setPeerRegistryMigrationPermission(registry.address, 2) + }) + + it('migrates an upkeep', async () => { + expect((await registry.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + expect((await registry.getState()).state.numUpkeeps).to.equal(1) + // Set an upkeep admin transfer in progress too + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + + // migrate + await registry + .connect(admin) + .migrateUpkeeps([upkeepId], registry2.address) + expect((await registry.getState()).state.numUpkeeps).to.equal(0) + expect((await registry2.getState()).state.numUpkeeps).to.equal(1) + expect((await registry.getUpkeep(upkeepId)).balance).to.equal(0) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal('0x') + expect((await registry2.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect((await 
registry2.getState()).state.expectedLinkBalance).to.equal( + toWei('100'), + ) + expect((await registry2.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + // migration will delete the upkeep and nullify admin transfer + await expect( + registry.connect(payee1).acceptUpkeepAdmin(upkeepId), + ).to.be.revertedWith('UpkeepCancelled()') + await expect( + registry2.connect(payee1).acceptUpkeepAdmin(upkeepId), + ).to.be.revertedWith('OnlyCallableByProposedAdmin()') + }) + + it('migrates a paused upkeep', async () => { + expect((await registry.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + expect((await registry.getState()).state.numUpkeeps).to.equal(1) + await registry.connect(admin).pauseUpkeep(upkeepId) + // verify the upkeep is paused + expect((await registry.getUpkeep(upkeepId)).paused).to.equal(true) + // migrate + await registry + .connect(admin) + .migrateUpkeeps([upkeepId], registry2.address) + expect((await registry.getState()).state.numUpkeeps).to.equal(0) + expect((await registry2.getState()).state.numUpkeeps).to.equal(1) + expect((await registry.getUpkeep(upkeepId)).balance).to.equal(0) + expect((await registry2.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal('0x') + expect((await registry2.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + expect((await registry2.getState()).state.expectedLinkBalance).to.equal( + toWei('100'), + ) + // verify the upkeep is still paused after migration + expect((await registry2.getUpkeep(upkeepId)).paused).to.equal(true) + }) + + it('emits an event on both contracts', async () => { + expect((await registry.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + expect((await registry.getState()).state.numUpkeeps).to.equal(1) + const tx 
= registry + .connect(admin) + .migrateUpkeeps([upkeepId], registry2.address) + await expect(tx) + .to.emit(registry, 'UpkeepMigrated') + .withArgs(upkeepId, toWei('100'), registry2.address) + await expect(tx) + .to.emit(registry2, 'UpkeepReceived') + .withArgs(upkeepId, toWei('100'), registry.address) + }) + + it('is only migratable by the admin', async () => { + await expect( + registry.connect(owner).migrateUpkeeps([upkeepId], registry2.address), + ).to.be.revertedWith('OnlyCallableByAdmin()') + await registry + .connect(admin) + .migrateUpkeeps([upkeepId], registry2.address) + }) + }) + + context('when permissions are not set', () => { + it('reverts', async () => { + // no permissions + await registry.setPeerRegistryMigrationPermission(registry2.address, 0) + await registry2.setPeerRegistryMigrationPermission(registry.address, 0) + await expect(registry.migrateUpkeeps([upkeepId], registry2.address)).to + .be.reverted + // only outgoing permissions + await registry.setPeerRegistryMigrationPermission(registry2.address, 1) + await registry2.setPeerRegistryMigrationPermission(registry.address, 0) + await expect(registry.migrateUpkeeps([upkeepId], registry2.address)).to + .be.reverted + // only incoming permissions + await registry.setPeerRegistryMigrationPermission(registry2.address, 0) + await registry2.setPeerRegistryMigrationPermission(registry.address, 2) + await expect(registry.migrateUpkeeps([upkeepId], registry2.address)).to + .be.reverted + // permissions opposite direction + await registry.setPeerRegistryMigrationPermission(registry2.address, 2) + await registry2.setPeerRegistryMigrationPermission(registry.address, 1) + await expect(registry.migrateUpkeeps([upkeepId], registry2.address)).to + .be.reverted + }) + }) + }) + + describe('#setPayees', () => { + const IGNORE_ADDRESS = '0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF' + + beforeEach(async () => { + keeperAddresses = keeperAddresses.slice(0, 4) + signerAddresses = signerAddresses.slice(0, 4) + payees = 
payees.slice(0, 4) + + // Redeploy registry with zero address payees (non set) + registry = await keeperRegistryFactory + .connect(owner) + .deploy(registryLogic.address) + + await registry + .connect(owner) + .setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ) + }) + + it('reverts when not called by the owner', async () => { + await evmRevert( + registry.connect(keeper1).setPayees([]), + 'Only callable by owner', + ) + }) + + it('reverts with different numbers of payees than transmitters', async () => { + // 4 transmitters are set, so exactly 4 payess should be added + await evmRevert( + registry.connect(owner).setPayees([await payee1.getAddress()]), + 'ParameterLengthError()', + ) + await evmRevert( + registry + .connect(owner) + .setPayees([ + await payee1.getAddress(), + await payee1.getAddress(), + await payee1.getAddress(), + await payee1.getAddress(), + await payee1.getAddress(), + ]), + 'ParameterLengthError()', + ) + }) + + it('reverts if the payee is the zero address', async () => { + await evmRevert( + registry + .connect(owner) + .setPayees([ + await payee1.getAddress(), + '0x0000000000000000000000000000000000000000', + await payee3.getAddress(), + await payee4.getAddress(), + ]), + 'InvalidPayee()', + ) + }) + + it('sets the payees when exisitng payees are zero address', async () => { + //Initial payees should be zero address + for (let i = 0; i < keeperAddresses.length; i++) { + const payee = (await registry.getTransmitterInfo(keeperAddresses[i])) + .payee + assert.equal(payee, zeroAddress) + } + + await registry.connect(owner).setPayees(payees) + + for (let i = 0; i < keeperAddresses.length; i++) { + const payee = (await registry.getTransmitterInfo(keeperAddresses[i])) + .payee + assert.equal(payee, payees[i]) + } + }) + + it('does not change the payee if IGNORE_ADDRESS is used as payee', async () => { + // Set initial payees + await registry.connect(owner).setPayees(payees) + + const 
newPayees = [ + await payee1.getAddress(), + IGNORE_ADDRESS, + await payee3.getAddress(), + await payee4.getAddress(), + ] + await registry.connect(owner).setPayees(newPayees) + + const ignored = await registry.getTransmitterInfo( + await keeper2.getAddress(), + ) + assert.equal(await payee2.getAddress(), ignored.payee) + assert.equal(true, ignored.active) + }) + + it('reverts if payee is non zero and owner tries to change payee', async () => { + // Set initial payees + await registry.connect(owner).setPayees(payees) + + const newPayees = [ + await payee1.getAddress(), + await owner.getAddress(), + await payee3.getAddress(), + await payee4.getAddress(), + ] + await evmRevert( + registry.connect(owner).setPayees(newPayees), + 'InvalidPayee()', + ) + }) + + it('emits events for every payee added and removed', async () => { + const tx = await registry.connect(owner).setPayees(payees) + await expect(tx) + .to.emit(registry, 'PayeesUpdated') + .withArgs(keeperAddresses, payees) + }) + }) + + describe('#cancelUpkeep', () => { + it('reverts if the ID is not valid', async () => { + await evmRevert( + registry.connect(owner).cancelUpkeep(upkeepId.add(1)), + 'CannotCancel()', + ) + }) + + it('reverts if called by a non-owner/non-admin', async () => { + await evmRevert( + registry.connect(keeper1).cancelUpkeep(upkeepId), + 'OnlyCallableByOwnerOrAdmin()', + ) + }) + + describe('when called by the owner', async () => { + it('sets the registration to invalid immediately', async () => { + const tx = await registry.connect(owner).cancelUpkeep(upkeepId) + const receipt = await tx.wait() + const registration = await registry.getUpkeep(upkeepId) + assert.equal( + registration.maxValidBlocknumber.toNumber(), + receipt.blockNumber, + ) + }) + + it('emits an event', async () => { + const tx = await registry.connect(owner).cancelUpkeep(upkeepId) + const receipt = await tx.wait() + await expect(tx) + .to.emit(registry, 'UpkeepCanceled') + .withArgs(upkeepId, 
BigNumber.from(receipt.blockNumber)) + }) + + it('immediately prevents upkeep', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + + const tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + ) + const receipt = await tx.wait() + const cancelledUpkeepReportLogs = + parseCancelledUpkeepReportLogs(receipt) + // exactly 1 CancelledUpkeepReport log should be emitted + assert.equal(cancelledUpkeepReportLogs.length, 1) + }) + + it('does not revert if reverts if called multiple times', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(owner).cancelUpkeep(upkeepId), + 'CannotCancel()', + ) + }) + + describe('when called by the owner when the admin has just canceled', () => { + let oldExpiration: BigNumber + + beforeEach(async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + const registration = await registry.getUpkeep(upkeepId) + oldExpiration = registration.maxValidBlocknumber + }) + + it('allows the owner to cancel it more quickly', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + + const registration = await registry.getUpkeep(upkeepId) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) + }) + }) + + describe('when called by the admin', async () => { + it('reverts if called again by the admin', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).cancelUpkeep(upkeepId), + 'CannotCancel()', + ) + }) + + it('reverts if called by the owner after the timeout', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + for (let i = 0; i < cancellationDelay; i++) { + await ethers.provider.send('evm_mine', []) + } + + await evmRevert( + registry.connect(owner).cancelUpkeep(upkeepId), + 'CannotCancel()', + ) + }) + + it('sets the registration to invalid in 50 blocks', async () => { + const tx = 
await registry.connect(admin).cancelUpkeep(upkeepId) + const receipt = await tx.wait() + const registration = await registry.getUpkeep(upkeepId) + assert.equal( + registration.maxValidBlocknumber.toNumber(), + receipt.blockNumber + 50, + ) + }) + + it('emits an event', async () => { + const tx = await registry.connect(admin).cancelUpkeep(upkeepId) + const receipt = await tx.wait() + await expect(tx) + .to.emit(registry, 'UpkeepCanceled') + .withArgs( + upkeepId, + BigNumber.from(receipt.blockNumber + cancellationDelay), + ) + }) + + it('immediately prevents upkeep', async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + await registry.connect(admin).cancelUpkeep(upkeepId) + + await getTransmitTx(registry, keeper1, [upkeepId.toString()], f + 1) + + for (let i = 0; i < cancellationDelay; i++) { + await ethers.provider.send('evm_mine', []) + } + + const tx = await getTransmitTx( + registry, + keeper1, + [upkeepId.toString()], + f + 1, + ) + + const receipt = await tx.wait() + const cancelledUpkeepReportLogs = + parseCancelledUpkeepReportLogs(receipt) + // exactly 1 CancelledUpkeepReport log should be emitted + assert.equal(cancelledUpkeepReportLogs.length, 1) + }) + + describe('when an upkeep has been performed', async () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + await getTransmitTx(registry, keeper1, [upkeepId.toString()], f + 1) + }) + + it('deducts a cancellation fee from the upkeep and gives to owner', async () => { + const minUpkeepSpend = toWei('10') + + await registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxPerformGas, + 
fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }), + offchainVersion, + offchainBytes, + ) + + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const upkeepBefore = (await registry.getUpkeep(upkeepId)).balance + const ownerBefore = (await registry.getState()).state.ownerLinkBalance + + const amountSpent = toWei('100').sub(upkeepBefore) + const cancellationFee = minUpkeepSpend.sub(amountSpent) + + await registry.connect(admin).cancelUpkeep(upkeepId) + + const payee1After = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const upkeepAfter = (await registry.getUpkeep(upkeepId)).balance + const ownerAfter = (await registry.getState()).state.ownerLinkBalance + + // post upkeep balance should be previous balance minus cancellation fee + assert.isTrue(upkeepBefore.sub(cancellationFee).eq(upkeepAfter)) + // payee balance should not change + assert.isTrue(payee1Before.eq(payee1After)) + // owner should receive the cancellation fee + assert.isTrue(ownerAfter.sub(ownerBefore).eq(cancellationFee)) + }) + + it('deducts up to balance as cancellation fee', async () => { + // Very high min spend, should deduct whole balance as cancellation fees + const minUpkeepSpend = toWei('1000') + await registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }), + offchainVersion, + offchainBytes, + ) + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const upkeepBefore = (await registry.getUpkeep(upkeepId)).balance + const ownerBefore = (await registry.getState()).state.ownerLinkBalance + + await 
registry.connect(admin).cancelUpkeep(upkeepId) + const payee1After = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const ownerAfter = (await registry.getState()).state.ownerLinkBalance + const upkeepAfter = (await registry.getUpkeep(upkeepId)).balance + + // all upkeep balance is deducted for cancellation fee + assert.equal(0, upkeepAfter.toNumber()) + // payee balance should not change + assert.isTrue(payee1After.eq(payee1Before)) + // all upkeep balance is transferred to the owner + assert.isTrue(ownerAfter.sub(ownerBefore).eq(upkeepBefore)) + }) + + it('does not deduct cancellation fee if more than minUpkeepSpend is spent', async () => { + // Very low min spend, already spent in one perform upkeep + const minUpkeepSpend = BigNumber.from(420) + await registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }), + offchainVersion, + offchainBytes, + ) + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const upkeepBefore = (await registry.getUpkeep(upkeepId)).balance + const ownerBefore = (await registry.getState()).state.ownerLinkBalance + + await registry.connect(admin).cancelUpkeep(upkeepId) + const payee1After = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const ownerAfter = (await registry.getState()).state.ownerLinkBalance + const upkeepAfter = (await registry.getUpkeep(upkeepId)).balance + + // upkeep does not pay cancellation fee after cancellation because minimum upkeep spent is met + assert.isTrue(upkeepBefore.eq(upkeepAfter)) + // owner balance does not change + assert.isTrue(ownerAfter.eq(ownerBefore)) + // payee balance does not change + 
assert.isTrue(payee1Before.eq(payee1After)) + }) + }) + }) + }) + + describe('#withdrawPayment', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + await getTransmitTx(registry, keeper1, [upkeepId.toString()], f + 1) + }) + + it('reverts if called by anyone but the payee', async () => { + await evmRevert( + registry + .connect(payee2) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ), + 'OnlyCallableByPayee()', + ) + }) + + it('reverts if called with the 0 address', async () => { + await evmRevert( + registry + .connect(payee2) + .withdrawPayment(await keeper1.getAddress(), zeroAddress), + 'InvalidRecipient()', + ) + }) + + it('updates the balances', async () => { + const to = await nonkeeper.getAddress() + const keeperBefore = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const registrationBefore = (await registry.getUpkeep(upkeepId)).balance + const toLinkBefore = await linkToken.balanceOf(to) + const registryLinkBefore = await linkToken.balanceOf(registry.address) + const registryPremiumBefore = (await registry.getState()).state + .totalPremium + const ownerBefore = (await registry.getState()).state.ownerLinkBalance + + // Withdrawing for first time, last collected = 0 + assert.equal(keeperBefore.lastCollected.toString(), '0') + + //// Do the thing + await registry + .connect(payee1) + .withdrawPayment(await keeper1.getAddress(), to) + + const keeperAfter = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const registrationAfter = (await registry.getUpkeep(upkeepId)).balance + const toLinkAfter = await linkToken.balanceOf(to) + const registryLinkAfter = await linkToken.balanceOf(registry.address) + const registryPremiumAfter = (await registry.getState()).state + .totalPremium + const ownerAfter = (await registry.getState()).state.ownerLinkBalance + + // 
registry total premium should not change + assert.isTrue(registryPremiumBefore.eq(registryPremiumAfter)) + // Last collected should be updated + assert.equal( + keeperAfter.lastCollected.toString(), + registryPremiumBefore.toString(), + ) + + const spareChange = registryPremiumBefore.mod( + BigNumber.from(keeperAddresses.length), + ) + // spare change should go to owner + assert.isTrue(ownerAfter.sub(spareChange).eq(ownerBefore)) + + assert.isTrue(keeperAfter.balance.eq(BigNumber.from(0))) + assert.isTrue(registrationBefore.eq(registrationAfter)) + assert.isTrue(toLinkBefore.add(keeperBefore.balance).eq(toLinkAfter)) + assert.isTrue( + registryLinkBefore.sub(keeperBefore.balance).eq(registryLinkAfter), + ) + }) + + it('emits a log announcing the withdrawal', async () => { + const balance = ( + await registry.getTransmitterInfo(await keeper1.getAddress()) + ).balance + const tx = await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + await expect(tx) + .to.emit(registry, 'PaymentWithdrawn') + .withArgs( + await keeper1.getAddress(), + balance, + await nonkeeper.getAddress(), + await payee1.getAddress(), + ) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/KeeperRegistry2_1.test.ts b/contracts/test/v0.8/automation/KeeperRegistry2_1.test.ts new file mode 100644 index 00000000..1e8bbb0e --- /dev/null +++ b/contracts/test/v0.8/automation/KeeperRegistry2_1.test.ts @@ -0,0 +1,5700 @@ +import { ethers } from 'hardhat' +import { loadFixture } from '@nomicfoundation/hardhat-network-helpers' +import { assert, expect } from 'chai' +import { + BigNumber, + BigNumberish, + BytesLike, + ContractReceipt, + ContractTransaction, + Signer, + Wallet, +} from 'ethers' +import { evmRevert } from '../../test-helpers/matchers' +import { getUsers, Personas } from '../../test-helpers/setup' +import { randomAddress, toWei } from '../../test-helpers/helpers' +import { LinkToken__factory as LinkTokenFactory } from 
'../../../typechain/factories/LinkToken__factory' +import { StreamsLookupUpkeep__factory as StreamsLookupUpkeepFactory } from '../../../typechain/factories/StreamsLookupUpkeep__factory' +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../../typechain/factories/MockV3Aggregator__factory' +import { UpkeepMock__factory as UpkeepMockFactory } from '../../../typechain/factories/UpkeepMock__factory' +import { UpkeepAutoFunder__factory as UpkeepAutoFunderFactory } from '../../../typechain/factories/UpkeepAutoFunder__factory' +import { MockArbGasInfo__factory as MockArbGasInfoFactory } from '../../../typechain/factories/MockArbGasInfo__factory' +import { MockOVMGasPriceOracle__factory as MockOVMGasPriceOracleFactory } from '../../../typechain/factories/MockOVMGasPriceOracle__factory' +import { ILogAutomation__factory as ILogAutomationactory } from '../../../typechain/factories/ILogAutomation__factory' +import { IAutomationForwarder__factory as IAutomationForwarderFactory } from '../../../typechain/factories/IAutomationForwarder__factory' +import { KeeperRegistry2_1__factory as KeeperRegistryFactory } from '../../../typechain/factories/KeeperRegistry2_1__factory' +import { KeeperRegistryLogicA2_1__factory as KeeperRegistryLogicAFactory } from '../../../typechain/factories/KeeperRegistryLogicA2_1__factory' +import { KeeperRegistryLogicB2_1__factory as KeeperRegistryLogicBFactory } from '../../../typechain/factories/KeeperRegistryLogicB2_1__factory' +import { AutomationForwarderLogic__factory as AutomationForwarderLogicFactory } from '../../../typechain/factories/AutomationForwarderLogic__factory' +import { MockArbSys__factory as MockArbSysFactory } from '../../../typechain/factories/MockArbSys__factory' +import { AutomationUtils2_1 as AutomationUtils } from '../../../typechain/AutomationUtils2_1' +import { StreamsLookupUpkeep } from '../../../typechain/StreamsLookupUpkeep' +import { MockV3Aggregator } from '../../../typechain/MockV3Aggregator' 
+import { LinkToken } from '../../../typechain/LinkToken' +import { UpkeepMock } from '../../../typechain/UpkeepMock' +import { MockArbGasInfo } from '../../../typechain/MockArbGasInfo' +import { MockOVMGasPriceOracle } from '../../../typechain/MockOVMGasPriceOracle' +import { UpkeepTranscoder } from '../../../typechain/UpkeepTranscoder' +import { UpkeepAutoFunder } from '../../../typechain' +import { + CancelledUpkeepReportEvent, + IKeeperRegistryMaster as IKeeperRegistry, + InsufficientFundsUpkeepReportEvent, + ReorgedUpkeepReportEvent, + StaleUpkeepReportEvent, + UpkeepPerformedEvent, +} from '../../../typechain/IKeeperRegistryMaster' +import { + deployMockContract, + MockContract, +} from '@ethereum-waffle/mock-contract' +import { deployRegistry21 } from './helpers' + +const describeMaybe = process.env.SKIP_SLOW ? describe.skip : describe +const itMaybe = process.env.SKIP_SLOW ? it.skip : it + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +/*********************************** REGISTRY v2.1 IS FROZEN ************************************/ + +// We are leaving the original tests enabled, however as 2.1 is still actively being deployed + +describe('KeeperRegistry2_1 - Frozen [ @skip-coverage ]', () => { + it('has not changed', () => { + assert.equal( + ethers.utils.id(KeeperRegistryFactory.bytecode), + '0xd8dfe20e746039e8420349326becc0a15dcd8fa3cd6aa0924d214328a7c45206', + 'KeeperRegistry bytecode has changed', + ) + assert.equal( + ethers.utils.id(KeeperRegistryLogicAFactory.bytecode), + '0xe69d334fa75af0d6d8572996d815c93b8be1c8546670510b0d20ef349e57b2df', + 'KeeperRegistryLogicA bytecode has changed', + ) + assert.equal( + ethers.utils.id(KeeperRegistryLogicBFactory.bytecode), + '0x891c26ba35b9b13afc9400fac5471d15842828ab717cbdc70ee263210c542563', + 'KeeperRegistryLogicB bytecode has changed', + ) + 
assert.equal( + ethers.utils.id(AutomationForwarderLogicFactory.bytecode), + '0x6b89065111e9236407329fae3d68b33c311b7d3b6c2ae3dd15c1691a28b1aca7', + 'AutomationForwarderLogic bytecode has changed', + ) + }) +}) + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +// copied from AutomationRegistryInterface2_1.sol +enum UpkeepFailureReason { + NONE, + UPKEEP_CANCELLED, + UPKEEP_PAUSED, + TARGET_CHECK_REVERTED, + UPKEEP_NOT_NEEDED, + PERFORM_DATA_EXCEEDS_LIMIT, + INSUFFICIENT_BALANCE, + CHECK_CALLBACK_REVERTED, + REVERT_DATA_EXCEEDS_LIMIT, + REGISTRY_PAUSED, +} + +// copied from AutomationRegistryInterface2_1.sol +enum Mode { + DEFAULT, + ARBITRUM, + OPTIMISM, +} + +// copied from KeeperRegistryBase2_1.sol +enum Trigger { + CONDITION, + LOG, +} + +// un-exported types that must be extracted from the utils contract +type Report = Parameters[0] +type OnChainConfig = Parameters[0] +type LogTrigger = Parameters[0] +type ConditionalTrigger = Parameters[0] +type Log = Parameters[0] + +// ----------------------------------------------------------------------------------------------- + +// These values should match the constants declared in registry +let registryConditionalOverhead: BigNumber +let registryLogOverhead: BigNumber +let registryPerSignerGasOverhead: BigNumber +let registryPerPerformByteGasOverhead: BigNumber +let cancellationDelay: number + +// This is the margin for gas that we test for. 
Gas charged should always be greater +// than total gas used in tx but should not increase beyond this margin +const gasCalculationMargin = BigNumber.from(8000) + +const linkEth = BigNumber.from(5000000000000000) // 1 Link = 0.005 Eth +const gasWei = BigNumber.from(1000000000) // 1 gwei +// ----------------------------------------------------------------------------------------------- +// test-wide configs for upkeeps +const linkDivisibility = BigNumber.from('1000000000000000000') +const performGas = BigNumber.from('1000000') +const paymentPremiumBase = BigNumber.from('1000000000') +const paymentPremiumPPB = BigNumber.from('250000000') +const flatFeeMicroLink = BigNumber.from(0) + +const randomBytes = '0x1234abcd' +const emptyBytes = '0x' +const emptyBytes32 = + '0x0000000000000000000000000000000000000000000000000000000000000000' + +const transmitGasOverhead = 1_000_000 +const checkGasOverhead = 400_000 + +const stalenessSeconds = BigNumber.from(43820) +const gasCeilingMultiplier = BigNumber.from(2) +const checkGasLimit = BigNumber.from(10000000) +const fallbackGasPrice = gasWei.mul(BigNumber.from('2')) +const fallbackLinkPrice = linkEth.div(BigNumber.from('2')) +const maxCheckDataSize = BigNumber.from(1000) +const maxPerformDataSize = BigNumber.from(1000) +const maxRevertDataSize = BigNumber.from(1000) +const maxPerformGas = BigNumber.from(5000000) +const minUpkeepSpend = BigNumber.from(0) +const f = 1 +const offchainVersion = 1 +const offchainBytes = '0x' +const zeroAddress = ethers.constants.AddressZero +const epochAndRound5_1 = + '0x0000000000000000000000000000000000000000000000000000000000000501' + +let logTriggerConfig: string + +// ----------------------------------------------------------------------------------------------- + +// Smart contract factories +let linkTokenFactory: LinkTokenFactory +let mockV3AggregatorFactory: MockV3AggregatorFactory +let upkeepMockFactory: UpkeepMockFactory +let upkeepAutoFunderFactory: UpkeepAutoFunderFactory +let 
mockArbGasInfoFactory: MockArbGasInfoFactory +let mockOVMGasPriceOracleFactory: MockOVMGasPriceOracleFactory +let streamsLookupUpkeepFactory: StreamsLookupUpkeepFactory +let personas: Personas + +// contracts +let linkToken: LinkToken +let linkEthFeed: MockV3Aggregator +let gasPriceFeed: MockV3Aggregator +let registry: IKeeperRegistry // default registry, used for most tests +let arbRegistry: IKeeperRegistry // arbitrum registry +let opRegistry: IKeeperRegistry // optimism registry +let mgRegistry: IKeeperRegistry // "migrate registry" used in migration tests +let blankRegistry: IKeeperRegistry // used to test initial configurations +let mock: UpkeepMock +let autoFunderUpkeep: UpkeepAutoFunder +let ltUpkeep: MockContract +let transcoder: UpkeepTranscoder +let mockArbGasInfo: MockArbGasInfo +let mockOVMGasPriceOracle: MockOVMGasPriceOracle +let streamsLookupUpkeep: StreamsLookupUpkeep +let automationUtils: AutomationUtils + +function now() { + return Math.floor(Date.now() / 1000) +} + +async function getUpkeepID(tx: ContractTransaction): Promise { + const receipt = await tx.wait() + for (const event of receipt.events || []) { + if ( + event.args && + event.eventSignature == 'UpkeepRegistered(uint256,uint32,address)' + ) { + return event.args[0] + } + } + throw new Error('could not find upkeep ID in tx event logs') +} + +const getTriggerType = (upkeepId: BigNumber): Trigger => { + const hexBytes = ethers.utils.defaultAbiCoder.encode(['uint256'], [upkeepId]) + const bytes = ethers.utils.arrayify(hexBytes) + for (let idx = 4; idx < 15; idx++) { + if (bytes[idx] != 0) { + return Trigger.CONDITION + } + } + return bytes[15] as Trigger +} + +const encodeConfig = (onchainConfig: OnChainConfig) => { + return ( + '0x' + + automationUtils.interface + .encodeFunctionData('_onChainConfig', [onchainConfig]) + .slice(10) + ) +} + +const encodeBlockTrigger = (conditionalTrigger: ConditionalTrigger) => { + return ( + '0x' + + automationUtils.interface + 
.encodeFunctionData('_conditionalTrigger', [conditionalTrigger]) + .slice(10) + ) +} + +const encodeLogTrigger = (logTrigger: LogTrigger) => { + return ( + '0x' + + automationUtils.interface + .encodeFunctionData('_logTrigger', [logTrigger]) + .slice(10) + ) +} + +const encodeLog = (log: Log) => { + return ( + '0x' + automationUtils.interface.encodeFunctionData('_log', [log]).slice(10) + ) +} + +const encodeReport = (report: Report) => { + return ( + '0x' + + automationUtils.interface.encodeFunctionData('_report', [report]).slice(10) + ) +} + +type UpkeepData = { + Id: BigNumberish + performGas: BigNumberish + performData: BytesLike + trigger: BytesLike +} + +const makeReport = (upkeeps: UpkeepData[]) => { + const upkeepIds = upkeeps.map((u) => u.Id) + const performGases = upkeeps.map((u) => u.performGas) + const triggers = upkeeps.map((u) => u.trigger) + const performDatas = upkeeps.map((u) => u.performData) + return encodeReport({ + fastGasWei: gasWei, + linkNative: linkEth, + upkeepIds, + gasLimits: performGases, + triggers, + performDatas, + }) +} + +const makeLatestBlockReport = async (upkeepsIDs: BigNumberish[]) => { + const latestBlock = await ethers.provider.getBlock('latest') + const upkeeps: UpkeepData[] = [] + for (let i = 0; i < upkeepsIDs.length; i++) { + upkeeps.push({ + Id: upkeepsIDs[i], + performGas, + trigger: encodeBlockTrigger({ + blockNum: latestBlock.number, + blockHash: latestBlock.hash, + }), + performData: '0x', + }) + } + return makeReport(upkeeps) +} + +const signReport = ( + reportContext: string[], + report: any, + signers: Wallet[], +) => { + const reportDigest = ethers.utils.keccak256(report) + const packedArgs = ethers.utils.solidityPack( + ['bytes32', 'bytes32[3]'], + [reportDigest, reportContext], + ) + const packedDigest = ethers.utils.keccak256(packedArgs) + + const signatures = [] + for (const signer of signers) { + signatures.push(signer._signingKey().signDigest(packedDigest)) + } + const vs = signatures.map((i) => '0' + (i.v - 
27).toString(16)).join('') + return { + vs: '0x' + vs.padEnd(64, '0'), + rs: signatures.map((i) => i.r), + ss: signatures.map((i) => i.s), + } +} + +const parseUpkeepPerformedLogs = (receipt: ContractReceipt) => { + const parsedLogs = [] + for (const rawLog of receipt.logs) { + try { + const log = registry.interface.parseLog(rawLog) + if ( + log.name == + registry.interface.events[ + 'UpkeepPerformed(uint256,bool,uint96,uint256,uint256,bytes)' + ].name + ) { + parsedLogs.push(log as unknown as UpkeepPerformedEvent) + } + } catch { + continue + } + } + return parsedLogs +} + +const parseReorgedUpkeepReportLogs = (receipt: ContractReceipt) => { + const parsedLogs = [] + for (const rawLog of receipt.logs) { + try { + const log = registry.interface.parseLog(rawLog) + if ( + log.name == + registry.interface.events['ReorgedUpkeepReport(uint256,bytes)'].name + ) { + parsedLogs.push(log as unknown as ReorgedUpkeepReportEvent) + } + } catch { + continue + } + } + return parsedLogs +} + +const parseStaleUpkeepReportLogs = (receipt: ContractReceipt) => { + const parsedLogs = [] + for (const rawLog of receipt.logs) { + try { + const log = registry.interface.parseLog(rawLog) + if ( + log.name == + registry.interface.events['StaleUpkeepReport(uint256,bytes)'].name + ) { + parsedLogs.push(log as unknown as StaleUpkeepReportEvent) + } + } catch { + continue + } + } + return parsedLogs +} + +const parseInsufficientFundsUpkeepReportLogs = (receipt: ContractReceipt) => { + const parsedLogs = [] + for (const rawLog of receipt.logs) { + try { + const log = registry.interface.parseLog(rawLog) + if ( + log.name == + registry.interface.events[ + 'InsufficientFundsUpkeepReport(uint256,bytes)' + ].name + ) { + parsedLogs.push(log as unknown as InsufficientFundsUpkeepReportEvent) + } + } catch { + continue + } + } + return parsedLogs +} + +const parseCancelledUpkeepReportLogs = (receipt: ContractReceipt) => { + const parsedLogs = [] + for (const rawLog of receipt.logs) { + try { + const log 
= registry.interface.parseLog(rawLog) + if ( + log.name == + registry.interface.events['CancelledUpkeepReport(uint256,bytes)'].name + ) { + parsedLogs.push(log as unknown as CancelledUpkeepReportEvent) + } + } catch { + continue + } + } + return parsedLogs +} + +describe('KeeperRegistry2_1', () => { + let owner: Signer + let keeper1: Signer + let keeper2: Signer + let keeper3: Signer + let keeper4: Signer + let keeper5: Signer + let nonkeeper: Signer + let signer1: Wallet + let signer2: Wallet + let signer3: Wallet + let signer4: Wallet + let signer5: Wallet + let admin: Signer + let payee1: Signer + let payee2: Signer + let payee3: Signer + let payee4: Signer + let payee5: Signer + + let upkeepId: BigNumber // conditional upkeep + let afUpkeepId: BigNumber // auto funding upkeep + let logUpkeepId: BigNumber // log trigger upkeepID + let streamsLookupUpkeepId: BigNumber // streams lookup upkeep + const numUpkeeps = 4 // see above + let keeperAddresses: string[] + let payees: string[] + let signers: Wallet[] + let signerAddresses: string[] + let config: any + let baseConfig: Parameters + let upkeepManager: string + + before(async () => { + personas = (await getUsers()).personas + + const utilsFactory = await ethers.getContractFactory('AutomationUtils2_1') + automationUtils = await utilsFactory.deploy() + + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + ) + // need full path because there are two contracts with name MockV3Aggregator + mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as unknown as MockV3AggregatorFactory + upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') + upkeepAutoFunderFactory = + await ethers.getContractFactory('UpkeepAutoFunder') + mockArbGasInfoFactory = await ethers.getContractFactory('MockArbGasInfo') + mockOVMGasPriceOracleFactory = await ethers.getContractFactory( + 'MockOVMGasPriceOracle', + ) + 
streamsLookupUpkeepFactory = await ethers.getContractFactory( + 'StreamsLookupUpkeep', + ) + + owner = personas.Default + keeper1 = personas.Carol + keeper2 = personas.Eddy + keeper3 = personas.Nancy + keeper4 = personas.Norbert + keeper5 = personas.Nick + nonkeeper = personas.Ned + admin = personas.Neil + payee1 = personas.Nelly + payee2 = personas.Norbert + payee3 = personas.Nick + payee4 = personas.Eddy + payee5 = personas.Carol + upkeepManager = await personas.Norbert.getAddress() + // signers + signer1 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000001', + ) + signer2 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000002', + ) + signer3 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000003', + ) + signer4 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000004', + ) + signer5 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000005', + ) + + keeperAddresses = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + await keeper3.getAddress(), + await keeper4.getAddress(), + await keeper5.getAddress(), + ] + payees = [ + await payee1.getAddress(), + await payee2.getAddress(), + await payee3.getAddress(), + await payee4.getAddress(), + await payee5.getAddress(), + ] + signers = [signer1, signer2, signer3, signer4, signer5] + + // We append 26 random addresses to keepers, payees and signers to get a system of 31 oracles + // This allows f value of 1 - 10 + for (let i = 0; i < 26; i++) { + keeperAddresses.push(randomAddress()) + payees.push(randomAddress()) + signers.push(ethers.Wallet.createRandom()) + } + signerAddresses = [] + for (const signer of signers) { + signerAddresses.push(await signer.getAddress()) + } + + logTriggerConfig = + '0x' + + automationUtils.interface + .encodeFunctionData('_logTriggerConfig', [ + { + contractAddress: randomAddress(), + filterSelector: 0, 
+ topic0: ethers.utils.randomBytes(32), + topic1: ethers.utils.randomBytes(32), + topic2: ethers.utils.randomBytes(32), + topic3: ethers.utils.randomBytes(32), + }, + ]) + .slice(10) + }) + + const linkForGas = ( + upkeepGasSpent: BigNumber, + gasOverhead: BigNumber, + gasMultiplier: BigNumber, + premiumPPB: BigNumber, + flatFee: BigNumber, + l1CostWei?: BigNumber, + numUpkeepsBatch?: BigNumber, + ) => { + l1CostWei = l1CostWei === undefined ? BigNumber.from(0) : l1CostWei + numUpkeepsBatch = + numUpkeepsBatch === undefined ? BigNumber.from(1) : numUpkeepsBatch + + const gasSpent = gasOverhead.add(BigNumber.from(upkeepGasSpent)) + const base = gasWei + .mul(gasMultiplier) + .mul(gasSpent) + .mul(linkDivisibility) + .div(linkEth) + const l1Fee = l1CostWei + .mul(gasMultiplier) + .div(numUpkeepsBatch) + .mul(linkDivisibility) + .div(linkEth) + const gasPayment = base.add(l1Fee) + + const premium = gasWei + .mul(gasMultiplier) + .mul(upkeepGasSpent) + .add(l1CostWei.mul(gasMultiplier).div(numUpkeepsBatch)) + .mul(linkDivisibility) + .div(linkEth) + .mul(premiumPPB) + .div(paymentPremiumBase) + .add(BigNumber.from(flatFee).mul('1000000000000')) + + return { + total: gasPayment.add(premium), + gasPaymemnt: gasPayment, + premium, + } + } + + const verifyMaxPayment = async ( + registry: IKeeperRegistry, + l1CostWei?: BigNumber, + ) => { + type TestCase = { + name: string + multiplier: number + gas: number + premium: number + flatFee: number + } + + const tests: TestCase[] = [ + { + name: 'no fees', + multiplier: 1, + gas: 100000, + premium: 0, + flatFee: 0, + }, + { + name: 'basic fees', + multiplier: 1, + gas: 100000, + premium: 250000000, + flatFee: 1000000, + }, + { + name: 'max fees', + multiplier: 3, + gas: 10000000, + premium: 250000000, + flatFee: 1000000, + }, + ] + + const fPlusOne = BigNumber.from(f + 1) + const totalConditionalOverhead = registryConditionalOverhead + .add(registryPerSignerGasOverhead.mul(fPlusOne)) + 
.add(registryPerPerformByteGasOverhead.mul(maxPerformDataSize)) + const totalLogOverhead = registryLogOverhead + .add(registryPerSignerGasOverhead.mul(fPlusOne)) + .add(registryPerPerformByteGasOverhead.mul(maxPerformDataSize)) + + for (const test of tests) { + await registry.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig({ + paymentPremiumPPB: test.premium, + flatFeeMicroLink: test.flatFee, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier: test.multiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrars: [], + upkeepPrivilegeManager: upkeepManager, + }), + offchainVersion, + offchainBytes, + ) + + const conditionalPrice = await registry.getMaxPaymentForGas( + Trigger.CONDITION, + test.gas, + ) + expect(conditionalPrice).to.equal( + linkForGas( + BigNumber.from(test.gas), + totalConditionalOverhead, + BigNumber.from(test.multiplier), + BigNumber.from(test.premium), + BigNumber.from(test.flatFee), + l1CostWei, + ).total, + ) + + const logPrice = await registry.getMaxPaymentForGas(Trigger.LOG, test.gas) + expect(logPrice).to.equal( + linkForGas( + BigNumber.from(test.gas), + totalLogOverhead, + BigNumber.from(test.multiplier), + BigNumber.from(test.premium), + BigNumber.from(test.flatFee), + l1CostWei, + ).total, + ) + } + } + + const verifyConsistentAccounting = async ( + maxAllowedSpareChange: BigNumber, + ) => { + const expectedLinkBalance = (await registry.getState()).state + .expectedLinkBalance + const linkTokenBalance = await linkToken.balanceOf(registry.address) + const upkeepIdBalance = (await registry.getUpkeep(upkeepId)).balance + let totalKeeperBalance = BigNumber.from(0) + for (let i = 0; i < keeperAddresses.length; i++) { + totalKeeperBalance = totalKeeperBalance.add( + (await registry.getTransmitterInfo(keeperAddresses[i])).balance, + ) + } + const ownerBalance = (await 
registry.getState()).state.ownerLinkBalance + assert.isTrue(expectedLinkBalance.eq(linkTokenBalance)) + assert.isTrue( + upkeepIdBalance + .add(totalKeeperBalance) + .add(ownerBalance) + .lte(expectedLinkBalance), + ) + assert.isTrue( + expectedLinkBalance + .sub(upkeepIdBalance) + .sub(totalKeeperBalance) + .sub(ownerBalance) + .lte(maxAllowedSpareChange), + ) + } + + interface GetTransmitTXOptions { + numSigners?: number + startingSignerIndex?: number + gasLimit?: BigNumberish + gasPrice?: BigNumberish + performGas?: BigNumberish + performData?: string + checkBlockNum?: number + checkBlockHash?: string + logBlockHash?: BytesLike + txHash?: BytesLike + logIndex?: number + timestamp?: number + } + + const getTransmitTx = async ( + registry: IKeeperRegistry, + transmitter: Signer, + upkeepIds: BigNumber[], + overrides: GetTransmitTXOptions = {}, + ) => { + const latestBlock = await ethers.provider.getBlock('latest') + const configDigest = (await registry.getState()).state.latestConfigDigest + const config = { + numSigners: f + 1, + startingSignerIndex: 0, + performData: '0x', + performGas, + checkBlockNum: latestBlock.number, + checkBlockHash: latestBlock.hash, + logIndex: 0, + txHash: undefined, // assigned uniquely below + logBlockHash: undefined, // assigned uniquely below + timestamp: now(), + gasLimit: undefined, + gasPrice: undefined, + } + Object.assign(config, overrides) + const upkeeps: UpkeepData[] = [] + for (let i = 0; i < upkeepIds.length; i++) { + let trigger: string + switch (getTriggerType(upkeepIds[i])) { + case Trigger.CONDITION: + trigger = encodeBlockTrigger({ + blockNum: config.checkBlockNum, + blockHash: config.checkBlockHash, + }) + break + case Trigger.LOG: + trigger = encodeLogTrigger({ + logBlockHash: config.logBlockHash || ethers.utils.randomBytes(32), + txHash: config.txHash || ethers.utils.randomBytes(32), + logIndex: config.logIndex, + blockNum: config.checkBlockNum, + blockHash: config.checkBlockHash, + }) + break + } + upkeeps.push({ 
+ Id: upkeepIds[i], + performGas: config.performGas, + trigger, + performData: config.performData, + }) + } + + const report = makeReport(upkeeps) + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] + const sigs = signReport( + reportContext, + report, + signers.slice( + config.startingSignerIndex, + config.startingSignerIndex + config.numSigners, + ), + ) + + type txOverride = { + gasLimit?: BigNumberish | Promise + gasPrice?: BigNumberish | Promise + } + const txOverrides: txOverride = {} + if (config.gasLimit) { + txOverrides.gasLimit = config.gasLimit + } + if (config.gasPrice) { + txOverrides.gasPrice = config.gasPrice + } + + return registry + .connect(transmitter) + .transmit( + [configDigest, epochAndRound5_1, emptyBytes32], + report, + sigs.rs, + sigs.ss, + sigs.vs, + txOverrides, + ) + } + + const getTransmitTxWithReport = async ( + registry: IKeeperRegistry, + transmitter: Signer, + report: BytesLike, + ) => { + const configDigest = (await registry.getState()).state.latestConfigDigest + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] + const sigs = signReport(reportContext, report, signers.slice(0, f + 1)) + + return registry + .connect(transmitter) + .transmit( + [configDigest, epochAndRound5_1, emptyBytes32], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ) + } + + const setup = async () => { + linkToken = await linkTokenFactory.connect(owner).deploy() + gasPriceFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(0, gasWei) + linkEthFeed = await mockV3AggregatorFactory + .connect(owner) + .deploy(9, linkEth) + const upkeepTranscoderFactory = await ethers.getContractFactory( + 'UpkeepTranscoder4_0', + ) + transcoder = await upkeepTranscoderFactory.connect(owner).deploy() + mockArbGasInfo = await mockArbGasInfoFactory.connect(owner).deploy() + mockOVMGasPriceOracle = await mockOVMGasPriceOracleFactory + .connect(owner) + .deploy() + streamsLookupUpkeep = await streamsLookupUpkeepFactory + .connect(owner) 
+ .deploy( + BigNumber.from('10000'), + BigNumber.from('100'), + false /* useArbBlock */, + true /* staging */, + false /* verify mercury response */, + ) + + const arbOracleCode = await ethers.provider.send('eth_getCode', [ + mockArbGasInfo.address, + ]) + await ethers.provider.send('hardhat_setCode', [ + '0x000000000000000000000000000000000000006C', + arbOracleCode, + ]) + + const optOracleCode = await ethers.provider.send('eth_getCode', [ + mockOVMGasPriceOracle.address, + ]) + await ethers.provider.send('hardhat_setCode', [ + '0x420000000000000000000000000000000000000F', + optOracleCode, + ]) + + const mockArbSys = await new MockArbSysFactory(owner).deploy() + const arbSysCode = await ethers.provider.send('eth_getCode', [ + mockArbSys.address, + ]) + await ethers.provider.send('hardhat_setCode', [ + '0x0000000000000000000000000000000000000064', + arbSysCode, + ]) + + config = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrars: [], + upkeepPrivilegeManager: upkeepManager, + } + + baseConfig = [ + signerAddresses, + keeperAddresses, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ] + + registry = await deployRegistry21( + owner, + Mode.DEFAULT, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + arbRegistry = await deployRegistry21( + owner, + Mode.ARBITRUM, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + opRegistry = await deployRegistry21( + owner, + Mode.OPTIMISM, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + mgRegistry = await deployRegistry21( + owner, + Mode.DEFAULT, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + blankRegistry = await deployRegistry21( + owner, + Mode.DEFAULT, + 
linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + registryConditionalOverhead = await registry.getConditionalGasOverhead() + registryLogOverhead = await registry.getLogGasOverhead() + registryPerSignerGasOverhead = await registry.getPerSignerGasOverhead() + registryPerPerformByteGasOverhead = + await registry.getPerPerformByteGasOverhead() + cancellationDelay = (await registry.getCancellationDelay()).toNumber() + + for (const reg of [registry, arbRegistry, opRegistry, mgRegistry]) { + await reg.connect(owner).setConfig(...baseConfig) + await reg.connect(owner).setPayees(payees) + await linkToken.connect(admin).approve(reg.address, toWei('1000')) + await linkToken.connect(owner).approve(reg.address, toWei('1000')) + } + + mock = await upkeepMockFactory.deploy() + await linkToken + .connect(owner) + .transfer(await admin.getAddress(), toWei('1000')) + let tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + upkeepId = await getUpkeepID(tx) + + autoFunderUpkeep = await upkeepAutoFunderFactory + .connect(owner) + .deploy(linkToken.address, registry.address) + tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](autoFunderUpkeep.address, performGas, autoFunderUpkeep.address, randomBytes, '0x') + afUpkeepId = await getUpkeepID(tx) + + ltUpkeep = await deployMockContract(owner, ILogAutomationactory.abi) + tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,uint8,bytes,bytes,bytes)' + ](ltUpkeep.address, performGas, await admin.getAddress(), Trigger.LOG, '0x', logTriggerConfig, emptyBytes) + logUpkeepId = await getUpkeepID(tx) + + await autoFunderUpkeep.setUpkeepId(afUpkeepId) + // Give enough funds for upkeep as well as to the upkeep contract + await linkToken + .connect(owner) + .transfer(autoFunderUpkeep.address, toWei('1000')) + + tx = await 
registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](streamsLookupUpkeep.address, performGas, await admin.getAddress(), randomBytes, '0x') + streamsLookupUpkeepId = await getUpkeepID(tx) + } + + const getMultipleUpkeepsDeployedAndFunded = async ( + numPassingConditionalUpkeeps: number, + numPassingLogUpkeeps: number, + numFailingUpkeeps: number, + ) => { + const passingConditionalUpkeepIds = [] + const passingLogUpkeepIds = [] + const failingUpkeepIds = [] + for (let i = 0; i < numPassingConditionalUpkeeps; i++) { + const mock = await upkeepMockFactory.deploy() + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(BigNumber.from('0')) + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + const condUpkeepId = await getUpkeepID(tx) + passingConditionalUpkeepIds.push(condUpkeepId) + + // Add funds to passing upkeeps + await registry.connect(admin).addFunds(condUpkeepId, toWei('100')) + } + for (let i = 0; i < numPassingLogUpkeeps; i++) { + const mock = await upkeepMockFactory.deploy() + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(BigNumber.from('0')) + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,uint8,bytes,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), Trigger.LOG, '0x', logTriggerConfig, emptyBytes) + const logUpkeepId = await getUpkeepID(tx) + passingLogUpkeepIds.push(logUpkeepId) + + // Add funds to passing upkeeps + await registry.connect(admin).addFunds(logUpkeepId, toWei('100')) + } + for (let i = 0; i < numFailingUpkeeps; i++) { + const mock = await upkeepMockFactory.deploy() + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(BigNumber.from('0')) + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await 
admin.getAddress(), randomBytes, '0x') + const failingUpkeepId = await getUpkeepID(tx) + failingUpkeepIds.push(failingUpkeepId) + } + return { + passingConditionalUpkeepIds, + passingLogUpkeepIds, + failingUpkeepIds, + } + } + + beforeEach(async () => { + await loadFixture(setup) + }) + + describe('#transmit', () => { + const fArray = [1, 5, 10] + + it('reverts when registry is paused', async () => { + await registry.connect(owner).pause() + await evmRevert( + getTransmitTx(registry, keeper1, [upkeepId]), + 'RegistryPaused()', + ) + }) + + it('reverts when called by non active transmitter', async () => { + await evmRevert( + getTransmitTx(registry, payee1, [upkeepId]), + 'OnlyActiveTransmitters()', + ) + }) + + it('reverts when report data lengths mismatches', async () => { + const upkeepIds = [] + const gasLimits: BigNumber[] = [] + const triggers: string[] = [] + const performDatas = [] + + upkeepIds.push(upkeepId) + gasLimits.push(performGas) + triggers.push('0x') + performDatas.push('0x') + // Push an extra perform data + performDatas.push('0x') + + const report = encodeReport({ + fastGasWei: 0, + linkNative: 0, + upkeepIds, + gasLimits, + triggers, + performDatas, + }) + + await evmRevert( + getTransmitTxWithReport(registry, keeper1, report), + 'InvalidReport()', + ) + }) + + it('returns early when invalid upkeepIds are included in report', async () => { + const tx = await getTransmitTx(registry, keeper1, [ + upkeepId.add(BigNumber.from('1')), + ]) + + const receipt = await tx.wait() + const cancelledUpkeepReportLogs = parseCancelledUpkeepReportLogs(receipt) + // exactly 1 CancelledUpkeepReport log should be emitted + assert.equal(cancelledUpkeepReportLogs.length, 1) + }) + + it('returns early when upkeep has insufficient funds', async () => { + const tx = await getTransmitTx(registry, keeper1, [upkeepId]) + const receipt = await tx.wait() + const insufficientFundsUpkeepReportLogs = + parseInsufficientFundsUpkeepReportLogs(receipt) + // exactly 1 
InsufficientFundsUpkeepReportLogs log should be emitted + assert.equal(insufficientFundsUpkeepReportLogs.length, 1) + }) + + it('permits retrying log triggers after funds are added', async () => { + const txHash = ethers.utils.randomBytes(32) + let tx = await getTransmitTx(registry, keeper1, [logUpkeepId], { + txHash, + logIndex: 0, + }) + let receipt = await tx.wait() + const insufficientFundsLogs = + parseInsufficientFundsUpkeepReportLogs(receipt) + assert.equal(insufficientFundsLogs.length, 1) + registry.connect(admin).addFunds(logUpkeepId, toWei('100')) + tx = await getTransmitTx(registry, keeper1, [logUpkeepId], { + txHash, + logIndex: 0, + }) + receipt = await tx.wait() + const performedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal(performedLogs.length, 1) + }) + + context('When the upkeep is funded', async () => { + beforeEach(async () => { + // Fund the upkeep + await Promise.all([ + registry.connect(admin).addFunds(upkeepId, toWei('100')), + registry.connect(admin).addFunds(logUpkeepId, toWei('100')), + ]) + }) + + it('handles duplicate upkeepIDs', async () => { + const tests: [string, BigNumber, number, number][] = [ + // [name, upkeep, num stale, num performed] + ['conditional', upkeepId, 1, 1], // checkBlocks must be sequential + ['log-trigger', logUpkeepId, 0, 2], // logs are deduped based on the "trigger ID" + ] + for (const [type, id, nStale, nPerformed] of tests) { + const tx = await getTransmitTx(registry, keeper1, [id, id]) + const receipt = await tx.wait() + const staleUpkeepReport = parseStaleUpkeepReportLogs(receipt) + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal( + staleUpkeepReport.length, + nStale, + `wrong log count for ${type} upkeep`, + ) + assert.equal( + upkeepPerformedLogs.length, + nPerformed, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('handles duplicate log triggers', async () => { + const logBlockHash = ethers.utils.randomBytes(32) + const txHash = 
ethers.utils.randomBytes(32) + const logIndex = 0 + const expectedDedupKey = ethers.utils.solidityKeccak256( + ['uint256', 'bytes32', 'bytes32', 'uint32'], + [logUpkeepId, logBlockHash, txHash, logIndex], + ) + assert.isFalse(await registry.hasDedupKey(expectedDedupKey)) + const tx = await getTransmitTx( + registry, + keeper1, + [logUpkeepId, logUpkeepId], + { logBlockHash, txHash, logIndex }, // will result in the same dedup key + ) + const receipt = await tx.wait() + const staleUpkeepReport = parseStaleUpkeepReportLogs(receipt) + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal(staleUpkeepReport.length, 1) + assert.equal(upkeepPerformedLogs.length, 1) + assert.isTrue(await registry.hasDedupKey(expectedDedupKey)) + await expect(tx) + .to.emit(registry, 'DedupKeyAdded') + .withArgs(expectedDedupKey) + }) + + it('returns early when check block number is less than last perform (block)', async () => { + // First perform an upkeep to put last perform block number on upkeep state + const tx = await getTransmitTx(registry, keeper1, [upkeepId]) + await tx.wait() + const lastPerformed = (await registry.getUpkeep(upkeepId)) + .lastPerformedBlockNumber + const lastPerformBlock = await ethers.provider.getBlock(lastPerformed) + assert.equal(lastPerformed.toString(), tx.blockNumber?.toString()) + // Try to transmit a report which has checkBlockNumber = lastPerformed-1, should result in stale report + const transmitTx = await getTransmitTx(registry, keeper1, [upkeepId], { + checkBlockNum: lastPerformBlock.number - 1, + checkBlockHash: lastPerformBlock.parentHash, + }) + const receipt = await transmitTx.wait() + const staleUpkeepReportLogs = parseStaleUpkeepReportLogs(receipt) + // exactly 1 StaleUpkeepReportLogs log should be emitted + assert.equal(staleUpkeepReportLogs.length, 1) + }) + + it('handles case when check block hash does not match', async () => { + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', 
logUpkeepId], + ] + for (const [type, id] of tests) { + const latestBlock = await ethers.provider.getBlock('latest') + // Try to transmit a report which has incorrect checkBlockHash + const tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: latestBlock.number - 1, + checkBlockHash: latestBlock.hash, // should be latestBlock.parentHash + }) + + const receipt = await tx.wait() + const reorgedUpkeepReportLogs = parseReorgedUpkeepReportLogs(receipt) + // exactly 1 ReorgedUpkeepReportLogs log should be emitted + assert.equal( + reorgedUpkeepReportLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('handles case when check block number is older than 256 blocks', async () => { + for (let i = 0; i < 256; i++) { + await ethers.provider.send('evm_mine', []) + } + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', logUpkeepId], + ] + for (const [type, id] of tests) { + const latestBlock = await ethers.provider.getBlock('latest') + const old = await ethers.provider.getBlock(latestBlock.number - 256) + // Try to transmit a report which has incorrect checkBlockHash + const tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: old.number, + checkBlockHash: old.hash, + }) + + const receipt = await tx.wait() + const reorgedUpkeepReportLogs = parseReorgedUpkeepReportLogs(receipt) + // exactly 1 ReorgedUpkeepReportLogs log should be emitted + assert.equal( + reorgedUpkeepReportLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('allows bypassing reorg protection with empty blockhash', async () => { + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', logUpkeepId], + ] + for (const [type, id] of tests) { + const latestBlock = await ethers.provider.getBlock('latest') + const tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: latestBlock.number, + checkBlockHash: emptyBytes32, + }) + const receipt = await tx.wait() + 
const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal( + upkeepPerformedLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('allows very old trigger block numbers when bypassing reorg protection with empty blockhash', async () => { + // mine enough blocks so that blockhash(1) is unavailable + for (let i = 0; i <= 256; i++) { + await ethers.provider.send('evm_mine', []) + } + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', logUpkeepId], + ] + for (const [type, id] of tests) { + const tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: 1, + checkBlockHash: emptyBytes32, + }) + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + assert.equal( + upkeepPerformedLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('returns early when future block number is provided as trigger, irrespective of blockhash being present', async () => { + const tests: [string, BigNumber][] = [ + ['conditional', upkeepId], + ['log-trigger', logUpkeepId], + ] + for (const [type, id] of tests) { + const latestBlock = await ethers.provider.getBlock('latest') + + // Should fail when blockhash is empty + let tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: latestBlock.number + 100, + checkBlockHash: emptyBytes32, + }) + let receipt = await tx.wait() + let reorgedUpkeepReportLogs = parseReorgedUpkeepReportLogs(receipt) + // exactly 1 ReorgedUpkeepReportLogs log should be emitted + assert.equal( + reorgedUpkeepReportLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + + // Should also fail when blockhash is not empty + tx = await getTransmitTx(registry, keeper1, [id], { + checkBlockNum: latestBlock.number + 100, + checkBlockHash: latestBlock.hash, + }) + receipt = await tx.wait() + reorgedUpkeepReportLogs = parseReorgedUpkeepReportLogs(receipt) + // exactly 1 ReorgedUpkeepReportLogs log should be 
emitted + assert.equal( + reorgedUpkeepReportLogs.length, + 1, + `wrong log count for ${type} upkeep`, + ) + } + }) + + it('returns early when upkeep is cancelled and cancellation delay has gone', async () => { + const latestBlockReport = await makeLatestBlockReport([upkeepId]) + await registry.connect(admin).cancelUpkeep(upkeepId) + + for (let i = 0; i < cancellationDelay; i++) { + await ethers.provider.send('evm_mine', []) + } + + const tx = await getTransmitTxWithReport( + registry, + keeper1, + latestBlockReport, + ) + + const receipt = await tx.wait() + const cancelledUpkeepReportLogs = + parseCancelledUpkeepReportLogs(receipt) + // exactly 1 CancelledUpkeepReport log should be emitted + assert.equal(cancelledUpkeepReportLogs.length, 1) + }) + + it('does not revert if the target cannot execute', async () => { + await mock.setCanPerform(false) + const tx = await getTransmitTx(registry, keeper1, [upkeepId]) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const success = upkeepPerformedLog.args.success + assert.equal(success, false) + }) + + it('does not revert if the target runs out of gas', async () => { + await mock.setCanPerform(false) + + const tx = await getTransmitTx(registry, keeper1, [upkeepId], { + performGas: 10, // too little gas + }) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const success = upkeepPerformedLog.args.success + assert.equal(success, false) + }) + + it('reverts if not enough gas supplied', async () => { + await evmRevert( + getTransmitTx(registry, keeper1, [upkeepId], { + gasLimit: performGas, + }), + ) + }) + + it('executes the 
data passed to the registry', async () => { + await mock.setCanPerform(true) + + const tx = await getTransmitTx(registry, keeper1, [upkeepId], { + performData: randomBytes, + }) + const receipt = await tx.wait() + + const upkeepPerformedWithABI = [ + 'event UpkeepPerformedWith(bytes upkeepData)', + ] + const iface = new ethers.utils.Interface(upkeepPerformedWithABI) + const parsedLogs = [] + for (let i = 0; i < receipt.logs.length; i++) { + const log = receipt.logs[i] + try { + parsedLogs.push(iface.parseLog(log)) + } catch (e) { + // ignore log + } + } + assert.equal(parsedLogs.length, 1) + assert.equal(parsedLogs[0].args.upkeepData, randomBytes) + }) + + it('uses actual execution price for payment and premium calculation', async () => { + // Actual multiplier is 2, but we set gasPrice to be 1x gasWei + const gasPrice = gasWei.mul(BigNumber.from('1')) + await mock.setCanPerform(true) + const registryPremiumBefore = (await registry.getState()).state + .totalPremium + const tx = await getTransmitTx(registry, keeper1, [upkeepId], { + gasPrice, + }) + const receipt = await tx.wait() + const registryPremiumAfter = (await registry.getState()).state + .totalPremium + const premium = registryPremiumAfter.sub(registryPremiumBefore) + + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + BigNumber.from('1'), // Not the config multiplier, but the actual gas used + paymentPremiumPPB, + flatFeeMicroLink, + ).total.toString(), + totalPayment.toString(), + ) + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + BigNumber.from('1'), // Not the config multiplier, but the actual gas used + 
paymentPremiumPPB, + flatFeeMicroLink, + ).premium.toString(), + premium.toString(), + ) + }) + + it('only pays at a rate up to the gas ceiling [ @skip-coverage ]', async () => { + // Actual multiplier is 2, but we set gasPrice to be 10x + const gasPrice = gasWei.mul(BigNumber.from('10')) + await mock.setCanPerform(true) + + const tx = await getTransmitTx(registry, keeper1, [upkeepId], { + gasPrice, + }) + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + gasCeilingMultiplier, // Should be same with exisitng multiplier + paymentPremiumPPB, + flatFeeMicroLink, + ).total.toString(), + totalPayment.toString(), + ) + }) + + it('correctly accounts for l payment', async () => { + await mock.setCanPerform(true) + // Same as MockArbGasInfo.sol + const l1CostWeiArb = BigNumber.from(1000000) + + let tx = await arbRegistry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + const testUpkeepId = await getUpkeepID(tx) + await arbRegistry.connect(owner).addFunds(testUpkeepId, toWei('100')) + + // Do the thing + tx = await getTransmitTx( + arbRegistry, + keeper1, + [testUpkeepId], + + { gasPrice: gasWei.mul('5') }, // High gas price so that it gets capped + ) + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const gasUsed = upkeepPerformedLog.args.gasUsed + const 
gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + gasCeilingMultiplier, + paymentPremiumPPB, + flatFeeMicroLink, + l1CostWeiArb.div(gasCeilingMultiplier), // Dividing by gasCeilingMultiplier as it gets multiplied later + ).total.toString(), + totalPayment.toString(), + ) + }) + + itMaybe('can self fund', async () => { + const maxPayment = await registry.getMaxPaymentForGas( + Trigger.CONDITION, + performGas, + ) + + // First set auto funding amount to 0 and verify that balance is deducted upon performUpkeep + let initialBalance = toWei('100') + await registry.connect(owner).addFunds(afUpkeepId, initialBalance) + await autoFunderUpkeep.setAutoFundLink(0) + await autoFunderUpkeep.setIsEligible(true) + await getTransmitTx(registry, keeper1, [afUpkeepId]) + + let postUpkeepBalance = (await registry.getUpkeep(afUpkeepId)).balance + assert.isTrue(postUpkeepBalance.lt(initialBalance)) // Balance should be deducted + assert.isTrue(postUpkeepBalance.gte(initialBalance.sub(maxPayment))) // Balance should not be deducted more than maxPayment + + // Now set auto funding amount to 100 wei and verify that the balance increases + initialBalance = postUpkeepBalance + const autoTopupAmount = toWei('100') + await autoFunderUpkeep.setAutoFundLink(autoTopupAmount) + await autoFunderUpkeep.setIsEligible(true) + await getTransmitTx(registry, keeper1, [afUpkeepId]) + + postUpkeepBalance = (await registry.getUpkeep(afUpkeepId)).balance + // Balance should increase by autoTopupAmount and decrease by max maxPayment + assert.isTrue( + postUpkeepBalance.gte( + initialBalance.add(autoTopupAmount).sub(maxPayment), + ), + ) + }) + + it('can self cancel', async () => { + await registry.connect(owner).addFunds(afUpkeepId, toWei('100')) + + await autoFunderUpkeep.setIsEligible(true) + await autoFunderUpkeep.setShouldCancel(true) + + let registration = await 
registry.getUpkeep(afUpkeepId) + const oldExpiration = registration.maxValidBlocknumber + + // Do the thing + await getTransmitTx(registry, keeper1, [afUpkeepId]) + + // Verify upkeep gets cancelled + registration = await registry.getUpkeep(afUpkeepId) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) + + it('reverts when configDigest mismatches', async () => { + const report = await makeLatestBlockReport([upkeepId]) + const reportContext = [emptyBytes32, epochAndRound5_1, emptyBytes32] // wrong config digest + const sigs = signReport(reportContext, report, signers.slice(0, f + 1)) + await evmRevert( + registry + .connect(keeper1) + .transmit( + [reportContext[0], reportContext[1], reportContext[2]], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ), + 'ConfigDigestMismatch()', + ) + }) + + it('reverts with incorrect number of signatures', async () => { + const configDigest = (await registry.getState()).state + .latestConfigDigest + const report = await makeLatestBlockReport([upkeepId]) + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] // wrong config digest + const sigs = signReport(reportContext, report, signers.slice(0, f + 2)) + await evmRevert( + registry + .connect(keeper1) + .transmit( + [reportContext[0], reportContext[1], reportContext[2]], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ), + 'IncorrectNumberOfSignatures()', + ) + }) + + it('reverts with invalid signature for inactive signers', async () => { + const configDigest = (await registry.getState()).state + .latestConfigDigest + const report = await makeLatestBlockReport([upkeepId]) + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] // wrong config digest + const sigs = signReport(reportContext, report, [ + new ethers.Wallet(ethers.Wallet.createRandom()), + new ethers.Wallet(ethers.Wallet.createRandom()), + ]) + await evmRevert( + registry + .connect(keeper1) + .transmit( + [reportContext[0], 
reportContext[1], reportContext[2]], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ), + 'OnlyActiveSigners()', + ) + }) + + it('reverts with invalid signature for duplicated signers', async () => { + const configDigest = (await registry.getState()).state + .latestConfigDigest + const report = await makeLatestBlockReport([upkeepId]) + const reportContext = [configDigest, epochAndRound5_1, emptyBytes32] // wrong config digest + const sigs = signReport(reportContext, report, [signer1, signer1]) + await evmRevert( + registry + .connect(keeper1) + .transmit( + [reportContext[0], reportContext[1], reportContext[2]], + report, + sigs.rs, + sigs.ss, + sigs.vs, + ), + 'DuplicateSigners()', + ) + }) + + itMaybe( + 'has a large enough gas overhead to cover upkeep that use all its gas [ @skip-coverage ]', + async () => { + await registry.connect(owner).setConfigTypeSafe( + signerAddresses, + keeperAddresses, + 10, // maximise f to maximise overhead + config, + offchainVersion, + offchainBytes, + ) + const tx = await registry + .connect(owner) + ['registerUpkeep(address,uint32,address,bytes,bytes)']( + mock.address, + maxPerformGas, // max allowed gas + await admin.getAddress(), + randomBytes, + '0x', + ) + const testUpkeepId = await getUpkeepID(tx) + await registry.connect(admin).addFunds(testUpkeepId, toWei('100')) + + let performData = '0x' + for (let i = 0; i < maxPerformDataSize.toNumber(); i++) { + performData += '11' + } // max allowed performData + + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(maxPerformGas) + + await getTransmitTx(registry, keeper1, [testUpkeepId], { + gasLimit: maxPerformGas.add(transmitGasOverhead), + numSigners: 11, + performData, + }) // Should not revert + }, + ) + + itMaybe( + 'performs upkeep, deducts payment, updates lastPerformed and emits events', + async () => { + await mock.setCanPerform(true) + + for (const i in fArray) { + const newF = fArray[i] + await registry + .connect(owner) + .setConfigTypeSafe( + signerAddresses, 
+ keeperAddresses, + newF, + config, + offchainVersion, + offchainBytes, + ) + const checkBlock = await ethers.provider.getBlock('latest') + + const keeperBefore = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const registrationBefore = await registry.getUpkeep(upkeepId) + const registryPremiumBefore = (await registry.getState()).state + .totalPremium + const keeperLinkBefore = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkBefore = await linkToken.balanceOf( + registry.address, + ) + + // Do the thing + const tx = await getTransmitTx(registry, keeper1, [upkeepId], { + checkBlockNum: checkBlock.number, + checkBlockHash: checkBlock.hash, + numSigners: newF + 1, + }) + + const receipt = await tx.wait() + + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const id = upkeepPerformedLog.args.id + const success = upkeepPerformedLog.args.success + const trigger = upkeepPerformedLog.args.trigger + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + assert.equal(id.toString(), upkeepId.toString()) + assert.equal(success, true) + assert.equal( + trigger, + encodeBlockTrigger({ + blockNum: checkBlock.number, + blockHash: checkBlock.hash, + }), + ) + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(gasOverhead.gt(BigNumber.from('0'))) + assert.isTrue(totalPayment.gt(BigNumber.from('0'))) + + const keeperAfter = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const registrationAfter = await registry.getUpkeep(upkeepId) + const keeperLinkAfter = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkAfter = await linkToken.balanceOf( + registry.address, + ) + const 
registryPremiumAfter = (await registry.getState()).state + .totalPremium + const premium = registryPremiumAfter.sub(registryPremiumBefore) + // Keeper payment is gasPayment + premium / num keepers + const keeperPayment = totalPayment + .sub(premium) + .add(premium.div(BigNumber.from(keeperAddresses.length))) + + assert.equal( + keeperAfter.balance.sub(keeperPayment).toString(), + keeperBefore.balance.toString(), + ) + assert.equal( + registrationBefore.balance.sub(totalPayment).toString(), + registrationAfter.balance.toString(), + ) + assert.isTrue(keeperLinkAfter.eq(keeperLinkBefore)) + assert.isTrue(registryLinkBefore.eq(registryLinkAfter)) + + // Amount spent should be updated correctly + assert.equal( + registrationAfter.amountSpent.sub(totalPayment).toString(), + registrationBefore.amountSpent.toString(), + ) + assert.isTrue( + registrationAfter.amountSpent + .sub(registrationBefore.amountSpent) + .eq(registrationBefore.balance.sub(registrationAfter.balance)), + ) + // Last perform block number should be updated + assert.equal( + registrationAfter.lastPerformedBlockNumber.toString(), + tx.blockNumber?.toString(), + ) + + // Latest epoch should be 5 + assert.equal((await registry.getState()).state.latestEpoch, 5) + } + }, + ) + + describeMaybe( + 'Gas benchmarking conditional upkeeps [ @skip-coverage ]', + function () { + const fs = [1, 10] + fs.forEach(function (newF) { + it( + 'When f=' + + newF + + ' calculates gas overhead appropriately within a margin for different scenarios', + async () => { + // Perform the upkeep once to remove non-zero storage slots and have predictable gas measurement + let tx = await getTransmitTx(registry, keeper1, [upkeepId]) + await tx.wait() + + // Different test scenarios + let longBytes = '0x' + for (let i = 0; i < maxPerformDataSize.toNumber(); i++) { + longBytes += '11' + } + const upkeepSuccessArray = [true, false] + const performGasArray = [5000, performGas] + const performDataArray = ['0x', longBytes] + + for (const i in 
upkeepSuccessArray) { + for (const j in performGasArray) { + for (const k in performDataArray) { + const upkeepSuccess = upkeepSuccessArray[i] + const performGas = performGasArray[j] + const performData = performDataArray[k] + + await mock.setCanPerform(upkeepSuccess) + await mock.setPerformGasToBurn(performGas) + await registry + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + newF, + config, + offchainVersion, + offchainBytes, + ) + tx = await getTransmitTx(registry, keeper1, [upkeepId], { + numSigners: newF + 1, + performData, + }) + const receipt = await tx.wait() + const upkeepPerformedLogs = + parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const upkeepGasUsed = upkeepPerformedLog.args.gasUsed + const chargedGasOverhead = + upkeepPerformedLog.args.gasOverhead + const actualGasOverhead = + receipt.gasUsed.sub(upkeepGasUsed) + + assert.isTrue(upkeepGasUsed.gt(BigNumber.from('0'))) + assert.isTrue(chargedGasOverhead.gt(BigNumber.from('0'))) + + console.log( + 'Gas Benchmarking conditional upkeeps:', + 'upkeepSuccess=', + upkeepSuccess, + 'performGas=', + performGas.toString(), + 'performData length=', + performData.length / 2 - 1, + 'sig verification ( f =', + newF, + '): calculated overhead: ', + chargedGasOverhead.toString(), + ' actual overhead: ', + actualGasOverhead.toString(), + ' margin over gasUsed: ', + chargedGasOverhead.sub(actualGasOverhead).toString(), + ) + + // Overhead should not get capped + const gasOverheadCap = registryConditionalOverhead + .add( + registryPerSignerGasOverhead.mul( + BigNumber.from(newF + 1), + ), + ) + .add( + BigNumber.from( + registryPerPerformByteGasOverhead.toNumber() * + performData.length, + ), + ) + const gasCapMinusOverhead = + gasOverheadCap.sub(chargedGasOverhead) + assert.isTrue( + gasCapMinusOverhead.gt(BigNumber.from(0)), + 'Gas overhead got 
capped. Verify gas overhead variables in test match those in the registry. To not have the overheads capped increase REGISTRY_GAS_OVERHEAD by atleast ' + + gasCapMinusOverhead.toString(), + ) + // total gas charged should be greater than tx gas but within gasCalculationMargin + assert.isTrue( + chargedGasOverhead.gt(actualGasOverhead), + 'Gas overhead calculated is too low, increase account gas variables (ACCOUNTING_FIXED_GAS_OVERHEAD/ACCOUNTING_PER_SIGNER_GAS_OVERHEAD) by atleast ' + + actualGasOverhead.sub(chargedGasOverhead).toString(), + ) + + assert.isTrue( + chargedGasOverhead + .sub(actualGasOverhead) + .lt(gasCalculationMargin), + ), + 'Gas overhead calculated is too high, decrease account gas variables (ACCOUNTING_FIXED_GAS_OVERHEAD/ACCOUNTING_PER_SIGNER_GAS_OVERHEAD) by atleast ' + + chargedGasOverhead + .sub(chargedGasOverhead) + .sub(gasCalculationMargin) + .toString() + } + } + } + }, + ) + }) + }, + ) + + describeMaybe( + 'Gas benchmarking log upkeeps [ @skip-coverage ]', + function () { + const fs = [1, 10] + fs.forEach(function (newF) { + it( + 'When f=' + + newF + + ' calculates gas overhead appropriately within a margin', + async () => { + // Perform the upkeep once to remove non-zero storage slots and have predictable gas measurement + let tx = await getTransmitTx(registry, keeper1, [logUpkeepId]) + await tx.wait() + const performData = '0x' + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(performGas) + await registry.setConfigTypeSafe( + signerAddresses, + keeperAddresses, + newF, + config, + offchainVersion, + offchainBytes, + ) + tx = await getTransmitTx(registry, keeper1, [logUpkeepId], { + numSigners: newF + 1, + performData, + }) + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly 1 Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, 1) + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const upkeepGasUsed = 
upkeepPerformedLog.args.gasUsed + const chargedGasOverhead = upkeepPerformedLog.args.gasOverhead + const actualGasOverhead = receipt.gasUsed.sub(upkeepGasUsed) + + assert.isTrue(upkeepGasUsed.gt(BigNumber.from('0'))) + assert.isTrue(chargedGasOverhead.gt(BigNumber.from('0'))) + + console.log( + 'Gas Benchmarking log upkeeps:', + 'upkeepSuccess=', + true, + 'performGas=', + performGas.toString(), + 'performData length=', + performData.length / 2 - 1, + 'sig verification ( f =', + newF, + '): calculated overhead: ', + chargedGasOverhead.toString(), + ' actual overhead: ', + actualGasOverhead.toString(), + ' margin over gasUsed: ', + chargedGasOverhead.sub(actualGasOverhead).toString(), + ) + + // Overhead should not get capped + const gasOverheadCap = registryLogOverhead + .add( + registryPerSignerGasOverhead.mul(BigNumber.from(newF + 1)), + ) + .add( + BigNumber.from( + registryPerPerformByteGasOverhead.toNumber() * + performData.length, + ), + ) + const gasCapMinusOverhead = + gasOverheadCap.sub(chargedGasOverhead) + assert.isTrue( + gasCapMinusOverhead.gt(BigNumber.from(0)), + 'Gas overhead got capped. Verify gas overhead variables in test match those in the registry. 
To not have the overheads capped increase REGISTRY_GAS_OVERHEAD by atleast ' + + gasCapMinusOverhead.toString(), + ) + // total gas charged should be greater than tx gas but within gasCalculationMargin + assert.isTrue( + chargedGasOverhead.gt(actualGasOverhead), + 'Gas overhead calculated is too low, increase account gas variables (ACCOUNTING_FIXED_GAS_OVERHEAD/ACCOUNTING_PER_SIGNER_GAS_OVERHEAD) by atleast ' + + actualGasOverhead.sub(chargedGasOverhead).toString(), + ) + + assert.isTrue( + chargedGasOverhead + .sub(actualGasOverhead) + .lt(gasCalculationMargin), + ), + 'Gas overhead calculated is too high, decrease account gas variables (ACCOUNTING_FIXED_GAS_OVERHEAD/ACCOUNTING_PER_SIGNER_GAS_OVERHEAD) by atleast ' + + chargedGasOverhead + .sub(chargedGasOverhead) + .sub(gasCalculationMargin) + .toString() + }, + ) + }) + }, + ) + }) + }) + + describeMaybe( + '#transmit with upkeep batches [ @skip-coverage ]', + function () { + const numPassingConditionalUpkeepsArray = [0, 1, 5] + const numPassingLogUpkeepsArray = [0, 1, 5] + const numFailingUpkeepsArray = [0, 3] + + for (let idx = 0; idx < numPassingConditionalUpkeepsArray.length; idx++) { + for (let jdx = 0; jdx < numPassingLogUpkeepsArray.length; jdx++) { + for (let kdx = 0; kdx < numFailingUpkeepsArray.length; kdx++) { + const numPassingConditionalUpkeeps = + numPassingConditionalUpkeepsArray[idx] + const numPassingLogUpkeeps = numPassingLogUpkeepsArray[jdx] + const numFailingUpkeeps = numFailingUpkeepsArray[kdx] + if ( + numPassingConditionalUpkeeps == 0 && + numPassingLogUpkeeps == 0 + ) { + continue + } + it( + '[Conditional:' + + numPassingConditionalUpkeeps + + ',Log:' + + numPassingLogUpkeeps + + ',Failures:' + + numFailingUpkeeps + + '] performs successful upkeeps and does not charge failing upkeeps', + async () => { + const allUpkeeps = await getMultipleUpkeepsDeployedAndFunded( + numPassingConditionalUpkeeps, + numPassingLogUpkeeps, + numFailingUpkeeps, + ) + const passingConditionalUpkeepIds = + 
allUpkeeps.passingConditionalUpkeepIds + const passingLogUpkeepIds = allUpkeeps.passingLogUpkeepIds + const failingUpkeepIds = allUpkeeps.failingUpkeepIds + + const keeperBefore = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const keeperLinkBefore = await linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkBefore = await linkToken.balanceOf( + registry.address, + ) + const registryPremiumBefore = (await registry.getState()).state + .totalPremium + const registrationConditionalPassingBefore = await Promise.all( + passingConditionalUpkeepIds.map(async (id) => { + const reg = await registry.getUpkeep(BigNumber.from(id)) + assert.equal(reg.lastPerformedBlockNumber.toString(), '0') + return reg + }), + ) + const registrationLogPassingBefore = await Promise.all( + passingLogUpkeepIds.map(async (id) => { + const reg = await registry.getUpkeep(BigNumber.from(id)) + assert.equal(reg.lastPerformedBlockNumber.toString(), '0') + return reg + }), + ) + const registrationFailingBefore = await Promise.all( + failingUpkeepIds.map(async (id) => { + const reg = await registry.getUpkeep(BigNumber.from(id)) + assert.equal(reg.lastPerformedBlockNumber.toString(), '0') + return reg + }), + ) + + const tx = await getTransmitTx( + registry, + keeper1, + passingConditionalUpkeepIds.concat( + passingLogUpkeepIds.concat(failingUpkeepIds), + ), + ) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly numPassingUpkeeps Upkeep Performed should be emitted + assert.equal( + upkeepPerformedLogs.length, + numPassingConditionalUpkeeps + numPassingLogUpkeeps, + ) + const insufficientFundsLogs = + parseInsufficientFundsUpkeepReportLogs(receipt) + // exactly numFailingUpkeeps Upkeep Performed should be emitted + assert.equal(insufficientFundsLogs.length, numFailingUpkeeps) + + const keeperAfter = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const keeperLinkAfter = await 
linkToken.balanceOf( + await keeper1.getAddress(), + ) + const registryLinkAfter = await linkToken.balanceOf( + registry.address, + ) + const registrationConditionalPassingAfter = await Promise.all( + passingConditionalUpkeepIds.map(async (id) => { + return await registry.getUpkeep(BigNumber.from(id)) + }), + ) + const registrationLogPassingAfter = await Promise.all( + passingLogUpkeepIds.map(async (id) => { + return await registry.getUpkeep(BigNumber.from(id)) + }), + ) + const registrationFailingAfter = await Promise.all( + failingUpkeepIds.map(async (id) => { + return await registry.getUpkeep(BigNumber.from(id)) + }), + ) + const registryPremiumAfter = (await registry.getState()).state + .totalPremium + const premium = registryPremiumAfter.sub(registryPremiumBefore) + + let netPayment = BigNumber.from('0') + for (let i = 0; i < numPassingConditionalUpkeeps; i++) { + const id = upkeepPerformedLogs[i].args.id + const gasUsed = upkeepPerformedLogs[i].args.gasUsed + const gasOverhead = upkeepPerformedLogs[i].args.gasOverhead + const totalPayment = upkeepPerformedLogs[i].args.totalPayment + + expect(id).to.equal(passingConditionalUpkeepIds[i]) + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(gasOverhead.gt(BigNumber.from('0'))) + assert.isTrue(totalPayment.gt(BigNumber.from('0'))) + + // Balance should be deducted + assert.equal( + registrationConditionalPassingBefore[i].balance + .sub(totalPayment) + .toString(), + registrationConditionalPassingAfter[i].balance.toString(), + ) + + // Amount spent should be updated correctly + assert.equal( + registrationConditionalPassingAfter[i].amountSpent + .sub(totalPayment) + .toString(), + registrationConditionalPassingBefore[ + i + ].amountSpent.toString(), + ) + + // Last perform block number should be updated + assert.equal( + registrationConditionalPassingAfter[ + i + ].lastPerformedBlockNumber.toString(), + tx.blockNumber?.toString(), + ) + + netPayment = netPayment.add(totalPayment) + } + + for (let i = 
0; i < numPassingLogUpkeeps; i++) { + const id = + upkeepPerformedLogs[numPassingConditionalUpkeeps + i].args + .id + const gasUsed = + upkeepPerformedLogs[numPassingConditionalUpkeeps + i].args + .gasUsed + const gasOverhead = + upkeepPerformedLogs[numPassingConditionalUpkeeps + i].args + .gasOverhead + const totalPayment = + upkeepPerformedLogs[numPassingConditionalUpkeeps + i].args + .totalPayment + + expect(id).to.equal(passingLogUpkeepIds[i]) + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(gasOverhead.gt(BigNumber.from('0'))) + assert.isTrue(totalPayment.gt(BigNumber.from('0'))) + + // Balance should be deducted + assert.equal( + registrationLogPassingBefore[i].balance + .sub(totalPayment) + .toString(), + registrationLogPassingAfter[i].balance.toString(), + ) + + // Amount spent should be updated correctly + assert.equal( + registrationLogPassingAfter[i].amountSpent + .sub(totalPayment) + .toString(), + registrationLogPassingBefore[i].amountSpent.toString(), + ) + + // Last perform block number should not be updated for log triggers + assert.equal( + registrationLogPassingAfter[ + i + ].lastPerformedBlockNumber.toString(), + '0', + ) + + netPayment = netPayment.add(totalPayment) + } + + for (let i = 0; i < numFailingUpkeeps; i++) { + // InsufficientFunds log should be emitted + const id = insufficientFundsLogs[i].args.id + expect(id).to.equal(failingUpkeepIds[i]) + + // Balance and amount spent should be same + assert.equal( + registrationFailingBefore[i].balance.toString(), + registrationFailingAfter[i].balance.toString(), + ) + assert.equal( + registrationFailingBefore[i].amountSpent.toString(), + registrationFailingAfter[i].amountSpent.toString(), + ) + + // Last perform block number should not be updated + assert.equal( + registrationFailingAfter[ + i + ].lastPerformedBlockNumber.toString(), + '0', + ) + } + + // Keeper payment is gasPayment + premium / num keepers + const keeperPayment = netPayment + .sub(premium) + 
.add(premium.div(BigNumber.from(keeperAddresses.length))) + + // Keeper should be paid net payment for all passed upkeeps + assert.equal( + keeperAfter.balance.sub(keeperPayment).toString(), + keeperBefore.balance.toString(), + ) + + assert.isTrue(keeperLinkAfter.eq(keeperLinkBefore)) + assert.isTrue(registryLinkBefore.eq(registryLinkAfter)) + }, + ) + + it( + '[Conditional:' + + numPassingConditionalUpkeeps + + ',Log' + + numPassingLogUpkeeps + + ',Failures:' + + numFailingUpkeeps + + '] splits gas overhead appropriately among performed upkeeps [ @skip-coverage ]', + async () => { + const allUpkeeps = await getMultipleUpkeepsDeployedAndFunded( + numPassingConditionalUpkeeps, + numPassingLogUpkeeps, + numFailingUpkeeps, + ) + const passingConditionalUpkeepIds = + allUpkeeps.passingConditionalUpkeepIds + const passingLogUpkeepIds = allUpkeeps.passingLogUpkeepIds + const failingUpkeepIds = allUpkeeps.failingUpkeepIds + + // Perform the upkeeps once to remove non-zero storage slots and have predictable gas measurement + let tx = await getTransmitTx( + registry, + keeper1, + passingConditionalUpkeepIds.concat( + passingLogUpkeepIds.concat(failingUpkeepIds), + ), + ) + + await tx.wait() + + // Do the actual thing + + tx = await getTransmitTx( + registry, + keeper1, + passingConditionalUpkeepIds.concat( + passingLogUpkeepIds.concat(failingUpkeepIds), + ), + ) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly numPassingUpkeeps Upkeep Performed should be emitted + assert.equal( + upkeepPerformedLogs.length, + numPassingConditionalUpkeeps + numPassingLogUpkeeps, + ) + + const gasConditionalOverheadCap = + registryConditionalOverhead.add( + registryPerSignerGasOverhead.mul(BigNumber.from(f + 1)), + ) + const gasLogOverheadCap = registryLogOverhead.add( + registryPerSignerGasOverhead.mul(BigNumber.from(f + 1)), + ) + + const overheadCanGetCapped = + numFailingUpkeeps > 0 && + numPassingConditionalUpkeeps <= 1 && + 
numPassingLogUpkeeps <= 1 + // Can happen if there are failing upkeeps and only 1 successful upkeep of each type + let netGasUsedPlusOverhead = BigNumber.from('0') + + for (let i = 0; i < numPassingConditionalUpkeeps; i++) { + const gasUsed = upkeepPerformedLogs[i].args.gasUsed + const gasOverhead = upkeepPerformedLogs[i].args.gasOverhead + + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(gasOverhead.gt(BigNumber.from('0'))) + + // Overhead should not exceed capped + assert.isTrue(gasOverhead.lte(gasConditionalOverheadCap)) + + // Overhead should be same for every upkeep since they have equal performData, hence same caps + assert.isTrue( + gasOverhead.eq(upkeepPerformedLogs[0].args.gasOverhead), + ) + + netGasUsedPlusOverhead = netGasUsedPlusOverhead + .add(gasUsed) + .add(gasOverhead) + } + for (let i = 0; i < numPassingLogUpkeeps; i++) { + const gasUsed = + upkeepPerformedLogs[numPassingConditionalUpkeeps + i].args + .gasUsed + const gasOverhead = + upkeepPerformedLogs[numPassingConditionalUpkeeps + i].args + .gasOverhead + + assert.isTrue(gasUsed.gt(BigNumber.from('0'))) + assert.isTrue(gasOverhead.gt(BigNumber.from('0'))) + + // Overhead should not exceed capped + assert.isTrue(gasOverhead.lte(gasLogOverheadCap)) + + // Overhead should be same for every upkeep since they have equal performData, hence same caps + assert.isTrue( + gasOverhead.eq( + upkeepPerformedLogs[numPassingConditionalUpkeeps].args + .gasOverhead, + ), + ) + + netGasUsedPlusOverhead = netGasUsedPlusOverhead + .add(gasUsed) + .add(gasOverhead) + } + + const overheadsGotCapped = + (numPassingConditionalUpkeeps > 0 && + upkeepPerformedLogs[0].args.gasOverhead.eq( + gasConditionalOverheadCap, + )) || + (numPassingLogUpkeeps > 0 && + upkeepPerformedLogs[ + numPassingConditionalUpkeeps + ].args.gasOverhead.eq(gasLogOverheadCap)) + // Should only get capped in certain scenarios + if (overheadsGotCapped) { + assert.isTrue( + overheadCanGetCapped, + 'Gas overhead got capped. 
Verify gas overhead variables in test match those in the registry. To not have the overheads capped increase REGISTRY_GAS_OVERHEAD', + ) + } + + console.log( + 'Gas Benchmarking - batching (passedConditionalUpkeeps: ', + numPassingConditionalUpkeeps, + 'passedLogUpkeeps:', + numPassingLogUpkeeps, + 'failedUpkeeps:', + numFailingUpkeeps, + '): ', + 'overheadsGotCapped', + overheadsGotCapped, + numPassingConditionalUpkeeps > 0 + ? 'calculated conditional overhead' + : '', + numPassingConditionalUpkeeps > 0 + ? upkeepPerformedLogs[0].args.gasOverhead.toString() + : '', + numPassingLogUpkeeps > 0 ? 'calculated log overhead' : '', + numPassingLogUpkeeps > 0 + ? upkeepPerformedLogs[ + numPassingConditionalUpkeeps + ].args.gasOverhead.toString() + : '', + ' margin over gasUsed', + netGasUsedPlusOverhead.sub(receipt.gasUsed).toString(), + ) + + // If overheads dont get capped then total gas charged should be greater than tx gas + // We don't check whether the net is within gasMargin as the margin changes with numFailedUpkeeps + // Which is ok, as long as individual gas overhead is capped + if (!overheadsGotCapped) { + assert.isTrue( + netGasUsedPlusOverhead.gt(receipt.gasUsed), + 'Gas overhead is too low, increase ACCOUNTING_PER_UPKEEP_GAS_OVERHEAD', + ) + } + }, + ) + } + } + } + + it('has enough perform gas overhead for large batches [ @skip-coverage ]', async () => { + const numUpkeeps = 20 + const upkeepIds: BigNumber[] = [] + let totalPerformGas = BigNumber.from('0') + for (let i = 0; i < numUpkeeps; i++) { + const mock = await upkeepMockFactory.deploy() + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + const testUpkeepId = await getUpkeepID(tx) + upkeepIds.push(testUpkeepId) + + // Add funds to passing upkeeps + await registry.connect(owner).addFunds(testUpkeepId, toWei('10')) + + await mock.setCanPerform(true) + await 
mock.setPerformGasToBurn(performGas) + + totalPerformGas = totalPerformGas.add(performGas) + } + + // Should revert with no overhead added + await evmRevert( + getTransmitTx(registry, keeper1, upkeepIds, { + gasLimit: totalPerformGas, + }), + ) + // Should not revert with overhead added + await getTransmitTx(registry, keeper1, upkeepIds, { + gasLimit: totalPerformGas.add(transmitGasOverhead), + }) + }) + + it('splits l2 payment among performed upkeeps', async () => { + const numUpkeeps = 7 + const upkeepIds: BigNumber[] = [] + // Same as MockArbGasInfo.sol + const l1CostWeiArb = BigNumber.from(1000000) + + for (let i = 0; i < numUpkeeps; i++) { + const mock = await upkeepMockFactory.deploy() + const tx = await arbRegistry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + const testUpkeepId = await getUpkeepID(tx) + upkeepIds.push(testUpkeepId) + + // Add funds to passing upkeeps + await arbRegistry.connect(owner).addFunds(testUpkeepId, toWei('100')) + } + + // Do the thing + const tx = await getTransmitTx( + arbRegistry, + keeper1, + upkeepIds, + + { gasPrice: gasWei.mul('5') }, // High gas price so that it gets capped + ) + + const receipt = await tx.wait() + const upkeepPerformedLogs = parseUpkeepPerformedLogs(receipt) + // exactly numPassingUpkeeps Upkeep Performed should be emitted + assert.equal(upkeepPerformedLogs.length, numUpkeeps) + + // Verify the payment calculation in upkeepPerformed[0] + const upkeepPerformedLog = upkeepPerformedLogs[0] + + const gasUsed = upkeepPerformedLog.args.gasUsed + const gasOverhead = upkeepPerformedLog.args.gasOverhead + const totalPayment = upkeepPerformedLog.args.totalPayment + + assert.equal( + linkForGas( + gasUsed, + gasOverhead, + gasCeilingMultiplier, + paymentPremiumPPB, + flatFeeMicroLink, + l1CostWeiArb.div(gasCeilingMultiplier), // Dividing by gasCeilingMultiplier as it gets multiplied later + 
BigNumber.from(numUpkeeps), + ).total.toString(), + totalPayment.toString(), + ) + }) + }, + ) + + describe('#recoverFunds', () => { + const sent = toWei('7') + + beforeEach(async () => { + await linkToken.connect(admin).approve(registry.address, toWei('100')) + await linkToken + .connect(owner) + .transfer(await keeper1.getAddress(), toWei('1000')) + + // add funds to upkeep 1 and perform and withdraw some payment + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), emptyBytes, emptyBytes) + + const id1 = await getUpkeepID(tx) + await registry.connect(admin).addFunds(id1, toWei('5')) + + await getTransmitTx(registry, keeper1, [id1]) + await getTransmitTx(registry, keeper2, [id1]) + await getTransmitTx(registry, keeper3, [id1]) + + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + + // transfer funds directly to the registry + await linkToken.connect(keeper1).transfer(registry.address, sent) + + // add funds to upkeep 2 and perform and withdraw some payment + const tx2 = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), emptyBytes, emptyBytes) + const id2 = await getUpkeepID(tx2) + await registry.connect(admin).addFunds(id2, toWei('5')) + + await getTransmitTx(registry, keeper1, [id2]) + await getTransmitTx(registry, keeper2, [id2]) + await getTransmitTx(registry, keeper3, [id2]) + + await registry + .connect(payee2) + .withdrawPayment( + await keeper2.getAddress(), + await nonkeeper.getAddress(), + ) + + // transfer funds using onTokenTransfer + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [id2]) + await linkToken + .connect(owner) + .transferAndCall(registry.address, toWei('1'), data) + + // withdraw some funds + await registry.connect(owner).cancelUpkeep(id1) + await 
registry + .connect(admin) + .withdrawFunds(id1, await nonkeeper.getAddress()) + }) + + it('reverts if not called by owner', async () => { + await evmRevert( + registry.connect(keeper1).recoverFunds(), + 'Only callable by owner', + ) + }) + + it('allows any funds that have been accidentally transfered to be moved', async () => { + const balanceBefore = await linkToken.balanceOf(registry.address) + const ownerBefore = await linkToken.balanceOf(await owner.getAddress()) + + await registry.connect(owner).recoverFunds() + + const balanceAfter = await linkToken.balanceOf(registry.address) + const ownerAfter = await linkToken.balanceOf(await owner.getAddress()) + + assert.isTrue(balanceBefore.eq(balanceAfter.add(sent))) + assert.isTrue(ownerAfter.eq(ownerBefore.add(sent))) + }) + }) + + describe('#getMinBalanceForUpkeep / #checkUpkeep / #transmit', () => { + it('calculates the minimum balance appropriately', async () => { + await mock.setCanCheck(true) + + const oneWei = BigNumber.from(1) + const minBalance = await registry.getMinBalanceForUpkeep(upkeepId) + const tooLow = minBalance.sub(oneWei) + + await registry.connect(admin).addFunds(upkeepId, tooLow) + let checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.INSUFFICIENT_BALANCE, + ) + + await registry.connect(admin).addFunds(upkeepId, oneWei) + checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + assert.equal(checkUpkeepResult.upkeepNeeded, true) + }) + + it('uses maxPerformData size in checkUpkeep but actual performDataSize in transmit', async () => { + const tx1 = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + const upkeepID1 = await getUpkeepID(tx1) + 
const tx2 = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + const upkeepID2 = await getUpkeepID(tx2) + await mock.setCanCheck(true) + await mock.setCanPerform(true) + + // upkeep 1 is underfunded, 2 is fully funded + const minBalance1 = ( + await registry.getMinBalanceForUpkeep(upkeepID1) + ).sub(1) + const minBalance2 = await registry.getMinBalanceForUpkeep(upkeepID2) + await registry.connect(owner).addFunds(upkeepID1, minBalance1) + await registry.connect(owner).addFunds(upkeepID2, minBalance2) + + // upkeep 1 check should return false, 2 should return true + let checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepID1) + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.INSUFFICIENT_BALANCE, + ) + + checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepID2) + assert.equal(checkUpkeepResult.upkeepNeeded, true) + + // upkeep 1 perform should return with insufficient balance using max performData size + let maxPerformData = '0x' + for (let i = 0; i < maxPerformDataSize.toNumber(); i++) { + maxPerformData += '11' + } + + const tx = await getTransmitTx(registry, keeper1, [upkeepID1], { + gasPrice: gasWei.mul(gasCeilingMultiplier), + performData: maxPerformData, + }) + + const receipt = await tx.wait() + const insufficientFundsUpkeepReportLogs = + parseInsufficientFundsUpkeepReportLogs(receipt) + // exactly 1 InsufficientFundsUpkeepReportLogs log should be emitted + assert.equal(insufficientFundsUpkeepReportLogs.length, 1) + + // upkeep 1 perform should succeed with empty performData + await getTransmitTx(registry, keeper1, [upkeepID1], { + gasPrice: gasWei.mul(gasCeilingMultiplier), + }), + // upkeep 2 perform should succeed with max performData size + await 
getTransmitTx(registry, keeper1, [upkeepID2], { + gasPrice: gasWei.mul(gasCeilingMultiplier), + performData: maxPerformData, + }) + }) + }) + + describe('#withdrawFunds', () => { + let upkeepId2: BigNumber + + beforeEach(async () => { + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), randomBytes, '0x') + upkeepId2 = await getUpkeepID(tx) + + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + await registry.connect(admin).addFunds(upkeepId2, toWei('100')) + + // Do a perform so that upkeep is charged some amount + await getTransmitTx(registry, keeper1, [upkeepId]) + await getTransmitTx(registry, keeper1, [upkeepId2]) + }) + + it('reverts if called on a non existing ID', async () => { + await evmRevert( + registry + .connect(admin) + .withdrawFunds(upkeepId.add(1), await payee1.getAddress()), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry + .connect(owner) + .withdrawFunds(upkeepId, await payee1.getAddress()), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if called on an uncanceled upkeep', async () => { + await evmRevert( + registry + .connect(admin) + .withdrawFunds(upkeepId, await payee1.getAddress()), + 'UpkeepNotCanceled()', + ) + }) + + it('reverts if called with the 0 address', async () => { + await evmRevert( + registry.connect(admin).withdrawFunds(upkeepId, zeroAddress), + 'InvalidRecipient()', + ) + }) + + describe('after the registration is paused, then cancelled', () => { + it('allows the admin to withdraw', async () => { + const balance = await registry.getBalance(upkeepId) + const payee = await payee1.getAddress() + await registry.connect(admin).pauseUpkeep(upkeepId) + await registry.connect(owner).cancelUpkeep(upkeepId) + await expect(() => + registry.connect(admin).withdrawFunds(upkeepId, payee), + ).to.changeTokenBalance(linkToken, 
payee1, balance) + }) + }) + + describe('after the registration is cancelled', () => { + beforeEach(async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + await registry.connect(owner).cancelUpkeep(upkeepId2) + }) + + it('can be called successively on two upkeeps', async () => { + await registry + .connect(admin) + .withdrawFunds(upkeepId, await payee1.getAddress()) + await registry + .connect(admin) + .withdrawFunds(upkeepId2, await payee1.getAddress()) + }) + + it('moves the funds out and updates the balance and emits an event', async () => { + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const registryBefore = await linkToken.balanceOf(registry.address) + + let registration = await registry.getUpkeep(upkeepId) + const previousBalance = registration.balance + + const tx = await registry + .connect(admin) + .withdrawFunds(upkeepId, await payee1.getAddress()) + await expect(tx) + .to.emit(registry, 'FundsWithdrawn') + .withArgs(upkeepId, previousBalance, await payee1.getAddress()) + + const payee1After = await linkToken.balanceOf(await payee1.getAddress()) + const registryAfter = await linkToken.balanceOf(registry.address) + + assert.isTrue(payee1Before.add(previousBalance).eq(payee1After)) + assert.isTrue(registryBefore.sub(previousBalance).eq(registryAfter)) + + registration = await registry.getUpkeep(upkeepId) + assert.equal(0, registration.balance.toNumber()) + }) + }) + }) + + describe('#simulatePerformUpkeep', () => { + it('reverts if called by non zero address', async () => { + await evmRevert( + registry + .connect(await owner.getAddress()) + .callStatic.simulatePerformUpkeep(upkeepId, '0x'), + 'OnlySimulatedBackend()', + ) + }) + + it('reverts when registry is paused', async () => { + await registry.connect(owner).pause() + await evmRevert( + registry + .connect(zeroAddress) + .callStatic.simulatePerformUpkeep(upkeepId, '0x'), + 'RegistryPaused()', + ) + }) + + it('returns false and gasUsed when perform 
fails', async () => { + await mock.setCanPerform(false) + + const simulatePerformResult = await registry + .connect(zeroAddress) + .callStatic.simulatePerformUpkeep(upkeepId, '0x') + + assert.equal(simulatePerformResult.success, false) + assert.isTrue(simulatePerformResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('returns true, gasUsed, and performGas when perform succeeds', async () => { + await mock.setCanPerform(true) + + const simulatePerformResult = await registry + .connect(zeroAddress) + .callStatic.simulatePerformUpkeep(upkeepId, '0x') + + assert.equal(simulatePerformResult.success, true) + assert.isTrue(simulatePerformResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('returns correct amount of gasUsed when perform succeeds', async () => { + await mock.setCanPerform(true) + await mock.setPerformGasToBurn(performGas) + + const simulatePerformResult = await registry + .connect(zeroAddress) + .callStatic.simulatePerformUpkeep(upkeepId, '0x') + + assert.equal(simulatePerformResult.success, true) + // Full execute gas should be used, with some performGasBuffer(1000) + assert.isTrue( + simulatePerformResult.gasUsed.gt( + performGas.sub(BigNumber.from('1000')), + ), + ) + }) + }) + + describe('#checkUpkeep', () => { + it('reverts if called by non zero address', async () => { + await evmRevert( + registry + .connect(await owner.getAddress()) + .callStatic['checkUpkeep(uint256)'](upkeepId), + 'OnlySimulatedBackend()', + ) + }) + + it('returns false and error code if the upkeep is cancelled by admin', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_CANCELLED, + ) + 
expect(checkUpkeepResult.gasUsed).to.equal(0) + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns false and error code if the upkeep is cancelled by owner', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_CANCELLED, + ) + expect(checkUpkeepResult.gasUsed).to.equal(0) + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns false and error code if the registry is paused', async () => { + await registry.connect(owner).pause() + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.REGISTRY_PAUSED, + ) + expect(checkUpkeepResult.gasUsed).to.equal(0) + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns false and error code if the upkeep is paused', async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_PAUSED, + ) + expect(checkUpkeepResult.gasUsed).to.equal(0) + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns false and error code if user is out of funds', async () => { + const checkUpkeepResult = await registry + .connect(zeroAddress) + 
.callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.INSUFFICIENT_BALANCE, + ) + expect(checkUpkeepResult.gasUsed).to.equal(0) + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + context('when the registration is funded', () => { + beforeEach(async () => { + await linkToken.connect(admin).approve(registry.address, toWei('200')) + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + await registry.connect(admin).addFunds(logUpkeepId, toWei('100')) + }) + + it('returns false, error code, and revert data if the target check reverts', async () => { + await mock.setShouldRevertCheck(true) + await mock.setCheckRevertReason( + 'custom revert error, clever way to insert offchain data', + ) + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + assert.equal(checkUpkeepResult.upkeepNeeded, false) + + const revertReasonBytes = `0x${checkUpkeepResult.performData.slice(10)}` // remove sighash + assert.equal( + ethers.utils.defaultAbiCoder.decode(['string'], revertReasonBytes)[0], + 'custom revert error, clever way to insert offchain data', + ) + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.TARGET_CHECK_REVERTED, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + // Feed data should be returned here + assert.isTrue(checkUpkeepResult.fastGasWei.gt(BigNumber.from('0'))) + assert.isTrue(checkUpkeepResult.linkNative.gt(BigNumber.from('0'))) + }) + + it('returns false, error code, and no revert data if the target check revert data exceeds maxRevertDataSize', async () => { + await mock.setShouldRevertCheck(true) + let longRevertReason = '' + for (let i = 0; i <= 
maxRevertDataSize.toNumber(); i++) { + longRevertReason += 'x' + } + await mock.setCheckRevertReason(longRevertReason) + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + assert.equal(checkUpkeepResult.upkeepNeeded, false) + + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.REVERT_DATA_EXCEEDS_LIMIT, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns false and error code if the upkeep is not needed', async () => { + await mock.setCanCheck(false) + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_NOT_NEEDED, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns false and error code if the performData exceeds limit', async () => { + let longBytes = '0x' + for (let i = 0; i < 5000; i++) { + longBytes += '1' + } + await mock.setCanCheck(true) + await mock.setPerformData(longBytes) + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId) + + assert.equal(checkUpkeepResult.upkeepNeeded, false) + assert.equal(checkUpkeepResult.performData, '0x') + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + }) + + it('returns true with gas used if the target can execute', 
async () => { + await mock.setCanCheck(true) + await mock.setPerformData(randomBytes) + + const latestBlock = await ethers.provider.getBlock('latest') + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId, { + blockTag: latestBlock.number, + }) + + assert.equal(checkUpkeepResult.upkeepNeeded, true) + assert.equal(checkUpkeepResult.performData, randomBytes) + assert.equal( + checkUpkeepResult.upkeepFailureReason, + UpkeepFailureReason.NONE, + ) + assert.isTrue(checkUpkeepResult.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + expect(checkUpkeepResult.gasLimit).to.equal(performGas) + assert.isTrue(checkUpkeepResult.fastGasWei.eq(gasWei)) + assert.isTrue(checkUpkeepResult.linkNative.eq(linkEth)) + }) + + it('calls checkLog for log-trigger upkeeps', async () => { + const log: Log = { + index: 0, + timestamp: 0, + txHash: ethers.utils.randomBytes(32), + blockNumber: 100, + blockHash: ethers.utils.randomBytes(32), + source: randomAddress(), + topics: [ethers.utils.randomBytes(32), ethers.utils.randomBytes(32)], + data: ethers.utils.randomBytes(1000), + } + + await ltUpkeep.mock.checkLog.withArgs(log, '0x').returns(true, '0x1234') + + const checkData = encodeLog(log) + + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256,bytes)'](logUpkeepId, checkData) + + expect(checkUpkeepResult.upkeepNeeded).to.be.true + expect(checkUpkeepResult.performData).to.equal('0x1234') + }) + + itMaybe( + 'has a large enough gas overhead to cover upkeeps that use all their gas [ @skip-coverage ]', + async () => { + await mock.setCanCheck(true) + await mock.setCheckGasToBurn(checkGasLimit) + const gas = checkGasLimit.add(checkGasOverhead) + const checkUpkeepResult = await registry + .connect(zeroAddress) + .callStatic['checkUpkeep(uint256)'](upkeepId, { + gasLimit: gas, + }) + + assert.equal(checkUpkeepResult.upkeepNeeded, true) + }, + ) + }) + }) + + describe('#addFunds', 
() => { + const amount = toWei('1') + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).addFunds(upkeepId.add(1), amount), + 'UpkeepCancelled()', + ) + }) + + it('adds to the balance of the registration', async () => { + await registry.connect(admin).addFunds(upkeepId, amount) + const registration = await registry.getUpkeep(upkeepId) + assert.isTrue(amount.eq(registration.balance)) + }) + + it('lets anyone add funds to an upkeep not just admin', async () => { + await linkToken.connect(owner).transfer(await payee1.getAddress(), amount) + await linkToken.connect(payee1).approve(registry.address, amount) + + await registry.connect(payee1).addFunds(upkeepId, amount) + const registration = await registry.getUpkeep(upkeepId) + assert.isTrue(amount.eq(registration.balance)) + }) + + it('emits a log', async () => { + const tx = await registry.connect(admin).addFunds(upkeepId, amount) + await expect(tx) + .to.emit(registry, 'FundsAdded') + .withArgs(upkeepId, await admin.getAddress(), amount) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(keeper1).addFunds(upkeepId, amount), + 'UpkeepCancelled()', + ) + }) + }) + + describe('#getActiveUpkeepIDs', () => { + it('reverts if startIndex is out of bounds ', async () => { + await evmRevert( + registry.getActiveUpkeepIDs(numUpkeeps, 0), + 'IndexOutOfRange()', + ) + await evmRevert( + registry.getActiveUpkeepIDs(numUpkeeps + 1, 0), + 'IndexOutOfRange()', + ) + }) + + it('returns upkeep IDs bounded by maxCount', async () => { + let upkeepIds = await registry.getActiveUpkeepIDs(0, 1) + assert(upkeepIds.length == 1) + assert(upkeepIds[0].eq(upkeepId)) + upkeepIds = await registry.getActiveUpkeepIDs(1, 3) + assert(upkeepIds.length == 3) + expect(upkeepIds).to.deep.equal([ + afUpkeepId, + logUpkeepId, + streamsLookupUpkeepId, + ]) + }) + + it('returns as many ids as 
possible if maxCount > num available', async () => { + const upkeepIds = await registry.getActiveUpkeepIDs(1, numUpkeeps + 100) + assert(upkeepIds.length == numUpkeeps - 1) + }) + + it('returns all upkeep IDs if maxCount is 0', async () => { + let upkeepIds = await registry.getActiveUpkeepIDs(0, 0) + assert(upkeepIds.length == numUpkeeps) + upkeepIds = await registry.getActiveUpkeepIDs(2, 0) + assert(upkeepIds.length == numUpkeeps - 2) + }) + }) + + describe('#getMaxPaymentForGas', () => { + const arbL1PriceinWei = BigNumber.from(1000) // Same as MockArbGasInfo.sol + const l1CostWeiArb = arbL1PriceinWei.mul(16).mul(maxPerformDataSize) + const l1CostWeiOpt = BigNumber.from(2000000) // Same as MockOVMGasPriceOracle.sol + itMaybe('calculates the max fee appropriately', async () => { + await verifyMaxPayment(registry) + }) + + itMaybe('calculates the max fee appropriately for Arbitrum', async () => { + await verifyMaxPayment(arbRegistry, l1CostWeiArb) + }) + + itMaybe('calculates the max fee appropriately for Optimism', async () => { + await verifyMaxPayment(opRegistry, l1CostWeiOpt) + }) + + it('uses the fallback gas price if the feed has issues', async () => { + const expectedFallbackMaxPayment = linkForGas( + performGas, + registryConditionalOverhead + .add(registryPerSignerGasOverhead.mul(f + 1)) + .add(maxPerformDataSize.mul(registryPerPerformByteGasOverhead)), + gasCeilingMultiplier.mul('2'), // fallbackGasPrice is 2x gas price + paymentPremiumPPB, + flatFeeMicroLink, + ).total + + // Stale feed + let roundId = 99 + const answer = 100 + let updatedAt = 946684800 // New Years 2000 🥳 + let startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, answer, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + ( + await registry.getMaxPaymentForGas(Trigger.CONDITION, performGas) + ).toString(), + ) + + // Negative feed price + roundId = 100 + updatedAt = now() + startedAt = 946684799 + await gasPriceFeed + 
.connect(owner) + .updateRoundData(roundId, -100, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + ( + await registry.getMaxPaymentForGas(Trigger.CONDITION, performGas) + ).toString(), + ) + + // Zero feed price + roundId = 101 + updatedAt = now() + startedAt = 946684799 + await gasPriceFeed + .connect(owner) + .updateRoundData(roundId, 0, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + ( + await registry.getMaxPaymentForGas(Trigger.CONDITION, performGas) + ).toString(), + ) + }) + + it('uses the fallback link price if the feed has issues', async () => { + const expectedFallbackMaxPayment = linkForGas( + performGas, + registryConditionalOverhead + .add(registryPerSignerGasOverhead.mul(f + 1)) + .add(maxPerformDataSize.mul(registryPerPerformByteGasOverhead)), + gasCeilingMultiplier.mul('2'), // fallbackLinkPrice is 1/2 pli price, so multiply by 2 + paymentPremiumPPB, + flatFeeMicroLink, + ).total + + // Stale feed + let roundId = 99 + const answer = 100 + let updatedAt = 946684800 // New Years 2000 🥳 + let startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, answer, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + ( + await registry.getMaxPaymentForGas(Trigger.CONDITION, performGas) + ).toString(), + ) + + // Negative feed price + roundId = 100 + updatedAt = now() + startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, -100, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + ( + await registry.getMaxPaymentForGas(Trigger.CONDITION, performGas) + ).toString(), + ) + + // Zero feed price + roundId = 101 + updatedAt = now() + startedAt = 946684799 + await linkEthFeed + .connect(owner) + .updateRoundData(roundId, 0, updatedAt, startedAt) + + assert.equal( + expectedFallbackMaxPayment.toString(), + ( + await registry.getMaxPaymentForGas(Trigger.CONDITION, performGas) + 
).toString(), + ) + }) + }) + + describe('#typeAndVersion', () => { + it('uses the correct type and version', async () => { + const typeAndVersion = await registry.typeAndVersion() + assert.equal(typeAndVersion, 'KeeperRegistry 2.1.0') + }) + }) + + describe('#onTokenTransfer', () => { + const amount = toWei('1') + + it('reverts if not called by the PLI token', async () => { + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [upkeepId]) + + await evmRevert( + registry + .connect(keeper1) + .onTokenTransfer(await keeper1.getAddress(), amount, data), + 'OnlyCallableByPLIToken()', + ) + }) + + it('reverts if not called with more or less than 32 bytes', async () => { + const longData = ethers.utils.defaultAbiCoder.encode( + ['uint256', 'uint256'], + ['33', '34'], + ) + const shortData = '0x12345678' + + await evmRevert( + linkToken + .connect(owner) + .transferAndCall(registry.address, amount, longData), + ) + await evmRevert( + linkToken + .connect(owner) + .transferAndCall(registry.address, amount, shortData), + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(keeper1).addFunds(upkeepId, amount), + 'UpkeepCancelled()', + ) + }) + + it('updates the funds of the job id passed', async () => { + const data = ethers.utils.defaultAbiCoder.encode(['uint256'], [upkeepId]) + + const before = (await registry.getUpkeep(upkeepId)).balance + await linkToken + .connect(owner) + .transferAndCall(registry.address, amount, data) + const after = (await registry.getUpkeep(upkeepId)).balance + + assert.isTrue(before.add(amount).eq(after)) + }) + }) + + describeMaybe('#setConfig - onchain', () => { + const payment = BigNumber.from(1) + const flatFee = BigNumber.from(2) + const maxGas = BigNumber.from(6) + const staleness = BigNumber.from(4) + const ceiling = BigNumber.from(5) + const newMinUpkeepSpend = BigNumber.from(9) + const newMaxCheckDataSize = 
BigNumber.from(10000) + const newMaxPerformDataSize = BigNumber.from(10000) + const newMaxRevertDataSize = BigNumber.from(10000) + const newMaxPerformGas = BigNumber.from(10000000) + const fbGasEth = BigNumber.from(7) + const fbLinkEth = BigNumber.from(8) + const newTranscoder = randomAddress() + const newRegistrars = [randomAddress(), randomAddress()] + const upkeepManager = randomAddress() + + const newConfig: OnChainConfig = { + paymentPremiumPPB: payment, + flatFeeMicroLink: flatFee, + checkGasLimit: maxGas, + stalenessSeconds: staleness, + gasCeilingMultiplier: ceiling, + minUpkeepSpend: newMinUpkeepSpend, + maxCheckDataSize: newMaxCheckDataSize, + maxPerformDataSize: newMaxPerformDataSize, + maxRevertDataSize: newMaxRevertDataSize, + maxPerformGas: newMaxPerformGas, + fallbackGasPrice: fbGasEth, + fallbackLinkPrice: fbLinkEth, + transcoder: newTranscoder, + registrars: newRegistrars, + upkeepPrivilegeManager: upkeepManager, + } + + it('reverts when called by anyone but the proposed owner', async () => { + await evmRevert( + registry + .connect(payee1) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + newConfig, + offchainVersion, + offchainBytes, + ), + 'Only callable by owner', + ) + }) + + it('reverts if signers or transmitters are the zero address', async () => { + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + [randomAddress(), randomAddress(), randomAddress(), zeroAddress], + [ + randomAddress(), + randomAddress(), + randomAddress(), + randomAddress(), + ], + f, + newConfig, + offchainVersion, + offchainBytes, + ), + 'InvalidSigner()', + ) + + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + [ + randomAddress(), + randomAddress(), + randomAddress(), + randomAddress(), + ], + [randomAddress(), randomAddress(), randomAddress(), zeroAddress], + f, + newConfig, + offchainVersion, + offchainBytes, + ), + 'InvalidTransmitter()', + ) + }) + + it('updates the onchainConfig and configDigest', async () 
=> { + const old = await registry.getState() + const oldConfig = old.config + const oldState = old.state + assert.isTrue(paymentPremiumPPB.eq(oldConfig.paymentPremiumPPB)) + assert.isTrue(flatFeeMicroLink.eq(oldConfig.flatFeeMicroLink)) + assert.isTrue(stalenessSeconds.eq(oldConfig.stalenessSeconds)) + assert.isTrue(gasCeilingMultiplier.eq(oldConfig.gasCeilingMultiplier)) + + await registry + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + newConfig, + offchainVersion, + offchainBytes, + ) + + const updated = await registry.getState() + const updatedConfig = updated.config + const updatedState = updated.state + assert.equal(updatedConfig.paymentPremiumPPB, payment.toNumber()) + assert.equal(updatedConfig.flatFeeMicroLink, flatFee.toNumber()) + assert.equal(updatedConfig.stalenessSeconds, staleness.toNumber()) + assert.equal(updatedConfig.gasCeilingMultiplier, ceiling.toNumber()) + assert.equal( + updatedConfig.minUpkeepSpend.toString(), + newMinUpkeepSpend.toString(), + ) + assert.equal( + updatedConfig.maxCheckDataSize, + newMaxCheckDataSize.toNumber(), + ) + assert.equal( + updatedConfig.maxPerformDataSize, + newMaxPerformDataSize.toNumber(), + ) + assert.equal( + updatedConfig.maxRevertDataSize, + newMaxRevertDataSize.toNumber(), + ) + assert.equal(updatedConfig.maxPerformGas, newMaxPerformGas.toNumber()) + assert.equal(updatedConfig.checkGasLimit, maxGas.toNumber()) + assert.equal( + updatedConfig.fallbackGasPrice.toNumber(), + fbGasEth.toNumber(), + ) + assert.equal( + updatedConfig.fallbackLinkPrice.toNumber(), + fbLinkEth.toNumber(), + ) + assert.equal(updatedState.latestEpoch, 0) + + assert(oldState.configCount + 1 == updatedState.configCount) + assert( + oldState.latestConfigBlockNumber != + updatedState.latestConfigBlockNumber, + ) + assert(oldState.latestConfigDigest != updatedState.latestConfigDigest) + + assert.equal(updatedConfig.transcoder, newTranscoder) + assert.deepEqual(updatedConfig.registrars, newRegistrars) + 
assert.equal(updatedConfig.upkeepPrivilegeManager, upkeepManager) + }) + + it('maintains paused state when config is changed', async () => { + await registry.pause() + const old = await registry.getState() + assert.isTrue(old.state.paused) + + await registry + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + newConfig, + offchainVersion, + offchainBytes, + ) + + const updated = await registry.getState() + assert.isTrue(updated.state.paused) + }) + + it('emits an event', async () => { + const tx = await registry + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + newConfig, + offchainVersion, + offchainBytes, + ) + await expect(tx).to.emit(registry, 'ConfigSet') + }) + }) + + describe('#setConfig - offchain', () => { + let newKeepers: string[] + + beforeEach(async () => { + newKeepers = [ + await personas.Eddy.getAddress(), + await personas.Nick.getAddress(), + await personas.Neil.getAddress(), + await personas.Carol.getAddress(), + ] + }) + + it('reverts when called by anyone but the owner', async () => { + await evmRevert( + registry + .connect(payee1) + .setConfigTypeSafe( + newKeepers, + newKeepers, + f, + config, + offchainVersion, + offchainBytes, + ), + 'Only callable by owner', + ) + }) + + it('reverts if too many keeperAddresses set', async () => { + for (let i = 0; i < 40; i++) { + newKeepers.push(randomAddress()) + } + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + newKeepers, + newKeepers, + f, + config, + offchainVersion, + offchainBytes, + ), + 'TooManyOracles()', + ) + }) + + it('reverts if f=0', async () => { + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + newKeepers, + newKeepers, + 0, + config, + offchainVersion, + offchainBytes, + ), + 'IncorrectNumberOfFaultyOracles()', + ) + }) + + it('reverts if signers != transmitters length', async () => { + const signers = [randomAddress()] + await evmRevert( + registry + .connect(owner) + 
.setConfigTypeSafe( + signers, + newKeepers, + f, + config, + offchainVersion, + offchainBytes, + ), + 'IncorrectNumberOfSigners()', + ) + }) + + it('reverts if signers <= 3f', async () => { + newKeepers.pop() + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + newKeepers, + newKeepers, + f, + config, + offchainVersion, + offchainBytes, + ), + 'IncorrectNumberOfSigners()', + ) + }) + + it('reverts on repeated signers', async () => { + const newSigners = [ + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + ] + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + newSigners, + newKeepers, + f, + config, + offchainVersion, + offchainBytes, + ), + 'RepeatedSigner()', + ) + }) + + it('reverts on repeated transmitters', async () => { + const newTransmitters = [ + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + await personas.Eddy.getAddress(), + ] + await evmRevert( + registry + .connect(owner) + .setConfigTypeSafe( + newKeepers, + newTransmitters, + f, + config, + offchainVersion, + offchainBytes, + ), + 'RepeatedTransmitter()', + ) + }) + + itMaybe('stores new config and emits event', async () => { + // Perform an upkeep so that totalPremium is updated + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + let tx = await getTransmitTx(registry, keeper1, [upkeepId]) + await tx.wait() + + const newOffChainVersion = BigNumber.from('2') + const newOffChainConfig = '0x1122' + + const old = await registry.getState() + const oldState = old.state + assert(oldState.totalPremium.gt(BigNumber.from('0'))) + + const newSigners = newKeepers + tx = await registry + .connect(owner) + .setConfigTypeSafe( + newSigners, + newKeepers, + f, + config, + newOffChainVersion, + newOffChainConfig, + ) + + const updated = await registry.getState() + const updatedState = updated.state + 
assert(oldState.totalPremium.eq(updatedState.totalPremium)) + + // Old signer addresses which are not in new signers should be non active + for (let i = 0; i < signerAddresses.length; i++) { + const signer = signerAddresses[i] + if (!newSigners.includes(signer)) { + assert((await registry.getSignerInfo(signer)).active == false) + assert((await registry.getSignerInfo(signer)).index == 0) + } + } + // New signer addresses should be active + for (let i = 0; i < newSigners.length; i++) { + const signer = newSigners[i] + assert((await registry.getSignerInfo(signer)).active == true) + assert((await registry.getSignerInfo(signer)).index == i) + } + // Old transmitter addresses which are not in new transmitter should be non active, update lastCollected but retain other info + for (let i = 0; i < keeperAddresses.length; i++) { + const transmitter = keeperAddresses[i] + if (!newKeepers.includes(transmitter)) { + assert( + (await registry.getTransmitterInfo(transmitter)).active == false, + ) + assert((await registry.getTransmitterInfo(transmitter)).index == i) + assert( + (await registry.getTransmitterInfo(transmitter)).lastCollected.eq( + oldState.totalPremium.sub( + oldState.totalPremium.mod(keeperAddresses.length), + ), + ), + ) + } + } + // New transmitter addresses should be active + for (let i = 0; i < newKeepers.length; i++) { + const transmitter = newKeepers[i] + assert((await registry.getTransmitterInfo(transmitter)).active == true) + assert((await registry.getTransmitterInfo(transmitter)).index == i) + assert( + (await registry.getTransmitterInfo(transmitter)).lastCollected.eq( + oldState.totalPremium, + ), + ) + } + + // config digest should be updated + assert(oldState.configCount + 1 == updatedState.configCount) + assert( + oldState.latestConfigBlockNumber != + updatedState.latestConfigBlockNumber, + ) + assert(oldState.latestConfigDigest != updatedState.latestConfigDigest) + + //New config should be updated + assert.deepEqual(updated.signers, newKeepers) + 
assert.deepEqual(updated.transmitters, newKeepers) + + // Event should have been emitted + await expect(tx).to.emit(registry, 'ConfigSet') + }) + }) + + describe('#setPeerRegistryMigrationPermission() / #getPeerRegistryMigrationPermission()', () => { + const peer = randomAddress() + it('allows the owner to set the peer registries', async () => { + let permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(0) + await registry.setPeerRegistryMigrationPermission(peer, 1) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(1) + await registry.setPeerRegistryMigrationPermission(peer, 2) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(2) + await registry.setPeerRegistryMigrationPermission(peer, 0) + permission = await registry.getPeerRegistryMigrationPermission(peer) + expect(permission).to.equal(0) + }) + it('reverts if passed an unsupported permission', async () => { + await expect( + registry.connect(admin).setPeerRegistryMigrationPermission(peer, 10), + ).to.be.reverted + }) + it('reverts if not called by the owner', async () => { + await expect( + registry.connect(admin).setPeerRegistryMigrationPermission(peer, 1), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('#registerUpkeep', () => { + it('reverts when registry is paused', async () => { + await registry.connect(owner).pause() + await evmRevert( + registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), emptyBytes, '0x'), + 'RegistryPaused()', + ) + }) + + it('reverts if the target is not a contract', async () => { + await evmRevert( + registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](zeroAddress, performGas, await admin.getAddress(), emptyBytes, '0x'), + 'NotAContract()', + ) + }) + + it('reverts if called by a 
non-owner', async () => { + await evmRevert( + registry + .connect(keeper1) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), emptyBytes, '0x'), + 'OnlyCallableByOwnerOrRegistrar()', + ) + }) + + it('reverts if execute gas is too low', async () => { + await evmRevert( + registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, 2299, await admin.getAddress(), emptyBytes, '0x'), + 'GasLimitOutsideRange()', + ) + }) + + it('reverts if execute gas is too high', async () => { + await evmRevert( + registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, 5000001, await admin.getAddress(), emptyBytes, '0x'), + 'GasLimitOutsideRange()', + ) + }) + + it('reverts if checkData is too long', async () => { + let longBytes = '0x' + for (let i = 0; i < 10000; i++) { + longBytes += '1' + } + await evmRevert( + registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), longBytes, '0x'), + 'CheckDataExceedsLimit()', + ) + }) + + it('creates a record of the registration', async () => { + const performGases = [100000, 500000] + const checkDatas = [emptyBytes, '0x12'] + + for (let jdx = 0; jdx < performGases.length; jdx++) { + const performGas = performGases[jdx] + for (let kdx = 0; kdx < checkDatas.length; kdx++) { + const checkData = checkDatas[kdx] + const tx = await registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), checkData, '0x') + + //confirm the upkeep details and verify emitted events + const testUpkeepId = await getUpkeepID(tx) + await expect(tx) + .to.emit(registry, 'UpkeepRegistered') + .withArgs(testUpkeepId, performGas, await admin.getAddress()) + + await expect(tx) + .to.emit(registry, 'UpkeepCheckDataSet') + .withArgs(testUpkeepId, checkData) + 
await expect(tx) + .to.emit(registry, 'UpkeepTriggerConfigSet') + .withArgs(testUpkeepId, '0x') + + const registration = await registry.getUpkeep(testUpkeepId) + + assert.equal(mock.address, registration.target) + assert.notEqual( + ethers.constants.AddressZero, + await registry.getForwarder(testUpkeepId), + ) + assert.equal( + performGas.toString(), + registration.performGas.toString(), + ) + assert.equal(await admin.getAddress(), registration.admin) + assert.equal(0, registration.balance.toNumber()) + assert.equal(0, registration.amountSpent.toNumber()) + assert.equal(0, registration.lastPerformedBlockNumber) + assert.equal(checkData, registration.checkData) + assert.equal(registration.paused, false) + assert.equal(registration.offchainConfig, '0x') + assert(registration.maxValidBlocknumber.eq('0xffffffff')) + } + } + }) + }) + + describe('#pauseUpkeep', () => { + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).pauseUpkeep(upkeepId.add(1)), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is already canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).pauseUpkeep(upkeepId), + 'UpkeepCancelled()', + ) + }) + + it('reverts if the upkeep is already paused', async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).pauseUpkeep(upkeepId), + 'OnlyUnpausedUpkeep()', + ) + }) + + it('reverts if the caller is not the upkeep admin', async () => { + await evmRevert( + registry.connect(keeper1).pauseUpkeep(upkeepId), + 'OnlyCallableByAdmin()', + ) + }) + + it('pauses the upkeep and emits an event', async () => { + const tx = await registry.connect(admin).pauseUpkeep(upkeepId) + await expect(tx).to.emit(registry, 'UpkeepPaused').withArgs(upkeepId) + + const registration = await registry.getUpkeep(upkeepId) + assert.equal(registration.paused, true) + }) + }) + + 
describe('#unpauseUpkeep', () => { + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(keeper1).unpauseUpkeep(upkeepId.add(1)), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is already canceled', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).unpauseUpkeep(upkeepId), + 'UpkeepCancelled()', + ) + }) + + it('marks the contract as paused', async () => { + assert.isFalse((await registry.getState()).state.paused) + + await registry.connect(owner).pause() + + assert.isTrue((await registry.getState()).state.paused) + }) + + it('reverts if the upkeep is not paused', async () => { + await evmRevert( + registry.connect(admin).unpauseUpkeep(upkeepId), + 'OnlyPausedUpkeep()', + ) + }) + + it('reverts if the caller is not the upkeep admin', async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) + + const registration = await registry.getUpkeep(upkeepId) + + assert.equal(registration.paused, true) + + await evmRevert( + registry.connect(keeper1).unpauseUpkeep(upkeepId), + 'OnlyCallableByAdmin()', + ) + }) + + it('unpauses the upkeep and emits an event', async () => { + const originalCount = (await registry.getActiveUpkeepIDs(0, 0)).length + + await registry.connect(admin).pauseUpkeep(upkeepId) + + const tx = await registry.connect(admin).unpauseUpkeep(upkeepId) + + await expect(tx).to.emit(registry, 'UpkeepUnpaused').withArgs(upkeepId) + + const registration = await registry.getUpkeep(upkeepId) + assert.equal(registration.paused, false) + + const upkeepIds = await registry.getActiveUpkeepIDs(0, 0) + assert.equal(upkeepIds.length, originalCount) + }) + }) + + describe('#setUpkeepCheckData', () => { + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry + .connect(keeper1) + .setUpkeepCheckData(upkeepId.add(1), randomBytes), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the caller 
is not upkeep admin', async () => { + await evmRevert( + registry.connect(keeper1).setUpkeepCheckData(upkeepId, randomBytes), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is cancelled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).setUpkeepCheckData(upkeepId, randomBytes), + 'UpkeepCancelled()', + ) + }) + + it('is allowed to update on paused upkeep', async () => { + await registry.connect(admin).pauseUpkeep(upkeepId) + await registry.connect(admin).setUpkeepCheckData(upkeepId, randomBytes) + + const registration = await registry.getUpkeep(upkeepId) + assert.equal(randomBytes, registration.checkData) + }) + + it('reverts if new data exceeds limit', async () => { + let longBytes = '0x' + for (let i = 0; i < 10000; i++) { + longBytes += '1' + } + + await evmRevert( + registry.connect(admin).setUpkeepCheckData(upkeepId, longBytes), + 'CheckDataExceedsLimit()', + ) + }) + + it('updates the upkeep check data and emits an event', async () => { + const tx = await registry + .connect(admin) + .setUpkeepCheckData(upkeepId, randomBytes) + await expect(tx) + .to.emit(registry, 'UpkeepCheckDataSet') + .withArgs(upkeepId, randomBytes) + + const registration = await registry.getUpkeep(upkeepId) + assert.equal(randomBytes, registration.checkData) + }) + }) + + describe('#setUpkeepGasLimit', () => { + const newGasLimit = BigNumber.from('300000') + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry.connect(admin).setUpkeepGasLimit(upkeepId.add(1), newGasLimit), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(admin).setUpkeepGasLimit(upkeepId, newGasLimit), + 'UpkeepCancelled()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + 
registry.connect(owner).setUpkeepGasLimit(upkeepId, newGasLimit), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if new gas limit is out of bounds', async () => { + await evmRevert( + registry + .connect(admin) + .setUpkeepGasLimit(upkeepId, BigNumber.from('100')), + 'GasLimitOutsideRange()', + ) + await evmRevert( + registry + .connect(admin) + .setUpkeepGasLimit(upkeepId, BigNumber.from('6000000')), + 'GasLimitOutsideRange()', + ) + }) + + it('updates the gas limit successfully', async () => { + const initialGasLimit = (await registry.getUpkeep(upkeepId)).performGas + assert.equal(initialGasLimit, performGas.toNumber()) + await registry.connect(admin).setUpkeepGasLimit(upkeepId, newGasLimit) + const updatedGasLimit = (await registry.getUpkeep(upkeepId)).performGas + assert.equal(updatedGasLimit, newGasLimit.toNumber()) + }) + + it('emits a log', async () => { + const tx = await registry + .connect(admin) + .setUpkeepGasLimit(upkeepId, newGasLimit) + await expect(tx) + .to.emit(registry, 'UpkeepGasLimitSet') + .withArgs(upkeepId, newGasLimit) + }) + }) + + describe('#setUpkeepOffchainConfig', () => { + const newConfig = '0xc0ffeec0ffee' + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry + .connect(admin) + .setUpkeepOffchainConfig(upkeepId.add(1), newConfig), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(admin).setUpkeepOffchainConfig(upkeepId, newConfig), + 'UpkeepCancelled()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry.connect(owner).setUpkeepOffchainConfig(upkeepId, newConfig), + 'OnlyCallableByAdmin()', + ) + }) + + it('updates the config successfully', async () => { + const initialConfig = (await registry.getUpkeep(upkeepId)).offchainConfig + assert.equal(initialConfig, '0x') + await 
registry.connect(admin).setUpkeepOffchainConfig(upkeepId, newConfig) + const updatedConfig = (await registry.getUpkeep(upkeepId)).offchainConfig + assert.equal(newConfig, updatedConfig) + }) + + it('emits a log', async () => { + const tx = await registry + .connect(admin) + .setUpkeepOffchainConfig(upkeepId, newConfig) + await expect(tx) + .to.emit(registry, 'UpkeepOffchainConfigSet') + .withArgs(upkeepId, newConfig) + }) + }) + + describe('#setUpkeepTriggerConfig', () => { + const newConfig = '0xdeadbeef' + + it('reverts if the registration does not exist', async () => { + await evmRevert( + registry + .connect(admin) + .setUpkeepTriggerConfig(upkeepId.add(1), newConfig), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts if the upkeep is canceled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(admin).setUpkeepTriggerConfig(upkeepId, newConfig), + 'UpkeepCancelled()', + ) + }) + + it('reverts if called by anyone but the admin', async () => { + await evmRevert( + registry.connect(owner).setUpkeepTriggerConfig(upkeepId, newConfig), + 'OnlyCallableByAdmin()', + ) + }) + + it('emits a log', async () => { + const tx = await registry + .connect(admin) + .setUpkeepTriggerConfig(upkeepId, newConfig) + await expect(tx) + .to.emit(registry, 'UpkeepTriggerConfigSet') + .withArgs(upkeepId, newConfig) + }) + }) + + describe('#transferUpkeepAdmin', () => { + it('reverts when called by anyone but the current upkeep admin', async () => { + await evmRevert( + registry + .connect(payee1) + .transferUpkeepAdmin(upkeepId, await payee2.getAddress()), + 'OnlyCallableByAdmin()', + ) + }) + + it('reverts when transferring to self', async () => { + await evmRevert( + registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await admin.getAddress()), + 'ValueNotChanged()', + ) + }) + + it('reverts when the upkeep is cancelled', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + 
registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await keeper1.getAddress()), + 'UpkeepCancelled()', + ) + }) + + it('allows cancelling transfer by reverting to zero address', async () => { + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + const tx = await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, ethers.constants.AddressZero) + + await expect(tx) + .to.emit(registry, 'UpkeepAdminTransferRequested') + .withArgs( + upkeepId, + await admin.getAddress(), + ethers.constants.AddressZero, + ) + }) + + it('does not change the upkeep admin', async () => { + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + + const upkeep = await registry.getUpkeep(upkeepId) + assert.equal(await admin.getAddress(), upkeep.admin) + }) + + it('emits an event announcing the new upkeep admin', async () => { + const tx = await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + + await expect(tx) + .to.emit(registry, 'UpkeepAdminTransferRequested') + .withArgs(upkeepId, await admin.getAddress(), await payee1.getAddress()) + }) + + it('does not emit an event when called with the same proposed upkeep admin', async () => { + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + + const tx = await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + const receipt = await tx.wait() + assert.equal(0, receipt.logs.length) + }) + }) + + describe('#acceptUpkeepAdmin', () => { + beforeEach(async () => { + // Start admin transfer to payee1 + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + }) + + it('reverts when not called by the proposed upkeep admin', async () => { + await evmRevert( + registry.connect(payee2).acceptUpkeepAdmin(upkeepId), + 'OnlyCallableByProposedAdmin()', + ) + }) + + it('reverts when the upkeep is cancelled', 
async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(payee1).acceptUpkeepAdmin(upkeepId), + 'UpkeepCancelled()', + ) + }) + + it('does change the admin', async () => { + await registry.connect(payee1).acceptUpkeepAdmin(upkeepId) + + const upkeep = await registry.getUpkeep(upkeepId) + assert.equal(await payee1.getAddress(), upkeep.admin) + }) + + it('emits an event announcing the new upkeep admin', async () => { + const tx = await registry.connect(payee1).acceptUpkeepAdmin(upkeepId) + await expect(tx) + .to.emit(registry, 'UpkeepAdminTransferred') + .withArgs(upkeepId, await admin.getAddress(), await payee1.getAddress()) + }) + }) + + describe('#withdrawOwnerFunds', () => { + it('can only be called by owner', async () => { + await evmRevert( + registry.connect(keeper1).withdrawOwnerFunds(), + 'Only callable by owner', + ) + }) + + itMaybe('withdraws the collected fees to owner', async () => { + await registry.connect(admin).addFunds(upkeepId, toWei('100')) + // Very high min spend, whole balance as cancellation fees + const minUpkeepSpend = toWei('1000') + await registry.connect(owner).setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrars: [], + upkeepPrivilegeManager: upkeepManager, + }, + offchainVersion, + offchainBytes, + ) + const upkeepBalance = (await registry.getUpkeep(upkeepId)).balance + const ownerBefore = await linkToken.balanceOf(await owner.getAddress()) + + await registry.connect(owner).cancelUpkeep(upkeepId) + + // Transfered to owner balance on registry + let ownerRegistryBalance = (await registry.getState()).state + .ownerLinkBalance + assert.isTrue(ownerRegistryBalance.eq(upkeepBalance)) + + // Now 
withdraw + await registry.connect(owner).withdrawOwnerFunds() + + ownerRegistryBalance = (await registry.getState()).state.ownerLinkBalance + const ownerAfter = await linkToken.balanceOf(await owner.getAddress()) + + // Owner registry balance should be changed to 0 + assert.isTrue(ownerRegistryBalance.eq(BigNumber.from('0'))) + + // Owner should be credited with the balance + assert.isTrue(ownerBefore.add(upkeepBalance).eq(ownerAfter)) + }) + }) + + describe('#transferPayeeship', () => { + it('reverts when called by anyone but the current payee', async () => { + await evmRevert( + registry + .connect(payee2) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ), + 'OnlyCallableByPayee()', + ) + }) + + it('reverts when transferring to self', async () => { + await evmRevert( + registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee1.getAddress(), + ), + 'ValueNotChanged()', + ) + }) + + it('does not change the payee', async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + + const info = await registry.getTransmitterInfo(await keeper1.getAddress()) + assert.equal(await payee1.getAddress(), info.payee) + }) + + it('emits an event announcing the new payee', async () => { + const tx = await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + await expect(tx) + .to.emit(registry, 'PayeeshipTransferRequested') + .withArgs( + await keeper1.getAddress(), + await payee1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('does not emit an event when called with the same proposal', async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + + const tx = await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + const 
receipt = await tx.wait() + assert.equal(0, receipt.logs.length) + }) + }) + + describe('#acceptPayeeship', () => { + beforeEach(async () => { + await registry + .connect(payee1) + .transferPayeeship( + await keeper1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('reverts when called by anyone but the proposed payee', async () => { + await evmRevert( + registry.connect(payee1).acceptPayeeship(await keeper1.getAddress()), + 'OnlyCallableByProposedPayee()', + ) + }) + + it('emits an event announcing the new payee', async () => { + const tx = await registry + .connect(payee2) + .acceptPayeeship(await keeper1.getAddress()) + await expect(tx) + .to.emit(registry, 'PayeeshipTransferred') + .withArgs( + await keeper1.getAddress(), + await payee1.getAddress(), + await payee2.getAddress(), + ) + }) + + it('does change the payee', async () => { + await registry.connect(payee2).acceptPayeeship(await keeper1.getAddress()) + + const info = await registry.getTransmitterInfo(await keeper1.getAddress()) + assert.equal(await payee2.getAddress(), info.payee) + }) + }) + + describe('#pause', () => { + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry.connect(keeper1).pause(), + 'Only callable by owner', + ) + }) + + it('marks the contract as paused', async () => { + assert.isFalse((await registry.getState()).state.paused) + + await registry.connect(owner).pause() + + assert.isTrue((await registry.getState()).state.paused) + }) + + it('Does not allow transmits when paused', async () => { + await registry.connect(owner).pause() + + await evmRevert( + getTransmitTx(registry, keeper1, [upkeepId]), + 'RegistryPaused()', + ) + }) + + it('Does not allow creation of new upkeeps when paused', async () => { + await registry.connect(owner).pause() + + await evmRevert( + registry + .connect(owner) + [ + 'registerUpkeep(address,uint32,address,bytes,bytes)' + ](mock.address, performGas, await admin.getAddress(), emptyBytes, '0x'), + 'RegistryPaused()', 
+ ) + }) + }) + + describe('#unpause', () => { + beforeEach(async () => { + await registry.connect(owner).pause() + }) + + it('reverts if called by a non-owner', async () => { + await evmRevert( + registry.connect(keeper1).unpause(), + 'Only callable by owner', + ) + }) + + it('marks the contract as not paused', async () => { + assert.isTrue((await registry.getState()).state.paused) + + await registry.connect(owner).unpause() + + assert.isFalse((await registry.getState()).state.paused) + }) + }) + + describe('#migrateUpkeeps() / #receiveUpkeeps()', async () => { + context('when permissions are set', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + await registry.setPeerRegistryMigrationPermission(mgRegistry.address, 1) + await mgRegistry.setPeerRegistryMigrationPermission(registry.address, 2) + }) + + it('migrates an upkeep', async () => { + const offchainBytes = '0x987654abcd' + await registry + .connect(admin) + .setUpkeepOffchainConfig(upkeepId, offchainBytes) + const reg1Upkeep = await registry.getUpkeep(upkeepId) + const forwarderAddress = await registry.getForwarder(upkeepId) + expect(reg1Upkeep.balance).to.equal(toWei('100')) + expect(reg1Upkeep.checkData).to.equal(randomBytes) + expect(forwarderAddress).to.not.equal(ethers.constants.AddressZero) + expect(reg1Upkeep.offchainConfig).to.equal(offchainBytes) + expect((await registry.getState()).state.numUpkeeps).to.equal( + numUpkeeps, + ) + const forwarder = await IAutomationForwarderFactory.connect( + forwarderAddress, + owner, + ) + expect(await forwarder.getRegistry()).to.equal(registry.address) + // Set an upkeep admin transfer in progress too + await registry + .connect(admin) + .transferUpkeepAdmin(upkeepId, await payee1.getAddress()) + + // migrate + await registry + .connect(admin) + .migrateUpkeeps([upkeepId], mgRegistry.address) + expect((await 
registry.getState()).state.numUpkeeps).to.equal( + numUpkeeps - 1, + ) + expect((await mgRegistry.getState()).state.numUpkeeps).to.equal(1) + expect((await registry.getUpkeep(upkeepId)).balance).to.equal(0) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal('0x') + expect((await mgRegistry.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect( + (await mgRegistry.getState()).state.expectedLinkBalance, + ).to.equal(toWei('100')) + expect((await mgRegistry.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + expect((await mgRegistry.getUpkeep(upkeepId)).offchainConfig).to.equal( + offchainBytes, + ) + expect(await mgRegistry.getForwarder(upkeepId)).to.equal( + forwarderAddress, + ) + // test that registry is updated on forwarder + expect(await forwarder.getRegistry()).to.equal(mgRegistry.address) + // migration will delete the upkeep and nullify admin transfer + await expect( + registry.connect(payee1).acceptUpkeepAdmin(upkeepId), + ).to.be.revertedWith('UpkeepCancelled()') + await expect( + mgRegistry.connect(payee1).acceptUpkeepAdmin(upkeepId), + ).to.be.revertedWith('OnlyCallableByProposedAdmin()') + }) + + it('migrates a paused upkeep', async () => { + expect((await registry.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + expect((await registry.getState()).state.numUpkeeps).to.equal( + numUpkeeps, + ) + await registry.connect(admin).pauseUpkeep(upkeepId) + // verify the upkeep is paused + expect((await registry.getUpkeep(upkeepId)).paused).to.equal(true) + // migrate + await registry + .connect(admin) + .migrateUpkeeps([upkeepId], mgRegistry.address) + expect((await registry.getState()).state.numUpkeeps).to.equal( + numUpkeeps - 1, + ) + expect((await mgRegistry.getState()).state.numUpkeeps).to.equal(1) + expect((await registry.getUpkeep(upkeepId)).balance).to.equal(0) + expect((await 
mgRegistry.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal('0x') + expect((await mgRegistry.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + expect( + (await mgRegistry.getState()).state.expectedLinkBalance, + ).to.equal(toWei('100')) + // verify the upkeep is still paused after migration + expect((await mgRegistry.getUpkeep(upkeepId)).paused).to.equal(true) + }) + + it('emits an event on both contracts', async () => { + expect((await registry.getUpkeep(upkeepId)).balance).to.equal( + toWei('100'), + ) + expect((await registry.getUpkeep(upkeepId)).checkData).to.equal( + randomBytes, + ) + expect((await registry.getState()).state.numUpkeeps).to.equal( + numUpkeeps, + ) + const tx = registry + .connect(admin) + .migrateUpkeeps([upkeepId], mgRegistry.address) + await expect(tx) + .to.emit(registry, 'UpkeepMigrated') + .withArgs(upkeepId, toWei('100'), mgRegistry.address) + await expect(tx) + .to.emit(mgRegistry, 'UpkeepReceived') + .withArgs(upkeepId, toWei('100'), registry.address) + }) + + it('is only migratable by the admin', async () => { + await expect( + registry + .connect(owner) + .migrateUpkeeps([upkeepId], mgRegistry.address), + ).to.be.revertedWith('OnlyCallableByAdmin()') + await registry + .connect(admin) + .migrateUpkeeps([upkeepId], mgRegistry.address) + }) + }) + + context('when permissions are not set', () => { + it('reverts', async () => { + // no permissions + await registry.setPeerRegistryMigrationPermission(mgRegistry.address, 0) + await mgRegistry.setPeerRegistryMigrationPermission(registry.address, 0) + await expect(registry.migrateUpkeeps([upkeepId], mgRegistry.address)).to + .be.reverted + // only outgoing permissions + await registry.setPeerRegistryMigrationPermission(mgRegistry.address, 1) + await mgRegistry.setPeerRegistryMigrationPermission(registry.address, 0) + await expect(registry.migrateUpkeeps([upkeepId], mgRegistry.address)).to + .be.reverted 
+ // only incoming permissions + await registry.setPeerRegistryMigrationPermission(mgRegistry.address, 0) + await mgRegistry.setPeerRegistryMigrationPermission(registry.address, 2) + await expect(registry.migrateUpkeeps([upkeepId], mgRegistry.address)).to + .be.reverted + // permissions opposite direction + await registry.setPeerRegistryMigrationPermission(mgRegistry.address, 2) + await mgRegistry.setPeerRegistryMigrationPermission(registry.address, 1) + await expect(registry.migrateUpkeeps([upkeepId], mgRegistry.address)).to + .be.reverted + }) + }) + }) + + describe('#setPayees', () => { + const IGNORE_ADDRESS = '0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF' + + it('reverts when not called by the owner', async () => { + await evmRevert( + registry.connect(keeper1).setPayees(payees), + 'Only callable by owner', + ) + }) + + it('reverts with different numbers of payees than transmitters', async () => { + await evmRevert( + registry.connect(owner).setPayees([...payees, randomAddress()]), + 'ParameterLengthError()', + ) + }) + + it('reverts if the payee is the zero address', async () => { + await blankRegistry.connect(owner).setConfig(...baseConfig) // used to test initial config + + await evmRevert( + blankRegistry // used to test initial config + .connect(owner) + .setPayees([ethers.constants.AddressZero, ...payees.slice(1)]), + 'InvalidPayee()', + ) + }) + + itMaybe( + 'sets the payees when exisitng payees are zero address', + async () => { + //Initial payees should be zero address + await blankRegistry.connect(owner).setConfig(...baseConfig) // used to test initial config + + for (let i = 0; i < keeperAddresses.length; i++) { + const payee = ( + await blankRegistry.getTransmitterInfo(keeperAddresses[i]) + ).payee // used to test initial config + assert.equal(payee, zeroAddress) + } + + await blankRegistry.connect(owner).setPayees(payees) // used to test initial config + + for (let i = 0; i < keeperAddresses.length; i++) { + const payee = ( + await 
blankRegistry.getTransmitterInfo(keeperAddresses[i]) + ).payee + assert.equal(payee, payees[i]) + } + }, + ) + + it('does not change the payee if IGNORE_ADDRESS is used as payee', async () => { + const signers = Array.from({ length: 5 }, randomAddress) + const keepers = Array.from({ length: 5 }, randomAddress) + const payees = Array.from({ length: 5 }, randomAddress) + const newTransmitter = randomAddress() + const newPayee = randomAddress() + const ignoreAddresses = new Array(payees.length).fill(IGNORE_ADDRESS) + const newPayees = [...ignoreAddresses, newPayee] + // arbitrum registry + // configure registry with 5 keepers // optimism registry + await blankRegistry // used to test initial configurations + .connect(owner) + .setConfigTypeSafe( + signers, + keepers, + f, + config, + offchainVersion, + offchainBytes, + ) + // arbitrum registry + // set initial payees // optimism registry + await blankRegistry.connect(owner).setPayees(payees) // used to test initial configurations + // arbitrum registry + // add another keeper // optimism registry + await blankRegistry // used to test initial configurations + .connect(owner) + .setConfigTypeSafe( + [...signers, randomAddress()], + [...keepers, newTransmitter], + f, + config, + offchainVersion, + offchainBytes, + ) + // arbitrum registry + // update payee list // optimism registry // arbitrum registry + await blankRegistry.connect(owner).setPayees(newPayees) // used to test initial configurations // optimism registry + const ignored = await blankRegistry.getTransmitterInfo(newTransmitter) // used to test initial configurations + assert.equal(newPayee, ignored.payee) + assert.equal(true, ignored.active) + }) + + it('reverts if payee is non zero and owner tries to change payee', async () => { + const newPayees = [randomAddress(), ...payees.slice(1)] + + await evmRevert( + registry.connect(owner).setPayees(newPayees), + 'InvalidPayee()', + ) + }) + + it('emits events for every payee added and removed', async () => { + 
const tx = await registry.connect(owner).setPayees(payees) + await expect(tx) + .to.emit(registry, 'PayeesUpdated') + .withArgs(keeperAddresses, payees) + }) + }) + + describe('#cancelUpkeep', () => { + it('reverts if the ID is not valid', async () => { + await evmRevert( + registry.connect(owner).cancelUpkeep(upkeepId.add(1)), + 'CannotCancel()', + ) + }) + + it('reverts if called by a non-owner/non-admin', async () => { + await evmRevert( + registry.connect(keeper1).cancelUpkeep(upkeepId), + 'OnlyCallableByOwnerOrAdmin()', + ) + }) + + describe('when called by the owner', async () => { + it('sets the registration to invalid immediately', async () => { + const tx = await registry.connect(owner).cancelUpkeep(upkeepId) + const receipt = await tx.wait() + const registration = await registry.getUpkeep(upkeepId) + assert.equal( + registration.maxValidBlocknumber.toNumber(), + receipt.blockNumber, + ) + }) + + it('emits an event', async () => { + const tx = await registry.connect(owner).cancelUpkeep(upkeepId) + const receipt = await tx.wait() + await expect(tx) + .to.emit(registry, 'UpkeepCanceled') + .withArgs(upkeepId, BigNumber.from(receipt.blockNumber)) + }) + + it('immediately prevents upkeep', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + + const tx = await getTransmitTx(registry, keeper1, [upkeepId]) + const receipt = await tx.wait() + const cancelledUpkeepReportLogs = + parseCancelledUpkeepReportLogs(receipt) + // exactly 1 CancelledUpkeepReport log should be emitted + assert.equal(cancelledUpkeepReportLogs.length, 1) + }) + + it('does not revert if reverts if called multiple times', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + await evmRevert( + registry.connect(owner).cancelUpkeep(upkeepId), + 'CannotCancel()', + ) + }) + + describe('when called by the owner when the admin has just canceled', () => { + let oldExpiration: BigNumber + + beforeEach(async () => { + await 
registry.connect(admin).cancelUpkeep(upkeepId) + const registration = await registry.getUpkeep(upkeepId) + oldExpiration = registration.maxValidBlocknumber + }) + + it('allows the owner to cancel it more quickly', async () => { + await registry.connect(owner).cancelUpkeep(upkeepId) + + const registration = await registry.getUpkeep(upkeepId) + const newExpiration = registration.maxValidBlocknumber + assert.isTrue(newExpiration.lt(oldExpiration)) + }) + }) + }) + + describe('when called by the admin', async () => { + it('reverts if called again by the admin', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + await evmRevert( + registry.connect(admin).cancelUpkeep(upkeepId), + 'CannotCancel()', + ) + }) + + it('reverts if called by the owner after the timeout', async () => { + await registry.connect(admin).cancelUpkeep(upkeepId) + + for (let i = 0; i < cancellationDelay; i++) { + await ethers.provider.send('evm_mine', []) + } + + await evmRevert( + registry.connect(owner).cancelUpkeep(upkeepId), + 'CannotCancel()', + ) + }) + + it('sets the registration to invalid in 50 blocks', async () => { + const tx = await registry.connect(admin).cancelUpkeep(upkeepId) + const receipt = await tx.wait() + const registration = await registry.getUpkeep(upkeepId) + assert.equal( + registration.maxValidBlocknumber.toNumber(), + receipt.blockNumber + 50, + ) + }) + + it('emits an event', async () => { + const tx = await registry.connect(admin).cancelUpkeep(upkeepId) + const receipt = await tx.wait() + await expect(tx) + .to.emit(registry, 'UpkeepCanceled') + .withArgs( + upkeepId, + BigNumber.from(receipt.blockNumber + cancellationDelay), + ) + }) + + it('immediately prevents upkeep', async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + await registry.connect(admin).cancelUpkeep(upkeepId) + + await getTransmitTx(registry, keeper1, [upkeepId]) + + for (let i = 0; i 
< cancellationDelay; i++) { + await ethers.provider.send('evm_mine', []) + } + + const tx = await getTransmitTx(registry, keeper1, [upkeepId]) + + const receipt = await tx.wait() + const cancelledUpkeepReportLogs = + parseCancelledUpkeepReportLogs(receipt) + // exactly 1 CancelledUpkeepReport log should be emitted + assert.equal(cancelledUpkeepReportLogs.length, 1) + }) + + describeMaybe('when an upkeep has been performed', async () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + await getTransmitTx(registry, keeper1, [upkeepId]) + }) + + it('deducts a cancellation fee from the upkeep and gives to owner', async () => { + const minUpkeepSpend = toWei('10') + + await registry.connect(owner).setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrars: [], + upkeepPrivilegeManager: upkeepManager, + }, + offchainVersion, + offchainBytes, + ) + + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const upkeepBefore = (await registry.getUpkeep(upkeepId)).balance + const ownerBefore = (await registry.getState()).state.ownerLinkBalance + + const amountSpent = toWei('100').sub(upkeepBefore) + const cancellationFee = minUpkeepSpend.sub(amountSpent) + + await registry.connect(admin).cancelUpkeep(upkeepId) + + const payee1After = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const upkeepAfter = (await registry.getUpkeep(upkeepId)).balance + const ownerAfter = (await registry.getState()).state.ownerLinkBalance + + // post upkeep balance should be previous balance minus cancellation fee + 
assert.isTrue(upkeepBefore.sub(cancellationFee).eq(upkeepAfter)) + // payee balance should not change + assert.isTrue(payee1Before.eq(payee1After)) + // owner should receive the cancellation fee + assert.isTrue(ownerAfter.sub(ownerBefore).eq(cancellationFee)) + }) + + it('deducts up to balance as cancellation fee', async () => { + // Very high min spend, should deduct whole balance as cancellation fees + const minUpkeepSpend = toWei('1000') + await registry.connect(owner).setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrars: [], + upkeepPrivilegeManager: upkeepManager, + }, + offchainVersion, + offchainBytes, + ) + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const upkeepBefore = (await registry.getUpkeep(upkeepId)).balance + const ownerBefore = (await registry.getState()).state.ownerLinkBalance + + await registry.connect(admin).cancelUpkeep(upkeepId) + const payee1After = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const ownerAfter = (await registry.getState()).state.ownerLinkBalance + const upkeepAfter = (await registry.getUpkeep(upkeepId)).balance + + // all upkeep balance is deducted for cancellation fee + assert.equal(0, upkeepAfter.toNumber()) + // payee balance should not change + assert.isTrue(payee1After.eq(payee1Before)) + // all upkeep balance is transferred to the owner + assert.isTrue(ownerAfter.sub(ownerBefore).eq(upkeepBefore)) + }) + + it('does not deduct cancellation fee if more than minUpkeepSpend is spent', async () => { + // Very low min spend, already spent in one perform upkeep + const minUpkeepSpend = BigNumber.from(420) + await registry.connect(owner).setConfigTypeSafe( + signerAddresses, + 
keeperAddresses, + f, + { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrars: [], + upkeepPrivilegeManager: upkeepManager, + }, + offchainVersion, + offchainBytes, + ) + const payee1Before = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const upkeepBefore = (await registry.getUpkeep(upkeepId)).balance + const ownerBefore = (await registry.getState()).state.ownerLinkBalance + + await registry.connect(admin).cancelUpkeep(upkeepId) + const payee1After = await linkToken.balanceOf( + await payee1.getAddress(), + ) + const ownerAfter = (await registry.getState()).state.ownerLinkBalance + const upkeepAfter = (await registry.getUpkeep(upkeepId)).balance + + // upkeep does not pay cancellation fee after cancellation because minimum upkeep spent is met + assert.isTrue(upkeepBefore.eq(upkeepAfter)) + // owner balance does not change + assert.isTrue(ownerAfter.eq(ownerBefore)) + // payee balance does not change + assert.isTrue(payee1Before.eq(payee1After)) + }) + }) + }) + }) + + describe('#withdrawPayment', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + await getTransmitTx(registry, keeper1, [upkeepId]) + }) + + it('reverts if called by anyone but the payee', async () => { + await evmRevert( + registry + .connect(payee2) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ), + 'OnlyCallableByPayee()', + ) + }) + + it('reverts if called with the 0 address', async () => { + await evmRevert( + registry + .connect(payee2) + .withdrawPayment(await keeper1.getAddress(), zeroAddress), + 'InvalidRecipient()', + ) + }) + + it('updates the balances', async () => { + const to = 
await nonkeeper.getAddress() + const keeperBefore = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const registrationBefore = (await registry.getUpkeep(upkeepId)).balance + const toLinkBefore = await linkToken.balanceOf(to) + const registryLinkBefore = await linkToken.balanceOf(registry.address) + const registryPremiumBefore = (await registry.getState()).state + .totalPremium + const ownerBefore = (await registry.getState()).state.ownerLinkBalance + + // Withdrawing for first time, last collected = 0 + assert.equal(keeperBefore.lastCollected.toString(), '0') + + //// Do the thing + await registry + .connect(payee1) + .withdrawPayment(await keeper1.getAddress(), to) + + const keeperAfter = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const registrationAfter = (await registry.getUpkeep(upkeepId)).balance + const toLinkAfter = await linkToken.balanceOf(to) + const registryLinkAfter = await linkToken.balanceOf(registry.address) + const registryPremiumAfter = (await registry.getState()).state + .totalPremium + const ownerAfter = (await registry.getState()).state.ownerLinkBalance + + // registry total premium should not change + assert.isTrue(registryPremiumBefore.eq(registryPremiumAfter)) + + // Last collected should be updated to premium-change + assert.isTrue( + keeperAfter.lastCollected.eq( + registryPremiumBefore.sub( + registryPremiumBefore.mod(keeperAddresses.length), + ), + ), + ) + + // owner balance should remain unchanged + assert.isTrue(ownerAfter.eq(ownerBefore)) + + assert.isTrue(keeperAfter.balance.eq(BigNumber.from(0))) + assert.isTrue(registrationBefore.eq(registrationAfter)) + assert.isTrue(toLinkBefore.add(keeperBefore.balance).eq(toLinkAfter)) + assert.isTrue( + registryLinkBefore.sub(keeperBefore.balance).eq(registryLinkAfter), + ) + }) + + it('emits a log announcing the withdrawal', async () => { + const balance = ( + await registry.getTransmitterInfo(await keeper1.getAddress()) + ).balance + const 
tx = await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + await expect(tx) + .to.emit(registry, 'PaymentWithdrawn') + .withArgs( + await keeper1.getAddress(), + balance, + await nonkeeper.getAddress(), + await payee1.getAddress(), + ) + }) + }) + + describe('#checkCallback', () => { + it('returns false with appropriate failure reason when target callback reverts', async () => { + await streamsLookupUpkeep.setShouldRevertCallback(true) + + const values: any[] = ['0x1234', '0xabcd'] + const res = await registry + .connect(zeroAddress) + .callStatic.checkCallback(streamsLookupUpkeepId, values, '0x') + + assert.isFalse(res.upkeepNeeded) + assert.equal(res.performData, '0x') + assert.equal( + res.upkeepFailureReason, + UpkeepFailureReason.CHECK_CALLBACK_REVERTED, + ) + assert.isTrue(res.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('returns false with appropriate failure reason when target callback returns big performData', async () => { + let longBytes = '0x' + for (let i = 0; i <= maxPerformDataSize.toNumber(); i++) { + longBytes += '11' + } + const values: any[] = [longBytes, longBytes] + const res = await registry + .connect(zeroAddress) + .callStatic.checkCallback(streamsLookupUpkeepId, values, '0x') + + assert.isFalse(res.upkeepNeeded) + assert.equal(res.performData, '0x') + assert.equal( + res.upkeepFailureReason, + UpkeepFailureReason.PERFORM_DATA_EXCEEDS_LIMIT, + ) + assert.isTrue(res.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('returns false with appropriate failure reason when target callback returns false', async () => { + await streamsLookupUpkeep.setCallbackReturnBool(false) + const values: any[] = ['0x1234', '0xabcd'] + const res = await registry + .connect(zeroAddress) + .callStatic.checkCallback(streamsLookupUpkeepId, values, '0x') + + assert.isFalse(res.upkeepNeeded) + assert.equal(res.performData, '0x') + assert.equal( + 
res.upkeepFailureReason, + UpkeepFailureReason.UPKEEP_NOT_NEEDED, + ) + assert.isTrue(res.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + + it('succeeds with upkeep needed', async () => { + const values: any[] = ['0x1234', '0xabcd'] + + const res = await registry + .connect(zeroAddress) + .callStatic.checkCallback(streamsLookupUpkeepId, values, '0x') + const expectedPerformData = ethers.utils.defaultAbiCoder.encode( + ['bytes[]', 'bytes'], + [values, '0x'], + ) + + assert.isTrue(res.upkeepNeeded) + assert.equal(res.performData, expectedPerformData) + assert.equal(res.upkeepFailureReason, UpkeepFailureReason.NONE) + assert.isTrue(res.gasUsed.gt(BigNumber.from('0'))) // Some gas should be used + }) + }) + + describe('#setUpkeepPrivilegeConfig() / #getUpkeepPrivilegeConfig()', () => { + it('reverts when non manager tries to set privilege config', async () => { + await evmRevert( + registry.connect(payee3).setUpkeepPrivilegeConfig(upkeepId, '0x1234'), + 'OnlyCallableByUpkeepPrivilegeManager()', + ) + }) + + it('returns empty bytes for upkeep privilege config before setting', async () => { + const cfg = await registry.getUpkeepPrivilegeConfig(upkeepId) + assert.equal(cfg, '0x') + }) + + it('allows upkeep manager to set privilege config', async () => { + const tx = await registry + .connect(personas.Norbert) + .setUpkeepPrivilegeConfig(upkeepId, '0x1234') + await expect(tx) + .to.emit(registry, 'UpkeepPrivilegeConfigSet') + .withArgs(upkeepId, '0x1234') + + const cfg = await registry.getUpkeepPrivilegeConfig(upkeepId) + assert.equal(cfg, '0x1234') + }) + }) + + describe('#setAdminPrivilegeConfig() / #getAdminPrivilegeConfig()', () => { + const admin = randomAddress() + + it('reverts when non manager tries to set privilege config', async () => { + await evmRevert( + registry.connect(payee3).setAdminPrivilegeConfig(admin, '0x1234'), + 'OnlyCallableByUpkeepPrivilegeManager()', + ) + }) + + it('returns empty bytes for upkeep privilege config before 
setting', async () => { + const cfg = await registry.getAdminPrivilegeConfig(admin) + assert.equal(cfg, '0x') + }) + + it('allows upkeep manager to set privilege config', async () => { + const tx = await registry + .connect(personas.Norbert) + .setAdminPrivilegeConfig(admin, '0x1234') + await expect(tx) + .to.emit(registry, 'AdminPrivilegeConfigSet') + .withArgs(admin, '0x1234') + + const cfg = await registry.getAdminPrivilegeConfig(admin) + assert.equal(cfg, '0x1234') + }) + }) + + describe('transmitterPremiumSplit [ @skip-coverage ]', () => { + beforeEach(async () => { + await linkToken.connect(owner).approve(registry.address, toWei('100')) + await registry.connect(owner).addFunds(upkeepId, toWei('100')) + }) + + it('splits premium evenly across transmitters', async () => { + // Do a transmit from keeper1 + await getTransmitTx(registry, keeper1, [upkeepId]) + + const registryPremium = (await registry.getState()).state.totalPremium + assert.isTrue(registryPremium.gt(BigNumber.from(0))) + + const premiumPerTransmitter = registryPremium.div( + BigNumber.from(keeperAddresses.length), + ) + const k1Balance = ( + await registry.getTransmitterInfo(await keeper1.getAddress()) + ).balance + // transmitter should be reimbursed for gas and get the premium + assert.isTrue(k1Balance.gt(premiumPerTransmitter)) + const k1GasReimbursement = k1Balance.sub(premiumPerTransmitter) + + const k2Balance = ( + await registry.getTransmitterInfo(await keeper2.getAddress()) + ).balance + // non transmitter should get its share of premium + assert.isTrue(k2Balance.eq(premiumPerTransmitter)) + + // Now do a transmit from keeper 2 + await getTransmitTx(registry, keeper2, [upkeepId]) + const registryPremiumNew = (await registry.getState()).state.totalPremium + assert.isTrue(registryPremiumNew.gt(registryPremium)) + const premiumPerTransmitterNew = registryPremiumNew.div( + BigNumber.from(keeperAddresses.length), + ) + const additionalPremium = premiumPerTransmitterNew.sub( + 
premiumPerTransmitter, + ) + + const k1BalanceNew = ( + await registry.getTransmitterInfo(await keeper1.getAddress()) + ).balance + // k1 should get the new premium + assert.isTrue( + k1BalanceNew.eq(k1GasReimbursement.add(premiumPerTransmitterNew)), + ) + + const k2BalanceNew = ( + await registry.getTransmitterInfo(await keeper2.getAddress()) + ).balance + // k2 should get gas reimbursement in addition to new premium + assert.isTrue(k2BalanceNew.gt(k2Balance.add(additionalPremium))) + }) + + it('updates last collected upon payment withdrawn', async () => { + // Do a transmit from keeper1 + await getTransmitTx(registry, keeper1, [upkeepId]) + + const registryPremium = (await registry.getState()).state.totalPremium + const k1 = await registry.getTransmitterInfo(await keeper1.getAddress()) + const k2 = await registry.getTransmitterInfo(await keeper2.getAddress()) + + // Withdrawing for first time, last collected = 0 + assert.isTrue(k1.lastCollected.eq(BigNumber.from(0))) + assert.isTrue(k2.lastCollected.eq(BigNumber.from(0))) + + //// Do the thing + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + + const k1New = await registry.getTransmitterInfo( + await keeper1.getAddress(), + ) + const k2New = await registry.getTransmitterInfo( + await keeper2.getAddress(), + ) + + // transmitter info lastCollected should be updated for k1, not for k2 + assert.isTrue( + k1New.lastCollected.eq( + registryPremium.sub(registryPremium.mod(keeperAddresses.length)), + ), + ) + assert.isTrue(k2New.lastCollected.eq(BigNumber.from(0))) + }) + + itMaybe( + 'maintains consistent balance information across all parties', + async () => { + // throughout transmits, withdrawals, setConfigs total claim on balances should remain less than expected balance + // some spare change can get lost but it should be less than maxAllowedSpareChange + + let maxAllowedSpareChange = BigNumber.from('0') + await 
verifyConsistentAccounting(maxAllowedSpareChange) + + await getTransmitTx(registry, keeper1, [upkeepId]) + maxAllowedSpareChange = maxAllowedSpareChange.add(BigNumber.from('31')) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry + .connect(payee2) + .withdrawPayment( + await keeper2.getAddress(), + await nonkeeper.getAddress(), + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await getTransmitTx(registry, keeper1, [upkeepId]) + maxAllowedSpareChange = maxAllowedSpareChange.add(BigNumber.from('31')) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry.connect(owner).setConfigTypeSafe( + signerAddresses.slice(2, 15), // only use 2-14th index keepers + keeperAddresses.slice(2, 15), + f, + config, + offchainVersion, + offchainBytes, + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await getTransmitTx(registry, keeper3, [upkeepId], { + startingSignerIndex: 2, + }) + maxAllowedSpareChange = maxAllowedSpareChange.add(BigNumber.from('13')) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry + .connect(payee3) + .withdrawPayment( + await keeper3.getAddress(), + await nonkeeper.getAddress(), + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry.connect(owner).setConfigTypeSafe( + signerAddresses.slice(0, 4), // only use 0-3rd index keepers + keeperAddresses.slice(0, 4), + f, + config, + offchainVersion, + offchainBytes, + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + await getTransmitTx(registry, keeper1, [upkeepId]) + maxAllowedSpareChange = 
maxAllowedSpareChange.add(BigNumber.from('4')) + await getTransmitTx(registry, keeper3, [upkeepId]) + maxAllowedSpareChange = maxAllowedSpareChange.add(BigNumber.from('4')) + + await verifyConsistentAccounting(maxAllowedSpareChange) + await registry + .connect(payee5) + .withdrawPayment( + await keeper5.getAddress(), + await nonkeeper.getAddress(), + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + + await registry + .connect(payee1) + .withdrawPayment( + await keeper1.getAddress(), + await nonkeeper.getAddress(), + ) + await verifyConsistentAccounting(maxAllowedSpareChange) + }, + ) + }) +}) diff --git a/contracts/test/v0.8/automation/LinkAvailableBalanceMonitor.test.ts b/contracts/test/v0.8/automation/LinkAvailableBalanceMonitor.test.ts new file mode 100644 index 00000000..749ff1de --- /dev/null +++ b/contracts/test/v0.8/automation/LinkAvailableBalanceMonitor.test.ts @@ -0,0 +1,1020 @@ +import { ethers } from 'hardhat' +import chai, { assert, expect } from 'chai' +import type { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { loadFixture } from '@nomicfoundation/hardhat-network-helpers' +import * as h from '../../test-helpers/helpers' +import { mineBlock } from '../../test-helpers/helpers' +import { IAggregatorProxy__factory as IAggregatorProxyFactory } from '../../../typechain/factories/IAggregatorProxy__factory' +import { ILinkAvailable__factory as ILinkAvailableFactory } from '../../../typechain/factories/ILinkAvailable__factory' +import { LinkAvailableBalanceMonitor, LinkToken } from '../../../typechain' +import { BigNumber } from 'ethers' +import deepEqualInAnyOrder from 'deep-equal-in-any-order' +import { + deployMockContract, + MockContract, +} from '@ethereum-waffle/mock-contract' + +chai.use(deepEqualInAnyOrder) + +//////////////////////////////// GAS USAGE LIMITS - CHANGE WITH CAUTION ////////////////////////// +// // +// we try to keep gas usage under this amount (max is 5M) // +const TARGET_PERFORM_GAS_LIMIT = 
2_000_000 +// we try to keep gas usage under this amount (max is 5M) the test is not a perfectly accurate // +// measurement of gas usage because it relies on mocks which may do fewer storage reads // +// therefore, we keep a healthy margin to avoid running over the limit! // +const TARGET_CHECK_GAS_LIMIT = 3_500_000 +// // +////////////////////////////////////////////////////////////////////////////////////////////////// +const INVALID_WATCHLIST_ERR = `InvalidWatchList()` +const PAUSED_ERR = 'Pausable: paused' + +const zeroPLI = ethers.utils.parseEther('0') +const onePLI = ethers.utils.parseEther('1') +const twoPLI = ethers.utils.parseEther('2') +const fourPLI = ethers.utils.parseEther('4') +const tenPLI = ethers.utils.parseEther('10') +const oneHundredPLI = ethers.utils.parseEther('100') + +const randAddr = () => ethers.Wallet.createRandom().address + +let labm: LinkAvailableBalanceMonitor +let lt: LinkToken +let owner: SignerWithAddress +let stranger: SignerWithAddress +let keeperRegistry: SignerWithAddress +let proxy1: MockContract +let proxy2: MockContract +let proxy3: MockContract +let proxy4: MockContract // leave this proxy / aggregator unconfigured for topUp() testing +let aggregator1: MockContract +let aggregator2: MockContract +let aggregator3: MockContract +let aggregator4: MockContract // leave this proxy / aggregator unconfigured for topUp() testing + +let directTarget1: MockContract // Contracts which are direct target of balance monitoring without proxy +let directTarget2: MockContract + +let watchListAddresses: string[] +let watchListMinBalances: BigNumber[] +let watchListTopUpAmounts: BigNumber[] +let watchListDstChainSelectors: number[] + +async function assertContractLinkBalances( + balance1: BigNumber, + balance2: BigNumber, + balance3: BigNumber, + balance4: BigNumber, + balance5: BigNumber, +) { + await h.assertLinkTokenBalance(lt, aggregator1.address, balance1, 'address 1') + await h.assertLinkTokenBalance(lt, aggregator2.address, balance2, 
'address 2') + await h.assertLinkTokenBalance(lt, aggregator3.address, balance3, 'address 3') + await h.assertLinkTokenBalance( + lt, + directTarget1.address, + balance4, + 'address 4', + ) + await h.assertLinkTokenBalance( + lt, + directTarget2.address, + balance5, + 'address 5', + ) +} + +const setup = async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + stranger = accounts[1] + keeperRegistry = accounts[2] + + proxy1 = await deployMockContract(owner, IAggregatorProxyFactory.abi) + proxy2 = await deployMockContract(owner, IAggregatorProxyFactory.abi) + proxy3 = await deployMockContract(owner, IAggregatorProxyFactory.abi) + proxy4 = await deployMockContract(owner, IAggregatorProxyFactory.abi) + aggregator1 = await deployMockContract(owner, ILinkAvailableFactory.abi) + aggregator2 = await deployMockContract(owner, ILinkAvailableFactory.abi) + aggregator3 = await deployMockContract(owner, ILinkAvailableFactory.abi) + aggregator4 = await deployMockContract(owner, ILinkAvailableFactory.abi) + directTarget1 = await deployMockContract(owner, ILinkAvailableFactory.abi) + directTarget2 = await deployMockContract(owner, ILinkAvailableFactory.abi) + + await proxy1.deployed() + await proxy2.deployed() + await proxy3.deployed() + await proxy4.deployed() + await aggregator1.deployed() + await aggregator2.deployed() + await aggregator3.deployed() + await aggregator4.deployed() + await directTarget1.deployed() + await directTarget2.deployed() + + watchListAddresses = [ + proxy1.address, + proxy2.address, + proxy3.address, + directTarget1.address, + directTarget2.address, + ] + watchListMinBalances = [onePLI, onePLI, onePLI, twoPLI, twoPLI] + watchListTopUpAmounts = [twoPLI, twoPLI, twoPLI, twoPLI, twoPLI] + watchListDstChainSelectors = [1, 2, 3, 4, 5] + + await proxy1.mock.aggregator.returns(aggregator1.address) + await proxy2.mock.aggregator.returns(aggregator2.address) + await proxy3.mock.aggregator.returns(aggregator3.address) + + await 
aggregator1.mock.linkAvailableForPayment.returns(0) + await aggregator2.mock.linkAvailableForPayment.returns(0) + await aggregator3.mock.linkAvailableForPayment.returns(0) + + await directTarget1.mock.linkAvailableForPayment.returns(0) + await directTarget2.mock.linkAvailableForPayment.returns(0) + + const labmFactory = await ethers.getContractFactory( + 'LinkAvailableBalanceMonitor', + owner, + ) + const ltFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + owner, + ) + + // New parameters needed by the constructor + const maxPerform = 5 + const maxCheck = 20 + const minWaitPeriodSeconds = 0 + const upkeepInterval = 10 + + lt = (await ltFactory.deploy()) as LinkToken + labm = await labmFactory.deploy( + owner.address, + lt.address, + minWaitPeriodSeconds, + maxPerform, + maxCheck, + upkeepInterval, + ) + await labm.deployed() + + for (let i = 1; i <= 4; i++) { + const recipient = await accounts[i].getAddress() + await lt.connect(owner).transfer(recipient, oneHundredPLI) + } + + const setTx = await labm + .connect(owner) + .setWatchList( + watchListAddresses, + watchListMinBalances, + watchListTopUpAmounts, + watchListDstChainSelectors, + ) + await setTx.wait() +} + +describe('LinkAvailableBalanceMonitor', () => { + beforeEach(async () => { + await loadFixture(setup) + }) + + describe('add funds', () => { + it('Should allow anyone to add funds', async () => { + await lt.transfer(labm.address, onePLI) + await lt.connect(stranger).transfer(labm.address, onePLI) + }) + }) + + describe('setTopUpAmount()', () => { + it('configures the top-up amount', async () => { + await labm + .connect(owner) + .setTopUpAmount(directTarget1.address, BigNumber.from(100)) + const report = await labm.getAccountInfo(directTarget1.address) + assert.equal(report.topUpAmount.toString(), '100') + }) + + it('configuresis only callable by the owner', async () => { + await expect( + labm.connect(stranger).setTopUpAmount(directTarget1.address, 100), + 
).to.be.reverted + }) + }) + + describe('setMinBalance()', () => { + it('configures the min balance', async () => { + await labm + .connect(owner) + .setMinBalance(proxy1.address, BigNumber.from(100)) + const report = await labm.getAccountInfo(proxy1.address) + assert.equal(report.minBalance.toString(), '100') + }) + + it('reverts if address is not in the watchlist', async () => { + await expect(labm.connect(owner).setMinBalance(proxy4.address, 100)).to.be + .reverted + }) + + it('is only callable by the owner', async () => { + await expect(labm.connect(stranger).setMinBalance(proxy1.address, 100)).to + .be.reverted + }) + }) + + describe('setMaxPerform()', () => { + it('configures the MaxPerform', async () => { + await labm.connect(owner).setMaxPerform(BigNumber.from(100)) + const report = await labm.getMaxPerform() + assert.equal(report.toString(), '100') + }) + + it('is only callable by the owner', async () => { + await expect(labm.connect(stranger).setMaxPerform(100)).to.be.reverted + }) + }) + + describe('setMaxCheck()', () => { + it('configures the MaxCheck', async () => { + await labm.connect(owner).setMaxCheck(BigNumber.from(100)) + const report = await labm.getMaxCheck() + assert.equal(report.toString(), '100') + }) + + it('is only callable by the owner', async () => { + await expect(labm.connect(stranger).setMaxCheck(100)).to.be.reverted + }) + }) + + describe('setUpkeepInterval()', () => { + it('configures the UpkeepInterval', async () => { + await labm.connect(owner).setUpkeepInterval(BigNumber.from(100)) + const report = await labm.getUpkeepInterval() + assert.equal(report.toString(), '100') + }) + + it('is only callable by the owner', async () => { + await expect(labm.connect(stranger).setUpkeepInterval(100)).to.be.reverted + }) + }) + + describe('withdraw()', () => { + beforeEach(async () => { + const tx = await lt.connect(owner).transfer(labm.address, onePLI) + await tx.wait() + }) + + it('Should allow the owner to withdraw', async () => { + const 
beforeBalance = await lt.balanceOf(owner.address) + const tx = await labm.connect(owner).withdraw(onePLI, owner.address) + await tx.wait() + const afterBalance = await lt.balanceOf(owner.address) + assert.isTrue( + afterBalance.gt(beforeBalance), + 'balance did not increase after withdraw', + ) + }) + + it('Should emit an event', async () => { + const tx = await labm.connect(owner).withdraw(onePLI, owner.address) + await expect(tx) + .to.emit(labm, 'FundsWithdrawn') + .withArgs(onePLI, owner.address) + }) + + it('Should allow the owner to withdraw to anyone', async () => { + const beforeBalance = await lt.balanceOf(stranger.address) + const tx = await labm.connect(owner).withdraw(onePLI, stranger.address) + await tx.wait() + const afterBalance = await lt.balanceOf(stranger.address) + assert.isTrue( + beforeBalance.add(onePLI).eq(afterBalance), + 'balance did not increase after withdraw', + ) + }) + + it('Should not allow strangers to withdraw', async () => { + const tx = labm.connect(stranger).withdraw(onePLI, owner.address) + await expect(tx).to.be.reverted + }) + }) + + describe('pause() / unpause()', () => { + it('Should allow owner to pause / unpause', async () => { + const pauseTx = await labm.connect(owner).pause() + await pauseTx.wait() + const unpauseTx = await labm.connect(owner).unpause() + await unpauseTx.wait() + }) + + it('Should not allow strangers to pause / unpause', async () => { + const pauseTxStranger = labm.connect(stranger).pause() + await expect(pauseTxStranger).to.be.reverted + const pauseTxOwner = await labm.connect(owner).pause() + await pauseTxOwner.wait() + const unpauseTxStranger = labm.connect(stranger).unpause() + await expect(unpauseTxStranger).to.be.reverted + }) + }) + + describe('setWatchList() / addToWatchListOrDecomissionOrDecomission() / removeFromWatchlist() / getWatchList()', () => { + const watchAddress1 = randAddr() + const watchAddress2 = randAddr() + const watchAddress3 = randAddr() + + beforeEach(async () => { + // reset 
watchlist to empty before running these tests + await labm.connect(owner).setWatchList([], [], [], []) + const watchList = await labm.getWatchList() + assert.deepEqual(watchList, []) + }) + + it('Should allow owner to adjust the watchlist', async () => { + // add first watchlist + let tx = await labm + .connect(owner) + .setWatchList([watchAddress1], [onePLI], [onePLI], [0]) + let watchList = await labm.getWatchList() + assert.deepEqual(watchList[0], watchAddress1) + // add more to watchlist + tx = await labm + .connect(owner) + .setWatchList( + [watchAddress1, watchAddress2, watchAddress3], + [onePLI, onePLI, onePLI], + [onePLI, onePLI, onePLI], + [1, 2, 3], + ) + await tx.wait() + watchList = await labm.getWatchList() + assert.deepEqual(watchList, [watchAddress1, watchAddress2, watchAddress3]) + }) + + it('Should not allow different length arrays in the watchlist', async () => { + const errMsg = `InvalidWatchList()` + let tx = labm + .connect(owner) + .setWatchList( + [watchAddress1, watchAddress2, watchAddress1], + [onePLI, onePLI], + [onePLI, onePLI], + [1, 2], + ) + await expect(tx).to.be.revertedWith(errMsg) + }) + + it('Should not allow duplicates in the watchlist', async () => { + const errMsg = `DuplicateAddress("${watchAddress1}")` + let tx = labm + .connect(owner) + .setWatchList( + [watchAddress1, watchAddress2, watchAddress1], + [onePLI, onePLI, onePLI], + [onePLI, onePLI, onePLI], + [1, 2, 3], + ) + await expect(tx).to.be.revertedWith(errMsg) + }) + + it('Should not allow strangers to set the watchlist', async () => { + const setTxStranger = labm + .connect(stranger) + .setWatchList([watchAddress1], [onePLI], [onePLI], [0]) + await expect(setTxStranger).to.be.reverted + }) + + it('Should revert if any of the addresses are empty', async () => { + let tx = labm + .connect(owner) + .setWatchList( + [watchAddress1, ethers.constants.AddressZero], + [onePLI, onePLI], + [onePLI, onePLI], + [1, 2], + ) + await 
expect(tx).to.be.revertedWith(INVALID_WATCHLIST_ERR) + }) + + it('Should allow owner to add multiple addresses with dstChainSelector 0 to the watchlist', async () => { + let tx = await labm + .connect(owner) + .addToWatchListOrDecomission(watchAddress1, 0) + await tx.wait() + let watchList = await labm.getWatchList() + assert.deepEqual(watchList[0], watchAddress1) + + tx = await labm + .connect(owner) + .addToWatchListOrDecomission(watchAddress2, 0) + await tx.wait() + watchList = await labm.getWatchList() + assert.deepEqual(watchList[0], watchAddress1) + assert.deepEqual(watchList[1], watchAddress2) + + tx = await labm + .connect(owner) + .addToWatchListOrDecomission(watchAddress3, 0) + await tx.wait() + watchList = await labm.getWatchList() + assert.deepEqual(watchList[0], watchAddress1) + assert.deepEqual(watchList[1], watchAddress2) + assert.deepEqual(watchList[2], watchAddress3) + }) + + it('Should allow owner to add only one address with an unique non-zero dstChainSelector 0 to the watchlist', async () => { + let tx = await labm + .connect(owner) + .addToWatchListOrDecomission(watchAddress1, 1) + await tx.wait() + let watchList = await labm.getWatchList() + assert.deepEqual(watchList[0], watchAddress1) + + // 1 is active + let report = await labm.getAccountInfo(watchAddress1) + assert.isTrue(report.isActive) + + tx = await labm + .connect(owner) + .addToWatchListOrDecomission(watchAddress2, 1) + await tx.wait() + watchList = await labm.getWatchList() + assert.deepEqual(watchList[0], watchAddress2) + + // 2 is active, 1 should be false + report = await labm.getAccountInfo(watchAddress2) + assert.isTrue(report.isActive) + report = await labm.getAccountInfo(watchAddress1) + assert.isFalse(report.isActive) + + tx = await labm + .connect(owner) + .addToWatchListOrDecomission(watchAddress3, 1) + await tx.wait() + watchList = await labm.getWatchList() + assert.deepEqual(watchList[0], watchAddress3) + + // 3 is active, 1 and 2 should be false + report = await 
labm.getAccountInfo(watchAddress3) + assert.isTrue(report.isActive) + report = await labm.getAccountInfo(watchAddress2) + assert.isFalse(report.isActive) + report = await labm.getAccountInfo(watchAddress1) + assert.isFalse(report.isActive) + }) + + it('Should not add address 0 to the watchlist', async () => { + await labm + .connect(owner) + .addToWatchListOrDecomission(ethers.constants.AddressZero, 1) + expect(await labm.getWatchList()).to.not.contain( + ethers.constants.AddressZero, + ) + }) + + it('Should not allow stangers to add addresses to the watchlist', async () => { + await expect( + labm.connect(stranger).addToWatchListOrDecomission(watchAddress1, 1), + ).to.be.reverted + }) + + it('Should allow owner to remove addresses from the watchlist', async () => { + let tx = await labm + .connect(owner) + .addToWatchListOrDecomission(watchAddress1, 1) + await tx.wait() + let watchList = await labm.getWatchList() + assert.deepEqual(watchList[0], watchAddress1) + let report = await labm.getAccountInfo(watchAddress1) + assert.isTrue(report.isActive) + + // remove address + tx = await labm.connect(owner).removeFromWatchList(watchAddress1) + + // address should be false + report = await labm.getAccountInfo(watchAddress1) + assert.isFalse(report.isActive) + + watchList = await labm.getWatchList() + assert.deepEqual(watchList, []) + }) + + it('Should allow only one address per dstChainSelector', async () => { + // add address1 + await labm.connect(owner).addToWatchListOrDecomission(watchAddress1, 1) + expect(await labm.getWatchList()).to.contain(watchAddress1) + + // add address2 + await labm.connect(owner).addToWatchListOrDecomission(watchAddress2, 1) + + // only address2 has to be in the watchlist + const watchlist = await labm.getWatchList() + expect(watchlist).to.not.contain(watchAddress1) + expect(watchlist).to.contain(watchAddress2) + }) + + it('Should delete the onRamp address on a zero-address with same dstChainSelector', async () => { + // add address1 + await 
labm.connect(owner).addToWatchListOrDecomission(watchAddress1, 1) + expect(await labm.getWatchList()).to.contain(watchAddress1) + + // simulates an onRampSet(zeroAddress, same dstChainSelector) + await labm + .connect(owner) + .addToWatchListOrDecomission(ethers.constants.AddressZero, 1) + + // address1 should be cleaned + const watchlist = await labm.getWatchList() + expect(watchlist).to.not.contain(watchAddress1) + assert.deepEqual(watchlist, []) + }) + }) + + describe('checkUpkeep() / sampleUnderfundedAddresses() [ @skip-coverage ]', () => { + it('Should return list of address that are underfunded', async () => { + const fundTx = await lt + .connect(owner) + .transfer(labm.address, oneHundredPLI) + await fundTx.wait() + + await labm.setWatchList( + watchListAddresses, + watchListMinBalances, + watchListTopUpAmounts, + watchListDstChainSelectors, + ) + + const [should, payload] = await labm.checkUpkeep('0x') + assert.isTrue(should) + let [addresses] = ethers.utils.defaultAbiCoder.decode( + ['address[]'], + payload, + ) + + expect(addresses).to.deep.equalInAnyOrder(watchListAddresses) + addresses = await labm.sampleUnderfundedAddresses() + expect(addresses).to.deep.equalInAnyOrder(watchListAddresses) + }) + + it('Should omit aggregators that have sufficient funding', async () => { + const fundTx = await lt.connect(owner).transfer( + labm.address, + oneHundredPLI, // enough for anything that needs funding + ) + await fundTx.wait() + + await labm.setWatchList( + [aggregator2.address, directTarget1.address, directTarget2.address], + [onePLI, twoPLI, twoPLI], + [onePLI, onePLI, onePLI], + [1, 2, 3], + ) + + // all of them are underfunded, return 3 + await aggregator2.mock.linkAvailableForPayment.returns(zeroPLI) + await directTarget1.mock.linkAvailableForPayment.returns(zeroPLI) + await directTarget2.mock.linkAvailableForPayment.returns(zeroPLI) + + let addresses = await labm.sampleUnderfundedAddresses() + expect(addresses).to.deep.equalInAnyOrder([ + 
aggregator2.address, + directTarget1.address, + directTarget2.address, + ]) + + await aggregator2.mock.linkAvailableForPayment.returns(onePLI) // aggregator2 is enough funded + await directTarget1.mock.linkAvailableForPayment.returns(onePLI) // directTarget1 is NOT enough funded + await directTarget2.mock.linkAvailableForPayment.returns(onePLI) // directTarget2 is NOT funded + addresses = await labm.sampleUnderfundedAddresses() + expect(addresses).to.deep.equalInAnyOrder([ + directTarget1.address, + directTarget2.address, + ]) + + await directTarget1.mock.linkAvailableForPayment.returns(tenPLI) + addresses = await labm.sampleUnderfundedAddresses() + expect(addresses).to.deep.equalInAnyOrder([directTarget2.address]) + + await directTarget2.mock.linkAvailableForPayment.returns(tenPLI) + addresses = await labm.sampleUnderfundedAddresses() + expect(addresses).to.deep.equalInAnyOrder([]) + }) + + it('Should revert when paused', async () => { + const tx = await labm.connect(owner).pause() + await tx.wait() + const ethCall = labm.checkUpkeep('0x') + await expect(ethCall).to.be.revertedWith(PAUSED_ERR) + }) + + context('with a large set of proxies', async () => { + // in this test, we cheat a little bit and point each proxy to the same aggregator, + // which helps cut down on test time + let MAX_PERFORM: number + let MAX_CHECK: number + let proxyAddresses: string[] + let minBalances: BigNumber[] + let topUpAmount: BigNumber[] + let aggregators: MockContract[] + let dstChainSelectors: number[] + + beforeEach(async () => { + MAX_PERFORM = await labm.getMaxPerform() + MAX_CHECK = await labm.getMaxCheck() + proxyAddresses = [] + minBalances = [] + topUpAmount = [] + aggregators = [] + dstChainSelectors = [] + const numAggregators = MAX_CHECK + 50 + for (let idx = 0; idx < numAggregators; idx++) { + const proxy = await deployMockContract( + owner, + IAggregatorProxyFactory.abi, + ) + const aggregator = await deployMockContract( + owner, + ILinkAvailableFactory.abi, + ) + await 
proxy.mock.aggregator.returns(aggregator.address) + await aggregator.mock.linkAvailableForPayment.returns(0) + proxyAddresses.push(proxy.address) + minBalances.push(onePLI) + topUpAmount.push(onePLI) + aggregators.push(aggregator) + dstChainSelectors.push(0) + } + await labm.setWatchList( + proxyAddresses, + minBalances, + topUpAmount, + dstChainSelectors, + ) + let watchlist = await labm.getWatchList() + expect(watchlist).to.deep.equalInAnyOrder(proxyAddresses) + assert.equal(watchlist.length, minBalances.length) + }) + + it('Should not include more than MAX_PERFORM addresses', async () => { + const addresses = await labm.sampleUnderfundedAddresses() + expect(addresses.length).to.be.lessThanOrEqual(MAX_PERFORM) + }) + + it('Should sample from the list of addresses pseudorandomly', async () => { + const firstAddress: string[] = [] + for (let idx = 0; idx < 10; idx++) { + const addresses = await labm.sampleUnderfundedAddresses() + assert.equal(addresses.length, MAX_PERFORM) + assert.equal( + new Set(addresses).size, + MAX_PERFORM, + 'duplicate address found', + ) + firstAddress.push(addresses[0]) + await mineBlock(ethers.provider) + } + assert( + new Set(firstAddress).size > 1, + 'sample did not shuffle starting index', + ) + }) + + it('Can check MAX_CHECK upkeeps within the allotted gas limit', async () => { + for (const aggregator of aggregators) { + // here we make no aggregators eligible for funding, requiring the function to + // traverse the whole list + await aggregator.mock.linkAvailableForPayment.returns(tenPLI) + } + await labm.checkUpkeep('0x', { gasLimit: TARGET_CHECK_GAS_LIMIT }) + }) + }) + }) + + describe('performUpkeep()', () => { + let validPayload: string + + beforeEach(async () => { + validPayload = ethers.utils.defaultAbiCoder.encode( + ['address[]'], + [watchListAddresses], + ) + await labm + .connect(owner) + .setWatchList( + watchListAddresses, + watchListMinBalances, + watchListTopUpAmounts, + watchListDstChainSelectors, + ) + }) + + 
it('Should revert when paused', async () => { + await labm.connect(owner).pause() + const performTx = labm.connect(keeperRegistry).performUpkeep(validPayload) + await expect(performTx).to.be.revertedWith(PAUSED_ERR) + }) + + it('Should fund the appropriate addresses', async () => { + await aggregator1.mock.linkAvailableForPayment.returns(zeroPLI) + await aggregator2.mock.linkAvailableForPayment.returns(zeroPLI) + await aggregator3.mock.linkAvailableForPayment.returns(zeroPLI) + await directTarget1.mock.linkAvailableForPayment.returns(zeroPLI) + await directTarget2.mock.linkAvailableForPayment.returns(zeroPLI) + + const fundTx = await lt.connect(owner).transfer(labm.address, tenPLI) + await fundTx.wait() + + h.assertLinkTokenBalance(lt, aggregator1.address, zeroPLI) + h.assertLinkTokenBalance(lt, aggregator2.address, zeroPLI) + h.assertLinkTokenBalance(lt, aggregator3.address, zeroPLI) + h.assertLinkTokenBalance(lt, directTarget1.address, zeroPLI) + h.assertLinkTokenBalance(lt, directTarget2.address, zeroPLI) + + const performTx = await labm + .connect(keeperRegistry) + .performUpkeep(validPayload, { gasLimit: 1_500_000 }) + await performTx.wait() + + h.assertLinkTokenBalance(lt, aggregator1.address, twoPLI) + h.assertLinkTokenBalance(lt, aggregator2.address, twoPLI) + h.assertLinkTokenBalance(lt, aggregator3.address, twoPLI) + h.assertLinkTokenBalance(lt, directTarget1.address, twoPLI) + h.assertLinkTokenBalance(lt, directTarget2.address, twoPLI) + }) + + it('Can handle MAX_PERFORM proxies within gas limit', async () => { + const MAX_PERFORM = await labm.getMaxPerform() + const proxyAddresses = [] + const minBalances = [] + const topUpAmount = [] + const dstChainSelectors = [] + for (let idx = 0; idx < MAX_PERFORM; idx++) { + const proxy = await deployMockContract( + owner, + IAggregatorProxyFactory.abi, + ) + const aggregator = await deployMockContract( + owner, + ILinkAvailableFactory.abi, + ) + await proxy.mock.aggregator.returns(aggregator.address) + await 
aggregator.mock.linkAvailableForPayment.returns(0) + proxyAddresses.push(proxy.address) + minBalances.push(onePLI) + topUpAmount.push(onePLI) + dstChainSelectors.push(0) + } + await labm.setWatchList( + proxyAddresses, + minBalances, + topUpAmount, + dstChainSelectors, + ) + let watchlist = await labm.getWatchList() + expect(watchlist).to.deep.equalInAnyOrder(proxyAddresses) + assert.equal(watchlist.length, minBalances.length) + + // add funds + const wl = await labm.getWatchList() + let fundsNeeded = BigNumber.from(0) + for (let idx = 0; idx < wl.length; idx++) { + const targetInfo = await labm.getAccountInfo(wl[idx]) + const targetTopUpAmount = targetInfo.topUpAmount + fundsNeeded = fundsNeeded.add(targetTopUpAmount) + } + await lt.connect(owner).transfer(labm.address, fundsNeeded) + + // encode payload + const payload = ethers.utils.defaultAbiCoder.encode( + ['address[]'], + [proxyAddresses], + ) + + // do the thing + await labm + .connect(keeperRegistry) + .performUpkeep(payload, { gasLimit: TARGET_PERFORM_GAS_LIMIT }) + }) + }) + + describe('topUp()', () => { + it('Should revert topUp address(0)', async () => { + const tx = await labm.connect(owner).topUp([ethers.constants.AddressZero]) + await expect(tx).to.emit(labm, 'TopUpBlocked') + }) + + context('when not paused', () => { + it('Should be callable by anyone', async () => { + const users = [owner, keeperRegistry, stranger] + for (let idx = 0; idx < users.length; idx++) { + const user = users[idx] + await labm.connect(user).topUp([]) + } + }) + }) + + context('when paused', () => { + it('Should be callable by no one', async () => { + await labm.connect(owner).pause() + const users = [owner, keeperRegistry, stranger] + for (let idx = 0; idx < users.length; idx++) { + const user = users[idx] + const tx = labm.connect(user).topUp([]) + await expect(tx).to.be.revertedWith(PAUSED_ERR) + } + }) + }) + + context('when fully funded', () => { + beforeEach(async () => { + await lt.connect(owner).transfer(labm.address, tenPLI) + 
await assertContractLinkBalances( + zeroPLI, + zeroPLI, + zeroPLI, + zeroPLI, + zeroPLI, + ) + }) + + it('Should fund the appropriate addresses', async () => { + const tx = await labm.connect(keeperRegistry).topUp(watchListAddresses) + + await aggregator1.mock.linkAvailableForPayment.returns(twoPLI) + await aggregator2.mock.linkAvailableForPayment.returns(twoPLI) + await aggregator3.mock.linkAvailableForPayment.returns(twoPLI) + await directTarget1.mock.linkAvailableForPayment.returns(twoPLI) + await directTarget2.mock.linkAvailableForPayment.returns(twoPLI) + + await expect(tx) + .to.emit(labm, 'TopUpSucceeded') + .withArgs(proxy1.address) + await expect(tx) + .to.emit(labm, 'TopUpSucceeded') + .withArgs(proxy2.address) + await expect(tx) + .to.emit(labm, 'TopUpSucceeded') + .withArgs(proxy3.address) + await expect(tx) + .to.emit(labm, 'TopUpSucceeded') + .withArgs(directTarget1.address) + await expect(tx) + .to.emit(labm, 'TopUpSucceeded') + .withArgs(directTarget2.address) + }) + + it('Should only fund the addresses provided', async () => { + await labm + .connect(keeperRegistry) + .topUp([proxy1.address, directTarget1.address]) + + await aggregator1.mock.linkAvailableForPayment.returns(twoPLI) + await aggregator2.mock.linkAvailableForPayment.returns(zeroPLI) + await aggregator3.mock.linkAvailableForPayment.returns(zeroPLI) + await directTarget1.mock.linkAvailableForPayment.returns(twoPLI) + await directTarget2.mock.linkAvailableForPayment.returns(zeroPLI) + }) + + it('Should skip un-approved addresses', async () => { + await labm + .connect(owner) + .setWatchList( + [proxy1.address, directTarget1.address], + [onePLI, onePLI], + [onePLI, onePLI], + [1, 2], + ) + const tx = await labm + .connect(keeperRegistry) + .topUp([ + proxy1.address, + proxy2.address, + proxy3.address, + directTarget1.address, + directTarget2.address, + ]) + + h.assertLinkTokenBalance(lt, aggregator1.address, twoPLI) + h.assertLinkTokenBalance(lt, aggregator2.address, zeroPLI) + 
h.assertLinkTokenBalance(lt, aggregator3.address, zeroPLI) + h.assertLinkTokenBalance(lt, directTarget1.address, twoPLI) + h.assertLinkTokenBalance(lt, directTarget2.address, zeroPLI) + + await expect(tx) + .to.emit(labm, 'TopUpSucceeded') + .withArgs(proxy1.address) + await expect(tx) + .to.emit(labm, 'TopUpSucceeded') + .withArgs(directTarget1.address) + await expect(tx).to.emit(labm, 'TopUpBlocked').withArgs(proxy2.address) + await expect(tx).to.emit(labm, 'TopUpBlocked').withArgs(proxy3.address) + await expect(tx) + .to.emit(labm, 'TopUpBlocked') + .withArgs(directTarget2.address) + }) + + it('Should skip an address if the proxy is invalid and it is not a direct target', async () => { + await labm + .connect(owner) + .setWatchList( + [proxy1.address, proxy4.address], + [onePLI, onePLI], + [onePLI, onePLI], + [1, 2], + ) + const tx = await labm + .connect(keeperRegistry) + .topUp([proxy1.address, proxy4.address]) + await expect(tx) + .to.emit(labm, 'TopUpSucceeded') + .withArgs(proxy1.address) + await expect(tx).to.emit(labm, 'TopUpBlocked').withArgs(proxy4.address) + }) + + it('Should skip an address if the aggregator is invalid', async () => { + await proxy4.mock.aggregator.returns(aggregator4.address) + await labm + .connect(owner) + .setWatchList( + [proxy1.address, proxy4.address], + [onePLI, onePLI], + [onePLI, onePLI], + [1, 2], + ) + const tx = await labm + .connect(keeperRegistry) + .topUp([proxy1.address, proxy4.address]) + await expect(tx) + .to.emit(labm, 'TopUpSucceeded') + .withArgs(proxy1.address) + await expect(tx).to.emit(labm, 'TopUpBlocked').withArgs(proxy4.address) + }) + + it('Should skip an address if the aggregator has sufficient funding', async () => { + await proxy4.mock.aggregator.returns(aggregator4.address) + await aggregator4.mock.linkAvailableForPayment.returns(tenPLI) + await labm + .connect(owner) + .setWatchList( + [proxy1.address, proxy4.address], + [onePLI, onePLI], + [onePLI, onePLI], + [1, 2], + ) + const tx = await labm + 
.connect(keeperRegistry) + .topUp([proxy1.address, proxy4.address]) + await expect(tx) + .to.emit(labm, 'TopUpSucceeded') + .withArgs(proxy1.address) + await expect(tx).to.emit(labm, 'TopUpBlocked').withArgs(proxy4.address) + }) + + it('Should skip an address if the direct target has sufficient funding', async () => { + await directTarget1.mock.linkAvailableForPayment.returns(tenPLI) + await labm + .connect(owner) + .setWatchList( + [proxy1.address, directTarget1.address], + [onePLI, onePLI], + [onePLI, onePLI], + [1, 2], + ) + const tx = await labm + .connect(keeperRegistry) + .topUp([proxy1.address, directTarget1.address]) + await expect(tx) + .to.emit(labm, 'TopUpSucceeded') + .withArgs(proxy1.address) + await expect(tx) + .to.emit(labm, 'TopUpBlocked') + .withArgs(directTarget1.address) + }) + }) + + context('when partially funded', () => { + it('Should fund as many addresses as possible T', async () => { + await lt.connect(owner).transfer( + labm.address, + fourPLI, // only enough PLI to fund 2 addresses + ) + + await aggregator1.mock.linkAvailableForPayment.returns(twoPLI) + await aggregator2.mock.linkAvailableForPayment.returns(twoPLI) + await aggregator3.mock.linkAvailableForPayment.returns(zeroPLI) + await directTarget1.mock.linkAvailableForPayment.returns(zeroPLI) + await directTarget2.mock.linkAvailableForPayment.returns(zeroPLI) + + h.assertLinkTokenBalance(lt, aggregator1.address, twoPLI) + h.assertLinkTokenBalance(lt, aggregator2.address, twoPLI) + h.assertLinkTokenBalance(lt, aggregator3.address, zeroPLI) + h.assertLinkTokenBalance(lt, directTarget1.address, zeroPLI) + h.assertLinkTokenBalance(lt, directTarget2.address, zeroPLI) + + const tx = await labm.connect(keeperRegistry).topUp(watchListAddresses) + await expect(tx).to.emit(labm, 'TopUpSucceeded') + }) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/UpkeepBalanceMonitor.test.ts b/contracts/test/v0.8/automation/UpkeepBalanceMonitor.test.ts new file mode 100644 index 00000000..259a9c3b 
--- /dev/null +++ b/contracts/test/v0.8/automation/UpkeepBalanceMonitor.test.ts @@ -0,0 +1,399 @@ +import { ethers } from 'hardhat' +import { expect } from 'chai' +import type { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { randomAddress } from '../../test-helpers/helpers' +import { loadFixture } from '@nomicfoundation/hardhat-network-helpers' +import { IKeeperRegistryMaster__factory as RegistryFactory } from '../../../typechain/factories/IKeeperRegistryMaster__factory' +import { IAutomationForwarder__factory as ForwarderFactory } from '../../../typechain/factories/IAutomationForwarder__factory' +import { UpkeepBalanceMonitor } from '../../../typechain/UpkeepBalanceMonitor' +import { LinkToken } from '../../../typechain/LinkToken' +import { BigNumber } from 'ethers' +import { + deployMockContract, + MockContract, +} from '@ethereum-waffle/mock-contract' + +let owner: SignerWithAddress +let stranger: SignerWithAddress +let registry: MockContract +let registry2: MockContract +let forwarder: MockContract +let linkToken: LinkToken +let upkeepBalanceMonitor: UpkeepBalanceMonitor + +const setup = async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + stranger = accounts[1] + + const ltFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + owner, + ) + linkToken = (await ltFactory.deploy()) as LinkToken + const bmFactory = await ethers.getContractFactory( + 'UpkeepBalanceMonitor', + owner, + ) + upkeepBalanceMonitor = await bmFactory.deploy(linkToken.address, { + maxBatchSize: 10, + minPercentage: 120, + targetPercentage: 300, + maxTopUpAmount: ethers.utils.parseEther('100'), + }) + registry = await deployMockContract(owner, RegistryFactory.abi) + registry2 = await deployMockContract(owner, RegistryFactory.abi) + forwarder = await deployMockContract(owner, ForwarderFactory.abi) + await forwarder.mock.getRegistry.returns(registry.address) + await 
upkeepBalanceMonitor.setForwarder(forwarder.address) + await linkToken + .connect(owner) + .transfer(upkeepBalanceMonitor.address, ethers.utils.parseEther('10000')) + await upkeepBalanceMonitor + .connect(owner) + .setWatchList(registry.address, [0, 1, 2, 3, 4, 5, 6, 7, 8]) + await upkeepBalanceMonitor + .connect(owner) + .setWatchList(registry2.address, [9, 10, 11]) + for (let i = 0; i < 9; i++) { + await registry.mock.getMinBalance.withArgs(i).returns(100) + await registry.mock.getBalance.withArgs(i).returns(121) // all upkeeps are sufficiently funded + } + for (let i = 9; i < 12; i++) { + await registry2.mock.getMinBalance.withArgs(i).returns(100) + await registry2.mock.getBalance.withArgs(i).returns(121) // all upkeeps are sufficiently funded + } +} + +describe('UpkeepBalanceMonitor', () => { + beforeEach(async () => { + await loadFixture(setup) + }) + + describe('constructor()', () => { + it('should set the initial values correctly', async () => { + const config = await upkeepBalanceMonitor.getConfig() + expect(config.maxBatchSize).to.equal(10) + expect(config.minPercentage).to.equal(120) + expect(config.targetPercentage).to.equal(300) + expect(config.maxTopUpAmount).to.equal(ethers.utils.parseEther('100')) + }) + }) + + describe('setConfig()', () => { + const newConfig = { + maxBatchSize: 100, + minPercentage: 150, + targetPercentage: 500, + maxTopUpAmount: 1, + } + + it('should set config correctly', async () => { + await upkeepBalanceMonitor.connect(owner).setConfig(newConfig) + const config = await upkeepBalanceMonitor.getConfig() + expect(config.maxBatchSize).to.equal(newConfig.maxBatchSize) + expect(config.minPercentage).to.equal(newConfig.minPercentage) + expect(config.targetPercentage).to.equal(newConfig.targetPercentage) + expect(config.maxTopUpAmount).to.equal(newConfig.maxTopUpAmount) + }) + + it('cannot be called by a non-owner', async () => { + await expect( + upkeepBalanceMonitor.connect(stranger).setConfig(newConfig), + 
).to.be.revertedWith('Only callable by owner') + }) + + it('should emit an event', async () => { + await expect( + upkeepBalanceMonitor.connect(owner).setConfig(newConfig), + ).to.emit(upkeepBalanceMonitor, 'ConfigSet') + }) + }) + + describe('setForwarder()', () => { + const newForwarder = randomAddress() + + it('should set the forwarder correctly', async () => { + await upkeepBalanceMonitor.connect(owner).setForwarder(newForwarder) + const forwarderAddress = await upkeepBalanceMonitor.getForwarder() + expect(forwarderAddress).to.equal(newForwarder) + }) + + it('cannot be called by a non-owner', async () => { + await expect( + upkeepBalanceMonitor.connect(stranger).setForwarder(randomAddress()), + ).to.be.revertedWith('Only callable by owner') + }) + + it('should emit an event', async () => { + await expect( + upkeepBalanceMonitor.connect(owner).setForwarder(newForwarder), + ) + .to.emit(upkeepBalanceMonitor, 'ForwarderSet') + .withArgs(newForwarder) + }) + }) + + describe('setWatchList()', () => { + const newWatchList = [ + BigNumber.from(1), + BigNumber.from(2), + BigNumber.from(10), + ] + + it('should add addresses to the watchlist', async () => { + await upkeepBalanceMonitor + .connect(owner) + .setWatchList(registry.address, newWatchList) + const [_, upkeepIDs] = await upkeepBalanceMonitor.getWatchList() + expect(upkeepIDs[0]).to.deep.equal(newWatchList) + }) + + it('cannot be called by a non-owner', async () => { + await expect( + upkeepBalanceMonitor + .connect(stranger) + .setWatchList(registry.address, [1, 2, 3]), + ).to.be.revertedWith('Only callable by owner') + }) + + it('should emit an event', async () => { + await expect( + upkeepBalanceMonitor + .connect(owner) + .setWatchList(registry.address, newWatchList), + ) + .to.emit(upkeepBalanceMonitor, 'WatchListSet') + .withArgs(registry.address) + }) + }) + + describe('withdraw()', () => { + const payee = randomAddress() + const withdrawAmount = 100 + + it('should withdraw funds to a payee', async () => 
{ + const initialBalance = await linkToken.balanceOf( + upkeepBalanceMonitor.address, + ) + await upkeepBalanceMonitor.connect(owner).withdraw(withdrawAmount, payee) + const finalBalance = await linkToken.balanceOf( + upkeepBalanceMonitor.address, + ) + const payeeBalance = await linkToken.balanceOf(payee) + expect(finalBalance).to.equal(initialBalance.sub(withdrawAmount)) + expect(payeeBalance).to.equal(withdrawAmount) + }) + + it('cannot be called by a non-owner', async () => { + await expect( + upkeepBalanceMonitor.connect(stranger).withdraw(withdrawAmount, payee), + ).to.be.revertedWith('Only callable by owner') + }) + + it('should emit an event', async () => { + await expect( + upkeepBalanceMonitor.connect(owner).withdraw(withdrawAmount, payee), + ) + .to.emit(upkeepBalanceMonitor, 'FundsWithdrawn') + .withArgs(100, payee) + }) + }) + + describe('pause() and unpause()', () => { + it('should pause and unpause the contract', async () => { + await upkeepBalanceMonitor.connect(owner).pause() + expect(await upkeepBalanceMonitor.paused()).to.be.true + await upkeepBalanceMonitor.connect(owner).unpause() + expect(await upkeepBalanceMonitor.paused()).to.be.false + }) + + it('cannot be called by a non-owner', async () => { + await expect( + upkeepBalanceMonitor.connect(stranger).pause(), + ).to.be.revertedWith('Only callable by owner') + await upkeepBalanceMonitor.connect(owner).pause() + await expect( + upkeepBalanceMonitor.connect(stranger).unpause(), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('checkUpkeep() / getUnderfundedUpkeeps()', () => { + it('should find the underfunded upkeeps', async () => { + let [upkeepIDs, registries, topUpAmounts] = + await upkeepBalanceMonitor.getUnderfundedUpkeeps() + expect(upkeepIDs.length).to.equal(0) + expect(registries.length).to.equal(0) + expect(topUpAmounts.length).to.equal(0) + let [upkeepNeeded, performData] = + await upkeepBalanceMonitor.checkUpkeep('0x') + expect(upkeepNeeded).to.be.false + 
expect(performData).to.equal('0x') + // update the balance for some upkeeps + await registry.mock.getBalance.withArgs(2).returns(120) + await registry.mock.getBalance.withArgs(4).returns(15) + await registry.mock.getBalance.withArgs(5).returns(0) + ;[upkeepIDs, registries, topUpAmounts] = + await upkeepBalanceMonitor.getUnderfundedUpkeeps() + expect(upkeepIDs.map((v) => v.toNumber())).to.deep.equal([2, 4, 5]) + expect(registries).to.deep.equal([ + registry.address, + registry.address, + registry.address, + ]) + expect(topUpAmounts.map((v) => v.toNumber())).to.deep.equal([ + 180, 285, 300, + ]) + ;[upkeepNeeded, performData] = + await upkeepBalanceMonitor.checkUpkeep('0x') + expect(upkeepNeeded).to.be.true + expect(performData).to.equal( + ethers.utils.defaultAbiCoder.encode( + ['uint256[]', 'address[]', 'uint256[]'], + [ + [2, 4, 5], + [registry.address, registry.address, registry.address], + [180, 285, 300], + ], + ), + ) + // update all to need funding + for (let i = 0; i < 9; i++) { + await registry.mock.getBalance.withArgs(i).returns(0) + } + for (let i = 9; i < 12; i++) { + await registry2.mock.getBalance.withArgs(i).returns(0) + } + // only the max batch size are included in the list + ;[upkeepIDs, registries, topUpAmounts] = + await upkeepBalanceMonitor.getUnderfundedUpkeeps() + expect(upkeepIDs.length).to.equal(10) + expect(topUpAmounts.length).to.equal(10) + expect(upkeepIDs.map((v) => v.toNumber())).to.deep.equal([ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + ]) + expect(registries).to.deep.equal([ + ...Array(9).fill(registry.address), + registry2.address, + ]) + expect(topUpAmounts.map((v) => v.toNumber())).to.deep.equal([ + ...Array(10).fill(300), + ]) + // update the balance for some upkeeps + await registry.mock.getBalance.withArgs(0).returns(300) + await registry.mock.getBalance.withArgs(5).returns(300) + ;[upkeepIDs, registries, topUpAmounts] = + await upkeepBalanceMonitor.getUnderfundedUpkeeps() + expect(upkeepIDs.length).to.equal(10) + 
expect(topUpAmounts.length).to.equal(10) + expect(upkeepIDs.map((v) => v.toNumber())).to.deep.equal([ + 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, + ]) + expect(registries).to.deep.equal([ + ...Array(7).fill(registry.address), + ...Array(3).fill(registry2.address), + ]) + expect(topUpAmounts.map((v) => v.toNumber())).to.deep.equal([ + ...Array(10).fill(300), + ]) + }) + }) + + describe('topUp()', () => { + beforeEach(async () => { + await registry.mock.onTokenTransfer + .withArgs( + upkeepBalanceMonitor.address, + 100, + ethers.utils.defaultAbiCoder.encode(['uint256'], [1]), + ) + .returns() + await registry.mock.onTokenTransfer + .withArgs( + upkeepBalanceMonitor.address, + 50, + ethers.utils.defaultAbiCoder.encode(['uint256'], [7]), + ) + .returns() + }) + + it('cannot be called by a non-owner', async () => { + await expect( + upkeepBalanceMonitor.connect(stranger).topUp([], [], []), + ).to.be.revertedWith('OnlyForwarderOrOwner()') + }) + + it('should revert if the contract is paused', async () => { + await upkeepBalanceMonitor.connect(owner).pause() + await expect( + upkeepBalanceMonitor.connect(owner).topUp([], [], []), + ).to.be.revertedWith('Pausable: paused') + }) + + it('tops up the upkeeps by the amounts provided', async () => { + const initialBalance = await linkToken.balanceOf(registry.address) + const tx = await upkeepBalanceMonitor + .connect(owner) + .topUp([1, 7], [registry.address, registry.address], [100, 50]) + const finalBalance = await linkToken.balanceOf(registry.address) + expect(finalBalance).to.equal(initialBalance.add(150)) + await expect(tx) + .to.emit(upkeepBalanceMonitor, 'TopUpSucceeded') + .withArgs(1, 100) + await expect(tx) + .to.emit(upkeepBalanceMonitor, 'TopUpSucceeded') + .withArgs(7, 50) + }) + + it('does not abort if one top-up fails', async () => { + const initialBalance = await linkToken.balanceOf(registry.address) + const tx = await upkeepBalanceMonitor + .connect(owner) + .topUp( + [1, 7, 100], + [registry.address, registry.address, 
registry.address], + [100, 50, 100], + ) + const finalBalance = await linkToken.balanceOf(registry.address) + expect(finalBalance).to.equal(initialBalance.add(150)) + await expect(tx) + .to.emit(upkeepBalanceMonitor, 'TopUpSucceeded') + .withArgs(1, 100) + await expect(tx) + .to.emit(upkeepBalanceMonitor, 'TopUpSucceeded') + .withArgs(7, 50) + await expect(tx) + .to.emit(upkeepBalanceMonitor, 'TopUpFailed') + .withArgs(100) + }) + }) + + describe('checkUpkeep() / performUpkeep()', () => { + it('works round-trip', async () => { + await registry.mock.getBalance.withArgs(1).returns(100) // needs 200 + await registry.mock.getBalance.withArgs(7).returns(0) // needs 300 + await registry.mock.onTokenTransfer + .withArgs( + upkeepBalanceMonitor.address, + 200, + ethers.utils.defaultAbiCoder.encode(['uint256'], [1]), + ) + .returns() + await registry.mock.onTokenTransfer + .withArgs( + upkeepBalanceMonitor.address, + 300, + ethers.utils.defaultAbiCoder.encode(['uint256'], [7]), + ) + .returns() + const [upkeepNeeded, performData] = + await upkeepBalanceMonitor.checkUpkeep('0x') + expect(upkeepNeeded).to.be.true + const initialBalance = await linkToken.balanceOf(registry.address) + await upkeepBalanceMonitor.connect(owner).performUpkeep(performData) + const finalBalance = await linkToken.balanceOf(registry.address) + expect(finalBalance).to.equal(initialBalance.add(500)) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/UpkeepTranscoder.test.ts b/contracts/test/v0.8/automation/UpkeepTranscoder.test.ts new file mode 100644 index 00000000..6ce7673a --- /dev/null +++ b/contracts/test/v0.8/automation/UpkeepTranscoder.test.ts @@ -0,0 +1,69 @@ +import { ethers } from 'hardhat' +import { assert } from 'chai' +import { evmRevert } from '../../test-helpers/matchers' +import { UpkeepTranscoder__factory as UpkeepTranscoderFactory } from '../../../typechain/factories/UpkeepTranscoder__factory' +import { UpkeepTranscoder } from '../../../typechain' + +let upkeepTranscoderFactory: 
UpkeepTranscoderFactory +let transcoder: UpkeepTranscoder + +before(async () => { + upkeepTranscoderFactory = await ethers.getContractFactory('UpkeepTranscoder') +}) + +describe('UpkeepTranscoder', () => { + const formatV1 = 0 + const formatV2 = 1 + const formatV3 = 2 + + beforeEach(async () => { + transcoder = await upkeepTranscoderFactory.deploy() + }) + + describe('#typeAndVersion', () => { + it('uses the correct type and version', async () => { + const typeAndVersion = await transcoder.typeAndVersion() + assert.equal(typeAndVersion, 'UpkeepTranscoder 1.0.0') + }) + }) + + describe('#transcodeUpkeeps', () => { + const encodedData = '0xc0ffee' + + it('reverts if the from type is not an enum value', async () => { + await evmRevert(transcoder.transcodeUpkeeps(3, 1, encodedData)) + }) + + it('reverts if the from type != to type', async () => { + await evmRevert( + transcoder.transcodeUpkeeps(1, 2, encodedData), + 'InvalidTranscoding()', + ) + }) + + context('when from and to versions equal', () => { + it('returns the data that was passed in', async () => { + let response = await transcoder.transcodeUpkeeps( + formatV1, + formatV1, + encodedData, + ) + assert.equal(encodedData, response) + + response = await transcoder.transcodeUpkeeps( + formatV2, + formatV2, + encodedData, + ) + assert.equal(encodedData, response) + + response = await transcoder.transcodeUpkeeps( + formatV3, + formatV3, + encodedData, + ) + assert.equal(encodedData, response) + }) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/UpkeepTranscoder3_0.test.ts b/contracts/test/v0.8/automation/UpkeepTranscoder3_0.test.ts new file mode 100644 index 00000000..2f0f169a --- /dev/null +++ b/contracts/test/v0.8/automation/UpkeepTranscoder3_0.test.ts @@ -0,0 +1,576 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { UpkeepTranscoder30__factory as UpkeepTranscoderFactory } from '../../../typechain/factories/UpkeepTranscoder30__factory' +import { UpkeepTranscoder30 as 
UpkeepTranscoder } from '../../../typechain/UpkeepTranscoder30' +import { KeeperRegistry2_0__factory as KeeperRegistry2_0Factory } from '../../../typechain/factories/KeeperRegistry2_0__factory' +import { LinkToken__factory as LinkTokenFactory } from '../../../typechain/factories/LinkToken__factory' +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../../typechain/factories/MockV3Aggregator__factory' +import { UpkeepMock__factory as UpkeepMockFactory } from '../../../typechain/factories/UpkeepMock__factory' +import { evmRevert } from '../../test-helpers/matchers' +import { BigNumber, Signer } from 'ethers' +import { getUsers, Personas } from '../../test-helpers/setup' +import { KeeperRegistryLogic2_0__factory as KeeperRegistryLogic20Factory } from '../../../typechain/factories/KeeperRegistryLogic2_0__factory' +import { KeeperRegistry1_3__factory as KeeperRegistry1_3Factory } from '../../../typechain/factories/KeeperRegistry1_3__factory' +import { KeeperRegistryLogic1_3__factory as KeeperRegistryLogicFactory } from '../../../typechain/factories/KeeperRegistryLogic1_3__factory' +import { toWei } from '../../test-helpers/helpers' +import { LinkToken } from '../../../typechain' + +let upkeepMockFactory: UpkeepMockFactory +let upkeepTranscoderFactory: UpkeepTranscoderFactory +let transcoder: UpkeepTranscoder +let linkTokenFactory: LinkTokenFactory +let mockV3AggregatorFactory: MockV3AggregatorFactory +let keeperRegistryFactory20: KeeperRegistry2_0Factory +let keeperRegistryFactory13: KeeperRegistry1_3Factory +let keeperRegistryLogicFactory20: KeeperRegistryLogic20Factory +let keeperRegistryLogicFactory13: KeeperRegistryLogicFactory +let personas: Personas +let owner: Signer +let upkeepsV1: any[] +let upkeepsV2: any[] +let upkeepsV3: any[] +let admins: string[] +let admin0: Signer +let admin1: Signer +const executeGas = BigNumber.from('100000') +const paymentPremiumPPB = BigNumber.from('250000000') +const flatFeeMicroLink = BigNumber.from(0) +const 
blockCountPerTurn = BigNumber.from(3) +const randomBytes = '0x1234abcd' +const stalenessSeconds = BigNumber.from(43820) +const gasCeilingMultiplier = BigNumber.from(1) +const checkGasLimit = BigNumber.from(20000000) +const fallbackGasPrice = BigNumber.from(200) +const fallbackLinkPrice = BigNumber.from(200000000) +const maxPerformGas = BigNumber.from(5000000) +const minUpkeepSpend = BigNumber.from(0) +const maxCheckDataSize = BigNumber.from(1000) +const maxPerformDataSize = BigNumber.from(1000) +const mode = BigNumber.from(0) +const linkEth = BigNumber.from(300000000) +const gasWei = BigNumber.from(100) +const registryGasOverhead = BigNumber.from('80000') +const balance = 50000000000000 +const amountSpent = 200000000000000 +const target0 = '0xffffffffffffffffffffffffffffffffffffffff' +const target1 = '0xfffffffffffffffffffffffffffffffffffffffe' +const lastKeeper0 = '0x233a95ccebf3c9f934482c637c08b4015cdd6ddd' +const lastKeeper1 = '0x233a95ccebf3c9f934482c637c08b4015cdd6ddc' +enum UpkeepFormat { + V1, + V2, + V3, + V4, +} +const idx = [123, 124] + +async function getUpkeepID(tx: any) { + const receipt = await tx.wait() + return receipt.events[0].args.id +} + +const encodeConfig = (config: any) => { + return ethers.utils.defaultAbiCoder.encode( + [ + 'tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds\ + ,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,\ + uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,\ + address registrar)', + ], + [config], + ) +} + +const encodeUpkeepV1 = (ids: number[], upkeeps: any[], checkDatas: any[]) => { + return ethers.utils.defaultAbiCoder.encode( + [ + 'uint256[]', + 'tuple(uint96,address,uint32,uint64,address,uint96,address)[]', + 'bytes[]', + ], + [ids, upkeeps, checkDatas], + ) +} + +const encodeUpkeepV2 = (ids: number[], upkeeps: any[], checkDatas: any[]) => { + return 
ethers.utils.defaultAbiCoder.encode( + [ + 'uint256[]', + 'tuple(uint96,address,uint96,address,uint32,uint32,address,bool)[]', + 'bytes[]', + ], + [ids, upkeeps, checkDatas], + ) +} + +const encodeUpkeepV3 = ( + ids: number[], + upkeeps: any[], + checkDatas: any[], + admins: string[], +) => { + return ethers.utils.defaultAbiCoder.encode( + [ + 'uint256[]', + 'tuple(uint32,uint32,bool,address,uint96,uint96,uint32)[]', + 'bytes[]', + 'address[]', + ], + [ids, upkeeps, checkDatas, admins], + ) +} + +before(async () => { + // @ts-ignore bug in autogen file + upkeepTranscoderFactory = await ethers.getContractFactory( + 'UpkeepTranscoder3_0', + ) + personas = (await getUsers()).personas + + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + ) + // need full path because there are two contracts with name MockV3Aggregator + mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as unknown as MockV3AggregatorFactory + + upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') + + owner = personas.Norbert + admin0 = personas.Neil + admin1 = personas.Nick + admins = [ + (await admin0.getAddress()).toLowerCase(), + (await admin1.getAddress()).toLowerCase(), + ] +}) + +async function deployLinkToken() { + return await linkTokenFactory.connect(owner).deploy() +} + +async function deployFeeds() { + return [ + await mockV3AggregatorFactory.connect(owner).deploy(0, gasWei), + await mockV3AggregatorFactory.connect(owner).deploy(9, linkEth), + ] +} + +async function deployLegacyRegistry1_2( + linkToken: LinkToken, + gasPriceFeed: any, + linkEthFeed: any, +) { + const mock = await upkeepMockFactory.deploy() + // @ts-ignore bug in autogen file + const keeperRegistryFactory = + await ethers.getContractFactory('KeeperRegistry1_2') + transcoder = await upkeepTranscoderFactory.connect(owner).deploy() + const legacyRegistry = await keeperRegistryFactory + .connect(owner) + 
.deploy(linkToken.address, linkEthFeed.address, gasPriceFeed.address, { + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + const tx = await legacyRegistry + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin0.getAddress(), + randomBytes, + ) + const id = await getUpkeepID(tx) + return [id, legacyRegistry] +} + +async function deployLegacyRegistry1_3( + linkToken: LinkToken, + gasPriceFeed: any, + linkEthFeed: any, +) { + const mock = await upkeepMockFactory.deploy() + // @ts-ignore bug in autogen file + keeperRegistryFactory13 = await ethers.getContractFactory('KeeperRegistry1_3') + // @ts-ignore bug in autogen file + keeperRegistryLogicFactory13 = await ethers.getContractFactory( + 'KeeperRegistryLogic1_3', + ) + + const registryLogic13 = await keeperRegistryLogicFactory13 + .connect(owner) + .deploy( + 0, + registryGasOverhead, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + const config = { + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + } + const Registry1_3 = await keeperRegistryFactory13 + .connect(owner) + .deploy(registryLogic13.address, config) + + const tx = await Registry1_3.connect(owner).registerUpkeep( + mock.address, + executeGas, + await admin0.getAddress(), + randomBytes, + ) + const id = await getUpkeepID(tx) + + return [id, Registry1_3] +} + +async function deployRegistry2_0( + linkToken: LinkToken, + gasPriceFeed: any, + linkEthFeed: any, +) { + // @ts-ignore bug in autogen file + keeperRegistryFactory20 = await 
ethers.getContractFactory('KeeperRegistry2_0') + // @ts-ignore bug in autogen file + keeperRegistryLogicFactory20 = await ethers.getContractFactory( + 'KeeperRegistryLogic2_0', + ) + + const config = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + } + + const registryLogic = await keeperRegistryLogicFactory20 + .connect(owner) + .deploy(mode, linkToken.address, linkEthFeed.address, gasPriceFeed.address) + + const Registry2_0 = await keeperRegistryFactory20 + .connect(owner) + .deploy(registryLogic.address) + + // deploys a registry, setups of initial configuration, registers an upkeep + const keeper1 = personas.Carol + const keeper2 = personas.Eddy + const keeper3 = personas.Nancy + const keeper4 = personas.Norbert + const keeper5 = personas.Nick + const payee1 = personas.Nelly + const payee2 = personas.Norbert + const payee3 = personas.Nick + const payee4 = personas.Eddy + const payee5 = personas.Carol + // signers + const signer1 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000001', + ) + const signer2 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000002', + ) + const signer3 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000003', + ) + const signer4 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000004', + ) + const signer5 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000005', + ) + + const keeperAddresses = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + await keeper3.getAddress(), + await keeper4.getAddress(), + await keeper5.getAddress(), + ] + const payees = [ + await payee1.getAddress(), + await 
payee2.getAddress(), + await payee3.getAddress(), + await payee4.getAddress(), + await payee5.getAddress(), + ] + const signers = [signer1, signer2, signer3, signer4, signer5] + + const signerAddresses = [] + for (const signer of signers) { + signerAddresses.push(await signer.getAddress()) + } + + const f = 1 + const offchainVersion = 1 + const offchainBytes = '0x' + + await Registry2_0.connect(owner).setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig(config), + offchainVersion, + offchainBytes, + ) + await Registry2_0.connect(owner).setPayees(payees) + return Registry2_0 +} + +describe('UpkeepTranscoder3_0', () => { + beforeEach(async () => { + transcoder = await upkeepTranscoderFactory.connect(owner).deploy() + }) + + describe('#typeAndVersion', () => { + it('uses the correct type and version', async () => { + const typeAndVersion = await transcoder.typeAndVersion() + assert.equal(typeAndVersion, 'UpkeepTranscoder 3.0.0') + }) + }) + + describe('#transcodeUpkeeps', () => { + const encodedData = '0xabcd' + + it('reverts if the from type is not V1 or V2', async () => { + await evmRevert( + transcoder.transcodeUpkeeps( + UpkeepFormat.V3, + UpkeepFormat.V1, + encodedData, + ), + ) + await evmRevert( + transcoder.transcodeUpkeeps( + UpkeepFormat.V4, + UpkeepFormat.V1, + encodedData, + ), + ) + }) + + context('when from and to versions are correct', () => { + upkeepsV3 = [ + [executeGas, 2 ** 32 - 1, false, target0, amountSpent, balance, 0], + [executeGas, 2 ** 32 - 1, false, target1, amountSpent, balance, 0], + ] + + it('transcodes V1 upkeeps to V3 properly, regardless of toVersion value', async () => { + upkeepsV1 = [ + [ + balance, + lastKeeper0, + executeGas, + 2 ** 32, + target0, + amountSpent, + await admin0.getAddress(), + ], + [ + balance, + lastKeeper1, + executeGas, + 2 ** 32, + target1, + amountSpent, + await admin1.getAddress(), + ], + ] + + const data = await transcoder.transcodeUpkeeps( + UpkeepFormat.V1, + UpkeepFormat.V1, + 
encodeUpkeepV1(idx, upkeepsV1, ['0xabcd', '0xffff']), + ) + assert.equal( + encodeUpkeepV3(idx, upkeepsV3, ['0xabcd', '0xffff'], admins), + data, + ) + }) + + it('transcodes V2 upkeeps to V3 properly, regardless of toVersion value', async () => { + upkeepsV2 = [ + [ + balance, + lastKeeper0, + amountSpent, + await admin0.getAddress(), + executeGas, + 2 ** 32 - 1, + target0, + false, + ], + [ + balance, + lastKeeper1, + amountSpent, + await admin1.getAddress(), + executeGas, + 2 ** 32 - 1, + target1, + false, + ], + ] + + const data = await transcoder.transcodeUpkeeps( + UpkeepFormat.V2, + UpkeepFormat.V2, + encodeUpkeepV2(idx, upkeepsV2, ['0xabcd', '0xffff']), + ) + assert.equal( + encodeUpkeepV3(idx, upkeepsV3, ['0xabcd', '0xffff'], admins), + data, + ) + }) + + it('migrates upkeeps from 1.2 registry to 2.0', async () => { + const linkToken = await deployLinkToken() + const [gasPriceFeed, linkEthFeed] = await deployFeeds() + const [id, legacyRegistry] = await deployLegacyRegistry1_2( + linkToken, + gasPriceFeed, + linkEthFeed, + ) + const Registry2_0 = await deployRegistry2_0( + linkToken, + gasPriceFeed, + linkEthFeed, + ) + + await linkToken + .connect(owner) + .approve(legacyRegistry.address, toWei('1000')) + await legacyRegistry.connect(owner).addFunds(id, toWei('1000')) + + // set outgoing permission to registry 2_0 and incoming permission for registry 1_2 + await legacyRegistry.setPeerRegistryMigrationPermission( + Registry2_0.address, + 1, + ) + await Registry2_0.setPeerRegistryMigrationPermission( + legacyRegistry.address, + 2, + ) + + expect((await legacyRegistry.getUpkeep(id)).balance).to.equal( + toWei('1000'), + ) + expect((await legacyRegistry.getUpkeep(id)).checkData).to.equal( + randomBytes, + ) + expect((await legacyRegistry.getState()).state.numUpkeeps).to.equal(1) + + await legacyRegistry + .connect(admin0) + .migrateUpkeeps([id], Registry2_0.address) + + expect((await legacyRegistry.getState()).state.numUpkeeps).to.equal(0) + expect((await 
Registry2_0.getState()).state.numUpkeeps).to.equal(1) + expect((await legacyRegistry.getUpkeep(id)).balance).to.equal(0) + expect((await legacyRegistry.getUpkeep(id)).checkData).to.equal('0x') + expect((await Registry2_0.getUpkeep(id)).balance).to.equal( + toWei('1000'), + ) + expect( + (await Registry2_0.getState()).state.expectedLinkBalance, + ).to.equal(toWei('1000')) + expect(await linkToken.balanceOf(Registry2_0.address)).to.equal( + toWei('1000'), + ) + expect((await Registry2_0.getUpkeep(id)).checkData).to.equal( + randomBytes, + ) + }) + + it('migrates upkeeps from 1.3 registry to 2.0', async () => { + const linkToken = await deployLinkToken() + const [gasPriceFeed, linkEthFeed] = await deployFeeds() + const [id, legacyRegistry] = await deployLegacyRegistry1_3( + linkToken, + gasPriceFeed, + linkEthFeed, + ) + const Registry2_0 = await deployRegistry2_0( + linkToken, + gasPriceFeed, + linkEthFeed, + ) + + await linkToken + .connect(owner) + .approve(legacyRegistry.address, toWei('1000')) + await legacyRegistry.connect(owner).addFunds(id, toWei('1000')) + + // set outgoing permission to registry 2_0 and incoming permission for registry 1_3 + await legacyRegistry.setPeerRegistryMigrationPermission( + Registry2_0.address, + 1, + ) + await Registry2_0.setPeerRegistryMigrationPermission( + legacyRegistry.address, + 2, + ) + + expect((await legacyRegistry.getUpkeep(id)).balance).to.equal( + toWei('1000'), + ) + expect((await legacyRegistry.getUpkeep(id)).checkData).to.equal( + randomBytes, + ) + expect((await legacyRegistry.getState()).state.numUpkeeps).to.equal(1) + + await legacyRegistry + .connect(admin0) + .migrateUpkeeps([id], Registry2_0.address) + + expect((await legacyRegistry.getState()).state.numUpkeeps).to.equal(0) + expect((await Registry2_0.getState()).state.numUpkeeps).to.equal(1) + expect((await legacyRegistry.getUpkeep(id)).balance).to.equal(0) + expect((await legacyRegistry.getUpkeep(id)).checkData).to.equal('0x') + expect((await 
Registry2_0.getUpkeep(id)).balance).to.equal( + toWei('1000'), + ) + expect( + (await Registry2_0.getState()).state.expectedLinkBalance, + ).to.equal(toWei('1000')) + expect(await linkToken.balanceOf(Registry2_0.address)).to.equal( + toWei('1000'), + ) + expect((await Registry2_0.getUpkeep(id)).checkData).to.equal( + randomBytes, + ) + }) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/UpkeepTranscoder4_0.test.ts b/contracts/test/v0.8/automation/UpkeepTranscoder4_0.test.ts new file mode 100644 index 00000000..97005489 --- /dev/null +++ b/contracts/test/v0.8/automation/UpkeepTranscoder4_0.test.ts @@ -0,0 +1,654 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { UpkeepTranscoder4_0 as UpkeepTranscoder } from '../../../typechain/UpkeepTranscoder4_0' +import { KeeperRegistry2_0__factory as KeeperRegistry2_0Factory } from '../../../typechain/factories/KeeperRegistry2_0__factory' +import { LinkToken__factory as LinkTokenFactory } from '../../../typechain/factories/LinkToken__factory' +import { MockV3Aggregator__factory as MockV3AggregatorFactory } from '../../../typechain/factories/MockV3Aggregator__factory' +import { evmRevert } from '../../test-helpers/matchers' +import { BigNumber, Signer } from 'ethers' +import { getUsers, Personas } from '../../test-helpers/setup' +import { KeeperRegistryLogic2_0__factory as KeeperRegistryLogic20Factory } from '../../../typechain/factories/KeeperRegistryLogic2_0__factory' +import { KeeperRegistry1_3__factory as KeeperRegistry1_3Factory } from '../../../typechain/factories/KeeperRegistry1_3__factory' +import { KeeperRegistryLogic1_3__factory as KeeperRegistryLogicFactory } from '../../../typechain/factories/KeeperRegistryLogic1_3__factory' +import { UpkeepTranscoder4_0__factory as UpkeepTranscoderFactory } from '../../../typechain/factories/UpkeepTranscoder4_0__factory' +import { toWei } from '../../test-helpers/helpers' +import { loadFixture } from 
'@nomicfoundation/hardhat-network-helpers' +import { + IKeeperRegistryMaster, + KeeperRegistry1_2, + KeeperRegistry1_3, + KeeperRegistry2_0, + LinkToken, + MockV3Aggregator, + UpkeepMock, +} from '../../../typechain' +import { deployRegistry21 } from './helpers' + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +/*********************************** TRANSCODER v4.0 IS FROZEN ************************************/ + +// We are leaving the original tests enabled, however as automation v2.1 is still actively being deployed + +describe('UpkeepTranscoder v4.0 - Frozen [ @skip-coverage ]', () => { + it('has not changed', () => { + assert.equal( + ethers.utils.id(UpkeepTranscoderFactory.bytecode), + '0xf22c4701b0088e6e69c389a34a22041a69f00890a89246e3c2a6d38172222dae', + 'UpkeepTranscoder bytecode has changed', + ) + }) +}) + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +let transcoder: UpkeepTranscoder +let linkTokenFactory: LinkTokenFactory +let keeperRegistryFactory20: KeeperRegistry2_0Factory +let keeperRegistryFactory13: KeeperRegistry1_3Factory +let keeperRegistryLogicFactory20: KeeperRegistryLogic20Factory +let keeperRegistryLogicFactory13: KeeperRegistryLogicFactory +let linkToken: LinkToken +let registry12: KeeperRegistry1_2 +let registry13: KeeperRegistry1_3 +let registry20: KeeperRegistry2_0 +let registry21: IKeeperRegistryMaster +let gasPriceFeed: MockV3Aggregator +let linkEthFeed: MockV3Aggregator +let mock: UpkeepMock +let personas: Personas +let owner: Signer +let upkeepsV12: any[] +let upkeepsV13: any[] +let upkeepsV21: any[] +let admins: string[] +let admin0: Signer +let admin1: Signer +let id12: BigNumber +let id13: BigNumber +let id20: BigNumber 
+const executeGas = BigNumber.from('100000') +const paymentPremiumPPB = BigNumber.from('250000000') +const flatFeeMicroLink = BigNumber.from(0) +const blockCountPerTurn = BigNumber.from(3) +const randomBytes = '0x1234abcd' +const stalenessSeconds = BigNumber.from(43820) +const gasCeilingMultiplier = BigNumber.from(1) +const checkGasLimit = BigNumber.from(20000000) +const fallbackGasPrice = BigNumber.from(200) +const fallbackLinkPrice = BigNumber.from(200000000) +const maxPerformGas = BigNumber.from(5000000) +const minUpkeepSpend = BigNumber.from(0) +const maxCheckDataSize = BigNumber.from(1000) +const maxPerformDataSize = BigNumber.from(1000) +const mode = BigNumber.from(0) +const linkEth = BigNumber.from(300000000) +const gasWei = BigNumber.from(100) +const registryGasOverhead = BigNumber.from('80000') +const balance = 50000000000000 +const amountSpent = 200000000000000 +const { AddressZero } = ethers.constants +const target0 = '0xffffffffffffffffffffffffffffffffffffffff' +const target1 = '0xfffffffffffffffffffffffffffffffffffffffe' +const lastKeeper0 = '0x233a95ccebf3c9f934482c637c08b4015cdd6ddd' +const lastKeeper1 = '0x233a95ccebf3c9f934482c637c08b4015cdd6ddc' + +const f = 1 +const offchainVersion = 1 +const offchainBytes = '0x' +let keeperAddresses: string[] +let signerAddresses: string[] +let payees: string[] + +enum UpkeepFormat { + V12, + V13, + V20, + V21, + V30, // Does not exist +} +const idx = [123, 124] + +async function getUpkeepID(tx: any): Promise { + const receipt = await tx.wait() + return receipt.events[0].args.id +} + +const encodeConfig20 = (config: any) => { + return ethers.utils.defaultAbiCoder.encode( + [ + 'tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds\ + ,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,\ + uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,\ + address registrar)', + ], + [config], 
+ ) +} + +const encodeUpkeepV12 = (ids: number[], upkeeps: any[], checkDatas: any[]) => { + return ethers.utils.defaultAbiCoder.encode( + [ + 'uint256[]', + 'tuple(uint96,address,uint32,uint64,address,uint96,address)[]', + 'bytes[]', + ], + [ids, upkeeps, checkDatas], + ) +} + +async function deployRegistry1_2(): Promise<[BigNumber, KeeperRegistry1_2]> { + const keeperRegistryFactory = + await ethers.getContractFactory('KeeperRegistry1_2') + const registry12 = await keeperRegistryFactory + .connect(owner) + .deploy(linkToken.address, linkEthFeed.address, gasPriceFeed.address, { + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + }) + const tx = await registry12 + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin0.getAddress(), + randomBytes, + ) + const id = await getUpkeepID(tx) + return [id, registry12] +} + +async function deployRegistry1_3(): Promise<[BigNumber, KeeperRegistry1_3]> { + keeperRegistryFactory13 = await ethers.getContractFactory('KeeperRegistry1_3') + keeperRegistryLogicFactory13 = await ethers.getContractFactory( + 'KeeperRegistryLogic1_3', + ) + + const registryLogic13 = await keeperRegistryLogicFactory13 + .connect(owner) + .deploy( + 0, + registryGasOverhead, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + const config = { + paymentPremiumPPB, + flatFeeMicroLink, + blockCountPerTurn, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + } + const registry13 = await keeperRegistryFactory13 + .connect(owner) + .deploy(registryLogic13.address, config) + + const tx = await registry13 + .connect(owner) + 
.registerUpkeep( + mock.address, + executeGas, + await admin0.getAddress(), + randomBytes, + ) + const id = await getUpkeepID(tx) + + return [id, registry13] +} + +async function deployRegistry2_0(): Promise<[BigNumber, KeeperRegistry2_0]> { + keeperRegistryFactory20 = await ethers.getContractFactory('KeeperRegistry2_0') + keeperRegistryLogicFactory20 = await ethers.getContractFactory( + 'KeeperRegistryLogic2_0', + ) + + const config = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: transcoder.address, + registrar: ethers.constants.AddressZero, + } + + const registryLogic = await keeperRegistryLogicFactory20 + .connect(owner) + .deploy(mode, linkToken.address, linkEthFeed.address, gasPriceFeed.address) + + const registry20 = await keeperRegistryFactory20 + .connect(owner) + .deploy(registryLogic.address) + + await registry20 + .connect(owner) + .setConfig( + signerAddresses, + keeperAddresses, + f, + encodeConfig20(config), + offchainVersion, + offchainBytes, + ) + await registry20.connect(owner).setPayees(payees) + + const tx = await registry20 + .connect(owner) + .registerUpkeep( + mock.address, + executeGas, + await admin0.getAddress(), + randomBytes, + randomBytes, + ) + const id = await getUpkeepID(tx) + + return [id, registry20] +} + +async function deployRegistry2_1() { + const registry = await deployRegistry21( + owner, + mode, + linkToken.address, + linkEthFeed.address, + gasPriceFeed.address, + ) + + const onchainConfig = { + paymentPremiumPPB, + flatFeeMicroLink, + checkGasLimit, + stalenessSeconds, + gasCeilingMultiplier, + minUpkeepSpend, + maxCheckDataSize, + maxPerformDataSize, + maxRevertDataSize: 1000, + maxPerformGas, + fallbackGasPrice, + fallbackLinkPrice, + transcoder: ethers.constants.AddressZero, + registrars: [], + upkeepPrivilegeManager: await 
owner.getAddress(), + } + + await registry + .connect(owner) + .setConfigTypeSafe( + signerAddresses, + keeperAddresses, + f, + onchainConfig, + offchainVersion, + offchainBytes, + ) + + return registry +} + +const setup = async () => { + personas = (await getUsers()).personas + owner = personas.Norbert + admin0 = personas.Neil + admin1 = personas.Nick + admins = [ + (await admin0.getAddress()).toLowerCase(), + (await admin1.getAddress()).toLowerCase(), + ] + + const upkeepTranscoderFactory = await ethers.getContractFactory( + 'UpkeepTranscoder4_0', + ) + transcoder = await upkeepTranscoderFactory.connect(owner).deploy() + + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + ) + linkToken = await linkTokenFactory.connect(owner).deploy() + // need full path because there are two contracts with name MockV3Aggregator + const mockV3AggregatorFactory = (await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + )) as unknown as MockV3AggregatorFactory + + gasPriceFeed = await mockV3AggregatorFactory.connect(owner).deploy(0, gasWei) + linkEthFeed = await mockV3AggregatorFactory.connect(owner).deploy(9, linkEth) + + const upkeepMockFactory = await ethers.getContractFactory('UpkeepMock') + mock = await upkeepMockFactory.deploy() + + const keeper1 = personas.Carol + const keeper2 = personas.Eddy + const keeper3 = personas.Nancy + const keeper4 = personas.Norbert + const keeper5 = personas.Nick + const payee1 = personas.Nelly + const payee2 = personas.Norbert + const payee3 = personas.Nick + const payee4 = personas.Eddy + const payee5 = personas.Carol + // signers + const signer1 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000001', + ) + const signer2 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000002', + ) + const signer3 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000003', + ) + 
const signer4 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000004', + ) + const signer5 = new ethers.Wallet( + '0x7777777000000000000000000000000000000000000000000000000000000005', + ) + + keeperAddresses = [ + await keeper1.getAddress(), + await keeper2.getAddress(), + await keeper3.getAddress(), + await keeper4.getAddress(), + await keeper5.getAddress(), + ] + + payees = [ + await payee1.getAddress(), + await payee2.getAddress(), + await payee3.getAddress(), + await payee4.getAddress(), + await payee5.getAddress(), + ] + const signers = [signer1, signer2, signer3, signer4, signer5] + + signerAddresses = signers.map((signer) => signer.address) + ;[id12, registry12] = await deployRegistry1_2() + ;[id13, registry13] = await deployRegistry1_3() + ;[id20, registry20] = await deployRegistry2_0() + registry21 = await deployRegistry2_1() + + upkeepsV12 = [ + [ + balance, + lastKeeper0, + executeGas, + 2 ** 32, + target0, + amountSpent, + await admin0.getAddress(), + ], + [ + balance, + lastKeeper1, + executeGas, + 2 ** 32, + target1, + amountSpent, + await admin1.getAddress(), + ], + ] + + upkeepsV13 = [ + [ + balance, + lastKeeper0, + amountSpent, + await admin0.getAddress(), + executeGas, + 2 ** 32 - 1, + target0, + false, + ], + [ + balance, + lastKeeper1, + amountSpent, + await admin1.getAddress(), + executeGas, + 2 ** 32 - 1, + target1, + false, + ], + ] + + upkeepsV21 = [ + [ + false, + executeGas, + 2 ** 32 - 1, + AddressZero, // forwarder will always be zero + amountSpent, + balance, + 0, + target0, + ], + [ + false, + executeGas, + 2 ** 32 - 1, + AddressZero, // forwarder will always be zero + amountSpent, + balance, + 0, + target1, + ], + ] +} + +describe('UpkeepTranscoder4_0', () => { + beforeEach(async () => { + await loadFixture(setup) + }) + + describe('#typeAndVersion', () => { + it('uses the correct type and version', async () => { + const typeAndVersion = await transcoder.typeAndVersion() + 
assert.equal(typeAndVersion, 'UpkeepTranscoder 4.0.0') + }) + }) + + describe('#transcodeUpkeeps', () => { + const encodedData = '0xabcd' + + it('reverts if the from type is not v1.2, v1.3, v2.0, or v2.1', async () => { + await evmRevert( + transcoder.transcodeUpkeeps( + UpkeepFormat.V30, + UpkeepFormat.V12, + encodedData, + ), + ) + }) + + context('when from version is correct', () => { + // note this is a bugfix - the "to" version should be accounted for in + // future versions of the transcoder + it('transcodes to v2.1, regardless of toVersion value', async () => { + const data1 = await transcoder.transcodeUpkeeps( + UpkeepFormat.V12, + UpkeepFormat.V12, + encodeUpkeepV12(idx, upkeepsV12, ['0xabcd', '0xffff']), + ) + const data2 = await transcoder.transcodeUpkeeps( + UpkeepFormat.V12, + UpkeepFormat.V13, + encodeUpkeepV12(idx, upkeepsV12, ['0xabcd', '0xffff']), + ) + const data3 = await transcoder.transcodeUpkeeps( + UpkeepFormat.V12, + 100, + encodeUpkeepV12(idx, upkeepsV12, ['0xabcd', '0xffff']), + ) + assert.equal(data1, data2) + assert.equal(data1, data3) + }) + + it('migrates upkeeps from 1.2 registry to 2.1', async () => { + await linkToken + .connect(owner) + .approve(registry12.address, toWei('1000')) + await registry12.connect(owner).addFunds(id12, toWei('1000')) + + await registry12.setPeerRegistryMigrationPermission( + registry21.address, + 1, + ) + await registry21.setPeerRegistryMigrationPermission( + registry12.address, + 2, + ) + + expect((await registry12.getUpkeep(id12)).balance).to.equal( + toWei('1000'), + ) + expect((await registry12.getUpkeep(id12)).checkData).to.equal( + randomBytes, + ) + expect((await registry12.getState()).state.numUpkeeps).to.equal(1) + + await registry12 + .connect(admin0) + .migrateUpkeeps([id12], registry21.address) + + expect((await registry12.getState()).state.numUpkeeps).to.equal(0) + expect((await registry21.getState()).state.numUpkeeps).to.equal(1) + expect((await registry12.getUpkeep(id12)).balance).to.equal(0) 
+ expect((await registry12.getUpkeep(id12)).checkData).to.equal('0x') + expect((await registry21.getUpkeep(id12)).balance).to.equal( + toWei('1000'), + ) + expect( + (await registry21.getState()).state.expectedLinkBalance, + ).to.equal(toWei('1000')) + expect(await linkToken.balanceOf(registry21.address)).to.equal( + toWei('1000'), + ) + expect((await registry21.getUpkeep(id12)).checkData).to.equal( + randomBytes, + ) + expect((await registry21.getUpkeep(id12)).offchainConfig).to.equal('0x') + expect(await registry21.getUpkeepTriggerConfig(id12)).to.equal('0x') + }) + + it('migrates upkeeps from 1.3 registry to 2.1', async () => { + await linkToken + .connect(owner) + .approve(registry13.address, toWei('1000')) + await registry13.connect(owner).addFunds(id13, toWei('1000')) + + await registry13.setPeerRegistryMigrationPermission( + registry21.address, + 1, + ) + await registry21.setPeerRegistryMigrationPermission( + registry13.address, + 2, + ) + + expect((await registry13.getUpkeep(id13)).balance).to.equal( + toWei('1000'), + ) + expect((await registry13.getUpkeep(id13)).checkData).to.equal( + randomBytes, + ) + expect((await registry13.getState()).state.numUpkeeps).to.equal(1) + + await registry13 + .connect(admin0) + .migrateUpkeeps([id13], registry21.address) + + expect((await registry13.getState()).state.numUpkeeps).to.equal(0) + expect((await registry21.getState()).state.numUpkeeps).to.equal(1) + expect((await registry13.getUpkeep(id13)).balance).to.equal(0) + expect((await registry13.getUpkeep(id13)).checkData).to.equal('0x') + expect((await registry21.getUpkeep(id13)).balance).to.equal( + toWei('1000'), + ) + expect( + (await registry21.getState()).state.expectedLinkBalance, + ).to.equal(toWei('1000')) + expect(await linkToken.balanceOf(registry21.address)).to.equal( + toWei('1000'), + ) + expect((await registry21.getUpkeep(id13)).checkData).to.equal( + randomBytes, + ) + expect((await registry21.getUpkeep(id13)).offchainConfig).to.equal('0x') + 
expect(await registry21.getUpkeepTriggerConfig(id13)).to.equal('0x') + }) + + it('migrates upkeeps from 2.0 registry to 2.1', async () => { + await linkToken + .connect(owner) + .approve(registry20.address, toWei('1000')) + await registry20.connect(owner).addFunds(id20, toWei('1000')) + + await registry20.setPeerRegistryMigrationPermission( + registry21.address, + 1, + ) + await registry21.setPeerRegistryMigrationPermission( + registry20.address, + 2, + ) + + expect((await registry20.getUpkeep(id20)).balance).to.equal( + toWei('1000'), + ) + expect((await registry20.getUpkeep(id20)).checkData).to.equal( + randomBytes, + ) + expect((await registry20.getState()).state.numUpkeeps).to.equal(1) + + await registry20 + .connect(admin0) + .migrateUpkeeps([id20], registry21.address) + + expect((await registry20.getState()).state.numUpkeeps).to.equal(0) + expect((await registry21.getState()).state.numUpkeeps).to.equal(1) + expect((await registry20.getUpkeep(id20)).balance).to.equal(0) + expect((await registry20.getUpkeep(id20)).checkData).to.equal('0x') + expect((await registry21.getUpkeep(id20)).balance).to.equal( + toWei('1000'), + ) + expect( + (await registry21.getState()).state.expectedLinkBalance, + ).to.equal(toWei('1000')) + expect(await linkToken.balanceOf(registry21.address)).to.equal( + toWei('1000'), + ) + expect((await registry21.getUpkeep(id20)).checkData).to.equal( + randomBytes, + ) + expect(await registry21.getUpkeepTriggerConfig(id20)).to.equal('0x') + }) + }) + }) +}) diff --git a/contracts/test/v0.8/automation/helpers.ts b/contracts/test/v0.8/automation/helpers.ts new file mode 100644 index 00000000..b3b4f0ef --- /dev/null +++ b/contracts/test/v0.8/automation/helpers.ts @@ -0,0 +1,70 @@ +import { Signer } from 'ethers' +import { ethers } from 'hardhat' +import { KeeperRegistryLogicB2_1__factory as KeeperRegistryLogicBFactory } from '../../../typechain/factories/KeeperRegistryLogicB2_1__factory' +import { IKeeperRegistryMaster as IKeeperRegistry } from 
'../../../typechain/IKeeperRegistryMaster' +import { IKeeperRegistryMaster__factory as IKeeperRegistryMasterFactory } from '../../../typechain/factories/IKeeperRegistryMaster__factory' +import { AutomationRegistryLogicB2_2__factory as AutomationRegistryLogicBFactory } from '../../../typechain/factories/AutomationRegistryLogicB2_2__factory' +import { IAutomationRegistryMaster as IAutomationRegistry } from '../../../typechain/IAutomationRegistryMaster' +import { IAutomationRegistryMaster__factory as IAutomationRegistryMasterFactory } from '../../../typechain/factories/IAutomationRegistryMaster__factory' + +export const deployRegistry21 = async ( + from: Signer, + mode: Parameters[0], + link: Parameters[1], + linkNative: Parameters[2], + fastgas: Parameters[3], +): Promise => { + const logicBFactory = await ethers.getContractFactory( + 'KeeperRegistryLogicB2_1', + ) + const logicAFactory = await ethers.getContractFactory( + 'KeeperRegistryLogicA2_1', + ) + const registryFactory = await ethers.getContractFactory('KeeperRegistry2_1') + const forwarderLogicFactory = await ethers.getContractFactory( + 'AutomationForwarderLogic', + ) + const forwarderLogic = await forwarderLogicFactory.connect(from).deploy() + const logicB = await logicBFactory + .connect(from) + .deploy(mode, link, linkNative, fastgas, forwarderLogic.address) + const logicA = await logicAFactory.connect(from).deploy(logicB.address) + const master = await registryFactory.connect(from).deploy(logicA.address) + return IKeeperRegistryMasterFactory.connect(master.address, from) +} + +export const deployRegistry22 = async ( + from: Signer, + link: Parameters[0], + linkNative: Parameters[1], + fastgas: Parameters[2], + allowedReadOnlyAddress: Parameters< + AutomationRegistryLogicBFactory['deploy'] + >[3], +): Promise => { + const logicBFactory = await ethers.getContractFactory( + 'AutomationRegistryLogicB2_2', + ) + const logicAFactory = await ethers.getContractFactory( + 'AutomationRegistryLogicA2_2', + ) + 
const registryFactory = await ethers.getContractFactory( + 'AutomationRegistry2_2', + ) + const forwarderLogicFactory = await ethers.getContractFactory( + 'AutomationForwarderLogic', + ) + const forwarderLogic = await forwarderLogicFactory.connect(from).deploy() + const logicB = await logicBFactory + .connect(from) + .deploy( + link, + linkNative, + fastgas, + forwarderLogic.address, + allowedReadOnlyAddress, + ) + const logicA = await logicAFactory.connect(from).deploy(logicB.address) + const master = await registryFactory.connect(from).deploy(logicA.address) + return IAutomationRegistryMasterFactory.connect(master.address, from) +} diff --git a/contracts/test/v0.8/dev/ArbitrumCrossDomainForwarder.test.ts b/contracts/test/v0.8/dev/ArbitrumCrossDomainForwarder.test.ts new file mode 100644 index 00000000..6b6d8aba --- /dev/null +++ b/contracts/test/v0.8/dev/ArbitrumCrossDomainForwarder.test.ts @@ -0,0 +1,194 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { Contract, ContractFactory } from 'ethers' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { + impersonateAs, + publicAbi, + toArbitrumL2AliasAddress, +} from '../../test-helpers/helpers' + +let owner: SignerWithAddress +let stranger: SignerWithAddress +let l1OwnerAddress: string +let crossdomainMessenger: SignerWithAddress +let newL1OwnerAddress: string +let newOwnerCrossdomainMessenger: SignerWithAddress +let forwarderFactory: ContractFactory +let greeterFactory: ContractFactory +let forwarder: Contract +let greeter: Contract + +before(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + stranger = accounts[1] + l1OwnerAddress = owner.address + newL1OwnerAddress = stranger.address + + // Contract factories + forwarderFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/arbitrum/ArbitrumCrossDomainForwarder.sol:ArbitrumCrossDomainForwarder', + owner, + ) + greeterFactory = await ethers.getContractFactory( + 
'src/v0.8/tests/Greeter.sol:Greeter', + owner, + ) +}) + +describe('ArbitrumCrossDomainForwarder', () => { + beforeEach(async () => { + // governor config + crossdomainMessenger = await impersonateAs( + toArbitrumL2AliasAddress(l1OwnerAddress), + ) + await owner.sendTransaction({ + to: crossdomainMessenger.address, + value: ethers.utils.parseEther('1.0'), + }) + newOwnerCrossdomainMessenger = await impersonateAs( + toArbitrumL2AliasAddress(newL1OwnerAddress), + ) + await owner.sendTransaction({ + to: newOwnerCrossdomainMessenger.address, + value: ethers.utils.parseEther('1.0'), + }) + + forwarder = await forwarderFactory.deploy(l1OwnerAddress) + greeter = await greeterFactory.deploy(forwarder.address) + }) + + it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(forwarder, [ + 'typeAndVersion', + 'crossDomainMessenger', + 'forward', + 'l1Owner', + 'transferL1Ownership', + 'acceptL1Ownership', + // ConfirmedOwner methods: + 'owner', + 'transferOwnership', + 'acceptOwnership', + ]) + }) + + describe('#constructor', () => { + it('should set the owner correctly', async () => { + const response = await forwarder.owner() + assert.equal(response, owner.address) + }) + + it('should set the l1Owner correctly', async () => { + const response = await forwarder.l1Owner() + assert.equal(response, l1OwnerAddress) + }) + + it('should set the crossdomain messenger correctly', async () => { + const response = await forwarder.crossDomainMessenger() + assert.equal(response, crossdomainMessenger.address) + }) + + it('should set the typeAndVersion correctly', async () => { + const response = await forwarder.typeAndVersion() + assert.equal(response, 'ArbitrumCrossDomainForwarder 1.0.0') + }) + }) + + describe('#forward', () => { + it('should not be callable by unknown address', async () => { + await expect( + forwarder.connect(stranger).forward(greeter.address, '0x'), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by 
crossdomain messenger address / L1 owner', async () => { + const newGreeting = 'hello' + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [newGreeting], + ) + await forwarder + .connect(crossdomainMessenger) + .forward(greeter.address, setGreetingData) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, newGreeting) + }) + + it('should revert when contract call reverts', async () => { + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [''], + ) + await expect( + forwarder + .connect(crossdomainMessenger) + .forward(greeter.address, setGreetingData), + ).to.be.revertedWith('Invalid greeting length') + }) + }) + + describe('#transferL1Ownership', () => { + it('should not be callable by non-owners', async () => { + await expect( + forwarder.connect(stranger).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should not be callable by L2 owner', async () => { + const forwarderOwner = await forwarder.owner() + assert.equal(forwarderOwner, owner.address) + + await expect( + forwarder.connect(owner).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by current L1 owner', async () => { + const currentL1Owner = await forwarder.l1Owner() + await expect( + forwarder + .connect(crossdomainMessenger) + .transferL1Ownership(newL1OwnerAddress), + ) + .to.emit(forwarder, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, newL1OwnerAddress) + }) + + it('should be callable by current L1 owner to zero address', async () => { + const currentL1Owner = await forwarder.l1Owner() + await expect( + forwarder + .connect(crossdomainMessenger) + .transferL1Ownership(ethers.constants.AddressZero), + ) + .to.emit(forwarder, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, ethers.constants.AddressZero) + }) + }) + + 
describe('#acceptL1Ownership', () => { + it('should not be callable by non pending-owners', async () => { + await expect( + forwarder.connect(crossdomainMessenger).acceptL1Ownership(), + ).to.be.revertedWith('Must be proposed L1 owner') + }) + + it('should be callable by pending L1 owner', async () => { + const currentL1Owner = await forwarder.l1Owner() + await forwarder + .connect(crossdomainMessenger) + .transferL1Ownership(newL1OwnerAddress) + await expect( + forwarder.connect(newOwnerCrossdomainMessenger).acceptL1Ownership(), + ) + .to.emit(forwarder, 'L1OwnershipTransferred') + .withArgs(currentL1Owner, newL1OwnerAddress) + + const updatedL1Owner = await forwarder.l1Owner() + assert.equal(updatedL1Owner, newL1OwnerAddress) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/ArbitrumCrossDomainGovernor.test.ts b/contracts/test/v0.8/dev/ArbitrumCrossDomainGovernor.test.ts new file mode 100644 index 00000000..1275cc6f --- /dev/null +++ b/contracts/test/v0.8/dev/ArbitrumCrossDomainGovernor.test.ts @@ -0,0 +1,365 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import etherslib, { Contract, ContractFactory } from 'ethers' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { + impersonateAs, + publicAbi, + stripHexPrefix, + toArbitrumL2AliasAddress, +} from '../../test-helpers/helpers' + +let owner: SignerWithAddress +let stranger: SignerWithAddress +let l1OwnerAddress: string +let crossdomainMessenger: SignerWithAddress +let newL1OwnerAddress: string +let newOwnerCrossdomainMessenger: SignerWithAddress +let governorFactory: ContractFactory +let greeterFactory: ContractFactory +let multisendFactory: ContractFactory +let governor: Contract +let greeter: Contract +let multisend: Contract + +before(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + stranger = accounts[1] + l1OwnerAddress = owner.address + newL1OwnerAddress = stranger.address + + // Contract factories + 
governorFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/arbitrum/ArbitrumCrossDomainGovernor.sol:ArbitrumCrossDomainGovernor', + owner, + ) + greeterFactory = await ethers.getContractFactory( + 'src/v0.8/tests/Greeter.sol:Greeter', + owner, + ) + multisendFactory = await ethers.getContractFactory( + 'src/v0.8/vendor/MultiSend.sol:MultiSend', + owner, + ) +}) + +describe('ArbitrumCrossDomainGovernor', () => { + beforeEach(async () => { + // governor config + crossdomainMessenger = await impersonateAs( + toArbitrumL2AliasAddress(l1OwnerAddress), + ) + await owner.sendTransaction({ + to: crossdomainMessenger.address, + value: ethers.utils.parseEther('1.0'), + }) + newOwnerCrossdomainMessenger = await impersonateAs( + toArbitrumL2AliasAddress(newL1OwnerAddress), + ) + await owner.sendTransaction({ + to: newOwnerCrossdomainMessenger.address, + value: ethers.utils.parseEther('1.0'), + }) + + governor = await governorFactory.deploy(l1OwnerAddress) + greeter = await greeterFactory.deploy(governor.address) + multisend = await multisendFactory.deploy() + }) + + it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(governor, [ + 'typeAndVersion', + 'crossDomainMessenger', + 'forward', + 'forwardDelegate', + 'l1Owner', + 'transferL1Ownership', + 'acceptL1Ownership', + // ConfirmedOwner methods: + 'owner', + 'transferOwnership', + 'acceptOwnership', + ]) + }) + + describe('#constructor', () => { + it('should set the owner correctly', async () => { + const response = await governor.owner() + assert.equal(response, owner.address) + }) + + it('should set the l1Owner correctly', async () => { + const response = await governor.l1Owner() + assert.equal(response, l1OwnerAddress) + }) + + it('should set the crossdomain messenger correctly', async () => { + const response = await governor.crossDomainMessenger() + assert.equal(response, crossdomainMessenger.address) + }) + + it('should set the typeAndVersion correctly', async () => { + const 
response = await governor.typeAndVersion() + assert.equal(response, 'ArbitrumCrossDomainGovernor 1.0.0') + }) + }) + + describe('#forward', () => { + it('should not be callable by unknown address', async () => { + await expect( + governor.connect(stranger).forward(greeter.address, '0x'), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by crossdomain messenger address / L1 owner', async () => { + const newGreeting = 'hello' + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [newGreeting], + ) + await governor + .connect(crossdomainMessenger) + .forward(greeter.address, setGreetingData) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, newGreeting) + }) + + it('should be callable by L2 owner', async () => { + const newGreeting = 'hello' + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [newGreeting], + ) + await governor.connect(owner).forward(greeter.address, setGreetingData) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, newGreeting) + }) + + it('should revert when contract call reverts', async () => { + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [''], + ) + await expect( + governor + .connect(crossdomainMessenger) + .forward(greeter.address, setGreetingData), + ).to.be.revertedWith('Invalid greeting length') + }) + }) + + describe('#forwardDelegate', () => { + it('should not be callable by unknown address', async () => { + await expect( + governor.connect(stranger).forwardDelegate(multisend.address, '0x'), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by crossdomain messenger address / L1 owner', async () => { + const calls = [ + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'foo', + ]), + value: 0, + }, + { + to: greeter.address, + data: 
greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'bar', + ]), + value: 0, + }, + ] + const multisendData = encodeMultisendData(multisend.interface, calls) + await governor + .connect(crossdomainMessenger) + .forwardDelegate(multisend.address, multisendData) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, 'bar') + }) + + it('should be callable by L2 owner', async () => { + const calls = [ + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'foo', + ]), + value: 0, + }, + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'bar', + ]), + value: 0, + }, + ] + const multisendData = encodeMultisendData(multisend.interface, calls) + await governor + .connect(owner) + .forwardDelegate(multisend.address, multisendData) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, 'bar') + }) + + it('should revert batch when one call fails', async () => { + const calls = [ + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'foo', + ]), + value: 0, + }, + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + '', // should revert + ]), + value: 0, + }, + ] + const multisendData = encodeMultisendData(multisend.interface, calls) + await expect( + governor + .connect(crossdomainMessenger) + .forwardDelegate(multisend.address, multisendData), + ).to.be.revertedWith('Governor delegatecall reverted') + + const greeting = await greeter.greeting() + assert.equal(greeting, '') // Unchanged + }) + + it('should bubble up revert when contract call reverts', async () => { + const triggerRevertData = + greeterFactory.interface.encodeFunctionData('triggerRevert') + await expect( + governor + .connect(crossdomainMessenger) + .forwardDelegate(greeter.address, triggerRevertData), + ).to.be.revertedWith('Greeter: revert triggered') + }) + }) + + 
describe('#transferL1Ownership', () => { + it('should not be callable by non-owners', async () => { + await expect( + governor.connect(stranger).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should not be callable by L2 owner', async () => { + const governorOwner = await governor.owner() + assert.equal(governorOwner, owner.address) + + await expect( + governor.connect(owner).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by current L1 owner', async () => { + const currentL1Owner = await governor.l1Owner() + await expect( + governor + .connect(crossdomainMessenger) + .transferL1Ownership(newL1OwnerAddress), + ) + .to.emit(governor, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, newL1OwnerAddress) + }) + + it('should be callable by current L1 owner to zero address', async () => { + const currentL1Owner = await governor.l1Owner() + await expect( + governor + .connect(crossdomainMessenger) + .transferL1Ownership(ethers.constants.AddressZero), + ) + .to.emit(governor, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, ethers.constants.AddressZero) + }) + }) + + describe('#acceptL1Ownership', () => { + it('should not be callable by non pending-owners', async () => { + await expect( + governor.connect(crossdomainMessenger).acceptL1Ownership(), + ).to.be.revertedWith('Must be proposed L1 owner') + }) + + it('should be callable by pending L1 owner', async () => { + const currentL1Owner = await governor.l1Owner() + await governor + .connect(crossdomainMessenger) + .transferL1Ownership(newL1OwnerAddress) + await expect( + governor.connect(newOwnerCrossdomainMessenger).acceptL1Ownership(), + ) + .to.emit(governor, 'L1OwnershipTransferred') + .withArgs(currentL1Owner, newL1OwnerAddress) + + const updatedL1Owner = await governor.l1Owner() + assert.equal(updatedL1Owner, newL1OwnerAddress) + }) + }) +}) + +// 
Multisend contract helpers + +/** + * Encodes an underlying transaction for the Multisend contract + * + * @param operation 0 for CALL, 1 for DELEGATECALL + * @param to tx target address + * @param value tx value + * @param data tx data + */ +export function encodeTxData( + operation: number, + to: string, + value: number, + data: string, +): string { + let dataBuffer = Buffer.from(stripHexPrefix(data), 'hex') + const types = ['uint8', 'address', 'uint256', 'uint256', 'bytes'] + const values = [operation, to, value, dataBuffer.length, dataBuffer] + let encoded = ethers.utils.solidityPack(types, values) + return stripHexPrefix(encoded) +} + +/** + * Encodes a Multisend call + * + * @param MultisendInterface Ethers Interface object of the Multisend contract + * @param transactions one or more transactions to include in the Multisend call + * @param to tx target address + * @param value tx value + * @param data tx data + */ +export function encodeMultisendData( + MultisendInterface: etherslib.utils.Interface, + transactions: { to: string; value: number; data: string }[], +): string { + let nestedTransactionData = '0x' + for (let transaction of transactions) { + nestedTransactionData += encodeTxData( + 0, + transaction.to, + transaction.value, + transaction.data, + ) + } + const encodedMultisendFnData = MultisendInterface.encodeFunctionData( + 'multiSend', + [nestedTransactionData], + ) + return encodedMultisendFnData +} diff --git a/contracts/test/v0.8/dev/ArbitrumSequencerUptimeFeed.test.ts b/contracts/test/v0.8/dev/ArbitrumSequencerUptimeFeed.test.ts new file mode 100644 index 00000000..4d9ddefd --- /dev/null +++ b/contracts/test/v0.8/dev/ArbitrumSequencerUptimeFeed.test.ts @@ -0,0 +1,417 @@ +import { ethers, network } from 'hardhat' +import { BigNumber, Contract } from 'ethers' +import { expect } from 'chai' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' + +describe('ArbitrumSequencerUptimeFeed', () => { + let flags: Contract + let 
arbitrumSequencerUptimeFeed: Contract + let accessController: Contract + let uptimeFeedConsumer: Contract + let deployer: SignerWithAddress + let l1Owner: SignerWithAddress + let l2Messenger: SignerWithAddress + const gasUsedDeviation = 100 + + before(async () => { + const accounts = await ethers.getSigners() + deployer = accounts[0] + l1Owner = accounts[1] + const dummy = accounts[2] + const l2MessengerAddress = ethers.utils.getAddress( + BigNumber.from(l1Owner.address) + .add('0x1111000000000000000000000000000000001111') + .toHexString(), + ) + // Pretend we're on L2 + await network.provider.request({ + method: 'hardhat_impersonateAccount', + params: [l2MessengerAddress], + }) + l2Messenger = await ethers.getSigner(l2MessengerAddress) + // Credit the L2 messenger with some ETH + await dummy.sendTransaction({ + to: l2Messenger.address, + value: (await dummy.getBalance()).sub(ethers.utils.parseEther('0.1')), + }) + }) + + beforeEach(async () => { + const accessControllerFactory = await ethers.getContractFactory( + 'src/v0.8/shared/access/SimpleWriteAccessController.sol:SimpleWriteAccessController', + deployer, + ) + accessController = await accessControllerFactory.deploy() + + const flagsHistoryFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/Flags.sol:Flags', + deployer, + ) + flags = await flagsHistoryFactory.deploy( + accessController.address, + accessController.address, + ) + await accessController.addAccess(flags.address) + + const arbitrumSequencerStatusRecorderFactory = + await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/arbitrum/ArbitrumSequencerUptimeFeed.sol:ArbitrumSequencerUptimeFeed', + deployer, + ) + arbitrumSequencerUptimeFeed = + await arbitrumSequencerStatusRecorderFactory.deploy( + flags.address, + l1Owner.address, + ) + // Required for ArbitrumSequencerUptimeFeed to raise/lower flags + await accessController.addAccess(arbitrumSequencerUptimeFeed.address) + // Required for ArbitrumSequencerUptimeFeed to read flags + await 
flags.addAccess(arbitrumSequencerUptimeFeed.address) + + // Deployer requires access to invoke initialize + await accessController.addAccess(deployer.address) + // Once ArbitrumSequencerUptimeFeed has access, we can initialise the 0th aggregator round + const initTx = await arbitrumSequencerUptimeFeed + .connect(deployer) + .initialize() + await expect(initTx).to.emit(arbitrumSequencerUptimeFeed, 'Initialized') + + // Mock consumer + const statusFeedConsumerFactory = await ethers.getContractFactory( + 'src/v0.8/tests/FeedConsumer.sol:FeedConsumer', + deployer, + ) + uptimeFeedConsumer = await statusFeedConsumerFactory.deploy( + arbitrumSequencerUptimeFeed.address, + ) + }) + + describe('constants', () => { + it('should have the correct value for FLAG_L2_SEQ_OFFLINE', async () => { + const flag: string = + await arbitrumSequencerUptimeFeed.FLAG_L2_SEQ_OFFLINE() + expect(flag.toLowerCase()).to.equal( + '0xa438451d6458044c3c8cd2f6f31c91ac882a6d91', + ) + }) + }) + + describe('#updateStatus', () => { + it(`should update status when status has changed and incoming timestamp is newer than the latest`, async () => { + let timestamp = await arbitrumSequencerUptimeFeed.latestTimestamp() + let tx = await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + await expect(tx) + .to.emit(arbitrumSequencerUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + expect(await arbitrumSequencerUptimeFeed.latestAnswer()).to.equal(1) + + // Submit another status update, same status, newer timestamp, should ignore + tx = await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp.add(1000)) + await expect(tx).not.to.emit(arbitrumSequencerUptimeFeed, 'AnswerUpdated') + await expect(tx).to.emit(arbitrumSequencerUptimeFeed, 'UpdateIgnored') + expect(await arbitrumSequencerUptimeFeed.latestAnswer()).to.equal('1') + expect(await arbitrumSequencerUptimeFeed.latestTimestamp()).to.equal( + timestamp, + ) + + // 
Submit another status update, different status, newer timestamp should update + timestamp = timestamp.add(2000) + tx = await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .updateStatus(false, timestamp) + await expect(tx) + .to.emit(arbitrumSequencerUptimeFeed, 'AnswerUpdated') + .withArgs(0, 3 /** roundId */, timestamp) + expect(await arbitrumSequencerUptimeFeed.latestAnswer()).to.equal(0) + expect(await arbitrumSequencerUptimeFeed.latestTimestamp()).to.equal( + timestamp, + ) + }) + + it(`should update status when status has changed and incoming timestamp is the same as latest`, async () => { + const timestamp = await arbitrumSequencerUptimeFeed.latestTimestamp() + let tx = await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + await expect(tx) + .to.emit(arbitrumSequencerUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + expect(await arbitrumSequencerUptimeFeed.latestAnswer()).to.equal(1) + + // Submit another status update, same status, same timestamp, should ignore + tx = await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + await expect(tx).not.to.emit(arbitrumSequencerUptimeFeed, 'AnswerUpdated') + await expect(tx).to.emit(arbitrumSequencerUptimeFeed, 'UpdateIgnored') + expect(await arbitrumSequencerUptimeFeed.latestAnswer()).to.equal('1') + expect(await arbitrumSequencerUptimeFeed.latestTimestamp()).to.equal( + timestamp, + ) + + // Submit another status update, different status, same timestamp should update + tx = await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .updateStatus(false, timestamp) + await expect(tx) + .to.emit(arbitrumSequencerUptimeFeed, 'AnswerUpdated') + .withArgs(0, 3 /** roundId */, timestamp) + expect(await arbitrumSequencerUptimeFeed.latestAnswer()).to.equal(0) + expect(await arbitrumSequencerUptimeFeed.latestTimestamp()).to.equal( + timestamp, + ) + }) + + it('should ignore out-of-order updates', async () => { + const 
timestamp = ( + await arbitrumSequencerUptimeFeed.latestTimestamp() + ).add(10_000) + // Update status + let tx = await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + await expect(tx) + .to.emit(arbitrumSequencerUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + expect(await arbitrumSequencerUptimeFeed.latestAnswer()).to.equal(1) + + // Update with different status, but stale timestamp, should be ignored + const staleTimestamp = timestamp.sub(1000) + tx = await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .updateStatus(false, staleTimestamp) + await expect(tx) + .to.not.emit(arbitrumSequencerUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + await expect(tx).to.emit(arbitrumSequencerUptimeFeed, 'UpdateIgnored') + }) + }) + + describe('AggregatorV3Interface', () => { + it('should return valid answer from getRoundData and latestRoundData', async () => { + let [roundId, answer, startedAt, updatedAt, answeredInRound] = + await arbitrumSequencerUptimeFeed.latestRoundData() + expect(roundId).to.equal(1) + expect(answer).to.equal(0) + expect(answeredInRound).to.equal(roundId) + expect(startedAt).to.equal(updatedAt) // startedAt = updatedAt = timestamp + + // Submit status update with different status and newer timestamp, should update + const timestamp = (startedAt as BigNumber).add(1000) + await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + ;[roundId, answer, startedAt, updatedAt, answeredInRound] = + await arbitrumSequencerUptimeFeed.getRoundData(2) + expect(roundId).to.equal(2) + expect(answer).to.equal(1) + expect(answeredInRound).to.equal(roundId) + expect(startedAt).to.equal(timestamp) + expect(updatedAt).to.equal(startedAt) + + // Check that last round is still returning the correct data + ;[roundId, answer, startedAt, updatedAt, answeredInRound] = + await arbitrumSequencerUptimeFeed.getRoundData(1) + expect(roundId).to.equal(1) 
+ expect(answer).to.equal(0) + expect(answeredInRound).to.equal(roundId) + expect(startedAt).to.equal(updatedAt) + + // Assert latestRoundData corresponds to latest round id + expect(await arbitrumSequencerUptimeFeed.getRoundData(2)).to.deep.equal( + await arbitrumSequencerUptimeFeed.latestRoundData(), + ) + }) + + it('should return 0 from #getRoundData when round does not yet exist (future roundId)', async () => { + const [roundId, answer, startedAt, updatedAt, answeredInRound] = + await arbitrumSequencerUptimeFeed.getRoundData(2) + expect(roundId).to.equal(2) + expect(answer).to.equal(0) + expect(startedAt).to.equal(0) + expect(updatedAt).to.equal(0) + expect(answeredInRound).to.equal(2) + }) + }) + + describe('Protect reads on AggregatorV2V3Interface functions', () => { + it('should disallow reads on AggregatorV2V3Interface functions when consuming contract is not whitelisted', async () => { + // Sanity - consumer is not whitelisted + expect(await arbitrumSequencerUptimeFeed.checkEnabled()).to.be.true + expect( + await arbitrumSequencerUptimeFeed.hasAccess( + uptimeFeedConsumer.address, + '0x00', + ), + ).to.be.false + + // Assert reads are not possible from consuming contract + await expect(uptimeFeedConsumer.latestAnswer()).to.be.revertedWith( + 'No access', + ) + await expect(uptimeFeedConsumer.latestRoundData()).to.be.revertedWith( + 'No access', + ) + }) + + it('should allow reads on AggregatorV2V3Interface functions when consuming contract is whitelisted', async () => { + // Whitelist consumer + await arbitrumSequencerUptimeFeed.addAccess(uptimeFeedConsumer.address) + // Sanity - consumer is whitelisted + expect(await arbitrumSequencerUptimeFeed.checkEnabled()).to.be.true + expect( + await arbitrumSequencerUptimeFeed.hasAccess( + uptimeFeedConsumer.address, + '0x00', + ), + ).to.be.true + + // Assert reads are possible from consuming contract + expect(await uptimeFeedConsumer.latestAnswer()).to.be.equal('0') + const [roundId, answer] = await 
uptimeFeedConsumer.latestRoundData() + expect(roundId).to.equal(1) + expect(answer).to.equal(0) + }) + }) + + describe('Gas costs', () => { + it('should consume a known amount of gas for updates @skip-coverage', async () => { + // Sanity - start at flag = 0 (`false`) + expect(await arbitrumSequencerUptimeFeed.latestAnswer()).to.equal(0) + let timestamp = await arbitrumSequencerUptimeFeed.latestTimestamp() + + // Gas for no update + timestamp = timestamp.add(1000) + const _noUpdateTx = await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .updateStatus(false, timestamp) + const noUpdateTx = await _noUpdateTx.wait(1) + // Assert no update + expect(await arbitrumSequencerUptimeFeed.latestAnswer()).to.equal(0) + expect(noUpdateTx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28300, + gasUsedDeviation, + ) + + // Gas for update + timestamp = timestamp.add(1000) + const _updateTx = await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + const updateTx = await _updateTx.wait(1) + // Assert update + expect(await arbitrumSequencerUptimeFeed.latestAnswer()).to.equal(1) + expect(updateTx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 93015, + gasUsedDeviation, + ) + }) + + describe('Aggregator interface', () => { + beforeEach(async () => { + const timestamp = ( + await arbitrumSequencerUptimeFeed.latestTimestamp() + ).add(1000) + // Initialise a round + await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + }) + + it('should consume a known amount of gas for getRoundData(uint80) @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .populateTransaction.getRoundData(1), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 31157, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for latestRoundData() @skip-coverage', async () => { + const _tx = 
await l2Messenger.sendTransaction( + await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .populateTransaction.latestRoundData(), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28523, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for latestAnswer() @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .populateTransaction.latestAnswer(), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28329, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for latestTimestamp() @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .populateTransaction.latestTimestamp(), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28229, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for latestRound() @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .populateTransaction.latestRound(), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28245, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for getAnswer(roundId) @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + .populateTransaction.getAnswer(1), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 30799, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for getTimestamp(roundId) @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await arbitrumSequencerUptimeFeed + .connect(l2Messenger) + 
.populateTransaction.getTimestamp(1), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 30753, + gasUsedDeviation, + ) + }) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/ArbitrumValidator.test.ts b/contracts/test/v0.8/dev/ArbitrumValidator.test.ts new file mode 100644 index 00000000..232eea95 --- /dev/null +++ b/contracts/test/v0.8/dev/ArbitrumValidator.test.ts @@ -0,0 +1,134 @@ +import { ethers } from 'hardhat' +import { BigNumber, BigNumberish, Contract, ContractFactory } from 'ethers' +import { expect } from 'chai' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { + deployMockContract, + MockContract, +} from '@ethereum-waffle/mock-contract' +/// Pick ABIs from compilation +// @ts-ignore +import { abi as arbitrumSequencerStatusRecorderAbi } from '../../../artifacts/src/v0.8/l2ep/dev/arbitrum/ArbitrumSequencerUptimeFeed.sol/ArbitrumSequencerUptimeFeed.json' +// @ts-ignore +import { abi as arbitrumInboxAbi } from '../../../artifacts/src/v0.8/vendor/arb-bridge-eth/v0.8.0-custom/contracts/bridge/interfaces/IInbox.sol/IInbox.json' +// @ts-ignore +import { abi as aggregatorAbi } from '../../../artifacts/src/v0.8/shared/interfaces/AggregatorV2V3Interface.sol/AggregatorV2V3Interface.json' + +const truncateBigNumToAddress = (num: BigNumberish) => { + // Pad, then slice off '0x' prefix + const hexWithoutPrefix = BigNumber.from(num).toHexString().slice(2) + // Ethereum address is 20B -> 40 hex chars w/o 0x prefix + const truncated = hexWithoutPrefix + .split('') + .reverse() + .slice(0, 40) + .reverse() + .join('') + return '0x' + truncated +} + +describe('ArbitrumValidator', () => { + const MAX_GAS = BigNumber.from(1_000_000) + const GAS_PRICE_BID = BigNumber.from(1_000_000) + const BASE_FEE = BigNumber.from(14_000_000_000) + /** Fake L2 target */ + const L2_SEQ_STATUS_RECORDER_ADDRESS = + '0x491B1dDA0A8fa069bbC1125133A975BF4e85a91b' + let arbitrumValidator: Contract + let accessController: 
Contract + let arbitrumSequencerStatusRecorderFactory: ContractFactory + let mockArbitrumInbox: Contract + let l1GasFeed: MockContract + let deployer: SignerWithAddress + let eoaValidator: SignerWithAddress + let arbitrumValidatorL2Address: string + before(async () => { + const accounts = await ethers.getSigners() + deployer = accounts[0] + eoaValidator = accounts[1] + }) + + beforeEach(async () => { + const accessControllerFactory = await ethers.getContractFactory( + 'src/v0.8/shared/access/SimpleWriteAccessController.sol:SimpleWriteAccessController', + deployer, + ) + accessController = await accessControllerFactory.deploy() + + // Required for building the calldata + arbitrumSequencerStatusRecorderFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/arbitrum/ArbitrumSequencerUptimeFeed.sol:ArbitrumSequencerUptimeFeed', + deployer, + ) + l1GasFeed = await deployMockContract(deployer as any, aggregatorAbi) + await l1GasFeed.mock.latestRoundData.returns( + '73786976294838220258' /** roundId */, + '96800000000' /** answer */, + '163826896' /** startedAt */, + '1638268960' /** updatedAt */, + '73786976294838220258' /** answeredInRound */, + ) + // Arbitrum Inbox contract on L1 + const mockArbitrumInboxFactory = await ethers.getContractFactory( + 'src/v0.8/tests/MockArbitrumInbox.sol:MockArbitrumInbox', + ) + mockArbitrumInbox = await mockArbitrumInboxFactory.deploy() + + // Contract under test + const arbitrumValidatorFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/arbitrum/ArbitrumValidator.sol:ArbitrumValidator', + deployer, + ) + arbitrumValidator = await arbitrumValidatorFactory.deploy( + mockArbitrumInbox.address, + L2_SEQ_STATUS_RECORDER_ADDRESS, + accessController.address, + MAX_GAS /** L1 gas bid */, + GAS_PRICE_BID /** L2 gas bid */, + BASE_FEE, + l1GasFeed.address, + 0, + ) + // Transfer some ETH to the ArbitrumValidator contract + await deployer.sendTransaction({ + to: arbitrumValidator.address, + value: 
ethers.utils.parseEther('1.0'), + }) + arbitrumValidatorL2Address = ethers.utils.getAddress( + truncateBigNumToAddress( + BigNumber.from(arbitrumValidator.address).add( + '0x1111000000000000000000000000000000001111', + ), + ), + ) + }) + + describe('#validate', () => { + it('post sequencer offline', async () => { + await arbitrumValidator.addAccess(eoaValidator.address) + + const now = Math.ceil(Date.now() / 1000) + 1000 + await ethers.provider.send('evm_setNextBlockTimestamp', [now]) + const arbitrumSequencerStatusRecorderCallData = + arbitrumSequencerStatusRecorderFactory.interface.encodeFunctionData( + 'updateStatus', + [true, now], + ) + await expect(arbitrumValidator.connect(eoaValidator).validate(0, 0, 1, 1)) + .to.emit( + mockArbitrumInbox, + 'RetryableTicketNoRefundAliasRewriteCreated', + ) + .withArgs( + L2_SEQ_STATUS_RECORDER_ADDRESS, + 0, + '25312000000000', + arbitrumValidatorL2Address, + arbitrumValidatorL2Address, + MAX_GAS, + GAS_PRICE_BID, + arbitrumSequencerStatusRecorderCallData, + ) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/BatchBlockhashStore.test.ts b/contracts/test/v0.8/dev/BatchBlockhashStore.test.ts new file mode 100644 index 00000000..6821184f --- /dev/null +++ b/contracts/test/v0.8/dev/BatchBlockhashStore.test.ts @@ -0,0 +1,324 @@ +import { assert, expect } from 'chai' +import { Contract, Signer } from 'ethers' +import { ethers } from 'hardhat' +import * as rlp from 'rlp' + +function range(size: number, startAt = 0) { + return [...Array(size).keys()].map((i) => i + startAt) +} + +describe('BatchBlockhashStore', () => { + let blockhashStore: Contract + let batchBHS: Contract + let owner: Signer + + beforeEach(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + + const bhFactory = await ethers.getContractFactory( + 'src/v0.6/BlockhashStore.sol:BlockhashStore', + accounts[0], + ) + + blockhashStore = await bhFactory.deploy() + + const batchBHSFactory = await ethers.getContractFactory( + 
'src/v0.8/vrf/BatchBlockhashStore.sol:BatchBlockhashStore', + accounts[0], + ) + + batchBHS = await batchBHSFactory.deploy(blockhashStore.address) + + // Mine some blocks so that we have some blockhashes to store. + for (let i = 0; i < 10; i++) { + await ethers.provider.send('evm_mine', []) + } + }) + + describe('#store', () => { + it('stores batches of blocknumbers', async () => { + const latestBlock = await ethers.provider.send('eth_blockNumber', []) + const bottomBlock = latestBlock - 5 + const numBlocks = 3 + await batchBHS.connect(owner).store(range(numBlocks, bottomBlock)) + + // Mine some blocks to confirm the store batch tx above. + for (let i = 0; i < 2; i++) { + await ethers.provider.send('evm_mine', []) + } + + // check the bhs if it was stored + for (let i = bottomBlock; i < bottomBlock + numBlocks; i++) { + const actualBh = await blockhashStore.connect(owner).getBlockhash(i) + const expectedBh = (await ethers.provider.getBlock(i)).hash + expect(expectedBh).to.equal(actualBh) + } + }) + + it('skips block numbers that are too far back', async () => { + // blockhash(n) fails if n is more than 256 blocks behind the current block in which + // the instruction is executing. + for (let i = 0; i < 256; i++) { + await ethers.provider.send('evm_mine', []) + } + + const gettableBlock = + (await ethers.provider.send('eth_blockNumber', [])) - 1 + + // Store 3 block numbers that are too far back, and one that is close enough. 
+ await batchBHS.connect(owner).store([1, 2, 3, gettableBlock]) + + await ethers.provider.send('evm_mine', []) + + // Only block "250" should be stored + const actualBh = await blockhashStore + .connect(owner) + .getBlockhash(gettableBlock) + const expectedBh = (await ethers.provider.getBlock(gettableBlock)).hash + expect(expectedBh).to.equal(actualBh) + + // others were not stored + for (let i of [1, 2, 3]) { + expect( + blockhashStore.connect(owner).getBlockhash(i), + ).to.be.revertedWith('blockhash not found in store') + } + }) + }) + + describe('#getBlockhashes', () => { + it('fetches blockhashes of a batch of block numbers', async () => { + // Store a bunch of block hashes + const latestBlock = await ethers.provider.send('eth_blockNumber', []) + const bottomBlock = latestBlock - 5 + const numBlocks = 3 + await batchBHS.connect(owner).store(range(numBlocks, bottomBlock)) + + // Mine some blocks to confirm the store batch tx above. + for (let i = 0; i < 2; i++) { + await ethers.provider.send('evm_mine', []) + } + + // fetch the blocks in a batch + const actualBlockhashes = await batchBHS + .connect(owner) + .getBlockhashes(range(numBlocks, bottomBlock)) + let expectedBlockhashes = [] + for (let i = bottomBlock; i < bottomBlock + numBlocks; i++) { + const block = await ethers.provider.send('eth_getBlockByNumber', [ + '0x' + i.toString(16), + false, + ]) + expectedBlockhashes.push(block.hash) + } + assert.deepEqual(actualBlockhashes, expectedBlockhashes) + }) + + it('returns 0x0 for block numbers without an associated blockhash', async () => { + const latestBlock = await ethers.provider.send('eth_blockNumber', []) + const bottomBlock = latestBlock - 5 + const numBlocks = 3 + const blockhashes = await batchBHS + .connect(owner) + .getBlockhashes(range(numBlocks, bottomBlock)) + const expected = [ + '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', + 
'0x0000000000000000000000000000000000000000000000000000000000000000', + ] + assert.deepEqual(blockhashes, expected) + }) + }) + + describe('#storeVerifyHeader', () => { + it('stores batches of blocknumbers using storeVerifyHeader [ @skip-coverage ]', async () => { + // Store a single blockhash and go backwards from there using storeVerifyHeader + const latestBlock = await ethers.provider.send('eth_blockNumber', []) + await batchBHS.connect(owner).store([latestBlock]) + await ethers.provider.send('evm_mine', []) + + const numBlocks = 3 + const startBlock = latestBlock - 1 + const blockNumbers = range( + numBlocks + 1, + startBlock - numBlocks, + ).reverse() + let blockHeaders = [] + let expectedBlockhashes = [] + for (let i of blockNumbers) { + const block = await ethers.provider.send('eth_getBlockByNumber', [ + '0x' + (i + 1).toString(16), + false, + ]) + // eip 1559 header - switch to this if we upgrade hardhat + // and use post-london forks of ethereum. + const encodedHeader = rlp.encode([ + block.parentHash, + block.sha3Uncles, + ethers.utils.arrayify(block.miner), + block.stateRoot, + block.transactionsRoot, + block.receiptsRoot, + block.logsBloom, + block.difficulty == '0x0' ? '0x' : block.difficulty, + block.number, + block.gasLimit, + block.gasUsed == '0x0' ? '0x' : block.gasUsed, + block.timestamp, + block.extraData, + block.mixHash, + block.nonce, + block.baseFeePerGas, + ]) + // // pre-london block header serialization - kept for prosperity + // const encodedHeader = rlp.encode([ + // block.parentHash, + // block.sha3Uncles, + // ethers.utils.arrayify(block.miner), + // block.stateRoot, + // block.transactionsRoot, + // block.receiptsRoot, + // block.logsBloom, + // block.difficulty, + // block.number, + // block.gasLimit, + // block.gasUsed == '0x0' ? 
'0x' : block.gasUsed, + // block.timestamp, + // block.extraData, + // block.mixHash, + // block.nonce, + // ]) + blockHeaders.push('0x' + encodedHeader.toString('hex')) + expectedBlockhashes.push( + ( + await ethers.provider.send('eth_getBlockByNumber', [ + '0x' + i.toString(16), + false, + ]) + ).hash, + ) + } + await batchBHS + .connect(owner) + .storeVerifyHeader(blockNumbers, blockHeaders) + + // fetch blocks that were just stored and assert correctness + const actualBlockhashes = await batchBHS + .connect(owner) + .getBlockhashes(blockNumbers) + + assert.deepEqual(actualBlockhashes, expectedBlockhashes) + }) + + describe('bad input', () => { + it('reverts on mismatched input array sizes', async () => { + // Store a single blockhash and go backwards from there using storeVerifyHeader + const latestBlock = await ethers.provider.send('eth_blockNumber', []) + await batchBHS.connect(owner).store([latestBlock]) + + await ethers.provider.send('evm_mine', []) + + const numBlocks = 3 + const startBlock = latestBlock - 1 + const blockNumbers = range( + numBlocks + 1, + startBlock - numBlocks, + ).reverse() + let blockHeaders = [] + let expectedBlockhashes = [] + for (let i of blockNumbers) { + const block = await ethers.provider.send('eth_getBlockByNumber', [ + '0x' + (i + 1).toString(16), + false, + ]) + const encodedHeader = rlp.encode([ + block.parentHash, + block.sha3Uncles, + ethers.utils.arrayify(block.miner), + block.stateRoot, + block.transactionsRoot, + block.receiptsRoot, + block.logsBloom, + block.difficulty == '0x0' ? '0x' : block.difficulty, + block.number, + block.gasLimit, + block.gasUsed == '0x0' ? 
'0x' : block.gasUsed, + block.timestamp, + block.extraData, + block.mixHash, + block.nonce, + block.baseFeePerGas, + ]) + blockHeaders.push('0x' + encodedHeader.toString('hex')) + expectedBlockhashes.push( + ( + await ethers.provider.send('eth_getBlockByNumber', [ + '0x' + i.toString(16), + false, + ]) + ).hash, + ) + } + // remove last element to simulate different input array sizes + blockHeaders.pop() + expect( + batchBHS.connect(owner).storeVerifyHeader(blockNumbers, blockHeaders), + ).to.be.revertedWith('input array arg lengths mismatch') + }) + + it('reverts on bad block header input', async () => { + // Store a single blockhash and go backwards from there using storeVerifyHeader + const latestBlock = await ethers.provider.send('eth_blockNumber', []) + await batchBHS.connect(owner).store([latestBlock]) + + await ethers.provider.send('evm_mine', []) + + const numBlocks = 3 + const startBlock = latestBlock - 1 + const blockNumbers = range( + numBlocks + 1, + startBlock - numBlocks, + ).reverse() + let blockHeaders = [] + let expectedBlockhashes = [] + for (let i of blockNumbers) { + const block = await ethers.provider.send('eth_getBlockByNumber', [ + '0x' + (i + 1).toString(16), + false, + ]) + const encodedHeader = rlp.encode([ + block.parentHash, + block.sha3Uncles, + ethers.utils.arrayify(block.miner), + block.stateRoot, + block.transactionsRoot, + block.receiptsRoot, + block.logsBloom, + block.difficulty == '0x0' ? '0x' : block.difficulty, + block.number, + block.gasLimit, + block.gasUsed, // incorrect: in cases where it's 0x0 it should be 0x instead. 
+ block.timestamp, + block.extraData, + block.mixHash, + block.nonce, + block.baseFeePerGas, + ]) + blockHeaders.push('0x' + encodedHeader.toString('hex')) + expectedBlockhashes.push( + ( + await ethers.provider.send('eth_getBlockByNumber', [ + '0x' + i.toString(16), + false, + ]) + ).hash, + ) + } + expect( + batchBHS.connect(owner).storeVerifyHeader(blockNumbers, blockHeaders), + ).to.be.revertedWith('header has unknown blockhash') + }) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/CrossDomainOwnable.test.ts b/contracts/test/v0.8/dev/CrossDomainOwnable.test.ts new file mode 100644 index 00000000..7d9d58cf --- /dev/null +++ b/contracts/test/v0.8/dev/CrossDomainOwnable.test.ts @@ -0,0 +1,77 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { Contract, ContractFactory } from 'ethers' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' + +let owner: SignerWithAddress +let stranger: SignerWithAddress +let l1OwnerAddress: string +let ownableFactory: ContractFactory +let ownable: Contract + +before(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + stranger = accounts[1] + l1OwnerAddress = owner.address + + // Contract factories + ownableFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/CrossDomainOwnable.sol:CrossDomainOwnable', + owner, + ) +}) + +describe('CrossDomainOwnable', () => { + beforeEach(async () => { + ownable = await ownableFactory.deploy(l1OwnerAddress) + }) + + describe('#constructor', () => { + it('should set the l1Owner correctly', async () => { + const response = await ownable.l1Owner() + assert.equal(response, l1OwnerAddress) + }) + }) + + describe('#transferL1Ownership', () => { + it('should not be callable by non-owners', async () => { + await expect( + ownable.connect(stranger).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Only callable by L1 owner') + }) + + it('should be callable by current L1 owner', async () => { + const 
currentL1Owner = await ownable.l1Owner() + await expect(ownable.transferL1Ownership(stranger.address)) + .to.emit(ownable, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, stranger.address) + }) + + it('should be callable by current L1 owner to zero address', async () => { + const currentL1Owner = await ownable.l1Owner() + await expect(ownable.transferL1Ownership(ethers.constants.AddressZero)) + .to.emit(ownable, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, ethers.constants.AddressZero) + }) + }) + + describe('#acceptL1Ownership', () => { + it('should not be callable by non pending-owners', async () => { + await expect( + ownable.connect(stranger).acceptL1Ownership(), + ).to.be.revertedWith('Only callable by proposed L1 owner') + }) + + it('should be callable by pending L1 owner', async () => { + const currentL1Owner = await ownable.l1Owner() + await ownable.transferL1Ownership(stranger.address) + await expect(ownable.connect(stranger).acceptL1Ownership()) + .to.emit(ownable, 'L1OwnershipTransferred') + .withArgs(currentL1Owner, stranger.address) + + const updatedL1Owner = await ownable.l1Owner() + assert.equal(updatedL1Owner, stranger.address) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/KeeperRegistryCheckUpkeepGasUsageWrapper.test.ts b/contracts/test/v0.8/dev/KeeperRegistryCheckUpkeepGasUsageWrapper.test.ts new file mode 100644 index 00000000..9187487a --- /dev/null +++ b/contracts/test/v0.8/dev/KeeperRegistryCheckUpkeepGasUsageWrapper.test.ts @@ -0,0 +1,110 @@ +import { ethers } from 'hardhat' +import { BigNumber, Signer } from 'ethers' +import { assert } from 'chai' +import { KeeperRegistryCheckUpkeepGasUsageWrapper12 as GasWrapper } from '../../../typechain/KeeperRegistryCheckUpkeepGasUsageWrapper12' +import { KeeperRegistryCheckUpkeepGasUsageWrapper1_2__factory as GasWrapperFactory } from '../../../typechain/factories/KeeperRegistryCheckUpkeepGasUsageWrapper1_2__factory' +import { getUsers, Personas } from 
'../../test-helpers/setup' +import { + deployMockContract, + MockContract, +} from '@ethereum-waffle/mock-contract' +import { KeeperRegistry1_2__factory as KeeperRegistryFactory } from '../../../typechain/factories/KeeperRegistry1_2__factory' + +let personas: Personas +let owner: Signer +let caller: Signer +let nelly: Signer +let registryMockContract: MockContract +let gasWrapper: GasWrapper +let gasWrapperFactory: GasWrapperFactory + +const upkeepId = 123 + +describe('KeeperRegistryCheckUpkeepGasUsageWrapper1_2', () => { + before(async () => { + personas = (await getUsers()).personas + owner = personas.Default + caller = personas.Carol + nelly = personas.Nelly + + registryMockContract = await deployMockContract( + owner as any, + KeeperRegistryFactory.abi, + ) + // @ts-ignore bug in autogen file + gasWrapperFactory = await ethers.getContractFactory( + 'KeeperRegistryCheckUpkeepGasUsageWrapper1_2', + ) + gasWrapper = await gasWrapperFactory + .connect(owner) + .deploy(registryMockContract.address) + await gasWrapper.deployed() + }) + + describe('measureCheckGas()', () => { + it("returns gas used when registry's checkUpkeep executes successfully", async () => { + await registryMockContract.mock.checkUpkeep + .withArgs(upkeepId, await nelly.getAddress()) + .returns( + '0xabcd' /* performData */, + BigNumber.from(1000) /* maxLinkPayment */, + BigNumber.from(2000) /* gasLimit */, + BigNumber.from(3000) /* adjustedGasWei */, + BigNumber.from(4000) /* linkEth */, + ) + + const response = await gasWrapper + .connect(caller) + .callStatic.measureCheckGas( + BigNumber.from(upkeepId), + await nelly.getAddress(), + ) + + assert.isTrue(response[0], 'The checkUpkeepSuccess should be true') + assert.equal( + response[1], + '0xabcd', + 'The performData should be forwarded correctly', + ) + assert.isTrue( + response[2] > BigNumber.from(0), + 'The gasUsed value must be larger than 0', + ) + }) + + it("returns gas used when registry's checkUpkeep reverts", async () => { + await 
registryMockContract.mock.checkUpkeep + .withArgs(upkeepId, await nelly.getAddress()) + .revertsWithReason('Error') + + const response = await gasWrapper + .connect(caller) + .callStatic.measureCheckGas( + BigNumber.from(upkeepId), + await nelly.getAddress(), + ) + + assert.isFalse(response[0], 'The checkUpkeepSuccess should be false') + assert.equal( + response[1], + '0x', + 'The performData should be forwarded correctly', + ) + assert.isTrue( + response[2] > BigNumber.from(0), + 'The gasUsed value must be larger than 0', + ) + }) + }) + + describe('getKeeperRegistry()', () => { + it('returns the underlying keeper registry', async () => { + const registry = await gasWrapper.connect(caller).getKeeperRegistry() + assert.equal( + registry, + registryMockContract.address, + 'The underlying keeper registry is incorrect', + ) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/OptimismCrossDomainForwarder.test.ts b/contracts/test/v0.8/dev/OptimismCrossDomainForwarder.test.ts new file mode 100644 index 00000000..3b75b412 --- /dev/null +++ b/contracts/test/v0.8/dev/OptimismCrossDomainForwarder.test.ts @@ -0,0 +1,224 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { Contract, ContractFactory } from 'ethers' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { publicAbi } from '../../test-helpers/helpers' + +let owner: SignerWithAddress +let stranger: SignerWithAddress +let l1OwnerAddress: string +let newL1OwnerAddress: string +let forwarderFactory: ContractFactory +let greeterFactory: ContractFactory +let crossDomainMessengerFactory: ContractFactory +let crossDomainMessenger: Contract +let forwarder: Contract +let greeter: Contract + +before(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + stranger = accounts[1] + + // forwarder config + l1OwnerAddress = owner.address + newL1OwnerAddress = stranger.address + + // Contract factories + forwarderFactory = await 
ethers.getContractFactory( + 'src/v0.8/l2ep/dev/optimism/OptimismCrossDomainForwarder.sol:OptimismCrossDomainForwarder', + owner, + ) + greeterFactory = await ethers.getContractFactory( + 'src/v0.8/tests/Greeter.sol:Greeter', + owner, + ) + crossDomainMessengerFactory = await ethers.getContractFactory( + 'src/v0.8/vendor/MockOVMCrossDomainMessenger.sol:MockOVMCrossDomainMessenger', + ) +}) + +describe('OptimismCrossDomainForwarder', () => { + beforeEach(async () => { + crossDomainMessenger = + await crossDomainMessengerFactory.deploy(l1OwnerAddress) + forwarder = await forwarderFactory.deploy( + crossDomainMessenger.address, + l1OwnerAddress, + ) + greeter = await greeterFactory.deploy(forwarder.address) + }) + + it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(forwarder, [ + 'typeAndVersion', + 'crossDomainMessenger', + 'forward', + 'l1Owner', + 'transferL1Ownership', + 'acceptL1Ownership', + // ConfirmedOwner methods: + 'owner', + 'transferOwnership', + 'acceptOwnership', + ]) + }) + + describe('#constructor', () => { + it('should set the owner correctly', async () => { + const response = await forwarder.owner() + assert.equal(response, owner.address) + }) + + it('should set the l1Owner correctly', async () => { + const response = await forwarder.l1Owner() + assert.equal(response, l1OwnerAddress) + }) + + it('should set the crossdomain messenger correctly', async () => { + const response = await forwarder.crossDomainMessenger() + assert.equal(response, crossDomainMessenger.address) + }) + + it('should set the typeAndVersion correctly', async () => { + const response = await forwarder.typeAndVersion() + assert.equal(response, 'OptimismCrossDomainForwarder 1.0.0') + }) + }) + + describe('#forward', () => { + it('should not be callable by unknown address', async () => { + await expect( + forwarder.connect(stranger).forward(greeter.address, '0x'), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable 
by crossdomain messenger address / L1 owner', async () => { + const newGreeting = 'hello' + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [newGreeting], + ) + const forwardData = forwarderFactory.interface.encodeFunctionData( + 'forward', + [greeter.address, setGreetingData], + ) + await crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(forwarder.address, forwardData, 0) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, newGreeting) + }) + + it('should revert when contract call reverts', async () => { + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [''], + ) + const forwardData = forwarderFactory.interface.encodeFunctionData( + 'forward', + [greeter.address, setGreetingData], + ) + await expect( + crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(forwarder.address, forwardData, 0), + ).to.be.revertedWith('Invalid greeting length') + }) + }) + + describe('#transferL1Ownership', () => { + it('should not be callable by non-owners', async () => { + await expect( + forwarder.connect(stranger).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should not be callable by L2 owner', async () => { + const forwarderOwner = await forwarder.owner() + assert.equal(forwarderOwner, owner.address) + + await expect( + forwarder.connect(owner).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by current L1 owner', async () => { + const currentL1Owner = await forwarder.l1Owner() + const forwardData = forwarderFactory.interface.encodeFunctionData( + 'transferL1Ownership', + [newL1OwnerAddress], + ) + + await expect( + crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(forwarder.address, forwardData, 0), + ) + 
.to.emit(forwarder, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, newL1OwnerAddress) + }) + + it('should be callable by current L1 owner to zero address', async () => { + const currentL1Owner = await forwarder.l1Owner() + const forwardData = forwarderFactory.interface.encodeFunctionData( + 'transferL1Ownership', + [ethers.constants.AddressZero], + ) + + await expect( + crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(forwarder.address, forwardData, 0), + ) + .to.emit(forwarder, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, ethers.constants.AddressZero) + }) + }) + + describe('#acceptL1Ownership', () => { + it('should not be callable by non pending-owners', async () => { + const forwardData = forwarderFactory.interface.encodeFunctionData( + 'acceptL1Ownership', + [], + ) + await expect( + crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(forwarder.address, forwardData, 0), + ).to.be.revertedWith('Must be proposed L1 owner') + }) + + it('should be callable by pending L1 owner', async () => { + const currentL1Owner = await forwarder.l1Owner() + + // Transfer ownership + const forwardTransferData = forwarderFactory.interface.encodeFunctionData( + 'transferL1Ownership', + [newL1OwnerAddress], + ) + await crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(forwarder.address, forwardTransferData, 0) + + const forwardAcceptData = forwarderFactory.interface.encodeFunctionData( + 'acceptL1Ownership', + [], + ) + // Simulate cross-chain message from another sender + await crossDomainMessenger._setMockMessageSender(newL1OwnerAddress) + + await expect( + crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(forwarder.address, forwardAcceptData, 0), + ) + .to.emit(forwarder, 'L1OwnershipTransferred') + .withArgs(currentL1Owner, newL1OwnerAddress) + + const updatedL1Owner = await 
forwarder.l1Owner() + assert.equal(updatedL1Owner, newL1OwnerAddress) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/OptimismCrossDomainGovernor.test.ts b/contracts/test/v0.8/dev/OptimismCrossDomainGovernor.test.ts new file mode 100644 index 00000000..9ea425bb --- /dev/null +++ b/contracts/test/v0.8/dev/OptimismCrossDomainGovernor.test.ts @@ -0,0 +1,409 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import etherslib, { Contract, ContractFactory } from 'ethers' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { publicAbi, stripHexPrefix } from '../../test-helpers/helpers' + +let owner: SignerWithAddress +let stranger: SignerWithAddress +let l1OwnerAddress: string +let newL1OwnerAddress: string +let governorFactory: ContractFactory +let greeterFactory: ContractFactory +let multisendFactory: ContractFactory +let crossDomainMessengerFactory: ContractFactory +let crossDomainMessenger: Contract +let governor: Contract +let greeter: Contract +let multisend: Contract + +before(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + stranger = accounts[1] + + // governor config + l1OwnerAddress = owner.address + newL1OwnerAddress = stranger.address + + // Contract factories + governorFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/optimism/OptimismCrossDomainGovernor.sol:OptimismCrossDomainGovernor', + owner, + ) + greeterFactory = await ethers.getContractFactory( + 'src/v0.8/tests/Greeter.sol:Greeter', + owner, + ) + multisendFactory = await ethers.getContractFactory( + 'src/v0.8/vendor/MultiSend.sol:MultiSend', + owner, + ) + crossDomainMessengerFactory = await ethers.getContractFactory( + 'src/v0.8/vendor/MockOVMCrossDomainMessenger.sol:MockOVMCrossDomainMessenger', + ) +}) + +describe('OptimismCrossDomainGovernor', () => { + beforeEach(async () => { + crossDomainMessenger = + await crossDomainMessengerFactory.deploy(l1OwnerAddress) + governor = await 
governorFactory.deploy( + crossDomainMessenger.address, + l1OwnerAddress, + ) + greeter = await greeterFactory.deploy(governor.address) + multisend = await multisendFactory.deploy() + }) + + it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(governor, [ + 'typeAndVersion', + 'crossDomainMessenger', + 'forward', + 'forwardDelegate', + 'l1Owner', + 'transferL1Ownership', + 'acceptL1Ownership', + // ConfirmedOwner methods: + 'owner', + 'transferOwnership', + 'acceptOwnership', + ]) + }) + + describe('#constructor', () => { + it('should set the owner correctly', async () => { + const response = await governor.owner() + assert.equal(response, owner.address) + }) + + it('should set the l1Owner correctly', async () => { + const response = await governor.l1Owner() + assert.equal(response, l1OwnerAddress) + }) + + it('should set the crossdomain messenger correctly', async () => { + const response = await governor.crossDomainMessenger() + assert.equal(response, crossDomainMessenger.address) + }) + + it('should set the typeAndVersion correctly', async () => { + const response = await governor.typeAndVersion() + assert.equal(response, 'OptimismCrossDomainGovernor 1.0.0') + }) + }) + + describe('#forward', () => { + it('should not be callable by unknown address', async () => { + await expect( + governor.connect(stranger).forward(greeter.address, '0x'), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by crossdomain messenger address / L1 owner', async () => { + const newGreeting = 'hello' + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [newGreeting], + ) + const forwardData = governorFactory.interface.encodeFunctionData( + 'forward', + [greeter.address, setGreetingData], + ) + await crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(governor.address, forwardData, 0) + + const updatedGreeting = await greeter.greeting() + 
assert.equal(updatedGreeting, newGreeting) + }) + + it('should be callable by L2 owner', async () => { + const newGreeting = 'hello' + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [newGreeting], + ) + await governor.connect(owner).forward(greeter.address, setGreetingData) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, newGreeting) + }) + + it('should revert when contract call reverts', async () => { + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [''], + ) + const forwardData = governorFactory.interface.encodeFunctionData( + 'forward', + [greeter.address, setGreetingData], + ) + await expect( + crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(governor.address, forwardData, 0), + ).to.be.revertedWith('Invalid greeting length') + }) + }) + + describe('#forwardDelegate', () => { + it('should not be callable by unknown address', async () => { + await expect( + governor.connect(stranger).forwardDelegate(multisend.address, '0x'), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by crossdomain messenger address / L1 owner', async () => { + const calls = [ + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'foo', + ]), + value: 0, + }, + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'bar', + ]), + value: 0, + }, + ] + const multisendData = encodeMultisendData(multisend.interface, calls) + const forwardData = governorFactory.interface.encodeFunctionData( + 'forwardDelegate', + [multisend.address, multisendData], + ) + + await crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(governor.address, forwardData, 0) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, 'bar') + }) + + it('should be callable by 
L2 owner', async () => { + const calls = [ + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'foo', + ]), + value: 0, + }, + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'bar', + ]), + value: 0, + }, + ] + const multisendData = encodeMultisendData(multisend.interface, calls) + await governor + .connect(owner) + .forwardDelegate(multisend.address, multisendData) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, 'bar') + }) + + it('should revert batch when one call fails', async () => { + const calls = [ + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'foo', + ]), + value: 0, + }, + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + '', // should revert + ]), + value: 0, + }, + ] + const multisendData = encodeMultisendData(multisend.interface, calls) + const forwardData = governorFactory.interface.encodeFunctionData( + 'forwardDelegate', + [multisend.address, multisendData], + ) + + await expect( + crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(governor.address, forwardData, 0), + ).to.be.revertedWith('Governor delegatecall reverted') + + const greeting = await greeter.greeting() + assert.equal(greeting, '') // Unchanged + }) + + it('should bubble up revert when contract call reverts', async () => { + const triggerRevertData = + greeterFactory.interface.encodeFunctionData('triggerRevert') + const forwardData = governorFactory.interface.encodeFunctionData( + 'forwardDelegate', + [greeter.address, triggerRevertData], + ) + + await expect( + crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(governor.address, forwardData, 0), + ).to.be.revertedWith('Greeter: revert triggered') + }) + }) + + describe('#transferL1Ownership', () => { + it('should not be callable 
by non-owners', async () => { + await expect( + governor.connect(stranger).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should not be callable by L2 owner', async () => { + const governorOwner = await governor.owner() + assert.equal(governorOwner, owner.address) + + await expect( + governor.connect(owner).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by current L1 owner', async () => { + const currentL1Owner = await governor.l1Owner() + const forwardData = governorFactory.interface.encodeFunctionData( + 'transferL1Ownership', + [newL1OwnerAddress], + ) + + await expect( + crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(governor.address, forwardData, 0), + ) + .to.emit(governor, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, newL1OwnerAddress) + }) + + it('should be callable by current L1 owner to zero address', async () => { + const currentL1Owner = await governor.l1Owner() + const forwardData = governorFactory.interface.encodeFunctionData( + 'transferL1Ownership', + [ethers.constants.AddressZero], + ) + + await expect( + crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(governor.address, forwardData, 0), + ) + .to.emit(governor, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, ethers.constants.AddressZero) + }) + }) + + describe('#acceptL1Ownership', () => { + it('should not be callable by non pending-owners', async () => { + const forwardData = governorFactory.interface.encodeFunctionData( + 'acceptL1Ownership', + [], + ) + await expect( + crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(governor.address, forwardData, 0), + ).to.be.revertedWith('Must be proposed L1 owner') + }) + + it('should be callable by pending L1 owner', async () => { + const currentL1Owner = 
await governor.l1Owner() + + // Transfer ownership + const forwardTransferData = governorFactory.interface.encodeFunctionData( + 'transferL1Ownership', + [newL1OwnerAddress], + ) + await crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(governor.address, forwardTransferData, 0) + + const forwardAcceptData = governorFactory.interface.encodeFunctionData( + 'acceptL1Ownership', + [], + ) + // Simulate cross-chain message from another sender + await crossDomainMessenger._setMockMessageSender(newL1OwnerAddress) + + await expect( + crossDomainMessenger // Simulate cross-chain OVM message + .connect(stranger) + .sendMessage(governor.address, forwardAcceptData, 0), + ) + .to.emit(governor, 'L1OwnershipTransferred') + .withArgs(currentL1Owner, newL1OwnerAddress) + + const updatedL1Owner = await governor.l1Owner() + assert.equal(updatedL1Owner, newL1OwnerAddress) + }) + }) +}) + +// Multisend contract helpers + +/** + * Encodes an underlying transaction for the Multisend contract + * + * @param operation 0 for CALL, 1 for DELEGATECALL + * @param to tx target address + * @param value tx value + * @param data tx data + */ +export function encodeTxData( + operation: number, + to: string, + value: number, + data: string, +): string { + let dataBuffer = Buffer.from(stripHexPrefix(data), 'hex') + const types = ['uint8', 'address', 'uint256', 'uint256', 'bytes'] + const values = [operation, to, value, dataBuffer.length, dataBuffer] + let encoded = ethers.utils.solidityPack(types, values) + return stripHexPrefix(encoded) +} + +/** + * Encodes a Multisend call + * + * @param MultisendInterface Ethers Interface object of the Multisend contract + * @param transactions one or more transactions to include in the Multisend call + * @param to tx target address + * @param value tx value + * @param data tx data + */ +export function encodeMultisendData( + MultisendInterface: etherslib.utils.Interface, + transactions: { to: string; value: number; 
data: string }[], +): string { + let nestedTransactionData = '0x' + for (let transaction of transactions) { + nestedTransactionData += encodeTxData( + 0, + transaction.to, + transaction.value, + transaction.data, + ) + } + const encodedMultisendFnData = MultisendInterface.encodeFunctionData( + 'multiSend', + [nestedTransactionData], + ) + return encodedMultisendFnData +} diff --git a/contracts/test/v0.8/dev/OptimismSequencerUptimeFeed.test.ts b/contracts/test/v0.8/dev/OptimismSequencerUptimeFeed.test.ts new file mode 100644 index 00000000..28565687 --- /dev/null +++ b/contracts/test/v0.8/dev/OptimismSequencerUptimeFeed.test.ts @@ -0,0 +1,428 @@ +import { ethers, network } from 'hardhat' +import { BigNumber, Contract } from 'ethers' +import { expect } from 'chai' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' + +describe('OptimismSequencerUptimeFeed', () => { + let l2CrossDomainMessenger: Contract + let optimismUptimeFeed: Contract + let uptimeFeedConsumer: Contract + let deployer: SignerWithAddress + let l1Owner: SignerWithAddress + let l2Messenger: SignerWithAddress + let dummy: SignerWithAddress + const gasUsedDeviation = 100 + const initialStatus = 0 + + before(async () => { + const accounts = await ethers.getSigners() + deployer = accounts[0] + l1Owner = accounts[1] + dummy = accounts[3] + + const l2CrossDomainMessengerFactory = await ethers.getContractFactory( + 'src/v0.8/tests/MockOptimismL2CrossDomainMessenger.sol:MockOptimismL2CrossDomainMessenger', + deployer, + ) + + l2CrossDomainMessenger = await l2CrossDomainMessengerFactory.deploy() + + // Pretend we're on L2 + await network.provider.request({ + method: 'hardhat_impersonateAccount', + params: [l2CrossDomainMessenger.address], + }) + l2Messenger = await ethers.getSigner(l2CrossDomainMessenger.address) + // Credit the L2 messenger with some ETH + await dummy.sendTransaction({ + to: l2Messenger.address, + value: ethers.utils.parseEther('10'), + }) + }) + + beforeEach(async () => { 
+ const optimismSequencerStatusRecorderFactory = + await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/optimism/OptimismSequencerUptimeFeed.sol:OptimismSequencerUptimeFeed', + deployer, + ) + optimismUptimeFeed = await optimismSequencerStatusRecorderFactory.deploy( + l1Owner.address, + l2CrossDomainMessenger.address, + initialStatus, + ) + + // Set mock sender in mock L2 messenger contract + await l2CrossDomainMessenger.setSender(l1Owner.address) + + // Mock consumer + const statusFeedConsumerFactory = await ethers.getContractFactory( + 'src/v0.8/tests/FeedConsumer.sol:FeedConsumer', + deployer, + ) + uptimeFeedConsumer = await statusFeedConsumerFactory.deploy( + optimismUptimeFeed.address, + ) + }) + + describe('constructor', () => { + it('should have been deployed with the correct initial state', async () => { + const l1Sender = await optimismUptimeFeed.l1Sender() + expect(l1Sender).to.equal(l1Owner.address) + const { roundId, answer } = await optimismUptimeFeed.latestRoundData() + expect(roundId).to.equal(1) + expect(answer).to.equal(initialStatus) + }) + }) + + describe('#updateStatus', () => { + it('should revert if called by an address that is not the L2 Cross Domain Messenger', async () => { + let timestamp = await optimismUptimeFeed.latestTimestamp() + expect( + optimismUptimeFeed.connect(dummy).updateStatus(true, timestamp), + ).to.be.revertedWith('InvalidSender') + }) + + it('should revert if called by an address that is not the L2 Cross Domain Messenger and is not the L1 sender', async () => { + let timestamp = await optimismUptimeFeed.latestTimestamp() + await l2CrossDomainMessenger.setSender(dummy.address) + expect( + optimismUptimeFeed.connect(dummy).updateStatus(true, timestamp), + ).to.be.revertedWith('InvalidSender') + }) + + it(`should update status when status has not changed and incoming timestamp is the same as latest`, async () => { + const timestamp = await optimismUptimeFeed.latestTimestamp() + let tx = await optimismUptimeFeed + 
.connect(l2Messenger) + .updateStatus(true, timestamp) + await expect(tx) + .to.emit(optimismUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + expect(await optimismUptimeFeed.latestAnswer()).to.equal(1) + + const latestRoundBeforeUpdate = await optimismUptimeFeed.latestRoundData() + + tx = await optimismUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp.add(200)) + + // Submit another status update with the same status + const latestBlock = await ethers.provider.getBlock('latest') + + await expect(tx) + .to.emit(optimismUptimeFeed, 'RoundUpdated') + .withArgs(1, latestBlock.timestamp) + expect(await optimismUptimeFeed.latestAnswer()).to.equal(1) + expect(await optimismUptimeFeed.latestTimestamp()).to.equal(timestamp) + + // Verify that latest round has been properly updated + const latestRoundDataAfterUpdate = + await optimismUptimeFeed.latestRoundData() + expect(latestRoundDataAfterUpdate.roundId).to.equal( + latestRoundBeforeUpdate.roundId, + ) + expect(latestRoundDataAfterUpdate.answer).to.equal( + latestRoundBeforeUpdate.answer, + ) + expect(latestRoundDataAfterUpdate.startedAt).to.equal( + latestRoundBeforeUpdate.startedAt, + ) + expect(latestRoundDataAfterUpdate.answeredInRound).to.equal( + latestRoundBeforeUpdate.answeredInRound, + ) + expect(latestRoundDataAfterUpdate.updatedAt).to.equal( + latestBlock.timestamp, + ) + }) + + it(`should update status when status has changed and incoming timestamp is newer than the latest`, async () => { + let timestamp = await optimismUptimeFeed.latestTimestamp() + let tx = await optimismUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + await expect(tx) + .to.emit(optimismUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + expect(await optimismUptimeFeed.latestAnswer()).to.equal(1) + + // Submit another status update, different status, newer timestamp should update + timestamp = timestamp.add(2000) + tx = await optimismUptimeFeed + 
.connect(l2Messenger) + .updateStatus(false, timestamp) + await expect(tx) + .to.emit(optimismUptimeFeed, 'AnswerUpdated') + .withArgs(0, 3 /** roundId */, timestamp) + expect(await optimismUptimeFeed.latestAnswer()).to.equal(0) + expect(await optimismUptimeFeed.latestTimestamp()).to.equal(timestamp) + }) + + it(`should update status when status has changed and incoming timestamp is the same as latest`, async () => { + const timestamp = await optimismUptimeFeed.latestTimestamp() + let tx = await optimismUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + await expect(tx) + .to.emit(optimismUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + expect(await optimismUptimeFeed.latestAnswer()).to.equal(1) + + // Submit another status update, different status, same timestamp should update + tx = await optimismUptimeFeed + .connect(l2Messenger) + .updateStatus(false, timestamp) + await expect(tx) + .to.emit(optimismUptimeFeed, 'AnswerUpdated') + .withArgs(0, 3 /** roundId */, timestamp) + expect(await optimismUptimeFeed.latestAnswer()).to.equal(0) + expect(await optimismUptimeFeed.latestTimestamp()).to.equal(timestamp) + }) + + it('should ignore out-of-order updates', async () => { + const timestamp = (await optimismUptimeFeed.latestTimestamp()).add(10_000) + // Update status + let tx = await optimismUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + await expect(tx) + .to.emit(optimismUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + expect(await optimismUptimeFeed.latestAnswer()).to.equal(1) + + // Update with different status, but stale timestamp, should be ignored + const staleTimestamp = timestamp.sub(1000) + tx = await optimismUptimeFeed + .connect(l2Messenger) + .updateStatus(false, staleTimestamp) + await expect(tx) + .to.not.emit(optimismUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + await expect(tx).to.emit(optimismUptimeFeed, 'UpdateIgnored') + }) + 
}) + + describe('AggregatorV3Interface', () => { + it('should return valid answer from getRoundData and latestRoundData', async () => { + let [roundId, answer, startedAt, updatedAt, answeredInRound] = + await optimismUptimeFeed.latestRoundData() + expect(roundId).to.equal(1) + expect(answer).to.equal(0) + expect(answeredInRound).to.equal(roundId) + + // Submit status update with different status and newer timestamp, should update + const timestamp = (startedAt as BigNumber).add(1000) + await optimismUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + ;[roundId, answer, startedAt, updatedAt, answeredInRound] = + await optimismUptimeFeed.getRoundData(2) + expect(roundId).to.equal(2) + expect(answer).to.equal(1) + expect(answeredInRound).to.equal(roundId) + expect(startedAt).to.equal(timestamp) + expect(updatedAt).to.equal(updatedAt) + + // Check that last round is still returning the correct data + ;[roundId, answer, startedAt, updatedAt, answeredInRound] = + await optimismUptimeFeed.getRoundData(1) + expect(roundId).to.equal(1) + expect(answer).to.equal(0) + expect(answeredInRound).to.equal(roundId) + expect(startedAt).to.equal(startedAt) + expect(updatedAt).to.equal(updatedAt) + + // Assert latestRoundData corresponds to latest round id + expect(await optimismUptimeFeed.getRoundData(2)).to.deep.equal( + await optimismUptimeFeed.latestRoundData(), + ) + }) + + it('should revert from #getRoundData when round does not yet exist (future roundId)', async () => { + expect(optimismUptimeFeed.getRoundData(2)).to.be.revertedWith( + 'NoDataPresent()', + ) + }) + + it('should revert from #getAnswer when round does not yet exist (future roundId)', async () => { + expect(optimismUptimeFeed.getAnswer(2)).to.be.revertedWith( + 'NoDataPresent()', + ) + }) + + it('should revert from #getTimestamp when round does not yet exist (future roundId)', async () => { + expect(optimismUptimeFeed.getTimestamp(2)).to.be.revertedWith( + 'NoDataPresent()', + ) + }) + }) + + 
describe('Protect reads on AggregatorV2V3Interface functions', () => { + it('should disallow reads on AggregatorV2V3Interface functions when consuming contract is not whitelisted', async () => { + // Sanity - consumer is not whitelisted + expect(await optimismUptimeFeed.checkEnabled()).to.be.true + expect( + await optimismUptimeFeed.hasAccess(uptimeFeedConsumer.address, '0x00'), + ).to.be.false + + // Assert reads are not possible from consuming contract + await expect(uptimeFeedConsumer.latestAnswer()).to.be.revertedWith( + 'No access', + ) + await expect(uptimeFeedConsumer.latestRoundData()).to.be.revertedWith( + 'No access', + ) + }) + + it('should allow reads on AggregatorV2V3Interface functions when consuming contract is whitelisted', async () => { + // Whitelist consumer + await optimismUptimeFeed.addAccess(uptimeFeedConsumer.address) + // Sanity - consumer is whitelisted + expect(await optimismUptimeFeed.checkEnabled()).to.be.true + expect( + await optimismUptimeFeed.hasAccess(uptimeFeedConsumer.address, '0x00'), + ).to.be.true + + // Assert reads are possible from consuming contract + expect(await uptimeFeedConsumer.latestAnswer()).to.be.equal('0') + const [roundId, answer] = await uptimeFeedConsumer.latestRoundData() + expect(roundId).to.equal(1) + expect(answer).to.equal(0) + }) + }) + + describe('Gas costs', () => { + it('should consume a known amount of gas for updates @skip-coverage', async () => { + // Sanity - start at flag = 0 (`false`) + expect(await optimismUptimeFeed.latestAnswer()).to.equal(0) + let timestamp = await optimismUptimeFeed.latestTimestamp() + + // Gas for no update + timestamp = timestamp.add(1000) + const _noUpdateTx = await optimismUptimeFeed + .connect(l2Messenger) + .updateStatus(false, timestamp) + const noUpdateTx = await _noUpdateTx.wait(1) + // Assert no update + expect(await optimismUptimeFeed.latestAnswer()).to.equal(0) + expect(noUpdateTx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 38594, + gasUsedDeviation, + ) + + // 
Gas for update + timestamp = timestamp.add(1000) + const _updateTx = await optimismUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + const updateTx = await _updateTx.wait(1) + // Assert update + expect(await optimismUptimeFeed.latestAnswer()).to.equal(1) + expect(updateTx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 60170, + gasUsedDeviation, + ) + }) + + describe('Aggregator interface', () => { + beforeEach(async () => { + const timestamp = (await optimismUptimeFeed.latestTimestamp()).add(1000) + // Initialise a round + await optimismUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + }) + + it('should consume a known amount of gas for getRoundData(uint80) @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await optimismUptimeFeed + .connect(l2Messenger) + .populateTransaction.getRoundData(1), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 30952, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for latestRoundData() @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await optimismUptimeFeed + .connect(l2Messenger) + .populateTransaction.latestRoundData(), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28523, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for latestAnswer() @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await optimismUptimeFeed + .connect(l2Messenger) + .populateTransaction.latestAnswer(), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28329, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for latestTimestamp() @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await optimismUptimeFeed + .connect(l2Messenger) + .populateTransaction.latestTimestamp(), + ) + const tx = 
await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28229, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for latestRound() @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await optimismUptimeFeed + .connect(l2Messenger) + .populateTransaction.latestRound(), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28245, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for getAnswer(roundId) @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await optimismUptimeFeed + .connect(l2Messenger) + .populateTransaction.getAnswer(1), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 30682, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for getTimestamp(roundId) @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await optimismUptimeFeed + .connect(l2Messenger) + .populateTransaction.getTimestamp(1), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 30570, + gasUsedDeviation, + ) + }) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/OptimismValidator.test.ts b/contracts/test/v0.8/dev/OptimismValidator.test.ts new file mode 100644 index 00000000..ee69211f --- /dev/null +++ b/contracts/test/v0.8/dev/OptimismValidator.test.ts @@ -0,0 +1,123 @@ +import { ethers } from 'hardhat' +import { BigNumber, Contract, ContractFactory } from 'ethers' +import { expect } from 'chai' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +/// Pick ABIs from compilation +// @ts-ignore +import { abi as optimismSequencerStatusRecorderAbi } from '../../../artifacts/src/v0.8/l2ep/dev/optimism/OptimismSequencerUptimeFeed.sol/OptimismSequencerUptimeFeed.json' +// @ts-ignore +import { abi as optimismL1CrossDomainMessengerAbi } from 
'@eth-optimism/contracts/artifacts/contracts/L1/messaging/L1CrossDomainMessenger.sol' +// @ts-ignore +import { abi as aggregatorAbi } from '../../../artifacts/src/v0.8/shared/interfaces/AggregatorV2V3Interface.sol/AggregatorV2V3Interface.json' + +describe('OptimismValidator', () => { + const GAS_LIMIT = BigNumber.from(1_900_000) + /** Fake L2 target */ + const L2_SEQ_STATUS_RECORDER_ADDRESS = + '0x491B1dDA0A8fa069bbC1125133A975BF4e85a91b' + let optimismValidator: Contract + let optimismUptimeFeedFactory: ContractFactory + let mockOptimismL1CrossDomainMessenger: Contract + let deployer: SignerWithAddress + let eoaValidator: SignerWithAddress + + before(async () => { + const accounts = await ethers.getSigners() + deployer = accounts[0] + eoaValidator = accounts[1] + }) + + beforeEach(async () => { + // Required for building the calldata + optimismUptimeFeedFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/optimism/OptimismSequencerUptimeFeed.sol:OptimismSequencerUptimeFeed', + deployer, + ) + + // Optimism Messenger contract on L1 + const mockOptimismL1CrossDomainMessengerFactory = + await ethers.getContractFactory( + 'src/v0.8/tests/MockOptimismL1CrossDomainMessenger.sol:MockOptimismL1CrossDomainMessenger', + ) + mockOptimismL1CrossDomainMessenger = + await mockOptimismL1CrossDomainMessengerFactory.deploy() + + // Contract under test + const optimismValidatorFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/optimism/OptimismValidator.sol:OptimismValidator', + deployer, + ) + + optimismValidator = await optimismValidatorFactory.deploy( + mockOptimismL1CrossDomainMessenger.address, + L2_SEQ_STATUS_RECORDER_ADDRESS, + GAS_LIMIT, + ) + }) + + describe('#setGasLimit', () => { + it('correctly updates the gas limit', async () => { + const newGasLimit = BigNumber.from(2_000_000) + const tx = await optimismValidator.setGasLimit(newGasLimit) + await tx.wait() + const currentGasLimit = await optimismValidator.getGasLimit() + 
expect(currentGasLimit).to.equal(newGasLimit) + }) + }) + + describe('#validate', () => { + it('reverts if called by account with no access', async () => { + await expect( + optimismValidator.connect(eoaValidator).validate(0, 0, 1, 1), + ).to.be.revertedWith('No access') + }) + + it('posts sequencer status when there is not status change', async () => { + await optimismValidator.addAccess(eoaValidator.address) + + const currentBlock = await ethers.provider.getBlock('latest') + const futureTimestamp = currentBlock.timestamp + 5000 + + await ethers.provider.send('evm_setNextBlockTimestamp', [futureTimestamp]) + const sequencerStatusRecorderCallData = + optimismUptimeFeedFactory.interface.encodeFunctionData('updateStatus', [ + false, + futureTimestamp, + ]) + + await expect(optimismValidator.connect(eoaValidator).validate(0, 0, 0, 0)) + .to.emit(mockOptimismL1CrossDomainMessenger, 'SentMessage') + .withArgs( + L2_SEQ_STATUS_RECORDER_ADDRESS, + optimismValidator.address, + sequencerStatusRecorderCallData, + 0, + GAS_LIMIT, + ) + }) + + it('post sequencer offline', async () => { + await optimismValidator.addAccess(eoaValidator.address) + + const currentBlock = await ethers.provider.getBlock('latest') + const futureTimestamp = currentBlock.timestamp + 10000 + + await ethers.provider.send('evm_setNextBlockTimestamp', [futureTimestamp]) + const sequencerStatusRecorderCallData = + optimismUptimeFeedFactory.interface.encodeFunctionData('updateStatus', [ + true, + futureTimestamp, + ]) + + await expect(optimismValidator.connect(eoaValidator).validate(0, 0, 1, 1)) + .to.emit(mockOptimismL1CrossDomainMessenger, 'SentMessage') + .withArgs( + L2_SEQ_STATUS_RECORDER_ADDRESS, + optimismValidator.address, + sequencerStatusRecorderCallData, + 0, + GAS_LIMIT, + ) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/ScrollCrossDomainForwarder.test.ts b/contracts/test/v0.8/dev/ScrollCrossDomainForwarder.test.ts new file mode 100644 index 00000000..923d4132 --- /dev/null +++ 
b/contracts/test/v0.8/dev/ScrollCrossDomainForwarder.test.ts @@ -0,0 +1,259 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { Contract, ContractFactory } from 'ethers' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { publicAbi } from '../../test-helpers/helpers' + +let owner: SignerWithAddress +let stranger: SignerWithAddress +let l1OwnerAddress: string +let newL1OwnerAddress: string +let forwarderFactory: ContractFactory +let greeterFactory: ContractFactory +let crossDomainMessengerFactory: ContractFactory +let crossDomainMessenger: Contract +let forwarder: Contract +let greeter: Contract + +before(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + stranger = accounts[1] + + // forwarder config + l1OwnerAddress = owner.address + newL1OwnerAddress = stranger.address + + // Contract factories + forwarderFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/scroll/ScrollCrossDomainForwarder.sol:ScrollCrossDomainForwarder', + owner, + ) + greeterFactory = await ethers.getContractFactory( + 'src/v0.8/tests/Greeter.sol:Greeter', + owner, + ) + crossDomainMessengerFactory = await ethers.getContractFactory( + 'src/v0.8/vendor/MockScrollCrossDomainMessenger.sol:MockScrollCrossDomainMessenger', + ) +}) + +describe('ScrollCrossDomainForwarder', () => { + beforeEach(async () => { + crossDomainMessenger = + await crossDomainMessengerFactory.deploy(l1OwnerAddress) + forwarder = await forwarderFactory.deploy( + crossDomainMessenger.address, + l1OwnerAddress, + ) + greeter = await greeterFactory.deploy(forwarder.address) + }) + + it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(forwarder, [ + 'typeAndVersion', + 'crossDomainMessenger', + 'forward', + 'l1Owner', + 'transferL1Ownership', + 'acceptL1Ownership', + // ConfirmedOwner methods: + 'owner', + 'transferOwnership', + 'acceptOwnership', + ]) + }) + + describe('#constructor', () => { + 
it('should set the owner correctly', async () => { + const response = await forwarder.owner() + assert.equal(response, owner.address) + }) + + it('should set the l1Owner correctly', async () => { + const response = await forwarder.l1Owner() + assert.equal(response, l1OwnerAddress) + }) + + it('should set the crossdomain messenger correctly', async () => { + const response = await forwarder.crossDomainMessenger() + assert.equal(response, crossDomainMessenger.address) + }) + + it('should set the typeAndVersion correctly', async () => { + const response = await forwarder.typeAndVersion() + assert.equal(response, 'ScrollCrossDomainForwarder 1.0.0') + }) + }) + + describe('#forward', () => { + it('should not be callable by unknown address', async () => { + await expect( + forwarder.connect(stranger).forward(greeter.address, '0x'), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by crossdomain messenger address / L1 owner', async () => { + const newGreeting = 'hello' + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [newGreeting], + ) + const forwardData = forwarderFactory.interface.encodeFunctionData( + 'forward', + [greeter.address, setGreetingData], + ) + await crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + forwarder.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, newGreeting) + }) + + it('should revert when contract call reverts', async () => { + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [''], + ) + const forwardData = forwarderFactory.interface.encodeFunctionData( + 'forward', + [greeter.address, setGreetingData], + ) + await expect( + crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + 
['sendMessage(address,uint256,bytes,uint256)']( + forwarder.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ), + ).to.be.revertedWith('Invalid greeting length') + }) + }) + + describe('#transferL1Ownership', () => { + it('should not be callable by non-owners', async () => { + await expect( + forwarder.connect(stranger).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should not be callable by L2 owner', async () => { + const forwarderOwner = await forwarder.owner() + assert.equal(forwarderOwner, owner.address) + + await expect( + forwarder.connect(owner).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by current L1 owner', async () => { + const currentL1Owner = await forwarder.l1Owner() + const forwardData = forwarderFactory.interface.encodeFunctionData( + 'transferL1Ownership', + [newL1OwnerAddress], + ) + + await expect( + crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + forwarder.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ), + ) + .to.emit(forwarder, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, newL1OwnerAddress) + }) + + it('should be callable by current L1 owner to zero address', async () => { + const currentL1Owner = await forwarder.l1Owner() + const forwardData = forwarderFactory.interface.encodeFunctionData( + 'transferL1Ownership', + [ethers.constants.AddressZero], + ) + + await expect( + crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + forwarder.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ), + ) + .to.emit(forwarder, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, ethers.constants.AddressZero) + }) + }) + + describe('#acceptL1Ownership', () => 
{ + it('should not be callable by non pending-owners', async () => { + const forwardData = forwarderFactory.interface.encodeFunctionData( + 'acceptL1Ownership', + [], + ) + await expect( + crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + forwarder.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ), + ).to.be.revertedWith('Must be proposed L1 owner') + }) + + it('should be callable by pending L1 owner', async () => { + const currentL1Owner = await forwarder.l1Owner() + + // Transfer ownership + const forwardTransferData = forwarderFactory.interface.encodeFunctionData( + 'transferL1Ownership', + [newL1OwnerAddress], + ) + await crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + forwarder.address, // target + 0, // value + forwardTransferData, // message + 0, // gasLimit + ) + + const forwardAcceptData = forwarderFactory.interface.encodeFunctionData( + 'acceptL1Ownership', + [], + ) + // Simulate cross-chain message from another sender + await crossDomainMessenger._setMockMessageSender(newL1OwnerAddress) + + await expect( + crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + forwarder.address, // target + 0, // value + forwardAcceptData, // message + 0, // gasLimit + ), + ) + .to.emit(forwarder, 'L1OwnershipTransferred') + .withArgs(currentL1Owner, newL1OwnerAddress) + + const updatedL1Owner = await forwarder.l1Owner() + assert.equal(updatedL1Owner, newL1OwnerAddress) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/ScrollCrossDomainGovernor.test.ts b/contracts/test/v0.8/dev/ScrollCrossDomainGovernor.test.ts new file mode 100644 index 00000000..adb78c26 --- /dev/null +++ b/contracts/test/v0.8/dev/ScrollCrossDomainGovernor.test.ts @@ -0,0 +1,459 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 
'chai' +import etherslib, { Contract, ContractFactory } from 'ethers' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { publicAbi, stripHexPrefix } from '../../test-helpers/helpers' + +let owner: SignerWithAddress +let stranger: SignerWithAddress +let l1OwnerAddress: string +let newL1OwnerAddress: string +let governorFactory: ContractFactory +let greeterFactory: ContractFactory +let multisendFactory: ContractFactory +let crossDomainMessengerFactory: ContractFactory +let crossDomainMessenger: Contract +let governor: Contract +let greeter: Contract +let multisend: Contract + +before(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + stranger = accounts[1] + + // governor config + l1OwnerAddress = owner.address + newL1OwnerAddress = stranger.address + + // Contract factories + governorFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/scroll/ScrollCrossDomainGovernor.sol:ScrollCrossDomainGovernor', + owner, + ) + greeterFactory = await ethers.getContractFactory( + 'src/v0.8/tests/Greeter.sol:Greeter', + owner, + ) + multisendFactory = await ethers.getContractFactory( + 'src/v0.8/vendor/MultiSend.sol:MultiSend', + owner, + ) + crossDomainMessengerFactory = await ethers.getContractFactory( + 'src/v0.8/vendor/MockScrollCrossDomainMessenger.sol:MockScrollCrossDomainMessenger', + ) +}) + +describe('ScrollCrossDomainGovernor', () => { + beforeEach(async () => { + crossDomainMessenger = + await crossDomainMessengerFactory.deploy(l1OwnerAddress) + governor = await governorFactory.deploy( + crossDomainMessenger.address, + l1OwnerAddress, + ) + greeter = await greeterFactory.deploy(governor.address) + multisend = await multisendFactory.deploy() + }) + + it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(governor, [ + 'typeAndVersion', + 'crossDomainMessenger', + 'forward', + 'forwardDelegate', + 'l1Owner', + 'transferL1Ownership', + 'acceptL1Ownership', + // ConfirmedOwner 
methods: + 'owner', + 'transferOwnership', + 'acceptOwnership', + ]) + }) + + describe('#constructor', () => { + it('should set the owner correctly', async () => { + const response = await governor.owner() + assert.equal(response, owner.address) + }) + + it('should set the l1Owner correctly', async () => { + const response = await governor.l1Owner() + assert.equal(response, l1OwnerAddress) + }) + + it('should set the crossdomain messenger correctly', async () => { + const response = await governor.crossDomainMessenger() + assert.equal(response, crossDomainMessenger.address) + }) + + it('should set the typeAndVersion correctly', async () => { + const response = await governor.typeAndVersion() + assert.equal(response, 'ScrollCrossDomainGovernor 1.0.0') + }) + }) + + describe('#forward', () => { + it('should not be callable by unknown address', async () => { + await expect( + governor.connect(stranger).forward(greeter.address, '0x'), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by crossdomain messenger address / L1 owner', async () => { + const newGreeting = 'hello' + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [newGreeting], + ) + const forwardData = governorFactory.interface.encodeFunctionData( + 'forward', + [greeter.address, setGreetingData], + ) + await crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + governor.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, newGreeting) + }) + + it('should be callable by L2 owner', async () => { + const newGreeting = 'hello' + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [newGreeting], + ) + await governor.connect(owner).forward(greeter.address, setGreetingData) + + const updatedGreeting = await 
greeter.greeting() + assert.equal(updatedGreeting, newGreeting) + }) + + it('should revert when contract call reverts', async () => { + const setGreetingData = greeterFactory.interface.encodeFunctionData( + 'setGreeting', + [''], + ) + const forwardData = governorFactory.interface.encodeFunctionData( + 'forward', + [greeter.address, setGreetingData], + ) + await expect( + crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + governor.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ), + ).to.be.revertedWith('Invalid greeting length') + }) + }) + + describe('#forwardDelegate', () => { + it('should not be callable by unknown address', async () => { + await expect( + governor.connect(stranger).forwardDelegate(multisend.address, '0x'), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by crossdomain messenger address / L1 owner', async () => { + const calls = [ + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'foo', + ]), + value: 0, + }, + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'bar', + ]), + value: 0, + }, + ] + const multisendData = encodeMultisendData(multisend.interface, calls) + const forwardData = governorFactory.interface.encodeFunctionData( + 'forwardDelegate', + [multisend.address, multisendData], + ) + + await crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + governor.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, 'bar') + }) + + it('should be callable by L2 owner', async () => { + const calls = [ + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'foo', + ]), + value: 0, + }, + { 
+ to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'bar', + ]), + value: 0, + }, + ] + const multisendData = encodeMultisendData(multisend.interface, calls) + await governor + .connect(owner) + .forwardDelegate(multisend.address, multisendData) + + const updatedGreeting = await greeter.greeting() + assert.equal(updatedGreeting, 'bar') + }) + + it('should revert batch when one call fails', async () => { + const calls = [ + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + 'foo', + ]), + value: 0, + }, + { + to: greeter.address, + data: greeterFactory.interface.encodeFunctionData('setGreeting', [ + '', // should revert + ]), + value: 0, + }, + ] + const multisendData = encodeMultisendData(multisend.interface, calls) + const forwardData = governorFactory.interface.encodeFunctionData( + 'forwardDelegate', + [multisend.address, multisendData], + ) + + await expect( + crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + governor.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ), + ).to.be.revertedWith('Governor delegatecall reverted') + + const greeting = await greeter.greeting() + assert.equal(greeting, '') // Unchanged + }) + + it('should bubble up revert when contract call reverts', async () => { + const triggerRevertData = + greeterFactory.interface.encodeFunctionData('triggerRevert') + const forwardData = governorFactory.interface.encodeFunctionData( + 'forwardDelegate', + [greeter.address, triggerRevertData], + ) + + await expect( + crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + governor.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ), + ).to.be.revertedWith('Greeter: revert triggered') + }) + }) + + describe('#transferL1Ownership', () => { + it('should not be 
callable by non-owners', async () => { + await expect( + governor.connect(stranger).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should not be callable by L2 owner', async () => { + const governorOwner = await governor.owner() + assert.equal(governorOwner, owner.address) + + await expect( + governor.connect(owner).transferL1Ownership(stranger.address), + ).to.be.revertedWith('Sender is not the L2 messenger') + }) + + it('should be callable by current L1 owner', async () => { + const currentL1Owner = await governor.l1Owner() + const forwardData = governorFactory.interface.encodeFunctionData( + 'transferL1Ownership', + [newL1OwnerAddress], + ) + + await expect( + crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + governor.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ), + ) + .to.emit(governor, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, newL1OwnerAddress) + }) + + it('should be callable by current L1 owner to zero address', async () => { + const currentL1Owner = await governor.l1Owner() + const forwardData = governorFactory.interface.encodeFunctionData( + 'transferL1Ownership', + [ethers.constants.AddressZero], + ) + + await expect( + crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + governor.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ), + ) + .to.emit(governor, 'L1OwnershipTransferRequested') + .withArgs(currentL1Owner, ethers.constants.AddressZero) + }) + }) + + describe('#acceptL1Ownership', () => { + it('should not be callable by non pending-owners', async () => { + const forwardData = governorFactory.interface.encodeFunctionData( + 'acceptL1Ownership', + [], + ) + await expect( + crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + 
['sendMessage(address,uint256,bytes,uint256)']( + governor.address, // target + 0, // value + forwardData, // message + 0, // gasLimit + ), + ).to.be.revertedWith('Must be proposed L1 owner') + }) + + it('should be callable by pending L1 owner', async () => { + const currentL1Owner = await governor.l1Owner() + + // Transfer ownership + const forwardTransferData = governorFactory.interface.encodeFunctionData( + 'transferL1Ownership', + [newL1OwnerAddress], + ) + await crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + governor.address, // target + 0, // value + forwardTransferData, // message + 0, // gasLimit + ) + + const forwardAcceptData = governorFactory.interface.encodeFunctionData( + 'acceptL1Ownership', + [], + ) + // Simulate cross-chain message from another sender + await crossDomainMessenger._setMockMessageSender(newL1OwnerAddress) + + await expect( + crossDomainMessenger // Simulate cross-chain message + .connect(stranger) + ['sendMessage(address,uint256,bytes,uint256)']( + governor.address, // target + 0, // value + forwardAcceptData, // message + 0, // gasLimit + ), + ) + .to.emit(governor, 'L1OwnershipTransferred') + .withArgs(currentL1Owner, newL1OwnerAddress) + + const updatedL1Owner = await governor.l1Owner() + assert.equal(updatedL1Owner, newL1OwnerAddress) + }) + }) +}) + +// Multisend contract helpers + +/** + * Encodes an underlying transaction for the Multisend contract + * + * @param operation 0 for CALL, 1 for DELEGATECALL + * @param to tx target address + * @param value tx value + * @param data tx data + */ +export function encodeTxData( + operation: number, + to: string, + value: number, + data: string, +): string { + const dataBuffer = Buffer.from(stripHexPrefix(data), 'hex') + const types = ['uint8', 'address', 'uint256', 'uint256', 'bytes'] + const values = [operation, to, value, dataBuffer.length, dataBuffer] + const encoded = ethers.utils.solidityPack(types, 
values) + return stripHexPrefix(encoded) +} + +/** + * Encodes a Multisend call + * + * @param MultisendInterface Ethers Interface object of the Multisend contract + * @param transactions one or more transactions to include in the Multisend call + * @param to tx target address + * @param value tx value + * @param data tx data + */ +export function encodeMultisendData( + MultisendInterface: etherslib.utils.Interface, + transactions: { to: string; value: number; data: string }[], +): string { + let nestedTransactionData = '0x' + for (const transaction of transactions) { + nestedTransactionData += encodeTxData( + 0, + transaction.to, + transaction.value, + transaction.data, + ) + } + const encodedMultisendFnData = MultisendInterface.encodeFunctionData( + 'multiSend', + [nestedTransactionData], + ) + return encodedMultisendFnData +} diff --git a/contracts/test/v0.8/dev/ScrollSequencerUptimeFeed.test.ts b/contracts/test/v0.8/dev/ScrollSequencerUptimeFeed.test.ts new file mode 100644 index 00000000..1d93497b --- /dev/null +++ b/contracts/test/v0.8/dev/ScrollSequencerUptimeFeed.test.ts @@ -0,0 +1,426 @@ +import { ethers, network } from 'hardhat' +import { BigNumber, Contract } from 'ethers' +import { expect } from 'chai' +import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' + +describe('ScrollSequencerUptimeFeed', () => { + let l2CrossDomainMessenger: Contract + let scrollUptimeFeed: Contract + let uptimeFeedConsumer: Contract + let deployer: SignerWithAddress + let l1Owner: SignerWithAddress + let l2Messenger: SignerWithAddress + let dummy: SignerWithAddress + const gasUsedDeviation = 100 + const initialStatus = 0 + + before(async () => { + const accounts = await ethers.getSigners() + deployer = accounts[0] + l1Owner = accounts[1] + dummy = accounts[3] + + const l2CrossDomainMessengerFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/test/mocks/scroll/MockScrollL2CrossDomainMessenger.sol:MockScrollL2CrossDomainMessenger', + deployer, + ) + + 
l2CrossDomainMessenger = await l2CrossDomainMessengerFactory.deploy() + + // Pretend we're on L2 + await network.provider.request({ + method: 'hardhat_impersonateAccount', + params: [l2CrossDomainMessenger.address], + }) + l2Messenger = await ethers.getSigner(l2CrossDomainMessenger.address) + // Credit the L2 messenger with some ETH + await dummy.sendTransaction({ + to: l2Messenger.address, + value: ethers.utils.parseEther('10'), + }) + }) + + beforeEach(async () => { + const scrollSequencerStatusRecorderFactory = + await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/scroll/ScrollSequencerUptimeFeed.sol:ScrollSequencerUptimeFeed', + deployer, + ) + scrollUptimeFeed = await scrollSequencerStatusRecorderFactory.deploy( + l1Owner.address, + l2CrossDomainMessenger.address, + initialStatus, + ) + + // Set mock sender in mock L2 messenger contract + await l2CrossDomainMessenger.setSender(l1Owner.address) + + // Mock consumer + const statusFeedConsumerFactory = await ethers.getContractFactory( + 'src/v0.8/tests/FeedConsumer.sol:FeedConsumer', + deployer, + ) + uptimeFeedConsumer = await statusFeedConsumerFactory.deploy( + scrollUptimeFeed.address, + ) + }) + + describe('constructor', () => { + it('should have been deployed with the correct initial state', async () => { + const l1Sender = await scrollUptimeFeed.l1Sender() + expect(l1Sender).to.equal(l1Owner.address) + const { roundId, answer } = await scrollUptimeFeed.latestRoundData() + expect(roundId).to.equal(1) + expect(answer).to.equal(initialStatus) + }) + }) + + describe('#updateStatus', () => { + it('should revert if called by an address that is not the L2 Cross Domain Messenger', async () => { + const timestamp = await scrollUptimeFeed.latestTimestamp() + expect( + scrollUptimeFeed.connect(dummy).updateStatus(true, timestamp), + ).to.be.revertedWith('InvalidSender') + }) + + it('should revert if called by an address that is not the L2 Cross Domain Messenger and is not the L1 sender', async () => { + const 
timestamp = await scrollUptimeFeed.latestTimestamp() + await l2CrossDomainMessenger.setSender(dummy.address) + expect( + scrollUptimeFeed.connect(dummy).updateStatus(true, timestamp), + ).to.be.revertedWith('InvalidSender') + }) + + it(`should update status when status has not changed and incoming timestamp is the same as latest`, async () => { + const timestamp = await scrollUptimeFeed.latestTimestamp() + let tx = await scrollUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + await expect(tx) + .to.emit(scrollUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + expect(await scrollUptimeFeed.latestAnswer()).to.equal(1) + + const latestRoundBeforeUpdate = await scrollUptimeFeed.latestRoundData() + + tx = await scrollUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp.add(200)) + + // Submit another status update with the same status + const latestBlock = await ethers.provider.getBlock('latest') + + await expect(tx) + .to.emit(scrollUptimeFeed, 'RoundUpdated') + .withArgs(1, latestBlock.timestamp) + expect(await scrollUptimeFeed.latestAnswer()).to.equal(1) + expect(await scrollUptimeFeed.latestTimestamp()).to.equal(timestamp) + + // Verify that latest round has been properly updated + const latestRoundDataAfterUpdate = + await scrollUptimeFeed.latestRoundData() + expect(latestRoundDataAfterUpdate.roundId).to.equal( + latestRoundBeforeUpdate.roundId, + ) + expect(latestRoundDataAfterUpdate.answer).to.equal( + latestRoundBeforeUpdate.answer, + ) + expect(latestRoundDataAfterUpdate.startedAt).to.equal( + latestRoundBeforeUpdate.startedAt, + ) + expect(latestRoundDataAfterUpdate.answeredInRound).to.equal( + latestRoundBeforeUpdate.answeredInRound, + ) + expect(latestRoundDataAfterUpdate.updatedAt).to.equal( + latestBlock.timestamp, + ) + }) + + it(`should update status when status has changed and incoming timestamp is newer than the latest`, async () => { + let timestamp = await scrollUptimeFeed.latestTimestamp() + 
let tx = await scrollUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + await expect(tx) + .to.emit(scrollUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + expect(await scrollUptimeFeed.latestAnswer()).to.equal(1) + + // Submit another status update, different status, newer timestamp should update + timestamp = timestamp.add(2000) + tx = await scrollUptimeFeed + .connect(l2Messenger) + .updateStatus(false, timestamp) + await expect(tx) + .to.emit(scrollUptimeFeed, 'AnswerUpdated') + .withArgs(0, 3 /** roundId */, timestamp) + expect(await scrollUptimeFeed.latestAnswer()).to.equal(0) + expect(await scrollUptimeFeed.latestTimestamp()).to.equal(timestamp) + }) + + it(`should update status when status has changed and incoming timestamp is the same as latest`, async () => { + const timestamp = await scrollUptimeFeed.latestTimestamp() + let tx = await scrollUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + await expect(tx) + .to.emit(scrollUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + expect(await scrollUptimeFeed.latestAnswer()).to.equal(1) + + // Submit another status update, different status, same timestamp should update + tx = await scrollUptimeFeed + .connect(l2Messenger) + .updateStatus(false, timestamp) + await expect(tx) + .to.emit(scrollUptimeFeed, 'AnswerUpdated') + .withArgs(0, 3 /** roundId */, timestamp) + expect(await scrollUptimeFeed.latestAnswer()).to.equal(0) + expect(await scrollUptimeFeed.latestTimestamp()).to.equal(timestamp) + }) + + it('should ignore out-of-order updates', async () => { + const timestamp = (await scrollUptimeFeed.latestTimestamp()).add(10_000) + // Update status + let tx = await scrollUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + await expect(tx) + .to.emit(scrollUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + expect(await scrollUptimeFeed.latestAnswer()).to.equal(1) + + // Update with 
different status, but stale timestamp, should be ignored + const staleTimestamp = timestamp.sub(1000) + tx = await scrollUptimeFeed + .connect(l2Messenger) + .updateStatus(false, staleTimestamp) + await expect(tx) + .to.not.emit(scrollUptimeFeed, 'AnswerUpdated') + .withArgs(1, 2 /** roundId */, timestamp) + await expect(tx).to.emit(scrollUptimeFeed, 'UpdateIgnored') + }) + }) + + describe('AggregatorV3Interface', () => { + it('should return valid answer from getRoundData and latestRoundData', async () => { + let [roundId, answer, startedAt, updatedAt, answeredInRound] = + await scrollUptimeFeed.latestRoundData() + expect(roundId).to.equal(1) + expect(answer).to.equal(0) + expect(answeredInRound).to.equal(roundId) + expect(startedAt).to.equal(updatedAt) + + // Submit status update with different status and newer timestamp, should update + const timestamp = (startedAt as BigNumber).add(1000) + await scrollUptimeFeed.connect(l2Messenger).updateStatus(true, timestamp) + ;[roundId, answer, startedAt, updatedAt, answeredInRound] = + await scrollUptimeFeed.getRoundData(2) + expect(roundId).to.equal(2) + expect(answer).to.equal(1) + expect(answeredInRound).to.equal(roundId) + expect(startedAt).to.equal(timestamp) + expect(updatedAt.lte(startedAt)).to.be.true + + // Check that last round is still returning the correct data + ;[roundId, answer, startedAt, updatedAt, answeredInRound] = + await scrollUptimeFeed.getRoundData(1) + expect(roundId).to.equal(1) + expect(answer).to.equal(0) + expect(answeredInRound).to.equal(roundId) + expect(startedAt).to.equal(updatedAt) + + // Assert latestRoundData corresponds to latest round id + expect(await scrollUptimeFeed.getRoundData(2)).to.deep.equal( + await scrollUptimeFeed.latestRoundData(), + ) + }) + + it('should revert from #getRoundData when round does not yet exist (future roundId)', async () => { + expect(scrollUptimeFeed.getRoundData(2)).to.be.revertedWith( + 'NoDataPresent()', + ) + }) + + it('should revert from #getAnswer 
when round does not yet exist (future roundId)', async () => { + expect(scrollUptimeFeed.getAnswer(2)).to.be.revertedWith( + 'NoDataPresent()', + ) + }) + + it('should revert from #getTimestamp when round does not yet exist (future roundId)', async () => { + expect(scrollUptimeFeed.getTimestamp(2)).to.be.revertedWith( + 'NoDataPresent()', + ) + }) + }) + + describe('Protect reads on AggregatorV2V3Interface functions', () => { + it('should disallow reads on AggregatorV2V3Interface functions when consuming contract is not whitelisted', async () => { + // Sanity - consumer is not whitelisted + expect(await scrollUptimeFeed.checkEnabled()).to.be.true + expect( + await scrollUptimeFeed.hasAccess(uptimeFeedConsumer.address, '0x00'), + ).to.be.false + + // Assert reads are not possible from consuming contract + await expect(uptimeFeedConsumer.latestAnswer()).to.be.revertedWith( + 'No access', + ) + await expect(uptimeFeedConsumer.latestRoundData()).to.be.revertedWith( + 'No access', + ) + }) + + it('should allow reads on AggregatorV2V3Interface functions when consuming contract is whitelisted', async () => { + // Whitelist consumer + await scrollUptimeFeed.addAccess(uptimeFeedConsumer.address) + // Sanity - consumer is whitelisted + expect(await scrollUptimeFeed.checkEnabled()).to.be.true + expect( + await scrollUptimeFeed.hasAccess(uptimeFeedConsumer.address, '0x00'), + ).to.be.true + + // Assert reads are possible from consuming contract + expect(await uptimeFeedConsumer.latestAnswer()).to.be.equal('0') + const [roundId, answer] = await uptimeFeedConsumer.latestRoundData() + expect(roundId).to.equal(1) + expect(answer).to.equal(0) + }) + }) + + describe('Gas costs', () => { + it('should consume a known amount of gas for updates @skip-coverage', async () => { + // Sanity - start at flag = 0 (`false`) + expect(await scrollUptimeFeed.latestAnswer()).to.equal(0) + let timestamp = await scrollUptimeFeed.latestTimestamp() + + // Gas for no update + timestamp = 
timestamp.add(1000) + const _noUpdateTx = await scrollUptimeFeed + .connect(l2Messenger) + .updateStatus(false, timestamp) + const noUpdateTx = await _noUpdateTx.wait(1) + // Assert no update + expect(await scrollUptimeFeed.latestAnswer()).to.equal(0) + expect(noUpdateTx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 38594, + gasUsedDeviation, + ) + + // Gas for update + timestamp = timestamp.add(1000) + const _updateTx = await scrollUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + const updateTx = await _updateTx.wait(1) + // Assert update + expect(await scrollUptimeFeed.latestAnswer()).to.equal(1) + expect(updateTx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 58458, + gasUsedDeviation, + ) + }) + + describe('Aggregator interface', () => { + beforeEach(async () => { + const timestamp = (await scrollUptimeFeed.latestTimestamp()).add(1000) + // Initialise a round + await scrollUptimeFeed + .connect(l2Messenger) + .updateStatus(true, timestamp) + }) + + it('should consume a known amount of gas for getRoundData(uint80) @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await scrollUptimeFeed + .connect(l2Messenger) + .populateTransaction.getRoundData(1), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 30952, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for latestRoundData() @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await scrollUptimeFeed + .connect(l2Messenger) + .populateTransaction.latestRoundData(), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28523, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for latestAnswer() @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await scrollUptimeFeed + .connect(l2Messenger) + .populateTransaction.latestAnswer(), + ) + const tx = await _tx.wait(1) + 
expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28229, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for latestTimestamp() @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await scrollUptimeFeed + .connect(l2Messenger) + .populateTransaction.latestTimestamp(), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28129, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for latestRound() @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await scrollUptimeFeed + .connect(l2Messenger) + .populateTransaction.latestRound(), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 28145, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for getAnswer(roundId) @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await scrollUptimeFeed + .connect(l2Messenger) + .populateTransaction.getAnswer(1), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 30682, + gasUsedDeviation, + ) + }) + + it('should consume a known amount of gas for getTimestamp(roundId) @skip-coverage', async () => { + const _tx = await l2Messenger.sendTransaction( + await scrollUptimeFeed + .connect(l2Messenger) + .populateTransaction.getTimestamp(1), + ) + const tx = await _tx.wait(1) + expect(tx.cumulativeGasUsed.toNumber()).to.be.closeTo( + 30570, + gasUsedDeviation, + ) + }) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/ScrollValidator.test.ts b/contracts/test/v0.8/dev/ScrollValidator.test.ts new file mode 100644 index 00000000..c5ec59c5 --- /dev/null +++ b/contracts/test/v0.8/dev/ScrollValidator.test.ts @@ -0,0 +1,118 @@ +import { ethers } from 'hardhat' +import { BigNumber, Contract, ContractFactory } from 'ethers' +import { expect } from 'chai' +import { SignerWithAddress } from 
'@nomiclabs/hardhat-ethers/signers' + +describe('ScrollValidator', () => { + const GAS_LIMIT = BigNumber.from(1_900_000) + /** Fake L2 target */ + const L2_SEQ_STATUS_RECORDER_ADDRESS = + '0x491B1dDA0A8fa069bbC1125133A975BF4e85a91b' + let scrollValidator: Contract + let scrollUptimeFeedFactory: ContractFactory + let mockScrollL1CrossDomainMessenger: Contract + let deployer: SignerWithAddress + let eoaValidator: SignerWithAddress + + before(async () => { + const accounts = await ethers.getSigners() + deployer = accounts[0] + eoaValidator = accounts[1] + }) + + beforeEach(async () => { + // Required for building the calldata + scrollUptimeFeedFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/scroll/ScrollSequencerUptimeFeed.sol:ScrollSequencerUptimeFeed', + deployer, + ) + + // Scroll Messenger contract on L1 + const mockScrollL1CrossDomainMessengerFactory = + await ethers.getContractFactory( + 'src/v0.8/l2ep/test/mocks/scroll/MockScrollL1CrossDomainMessenger.sol:MockScrollL1CrossDomainMessenger', + ) + mockScrollL1CrossDomainMessenger = + await mockScrollL1CrossDomainMessengerFactory.deploy() + + // Contract under test + const scrollValidatorFactory = await ethers.getContractFactory( + 'src/v0.8/l2ep/dev/scroll/ScrollValidator.sol:ScrollValidator', + deployer, + ) + + scrollValidator = await scrollValidatorFactory.deploy( + mockScrollL1CrossDomainMessenger.address, + L2_SEQ_STATUS_RECORDER_ADDRESS, + GAS_LIMIT, + ) + }) + + describe('#setGasLimit', () => { + it('correctly updates the gas limit', async () => { + const newGasLimit = BigNumber.from(2_000_000) + const tx = await scrollValidator.setGasLimit(newGasLimit) + await tx.wait() + const currentGasLimit = await scrollValidator.getGasLimit() + expect(currentGasLimit).to.equal(newGasLimit) + }) + }) + + describe('#validate', () => { + it('reverts if called by account with no access', async () => { + await expect( + scrollValidator.connect(eoaValidator).validate(0, 0, 1, 1), + ).to.be.revertedWith('No 
access') + }) + + it('posts sequencer status when there is not status change', async () => { + await scrollValidator.addAccess(eoaValidator.address) + + const currentBlock = await ethers.provider.getBlock('latest') + const futureTimestamp = currentBlock.timestamp + 5000 + + await ethers.provider.send('evm_setNextBlockTimestamp', [futureTimestamp]) + const sequencerStatusRecorderCallData = + scrollUptimeFeedFactory.interface.encodeFunctionData('updateStatus', [ + false, + futureTimestamp, + ]) + + await expect(scrollValidator.connect(eoaValidator).validate(0, 0, 0, 0)) + .to.emit(mockScrollL1CrossDomainMessenger, 'SentMessage') + .withArgs( + scrollValidator.address, // sender + L2_SEQ_STATUS_RECORDER_ADDRESS, // target + 0, // value + 0, // nonce + GAS_LIMIT, // gas limit + sequencerStatusRecorderCallData, // message + ) + }) + + it('post sequencer offline', async () => { + await scrollValidator.addAccess(eoaValidator.address) + + const currentBlock = await ethers.provider.getBlock('latest') + const futureTimestamp = currentBlock.timestamp + 10000 + + await ethers.provider.send('evm_setNextBlockTimestamp', [futureTimestamp]) + const sequencerStatusRecorderCallData = + scrollUptimeFeedFactory.interface.encodeFunctionData('updateStatus', [ + true, + futureTimestamp, + ]) + + await expect(scrollValidator.connect(eoaValidator).validate(0, 0, 1, 1)) + .to.emit(mockScrollL1CrossDomainMessenger, 'SentMessage') + .withArgs( + scrollValidator.address, // sender + L2_SEQ_STATUS_RECORDER_ADDRESS, // target + 0, // value + 0, // nonce + GAS_LIMIT, // gas limit + sequencerStatusRecorderCallData, // message + ) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/VRFCoordinatorV2.test.ts b/contracts/test/v0.8/dev/VRFCoordinatorV2.test.ts new file mode 100644 index 00000000..0c1b2a8f --- /dev/null +++ b/contracts/test/v0.8/dev/VRFCoordinatorV2.test.ts @@ -0,0 +1,1123 @@ +import { ethers } from 'hardhat' +import { Signer, Contract, BigNumber } from 'ethers' +import { assert, expect 
} from 'chai' +import { publicAbi } from '../../test-helpers/helpers' +import { randomAddressString } from 'hardhat/internal/hardhat-network/provider/utils/random' + +describe('VRFCoordinatorV2', () => { + let vrfCoordinatorV2: Contract + let vrfCoordinatorV2TestHelper: Contract + let linkToken: Contract + let blockHashStore: Contract + let mockLinkEth: Contract + let owner: Signer + let subOwner: Signer + let subOwnerAddress: string + let consumer: Signer + let random: Signer + let randomAddress: string + let oracle: Signer + const linkEth = BigNumber.from(300000000) + type config = { + minimumRequestBlockConfirmations: number + maxGasLimit: number + stalenessSeconds: number + gasAfterPaymentCalculation: number + weiPerUnitLink: BigNumber + } + let c: config + + beforeEach(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + subOwner = accounts[1] + subOwnerAddress = await subOwner.getAddress() + consumer = accounts[2] + random = accounts[3] + randomAddress = await random.getAddress() + oracle = accounts[4] + const ltFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + accounts[0], + ) + linkToken = await ltFactory.deploy() + const bhFactory = await ethers.getContractFactory( + 'src/v0.6/BlockhashStore.sol:BlockhashStore', + accounts[0], + ) + blockHashStore = await bhFactory.deploy() + const mockAggregatorV3Factory = await ethers.getContractFactory( + 'src/v0.7/tests/MockV3Aggregator.sol:MockV3Aggregator', + accounts[0], + ) + mockLinkEth = await mockAggregatorV3Factory.deploy(0, linkEth) + const vrfCoordinatorV2Factory = await ethers.getContractFactory( + 'src/v0.8/vrf/VRFCoordinatorV2.sol:VRFCoordinatorV2', + accounts[0], + ) + vrfCoordinatorV2 = await vrfCoordinatorV2Factory.deploy( + linkToken.address, + blockHashStore.address, + mockLinkEth.address, + ) + const vrfCoordinatorV2TestHelperFactory = await ethers.getContractFactory( + 
'src/v0.8/vrf/testhelpers/VRFCoordinatorV2TestHelper.sol:VRFCoordinatorV2TestHelper', + accounts[0], + ) + vrfCoordinatorV2TestHelper = await vrfCoordinatorV2TestHelperFactory.deploy( + mockLinkEth.address, + ) + await linkToken.transfer( + subOwnerAddress, + BigNumber.from('1000000000000000000'), + ) // 1 pli + await linkToken.transfer( + randomAddress, + BigNumber.from('1000000000000000000'), + ) // 1 pli + c = { + minimumRequestBlockConfirmations: 1, + maxGasLimit: 1000000, + stalenessSeconds: 86400, + gasAfterPaymentCalculation: + 21000 + 5000 + 2100 + 20000 + 2 * 2100 - 15000 + 7315, + weiPerUnitLink: BigNumber.from('10000000000000000'), + } + // Note if you try and use an object, ethers + // confuses that with an override object and will error. + // It appears that only arrays work for struct args. + const fc = [0, 0, 0, 0, 0, 0, 0, 0, 0] + await vrfCoordinatorV2 + .connect(owner) + .setConfig( + c.minimumRequestBlockConfirmations, + c.maxGasLimit, + c.stalenessSeconds, + c.gasAfterPaymentCalculation, + c.weiPerUnitLink, + fc, + ) + }) + + it('has a limited public interface [ @skip-coverage ]', async () => { + publicAbi(vrfCoordinatorV2, [ + // Public constants + 'MAX_CONSUMERS', + 'MAX_NUM_WORDS', + 'MAX_REQUEST_CONFIRMATIONS', + // Owner + 'acceptOwnership', + 'transferOwnership', + 'owner', + 'getConfig', + 'getFeeConfig', + 'getFallbackWeiPerUnitLink', + 'getCurrentSubId', + 'setConfig', + 'getRequestConfig', + 'recoverFunds', + 'ownerCancelSubscription', + 'getFeeTier', + 'pendingRequestExists', + 'getTotalBalance', + // Oracle + 'requestRandomWords', + 'getCommitment', // Note we use this to check if a request is already fulfilled. + 'hashOfKey', + 'fulfillRandomWords', + 'registerProvingKey', + 'deregisterProvingKey', + 'oracleWithdraw', + // Subscription management + 'createSubscription', + 'addConsumer', + 'removeConsumer', + 'getSubscription', + 'onTokenTransfer', // Effectively the fundSubscription. 
+ 'cancelSubscription', + 'requestSubscriptionOwnerTransfer', + 'acceptSubscriptionOwnerTransfer', + // Misc + 'typeAndVersion', + 'BLOCKHASH_STORE', + 'PLI', + 'PLI_ETH_FEED', + ]) + }) + + describe('#setConfig', async function () { + it('only owner can set', async function () { + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .setConfig( + c.minimumRequestBlockConfirmations, + c.maxGasLimit, + c.stalenessSeconds, + c.gasAfterPaymentCalculation, + c.weiPerUnitLink, + [0, 0, 0, 0, 0, 0, 0, 0, 0], + ), + ).to.be.revertedWith('Only callable by owner') + // Anyone can read the config. + const resp = await vrfCoordinatorV2.connect(random).getConfig() + assert(resp[0] == c.minimumRequestBlockConfirmations) + assert(resp[1] == c.maxGasLimit) + assert(resp[2] == c.stalenessSeconds) + assert(resp[3].toString() == c.gasAfterPaymentCalculation.toString()) + }) + + it('max req confs', async function () { + await expect( + vrfCoordinatorV2 + .connect(owner) + .setConfig( + 201, + c.maxGasLimit, + c.stalenessSeconds, + c.gasAfterPaymentCalculation, + c.weiPerUnitLink, + [0, 0, 0, 0, 0, 0, 0, 0, 0], + ), + ).to.be.revertedWith('InvalidRequestConfirmations(201, 201, 200)') + }) + + it('positive fallback price', async function () { + await expect( + vrfCoordinatorV2 + .connect(owner) + .setConfig( + c.minimumRequestBlockConfirmations, + c.maxGasLimit, + c.stalenessSeconds, + c.gasAfterPaymentCalculation, + 0, + [0, 0, 0, 0, 0, 0, 0, 0, 0], + ), + ).to.be.revertedWith('InvalidLinkWeiPrice(0)') + await expect( + vrfCoordinatorV2 + .connect(owner) + .setConfig( + c.minimumRequestBlockConfirmations, + c.maxGasLimit, + c.stalenessSeconds, + c.gasAfterPaymentCalculation, + -1, + [0, 0, 0, 0, 0, 0, 0, 0, 0], + ), + ).to.be.revertedWith('InvalidLinkWeiPrice(-1)') + }) + }) + + async function createSubscription(): Promise<number> { + // let consumers: string[] = [await consumer.getAddress()]; + const tx = await vrfCoordinatorV2.connect(subOwner).createSubscription() + const receipt =
await tx.wait() + const subId = receipt.events[0].args['subId'] + await vrfCoordinatorV2 + .connect(subOwner) + .addConsumer(subId, await consumer.getAddress()) + return subId + } + + async function createSubscriptionWithConsumers( + consumers: string[], + ): Promise<number> { + const tx = await vrfCoordinatorV2.connect(subOwner).createSubscription() + const receipt = await tx.wait() + const subId = receipt.events[0].args['subId'] + for (let i = 0; i < consumers.length; i++) { + await vrfCoordinatorV2.connect(subOwner).addConsumer(subId, consumers[i]) + } + return subId + } + + describe('#createSubscription', async function () { + it('can create a subscription', async function () { + await expect(vrfCoordinatorV2.connect(subOwner).createSubscription()) + .to.emit(vrfCoordinatorV2, 'SubscriptionCreated') + .withArgs(1, subOwnerAddress) + const s = await vrfCoordinatorV2.getSubscription(1) + assert(s.balance.toString() == '0', 'invalid balance') + assert(s.owner == subOwnerAddress, 'invalid address') + }) + it('subscription id increments', async function () { + await expect(vrfCoordinatorV2.connect(subOwner).createSubscription()) + .to.emit(vrfCoordinatorV2, 'SubscriptionCreated') + .withArgs(1, subOwnerAddress) + await expect(vrfCoordinatorV2.connect(subOwner).createSubscription()) + .to.emit(vrfCoordinatorV2, 'SubscriptionCreated') + .withArgs(2, subOwnerAddress) + }) + it('cannot create more than the max', async function () { + const subId = createSubscriptionWithConsumers([]) + for (let i = 0; i < 100; i++) { + await vrfCoordinatorV2 + .connect(subOwner) + .addConsumer(subId, randomAddressString()) + } + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .addConsumer(subId, randomAddressString()), + ).to.be.revertedWith(`TooManyConsumers()`) + }) + }) + + describe('#requestSubscriptionOwnerTransfer', async function () { + let subId: number + beforeEach(async () => { + subId = await createSubscription() + }) + it('rejects non-owner', async function () { + await
expect( + vrfCoordinatorV2 + .connect(random) + .requestSubscriptionOwnerTransfer(subId, randomAddress), + ).to.be.revertedWith(`MustBeSubOwner("${subOwnerAddress}")`) + }) + it('owner can request transfer', async function () { + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .requestSubscriptionOwnerTransfer(subId, randomAddress), + ) + .to.emit(vrfCoordinatorV2, 'SubscriptionOwnerTransferRequested') + .withArgs(subId, subOwnerAddress, randomAddress) + // Same request is a noop + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .requestSubscriptionOwnerTransfer(subId, randomAddress), + ).to.not.emit(vrfCoordinatorV2, 'SubscriptionOwnerTransferRequested') + }) + }) + + describe('#acceptSubscriptionOwnerTransfer', async function () { + let subId: number + beforeEach(async () => { + subId = await createSubscription() + }) + it('subscription must exist', async function () { + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .acceptSubscriptionOwnerTransfer(1203123123), + ).to.be.revertedWith(`InvalidSubscription`) + }) + it('must be requested owner to accept', async function () { + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .requestSubscriptionOwnerTransfer(subId, randomAddress), + ) + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .acceptSubscriptionOwnerTransfer(subId), + ).to.be.revertedWith(`MustBeRequestedOwner("${randomAddress}")`) + }) + it('requested owner can accept', async function () { + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .requestSubscriptionOwnerTransfer(subId, randomAddress), + ) + .to.emit(vrfCoordinatorV2, 'SubscriptionOwnerTransferRequested') + .withArgs(subId, subOwnerAddress, randomAddress) + await expect( + vrfCoordinatorV2.connect(random).acceptSubscriptionOwnerTransfer(subId), + ) + .to.emit(vrfCoordinatorV2, 'SubscriptionOwnerTransferred') + .withArgs(subId, subOwnerAddress, randomAddress) + }) + }) + + describe('#addConsumer', async function () { + let subId: number + 
beforeEach(async () => { + subId = await createSubscription() + }) + it('subscription must exist', async function () { + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .addConsumer(1203123123, randomAddress), + ).to.be.revertedWith(`InvalidSubscription`) + }) + it('must be owner', async function () { + await expect( + vrfCoordinatorV2.connect(random).addConsumer(subId, randomAddress), + ).to.be.revertedWith(`MustBeSubOwner("${subOwnerAddress}")`) + }) + it('add is idempotent', async function () { + await vrfCoordinatorV2.connect(subOwner).addConsumer(subId, randomAddress) + await vrfCoordinatorV2.connect(subOwner).addConsumer(subId, randomAddress) + }) + it('cannot add more than maximum', async function () { + // There is one consumer, add another 99 to hit the max + for (let i = 0; i < 99; i++) { + await vrfCoordinatorV2 + .connect(subOwner) + .addConsumer(subId, randomAddressString()) + } + // Adding one more should fail + // await vrfCoordinatorV2.connect(subOwner).addConsumer(subId, randomAddress); + await expect( + vrfCoordinatorV2.connect(subOwner).addConsumer(subId, randomAddress), + ).to.be.revertedWith(`TooManyConsumers()`) + // Same is true if we first create with the maximum + const consumers: string[] = [] + for (let i = 0; i < 100; i++) { + consumers.push(randomAddressString()) + } + subId = await createSubscriptionWithConsumers(consumers) + await expect( + vrfCoordinatorV2.connect(subOwner).addConsumer(subId, randomAddress), + ).to.be.revertedWith(`TooManyConsumers()`) + }) + it('owner can update', async function () { + await expect( + vrfCoordinatorV2.connect(subOwner).addConsumer(subId, randomAddress), + ) + .to.emit(vrfCoordinatorV2, 'SubscriptionConsumerAdded') + .withArgs(subId, randomAddress) + }) + }) + + describe('#removeConsumer', async function () { + let subId: number + beforeEach(async () => { + subId = await createSubscription() + }) + it('subscription must exist', async function () { + await expect( + vrfCoordinatorV2 + 
.connect(subOwner) + .removeConsumer(1203123123, randomAddress), + ).to.be.revertedWith(`InvalidSubscription`) + }) + it('must be owner', async function () { + await expect( + vrfCoordinatorV2.connect(random).removeConsumer(subId, randomAddress), + ).to.be.revertedWith(`MustBeSubOwner("${subOwnerAddress}")`) + }) + it('owner can update', async function () { + const subBefore = await vrfCoordinatorV2.getSubscription(subId) + await vrfCoordinatorV2.connect(subOwner).addConsumer(subId, randomAddress) + await expect( + vrfCoordinatorV2.connect(subOwner).removeConsumer(subId, randomAddress), + ) + .to.emit(vrfCoordinatorV2, 'SubscriptionConsumerRemoved') + .withArgs(subId, randomAddress) + const subAfter = await vrfCoordinatorV2.getSubscription(subId) + // Subscription should NOT contain the removed consumer + assert.deepEqual(subBefore.consumers, subAfter.consumers) + }) + it('can remove all consumers', async function () { + // Testing the handling of zero. + await vrfCoordinatorV2.connect(subOwner).addConsumer(subId, randomAddress) + await vrfCoordinatorV2 + .connect(subOwner) + .removeConsumer(subId, randomAddress) + await vrfCoordinatorV2 + .connect(subOwner) + .removeConsumer(subId, await consumer.getAddress()) + // Should be empty + const subAfter = await vrfCoordinatorV2.getSubscription(subId) + assert.deepEqual(subAfter.consumers, []) + }) + }) + + describe('#cancelSubscription', async function () { + let subId: number + beforeEach(async () => { + subId = await createSubscription() + }) + it('subscription must exist', async function () { + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .cancelSubscription(1203123123, subOwnerAddress), + ).to.be.revertedWith(`InvalidSubscription`) + }) + it('must be owner', async function () { + await expect( + vrfCoordinatorV2 + .connect(random) + .cancelSubscription(subId, subOwnerAddress), + ).to.be.revertedWith(`MustBeSubOwner("${subOwnerAddress}")`) + }) + it('can cancel', async function () { + await linkToken + 
.connect(subOwner) + .transferAndCall( + vrfCoordinatorV2.address, + BigNumber.from('1000'), + ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .cancelSubscription(subId, randomAddress), + ) + .to.emit(vrfCoordinatorV2, 'SubscriptionCanceled') + .withArgs(subId, randomAddress, BigNumber.from('1000')) + const randomBalance = await linkToken.balanceOf(randomAddress) + assert.equal(randomBalance.toString(), '1000000000000001000') + await expect( + vrfCoordinatorV2.connect(subOwner).getSubscription(subId), + ).to.be.revertedWith('InvalidSubscription') + }) + it('can add same consumer after canceling', async function () { + await linkToken + .connect(subOwner) + .transferAndCall( + vrfCoordinatorV2.address, + BigNumber.from('1000'), + ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + await vrfCoordinatorV2.connect(subOwner).addConsumer(subId, randomAddress) + await vrfCoordinatorV2 + .connect(subOwner) + .cancelSubscription(subId, randomAddress) + subId = await createSubscription() + // The cancel should have removed this consumer, so we can add it again. 
+ await vrfCoordinatorV2.connect(subOwner).addConsumer(subId, randomAddress) + }) + it('cannot cancel with pending req', async function () { + await linkToken + .connect(subOwner) + .transferAndCall( + vrfCoordinatorV2.address, + BigNumber.from('1000'), + ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + await vrfCoordinatorV2.connect(subOwner).addConsumer(subId, randomAddress) + const testKey = [BigNumber.from('1'), BigNumber.from('2')] + await vrfCoordinatorV2.registerProvingKey(subOwnerAddress, testKey) + const kh = await vrfCoordinatorV2.hashOfKey(testKey) + await vrfCoordinatorV2.connect(consumer).requestRandomWords( + kh, // keyhash + subId, // subId + 1, // minReqConf + 1000000, // callbackGasLimit + 1, // numWords + ) + // Should revert with outstanding requests + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .cancelSubscription(subId, randomAddress), + ).to.be.revertedWith('PendingRequestExists()') + // However the owner is able to cancel + // funds go to the sub owner. + await expect( + vrfCoordinatorV2.connect(owner).ownerCancelSubscription(subId), + ) + .to.emit(vrfCoordinatorV2, 'SubscriptionCanceled') + .withArgs(subId, subOwnerAddress, BigNumber.from('1000')) + }) + }) + + describe('#recoverFunds', async function () { + let subId: number + beforeEach(async () => { + subId = await createSubscription() + }) + + // Note we can't test the oracleWithdraw without fulfilling a request, so leave + // that coverage to the go tests.
+ it('function that should change internal balance do', async function () { + type bf = [() => Promise<void>, BigNumber] + const balanceChangingFns: Array<bf> = [ + [ + async function () { + const s = ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]) + await linkToken + .connect(subOwner) + .transferAndCall( + vrfCoordinatorV2.address, + BigNumber.from('1000'), + s, + ) + }, + BigNumber.from('1000'), + ], + [ + async function () { + await vrfCoordinatorV2 + .connect(subOwner) + .cancelSubscription(subId, randomAddress) + }, + BigNumber.from('-1000'), + ], + ] + for (const [fn, expectedBalanceChange] of balanceChangingFns) { + const startingBalance = await vrfCoordinatorV2.getTotalBalance() + await fn() + const endingBalance = await vrfCoordinatorV2.getTotalBalance() + assert( + endingBalance.sub(startingBalance).toString() == + expectedBalanceChange.toString(), + ) + } + }) + it('only owner can recover', async function () { + await expect( + vrfCoordinatorV2.connect(subOwner).recoverFunds(randomAddress), + ).to.be.revertedWith(`Only callable by owner`) + }) + + it('owner can recover link transferred', async function () { + // Set the internal balance + assert(BigNumber.from('0'), linkToken.balanceOf(randomAddress)) + const s = ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]) + await linkToken + .connect(subOwner) + .transferAndCall(vrfCoordinatorV2.address, BigNumber.from('1000'), s) + // Circumvent internal balance + await linkToken + .connect(subOwner) + .transfer(vrfCoordinatorV2.address, BigNumber.from('1000')) + // Should recover this 1000 + await expect(vrfCoordinatorV2.connect(owner).recoverFunds(randomAddress)) + .to.emit(vrfCoordinatorV2, 'FundsRecovered') + .withArgs(randomAddress, BigNumber.from('1000')) + assert(BigNumber.from('1000'), linkToken.balanceOf(randomAddress)) + }) + }) + + it('subscription lifecycle', async function () { + // Create subscription.
+ const tx = await vrfCoordinatorV2.connect(subOwner).createSubscription() + const receipt = await tx.wait() + assert(receipt.events[0].event == 'SubscriptionCreated') + assert(receipt.events[0].args['owner'] == subOwnerAddress, 'sub owner') + const subId = receipt.events[0].args['subId'] + await vrfCoordinatorV2 + .connect(subOwner) + .addConsumer(subId, await consumer.getAddress()) + + // Fund the subscription + await expect( + linkToken + .connect(subOwner) + .transferAndCall( + vrfCoordinatorV2.address, + BigNumber.from('1000000000000000000'), + ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ), + ) + .to.emit(vrfCoordinatorV2, 'SubscriptionFunded') + .withArgs(subId, BigNumber.from(0), BigNumber.from('1000000000000000000')) + + // Non-owners cannot change the consumers + await expect( + vrfCoordinatorV2.connect(random).addConsumer(subId, randomAddress), + ).to.be.revertedWith(`MustBeSubOwner("${subOwnerAddress}")`) + await expect( + vrfCoordinatorV2.connect(random).removeConsumer(subId, randomAddress), + ).to.be.revertedWith(`MustBeSubOwner("${subOwnerAddress}")`) + + // Non-owners cannot ask to transfer ownership + await expect( + vrfCoordinatorV2 + .connect(random) + .requestSubscriptionOwnerTransfer(subId, randomAddress), + ).to.be.revertedWith(`MustBeSubOwner("${subOwnerAddress}")`) + + // Owners can request ownership transfership + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .requestSubscriptionOwnerTransfer(subId, randomAddress), + ) + .to.emit(vrfCoordinatorV2, 'SubscriptionOwnerTransferRequested') + .withArgs(subId, subOwnerAddress, randomAddress) + + // Non-requested owners cannot accept + await expect( + vrfCoordinatorV2.connect(subOwner).acceptSubscriptionOwnerTransfer(subId), + ).to.be.revertedWith(`MustBeRequestedOwner("${randomAddress}")`) + + // Requested owners can accept + await expect( + vrfCoordinatorV2.connect(random).acceptSubscriptionOwnerTransfer(subId), + ) + .to.emit(vrfCoordinatorV2, 
'SubscriptionOwnerTransferred') + .withArgs(subId, subOwnerAddress, randomAddress) + + // Transfer it back to subOwner + vrfCoordinatorV2 + .connect(random) + .requestSubscriptionOwnerTransfer(subId, subOwnerAddress) + vrfCoordinatorV2.connect(subOwner).acceptSubscriptionOwnerTransfer(subId) + + // Non-owners cannot cancel + await expect( + vrfCoordinatorV2.connect(random).cancelSubscription(subId, randomAddress), + ).to.be.revertedWith(`MustBeSubOwner("${subOwnerAddress}")`) + + await expect( + vrfCoordinatorV2 + .connect(subOwner) + .cancelSubscription(subId, randomAddress), + ) + .to.emit(vrfCoordinatorV2, 'SubscriptionCanceled') + .withArgs(subId, randomAddress, BigNumber.from('1000000000000000000')) + const random2Balance = await linkToken.balanceOf(randomAddress) + assert.equal(random2Balance.toString(), '2000000000000000000') + }) + + describe('#requestRandomWords', async function () { + let subId: number + let kh: string + beforeEach(async () => { + subId = await createSubscription() + const testKey = [BigNumber.from('1'), BigNumber.from('2')] + kh = await vrfCoordinatorV2.hashOfKey(testKey) + }) + it('invalid subId', async function () { + await expect( + vrfCoordinatorV2.connect(random).requestRandomWords( + kh, // keyhash + 12301928312, // subId + 1, // minReqConf + 1000, // callbackGasLimit + 1, // numWords + ), + ).to.be.revertedWith(`InvalidSubscription()`) + }) + it('invalid consumer', async function () { + await expect( + vrfCoordinatorV2.connect(random).requestRandomWords( + kh, // keyhash + subId, // subId + 1, // minReqConf + 1000, // callbackGasLimit + 1, // numWords + ), + ).to.be.revertedWith( + `InvalidConsumer(${subId}, "${randomAddress.toString()}")`, + ) + }) + it('invalid req confs', async function () { + await expect( + vrfCoordinatorV2.connect(consumer).requestRandomWords( + kh, // keyhash + subId, // subId + 0, // minReqConf + 1000, // callbackGasLimit + 1, // numWords + ), + ).to.be.revertedWith(`InvalidRequestConfirmations(0, 1, 
200)`) + }) + it('gas limit too high', async function () { + await linkToken.connect(subOwner).transferAndCall( + vrfCoordinatorV2.address, + BigNumber.from('1000000000000000000'), // 1 pli > 0.1 min. + ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + await expect( + vrfCoordinatorV2.connect(consumer).requestRandomWords( + kh, // keyhash + subId, // subId + 1, // minReqConf + 1000001, // callbackGasLimit + 1, // numWords + ), + ).to.be.revertedWith(`GasLimitTooBig(1000001, 1000000)`) + }) + + it('nonce increments', async function () { + await linkToken.connect(subOwner).transferAndCall( + vrfCoordinatorV2.address, + BigNumber.from('1000000000000000000'), // 1 pli > 0.1 min. + ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + const r1 = await vrfCoordinatorV2.connect(consumer).requestRandomWords( + kh, // keyhash + subId, // subId + 1, // minReqConf + 1000000, // callbackGasLimit + 1, // numWords + ) + const r1Receipt = await r1.wait() + const seed1 = r1Receipt.events[0].args['requestId'] + const r2 = await vrfCoordinatorV2.connect(consumer).requestRandomWords( + kh, // keyhash + subId, // subId + 1, // minReqConf + 1000000, // callbackGasLimit + 1, // numWords + ) + const r2Receipt = await r2.wait() + const seed2 = r2Receipt.events[0].args['requestId'] + assert(seed2 != seed1) + }) + + it('emits correct log', async function () { + await linkToken.connect(subOwner).transferAndCall( + vrfCoordinatorV2.address, + BigNumber.from('1000000000000000000'), // 1 pli > 0.1 min. 
+ ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + const reqTx = await vrfCoordinatorV2.connect(consumer).requestRandomWords( + kh, // keyhash + subId, // subId + 1, // minReqConf + 1000, // callbackGasLimit + 1, // numWords + ) + const reqReceipt = await reqTx.wait() + assert(reqReceipt.events.length == 1) + const reqEvent = reqReceipt.events[0] + assert(reqEvent.event == 'RandomWordsRequested', 'wrong event name') + assert( + reqEvent.args['keyHash'] == kh, + `wrong kh ${reqEvent.args['keyHash']} ${kh}`, + ) + assert( + reqEvent.args['subId'].toString() == subId.toString(), + 'wrong subId', + ) + assert( + reqEvent.args['minimumRequestConfirmations'].toString() == + BigNumber.from(1).toString(), + 'wrong minRequestConf', + ) + assert( + reqEvent.args['callbackGasLimit'] == 1000, + 'wrong callbackGasLimit', + ) + assert(reqEvent.args['numWords'] == 1, 'wrong numWords') + assert( + reqEvent.args['sender'] == (await consumer.getAddress()), + 'wrong sender address', + ) + }) + it('add/remove consumer invariant', async function () { + await linkToken.connect(subOwner).transferAndCall( + vrfCoordinatorV2.address, + BigNumber.from('1000000000000000000'), // 1 pli > 0.1 min. + ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + await vrfCoordinatorV2.connect(subOwner).addConsumer(subId, randomAddress) + await vrfCoordinatorV2 + .connect(subOwner) + .removeConsumer(subId, randomAddress) + await expect( + vrfCoordinatorV2.connect(random).requestRandomWords( + kh, // keyhash + subId, // subId + 1, // minReqConf + 1000, // callbackGasLimit + 1, // numWords + ), + ).to.be.revertedWith( + `InvalidConsumer(${subId}, "${randomAddress.toString()}")`, + ) + }) + it('cancel/add subscription invariant', async function () { + await linkToken.connect(subOwner).transferAndCall( + vrfCoordinatorV2.address, + BigNumber.from('1000000000000000000'), // 1 pli > 0.1 min. 
+ ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + await vrfCoordinatorV2 + .connect(subOwner) + .cancelSubscription(subId, randomAddress) + subId = await createSubscriptionWithConsumers([]) + // Should not succeed because consumer was previously registered + // i.e. cancel should be cleaning up correctly. + await expect( + vrfCoordinatorV2.connect(random).requestRandomWords( + kh, // keyhash + subId, // subId + 1, // minReqConf + 1000, // callbackGasLimit + 1, // numWords + ), + ).to.be.revertedWith( + `InvalidConsumer(${subId}, "${randomAddress.toString()}")`, + ) + }) + }) + + describe('#oracleWithdraw', async function () { + it('cannot withdraw with no balance', async function () { + await expect( + vrfCoordinatorV2 + .connect(oracle) + .oracleWithdraw(randomAddressString(), BigNumber.from('100')), + ).to.be.revertedWith(`InsufficientBalance`) + }) + }) + + describe('#calculatePaymentAmount [ @skip-coverage ]', async function () { + it('output within sensible range', async function () { + // By default, hardhat sends txes with the block limit as their gas limit. + await vrfCoordinatorV2TestHelper + .connect(oracle) + .calculatePaymentAmountTest( + BigNumber.from('0'), // Gas after payment + 0, // Fee PPM + BigNumber.from('1000000000'), // Wei per unit gas (gas price) + ) + const paymentAmount = await vrfCoordinatorV2TestHelper.getPaymentAmount() + // The gas price is 1gwei and the eth/link price is set to 300000000 wei per unit link. 
+ // paymentAmount = 1e18*weiPerUnitGas*(gasAfterPaymentCalculation + startGas - gasleft()) / uint256(weiPerUnitLink); + // So we expect x to be in the range (few thousand gas for the call) + // 1e18*1e9*(1000 gas)/30000000 < x < 1e18*1e9*(5000 gas)/30000000 + // 3.333333333E22 < x < 1.666666667E23 + //const gss = await vrfCoordinatorV2TestHelper.getGasStart(); + assert( + paymentAmount.gt(BigNumber.from('33333333330000000000000')), + 'payment too small', + ) + assert( + paymentAmount.lt(BigNumber.from('166666666600000000000000')), + 'payment too large', + ) + }) + it('payment too large', async function () { + // Set this gas price to be astronomical 1ETH/gas + // That means the payment will be (even for 1gas) + // 1e18*1e18/30000000 + // 3.333333333E28 > 1e27 (all link in existence) + await expect( + vrfCoordinatorV2TestHelper.connect(oracle).calculatePaymentAmountTest( + BigNumber.from('0'), // Gas after payment + 0, // Fee PPM + BigNumber.from('1000000000000000000'), + ), + ).to.be.revertedWith(`PaymentTooLarge()`) + }) + + it('non-positive link wei price should revert', async function () { + const mockAggregatorV3Factory = await ethers.getContractFactory( + 'src/v0.7/tests/MockV3Aggregator.sol:MockV3Aggregator', + owner, + ) + const vrfCoordinatorV2TestHelperFactory = await ethers.getContractFactory( + 'VRFCoordinatorV2TestHelper', + owner, + ) + const mockLinkEthZero = await mockAggregatorV3Factory.deploy(0, 0) + const vrfCoordinatorV2TestHelperZero = + await vrfCoordinatorV2TestHelperFactory.deploy(mockLinkEthZero.address) + await expect( + vrfCoordinatorV2TestHelperZero + .connect(oracle) + .calculatePaymentAmountTest( + BigNumber.from('0'), // Gas after payment + 0, // Fee PPM + BigNumber.from('1000000000000000000'), + ), + ).to.be.revertedWith(`InvalidLinkWeiPrice(0)`) + const mockLinkEthNegative = await mockAggregatorV3Factory.deploy(0, -1) + const vrfCoordinatorV2TestHelperNegative = + await vrfCoordinatorV2TestHelperFactory.deploy( + 
mockLinkEthNegative.address, + ) + await expect( + vrfCoordinatorV2TestHelperNegative + .connect(owner) + .calculatePaymentAmountTest( + BigNumber.from('0'), // Gas after payment + 0, // Fee PPM + BigNumber.from('1000000000000000000'), + ), + ).to.be.revertedWith(`InvalidLinkWeiPrice(-1)`) + }) + }) + + describe('#keyRegistration', async function () { + it('register key emits log', async function () { + const testKey = [BigNumber.from('1'), BigNumber.from('2')] + const kh = await vrfCoordinatorV2.hashOfKey(testKey) + await expect( + vrfCoordinatorV2.registerProvingKey(subOwnerAddress, testKey), + ) + .to.emit(vrfCoordinatorV2, 'ProvingKeyRegistered') + .withArgs(kh, subOwnerAddress) + const reqConfig = await vrfCoordinatorV2.getRequestConfig() + assert(reqConfig[2].length == 1) // 1 keyhash registered + }) + it('cannot re-register key', async function () { + const testKey = [BigNumber.from('1'), BigNumber.from('2')] + const kh = await vrfCoordinatorV2.hashOfKey(testKey) + await vrfCoordinatorV2.registerProvingKey(subOwnerAddress, testKey) + await expect( + vrfCoordinatorV2.registerProvingKey(subOwnerAddress, testKey), + ).to.be.revertedWith(`ProvingKeyAlreadyRegistered("${kh}")`) + }) + it('deregister key emits log', async function () { + const testKey = [BigNumber.from('1'), BigNumber.from('2')] + const kh = await vrfCoordinatorV2.hashOfKey(testKey) + await vrfCoordinatorV2.registerProvingKey(subOwnerAddress, testKey) + await expect(vrfCoordinatorV2.deregisterProvingKey(testKey)) + .to.emit(vrfCoordinatorV2, 'ProvingKeyDeregistered') + .withArgs(kh, subOwnerAddress) + const reqConfig = await vrfCoordinatorV2.getRequestConfig() + assert(reqConfig[2].length == 0) // 0 keyhash registered + }) + it('cannot deregister unregistered key', async function () { + const testKey = [BigNumber.from('1'), BigNumber.from('2')] + const kh = await vrfCoordinatorV2.hashOfKey(testKey) + await expect( + vrfCoordinatorV2.deregisterProvingKey(testKey), + 
).to.be.revertedWith(`NoSuchProvingKey("${kh}")`) + }) + it('can register after deregister', async function () { + const testKey = [BigNumber.from('1'), BigNumber.from('2')] + await vrfCoordinatorV2.registerProvingKey(subOwnerAddress, testKey) + await vrfCoordinatorV2.deregisterProvingKey(testKey) + await vrfCoordinatorV2.registerProvingKey(randomAddress, testKey) + }) + }) + + describe('#fulfillRandomWords', async function () { + beforeEach(async () => { + const testKey = [BigNumber.from('1'), BigNumber.from('2')] + await vrfCoordinatorV2.registerProvingKey(subOwnerAddress, testKey) + }) + it('unregistered key should fail', async function () { + const proof = [ + [BigNumber.from('1'), BigNumber.from('3')], // pk NOT registered + [BigNumber.from('1'), BigNumber.from('2')], // gamma + BigNumber.from('1'), // c + BigNumber.from('1'), // s + BigNumber.from('1'), // seed + randomAddress, // uWitness + [BigNumber.from('1'), BigNumber.from('2')], // cGammaWitness + [BigNumber.from('1'), BigNumber.from('2')], // sHashWitness + BigNumber.from('1'), + ] // 13 words in proof + const rc = [ + 1, // blockNum + 2, // subId + 3, // callbackGasLimit + 4, // numWords + randomAddress, // sender + ] + await expect( + vrfCoordinatorV2.connect(oracle).fulfillRandomWords(proof, rc), + ).to.be.revertedWith( + `NoSuchProvingKey("0xa15bc60c955c405d20d9149c709e2460f1c2d9a497496a7f46004d1772c3054c")`, + ) + }) + it('no corresponding request', async function () { + const proof = [ + [BigNumber.from('1'), BigNumber.from('2')], // pk + [BigNumber.from('1'), BigNumber.from('2')], // gamma + BigNumber.from('1'), // c + BigNumber.from('1'), // s + BigNumber.from('1'), // seed + randomAddress, // uWitness + [BigNumber.from('1'), BigNumber.from('2')], // cGammaWitness + [BigNumber.from('1'), BigNumber.from('2')], // sHashWitness + BigNumber.from('1'), + ] // 13 words in proof + const rc = [ + 1, // blockNum + 2, // subId + 3, // callbackGasLimit + 4, // numWords + randomAddress, // sender + ] + 
await expect( + vrfCoordinatorV2.connect(oracle).fulfillRandomWords(proof, rc), + ).to.be.revertedWith(`NoCorrespondingRequest()`) + }) + it('incorrect commitment wrong blocknum', async function () { + const subId = await createSubscription() + await linkToken.connect(subOwner).transferAndCall( + vrfCoordinatorV2.address, + BigNumber.from('1000000000000000000'), // 1 pli > 0.1 min. + ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + const testKey = [BigNumber.from('1'), BigNumber.from('2')] + const kh = await vrfCoordinatorV2.hashOfKey(testKey) + const tx = await vrfCoordinatorV2.connect(consumer).requestRandomWords( + kh, // keyhash + subId, // subId + 1, // minReqConf + 1000, // callbackGasLimit + 1, // numWords + ) + const reqReceipt = await tx.wait() + // We give it the right proof length and a valid preSeed + // but an invalid commitment + const preSeed = reqReceipt.events[0].args['preSeed'] + const proof = [ + [BigNumber.from('1'), BigNumber.from('2')], + [BigNumber.from('1'), BigNumber.from('2')], + BigNumber.from('1'), + BigNumber.from('1'), + preSeed, + randomAddress, + [BigNumber.from('1'), BigNumber.from('2')], + [BigNumber.from('1'), BigNumber.from('2')], + BigNumber.from('1'), + ] + const rc = [ + reqReceipt.blockNumber + 1, // Wrong blocknumber + subId, + 1000, + 1, + await consumer.getAddress(), + ] + await expect( + vrfCoordinatorV2.connect(oracle).fulfillRandomWords(proof, rc), + ).to.be.revertedWith(`IncorrectCommitment()`) + }) + }) + + describe('#getFeeTier', async function () { + beforeEach(async () => { + await expect( + vrfCoordinatorV2 + .connect(owner) + .setConfig( + c.minimumRequestBlockConfirmations, + c.maxGasLimit, + c.stalenessSeconds, + c.gasAfterPaymentCalculation, + c.weiPerUnitLink, + [10000, 1000, 100, 10, 1, 10, 20, 30, 40], + ), + ) + }) + it('tier1', async function () { + assert((await vrfCoordinatorV2.connect(random).getFeeTier(0)) == 10000) + assert((await vrfCoordinatorV2.connect(random).getFeeTier(5)) == 
10000) + assert((await vrfCoordinatorV2.connect(random).getFeeTier(10)) == 10000) + }) + it('tier2', async function () { + assert((await vrfCoordinatorV2.connect(random).getFeeTier(11)) == 1000) + assert((await vrfCoordinatorV2.connect(random).getFeeTier(12)) == 1000) + assert((await vrfCoordinatorV2.connect(random).getFeeTier(20)) == 1000) + }) + it('tier3', async function () { + assert((await vrfCoordinatorV2.connect(random).getFeeTier(21)) == 100) + assert((await vrfCoordinatorV2.connect(random).getFeeTier(30)) == 100) + }) + it('tier4', async function () { + assert((await vrfCoordinatorV2.connect(random).getFeeTier(31)) == 10) + assert((await vrfCoordinatorV2.connect(random).getFeeTier(40)) == 10) + }) + it('tier5', async function () { + assert((await vrfCoordinatorV2.connect(random).getFeeTier(41)) == 1) + assert((await vrfCoordinatorV2.connect(random).getFeeTier(123102)) == 1) + }) + }) + + /* + Note that all the fulfillment happy path testing is done in Go, to make use of the existing go code to produce + proofs offchain. 
+ */ +}) diff --git a/contracts/test/v0.8/dev/VRFCoordinatorV2Mock.test.ts b/contracts/test/v0.8/dev/VRFCoordinatorV2Mock.test.ts new file mode 100644 index 00000000..d5f2c3ac --- /dev/null +++ b/contracts/test/v0.8/dev/VRFCoordinatorV2Mock.test.ts @@ -0,0 +1,331 @@ +import { assert, expect } from 'chai' +import { BigNumber, Contract, Signer } from 'ethers' +import { ethers } from 'hardhat' + +describe('VRFCoordinatorV2Mock', () => { + let vrfCoordinatorV2Mock: Contract + let vrfConsumerV2: Contract + let linkToken: Contract + let subOwner: Signer + let random: Signer + let subOwnerAddress: string + let pointOneLink = BigNumber.from('100000000000000000') + let oneLink = BigNumber.from('1000000000000000000') + let keyhash = + '0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0' + let testConsumerAddress = '0x1111000000000000000000000000000000001111' + let testConsumerAddress2 = '0x1111000000000000000000000000000000001110' + + beforeEach(async () => { + const accounts = await ethers.getSigners() + subOwner = accounts[1] + subOwnerAddress = await subOwner.getAddress() + random = accounts[2] + + const vrfCoordinatorV2MockFactory = await ethers.getContractFactory( + 'src/v0.8/vrf/mocks/VRFCoordinatorV2Mock.sol:VRFCoordinatorV2Mock', + accounts[0], + ) + vrfCoordinatorV2Mock = await vrfCoordinatorV2MockFactory.deploy( + pointOneLink, + 1e9, // 0.000000001 PLI per gas + ) + + const ltFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + accounts[0], + ) + linkToken = await ltFactory.deploy() + + const vrfConsumerV2Factory = await ethers.getContractFactory( + 'src/v0.8/vrf/testhelpers/VRFConsumerV2.sol:VRFConsumerV2', + accounts[0], + ) + vrfConsumerV2 = await vrfConsumerV2Factory.deploy( + vrfCoordinatorV2Mock.address, + linkToken.address, + ) + }) + + async function createSubscription(): Promise { + const tx = await vrfCoordinatorV2Mock.connect(subOwner).createSubscription() + const receipt = await tx.wait() + return 
receipt.events[0].args['subId'] + } + + describe('#createSubscription', async function () { + it('can create a subscription', async function () { + await expect(vrfCoordinatorV2Mock.connect(subOwner).createSubscription()) + .to.emit(vrfCoordinatorV2Mock, 'SubscriptionCreated') + .withArgs(1, subOwnerAddress) + const s = await vrfCoordinatorV2Mock.getSubscription(1) + assert(s.balance.toString() == '0', 'invalid balance') + assert(s.owner == subOwnerAddress, 'invalid address') + }) + it('subscription id increments', async function () { + await expect(vrfCoordinatorV2Mock.connect(subOwner).createSubscription()) + .to.emit(vrfCoordinatorV2Mock, 'SubscriptionCreated') + .withArgs(1, subOwnerAddress) + await expect(vrfCoordinatorV2Mock.connect(subOwner).createSubscription()) + .to.emit(vrfCoordinatorV2Mock, 'SubscriptionCreated') + .withArgs(2, subOwnerAddress) + }) + }) + describe('#addConsumer', async function () { + it('can add a consumer to a subscription', async function () { + let subId = await createSubscription() + await expect( + vrfCoordinatorV2Mock + .connect(subOwner) + .addConsumer(subId, testConsumerAddress), + ) + .to.emit(vrfCoordinatorV2Mock, 'ConsumerAdded') + .withArgs(subId, testConsumerAddress) + let sub = await vrfCoordinatorV2Mock + .connect(subOwner) + .getSubscription(subId) + expect(sub.consumers).to.eql([testConsumerAddress]) + }) + it('cannot add a consumer to a nonexistent subscription', async function () { + await expect( + vrfCoordinatorV2Mock + .connect(subOwner) + .addConsumer(4, testConsumerAddress), + ).to.be.revertedWith('InvalidSubscription') + }) + it('cannot add more than the consumer maximum', async function () { + let subId = await createSubscription() + for (let i = 0; i < 100; i++) { + const testIncrementingAddress = BigNumber.from(i) + .add('0x1000000000000000000000000000000000000000') + .toHexString() + await expect( + vrfCoordinatorV2Mock + .connect(subOwner) + .addConsumer(subId, testIncrementingAddress), + 
).to.emit(vrfCoordinatorV2Mock, 'ConsumerAdded') + } + await expect( + vrfCoordinatorV2Mock + .connect(subOwner) + .addConsumer(subId, testConsumerAddress), + ).to.be.revertedWith('TooManyConsumers') + }) + }) + describe('#removeConsumer', async function () { + it('can remove a consumer from a subscription', async function () { + let subId = await createSubscription() + for (const addr of [testConsumerAddress, testConsumerAddress2]) { + await expect( + vrfCoordinatorV2Mock.connect(subOwner).addConsumer(subId, addr), + ).to.emit(vrfCoordinatorV2Mock, 'ConsumerAdded') + } + + let sub = await vrfCoordinatorV2Mock + .connect(subOwner) + .getSubscription(subId) + expect(sub.consumers).to.eql([testConsumerAddress, testConsumerAddress2]) + + await expect( + vrfCoordinatorV2Mock + .connect(subOwner) + .removeConsumer(subId, testConsumerAddress), + ) + .to.emit(vrfCoordinatorV2Mock, 'ConsumerRemoved') + .withArgs(subId, testConsumerAddress) + + sub = await vrfCoordinatorV2Mock.connect(subOwner).getSubscription(subId) + expect(sub.consumers).to.eql([testConsumerAddress2]) + }) + it('cannot remove a consumer from a nonexistent subscription', async function () { + await expect( + vrfCoordinatorV2Mock + .connect(subOwner) + .removeConsumer(4, testConsumerAddress), + ).to.be.revertedWith('InvalidSubscription') + }) + it('cannot remove a consumer after it is already removed', async function () { + let subId = await createSubscription() + + await expect( + vrfCoordinatorV2Mock + .connect(subOwner) + .addConsumer(subId, testConsumerAddress), + ).to.emit(vrfCoordinatorV2Mock, 'ConsumerAdded') + + await expect( + vrfCoordinatorV2Mock + .connect(subOwner) + .removeConsumer(subId, testConsumerAddress), + ).to.emit(vrfCoordinatorV2Mock, 'ConsumerRemoved') + + await expect( + vrfCoordinatorV2Mock + .connect(subOwner) + .removeConsumer(subId, testConsumerAddress), + ).to.be.revertedWith('InvalidConsumer') + }) + }) + describe('#fundSubscription', async function () { + it('can fund a 
subscription', async function () { + let subId = await createSubscription() + await expect( + vrfCoordinatorV2Mock.connect(subOwner).fundSubscription(subId, oneLink), + ) + .to.emit(vrfCoordinatorV2Mock, 'SubscriptionFunded') + .withArgs(subId, 0, oneLink) + let sub = await vrfCoordinatorV2Mock + .connect(subOwner) + .getSubscription(subId) + expect(sub.balance).to.equal(oneLink) + }) + it('cannot fund a nonexistent subscription', async function () { + await expect( + vrfCoordinatorV2Mock.connect(subOwner).fundSubscription(4, oneLink), + ).to.be.revertedWith('InvalidSubscription') + }) + }) + describe('#cancelSubscription', async function () { + it('can cancel a subscription', async function () { + let subId = await createSubscription() + await expect( + vrfCoordinatorV2Mock.connect(subOwner).getSubscription(subId), + ).to.not.be.reverted + + await expect( + vrfCoordinatorV2Mock + .connect(subOwner) + .cancelSubscription(subId, subOwner.getAddress()), + ).to.emit(vrfCoordinatorV2Mock, 'SubscriptionCanceled') + + await expect( + vrfCoordinatorV2Mock.connect(subOwner).getSubscription(subId), + ).to.be.revertedWith('InvalidSubscription') + }) + }) + describe('#fulfillRandomWords', async function () { + it('fails to fulfill without being a valid consumer', async function () { + let subId = await createSubscription() + + await expect( + vrfCoordinatorV2Mock + .connect(subOwner) + .requestRandomWords(keyhash, subId, 3, 500_000, 2), + ).to.be.revertedWith('InvalidConsumer') + }) + it('fails to fulfill with insufficient funds', async function () { + let subId = await createSubscription() + await vrfCoordinatorV2Mock + .connect(subOwner) + .addConsumer(subId, await subOwner.getAddress()) + + await expect( + vrfCoordinatorV2Mock + .connect(subOwner) + .requestRandomWords(keyhash, subId, 3, 500_000, 2), + ) + .to.emit(vrfCoordinatorV2Mock, 'RandomWordsRequested') + .withArgs(keyhash, 1, 100, subId, 3, 500_000, 2, subOwnerAddress) + + await expect( + vrfCoordinatorV2Mock + 
.connect(random) + .fulfillRandomWords(1, vrfConsumerV2.address), + ).to.be.revertedWith('InsufficientBalance') + }) + it('can request and fulfill [ @skip-coverage ]', async function () { + let subId = await createSubscription() + await vrfCoordinatorV2Mock + .connect(subOwner) + .addConsumer(subId, vrfConsumerV2.address) + await expect( + vrfCoordinatorV2Mock.connect(subOwner).fundSubscription(subId, oneLink), + ).to.not.be.reverted + + // Call requestRandomWords from the consumer contract so that the requestId + // member variable on the consumer is appropriately set. + expect( + await vrfConsumerV2 + .connect(subOwner) + .requestRandomness(keyhash, subId, 3, 500_000, 2), + ) + .to.emit(vrfCoordinatorV2Mock, 'RandomWordsRequested') + .withArgs(keyhash, 1, 100, subId, 3, 500_000, 2, vrfConsumerV2.address) + + let tx = await vrfCoordinatorV2Mock + .connect(random) + .fulfillRandomWords(1, vrfConsumerV2.address) + let receipt = await tx.wait() + expect(receipt.events[0].event).to.equal('RandomWordsFulfilled') + expect(receipt.events[0].args['requestId']).to.equal(1) + expect(receipt.events[0].args['outputSeed']).to.equal(1) + expect(receipt.events[0].args['success']).to.equal(true) + assert( + receipt.events[0].args['payment'] + .sub(BigNumber.from('100119403000000000')) + .lt(BigNumber.from('10000000000')), + ) + + // Check that balance was subtracted + let sub = await vrfCoordinatorV2Mock + .connect(random) + .getSubscription(subId) + expect(sub.balance).to.equal( + oneLink.sub(receipt.events[0].args['payment']), + ) + }) + it('Correctly allows for user override of fulfillRandomWords [ @skip-coverage ]', async function () { + let subId = await createSubscription() + await vrfCoordinatorV2Mock + .connect(subOwner) + .addConsumer(subId, vrfConsumerV2.address) + await expect( + vrfCoordinatorV2Mock.connect(subOwner).fundSubscription(subId, oneLink), + ).to.not.be.reverted + + // Call requestRandomWords from the consumer contract so that the requestId + // member 
variable on the consumer is appropriately set. + expect( + await vrfConsumerV2 + .connect(subOwner) + .requestRandomness(keyhash, subId, 3, 500_000, 2), + ) + .to.emit(vrfCoordinatorV2Mock, 'RandomWordsRequested') + .withArgs(keyhash, 1, 100, subId, 3, 500_000, 2, vrfConsumerV2.address) + + // Call override with incorrect word count. + await expect( + vrfCoordinatorV2Mock + .connect(random) + .fulfillRandomWordsWithOverride( + 1, + vrfConsumerV2.address, + [1, 2, 3, 4, 5], + ), + ).to.be.revertedWith('InvalidRandomWords') + + // Call override correctly. + let tx = await vrfCoordinatorV2Mock + .connect(random) + .fulfillRandomWordsWithOverride(1, vrfConsumerV2.address, [2533, 1768]) + let receipt = await tx.wait() + expect(receipt.events[0].event).to.equal('RandomWordsFulfilled') + expect(receipt.events[0].args['requestId']).to.equal(1) + expect(receipt.events[0].args['outputSeed']).to.equal(1) + expect(receipt.events[0].args['success']).to.equal(true) + assert( + receipt.events[0].args['payment'] + .sub(BigNumber.from('100120516000000000')) + .lt(BigNumber.from('10000000000')), + ) + + // Check that balance was subtracted + let sub = await vrfCoordinatorV2Mock + .connect(random) + .getSubscription(subId) + expect(sub.balance).to.equal( + oneLink.sub(receipt.events[0].args['payment']), + ) + }) + }) +}) diff --git a/contracts/test/v0.8/dev/VRFV2Wrapper.test.ts b/contracts/test/v0.8/dev/VRFV2Wrapper.test.ts new file mode 100644 index 00000000..00e86c87 --- /dev/null +++ b/contracts/test/v0.8/dev/VRFV2Wrapper.test.ts @@ -0,0 +1,656 @@ +import { assert, expect } from 'chai' +import { BigNumber, BigNumberish, Signer } from 'ethers' +import { ethers } from 'hardhat' +import { reset, toBytes32String } from '../../test-helpers/helpers' +import { bigNumEquals } from '../../test-helpers/matchers' +import { describe } from 'mocha' +import { + LinkToken, + MockV3Aggregator, + MockV3Aggregator__factory, + VRFCoordinatorV2Mock, + VRFV2Wrapper, + VRFV2WrapperConsumerExample, + 
VRFV2WrapperOutOfGasConsumerExample, + VRFV2WrapperRevertingConsumerExample, +} from '../../../typechain' + +describe('VRFV2Wrapper', () => { + const pointOneLink = BigNumber.from('100000000000000000') + const pointZeroZeroThreeLink = BigNumber.from('3000000000000000') + const oneHundredLink = BigNumber.from('100000000000000000000') + const oneHundredGwei = BigNumber.from('100000000000') + const fiftyGwei = BigNumber.from('50000000000') + + // Configuration + + // This value is the worst-case gas overhead from the wrapper contract under the following + // conditions, plus some wiggle room: + // - 10 words requested + // - Refund issued to consumer + const wrapperGasOverhead = BigNumber.from(60_000) + const coordinatorGasOverhead = BigNumber.from(52_000) + const wrapperPremiumPercentage = 10 + const maxNumWords = 10 + const weiPerUnitLink = pointZeroZeroThreeLink + const flatFee = pointOneLink + + let wrapper: VRFV2Wrapper + let coordinator: VRFCoordinatorV2Mock + let link: LinkToken + let wrongLink: LinkToken + let linkEthFeed: MockV3Aggregator + let consumer: VRFV2WrapperConsumerExample + let consumerWrongLink: VRFV2WrapperConsumerExample + let consumerRevert: VRFV2WrapperRevertingConsumerExample + let consumerOutOfGas: VRFV2WrapperOutOfGasConsumerExample + + let owner: Signer + let requester: Signer + let consumerOwner: Signer + let withdrawRecipient: Signer + + // This should match implementation in VRFV2Wrapper::calculateGasPriceInternal + const calculatePrice = ( + gasLimit: BigNumberish, + _wrapperGasOverhead: BigNumberish = wrapperGasOverhead, + _coordinatorGasOverhead: BigNumberish = coordinatorGasOverhead, + _gasPriceWei: BigNumberish = oneHundredGwei, + _weiPerUnitLink: BigNumberish = weiPerUnitLink, + _wrapperPremium: BigNumberish = wrapperPremiumPercentage, + _flatFee: BigNumberish = flatFee, + ): BigNumber => { + const totalGas = BigNumber.from(0) + .add(gasLimit) + .add(_wrapperGasOverhead) + .add(_coordinatorGasOverhead) + const baseFee = 
BigNumber.from('1000000000000000000') + .mul(_gasPriceWei) + .mul(totalGas) + .div(_weiPerUnitLink) + const withPremium = baseFee + .mul(BigNumber.from(100).add(_wrapperPremium)) + .div(100) + return withPremium.add(_flatFee) + } + + before(async () => { + await reset() + }) + + beforeEach(async () => { + const accounts = await ethers.getSigners() + owner = accounts[0] + requester = accounts[1] + consumerOwner = accounts[2] + withdrawRecipient = accounts[3] + + const coordinatorFactory = await ethers.getContractFactory( + 'VRFCoordinatorV2Mock', + owner, + ) + coordinator = await coordinatorFactory.deploy( + pointOneLink, + 1e9, // 0.000000001 PLI per gas + ) + + const linkEthFeedFactory = (await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + owner, + )) as unknown as MockV3Aggregator__factory + linkEthFeed = await linkEthFeedFactory.deploy(18, weiPerUnitLink) // 1 PLI = 0.003 ETH + + const linkFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + owner, + ) + link = await linkFactory.deploy() + wrongLink = await linkFactory.deploy() + + const wrapperFactory = await ethers.getContractFactory( + 'VRFV2Wrapper', + owner, + ) + wrapper = await wrapperFactory.deploy( + link.address, + linkEthFeed.address, + coordinator.address, + ) + + const consumerFactory = await ethers.getContractFactory( + 'VRFV2WrapperConsumerExample', + consumerOwner, + ) + consumer = await consumerFactory.deploy(link.address, wrapper.address) + consumerWrongLink = await consumerFactory.deploy( + wrongLink.address, + wrapper.address, + ) + consumerRevert = await consumerFactory.deploy(link.address, wrapper.address) + + const revertingConsumerFactory = await ethers.getContractFactory( + 'VRFV2WrapperRevertingConsumerExample', + consumerOwner, + ) + consumerRevert = await revertingConsumerFactory.deploy( + link.address, + wrapper.address, + ) + + const outOfGasConsumerFactory = await ethers.getContractFactory( + 
'VRFV2WrapperOutOfGasConsumerExample', + consumerOwner, + ) + consumerOutOfGas = await outOfGasConsumerFactory.deploy( + link.address, + wrapper.address, + ) + }) + + const configure = async (): Promise => { + await expect( + wrapper + .connect(owner) + .setConfig( + wrapperGasOverhead, + coordinatorGasOverhead, + wrapperPremiumPercentage, + toBytes32String('keyHash'), + maxNumWords, + ), + ).to.not.be.reverted + } + + const fund = async (address: string, amount: BigNumber): Promise => { + await expect(link.connect(owner).transfer(address, amount)).to.not.be + .reverted + } + + const fundSub = async (): Promise => { + await expect(coordinator.connect(owner).fundSubscription(1, oneHundredLink)) + .to.not.be.reverted + } + + describe('calculatePrice', async () => { + // Note: This is a meta-test for the calculatePrice func above. It is then assumed correct for + // the remainder of the tests + it('can calculate price at 50 gwei, 100k limit', async () => { + const result = calculatePrice( + 100_000, + wrapperGasOverhead, + coordinatorGasOverhead, + fiftyGwei, + weiPerUnitLink, + wrapperPremiumPercentage, + flatFee, + ) + bigNumEquals(BigNumber.from('3986666666666666666'), result) + }) + + it('can calculate price at 50 gwei, 200k limit', async () => { + const result = calculatePrice( + 200_000, + wrapperGasOverhead, + coordinatorGasOverhead, + fiftyGwei, + weiPerUnitLink, + wrapperPremiumPercentage, + flatFee, + ) + bigNumEquals(BigNumber.from('5820000000000000000'), result) + }) + + it('can calculate price at 200 gwei, 100k limit', async () => { + const result = calculatePrice( + 200_000, + wrapperGasOverhead, + coordinatorGasOverhead, + oneHundredGwei, + weiPerUnitLink, + wrapperPremiumPercentage, + flatFee, + ) + bigNumEquals(BigNumber.from('11540000000000000000'), result) + }) + + it('can calculate price at 200 gwei, 100k limit, 25% premium', async () => { + const result = calculatePrice( + 200_000, + wrapperGasOverhead, + coordinatorGasOverhead, + oneHundredGwei, 
+ weiPerUnitLink, + 25, + flatFee, + ) + bigNumEquals(BigNumber.from('13100000000000000000'), result) + }) + }) + + describe('#setConfig/#getConfig', async () => { + it('can be configured', async () => { + await configure() + + const resp = await wrapper.connect(requester).getConfig() + bigNumEquals(BigNumber.from('4000000000000000'), resp[0]) // fallbackWeiPerUnitLink + bigNumEquals(2_700, resp[1]) // stalenessSeconds + bigNumEquals(BigNumber.from('100000'), resp[2]) // fulfillmentFlatFeeLinkPPM + bigNumEquals(wrapperGasOverhead, resp[3]) + bigNumEquals(coordinatorGasOverhead, resp[4]) + bigNumEquals(wrapperPremiumPercentage, resp[5]) + assert.equal(resp[6], toBytes32String('keyHash')) + bigNumEquals(10, resp[7]) + }) + + it('can be reconfigured', async () => { + await configure() + + await expect( + wrapper.connect(owner).setConfig( + 140_000, // wrapperGasOverhead + 195_000, // coordinatorGasOverhead + 9, // wrapperPremiumPercentage + toBytes32String('keyHash2'), // keyHash + 9, // maxNumWords + ), + ).to.not.be.reverted + + const resp = await wrapper.connect(requester).getConfig() + bigNumEquals(BigNumber.from('4000000000000000'), resp[0]) // fallbackWeiPerUnitLink + bigNumEquals(2_700, resp[1]) // stalenessSeconds + bigNumEquals(BigNumber.from('100000'), resp[2]) // fulfillmentFlatFeeLinkPPM + bigNumEquals(140_000, resp[3]) // wrapperGasOverhead + bigNumEquals(195_000, resp[4]) // coordinatorGasOverhead + bigNumEquals(9, resp[5]) // wrapperPremiumPercentage + assert.equal(resp[6], toBytes32String('keyHash2')) // keyHash + bigNumEquals(9, resp[7]) // maxNumWords + }) + + it('cannot be configured by a non-owner', async () => { + await expect( + wrapper.connect(requester).setConfig( + 10_000, // wrapperGasOverhead + 10_000, // coordinatorGasOverhead + 10, // wrapperPremiumPercentage + toBytes32String('keyHash'), // keyHash + 10, // maxNumWords + ), + ).to.be.reverted + }) + }) + describe('#calculatePrice', async () => { + it('cannot calculate price when not 
configured', async () => { + await expect(wrapper.connect(requester).calculateRequestPrice(100_000)).to + .be.reverted + }) + it('can calculate price at 50 gwei, 100k gas', async () => { + await configure() + const expected = calculatePrice( + 100_000, + wrapperGasOverhead, + coordinatorGasOverhead, + fiftyGwei, + weiPerUnitLink, + wrapperPremiumPercentage, + flatFee, + ) + const resp = await wrapper + .connect(requester) + .calculateRequestPrice(100_000, { gasPrice: fiftyGwei }) + bigNumEquals(expected, resp) + }) + + it('can calculate price at 100 gwei, 100k gas', async () => { + await configure() + const expected = calculatePrice( + 100_000, + wrapperGasOverhead, + coordinatorGasOverhead, + oneHundredGwei, + weiPerUnitLink, + wrapperPremiumPercentage, + flatFee, + ) + const resp = await wrapper + .connect(requester) + .calculateRequestPrice(100_000, { gasPrice: oneHundredGwei }) + bigNumEquals(expected, resp) + }) + + it('can calculate price at 100 gwei, 200k gas', async () => { + await configure() + const expected = calculatePrice( + 200_000, + wrapperGasOverhead, + coordinatorGasOverhead, + oneHundredGwei, + weiPerUnitLink, + wrapperPremiumPercentage, + flatFee, + ) + const resp = await wrapper + .connect(requester) + .calculateRequestPrice(200_000, { gasPrice: oneHundredGwei }) + bigNumEquals(expected, resp) + }) + }) + + describe('#estimatePrice', async () => { + it('cannot estimate price when not configured', async () => { + await expect( + wrapper + .connect(requester) + .estimateRequestPrice(100_000, oneHundredGwei), + ).to.be.reverted + }) + it('can estimate price at 50 gwei, 100k gas', async () => { + await configure() + const expected = calculatePrice( + 100_000, + wrapperGasOverhead, + coordinatorGasOverhead, + fiftyGwei, + weiPerUnitLink, + wrapperPremiumPercentage, + flatFee, + ) + const resp = await wrapper + .connect(requester) + .estimateRequestPrice(100_000, fiftyGwei) + bigNumEquals(expected, resp) + }) + + it('can estimate price at 100 gwei, 
100k gas', async () => { + await configure() + const expected = calculatePrice( + 100_000, + wrapperGasOverhead, + coordinatorGasOverhead, + oneHundredGwei, + weiPerUnitLink, + wrapperPremiumPercentage, + flatFee, + ) + const resp = await wrapper + .connect(requester) + .estimateRequestPrice(100_000, oneHundredGwei) + bigNumEquals(expected, resp) + }) + + it('can estimate price at 100 gwei, 200k gas', async () => { + await configure() + const expected = calculatePrice( + 200_000, + wrapperGasOverhead, + coordinatorGasOverhead, + oneHundredGwei, + weiPerUnitLink, + wrapperPremiumPercentage, + flatFee, + ) + const resp = await wrapper + .connect(requester) + .estimateRequestPrice(200_000, oneHundredGwei) + bigNumEquals(expected, resp) + }) + }) + + describe('#onTokenTransfer/#fulfillRandomWords', async () => { + it('cannot request randomness when not configured', async () => { + await expect( + consumer.connect(consumerOwner).makeRequest(80_000, 3, 2, { + gasPrice: oneHundredGwei, + gasLimit: 1_000_000, + }), + ).to.be.reverted + }) + it('can only be called through LinkToken', async () => { + configure() + await expect( + wrongLink + .connect(owner) + .transfer(consumerWrongLink.address, oneHundredLink, { + gasPrice: oneHundredGwei, + gasLimit: 1_000_000, + }), + ).to.not.be.reverted + await expect( + consumerWrongLink.connect(consumerOwner).makeRequest(80_000, 3, 2, { + gasPrice: oneHundredGwei, + gasLimit: 1_000_000, + }), + ).to.be.reverted + }) + it('can request and fulfill randomness', async () => { + await configure() + await fund(consumer.address, oneHundredLink) + await fundSub() + + await expect( + consumer.connect(consumerOwner).makeRequest(100_000, 3, 1, { + gasPrice: oneHundredGwei, + gasLimit: 1_000_000, + }), + ).to.emit(coordinator, 'RandomWordsRequested') + + const price = calculatePrice(100_000) + + // Check that the wrapper has the paid amount + bigNumEquals(price, await link.balanceOf(wrapper.address)) + + const { paid, fulfilled } = await 
consumer.s_requests(1 /* requestId */) + bigNumEquals(price, paid) + expect(fulfilled).to.be.false + + // fulfill the request + await expect( + coordinator + .connect(owner) + .fulfillRandomWordsWithOverride(1, wrapper.address, [123], { + gasLimit: 1_000_000, + }), + ) + .to.emit(coordinator, 'RandomWordsFulfilled') + .to.emit(consumer, 'WrappedRequestFulfilled') + .withArgs(1, [123], BigNumber.from(price)) + + const expectedBalance = price + const diff = expectedBalance + .sub(await link.balanceOf(wrapper.address)) + .abs() + expect(diff.lt(pointOneLink)).to.be.true + }) + it('does not revert if consumer runs out of gas', async () => { + await configure() + await fund(consumerOutOfGas.address, oneHundredLink) + await fundSub() + + await expect( + consumerOutOfGas.connect(consumerOwner).makeRequest(100_000, 3, 1, { + gasPrice: oneHundredGwei, + gasLimit: 1_000_000, + }), + ).to.emit(coordinator, 'RandomWordsRequested') + + const price = calculatePrice(100_000) + + // Check that the wrapper has the paid amount + bigNumEquals(price, await link.balanceOf(wrapper.address)) + + // fulfill the request + await expect( + coordinator + .connect(owner) + .fulfillRandomWordsWithOverride(1, wrapper.address, [123], { + gasLimit: 1_000_000, + }), + ) + .to.emit(coordinator, 'RandomWordsFulfilled') + .to.emit(wrapper, 'WrapperFulfillmentFailed') + }) + it('does not revert if consumer reverts', async () => { + await configure() + await fund(consumerRevert.address, oneHundredLink) + await fundSub() + + await expect( + consumerRevert.connect(consumerOwner).makeRequest(100_000, 3, 1, { + gasPrice: oneHundredGwei, + gasLimit: 1_000_000, + }), + ).to.emit(coordinator, 'RandomWordsRequested') + + const price = calculatePrice(100_000) + + // Check that the wrapper has the paid amount + bigNumEquals(price, await link.balanceOf(wrapper.address)) + + // fulfill the request + await expect( + coordinator + .connect(owner) + .fulfillRandomWordsWithOverride(1, wrapper.address, [123]), + ) + 
.to.emit(coordinator, 'RandomWordsFulfilled') + .to.emit(wrapper, 'WrapperFulfillmentFailed') + + const expectedBalance = price + const diff = expectedBalance + .sub(await link.balanceOf(wrapper.address)) + .abs() + + expect(diff.lt(pointOneLink)).to.be.true + }) + }) + describe('#disable/#enable', async () => { + it('can only calculate price when enabled', async () => { + await configure() + + await expect(wrapper.connect(owner).disable()).to.not.be.reverted + await expect( + wrapper.connect(consumerOwner).calculateRequestPrice(100_000, { + gasPrice: oneHundredGwei, + gasLimit: 1_000_000, + }), + ).to.be.reverted + + await expect(wrapper.connect(owner).enable()).to.not.be.reverted + await expect( + wrapper.connect(consumerOwner).calculateRequestPrice(100_000, { + gasPrice: oneHundredGwei, + gasLimit: 1_000_000, + }), + ).to.not.be.reverted + }) + + it('can only estimate price when enabled', async () => { + await configure() + + await expect(wrapper.connect(owner).disable()).to.not.be.reverted + await expect( + wrapper + .connect(consumerOwner) + .estimateRequestPrice(100_000, oneHundredGwei), + ).to.be.reverted + + await expect(wrapper.connect(owner).enable()).to.not.be.reverted + await expect( + wrapper + .connect(consumerOwner) + .estimateRequestPrice(100_000, oneHundredGwei), + ).to.not.be.reverted + }) + + it('can be configured while disabled', async () => { + await expect(wrapper.connect(owner).disable()).to.not.be.reverted + await configure() + }) + + it('can only request randomness when enabled', async () => { + await configure() + await fund(consumer.address, oneHundredLink) + await fundSub() + + await expect(wrapper.connect(owner).disable()).to.not.be.reverted + await expect( + consumer.connect(consumerOwner).makeRequest(100_000, 3, 1, { + gasPrice: oneHundredGwei, + gasLimit: 1_000_000, + }), + ).to.be.reverted + + await expect(wrapper.connect(owner).enable()).to.not.be.reverted + await expect( + consumer.connect(consumerOwner).makeRequest(100_000, 3, 1, 
{ + gasPrice: oneHundredGwei, + gasLimit: 1_000_000, + }), + ).to.not.be.reverted + }) + + it('can fulfill randomness when disabled', async () => { + await configure() + await fund(consumer.address, oneHundredLink) + await fundSub() + + await expect( + consumer.connect(consumerOwner).makeRequest(100_000, 3, 1, { + gasPrice: oneHundredGwei, + gasLimit: 1_000_000, + }), + ).to.not.be.reverted + await expect(wrapper.connect(owner).disable()).to.not.be.reverted + + await expect( + coordinator + .connect(owner) + .fulfillRandomWordsWithOverride(1, wrapper.address, [123], { + gasLimit: 1_000_000, + }), + ) + .to.emit(coordinator, 'RandomWordsFulfilled') + .to.emit(consumer, 'WrappedRequestFulfilled') + }) + }) + + describe('#withdraw', async () => { + it('can withdraw funds to the owner', async () => { + await configure() + await fund(wrapper.address, oneHundredLink) + const recipientAddress = await withdrawRecipient.getAddress() + + // Withdraw half the funds + await expect( + wrapper + .connect(owner) + .withdraw(recipientAddress, oneHundredLink.div(2)), + ).to.not.be.reverted + bigNumEquals( + oneHundredLink.div(2), + await link.balanceOf(recipientAddress), + ) + bigNumEquals(oneHundredLink.div(2), await link.balanceOf(wrapper.address)) + + // Withdraw the rest + await expect( + wrapper + .connect(owner) + .withdraw(recipientAddress, oneHundredLink.div(2)), + ).to.not.be.reverted + bigNumEquals(oneHundredLink, await link.balanceOf(recipientAddress)) + bigNumEquals(0, await link.balanceOf(wrapper.address)) + }) + + it('cannot withdraw funds to non owners', async () => { + await configure() + await fund(wrapper.address, oneHundredLink) + const recipientAddress = await withdrawRecipient.getAddress() + + await expect( + wrapper + .connect(consumerOwner) + .withdraw(recipientAddress, oneHundredLink.div(2)), + ).to.be.reverted + }) + }) +}) diff --git a/contracts/test/v0.8/foundry/BaseTest.t.sol b/contracts/test/v0.8/foundry/BaseTest.t.sol new file mode 100644 index 
00000000..4da698d1 --- /dev/null +++ b/contracts/test/v0.8/foundry/BaseTest.t.sol @@ -0,0 +1,17 @@ +pragma solidity ^0.8.0; + +import {Test} from "forge-std/Test.sol"; + +contract BaseTest is Test { + bool private s_baseTestInitialized; + address internal constant OWNER = 0x00007e64E1fB0C487F25dd6D3601ff6aF8d32e4e; + + function setUp() public virtual { + // BaseTest.setUp is often called multiple times from tests' setUp due to inheritance. + if (s_baseTestInitialized) return; + s_baseTestInitialized = true; + + // Set msg.sender to OWNER until changePrank or stopPrank is called + vm.startPrank(OWNER); + } +} diff --git a/contracts/test/v0.8/foundry/transmission/EIP_712_1014_4337.t.sol b/contracts/test/v0.8/foundry/transmission/EIP_712_1014_4337.t.sol new file mode 100644 index 00000000..61fb2346 --- /dev/null +++ b/contracts/test/v0.8/foundry/transmission/EIP_712_1014_4337.t.sol @@ -0,0 +1,369 @@ +pragma solidity ^0.8.15; + +import "../BaseTest.t.sol"; +import "../../../../src/v0.8/transmission/dev/ERC-4337/SmartContractAccountFactory.sol"; +import "../../../../src/v0.8/transmission/dev/testhelpers/SmartContractAccountHelper.sol"; +import "../../../../src/v0.8/transmission/dev/ERC-4337/SCA.sol"; +import "../../../../src/v0.8/transmission/dev/testhelpers/Greeter.sol"; +import "../../../../src/v0.8/transmission/dev/ERC-4337/Paymaster.sol"; +import "../../../../src/v0.8/vendor/entrypoint/interfaces/UserOperation.sol"; +import "../../../../src/v0.8/vendor/entrypoint/core/EntryPoint.sol"; +import "../../../../src/v0.8/vendor/entrypoint/interfaces/IEntryPoint.sol"; +import "../../../../src/v0.8/transmission/dev/ERC-4337/SCALibrary.sol"; +import "../../../../src/v0.8/mocks/MockLinkToken.sol"; +import "../../../../src/v0.8/shared/interfaces/LinkTokenInterface.sol"; +import "../../../../src/v0.8/vrf/mocks/VRFCoordinatorMock.sol"; +import "../../../../src/v0.8/tests/MockV3Aggregator.sol"; +import "../../../../src/v0.8/vrf/testhelpers/VRFConsumer.sol"; + 
+/*--------------------------------------------------------------------------------------------------------------------+ +| EIP 712 + 1014 + 4337 | +| ________________ | +| This implementation allows for meta-transactions to be signed by end-users and posted on-chain by executors. It | +| utilizes the following components: | +| - EIP-712: The method by which meta-transactions are authorized. | +| - EIP-1014: The method by which the Smart Contract Account is generated. | +| - EIP-4337: The method by which meta-transactions are executed. | +| | +| The below tests illustrate end-user flows for interacting with this meta-transaction system. For users with | +| existing Smart Contract Accounts (SCAs), they simply sign off on the operation, after which the executor | +| invokes the EntryPoint that authorizes the operation on the end-user's SCA, and then exectute the transaction | +| as the SCA. For users without existing SCAs, EIP-1014 ensures that the address of an SCA can be known in advance, | +| so users can sign-off on transactions that will be executed by a not-yet-deployed SCA. The EntryPoint contract | +| takes advantage of this functionality and allows for the SCA to be created in the same user operation that invokes | +| it, and the end-user signs off on this creation-and-execution flow. After the initial creation-and-execution, the | +| SCA is reused for future transactions. | +| | +| End-Dapps/protocols do not need to be EIP-2771-compliant or accommodate any other kind of transaction standard. | +| They can be interacted with out-of-the-box through the SCA, which acts in place of the user's EOA as their | +| immutable identity. 
| +| | +-+---------------------------------------------------------------------------------------------------------------------*/ + +/*----------------------------+ +| TESTS | +| ________________ | +| | ++----------------------------*/ + +contract EIP_712_1014_4337 is BaseTest { + event RandomnessRequest(address indexed sender, bytes32 indexed keyHash, uint256 indexed seed, uint256 fee); + + address internal constant PLI_WHALE = 0xD883a6A1C22fC4AbFE938a5aDF9B2Cc31b1BF18B; + address internal ENTRY_POINT; + + Greeter greeter; + EntryPoint entryPoint; + MockV3Aggregator linkEthFeed; + + // Randomly generated private/public key pair. + uint256 END_USER_PKEY = uint256(bytes32(hex"99d518dbfea4b4ec301390f7e26d53d711fa1ca0c1a6e4cbed89617d4c578a8e")); + address END_USER = 0xB6708257D4E1bf0b8C144793fc2Ff3193C737ed1; + + function setUp() public override { + BaseTest.setUp(); + // Fund user accounts; + vm.deal(END_USER, 10_000 ether); + vm.deal(PLI_WHALE, 10_000 ether); + + // Impersonate a PLI whale. + changePrank(PLI_WHALE); + + // Create simople greeter contract. + greeter = new Greeter(); + assertEq("", greeter.getGreeting()); + + // Create entry point contract. + entryPoint = new EntryPoint(); + ENTRY_POINT = address(entryPoint); + + // Deploy link/eth feed. + linkEthFeed = new MockV3Aggregator(18, 5000000000000000); // .005 ETH + } + + /// @dev Test case for user that already has a Smart Contract Account. + /// @dev EntryPoint.sol should use the existing SCA to execute the meta transaction. + function testEIP712EIP4337WithExistingSmartContractAccount() public { + // Pre-calculate user smart contract account address. + SmartContractAccountFactory factory = new SmartContractAccountFactory(); + address toDeployAddress = SmartContractAccountHelper.calculateSmartContractAccountAddress( + END_USER, + ENTRY_POINT, + address(factory) + ); + + // Deploy the end-contract. 
+ bytes32 salt = bytes32(uint256(uint160(END_USER)) << 96); + bytes memory fullInitializeCode = SmartContractAccountHelper.getSCAInitCodeWithConstructor(END_USER, ENTRY_POINT); + factory.deploySmartContractAccount(salt, fullInitializeCode); + changePrank(END_USER); + + // Ensure a correct deployment and a functioning end-contract. + uint256 contractCodeSize; + assembly { + contractCodeSize := extcodesize(toDeployAddress) + } + assertTrue(contractCodeSize > 0); + assertEq(END_USER, SCA(toDeployAddress).i_owner()); + + // Create the calldata for a setGreeting call. + string memory greeting = "hi"; + bytes memory encodedGreetingCall = bytes.concat(Greeter.setGreeting.selector, abi.encode(greeting)); // abi.encodeWithSelector equivalent + + // Produce the final full end-tx encoding, to be used as calldata in the user operation. + bytes memory fullEncoding = SmartContractAccountHelper.getFullEndTxEncoding( + address(greeter), + uint256(0), + 0, + encodedGreetingCall + ); + + // Construct the user operation. + UserOperation memory op = UserOperation({ + sender: toDeployAddress, + nonce: 0, + initCode: "", + callData: fullEncoding, + callGasLimit: 1_000_000, + verificationGasLimit: 1_000_000, + preVerificationGas: 10_000, + maxFeePerGas: 100, + maxPriorityFeePerGas: 200, + paymasterAndData: "", + signature: "" + }); + + // Sign user operation. + bytes32 userOpHash = entryPoint.getUserOpHash(op); + bytes32 fullHash = SCALibrary._getUserOpFullHash(userOpHash, toDeployAddress); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(END_USER_PKEY, fullHash); + op.signature = abi.encodePacked(r, s, v - 27); + + // Deposit funds for the transaction. + entryPoint.depositTo{value: 10 ether}(toDeployAddress); + + // Execute the user operation. + UserOperation[] memory operations = new UserOperation[](1); + operations[0] = op; + entryPoint.handleOps(operations, payable(END_USER)); + + // Assert that the greeting was set. 
+ assertEq("hi", Greeter(greeter).getGreeting()); + assertEq(SCA(toDeployAddress).s_nonce(), uint256(1)); + } + + /// @dev Test case for fresh user, EntryPoint.sol should generate a + /// @dev Smart Contract Account for them and execute the meta transaction. + function testEIP712EIP4337AndCreateSmartContractAccount() public { + // Pre-calculate user smart contract account address. + SmartContractAccountFactory factory = new SmartContractAccountFactory(); + address toDeployAddress = SmartContractAccountHelper.calculateSmartContractAccountAddress( + END_USER, + ENTRY_POINT, + address(factory) + ); + + // Construct initCode byte array. + bytes memory fullInitializeCode = SmartContractAccountHelper.getInitCode(address(factory), END_USER, ENTRY_POINT); + + // Create the calldata for a setGreeting call. + string memory greeting = "bye"; + bytes memory encodedGreetingCall = bytes.concat(Greeter.setGreeting.selector, abi.encode(greeting)); + + // Produce the final full end-tx encoding, to be used as calldata in the user operation. + bytes memory fullEncoding = SmartContractAccountHelper.getFullEndTxEncoding( + address(greeter), + uint256(0), + 0, + encodedGreetingCall + ); + + // Construct the user opeartion. + UserOperation memory op = UserOperation({ + sender: toDeployAddress, + nonce: 0, + initCode: fullInitializeCode, + callData: fullEncoding, + callGasLimit: 1_000_000, + verificationGasLimit: 1_000_000, + preVerificationGas: 10_000, + maxFeePerGas: 100, + maxPriorityFeePerGas: 200, + paymasterAndData: "", + signature: "" + }); + + // Sign user operation. + bytes32 userOpHash = entryPoint.getUserOpHash(op); + bytes32 fullHash = SCALibrary._getUserOpFullHash(userOpHash, toDeployAddress); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(END_USER_PKEY, fullHash); + op.signature = abi.encodePacked(r, s, v - 27); + + // Deposit funds for the transaction. + entryPoint.depositTo{value: 10 ether}(toDeployAddress); + + // Execute the user operation. 
+ UserOperation[] memory operations = new UserOperation[](1); + operations[0] = op; + entryPoint.handleOps(operations, payable(END_USER)); + + // Assert that the greeting was set. + assertEq("bye", Greeter(greeter).getGreeting()); + assertEq(SCA(toDeployAddress).s_nonce(), uint256(1)); + assertEq(SCA(toDeployAddress).i_owner(), END_USER); + } + + /// @dev Test case for a user executing a setGreeting with a PLI token paymaster. + function testEIP712EIP4337AndCreateSmartContractAccountWithPaymaster() public { + // Pre-calculate user smart contract account address. + SmartContractAccountFactory factory = new SmartContractAccountFactory(); + address toDeployAddress = SmartContractAccountHelper.calculateSmartContractAccountAddress( + END_USER, + ENTRY_POINT, + address(factory) + ); + + // Construct initCode byte array. + bytes memory fullInitializeCode = SmartContractAccountHelper.getInitCode(address(factory), END_USER, ENTRY_POINT); + + // Create the calldata for a setGreeting call. + string memory greeting = "good day"; + bytes memory encodedGreetingCall = bytes.concat(Greeter.setGreeting.selector, abi.encode(greeting)); + + // Produce the final full end-tx encoding, to be used as calldata in the user operation. + bytes memory fullEncoding = SmartContractAccountHelper.getFullEndTxEncoding( + address(greeter), + uint256(0), + 0, + encodedGreetingCall + ); + + // Create Link token, and deposit into paymaster. + MockLinkToken linkToken = new MockLinkToken(); + Paymaster paymaster = new Paymaster(LinkTokenInterface(address(linkToken)), linkEthFeed, ENTRY_POINT); + linkToken.transferAndCall(address(paymaster), 1000 ether, abi.encode(address(toDeployAddress))); + + // Construct the user opeartion. 
+ UserOperation memory op = UserOperation({ + sender: toDeployAddress, + nonce: 0, + initCode: fullInitializeCode, + callData: fullEncoding, + callGasLimit: 1_000_000, + verificationGasLimit: 1_500_000, + preVerificationGas: 10_000, + maxFeePerGas: 100, + maxPriorityFeePerGas: 200, + paymasterAndData: abi.encodePacked(address(paymaster)), + signature: "" + }); + + // Sign user operation. + bytes32 userOpHash = entryPoint.getUserOpHash(op); + bytes32 fullHash = SCALibrary._getUserOpFullHash(userOpHash, toDeployAddress); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(END_USER_PKEY, fullHash); + op.signature = abi.encodePacked(r, s, v - 27); + + // Deposit funds for the transaction. + entryPoint.depositTo{value: 10 ether}(address(paymaster)); + + // Execute the user operation. + UserOperation[] memory operations = new UserOperation[](1); + operations[0] = op; + entryPoint.handleOps(operations, payable(END_USER)); + + // Assert that the greeting was set. + assertEq("good day", Greeter(greeter).getGreeting()); + assertEq(SCA(toDeployAddress).s_nonce(), uint256(1)); + } + + /// @dev Test case for a VRF Request via PLI token paymaster and an SCA. + function testEIP712EIP4337AndCreateSmartContractAccountWithPaymasterForVRFRequest() public { + // Pre-calculate user smart contract account address. + SmartContractAccountFactory factory = new SmartContractAccountFactory(); + address toDeployAddress = SmartContractAccountHelper.calculateSmartContractAccountAddress( + END_USER, + ENTRY_POINT, + address(factory) + ); + + // Construct initCode byte array. + bytes memory fullInitializeCode = SmartContractAccountHelper.getInitCode(address(factory), END_USER, ENTRY_POINT); + + // Create the calldata for a VRF request. 
+ bytes32 keyhash = bytes32(uint256(123)); + uint256 fee = 1 ether; + bytes memory encodedVRFRequestCallData = bytes.concat( + VRFConsumer.doRequestRandomness.selector, + abi.encode(keyhash, fee) + ); + + // Create the VRF Contracts + MockLinkToken linkToken = new MockLinkToken(); + VRFCoordinatorMock vrfCoordinator = new VRFCoordinatorMock(address(linkToken)); + VRFConsumer vrfConsumer = new VRFConsumer(address(vrfCoordinator), address(linkToken)); + + // Produce the final full end-tx encoding, to be used as calldata in the user operation. + bytes memory fullEncoding = SmartContractAccountHelper.getFullEndTxEncoding( + address(vrfConsumer), // end-contract + uint256(0), // value + 0, // timeout (seconds) + encodedVRFRequestCallData + ); + + // Create Link token, and deposit into paymaster. + Paymaster paymaster = new Paymaster(LinkTokenInterface(address(linkToken)), linkEthFeed, ENTRY_POINT); + linkToken.transferAndCall(address(paymaster), 1000 ether, abi.encode(address(toDeployAddress))); + + // Construct direct funding data. + SCALibrary.DirectFundingData memory directFundingData = SCALibrary.DirectFundingData({ + recipient: address(vrfConsumer), + topupThreshold: 1, + topupAmount: 10 ether + }); + + // Construct the user opeartion. + UserOperation memory op = UserOperation({ + sender: toDeployAddress, + nonce: 0, + initCode: fullInitializeCode, + callData: fullEncoding, + callGasLimit: 200_000, + verificationGasLimit: 1_000_000, + preVerificationGas: 10_000, + maxFeePerGas: 10, + maxPriorityFeePerGas: 10, + paymasterAndData: abi.encodePacked(address(paymaster), uint8(0), abi.encode(directFundingData)), + signature: "" + }); + + // Sign user operation. + bytes32 fullHash = SCALibrary._getUserOpFullHash(entryPoint.getUserOpHash(op), toDeployAddress); + op.signature = getSignature(fullHash); + + // Deposit funds for the transaction. + entryPoint.depositTo{value: 10 ether}(address(paymaster)); + + // Assert correct log is emmitted for the end-contract vrf request. 
+ vm.expectEmit(true, true, true, true); + emit RandomnessRequest( + address(vrfConsumer), + keyhash, + 0, // seed - we use a zero seed + fee + ); + + // Execute the user operation. + UserOperation[] memory operations = new UserOperation[](1); + operations[0] = op; + + // Execute user operation and ensure correct outcome. + entryPoint.handleOps(operations, payable(END_USER)); + assertEq(SCA(toDeployAddress).s_nonce(), uint256(1)); + } + + function getSignature(bytes32 h) internal view returns (bytes memory) { + (uint8 v, bytes32 r, bytes32 s) = vm.sign(END_USER_PKEY, h); + return abi.encodePacked(r, s, v - 27); + } +} diff --git a/contracts/test/v0.8/foundry/vrf/ChainSpecificUtil.t.sol b/contracts/test/v0.8/foundry/vrf/ChainSpecificUtil.t.sol new file mode 100644 index 00000000..e0ac0036 --- /dev/null +++ b/contracts/test/v0.8/foundry/vrf/ChainSpecificUtil.t.sol @@ -0,0 +1,195 @@ +pragma solidity 0.8.6; + +import "../BaseTest.t.sol"; +import {ChainSpecificUtil} from "../../../../src/v0.8/ChainSpecificUtil.sol"; +import {ArbSys} from "../../../../src/v0.8/vendor/@arbitrum/nitro-contracts/src/precompiles/ArbSys.sol"; +import {ArbGasInfo} from "../../../../src/v0.8/vendor/@arbitrum/nitro-contracts/src/precompiles/ArbGasInfo.sol"; +import {OVM_GasPriceOracle} from "../../../../src/v0.8/vendor/@eth-optimism/contracts/v0.8.6/contracts/L2/predeploys/OVM_GasPriceOracle.sol"; + +contract ChainSpecificUtilTest is BaseTest { + // ------------ Start Arbitrum Constants ------------ + + /// @dev ARBSYS_ADDR is the address of the ArbSys precompile on Arbitrum. + /// @dev reference: https://github.com/OffchainLabs/nitro/blob/v2.0.14/contracts/src/precompiles/ArbSys.sol#L10 + address private constant ARBSYS_ADDR = address(0x0000000000000000000000000000000000000064); + ArbSys private constant ARBSYS = ArbSys(ARBSYS_ADDR); + + /// @dev ARBGAS_ADDR is the address of the ArbGasInfo precompile on Arbitrum. 
+ /// @dev reference: https://github.com/OffchainLabs/nitro/blob/v2.0.14/contracts/src/precompiles/ArbGasInfo.sol#L10 + address private constant ARBGAS_ADDR = address(0x000000000000000000000000000000000000006C); + ArbGasInfo private constant ARBGAS = ArbGasInfo(ARBGAS_ADDR); + + uint256 private constant ARB_MAINNET_CHAIN_ID = 42161; + uint256 private constant ARB_GOERLI_TESTNET_CHAIN_ID = 421613; + uint256 private constant ARB_SEPOLIA_TESTNET_CHAIN_ID = 421614; + + // ------------ End Arbitrum Constants ------------ + + // ------------ Start Optimism Constants ------------ + /// @dev L1_FEE_DATA_PADDING includes 35 bytes for L1 data padding for Optimism + bytes internal constant L1_FEE_DATA_PADDING = + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"; + /// @dev OVM_GASPRICEORACLE_ADDR is the address of the OVM_GasPriceOracle precompile on Optimism. + /// @dev reference: https://community.optimism.io/docs/developers/build/transaction-fees/#estimating-the-l1-data-fee + address private constant OVM_GASPRICEORACLE_ADDR = address(0x420000000000000000000000000000000000000F); + OVM_GasPriceOracle private constant OVM_GASPRICEORACLE = OVM_GasPriceOracle(OVM_GASPRICEORACLE_ADDR); + + uint256 private constant OP_MAINNET_CHAIN_ID = 10; + uint256 private constant OP_GOERLI_CHAIN_ID = 420; + uint256 private constant OP_SEPOLIA_CHAIN_ID = 11155420; + + /// @dev Base is a OP stack based rollup and follows the same L1 pricing logic as Optimism. 
+ uint256 private constant BASE_MAINNET_CHAIN_ID = 8453; + uint256 private constant BASE_GOERLI_CHAIN_ID = 84531; + + // ------------ End Optimism Constants ------------ + + function setUp() public override { + BaseTest.setUp(); + vm.clearMockedCalls(); + } + + function testGetBlockhashArbitrum() public { + uint256[3] memory chainIds = [ARB_MAINNET_CHAIN_ID, ARB_GOERLI_TESTNET_CHAIN_ID, ARB_SEPOLIA_TESTNET_CHAIN_ID]; + bytes32[3] memory expectedBlockHashes = [keccak256("mainnet"), keccak256("goerli"), keccak256("sepolia")]; + uint256[3] memory expectedBlockNumbers = [uint256(10), 11, 12]; + for (uint256 i = 0; i < chainIds.length; i++) { + vm.chainId(chainIds[i]); + bytes32 expectedBlockHash = expectedBlockHashes[i]; + uint256 expectedBlockNumber = expectedBlockNumbers[i]; + vm.mockCall( + ARBSYS_ADDR, + abi.encodeWithSelector(ArbSys.arbBlockNumber.selector), + abi.encode(expectedBlockNumber + 1) + ); + vm.mockCall( + ARBSYS_ADDR, + abi.encodeWithSelector(ArbSys.arbBlockHash.selector, expectedBlockNumber), + abi.encodePacked(expectedBlockHash) + ); + bytes32 actualBlockHash = ChainSpecificUtil._getBlockhash(uint64(expectedBlockNumber)); + assertEq(expectedBlockHash, actualBlockHash, "incorrect blockhash"); + } + } + + function testGetBlockhashOptimism() public { + // Optimism L2 block hash is simply blockhash() + bytes32 actualBlockhash = ChainSpecificUtil._getBlockhash(uint64(block.number - 1)); + assertEq(blockhash(block.number - 1), actualBlockhash); + } + + function testGetBlockNumberArbitrum() public { + uint256[2] memory chainIds = [ARB_MAINNET_CHAIN_ID, ARB_GOERLI_TESTNET_CHAIN_ID]; + uint256[3] memory expectedBlockNumbers = [uint256(10), 11, 12]; + for (uint256 i = 0; i < chainIds.length; i++) { + vm.chainId(chainIds[i]); + uint256 expectedBlockNumber = expectedBlockNumbers[i]; + vm.mockCall(ARBSYS_ADDR, abi.encodeWithSelector(ArbSys.arbBlockNumber.selector), abi.encode(expectedBlockNumber)); + uint256 actualBlockNumber = 
ChainSpecificUtil._getBlockNumber(); + assertEq(expectedBlockNumber, actualBlockNumber, "incorrect block number"); + } + } + + function testGetBlockNumberOptimism() public { + // Optimism L2 block number is simply block.number + uint256 actualBlockNumber = ChainSpecificUtil._getBlockNumber(); + assertEq(block.number, actualBlockNumber); + } + + function testGetCurrentTxL1GasFeesArbitrum() public { + uint256[3] memory chainIds = [ARB_MAINNET_CHAIN_ID, ARB_GOERLI_TESTNET_CHAIN_ID, ARB_SEPOLIA_TESTNET_CHAIN_ID]; + uint256[3] memory expectedGasFees = [uint256(10 gwei), 12 gwei, 14 gwei]; + for (uint256 i = 0; i < chainIds.length; i++) { + vm.chainId(chainIds[i]); + uint256 expectedGasFee = expectedGasFees[i]; + vm.mockCall( + ARBGAS_ADDR, + abi.encodeWithSelector(ArbGasInfo.getCurrentTxL1GasFees.selector), + abi.encode(expectedGasFee) + ); + uint256 actualGasFee = ChainSpecificUtil._getCurrentTxL1GasFees(""); + assertEq(expectedGasFee, actualGasFee, "incorrect gas fees"); + } + } + + function testGetCurrentTxL1GasFeesOptimism() public { + // set optimism chain id + uint256[5] memory chainIds = [ + OP_MAINNET_CHAIN_ID, + OP_GOERLI_CHAIN_ID, + OP_SEPOLIA_CHAIN_ID, + BASE_MAINNET_CHAIN_ID, + BASE_GOERLI_CHAIN_ID + ]; + uint256[5] memory expectedGasFees = [uint256(10 gwei), 12 gwei, 14 gwei, 16 gwei, 18 gwei]; + for (uint256 i = 0; i < chainIds.length; i++) { + vm.chainId(chainIds[i]); + uint256 expectedL1Fee = expectedGasFees[i]; + bytes memory someCalldata = abi.encode(address(0), "blah", uint256(1)); + vm.mockCall( + OVM_GASPRICEORACLE_ADDR, + abi.encodeWithSelector(OVM_GasPriceOracle.getL1Fee.selector, bytes.concat(someCalldata, L1_FEE_DATA_PADDING)), + abi.encode(expectedL1Fee) + ); + uint256 actualL1Fee = ChainSpecificUtil._getCurrentTxL1GasFees(someCalldata); + assertEq(expectedL1Fee, actualL1Fee, "incorrect gas fees"); + } + } + + function testGetL1CalldataGasCostArbitrum() public { + uint256[3] memory chainIds = [ARB_MAINNET_CHAIN_ID, ARB_GOERLI_TESTNET_CHAIN_ID, 
ARB_SEPOLIA_TESTNET_CHAIN_ID]; + for (uint256 i = 0; i < chainIds.length; i++) { + vm.chainId(chainIds[i]); + vm.mockCall( + ARBGAS_ADDR, + abi.encodeWithSelector(ArbGasInfo.getPricesInWei.selector), + abi.encode(0, 10, 0, 0, 0, 0) + ); + + // fee = l1PricePerByte * (calldataSizeBytes + 140) + // fee = 10 * (10 + 140) = 1500 + uint256 dataFee = ChainSpecificUtil._getL1CalldataGasCost(10); + assertEq(dataFee, 1500); + } + } + + function testGetL1CalldataGasCostOptimism() public { + uint256[5] memory chainIds = [ + OP_MAINNET_CHAIN_ID, + OP_GOERLI_CHAIN_ID, + OP_SEPOLIA_CHAIN_ID, + BASE_MAINNET_CHAIN_ID, + BASE_GOERLI_CHAIN_ID + ]; + for (uint256 i = 0; i < chainIds.length; i++) { + vm.chainId(chainIds[i]); + vm.mockCall( + OVM_GASPRICEORACLE_ADDR, + abi.encodeWithSelector(bytes4(hex"519b4bd3")), // l1BaseFee() + abi.encode(10) + ); + vm.mockCall( + OVM_GASPRICEORACLE_ADDR, + abi.encodeWithSelector(bytes4(hex"0c18c162")), // overhead() + abi.encode(160) + ); + vm.mockCall( + OVM_GASPRICEORACLE_ADDR, + abi.encodeWithSelector(bytes4(hex"f45e65d8")), // scalar() + abi.encode(500_000) + ); + vm.mockCall( + OVM_GASPRICEORACLE_ADDR, + abi.encodeWithSelector(bytes4(hex"313ce567")), // decimals() + abi.encode(6) + ); + + // tx_data_gas = count_zero_bytes(tx_data) * 4 + count_non_zero_bytes(tx_data) * 16 + // tx_data_gas = 0 * 4 + 10 * 16 = 160 + // l1_data_fee = l1_gas_price * (tx_data_gas + fixed_overhead) * dynamic_overhead + // l1_data_fee = 10 * (160 + 160) * 500_000 / 1_000_000 = 1600 + uint256 dataFee = ChainSpecificUtil._getL1CalldataGasCost(10); + assertEq(dataFee, 1600); + } + } +} diff --git a/contracts/test/v0.8/foundry/vrf/TrustedBlockhashStore.t.sol b/contracts/test/v0.8/foundry/vrf/TrustedBlockhashStore.t.sol new file mode 100644 index 00000000..33ffd1bb --- /dev/null +++ b/contracts/test/v0.8/foundry/vrf/TrustedBlockhashStore.t.sol @@ -0,0 +1,89 @@ +pragma solidity 0.8.6; + +import "../BaseTest.t.sol"; +import {TrustedBlockhashStore} from 
"../../../../src/v0.8/vrf/dev/TrustedBlockhashStore.sol"; +import {console} from "forge-std/console.sol"; + +contract TrustedBlockhashStoreTest is BaseTest { + address internal constant PLI_WHALE = 0xD883a6A1C22fC4AbFE938a5aDF9B2Cc31b1BF18B; + address internal constant PLI_WHALE_2 = 0xe9b2C5A6D9bA93dD354783a9De0a265da7551a20; + TrustedBlockhashStore bhs; + uint256 unreachableBlockNumber = 5; + bytes32 unreachableBlockhash; + + function setUp() public override { + BaseTest.setUp(); + + // Get the blockhash for a block that later becomes unreachable in the EVM. + vm.roll(10); + unreachableBlockhash = blockhash(unreachableBlockNumber); + + // Fund our users. + vm.roll(1000); + vm.deal(PLI_WHALE, 10_000 ether); + changePrank(PLI_WHALE); + + address[] memory whitelist = new address[](1); + whitelist[0] = PLI_WHALE; + bhs = new TrustedBlockhashStore(whitelist); + } + + function testGenericBHSFunctions() public { + // Should store. + uint256 blockNumber = 999; + bhs.store(blockNumber); + assertEq(bhs.getBlockhash(blockNumber), blockhash(blockNumber)); + + // Should store earliest. + uint256 earliestBlockNumber = block.number - 256; + bhs.storeEarliest(); + assertEq(bhs.getBlockhash(earliestBlockNumber), blockhash(earliestBlockNumber)); + } + + function testTrustedBHSFunctions() public { + uint256 recentBlockNumber = 999; + + // Assume that the EVM cannot access the blockhash for block 5. 
+ uint256 unreachableBlock = 5; + assertEq(blockhash(unreachableBlock), 0); + + // Store blockhash from whitelisted address; + uint256[] memory invalidBlockNums = new uint256[](0); + uint256[] memory blockNums = new uint256[](1); + blockNums[0] = unreachableBlock; + bytes32[] memory blockhashes = new bytes32[](1); + blockhashes[0] = unreachableBlockhash; + + // Should not be able to store with invalid recent blockhash + vm.expectRevert(TrustedBlockhashStore.InvalidRecentBlockhash.selector); + bhs.storeTrusted(blockNums, blockhashes, recentBlockNumber, blockhash(998)); + + // Should not be able to store or change whitelist for non-whitelisted address. + changePrank(PLI_WHALE_2); + vm.expectRevert(TrustedBlockhashStore.NotInWhitelist.selector); + bhs.storeTrusted(blockNums, blockhashes, recentBlockNumber, blockhash(recentBlockNumber)); + vm.expectRevert("Only callable by owner"); + bhs.setWhitelist(new address[](0)); + + // Should not store for a mismatched list of block numbers and hashes. + changePrank(PLI_WHALE); + vm.expectRevert(TrustedBlockhashStore.InvalidTrustedBlockhashes.selector); + bhs.storeTrusted(invalidBlockNums, blockhashes, recentBlockNumber, blockhash(recentBlockNumber)); + + // Should store unreachable blocks via whitelisted address. + bhs.storeTrusted(blockNums, blockhashes, recentBlockNumber, blockhash(recentBlockNumber)); + assertEq(bhs.getBlockhash(unreachableBlock), unreachableBlockhash); + + // Change whitelist. Assert that the old whitelisted address can no longer store, + // but the new one can. 
+ address[] memory newWhitelist = new address[](1); + newWhitelist[0] = PLI_WHALE_2; + bhs.setWhitelist(newWhitelist); + + vm.expectRevert(TrustedBlockhashStore.NotInWhitelist.selector); + bhs.storeTrusted(blockNums, blockhashes, recentBlockNumber, blockhash(recentBlockNumber)); + + changePrank(PLI_WHALE_2); + bhs.storeTrusted(blockNums, blockhashes, recentBlockNumber, blockhash(recentBlockNumber)); + } +} diff --git a/contracts/test/v0.8/foundry/vrf/VRFCoordinatorV2Mock.t.sol b/contracts/test/v0.8/foundry/vrf/VRFCoordinatorV2Mock.t.sol new file mode 100644 index 00000000..88428800 --- /dev/null +++ b/contracts/test/v0.8/foundry/vrf/VRFCoordinatorV2Mock.t.sol @@ -0,0 +1,381 @@ +pragma solidity 0.8.6; + +import "../BaseTest.t.sol"; +import {VRF} from "../../../../src/v0.8/vrf/VRF.sol"; +import {MockLinkToken} from "../../../../src/v0.8/mocks/MockLinkToken.sol"; +import {MockV3Aggregator} from "../../../../src/v0.8/tests/MockV3Aggregator.sol"; +import {VRFCoordinatorV2Mock} from "../../../../src/v0.8/vrf/mocks/VRFCoordinatorV2Mock.sol"; +import {VRFConsumerV2} from "../../../../src/v0.8/vrf/testhelpers/VRFConsumerV2.sol"; + +contract VRFCoordinatorV2MockTest is BaseTest { + MockLinkToken internal s_linkToken; + MockV3Aggregator internal s_linkEthFeed; + VRFCoordinatorV2Mock internal s_vrfCoordinatorV2Mock; + VRFConsumerV2 internal s_vrfConsumerV2; + address internal s_subOwner = address(1234); + address internal s_randomOwner = address(4567); + + // VRF KeyV2 generated from a node; not sensitive information. + // The secret key used to generate this key is: 10. 
+ bytes internal constant UNCOMPRESSED_PUBLIC_KEY = + hex"a0434d9e47f3c86235477c7b1ae6ae5d3442d49b1943c2b752a68e2a47e247c7893aba425419bc27a3b6c7e693a24c696f794c2ed877a1593cbee53b037368d7"; + bytes internal constant COMPRESSED_PUBLIC_KEY = + hex"a0434d9e47f3c86235477c7b1ae6ae5d3442d49b1943c2b752a68e2a47e247c701"; + bytes32 internal constant KEY_HASH = hex"9f2353bde94264dbc3d554a94cceba2d7d2b4fdce4304d3e09a1fea9fbeb1528"; + + uint32 internal constant DEFAULT_CALLBACK_GAS_LIMIT = 500_000; + uint16 internal constant DEFAULT_REQUEST_CONFIRMATIONS = 3; + uint32 internal constant DEFAULT_NUM_WORDS = 1; + + uint96 pointOneLink = 0.1 ether; + uint96 oneLink = 1 ether; + + event SubscriptionCreated(uint64 indexed subId, address owner); + event SubscriptionFunded(uint64 indexed subId, uint256 oldBalance, uint256 newBalance); + event SubscriptionCanceled(uint64 indexed subId, address to, uint256 amount); + event ConsumerAdded(uint64 indexed subId, address consumer); + event ConsumerRemoved(uint64 indexed subId, address consumer); + event RandomWordsRequested( + bytes32 indexed keyHash, + uint256 requestId, + uint256 preSeed, + uint64 indexed subId, + uint16 minimumRequestConfirmations, + uint32 callbackGasLimit, + uint32 numWords, + address indexed sender + ); + event RandomWordsFulfilled(uint256 indexed requestId, uint256 outputSeed, uint96 payment, bool success); + + function setUp() public override { + BaseTest.setUp(); + + // Fund our users. + vm.roll(1); + vm.deal(OWNER, 10_000 ether); + vm.deal(s_subOwner, 20 ether); + + // Deploy link token and link/eth feed. + s_linkToken = new MockLinkToken(); + s_linkEthFeed = new MockV3Aggregator(18, 500000000000000000); // .5 ETH (good for testing) + + // Deploy coordinator and consumer. 
+ s_vrfCoordinatorV2Mock = new VRFCoordinatorV2Mock( + pointOneLink, + 1_000_000_000 // 0.000000001 PLI per gas + ); + address coordinatorAddr = address(s_vrfCoordinatorV2Mock); + s_vrfConsumerV2 = new VRFConsumerV2(coordinatorAddr, address(s_linkToken)); + + s_vrfCoordinatorV2Mock.setConfig(); + } + + function testCreateSubscription() public { + vm.startPrank(s_subOwner); + vm.expectEmit( + true, // no first indexed topic + false, // no second indexed topic + false, // no third indexed topic + true // check data (target coordinator address) + ); + emit SubscriptionCreated(1, s_subOwner); + uint64 subId = s_vrfCoordinatorV2Mock.createSubscription(); + assertEq(subId, 1); + + (uint96 balance, uint64 reqCount, address owner, address[] memory consumers) = s_vrfCoordinatorV2Mock + .getSubscription(subId); + assertEq(balance, 0); + assertEq(reqCount, 0); + assertEq(owner, s_subOwner); + assertEq(consumers.length, 0); + // s_testCoordinator.fundSubscriptionWithEth{value: 10 ether}(subId); + + // Test if subId increments + vm.expectEmit(true, false, false, true); + emit SubscriptionCreated(2, s_subOwner); + subId = s_vrfCoordinatorV2Mock.createSubscription(); + assertEq(subId, 2); + vm.stopPrank(); + } + + function testAddConsumer() public { + vm.startPrank(s_subOwner); + uint64 subId = s_vrfCoordinatorV2Mock.createSubscription(); + vm.expectEmit(true, false, false, true); + emit ConsumerAdded(subId, address(s_vrfConsumerV2)); + s_vrfCoordinatorV2Mock.addConsumer(subId, address(s_vrfConsumerV2)); + + (uint96 balance, uint64 reqCount, address owner, address[] memory consumers) = s_vrfCoordinatorV2Mock + .getSubscription(subId); + assertEq(balance, 0); + assertEq(reqCount, 0); + assertEq(owner, s_subOwner); + assertEq(consumers.length, 1); + assertEq(consumers[0], address(s_vrfConsumerV2)); + vm.stopPrank(); + } + + // cannot add a consumer to a nonexistent subscription + function testAddConsumerToInvalidSub() public { + vm.startPrank(s_subOwner); + bytes4 reason = 
bytes4(keccak256("InvalidSubscription()")); + vm.expectRevert(toBytes(reason)); + s_vrfCoordinatorV2Mock.addConsumer(1, address(s_vrfConsumerV2)); + vm.stopPrank(); + } + + // cannot add more than the consumer maximum + function testAddMaxConsumers() public { + vm.startPrank(s_subOwner); + uint64 subId = s_vrfCoordinatorV2Mock.createSubscription(); + // Add 100 consumers + for (uint64 i = 101; i <= 200; ++i) { + s_vrfCoordinatorV2Mock.addConsumer(subId, address(bytes20(keccak256(abi.encodePacked(i))))); + } + // Adding 101th consumer should revert + bytes4 reason = bytes4(keccak256("TooManyConsumers()")); + vm.expectRevert(toBytes(reason)); + s_vrfCoordinatorV2Mock.addConsumer(subId, address(s_vrfConsumerV2)); + vm.stopPrank(); + } + + // can remove a consumer from a subscription + function testRemoveConsumerFromSub() public { + vm.startPrank(s_subOwner); + uint64 subId = s_vrfCoordinatorV2Mock.createSubscription(); + + s_vrfCoordinatorV2Mock.addConsumer(subId, address(s_vrfConsumerV2)); + + (, , , address[] memory consumers) = s_vrfCoordinatorV2Mock.getSubscription(subId); + assertEq(consumers.length, 1); + assertEq(consumers[0], address(s_vrfConsumerV2)); + + vm.expectEmit(true, false, false, true); + emit ConsumerRemoved(subId, address(s_vrfConsumerV2)); + s_vrfCoordinatorV2Mock.removeConsumer(subId, address(s_vrfConsumerV2)); + + vm.stopPrank(); + } + + // cannot remove a consumer from a nonexistent subscription + function testRemoveConsumerFromInvalidSub() public { + vm.startPrank(s_subOwner); + bytes4 reason = bytes4(keccak256("InvalidSubscription()")); + vm.expectRevert(toBytes(reason)); + s_vrfCoordinatorV2Mock.removeConsumer(1, address(s_vrfConsumerV2)); + vm.stopPrank(); + } + + // cannot remove a consumer after it is already removed + function testRemoveConsumerAgain() public { + vm.startPrank(s_subOwner); + uint64 subId = s_vrfCoordinatorV2Mock.createSubscription(); + + s_vrfCoordinatorV2Mock.addConsumer(subId, address(s_vrfConsumerV2)); + + (, , , 
address[] memory consumers) = s_vrfCoordinatorV2Mock.getSubscription(subId); + assertEq(consumers.length, 1); + assertEq(consumers[0], address(s_vrfConsumerV2)); + + vm.expectEmit(true, false, false, true); + emit ConsumerRemoved(subId, address(s_vrfConsumerV2)); + s_vrfCoordinatorV2Mock.removeConsumer(subId, address(s_vrfConsumerV2)); + + // Removing consumer again should revert with InvalidConsumer + bytes4 reason = bytes4(keccak256("InvalidConsumer()")); + vm.expectRevert(toBytes(reason)); + s_vrfCoordinatorV2Mock.removeConsumer(subId, address(s_vrfConsumerV2)); + vm.stopPrank(); + } + + // can fund a subscription + function testFundSubscription() public { + vm.startPrank(s_subOwner); + uint64 subId = s_vrfCoordinatorV2Mock.createSubscription(); + + vm.expectEmit(true, false, false, true); + emit SubscriptionFunded(subId, 0, oneLink); + s_vrfCoordinatorV2Mock.fundSubscription(subId, oneLink); + + (uint96 balance, , , address[] memory consumers) = s_vrfCoordinatorV2Mock.getSubscription(subId); + assertEq(balance, oneLink); + assertEq(consumers.length, 0); + vm.stopPrank(); + } + + // cannot fund a nonexistent subscription + function testFundInvalidSubscription() public { + vm.startPrank(s_subOwner); + + // Removing consumer again should revert with InvalidConsumer + bytes4 reason = bytes4(keccak256("InvalidSubscription()")); + vm.expectRevert(toBytes(reason)); + s_vrfCoordinatorV2Mock.removeConsumer(1, address(s_vrfConsumerV2)); + + vm.stopPrank(); + } + + // can cancel a subscription + function testCancelSubscription() public { + vm.startPrank(s_subOwner); + uint64 subId = s_vrfCoordinatorV2Mock.createSubscription(); + + s_vrfCoordinatorV2Mock.fundSubscription(subId, oneLink); + + vm.expectEmit(true, false, false, true); + emit SubscriptionCanceled(subId, s_subOwner, oneLink); + s_vrfCoordinatorV2Mock.cancelSubscription(subId, s_subOwner); + + bytes4 reason = bytes4(keccak256("InvalidSubscription()")); + vm.expectRevert(toBytes(reason)); + 
s_vrfCoordinatorV2Mock.getSubscription(subId); + + vm.stopPrank(); + } + + // fails to fulfill without being a valid consumer + function testRequestRandomWordsInvalidConsumer() public { + vm.startPrank(s_subOwner); + uint64 subId = s_vrfCoordinatorV2Mock.createSubscription(); + + s_vrfCoordinatorV2Mock.fundSubscription(subId, oneLink); + + bytes4 reason = bytes4(keccak256("InvalidConsumer()")); + vm.expectRevert(toBytes(reason)); + s_vrfCoordinatorV2Mock.requestRandomWords( + KEY_HASH, + subId, + DEFAULT_REQUEST_CONFIRMATIONS, + DEFAULT_CALLBACK_GAS_LIMIT, + DEFAULT_NUM_WORDS + ); + vm.stopPrank(); + } + + // fails to fulfill with insufficient funds + function testRequestRandomWordsInsufficientFunds() public { + vm.startPrank(s_subOwner); + uint64 subId = s_vrfCoordinatorV2Mock.createSubscription(); + + address consumerAddr = address(s_vrfConsumerV2); + s_vrfCoordinatorV2Mock.addConsumer(subId, address(s_vrfConsumerV2)); + + vm.stopPrank(); + + vm.startPrank(consumerAddr); + + vm.expectEmit(true, false, false, true); + emit RandomWordsRequested( + KEY_HASH, + 1, + 100, + subId, + DEFAULT_REQUEST_CONFIRMATIONS, + DEFAULT_CALLBACK_GAS_LIMIT, + DEFAULT_NUM_WORDS, + address(s_subOwner) + ); + uint256 reqId = s_vrfCoordinatorV2Mock.requestRandomWords( + KEY_HASH, + subId, + DEFAULT_REQUEST_CONFIRMATIONS, + DEFAULT_CALLBACK_GAS_LIMIT, + DEFAULT_NUM_WORDS + ); + + bytes4 reason = bytes4(keccak256("InsufficientBalance()")); + vm.expectRevert(toBytes(reason)); + s_vrfCoordinatorV2Mock.fulfillRandomWords(reqId, consumerAddr); + + vm.stopPrank(); + } + + // can request and fulfill [ @skip-coverage ] + function testRequestRandomWordsHappyPath() public { + vm.startPrank(s_subOwner); + uint64 subId = s_vrfCoordinatorV2Mock.createSubscription(); + + s_vrfCoordinatorV2Mock.fundSubscription(subId, oneLink); + + address consumerAddr = address(s_vrfConsumerV2); + s_vrfCoordinatorV2Mock.addConsumer(subId, consumerAddr); + + vm.expectEmit(true, false, false, true); + emit 
RandomWordsRequested( + KEY_HASH, + 1, + 100, + subId, + DEFAULT_REQUEST_CONFIRMATIONS, + DEFAULT_CALLBACK_GAS_LIMIT, + DEFAULT_NUM_WORDS, + address(s_subOwner) + ); + uint256 reqId = s_vrfConsumerV2.requestRandomness( + KEY_HASH, + subId, + DEFAULT_REQUEST_CONFIRMATIONS, + DEFAULT_CALLBACK_GAS_LIMIT, + DEFAULT_NUM_WORDS + ); + + vm.expectEmit(true, false, false, true); + emit RandomWordsFulfilled(reqId, 1, 100090236000000000, true); + s_vrfCoordinatorV2Mock.fulfillRandomWords(reqId, consumerAddr); + + vm.stopPrank(); + } + + // Correctly allows for user override of fulfillRandomWords [ @skip-coverage ] + function testRequestRandomWordsUserOverride() public { + vm.startPrank(s_subOwner); + uint64 subId = s_vrfCoordinatorV2Mock.createSubscription(); + + s_vrfCoordinatorV2Mock.fundSubscription(subId, oneLink); + + address consumerAddr = address(s_vrfConsumerV2); + s_vrfCoordinatorV2Mock.addConsumer(subId, consumerAddr); + + vm.expectEmit(true, false, false, true); + emit RandomWordsRequested( + KEY_HASH, + 1, + 100, + subId, + DEFAULT_REQUEST_CONFIRMATIONS, + DEFAULT_CALLBACK_GAS_LIMIT, + 2, + address(s_subOwner) + ); + uint256 reqId = s_vrfConsumerV2.requestRandomness( + KEY_HASH, + subId, + DEFAULT_REQUEST_CONFIRMATIONS, + DEFAULT_CALLBACK_GAS_LIMIT, + 2 + ); + + // Overriding with a word count that differs from the request (5 vs 2) must revert. + bytes4 reason = bytes4(keccak256("InvalidRandomWords()")); + vm.expectRevert(toBytes(reason)); + uint256[] memory words1 = new uint256[](5); + words1[0] = 1; + words1[1] = 2; + words1[2] = 3; + words1[3] = 4; + words1[4] = 5; + // Pass the array directly; a uint256[](...) cast of a memory array is not valid Solidity. + s_vrfCoordinatorV2Mock.fulfillRandomWordsWithOverride(reqId, consumerAddr, words1); + + vm.expectEmit(true, false, false, true); + // Populate words2 (the array actually passed below) with the override values. + uint256[] memory words2 = new uint256[](2); + words2[0] = 2533; + words2[1] = 1768; + emit RandomWordsFulfilled(reqId, 1, 100072314000000000, true); + s_vrfCoordinatorV2Mock.fulfillRandomWordsWithOverride(reqId, consumerAddr, words2); + + vm.stopPrank(); + } + + // Widens a bytes4 selector into a bytes value for vm.expectRevert. + function toBytes(bytes4 _data) public pure returns (bytes memory) { + return
abi.encodePacked(_data); + } +} diff --git a/contracts/test/v0.8/foundry/vrf/VRFCoordinatorV2Plus_Migration.t.sol b/contracts/test/v0.8/foundry/vrf/VRFCoordinatorV2Plus_Migration.t.sol new file mode 100644 index 00000000..9ff07c56 --- /dev/null +++ b/contracts/test/v0.8/foundry/vrf/VRFCoordinatorV2Plus_Migration.t.sol @@ -0,0 +1,352 @@ +pragma solidity 0.8.6; + +import "../BaseTest.t.sol"; +import {VRFCoordinatorV2Plus_V2Example} from "../../../../src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2Plus_V2Example.sol"; +import {ExposedVRFCoordinatorV2_5} from "../../../../src/v0.8/vrf/dev/testhelpers/ExposedVRFCoordinatorV2_5.sol"; +import {VRFCoordinatorV2_5} from "../../../../src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol"; +import {SubscriptionAPI} from "../../../../src/v0.8/vrf/dev/SubscriptionAPI.sol"; +import {VRFV2PlusConsumerExample} from "../../../../src/v0.8/vrf/dev/testhelpers/VRFV2PlusConsumerExample.sol"; +import {MockLinkToken} from "../../../../src/v0.8/mocks/MockLinkToken.sol"; +import {MockV3Aggregator} from "../../../../src/v0.8/tests/MockV3Aggregator.sol"; +import {VRFV2PlusMaliciousMigrator} from "../../../../src/v0.8/vrf/dev/testhelpers/VRFV2PlusMaliciousMigrator.sol"; + +contract VRFCoordinatorV2Plus_Migration is BaseTest { + uint256 internal constant DEFAULT_PLI_FUNDING = 10 ether; // 10 PLI + uint256 internal constant DEFAULT_NATIVE_FUNDING = 50 ether; // 50 ETH + uint32 internal constant DEFAULT_CALLBACK_GAS_LIMIT = 50_000; + uint16 internal constant DEFAULT_REQUEST_CONFIRMATIONS = 3; + uint32 internal constant DEFAULT_NUM_WORDS = 1; + // VRF KeyV2 generated from a node; not sensitive information. + // The secret key used to generate this key is: 10. 
+ bytes internal constant UNCOMPRESSED_PUBLIC_KEY = + hex"a0434d9e47f3c86235477c7b1ae6ae5d3442d49b1943c2b752a68e2a47e247c7893aba425419bc27a3b6c7e693a24c696f794c2ed877a1593cbee53b037368d7"; + bytes internal constant COMPRESSED_PUBLIC_KEY = + hex"a0434d9e47f3c86235477c7b1ae6ae5d3442d49b1943c2b752a68e2a47e247c701"; + bytes32 internal constant KEY_HASH = hex"9f2353bde94264dbc3d554a94cceba2d7d2b4fdce4304d3e09a1fea9fbeb1528"; + uint64 internal constant GAS_LANE_MAX_GAS = 5000 gwei; + + ExposedVRFCoordinatorV2_5 v1Coordinator; + VRFCoordinatorV2Plus_V2Example v2Coordinator; + ExposedVRFCoordinatorV2_5 v1Coordinator_noLink; + VRFCoordinatorV2Plus_V2Example v2Coordinator_noLink; + uint256 subId; + uint256 subId_noLink; + VRFV2PlusConsumerExample testConsumer; + VRFV2PlusConsumerExample testConsumer_noLink; + MockLinkToken linkToken; + address linkTokenAddr; + MockV3Aggregator linkNativeFeed; + address v1CoordinatorAddr; + address v2CoordinatorAddr; + address v1CoordinatorAddr_noLink; + address v2CoordinatorAddr_noLink; + + event CoordinatorRegistered(address coordinatorAddress); + event CoordinatorDeregistered(address coordinatorAddress); + event MigrationCompleted(address newCoordinator, uint256 subId); + + function setUp() public override { + BaseTest.setUp(); + vm.deal(OWNER, 100 ether); + address bhs = makeAddr("bhs"); + v1Coordinator = new ExposedVRFCoordinatorV2_5(bhs); + v1Coordinator_noLink = new ExposedVRFCoordinatorV2_5(bhs); + subId = v1Coordinator.createSubscription(); + subId_noLink = v1Coordinator_noLink.createSubscription(); + linkToken = new MockLinkToken(); + linkNativeFeed = new MockV3Aggregator(18, 500000000000000000); // .5 ETH (good for testing) + v1Coordinator.setPLIAndPLINativeFeed(address(linkToken), address(linkNativeFeed)); + linkTokenAddr = address(linkToken); + v2Coordinator = new VRFCoordinatorV2Plus_V2Example(address(linkToken), address(v1Coordinator)); + v2Coordinator_noLink = new VRFCoordinatorV2Plus_V2Example(address(0), 
address(v1Coordinator_noLink)); + v1CoordinatorAddr = address(v1Coordinator); + v2CoordinatorAddr = address(v2Coordinator); + v1CoordinatorAddr_noLink = address(v1Coordinator_noLink); + v2CoordinatorAddr_noLink = address(v2Coordinator_noLink); + + vm.expectEmit( + false, // no first indexed topic + false, // no second indexed topic + false, // no third indexed topic + true // check data (target coordinator address) + ); + emit CoordinatorRegistered(v2CoordinatorAddr); + v1Coordinator.registerMigratableCoordinator(v2CoordinatorAddr); + assertTrue(v1Coordinator.isTargetRegisteredExternal(v2CoordinatorAddr)); + + vm.expectEmit( + false, // no first indexed topic + false, // no second indexed topic + false, // no third indexed topic + true // check data (target coordinator address) + ); + emit CoordinatorRegistered(v2CoordinatorAddr_noLink); + v1Coordinator_noLink.registerMigratableCoordinator(v2CoordinatorAddr_noLink); + assertTrue(v1Coordinator_noLink.isTargetRegisteredExternal(v2CoordinatorAddr_noLink)); + + testConsumer = new VRFV2PlusConsumerExample(address(v1Coordinator), address(linkToken)); + testConsumer_noLink = new VRFV2PlusConsumerExample(address(v1Coordinator_noLink), address(0)); + v1Coordinator.setConfig( + DEFAULT_REQUEST_CONFIRMATIONS, + DEFAULT_CALLBACK_GAS_LIMIT, + 600, + 10_000, + 20_000, + 500_000, // fulfillmentFlatFeeNativePPM + 100_000, // fulfillmentFlatFeeLinkDiscountPPM + 15, // nativePremiumPercentage + 10 // linkPremiumPercentage + ); + v1Coordinator_noLink.setConfig( + DEFAULT_REQUEST_CONFIRMATIONS, + DEFAULT_CALLBACK_GAS_LIMIT, + 600, + 10_000, + 20_000, + 500_000, // fulfillmentFlatFeeNativePPM + 100_000, // fulfillmentFlatFeeLinkDiscountPPM + 15, // nativePremiumPercentage + 10 // linkPremiumPercentage + ); + registerProvingKey(); + testConsumer.setCoordinator(v1CoordinatorAddr); + testConsumer_noLink.setCoordinator(v1CoordinatorAddr_noLink); + } + + function testDeregister() public { + vm.expectEmit( + false, // no first indexed topic 
+ false, // no second indexed topic + false, // no third indexed topic + true // check data (target coordinator address) + ); + emit CoordinatorDeregistered(v2CoordinatorAddr); + v1Coordinator.deregisterMigratableCoordinator(v2CoordinatorAddr); + assertFalse(v1Coordinator.isTargetRegisteredExternal(v2CoordinatorAddr)); + + vm.expectRevert(abi.encodeWithSelector(VRFCoordinatorV2_5.CoordinatorNotRegistered.selector, v2CoordinatorAddr)); + v1Coordinator.migrate(subId, v2CoordinatorAddr); + + // test register/deregister multiple coordinators + address v3CoordinatorAddr = makeAddr("v3Coordinator"); + v1Coordinator.registerMigratableCoordinator(v2CoordinatorAddr); + v1Coordinator.registerMigratableCoordinator(v3CoordinatorAddr); + assertTrue(v1Coordinator.isTargetRegisteredExternal(v2CoordinatorAddr)); + assertTrue(v1Coordinator.isTargetRegisteredExternal(v3CoordinatorAddr)); + + v1Coordinator.deregisterMigratableCoordinator(v3CoordinatorAddr); + assertTrue(v1Coordinator.isTargetRegisteredExternal(v2CoordinatorAddr)); + assertFalse(v1Coordinator.isTargetRegisteredExternal(v3CoordinatorAddr)); + + v1Coordinator.registerMigratableCoordinator(v3CoordinatorAddr); + assertTrue(v1Coordinator.isTargetRegisteredExternal(v2CoordinatorAddr)); + assertTrue(v1Coordinator.isTargetRegisteredExternal(v3CoordinatorAddr)); + + v1Coordinator.deregisterMigratableCoordinator(v2CoordinatorAddr); + assertFalse(v1Coordinator.isTargetRegisteredExternal(v2CoordinatorAddr)); + assertTrue(v1Coordinator.isTargetRegisteredExternal(v3CoordinatorAddr)); + + v1Coordinator.deregisterMigratableCoordinator(v3CoordinatorAddr); + assertFalse(v1Coordinator.isTargetRegisteredExternal(v2CoordinatorAddr)); + assertFalse(v1Coordinator.isTargetRegisteredExternal(v3CoordinatorAddr)); + } + + function testMigration() public { + linkToken.transferAndCall(v1CoordinatorAddr, DEFAULT_PLI_FUNDING, abi.encode(subId)); + v1Coordinator.fundSubscriptionWithNative{value: DEFAULT_NATIVE_FUNDING}(subId); + 
v1Coordinator.addConsumer(subId, address(testConsumer)); + + // subscription exists in V1 coordinator before migration + (uint96 balance, uint96 nativeBalance, uint64 reqCount, address owner, address[] memory consumers) = v1Coordinator + .getSubscription(subId); + assertEq(balance, DEFAULT_PLI_FUNDING); + assertEq(nativeBalance, DEFAULT_NATIVE_FUNDING); + assertEq(owner, address(OWNER)); + assertEq(consumers.length, 1); + assertEq(consumers[0], address(testConsumer)); + + assertEq(v1Coordinator.s_totalBalance(), DEFAULT_PLI_FUNDING); + assertEq(v1Coordinator.s_totalNativeBalance(), DEFAULT_NATIVE_FUNDING); + + // Update consumer to point to the new coordinator + vm.expectEmit( + false, // no first indexed field + false, // no second indexed field + false, // no third indexed field + true // check data fields + ); + emit MigrationCompleted(v2CoordinatorAddr, subId); + v1Coordinator.migrate(subId, v2CoordinatorAddr); + + // subscription no longer exists in v1 coordinator after migration + vm.expectRevert(SubscriptionAPI.InvalidSubscription.selector); + v1Coordinator.getSubscription(subId); + assertEq(v1Coordinator.s_totalBalance(), 0); + assertEq(v1Coordinator.s_totalNativeBalance(), 0); + assertEq(linkToken.balanceOf(v1CoordinatorAddr), 0); + assertEq(v1CoordinatorAddr.balance, 0); + + // subscription exists in v2 coordinator + (balance, nativeBalance, reqCount, owner, consumers) = v2Coordinator.getSubscription(subId); + assertEq(owner, address(OWNER)); + assertEq(consumers.length, 1); + assertEq(consumers[0], address(testConsumer)); + assertEq(reqCount, 0); + assertEq(balance, DEFAULT_PLI_FUNDING); + assertEq(nativeBalance, DEFAULT_NATIVE_FUNDING); + assertEq(v2Coordinator.s_totalLinkBalance(), DEFAULT_PLI_FUNDING); + assertEq(v2Coordinator.s_totalNativeBalance(), DEFAULT_NATIVE_FUNDING); + assertEq(linkToken.balanceOf(v2CoordinatorAddr), DEFAULT_PLI_FUNDING); + assertEq(v2CoordinatorAddr.balance, DEFAULT_NATIVE_FUNDING); + + // calling migrate again on V1 
coordinator should fail + vm.expectRevert(SubscriptionAPI.InvalidSubscription.selector); + v1Coordinator.migrate(subId, v2CoordinatorAddr); + + // test request still works after migration + testConsumer.requestRandomWords( + DEFAULT_CALLBACK_GAS_LIMIT, + DEFAULT_REQUEST_CONFIRMATIONS, + DEFAULT_NUM_WORDS, + KEY_HASH, + false + ); + assertEq(testConsumer.s_recentRequestId(), 1); + + v2Coordinator.fulfillRandomWords(testConsumer.s_recentRequestId()); + assertEq( + testConsumer.getRandomness(testConsumer.s_recentRequestId(), 0), + v2Coordinator.generateFakeRandomness(testConsumer.s_recentRequestId())[0] + ); + } + + function testMigrationNoLink() public { + v1Coordinator_noLink.fundSubscriptionWithNative{value: DEFAULT_NATIVE_FUNDING}(subId_noLink); + v1Coordinator_noLink.addConsumer(subId_noLink, address(testConsumer_noLink)); + + // subscription exists in V1 coordinator before migration + ( + uint96 balance, + uint96 nativeBalance, + uint64 reqCount, + address owner, + address[] memory consumers + ) = v1Coordinator_noLink.getSubscription(subId_noLink); + assertEq(balance, 0); + assertEq(nativeBalance, DEFAULT_NATIVE_FUNDING); + assertEq(owner, address(OWNER)); + assertEq(consumers.length, 1); + assertEq(consumers[0], address(testConsumer_noLink)); + + assertEq(v1Coordinator_noLink.s_totalBalance(), 0); + assertEq(v1Coordinator_noLink.s_totalNativeBalance(), DEFAULT_NATIVE_FUNDING); + + // Update consumer to point to the new coordinator + vm.expectEmit( + false, // no first indexed field + false, // no second indexed field + false, // no third indexed field + true // check data fields + ); + emit MigrationCompleted(v2CoordinatorAddr_noLink, subId_noLink); + v1Coordinator_noLink.migrate(subId_noLink, v2CoordinatorAddr_noLink); + + // subscription no longer exists in v1 coordinator after migration + // (query subId_noLink — the subscription this test migrated — not the unrelated subId, + // which never existed on this coordinator and would revert for the wrong reason) + vm.expectRevert(SubscriptionAPI.InvalidSubscription.selector); + v1Coordinator_noLink.getSubscription(subId_noLink); + assertEq(v1Coordinator_noLink.s_totalBalance(), 0); +
assertEq(v1Coordinator_noLink.s_totalNativeBalance(), 0); + assertEq(linkToken.balanceOf(v1CoordinatorAddr_noLink), 0); + assertEq(v1CoordinatorAddr_noLink.balance, 0); + + // subscription exists in v2 coordinator + (balance, nativeBalance, reqCount, owner, consumers) = v2Coordinator_noLink.getSubscription(subId_noLink); + assertEq(owner, address(OWNER)); + assertEq(consumers.length, 1); + assertEq(consumers[0], address(testConsumer_noLink)); + assertEq(reqCount, 0); + assertEq(balance, 0); + assertEq(nativeBalance, DEFAULT_NATIVE_FUNDING); + assertEq(v2Coordinator_noLink.s_totalLinkBalance(), 0); + assertEq(v2Coordinator_noLink.s_totalNativeBalance(), DEFAULT_NATIVE_FUNDING); + assertEq(linkToken.balanceOf(v2CoordinatorAddr_noLink), 0); + assertEq(v2CoordinatorAddr_noLink.balance, DEFAULT_NATIVE_FUNDING); + + // calling migrate again on V1 coordinator should fail + vm.expectRevert(SubscriptionAPI.InvalidSubscription.selector); + v1Coordinator_noLink.migrate(subId_noLink, v2CoordinatorAddr_noLink); + + // test request still works after migration + testConsumer_noLink.requestRandomWords( + DEFAULT_CALLBACK_GAS_LIMIT, + DEFAULT_REQUEST_CONFIRMATIONS, + DEFAULT_NUM_WORDS, + KEY_HASH, + false + ); + assertEq(testConsumer_noLink.s_recentRequestId(), 1); + + v2Coordinator_noLink.fulfillRandomWords(testConsumer_noLink.s_recentRequestId()); + assertEq( + testConsumer_noLink.getRandomness(testConsumer_noLink.s_recentRequestId(), 0), + v2Coordinator_noLink.generateFakeRandomness(testConsumer_noLink.s_recentRequestId())[0] + ); + } + + function testMigrateRevertsWhenInvalidCoordinator() external { + address invalidCoordinator = makeAddr("invalidCoordinator"); + + vm.expectRevert( + abi.encodeWithSelector(VRFCoordinatorV2_5.CoordinatorNotRegistered.selector, address(invalidCoordinator)) + ); + v1Coordinator.migrate(subId, invalidCoordinator); + } + + function testMigrateRevertsWhenInvalidCaller() external { + changePrank(makeAddr("invalidCaller")); + vm.expectRevert(bytes("Not 
subscription owner")); + v1Coordinator.migrate(subId, v2CoordinatorAddr); + } + + function testMigrateRevertsWhenPendingFulfillment() external { + v1Coordinator.addConsumer(subId, address(testConsumer)); + testConsumer.setSubId(subId); + testConsumer.requestRandomWords( + DEFAULT_CALLBACK_GAS_LIMIT, + DEFAULT_REQUEST_CONFIRMATIONS, + DEFAULT_NUM_WORDS, + KEY_HASH, + false + ); + + vm.expectRevert(bytes("Pending request exists")); + v1Coordinator.migrate(subId, v2CoordinatorAddr); + } + + function testMigrateRevertsWhenReentrant() public { + // deploy malicious contracts, subscriptions + address maliciousUser = makeAddr("maliciousUser"); + changePrank(maliciousUser); + uint256 maliciousSubId = v1Coordinator.createSubscription(); + VRFV2PlusMaliciousMigrator prankster = new VRFV2PlusMaliciousMigrator(address(v1Coordinator)); + v1Coordinator.addConsumer(maliciousSubId, address(prankster)); + + // try to migrate malicious subscription, should fail + vm.expectRevert(abi.encodeWithSelector(SubscriptionAPI.Reentrant.selector)); + v1Coordinator.migrate(maliciousSubId, v2CoordinatorAddr); + } + + function registerProvingKey() public { + uint256[2] memory uncompressedKeyParts = this.getProvingKeyParts(UNCOMPRESSED_PUBLIC_KEY); + v1Coordinator.registerProvingKey(uncompressedKeyParts, GAS_LANE_MAX_GAS); + v1Coordinator_noLink.registerProvingKey(uncompressedKeyParts, GAS_LANE_MAX_GAS); + } + + // note: Call this function via this.getProvingKeyParts to be able to pass memory as calldata and + // index over the byte array. 
+ function getProvingKeyParts(bytes calldata uncompressedKey) public pure returns (uint256[2] memory) { + uint256 keyPart1 = uint256(bytes32(uncompressedKey[0:32])); + uint256 keyPart2 = uint256(bytes32(uncompressedKey[32:64])); + return [keyPart1, keyPart2]; + } +} diff --git a/contracts/test/v0.8/foundry/vrf/VRFV2Plus.t.sol b/contracts/test/v0.8/foundry/vrf/VRFV2Plus.t.sol new file mode 100644 index 00000000..37ad0bde --- /dev/null +++ b/contracts/test/v0.8/foundry/vrf/VRFV2Plus.t.sol @@ -0,0 +1,702 @@ +pragma solidity 0.8.6; + +import "../BaseTest.t.sol"; +import {VRF} from "../../../../src/v0.8/vrf/VRF.sol"; +import {MockLinkToken} from "../../../../src/v0.8/mocks/MockLinkToken.sol"; +import {MockV3Aggregator} from "../../../../src/v0.8/tests/MockV3Aggregator.sol"; +import {ExposedVRFCoordinatorV2_5} from "../../../../src/v0.8/vrf/dev/testhelpers/ExposedVRFCoordinatorV2_5.sol"; +import {VRFCoordinatorV2_5} from "../../../../src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol"; +import {SubscriptionAPI} from "../../../../src/v0.8/vrf/dev/SubscriptionAPI.sol"; +import {BlockhashStore} from "../../../../src/v0.8/vrf/dev/BlockhashStore.sol"; +import {VRFV2PlusConsumerExample} from "../../../../src/v0.8/vrf/dev/testhelpers/VRFV2PlusConsumerExample.sol"; +import {VRFV2PlusClient} from "../../../../src/v0.8/vrf/dev/libraries/VRFV2PlusClient.sol"; +import {console} from "forge-std/console.sol"; +import {VmSafe} from "forge-std/Vm.sol"; +import "@openzeppelin/contracts/utils/math/Math.sol"; // for Math.ceilDiv + +/* + * USAGE INSTRUCTIONS: + * To add new tests/proofs, uncomment the "console.sol" import from foundry, and gather key fields + * from your VRF request. + * Then, pass your request info into the generate-proof-v2-plus script command + * located in /core/scripts/vrfv2/testnet/proofs.go to generate a proof that can be tested on-chain. 
+ **/ + +contract VRFV2Plus is BaseTest { + address internal constant PLI_WHALE = 0xD883a6A1C22fC4AbFE938a5aDF9B2Cc31b1BF18B; + uint64 internal constant GAS_LANE_MAX_GAS = 5000 gwei; + + // Bytecode for a VRFV2PlusConsumerExample contract. + // to calculate: console.logBytes(type(VRFV2PlusConsumerExample).creationCode); + bytes constant initializeCode = + hex"60806040523480156200001157600080fd5b5060405162001377380380620013778339810160408190526200003491620001cc565b8133806000816200008c5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000bf57620000bf8162000103565b5050600280546001600160a01b03199081166001600160a01b0394851617909155600580548216958416959095179094555060038054909316911617905562000204565b6001600160a01b0381163314156200015e5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000083565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001c757600080fd5b919050565b60008060408385031215620001e057600080fd5b620001eb83620001af565b9150620001fb60208401620001af565b90509250929050565b61116380620002146000396000f3fe608060405234801561001057600080fd5b50600436106101005760003560e01c80638098004311610097578063cf62c8ab11610066578063cf62c8ab14610242578063de367c8e14610255578063eff2701714610268578063f2fde38b1461027b57600080fd5b806380980043146101ab5780638da5cb5b146101be5780638ea98117146101cf578063a168fa89146101e257600080fd5b80635d7d53e3116100d35780635d7d53e314610166578063706da1ca1461016f5780637725135b1461017857806379ba5097146101a357600080fd5b80631fe543e31461010557806329e5d8311461011a5780632fa4e4421461014057806336bfffed14610153575b600080fd5b610118610113366004610e4e565b61028e565b005b61012d61
0128366004610ef2565b6102fa565b6040519081526020015b60405180910390f35b61011861014e366004610f7f565b610410565b610118610161366004610d5b565b6104bc565b61012d60045481565b61012d60065481565b60035461018b906001600160a01b031681565b6040516001600160a01b039091168152602001610137565b6101186105c0565b6101186101b9366004610e1c565b600655565b6000546001600160a01b031661018b565b6101186101dd366004610d39565b61067e565b61021d6101f0366004610e1c565b6007602052600090815260409020805460019091015460ff82169161010090046001600160a01b03169083565b6040805193151584526001600160a01b03909216602084015290820152606001610137565b610118610250366004610f7f565b61073d565b60055461018b906001600160a01b031681565b610118610276366004610f14565b610880565b610118610289366004610d39565b610a51565b6002546001600160a01b031633146102ec576002546040517f1cf993f40000000000000000000000000000000000000000000000000000000081523360048201526001600160a01b0390911660248201526044015b60405180910390fd5b6102f68282610a65565b5050565b60008281526007602090815260408083208151608081018352815460ff81161515825261010090046001600160a01b0316818501526001820154818401526002820180548451818702810187019095528085528695929460608601939092919083018282801561038957602002820191906000526020600020905b815481526020019060010190808311610375575b50505050508152505090508060400151600014156103e95760405162461bcd60e51b815260206004820152601760248201527f7265717565737420494420697320696e636f727265637400000000000000000060448201526064016102e3565b806060015183815181106103ff576103ff61111c565b602002602001015191505092915050565b6003546002546006546040805160208101929092526001600160a01b0393841693634000aea09316918591015b6040516020818303038152906040526040518463ffffffff1660e01b815260040161046a93929190610ffa565b602060405180830381600087803b15801561048457600080fd5b505af1158015610498573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906102f69190610dff565b60065461050b5760405162461bcd60e51b815260206004820152600d60248201527f7375624944206e6f7420736574000000000000000000000000000000000000006044820152606401
6102e3565b60005b81518110156102f65760055460065483516001600160a01b039092169163bec4c08c91908590859081106105445761054461111c565b60200260200101516040518363ffffffff1660e01b815260040161057b9291909182526001600160a01b0316602082015260400190565b600060405180830381600087803b15801561059557600080fd5b505af11580156105a9573d6000803e3d6000fd5b5050505080806105b8906110f3565b91505061050e565b6001546001600160a01b0316331461061a5760405162461bcd60e51b815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016102e3565b600080543373ffffffffffffffffffffffffffffffffffffffff19808316821784556001805490911690556040516001600160a01b0390921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6000546001600160a01b031633148015906106a457506002546001600160a01b03163314155b1561070e57336106bc6000546001600160a01b031690565b6002546040517f061db9c10000000000000000000000000000000000000000000000000000000081526001600160a01b03938416600482015291831660248301529190911660448201526064016102e3565b6002805473ffffffffffffffffffffffffffffffffffffffff19166001600160a01b0392909216919091179055565b60065461041057600560009054906101000a90046001600160a01b03166001600160a01b031663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b15801561079457600080fd5b505af11580156107a8573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906107cc9190610e35565b60068190556005546040517fbec4c08c00000000000000000000000000000000000000000000000000000000815260048101929092523060248301526001600160a01b03169063bec4c08c90604401600060405180830381600087803b15801561083557600080fd5b505af1158015610849573d6000803e3d6000fd5b505050506003546002546006546040516001600160a01b0393841693634000aea0931691859161043d919060200190815260200190565b60006040518060c0016040528084815260200160065481526020018661ffff1681526020018763ffffffff1681526020018563ffffffff1681526020016108d66040518060200160405280861515815250610af8565b90526002546040517f9b1c385e0000000000000000000000
000000000000000000000000000000000081529192506000916001600160a01b0390911690639b1c385e90610927908590600401611039565b602060405180830381600087803b15801561094157600080fd5b505af1158015610955573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906109799190610e35565b604080516080810182526000808252336020808401918252838501868152855184815280830187526060860190815287855260078352959093208451815493517fffffffffffffffffffffff0000000000000000000000000000000000000000009094169015157fffffffffffffffffffffff0000000000000000000000000000000000000000ff16176101006001600160a01b039094169390930292909217825591516001820155925180519495509193849392610a3f926002850192910190610ca9565b50505060049190915550505050505050565b610a59610b96565b610a6281610bf2565b50565b6004548214610ab65760405162461bcd60e51b815260206004820152601760248201527f7265717565737420494420697320696e636f727265637400000000000000000060448201526064016102e3565b60008281526007602090815260409091208251610adb92600290920191840190610ca9565b50506000908152600760205260409020805460ff19166001179055565b60607f92fd13387c7fe7befbc38d303d6468778fb9731bc4583f17d92989c6fcfdeaaa82604051602401610b3191511515815260200190565b60408051601f198184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009093169290921790915292915050565b6000546001600160a01b03163314610bf05760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016102e3565b565b6001600160a01b038116331415610c4b5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016102e3565b6001805473ffffffffffffffffffffffffffffffffffffffff19166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b828054828255906000526020600020908101928215610ce4579160200282015b82811115610ce4578251825591
602001919060010190610cc9565b50610cf0929150610cf4565b5090565b5b80821115610cf05760008155600101610cf5565b80356001600160a01b0381168114610d2057600080fd5b919050565b803563ffffffff81168114610d2057600080fd5b600060208284031215610d4b57600080fd5b610d5482610d09565b9392505050565b60006020808385031215610d6e57600080fd5b823567ffffffffffffffff811115610d8557600080fd5b8301601f81018513610d9657600080fd5b8035610da9610da4826110cf565b61109e565b80828252848201915084840188868560051b8701011115610dc957600080fd5b600094505b83851015610df357610ddf81610d09565b835260019490940193918501918501610dce565b50979650505050505050565b600060208284031215610e1157600080fd5b8151610d5481611148565b600060208284031215610e2e57600080fd5b5035919050565b600060208284031215610e4757600080fd5b5051919050565b60008060408385031215610e6157600080fd5b8235915060208084013567ffffffffffffffff811115610e8057600080fd5b8401601f81018613610e9157600080fd5b8035610e9f610da4826110cf565b80828252848201915084840189868560051b8701011115610ebf57600080fd5b600094505b83851015610ee2578035835260019490940193918501918501610ec4565b5080955050505050509250929050565b60008060408385031215610f0557600080fd5b50508035926020909101359150565b600080600080600060a08688031215610f2c57600080fd5b610f3586610d25565b9450602086013561ffff81168114610f4c57600080fd5b9350610f5a60408701610d25565b9250606086013591506080860135610f7181611148565b809150509295509295909350565b600060208284031215610f9157600080fd5b81356bffffffffffffffffffffffff81168114610d5457600080fd5b6000815180845260005b81811015610fd357602081850181015186830182015201610fb7565b81811115610fe5576000602083870101525b50601f01601f19169290920160200192915050565b6001600160a01b03841681526bffffffffffffffffffffffff831660208201526060604082015260006110306060830184610fad565b95945050505050565b60208152815160208201526020820151604082015261ffff60408301511660608201526000606083015163ffffffff80821660808501528060808601511660a0850152505060a083015160c08084015261109660e0840182610fad565b949350505050565b604051601f8201601f1916810167ffffffffffffffff811182821017156110c7
576110c7611132565b604052919050565b600067ffffffffffffffff8211156110e9576110e9611132565b5060051b60200190565b600060001982141561111557634e487b7160e01b600052601160045260246000fd5b5060010190565b634e487b7160e01b600052603260045260246000fd5b634e487b7160e01b600052604160045260246000fd5b8015158114610a6257600080fdfea164736f6c6343000806000a"; + + BlockhashStore s_bhs; + ExposedVRFCoordinatorV2_5 s_testCoordinator; + ExposedVRFCoordinatorV2_5 s_testCoordinator_noLink; + VRFV2PlusConsumerExample s_testConsumer; + MockLinkToken s_linkToken; + MockV3Aggregator s_linkNativeFeed; + + // VRF KeyV2 generated from a node; not sensitive information. + // The secret key used to generate this key is: 10. + bytes vrfUncompressedPublicKey = + hex"a0434d9e47f3c86235477c7b1ae6ae5d3442d49b1943c2b752a68e2a47e247c7893aba425419bc27a3b6c7e693a24c696f794c2ed877a1593cbee53b037368d7"; + bytes vrfCompressedPublicKey = hex"a0434d9e47f3c86235477c7b1ae6ae5d3442d49b1943c2b752a68e2a47e247c701"; + bytes32 vrfKeyHash = hex"9f2353bde94264dbc3d554a94cceba2d7d2b4fdce4304d3e09a1fea9fbeb1528"; + + function setUp() public override { + BaseTest.setUp(); + + // Fund our users. + vm.roll(1); + vm.deal(PLI_WHALE, 10_000 ether); + changePrank(PLI_WHALE); + + vm.txGasPrice(100 gwei); + + // Instantiate BHS. + s_bhs = new BlockhashStore(); + + // Deploy coordinator and consumer. + // Note: adding contract deployments to this section will require the VRF proofs be regenerated. + s_testCoordinator = new ExposedVRFCoordinatorV2_5(address(s_bhs)); + s_linkToken = new MockLinkToken(); + s_linkNativeFeed = new MockV3Aggregator(18, 500000000000000000); // .5 ETH (good for testing) + + // Use create2 to deploy our consumer, so that its address is always the same + // and surrounding changes do not alter our generated proofs. 
+ bytes memory consumerInitCode = bytes.concat( + initializeCode, + abi.encode(address(s_testCoordinator), address(s_linkToken)) + ); + bytes32 abiEncodedOwnerAddress = bytes32(uint256(uint160(PLI_WHALE)) << 96); + address consumerCreate2Address; + assembly { + consumerCreate2Address := create2( + 0, // value - left at zero here + add(0x20, consumerInitCode), // initialization bytecode (excluding first memory slot which contains its length) + mload(consumerInitCode), // length of initialization bytecode + abiEncodedOwnerAddress // user-defined nonce to ensure unique SCA addresses + ) + } + s_testConsumer = VRFV2PlusConsumerExample(consumerCreate2Address); + + s_testCoordinator_noLink = new ExposedVRFCoordinatorV2_5(address(s_bhs)); + + // Configure the coordinator. + s_testCoordinator.setPLIAndPLINativeFeed(address(s_linkToken), address(s_linkNativeFeed)); + } + + function setConfig() internal { + s_testCoordinator.setConfig( + 0, // minRequestConfirmations + 2_500_000, // maxGasLimit + 1, // stalenessSeconds + 50_000, // gasAfterPaymentCalculation + 50000000000000000, // fallbackWeiPerUnitLink + 500_000, // fulfillmentFlatFeeNativePPM + 100_000, // fulfillmentFlatFeeLinkDiscountPPM + 15, // nativePremiumPercentage + 10 // linkPremiumPercentage + ); + } + + function testSetConfig() public { + // Should setConfig successfully. + setConfig(); + + // Test that setting requestConfirmations above MAX_REQUEST_CONFIRMATIONS reverts. + vm.expectRevert(abi.encodeWithSelector(VRFCoordinatorV2_5.InvalidRequestConfirmations.selector, 500, 500, 200)); + s_testCoordinator.setConfig( + 500, + 2_500_000, + 1, + 50_000, + 50000000000000000, + 500_000, // fulfillmentFlatFeeNativePPM + 100_000, // fulfillmentFlatFeeLinkDiscountPPM + 15, // nativePremiumPercentage + 10 // linkPremiumPercentage + ); + + // Test that setting fallbackWeiPerUnitLink to zero reverts. 
+ vm.expectRevert(abi.encodeWithSelector(VRFCoordinatorV2_5.InvalidLinkWeiPrice.selector, 0)); + + s_testCoordinator.setConfig( + 0, + 2_500_000, + 1, + 50_000, + 0, + 500_000, // fulfillmentFlatFeeNativePPM + 100_000, // fulfillmentFlatFeeLinkDiscountPPM + 15, // nativePremiumPercentage + 10 // linkPremiumPercentage + ); + + // Test that setting link discount flat fee higher than native flat fee reverts + vm.expectRevert(abi.encodeWithSelector(VRFCoordinatorV2_5.LinkDiscountTooHigh.selector, uint32(1000), uint32(500))); + + s_testCoordinator.setConfig( + 0, + 2_500_000, + 1, + 50_000, + 500, + 500, // fulfillmentFlatFeeNativePPM + 1000, // fulfillmentFlatFeeLinkDiscountPPM + 15, // nativePremiumPercentage + 10 // linkPremiumPercentage + ); + } + + function testRegisterProvingKey() public { + // Should set the proving key successfully. + registerProvingKey(); + + // Should revert when already registered. + uint256[2] memory uncompressedKeyParts = this.getProvingKeyParts(vrfUncompressedPublicKey); + vm.expectRevert(abi.encodeWithSelector(VRFCoordinatorV2_5.ProvingKeyAlreadyRegistered.selector, vrfKeyHash)); + s_testCoordinator.registerProvingKey(uncompressedKeyParts, GAS_LANE_MAX_GAS); + } + + event ProvingKeyRegistered(bytes32 keyHash, uint64 maxGas); + event ProvingKeyDeregistered(bytes32 keyHash, uint64 maxGas); + + function registerProvingKey() public { + uint256[2] memory uncompressedKeyParts = this.getProvingKeyParts(vrfUncompressedPublicKey); + bytes32 keyHash = keccak256(abi.encode(uncompressedKeyParts)); + vm.expectEmit( + false, // no indexed args to check for + false, // no indexed args to check for + false, // no indexed args to check for + true + ); // check data fields: keyHash and maxGas + emit ProvingKeyRegistered(keyHash, GAS_LANE_MAX_GAS); + s_testCoordinator.registerProvingKey(uncompressedKeyParts, GAS_LANE_MAX_GAS); + (bool exists, uint64 maxGas) = s_testCoordinator.s_provingKeys(keyHash); + assertTrue(exists); + assertEq(GAS_LANE_MAX_GAS, 
maxGas); + assertEq(s_testCoordinator.s_provingKeyHashes(0), keyHash); + } + + function testDeregisterProvingKey() public { + // Should set the proving key successfully. + registerProvingKey(); + + bytes + memory unregisteredPubKey = hex"6d919e4ed6add6c34b2af77eb6b2d2f5d27db11ba004e70734b23bd4321ea234ff8577a063314bead6d88c1b01849289a5542767a5138924f38fed551a7773db"; + + // Should revert when given pubkey is not registered + uint256[2] memory unregisteredKeyParts = this.getProvingKeyParts(unregisteredPubKey); + bytes32 unregisterdKeyHash = keccak256(abi.encode(unregisteredKeyParts)); + vm.expectRevert(abi.encodeWithSelector(VRFCoordinatorV2_5.NoSuchProvingKey.selector, unregisterdKeyHash)); + s_testCoordinator.deregisterProvingKey(unregisteredKeyParts); + + // correctly deregister pubkey + uint256[2] memory uncompressedKeyParts = this.getProvingKeyParts(vrfUncompressedPublicKey); + bytes32 keyHash = keccak256(abi.encode(uncompressedKeyParts)); + vm.expectEmit( + false, // no indexed args to check for + false, // no indexed args to check for + false, // no indexed args to check for + true + ); // check data fields: keyHash and maxGas + emit ProvingKeyDeregistered(keyHash, GAS_LANE_MAX_GAS); + s_testCoordinator.deregisterProvingKey(uncompressedKeyParts); + (bool exists, uint64 maxGas) = s_testCoordinator.s_provingKeys(keyHash); + assertFalse(exists); + assertEq(0, maxGas); + } + + // note: Call this function via this.getProvingKeyParts to be able to pass memory as calldata and + // index over the byte array. 
+ function getProvingKeyParts(bytes calldata uncompressedKey) public pure returns (uint256[2] memory) { + uint256 keyPart1 = uint256(bytes32(uncompressedKey[0:32])); + uint256 keyPart2 = uint256(bytes32(uncompressedKey[32:64])); + return [keyPart1, keyPart2]; + } + + function testCreateSubscription() public { + uint256 subId = s_testCoordinator.createSubscription(); + s_testCoordinator.fundSubscriptionWithNative{value: 10 ether}(subId); + } + + function testCancelSubWithNoLink() public { + uint256 subId = s_testCoordinator_noLink.createSubscription(); + s_testCoordinator_noLink.fundSubscriptionWithNative{value: 1000 ether}(subId); + + assertEq(PLI_WHALE.balance, 9000 ether); + s_testCoordinator_noLink.cancelSubscription(subId, PLI_WHALE); + assertEq(PLI_WHALE.balance, 10_000 ether); + + vm.expectRevert(SubscriptionAPI.InvalidSubscription.selector); + s_testCoordinator_noLink.getSubscription(subId); + } + + function testGetActiveSubscriptionIds() public { + uint numSubs = 40; + for (uint i = 0; i < numSubs; i++) { + s_testCoordinator.createSubscription(); + } + // get all subscriptions, assert length is correct + uint256[] memory allSubs = s_testCoordinator.getActiveSubscriptionIds(0, 0); + assertEq(allSubs.length, s_testCoordinator.getActiveSubscriptionIdsLength()); + + // paginate through subscriptions, batching by 10. + // we should eventually get all the subscriptions this way. 
+ uint256[][] memory subIds = paginateSubscriptions(s_testCoordinator, 10); + // check that all subscriptions were returned + uint actualNumSubs = 0; + for (uint batchIdx = 0; batchIdx < subIds.length; batchIdx++) { + for (uint subIdx = 0; subIdx < subIds[batchIdx].length; subIdx++) { + s_testCoordinator.getSubscription(subIds[batchIdx][subIdx]); + actualNumSubs++; + } + } + assertEq(actualNumSubs, s_testCoordinator.getActiveSubscriptionIdsLength()); + + // cancel a bunch of subscriptions, assert that they are not returned + uint256[] memory subsToCancel = new uint256[](3); + for (uint i = 0; i < 3; i++) { + subsToCancel[i] = subIds[0][i]; + } + for (uint i = 0; i < subsToCancel.length; i++) { + s_testCoordinator.cancelSubscription(subsToCancel[i], PLI_WHALE); + } + uint256[][] memory newSubIds = paginateSubscriptions(s_testCoordinator, 10); + // check that all subscriptions were returned + // and assert that none of the canceled subscriptions are returned + actualNumSubs = 0; + for (uint batchIdx = 0; batchIdx < newSubIds.length; batchIdx++) { + for (uint subIdx = 0; subIdx < newSubIds[batchIdx].length; subIdx++) { + for (uint i = 0; i < subsToCancel.length; i++) { + assertFalse(newSubIds[batchIdx][subIdx] == subsToCancel[i]); + } + s_testCoordinator.getSubscription(newSubIds[batchIdx][subIdx]); + actualNumSubs++; + } + } + assertEq(actualNumSubs, s_testCoordinator.getActiveSubscriptionIdsLength()); + } + + function paginateSubscriptions( + ExposedVRFCoordinatorV2_5 coordinator, + uint256 batchSize + ) internal view returns (uint256[][] memory) { + uint arrIndex = 0; + uint startIndex = 0; + uint256 numSubs = coordinator.getActiveSubscriptionIdsLength(); + uint256[][] memory subIds = new uint256[][](Math.ceilDiv(numSubs, batchSize)); + while (startIndex < numSubs) { + subIds[arrIndex] = coordinator.getActiveSubscriptionIds(startIndex, batchSize); + startIndex += batchSize; + arrIndex++; + } + return subIds; + } + + event RandomWordsRequested( + bytes32 indexed 
keyHash, + uint256 requestId, + uint256 preSeed, + uint256 indexed subId, + uint16 minimumRequestConfirmations, + uint32 callbackGasLimit, + uint32 numWords, + bytes extraArgs, + address indexed sender + ); + event RandomWordsFulfilled( + uint256 indexed requestId, + uint256 outputSeed, + uint256 indexed subID, + uint96 payment, + bytes extraArgs, + bool success + ); + + function testRequestAndFulfillRandomWordsNative() public { + ( + VRF.Proof memory proof, + VRFCoordinatorV2_5.RequestCommitment memory rc, + uint256 subId, + uint256 requestId + ) = setupSubAndRequestRandomnessNativePayment(); + (, uint96 nativeBalanceBefore, , , ) = s_testCoordinator.getSubscription(subId); + + uint256 outputSeed = s_testCoordinator.getRandomnessFromProofExternal(proof, rc).randomness; + vm.recordLogs(); + uint96 payment = s_testCoordinator.fulfillRandomWords(proof, rc, false); + VmSafe.Log[] memory entries = vm.getRecordedLogs(); + assertEq(entries[0].topics[1], bytes32(uint256(requestId))); + assertEq(entries[0].topics[2], bytes32(uint256(subId))); + (uint256 loggedOutputSeed, , bool loggedSuccess) = abi.decode(entries[0].data, (uint256, uint256, bool)); + assertEq(loggedOutputSeed, outputSeed); + assertEq(loggedSuccess, true); + + (bool fulfilled, , ) = s_testConsumer.s_requests(requestId); + assertEq(fulfilled, true); + + // The cost of fulfillRandomWords is approximately 70_000 gas. + // gasAfterPaymentCalculation is 50_000. + // + // The cost of the VRF fulfillment charged to the user is: + // baseFeeWei = weiPerUnitGas * (gasAfterPaymentCalculation + startGas - gasleft()) + // baseFeeWei = 1e11 * (50_000 + 70_000) + // baseFeeWei = 1.2e16 + // flatFeeWei = 1e12 * (fulfillmentFlatFeeNativePPM) + // flatFeeWei = 1e12 * 500_000 = 5e17 + // ... 
+ // billed_fee = baseFeeWei * (100 + linkPremiumPercentage / 100) + 5e17 + // billed_fee = 1.2e16 * 1.15 + 5e17 + // billed_fee = 5.138e+17 + (, uint96 nativeBalanceAfter, , , ) = s_testCoordinator.getSubscription(subId); + // 1e15 is less than 1 percent discrepancy + assertApproxEqAbs(payment, 5.138 * 1e17, 1e15); + assertApproxEqAbs(nativeBalanceAfter, nativeBalanceBefore - 5.138 * 1e17, 1e15); + } + + function testRequestAndFulfillRandomWordsPLI() public { + ( + VRF.Proof memory proof, + VRFCoordinatorV2_5.RequestCommitment memory rc, + uint256 subId, + uint256 requestId + ) = setupSubAndRequestRandomnessPLIPayment(); + (uint96 linkBalanceBefore, , , , ) = s_testCoordinator.getSubscription(subId); + + uint256 outputSeed = s_testCoordinator.getRandomnessFromProofExternal(proof, rc).randomness; + vm.recordLogs(); + uint96 payment = s_testCoordinator.fulfillRandomWords(proof, rc, false); + + VmSafe.Log[] memory entries = vm.getRecordedLogs(); + assertEq(entries[0].topics[1], bytes32(uint256(requestId))); + assertEq(entries[0].topics[2], bytes32(uint256(subId))); + (uint256 loggedOutputSeed, , bool loggedSuccess) = abi.decode(entries[0].data, (uint256, uint256, bool)); + assertEq(loggedOutputSeed, outputSeed); + assertEq(loggedSuccess, true); + + (bool fulfilled, , ) = s_testConsumer.s_requests(requestId); + assertEq(fulfilled, true); + + // The cost of fulfillRandomWords is approximately 97_000 gas. + // gasAfterPaymentCalculation is 50_000. 
+ // + // The cost of the VRF fulfillment charged to the user is: + // paymentNoFee = (weiPerUnitGas * (gasAfterPaymentCalculation + startGas - gasleft() + l1CostWei) / link_native_ratio) + // paymentNoFee = (1e11 * (50_000 + 97_000 + 0)) / .5 + // paymentNoFee = 2.94e16 + // flatFeeWei = 1e12 * (fulfillmentFlatFeeNativePPM - fulfillmentFlatFeeLinkDiscountPPM) + // flatFeeWei = 1e12 * (500_000 - 100_000) + // flatFeeJuels = 1e18 * flatFeeWei / link_native_ratio + // flatFeeJuels = 4e17 / 0.5 = 8e17 + // billed_fee = paymentNoFee * ((100 + 10) / 100) + 8e17 + // billed_fee = 2.94e16 * 1.1 + 8e17 + // billed_fee = 3.234e16 + 8e17 = 8.3234e17 + // note: delta is doubled from the native test to account for more variance due to the link/native ratio + (uint96 linkBalanceAfter, , , , ) = s_testCoordinator.getSubscription(subId); + // 1e15 is less than 1 percent discrepancy + assertApproxEqAbs(payment, 8.3234 * 1e17, 1e15); + assertApproxEqAbs(linkBalanceAfter, linkBalanceBefore - 8.3234 * 1e17, 1e15); + } + + function setupSubAndRequestRandomnessPLIPayment() + internal + returns (VRF.Proof memory proof, VRFCoordinatorV2_5.RequestCommitment memory rc, uint256 subId, uint256 requestId) + { + uint32 requestBlock = 20; + vm.roll(requestBlock); + s_linkToken.transfer(address(s_testConsumer), 10 ether); + s_testConsumer.createSubscriptionAndFund(10 ether); + uint256 subId = s_testConsumer.s_subId(); + + // Apply basic configs to contract. + setConfig(); + registerProvingKey(); + + // Request random words. 
+ vm.expectEmit(true, true, false, true); + (uint256 requestId, uint256 preSeed) = s_testCoordinator.computeRequestIdExternal( + vrfKeyHash, + address(s_testConsumer), + subId, + 2 + ); + emit RandomWordsRequested( + vrfKeyHash, + requestId, + preSeed, + subId, + 0, // minConfirmations + 1_000_000, // callbackGasLimit + 1, // numWords + VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: false})), // nativePayment, // nativePayment + address(s_testConsumer) // requester + ); + s_testConsumer.requestRandomWords(1_000_000, 0, 1, vrfKeyHash, false); + (bool fulfilled, , ) = s_testConsumer.s_requests(requestId); + assertEq(fulfilled, false); + + // Uncomment these console logs to see info about the request: + // console.log("requestId: ", requestId); + // console.log("preSeed: ", preSeed); + // console.log("sender: ", address(s_testConsumer)); + + // Move on to the next block. + // Store the previous block's blockhash, and assert that it is as expected. + vm.roll(requestBlock + 1); + s_bhs.store(requestBlock); + assertEq(hex"0000000000000000000000000000000000000000000000000000000000000014", s_bhs.getBlockhash(requestBlock)); + + // Fulfill the request. + // Proof generated via the generate-proof-v2-plus script command. Example usage: + /* + go run . 
generate-proof-v2-plus \ + -key-hash 0x9f2353bde94264dbc3d554a94cceba2d7d2b4fdce4304d3e09a1fea9fbeb1528 \ + -pre-seed 108233140904510496268355288815996296196427471042093167619305836589216327096601 \ + -block-hash 0x0000000000000000000000000000000000000000000000000000000000000014 \ + -block-num 20 \ + -sender 0x90A8820424CC8a819d14cBdE54D12fD3fbFa9bb2 + */ + VRF.Proof memory proof = VRF.Proof({ + pk: [ + 72488970228380509287422715226575535698893157273063074627791787432852706183111, + 62070622898698443831883535403436258712770888294397026493185421712108624767191 + ], + gamma: [ + 49785247270467418393187938018746488660500261614113251546613288843777654841004, + 8320717868018488740308781441198484312662094766876176838868269181386589318272 + ], + c: 41596204381278553342984662603150353549780558761307588910860350083645227536604, + s: 81592778991188138734863787790226463602813498664606420860910885269124681994753, + seed: 108233140904510496268355288815996296196427471042093167619305836589216327096601, + uWitness: 0x56920892EE71E624d369dCc8dc63B6878C85Ca70, + cGammaWitness: [ + 28250667431035633903490940933503696927659499415200427260709034207157951953043, + 105660182690338773283351292037478192732977803900032569393220726139772041021018 + ], + sHashWitness: [ + 18420263847278540234821121001488166570853056146131705862117248292063859054211, + 15740432967529684573970722302302642068194042971767150190061244675457227502736 + ], + zInv: 100579074451139970455673776933943662313989441807178260211316504761358492254052 + }); + VRFCoordinatorV2_5.RequestCommitment memory rc = VRFCoordinatorV2_5.RequestCommitment({ + blockNum: requestBlock, + subId: subId, + callbackGasLimit: 1000000, + numWords: 1, + sender: address(s_testConsumer), + extraArgs: VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: false})) + }); + return (proof, rc, subId, requestId); + } + + function setupSubAndRequestRandomnessNativePayment() + internal + returns (VRF.Proof memory proof, 
VRFCoordinatorV2_5.RequestCommitment memory rc, uint256 subId, uint256 requestId) + { + uint32 requestBlock = 10; + vm.roll(requestBlock); + s_testConsumer.createSubscriptionAndFund(0); + uint256 subId = s_testConsumer.s_subId(); + s_testCoordinator.fundSubscriptionWithNative{value: 10 ether}(subId); + + // Apply basic configs to contract. + setConfig(); + registerProvingKey(); + + // Request random words. + vm.expectEmit(true, true, true, true); + (uint256 requestId, uint256 preSeed) = s_testCoordinator.computeRequestIdExternal( + vrfKeyHash, + address(s_testConsumer), + subId, + 2 + ); + emit RandomWordsRequested( + vrfKeyHash, + requestId, + preSeed, + subId, + 0, // minConfirmations + 1_000_000, // callbackGasLimit + 1, // numWords + VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: true})), // nativePayment + address(s_testConsumer) // requester + ); + s_testConsumer.requestRandomWords(1_000_000, 0, 1, vrfKeyHash, true); + (bool fulfilled, , ) = s_testConsumer.s_requests(requestId); + assertEq(fulfilled, false); + + // Uncomment these console logs to see info about the request: + // console.log("requestId: ", requestId); + // console.log("preSeed: ", preSeed); + // console.log("sender: ", address(s_testConsumer)); + + // Move on to the next block. + // Store the previous block's blockhash, and assert that it is as expected. + vm.roll(requestBlock + 1); + s_bhs.store(requestBlock); + assertEq(hex"000000000000000000000000000000000000000000000000000000000000000a", s_bhs.getBlockhash(requestBlock)); + + // Fulfill the request. + // Proof generated via the generate-proof-v2-plus script command. Example usage: + /* + go run . 
generate-proof-v2-plus \ + -key-hash 0x9f2353bde94264dbc3d554a94cceba2d7d2b4fdce4304d3e09a1fea9fbeb1528 \ + -pre-seed 93724884573574303181157854277074121673523280784530506403108144933983063023487 \ + -block-hash 0x000000000000000000000000000000000000000000000000000000000000000a \ + -block-num 10 \ + -sender 0x90A8820424CC8a819d14cBdE54D12fD3fbFa9bb2 \ + -native-payment true + */ + VRF.Proof memory proof = VRF.Proof({ + pk: [ + 72488970228380509287422715226575535698893157273063074627791787432852706183111, + 62070622898698443831883535403436258712770888294397026493185421712108624767191 + ], + gamma: [ + 51111463251706978184511913295560024261167135799300172382907308330135472647507, + 41885656274025752055847945432737871864088659248922821023734315208027501951872 + ], + c: 96917856581077810363012153828220232197567408835708926581335248000925197916153, + s: 103298896676233752268329042222773891728807677368628421408380318882272184455566, + seed: 93724884573574303181157854277074121673523280784530506403108144933983063023487, + uWitness: 0xFCaA10875C6692f6CcC86c64300eb0b52f2D4323, + cGammaWitness: [ + 61463607927970680172418313129927007099021056249775757132623753443657677198526, + 48686021866486086188742596461341782400160109177829661164208082534005682984658 + ], + sHashWitness: [ + 91508089836242281395929619352465003226819385335975246221498243754781593857533, + 63571625936444669399167157725633389238098818902162172059681813608664564703308 + ], + zInv: 97568175302326019383632009699686265453584842953005404815285123863099260038246 + }); + VRFCoordinatorV2_5.RequestCommitment memory rc = VRFCoordinatorV2_5.RequestCommitment({ + blockNum: requestBlock, + subId: subId, + callbackGasLimit: 1_000_000, + numWords: 1, + sender: address(s_testConsumer), + extraArgs: VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: true})) + }); + + return (proof, rc, subId, requestId); + } + + function testRequestAndFulfillRandomWords_NetworkGasPriceExceedsGasLane() public { + ( + 
VRF.Proof memory proof, + VRFCoordinatorV2_5.RequestCommitment memory rc, + , + + ) = setupSubAndRequestRandomnessNativePayment(); + + // network gas is higher than gas lane max gas + uint256 networkGasPrice = GAS_LANE_MAX_GAS + 1; + vm.txGasPrice(networkGasPrice); + vm.expectRevert( + abi.encodeWithSelector(VRFCoordinatorV2_5.GasPriceExceeded.selector, networkGasPrice, GAS_LANE_MAX_GAS) + ); + s_testCoordinator.fulfillRandomWords(proof, rc, false); + } + + function testRequestAndFulfillRandomWords_OnlyPremium_NativePayment() public { + ( + VRF.Proof memory proof, + VRFCoordinatorV2_5.RequestCommitment memory rc, + uint256 subId, + uint256 requestId + ) = setupSubAndRequestRandomnessNativePayment(); + (, uint96 nativeBalanceBefore, , , ) = s_testCoordinator.getSubscription(subId); + + // network gas is twice the gas lane max gas + uint256 networkGasPrice = GAS_LANE_MAX_GAS * 2; + vm.txGasPrice(networkGasPrice); + + uint256 outputSeed = s_testCoordinator.getRandomnessFromProofExternal(proof, rc).randomness; + vm.recordLogs(); + uint96 payment = s_testCoordinator.fulfillRandomWords(proof, rc, true /* onlyPremium */); + VmSafe.Log[] memory entries = vm.getRecordedLogs(); + assertEq(entries[0].topics[1], bytes32(uint256(requestId))); + assertEq(entries[0].topics[2], bytes32(uint256(subId))); + (uint256 loggedOutputSeed, , bool loggedSuccess) = abi.decode(entries[0].data, (uint256, uint256, bool)); + assertEq(loggedOutputSeed, outputSeed); + assertEq(loggedSuccess, true); + + (bool fulfilled, , ) = s_testConsumer.s_requests(requestId); + assertEq(fulfilled, true); + + // The cost of fulfillRandomWords is approximately 70_000 gas. + // gasAfterPaymentCalculation is 50_000. 
+ // + // The cost of the VRF fulfillment charged to the user is: + // baseFeeWei = weiPerUnitGas * (gasAfterPaymentCalculation + startGas - gasleft()) + // network gas price is capped at gas lane max gas (5000 gwei) + // baseFeeWei = 5e12 * (50_000 + 70_000) + // baseFeeWei = 6e17 + // flatFeeWei = 1e12 * (fulfillmentFlatFeeNativePPM) + // flatFeeWei = 1e12 * 500_000 = 5e17 + // ... + // billed_fee = baseFeeWei * (linkPremiumPercentage / 100) + 5e17 + // billed_fee = 6e17 * 0.15 + 5e17 + // billed_fee = 5.9e+17 + (, uint96 nativeBalanceAfter, , , ) = s_testCoordinator.getSubscription(subId); + // 1e15 is less than 1 percent discrepancy + assertApproxEqAbs(payment, 5.9 * 1e17, 1e15); + assertApproxEqAbs(nativeBalanceAfter, nativeBalanceBefore - 5.9 * 1e17, 1e15); + } + + function testRequestAndFulfillRandomWords_OnlyPremium_LinkPayment() public { + ( + VRF.Proof memory proof, + VRFCoordinatorV2_5.RequestCommitment memory rc, + uint256 subId, + uint256 requestId + ) = setupSubAndRequestRandomnessPLIPayment(); + (uint96 linkBalanceBefore, , , , ) = s_testCoordinator.getSubscription(subId); + + // network gas is twice the gas lane max gas + uint256 networkGasPrice = GAS_LANE_MAX_GAS * 5; + vm.txGasPrice(networkGasPrice); + + uint256 outputSeed = s_testCoordinator.getRandomnessFromProofExternal(proof, rc).randomness; + vm.recordLogs(); + uint96 payment = s_testCoordinator.fulfillRandomWords(proof, rc, true /* onlyPremium */); + + VmSafe.Log[] memory entries = vm.getRecordedLogs(); + assertEq(entries[0].topics[1], bytes32(uint256(requestId))); + assertEq(entries[0].topics[2], bytes32(uint256(subId))); + (uint256 loggedOutputSeed, , bool loggedSuccess) = abi.decode(entries[0].data, (uint256, uint256, bool)); + assertEq(loggedOutputSeed, outputSeed); + assertEq(loggedSuccess, true); + + (bool fulfilled, , ) = s_testConsumer.s_requests(requestId); + assertEq(fulfilled, true); + + // The cost of fulfillRandomWords is approximately 97_000 gas. 
+ // gasAfterPaymentCalculation is 50_000. + // + // The cost of the VRF fulfillment charged to the user is: + // paymentNoFee = (weiPerUnitGas * (gasAfterPaymentCalculation + startGas - gasleft() + l1CostWei) / link_native_ratio) + // network gas price is capped at gas lane max gas (5000 gwei) + // paymentNoFee = (5e12 * (50_000 + 97_000 + 0)) / .5 + // paymentNoFee = 1.47e+18 + // flatFeeWei = 1e12 * (fulfillmentFlatFeeNativePPM - fulfillmentFlatFeeLinkDiscountPPM) + // flatFeeWei = 1e12 * (500_000 - 100_000) + // flatFeeJuels = 1e18 * flatFeeWei / link_native_ratio + // flatFeeJuels = 4e17 / 0.5 = 8e17 + // billed_fee = paymentNoFee * (10 / 100) + 8e17 + // billed_fee = 1.47e+18 * 0.1 + 8e17 + // billed_fee = 9.47e+17 + // note: delta is doubled from the native test to account for more variance due to the link/native ratio + (uint96 linkBalanceAfter, , , , ) = s_testCoordinator.getSubscription(subId); + // 1e15 is less than 1 percent discrepancy + assertApproxEqAbs(payment, 9.47 * 1e17, 1e15); + assertApproxEqAbs(linkBalanceAfter, linkBalanceBefore - 9.47 * 1e17, 1e15); + } +} diff --git a/contracts/test/v0.8/foundry/vrf/VRFV2PlusSubscriptionAPI.t.sol b/contracts/test/v0.8/foundry/vrf/VRFV2PlusSubscriptionAPI.t.sol new file mode 100644 index 00000000..488c71c5 --- /dev/null +++ b/contracts/test/v0.8/foundry/vrf/VRFV2PlusSubscriptionAPI.t.sol @@ -0,0 +1,627 @@ +pragma solidity 0.8.6; + +import "../BaseTest.t.sol"; +import {ExposedVRFCoordinatorV2_5} from "../../../../src/v0.8/vrf/dev/testhelpers/ExposedVRFCoordinatorV2_5.sol"; +import {SubscriptionAPI} from "../../../../src/v0.8/vrf/dev/SubscriptionAPI.sol"; +import {MockLinkToken} from "../../../../src/v0.8/mocks/MockLinkToken.sol"; +import {MockV3Aggregator} from "../../../../src/v0.8/tests/MockV3Aggregator.sol"; +import "@openzeppelin/contracts/utils/Strings.sol"; // for Strings.toString + +contract VRFV2PlusSubscriptionAPITest is BaseTest { + event SubscriptionFunded(uint256 indexed subId, uint256 oldBalance, 
uint256 newBalance); + event SubscriptionFundedWithNative(uint256 indexed subId, uint256 oldNativeBalance, uint256 newNativeBalance); + event SubscriptionCanceled(uint256 indexed subId, address to, uint256 amountLink, uint256 amountNative); + event FundsRecovered(address to, uint256 amountLink); + event NativeFundsRecovered(address to, uint256 amountNative); + event SubscriptionOwnerTransferRequested(uint256 indexed subId, address from, address to); + event SubscriptionOwnerTransferred(uint256 indexed subId, address from, address to); + event SubscriptionConsumerAdded(uint256 indexed subId, address consumer); + + ExposedVRFCoordinatorV2_5 s_subscriptionAPI; + + function setUp() public override { + BaseTest.setUp(); + address bhs = makeAddr("bhs"); + s_subscriptionAPI = new ExposedVRFCoordinatorV2_5(bhs); + } + + function testDefaultState() public { + assertEq(address(s_subscriptionAPI.PLI()), address(0)); + assertEq(address(s_subscriptionAPI.PLI_NATIVE_FEED()), address(0)); + assertEq(s_subscriptionAPI.s_currentSubNonce(), 0); + assertEq(s_subscriptionAPI.getActiveSubscriptionIdsLength(), 0); + assertEq(s_subscriptionAPI.s_totalBalance(), 0); + assertEq(s_subscriptionAPI.s_totalNativeBalance(), 0); + } + + function testSetPLIAndPLINativeFeed() public { + address link = makeAddr("link"); + address linkNativeFeed = makeAddr("linkNativeFeed"); + s_subscriptionAPI.setPLIAndPLINativeFeed(link, linkNativeFeed); + assertEq(address(s_subscriptionAPI.PLI()), link); + assertEq(address(s_subscriptionAPI.PLI_NATIVE_FEED()), linkNativeFeed); + + // try setting it again, should revert + vm.expectRevert(SubscriptionAPI.LinkAlreadySet.selector); + s_subscriptionAPI.setPLIAndPLINativeFeed(link, linkNativeFeed); + } + + function testOwnerCancelSubscriptionNoFunds() public { + // CASE: new subscription w/ no funds at all + // Should cancel trivially + + // Note that the link token is not set, but this should still + // not fail in that case. 
+ + // Create the subscription from a separate address + address subOwner = makeAddr("subOwner"); + changePrank(subOwner); + uint64 nonceBefore = s_subscriptionAPI.s_currentSubNonce(); + uint256 subId = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.s_currentSubNonce(), nonceBefore + 1); + + // change back to owner and cancel the subscription + changePrank(OWNER); + vm.expectEmit(true, false, false, true); + emit SubscriptionCanceled(subId, subOwner, 0, 0); + s_subscriptionAPI.ownerCancelSubscription(subId); + + // assert that the subscription no longer exists + assertEq(s_subscriptionAPI.getActiveSubscriptionIdsLength(), 0); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).owner, address(0)); + // no point in checking s_subscriptions because all fields are zeroed out + // due to no balance and no requests made + } + + function testOwnerCancelSubscriptionNativeFundsOnly() public { + // CASE: new subscription with native funds only + // no link funds. + // should cancel and return the native funds + + // Create the subscription from a separate address + address subOwner = makeAddr("subOwner"); + changePrank(subOwner); + uint64 nonceBefore = s_subscriptionAPI.s_currentSubNonce(); + uint256 subId = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.s_currentSubNonce(), nonceBefore + 1); + + // fund the subscription with ether + vm.deal(subOwner, 10 ether); + vm.expectEmit(true, false, false, true); + emit SubscriptionFundedWithNative(subId, 0, 5 ether); + s_subscriptionAPI.fundSubscriptionWithNative{value: 5 ether}(subId); + + // change back to owner and cancel the subscription + changePrank(OWNER); + vm.expectEmit(true, false, false, true); + emit SubscriptionCanceled(subId, subOwner, 0 /* link balance */, 5 ether /* native balance */); + s_subscriptionAPI.ownerCancelSubscription(subId); + + // assert that the subscription no longer exists + assertEq(s_subscriptionAPI.getActiveSubscriptionIdsLength(), 0); + 
assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).owner, address(0)); + assertEq(s_subscriptionAPI.getSubscriptionStruct(subId).nativeBalance, 0); + + // check the native balance of the subOwner, should be 10 ether + assertEq(address(subOwner).balance, 10 ether); + } + + function testOwnerCancelSubscriptionLinkFundsOnly() public { + // CASE: new subscription with link funds only + // no native funds. + // should cancel and return the link funds + + // Create link token and set the link token on the subscription api object + MockLinkToken linkToken = new MockLinkToken(); + s_subscriptionAPI.setPLIAndPLINativeFeed(address(linkToken), address(0)); + assertEq(address(s_subscriptionAPI.PLI()), address(linkToken)); + + // Create the subscription from a separate address + address subOwner = makeAddr("subOwner"); + changePrank(subOwner); + uint64 nonceBefore = s_subscriptionAPI.s_currentSubNonce(); + uint256 subId = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.s_currentSubNonce(), nonceBefore + 1); + + // fund the subscription with link + // can do it from the owner acct because anyone can fund a subscription + changePrank(OWNER); + vm.expectEmit(true, false, false, true); + emit SubscriptionFunded(subId, 0, 5 ether); + bool success = linkToken.transferAndCall(address(s_subscriptionAPI), 5 ether, abi.encode(subId)); + assertTrue(success, "failed link transfer and call"); + + // change back to owner and cancel the subscription + vm.expectEmit(true, false, false, true); + emit SubscriptionCanceled(subId, subOwner, 5 ether /* link balance */, 0 /* native balance */); + s_subscriptionAPI.ownerCancelSubscription(subId); + + // assert that the subscription no longer exists + assertEq(s_subscriptionAPI.getActiveSubscriptionIdsLength(), 0); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).owner, address(0)); + assertEq(s_subscriptionAPI.getSubscriptionStruct(subId).balance, 0); + + // check the link balance of the sub owner, should be 5 PLI + 
assertEq(linkToken.balanceOf(subOwner), 5 ether); + } + + function testOwnerCancelSubscriptionNativeAndLinkFunds() public { + // CASE: new subscription with link and native funds + // should cancel and return both link and native funds + + // Create link token and set the link token on the subscription api object + MockLinkToken linkToken = new MockLinkToken(); + s_subscriptionAPI.setPLIAndPLINativeFeed(address(linkToken), address(0)); + assertEq(address(s_subscriptionAPI.PLI()), address(linkToken)); + + // Create the subscription from a separate address + address subOwner = makeAddr("subOwner"); + changePrank(subOwner); + uint64 nonceBefore = s_subscriptionAPI.s_currentSubNonce(); + uint256 subId = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.s_currentSubNonce(), nonceBefore + 1); + + // fund the subscription with link + changePrank(OWNER); + vm.expectEmit(true, false, false, true); + emit SubscriptionFunded(subId, 0, 5 ether); + bool success = linkToken.transferAndCall(address(s_subscriptionAPI), 5 ether, abi.encode(subId)); + assertTrue(success, "failed link transfer and call"); + + // fund the subscription with ether + vm.deal(subOwner, 10 ether); + changePrank(subOwner); + vm.expectEmit(true, false, false, true); + emit SubscriptionFundedWithNative(subId, 0, 5 ether); + s_subscriptionAPI.fundSubscriptionWithNative{value: 5 ether}(subId); + + // change back to owner and cancel the subscription + changePrank(OWNER); + vm.expectEmit(true, false, false, true); + emit SubscriptionCanceled(subId, subOwner, 5 ether /* link balance */, 5 ether /* native balance */); + s_subscriptionAPI.ownerCancelSubscription(subId); + + // assert that the subscription no longer exists + assertEq(s_subscriptionAPI.getActiveSubscriptionIdsLength(), 0); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).owner, address(0)); + assertEq(s_subscriptionAPI.getSubscriptionStruct(subId).balance, 0); + 
assertEq(s_subscriptionAPI.getSubscriptionStruct(subId).nativeBalance, 0); + + // check the link balance of the sub owner, should be 5 PLI + assertEq(linkToken.balanceOf(subOwner), 5 ether, "link balance incorrect"); + // check the ether balance of the sub owner, should be 10 ether + assertEq(address(subOwner).balance, 10 ether, "native balance incorrect"); + } + + function testRecoverFundsPLINotSet() public { + // CASE: link token not set + // should revert with error LinkNotSet + + // call recoverFunds + vm.expectRevert(SubscriptionAPI.LinkNotSet.selector); + s_subscriptionAPI.recoverFunds(OWNER); + } + + function testRecoverFundsBalanceInvariantViolated() public { + // CASE: link token set + // and internal balance is greater than external balance + + // Create link token and set the link token on the subscription api object + MockLinkToken linkToken = new MockLinkToken(); + s_subscriptionAPI.setPLIAndPLINativeFeed(address(linkToken), address(0)); + assertEq(address(s_subscriptionAPI.PLI()), address(linkToken)); + + // set the total balance to be greater than the external balance + // so that we trigger the invariant violation + // note that this field is not modifiable in the actual contracts + // other than through onTokenTransfer or similar functions + s_subscriptionAPI.setTotalBalanceTestingOnlyXXX(100 ether); + + // call recoverFunds + vm.expectRevert(abi.encodeWithSelector(SubscriptionAPI.BalanceInvariantViolated.selector, 100 ether, 0)); + s_subscriptionAPI.recoverFunds(OWNER); + } + + function testRecoverFundsAmountToTransfer() public { + // CASE: link token set + // and internal balance is less than external balance + // (i.e invariant is not violated) + // should recover funds successfully + + // Create link token and set the link token on the subscription api object + MockLinkToken linkToken = new MockLinkToken(); + s_subscriptionAPI.setPLIAndPLINativeFeed(address(linkToken), address(0)); + assertEq(address(s_subscriptionAPI.PLI()), 
address(linkToken)); + + // transfer 10 PLI to the contract to recover + bool success = linkToken.transfer(address(s_subscriptionAPI), 10 ether); + assertTrue(success, "failed link transfer"); + + // call recoverFunds + vm.expectEmit(true, false, false, true); + emit FundsRecovered(OWNER, 10 ether); + s_subscriptionAPI.recoverFunds(OWNER); + } + + function testRecoverFundsNothingToTransfer() public { + // CASE: link token set + // and there is nothing to transfer + // should do nothing at all + + // Create link token and set the link token on the subscription api object + MockLinkToken linkToken = new MockLinkToken(); + s_subscriptionAPI.setPLIAndPLINativeFeed(address(linkToken), address(0)); + assertEq(address(s_subscriptionAPI.PLI()), address(linkToken)); + + // create a subscription and fund it with 5 PLI + address subOwner = makeAddr("subOwner"); + changePrank(subOwner); + uint64 nonceBefore = s_subscriptionAPI.s_currentSubNonce(); + uint256 subId = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.s_currentSubNonce(), nonceBefore + 1); + + // fund the subscription with link + changePrank(OWNER); + vm.expectEmit(true, false, false, true); + emit SubscriptionFunded(subId, 0, 5 ether); + bool success = linkToken.transferAndCall(address(s_subscriptionAPI), 5 ether, abi.encode(subId)); + assertTrue(success, "failed link transfer and call"); + + // call recoverFunds, nothing should happen because external balance == internal balance + s_subscriptionAPI.recoverFunds(OWNER); + assertEq(linkToken.balanceOf(address(s_subscriptionAPI)), s_subscriptionAPI.s_totalBalance()); + } + + function testRecoverNativeFundsBalanceInvariantViolated() public { + // set the total balance to be greater than the external balance + // so that we trigger the invariant violation + // note that this field is not modifiable in the actual contracts + // other than through onTokenTransfer or similar functions + s_subscriptionAPI.setTotalNativeBalanceTestingOnlyXXX(100 ether); 
+
+ // call recoverFunds
+ vm.expectRevert(abi.encodeWithSelector(SubscriptionAPI.BalanceInvariantViolated.selector, 100 ether, 0));
+ s_subscriptionAPI.recoverNativeFunds(payable(OWNER));
+ }
+
+ function testRecoverNativeFundsAmountToTransfer() public {
+ // transfer 10 ether to the contract to recover
+ vm.deal(address(s_subscriptionAPI), 10 ether);
+
+ // call recoverFunds
+ vm.expectEmit(true, false, false, true);
+ emit NativeFundsRecovered(OWNER, 10 ether);
+ s_subscriptionAPI.recoverNativeFunds(payable(OWNER));
+ }
+
+ function testRecoverNativeFundsNothingToTransfer() public {
+ // create a subscription and fund it with 5 ether
+ address subOwner = makeAddr("subOwner");
+ changePrank(subOwner);
+ uint64 nonceBefore = s_subscriptionAPI.s_currentSubNonce();
+ uint256 subId = s_subscriptionAPI.createSubscription();
+ assertEq(s_subscriptionAPI.s_currentSubNonce(), nonceBefore + 1);
+
+ // fund the subscription with ether
+ vm.deal(subOwner, 5 ether);
+ changePrank(subOwner);
+ vm.expectEmit(true, false, false, true);
+ emit SubscriptionFundedWithNative(subId, 0, 5 ether);
+ s_subscriptionAPI.fundSubscriptionWithNative{value: 5 ether}(subId);
+
+ // call recoverNativeFunds, nothing should happen because external balance == internal balance
+ changePrank(OWNER);
+ s_subscriptionAPI.recoverNativeFunds(payable(OWNER));
+ assertEq(address(s_subscriptionAPI).balance, s_subscriptionAPI.s_totalNativeBalance());
+ }
+
+ function testWithdrawNoLink() public {
+ // CASE: no link token set
+ vm.expectRevert(SubscriptionAPI.LinkNotSet.selector);
+ s_subscriptionAPI.withdraw(OWNER);
+ }
+
+ function testWithdrawInsufficientBalance() public {
+ // CASE: link token set, trying to withdraw
+ // more than balance
+ MockLinkToken linkToken = new MockLinkToken();
+ s_subscriptionAPI.setPLIAndPLINativeFeed(address(linkToken), address(0));
+ assertEq(address(s_subscriptionAPI.PLI()), address(linkToken));
+
+ // call withdraw
+ 
vm.expectRevert(SubscriptionAPI.InsufficientBalance.selector); + s_subscriptionAPI.withdraw(OWNER); + } + + function testWithdrawSufficientBalanceLinkSet() public { + // CASE: link token set, trying to withdraw + // less than balance + MockLinkToken linkToken = new MockLinkToken(); + s_subscriptionAPI.setPLIAndPLINativeFeed(address(linkToken), address(0)); + assertEq(address(s_subscriptionAPI.PLI()), address(linkToken)); + + // transfer 10 PLI to the contract to withdraw + bool success = linkToken.transfer(address(s_subscriptionAPI), 10 ether); + assertTrue(success, "failed link transfer"); + + // set the withdrawable tokens of the contract to be 1 ether + s_subscriptionAPI.setWithdrawableTokensTestingOnlyXXX(1 ether); + assertEq(s_subscriptionAPI.getWithdrawableTokensTestingOnlyXXX(), 1 ether); + + // set the total balance to be the same as the link balance for consistency + // (this is not necessary for the test, but just to be sane) + s_subscriptionAPI.setTotalBalanceTestingOnlyXXX(10 ether); + + // call Withdraw from owner address + uint256 ownerBalance = linkToken.balanceOf(OWNER); + changePrank(OWNER); + s_subscriptionAPI.withdraw(OWNER); + // assert link balance of owner + assertEq(linkToken.balanceOf(OWNER) - ownerBalance, 1 ether, "owner link balance incorrect"); + // assert state of subscription api + assertEq(s_subscriptionAPI.getWithdrawableTokensTestingOnlyXXX(), 0, "owner withdrawable tokens incorrect"); + // assert that total balance is changed by the withdrawn amount + assertEq(s_subscriptionAPI.s_totalBalance(), 9 ether, "total balance incorrect"); + } + + function testWithdrawNativeInsufficientBalance() public { + // CASE: trying to withdraw more than balance + // should revert with InsufficientBalance + + // call WithdrawNative + changePrank(OWNER); + vm.expectRevert(SubscriptionAPI.InsufficientBalance.selector); + s_subscriptionAPI.withdrawNative(payable(OWNER)); + } + + function testWithdrawLinkInvalidOwner() public { + address invalidAddress = 
makeAddr("invalidAddress"); + changePrank(invalidAddress); + vm.expectRevert("Only callable by owner"); + s_subscriptionAPI.withdraw(payable(OWNER)); + } + + function testWithdrawNativeInvalidOwner() public { + address invalidAddress = makeAddr("invalidAddress"); + changePrank(invalidAddress); + vm.expectRevert("Only callable by owner"); + s_subscriptionAPI.withdrawNative(payable(OWNER)); + } + + function testWithdrawNativeSufficientBalance() public { + // CASE: trying to withdraw less than balance + // should withdraw successfully + + // transfer 10 ether to the contract to withdraw + vm.deal(address(s_subscriptionAPI), 10 ether); + + // set the withdrawable eth of the contract to be 1 ether + s_subscriptionAPI.setWithdrawableNativeTestingOnlyXXX(1 ether); + assertEq(s_subscriptionAPI.getWithdrawableNativeTestingOnlyXXX(), 1 ether); + + // set the total balance to be the same as the eth balance for consistency + // (this is not necessary for the test, but just to be sane) + s_subscriptionAPI.setTotalNativeBalanceTestingOnlyXXX(10 ether); + + // call WithdrawNative from owner address + changePrank(OWNER); + s_subscriptionAPI.withdrawNative(payable(OWNER)); + // assert native balance + assertEq(address(OWNER).balance, 1 ether, "owner native balance incorrect"); + // assert state of subscription api + assertEq(s_subscriptionAPI.getWithdrawableNativeTestingOnlyXXX(), 0, "owner withdrawable native incorrect"); + // assert that total balance is changed by the withdrawn amount + assertEq(s_subscriptionAPI.s_totalNativeBalance(), 9 ether, "total native balance incorrect"); + } + + function testOnTokenTransferCallerNotLink() public { + vm.expectRevert(SubscriptionAPI.OnlyCallableFromLink.selector); + s_subscriptionAPI.onTokenTransfer(makeAddr("someaddress"), 1 ether, abi.encode(uint256(1))); + } + + function testOnTokenTransferInvalidCalldata() public { + // create and set link token on subscription api + MockLinkToken linkToken = new MockLinkToken(); + 
s_subscriptionAPI.setPLIAndPLINativeFeed(address(linkToken), address(0)); + assertEq(address(s_subscriptionAPI.PLI()), address(linkToken)); + + // call link.transferAndCall with invalid calldata + vm.expectRevert(SubscriptionAPI.InvalidCalldata.selector); + linkToken.transferAndCall(address(s_subscriptionAPI), 1 ether, abi.encode(uint256(1), address(1))); + } + + function testOnTokenTransferInvalidSubscriptionId() public { + // create and set link token on subscription api + MockLinkToken linkToken = new MockLinkToken(); + s_subscriptionAPI.setPLIAndPLINativeFeed(address(linkToken), address(0)); + assertEq(address(s_subscriptionAPI.PLI()), address(linkToken)); + + // generate bogus sub id + uint256 subId = uint256(keccak256("idontexist")); + + // try to fund bogus sub id + vm.expectRevert(SubscriptionAPI.InvalidSubscription.selector); + linkToken.transferAndCall(address(s_subscriptionAPI), 1 ether, abi.encode(subId)); + } + + function testOnTokenTransferSuccess() public { + // happy path link funding test + // create and set link token on subscription api + MockLinkToken linkToken = new MockLinkToken(); + s_subscriptionAPI.setPLIAndPLINativeFeed(address(linkToken), address(0)); + assertEq(address(s_subscriptionAPI.PLI()), address(linkToken)); + + // create a subscription and fund it with 5 PLI + address subOwner = makeAddr("subOwner"); + changePrank(subOwner); + uint64 nonceBefore = s_subscriptionAPI.s_currentSubNonce(); + uint256 subId = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.s_currentSubNonce(), nonceBefore + 1); + + // fund the subscription with link + changePrank(OWNER); + vm.expectEmit(true, false, false, true); + emit SubscriptionFunded(subId, 0, 5 ether); + bool success = linkToken.transferAndCall(address(s_subscriptionAPI), 5 ether, abi.encode(subId)); + assertTrue(success, "failed link transfer and call"); + + // assert that the subscription is funded + assertEq(s_subscriptionAPI.getSubscriptionStruct(subId).balance, 5 ether); 
+ } + + function testFundSubscriptionWithNativeInvalidSubscriptionId() public { + // CASE: invalid subscription id + // should revert with InvalidSubscription + + uint256 subId = uint256(keccak256("idontexist")); + + // try to fund the subscription with native, should fail + address funder = makeAddr("funder"); + vm.deal(funder, 5 ether); + changePrank(funder); + vm.expectRevert(SubscriptionAPI.InvalidSubscription.selector); + s_subscriptionAPI.fundSubscriptionWithNative{value: 5 ether}(subId); + } + + function testFundSubscriptionWithNative() public { + // happy path test + // funding subscription with native + + // create a subscription and fund it with native + address subOwner = makeAddr("subOwner"); + changePrank(subOwner); + uint64 nonceBefore = s_subscriptionAPI.s_currentSubNonce(); + uint256 subId = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.s_currentSubNonce(), nonceBefore + 1); + + // fund the subscription with native + vm.deal(subOwner, 5 ether); + changePrank(subOwner); + vm.expectEmit(true, false, false, true); + emit SubscriptionFundedWithNative(subId, 0, 5 ether); + s_subscriptionAPI.fundSubscriptionWithNative{value: 5 ether}(subId); + + // assert that the subscription is funded + assertEq(s_subscriptionAPI.getSubscriptionStruct(subId).nativeBalance, 5 ether); + } + + function testCreateSubscription() public { + // test that the subscription is created successfully + // and test the initial state of the subscription + address subOwner = makeAddr("subOwner"); + changePrank(subOwner); + uint64 nonceBefore = s_subscriptionAPI.s_currentSubNonce(); + uint256 subId = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.s_currentSubNonce(), nonceBefore + 1); + assertEq(s_subscriptionAPI.getActiveSubscriptionIdsLength(), 1); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).owner, subOwner); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).consumers.length, 0); + 
assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).requestedOwner, address(0)); + } + + function testCreateSubscriptionRecreate() public { + // create two subscriptions from the same eoa + // they should never be the same due to nonce incrementation + address subOwner = makeAddr("subOwner"); + changePrank(subOwner); + uint64 nonceBefore = s_subscriptionAPI.s_currentSubNonce(); + uint256 subId1 = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.s_currentSubNonce(), nonceBefore + 1); + uint256 subId2 = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.s_currentSubNonce(), nonceBefore + 2); + assertTrue(subId1 != subId2); + } + + function testSubscriptionOwnershipTransfer() public { + // create two eoa's, and create a subscription from one of them + // and transfer ownership to the other + // assert that the subscription is now owned by the other eoa + address oldOwner = makeAddr("oldOwner"); + address newOwner = makeAddr("newOwner"); + + // create sub + changePrank(oldOwner); + uint256 subId = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).owner, oldOwner); + + // request ownership transfer + changePrank(oldOwner); + vm.expectEmit(true, false, false, true); + emit SubscriptionOwnerTransferRequested(subId, oldOwner, newOwner); + s_subscriptionAPI.requestSubscriptionOwnerTransfer(subId, newOwner); + + // accept ownership transfer from newOwner + changePrank(newOwner); + vm.expectEmit(true, false, false, true); + emit SubscriptionOwnerTransferred(subId, oldOwner, newOwner); + s_subscriptionAPI.acceptSubscriptionOwnerTransfer(subId); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).requestedOwner, address(0)); + } + + function testAddConsumerTooManyConsumers() public { + // add 100 consumers to a sub and then + // try adding one more and see the revert + address subOwner = makeAddr("subOwner"); + changePrank(subOwner); + uint256 subId = 
s_subscriptionAPI.createSubscription(); + for (uint256 i = 0; i < 100; i++) { + address consumer = makeAddr(Strings.toString(i)); + vm.expectEmit(true, false, false, true); + emit SubscriptionConsumerAdded(subId, consumer); + s_subscriptionAPI.addConsumer(subId, consumer); + } + + // try adding one more consumer, should revert + address consumer = makeAddr("consumer"); + changePrank(subOwner); + vm.expectRevert(SubscriptionAPI.TooManyConsumers.selector); + s_subscriptionAPI.addConsumer(subId, consumer); + } + + function testAddConsumerReaddSameConsumer() public { + // try adding the same consumer twice + // should be a no-op + // assert state is unchanged after the 2nd add + address subOwner = makeAddr("subOwner"); + address consumer = makeAddr("consumer"); + changePrank(subOwner); + uint256 subId = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).consumers.length, 0); + changePrank(subOwner); + vm.expectEmit(true, false, false, true); + emit SubscriptionConsumerAdded(subId, consumer); + s_subscriptionAPI.addConsumer(subId, consumer); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).consumers.length, 1); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).consumers[0], consumer); + + // add consumer again, should be no-op + changePrank(subOwner); + s_subscriptionAPI.addConsumer(subId, consumer); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).consumers.length, 1); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).consumers[0], consumer); + } + + function testAddConsumer() public { + // create a subscription and add a consumer + // assert subscription state afterwards + address subOwner = makeAddr("subOwner"); + address consumer = makeAddr("consumer"); + changePrank(subOwner); + uint256 subId = s_subscriptionAPI.createSubscription(); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).consumers.length, 0); + + // only subscription owner can add a consumer + address notSubOwner = 
makeAddr("notSubOwner"); + changePrank(notSubOwner); + vm.expectRevert(abi.encodeWithSelector(SubscriptionAPI.MustBeSubOwner.selector, subOwner)); + s_subscriptionAPI.addConsumer(subId, consumer); + + // subscription owner is able to add a consumer + changePrank(subOwner); + vm.expectEmit(true, false, false, true); + emit SubscriptionConsumerAdded(subId, consumer); + s_subscriptionAPI.addConsumer(subId, consumer); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).consumers.length, 1); + assertEq(s_subscriptionAPI.getSubscriptionConfig(subId).consumers[0], consumer); + } +} diff --git a/contracts/test/v0.8/foundry/vrf/VRFV2Wrapper.t.sol b/contracts/test/v0.8/foundry/vrf/VRFV2Wrapper.t.sol new file mode 100644 index 00000000..aad5d667 --- /dev/null +++ b/contracts/test/v0.8/foundry/vrf/VRFV2Wrapper.t.sol @@ -0,0 +1,255 @@ +pragma solidity 0.8.6; + +import "../BaseTest.t.sol"; +import {VRF} from "../../../../src/v0.8/vrf/VRF.sol"; +import {MockLinkToken} from "../../../../src/v0.8/mocks/MockLinkToken.sol"; +import {MockV3Aggregator} from "../../../../src/v0.8/tests/MockV3Aggregator.sol"; +import {ExposedVRFCoordinatorV2_5} from "../../../../src/v0.8/vrf/dev/testhelpers/ExposedVRFCoordinatorV2_5.sol"; +import {VRFV2PlusWrapperConsumerBase} from "../../../../src/v0.8/vrf/dev/VRFV2PlusWrapperConsumerBase.sol"; +import {VRFV2PlusWrapperConsumerExample} from "../../../../src/v0.8/vrf/dev/testhelpers/VRFV2PlusWrapperConsumerExample.sol"; +import {VRFCoordinatorV2_5} from "../../../../src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol"; +import {VRFV2PlusWrapper} from "../../../../src/v0.8/vrf/dev/VRFV2PlusWrapper.sol"; +import {VRFV2PlusClient} from "../../../../src/v0.8/vrf/dev/libraries/VRFV2PlusClient.sol"; +import {console} from "forge-std/console.sol"; + +contract VRFV2PlusWrapperTest is BaseTest { + address internal constant PLI_WHALE = 0xD883a6A1C22fC4AbFE938a5aDF9B2Cc31b1BF18B; + bytes32 vrfKeyHash = 
hex"9f2353bde94264dbc3d554a94cceba2d7d2b4fdce4304d3e09a1fea9fbeb1528"; + uint32 wrapperGasOverhead = 10_000; + uint32 coordinatorGasOverhead = 20_000; + + ExposedVRFCoordinatorV2_5 s_testCoordinator; + MockLinkToken s_linkToken; + MockV3Aggregator s_linkNativeFeed; + VRFV2PlusWrapper s_wrapper; + VRFV2PlusWrapperConsumerExample s_consumer; + + function setUp() public override { + BaseTest.setUp(); + + // Fund our users. + vm.roll(1); + vm.deal(PLI_WHALE, 10_000 ether); + changePrank(PLI_WHALE); + + // Deploy link token and link/native feed. + s_linkToken = new MockLinkToken(); + s_linkNativeFeed = new MockV3Aggregator(18, 500000000000000000); // .5 ETH (good for testing) + + // Deploy coordinator and consumer. + s_testCoordinator = new ExposedVRFCoordinatorV2_5(address(0)); + s_wrapper = new VRFV2PlusWrapper(address(s_linkToken), address(s_linkNativeFeed), address(s_testCoordinator)); + s_consumer = new VRFV2PlusWrapperConsumerExample(address(s_linkToken), address(s_wrapper)); + + // Configure the coordinator. 
+ s_testCoordinator.setPLIAndPLINativeFeed(address(s_linkToken), address(s_linkNativeFeed)); + setConfigCoordinator(); + setConfigWrapper(); + + s_testCoordinator.s_config(); + } + + function setConfigCoordinator() internal { + s_testCoordinator.setConfig( + 0, // minRequestConfirmations + 2_500_000, // maxGasLimit + 1, // stalenessSeconds + 50_000, // gasAfterPaymentCalculation + 50000000000000000, // fallbackWeiPerUnitLink + 500_000, // fulfillmentFlatFeeNativePPM + 100_000, // fulfillmentFlatFeeLinkDiscountPPM + 15, // nativePremiumPercentage + 10 // linkPremiumPercentage + ); + } + + function setConfigWrapper() internal { + s_wrapper.setConfig( + wrapperGasOverhead, // wrapper gas overhead + coordinatorGasOverhead, // coordinator gas overhead + 0, // premium percentage + vrfKeyHash, // keyHash + 10, // max number of words, + 1, // stalenessSeconds + 50000000000000000, // fallbackWeiPerUnitLink + 0, // fulfillmentFlatFeeLinkPPM + 0 // fulfillmentFlatFeeNativePPM + ); + ( + , + , + , + , + uint32 _wrapperGasOverhead, + uint32 _coordinatorGasOverhead, + uint8 _wrapperPremiumPercentage, + bytes32 _keyHash, + uint8 _maxNumWords + ) = s_wrapper.getConfig(); + assertEq(_wrapperGasOverhead, wrapperGasOverhead); + assertEq(_coordinatorGasOverhead, coordinatorGasOverhead); + assertEq(0, _wrapperPremiumPercentage); + assertEq(vrfKeyHash, _keyHash); + assertEq(10, _maxNumWords); + } + + event RandomWordsRequested( + bytes32 indexed keyHash, + uint256 requestId, + uint256 preSeed, + uint256 indexed subId, + uint16 minimumRequestConfirmations, + uint32 callbackGasLimit, + uint32 numWords, + bytes extraArgs, + address indexed sender + ); + + function testSetLinkAndLinkNativeFeed() public { + VRFV2PlusWrapper wrapper = new VRFV2PlusWrapper(address(0), address(0), address(s_testCoordinator)); + + // Set PLI and PLI/Native feed on wrapper. 
+ wrapper.setPLI(address(s_linkToken)); + wrapper.setLinkNativeFeed(address(s_linkNativeFeed)); + assertEq(address(wrapper.s_link()), address(s_linkToken)); + assertEq(address(wrapper.s_linkNativeFeed()), address(s_linkNativeFeed)); + + // Revert for subsequent assignment. + vm.expectRevert(VRFV2PlusWrapper.LinkAlreadySet.selector); + wrapper.setPLI(address(s_linkToken)); + + // Consumer can set PLI token. + VRFV2PlusWrapperConsumerExample consumer = new VRFV2PlusWrapperConsumerExample(address(0), address(wrapper)); + consumer.setLinkToken(address(s_linkToken)); + + // Revert for subsequent assignment. + vm.expectRevert(VRFV2PlusWrapperConsumerBase.PLIAlreadySet.selector); + consumer.setLinkToken(address(s_linkToken)); + } + + function testRequestAndFulfillRandomWordsNativeWrapper() public { + // Fund subscription. + s_testCoordinator.fundSubscriptionWithNative{value: 10 ether}(s_wrapper.SUBSCRIPTION_ID()); + vm.deal(address(s_consumer), 10 ether); + + // Get type and version. + assertEq(s_wrapper.typeAndVersion(), "VRFV2Wrapper 1.0.0"); + + // Cannot make request while disabled. + s_wrapper.disable(); + vm.expectRevert("wrapper is disabled"); + s_consumer.makeRequestNative(500_000, 0, 1); + s_wrapper.enable(); + + // Request randomness from wrapper. 
+ uint32 callbackGasLimit = 1_000_000; + vm.expectEmit(true, true, true, true); + (uint256 requestId, uint256 preSeed) = s_testCoordinator.computeRequestIdExternal( + vrfKeyHash, + address(s_wrapper), + s_wrapper.SUBSCRIPTION_ID(), + 2 + ); + uint32 EIP150Overhead = callbackGasLimit / 63 + 1; + emit RandomWordsRequested( + vrfKeyHash, + requestId, + preSeed, + s_wrapper.SUBSCRIPTION_ID(), // subId + 0, // minConfirmations + callbackGasLimit + EIP150Overhead + wrapperGasOverhead, // callbackGasLimit - accounts for EIP 150 + 1, // numWords + VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: true})), // extraArgs + address(s_wrapper) // requester + ); + requestId = s_consumer.makeRequestNative(callbackGasLimit, 0, 1); + + (uint256 paid, bool fulfilled, bool native) = s_consumer.s_requests(requestId); + uint32 expectedPaid = callbackGasLimit + wrapperGasOverhead + coordinatorGasOverhead; + uint256 wrapperNativeCostEstimate = s_wrapper.estimateRequestPriceNative(callbackGasLimit, tx.gasprice); + uint256 wrapperCostCalculation = s_wrapper.calculateRequestPriceNative(callbackGasLimit); + assertEq(paid, expectedPaid); + assertEq(uint256(paid), wrapperNativeCostEstimate); + assertEq(wrapperNativeCostEstimate, wrapperCostCalculation); + assertEq(fulfilled, false); + assertEq(native, true); + assertEq(address(s_consumer).balance, 10 ether - expectedPaid); + + (, uint256 gasLimit, ) = s_wrapper.s_callbacks(requestId); + assertEq(gasLimit, callbackGasLimit); + + changePrank(address(s_testCoordinator)); + uint256[] memory words = new uint256[](1); + words[0] = 123; + s_wrapper.rawFulfillRandomWords(requestId, words); + (, bool nowFulfilled, uint256[] memory storedWords) = s_consumer.getRequestStatus(requestId); + assertEq(nowFulfilled, true); + assertEq(storedWords[0], 123); + + // Withdraw funds from wrapper. 
+ changePrank(PLI_WHALE); + uint256 priorWhaleBalance = PLI_WHALE.balance; + s_wrapper.withdrawNative(PLI_WHALE, paid); + assertEq(PLI_WHALE.balance, priorWhaleBalance + paid); + assertEq(address(s_wrapper).balance, 0); + } + + function testRequestAndFulfillRandomWordsPLIWrapper() public { + // Fund subscription. + s_linkToken.transferAndCall(address(s_testCoordinator), 10 ether, abi.encode(s_wrapper.SUBSCRIPTION_ID())); + s_linkToken.transfer(address(s_consumer), 10 ether); + + // Request randomness from wrapper. + uint32 callbackGasLimit = 1_000_000; + vm.expectEmit(true, true, true, true); + (uint256 requestId, uint256 preSeed) = s_testCoordinator.computeRequestIdExternal( + vrfKeyHash, + address(s_wrapper), + s_wrapper.SUBSCRIPTION_ID(), + 2 + ); + uint32 EIP150Overhead = callbackGasLimit / 63 + 1; + emit RandomWordsRequested( + vrfKeyHash, + requestId, + preSeed, + s_wrapper.SUBSCRIPTION_ID(), // subId + 0, // minConfirmations + callbackGasLimit + EIP150Overhead + wrapperGasOverhead, // callbackGasLimit - accounts for EIP 150 + 1, // numWords + VRFV2PlusClient._argsToBytes(VRFV2PlusClient.ExtraArgsV1({nativePayment: false})), // extraArgs + address(s_wrapper) // requester + ); + s_consumer.makeRequest(callbackGasLimit, 0, 1); + + // Assert that the request was made correctly. 
+ (uint256 paid, bool fulfilled, bool native) = s_consumer.s_requests(requestId); + uint32 expectedPaid = (callbackGasLimit + wrapperGasOverhead + coordinatorGasOverhead) * 2; + uint256 wrapperCostEstimate = s_wrapper.estimateRequestPrice(callbackGasLimit, tx.gasprice); + uint256 wrapperCostCalculation = s_wrapper.calculateRequestPrice(callbackGasLimit); + assertEq(paid, expectedPaid); // 1_030_000 * 2 for link/native ratio + assertEq(uint256(paid), wrapperCostEstimate); + assertEq(wrapperCostEstimate, wrapperCostCalculation); + assertEq(fulfilled, false); + assertEq(native, false); + assertEq(s_linkToken.balanceOf(address(s_consumer)), 10 ether - expectedPaid); + (, uint256 gasLimit, ) = s_wrapper.s_callbacks(requestId); + assertEq(gasLimit, callbackGasLimit); + + // Fulfill the request. + changePrank(address(s_testCoordinator)); + uint256[] memory words = new uint256[](1); + words[0] = 456; + s_wrapper.rawFulfillRandomWords(requestId, words); + (, bool nowFulfilled, uint256[] memory storedWords) = s_consumer.getRequestStatus(requestId); + assertEq(nowFulfilled, true); + assertEq(storedWords[0], 456); + + // Withdraw funds from wrapper. 
+ changePrank(PLI_WHALE); + uint256 priorWhaleBalance = s_linkToken.balanceOf(PLI_WHALE); + s_wrapper.withdraw(PLI_WHALE, paid); + assertEq(s_linkToken.balanceOf(PLI_WHALE), priorWhaleBalance + paid); + assertEq(s_linkToken.balanceOf(address(s_wrapper)), 0); + } +} diff --git a/contracts/test/v0.8/foundry/vrf/VRFV2Wrapper_Migration.t.sol b/contracts/test/v0.8/foundry/vrf/VRFV2Wrapper_Migration.t.sol new file mode 100644 index 00000000..60335061 --- /dev/null +++ b/contracts/test/v0.8/foundry/vrf/VRFV2Wrapper_Migration.t.sol @@ -0,0 +1,358 @@ +pragma solidity 0.8.6; + +import "../BaseTest.t.sol"; +import {VRF} from "../../../../src/v0.8/vrf/VRF.sol"; +import {MockLinkToken} from "../../../../src/v0.8/mocks/MockLinkToken.sol"; +import {MockV3Aggregator} from "../../../../src/v0.8/tests/MockV3Aggregator.sol"; +import {ExposedVRFCoordinatorV2_5} from "../../../../src/v0.8/vrf/dev/testhelpers/ExposedVRFCoordinatorV2_5.sol"; +import {VRFCoordinatorV2Plus_V2Example} from "../../../../src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2Plus_V2Example.sol"; +import {VRFV2PlusWrapperConsumerBase} from "../../../../src/v0.8/vrf/dev/VRFV2PlusWrapperConsumerBase.sol"; +import {VRFV2PlusWrapperConsumerExample} from "../../../../src/v0.8/vrf/dev/testhelpers/VRFV2PlusWrapperConsumerExample.sol"; +import {SubscriptionAPI} from "../../../../src/v0.8/vrf/dev/SubscriptionAPI.sol"; +import {VRFCoordinatorV2_5} from "../../../../src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol"; +import {VRFV2PlusWrapper} from "../../../../src/v0.8/vrf/dev/VRFV2PlusWrapper.sol"; +import {VRFV2PlusClient} from "../../../../src/v0.8/vrf/dev/libraries/VRFV2PlusClient.sol"; + +contract VRFV2PlusWrapperTest is BaseTest { + address internal constant PLI_WHALE = 0xD883a6A1C22fC4AbFE938a5aDF9B2Cc31b1BF18B; + uint256 internal constant DEFAULT_NATIVE_FUNDING = 7 ether; // 7 ETH + uint256 internal constant DEFAULT_PLI_FUNDING = 10 ether; // 10 ETH + bytes32 vrfKeyHash = 
hex"9f2353bde94264dbc3d554a94cceba2d7d2b4fdce4304d3e09a1fea9fbeb1528"; + uint32 wrapperGasOverhead = 10_000; + uint32 coordinatorGasOverhead = 20_000; + + ExposedVRFCoordinatorV2_5 s_testCoordinator; + MockLinkToken s_linkToken; + MockV3Aggregator s_linkNativeFeed; + VRFV2PlusWrapper s_wrapper; + VRFV2PlusWrapperConsumerExample s_consumer; + + VRFCoordinatorV2Plus_V2Example s_newCoordinator; + + event CoordinatorRegistered(address coordinatorAddress); + event MigrationCompleted(address newCoordinator, uint256 subId); + event WrapperRequestMade(uint256 indexed requestId, uint256 paid); + + function setUp() public override { + BaseTest.setUp(); + + // Fund our users. + vm.roll(1); + vm.deal(PLI_WHALE, 10_000 ether); + changePrank(PLI_WHALE); + + // Deploy link token and link/native feed. + s_linkToken = new MockLinkToken(); + s_linkNativeFeed = new MockV3Aggregator(18, 500000000000000000); // .5 ETH (good for testing) + + // Deploy coordinator and consumer. + s_testCoordinator = new ExposedVRFCoordinatorV2_5(address(0)); + s_wrapper = new VRFV2PlusWrapper(address(s_linkToken), address(s_linkNativeFeed), address(s_testCoordinator)); + s_consumer = new VRFV2PlusWrapperConsumerExample(address(s_linkToken), address(s_wrapper)); + + // Configure the coordinator. 
+ s_testCoordinator.setPLIAndPLINativeFeed(address(s_linkToken), address(s_linkNativeFeed)); + setConfigCoordinator(); + setConfigWrapper(); + + s_testCoordinator.s_config(); + + // Data structures for Migrateable Wrapper + s_newCoordinator = new VRFCoordinatorV2Plus_V2Example(address(0), address(s_testCoordinator)); + vm.expectEmit( + false, // no first indexed topic + false, // no second indexed topic + false, // no third indexed topic + true // check data (target coordinator address) + ); + address newCoordinatorAddr = address(s_newCoordinator); + emit CoordinatorRegistered(newCoordinatorAddr); + s_testCoordinator.registerMigratableCoordinator(newCoordinatorAddr); + assertTrue(s_testCoordinator.isTargetRegisteredExternal(newCoordinatorAddr)); + } + + function setConfigCoordinator() internal { + s_testCoordinator.setConfig( + 0, // minRequestConfirmations + 2_500_000, // maxGasLimit + 1, // stalenessSeconds + 50_000, // gasAfterPaymentCalculation + 50000000000000000, // fallbackWeiPerUnitLink + 500_000, // fulfillmentFlatFeeNativePPM + 100_000, // fulfillmentFlatFeeLinkDiscountPPM + 15, // nativePremiumPercentage + 10 // linkPremiumPercentage + ); + } + + function setConfigWrapper() internal { + s_wrapper.setConfig( + wrapperGasOverhead, // wrapper gas overhead + coordinatorGasOverhead, // coordinator gas overhead + 0, // premium percentage + vrfKeyHash, // keyHash + 10, // max number of words, + 1, // stalenessSeconds + 50000000000000000, // fallbackWeiPerUnitLink + 0, // fulfillmentFlatFeeLinkPPM + 0 // fulfillmentFlatFeeNativePPM + ); + ( + , + , + , + , + uint32 _wrapperGasOverhead, + uint32 _coordinatorGasOverhead, + uint8 _wrapperPremiumPercentage, + bytes32 _keyHash, + uint8 _maxNumWords + ) = s_wrapper.getConfig(); + assertEq(_wrapperGasOverhead, wrapperGasOverhead); + assertEq(_coordinatorGasOverhead, coordinatorGasOverhead); + assertEq(0, _wrapperPremiumPercentage); + assertEq(vrfKeyHash, _keyHash); + assertEq(10, _maxNumWords); + } + + event 
RandomWordsRequested( + bytes32 indexed keyHash, + uint256 requestId, + uint256 preSeed, + uint256 indexed subId, + uint16 minimumRequestConfirmations, + uint32 callbackGasLimit, + uint32 numWords, + bytes extraArgs, + address indexed sender + ); + + function testMigrateWrapperPLIPayment() public { + s_linkToken.transfer(address(s_consumer), DEFAULT_PLI_FUNDING); + + uint256 subID = s_wrapper.SUBSCRIPTION_ID(); + address oldCoordinatorAddr = address(s_testCoordinator); + + // Fund subscription with native and PLI payment to check + // if funds are transferred to new subscription after call + // migration to new coordinator + s_linkToken.transferAndCall(oldCoordinatorAddr, DEFAULT_PLI_FUNDING, abi.encode(subID)); + s_testCoordinator.fundSubscriptionWithNative{value: DEFAULT_NATIVE_FUNDING}(subID); + + // Get type and version. + assertEq(s_wrapper.typeAndVersion(), "VRFV2Wrapper 1.0.0"); + + // subscription exists in V1 coordinator before migration + + ( + uint96 balance, + uint96 nativeBalance, + uint64 reqCount, + address owner, + address[] memory consumers + ) = s_testCoordinator.getSubscription(subID); + assertEq(reqCount, 0); + assertEq(balance, DEFAULT_PLI_FUNDING); + assertEq(nativeBalance, DEFAULT_NATIVE_FUNDING); + assertEq(owner, address(s_wrapper)); + assertEq(consumers.length, 1); + assertEq(consumers[0], address(s_wrapper)); + + vm.startPrank(PLI_WHALE); + + // Update wrapper to point to the new coordinator + vm.expectEmit( + false, // no first indexed field + false, // no second indexed field + false, // no third indexed field + true // check data fields + ); + address newCoordinatorAddr = address(s_newCoordinator); + emit MigrationCompleted(newCoordinatorAddr, subID); + + s_wrapper.migrate(newCoordinatorAddr); + + // subscription no longer exists in v1 coordinator after migration + vm.expectRevert(SubscriptionAPI.InvalidSubscription.selector); + s_testCoordinator.getSubscription(subID); + assertEq(s_testCoordinator.s_totalBalance(), 0); + 
assertEq(s_testCoordinator.s_totalNativeBalance(), 0); + assertEq(s_linkToken.balanceOf(oldCoordinatorAddr), 0); + assertEq(oldCoordinatorAddr.balance, 0); + + // subscription exists in v2 coordinator + (balance, nativeBalance, reqCount, owner, consumers) = s_newCoordinator.getSubscription(subID); + assertEq(owner, address(s_wrapper)); + assertEq(consumers.length, 1); + assertEq(consumers[0], address(s_wrapper)); + assertEq(reqCount, 0); + assertEq(balance, DEFAULT_PLI_FUNDING); + assertEq(nativeBalance, DEFAULT_NATIVE_FUNDING); + assertEq(s_newCoordinator.s_totalLinkBalance(), DEFAULT_PLI_FUNDING); + assertEq(s_newCoordinator.s_totalNativeBalance(), DEFAULT_NATIVE_FUNDING); + assertEq(s_linkToken.balanceOf(newCoordinatorAddr), DEFAULT_PLI_FUNDING); + assertEq(newCoordinatorAddr.balance, DEFAULT_NATIVE_FUNDING); + + // calling migrate again on V1 coordinator should fail + vm.expectRevert(); + s_wrapper.migrate(newCoordinatorAddr); + + // Request randomness from wrapper. + uint32 callbackGasLimit = 1_000_000; + vm.expectEmit(true, true, true, true); + uint256 wrapperCost = s_wrapper.calculateRequestPrice(callbackGasLimit); + emit WrapperRequestMade(1, wrapperCost); + uint256 requestId = s_consumer.makeRequest(callbackGasLimit, 0, 1); + assertEq(requestId, 1); + + (uint256 paid, bool fulfilled, bool native) = s_consumer.s_requests(requestId); + uint32 expectedPaid = (callbackGasLimit + wrapperGasOverhead + coordinatorGasOverhead) * 2; + uint256 wrapperCostEstimate = s_wrapper.estimateRequestPrice(callbackGasLimit, tx.gasprice); + uint256 wrapperCostCalculation = s_wrapper.calculateRequestPrice(callbackGasLimit); + assertEq(paid, expectedPaid); // 1_030_000 * 2 for link/native ratio + assertEq(uint256(paid), wrapperCostEstimate); + assertEq(wrapperCostEstimate, wrapperCostCalculation); + assertEq(fulfilled, false); + assertEq(native, false); + assertEq(s_linkToken.balanceOf(address(s_consumer)), DEFAULT_PLI_FUNDING - expectedPaid); + + (, uint256 gasLimit, ) = 
s_wrapper.s_callbacks(requestId); + assertEq(gasLimit, callbackGasLimit); + + vm.stopPrank(); + + vm.startPrank(newCoordinatorAddr); + + uint256[] memory words = new uint256[](1); + words[0] = 123; + s_wrapper.rawFulfillRandomWords(requestId, words); + (, bool nowFulfilled, uint256[] memory storedWords) = s_consumer.getRequestStatus(requestId); + assertEq(nowFulfilled, true); + assertEq(storedWords[0], 123); + + vm.stopPrank(); + + /// Withdraw funds from wrapper. + vm.startPrank(PLI_WHALE); + uint256 priorWhaleBalance = s_linkToken.balanceOf(PLI_WHALE); + s_wrapper.withdraw(PLI_WHALE, paid); + assertEq(s_linkToken.balanceOf(PLI_WHALE), priorWhaleBalance + paid); + assertEq(s_linkToken.balanceOf(address(s_wrapper)), 0); + + vm.stopPrank(); + } + + function testMigrateWrapperNativePayment() public { + vm.deal(address(s_consumer), DEFAULT_NATIVE_FUNDING); + + uint256 subID = s_wrapper.SUBSCRIPTION_ID(); + address oldCoordinatorAddr = address(s_testCoordinator); + + // Fund subscription with native and PLI payment to check + // if funds are transferred to new subscription after call + // migration to new coordinator + s_linkToken.transferAndCall(oldCoordinatorAddr, DEFAULT_PLI_FUNDING, abi.encode(subID)); + s_testCoordinator.fundSubscriptionWithNative{value: DEFAULT_NATIVE_FUNDING}(subID); + + // Get type and version. 
+ assertEq(s_wrapper.typeAndVersion(), "VRFV2Wrapper 1.0.0"); + + // subscription exists in V1 coordinator before migration + ( + uint96 balance, + uint96 nativeBalance, + uint64 reqCount, + address owner, + address[] memory consumers + ) = s_testCoordinator.getSubscription(subID); + assertEq(reqCount, 0); + assertEq(balance, DEFAULT_PLI_FUNDING); + assertEq(nativeBalance, DEFAULT_NATIVE_FUNDING); + assertEq(owner, address(s_wrapper)); + assertEq(consumers.length, 1); + assertEq(consumers[0], address(s_wrapper)); + + vm.startPrank(PLI_WHALE); + + // Update wrapper to point to the new coordinator + vm.expectEmit( + false, // no first indexed field + false, // no second indexed field + false, // no third indexed field + true // check data fields + ); + address newCoordinatorAddr = address(s_newCoordinator); + emit MigrationCompleted(newCoordinatorAddr, subID); + + s_wrapper.migrate(newCoordinatorAddr); + + // subscription no longer exists in v1 coordinator after migration + vm.expectRevert(SubscriptionAPI.InvalidSubscription.selector); + s_testCoordinator.getSubscription(subID); + assertEq(s_testCoordinator.s_totalBalance(), 0); + assertEq(s_testCoordinator.s_totalNativeBalance(), 0); + assertEq(s_linkToken.balanceOf(oldCoordinatorAddr), 0); + assertEq(oldCoordinatorAddr.balance, 0); + + // subscription exists in v2 coordinator + (balance, nativeBalance, reqCount, owner, consumers) = s_newCoordinator.getSubscription(subID); + assertEq(owner, address(s_wrapper)); + assertEq(consumers.length, 1); + assertEq(consumers[0], address(s_wrapper)); + assertEq(reqCount, 0); + assertEq(balance, DEFAULT_PLI_FUNDING); + assertEq(nativeBalance, DEFAULT_NATIVE_FUNDING); + assertEq(s_newCoordinator.s_totalLinkBalance(), DEFAULT_PLI_FUNDING); + assertEq(s_newCoordinator.s_totalNativeBalance(), DEFAULT_NATIVE_FUNDING); + assertEq(s_linkToken.balanceOf(newCoordinatorAddr), DEFAULT_PLI_FUNDING); + assertEq(newCoordinatorAddr.balance, DEFAULT_NATIVE_FUNDING); + + // calling migrate again 
on V1 coordinator should fail + vm.expectRevert(); + s_wrapper.migrate(newCoordinatorAddr); + + // Request randomness from wrapper. + uint32 callbackGasLimit = 1_000_000; + vm.expectEmit(true, true, true, true); + uint256 wrapperCost = s_wrapper.calculateRequestPriceNative(callbackGasLimit); + emit WrapperRequestMade(1, wrapperCost); + uint256 requestId = s_consumer.makeRequestNative(callbackGasLimit, 0, 1); + assertEq(requestId, 1); + + (uint256 paid, bool fulfilled, bool native) = s_consumer.s_requests(requestId); + uint32 expectedPaid = callbackGasLimit + wrapperGasOverhead + coordinatorGasOverhead; + uint256 wrapperNativeCostEstimate = s_wrapper.estimateRequestPriceNative(callbackGasLimit, tx.gasprice); + uint256 wrapperCostCalculation = s_wrapper.calculateRequestPriceNative(callbackGasLimit); + assertEq(paid, expectedPaid); + assertEq(uint256(paid), wrapperNativeCostEstimate); + assertEq(wrapperNativeCostEstimate, wrapperCostCalculation); + assertEq(fulfilled, false); + assertEq(native, true); + assertEq(address(s_consumer).balance, DEFAULT_NATIVE_FUNDING - expectedPaid); + + (, uint256 gasLimit, ) = s_wrapper.s_callbacks(requestId); + assertEq(gasLimit, callbackGasLimit); + + vm.stopPrank(); + + vm.startPrank(newCoordinatorAddr); + + uint256[] memory words = new uint256[](1); + words[0] = 123; + s_wrapper.rawFulfillRandomWords(requestId, words); + (, bool nowFulfilled, uint256[] memory storedWords) = s_consumer.getRequestStatus(requestId); + assertEq(nowFulfilled, true); + assertEq(storedWords[0], 123); + + vm.stopPrank(); + + // Withdraw funds from wrapper. 
+ vm.startPrank(PLI_WHALE); + uint256 priorWhaleBalance = PLI_WHALE.balance; + s_wrapper.withdrawNative(PLI_WHALE, paid); + assertEq(PLI_WHALE.balance, priorWhaleBalance + paid); + assertEq(address(s_wrapper).balance, 0); + + vm.stopPrank(); + } +} diff --git a/contracts/test/v0.8/functions/v1/Functions.test.ts b/contracts/test/v0.8/functions/v1/Functions.test.ts new file mode 100644 index 00000000..14a68c21 --- /dev/null +++ b/contracts/test/v0.8/functions/v1/Functions.test.ts @@ -0,0 +1,172 @@ +import { ethers } from 'hardhat' +import { + publicAbi, + decodeDietCBOR, + hexToBuf, +} from '../../../test-helpers/helpers' +import { assert, expect } from 'chai' +import { Contract, ContractFactory, providers, Signer } from 'ethers' +import { Roles, getUsers } from '../../../test-helpers/setup' +import { makeDebug } from '../../../test-helpers/debug' + +const debug = makeDebug('FunctionsTestHelper') +let concreteFunctionsTestHelperFactory: ContractFactory + +let roles: Roles + +before(async () => { + roles = (await getUsers()).roles + concreteFunctionsTestHelperFactory = await ethers.getContractFactory( + 'src/v0.8/functions/tests/v1_X/testhelpers/FunctionsTestHelper.sol:FunctionsTestHelper', + roles.defaultAccount, + ) +}) + +describe('FunctionsTestHelper', () => { + let ctr: Contract + let defaultAccount: Signer + + beforeEach(async () => { + defaultAccount = roles.defaultAccount + ctr = await concreteFunctionsTestHelperFactory + .connect(defaultAccount) + .deploy() + }) + + it('has a limited public interface [ @skip-coverage ]', () => { + expect( + publicAbi(ctr, [ + 'closeEvent', + 'initializeRequestForInlineJavaScript', + 'addSecretsReference', + 'addTwoArgs', + 'addEmptyArgs', + ]), + ).to.equal(true) + }) + + async function parseRequestDataEvent(tx: providers.TransactionResponse) { + const receipt = await tx.wait() + const data = receipt.logs?.[0].data + const d = debug.extend('parseRequestDataEvent') + d('data %s', data) + return 
ethers.utils.defaultAbiCoder.decode(['bytes'], data ?? '') + } + + describe('#closeEvent', () => { + it('handles empty request', async () => { + const tx = await ctr.closeEvent() + const [payload] = await parseRequestDataEvent(tx) + const decoded = await decodeDietCBOR(payload) + assert.deepEqual( + { + ...decoded, + language: decoded.language.toNumber(), + codeLocation: decoded.codeLocation.toNumber(), + }, + { + language: 0, + codeLocation: 0, + source: '', + }, + ) + }) + }) + + describe('#initializeRequestForInlineJavaScript', () => { + it('emits simple CBOR encoded request for js', async () => { + const js = 'function run(args, responses) {}' + await ctr.initializeRequestForInlineJavaScript(js) + const tx = await ctr.closeEvent() + const [payload] = await parseRequestDataEvent(tx) + const decoded = await decodeDietCBOR(payload) + assert.deepEqual( + { + ...decoded, + language: decoded.language.toNumber(), + codeLocation: decoded.codeLocation.toNumber(), + }, + { + language: 0, + codeLocation: 0, + source: js, + }, + ) + }) + }) + + describe('#initializeRequestForInlineJavaScript to revert', () => { + it('reverts with EmptySource() if source param is empty', async () => { + await expect( + ctr.initializeRequestForInlineJavaScript(''), + ).to.be.revertedWith('EmptySource()') + }) + }) + + describe('#addSecrets', () => { + it('emits CBOR encoded request with js and secrets', async () => { + const js = 'function run(args, responses) {}' + const secrets = '0xA161616162' + await ctr.initializeRequestForInlineJavaScript(js) + await ctr.addSecretsReference(secrets) + const tx = await ctr.closeEvent() + const [payload] = await parseRequestDataEvent(tx) + const decoded = await decodeDietCBOR(payload) + assert.deepEqual( + { + ...decoded, + language: decoded.language.toNumber(), + codeLocation: decoded.codeLocation.toNumber(), + secretsLocation: decoded.secretsLocation.toNumber(), + }, + { + language: 0, + codeLocation: 0, + source: js, + secretsLocation: 1, + secrets: 
hexToBuf(secrets), + }, + ) + }) + }) + + describe('#addSecrets to revert', () => { + it('reverts with EmptySecrets() if secrets param is empty', async () => { + const js = 'function run(args, responses) {}' + await ctr.initializeRequestForInlineJavaScript(js) + await expect(ctr.addSecretsReference('0x')).to.be.revertedWith( + 'EmptySecrets()', + ) + }) + }) + + describe('#addArgs', () => { + it('emits CBOR encoded request with js and args', async () => { + const js = 'function run(args, responses) {}' + await ctr.initializeRequestForInlineJavaScript(js) + await ctr.addTwoArgs('arg1', 'arg2') + const tx = await ctr.closeEvent() + const [payload] = await parseRequestDataEvent(tx) + const decoded = await decodeDietCBOR(payload) + assert.deepEqual( + { + ...decoded, + language: decoded.language.toNumber(), + codeLocation: decoded.codeLocation.toNumber(), + }, + { + language: 0, + codeLocation: 0, + source: js, + args: ['arg1', 'arg2'], + }, + ) + }) + }) + + describe('#addEmptyArgs to revert', () => { + it('reverts with EmptyArgs() if args param is empty', async () => { + await expect(ctr.addEmptyArgs()).to.be.revertedWith('EmptyArgs()') + }) + }) +}) diff --git a/contracts/test/v0.8/functions/v1/FunctionsClient.test.ts b/contracts/test/v0.8/functions/v1/FunctionsClient.test.ts new file mode 100644 index 00000000..826953fb --- /dev/null +++ b/contracts/test/v0.8/functions/v1/FunctionsClient.test.ts @@ -0,0 +1,227 @@ +import { ethers } from 'hardhat' +import { assert, expect } from 'chai' +import { decodeDietCBOR, stringToBytes } from '../../../test-helpers/helpers' +import { + getSetupFactory, + FunctionsContracts, + FunctionsRoles, + anyValue, + ids, + createSubscription, + getEventArg, + parseOracleRequestEventArgs, + encodeReport, +} from './utils' + +const setup = getSetupFactory() +let contracts: FunctionsContracts +let roles: FunctionsRoles + +beforeEach(async () => { + ;({ contracts, roles } = setup()) +}) + +describe('Functions Client', () => { + 
describe('#sendSimpleRequestWithJavaScript', () => { + it('emits events from the client and the oracle contracts', async () => { + const subscriptionId = await createSubscription( + roles.subOwner, + [contracts.client.address], + contracts.router, + contracts.accessControl, + contracts.linkToken, + ) + const flags = + '0x0101010101010101010101010101010101010101010101010101010101010101' + const callbackGas = 100_000 + await contracts.router.setFlags(subscriptionId, flags) + const defaultAccountAddress = await roles.defaultAccount.getAddress() + await expect( + contracts.client + .connect(roles.defaultAccount) + .sendSimpleRequestWithJavaScript( + 'return `hello world`', + subscriptionId, + ids.donId, + callbackGas, + ), + ) + .to.emit(contracts.client, 'RequestSent') + .withArgs(anyValue) + .to.emit(contracts.coordinator, 'OracleRequest') + .withArgs( + anyValue, + contracts.client.address, + defaultAccountAddress, + subscriptionId, + roles.subOwnerAddress, + anyValue, + anyValue, + flags, + callbackGas, + anyValue, + ) + }) + + it('respects gas flag setting', async () => { + const subscriptionId = await createSubscription( + roles.subOwner, + [contracts.client.address], + contracts.router, + contracts.accessControl, + contracts.linkToken, + ) + const flags = + '0x0101010101010101010101010101010101010101010101010101010101010101' + await contracts.router.setFlags(subscriptionId, flags) + await expect( + contracts.client + .connect(roles.defaultAccount) + .sendSimpleRequestWithJavaScript( + 'return `hello world`', + subscriptionId, + ids.donId, + 400_000, + ), + ) + .to.emit(contracts.client, 'RequestSent') + .to.emit(contracts.coordinator, 'OracleRequest') + await expect( + contracts.client + .connect(roles.defaultAccount) + .sendSimpleRequestWithJavaScript( + 'return `hello world`', + subscriptionId, + ids.donId, + 600_000, // limit set by gas flag == 1 is 500_000 + ), + ).to.be.revertedWith('GasLimitTooBig(500000)') + }) + + it('encodes user request to CBOR', async 
() => { + const subscriptionId = await createSubscription( + roles.subOwner, + [contracts.client.address], + contracts.router, + contracts.accessControl, + contracts.linkToken, + ) + const js = 'function run(){return response}' + const tx = await contracts.client.sendSimpleRequestWithJavaScript( + js, + subscriptionId, + ids.donId, + 20_000, + ) + const args = await parseOracleRequestEventArgs(tx) + assert.equal(args.length, 5) + const decoded = await decodeDietCBOR(args[3]) + assert.deepEqual( + { + ...decoded, + language: decoded.language.toNumber(), + codeLocation: decoded.codeLocation.toNumber(), + }, + { + language: 0, + codeLocation: 0, + source: js, + }, + ) + }) + }) + + describe('#fulfillRequest', () => { + it('emits fulfillment events', async () => { + const subscriptionId = await createSubscription( + roles.subOwner, + [contracts.client.address], + contracts.router, + contracts.accessControl, + contracts.linkToken, + ) + const tx = await contracts.client.sendSimpleRequestWithJavaScript( + 'function run(){return response}', + subscriptionId, + ids.donId, + 20_000, + ) + const { events } = await tx.wait() + const requestId = getEventArg(events, 'RequestSent', 0) + await expect(tx) + .to.emit(contracts.client, 'RequestSent') + .withArgs(requestId) + + const response = stringToBytes('response') + const error = stringToBytes('') + const oracleRequestEvent = await contracts.coordinator.queryFilter( + contracts.coordinator.filters.OracleRequest(), + ) + const onchainMetadata = oracleRequestEvent[0].args?.['commitment'] + const report = await encodeReport( + ethers.utils.hexZeroPad(requestId, 32), + response, + error, + onchainMetadata, + stringToBytes(''), + ) + await expect(contracts.coordinator.callReport(report)) + .to.emit(contracts.coordinator, 'OracleResponse') + .withArgs(requestId, await roles.defaultAccount.getAddress()) + .to.emit(contracts.client, 'FulfillRequestInvoked') + .withArgs(requestId, response, error) + }) + }) +}) + +describe('Faulty 
Functions Client', () => { + it('can complete requests with an empty callback', async () => { + const clientWithEmptyCallbackTestHelperFactory = + await ethers.getContractFactory( + 'src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientWithEmptyCallback.sol:FunctionsClientWithEmptyCallback', + roles.consumer, + ) + + const clientWithEmptyCallback = + await clientWithEmptyCallbackTestHelperFactory + .connect(roles.consumer) + .deploy(contracts.router.address) + + const subscriptionId = await createSubscription( + roles.subOwner, + [clientWithEmptyCallback.address], + contracts.router, + contracts.accessControl, + contracts.linkToken, + ) + const tx = await clientWithEmptyCallback.sendSimpleRequestWithJavaScript( + 'function run(){return response}', + subscriptionId, + ids.donId, + 20_000, + ) + const { events } = await tx.wait() + const requestId = getEventArg(events, 'RequestSent', 0) + await expect(tx) + .to.emit(clientWithEmptyCallback, 'RequestSent') + .withArgs(requestId) + + const response = stringToBytes('response') + const error = stringToBytes('') + const oracleRequestEvent = await contracts.coordinator.queryFilter( + contracts.coordinator.filters.OracleRequest(), + ) + const onchainMetadata = oracleRequestEvent[0].args?.['commitment'] + const report = await encodeReport( + ethers.utils.hexZeroPad(requestId, 32), + response, + error, + onchainMetadata, + stringToBytes(''), + ) + await expect(contracts.coordinator.callReport(report)) + .to.emit(contracts.coordinator, 'OracleResponse') + .withArgs(requestId, await roles.defaultAccount.getAddress()) + .to.emit(contracts.router, 'RequestProcessed') + }) +}) diff --git a/contracts/test/v0.8/functions/v1/FunctionsCoordinator.test.ts b/contracts/test/v0.8/functions/v1/FunctionsCoordinator.test.ts new file mode 100644 index 00000000..89444ca8 --- /dev/null +++ b/contracts/test/v0.8/functions/v1/FunctionsCoordinator.test.ts @@ -0,0 +1,68 @@ +import { expect } from 'chai' +import { + getSetupFactory, + 
FunctionsContracts, + coordinatorConfig, + FunctionsRoles, +} from './utils' + +const setup = getSetupFactory() +let contracts: FunctionsContracts +let roles: FunctionsRoles + +beforeEach(async () => { + ;({ contracts, roles } = setup()) +}) + +describe('Functions Coordinator', () => { + describe('Config', () => { + it('non-owner is unable to update config', async () => { + await expect( + contracts.coordinator + .connect(roles.stranger) + .updateConfig(coordinatorConfig), + ).to.be.revertedWith('Only callable by owner') + }) + + it('Owner can update config', async () => { + const beforeConfig = await contracts.coordinator.getConfig() + await expect( + contracts.coordinator.updateConfig({ + ...coordinatorConfig, + donFee: 10, + }), + ).to.emit(contracts.coordinator, 'ConfigUpdated') + const afterConfig = await contracts.coordinator.getConfig() + expect(beforeConfig).to.not.equal(afterConfig) + }) + + it('returns the config set', async () => { + const config = await contracts.coordinator + .connect(roles.stranger) + .getConfig() + await Promise.all( + Object.keys(coordinatorConfig).map((key) => + expect(config[key]).to.equal( + coordinatorConfig[key as keyof typeof coordinatorConfig], + ), + ), + ) + }) + + it('#fulfillmentGasPriceOverEstimationBP overestimates gas cost', async () => { + const estimateWithNoOverestimaton = + await contracts.coordinator.estimateCost(1, 0x0, 100_000, 2000000000) + + await contracts.coordinator.updateConfig({ + ...coordinatorConfig, + fulfillmentGasPriceOverEstimationBP: 10_000, + }) + + // Halve the gas price, which should be the same estimate because of fulfillmentGasPriceOverEstimationBP doubling the gas price + const estimateWithOverestimaton = + await contracts.coordinator.estimateCost(1, 0x0, 100_000, 1000000000) + + expect(estimateWithNoOverestimaton).to.equal(estimateWithOverestimaton) + }) + }) +}) diff --git a/contracts/test/v0.8/functions/v1/FunctionsRouter.test.ts b/contracts/test/v0.8/functions/v1/FunctionsRouter.test.ts 
new file mode 100644 index 00000000..e484283e --- /dev/null +++ b/contracts/test/v0.8/functions/v1/FunctionsRouter.test.ts @@ -0,0 +1,66 @@ +import { expect } from 'chai' +import { ethers } from 'hardhat' +import { stringToBytes } from '../../../test-helpers/helpers' +import { + getSetupFactory, + FunctionsContracts, + functionsRouterConfig, + FunctionsRoles, +} from './utils' + +const setup = getSetupFactory() +let contracts: FunctionsContracts +let roles: FunctionsRoles + +beforeEach(async () => { + ;({ contracts, roles } = setup()) +}) + +describe('Functions Router - Request lifecycle', () => { + describe('Config', () => { + it('#typeAndVersion', async () => { + expect(await contracts.router.typeAndVersion()).to.be.equal( + 'Functions Router v2.0.0', + ) + }) + it('non-owner is unable to update config', async () => { + await expect( + contracts.router + .connect(roles.stranger) + .updateConfig(functionsRouterConfig), + ).to.be.revertedWith('Only callable by owner') + }) + + it('owner can update config', async () => { + const beforeConfig = await contracts.router.getConfig() + await expect( + contracts.router.updateConfig({ + ...functionsRouterConfig, + adminFee: 10, + }), + ).to.emit(contracts.router, 'ConfigUpdated') + const afterConfig = await contracts.router.getConfig() + expect(beforeConfig).to.not.equal(afterConfig) + }) + + it('returns the config set', async () => { + const config = await contracts.router.connect(roles.stranger).getConfig() + await Promise.all( + Object.keys(functionsRouterConfig).map((key) => + expect(config[key]).to.deep.equal( + functionsRouterConfig[key as keyof typeof functionsRouterConfig], + ), + ), + ) + }) + }) + describe('Allow List path', () => { + it('non-owner is unable to set Allow List ID', async () => { + await expect( + contracts.router + .connect(roles.stranger) + .setAllowListId(ethers.utils.hexZeroPad(stringToBytes(''), 32)), + ).to.be.revertedWith('Only callable by owner') + }) + }) +}) diff --git 
a/contracts/test/v0.8/functions/v1/FunctionsSubscriptions.test.ts b/contracts/test/v0.8/functions/v1/FunctionsSubscriptions.test.ts new file mode 100644 index 00000000..86cfb9dd --- /dev/null +++ b/contracts/test/v0.8/functions/v1/FunctionsSubscriptions.test.ts @@ -0,0 +1,862 @@ +import { ethers } from 'hardhat' +import { expect } from 'chai' +import { BigNumber } from 'ethers' +import { randomAddressString } from 'hardhat/internal/hardhat-network/provider/utils/random' +import { + getSetupFactory, + FunctionsContracts, + FunctionsRoles, + createSubscription, + acceptTermsOfService, + ids, + getEventArg, + accessControlMockPrivateKey, + encodeReport, +} from './utils' +import { stringToBytes } from '../../../test-helpers/helpers' + +const setup = getSetupFactory() +let contracts: FunctionsContracts +let roles: FunctionsRoles + +const donLabel = ethers.utils.formatBytes32String('1') + +beforeEach(async () => { + ;({ contracts, roles } = setup()) +}) + +describe('Functions Router - Subscriptions', () => { + describe('Subscription management', () => { + describe('#createSubscription', async function () { + it('can create a subscription', async function () { + await acceptTermsOfService( + contracts.accessControl, + roles.subOwner, + roles.subOwnerAddress, + ) + await expect( + contracts.router.connect(roles.subOwner).createSubscription(), + ) + .to.emit(contracts.router, 'SubscriptionCreated') + .withArgs(1, roles.subOwnerAddress) + const s = await contracts.router.getSubscription(1) + expect(s.balance.toString()).to.equal('0') + expect(s.owner).to.equal(roles.subOwnerAddress) + }) + it('subscription id increments', async function () { + await acceptTermsOfService( + contracts.accessControl, + roles.subOwner, + roles.subOwnerAddress, + ) + await expect( + contracts.router.connect(roles.subOwner).createSubscription(), + ) + .to.emit(contracts.router, 'SubscriptionCreated') + .withArgs(1, roles.subOwnerAddress) + await expect( + 
contracts.router.connect(roles.subOwner).createSubscription(), + ) + .to.emit(contracts.router, 'SubscriptionCreated') + .withArgs(2, roles.subOwnerAddress) + }) + it('cannot create more than the max', async function () { + const subId = createSubscription( + roles.subOwner, + [], + contracts.router, + contracts.accessControl, + ) + for (let i = 0; i < 100; i++) { + await contracts.router + .connect(roles.subOwner) + .addConsumer(subId, randomAddressString()) + } + await expect( + contracts.router + .connect(roles.subOwner) + .addConsumer(subId, randomAddressString()), + ).to.be.revertedWith(`TooManyConsumers`) + }) + }) + + describe('#proposeSubscriptionOwnerTransfer', async function () { + let subId: number + beforeEach(async () => { + subId = await createSubscription( + roles.subOwner, + [roles.consumerAddress], + contracts.router, + contracts.accessControl, + ) + }) + it('rejects non-owner', async function () { + await expect( + contracts.router + .connect(roles.stranger) + .proposeSubscriptionOwnerTransfer(subId, roles.strangerAddress), + ).to.be.revertedWith(`MustBeSubscriptionOwner()`) + }) + it('owner can request transfer', async function () { + await expect( + contracts.router + .connect(roles.subOwner) + .proposeSubscriptionOwnerTransfer(subId, roles.strangerAddress), + ) + .to.emit(contracts.router, 'SubscriptionOwnerTransferRequested') + .withArgs(subId, roles.subOwnerAddress, roles.strangerAddress) + // Same request reverts + await expect( + contracts.router + .connect(roles.subOwner) + .proposeSubscriptionOwnerTransfer(subId, roles.strangerAddress), + ).to.be.revertedWith('InvalidCalldata') + }) + }) + + describe('#acceptSubscriptionOwnerTransfer', async function () { + let subId: number + beforeEach(async () => { + subId = await createSubscription( + roles.subOwner, + [roles.consumerAddress], + contracts.router, + contracts.accessControl, + ) + }) + it('subscription must exist', async function () { + // 0x0 is requested owner + await expect( + 
contracts.router + .connect(roles.subOwner) + .acceptSubscriptionOwnerTransfer(1203123123), + ).to.be.revertedWith(`MustBeProposedOwner`) + }) + it('must be requested owner to accept', async function () { + await expect( + contracts.router + .connect(roles.subOwner) + .proposeSubscriptionOwnerTransfer(subId, roles.strangerAddress), + ) + await expect( + contracts.router + .connect(roles.subOwner) + .acceptSubscriptionOwnerTransfer(subId), + ).to.be.revertedWith(`MustBeProposedOwner`) + }) + it('requested owner can accept', async function () { + await acceptTermsOfService( + contracts.accessControl, + roles.stranger, + roles.strangerAddress, + ) + await expect( + contracts.router + .connect(roles.subOwner) + .proposeSubscriptionOwnerTransfer(subId, roles.strangerAddress), + ) + .to.emit(contracts.router, 'SubscriptionOwnerTransferRequested') + .withArgs(subId, roles.subOwnerAddress, roles.strangerAddress) + await expect( + contracts.router + .connect(roles.stranger) + .acceptSubscriptionOwnerTransfer(subId), + ) + .to.emit(contracts.router, 'SubscriptionOwnerTransferred') + .withArgs(subId, roles.subOwnerAddress, roles.strangerAddress) + }) + }) + + describe('#addConsumer', async function () { + let subId: number + beforeEach(async () => { + subId = await createSubscription( + roles.subOwner, + [roles.consumerAddress], + contracts.router, + contracts.accessControl, + ) + }) + it('subscription must exist', async function () { + await expect( + contracts.router + .connect(roles.subOwner) + .addConsumer(1203123123, roles.strangerAddress), + ).to.be.revertedWith(`InvalidSubscription`) + }) + it('must be owner', async function () { + await expect( + contracts.router + .connect(roles.stranger) + .addConsumer(subId, roles.strangerAddress), + ).to.be.revertedWith(`MustBeSubscriptionOwner()`) + }) + it('add is idempotent', async function () { + await contracts.router + .connect(roles.subOwner) + .addConsumer(subId, roles.strangerAddress) + await expect( + contracts.router + 
.connect(roles.subOwner) + .addConsumer(subId, roles.strangerAddress), + ).to.not.be.reverted + }) + it('cannot add more than maximum', async function () { + // There is one consumer, add another 99 to hit the max + for (let i = 0; i < 99; i++) { + await contracts.router + .connect(roles.subOwner) + .addConsumer(subId, randomAddressString()) + } + // Adding one more should fail + // await contracts.router.connect(roles.subOwner).addConsumer(subId, roles.strangerAddress); + await expect( + contracts.router + .connect(roles.subOwner) + .addConsumer(subId, roles.strangerAddress), + ).to.be.revertedWith(`TooManyConsumers`) + // Same is true if we first create with the maximum + const consumers: string[] = [] + for (let i = 0; i < 100; i++) { + consumers.push(randomAddressString()) + } + subId = await createSubscription( + roles.subOwner, + consumers, + contracts.router, + contracts.accessControl, + ) + await expect( + contracts.router + .connect(roles.subOwner) + .addConsumer(subId, roles.strangerAddress), + ).to.be.revertedWith(`TooManyConsumers`) + }) + it('owner can update', async function () { + await expect( + contracts.router + .connect(roles.subOwner) + .addConsumer(subId, roles.strangerAddress), + ) + .to.emit(contracts.router, 'SubscriptionConsumerAdded') + .withArgs(subId, roles.strangerAddress) + }) + }) + + describe('#removeConsumer', async function () { + let subId: number + beforeEach(async () => { + subId = await createSubscription( + roles.subOwner, + [roles.consumerAddress], + contracts.router, + contracts.accessControl, + ) + }) + it('subscription must exist', async function () { + await expect( + contracts.router + .connect(roles.subOwner) + .removeConsumer(1203123123, roles.strangerAddress), + ).to.be.revertedWith(`InvalidSubscription`) + }) + it('must be owner', async function () { + await expect( + contracts.router + .connect(roles.stranger) + .removeConsumer(subId, roles.strangerAddress), + ).to.be.revertedWith(`MustBeSubscriptionOwner()`) + }) + 
it('owner can update', async function () { + const subBefore = await contracts.router.getSubscription(subId) + await contracts.router + .connect(roles.subOwner) + .addConsumer(subId, roles.strangerAddress) + await expect( + contracts.router + .connect(roles.subOwner) + .removeConsumer(subId, roles.strangerAddress), + ) + .to.emit(contracts.router, 'SubscriptionConsumerRemoved') + .withArgs(subId, roles.strangerAddress) + const subAfter = await contracts.router.getSubscription(subId) + // Subscription should NOT contain the removed consumer + expect(subBefore.consumers).to.deep.equal(subAfter.consumers) + }) + it('can remove all consumers', async function () { + // Testing the handling of zero. + await contracts.router + .connect(roles.subOwner) + .addConsumer(subId, roles.strangerAddress) + await contracts.router + .connect(roles.subOwner) + .removeConsumer(subId, roles.strangerAddress) + await contracts.router + .connect(roles.subOwner) + .removeConsumer(subId, roles.consumerAddress) + // Should be empty + const subAfter = await contracts.router.getSubscription(subId) + expect(subAfter.consumers).to.deep.equal([]) + }) + }) + + describe('#pendingRequestExists', async function () { + let subId: number + beforeEach(async () => { + subId = await createSubscription( + roles.subOwner, + [roles.consumerAddress], + contracts.router, + contracts.accessControl, + ) + + await contracts.linkToken + .connect(roles.subOwner) + .transferAndCall( + contracts.router.address, + BigNumber.from('130790416713017745'), + ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + await contracts.router + .connect(roles.subOwner) + .addConsumer(subId, contracts.client.address) + }) + it('returns false when there is no latest pending request', async function () { + expect( + await contracts.router + .connect(roles.subOwner) + .pendingRequestExists(subId), + ).to.be.false + }) + it('returns true when the latest request is pending', async function () { + await contracts.client + 
.connect(roles.consumer) + .sendSimpleRequestWithJavaScript( + `return 'hello world'`, + subId, + donLabel, + 20_000, + ) + expect( + await contracts.router + .connect(roles.subOwner) + .pendingRequestExists(subId), + ).to.be.true + }) + }) + + describe('#cancelSubscription', async function () { + let subId: number + beforeEach(async () => { + subId = await createSubscription( + roles.subOwner, + [roles.consumerAddress], + contracts.router, + contracts.accessControl, + ) + }) + it('subscription must exist', async function () { + await expect( + contracts.router + .connect(roles.subOwner) + .cancelSubscription(1203123123, roles.subOwnerAddress), + ).to.be.revertedWith(`InvalidSubscription`) + }) + it('must be owner', async function () { + await expect( + contracts.router + .connect(roles.stranger) + .cancelSubscription(subId, roles.subOwnerAddress), + ).to.be.revertedWith(`MustBeSubscriptionOwner()`) + }) + it('can cancel', async function () { + const strangerBalanceBefore = await contracts.linkToken.balanceOf( + roles.strangerAddress, + ) + await contracts.linkToken + .connect(roles.subOwner) + .transferAndCall( + contracts.router.address, + BigNumber.from('1000'), + ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + await expect( + contracts.router + .connect(roles.subOwner) + .cancelSubscription(subId, roles.strangerAddress), + ) + .to.emit(contracts.router, 'SubscriptionCanceled') + .withArgs(subId, roles.strangerAddress, BigNumber.from('0')) + const strangerBalance = await contracts.linkToken.balanceOf( + roles.strangerAddress, + ) + expect(strangerBalance.toString()).to.equal( + strangerBalanceBefore.toString(), + ) + await expect( + contracts.router.connect(roles.subOwner).getSubscription(subId), + ).to.be.revertedWith('InvalidSubscription') + }) + it('can add same consumer after canceling', async function () { + await contracts.linkToken + .connect(roles.subOwner) + .transferAndCall( + contracts.router.address, + BigNumber.from('1000'), + 
ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + await contracts.router + .connect(roles.subOwner) + .addConsumer(subId, roles.strangerAddress) + await contracts.router + .connect(roles.subOwner) + .cancelSubscription(subId, roles.strangerAddress) + subId = await createSubscription( + roles.subOwner, + [roles.consumerAddress], + contracts.router, + contracts.accessControl, + ) + // The cancel should have removed this consumer, so we can add it again. + await expect( + contracts.router + .connect(roles.subOwner) + .addConsumer(subId, roles.strangerAddress), + ).to.not.be.reverted + }) + it('cannot cancel with pending request', async function () { + await contracts.linkToken + .connect(roles.subOwner) + .transferAndCall( + contracts.router.address, + BigNumber.from('130790416713017745'), + ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + await contracts.router + .connect(roles.subOwner) + .addConsumer(subId, contracts.client.address) + await contracts.client + .connect(roles.consumer) + .sendSimpleRequestWithJavaScript( + `return 'hello world'`, + subId, + donLabel, + 20_000, + ) + // Should revert with outstanding requests + await expect( + contracts.router + .connect(roles.subOwner) + .cancelSubscription(subId, roles.strangerAddress), + ).to.be.revertedWith('CannotRemoveWithPendingRequests()') + // However the owner is able to cancel + // funds go to the sub owner. 
+ await expect( + contracts.router + .connect(roles.defaultAccount) + .ownerCancelSubscription(subId), + ) + .to.emit(contracts.router, 'SubscriptionCanceled') + .withArgs( + subId, + roles.subOwnerAddress, + BigNumber.from('130790416713017745'), + ) + }) + }) + + describe('#recoverFunds', async function () { + let subId: number + beforeEach(async () => { + subId = await createSubscription( + roles.subOwner, + [roles.consumerAddress], + contracts.router, + contracts.accessControl, + ) + }) + + it('function that should change internal balance do', async function () { + type bf = [() => Promise, BigNumber] + const balanceChangingFns: Array = [ + [ + async function () { + const s = ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]) + await contracts.linkToken + .connect(roles.subOwner) + .transferAndCall( + contracts.router.address, + BigNumber.from('1000'), + s, + ) + }, + BigNumber.from('1000'), + ], + [ + async function () { + await contracts.router + .connect(roles.subOwner) + .cancelSubscription(subId, roles.strangerAddress) + }, + BigNumber.from('0'), + ], + ] + for (const [fn, expectedBalanceChange] of balanceChangingFns) { + const startingBalance = await contracts.router.getTotalBalance() + await fn() + const endingBalance = await contracts.router.getTotalBalance() + expect(endingBalance.sub(startingBalance.toString())).to.equal( + expectedBalanceChange.toString(), + ) + } + }) + it('only owner can recover', async function () { + await expect( + contracts.router + .connect(roles.subOwner) + .recoverFunds(roles.strangerAddress), + ).to.be.revertedWith('Only callable by owner') + }) + + it('owner can recover link transferred', async function () { + // Set the internal balance + expect( + await contracts.linkToken.balanceOf(roles.strangerAddress), + ).to.equal(BigNumber.from('1000000000000000000')) + const subscription = ethers.utils.defaultAbiCoder.encode( + ['uint64'], + [subId], + ) + await contracts.linkToken + .connect(roles.subOwner) + 
.transferAndCall( + contracts.router.address, + BigNumber.from('1000'), + subscription, + ) + // Circumvent internal balance + await contracts.linkToken + .connect(roles.subOwner) + .transfer(contracts.router.address, BigNumber.from('1000')) + // Should recover this 1000 + await expect( + contracts.router + .connect(roles.defaultAccount) + .recoverFunds(roles.strangerAddress), + ) + .to.emit(contracts.router, 'FundsRecovered') + .withArgs(roles.strangerAddress, BigNumber.from('1000')) + expect( + await contracts.linkToken.balanceOf(roles.strangerAddress), + ).to.equal(BigNumber.from('1000000000000001000')) + }) + }) + }) + + describe('#oracleWithdraw', async function () { + it('cannot withdraw with no balance', async function () { + await expect( + contracts.router + .connect(roles.oracleNode) + .oracleWithdraw(randomAddressString(), BigNumber.from('100')), + ).to.be.revertedWith(`InsufficientBalance`) + }) + }) + + describe('#ownerWithdraw', async function () { + it('cannot withdraw more than balance', async function () { + await expect( + contracts.router.oracleWithdraw( + randomAddressString(), + BigNumber.from('100'), + ), + ).to.be.revertedWith(`InsufficientBalance`) + }) + }) + + describe('#flagsSet', async function () { + it('get flags that were previously set', async function () { + const flags = ethers.utils.formatBytes32String('arbitrary_byte_values') + await acceptTermsOfService( + contracts.accessControl, + roles.subOwner, + roles.subOwnerAddress, + ) + await expect( + contracts.router.connect(roles.subOwner).createSubscription(), + ) + .to.emit(contracts.router, 'SubscriptionCreated') + .withArgs(1, roles.subOwnerAddress) + await contracts.router.setFlags(1, flags) + expect(await contracts.router.getFlags(1)).to.equal(flags) + }) + }) + + describe('#reentrancy', async function () { + // Use a fixed gas price for these tests + const gasPrice = 3000000000 // 3 gwei + + it('allows callbacks to start another request if they have sufficient funds', async 
function () { + const subscriptionId = await createSubscription( + roles.subOwner, + [contracts.client.address], + contracts.router, + contracts.accessControl, + contracts.linkToken, + ) + + // Set test helper flag + await contracts.client.setDoValidReentrantOperation( + true, + subscriptionId, + ids.donId, + ) + + // Set flag so they have enough callback gas + const flags = new Uint8Array(32) + flags[0] = 1 + await contracts.router + .connect(roles.defaultAccount) + .setFlags(subscriptionId, flags) + + // Send request + const tx = await contracts.client.sendSimpleRequestWithJavaScript( + 'function run(){return response}', + subscriptionId, + ids.donId, + 400_000, + { gasPrice }, + ) + const { events } = await tx.wait() + const requestId = getEventArg(events, 'RequestSent', 0) + await expect(tx) + .to.emit(contracts.client, 'RequestSent') + .withArgs(requestId) + + const response = stringToBytes('response') + const error = stringToBytes('') + const oracleRequestEvent = await contracts.coordinator.queryFilter( + contracts.coordinator.filters.OracleRequest(), + ) + const onchainMetadata = oracleRequestEvent[0].args?.['commitment'] + const offchainMetadata = stringToBytes('') + const report = await encodeReport( + ethers.utils.hexZeroPad(requestId, 32), + response, + error, + onchainMetadata, + offchainMetadata, + ) + + await expect(contracts.coordinator.callReport(report, { gasPrice })) + .to.emit(contracts.coordinator, 'OracleResponse') + .withArgs(requestId, await roles.defaultAccount.getAddress()) + .to.emit(contracts.router, 'RequestProcessed') + .withArgs( + requestId, + subscriptionId, + () => true, + () => true, + 0, // Result code for callback failing + () => true, + () => true, + () => true, + ) + .to.emit(contracts.client, 'FulfillRequestInvoked') + .withArgs(requestId, response, error) + .to.emit(contracts.client, 'SendRequestInvoked') + }) + + it('prevents callbacks from starting another request if have insufficient funds', async function () { + await 
acceptTermsOfService( + contracts.accessControl, + roles.subOwner, + roles.subOwnerAddress, + ) + const createSubTx = await contracts.router + .connect(roles.subOwner) + .createSubscription() + const createSubReceipt = await createSubTx.wait() + const subscriptionId = + createSubReceipt.events[0].args['subscriptionId'].toNumber() + await contracts.router + .connect(roles.subOwner) + .addConsumer(subscriptionId, contracts.client.address) + await contracts.linkToken + .connect(roles.subOwner) + .transferAndCall( + contracts.router.address, + BigNumber.from('300000000000000000'), + ethers.utils.defaultAbiCoder.encode(['uint64'], [subscriptionId]), + ) + + // Set test helper flag + await contracts.client.setDoValidReentrantOperation( + true, + subscriptionId, + ids.donId, + ) + + // Set flag so they have enough callback gas + const flags = new Uint8Array(32) + flags[0] = 1 + await contracts.router + .connect(roles.defaultAccount) + .setFlags(subscriptionId, flags) + + // Send request + const tx = await contracts.client.sendSimpleRequestWithJavaScript( + 'function run(){return response}', + subscriptionId, + ids.donId, + 400_000, + { gasPrice }, + ) + const { events } = await tx.wait() + const requestId = getEventArg(events, 'RequestSent', 0) + await expect(tx) + .to.emit(contracts.client, 'RequestSent') + .withArgs(requestId) + + const response = stringToBytes('response') + const error = stringToBytes('') + const oracleRequestEvent = await contracts.coordinator.queryFilter( + contracts.coordinator.filters.OracleRequest(), + ) + const onchainMetadata = oracleRequestEvent[0].args?.['commitment'] + const offchainMetadata = stringToBytes('') + const report = await encodeReport( + ethers.utils.hexZeroPad(requestId, 32), + response, + error, + onchainMetadata, + offchainMetadata, + ) + + await expect(contracts.coordinator.callReport(report, { gasPrice })) + .to.emit(contracts.coordinator, 'OracleResponse') + .withArgs(requestId, await roles.defaultAccount.getAddress()) + 
.to.emit(contracts.client, 'FulfillRequestInvoked') + .withArgs(requestId, response, error) + .to.emit(contracts.router, 'RequestProcessed') + .withArgs( + requestId, + subscriptionId, + () => true, + () => true, + 1, // Result code for callback failing + () => true, + () => true, + () => true, + ) + }) + + it('callbacks are unable to improperly use subscription methods', async function () { + await acceptTermsOfService( + contracts.accessControl, + roles.subOwner, + roles.subOwnerAddress, + ) + const createSubTx = await contracts.router + .connect(roles.subOwner) + .createSubscription() + const createSubReceipt = await createSubTx.wait() + const subscriptionId = + createSubReceipt.events[0].args['subscriptionId'].toNumber() + await contracts.router + .connect(roles.subOwner) + .addConsumer(subscriptionId, contracts.client.address) + await contracts.linkToken + .connect(roles.subOwner) + .transferAndCall( + contracts.router.address, + BigNumber.from('1000000000000000000'), + ethers.utils.defaultAbiCoder.encode(['uint64'], [subscriptionId]), + ) + + // Set flag so they have enough callback gas + const flags = new Uint8Array(32) + flags[0] = 1 + await contracts.router + .connect(roles.defaultAccount) + .setFlags(subscriptionId, flags) + + // Accept ToS for client contract + const acceptorAddress = roles.subOwnerAddress + const recipientAddress = contracts.client.address + const message = await contracts.accessControl.getMessage( + acceptorAddress, + recipientAddress, + ) + const wallet = new ethers.Wallet(accessControlMockPrivateKey) + const flatSignature = await wallet.signMessage( + ethers.utils.arrayify(message), + ) + const { r, s, v } = ethers.utils.splitSignature(flatSignature) + await contracts.client + .connect(roles.subOwner) + .acceptTermsOfService(acceptorAddress, recipientAddress, r, s, v) + + // Transfer Subscription ownership to client contract so that it can call subscription methods + await contracts.router + .connect(roles.subOwner) + 
.proposeSubscriptionOwnerTransfer( + subscriptionId, + contracts.client.address, + ) + await contracts.client.acceptSubscriptionOwnerTransfer(subscriptionId) + + // Set test helper flag + await contracts.client.setDoInvalidReentrantOperation( + true, + subscriptionId, + ) + + // Send request + const tx = await contracts.client.sendSimpleRequestWithJavaScript( + 'function run(){return response}', + subscriptionId, + ids.donId, + 400_000, + { gasPrice }, + ) + const { events } = await tx.wait() + const requestId = getEventArg(events, 'RequestSent', 0) + await expect(tx) + .to.emit(contracts.client, 'RequestSent') + .withArgs(requestId) + + const response = stringToBytes('response') + const error = stringToBytes('') + const oracleRequestEvent = await contracts.coordinator.queryFilter( + contracts.coordinator.filters.OracleRequest(), + ) + const onchainMetadata = oracleRequestEvent[0].args?.['commitment'] + const offchainMetadata = stringToBytes('') + const report = await encodeReport( + ethers.utils.hexZeroPad(requestId, 32), + response, + error, + onchainMetadata, + offchainMetadata, + ) + + await expect(contracts.coordinator.callReport(report, { gasPrice })) + .to.emit(contracts.coordinator, 'OracleResponse') + .withArgs(requestId, await roles.defaultAccount.getAddress()) + .to.emit(contracts.client, 'FulfillRequestInvoked') + .withArgs(requestId, response, error) + .to.emit(contracts.router, 'RequestProcessed') + .withArgs( + requestId, + subscriptionId, + () => true, + () => true, + 1, // Result code for callback failing + () => true, + () => true, + () => true, + ) + }) + }) +}) diff --git a/contracts/test/v0.8/functions/v1/GasGolf.test.ts b/contracts/test/v0.8/functions/v1/GasGolf.test.ts new file mode 100644 index 00000000..32cc660b --- /dev/null +++ b/contracts/test/v0.8/functions/v1/GasGolf.test.ts @@ -0,0 +1,116 @@ +import { ethers } from 'hardhat' +import { BigNumber } from 'ethers' +import { + accessControlMockPrivateKey, + encodeReport, + 
FunctionsContracts, + FunctionsRoles, + getEventArg, + getSetupFactory, + ids, +} from './utils' +import { stringToBytes } from '../../../test-helpers/helpers' + +const setup = getSetupFactory() +let contracts: FunctionsContracts +let roles: FunctionsRoles + +const baselineGasUsed = 721271 +let currentGasUsed = 0 + +beforeEach(async () => { + ;({ contracts, roles } = setup()) +}) + +after(() => { + const score = currentGasUsed - baselineGasUsed + console.log(`\n ⛳ Par : ${baselineGasUsed} gas`) + console.log(`\n 🏌️ You : ${currentGasUsed} gas`) + console.log(`\n 🚩 Score : ${score} gas`) +}) + +describe('Gas Golf', () => { + it('taking a swing', async () => { + // User signs Terms of Service + const message = await contracts.accessControl.getMessage( + roles.consumerAddress, + roles.consumerAddress, + ) + const wallet = new ethers.Wallet(accessControlMockPrivateKey) + const flatSignature = await wallet.signMessage( + ethers.utils.arrayify(message), + ) + const { r, s, v } = ethers.utils.splitSignature(flatSignature) + const acceptTermsOfServiceTx = await contracts.accessControl + .connect(roles.consumer) + .acceptTermsOfService( + roles.consumerAddress, + roles.consumerAddress, + r, + s, + v, + ) + const { gasUsed: acceptTermsOfServiceGasUsed } = + await acceptTermsOfServiceTx.wait() + + // User creates a new Subscription + const createSubscriptionTx = await contracts.router + .connect(roles.consumer) + .createSubscription() + const createSubscriptionTxReceipt = await createSubscriptionTx.wait() + const createSubscriptionTxGasUsed = createSubscriptionTxReceipt.gasUsed + const subscriptionId = + createSubscriptionTxReceipt.events[0].args['subscriptionId'].toNumber() + + // User adds a consuming contract to their Subscription + const addConsumerTx = await contracts.router + .connect(roles.consumer) + .addConsumer(subscriptionId, contracts.client.address) + const { gasUsed: addConsumerTxGasUsed } = await addConsumerTx.wait() + + // User funds their subscription + const 
transferAndCallTx = await contracts.linkToken + .connect(roles.subOwner) + .transferAndCall( + contracts.router.address, + BigNumber.from('54666805176129187'), + ethers.utils.defaultAbiCoder.encode(['uint64'], [subscriptionId]), + ) + const { gasUsed: transferAndCallTxGasUsed } = await transferAndCallTx.wait() + + // User sends request + const requestTx = await contracts.client.sendSimpleRequestWithJavaScript( + 'function myFancyFunction(){return "woah, thats fancy"}', + subscriptionId, + ids.donId, + 20_000, + ) + const { gasUsed: requestTxGasUsed, events } = await requestTx.wait() + const requestId = getEventArg(events, 'RequestSent', 0) + const oracleRequestEvent = await contracts.coordinator.queryFilter( + contracts.coordinator.filters.OracleRequest(), + ) + // DON's transmitter submits a response + const response = stringToBytes('woah, thats fancy') + const error = stringToBytes('') + const onchainMetadata = oracleRequestEvent[0].args?.['commitment'] + const offchainMetadata = stringToBytes('') + const report = await encodeReport( + ethers.utils.hexZeroPad(requestId, 32), + response, + error, + onchainMetadata, + offchainMetadata, + ) + const fulfillmentTx = await contracts.coordinator.callReport(report) + const { gasUsed: fulfillmentTxGasUsed } = await fulfillmentTx.wait() + + currentGasUsed = acceptTermsOfServiceGasUsed + .add(createSubscriptionTxGasUsed) + .add(addConsumerTxGasUsed) + .add(transferAndCallTxGasUsed) + .add(requestTxGasUsed) + .add(fulfillmentTxGasUsed) + .toNumber() + }) +}) diff --git a/contracts/test/v0.8/functions/v1/RouterBase.test.ts b/contracts/test/v0.8/functions/v1/RouterBase.test.ts new file mode 100644 index 00000000..3a3b58b8 --- /dev/null +++ b/contracts/test/v0.8/functions/v1/RouterBase.test.ts @@ -0,0 +1,157 @@ +import { ethers } from 'hardhat' +import { expect } from 'chai' +import { + getSetupFactory, + coordinatorConfig, + FunctionsContracts, + FunctionsFactories, + FunctionsRoles, + ids, + createSubscription, + 
encodeReport, + stringToHex, + getEventArg, +} from './utils' + +const setup = getSetupFactory() +let contracts: FunctionsContracts +let factories: FunctionsFactories +let roles: FunctionsRoles + +beforeEach(async () => { + ;({ contracts, factories, roles } = setup()) +}) + +describe('FunctionsRouter - Base', () => { + describe('Updates', () => { + it('One or more contracts on a route can be updated by the owner', async () => { + const coordinator2 = await factories.functionsCoordinatorFactory + .connect(roles.defaultAccount) + .deploy( + contracts.router.address, + coordinatorConfig, + contracts.mockLinkEth.address, + ) + const coordinator3 = await factories.functionsCoordinatorFactory + .connect(roles.defaultAccount) + .deploy( + contracts.router.address, + coordinatorConfig, + contracts.mockLinkEth.address, + ) + const coordinator4 = await factories.functionsCoordinatorFactory + .connect(roles.defaultAccount) + .deploy( + contracts.router.address, + coordinatorConfig, + contracts.mockLinkEth.address, + ) + + await expect( + contracts.router['getContractById(bytes32)'](ids.donId2), + ).to.be.revertedWith('RouteNotFound') + await expect( + contracts.router['getContractById(bytes32)'](ids.donId3), + ).to.be.revertedWith('RouteNotFound') + await expect( + contracts.router['getContractById(bytes32)'](ids.donId4), + ).to.be.revertedWith('RouteNotFound') + await expect( + contracts.router.proposeContractsUpdate( + [ids.donId2, ids.donId3, ids.donId4], + [coordinator2.address, coordinator3.address, coordinator4.address], + ), + ).to.emit(contracts.router, `ContractProposed`) + + const subscriptionId = await createSubscription( + roles.subOwner, + [contracts.client.address], + contracts.router, + contracts.accessControl, + contracts.linkToken, + ) + + const requestProposedTx = await contracts.client.sendRequestProposed( + `return 'hello world'`, + subscriptionId, + ids.donId2, + ) + + const { events } = await requestProposedTx.wait() + const requestId = 
getEventArg(events, 'RequestSent', 0) + + const oracleRequestEvent = await coordinator2.queryFilter( + contracts.coordinator.filters.OracleRequest(), + ) + const onchainMetadata = oracleRequestEvent[0].args?.['commitment'] + const report = await encodeReport( + ethers.utils.hexZeroPad(requestId, 32), + stringToHex('hello world'), + stringToHex(''), + onchainMetadata, + stringToHex(''), + ) + + await expect( + coordinator2 + .connect(roles.oracleNode) + .callReport(report, { gasLimit: 500_000 }), + ).to.emit(contracts.client, 'FulfillRequestInvoked') + + await expect(contracts.router.updateContracts()).to.emit( + contracts.router, + 'ContractUpdated', + ) + expect( + await contracts.router['getContractById(bytes32)'](ids.donId2), + ).to.equal(coordinator2.address) + expect( + await contracts.router['getContractById(bytes32)'](ids.donId3), + ).to.equal(coordinator3.address) + expect( + await contracts.router['getContractById(bytes32)'](ids.donId4), + ).to.equal(coordinator4.address) + }) + + it('non-owner is unable to propose contract updates', async () => { + await expect( + contracts.router + .connect(roles.stranger) + .proposeContractsUpdate([ids.donId], [contracts.coordinator.address]), + ).to.be.revertedWith('Only callable by owner') + }) + + it('non-owner is unable to apply contract updates', async () => { + await expect( + contracts.router.connect(roles.stranger).updateContracts(), + ).to.be.revertedWith('Only callable by owner') + }) + }) + + describe('Emergency Pause', () => { + it('has paused state visible', async () => { + const paused = await contracts.router.paused() + expect(paused).to.equal(false) + }) + it('can pause the system', async () => { + const subscriptionId = await createSubscription( + roles.subOwner, + [contracts.client.address], + contracts.router, + contracts.accessControl, + contracts.linkToken, + ) + + await contracts.router.pause() + + await expect( + contracts.client.sendSimpleRequestWithJavaScript( + `return 'hello world'`, + 
subscriptionId, + ids.donId, + 20_000, + ), + ).to.be.revertedWith('Pausable: paused') + }) + }) +}) diff --git a/contracts/test/v0.8/functions/v1/TermsOfServiceAllowList.test.ts b/contracts/test/v0.8/functions/v1/TermsOfServiceAllowList.test.ts new file mode 100644 index 00000000..1e1ad18a --- /dev/null +++ b/contracts/test/v0.8/functions/v1/TermsOfServiceAllowList.test.ts @@ -0,0 +1,166 @@ +import { ethers } from 'hardhat' +import { expect } from 'chai' +import { + getSetupFactory, + FunctionsContracts, + FunctionsRoles, + acceptTermsOfService, + accessControlMockPrivateKey, + accessControlConfig, +} from './utils' + +const setup = getSetupFactory() +let contracts: FunctionsContracts +let roles: FunctionsRoles + +beforeEach(async () => { + ;({ contracts, roles } = setup()) +}) + +describe('ToS Access Control', () => { + describe('Config', () => { + it('non-owner is unable to update config', async () => { + await expect( + contracts.accessControl + .connect(roles.stranger) + .updateConfig(accessControlConfig), + ).to.be.revertedWith('Only callable by owner') + }) + + it('Owner can update config', async () => { + const beforeConfig = await contracts.accessControl.getConfig() + await expect( + contracts.accessControl.updateConfig({ + ...accessControlConfig, + enabled: false, + }), + ).to.emit(contracts.accessControl, 'ConfigUpdated') + const afterConfig = await contracts.accessControl.getConfig() + expect(beforeConfig).to.not.equal(afterConfig) + }) + it('returns the config set', async () => { + const config = await contracts.accessControl + .connect(roles.stranger) + .getConfig() + await Promise.all( + Object.keys(accessControlConfig).map((key) => { + expect(config[key]).to.equal( + accessControlConfig[key as keyof typeof accessControlConfig], + ) + }), + ) + }) + }) + + describe('Accepting', () => { + it('can only be done with a valid signature', async () => { + const message = await contracts.accessControl.getMessage( + roles.strangerAddress, + 
roles.strangerAddress, + ) + const flatSignature = await roles.stranger.signMessage( + ethers.utils.arrayify(message), + ) + const { r, s, v } = ethers.utils.splitSignature(flatSignature) + await expect( + contracts.accessControl + .connect(roles.stranger) + .acceptTermsOfService( + roles.strangerAddress, + roles.strangerAddress, + r, + s, + v, + ), + ).to.be.revertedWith('InvalidSignature') + }) + it('can be done by Externally Owned Accounts if recipient themself', async () => { + await acceptTermsOfService( + contracts.accessControl, + roles.subOwner, + roles.subOwnerAddress, + ) + expect( + await contracts.accessControl.hasAccess(roles.subOwnerAddress, '0x'), + ).to.equal(true) + }) + it('cannot be done by Externally Owned Accounts if recipient another EoA', async () => { + await expect( + acceptTermsOfService( + contracts.accessControl, + roles.subOwner, + roles.strangerAddress, + ), + ).to.be.revertedWith('InvalidUsage') + }) + it('can be done by Contract Accounts if recipient themself', async () => { + const acceptorAddress = roles.consumerAddress + const recipientAddress = contracts.client.address + const message = await contracts.accessControl.getMessage( + acceptorAddress, + recipientAddress, + ) + const wallet = new ethers.Wallet(accessControlMockPrivateKey) + const flatSignature = await wallet.signMessage( + ethers.utils.arrayify(message), + ) + const { r, s, v } = ethers.utils.splitSignature(flatSignature) + await contracts.client + .connect(roles.consumer) + .acceptTermsOfService(acceptorAddress, recipientAddress, r, s, v) + + expect( + await contracts.accessControl.hasAccess(recipientAddress, '0x'), + ).to.equal(true) + }) + it('cannot be done by Contract Accounts that if they are not the recipient', async () => { + const acceptorAddress = roles.consumerAddress + const recipientAddress = contracts.coordinator.address + const message = await contracts.accessControl.getMessage( + acceptorAddress, + recipientAddress, + ) + const wallet = new 
ethers.Wallet(accessControlMockPrivateKey) + const flatSignature = await wallet.signMessage( + ethers.utils.arrayify(message), + ) + const { r, s, v } = ethers.utils.splitSignature(flatSignature) + await expect( + contracts.client + .connect(roles.consumer) + .acceptTermsOfService(acceptorAddress, recipientAddress, r, s, v), + ).to.be.revertedWith('InvalidUsage') + }) + }) + + describe('Blocking', () => { + it('can only be done by the Router Owner', async () => { + await expect( + contracts.accessControl + .connect(roles.stranger) + .blockSender(roles.subOwnerAddress), + ).to.be.revertedWith('Only callable by owner') + }) + it('removes the ability to re-accept the terms of service', async () => { + await contracts.accessControl.blockSender(roles.subOwnerAddress) + await expect( + acceptTermsOfService( + contracts.accessControl, + roles.subOwner, + roles.subOwnerAddress, + ), + ).to.be.revertedWith('RecipientIsBlocked') + }) + it('removes the ability to manage subscriptions', async () => { + await acceptTermsOfService( + contracts.accessControl, + roles.subOwner, + roles.subOwnerAddress, + ) + await contracts.accessControl.blockSender(roles.subOwnerAddress) + await expect( + contracts.router.connect(roles.subOwner).createSubscription(), + ).to.be.revertedWith('SenderMustAcceptTermsOfService') + }) + }) +}) diff --git a/contracts/test/v0.8/functions/v1/utils.ts b/contracts/test/v0.8/functions/v1/utils.ts new file mode 100644 index 00000000..37dbca80 --- /dev/null +++ b/contracts/test/v0.8/functions/v1/utils.ts @@ -0,0 +1,336 @@ +import { ethers } from 'hardhat' +import { BigNumber, ContractFactory, Signer, Contract, providers } from 'ethers' +import { Roles, getUsers } from '../../../test-helpers/setup' +import { EventFragment } from 'ethers/lib/utils' + +export type FunctionsRoles = Roles & { + subOwner: Signer + subOwnerAddress: string + consumer: Signer + consumerAddress: string + stranger: Signer + strangerAddress: string +} + +export type FunctionsFactories = { 
+ functionsRouterFactory: ContractFactory + functionsCoordinatorFactory: ContractFactory + clientTestHelperFactory: ContractFactory + linkTokenFactory: ContractFactory + mockAggregatorV3Factory: ContractFactory + accessControlFactory: ContractFactory +} +export type FunctionsContracts = { + router: Contract + coordinator: Contract + client: Contract + linkToken: Contract + mockLinkEth: Contract + accessControl: Contract +} + +export const ids = { + routerId: ethers.utils.formatBytes32String(''), + donId: ethers.utils.formatBytes32String('1'), + donId2: ethers.utils.formatBytes32String('2'), + donId3: ethers.utils.formatBytes32String('3'), + donId4: ethers.utils.formatBytes32String('4'), + donId5: ethers.utils.formatBytes32String('5'), +} + +export const anyValue = () => true + +export const stringToHex = (s: string) => { + return ethers.utils.hexlify(ethers.utils.toUtf8Bytes(s)) +} + +export const encodeReport = async ( + requestId: string, + result: string, + err: string, + onchainMetadata: any, + offchainMetadata: string, +) => { + const functionsResponse = await ethers.getContractFactory( + 'src/v0.8/functions/dev/v1_X/FunctionsCoordinator.sol:FunctionsCoordinator', + ) + const onchainMetadataBytes = functionsResponse.interface._abiCoder.encode( + [ + getEventInputs( + Object.values(functionsResponse.interface.events), + 'OracleRequest', + 9, + ), + ], + [[...onchainMetadata]], + ) + const abi = ethers.utils.defaultAbiCoder + return abi.encode( + ['bytes32[]', 'bytes[]', 'bytes[]', 'bytes[]', 'bytes[]'], + [[requestId], [result], [err], [onchainMetadataBytes], [offchainMetadata]], + ) +} + +export type FunctionsRouterConfig = { + maxConsumersPerSubscription: number + adminFee: number + handleOracleFulfillmentSelector: string + maxCallbackGasLimits: number[] + gasForCallExactCheck: number + subscriptionDepositMinimumRequests: number + subscriptionDepositJuels: BigNumber +} +export const functionsRouterConfig: FunctionsRouterConfig = { + 
maxConsumersPerSubscription: 100, + adminFee: 0, + handleOracleFulfillmentSelector: '0x0ca76175', + maxCallbackGasLimits: [300_000, 500_000, 1_000_000], + gasForCallExactCheck: 5000, + subscriptionDepositMinimumRequests: 10, + subscriptionDepositJuels: BigNumber.from('1000000000000000000'), +} +export type CoordinatorConfig = { + feedStalenessSeconds: number + gasOverheadBeforeCallback: number + gasOverheadAfterCallback: number + requestTimeoutSeconds: number + donFee: number + maxSupportedRequestDataVersion: number + fulfillmentGasPriceOverEstimationBP: number + fallbackNativePerUnitLink: BigNumber + minimumEstimateGasPriceWei: number +} +const fallbackNativePerUnitLink = 5000000000000000 +export const coordinatorConfig: CoordinatorConfig = { + feedStalenessSeconds: 86_400, + gasOverheadBeforeCallback: 44_615, + gasOverheadAfterCallback: 44_615, + requestTimeoutSeconds: 300, + donFee: 0, + maxSupportedRequestDataVersion: 1, + fulfillmentGasPriceOverEstimationBP: 0, + fallbackNativePerUnitLink: BigNumber.from(fallbackNativePerUnitLink), + minimumEstimateGasPriceWei: 1000000000, +} +export const accessControlMockPublicKey = ethers.utils.getAddress( + '0x32237412cC0321f56422d206e505dB4B3871AF5c', +) +export const accessControlMockPrivateKey = + '2e8c8eaff4159e59711b42424c1555af1b78409e12c6f9c69a6a986d75442b20' +export type AccessControlConfig = { + enabled: boolean + signerPublicKey: string // address +} +export const accessControlConfig: AccessControlConfig = { + enabled: true, + signerPublicKey: accessControlMockPublicKey, +} + +export async function setupRolesAndFactories(): Promise<{ + roles: FunctionsRoles + factories: FunctionsFactories +}> { + const roles = (await getUsers()).roles + const functionsRouterFactory = await ethers.getContractFactory( + 'src/v0.8/functions/dev/v1_X/FunctionsRouter.sol:FunctionsRouter', + roles.defaultAccount, + ) + const functionsCoordinatorFactory = await ethers.getContractFactory( + 
'src/v0.8/functions/tests/v1_X/testhelpers/FunctionsCoordinatorTestHelper.sol:FunctionsCoordinatorTestHelper', + roles.defaultAccount, + ) + const accessControlFactory = await ethers.getContractFactory( + 'src/v0.8/functions/dev/v1_X/accessControl/TermsOfServiceAllowList.sol:TermsOfServiceAllowList', + roles.defaultAccount, + ) + const clientTestHelperFactory = await ethers.getContractFactory( + 'src/v0.8/functions/tests/v1_X/testhelpers/FunctionsClientTestHelper.sol:FunctionsClientTestHelper', + roles.consumer, + ) + const linkTokenFactory = await ethers.getContractFactory( + 'src/v0.8/mocks/MockLinkToken.sol:MockLinkToken', + roles.defaultAccount, + ) + const mockAggregatorV3Factory = await ethers.getContractFactory( + 'src/v0.8/tests/MockV3Aggregator.sol:MockV3Aggregator', + roles.defaultAccount, + ) + return { + roles: { + ...roles, + subOwner: roles.consumer, + subOwnerAddress: await roles.consumer.getAddress(), + consumer: roles.consumer2, + consumerAddress: await roles.consumer2.getAddress(), + stranger: roles.stranger, + strangerAddress: await roles.stranger.getAddress(), + }, + factories: { + functionsRouterFactory, + functionsCoordinatorFactory, + clientTestHelperFactory, + linkTokenFactory, + mockAggregatorV3Factory, + accessControlFactory, + }, + } +} + +export async function acceptTermsOfService( + accessControl: Contract, + acceptor: Signer, + recipientAddress: string, +) { + const acceptorAddress = await acceptor.getAddress() + const message = await accessControl.getMessage( + acceptorAddress, + recipientAddress, + ) + const wallet = new ethers.Wallet(accessControlMockPrivateKey) + const flatSignature = await wallet.signMessage(ethers.utils.arrayify(message)) + const { r, s, v } = ethers.utils.splitSignature(flatSignature) + return accessControl + .connect(acceptor) + .acceptTermsOfService(acceptorAddress, recipientAddress, r, s, v) +} + +export async function createSubscription( + owner: Signer, + consumers: string[], + router: Contract, + 
accessControl: Contract, + linkToken?: Contract, +): Promise { + const ownerAddress = await owner.getAddress() + await acceptTermsOfService(accessControl, owner, ownerAddress) + const tx = await router.connect(owner).createSubscription() + const receipt = await tx.wait() + const subId = receipt.events[0].args['subscriptionId'].toNumber() + for (let i = 0; i < consumers.length; i++) { + await router.connect(owner).addConsumer(subId, consumers[i]) + } + if (linkToken) { + await linkToken + .connect(owner) + .transferAndCall( + router.address, + BigNumber.from('1000000000000000000'), + ethers.utils.defaultAbiCoder.encode(['uint64'], [subId]), + ) + } + return subId +} + +export function getSetupFactory(): () => { + contracts: FunctionsContracts + factories: FunctionsFactories + roles: FunctionsRoles +} { + let contracts: FunctionsContracts + let factories: FunctionsFactories + let roles: FunctionsRoles + + before(async () => { + const { roles: r, factories: f } = await setupRolesAndFactories() + factories = f + roles = r + }) + + beforeEach(async () => { + const linkEthRate = BigNumber.from(5021530000000000) + + // Deploy + const linkToken = await factories.linkTokenFactory + .connect(roles.defaultAccount) + .deploy() + + const mockLinkEth = await factories.mockAggregatorV3Factory.deploy( + 0, + linkEthRate, + ) + + const router = await factories.functionsRouterFactory + .connect(roles.defaultAccount) + .deploy(linkToken.address, functionsRouterConfig) + + const coordinator = await factories.functionsCoordinatorFactory + .connect(roles.defaultAccount) + .deploy(router.address, coordinatorConfig, mockLinkEth.address) + + const initialAllowedSenders: string[] = [] + const initialBlockedSenders: string[] = [] + const accessControl = await factories.accessControlFactory + .connect(roles.defaultAccount) + .deploy(accessControlConfig, initialAllowedSenders, initialBlockedSenders) + + const client = await factories.clientTestHelperFactory + .connect(roles.consumer) + 
.deploy(router.address) + + // Setup accounts + await linkToken.transfer( + roles.subOwnerAddress, + BigNumber.from('1000000000000000000'), // 1 PLI + ) + await linkToken.transfer( + roles.strangerAddress, + BigNumber.from('1000000000000000000'), // 1 PLI + ) + + const allowListId = await router.getAllowListId() + await router.proposeContractsUpdate( + [ids.donId, allowListId], + [coordinator.address, accessControl.address], + ) + await router.updateContracts() + + contracts = { + client, + coordinator, + router, + linkToken, + mockLinkEth, + accessControl, + } + }) + + return () => { + return { contracts, factories, roles } + } +} + +export function getEventArg(events: any, eventName: string, argIndex: number) { + if (Array.isArray(events)) { + const event = events.find((e: any) => e.event === eventName) + if (event && Array.isArray(event.args) && event.args.length > 0) { + return event.args[argIndex] + } + } + return undefined +} + +export function getEventInputs( + events: EventFragment[], + eventName: string, + argIndex: number, +) { + if (Array.isArray(events)) { + const event = events.find((e) => e.name.includes(eventName)) + if (event && Array.isArray(event.inputs) && event.inputs.length > 0) { + return event.inputs[argIndex] + } + } + throw 'Not found' +} + +export async function parseOracleRequestEventArgs( + tx: providers.TransactionResponse, +) { + const receipt = await tx.wait() + const data = receipt.logs?.[1].data + // NOTE: indexed args are on topics, not data + return ethers.utils.defaultAbiCoder.decode( + ['address', 'uint64', 'address', 'bytes', 'uint16'], + data ?? 
'', + ) +} diff --git a/contracts/test/v0.8/operatorforwarder/AuthorizedForwarder.test.ts b/contracts/test/v0.8/operatorforwarder/AuthorizedForwarder.test.ts new file mode 100644 index 00000000..368d60a4 --- /dev/null +++ b/contracts/test/v0.8/operatorforwarder/AuthorizedForwarder.test.ts @@ -0,0 +1,724 @@ +import { ethers } from 'hardhat' +import { publicAbi } from '../../test-helpers/helpers' +import { assert, expect } from 'chai' +import { Contract, ContractFactory, ContractReceipt } from 'ethers' +import { getUsers, Roles } from '../../test-helpers/setup' +import { evmRevert } from '../../test-helpers/matchers' + +let getterSetterFactory: ContractFactory +let forwarderFactory: ContractFactory +let brokenFactory: ContractFactory +let linkTokenFactory: ContractFactory + +let roles: Roles +const zeroAddress = ethers.constants.AddressZero + +before(async () => { + const users = await getUsers() + + roles = users.roles + getterSetterFactory = await ethers.getContractFactory( + 'src/v0.4/tests/GetterSetter.sol:GetterSetter', + roles.defaultAccount, + ) + brokenFactory = await ethers.getContractFactory( + 'src/v0.8/tests/Broken.sol:Broken', + roles.defaultAccount, + ) + forwarderFactory = await ethers.getContractFactory( + 'src/v0.8/operatorforwarder/dev/AuthorizedForwarder.sol:AuthorizedForwarder', + roles.defaultAccount, + ) + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + roles.defaultAccount, + ) +}) + +describe('AuthorizedForwarder', () => { + let link: Contract + let forwarder: Contract + + beforeEach(async () => { + link = await linkTokenFactory.connect(roles.defaultAccount).deploy() + forwarder = await forwarderFactory + .connect(roles.defaultAccount) + .deploy( + link.address, + await roles.defaultAccount.getAddress(), + zeroAddress, + '0x', + ) + }) + + it('has a limited public interface [ @skip-coverage ]', () => { + publicAbi(forwarder, [ + 'forward', + 'multiForward', + 'getAuthorizedSenders', + 'linkToken', + 
'isAuthorizedSender', + 'ownerForward', + 'setAuthorizedSenders', + 'transferOwnershipWithMessage', + 'typeAndVersion', + // ConfirmedOwner + 'transferOwnership', + 'acceptOwnership', + 'owner', + ]) + }) + + describe('#typeAndVersion', () => { + it('describes the authorized forwarder', async () => { + assert.equal( + await forwarder.typeAndVersion(), + 'AuthorizedForwarder 1.1.0', + ) + }) + }) + + describe('deployment', () => { + it('sets the correct link token', async () => { + assert.equal(await forwarder.linkToken(), link.address) + }) + + it('reverts on zeroAddress value for link token', async () => { + await evmRevert( + forwarderFactory.connect(roles.defaultAccount).deploy( + zeroAddress, // Link Address + await roles.defaultAccount.getAddress(), + zeroAddress, + '0x', + ), + ) + }) + + it('sets no authorized senders', async () => { + const senders = await forwarder.getAuthorizedSenders() + assert.equal(senders.length, 0) + }) + }) + + describe('#setAuthorizedSenders', () => { + let newSenders: string[] + let receipt: ContractReceipt + describe('when called by the owner', () => { + describe('set authorized senders containing duplicate/s', () => { + beforeEach(async () => { + newSenders = [ + await roles.oracleNode1.getAddress(), + await roles.oracleNode1.getAddress(), + await roles.oracleNode2.getAddress(), + await roles.oracleNode3.getAddress(), + ] + }) + it('reverts with a must not have duplicate senders message', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .setAuthorizedSenders(newSenders), + 'Must not have duplicate senders', + ) + }) + }) + + describe('setting 3 authorized senders', () => { + beforeEach(async () => { + newSenders = [ + await roles.oracleNode1.getAddress(), + await roles.oracleNode2.getAddress(), + await roles.oracleNode3.getAddress(), + ] + const tx = await forwarder + .connect(roles.defaultAccount) + .setAuthorizedSenders(newSenders) + receipt = await tx.wait() + }) + + it('adds the authorized 
nodes', async () => { + const authorizedSenders = await forwarder.getAuthorizedSenders() + assert.equal(newSenders.length, authorizedSenders.length) + for (let i = 0; i < authorizedSenders.length; i++) { + assert.equal(authorizedSenders[i], newSenders[i]) + } + }) + + it('emits an event', async () => { + assert.equal(receipt.events?.length, 1) + const responseEvent = receipt.events?.[0] + assert.equal(responseEvent?.event, 'AuthorizedSendersChanged') + const encodedSenders = ethers.utils.defaultAbiCoder.encode( + ['address[]', 'address'], + [newSenders, await roles.defaultAccount.getAddress()], + ) + assert.equal(responseEvent?.data, encodedSenders) + }) + + it('replaces the authorized nodes', async () => { + const newSenders = await forwarder + .connect(roles.defaultAccount) + .getAuthorizedSenders() + assert.notIncludeOrderedMembers(newSenders, [ + await roles.oracleNode.getAddress(), + ]) + }) + + after(async () => { + await forwarder + .connect(roles.defaultAccount) + .setAuthorizedSenders([await roles.oracleNode.getAddress()]) + }) + }) + + describe('setting 0 authorized senders', () => { + beforeEach(async () => { + newSenders = [] + }) + + it('reverts with a minimum senders message', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .setAuthorizedSenders(newSenders), + 'Must have at least 1 sender', + ) + }) + }) + }) + + describe('when called by a non-owner', () => { + it('cannot add an authorized node', async () => { + await evmRevert( + forwarder + .connect(roles.stranger) + .setAuthorizedSenders([await roles.stranger.getAddress()]), + 'Cannot set authorized senders', + ) + }) + }) + }) + + describe('#forward', () => { + let bytes: string + let payload: string + let mock: Contract + + beforeEach(async () => { + mock = await getterSetterFactory.connect(roles.defaultAccount).deploy() + bytes = ethers.utils.hexlify(ethers.utils.randomBytes(100)) + payload = getterSetterFactory.interface.encodeFunctionData( + 
getterSetterFactory.interface.getFunction('setBytes'), + [bytes], + ) + }) + + describe('when called by an unauthorized node', () => { + it('reverts', async () => { + await evmRevert( + forwarder.connect(roles.stranger).forward(mock.address, payload), + ) + }) + }) + + describe('when called by an authorized node', () => { + beforeEach(async () => { + await forwarder + .connect(roles.defaultAccount) + .setAuthorizedSenders([await roles.defaultAccount.getAddress()]) + }) + + describe('when destination call reverts', () => { + let brokenMock: Contract + let brokenPayload: string + let brokenMsgPayload: string + + beforeEach(async () => { + brokenMock = await brokenFactory + .connect(roles.defaultAccount) + .deploy() + brokenMsgPayload = brokenFactory.interface.encodeFunctionData( + brokenFactory.interface.getFunction('revertWithMessage'), + ['Failure message'], + ) + + brokenPayload = brokenFactory.interface.encodeFunctionData( + brokenFactory.interface.getFunction('revertSilently'), + [], + ) + }) + + describe('when reverts with message', () => { + it('return revert message', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .forward(brokenMock.address, brokenMsgPayload), + "reverted with reason string 'Failure message'", + ) + }) + }) + + describe('when reverts without message', () => { + it('return silent failure message', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .forward(brokenMock.address, brokenPayload), + 'Forwarded call reverted without reason', + ) + }) + }) + }) + + describe('when sending to a non-contract address', () => { + it('reverts', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .forward(zeroAddress, payload), + 'Must forward to a contract', + ) + }) + }) + + describe('when attempting to forward to the link token', () => { + it('reverts', async () => { + const sighash = linkTokenFactory.interface.getSighash('name') // any Link Token function + await 
evmRevert( + forwarder + .connect(roles.defaultAccount) + .forward(link.address, sighash), + ) + }) + }) + + describe('when forwarding to any other address', () => { + it('forwards the data', async () => { + const tx = await forwarder + .connect(roles.defaultAccount) + .forward(mock.address, payload) + await tx.wait() + assert.equal(await mock.getBytes(), bytes) + }) + + it('perceives the message is sent by the AuthorizedForwarder', async () => { + const tx = await forwarder + .connect(roles.defaultAccount) + .forward(mock.address, payload) + await expect(tx) + .to.emit(mock, 'SetBytes') + .withArgs(forwarder.address, bytes) + }) + }) + }) + }) + + describe('#multiForward', () => { + let bytes: string + let payload: string + let mock: Contract + + beforeEach(async () => { + mock = await getterSetterFactory.connect(roles.defaultAccount).deploy() + bytes = ethers.utils.hexlify(ethers.utils.randomBytes(100)) + payload = getterSetterFactory.interface.encodeFunctionData( + getterSetterFactory.interface.getFunction('setBytes'), + [bytes], + ) + }) + + describe('when called by an unauthorized node', () => { + it('reverts', async () => { + await evmRevert( + forwarder + .connect(roles.stranger) + .multiForward([mock.address], [payload]), + ) + }) + }) + + describe('when it receives a single call by an authorized node', () => { + beforeEach(async () => { + await forwarder + .connect(roles.defaultAccount) + .setAuthorizedSenders([await roles.defaultAccount.getAddress()]) + }) + + describe('when destination call reverts', () => { + let brokenMock: Contract + let brokenPayload: string + let brokenMsgPayload: string + + beforeEach(async () => { + brokenMock = await brokenFactory + .connect(roles.defaultAccount) + .deploy() + brokenMsgPayload = brokenFactory.interface.encodeFunctionData( + brokenFactory.interface.getFunction('revertWithMessage'), + ['Failure message'], + ) + + brokenPayload = brokenFactory.interface.encodeFunctionData( + 
brokenFactory.interface.getFunction('revertSilently'), + [], + ) + }) + + describe('when reverts with message', () => { + it('return revert message', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .multiForward([brokenMock.address], [brokenMsgPayload]), + "reverted with reason string 'Failure message'", + ) + }) + }) + + describe('when reverts without message', () => { + it('return silent failure message', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .multiForward([brokenMock.address], [brokenPayload]), + 'Forwarded call reverted without reason', + ) + }) + }) + }) + + describe('when sending to a non-contract address', () => { + it('reverts', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .multiForward([zeroAddress], [payload]), + 'Must forward to a contract', + ) + }) + }) + + describe('when attempting to forward to the link token', () => { + it('reverts', async () => { + const sighash = linkTokenFactory.interface.getSighash('name') // any Link Token function + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .multiForward([link.address], [sighash]), + ) + }) + }) + + describe('when forwarding to any other address', () => { + it('forwards the data', async () => { + const tx = await forwarder + .connect(roles.defaultAccount) + .multiForward([mock.address], [payload]) + await tx.wait() + assert.equal(await mock.getBytes(), bytes) + }) + + it('perceives the message is sent by the AuthorizedForwarder', async () => { + const tx = await forwarder + .connect(roles.defaultAccount) + .multiForward([mock.address], [payload]) + await expect(tx) + .to.emit(mock, 'SetBytes') + .withArgs(forwarder.address, bytes) + }) + }) + }) + + describe('when its called by an authorized node', () => { + beforeEach(async () => { + await forwarder + .connect(roles.defaultAccount) + .setAuthorizedSenders([await roles.defaultAccount.getAddress()]) + }) + + describe('when 1/1 calls 
reverts', () => { + let brokenMock: Contract + let brokenPayload: string + let brokenMsgPayload: string + + beforeEach(async () => { + brokenMock = await brokenFactory + .connect(roles.defaultAccount) + .deploy() + brokenMsgPayload = brokenFactory.interface.encodeFunctionData( + brokenFactory.interface.getFunction('revertWithMessage'), + ['Failure message'], + ) + + brokenPayload = brokenFactory.interface.encodeFunctionData( + brokenFactory.interface.getFunction('revertSilently'), + [], + ) + }) + + describe('when reverts with message', () => { + it('return revert message', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .multiForward([brokenMock.address], [brokenMsgPayload]), + "reverted with reason string 'Failure message'", + ) + }) + }) + + describe('when reverts without message', () => { + it('return silent failure message', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .multiForward([brokenMock.address], [brokenPayload]), + 'Forwarded call reverted without reason', + ) + }) + }) + }) + + describe('when 1/many calls revert', () => { + let brokenMock: Contract + let brokenPayload: string + let brokenMsgPayload: string + + beforeEach(async () => { + brokenMock = await brokenFactory + .connect(roles.defaultAccount) + .deploy() + brokenMsgPayload = brokenFactory.interface.encodeFunctionData( + brokenFactory.interface.getFunction('revertWithMessage'), + ['Failure message'], + ) + + brokenPayload = brokenFactory.interface.encodeFunctionData( + brokenFactory.interface.getFunction('revertSilently'), + [], + ) + }) + + describe('when reverts with message', () => { + it('return revert message', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .multiForward( + [brokenMock.address, mock.address], + [brokenMsgPayload, payload], + ), + "reverted with reason string 'Failure message'", + ) + + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .multiForward( + 
[mock.address, brokenMock.address], + [payload, brokenMsgPayload], + ), + "reverted with reason string 'Failure message'", + ) + }) + }) + + describe('when reverts without message', () => { + it('return silent failure message', async () => { + await evmRevert( + // first + forwarder + .connect(roles.defaultAccount) + .multiForward( + [brokenMock.address, mock.address], + [brokenPayload, payload], + ), + 'Forwarded call reverted without reason', + ) + + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .multiForward( + [mock.address, brokenMock.address], + [payload, brokenPayload], + ), + 'Forwarded call reverted without reason', + ) + }) + }) + }) + + describe('when sending to a non-contract address', () => { + it('reverts', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .multiForward([zeroAddress], [payload]), + 'Must forward to a contract', + ) + }) + }) + + describe('when attempting to forward to the link token', () => { + it('reverts', async () => { + const sighash = linkTokenFactory.interface.getSighash('name') // any Link Token function + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .multiForward([link.address], [sighash]), + ) + }) + }) + + describe('when forwarding to any other address', () => { + it('forwards the data', async () => { + const tx = await forwarder + .connect(roles.defaultAccount) + .multiForward([mock.address], [payload]) + await tx.wait() + assert.equal(await mock.getBytes(), bytes) + }) + + it('perceives the message is sent by the AuthorizedForwarder', async () => { + const tx = await forwarder + .connect(roles.defaultAccount) + .multiForward([mock.address], [payload]) + await expect(tx) + .to.emit(mock, 'SetBytes') + .withArgs(forwarder.address, bytes) + }) + }) + }) + }) + + describe('#transferOwnershipWithMessage', () => { + const message = '0x42' + + describe('when called by a non-owner', () => { + it('reverts', async () => { + await evmRevert( + forwarder + 
.connect(roles.stranger) + .transferOwnershipWithMessage( + await roles.stranger.getAddress(), + message, + ), + 'Only callable by owner', + ) + }) + }) + + describe('when called by the owner', () => { + it('calls the normal ownership transfer proposal', async () => { + const tx = await forwarder + .connect(roles.defaultAccount) + .transferOwnershipWithMessage( + await roles.stranger.getAddress(), + message, + ) + const receipt = await tx.wait() + + assert.equal(receipt?.events?.[0]?.event, 'OwnershipTransferRequested') + assert.equal(receipt?.events?.[0]?.address, forwarder.address) + assert.equal( + receipt?.events?.[0]?.args?.[0], + await roles.defaultAccount.getAddress(), + ) + assert.equal( + receipt?.events?.[0]?.args?.[1], + await roles.stranger.getAddress(), + ) + }) + + it('calls the normal ownership transfer proposal', async () => { + const tx = await forwarder + .connect(roles.defaultAccount) + .transferOwnershipWithMessage( + await roles.stranger.getAddress(), + message, + ) + const receipt = await tx.wait() + + assert.equal( + receipt?.events?.[1]?.event, + 'OwnershipTransferRequestedWithMessage', + ) + assert.equal(receipt?.events?.[1]?.address, forwarder.address) + assert.equal( + receipt?.events?.[1]?.args?.[0], + await roles.defaultAccount.getAddress(), + ) + assert.equal( + receipt?.events?.[1]?.args?.[1], + await roles.stranger.getAddress(), + ) + assert.equal(receipt?.events?.[1]?.args?.[2], message) + }) + }) + }) + + describe('#ownerForward', () => { + let bytes: string + let payload: string + let mock: Contract + + beforeEach(async () => { + mock = await getterSetterFactory.connect(roles.defaultAccount).deploy() + bytes = ethers.utils.hexlify(ethers.utils.randomBytes(100)) + payload = getterSetterFactory.interface.encodeFunctionData( + getterSetterFactory.interface.getFunction('setBytes'), + [bytes], + ) + }) + + describe('when called by a non-owner', () => { + it('reverts', async () => { + await evmRevert( + 
forwarder.connect(roles.stranger).ownerForward(mock.address, payload), + ) + }) + }) + + describe('when called by owner', () => { + describe('when attempting to forward to the link token', () => { + it('does not revert', async () => { + const sighash = linkTokenFactory.interface.getSighash('name') // any Link Token function + + await forwarder + .connect(roles.defaultAccount) + .ownerForward(link.address, sighash) + }) + }) + + describe('when forwarding to any other address', () => { + it('forwards the data', async () => { + const tx = await forwarder + .connect(roles.defaultAccount) + .ownerForward(mock.address, payload) + await tx.wait() + assert.equal(await mock.getBytes(), bytes) + }) + + it('reverts when sending to a non-contract address', async () => { + await evmRevert( + forwarder + .connect(roles.defaultAccount) + .ownerForward(zeroAddress, payload), + 'Must forward to a contract', + ) + }) + + it('perceives the message is sent by the Operator', async () => { + const tx = await forwarder + .connect(roles.defaultAccount) + .ownerForward(mock.address, payload) + await expect(tx) + .to.emit(mock, 'SetBytes') + .withArgs(forwarder.address, bytes) + }) + }) + }) + }) +}) diff --git a/contracts/test/v0.8/operatorforwarder/ConfirmedOwner.test.ts b/contracts/test/v0.8/operatorforwarder/ConfirmedOwner.test.ts new file mode 100644 index 00000000..3bd34732 --- /dev/null +++ b/contracts/test/v0.8/operatorforwarder/ConfirmedOwner.test.ts @@ -0,0 +1,136 @@ +import { ethers } from 'hardhat' +import { publicAbi } from '../../test-helpers/helpers' +import { assert, expect } from 'chai' +import { Contract, ContractFactory, Signer } from 'ethers' +import { Personas, getUsers } from '../../test-helpers/setup' +import { evmRevert } from '../../test-helpers/matchers' + +let confirmedOwnerTestHelperFactory: ContractFactory +let confirmedOwnerFactory: ContractFactory + +let personas: Personas +let owner: Signer +let nonOwner: Signer +let newOwner: Signer + +before(async () => { + 
const users = await getUsers() + personas = users.personas + owner = personas.Carol + nonOwner = personas.Neil + newOwner = personas.Ned + + confirmedOwnerTestHelperFactory = await ethers.getContractFactory( + 'src/v0.7/tests/ConfirmedOwnerTestHelper.sol:ConfirmedOwnerTestHelper', + owner, + ) + confirmedOwnerFactory = await ethers.getContractFactory( + 'src/v0.8/shared/access/ConfirmedOwner.sol:ConfirmedOwner', + owner, + ) +}) + +describe('ConfirmedOwner', () => { + let confirmedOwner: Contract + + beforeEach(async () => { + confirmedOwner = await confirmedOwnerTestHelperFactory + .connect(owner) + .deploy() + }) + + it('has a limited public interface [ @skip-coverage ]', () => { + publicAbi(confirmedOwner, [ + 'acceptOwnership', + 'owner', + 'transferOwnership', + // test helper public methods + 'modifierOnlyOwner', + ]) + }) + + describe('#constructor', () => { + it('assigns ownership to the deployer', async () => { + const [actual, expected] = await Promise.all([ + owner.getAddress(), + confirmedOwner.owner(), + ]) + + assert.equal(actual, expected) + }) + + it('reverts if assigned to the zero address', async () => { + await evmRevert( + confirmedOwnerFactory + .connect(owner) + .deploy(ethers.constants.AddressZero), + 'Cannot set owner to zero', + ) + }) + }) + + describe('#onlyOwner modifier', () => { + describe('when called by an owner', () => { + it('successfully calls the method', async () => { + const tx = await confirmedOwner.connect(owner).modifierOnlyOwner() + await expect(tx).to.emit(confirmedOwner, 'Here') + }) + }) + + describe('when called by anyone but the owner', () => { + it('reverts', async () => + await evmRevert(confirmedOwner.connect(nonOwner).modifierOnlyOwner())) + }) + }) + + describe('#transferOwnership', () => { + describe('when called by an owner', () => { + it('emits a log', async () => { + const tx = await confirmedOwner + .connect(owner) + .transferOwnership(await newOwner.getAddress()) + await expect(tx) + .to.emit(confirmedOwner, 
'OwnershipTransferRequested') + .withArgs(await owner.getAddress(), await newOwner.getAddress()) + }) + + it('does not allow ownership transfer to self', async () => { + await evmRevert( + confirmedOwner + .connect(owner) + .transferOwnership(await owner.getAddress()), + 'Cannot transfer to self', + ) + }) + }) + }) + + describe('when called by anyone but the owner', () => { + it('reverts', async () => + await evmRevert( + confirmedOwner + .connect(nonOwner) + .transferOwnership(await newOwner.getAddress()), + )) + }) + + describe('#acceptOwnership', () => { + describe('after #transferOwnership has been called', () => { + beforeEach(async () => { + await confirmedOwner + .connect(owner) + .transferOwnership(await newOwner.getAddress()) + }) + + it('allows the recipient to call it', async () => { + const tx = await confirmedOwner.connect(newOwner).acceptOwnership() + await expect(tx) + .to.emit(confirmedOwner, 'OwnershipTransferred') + .withArgs(await owner.getAddress(), await newOwner.getAddress()) + }) + + it('does not allow a non-recipient to call it', async () => + await evmRevert(confirmedOwner.connect(nonOwner).acceptOwnership())) + }) + }) +}) diff --git a/contracts/test/v0.8/operatorforwarder/Operator.test.ts b/contracts/test/v0.8/operatorforwarder/Operator.test.ts new file mode 100644 index 00000000..7ef58ac4 --- /dev/null +++ b/contracts/test/v0.8/operatorforwarder/Operator.test.ts @@ -0,0 +1,3819 @@ +import { ethers } from 'hardhat' +import { + publicAbi, + toBytes32String, + toWei, + stringToBytes, + increaseTime5Minutes, + getLog, +} from '../../test-helpers/helpers' +import { assert, expect } from 'chai' +import { + BigNumber, + constants, + Contract, + ContractFactory, + ContractReceipt, + ContractTransaction, + Signer, +} from 'ethers' +import { getUsers, Roles } from '../../test-helpers/setup' +import { bigNumEquals, evmRevert } from '../../test-helpers/matchers' +import type { providers } from 'ethers' +import { + convertCancelParams, + 
convertCancelByRequesterParams, + convertFufillParams, + convertFulfill2Params, + decodeRunRequest, + encodeOracleRequest, + encodeRequestOracleData, + RunRequest, +} from '../../test-helpers/oracle' + +let v7ConsumerFactory: ContractFactory +let basicConsumerFactory: ContractFactory +let multiWordConsumerFactory: ContractFactory +let gasGuzzlingConsumerFactory: ContractFactory +let getterSetterFactory: ContractFactory +let maliciousRequesterFactory: ContractFactory +let maliciousConsumerFactory: ContractFactory +let maliciousMultiWordConsumerFactory: ContractFactory +let operatorFactory: ContractFactory +let forwarderFactory: ContractFactory +let linkTokenFactory: ContractFactory +const zeroAddress = ethers.constants.AddressZero + +let roles: Roles + +before(async () => { + const users = await getUsers() + + roles = users.roles + v7ConsumerFactory = await ethers.getContractFactory( + 'src/v0.7/tests/Consumer.sol:Consumer', + ) + basicConsumerFactory = await ethers.getContractFactory( + 'src/v0.6/tests/BasicConsumer.sol:BasicConsumer', + ) + multiWordConsumerFactory = await ethers.getContractFactory( + 'src/v0.7/tests/MultiWordConsumer.sol:MultiWordConsumer', + ) + gasGuzzlingConsumerFactory = await ethers.getContractFactory( + 'src/v0.6/tests/GasGuzzlingConsumer.sol:GasGuzzlingConsumer', + ) + getterSetterFactory = await ethers.getContractFactory( + 'src/v0.4/tests/GetterSetter.sol:GetterSetter', + ) + maliciousRequesterFactory = await ethers.getContractFactory( + 'src/v0.4/tests/MaliciousRequester.sol:MaliciousRequester', + ) + maliciousConsumerFactory = await ethers.getContractFactory( + 'src/v0.4/tests/MaliciousConsumer.sol:MaliciousConsumer', + ) + maliciousMultiWordConsumerFactory = await ethers.getContractFactory( + 'src/v0.6/tests/MaliciousMultiWordConsumer.sol:MaliciousMultiWordConsumer', + ) + operatorFactory = await ethers.getContractFactory( + 'src/v0.8/operatorforwarder/dev/Operator.sol:Operator', + ) + forwarderFactory = await 
ethers.getContractFactory( + 'src/v0.8/operatorforwarder/dev/AuthorizedForwarder.sol:AuthorizedForwarder', + ) + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + ) +}) + +describe('Operator', () => { + let fHash: string + let specId: string + let to: string + let link: Contract + let operator: Contract + let forwarder1: Contract + let forwarder2: Contract + let owner: Signer + + beforeEach(async () => { + fHash = getterSetterFactory.interface.getSighash('requestedBytes32') + specId = + '0x4c7b7ffb66b344fbaa64995af81e355a00000000000000000000000000000000' + to = '0x80e29acb842498fe6591f020bd82766dce619d43' + link = await linkTokenFactory.connect(roles.defaultAccount).deploy() + owner = roles.defaultAccount + operator = await operatorFactory + .connect(owner) + .deploy(link.address, await owner.getAddress()) + await operator + .connect(roles.defaultAccount) + .setAuthorizedSenders([await roles.oracleNode.getAddress()]) + }) + + it('has a limited public interface [ @skip-coverage ]', () => { + publicAbi(operator, [ + 'acceptAuthorizedReceivers', + 'acceptOwnableContracts', + 'cancelOracleRequest', + 'cancelOracleRequestByRequester', + 'distributeFunds', + 'fulfillOracleRequest', + 'fulfillOracleRequest2', + 'getAuthorizedSenders', + 'getPluginToken', + 'EXPIRYTIME', + 'isAuthorizedSender', + 'onTokenTransfer', + 'operatorRequest', + 'oracleRequest', + 'ownerForward', + 'ownerTransferAndCall', + 'setAuthorizedSenders', + 'setAuthorizedSendersOn', + 'transferOwnableContracts', + 'typeAndVersion', + 'withdraw', + 'withdrawable', + // Ownable methods: + 'acceptOwnership', + 'owner', + 'transferOwnership', + ]) + }) + + describe('#typeAndVersion', () => { + it('describes the operator', async () => { + assert.equal(await operator.typeAndVersion(), 'Operator 1.0.0') + }) + }) + + describe('#transferOwnableContracts', () => { + beforeEach(async () => { + forwarder1 = await forwarderFactory + .connect(owner) + .deploy(link.address, 
operator.address, zeroAddress, '0x') + forwarder2 = await forwarderFactory + .connect(owner) + .deploy(link.address, operator.address, zeroAddress, '0x') + }) + + describe('being called by the owner', () => { + it('cannot transfer to self', async () => { + await evmRevert( + operator + .connect(owner) + .transferOwnableContracts([forwarder1.address], operator.address), + 'Cannot transfer to self', + ) + }) + + it('emits an ownership transfer request event', async () => { + const tx = await operator + .connect(owner) + .transferOwnableContracts( + [forwarder1.address, forwarder2.address], + await roles.oracleNode1.getAddress(), + ) + const receipt = await tx.wait() + assert.equal(receipt?.events?.length, 2) + const log1 = receipt?.events?.[0] + assert.equal(log1?.event, 'OwnershipTransferRequested') + assert.equal(log1?.address, forwarder1.address) + assert.equal(log1?.args?.[0], operator.address) + assert.equal(log1?.args?.[1], await roles.oracleNode1.getAddress()) + const log2 = receipt?.events?.[1] + assert.equal(log2?.event, 'OwnershipTransferRequested') + assert.equal(log2?.address, forwarder2.address) + assert.equal(log2?.args?.[0], operator.address) + assert.equal(log2?.args?.[1], await roles.oracleNode1.getAddress()) + }) + }) + + describe('being called by a non-owner', () => { + it('reverts with message', async () => { + await evmRevert( + operator + .connect(roles.stranger) + .transferOwnableContracts( + [forwarder1.address], + await roles.oracleNode2.getAddress(), + ), + 'Only callable by owner', + ) + }) + }) + }) + + describe('#acceptOwnableContracts', () => { + describe('being called by the owner', () => { + let operator2: Contract + let receipt: ContractReceipt + + beforeEach(async () => { + operator2 = await operatorFactory + .connect(roles.defaultAccount) + .deploy(link.address, await roles.defaultAccount.getAddress()) + forwarder1 = await forwarderFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address, zeroAddress, '0x') 
+ forwarder2 = await forwarderFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address, zeroAddress, '0x') + await operator + .connect(roles.defaultAccount) + .transferOwnableContracts( + [forwarder1.address, forwarder2.address], + operator2.address, + ) + const tx = await operator2 + .connect(roles.defaultAccount) + .acceptOwnableContracts([forwarder1.address, forwarder2.address]) + receipt = await tx.wait() + }) + + it('sets the new owner on the forwarder', async () => { + assert.equal(await forwarder1.owner(), operator2.address) + }) + + it('emits ownership transferred events', async () => { + assert.equal(receipt?.events?.[0]?.event, 'OwnableContractAccepted') + assert.equal(receipt?.events?.[0]?.args?.[0], forwarder1.address) + + assert.equal(receipt?.events?.[1]?.event, 'OwnershipTransferred') + assert.equal(receipt?.events?.[1]?.address, forwarder1.address) + assert.equal(receipt?.events?.[1]?.args?.[0], operator.address) + assert.equal(receipt?.events?.[1]?.args?.[1], operator2.address) + + assert.equal(receipt?.events?.[2]?.event, 'OwnableContractAccepted') + assert.equal(receipt?.events?.[2]?.args?.[0], forwarder2.address) + + assert.equal(receipt?.events?.[3]?.event, 'OwnershipTransferred') + assert.equal(receipt?.events?.[3]?.address, forwarder2.address) + assert.equal(receipt?.events?.[3]?.args?.[0], operator.address) + assert.equal(receipt?.events?.[3]?.args?.[1], operator2.address) + }) + }) + + describe('being called by a non-owner authorized sender', () => { + it('does not revert', async () => { + await operator + .connect(roles.defaultAccount) + .setAuthorizedSenders([await roles.oracleNode1.getAddress()]) + + await operator.connect(roles.oracleNode1).acceptOwnableContracts([]) + }) + }) + + describe('being called by a non owner', () => { + it('reverts with message', async () => { + await evmRevert( + operator + .connect(roles.stranger) + .acceptOwnableContracts([await roles.oracleNode2.getAddress()]), + 'Cannot set 
authorized senders', + ) + }) + }) + }) + + describe('#distributeFunds', () => { + describe('when called with empty arrays', () => { + it('reverts with invalid array message', async () => { + await evmRevert( + operator.connect(roles.defaultAccount).distributeFunds([], []), + 'Invalid array length(s)', + ) + }) + }) + + describe('when called with unequal array lengths', () => { + it('reverts with invalid array message', async () => { + const receivers = [ + await roles.oracleNode2.getAddress(), + await roles.oracleNode3.getAddress(), + ] + const amounts = [1, 2, 3] + await evmRevert( + operator + .connect(roles.defaultAccount) + .distributeFunds(receivers, amounts), + 'Invalid array length(s)', + ) + }) + }) + + describe('when called with not enough ETH', () => { + it('reverts with subtraction overflow message', async () => { + const amountToSend = toWei('2') + const ethSent = toWei('1') + await evmRevert( + operator + .connect(roles.defaultAccount) + .distributeFunds( + [await roles.oracleNode2.getAddress()], + [amountToSend], + { + value: ethSent, + }, + ), + 'Arithmetic operation underflowed or overflowed outside of an unchecked block', + ) + }) + }) + + describe('when called with too much ETH', () => { + it('reverts with too much ETH message', async () => { + const amountToSend = toWei('2') + const ethSent = toWei('3') + await evmRevert( + operator + .connect(roles.defaultAccount) + .distributeFunds( + [await roles.oracleNode2.getAddress()], + [amountToSend], + { + value: ethSent, + }, + ), + 'Too much ETH sent', + ) + }) + }) + + describe('when called with correct values', () => { + it('updates the balances', async () => { + const node2BalanceBefore = await roles.oracleNode2.getBalance() + const node3BalanceBefore = await roles.oracleNode3.getBalance() + const receivers = [ + await roles.oracleNode2.getAddress(), + await roles.oracleNode3.getAddress(), + ] + const sendNode2 = toWei('2') + const sendNode3 = toWei('3') + const totalAmount = toWei('5') + const 
amounts = [sendNode2, sendNode3] + + await operator + .connect(roles.defaultAccount) + .distributeFunds(receivers, amounts, { value: totalAmount }) + + const node2BalanceAfter = await roles.oracleNode2.getBalance() + const node3BalanceAfter = await roles.oracleNode3.getBalance() + + assert.equal( + node2BalanceAfter.sub(node2BalanceBefore).toString(), + sendNode2.toString(), + ) + + assert.equal( + node3BalanceAfter.sub(node3BalanceBefore).toString(), + sendNode3.toString(), + ) + }) + }) + }) + + describe('#setAuthorizedSenders', () => { + let newSenders: string[] + let receipt: ContractReceipt + describe('when called by the owner', () => { + describe('setting 3 authorized senders', () => { + beforeEach(async () => { + newSenders = [ + await roles.oracleNode1.getAddress(), + await roles.oracleNode2.getAddress(), + await roles.oracleNode3.getAddress(), + ] + const tx = await operator + .connect(roles.defaultAccount) + .setAuthorizedSenders(newSenders) + receipt = await tx.wait() + }) + + it('adds the authorized nodes', async () => { + const authorizedSenders = await operator.getAuthorizedSenders() + assert.equal(newSenders.length, authorizedSenders.length) + for (let i = 0; i < authorizedSenders.length; i++) { + assert.equal(authorizedSenders[i], newSenders[i]) + } + }) + + it('emits an event on the Operator', async () => { + assert.equal(receipt.events?.length, 1) + + const encodedSenders1 = ethers.utils.defaultAbiCoder.encode( + ['address[]', 'address'], + [newSenders, await roles.defaultAccount.getAddress()], + ) + + const responseEvent1 = receipt.events?.[0] + assert.equal(responseEvent1?.event, 'AuthorizedSendersChanged') + assert.equal(responseEvent1?.data, encodedSenders1) + }) + + it('replaces the authorized nodes', async () => { + const originalAuthorization = await operator + .connect(roles.defaultAccount) + .isAuthorizedSender(await roles.oracleNode.getAddress()) + assert.isFalse(originalAuthorization) + }) + + after(async () => { + await operator + 
.connect(roles.defaultAccount) + .setAuthorizedSenders([await roles.oracleNode.getAddress()]) + }) + }) + + describe('setting 0 authorized senders', () => { + beforeEach(async () => { + newSenders = [] + }) + + it('reverts with a minimum senders message', async () => { + await evmRevert( + operator + .connect(roles.defaultAccount) + .setAuthorizedSenders(newSenders), + 'Must have at least 1 sender', + ) + }) + }) + }) + + describe('when called by an authorized sender', () => { + beforeEach(async () => { + newSenders = [await roles.oracleNode1.getAddress()] + await operator + .connect(roles.defaultAccount) + .setAuthorizedSenders(newSenders) + }) + + it('succeeds', async () => { + await operator + .connect(roles.defaultAccount) + .setAuthorizedSenders([await roles.stranger.getAddress()]) + }) + }) + + describe('when called by a non-owner', () => { + it('cannot add an authorized node', async () => { + await evmRevert( + operator + .connect(roles.stranger) + .setAuthorizedSenders([await roles.stranger.getAddress()]), + 'Cannot set authorized senders', + ) + }) + }) + }) + + describe('#setAuthorizedSendersOn', () => { + let newSenders: string[] + + beforeEach(async () => { + await operator + .connect(roles.defaultAccount) + .setAuthorizedSenders([await roles.oracleNode1.getAddress()]) + newSenders = [ + await roles.oracleNode2.getAddress(), + await roles.oracleNode3.getAddress(), + ] + + forwarder1 = await forwarderFactory + .connect(owner) + .deploy(link.address, operator.address, zeroAddress, '0x') + forwarder2 = await forwarderFactory + .connect(owner) + .deploy(link.address, operator.address, zeroAddress, '0x') + }) + + describe('when called by a non-authorized sender', () => { + it('reverts', async () => { + await evmRevert( + operator + .connect(roles.stranger) + .setAuthorizedSendersOn(newSenders, [forwarder1.address]), + 'Cannot set authorized senders', + ) + }) + }) + + describe('when called by an owner', () => { + it('does not revert', async () => { + await 
operator + .connect(roles.defaultAccount) + .setAuthorizedSendersOn( + [forwarder1.address, forwarder2.address], + newSenders, + ) + }) + }) + + describe('when called by an authorized sender', () => { + it('does not revert', async () => { + await operator + .connect(roles.oracleNode1) + .setAuthorizedSendersOn( + [forwarder1.address, forwarder2.address], + newSenders, + ) + }) + + it('does revert with 0 senders', async () => { + await operator + .connect(roles.oracleNode1) + .setAuthorizedSendersOn( + [forwarder1.address, forwarder2.address], + newSenders, + ) + }) + + it('emits a log announcing the change and who made it', async () => { + const targets = [forwarder1.address, forwarder2.address] + const tx = await operator + .connect(roles.oracleNode1) + .setAuthorizedSendersOn(targets, newSenders) + + const receipt = await tx.wait() + const encodedArgs = ethers.utils.defaultAbiCoder.encode( + ['address[]', 'address[]', 'address'], + [targets, newSenders, await roles.oracleNode1.getAddress()], + ) + + const event1 = receipt.events?.[0] + assert.equal(event1?.event, 'TargetsUpdatedAuthorizedSenders') + assert.equal(event1?.address, operator.address) + assert.equal(event1?.data, encodedArgs) + }) + + it('updates the sender list on each of the targets', async () => { + const tx = await operator + .connect(roles.oracleNode1) + .setAuthorizedSendersOn( + [forwarder1.address, forwarder2.address], + newSenders, + ) + + const receipt = await tx.wait() + assert.equal(receipt.events?.length, 3, receipt.toString()) + const encodedSenders = ethers.utils.defaultAbiCoder.encode( + ['address[]', 'address'], + [newSenders, operator.address], + ) + + const event1 = receipt.events?.[1] + assert.equal(event1?.event, 'AuthorizedSendersChanged') + assert.equal(event1?.address, forwarder1.address) + assert.equal(event1?.data, encodedSenders) + + const event2 = receipt.events?.[2] + assert.equal(event2?.event, 'AuthorizedSendersChanged') + assert.equal(event2?.address, 
forwarder2.address) + assert.equal(event2?.data, encodedSenders) + }) + }) + }) + + describe('#acceptAuthorizedReceivers', () => { + let newSenders: string[] + + describe('being called by the owner', () => { + let operator2: Contract + let receipt: ContractReceipt + + beforeEach(async () => { + operator2 = await operatorFactory + .connect(roles.defaultAccount) + .deploy(link.address, await roles.defaultAccount.getAddress()) + forwarder1 = await forwarderFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address, zeroAddress, '0x') + forwarder2 = await forwarderFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address, zeroAddress, '0x') + await operator + .connect(roles.defaultAccount) + .transferOwnableContracts( + [forwarder1.address, forwarder2.address], + operator2.address, + ) + newSenders = [ + await roles.oracleNode2.getAddress(), + await roles.oracleNode3.getAddress(), + ] + + const tx = await operator2 + .connect(roles.defaultAccount) + .acceptAuthorizedReceivers( + [forwarder1.address, forwarder2.address], + newSenders, + ) + receipt = await tx.wait() + }) + + it('sets the new owner on the forwarder', async () => { + assert.equal(await forwarder1.owner(), operator2.address) + }) + + it('emits ownership transferred events', async () => { + assert.equal(receipt?.events?.[0]?.event, 'OwnableContractAccepted') + assert.equal(receipt?.events?.[0]?.args?.[0], forwarder1.address) + + assert.equal(receipt?.events?.[1]?.event, 'OwnershipTransferred') + assert.equal(receipt?.events?.[1]?.address, forwarder1.address) + assert.equal(receipt?.events?.[1]?.args?.[0], operator.address) + assert.equal(receipt?.events?.[1]?.args?.[1], operator2.address) + + assert.equal(receipt?.events?.[2]?.event, 'OwnableContractAccepted') + assert.equal(receipt?.events?.[2]?.args?.[0], forwarder2.address) + + assert.equal(receipt?.events?.[3]?.event, 'OwnershipTransferred') + assert.equal(receipt?.events?.[3]?.address, forwarder2.address) 
+ assert.equal(receipt?.events?.[3]?.args?.[0], operator.address) + assert.equal(receipt?.events?.[3]?.args?.[1], operator2.address) + + assert.equal( + receipt?.events?.[4]?.event, + 'TargetsUpdatedAuthorizedSenders', + ) + + const encodedSenders = ethers.utils.defaultAbiCoder.encode( + ['address[]', 'address'], + [newSenders, operator2.address], + ) + assert.equal(receipt?.events?.[5]?.event, 'AuthorizedSendersChanged') + assert.equal(receipt?.events?.[5]?.address, forwarder1.address) + assert.equal(receipt?.events?.[5]?.data, encodedSenders) + + assert.equal(receipt?.events?.[6]?.event, 'AuthorizedSendersChanged') + assert.equal(receipt?.events?.[6]?.address, forwarder2.address) + assert.equal(receipt?.events?.[6]?.data, encodedSenders) + }) + }) + + describe('being called by a non owner', () => { + it('reverts with message', async () => { + await evmRevert( + operator + .connect(roles.stranger) + .acceptAuthorizedReceivers( + [forwarder1.address, forwarder2.address], + newSenders, + ), + 'Cannot set authorized senders', + ) + }) + }) + }) + + describe('#onTokenTransfer', () => { + describe('when called from any address but the PLI token', () => { + it('triggers the intended method', async () => { + const callData = encodeOracleRequest( + specId, + to, + fHash, + 0, + constants.HashZero, + ) + + await evmRevert( + operator.onTokenTransfer( + await roles.defaultAccount.getAddress(), + 0, + callData, + ), + ) + }) + }) + + describe('when called from the PLI token', () => { + it('triggers the intended method', async () => { + const callData = encodeOracleRequest( + specId, + to, + fHash, + 0, + constants.HashZero, + ) + + const tx = await link.transferAndCall(operator.address, 0, callData, { + value: 0, + }) + const receipt = await tx.wait() + + assert.equal(3, receipt.logs?.length) + }) + + describe('with no data', () => { + it('reverts', async () => { + await evmRevert( + link.transferAndCall(operator.address, 0, '0x', { + value: 0, + }), + ) + }) + }) + }) + + 
describe('malicious requester', () => { + let mock: Contract + let requester: Contract + const paymentAmount = toWei('1') + + beforeEach(async () => { + mock = await maliciousRequesterFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address) + await link.transfer(mock.address, paymentAmount) + }) + + it('cannot withdraw from oracle', async () => { + const operatorOriginalBalance = await link.balanceOf(operator.address) + const mockOriginalBalance = await link.balanceOf(mock.address) + + await evmRevert(mock.maliciousWithdraw()) + + const operatorNewBalance = await link.balanceOf(operator.address) + const mockNewBalance = await link.balanceOf(mock.address) + + bigNumEquals(operatorOriginalBalance, operatorNewBalance) + bigNumEquals(mockNewBalance, mockOriginalBalance) + }) + + describe('if the requester tries to create a requestId for another contract', () => { + it('the requesters ID will not match with the oracle contract', async () => { + const tx = await mock.maliciousTargetConsumer(to) + const receipt = await tx.wait() + + const mockRequestId = receipt.logs?.[0].data + const requestId = (receipt.events?.[0].args as any).requestId + assert.notEqual(mockRequestId, requestId) + }) + + it('the target requester can still create valid requests', async () => { + requester = await basicConsumerFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address, specId) + await link.transfer(requester.address, paymentAmount) + await mock.maliciousTargetConsumer(requester.address) + await requester.requestEthereumPrice('USD', paymentAmount) + }) + }) + }) + + it('does not allow recursive calls of onTokenTransfer', async () => { + const requestPayload = encodeOracleRequest( + specId, + to, + fHash, + 0, + constants.HashZero, + ) + + const ottSelector = + operatorFactory.interface.getSighash('onTokenTransfer') + const header = + '000000000000000000000000c5fdf4076b8f3a5357c5e395ab970b5b54098fef' + // to + 
'0000000000000000000000000000000000000000000000000000000000000539' + // amount + '0000000000000000000000000000000000000000000000000000000000000060' + // offset + '0000000000000000000000000000000000000000000000000000000000000136' // length + + const maliciousPayload = ottSelector + header + requestPayload.slice(2) + + await evmRevert( + link.transferAndCall(operator.address, 0, maliciousPayload, { + value: 0, + }), + ) + }) + }) + + describe('#oracleRequest', () => { + describe('when called through the PLI token', () => { + const paid = 100 + let log: providers.Log | undefined + let receipt: providers.TransactionReceipt + + beforeEach(async () => { + const args = encodeOracleRequest( + specId, + to, + fHash, + 1, + constants.HashZero, + ) + const tx = await link.transferAndCall(operator.address, paid, args) + receipt = await tx.wait() + assert.equal(3, receipt?.logs?.length) + + log = receipt.logs && receipt.logs[2] + }) + + it('logs an event', async () => { + assert.equal(operator.address, log?.address) + + assert.equal(log?.topics?.[1], specId) + + const req = decodeRunRequest(receipt?.logs?.[2]) + assert.equal(await roles.defaultAccount.getAddress(), req.requester) + bigNumEquals(paid, req.payment) + }) + + it('uses the expected event signature', async () => { + // If updating this test, be sure to update models.RunLogTopic. 
+ const eventSignature = + '0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65' + assert.equal(eventSignature, log?.topics?.[0]) + }) + + it('does not allow the same requestId to be used twice', async () => { + const args2 = encodeOracleRequest( + specId, + to, + fHash, + 1, + constants.HashZero, + ) + await evmRevert(link.transferAndCall(operator.address, paid, args2)) + }) + + describe('when called with a payload less than 2 EVM words + function selector', () => { + it('throws an error', async () => { + const funcSelector = + operatorFactory.interface.getSighash('oracleRequest') + const maliciousData = + funcSelector + + '0000000000000000000000000000000000000000000000000000000000000000000' + await evmRevert( + link.transferAndCall(operator.address, paid, maliciousData), + ) + }) + }) + + describe('when called with a payload between 3 and 9 EVM words', () => { + it('throws an error', async () => { + const funcSelector = + operatorFactory.interface.getSighash('oracleRequest') + const maliciousData = + funcSelector + + '000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001' + await evmRevert( + link.transferAndCall(operator.address, paid, maliciousData), + ) + }) + }) + }) + + describe('when dataVersion is higher than 255', () => { + it('throws an error', async () => { + const paid = 100 + const args = encodeOracleRequest( + specId, + to, + fHash, + 1, + constants.HashZero, + 256, + ) + await evmRevert(link.transferAndCall(operator.address, paid, args)) + }) + }) + + describe('when not called through the PLI token', () => { + it('reverts', async () => { + await evmRevert( + operator + .connect(roles.oracleNode) + .oracleRequest( + '0x0000000000000000000000000000000000000000', + 0, + specId, + to, + fHash, + 1, + 1, + '0x', + ), + ) + }) + }) + }) + + describe('#operatorRequest', () => { + describe('when called through the PLI token', () => { + const paid = 100 + let log: providers.Log | undefined + let 
receipt: providers.TransactionReceipt + + beforeEach(async () => { + const args = encodeRequestOracleData( + specId, + fHash, + 1, + constants.HashZero, + ) + const tx = await link.transferAndCall(operator.address, paid, args) + receipt = await tx.wait() + assert.equal(3, receipt?.logs?.length) + + log = receipt.logs && receipt.logs[2] + }) + + it('logs an event', async () => { + assert.equal(operator.address, log?.address) + + assert.equal(log?.topics?.[1], specId) + + const req = decodeRunRequest(receipt?.logs?.[2]) + assert.equal(await roles.defaultAccount.getAddress(), req.requester) + bigNumEquals(paid, req.payment) + }) + + it('uses the expected event signature', async () => { + // If updating this test, be sure to update models.RunLogTopic. + const eventSignature = + '0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65' + assert.equal(eventSignature, log?.topics?.[0]) + }) + + it('does not allow the same requestId to be used twice', async () => { + const args2 = encodeRequestOracleData( + specId, + fHash, + 1, + constants.HashZero, + ) + await evmRevert(link.transferAndCall(operator.address, paid, args2)) + }) + + describe('when called with a payload less than 2 EVM words + function selector', () => { + it('throws an error', async () => { + const funcSelector = + operatorFactory.interface.getSighash('oracleRequest') + const maliciousData = + funcSelector + + '0000000000000000000000000000000000000000000000000000000000000000000' + await evmRevert( + link.transferAndCall(operator.address, paid, maliciousData), + ) + }) + }) + + describe('when called with a payload between 3 and 9 EVM words', () => { + it('throws an error', async () => { + const funcSelector = + operatorFactory.interface.getSighash('oracleRequest') + const maliciousData = + funcSelector + + '000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001' + await evmRevert( + link.transferAndCall(operator.address, paid, maliciousData), + ) + }) + 
}) + }) + + describe('when dataVersion is higher than 255', () => { + it('throws an error', async () => { + const paid = 100 + const args = encodeRequestOracleData( + specId, + fHash, + 1, + constants.HashZero, + 256, + ) + await evmRevert(link.transferAndCall(operator.address, paid, args)) + }) + }) + + describe('when not called through the PLI token', () => { + it('reverts', async () => { + await evmRevert( + operator + .connect(roles.oracleNode) + .oracleRequest( + '0x0000000000000000000000000000000000000000', + 0, + specId, + to, + fHash, + 1, + 1, + '0x', + ), + ) + }) + }) + }) + + describe('#fulfillOracleRequest', () => { + const response = 'Hi Mom!' + let maliciousRequester: Contract + let basicConsumer: Contract + let maliciousConsumer: Contract + let gasGuzzlingConsumer: Contract + let request: ReturnType + + describe('gas guzzling consumer [ @skip-coverage ]', () => { + beforeEach(async () => { + gasGuzzlingConsumer = await gasGuzzlingConsumerFactory + .connect(roles.consumer) + .deploy(link.address, operator.address, specId) + const paymentAmount = toWei('1') + await link.transfer(gasGuzzlingConsumer.address, paymentAmount) + const tx = + await gasGuzzlingConsumer.gassyRequestEthereumPrice(paymentAmount) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + it('emits an OracleResponse event', async () => { + const fulfillParams = convertFufillParams(request, response) + const tx = await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...fulfillParams) + const receipt = await tx.wait() + assert.equal(receipt.events?.length, 1) + const responseEvent = receipt.events?.[0] + assert.equal(responseEvent?.event, 'OracleResponse') + assert.equal(responseEvent?.args?.[0], request.requestId) + }) + }) + + describe('cooperative consumer', () => { + beforeEach(async () => { + basicConsumer = await basicConsumerFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address, specId) + const 
paymentAmount = toWei('1') + await link.transfer(basicConsumer.address, paymentAmount) + const currency = 'USD' + const tx = await basicConsumer.requestEthereumPrice( + currency, + paymentAmount, + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + describe('when called by an unauthorized node', () => { + beforeEach(async () => { + assert.equal( + false, + await operator.isAuthorizedSender( + await roles.stranger.getAddress(), + ), + ) + }) + + it('raises an error', async () => { + await evmRevert( + operator + .connect(roles.stranger) + .fulfillOracleRequest(...convertFufillParams(request, response)), + ) + }) + }) + + describe('when fulfilled with the wrong function', () => { + let v7Consumer + beforeEach(async () => { + v7Consumer = await v7ConsumerFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address, specId) + const paymentAmount = toWei('1') + await link.transfer(v7Consumer.address, paymentAmount) + const currency = 'USD' + const tx = await v7Consumer.requestEthereumPrice( + currency, + paymentAmount, + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + it('raises an error', async () => { + await evmRevert( + operator + .connect(roles.stranger) + .fulfillOracleRequest(...convertFufillParams(request, response)), + ) + }) + }) + + describe('when called by an authorized node', () => { + it('raises an error if the request ID does not exist', async () => { + request.requestId = ethers.utils.formatBytes32String('DOESNOTEXIST') + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response)), + ) + }) + + it('sets the value on the requested contract', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response)) + + const currentValue = await basicConsumer.currentPrice() + assert.equal(response, 
ethers.utils.parseBytes32String(currentValue)) + }) + + it('emits an OracleResponse event', async () => { + const fulfillParams = convertFufillParams(request, response) + const tx = await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...fulfillParams) + const receipt = await tx.wait() + assert.equal(receipt.events?.length, 3) + const responseEvent = receipt.events?.[0] + assert.equal(responseEvent?.event, 'OracleResponse') + assert.equal(responseEvent?.args?.[0], request.requestId) + }) + + it('does not allow a request to be fulfilled twice', async () => { + const response2 = response + ' && Hello World!!' + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response)) + + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response2)), + ) + + const currentValue = await basicConsumer.currentPrice() + assert.equal(response, ethers.utils.parseBytes32String(currentValue)) + }) + }) + + describe('when the oracle does not provide enough gas', () => { + // if updating this defaultGasLimit, be sure it matches with the + // defaultGasLimit specified in store/tx_manager.go + const defaultGasLimit = 500000 + + beforeEach(async () => { + bigNumEquals(0, await operator.withdrawable()) + }) + + it('does not allow the oracle to withdraw the payment', async () => { + await evmRevert( + operator.connect(roles.oracleNode).fulfillOracleRequest( + ...convertFufillParams(request, response, { + gasLimit: 70000, + }), + ), + ) + + bigNumEquals(0, await operator.withdrawable()) + }) + + it(`${defaultGasLimit} is enough to pass the gas requirement`, async () => { + await operator.connect(roles.oracleNode).fulfillOracleRequest( + ...convertFufillParams(request, response, { + gasLimit: defaultGasLimit, + }), + ) + + bigNumEquals(request.payment, await operator.withdrawable()) + }) + }) + }) + + describe('with a malicious requester', () => { + beforeEach(async () 
=> { + const paymentAmount = toWei('1') + maliciousRequester = await maliciousRequesterFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address) + await link.transfer(maliciousRequester.address, paymentAmount) + }) + + it('cannot cancel before the expiration', async () => { + await evmRevert( + maliciousRequester.maliciousRequestCancel( + specId, + ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'), + ), + ) + }) + + it('cannot call functions on the PLI token through callbacks', async () => { + await evmRevert( + maliciousRequester.request( + specId, + link.address, + ethers.utils.toUtf8Bytes('transfer(address,uint256)'), + ), + ) + }) + + describe('requester lies about amount of PLI sent', () => { + it('the oracle uses the amount of PLI actually paid', async () => { + const tx = await maliciousRequester.maliciousPrice(specId) + const receipt = await tx.wait() + const req = decodeRunRequest(receipt.logs?.[3]) + + assert(toWei('1').eq(req.payment)) + }) + }) + }) + + describe('with a malicious consumer', () => { + const paymentAmount = toWei('1') + + beforeEach(async () => { + maliciousConsumer = await maliciousConsumerFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address) + await link.transfer(maliciousConsumer.address, paymentAmount) + }) + + describe('fails during fulfillment', () => { + beforeEach(async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('assertFail(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + it('allows the oracle node to receive their payment', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response)) + + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(balance, 0) + + await operator + .connect(roles.defaultAccount) + .withdraw(await 
roles.oracleNode.getAddress(), paymentAmount) + + const newBalance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(paymentAmount, newBalance) + }) + + it("can't fulfill the data again", async () => { + const response2 = 'hack the planet 102' + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response)) + + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response2)), + ) + }) + }) + + describe('calls selfdestruct', () => { + beforeEach(async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + await maliciousConsumer.remove() + }) + + it('allows the oracle node to receive their payment', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response)) + + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(balance, 0) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), paymentAmount) + const newBalance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(paymentAmount, newBalance) + }) + }) + + describe('request is canceled during fulfillment', () => { + beforeEach(async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('cancelRequestOnFulfill(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + bigNumEquals(0, await link.balanceOf(maliciousConsumer.address)) + }) + + it('allows the oracle node to receive their payment', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response)) + + const mockBalance = await 
link.balanceOf(maliciousConsumer.address) + bigNumEquals(mockBalance, 0) + + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(balance, 0) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), paymentAmount) + const newBalance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(paymentAmount, newBalance) + }) + + it("can't fulfill the data again", async () => { + const response2 = 'hack the planet 102' + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response)) + + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response2)), + ) + }) + }) + + describe('tries to steal funds from node', () => { + it('is not successful with call', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('stealEthCall(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response)) + + bigNumEquals( + 0, + await ethers.provider.getBalance(maliciousConsumer.address), + ) + }) + + it('is not successful with send', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('stealEthSend(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response)) + bigNumEquals( + 0, + await ethers.provider.getBalance(maliciousConsumer.address), + ) + }) + + it('is not successful with transfer', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('stealEthTransfer(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + 
request = decodeRunRequest(receipt.logs?.[3]) + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, response)) + bigNumEquals( + 0, + await ethers.provider.getBalance(maliciousConsumer.address), + ) + }) + }) + + describe('when calling an owned contract', () => { + beforeEach(async () => { + forwarder1 = await forwarderFactory + .connect(roles.defaultAccount) + .deploy(link.address, link.address, operator.address, '0x') + }) + + it('does not allow the contract to callback to owned contracts', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('whatever(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + let request = decodeRunRequest(receipt.logs?.[3]) + let responseParams = convertFufillParams(request, response) + // set the params to be the owned address + responseParams[2] = forwarder1.address + + //accept ownership + await operator + .connect(roles.defaultAccount) + .acceptOwnableContracts([forwarder1.address]) + + // do the thing + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...responseParams), + 'Cannot call owned contract', + ) + + await operator + .connect(roles.defaultAccount) + .transferOwnableContracts([forwarder1.address], link.address) + //reverts for a different reason after transferring ownership + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...responseParams), + 'Params do not match request ID', + ) + }) + }) + }) + }) + + describe('#fulfillOracleRequest2', () => { + describe('single word fulfils', () => { + const response = 'Hi mom!' 
+ const responseTypes = ['bytes32'] + const responseValues = [toBytes32String(response)] + let maliciousRequester: Contract + let basicConsumer: Contract + let maliciousConsumer: Contract + let gasGuzzlingConsumer: Contract + let request: ReturnType + let request2: ReturnType + + describe('gas guzzling consumer [ @skip-coverage ]', () => { + beforeEach(async () => { + gasGuzzlingConsumer = await gasGuzzlingConsumerFactory + .connect(roles.consumer) + .deploy(link.address, operator.address, specId) + const paymentAmount = toWei('1') + await link.transfer(gasGuzzlingConsumer.address, paymentAmount) + const tx = + await gasGuzzlingConsumer.gassyRequestEthereumPrice(paymentAmount) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + it('emits an OracleResponse2 event', async () => { + const fulfillParams = convertFulfill2Params( + request, + responseTypes, + responseValues, + ) + const tx = await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2(...fulfillParams) + const receipt = await tx.wait() + assert.equal(receipt.events?.length, 1) + const responseEvent = receipt.events?.[0] + assert.equal(responseEvent?.event, 'OracleResponse') + assert.equal(responseEvent?.args?.[0], request.requestId) + }) + }) + + describe('cooperative consumer', () => { + beforeEach(async () => { + basicConsumer = await basicConsumerFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address, specId) + const paymentAmount = toWei('1') + await link.transfer(basicConsumer.address, paymentAmount) + const currency = 'USD' + const tx = await basicConsumer.requestEthereumPrice( + currency, + paymentAmount, + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + describe('when called by an unauthorized node', () => { + beforeEach(async () => { + assert.equal( + false, + await operator.isAuthorizedSender( + await roles.stranger.getAddress(), + ), + ) + }) + + it('raises an error', async () 
=> { + await evmRevert( + operator + .connect(roles.stranger) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ), + ) + }) + }) + + describe('when called by an authorized node', () => { + it('raises an error if the request ID does not exist', async () => { + request.requestId = ethers.utils.formatBytes32String('DOESNOTEXIST') + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ), + ) + }) + + it('sets the value on the requested contract', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + const currentValue = await basicConsumer.currentPrice() + assert.equal( + response, + ethers.utils.parseBytes32String(currentValue), + ) + }) + + it('emits an OracleResponse2 event', async () => { + const fulfillParams = convertFulfill2Params( + request, + responseTypes, + responseValues, + ) + const tx = await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2(...fulfillParams) + const receipt = await tx.wait() + assert.equal(receipt.events?.length, 3) + const responseEvent = receipt.events?.[0] + assert.equal(responseEvent?.event, 'OracleResponse') + assert.equal(responseEvent?.args?.[0], request.requestId) + }) + + it('does not allow a request to be fulfilled twice', async () => { + const response2 = response + ' && Hello World!!' 
+ const response2Values = [toBytes32String(response2)] + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + response2Values, + ), + ), + ) + + const currentValue = await basicConsumer.currentPrice() + assert.equal( + response, + ethers.utils.parseBytes32String(currentValue), + ) + }) + }) + + describe('when the oracle does not provide enough gas', () => { + // if updating this defaultGasLimit, be sure it matches with the + // defaultGasLimit specified in store/tx_manager.go + const defaultGasLimit = 500000 + + beforeEach(async () => { + bigNumEquals(0, await operator.withdrawable()) + }) + + it('does not allow the oracle to withdraw the payment', async () => { + await evmRevert( + operator.connect(roles.oracleNode).fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + { + gasLimit: 70000, + }, + ), + ), + ) + + bigNumEquals(0, await operator.withdrawable()) + }) + + it(`${defaultGasLimit} is enough to pass the gas requirement`, async () => { + await operator.connect(roles.oracleNode).fulfillOracleRequest2( + ...convertFulfill2Params(request, responseTypes, responseValues, { + gasLimit: defaultGasLimit, + }), + ) + + bigNumEquals(request.payment, await operator.withdrawable()) + }) + }) + }) + + describe('with a malicious oracle', () => { + beforeEach(async () => { + // Setup Request 1 + basicConsumer = await basicConsumerFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address, specId) + const paymentAmount = toWei('1') + await link.transfer(basicConsumer.address, paymentAmount) + const currency = 'USD' + const tx = await basicConsumer.requestEthereumPrice( + currency, + paymentAmount, + ) + const receipt = await tx.wait() + request = 
decodeRunRequest(receipt.logs?.[3]) + // Setup Request 2 + await link.transfer(basicConsumer.address, paymentAmount) + const tx2 = await basicConsumer.requestEthereumPrice( + currency, + paymentAmount, + ) + const receipt2 = await tx2.wait() + request2 = decodeRunRequest(receipt2.logs?.[3]) + }) + + it('cannot spoof requestId in response data by moving calldata offset', async () => { + // Malicious Oracle Fulfill 2 + const functionSelector = '0x6ae0bc76' // fulfillOracleRequest2 + const dataOffset = + '0000000000000000000000000000000000000000000000000000000000000100' // Moved to 0x0124 + const fillerBytes = + '0000000000000000000000000000000000000000000000000000000000000000' + const expectedCalldataStart = request.requestId.slice(2) // 0xe4, this is checked against requestId in validateMultiWordResponseId + const dataSize = + '0000000000000000000000000000000000000000000000000000000000000040' // Two 32 byte blocks + const maliciousCalldataId = request2.requestId.slice(2) // 0x0124, set to a different requestId + const calldataData = + '1122334455667788991122334455667788991122334455667788991122334455' // some garbage value as response value + + const data = + functionSelector + + /** Input Params - slice off 0x prefix and pad with 0's */ + request.requestId.slice(2) + + request.payment.slice(2).padStart(64, '0') + + request.callbackAddr.slice(2).padStart(64, '0') + + request.callbackFunc.slice(2).padEnd(64, '0') + + request.expiration.slice(2).padStart(64, '0') + + // calldata "data" + dataOffset + + fillerBytes + + expectedCalldataStart + + dataSize + + maliciousCalldataId + + calldataData + + await evmRevert( + operator.connect(roles.oracleNode).signer.sendTransaction({ + to: operator.address, + data, + }), + ) + }) + }) + + describe('with a malicious requester', () => { + beforeEach(async () => { + const paymentAmount = toWei('1') + maliciousRequester = await maliciousRequesterFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address) + 
await link.transfer(maliciousRequester.address, paymentAmount) + }) + + it('cannot cancel before the expiration', async () => { + await evmRevert( + maliciousRequester.maliciousRequestCancel( + specId, + ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'), + ), + ) + }) + + it('cannot call functions on the PLI token through callbacks', async () => { + await evmRevert( + maliciousRequester.request( + specId, + link.address, + ethers.utils.toUtf8Bytes('transfer(address,uint256)'), + ), + ) + }) + + describe('requester lies about amount of PLI sent', () => { + it('the oracle uses the amount of PLI actually paid', async () => { + const tx = await maliciousRequester.maliciousPrice(specId) + const receipt = await tx.wait() + const req = decodeRunRequest(receipt.logs?.[3]) + + assert(toWei('1').eq(req.payment)) + }) + }) + }) + + describe('with a malicious consumer', () => { + const paymentAmount = toWei('1') + + beforeEach(async () => { + maliciousConsumer = await maliciousConsumerFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address) + await link.transfer(maliciousConsumer.address, paymentAmount) + }) + + describe('fails during fulfillment', () => { + beforeEach(async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('assertFail(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + it('allows the oracle node to receive their payment', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(balance, 0) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), paymentAmount) + + const newBalance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + 
bigNumEquals(paymentAmount, newBalance) + }) + + it("can't fulfill the data again", async () => { + const response2 = 'hack the planet 102' + const response2Values = [toBytes32String(response2)] + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + response2Values, + ), + ), + ) + }) + }) + + describe('calls selfdestruct', () => { + beforeEach(async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + await maliciousConsumer.remove() + }) + + it('allows the oracle node to receive their payment', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(balance, 0) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), paymentAmount) + const newBalance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(paymentAmount, newBalance) + }) + }) + + describe('request is canceled during fulfillment', () => { + beforeEach(async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes( + 'cancelRequestOnFulfill(bytes32,bytes32)', + ), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + bigNumEquals(0, await link.balanceOf(maliciousConsumer.address)) + }) + + it('allows the oracle node to receive their payment', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + 
...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + const mockBalance = await link.balanceOf(maliciousConsumer.address) + bigNumEquals(mockBalance, 0) + + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(balance, 0) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), paymentAmount) + const newBalance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(paymentAmount, newBalance) + }) + + it("can't fulfill the data again", async () => { + const response2 = 'hack the planet 102' + const response2Values = [toBytes32String(response2)] + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + response2Values, + ), + ), + ) + }) + }) + + describe('tries to steal funds from node', () => { + it('is not successful with call', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('stealEthCall(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + bigNumEquals( + 0, + await ethers.provider.getBalance(maliciousConsumer.address), + ) + }) + + it('is not successful with send', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('stealEthSend(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + 
responseValues, + ), + ) + bigNumEquals( + 0, + await ethers.provider.getBalance(maliciousConsumer.address), + ) + }) + + it('is not successful with transfer', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('stealEthTransfer(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + bigNumEquals( + 0, + await ethers.provider.getBalance(maliciousConsumer.address), + ) + }) + }) + + describe('when calling an owned contract', () => { + beforeEach(async () => { + forwarder1 = await forwarderFactory + .connect(roles.defaultAccount) + .deploy(link.address, link.address, operator.address, '0x') + }) + + it('does not allow the contract to callback to owned contracts', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('whatever(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + let request = decodeRunRequest(receipt.logs?.[3]) + let responseParams = convertFufillParams(request, response) + // set the params to be the owned address + responseParams[2] = forwarder1.address + + //accept ownership + await operator + .connect(roles.defaultAccount) + .acceptOwnableContracts([forwarder1.address]) + + // do the thing + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2(...responseParams), + 'Cannot call owned contract', + ) + + await operator + .connect(roles.defaultAccount) + .transferOwnableContracts([forwarder1.address], link.address) + //reverts for a different reason after transferring ownership + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...responseParams), + 'Params do not match request ID', + ) + }) + }) + }) + }) + + describe('multi word fulfils', () => { + describe('one bytes parameter', 
() => { + const response = + 'Lorem ipsum dolor sit amet, consectetur adipiscing elit.\ + Fusce euismod malesuada ligula, eget semper metus ultrices sit amet.' + const responseTypes = ['bytes'] + const responseValues = [stringToBytes(response)] + let maliciousRequester: Contract + let multiConsumer: Contract + let maliciousConsumer: Contract + let gasGuzzlingConsumer: Contract + let request: ReturnType + + describe('gas guzzling consumer [ @skip-coverage ]', () => { + beforeEach(async () => { + gasGuzzlingConsumer = await gasGuzzlingConsumerFactory + .connect(roles.consumer) + .deploy(link.address, operator.address, specId) + const paymentAmount = toWei('1') + await link.transfer(gasGuzzlingConsumer.address, paymentAmount) + const tx = + await gasGuzzlingConsumer.gassyMultiWordRequest(paymentAmount) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + it('emits an OracleResponse2 event', async () => { + const fulfillParams = convertFulfill2Params( + request, + responseTypes, + responseValues, + ) + const tx = await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2(...fulfillParams) + const receipt = await tx.wait() + assert.equal(receipt.events?.length, 1) + const responseEvent = receipt.events?.[0] + assert.equal(responseEvent?.event, 'OracleResponse') + assert.equal(responseEvent?.args?.[0], request.requestId) + }) + }) + + describe('cooperative consumer', () => { + beforeEach(async () => { + multiConsumer = await multiWordConsumerFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address, specId) + const paymentAmount = toWei('1') + await link.transfer(multiConsumer.address, paymentAmount) + const currency = 'USD' + const tx = await multiConsumer.requestEthereumPrice( + currency, + paymentAmount, + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + it("matches the consumer's request ID", async () => { + const nonce = await 
multiConsumer.publicGetNextRequestCount() + const tx = await multiConsumer.requestEthereumPrice('USD', 0) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + const packed = ethers.utils.solidityPack( + ['address', 'uint256'], + [multiConsumer.address, nonce], + ) + const expected = ethers.utils.keccak256(packed) + assert.equal(expected, request.requestId) + }) + + describe('when called by an unauthorized node', () => { + beforeEach(async () => { + assert.equal( + false, + await operator.isAuthorizedSender( + await roles.stranger.getAddress(), + ), + ) + }) + + it('raises an error', async () => { + await evmRevert( + operator + .connect(roles.stranger) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ), + ) + }) + }) + + describe('when called by an authorized node', () => { + it('raises an error if the request ID does not exist', async () => { + request.requestId = + ethers.utils.formatBytes32String('DOESNOTEXIST') + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ), + ) + }) + + it('sets the value on the requested contract', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + const currentValue = await multiConsumer.currentPrice() + assert.equal(response, ethers.utils.toUtf8String(currentValue)) + }) + + it('emits an OracleResponse2 event', async () => { + const fulfillParams = convertFulfill2Params( + request, + responseTypes, + responseValues, + ) + const tx = await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2(...fulfillParams) + const receipt = await tx.wait() + assert.equal(receipt.events?.length, 3) + const responseEvent = receipt.events?.[0] + assert.equal(responseEvent?.event, 'OracleResponse') + 
assert.equal(responseEvent?.args?.[0], request.requestId) + }) + + it('does not allow a request to be fulfilled twice', async () => { + const response2 = response + ' && Hello World!!' + const response2Values = [stringToBytes(response2)] + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + response2Values, + ), + ), + ) + + const currentValue = await multiConsumer.currentPrice() + assert.equal(response, ethers.utils.toUtf8String(currentValue)) + }) + }) + + describe('when the oracle does not provide enough gas', () => { + // if updating this defaultGasLimit, be sure it matches with the + // defaultGasLimit specified in store/tx_manager.go + const defaultGasLimit = 500000 + + beforeEach(async () => { + bigNumEquals(0, await operator.withdrawable()) + }) + + it('does not allow the oracle to withdraw the payment', async () => { + await evmRevert( + operator.connect(roles.oracleNode).fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + { + gasLimit: 70000, + }, + ), + ), + ) + + bigNumEquals(0, await operator.withdrawable()) + }) + + it(`${defaultGasLimit} is enough to pass the gas requirement`, async () => { + await operator.connect(roles.oracleNode).fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + { + gasLimit: defaultGasLimit, + }, + ), + ) + + bigNumEquals(request.payment, await operator.withdrawable()) + }) + }) + }) + + describe('with a malicious requester', () => { + beforeEach(async () => { + const paymentAmount = toWei('1') + maliciousRequester = await maliciousRequesterFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address) + await link.transfer(maliciousRequester.address, paymentAmount) + }) + 
+ it('cannot cancel before the expiration', async () => { + await evmRevert( + maliciousRequester.maliciousRequestCancel( + specId, + ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'), + ), + ) + }) + + it('cannot call functions on the PLI token through callbacks', async () => { + await evmRevert( + maliciousRequester.request( + specId, + link.address, + ethers.utils.toUtf8Bytes('transfer(address,uint256)'), + ), + ) + }) + + describe('requester lies about amount of PLI sent', () => { + it('the oracle uses the amount of PLI actually paid', async () => { + const tx = await maliciousRequester.maliciousPrice(specId) + const receipt = await tx.wait() + const req = decodeRunRequest(receipt.logs?.[3]) + + assert(toWei('1').eq(req.payment)) + }) + }) + }) + + describe('with a malicious consumer', () => { + const paymentAmount = toWei('1') + + beforeEach(async () => { + maliciousConsumer = await maliciousMultiWordConsumerFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address) + await link.transfer(maliciousConsumer.address, paymentAmount) + }) + + describe('fails during fulfillment', () => { + beforeEach(async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('assertFail(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + it('allows the oracle node to receive their payment', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(balance, 0) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), paymentAmount) + + const newBalance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(paymentAmount, newBalance) + }) + + it("can't fulfill the data 
again", async () => { + const response2 = 'hack the planet 102' + const response2Values = [stringToBytes(response2)] + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + response2Values, + ), + ), + ) + }) + }) + + describe('calls selfdestruct', () => { + beforeEach(async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + await maliciousConsumer.remove() + }) + + it('allows the oracle node to receive their payment', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(balance, 0) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), paymentAmount) + const newBalance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(paymentAmount, newBalance) + }) + }) + + describe('request is canceled during fulfillment', () => { + beforeEach(async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes( + 'cancelRequestOnFulfill(bytes32,bytes32)', + ), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + bigNumEquals(0, await link.balanceOf(maliciousConsumer.address)) + }) + + it('allows the oracle node to receive their payment', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + const 
mockBalance = await link.balanceOf( + maliciousConsumer.address, + ) + bigNumEquals(mockBalance, 0) + + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(balance, 0) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), paymentAmount) + const newBalance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(paymentAmount, newBalance) + }) + + it("can't fulfill the data again", async () => { + const response2 = 'hack the planet 102' + const response2Values = [stringToBytes(response2)] + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + response2Values, + ), + ), + ) + }) + }) + + describe('tries to steal funds from node', () => { + it('is not successful with call', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('stealEthCall(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + bigNumEquals( + 0, + await ethers.provider.getBalance(maliciousConsumer.address), + ) + }) + + it('is not successful with send', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('stealEthSend(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + bigNumEquals( + 0, + await 
ethers.provider.getBalance(maliciousConsumer.address), + ) + }) + + it('is not successful with transfer', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('stealEthTransfer(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + bigNumEquals( + 0, + await ethers.provider.getBalance(maliciousConsumer.address), + ) + }) + }) + }) + }) + + describe('multiple bytes32 parameters', () => { + const response1 = '100' + const response2 = '7777777' + const response3 = 'forty two' + const responseTypes = ['bytes32', 'bytes32', 'bytes32'] + const responseValues = [ + toBytes32String(response1), + toBytes32String(response2), + toBytes32String(response3), + ] + let maliciousRequester: Contract + let multiConsumer: Contract + let maliciousConsumer: Contract + let gasGuzzlingConsumer: Contract + let request: ReturnType + + describe('gas guzzling consumer [ @skip-coverage ]', () => { + beforeEach(async () => { + gasGuzzlingConsumer = await gasGuzzlingConsumerFactory + .connect(roles.consumer) + .deploy(link.address, operator.address, specId) + const paymentAmount = toWei('1') + await link.transfer(gasGuzzlingConsumer.address, paymentAmount) + const tx = + await gasGuzzlingConsumer.gassyMultiWordRequest(paymentAmount) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + it('emits an OracleResponse2 event', async () => { + const fulfillParams = convertFulfill2Params( + request, + responseTypes, + responseValues, + ) + const tx = await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2(...fulfillParams) + const receipt = await tx.wait() + assert.equal(receipt.events?.length, 1) + const responseEvent = receipt.events?.[0] + assert.equal(responseEvent?.event, 'OracleResponse') + 
assert.equal(responseEvent?.args?.[0], request.requestId) + }) + }) + + describe('cooperative consumer', () => { + beforeEach(async () => { + multiConsumer = await multiWordConsumerFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address, specId) + const paymentAmount = toWei('1') + await link.transfer(multiConsumer.address, paymentAmount) + const currency = 'USD' + const tx = await multiConsumer.requestMultipleParameters( + currency, + paymentAmount, + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + describe('when called by an unauthorized node', () => { + beforeEach(async () => { + assert.equal( + false, + await operator.isAuthorizedSender( + await roles.stranger.getAddress(), + ), + ) + }) + + it('raises an error', async () => { + await evmRevert( + operator + .connect(roles.stranger) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ), + ) + }) + }) + + describe('when called by an authorized node', () => { + it('raises an error if the request ID does not exist', async () => { + request.requestId = + ethers.utils.formatBytes32String('DOESNOTEXIST') + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ), + ) + }) + + it('sets the value on the requested contract', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + const firstValue = await multiConsumer.usd() + const secondValue = await multiConsumer.eur() + const thirdValue = await multiConsumer.jpy() + assert.equal( + response1, + ethers.utils.parseBytes32String(firstValue), + ) + assert.equal( + response2, + ethers.utils.parseBytes32String(secondValue), + ) + assert.equal( + response3, + ethers.utils.parseBytes32String(thirdValue), + ) + }) + + it('emits an 
OracleResponse2 event', async () => { + const fulfillParams = convertFulfill2Params( + request, + responseTypes, + responseValues, + ) + const tx = await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2(...fulfillParams) + const receipt = await tx.wait() + assert.equal(receipt.events?.length, 3) + const responseEvent = receipt.events?.[0] + assert.equal(responseEvent?.event, 'OracleResponse') + assert.equal(responseEvent?.args?.[0], request.requestId) + }) + + it('does not allow a request to be fulfilled twice', async () => { + const response4 = response3 + ' && Hello World!!' + const repeatedResponseValues = [ + toBytes32String(response1), + toBytes32String(response2), + toBytes32String(response4), + ] + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + repeatedResponseValues, + ), + ), + ) + + const firstValue = await multiConsumer.usd() + const secondValue = await multiConsumer.eur() + const thirdValue = await multiConsumer.jpy() + assert.equal( + response1, + ethers.utils.parseBytes32String(firstValue), + ) + assert.equal( + response2, + ethers.utils.parseBytes32String(secondValue), + ) + assert.equal( + response3, + ethers.utils.parseBytes32String(thirdValue), + ) + }) + }) + + describe('when the oracle does not provide enough gas', () => { + // if updating this defaultGasLimit, be sure it matches with the + // defaultGasLimit specified in store/tx_manager.go + const defaultGasLimit = 500000 + + beforeEach(async () => { + bigNumEquals(0, await operator.withdrawable()) + }) + + it('does not allow the oracle to withdraw the payment', async () => { + await evmRevert( + operator.connect(roles.oracleNode).fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + { + 
gasLimit: 70000, + }, + ), + ), + ) + + bigNumEquals(0, await operator.withdrawable()) + }) + + it(`${defaultGasLimit} is enough to pass the gas requirement`, async () => { + await operator.connect(roles.oracleNode).fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + { + gasLimit: defaultGasLimit, + }, + ), + ) + + bigNumEquals(request.payment, await operator.withdrawable()) + }) + }) + }) + + describe('with a malicious requester', () => { + beforeEach(async () => { + const paymentAmount = toWei('1') + maliciousRequester = await maliciousRequesterFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address) + await link.transfer(maliciousRequester.address, paymentAmount) + }) + + it('cannot cancel before the expiration', async () => { + await evmRevert( + maliciousRequester.maliciousRequestCancel( + specId, + ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'), + ), + ) + }) + + it('cannot call functions on the PLI token through callbacks', async () => { + await evmRevert( + maliciousRequester.request( + specId, + link.address, + ethers.utils.toUtf8Bytes('transfer(address,uint256)'), + ), + ) + }) + + describe('requester lies about amount of PLI sent', () => { + it('the oracle uses the amount of PLI actually paid', async () => { + const tx = await maliciousRequester.maliciousPrice(specId) + const receipt = await tx.wait() + const req = decodeRunRequest(receipt.logs?.[3]) + + assert(toWei('1').eq(req.payment)) + }) + }) + }) + + describe('with a malicious consumer', () => { + const paymentAmount = toWei('1') + + beforeEach(async () => { + maliciousConsumer = await maliciousMultiWordConsumerFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address) + await link.transfer(maliciousConsumer.address, paymentAmount) + }) + + describe('fails during fulfillment', () => { + beforeEach(async () => { + const tx = await maliciousConsumer.requestData( + specId, + 
ethers.utils.toUtf8Bytes('assertFail(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + }) + + it('allows the oracle node to receive their payment', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(balance, 0) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), paymentAmount) + + const newBalance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(paymentAmount, newBalance) + }) + + it("can't fulfill the data again", async () => { + const response4 = 'hack the planet 102' + const repeatedResponseValues = [ + toBytes32String(response1), + toBytes32String(response2), + toBytes32String(response4), + ] + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + repeatedResponseValues, + ), + ), + ) + }) + }) + + describe('calls selfdestruct', () => { + beforeEach(async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('doesNothing(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + await maliciousConsumer.remove() + }) + + it('allows the oracle node to receive their payment', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(balance, 0) + + await operator + 
.connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), paymentAmount) + const newBalance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(paymentAmount, newBalance) + }) + }) + + describe('request is canceled during fulfillment', () => { + beforeEach(async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes( + 'cancelRequestOnFulfill(bytes32,bytes32)', + ), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + bigNumEquals(0, await link.balanceOf(maliciousConsumer.address)) + }) + + it('allows the oracle node to receive their payment', async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + const mockBalance = await link.balanceOf( + maliciousConsumer.address, + ) + bigNumEquals(mockBalance, 0) + + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(balance, 0) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), paymentAmount) + const newBalance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + bigNumEquals(paymentAmount, newBalance) + }) + + it("can't fulfill the data again", async () => { + const response4 = 'hack the planet 102' + const repeatedResponseValues = [ + toBytes32String(response1), + toBytes32String(response2), + toBytes32String(response4), + ] + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + repeatedResponseValues, + ), + ), + ) + }) + }) + + describe('tries to steal funds from node', () => { + it('is not successful with call', async () => { + 
const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('stealEthCall(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + + bigNumEquals( + 0, + await ethers.provider.getBalance(maliciousConsumer.address), + ) + }) + + it('is not successful with send', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('stealEthSend(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + bigNumEquals( + 0, + await ethers.provider.getBalance(maliciousConsumer.address), + ) + }) + + it('is not successful with transfer', async () => { + const tx = await maliciousConsumer.requestData( + specId, + ethers.utils.toUtf8Bytes('stealEthTransfer(bytes32,bytes32)'), + ) + const receipt = await tx.wait() + request = decodeRunRequest(receipt.logs?.[3]) + + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest2( + ...convertFulfill2Params( + request, + responseTypes, + responseValues, + ), + ) + bigNumEquals( + 0, + await ethers.provider.getBalance(maliciousConsumer.address), + ) + }) + }) + }) + }) + }) + + describe('when the response data is too short', () => { + const response = 'Hi mom!' 
+ const responseTypes = ['bytes32'] + const responseValues = [toBytes32String(response)] + + it('reverts', async () => { + let basicConsumer = await basicConsumerFactory + .connect(roles.defaultAccount) + .deploy(link.address, operator.address, specId) + const paymentAmount = toWei('1') + await link.transfer(basicConsumer.address, paymentAmount) + const tx = await basicConsumer.requestEthereumPrice( + 'USD', + paymentAmount, + ) + const receipt = await tx.wait() + let request = decodeRunRequest(receipt.logs?.[3]) + + const fulfillParams = convertFulfill2Params( + request, + responseTypes, + responseValues, + ) + fulfillParams[5] = '0x' // overwrite the data to be of lenght 0 + await evmRevert( + operator + .connect(roles.oracleNode) + .fulfillOracleRequest2(...fulfillParams), + 'Response must be > 32 bytes', + ) + }) + }) + }) + + describe('#withdraw', () => { + describe('without reserving funds via oracleRequest', () => { + it('does nothing', async () => { + let balance = await link.balanceOf(await roles.oracleNode.getAddress()) + assert.equal(0, balance.toNumber()) + await evmRevert( + operator + .connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), toWei('1')), + ) + balance = await link.balanceOf(await roles.oracleNode.getAddress()) + assert.equal(0, balance.toNumber()) + }) + + describe('recovering funds that were mistakenly sent', () => { + const paid = 1 + beforeEach(async () => { + await link.transfer(operator.address, paid) + }) + + it('withdraws funds', async () => { + const operatorBalanceBefore = await link.balanceOf(operator.address) + const accountBalanceBefore = await link.balanceOf( + await roles.defaultAccount.getAddress(), + ) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.defaultAccount.getAddress(), paid) + + const operatorBalanceAfter = await link.balanceOf(operator.address) + const accountBalanceAfter = await link.balanceOf( + await roles.defaultAccount.getAddress(), + ) + + const 
accountDifference = + accountBalanceAfter.sub(accountBalanceBefore) + const operatorDifference = + operatorBalanceBefore.sub(operatorBalanceAfter) + + bigNumEquals(operatorDifference, paid) + bigNumEquals(accountDifference, paid) + }) + }) + }) + + describe('reserving funds via oracleRequest', () => { + const payment = 15 + let request: ReturnType + + beforeEach(async () => { + const requester = await roles.defaultAccount.getAddress() + const args = encodeOracleRequest( + specId, + requester, + fHash, + 0, + constants.HashZero, + ) + const tx = await link.transferAndCall(operator.address, payment, args) + const receipt = await tx.wait() + assert.equal(3, receipt.logs?.length) + request = decodeRunRequest(receipt.logs?.[2]) + }) + + describe('but not freeing funds w fulfillOracleRequest', () => { + it('does not transfer funds', async () => { + await evmRevert( + operator + .connect(roles.defaultAccount) + .withdraw(await roles.oracleNode.getAddress(), payment), + ) + const balance = await link.balanceOf( + await roles.oracleNode.getAddress(), + ) + assert.equal(0, balance.toNumber()) + }) + + describe('recovering funds that were mistakenly sent', () => { + const paid = 1 + beforeEach(async () => { + await link.transfer(operator.address, paid) + }) + + it('withdraws funds', async () => { + const operatorBalanceBefore = await link.balanceOf(operator.address) + const accountBalanceBefore = await link.balanceOf( + await roles.defaultAccount.getAddress(), + ) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.defaultAccount.getAddress(), paid) + + const operatorBalanceAfter = await link.balanceOf(operator.address) + const accountBalanceAfter = await link.balanceOf( + await roles.defaultAccount.getAddress(), + ) + + const accountDifference = + accountBalanceAfter.sub(accountBalanceBefore) + const operatorDifference = + operatorBalanceBefore.sub(operatorBalanceAfter) + + bigNumEquals(operatorDifference, paid) + bigNumEquals(accountDifference, paid) 
+ }) + }) + }) + + describe('and freeing funds', () => { + beforeEach(async () => { + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest( + ...convertFufillParams(request, 'Hello World!'), + ) + }) + + it('does not allow input greater than the balance', async () => { + const originalOracleBalance = await link.balanceOf(operator.address) + const originalStrangerBalance = await link.balanceOf( + await roles.stranger.getAddress(), + ) + const withdrawalAmount = payment + 1 + + assert.isAbove(withdrawalAmount, originalOracleBalance.toNumber()) + await evmRevert( + operator + .connect(roles.defaultAccount) + .withdraw(await roles.stranger.getAddress(), withdrawalAmount), + ) + + const newOracleBalance = await link.balanceOf(operator.address) + const newStrangerBalance = await link.balanceOf( + await roles.stranger.getAddress(), + ) + + assert.equal( + originalOracleBalance.toNumber(), + newOracleBalance.toNumber(), + ) + assert.equal( + originalStrangerBalance.toNumber(), + newStrangerBalance.toNumber(), + ) + }) + + it('allows transfer of partial balance by owner to specified address', async () => { + const partialAmount = 6 + const difference = payment - partialAmount + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.stranger.getAddress(), partialAmount) + const strangerBalance = await link.balanceOf( + await roles.stranger.getAddress(), + ) + const oracleBalance = await link.balanceOf(operator.address) + assert.equal(partialAmount, strangerBalance.toNumber()) + assert.equal(difference, oracleBalance.toNumber()) + }) + + it('allows transfer of entire balance by owner to specified address', async () => { + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.stranger.getAddress(), payment) + const balance = await link.balanceOf( + await roles.stranger.getAddress(), + ) + assert.equal(payment, balance.toNumber()) + }) + + it('does not allow a transfer of funds by non-owner', async () => { + await evmRevert( + 
operator + .connect(roles.stranger) + .withdraw(await roles.stranger.getAddress(), payment), + ) + const balance = await link.balanceOf( + await roles.stranger.getAddress(), + ) + assert.isTrue(ethers.constants.Zero.eq(balance)) + }) + + describe('recovering funds that were mistakenly sent', () => { + const paid = 1 + beforeEach(async () => { + await link.transfer(operator.address, paid) + }) + + it('withdraws funds', async () => { + const operatorBalanceBefore = await link.balanceOf(operator.address) + const accountBalanceBefore = await link.balanceOf( + await roles.defaultAccount.getAddress(), + ) + + await operator + .connect(roles.defaultAccount) + .withdraw(await roles.defaultAccount.getAddress(), paid) + + const operatorBalanceAfter = await link.balanceOf(operator.address) + const accountBalanceAfter = await link.balanceOf( + await roles.defaultAccount.getAddress(), + ) + + const accountDifference = + accountBalanceAfter.sub(accountBalanceBefore) + const operatorDifference = + operatorBalanceBefore.sub(operatorBalanceAfter) + + bigNumEquals(operatorDifference, paid) + bigNumEquals(accountDifference, paid) + }) + }) + }) + }) + }) + + describe('#withdrawable', () => { + let request: ReturnType + const amount = toWei('1') + + beforeEach(async () => { + const requester = await roles.defaultAccount.getAddress() + const args = encodeOracleRequest( + specId, + requester, + fHash, + 0, + constants.HashZero, + ) + const tx = await link.transferAndCall(operator.address, amount, args) + const receipt = await tx.wait() + assert.equal(3, receipt.logs?.length) + request = decodeRunRequest(receipt.logs?.[2]) + await operator + .connect(roles.oracleNode) + .fulfillOracleRequest(...convertFufillParams(request, 'Hello World!')) + }) + + it('returns the correct value', async () => { + const withdrawAmount = await operator.withdrawable() + bigNumEquals(withdrawAmount, request.payment) + }) + + describe('funds that were mistakenly sent', () => { + const paid = 1 + 
beforeEach(async () => { + await link.transfer(operator.address, paid) + }) + + it('returns the correct value', async () => { + const withdrawAmount = await operator.withdrawable() + + const expectedAmount = amount.add(paid) + bigNumEquals(withdrawAmount, expectedAmount) + }) + }) + }) + + describe('#ownerTransferAndCall', () => { + let operator2: Contract + let args: string + let to: string + const startingBalance = 1000 + const payment = 20 + + beforeEach(async () => { + operator2 = await operatorFactory + .connect(roles.oracleNode2) + .deploy(link.address, await roles.oracleNode2.getAddress()) + to = operator2.address + args = encodeOracleRequest( + specId, + operator.address, + operatorFactory.interface.getSighash('fulfillOracleRequest'), + 1, + constants.HashZero, + ) + }) + + describe('when called by a non-owner', () => { + it('reverts with owner error message', async () => { + await link.transfer(operator.address, startingBalance) + await evmRevert( + operator + .connect(roles.stranger) + .ownerTransferAndCall(to, payment, args), + 'Only callable by owner', + ) + }) + }) + + describe('when called by the owner', () => { + beforeEach(async () => { + await link.transfer(operator.address, startingBalance) + }) + + describe('without sufficient funds in contract', () => { + it('reverts with funds message', async () => { + const tooMuch = startingBalance * 2 + await evmRevert( + operator + .connect(roles.defaultAccount) + .ownerTransferAndCall(to, tooMuch, args), + 'Amount requested is greater than withdrawable balance', + ) + }) + }) + + describe('with sufficient funds', () => { + let tx: ContractTransaction + let receipt: ContractReceipt + let requesterBalanceBefore: BigNumber + let requesterBalanceAfter: BigNumber + let receiverBalanceBefore: BigNumber + let receiverBalanceAfter: BigNumber + + before(async () => { + requesterBalanceBefore = await link.balanceOf(operator.address) + receiverBalanceBefore = await link.balanceOf(operator2.address) + tx = await 
operator + .connect(roles.defaultAccount) + .ownerTransferAndCall(to, payment, args) + receipt = await tx.wait() + requesterBalanceAfter = await link.balanceOf(operator.address) + receiverBalanceAfter = await link.balanceOf(operator2.address) + }) + + it('emits an event', async () => { + assert.equal(3, receipt.logs?.length) + const transferLog = await getLog(tx, 1) + const parsedLog = link.interface.parseLog({ + data: transferLog.data, + topics: transferLog.topics, + }) + await expect(parsedLog.name).to.equal('Transfer') + }) + + it('transfers the tokens', async () => { + bigNumEquals( + requesterBalanceBefore.sub(requesterBalanceAfter), + payment, + ) + bigNumEquals(receiverBalanceAfter.sub(receiverBalanceBefore), payment) + }) + }) + }) + }) + + describe('#cancelOracleRequestByRequester', () => { + const nonce = 17 + + describe('with no pending requests', () => { + it('fails', async () => { + const fakeRequest: RunRequest = { + requestId: ethers.utils.formatBytes32String('1337'), + payment: '0', + callbackFunc: + getterSetterFactory.interface.getSighash('requestedBytes32'), + expiration: '999999999999', + + callbackAddr: '', + data: Buffer.from(''), + dataVersion: 0, + specId: '', + requester: '', + topic: '', + } + await increaseTime5Minutes(ethers.provider) + + await evmRevert( + operator + .connect(roles.stranger) + .cancelOracleRequestByRequester( + ...convertCancelByRequesterParams(fakeRequest, nonce), + ), + ) + }) + }) + + describe('with a pending request', () => { + const startingBalance = 100 + let request: ReturnType + let receipt: providers.TransactionReceipt + + beforeEach(async () => { + const requestAmount = 20 + + await link.transfer(await roles.consumer.getAddress(), startingBalance) + + const args = encodeOracleRequest( + specId, + await roles.consumer.getAddress(), + fHash, + nonce, + constants.HashZero, + ) + const tx = await link + .connect(roles.consumer) + .transferAndCall(operator.address, requestAmount, args) + receipt = await tx.wait() + 
+ assert.equal(3, receipt.logs?.length) + request = decodeRunRequest(receipt.logs?.[2]) + + // pre conditions + const oracleBalance = await link.balanceOf(operator.address) + bigNumEquals(request.payment, oracleBalance) + + const consumerAmount = await link.balanceOf( + await roles.consumer.getAddress(), + ) + assert.equal( + startingBalance - Number(request.payment), + consumerAmount.toNumber(), + ) + }) + + describe('from a stranger', () => { + it('fails', async () => { + await evmRevert( + operator + .connect(roles.consumer) + .cancelOracleRequestByRequester( + ...convertCancelByRequesterParams(request, nonce), + ), + ) + }) + }) + + describe('from the requester', () => { + it('refunds the correct amount', async () => { + await increaseTime5Minutes(ethers.provider) + await operator + .connect(roles.consumer) + .cancelOracleRequestByRequester( + ...convertCancelByRequesterParams(request, nonce), + ) + const balance = await link.balanceOf( + await roles.consumer.getAddress(), + ) + + assert.equal(startingBalance, balance.toNumber()) // 100 + }) + + it('triggers a cancellation event', async () => { + await increaseTime5Minutes(ethers.provider) + const tx = await operator + .connect(roles.consumer) + .cancelOracleRequestByRequester( + ...convertCancelByRequesterParams(request, nonce), + ) + const receipt = await tx.wait() + + assert.equal(receipt.logs?.length, 2) + assert.equal(request.requestId, receipt.logs?.[0].topics[1]) + }) + + it('fails when called twice', async () => { + await increaseTime5Minutes(ethers.provider) + await operator + .connect(roles.consumer) + .cancelOracleRequestByRequester( + ...convertCancelByRequesterParams(request, nonce), + ) + + await evmRevert( + operator + .connect(roles.consumer) + .cancelOracleRequestByRequester(...convertCancelParams(request)), + ) + }) + }) + }) + }) + + describe('#cancelOracleRequest', () => { + describe('with no pending requests', () => { + it('fails', async () => { + const fakeRequest: RunRequest = { + 
requestId: ethers.utils.formatBytes32String('1337'), + payment: '0', + callbackFunc: + getterSetterFactory.interface.getSighash('requestedBytes32'), + expiration: '999999999999', + + callbackAddr: '', + data: Buffer.from(''), + dataVersion: 0, + specId: '', + requester: '', + topic: '', + } + await increaseTime5Minutes(ethers.provider) + + await evmRevert( + operator + .connect(roles.stranger) + .cancelOracleRequest(...convertCancelParams(fakeRequest)), + ) + }) + }) + + describe('with a pending request', () => { + const startingBalance = 100 + let request: ReturnType + let receipt: providers.TransactionReceipt + + beforeEach(async () => { + const requestAmount = 20 + + await link.transfer(await roles.consumer.getAddress(), startingBalance) + + const args = encodeOracleRequest( + specId, + await roles.consumer.getAddress(), + fHash, + 1, + constants.HashZero, + ) + const tx = await link + .connect(roles.consumer) + .transferAndCall(operator.address, requestAmount, args) + receipt = await tx.wait() + + assert.equal(3, receipt.logs?.length) + request = decodeRunRequest(receipt.logs?.[2]) + }) + + it('has correct initial balances', async () => { + const oracleBalance = await link.balanceOf(operator.address) + bigNumEquals(request.payment, oracleBalance) + + const consumerAmount = await link.balanceOf( + await roles.consumer.getAddress(), + ) + assert.equal( + startingBalance - Number(request.payment), + consumerAmount.toNumber(), + ) + }) + + describe('from a stranger', () => { + it('fails', async () => { + await evmRevert( + operator + .connect(roles.consumer) + .cancelOracleRequest(...convertCancelParams(request)), + ) + }) + }) + + describe('from the requester', () => { + it('refunds the correct amount', async () => { + await increaseTime5Minutes(ethers.provider) + await operator + .connect(roles.consumer) + .cancelOracleRequest(...convertCancelParams(request)) + const balance = await link.balanceOf( + await roles.consumer.getAddress(), + ) + + 
assert.equal(startingBalance, balance.toNumber()) // 100 + }) + + it('triggers a cancellation event', async () => { + await increaseTime5Minutes(ethers.provider) + const tx = await operator + .connect(roles.consumer) + .cancelOracleRequest(...convertCancelParams(request)) + const receipt = await tx.wait() + + assert.equal(receipt.logs?.length, 2) + assert.equal(request.requestId, receipt.logs?.[0].topics[1]) + }) + + it('fails when called twice', async () => { + await increaseTime5Minutes(ethers.provider) + await operator + .connect(roles.consumer) + .cancelOracleRequest(...convertCancelParams(request)) + + await evmRevert( + operator + .connect(roles.consumer) + .cancelOracleRequest(...convertCancelParams(request)), + ) + }) + }) + }) + }) + + describe('#ownerForward', () => { + let bytes: string + let payload: string + let mock: Contract + + beforeEach(async () => { + bytes = ethers.utils.hexlify(ethers.utils.randomBytes(100)) + payload = getterSetterFactory.interface.encodeFunctionData( + getterSetterFactory.interface.getFunction('setBytes'), + [bytes], + ) + mock = await getterSetterFactory.connect(roles.defaultAccount).deploy() + }) + + describe('when called by a non-owner', () => { + it('reverts', async () => { + await evmRevert( + operator.connect(roles.stranger).ownerForward(mock.address, payload), + ) + }) + }) + + describe('when called by owner', () => { + describe('when attempting to forward to the link token', () => { + it('reverts', async () => { + const sighash = linkTokenFactory.interface.getSighash('name') + await evmRevert( + operator + .connect(roles.defaultAccount) + .ownerForward(link.address, sighash), + 'Cannot call to PLI', + ) + }) + }) + + describe('when forwarding to any other address', () => { + it('forwards the data', async () => { + const tx = await operator + .connect(roles.defaultAccount) + .ownerForward(mock.address, payload) + await tx.wait() + assert.equal(await mock.getBytes(), bytes) + }) + + it('reverts when sending to a 
non-contract address', async () => { + await evmRevert( + operator + .connect(roles.defaultAccount) + .ownerForward(zeroAddress, payload), + 'Must forward to a contract', + ) + }) + + it('perceives the message is sent by the Operator', async () => { + const tx = await operator + .connect(roles.defaultAccount) + .ownerForward(mock.address, payload) + const receipt = await tx.wait() + const log: any = receipt.logs?.[0] + const logData = mock.interface.decodeEventLog( + mock.interface.getEvent('SetBytes'), + log.data, + log.topics, + ) + assert.equal(ethers.utils.getAddress(logData.from), operator.address) + }) + }) + }) + }) +}) diff --git a/contracts/test/v0.8/operatorforwarder/OperatorFactory.test.ts b/contracts/test/v0.8/operatorforwarder/OperatorFactory.test.ts new file mode 100644 index 00000000..89b6d70b --- /dev/null +++ b/contracts/test/v0.8/operatorforwarder/OperatorFactory.test.ts @@ -0,0 +1,293 @@ +import { ethers } from 'hardhat' +import { evmWordToAddress, publicAbi } from '../../test-helpers/helpers' +import { assert } from 'chai' +import { Contract, ContractFactory, ContractReceipt } from 'ethers' +import { getUsers, Roles } from '../../test-helpers/setup' + +let linkTokenFactory: ContractFactory +let operatorGeneratorFactory: ContractFactory +let operatorFactory: ContractFactory +let forwarderFactory: ContractFactory + +let roles: Roles + +before(async () => { + const users = await getUsers() + + roles = users.roles + linkTokenFactory = await ethers.getContractFactory( + 'src/v0.4/LinkToken.sol:LinkToken', + roles.defaultAccount, + ) + operatorGeneratorFactory = await ethers.getContractFactory( + 'src/v0.8/operatorforwarder/dev/OperatorFactory.sol:OperatorFactory', + roles.defaultAccount, + ) + operatorFactory = await ethers.getContractFactory( + 'src/v0.8/operatorforwarder/dev/Operator.sol:Operator', + roles.defaultAccount, + ) + forwarderFactory = await ethers.getContractFactory( + 
'src/v0.8/operatorforwarder/dev/AuthorizedForwarder.sol:AuthorizedForwarder', + roles.defaultAccount, + ) +}) + +describe('OperatorFactory', () => { + let link: Contract + let operatorGenerator: Contract + let operator: Contract + let forwarder: Contract + let receipt: ContractReceipt + let emittedOperator: string + let emittedForwarder: string + + beforeEach(async () => { + link = await linkTokenFactory.connect(roles.defaultAccount).deploy() + operatorGenerator = await operatorGeneratorFactory + .connect(roles.defaultAccount) + .deploy(link.address) + }) + + it('has a limited public interface [ @skip-coverage ]', () => { + publicAbi(operatorGenerator, [ + 'created', + 'deployNewOperator', + 'deployNewOperatorAndForwarder', + 'deployNewForwarder', + 'deployNewForwarderAndTransferOwnership', + 'linkToken', + 'typeAndVersion', + ]) + }) + + describe('#typeAndVersion', () => { + it('describes the authorized forwarder', async () => { + assert.equal( + await operatorGenerator.typeAndVersion(), + 'OperatorFactory 1.0.0', + ) + }) + }) + + describe('#deployNewOperator', () => { + beforeEach(async () => { + const tx = await operatorGenerator + .connect(roles.oracleNode) + .deployNewOperator() + + receipt = await tx.wait() + emittedOperator = evmWordToAddress(receipt.logs?.[0].topics?.[1]) + }) + + it('emits an event', async () => { + assert.equal(receipt?.events?.[0]?.event, 'OperatorCreated') + assert.equal(emittedOperator, receipt.events?.[0].args?.[0]) + assert.equal( + await roles.oracleNode.getAddress(), + receipt.events?.[0].args?.[1], + ) + assert.equal( + await roles.oracleNode.getAddress(), + receipt.events?.[0].args?.[2], + ) + }) + + it('sets the correct owner', async () => { + operator = await operatorFactory + .connect(roles.defaultAccount) + .attach(emittedOperator) + const ownerString = await operator.owner() + assert.equal(ownerString, await roles.oracleNode.getAddress()) + }) + + it('records that it deployed that address', async () => { + 
assert.isTrue(await operatorGenerator.created(emittedOperator)) + }) + }) + + describe('#deployNewOperatorAndForwarder', () => { + beforeEach(async () => { + const tx = await operatorGenerator + .connect(roles.oracleNode) + .deployNewOperatorAndForwarder() + + receipt = await tx.wait() + emittedOperator = evmWordToAddress(receipt.logs?.[0].topics?.[1]) + emittedForwarder = evmWordToAddress(receipt.logs?.[3].topics?.[1]) + }) + + it('emits an event recording that the operator was deployed', async () => { + assert.equal( + await roles.oracleNode.getAddress(), + receipt.events?.[0].args?.[1], + ) + assert.equal(receipt?.events?.[0]?.event, 'OperatorCreated') + assert.equal(receipt?.events?.[0]?.args?.[0], emittedOperator) + assert.equal( + receipt?.events?.[0]?.args?.[1], + await roles.oracleNode.getAddress(), + ) + assert.equal( + receipt?.events?.[0]?.args?.[2], + await roles.oracleNode.getAddress(), + ) + }) + + it('proposes the transfer of the forwarder to the operator', async () => { + assert.equal( + await roles.oracleNode.getAddress(), + receipt.events?.[0].args?.[1], + ) + assert.equal( + receipt?.events?.[1]?.topics?.[0], + '0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278', //OwnershipTransferRequested(address,address) + ) + assert.equal( + evmWordToAddress(receipt?.events?.[1]?.topics?.[1]), + operatorGenerator.address, + ) + assert.equal( + evmWordToAddress(receipt?.events?.[1]?.topics?.[2]), + emittedOperator, + ) + + assert.equal( + receipt?.events?.[2]?.topics?.[0], + '0x4e1e878dc28d5f040db5969163ff1acd75c44c3f655da2dde9c70bbd8e56dc7e', //OwnershipTransferRequestedWithMessage(address,address,bytes) + ) + assert.equal( + evmWordToAddress(receipt?.events?.[2]?.topics?.[1]), + operatorGenerator.address, + ) + assert.equal( + evmWordToAddress(receipt?.events?.[2]?.topics?.[2]), + emittedOperator, + ) + }) + + it('emits an event recording that the forwarder was deployed', async () => { + assert.equal(receipt?.events?.[3]?.event, 
'AuthorizedForwarderCreated') + assert.equal(receipt?.events?.[3]?.args?.[0], emittedForwarder) + assert.equal(receipt?.events?.[3]?.args?.[1], operatorGenerator.address) + assert.equal( + receipt?.events?.[3]?.args?.[2], + await roles.oracleNode.getAddress(), + ) + }) + + it('sets the correct owner on the operator', async () => { + operator = await operatorFactory + .connect(roles.defaultAccount) + .attach(receipt?.events?.[0]?.args?.[0]) + assert.equal(await roles.oracleNode.getAddress(), await operator.owner()) + }) + + it('sets the operator as the owner of the forwarder', async () => { + forwarder = await forwarderFactory + .connect(roles.defaultAccount) + .attach(emittedForwarder) + assert.equal(operatorGenerator.address, await forwarder.owner()) + }) + + it('records that it deployed that address', async () => { + assert.isTrue(await operatorGenerator.created(emittedOperator)) + assert.isTrue(await operatorGenerator.created(emittedForwarder)) + }) + }) + + describe('#deployNewForwarder', () => { + beforeEach(async () => { + const tx = await operatorGenerator + .connect(roles.oracleNode) + .deployNewForwarder() + + receipt = await tx.wait() + emittedForwarder = receipt.events?.[0].args?.[0] + }) + + it('emits an event', async () => { + assert.equal(receipt?.events?.[0]?.event, 'AuthorizedForwarderCreated') + assert.equal( + await roles.oracleNode.getAddress(), + receipt.events?.[0].args?.[1], + ) // owner + assert.equal( + await roles.oracleNode.getAddress(), + receipt.events?.[0].args?.[2], + ) // sender + }) + + it('sets the caller as the owner', async () => { + forwarder = await forwarderFactory + .connect(roles.defaultAccount) + .attach(emittedForwarder) + const ownerString = await forwarder.owner() + assert.equal(ownerString, await roles.oracleNode.getAddress()) + }) + + it('records that it deployed that address', async () => { + assert.isTrue(await operatorGenerator.created(emittedForwarder)) + }) + }) + + 
describe('#deployNewForwarderAndTransferOwnership', () => { + const message = '0x42' + + beforeEach(async () => { + const tx = await operatorGenerator + .connect(roles.oracleNode) + .deployNewForwarderAndTransferOwnership( + await roles.stranger.getAddress(), + message, + ) + receipt = await tx.wait() + + emittedForwarder = evmWordToAddress(receipt.logs?.[2].topics?.[1]) + }) + + it('emits an event', async () => { + assert.equal(receipt?.events?.[2]?.event, 'AuthorizedForwarderCreated') + assert.equal( + await roles.oracleNode.getAddress(), + receipt.events?.[2].args?.[1], + ) // owner + assert.equal( + await roles.oracleNode.getAddress(), + receipt.events?.[2].args?.[2], + ) // sender + }) + + it('sets the caller as the owner', async () => { + forwarder = await forwarderFactory + .connect(roles.defaultAccount) + .attach(emittedForwarder) + const ownerString = await forwarder.owner() + assert.equal(ownerString, await roles.oracleNode.getAddress()) + }) + + it('proposes a transfer to the recipient', async () => { + const emittedOwner = evmWordToAddress(receipt.logs?.[0].topics?.[1]) + assert.equal(emittedOwner, await roles.oracleNode.getAddress()) + const emittedRecipient = evmWordToAddress(receipt.logs?.[0].topics?.[2]) + assert.equal(emittedRecipient, await roles.stranger.getAddress()) + }) + + it('proposes a transfer to the recipient with the specified message', async () => { + const emittedOwner = evmWordToAddress(receipt.logs?.[1].topics?.[1]) + assert.equal(emittedOwner, await roles.oracleNode.getAddress()) + const emittedRecipient = evmWordToAddress(receipt.logs?.[1].topics?.[2]) + assert.equal(emittedRecipient, await roles.stranger.getAddress()) + + const encodedMessage = ethers.utils.defaultAbiCoder.encode( + ['bytes'], + [message], + ) + assert.equal(receipt?.logs?.[1]?.data, encodedMessage) + }) + + it('records that it deployed that address', async () => { + assert.isTrue(await operatorGenerator.created(emittedForwarder)) + }) + }) +}) diff --git 
a/contracts/tsconfig.json b/contracts/tsconfig.json new file mode 100644 index 00000000..6205c888 --- /dev/null +++ b/contracts/tsconfig.json @@ -0,0 +1,37 @@ +{ + "compilerOptions": { + /* Basic Options */ + "incremental": true /* Enable incremental compilation */, + "target": "ES2019" /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019' or 'ESNEXT'. */, + "module": "commonjs" /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', or 'ESNext'. */, + "composite": true /* Enable project compilation */, + "noEmit": true /* Do not emit outputs. */, + "noErrorTruncation": true /* Do not truncate error messages */, + "skipLibCheck": true /* Skip type checking of declaration files. Requires TypeScript version 2.0 or later. */, + "importHelpers": true /* Import emit helpers from 'tslib'. */, + + "resolveJsonModule": true, + /* Strict Type-Checking Options */ + "strict": true /* Enable all strict type-checking options. */, + + /* Additional Checks */ + "noUnusedLocals": true /* Report errors on unused locals. */, + "noUnusedParameters": true /* Report errors on unused parameters. */, + "noImplicitReturns": true /* Report error when not all code paths in function return a value. */, + "noFallthroughCasesInSwitch": true /* Report errors for fallthrough cases in switch statement. */, + "forceConsistentCasingInFileNames": true /* Disallow inconsistently-cased references to the same file. */, + + /* Module Resolution Options */ + "moduleResolution": "node" /* Specify module resolution strategy: 'node' (Node.js) or 'classic' (TypeScript pre-1.6). */, + "allowSyntheticDefaultImports": true /* Allow default imports from modules with no default export. This does not affect code emit, just typechecking. */, + "esModuleInterop": true /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. 
*/, + + /* Source Map Options */ + "inlineSourceMap": true /* Emit a single file with source maps instead of having a separate file. */, + "inlineSources": true /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */, + + /* Experimental Options */ + "experimentalDecorators": true /* Enables experimental support for ES7 decorators. */, + "emitDecoratorMetadata": true /* Enables experimental support for emitting type metadata for decorators. */ + } +} diff --git a/core/README.md b/core/README.md new file mode 100644 index 00000000..71666f12 --- /dev/null +++ b/core/README.md @@ -0,0 +1,57 @@ +
+

+ +Plugin logo + +

+
+ +[![Go Report Card](https://goreportcard.com/badge/github.com/goplugin/pluginv3.0)](https://goreportcard.com/report/github.com/goplugin/pluginv3.0) +[![GoDoc](https://godoc.org/github.com/goplugin/pluginv3.0?status.svg)](https://godoc.org/github.com/goplugin/pluginv3.0) + +Plugin Core is the API backend that Plugin client contracts on Ethereum +make requests to. The backend utilizes Solidity contract ABIs to generate types +for interacting with Ethereum contracts. + +## Features + +* Headless API implementation +* CLI tool providing convenience commands for node configuration, administration, + and CRUD object operations (e.g. Jobs, Runs, and even the VRF) + +## Installation + +See the [root README](../README.md#install) +for instructions on how to build the full Plugin node. + +## Directory Structure + +This directory contains the majority of the code for the backend of Plugin. + +Static assets are pulled in using Go's [`embed`](https://pkg.go.dev/embed) package +and included in the final binary. + +## Common Commands + +**Install:** + +By default `go install` will install this directory under the name `core`. +You can instead build it and place it in your path as `plugin`: + +```sh +go build -o $GOPATH/bin/plugin . +``` + +**Test:** + +```sh +# A higher parallel number can speed up tests at the expense of more RAM. +go test -p 1 ./... +``` + +This excludes more extensive integration tests which require a bit more setup; head over to [./integration-tests] +(../integration-tests/README.md) for more details on running those. + +The Go test suite is almost entirely parallelizable, and so running the default +`go test ./...` will commonly peg your processor. Limit parallelization with +`-p 2` or whatever best fits your computer: `go test -p 4 ./...`. 
diff --git a/core/auth/auth.go b/core/auth/auth.go new file mode 100644 index 00000000..a6245556 --- /dev/null +++ b/core/auth/auth.go @@ -0,0 +1,63 @@ +package auth + +import ( + "encoding/hex" + "fmt" + + "github.com/goplugin/pluginv3.0/v2/core/utils" + + "github.com/pkg/errors" + "golang.org/x/crypto/sha3" +) + +var ( + // ErrorAuthFailed is a generic authentication failure - but not because of + // some system failure on our behalf (i.e. HTTP 5xx), more detail is not + // given + ErrorAuthFailed = errors.New("Authentication failed") +) + +// Token is used for API authentication. +type Token struct { + AccessKey string `json:"accessKey"` + Secret string `json:"secret"` +} + +// GetID returns the ID of this structure for jsonapi serialization. +func (ta *Token) GetID() string { + return ta.AccessKey +} + +// GetName returns the pluralized "type" of this structure for jsonapi serialization. +func (ta *Token) GetName() string { + return "auth_tokens" +} + +// SetID sets the ID of this structure when deserializing from jsonapi documents. +func (ta *Token) SetID(id string) error { + ta.AccessKey = id + return nil +} + +// NewToken returns a new Authentication Token. 
+func NewToken() *Token { + return &Token{ + AccessKey: utils.NewBytes32ID(), + Secret: utils.NewSecret(utils.DefaultSecretSize), + } +} + +func hashInput(ta *Token, salt string) []byte { + return []byte(fmt.Sprintf("v0-%s-%s-%s", ta.AccessKey, ta.Secret, salt)) +} + +// HashedSecret generates a hashed password for an external initiator +// authentication +func HashedSecret(ta *Token, salt string) (string, error) { + hasher := sha3.New256() + _, err := hasher.Write(hashInput(ta, salt)) + if err != nil { + return "", errors.Wrap(err, "error writing external initiator authentication to hasher") + } + return hex.EncodeToString(hasher.Sum(nil)), nil +} diff --git a/core/bridges/bridge_type.go b/core/bridges/bridge_type.go new file mode 100644 index 00000000..402a9cf8 --- /dev/null +++ b/core/bridges/bridge_type.go @@ -0,0 +1,201 @@ +package bridges + +import ( + "crypto/subtle" + "database/sql/driver" + "encoding/json" + "fmt" + "math/big" + "regexp" + "strings" + "time" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// BridgeTypeRequest is the incoming record used to create a BridgeType +type BridgeTypeRequest struct { + Name BridgeName `json:"name"` + URL models.WebURL `json:"url"` + Confirmations uint32 `json:"confirmations"` + MinimumContractPayment *assets.Link `json:"minimumContractPayment"` +} + +// GetID returns the ID of this structure for jsonapi serialization. +func (bt BridgeTypeRequest) GetID() string { + return bt.Name.String() +} + +// GetName returns the pluralized "type" of this structure for jsonapi serialization. +func (bt BridgeTypeRequest) GetName() string { + return "bridges" +} + +// SetID is used to set the ID of this structure when deserializing from jsonapi documents. 
+func (bt *BridgeTypeRequest) SetID(value string) error { + name, err := ParseBridgeName(value) + bt.Name = name + return err +} + +// BridgeTypeAuthentication is the record returned in response to a request to create a BridgeType +type BridgeTypeAuthentication struct { + Name BridgeName + URL models.WebURL + Confirmations uint32 + IncomingToken string + OutgoingToken string + MinimumContractPayment *assets.Link +} + +// BridgeType is used for external adapters and has fields for +// the name of the adapter and its URL. +type BridgeType struct { + Name BridgeName + URL models.WebURL + Confirmations uint32 + IncomingTokenHash string + Salt string + OutgoingToken string + MinimumContractPayment *assets.Link + CreatedAt time.Time + UpdatedAt time.Time +} + +// NewBridgeType returns a bridge type authentication (with plaintext +// password) and a bridge type (with hashed password, for persisting) +func NewBridgeType(btr *BridgeTypeRequest) (*BridgeTypeAuthentication, + *BridgeType, error) { + incomingToken := utils.NewSecret(24) + outgoingToken := utils.NewSecret(24) + salt := utils.NewSecret(24) + + hash, err := incomingTokenHash(incomingToken, salt) + if err != nil { + return nil, nil, err + } + + return &BridgeTypeAuthentication{ + Name: btr.Name, + URL: btr.URL, + Confirmations: btr.Confirmations, + IncomingToken: incomingToken, + OutgoingToken: outgoingToken, + MinimumContractPayment: btr.MinimumContractPayment, + }, &BridgeType{ + Name: btr.Name, + URL: btr.URL, + Confirmations: btr.Confirmations, + IncomingTokenHash: hash, + Salt: salt, + OutgoingToken: outgoingToken, + MinimumContractPayment: btr.MinimumContractPayment, + }, nil +} + +// AuthenticateBridgeType returns true if the passed token matches its +// IncomingToken, or returns false with an error. 
+func AuthenticateBridgeType(bt *BridgeType, token string) (bool, error) { + hash, err := incomingTokenHash(token, bt.Salt) + if err != nil { + return false, err + } + return subtle.ConstantTimeCompare([]byte(hash), []byte(bt.IncomingTokenHash)) == 1, nil +} + +func incomingTokenHash(token, salt string) (string, error) { + input := fmt.Sprintf("%s-%s", token, salt) + hash, err := utils.Sha256(input) + if err != nil { + return "", err + } + return hash, nil +} + +// NOTE: latestAnswer and updatedAt are the only metadata used. +// Currently the market closer adapter and outlier detection depend on latestAnswer. +// https://github.com/goplugin/external-adapters-js/tree/f474bd2e2de13ebe5c9dc3df36ebb7018817005e/composite/market-closure +// https://github.com/goplugin/external-adapters-js/tree/5abb8e5ec2024f724fd39122897baa63c3cd0167/composite/outlier-detection +type BridgeMetaData struct { + LatestAnswer *big.Int `json:"latestAnswer"` + UpdatedAt *big.Int `json:"updatedAt"` // A unix timestamp +} + +type BridgeMetaDataJSON struct { + Meta BridgeMetaData +} + +func MarshalBridgeMetaData(latestAnswer *big.Int, updatedAt *big.Int) (map[string]interface{}, error) { + b, err := json.Marshal(&BridgeMetaData{LatestAnswer: latestAnswer, UpdatedAt: updatedAt}) + if err != nil { + return nil, err + } + var mp map[string]interface{} + err = json.Unmarshal(b, &mp) + if err != nil { + return nil, err + } + return mp, nil +} + +// BridgeName defines what Adapter a TaskSpec will use. +type BridgeName string + +var bridgeNameRegex = regexp.MustCompile("^[a-zA-Z0-9-_]*$") + +// ParseBridgeName returns a formatted Task type. +func ParseBridgeName(val string) (BridgeName, error) { + if !bridgeNameRegex.MatchString(val) { + return "", fmt.Errorf("task type validation: name %v contains invalid characters", val) + } + + return BridgeName(strings.ToLower(val)), nil +} + +// MustParseBridgeName instantiates a new BridgeName, and panics if a bad input is provided. 
+func MustParseBridgeName(val string) BridgeName { + tt, err := ParseBridgeName(val) + if err != nil { + panic(fmt.Sprintf("%v is not a valid BridgeName", val)) + } + return tt +} + +// UnmarshalJSON converts a bytes slice of JSON to a BridgeName. +func (t *BridgeName) UnmarshalJSON(input []byte) error { + var aux string + if err := json.Unmarshal(input, &aux); err != nil { + return err + } + tt, err := ParseBridgeName(aux) + *t = tt + return err +} + +// MarshalJSON converts a BridgeName to a JSON byte slice. +func (t BridgeName) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String returns this BridgeName as a string. +func (t BridgeName) String() string { + return string(t) +} + +// Value returns this instance serialized for database storage. +func (t BridgeName) Value() (driver.Value, error) { + return string(t), nil +} + +// Scan reads the database value and returns an instance. +func (t *BridgeName) Scan(value interface{}) error { + temp, ok := value.(string) + if !ok { + return fmt.Errorf("unable to convert %v of %T to BridgeName", value, value) + } + + *t = BridgeName(temp) + return nil +} diff --git a/core/bridges/bridge_type_test.go b/core/bridges/bridge_type_test.go new file mode 100644 index 00000000..168beb10 --- /dev/null +++ b/core/bridges/bridge_type_test.go @@ -0,0 +1,127 @@ +package bridges_test + +import ( + "encoding/json" + "math/big" + "math/rand" + "net/url" + "strconv" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common/math" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBridgeTypeRequest(t *testing.T) { + u, err := url.Parse("http://example.com/test") + require.NoError(t, err) + r := bridges.BridgeTypeRequest{ + Name: 
bridges.MustParseBridgeName("test-bridge-name"), + URL: models.WebURL(*u), + Confirmations: math.MaxUint32, + MinimumContractPayment: (*assets.Link)(big.NewInt(1000)), + } + assert.Equal(t, "bridges", r.GetName()) + assert.Equal(t, "test-bridge-name", r.GetID()) + const validID = "abc123foo_bar-test" + assert.NoError(t, r.SetID(validID)) + assert.Equal(t, validID, r.GetID()) + assert.Error(t, r.SetID("abc123.,<>/.foobar")) +} + +func TestBridgeType_Authenticate(t *testing.T) { + t.Parallel() + + bta, bt := cltest.NewBridgeType(t, cltest.BridgeOpts{}) + tests := []struct { + name, token string + wantError bool + }{ + {"correct", bta.IncomingToken, false}, + {"incorrect", "gibberish", true}, + {"empty incorrect", "", true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ok, err := bridges.AuthenticateBridgeType(bt, test.token) + require.NoError(t, err) + + if test.wantError { + assert.False(t, ok) + } else { + assert.True(t, ok) + } + }) + } +} + +func BenchmarkParseBridgeName(b *testing.B) { + const valid = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_` + for _, l := range []int{1, 10, 20, 50, 100, 1000, 10000} { + b.Run(strconv.Itoa(l), func(b *testing.B) { + var sb strings.Builder + for i := 0; i < l; i++ { + sb.WriteByte(valid[rand.Intn(len(valid))]) + } + name := sb.String() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := bridges.ParseBridgeName(name) + if err != nil { + b.Fatalf("failed to parse %q: %v\n", name, err) + } + } + }) + } +} + +func TestBridgeName_UnmarshalJSON(t *testing.T) { + var b bridges.BridgeName + require.NoError(t, json.Unmarshal([]byte(`"asdf123test"`), &b)) + require.Equal(t, "asdf123test", b.String()) + + got, err := json.Marshal(b) + require.NoError(t, err) + require.Equal(t, []byte(`"asdf123test"`), got) + + require.Error(t, json.Unmarshal([]byte(`"invalid,.<>/asdf?"`), &b)) +} + +func TestMarshalBridgeMetaData(t *testing.T) { + t.Parallel() + + tests := []struct { + name 
string + latestAnswer *big.Int + updatedAt *big.Int + want map[string]any + }{ + {"nil", nil, nil, + map[string]any{"latestAnswer": nil, "updatedAt": nil}}, + {"zero", big.NewInt(0), big.NewInt(0), + map[string]any{"latestAnswer": float64(0), "updatedAt": float64(0)}}, + {"one", big.NewInt(1), big.NewInt(1), + map[string]any{"latestAnswer": float64(1), "updatedAt": float64(1)}}, + {"negative", big.NewInt(-100), big.NewInt(-10), + map[string]any{"latestAnswer": float64(-100), "updatedAt": float64(-10)}}, + // 9223372036854775807000 + {"large", new(big.Int).Mul(big.NewInt(math.MaxInt64), big.NewInt(1000)), big.NewInt(1), + map[string]any{"latestAnswer": float64(9.223372036854776e+21), "updatedAt": float64(1)}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := bridges.MarshalBridgeMetaData(tt.latestAnswer, tt.updatedAt) + require.NoError(t, err) + assert.Equalf(t, tt.want, got, "MarshalBridgeMetaData(%v, %v)", tt.latestAnswer, tt.updatedAt) + }) + } +} diff --git a/core/bridges/external_initiator.go b/core/bridges/external_initiator.go new file mode 100644 index 00000000..6ad7dced --- /dev/null +++ b/core/bridges/external_initiator.go @@ -0,0 +1,67 @@ +package bridges + +import ( + "crypto/subtle" + "strings" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" + + "github.com/pkg/errors" +) + +// ExternalInitiatorRequest is the incoming record used to create an ExternalInitiator. 
+type ExternalInitiatorRequest struct { + Name string `json:"name"` + URL *models.WebURL `json:"url,omitempty"` +} + +// ExternalInitiator represents a user that can initiate runs remotely +type ExternalInitiator struct { + ID int64 + Name string + URL *models.WebURL + AccessKey string + Salt string + HashedSecret string + OutgoingSecret string + OutgoingToken string + + CreatedAt time.Time + UpdatedAt time.Time +} + +// NewExternalInitiator generates an ExternalInitiator from an +// auth.Token, hashing the password for storage +func NewExternalInitiator( + eia *auth.Token, + eir *ExternalInitiatorRequest, +) (*ExternalInitiator, error) { + salt := utils.NewSecret(utils.DefaultSecretSize) + hashedSecret, err := auth.HashedSecret(eia, salt) + if err != nil { + return nil, errors.Wrap(err, "error hashing secret for external initiator") + } + + return &ExternalInitiator{ + Name: strings.ToLower(eir.Name), + URL: eir.URL, + AccessKey: eia.AccessKey, + HashedSecret: hashedSecret, + Salt: salt, + OutgoingToken: utils.NewSecret(utils.DefaultSecretSize), + OutgoingSecret: utils.NewSecret(utils.DefaultSecretSize), + }, nil +} + +// AuthenticateExternalInitiator compares an auth against an initiator and +// returns true if the password hashes match +func AuthenticateExternalInitiator(eia *auth.Token, ea *ExternalInitiator) (bool, error) { + hashedSecret, err := auth.HashedSecret(eia, ea.Salt) + if err != nil { + return false, err + } + return subtle.ConstantTimeCompare([]byte(hashedSecret), []byte(ea.HashedSecret)) == 1, nil +} diff --git a/core/bridges/external_initiator_test.go b/core/bridges/external_initiator_test.go new file mode 100644 index 00000000..f54d6ecb --- /dev/null +++ b/core/bridges/external_initiator_test.go @@ -0,0 +1,50 @@ +package bridges_test + +import ( + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + + "github.com/stretchr/testify/assert" +) + +func TestNewExternalInitiator(t *testing.T) { + eia := auth.NewToken() + assert.Len(t, eia.AccessKey, 32) + assert.Len(t, eia.Secret, 64) + + url := cltest.WebURL(t, "http://localhost:8888") + name := uuid.New().String() + eir := &bridges.ExternalInitiatorRequest{ + Name: name, + URL: &url, + } + ei, err := bridges.NewExternalInitiator(eia, eir) + assert.NoError(t, err) + assert.NotEqual(t, ei.HashedSecret, eia.Secret) + assert.Equal(t, ei.AccessKey, eia.AccessKey) +} + +func TestAuthenticateExternalInitiator(t *testing.T) { + eia := auth.NewToken() + ok, err := bridges.AuthenticateExternalInitiator(eia, &bridges.ExternalInitiator{ + Salt: "salt", + HashedSecret: "secret", + }) + require.NoError(t, err) + require.False(t, ok) + + hs, err := auth.HashedSecret(eia, "salt") + require.NoError(t, err) + ok, err = bridges.AuthenticateExternalInitiator(eia, &bridges.ExternalInitiator{ + Salt: "salt", + HashedSecret: hs, + }) + require.NoError(t, err) + require.True(t, ok) +} diff --git a/core/bridges/mocks/orm.go b/core/bridges/mocks/orm.go new file mode 100644 index 00000000..0af746cd --- /dev/null +++ b/core/bridges/mocks/orm.go @@ -0,0 +1,359 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + auth "github.com/goplugin/pluginv3.0/v2/core/auth" + bridges "github.com/goplugin/pluginv3.0/v2/core/bridges" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// ORM is an autogenerated mock type for the ORM type +type ORM struct { + mock.Mock +} + +// BridgeTypes provides a mock function with given fields: offset, limit +func (_m *ORM) BridgeTypes(offset int, limit int) ([]bridges.BridgeType, int, error) { + ret := _m.Called(offset, limit) + + if len(ret) == 0 { + panic("no return value specified for BridgeTypes") + } + + var r0 []bridges.BridgeType + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(int, int) ([]bridges.BridgeType, int, error)); ok { + return rf(offset, limit) + } + if rf, ok := ret.Get(0).(func(int, int) []bridges.BridgeType); ok { + r0 = rf(offset, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]bridges.BridgeType) + } + } + + if rf, ok := ret.Get(1).(func(int, int) int); ok { + r1 = rf(offset, limit) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(int, int) error); ok { + r2 = rf(offset, limit) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// CreateBridgeType provides a mock function with given fields: bt +func (_m *ORM) CreateBridgeType(bt *bridges.BridgeType) error { + ret := _m.Called(bt) + + if len(ret) == 0 { + panic("no return value specified for CreateBridgeType") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bridges.BridgeType) error); ok { + r0 = rf(bt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CreateExternalInitiator provides a mock function with given fields: externalInitiator +func (_m *ORM) CreateExternalInitiator(externalInitiator *bridges.ExternalInitiator) error { + ret := _m.Called(externalInitiator) + + if len(ret) == 0 { + panic("no return value specified for CreateExternalInitiator") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bridges.ExternalInitiator) error); ok { + r0 = 
rf(externalInitiator) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteBridgeType provides a mock function with given fields: bt +func (_m *ORM) DeleteBridgeType(bt *bridges.BridgeType) error { + ret := _m.Called(bt) + + if len(ret) == 0 { + panic("no return value specified for DeleteBridgeType") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bridges.BridgeType) error); ok { + r0 = rf(bt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteExternalInitiator provides a mock function with given fields: name +func (_m *ORM) DeleteExternalInitiator(name string) error { + ret := _m.Called(name) + + if len(ret) == 0 { + panic("no return value specified for DeleteExternalInitiator") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(name) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ExternalInitiators provides a mock function with given fields: offset, limit +func (_m *ORM) ExternalInitiators(offset int, limit int) ([]bridges.ExternalInitiator, int, error) { + ret := _m.Called(offset, limit) + + if len(ret) == 0 { + panic("no return value specified for ExternalInitiators") + } + + var r0 []bridges.ExternalInitiator + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(int, int) ([]bridges.ExternalInitiator, int, error)); ok { + return rf(offset, limit) + } + if rf, ok := ret.Get(0).(func(int, int) []bridges.ExternalInitiator); ok { + r0 = rf(offset, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]bridges.ExternalInitiator) + } + } + + if rf, ok := ret.Get(1).(func(int, int) int); ok { + r1 = rf(offset, limit) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(int, int) error); ok { + r2 = rf(offset, limit) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// FindBridge provides a mock function with given fields: name +func (_m *ORM) FindBridge(name bridges.BridgeName) (bridges.BridgeType, error) { + ret := _m.Called(name) + + if len(ret) 
== 0 { + panic("no return value specified for FindBridge") + } + + var r0 bridges.BridgeType + var r1 error + if rf, ok := ret.Get(0).(func(bridges.BridgeName) (bridges.BridgeType, error)); ok { + return rf(name) + } + if rf, ok := ret.Get(0).(func(bridges.BridgeName) bridges.BridgeType); ok { + r0 = rf(name) + } else { + r0 = ret.Get(0).(bridges.BridgeType) + } + + if rf, ok := ret.Get(1).(func(bridges.BridgeName) error); ok { + r1 = rf(name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindBridges provides a mock function with given fields: name +func (_m *ORM) FindBridges(name []bridges.BridgeName) ([]bridges.BridgeType, error) { + ret := _m.Called(name) + + if len(ret) == 0 { + panic("no return value specified for FindBridges") + } + + var r0 []bridges.BridgeType + var r1 error + if rf, ok := ret.Get(0).(func([]bridges.BridgeName) ([]bridges.BridgeType, error)); ok { + return rf(name) + } + if rf, ok := ret.Get(0).(func([]bridges.BridgeName) []bridges.BridgeType); ok { + r0 = rf(name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]bridges.BridgeType) + } + } + + if rf, ok := ret.Get(1).(func([]bridges.BridgeName) error); ok { + r1 = rf(name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindExternalInitiator provides a mock function with given fields: eia +func (_m *ORM) FindExternalInitiator(eia *auth.Token) (*bridges.ExternalInitiator, error) { + ret := _m.Called(eia) + + if len(ret) == 0 { + panic("no return value specified for FindExternalInitiator") + } + + var r0 *bridges.ExternalInitiator + var r1 error + if rf, ok := ret.Get(0).(func(*auth.Token) (*bridges.ExternalInitiator, error)); ok { + return rf(eia) + } + if rf, ok := ret.Get(0).(func(*auth.Token) *bridges.ExternalInitiator); ok { + r0 = rf(eia) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*bridges.ExternalInitiator) + } + } + + if rf, ok := ret.Get(1).(func(*auth.Token) error); ok { + r1 = rf(eia) + } else { + r1 = ret.Error(1) + } + + return 
r0, r1 +} + +// FindExternalInitiatorByName provides a mock function with given fields: iname +func (_m *ORM) FindExternalInitiatorByName(iname string) (bridges.ExternalInitiator, error) { + ret := _m.Called(iname) + + if len(ret) == 0 { + panic("no return value specified for FindExternalInitiatorByName") + } + + var r0 bridges.ExternalInitiator + var r1 error + if rf, ok := ret.Get(0).(func(string) (bridges.ExternalInitiator, error)); ok { + return rf(iname) + } + if rf, ok := ret.Get(0).(func(string) bridges.ExternalInitiator); ok { + r0 = rf(iname) + } else { + r0 = ret.Get(0).(bridges.ExternalInitiator) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(iname) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCachedResponse provides a mock function with given fields: dotId, specId, maxElapsed +func (_m *ORM) GetCachedResponse(dotId string, specId int32, maxElapsed time.Duration) ([]byte, error) { + ret := _m.Called(dotId, specId, maxElapsed) + + if len(ret) == 0 { + panic("no return value specified for GetCachedResponse") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(string, int32, time.Duration) ([]byte, error)); ok { + return rf(dotId, specId, maxElapsed) + } + if rf, ok := ret.Get(0).(func(string, int32, time.Duration) []byte); ok { + r0 = rf(dotId, specId, maxElapsed) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(string, int32, time.Duration) error); ok { + r1 = rf(dotId, specId, maxElapsed) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateBridgeType provides a mock function with given fields: bt, btr +func (_m *ORM) UpdateBridgeType(bt *bridges.BridgeType, btr *bridges.BridgeTypeRequest) error { + ret := _m.Called(bt, btr) + + if len(ret) == 0 { + panic("no return value specified for UpdateBridgeType") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bridges.BridgeType, *bridges.BridgeTypeRequest) error); ok { + r0 = 
rf(bt, btr) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpsertBridgeResponse provides a mock function with given fields: dotId, specId, response +func (_m *ORM) UpsertBridgeResponse(dotId string, specId int32, response []byte) error { + ret := _m.Called(dotId, specId, response) + + if len(ret) == 0 { + panic("no return value specified for UpsertBridgeResponse") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, int32, []byte) error); ok { + r0 = rf(dotId, specId, response) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewORM creates a new instance of ORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewORM(t interface { + mock.TestingT + Cleanup(func()) +}) *ORM { + mock := &ORM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/bridges/orm.go b/core/bridges/orm.go new file mode 100644 index 00000000..770b6276 --- /dev/null +++ b/core/bridges/orm.go @@ -0,0 +1,264 @@ +package bridges + +import ( + "database/sql" + "fmt" + "sync" + "time" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +//go:generate mockery --quiet --name ORM --output ./mocks --case=underscore + +type ORM interface { + FindBridge(name BridgeName) (bt BridgeType, err error) + FindBridges(name []BridgeName) (bts []BridgeType, err error) + DeleteBridgeType(bt *BridgeType) error + BridgeTypes(offset int, limit int) ([]BridgeType, int, error) + CreateBridgeType(bt *BridgeType) error + UpdateBridgeType(bt *BridgeType, btr *BridgeTypeRequest) error + + GetCachedResponse(dotId string, specId int32, maxElapsed time.Duration) ([]byte, error) + UpsertBridgeResponse(dotId string, specId int32, response []byte) error + + 
ExternalInitiators(offset int, limit int) ([]ExternalInitiator, int, error) + CreateExternalInitiator(externalInitiator *ExternalInitiator) error + DeleteExternalInitiator(name string) error + FindExternalInitiator(eia *auth.Token) (*ExternalInitiator, error) + FindExternalInitiatorByName(iname string) (exi ExternalInitiator, err error) +} + +type orm struct { + q pg.Q + + bridgeTypesCache sync.Map +} + +var _ ORM = (*orm)(nil) + +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) ORM { + namedLogger := lggr.Named("BridgeORM") + return &orm{q: pg.NewQ(db, namedLogger, cfg)} +} + +// FindBridge looks up a Bridge by its Name. +// Returns sql.ErrNoRows if name not present +func (o *orm) FindBridge(name BridgeName) (bt BridgeType, err error) { + if bridgeType, ok := o.bridgeTypesCache.Load(name); ok { + return bridgeType.(BridgeType), nil + } + + stmt := "SELECT * FROM bridge_types WHERE name = $1" + err = o.q.Get(&bt, stmt, name.String()) + if err == nil { + o.bridgeTypesCache.Store(bt.Name, bt) + } + return +} + +// FindBridges looks up multiple bridges in a single query. +// Errors unless all bridges successfully found. Requires at least one bridge. +// Expects all bridges to be unique +func (o *orm) FindBridges(names []BridgeName) (bts []BridgeType, err error) { + if len(names) == 0 { + return nil, errors.Errorf("at least one bridge name is required") + } + + var allFoundBts []BridgeType + var searchNames []BridgeName + + for _, n := range names { + if bridgeType, ok := o.bridgeTypesCache.Load(n); ok { + allFoundBts = append(allFoundBts, bridgeType.(BridgeType)) + } else { + searchNames = append(searchNames, n) + } + } + + if len(allFoundBts) == len(names) { + return allFoundBts, nil + } + + stmt := "SELECT * FROM bridge_types WHERE name IN (?)" + query, args, err := sqlx.In(stmt, searchNames) + if err != nil { + return nil, err + } + err = o.q.Select(&bts, o.q.Rebind(query), args...) 
+ if err != nil { + return nil, err + } + for _, bt := range bts { + o.bridgeTypesCache.Store(bt.Name, bt) + } + allFoundBts = append(allFoundBts, bts...) + if len(allFoundBts) != len(names) { + return nil, errors.Errorf("not all bridges exist, asked for %v, exists %v", names, allFoundBts) + } + return allFoundBts, nil +} + +// DeleteBridgeType removes the bridge type +func (o *orm) DeleteBridgeType(bt *BridgeType) error { + query := "DELETE FROM bridge_types WHERE name = $1" + result, err := o.q.Exec(query, bt.Name) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + // We delete regardless of the rows affected, in case it gets out of sync + o.bridgeTypesCache.Delete(bt.Name) + if rowsAffected == 0 { + return sql.ErrNoRows + } + return err +} + +// BridgeTypes returns bridge types ordered by name filtered limited by the +// passed params. +func (o *orm) BridgeTypes(offset int, limit int) (bridges []BridgeType, count int, err error) { + err = o.q.Transaction(func(tx pg.Queryer) error { + if err = tx.Get(&count, "SELECT COUNT(*) FROM bridge_types"); err != nil { + return errors.Wrap(err, "BridgeTypes failed to get count") + } + sql := `SELECT * FROM bridge_types ORDER BY name asc LIMIT $1 OFFSET $2;` + if err = tx.Select(&bridges, sql, limit, offset); err != nil { + return errors.Wrap(err, "BridgeTypes failed to load bridge_types") + } + return nil + }, pg.OptReadOnlyTx()) + + return +} + +// CreateBridgeType saves the bridge type. 
+func (o *orm) CreateBridgeType(bt *BridgeType) error { + stmt := `INSERT INTO bridge_types (name, url, confirmations, incoming_token_hash, salt, outgoing_token, minimum_contract_payment, created_at, updated_at) + VALUES (:name, :url, :confirmations, :incoming_token_hash, :salt, :outgoing_token, :minimum_contract_payment, now(), now()) + RETURNING *;` + err := o.q.Transaction(func(tx pg.Queryer) error { + stmt, err := tx.PrepareNamed(stmt) + if err != nil { + return err + } + defer stmt.Close() + return stmt.Get(bt, bt) + }) + if err == nil { + o.bridgeTypesCache.Store(bt.Name, *bt) + } + + return errors.Wrap(err, "CreateBridgeType failed") +} + +// UpdateBridgeType updates the bridge type. +func (o *orm) UpdateBridgeType(bt *BridgeType, btr *BridgeTypeRequest) error { + stmt := "UPDATE bridge_types SET url = $1, confirmations = $2, minimum_contract_payment = $3 WHERE name = $4 RETURNING *" + err := o.q.Get(bt, stmt, btr.URL, btr.Confirmations, btr.MinimumContractPayment, bt.Name) + if err == nil { + o.bridgeTypesCache.Store(bt.Name, *bt) + } + + return err +} + +func (o *orm) GetCachedResponse(dotId string, specId int32, maxElapsed time.Duration) (response []byte, err error) { + stalenessThreshold := time.Now().Add(-maxElapsed) + sql := `SELECT value FROM bridge_last_value WHERE + dot_id = $1 AND + spec_id = $2 AND + finished_at > ($3) + ORDER BY finished_at + DESC LIMIT 1;` + err = errors.Wrap(o.q.Get(&response, sql, dotId, specId, stalenessThreshold), fmt.Sprintf("failed to fetch last good value for task %s spec %d", dotId, specId)) + return +} + +func (o *orm) UpsertBridgeResponse(dotId string, specId int32, response []byte) error { + sql := `INSERT INTO bridge_last_value(dot_id, spec_id, value, finished_at) + VALUES($1, $2, $3, $4) + ON CONFLICT ON CONSTRAINT bridge_last_value_pkey + DO UPDATE SET value = $3, finished_at = $4;` + + err := o.q.ExecQ(sql, dotId, specId, response, time.Now()) + return errors.Wrap(err, "failed to upsert bridge response") +} + +// 
--- External Initiator + +// ExternalInitiators returns a list of external initiators sorted by name +func (o *orm) ExternalInitiators(offset int, limit int) (exis []ExternalInitiator, count int, err error) { + err = o.q.Transaction(func(tx pg.Queryer) error { + if err = tx.Get(&count, "SELECT COUNT(*) FROM external_initiators"); err != nil { + return errors.Wrap(err, "ExternalInitiators failed to get count") + } + + sql := `SELECT * FROM external_initiators ORDER BY name asc LIMIT $1 OFFSET $2;` + if err = tx.Select(&exis, sql, limit, offset); err != nil { + return errors.Wrap(err, "ExternalInitiators failed to load external_initiators") + } + return nil + }, pg.OptReadOnlyTx()) + return +} + +// CreateExternalInitiator inserts a new external initiator +func (o *orm) CreateExternalInitiator(externalInitiator *ExternalInitiator) (err error) { + query := `INSERT INTO external_initiators (name, url, access_key, salt, hashed_secret, outgoing_secret, outgoing_token, created_at, updated_at) + VALUES (:name, :url, :access_key, :salt, :hashed_secret, :outgoing_secret, :outgoing_token, now(), now()) + RETURNING * + ` + err = o.q.Transaction(func(tx pg.Queryer) error { + var stmt *sqlx.NamedStmt + stmt, err = tx.PrepareNamed(query) + if err != nil { + return errors.Wrap(err, "failed to prepare named stmt") + } + defer stmt.Close() + return errors.Wrap(stmt.Get(externalInitiator, externalInitiator), "failed to load external_initiator") + }) + return errors.Wrap(err, "CreateExternalInitiator failed") +} + +// DeleteExternalInitiator removes an external initiator +func (o *orm) DeleteExternalInitiator(name string) error { + query := "DELETE FROM external_initiators WHERE name = $1" + ctx, cancel := o.q.Context() + defer cancel() + result, err := o.q.ExecContext(ctx, query, name) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected == 0 { + return sql.ErrNoRows + } + return err +} + +// 
FindExternalInitiator finds an external initiator given an authentication request +func (o *orm) FindExternalInitiator( + eia *auth.Token, +) (*ExternalInitiator, error) { + exi := &ExternalInitiator{} + err := o.q.Get(exi, `SELECT * FROM external_initiators WHERE access_key = $1`, eia.AccessKey) + return exi, err +} + +// FindExternalInitiatorByName finds an external initiator given an authentication request +func (o *orm) FindExternalInitiatorByName(iname string) (exi ExternalInitiator, err error) { + err = o.q.Get(&exi, `SELECT * FROM external_initiators WHERE lower(name) = lower($1)`, iname) + return +} diff --git a/core/bridges/orm_test.go b/core/bridges/orm_test.go new file mode 100644 index 00000000..7a4d2b6a --- /dev/null +++ b/core/bridges/orm_test.go @@ -0,0 +1,203 @@ +package bridges_test + +import ( + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +func setupORM(t *testing.T) (*sqlx.DB, bridges.ORM) { + t.Helper() + + cfg := configtest.NewGeneralConfig(t, nil) + db := pgtest.NewSqlxDB(t) + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + + return db, orm +} + +func TestORM_FindBridges(t *testing.T) { + t.Parallel() + _, orm := setupORM(t) + + bt := bridges.BridgeType{ + Name: "bridge1", + URL: cltest.WebURL(t, "https://bridge1.com"), + } + 
assert.NoError(t, orm.CreateBridgeType(&bt)) + bt2 := bridges.BridgeType{ + Name: "bridge2", + URL: cltest.WebURL(t, "https://bridge2.com"), + } + assert.NoError(t, orm.CreateBridgeType(&bt2)) + bts, err := orm.FindBridges([]bridges.BridgeName{"bridge2", "bridge1"}) + require.NoError(t, err) + require.Equal(t, 2, len(bts)) + + bts, err = orm.FindBridges([]bridges.BridgeName{"bridge1"}) + require.NoError(t, err) + require.Equal(t, 1, len(bts)) + require.Equal(t, "bridge1", bts[0].Name.String()) + + // One invalid bridge errors + bts, err = orm.FindBridges([]bridges.BridgeName{"bridge1", "bridgeX"}) + require.Error(t, err, bts) + + // All invalid bridges error + bts, err = orm.FindBridges([]bridges.BridgeName{"bridgeY", "bridgeX"}) + require.Error(t, err, bts) + + // Requires at least one bridge + bts, err = orm.FindBridges([]bridges.BridgeName{}) + require.Error(t, err, bts) +} + +func TestORM_FindBridge(t *testing.T) { + t.Parallel() + + _, orm := setupORM(t) + + bt := bridges.BridgeType{} + bt.Name = bridges.MustParseBridgeName("solargridreporting") + bt.URL = cltest.WebURL(t, "https://denergy.eth") + assert.NoError(t, orm.CreateBridgeType(&bt)) + + cases := []struct { + description string + name bridges.BridgeName + want bridges.BridgeType + errored bool + }{ + {"actual external adapter", bt.Name, bt, false}, + {"core adapter", "ethtx", bridges.BridgeType{}, true}, + {"non-existent adapter", "nonExistent", bridges.BridgeType{}, true}, + } + + for _, test := range cases { + t.Run(test.description, func(t *testing.T) { + tt, err := orm.FindBridge(test.name) + tt.CreatedAt = test.want.CreatedAt + tt.UpdatedAt = test.want.UpdatedAt + if test.errored { + require.Error(t, err) + } else { + // we can't make any assumptions about the return type if scanning failed + require.Equal(t, test.want, tt) + } + }) + } +} +func TestORM_UpdateBridgeType(t *testing.T) { + _, orm := setupORM(t) + + firstBridge := &bridges.BridgeType{ + Name: "UniqueName", + URL: cltest.WebURL(t, 
"http:/oneurl.com"), + } + + require.NoError(t, orm.CreateBridgeType(firstBridge)) + + updateBridge := &bridges.BridgeTypeRequest{ + URL: cltest.WebURL(t, "http:/updatedurl.com"), + } + + require.NoError(t, orm.UpdateBridgeType(firstBridge, updateBridge)) + + foundbridge, err := orm.FindBridge("UniqueName") + require.NoError(t, err) + require.Equal(t, updateBridge.URL, foundbridge.URL) + + bs, count, err := orm.BridgeTypes(0, 10) + require.NoError(t, err) + require.Equal(t, 1, count) + require.Len(t, bs, 1) + + require.NoError(t, orm.DeleteBridgeType(&foundbridge)) + + bs, count, err = orm.BridgeTypes(0, 10) + require.NoError(t, err) + require.Equal(t, 0, count) + require.Len(t, bs, 0) +} + +func TestORM_TestCachedResponse(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, nil) + db := pgtest.NewSqlxDB(t) + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + + _, err = orm.GetCachedResponse("dot", specID, 1*time.Second) + require.Error(t, err) + require.Contains(t, err.Error(), "no rows in result set") + + err = orm.UpsertBridgeResponse("dot", specID, []byte{111, 222, 2}) + require.NoError(t, err) + + val, err := orm.GetCachedResponse("dot", specID, 1*time.Second) + require.NoError(t, err) + require.Equal(t, []byte{111, 222, 2}, val) +} + +func TestORM_CreateExternalInitiator(t *testing.T) { + _, orm := setupORM(t) + + token := auth.NewToken() + name := uuid.New().String() + req := bridges.ExternalInitiatorRequest{ + Name: name, + } + exi, err := bridges.NewExternalInitiator(token, &req) + require.NoError(t, err) + require.NoError(t, orm.CreateExternalInitiator(exi)) + + exi2, err := bridges.NewExternalInitiator(token, &req) + require.NoError(t, err) + require.Contains(t, 
orm.CreateExternalInitiator(exi2).Error(), `ERROR: duplicate key value violates unique constraint "external_initiators_name_key" (SQLSTATE 23505)`) +} + +func TestORM_DeleteExternalInitiator(t *testing.T) { + _, orm := setupORM(t) + + token := auth.NewToken() + name := uuid.New().String() + req := bridges.ExternalInitiatorRequest{ + Name: name, + } + exi, err := bridges.NewExternalInitiator(token, &req) + require.NoError(t, err) + require.NoError(t, orm.CreateExternalInitiator(exi)) + + _, err = orm.FindExternalInitiator(token) + require.NoError(t, err) + _, err = orm.FindExternalInitiatorByName(exi.Name) + require.NoError(t, err) + + err = orm.DeleteExternalInitiator(exi.Name) + require.NoError(t, err) + + _, err = orm.FindExternalInitiator(token) + require.Error(t, err) + _, err = orm.FindExternalInitiatorByName(exi.Name) + require.Error(t, err) + + require.NoError(t, orm.CreateExternalInitiator(exi)) +} diff --git a/core/build/build.go b/core/build/build.go new file mode 100644 index 00000000..e7d1f7cb --- /dev/null +++ b/core/build/build.go @@ -0,0 +1,27 @@ +// Package build utilizes build tags and package testing API to determine the environment that this binary was built to target. +// - Prod is the default +// - Test is automatically set in test binaries, e.g. 
// Build-mode values reported by Mode and matched by the Is* helpers.
const (
	Prod = "prod"
	Dev  = "dev"
	Test = "test"
)

// mode is assigned by a build-tag-selected init function (init.go /
// init_dev.go); it is never mutated afterwards.
var mode string

// Mode reports the build mode this binary targets.
func Mode() string { return mode }

// IsDev reports whether the binary was built with the 'dev' build tag.
func IsDev() bool { return mode == Dev }

// IsTest reports whether this is a test binary.
func IsTest() bool { return mode == Test }

// IsProd reports whether this is a standard production build.
func IsProd() bool { return mode == Prod }
+func (r *Registry) GetTrigger(ctx context.Context, id string) (capabilities.TriggerCapability, error) { + c, err := r.Get(ctx, id) + if err != nil { + return nil, err + } + + tc, ok := c.(capabilities.TriggerCapability) + if !ok { + return nil, fmt.Errorf("capability with id: %s does not satisfy the capability interface", id) + } + + return tc, nil +} + +// GetAction gets a capability from the registry and tries to coerce it to the ActionCapability interface. +func (r *Registry) GetAction(ctx context.Context, id string) (capabilities.ActionCapability, error) { + c, err := r.Get(ctx, id) + if err != nil { + return nil, err + } + + ac, ok := c.(capabilities.ActionCapability) + if !ok { + return nil, fmt.Errorf("capability with id: %s does not satisfy the capability interface", id) + } + + return ac, nil +} + +// GetConsensus gets a capability from the registry and tries to coerce it to the ConsensusCapability interface. +func (r *Registry) GetConsensus(ctx context.Context, id string) (capabilities.ConsensusCapability, error) { + c, err := r.Get(ctx, id) + if err != nil { + return nil, err + } + + cc, ok := c.(capabilities.ConsensusCapability) + if !ok { + return nil, fmt.Errorf("capability with id: %s does not satisfy the capability interface", id) + } + + return cc, nil +} + +// GetTarget gets a capability from the registry and tries to coerce it to the TargetCapability interface. +func (r *Registry) GetTarget(ctx context.Context, id string) (capabilities.TargetCapability, error) { + c, err := r.Get(ctx, id) + if err != nil { + return nil, err + } + + tc, ok := c.(capabilities.TargetCapability) + if !ok { + return nil, fmt.Errorf("capability with id: %s does not satisfy the capability interface", id) + } + + return tc, nil +} + +// List lists all the capabilities in the registry. 
+func (r *Registry) List(_ context.Context) ([]capabilities.BaseCapability, error) { + r.mu.RLock() + defer r.mu.RUnlock() + cl := []capabilities.BaseCapability{} + for _, v := range r.m { + cl = append(cl, v) + } + + return cl, nil +} + +// Add adds a capability to the registry. +func (r *Registry) Add(ctx context.Context, c capabilities.BaseCapability) error { + r.mu.Lock() + defer r.mu.Unlock() + + info, err := c.Info(ctx) + if err != nil { + return err + } + + switch info.CapabilityType { + case capabilities.CapabilityTypeTrigger: + _, ok := c.(capabilities.TriggerCapability) + if !ok { + return fmt.Errorf("trigger capability does not satisfy TriggerCapability interface") + } + case capabilities.CapabilityTypeAction: + _, ok := c.(capabilities.ActionCapability) + if !ok { + return fmt.Errorf("action does not satisfy ActionCapability interface") + } + case capabilities.CapabilityTypeConsensus: + _, ok := c.(capabilities.ConsensusCapability) + if !ok { + return fmt.Errorf("consensus capability does not satisfy ConsensusCapability interface") + } + case capabilities.CapabilityTypeTarget: + _, ok := c.(capabilities.TargetCapability) + if !ok { + return fmt.Errorf("target capability does not satisfy TargetCapability interface") + } + default: + return fmt.Errorf("unknown capability type: %s", info.CapabilityType) + } + + id := info.ID + _, ok := r.m[id] + if ok { + return fmt.Errorf("capability with id: %s already exists", id) + } + + r.m[id] = c + return nil + +} + +// NewRegistry returns a new Registry. 
+func NewRegistry() *Registry { + return &Registry{ + m: map[string]capabilities.BaseCapability{}, + } +} diff --git a/core/capabilities/registry_test.go b/core/capabilities/registry_test.go new file mode 100644 index 00000000..a21911ae --- /dev/null +++ b/core/capabilities/registry_test.go @@ -0,0 +1,185 @@ +package capabilities_test + +import ( + "context" + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/capabilities" + "github.com/goplugin/plugin-common/pkg/capabilities/triggers" + coreCapabilities "github.com/goplugin/pluginv3.0/v2/core/capabilities" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +type mockCapability struct { + capabilities.CapabilityInfo +} + +func (m *mockCapability) Execute(ctx context.Context, callback chan<- capabilities.CapabilityResponse, req capabilities.CapabilityRequest) error { + return nil +} + +func (m *mockCapability) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { + return nil +} + +func (m *mockCapability) UnregisterFromWorkflow(ctx context.Context, request capabilities.UnregisterFromWorkflowRequest) error { + return nil +} + +func TestRegistry(t *testing.T) { + ctx := testutils.Context(t) + + r := coreCapabilities.NewRegistry() + + id := "capability-1" + ci, err := capabilities.NewCapabilityInfo( + id, + capabilities.CapabilityTypeAction, + "capability-1-description", + "v1.0.0", + ) + require.NoError(t, err) + + c := &mockCapability{CapabilityInfo: ci} + err = r.Add(ctx, c) + require.NoError(t, err) + + gc, err := r.Get(ctx, id) + require.NoError(t, err) + + assert.Equal(t, c, gc) + + cs, err := r.List(ctx) + require.NoError(t, err) + assert.Len(t, cs, 1) + assert.Equal(t, c, cs[0]) +} + +func TestRegistry_NoDuplicateIDs(t *testing.T) { + ctx := testutils.Context(t) + r := coreCapabilities.NewRegistry() + + id := "capability-1" + ci, err := 
capabilities.NewCapabilityInfo( + id, + capabilities.CapabilityTypeAction, + "capability-1-description", + "v1.0.0", + ) + require.NoError(t, err) + + c := &mockCapability{CapabilityInfo: ci} + err = r.Add(ctx, c) + require.NoError(t, err) + + ci, err = capabilities.NewCapabilityInfo( + id, + capabilities.CapabilityTypeConsensus, + "capability-2-description", + "v1.0.0", + ) + require.NoError(t, err) + c2 := &mockCapability{CapabilityInfo: ci} + + err = r.Add(ctx, c2) + assert.ErrorContains(t, err, "capability with id: capability-1 already exists") +} + +func TestRegistry_ChecksExecutionAPIByType(t *testing.T) { + tcs := []struct { + name string + newCapability func(ctx context.Context, reg *coreCapabilities.Registry) (string, error) + getCapability func(ctx context.Context, reg *coreCapabilities.Registry, id string) error + errContains string + }{ + { + name: "action", + newCapability: func(ctx context.Context, reg *coreCapabilities.Registry) (string, error) { + id := uuid.New().String() + ci, err := capabilities.NewCapabilityInfo( + id, + capabilities.CapabilityTypeAction, + "capability-1-description", + "v1.0.0", + ) + require.NoError(t, err) + + c := &mockCapability{CapabilityInfo: ci} + return id, reg.Add(ctx, c) + }, + getCapability: func(ctx context.Context, reg *coreCapabilities.Registry, id string) error { + _, err := reg.GetAction(ctx, id) + return err + }, + }, + { + name: "target", + newCapability: func(ctx context.Context, reg *coreCapabilities.Registry) (string, error) { + id := uuid.New().String() + ci, err := capabilities.NewCapabilityInfo( + id, + capabilities.CapabilityTypeTarget, + "capability-1-description", + "v1.0.0", + ) + require.NoError(t, err) + + c := &mockCapability{CapabilityInfo: ci} + return id, reg.Add(ctx, c) + }, + getCapability: func(ctx context.Context, reg *coreCapabilities.Registry, id string) error { + _, err := reg.GetTarget(ctx, id) + return err + }, + }, + { + name: "trigger", + newCapability: func(ctx context.Context, reg 
*coreCapabilities.Registry) (string, error) { + odt := triggers.NewOnDemand() + info, err := odt.Info(ctx) + require.NoError(t, err) + return info.ID, reg.Add(ctx, odt) + }, + getCapability: func(ctx context.Context, reg *coreCapabilities.Registry, id string) error { + _, err := reg.GetTrigger(ctx, id) + return err + }, + }, + { + name: "consensus", + newCapability: func(ctx context.Context, reg *coreCapabilities.Registry) (string, error) { + id := uuid.New().String() + ci, err := capabilities.NewCapabilityInfo( + id, + capabilities.CapabilityTypeConsensus, + "capability-1-description", + "v1.0.0", + ) + require.NoError(t, err) + + c := &mockCapability{CapabilityInfo: ci} + return id, reg.Add(ctx, c) + }, + getCapability: func(ctx context.Context, reg *coreCapabilities.Registry, id string) error { + _, err := reg.GetConsensus(ctx, id) + return err + }, + }, + } + + ctx := testutils.Context(t) + reg := coreCapabilities.NewRegistry() + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + id, err := tc.newCapability(ctx, reg) + require.NoError(t, err) + + err = tc.getCapability(ctx, reg, id) + require.NoError(t, err) + }) + } +} diff --git a/core/capabilities/targets/write_target.go b/core/capabilities/targets/write_target.go new file mode 100644 index 00000000..eae1a364 --- /dev/null +++ b/core/capabilities/targets/write_target.go @@ -0,0 +1,239 @@ +package targets + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + + chainselectors "github.com/goplugin/chain-selectors" + + "github.com/goplugin/plugin-common/pkg/capabilities" + commontypes "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/plugin-common/pkg/values" + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + abiutil "github.com/goplugin/pluginv3.0/v2/core/chains/evm/abi" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/keystone/generated/forwarder" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +var forwardABI = evmtypes.MustGetABI(forwarder.KeystoneForwarderMetaData.ABI) + +func InitializeWrite(registry commontypes.CapabilitiesRegistry, legacyEVMChains legacyevm.LegacyChainContainer) error { + for _, chain := range legacyEVMChains.Slice() { + capability := NewEvmWrite(chain) + if err := registry.Add(context.TODO(), capability); err != nil { + return err + } + } + return nil +} + +var ( + _ capabilities.ActionCapability = &EvmWrite{} +) + +type EvmWrite struct { + chain legacyevm.Chain + capabilities.CapabilityInfo +} + +func NewEvmWrite(chain legacyevm.Chain) *EvmWrite { + // generate ID based on chain selector + name := fmt.Sprintf("write_%v", chain.ID()) + chainName, err := chainselectors.NameFromChainId(chain.ID().Uint64()) + if err == nil { + name = fmt.Sprintf("write_%v", chainName) + } + + info := capabilities.MustNewCapabilityInfo( + name, + capabilities.CapabilityTypeTarget, + "Write target.", + "v1.0.0", + ) + + return &EvmWrite{ + chain, + info, + } +} + +type EvmConfig struct { + ChainID uint + Address string + Params []any + ABI string +} + +// TODO: enforce required key presence + +func parseConfig(rawConfig *values.Map) (EvmConfig, error) { + var config EvmConfig + configAny, err := rawConfig.Unwrap() + if err != nil { + return config, err + } + err = mapstructure.Decode(configAny, &config) + return config, err +} + +func evaluateParams(params []any, inputs map[string]any) ([]any, error) { + vars := pipeline.NewVarsFrom(inputs) + var args []any + for _, param := range params { + switch v := param.(type) { + case string: + val, err := pipeline.VarExpr(v, vars)() + if err == nil { + args = append(args, val) + } 
else if errors.Is(errors.Cause(err), pipeline.ErrParameterEmpty) { + args = append(args, param) + } else { + return args, err + } + default: + args = append(args, param) + } + } + + return args, nil +} + +func encodePayload(args []any, rawSelector string) ([]byte, error) { + // TODO: do spec parsing as part of parseConfig() + + // Based on https://github.com/ethereum/go-ethereum/blob/f1c27c286ea2d0e110a507e5749e92d0a6144f08/signer/fourbyte/abi.go#L77-L102 + + // NOTE: without having full ABI it's actually impossible to support function overloading + selector, err := abiutil.ParseSignature(rawSelector) + if err != nil { + return nil, err + } + + abidata, err := json.Marshal([]abi.SelectorMarshaling{selector}) + if err != nil { + return nil, err + } + + spec, err := abi.JSON(strings.NewReader(string(abidata))) + if err != nil { + return nil, err + } + + return spec.Pack(selector.Name, args...) + + // NOTE: could avoid JSON encoding/decoding the selector + // var args abi.Arguments + // for _, arg := range selector.Inputs { + // ty, err := abi.NewType(arg.Type, arg.InternalType, arg.Components) + // if err != nil { + // return nil, err + // } + // args = append(args, abi.Argument{Name: arg.Name, Type: ty}) + // } + // // we only care about the name + inputs so we can compute the method ID + // method := abi.NewMethod(selector.Name, selector.Name, abi.Function, "nonpayable", false, false, args, nil) + // + // https://github.com/ethereum/go-ethereum/blob/f1c27c286ea2d0e110a507e5749e92d0a6144f08/accounts/abi/abi.go#L77-L82 + // arguments, err := method.Inputs.Pack(args...) + // if err != nil { + // return nil, err + // } + // // Pack up the method ID too if not a constructor and return + // return append(method.ID, arguments...), nil +} + +func (cap *EvmWrite) Execute(ctx context.Context, callback chan<- capabilities.CapabilityResponse, request capabilities.CapabilityRequest) error { + // TODO: idempotency + + // TODO: extract into ChainWriter? 
+ txm := cap.chain.TxManager() + + config := cap.chain.Config().EVM().ChainWriter() + + reqConfig, err := parseConfig(request.Config) + if err != nil { + return err + } + + inputsAny, err := request.Inputs.Unwrap() + if err != nil { + return err + } + inputs := inputsAny.(map[string]any) + + // evaluate any variables in reqConfig.Params + args, err := evaluateParams(reqConfig.Params, inputs) + if err != nil { + return err + } + + data, err := encodePayload(args, reqConfig.ABI) + if err != nil { + return err + } + + // TODO: validate encoded report is prefixed with workflowID and executionID that match the request meta + + // unlimited gas in the MVP demo + gasLimit := 0 + // No signature validation in the MVP demo + signatures := [][]byte{} + + // construct forwarding payload + calldata, err := forwardABI.Pack("report", common.HexToAddress(reqConfig.Address), data, signatures) + if err != nil { + return err + } + + txMeta := &txmgr.TxMeta{ + // FwdrDestAddress could also be set for better logging but it's used for various purposes around Operator Forwarders + WorkflowExecutionID: &request.Metadata.WorkflowExecutionID, + } + strategy := txmgrcommon.NewSendEveryStrategy() + + checker := txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeSimulate, + } + req := txmgr.TxRequest{ + FromAddress: config.FromAddress().Address(), + ToAddress: config.ForwarderAddress().Address(), + EncodedPayload: calldata, + FeeLimit: uint32(gasLimit), + Meta: txMeta, + Strategy: strategy, + Checker: checker, + // SignalCallback: true, TODO: add code that checks if a workflow id is present, if so, route callback to chainwriter rather than pipeline + } + tx, err := txm.CreateTransaction(ctx, req) + if err != nil { + return err + } + fmt.Printf("Transaction submitted %v", tx.ID) + go func() { + // TODO: cast tx.Error to Err (or Value to Value?) 
+ callback <- capabilities.CapabilityResponse{ + Value: nil, + Err: nil, + } + close(callback) + }() + return nil +} + +func (cap *EvmWrite) RegisterToWorkflow(ctx context.Context, request capabilities.RegisterToWorkflowRequest) error { + return nil +} + +func (cap *EvmWrite) UnregisterFromWorkflow(ctx context.Context, request capabilities.UnregisterFromWorkflowRequest) error { + return nil +} diff --git a/core/capabilities/targets/write_target_test.go b/core/capabilities/targets/write_target_test.go new file mode 100644 index 00000000..3c6f4fb5 --- /dev/null +++ b/core/capabilities/targets/write_target_test.go @@ -0,0 +1,92 @@ +package targets_test + +import ( + "math/big" + "testing" + + "github.com/goplugin/plugin-common/pkg/capabilities" + "github.com/goplugin/plugin-common/pkg/values" + "github.com/goplugin/pluginv3.0/v2/core/capabilities/targets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + txmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr/mocks" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm/mocks" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/keystone/generated/forwarder" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +var forwardABI = evmtypes.MustGetABI(forwarder.KeystoneForwarderMetaData.ABI) + +func TestEvmWrite(t *testing.T) { + chain := evmmocks.NewChain(t) + + txManager := txmmocks.NewMockEvmTxManager(t) + chain.On("ID").Return(big.NewInt(11155111)) + chain.On("TxManager").Return(txManager) + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, 
s *plugin.Secrets) { + a := testutils.NewAddress() + addr, err := ethkey.NewEIP55Address(a.Hex()) + require.NoError(t, err) + c.EVM[0].ChainWriter.FromAddress = &addr + + forwarderA := testutils.NewAddress() + forwarderAddr, err := ethkey.NewEIP55Address(forwarderA.Hex()) + require.NoError(t, err) + c.EVM[0].ChainWriter.ForwarderAddress = &forwarderAddr + }) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + chain.On("Config").Return(evmcfg) + + capability := targets.NewEvmWrite(chain) + ctx := testutils.Context(t) + + config, err := values.NewMap(map[string]any{ + "abi": "receive(report bytes)", + "params": []any{"$(report)"}, + }) + require.NoError(t, err) + + inputs, err := values.NewMap(map[string]any{ + "report": []byte{1, 2, 3}, + }) + require.NoError(t, err) + + req := capabilities.CapabilityRequest{ + Metadata: capabilities.RequestMetadata{ + WorkflowID: "hello", + }, + Config: config, + Inputs: inputs, + } + + txManager.On("CreateTransaction", mock.Anything, mock.Anything).Return(txmgr.Tx{}, nil).Run(func(args mock.Arguments) { + req := args.Get(1).(txmgr.TxRequest) + payload := make(map[string]any) + method := forwardABI.Methods["report"] + err = method.Inputs.UnpackIntoMap(payload, req.EncodedPayload[4:]) + require.NoError(t, err) + require.Equal(t, []byte{ + 0xa6, 0x9b, 0x6e, 0xd0, // selector = keccak(signature)[:4] + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, // type = bytes + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, // len = 3 + 0x1, 0x2, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // elements [1, 2, 3] zero padded + }, payload["data"]) + + }) + + ch := make(chan capabilities.CapabilityResponse) + + err = 
capability.Execute(ctx, ch, req) + require.NoError(t, err) + + response := <-ch + require.Nil(t, response.Err) +} diff --git a/core/cbor/cbor.go b/core/cbor/cbor.go new file mode 100644 index 00000000..cc3f74e4 --- /dev/null +++ b/core/cbor/cbor.go @@ -0,0 +1,118 @@ +package cbor + +import ( + "bytes" + "fmt" + "math/big" + + "github.com/fxamacker/cbor/v2" + "github.com/pkg/errors" +) + +// ParseDietCBOR attempts to coerce the input byte array into valid CBOR. +// Assumes the input is "diet" CBOR which is like CBOR, except: +// 1. It is guaranteed to always be a map +// 2. It may or may not include the opening and closing markers "{}" +func ParseDietCBOR(b []byte) (map[string]interface{}, error) { + b = autoAddMapDelimiters(b) + + var m map[interface{}]interface{} + if _, err := cbor.UnmarshalFirst(b, &m); err != nil { + return nil, err + } + + coerced, err := CoerceInterfaceMapToStringMap(m) + if err != nil { + return nil, err + } + + output, ok := coerced.(map[string]interface{}) + if !ok { + return nil, errors.New("cbor data cannot be coerced to map") + } + + return output, nil +} + +// Similar to ParseDietCBOR but outputs to a concrete struct, which meets the +// "top-level map" requirement of "diet" CBOR. +func ParseDietCBORToStruct(b []byte, v interface{}) error { + b = autoAddMapDelimiters(b) + _, err := cbor.UnmarshalFirst(b, v) + return err +} + +// ParseStandardCBOR parses CBOR in "standards compliant" mode. +// Literal values are passed through "as-is". +// The input is not assumed to be a map. +// Empty inputs will return nil. 
// CoerceInterfaceMapToStringMap converts map[interface{}]interface{} (interface maps) to
// map[string]interface{} (string maps) and []interface{} with interface maps to string maps.
// Relevant when serializing between CBOR and JSON.
//
// It also handles the CBOR 'bignum' type as documented here: https://tools.ietf.org/html/rfc7049#section-2.4.2
//
// String maps and slices are coerced recursively (string maps in place);
// a non-string map key yields an error; any other value passes through as-is.
func CoerceInterfaceMapToStringMap(in interface{}) (interface{}, error) {
	switch typed := in.(type) {
	case map[string]interface{}:
		// Already keyed by string: coerce the values in place.
		for k, v := range typed {
			coerced, err := CoerceInterfaceMapToStringMap(v)
			if err != nil {
				return nil, err
			}
			typed[k] = coerced
		}
		return typed, nil
	case map[interface{}]interface{}:
		m := make(map[string]interface{}, len(typed))
		for k, v := range typed {
			coercedKey, ok := k.(string)
			if !ok {
				return nil, fmt.Errorf("unable to coerce key %T %v to a string", k, k)
			}
			coerced, err := CoerceInterfaceMapToStringMap(v)
			if err != nil {
				return nil, err
			}
			m[coercedKey] = coerced
		}
		return m, nil
	case []interface{}:
		r := make([]interface{}, len(typed))
		for i, v := range typed {
			coerced, err := CoerceInterfaceMapToStringMap(v)
			if err != nil {
				return nil, err
			}
			r[i] = coerced
		}
		return r, nil
	case big.Int:
		// CBOR bignums decode to a big.Int value. The type switch already
		// bound a copy in `typed`, so return its address directly instead of
		// redundantly re-asserting `in` (the previous `value, _ := (in).(big.Int)`).
		return &typed, nil
	default:
		return in, nil
	}
}
-0,0 +1,377 @@ +package cbor + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/fxamacker/cbor/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/utils/hex" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func Test_ParseCBOR(t *testing.T) { + t.Parallel() + + address, err := hex.DecodeString("0x8bd112d3f8f92e41c861939545ad387307af9703") + require.NoError(t, err) + + tests := []struct { + name string + in string + want interface{} + wantErrored bool + }{ + { + "hello world", + `0xbf6375726c781a68747470733a2f2f657468657270726963652e636f6d2f61706964706174689f66726563656e7463757364ffff`, + jsonMustUnmarshal(t, `{"path":["recent","usd"],"url":"https://etherprice.com/api"}`), + false, + }, + { + "trailing empty bytes", + `0xbf6375726c781a68747470733a2f2f657468657270726963652e636f6d2f61706964706174689f66726563656e7463757364ffff000000`, + jsonMustUnmarshal(t, `{"path":["recent","usd"],"url":"https://etherprice.com/api"}`), + false, + }, + { + "nested maps", + `0xbf657461736b739f6868747470706f7374ff66706172616d73bf636d73676f68656c6c6f5f636861696e6c696e6b6375726c75687474703a2f2f6c6f63616c686f73743a36363930ffff`, + jsonMustUnmarshal(t, `{"params":{"msg":"hello_plugin","url":"http://localhost:6690"},"tasks":["httppost"]}`), + false, + }, + { + "missing initial start map marker", + `0x636B65796576616C7565ff`, + jsonMustUnmarshal(t, `{"key":"value"}`), + false, + }, + { + "with address encoded", + `0x6d72656d6f7465436861696e4964186a6e6c69627261727956657273696f6e016f636f6e747261637441646472657373548bd112d3f8f92e41c861939545ad387307af97036d636f6e6669726d6174696f6e730a68626c6f636b4e756d69307831336261626264`, + map[string]interface{}{ + "blockNum": "0x13babbd", + "confirmations": uint64(10), + "contractAddress": address, + "libraryVersion": uint64(1), + "remoteChainId": uint64(106), + }, + false, + }, + { + "bignums", + 
"0x" + + "bf" + // map(*) + "67" + // text(7) + "6269676e756d73" + // "bignums" + "9f" + // array(*) + "c2" + // tag(2) == unsigned bignum + "5820" + // bytes(32) + "0000000000000000000000000000000000000000000000010000000000000000" + + // int(18446744073709551616) + "c2" + // tag(2) == unsigned bignum + "5820" + // bytes(32) + "4000000000000000000000000000000000000000000000000000000000000000" + + // int(28948022309329048855892746252171976963317496166410141009864396001978282409984) + "c3" + // tag(3) == signed bignum + "5820" + // bytes(32) + "0000000000000000000000000000000000000000000000010000000000000000" + + // int(18446744073709551616) + "c3" + // tag(3) == signed bignum + "5820" + // bytes(32) + "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + + // int(28948022309329048855892746252171976963317496166410141009864396001978282409983) + "ff" + // primitive(*) + "ff", // primitive(*) + map[string]interface{}{ + "bignums": []interface{}{ + testutils.MustParseBigInt(t, "18446744073709551616"), + testutils.MustParseBigInt(t, "28948022309329048855892746252171976963317496166410141009864396001978282409984"), + testutils.MustParseBigInt(t, "-18446744073709551617"), + testutils.MustParseBigInt(t, "-28948022309329048855892746252171976963317496166410141009864396001978282409984"), + }, + }, + false, + }, + { + "bignums", + "0x" + + "67" + // text(7) + "6269676e756d73" + // "bignums" + "9f" + // array(*) + "c2" + // tag(2) == unsigned bignum + "5820" + // bytes(32) + "0000000000000000000000000000000000000000000000010000000000000000" + + // int(18446744073709551616) + "c2" + // tag(2) == unsigned bignum + "5820" + // bytes(32) + "4000000000000000000000000000000000000000000000000000000000000000" + + // int(28948022309329048855892746252171976963317496166410141009864396001978282409984) + "c3" + // tag(3) == signed bignum + "5820" + // bytes(32) + "0000000000000000000000000000000000000000000000010000000000000000" + + // int(18446744073709551616) + "c3" + // 
tag(3) == signed bignum + "5820" + // bytes(32) + "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + + // int(28948022309329048855892746252171976963317496166410141009864396001978282409983) + "ff", // primitive(*) + map[string]interface{}{ + "bignums": []interface{}{ + testutils.MustParseBigInt(t, "18446744073709551616"), + testutils.MustParseBigInt(t, "28948022309329048855892746252171976963317496166410141009864396001978282409984"), + testutils.MustParseBigInt(t, "-18446744073709551617"), + testutils.MustParseBigInt(t, "-28948022309329048855892746252171976963317496166410141009864396001978282409984"), + }, + }, + false, + }, + {"empty object", `0xa0`, jsonMustUnmarshal(t, `{}`), false}, + {"empty string", `0x`, jsonMustUnmarshal(t, `{}`), false}, + {"invalid CBOR", `0xff`, jsonMustUnmarshal(t, `{}`), true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + b, err := hexutil.Decode(test.in) + assert.NoError(t, err) + + json, err := ParseDietCBOR(b) + if test.wantErrored { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.want, json) + } + }) + } +} + +func Test_ParseCBORToStruct_Success(t *testing.T) { + t.Parallel() + + hexCBOR := `0xbf6375726c781a68747470733a2f2f657468657270726963652e636f6d2f61706964706174689f66726563656e7463757364ffff000000` + bytesCBOR, err := hexutil.Decode(hexCBOR) + assert.NoError(t, err) + + parsed := struct { + Url string `cbor:"url"` + Path []string `cbor:"path"` + }{} + err = ParseDietCBORToStruct(bytesCBOR, &parsed) + + require.NoError(t, err) + require.Equal(t, "https://etherprice.com/api", parsed.Url) + require.Equal(t, []string{"recent", "usd"}, parsed.Path) +} + +func Test_ParseCBORToStruct_WrongFieldType(t *testing.T) { + t.Parallel() + + hexCBOR := `0xbf6375726c781a68747470733a2f2f657468657270726963652e636f6d2f61706964706174689f66726563656e7463757364ffff000000` + bytesCBOR, err := hexutil.Decode(hexCBOR) + assert.NoError(t, err) + + parsed := struct { + 
Url string `cbor:"url"` + Path []int `cbor:"path"` // exect int but get string + }{} + err = ParseDietCBORToStruct(bytesCBOR, &parsed) + + require.Error(t, err) +} + +func Test_ParseCBORToStruct_BinaryStringOfWrongType(t *testing.T) { + t.Parallel() + + // {"key":"value"} but with last byte replaced with invalid unicode (0x88) + hexCBOR := `0x636B65796576616C7588` + bytesCBOR, err := hexutil.Decode(hexCBOR) + assert.NoError(t, err) + + parsed := struct { + Key string `cbor:"key"` + }{} + err = ParseDietCBORToStruct(bytesCBOR, &parsed) + require.Error(t, err) +} + +func Test_autoAddMapDelimiters(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + in []byte + want []byte + }{ + { + "map(0)", + hexutil.MustDecode("0xA0"), + hexutil.MustDecode("0xA0"), + }, + { + `map(1) {"key":"value"}`, + hexutil.MustDecode("0xA1636B65796576616C7565"), + hexutil.MustDecode("0xA1636B65796576616C7565"), + }, + { + "array(0)", + hexutil.MustDecode("0x80"), + hexutil.MustDecode("0x80"), + }, + { + `map(*) {"key":"value"}`, + hexutil.MustDecode("0xbf636B65796576616C7565ff"), + hexutil.MustDecode("0xbf636B65796576616C7565ff"), + }, + { + `map(*) {"key":"value"} missing open delimiter`, + hexutil.MustDecode("0x636B65796576616C7565ff"), + hexutil.MustDecode("0xbf636B65796576616C7565ffff"), + }, + { + `map(*) {"key":"value"} missing closing delimiter`, + hexutil.MustDecode("0xbf636B65796576616C7565"), + hexutil.MustDecode("0xbf636B65796576616C7565"), + }, + { + `map(*) {"key":"value"} missing both delimiters`, + hexutil.MustDecode("0x636B65796576616C7565"), + hexutil.MustDecode("0xbf636B65796576616C7565ff"), + }, + { + "empty input adds delimiters", + []byte{}, + []byte{0xbf, 0xff}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + assert.Equal(t, test.want, autoAddMapDelimiters(test.in)) + }) + } +} + +func jsonMustUnmarshal(t *testing.T, in string) interface{} { + var j interface{} + err := json.Unmarshal([]byte(in), &j) + 
require.NoError(t, err) + return j +} + +func TestCoerceInterfaceMapToStringMap(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + want interface{} + }{ + {"empty map", map[interface{}]interface{}{}, map[string]interface{}{}}, + {"simple map", map[interface{}]interface{}{"key": "value"}, map[string]interface{}{"key": "value"}}, + {"int map", map[int]interface{}{1: "value"}, map[int]interface{}{1: "value"}}, + { + "nested string map map", + map[string]interface{}{"key": map[interface{}]interface{}{"nk": "nv"}}, + map[string]interface{}{"key": map[string]interface{}{"nk": "nv"}}, + }, + { + "nested map map", + map[interface{}]interface{}{"key": map[interface{}]interface{}{"nk": "nv"}}, + map[string]interface{}{"key": map[string]interface{}{"nk": "nv"}}, + }, + { + "nested map array", + map[interface{}]interface{}{"key": []interface{}{1, "value"}}, + map[string]interface{}{"key": []interface{}{1, "value"}}, + }, + {"empty array", []interface{}{}, []interface{}{}}, + {"simple array", []interface{}{1, "value"}, []interface{}{1, "value"}}, + { + "nested array map", + []interface{}{map[interface{}]interface{}{"key": map[interface{}]interface{}{"nk": "nv"}}}, + []interface{}{map[string]interface{}{"key": map[string]interface{}{"nk": "nv"}}}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + decoded, err := CoerceInterfaceMapToStringMap(test.input) + require.NoError(t, err) + assert.True(t, reflect.DeepEqual(test.want, decoded)) + }) + } +} + +func TestCoerceInterfaceMapToStringMap_BadInputs(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + }{ + {"error map", map[interface{}]interface{}{1: "value"}}, + {"error array", []interface{}{map[interface{}]interface{}{1: "value"}}}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _, err := CoerceInterfaceMapToStringMap(test.input) + assert.Error(t, err) + }) + } +} + +func TestJSON_CBOR(t 
*testing.T) { + t.Parallel() + + tests := []struct { + name string + in interface{} + }{ + {"empty object", jsonMustUnmarshal(t, `{}`)}, + {"array", jsonMustUnmarshal(t, `[1,2,3,4]`)}, + { + "basic object", + jsonMustUnmarshal(t, `{"path":["recent","usd"],"url":"https://etherprice.com/api"}`), + }, + { + "complex object", + jsonMustUnmarshal(t, `{"a":{"1":[{"b":"free"},{"c":"more"},{"d":["less", {"nesting":{"4":"life"}}]}]}}`), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + encoded := mustMarshal(t, test.in) + + var decoded interface{} + err := cbor.Unmarshal(encoded, &decoded) + require.NoError(t, err) + + decoded, err = CoerceInterfaceMapToStringMap(decoded) + require.NoError(t, err) + assert.True(t, reflect.DeepEqual(test.in, decoded)) + }) + } +} + +// mustMarshal returns a bytes array of the JSON map or array encoded to CBOR. +func mustMarshal(t *testing.T, j interface{}) []byte { + switch v := j.(type) { + case map[string]interface{}, []interface{}, nil: + b, err := cbor.Marshal(v) + if err != nil { + t.Fatalf("failed to marshal CBOR: %v", err) + } + return b + default: + t.Fatalf("unable to coerce JSON to CBOR for type %T", v) + return nil + } +} diff --git a/core/chains/chain_kv.go b/core/chains/chain_kv.go new file mode 100644 index 00000000..e5d87420 --- /dev/null +++ b/core/chains/chain_kv.go @@ -0,0 +1,63 @@ +package chains + +import ( + "errors" + "fmt" + + "golang.org/x/exp/maps" + + "github.com/goplugin/plugin-common/pkg/types" +) + +type ChainsKV[T types.ChainService] struct { + // note: this is read only after construction so no need for mutex + chains map[string]T +} + +var ErrNoSuchChainID = errors.New("chain id does not exist") + +func NewChainsKV[T types.ChainService](cs map[string]T) *ChainsKV[T] { + return &ChainsKV[T]{ + chains: cs, + } +} +func (c *ChainsKV[T]) Len() int { + return len(c.chains) +} + +// Get return [ErrNoSuchChainID] if [id] is not found +func (c *ChainsKV[T]) Get(id string) (T, error) { 
+ var dflt T + chn, exist := c.chains[id] + if !exist { + return dflt, fmt.Errorf("%w: %s", ErrNoSuchChainID, id) + } + return chn, nil +} + +func (c *ChainsKV[T]) List(ids ...string) ([]T, error) { + if len(ids) == 0 { + return c.Slice(), nil + } + + var ( + result []T + err error + ) + + for _, id := range ids { + chn, exists := c.chains[id] + if !exists { + err2 := fmt.Errorf("%w: %s", ErrNoSuchChainID, id) + err = errors.Join(err, err2) + continue + } + result = append(result, chn) + } + + return result, err +} + +func (c *ChainsKV[T]) Slice() []T { + return maps.Values(c.chains) +} diff --git a/core/chains/chain_kv_test.go b/core/chains/chain_kv_test.go new file mode 100644 index 00000000..09c2d3e0 --- /dev/null +++ b/core/chains/chain_kv_test.go @@ -0,0 +1,101 @@ +package chains_test + +import ( + "context" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/chains" +) + +func Test_ChainKV(t *testing.T) { + var ( + testChainID = "id" + testChain = &testChainService{name: "test chain"} + ) + // test empty case + empty := make(map[string]*testChainService) + kv := chains.NewChainsKV[*testChainService](empty) + c, err := kv.Get(testChainID) + assert.Nil(t, c) + assert.ErrorIs(t, err, chains.ErrNoSuchChainID) + + assert.Equal(t, kv.Len(), 0) + assert.Len(t, kv.Slice(), 0) + + cs, err := kv.List() + assert.NoError(t, err) + assert.Len(t, cs, 0) + + // test with one chain + onechain := map[string]*testChainService{testChainID: testChain} + kv = chains.NewChainsKV[*testChainService](onechain) + c, err = kv.Get(testChainID) + assert.Equal(t, c, testChain) + assert.NoError(t, err) + + assert.Equal(t, kv.Len(), 1) + assert.Len(t, kv.Slice(), 1) + + cs, err = kv.List() + assert.NoError(t, err) + assert.Len(t, cs, 1) + + //List explicit chain + cs, err = kv.List(testChainID) + assert.NoError(t, err) + assert.Len(t, cs, 1) + assert.Equal(t, testChain, cs[0]) + + 
//List no such id + cs, err = kv.List("no such id") + assert.Error(t, err) + assert.Len(t, cs, 0) +} + +type testChainService struct { + name string +} + +// Start the service. Must quit immediately if the context is cancelled. +// The given context applies to Start function only and must not be retained. +func (s *testChainService) Start(_ context.Context) error { + return nil +} + +// Close stops the Service. +// Invariants: Usually after this call the Service cannot be started +// again, you need to build a new Service to do so. +func (s *testChainService) Close() error { + return nil +} + +// Name returns the fully qualified name of the service +func (s *testChainService) Name() string { + return s.name +} + +// Ready should return nil if ready, or an error message otherwise. +func (s *testChainService) Ready() error { + return nil +} + +// HealthReport returns a full health report of the callee including it's dependencies. +// key is the dep name, value is nil if healthy, or error message otherwise. +func (s *testChainService) HealthReport() map[string]error { + return map[string]error{} +} + +// Implement [types.ChainService] interface +func (s *testChainService) GetChainStatus(ctx context.Context) (stat types.ChainStatus, err error) { + return +} +func (s *testChainService) ListNodeStatuses(ctx context.Context, pageSize int32, pageToken string) (stats []types.NodeStatus, nextPageToken string, total int, err error) { + return +} +func (s *testChainService) Transact(ctx context.Context, from string, to string, amount *big.Int, balanceCheck bool) error { + return nil +} diff --git a/core/chains/config.go b/core/chains/config.go new file mode 100644 index 00000000..3556c33a --- /dev/null +++ b/core/chains/config.go @@ -0,0 +1,16 @@ +package chains + +import ( + "errors" +) + +var ( + // ErrChainIDEmpty is returned when chain is required but was empty. 
+ ErrChainIDEmpty = errors.New("chain id empty") + ErrNotFound = errors.New("not found") +) + +// ChainOpts holds options for configuring a Chain +type ChainOpts interface { + Validate() error +} diff --git a/core/chains/constraints.go b/core/chains/constraints.go new file mode 100644 index 00000000..b84c8e93 --- /dev/null +++ b/core/chains/constraints.go @@ -0,0 +1,10 @@ +package chains + +// ID types represent unique identifiers within a particular chain type. Using string is recommended. +type ID any + +// Node types should be a struct including these default fields: +// +// ID int32 +// Name string +type Node any diff --git a/core/chains/errors.go b/core/chains/errors.go new file mode 100644 index 00000000..f13317bb --- /dev/null +++ b/core/chains/errors.go @@ -0,0 +1,8 @@ +package chains + +import "errors" + +var ( + ErrLOOPPUnsupported = errors.New("LOOPP not yet supported") + ErrChainDisabled = errors.New("chain is disabled") +) diff --git a/core/chains/evm/abi/selector_parser.go b/core/chains/evm/abi/selector_parser.go new file mode 100644 index 00000000..30e687ba --- /dev/null +++ b/core/chains/evm/abi/selector_parser.go @@ -0,0 +1,249 @@ +// Sourced from https://github.com/ethereum/go-ethereum/blob/fe91d476ba3e29316b6dc99b6efd4a571481d888/accounts/abi/selector_parser.go#L126 +// Modified assembleArgs to retain argument names + +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package abi + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +func isDigit(c byte) bool { + return c >= '0' && c <= '9' +} + +func isAlpha(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +func isIdentifierSymbol(c byte) bool { + return c == '$' || c == '_' +} + +func parseToken(unescapedSelector string, isIdent bool) (string, string, error) { + if len(unescapedSelector) == 0 { + return "", "", errors.New("empty token") + } + firstChar := unescapedSelector[0] + position := 1 + if !(isAlpha(firstChar) || (isIdent && isIdentifierSymbol(firstChar))) { + return "", "", fmt.Errorf("invalid token start: %c", firstChar) + } + for position < len(unescapedSelector) { + char := unescapedSelector[position] + if !(isAlpha(char) || isDigit(char) || (isIdent && isIdentifierSymbol(char))) { + break + } + position++ + } + return unescapedSelector[:position], unescapedSelector[position:], nil +} + +func parseIdentifier(unescapedSelector string) (string, string, error) { + return parseToken(unescapedSelector, true) +} + +func parseElementaryType(unescapedSelector string) (string, string, error) { + parsedType, rest, err := parseToken(unescapedSelector, false) + if err != nil { + return "", "", fmt.Errorf("failed to parse elementary type: %v", err) + } + // handle arrays + for len(rest) > 0 && rest[0] == '[' { + parsedType = parsedType + string(rest[0]) + rest = rest[1:] + for len(rest) > 0 && isDigit(rest[0]) { + parsedType = parsedType + string(rest[0]) + rest = rest[1:] + } + if len(rest) == 0 || rest[0] != ']' { + return "", "", fmt.Errorf("failed to parse array: expected ']', got %c", unescapedSelector[0]) + } + parsedType = parsedType + string(rest[0]) + rest = rest[1:] + } + return parsedType, rest, nil +} + 
+func parseCompositeType(unescapedSelector string) ([]interface{}, string, error) { + if len(unescapedSelector) == 0 || unescapedSelector[0] != '(' { + return nil, "", fmt.Errorf("expected '(', got %c", unescapedSelector[0]) + } + parsedType, rest, err := parseType(unescapedSelector[1:]) + if err != nil { + return nil, "", fmt.Errorf("failed to parse type: %v", err) + } + result := []interface{}{parsedType} + for len(rest) > 0 && rest[0] != ')' { + parsedType, rest, err = parseType(rest[1:]) + if err != nil { + return nil, "", fmt.Errorf("failed to parse type: %v", err) + } + result = append(result, parsedType) + } + if len(rest) == 0 || rest[0] != ')' { + return nil, "", fmt.Errorf("expected ')', got '%s'", rest) + } + if len(rest) >= 3 && rest[1] == '[' && rest[2] == ']' { + return append(result, "[]"), rest[3:], nil + } + return result, rest[1:], nil +} + +func parseType(unescapedSelector string) (interface{}, string, error) { + if len(unescapedSelector) == 0 { + return nil, "", errors.New("empty type") + } + if unescapedSelector[0] == '(' { + return parseCompositeType(unescapedSelector) + } + return parseElementaryType(unescapedSelector) +} + +func parseArgs(unescapedSelector string) ([]abi.ArgumentMarshaling, error) { + if len(unescapedSelector) == 0 || unescapedSelector[0] != '(' { + return nil, fmt.Errorf("expected '(', got %c", unescapedSelector[0]) + } + result := []abi.ArgumentMarshaling{} + rest := unescapedSelector[1:] + var parsedType any + var err error + for len(rest) > 0 && rest[0] != ')' { + // parse method name + var name string + name, rest, err = parseIdentifier(rest[:]) + if err != nil { + return nil, fmt.Errorf("failed to parse name: %v", err) + } + + // skip whitespace between name and identifier + for rest[0] == ' ' { + rest = rest[1:] + } + + // parse type + parsedType, rest, err = parseType(rest[:]) + if err != nil { + return nil, fmt.Errorf("failed to parse type: %v", err) + } + + arg, err := assembleArg(name, parsedType) + if err != nil 
{ + return nil, fmt.Errorf("failed to parse type: %v", err) + } + + result = append(result, arg) + + for rest[0] == ' ' || rest[0] == ',' { + rest = rest[1:] + } + } + if len(rest) == 0 || rest[0] != ')' { + return nil, fmt.Errorf("expected ')', got '%s'", rest) + } + if len(rest) > 1 { + return nil, fmt.Errorf("failed to parse selector '%s': unexpected string '%s'", unescapedSelector, rest) + } + return result, nil +} + +func assembleArg(name string, arg any) (abi.ArgumentMarshaling, error) { + if s, ok := arg.(string); ok { + return abi.ArgumentMarshaling{Name: name, Type: s, InternalType: s, Components: nil, Indexed: false}, nil + } else if components, ok := arg.([]interface{}); ok { + subArgs, err := assembleArgs(components) + if err != nil { + return abi.ArgumentMarshaling{}, fmt.Errorf("failed to assemble components: %v", err) + } + tupleType := "tuple" + if len(subArgs) != 0 && subArgs[len(subArgs)-1].Type == "[]" { + subArgs = subArgs[:len(subArgs)-1] + tupleType = "tuple[]" + } + return abi.ArgumentMarshaling{Name: name, Type: tupleType, InternalType: tupleType, Components: subArgs, Indexed: false}, nil + } + return abi.ArgumentMarshaling{}, fmt.Errorf("failed to assemble args: unexpected type %T", arg) +} + +func assembleArgs(args []interface{}) ([]abi.ArgumentMarshaling, error) { + arguments := make([]abi.ArgumentMarshaling, 0) + for i, arg := range args { + // generate dummy name to avoid unmarshal issues + name := fmt.Sprintf("name%d", i) + arg, err := assembleArg(name, arg) + if err != nil { + return nil, err + } + arguments = append(arguments, arg) + } + return arguments, nil +} + +// ParseSelector converts a method selector into a struct that can be JSON encoded +// and consumed by other functions in this package. +// Note, although uppercase letters are not part of the ABI spec, this function +// still accepts it as the general format is valid. 
+func ParseSelector(unescapedSelector string) (abi.SelectorMarshaling, error) { + name, rest, err := parseIdentifier(unescapedSelector) + if err != nil { + return abi.SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err) + } + args := []interface{}{} + if len(rest) >= 2 && rest[0] == '(' && rest[1] == ')' { + rest = rest[2:] + } else { + args, rest, err = parseCompositeType(rest) + if err != nil { + return abi.SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err) + } + } + if len(rest) > 0 { + return abi.SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': unexpected string '%s'", unescapedSelector, rest) + } + + // Reassemble the fake ABI and construct the JSON + fakeArgs, err := assembleArgs(args) + if err != nil { + return abi.SelectorMarshaling{}, fmt.Errorf("failed to parse selector: %v", err) + } + + return abi.SelectorMarshaling{Name: name, Type: "function", Inputs: fakeArgs}, nil +} + +// ParseSelector converts a method selector into a struct that can be JSON encoded +// and consumed by other functions in this package. +// Note, although uppercase letters are not part of the ABI spec, this function +// still accepts it as the general format is valid. 
+func ParseSignature(unescapedSelector string) (abi.SelectorMarshaling, error) { + name, rest, err := parseIdentifier(unescapedSelector) + if err != nil { + return abi.SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err) + } + args := []abi.ArgumentMarshaling{} + if len(rest) < 2 || rest[0] != '(' || rest[1] != ')' { + args, err = parseArgs(rest) + if err != nil { + return abi.SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err) + } + } + + return abi.SelectorMarshaling{Name: name, Type: "function", Inputs: args}, nil +} diff --git a/core/chains/evm/abi/selector_parser_test.go b/core/chains/evm/abi/selector_parser_test.go new file mode 100644 index 00000000..caae3744 --- /dev/null +++ b/core/chains/evm/abi/selector_parser_test.go @@ -0,0 +1,126 @@ +// Sourced from https://github.com/ethereum/go-ethereum/blob/fe91d476ba3e29316b6dc99b6efd4a571481d888/accounts/abi/selector_parser_test.go + +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package abi + +import ( + "fmt" + "log" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +func TestParseSelector(t *testing.T) { + t.Parallel() + mkType := func(types ...interface{}) []abi.ArgumentMarshaling { + var result []abi.ArgumentMarshaling + for i, typeOrComponents := range types { + name := fmt.Sprintf("name%d", i) + if typeName, ok := typeOrComponents.(string); ok { + result = append(result, abi.ArgumentMarshaling{Name: name, Type: typeName, InternalType: typeName, Components: nil, Indexed: false}) + } else if components, ok := typeOrComponents.([]abi.ArgumentMarshaling); ok { + result = append(result, abi.ArgumentMarshaling{Name: name, Type: "tuple", InternalType: "tuple", Components: components, Indexed: false}) + } else if components, ok := typeOrComponents.([][]abi.ArgumentMarshaling); ok { + result = append(result, abi.ArgumentMarshaling{Name: name, Type: "tuple[]", InternalType: "tuple[]", Components: components[0], Indexed: false}) + } else { + log.Fatalf("unexpected type %T", typeOrComponents) + } + } + return result + } + tests := []struct { + input string + name string + args []abi.ArgumentMarshaling + }{ + {"noargs()", "noargs", []abi.ArgumentMarshaling{}}, + {"simple(uint256,uint256,uint256)", "simple", mkType("uint256", "uint256", "uint256")}, + {"other(uint256,address)", "other", mkType("uint256", "address")}, + {"withArray(uint256[],address[2],uint8[4][][5])", "withArray", mkType("uint256[]", "address[2]", "uint8[4][][5]")}, + {"singleNest(bytes32,uint8,(uint256,uint256),address)", "singleNest", mkType("bytes32", "uint8", mkType("uint256", "uint256"), "address")}, + {"multiNest(address,(uint256[],uint256),((address,bytes32),uint256))", "multiNest", + mkType("address", mkType("uint256[]", "uint256"), mkType(mkType("address", "bytes32"), "uint256"))}, + {"arrayNest((uint256,uint256)[],bytes32)", "arrayNest", mkType([][]abi.ArgumentMarshaling{mkType("uint256", "uint256")}, "bytes32")}, + 
{"multiArrayNest((uint256,uint256)[],(uint256,uint256)[])", "multiArrayNest", + mkType([][]abi.ArgumentMarshaling{mkType("uint256", "uint256")}, [][]abi.ArgumentMarshaling{mkType("uint256", "uint256")})}, + {"singleArrayNestAndArray((uint256,uint256)[],bytes32[])", "singleArrayNestAndArray", + mkType([][]abi.ArgumentMarshaling{mkType("uint256", "uint256")}, "bytes32[]")}, + {"singleArrayNestWithArrayAndArray((uint256[],address[2],uint8[4][][5])[],bytes32[])", "singleArrayNestWithArrayAndArray", + mkType([][]abi.ArgumentMarshaling{mkType("uint256[]", "address[2]", "uint8[4][][5]")}, "bytes32[]")}, + } + for i, tt := range tests { + selector, err := ParseSelector(tt.input) + if err != nil { + t.Errorf("test %d: failed to parse selector '%v': %v", i, tt.input, err) + } + if selector.Name != tt.name { + t.Errorf("test %d: unexpected function name: '%s' != '%s'", i, selector.Name, tt.name) + } + + if selector.Type != "function" { + t.Errorf("test %d: unexpected type: '%s' != '%s'", i, selector.Type, "function") + } + if !reflect.DeepEqual(selector.Inputs, tt.args) { + t.Errorf("test %d: unexpected args: '%v' != '%v'", i, selector.Inputs, tt.args) + } + } +} + +func TestParseSignature(t *testing.T) { + t.Parallel() + mkType := func(name string, typeOrComponents interface{}) abi.ArgumentMarshaling { + if typeName, ok := typeOrComponents.(string); ok { + return abi.ArgumentMarshaling{Name: name, Type: typeName, InternalType: typeName, Components: nil, Indexed: false} + } else if components, ok := typeOrComponents.([]abi.ArgumentMarshaling); ok { + return abi.ArgumentMarshaling{Name: name, Type: "tuple", InternalType: "tuple", Components: components, Indexed: false} + } else if components, ok := typeOrComponents.([][]abi.ArgumentMarshaling); ok { + return abi.ArgumentMarshaling{Name: name, Type: "tuple[]", InternalType: "tuple[]", Components: components[0], Indexed: false} + } + log.Fatalf("unexpected type %T", typeOrComponents) + return abi.ArgumentMarshaling{} + } + tests 
:= []struct { + input string + name string + args []abi.ArgumentMarshaling + }{ + {"noargs()", "noargs", []abi.ArgumentMarshaling{}}, + {"simple(a uint256, b uint256, c uint256)", "simple", []abi.ArgumentMarshaling{mkType("a", "uint256"), mkType("b", "uint256"), mkType("c", "uint256")}}, + {"other(foo uint256, bar address)", "other", []abi.ArgumentMarshaling{mkType("foo", "uint256"), mkType("bar", "address")}}, + {"withArray(a uint256[], b address[2], c uint8[4][][5])", "withArray", []abi.ArgumentMarshaling{mkType("a", "uint256[]"), mkType("b", "address[2]"), mkType("c", "uint8[4][][5]")}}, + {"singleNest(d bytes32, e uint8, f (uint256,uint256), g address)", "singleNest", []abi.ArgumentMarshaling{mkType("d", "bytes32"), mkType("e", "uint8"), mkType("f", []abi.ArgumentMarshaling{mkType("name0", "uint256"), mkType("name1", "uint256")}), mkType("g", "address")}}, + } + for i, tt := range tests { + selector, err := ParseSignature(tt.input) + if err != nil { + t.Errorf("test %d: failed to parse selector '%v': %v", i, tt.input, err) + } + if selector.Name != tt.name { + t.Errorf("test %d: unexpected function name: '%s' != '%s'", i, selector.Name, tt.name) + } + + if selector.Type != "function" { + t.Errorf("test %d: unexpected type: '%s' != '%s'", i, selector.Type, "function") + } + if !reflect.DeepEqual(selector.Inputs, tt.args) { + t.Errorf("test %d: unexpected args: '%v' != '%v'", i, selector.Inputs, tt.args) + } + } +} diff --git a/core/chains/evm/assets/assets.go b/core/chains/evm/assets/assets.go new file mode 100644 index 00000000..3137e071 --- /dev/null +++ b/core/chains/evm/assets/assets.go @@ -0,0 +1,117 @@ +package assets + +import ( + "database/sql/driver" + "fmt" + "math/big" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/utils/bytes" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + + "github.com/shopspring/decimal" +) + +// Eth contains a field to represent the smallest units of ETH +type 
Eth big.Int + +// NewEth returns a new struct to represent ETH from it's smallest unit (wei) +func NewEth(w int64) *Eth { + return (*Eth)(big.NewInt(w)) +} + +// NewEthValue returns a new struct to represent ETH from it's smallest unit (wei) +func NewEthValue(w int64) Eth { + eth := NewEth(w) + return *eth +} + +// NewEthValueS returns a new struct to represent ETH from a string value of Eth (not wei) +// the underlying value is still wei +func NewEthValueS(s string) (Eth, error) { + e, err := decimal.NewFromString(s) + if err != nil { + return Eth{}, err + } + w := e.Mul(decimal.RequireFromString("10").Pow(decimal.RequireFromString("18"))) + return *(*Eth)(w.BigInt()), nil +} + +// Cmp delegates to *big.Int.Cmp +func (e *Eth) Cmp(y *Eth) int { + return e.ToInt().Cmp(y.ToInt()) +} + +func (e *Eth) String() string { + if e == nil { + return "" + } + return assets.Format(e.ToInt(), 18) +} + +// SetInt64 delegates to *big.Int.SetInt64 +func (e *Eth) SetInt64(w int64) *Eth { + return (*Eth)(e.ToInt().SetInt64(w)) +} + +// SetString delegates to *big.Int.SetString +func (e *Eth) SetString(s string, base int) (*Eth, bool) { + w, ok := e.ToInt().SetString(s, base) + return (*Eth)(w), ok +} + +// MarshalJSON implements the json.Marshaler interface. +func (e Eth) MarshalJSON() ([]byte, error) { + value, err := e.MarshalText() + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`"%s"`, value)), nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (e *Eth) MarshalText() ([]byte, error) { + return e.ToInt().MarshalText() +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (e *Eth) UnmarshalJSON(data []byte) error { + if bytes.HasQuotes(data) { + return e.UnmarshalText(bytes.TrimQuotes(data)) + } + return assets.ErrNoQuotesForCurrency +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+func (e *Eth) UnmarshalText(text []byte) error { + if _, ok := e.SetString(string(text), 10); !ok { + return fmt.Errorf("assets: cannot unmarshal %q into a *assets.Eth", text) + } + return nil +} + +// IsZero returns true when the value is 0 and false otherwise +func (e *Eth) IsZero() bool { + zero := big.NewInt(0) + return e.ToInt().Cmp(zero) == 0 +} + +// Symbol returns ETH +func (*Eth) Symbol() string { + return "ETH" +} + +// ToInt returns the Eth value as a *big.Int. +func (e *Eth) ToInt() *big.Int { + return (*big.Int)(e) +} + +// Scan reads the database value and returns an instance. +func (e *Eth) Scan(value interface{}) error { + return (*ubig.Big)(e).Scan(value) +} + +// Value returns the Eth value for serialization to database. +func (e Eth) Value() (driver.Value, error) { + return (ubig.Big)(e).Value() +} diff --git a/core/chains/evm/assets/assets_test.go b/core/chains/evm/assets/assets_test.go new file mode 100644 index 00000000..22831a8f --- /dev/null +++ b/core/chains/evm/assets/assets_test.go @@ -0,0 +1,116 @@ +package assets_test + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" +) + +func TestAssets_NewEthAndString(t *testing.T) { + t.Parallel() + + eth := assets.NewEth(0) + + assert.Equal(t, "0.000000000000000000", eth.String()) + + eth.SetInt64(1) + assert.Equal(t, "0.000000000000000001", eth.String()) + + eth.SetString("900000000000000000", 10) + assert.Equal(t, "0.900000000000000000", eth.String()) + + eth.SetString("115792089237316195423570985008687907853269984665640564039457584007913129639935", 10) + assert.Equal(t, "115792089237316195423570985008687907853269984665640564039457.584007913129639935", eth.String()) + + eth.SetString("115792089237316195423570985008687907853269984665640564039457584007913129639936", 10) + assert.Equal(t, 
"115792089237316195423570985008687907853269984665640564039457.584007913129639936", eth.String()) +} + +func TestAssets_Eth_IsZero(t *testing.T) { + t.Parallel() + + zeroEth := assets.NewEth(0) + assert.True(t, zeroEth.IsZero()) + + oneLink := assets.NewEth(1) + assert.False(t, oneLink.IsZero()) +} + +func TestAssets_Eth_MarshalJson(t *testing.T) { + t.Parallel() + + eth := assets.NewEth(1) + + b, err := json.Marshal(eth) + assert.NoError(t, err) + assert.Equal(t, []byte(`"1"`), b) +} + +func TestAssets_Eth_UnmarshalJsonOk(t *testing.T) { + t.Parallel() + + eth := assets.Eth{} + + err := json.Unmarshal([]byte(`"1"`), &eth) + assert.NoError(t, err) + assert.Equal(t, "0.000000000000000001", eth.String()) +} + +func TestAssets_Eth_UnmarshalJsonError(t *testing.T) { + t.Parallel() + + eth := assets.Eth{} + + err := json.Unmarshal([]byte(`"x"`), &eth) + assert.EqualError(t, err, "assets: cannot unmarshal \"x\" into a *assets.Eth") + + err = json.Unmarshal([]byte(`1`), &eth) + assert.Equal(t, commonassets.ErrNoQuotesForCurrency, err) +} + +func TestAssets_NewEth(t *testing.T) { + t.Parallel() + + ethRef := assets.NewEth(123) + ethVal := assets.NewEthValue(123) + ethStr, err := assets.NewEthValueS(ethRef.String()) + assert.NoError(t, err) + assert.Equal(t, *ethRef, ethVal) + assert.Equal(t, *ethRef, ethStr) +} + +func TestAssets_EthSymbol(t *testing.T) { + t.Parallel() + + eth := assets.NewEth(123) + assert.Equal(t, "ETH", eth.Symbol()) +} + +func TestAssets_EthScanValue(t *testing.T) { + t.Parallel() + + eth := assets.NewEth(123) + v, err := eth.Value() + assert.NoError(t, err) + + eth2 := assets.NewEth(0) + err = eth2.Scan(v) + assert.NoError(t, err) + + assert.Equal(t, eth, eth2) +} + +func TestAssets_EthCmpEth(t *testing.T) { + t.Parallel() + + eth1 := assets.NewEth(123) + eth2 := assets.NewEth(321) + assert.NotZero(t, eth1.Cmp(eth2)) + + eth3 := assets.NewEth(321) + assert.Zero(t, eth3.Cmp(eth2)) +} diff --git a/core/chains/evm/assets/units.go b/core/chains/evm/assets/units.go 
new file mode 100644 index 00000000..ddf23bf0 --- /dev/null +++ b/core/chains/evm/assets/units.go @@ -0,0 +1,28 @@ +package assets + +import ( + "math/big" + + "golang.org/x/exp/constraints" + + "github.com/ethereum/go-ethereum/params" +) + +func GWei[T constraints.Signed](n T) *Wei { + w := big.NewInt(int64(n)) + w.Mul(w, big.NewInt(params.GWei)) + return NewWei(w) +} + +// UEther converts units of micro-ether (terawei) into wei +func UEther[T constraints.Signed](n T) *Wei { + w := big.NewInt(int64(n)) + w.Mul(w, big.NewInt(params.GWei*1000)) + return NewWei(w) +} + +func Ether[T constraints.Signed](n T) *Wei { + w := big.NewInt(int64(n)) + w.Mul(w, big.NewInt(params.Ether)) + return NewWei(w) +} diff --git a/core/chains/evm/assets/units_test.go b/core/chains/evm/assets/units_test.go new file mode 100644 index 00000000..8fdf14e1 --- /dev/null +++ b/core/chains/evm/assets/units_test.go @@ -0,0 +1,41 @@ +package assets_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" +) + +func TestAssets_Units(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + fn func(int64) *assets.Wei + factor *big.Int + }{ + {name: "Wei", fn: assets.NewWeiI[int64], factor: big.NewInt(params.Wei)}, + {name: "GWei", fn: assets.GWei[int64], factor: big.NewInt(params.GWei)}, + {name: "UEther", fn: assets.UEther[int64], factor: big.NewInt(params.GWei * 1000)}, + {name: "Ether", fn: assets.Ether[int64], factor: big.NewInt(params.Ether)}, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + expected := assets.NewWeiI(0) + assert.Equal(t, expected, test.fn(0)) + + expected = assets.NewWeiI(100) + expected = expected.Mul(test.factor) + assert.Equal(t, expected, test.fn(100)) + }) + } +} diff --git a/core/chains/evm/assets/wei.go b/core/chains/evm/assets/wei.go new file mode 100644 index 
00000000..8855c743 --- /dev/null +++ b/core/chains/evm/assets/wei.go @@ -0,0 +1,280 @@ +package assets + +import ( + "database/sql/driver" + "fmt" + "math/big" + "strings" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "golang.org/x/exp/constraints" + + bigmath "github.com/goplugin/plugin-common/pkg/utils/big_math" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +const ( + // canonical unit suffixes + wei = "wei" + kwei = "kwei" + mwei = "mwei" + gwei = "gwei" + micro = "micro" + milli = "milli" + eth = "ether" + keth = "kether" + meth = "mether" + geth = "gether" + teth = "tether" +) + +func suffixExp(suf string) int32 { + switch suf { + default: + panic("unrecognized suffix: " + suf) + case wei: + return 0 + case kwei: + return 3 + case mwei: + return 6 + case gwei: + return 9 + case micro: + return 12 + case milli: + return 15 + case eth: + return 18 + case keth: + return 21 + case meth: + return 24 + case geth: + return 27 + case teth: + return 30 + } +} + +// Wei extends ubig.Big to implement encoding.TextMarshaler and +// encoding.TextUnmarshaler with support for unit suffixes, as well as +// additional functions +type Wei ubig.Big + +func MaxWei(w, x *Wei) *Wei { + return NewWei(bigmath.Max(w.ToInt(), x.ToInt())) +} + +// NewWei constructs a Wei from *big.Int. +func NewWei(i *big.Int) *Wei { + return (*Wei)(i) +} + +func NewWeiI[T constraints.Signed](i T) *Wei { + return NewWei(big.NewInt(int64(i))) +} + +// Returns input big.Int in Wei string format. 
+func FormatWei(i *big.Int) string { + return NewWei(i).String() +} + +func (w *Wei) Text(suffix string) string { + switch suffix { + default: // empty or unknown + fallthrough + case wei: + return w.text(wei, 0) + case kwei: + return w.text(kwei, 3) + case mwei: + return w.text(mwei, 6) + case gwei: + return w.text(gwei, 9) + case micro: + return w.text(micro, 12) + case milli: + return w.text(milli, 15) + case eth: + return w.text(eth, 18) + case keth: + return w.text(keth, 21) + case meth: + return w.text(meth, 24) + case geth: + return w.text(geth, 27) + case teth: + return w.text(teth, 30) + } +} + +// text formats w with the given suffix and exponent. As a special case, the suffix is omitted for `0`. +func (w *Wei) text(suf string, exp int32) string { + d := decimal.NewFromBigInt((*big.Int)(w), -exp) + if d.IsZero() { + return "0" + } + return fmt.Sprintf("%s %s", d, suf) + +} + +const u64Eth = 1_000_000_000_000_000_000 + +var ( + bigKeth = new(big.Int).Mul(big.NewInt(u64Eth), big.NewInt(1_000)) + bigMeth = new(big.Int).Mul(big.NewInt(u64Eth), big.NewInt(1_000_000)) + bigGeth = new(big.Int).Mul(big.NewInt(u64Eth), big.NewInt(1_000_000_000)) + bigTeth = new(big.Int).Mul(big.NewInt(u64Eth), big.NewInt(1_000_000_000_000)) +) + +func (w *Wei) MarshalText() ([]byte, error) { + return []byte(w.String()), nil +} + +func (w *Wei) String() string { + b := (*big.Int)(w) + if b.IsUint64() { + // <= math.MaxUint64 = 18.446_744_073_709_551_615 eth + u := b.Uint64() + switch { + case u >= u64Eth: + return w.Text(eth) + case u >= 1_000_000_000_000_000: + return w.Text(milli) + case u >= 1_000_000_000_000: + return w.Text(micro) + case u >= 1_000_000_000: + return w.Text(gwei) + case u >= 1_000_000: + return w.Text(mwei) + case u >= 1_000: + return w.Text(kwei) + default: + return w.Text(wei) + } + } + // > math.MaxUint64 = 18.446_744_073_709_551_615 eth + if b.Cmp(bigTeth) >= 0 { + return w.Text(teth) + } + if b.Cmp(bigGeth) >= 0 { + return w.Text(geth) + } + if 
b.Cmp(bigMeth) >= 0 { + return w.Text(meth) + } + if b.Cmp(bigKeth) >= 0 { + return w.Text(keth) + } + return w.Text(eth) +} + +func (w *Wei) UnmarshalText(b []byte) error { + s := string(b) + for _, suf := range []string{ + teth, geth, meth, keth, eth, + milli, micro, + gwei, mwei, kwei, wei, + } { + if !strings.HasSuffix(s, suf) { + continue + } + t := strings.TrimSuffix(s, suf) + t = strings.TrimSuffix(t, " ") + d, err := decimal.NewFromString(t) + if err != nil { + return errors.Wrapf(err, "unable to parse %q", s) + } + se := suffixExp(suf) + if d.IsInteger() { + m := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(se)), nil) + *w = (Wei)(*new(big.Int).Mul(d.BigInt(), m)) + return nil + } + + d = d.Mul(decimal.New(1, se)) + if !d.IsInteger() { + err := errors.New("maximum precision is wei") + return errors.Wrapf(err, "unable to parse %q", s) + } + *w = (Wei)(*d.BigInt()) + return nil + + } + // unrecognized or missing suffix + d, err := decimal.NewFromString(s) + if err != nil { + return errors.Wrapf(err, "unable to parse %q", s) + } + if d.IsInteger() { + *w = (Wei)(*d.BigInt()) + return nil + } + return errors.Errorf("unable to parse %q", s) +} + +func (w *Wei) ToInt() *big.Int { + return (*big.Int)(w) +} + +func (w *Wei) Int64() int64 { + return w.ToInt().Int64() +} + +func (w *Wei) Cmp(y *Wei) int { + return w.ToInt().Cmp(y.ToInt()) +} + +func (w *Wei) IsNegative() bool { + return w.Cmp(NewWeiI(0)) < 0 +} + +func (w *Wei) IsZero() bool { + return w.Cmp(NewWeiI(0)) == 0 +} + +func (w *Wei) Equal(y *Wei) bool { + return w.Cmp(y) == 0 +} + +func WeiMax(x, y *Wei) *Wei { + return NewWei(bigmath.Max(x.ToInt(), y.ToInt())) +} + +func WeiMin(x, y *Wei) *Wei { + return NewWei(bigmath.Min(x.ToInt(), y.ToInt())) +} + +// NOTE: Maths functions always return newly allocated number and do not mutate + +func (w *Wei) Sub(y *Wei) *Wei { + result := big.NewInt(0).Sub(w.ToInt(), y.ToInt()) + return NewWei(result) +} + +func (w *Wei) Add(y *Wei) *Wei { + return 
NewWei(big.NewInt(0).Add(w.ToInt(), y.ToInt())) +} + +func (w *Wei) Mul(y *big.Int) *Wei { + return NewWei(big.NewInt(0).Mul(w.ToInt(), y)) +} + +func (w *Wei) AddPercentage(percentage uint16) *Wei { + bumped := new(big.Int) + bumped.Mul(w.ToInt(), big.NewInt(int64(100+percentage))) + bumped.Div(bumped, big.NewInt(100)) + return NewWei(bumped) +} + +// Scan reads the database value and returns an instance. +func (w *Wei) Scan(value interface{}) error { + return (*ubig.Big)(w).Scan(value) +} + +// Value returns this instance serialized for database storage. +func (w Wei) Value() (driver.Value, error) { + return (ubig.Big)(w).Value() +} diff --git a/core/chains/evm/assets/wei_test.go b/core/chains/evm/assets/wei_test.go new file mode 100644 index 00000000..e1181a7a --- /dev/null +++ b/core/chains/evm/assets/wei_test.go @@ -0,0 +1,129 @@ +package assets + +import ( + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWei(t *testing.T) { + for _, tt := range []struct { + input string + exp string + }{ + {"0", "0"}, + {"0 wei", "0"}, + {"0 ether", "0"}, + {"1", "1 wei"}, + {"1000", "1 kwei"}, + {"1100", "1.1 kwei"}, + {"1.1 kwei", "1.1 kwei"}, + {"1.1000 kwei", "1.1 kwei"}, + {"10. 
kwei", "10 kwei"}, + {"10.0 kwei", "10 kwei"}, + {"10.1 kwei", "10.1 kwei"}, + {"999.9 kwei", "999.9 kwei"}, + {"1000000", "1 mwei"}, + {"1000000000", "1 gwei"}, + {"1000000000000", "1 micro"}, + {"200000000000000", "200 micro"}, + {"200 micro", "200 micro"}, + {"0.2 milli", "200 micro"}, + {"281 micro", "281 micro"}, + {"281.474976710655 micro", "281.474976710655 micro"}, + {"0.281474976710655 milli", "281.474976710655 micro"}, + {"999.9 micro", "999.9 micro"}, + {"1000000000000000", "1 milli"}, + {"1000000000000000000", "1 ether"}, + {"1000000000000000000000", "1 kether"}, + {"1000000000000000000000000", "1 mether"}, + {"1000000000000000000000000000", "1 gether"}, + {"1000000000000000000000000000000", "1 tether"}, + {"1100000000000000000000000000000", "1.1 tether"}, + } { + t.Run(tt.input, func(t *testing.T) { + var w Wei + err := w.UnmarshalText([]byte(tt.input)) + require.NoError(t, err) + b, err := w.MarshalText() + require.NoError(t, err) + assert.Equal(t, tt.exp, string(b)) + assert.Equal(t, tt.exp, w.String()) + }) + } +} + +func FuzzWei(f *testing.F) { + f.Add("1") + f.Add("2.3 gwei") + f.Add("00005 wei") + f.Add("1100000000000000000000000000000") + f.Add("1 wei") + f.Add("2.3 kwei") + f.Add("0.0005gwei") + f.Add("1100000000000000000000000000000 wei") + f.Add("9.7 tether") + f.Add("0.567gether") + f.Add("5.753 mether") + f.Add("42 kether") + f.Add("1 ether") + f.Add("10.4 milli") + f.Add("5 micro") + f.Fuzz(func(t *testing.T, v string) { + if len(v) > 1_000 { + t.Skipf("too many characters: %d", len(v)) + } + if e := tryParseExp(v); -1000 > e || e > 1000 { + t.Skipf("exponent too large: %d", e) + } + var w Wei + err := w.UnmarshalText([]byte(v)) + if err != nil { + t.Skip() + } + + b, err := w.MarshalText() + require.NoErrorf(t, err, "failed to marshal %v after unmarshaling from %q", w, v) + + var w2 Wei + err = w2.UnmarshalText(b) + require.NoErrorf(t, err, "failed to unmarshal %s after marshaling from %v", string(b), w) + require.Equal(t, w, w2, "unequal 
values after marshal/unmarshal") + }) +} + +func tryParseExp(v string) int64 { + i := strings.IndexAny(v, "Ee") + if i == -1 { + return -1 + } + v = v[i+1:] + if i := strings.IndexFunc(v, func(r rune) bool { + switch { + case r == '-' || r == '+': + return false + case r < '0' || '9' < r: + return true + } + return false + }); i > -1 { + v = v[:i] + } + e, err := strconv.ParseInt(v, 10, 32) + if err != nil { + return -1 + } + return e +} + +func Test_tryParseExp(t *testing.T) { + got := tryParseExp("000000000E0000000060000000wei") + assert.Equal(t, int64(60000000), got) + got = tryParseExp("0e-80000800") + assert.Equal(t, int64(-80000800), got) + got = tryParseExp("0e+802444440") + assert.Equal(t, int64(802444440), got) +} diff --git a/core/chains/evm/client/README.md b/core/chains/evm/client/README.md new file mode 100644 index 00000000..b9c7a4f4 --- /dev/null +++ b/core/chains/evm/client/README.md @@ -0,0 +1,34 @@ +# EVM Client + +## Node FSM + +```mermaid +stateDiagram-v2 + [*] --> Started : Start() + + state Started { + [*] --> Undialed + Undialed --> Unusable + Undialed --> Unreachable + Undialed --> Dialed + + Unreachable --> Dialed + + Dialed --> Unreachable + Dialed --> InvalidChainID + Dialed --> Alive + + InvalidChainID --> Unreachable + InvalidChainID --> Alive + + Alive --> Unreachable + Alive --> OutOfSync + + OutOfSync --> Unreachable + OutOfSync --> InvalidChainID + OutOfSync --> Alive + } + + Started --> Closed : Close() + Closed --> [*] +``` diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go new file mode 100644 index 00000000..be5900b3 --- /dev/null +++ b/core/chains/evm/client/chain_client.go @@ -0,0 +1,283 @@ +package client + +import ( + "context" + "math/big" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + 
"github.com/goplugin/plugin-common/pkg/logger" + + commonclient "github.com/goplugin/pluginv3.0/v2/common/client" + "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +var _ Client = (*chainClient)(nil) + +// TODO-1663: rename this to client, once the client.go file is deprecated. +type chainClient struct { + multiNode commonclient.MultiNode[ + *big.Int, + evmtypes.Nonce, + common.Address, + common.Hash, + *types.Transaction, + common.Hash, + types.Log, + ethereum.FilterQuery, + *evmtypes.Receipt, + *assets.Wei, + *evmtypes.Head, + RPCCLient, + ] + logger logger.SugaredLogger +} + +func NewChainClient( + lggr logger.Logger, + selectionMode string, + leaseDuration time.Duration, + noNewHeadsThreshold time.Duration, + nodes []commonclient.Node[*big.Int, *evmtypes.Head, RPCCLient], + sendonlys []commonclient.SendOnlyNode[*big.Int, RPCCLient], + chainID *big.Int, + chainType config.ChainType, +) Client { + multiNode := commonclient.NewMultiNode[ + *big.Int, + evmtypes.Nonce, + common.Address, + common.Hash, + *types.Transaction, + common.Hash, + types.Log, + ethereum.FilterQuery, + *evmtypes.Receipt, + *assets.Wei, + *evmtypes.Head, + RPCCLient, + ]( + lggr, + selectionMode, + leaseDuration, + noNewHeadsThreshold, + nodes, + sendonlys, + chainID, + chainType, + "EVM", + func(tx *types.Transaction, err error) commonclient.SendTxReturnCode { + return ClassifySendError(err, logger.Sugared(logger.Nop()), tx, common.Address{}, chainType.IsL2()) + }, + 0, // use the default value provided by the implementation + ) + return &chainClient{ + multiNode: multiNode, + logger: logger.Sugared(lggr), + } +} + +func (c *chainClient) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { + return c.multiNode.BalanceAt(ctx, account, blockNumber) +} + +func (c *chainClient) BatchCallContext(ctx context.Context, b 
[]rpc.BatchElem) error { + batch := make([]any, len(b)) + for i, arg := range b { + batch[i] = any(arg) + } + return c.multiNode.BatchCallContext(ctx, batch) +} + +func (c *chainClient) BatchCallContextAll(ctx context.Context, b []rpc.BatchElem) error { + batch := make([]any, len(b)) + for i, arg := range b { + batch[i] = any(arg) + } + return c.multiNode.BatchCallContextAll(ctx, batch) +} + +// TODO-1663: return custom Block type instead of geth's once client.go is deprecated. +func (c *chainClient) BlockByHash(ctx context.Context, hash common.Hash) (b *types.Block, err error) { + rpc, err := c.multiNode.SelectNodeRPC() + if err != nil { + return b, err + } + return rpc.BlockByHashGeth(ctx, hash) +} + +// TODO-1663: return custom Block type instead of geth's once client.go is deprecated. +func (c *chainClient) BlockByNumber(ctx context.Context, number *big.Int) (b *types.Block, err error) { + rpc, err := c.multiNode.SelectNodeRPC() + if err != nil { + return b, err + } + return rpc.BlockByNumberGeth(ctx, number) +} + +func (c *chainClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + return c.multiNode.CallContext(ctx, result, method, args...) +} + +func (c *chainClient) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + return c.multiNode.CallContract(ctx, msg, blockNumber) +} + +func (c *chainClient) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) { + return c.multiNode.PendingCallContract(ctx, msg) +} + +// TODO-1663: change this to actual ChainID() call once client.go is deprecated. 
+func (c *chainClient) ChainID() (*big.Int, error) { + //return c.multiNode.ChainID(ctx), nil + return c.multiNode.ConfiguredChainID(), nil +} + +func (c *chainClient) Close() { + c.multiNode.Close() +} + +func (c *chainClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { + return c.multiNode.CodeAt(ctx, account, blockNumber) +} + +func (c *chainClient) ConfiguredChainID() *big.Int { + return c.multiNode.ConfiguredChainID() +} + +func (c *chainClient) Dial(ctx context.Context) error { + return c.multiNode.Dial(ctx) +} + +func (c *chainClient) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { + return c.multiNode.EstimateGas(ctx, call) +} +func (c *chainClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + return c.multiNode.FilterEvents(ctx, q) +} + +func (c *chainClient) HeaderByHash(ctx context.Context, h common.Hash) (head *types.Header, err error) { + rpc, err := c.multiNode.SelectNodeRPC() + if err != nil { + return head, err + } + return rpc.HeaderByHash(ctx, h) +} + +func (c *chainClient) HeaderByNumber(ctx context.Context, n *big.Int) (head *types.Header, err error) { + rpc, err := c.multiNode.SelectNodeRPC() + if err != nil { + return head, err + } + return rpc.HeaderByNumber(ctx, n) +} + +func (c *chainClient) HeadByHash(ctx context.Context, h common.Hash) (*evmtypes.Head, error) { + return c.multiNode.BlockByHash(ctx, h) +} + +func (c *chainClient) HeadByNumber(ctx context.Context, n *big.Int) (*evmtypes.Head, error) { + return c.multiNode.BlockByNumber(ctx, n) +} + +func (c *chainClient) IsL2() bool { + return c.multiNode.IsL2() +} + +func (c *chainClient) PLIBalance(ctx context.Context, address common.Address, linkAddress common.Address) (*commonassets.Link, error) { + return c.multiNode.PLIBalance(ctx, address, linkAddress) +} + +func (c *chainClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) { + return 
c.multiNode.LatestBlockHeight(ctx) +} + +func (c *chainClient) NodeStates() map[string]string { + return c.multiNode.NodeStates() +} + +func (c *chainClient) PendingCodeAt(ctx context.Context, account common.Address) (b []byte, err error) { + rpc, err := c.multiNode.SelectNodeRPC() + if err != nil { + return b, err + } + return rpc.PendingCodeAt(ctx, account) +} + +// TODO-1663: change this to evmtypes.Nonce(int64) once client.go is deprecated. +func (c *chainClient) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + n, err := c.multiNode.PendingSequenceAt(ctx, account) + return uint64(n), err +} + +func (c *chainClient) SendTransaction(ctx context.Context, tx *types.Transaction) error { + return c.multiNode.SendTransaction(ctx, tx) +} + +func (c *chainClient) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error) { + err := c.SendTransaction(ctx, tx) + returnCode := ClassifySendError(err, c.logger, tx, fromAddress, c.IsL2()) + return returnCode, err +} + +func (c *chainClient) SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (evmtypes.Nonce, error) { + return c.multiNode.SequenceAt(ctx, account, blockNumber) +} + +func (c *chainClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (s ethereum.Subscription, err error) { + rpc, err := c.multiNode.SelectNodeRPC() + if err != nil { + return s, err + } + return rpc.SubscribeFilterLogs(ctx, q, ch) +} + +func (c *chainClient) SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) { + csf := newChainIDSubForwarder(c.ConfiguredChainID(), ch) + err := csf.start(c.multiNode.Subscribe(ctx, csf.srcCh, "newHeads")) + if err != nil { + return nil, err + } + return csf, nil +} + +func (c *chainClient) SuggestGasPrice(ctx context.Context) (p *big.Int, err error) { + rpc, err := c.multiNode.SelectNodeRPC() + if err 
!= nil { + return p, err + } + return rpc.SuggestGasPrice(ctx) +} + +func (c *chainClient) SuggestGasTipCap(ctx context.Context) (t *big.Int, err error) { + rpc, err := c.multiNode.SelectNodeRPC() + if err != nil { + return t, err + } + return rpc.SuggestGasTipCap(ctx) +} + +func (c *chainClient) TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (*big.Int, error) { + return c.multiNode.TokenBalance(ctx, address, contractAddress) +} + +func (c *chainClient) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, error) { + return c.multiNode.TransactionByHash(ctx, txHash) +} + +// TODO-1663: return custom Receipt type instead of geth's once client.go is deprecated. +func (c *chainClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (r *types.Receipt, err error) { + rpc, err := c.multiNode.SelectNodeRPC() + if err != nil { + return r, err + } + //return rpc.TransactionReceipt(ctx, txHash) + return rpc.TransactionReceiptGeth(ctx, txHash) +} diff --git a/core/chains/evm/client/chain_id_sub.go b/core/chains/evm/client/chain_id_sub.go new file mode 100644 index 00000000..bd1a38dd --- /dev/null +++ b/core/chains/evm/client/chain_id_sub.go @@ -0,0 +1,95 @@ +package client + +import ( + "math/big" + + "github.com/ethereum/go-ethereum" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +var _ ethereum.Subscription = &chainIDSubForwarder{} + +// chainIDSubForwarder wraps a head subscription in order to intercept and augment each head with chainID before forwarding. 
+type chainIDSubForwarder struct { + chainID *big.Int + destCh chan<- *evmtypes.Head + + srcCh chan *evmtypes.Head + srcSub ethereum.Subscription + + done chan struct{} + err chan error + unSub chan struct{} +} + +func newChainIDSubForwarder(chainID *big.Int, ch chan<- *evmtypes.Head) *chainIDSubForwarder { + return &chainIDSubForwarder{ + chainID: chainID, + destCh: ch, + srcCh: make(chan *evmtypes.Head), + done: make(chan struct{}), + err: make(chan error), + unSub: make(chan struct{}, 1), + } +} + +// start spawns the forwarding loop for sub. +func (c *chainIDSubForwarder) start(sub ethereum.Subscription, err error) error { + if err != nil { + close(c.srcCh) + return err + } + c.srcSub = sub + go c.forwardLoop() + return nil +} + +// forwardLoop receives from src, adds the chainID, and then sends to dest. +// It also handles Unsubscribing, which may interrupt either forwarding operation. +func (c *chainIDSubForwarder) forwardLoop() { + // the error channel must be closed when unsubscribing + defer close(c.err) + defer close(c.done) + + for { + select { + case err := <-c.srcSub.Err(): + select { + case c.err <- err: + case <-c.unSub: + c.srcSub.Unsubscribe() + } + return + + case h := <-c.srcCh: + h.EVMChainID = ubig.New(c.chainID) + select { + case c.destCh <- h: + case <-c.unSub: + c.srcSub.Unsubscribe() + return + } + + case <-c.unSub: + c.srcSub.Unsubscribe() + return + } + } +} + +func (c *chainIDSubForwarder) Unsubscribe() { + // tell forwardLoop to unsubscribe + select { + case c.unSub <- struct{}{}: + default: + // already triggered + } + // wait for forwardLoop to complete + <-c.done +} + +func (c *chainIDSubForwarder) Err() <-chan error { + return c.err +} diff --git a/core/chains/evm/client/chain_id_sub_test.go b/core/chains/evm/client/chain_id_sub_test.go new file mode 100644 index 00000000..61ecf9f5 --- /dev/null +++ b/core/chains/evm/client/chain_id_sub_test.go @@ -0,0 +1,121 @@ +package client + +import ( + "errors" + "math/big" + "testing" + + 
"github.com/stretchr/testify/assert" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +type mockSubscription struct { + unsubscribed bool + Errors chan error +} + +func newMockSubscription() *mockSubscription { + return &mockSubscription{Errors: make(chan error)} +} + +func (mes *mockSubscription) Err() <-chan error { return mes.Errors } + +func (mes *mockSubscription) Unsubscribe() { + mes.unsubscribed = true + close(mes.Errors) +} + +func TestChainIDSubForwarder(t *testing.T) { + t.Parallel() + + chainID := big.NewInt(123) + + t.Run("unsubscribe forwarder", func(t *testing.T) { + t.Parallel() + + ch := make(chan *evmtypes.Head) + forwarder := newChainIDSubForwarder(chainID, ch) + sub := newMockSubscription() + err := forwarder.start(sub, nil) + assert.NoError(t, err) + forwarder.Unsubscribe() + + assert.True(t, sub.unsubscribed) + _, ok := <-sub.Err() + assert.False(t, ok) + _, ok = <-forwarder.Err() + assert.False(t, ok) + }) + + t.Run("unsubscribe forwarder with error", func(t *testing.T) { + t.Parallel() + + ch := make(chan *evmtypes.Head) + forwarder := newChainIDSubForwarder(chainID, ch) + sub := newMockSubscription() + err := forwarder.start(sub, nil) + assert.NoError(t, err) + sub.Errors <- errors.New("boo") + forwarder.Unsubscribe() + + assert.True(t, sub.unsubscribed) + _, ok := <-sub.Err() + assert.False(t, ok) + _, ok = <-forwarder.Err() + assert.False(t, ok) + }) + + t.Run("unsubscribe forwarder with message", func(t *testing.T) { + t.Parallel() + + ch := make(chan *evmtypes.Head) + forwarder := newChainIDSubForwarder(chainID, ch) + sub := newMockSubscription() + err := forwarder.start(sub, nil) + assert.NoError(t, err) + forwarder.srcCh <- &evmtypes.Head{} + forwarder.Unsubscribe() + + assert.True(t, sub.unsubscribed) + _, ok := <-sub.Err() + assert.False(t, ok) + _, ok = <-forwarder.Err() + assert.False(t, ok) + }) + + t.Run("non nil error parameter", 
func(t *testing.T) { + t.Parallel() + + ch := make(chan *evmtypes.Head) + forwarder := newChainIDSubForwarder(chainID, ch) + sub := newMockSubscription() + errIn := errors.New("foo") + errOut := forwarder.start(sub, errIn) + assert.Equal(t, errIn, errOut) + }) + + t.Run("forwarding", func(t *testing.T) { + t.Parallel() + + ch := make(chan *evmtypes.Head) + forwarder := newChainIDSubForwarder(chainID, ch) + sub := newMockSubscription() + err := forwarder.start(sub, nil) + assert.NoError(t, err) + + head := &evmtypes.Head{ + ID: 1, + } + forwarder.srcCh <- head + receivedHead := <-ch + assert.Equal(t, head, receivedHead) + assert.Equal(t, ubig.New(chainID), receivedHead.EVMChainID) + + expectedErr := errors.New("error") + sub.Errors <- expectedErr + receivedErr := <-forwarder.Err() + assert.Equal(t, expectedErr, receivedErr) + }) +} diff --git a/core/chains/evm/client/client.go b/core/chains/evm/client/client.go new file mode 100644 index 00000000..27dc983e --- /dev/null +++ b/core/chains/evm/client/client.go @@ -0,0 +1,368 @@ +package client + +import ( + "context" + "fmt" + "math/big" + "strings" + "time" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/logger" + + commonclient "github.com/goplugin/pluginv3.0/v2/common/client" + "github.com/goplugin/pluginv3.0/v2/common/config" + htrktypes "github.com/goplugin/pluginv3.0/v2/common/headtracker/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" +) + +const queryTimeout = 10 * time.Second +const BALANCE_OF_ADDRESS_FUNCTION_SELECTOR = "0x70a08231" + +//go:generate mockery --quiet 
--name Client --output ./mocks/ --case=underscore + +// Client is the interface used to interact with an ethereum node. +type Client interface { + Dial(ctx context.Context) error + Close() + // ChainID locally stored for quick access + ConfiguredChainID() *big.Int + // ChainID RPC call + ChainID() (*big.Int, error) + + // NodeStates returns a map of node Name->node state + // It might be nil or empty, e.g. for mock clients etc + NodeStates() map[string]string + + TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (*big.Int, error) + BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) + PLIBalance(ctx context.Context, address common.Address, linkAddress common.Address) (*assets.Link, error) + + // Wrapped RPC methods + CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error + BatchCallContext(ctx context.Context, b []rpc.BatchElem) error + // BatchCallContextAll calls BatchCallContext for every single node including + // sendonlys. + // CAUTION: This should only be used for mass re-transmitting transactions, it + // might have unexpected effects to use it for anything else. + BatchCallContextAll(ctx context.Context, b []rpc.BatchElem) error + + // HeadByNumber and HeadByHash is a reimplemented version due to a + // difference in how block header hashes are calculated by Parity nodes + // running on Kovan, Avalanche and potentially others. We have to return our own wrapper type to capture the + // correct hash from the RPC response. 
+ HeadByNumber(ctx context.Context, n *big.Int) (*evmtypes.Head, error) + HeadByHash(ctx context.Context, n common.Hash) (*evmtypes.Head, error) + SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) + + SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error) + + // Wrapped Geth client methods + // blockNumber can be specified as `nil` to imply latest block + // if blocks, transactions, or receipts are not found - a nil result and an error are returned + // these methods may not be compatible with non Ethereum chains as return types may follow different formats + // suggested options: use HeadByNumber/HeadByHash (above) or CallContext and parse with custom types + SendTransaction(ctx context.Context, tx *types.Transaction) error + CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) + PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) + PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) + SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (evmtypes.Nonce, error) + TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, error) + TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) + BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) + BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) + FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) + SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) + EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) + SuggestGasPrice(ctx context.Context) (*big.Int, error) + SuggestGasTipCap(ctx context.Context) (*big.Int, error) + LatestBlockHeight(ctx context.Context) (*big.Int, error) + + 
HeaderByNumber(ctx context.Context, n *big.Int) (*types.Header, error) + HeaderByHash(ctx context.Context, h common.Hash) (*types.Header, error) + + CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) + PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) + + IsL2() bool +} + +func ContextWithDefaultTimeout() (ctx context.Context, cancel context.CancelFunc) { + return context.WithTimeout(context.Background(), queryTimeout) +} + +// client represents an abstract client that manages connections to +// multiple nodes for a single chain id +type client struct { + logger logger.SugaredLogger + pool *Pool +} + +var _ Client = (*client)(nil) +var _ htrktypes.Client[*evmtypes.Head, ethereum.Subscription, *big.Int, common.Hash] = (*client)(nil) + +// NewClientWithNodes instantiates a client from a list of nodes +// Currently only supports one primary +// +// Deprecated: use [NewChainClient] +func NewClientWithNodes(lggr logger.Logger, selectionMode string, leaseDuration time.Duration, noNewHeadsThreshold time.Duration, primaryNodes []Node, sendOnlyNodes []SendOnlyNode, chainID *big.Int, chainType config.ChainType) (*client, error) { + pool := NewPool(lggr, selectionMode, leaseDuration, noNewHeadsThreshold, primaryNodes, sendOnlyNodes, chainID, chainType) + return &client{ + logger: logger.Sugared(lggr), + pool: pool, + }, nil +} + +// Dial opens websocket connections if necessary and sanity-checks that the +// node's remote chain ID matches the local one +func (client *client) Dial(ctx context.Context) error { + if err := client.pool.Dial(ctx); err != nil { + return errors.Wrap(err, "failed to dial pool") + } + return nil +} + +func (client *client) Close() { + client.pool.Close() +} + +func (client *client) NodeStates() (states map[string]string) { + states = make(map[string]string) + for _, n := range client.pool.nodes { + states[n.Name()] = n.State().String() + } + for _, s := range client.pool.sendonlys 
{ + states[s.Name()] = s.State().String() + } + return +} + +// CallArgs represents the data used to call the balance method of a contract. +// "To" is the address of the ERC contract. "Data" is the message sent +// to the contract. "From" is the sender address. +type CallArgs struct { + From common.Address `json:"from"` + To common.Address `json:"to"` + Data hexutil.Bytes `json:"data"` +} + +// TokenBalance returns the balance of the given address for the token contract address. +func (client *client) TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (*big.Int, error) { + result := "" + numLinkBigInt := new(big.Int) + functionSelector := evmtypes.HexToFunctionSelector(BALANCE_OF_ADDRESS_FUNCTION_SELECTOR) // balanceOf(address) + data := utils.ConcatBytes(functionSelector.Bytes(), common.LeftPadBytes(address.Bytes(), utils.EVMWordByteLen)) + args := CallArgs{ + To: contractAddress, + Data: data, + } + err := client.CallContext(ctx, &result, "eth_call", args, "latest") + if err != nil { + return numLinkBigInt, err + } + if _, ok := numLinkBigInt.SetString(result, 0); !ok { + return nil, fmt.Errorf("failed to parse int: %s", result) + } + return numLinkBigInt, nil +} + +// PLIBalance returns the balance of PLI at the given address +func (client *client) PLIBalance(ctx context.Context, address common.Address, linkAddress common.Address) (*assets.Link, error) { + balance, err := client.TokenBalance(ctx, address, linkAddress) + if err != nil { + return assets.NewLinkFromJuels(0), err + } + return (*assets.Link)(balance), nil +} + +func (client *client) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { + return client.pool.BalanceAt(ctx, account, blockNumber) +} + +// We wrap the GethClient's `TransactionReceipt` method so that we can ignore the error that arises +// when we're talking to a Parity node that has no receipt yet. 
+func (client *client) TransactionReceipt(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { + receipt, err = client.pool.TransactionReceipt(ctx, txHash) + + if err != nil && strings.Contains(err.Error(), "missing required field") { + return nil, ethereum.NotFound + } + return +} + +func (client *client) TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, err error) { + return client.pool.TransactionByHash(ctx, txHash) +} + +func (client *client) ConfiguredChainID() *big.Int { + return client.pool.chainID +} + +func (client *client) ChainID() (*big.Int, error) { + return client.pool.ChainID(), nil +} + +func (client *client) HeaderByNumber(ctx context.Context, n *big.Int) (*types.Header, error) { + return client.pool.HeaderByNumber(ctx, n) +} + +func (client *client) HeaderByHash(ctx context.Context, h common.Hash) (*types.Header, error) { + return client.pool.HeaderByHash(ctx, h) +} + +func (client *client) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error) { + err := client.SendTransaction(ctx, tx) + returnCode := ClassifySendError(err, client.logger, tx, fromAddress, client.pool.ChainType().IsL2()) + return returnCode, err +} + +// SendTransaction also uses the sendonly HTTP RPC URLs if set +func (client *client) SendTransaction(ctx context.Context, tx *types.Transaction) error { + return client.pool.SendTransaction(ctx, tx) +} + +func (client *client) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + return client.pool.PendingNonceAt(ctx, account) +} + +func (client *client) SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (evmtypes.Nonce, error) { + nonce, err := client.pool.NonceAt(ctx, account, blockNumber) + return evmtypes.Nonce(nonce), err +} + +func (client *client) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + return 
client.pool.PendingCodeAt(ctx, account) +} + +func (client *client) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) { + return client.pool.EstimateGas(ctx, call) +} + +// SuggestGasPrice calls the RPC node to get a suggested gas price. +// WARNING: It is not recommended to ever use this result for anything +// important. There are a number of issues with asking the RPC node to provide a +// gas estimate; it is not reliable. Unless you really have a good reason to +// use this, you should probably use core node's internal gas estimator +// instead. +func (client *client) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + return client.pool.SuggestGasPrice(ctx) +} + +func (client *client) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + return client.pool.CallContract(ctx, msg, blockNumber) +} + +func (client *client) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) { + return client.pool.PendingCallContract(ctx, msg) +} + +func (client *client) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { + return client.pool.CodeAt(ctx, account, blockNumber) +} + +func (client *client) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + return client.pool.BlockByNumber(ctx, number) +} + +func (client *client) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return client.pool.BlockByHash(ctx, hash) +} + +func (client *client) LatestBlockHeight(ctx context.Context) (*big.Int, error) { + var height big.Int + h, err := client.pool.BlockNumber(ctx) + return height.SetUint64(h), err +} + +func (client *client) HeadByNumber(ctx context.Context, number *big.Int) (head *evmtypes.Head, err error) { + hex := ToBlockNumArg(number) + err = client.pool.CallContext(ctx, &head, "eth_getBlockByNumber", hex, false) + if err != nil { + return nil, err + } + if head == nil { + err = 
ethereum.NotFound + return + } + head.EVMChainID = ubig.New(client.ConfiguredChainID()) + return +} + +func (client *client) HeadByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) { + err = client.pool.CallContext(ctx, &head, "eth_getBlockByHash", hash.Hex(), false) + if err != nil { + return nil, err + } + if head == nil { + err = ethereum.NotFound + return + } + head.EVMChainID = ubig.New(client.ConfiguredChainID()) + return +} + +func ToBlockNumArg(number *big.Int) string { + if number == nil { + return "latest" + } + return hexutil.EncodeBig(number) +} + +func (client *client) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + return client.pool.FilterLogs(ctx, q) +} + +func (client *client) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + client.logger.Debugw("evmclient.Client#SubscribeFilterLogs(...)", + "q", q, + ) + return client.pool.SubscribeFilterLogs(ctx, q, ch) +} + +func (client *client) SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) { + csf := newChainIDSubForwarder(client.ConfiguredChainID(), ch) + err := csf.start(client.pool.EthSubscribe(ctx, csf.srcCh, "newHeads")) + if err != nil { + return nil, err + } + return csf, nil +} + +func (client *client) EthSubscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (ethereum.Subscription, error) { + return client.pool.EthSubscribe(ctx, channel, args...) +} + +func (client *client) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + return client.pool.CallContext(ctx, result, method, args...) 
+} + +func (client *client) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + return client.pool.BatchCallContext(ctx, b) +} + +func (client *client) BatchCallContextAll(ctx context.Context, b []rpc.BatchElem) error { + return client.pool.BatchCallContextAll(ctx, b) +} + +// SuggestGasTipCap calls the RPC node to get a suggested gas tip cap. +// WARNING: It is not recommended to ever use this result for anything +// important. There are a number of issues with asking the RPC node to provide a +// gas estimate; it is not reliable. Unless you really have a good reason to +// use this, you should probably use core node's internal gas estimator +// instead. +func (client *client) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, err error) { + return client.pool.SuggestGasTipCap(ctx) +} + +func (client *client) IsL2() bool { + return client.pool.ChainType().IsL2() +} diff --git a/core/chains/evm/client/client_test.go b/core/chains/evm/client/client_test.go new file mode 100644 index 00000000..50124533 --- /dev/null +++ b/core/chains/evm/client/client_test.go @@ -0,0 +1,913 @@ +package client_test + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "net/http/httptest" + "net/url" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + + commonclient "github.com/goplugin/pluginv3.0/v2/common/client" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func mustNewClient(t *testing.T, wsURL string, sendonlys ...url.URL) client.Client { + return mustNewClientWithChainID(t, wsURL, testutils.FixtureChainID, sendonlys...) +} + +func mustNewClientWithChainID(t *testing.T, wsURL string, chainID *big.Int, sendonlys ...url.URL) client.Client { + cfg := client.TestNodePoolConfig{ + NodeSelectionMode: client.NodeSelectionMode_RoundRobin, + } + c, err := client.NewClientWithTestNode(t, cfg, time.Second*0, wsURL, nil, sendonlys, 42, chainID) + require.NoError(t, err) + return c +} + +func mustNewChainClient(t *testing.T, wsURL string, sendonlys ...url.URL) client.Client { + return mustNewChainClientWithChainID(t, wsURL, testutils.FixtureChainID, sendonlys...) +} + +func mustNewChainClientWithChainID(t *testing.T, wsURL string, chainID *big.Int, sendonlys ...url.URL) client.Client { + cfg := client.TestNodePoolConfig{ + NodeSelectionMode: client.NodeSelectionMode_RoundRobin, + } + c, err := client.NewChainClientWithTestNode(t, cfg, time.Second*0, cfg.NodeLeaseDuration, wsURL, nil, sendonlys, 42, chainID) + require.NoError(t, err) + return c +} + +func mustNewClients(t *testing.T, wsURL string, sendonlys ...url.URL) []client.Client { + var clients []client.Client + clients = append(clients, mustNewClient(t, wsURL, sendonlys...)) + clients = append(clients, mustNewChainClient(t, wsURL, sendonlys...)) + return clients +} + +func mustNewClientsWithChainID(t *testing.T, wsURL string, chainID *big.Int, sendonlys ...url.URL) []client.Client { + var clients []client.Client + clients = append(clients, mustNewClientWithChainID(t, wsURL, chainID, sendonlys...)) + clients = append(clients, mustNewChainClientWithChainID(t, wsURL, chainID, sendonlys...)) + return clients +} + +func TestEthClient_TransactionReceipt(t *testing.T) { + t.Parallel() + + txHash := "0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238" + + mustReadResult := func(t *testing.T, file 
string) []byte { + response := cltest.MustReadFile(t, file) + var resp struct { + Result json.RawMessage `json:"result"` + } + err := json.Unmarshal(response, &resp) + require.NoError(t, err) + return resp.Result + } + + t.Run("happy path", func(t *testing.T) { + result := mustReadResult(t, "../../../testdata/jsonrpc/getTransactionReceipt.json") + + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if assert.Equal(t, "eth_getTransactionReceipt", method) && assert.True(t, params.IsArray()) && + assert.Equal(t, txHash, params.Array()[0].String()) { + resp.Result = string(result) + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + hash := common.HexToHash(txHash) + receipt, err := ethClient.TransactionReceipt(testutils.Context(t), hash) + require.NoError(t, err) + assert.Equal(t, hash, receipt.TxHash) + assert.Equal(t, big.NewInt(11), receipt.BlockNumber) + } + }) + + t.Run("no tx hash, returns ethereum.NotFound", func(t *testing.T) { + result := mustReadResult(t, "../../../testdata/jsonrpc/getTransactionReceipt_notFound.json") + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if assert.Equal(t, "eth_getTransactionReceipt", method) && assert.True(t, params.IsArray()) && + assert.Equal(t, txHash, params.Array()[0].String()) { + resp.Result = string(result) + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, 
ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + hash := common.HexToHash(txHash) + _, err = ethClient.TransactionReceipt(testutils.Context(t), hash) + require.Equal(t, ethereum.NotFound, errors.Cause(err)) + } + }) +} + +func TestEthClient_PendingNonceAt(t *testing.T) { + t.Parallel() + + address := testutils.NewAddress() + + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if !assert.Equal(t, "eth_getTransactionCount", method) || !assert.True(t, params.IsArray()) { + return + } + arr := params.Array() + if assert.Equal(t, strings.ToLower(address.Hex()), strings.ToLower(arr[0].String())) && + assert.Equal(t, "pending", arr[1].String()) { + resp.Result = `"0x100"` + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + result, err := ethClient.PendingNonceAt(testutils.Context(t), address) + require.NoError(t, err) + + var expected uint64 = 256 + require.Equal(t, result, expected) + } +} + +func TestEthClient_BalanceAt(t *testing.T) { + t.Parallel() + + largeBalance, _ := big.NewInt(0).SetString("100000000000000000000", 10) + address := testutils.NewAddress() + + tests := []struct { + name string + balance *big.Int + }{ + {"basic", big.NewInt(256)}, + {"larger than signed 64 bit integer", largeBalance}, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + 
case "eth_unsubscribe": + resp.Result = "true" + return + } + if assert.Equal(t, "eth_getBalance", method) && assert.True(t, params.IsArray()) && + assert.Equal(t, strings.ToLower(address.Hex()), strings.ToLower(params.Array()[0].String())) { + resp.Result = `"` + hexutil.EncodeBig(test.balance) + `"` + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + result, err := ethClient.BalanceAt(testutils.Context(t), address, nil) + require.NoError(t, err) + assert.Equal(t, test.balance, result) + } + }) + } +} + +func TestEthClient_LatestBlockHeight(t *testing.T) { + t.Parallel() + + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if !assert.Equal(t, "eth_blockNumber", method) { + return + } + resp.Result = `"0x100"` + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + result, err := ethClient.LatestBlockHeight(testutils.Context(t)) + require.NoError(t, err) + require.Equal(t, big.NewInt(256), result) + } +} + +func TestEthClient_GetERC20Balance(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + expectedBig, _ := big.NewInt(0).SetString("100000000000000000000000000000000000000", 10) + + tests := []struct { + name string + balance *big.Int + }{ + {"small", big.NewInt(256)}, + {"big", expectedBig}, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + contractAddress := testutils.NewAddress() + userAddress := testutils.NewAddress() + functionSelector := 
evmtypes.HexToFunctionSelector(client.BALANCE_OF_ADDRESS_FUNCTION_SELECTOR) // balanceOf(address) + txData := utils.ConcatBytes(functionSelector.Bytes(), common.LeftPadBytes(userAddress.Bytes(), utils.EVMWordByteLen)) + + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if !assert.Equal(t, "eth_call", method) || !assert.True(t, params.IsArray()) { + return + } + arr := params.Array() + callArgs := arr[0] + if assert.True(t, callArgs.IsObject()) && + assert.Equal(t, strings.ToLower(contractAddress.Hex()), callArgs.Get("to").String()) && + assert.Equal(t, hexutil.Encode(txData), callArgs.Get("data").String()) && + assert.Equal(t, "latest", arr[1].String()) { + + resp.Result = `"` + hexutil.EncodeBig(test.balance) + `"` + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + result, err := ethClient.TokenBalance(ctx, userAddress, contractAddress) + require.NoError(t, err) + assert.Equal(t, test.balance, result) + } + }) + } +} + +func TestReceipt_UnmarshalEmptyBlockHash(t *testing.T) { + t.Parallel() + + input := `{ + "transactionHash": "0x444172bef57ad978655171a8af2cfd89baa02a97fcb773067aef7794d6913374", + "gasUsed": "0x1", + "cumulativeGasUsed": "0x1", + "logs": [], + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x8bf99b", + "blockHash": null + }` + + var receipt types.Receipt + err := json.Unmarshal([]byte(input), &receipt) + require.NoError(t, err) +} + +func TestEthClient_HeaderByNumber(t *testing.T) { + t.Parallel() + + expectedBlockNum := big.NewInt(1) + expectedBlockHash := "0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a" + + tests := []struct { + name string + expectedRequestBlock *big.Int + expectedResponseBlock int64 + error error + rpcResp string + }{ + {"happy geth", expectedBlockNum, expectedBlockNum.Int64(), nil, + 
`{"difficulty":"0xf3a00","extraData":"0xd883010503846765746887676f312e372e318664617277696e","gasLimit":"0xffc001","gasUsed":"0x0","hash":"0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0xd1aeb42885a43b72b518182ef893125814811048","mixHash":"0x0f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","nonce":"0x0ece08ea8c49dfd9","number":"0x1","parentHash":"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x218","stateRoot":"0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b","timestamp":"0x58318da2","totalDifficulty":"0x1f3a00","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}`}, + {"happy parity", expectedBlockNum, expectedBlockNum.Int64(), nil, + 
`{"author":"0xd1aeb42885a43b72b518182ef893125814811048","difficulty":"0xf3a00","extraData":"0xd883010503846765746887676f312e372e318664617277696e","gasLimit":"0xffc001","gasUsed":"0x0","hash":"0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0xd1aeb42885a43b72b518182ef893125814811048","mixHash":"0x0f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","nonce":"0x0ece08ea8c49dfd9","number":"0x1","parentHash":"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":["0xa00f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","0x880ece08ea8c49dfd9"],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x218","stateRoot":"0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b","timestamp":"0x58318da2","totalDifficulty":"0x1f3a00","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}`}, + {"missing header", expectedBlockNum, 0, fmt.Errorf("no live nodes available for chain %s", cltest.FixtureChainID.String()), + `null`}, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case 
"eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if !assert.Equal(t, "eth_getBlockByNumber", method) || !assert.True(t, params.IsArray()) { + return + } + arr := params.Array() + blockNumStr := arr[0].String() + var blockNum hexutil.Big + err := blockNum.UnmarshalText([]byte(blockNumStr)) + if assert.NoError(t, err) && assert.Equal(t, test.expectedRequestBlock, blockNum.ToInt()) && + assert.Equal(t, false, arr[1].Bool()) { + resp.Result = test.rpcResp + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(testutils.Context(t), 5*time.Second) + result, err := ethClient.HeadByNumber(ctx, expectedBlockNum) + if test.error != nil { + require.Error(t, err, test.error) + } else { + require.NoError(t, err) + require.Equal(t, expectedBlockHash, result.Hash.Hex()) + require.Equal(t, test.expectedResponseBlock, result.Number) + require.Zero(t, cltest.FixtureChainID.Cmp(result.EVMChainID.ToInt())) + } + cancel() + } + }) + } +} + +func TestEthClient_SendTransaction_NoSecondaryURL(t *testing.T) { + t.Parallel() + + tx := cltest.NewLegacyTransaction(uint64(42), testutils.NewAddress(), big.NewInt(142), 242, big.NewInt(342), []byte{1, 2, 3}) + + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + if !assert.Equal(t, "eth_sendRawTransaction", method) { + return + } + resp.Result = `"` + tx.Hash().Hex() + `"` + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + 
require.NoError(t, err) + + err = ethClient.SendTransaction(testutils.Context(t), tx) + assert.NoError(t, err) + } +} + +func TestEthClient_SendTransaction_WithSecondaryURLs(t *testing.T) { + t.Parallel() + + tx := cltest.NewLegacyTransaction(uint64(42), testutils.NewAddress(), big.NewInt(142), 242, big.NewInt(342), []byte{1, 2, 3}) + + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + } + return + }).WSURL().String() + + rpcSrv := rpc.NewServer() + t.Cleanup(rpcSrv.Stop) + service := sendTxService{chainID: &cltest.FixtureChainID} + err := rpcSrv.RegisterName("eth", &service) + require.NoError(t, err) + ts := httptest.NewServer(rpcSrv) + t.Cleanup(ts.Close) + + sendonlyURL := *cltest.MustParseURL(t, ts.URL) + + clients := mustNewClients(t, wsURL, sendonlyURL, sendonlyURL) + for _, ethClient := range clients { + err = ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + err = ethClient.SendTransaction(testutils.Context(t), tx) + require.NoError(t, err) + } + + // Unfortunately it's a bit tricky to test this, since there is no + // synchronization. We have to rely on timing instead. 
+ require.Eventually(t, func() bool { return service.sentCount.Load() == int32(len(clients)*2) }, testutils.WaitTimeout(t), 500*time.Millisecond) +} + +func TestEthClient_SendTransactionReturnCode(t *testing.T) { + t.Parallel() + + fromAddress := testutils.NewAddress() + tx := cltest.NewLegacyTransaction(uint64(42), testutils.NewAddress(), big.NewInt(142), 242, big.NewInt(342), []byte{1, 2, 3}) + + t.Run("returns Fatal error type when error message is fatal", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "invalid sender" + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.Fatal) + } + }) + + t.Run("returns TransactionAlreadyKnown error type when error message is nonce too low", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "nonce too low" + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + errType, err := 
ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.TransactionAlreadyKnown) + } + }) + + t.Run("returns Successful error type when there is no error message", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress) + assert.NoError(t, err) + assert.Equal(t, errType, commonclient.Successful) + } + }) + + t.Run("returns Underpriced error type when transaction is terminally underpriced", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "transaction underpriced" + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.Underpriced) + } + }) + + t.Run("returns Unsupported error type when error message is queue full", func(t *testing.T) 
{ + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "queue full" + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.Unsupported) + } + }) + + t.Run("returns Retryable error type when there is a transaction gap", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "NonceGap" + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.Retryable) + } + }) + + t.Run("returns InsufficientFunds error type when the sender address doesn't have enough funds", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + 
return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "insufficient funds for transfer" + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.InsufficientFunds) + } + }) + + t.Run("returns ExceedsFeeCap error type when gas price is too high for the node", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "Transaction fee cap exceeded" + } + return + }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.ExceedsMaxFee) + } + }) + + t.Run("returns Unknown error type when the error can't be categorized", func(t *testing.T) { + wsURL := testutils.NewWSServer(t, &cltest.FixtureChainID, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "eth_sendRawTransaction": + resp.Result = `"` + tx.Hash().Hex() + `"` + resp.Error.Message = "some random error" + } + return 
+ }).WSURL().String() + + clients := mustNewClients(t, wsURL) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + errType, err := ethClient.SendTransactionReturnCode(testutils.Context(t), tx, fromAddress) + assert.Error(t, err) + assert.Equal(t, errType, commonclient.Unknown) + } + }) +} + +type sendTxService struct { + chainID *big.Int + sentCount atomic.Int32 +} + +func (x *sendTxService) ChainId(ctx context.Context) (*hexutil.Big, error) { + return (*hexutil.Big)(x.chainID), nil +} + +func (x *sendTxService) SendRawTransaction(ctx context.Context, signRawTx hexutil.Bytes) error { + x.sentCount.Add(1) + return nil +} + +func TestEthClient_SubscribeNewHead(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(testutils.Context(t), testutils.WaitTimeout(t)) + defer cancel() + + chainId := big.NewInt(123456) + wsURL := testutils.NewWSServer(t, chainId, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + if method == "eth_unsubscribe" { + resp.Result = "true" + return + } + assert.Equal(t, "eth_subscribe", method) + if assert.True(t, params.IsArray()) && assert.Equal(t, "newHeads", params.Array()[0].String()) { + resp.Result = `"0x00"` + resp.Notify = headResult + } + return + }).WSURL().String() + + clients := mustNewClientsWithChainID(t, wsURL, chainId) + for _, ethClient := range clients { + err := ethClient.Dial(testutils.Context(t)) + require.NoError(t, err) + + headCh := make(chan *evmtypes.Head) + sub, err := ethClient.SubscribeNewHead(ctx, headCh) + require.NoError(t, err) + + select { + case err := <-sub.Err(): + t.Fatal(err) + case <-ctx.Done(): + t.Fatal(ctx.Err()) + case h := <-headCh: + require.NotNil(t, h.EVMChainID) + require.Zero(t, chainId.Cmp(h.EVMChainID.ToInt())) + } + sub.Unsubscribe() + } +} + +func TestEthClient_ErroringClient(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + // Empty node means there are no active nodes to 
select from, causing client to always return error. + erroringClient := client.NewChainClientWithEmptyNode(t, commonclient.NodeSelectionModeRoundRobin, time.Second*0, time.Second*0, testutils.FixtureChainID) + + _, err := erroringClient.BalanceAt(ctx, common.Address{}, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + err = erroringClient.BatchCallContext(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + err = erroringClient.BatchCallContextAll(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.BlockByHash(ctx, common.Hash{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.BlockByNumber(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + err = erroringClient.CallContext(ctx, nil, "") + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.CallContract(ctx, ethereum.CallMsg{}, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + // TODO-1663: test actual ChainID() call once client.go is deprecated. 
+ id, err := erroringClient.ChainID() + require.Equal(t, id, testutils.FixtureChainID) + //require.Equal(t, err, commonclient.ErroringNodeError) + require.Equal(t, err, nil) + + _, err = erroringClient.CodeAt(ctx, common.Address{}, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + id = erroringClient.ConfiguredChainID() + require.Equal(t, id, testutils.FixtureChainID) + + err = erroringClient.Dial(ctx) + require.ErrorContains(t, err, "no available nodes for chain") + + _, err = erroringClient.EstimateGas(ctx, ethereum.CallMsg{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.FilterLogs(ctx, ethereum.FilterQuery{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.HeaderByHash(ctx, common.Hash{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.HeaderByNumber(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.HeadByHash(ctx, common.Hash{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.HeadByNumber(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.PLIBalance(ctx, common.Address{}, common.Address{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.LatestBlockHeight(ctx) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.PendingCodeAt(ctx, common.Address{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.PendingNonceAt(ctx, common.Address{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + err = erroringClient.SendTransaction(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + code, err := erroringClient.SendTransactionReturnCode(ctx, nil, common.Address{}) + require.Equal(t, code, commonclient.Unknown) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = 
erroringClient.SequenceAt(ctx, common.Address{}, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.SubscribeNewHead(ctx, nil) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.SuggestGasPrice(ctx) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.SuggestGasTipCap(ctx) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.TokenBalance(ctx, common.Address{}, common.Address{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.TransactionByHash(ctx, common.Hash{}) + require.Equal(t, err, commonclient.ErroringNodeError) + + _, err = erroringClient.TransactionReceipt(ctx, common.Hash{}) + require.Equal(t, err, commonclient.ErroringNodeError) + +} + +const headResult = client.HeadResult diff --git a/core/chains/evm/client/doc.go b/core/chains/evm/client/doc.go new file mode 100644 index 00000000..f3cba4a0 --- /dev/null +++ b/core/chains/evm/client/doc.go @@ -0,0 +1,10 @@ +/* +The simulated backend cannot access old blocks and will return an error if +anything other than `latest`, `nil`, or the latest block are passed to +`CallContract`. + +The simulated client avoids the old block error from the simulated backend by +passing `nil` to `CallContract` when calling `CallContext` or `BatchCallContext` +and will not return an error when an old block is used. 
+*/ +package client diff --git a/core/chains/evm/client/erroring_node.go b/core/chains/evm/client/erroring_node.go new file mode 100644 index 00000000..2a69a69d --- /dev/null +++ b/core/chains/evm/client/erroring_node.go @@ -0,0 +1,150 @@ +package client + +import ( + "context" + "math/big" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" +) + +var _ Node = (*erroringNode)(nil) + +type erroringNode struct { + errMsg string +} + +func (e *erroringNode) UnsubscribeAllExceptAliveLoop() {} + +func (e *erroringNode) SubscribersCount() int32 { + return 0 +} + +func (e *erroringNode) ChainID() (chainID *big.Int) { return nil } + +func (e *erroringNode) Start(ctx context.Context) error { return errors.New(e.errMsg) } + +func (e *erroringNode) Close() error { return nil } + +func (e *erroringNode) Verify(ctx context.Context, expectedChainID *big.Int) (err error) { + return errors.New(e.errMsg) +} + +func (e *erroringNode) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + return errors.New(e.errMsg) +} + +func (e *erroringNode) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + return errors.New(e.errMsg) +} + +func (e *erroringNode) SendTransaction(ctx context.Context, tx *types.Transaction) error { + return errors.New(e.errMsg) +} + +func (e *erroringNode) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + return 0, errors.New(e.errMsg) +} + +func (e *erroringNode) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) { + return 0, errors.New(e.errMsg) +} + +func (e *erroringNode) 
TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) BlockNumber(ctx context.Context) (uint64, error) { + return 0, errors.New(e.errMsg) +} + +func (e *erroringNode) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { + return 0, errors.New(e.errMsg) +} + +func (e *erroringNode) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) HeaderByNumber(_ context.Context, _ *big.Int) (*types.Header, error) { + return nil, 
errors.New(e.errMsg) +} + +func (e *erroringNode) HeaderByHash(_ context.Context, _ common.Hash) (*types.Header, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) EthSubscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (ethereum.Subscription, error) { + return nil, errors.New(e.errMsg) +} + +func (e *erroringNode) String() string { + return "" +} + +func (e *erroringNode) State() NodeState { + return NodeStateUnreachable +} + +func (e *erroringNode) StateAndLatest() (NodeState, int64, *big.Int) { + return NodeStateUnreachable, -1, nil +} + +func (e *erroringNode) Order() int32 { + return 100 +} + +func (e *erroringNode) DeclareOutOfSync() {} +func (e *erroringNode) DeclareInSync() {} +func (e *erroringNode) DeclareUnreachable() {} +func (e *erroringNode) Name() string { return "" } +func (e *erroringNode) NodeStates() map[int32]string { return nil } diff --git a/core/chains/evm/client/erroring_node_test.go b/core/chains/evm/client/erroring_node_test.go new file mode 100644 index 00000000..1543ef70 --- /dev/null +++ b/core/chains/evm/client/erroring_node_test.go @@ -0,0 +1,101 @@ +package client + +import ( + "testing" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestErroringNode(t *testing.T) { + t.Parallel() + + ctx := testutils.Context(t) + n := &erroringNode{ + "boo", + } + + require.Nil(t, n.ChainID()) + err := n.Start(ctx) + require.Equal(t, n.errMsg, err.Error()) + + defer func() { assert.NoError(t, n.Close()) }() + + err = n.Verify(ctx, nil) + require.Equal(t, n.errMsg, err.Error()) + + err = n.CallContext(ctx, nil, "") + require.Equal(t, n.errMsg, err.Error()) + + err = n.BatchCallContext(ctx, nil) 
+ require.Equal(t, n.errMsg, err.Error()) + + err = n.SendTransaction(ctx, nil) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.PendingCodeAt(ctx, common.Address{}) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.PendingNonceAt(ctx, common.Address{}) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.NonceAt(ctx, common.Address{}, nil) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.TransactionReceipt(ctx, common.Hash{}) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.BlockByNumber(ctx, nil) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.BlockByHash(ctx, common.Hash{}) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.BalanceAt(ctx, common.Address{}, nil) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.FilterLogs(ctx, ethereum.FilterQuery{}) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, nil) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.EstimateGas(ctx, ethereum.CallMsg{}) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.SuggestGasPrice(ctx) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.CallContract(ctx, ethereum.CallMsg{}, nil) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.CodeAt(ctx, common.Address{}, nil) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.HeaderByNumber(ctx, nil) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.SuggestGasTipCap(ctx) + require.Equal(t, n.errMsg, err.Error()) + + _, err = n.EthSubscribe(ctx, nil) + require.Equal(t, n.errMsg, err.Error()) + + require.Equal(t, "", n.String()) + require.Equal(t, NodeStateUnreachable, n.State()) + + state, num, _ := n.StateAndLatest() + require.Equal(t, NodeStateUnreachable, state) + require.Equal(t, int64(-1), num) + + n.DeclareInSync() + n.DeclareOutOfSync() + n.DeclareUnreachable() + + require.Zero(t, n.Name()) + require.Nil(t, n.NodeStates()) +} diff --git a/core/chains/evm/client/errors.go 
b/core/chains/evm/client/errors.go new file mode 100644 index 00000000..c822d7dc --- /dev/null +++ b/core/chains/evm/client/errors.go @@ -0,0 +1,504 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "regexp" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + + commonclient "github.com/goplugin/pluginv3.0/v2/common/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/label" +) + +// fatal means this transaction can never be accepted even with a different nonce or higher gas price +type SendError struct { + fatal bool + err error +} + +func (s *SendError) Error() string { + return s.err.Error() +} + +// Fatal indicates whether the error should be considered fatal or not +// Fatal errors mean that no matter how many times the send is retried, no node +// will ever accept it +func (s *SendError) Fatal() bool { + return s != nil && s.fatal +} + +// CauseStr returns the string of the original error +func (s *SendError) CauseStr() string { + if s.err != nil { + return errors.Cause(s.err).Error() + } + return "" +} + +const ( + NonceTooLow = iota + // Nethermind specific error. Nethermind throws a NonceGap error when the tx nonce is greater than current_nonce + tx_count_in_mempool, instead of keeping the tx in mempool. + // See: https://github.com/NethermindEth/nethermind/blob/master/src/Nethermind/Nethermind.TxPool/Filters/GapNonceFilter.cs + NonceTooHigh + ReplacementTransactionUnderpriced + LimitReached + TransactionAlreadyInMempool + TerminallyUnderpriced + InsufficientEth + TxFeeExceedsCap + // Note: L2FeeTooLow/L2FeeTooHigh/L2Full have a very specific meaning specific + // to L2s (Arbitrum and clones). Do not implement this for non-L2 + // chains. This is potentially confusing because some RPC nodes e.g. + // Nethermind implement an error called `FeeTooLow` which has distinct + // meaning from this one. 
+ L2FeeTooLow + L2FeeTooHigh + L2Full + TransactionAlreadyMined + Fatal +) + +type ClientErrors = map[int]*regexp.Regexp + +// Parity +// See: https://github.com/openethereum/openethereum/blob/master/rpc/src/v1/helpers/errors.rs#L420 +var parFatal = regexp.MustCompile(`^Transaction gas is too low. There is not enough gas to cover minimal cost of the transaction|^Transaction cost exceeds current gas limit. Limit:|^Invalid signature|Recipient is banned in local queue.|Supplied gas is beyond limit|Sender is banned in local queue|Code is banned in local queue|Transaction is not permitted|Transaction is too big, see chain specification for the limit|^Invalid RLP data`) +var parity = ClientErrors{ + NonceTooLow: regexp.MustCompile("^Transaction nonce is too low. Try incrementing the nonce."), + ReplacementTransactionUnderpriced: regexp.MustCompile("^Transaction gas price .+is too low. There is another transaction with same nonce in the queue"), + LimitReached: regexp.MustCompile("There are too many transactions in the queue. Your transaction was dropped due to limit. Try increasing the fee."), + TransactionAlreadyInMempool: regexp.MustCompile("Transaction with the same hash was already imported."), + TerminallyUnderpriced: regexp.MustCompile("^Transaction gas price is too low. It does not satisfy your node's minimal gas price"), + InsufficientEth: regexp.MustCompile("^(Insufficient funds. 
The account you tried to send transaction from does not have enough funds.|Insufficient balance for transaction.)"), + Fatal: parFatal, +} + +// Geth +// See: https://github.com/ethereum/go-ethereum/blob/b9df7ecdc3d3685180ceb29665bab59e9f614da5/core/tx_pool.go#L516 +var gethFatal = regexp.MustCompile(`(: |^)(exceeds block gas limit|invalid sender|negative value|oversized data|gas uint64 overflow|intrinsic gas too low)$`) +var geth = ClientErrors{ + NonceTooLow: regexp.MustCompile(`(: |^)nonce too low$`), + NonceTooHigh: regexp.MustCompile(`(: |^)nonce too high$`), + ReplacementTransactionUnderpriced: regexp.MustCompile(`(: |^)replacement transaction underpriced$`), + TransactionAlreadyInMempool: regexp.MustCompile(`(: |^)(?i)(known transaction|already known)`), + TerminallyUnderpriced: regexp.MustCompile(`(: |^)transaction underpriced$`), + InsufficientEth: regexp.MustCompile(`(: |^)(insufficient funds for transfer|insufficient funds for gas \* price \+ value|insufficient balance for transfer)$`), + TxFeeExceedsCap: regexp.MustCompile(`(: |^)tx fee \([0-9\.]+ [a-zA-Z]+\) exceeds the configured cap \([0-9\.]+ [a-zA-Z]+\)$`), + Fatal: gethFatal, +} + +// Besu +// See: https://github.com/hyperledger/besu/blob/81f25e15f9891787829b532f2fb38c8c43fd6b2e/ethereum/api/src/main/java/org/hyperledger/besu/ethereum/api/jsonrpc/internal/response/JsonRpcError.java +var besuFatal = regexp.MustCompile(`^(Intrinsic gas exceeds gas limit|Transaction gas limit exceeds block gas limit|Invalid signature)$`) +var besu = ClientErrors{ + NonceTooLow: regexp.MustCompile(`^Nonce too low$`), + ReplacementTransactionUnderpriced: regexp.MustCompile(`^Replacement transaction underpriced$`), + TransactionAlreadyInMempool: regexp.MustCompile(`^Known transaction$`), + TerminallyUnderpriced: regexp.MustCompile(`^Gas price below configured minimum gas price$`), + InsufficientEth: regexp.MustCompile(`^Upfront cost exceeds account balance$`), + TxFeeExceedsCap: regexp.MustCompile(`^Transaction fee cap 
exceeded$`), + Fatal: besuFatal, +} + +// Erigon +// See: +// - https://github.com/ledgerwatch/erigon/blob/devel/core/tx_pool.go +// - https://github.com/ledgerwatch/erigon/blob/devel/core/error.go +// - https://github.com/ledgerwatch/erigon/blob/devel/core/vm/errors.go +// +// Note: some error definitions are unused, many errors are created inline. +var erigonFatal = regexp.MustCompile(`(: |^)(exceeds block gas limit|invalid sender|negative value|oversized data|gas uint64 overflow|intrinsic gas too low)$`) +var erigon = ClientErrors{ + NonceTooLow: regexp.MustCompile(`(: |^)nonce too low$`), + NonceTooHigh: regexp.MustCompile(`(: |^)nonce too high$`), + ReplacementTransactionUnderpriced: regexp.MustCompile(`(: |^)replacement transaction underpriced$`), + TransactionAlreadyInMempool: regexp.MustCompile(`(: |^)(block already known|already known)`), + TerminallyUnderpriced: regexp.MustCompile(`(: |^)transaction underpriced$`), + InsufficientEth: regexp.MustCompile(`(: |^)(insufficient funds for transfer|insufficient funds for gas \* price \+ value|insufficient balance for transfer)$`), + TxFeeExceedsCap: regexp.MustCompile(`(: |^)tx fee \([0-9\.]+ [a-zA-Z]+\) exceeds the configured cap \([0-9\.]+ [a-zA-Z]+\)$`), + Fatal: erigonFatal, +} + +// Arbitrum +// https://github.com/OffchainLabs/arbitrum/blob/cac30586bc10ecc1ae73e93de517c90984677fdb/packages/arb-evm/evm/result.go#L158 +// nitro: https://github.com/OffchainLabs/go-ethereum/blob/master/core/state_transition.go +var arbitrumFatal = regexp.MustCompile(`(: |^)(invalid message format|forbidden sender address)$|(: |^)(execution reverted)(:|$)`) +var arbitrum = ClientErrors{ + // TODO: Arbitrum returns this in case of low or high nonce. 
Update this when Arbitrum fix it + // https://app.shortcut.com/pluginlabs/story/16801/add-full-support-for-incorrect-nonce-on-arbitrum + NonceTooLow: regexp.MustCompile(`(: |^)invalid transaction nonce$|(: |^)nonce too low(:|$)`), + NonceTooHigh: regexp.MustCompile(`(: |^)nonce too high(:|$)`), + TerminallyUnderpriced: regexp.MustCompile(`(: |^)gas price too low$`), + InsufficientEth: regexp.MustCompile(`(: |^)(not enough funds for gas|insufficient funds for gas \* price \+ value)`), + Fatal: arbitrumFatal, + L2FeeTooLow: regexp.MustCompile(`(: |^)max fee per gas less than block base fee(:|$)`), + L2Full: regexp.MustCompile(`(: |^)(queue full|sequencer pending tx pool full, please try again)(:|$)`), +} + +var celo = ClientErrors{ + TxFeeExceedsCap: regexp.MustCompile(`(: |^)tx fee \([0-9\.]+ of currency celo\) exceeds the configured cap \([0-9\.]+ [a-zA-Z]+\)$`), + TerminallyUnderpriced: regexp.MustCompile(`(: |^)gasprice is less than gas price minimum floor`), + InsufficientEth: regexp.MustCompile(`(: |^)insufficient funds for gas \* price \+ value \+ gatewayFee$`), + LimitReached: regexp.MustCompile(`(: |^)txpool is full`), +} + +var metis = ClientErrors{ + L2FeeTooLow: regexp.MustCompile(`(: |^)gas price too low: \d+ wei, use at least tx.gasPrice = \d+ wei$`), +} + +// Substrate (Moonriver) +var substrate = ClientErrors{ + NonceTooLow: regexp.MustCompile(`(: |^)Pool\(Stale\)$`), + TransactionAlreadyInMempool: regexp.MustCompile(`(: |^)Pool\(AlreadyImported\)$`), +} + +var avalanche = ClientErrors{ + NonceTooLow: regexp.MustCompile(`(: |^)nonce too low: address 0x[0-9a-fA-F]{40} current nonce \([\d]+\) > tx nonce \([\d]+\)$`), +} + +// Klaytn +// https://github.com/klaytn/klaytn/blob/dev/blockchain/error.go +// https://github.com/klaytn/klaytn/blob/dev/blockchain/tx_pool.go +var klaytn = ClientErrors{ + NonceTooLow: regexp.MustCompile(`(: |^)nonce too low$`), // retry with an increased nonce + TransactionAlreadyInMempool: regexp.MustCompile(`(: |^)(known 
transaction)`), // don't send the tx again. The exactly same tx is already in the mempool + ReplacementTransactionUnderpriced: regexp.MustCompile(`(: |^)replacement transaction underpriced$|there is another tx which has the same nonce in the tx pool$`), // retry with an increased gasPrice or maxFeePerGas. This error happened when there is another tx having higher gasPrice or maxFeePerGas exist in the mempool + TerminallyUnderpriced: regexp.MustCompile(`(: |^)(transaction underpriced|^intrinsic gas too low)`), // retry with an increased gasPrice or maxFeePerGas + LimitReached: regexp.MustCompile(`(: |^)txpool is full`), // retry with few seconds wait + InsufficientEth: regexp.MustCompile(`(: |^)insufficient funds`), // stop to send a tx. The sender address doesn't have enough KLAY + TxFeeExceedsCap: regexp.MustCompile(`(: |^)(invalid gas fee cap|max fee per gas higher than max priority fee per gas)`), // retry with a valid gasPrice, maxFeePerGas, or maxPriorityFeePerGas. The new value can get from the return of `eth_gasPrice` + Fatal: gethFatal, +} + +// Nethermind +// All errors: https://github.com/NethermindEth/nethermind/blob/master/src/Nethermind/Nethermind.TxPool/AcceptTxResult.cs +// All filters: https://github.com/NethermindEth/nethermind/tree/9b68ec048c65f4b44fb863164c0dec3f7780d820/src/Nethermind/Nethermind.TxPool/Filters +var nethermindFatal = regexp.MustCompile(`(: |^)(SenderIsContract|Invalid(, transaction Hash is null)?|Int256Overflow|FailedToResolveSender|GasLimitExceeded(, Gas limit: \d+, gas limit of rejected tx: \d+)?)$`) +var nethermind = ClientErrors{ + // OldNonce: The EOA (externally owned account) that signed this transaction (sender) has already signed and executed a transaction with the same nonce. + NonceTooLow: regexp.MustCompile(`(: |^)OldNonce(, Current nonce: \d+, nonce of rejected tx: \d+)?$`), + NonceTooHigh: regexp.MustCompile(`(: |^)NonceGap(, Future nonce. 
Expected nonce: \d+)?$`), + + // FeeTooLow/FeeTooLowToCompete: Fee paid by this transaction is not enough to be accepted in the mempool. + TerminallyUnderpriced: regexp.MustCompile(`(: |^)(FeeTooLow(, MaxFeePerGas too low. MaxFeePerGas: \d+, BaseFee: \d+, MaxPriorityFeePerGas:\d+, Block number: \d+|` + + `, EffectivePriorityFeePerGas too low \d+ < \d+, BaseFee: \d+|` + + `, FeePerGas needs to be higher than \d+ to be added to the TxPool. Affordable FeePerGas of rejected tx: \d+.)?|` + + `FeeTooLowToCompete)$`), + + // AlreadyKnown: A transaction with the same hash has already been added to the pool in the past. + // OwnNonceAlreadyUsed: A transaction with same nonce has been signed locally already and is awaiting in the pool. + TransactionAlreadyInMempool: regexp.MustCompile(`(: |^)(AlreadyKnown|OwnNonceAlreadyUsed)$`), + + // InsufficientFunds: Sender account has not enough balance to execute this transaction. + InsufficientEth: regexp.MustCompile(`(: |^)InsufficientFunds(, Account balance: \d+, cumulative cost: \d+)?$`), + Fatal: nethermindFatal, +} + +// Harmony +// https://github.com/harmony-one/harmony/blob/main/core/tx_pool.go#L49 +var harmonyFatal = regexp.MustCompile("(: |^)(invalid shard|staking message does not match directive message|`from` address of transaction in blacklist|`to` address of transaction in blacklist)$") +var harmony = ClientErrors{ + TransactionAlreadyMined: regexp.MustCompile(`(: |^)transaction already finalized$`), + Fatal: harmonyFatal, +} + +var zkSync = ClientErrors{ + NonceTooLow: regexp.MustCompile(`(?:: |^)nonce too low\..+actual: \d*$`), + NonceTooHigh: regexp.MustCompile(`(?:: |^)nonce too high\..+actual: \d*$`), + TerminallyUnderpriced: regexp.MustCompile(`(?:: |^)max fee per gas less than block base fee$`), + InsufficientEth: regexp.MustCompile(`(?:: |^)(?:insufficient balance for transfer$|insufficient funds for gas + value)`), + TxFeeExceedsCap: regexp.MustCompile(`(?:: |^)max priority fee per gas higher than max fee per 
gas$`), + // intrinsic gas too low - gas limit less than 14700 + // Not enough gas for transaction validation - gas limit less than L2 fee + // Failed to pay the fee to the operator - gas limit less than L2+L1 fee + // Error function_selector = 0x, data = 0x - contract call with gas limit of 0 + // can't start a transaction from a non-account - trying to send from an invalid address, e.g. estimating a contract -> contract tx + // max fee per gas higher than 2^64-1 - uint64 overflow + // oversized data - data too large + Fatal: regexp.MustCompile(`(?:: |^)(?:exceeds block gas limit|intrinsic gas too low|Not enough gas for transaction validation|Failed to pay the fee to the operator|Error function_selector = 0x, data = 0x|invalid sender. can't start a transaction from a non-account|max(?: priority)? fee per (?:gas|pubdata byte) higher than 2\^64-1|oversized data. max: \d+; actual: \d+)$`), +} + +var clients = []ClientErrors{parity, geth, arbitrum, metis, substrate, avalanche, nethermind, harmony, besu, erigon, klaytn, celo, zkSync} + +func (s *SendError) is(errorType int) bool { + if s == nil || s.err == nil { + return false + } + str := s.CauseStr() + for _, client := range clients { + if _, ok := client[errorType]; !ok { + continue + } + if client[errorType].MatchString(str) { + return true + } + } + return false +} + +// IsReplacementUnderpriced indicates that a transaction already exists in the mempool with this nonce but a different gas price or payload +func (s *SendError) IsReplacementUnderpriced() bool { + return s.is(ReplacementTransactionUnderpriced) +} + +func (s *SendError) IsNonceTooLowError() bool { + return s.is(NonceTooLow) +} + +func (s *SendError) IsNonceTooHighError() bool { + return s.is(NonceTooHigh) +} + +// IsTransactionAlreadyMined - Harmony returns this error if the transaction has already been mined +func (s *SendError) IsTransactionAlreadyMined() bool { + return s.is(TransactionAlreadyMined) +} + +// Geth/parity returns this error if the 
transaction is already in the node's mempool +func (s *SendError) IsTransactionAlreadyInMempool() bool { + return s.is(TransactionAlreadyInMempool) +} + +// IsTerminallyUnderpriced indicates that this transaction is so far underpriced the node won't even accept it in the first place +func (s *SendError) IsTerminallyUnderpriced() bool { + return s.is(TerminallyUnderpriced) +} + +func (s *SendError) IsTemporarilyUnderpriced() bool { + return s.is(LimitReached) +} + +func (s *SendError) IsInsufficientEth() bool { + return s.is(InsufficientEth) +} + +// IsTxFeeExceedsCap returns true if the transaction and gas price are combined in +// some way that makes the total transaction too expensive for the eth node to +// accept at all. No amount of retrying at this or higher gas prices can ever +// succeed. +func (s *SendError) IsTxFeeExceedsCap() bool { + return s.is(TxFeeExceedsCap) +} + +// L2FeeTooLow is an l2-specific error returned when total fee is too low +func (s *SendError) L2FeeTooLow() bool { + return s.is(L2FeeTooLow) +} + +// IsL2FeeTooHigh is an l2-specific error returned when total fee is too high +func (s *SendError) IsL2FeeTooHigh() bool { + return s.is(L2FeeTooHigh) +} + +// IsL2Full is an l2-specific error returned when the queue or mempool is full. 
+func (s *SendError) IsL2Full() bool { + return s.is(L2Full) +} + +// IsTimeout indicates if the error was caused by an exceeded context deadline +func (s *SendError) IsTimeout() bool { + if s == nil { + return false + } + if s.err == nil { + return false + } + return errors.Is(s.err, context.DeadlineExceeded) +} + +// IsCanceled indicates if the error was caused by an context cancellation +func (s *SendError) IsCanceled() bool { + if s == nil { + return false + } + if s.err == nil { + return false + } + return errors.Is(s.err, context.Canceled) +} + +func NewFatalSendError(e error) *SendError { + if e == nil { + return nil + } + return &SendError{err: errors.WithStack(e), fatal: true} +} + +func NewSendErrorS(s string) *SendError { + return NewSendError(errors.New(s)) +} + +func NewSendError(e error) *SendError { + if e == nil { + return nil + } + fatal := isFatalSendError(e) + return &SendError{err: errors.WithStack(e), fatal: fatal} +} + +// Geth/parity returns these errors if the transaction failed in such a way that: +// 1. It will never be included into a block as a result of this send +// 2. 
Resending the transaction at a different gas price will never change the outcome +func isFatalSendError(err error) bool { + if err == nil { + return false + } + str := errors.Cause(err).Error() + for _, client := range clients { + if _, ok := client[Fatal]; !ok { + continue + } + if client[Fatal].MatchString(str) { + return true + } + } + return false +} + +// go-ethereum@v1.10.0/rpc/json.go +type JsonError struct { + Code int `json:"code"` + Message string `json:"message"` + Data interface{} `json:"data,omitempty"` +} + +func (err JsonError) Error() string { + if err.Message == "" { + return fmt.Sprintf("json-rpc error { Code = %d, Data = '%v' }", err.Code, err.Data) + } + return err.Message +} + +func (err *JsonError) String() string { + return fmt.Sprintf("json-rpc error { Code = %d, Message = '%s', Data = '%v' }", err.Code, err.Message, err.Data) +} + +func ExtractRPCErrorOrNil(err error) *JsonError { + jErr, eErr := ExtractRPCError(err) + if eErr != nil { + return nil + } + return jErr +} + +// ExtractRPCError attempts to extract a full JsonError (including revert reason details) +// from an error returned by a CallContract to an external RPC. As per https://github.com/ethereum/go-ethereum/blob/c49e065fea78a5d3759f7853a608494913e5824e/internal/ethapi/api.go#L974 +// CallContract server side for a revert will return an error which contains either: +// - The error directly from the EVM if there's no data (no revert reason, like an index out of bounds access) which +// when marshalled will only have a Message. +// - An error which implements rpc.DataError which when marshalled will have a Data field containing the execution result. +// If the revert not a custom Error (solidity >= 0.8.0), like require(1 == 2, "revert"), then geth and forks will automatically +// parse the string and put it in the message. If its a custom error, it's up to the client to decode the Data field which will be +// the abi encoded data of the custom error, i.e. 
revert MyCustomError(10) -> keccak(MyCustomError(uint256))[:4] || abi.encode(10). +// +// However, it appears that RPCs marshal this in different ways into a JsonError object received client side, +// some adding "Reverted" prefixes, removing the method signature etc. To avoid RPC specific parsing and support custom errors +// we return the full object returned from the RPC with a String() method that stringifies all fields for logging so no information is lost. +// Some examples: +// kovan (parity) +// { "error": { "code" : -32015, "data": "Reverted 0xABC123...", "message": "VM execution error." } } // revert reason always omitted from message. +// rinkeby / ropsten (geth) +// { "error": { "code": 3, "data": "0xABC123...", "message": "execution reverted: hello world" } } // revert reason automatically parsed if a simple require and included in message. +func ExtractRPCError(baseErr error) (*JsonError, error) { + if baseErr == nil { + return nil, errors.New("no error present") + } + cause := errors.Cause(baseErr) + jsonBytes, err := json.Marshal(cause) + if err != nil { + return nil, errors.Wrap(err, "unable to marshal err to json") + } + jErr := JsonError{} + err = json.Unmarshal(jsonBytes, &jErr) + if err != nil { + return nil, errors.Wrapf(err, "unable to unmarshal json into jsonError struct (got: %v)", baseErr) + } + if jErr.Code == 0 { + return nil, errors.Errorf("not a RPCError because it does not have a code (got: %v)", baseErr) + } + return &jErr, nil +} + +func ClassifySendError(err error, lggr logger.SugaredLogger, tx *types.Transaction, fromAddress common.Address, isL2 bool) commonclient.SendTxReturnCode { + sendError := NewSendError(err) + if sendError == nil { + return commonclient.Successful + } + if sendError.Fatal() { + lggr.Criticalw("Fatal error sending transaction", "err", sendError, "etx", tx) + // Attempt is thrown away in this case; we don't need it since it never got accepted by a node + return commonclient.Fatal + } + if 
sendError.IsNonceTooLowError() || sendError.IsTransactionAlreadyMined() { + lggr.Debugw("Transaction already confirmed for this nonce: %d", tx.Nonce(), "err", sendError, "etx", tx) + // Nonce too low indicated that a transaction at this nonce was confirmed already. + // Mark it as TransactionAlreadyKnown. + return commonclient.TransactionAlreadyKnown + } + if sendError.IsReplacementUnderpriced() { + lggr.Errorw(fmt.Sprintf("Replacement transaction underpriced for eth_tx %x. "+ + "Please note that using your node's private keys outside of the plugin node is NOT SUPPORTED and can lead to missed transactions.", + tx.Hash()), "gasPrice", tx.GasPrice, "gasTipCap", tx.GasTipCap, "gasFeeCap", tx.GasFeeCap, "err", sendError, "etx", tx) + + // Assume success and hand off to the next cycle. + return commonclient.Successful + } + if sendError.IsTransactionAlreadyInMempool() { + lggr.Debugw("Transaction already in mempool", "etx", tx, "err", sendError) + return commonclient.Successful + } + if sendError.IsTemporarilyUnderpriced() { + lggr.Infow("Transaction temporarily underpriced", "err", sendError) + return commonclient.Successful + } + if sendError.IsTerminallyUnderpriced() { + lggr.Errorw("Transaction terminally underpriced", "etx", tx, "err", sendError) + return commonclient.Underpriced + } + if sendError.L2FeeTooLow() || sendError.IsL2FeeTooHigh() || sendError.IsL2Full() { + if isL2 { + lggr.Errorw("Transaction fee out of range", "err", sendError, "etx", tx) + return commonclient.FeeOutOfValidRange + } + lggr.Errorw("this error type only handled for L2s", "err", sendError, "etx", tx) + return commonclient.Unsupported + } + if sendError.IsNonceTooHighError() { + // This error occurs when the tx nonce is greater than current_nonce + tx_count_in_mempool, + // instead of keeping the tx in mempool. This can happen if previous transactions haven't + // reached the client yet. The correct thing to do is to mark it as retryable. 
+ lggr.Warnw("Transaction has a nonce gap.", "err", sendError, "etx", tx) + return commonclient.Retryable + } + if sendError.IsInsufficientEth() { + lggr.Criticalw(fmt.Sprintf("Tx %x with type 0x%d was rejected due to insufficient eth: %s\n"+ + "ACTION REQUIRED: Plugin wallet with address 0x%x is OUT OF FUNDS", + tx.Hash(), tx.Type(), sendError.Error(), fromAddress, + ), "err", sendError, "etx", tx) + return commonclient.InsufficientFunds + } + if sendError.IsTimeout() { + lggr.Errorw("timeout while sending transaction %x", tx.Hash(), "err", sendError, "etx", tx) + return commonclient.Retryable + } + if sendError.IsCanceled() { + lggr.Errorw("context was canceled while sending transaction %x", tx.Hash(), "err", sendError, "etx", tx) + return commonclient.Retryable + } + if sendError.IsTxFeeExceedsCap() { + lggr.Criticalw(fmt.Sprintf("Sending transaction failed: %s", label.RPCTxFeeCapConfiguredIncorrectlyWarning), + "etx", tx, + "err", sendError, + "id", "RPCTxFeeCapExceeded", + ) + return commonclient.ExceedsMaxFee + } + lggr.Errorw("Unknown error encountered when sending transaction", "err", err, "etx", tx) + return commonclient.Unknown +} diff --git a/core/chains/evm/client/errors_test.go b/core/chains/evm/client/errors_test.go new file mode 100644 index 00000000..47f8c4c4 --- /dev/null +++ b/core/chains/evm/client/errors_test.go @@ -0,0 +1,356 @@ +package client_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" +) + +func newSendErrorWrapped(s string) *evmclient.SendError { + return evmclient.NewSendError(errors.Wrap(errors.New(s), "wrapped with some old bollocks")) +} + +type errorCase struct { + message string + expect bool + network string +} + +func Test_Eth_Errors(t *testing.T) { + t.Parallel() + + var err *evmclient.SendError + randomError := evmclient.NewSendErrorS("some old bollocks") + + t.Run("IsNonceTooLowError", func(t *testing.T) { 
+ assert.False(t, randomError.IsNonceTooLowError()) + + tests := []errorCase{ + {"nonce too low", true, "Geth"}, + {"nonce too low: address 0x336394A3219e71D9d9bd18201d34E95C1Bb7122C, tx: 8089 state: 8090", true, "Arbitrum"}, + {"Nonce too low", true, "Besu"}, + {"nonce too low", true, "Erigon"}, + {"nonce too low", true, "Klaytn"}, + {"Transaction nonce is too low. Try incrementing the nonce.", true, "Parity"}, + {"transaction rejected: nonce too low", true, "Arbitrum"}, + {"invalid transaction nonce", true, "Arbitrum"}, + {"call failed: nonce too low: address 0x0499BEA33347cb62D79A9C0b1EDA01d8d329894c current nonce (5833) > tx nonce (5511)", true, "Avalanche"}, + {"call failed: OldNonce", true, "Nethermind"}, + {"call failed: OldNonce, Current nonce: 22, nonce of rejected tx: 17", true, "Nethermind"}, + {"nonce too low. allowed nonce range: 427 - 447, actual: 426", true, "zkSync"}, + } + + for _, test := range tests { + t.Run(test.network, func(t *testing.T) { + err = evmclient.NewSendErrorS(test.message) + assert.Equal(t, err.IsNonceTooLowError(), test.expect) + err = newSendErrorWrapped(test.message) + assert.Equal(t, err.IsNonceTooLowError(), test.expect) + }) + } + }) + + t.Run("IsNonceTooHigh", func(t *testing.T) { + + tests := []errorCase{ + {"call failed: NonceGap", true, "Nethermind"}, + {"call failed: NonceGap, Future nonce. Expected nonce: 10", true, "Nethermind"}, + {"nonce too high: address 0x336394A3219e71D9d9bd18201d34E95C1Bb7122C, tx: 8089 state: 8090", true, "Arbitrum"}, + {"nonce too high", true, "Geth"}, + {"nonce too high", true, "Erigon"}, + {"nonce too high. 
allowed nonce range: 427 - 477, actual: 527", true, "zkSync"}, + } + + for _, test := range tests { + err = evmclient.NewSendErrorS(test.message) + assert.Equal(t, err.IsNonceTooHighError(), test.expect) + err = newSendErrorWrapped(test.message) + assert.Equal(t, err.IsNonceTooHighError(), test.expect) + } + }) + + t.Run("IsTransactionAlreadyMined", func(t *testing.T) { + assert.False(t, randomError.IsTransactionAlreadyMined()) + + tests := []errorCase{ + {"transaction already finalized", true, "Harmony"}, + } + + for _, test := range tests { + t.Run(test.network, func(t *testing.T) { + err = evmclient.NewSendErrorS(test.message) + assert.Equal(t, err.IsTransactionAlreadyMined(), test.expect) + err = newSendErrorWrapped(test.message) + assert.Equal(t, err.IsTransactionAlreadyMined(), test.expect) + }) + } + }) + + t.Run("IsReplacementUnderpriced", func(t *testing.T) { + + tests := []errorCase{ + {"replacement transaction underpriced", true, "geth"}, + {"Replacement transaction underpriced", true, "Besu"}, + {"replacement transaction underpriced", true, "Erigon"}, + {"replacement transaction underpriced", true, "Klaytn"}, + {"there is another tx which has the same nonce in the tx pool", true, "Klaytn"}, + {"Transaction gas price 100wei is too low. There is another transaction with same nonce in the queue with gas price 150wei. Try increasing the gas price or incrementing the nonce.", true, "Parity"}, + {"There are too many transactions in the queue. Your transaction was dropped due to limit. 
Try increasing the fee.", false, "Parity"}, + {"gas price too low", false, "Arbitrum"}, + } + + for _, test := range tests { + err = evmclient.NewSendErrorS(test.message) + assert.Equal(t, err.IsReplacementUnderpriced(), test.expect) + err = newSendErrorWrapped(test.message) + assert.Equal(t, err.IsReplacementUnderpriced(), test.expect) + } + }) + + t.Run("IsTransactionAlreadyInMempool", func(t *testing.T) { + assert.False(t, randomError.IsTransactionAlreadyInMempool()) + + tests := []errorCase{ + // I have seen this in log output + {"known transaction: 0x7f657507aee0511e36d2d1972a6b22e917cc89f92b6c12c4dbd57eaabb236960", true, "Geth"}, + // This comes from the geth source - https://github.com/ethereum/go-ethereum/blob/eb9d7d15ecf08cd5104e01a8af64489f01f700b0/core/tx_pool.go#L57 + {"already known", true, "Geth"}, + // This one is present in the light client (?!) + {"Known transaction (7f65)", true, "Geth"}, + {"Known transaction", true, "Besu"}, + {"already known", true, "Erigon"}, + {"block already known", true, "Erigon"}, + {"Transaction with the same hash was already imported.", true, "Parity"}, + {"call failed: AlreadyKnown", true, "Nethermind"}, + {"call failed: OwnNonceAlreadyUsed", true, "Nethermind"}, + {"known transaction", true, "Klaytn"}, + } + for _, test := range tests { + err = evmclient.NewSendErrorS(test.message) + assert.Equal(t, err.IsTransactionAlreadyInMempool(), test.expect) + err = newSendErrorWrapped(test.message) + assert.Equal(t, err.IsTransactionAlreadyInMempool(), test.expect) + } + }) + + t.Run("IsTerminallyUnderpriced", func(t *testing.T) { + assert.False(t, randomError.IsTerminallyUnderpriced()) + + tests := []errorCase{ + {"transaction underpriced", true, "geth"}, + {"replacement transaction underpriced", false, "geth"}, + {"Gas price below configured minimum gas price", true, "Besu"}, + {"transaction underpriced", true, "Erigon"}, + {"There are too many transactions in the queue. Your transaction was dropped due to limit. 
Try increasing the fee.", false, "Parity"}, + {"Transaction gas price is too low. It does not satisfy your node's minimal gas price (minimal: 100 got: 50). Try increasing the gas price.", true, "Parity"}, + {"gas price too low", true, "Arbitrum"}, + {"FeeTooLow", true, "Nethermind"}, + {"FeeTooLow, MaxFeePerGas too low. MaxFeePerGas: 50, BaseFee: 100, MaxPriorityFeePerGas:200, Block number: 5", true, "Nethermind"}, + {"FeeTooLow, EffectivePriorityFeePerGas too low 10 < 20, BaseFee: 30", true, "Nethermind"}, + {"FeeTooLow, FeePerGas needs to be higher than 100 to be added to the TxPool. Affordable FeePerGas of rejected tx: 50.", true, "Nethermind"}, + {"FeeTooLowToCompete", true, "Nethermind"}, + {"transaction underpriced", true, "Klaytn"}, + {"intrinsic gas too low", true, "Klaytn"}, + {"max fee per gas less than block base fee", true, "zkSync"}, + } + + for _, test := range tests { + err = evmclient.NewSendErrorS(test.message) + assert.Equal(t, err.IsTerminallyUnderpriced(), test.expect, "expected %q to match %s for client %s", err, "IsTerminallyUnderpriced", test.network) + err = newSendErrorWrapped(test.message) + assert.Equal(t, err.IsTerminallyUnderpriced(), test.expect, "expected %q to match %s for client %s", err, "IsTerminallyUnderpriced", test.network) + } + }) + + t.Run("IsTemporarilyUnderpriced", func(t *testing.T) { + tests := []errorCase{ + {"There are too many transactions in the queue. Your transaction was dropped due to limit. Try increasing the fee.", true, "Parity"}, + {"There are too many transactions in the queue. Your transaction was dropped due to limit. Try increasing the fee.", true, "Parity"}, + {"Transaction gas price is too low. It does not satisfy your node's minimal gas price (minimal: 100 got: 50). 
Try increasing the gas price.", false, "Parity"}, + } + for _, test := range tests { + err = evmclient.NewSendErrorS(test.message) + assert.Equal(t, err.IsTemporarilyUnderpriced(), test.expect) + err = newSendErrorWrapped(test.message) + assert.Equal(t, err.IsTemporarilyUnderpriced(), test.expect) + } + }) + + t.Run("IsInsufficientEth", func(t *testing.T) { + tests := []errorCase{ + {"insufficient funds for transfer", true, "Geth"}, + {"insufficient funds for gas * price + value", true, "Geth"}, + {"insufficient balance for transfer", true, "Geth"}, + {"Upfront cost exceeds account balance", true, "Besu"}, + {"insufficient funds for transfer", true, "Erigon"}, + {"insufficient funds for gas * price + value", true, "Erigon"}, + {"insufficient balance for transfer", true, "Erigon"}, + {"Insufficient balance for transaction. Balance=100.25, Cost=200.50", true, "Parity"}, + {"Insufficient funds. The account you tried to send transaction from does not have enough funds. Required 200.50 and got: 100.25.", true, "Parity"}, + {"transaction rejected: insufficient funds for gas * price + value", true, "Arbitrum"}, + {"not enough funds for gas", true, "Arbitrum"}, + {"insufficient funds for gas * price + value: address 0xb68D832c1241bc50db1CF09e96c0F4201D5539C9 have 9934612900000000 want 9936662900000000", true, "Arbitrum"}, + {"call failed: InsufficientFunds", true, "Nethermind"}, + {"call failed: InsufficientFunds, Account balance: 4740799397601480913, cumulative cost: 22019342038993800000", true, "Nethermind"}, + {"insufficient funds", true, "Klaytn"}, + {"insufficient funds for gas * price + value + gatewayFee", true, "celo"}, + {"insufficient balance for transfer", true, "zkSync"}, + {"insufficient funds for gas + value. 
balance: 42719769622667482000, fee: 48098250000000, value: 42719769622667482000", true, "celo"}, + } + for _, test := range tests { + err = evmclient.NewSendErrorS(test.message) + assert.Equal(t, err.IsInsufficientEth(), test.expect) + err = newSendErrorWrapped(test.message) + assert.Equal(t, err.IsInsufficientEth(), test.expect) + } + }) + + t.Run("IsTxFeeExceedsCap", func(t *testing.T) { + tests := []errorCase{ + {"tx fee (1.10 ether) exceeds the configured cap (1.00 ether)", true, "geth"}, + {"tx fee (1.10 FTM) exceeds the configured cap (1.00 FTM)", true, "geth"}, + {"tx fee (1.10 foocoin) exceeds the configured cap (1.00 foocoin)", true, "geth"}, + {"Transaction fee cap exceeded", true, "Besu"}, + {"tx fee (1.10 ether) exceeds the configured cap (1.00 ether)", true, "Erigon"}, + {"invalid gas fee cap", true, "Klaytn"}, + {"max fee per gas higher than max priority fee per gas", true, "Klaytn"}, + {"tx fee (1.10 of currency celo) exceeds the configured cap (1.00 celo)", true, "celo"}, + {"max priority fee per gas higher than max fee per gas", true, "zkSync"}, + } + for _, test := range tests { + err = evmclient.NewSendErrorS(test.message) + assert.Equal(t, err.IsTxFeeExceedsCap(), test.expect) + err = newSendErrorWrapped(test.message) + assert.Equal(t, err.IsTxFeeExceedsCap(), test.expect) + } + + assert.False(t, randomError.IsTxFeeExceedsCap()) + // Nil + err = evmclient.NewSendError(nil) + assert.False(t, err.IsTxFeeExceedsCap()) + }) + + t.Run("L2 Fees errors", func(t *testing.T) { + err = evmclient.NewSendErrorS("max fee per gas less than block base fee") + assert.False(t, err.IsL2FeeTooHigh()) + assert.True(t, err.L2FeeTooLow()) + err = newSendErrorWrapped("max fee per gas less than block base fee") + assert.False(t, err.IsL2FeeTooHigh()) + assert.True(t, err.L2FeeTooLow()) + + err = evmclient.NewSendErrorS("queue full") + assert.True(t, err.IsL2Full()) + err = evmclient.NewSendErrorS("sequencer pending tx pool full, please try again") + assert.True(t, 
err.IsL2Full()) + + assert.False(t, randomError.IsL2FeeTooHigh()) + assert.False(t, randomError.L2FeeTooLow()) + // Nil + err = evmclient.NewSendError(nil) + assert.False(t, err.IsL2FeeTooHigh()) + assert.False(t, err.L2FeeTooLow()) + }) + + t.Run("Metis gas price errors", func(t *testing.T) { + err := evmclient.NewSendErrorS("primary websocket (wss://ws-mainnet.metis.io) call failed: gas price too low: 18000000000 wei, use at least tx.gasPrice = 19500000000 wei") + assert.True(t, err.L2FeeTooLow()) + err = newSendErrorWrapped("primary websocket (wss://ws-mainnet.metis.io) call failed: gas price too low: 18000000000 wei, use at least tx.gasPrice = 19500000000 wei") + assert.True(t, err.L2FeeTooLow()) + + assert.False(t, randomError.L2FeeTooLow()) + // Nil + err = evmclient.NewSendError(nil) + assert.False(t, err.L2FeeTooLow()) + }) + + t.Run("moonriver errors", func(t *testing.T) { + err := evmclient.NewSendErrorS("primary http (http://***REDACTED***:9933) call failed: submit transaction to pool failed: Pool(Stale)") + assert.True(t, err.IsNonceTooLowError()) + assert.False(t, err.IsTransactionAlreadyInMempool()) + assert.False(t, err.Fatal()) + err = evmclient.NewSendErrorS("primary http (http://***REDACTED***:9933) call failed: submit transaction to pool failed: Pool(AlreadyImported)") + assert.True(t, err.IsTransactionAlreadyInMempool()) + assert.False(t, err.IsNonceTooLowError()) + assert.False(t, err.Fatal()) + }) +} + +func Test_Eth_Errors_Fatal(t *testing.T) { + t.Parallel() + + tests := []errorCase{ + {"some old bollocks", false, "none"}, + + {"insufficient funds for transfer", false, "Geth"}, + {"exceeds block gas limit", true, "Geth"}, + {"invalid sender", true, "Geth"}, + {"negative value", true, "Geth"}, + {"oversized data", true, "Geth"}, + {"gas uint64 overflow", true, "Geth"}, + {"intrinsic gas too low", true, "Geth"}, + + {"Intrinsic gas exceeds gas limit", true, "Besu"}, + {"Transaction gas limit exceeds block gas limit", true, "Besu"}, + {"Invalid 
signature", true, "Besu"}, + + {"insufficient funds for transfer", false, "Erigon"}, + {"exceeds block gas limit", true, "Erigon"}, + {"invalid sender", true, "Erigon"}, + {"negative value", true, "Erigon"}, + {"oversized data", true, "Erigon"}, + {"gas uint64 overflow", true, "Erigon"}, + {"intrinsic gas too low", true, "Erigon"}, + + {"Insufficient funds. The account you tried to send transaction from does not have enough funds. Required 100 and got: 50.", false, "Parity"}, + {"Supplied gas is beyond limit.", true, "Parity"}, + {"Sender is banned in local queue.", true, "Parity"}, + {"Recipient is banned in local queue.", true, "Parity"}, + {"Code is banned in local queue.", true, "Parity"}, + {"Transaction is not permitted.", true, "Parity"}, + {"Transaction is too big, see chain specification for the limit.", true, "Parity"}, + {"Transaction gas is too low. There is not enough gas to cover minimal cost of the transaction (minimal: 100 got: 50) Try increasing supplied gas.", true, "Parity"}, + {"Transaction cost exceeds current gas limit. Limit: 50, got: 100. 
Try decreasing supplied gas.", true, "Parity"}, + {"Invalid signature: some old bollocks", true, "Parity"}, + {"Invalid RLP data: some old bollocks", true, "Parity"}, + + {"invalid message format", true, "Arbitrum"}, + {"forbidden sender address", true, "Arbitrum"}, + {"tx dropped due to L2 congestion", false, "Arbitrum"}, + {"execution reverted: error code", true, "Arbitrum"}, + {"execution reverted: stale report", true, "Arbitrum"}, + {"execution reverted", true, "Arbitrum"}, + + {"call failed: SenderIsContract", true, "Nethermind"}, + {"call failed: Invalid", true, "Nethermind"}, + {"call failed: Invalid, transaction Hash is null", true, "Nethermind"}, + {"call failed: Int256Overflow", true, "Nethermind"}, + {"call failed: FailedToResolveSender", true, "Nethermind"}, + {"call failed: GasLimitExceeded", true, "Nethermind"}, + {"call failed: GasLimitExceeded, Gas limit: 100, gas limit of rejected tx: 150", true, "Nethermind"}, + + {"invalid shard", true, "Harmony"}, + {"`to` address of transaction in blacklist", true, "Harmony"}, + {"`from` address of transaction in blacklist", true, "Harmony"}, + {"staking message does not match directive message", true, "Harmony"}, + + {"intrinsic gas too low", true, "zkSync"}, + {"failed to validate the transaction. reason: Validation revert: Account validation error: Not enough gas for transaction validation", true, "zkSync"}, + {"failed to validate the transaction. reason: Validation revert: Failed to pay for the transaction: Failed to pay the fee to the operator", true, "zkSync"}, + {"failed to validate the transaction. reason: Validation revert: Account validation error: Error function_selector = 0x, data = 0x", true, "zkSync"}, + {"invalid sender. 
can't start a transaction from a non-account", true, "zkSync"}, + {"Failed to serialize transaction: max fee per gas higher than 2^64-1", true, "zkSync"}, + {"Failed to serialize transaction: max fee per pubdata byte higher than 2^64-1", true, "zkSync"}, + {"Failed to serialize transaction: max priority fee per gas higher than 2^64-1", true, "zkSync"}, + {"Failed to serialize transaction: oversized data. max: 1000000; actual: 1000000", true, "zkSync"}, + } + + for _, test := range tests { + t.Run(test.message, func(t *testing.T) { + err := evmclient.NewSendError(errors.New(test.message)) + assert.Equal(t, test.expect, err.Fatal()) + }) + } +} diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go new file mode 100644 index 00000000..28fae408 --- /dev/null +++ b/core/chains/evm/client/helpers_test.go @@ -0,0 +1,139 @@ +package client + +import ( + "fmt" + "math/big" + "net/url" + "testing" + "time" + + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + commonclient "github.com/goplugin/pluginv3.0/v2/common/client" + commonconfig "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +type TestNodePoolConfig struct { + NodePollFailureThreshold uint32 + NodePollInterval time.Duration + NodeSelectionMode string + NodeSyncThreshold uint32 + NodeLeaseDuration time.Duration +} + +func (tc TestNodePoolConfig) PollFailureThreshold() uint32 { return tc.NodePollFailureThreshold } +func (tc TestNodePoolConfig) PollInterval() time.Duration { return tc.NodePollInterval } +func (tc TestNodePoolConfig) SelectionMode() string { return tc.NodeSelectionMode } +func (tc TestNodePoolConfig) SyncThreshold() uint32 { return tc.NodeSyncThreshold } +func (tc TestNodePoolConfig) LeaseDuration() time.Duration { + return tc.NodeLeaseDuration +} + +func NewClientWithTestNode(t *testing.T, nodePoolCfg 
config.NodePool, noNewHeadsThreshold time.Duration, rpcUrl string, rpcHTTPURL *url.URL, sendonlyRPCURLs []url.URL, id int32, chainID *big.Int) (*client, error) { + parsed, err := url.ParseRequestURI(rpcUrl) + if err != nil { + return nil, err + } + + if parsed.Scheme != "ws" && parsed.Scheme != "wss" { + return nil, errors.Errorf("ethereum url scheme must be websocket: %s", parsed.String()) + } + + lggr := logger.Sugared(logger.Test(t)) + n := NewNode(nodePoolCfg, noNewHeadsThreshold, lggr, *parsed, rpcHTTPURL, "eth-primary-0", id, chainID, 1) + n.(*node).setLatestReceived(0, big.NewInt(0)) + primaries := []Node{n} + + var sendonlys []SendOnlyNode + for i, url := range sendonlyRPCURLs { + if url.Scheme != "http" && url.Scheme != "https" { + return nil, errors.Errorf("sendonly ethereum rpc url scheme must be http(s): %s", url.String()) + } + s := NewSendOnlyNode(lggr, url, fmt.Sprintf("eth-sendonly-%d", i), chainID) + sendonlys = append(sendonlys, s) + } + + pool := NewPool(lggr, nodePoolCfg.SelectionMode(), nodePoolCfg.LeaseDuration(), noNewHeadsThreshold, primaries, sendonlys, chainID, "") + c := &client{logger: lggr, pool: pool} + t.Cleanup(c.Close) + return c, nil +} + +func Wrap(err error, s string) error { + return wrap(err, s) +} + +func NewChainClientWithTestNode( + t *testing.T, + nodeCfg commonclient.NodeConfig, + noNewHeadsThreshold time.Duration, + leaseDuration time.Duration, + rpcUrl string, + rpcHTTPURL *url.URL, + sendonlyRPCURLs []url.URL, + id int32, + chainID *big.Int, +) (Client, error) { + parsed, err := url.ParseRequestURI(rpcUrl) + if err != nil { + return nil, err + } + + if parsed.Scheme != "ws" && parsed.Scheme != "wss" { + return nil, errors.Errorf("ethereum url scheme must be websocket: %s", parsed.String()) + } + + lggr := logger.Test(t) + rpc := NewRPCClient(lggr, *parsed, rpcHTTPURL, "eth-primary-rpc-0", id, chainID, commonclient.Primary) + + n := commonclient.NewNode[*big.Int, *evmtypes.Head, RPCCLient]( + nodeCfg, 
noNewHeadsThreshold, lggr, *parsed, rpcHTTPURL, "eth-primary-node-0", id, chainID, 1, rpc, "EVM") + primaries := []commonclient.Node[*big.Int, *evmtypes.Head, RPCCLient]{n} + + var sendonlys []commonclient.SendOnlyNode[*big.Int, RPCCLient] + for i, u := range sendonlyRPCURLs { + if u.Scheme != "http" && u.Scheme != "https" { + return nil, errors.Errorf("sendonly ethereum rpc url scheme must be http(s): %s", u.String()) + } + var empty url.URL + rpc := NewRPCClient(lggr, empty, &sendonlyRPCURLs[i], fmt.Sprintf("eth-sendonly-rpc-%d", i), id, chainID, commonclient.Secondary) + s := commonclient.NewSendOnlyNode[*big.Int, RPCCLient]( + lggr, u, fmt.Sprintf("eth-sendonly-%d", i), chainID, rpc) + sendonlys = append(sendonlys, s) + } + + var chainType commonconfig.ChainType + c := NewChainClient(lggr, nodeCfg.SelectionMode(), leaseDuration, noNewHeadsThreshold, primaries, sendonlys, chainID, chainType) + t.Cleanup(c.Close) + return c, nil +} + +func NewChainClientWithEmptyNode( + t *testing.T, + selectionMode string, + leaseDuration time.Duration, + noNewHeadsThreshold time.Duration, + chainID *big.Int, +) Client { + + lggr := logger.Test(t) + + var chainType commonconfig.ChainType + c := NewChainClient(lggr, selectionMode, leaseDuration, noNewHeadsThreshold, nil, nil, chainID, chainType) + t.Cleanup(c.Close) + return c +} + +type TestableSendOnlyNode interface { + SendOnlyNode + SetEthClient(newBatchSender BatchSender, newSender TxSender) +} + +const HeadResult = 
`{"difficulty":"0xf3a00","extraData":"0xd883010503846765746887676f312e372e318664617277696e","gasLimit":"0xffc001","gasUsed":"0x0","hash":"0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0xd1aeb42885a43b72b518182ef893125814811048","mixHash":"0x0f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","nonce":"0x0ece08ea8c49dfd9","number":"0x1","parentHash":"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x218","stateRoot":"0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b","timestamp":"0x58318da2","totalDifficulty":"0x1f3a00","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}` + +func IsDialed(s SendOnlyNode) bool { + return s.(*sendOnlyNode).dialed +} diff --git a/core/chains/evm/client/mocks/batch_sender.go b/core/chains/evm/client/mocks/batch_sender.go new file mode 100644 index 00000000..3d65749b --- /dev/null +++ b/core/chains/evm/client/mocks/batch_sender.go @@ -0,0 +1,47 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + rpc "github.com/ethereum/go-ethereum/rpc" + mock "github.com/stretchr/testify/mock" +) + +// BatchSender is an autogenerated mock type for the BatchSender type +type BatchSender struct { + mock.Mock +} + +// BatchCallContext provides a mock function with given fields: ctx, b +func (_m *BatchSender) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + ret := _m.Called(ctx, b) + + if len(ret) == 0 { + panic("no return value specified for BatchCallContext") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []rpc.BatchElem) error); ok { + r0 = rf(ctx, b) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewBatchSender creates a new instance of BatchSender. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBatchSender(t interface { + mock.TestingT + Cleanup(func()) +}) *BatchSender { + mock := &BatchSender{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/client/mocks/client.go b/core/chains/evm/client/mocks/client.go new file mode 100644 index 00000000..3bdb596e --- /dev/null +++ b/core/chains/evm/client/mocks/client.go @@ -0,0 +1,972 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + assets "github.com/goplugin/plugin-common/pkg/assets" + + common "github.com/ethereum/go-ethereum/common" + + commonclient "github.com/goplugin/pluginv3.0/v2/common/client" + + context "context" + + ethereum "github.com/ethereum/go-ethereum" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + + mock "github.com/stretchr/testify/mock" + + rpc "github.com/ethereum/go-ethereum/rpc" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// Client is an autogenerated mock type for the Client type +type Client struct { + mock.Mock +} + +// BalanceAt provides a mock function with given fields: ctx, account, blockNumber +func (_m *Client) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { + ret := _m.Called(ctx, account, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for BalanceAt") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) (*big.Int, error)); ok { + return rf(ctx, account, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) *big.Int); ok { + r0 = rf(ctx, account, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, account, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BatchCallContext provides a mock function with given fields: ctx, b +func (_m *Client) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + ret := _m.Called(ctx, b) + + if len(ret) == 0 { + panic("no return value specified for BatchCallContext") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []rpc.BatchElem) error); ok { + r0 = rf(ctx, b) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BatchCallContextAll provides a mock function with given 
fields: ctx, b +func (_m *Client) BatchCallContextAll(ctx context.Context, b []rpc.BatchElem) error { + ret := _m.Called(ctx, b) + + if len(ret) == 0 { + panic("no return value specified for BatchCallContextAll") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []rpc.BatchElem) error); ok { + r0 = rf(ctx, b) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *Client) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for BlockByHash") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Block, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Block); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockByNumber provides a mock function with given fields: ctx, number +func (_m *Client) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for BlockByNumber") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Block, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Block); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CallContext provides a mock function with given fields: ctx, result, 
method, args +func (_m *Client) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + var _ca []interface{} + _ca = append(_ca, ctx, result, method) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CallContext") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string, ...interface{}) error); ok { + r0 = rf(ctx, result, method, args...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CallContract provides a mock function with given fields: ctx, msg, blockNumber +func (_m *Client) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, msg, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)); ok { + return rf(ctx, msg, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) []byte); ok { + r0 = rf(ctx, msg, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg, *big.Int) error); ok { + r1 = rf(ctx, msg, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ChainID provides a mock function with given fields: +func (_m *Client) ChainID() (*big.Int, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func() (*big.Int, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *big.Int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Close provides a 
mock function with given fields: +func (_m *Client) Close() { + _m.Called() +} + +// CodeAt provides a mock function with given fields: ctx, account, blockNumber +func (_m *Client) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, account, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]byte, error)); ok { + return rf(ctx, account, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []byte); ok { + r0 = rf(ctx, account, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, account, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ConfiguredChainID provides a mock function with given fields: +func (_m *Client) ConfiguredChainID() *big.Int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ConfiguredChainID") + } + + var r0 *big.Int + if rf, ok := ret.Get(0).(func() *big.Int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + return r0 +} + +// Dial provides a mock function with given fields: ctx +func (_m *Client) Dial(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Dial") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EstimateGas provides a mock function with given fields: ctx, call +func (_m *Client) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { + ret := _m.Called(ctx, call) + + if len(ret) == 0 { + panic("no return value specified for EstimateGas") + } + + var r0 uint64 + var 
r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) (uint64, error)); ok { + return rf(ctx, call) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) uint64); ok { + r0 = rf(ctx, call) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg) error); ok { + r1 = rf(ctx, call) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterLogs provides a mock function with given fields: ctx, q +func (_m *Client) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + ret := _m.Called(ctx, q) + + if len(ret) == 0 { + panic("no return value specified for FilterLogs") + } + + var r0 []types.Log + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]types.Log, error)); ok { + return rf(ctx, q) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []types.Log); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Log) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HeadByHash provides a mock function with given fields: ctx, n +func (_m *Client) HeadByHash(ctx context.Context, n common.Hash) (*evmtypes.Head, error) { + ret := _m.Called(ctx, n) + + if len(ret) == 0 { + panic("no return value specified for HeadByHash") + } + + var r0 *evmtypes.Head + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*evmtypes.Head, error)); ok { + return rf(ctx, n) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *evmtypes.Head); ok { + r0 = rf(ctx, n) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*evmtypes.Head) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, n) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HeadByNumber provides a mock 
function with given fields: ctx, n +func (_m *Client) HeadByNumber(ctx context.Context, n *big.Int) (*evmtypes.Head, error) { + ret := _m.Called(ctx, n) + + if len(ret) == 0 { + panic("no return value specified for HeadByNumber") + } + + var r0 *evmtypes.Head + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*evmtypes.Head, error)); ok { + return rf(ctx, n) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *evmtypes.Head); ok { + r0 = rf(ctx, n) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*evmtypes.Head) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, n) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HeaderByHash provides a mock function with given fields: ctx, h +func (_m *Client) HeaderByHash(ctx context.Context, h common.Hash) (*types.Header, error) { + ret := _m.Called(ctx, h) + + if len(ret) == 0 { + panic("no return value specified for HeaderByHash") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Header, error)); ok { + return rf(ctx, h) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Header); ok { + r0 = rf(ctx, h) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, h) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HeaderByNumber provides a mock function with given fields: ctx, n +func (_m *Client) HeaderByNumber(ctx context.Context, n *big.Int) (*types.Header, error) { + ret := _m.Called(ctx, n) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { + return rf(ctx, n) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { + r0 = rf(ctx, n) + 
} else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, n) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsL2 provides a mock function with given fields: +func (_m *Client) IsL2() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsL2") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// PLIBalance provides a mock function with given fields: ctx, address, linkAddress +func (_m *Client) PLIBalance(ctx context.Context, address common.Address, linkAddress common.Address) (*assets.Link, error) { + ret := _m.Called(ctx, address, linkAddress) + + if len(ret) == 0 { + panic("no return value specified for PLIBalance") + } + + var r0 *assets.Link + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address) (*assets.Link, error)); ok { + return rf(ctx, address, linkAddress) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address) *assets.Link); ok { + r0 = rf(ctx, address, linkAddress) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Link) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, common.Address) error); ok { + r1 = rf(ctx, address, linkAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestBlockHeight provides a mock function with given fields: ctx +func (_m *Client) LatestBlockHeight(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for LatestBlockHeight") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NodeStates provides a mock function with given fields: +func (_m *Client) NodeStates() map[string]string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for NodeStates") + } + + var r0 map[string]string + if rf, ok := ret.Get(0).(func() map[string]string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + return r0 +} + +// PendingCallContract provides a mock function with given fields: ctx, msg +func (_m *Client) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) { + ret := _m.Called(ctx, msg) + + if len(ret) == 0 { + panic("no return value specified for PendingCallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) ([]byte, error)); ok { + return rf(ctx, msg) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) []byte); ok { + r0 = rf(ctx, msg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg) error); ok { + r1 = rf(ctx, msg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PendingCodeAt provides a mock function with given fields: ctx, account +func (_m *Client) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingCodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) ([]byte, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) []byte); ok { + r0 = rf(ctx, account) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PendingNonceAt provides a mock function with given fields: ctx, account +func (_m *Client) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingNonceAt") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) (uint64, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) uint64); ok { + r0 = rf(ctx, account) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendTransaction provides a mock function with given fields: ctx, tx +func (_m *Client) SendTransaction(ctx context.Context, tx *types.Transaction) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SendTransactionReturnCode provides a mock function with given fields: ctx, tx, fromAddress +func (_m *Client) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error) { + ret := _m.Called(ctx, tx, fromAddress) + + if len(ret) == 0 { + panic("no return value specified for SendTransactionReturnCode") + } + + var r0 commonclient.SendTxReturnCode + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction, common.Address) (commonclient.SendTxReturnCode, error)); ok { + return rf(ctx, tx, fromAddress) + } + if rf, ok := ret.Get(0).(func(context.Context, 
*types.Transaction, common.Address) commonclient.SendTxReturnCode); ok { + r0 = rf(ctx, tx, fromAddress) + } else { + r0 = ret.Get(0).(commonclient.SendTxReturnCode) + } + + if rf, ok := ret.Get(1).(func(context.Context, *types.Transaction, common.Address) error); ok { + r1 = rf(ctx, tx, fromAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SequenceAt provides a mock function with given fields: ctx, account, blockNumber +func (_m *Client) SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (evmtypes.Nonce, error) { + ret := _m.Called(ctx, account, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for SequenceAt") + } + + var r0 evmtypes.Nonce + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) (evmtypes.Nonce, error)); ok { + return rf(ctx, account, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) evmtypes.Nonce); ok { + r0 = rf(ctx, account, blockNumber) + } else { + r0 = ret.Get(0).(evmtypes.Nonce) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, account, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch +func (_m *Client) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + ret := _m.Called(ctx, q, ch) + + if len(ret) == 0 { + panic("no return value specified for SubscribeFilterLogs") + } + + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)); ok { + return rf(ctx, q, ch) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) ethereum.Subscription); ok { + r0 = rf(ctx, q, ch) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) error); ok { + r1 = rf(ctx, q, ch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeNewHead provides a mock function with given fields: ctx, ch +func (_m *Client) SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) { + ret := _m.Called(ctx, ch) + + if len(ret) == 0 { + panic("no return value specified for SubscribeNewHead") + } + + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, chan<- *evmtypes.Head) (ethereum.Subscription, error)); ok { + return rf(ctx, ch) + } + if rf, ok := ret.Get(0).(func(context.Context, chan<- *evmtypes.Head) ethereum.Subscription); ok { + r0 = rf(ctx, ch) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, chan<- *evmtypes.Head) error); ok { + r1 = rf(ctx, ch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SuggestGasPrice provides a mock function with given fields: ctx +func (_m *Client) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SuggestGasPrice") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SuggestGasTipCap provides a mock function with given fields: ctx +func (_m *Client) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for 
SuggestGasTipCap") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TokenBalance provides a mock function with given fields: ctx, address, contractAddress +func (_m *Client) TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (*big.Int, error) { + ret := _m.Called(ctx, address, contractAddress) + + if len(ret) == 0 { + panic("no return value specified for TokenBalance") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address) (*big.Int, error)); ok { + return rf(ctx, address, contractAddress) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address) *big.Int); ok { + r0 = rf(ctx, address, contractAddress) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, common.Address) error); ok { + r1 = rf(ctx, address, contractAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionByHash provides a mock function with given fields: ctx, txHash +func (_m *Client) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, error) { + ret := _m.Called(ctx, txHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionByHash") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Transaction, error)); ok { + return rf(ctx, txHash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Transaction); ok { + r0 = rf(ctx, txHash) + } else { + if 
ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, txHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionReceipt provides a mock function with given fields: ctx, txHash +func (_m *Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { + ret := _m.Called(ctx, txHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionReceipt") + } + + var r0 *types.Receipt + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Receipt, error)); ok { + return rf(ctx, txHash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Receipt); ok { + r0 = rf(ctx, txHash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Receipt) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, txHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClient(t interface { + mock.TestingT + Cleanup(func()) +}) *Client { + mock := &Client{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/client/mocks/tx_sender.go b/core/chains/evm/client/mocks/tx_sender.go new file mode 100644 index 00000000..a769a786 --- /dev/null +++ b/core/chains/evm/client/mocks/tx_sender.go @@ -0,0 +1,80 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// TxSender is an autogenerated mock type for the TxSender type +type TxSender struct { + mock.Mock +} + +// ChainID provides a mock function with given fields: _a0 +func (_m *TxSender) ChainID(_a0 context.Context) (*big.Int, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendTransaction provides a mock function with given fields: ctx, tx +func (_m *TxSender) SendTransaction(ctx context.Context, tx *types.Transaction) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTxSender creates a new instance of TxSender. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+// NewTxSender creates a new mock instance registered with the test's
+// cleanup so expectations are asserted automatically when the test ends.
+func NewTxSender(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *TxSender {
+	mock := &TxSender{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/core/chains/evm/client/node.go b/core/chains/evm/client/node.go
new file mode 100644
index 00000000..44f7c91b
--- /dev/null
+++ b/core/chains/evm/client/node.go
@@ -0,0 +1,1164 @@
+package client
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"net/url"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/ethclient"
+	"github.com/ethereum/go-ethereum/rpc"
+	"github.com/google/uuid"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+
+	"github.com/goplugin/plugin-common/pkg/logger"
+	"github.com/goplugin/plugin-common/pkg/services"
+
+	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/config"
+	evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types"
+)
+
+// Prometheus counters/histograms tracking per-node RPC activity.
+// All metrics are labelled by chain ID and node name so multiple nodes in a
+// pool can be distinguished on a single dashboard.
+var (
+	promEVMPoolRPCNodeDials = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "evm_pool_rpc_node_dials_total",
+		Help: "The total number of dials for the given RPC node",
+	}, []string{"evmChainID", "nodeName"})
+	promEVMPoolRPCNodeDialsFailed = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "evm_pool_rpc_node_dials_failed",
+		Help: "The total number of failed dials for the given RPC node",
+	}, []string{"evmChainID", "nodeName"})
+	promEVMPoolRPCNodeDialsSuccess = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "evm_pool_rpc_node_dials_success",
+		Help: "The total number of successful dials for the given RPC node",
+	}, []string{"evmChainID", "nodeName"})
+	promEVMPoolRPCNodeVerifies = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "evm_pool_rpc_node_verifies",
+		Help: "The total number of chain ID verifications for the given RPC node",
+	}, []string{"evmChainID", "nodeName"})
+	promEVMPoolRPCNodeVerifiesFailed = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "evm_pool_rpc_node_verifies_failed",
+		Help: "The total number of failed chain ID verifications for the given RPC node",
+	}, []string{"evmChainID", "nodeName"})
+	promEVMPoolRPCNodeVerifiesSuccess = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "evm_pool_rpc_node_verifies_success",
+		Help: "The total number of successful chain ID verifications for the given RPC node",
+	}, []string{"evmChainID", "nodeName"})
+
+	// "approximate" because counts are incremented in logResult, which not
+	// every code path is guaranteed to reach (see Help strings below).
+	promEVMPoolRPCNodeCalls = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "evm_pool_rpc_node_calls_total",
+		Help: "The approximate total number of RPC calls for the given RPC node",
+	}, []string{"evmChainID", "nodeName"})
+	promEVMPoolRPCNodeCallsFailed = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "evm_pool_rpc_node_calls_failed",
+		Help: "The approximate total number of failed RPC calls for the given RPC node",
+	}, []string{"evmChainID", "nodeName"})
+	promEVMPoolRPCNodeCallsSuccess = promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "evm_pool_rpc_node_calls_success",
+		Help: "The approximate total number of successful RPC calls for the given RPC node",
+	}, []string{"evmChainID", "nodeName"})
+	// NOTE: buckets are expressed in nanoseconds (raw time.Duration values
+	// cast to float64), matching the Observe(float64(callDuration)) call site.
+	promEVMPoolRPCCallTiming = promauto.NewHistogramVec(prometheus.HistogramOpts{
+		Name: "evm_pool_rpc_node_rpc_call_time",
+		Help: "The duration of an RPC call in nanoseconds",
+		Buckets: []float64{
+			float64(50 * time.Millisecond),
+			float64(100 * time.Millisecond),
+			float64(200 * time.Millisecond),
+			float64(500 * time.Millisecond),
+			float64(1 * time.Second),
+			float64(2 * time.Second),
+			float64(4 * time.Second),
+			float64(8 * time.Second),
+		},
+	}, []string{"evmChainID", "nodeName", "rpcHost", "isSendOnly", "success", "rpcCallName"})
+)
+
+//go:generate mockery --quiet --name Node --output ../mocks/ --case=underscore
+
+// Node represents a client that connects to an ethereum-compatible RPC node
+//
+// Deprecated: use [pkg/github.com/goplugin/pluginv3.0/v2/common/client.Node]
+type Node interface {
+	Start(ctx context.Context) error
+	Close() error
+
+	// State returns NodeState
+	State() NodeState
+	// StateAndLatest returns NodeState with the latest received block number & total difficulty.
+	StateAndLatest() (state NodeState, blockNum int64, totalDifficulty *big.Int)
+	// Name is a unique identifier for this node.
+	Name() string
+	ChainID() *big.Int
+	Order() int32
+	SubscribersCount() int32
+	UnsubscribeAllExceptAliveLoop()
+
+	// RPC pass-throughs; see the corresponding go-ethereum client methods.
+	CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
+	BatchCallContext(ctx context.Context, b []rpc.BatchElem) error
+	SendTransaction(ctx context.Context, tx *types.Transaction) error
+	PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error)
+	PendingNonceAt(ctx context.Context, account common.Address) (uint64, error)
+	NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error)
+	TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
+	TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, error)
+	BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error)
+	BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error)
+	BlockNumber(ctx context.Context) (uint64, error)
+	BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error)
+	FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error)
+	SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error)
+	EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error)
+	SuggestGasPrice(ctx context.Context) (*big.Int, error)
+	CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error)
+	PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error)
+	CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error)
+	HeaderByNumber(context.Context, *big.Int) (*types.Header, error)
+	HeaderByHash(context.Context, common.Hash) (*types.Header, error)
+	SuggestGasTipCap(ctx context.Context) (*big.Int, error)
+	EthSubscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (ethereum.Subscription, error)
+
+	String() string
+}
+
+// rawclient bundles the two client views (raw RPC and geth wrapper) of a
+// single connection, together with the URI it was dialed from.
+type rawclient struct {
+	rpc  *rpc.Client
+	geth *ethclient.Client
+	uri  url.URL
+}
+
+// Node represents one ethereum node.
+// It must have a ws url and may have a http url
+type node struct {
+	services.StateMachine
+	lfcLog              logger.Logger
+	rpcLog              logger.SugaredLogger
+	name                string
+	id                  int32
+	chainID             *big.Int
+	nodePoolCfg         config.NodePool
+	noNewHeadsThreshold time.Duration
+	order               int32
+
+	// ws is always dialed; http is optional (nil when no HTTP URL configured).
+	ws   rawclient
+	http *rawclient
+
+	stateMu sync.RWMutex // protects state* fields
+	state   NodeState
+	// Each node is tracking the last received head number and total difficulty
+	stateLatestBlockNumber     int64
+	stateLatestTotalDifficulty *big.Int
+
+	// Need to track subscriptions because closing the RPC does not (always?)
+	// close the underlying subscription
+	subs []ethereum.Subscription
+
+	// Need to track the aliveLoop subscription, so we do not cancel it when checking lease
+	aliveLoopSub ethereum.Subscription
+
+	// chStopInFlight can be closed to immediately cancel all in-flight requests on
+	// this node. Closing and replacing should be serialized through
+	// stateMu since it can happen on state transitions as well as node Close.
+	chStopInFlight chan struct{}
+	// nodeCtx is the node lifetime's context
+	nodeCtx context.Context
+	// cancelNodeCtx cancels nodeCtx when stopping the node
+	cancelNodeCtx context.CancelFunc
+	// wg waits for subsidiary goroutines
+	wg sync.WaitGroup
+
+	// nLiveNodes is a passed in function that allows this node to:
+	// 1. see how many live nodes there are in total, so we can prevent the last alive node in a pool from being
+	// moved to out-of-sync state. It is better to have one out-of-sync node than no nodes at all.
+	// 2. compare against the highest head (by number or difficulty) to ensure we don't fall behind too far.
+	nLiveNodes func() (count int, blockNumber int64, totalDifficulty *big.Int)
+}
+
+// NewNode returns a new *node as Node
+//
+// Deprecated: use [pkg/github.com/goplugin/pluginv3.0/v2/common/client.NewNode]
+func NewNode(nodeCfg config.NodePool, noNewHeadsThreshold time.Duration, lggr logger.Logger, wsuri url.URL, httpuri *url.URL, name string, id int32, chainID *big.Int, nodeOrder int32) Node {
+	n := new(node)
+	n.name = name
+	n.id = id
+	n.chainID = chainID
+	n.nodePoolCfg = nodeCfg
+	n.noNewHeadsThreshold = noNewHeadsThreshold
+	n.ws.uri = wsuri
+	n.order = nodeOrder
+	if httpuri != nil {
+		n.http = &rawclient{uri: *httpuri}
+	}
+	n.chStopInFlight = make(chan struct{})
+	n.nodeCtx, n.cancelNodeCtx = context.WithCancel(context.Background())
+	// Decorate the logger with stable node identity fields.
+	lggr = logger.Named(lggr, "Node")
+	lggr = logger.With(lggr,
+		"nodeTier", "primary",
+		"nodeName", name,
+		"node", n.String(),
+		"evmChainID", chainID,
+		"nodeOrder", n.order,
+		"mode", n.getNodeMode(),
+	)
+	n.lfcLog = logger.Named(lggr, "Lifecycle")
+	n.rpcLog = logger.Sugared(lggr).Named("RPC")
+	// -1 signals "no block received yet".
+	n.stateLatestBlockNumber = -1
+
+	return n
+}
+
+// Start dials and verifies the node
+// Should only be called once in a node's lifecycle
+// Return value is necessary to conform to interface but this will never
+// actually return an error.
+func (n *node) Start(startCtx context.Context) error {
+	return n.StartOnce(n.name, func() error {
+		n.start(startCtx)
+		return nil
+	})
+}
+
+// start initially dials the node and verifies chain ID
+// This spins off lifecycle goroutines.
+// Not thread-safe.
+// Node lifecycle is synchronous: only one goroutine should be running at a
+// time.
+func (n *node) start(startCtx context.Context) { + if n.state != NodeStateUndialed { + panic(fmt.Sprintf("cannot dial node with state %v", n.state)) + } + + dialCtx, dialCancel := n.makeQueryCtx(startCtx) + defer dialCancel() + if err := n.dial(dialCtx); err != nil { + n.lfcLog.Errorw("Dial failed: EVM Node is unreachable", "err", err) + n.declareUnreachable() + return + } + n.setState(NodeStateDialed) + + verifyCtx, verifyCancel := n.makeQueryCtx(startCtx) + defer verifyCancel() + if err := n.verify(verifyCtx); errors.Is(err, errInvalidChainID) { + n.lfcLog.Errorw("Verify failed: EVM Node has the wrong chain ID", "err", err) + n.declareInvalidChainID() + return + } else if err != nil { + n.lfcLog.Errorw(fmt.Sprintf("Verify failed: %v", err), "err", err) + n.declareUnreachable() + return + } + + n.declareAlive() +} + +// Not thread-safe +// Pure dial: does not mutate node "state" field. +func (n *node) dial(callerCtx context.Context) error { + ctx, cancel := n.makeQueryCtx(callerCtx) + defer cancel() + + promEVMPoolRPCNodeDials.WithLabelValues(n.chainID.String(), n.name).Inc() + lggr := logger.With(n.lfcLog, "wsuri", n.ws.uri.Redacted()) + if n.http != nil { + lggr = logger.With(lggr, "httpuri", n.http.uri.Redacted()) + } + lggr.Debugw("RPC dial: evmclient.Client#dial") + + wsrpc, err := rpc.DialWebsocket(ctx, n.ws.uri.String(), "") + if err != nil { + promEVMPoolRPCNodeDialsFailed.WithLabelValues(n.chainID.String(), n.name).Inc() + return errors.Wrapf(err, "error while dialing websocket: %v", n.ws.uri.Redacted()) + } + + var httprpc *rpc.Client + if n.http != nil { + httprpc, err = rpc.DialHTTP(n.http.uri.String()) + if err != nil { + promEVMPoolRPCNodeDialsFailed.WithLabelValues(n.chainID.String(), n.name).Inc() + return errors.Wrapf(err, "error while dialing HTTP: %v", n.http.uri.Redacted()) + } + } + + n.ws.rpc = wsrpc + n.ws.geth = ethclient.NewClient(wsrpc) + + if n.http != nil { + n.http.rpc = httprpc + n.http.geth = ethclient.NewClient(httprpc) + } + + 
promEVMPoolRPCNodeDialsSuccess.WithLabelValues(n.chainID.String(), n.name).Inc() + + return nil +} + +var errInvalidChainID = errors.New("invalid chain id") + +// verify checks that all connections to eth nodes match the given chain ID +// Not thread-safe +// Pure verify: does not mutate node "state" field. +func (n *node) verify(callerCtx context.Context) (err error) { + ctx, cancel := n.makeQueryCtx(callerCtx) + defer cancel() + + promEVMPoolRPCNodeVerifies.WithLabelValues(n.chainID.String(), n.name).Inc() + promFailed := func() { + promEVMPoolRPCNodeVerifiesFailed.WithLabelValues(n.chainID.String(), n.name).Inc() + } + + st := n.State() + switch st { + case NodeStateDialed, NodeStateOutOfSync, NodeStateInvalidChainID: + default: + panic(fmt.Sprintf("cannot verify node in state %v", st)) + } + + var chainID *big.Int + if chainID, err = n.ws.geth.ChainID(ctx); err != nil { + promFailed() + return errors.Wrapf(err, "failed to verify chain ID for node %s", n.name) + } else if chainID.Cmp(n.chainID) != 0 { + promFailed() + return errors.Wrapf( + errInvalidChainID, + "websocket rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s", + chainID.String(), + n.chainID.String(), + n.name, + ) + } + if n.http != nil { + if chainID, err = n.http.geth.ChainID(ctx); err != nil { + promFailed() + return errors.Wrapf(err, "failed to verify chain ID for node %s", n.name) + } else if chainID.Cmp(n.chainID) != 0 { + promFailed() + return errors.Wrapf( + errInvalidChainID, + "http rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s", + chainID.String(), + n.chainID.String(), + n.name, + ) + } + } + + promEVMPoolRPCNodeVerifiesSuccess.WithLabelValues(n.chainID.String(), n.name).Inc() + + return nil +} + +func (n *node) Close() error { + return n.StopOnce(n.name, func() error { + defer func() { + n.wg.Wait() + if n.ws.rpc != nil { + n.ws.rpc.Close() + } + }() + + n.stateMu.Lock() + defer n.stateMu.Unlock() + + n.cancelNodeCtx() + 
n.cancelInflightRequests() + n.state = NodeStateClosed + return nil + }) +} + +// registerSub adds the sub to the node list +func (n *node) registerSub(sub ethereum.Subscription) { + n.stateMu.Lock() + defer n.stateMu.Unlock() + n.subs = append(n.subs, sub) +} + +// disconnectAll disconnects all clients connected to the node +// WARNING: NOT THREAD-SAFE +// This must be called from within the n.stateMu lock +func (n *node) disconnectAll() { + if n.ws.rpc != nil { + n.ws.rpc.Close() + } + n.cancelInflightRequests() + n.unsubscribeAll() +} + +// SubscribersCount returns the number of client subscribed to the node +func (n *node) SubscribersCount() int32 { + n.stateMu.RLock() + defer n.stateMu.RUnlock() + return int32(len(n.subs)) +} + +// UnsubscribeAllExceptAliveLoop disconnects all subscriptions to the node except the alive loop subscription +// while holding the n.stateMu lock +func (n *node) UnsubscribeAllExceptAliveLoop() { + n.stateMu.Lock() + defer n.stateMu.Unlock() + + for _, s := range n.subs { + if s != n.aliveLoopSub { + s.Unsubscribe() + } + } +} + +// cancelInflightRequests closes and replaces the chStopInFlight +// WARNING: NOT THREAD-SAFE +// This must be called from within the n.stateMu lock +func (n *node) cancelInflightRequests() { + close(n.chStopInFlight) + n.chStopInFlight = make(chan struct{}) +} + +// unsubscribeAll unsubscribes all subscriptions +// WARNING: NOT THREAD-SAFE +// This must be called from within the n.stateMu lock +func (n *node) unsubscribeAll() { + for _, sub := range n.subs { + sub.Unsubscribe() + } + n.subs = nil +} + +// getChStopInflight provides a convenience helper that mutex wraps a +// read to the chStopInFlight +func (n *node) getChStopInflight() chan struct{} { + n.stateMu.RLock() + defer n.stateMu.RUnlock() + return n.chStopInFlight +} + +func (n *node) getRPCDomain() string { + if n.http != nil { + return n.http.uri.Host + } + return n.ws.uri.Host +} + +// RPC wrappers + +// CallContext implementation +func (n 
*node) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return err + } + defer cancel() + lggr := n.newRqLggr().With( + "method", method, + "args", args, + ) + + lggr.Debug("RPC call: evmclient.Client#CallContext") + start := time.Now() + if http != nil { + err = n.wrapHTTP(http.rpc.CallContext(ctx, result, method, args...)) + } else { + err = n.wrapWS(ws.rpc.CallContext(ctx, result, method, args...)) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "CallContext") + + return err +} + +func (n *node) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return err + } + defer cancel() + lggr := n.newRqLggr().With("nBatchElems", len(b), "batchElems", b) + + lggr.Trace("RPC call: evmclient.Client#BatchCallContext") + start := time.Now() + if http != nil { + err = n.wrapHTTP(http.rpc.BatchCallContext(ctx, b)) + } else { + err = n.wrapWS(ws.rpc.BatchCallContext(ctx, b)) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "BatchCallContext") + + return err +} + +func (n *node) EthSubscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (ethereum.Subscription, error) { + ctx, cancel, ws, _, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("args", args) + + lggr.Debug("RPC call: evmclient.Client#EthSubscribe") + start := time.Now() + sub, err := ws.rpc.EthSubscribe(ctx, channel, args...) 
+ if err == nil { + n.registerSub(sub) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "EthSubscribe") + + return sub, err +} + +// GethClient wrappers + +func (n *node) TransactionReceipt(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("txHash", txHash) + + lggr.Debug("RPC call: evmclient.Client#TransactionReceipt") + + start := time.Now() + if http != nil { + receipt, err = http.geth.TransactionReceipt(ctx, txHash) + err = n.wrapHTTP(err) + } else { + receipt, err = ws.geth.TransactionReceipt(ctx, txHash) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "TransactionReceipt", + "receipt", receipt, + ) + + return +} + +func (n *node) TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("txHash", txHash) + + lggr.Debug("RPC call: evmclient.Client#TransactionByHash") + + start := time.Now() + if http != nil { + tx, _, err = http.geth.TransactionByHash(ctx, txHash) + err = n.wrapHTTP(err) + } else { + tx, _, err = ws.geth.TransactionByHash(ctx, txHash) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "TransactionByHash", + "receipt", tx, + ) + + return +} + +func (n *node) HeaderByNumber(ctx context.Context, number *big.Int) (header *types.Header, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("number", number) + + lggr.Debug("RPC call: evmclient.Client#HeaderByNumber") + start := time.Now() + 
if http != nil { + header, err = http.geth.HeaderByNumber(ctx, number) + err = n.wrapHTTP(err) + } else { + header, err = ws.geth.HeaderByNumber(ctx, number) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "HeaderByNumber", "header", header) + + return +} + +func (n *node) HeaderByHash(ctx context.Context, hash common.Hash) (header *types.Header, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("hash", hash) + + lggr.Debug("RPC call: evmclient.Client#HeaderByHash") + start := time.Now() + if http != nil { + header, err = http.geth.HeaderByHash(ctx, hash) + err = n.wrapHTTP(err) + } else { + header, err = ws.geth.HeaderByHash(ctx, hash) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "HeaderByHash", + "header", header, + ) + + return +} + +func (n *node) SendTransaction(ctx context.Context, tx *types.Transaction) error { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return err + } + defer cancel() + lggr := n.newRqLggr().With("tx", tx) + + lggr.Debug("RPC call: evmclient.Client#SendTransaction") + start := time.Now() + if http != nil { + err = n.wrapHTTP(http.geth.SendTransaction(ctx, tx)) + } else { + err = n.wrapWS(ws.geth.SendTransaction(ctx, tx)) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "SendTransaction") + + return err +} + +// PendingNonceAt returns one higher than the highest nonce from both mempool and mined transactions +func (n *node) PendingNonceAt(ctx context.Context, account common.Address) (nonce uint64, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return 0, err + } + defer cancel() + lggr := n.newRqLggr().With("account", account) + + 
lggr.Debug("RPC call: evmclient.Client#PendingNonceAt") + start := time.Now() + if http != nil { + nonce, err = http.geth.PendingNonceAt(ctx, account) + err = n.wrapHTTP(err) + } else { + nonce, err = ws.geth.PendingNonceAt(ctx, account) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "PendingNonceAt", + "nonce", nonce, + ) + + return +} + +// NonceAt is a bit of a misnomer. You might expect it to return the highest +// mined nonce at the given block number, but it actually returns the total +// transaction count which is the highest mined nonce + 1 +func (n *node) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (nonce uint64, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return 0, err + } + defer cancel() + lggr := n.newRqLggr().With("account", account, "blockNumber", blockNumber) + + lggr.Debug("RPC call: evmclient.Client#NonceAt") + start := time.Now() + if http != nil { + nonce, err = http.geth.NonceAt(ctx, account, blockNumber) + err = n.wrapHTTP(err) + } else { + nonce, err = ws.geth.NonceAt(ctx, account, blockNumber) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "NonceAt", + "nonce", nonce, + ) + + return +} + +func (n *node) PendingCodeAt(ctx context.Context, account common.Address) (code []byte, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("account", account) + + lggr.Debug("RPC call: evmclient.Client#PendingCodeAt") + start := time.Now() + if http != nil { + code, err = http.geth.PendingCodeAt(ctx, account) + err = n.wrapHTTP(err) + } else { + code, err = ws.geth.PendingCodeAt(ctx, account) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), 
"PendingCodeAt", + "code", code, + ) + + return +} + +func (n *node) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) (code []byte, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("account", account, "blockNumber", blockNumber) + + lggr.Debug("RPC call: evmclient.Client#CodeAt") + start := time.Now() + if http != nil { + code, err = http.geth.CodeAt(ctx, account, blockNumber) + err = n.wrapHTTP(err) + } else { + code, err = ws.geth.CodeAt(ctx, account, blockNumber) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "CodeAt", + "code", code, + ) + + return +} + +func (n *node) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return 0, err + } + defer cancel() + lggr := n.newRqLggr().With("call", call) + + lggr.Debug("RPC call: evmclient.Client#EstimateGas") + start := time.Now() + if http != nil { + gas, err = http.geth.EstimateGas(ctx, call) + err = n.wrapHTTP(err) + } else { + gas, err = ws.geth.EstimateGas(ctx, call) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "EstimateGas", + "gas", gas, + ) + + return +} + +func (n *node) SuggestGasPrice(ctx context.Context) (price *big.Int, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr() + + lggr.Debug("RPC call: evmclient.Client#SuggestGasPrice") + start := time.Now() + if http != nil { + price, err = http.geth.SuggestGasPrice(ctx) + err = n.wrapHTTP(err) + } else { + price, err = ws.geth.SuggestGasPrice(ctx) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, 
n.getRPCDomain(), "SuggestGasPrice", + "price", price, + ) + + return +} + +func (n *node) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) (val []byte, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("callMsg", msg, "blockNumber", blockNumber) + + lggr.Debug("RPC call: evmclient.Client#CallContract") + start := time.Now() + if http != nil { + val, err = http.geth.CallContract(ctx, msg, blockNumber) + err = n.wrapHTTP(err) + } else { + val, err = ws.geth.CallContract(ctx, msg, blockNumber) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "CallContract", + "val", val, + ) + + return + +} + +func (n *node) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) (val []byte, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("callMsg", msg) + + lggr.Debug("RPC call: evmclient.Client#PendingCallContract") + start := time.Now() + if http != nil { + val, err = http.geth.PendingCallContract(ctx, msg) + err = n.wrapHTTP(err) + } else { + val, err = ws.geth.PendingCallContract(ctx, msg) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "PendingCallContract", + "val", val, + ) + + return + +} + +func (n *node) BlockByNumber(ctx context.Context, number *big.Int) (b *types.Block, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("number", number) + + lggr.Debug("RPC call: evmclient.Client#BlockByNumber") + start := time.Now() + if http != nil { + b, err = http.geth.BlockByNumber(ctx, number) + err = n.wrapHTTP(err) + } else { + b, err = 
ws.geth.BlockByNumber(ctx, number) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "BlockByNumber", + "block", b, + ) + + return +} + +func (n *node) BlockByHash(ctx context.Context, hash common.Hash) (b *types.Block, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("hash", hash) + + lggr.Debug("RPC call: evmclient.Client#BlockByHash") + start := time.Now() + if http != nil { + b, err = http.geth.BlockByHash(ctx, hash) + err = n.wrapHTTP(err) + } else { + b, err = ws.geth.BlockByHash(ctx, hash) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "BlockByHash", + "block", b, + ) + + return +} + +func (n *node) BlockNumber(ctx context.Context) (height uint64, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return 0, err + } + defer cancel() + lggr := n.newRqLggr() + + lggr.Debug("RPC call: evmclient.Client#BlockNumber") + start := time.Now() + if http != nil { + height, err = http.geth.BlockNumber(ctx) + err = n.wrapHTTP(err) + } else { + height, err = ws.geth.BlockNumber(ctx) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "BlockNumber", + "height", height, + ) + + return +} + +func (n *node) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (balance *big.Int, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("account", account.Hex(), "blockNumber", blockNumber) + + lggr.Debug("RPC call: evmclient.Client#BalanceAt") + start := time.Now() + if http != nil { + balance, err = http.geth.BalanceAt(ctx, account, blockNumber) + err = n.wrapHTTP(err) + } else { + 
balance, err = ws.geth.BalanceAt(ctx, account, blockNumber) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "BalanceAt", + "balance", balance, + ) + + return +} + +func (n *node) FilterLogs(ctx context.Context, q ethereum.FilterQuery) (l []types.Log, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("q", q) + + lggr.Debug("RPC call: evmclient.Client#FilterLogs") + start := time.Now() + if http != nil { + l, err = http.geth.FilterLogs(ctx, q) + err = n.wrapHTTP(err) + } else { + l, err = ws.geth.FilterLogs(ctx, q) + err = n.wrapWS(err) + } + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "FilterLogs", + "log", l, + ) + + return +} + +func (n *node) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (sub ethereum.Subscription, err error) { + ctx, cancel, ws, _, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr().With("q", q) + + lggr.Debug("RPC call: evmclient.Client#SubscribeFilterLogs") + start := time.Now() + sub, err = ws.geth.SubscribeFilterLogs(ctx, q, ch) + if err == nil { + n.registerSub(sub) + } + err = n.wrapWS(err) + duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "SubscribeFilterLogs") + + return +} + +func (n *node) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, err error) { + ctx, cancel, ws, http, err := n.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := n.newRqLggr() + + lggr.Debug("RPC call: evmclient.Client#SuggestGasTipCap") + start := time.Now() + if http != nil { + tipCap, err = http.geth.SuggestGasTipCap(ctx) + err = n.wrapHTTP(err) + } else { + tipCap, err = ws.geth.SuggestGasTipCap(ctx) + err = n.wrapWS(err) + } 
+ duration := time.Since(start) + + n.logResult(lggr, err, duration, n.getRPCDomain(), "SuggestGasTipCap", + "tipCap", tipCap, + ) + + return +} + +func (n *node) ChainID() (chainID *big.Int) { return n.chainID } + +// newRqLggr generates a new logger with a unique request ID +func (n *node) newRqLggr() logger.SugaredLogger { + return n.rpcLog.With("requestID", uuid.New()) +} + +func (n *node) logResult( + lggr logger.Logger, + err error, + callDuration time.Duration, + rpcDomain, + callName string, + results ...interface{}, +) { + slggr := logger.Sugared(lggr).With("duration", callDuration, "rpcDomain", rpcDomain, "callName", callName) + promEVMPoolRPCNodeCalls.WithLabelValues(n.chainID.String(), n.name).Inc() + if err == nil { + promEVMPoolRPCNodeCallsSuccess.WithLabelValues(n.chainID.String(), n.name).Inc() + slggr.Tracew(fmt.Sprintf("evmclient.Client#%s RPC call success", callName), results...) + } else { + promEVMPoolRPCNodeCallsFailed.WithLabelValues(n.chainID.String(), n.name).Inc() + slggr.Debugw( + fmt.Sprintf("evmclient.Client#%s RPC call failure", callName), + append(results, "err", err)..., + ) + } + promEVMPoolRPCCallTiming. + WithLabelValues( + n.chainID.String(), // chain id + n.name, // node name + rpcDomain, // rpc domain + "false", // is send only + strconv.FormatBool(err == nil), // is successful + callName, // rpc call name + ). 
+ Observe(float64(callDuration)) +} + +func (n *node) wrapWS(err error) error { + err = wrap(err, fmt.Sprintf("primary websocket (%s)", n.ws.uri.Redacted())) + return err +} + +func (n *node) wrapHTTP(err error) error { + err = wrap(err, fmt.Sprintf("primary http (%s)", n.http.uri.Redacted())) + if err != nil { + n.rpcLog.Debugw("Call failed", "err", err) + } else { + n.rpcLog.Trace("Call succeeded") + } + return err +} + +func wrap(err error, tp string) error { + if err == nil { + return nil + } + if errors.Cause(err).Error() == "context deadline exceeded" { + err = errors.Wrap(err, "remote eth node timed out") + } + return errors.Wrapf(err, "%s call failed", tp) +} + +// makeLiveQueryCtxAndSafeGetClients wraps makeQueryCtx but returns error if node is not NodeStateAlive. +func (n *node) makeLiveQueryCtxAndSafeGetClients(parentCtx context.Context) (ctx context.Context, cancel context.CancelFunc, ws rawclient, http *rawclient, err error) { + // Need to wrap in mutex because state transition can cancel and replace the + // context + n.stateMu.RLock() + if n.state != NodeStateAlive { + err = errors.Errorf("cannot execute RPC call on node with state: %s", n.state) + n.stateMu.RUnlock() + return + } + cancelCh := n.chStopInFlight + ws = n.ws + if n.http != nil { + cp := *n.http + http = &cp + } + n.stateMu.RUnlock() + ctx, cancel = makeQueryCtx(parentCtx, cancelCh) + return +} + +func (n *node) makeQueryCtx(ctx context.Context) (context.Context, context.CancelFunc) { + return makeQueryCtx(ctx, n.getChStopInflight()) +} + +// makeQueryCtx returns a context that cancels if: +// 1. Passed in ctx cancels +// 2. Passed in channel is closed +// 3. 
Default timeout is reached (queryTimeout) +func makeQueryCtx(ctx context.Context, ch services.StopChan) (context.Context, context.CancelFunc) { + var chCancel, timeoutCancel context.CancelFunc + ctx, chCancel = ch.Ctx(ctx) + ctx, timeoutCancel = context.WithTimeout(ctx, queryTimeout) + cancel := func() { + chCancel() + timeoutCancel() + } + return ctx, cancel +} + +func (n *node) getNodeMode() string { + if n.http != nil { + return "http" + } + return "websocket" +} + +func (n *node) String() string { + s := fmt.Sprintf("(primary)%s:%s", n.name, n.ws.uri.Redacted()) + if n.http != nil { + s = s + fmt.Sprintf(":%s", n.http.uri.Redacted()) + } + return s +} + +func (n *node) Name() string { + return n.name +} + +func (n *node) Order() int32 { + return n.order +} diff --git a/core/chains/evm/client/node_fsm.go b/core/chains/evm/client/node_fsm.go new file mode 100644 index 00000000..c92af3b4 --- /dev/null +++ b/core/chains/evm/client/node_fsm.go @@ -0,0 +1,259 @@ +package client + +import ( + "fmt" + "math/big" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var ( + promEVMPoolRPCNodeTransitionsToAlive = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "evm_pool_rpc_node_num_transitions_to_alive", + Help: fmt.Sprintf("Total number of times node has transitioned to %s", NodeStateAlive), + }, []string{"evmChainID", "nodeName"}) + promEVMPoolRPCNodeTransitionsToInSync = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "evm_pool_rpc_node_num_transitions_to_in_sync", + Help: fmt.Sprintf("Total number of times node has transitioned from %s to %s", NodeStateOutOfSync, NodeStateAlive), + }, []string{"evmChainID", "nodeName"}) + promEVMPoolRPCNodeTransitionsToOutOfSync = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "evm_pool_rpc_node_num_transitions_to_out_of_sync", + Help: fmt.Sprintf("Total number of times node has transitioned to %s", NodeStateOutOfSync), + }, []string{"evmChainID", 
"nodeName"}) + promEVMPoolRPCNodeTransitionsToUnreachable = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "evm_pool_rpc_node_num_transitions_to_unreachable", + Help: fmt.Sprintf("Total number of times node has transitioned to %s", NodeStateUnreachable), + }, []string{"evmChainID", "nodeName"}) + promEVMPoolRPCNodeTransitionsToInvalidChainID = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "evm_pool_rpc_node_num_transitions_to_invalid_chain_id", + Help: fmt.Sprintf("Total number of times node has transitioned to %s", NodeStateInvalidChainID), + }, []string{"evmChainID", "nodeName"}) + promEVMPoolRPCNodeTransitionsToUnusable = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "evm_pool_rpc_node_num_transitions_to_unusable", + Help: fmt.Sprintf("Total number of times node has transitioned to %s", NodeStateUnusable), + }, []string{"evmChainID", "nodeName"}) +) + +// NodeState represents the current state of the node +// Node is a FSM (finite state machine) +// +// Deprecated: to be removed. 
It is now internal in common/client +type NodeState int + +func (n NodeState) String() string { + switch n { + case NodeStateUndialed: + return "Undialed" + case NodeStateDialed: + return "Dialed" + case NodeStateInvalidChainID: + return "InvalidChainID" + case NodeStateAlive: + return "Alive" + case NodeStateUnreachable: + return "Unreachable" + case NodeStateUnusable: + return "Unusable" + case NodeStateOutOfSync: + return "OutOfSync" + case NodeStateClosed: + return "Closed" + default: + return fmt.Sprintf("NodeState(%d)", n) + } +} + +// GoString prints a prettier state +func (n NodeState) GoString() string { + return fmt.Sprintf("NodeState%s(%d)", n.String(), n) +} + +const ( + // NodeStateUndialed is the first state of a virgin node + NodeStateUndialed = NodeState(iota) + // NodeStateDialed is after a node has successfully dialed but before it has verified the correct chain ID + NodeStateDialed + // NodeStateInvalidChainID is after chain ID verification failed + NodeStateInvalidChainID + // NodeStateAlive is a healthy node after chain ID verification succeeded + NodeStateAlive + // NodeStateUnreachable is a node that cannot be dialed or has disconnected + NodeStateUnreachable + // NodeStateOutOfSync is a node that is accepting connections but exceeded + // the failure threshold without sending any new heads. 
It will be + // disconnected, then put into a revive loop and re-awakened after redial + // if a new head arrives + NodeStateOutOfSync + // NodeStateUnusable is a sendonly node that has an invalid URL that can never be reached + NodeStateUnusable + // NodeStateClosed is after the connection has been closed and the node is at the end of its lifecycle + NodeStateClosed + // nodeStateLen tracks the number of states + nodeStateLen +) + +// allNodeStates represents all possible states a node can be in +var allNodeStates []NodeState + +func init() { + for s := NodeState(0); s < nodeStateLen; s++ { + allNodeStates = append(allNodeStates, s) + } +} + +// FSM methods + +// State allows reading the current state of the node. +func (n *node) State() NodeState { + n.stateMu.RLock() + defer n.stateMu.RUnlock() + return n.state +} + +func (n *node) StateAndLatest() (NodeState, int64, *big.Int) { + n.stateMu.RLock() + defer n.stateMu.RUnlock() + return n.state, n.stateLatestBlockNumber, n.stateLatestTotalDifficulty +} + +// setState is only used by internal state management methods. +// This is low-level; care should be taken by the caller to ensure the new state is a valid transition. +// State changes should always be synchronous: only one goroutine at a time should change state. 
+// n.stateMu should not be locked for long periods of time because external clients expect a timely response from n.State() +func (n *node) setState(s NodeState) { + n.stateMu.Lock() + defer n.stateMu.Unlock() + n.state = s +} + +// declareXXX methods change the state and pass conrol off the new state +// management goroutine + +func (n *node) declareAlive() { + n.transitionToAlive(func() { + n.lfcLog.Infow("RPC Node is online", "nodeState", n.state) + n.wg.Add(1) + go n.aliveLoop() + }) +} + +func (n *node) transitionToAlive(fn func()) { + promEVMPoolRPCNodeTransitionsToAlive.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == NodeStateClosed { + return + } + switch n.state { + case NodeStateDialed, NodeStateInvalidChainID: + n.state = NodeStateAlive + default: + panic(fmt.Sprintf("cannot transition from %#v to %#v", n.state, NodeStateAlive)) + } + fn() +} + +// declareInSync puts a node back into Alive state, allowing it to be used by +// pool consumers again +func (n *node) declareInSync() { + n.transitionToInSync(func() { + n.lfcLog.Infow("RPC Node is back in sync", "nodeState", n.state) + n.wg.Add(1) + go n.aliveLoop() + }) +} + +func (n *node) transitionToInSync(fn func()) { + promEVMPoolRPCNodeTransitionsToAlive.WithLabelValues(n.chainID.String(), n.name).Inc() + promEVMPoolRPCNodeTransitionsToInSync.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == NodeStateClosed { + return + } + switch n.state { + case NodeStateOutOfSync: + n.state = NodeStateAlive + default: + panic(fmt.Sprintf("cannot transition from %#v to %#v", n.state, NodeStateAlive)) + } + fn() +} + +// declareOutOfSync puts a node into OutOfSync state, disconnecting all current +// clients and making it unavailable for use until back in-sync. 
+func (n *node) declareOutOfSync(isOutOfSync func(num int64, td *big.Int) bool) { + n.transitionToOutOfSync(func() { + n.lfcLog.Errorw("RPC Node is out of sync", "nodeState", n.state) + n.wg.Add(1) + go n.outOfSyncLoop(isOutOfSync) + }) +} + +func (n *node) transitionToOutOfSync(fn func()) { + promEVMPoolRPCNodeTransitionsToOutOfSync.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == NodeStateClosed { + return + } + switch n.state { + case NodeStateAlive: + n.disconnectAll() + n.state = NodeStateOutOfSync + default: + panic(fmt.Sprintf("cannot transition from %#v to %#v", n.state, NodeStateOutOfSync)) + } + fn() +} + +func (n *node) declareUnreachable() { + n.transitionToUnreachable(func() { + n.lfcLog.Errorw("RPC Node is unreachable", "nodeState", n.state) + n.wg.Add(1) + go n.unreachableLoop() + }) +} + +func (n *node) transitionToUnreachable(fn func()) { + promEVMPoolRPCNodeTransitionsToUnreachable.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == NodeStateClosed { + return + } + switch n.state { + case NodeStateUndialed, NodeStateDialed, NodeStateAlive, NodeStateOutOfSync, NodeStateInvalidChainID: + n.disconnectAll() + n.state = NodeStateUnreachable + default: + panic(fmt.Sprintf("cannot transition from %#v to %#v", n.state, NodeStateUnreachable)) + } + fn() +} + +func (n *node) declareInvalidChainID() { + n.transitionToInvalidChainID(func() { + n.lfcLog.Errorw("RPC Node has the wrong chain ID", "nodeState", n.state) + n.wg.Add(1) + go n.invalidChainIDLoop() + }) +} + +func (n *node) transitionToInvalidChainID(fn func()) { + promEVMPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(n.chainID.String(), n.name).Inc() + n.stateMu.Lock() + defer n.stateMu.Unlock() + if n.state == NodeStateClosed { + return + } + switch n.state { + case NodeStateDialed, NodeStateOutOfSync: + n.disconnectAll() + n.state = NodeStateInvalidChainID + default: + 
panic(fmt.Sprintf("cannot transition from %#v to %#v", n.state, NodeStateInvalidChainID)) + } + fn() +} diff --git a/core/chains/evm/client/node_fsm_test.go b/core/chains/evm/client/node_fsm_test.go new file mode 100644 index 00000000..fbe214dd --- /dev/null +++ b/core/chains/evm/client/node_fsm_test.go @@ -0,0 +1,173 @@ +package client + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +type fnMock struct{ calls int } + +func (fm *fnMock) Fn() { + fm.calls++ +} + +func (fm *fnMock) AssertNotCalled(t *testing.T) { + assert.Equal(t, 0, fm.calls) +} + +func (fm *fnMock) AssertCalled(t *testing.T) { + assert.Greater(t, fm.calls, 0) +} + +func (fm *fnMock) AssertNumberOfCalls(t *testing.T, n int) { + assert.Equal(t, n, fm.calls) +} + +var _ ethereum.Subscription = (*subMock)(nil) + +type subMock struct{ unsubbed bool } + +func (s *subMock) Unsubscribe() { + s.unsubbed = true +} +func (s *subMock) Err() <-chan error { return nil } + +func TestUnit_Node_StateTransitions(t *testing.T) { + t.Parallel() + + s := testutils.NewWSServer(t, testutils.FixtureChainID, nil) + iN := NewNode(TestNodePoolConfig{}, time.Second*0, logger.Test(t), *s.WSURL(), nil, "test node", 42, nil, 1) + n := iN.(*node) + + assert.Equal(t, NodeStateUndialed, n.State()) + + t.Run("setState", func(t *testing.T) { + n.setState(NodeStateAlive) + assert.Equal(t, NodeStateAlive, n.State()) + n.setState(NodeStateUndialed) + assert.Equal(t, NodeStateUndialed, n.State()) + }) + + // must dial to set rpc client for use in state transitions + err := n.dial(testutils.Context(t)) + require.NoError(t, err) + + t.Run("transitionToAlive", func(t *testing.T) { + m := new(fnMock) + assert.Panics(t, func() { + n.transitionToAlive(m.Fn) + }) + m.AssertNotCalled(t) + n.setState(NodeStateDialed) + 
n.transitionToAlive(m.Fn) + m.AssertNumberOfCalls(t, 1) + n.setState(NodeStateInvalidChainID) + n.transitionToAlive(m.Fn) + m.AssertNumberOfCalls(t, 2) + }) + + t.Run("transitionToInSync", func(t *testing.T) { + m := new(fnMock) + n.setState(NodeStateAlive) + assert.Panics(t, func() { + n.transitionToInSync(m.Fn) + }) + m.AssertNotCalled(t) + n.setState(NodeStateOutOfSync) + n.transitionToInSync(m.Fn) + m.AssertCalled(t) + }) + t.Run("transitionToOutOfSync", func(t *testing.T) { + m := new(fnMock) + n.setState(NodeStateOutOfSync) + assert.Panics(t, func() { + n.transitionToOutOfSync(m.Fn) + }) + m.AssertNotCalled(t) + n.setState(NodeStateAlive) + n.transitionToOutOfSync(m.Fn) + m.AssertCalled(t) + }) + t.Run("transitionToOutOfSync unsubscribes everything", func(t *testing.T) { + m := new(fnMock) + n.setState(NodeStateAlive) + sub := &subMock{} + n.registerSub(sub) + n.transitionToOutOfSync(m.Fn) + m.AssertNumberOfCalls(t, 1) + assert.True(t, sub.unsubbed) + }) + t.Run("transitionToUnreachable", func(t *testing.T) { + m := new(fnMock) + n.setState(NodeStateUnreachable) + assert.Panics(t, func() { + n.transitionToUnreachable(m.Fn) + }) + m.AssertNotCalled(t) + n.setState(NodeStateDialed) + n.transitionToUnreachable(m.Fn) + m.AssertNumberOfCalls(t, 1) + n.setState(NodeStateAlive) + n.transitionToUnreachable(m.Fn) + m.AssertNumberOfCalls(t, 2) + n.setState(NodeStateOutOfSync) + n.transitionToUnreachable(m.Fn) + m.AssertNumberOfCalls(t, 3) + n.setState(NodeStateUndialed) + n.transitionToUnreachable(m.Fn) + m.AssertNumberOfCalls(t, 4) + n.setState(NodeStateInvalidChainID) + n.transitionToUnreachable(m.Fn) + m.AssertNumberOfCalls(t, 5) + }) + t.Run("transitionToUnreachable unsubscribes everything", func(t *testing.T) { + m := new(fnMock) + n.setState(NodeStateDialed) + sub := &subMock{} + n.registerSub(sub) + n.transitionToUnreachable(m.Fn) + m.AssertNumberOfCalls(t, 1) + assert.True(t, sub.unsubbed) + }) + t.Run("transitionToInvalidChainID", func(t *testing.T) { + m := 
new(fnMock) + n.setState(NodeStateUnreachable) + assert.Panics(t, func() { + n.transitionToInvalidChainID(m.Fn) + }) + m.AssertNotCalled(t) + n.setState(NodeStateDialed) + n.transitionToInvalidChainID(m.Fn) + n.setState(NodeStateOutOfSync) + n.transitionToInvalidChainID(m.Fn) + m.AssertNumberOfCalls(t, 2) + }) + t.Run("transitionToInvalidChainID unsubscribes everything", func(t *testing.T) { + m := new(fnMock) + n.setState(NodeStateDialed) + sub := &subMock{} + n.registerSub(sub) + n.transitionToInvalidChainID(m.Fn) + m.AssertNumberOfCalls(t, 1) + assert.True(t, sub.unsubbed) + }) + t.Run("Close", func(t *testing.T) { + // first attempt errors due to node being unstarted + assert.Error(t, n.Close()) + // must start to allow closing + err := n.StartOnce("test node", func() error { return nil }) + assert.NoError(t, err) + assert.NoError(t, n.Close()) + + assert.Equal(t, NodeStateClosed, n.State()) + // second attempt errors due to node being stopped twice + assert.Error(t, n.Close()) + }) +} diff --git a/core/chains/evm/client/node_lifecycle.go b/core/chains/evm/client/node_lifecycle.go new file mode 100644 index 00000000..afecfd68 --- /dev/null +++ b/core/chains/evm/client/node_lifecycle.go @@ -0,0 +1,442 @@ +package client + +import ( + "context" + "fmt" + "math" + "math/big" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/logger" + cutils "github.com/goplugin/plugin-common/pkg/utils" + bigmath "github.com/goplugin/plugin-common/pkg/utils/big_math" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" +) + +var ( + promEVMPoolRPCNodeHighestSeenBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "evm_pool_rpc_node_highest_seen_block", + Help: "The highest seen block for the given RPC node", + }, []string{"evmChainID", "nodeName"}) + 
promEVMPoolRPCNodeNumSeenBlocks = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "evm_pool_rpc_node_num_seen_blocks", + Help: "The total number of new blocks seen by the given RPC node", + }, []string{"evmChainID", "nodeName"}) + promEVMPoolRPCNodePolls = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "evm_pool_rpc_node_polls_total", + Help: "The total number of poll checks for the given RPC node", + }, []string{"evmChainID", "nodeName"}) + promEVMPoolRPCNodePollsFailed = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "evm_pool_rpc_node_polls_failed", + Help: "The total number of failed poll checks for the given RPC node", + }, []string{"evmChainID", "nodeName"}) + promEVMPoolRPCNodePollsSuccess = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "evm_pool_rpc_node_polls_success", + Help: "The total number of successful poll checks for the given RPC node", + }, []string{"evmChainID", "nodeName"}) +) + +// zombieNodeCheckInterval controls how often to re-check to see if we need to +// state change in case we have to force a state transition due to no available +// nodes. +// NOTE: This only applies to out-of-sync nodes if they are the last available node +func zombieNodeCheckInterval(noNewHeadsThreshold time.Duration) time.Duration { + interval := noNewHeadsThreshold + if interval <= 0 || interval > queryTimeout { + interval = queryTimeout + } + return cutils.WithJitter(interval) +} + +func (n *node) setLatestReceived(blockNumber int64, totalDifficulty *big.Int) { + n.stateMu.Lock() + defer n.stateMu.Unlock() + n.stateLatestBlockNumber = blockNumber + n.stateLatestTotalDifficulty = totalDifficulty +} + +const ( + msgCannotDisable = "but cannot disable this connection because there are no other RPC endpoints, or all other RPC endpoints are dead." 
+ msgDegradedState = "Plugin is now operating in a degraded state and urgent action is required to resolve the issue" +) + +// Node is a FSM +// Each state has a loop that goes with it, which monitors the node and moves it into another state as necessary. +// Only one loop must run at a time. +// Each loop passes control onto the next loop as it exits, except when the node is Closed which terminates the loop permanently. + +// This handles node lifecycle for the ALIVE state +// Should only be run ONCE per node, after a successful Dial +func (n *node) aliveLoop() { + defer n.wg.Done() + + { + // sanity check + state := n.State() + switch state { + case NodeStateAlive: + case NodeStateClosed: + return + default: + panic(fmt.Sprintf("aliveLoop can only run for node in Alive state, got: %s", state)) + } + } + + noNewHeadsTimeoutThreshold := n.noNewHeadsThreshold + pollFailureThreshold := n.nodePoolCfg.PollFailureThreshold() + pollInterval := n.nodePoolCfg.PollInterval() + + lggr := logger.Sugared(n.lfcLog).Named("Alive").With("noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) + lggr.Tracew("Alive loop starting", "nodeState", n.State()) + + headsC := make(chan *evmtypes.Head) + sub, err := n.EthSubscribe(n.nodeCtx, headsC, "newHeads") + if err != nil { + lggr.Errorw("Initial subscribe for heads failed", "nodeState", n.State()) + n.declareUnreachable() + return + } + n.aliveLoopSub = sub + defer sub.Unsubscribe() + + var outOfSyncT *time.Ticker + var outOfSyncTC <-chan time.Time + if noNewHeadsTimeoutThreshold > 0 { + lggr.Debugw("Head liveness checking enabled", "nodeState", n.State()) + outOfSyncT = time.NewTicker(noNewHeadsTimeoutThreshold) + defer outOfSyncT.Stop() + outOfSyncTC = outOfSyncT.C + } else { + lggr.Debug("Head liveness checking disabled") + } + + var pollCh <-chan time.Time + if pollInterval > 0 { + lggr.Debug("Polling enabled") + pollT := time.NewTicker(pollInterval) + 
defer pollT.Stop() + pollCh = pollT.C + if pollFailureThreshold > 0 { + // polling can be enabled with no threshold to enable polling but + // the node will not be marked offline regardless of the number of + // poll failures + lggr.Debug("Polling liveness checking enabled") + } + } else { + lggr.Debug("Polling disabled") + } + + _, highestReceivedBlockNumber, _ := n.StateAndLatest() + var pollFailures uint32 + + for { + select { + case <-n.nodeCtx.Done(): + return + case <-pollCh: + var version string + promEVMPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() + lggr.Tracew("Polling for version", "nodeState", n.State(), "pollFailures", pollFailures) + ctx, cancel := context.WithTimeout(n.nodeCtx, pollInterval) + ctx, cancel2 := n.makeQueryCtx(ctx) + err := n.CallContext(ctx, &version, "web3_clientVersion") + cancel2() + cancel() + if err != nil { + // prevent overflow + if pollFailures < math.MaxUint32 { + promEVMPoolRPCNodePollsFailed.WithLabelValues(n.chainID.String(), n.name).Inc() + pollFailures++ + } + lggr.Warnw(fmt.Sprintf("Poll failure, RPC endpoint %s failed to respond properly", n.String()), "err", err, "pollFailures", pollFailures, "nodeState", n.State()) + } else { + lggr.Debugw("Version poll successful", "nodeState", n.State(), "clientVersion", version) + promEVMPoolRPCNodePollsSuccess.WithLabelValues(n.chainID.String(), n.name).Inc() + pollFailures = 0 + } + if pollFailureThreshold > 0 && pollFailures >= pollFailureThreshold { + lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), "pollFailures", pollFailures, "nodeState", n.State()) + if n.nLiveNodes != nil { + if l, _, _ := n.nLiveNodes(); l < 2 { + lggr.Criticalf("RPC endpoint failed to respond to polls; %s %s", msgCannotDisable, msgDegradedState) + continue + } + } + n.declareUnreachable() + return + } + _, num, td := n.StateAndLatest() + if outOfSync, liveNodes := n.syncStatus(num, td); outOfSync { + // note: there must be another live 
node for us to be out of sync + lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", num, "totalDifficulty", td, "nodeState", n.State()) + if liveNodes < 2 { + lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) + continue + } + n.declareOutOfSync(n.isOutOfSync) + return + } + case bh, open := <-headsC: + if !open { + lggr.Errorw("Subscription channel unexpectedly closed", "nodeState", n.State()) + n.declareUnreachable() + return + } + promEVMPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() + lggr.Tracew("Got head", "head", bh) + if bh.Number > highestReceivedBlockNumber { + promEVMPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(bh.Number)) + lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.Number, "nodeState", n.State()) + highestReceivedBlockNumber = bh.Number + } else { + lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.Number, "nodeState", n.State()) + } + if outOfSyncT != nil { + outOfSyncT.Reset(noNewHeadsTimeoutThreshold) + } + n.setLatestReceived(bh.Number, bh.TotalDifficulty) + case err := <-sub.Err(): + lggr.Errorw("Subscription was terminated", "err", err, "nodeState", n.State()) + n.declareUnreachable() + return + case <-outOfSyncTC: + // We haven't received a head on the channel for at least the + // threshold amount of time, mark it broken + lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, highestReceivedBlockNumber), "nodeState", n.State(), "latestReceivedBlockNumber", highestReceivedBlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) + if n.nLiveNodes != nil { + if l, _, _ := n.nLiveNodes(); l < 2 { + lggr.Criticalf("RPC endpoint detected out of sync; %s %s", 
msgCannotDisable, msgDegradedState) + // We don't necessarily want to wait the full timeout to check again, we should + // check regularly and log noisily in this state + outOfSyncT.Reset(zombieNodeCheckInterval(n.noNewHeadsThreshold)) + continue + } + } + n.declareOutOfSync(func(num int64, td *big.Int) bool { return num < highestReceivedBlockNumber }) + return + } + } +} + +func (n *node) isOutOfSync(num int64, td *big.Int) (outOfSync bool) { + outOfSync, _ = n.syncStatus(num, td) + return +} + +// syncStatus returns outOfSync true if num or td is more than SyncThresold behind the best node. +// Always returns outOfSync false for SyncThreshold 0. +// liveNodes is only included when outOfSync is true. +func (n *node) syncStatus(num int64, td *big.Int) (outOfSync bool, liveNodes int) { + if n.nLiveNodes == nil { + return // skip for tests + } + threshold := n.nodePoolCfg.SyncThreshold() + if threshold == 0 { + return // disabled + } + // Check against best node + ln, highest, greatest := n.nLiveNodes() + mode := n.nodePoolCfg.SelectionMode() + switch mode { + case NodeSelectionMode_HighestHead, NodeSelectionMode_RoundRobin, NodeSelectionMode_PriorityLevel: + return num < highest-int64(threshold), ln + case NodeSelectionMode_TotalDifficulty: + bigThreshold := big.NewInt(int64(threshold)) + return td.Cmp(bigmath.Sub(greatest, bigThreshold)) < 0, ln + default: + panic("unrecognized NodeSelectionMode: " + mode) + } +} + +const ( + msgReceivedBlock = "Received block for RPC node, waiting until back in-sync to mark as live again" + msgInSync = "RPC node back in sync" +) + +// outOfSyncLoop takes an OutOfSync node and waits until isOutOfSync returns false to go back to live status +func (n *node) outOfSyncLoop(isOutOfSync func(num int64, td *big.Int) bool) { + defer n.wg.Done() + + { + // sanity check + state := n.State() + switch state { + case NodeStateOutOfSync: + case NodeStateClosed: + return + default: + panic(fmt.Sprintf("outOfSyncLoop can only run for node in 
OutOfSync state, got: %s", state)) + } + } + + outOfSyncAt := time.Now() + + lggr := logger.Sugared(logger.Named(n.lfcLog, "OutOfSync")) + lggr.Debugw("Trying to revive out-of-sync RPC node", "nodeState", n.State()) + + // Need to redial since out-of-sync nodes are automatically disconnected + if err := n.dial(n.nodeCtx); err != nil { + lggr.Errorw("Failed to dial out-of-sync RPC node", "nodeState", n.State()) + n.declareUnreachable() + return + } + + // Manually re-verify since out-of-sync nodes are automatically disconnected + if err := n.verify(n.nodeCtx); err != nil { + lggr.Errorw(fmt.Sprintf("Failed to verify out-of-sync RPC node: %v", err), "err", err) + n.declareInvalidChainID() + return + } + + lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.State()) + + ch := make(chan *evmtypes.Head) + subCtx, cancel := n.makeQueryCtx(n.nodeCtx) + // raw call here to bypass node state checking + sub, err := n.ws.rpc.EthSubscribe(subCtx, ch, "newHeads") + cancel() + if err != nil { + lggr.Errorw("Failed to subscribe heads on out-of-sync RPC node", "nodeState", n.State(), "err", err) + n.declareUnreachable() + return + } + defer sub.Unsubscribe() + + for { + select { + case <-n.nodeCtx.Done(): + return + case head, open := <-ch: + if !open { + lggr.Error("Subscription channel unexpectedly closed", "nodeState", n.State()) + n.declareUnreachable() + return + } + n.setLatestReceived(head.Number, head.TotalDifficulty) + if !isOutOfSync(head.Number, head.TotalDifficulty) { + // back in-sync! flip back into alive loop + lggr.Infow(fmt.Sprintf("%s: %s. 
Node was out-of-sync for %s", msgInSync, n.String(), time.Since(outOfSyncAt)), "blockNumber", head.Number, "totalDifficulty", head.TotalDifficulty, "nodeState", n.State()) + n.declareInSync() + return + } + lggr.Debugw(msgReceivedBlock, "blockNumber", head.Number, "totalDifficulty", head.TotalDifficulty, "nodeState", n.State()) + case <-time.After(zombieNodeCheckInterval(n.noNewHeadsThreshold)): + if n.nLiveNodes != nil { + if l, _, _ := n.nLiveNodes(); l < 1 { + lggr.Critical("RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state") + n.declareInSync() + return + } + } + case err := <-sub.Err(): + lggr.Errorw("Subscription was terminated", "nodeState", n.State(), "err", err) + n.declareUnreachable() + return + } + } +} + +func (n *node) unreachableLoop() { + defer n.wg.Done() + + { + // sanity check + state := n.State() + switch state { + case NodeStateUnreachable: + case NodeStateClosed: + return + default: + panic(fmt.Sprintf("unreachableLoop can only run for node in Unreachable state, got: %s", state)) + } + } + + unreachableAt := time.Now() + + lggr := logger.Sugared(logger.Named(n.lfcLog, "Unreachable")) + lggr.Debugw("Trying to revive unreachable RPC node", "nodeState", n.State()) + + dialRetryBackoff := utils.NewRedialBackoff() + + for { + select { + case <-n.nodeCtx.Done(): + return + case <-time.After(dialRetryBackoff.Duration()): + lggr.Tracew("Trying to re-dial RPC node", "nodeState", n.State()) + + err := n.dial(n.nodeCtx) + if err != nil { + lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; still unreachable: %v", err), "err", err, "nodeState", n.State()) + continue + } + + n.setState(NodeStateDialed) + + err = n.verify(n.nodeCtx) + + if errors.Is(err, errInvalidChainID) { + lggr.Errorw("Failed to redial RPC node; remote endpoint returned the wrong chain ID", "err", err) + n.declareInvalidChainID() + return + } else if err != nil { + 
lggr.Errorw(fmt.Sprintf("Failed to redial RPC node; verify failed: %v", err), "err", err) + n.declareUnreachable() + return + } + + lggr.Infow(fmt.Sprintf("Successfully redialled and verified RPC node %s. Node was offline for %s", n.String(), time.Since(unreachableAt)), "nodeState", n.State()) + n.declareAlive() + return + } + } +} + +func (n *node) invalidChainIDLoop() { + defer n.wg.Done() + + { + // sanity check + state := n.State() + switch state { + case NodeStateInvalidChainID: + case NodeStateClosed: + return + default: + panic(fmt.Sprintf("invalidChainIDLoop can only run for node in InvalidChainID state, got: %s", state)) + } + } + + invalidAt := time.Now() + + lggr := logger.Named(n.lfcLog, "InvalidChainID") + lggr.Debugw(fmt.Sprintf("Periodically re-checking RPC node %s with invalid chain ID", n.String()), "nodeState", n.State()) + + chainIDRecheckBackoff := utils.NewRedialBackoff() + + for { + select { + case <-n.nodeCtx.Done(): + return + case <-time.After(chainIDRecheckBackoff.Duration()): + err := n.verify(n.nodeCtx) + if errors.Is(err, errInvalidChainID) { + lggr.Errorw("Failed to verify RPC node; remote endpoint returned the wrong chain ID", "err", err) + continue + } else if err != nil { + lggr.Errorw(fmt.Sprintf("Unexpected error while verifying RPC node chain ID; %v", err), "err", err) + n.declareUnreachable() + return + } + lggr.Infow(fmt.Sprintf("Successfully verified RPC node. 
Node was offline for %s", time.Since(invalidAt)), "nodeState", n.State()) + n.declareAlive() + return + } + } +} diff --git a/core/chains/evm/client/node_lifecycle_test.go b/core/chains/evm/client/node_lifecycle_test.go new file mode 100644 index 00000000..2e42b421 --- /dev/null +++ b/core/chains/evm/client/node_lifecycle_test.go @@ -0,0 +1,858 @@ +package client + +import ( + "fmt" + "math/big" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + "go.uber.org/zap" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func standardHandler(method string, _ gjson.Result) (resp testutils.JSONRPCResponse) { + if method == "eth_subscribe" { + resp.Result = `"0x00"` + resp.Notify = HeadResult + return + } + return +} + +func newTestNode(t *testing.T, cfg config.NodePool, noNewHeadsThresholds time.Duration) *node { + return newTestNodeWithCallback(t, cfg, noNewHeadsThresholds, standardHandler) +} + +func newTestNodeWithCallback(t *testing.T, cfg config.NodePool, noNewHeadsThreshold time.Duration, callback testutils.JSONRPCHandler) *node { + s := testutils.NewWSServer(t, testutils.FixtureChainID, callback) + iN := NewNode(cfg, noNewHeadsThreshold, logger.Test(t), *s.WSURL(), nil, "test node", 42, testutils.FixtureChainID, 1) + n := iN.(*node) + return n +} + +// dial sets up the node and puts it into the live state, bypassing the +// normal Start() method which would fire off unwanted goroutines +func dial(t *testing.T, n *node) { + ctx := testutils.Context(t) + require.NoError(t, n.dial(ctx)) + n.setState(NodeStateAlive) + start(t, n) +} + +func start(t *testing.T, n *node) { + // must start to allow closing + err := n.StartOnce("test node", func() error { return nil }) + 
assert.NoError(t, err) +} + +func makeHeadResult(n int) string { + return fmt.Sprintf( + `{"difficulty":"0xf3a00","extraData":"0xd883010503846765746887676f312e372e318664617277696e","gasLimit":"0xffc001","gasUsed":"0x0","hash":"0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0xd1aeb42885a43b72b518182ef893125814811048","mixHash":"0x0f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","nonce":"0x0ece08ea8c49dfd9","number":"%s","parentHash":"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x218","stateRoot":"0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b","timestamp":"0x58318da2","totalDifficulty":"0x1f3a00","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}`, + testutils.IntToHex(n), + ) +} + +func makeNewHeadWSMessage(n int) string { + return fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_subscription","params":{"subscription":"0x00","result":%s}}`, makeHeadResult(n)) +} + +func TestUnit_NodeLifecycle_aliveLoop(t *testing.T) { + t.Parallel() + + t.Run("with no poll and sync timeouts, exits on close", func(t *testing.T) { + pollAndSyncTimeoutsDisabledCfg := TestNodePoolConfig{} + n := newTestNode(t, pollAndSyncTimeoutsDisabledCfg, 
0*time.Second) + dial(t, n) + + ch := make(chan struct{}) + n.wg.Add(1) + go func() { + defer close(ch) + n.aliveLoop() + }() + assert.NoError(t, n.Close()) + testutils.WaitWithTimeout(t, ch, "expected aliveLoop to exit") + }) + + t.Run("with no poll failures past threshold, stays alive", func(t *testing.T) { + threshold := 5 + cfg := TestNodePoolConfig{NodePollFailureThreshold: uint32(threshold), NodePollInterval: testutils.TestInterval} + var calls atomic.Int32 + n := newTestNodeWithCallback(t, cfg, time.Second*0, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = makeHeadResult(0) + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "web3_clientVersion": + defer calls.Add(1) + // It starts working right before it hits threshold + if int(calls.Load())+1 >= threshold { + resp.Result = `"test client version"` + return + } + resp.Result = "this will error" + return + default: + t.Errorf("unexpected RPC method: %s", method) + } + return + }) + dial(t, n) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.aliveLoop() + + testutils.AssertEventually(t, func() bool { + // Need to wait for one complete cycle before checking state so add + // 1 to threshold + return int(calls.Load()) > threshold+1 + }) + + assert.Equal(t, NodeStateAlive, n.State()) + }) + + t.Run("with threshold poll failures, transitions to unreachable", func(t *testing.T) { + syncTimeoutsDisabledCfg := TestNodePoolConfig{NodePollFailureThreshold: 3, NodePollInterval: testutils.TestInterval} + n := newTestNode(t, syncTimeoutsDisabledCfg, time.Second*0) + dial(t, n) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.aliveLoop() + + testutils.AssertEventually(t, func() bool { + return n.State() == NodeStateUnreachable + }) + }) + + t.Run("with threshold poll failures, but we are the last node alive, forcibly keeps it alive", func(t 
*testing.T) { + threshold := 3 + cfg := TestNodePoolConfig{NodePollFailureThreshold: uint32(threshold), NodePollInterval: testutils.TestInterval} + var calls atomic.Int32 + n := newTestNodeWithCallback(t, cfg, time.Second*0, func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = HeadResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "web3_clientVersion": + defer calls.Add(1) + resp.Error.Message = "this will error" + return + default: + t.Errorf("unexpected RPC method: %s", method) + } + return + }) + n.nLiveNodes = func() (int, int64, *big.Int) { return 1, 0, nil } + dial(t, n) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.aliveLoop() + + testutils.AssertEventually(t, func() bool { + // Need to wait for one complete cycle before checking state so add + // 1 to threshold + return int(calls.Load()) > threshold+1 + }) + + assert.Equal(t, NodeStateAlive, n.State()) + }) + + t.Run("if initial subscribe fails, transitions to unreachable", func(t *testing.T) { + pollDisabledCfg := TestNodePoolConfig{} + n := newTestNodeWithCallback(t, pollDisabledCfg, testutils.TestInterval, func(string, gjson.Result) (resp testutils.JSONRPCResponse) { return }) + dial(t, n) + defer func() { assert.NoError(t, n.Close()) }() + + _, err := n.EthSubscribe(testutils.Context(t), make(chan *evmtypes.Head)) + assert.Error(t, err) + + n.wg.Add(1) + n.aliveLoop() + + assert.Equal(t, NodeStateUnreachable, n.State()) + // sc-39341: ensure failed EthSubscribe didn't register a (*rpc.ClientSubscription)(nil) which would lead to a panic on Unsubscribe + assert.Len(t, n.subs, 0) + }) + + t.Run("if remote RPC connection is closed transitions to unreachable", func(t *testing.T) { + // NoNewHeadsThreshold needs to be positive but must be very large so + // we don't time out waiting for a new head before we have a chance to + // handle the server 
disconnect + cfg := TestNodePoolConfig{NodePollInterval: 1 * time.Second} + chSubbed := make(chan struct{}, 1) + chPolled := make(chan struct{}) + s := testutils.NewWSServer(t, testutils.FixtureChainID, + func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + chSubbed <- struct{}{} + resp.Result = `"0x00"` + resp.Notify = makeHeadResult(0) + return + case "web3_clientVersion": + select { + case chPolled <- struct{}{}: + default: + } + resp.Result = `"test client version 2"` + return + default: + t.Errorf("unexpected RPC method: %s", method) + } + return + }) + + iN := NewNode(cfg, testutils.WaitTimeout(t), logger.Test(t), *s.WSURL(), nil, "test node", 42, testutils.FixtureChainID, 1) + n := iN.(*node) + + dial(t, n) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.aliveLoop() + + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription") + testutils.WaitWithTimeout(t, chPolled, "timed out waiting for initial poll") + + assert.Equal(t, NodeStateAlive, n.State()) + + // Simulate remote websocket disconnect + // This causes sub.Err() to close + s.Close() + + testutils.AssertEventually(t, func() bool { + return n.State() == NodeStateUnreachable + }) + }) + + t.Run("when no new heads received for threshold, transitions to out of sync", func(t *testing.T) { + cfg := TestNodePoolConfig{} + chSubbed := make(chan struct{}, 2) + s := testutils.NewWSServer(t, testutils.FixtureChainID, + func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + chSubbed <- struct{}{} + resp.Result = `"0x00"` + resp.Notify = makeHeadResult(0) + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "web3_clientVersion": + resp.Result = `"test client version 2"` + return + default: + t.Errorf("unexpected RPC method: %s", method) + } + return + }) + + iN := NewNode(cfg, 1*time.Second, logger.Test(t), 
*s.WSURL(), nil, "test node", 42, testutils.FixtureChainID, 1) + n := iN.(*node) + + dial(t, n) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.aliveLoop() + + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription for InSync") + + testutils.AssertEventually(t, func() bool { + return n.State() == NodeStateOutOfSync + }) + + // Otherwise, there may be data race on dial() vs Close() (accessing ws.rpc) + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription for OutOfSync") + }) + + t.Run("when no new heads received for threshold but we are the last live node, forcibly stays alive", func(t *testing.T) { + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + pollDisabledCfg := TestNodePoolConfig{} + s := testutils.NewWSServer(t, testutils.FixtureChainID, + func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = makeHeadResult(0) + return + case "eth_unsubscribe": + resp.Result = "true" + return + default: + t.Errorf("unexpected RPC method: %s", method) + } + return + }) + + iN := NewNode(pollDisabledCfg, testutils.TestInterval, lggr, *s.WSURL(), nil, "test node", 42, testutils.FixtureChainID, 1) + n := iN.(*node) + n.nLiveNodes = func() (int, int64, *big.Int) { return 1, 0, nil } + dial(t, n) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.aliveLoop() + + // to avoid timing-dependent tests, simply wait for the log message instead + // wait for the log twice to be sure we have fully completed the code path and gone around the loop + testutils.WaitForLogMessageCount(t, observedLogs, msgCannotDisable, 2) + + assert.Equal(t, NodeStateAlive, n.State()) + }) + + t.Run("when behind more than SyncThreshold, transitions to out of sync", func(t *testing.T) { + cfg := TestNodePoolConfig{NodeSyncThreshold: 10, NodePollFailureThreshold: 2, NodePollInterval: 100 * 
time.Millisecond, NodeSelectionMode: NodeSelectionMode_HighestHead} + chSubbed := make(chan struct{}, 2) + var highestHead atomic.Int64 + const stall = 10 + s := testutils.NewWSServer(t, testutils.FixtureChainID, + func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + chSubbed <- struct{}{} + resp.Result = `"0x00"` + resp.Notify = makeHeadResult(int(highestHead.Load())) + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "web3_clientVersion": + resp.Result = `"test client version 2"` + // always tick each poll, but only signal back up to stall + if n := highestHead.Add(1); n <= stall { + resp.Notify = makeHeadResult(int(n)) + } + return + default: + t.Errorf("unexpected RPC method: %s", method) + } + return + }) + + iN := NewNode(cfg, 0*time.Second, logger.Test(t), *s.WSURL(), nil, "test node", 42, testutils.FixtureChainID, 1) + n := iN.(*node) + n.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { + return 2, highestHead.Load(), nil + } + + dial(t, n) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.aliveLoop() + + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription for InSync") + + // ensure alive up to stall + testutils.AssertEventually(t, func() bool { + state, num, _ := n.StateAndLatest() + if num < stall { + require.Equal(t, NodeStateAlive, state) + } + return num == stall + }) + + testutils.AssertEventually(t, func() bool { + state, num, _ := n.StateAndLatest() + return state == NodeStateOutOfSync && num == stall + }) + assert.GreaterOrEqual(t, highestHead.Load(), int64(stall+cfg.SyncThreshold())) + + // Otherwise, there may be data race on dial() vs Close() (accessing ws.rpc) + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription for OutOfSync") + }) + + t.Run("when behind but SyncThreshold=0, stay alive", func(t *testing.T) { + cfg := 
TestNodePoolConfig{NodeSyncThreshold: 0, NodePollFailureThreshold: 2, NodePollInterval: 100 * time.Millisecond, NodeSelectionMode: NodeSelectionMode_HighestHead} + chSubbed := make(chan struct{}, 1) + var highestHead atomic.Int64 + const stall = 10 + s := testutils.NewWSServer(t, testutils.FixtureChainID, + func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + chSubbed <- struct{}{} + resp.Result = `"0x00"` + resp.Notify = makeHeadResult(int(highestHead.Load())) + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "web3_clientVersion": + resp.Result = `"test client version 2"` + // always tick each poll, but only signal back up to stall + if n := highestHead.Add(1); n <= stall { + resp.Notify = makeHeadResult(int(n)) + } + return + default: + t.Errorf("unexpected RPC method: %s", method) + } + return + }) + + iN := NewNode(cfg, 0*time.Second, logger.Test(t), *s.WSURL(), nil, "test node", 42, testutils.FixtureChainID, 1) + n := iN.(*node) + n.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { + return 2, highestHead.Load(), nil + } + + dial(t, n) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.aliveLoop() + + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription for InSync") + + // ensure alive up to stall + testutils.AssertEventually(t, func() bool { + state, num, _ := n.StateAndLatest() + require.Equal(t, NodeStateAlive, state) + return num == stall + }) + + assert.Equal(t, NodeStateAlive, n.state) + assert.GreaterOrEqual(t, highestHead.Load(), int64(stall+cfg.SyncThreshold())) + }) + + t.Run("when behind more than SyncThreshold but we are the last live node, forcibly stays alive", func(t *testing.T) { + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + cfg := TestNodePoolConfig{NodeSyncThreshold: 5, NodePollFailureThreshold: 2, NodePollInterval: 100 * time.Millisecond, 
NodeSelectionMode: NodeSelectionMode_HighestHead} + chSubbed := make(chan struct{}, 1) + var highestHead atomic.Int64 + const stall = 10 + s := testutils.NewWSServer(t, testutils.FixtureChainID, + func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + chSubbed <- struct{}{} + resp.Result = `"0x00"` + n := highestHead.Load() + if n > stall { + n = stall + } + resp.Notify = makeHeadResult(int(n)) + return + case "eth_unsubscribe": + resp.Result = "true" + return + case "web3_clientVersion": + resp.Result = `"test client version 2"` + // always tick each poll, but only signal back up to stall + if n := highestHead.Add(1); n <= stall { + resp.Notify = makeHeadResult(int(n)) + } + return + default: + t.Errorf("unexpected RPC method: %s", method) + } + return + }) + + iN := NewNode(cfg, 0*time.Second, lggr, *s.WSURL(), nil, "test node", 42, testutils.FixtureChainID, 1) + n := iN.(*node) + n.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { + return 1, highestHead.Load(), nil + } + + dial(t, n) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.aliveLoop() + + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription for InSync") + + // ensure alive up to stall + testutils.AssertEventually(t, func() bool { + state, num, _ := n.StateAndLatest() + require.Equal(t, NodeStateAlive, state) + return num == stall + }) + + assert.Equal(t, NodeStateAlive, n.state) + testutils.AssertEventually(t, func() bool { + return highestHead.Load() >= int64(stall+cfg.SyncThreshold()) + }) + + testutils.WaitForLogMessageCount(t, observedLogs, msgCannotDisable, 1) + + state, num, _ := n.StateAndLatest() + assert.Equal(t, NodeStateAlive, state) + assert.Equal(t, int64(stall), num) + + }) +} + +func TestUnit_NodeLifecycle_outOfSyncLoop(t *testing.T) { + t.Parallel() + + t.Run("exits on close", func(t *testing.T) { + cfg := TestNodePoolConfig{} + n := 
newTestNode(t, cfg, time.Second*0) + dial(t, n) + n.setState(NodeStateOutOfSync) + + ch := make(chan struct{}) + + n.wg.Add(1) + go func() { + defer close(ch) + n.outOfSyncLoop(func(num int64, td *big.Int) bool { return false }) + }() + assert.NoError(t, n.Close()) + testutils.WaitWithTimeout(t, ch, "expected outOfSyncLoop to exit") + }) + + t.Run("if initial subscribe fails, transitions to unreachable", func(t *testing.T) { + cfg := TestNodePoolConfig{} + n := newTestNodeWithCallback(t, cfg, time.Second*0, func(string, gjson.Result) (resp testutils.JSONRPCResponse) { return }) + dial(t, n) + n.setState(NodeStateOutOfSync) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + + n.outOfSyncLoop(func(num int64, td *big.Int) bool { return num == 0 }) + assert.Equal(t, NodeStateUnreachable, n.State()) + }) + + t.Run("transitions to unreachable if remote RPC subscription channel closed", func(t *testing.T) { + cfg := TestNodePoolConfig{} + chSubbed := make(chan struct{}, 1) + s := testutils.NewWSServer(t, testutils.FixtureChainID, + func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + chSubbed <- struct{}{} + resp.Result = `"0x00"` + resp.Notify = makeHeadResult(0) + return + default: + t.Errorf("unexpected RPC method: %s", method) + } + return + }) + + iN := NewNode(cfg, time.Second, logger.Test(t), *s.WSURL(), nil, "test node", 42, testutils.FixtureChainID, 1) + n := iN.(*node) + + dial(t, n) + n.setState(NodeStateOutOfSync) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.outOfSyncLoop(func(num int64, td *big.Int) bool { return num == 0 }) + + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription") + + assert.Equal(t, NodeStateOutOfSync, n.State()) + + // Simulate remote websocket disconnect + // This causes sub.Err() to close + s.Close() + + testutils.AssertEventually(t, func() bool { + return n.State() == NodeStateUnreachable + }) + 
}) + + t.Run("transitions to alive if it receives a newer head", func(t *testing.T) { + // NoNewHeadsThreshold needs to be positive but must be very large so + // we don't time out waiting for a new head before we have a chance to + // handle the server disconnect + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + cfg := TestNodePoolConfig{} + chSubbed := make(chan struct{}, 1) + s := testutils.NewWSServer(t, testutils.FixtureChainID, + func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + chSubbed <- struct{}{} + resp.Result = `"0x00"` + resp.Notify = makeNewHeadWSMessage(42) + return + case "eth_unsubscribe": + resp.Result = "true" + return + default: + t.Errorf("unexpected RPC method: %s", method) + } + return + }) + + iN := NewNode(cfg, time.Second*0, lggr, *s.WSURL(), nil, "test node", 0, testutils.FixtureChainID, 1) + n := iN.(*node) + + start(t, n) + n.setState(NodeStateOutOfSync) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.outOfSyncLoop(func(num int64, td *big.Int) bool { return num < 43 }) + + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription") + + assert.Equal(t, NodeStateOutOfSync, n.State()) + + // heads less than latest seen head are ignored; they do not make the node live + for i := 0; i < 43; i++ { + msg := makeNewHeadWSMessage(i) + s.MustWriteBinaryMessageSync(t, msg) + testutils.WaitForLogMessageCount(t, observedLogs, msgReceivedBlock, i+1) + assert.Equal(t, NodeStateOutOfSync, n.State()) + } + + msg := makeNewHeadWSMessage(43) + s.MustWriteBinaryMessageSync(t, msg) + + testutils.AssertEventually(t, func() bool { + s, n, td := n.StateAndLatest() + return s == NodeStateAlive && n != -1 && td != nil + }) + + testutils.WaitForLogMessage(t, observedLogs, msgInSync) + }) + + t.Run("transitions to alive if back in-sync", func(t *testing.T) { + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + cfg := 
TestNodePoolConfig{NodeSyncThreshold: 5, NodeSelectionMode: NodeSelectionMode_HighestHead} + chSubbed := make(chan struct{}, 1) + const stall = 42 + s := testutils.NewWSServer(t, testutils.FixtureChainID, + func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + chSubbed <- struct{}{} + resp.Result = `"0x00"` + resp.Notify = makeNewHeadWSMessage(stall) + return + case "eth_unsubscribe": + resp.Result = "true" + return + default: + t.Errorf("unexpected RPC method: %s", method) + } + return + }) + + iN := NewNode(cfg, time.Second*0, lggr, *s.WSURL(), nil, "test node", 0, testutils.FixtureChainID, 1) + n := iN.(*node) + n.nLiveNodes = func() (count int, blockNumber int64, totalDifficulty *big.Int) { + return 2, stall + int64(cfg.SyncThreshold()), nil + } + + start(t, n) + n.setState(NodeStateOutOfSync) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.outOfSyncLoop(n.isOutOfSync) + + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription") + + assert.Equal(t, NodeStateOutOfSync, n.State()) + + // heads less than stall (latest seen head - SyncThreshold) are ignored; they do not make the node live + for i := 0; i < stall; i++ { + msg := makeNewHeadWSMessage(i) + s.MustWriteBinaryMessageSync(t, msg) + testutils.WaitForLogMessageCount(t, observedLogs, msgReceivedBlock, i+1) + assert.Equal(t, NodeStateOutOfSync, n.State()) + } + + msg := makeNewHeadWSMessage(stall) + s.MustWriteBinaryMessageSync(t, msg) + + testutils.AssertEventually(t, func() bool { + s, n, td := n.StateAndLatest() + return s == NodeStateAlive && n != -1 && td != nil + }) + + testutils.WaitForLogMessage(t, observedLogs, msgInSync) + }) + + t.Run("if no live nodes are available, forcibly marks this one alive again", func(t *testing.T) { + cfg := TestNodePoolConfig{} + chSubbed := make(chan struct{}, 1) + s := testutils.NewWSServer(t, testutils.FixtureChainID, + func(method string, params 
gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + chSubbed <- struct{}{} + resp.Result = `"0x00"` + resp.Notify = makeHeadResult(0) + return + case "eth_unsubscribe": + resp.Result = "true" + return + default: + t.Errorf("unexpected RPC method: %s", method) + } + return + }) + + iN := NewNode(cfg, testutils.TestInterval, logger.Test(t), *s.WSURL(), nil, "test node", 42, testutils.FixtureChainID, 1) + n := iN.(*node) + n.nLiveNodes = func() (int, int64, *big.Int) { return 0, 0, nil } + + dial(t, n) + n.setState(NodeStateOutOfSync) + defer func() { assert.NoError(t, n.Close()) }() + + n.wg.Add(1) + go n.outOfSyncLoop(func(num int64, td *big.Int) bool { return num == 0 }) + + testutils.WaitWithTimeout(t, chSubbed, "timed out waiting for initial subscription") + + testutils.AssertEventually(t, func() bool { + return n.State() == NodeStateAlive + }) + }) +} + +func TestUnit_NodeLifecycle_unreachableLoop(t *testing.T) { + t.Parallel() + + t.Run("exits on close", func(t *testing.T) { + cfg := TestNodePoolConfig{} + n := newTestNode(t, cfg, time.Second*0) + start(t, n) + n.setState(NodeStateUnreachable) + + ch := make(chan struct{}) + n.wg.Add(1) + go func() { + n.unreachableLoop() + close(ch) + }() + assert.NoError(t, n.Close()) + testutils.WaitWithTimeout(t, ch, "expected unreachableLoop to exit") + }) + + t.Run("on successful redial and verify, transitions to alive", func(t *testing.T) { + cfg := TestNodePoolConfig{} + n := newTestNode(t, cfg, time.Second*0) + start(t, n) + defer func() { assert.NoError(t, n.Close()) }() + n.setState(NodeStateUnreachable) + n.wg.Add(1) + + go n.unreachableLoop() + + testutils.AssertEventually(t, func() bool { + return n.State() == NodeStateAlive + }) + }) + + t.Run("on successful redial but failed verify, transitions to invalid chain ID", func(t *testing.T) { + cfg := TestNodePoolConfig{} + s := testutils.NewWSServer(t, testutils.FixtureChainID, standardHandler) + lggr, observedLogs := 
logger.TestObserved(t, zap.ErrorLevel) + iN := NewNode(cfg, time.Second*0, lggr, *s.WSURL(), nil, "test node", 0, big.NewInt(42), 1) + n := iN.(*node) + defer func() { assert.NoError(t, n.Close()) }() + start(t, n) + n.setState(NodeStateUnreachable) + n.wg.Add(1) + + go n.unreachableLoop() + + testutils.WaitForLogMessage(t, observedLogs, "Failed to redial RPC node; remote endpoint returned the wrong chain ID") + + testutils.AssertEventually(t, func() bool { + return n.State() == NodeStateInvalidChainID + }) + }) + + t.Run("on failed redial, keeps trying to redial", func(t *testing.T) { + cfg := TestNodePoolConfig{} + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + iN := NewNode(cfg, time.Second*0, lggr, *testutils.MustParseURL(t, "ws://test.invalid"), nil, "test node", 0, big.NewInt(42), 1) + n := iN.(*node) + defer func() { assert.NoError(t, n.Close()) }() + start(t, n) + n.setState(NodeStateUnreachable) + n.wg.Add(1) + + go n.unreachableLoop() + + testutils.WaitForLogMessageCount(t, observedLogs, "Failed to redial RPC node", 3) + + assert.Equal(t, NodeStateUnreachable, n.State()) + }) +} +func TestUnit_NodeLifecycle_invalidChainIDLoop(t *testing.T) { + t.Parallel() + + t.Run("exits on close", func(t *testing.T) { + cfg := TestNodePoolConfig{} + n := newTestNode(t, cfg, time.Second*0) + start(t, n) + n.setState(NodeStateInvalidChainID) + + ch := make(chan struct{}) + n.wg.Add(1) + go func() { + n.invalidChainIDLoop() + close(ch) + }() + assert.NoError(t, n.Close()) + testutils.WaitWithTimeout(t, ch, "expected invalidChainIDLoop to exit") + }) + + t.Run("on successful verify, transitions to alive", func(t *testing.T) { + cfg := TestNodePoolConfig{} + n := newTestNode(t, cfg, time.Second*0) + dial(t, n) + defer func() { assert.NoError(t, n.Close()) }() + n.setState(NodeStateInvalidChainID) + n.wg.Add(1) + + go n.invalidChainIDLoop() + + testutils.AssertEventually(t, func() bool { + return n.State() == NodeStateAlive + }) + }) + + t.Run("on failed 
verify, keeps checking", func(t *testing.T) { + cfg := TestNodePoolConfig{} + s := testutils.NewWSServer(t, testutils.FixtureChainID, standardHandler) + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + iN := NewNode(cfg, time.Second*0, lggr, *s.WSURL(), nil, "test node", 0, big.NewInt(42), 1) + n := iN.(*node) + defer func() { assert.NoError(t, n.Close()) }() + dial(t, n) + n.setState(NodeStateUnreachable) + n.wg.Add(1) + + go n.unreachableLoop() + + testutils.WaitForLogMessageCount(t, observedLogs, "Failed to redial RPC node; remote endpoint returned the wrong chain ID", 3) + + assert.Equal(t, NodeStateInvalidChainID, n.State()) + }) +} diff --git a/core/chains/evm/client/node_selector_highest_head.go b/core/chains/evm/client/node_selector_highest_head.go new file mode 100644 index 00000000..6672c428 --- /dev/null +++ b/core/chains/evm/client/node_selector_highest_head.go @@ -0,0 +1,32 @@ +package client + +import ( + "math" +) + +type highestHeadNodeSelector []Node + +// Deprecated: use [pkg/github.com/goplugin/pluginv3.0/v2/common/client.NewHighestHeadNodeSelector] +func NewHighestHeadNodeSelector(nodes []Node) NodeSelector { + return highestHeadNodeSelector(nodes) +} + +func (s highestHeadNodeSelector) Select() Node { + var highestHeadNumber int64 = math.MinInt64 + var highestHeadNodes []Node + for _, n := range s { + state, currentHeadNumber, _ := n.StateAndLatest() + if state == NodeStateAlive && currentHeadNumber >= highestHeadNumber { + if highestHeadNumber < currentHeadNumber { + highestHeadNumber = currentHeadNumber + highestHeadNodes = nil + } + highestHeadNodes = append(highestHeadNodes, n) + } + } + return firstOrHighestPriority(highestHeadNodes) +} + +func (s highestHeadNodeSelector) Name() string { + return NodeSelectionMode_HighestHead +} diff --git a/core/chains/evm/client/node_selector_highest_head_test.go b/core/chains/evm/client/node_selector_highest_head_test.go new file mode 100644 index 00000000..c4e18b77 --- /dev/null +++ 
b/core/chains/evm/client/node_selector_highest_head_test.go @@ -0,0 +1,175 @@ +package client_test + +import ( + "testing" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/mocks" + + "github.com/stretchr/testify/assert" +) + +func TestHighestHeadNodeSelectorName(t *testing.T) { + selector := evmclient.NewHighestHeadNodeSelector(nil) + assert.Equal(t, selector.Name(), evmclient.NodeSelectionMode_HighestHead) +} + +func TestHighestHeadNodeSelector(t *testing.T) { + t.Parallel() + + var nodes []evmclient.Node + + for i := 0; i < 3; i++ { + node := evmmocks.NewNode(t) + if i == 0 { + // first node is out of sync + node.On("StateAndLatest").Return(evmclient.NodeStateOutOfSync, int64(-1), nil) + } else if i == 1 { + // second node is alive, LatestReceivedBlockNumber = 1 + node.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(1), nil) + } else { + // third node is alive, LatestReceivedBlockNumber = 2 (best node) + node.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(2), nil) + } + node.On("Order").Maybe().Return(int32(1)) + nodes = append(nodes, node) + } + + selector := evmclient.NewHighestHeadNodeSelector(nodes) + assert.Same(t, nodes[2], selector.Select()) + + t.Run("stick to the same node", func(t *testing.T) { + node := evmmocks.NewNode(t) + // fourth node is alive, LatestReceivedBlockNumber = 2 (same as 3rd) + node.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(2), nil) + node.On("Order").Return(int32(1)) + nodes = append(nodes, node) + + selector := evmclient.NewHighestHeadNodeSelector(nodes) + assert.Same(t, nodes[2], selector.Select()) + }) + + t.Run("another best node", func(t *testing.T) { + node := evmmocks.NewNode(t) + // fifth node is alive, LatestReceivedBlockNumber = 3 (better than 3rd and 4th) + node.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(3), nil) + node.On("Order").Return(int32(1)) + nodes = append(nodes, node) + 
+ selector := evmclient.NewHighestHeadNodeSelector(nodes) + assert.Same(t, nodes[4], selector.Select()) + }) + + t.Run("nodes never update latest block number", func(t *testing.T) { + node1 := evmmocks.NewNode(t) + node1.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(-1), nil) + node1.On("Order").Return(int32(1)) + node2 := evmmocks.NewNode(t) + node2.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(-1), nil) + node2.On("Order").Return(int32(1)) + nodes := []evmclient.Node{node1, node2} + + selector := evmclient.NewHighestHeadNodeSelector(nodes) + assert.Same(t, node1, selector.Select()) + }) +} + +func TestHighestHeadNodeSelector_None(t *testing.T) { + t.Parallel() + + var nodes []evmclient.Node + + for i := 0; i < 3; i++ { + node := evmmocks.NewNode(t) + if i == 0 { + // first node is out of sync + node.On("StateAndLatest").Return(evmclient.NodeStateOutOfSync, int64(-1), nil) + } else { + // others are unreachable + node.On("StateAndLatest").Return(evmclient.NodeStateUnreachable, int64(1), nil) + } + nodes = append(nodes, node) + } + + selector := evmclient.NewHighestHeadNodeSelector(nodes) + assert.Nil(t, selector.Select()) +} + +func TestHighestHeadNodeSelectorWithOrder(t *testing.T) { + t.Parallel() + + var nodes []evmclient.Node + + t.Run("same head and order", func(t *testing.T) { + for i := 0; i < 3; i++ { + node := evmmocks.NewNode(t) + node.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(1), nil) + node.On("Order").Return(int32(2)) + nodes = append(nodes, node) + } + selector := evmclient.NewHighestHeadNodeSelector(nodes) + //Should select the first node because all things are equal + assert.Same(t, nodes[0], selector.Select()) + }) + + t.Run("same head but different order", func(t *testing.T) { + node1 := evmmocks.NewNode(t) + node1.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(3), nil) + node1.On("Order").Return(int32(3)) + + node2 := evmmocks.NewNode(t) + 
node2.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(3), nil) + node2.On("Order").Return(int32(1)) + + node3 := evmmocks.NewNode(t) + node3.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(3), nil) + node3.On("Order").Return(int32(2)) + + nodes := []evmclient.Node{node1, node2, node3} + selector := evmclient.NewHighestHeadNodeSelector(nodes) + //Should select the second node as it has the highest priority + assert.Same(t, nodes[1], selector.Select()) + }) + + t.Run("different head but same order", func(t *testing.T) { + node1 := evmmocks.NewNode(t) + node1.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(1), nil) + node1.On("Order").Maybe().Return(int32(3)) + + node2 := evmmocks.NewNode(t) + node2.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(2), nil) + node2.On("Order").Maybe().Return(int32(3)) + + node3 := evmmocks.NewNode(t) + node3.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(3), nil) + node3.On("Order").Return(int32(3)) + + nodes := []evmclient.Node{node1, node2, node3} + selector := evmclient.NewHighestHeadNodeSelector(nodes) + //Should select the third node as it has the highest head + assert.Same(t, nodes[2], selector.Select()) + }) + + t.Run("different head and different order", func(t *testing.T) { + node1 := evmmocks.NewNode(t) + node1.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(10), nil) + node1.On("Order").Maybe().Return(int32(3)) + + node2 := evmmocks.NewNode(t) + node2.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(11), nil) + node2.On("Order").Maybe().Return(int32(4)) + + node3 := evmmocks.NewNode(t) + node3.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(11), nil) + node3.On("Order").Maybe().Return(int32(3)) + + node4 := evmmocks.NewNode(t) + node4.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(10), nil) + node4.On("Order").Maybe().Return(int32(1)) + + nodes := []evmclient.Node{node1, node2, node3, node4} + selector := 
evmclient.NewHighestHeadNodeSelector(nodes) + //Should select the third node as it has the highest head and will win the priority tie-breaker + assert.Same(t, nodes[2], selector.Select()) + }) +} diff --git a/core/chains/evm/client/node_selector_priority_level.go b/core/chains/evm/client/node_selector_priority_level.go new file mode 100644 index 00000000..5fffad44 --- /dev/null +++ b/core/chains/evm/client/node_selector_priority_level.go @@ -0,0 +1,104 @@ +package client + +import ( + "math" + "sort" + "sync/atomic" +) + +type priorityLevelNodeSelector struct { + nodes []Node + roundRobinCount []atomic.Uint32 +} + +type nodeWithPriority struct { + node Node + priority int32 +} + +// Deprecated: use [pkg/github.com/goplugin/pluginv3.0/v2/common/client.NewPriorityLevelNodeSelector] +func NewPriorityLevelNodeSelector(nodes []Node) NodeSelector { + return &priorityLevelNodeSelector{ + nodes: nodes, + roundRobinCount: make([]atomic.Uint32, nrOfPriorityTiers(nodes)), + } +} + +func (s priorityLevelNodeSelector) Select() Node { + nodes := s.getHighestPriorityAliveTier() + + if len(nodes) == 0 { + return nil + } + priorityLevel := nodes[len(nodes)-1].priority + + // NOTE: Inc returns the number after addition, so we must -1 to get the "current" counter + count := s.roundRobinCount[priorityLevel].Add(1) - 1 + idx := int(count % uint32(len(nodes))) + + return nodes[idx].node +} + +func (s priorityLevelNodeSelector) Name() string { + return NodeSelectionMode_PriorityLevel +} + +// getHighestPriorityAliveTier filters nodes that are not in state NodeStateAlive and +// returns only the highest tier of alive nodes +func (s priorityLevelNodeSelector) getHighestPriorityAliveTier() []nodeWithPriority { + var nodes []nodeWithPriority + for _, n := range s.nodes { + if n.State() == NodeStateAlive { + nodes = append(nodes, nodeWithPriority{n, n.Order()}) + } + } + + if len(nodes) == 0 { + return nil + } + + return removeLowerTiers(nodes) +} + +// removeLowerTiers take a slice of 
nodeWithPriority and keeps only the highest tier +func removeLowerTiers(nodes []nodeWithPriority) []nodeWithPriority { + sort.SliceStable(nodes, func(i, j int) bool { + return nodes[i].priority > nodes[j].priority + }) + + var nodes2 []nodeWithPriority + currentPriority := nodes[len(nodes)-1].priority + + for _, n := range nodes { + if n.priority == currentPriority { + nodes2 = append(nodes2, n) + } + } + + return nodes2 +} + +// nrOfPriorityTiers calculates the total number of priority tiers +func nrOfPriorityTiers(nodes []Node) int32 { + highestPriority := int32(0) + for _, n := range nodes { + priority := n.Order() + if highestPriority < priority { + highestPriority = priority + } + } + return highestPriority + 1 +} + +// firstOrHighestPriority takes a list of nodes and returns the first one with the highest priority +func firstOrHighestPriority(nodes []Node) Node { + hp := int32(math.MaxInt32) + var node Node + for _, n := range nodes { + if n.Order() < hp { + hp = n.Order() + node = n + } + } + return node +} diff --git a/core/chains/evm/client/node_selector_priority_level_test.go b/core/chains/evm/client/node_selector_priority_level_test.go new file mode 100644 index 00000000..77f73c67 --- /dev/null +++ b/core/chains/evm/client/node_selector_priority_level_test.go @@ -0,0 +1,86 @@ +package client_test + +import ( + "testing" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/mocks" + + "github.com/stretchr/testify/assert" +) + +func TestPriorityLevelNodeSelectorName(t *testing.T) { + selector := evmclient.NewPriorityLevelNodeSelector(nil) + assert.Equal(t, selector.Name(), evmclient.NodeSelectionMode_PriorityLevel) +} + +func TestPriorityLevelNodeSelector(t *testing.T) { + t.Parallel() + + var nodes []evmclient.Node + n1 := evmmocks.NewNode(t) + n1.On("State").Return(evmclient.NodeStateAlive) + n1.On("Order").Return(int32(1)) + + n2 := evmmocks.NewNode(t) + 
n2.On("State").Return(evmclient.NodeStateAlive) + n2.On("Order").Return(int32(1)) + + n3 := evmmocks.NewNode(t) + n3.On("State").Return(evmclient.NodeStateAlive) + n3.On("Order").Return(int32(1)) + + nodes = append(nodes, n1, n2, n3) + selector := evmclient.NewPriorityLevelNodeSelector(nodes) + assert.Same(t, nodes[0], selector.Select()) + assert.Same(t, nodes[1], selector.Select()) + assert.Same(t, nodes[2], selector.Select()) + assert.Same(t, nodes[0], selector.Select()) + assert.Same(t, nodes[1], selector.Select()) + assert.Same(t, nodes[2], selector.Select()) +} + +func TestPriorityLevelNodeSelector_None(t *testing.T) { + t.Parallel() + + var nodes []evmclient.Node + + for i := 0; i < 3; i++ { + node := evmmocks.NewNode(t) + if i == 0 { + // first node is out of sync + node.On("State").Return(evmclient.NodeStateOutOfSync) + node.On("Order").Return(int32(1)) + } else { + // others are unreachable + node.On("State").Return(evmclient.NodeStateUnreachable) + node.On("Order").Return(int32(1)) + } + nodes = append(nodes, node) + } + + selector := evmclient.NewPriorityLevelNodeSelector(nodes) + assert.Nil(t, selector.Select()) +} + +func TestPriorityLevelNodeSelector_DifferentOrder(t *testing.T) { + t.Parallel() + + var nodes []evmclient.Node + n1 := evmmocks.NewNode(t) + n1.On("State").Return(evmclient.NodeStateAlive) + n1.On("Order").Return(int32(1)) + + n2 := evmmocks.NewNode(t) + n2.On("State").Return(evmclient.NodeStateAlive) + n2.On("Order").Return(int32(2)) + + n3 := evmmocks.NewNode(t) + n3.On("State").Return(evmclient.NodeStateAlive) + n3.On("Order").Return(int32(3)) + + nodes = append(nodes, n1, n2, n3) + selector := evmclient.NewPriorityLevelNodeSelector(nodes) + assert.Same(t, nodes[0], selector.Select()) + assert.Same(t, nodes[0], selector.Select()) +} diff --git a/core/chains/evm/client/node_selector_round_robin.go b/core/chains/evm/client/node_selector_round_robin.go new file mode 100644 index 00000000..95781924 --- /dev/null +++ 
b/core/chains/evm/client/node_selector_round_robin.go @@ -0,0 +1,39 @@ +package client + +import "sync/atomic" + +type roundRobinSelector struct { + nodes []Node + roundRobinCount atomic.Uint32 +} + +// Deprecated: use [pkg/github.com/goplugin/pluginv3.0/v2/common/client.NewRoundRobinSelector] +func NewRoundRobinSelector(nodes []Node) NodeSelector { + return &roundRobinSelector{ + nodes: nodes, + } +} + +func (s *roundRobinSelector) Select() Node { + var liveNodes []Node + for _, n := range s.nodes { + if n.State() == NodeStateAlive { + liveNodes = append(liveNodes, n) + } + } + + nNodes := len(liveNodes) + if nNodes == 0 { + return nil + } + + // NOTE: Inc returns the number after addition, so we must -1 to get the "current" counter + count := s.roundRobinCount.Add(1) - 1 + idx := int(count % uint32(nNodes)) + + return liveNodes[idx] +} + +func (s *roundRobinSelector) Name() string { + return NodeSelectionMode_RoundRobin +} diff --git a/core/chains/evm/client/node_selector_round_robin_test.go b/core/chains/evm/client/node_selector_round_robin_test.go new file mode 100644 index 00000000..cb5ea6b9 --- /dev/null +++ b/core/chains/evm/client/node_selector_round_robin_test.go @@ -0,0 +1,55 @@ +package client_test + +import ( + "testing" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/mocks" + + "github.com/stretchr/testify/assert" +) + +func TestRoundRobinNodeSelector(t *testing.T) { + t.Parallel() + + var nodes []evmclient.Node + + for i := 0; i < 3; i++ { + node := evmmocks.NewNode(t) + if i == 0 { + // first node is out of sync + node.On("State").Return(evmclient.NodeStateOutOfSync) + } else { + // second & third nodes are alive + node.On("State").Return(evmclient.NodeStateAlive) + } + nodes = append(nodes, node) + } + + selector := evmclient.NewRoundRobinSelector(nodes) + assert.Same(t, nodes[1], selector.Select()) + assert.Same(t, nodes[2], selector.Select()) + assert.Same(t, 
nodes[1], selector.Select()) + assert.Same(t, nodes[2], selector.Select()) +} + +func TestRoundRobinNodeSelector_None(t *testing.T) { + t.Parallel() + + var nodes []evmclient.Node + + for i := 0; i < 3; i++ { + node := evmmocks.NewNode(t) + if i == 0 { + // first node is out of sync + node.On("State").Return(evmclient.NodeStateOutOfSync) + } else { + // others are unreachable + node.On("State").Return(evmclient.NodeStateUnreachable) + } + nodes = append(nodes, node) + } + + selector := evmclient.NewRoundRobinSelector(nodes) + assert.Nil(t, selector.Select()) +} diff --git a/core/chains/evm/client/node_selector_total_difficulty.go b/core/chains/evm/client/node_selector_total_difficulty.go new file mode 100644 index 00000000..1357f876 --- /dev/null +++ b/core/chains/evm/client/node_selector_total_difficulty.go @@ -0,0 +1,43 @@ +package client + +import "math/big" + +type totalDifficultyNodeSelector []Node + +// Deprecated: use [pkg/github.com/goplugin/pluginv3.0/v2/common/client.NewTotalDifficultyNodeSelector] +func NewTotalDifficultyNodeSelector(nodes []Node) NodeSelector { + return totalDifficultyNodeSelector(nodes) +} + +func (s totalDifficultyNodeSelector) Select() Node { + // NodeNoNewHeadsThreshold may not be enabled, in this case all nodes have td == nil + var highestTD *big.Int + var nodes []Node + var aliveNodes []Node + + for _, n := range s { + state, _, currentTD := n.StateAndLatest() + if state != NodeStateAlive { + continue + } + + aliveNodes = append(aliveNodes, n) + if currentTD != nil && (highestTD == nil || currentTD.Cmp(highestTD) >= 0) { + if highestTD == nil || currentTD.Cmp(highestTD) > 0 { + highestTD = currentTD + nodes = nil + } + nodes = append(nodes, n) + } + } + + //If all nodes have td == nil pick one from the nodes that are alive + if len(nodes) == 0 { + return firstOrHighestPriority(aliveNodes) + } + return firstOrHighestPriority(nodes) +} + +func (s totalDifficultyNodeSelector) Name() string { + return NodeSelectionMode_TotalDifficulty 
+} diff --git a/core/chains/evm/client/node_selector_total_difficulty_test.go b/core/chains/evm/client/node_selector_total_difficulty_test.go new file mode 100644 index 00000000..3861fa33 --- /dev/null +++ b/core/chains/evm/client/node_selector_total_difficulty_test.go @@ -0,0 +1,176 @@ +package client_test + +import ( + "math/big" + "testing" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/mocks" + + "github.com/stretchr/testify/assert" +) + +func TestTotalDifficultyNodeSelectorName(t *testing.T) { + selector := evmclient.NewTotalDifficultyNodeSelector(nil) + assert.Equal(t, selector.Name(), evmclient.NodeSelectionMode_TotalDifficulty) +} + +func TestTotalDifficultyNodeSelector(t *testing.T) { + t.Parallel() + + var nodes []evmclient.Node + + for i := 0; i < 3; i++ { + node := evmmocks.NewNode(t) + if i == 0 { + // first node is out of sync + node.On("StateAndLatest").Return(evmclient.NodeStateOutOfSync, int64(-1), nil) + } else if i == 1 { + // second node is alive + node.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(1), big.NewInt(7)) + } else { + // third node is alive and best + node.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(2), big.NewInt(8)) + } + node.On("Order").Maybe().Return(int32(1)) + nodes = append(nodes, node) + } + + selector := evmclient.NewTotalDifficultyNodeSelector(nodes) + assert.Same(t, nodes[2], selector.Select()) + + t.Run("stick to the same node", func(t *testing.T) { + node := evmmocks.NewNode(t) + // fourth node is alive (same as 3rd) + node.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(2), big.NewInt(8)) + node.On("Order").Maybe().Return(int32(1)) + nodes = append(nodes, node) + + selector := evmclient.NewTotalDifficultyNodeSelector(nodes) + assert.Same(t, nodes[2], selector.Select()) + }) + + t.Run("another best node", func(t *testing.T) { + node := evmmocks.NewNode(t) + // fifth node is alive (better than 
3rd and 4th) + node.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(3), big.NewInt(11)) + node.On("Order").Maybe().Return(int32(1)) + nodes = append(nodes, node) + + selector := evmclient.NewTotalDifficultyNodeSelector(nodes) + assert.Same(t, nodes[4], selector.Select()) + }) + + t.Run("nodes never update latest block number", func(t *testing.T) { + node1 := evmmocks.NewNode(t) + node1.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(-1), nil) + node1.On("Order").Maybe().Return(int32(1)) + node2 := evmmocks.NewNode(t) + node2.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(-1), nil) + node2.On("Order").Maybe().Return(int32(1)) + nodes := []evmclient.Node{node1, node2} + + selector := evmclient.NewTotalDifficultyNodeSelector(nodes) + assert.Same(t, node1, selector.Select()) + }) +} + +func TestTotalDifficultyNodeSelector_None(t *testing.T) { + t.Parallel() + + var nodes []evmclient.Node + + for i := 0; i < 3; i++ { + node := evmmocks.NewNode(t) + if i == 0 { + // first node is out of sync + node.On("StateAndLatest").Return(evmclient.NodeStateOutOfSync, int64(-1), nil) + } else { + // others are unreachable + node.On("StateAndLatest").Return(evmclient.NodeStateUnreachable, int64(1), big.NewInt(7)) + } + nodes = append(nodes, node) + } + + selector := evmclient.NewTotalDifficultyNodeSelector(nodes) + assert.Nil(t, selector.Select()) +} + +func TestTotalDifficultyNodeSelectorWithOrder(t *testing.T) { + t.Parallel() + + var nodes []evmclient.Node + + t.Run("same td and order", func(t *testing.T) { + for i := 0; i < 3; i++ { + node := evmmocks.NewNode(t) + node.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(1), big.NewInt(10)) + node.On("Order").Return(int32(2)) + nodes = append(nodes, node) + } + selector := evmclient.NewTotalDifficultyNodeSelector(nodes) + //Should select the first node because all things are equal + assert.Same(t, nodes[0], selector.Select()) + }) + + t.Run("same td but different order", func(t 
*testing.T) { + node1 := evmmocks.NewNode(t) + node1.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(3), big.NewInt(10)) + node1.On("Order").Return(int32(3)) + + node2 := evmmocks.NewNode(t) + node2.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(3), big.NewInt(10)) + node2.On("Order").Return(int32(1)) + + node3 := evmmocks.NewNode(t) + node3.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(3), big.NewInt(10)) + node3.On("Order").Return(int32(2)) + + nodes := []evmclient.Node{node1, node2, node3} + selector := evmclient.NewTotalDifficultyNodeSelector(nodes) + //Should select the second node as it has the highest priority + assert.Same(t, nodes[1], selector.Select()) + }) + + t.Run("different td but same order", func(t *testing.T) { + node1 := evmmocks.NewNode(t) + node1.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(1), big.NewInt(10)) + node1.On("Order").Maybe().Return(int32(3)) + + node2 := evmmocks.NewNode(t) + node2.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(1), big.NewInt(11)) + node2.On("Order").Maybe().Return(int32(3)) + + node3 := evmmocks.NewNode(t) + node3.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(1), big.NewInt(12)) + node3.On("Order").Return(int32(3)) + + nodes := []evmclient.Node{node1, node2, node3} + selector := evmclient.NewTotalDifficultyNodeSelector(nodes) + //Should select the third node as it has the highest td + assert.Same(t, nodes[2], selector.Select()) + }) + + t.Run("different head and different order", func(t *testing.T) { + node1 := evmmocks.NewNode(t) + node1.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(1), big.NewInt(100)) + node1.On("Order").Maybe().Return(int32(4)) + + node2 := evmmocks.NewNode(t) + node2.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(1), big.NewInt(110)) + node2.On("Order").Maybe().Return(int32(5)) + + node3 := evmmocks.NewNode(t) + node3.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(1), 
big.NewInt(110)) + node3.On("Order").Maybe().Return(int32(1)) + + node4 := evmmocks.NewNode(t) + node4.On("StateAndLatest").Return(evmclient.NodeStateAlive, int64(1), big.NewInt(105)) + node4.On("Order").Maybe().Return(int32(2)) + + nodes := []evmclient.Node{node1, node2, node3, node4} + selector := evmclient.NewTotalDifficultyNodeSelector(nodes) + //Should select the third node as it has the highest td and will win the priority tie-breaker + assert.Same(t, nodes[2], selector.Select()) + }) +} diff --git a/core/chains/evm/client/node_test.go b/core/chains/evm/client/node_test.go new file mode 100644 index 00000000..87581726 --- /dev/null +++ b/core/chains/evm/client/node_test.go @@ -0,0 +1,31 @@ +package client_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func Test_NodeWrapError(t *testing.T) { + t.Parallel() + + t.Run("handles nil errors", func(t *testing.T) { + err := evmclient.Wrap(nil, "foo") + assert.NoError(t, err) + }) + + t.Run("adds extra info to context deadline exceeded errors", func(t *testing.T) { + ctx, cancel := context.WithTimeout(testutils.Context(t), 0) + defer cancel() + + err := ctx.Err() + + err = evmclient.Wrap(err, "foo") + + assert.EqualError(t, err, "foo call failed: remote eth node timed out: context deadline exceeded") + }) +} diff --git a/core/chains/evm/client/null_client.go b/core/chains/evm/client/null_client.go new file mode 100644 index 00000000..3780bff8 --- /dev/null +++ b/core/chains/evm/client/null_client.go @@ -0,0 +1,228 @@ +package client + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/logger" + 
commonclient "github.com/goplugin/pluginv3.0/v2/common/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +// NullClient satisfies the Client but has no side effects +type NullClient struct { + cid *big.Int + lggr logger.Logger +} + +func NewNullClient(cid *big.Int, lggr logger.Logger) *NullClient { + return &NullClient{cid: cid, lggr: logger.Named(lggr, "NullClient")} +} + +// NullClientChainID the ChainID that nullclient will return +// 0 is never used as a real chain ID so makes sense as a dummy value here +const NullClientChainID = 0 + +// +// Client methods +// + +func (nc *NullClient) Dial(context.Context) error { + nc.lggr.Debug("Dial") + return nil +} + +func (nc *NullClient) Close() { + nc.lggr.Debug("Close") +} + +func (nc *NullClient) TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (*big.Int, error) { + nc.lggr.Debug("TokenBalance") + return big.NewInt(0), nil +} + +func (nc *NullClient) PLIBalance(ctx context.Context, address common.Address, linkAddress common.Address) (*assets.Link, error) { + nc.lggr.Debug("PLIBalance") + return assets.NewLinkFromJuels(0), nil +} + +func (nc *NullClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + nc.lggr.Debug("CallContext") + return nil +} + +func (nc *NullClient) HeadByNumber(ctx context.Context, n *big.Int) (*evmtypes.Head, error) { + nc.lggr.Debug("HeadByNumber") + return nil, nil +} + +func (nc *NullClient) HeadByHash(ctx context.Context, h common.Hash) (*evmtypes.Head, error) { + nc.lggr.Debug("HeadByHash") + return nil, nil +} + +type nullSubscription struct { + lggr logger.Logger +} + +func newNullSubscription(lggr logger.Logger) *nullSubscription { + return &nullSubscription{lggr: logger.Named(lggr, "NullSubscription")} +} + +func (ns *nullSubscription) Unsubscribe() { + ns.lggr.Debug("Unsubscribe") +} + +func (ns *nullSubscription) Err() <-chan error { + ns.lggr.Debug("Err") + 
return nil +} + +func (nc *NullClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + nc.lggr.Debug("SubscribeFilterLogs") + return newNullSubscription(nc.lggr), nil +} + +func (nc *NullClient) SubscribeNewHead(ctx context.Context, ch chan<- *evmtypes.Head) (ethereum.Subscription, error) { + nc.lggr.Debug("SubscribeNewHead") + return newNullSubscription(nc.lggr), nil +} + +// +// GethClient methods +// + +func (nc *NullClient) ConfiguredChainID() *big.Int { + nc.lggr.Debug("ConfiguredChainID") + if nc.cid != nil { + return nc.cid + } + return big.NewInt(NullClientChainID) +} + +func (nc *NullClient) ChainID() (*big.Int, error) { + nc.lggr.Debug("ChainID") + return nil, nil +} + +func (nc *NullClient) HeaderByNumber(ctx context.Context, n *big.Int) (*types.Header, error) { + nc.lggr.Debug("HeaderByNumber") + return nil, nil +} + +func (nc *NullClient) HeaderByHash(ctx context.Context, h common.Hash) (*types.Header, error) { + nc.lggr.Debug("HeaderByHash") + return nil, nil +} + +func (nc *NullClient) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, sender common.Address) (commonclient.SendTxReturnCode, error) { + nc.lggr.Debug("SendTransactionReturnCode") + return commonclient.Successful, nil +} + +func (nc *NullClient) SendTransaction(ctx context.Context, tx *types.Transaction) error { + nc.lggr.Debug("SendTransaction") + return nil +} + +func (nc *NullClient) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + nc.lggr.Debug("PendingCodeAt") + return nil, nil +} + +func (nc *NullClient) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + nc.lggr.Debug("PendingNonceAt") + return 0, nil +} + +func (nc *NullClient) SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (evmtypes.Nonce, error) { + nc.lggr.Debug("SequenceAt") + return 0, nil +} + +func (nc *NullClient) TransactionReceipt(ctx 
context.Context, txHash common.Hash) (*types.Receipt, error) { + nc.lggr.Debug("TransactionReceipt") + return nil, nil +} + +func (nc *NullClient) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, error) { + nc.lggr.Debug("TransactionByHash") + return nil, nil +} + +func (nc *NullClient) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + nc.lggr.Debug("BlockByNumber") + return nil, nil +} + +func (nc *NullClient) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + nc.lggr.Debug("BlockByHash") + return nil, nil +} + +func (nc *NullClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) { + nc.lggr.Debug("LatestBlockHeight") + return nil, nil +} + +func (nc *NullClient) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { + nc.lggr.Debug("BalanceAt") + return big.NewInt(0), nil +} + +func (nc *NullClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + nc.lggr.Debug("FilterLogs") + return nil, nil +} + +func (nc *NullClient) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { + nc.lggr.Debug("EstimateGas") + return 0, nil +} + +func (nc *NullClient) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + nc.lggr.Debug("SuggestGasPrice") + return big.NewInt(0), nil +} + +func (nc *NullClient) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + nc.lggr.Debug("CallContract") + return nil, nil +} + +func (nc *NullClient) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) { + nc.lggr.Debug("PendingCallContract") + return nil, nil +} + +func (nc *NullClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { + nc.lggr.Debug("CodeAt") + return nil, nil +} + +func (nc *NullClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + return nil +} + +// 
BatchCallContextAll implements evmclient.Client interface +func (nc *NullClient) BatchCallContextAll(ctx context.Context, b []rpc.BatchElem) error { + return nil +} + +func (nc *NullClient) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, err error) { + return nil, nil +} + +// NodeStates implements evmclient.Client +func (nc *NullClient) NodeStates() map[string]string { return nil } + +func (nc *NullClient) IsL2() bool { + nc.lggr.Debug("IsL2") + return false +} diff --git a/core/chains/evm/client/null_client_test.go b/core/chains/evm/client/null_client_test.go new file mode 100644 index 00000000..eed0ee59 --- /dev/null +++ b/core/chains/evm/client/null_client_test.go @@ -0,0 +1,166 @@ +package client_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestNullClient(t *testing.T) { + t.Parallel() + + t.Run("chain id", func(t *testing.T) { + lggr := logger.Test(t) + cid := big.NewInt(123) + nc := client.NewNullClient(cid, lggr) + require.Equal(t, cid, nc.ConfiguredChainID()) + + nc = client.NewNullClient(nil, lggr) + require.Equal(t, big.NewInt(client.NullClientChainID), nc.ConfiguredChainID()) + }) + + t.Run("CL client methods", func(t *testing.T) { + lggr, logs := logger.TestObserved(t, zapcore.DebugLevel) + nc := client.NewNullClient(nil, lggr) + ctx := testutils.Context(t) + + err := nc.Dial(ctx) + require.NoError(t, err) + require.Equal(t, 1, logs.FilterMessage("Dial").Len()) + + nc.Close() + require.Equal(t, 1, logs.FilterMessage("Close").Len()) + + b, err := nc.TokenBalance(ctx, 
common.Address{}, common.Address{}) + require.NoError(t, err) + require.Zero(t, b.Int64()) + require.Equal(t, 1, logs.FilterMessage("TokenBalance").Len()) + + l, err := nc.PLIBalance(ctx, common.Address{}, common.Address{}) + require.NoError(t, err) + require.True(t, l.IsZero()) + require.Equal(t, 1, logs.FilterMessage("PLIBalance").Len()) + + err = nc.CallContext(ctx, nil, "") + require.NoError(t, err) + require.Equal(t, 1, logs.FilterMessage("CallContext").Len()) + + h, err := nc.HeadByNumber(ctx, nil) + require.NoError(t, err) + require.Nil(t, h) + require.Equal(t, 1, logs.FilterMessage("HeadByNumber").Len()) + + chHeads := make(chan *evmtypes.Head) + sub, err := nc.SubscribeNewHead(ctx, chHeads) + require.NoError(t, err) + require.Equal(t, 1, logs.FilterMessage("SubscribeNewHead").Len()) + require.Nil(t, sub.Err()) + require.Equal(t, 1, logs.FilterMessage("Err").Len()) + sub.Unsubscribe() + require.Equal(t, 1, logs.FilterMessage("Unsubscribe").Len()) + + chLogs := make(chan types.Log) + _, err = nc.SubscribeFilterLogs(ctx, ethereum.FilterQuery{}, chLogs) + require.NoError(t, err) + require.Equal(t, 1, logs.FilterMessage("SubscribeFilterLogs").Len()) + }) + + t.Run("Geth client methods", func(t *testing.T) { + lggr, logs := logger.TestObserved(t, zapcore.DebugLevel) + nc := client.NewNullClient(nil, lggr) + ctx := testutils.Context(t) + + h, err := nc.HeaderByNumber(ctx, nil) + require.NoError(t, err) + require.Nil(t, h) + require.Equal(t, 1, logs.FilterMessage("HeaderByNumber").Len()) + + err = nc.SendTransaction(ctx, nil) + require.NoError(t, err) + require.Equal(t, 1, logs.FilterMessage("SendTransaction").Len()) + + c, err := nc.PendingCodeAt(ctx, common.Address{}) + require.NoError(t, err) + require.Empty(t, c) + require.Equal(t, 1, logs.FilterMessage("PendingCodeAt").Len()) + + n, err := nc.PendingNonceAt(ctx, common.Address{}) + require.NoError(t, err) + require.Zero(t, n) + require.Equal(t, 1, logs.FilterMessage("PendingNonceAt").Len()) + + s, err := 
nc.SequenceAt(ctx, common.Address{}, nil) + require.NoError(t, err) + require.Zero(t, s) + require.Equal(t, 1, logs.FilterMessage("SequenceAt").Len()) + + r, err := nc.TransactionReceipt(ctx, common.Hash{}) + require.NoError(t, err) + require.Nil(t, r) + require.Equal(t, 1, logs.FilterMessage("TransactionReceipt").Len()) + + b, err := nc.BlockByNumber(ctx, nil) + require.NoError(t, err) + require.Nil(t, b) + require.Equal(t, 1, logs.FilterMessage("BlockByNumber").Len()) + + b, err = nc.BlockByHash(ctx, common.Hash{}) + require.NoError(t, err) + require.Nil(t, b) + require.Equal(t, 1, logs.FilterMessage("BlockByHash").Len()) + + bal, err := nc.BalanceAt(ctx, common.Address{}, nil) + require.NoError(t, err) + require.Zero(t, bal.Int64()) + require.Equal(t, 1, logs.FilterMessage("BalanceAt").Len()) + + log, err := nc.FilterLogs(ctx, ethereum.FilterQuery{}) + require.NoError(t, err) + require.Nil(t, log) + require.Equal(t, 1, logs.FilterMessage("FilterLogs").Len()) + + gas, err := nc.EstimateGas(ctx, ethereum.CallMsg{}) + require.NoError(t, err) + require.Zero(t, gas) + require.Equal(t, 1, logs.FilterMessage("EstimateGas").Len()) + + gp, err := nc.SuggestGasPrice(ctx) + require.NoError(t, err) + require.Zero(t, gp.Int64()) + require.Equal(t, 1, logs.FilterMessage("SuggestGasPrice").Len()) + + cc, err := nc.CallContract(ctx, ethereum.CallMsg{}, nil) + require.NoError(t, err) + require.Nil(t, cc) + require.Equal(t, 1, logs.FilterMessage("CallContract").Len()) + + ca, err := nc.CodeAt(ctx, common.Address{}, nil) + require.NoError(t, err) + require.Nil(t, ca) + require.Equal(t, 1, logs.FilterMessage("CodeAt").Len()) + + err = nc.BatchCallContext(ctx, []rpc.BatchElem{}) + require.NoError(t, err) + + err = nc.BatchCallContextAll(ctx, []rpc.BatchElem{}) + require.NoError(t, err) + + tip, err := nc.SuggestGasTipCap(ctx) + require.NoError(t, err) + require.Nil(t, tip) + + m := nc.NodeStates() + require.Nil(t, m) + }) +} diff --git a/core/chains/evm/client/pool.go 
b/core/chains/evm/client/pool.go new file mode 100644 index 00000000..c5ea4be6 --- /dev/null +++ b/core/chains/evm/client/pool.go @@ -0,0 +1,503 @@ +package client + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + + "github.com/goplugin/pluginv3.0/v2/common/config" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +var ( + // PromEVMPoolRPCNodeStates reports current RPC node state + PromEVMPoolRPCNodeStates = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "evm_pool_rpc_node_states", + Help: "The number of RPC nodes currently in the given state for the given chain", + }, []string{"evmChainID", "state"}) +) + +const ( + NodeSelectionMode_HighestHead = "HighestHead" + NodeSelectionMode_RoundRobin = "RoundRobin" + NodeSelectionMode_TotalDifficulty = "TotalDifficulty" + NodeSelectionMode_PriorityLevel = "PriorityLevel" +) + +// NodeSelector represents a strategy to select the next node from the pool. +// +// Deprecated: use [pkg/github.com/goplugin/pluginv3.0/v2/common/client.NodeSelector] +type NodeSelector interface { + // Select returns a Node, or nil if none can be selected. + // Implementation must be thread-safe. + Select() Node + // Name returns the strategy name, e.g. 
"HighestHead" or "RoundRobin" + Name() string +} + +// PoolConfig represents settings for the Pool +// +// Deprecated: to be removed +type PoolConfig interface { + NodeSelectionMode() string + NodeNoNewHeadsThreshold() time.Duration + LeaseDuration() time.Duration +} + +// Pool represents an abstraction over one or more primary nodes +// It is responsible for liveness checking and balancing queries across live nodes +// +// Deprecated: use [pkg/github.com/goplugin/pluginv3.0/v2/common/client.MultiNode] +type Pool struct { + services.StateMachine + nodes []Node + sendonlys []SendOnlyNode + chainID *big.Int + chainType config.ChainType + logger logger.SugaredLogger + selectionMode string + noNewHeadsThreshold time.Duration + nodeSelector NodeSelector + leaseDuration time.Duration + leaseTicker *time.Ticker + + activeMu sync.RWMutex + activeNode Node + + chStop services.StopChan + wg sync.WaitGroup +} + +// NewPool - creates new instance of [Pool] +// +// Deprecated: use [pkg/github.com/goplugin/pluginv3.0/v2/common/client.NewMultiNode] +func NewPool(lggr logger.Logger, selectionMode string, leaseDuration time.Duration, noNewHeadsTreshold time.Duration, nodes []Node, sendonlys []SendOnlyNode, chainID *big.Int, chainType config.ChainType) *Pool { + if chainID == nil { + panic("chainID is required") + } + + nodeSelector := func() NodeSelector { + switch selectionMode { + case NodeSelectionMode_HighestHead: + return NewHighestHeadNodeSelector(nodes) + case NodeSelectionMode_RoundRobin: + return NewRoundRobinSelector(nodes) + case NodeSelectionMode_TotalDifficulty: + return NewTotalDifficultyNodeSelector(nodes) + case NodeSelectionMode_PriorityLevel: + return NewPriorityLevelNodeSelector(nodes) + default: + panic(fmt.Sprintf("unsupported NodeSelectionMode: %s", selectionMode)) + } + }() + + lggr = logger.Named(lggr, "Pool") + lggr = logger.With(lggr, "evmChainID", chainID.String()) + + p := &Pool{ + nodes: nodes, + sendonlys: sendonlys, + chainID: chainID, + chainType: 
chainType, + logger: logger.Sugared(lggr), + selectionMode: selectionMode, + noNewHeadsThreshold: noNewHeadsTreshold, + nodeSelector: nodeSelector, + chStop: make(chan struct{}), + leaseDuration: leaseDuration, + } + + p.logger.Debugf("The pool is configured to use NodeSelectionMode: %s", selectionMode) + + return p +} + +// Dial starts every node in the pool +// +// Nodes handle their own redialing and runloops, so this function does not +// return any error if the nodes aren't available +func (p *Pool) Dial(ctx context.Context) error { + return p.StartOnce("Pool", func() (merr error) { + if len(p.nodes) == 0 { + return errors.Errorf("no available nodes for chain %s", p.chainID.String()) + } + var ms services.MultiStart + for _, n := range p.nodes { + if n.ChainID().Cmp(p.chainID) != 0 { + return ms.CloseBecause(errors.Errorf("node %s has chain ID %s which does not match pool chain ID of %s", n.String(), n.ChainID().String(), p.chainID.String())) + } + rawNode, ok := n.(*node) + if ok { + // This is a bit hacky but it allows the node to be aware of + // pool state and prevent certain state transitions that might + // otherwise leave no nodes available. It is better to have one + // node in a degraded state than no nodes at all. 
+ rawNode.nLiveNodes = p.nLiveNodes + } + // node will handle its own redialing and automatic recovery + if err := ms.Start(ctx, n); err != nil { + return err + } + } + for _, s := range p.sendonlys { + if s.ChainID().Cmp(p.chainID) != 0 { + return ms.CloseBecause(errors.Errorf("sendonly node %s has chain ID %s which does not match pool chain ID of %s", s.String(), s.ChainID().String(), p.chainID.String())) + } + if err := ms.Start(ctx, s); err != nil { + return err + } + } + p.wg.Add(1) + go p.runLoop() + + if p.leaseDuration.Seconds() > 0 && p.selectionMode != NodeSelectionMode_RoundRobin { + p.logger.Infof("The pool will switch to best node every %s", p.leaseDuration.String()) + p.wg.Add(1) + go p.checkLeaseLoop() + } else { + p.logger.Info("Best node switching is disabled") + } + + return nil + }) +} + +// nLiveNodes returns the number of currently alive nodes, as well as the highest block number and greatest total difficulty. +// totalDifficulty will be 0 if all nodes return nil. +func (p *Pool) nLiveNodes() (nLiveNodes int, blockNumber int64, totalDifficulty *big.Int) { + totalDifficulty = big.NewInt(0) + for _, n := range p.nodes { + if s, num, td := n.StateAndLatest(); s == NodeStateAlive { + nLiveNodes++ + if num > blockNumber { + blockNumber = num + } + if td != nil && td.Cmp(totalDifficulty) > 0 { + totalDifficulty = td + } + } + } + return +} + +func (p *Pool) checkLease() { + bestNode := p.nodeSelector.Select() + for _, n := range p.nodes { + // Terminate client subscriptions. Services are responsible for reconnecting, which will be routed to the new + // best node. 
Only terminate connections with more than 1 subscription to account for the aliveLoop subscription + if n.State() == NodeStateAlive && n != bestNode && n.SubscribersCount() > 1 { + p.logger.Infof("Switching to best node from %q to %q", n.String(), bestNode.String()) + n.UnsubscribeAllExceptAliveLoop() + } + } + + if bestNode != p.activeNode { + p.activeMu.Lock() + p.activeNode = bestNode + p.activeMu.Unlock() + } +} + +func (p *Pool) checkLeaseLoop() { + defer p.wg.Done() + p.leaseTicker = time.NewTicker(p.leaseDuration) + defer p.leaseTicker.Stop() + + for { + select { + case <-p.leaseTicker.C: + p.checkLease() + case <-p.chStop: + return + } + } +} + +func (p *Pool) runLoop() { + defer p.wg.Done() + + p.report() + + // Prometheus' default interval is 15s, set this to under 7.5s to avoid + // aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency) + reportInterval := 6500 * time.Millisecond + monitor := time.NewTicker(utils.WithJitter(reportInterval)) + defer monitor.Stop() + + for { + select { + case <-monitor.C: + p.report() + case <-p.chStop: + return + } + } +} + +func (p *Pool) report() { + type nodeWithState struct { + Node string + State string + } + + var total, dead int + counts := make(map[NodeState]int) + nodeStates := make([]nodeWithState, len(p.nodes)) + for i, n := range p.nodes { + state := n.State() + nodeStates[i] = nodeWithState{n.String(), state.String()} + total++ + if state != NodeStateAlive { + dead++ + } + counts[state]++ + } + for _, state := range allNodeStates { + count := counts[state] + PromEVMPoolRPCNodeStates.WithLabelValues(p.chainID.String(), state.String()).Set(float64(count)) + } + + live := total - dead + p.logger.Tracew(fmt.Sprintf("Pool state: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) + if total == dead { + rerr := fmt.Errorf("no EVM primary nodes available: 0/%d nodes are alive", total) + p.logger.Criticalw(rerr.Error(), "nodeStates", nodeStates) + p.SvcErrBuffer.Append(rerr) + } else if dead > 0 
{ + p.logger.Errorw(fmt.Sprintf("At least one EVM primary node is dead: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) + } +} + +// Close tears down the pool and closes all nodes +func (p *Pool) Close() error { + return p.StopOnce("Pool", func() error { + close(p.chStop) + p.wg.Wait() + + return services.CloseAll(services.MultiCloser(p.nodes), services.MultiCloser(p.sendonlys)) + }) +} + +func (p *Pool) ChainID() *big.Int { + return p.selectNode().ChainID() +} + +func (p *Pool) ChainType() config.ChainType { + return p.chainType +} + +// selectNode returns the active Node, if it is still NodeStateAlive, otherwise it selects a new one from the NodeSelector. +func (p *Pool) selectNode() (node Node) { + p.activeMu.RLock() + node = p.activeNode + p.activeMu.RUnlock() + if node != nil && node.State() == NodeStateAlive { + return // still alive + } + + // select a new one + p.activeMu.Lock() + defer p.activeMu.Unlock() + node = p.activeNode + if node != nil && node.State() == NodeStateAlive { + return // another goroutine beat us here + } + + p.activeNode = p.nodeSelector.Select() + + if p.activeNode == nil { + p.logger.Criticalw("No live RPC nodes available", "NodeSelectionMode", p.nodeSelector.Name()) + errmsg := fmt.Errorf("no live nodes available for chain %s", p.chainID.String()) + p.SvcErrBuffer.Append(errmsg) + return &erroringNode{errMsg: errmsg.Error()} + } + + if p.leaseTicker != nil { + p.leaseTicker.Reset(p.leaseDuration) + } + return p.activeNode +} + +func (p *Pool) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + return p.selectNode().CallContext(ctx, result, method, args...) +} + +func (p *Pool) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + return p.selectNode().BatchCallContext(ctx, b) +} + +// BatchCallContextAll calls BatchCallContext for every single node including +// sendonlys. 
+// CAUTION: This should only be used for mass re-transmitting transactions, it +// might have unexpected effects to use it for anything else. +func (p *Pool) BatchCallContextAll(ctx context.Context, b []rpc.BatchElem) error { + var wg sync.WaitGroup + defer wg.Wait() + + main := p.selectNode() + var all []SendOnlyNode + for _, n := range p.nodes { + all = append(all, n) + } + all = append(all, p.sendonlys...) + for _, n := range all { + if n == main { + // main node is used at the end for the return value + continue + } + // Parallel call made to all other nodes with ignored return value + wg.Add(1) + go func(n SendOnlyNode) { + defer wg.Done() + err := n.BatchCallContext(ctx, b) + if err != nil { + p.logger.Debugw("Secondary node BatchCallContext failed", "err", err) + } else { + p.logger.Trace("Secondary node BatchCallContext success") + } + }(n) + } + + return main.BatchCallContext(ctx, b) +} + +// SendTransaction wrapped Geth client methods +func (p *Pool) SendTransaction(ctx context.Context, tx *types.Transaction) error { + main := p.selectNode() + var all []SendOnlyNode + for _, n := range p.nodes { + all = append(all, n) + } + all = append(all, p.sendonlys...) + for _, n := range all { + if n == main { + // main node is used at the end for the return value + continue + } + // Parallel send to all other nodes with ignored return value + // Async - we do not want to block the main thread with secondary nodes + // in case they are unreliable/slow. + // It is purely a "best effort" send. + // Resource is not unbounded because the default context has a timeout. 
+		ok := p.IfNotStopped(func() {
+			// Must wrap inside IfNotStopped to avoid waitgroup racing with Close
+			p.wg.Add(1)
+			go func(n SendOnlyNode) {
+				defer p.wg.Done()
+
+				sendCtx, cancel := p.chStop.CtxCancel(ContextWithDefaultTimeout())
+				defer cancel()
+
+				err := NewSendError(n.SendTransaction(sendCtx, tx))
+				p.logger.Debugw("Sendonly node sent transaction", "name", n.String(), "tx", tx, "err", err)
+				if err == nil || err.IsNonceTooLowError() || err.IsTransactionAlreadyMined() || err.IsTransactionAlreadyInMempool() {
+					// Nonce too low or transaction known errors are expected since
+					// the primary SendTransaction may well have succeeded already
+					return
+				}
+
+				p.logger.Warnw("Eth client returned error", "name", n.String(), "err", err, "tx", tx)
+			}(n)
+		})
+		if !ok {
+			// Debugw (not Debug): "node" must be a structured key/value pair,
+			// matching the logging convention used throughout this file.
+			p.logger.Debugw("Cannot send transaction on sendonly node; pool is stopped", "node", n.String())
+		}
+	}
+
+	return main.SendTransaction(ctx, tx)
+}
+
+func (p *Pool) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) {
+	return p.selectNode().PendingCodeAt(ctx, account)
+}
+
+func (p *Pool) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) {
+	return p.selectNode().PendingNonceAt(ctx, account)
+}
+
+func (p *Pool) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) {
+	return p.selectNode().NonceAt(ctx, account, blockNumber)
+}
+
+func (p *Pool) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
+	return p.selectNode().TransactionReceipt(ctx, txHash)
+}
+
+func (p *Pool) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, error) {
+	return p.selectNode().TransactionByHash(ctx, txHash)
+}
+
+func (p *Pool) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
+	return p.selectNode().BlockByNumber(ctx, number)
+}
+
+func (p *Pool) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
+	return
p.selectNode().BlockByHash(ctx, hash) +} + +func (p *Pool) BlockNumber(ctx context.Context) (uint64, error) { + return p.selectNode().BlockNumber(ctx) +} + +func (p *Pool) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { + return p.selectNode().BalanceAt(ctx, account, blockNumber) +} + +func (p *Pool) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + return p.selectNode().FilterLogs(ctx, q) +} + +func (p *Pool) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + return p.selectNode().SubscribeFilterLogs(ctx, q, ch) +} + +func (p *Pool) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { + return p.selectNode().EstimateGas(ctx, call) +} + +func (p *Pool) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + return p.selectNode().SuggestGasPrice(ctx) +} + +func (p *Pool) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + return p.selectNode().CallContract(ctx, msg, blockNumber) +} + +func (p *Pool) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) { + return p.selectNode().PendingCallContract(ctx, msg) +} + +func (p *Pool) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { + return p.selectNode().CodeAt(ctx, account, blockNumber) +} + +// bind.ContractBackend methods +func (p *Pool) HeaderByNumber(ctx context.Context, n *big.Int) (*types.Header, error) { + return p.selectNode().HeaderByNumber(ctx, n) +} +func (p *Pool) HeaderByHash(ctx context.Context, h common.Hash) (*types.Header, error) { + return p.selectNode().HeaderByHash(ctx, h) +} + +func (p *Pool) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + return p.selectNode().SuggestGasTipCap(ctx) +} + +// EthSubscribe implements evmclient.Client +func (p *Pool) EthSubscribe(ctx context.Context, channel chan<- 
*evmtypes.Head, args ...interface{}) (ethereum.Subscription, error) { + return p.selectNode().EthSubscribe(ctx, channel, args...) +} diff --git a/core/chains/evm/client/pool_test.go b/core/chains/evm/client/pool_test.go new file mode 100644 index 00000000..4575b614 --- /dev/null +++ b/core/chains/evm/client/pool_test.go @@ -0,0 +1,396 @@ +package client_test + +import ( + "context" + "math/big" + "net/http/httptest" + "net/url" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + promtestutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + "go.uber.org/zap" + + "github.com/goplugin/plugin-common/pkg/logger" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +type poolConfig struct { + selectionMode string + noNewHeadsThreshold time.Duration + leaseDuration time.Duration +} + +func (c poolConfig) NodeSelectionMode() string { + return c.selectionMode +} + +func (c poolConfig) NodeNoNewHeadsThreshold() time.Duration { + return c.noNewHeadsThreshold +} + +func (c poolConfig) LeaseDuration() time.Duration { + return c.leaseDuration +} + +var defaultConfig evmclient.PoolConfig = &poolConfig{ + selectionMode: evmclient.NodeSelectionMode_RoundRobin, + noNewHeadsThreshold: 0, + leaseDuration: time.Second * 0, +} + +func TestPool_Dial(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + poolChainID *big.Int + nodeChainID int64 + sendNodeChainID int64 + nodes []chainIDResps + sendNodes []chainIDResp + errStr string + }{ + { + name: "no nodes", + poolChainID: testutils.FixtureChainID, + nodeChainID: 
testutils.FixtureChainID.Int64(), + sendNodeChainID: testutils.FixtureChainID.Int64(), + nodes: []chainIDResps{}, + sendNodes: []chainIDResp{}, + errStr: "no available nodes for chain 0", + }, + { + name: "normal", + poolChainID: testutils.FixtureChainID, + nodeChainID: testutils.FixtureChainID.Int64(), + sendNodeChainID: testutils.FixtureChainID.Int64(), + nodes: []chainIDResps{ + {ws: chainIDResp{testutils.FixtureChainID.Int64(), nil}}, + }, + sendNodes: []chainIDResp{ + {testutils.FixtureChainID.Int64(), nil}, + }, + }, + { + name: "node has wrong chain ID compared to pool", + poolChainID: testutils.FixtureChainID, + nodeChainID: 42, + sendNodeChainID: testutils.FixtureChainID.Int64(), + nodes: []chainIDResps{ + {ws: chainIDResp{1, nil}}, + }, + sendNodes: []chainIDResp{ + {1, nil}, + }, + errStr: "has chain ID 42 which does not match pool chain ID of 0", + }, + { + name: "sendonly node has wrong chain ID compared to pool", + poolChainID: testutils.FixtureChainID, + nodeChainID: testutils.FixtureChainID.Int64(), + sendNodeChainID: 42, + nodes: []chainIDResps{ + {ws: chainIDResp{testutils.FixtureChainID.Int64(), nil}}, + }, + sendNodes: []chainIDResp{ + {testutils.FixtureChainID.Int64(), nil}, + }, + errStr: "has chain ID 42 which does not match pool chain ID of 0", + }, + { + name: "remote RPC has wrong chain ID for primary node (ws) - no error, it will go into retry loop", + poolChainID: testutils.FixtureChainID, + nodeChainID: testutils.FixtureChainID.Int64(), + sendNodeChainID: testutils.FixtureChainID.Int64(), + nodes: []chainIDResps{ + { + ws: chainIDResp{42, nil}, + http: &chainIDResp{testutils.FixtureChainID.Int64(), nil}, + }, + }, + sendNodes: []chainIDResp{ + {testutils.FixtureChainID.Int64(), nil}, + }, + }, + { + name: "remote RPC has wrong chain ID for primary node (http) - no error, it will go into retry loop", + poolChainID: testutils.FixtureChainID, + nodeChainID: testutils.FixtureChainID.Int64(), + sendNodeChainID: 
testutils.FixtureChainID.Int64(), + nodes: []chainIDResps{ + { + ws: chainIDResp{testutils.FixtureChainID.Int64(), nil}, + http: &chainIDResp{42, nil}, + }, + }, + sendNodes: []chainIDResp{ + {testutils.FixtureChainID.Int64(), nil}, + }, + }, + { + name: "remote RPC has wrong chain ID for sendonly node - no error, it will go into retry loop", + poolChainID: testutils.FixtureChainID, + nodeChainID: testutils.FixtureChainID.Int64(), + sendNodeChainID: testutils.FixtureChainID.Int64(), + nodes: []chainIDResps{ + {ws: chainIDResp{testutils.FixtureChainID.Int64(), nil}}, + }, + sendNodes: []chainIDResp{ + {42, nil}, + }, + }, + } + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + ctx := testutils.Context(t) + + nodes := make([]evmclient.Node, len(test.nodes)) + for i, n := range test.nodes { + nodes[i] = n.newNode(t, test.nodeChainID) + } + sendNodes := make([]evmclient.SendOnlyNode, len(test.sendNodes)) + for i, n := range test.sendNodes { + sendNodes[i] = n.newSendOnlyNode(t, test.sendNodeChainID) + } + p := evmclient.NewPool(logger.Test(t), defaultConfig.NodeSelectionMode(), defaultConfig.LeaseDuration(), time.Second*0, nodes, sendNodes, test.poolChainID, "") + err := p.Dial(ctx) + if err == nil { + t.Cleanup(func() { assert.NoError(t, p.Close()) }) + } + assert.True(t, p.ChainType().IsValid()) + assert.False(t, p.ChainType().IsL2()) + if test.errStr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), test.errStr) + } else { + require.NoError(t, err) + } + }) + } +} + +type chainIDResp struct { + chainID int64 + err error +} + +func (r *chainIDResp) newSendOnlyNode(t *testing.T, nodeChainID int64) evmclient.SendOnlyNode { + httpURL := r.newHTTPServer(t) + return evmclient.NewSendOnlyNode(logger.Test(t), *httpURL, t.Name(), big.NewInt(nodeChainID)) +} + +func (r *chainIDResp) newHTTPServer(t *testing.T) *url.URL { + rpcSrv := rpc.NewServer() + t.Cleanup(rpcSrv.Stop) + err := rpcSrv.RegisterName("eth", 
&chainIDService{*r}) + require.NoError(t, err) + ts := httptest.NewServer(rpcSrv) + t.Cleanup(ts.Close) + + httpURL, err := url.Parse(ts.URL) + require.NoError(t, err) + return httpURL +} + +type chainIDResps struct { + ws chainIDResp + http *chainIDResp + id int32 +} + +func (r *chainIDResps) newNode(t *testing.T, nodeChainID int64) evmclient.Node { + ws := testutils.NewWSServer(t, big.NewInt(r.ws.chainID), func(method string, params gjson.Result) (resp testutils.JSONRPCResponse) { + switch method { + case "eth_subscribe": + resp.Result = `"0x00"` + resp.Notify = headResult + return + case "eth_unsubscribe": + resp.Result = "true" + return + } + t.Errorf("Unexpected method call: %s(%s)", method, params) + return + }).WSURL().String() + + wsURL, err := url.Parse(ws) + require.NoError(t, err) + + var httpURL *url.URL + if r.http != nil { + httpURL = r.http.newHTTPServer(t) + } + + defer func() { r.id++ }() + return evmclient.NewNode(evmclient.TestNodePoolConfig{}, time.Second*0, logger.Test(t), *wsURL, httpURL, t.Name(), r.id, big.NewInt(nodeChainID), 0) +} + +type chainIDService struct { + chainIDResp +} + +func (x *chainIDService) ChainId(ctx context.Context) (*hexutil.Big, error) { + if x.err != nil { + return nil, x.err + } + return (*hexutil.Big)(big.NewInt(x.chainID)), nil +} + +func TestUnit_Pool_RunLoop(t *testing.T) { + t.Parallel() + + n1 := evmmocks.NewNode(t) + n2 := evmmocks.NewNode(t) + n3 := evmmocks.NewNode(t) + nodes := []evmclient.Node{n1, n2, n3} + + lggr, observedLogs := logger.TestObserved(t, zap.ErrorLevel) + p := evmclient.NewPool(lggr, defaultConfig.NodeSelectionMode(), defaultConfig.LeaseDuration(), time.Second*0, nodes, []evmclient.SendOnlyNode{}, &cltest.FixtureChainID, "") + + n1.On("String").Maybe().Return("n1") + n2.On("String").Maybe().Return("n2") + n3.On("String").Maybe().Return("n3") + + n1.On("Close").Maybe().Return(nil) + n2.On("Close").Maybe().Return(nil) + n3.On("Close").Maybe().Return(nil) + + // n1 is alive + n1.On("Start", 
mock.Anything).Return(nil).Once() + n1.On("State").Return(evmclient.NodeStateAlive) + n1.On("ChainID").Return(testutils.FixtureChainID).Once() + // n2 is unreachable + n2.On("Start", mock.Anything).Return(nil).Once() + n2.On("State").Return(evmclient.NodeStateUnreachable) + n2.On("ChainID").Return(testutils.FixtureChainID).Once() + // n3 is out of sync + n3.On("Start", mock.Anything).Return(nil).Once() + n3.On("State").Return(evmclient.NodeStateOutOfSync) + n3.On("ChainID").Return(testutils.FixtureChainID).Once() + + require.NoError(t, p.Dial(testutils.Context(t))) + t.Cleanup(func() { assert.NoError(t, p.Close()) }) + + testutils.WaitForLogMessage(t, observedLogs, "At least one EVM primary node is dead") + + testutils.AssertEventually(t, func() bool { + totalReported := promtestutil.CollectAndCount(evmclient.PromEVMPoolRPCNodeStates) + if totalReported < 3 { + return false + } + if promtestutil.ToFloat64(evmclient.PromEVMPoolRPCNodeStates.WithLabelValues("0", "Alive")) < 1.0 { + return false + } + if promtestutil.ToFloat64(evmclient.PromEVMPoolRPCNodeStates.WithLabelValues("0", "Unreachable")) < 1.0 { + return false + } + if promtestutil.ToFloat64(evmclient.PromEVMPoolRPCNodeStates.WithLabelValues("0", "OutOfSync")) < 1.0 { + return false + } + return true + }) +} + +func TestUnit_Pool_BatchCallContextAll(t *testing.T) { + t.Parallel() + + var nodes []evmclient.Node + var sendonlys []evmclient.SendOnlyNode + + nodeCount := 2 + sendOnlyCount := 3 + + b := []rpc.BatchElem{ + {Method: "method", Args: []interface{}{1, false}}, + {Method: "method2"}, + } + + ctx := testutils.Context(t) + + for i := 0; i < nodeCount; i++ { + node := evmmocks.NewNode(t) + node.On("State").Return(evmclient.NodeStateAlive).Maybe() + node.On("BatchCallContext", ctx, b).Return(nil).Once() + nodes = append(nodes, node) + } + for i := 0; i < sendOnlyCount; i++ { + s := evmmocks.NewSendOnlyNode(t) + s.On("BatchCallContext", ctx, b).Return(nil).Once() + sendonlys = append(sendonlys, s) + } + + p 
:= evmclient.NewPool(logger.Test(t), defaultConfig.NodeSelectionMode(), defaultConfig.LeaseDuration(), time.Second*0, nodes, sendonlys, &cltest.FixtureChainID, "") + + assert.True(t, p.ChainType().IsValid()) + assert.False(t, p.ChainType().IsL2()) + require.NoError(t, p.BatchCallContextAll(ctx, b)) +} + +func TestUnit_Pool_LeaseDuration(t *testing.T) { + t.Parallel() + + n1 := evmmocks.NewNode(t) + n2 := evmmocks.NewNode(t) + nodes := []evmclient.Node{n1, n2} + type nodeStateSwitch struct { + isAlive bool + mu sync.RWMutex + } + + nodeSwitch := nodeStateSwitch{ + isAlive: true, + mu: sync.RWMutex{}, + } + + n1.On("String").Maybe().Return("n1") + n2.On("String").Maybe().Return("n2") + n1.On("Close").Maybe().Return(nil) + n2.On("Close").Maybe().Return(nil) + n2.On("UnsubscribeAllExceptAliveLoop").Return() + n2.On("SubscribersCount").Return(int32(2)) + + n1.On("Start", mock.Anything).Return(nil).Once() + n1.On("State").Return(func() evmclient.NodeState { + nodeSwitch.mu.RLock() + defer nodeSwitch.mu.RUnlock() + if nodeSwitch.isAlive { + return evmclient.NodeStateAlive + } + return evmclient.NodeStateOutOfSync + }) + n1.On("Order").Return(int32(1)) + n1.On("ChainID").Return(testutils.FixtureChainID).Once() + + n2.On("Start", mock.Anything).Return(nil).Once() + n2.On("State").Return(evmclient.NodeStateAlive) + n2.On("Order").Return(int32(2)) + n2.On("ChainID").Return(testutils.FixtureChainID).Once() + + lggr, observedLogs := logger.TestObserved(t, zap.InfoLevel) + p := evmclient.NewPool(lggr, "PriorityLevel", time.Second*2, time.Second*0, nodes, []evmclient.SendOnlyNode{}, &cltest.FixtureChainID, "") + require.NoError(t, p.Dial(testutils.Context(t))) + t.Cleanup(func() { assert.NoError(t, p.Close()) }) + + testutils.WaitForLogMessage(t, observedLogs, "The pool will switch to best node every 2s") + nodeSwitch.mu.Lock() + nodeSwitch.isAlive = false + nodeSwitch.mu.Unlock() + testutils.WaitForLogMessage(t, observedLogs, "At least one EVM primary node is dead") + 
nodeSwitch.mu.Lock() + nodeSwitch.isAlive = true + nodeSwitch.mu.Unlock() + testutils.WaitForLogMessage(t, observedLogs, `Switching to best node from "n2" to "n1"`) + +} diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go new file mode 100644 index 00000000..420cb35b --- /dev/null +++ b/core/chains/evm/client/rpc_client.go @@ -0,0 +1,1074 @@ +package client + +import ( + "context" + "fmt" + "math/big" + "net/url" + "strconv" + "sync" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + "github.com/google/uuid" + "github.com/pkg/errors" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/logger" + + commonclient "github.com/goplugin/pluginv3.0/v2/common/client" + commontypes "github.com/goplugin/pluginv3.0/v2/common/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +// RPCCLient includes all the necessary generalized RPC methods along with any additional chain-specific methods. 
+type RPCCLient interface { + commonclient.RPC[ + *big.Int, + evmtypes.Nonce, + common.Address, + common.Hash, + *types.Transaction, + common.Hash, + types.Log, + ethereum.FilterQuery, + *evmtypes.Receipt, + *assets.Wei, + *evmtypes.Head, + ] + BlockByHashGeth(ctx context.Context, hash common.Hash) (b *types.Block, err error) + BlockByNumberGeth(ctx context.Context, number *big.Int) (b *types.Block, err error) + HeaderByHash(ctx context.Context, h common.Hash) (head *types.Header, err error) + HeaderByNumber(ctx context.Context, n *big.Int) (head *types.Header, err error) + PendingCodeAt(ctx context.Context, account common.Address) (b []byte, err error) + SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (s ethereum.Subscription, err error) + SuggestGasPrice(ctx context.Context) (p *big.Int, err error) + SuggestGasTipCap(ctx context.Context) (t *big.Int, err error) + TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (r *types.Receipt, err error) +} + +type rpcClient struct { + rpcLog logger.SugaredLogger + name string + id int32 + chainID *big.Int + tier commonclient.NodeTier + + ws rawclient + http *rawclient + + stateMu sync.RWMutex // protects state* fields + + // Need to track subscriptions because closing the RPC does not (always?) + // close the underlying subscription + subs []ethereum.Subscription + + // Need to track the aliveLoop subscription, so we do not cancel it when checking lease on the MultiNode + aliveLoopSub ethereum.Subscription + + // chStopInFlight can be closed to immediately cancel all in-flight requests on + // this rpcClient. Closing and replacing should be serialized through + // stateMu since it can happen on state transitions as well as rpcClient Close. 
+	chStopInFlight chan struct{}
+}
+
+// NewRPCClient returns a new *rpcClient as commonclient.RPC
+func NewRPCClient(
+	lggr logger.Logger,
+	wsuri url.URL,
+	httpuri *url.URL,
+	name string,
+	id int32,
+	chainID *big.Int,
+	tier commonclient.NodeTier,
+) RPCCLient {
+	r := new(rpcClient)
+	r.name = name
+	r.id = id
+	r.chainID = chainID
+	r.tier = tier
+	r.ws.uri = wsuri
+	if httpuri != nil {
+		r.http = &rawclient{uri: *httpuri}
+	}
+	r.chStopInFlight = make(chan struct{})
+	lggr = logger.Named(lggr, "Client")
+	lggr = logger.With(lggr,
+		"clientTier", tier.String(),
+		"clientName", name,
+		"client", r.String(),
+		"evmChainID", chainID,
+	)
+	r.rpcLog = logger.Sugared(lggr).Named("RPC")
+
+	return r
+}
+
+// Not thread-safe, pure dial.
+func (r *rpcClient) Dial(callerCtx context.Context) error {
+	ctx, cancel := r.makeQueryCtx(callerCtx)
+	defer cancel()
+
+	promEVMPoolRPCNodeDials.WithLabelValues(r.chainID.String(), r.name).Inc()
+	lggr := r.rpcLog.With("wsuri", r.ws.uri.Redacted())
+	if r.http != nil {
+		lggr = lggr.With("httpuri", r.http.uri.Redacted())
+	}
+	lggr.Debugw("RPC dial: evmclient.Client#dial")
+
+	wsrpc, err := rpc.DialWebsocket(ctx, r.ws.uri.String(), "")
+	if err != nil {
+		promEVMPoolRPCNodeDialsFailed.WithLabelValues(r.chainID.String(), r.name).Inc()
+		return errors.Wrapf(err, "error while dialing websocket: %v", r.ws.uri.Redacted())
+	}
+
+	r.ws.rpc = wsrpc
+	r.ws.geth = ethclient.NewClient(wsrpc)
+
+	if r.http != nil {
+		if err := r.DialHTTP(); err != nil {
+			return err
+		}
+	}
+
+	promEVMPoolRPCNodeDialsSuccess.WithLabelValues(r.chainID.String(), r.name).Inc()
+
+	return nil
+}
+
+// Not thread-safe, pure dial.
+// DialHTTP doesn't actually make any external HTTP calls
+// It can only return error if the URL is malformed.
+func (r *rpcClient) DialHTTP() error { + promEVMPoolRPCNodeDials.WithLabelValues(r.chainID.String(), r.name).Inc() + lggr := r.rpcLog.With("httpuri", r.ws.uri.Redacted()) + lggr.Debugw("RPC dial: evmclient.Client#dial") + + var httprpc *rpc.Client + httprpc, err := rpc.DialHTTP(r.http.uri.String()) + if err != nil { + promEVMPoolRPCNodeDialsFailed.WithLabelValues(r.chainID.String(), r.name).Inc() + return errors.Wrapf(err, "error while dialing HTTP: %v", r.http.uri.Redacted()) + } + + r.http.rpc = httprpc + r.http.geth = ethclient.NewClient(httprpc) + + promEVMPoolRPCNodeDialsSuccess.WithLabelValues(r.chainID.String(), r.name).Inc() + + return nil +} + +func (r *rpcClient) Close() { + defer func() { + if r.ws.rpc != nil { + r.ws.rpc.Close() + } + }() + + r.stateMu.Lock() + defer r.stateMu.Unlock() + r.cancelInflightRequests() +} + +// cancelInflightRequests closes and replaces the chStopInFlight +// WARNING: NOT THREAD-SAFE +// This must be called from within the r.stateMu lock +func (r *rpcClient) cancelInflightRequests() { + close(r.chStopInFlight) + r.chStopInFlight = make(chan struct{}) +} + +func (r *rpcClient) String() string { + s := fmt.Sprintf("(%s)%s:%s", r.tier.String(), r.name, r.ws.uri.Redacted()) + if r.http != nil { + s = s + fmt.Sprintf(":%s", r.http.uri.Redacted()) + } + return s +} + +func (r *rpcClient) logResult( + lggr logger.Logger, + err error, + callDuration time.Duration, + rpcDomain, + callName string, + results ...interface{}, +) { + lggr = logger.With(lggr, "duration", callDuration, "rpcDomain", rpcDomain, "callName", callName) + promEVMPoolRPCNodeCalls.WithLabelValues(r.chainID.String(), r.name).Inc() + if err == nil { + promEVMPoolRPCNodeCallsSuccess.WithLabelValues(r.chainID.String(), r.name).Inc() + logger.Sugared(lggr).Tracew(fmt.Sprintf("evmclient.Client#%s RPC call success", callName), results...) 
+ } else { + promEVMPoolRPCNodeCallsFailed.WithLabelValues(r.chainID.String(), r.name).Inc() + lggr.Debugw( + fmt.Sprintf("evmclient.Client#%s RPC call failure", callName), + append(results, "err", err)..., + ) + } + promEVMPoolRPCCallTiming. + WithLabelValues( + r.chainID.String(), // chain id + r.name, // rpcClient name + rpcDomain, // rpc domain + "false", // is send only + strconv.FormatBool(err == nil), // is successful + callName, // rpc call name + ). + Observe(float64(callDuration)) +} + +func (r *rpcClient) getRPCDomain() string { + if r.http != nil { + return r.http.uri.Host + } + return r.ws.uri.Host +} + +// registerSub adds the sub to the rpcClient list +func (r *rpcClient) registerSub(sub ethereum.Subscription) { + r.stateMu.Lock() + defer r.stateMu.Unlock() + r.subs = append(r.subs, sub) +} + +// disconnectAll disconnects all clients connected to the rpcClient +// WARNING: NOT THREAD-SAFE +// This must be called from within the r.stateMu lock +func (r *rpcClient) DisconnectAll() { + if r.ws.rpc != nil { + r.ws.rpc.Close() + } + r.cancelInflightRequests() + r.unsubscribeAll() +} + +// unsubscribeAll unsubscribes all subscriptions +// WARNING: NOT THREAD-SAFE +// This must be called from within the r.stateMu lock +func (r *rpcClient) unsubscribeAll() { + for _, sub := range r.subs { + sub.Unsubscribe() + } + r.subs = nil +} +func (r *rpcClient) SetAliveLoopSub(sub commontypes.Subscription) { + r.stateMu.Lock() + defer r.stateMu.Unlock() + + r.aliveLoopSub = sub +} + +// SubscribersCount returns the number of client subscribed to the node +func (r *rpcClient) SubscribersCount() int32 { + r.stateMu.RLock() + defer r.stateMu.RUnlock() + return int32(len(r.subs)) +} + +// UnsubscribeAllExceptAliveLoop disconnects all subscriptions to the node except the alive loop subscription +// while holding the n.stateMu lock +func (r *rpcClient) UnsubscribeAllExceptAliveLoop() { + r.stateMu.Lock() + defer r.stateMu.Unlock() + + for _, s := range r.subs { + if s != 
r.aliveLoopSub { + s.Unsubscribe() + } + } +} + +// RPC wrappers + +// CallContext implementation +func (r *rpcClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return err + } + defer cancel() + lggr := r.newRqLggr().With( + "method", method, + "args", args, + ) + + lggr.Debug("RPC call: evmclient.Client#CallContext") + start := time.Now() + if http != nil { + err = r.wrapHTTP(http.rpc.CallContext(ctx, result, method, args...)) + } else { + err = r.wrapWS(ws.rpc.CallContext(ctx, result, method, args...)) + } + duration := time.Since(start) + + r.logResult(lggr, err, duration, r.getRPCDomain(), "CallContext") + + return err +} + +func (r *rpcClient) BatchCallContext(ctx context.Context, b []any) error { + ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return err + } + batch := make([]rpc.BatchElem, len(b)) + for i, arg := range b { + batch[i] = arg.(rpc.BatchElem) + } + defer cancel() + lggr := r.newRqLggr().With("nBatchElems", len(b), "batchElems", b) + + lggr.Trace("RPC call: evmclient.Client#BatchCallContext") + start := time.Now() + if http != nil { + err = r.wrapHTTP(http.rpc.BatchCallContext(ctx, batch)) + } else { + err = r.wrapWS(ws.rpc.BatchCallContext(ctx, batch)) + } + duration := time.Since(start) + + r.logResult(lggr, err, duration, r.getRPCDomain(), "BatchCallContext") + + return err +} + +func (r *rpcClient) Subscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (commontypes.Subscription, error) { + ctx, cancel, ws, _, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := r.newRqLggr().With("args", args) + + lggr.Debug("RPC call: evmclient.Client#EthSubscribe") + start := time.Now() + sub, err := ws.rpc.EthSubscribe(ctx, channel, args...) 
+ if err == nil { + r.registerSub(sub) + } + duration := time.Since(start) + + r.logResult(lggr, err, duration, r.getRPCDomain(), "EthSubscribe") + + return sub, err +} + +// GethClient wrappers + +func (r *rpcClient) TransactionReceipt(ctx context.Context, txHash common.Hash) (receipt *evmtypes.Receipt, err error) { + err = r.CallContext(ctx, &receipt, "eth_getTransactionReceipt", txHash, false) + if err != nil { + return nil, err + } + if receipt == nil { + err = ethereum.NotFound + return + } + return +} + +func (r *rpcClient) TransactionReceiptGeth(ctx context.Context, txHash common.Hash) (receipt *types.Receipt, err error) { + ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := r.newRqLggr().With("txHash", txHash) + + lggr.Debug("RPC call: evmclient.Client#TransactionReceipt") + + start := time.Now() + if http != nil { + receipt, err = http.geth.TransactionReceipt(ctx, txHash) + err = r.wrapHTTP(err) + } else { + receipt, err = ws.geth.TransactionReceipt(ctx, txHash) + err = r.wrapWS(err) + } + duration := time.Since(start) + + r.logResult(lggr, err, duration, r.getRPCDomain(), "TransactionReceipt", + "receipt", receipt, + ) + + return +} +func (r *rpcClient) TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, err error) { + ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := r.newRqLggr().With("txHash", txHash) + + lggr.Debug("RPC call: evmclient.Client#TransactionByHash") + + start := time.Now() + if http != nil { + tx, _, err = http.geth.TransactionByHash(ctx, txHash) + err = r.wrapHTTP(err) + } else { + tx, _, err = ws.geth.TransactionByHash(ctx, txHash) + err = r.wrapWS(err) + } + duration := time.Since(start) + + r.logResult(lggr, err, duration, r.getRPCDomain(), "TransactionByHash", + "receipt", tx, + ) + + return +} + +func (r *rpcClient) 
// HeaderByNumber fetches the header for the given block number (nil means the
// latest header), via HTTP when configured, otherwise websocket.
func (r *rpcClient) HeaderByNumber(ctx context.Context, number *big.Int) (header *types.Header, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return nil, err
	}
	defer cancel()
	lggr := r.newRqLggr().With("number", number)

	lggr.Debug("RPC call: evmclient.Client#HeaderByNumber")
	start := time.Now()
	if http != nil {
		header, err = http.geth.HeaderByNumber(ctx, number)
		err = r.wrapHTTP(err)
	} else {
		header, err = ws.geth.HeaderByNumber(ctx, number)
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "HeaderByNumber", "header", header)

	return
}

// HeaderByHash fetches the header with the given block hash.
func (r *rpcClient) HeaderByHash(ctx context.Context, hash common.Hash) (header *types.Header, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return nil, err
	}
	defer cancel()
	lggr := r.newRqLggr().With("hash", hash)

	lggr.Debug("RPC call: evmclient.Client#HeaderByHash")
	start := time.Now()
	if http != nil {
		header, err = http.geth.HeaderByHash(ctx, hash)
		err = r.wrapHTTP(err)
	} else {
		header, err = ws.geth.HeaderByHash(ctx, hash)
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "HeaderByHash",
		"header", header,
	)

	return
}

// BlockByNumber fetches a block as the internal evmtypes.Head representation
// (transaction bodies excluded — note the `false` second RPC param) and stamps
// it with this client's chain ID. Returns ethereum.NotFound for unknown blocks.
func (r *rpcClient) BlockByNumber(ctx context.Context, number *big.Int) (head *evmtypes.Head, err error) {
	hex := ToBlockNumArg(number)
	err = r.CallContext(ctx, &head, "eth_getBlockByNumber", hex, false)
	if err != nil {
		return nil, err
	}
	if head == nil {
		err = ethereum.NotFound
		return
	}
	head.EVMChainID = ubig.New(r.chainID)
	return
}

// BlockByHash is the by-hash analogue of BlockByNumber.
func (r *rpcClient) BlockByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) {
	err = r.CallContext(ctx, &head, "eth_getBlockByHash", hash.Hex(), false)
	if err != nil {
		return nil, err
	}
	if head == nil {
		err = ethereum.NotFound
		return
	}
	head.EVMChainID = ubig.New(r.chainID)
	return
}

// BlockByHashGeth fetches a full geth types.Block by hash.
func (r *rpcClient) BlockByHashGeth(ctx context.Context, hash common.Hash) (block *types.Block, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return nil, err
	}
	defer cancel()
	lggr := r.newRqLggr().With("hash", hash)

	lggr.Debug("RPC call: evmclient.Client#BlockByHash")
	start := time.Now()
	if http != nil {
		block, err = http.geth.BlockByHash(ctx, hash)
		err = r.wrapHTTP(err)
	} else {
		block, err = ws.geth.BlockByHash(ctx, hash)
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "BlockByHash",
		"block", block,
	)

	return
}

// BlockByNumberGeth fetches a full geth types.Block by number (nil = latest).
func (r *rpcClient) BlockByNumberGeth(ctx context.Context, number *big.Int) (block *types.Block, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return nil, err
	}
	defer cancel()
	lggr := r.newRqLggr().With("number", number)

	lggr.Debug("RPC call: evmclient.Client#BlockByNumber")
	start := time.Now()
	if http != nil {
		block, err = http.geth.BlockByNumber(ctx, number)
		err = r.wrapHTTP(err)
	} else {
		block, err = ws.geth.BlockByNumber(ctx, number)
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "BlockByNumber",
		"block", block,
	)

	return
}

// SendTransaction broadcasts a signed transaction to the node.
func (r *rpcClient) SendTransaction(ctx context.Context, tx *types.Transaction) error {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return err
	}
	defer cancel()
	lggr := r.newRqLggr().With("tx", tx)

	lggr.Debug("RPC call: evmclient.Client#SendTransaction")
	start := time.Now()
	if http != nil {
		err = r.wrapHTTP(http.geth.SendTransaction(ctx, tx))
	} else {
		err = r.wrapWS(ws.geth.SendTransaction(ctx, tx))
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "SendTransaction")

	return err
}
// SimulateTransaction is part of the common client interface but is not
// supported by this EVM rpcClient.
func (r *rpcClient) SimulateTransaction(ctx context.Context, tx *types.Transaction) error {
	// Not Implemented
	return errors.New("SimulateTransaction not implemented")
}

// SendEmptyTransaction is part of the common client interface but is not
// supported by this EVM rpcClient.
func (r *rpcClient) SendEmptyTransaction(
	ctx context.Context,
	newTxAttempt func(nonce evmtypes.Nonce, feeLimit uint32, fee *assets.Wei, fromAddress common.Address) (attempt any, err error),
	nonce evmtypes.Nonce,
	gasLimit uint32,
	fee *assets.Wei,
	fromAddress common.Address,
) (txhash string, err error) {
	// Not Implemented
	return "", errors.New("SendEmptyTransaction not implemented")
}

// PendingSequenceAt returns one higher than the highest nonce from both mempool and mined transactions
func (r *rpcClient) PendingSequenceAt(ctx context.Context, account common.Address) (nonce evmtypes.Nonce, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return 0, err
	}
	defer cancel()
	lggr := r.newRqLggr().With("account", account)

	lggr.Debug("RPC call: evmclient.Client#PendingNonceAt")
	start := time.Now()
	var n uint64
	if http != nil {
		n, err = http.geth.PendingNonceAt(ctx, account)
		nonce = evmtypes.Nonce(int64(n))
		err = r.wrapHTTP(err)
	} else {
		n, err = ws.geth.PendingNonceAt(ctx, account)
		nonce = evmtypes.Nonce(int64(n))
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "PendingNonceAt",
		"nonce", nonce,
	)

	return
}

// SequenceAt is a bit of a misnomer. You might expect it to return the highest
// mined nonce at the given block number, but it actually returns the total
// transaction count which is the highest mined nonce + 1
func (r *rpcClient) SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (nonce evmtypes.Nonce, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return 0, err
	}
	defer cancel()
	lggr := r.newRqLggr().With("account", account, "blockNumber", blockNumber)

	lggr.Debug("RPC call: evmclient.Client#NonceAt")
	start := time.Now()
	var n uint64
	if http != nil {
		n, err = http.geth.NonceAt(ctx, account, blockNumber)
		nonce = evmtypes.Nonce(int64(n))
		err = r.wrapHTTP(err)
	} else {
		n, err = ws.geth.NonceAt(ctx, account, blockNumber)
		nonce = evmtypes.Nonce(int64(n))
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "NonceAt",
		"nonce", nonce,
	)

	return
}

// PendingCodeAt returns the contract code at the given account in the pending
// state.
func (r *rpcClient) PendingCodeAt(ctx context.Context, account common.Address) (code []byte, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return nil, err
	}
	defer cancel()
	lggr := r.newRqLggr().With("account", account)

	lggr.Debug("RPC call: evmclient.Client#PendingCodeAt")
	start := time.Now()
	if http != nil {
		code, err = http.geth.PendingCodeAt(ctx, account)
		err = r.wrapHTTP(err)
	} else {
		code, err = ws.geth.PendingCodeAt(ctx, account)
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "PendingCodeAt",
		"code", code,
	)

	return
}

// CodeAt returns the contract code at the given account and block number
// (nil = latest).
func (r *rpcClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) (code []byte, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return nil, err
	}
	defer cancel()
	lggr := r.newRqLggr().With("account", account, "blockNumber", blockNumber)

	lggr.Debug("RPC call: evmclient.Client#CodeAt")
	start := time.Now()
	if http != nil {
		code, err = http.geth.CodeAt(ctx, account, blockNumber)
		err = r.wrapHTTP(err)
	} else {
		code, err = ws.geth.CodeAt(ctx, account, blockNumber)
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "CodeAt",
		"code", code,
	)

	return
}

// EstimateGas estimates gas for the given call.
// NOTE: c must be an ethereum.CallMsg; the assertion below panics otherwise
// (callers are expected to guarantee this).
func (r *rpcClient) EstimateGas(ctx context.Context, c interface{}) (gas uint64, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return 0, err
	}
	defer cancel()
	call := c.(ethereum.CallMsg)
	lggr := r.newRqLggr().With("call", call)

	lggr.Debug("RPC call: evmclient.Client#EstimateGas")
	start := time.Now()
	if http != nil {
		gas, err = http.geth.EstimateGas(ctx, call)
		err = r.wrapHTTP(err)
	} else {
		gas, err = ws.geth.EstimateGas(ctx, call)
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "EstimateGas",
		"gas", gas,
	)

	return
}

// SuggestGasPrice returns the node's suggested (legacy) gas price.
func (r *rpcClient) SuggestGasPrice(ctx context.Context) (price *big.Int, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return nil, err
	}
	defer cancel()
	lggr := r.newRqLggr()

	lggr.Debug("RPC call: evmclient.Client#SuggestGasPrice")
	start := time.Now()
	if http != nil {
		price, err = http.geth.SuggestGasPrice(ctx)
		err = r.wrapHTTP(err)
	} else {
		price, err = ws.geth.SuggestGasPrice(ctx)
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "SuggestGasPrice",
		"price", price,
	)

	return
}
// CallContract executes a read-only contract call at the given block number.
// NOTE: msg must be an ethereum.CallMsg; the assertion below panics otherwise.
func (r *rpcClient) CallContract(ctx context.Context, msg interface{}, blockNumber *big.Int) (val []byte, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return nil, err
	}
	defer cancel()
	lggr := r.newRqLggr().With("callMsg", msg, "blockNumber", blockNumber)
	message := msg.(ethereum.CallMsg)

	lggr.Debug("RPC call: evmclient.Client#CallContract")
	start := time.Now()
	if http != nil {
		val, err = http.geth.CallContract(ctx, message, blockNumber)
		err = r.wrapHTTP(err)
	} else {
		val, err = ws.geth.CallContract(ctx, message, blockNumber)
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "CallContract",
		"val", val,
	)

	return

}

// PendingCallContract executes a read-only contract call against the pending
// state. NOTE: msg must be an ethereum.CallMsg; the assertion panics otherwise.
func (r *rpcClient) PendingCallContract(ctx context.Context, msg interface{}) (val []byte, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return nil, err
	}
	defer cancel()
	lggr := r.newRqLggr().With("callMsg", msg)
	message := msg.(ethereum.CallMsg)

	lggr.Debug("RPC call: evmclient.Client#PendingCallContract")
	start := time.Now()
	if http != nil {
		val, err = http.geth.PendingCallContract(ctx, message)
		err = r.wrapHTTP(err)
	} else {
		val, err = ws.geth.PendingCallContract(ctx, message)
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "PendingCallContract",
		"val", val,
	)

	return

}

// LatestBlockHeight returns the current block number as a *big.Int.
// Note: a non-nil (zero) value is returned even when err != nil; callers must
// check err first.
func (r *rpcClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) {
	var height big.Int
	h, err := r.BlockNumber(ctx)
	return height.SetUint64(h), err
}

// BlockNumber returns the current block number as a uint64.
func (r *rpcClient) BlockNumber(ctx context.Context) (height uint64, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return 0, err
	}
	defer cancel()
	lggr := r.newRqLggr()

	lggr.Debug("RPC call: evmclient.Client#BlockNumber")
	start := time.Now()
	if http != nil {
		height, err = http.geth.BlockNumber(ctx)
		err = r.wrapHTTP(err)
	} else {
		height, err = ws.geth.BlockNumber(ctx)
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "BlockNumber",
		"height", height,
	)

	return
}

// BalanceAt returns the native-token balance of account at blockNumber
// (nil = latest).
func (r *rpcClient) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (balance *big.Int, err error) {
	ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx)
	if err != nil {
		return nil, err
	}
	defer cancel()
	lggr := r.newRqLggr().With("account", account.Hex(), "blockNumber", blockNumber)

	lggr.Debug("RPC call: evmclient.Client#BalanceAt")
	start := time.Now()
	if http != nil {
		balance, err = http.geth.BalanceAt(ctx, account, blockNumber)
		err = r.wrapHTTP(err)
	} else {
		balance, err = ws.geth.BalanceAt(ctx, account, blockNumber)
		err = r.wrapWS(err)
	}
	duration := time.Since(start)

	r.logResult(lggr, err, duration, r.getRPCDomain(), "BalanceAt",
		"balance", balance,
	)

	return
}

// TokenBalance returns the balance of the given address for the token contract address.
// It issues a raw eth_call of balanceOf(address) and parses the hex-encoded
// result (base 0 handles the "0x" prefix).
// NOTE(review): on RPC error this returns a non-nil zero big.Int alongside the
// error, but nil on a parse failure — callers must check err before the value.
func (r *rpcClient) TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (*big.Int, error) {
	result := ""
	numLinkBigInt := new(big.Int)
	functionSelector := evmtypes.HexToFunctionSelector(BALANCE_OF_ADDRESS_FUNCTION_SELECTOR) // balanceOf(address)
	data := utils.ConcatBytes(functionSelector.Bytes(), common.LeftPadBytes(address.Bytes(), utils.EVMWordByteLen))
	args := CallArgs{
		To:   contractAddress,
		Data: data,
	}
	err := r.CallContext(ctx, &result, "eth_call", args, "latest")
	if err != nil {
		return numLinkBigInt, err
	}
	if _, ok := numLinkBigInt.SetString(result, 0); !ok {
		return nil, fmt.Errorf("failed to parse int: %s", result)
	}
	return numLinkBigInt, nil
}

// PLIBalance returns the balance of PLI at the given address
func (r *rpcClient) PLIBalance(ctx context.Context, address common.Address, linkAddress common.Address) (*commonassets.Link, error) {
	balance, err := r.TokenBalance(ctx, address, linkAddress)
	if err != nil {
		return commonassets.NewLinkFromJuels(0), err
	}
	return (*commonassets.Link)(balance), nil
}
return (*commonassets.Link)(balance), nil +} + +func (r *rpcClient) FilterEvents(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + return r.FilterLogs(ctx, q) +} + +func (r *rpcClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) (l []types.Log, err error) { + ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := r.newRqLggr().With("q", q) + + lggr.Debug("RPC call: evmclient.Client#FilterLogs") + start := time.Now() + if http != nil { + l, err = http.geth.FilterLogs(ctx, q) + err = r.wrapHTTP(err) + } else { + l, err = ws.geth.FilterLogs(ctx, q) + err = r.wrapWS(err) + } + duration := time.Since(start) + + r.logResult(lggr, err, duration, r.getRPCDomain(), "FilterLogs", + "log", l, + ) + + return +} + +func (r *rpcClient) ClientVersion(ctx context.Context) (version string, err error) { + err = r.CallContext(ctx, &version, "web3_clientVersion") + return +} + +func (r *rpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (sub ethereum.Subscription, err error) { + ctx, cancel, ws, _, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := r.newRqLggr().With("q", q) + + lggr.Debug("RPC call: evmclient.Client#SubscribeFilterLogs") + start := time.Now() + sub, err = ws.geth.SubscribeFilterLogs(ctx, q, ch) + if err == nil { + r.registerSub(sub) + } + err = r.wrapWS(err) + duration := time.Since(start) + + r.logResult(lggr, err, duration, r.getRPCDomain(), "SubscribeFilterLogs") + + return +} + +func (r *rpcClient) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, err error) { + ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) + if err != nil { + return nil, err + } + defer cancel() + lggr := r.newRqLggr() + + lggr.Debug("RPC call: evmclient.Client#SuggestGasTipCap") + start := time.Now() + if http != nil { + tipCap, err = 
http.geth.SuggestGasTipCap(ctx) + err = r.wrapHTTP(err) + } else { + tipCap, err = ws.geth.SuggestGasTipCap(ctx) + err = r.wrapWS(err) + } + duration := time.Since(start) + + r.logResult(lggr, err, duration, r.getRPCDomain(), "SuggestGasTipCap", + "tipCap", tipCap, + ) + + return +} + +// Returns the ChainID according to the geth client. This is useful for functions like verify() +// the common node. +func (r *rpcClient) ChainID(ctx context.Context) (chainID *big.Int, err error) { + ctx, cancel, ws, http, err := r.makeLiveQueryCtxAndSafeGetClients(ctx) + + defer cancel() + + if http != nil { + chainID, err = http.geth.ChainID(ctx) + err = r.wrapHTTP(err) + } else { + chainID, err = ws.geth.ChainID(ctx) + err = r.wrapWS(err) + } + return +} + +// newRqLggr generates a new logger with a unique request ID +func (r *rpcClient) newRqLggr() logger.SugaredLogger { + return r.rpcLog.With("requestID", uuid.New()) +} + +func wrapCallError(err error, tp string) error { + if err == nil { + return nil + } + if errors.Cause(err).Error() == "context deadline exceeded" { + err = errors.Wrap(err, "remote node timed out") + } + return errors.Wrapf(err, "%s call failed", tp) +} + +func (r *rpcClient) wrapWS(err error) error { + err = wrapCallError(err, fmt.Sprintf("%s websocket (%s)", r.tier.String(), r.ws.uri.Redacted())) + return err +} + +func (r *rpcClient) wrapHTTP(err error) error { + err = wrapCallError(err, fmt.Sprintf("%s http (%s)", r.tier.String(), r.http.uri.Redacted())) + if err != nil { + r.rpcLog.Debugw("Call failed", "err", err) + } else { + r.rpcLog.Trace("Call succeeded") + } + return err +} + +// makeLiveQueryCtxAndSafeGetClients wraps makeQueryCtx +func (r *rpcClient) makeLiveQueryCtxAndSafeGetClients(parentCtx context.Context) (ctx context.Context, cancel context.CancelFunc, ws rawclient, http *rawclient, err error) { + // Need to wrap in mutex because state transition can cancel and replace the + // context + r.stateMu.RLock() + cancelCh := r.chStopInFlight + ws 
// makeLiveQueryCtxAndSafeGetClients wraps makeQueryCtx
func (r *rpcClient) makeLiveQueryCtxAndSafeGetClients(parentCtx context.Context) (ctx context.Context, cancel context.CancelFunc, ws rawclient, http *rawclient, err error) {
	// Need to wrap in mutex because state transition can cancel and replace the
	// context
	r.stateMu.RLock()
	cancelCh := r.chStopInFlight
	ws = r.ws
	if r.http != nil {
		// Copy the struct so the caller holds a stable snapshot even if r.http
		// is replaced after the lock is released.
		cp := *r.http
		http = &cp
	}
	r.stateMu.RUnlock()
	ctx, cancel = makeQueryCtx(parentCtx, cancelCh)
	return
}

// makeQueryCtx derives a query context tied to the current in-flight stop channel.
func (r *rpcClient) makeQueryCtx(ctx context.Context) (context.Context, context.CancelFunc) {
	return makeQueryCtx(ctx, r.getChStopInflight())
}

// getChStopInflight provides a convenience helper that mutex wraps a
// read to the chStopInFlight
func (r *rpcClient) getChStopInflight() chan struct{} {
	r.stateMu.RLock()
	defer r.stateMu.RUnlock()
	return r.chStopInFlight
}

// Name returns this node's configured name.
func (r *rpcClient) Name() string {
	return r.name
}

// Name is a free-function variant of (*rpcClient).Name.
// NOTE(review): this duplicates the method above and looks like dead code —
// confirm there are no external callers before removing.
func Name(r *rpcClient) string {
	return r.name
}

// ---- file: core/chains/evm/client/send_only_node.go ----

package client

import (
	"context"
	"fmt"
	"log"
	"math/big"
	"net/url"
	"strconv"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/rpc"

	"github.com/goplugin/plugin-common/pkg/logger"
	"github.com/goplugin/plugin-common/pkg/services"
)

//go:generate mockery --quiet --name SendOnlyNode --output ../mocks/ --case=underscore

// SendOnlyNode represents one ethereum node used as a sendonly
//
// Deprecated: use [pkg/github.com/goplugin/pluginv3.0/v2/common/client.SendOnlyNode]
type SendOnlyNode interface {
	// Start may attempt to connect to the node, but should only return error for misconfiguration - never for temporary errors.
	Start(context.Context) error
	Close() error

	// ChainID returns the locally configured chain ID (not queried from the node).
	ChainID() (chainID *big.Int)

	SendTransaction(ctx context.Context, tx *types.Transaction) error
	BatchCallContext(ctx context.Context, b []rpc.BatchElem) error

	String() string
	// State returns NodeState
	State() NodeState
	// Name is a unique identifier for this node.
	Name() string
}
+ Name() string +} + +//go:generate mockery --quiet --name TxSender --output ./mocks/ --case=underscore + +type TxSender interface { + SendTransaction(ctx context.Context, tx *types.Transaction) error + ChainID(context.Context) (*big.Int, error) +} + +//go:generate mockery --quiet --name BatchSender --output ./mocks/ --case=underscore + +type BatchSender interface { + BatchCallContext(ctx context.Context, b []rpc.BatchElem) error +} + +var _ SendOnlyNode = &sendOnlyNode{} + +// It only supports sending transactions +// It must a http(s) url +type sendOnlyNode struct { + services.StateMachine + + stateMu sync.RWMutex // protects state* fields + state NodeState + + uri url.URL + batchSender BatchSender + sender TxSender + log logger.Logger + dialed bool + name string + chainID *big.Int + chStop services.StopChan + wg sync.WaitGroup +} + +// NewSendOnlyNode returns a new sendonly node +// +// Deprecated: use [pkg/github.com/goplugin/pluginv3.0/v2/common/client.NewSendOnlyNode] +func NewSendOnlyNode(lggr logger.Logger, httpuri url.URL, name string, chainID *big.Int) SendOnlyNode { + s := new(sendOnlyNode) + s.name = name + s.log = logger.Named(logger.Named(lggr, "SendOnlyNode"), name) + s.log = logger.With(s.log, + "nodeTier", "sendonly", + ) + s.uri = httpuri + s.chainID = chainID + s.chStop = make(chan struct{}) + return s +} + +func (s *sendOnlyNode) Start(ctx context.Context) error { + return s.StartOnce(s.name, func() error { + s.start(ctx) + return nil + }) +} + +// Start setups up and verifies the sendonly node +// Should only be called once in a node's lifecycle +func (s *sendOnlyNode) start(startCtx context.Context) { + if s.state != NodeStateUndialed { + panic(fmt.Sprintf("cannot dial node with state %v", s.state)) + } + + s.log.Debugw("evmclient.Client#Dial(...)") + if s.dialed { + panic("evmclient.Client.Dial(...) 
should only be called once during the node's lifetime.") + } + + // DialHTTP doesn't actually make any external HTTP calls + // It can only return error if the URL is malformed. No amount of retries + // will change this result. + rpc, err := rpc.DialHTTP(s.uri.String()) + if err != nil { + promEVMPoolRPCNodeTransitionsToUnusable.WithLabelValues(s.chainID.String(), s.name).Inc() + s.log.Errorw("Dial failed: EVM SendOnly Node is unusable", "err", err) + s.setState(NodeStateUnusable) + return + } + s.dialed = true + geth := ethclient.NewClient(rpc) + s.SetEthClient(rpc, geth) + + if s.chainID.Cmp(big.NewInt(0)) == 0 { + // Skip verification if chainID is zero + s.log.Warn("sendonly rpc ChainID verification skipped") + } else { + verifyCtx, verifyCancel := s.makeQueryCtx(startCtx) + defer verifyCancel() + + chainID, err := s.sender.ChainID(verifyCtx) + if err != nil || chainID.Cmp(s.chainID) != 0 { + promEVMPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc() + if err != nil { + promEVMPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc() + s.log.Errorw(fmt.Sprintf("Verify failed: %v", err), "err", err) + s.setState(NodeStateUnreachable) + } else { + promEVMPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(s.chainID.String(), s.name).Inc() + s.log.Errorf( + "sendonly rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s", + chainID.String(), + s.chainID.String(), + s.name, + ) + s.setState(NodeStateInvalidChainID) + } + // Since it has failed, spin up the verifyLoop that will keep + // retrying until success + s.wg.Add(1) + go s.verifyLoop() + return + } + } + + promEVMPoolRPCNodeTransitionsToAlive.WithLabelValues(s.chainID.String(), s.name).Inc() + s.setState(NodeStateAlive) + s.log.Infow("Sendonly RPC Node is online", "nodeState", s.state) +} + +func (s *sendOnlyNode) SetEthClient(newBatchSender BatchSender, newSender TxSender) { + if s.sender != nil { + 
// Close stops background work (verifyLoop), waits for it, and marks the node
// closed. Safe to call once per StopOnce semantics.
func (s *sendOnlyNode) Close() error {
	return s.StopOnce(s.name, func() error {
		close(s.chStop)
		s.wg.Wait()
		s.setState(NodeStateClosed)
		return nil
	})
}

// logTiming records per-call timing metrics and a debug log line.
// NOTE: duration is observed in nanoseconds (raw Duration), matching the
// rpcClient wrappers — histogram buckets are presumably calibrated for that.
func (s *sendOnlyNode) logTiming(lggr logger.Logger, duration time.Duration, err error, callName string) {
	promEVMPoolRPCCallTiming.
		WithLabelValues(
			s.chainID.String(),             // chain id
			s.name,                         // node name
			s.uri.Host,                     // rpc domain
			"true",                         // is send only
			strconv.FormatBool(err == nil), // is successful
			callName,                       // rpc call name
		).
		Observe(float64(duration))
	lggr.Debugw(fmt.Sprintf("SendOnly RPC call: evmclient.#%s", callName),
		"duration", duration,
		"rpcDomain", s.uri.Host,
		"name", s.name,
		"chainID", s.chainID,
		"sendOnly", true,
		"err", err,
	)
}

// SendTransaction broadcasts the signed tx, recording timing via the deferred
// logTiming call (the named return err is captured at defer time).
func (s *sendOnlyNode) SendTransaction(parentCtx context.Context, tx *types.Transaction) (err error) {
	defer func(start time.Time) {
		s.logTiming(s.log, time.Since(start), err, "SendTransaction")
	}(time.Now())

	ctx, cancel := s.makeQueryCtx(parentCtx)
	defer cancel()
	return s.wrap(s.sender.SendTransaction(ctx, tx))
}

// BatchCallContext forwards a JSON-RPC batch to the underlying batch sender.
func (s *sendOnlyNode) BatchCallContext(parentCtx context.Context, b []rpc.BatchElem) (err error) {
	defer func(start time.Time) {
		s.logTiming(logger.With(s.log, "nBatchElems", len(b)), time.Since(start), err, "BatchCallContext")
	}(time.Now())

	ctx, cancel := s.makeQueryCtx(parentCtx)
	defer cancel()
	return s.wrap(s.batchSender.BatchCallContext(ctx, b))
}

// ChainID returns the locally configured chain ID (no remote call).
func (s *sendOnlyNode) ChainID() (chainID *big.Int) {
	return s.chainID
}

// wrap annotates an error with this node's (redacted) endpoint description.
func (s *sendOnlyNode) wrap(err error) error {
	return wrap(err, fmt.Sprintf("sendonly http (%s)", s.uri.Redacted()))
}

// String renders the node's tier, name and redacted URI.
func (s *sendOnlyNode) String() string {
	return fmt.Sprintf("(secondary)%s:%s", s.name, s.uri.Redacted())
}
// makeQueryCtx returns a context that cancels if:
// 1. Passed in ctx cancels
// 2. chStop is closed
// 3. Default timeout is reached (queryTimeout)
func (s *sendOnlyNode) makeQueryCtx(ctx context.Context) (context.Context, context.CancelFunc) {
	var chCancel, timeoutCancel context.CancelFunc
	ctx, chCancel = s.chStop.Ctx(ctx)
	ctx, timeoutCancel = context.WithTimeout(ctx, queryTimeout)
	// Cancel both derived contexts together so neither leaks.
	cancel := func() {
		chCancel()
		timeoutCancel()
	}
	return ctx, cancel
}

// setState transitions the node state under the write lock and reports whether
// the state actually changed (false if already in the requested state).
func (s *sendOnlyNode) setState(state NodeState) (changed bool) {
	s.stateMu.Lock()
	defer s.stateMu.Unlock()
	if s.state == state {
		return false
	}
	s.state = state
	return true
}

// State returns the current node state under a read lock.
func (s *sendOnlyNode) State() NodeState {
	s.stateMu.RLock()
	defer s.stateMu.RUnlock()
	return s.state
}

// Name returns this node's configured name.
func (s *sendOnlyNode) Name() string {
	return s.name
}

// ---- file: core/chains/evm/client/send_only_node_lifecycle.go ----

package client

import (
	"fmt"
	"time"

	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils"
)

// verifyLoop may only be triggered once, on Start, if initial chain ID check
// fails.
//
// It will continue checking until success and then exit permanently.
+func (s *sendOnlyNode) verifyLoop() { + defer s.wg.Done() + ctx, cancel := s.chStop.NewCtx() + defer cancel() + + backoff := utils.NewRedialBackoff() + for { + select { + case <-time.After(backoff.Duration()): + chainID, err := s.sender.ChainID(ctx) + if err != nil { + ok := s.IfStarted(func() { + if changed := s.setState(NodeStateUnreachable); changed { + promEVMPoolRPCNodeTransitionsToUnreachable.WithLabelValues(s.chainID.String(), s.name).Inc() + } + }) + if !ok { + return + } + s.log.Errorw(fmt.Sprintf("Verify failed: %v", err), "err", err) + continue + } else if chainID.Cmp(s.chainID) != 0 { + ok := s.IfStarted(func() { + if changed := s.setState(NodeStateInvalidChainID); changed { + promEVMPoolRPCNodeTransitionsToInvalidChainID.WithLabelValues(s.chainID.String(), s.name).Inc() + } + }) + if !ok { + return + } + s.log.Errorf( + "sendonly rpc ChainID doesn't match local chain ID: RPC ID=%s, local ID=%s, node name=%s", + chainID.String(), + s.chainID.String(), + s.name, + ) + + continue + } else { + ok := s.IfStarted(func() { + if changed := s.setState(NodeStateAlive); changed { + promEVMPoolRPCNodeTransitionsToAlive.WithLabelValues(s.chainID.String(), s.name).Inc() + } + }) + if !ok { + return + } + s.log.Infow("Sendonly RPC Node is online", "nodeState", s.state) + return + } + case <-ctx.Done(): + return + } + } +} diff --git a/core/chains/evm/client/send_only_node_test.go b/core/chains/evm/client/send_only_node_test.go new file mode 100644 index 00000000..55b2436f --- /dev/null +++ b/core/chains/evm/client/send_only_node_test.go @@ -0,0 +1,171 @@ +package client_test + +import ( + "fmt" + "math/big" + "net/url" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rpc" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + 
"github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/goplugin/plugin-common/pkg/logger" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestNewSendOnlyNode(t *testing.T) { + t.Parallel() + + urlFormat := "http://user:%s@testurl.com" + password := "pass" + url := testutils.MustParseURL(t, fmt.Sprintf(urlFormat, password)) + redacted := fmt.Sprintf(urlFormat, "xxxxx") + lggr := logger.Test(t) + name := "TestNewSendOnlyNode" + chainID := testutils.NewRandomEVMChainID() + + node := evmclient.NewSendOnlyNode(lggr, *url, name, chainID) + assert.NotNil(t, node) + + // Must contain name & url with redacted password + assert.Contains(t, node.String(), fmt.Sprintf("%s:%s", name, redacted)) + assert.Equal(t, node.ChainID(), chainID) +} + +func TestStartSendOnlyNode(t *testing.T) { + t.Parallel() + + t.Run("Start with Random ChainID", func(t *testing.T) { + t.Parallel() + chainID := testutils.NewRandomEVMChainID() + r := chainIDResp{chainID.Int64(), nil} + url := r.newHTTPServer(t) + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + s := evmclient.NewSendOnlyNode(lggr, *url, t.Name(), chainID) + defer func() { assert.NoError(t, s.Close()) }() + err := s.Start(testutils.Context(t)) + assert.NoError(t, err) // No errors expected + assert.Equal(t, 0, observedLogs.Len()) // No warnings expected + }) + + t.Run("Start with ChainID=0", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + chainID := testutils.FixtureChainID + r := chainIDResp{chainID.Int64(), nil} + url := r.newHTTPServer(t) + s := evmclient.NewSendOnlyNode(lggr, *url, t.Name(), 
testutils.FixtureChainID) + + defer func() { assert.NoError(t, s.Close()) }() + err := s.Start(testutils.Context(t)) + assert.NoError(t, err) + // If ChainID = 0, this should get converted into a warning from Start() + testutils.WaitForLogMessage(t, observedLogs, "ChainID verification skipped") + }) + + t.Run("becomes unusable (and remains undialed) if initial dial fails", func(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestObserved(t, zap.WarnLevel) + invalidURL := url.URL{Scheme: "some rubbish", Host: "not a valid host"} + s := evmclient.NewSendOnlyNode(lggr, invalidURL, t.Name(), testutils.FixtureChainID) + + defer func() { assert.NoError(t, s.Close()) }() + err := s.Start(testutils.Context(t)) + require.NoError(t, err) + + assert.False(t, client.IsDialed(s)) + testutils.RequireLogMessage(t, observedLogs, "Dial failed: EVM SendOnly Node is unusable") + }) +} + +func createSignedTx(t *testing.T, chainID *big.Int, nonce uint64, data []byte) *types.Transaction { + key, err := crypto.GenerateKey() + require.NoError(t, err) + sender, err := bind.NewKeyedTransactorWithChainID(key, chainID) + require.NoError(t, err) + tx := cltest.NewLegacyTransaction( + nonce, sender.From, + assets.Ether(100).ToInt(), + 21000, big.NewInt(1000000000), data, + ) + signedTx, err := sender.Signer(sender.From, tx) + require.NoError(t, err) + return signedTx +} + +func TestSendTransaction(t *testing.T) { + t.Parallel() + + chainID := testutils.FixtureChainID + lggr, observedLogs := logger.TestObserved(t, zap.DebugLevel) + url := testutils.MustParseURL(t, "http://place.holder") + s := evmclient.NewSendOnlyNode(lggr, + *url, + t.Name(), + testutils.FixtureChainID).(evmclient.TestableSendOnlyNode) + require.NotNil(t, s) + + signedTx := createSignedTx(t, chainID, 1, []byte{1, 2, 3}) + + mockTxSender := mocks.NewTxSender(t) + mockTxSender.On("SendTransaction", mock.Anything, mock.MatchedBy( + func(tx *types.Transaction) bool { + return tx.Nonce() == uint64(1) + }, + 
)).Once().Return(nil) + s.SetEthClient(nil, mockTxSender) + + err := s.SendTransaction(testutils.Context(t), signedTx) + assert.NoError(t, err) + testutils.WaitForLogMessage(t, observedLogs, "SendOnly RPC call") +} + +func TestBatchCallContext(t *testing.T) { + t.Parallel() + + lggr := logger.Test(t) + chainID := testutils.FixtureChainID + url := testutils.MustParseURL(t, "http://place.holder") + s := evmclient.NewSendOnlyNode( + lggr, + *url, "TestBatchCallContext", + chainID).(evmclient.TestableSendOnlyNode) + + blockNum := hexutil.EncodeBig(big.NewInt(42)) + req := []rpc.BatchElem{ + { + Method: "eth_getBlockByNumber", + Args: []interface{}{blockNum, true}, + Result: &types.Block{}, + }, + { + Method: "method", + Args: []interface{}{1, false}}, + } + + mockBatchSender := mocks.NewBatchSender(t) + mockBatchSender.On("BatchCallContext", mock.Anything, + mock.MatchedBy( + func(b []rpc.BatchElem) bool { + return len(b) == 2 && + b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == blockNum && b[0].Args[1].(bool) + })).Return(nil).Once().Return(nil) + + s.SetEthClient(mockBatchSender, nil) + + err := s.BatchCallContext(testutils.Context(t), req) + assert.NoError(t, err) +} diff --git a/core/chains/evm/client/simulated_backend_client.go b/core/chains/evm/client/simulated_backend_client.go new file mode 100644 index 00000000..81f802cc --- /dev/null +++ b/core/chains/evm/client/simulated_backend_client.go @@ -0,0 +1,780 @@ +package client + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/logger" + + 
commonclient "github.com/goplugin/pluginv3.0/v2/common/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +func init() { + var err error + + balanceOfABI, err = abi.JSON(strings.NewReader(balanceOfABIString)) + if err != nil { + panic(fmt.Errorf("%w: while parsing erc20ABI", err)) + } +} + +var ( + balanceOfABIString = `[ + { + "constant": true, + "inputs": [ + { + "name": "_owner", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "name": "balance", + "type": "uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + } +]` + + balanceOfABI abi.ABI +) + +// SimulatedBackendClient is an Client implementation using a simulated +// blockchain backend. Note that not all RPC methods are implemented here. +type SimulatedBackendClient struct { + b *backends.SimulatedBackend + t testing.TB + chainId *big.Int +} + +// NewSimulatedBackendClient creates an eth client backed by a simulated backend. +func NewSimulatedBackendClient(t testing.TB, b *backends.SimulatedBackend, chainId *big.Int) *SimulatedBackendClient { + return &SimulatedBackendClient{ + b: b, + t: t, + chainId: chainId, + } +} + +// Dial noop for the sim. +func (c *SimulatedBackendClient) Dial(context.Context) error { + return nil +} + +// Close does nothing. We ought not close the underlying backend here since +// other simulated clients might still be using it +func (c *SimulatedBackendClient) Close() {} + +// CallContext mocks the ethereum client RPC calls used by plugin, copying the +// return value into result. +// The simulated client avoids the old block error from the simulated backend by +// passing `nil` to `CallContract` when calling `CallContext` or `BatchCallContext` +// and will not return an error when an old block is used. 
+func (c *SimulatedBackendClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + switch method { + case "eth_getTransactionReceipt": + return c.ethGetTransactionReceipt(ctx, result, args...) + case "eth_getBlockByNumber": + return c.ethGetBlockByNumber(ctx, result, args...) + case "eth_call": + return c.ethCall(ctx, result, args...) + case "eth_getHeaderByNumber": + return c.ethGetHeaderByNumber(ctx, result, args...) + case "eth_estimateGas": + return c.ethEstimateGas(ctx, result, args...) + default: + return fmt.Errorf("second arg to SimulatedBackendClient.Call is an RPC API method which has not yet been implemented: %s. Add processing for it here", method) + } +} + +// FilterLogs returns all logs that respect the passed filter query. +func (c *SimulatedBackendClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) (logs []types.Log, err error) { + return c.b.FilterLogs(ctx, q) +} + +// SubscribeFilterLogs registers a subscription for push notifications of logs +// from a given address. 
+func (c *SimulatedBackendClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, channel chan<- types.Log) (ethereum.Subscription, error) { + return c.b.SubscribeFilterLogs(ctx, q, channel) +} + +// currentBlockNumber returns index of *pending* block in simulated blockchain +func (c *SimulatedBackendClient) currentBlockNumber() *big.Int { + return c.b.Blockchain().CurrentBlock().Number +} + +func (c *SimulatedBackendClient) TokenBalance(ctx context.Context, address common.Address, contractAddress common.Address) (balance *big.Int, err error) { + callData, err := balanceOfABI.Pack("balanceOf", address) + if err != nil { + return nil, fmt.Errorf("%w: while seeking the ERC20 balance of %s on %s", err, + address, contractAddress) + } + b, err := c.b.CallContract(ctx, ethereum.CallMsg{ + To: &contractAddress, Data: callData}, + c.currentBlockNumber()) + if err != nil { + return nil, fmt.Errorf("%w: while calling ERC20 balanceOf method on %s "+ + "for balance of %s", err, contractAddress, address) + } + err = balanceOfABI.UnpackIntoInterface(balance, "balanceOf", b) + if err != nil { + return nil, fmt.Errorf("unable to unpack balance") + } + return balance, nil +} + +// GetPLIBalance get link balance. +func (c *SimulatedBackendClient) PLIBalance(ctx context.Context, address common.Address, linkAddress common.Address) (*assets.Link, error) { + panic("not implemented") +} + +// TransactionReceipt returns the transaction receipt for the given transaction hash. 
+func (c *SimulatedBackendClient) TransactionReceipt(ctx context.Context, receipt common.Hash) (*types.Receipt, error) { + return c.b.TransactionReceipt(ctx, receipt) +} + +func (c *SimulatedBackendClient) TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, err error) { + tx, _, err = c.b.TransactionByHash(ctx, txHash) + return +} + +func (c *SimulatedBackendClient) blockNumber(number interface{}) (blockNumber *big.Int, err error) { + switch n := number.(type) { + case string: + switch n { + case "latest": + return c.currentBlockNumber(), nil + case "earliest": + return big.NewInt(0), nil + case "pending": + panic("pending block not supported by simulated backend client") // I don't understand the semantics of this. + // return big.NewInt(0).Add(c.currentBlockNumber(), big.NewInt(1)), nil + default: + blockNumber, err := hexutil.DecodeBig(n) + if err != nil { + return nil, fmt.Errorf("%w: while parsing '%s' as hex-encoded block number", err, n) + } + return blockNumber, nil + } + case *big.Int: + if n.Sign() < 0 { + return nil, fmt.Errorf("block number must be non-negative") + } + return n, nil + } + panic("can never reach here") +} + +// HeadByNumber returns our own header type. +func (c *SimulatedBackendClient) HeadByNumber(ctx context.Context, n *big.Int) (*evmtypes.Head, error) { + if n == nil { + n = c.currentBlockNumber() + } + header, err := c.b.HeaderByNumber(ctx, n) + if err != nil { + return nil, err + } else if header == nil { + return nil, ethereum.NotFound + } + return &evmtypes.Head{ + EVMChainID: ubig.NewI(c.chainId.Int64()), + Hash: header.Hash(), + Number: header.Number.Int64(), + ParentHash: header.ParentHash, + Timestamp: time.Unix(int64(header.Time), 0), + }, nil +} + +// HeadByHash returns our own header type. 
+func (c *SimulatedBackendClient) HeadByHash(ctx context.Context, h common.Hash) (*evmtypes.Head, error) { + header, err := c.b.HeaderByHash(ctx, h) + if err != nil { + return nil, err + } else if header == nil { + return nil, ethereum.NotFound + } + return &evmtypes.Head{ + EVMChainID: ubig.NewI(c.chainId.Int64()), + Hash: header.Hash(), + Number: header.Number.Int64(), + ParentHash: header.ParentHash, + Timestamp: time.Unix(int64(header.Time), 0), + }, nil +} + +// BlockByNumber returns a geth block type. +func (c *SimulatedBackendClient) BlockByNumber(ctx context.Context, n *big.Int) (*types.Block, error) { + return c.b.BlockByNumber(ctx, n) +} + +// BlockByNumber returns a geth block type. +func (c *SimulatedBackendClient) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return c.b.BlockByHash(ctx, hash) +} + +func (c *SimulatedBackendClient) LatestBlockHeight(ctx context.Context) (*big.Int, error) { + header, err := c.b.HeaderByNumber(ctx, nil) + return header.Number, err +} + +// ChainID returns the ethereum ChainID. +func (c *SimulatedBackendClient) ConfiguredChainID() *big.Int { + return c.chainId +} + +// ChainID RPC call +func (c *SimulatedBackendClient) ChainID() (*big.Int, error) { + panic("not implemented") +} + +// PendingNonceAt gets pending nonce i.e. mempool nonce. +func (c *SimulatedBackendClient) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + return c.b.PendingNonceAt(ctx, account) +} + +// NonceAt gets nonce as of a specified block. +func (c *SimulatedBackendClient) SequenceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (evmtypes.Nonce, error) { + nonce, err := c.b.NonceAt(ctx, account, blockNumber) + return evmtypes.Nonce(nonce), err +} + +// BalanceAt gets balance as of a specified block. 
+func (c *SimulatedBackendClient) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { + return c.b.BalanceAt(ctx, account, blockNumber) +} + +type headSubscription struct { + unSub chan chan struct{} + subscription ethereum.Subscription +} + +var _ ethereum.Subscription = (*headSubscription)(nil) + +func (h *headSubscription) Unsubscribe() { + done := make(chan struct{}) + h.unSub <- done + <-done +} + +// Err returns err channel +func (h *headSubscription) Err() <-chan error { return h.subscription.Err() } + +// SubscribeNewHead registers a subscription for push notifications of new blocks. +// Note the sim's API only accepts types.Head so we have this goroutine +// to convert those into evmtypes.Head. +func (c *SimulatedBackendClient) SubscribeNewHead( + ctx context.Context, + channel chan<- *evmtypes.Head, +) (ethereum.Subscription, error) { + subscription := &headSubscription{unSub: make(chan chan struct{})} + ch := make(chan *types.Header) + + var err error + subscription.subscription, err = c.b.SubscribeNewHead(ctx, ch) + if err != nil { + return nil, fmt.Errorf("%w: could not subscribe to new heads on "+ + "simulated backend", err) + } + go func() { + var lastHead *evmtypes.Head + for { + select { + case h := <-ch: + var head *evmtypes.Head + if h != nil { + head = &evmtypes.Head{Difficulty: h.Difficulty, Timestamp: time.Unix(int64(h.Time), 0), Number: h.Number.Int64(), Hash: h.Hash(), ParentHash: h.ParentHash, Parent: lastHead, EVMChainID: ubig.New(c.chainId)} + lastHead = head + } + select { + case channel <- head: + case done := <-subscription.unSub: + subscription.subscription.Unsubscribe() + close(done) + return + } + + case done := <-subscription.unSub: + subscription.subscription.Unsubscribe() + close(done) + return + } + } + }() + return subscription, err +} + +// HeaderByNumber returns the geth header type. 
+func (c *SimulatedBackendClient) HeaderByNumber(ctx context.Context, n *big.Int) (*types.Header, error) { + return c.b.HeaderByNumber(ctx, n) +} + +func (c *SimulatedBackendClient) HeaderByHash(ctx context.Context, h common.Hash) (*types.Header, error) { + return c.b.HeaderByHash(ctx, h) +} + +func (c *SimulatedBackendClient) SendTransactionReturnCode(ctx context.Context, tx *types.Transaction, fromAddress common.Address) (commonclient.SendTxReturnCode, error) { + err := c.SendTransaction(ctx, tx) + if err == nil { + return commonclient.Successful, nil + } + if strings.Contains(err.Error(), "could not fetch parent") || strings.Contains(err.Error(), "invalid transaction") { + return commonclient.Fatal, err + } + // All remaining error messages returned from SendTransaction are considered Unknown. + return commonclient.Unknown, err +} + +// SendTransaction sends a transaction. +func (c *SimulatedBackendClient) SendTransaction(ctx context.Context, tx *types.Transaction) error { + sender, err := types.Sender(types.NewLondonSigner(c.chainId), tx) + if err != nil { + logger.Test(c.t).Panic(fmt.Errorf("invalid transaction: %v (tx: %#v)", err, tx)) + } + pendingNonce, err := c.b.PendingNonceAt(ctx, sender) + if err != nil { + panic(fmt.Errorf("unable to determine nonce for account %s: %v", sender.Hex(), err)) + } + // the simulated backend does not gracefully handle tx rebroadcasts (gas bumping) so just + // ignore the situation where nonces are reused + // github.com/ethereum/go-ethereum/blob/fb2c79df1995b4e8dfe79f9c75464d29d23aaaf4/accounts/abi/bind/backends/simulated.go#L556 + if tx.Nonce() < pendingNonce { + return nil + } + + err = c.b.SendTransaction(ctx, tx) + return err +} + +type revertError struct { + error + reason string +} + +func (e *revertError) ErrorCode() int { + return 3 +} + +// ErrorData returns the hex encoded revert reason. 
+func (e *revertError) ErrorData() interface{} { + return e.reason +} + +var _ rpc.DataError = &revertError{} + +// CallContract calls a contract. +func (c *SimulatedBackendClient) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + // Expected error is + // type JsonError struct { + // Code int `json:"code"` + // Message string `json:"message"` + // Data interface{} `json:"data,omitempty"` + //} + res, err := c.b.CallContract(ctx, msg, blockNumber) + if err != nil { + dataErr := revertError{} + if errors.Is(err, &dataErr) { + return nil, &JsonError{Data: dataErr.ErrorData(), Message: dataErr.Error(), Code: 3} + } + // Generic revert, no data + return nil, &JsonError{Data: []byte{}, Message: err.Error(), Code: 3} + } + return res, nil +} + +func (c *SimulatedBackendClient) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) { + // Expected error is + // type JsonError struct { + // Code int `json:"code"` + // Message string `json:"message"` + // Data interface{} `json:"data,omitempty"` + //} + res, err := c.b.PendingCallContract(ctx, msg) + if err != nil { + dataErr := revertError{} + if errors.Is(err, &dataErr) { + return nil, &JsonError{Data: dataErr.ErrorData(), Message: dataErr.Error(), Code: 3} + } + // Generic revert, no data + return nil, &JsonError{Data: []byte{}, Message: err.Error(), Code: 3} + } + return res, nil +} + +// CodeAt gets the code associated with an account as of a specified block. +func (c *SimulatedBackendClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { + return c.b.CodeAt(ctx, account, blockNumber) +} + +// PendingCodeAt gets the latest code. +func (c *SimulatedBackendClient) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + return c.b.PendingCodeAt(ctx, account) +} + +// EstimateGas estimates gas for a msg. 
+func (c *SimulatedBackendClient) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) { + return c.b.EstimateGas(ctx, call) +} + +// SuggestGasPrice recommends a gas price. +func (c *SimulatedBackendClient) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + panic("unimplemented") +} + +// BatchCallContext makes a batch rpc call. +// The simulated client avoids the old block error from the simulated backend by +// passing `nil` to `CallContract` when calling `CallContext` or `BatchCallContext` +// and will not return an error when an old block is used. +func (c *SimulatedBackendClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + select { + case <-ctx.Done(): + return errors.New("context canceled") + default: + //do nothing + } + + for i, elem := range b { + switch elem.Method { + case "eth_getTransactionReceipt": + b[i].Error = c.ethGetTransactionReceipt(ctx, b[i].Result, b[i].Args...) + case "eth_getBlockByNumber": + b[i].Error = c.ethGetBlockByNumber(ctx, b[i].Result, b[i].Args...) + case "eth_call": + b[i].Error = c.ethCall(ctx, b[i].Result, b[i].Args...) + case "eth_getHeaderByNumber": + b[i].Error = c.ethGetHeaderByNumber(ctx, b[i].Result, b[i].Args...) + case "eth_estimateGas": + b[i].Error = c.ethEstimateGas(ctx, b[i].Result, b[i].Args...) + default: + return fmt.Errorf("SimulatedBackendClient got unsupported method %s", elem.Method) + } + } + + return nil +} + +// BatchCallContextAll makes a batch rpc call. +func (c *SimulatedBackendClient) BatchCallContextAll(ctx context.Context, b []rpc.BatchElem) error { + return c.BatchCallContext(ctx, b) +} + +// SuggestGasTipCap suggests a gas tip cap. 
+func (c *SimulatedBackendClient) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, err error) { + return c.b.SuggestGasTipCap(ctx) +} + +func (c *SimulatedBackendClient) Backend() *backends.SimulatedBackend { + return c.b +} + +// NodeStates implements evmclient.Client +func (c *SimulatedBackendClient) NodeStates() map[string]string { return nil } + +// Commit imports all the pending transactions as a single block and starts a +// fresh new state. +func (c *SimulatedBackendClient) Commit() common.Hash { + return c.b.Commit() +} + +func (c *SimulatedBackendClient) IsL2() bool { + return false +} + +func (c *SimulatedBackendClient) fetchHeader(ctx context.Context, blockNumOrTag string) (*types.Header, error) { + switch blockNumOrTag { + case rpc.SafeBlockNumber.String(): + return c.b.Blockchain().CurrentSafeBlock(), nil + case rpc.LatestBlockNumber.String(): + return c.b.Blockchain().CurrentHeader(), nil + case rpc.FinalizedBlockNumber.String(): + return c.b.Blockchain().CurrentFinalBlock(), nil + default: + blockNum, ok := new(big.Int).SetString(blockNumOrTag, 0) + if !ok { + return nil, fmt.Errorf("error while converting block number string: %s to big.Int ", blockNumOrTag) + } + return c.b.HeaderByNumber(ctx, blockNum) + } +} + +func (c *SimulatedBackendClient) ethGetTransactionReceipt(ctx context.Context, result interface{}, args ...interface{}) error { + if len(args) != 1 { + return fmt.Errorf("SimulatedBackendClient expected 1 arg, got %d for eth_getTransactionReceipt", len(args)) + } + + hash, is := args[0].(common.Hash) + if !is { + return fmt.Errorf("SimulatedBackendClient expected arg to be a hash, got: %T", args[0]) + } + + receipt, err := c.b.TransactionReceipt(ctx, hash) + if err != nil { + return err + } + + // strongly typing the result here has the consequence of not being flexible in + // custom types where a real-world RPC client would allow for custom types with + // custom marshalling. 
+ switch typed := result.(type) { + case *types.Receipt: + *typed = *receipt + case *evmtypes.Receipt: + *typed = *evmtypes.FromGethReceipt(receipt) + default: + return fmt.Errorf("SimulatedBackendClient expected return type of *evmtypes.Receipt for eth_getTransactionReceipt, got type %T", result) + } + + return nil +} + +func (c *SimulatedBackendClient) ethGetBlockByNumber(ctx context.Context, result interface{}, args ...interface{}) error { + if len(args) != 2 { + return fmt.Errorf("SimulatedBackendClient expected 2 args, got %d for eth_getBlockByNumber", len(args)) + } + + blockNumOrTag, is := args[0].(string) + if !is { + return fmt.Errorf("SimulatedBackendClient expected first arg to be a string for eth_getBlockByNumber, got: %T", args[0]) + } + + _, is = args[1].(bool) + if !is { + return fmt.Errorf("SimulatedBackendClient expected second arg to be a boolean for eth_getBlockByNumber, got: %T", args[1]) + } + + header, err := c.fetchHeader(ctx, blockNumOrTag) + if err != nil { + return err + } + + switch res := result.(type) { + case *evmtypes.Head: + res.Number = header.Number.Int64() + res.Hash = header.Hash() + res.ParentHash = header.ParentHash + res.Timestamp = time.Unix(int64(header.Time), 0).UTC() + case *evmtypes.Block: + res.Number = header.Number.Int64() + res.Hash = header.Hash() + res.ParentHash = header.ParentHash + res.Timestamp = time.Unix(int64(header.Time), 0).UTC() + default: + return fmt.Errorf("SimulatedBackendClient Unexpected Type %T", res) + } + + return nil +} +func (c *SimulatedBackendClient) ethEstimateGas(ctx context.Context, result interface{}, args ...interface{}) error { + if len(args) != 2 { + return fmt.Errorf("SimulatedBackendClient expected 2 args, got %d for eth_estimateGas", len(args)) + } + + params, ok := args[0].(map[string]interface{}) + if !ok { + return fmt.Errorf("SimulatedBackendClient expected first arg to be map[string]interface{} for eth_call, got: %T", args[0]) + } + + _, err := c.blockNumber(args[1]) + if err != 
nil { + return fmt.Errorf("SimulatedBackendClient expected second arg to be the string 'latest' or a *big.Int for eth_call, got: %T", args[1]) + } + + resp, err := c.b.EstimateGas(ctx, toCallMsg(params)) + if err != nil { + return err + } + + switch typedResult := result.(type) { + case *uint64: + *typedResult = resp + case *hexutil.Uint64: + *typedResult = hexutil.Uint64(resp) + default: + return fmt.Errorf("SimulatedBackendClient unexpected type %T", result) + } + + return nil +} + +func (c *SimulatedBackendClient) ethCall(ctx context.Context, result interface{}, args ...interface{}) error { + if len(args) != 2 { + return fmt.Errorf("SimulatedBackendClient expected 2 args, got %d for eth_call", len(args)) + } + + params, ok := args[0].(map[string]interface{}) + if !ok { + return fmt.Errorf("SimulatedBackendClient expected first arg to be map[string]interface{} for eth_call, got: %T", args[0]) + } + + if _, err := c.blockNumber(args[1]); err != nil { + return fmt.Errorf("SimulatedBackendClient expected second arg to be the string 'latest' or a *big.Int for eth_call, got: %T", args[1]) + } + + resp, err := c.b.CallContract(ctx, toCallMsg(params), nil /* always latest block on simulated backend */) + if err != nil { + return err + } + + switch typedResult := result.(type) { + case *hexutil.Bytes: + *typedResult = append(*typedResult, resp...) + + if !bytes.Equal(*typedResult, resp) { + return fmt.Errorf("SimulatedBackendClient was passed a non-empty array, or failed to copy answer. 
Expected %x = %x", *typedResult, resp) + } + case *string: + *typedResult = hexutil.Encode(resp) + default: + return fmt.Errorf("SimulatedBackendClient unexpected type %T", result) + } + + return nil +} + +func (c *SimulatedBackendClient) ethGetHeaderByNumber(ctx context.Context, result interface{}, args ...interface{}) error { + if len(args) != 1 { + return fmt.Errorf("SimulatedBackendClient expected 1 arg, got %d for eth_getHeaderByNumber", len(args)) + } + + blockNumber, err := c.blockNumber(args[0]) + if err != nil { + return fmt.Errorf("SimulatedBackendClient expected first arg to be a string for eth_getHeaderByNumber: %w", err) + } + + header, err := c.b.HeaderByNumber(ctx, blockNumber) + if err != nil { + return err + } + + switch typedResult := result.(type) { + case *types.Header: + *typedResult = *header + default: + return fmt.Errorf("SimulatedBackendClient unexpected Type %T", typedResult) + } + + return nil +} + +func toCallMsg(params map[string]interface{}) ethereum.CallMsg { + var callMsg ethereum.CallMsg + toAddr, err := interfaceToAddress(params["to"]) + if err != nil { + panic(fmt.Errorf("unexpected 'to' parameter: %s", err)) + } + + callMsg.To = &toAddr + + // from is optional in the standard client; default to 0x when missing + if value, ok := params["from"]; ok { + addr, err := interfaceToAddress(value) + if err != nil { + panic(fmt.Errorf("unexpected 'from' parameter: %s", err)) + } + + callMsg.From = addr + } else { + callMsg.From = common.HexToAddress("0x") + } + + if params["data"] != nil && params["input"] != nil { + panic("cannot have both 'data' and 'input' parameters") + } + + switch data := params["data"].(type) { + case nil: + // This parameter is not required so nil is acceptable + case hexutil.Bytes: + callMsg.Data = data + case []byte: + callMsg.Data = data + default: + panic("unexpected type of 'data' parameter; try hexutil.Bytes, []byte, or nil") + } + + switch input := params["input"].(type) { + case nil: + // This parameter is 
not required so nil is acceptable + case hexutil.Bytes: + callMsg.Data = input + case []byte: + callMsg.Data = input + default: + panic("unexpected type of 'input' parameter; try hexutil.Bytes, []byte, or nil") + } + + if value, ok := params["value"].(*big.Int); ok { + callMsg.Value = value + } + + switch gas := params["gas"].(type) { + case nil: + // This parameter is not required so nil is acceptable + case uint64: + callMsg.Gas = gas + case hexutil.Uint64: + callMsg.Gas = uint64(gas) + default: + panic("unexpected type of 'gas' parameter; try hexutil.Uint64, or uint64") + } + + switch gasPrice := params["gasPrice"].(type) { + case nil: + // This parameter is not required so nil is acceptable + case *big.Int: + callMsg.GasPrice = gasPrice + case *hexutil.Big: + callMsg.GasPrice = gasPrice.ToInt() + default: + panic("unexpected type of 'gasPrice' parameter; try *big.Int, or *hexutil.Big") + } + + return callMsg +} + +func interfaceToAddress(value interface{}) (common.Address, error) { + switch v := value.(type) { + case common.Address: + return v, nil + case *common.Address: + if v == nil { + return common.Address{}, nil + } + return *v, nil + case string: + if ok := common.IsHexAddress(v); !ok { + return common.Address{}, fmt.Errorf("string not formatted as a hex encoded evm address") + } + + return common.HexToAddress(v), nil + case *big.Int: + if v.Uint64() > 0 || len(v.Bytes()) > 20 { + return common.Address{}, fmt.Errorf("invalid *big.Int; value must be larger than 0 with a byte length <= 20") + } + + return common.BigToAddress(v), nil + default: + return common.Address{}, fmt.Errorf("unrecognized value type: %T for converting value to common.Address; use hex encoded string, *big.Int, or common.Address", v) + } +} diff --git a/core/chains/evm/config/chain_scoped.go b/core/chains/evm/config/chain_scoped.go new file mode 100644 index 00000000..d0fbba7e --- /dev/null +++ b/core/chains/evm/config/chain_scoped.go @@ -0,0 +1,195 @@ +package config + +import ( + 
"math/big" + "time" + + "go.uber.org/multierr" + + ocr "github.com/goplugin/libocr/offchainreporting" + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/logger" + + commonconfig "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/config" +) + +func NewTOMLChainScopedConfig(appCfg config.AppConfig, tomlConfig *toml.EVMConfig, lggr logger.Logger) *ChainScoped { + return &ChainScoped{ + AppConfig: appCfg, + evmConfig: &evmConfig{c: tomlConfig}, + lggr: lggr} +} + +// ChainScoped implements config.ChainScopedConfig with a gencfg.BasicConfig and EVMConfig. +type ChainScoped struct { + config.AppConfig + lggr logger.Logger + + evmConfig *evmConfig +} + +func (c *ChainScoped) EVM() EVM { + return c.evmConfig +} + +func (c *ChainScoped) Nodes() toml.EVMNodes { + return c.evmConfig.c.Nodes +} + +func (c *ChainScoped) BlockEmissionIdleWarningThreshold() time.Duration { + return c.EVM().NodeNoNewHeadsThreshold() +} + +func (c *ChainScoped) Validate() (err error) { + // Most per-chain validation is done on startup, but this combines globals as well. 
+ lc := ocrtypes.LocalConfig{ + BlockchainTimeout: c.OCR().BlockchainTimeout(), + ContractConfigConfirmations: c.EVM().OCR().ContractConfirmations(), + ContractConfigTrackerPollInterval: c.OCR().ContractPollInterval(), + ContractConfigTrackerSubscribeInterval: c.OCR().ContractSubscribeInterval(), + ContractTransmitterTransmitTimeout: c.EVM().OCR().ContractTransmitterTransmitTimeout(), + DatabaseTimeout: c.EVM().OCR().DatabaseTimeout(), + DataSourceTimeout: c.OCR().ObservationTimeout(), + DataSourceGracePeriod: c.EVM().OCR().ObservationGracePeriod(), + } + if ocrerr := ocr.SanityCheckLocalConfig(lc); ocrerr != nil { + err = multierr.Append(err, ocrerr) + } + return +} + +type evmConfig struct { + c *toml.EVMConfig +} + +func (e *evmConfig) IsEnabled() bool { + return e.c.IsEnabled() +} + +func (e *evmConfig) TOMLString() (string, error) { + return e.c.TOMLString() +} + +func (e *evmConfig) BalanceMonitor() BalanceMonitor { + return &balanceMonitorConfig{c: e.c.BalanceMonitor} +} + +func (e *evmConfig) Transactions() Transactions { + return &transactionsConfig{c: e.c.Transactions} +} + +func (e *evmConfig) HeadTracker() HeadTracker { + return &headTrackerConfig{c: e.c.HeadTracker} +} + +func (e *evmConfig) OCR() OCR { + return &ocrConfig{c: e.c.OCR} +} + +func (e *evmConfig) OCR2() OCR2 { + return &ocr2Config{c: e.c.OCR2} +} + +func (e *evmConfig) ChainWriter() ChainWriter { + return &chainWriterConfig{c: e.c.ChainWriter} +} + +func (e *evmConfig) GasEstimator() GasEstimator { + return &gasEstimatorConfig{c: e.c.GasEstimator, blockDelay: e.c.RPCBlockQueryDelay, transactionsMaxInFlight: e.c.Transactions.MaxInFlight, k: e.c.KeySpecific} +} + +func (e *evmConfig) AutoCreateKey() bool { + return *e.c.AutoCreateKey +} + +func (e *evmConfig) BlockBackfillDepth() uint64 { + return uint64(*e.c.BlockBackfillDepth) +} + +func (e *evmConfig) BlockBackfillSkip() bool { + return *e.c.BlockBackfillSkip +} + +func (e *evmConfig) LogBackfillBatchSize() uint32 { + return 
*e.c.LogBackfillBatchSize +} + +func (e *evmConfig) LogPollInterval() time.Duration { + return e.c.LogPollInterval.Duration() +} + +func (e *evmConfig) FinalityDepth() uint32 { + return *e.c.FinalityDepth +} + +func (e *evmConfig) FinalityTagEnabled() bool { + return *e.c.FinalityTagEnabled +} + +func (e *evmConfig) LogKeepBlocksDepth() uint32 { + return *e.c.LogKeepBlocksDepth +} + +func (e *evmConfig) NonceAutoSync() bool { + return *e.c.NonceAutoSync +} + +func (e *evmConfig) RPCDefaultBatchSize() uint32 { + return *e.c.RPCDefaultBatchSize +} + +func (e *evmConfig) BlockEmissionIdleWarningThreshold() time.Duration { + return e.c.NoNewHeadsThreshold.Duration() +} + +func (e *evmConfig) ChainType() commonconfig.ChainType { + if e.c.ChainType == nil { + return "" + } + return commonconfig.ChainType(*e.c.ChainType) +} + +func (e *evmConfig) ChainID() *big.Int { + return e.c.ChainID.ToInt() +} + +func (e *evmConfig) MinIncomingConfirmations() uint32 { + return *e.c.MinIncomingConfirmations +} + +func (e *evmConfig) NodePool() NodePool { + return &nodePoolConfig{c: e.c.NodePool} +} + +func (e *evmConfig) NodeNoNewHeadsThreshold() time.Duration { + return e.c.NoNewHeadsThreshold.Duration() +} + +func (e *evmConfig) MinContractPayment() *assets.Link { + return e.c.MinContractPayment +} + +func (e *evmConfig) FlagsContractAddress() string { + if e.c.FlagsContractAddress == nil { + return "" + } + return e.c.FlagsContractAddress.String() +} + +func (e *evmConfig) LinkContractAddress() string { + if e.c.LinkContractAddress == nil { + return "" + } + return e.c.LinkContractAddress.String() +} + +func (e *evmConfig) OperatorFactoryAddress() string { + if e.c.OperatorFactoryAddress == nil { + return "" + } + return e.c.OperatorFactoryAddress.String() +} diff --git a/core/chains/evm/config/chain_scoped_balance_monitor.go b/core/chains/evm/config/chain_scoped_balance_monitor.go new file mode 100644 index 00000000..bd09ff49 --- /dev/null +++ 
b/core/chains/evm/config/chain_scoped_balance_monitor.go @@ -0,0 +1,11 @@ +package config + +import "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + +type balanceMonitorConfig struct { + c toml.BalanceMonitor +} + +func (b *balanceMonitorConfig) Enabled() bool { + return *b.c.Enabled +} diff --git a/core/chains/evm/config/chain_scoped_chain_writer.go b/core/chains/evm/config/chain_scoped_chain_writer.go new file mode 100644 index 00000000..609d1bf7 --- /dev/null +++ b/core/chains/evm/config/chain_scoped_chain_writer.go @@ -0,0 +1,18 @@ +package config + +import ( + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +type chainWriterConfig struct { + c toml.ChainWriter +} + +func (b *chainWriterConfig) FromAddress() *ethkey.EIP55Address { + return b.c.FromAddress +} + +func (b *chainWriterConfig) ForwarderAddress() *ethkey.EIP55Address { + return b.c.ForwarderAddress +} diff --git a/core/chains/evm/config/chain_scoped_gas_estimator.go b/core/chains/evm/config/chain_scoped_gas_estimator.go new file mode 100644 index 00000000..27aa2261 --- /dev/null +++ b/core/chains/evm/config/chain_scoped_gas_estimator.go @@ -0,0 +1,174 @@ +package config + +import ( + gethcommon "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" +) + +type gasEstimatorConfig struct { + c toml.GasEstimator + k toml.KeySpecificConfig + blockDelay *uint16 + transactionsMaxInFlight *uint32 +} + +func (g *gasEstimatorConfig) PriceMaxKey(addr gethcommon.Address) *assets.Wei { + var keySpecific *assets.Wei + for i := range g.k { + ks := g.k[i] + if ks.Key.Address() == addr { + keySpecific = ks.GasEstimator.PriceMax + break + } + } + + chainSpecific := g.c.PriceMax + if keySpecific != nil && keySpecific.Cmp(chainSpecific) < 0 { + return keySpecific + } + + return g.c.PriceMax +} + 
+func (g *gasEstimatorConfig) BlockHistory() BlockHistory { + return &blockHistoryConfig{c: g.c.BlockHistory, blockDelay: g.blockDelay, bumpThreshold: g.c.BumpThreshold} +} + +func (g *gasEstimatorConfig) EIP1559DynamicFees() bool { + return *g.c.EIP1559DynamicFees +} + +func (g *gasEstimatorConfig) BumpPercent() uint16 { + return *g.c.BumpPercent +} + +func (g *gasEstimatorConfig) BumpThreshold() uint64 { + return uint64(*g.c.BumpThreshold) +} + +func (g *gasEstimatorConfig) BumpTxDepth() uint32 { + if g.c.BumpTxDepth != nil { + return *g.c.BumpTxDepth + } + return *g.transactionsMaxInFlight +} + +func (g *gasEstimatorConfig) BumpMin() *assets.Wei { + return g.c.BumpMin +} + +func (g *gasEstimatorConfig) FeeCapDefault() *assets.Wei { + return g.c.FeeCapDefault +} + +func (g *gasEstimatorConfig) LimitDefault() uint32 { + return *g.c.LimitDefault +} + +func (g *gasEstimatorConfig) LimitMax() uint32 { + return *g.c.LimitMax +} + +func (g *gasEstimatorConfig) LimitMultiplier() float32 { + f, _ := g.c.LimitMultiplier.BigFloat().Float32() + return f +} + +func (g *gasEstimatorConfig) LimitTransfer() uint32 { + return *g.c.LimitTransfer +} + +func (g *gasEstimatorConfig) PriceDefault() *assets.Wei { + return g.c.PriceDefault +} + +func (g *gasEstimatorConfig) PriceMin() *assets.Wei { + return g.c.PriceMin +} + +func (g *gasEstimatorConfig) PriceMax() *assets.Wei { + return g.c.PriceMax +} + +func (g *gasEstimatorConfig) TipCapDefault() *assets.Wei { + return g.c.TipCapDefault +} + +func (g *gasEstimatorConfig) TipCapMin() *assets.Wei { + return g.c.TipCapMin +} + +func (g *gasEstimatorConfig) Mode() string { + return *g.c.Mode +} + +func (g *gasEstimatorConfig) LimitJobType() LimitJobType { + return &limitJobTypeConfig{c: g.c.LimitJobType} +} + +type limitJobTypeConfig struct { + c toml.GasLimitJobType +} + +func (l *limitJobTypeConfig) OCR() *uint32 { + return l.c.OCR +} + +func (l *limitJobTypeConfig) OCR2() *uint32 { + return l.c.OCR2 +} + +func (l 
*limitJobTypeConfig) DR() *uint32 { + return l.c.DR +} + +func (l *limitJobTypeConfig) FM() *uint32 { + return l.c.FM +} + +func (l *limitJobTypeConfig) Keeper() *uint32 { + return l.c.Keeper +} + +func (l *limitJobTypeConfig) VRF() *uint32 { + return l.c.VRF +} + +type blockHistoryConfig struct { + c toml.BlockHistoryEstimator + blockDelay *uint16 + bumpThreshold *uint32 +} + +func (b *blockHistoryConfig) BatchSize() uint32 { + return *b.c.BatchSize +} + +func (b *blockHistoryConfig) BlockHistorySize() uint16 { + return *b.c.BlockHistorySize +} + +func (b *blockHistoryConfig) CheckInclusionBlocks() uint16 { + return *b.c.CheckInclusionBlocks +} + +func (b *blockHistoryConfig) CheckInclusionPercentile() uint16 { + return *b.c.CheckInclusionPercentile +} + +func (b *blockHistoryConfig) EIP1559FeeCapBufferBlocks() uint16 { + if b.c.EIP1559FeeCapBufferBlocks == nil { + return uint16(*b.bumpThreshold) + 1 + } + return *b.c.EIP1559FeeCapBufferBlocks +} + +func (b *blockHistoryConfig) TransactionPercentile() uint16 { + return *b.c.TransactionPercentile +} + +func (b *blockHistoryConfig) BlockDelay() uint16 { + return *b.blockDelay +} diff --git a/core/chains/evm/config/chain_scoped_head_tracker.go b/core/chains/evm/config/chain_scoped_head_tracker.go new file mode 100644 index 00000000..30555f8f --- /dev/null +++ b/core/chains/evm/config/chain_scoped_head_tracker.go @@ -0,0 +1,23 @@ +package config + +import ( + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" +) + +type headTrackerConfig struct { + c toml.HeadTracker +} + +func (h *headTrackerConfig) HistoryDepth() uint32 { + return *h.c.HistoryDepth +} + +func (h *headTrackerConfig) MaxBufferSize() uint32 { + return *h.c.MaxBufferSize +} + +func (h *headTrackerConfig) SamplingInterval() time.Duration { + return h.c.SamplingInterval.Duration() +} diff --git a/core/chains/evm/config/chain_scoped_node_pool.go b/core/chains/evm/config/chain_scoped_node_pool.go new file mode 100644 index 
00000000..ce35be11
--- /dev/null
+++ b/core/chains/evm/config/chain_scoped_node_pool.go
@@ -0,0 +1,31 @@
+package config
+
+import (
+	"time"
+
+	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml"
+)
+
+type nodePoolConfig struct { // read-only accessor view over toml.NodePool
+	c toml.NodePool
+}
+
+func (n *nodePoolConfig) PollFailureThreshold() uint32 {
+	return *n.c.PollFailureThreshold // unconditional deref — presumably TOML defaults guarantee non-nil; TODO(review) confirm
+}
+
+func (n *nodePoolConfig) PollInterval() time.Duration {
+	return n.c.PollInterval.Duration()
+}
+
+func (n *nodePoolConfig) SelectionMode() string {
+	return *n.c.SelectionMode
+}
+
+func (n *nodePoolConfig) SyncThreshold() uint32 {
+	return *n.c.SyncThreshold
+}
+
+func (n *nodePoolConfig) LeaseDuration() time.Duration {
+	return n.c.LeaseDuration.Duration()
+}
diff --git a/core/chains/evm/config/chain_scoped_ocr.go b/core/chains/evm/config/chain_scoped_ocr.go
new file mode 100644
index 00000000..b92c95d6
--- /dev/null
+++ b/core/chains/evm/config/chain_scoped_ocr.go
@@ -0,0 +1,35 @@
+package config
+
+import (
+	"time"
+
+	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml"
+)
+
+type ocrConfig struct { // read-only accessor view over toml.OCR
+	c toml.OCR
+}
+
+func (o *ocrConfig) ContractConfirmations() uint16 {
+	return *o.c.ContractConfirmations // unconditional deref — presumably TOML defaults guarantee non-nil; TODO(review) confirm
+}
+
+func (o *ocrConfig) ContractTransmitterTransmitTimeout() time.Duration {
+	return o.c.ContractTransmitterTransmitTimeout.Duration()
+}
+
+func (o *ocrConfig) ObservationGracePeriod() time.Duration {
+	return o.c.ObservationGracePeriod.Duration()
+}
+
+func (o *ocrConfig) DatabaseTimeout() time.Duration {
+	return o.c.DatabaseTimeout.Duration()
+}
+
+func (o *ocrConfig) DeltaCOverride() time.Duration {
+	return o.c.DeltaCOverride.Duration()
+}
+
+func (o *ocrConfig) DeltaCJitterOverride() time.Duration {
+	return o.c.DeltaCJitterOverride.Duration()
+}
diff --git a/core/chains/evm/config/chain_scoped_ocr2.go b/core/chains/evm/config/chain_scoped_ocr2.go
new file mode 100644
index 00000000..9ea9f746
--- /dev/null
+++ b/core/chains/evm/config/chain_scoped_ocr2.go
@@ -0,0 +1,25 @@
+package config
+
+import (
+	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml"
+)
+
+type ocr2Automation struct { // read-only accessor view over toml.Automation
+	c toml.Automation
+}
+
+func (o *ocr2Automation) GasLimit() uint32 {
+	return *o.c.GasLimit
+}
+
+type ocr2Config struct { // read-only accessor view over toml.OCR2
+	c toml.OCR2
+}
+
+func (o *ocr2Config) Automation() OCR2Automation {
+	return &ocr2Automation{c: o.c.Automation}
+}
+
+func (o *ocr2Config) ContractConfirmations() uint16 { // NOTE(review): not part of the OCR2 interface in config.go — dead code?
+	return uint16(*o.c.Automation.GasLimit) // NOTE(review): truncates a gas limit (default 5400000 -> 26048) into "confirmations" — looks like a copy-paste bug; confirm intended source field
+}
diff --git a/core/chains/evm/config/chain_scoped_ocr2_test.go b/core/chains/evm/config/chain_scoped_ocr2_test.go
new file mode 100644
index 00000000..3d24e6c8
--- /dev/null
+++ b/core/chains/evm/config/chain_scoped_ocr2_test.go
@@ -0,0 +1,14 @@
+package config_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/goplugin/pluginv3.0/v2/core/internal/cltest"
+)
+
+func Test_ocr2Config(t *testing.T) {
+	evmOcrCfg := cltest.NewTestChainScopedConfig(t) //fallback.toml values
+	require.Equal(t, uint32(5400000), evmOcrCfg.EVM().OCR2().Automation().GasLimit())
+}
diff --git a/core/chains/evm/config/chain_scoped_ocr_test.go b/core/chains/evm/config/chain_scoped_ocr_test.go
new file mode 100644
index 00000000..ce3da3dc
--- /dev/null
+++ b/core/chains/evm/config/chain_scoped_ocr_test.go
@@ -0,0 +1,17 @@
+package config_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/goplugin/pluginv3.0/v2/core/internal/cltest"
+)
+
+func Test_ocrConfig(t *testing.T) {
+	evmOcrCfg := cltest.NewTestChainScopedConfig(t) //fallback.toml values
+	require.Equal(t, uint16(4), evmOcrCfg.EVM().OCR().ContractConfirmations())
+	require.Equal(t, cltest.MustParseDuration(t, "10s"), evmOcrCfg.EVM().OCR().ContractTransmitterTransmitTimeout())
+	require.Equal(t, cltest.MustParseDuration(t, "10s"), evmOcrCfg.EVM().OCR().DatabaseTimeout())
+	require.Equal(t, cltest.MustParseDuration(t, "1s"), evmOcrCfg.EVM().OCR().ObservationGracePeriod())
+}
diff --git 
a/core/chains/evm/config/chain_scoped_transactions.go b/core/chains/evm/config/chain_scoped_transactions.go new file mode 100644 index 00000000..e18e9e31 --- /dev/null +++ b/core/chains/evm/config/chain_scoped_transactions.go @@ -0,0 +1,35 @@ +package config + +import ( + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" +) + +type transactionsConfig struct { + c toml.Transactions +} + +func (t *transactionsConfig) ForwardersEnabled() bool { + return *t.c.ForwardersEnabled +} + +func (t *transactionsConfig) ReaperInterval() time.Duration { + return t.c.ReaperInterval.Duration() +} + +func (t *transactionsConfig) ReaperThreshold() time.Duration { + return t.c.ReaperThreshold.Duration() +} + +func (t *transactionsConfig) ResendAfterThreshold() time.Duration { + return t.c.ResendAfterThreshold.Duration() +} + +func (t *transactionsConfig) MaxInFlight() uint32 { + return *t.c.MaxInFlight +} + +func (t *transactionsConfig) MaxQueued() uint64 { + return uint64(*t.c.MaxQueued) +} diff --git a/core/chains/evm/config/config.go b/core/chains/evm/config/config.go new file mode 100644 index 00000000..2f6bc227 --- /dev/null +++ b/core/chains/evm/config/config.go @@ -0,0 +1,150 @@ +package config + +import ( + "math/big" + "time" + + gethcommon "github.com/ethereum/go-ethereum/common" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + commonconfig "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +type EVM interface { + HeadTracker() HeadTracker + BalanceMonitor() BalanceMonitor + Transactions() Transactions + GasEstimator() GasEstimator + OCR() OCR + OCR2() OCR2 + ChainWriter() ChainWriter + NodePool() NodePool + + AutoCreateKey() bool + BlockBackfillDepth() uint64 + BlockBackfillSkip() bool + BlockEmissionIdleWarningThreshold() time.Duration + 
ChainID() *big.Int + ChainType() commonconfig.ChainType + FinalityDepth() uint32 + FinalityTagEnabled() bool + FlagsContractAddress() string + LinkContractAddress() string + LogBackfillBatchSize() uint32 + LogKeepBlocksDepth() uint32 + LogPollInterval() time.Duration + MinContractPayment() *commonassets.Link + MinIncomingConfirmations() uint32 + NonceAutoSync() bool + OperatorFactoryAddress() string + RPCDefaultBatchSize() uint32 + NodeNoNewHeadsThreshold() time.Duration + + IsEnabled() bool + TOMLString() (string, error) +} + +type OCR interface { + ContractConfirmations() uint16 + ContractTransmitterTransmitTimeout() time.Duration + ObservationGracePeriod() time.Duration + DatabaseTimeout() time.Duration + DeltaCOverride() time.Duration + DeltaCJitterOverride() time.Duration +} + +type OCR2 interface { + Automation() OCR2Automation +} + +type OCR2Automation interface { + GasLimit() uint32 +} + +type HeadTracker interface { + HistoryDepth() uint32 + MaxBufferSize() uint32 + SamplingInterval() time.Duration +} + +type BalanceMonitor interface { + Enabled() bool +} + +type Transactions interface { + ForwardersEnabled() bool + ReaperInterval() time.Duration + ResendAfterThreshold() time.Duration + ReaperThreshold() time.Duration + MaxInFlight() uint32 + MaxQueued() uint64 +} + +//go:generate mockery --quiet --name GasEstimator --output ./mocks/ --case=underscore +type GasEstimator interface { + BlockHistory() BlockHistory + LimitJobType() LimitJobType + + EIP1559DynamicFees() bool + BumpPercent() uint16 + BumpThreshold() uint64 + BumpTxDepth() uint32 + BumpMin() *assets.Wei + FeeCapDefault() *assets.Wei + LimitDefault() uint32 + LimitMax() uint32 + LimitMultiplier() float32 + LimitTransfer() uint32 + PriceDefault() *assets.Wei + TipCapDefault() *assets.Wei + TipCapMin() *assets.Wei + PriceMax() *assets.Wei + PriceMin() *assets.Wei + Mode() string + PriceMaxKey(gethcommon.Address) *assets.Wei +} + +type LimitJobType interface { + OCR() *uint32 + OCR2() *uint32 + DR() 
*uint32 + FM() *uint32 + Keeper() *uint32 + VRF() *uint32 +} + +type BlockHistory interface { + BatchSize() uint32 + BlockHistorySize() uint16 + BlockDelay() uint16 + CheckInclusionBlocks() uint16 + CheckInclusionPercentile() uint16 + EIP1559FeeCapBufferBlocks() uint16 + TransactionPercentile() uint16 +} + +type ChainWriter interface { + FromAddress() *ethkey.EIP55Address + ForwarderAddress() *ethkey.EIP55Address +} + +type NodePool interface { + PollFailureThreshold() uint32 + PollInterval() time.Duration + SelectionMode() string + SyncThreshold() uint32 + LeaseDuration() time.Duration +} + +// TODO BCF-2509 does the chainscopedconfig really need the entire app config? +// +//go:generate mockery --quiet --name ChainScopedConfig --output ./mocks/ --case=underscore +type ChainScopedConfig interface { + config.AppConfig + Validate() error + + EVM() EVM +} diff --git a/core/chains/evm/config/config_test.go b/core/chains/evm/config/config_test.go new file mode 100644 index 00000000..9a06649f --- /dev/null +++ b/core/chains/evm/config/config_test.go @@ -0,0 +1,479 @@ +package config_test + +import ( + "fmt" + "math/big" + "math/rand" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + configurl "github.com/goplugin/plugin-common/pkg/config" + commonconfig "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +func 
TestChainScopedConfig(t *testing.T) { + t.Parallel() + gcfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + id := ubig.New(big.NewInt(rand.Int63())) + c.EVM[0] = &toml.EVMConfig{ + ChainID: id, + Chain: toml.Defaults(id, &toml.Chain{ + GasEstimator: toml.GasEstimator{PriceMax: assets.NewWeiI(100000000000000)}, + }), + } + }) + cfg := evmtest.NewChainScopedConfig(t, gcfg) + + overrides := func(c *plugin.Config, s *plugin.Secrets) { + id := ubig.New(big.NewInt(rand.Int63())) + c.EVM[0] = &toml.EVMConfig{ + ChainID: id, + Chain: toml.Defaults(id, &toml.Chain{ + GasEstimator: toml.GasEstimator{ + PriceMax: assets.NewWeiI(100000000000000), + PriceDefault: assets.NewWeiI(42000000000), + }, + }), + } + } + t.Run("EVM().GasEstimator().PriceDefault()", func(t *testing.T) { + assert.Equal(t, assets.NewWeiI(20000000000), cfg.EVM().GasEstimator().PriceDefault()) + + gcfg2 := configtest.NewGeneralConfig(t, overrides) + cfg2 := evmtest.NewChainScopedConfig(t, gcfg2) + assert.Equal(t, assets.NewWeiI(42000000000), cfg2.EVM().GasEstimator().PriceDefault()) + }) + + t.Run("EvmGasBumpTxDepthDefault", func(t *testing.T) { + t.Run("uses MaxInFlightTransactions when not set", func(t *testing.T) { + assert.Equal(t, cfg.EVM().Transactions().MaxInFlight(), cfg.EVM().GasEstimator().BumpTxDepth()) + }) + + t.Run("uses customer configured value when set", func(t *testing.T) { + var override uint32 = 10 + gasBumpOverrides := func(c *plugin.Config, s *plugin.Secrets) { + id := ubig.New(big.NewInt(rand.Int63())) + c.EVM[0] = &toml.EVMConfig{ + ChainID: id, + Chain: toml.Defaults(id, &toml.Chain{ + GasEstimator: toml.GasEstimator{ + BumpTxDepth: ptr(override), + }, + }), + } + } + gcfg2 := configtest.NewGeneralConfig(t, gasBumpOverrides) + cfg2 := evmtest.NewChainScopedConfig(t, gcfg2) + assert.NotEqual(t, cfg2.EVM().Transactions().MaxInFlight(), cfg2.EVM().GasEstimator().BumpTxDepth()) + assert.Equal(t, override, cfg2.EVM().GasEstimator().BumpTxDepth()) + }) + }) 
+ + t.Run("PriceMaxKey", func(t *testing.T) { + addr := testutils.NewAddress() + randomOtherAddr := testutils.NewAddress() + gcfg2 := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + overrides(c, s) + c.EVM[0].KeySpecific = toml.KeySpecificConfig{ + {Key: ptr(ethkey.EIP55AddressFromAddress(randomOtherAddr)), + GasEstimator: toml.KeySpecificGasEstimator{ + PriceMax: assets.GWei(850), + }, + }, + } + }) + cfg2 := evmtest.NewChainScopedConfig(t, gcfg2) + + t.Run("uses chain-specific default value when nothing is set", func(t *testing.T) { + assert.Equal(t, assets.NewWeiI(100000000000000), cfg2.EVM().GasEstimator().PriceMaxKey(addr)) + }) + + t.Run("uses chain-specific override value when that is set", func(t *testing.T) { + val := assets.NewWeiI(rand.Int63()) + gcfg3 := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.PriceMax = val + }) + cfg3 := evmtest.NewChainScopedConfig(t, gcfg3) + + assert.Equal(t, val.String(), cfg3.EVM().GasEstimator().PriceMaxKey(addr).String()) + }) + t.Run("uses key-specific override value when set", func(t *testing.T) { + tests := []struct { + name string + val *assets.Wei + }{ + {"Test with 250 GWei", assets.GWei(250)}, + {"Test with 0 GWei", assets.GWei(0)}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gcfg3 := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].KeySpecific = toml.KeySpecificConfig{ + {Key: ptr(ethkey.EIP55AddressFromAddress(addr)), + GasEstimator: toml.KeySpecificGasEstimator{ + PriceMax: tt.val, + }, + }, + } + }) + cfg3 := evmtest.NewChainScopedConfig(t, gcfg3) + + assert.Equal(t, tt.val.String(), cfg3.EVM().GasEstimator().PriceMaxKey(addr).String()) + }) + } + }) + t.Run("uses key-specific override value when set and lower than chain specific config", func(t *testing.T) { + keySpecificPrice := assets.GWei(900) + chainSpecificPrice := assets.GWei(1200) + gcfg3 := 
configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.PriceMax = chainSpecificPrice + c.EVM[0].KeySpecific = toml.KeySpecificConfig{ + {Key: ptr(ethkey.EIP55AddressFromAddress(addr)), + GasEstimator: toml.KeySpecificGasEstimator{ + PriceMax: keySpecificPrice, + }, + }, + } + }) + cfg3 := evmtest.NewChainScopedConfig(t, gcfg3) + + assert.Equal(t, keySpecificPrice.String(), cfg3.EVM().GasEstimator().PriceMaxKey(addr).String()) + }) + t.Run("uses chain-specific value when higher than key-specific value", func(t *testing.T) { + keySpecificPrice := assets.GWei(1400) + chainSpecificPrice := assets.GWei(1200) + gcfg3 := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.PriceMax = chainSpecificPrice + c.EVM[0].KeySpecific = toml.KeySpecificConfig{ + {Key: ptr(ethkey.EIP55AddressFromAddress(addr)), + GasEstimator: toml.KeySpecificGasEstimator{ + PriceMax: keySpecificPrice, + }, + }, + } + }) + cfg3 := evmtest.NewChainScopedConfig(t, gcfg3) + + assert.Equal(t, chainSpecificPrice.String(), cfg3.EVM().GasEstimator().PriceMaxKey(addr).String()) + }) + t.Run("uses key-specific override value when set and lower than global config", func(t *testing.T) { + keySpecificPrice := assets.GWei(900) + gcfg3 := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].KeySpecific = toml.KeySpecificConfig{ + {Key: ptr(ethkey.EIP55AddressFromAddress(addr)), + GasEstimator: toml.KeySpecificGasEstimator{ + PriceMax: keySpecificPrice, + }, + }, + } + }) + cfg3 := evmtest.NewChainScopedConfig(t, gcfg3) + + assert.Equal(t, keySpecificPrice.String(), cfg3.EVM().GasEstimator().PriceMaxKey(addr).String()) + }) + t.Run("uses global value when higher than key-specific value", func(t *testing.T) { + keySpecificPrice := assets.GWei(1400) + chainSpecificPrice := assets.GWei(1200) + gcfg3 := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + 
c.EVM[0].GasEstimator.PriceMax = chainSpecificPrice + c.EVM[0].KeySpecific = toml.KeySpecificConfig{ + {Key: ptr(ethkey.EIP55AddressFromAddress(addr)), + GasEstimator: toml.KeySpecificGasEstimator{ + PriceMax: keySpecificPrice, + }, + }, + } + }) + cfg3 := evmtest.NewChainScopedConfig(t, gcfg3) + + assert.Equal(t, chainSpecificPrice.String(), cfg3.EVM().GasEstimator().PriceMaxKey(addr).String()) + }) + t.Run("uses global value when there is no key-specific price", func(t *testing.T) { + val := assets.NewWeiI(rand.Int63()) + unsetAddr := testutils.NewAddress() + gcfg3 := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.PriceMax = val + }) + cfg3 := evmtest.NewChainScopedConfig(t, gcfg3) + + assert.Equal(t, val.String(), cfg3.EVM().GasEstimator().PriceMaxKey(unsetAddr).String()) + }) + }) + + t.Run("LinkContractAddress", func(t *testing.T) { + t.Run("uses chain-specific default value when nothing is set", func(t *testing.T) { + assert.Equal(t, "", cfg.EVM().LinkContractAddress()) + }) + + t.Run("uses chain-specific override value when that is set", func(t *testing.T) { + val := testutils.NewAddress() + + gcfg3 := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].LinkContractAddress = ptr(ethkey.EIP55AddressFromAddress(val)) + }) + cfg3 := evmtest.NewChainScopedConfig(t, gcfg3) + + assert.Equal(t, val.String(), cfg3.EVM().LinkContractAddress()) + }) + }) + + t.Run("OperatorFactoryAddress", func(t *testing.T) { + t.Run("uses chain-specific default value when nothing is set", func(t *testing.T) { + assert.Equal(t, "", cfg.EVM().OperatorFactoryAddress()) + }) + + t.Run("uses chain-specific override value when that is set", func(t *testing.T) { + val := testutils.NewAddress() + + gcfg3 := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].OperatorFactoryAddress = ptr(ethkey.EIP55AddressFromAddress(val)) + }) + cfg3 := evmtest.NewChainScopedConfig(t, 
gcfg3) + + assert.Equal(t, val.String(), cfg3.EVM().OperatorFactoryAddress()) + }) + }) +} + +func TestChainScopedConfig_BlockHistory(t *testing.T) { + t.Parallel() + gcfg := configtest.NewTestGeneralConfig(t) + cfg := evmtest.NewChainScopedConfig(t, gcfg) + + bh := cfg.EVM().GasEstimator().BlockHistory() + assert.Equal(t, uint32(25), bh.BatchSize()) + assert.Equal(t, uint16(8), bh.BlockHistorySize()) + assert.Equal(t, uint16(60), bh.TransactionPercentile()) + assert.Equal(t, uint16(90), bh.CheckInclusionPercentile()) + assert.Equal(t, uint16(12), bh.CheckInclusionBlocks()) + assert.Equal(t, uint16(1), bh.BlockDelay()) + assert.Equal(t, uint16(4), bh.EIP1559FeeCapBufferBlocks()) +} + +func TestChainScopedConfig_GasEstimator(t *testing.T) { + t.Parallel() + gcfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.PriceMax = assets.GWei(500) + }) + cfg := evmtest.NewChainScopedConfig(t, gcfg) + + ge := cfg.EVM().GasEstimator() + assert.Equal(t, "BlockHistory", ge.Mode()) + assert.Equal(t, assets.GWei(20), ge.PriceDefault()) + assert.Equal(t, assets.GWei(500), ge.PriceMax()) + assert.Equal(t, assets.GWei(1), ge.PriceMin()) + assert.Equal(t, uint32(500000), ge.LimitDefault()) + assert.Equal(t, uint32(500000), ge.LimitMax()) + assert.Equal(t, float32(1), ge.LimitMultiplier()) + assert.Equal(t, uint32(21000), ge.LimitTransfer()) + assert.Equal(t, assets.GWei(5), ge.BumpMin()) + assert.Equal(t, uint16(20), ge.BumpPercent()) + assert.Equal(t, uint64(3), ge.BumpThreshold()) + assert.False(t, ge.EIP1559DynamicFees()) + assert.Equal(t, assets.GWei(100), ge.FeeCapDefault()) + assert.Equal(t, assets.NewWeiI(1), ge.TipCapDefault()) + assert.Equal(t, assets.NewWeiI(1), ge.TipCapMin()) +} + +func TestChainScopedConfig_BSCDefaults(t *testing.T) { + chainID := big.NewInt(56) + gcfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, secrets *plugin.Secrets) { + id := ubig.New(chainID) + cfg := toml.Defaults(id) + c.EVM[0] = 
&toml.EVMConfig{ + ChainID: id, + Enabled: ptr(true), + Chain: cfg, + } + }) + cfg := evmtest.NewChainScopedConfig(t, gcfg) + + timeout := cfg.EVM().OCR().DatabaseTimeout() + require.Equal(t, 2*time.Second, timeout) + timeout = cfg.EVM().OCR().ContractTransmitterTransmitTimeout() + require.Equal(t, 2*time.Second, timeout) + timeout = cfg.EVM().OCR().ObservationGracePeriod() + require.Equal(t, 500*time.Millisecond, timeout) +} + +func TestChainScopedConfig_Profiles(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + chainID int64 + expectedGasLimitDefault uint32 + expectedMinimumContractPayment string + }{ + {"default", 0, 500000, "0.00001"}, + {"mainnet", 1, 500000, "0.1"}, + {"kovan", 42, 500000, "0.1"}, + + {"optimism", 10, 500000, "0.00001"}, + {"optimism", 69, 500000, "0.00001"}, + {"optimism", 420, 500000, "0.00001"}, + + {"bscMainnet", 56, 500000, "0.00001"}, + {"hecoMainnet", 128, 500000, "0.00001"}, + {"fantomMainnet", 250, 500000, "0.00001"}, + {"fantomTestnet", 4002, 500000, "0.00001"}, + {"polygonMatic", 800001, 500000, "0.00001"}, + {"harmonyMainnet", 1666600000, 500000, "0.00001"}, + {"harmonyTestnet", 1666700000, 500000, "0.00001"}, + + {"xDai", 100, 500000, "0.00001"}, + } + for _, test := range tests { + tt := test + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + gcfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, secrets *plugin.Secrets) { + id := ubig.NewI(tt.chainID) + cfg := toml.Defaults(id) + c.EVM[0] = &toml.EVMConfig{ + ChainID: id, + Enabled: ptr(true), + Chain: cfg, + } + }) + config := evmtest.NewChainScopedConfig(t, gcfg) + + assert.Equal(t, tt.expectedGasLimitDefault, config.EVM().GasEstimator().LimitDefault()) + assert.Nil(t, config.EVM().GasEstimator().LimitJobType().OCR()) + assert.Nil(t, config.EVM().GasEstimator().LimitJobType().DR()) + assert.Nil(t, config.EVM().GasEstimator().LimitJobType().VRF()) + assert.Nil(t, config.EVM().GasEstimator().LimitJobType().FM()) + assert.Nil(t, 
config.EVM().GasEstimator().LimitJobType().Keeper()) + assert.Equal(t, tt.expectedMinimumContractPayment, strings.TrimRight(config.EVM().MinContractPayment().Link(), "0")) + }) + } +} + +func TestChainScopedConfig_HeadTracker(t *testing.T) { + t.Parallel() + gcfg := configtest.NewTestGeneralConfig(t) + cfg := evmtest.NewChainScopedConfig(t, gcfg) + + ht := cfg.EVM().HeadTracker() + assert.Equal(t, uint32(100), ht.HistoryDepth()) + assert.Equal(t, uint32(3), ht.MaxBufferSize()) + assert.Equal(t, time.Second, ht.SamplingInterval()) +} + +func Test_chainScopedConfig_Validate(t *testing.T) { + configWithChains := func(t *testing.T, id int64, chains ...*toml.Chain) config.AppConfig { + return configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + chainID := ubig.NewI(id) + c.EVM[0] = &toml.EVMConfig{ChainID: chainID, Enabled: ptr(true), Chain: toml.Defaults(chainID, chains...), + Nodes: toml.EVMNodes{{ + Name: ptr("fake"), + WSURL: configurl.MustParseURL("wss://foo.test/ws"), + HTTPURL: configurl.MustParseURL("http://foo.test"), + }}} + }) + } + + // Validate built-in + for _, id := range toml.DefaultIDs { + id := id + t.Run(fmt.Sprintf("chainID-%s", id), func(t *testing.T) { + cfg := configWithChains(t, id.Int64()) + assert.NoError(t, cfg.Validate()) + }) + } + + // Invalid Cases: + + t.Run("arbitrum-estimator", func(t *testing.T) { + t.Run("custom", func(t *testing.T) { + cfg := configWithChains(t, 0, &toml.Chain{ + ChainType: ptr(string(commonconfig.ChainArbitrum)), + GasEstimator: toml.GasEstimator{ + Mode: ptr("BlockHistory"), + }, + }) + assert.NoError(t, cfg.Validate()) + }) + t.Run("mainnet", func(t *testing.T) { + cfg := configWithChains(t, 42161, &toml.Chain{ + GasEstimator: toml.GasEstimator{ + Mode: ptr("BlockHistory"), + BlockHistory: toml.BlockHistoryEstimator{ + BlockHistorySize: ptr[uint16](1), + }, + }, + }) + assert.NoError(t, cfg.Validate()) + }) + t.Run("testnet", func(t *testing.T) { + cfg := configWithChains(t, 421611, 
&toml.Chain{ + GasEstimator: toml.GasEstimator{ + Mode: ptr("SuggestedPrice"), + }, + }) + assert.NoError(t, cfg.Validate()) + }) + }) + + t.Run("optimism-estimator", func(t *testing.T) { + t.Run("custom", func(t *testing.T) { + cfg := configWithChains(t, 0, &toml.Chain{ + ChainType: ptr(string(commonconfig.ChainOptimismBedrock)), + GasEstimator: toml.GasEstimator{ + Mode: ptr("BlockHistory"), + }, + }) + assert.NoError(t, cfg.Validate()) + }) + t.Run("mainnet", func(t *testing.T) { + cfg := configWithChains(t, 10, &toml.Chain{ + GasEstimator: toml.GasEstimator{ + Mode: ptr("FixedPrice"), + }, + }) + assert.NoError(t, cfg.Validate()) + }) + t.Run("testnet", func(t *testing.T) { + cfg := configWithChains(t, 69, &toml.Chain{ + GasEstimator: toml.GasEstimator{ + Mode: ptr("FixedPrice"), + }, + }) + assert.NoError(t, cfg.Validate()) + }) + }) +} + +func TestNodePoolConfig(t *testing.T) { + gcfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + id := ubig.New(big.NewInt(rand.Int63())) + c.EVM[0] = &toml.EVMConfig{ + ChainID: id, + Chain: toml.Defaults(id, &toml.Chain{}), + } + }) + cfg := evmtest.NewChainScopedConfig(t, gcfg) + + require.Equal(t, "HighestHead", cfg.EVM().NodePool().SelectionMode()) + require.Equal(t, uint32(5), cfg.EVM().NodePool().SyncThreshold()) + require.Equal(t, time.Duration(10000000000), cfg.EVM().NodePool().PollInterval()) + require.Equal(t, uint32(5), cfg.EVM().NodePool().PollFailureThreshold()) +} + +func ptr[T any](t T) *T { return &t } diff --git a/core/chains/evm/config/mocks/chain_scoped_config.go b/core/chains/evm/config/mocks/chain_scoped_config.go new file mode 100644 index 00000000..9eeba8a7 --- /dev/null +++ b/core/chains/evm/config/mocks/chain_scoped_config.go @@ -0,0 +1,708 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + config "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + coreconfig "github.com/goplugin/pluginv3.0/v2/core/config" + + mock "github.com/stretchr/testify/mock" + + time "time" + + uuid "github.com/google/uuid" + + zapcore "go.uber.org/zap/zapcore" +) + +// ChainScopedConfig is an autogenerated mock type for the ChainScopedConfig type +type ChainScopedConfig struct { + mock.Mock +} + +// AppID provides a mock function with given fields: +func (_m *ChainScopedConfig) AppID() uuid.UUID { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AppID") + } + + var r0 uuid.UUID + if rf, ok := ret.Get(0).(func() uuid.UUID); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(uuid.UUID) + } + } + + return r0 +} + +// AuditLogger provides a mock function with given fields: +func (_m *ChainScopedConfig) AuditLogger() coreconfig.AuditLogger { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AuditLogger") + } + + var r0 coreconfig.AuditLogger + if rf, ok := ret.Get(0).(func() coreconfig.AuditLogger); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.AuditLogger) + } + } + + return r0 +} + +// AutoPprof provides a mock function with given fields: +func (_m *ChainScopedConfig) AutoPprof() coreconfig.AutoPprof { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AutoPprof") + } + + var r0 coreconfig.AutoPprof + if rf, ok := ret.Get(0).(func() coreconfig.AutoPprof); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.AutoPprof) + } + } + + return r0 +} + +// CosmosEnabled provides a mock function with given fields: +func (_m *ChainScopedConfig) CosmosEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CosmosEnabled") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = 
ret.Get(0).(bool) + } + + return r0 +} + +// Database provides a mock function with given fields: +func (_m *ChainScopedConfig) Database() coreconfig.Database { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Database") + } + + var r0 coreconfig.Database + if rf, ok := ret.Get(0).(func() coreconfig.Database); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.Database) + } + } + + return r0 +} + +// EVM provides a mock function with given fields: +func (_m *ChainScopedConfig) EVM() config.EVM { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EVM") + } + + var r0 config.EVM + if rf, ok := ret.Get(0).(func() config.EVM); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.EVM) + } + } + + return r0 +} + +// EVMEnabled provides a mock function with given fields: +func (_m *ChainScopedConfig) EVMEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EVMEnabled") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// EVMRPCEnabled provides a mock function with given fields: +func (_m *ChainScopedConfig) EVMRPCEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EVMRPCEnabled") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Feature provides a mock function with given fields: +func (_m *ChainScopedConfig) Feature() coreconfig.Feature { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Feature") + } + + var r0 coreconfig.Feature + if rf, ok := ret.Get(0).(func() coreconfig.Feature); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.Feature) + } + } + + return r0 +} + +// FluxMonitor provides a mock function with 
given fields: +func (_m *ChainScopedConfig) FluxMonitor() coreconfig.FluxMonitor { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FluxMonitor") + } + + var r0 coreconfig.FluxMonitor + if rf, ok := ret.Get(0).(func() coreconfig.FluxMonitor); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.FluxMonitor) + } + } + + return r0 +} + +// Insecure provides a mock function with given fields: +func (_m *ChainScopedConfig) Insecure() coreconfig.Insecure { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Insecure") + } + + var r0 coreconfig.Insecure + if rf, ok := ret.Get(0).(func() coreconfig.Insecure); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.Insecure) + } + } + + return r0 +} + +// InsecureFastScrypt provides a mock function with given fields: +func (_m *ChainScopedConfig) InsecureFastScrypt() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for InsecureFastScrypt") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// JobPipeline provides a mock function with given fields: +func (_m *ChainScopedConfig) JobPipeline() coreconfig.JobPipeline { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for JobPipeline") + } + + var r0 coreconfig.JobPipeline + if rf, ok := ret.Get(0).(func() coreconfig.JobPipeline); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.JobPipeline) + } + } + + return r0 +} + +// Keeper provides a mock function with given fields: +func (_m *ChainScopedConfig) Keeper() coreconfig.Keeper { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Keeper") + } + + var r0 coreconfig.Keeper + if rf, ok := ret.Get(0).(func() coreconfig.Keeper); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(coreconfig.Keeper) + } + } + + return r0 +} + +// Log provides a mock function with given fields: +func (_m *ChainScopedConfig) Log() coreconfig.Log { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Log") + } + + var r0 coreconfig.Log + if rf, ok := ret.Get(0).(func() coreconfig.Log); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.Log) + } + } + + return r0 +} + +// LogConfiguration provides a mock function with given fields: log, warn +func (_m *ChainScopedConfig) LogConfiguration(log coreconfig.LogfFn, warn coreconfig.LogfFn) { + _m.Called(log, warn) +} + +// Mercury provides a mock function with given fields: +func (_m *ChainScopedConfig) Mercury() coreconfig.Mercury { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Mercury") + } + + var r0 coreconfig.Mercury + if rf, ok := ret.Get(0).(func() coreconfig.Mercury); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.Mercury) + } + } + + return r0 +} + +// OCR provides a mock function with given fields: +func (_m *ChainScopedConfig) OCR() coreconfig.OCR { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OCR") + } + + var r0 coreconfig.OCR + if rf, ok := ret.Get(0).(func() coreconfig.OCR); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.OCR) + } + } + + return r0 +} + +// OCR2 provides a mock function with given fields: +func (_m *ChainScopedConfig) OCR2() coreconfig.OCR2 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OCR2") + } + + var r0 coreconfig.OCR2 + if rf, ok := ret.Get(0).(func() coreconfig.OCR2); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.OCR2) + } + } + + return r0 +} + +// P2P provides a mock function with given fields: +func (_m *ChainScopedConfig) P2P() coreconfig.P2P { + ret := _m.Called() + + if len(ret) == 0 
{ + panic("no return value specified for P2P") + } + + var r0 coreconfig.P2P + if rf, ok := ret.Get(0).(func() coreconfig.P2P); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.P2P) + } + } + + return r0 +} + +// Password provides a mock function with given fields: +func (_m *ChainScopedConfig) Password() coreconfig.Password { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Password") + } + + var r0 coreconfig.Password + if rf, ok := ret.Get(0).(func() coreconfig.Password); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.Password) + } + } + + return r0 +} + +// Prometheus provides a mock function with given fields: +func (_m *ChainScopedConfig) Prometheus() coreconfig.Prometheus { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Prometheus") + } + + var r0 coreconfig.Prometheus + if rf, ok := ret.Get(0).(func() coreconfig.Prometheus); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.Prometheus) + } + } + + return r0 +} + +// Pyroscope provides a mock function with given fields: +func (_m *ChainScopedConfig) Pyroscope() coreconfig.Pyroscope { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Pyroscope") + } + + var r0 coreconfig.Pyroscope + if rf, ok := ret.Get(0).(func() coreconfig.Pyroscope); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.Pyroscope) + } + } + + return r0 +} + +// RootDir provides a mock function with given fields: +func (_m *ChainScopedConfig) RootDir() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RootDir") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Sentry provides a mock function with given fields: +func (_m *ChainScopedConfig) Sentry() coreconfig.Sentry { + ret 
:= _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Sentry") + } + + var r0 coreconfig.Sentry + if rf, ok := ret.Get(0).(func() coreconfig.Sentry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.Sentry) + } + } + + return r0 +} + +// SetLogLevel provides a mock function with given fields: lvl +func (_m *ChainScopedConfig) SetLogLevel(lvl zapcore.Level) error { + ret := _m.Called(lvl) + + if len(ret) == 0 { + panic("no return value specified for SetLogLevel") + } + + var r0 error + if rf, ok := ret.Get(0).(func(zapcore.Level) error); ok { + r0 = rf(lvl) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetLogSQL provides a mock function with given fields: logSQL +func (_m *ChainScopedConfig) SetLogSQL(logSQL bool) { + _m.Called(logSQL) +} + +// SetPasswords provides a mock function with given fields: keystore, vrf +func (_m *ChainScopedConfig) SetPasswords(keystore *string, vrf *string) { + _m.Called(keystore, vrf) +} + +// ShutdownGracePeriod provides a mock function with given fields: +func (_m *ChainScopedConfig) ShutdownGracePeriod() time.Duration { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ShutdownGracePeriod") + } + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + +// SolanaEnabled provides a mock function with given fields: +func (_m *ChainScopedConfig) SolanaEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SolanaEnabled") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// StarkNetEnabled provides a mock function with given fields: +func (_m *ChainScopedConfig) StarkNetEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for StarkNetEnabled") + } + + var r0 bool + 
if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// TelemetryIngress provides a mock function with given fields: +func (_m *ChainScopedConfig) TelemetryIngress() coreconfig.TelemetryIngress { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TelemetryIngress") + } + + var r0 coreconfig.TelemetryIngress + if rf, ok := ret.Get(0).(func() coreconfig.TelemetryIngress); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.TelemetryIngress) + } + } + + return r0 +} + +// Threshold provides a mock function with given fields: +func (_m *ChainScopedConfig) Threshold() coreconfig.Threshold { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Threshold") + } + + var r0 coreconfig.Threshold + if rf, ok := ret.Get(0).(func() coreconfig.Threshold); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.Threshold) + } + } + + return r0 +} + +// Tracing provides a mock function with given fields: +func (_m *ChainScopedConfig) Tracing() coreconfig.Tracing { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Tracing") + } + + var r0 coreconfig.Tracing + if rf, ok := ret.Get(0).(func() coreconfig.Tracing); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.Tracing) + } + } + + return r0 +} + +// Validate provides a mock function with given fields: +func (_m *ChainScopedConfig) Validate() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Validate") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ValidateDB provides a mock function with given fields: +func (_m *ChainScopedConfig) ValidateDB() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ValidateDB") + } + + var 
r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// WebServer provides a mock function with given fields: +func (_m *ChainScopedConfig) WebServer() coreconfig.WebServer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for WebServer") + } + + var r0 coreconfig.WebServer + if rf, ok := ret.Get(0).(func() coreconfig.WebServer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(coreconfig.WebServer) + } + } + + return r0 +} + +// NewChainScopedConfig creates a new instance of ChainScopedConfig. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewChainScopedConfig(t interface { + mock.TestingT + Cleanup(func()) +}) *ChainScopedConfig { + mock := &ChainScopedConfig{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/config/mocks/gas_estimator.go b/core/chains/evm/config/mocks/gas_estimator.go new file mode 100644 index 00000000..b3b1ed64 --- /dev/null +++ b/core/chains/evm/config/mocks/gas_estimator.go @@ -0,0 +1,393 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + common "github.com/ethereum/go-ethereum/common" + assets "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + + config "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + + mock "github.com/stretchr/testify/mock" +) + +// GasEstimator is an autogenerated mock type for the GasEstimator type +type GasEstimator struct { + mock.Mock +} + +// BlockHistory provides a mock function with given fields: +func (_m *GasEstimator) BlockHistory() config.BlockHistory { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockHistory") + } + + var r0 config.BlockHistory + if rf, ok := ret.Get(0).(func() config.BlockHistory); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.BlockHistory) + } + } + + return r0 +} + +// BumpMin provides a mock function with given fields: +func (_m *GasEstimator) BumpMin() *assets.Wei { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BumpMin") + } + + var r0 *assets.Wei + if rf, ok := ret.Get(0).(func() *assets.Wei); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + return r0 +} + +// BumpPercent provides a mock function with given fields: +func (_m *GasEstimator) BumpPercent() uint16 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BumpPercent") + } + + var r0 uint16 + if rf, ok := ret.Get(0).(func() uint16); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint16) + } + + return r0 +} + +// BumpThreshold provides a mock function with given fields: +func (_m *GasEstimator) BumpThreshold() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BumpThreshold") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// BumpTxDepth provides a mock function with given fields: +func (_m *GasEstimator) 
BumpTxDepth() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BumpTxDepth") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// EIP1559DynamicFees provides a mock function with given fields: +func (_m *GasEstimator) EIP1559DynamicFees() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EIP1559DynamicFees") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// FeeCapDefault provides a mock function with given fields: +func (_m *GasEstimator) FeeCapDefault() *assets.Wei { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FeeCapDefault") + } + + var r0 *assets.Wei + if rf, ok := ret.Get(0).(func() *assets.Wei); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + return r0 +} + +// LimitDefault provides a mock function with given fields: +func (_m *GasEstimator) LimitDefault() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LimitDefault") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// LimitJobType provides a mock function with given fields: +func (_m *GasEstimator) LimitJobType() config.LimitJobType { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LimitJobType") + } + + var r0 config.LimitJobType + if rf, ok := ret.Get(0).(func() config.LimitJobType); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.LimitJobType) + } + } + + return r0 +} + +// LimitMax provides a mock function with given fields: +func (_m *GasEstimator) LimitMax() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for 
LimitMax") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// LimitMultiplier provides a mock function with given fields: +func (_m *GasEstimator) LimitMultiplier() float32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LimitMultiplier") + } + + var r0 float32 + if rf, ok := ret.Get(0).(func() float32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(float32) + } + + return r0 +} + +// LimitTransfer provides a mock function with given fields: +func (_m *GasEstimator) LimitTransfer() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LimitTransfer") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// Mode provides a mock function with given fields: +func (_m *GasEstimator) Mode() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Mode") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// PriceDefault provides a mock function with given fields: +func (_m *GasEstimator) PriceDefault() *assets.Wei { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PriceDefault") + } + + var r0 *assets.Wei + if rf, ok := ret.Get(0).(func() *assets.Wei); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + return r0 +} + +// PriceMax provides a mock function with given fields: +func (_m *GasEstimator) PriceMax() *assets.Wei { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PriceMax") + } + + var r0 *assets.Wei + if rf, ok := ret.Get(0).(func() *assets.Wei); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + return r0 +} + +// 
PriceMaxKey provides a mock function with given fields: _a0 +func (_m *GasEstimator) PriceMaxKey(_a0 common.Address) *assets.Wei { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for PriceMaxKey") + } + + var r0 *assets.Wei + if rf, ok := ret.Get(0).(func(common.Address) *assets.Wei); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + return r0 +} + +// PriceMin provides a mock function with given fields: +func (_m *GasEstimator) PriceMin() *assets.Wei { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PriceMin") + } + + var r0 *assets.Wei + if rf, ok := ret.Get(0).(func() *assets.Wei); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + return r0 +} + +// TipCapDefault provides a mock function with given fields: +func (_m *GasEstimator) TipCapDefault() *assets.Wei { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TipCapDefault") + } + + var r0 *assets.Wei + if rf, ok := ret.Get(0).(func() *assets.Wei); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + return r0 +} + +// TipCapMin provides a mock function with given fields: +func (_m *GasEstimator) TipCapMin() *assets.Wei { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TipCapMin") + } + + var r0 *assets.Wei + if rf, ok := ret.Get(0).(func() *assets.Wei); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + return r0 +} + +// NewGasEstimator creates a new instance of GasEstimator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewGasEstimator(t interface { + mock.TestingT + Cleanup(func()) +}) *GasEstimator { + mock := &GasEstimator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/config/toml/config.go b/core/chains/evm/config/toml/config.go new file mode 100644 index 00000000..fc21f4f8 --- /dev/null +++ b/core/chains/evm/config/toml/config.go @@ -0,0 +1,834 @@ +package toml + +import ( + "fmt" + "net/url" + "slices" + "strconv" + + "github.com/ethereum/go-ethereum/core/txpool/legacypool" + "github.com/pelletier/go-toml/v2" + "github.com/shopspring/decimal" + "go.uber.org/multierr" + "gopkg.in/guregu/null.v4" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + commonconfig "github.com/goplugin/plugin-common/pkg/config" + commontypes "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/core/chains" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +type HasEVMConfigs interface { + EVMConfigs() EVMConfigs +} + +type EVMConfigs []*EVMConfig + +func (cs EVMConfigs) ValidateConfig() (err error) { + return cs.validateKeys() +} + +func (cs EVMConfigs) validateKeys() (err error) { + // Unique chain IDs + chainIDs := commonconfig.UniqueStrings{} + for i, c := range cs { + if chainIDs.IsDupeFmt(c.ChainID) { + err = multierr.Append(err, commonconfig.NewErrDuplicate(fmt.Sprintf("%d.ChainID", i), c.ChainID.String())) + } + } + + // Unique node names + names := commonconfig.UniqueStrings{} + for i, c := range cs { + for j, n := range c.Nodes { + if names.IsDupe(n.Name) { + err = multierr.Append(err, commonconfig.NewErrDuplicate(fmt.Sprintf("%d.Nodes.%d.Name", i, j), *n.Name)) + } + } + } + + // Unique node WSURLs + 
wsURLs := commonconfig.UniqueStrings{} + for i, c := range cs { + for j, n := range c.Nodes { + u := (*url.URL)(n.WSURL) + if wsURLs.IsDupeFmt(u) { + err = multierr.Append(err, commonconfig.NewErrDuplicate(fmt.Sprintf("%d.Nodes.%d.WSURL", i, j), u.String())) + } + } + } + + // Unique node HTTPURLs + httpURLs := commonconfig.UniqueStrings{} + for i, c := range cs { + for j, n := range c.Nodes { + u := (*url.URL)(n.HTTPURL) + if httpURLs.IsDupeFmt(u) { + err = multierr.Append(err, commonconfig.NewErrDuplicate(fmt.Sprintf("%d.Nodes.%d.HTTPURL", i, j), u.String())) + } + } + } + return +} + +func (cs *EVMConfigs) SetFrom(fs *EVMConfigs) (err error) { + if err1 := fs.validateKeys(); err1 != nil { + return err1 + } + for _, f := range *fs { + if f.ChainID == nil { + *cs = append(*cs, f) + } else if i := slices.IndexFunc(*cs, func(c *EVMConfig) bool { + return c.ChainID != nil && c.ChainID.Cmp(f.ChainID) == 0 + }); i == -1 { + *cs = append(*cs, f) + } else { + (*cs)[i].SetFrom(f) + } + } + return +} + +func (cs EVMConfigs) totalChains() int { + total := 0 + for _, ch := range cs { + if ch == nil { + continue + } + total++ + } + return total +} +func (cs EVMConfigs) Chains(ids ...string) (r []commontypes.ChainStatus, total int, err error) { + total = cs.totalChains() + for _, ch := range cs { + if ch == nil { + continue + } + chainID := ch.ChainID.String() + if len(ids) > 0 { + var match bool + for _, id := range ids { + if id == chainID { + match = true + break + } + } + if !match { + continue + } + } + ch2 := commontypes.ChainStatus{ + ID: ch.ChainID.String(), + Enabled: ch.IsEnabled(), + } + ch2.Config, err = ch.TOMLString() + if err != nil { + return + } + r = append(r, ch2) + } + return +} + +func (cs EVMConfigs) Node(name string) (types.Node, error) { + for i := range cs { + for _, n := range cs[i].Nodes { + if n.Name != nil && *n.Name == name { + return legacyNode(n, cs[i].ChainID), nil + } + } + } + return types.Node{}, fmt.Errorf("node %s: %w", name, 
chains.ErrNotFound) +} + +func (cs EVMConfigs) NodeStatus(name string) (commontypes.NodeStatus, error) { + for i := range cs { + for _, n := range cs[i].Nodes { + if n.Name != nil && *n.Name == name { + return nodeStatus(n, cs[i].ChainID.String()) + } + } + } + return commontypes.NodeStatus{}, fmt.Errorf("node %s: %w", name, chains.ErrNotFound) +} + +func legacyNode(n *Node, chainID *big.Big) (v2 types.Node) { + v2.Name = *n.Name + v2.EVMChainID = *chainID + if n.HTTPURL != nil { + v2.HTTPURL = null.StringFrom(n.HTTPURL.String()) + } + if n.WSURL != nil { + v2.WSURL = null.StringFrom(n.WSURL.String()) + } + if n.SendOnly != nil { + v2.SendOnly = *n.SendOnly + } + if n.Order != nil { + v2.Order = *n.Order + } + return +} + +func nodeStatus(n *Node, chainID string) (commontypes.NodeStatus, error) { + var s commontypes.NodeStatus + s.ChainID = chainID + s.Name = *n.Name + b, err := toml.Marshal(n) + if err != nil { + return commontypes.NodeStatus{}, err + } + s.Config = string(b) + return s, nil +} + +func (cs EVMConfigs) nodes(id string) (ns EVMNodes) { + for _, c := range cs { + if c.ChainID.String() == id { + return c.Nodes + } + } + return nil +} + +func (cs EVMConfigs) Nodes(chainID string) (ns []types.Node, err error) { + evmID, err := ChainIDInt64(chainID) + if err != nil { + return nil, fmt.Errorf("invalid evm chain id %q : %w", chainID, err) + } + nodes := cs.nodes(chainID) + if nodes == nil { + err = fmt.Errorf("no nodes: chain %q: %w", chainID, chains.ErrNotFound) + return + } + for _, n := range nodes { + if n == nil { + continue + } + + ns = append(ns, legacyNode(n, big.NewI(evmID))) + } + return +} + +func (cs EVMConfigs) NodeStatuses(chainIDs ...string) (ns []commontypes.NodeStatus, err error) { + if len(chainIDs) == 0 { + for i := range cs { + for _, n := range cs[i].Nodes { + if n == nil { + continue + } + n2, err := nodeStatus(n, cs[i].ChainID.String()) + if err != nil { + return nil, err + } + ns = append(ns, n2) + } + } + return + } + for _, id := 
range chainIDs { + for _, n := range cs.nodes(id) { + if n == nil { + continue + } + n2, err := nodeStatus(n, id) + if err != nil { + return nil, err + } + ns = append(ns, n2) + } + } + return +} + +type EVMNodes []*Node + +func (ns *EVMNodes) SetFrom(fs *EVMNodes) { + for _, f := range *fs { + if f.Name == nil { + *ns = append(*ns, f) + } else if i := slices.IndexFunc(*ns, func(n *Node) bool { + return n.Name != nil && *n.Name == *f.Name + }); i == -1 { + *ns = append(*ns, f) + } else { + (*ns)[i].SetFrom(f) + } + } +} + +type EVMConfig struct { + ChainID *big.Big + Enabled *bool + Chain + Nodes EVMNodes +} + +func (c *EVMConfig) IsEnabled() bool { + return c.Enabled == nil || *c.Enabled +} + +func (c *EVMConfig) SetFrom(f *EVMConfig) { + if f.ChainID != nil { + c.ChainID = f.ChainID + } + if f.Enabled != nil { + c.Enabled = f.Enabled + } + c.Chain.SetFrom(&f.Chain) + c.Nodes.SetFrom(&f.Nodes) +} + +func (c *EVMConfig) ValidateConfig() (err error) { + if c.ChainID == nil { + err = multierr.Append(err, commonconfig.ErrMissing{Name: "ChainID", Msg: "required for all chains"}) + } else if c.ChainID.String() == "" { + err = multierr.Append(err, commonconfig.ErrEmpty{Name: "ChainID", Msg: "required for all chains"}) + } else if must, ok := ChainTypeForID(c.ChainID); ok { // known chain id + if c.ChainType == nil && must != "" { + err = multierr.Append(err, commonconfig.ErrMissing{Name: "ChainType", + Msg: fmt.Sprintf("only %q can be used with this chain id", must)}) + } else if c.ChainType != nil && *c.ChainType != string(must) { + if *c.ChainType == "" { + err = multierr.Append(err, commonconfig.ErrEmpty{Name: "ChainType", + Msg: fmt.Sprintf("only %q can be used with this chain id", must)}) + } else if must == "" { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "ChainType", Value: *c.ChainType, + Msg: "must not be set with this chain id"}) + } else { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "ChainType", Value: *c.ChainType, + Msg: 
fmt.Sprintf("only %q can be used with this chain id", must)}) + } + } + } + + if len(c.Nodes) == 0 { + err = multierr.Append(err, commonconfig.ErrMissing{Name: "Nodes", Msg: "must have at least one node"}) + } else { + var hasPrimary bool + for _, n := range c.Nodes { + if n.SendOnly != nil && *n.SendOnly { + continue + } + hasPrimary = true + break + } + if !hasPrimary { + err = multierr.Append(err, commonconfig.ErrMissing{Name: "Nodes", + Msg: "must have at least one primary node with WSURL"}) + } + } + + err = multierr.Append(err, c.Chain.ValidateConfig()) + + return +} + +func (c *EVMConfig) TOMLString() (string, error) { + b, err := toml.Marshal(c) + if err != nil { + return "", err + } + return string(b), nil +} + +type Chain struct { + AutoCreateKey *bool + BlockBackfillDepth *uint32 + BlockBackfillSkip *bool + ChainType *string + FinalityDepth *uint32 + FinalityTagEnabled *bool + FlagsContractAddress *ethkey.EIP55Address + LinkContractAddress *ethkey.EIP55Address + LogBackfillBatchSize *uint32 + LogPollInterval *commonconfig.Duration + LogKeepBlocksDepth *uint32 + MinIncomingConfirmations *uint32 + MinContractPayment *commonassets.Link + NonceAutoSync *bool + NoNewHeadsThreshold *commonconfig.Duration + OperatorFactoryAddress *ethkey.EIP55Address + RPCDefaultBatchSize *uint32 + RPCBlockQueryDelay *uint16 + + Transactions Transactions `toml:",omitempty"` + BalanceMonitor BalanceMonitor `toml:",omitempty"` + GasEstimator GasEstimator `toml:",omitempty"` + HeadTracker HeadTracker `toml:",omitempty"` + KeySpecific KeySpecificConfig `toml:",omitempty"` + NodePool NodePool `toml:",omitempty"` + OCR OCR `toml:",omitempty"` + OCR2 OCR2 `toml:",omitempty"` + ChainWriter ChainWriter `toml:",omitempty"` +} + +func (c *Chain) ValidateConfig() (err error) { + var chainType config.ChainType + if c.ChainType != nil { + chainType = config.ChainType(*c.ChainType) + } + if !chainType.IsValid() { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "ChainType", Value: 
*c.ChainType, + Msg: config.ErrInvalidChainType.Error()}) + } + + if c.GasEstimator.BumpTxDepth != nil && *c.GasEstimator.BumpTxDepth > *c.Transactions.MaxInFlight { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "GasEstimator.BumpTxDepth", Value: *c.GasEstimator.BumpTxDepth, + Msg: "must be less than or equal to Transactions.MaxInFlight"}) + } + if *c.HeadTracker.HistoryDepth < *c.FinalityDepth { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "HeadTracker.HistoryDepth", Value: *c.HeadTracker.HistoryDepth, + Msg: "must be equal to or greater than FinalityDepth"}) + } + if *c.FinalityDepth < 1 { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "FinalityDepth", Value: *c.FinalityDepth, + Msg: "must be greater than or equal to 1"}) + } + if *c.MinIncomingConfirmations < 1 { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "MinIncomingConfirmations", Value: *c.MinIncomingConfirmations, + Msg: "must be greater than or equal to 1"}) + } + return +} + +type Transactions struct { + ForwardersEnabled *bool + MaxInFlight *uint32 + MaxQueued *uint32 + ReaperInterval *commonconfig.Duration + ReaperThreshold *commonconfig.Duration + ResendAfterThreshold *commonconfig.Duration +} + +func (t *Transactions) setFrom(f *Transactions) { + if v := f.ForwardersEnabled; v != nil { + t.ForwardersEnabled = v + } + if v := f.MaxInFlight; v != nil { + t.MaxInFlight = v + } + if v := f.MaxQueued; v != nil { + t.MaxQueued = v + } + if v := f.ReaperInterval; v != nil { + t.ReaperInterval = v + } + if v := f.ReaperThreshold; v != nil { + t.ReaperThreshold = v + } + if v := f.ResendAfterThreshold; v != nil { + t.ResendAfterThreshold = v + } +} + +type OCR2 struct { + Automation Automation `toml:",omitempty"` +} + +func (o *OCR2) setFrom(f *OCR2) { + o.Automation.setFrom(&f.Automation) +} + +type Automation struct { + GasLimit *uint32 +} + +func (a *Automation) setFrom(f *Automation) { + if v := f.GasLimit; v != nil { + a.GasLimit = v + } +} + +type 
ChainWriter struct { + FromAddress *ethkey.EIP55Address `toml:",omitempty"` + ForwarderAddress *ethkey.EIP55Address `toml:",omitempty"` +} + +func (m *ChainWriter) setFrom(f *ChainWriter) { + if v := f.FromAddress; v != nil { + m.FromAddress = v + } + if v := f.ForwarderAddress; v != nil { + m.ForwarderAddress = v + } +} + +type BalanceMonitor struct { + Enabled *bool +} + +func (m *BalanceMonitor) setFrom(f *BalanceMonitor) { + if v := f.Enabled; v != nil { + m.Enabled = v + } +} + +type GasEstimator struct { + Mode *string + + PriceDefault *assets.Wei + PriceMax *assets.Wei + PriceMin *assets.Wei + + LimitDefault *uint32 + LimitMax *uint32 + LimitMultiplier *decimal.Decimal + LimitTransfer *uint32 + LimitJobType GasLimitJobType `toml:",omitempty"` + + BumpMin *assets.Wei + BumpPercent *uint16 + BumpThreshold *uint32 + BumpTxDepth *uint32 + + EIP1559DynamicFees *bool + + FeeCapDefault *assets.Wei + TipCapDefault *assets.Wei + TipCapMin *assets.Wei + + BlockHistory BlockHistoryEstimator `toml:",omitempty"` +} + +func (e *GasEstimator) ValidateConfig() (err error) { + if uint64(*e.BumpPercent) < legacypool.DefaultConfig.PriceBump { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "BumpPercent", Value: *e.BumpPercent, + Msg: fmt.Sprintf("may not be less than Geth's default of %d", legacypool.DefaultConfig.PriceBump)}) + } + if e.TipCapDefault.Cmp(e.TipCapMin) < 0 { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "TipCapDefault", Value: e.TipCapDefault, + Msg: "must be greater than or equal to TipCapMinimum"}) + } + if e.FeeCapDefault.Cmp(e.TipCapDefault) < 0 { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "FeeCapDefault", Value: e.TipCapDefault, + Msg: "must be greater than or equal to TipCapDefault"}) + } + if *e.Mode == "FixedPrice" && *e.BumpThreshold == 0 && *e.EIP1559DynamicFees && e.FeeCapDefault.Cmp(e.PriceMax) != 0 { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "FeeCapDefault", Value: e.FeeCapDefault, + Msg: 
fmt.Sprintf("must be equal to PriceMax (%s) since you are using FixedPrice estimation with gas bumping disabled in "+ + "EIP1559 mode - PriceMax will be used as the FeeCap for transactions instead of FeeCapDefault", e.PriceMax)}) + } else if e.FeeCapDefault.Cmp(e.PriceMax) > 0 { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "FeeCapDefault", Value: e.FeeCapDefault, + Msg: fmt.Sprintf("must be less than or equal to PriceMax (%s)", e.PriceMax)}) + } + + if e.PriceMin.Cmp(e.PriceDefault) > 0 { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "PriceMin", Value: e.PriceMin, + Msg: "must be less than or equal to PriceDefault"}) + } + if e.PriceMax.Cmp(e.PriceDefault) < 0 { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "PriceMax", Value: e.PriceMin, + Msg: "must be greater than or equal to PriceDefault"}) + } + if *e.Mode == "BlockHistory" && *e.BlockHistory.BlockHistorySize <= 0 { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "BlockHistory.BlockHistorySize", Value: *e.BlockHistory.BlockHistorySize, + Msg: "must be greater than or equal to 1 with BlockHistory Mode"}) + } + + return +} + +func (e *GasEstimator) setFrom(f *GasEstimator) { + if v := f.Mode; v != nil { + e.Mode = v + } + if v := f.EIP1559DynamicFees; v != nil { + e.EIP1559DynamicFees = v + } + if v := f.BumpPercent; v != nil { + e.BumpPercent = v + } + if v := f.BumpThreshold; v != nil { + e.BumpThreshold = v + } + if v := f.BumpTxDepth; v != nil { + e.BumpTxDepth = v + } + if v := f.BumpMin; v != nil { + e.BumpMin = v + } + if v := f.FeeCapDefault; v != nil { + e.FeeCapDefault = v + } + if v := f.LimitDefault; v != nil { + e.LimitDefault = v + } + if v := f.LimitMax; v != nil { + e.LimitMax = v + } + if v := f.LimitMultiplier; v != nil { + e.LimitMultiplier = v + } + if v := f.LimitTransfer; v != nil { + e.LimitTransfer = v + } + if v := f.PriceDefault; v != nil { + e.PriceDefault = v + } + if v := f.TipCapDefault; v != nil { + e.TipCapDefault = v + } + if v := 
f.TipCapMin; v != nil {
		e.TipCapMin = v
	}
	if v := f.PriceMax; v != nil {
		e.PriceMax = v
	}
	if v := f.PriceMin; v != nil {
		e.PriceMin = v
	}
	e.LimitJobType.setFrom(&f.LimitJobType)
	e.BlockHistory.setFrom(&f.BlockHistory)
}

// GasLimitJobType holds optional per-job-type gas limit overrides.
type GasLimitJobType struct {
	OCR    *uint32 `toml:",inline"`
	OCR2   *uint32 `toml:",inline"`
	DR     *uint32 `toml:",inline"`
	VRF    *uint32 `toml:",inline"`
	FM     *uint32 `toml:",inline"`
	Keeper *uint32 `toml:",inline"`
}

// setFrom copies every non-nil override from f into t.
func (t *GasLimitJobType) setFrom(f *GasLimitJobType) {
	if v := f.OCR; v != nil {
		t.OCR = v
	}
	if v := f.OCR2; v != nil {
		t.OCR2 = v
	}
	if v := f.DR; v != nil {
		t.DR = v
	}
	if v := f.VRF; v != nil {
		t.VRF = v
	}
	if v := f.FM; v != nil {
		t.FM = v
	}
	if v := f.Keeper; v != nil {
		t.Keeper = v
	}
}

// BlockHistoryEstimator holds TOML settings for the block-history gas estimator.
type BlockHistoryEstimator struct {
	BatchSize                 *uint32
	BlockHistorySize          *uint16
	CheckInclusionBlocks      *uint16
	CheckInclusionPercentile  *uint16
	EIP1559FeeCapBufferBlocks *uint16
	TransactionPercentile     *uint16
}

// setFrom copies every non-nil value from f into e.
func (e *BlockHistoryEstimator) setFrom(f *BlockHistoryEstimator) {
	if f.BatchSize != nil {
		e.BatchSize = f.BatchSize
	}
	if f.BlockHistorySize != nil {
		e.BlockHistorySize = f.BlockHistorySize
	}
	if f.CheckInclusionBlocks != nil {
		e.CheckInclusionBlocks = f.CheckInclusionBlocks
	}
	if f.CheckInclusionPercentile != nil {
		e.CheckInclusionPercentile = f.CheckInclusionPercentile
	}
	if f.EIP1559FeeCapBufferBlocks != nil {
		e.EIP1559FeeCapBufferBlocks = f.EIP1559FeeCapBufferBlocks
	}
	if f.TransactionPercentile != nil {
		e.TransactionPercentile = f.TransactionPercentile
	}
}

// KeySpecificConfig is a list of per-key gas overrides.
type KeySpecificConfig []KeySpecific

// ValidateConfig appends a duplicate error for every key address that appears
// more than once in the list.
func (ks KeySpecificConfig) ValidateConfig() (err error) {
	seen := map[string]struct{}{}
	for _, k := range ks {
		addr := k.Key.String()
		if _, dup := seen[addr]; dup {
			err = multierr.Append(err, commonconfig.NewErrDuplicate("Key", addr))
			continue
		}
		seen[addr] = struct{}{}
	}
	return
}

// KeySpecific pairs a key address with its gas-estimator overrides.
type KeySpecific struct {
	Key          *ethkey.EIP55Address
	GasEstimator KeySpecificGasEstimator `toml:",omitempty"`
}

+type KeySpecificGasEstimator struct { + PriceMax *assets.Wei +} + +func (e *KeySpecificGasEstimator) setFrom(f *KeySpecificGasEstimator) { + if v := f.PriceMax; v != nil { + e.PriceMax = v + } +} + +type HeadTracker struct { + HistoryDepth *uint32 + MaxBufferSize *uint32 + SamplingInterval *commonconfig.Duration +} + +func (t *HeadTracker) setFrom(f *HeadTracker) { + if v := f.HistoryDepth; v != nil { + t.HistoryDepth = v + } + if v := f.MaxBufferSize; v != nil { + t.MaxBufferSize = v + } + if v := f.SamplingInterval; v != nil { + t.SamplingInterval = v + } +} + +type NodePool struct { + PollFailureThreshold *uint32 + PollInterval *commonconfig.Duration + SelectionMode *string + SyncThreshold *uint32 + LeaseDuration *commonconfig.Duration +} + +func (p *NodePool) setFrom(f *NodePool) { + if v := f.PollFailureThreshold; v != nil { + p.PollFailureThreshold = v + } + if v := f.PollInterval; v != nil { + p.PollInterval = v + } + if v := f.SelectionMode; v != nil { + p.SelectionMode = v + } + if v := f.SyncThreshold; v != nil { + p.SyncThreshold = v + } + if v := f.LeaseDuration; v != nil { + p.LeaseDuration = v + } +} + +type OCR struct { + ContractConfirmations *uint16 + ContractTransmitterTransmitTimeout *commonconfig.Duration + DatabaseTimeout *commonconfig.Duration + DeltaCOverride *commonconfig.Duration + DeltaCJitterOverride *commonconfig.Duration + ObservationGracePeriod *commonconfig.Duration +} + +func (o *OCR) setFrom(f *OCR) { + if v := f.ContractConfirmations; v != nil { + o.ContractConfirmations = v + } + if v := f.ContractTransmitterTransmitTimeout; v != nil { + o.ContractTransmitterTransmitTimeout = v + } + if v := f.DatabaseTimeout; v != nil { + o.DatabaseTimeout = v + } + if v := f.DeltaCOverride; v != nil { + o.DeltaCOverride = v + } + if v := f.DeltaCJitterOverride; v != nil { + o.DeltaCJitterOverride = v + } + if v := f.ObservationGracePeriod; v != nil { + o.ObservationGracePeriod = v + } +} + +type Node struct { + Name *string + WSURL 
*commonconfig.URL + HTTPURL *commonconfig.URL + SendOnly *bool + Order *int32 +} + +func (n *Node) ValidateConfig() (err error) { + if n.Name == nil { + err = multierr.Append(err, commonconfig.ErrMissing{Name: "Name", Msg: "required for all nodes"}) + } else if *n.Name == "" { + err = multierr.Append(err, commonconfig.ErrEmpty{Name: "Name", Msg: "required for all nodes"}) + } + + var sendOnly bool + if n.SendOnly != nil { + sendOnly = *n.SendOnly + } + if n.WSURL == nil { + if !sendOnly { + err = multierr.Append(err, commonconfig.ErrMissing{Name: "WSURL", Msg: "required for primary nodes"}) + } + } else if n.WSURL.IsZero() { + if !sendOnly { + err = multierr.Append(err, commonconfig.ErrEmpty{Name: "WSURL", Msg: "required for primary nodes"}) + } + } else { + switch n.WSURL.Scheme { + case "ws", "wss": + default: + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "WSURL", Value: n.WSURL.Scheme, Msg: "must be ws or wss"}) + } + } + + if n.HTTPURL == nil { + err = multierr.Append(err, commonconfig.ErrMissing{Name: "HTTPURL", Msg: "required for all nodes"}) + } else if n.HTTPURL.IsZero() { + err = multierr.Append(err, commonconfig.ErrEmpty{Name: "HTTPURL", Msg: "required for all nodes"}) + } else { + switch n.HTTPURL.Scheme { + case "http", "https": + default: + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "HTTPURL", Value: n.HTTPURL.Scheme, Msg: "must be http or https"}) + } + } + + if n.Order != nil && (*n.Order < 1 || *n.Order > 100) { + err = multierr.Append(err, commonconfig.ErrInvalid{Name: "Order", Value: *n.Order, Msg: "must be between 1 and 100"}) + } else if n.Order == nil { + z := int32(100) + n.Order = &z + } + + return +} + +func (n *Node) SetFrom(f *Node) { + if f.Name != nil { + n.Name = f.Name + } + if f.WSURL != nil { + n.WSURL = f.WSURL + } + if f.HTTPURL != nil { + n.HTTPURL = f.HTTPURL + } + if f.SendOnly != nil { + n.SendOnly = f.SendOnly + } + if f.Order != nil { + n.Order = f.Order + } +} + +func ChainIDInt64(cid string) 
(int64, error) { + return strconv.ParseInt(cid, 10, 64) +} diff --git a/core/chains/evm/config/toml/defaults.go b/core/chains/evm/config/toml/defaults.go new file mode 100644 index 00000000..e5e1594c --- /dev/null +++ b/core/chains/evm/config/toml/defaults.go @@ -0,0 +1,181 @@ +package toml + +import ( + "bytes" + "embed" + "log" + "path/filepath" + "slices" + "strings" + + cconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +var ( + //go:embed defaults/*.toml + defaultsFS embed.FS + fallback Chain + defaults = map[string]Chain{} + defaultNames = map[string]string{} + + // DefaultIDs is the set of chain ids which have defaults. + DefaultIDs []*big.Big +) + +func init() { + fes, err := defaultsFS.ReadDir("defaults") + if err != nil { + log.Fatalf("failed to read defaults/: %v", err) + } + for _, fe := range fes { + path := filepath.Join("defaults", fe.Name()) + b, err := defaultsFS.ReadFile(path) + if err != nil { + log.Fatalf("failed to read %q: %v", path, err) + } + var config = struct { + ChainID *big.Big + Chain + }{} + + if err := cconfig.DecodeTOML(bytes.NewReader(b), &config); err != nil { + log.Fatalf("failed to decode %q: %v", path, err) + } + if fe.Name() == "fallback.toml" { + if config.ChainID != nil { + log.Fatalf("fallback ChainID must be nil, not: %s", config.ChainID) + } + fallback = config.Chain + continue + } + if config.ChainID == nil { + log.Fatalf("missing ChainID: %s", path) + } + DefaultIDs = append(DefaultIDs, config.ChainID) + id := config.ChainID.String() + if _, ok := defaults[id]; ok { + log.Fatalf("%q contains duplicate ChainID: %s", path, id) + } + defaults[id] = config.Chain + defaultNames[id] = strings.ReplaceAll(strings.TrimSuffix(fe.Name(), ".toml"), "_", " ") + } + slices.SortFunc(DefaultIDs, func(a, b *big.Big) int { + return a.Cmp(b) + }) +} + +// DefaultsNamed returns the default Chain values, optionally 
for the given chainID, as well as a name if the chainID is known. +func DefaultsNamed(chainID *big.Big) (c Chain, name string) { + c.SetFrom(&fallback) + if chainID == nil { + return + } + s := chainID.String() + if d, ok := defaults[s]; ok { + c.SetFrom(&d) + name = defaultNames[s] + } + return +} + +// Defaults returns a Chain based on the defaults for chainID and fields from with, applied in order so later Chains +// override earlier ones. +func Defaults(chainID *big.Big, with ...*Chain) Chain { + c, _ := DefaultsNamed(chainID) + for _, w := range with { + c.SetFrom(w) + } + return c +} + +func ChainTypeForID(chainID *big.Big) (config.ChainType, bool) { + s := chainID.String() + if d, ok := defaults[s]; ok { + if d.ChainType == nil { + return "", true + } + return config.ChainType(*d.ChainType), true + } + return "", false +} + +// SetFrom updates c with any non-nil values from f. +func (c *Chain) SetFrom(f *Chain) { + if v := f.AutoCreateKey; v != nil { + c.AutoCreateKey = v + } + if v := f.BlockBackfillDepth; v != nil { + c.BlockBackfillDepth = v + } + if v := f.BlockBackfillSkip; v != nil { + c.BlockBackfillSkip = v + } + if v := f.ChainType; v != nil { + c.ChainType = v + } + if v := f.FinalityDepth; v != nil { + c.FinalityDepth = v + } + if v := f.FinalityTagEnabled; v != nil { + c.FinalityTagEnabled = v + } + if v := f.FlagsContractAddress; v != nil { + c.FlagsContractAddress = v + } + if v := f.LinkContractAddress; v != nil { + c.LinkContractAddress = v + } + if v := f.LogBackfillBatchSize; v != nil { + c.LogBackfillBatchSize = v + } + if v := f.LogPollInterval; v != nil { + c.LogPollInterval = v + } + if v := f.LogKeepBlocksDepth; v != nil { + c.LogKeepBlocksDepth = v + } + if v := f.MinIncomingConfirmations; v != nil { + c.MinIncomingConfirmations = v + } + if v := f.MinContractPayment; v != nil { + c.MinContractPayment = v + } + if v := f.NonceAutoSync; v != nil { + c.NonceAutoSync = v + } + if v := f.NoNewHeadsThreshold; v != nil { + 
c.NoNewHeadsThreshold = v + } + if v := f.OperatorFactoryAddress; v != nil { + c.OperatorFactoryAddress = v + } + if v := f.RPCDefaultBatchSize; v != nil { + c.RPCDefaultBatchSize = v + } + if v := f.RPCBlockQueryDelay; v != nil { + c.RPCBlockQueryDelay = v + } + + c.Transactions.setFrom(&f.Transactions) + c.BalanceMonitor.setFrom(&f.BalanceMonitor) + c.GasEstimator.setFrom(&f.GasEstimator) + + if ks := f.KeySpecific; ks != nil { + for i := range ks { + v := ks[i] + if i := slices.IndexFunc(c.KeySpecific, func(k KeySpecific) bool { return k.Key == v.Key }); i == -1 { + c.KeySpecific = append(c.KeySpecific, v) + } else { + c.KeySpecific[i].GasEstimator.setFrom(&v.GasEstimator) + } + } + } + + c.HeadTracker.setFrom(&f.HeadTracker) + c.NodePool.setFrom(&f.NodePool) + c.OCR.setFrom(&f.OCR) + c.OCR2.setFrom(&f.OCR2) + c.ChainWriter.setFrom(&f.ChainWriter) +} diff --git a/core/chains/evm/config/toml/defaults/Arbitrum_Goerli.toml b/core/chains/evm/config/toml/defaults/Arbitrum_Goerli.toml new file mode 100644 index 00000000..598b5713 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Arbitrum_Goerli.toml @@ -0,0 +1,27 @@ +ChainID = '421613' +ChainType = 'arbitrum' +NoNewHeadsThreshold = '0' +OCR.ContractConfirmations = 1 +LinkContractAddress = '0xd14838A68E8AFBAdE5efb411d5871ea0011AFd28' +LogPollInterval = '1s' + +[GasEstimator] +Mode = 'Arbitrum' +LimitMax = 1_000_000_000 +# Arbitrum uses the suggested gas price, so we don't want to place any limits on the minimum +PriceMin = '0' +PriceDefault = '0.1 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +FeeCapDefault = '1000 gwei' +# Disable gas bumping on arbitrum +BumpThreshold = 0 + +[GasEstimator.BlockHistory] +# Force an error if someone set GAS_UPDATER_ENABLED=true by accident; we never want to run the block history estimator on arbitrum +BlockHistorySize = 0 + +[NodePool] +SyncThreshold = 10 + +[OCR2.Automation] +GasLimit = 14500000 diff --git 
a/core/chains/evm/config/toml/defaults/Arbitrum_Mainnet.toml b/core/chains/evm/config/toml/defaults/Arbitrum_Mainnet.toml new file mode 100644 index 00000000..350d15cf --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Arbitrum_Mainnet.toml @@ -0,0 +1,29 @@ +# Arbitrum is an L2 chain. Pending proper L2 support, for now we rely on their sequencer +ChainID = '42161' +ChainType = 'arbitrum' +LinkContractAddress = "0xf97f4df75117a78c1A5a0DBb814Af92458539FB4" +LogPollInterval = '1s' +# Arbitrum only emits blocks when a new tx is received, so this method of liveness detection is not useful +NoNewHeadsThreshold = '0' +OCR.ContractConfirmations = 1 + +[GasEstimator] +Mode = 'Arbitrum' +LimitMax = 1_000_000_000 +# Arbitrum uses the suggested gas price, so we don't want to place any limits on the minimum +PriceMin = '0' +PriceDefault = '0.1 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +FeeCapDefault = '1000 gwei' +# Disable gas bumping on arbitrum +BumpThreshold = 0 + +[GasEstimator.BlockHistory] +# Force an error if someone set GAS_UPDATER_ENABLED=true by accident; we never want to run the block history estimator on arbitrum +BlockHistorySize = 0 + +[NodePool] +SyncThreshold = 10 + +[OCR2.Automation] +GasLimit = 14500000 diff --git a/core/chains/evm/config/toml/defaults/Arbitrum_Rinkeby.toml b/core/chains/evm/config/toml/defaults/Arbitrum_Rinkeby.toml new file mode 100644 index 00000000..ef9a5408 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Arbitrum_Rinkeby.toml @@ -0,0 +1,24 @@ +ChainID = '421611' +ChainType = 'arbitrum' +LinkContractAddress = "0x615fBe6372676474d9e6933d310469c9b68e9726" +LogPollInterval = '1s' +NoNewHeadsThreshold = '0' +OCR.ContractConfirmations = 1 + +[GasEstimator] +Mode = 'Arbitrum' +LimitMax = 1_000_000_000 +# Arbitrum uses the suggested gas price, so we don't want to place any limits on the minimum +PriceMin = '0' +PriceDefault = '0.1 gwei' +PriceMax = 
'115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +FeeCapDefault = '1000 gwei' +# Disable gas bumping on arbitrum +BumpThreshold = 0 + +[GasEstimator.BlockHistory] +# Force an error if someone set GAS_UPDATER_ENABLED=true by accident; we never want to run the block history estimator on arbitrum +BlockHistorySize = 0 + +[NodePool] +SyncThreshold = 10 diff --git a/core/chains/evm/config/toml/defaults/Arbitrum_Sepolia.toml b/core/chains/evm/config/toml/defaults/Arbitrum_Sepolia.toml new file mode 100644 index 00000000..e26a137d --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Arbitrum_Sepolia.toml @@ -0,0 +1,26 @@ +ChainID = '421614' +ChainType = 'arbitrum' +NoNewHeadsThreshold = '0' +OCR.ContractConfirmations = 1 +LogPollInterval = '1s' + +[GasEstimator] +Mode = 'Arbitrum' +LimitMax = 1_000_000_000 +# Arbitrum uses the suggested gas price, so we don't want to place any limits on the minimum +PriceMin = '0' +PriceDefault = '0.1 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +FeeCapDefault = '1000 gwei' +# Disable gas bumping on arbitrum +BumpThreshold = 0 + +[GasEstimator.BlockHistory] +# Force an error if someone set GAS_UPDATER_ENABLED=true by accident; we never want to run the block history estimator on arbitrum +BlockHistorySize = 0 + +[NodePool] +SyncThreshold = 10 + +[OCR2.Automation] +GasLimit = 14500000 diff --git a/core/chains/evm/config/toml/defaults/Avalanche_Fuji.toml b/core/chains/evm/config/toml/defaults/Avalanche_Fuji.toml new file mode 100644 index 00000000..98c4f9b4 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Avalanche_Fuji.toml @@ -0,0 +1,17 @@ +ChainID = '43113' +FinalityDepth = 1 +LinkContractAddress = '0x0b9d5D9136855f6FEc3c0993feE6E9CE8a297846' +LogPollInterval = '3s' +MinIncomingConfirmations = 1 +NoNewHeadsThreshold = '30s' +OCR.ContractConfirmations = 1 +RPCBlockQueryDelay = 2 + +[GasEstimator] +Mode = 'BlockHistory' 
+PriceDefault = '25 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '25 gwei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 24 diff --git a/core/chains/evm/config/toml/defaults/Avalanche_Mainnet.toml b/core/chains/evm/config/toml/defaults/Avalanche_Mainnet.toml new file mode 100644 index 00000000..77ba6e6b --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Avalanche_Mainnet.toml @@ -0,0 +1,18 @@ +ChainID = '43114' +FinalityDepth = 1 +LinkContractAddress = '0x5947BB275c521040051D82396192181b413227A3' +LogPollInterval = '3s' +MinIncomingConfirmations = 1 +NoNewHeadsThreshold = '30s' +OCR.ContractConfirmations = 1 +RPCBlockQueryDelay = 2 + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '25 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '25 gwei' + +[GasEstimator.BlockHistory] +# Average block time of 2s +BlockHistorySize = 24 diff --git a/core/chains/evm/config/toml/defaults/BSC_Mainnet.toml b/core/chains/evm/config/toml/defaults/BSC_Mainnet.toml new file mode 100644 index 00000000..4268cb25 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/BSC_Mainnet.toml @@ -0,0 +1,37 @@ +# BSC uses Clique consensus with ~3s block times +# Clique offers finality within (N/2)+1 blocks where N is number of signers +# There are 21 BSC validators so theoretically finality should occur after 21/2+1 = 11 blocks +ChainID = '56' +# Keeping this >> 11 because it's not expensive and gives us a safety margin +FinalityDepth = 50 +LinkContractAddress = '0x404460C6A5EdE2D891e8297795264fDe62ADBB75' +LogPollInterval = '3s' +MinIncomingConfirmations = 3 +NoNewHeadsThreshold = '30s' +RPCBlockQueryDelay = 2 +Transactions.ResendAfterThreshold = '1m' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +PriceDefault = '5 gwei' +PriceMin = '1 gwei' +BumpMin = '5 gwei' +# 15s delay since feeds update every minute in volatile situations 
+BumpThreshold = 5 + +[GasEstimator.BlockHistory] +BlockHistorySize = 24 + +[HeadTracker] +HistoryDepth = 100 +SamplingInterval = '1s' + +[OCR] +DatabaseTimeout = '2s' +ContractTransmitterTransmitTimeout = '2s' +ObservationGracePeriod = '500ms' + +[NodePool] +SyncThreshold = 10 diff --git a/core/chains/evm/config/toml/defaults/BSC_Testnet.toml b/core/chains/evm/config/toml/defaults/BSC_Testnet.toml new file mode 100644 index 00000000..f97dafa0 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/BSC_Testnet.toml @@ -0,0 +1,37 @@ +# BSC uses Clique consensus with ~3s block times +# Clique offers finality within (N/2)+1 blocks where N is number of signers +# There are 21 BSC validators so theoretically finality should occur after 21/2+1 = 11 blocks +ChainID = '97' +# Keeping this >> 11 because it's not expensive and gives us a safety margin +FinalityDepth = 50 +LinkContractAddress = '0x84b9B910527Ad5C03A9Ca831909E21e236EA7b06' +LogPollInterval = '3s' +MinIncomingConfirmations = 3 +NoNewHeadsThreshold = '30s' +RPCBlockQueryDelay = 2 +Transactions.ResendAfterThreshold = '1m' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +PriceDefault = '5 gwei' +PriceMin = '1 gwei' +BumpMin = '5 gwei' +# 15s delay since feeds update every minute in volatile situations +BumpThreshold = 5 + +[GasEstimator.BlockHistory] +BlockHistorySize = 24 + +[HeadTracker] +HistoryDepth = 100 +SamplingInterval = '1s' + +[OCR] +DatabaseTimeout = '2s' +ContractTransmitterTransmitTimeout = '2s' +ObservationGracePeriod = '500ms' + +[NodePool] +SyncThreshold = 10 diff --git a/core/chains/evm/config/toml/defaults/Base_Goerli.toml b/core/chains/evm/config/toml/defaults/Base_Goerli.toml new file mode 100644 index 00000000..5ecfd036 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Base_Goerli.toml @@ -0,0 +1,29 @@ +ChainID = '84531' +ChainType = 'optimismBedrock' +FinalityDepth = 200 +LogPollInterval = '2s' +NoNewHeadsThreshold = '40s' +MinIncomingConfirmations = 1 + +[GasEstimator] 
+EIP1559DynamicFees = true +PriceMin = '1 wei' +BumpMin = '100 wei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 60 + +[Transactions] +ResendAfterThreshold = '30s' + +[HeadTracker] +HistoryDepth = 300 + +[NodePool] +SyncThreshold = 10 + +[OCR] +ContractConfirmations = 1 + +[OCR2.Automation] +GasLimit = 6500000 diff --git a/core/chains/evm/config/toml/defaults/Base_Mainnet.toml b/core/chains/evm/config/toml/defaults/Base_Mainnet.toml new file mode 100644 index 00000000..314c12f8 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Base_Mainnet.toml @@ -0,0 +1,29 @@ +ChainID = '8453' +ChainType = 'optimismBedrock' +FinalityDepth = 200 +LogPollInterval = '2s' +NoNewHeadsThreshold = '40s' +MinIncomingConfirmations = 1 + +[GasEstimator] +EIP1559DynamicFees = true +PriceMin = '1 wei' +BumpMin = '100 wei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 24 + +[Transactions] +ResendAfterThreshold = '30s' + +[HeadTracker] +HistoryDepth = 300 + +[NodePool] +SyncThreshold = 10 + +[OCR] +ContractConfirmations = 1 + +[OCR2.Automation] +GasLimit = 6500000 diff --git a/core/chains/evm/config/toml/defaults/Base_Sepolia.toml b/core/chains/evm/config/toml/defaults/Base_Sepolia.toml new file mode 100644 index 00000000..6458dda8 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Base_Sepolia.toml @@ -0,0 +1,29 @@ +ChainID = '84532' +ChainType = 'optimismBedrock' +FinalityDepth = 200 +LogPollInterval = '2s' +NoNewHeadsThreshold = '40s' +MinIncomingConfirmations = 1 + +[GasEstimator] +EIP1559DynamicFees = true +PriceMin = '1 wei' +BumpMin = '100 wei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 60 + +[Transactions] +ResendAfterThreshold = '30s' + +[HeadTracker] +HistoryDepth = 300 + +[NodePool] +SyncThreshold = 10 + +[OCR] +ContractConfirmations = 1 + +[OCR2.Automation] +GasLimit = 6500000 diff --git a/core/chains/evm/config/toml/defaults/Celo_Mainnet.toml b/core/chains/evm/config/toml/defaults/Celo_Mainnet.toml new file mode 100644 index 00000000..87c63840 --- 
/dev/null +++ b/core/chains/evm/config/toml/defaults/Celo_Mainnet.toml @@ -0,0 +1,19 @@ +ChainID = '42220' +ChainType = 'celo' +FinalityDepth = 1 +LogPollInterval = '5s' +MinIncomingConfirmations = 1 +NoNewHeadsThreshold = '1m' +OCR.ContractConfirmations = 1 + +[GasEstimator] +PriceDefault = '5 gwei' +PriceMax = '500 gwei' +PriceMin = '5 gwei' +BumpMin = '2 gwei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 12 + +[HeadTracker] +HistoryDepth = 50 diff --git a/core/chains/evm/config/toml/defaults/Celo_Testnet.toml b/core/chains/evm/config/toml/defaults/Celo_Testnet.toml new file mode 100644 index 00000000..0508e86d --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Celo_Testnet.toml @@ -0,0 +1,19 @@ +ChainID = '44787' +ChainType = 'celo' +FinalityDepth = 1 +LogPollInterval = '5s' +MinIncomingConfirmations = 1 +NoNewHeadsThreshold = '1m' +OCR.ContractConfirmations = 1 + +[GasEstimator] +PriceDefault = '5 gwei' +PriceMax = '500 gwei' +PriceMin = '5 gwei' +BumpMin = '2 gwei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 24 + +[HeadTracker] +HistoryDepth = 50 diff --git a/core/chains/evm/config/toml/defaults/Ethereum_Goerli.toml b/core/chains/evm/config/toml/defaults/Ethereum_Goerli.toml new file mode 100644 index 00000000..c2f4c605 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Ethereum_Goerli.toml @@ -0,0 +1,11 @@ +ChainID = '5' +LinkContractAddress = '0x326C977E6efc84E512bB9C30f76E30c160eD06FB' +MinContractPayment = '0.1 pli' + +[GasEstimator] +EIP1559DynamicFees = true + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +TransactionPercentile = 50 diff --git a/core/chains/evm/config/toml/defaults/Ethereum_Kovan.toml b/core/chains/evm/config/toml/defaults/Ethereum_Kovan.toml new file mode 100644 index 00000000..472c7172 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Ethereum_Kovan.toml @@ -0,0 +1,14 @@ +ChainID = '42' +LinkContractAddress = '0xa36085F69e2889c224210F603D836748e7dC0088' +MinContractPayment = '0.1 
pli' +OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' + +[GasEstimator] +# FIXME: Kovan has strange behaviour with EIP1559, see: +# https://app.shortcut.com/pluginlabs/story/34098/kovan-can-emit-blocks-that-violate-assumptions-in-block-history-estimator +EIP1559DynamicFees = false + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +TransactionPercentile = 50 \ No newline at end of file diff --git a/core/chains/evm/config/toml/defaults/Ethereum_Mainnet.toml b/core/chains/evm/config/toml/defaults/Ethereum_Mainnet.toml new file mode 100644 index 00000000..961db267 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Ethereum_Mainnet.toml @@ -0,0 +1,13 @@ +ChainID = '1' +LinkContractAddress = '0x514910771AF9Ca656af840dff83E8264EcF986CA' +MinContractPayment = '0.1 pli' +OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' + +[GasEstimator] +EIP1559DynamicFees = true + +[GasEstimator.BlockHistory] +BatchSize = 25 +# EIP-1559 does well on a smaller block history size +BlockHistorySize = 4 +TransactionPercentile = 50 diff --git a/core/chains/evm/config/toml/defaults/Ethereum_Rinkeby.toml b/core/chains/evm/config/toml/defaults/Ethereum_Rinkeby.toml new file mode 100644 index 00000000..15795d33 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Ethereum_Rinkeby.toml @@ -0,0 +1,13 @@ +ChainID = '4' +LinkContractAddress = '0x01BE23585060835E02B77ef475b0Cc51aA1e0709' +MinContractPayment = '0.1 pli' + +[GasEstimator] +# TODO: EIP1559 on rinkeby has not been adequately tested, see: +# https://app.shortcut.com/pluginlabs/story/34098/kovan-can-emit-blocks-that-violate-assumptions-in-block-history-estimator +EIP1559DynamicFees = false + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +TransactionPercentile = 50 diff --git a/core/chains/evm/config/toml/defaults/Ethereum_Ropsten.toml b/core/chains/evm/config/toml/defaults/Ethereum_Ropsten.toml new file mode 100644 index 00000000..5aacc611 --- 
/dev/null +++ b/core/chains/evm/config/toml/defaults/Ethereum_Ropsten.toml @@ -0,0 +1,11 @@ +ChainID = '3' +LinkContractAddress = '0x20fE562d797A42Dcb3399062AE9546cd06f63280' +MinContractPayment = '0.1 pli' + +[GasEstimator] +EIP1559DynamicFees = true + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +TransactionPercentile = 50 diff --git a/core/chains/evm/config/toml/defaults/Ethereum_Sepolia.toml b/core/chains/evm/config/toml/defaults/Ethereum_Sepolia.toml new file mode 100644 index 00000000..425c041f --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Ethereum_Sepolia.toml @@ -0,0 +1,11 @@ +ChainID = '11155111' +LinkContractAddress = '0x779877A7B0D9E8603169DdbD7836e478b4624789' +MinContractPayment = '0.1 pli' + +[GasEstimator] +EIP1559DynamicFees = true + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +TransactionPercentile = 50 diff --git a/core/chains/evm/config/toml/defaults/Fantom_Mainnet.toml b/core/chains/evm/config/toml/defaults/Fantom_Mainnet.toml new file mode 100644 index 00000000..c7fb6ba4 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Fantom_Mainnet.toml @@ -0,0 +1,16 @@ +ChainID = '250' +LinkContractAddress = '0x6F43FF82CCA38001B6699a8AC47A2d0E66939407' +LogPollInterval = '1s' +MinIncomingConfirmations = 3 +NoNewHeadsThreshold = '30s' +RPCBlockQueryDelay = 2 + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +# Fantom network has been slow to include txs at times when using the BlockHistory estimator, and the recommendation is to use SuggestedPrice mode. 
+Mode = 'SuggestedPrice' + +[OCR2.Automation] +GasLimit = 3800000 \ No newline at end of file diff --git a/core/chains/evm/config/toml/defaults/Fantom_Testnet.toml b/core/chains/evm/config/toml/defaults/Fantom_Testnet.toml new file mode 100644 index 00000000..1e1aab14 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Fantom_Testnet.toml @@ -0,0 +1,13 @@ +ChainID = '4002' +LinkContractAddress = '0xfaFedb041c0DD4fA2Dc0d87a6B0979Ee6FA7af5F' +LogPollInterval = '1s' +MinIncomingConfirmations = 3 +# Fantom testnet only emits blocks when a new tx is received, so this method of liveness detection is not useful +NoNewHeadsThreshold = '0' +RPCBlockQueryDelay = 2 + +[GasEstimator] +Mode = 'SuggestedPrice' + +[OCR2.Automation] +GasLimit = 3800000 \ No newline at end of file diff --git a/core/chains/evm/config/toml/defaults/Harmony_Mainnet.toml b/core/chains/evm/config/toml/defaults/Harmony_Mainnet.toml new file mode 100644 index 00000000..e90d2ff7 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Harmony_Mainnet.toml @@ -0,0 +1,8 @@ +ChainID = '1666600000' +LinkContractAddress = '0x218532a12a389a4a92fC0C5Fb22901D1c19198aA' +LogPollInterval = '2s' +MinIncomingConfirmations = 1 +NoNewHeadsThreshold = '30s' + +[GasEstimator] +PriceDefault = '5 gwei' diff --git a/core/chains/evm/config/toml/defaults/Harmony_Testnet.toml b/core/chains/evm/config/toml/defaults/Harmony_Testnet.toml new file mode 100644 index 00000000..382e3b21 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Harmony_Testnet.toml @@ -0,0 +1,8 @@ +ChainID = '1666700000' +LinkContractAddress = '0x8b12Ac23BFe11cAb03a634C1F117D64a7f2cFD3e' +LogPollInterval = '2s' +MinIncomingConfirmations = 1 +NoNewHeadsThreshold = '30s' + +[GasEstimator] +PriceDefault = '5 gwei' diff --git a/core/chains/evm/config/toml/defaults/Heco_Mainnet.toml b/core/chains/evm/config/toml/defaults/Heco_Mainnet.toml new file mode 100644 index 00000000..8a45ba99 --- /dev/null +++ 
b/core/chains/evm/config/toml/defaults/Heco_Mainnet.toml @@ -0,0 +1,34 @@ +# Heco uses BSC's settings. +ChainID = '128' +FinalityDepth = 50 +LinkContractAddress = '0x404460C6A5EdE2D891e8297795264fDe62ADBB75' +LogPollInterval = '3s' +MinIncomingConfirmations = 3 +NoNewHeadsThreshold = '30s' +RPCBlockQueryDelay = 2 +Transactions.ResendAfterThreshold = '1m' + + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +PriceDefault = '5 gwei' +PriceMin = '1 gwei' +BumpMin = '5 gwei' +BumpThreshold = 5 + +[GasEstimator.BlockHistory] +BlockHistorySize = 24 + +[HeadTracker] +HistoryDepth = 100 +SamplingInterval = '1s' + +[OCR] +DatabaseTimeout = '2s' +ContractTransmitterTransmitTimeout = '2s' +ObservationGracePeriod = '500ms' + +[NodePool] +SyncThreshold = 10 diff --git a/core/chains/evm/config/toml/defaults/Klaytn_Mainnet.toml b/core/chains/evm/config/toml/defaults/Klaytn_Mainnet.toml new file mode 100644 index 00000000..c68f03b0 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Klaytn_Mainnet.toml @@ -0,0 +1,10 @@ +ChainID = '8217' +FinalityDepth = 1 +MinIncomingConfirmations = 1 +NoNewHeadsThreshold = '30s' +OCR.ContractConfirmations = 1 + +[GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '750 gwei' # gwei = ston +BumpThreshold = 0 diff --git a/core/chains/evm/config/toml/defaults/Klaytn_Testnet.toml b/core/chains/evm/config/toml/defaults/Klaytn_Testnet.toml new file mode 100644 index 00000000..864aa0fa --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Klaytn_Testnet.toml @@ -0,0 +1,10 @@ +ChainID = '1001' +FinalityDepth = 1 +MinIncomingConfirmations = 1 +NoNewHeadsThreshold = '30s' +OCR.ContractConfirmations = 1 + +[GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '750 gwei' # gwei = ston +BumpThreshold = 0 diff --git a/core/chains/evm/config/toml/defaults/Kroma_Mainnet.toml b/core/chains/evm/config/toml/defaults/Kroma_Mainnet.toml new file mode 100644 index 00000000..55154bf7 --- /dev/null +++ 
b/core/chains/evm/config/toml/defaults/Kroma_Mainnet.toml @@ -0,0 +1,26 @@ +ChainID = '255' +ChainType = 'kroma' # Kroma is based on the Optimism Bedrock architechture +FinalityDepth = 400 +LogPollInterval = '2s' +NoNewHeadsThreshold = '40s' +MinIncomingConfirmations = 1 + +[GasEstimator] +EIP1559DynamicFees = true +PriceMin = '1 wei' +BumpMin = '100 wei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 24 + +[Transactions] +ResendAfterThreshold = '30s' + +[HeadTracker] +HistoryDepth = 400 + +[NodePool] +SyncThreshold = 10 + +[OCR] +ContractConfirmations = 1 diff --git a/core/chains/evm/config/toml/defaults/Kroma_Sepolia.toml b/core/chains/evm/config/toml/defaults/Kroma_Sepolia.toml new file mode 100644 index 00000000..643b0556 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Kroma_Sepolia.toml @@ -0,0 +1,26 @@ +ChainID = '2358' +ChainType = 'kroma' # Kroma is based on the Optimism Bedrock architechture +FinalityDepth = 400 +LogPollInterval = '2s' +NoNewHeadsThreshold = '40s' +MinIncomingConfirmations = 1 + +[GasEstimator] +EIP1559DynamicFees = true +PriceMin = '1 wei' +BumpMin = '100 wei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 24 + +[Transactions] +ResendAfterThreshold = '30s' + +[HeadTracker] +HistoryDepth = 400 + +[NodePool] +SyncThreshold = 10 + +[OCR] +ContractConfirmations = 1 diff --git a/core/chains/evm/config/toml/defaults/Linea_Goerli.toml b/core/chains/evm/config/toml/defaults/Linea_Goerli.toml new file mode 100644 index 00000000..91572726 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Linea_Goerli.toml @@ -0,0 +1,12 @@ +ChainID = '59140' +# Block time 12s, finality < 3m +FinalityDepth = 15 +# Blocks are only emitted when a transaction happens / no empty blocks +NoNewHeadsThreshold = '0' + +[GasEstimator] +BumpPercent = 40 + +[Transactions] +# increase resend time to align with finality +ResendAfterThreshold = '3m' diff --git a/core/chains/evm/config/toml/defaults/Linea_Mainnet.toml 
b/core/chains/evm/config/toml/defaults/Linea_Mainnet.toml new file mode 100644 index 00000000..94d8bedc --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Linea_Mainnet.toml @@ -0,0 +1,17 @@ +ChainID = '59144' +# Block time 12s, finality < 60m +FinalityDepth = 300 +# Blocks are only emitted when a transaction happens / no empty blocks +NoNewHeadsThreshold = '0' + +[GasEstimator] +BumpPercent = 40 +PriceMin = '400 mwei' + +[Transactions] +# increase resend time to align with finality +ResendAfterThreshold = '3m' + +# set greater than finality depth +[HeadTracker] +HistoryDepth = 350 diff --git a/core/chains/evm/config/toml/defaults/Metis_Mainnet.toml b/core/chains/evm/config/toml/defaults/Metis_Mainnet.toml new file mode 100644 index 00000000..3e8efa53 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Metis_Mainnet.toml @@ -0,0 +1,25 @@ +# Metis is an L2 chain based on Optimism. +ChainID = '1088' +ChainType = 'metis' +# Sequencer offers absolute finality +FinalityDepth = 1 +MinIncomingConfirmations = 1 +NoNewHeadsThreshold = '0' +OCR.ContractConfirmations = 1 + +[GasEstimator] +Mode = 'SuggestedPrice' +# Metis uses the SuggestedPrice estimator; we don't want to place any limits on the minimum gas price +PriceMin = '0' +# Never bump gas on metis +BumpThreshold = 0 + +[BalanceMonitor] +Enabled = true + +[GasEstimator.BlockHistory] +# Force an error if someone enables the estimator by accident; we never want to run the block history estimator on metisaa +BlockHistorySize = 0 + +[NodePool] +SyncThreshold = 10 diff --git a/core/chains/evm/config/toml/defaults/Metis_Rinkeby.toml b/core/chains/evm/config/toml/defaults/Metis_Rinkeby.toml new file mode 100644 index 00000000..7d9fec90 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Metis_Rinkeby.toml @@ -0,0 +1,20 @@ +ChainID = '588' +ChainType = 'metis' +FinalityDepth = 1 +MinIncomingConfirmations = 1 +NoNewHeadsThreshold = '0' +OCR.ContractConfirmations = 1 + +[BalanceMonitor] +Enabled = true + 
+[GasEstimator] +Mode = 'SuggestedPrice' +PriceMin = '0' +BumpThreshold = 0 + +[GasEstimator.BlockHistory] +BlockHistorySize = 0 + +[NodePool] +SyncThreshold = 10 diff --git a/core/chains/evm/config/toml/defaults/OKX_Mainnet.toml b/core/chains/evm/config/toml/defaults/OKX_Mainnet.toml new file mode 100644 index 00000000..d0b26ede --- /dev/null +++ b/core/chains/evm/config/toml/defaults/OKX_Mainnet.toml @@ -0,0 +1 @@ +ChainID = '66' diff --git a/core/chains/evm/config/toml/defaults/OKX_Testnet.toml b/core/chains/evm/config/toml/defaults/OKX_Testnet.toml new file mode 100644 index 00000000..2587f010 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/OKX_Testnet.toml @@ -0,0 +1 @@ +ChainID = '65' diff --git a/core/chains/evm/config/toml/defaults/Optimism_Goerli.toml b/core/chains/evm/config/toml/defaults/Optimism_Goerli.toml new file mode 100644 index 00000000..458b3b08 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Optimism_Goerli.toml @@ -0,0 +1,30 @@ +ChainID = '420' +ChainType = 'optimismBedrock' +FinalityDepth = 200 +LinkContractAddress = '0xdc2CC710e42857672E7907CF474a69B63B93089f' +LogPollInterval = '2s' +NoNewHeadsThreshold = '40s' +MinIncomingConfirmations = 1 + +[GasEstimator] +EIP1559DynamicFees = true +PriceMin = '1 wei' +BumpMin = '100 wei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 60 + +[Transactions] +ResendAfterThreshold = '30s' + +[HeadTracker] +HistoryDepth = 300 + +[NodePool] +SyncThreshold = 10 + +[OCR] +ContractConfirmations = 1 + +[OCR2.Automation] +GasLimit = 6500000 diff --git a/core/chains/evm/config/toml/defaults/Optimism_Mainnet.toml b/core/chains/evm/config/toml/defaults/Optimism_Mainnet.toml new file mode 100644 index 00000000..fd4dd9f3 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Optimism_Mainnet.toml @@ -0,0 +1,30 @@ +ChainID = '10' +ChainType = 'optimismBedrock' +FinalityDepth = 200 +LinkContractAddress = '0x350a791Bfc2C21F9Ed5d10980Dad2e2638ffa7f6' +LogPollInterval = '2s' +NoNewHeadsThreshold = 
'40s' +MinIncomingConfirmations = 1 + +[GasEstimator] +EIP1559DynamicFees = true +PriceMin = '1 wei' +BumpMin = '100 wei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 24 + +[Transactions] +ResendAfterThreshold = '30s' + +[HeadTracker] +HistoryDepth = 300 + +[NodePool] +SyncThreshold = 10 + +[OCR] +ContractConfirmations = 1 + +[OCR2.Automation] +GasLimit = 6500000 diff --git a/core/chains/evm/config/toml/defaults/Polygon_Mainnet.toml b/core/chains/evm/config/toml/defaults/Polygon_Mainnet.toml new file mode 100644 index 00000000..c4246bb8 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Polygon_Mainnet.toml @@ -0,0 +1,41 @@ +# Polygon has a 1s block time and looser finality guarantees than ethereum. +ChainID = '137' +# It is quite common to see re-orgs on polygon go several hundred blocks deep. See: https://polygonscan.com/blocks_forked +FinalityDepth = 500 +LinkContractAddress = '0xb0897686c545045aFc77CF20eC7A532E3120E0F1' +LogPollInterval = '1s' +MinIncomingConfirmations = 5 +NoNewHeadsThreshold = '30s' +# Must be set to something large here because Polygon has so many re-orgs that otherwise we are constantly refetching +RPCBlockQueryDelay = 10 +RPCDefaultBatchSize = 100 + +[Transactions] +# Matic nodes under high mempool pressure are liable to drop txes, we need to ensure we keep sending them +ResendAfterThreshold = '1m' +# Since re-orgs on Polygon can be so large, we need a large safety buffer to allow time for the queue to clear down before we start dropping transactions +MaxQueued = 5000 + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +# Many Polygon RPC providers set a minimum of 30 GWei on mainnet to prevent spam +PriceDefault = '30 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +# Many Polygon RPC providers set a minimum of 30 GWei on mainnet to prevent spam +PriceMin = '30 gwei' +BumpMin = '20 gwei' +# 10s delay since feeds update every minute in volatile situations 
+BumpThreshold = 5 + +[GasEstimator.BlockHistory] +BlockHistorySize = 24 + +[HeadTracker] +# Polygon suffers from a tremendous number of re-orgs, we need to set this to something very large to be conservative enough +HistoryDepth = 2000 +SamplingInterval = '1s' + +[NodePool] +SyncThreshold = 10 diff --git a/core/chains/evm/config/toml/defaults/Polygon_Mumbai.toml b/core/chains/evm/config/toml/defaults/Polygon_Mumbai.toml new file mode 100644 index 00000000..e3dd2f6c --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Polygon_Mumbai.toml @@ -0,0 +1,32 @@ +ChainID = '80001' +FinalityDepth = 500 +LinkContractAddress = '0x326C977E6efc84E512bB9C30f76E30c160eD06FB' +LogPollInterval = '1s' +MinIncomingConfirmations = 5 +NoNewHeadsThreshold = '30s' +RPCBlockQueryDelay = 10 +RPCDefaultBatchSize = 100 + +[Transactions] +ResendAfterThreshold = '1m' +MaxQueued = 5000 + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +PriceDefault = '1 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +BumpMin = '20 gwei' +BumpThreshold = 5 + +[GasEstimator.BlockHistory] +BlockHistorySize = 24 + +[HeadTracker] +HistoryDepth = 2000 +SamplingInterval = '1s' + +[NodePool] +SyncThreshold = 10 diff --git a/core/chains/evm/config/toml/defaults/Polygon_Zkevm_Goerli.toml b/core/chains/evm/config/toml/defaults/Polygon_Zkevm_Goerli.toml new file mode 100644 index 00000000..58451679 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Polygon_Zkevm_Goerli.toml @@ -0,0 +1,23 @@ +ChainID = '1442' +FinalityDepth = 1 +NoNewHeadsThreshold = '12m' +MinIncomingConfirmations = 1 +LogPollInterval = '30s' +RPCDefaultBatchSize = 100 + +[OCR] +ContractConfirmations = 1 + +[Transactions] +ResendAfterThreshold = '3m' + +[GasEstimator] +PriceMin = '50 mwei' +BumpPercent = 40 +BumpMin = '20 mwei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 12 + +[HeadTracker] +HistoryDepth = 50 diff --git 
a/core/chains/evm/config/toml/defaults/Polygon_Zkevm_Mainnet.toml b/core/chains/evm/config/toml/defaults/Polygon_Zkevm_Mainnet.toml new file mode 100644 index 00000000..6be91b0e --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Polygon_Zkevm_Mainnet.toml @@ -0,0 +1,24 @@ +ChainID = '1101' +FinalityDepth = 1 +NoNewHeadsThreshold = '6m' +MinIncomingConfirmations = 1 +LogPollInterval = '30s' +RPCBlockQueryDelay = 15 +RPCDefaultBatchSize = 100 + +[OCR] +ContractConfirmations = 1 + +[Transactions] +ResendAfterThreshold = '3m' + +[GasEstimator] +PriceMin = '100 mwei' +BumpPercent = 40 +BumpMin = '100 mwei' + +[GasEstimator.BlockHistory] +BlockHistorySize = 12 + +[HeadTracker] +HistoryDepth = 50 diff --git a/core/chains/evm/config/toml/defaults/README.md b/core/chains/evm/config/toml/defaults/README.md new file mode 100644 index 00000000..744efedd --- /dev/null +++ b/core/chains/evm/config/toml/defaults/README.md @@ -0,0 +1,7 @@ +# Default Configurations + +:warning: IMPORTANT :warning: + +All config sets **inherit** from `fallback.toml` first and overwrite +fields as necessary. Do not create a new full configuration from +scratch. 
diff --git a/core/chains/evm/config/toml/defaults/RSK_Mainnet.toml b/core/chains/evm/config/toml/defaults/RSK_Mainnet.toml new file mode 100644 index 00000000..1f81428e --- /dev/null +++ b/core/chains/evm/config/toml/defaults/RSK_Mainnet.toml @@ -0,0 +1,13 @@ +# RSK prices its txes in sats not wei +ChainID = '30' +LinkContractAddress = '0x14AdaE34beF7ca957Ce2dDe5ADD97ea050123827' +LogPollInterval = '30s' +MinContractPayment = '0.001 pli' + +[GasEstimator] +# It's about 100 times more expensive than Wei, very roughly speaking +PriceDefault = '50 mwei' +PriceMax = '50 gwei' +PriceMin = '0' +# rsk does not yet support EIP-1559 but this allows validation to pass +FeeCapDefault = '100 mwei' diff --git a/core/chains/evm/config/toml/defaults/RSK_Testnet.toml b/core/chains/evm/config/toml/defaults/RSK_Testnet.toml new file mode 100644 index 00000000..72b48b03 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/RSK_Testnet.toml @@ -0,0 +1,10 @@ +ChainID = '31' +LinkContractAddress = '0x8bBbd80981FE76d44854D8DF305e8985c19f0e78' +MinContractPayment = '0.001 pli' +LogPollInterval = '30s' + +[GasEstimator] +PriceDefault = '50 mwei' +PriceMax = '50 gwei' +PriceMin = '0' +FeeCapDefault = '100 mwei' diff --git a/core/chains/evm/config/toml/defaults/Scroll_Mainnet.toml b/core/chains/evm/config/toml/defaults/Scroll_Mainnet.toml new file mode 100644 index 00000000..e087b86f --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Scroll_Mainnet.toml @@ -0,0 +1,22 @@ +ChainID = '534352' +FinalityDepth = 1 +ChainType = 'scroll' +LogPollInterval = '3s' +MinIncomingConfirmations = 1 +# Scroll only emits blocks when a new tx is received, so this method of liveness detection is not useful +NoNewHeadsThreshold = '0' +OCR.ContractConfirmations = 1 + +[GasEstimator] +Mode = 'SuggestedPrice' +# Scroll uses the SuggestedPrice estimator; we don't want to place any limits on the minimum gas price +PriceMin = '0' +# Never bump gas on Scroll +BumpThreshold = 0 + +[GasEstimator.BlockHistory] +# 
Force an error if someone enables the estimator by accident; we never want to run the block history estimator on Scroll +BlockHistorySize = 0 + +[HeadTracker] +HistoryDepth = 50 diff --git a/core/chains/evm/config/toml/defaults/Scroll_Sepolia.toml b/core/chains/evm/config/toml/defaults/Scroll_Sepolia.toml new file mode 100644 index 00000000..9db7a8eb --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Scroll_Sepolia.toml @@ -0,0 +1,22 @@ +ChainID = '534351' +FinalityDepth = 1 +ChainType = 'scroll' +LogPollInterval = '3s' +MinIncomingConfirmations = 1 +# Scroll only emits blocks when a new tx is received, so this method of liveness detection is not useful +NoNewHeadsThreshold = '0' +OCR.ContractConfirmations = 1 + +[GasEstimator] +Mode = 'SuggestedPrice' +# Scroll uses the SuggestedPrice estimator; we don't want to place any limits on the minimum gas price +PriceMin = '0' +# Never bump gas on Scroll +BumpThreshold = 0 + +[GasEstimator.BlockHistory] +# Force an error if someone enables the estimator by accident; we never want to run the block history estimator on Scroll +BlockHistorySize = 0 + +[HeadTracker] +HistoryDepth = 50 diff --git a/core/chains/evm/config/toml/defaults/Simulated.toml b/core/chains/evm/config/toml/defaults/Simulated.toml new file mode 100644 index 00000000..3d01429c --- /dev/null +++ b/core/chains/evm/config/toml/defaults/Simulated.toml @@ -0,0 +1,27 @@ +ChainID = '1337' +FinalityDepth = 1 +MinIncomingConfirmations = 1 +MinContractPayment = '100' +NoNewHeadsThreshold = '0s' + +[Transactions] +ReaperThreshold = '0s' +ResendAfterThreshold = '0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'FixedPrice' +PriceMin = '0' +BumpThreshold = 0 +FeeCapDefault = '100 micro' +PriceMax = '100 micro' + +[HeadTracker] +HistoryDepth = 10 +MaxBufferSize = 100 +SamplingInterval = '0s' + +[OCR] +ContractConfirmations = 1 diff --git a/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml 
b/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml new file mode 100644 index 00000000..ee50a984 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/WeMix_Mainnet.toml @@ -0,0 +1,14 @@ +ChainID = '1111' +ChainType = 'wemix' +FinalityDepth = 1 +MinIncomingConfirmations = 1 +# WeMix emits a block every 1 second, regardless of transactions +LogPollInterval = '3s' +NoNewHeadsThreshold = '30s' + +[OCR] +ContractConfirmations = 1 + +[GasEstimator] +EIP1559DynamicFees = true +TipCapDefault = '100 gwei' diff --git a/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml b/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml new file mode 100644 index 00000000..6cdb451e --- /dev/null +++ b/core/chains/evm/config/toml/defaults/WeMix_Testnet.toml @@ -0,0 +1,14 @@ +ChainID = '1112' +ChainType = 'wemix' +FinalityDepth = 1 +MinIncomingConfirmations = 1 +# WeMix emits a block every 1 second, regardless of transactions +LogPollInterval = '3s' +NoNewHeadsThreshold = '30s' + +[OCR] +ContractConfirmations = 1 + +[GasEstimator] +EIP1559DynamicFees = true +TipCapDefault = '100 gwei' diff --git a/core/chains/evm/config/toml/defaults/fallback.toml b/core/chains/evm/config/toml/defaults/fallback.toml new file mode 100644 index 00000000..e99b71a0 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/fallback.toml @@ -0,0 +1,72 @@ +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinContractPayment = '.00001 pli' +MinIncomingConfirmations = 3 +NonceAutoSync = true +NoNewHeadsThreshold = '3m' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h' +ReaperThreshold = '168h' +ResendAfterThreshold = '1m' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = 
'115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500_000 +LimitMax = 500_000 +LimitMultiplier = '1' +LimitTransfer = 21_000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1' +TipCapMin = '1' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h' +DeltaCJitterOverride = '1h' +ObservationGracePeriod = '1s' + +[OCR2.Automation] +GasLimit = 5400000 diff --git a/core/chains/evm/config/toml/defaults/xDai_Mainnet.toml b/core/chains/evm/config/toml/defaults/xDai_Mainnet.toml new file mode 100644 index 00000000..cc4e27e7 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/xDai_Mainnet.toml @@ -0,0 +1,19 @@ +# xDai currently uses AuRa (like Parity) consensus so finality rules will be similar to parity +# See: https://www.poa.network/for-users/whitepaper/poadao-v1/proof-of-authority +# NOTE: xDai is planning to move to Honeybadger BFT which might have different finality guarantees +# https://www.xdaichain.com/for-validators/consensus/honeybadger-bft-consensus +# For worst case re-org depth on AuRa, assume 2n+2 (see: https://github.com/poanetwork/wiki/wiki/Aura-Consensus-Protocol-Audit) +# With xDai's current maximum of 19 validators then 40 blocks is the maximum possible re-org) +# The mainnet default of 50 blocks is ok here +ChainID = '100' +ChainType = 'xdai' +LinkContractAddress = '0xE2e73A1c69ecF83F464EFCE6A5be353a37cA09b2' +LogPollInterval = '5s' + +[GasEstimator] 
+PriceDefault = '1 gwei' +PriceMax = '500 gwei' +# 1 Gwei is the minimum accepted by the validators (unless whitelisted) +PriceMin = '1 gwei' +# 15s delay since feeds update every minute in volatile situations +BumpThreshold = 3 diff --git a/core/chains/evm/config/toml/defaults/zkSync_Goerli.toml b/core/chains/evm/config/toml/defaults/zkSync_Goerli.toml new file mode 100644 index 00000000..04529a41 --- /dev/null +++ b/core/chains/evm/config/toml/defaults/zkSync_Goerli.toml @@ -0,0 +1,14 @@ +ChainID = '280' +ChainType = 'zksync' +FinalityDepth = 1 +LogPollInterval = '5s' +MinIncomingConfirmations = 1 +NoNewHeadsThreshold = '1m' + +[GasEstimator] +LimitDefault = 3_500_000 +PriceMax = 18446744073709551615 +PriceMin = 0 + +[HeadTracker] +HistoryDepth = 5 diff --git a/core/chains/evm/config/toml/defaults/zkSync_Mainnet.toml b/core/chains/evm/config/toml/defaults/zkSync_Mainnet.toml new file mode 100644 index 00000000..d7808edd --- /dev/null +++ b/core/chains/evm/config/toml/defaults/zkSync_Mainnet.toml @@ -0,0 +1,14 @@ +ChainID = '324' +ChainType = 'zksync' +FinalityDepth = 1 +LogPollInterval = '5s' +MinIncomingConfirmations = 1 +NoNewHeadsThreshold = '1m' + +[GasEstimator] +LimitDefault = 3_500_000 +PriceMax = 18446744073709551615 +PriceMin = 0 + +[HeadTracker] +HistoryDepth = 5 diff --git a/core/chains/evm/forwarders/forwarder.go b/core/chains/evm/forwarders/forwarder.go new file mode 100644 index 00000000..6464a3b3 --- /dev/null +++ b/core/chains/evm/forwarders/forwarder.go @@ -0,0 +1,18 @@ +package forwarders + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +// Forwarder is the struct for Forwarder Addresses +type Forwarder struct { + ID int64 + Address common.Address + EVMChainID big.Big + CreatedAt time.Time + UpdatedAt time.Time +} diff --git a/core/chains/evm/forwarders/forwarder_manager.go b/core/chains/evm/forwarders/forwarder_manager.go new file mode 100644 index 
00000000..a17ff8d0 --- /dev/null +++ b/core/chains/evm/forwarders/forwarder_manager.go @@ -0,0 +1,327 @@ +package forwarders + +import ( + "context" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmlogpoller "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_forwarder" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_receiver" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/offchain_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +var forwardABI = evmtypes.MustGetABI(authorized_forwarder.AuthorizedForwarderABI).Methods["forward"] +var authChangedTopic = authorized_receiver.AuthorizedReceiverAuthorizedSendersChanged{}.Topic() + +type Config interface { + FinalityDepth() uint32 +} + +type FwdMgr struct { + services.StateMachine + ORM ORM + evmClient evmclient.Client + cfg Config + logger logger.SugaredLogger + logpoller evmlogpoller.LogPoller + + // TODO(samhassan): sendersCache should be an LRU capped cache + // https://app.shortcut.com/pluginlabs/story/37884/forwarder-manager-uses-lru-for-caching-dest-addresses + sendersCache map[common.Address][]common.Address + latestBlock int64 + + authRcvr authorized_receiver.AuthorizedReceiverInterface + offchainAgg offchain_aggregator_wrapper.OffchainAggregatorInterface + + ctx context.Context + cancel context.CancelFunc + + cacheMu 
sync.RWMutex + wg sync.WaitGroup +} + +func NewFwdMgr(db *sqlx.DB, client evmclient.Client, logpoller evmlogpoller.LogPoller, l logger.Logger, cfg Config, dbConfig pg.QConfig) *FwdMgr { + lggr := logger.Sugared(logger.Named(l, "EVMForwarderManager")) + fwdMgr := FwdMgr{ + logger: lggr, + cfg: cfg, + evmClient: client, + ORM: NewORM(db, lggr, dbConfig), + logpoller: logpoller, + sendersCache: make(map[common.Address][]common.Address), + } + fwdMgr.ctx, fwdMgr.cancel = context.WithCancel(context.Background()) + return &fwdMgr +} + +func (f *FwdMgr) Name() string { + return f.logger.Name() +} + +// Start starts Forwarder Manager. +func (f *FwdMgr) Start(ctx context.Context) error { + return f.StartOnce("EVMForwarderManager", func() error { + f.logger.Debug("Initializing EVM forwarder manager") + chainId := f.evmClient.ConfiguredChainID() + + fwdrs, err := f.ORM.FindForwardersByChain(big.Big(*chainId)) + if err != nil { + return errors.Wrapf(err, "Failed to retrieve forwarders for chain %d", chainId) + } + if len(fwdrs) != 0 { + f.initForwardersCache(ctx, fwdrs) + if err = f.subscribeForwardersLogs(fwdrs); err != nil { + return err + } + } + + f.authRcvr, err = authorized_receiver.NewAuthorizedReceiver(common.Address{}, f.evmClient) + if err != nil { + return errors.Wrap(err, "Failed to init AuthorizedReceiver") + } + + f.offchainAgg, err = offchain_aggregator_wrapper.NewOffchainAggregator(common.Address{}, f.evmClient) + if err != nil { + return errors.Wrap(err, "Failed to init OffchainAggregator") + } + + f.wg.Add(1) + go f.runLoop() + return nil + }) +} + +func FilterName(addr common.Address) string { + return evmlogpoller.FilterName("ForwarderManager AuthorizedSendersChanged", addr.String()) +} + +func (f *FwdMgr) ForwarderFor(addr common.Address) (forwarder common.Address, err error) { + // Gets forwarders for current chain. 
+ fwdrs, err := f.ORM.FindForwardersByChain(big.Big(*f.evmClient.ConfiguredChainID())) + if err != nil { + return common.Address{}, err + } + + for _, fwdr := range fwdrs { + eoas, err := f.getContractSenders(fwdr.Address) + if err != nil { + f.logger.Errorw("Failed to get forwarder senders", "forwarder", fwdr.Address, "err", err) + continue + } + for _, eoa := range eoas { + if eoa == addr { + return fwdr.Address, nil + } + } + } + return common.Address{}, errors.Errorf("Cannot find forwarder for given EOA") +} + +func (f *FwdMgr) ConvertPayload(dest common.Address, origPayload []byte) ([]byte, error) { + databytes, err := f.getForwardedPayload(dest, origPayload) + if err != nil { + if err != nil { + f.logger.AssumptionViolationw("Forwarder encoding failed, this should never happen", + "err", err, "to", dest, "payload", origPayload) + f.SvcErrBuffer.Append(err) + } + } + return databytes, nil +} + +func (f *FwdMgr) getForwardedPayload(dest common.Address, origPayload []byte) ([]byte, error) { + callArgs, err := forwardABI.Inputs.Pack(dest, origPayload) + if err != nil { + return nil, errors.Wrap(err, "Failed to pack forwarder payload") + } + + dataBytes := append(forwardABI.ID, callArgs...) 
+ return dataBytes, nil +} + +func (f *FwdMgr) getContractSenders(addr common.Address) ([]common.Address, error) { + if senders, ok := f.getCachedSenders(addr); ok { + return senders, nil + } + senders, err := f.getAuthorizedSenders(f.ctx, addr) + if err != nil { + return nil, errors.Wrapf(err, "Failed to call getAuthorizedSenders on %s", addr) + } + f.setCachedSenders(addr, senders) + if err = f.subscribeSendersChangedLogs(addr); err != nil { + return nil, err + } + return senders, nil +} + +func (f *FwdMgr) getAuthorizedSenders(ctx context.Context, addr common.Address) ([]common.Address, error) { + c, err := authorized_receiver.NewAuthorizedReceiverCaller(addr, f.evmClient) + if err != nil { + return nil, errors.Wrap(err, "Failed to init forwarder caller") + } + opts := bind.CallOpts{Context: ctx, Pending: false} + senders, err := c.GetAuthorizedSenders(&opts) + if err != nil { + return nil, err + } + return senders, nil +} + +func (f *FwdMgr) initForwardersCache(ctx context.Context, fwdrs []Forwarder) { + for _, fwdr := range fwdrs { + senders, err := f.getAuthorizedSenders(ctx, fwdr.Address) + if err != nil { + f.logger.Warnw("Failed to call getAuthorizedSenders on forwarder", fwdr, "err", err) + continue + } + f.setCachedSenders(fwdr.Address, senders) + + } +} + +func (f *FwdMgr) subscribeForwardersLogs(fwdrs []Forwarder) error { + for _, fwdr := range fwdrs { + if err := f.subscribeSendersChangedLogs(fwdr.Address); err != nil { + return err + } + } + return nil +} + +func (f *FwdMgr) subscribeSendersChangedLogs(addr common.Address) error { + if err := f.logpoller.Ready(); err != nil { + f.logger.Warnw("Unable to subscribe to AuthorizedSendersChanged logs", "forwarder", addr, "err", err) + return nil + } + + err := f.logpoller.RegisterFilter( + evmlogpoller.Filter{ + Name: FilterName(addr), + EventSigs: []common.Hash{authChangedTopic}, + Addresses: []common.Address{addr}, + }) + return err +} + +func (f *FwdMgr) setCachedSenders(addr common.Address, senders 
[]common.Address) {
	f.cacheMu.Lock()
	defer f.cacheMu.Unlock()
	f.sendersCache[addr] = senders
}

// getCachedSenders returns the cached authorized senders for addr and whether
// an entry was present.
func (f *FwdMgr) getCachedSenders(addr common.Address) ([]common.Address, bool) {
	f.cacheMu.RLock()
	defer f.cacheMu.RUnlock()
	addrs, ok := f.sendersCache[addr]
	return addrs, ok
}

// runLoop polls the log poller (first tick immediately, then roughly once a
// minute with jitter) for AuthorizedSendersChanged events on all tracked
// forwarders and applies them to the senders cache. It runs until f.ctx is
// cancelled and must be started as a goroutine with f.wg already incremented
// (it calls f.wg.Done on exit).
func (f *FwdMgr) runLoop() {
	defer f.wg.Done()
	tick := time.After(0)

	for ; ; tick = time.After(utils.WithJitter(time.Minute)) {
		select {
		case <-tick:
			// Skip the round entirely if the log poller isn't ready yet.
			if err := f.logpoller.Ready(); err != nil {
				f.logger.Warnw("Skipping log syncing", "err", err)
				continue
			}

			addrs := f.collectAddresses()
			if len(addrs) == 0 {
				f.logger.Debug("Skipping log syncing, no forwarders tracked.")
				continue
			}

			// Fetch auth-change logs since the last processed block, requiring
			// FinalityDepth confirmations before acting on them.
			logs, err := f.logpoller.LatestLogEventSigsAddrsWithConfs(
				f.latestBlock,
				[]common.Hash{authChangedTopic},
				addrs,
				evmlogpoller.Confirmations(f.cfg.FinalityDepth()),
				pg.WithParentCtx(f.ctx),
			)
			if err != nil {
				f.logger.Errorw("Failed to retrieve latest log round", "err", err)
				continue
			}
			if len(logs) == 0 {
				f.logger.Debugf("Empty auth update round for addrs: %s, skipping", addrs)
				continue
			}
			f.logger.Debugf("Handling new %d auth updates", len(logs))
			for _, log := range logs {
				// Per-log failures are logged but do not abort the round.
				if err = f.handleAuthChange(log); err != nil {
					f.logger.Warnw("Error handling auth change", "TxHash", log.TxHash, "err", err)
				}
			}

		case <-f.ctx.Done():
			return
		}
	}
}

// handleAuthChange applies a single AuthorizedSendersChanged log to the
// senders cache and advances f.latestBlock. Logs from blocks older than the
// latest processed block are ignored.
func (f *FwdMgr) handleAuthChange(log evmlogpoller.Log) error {
	if f.latestBlock > log.BlockNumber {
		return nil
	}

	f.latestBlock = log.BlockNumber

	// Rebuild a geth types.Log so the generated contract bindings can parse it.
	ethLog := types.Log{
		Address:   log.Address,
		Data:      log.Data,
		Topics:    log.GetTopics(),
		TxHash:    log.TxHash,
		BlockHash: log.BlockHash,
	}

	if ethLog.Topics[0] == authChangedTopic {
		event, err := f.authRcvr.ParseAuthorizedSendersChanged(ethLog)
		if err != nil {
			return errors.New("Failed to parse senders change log")
		}
		f.setCachedSenders(event.Raw.Address, event.Senders)
	}

	return nil
}

func (f *FwdMgr)
collectAddresses() (addrs []common.Address) { + f.cacheMu.RLock() + defer f.cacheMu.RUnlock() + for addr := range f.sendersCache { + addrs = append(addrs, addr) + } + return +} + +// Stop cancels all outgoings calls and stops internal ticker loop. +func (f *FwdMgr) Close() error { + return f.StopOnce("EVMForwarderManager", func() (err error) { + f.cancel() + f.wg.Wait() + return nil + }) +} + +func (f *FwdMgr) HealthReport() map[string]error { + return map[string]error{f.Name(): f.Healthy()} +} diff --git a/core/chains/evm/forwarders/forwarder_manager_test.go b/core/chains/evm/forwarders/forwarder_manager_test.go new file mode 100644 index 00000000..8a800ed1 --- /dev/null +++ b/core/chains/evm/forwarders/forwarder_manager_test.go @@ -0,0 +1,134 @@ +package forwarders_test + +import ( + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/utils" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_forwarder" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_receiver" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/operator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +var GetAuthorisedSendersABI = evmtypes.MustGetABI(authorized_receiver.AuthorizedReceiverABI).Methods["getAuthorizedSenders"] + +var SimpleOracleCallABI = evmtypes.MustGetABI(operator_wrapper.OperatorABI).Methods["getPluginToken"] + +func TestFwdMgr_MaybeForwardTransaction(t *testing.T) { + lggr := logger.Test(t) + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + owner := testutils.MustNewSimTransactor(t) + + ec := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{ + owner.From: { + Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)), + }, + }, 10e6) + t.Cleanup(func() { ec.Close() }) + linkAddr := common.HexToAddress("0x01BE23585060835E02B77ef475b0Cc51aA1e0709") + operatorAddr, _, _, err := operator_wrapper.DeployOperator(owner, ec, linkAddr, owner.From) + require.NoError(t, err) + forwarderAddr, _, forwarder, err := authorized_forwarder.DeployAuthorizedForwarder(owner, ec, linkAddr, owner.From, operatorAddr, []byte{}) + require.NoError(t, err) + ec.Commit() + _, err = forwarder.SetAuthorizedSenders(owner, []common.Address{owner.From}) + require.NoError(t, err) + ec.Commit() + authorized, err := forwarder.GetAuthorizedSenders(nil) + require.NoError(t, err) + t.Log(authorized) + + evmClient := client.NewSimulatedBackendClient(t, ec, testutils.FixtureChainID) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) + fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM(), evmcfg.Database()) + fwdMgr.ORM = forwarders.NewORM(db, logger.Test(t), cfg.Database()) + + fwd, err := fwdMgr.ORM.CreateForwarder(forwarderAddr, ubig.Big(*testutils.FixtureChainID)) + require.NoError(t, err) + lst, err := 
fwdMgr.ORM.FindForwardersByChain(ubig.Big(*testutils.FixtureChainID)) + require.NoError(t, err) + require.Equal(t, len(lst), 1) + require.Equal(t, lst[0].Address, forwarderAddr) + + require.NoError(t, fwdMgr.Start(testutils.Context(t))) + addr, err := fwdMgr.ForwarderFor(owner.From) + require.NoError(t, err) + require.Equal(t, addr.String(), forwarderAddr.String()) + err = fwdMgr.Close() + require.NoError(t, err) + + cleanupCalled := false + cleanup := func(tx pg.Queryer, evmChainId int64, addr common.Address) error { + require.Equal(t, testutils.FixtureChainID.Int64(), evmChainId) + require.Equal(t, forwarderAddr, addr) + require.NotNil(t, tx) + cleanupCalled = true + return nil + } + + err = fwdMgr.ORM.DeleteForwarder(fwd.ID, cleanup) + assert.NoError(t, err) + assert.True(t, cleanupCalled) +} + +func TestFwdMgr_AccountUnauthorizedToForward_SkipsForwarding(t *testing.T) { + lggr := logger.Test(t) + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + owner := testutils.MustNewSimTransactor(t) + ec := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{ + owner.From: { + Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)), + }, + }, 10e6) + t.Cleanup(func() { ec.Close() }) + linkAddr := common.HexToAddress("0x01BE23585060835E02B77ef475b0Cc51aA1e0709") + operatorAddr, _, _, err := operator_wrapper.DeployOperator(owner, ec, linkAddr, owner.From) + require.NoError(t, err) + + forwarderAddr, _, _, err := authorized_forwarder.DeployAuthorizedForwarder(owner, ec, linkAddr, owner.From, operatorAddr, []byte{}) + require.NoError(t, err) + ec.Commit() + + evmClient := client.NewSimulatedBackendClient(t, ec, testutils.FixtureChainID) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) + fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM(), 
evmcfg.Database()) + fwdMgr.ORM = forwarders.NewORM(db, logger.Test(t), cfg.Database()) + + _, err = fwdMgr.ORM.CreateForwarder(forwarderAddr, ubig.Big(*testutils.FixtureChainID)) + require.NoError(t, err) + lst, err := fwdMgr.ORM.FindForwardersByChain(ubig.Big(*testutils.FixtureChainID)) + require.NoError(t, err) + require.Equal(t, len(lst), 1) + require.Equal(t, lst[0].Address, forwarderAddr) + + err = fwdMgr.Start(testutils.Context(t)) + require.NoError(t, err) + addr, err := fwdMgr.ForwarderFor(owner.From) + require.ErrorContains(t, err, "Cannot find forwarder for given EOA") + require.True(t, utils.IsZero(addr)) + err = fwdMgr.Close() + require.NoError(t, err) +} diff --git a/core/chains/evm/forwarders/mocks/orm.go b/core/chains/evm/forwarders/mocks/orm.go new file mode 100644 index 00000000..44508ec5 --- /dev/null +++ b/core/chains/evm/forwarders/mocks/orm.go @@ -0,0 +1,176 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + common "github.com/ethereum/go-ethereum/common" + big "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + + forwarders "github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" + + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// ORM is an autogenerated mock type for the ORM type +type ORM struct { + mock.Mock +} + +// CreateForwarder provides a mock function with given fields: addr, evmChainId +func (_m *ORM) CreateForwarder(addr common.Address, evmChainId big.Big) (forwarders.Forwarder, error) { + ret := _m.Called(addr, evmChainId) + + if len(ret) == 0 { + panic("no return value specified for CreateForwarder") + } + + var r0 forwarders.Forwarder + var r1 error + if rf, ok := ret.Get(0).(func(common.Address, big.Big) (forwarders.Forwarder, error)); ok { + return rf(addr, evmChainId) + } + if rf, ok := ret.Get(0).(func(common.Address, big.Big) forwarders.Forwarder); ok { + r0 = rf(addr, evmChainId) + } else { + r0 = 
ret.Get(0).(forwarders.Forwarder) + } + + if rf, ok := ret.Get(1).(func(common.Address, big.Big) error); ok { + r1 = rf(addr, evmChainId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteForwarder provides a mock function with given fields: id, cleanup +func (_m *ORM) DeleteForwarder(id int64, cleanup func(pg.Queryer, int64, common.Address) error) error { + ret := _m.Called(id, cleanup) + + if len(ret) == 0 { + panic("no return value specified for DeleteForwarder") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int64, func(pg.Queryer, int64, common.Address) error) error); ok { + r0 = rf(id, cleanup) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FindForwarders provides a mock function with given fields: offset, limit +func (_m *ORM) FindForwarders(offset int, limit int) ([]forwarders.Forwarder, int, error) { + ret := _m.Called(offset, limit) + + if len(ret) == 0 { + panic("no return value specified for FindForwarders") + } + + var r0 []forwarders.Forwarder + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(int, int) ([]forwarders.Forwarder, int, error)); ok { + return rf(offset, limit) + } + if rf, ok := ret.Get(0).(func(int, int) []forwarders.Forwarder); ok { + r0 = rf(offset, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]forwarders.Forwarder) + } + } + + if rf, ok := ret.Get(1).(func(int, int) int); ok { + r1 = rf(offset, limit) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(int, int) error); ok { + r2 = rf(offset, limit) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// FindForwardersByChain provides a mock function with given fields: evmChainId +func (_m *ORM) FindForwardersByChain(evmChainId big.Big) ([]forwarders.Forwarder, error) { + ret := _m.Called(evmChainId) + + if len(ret) == 0 { + panic("no return value specified for FindForwardersByChain") + } + + var r0 []forwarders.Forwarder + var r1 error + if rf, ok := ret.Get(0).(func(big.Big) 
([]forwarders.Forwarder, error)); ok { + return rf(evmChainId) + } + if rf, ok := ret.Get(0).(func(big.Big) []forwarders.Forwarder); ok { + r0 = rf(evmChainId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]forwarders.Forwarder) + } + } + + if rf, ok := ret.Get(1).(func(big.Big) error); ok { + r1 = rf(evmChainId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindForwardersInListByChain provides a mock function with given fields: evmChainId, addrs +func (_m *ORM) FindForwardersInListByChain(evmChainId big.Big, addrs []common.Address) ([]forwarders.Forwarder, error) { + ret := _m.Called(evmChainId, addrs) + + if len(ret) == 0 { + panic("no return value specified for FindForwardersInListByChain") + } + + var r0 []forwarders.Forwarder + var r1 error + if rf, ok := ret.Get(0).(func(big.Big, []common.Address) ([]forwarders.Forwarder, error)); ok { + return rf(evmChainId, addrs) + } + if rf, ok := ret.Get(0).(func(big.Big, []common.Address) []forwarders.Forwarder); ok { + r0 = rf(evmChainId, addrs) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]forwarders.Forwarder) + } + } + + if rf, ok := ret.Get(1).(func(big.Big, []common.Address) error); ok { + r1 = rf(evmChainId, addrs) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewORM creates a new instance of ORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewORM(t interface { + mock.TestingT + Cleanup(func()) +}) *ORM { + mock := &ORM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/forwarders/orm.go b/core/chains/evm/forwarders/orm.go new file mode 100644 index 00000000..d261babe --- /dev/null +++ b/core/chains/evm/forwarders/orm.go @@ -0,0 +1,135 @@ +package forwarders + +import ( + "database/sql" + + "github.com/ethereum/go-ethereum/common" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +//go:generate mockery --quiet --name ORM --output ./mocks/ --case=underscore + +type ORM interface { + CreateForwarder(addr common.Address, evmChainId big.Big) (fwd Forwarder, err error) + FindForwarders(offset, limit int) ([]Forwarder, int, error) + FindForwardersByChain(evmChainId big.Big) ([]Forwarder, error) + DeleteForwarder(id int64, cleanup func(tx pg.Queryer, evmChainId int64, addr common.Address) error) error + FindForwardersInListByChain(evmChainId big.Big, addrs []common.Address) ([]Forwarder, error) +} + +type orm struct { + q pg.Q +} + +var _ ORM = (*orm)(nil) + +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) *orm { + return &orm{pg.NewQ(db, lggr, cfg)} +} + +// CreateForwarder creates the Forwarder address associated with the current EVM chain id. +func (o *orm) CreateForwarder(addr common.Address, evmChainId big.Big) (fwd Forwarder, err error) { + sql := `INSERT INTO evm.forwarders (address, evm_chain_id, created_at, updated_at) VALUES ($1, $2, now(), now()) RETURNING *` + err = o.q.Get(&fwd, sql, addr, evmChainId) + return fwd, err +} + +// DeleteForwarder removes a forwarder address. +// If cleanup is non-nil, it can be used to perform any chain- or contract-specific cleanup that need to happen atomically +// on forwarder deletion. 
If cleanup returns an error, forwarder deletion will be aborted. +func (o *orm) DeleteForwarder(id int64, cleanup func(tx pg.Queryer, evmChainID int64, addr common.Address) error) (err error) { + var dest struct { + EvmChainId int64 + Address common.Address + } + + var rowsAffected int64 + err = o.q.Transaction(func(tx pg.Queryer) error { + err = tx.Get(&dest, `SELECT evm_chain_id, address FROM evm.forwarders WHERE id = $1`, id) + if err != nil { + return err + } + if cleanup != nil { + if err = cleanup(tx, dest.EvmChainId, dest.Address); err != nil { + return err + } + } + + result, err2 := o.q.Exec(`DELETE FROM evm.forwarders WHERE id = $1`, id) + // If the forwarder wasn't found, we still want to delete the filter. + // In that case, the transaction must return nil, even though DeleteForwarder + // will return sql.ErrNoRows + if err2 != nil && !errors.Is(err2, sql.ErrNoRows) { + return err2 + } + rowsAffected, err2 = result.RowsAffected() + + return err2 + }) + + if err == nil && rowsAffected == 0 { + err = sql.ErrNoRows + } + return err +} + +// FindForwarders returns all forwarder addresses from offset up until limit. +func (o *orm) FindForwarders(offset, limit int) (fwds []Forwarder, count int, err error) { + sql := `SELECT count(*) FROM evm.forwarders` + if err = o.q.Get(&count, sql); err != nil { + return + } + + sql = `SELECT * FROM evm.forwarders ORDER BY created_at DESC, id DESC LIMIT $1 OFFSET $2` + if err = o.q.Select(&fwds, sql, limit, offset); err != nil { + return + } + return +} + +// FindForwardersByChain returns all forwarder addresses for a chain. 
+func (o *orm) FindForwardersByChain(evmChainId big.Big) (fwds []Forwarder, err error) { + sql := `SELECT * FROM evm.forwarders where evm_chain_id = $1 ORDER BY created_at DESC, id DESC` + err = o.q.Select(&fwds, sql, evmChainId) + return +} + +func (o *orm) FindForwardersInListByChain(evmChainId big.Big, addrs []common.Address) ([]Forwarder, error) { + var fwdrs []Forwarder + + arg := map[string]interface{}{ + "addresses": addrs, + "chainid": evmChainId, + } + + query, args, err := sqlx.Named(` + SELECT * FROM evm.forwarders + WHERE evm_chain_id = :chainid + AND address IN (:addresses) + ORDER BY created_at DESC, id DESC`, + arg, + ) + + if err != nil { + return nil, errors.Wrap(err, "Failed to format query") + } + + query, args, err = sqlx.In(query, args...) + if err != nil { + return nil, errors.Wrap(err, "Failed to run sqlx.IN on query") + } + + query = o.q.Rebind(query) + err = o.q.Select(&fwdrs, query, args...) + + if err != nil { + return nil, errors.Wrap(err, "Failed to execute query") + } + + return fwdrs, nil +} diff --git a/core/chains/evm/forwarders/orm_test.go b/core/chains/evm/forwarders/orm_test.go new file mode 100644 index 00000000..ced7f797 --- /dev/null +++ b/core/chains/evm/forwarders/orm_test.go @@ -0,0 +1,70 @@ +package forwarders + +import ( + "database/sql" + "errors" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + "github.com/jmoiron/sqlx" +) + +type TestORM struct { + ORM + db *sqlx.DB +} + +func setupORM(t *testing.T) *TestORM { + t.Helper() + + var ( + db = pgtest.NewSqlxDB(t) + lggr = logger.Test(t) + orm = NewORM(db, lggr, pgtest.NewQConfig(true)) + 
) + + return &TestORM{ORM: orm, db: db} +} + +// Tests the atomicity of cleanup function passed to DeleteForwarder, during DELETE operation +func Test_DeleteForwarder(t *testing.T) { + t.Parallel() + orm := setupORM(t) + addr := testutils.NewAddress() + chainID := testutils.FixtureChainID + + fwd, err := orm.CreateForwarder(addr, *big.New(chainID)) + require.NoError(t, err) + assert.Equal(t, addr, fwd.Address) + + ErrCleaningUp := errors.New("error during cleanup") + + cleanupCalled := 0 + + // Cleanup should fail the first time, causing delete to abort. When cleanup succeeds the second time, + // delete should succeed. Should fail the 3rd and 4th time since the forwarder has already been deleted. + // cleanup should only be called the first two times (when DELETE can succeed). + rets := []error{ErrCleaningUp, nil, nil, ErrCleaningUp} + expected := []error{ErrCleaningUp, nil, sql.ErrNoRows, sql.ErrNoRows} + + testCleanupFn := func(q pg.Queryer, evmChainID int64, addr common.Address) error { + require.Less(t, cleanupCalled, len(rets)) + cleanupCalled++ + return rets[cleanupCalled-1] + } + + for _, expect := range expected { + err = orm.DeleteForwarder(fwd.ID, testCleanupFn) + assert.ErrorIs(t, err, expect) + } + assert.Equal(t, 2, cleanupCalled) +} diff --git a/core/chains/evm/gas/arbitrum_estimator.go b/core/chains/evm/gas/arbitrum_estimator.go new file mode 100644 index 00000000..ee50ba5a --- /dev/null +++ b/core/chains/evm/gas/arbitrum_estimator.go @@ -0,0 +1,263 @@ +package gas + +import ( + "context" + "fmt" + "math" + "math/big" + "slices" + "sync" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclient 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" +) + +type ArbConfig interface { + LimitMax() uint32 + BumpPercent() uint16 + BumpMin() *assets.Wei +} + +//go:generate mockery --quiet --name ethClient --output ./mocks/ --case=underscore --structname ETHClient +type ethClient interface { + CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) +} + +// arbitrumEstimator is an Estimator which extends SuggestedPriceEstimator to use getPricesInArbGas() for gas limit estimation. +type arbitrumEstimator struct { + services.StateMachine + cfg ArbConfig + + EvmEstimator // *SuggestedPriceEstimator + + client ethClient + pollPeriod time.Duration + logger logger.Logger + + getPricesInArbGasMu sync.RWMutex + perL2Tx uint32 + perL1CalldataUnit uint32 + + chForceRefetch chan (chan struct{}) + chInitialised chan struct{} + chStop services.StopChan + chDone chan struct{} +} + +func NewArbitrumEstimator(lggr logger.Logger, cfg ArbConfig, rpcClient rpcClient, ethClient ethClient) EvmEstimator { + lggr = logger.Named(lggr, "ArbitrumEstimator") + return &arbitrumEstimator{ + cfg: cfg, + EvmEstimator: NewSuggestedPriceEstimator(lggr, rpcClient, cfg), + client: ethClient, + pollPeriod: 10 * time.Second, + logger: lggr, + chForceRefetch: make(chan (chan struct{})), + chInitialised: make(chan struct{}), + chStop: make(chan struct{}), + chDone: make(chan struct{}), + } +} + +func (a *arbitrumEstimator) Name() string { + return a.logger.Name() +} + +func (a *arbitrumEstimator) Start(ctx context.Context) error { + return a.StartOnce("ArbitrumEstimator", func() error { + if err := a.EvmEstimator.Start(ctx); err != nil { + return errors.Wrap(err, "failed to start gas price estimator") + } + go a.run() + <-a.chInitialised + return nil + }) +} +func (a *arbitrumEstimator) Close() error { + return a.StopOnce("ArbitrumEstimator", func() (err error) { + close(a.chStop) + err = errors.Wrap(a.EvmEstimator.Close(), "failed to stop gas price 
estimator") + <-a.chDone + return + }) +} + +func (a *arbitrumEstimator) Ready() error { return a.StateMachine.Ready() } + +func (a *arbitrumEstimator) HealthReport() map[string]error { + hp := map[string]error{a.Name(): a.Healthy()} + services.CopyHealth(hp, a.EvmEstimator.HealthReport()) + return hp +} + +// GetLegacyGas estimates both the gas price and the gas limit. +// - Price is delegated to the embedded SuggestedPriceEstimator. +// - Limit is computed from the dynamic values perL2Tx and perL1CalldataUnit, provided by the getPricesInArbGas() method +// of the precompilie contract at ArbGasInfoAddress. perL2Tx is a constant amount of gas, and perL1CalldataUnit is +// multiplied by the length of the tx calldata. The sum of these two values plus the original l2GasLimit is returned. +func (a *arbitrumEstimator) GetLegacyGas(ctx context.Context, calldata []byte, l2GasLimit uint32, maxGasPriceWei *assets.Wei, opts ...feetypes.Opt) (gasPrice *assets.Wei, chainSpecificGasLimit uint32, err error) { + gasPrice, _, err = a.EvmEstimator.GetLegacyGas(ctx, calldata, l2GasLimit, maxGasPriceWei, opts...) 
+ if err != nil { + return + } + gasPrice = a.gasPriceWithBuffer(gasPrice, maxGasPriceWei) + ok := a.IfStarted(func() { + if slices.Contains(opts, feetypes.OptForceRefetch) { + ch := make(chan struct{}) + select { + case a.chForceRefetch <- ch: + case <-a.chStop: + err = errors.New("estimator stopped") + return + case <-ctx.Done(): + err = ctx.Err() + return + } + select { + case <-ch: + case <-a.chStop: + err = errors.New("estimator stopped") + return + case <-ctx.Done(): + err = ctx.Err() + return + } + } + perL2Tx, perL1CalldataUnit := a.getPricesInArbGas() + chainSpecificGasLimit = l2GasLimit + perL2Tx + uint32(len(calldata))*perL1CalldataUnit + a.logger.Debugw("GetLegacyGas", "l2GasLimit", l2GasLimit, "calldataLen", len(calldata), "perL2Tx", perL2Tx, + "perL1CalldataUnit", perL1CalldataUnit, "chainSpecificGasLimit", chainSpecificGasLimit) + }) + if !ok { + return nil, 0, errors.New("estimator is not started") + } else if err != nil { + return + } + if max := a.cfg.LimitMax(); chainSpecificGasLimit > max { + err = fmt.Errorf("estimated gas limit: %d is greater than the maximum gas limit configured: %d", chainSpecificGasLimit, max) + return + } + return +} + +// During network congestion Arbitrum's suggested gas price can be extremely volatile, making gas estimations less accurate. For any transaction, Arbitrum will only charge +// the block's base fee. If the base fee increases rapidly there is a chance the suggested gas price will fall under that value, resulting in a fee too low error. +// We use gasPriceWithBuffer to increase the estimated gas price by some percentage to avoid fee too low errors. Eventually, only the base fee will be paid, regardless of the price. 
+func (a *arbitrumEstimator) gasPriceWithBuffer(gasPrice *assets.Wei, maxGasPriceWei *assets.Wei) *assets.Wei { + const gasPriceBufferPercentage = 50 + + gasPrice = gasPrice.AddPercentage(gasPriceBufferPercentage) + if gasPrice.Cmp(maxGasPriceWei) > 0 { + a.logger.Warnw("Updated gasPrice with buffer is higher than the max gas price limit. Falling back to max gas price", "gasPriceWithBuffer", gasPrice, "maxGasPriceWei", maxGasPriceWei) + gasPrice = maxGasPriceWei + } + a.logger.Debugw("gasPriceWithBuffer", "updatedGasPrice", gasPrice) + return gasPrice +} + +func (a *arbitrumEstimator) getPricesInArbGas() (perL2Tx uint32, perL1CalldataUnit uint32) { + a.getPricesInArbGasMu.RLock() + perL2Tx, perL1CalldataUnit = a.perL2Tx, a.perL1CalldataUnit + a.getPricesInArbGasMu.RUnlock() + return +} + +func (a *arbitrumEstimator) run() { + defer close(a.chDone) + + t := a.refreshPricesInArbGas() + close(a.chInitialised) + + for { + select { + case <-a.chStop: + return + case ch := <-a.chForceRefetch: + t.Stop() + t = a.refreshPricesInArbGas() + close(ch) + case <-t.C: + t = a.refreshPricesInArbGas() + } + } +} + +// refreshPricesInArbGas calls getPricesInArbGas() and caches the refreshed prices. +func (a *arbitrumEstimator) refreshPricesInArbGas() (t *time.Timer) { + t = time.NewTimer(utils.WithJitter(a.pollPeriod)) + + perL2Tx, perL1CalldataUnit, err := a.callGetPricesInArbGas() + if err != nil { + a.logger.Warnw("Failed to refresh prices", "err", err) + return + } + + a.logger.Debugw("refreshPricesInArbGas", "perL2Tx", perL2Tx, "perL2CalldataUnit", perL1CalldataUnit) + + a.getPricesInArbGasMu.Lock() + a.perL2Tx = perL2Tx + a.perL1CalldataUnit = perL1CalldataUnit + a.getPricesInArbGasMu.Unlock() + return +} + +const ( + // ArbGasInfoAddress is the address of the "Precompiled contract that exists in every Arbitrum chain." 
+ // https://github.com/OffchainLabs/nitro/blob/f7645453cfc77bf3e3644ea1ac031eff629df325/contracts/src/precompiles/ArbGasInfo.sol + ArbGasInfoAddress = "0x000000000000000000000000000000000000006C" + // ArbGasInfo_getPricesInArbGas is the a hex encoded call to: + // `function getPricesInArbGas() external view returns (uint256, uint256, uint256);` + ArbGasInfo_getPricesInArbGas = "02199f34" +) + +// callGetPricesInArbGas calls ArbGasInfo.getPricesInArbGas() on the precompile contract ArbGasInfoAddress. +// +// @return (per L2 tx, per L1 calldata unit, per storage allocation) +// function getPricesInArbGas() external view returns (uint256, uint256, uint256); +// +// https://github.com/OffchainLabs/nitro/blob/f7645453cfc77bf3e3644ea1ac031eff629df325/contracts/src/precompiles/ArbGasInfo.sol#L69 +func (a *arbitrumEstimator) callGetPricesInArbGas() (perL2Tx uint32, perL1CalldataUnit uint32, err error) { + ctx, cancel := a.chStop.CtxCancel(evmclient.ContextWithDefaultTimeout()) + defer cancel() + + precompile := common.HexToAddress(ArbGasInfoAddress) + b, err := a.client.CallContract(ctx, ethereum.CallMsg{ + To: &precompile, + Data: common.Hex2Bytes(ArbGasInfo_getPricesInArbGas), + }, big.NewInt(-1)) + if err != nil { + return 0, 0, err + } + + if len(b) != 3*32 { // returns (uint256, uint256, uint256); + err = fmt.Errorf("return data length (%d) different than expected (%d)", len(b), 3*32) + return + } + bPerL2Tx := new(big.Int).SetBytes(b[:32]) + bPerL1CalldataUnit := new(big.Int).SetBytes(b[32:64]) + // ignore perStorageAllocation + if !bPerL2Tx.IsUint64() || !bPerL1CalldataUnit.IsUint64() { + err = fmt.Errorf("returned integers are not uint64 (%s, %s)", bPerL2Tx.String(), bPerL1CalldataUnit.String()) + return + } + + perL2TxU64 := bPerL2Tx.Uint64() + perL1CalldataUnitU64 := bPerL1CalldataUnit.Uint64() + if perL2TxU64 > math.MaxUint32 || perL1CalldataUnitU64 > math.MaxUint32 { + err = fmt.Errorf("returned integers are not uint32 (%d, %d)", perL2TxU64, 
perL1CalldataUnitU64) + return + } + perL2Tx = uint32(perL2TxU64) + perL1CalldataUnit = uint32(perL1CalldataUnitU64) + return +} diff --git a/core/chains/evm/gas/arbitrum_estimator_test.go b/core/chains/evm/gas/arbitrum_estimator_test.go new file mode 100644 index 00000000..7c885325 --- /dev/null +++ b/core/chains/evm/gas/arbitrum_estimator_test.go @@ -0,0 +1,251 @@ +package gas_test + +import ( + "bytes" + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +type arbConfig struct { + v uint32 + bumpPercent uint16 + bumpMin *assets.Wei +} + +func (a *arbConfig) LimitMax() uint32 { + return a.v +} + +func (a *arbConfig) BumpPercent() uint16 { + return a.bumpPercent +} + +func (a *arbConfig) BumpMin() *assets.Wei { + return a.bumpMin +} + +func TestArbitrumEstimator(t *testing.T) { + t.Parallel() + + maxGasPrice := assets.NewWeiI(100) + const maxGasLimit uint32 = 500_000 + calldata := []byte{0x00, 0x00, 0x01, 0x02, 0x03} + const gasLimit uint32 = 80000 + const gasPriceBufferPercentage = 50 + const bumpPercent = 10 + var bumpMin = assets.NewWei(big.NewInt(1)) + + t.Run("calling GetLegacyGas on unstarted estimator returns error", func(t *testing.T) { + rpcClient := mocks.NewRPCClient(t) + ethClient := mocks.NewETHClient(t) + o := gas.NewArbitrumEstimator(logger.Test(t), &arbConfig{}, rpcClient, ethClient) + _, _, err := o.GetLegacyGas(testutils.Context(t), 
calldata, gasLimit, maxGasPrice) + assert.EqualError(t, err, "estimator is not started") + }) + + var zeros bytes.Buffer + zeros.Write(common.BigToHash(big.NewInt(0)).Bytes()) + zeros.Write(common.BigToHash(big.NewInt(0)).Bytes()) + zeros.Write(common.BigToHash(big.NewInt(123455)).Bytes()) + t.Run("calling GetLegacyGas on started estimator returns estimates", func(t *testing.T) { + rpcClient := mocks.NewRPCClient(t) + ethClient := mocks.NewETHClient(t) + rpcClient.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(42) + }) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { + callMsg := args.Get(1).(ethereum.CallMsg) + blockNumber := args.Get(2).(*big.Int) + assert.Equal(t, gas.ArbGasInfoAddress, callMsg.To.String()) + assert.Equal(t, gas.ArbGasInfo_getPricesInArbGas, fmt.Sprintf("%x", callMsg.Data)) + assert.Equal(t, big.NewInt(-1), blockNumber) + }).Return(zeros.Bytes(), nil) + + o := gas.NewArbitrumEstimator(logger.Test(t), &arbConfig{v: maxGasLimit, bumpPercent: bumpPercent, bumpMin: bumpMin}, rpcClient, ethClient) + servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, maxGasPrice) + require.NoError(t, err) + // Expected price for a standard l2_suggested_estimator would be 42, but we add a fixed gasPriceBufferPercentage. 
+ assert.Equal(t, assets.NewWeiI(42).AddPercentage(gasPriceBufferPercentage), gasPrice) + assert.Equal(t, gasLimit, chainSpecificGasLimit) + }) + + t.Run("gas price is lower than user specified max gas price", func(t *testing.T) { + client := mocks.NewRPCClient(t) + ethClient := mocks.NewETHClient(t) + o := gas.NewArbitrumEstimator(logger.Test(t), &arbConfig{}, client, ethClient) + + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(42) + }) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { + callMsg := args.Get(1).(ethereum.CallMsg) + blockNumber := args.Get(2).(*big.Int) + assert.Equal(t, gas.ArbGasInfoAddress, callMsg.To.String()) + assert.Equal(t, gas.ArbGasInfo_getPricesInArbGas, fmt.Sprintf("%x", callMsg.Data)) + assert.Equal(t, big.NewInt(-1), blockNumber) + }).Return(zeros.Bytes(), nil) + + servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, assets.NewWeiI(40)) + require.Error(t, err) + assert.EqualError(t, err, "estimated gas price: 42 wei is greater than the maximum gas price configured: 40 wei") + assert.Nil(t, gasPrice) + assert.Equal(t, uint32(0), chainSpecificGasLimit) + }) + + t.Run("gas price is lower than global max gas price", func(t *testing.T) { + ethClient := mocks.NewETHClient(t) + client := mocks.NewRPCClient(t) + o := gas.NewArbitrumEstimator(logger.Test(t), &arbConfig{}, client, ethClient) + + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(120) + }) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { + callMsg := args.Get(1).(ethereum.CallMsg) + 
blockNumber := args.Get(2).(*big.Int) + assert.Equal(t, gas.ArbGasInfoAddress, callMsg.To.String()) + assert.Equal(t, gas.ArbGasInfo_getPricesInArbGas, fmt.Sprintf("%x", callMsg.Data)) + assert.Equal(t, big.NewInt(-1), blockNumber) + }).Return(zeros.Bytes(), nil) + + servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, assets.NewWeiI(110)) + assert.EqualError(t, err, "estimated gas price: 120 wei is greater than the maximum gas price configured: 110 wei") + assert.Nil(t, gasPrice) + assert.Equal(t, uint32(0), chainSpecificGasLimit) + }) + + t.Run("calling BumpLegacyGas on unstarted arbitrum estimator returns error", func(t *testing.T) { + rpcClient := mocks.NewRPCClient(t) + ethClient := mocks.NewETHClient(t) + o := gas.NewArbitrumEstimator(logger.Test(t), &arbConfig{}, rpcClient, ethClient) + _, _, err := o.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(42), gasLimit, assets.NewWeiI(10), nil) + assert.EqualError(t, err, "estimator is not started") + }) + + t.Run("calling GetLegacyGas on started estimator if initial call failed returns error", func(t *testing.T) { + client := mocks.NewRPCClient(t) + ethClient := mocks.NewETHClient(t) + o := gas.NewArbitrumEstimator(logger.Test(t), &arbConfig{}, client, ethClient) + + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(errors.New("kaboom")) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { + callMsg := args.Get(1).(ethereum.CallMsg) + blockNumber := args.Get(2).(*big.Int) + assert.Equal(t, gas.ArbGasInfoAddress, callMsg.To.String()) + assert.Equal(t, gas.ArbGasInfo_getPricesInArbGas, fmt.Sprintf("%x", callMsg.Data)) + assert.Equal(t, big.NewInt(-1), blockNumber) + }).Return(zeros.Bytes(), nil) + + servicetest.RunHealthy(t, o) + + _, _, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, maxGasPrice) + 
assert.EqualError(t, err, "failed to estimate gas; gas price not set") + }) + + t.Run("calling GetDynamicFee always returns error", func(t *testing.T) { + rpcClient := mocks.NewRPCClient(t) + ethClient := mocks.NewETHClient(t) + o := gas.NewArbitrumEstimator(logger.Test(t), &arbConfig{}, rpcClient, ethClient) + _, _, err := o.GetDynamicFee(testutils.Context(t), gasLimit, maxGasPrice) + assert.EqualError(t, err, "dynamic fees are not implemented for this estimator") + }) + + t.Run("calling BumpDynamicFee always returns error", func(t *testing.T) { + rpcClient := mocks.NewRPCClient(t) + ethClient := mocks.NewETHClient(t) + o := gas.NewArbitrumEstimator(logger.Test(t), &arbConfig{}, rpcClient, ethClient) + fee := gas.DynamicFee{ + FeeCap: assets.NewWeiI(42), + TipCap: assets.NewWeiI(5), + } + _, _, err := o.BumpDynamicFee(testutils.Context(t), fee, gasLimit, maxGasPrice, nil) + assert.EqualError(t, err, "dynamic fees are not implemented for this estimator") + }) + + t.Run("limit computes", func(t *testing.T) { + rpcClient := mocks.NewRPCClient(t) + ethClient := mocks.NewETHClient(t) + rpcClient.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(42) + }) + const ( + perL2Tx = 50_000 + perL1Calldata = 10_000 + ) + var expLimit = gasLimit + perL2Tx + perL1Calldata*uint32(len(calldata)) + + var b bytes.Buffer + b.Write(common.BigToHash(big.NewInt(perL2Tx)).Bytes()) + b.Write(common.BigToHash(big.NewInt(perL1Calldata)).Bytes()) + b.Write(common.BigToHash(big.NewInt(123455)).Bytes()) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { + callMsg := args.Get(1).(ethereum.CallMsg) + blockNumber := args.Get(2).(*big.Int) + assert.Equal(t, gas.ArbGasInfoAddress, callMsg.To.String()) + assert.Equal(t, gas.ArbGasInfo_getPricesInArbGas, fmt.Sprintf("%x", callMsg.Data)) + 
assert.Equal(t, big.NewInt(-1), blockNumber) + }).Return(b.Bytes(), nil) + + o := gas.NewArbitrumEstimator(logger.Test(t), &arbConfig{v: maxGasLimit, bumpPercent: bumpPercent, bumpMin: bumpMin}, rpcClient, ethClient) + servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, maxGasPrice) + require.NoError(t, err) + require.NotNil(t, gasPrice) + // Again, a normal l2_suggested_estimator would return 42, but arbitrum_estimator adds a buffer. + assert.Equal(t, "63 wei", gasPrice.String()) + assert.Equal(t, expLimit, chainSpecificGasLimit, "expected %d but got %d", expLimit, chainSpecificGasLimit) + }) + + t.Run("limit exceeds max", func(t *testing.T) { + rpcClient := mocks.NewRPCClient(t) + ethClient := mocks.NewETHClient(t) + rpcClient.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(42) + }) + const ( + perL2Tx = 500_000 + perL1Calldata = 100_000 + ) + + var b bytes.Buffer + b.Write(common.BigToHash(big.NewInt(perL2Tx)).Bytes()) + b.Write(common.BigToHash(big.NewInt(perL1Calldata)).Bytes()) + b.Write(common.BigToHash(big.NewInt(123455)).Bytes()) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { + callMsg := args.Get(1).(ethereum.CallMsg) + blockNumber := args.Get(2).(*big.Int) + assert.Equal(t, gas.ArbGasInfoAddress, callMsg.To.String()) + assert.Equal(t, gas.ArbGasInfo_getPricesInArbGas, fmt.Sprintf("%x", callMsg.Data)) + assert.Equal(t, big.NewInt(-1), blockNumber) + }).Return(b.Bytes(), nil) + + o := gas.NewArbitrumEstimator(logger.Test(t), &arbConfig{v: maxGasLimit, bumpPercent: bumpPercent, bumpMin: bumpMin}, rpcClient, ethClient) + servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, maxGasPrice) + 
require.Error(t, err, "expected error but got (%s, %d)", gasPrice, chainSpecificGasLimit) + }) +} diff --git a/core/chains/evm/gas/block_history_estimator.go b/core/chains/evm/gas/block_history_estimator.go new file mode 100644 index 00000000..fcc0dd13 --- /dev/null +++ b/core/chains/evm/gas/block_history_estimator.go @@ -0,0 +1,912 @@ +package gas + +import ( + "context" + "fmt" + "math/big" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + "github.com/goplugin/plugin-common/pkg/utils/mathutil" + + "github.com/goplugin/pluginv3.0/v2/common/config" + commonfee "github.com/goplugin/pluginv3.0/v2/common/fee" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +// MaxStartTime is the maximum amount of time we are allowed to spend +// trying to fill initial data on start. This must be capped because it can +// block the application from starting. 
+var MaxStartTime = 10 * time.Second + +var ( + promBlockHistoryEstimatorAllGasPricePercentiles = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "gas_updater_all_gas_price_percentiles", + Help: "Gas price at given percentile", + }, + []string{"percentile", "evmChainID"}, + ) + + promBlockHistoryEstimatorAllTipCapPercentiles = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "gas_updater_all_tip_cap_percentiles", + Help: "Tip cap at given percentile", + }, + []string{"percentile", "evmChainID"}, + ) + + promBlockHistoryEstimatorSetGasPrice = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "gas_updater_set_gas_price", + Help: "Gas updater set gas price (in Wei)", + }, + []string{"percentile", "evmChainID"}, + ) + + promBlockHistoryEstimatorSetTipCap = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "gas_updater_set_tip_cap", + Help: "Gas updater set gas tip cap (in Wei)", + }, + []string{"percentile", "evmChainID"}, + ) + promBlockHistoryEstimatorCurrentBaseFee = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "gas_updater_current_base_fee", + Help: "Gas updater current block base fee in Wei", + }, + []string{"evmChainID"}, + ) + promBlockHistoryEstimatorConnectivityFailureCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "block_history_estimator_connectivity_failure_count", + Help: "Counter is incremented every time a gas bump is prevented due to a detected network propagation/connectivity issue", + }, + []string{"evmChainID", "mode"}, + ) +) + +const BumpingHaltedLabel = "Tx gas bumping halted since price exceeds current block prices by significant margin; tx will continue to be rebroadcasted but your node, RPC, or the chain might be experiencing connectivity issues; please investigate and fix ASAP" + +var _ EvmEstimator = &BlockHistoryEstimator{} + +type chainConfig interface { + ChainType() config.ChainType +} + +type estimatorGasEstimatorConfig interface { + EIP1559DynamicFees() bool + BumpThreshold() uint64 + LimitMultiplier() 
float32 + PriceDefault() *assets.Wei + TipCapDefault() *assets.Wei + TipCapMin() *assets.Wei + PriceMax() *assets.Wei + PriceMin() *assets.Wei + bumpConfig +} + +//go:generate mockery --quiet --name Config --output ./mocks/ --case=underscore +type ( + BlockHistoryEstimator struct { + services.StateMachine + ethClient evmclient.Client + chainID big.Int + config chainConfig + eConfig estimatorGasEstimatorConfig + bhConfig BlockHistoryConfig + // NOTE: it is assumed that blocks will be kept sorted by + // block number ascending + blocks []evmtypes.Block + blocksMu sync.RWMutex + size int64 + mb *mailbox.Mailbox[*evmtypes.Head] + wg *sync.WaitGroup + ctx context.Context + ctxCancel context.CancelFunc + + gasPrice *assets.Wei + tipCap *assets.Wei + priceMu sync.RWMutex + latest *evmtypes.Head + latestMu sync.RWMutex + initialFetch atomic.Bool + + logger logger.SugaredLogger + } +) + +// NewBlockHistoryEstimator returns a new BlockHistoryEstimator that listens +// for new heads and updates the base gas price dynamically based on the +// configured percentile of gas prices in that block +func NewBlockHistoryEstimator(lggr logger.Logger, ethClient evmclient.Client, cfg chainConfig, eCfg estimatorGasEstimatorConfig, bhCfg BlockHistoryConfig, chainID big.Int) EvmEstimator { + ctx, cancel := context.WithCancel(context.Background()) + b := &BlockHistoryEstimator{ + ethClient: ethClient, + chainID: chainID, + config: cfg, + eConfig: eCfg, + bhConfig: bhCfg, + blocks: make([]evmtypes.Block, 0), + // Must have enough blocks for both estimator and connectivity checker + size: int64(mathutil.Max(bhCfg.BlockHistorySize(), bhCfg.CheckInclusionBlocks())), + mb: mailbox.NewSingle[*evmtypes.Head](), + wg: new(sync.WaitGroup), + ctx: ctx, + ctxCancel: cancel, + logger: logger.Sugared(logger.Named(lggr, "BlockHistoryEstimator")), + } + + return b +} + +// OnNewLongestChain recalculates and sets global gas price if a sampled new head comes +// in and we are not currently fetching +func (b 
*BlockHistoryEstimator) OnNewLongestChain(_ context.Context, head *evmtypes.Head) { + // set latest base fee here to avoid potential lag introduced by block delay + // it is really important that base fee be as up-to-date as possible + b.setLatest(head) + b.mb.Deliver(head) +} + +// setLatest assumes that head won't be mutated +func (b *BlockHistoryEstimator) setLatest(head *evmtypes.Head) { + // Non-eip1559 blocks don't include base fee + if baseFee := head.BaseFeePerGas; baseFee != nil { + promBlockHistoryEstimatorCurrentBaseFee.WithLabelValues(b.chainID.String()).Set(float64(baseFee.Int64())) + } + b.logger.Debugw("Set latest block", "blockNum", head.Number, "blockHash", head.Hash, "baseFee", head.BaseFeePerGas, "baseFeeWei", head.BaseFeePerGas.ToInt()) + b.latestMu.Lock() + defer b.latestMu.Unlock() + b.latest = head +} + +func (b *BlockHistoryEstimator) getCurrentBaseFee() *assets.Wei { + b.latestMu.RLock() + defer b.latestMu.RUnlock() + if b.latest == nil { + return nil + } + return b.latest.BaseFeePerGas +} + +func (b *BlockHistoryEstimator) getCurrentBlockNum() *int64 { + b.latestMu.RLock() + defer b.latestMu.RUnlock() + if b.latest == nil { + return nil + } + return &b.latest.Number +} + +func (b *BlockHistoryEstimator) getBlocks() []evmtypes.Block { + b.blocksMu.RLock() + defer b.blocksMu.RUnlock() + return b.blocks +} + +// Start starts BlockHistoryEstimator service. +// The provided context can be used to terminate Start sequence. 
+func (b *BlockHistoryEstimator) Start(ctx context.Context) error { + return b.StartOnce("BlockHistoryEstimator", func() error { + b.logger.Trace("Starting") + + if b.bhConfig.CheckInclusionBlocks() > 0 { + b.logger.Infof("Inclusion checking enabled, bumping will be prevented on transactions that have been priced above the %d percentile for %d blocks", b.bhConfig.CheckInclusionPercentile(), b.bhConfig.CheckInclusionBlocks()) + } + if b.bhConfig.BlockHistorySize() == 0 { + return errors.New("BlockHistorySize must be set to a value greater than 0") + } + + fetchCtx, cancel := context.WithTimeout(ctx, MaxStartTime) + defer cancel() + latestHead, err := b.ethClient.HeadByNumber(fetchCtx, nil) + if err != nil { + b.logger.Warnw("Initial check for latest head failed", "err", err) + } else if latestHead == nil { + b.logger.Warnw("initial check for latest head failed, head was unexpectedly nil") + } else { + b.logger.Debugw("Got latest head", "number", latestHead.Number, "blockHash", latestHead.Hash.Hex()) + b.setLatest(latestHead) + b.FetchBlocksAndRecalculate(fetchCtx, latestHead) + } + + // NOTE: This only checks the start context, not the fetch context + if ctx.Err() != nil { + return errors.Wrap(ctx.Err(), "failed to start BlockHistoryEstimator due to main context error") + } + + b.wg.Add(1) + go b.runLoop() + + b.logger.Trace("Started") + return nil + }) +} + +func (b *BlockHistoryEstimator) Close() error { + return b.StopOnce("BlockHistoryEstimator", func() error { + b.ctxCancel() + b.wg.Wait() + return nil + }) +} + +func (b *BlockHistoryEstimator) Name() string { + return b.logger.Name() +} +func (b *BlockHistoryEstimator) HealthReport() map[string]error { + return map[string]error{b.Name(): b.Healthy()} +} + +func (b *BlockHistoryEstimator) GetLegacyGas(_ context.Context, _ []byte, gasLimit uint32, maxGasPriceWei *assets.Wei, _ ...feetypes.Opt) (gasPrice *assets.Wei, chainSpecificGasLimit uint32, err error) { + ok := b.IfStarted(func() { + gasPrice = 
b.getGasPrice() + }) + if !ok { + return nil, 0, errors.New("BlockHistoryEstimator is not started; cannot estimate gas") + } + if gasPrice == nil { + if !b.initialFetch.Load() { + return nil, 0, errors.New("BlockHistoryEstimator has not finished the first gas estimation yet, likely because a failure on start") + } + b.logger.Warnw("Failed to estimate gas price. This is likely because there aren't any valid transactions to estimate from."+ + "Using Evm.GasEstimator.PriceDefault as fallback.", "blocks", b.getBlockHistoryNumbers()) + gasPrice = b.eConfig.PriceDefault() + } + gasPrice = capGasPrice(gasPrice, maxGasPriceWei, b.eConfig.PriceMax()) + chainSpecificGasLimit, err = commonfee.ApplyMultiplier(gasLimit, b.eConfig.LimitMultiplier()) + return +} + +func (b *BlockHistoryEstimator) getGasPrice() *assets.Wei { + b.priceMu.RLock() + defer b.priceMu.RUnlock() + return b.gasPrice +} + +func (b *BlockHistoryEstimator) getBlockHistoryNumbers() (numsInHistory []int64) { + for _, b := range b.blocks { + numsInHistory = append(numsInHistory, b.Number) + } + return +} + +func (b *BlockHistoryEstimator) getTipCap() *assets.Wei { + b.priceMu.RLock() + defer b.priceMu.RUnlock() + return b.tipCap +} + +func (b *BlockHistoryEstimator) BumpLegacyGas(_ context.Context, originalGasPrice *assets.Wei, gasLimit uint32, maxGasPriceWei *assets.Wei, attempts []EvmPriorAttempt) (bumpedGasPrice *assets.Wei, chainSpecificGasLimit uint32, err error) { + if b.bhConfig.CheckInclusionBlocks() > 0 { + if err = b.checkConnectivity(attempts); err != nil { + if errors.Is(err, commonfee.ErrConnectivity) { + b.logger.Criticalw(BumpingHaltedLabel, "err", err) + b.SvcErrBuffer.Append(err) + promBlockHistoryEstimatorConnectivityFailureCount.WithLabelValues(b.chainID.String(), "legacy").Inc() + } + return nil, 0, err + } + } + return BumpLegacyGasPriceOnly(b.eConfig, b.logger, b.getGasPrice(), originalGasPrice, gasLimit, maxGasPriceWei) +} + +// checkConnectivity detects if the transaction is not being 
included due to +// some kind of mempool propagation or connectivity issue rather than +// insufficiently high pricing and returns error if so +func (b *BlockHistoryEstimator) checkConnectivity(attempts []EvmPriorAttempt) error { + percentile := int(b.bhConfig.CheckInclusionPercentile()) + // how many blocks since broadcast? + latestBlockNum := b.getCurrentBlockNum() + if latestBlockNum == nil { + b.logger.Warn("Latest block is unknown; skipping inclusion check") + // can't determine anything if we don't have/know latest block num yet + return nil + } + expectInclusionWithinBlocks := int(b.bhConfig.CheckInclusionBlocks()) + blockHistory := b.getBlocks() + if len(blockHistory) < expectInclusionWithinBlocks { + b.logger.Warnf("Block history in memory with length %d is insufficient to determine whether transaction should have been included within the past %d blocks", len(blockHistory), b.bhConfig.CheckInclusionBlocks()) + return nil + } + for _, attempt := range attempts { + if attempt.BroadcastBeforeBlockNum == nil { + // this shouldn't happen; any broadcast attempt ought to have a + // BroadcastBeforeBlockNum otherwise its an assumption violation + return errors.Errorf("BroadcastBeforeBlockNum was unexpectedly nil for attempt %s", attempt.TxHash) + } + broadcastBeforeBlockNum := *attempt.BroadcastBeforeBlockNum + blocksSinceBroadcast := *latestBlockNum - broadcastBeforeBlockNum + if blocksSinceBroadcast < int64(expectInclusionWithinBlocks) { + // only check attempts that have been waiting around longer than + // CheckInclusionBlocks + continue + } + // has not been included for at least the required number of blocks + b.logger.Debugw(fmt.Sprintf("transaction %s has been pending inclusion for %d blocks which equals or exceeds expected specified check inclusion blocks of %d", attempt.TxHash, blocksSinceBroadcast, expectInclusionWithinBlocks), "broadcastBeforeBlockNum", broadcastBeforeBlockNum, "latestBlockNum", *latestBlockNum) + // is the price in the right 
percentile for all of these blocks? + var blocks []evmtypes.Block + l := expectInclusionWithinBlocks + // reverse order since we want to go highest -> lowest block number and bail out early + for i := l - 1; i >= 0; i-- { + block := blockHistory[i] + if block.Number < broadcastBeforeBlockNum { + break + } + blocks = append(blocks, block) + } + var eip1559 bool + switch attempt.TxType { + case 0x0, 0x1: + eip1559 = false + case 0x2: + eip1559 = true + default: + return errors.Errorf("attempt %s has unknown transaction type 0x%d", attempt.TxHash, attempt.TxType) + } + gasPrice, tipCap, err := b.calculatePercentilePrices(blocks, percentile, eip1559, nil, nil) + if err != nil { + if errors.Is(err, ErrNoSuitableTransactions) { + b.logger.Warnf("no suitable transactions found to verify if transaction %s has been included within expected inclusion blocks of %d", attempt.TxHash, expectInclusionWithinBlocks) + return nil + } + b.logger.AssumptionViolationw("unexpected error while verifying transaction inclusion", "err", err, "txHash", attempt.TxHash.String()) + return nil + } + if !eip1559 { + if attempt.GasPrice.Cmp(gasPrice) > 0 { + return errors.Wrapf(commonfee.ErrConnectivity, "transaction %s has gas price of %s, which is above percentile=%d%% (percentile price: %s) for blocks %d thru %d (checking %d blocks)", attempt.TxHash, attempt.GasPrice, percentile, gasPrice, blockHistory[l-1].Number, blockHistory[0].Number, expectInclusionWithinBlocks) + } + continue + } + sufficientFeeCap := true + for _, b := range blocks { + // feecap must >= tipcap+basefee for the block, otherwise there + // is no way this could have been included, and we must bail + // out of the check + attemptFeeCap := attempt.DynamicFee.FeeCap + attemptTipCap := attempt.DynamicFee.TipCap + if attemptFeeCap.Cmp(attemptTipCap.Add(b.BaseFeePerGas)) < 0 { + sufficientFeeCap = false + break + } + } + if sufficientFeeCap && attempt.DynamicFee.TipCap.Cmp(tipCap) > 0 { + return 
errors.Wrapf(commonfee.ErrConnectivity, "transaction %s has tip cap of %s, which is above percentile=%d%% (percentile tip cap: %s) for blocks %d thru %d (checking %d blocks)", attempt.TxHash, attempt.DynamicFee.TipCap, percentile, tipCap, blockHistory[l-1].Number, blockHistory[0].Number, expectInclusionWithinBlocks) + } + } + return nil +} + +func (b *BlockHistoryEstimator) GetDynamicFee(_ context.Context, gasLimit uint32, maxGasPriceWei *assets.Wei) (fee DynamicFee, chainSpecificGasLimit uint32, err error) { + if !b.eConfig.EIP1559DynamicFees() { + return fee, 0, errors.New("Can't get dynamic fee, EIP1559 is disabled") + } + + var feeCap *assets.Wei + var tipCap *assets.Wei + ok := b.IfStarted(func() { + chainSpecificGasLimit, err = commonfee.ApplyMultiplier(gasLimit, b.eConfig.LimitMultiplier()) + if err != nil { + return + } + b.priceMu.RLock() + defer b.priceMu.RUnlock() + tipCap = b.tipCap + if tipCap == nil { + if !b.initialFetch.Load() { + err = errors.New("BlockHistoryEstimator has not finished the first gas estimation yet, likely because a failure on start") + return + } + b.logger.Warnw("Failed to estimate gas price. 
This is likely because there aren't any valid transactions to estimate from."+ + "Using Evm.GasEstimator.TipCapDefault as fallback.", "blocks", b.getBlockHistoryNumbers()) + tipCap = b.eConfig.TipCapDefault() + } + maxGasPrice := getMaxGasPrice(maxGasPriceWei, b.eConfig.PriceMax()) + if b.eConfig.BumpThreshold() == 0 { + // just use the max gas price if gas bumping is disabled + feeCap = maxGasPrice + } else if b.getCurrentBaseFee() != nil { + // HACK: due to a flaw of how EIP-1559 is implemented we have to + // set a much lower FeeCap than the actual maximum we are willing + // to pay in order to give ourselves headroom for bumping + // See: https://github.com/ethereum/go-ethereum/issues/24284 + feeCap = calcFeeCap(b.getCurrentBaseFee(), int(b.bhConfig.EIP1559FeeCapBufferBlocks()), tipCap, maxGasPrice) + } else { + // This shouldn't happen on EIP-1559 blocks, since if the tip cap + // is set, Start must have succeeded and we would expect an initial + // base fee to be set as well + err = errors.New("BlockHistoryEstimator: no value for latest block base fee; cannot estimate EIP-1559 base fee. 
Are you trying to run with EIP1559 enabled on a non-EIP1559 chain?") + return + } + }) + if !ok { + return fee, 0, errors.New("BlockHistoryEstimator is not started; cannot estimate gas") + } + if err != nil { + return fee, 0, err + } + fee.FeeCap = feeCap + fee.TipCap = tipCap + return +} + +func calcFeeCap(latestAvailableBaseFeePerGas *assets.Wei, bufferBlocks int, tipCap *assets.Wei, maxGasPriceWei *assets.Wei) (feeCap *assets.Wei) { + const maxBaseFeeIncreasePerBlock float64 = 1.125 + + baseFee := new(big.Float) + baseFee.SetInt(latestAvailableBaseFeePerGas.ToInt()) + // Find out the worst case base fee before we should bump + multiplier := big.NewFloat(maxBaseFeeIncreasePerBlock) + for i := 0; i < bufferBlocks; i++ { + baseFee.Mul(baseFee, multiplier) + } + + baseFeeInt, _ := baseFee.Int(nil) + feeCap = assets.NewWei(baseFeeInt.Add(baseFeeInt, tipCap.ToInt())) + + if feeCap.Cmp(maxGasPriceWei) > 0 { + return maxGasPriceWei + } + return feeCap +} + +func (b *BlockHistoryEstimator) BumpDynamicFee(_ context.Context, originalFee DynamicFee, originalGasLimit uint32, maxGasPriceWei *assets.Wei, attempts []EvmPriorAttempt) (bumped DynamicFee, chainSpecificGasLimit uint32, err error) { + if b.bhConfig.CheckInclusionBlocks() > 0 { + if err = b.checkConnectivity(attempts); err != nil { + if errors.Is(err, commonfee.ErrConnectivity) { + b.logger.Criticalw(BumpingHaltedLabel, "err", err) + b.SvcErrBuffer.Append(err) + promBlockHistoryEstimatorConnectivityFailureCount.WithLabelValues(b.chainID.String(), "eip1559").Inc() + } + return bumped, 0, err + } + } + return BumpDynamicFeeOnly(b.eConfig, b.bhConfig.EIP1559FeeCapBufferBlocks(), b.logger, b.getTipCap(), b.getCurrentBaseFee(), originalFee, originalGasLimit, maxGasPriceWei) +} + +func (b *BlockHistoryEstimator) runLoop() { + defer b.wg.Done() + for { + select { + case <-b.ctx.Done(): + return + case <-b.mb.Notify(): + head, exists := b.mb.Retrieve() + if !exists { + b.logger.Debug("No head to retrieve") + continue + } + 
b.FetchBlocksAndRecalculate(b.ctx, head) + } + } +} + +// FetchBlocksAndRecalculate fetches block history leading up to head and recalculates gas price. +func (b *BlockHistoryEstimator) FetchBlocksAndRecalculate(ctx context.Context, head *evmtypes.Head) { + if err := b.FetchBlocks(ctx, head); err != nil { + b.logger.Warnw("Error fetching blocks", "head", head, "err", err) + return + } + b.initialFetch.Store(true) + b.Recalculate(head) +} + +// Recalculate adds the given heads to the history and recalculates gas price. +func (b *BlockHistoryEstimator) Recalculate(head *evmtypes.Head) { + percentile := int(b.bhConfig.TransactionPercentile()) + + lggr := b.logger.With("head", head) + + blockHistory := b.getBlocks() + if len(blockHistory) == 0 { + lggr.Debug("No blocks in history, cannot set gas price") + return + } + + l := mathutil.Min(len(blockHistory), int(b.bhConfig.BlockHistorySize())) + blocks := blockHistory[:l] + + eip1559 := b.eConfig.EIP1559DynamicFees() + percentileGasPrice, percentileTipCap, err := b.calculatePercentilePrices(blocks, percentile, eip1559, + func(gasPrices []*assets.Wei) { + for i := 0; i <= 100; i += 5 { + jdx := ((len(gasPrices) - 1) * i) / 100 + promBlockHistoryEstimatorAllGasPricePercentiles.WithLabelValues(fmt.Sprintf("%v%%", i), b.chainID.String()).Set(float64(gasPrices[jdx].Int64())) + } + }, func(tipCaps []*assets.Wei) { + for i := 0; i <= 100; i += 5 { + jdx := ((len(tipCaps) - 1) * i) / 100 + promBlockHistoryEstimatorAllTipCapPercentiles.WithLabelValues(fmt.Sprintf("%v%%", i), b.chainID.String()).Set(float64(tipCaps[jdx].Int64())) + } + }) + if err != nil { + if errors.Is(err, ErrNoSuitableTransactions) { + lggr.Debug("No suitable transactions, skipping") + } else { + lggr.Warnw("Cannot calculate percentile prices", "err", err) + } + return + } + + var numsInHistory []int64 + for _, b := range blockHistory { + numsInHistory = append(numsInHistory, b.Number) + } + + float := new(big.Float).SetInt(percentileGasPrice.ToInt()) + gwei, 
_ := big.NewFloat(0).Quo(float, big.NewFloat(1000000000)).Float64() + gasPriceGwei := fmt.Sprintf("%.2f", gwei) + + lggrFields := []interface{}{ + "gasPriceWei", percentileGasPrice, + "gasPriceGWei", gasPriceGwei, + "maxGasPriceWei", b.eConfig.PriceMax(), + "headNum", head.Number, + "blocks", numsInHistory, + } + b.setPercentileGasPrice(percentileGasPrice) + promBlockHistoryEstimatorSetGasPrice.WithLabelValues(fmt.Sprintf("%v%%", percentile), b.chainID.String()).Set(float64(percentileGasPrice.Int64())) + + if !eip1559 { + lggr.Debugw(fmt.Sprintf("Setting new default gas price: %v Gwei", gasPriceGwei), lggrFields...) + return + } + float = new(big.Float).SetInt(percentileTipCap.ToInt()) + gwei, _ = big.NewFloat(0).Quo(float, big.NewFloat(1000000000)).Float64() + tipCapGwei := fmt.Sprintf("%.2f", gwei) + lggrFields = append(lggrFields, []interface{}{ + "tipCapWei", percentileTipCap, + "tipCapGwei", tipCapGwei, + }...) + lggr.Debugw(fmt.Sprintf("Setting new default prices, GasPrice: %v Gwei, TipCap: %v Gwei", gasPriceGwei, tipCapGwei), lggrFields...) + b.setPercentileTipCap(percentileTipCap) + promBlockHistoryEstimatorSetTipCap.WithLabelValues(fmt.Sprintf("%v%%", percentile), b.chainID.String()).Set(float64(percentileTipCap.Int64())) +} + +// FetchBlocks fetches block history leading up to the given head. +func (b *BlockHistoryEstimator) FetchBlocks(ctx context.Context, head *evmtypes.Head) error { + // HACK: blockDelay is the number of blocks that the block history estimator trails behind head. + // E.g. if this is set to 3, and we receive block 10, block history estimator will + // fetch block 7. + // This is necessary because geth/parity send heads as soon as they get + // them and often the actual block is not available until later. Fetching + // it too early results in an empty block. 
+ blockDelay := int64(b.bhConfig.BlockDelay()) + historySize := b.size + + if historySize <= 0 { + return errors.Errorf("BlockHistoryEstimator: history size must be > 0, got: %d", historySize) + } + + highestBlockToFetch := head.Number - blockDelay + if highestBlockToFetch < 0 { + return errors.Errorf("BlockHistoryEstimator: cannot fetch, current block height %v is lower than EVM.RPCBlockQueryDelay=%v", head.Number, blockDelay) + } + lowestBlockToFetch := head.Number - historySize - blockDelay + 1 + if lowestBlockToFetch < 0 { + lowestBlockToFetch = 0 + } + + blocks := make(map[int64]evmtypes.Block) + for _, block := range b.getBlocks() { + // Make a best-effort to be re-org resistant using the head + // chain, refetch blocks that got re-org'd out. + // NOTE: Any blocks in the history that are older than the oldest block + // in the provided chain will be assumed final. + if block.Number < head.EarliestInChain().BlockNumber() { + blocks[block.Number] = block + } else if head.IsInChain(block.Hash) { + blocks[block.Number] = block + } + } + + var reqs []rpc.BatchElem + // Fetch blocks in reverse order so if it times out halfway through we bias + // towards more recent blocks + for i := highestBlockToFetch; i >= lowestBlockToFetch; i-- { + // NOTE: To save rpc calls, don't fetch blocks we already have in the history + if _, exists := blocks[i]; exists { + continue + } + + req := rpc.BatchElem{ + Method: "eth_getBlockByNumber", + Args: []interface{}{Int64ToHex(i), true}, + Result: &evmtypes.Block{}, + } + reqs = append(reqs, req) + } + + lggr := b.logger.With("head", head) + + lggr.Tracew(fmt.Sprintf("Fetching %v blocks (%v in local history)", len(reqs), len(blocks)), "n", len(reqs), "inHistory", len(blocks), "blockNum", head.Number) + if err := b.batchFetch(ctx, reqs); err != nil { + return err + } + + var missingBlocks []int64 + for _, req := range reqs { + result, err := req.Result, req.Error + if err != nil { + if errors.Is(err, evmtypes.ErrMissingBlock) { + num := 
HexToInt64(req.Args[0]) + missingBlocks = append(missingBlocks, num) + lggr.Debugw( + fmt.Sprintf("Failed to fetch block: RPC node returned a missing block on query for block number %d even though the WS subscription already sent us this block. It might help to increase EVM.RPCBlockQueryDelay (currently %d)", + num, blockDelay, + ), + "err", err, "blockNum", num, "headNum", head.Number) + } else { + lggr.Warnw("Failed to fetch block", "err", err, "blockNum", HexToInt64(req.Args[0]), "headNum", head.Number) + } + continue + } + + block, is := result.(*evmtypes.Block) + if !is { + return errors.Errorf("expected result to be a %T, got %T", &evmtypes.Block{}, result) + } + if block == nil { + return errors.New("invariant violation: got nil block") + } + if block.Hash == (common.Hash{}) { + lggr.Warnw("Block was missing hash", "block", b, "headNum", head.Number, "blockNum", block.Number) + continue + } + + blocks[block.Number] = *block + } + + if len(missingBlocks) > 1 { + lggr.Errorw( + fmt.Sprintf("RPC node returned multiple missing blocks on query for block numbers %v even though the WS subscription already sent us these blocks. 
It might help to increase EVM.RPCBlockQueryDelay (currently %d)",
			missingBlocks, blockDelay,
		),
			"blockNums", missingBlocks, "headNum", head.Number)
	}

	// Rebuild the rolling history: collect the map values into a slice
	// sorted by ascending block number, then keep only the newest
	// historySize blocks.
	newBlockHistory := make([]evmtypes.Block, 0, len(blocks))

	for _, block := range blocks {
		newBlockHistory = append(newBlockHistory, block)
	}
	sort.Slice(newBlockHistory, func(i, j int) bool {
		return newBlockHistory[i].Number < newBlockHistory[j].Number
	})

	start := len(newBlockHistory) - int(historySize)
	if start < 0 {
		lggr.Debugw(fmt.Sprintf("Using fewer blocks than the specified history size: %v/%v", len(newBlockHistory), historySize), "blocksSize", historySize, "headNum", head.Number, "blocksAvailable", len(newBlockHistory))
		start = 0
	}

	b.blocksMu.Lock()
	b.blocks = newBlockHistory[start:]
	b.blocksMu.Unlock()

	return nil
}

// batchFetch executes the given eth_getBlockByNumber requests in chunks of
// at most bhConfig.BatchSize() elements (a configured size of 0 means "send
// everything in a single batch"). If a chunk exceeds the context deadline,
// the requests in that chunk and all later ones are marked with an error and
// batchFetch returns nil, so callers can still use whatever results were
// fetched before the timeout.
func (b *BlockHistoryEstimator) batchFetch(ctx context.Context, reqs []rpc.BatchElem) error {
	batchSize := int(b.bhConfig.BatchSize())

	if batchSize == 0 {
		batchSize = len(reqs)
	}

	for i := 0; i < len(reqs); i += batchSize {
		j := i + batchSize
		if j > len(reqs) {
			j = len(reqs)
		}

		b.logger.Tracew(fmt.Sprintf("Batch fetching blocks %v thru %v", HexToInt64(reqs[i].Args[0]), HexToInt64(reqs[j-1].Args[0])))

		err := b.ethClient.BatchCallContext(ctx, reqs[i:j])
		if errors.Is(err, context.DeadlineExceeded) {
			// We ran out of time: mark this chunk as failed and every
			// later request as skipped, then return what we have.
			b.logger.Warnf("Batch fetching timed out; loaded %d/%d results", i, len(reqs))
			for k := i; k < len(reqs); k++ {
				if k < j {
					reqs[k].Error = errors.Wrap(err, "request failed")
				} else {
					reqs[k].Error = errors.Wrap(err, "request skipped; previous request exceeded deadline")
				}
			}
			return nil
		} else if err != nil {
			return errors.Wrap(err, "BlockHistoryEstimator#fetchBlocks error fetching blocks with BatchCallContext")
		}
	}
	return nil
}

var (
	// ErrNoSuitableTransactions is returned when the supplied block history
	// yields no transactions usable for estimation.
	ErrNoSuitableTransactions = errors.New("no suitable transactions")
)

// calculatePercentilePrices returns the requested percentile of the sorted
// effective gas prices (and, in EIP-1559 mode, tip caps) sampled from all
// usable transactions in blocks. f and f2, when non-nil, are invoked with
// the sorted sample slices (callers use these hooks for metrics). Returns
// ErrNoSuitableTransactions when no usable samples were found.
func (b *BlockHistoryEstimator) calculatePercentilePrices(blocks []evmtypes.Block, percentile int, eip1559 bool, f func(gasPrices []*assets.Wei), f2 func(tipCaps []*assets.Wei)) (gasPrice, tipCap *assets.Wei, err error) {
	gasPrices, tipCaps := b.getPricesFromBlocks(blocks, eip1559)
	if len(gasPrices) == 0 {
		return nil, nil, ErrNoSuitableTransactions
	}
	sort.Slice(gasPrices, func(i, j int) bool { return gasPrices[i].Cmp(gasPrices[j]) < 0 })
	if f != nil {
		f(gasPrices)
	}
	gasPrice = gasPrices[((len(gasPrices)-1)*percentile)/100]

	if !eip1559 {
		return
	}
	if len(tipCaps) == 0 {
		return nil, nil, ErrNoSuitableTransactions
	}
	sort.Slice(tipCaps, func(i, j int) bool { return tipCaps[i].Cmp(tipCaps[j]) < 0 })
	if f2 != nil {
		f2(tipCaps)
	}
	tipCap = tipCaps[((len(tipCaps)-1)*percentile)/100]

	return
}

// getPricesFromBlocks extracts effective gas price samples (and, in EIP-1559
// mode, tip cap samples) from every usable transaction in blocks. Blocks
// that fail verification and transactions whose price cannot be determined
// are skipped with a warning.
func (b *BlockHistoryEstimator) getPricesFromBlocks(blocks []evmtypes.Block, eip1559 bool) (gasPrices, tipCaps []*assets.Wei) {
	gasPrices = make([]*assets.Wei, 0)
	tipCaps = make([]*assets.Wei, 0)
	for _, block := range blocks {
		if err := verifyBlock(block, eip1559); err != nil {
			b.logger.Warnw(fmt.Sprintf("Block %v is not usable, %s", block.Number, err.Error()), "block", block, "err", err)
			// FIX: a block that failed verification must not contribute
			// samples; previously execution fell through and its
			// transactions were still processed despite the block being
			// logged as "not usable".
			continue
		}
		for _, tx := range block.Transactions {
			if b.IsUsable(tx, block, b.config.ChainType(), b.eConfig.PriceMin(), b.logger) {
				gp := b.EffectiveGasPrice(block, tx)
				if gp == nil {
					b.logger.Warnw("Unable to get gas price for tx", "tx", tx, "block", block)
					continue
				}
				gasPrices = append(gasPrices, gp)
				if !eip1559 {
					continue
				}
				tc := b.EffectiveTipCap(block, tx)
				if tc == nil {
					b.logger.Warnw("Unable to get tip cap for tx", "tx", tx, "block", block)
					continue
				}
				tipCaps = append(tipCaps, tc)
			}
		}
	}
	return
}

// verifyBlock checks that block carries the fields required by the current
// estimation mode (EIP-1559 mode requires baseFeePerGas to be present).
func verifyBlock(block evmtypes.Block, eip1559 bool) error {
	if eip1559 && block.BaseFeePerGas == nil {
		return errors.New("EIP-1559 mode was enabled, but block was missing baseFeePerGas")
	}
	return nil
}

// setPercentileTipCap stores the calculated tip cap under lock, clamped to
// the [EVM.GasEstimator.TipCapMin, EVM.GasEstimator.PriceMax] range.
func (b *BlockHistoryEstimator) setPercentileTipCap(tipCap *assets.Wei) {
	max := b.eConfig.PriceMax()
	min := b.eConfig.TipCapMin()

	b.priceMu.Lock()
	defer b.priceMu.Unlock()
	if tipCap.Cmp(max) > 0 {
		b.logger.Warnw(fmt.Sprintf("Calculated gas tip cap of %s exceeds EVM.GasEstimator.PriceMax=%[2]s, setting gas tip cap to the maximum allowed value of %[2]s instead", tipCap.String(), max.String()), "tipCapWei", tipCap, "minTipCapWei", min, "maxTipCapWei", max)
		b.tipCap = max
	} else if tipCap.Cmp(min) < 0 {
		b.logger.Warnw(fmt.Sprintf("Calculated gas tip cap of %s falls below EVM.GasEstimator.TipCapMin=%[2]s, setting gas tip cap to the minimum allowed value of %[2]s instead", tipCap.String(), min.String()), "tipCapWei", tipCap, "minTipCapWei", min, "maxTipCapWei", max)
		b.tipCap = min
	} else {
		b.tipCap = tipCap
	}
}

// setPercentileGasPrice stores the calculated gas price under lock, clamped
// to the [EVM.GasEstimator.PriceMin, EVM.GasEstimator.PriceMax] range.
func (b *BlockHistoryEstimator) setPercentileGasPrice(gasPrice *assets.Wei) {
	max := b.eConfig.PriceMax()
	min := b.eConfig.PriceMin()

	b.priceMu.Lock()
	defer b.priceMu.Unlock()
	if gasPrice.Cmp(max) > 0 {
		b.logger.Warnw(fmt.Sprintf("Calculated gas price of %s exceeds EVM.GasEstimator.PriceMax=%[2]s, setting gas price to the maximum allowed value of %[2]s instead", gasPrice.String(), max.String()), "gasPriceWei", gasPrice, "maxGasPriceWei", max)
		b.gasPrice = max
	} else if gasPrice.Cmp(min) < 0 {
		b.logger.Warnw(fmt.Sprintf("Calculated gas price of %s falls below EVM.Transactions.PriceMin=%[2]s, setting gas price to the minimum allowed value of %[2]s instead", gasPrice.String(), min.String()), "gasPriceWei", gasPrice, "minGasPriceWei", min)
		b.gasPrice = min
	} else {
		b.gasPrice = gasPrice
	}
}

// isUsable returns true if the tx is usable both generally and specifically for
// this Config.
// IsUsable reports whether tx may contribute a sample to the gas price
// statistics, both generally and for this chain's configuration.
func (b *BlockHistoryEstimator) IsUsable(tx evmtypes.Transaction, block evmtypes.Block, chainType config.ChainType, minGasPrice *assets.Wei, lggr logger.Logger) bool {
	// GasLimit 0 is impossible on Ethereum official, but IS possible
	// on forks/clones such as RSK. We should ignore these transactions
	// if they come up on any chain since they are not normal.
	if tx.GasLimit == 0 {
		return false
	}
	// NOTE: This really shouldn't be possible, but at least one node op has
	// reported it happening on mainnet so we need to handle this case
	if tx.GasPrice == nil && tx.Type == 0x0 {
		lggr.Debugw("Ignoring transaction that was unexpectedly missing gas price", "tx", tx)
		return false
	}
	// Chain-specific filtering (e.g. minimum price rules) is delegated to
	// chainSpecificIsUsable.
	return chainSpecificIsUsable(tx, block.BaseFeePerGas, chainType, minGasPrice)
}

// EffectiveGasPrice returns the price per unit of gas actually paid by tx
// when included in block, or nil if it cannot be determined.
func (b *BlockHistoryEstimator) EffectiveGasPrice(block evmtypes.Block, tx evmtypes.Transaction) *assets.Wei {
	switch tx.Type {
	case 0x0, 0x1:
		// Legacy and access-list transactions pay their declared gas price.
		return tx.GasPrice
	case 0x2:
		if block.BaseFeePerGas == nil || tx.MaxPriorityFeePerGas == nil || tx.MaxFeePerGas == nil {
			b.logger.Warnw("Got transaction type 0x2 but one of the required EIP1559 fields was missing, falling back to gasPrice", "block", block, "tx", tx)
			return tx.GasPrice
		}
		if tx.GasPrice != nil {
			// Always use the gas price if provided
			return tx.GasPrice
		}
		// Sanity-check the EIP-1559 invariants before computing; violations
		// indicate bad node data, so the sample is discarded (nil).
		if tx.MaxFeePerGas.Cmp(block.BaseFeePerGas) < 0 {
			b.logger.AssumptionViolationw("MaxFeePerGas >= BaseFeePerGas", "block", block, "tx", tx)
			return nil
		}
		if tx.MaxFeePerGas.Cmp(tx.MaxPriorityFeePerGas) < 0 {
			b.logger.AssumptionViolationw("MaxFeePerGas >= MaxPriorityFeePerGas", "block", block, "tx", tx)
			return nil
		}

		// From: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md
		// effective price = baseFee + min(maxPriorityFee, maxFee - baseFee)
		priorityFeePerGas := tx.MaxPriorityFeePerGas
		maxFeeMinusBaseFee := tx.MaxFeePerGas.Sub(block.BaseFeePerGas)
		if maxFeeMinusBaseFee.Cmp(priorityFeePerGas) < 0 {
			priorityFeePerGas = maxFeeMinusBaseFee
		}

		effectiveGasPrice :=
priorityFeePerGas.Add(block.BaseFeePerGas)
		return effectiveGasPrice
	default:
		b.logger.Warnw(fmt.Sprintf("Ignoring unknown transaction type %v", tx.Type), "block", block, "tx", tx)
		return nil
	}
}

// EffectiveTipCap returns the tip per unit of gas paid to the miner by tx
// when included in block: the declared MaxPriorityFeePerGas for dynamic-fee
// (0x2) transactions, or gasPrice - baseFeePerGas for legacy/access-list
// transactions. Returns nil when it cannot be determined.
func (b *BlockHistoryEstimator) EffectiveTipCap(block evmtypes.Block, tx evmtypes.Transaction) *assets.Wei {
	switch tx.Type {
	case 0x2:
		return tx.MaxPriorityFeePerGas
	case 0x0, 0x1:
		if tx.GasPrice == nil {
			return nil
		}
		if block.BaseFeePerGas == nil {
			return nil
		}
		effectiveTipCap := tx.GasPrice.Sub(block.BaseFeePerGas)
		if effectiveTipCap.IsNegative() {
			// A negative tip means the node fed us inconsistent data;
			// discard the sample.
			b.logger.AssumptionViolationw("GasPrice - BaseFeePerGas may not be negative", "block", block, "tx", tx)
			return nil
		}
		return effectiveTipCap
	default:
		b.logger.Warnw(fmt.Sprintf("Ignoring unknown transaction type %v", tx.Type), "block", block, "tx", tx)
		return nil
	}
}
diff --git a/core/chains/evm/gas/block_history_estimator_test.go b/core/chains/evm/gas/block_history_estimator_test.go
new file mode 100644
index 00000000..1a4034a8
--- /dev/null
+++ b/core/chains/evm/gas/block_history_estimator_test.go
@@ -0,0 +1,2560 @@
package gas_test

import (
	"context"
	"encoding/json"
	"fmt"
	"math"
	"math/big"
	"reflect"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zapcore"

	"github.com/goplugin/plugin-common/pkg/logger"
	"github.com/goplugin/pluginv3.0/v2/common/config"
	commonfee "github.com/goplugin/pluginv3.0/v2/common/fee"
	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"
	evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client"
	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas"
	evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types"
	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils"
	ubig
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big"
	"github.com/goplugin/pluginv3.0/v2/core/internal/cltest"
	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest"
)

// NewEvmHash returns a fresh hash fixture via utils.NewHash().
func NewEvmHash() common.Hash {
	return utils.NewHash()
}

// newBlockHistoryConfig returns a mock block-history config with a default
// history size of 8.
func newBlockHistoryConfig() *gas.MockBlockHistoryConfig {
	c := &gas.MockBlockHistoryConfig{}
	c.BlockHistorySizeF = 8
	return c
}

// newBlockHistoryEstimatorWithChainID constructs the estimator under test
// for the given chain ID.
func newBlockHistoryEstimatorWithChainID(t *testing.T, c evmclient.Client, cfg gas.Config, gCfg gas.GasEstimatorConfig, bhCfg gas.BlockHistoryConfig, cid big.Int) gas.EvmEstimator {
	return gas.NewBlockHistoryEstimator(logger.Test(t), c, cfg, gCfg, bhCfg, cid)
}

// newBlockHistoryEstimator constructs the estimator under test using the
// fixture chain ID and unwraps it to its concrete type.
func newBlockHistoryEstimator(t *testing.T, c evmclient.Client, cfg gas.Config, gCfg gas.GasEstimatorConfig, bhCfg gas.BlockHistoryConfig) *gas.BlockHistoryEstimator {
	iface := newBlockHistoryEstimatorWithChainID(t, c, cfg, gCfg, bhCfg, cltest.FixtureChainID)
	return gas.BlockHistoryEstimatorFromInterface(iface)
}

func TestBlockHistoryEstimator_Start(t *testing.T) {
	t.Parallel()

	cfg := gas.NewMockConfig()
	geCfg := &gas.MockGasEstimatorConfig{}
	geCfg.EIP1559DynamicFeesF = true

	bhCfg := newBlockHistoryConfig()

	var batchSize uint32
	var blockDelay uint16
	var historySize uint16 = 2
	var percentile uint16 = 35
	minGasPrice := assets.NewWeiI(1)
	maxGasPrice := assets.NewWeiI(100)

	geCfg.LimitMultiplierF = float32(1)
	geCfg.PriceMinF = minGasPrice
	geCfg.PriceMaxF = maxGasPrice

	bhCfg.BatchSizeF = batchSize
	bhCfg.BlockDelayF = blockDelay
	bhCfg.BlockHistorySizeF = historySize
	bhCfg.TransactionPercentileF = percentile

	t.Run("loads initial state", func(t *testing.T) {
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)

		bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg)

		h := &evmtypes.Head{Hash: utils.NewHash(), Number: 42, BaseFeePerGas: assets.NewWeiI(420)}
		ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h, nil)
		// Expect blocks 0x2a (42) and 0x29 (41) to be fetched in one batch.
		ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool {
			return len(b) == 2 &&
				b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == "0x2a" && b[0].Args[1].(bool) && reflect.TypeOf(b[0].Result) == reflect.TypeOf(&evmtypes.Block{}) &&
				b[1].Method == "eth_getBlockByNumber" && b[1].Args[0] == "0x29" && b[1].Args[1].(bool) && reflect.TypeOf(b[1].Result) == reflect.TypeOf(&evmtypes.Block{})
		})).Return(nil).Run(func(args mock.Arguments) {
			elems := args.Get(1).([]rpc.BatchElem)
			elems[0].Result = &evmtypes.Block{
				Number: 42,
				Hash:   utils.NewHash(),
			}
			elems[1].Result = &evmtypes.Block{
				Number: 41,
				Hash:   utils.NewHash(),
			}
		}).Once()

		err := bhe.Start(testutils.Context(t))
		require.NoError(t, err)

		assert.Len(t, gas.GetRollingBlockHistory(bhe), 2)
		assert.Equal(t, int(gas.GetRollingBlockHistory(bhe)[0].Number), 41)
		assert.Equal(t, int(gas.GetRollingBlockHistory(bhe)[1].Number), 42)

		assert.Equal(t, assets.NewWeiI(420), gas.GetLatestBaseFee(bhe))
	})

	t.Run("starts and loads partial history if fetch context times out", func(t *testing.T) {
		geCfg2 := &gas.MockGasEstimatorConfig{}
		geCfg2.EIP1559DynamicFeesF = true
		geCfg2.LimitMultiplierF = float32(1)
		geCfg2.PriceMinF = minGasPrice

		bhCfg2 := newBlockHistoryConfig()
		bhCfg2.BatchSizeF = uint32(1)
		bhCfg2.BlockDelayF = blockDelay
		bhCfg2.BlockHistorySizeF = historySize
		bhCfg2.TransactionPercentileF = percentile

		cfg2 := gas.NewMockConfig()
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)

		bhe := newBlockHistoryEstimator(t, ethClient, cfg2, geCfg2, bhCfg2)

		h := &evmtypes.Head{Hash: utils.NewHash(), Number: 42, BaseFeePerGas: assets.NewWeiI(420)}
		ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h, nil)
		// First succeeds (42)
		ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool {
			return len(b) == 1 &&
				b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == gas.Int64ToHex(42) && b[0].Args[1].(bool) && reflect.TypeOf(b[0].Result) == reflect.TypeOf(&evmtypes.Block{})
		})).Return(nil).Run(func(args mock.Arguments) {
			elems := args.Get(1).([]rpc.BatchElem)
			elems[0].Result = &evmtypes.Block{
				Number: 42,
				Hash:   utils.NewHash(),
			}
		}).Once()
		// Second fails (41)
		ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool {
			return len(b) == 1 &&
				b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == gas.Int64ToHex(41) && b[0].Args[1].(bool) && reflect.TypeOf(b[0].Result) == reflect.TypeOf(&evmtypes.Block{})
		})).Return(errors.Wrap(context.DeadlineExceeded, "some error message")).Once()

		err := bhe.Start(testutils.Context(t))
		require.NoError(t, err)

		require.Len(t, gas.GetRollingBlockHistory(bhe), 1)
		assert.Equal(t, int(gas.GetRollingBlockHistory(bhe)[0].Number), 42)

		assert.Equal(t, assets.NewWeiI(420), gas.GetLatestBaseFee(bhe))
	})

	t.Run("boots even if initial batch call returns nothing", func(t *testing.T) {
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)

		bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg)

		h := &evmtypes.Head{Hash: utils.NewHash(), Number: 42}
		ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h, nil)
		ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool {
			return len(b) == int(historySize)
		})).Return(nil)

		err := bhe.Start(testutils.Context(t))
		require.NoError(t, err)

		// non-eip1559 block
		assert.Nil(t, gas.GetLatestBaseFee(bhe))
	})

	t.Run("starts anyway if fetching latest head fails", func(t *testing.T) {
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)

		bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg)

		ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(nil, errors.New("something exploded"))

		err := bhe.Start(testutils.Context(t))
		require.NoError(t, err)

		assert.Nil(t, gas.GetLatestBaseFee(bhe))

		_, _, err = bhe.GetLegacyGas(testutils.Context(t), make([]byte, 0), 100, maxGasPrice)
		require.Error(t, err)
		require.Contains(t, err.Error(), "has not finished the first gas estimation yet, likely because a failure on start")

		_, _, err = bhe.GetDynamicFee(testutils.Context(t), 100, maxGasPrice)
		require.Error(t, err)
		require.Contains(t, err.Error(), "has not finished the first gas estimation yet, likely because a failure on start")
	})

	t.Run("starts anyway if fetching first fetch fails, but errors on estimation", func(t *testing.T) {
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)

		bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg)

		h := &evmtypes.Head{Hash: utils.NewHash(), Number: 42, BaseFeePerGas: assets.NewWeiI(420)}
		ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h, nil)
		ethClient.On("BatchCallContext", mock.Anything, mock.Anything).Return(errors.New("something went wrong"))

		err := bhe.Start(testutils.Context(t))
		require.NoError(t, err)

		assert.Equal(t, assets.NewWeiI(420), gas.GetLatestBaseFee(bhe))

		_, _, err = bhe.GetLegacyGas(testutils.Context(t), make([]byte, 0), 100, maxGasPrice)
		require.Error(t, err)
		require.Contains(t, err.Error(), "has not finished the first gas estimation yet, likely because a failure on start")

		_, _, err = bhe.GetDynamicFee(testutils.Context(t), 100, maxGasPrice)
		require.Error(t, err)
		require.Contains(t, err.Error(), "has not finished the first gas estimation yet, likely because a failure on start")
	})

	t.Run("returns error if main context is cancelled", func(t *testing.T) {
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)

		bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg)

		h := &evmtypes.Head{Hash: utils.NewHash(), Number: 42, BaseFeePerGas: assets.NewWeiI(420)}
		ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h, nil)
		ethClient.On("BatchCallContext", mock.Anything, mock.Anything).Return(errors.New("this error doesn't matter"))

		ctx, cancel := context.WithCancel(testutils.Context(t))
		cancel()
		err := bhe.Start(ctx)
		require.Error(t, err)
		assert.Contains(t, err.Error(), "context canceled")
	})

	t.Run("starts anyway even if the fetch context is cancelled due to taking longer than the MaxStartTime", func(t *testing.T) {
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)

		bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg)

		h := &evmtypes.Head{Hash: utils.NewHash(), Number: 42, BaseFeePerGas: assets.NewWeiI(420)}
		ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h, nil)
		ethClient.On("BatchCallContext", mock.Anything, mock.Anything).Return(errors.New("this error doesn't matter")).Run(func(_ mock.Arguments) {
			time.Sleep(gas.MaxStartTime + 1*time.Second)
		})

		err := bhe.Start(testutils.Context(t))
		require.NoError(t, err)

		assert.Equal(t, assets.NewWeiI(420), gas.GetLatestBaseFee(bhe))

		_, _, err = bhe.GetLegacyGas(testutils.Context(t), make([]byte, 0), 100, maxGasPrice)
		require.Error(t, err)
		require.Contains(t, err.Error(), "has not finished the first gas estimation yet, likely because a failure on start")

		_, _, err = bhe.GetDynamicFee(testutils.Context(t), 100, maxGasPrice)
		require.Error(t, err)
		require.Contains(t, err.Error(), "has not finished the first gas estimation yet, likely because a failure on start")
	})
}

func TestBlockHistoryEstimator_OnNewLongestChain(t *testing.T) {
	cfg := gas.NewMockConfig()
	bhCfg := newBlockHistoryConfig()
	geCfg := &gas.MockGasEstimatorConfig{}
	geCfg.EIP1559DynamicFeesF = false

	bhe := newBlockHistoryEstimator(t, nil, cfg, geCfg, bhCfg)

	assert.Nil(t, gas.GetLatestBaseFee(bhe))

	// non EIP-1559 block
	h := cltest.Head(1)
	bhe.OnNewLongestChain(testutils.Context(t), h)
	assert.Nil(t, gas.GetLatestBaseFee(bhe))

	// EIP-1559 block
	h = cltest.Head(2)
	h.BaseFeePerGas = assets.NewWeiI(500)
	bhe.OnNewLongestChain(testutils.Context(t), h)

	assert.Equal(t, assets.NewWeiI(500), gas.GetLatestBaseFee(bhe))
}

func TestBlockHistoryEstimator_FetchBlocks(t *testing.T) {
	t.Parallel()

	t.Run("with history size of 0, errors", func(t *testing.T) {
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
		cfg := gas.NewMockConfig()

		bhCfg := newBlockHistoryConfig()
		var blockDelay uint16 = 3
		var historySize uint16
		bhCfg.BlockDelayF = blockDelay
		bhCfg.BlockHistorySizeF = historySize

		geCfg := &gas.MockGasEstimatorConfig{}
		geCfg.EIP1559DynamicFeesF = true

		bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg)

		head := cltest.Head(42)
		err := bhe.FetchBlocks(testutils.Context(t), head)
		require.Error(t, err)
		require.EqualError(t, err, "BlockHistoryEstimator: history size must be > 0, got: 0")
	})

	t.Run("with current block height less than block delay does nothing", func(t *testing.T) {
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
		cfg := gas.NewMockConfig()
		bhCfg := newBlockHistoryConfig()
		var blockDelay uint16 = 3
		var historySize uint16 = 1
		bhCfg.BlockDelayF = blockDelay
		bhCfg.BlockHistorySizeF = historySize

		geCfg := &gas.MockGasEstimatorConfig{}
		geCfg.EIP1559DynamicFeesF = true

		bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg)

		for i := -1; i < 3; i++ {
			head := cltest.Head(i)
			err := bhe.FetchBlocks(testutils.Context(t), head)
			require.Error(t, err)
			require.EqualError(t, err, fmt.Sprintf("BlockHistoryEstimator: cannot fetch, current block height %v is lower than EVM.RPCBlockQueryDelay=3", i))
		}
	})

	t.Run("with error retrieving blocks returns error", func(t *testing.T) {
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
		cfg := gas.NewMockConfig()
		bhCfg := newBlockHistoryConfig()
		var blockDelay uint16 = 3
+ var historySize uint16 = 3 + var batchSize uint32 + bhCfg.BlockDelayF = blockDelay + bhCfg.BlockHistorySizeF = historySize + bhCfg.BatchSizeF = batchSize + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + ethClient.On("BatchCallContext", mock.Anything, mock.Anything).Return(errors.New("something exploded")) + + err := bhe.FetchBlocks(testutils.Context(t), cltest.Head(42)) + require.Error(t, err) + assert.EqualError(t, err, "BlockHistoryEstimator#fetchBlocks error fetching blocks with BatchCallContext: something exploded") + }) + + t.Run("batch fetches heads and transactions and sets them on the block history estimator instance", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + var blockDelay uint16 + var historySize uint16 = 3 + var batchSize uint32 = 2 + bhCfg.BlockDelayF = blockDelay + bhCfg.BlockHistorySizeF = historySize + // Test batching + bhCfg.BatchSizeF = batchSize + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + b41 := evmtypes.Block{ + Number: 41, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(1, 2), + } + b42 := evmtypes.Block{ + Number: 42, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(3), + } + b43 := evmtypes.Block{ + Number: 43, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(), + } + + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == gas.Int64ToHex(43) && b[0].Args[1].(bool) && reflect.TypeOf(b[0].Result) == reflect.TypeOf(&evmtypes.Block{}) && + b[1].Method == "eth_getBlockByNumber" && b[1].Args[0] == gas.Int64ToHex(42) && 
b[1].Args[1].(bool) && reflect.TypeOf(b[1].Result) == reflect.TypeOf(&evmtypes.Block{}) + })).Once().Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &b43 + // This errored block (42) will be ignored + elems[1].Error = errors.New("something went wrong") + }) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && + b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == gas.Int64ToHex(41) && b[0].Args[1].(bool) && reflect.TypeOf(b[0].Result) == reflect.TypeOf(&evmtypes.Block{}) + })).Once().Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &b41 + }) + + err := bhe.FetchBlocks(testutils.Context(t), cltest.Head(43)) + require.NoError(t, err) + + require.Len(t, gas.GetRollingBlockHistory(bhe), 2) + assert.Equal(t, 41, int(gas.GetRollingBlockHistory(bhe)[0].Number)) + // 42 is missing because the fetch errored + assert.Equal(t, 43, int(gas.GetRollingBlockHistory(bhe)[1].Number)) + assert.Len(t, gas.GetRollingBlockHistory(bhe)[0].Transactions, 2) + assert.Len(t, gas.GetRollingBlockHistory(bhe)[1].Transactions, 0) + + // On new fetch, rolls over the history and drops the old heads + + b44 := evmtypes.Block{ + Number: 44, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(4), + } + + // We are gonna refetch blocks 42 and 44 + // 43 is skipped because it was already in the history + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == gas.Int64ToHex(44) && b[0].Args[1].(bool) && reflect.TypeOf(b[0].Result) == reflect.TypeOf(&evmtypes.Block{}) && + b[1].Method == "eth_getBlockByNumber" && b[1].Args[0] == gas.Int64ToHex(42) && b[1].Args[1].(bool) && reflect.TypeOf(b[1].Result) == reflect.TypeOf(&evmtypes.Block{}) + })).Once().Return(nil).Run(func(args 
mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &b44 + elems[1].Result = &b42 + }) + + head := evmtypes.NewHead(big.NewInt(44), b44.Hash, b43.Hash, uint64(time.Now().Unix()), ubig.New(&cltest.FixtureChainID)) + err = bhe.FetchBlocks(testutils.Context(t), &head) + require.NoError(t, err) + + require.Len(t, gas.GetRollingBlockHistory(bhe), 3) + assert.Equal(t, 42, int(gas.GetRollingBlockHistory(bhe)[0].Number)) + assert.Equal(t, 43, int(gas.GetRollingBlockHistory(bhe)[1].Number)) + assert.Equal(t, 44, int(gas.GetRollingBlockHistory(bhe)[2].Number)) + assert.Len(t, gas.GetRollingBlockHistory(bhe)[0].Transactions, 1) + assert.Len(t, gas.GetRollingBlockHistory(bhe)[1].Transactions, 0) + assert.Len(t, gas.GetRollingBlockHistory(bhe)[2].Transactions, 1) + }) + + t.Run("does not refetch blocks below EVM.FinalityDepth", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + var blockDelay uint16 + var historySize uint16 = 3 + var batchSize uint32 = 2 + bhCfg.BlockDelayF = blockDelay + bhCfg.BlockHistorySizeF = historySize + bhCfg.BatchSizeF = batchSize + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + b0 := evmtypes.Block{ + Number: 0, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(9001), + } + b1 := evmtypes.Block{ + Number: 1, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(9002), + } + blocks := []evmtypes.Block{b0, b1} + + gas.SetRollingBlockHistory(bhe, blocks) + + b2 := evmtypes.Block{ + Number: 2, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(1, 2), + } + b3 := evmtypes.Block{ + Number: 3, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(1, 2), + } + + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b 
[]rpc.BatchElem) bool { + return len(b) == 2 && + b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == gas.Int64ToHex(3) && b[0].Args[1].(bool) && reflect.TypeOf(b[0].Result) == reflect.TypeOf(&evmtypes.Block{}) && + b[1].Method == "eth_getBlockByNumber" && b[1].Args[0] == gas.Int64ToHex(2) && b[1].Args[1].(bool) && reflect.TypeOf(b[1].Result) == reflect.TypeOf(&evmtypes.Block{}) + })).Once().Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &b3 + elems[1].Result = &b2 + }) + + head2 := evmtypes.NewHead(big.NewInt(2), b2.Hash, b1.Hash, uint64(time.Now().Unix()), ubig.New(&cltest.FixtureChainID)) + head3 := evmtypes.NewHead(big.NewInt(3), b3.Hash, b2.Hash, uint64(time.Now().Unix()), ubig.New(&cltest.FixtureChainID)) + head3.Parent = &head2 + err := bhe.FetchBlocks(testutils.Context(t), &head3) + require.NoError(t, err) + + require.Len(t, gas.GetRollingBlockHistory(bhe), 3) + assert.Equal(t, 1, int(gas.GetRollingBlockHistory(bhe)[0].Number)) + assert.Equal(t, 2, int(gas.GetRollingBlockHistory(bhe)[1].Number)) + assert.Equal(t, 3, int(gas.GetRollingBlockHistory(bhe)[2].Number)) + }) + + t.Run("replaces blocks on re-org within EVM.FinalityDepth", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + var blockDelay uint16 + var historySize uint16 = 3 + var batchSize uint32 = 2 + bhCfg.BlockDelayF = blockDelay + bhCfg.BlockHistorySizeF = historySize + bhCfg.BatchSizeF = batchSize + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + b0 := evmtypes.Block{ + Number: 0, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(9001), + } + b1 := evmtypes.Block{ + Number: 1, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(9002), + } + b2 := evmtypes.Block{ + Number: 2, + Hash: 
utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(1, 2), + } + b3 := evmtypes.Block{ + Number: 3, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(1, 2), + } + blocks := []evmtypes.Block{b0, b1, b2, b3} + + gas.SetRollingBlockHistory(bhe, blocks) + + // RE-ORG, head2 and head3 have different hash than saved b2 and b3 + head2 := evmtypes.NewHead(big.NewInt(2), utils.NewHash(), b1.Hash, uint64(time.Now().Unix()), ubig.New(&cltest.FixtureChainID)) + head3 := evmtypes.NewHead(big.NewInt(3), utils.NewHash(), head2.Hash, uint64(time.Now().Unix()), ubig.New(&cltest.FixtureChainID)) + head3.Parent = &head2 + + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == gas.Int64ToHex(3) && b[0].Args[1].(bool) && reflect.TypeOf(b[0].Result) == reflect.TypeOf(&evmtypes.Block{}) && + b[1].Method == "eth_getBlockByNumber" && b[1].Args[0] == gas.Int64ToHex(2) && b[1].Args[1].(bool) && reflect.TypeOf(b[1].Result) == reflect.TypeOf(&evmtypes.Block{}) + })).Once().Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + b2New := b2 + b2New.Hash = head2.Hash + elems[1].Result = &b2New + b3New := b3 + b3New.Hash = head3.Hash + elems[0].Result = &b3New + }) + + err := bhe.FetchBlocks(testutils.Context(t), &head3) + require.NoError(t, err) + + require.Len(t, gas.GetRollingBlockHistory(bhe), 3) + assert.Equal(t, 1, int(gas.GetRollingBlockHistory(bhe)[0].Number)) + assert.Equal(t, 2, int(gas.GetRollingBlockHistory(bhe)[1].Number)) + assert.Equal(t, 3, int(gas.GetRollingBlockHistory(bhe)[2].Number)) + assert.Equal(t, b1.Hash.Hex(), gas.GetRollingBlockHistory(bhe)[0].Hash.Hex()) + assert.Equal(t, head2.Hash.Hex(), gas.GetRollingBlockHistory(bhe)[1].Hash.Hex()) + assert.Equal(t, head3.Hash.Hex(), gas.GetRollingBlockHistory(bhe)[2].Hash.Hex()) + }) + + t.Run("uses locally cached blocks if they 
are in the chain", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + var blockDelay uint16 + var historySize uint16 = 3 + var batchSize uint32 = 2 + bhCfg := newBlockHistoryConfig() + bhCfg.BlockDelayF = blockDelay + bhCfg.BlockHistorySizeF = historySize + bhCfg.BatchSizeF = batchSize + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + b0 := evmtypes.Block{ + Number: 0, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(9001), + } + b1 := evmtypes.Block{ + Number: 1, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(9002), + } + b2 := evmtypes.Block{ + Number: 2, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(1, 2), + } + b3 := evmtypes.Block{ + Number: 3, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(1, 2), + } + blocks := []evmtypes.Block{b0, b1, b2, b3} + + gas.SetRollingBlockHistory(bhe, blocks) + + // head2 and head3 have identical hash to saved blocks + head2 := evmtypes.NewHead(big.NewInt(2), b2.Hash, b1.Hash, uint64(time.Now().Unix()), ubig.New(&cltest.FixtureChainID)) + head3 := evmtypes.NewHead(big.NewInt(3), b3.Hash, head2.Hash, uint64(time.Now().Unix()), ubig.New(&cltest.FixtureChainID)) + head3.Parent = &head2 + + err := bhe.FetchBlocks(testutils.Context(t), &head3) + require.NoError(t, err) + + require.Len(t, gas.GetRollingBlockHistory(bhe), 3) + assert.Equal(t, 1, int(gas.GetRollingBlockHistory(bhe)[0].Number)) + assert.Equal(t, 2, int(gas.GetRollingBlockHistory(bhe)[1].Number)) + assert.Equal(t, 3, int(gas.GetRollingBlockHistory(bhe)[2].Number)) + assert.Equal(t, b1.Hash.Hex(), gas.GetRollingBlockHistory(bhe)[0].Hash.Hex()) + assert.Equal(t, head2.Hash.Hex(), gas.GetRollingBlockHistory(bhe)[1].Hash.Hex()) + assert.Equal(t, head3.Hash.Hex(), 
gas.GetRollingBlockHistory(bhe)[2].Hash.Hex()) + }) + + t.Run("fetches max(BlockHistoryEstimatorCheckInclusionBlocks, BlockHistoryEstimatorBlockHistorySize)", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + var blockDelay uint16 + var historySize uint16 = 1 + var batchSize uint32 = 2 + var checkInclusionBlocks uint16 = 2 + bhCfg := newBlockHistoryConfig() + bhCfg.BlockDelayF = blockDelay + bhCfg.BlockHistorySizeF = historySize + bhCfg.BatchSizeF = batchSize + bhCfg.CheckInclusionBlocksF = checkInclusionBlocks + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + b42 := evmtypes.Block{ + Number: 42, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(3), + } + b43 := evmtypes.Block{ + Number: 43, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(), + } + + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == gas.Int64ToHex(43) && b[0].Args[1].(bool) && reflect.TypeOf(b[0].Result) == reflect.TypeOf(&evmtypes.Block{}) && + b[1].Method == "eth_getBlockByNumber" && b[1].Args[0] == gas.Int64ToHex(42) && b[1].Args[1].(bool) && reflect.TypeOf(b[1].Result) == reflect.TypeOf(&evmtypes.Block{}) + })).Once().Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &b43 + elems[1].Result = &b42 + }) + + err := bhe.FetchBlocks(testutils.Context(t), cltest.Head(43)) + require.NoError(t, err) + + require.Len(t, gas.GetRollingBlockHistory(bhe), 2) + assert.Equal(t, 42, int(gas.GetRollingBlockHistory(bhe)[0].Number)) + assert.Equal(t, 43, int(gas.GetRollingBlockHistory(bhe)[1].Number)) + assert.Len(t, gas.GetRollingBlockHistory(bhe)[0].Transactions, 1) + assert.Len(t, 
gas.GetRollingBlockHistory(bhe)[1].Transactions, 0) + }) +} + +func TestBlockHistoryEstimator_FetchBlocksAndRecalculate_NoEIP1559(t *testing.T) { + t.Parallel() + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + bhCfg.BlockDelayF = uint16(0) + bhCfg.TransactionPercentileF = uint16(35) + bhCfg.BlockHistorySizeF = uint16(3) + bhCfg.BatchSizeF = uint32(0) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + geCfg.PriceMaxF = assets.NewWeiI(1000) + geCfg.PriceMinF = assets.NewWeiI(0) + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + b1 := evmtypes.Block{ + Number: 1, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(1), + } + b2 := evmtypes.Block{ + Number: 2, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(2), + } + b3 := evmtypes.Block{ + Number: 3, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(200, 300, 100, 100, 100, 100), + } + + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 3 && + b[0].Args[0] == "0x3" && + b[1].Args[0] == "0x2" && + b[2].Args[0] == "0x1" + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &b3 + elems[1].Result = &b2 + elems[2].Result = &b1 + }) + + bhe.FetchBlocksAndRecalculate(testutils.Context(t), cltest.Head(3)) + + price := gas.GetGasPrice(bhe) + require.Equal(t, assets.NewWeiI(100), price) + + assert.Len(t, gas.GetRollingBlockHistory(bhe), 3) +} + +func TestBlockHistoryEstimator_Recalculate_NoEIP1559(t *testing.T) { + t.Parallel() + + maxGasPrice := assets.NewWeiI(100) + minGasPrice := assets.NewWeiI(10) + + t.Run("does not crash or set gas price to zero if there are no transactions", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + cfg := gas.NewMockConfig() + 
bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(35) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + blocks := []evmtypes.Block{} + gas.SetRollingBlockHistory(bhe, blocks) + bhe.Recalculate(cltest.Head(1)) + + blocks = []evmtypes.Block{evmtypes.Block{}} + gas.SetRollingBlockHistory(bhe, blocks) + bhe.Recalculate(cltest.Head(1)) + + blocks = []evmtypes.Block{evmtypes.Block{Transactions: []evmtypes.Transaction{}}} + gas.SetRollingBlockHistory(bhe, blocks) + bhe.Recalculate(cltest.Head(1)) + }) + + t.Run("sets gas price to EVM.GasEstimator.PriceMax if the calculation would otherwise exceed it", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(35) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + geCfg.PriceMaxF = maxGasPrice + geCfg.PriceMinF = minGasPrice + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + blocks := []evmtypes.Block{ + evmtypes.Block{ + Number: 0, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(9001), + }, + evmtypes.Block{ + Number: 1, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(9002), + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(1)) + + price := gas.GetGasPrice(bhe) + require.Equal(t, maxGasPrice, price) + }) + + t.Run("sets gas price to EVM.Transactions.PriceMin if the calculation would otherwise fall below it", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(35) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + geCfg.PriceMaxF = maxGasPrice + geCfg.PriceMinF = 
minGasPrice + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + blocks := []evmtypes.Block{ + evmtypes.Block{ + Number: 0, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(5), + }, + evmtypes.Block{ + Number: 1, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(7), + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(1)) + + price := gas.GetGasPrice(bhe) + require.Equal(t, minGasPrice, price) + }) + + t.Run("ignores any transaction with a zero gas limit", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(100) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + geCfg.PriceMaxF = maxGasPrice + geCfg.PriceMinF = minGasPrice + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + b1Hash := utils.NewHash() + b2Hash := utils.NewHash() + + blocks := []evmtypes.Block{ + { + Number: 0, + Hash: b1Hash, + ParentHash: common.Hash{}, + Transactions: cltest.LegacyTransactionsFromGasPrices(50), + }, + { + Number: 1, + Hash: b2Hash, + ParentHash: b1Hash, + Transactions: []evmtypes.Transaction{evmtypes.Transaction{GasPrice: assets.NewWeiI(70), GasLimit: 42}}, + }, + { + Number: 2, + Hash: utils.NewHash(), + ParentHash: b2Hash, + Transactions: []evmtypes.Transaction{evmtypes.Transaction{GasPrice: assets.NewWeiI(90), GasLimit: 0}}, + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(2)) + + price := gas.GetGasPrice(bhe) + require.Equal(t, assets.NewWeiI(70), price) + }) + + t.Run("takes into account zero priced transactions if chain is not xDai", func(t *testing.T) { + // Because everyone loves free gas! 
+ ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(50) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + geCfg.PriceMaxF = maxGasPrice + geCfg.PriceMinF = assets.NewWeiI(0) + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + b1Hash := utils.NewHash() + + blocks := []evmtypes.Block{ + evmtypes.Block{ + Number: 0, + Hash: b1Hash, + ParentHash: common.Hash{}, + Transactions: cltest.LegacyTransactionsFromGasPrices(0, 0, 0, 0, 100), + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(0)) + + price := gas.GetGasPrice(bhe) + require.Equal(t, assets.NewWeiI(0), price) + }) + + t.Run("ignores zero priced transactions on xDai", func(t *testing.T) { + chainID := big.NewInt(100) + + ethClient := evmtest.NewEthClientMock(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(50) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + geCfg.PriceMaxF = maxGasPrice + geCfg.PriceMinF = assets.NewWeiI(100) + + ibhe := newBlockHistoryEstimatorWithChainID(t, ethClient, cfg, geCfg, bhCfg, *chainID) + bhe := gas.BlockHistoryEstimatorFromInterface(ibhe) + + b1Hash := utils.NewHash() + + blocks := []evmtypes.Block{ + evmtypes.Block{ + Number: 0, + Hash: b1Hash, + ParentHash: common.Hash{}, + Transactions: cltest.LegacyTransactionsFromGasPrices(0, 0, 0, 0, 100), + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(0)) + + price := gas.GetGasPrice(bhe) + require.Equal(t, assets.NewWeiI(100), price) + }) + + t.Run("handles unreasonably large gas prices (larger than a 64 bit int can hold)", func(t *testing.T) { + // Seems unlikely we will ever experience gas prices > 9 Petawei on mainnet (praying to the eth Gods 🙏) + // But other chains could easily use a different base of account + 
ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + reasonablyHugeGasPrice := assets.NewWeiI(1000).Mul(big.NewInt(math.MaxInt64)) + + bhCfg.TransactionPercentileF = uint16(50) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + geCfg.PriceMaxF = reasonablyHugeGasPrice + geCfg.PriceMinF = assets.NewWeiI(10) + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + unreasonablyHugeGasPrice := assets.NewWeiI(1000000).Mul(big.NewInt(math.MaxInt64)) + + b1Hash := utils.NewHash() + + blocks := []evmtypes.Block{ + evmtypes.Block{ + Number: 0, + Hash: b1Hash, + ParentHash: common.Hash{}, + Transactions: []evmtypes.Transaction{ + evmtypes.Transaction{GasPrice: assets.NewWeiI(50), GasLimit: 42}, + evmtypes.Transaction{GasPrice: unreasonablyHugeGasPrice, GasLimit: 42}, + evmtypes.Transaction{GasPrice: unreasonablyHugeGasPrice, GasLimit: 42}, + evmtypes.Transaction{GasPrice: unreasonablyHugeGasPrice, GasLimit: 42}, + evmtypes.Transaction{GasPrice: unreasonablyHugeGasPrice, GasLimit: 42}, + evmtypes.Transaction{GasPrice: unreasonablyHugeGasPrice, GasLimit: 42}, + evmtypes.Transaction{GasPrice: unreasonablyHugeGasPrice, GasLimit: 42}, + evmtypes.Transaction{GasPrice: unreasonablyHugeGasPrice, GasLimit: 42}, + evmtypes.Transaction{GasPrice: unreasonablyHugeGasPrice, GasLimit: 42}, + }, + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(0)) + + price := gas.GetGasPrice(bhe) + require.Equal(t, reasonablyHugeGasPrice, price) + }) + + t.Run("doesn't panic if gas price is nil (although I'm still unsure how this can happen)", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(50) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + geCfg.PriceMaxF = maxGasPrice + 
geCfg.PriceMinF = assets.NewWeiI(100) + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + b1Hash := utils.NewHash() + + blocks := []evmtypes.Block{ + evmtypes.Block{ + Number: 0, + Hash: b1Hash, + ParentHash: common.Hash{}, + Transactions: []evmtypes.Transaction{ + {GasPrice: nil, GasLimit: 42, Hash: utils.NewHash()}, + {GasPrice: assets.NewWeiI(100), GasLimit: 42, Hash: utils.NewHash()}, + }, + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(0)) + + price := gas.GetGasPrice(bhe) + require.Equal(t, assets.NewWeiI(100), price) + }) +} + +func newBlockWithBaseFee() evmtypes.Block { + return evmtypes.Block{BaseFeePerGas: assets.GWei(5)} +} + +func TestBlockHistoryEstimator_Recalculate_EIP1559(t *testing.T) { + t.Parallel() + + maxGasPrice := assets.NewWeiI(100) + + t.Run("does not crash or set gas price to zero if there are no transactions", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(35) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + blocks := []evmtypes.Block{} + gas.SetRollingBlockHistory(bhe, blocks) + bhe.Recalculate(cltest.Head(1)) + + blocks = []evmtypes.Block{evmtypes.Block{}} // No base fee (doesn't crash) + gas.SetRollingBlockHistory(bhe, blocks) + bhe.Recalculate(cltest.Head(1)) + + blocks = []evmtypes.Block{newBlockWithBaseFee()} + gas.SetRollingBlockHistory(bhe, blocks) + bhe.Recalculate(cltest.Head(1)) + + empty := newBlockWithBaseFee() + empty.Transactions = []evmtypes.Transaction{} + blocks = []evmtypes.Block{empty} + gas.SetRollingBlockHistory(bhe, blocks) + bhe.Recalculate(cltest.Head(1)) + + withOnlyLegacyTransactions := newBlockWithBaseFee() + withOnlyLegacyTransactions.Transactions = cltest.LegacyTransactionsFromGasPrices(9001) + blocks = 
[]evmtypes.Block{withOnlyLegacyTransactions} + gas.SetRollingBlockHistory(bhe, blocks) + bhe.Recalculate(cltest.Head(1)) + }) + + t.Run("does not set tip higher than EVM.GasEstimator.PriceMax", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(35) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + geCfg.PriceMaxF = maxGasPrice + geCfg.PriceMinF = assets.NewWeiI(0) + geCfg.TipCapMinF = assets.NewWeiI(0) + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + blocks := []evmtypes.Block{ + evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(1), + Number: 0, + Hash: utils.NewHash(), + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(9001), + }, + evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(1), + Number: 1, + Hash: utils.NewHash(), + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(9002), + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(1)) + + tipCap := gas.GetTipCap(bhe) + require.Equal(t, tipCap.Int64(), maxGasPrice.Int64()) + }) + + t.Run("sets tip cap to EVM.Transactions.PriceMin if the calculation would otherwise fall below it", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(35) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + geCfg.PriceMaxF = maxGasPrice + geCfg.PriceMinF = assets.NewWeiI(0) + geCfg.TipCapMinF = assets.NewWeiI(10) + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + blocks := []evmtypes.Block{ + evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(1), + Number: 0, + Hash: utils.NewHash(), + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(5), + }, + evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(1), + Number: 1, + Hash: 
utils.NewHash(), + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(7), + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(1)) + + price := gas.GetTipCap(bhe) + require.Equal(t, assets.NewWeiI(10), price) + }) + + t.Run("ignores any transaction with a zero gas limit", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(95) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + geCfg.PriceMaxF = maxGasPrice + geCfg.PriceMinF = assets.NewWeiI(0) + geCfg.TipCapMinF = assets.NewWeiI(10) + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + b1Hash := utils.NewHash() + b2Hash := utils.NewHash() + + blocks := []evmtypes.Block{ + { + Number: 0, + Hash: b1Hash, + ParentHash: common.Hash{}, + Transactions: cltest.LegacyTransactionsFromGasPrices(50), + }, + { + BaseFeePerGas: assets.NewWeiI(10), + Number: 1, + Hash: b2Hash, + ParentHash: b1Hash, + Transactions: []evmtypes.Transaction{evmtypes.Transaction{Type: 0x2, MaxFeePerGas: assets.NewWeiI(1000), MaxPriorityFeePerGas: assets.NewWeiI(60), GasLimit: 42}}, + }, + { + Number: 2, + Hash: utils.NewHash(), + ParentHash: b2Hash, + Transactions: []evmtypes.Transaction{evmtypes.Transaction{Type: 0x2, MaxFeePerGas: assets.NewWeiI(1000), MaxPriorityFeePerGas: assets.NewWeiI(80), GasLimit: 0}}, + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(2)) + + price := gas.GetTipCap(bhe) + require.Equal(t, assets.NewWeiI(60), price) + }) + + t.Run("respects minimum gas tip cap", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(35) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + geCfg.PriceMaxF = maxGasPrice + geCfg.PriceMinF = 
assets.NewWeiI(0) + geCfg.TipCapMinF = assets.NewWeiI(1) + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + b1Hash := utils.NewHash() + + blocks := []evmtypes.Block{ + evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(10), + Number: 0, + Hash: b1Hash, + ParentHash: common.Hash{}, + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(0, 0, 0, 0, 100), + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(0)) + + price := gas.GetTipCap(bhe) + assert.Equal(t, assets.NewWeiI(1), price) + }) + + t.Run("allows to set zero tip cap if minimum allows it", func(t *testing.T) { + // Because everyone loves *cheap* gas! + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(35) + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + geCfg.PriceMaxF = maxGasPrice + geCfg.PriceMinF = assets.NewWeiI(0) + geCfg.TipCapMinF = assets.NewWeiI(0) + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + b1Hash := utils.NewHash() + + blocks := []evmtypes.Block{ + evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(10), + Number: 0, + Hash: b1Hash, + ParentHash: common.Hash{}, + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(0, 0, 0, 0, 100), + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(0)) + + price := gas.GetTipCap(bhe) + require.Equal(t, assets.NewWeiI(0), price) + }) +} + +func TestBlockHistoryEstimator_IsUsable(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + block := evmtypes.Block{ + Number: 0, + Hash: utils.NewHash(), + ParentHash: common.Hash{}, + BaseFeePerGas: assets.NewWeiI(100), + } + t.Run("returns 
false if transaction has 0 gas limit", func(t *testing.T) { + tx := evmtypes.Transaction{Type: 0x0, GasPrice: assets.NewWeiI(10), GasLimit: 0, Hash: utils.NewHash()} + assert.Equal(t, false, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + }) + + t.Run("returns false if transaction gas limit is nil and tx type is 0x0", func(t *testing.T) { + tx := evmtypes.Transaction{Type: 0x0, GasPrice: nil, GasLimit: 42, Hash: utils.NewHash()} + assert.Equal(t, false, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + }) + + t.Run("returns false if transaction is of type 0x7e only on Optimism", func(t *testing.T) { + cfg.ChainTypeF = "optimismBedrock" + tx := evmtypes.Transaction{Type: 0x7e, GasPrice: assets.NewWeiI(10), GasLimit: 42, Hash: utils.NewHash()} + assert.Equal(t, false, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + + cfg.ChainTypeF = "" + assert.Equal(t, true, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + }) + + t.Run("returns false if transaction is of type 0x7c or 0x7b only on Celo", func(t *testing.T) { + cfg.ChainTypeF = "celo" + tx := evmtypes.Transaction{Type: 0x7c, GasPrice: assets.NewWeiI(10), GasLimit: 42, Hash: utils.NewHash()} + assert.Equal(t, false, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + + tx2 := evmtypes.Transaction{Type: 0x7b, GasPrice: assets.NewWeiI(10), GasLimit: 42, Hash: utils.NewHash()} + assert.Equal(t, false, bhe.IsUsable(tx2, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + + cfg.ChainTypeF = "" + assert.Equal(t, true, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + assert.Equal(t, true, bhe.IsUsable(tx2, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + }) + + t.Run("returns false if transaction is of type 0x16 only on WeMix", func(t *testing.T) { + cfg.ChainTypeF = "wemix" + tx := evmtypes.Transaction{Type: 0x16, GasPrice: 
assets.NewWeiI(10), GasLimit: 42, Hash: utils.NewHash()} + assert.Equal(t, false, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + }) + + t.Run("returns false if transaction has base fee higher than the gas price only on Celo", func(t *testing.T) { + cfg.ChainTypeF = "celo" + tx := evmtypes.Transaction{Type: 0x0, GasPrice: assets.NewWeiI(10), GasLimit: 42, Hash: utils.NewHash()} + assert.Equal(t, false, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + + tx2 := evmtypes.Transaction{Type: 0x2, MaxPriorityFeePerGas: assets.NewWeiI(200), MaxFeePerGas: assets.NewWeiI(250), + GasPrice: assets.NewWeiI(50), GasLimit: 42, Hash: utils.NewHash()} + assert.Equal(t, false, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + + cfg.ChainTypeF = "" + assert.Equal(t, true, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + assert.Equal(t, true, bhe.IsUsable(tx2, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + }) + + t.Run("returns false if transaction is of type 0x71 or 0xff only on zkSync", func(t *testing.T) { + cfg.ChainTypeF = string(config.ChainZkSync) + tx := evmtypes.Transaction{Type: 0x71, GasPrice: assets.NewWeiI(10), GasLimit: 42, Hash: utils.NewHash()} + assert.Equal(t, false, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + + tx.Type = 0x02 + assert.Equal(t, true, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + + tx.Type = 0xff + assert.Equal(t, false, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + + cfg.ChainTypeF = "" + assert.Equal(t, true, bhe.IsUsable(tx, block, cfg.ChainType(), geCfg.PriceMin(), logger.Test(t))) + }) +} + +func TestBlockHistoryEstimator_EffectiveTipCap(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + geCfg := &gas.MockGasEstimatorConfig{} + 
geCfg.EIP1559DynamicFeesF = true + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + block := evmtypes.Block{ + Number: 0, + Hash: utils.NewHash(), + ParentHash: common.Hash{}, + } + + eipblock := block + eipblock.BaseFeePerGas = assets.NewWeiI(100) + + t.Run("returns nil if block is missing base fee", func(t *testing.T) { + tx := evmtypes.Transaction{Type: 0x0, GasPrice: assets.NewWeiI(42), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveTipCap(block, tx) + assert.Nil(t, res) + }) + t.Run("legacy transaction type infers tip cap from tx.gas_price - block.base_fee_per_gas", func(t *testing.T) { + tx := evmtypes.Transaction{Type: 0x0, GasPrice: assets.NewWeiI(142), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveTipCap(eipblock, tx) + assert.Equal(t, "42 wei", res.String()) + }) + t.Run("tx type 2 should calculate gas price", func(t *testing.T) { + // 0x2 transaction (should use MaxPriorityFeePerGas) + tx := evmtypes.Transaction{Type: 0x2, MaxPriorityFeePerGas: assets.NewWeiI(200), MaxFeePerGas: assets.NewWeiI(250), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveTipCap(eipblock, tx) + assert.Equal(t, "200 wei", res.String()) + // 0x2 transaction (should use MaxPriorityFeePerGas, ignoring gas price) + tx = evmtypes.Transaction{Type: 0x2, GasPrice: assets.NewWeiI(400), MaxPriorityFeePerGas: assets.NewWeiI(200), MaxFeePerGas: assets.NewWeiI(350), GasLimit: 42, Hash: utils.NewHash()} + res = bhe.EffectiveTipCap(eipblock, tx) + assert.Equal(t, "200 wei", res.String()) + }) + t.Run("missing field returns nil", func(t *testing.T) { + tx := evmtypes.Transaction{Type: 0x2, GasPrice: assets.NewWeiI(132), MaxFeePerGas: assets.NewWeiI(200), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveTipCap(eipblock, tx) + assert.Nil(t, res) + }) + t.Run("unknown type returns nil", func(t *testing.T) { + tx := evmtypes.Transaction{Type: 0x3, GasPrice: assets.NewWeiI(55555), MaxPriorityFeePerGas: assets.NewWeiI(200), 
MaxFeePerGas: assets.NewWeiI(250), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveTipCap(eipblock, tx) + assert.Nil(t, res) + }) +} + +func TestBlockHistoryEstimator_EffectiveGasPrice(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + block := evmtypes.Block{ + Number: 0, + Hash: utils.NewHash(), + ParentHash: common.Hash{}, + } + + eipblock := block + eipblock.BaseFeePerGas = assets.NewWeiI(100) + + t.Run("legacy transaction type should use GasPrice", func(t *testing.T) { + tx := evmtypes.Transaction{Type: 0x0, GasPrice: assets.NewWeiI(42), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveGasPrice(eipblock, tx) + assert.Equal(t, "42 wei", res.String()) + tx = evmtypes.Transaction{Type: 0x0, GasLimit: 42, Hash: utils.NewHash()} + res = bhe.EffectiveGasPrice(eipblock, tx) + assert.Nil(t, res) + tx = evmtypes.Transaction{Type: 0x1, GasPrice: assets.NewWeiI(42), GasLimit: 42, Hash: utils.NewHash()} + res = bhe.EffectiveGasPrice(eipblock, tx) + assert.Equal(t, "42 wei", res.String()) + }) + t.Run("tx type 2 should calculate gas price", func(t *testing.T) { + // 0x2 transaction (should calculate to 250) + tx := evmtypes.Transaction{Type: 0x2, MaxPriorityFeePerGas: assets.NewWeiI(200), MaxFeePerGas: assets.NewWeiI(250), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveGasPrice(eipblock, tx) + assert.Equal(t, "250 wei", res.String()) + // 0x2 transaction (should calculate to 300) + tx = evmtypes.Transaction{Type: 0x2, MaxPriorityFeePerGas: assets.NewWeiI(200), MaxFeePerGas: assets.NewWeiI(350), GasLimit: 42, Hash: utils.NewHash()} + res = bhe.EffectiveGasPrice(eipblock, tx) + assert.Equal(t, "300 wei", res.String()) + // 0x2 transaction (should calculate to 300, ignoring gas price) + tx = 
evmtypes.Transaction{Type: 0x2, MaxPriorityFeePerGas: assets.NewWeiI(200), MaxFeePerGas: assets.NewWeiI(350), GasLimit: 42, Hash: utils.NewHash()} + res = bhe.EffectiveGasPrice(eipblock, tx) + assert.Equal(t, "300 wei", res.String()) + // 0x2 transaction (should fall back to gas price since MaxFeePerGas is missing) + tx = evmtypes.Transaction{Type: 0x2, GasPrice: assets.NewWeiI(32), MaxPriorityFeePerGas: assets.NewWeiI(200), GasLimit: 42, Hash: utils.NewHash()} + res = bhe.EffectiveGasPrice(eipblock, tx) + assert.Equal(t, "32 wei", res.String()) + }) + t.Run("tx type 2 has block missing base fee (should never happen but must handle gracefully)", func(t *testing.T) { + // 0x2 transaction (should calculate to 250) + tx := evmtypes.Transaction{Type: 0x2, GasPrice: assets.NewWeiI(55555), MaxPriorityFeePerGas: assets.NewWeiI(200), MaxFeePerGas: assets.NewWeiI(250), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveGasPrice(block, tx) + assert.Equal(t, "55.555 kwei", res.String()) + }) + t.Run("unknown type returns nil", func(t *testing.T) { + tx := evmtypes.Transaction{Type: 0x3, GasPrice: assets.NewWeiI(55555), MaxPriorityFeePerGas: assets.NewWeiI(200), MaxFeePerGas: assets.NewWeiI(250), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveGasPrice(block, tx) + assert.Nil(t, res) + }) + t.Run("Assumption violation of MaxFeePerGas >= BaseFeePerGas returns gas price if specified", func(t *testing.T) { + // Max: 1, Base: 100 + tx := evmtypes.Transaction{Type: 0x2, GasPrice: assets.NewWeiI(42), MaxPriorityFeePerGas: assets.NewWeiI(1), MaxFeePerGas: assets.NewWeiI(1), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveGasPrice(eipblock, tx) + assert.Equal(t, "42 wei", res.String()) + }) + t.Run("Assumption violation of MaxFeePerGas >= MaxPriorityFeePerGas returns gas price if specified", func(t *testing.T) { + // Max Priority: 201, Max: 200, Base: 100 + tx := evmtypes.Transaction{Type: 0x2, GasPrice: assets.NewWeiI(42), MaxPriorityFeePerGas: 
assets.NewWeiI(201), MaxFeePerGas: assets.NewWeiI(200), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveGasPrice(eipblock, tx) + assert.Equal(t, "42 wei", res.String()) + }) + t.Run("Assumption violation of MaxFeePerGas >= BaseFeePerGas returns nil if no gas price is specified", func(t *testing.T) { + // Max: 1, Base: 100 + tx := evmtypes.Transaction{Type: 0x2, MaxPriorityFeePerGas: assets.NewWeiI(1), MaxFeePerGas: assets.NewWeiI(1), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveGasPrice(eipblock, tx) + assert.Nil(t, res) + }) + t.Run("Assumption violation of MaxFeePerGas >= MaxPriorityFeePerGas returns nil if no gas price is specified", func(t *testing.T) { + // Max Priority: 201, Max: 200, Base: 100 + tx := evmtypes.Transaction{Type: 0x2, MaxPriorityFeePerGas: assets.NewWeiI(201), MaxFeePerGas: assets.NewWeiI(200), GasLimit: 42, Hash: utils.NewHash()} + res := bhe.EffectiveGasPrice(eipblock, tx) + assert.Nil(t, res) + }) + +} + +func TestBlockHistoryEstimator_Block_Unmarshal(t *testing.T) { + blockJSON := ` +{ + "author": "0x1438087186fdbfd4c256fa2df446921e30e54df8", + "difficulty": "0xfffffffffffffffffffffffffffffffd", + "extraData": "0xdb830302058c4f70656e457468657265756d86312e35312e30826c69", + "gasLimit": "0xbebc20", + "gasUsed": "0xbb58ce", + "hash": "0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071ea955c", + "logsBloom": "0x0004000021000004000020200088810004110800400030002140000020801020120020000000000108002087c030000a80402800001600080400000c00010002100001881002008000004809126000002802a0a801004001000012100000000010000000120000068000000010200800400000004400010400010098540440400044200020008480000000800040000000000c818000510002200c000020000400800221d20100000081800101840000080100041000002080080000408243424280020200680000000201224500000c120008000800220000800009080028088020400000000040002000400000046000000000400000000000000802008000", + "miner": "0x1438087186fdbfd4c256fa2df446921e30e54df8", + "number": "0xf47e79", + 
"parentHash": "0xb47ab3b1dc5c2c090dcecdc744a65a279ea6bb8dec11fb3c247df4cc2f584848", + "receiptsRoot": "0x6c0a0e448f63da4b6552333aaead47a9702cd5d08c9c42edbdc30622706c840b", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "signature": "0x30c7bfa28eceacb9f6b7c4acbb5b82e21792825ab20db8ecd3570b7e106f362b715b51e98f85aa9bb02e411fa1916c3cbb6a0ca34cc66d32e1142ec5282d829500", + "size": "0x10fd", + "stateRoot": "0x32cfd26ec2360c44797fc631c2e2d0395befb8369601bd16d482e3e7be4ebf2c", + "step": 324172559, + "totalDifficulty": "0xf47e78ffffffffffffffffffffffffebbb0678", + "timestamp": "0x609c674b", + "transactions": [ + { + "hash": "0x3f8e13d8c15d929bd3f7d99be94484eb82f328bbb76052c9464614c12f10b990", + "nonce": "0x2bb04", + "blockHash": "0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071ea955c", + "blockNumber": "0xf47e79", + "transactionIndex": "0x0", + "from": "0x1438087186fdbfd4c256fa2df446921e30e54df8", + "to": "0x5870b0527dedb1cfbd9534343feda1a41ce47766", + "value": "0x0", + "gasPrice": "0x1", + "gas": "0x1", + "data": "0x0b61ba8554b40c84fe2c9b5aad2fb692bdc00a9ba7f87d0abd35c68715bb347440c841d9000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000910411107ae9ec4e54f9b9e76d2a269a75dfab916c1edb866159e152e370f1ca8f72e95bf922fa069af9d532bef4fee8c89a401a501c622d763e4944ecacad16b4ace8dd0d532124b7c376cb5b04e63c4bf43b704eeb7ca822ec4258d8b0c2b2f5ef3680b858d15bcdf2f3632ad9e92963f37234c51f809981f3d4e34519d1f853408bbbe015e9572f9fcd55e9c0c38333ff000000000000000000000000000000", + "input": 
"0x0b61ba8554b40c84fe2c9b5aad2fb692bdc00a9ba7f87d0abd35c68715bb347440c841d9000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000910411107ae9ec4e54f9b9e76d2a269a75dfab916c1edb866159e152e370f1ca8f72e95bf922fa069af9d532bef4fee8c89a401a501c622d763e4944ecacad16b4ace8dd0d532124b7c376cb5b04e63c4bf43b704eeb7ca822ec4258d8b0c2b2f5ef3680b858d15bcdf2f3632ad9e92963f37234c51f809981f3d4e34519d1f853408bbbe015e9572f9fcd55e9c0c38333ff000000000000000000000000000000", + "v": "0xeb", + "s": "0x7bbc91758d2485a0d97e92bc4f0c226bf961c8aeb7db59d152206995937cd907", + "r": "0xe34e3a2a8f3159238dc843250d4ae0507d12ef49dec7bcf3057e6bd7b8560ae" + }, + { + "hash": "0x3f8e13d8c15d929bd3f7d99be94484eb82f328bbb76052c9464614c12f10b990", + "nonce": "0x2bb04", + "blockHash": "0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071ea955c", + "blockNumber": "0xf47e79", + "transactionIndex": "0x0", + "from": "0x1438087186fdbfd4c256fa2df446921e30e54df8", + "to": "0x5870b0527dedb1cfbd9534343feda1a41ce47766", + "value": "0x0", + "gasPrice": "0x0", + "gas": "0x0", + "data": "0x0b61ba8554b40c84fe2c9b5aad2fb692bdc00a9ba7f87d0abd35c68715bb347440c841d9000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000910411107ae9ec4e54f9b9e76d2a269a75dfab916c1edb866159e152e370f1ca8f72e95bf922fa069af9d532bef4fee8c89a401a501c622d763e4944ecacad16b4ace8dd0d532124b7c376cb5b04e63c4bf43b704eeb7ca822ec4258d8b0c2b2f5ef3680b858d15bcdf2f3632ad9e92963f37234c51f809981f3d4e34519d1f853408bbbe015e9572f9fcd55e9c0c38333ff000000000000000000000000000000", + "input": 
"0x0b61ba8554b40c84fe2c9b5aad2fb692bdc00a9ba7f87d0abd35c68715bb347440c841d9000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000910411107ae9ec4e54f9b9e76d2a269a75dfab916c1edb866159e152e370f1ca8f72e95bf922fa069af9d532bef4fee8c89a401a501c622d763e4944ecacad16b4ace8dd0d532124b7c376cb5b04e63c4bf43b704eeb7ca822ec4258d8b0c2b2f5ef3680b858d15bcdf2f3632ad9e92963f37234c51f809981f3d4e34519d1f853408bbbe015e9572f9fcd55e9c0c38333ff000000000000000000000000000000", + "type": "0x00", + "v": "0xeb", + "s": "0x7bbc91758d2485a0d97e92bc4f0c226bf961c8aeb7db59d152206995937cd907", + "r": "0xe34e3a2a8f3159238dc843250d4ae0507d12ef49dec7bcf3057e6bd7b8560ae" + }, + { + "hash": "0x238423bddc38e241f35ea3ed52cb096352c71d423b9ea3441937754f4edcb312", + "nonce": "0xb847", + "blockHash": "0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071ea955c", + "blockNumber": "0xf47e79", + "transactionIndex": "0x1", + "from": "0x25461d55ca1ddf4317160fd917192fe1d981b908", + "to": "0x5d9593586b4b5edbd23e7eba8d88fd8f09d83ebd", + "value": "0x0", + "gasPrice": "0x42725ae1000", + "gas": "0x1e8480", + "data": "0x893d242d000000000000000000000000eac6cee594edd353351babc145c624849bb70b1100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001e57396fe60670c00000000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000000", + "input": "0x893d242d000000000000000000000000eac6cee594edd353351babc145c624849bb70b1100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001e57396fe60670c00000000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000000", + "type": "0x00", + "v": "0xeb", + "s": "0x7f795b5cb15410b41c1518edc1aed2f1e984b8c93e357bdee79b23bba8dc841d", + "r": 
"0x958db39caa6dd066d3b010a4d9e6427399601738e0071470d822594e4565aa99" + } + ] +} +` + + var block evmtypes.Block + err := json.Unmarshal([]byte(blockJSON), &block) + assert.NoError(t, err) + + assert.Equal(t, int64(16023161), block.Number) + assert.Equal(t, common.HexToHash("0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071ea955c"), block.Hash) + assert.Equal(t, common.HexToHash("0xb47ab3b1dc5c2c090dcecdc744a65a279ea6bb8dec11fb3c247df4cc2f584848"), block.ParentHash) + + require.Len(t, block.Transactions, 3) + + assert.Equal(t, int64(1), block.Transactions[0].GasPrice.Int64()) + assert.Equal(t, uint32(1), block.Transactions[0].GasLimit) + + assert.Equal(t, int64(0), block.Transactions[1].GasPrice.Int64()) + assert.Equal(t, uint32(0), block.Transactions[1].GasLimit) + + assert.Equal(t, assets.NewWeiI(4566182400000), block.Transactions[2].GasPrice) + assert.Equal(t, uint32(2000000), block.Transactions[2].GasLimit) +} + +func TestBlockHistoryEstimator_EIP1559Block_Unmarshal(t *testing.T) { + blockJSON := ` +{ + "baseFeePerGas": "0xa1894585c", + "difficulty": "0x1cc4a2d7045f39", + "extraData": "0x73656f32", + "gasLimit": "0x1c9c380", + "gasUsed": "0x1c9c203", + "hash": "0x11ac873a6cd8b8b7b57ec1efe3984b706362aa5e8f5749a5ec9b1f64bb4615f0", + "logsBloom": "0x2b181cd7982005346543c60498149414cc92419055218c5111988a6c81c7560105c91c82ec3348283288c2187b0111407e28c08c4b45b4ea2e980893c050002588606218aa083c0c0824e46923b850d07048da924052828c26082c910663fac682070310ba3189bed51194261220990c2920cc434d042c06a1941158dfc91eeb572107e1c5595a0032051109c500ba42a093398850ad020b1118d41716d371286ba348e041685144210401078b8901281001e840290d0e9391c00138cf00120d92499ca250d3026003e13c1e10bac2a3a57499007a2213002714a2a2f24f24480d0539c30142f2ed09105d5b10038330ac1622cc188a00f0c3108801455882cc", + "miner": "0x3ecef08d0e2dad803847e052249bb4f8bff2d5bb", + "mixHash": "0x57f4a273c69c4028916abfaa57252035fb7e71ce8444034764b8988d9a89c7b6", + "nonce": "0x015e0d851f990730", + "number": "0xc65d68", + 
"parentHash": "0x1ae6168805dfd2e48311181774019c17fb09b24ab75dcad6566d18d38d5c4071", + "receiptsRoot": "0x3ced645d38426647aad078b8e4bc62ff03571a74b099c983133eb34808240309", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x2655", + "stateRoot": "0x073e7b70e9b1357329cbf0b19a10a981057a29accbafcc34d52b592dc0be9848", + "timestamp": "0x6112f709", + "totalDifficulty": "0x6171fd1e7626bc65d9b", + "transactions": [ + { + "blockHash": "0x11ac873a6cd8b8b7b57ec1efe3984b706362aa5e8f5749a5ec9b1f64bb4615f0", + "blockNumber": "0xc65d68", + "from": "0x305bf59bbd7a89ca9ce4d460b0efb54266d9e6c3", + "gas": "0xdbba0", + "gasPrice": "0x9f05f8ee00", + "hash": "0x8e58af889f4e831ef9a67df84058bcfb7090cbcb5c6f1046c211dafee6050944", + "input": "0xc18a84bc0000000000000000000000007ae132b71ddc6f4866fbf103be655830d9ca666c00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000124e94584ee00000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000bb0e17ef65f82ab018d8edd776e8dd940327b28b00000000000000000000000000000000000000000000002403ecad7d36e5bda0000000000000000000000000000000000000000000000000af7c8acfe5037ea80000000000000000000000000000000000000000000000000000000000c65d680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002bbb0e17ef65f82ab018d8edd776e8dd940327b28b000bb8c02aaa39b223fe8d0a0e5c4f27ead9083c756cc200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x6654", + "to": "0x4d246be90c2f36730bb853ad41d0a189061192d3", + "transactionIndex": "0x0", + "value": "0x0", + "type": "0x0", + "v": "0x25", + "r": "0x9f8af9e6424f264daaba992c09c2b38d05444cbb5e6bd5e26c965393e287c9fa", + "s": "0x76802388299eb0baa80a678831ef0722c5b1e1212f5eca26a5e911cb81388b2b" + }, + { + "blockHash": 
"0x11ac873a6cd8b8b7b57ec1efe3984b706362aa5e8f5749a5ec9b1f64bb4615f0", + "blockNumber": "0xc65d68", + "from": "0xef3f063136fe5002065bf7c4a2d85ff34cfb0ac0", + "gas": "0xdfeae", + "gasPrice": "0x2ba7def3000", + "hash": "0x0190f436ce165abb741b8513f64d194682677e1db72422f0f533fe6c0248e59a", + "input": "0x926427440000000000000000000000000000000000000000000000000000000000000005", + "nonce": "0x267", + "to": "0xad9fd7cb4fc7a0fbce08d64068f60cbde22ed34c", + "transactionIndex": "0x1", + "value": "0x62967a5c8460000", + "type": "0x0", + "v": "0x26", + "r": "0xd06f53ad57d61543526b529c2532903ac0d45b1d727567d04dc9b2f4e6340521", + "s": "0x6332bcec6a66abf4bed4df24e25e1e4dfc61c5d5bc32a441033c285c14c402d" + }, + { + "blockHash": "0x11ac873a6cd8b8b7b57ec1efe3984b706362aa5e8f5749a5ec9b1f64bb4615f0", + "blockNumber": "0xc65d68", + "from": "0xff54553ff5edf0e93d58555303291805770e5793", + "gas": "0x5208", + "gasPrice": "0x746a528800", + "maxFeePerGas": "0x746a528800", + "maxPriorityFeePerGas": "0x746a528800", + "hash": "0x136aa666e6b8109b2b4aca8008ecad8df2047f4e2aced4808248fa8927a13395", + "input": "0x", + "nonce": "0x1", + "to": "0xb5d85cbf7cb3ee0d56b3bb207d5fc4b82f43f511", + "transactionIndex": "0x3b", + "value": "0x1302a5a6ad330400", + "type": "0x2", + "accessList": [], + "chainId": "0x1", + "v": "0x1", + "r": "0x2806aa357d15790319e1def013902135dc8fa191182e2f87edae352e50ef281", + "s": "0x61d160d7de9af375c7fc40aed956e711af3af20146afe27d5122adf28cd25c9" + }, + { + "blockHash": "0x11ac873a6cd8b8b7b57ec1efe3984b706362aa5e8f5749a5ec9b1f64bb4615f0", + "blockNumber": "0xc65d68", + "from": "0xb090838386b9207994a42f740217066af2de53ad", + "gas": "0x5208", + "maxFeePerGas": "0x746a528800", + "maxPriorityFeePerGas": "0x746a528800", + "hash": "0x13d4ecea98e37359e63e39e350ed0b1456e1acbf985eb8d4a0ef0e89a705c10d", + "input": "0x", + "nonce": "0x1", + "to": "0xb5d85cbf7cb3ee0d56b3bb207d5fc4b82f43f511", + "transactionIndex": "0x3c", + "value": "0xe95497bc358fe60", + "type": "0x2", + "accessList": [], + 
"chainId": "0x1", + "v": "0x1", + "r": "0xa0d09f41bb4279d73e4255a1c1ce6cb10cb1fba04b4eca4af582ab2928201b27", + "s": "0x682f2a7a734b7c5887c5e228d35af4d3d3ad240c2c14f97aa9145a6c9edcd0a1" + } + ] +} +` + + var block evmtypes.Block + err := json.Unmarshal([]byte(blockJSON), &block) + assert.NoError(t, err) + + assert.Equal(t, int64(13000040), block.Number) + assert.Equal(t, "43.362048092 gwei", block.BaseFeePerGas.String()) + assert.Equal(t, common.HexToHash("0x11ac873a6cd8b8b7b57ec1efe3984b706362aa5e8f5749a5ec9b1f64bb4615f0"), block.Hash) + assert.Equal(t, common.HexToHash("0x1ae6168805dfd2e48311181774019c17fb09b24ab75dcad6566d18d38d5c4071"), block.ParentHash) + + require.Len(t, block.Transactions, 4) + + assert.Equal(t, int64(683000000000), block.Transactions[0].GasPrice.Int64()) + assert.Equal(t, 900000, int(block.Transactions[0].GasLimit)) + assert.Nil(t, block.Transactions[0].MaxFeePerGas) + assert.Nil(t, block.Transactions[0].MaxPriorityFeePerGas) + assert.Equal(t, evmtypes.TxType(0x0), block.Transactions[0].Type) + assert.Equal(t, "0x8e58af889f4e831ef9a67df84058bcfb7090cbcb5c6f1046c211dafee6050944", block.Transactions[0].Hash.String()) + + assert.Equal(t, assets.NewWeiI(3000000000000), block.Transactions[1].GasPrice) + assert.Equal(t, "0x0190f436ce165abb741b8513f64d194682677e1db72422f0f533fe6c0248e59a", block.Transactions[1].Hash.String()) + + assert.Equal(t, int64(500000000000), block.Transactions[2].GasPrice.Int64()) + assert.Equal(t, 21000, int(block.Transactions[2].GasLimit)) + assert.Equal(t, int64(500000000000), block.Transactions[2].MaxFeePerGas.Int64()) + assert.Equal(t, int64(500000000000), block.Transactions[2].MaxPriorityFeePerGas.Int64()) + assert.Equal(t, evmtypes.TxType(0x2), block.Transactions[2].Type) + assert.Equal(t, "0x136aa666e6b8109b2b4aca8008ecad8df2047f4e2aced4808248fa8927a13395", block.Transactions[2].Hash.String()) + + assert.Nil(t, block.Transactions[3].GasPrice) + assert.Equal(t, 21000, int(block.Transactions[3].GasLimit)) + 
assert.Equal(t, "0x13d4ecea98e37359e63e39e350ed0b1456e1acbf985eb8d4a0ef0e89a705c10d", block.Transactions[3].Hash.String()) +} + +func TestBlockHistoryEstimator_GetLegacyGas(t *testing.T) { + t.Parallel() + + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.TransactionPercentileF = uint16(35) + bhCfg.CheckInclusionBlocksF = uint16(0) + bhCfg.BlockHistorySizeF = uint16(8) + + maxGasPrice := assets.NewWeiI(1000000) + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + geCfg.LimitMultiplierF = float32(1) + geCfg.PriceMaxF = maxGasPrice + geCfg.PriceMinF = assets.NewWeiI(0) + + bhe := newBlockHistoryEstimator(t, nil, cfg, geCfg, bhCfg) + + blocks := []evmtypes.Block{ + { + Number: 0, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(1000), + }, + { + Number: 1, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(1200), + }, + } + + gas.SetRollingBlockHistory(bhe, blocks) + bhe.Recalculate(cltest.Head(1)) + gas.SimulateStart(t, bhe) + + t.Run("if gas price is lower than global max and user specified max gas price", func(t *testing.T) { + fee, limit, err := bhe.GetLegacyGas(testutils.Context(t), make([]byte, 0), 10000, maxGasPrice) + require.NoError(t, err) + + assert.Equal(t, assets.NewWeiI(1000), fee) + assert.Equal(t, 10000, int(limit)) + }) + + t.Run("if gas price is higher than user-specified max", func(t *testing.T) { + fee, limit, err := bhe.GetLegacyGas(testutils.Context(t), make([]byte, 0), 10000, assets.NewWeiI(800)) + require.NoError(t, err) + + assert.Equal(t, assets.NewWeiI(800), fee) + assert.Equal(t, 10000, int(limit)) + }) + + bhCfg.TransactionPercentileF = uint16(35) + + cfg = gas.NewMockConfig() + + geCfg.LimitMultiplierF = float32(1) + geCfg.PriceMaxF = assets.NewWeiI(700) + geCfg.PriceMinF = assets.NewWeiI(0) + + geCfg.EIP1559DynamicFeesF = false + + bhe = newBlockHistoryEstimator(t, nil, cfg, geCfg, bhCfg) + gas.SetRollingBlockHistory(bhe, blocks) 
+ bhe.Recalculate(cltest.Head(1)) + gas.SimulateStart(t, bhe) + + t.Run("if gas price is higher than global max", func(t *testing.T) { + fee, limit, err := bhe.GetLegacyGas(testutils.Context(t), make([]byte, 0), 10000, maxGasPrice) + require.NoError(t, err) + + assert.Equal(t, assets.NewWeiI(700), fee) + assert.Equal(t, 10000, int(limit)) + }) +} + +func TestBlockHistoryEstimator_UseDefaultPriceAsFallback(t *testing.T) { + t.Parallel() + + var batchSize uint32 + var blockDelay uint16 + var historySize uint16 = 3 + var specialTxTypeCode evmtypes.TxType = 0x7e + + t.Run("fallbacks to EvmGasPriceDefault if there aren't any valid transactions to estimate from.", func(t *testing.T) { + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + bhCfg.BatchSizeF = batchSize + bhCfg.TransactionPercentileF = uint16(35) + bhCfg.BlockDelayF = blockDelay + bhCfg.BlockHistorySizeF = historySize + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + geCfg.LimitMultiplierF = float32(1) + geCfg.PriceMaxF = assets.NewWeiI(1000000) + geCfg.PriceDefaultF = assets.NewWeiI(100) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + h := &evmtypes.Head{Hash: utils.NewHash(), Number: 42, BaseFeePerGas: nil} + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h, nil) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 3 && + b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == gas.Int64ToHex(42) && b[0].Args[1].(bool) && reflect.TypeOf(b[0].Result) == reflect.TypeOf(&evmtypes.Block{}) && + b[1].Method == "eth_getBlockByNumber" && b[1].Args[0] == gas.Int64ToHex(41) && b[1].Args[1].(bool) && reflect.TypeOf(b[1].Result) == reflect.TypeOf(&evmtypes.Block{}) && + b[2].Method == "eth_getBlockByNumber" && b[2].Args[0] == gas.Int64ToHex(40) && b[1].Args[1].(bool) && reflect.TypeOf(b[2].Result) == 
reflect.TypeOf(&evmtypes.Block{}) + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &evmtypes.Block{ + Number: 42, + Hash: utils.NewHash(), + } + elems[1].Result = &evmtypes.Block{ + Number: 41, + Hash: utils.NewHash(), + } + elems[2].Result = &evmtypes.Block{ + Number: 40, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPricesTxType(specialTxTypeCode, 1), + } + }).Once() + + err := bhe.Start(testutils.Context(t)) + require.NoError(t, err) + + fee, limit, err := bhe.GetLegacyGas(testutils.Context(t), make([]byte, 0), 10000, assets.NewWeiI(800)) + require.NoError(t, err) + require.Equal(t, geCfg.PriceDefault(), fee) + assert.Equal(t, 10000, int(limit)) + }) + + t.Run("fallbacks to EvmGasTipCapDefault if there aren't any valid transactions to estimate from.", func(t *testing.T) { + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + bhCfg.BatchSizeF = batchSize + bhCfg.TransactionPercentileF = uint16(35) + bhCfg.BlockDelayF = blockDelay + bhCfg.BlockHistorySizeF = historySize + bhCfg.EIP1559FeeCapBufferBlocksF = uint16(4) + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + geCfg.LimitMultiplierF = float32(1) + geCfg.PriceMaxF = assets.NewWeiI(1000000) + geCfg.PriceDefaultF = assets.NewWeiI(100) + geCfg.TipCapDefaultF = assets.NewWeiI(50) + geCfg.BumpThresholdF = uint64(1) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + bhe := newBlockHistoryEstimator(t, ethClient, cfg, geCfg, bhCfg) + + h := &evmtypes.Head{Hash: utils.NewHash(), Number: 42, BaseFeePerGas: assets.NewWeiI(40)} + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(h, nil) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 3 && + b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == gas.Int64ToHex(42) && b[0].Args[1].(bool) && reflect.TypeOf(b[0].Result) == 
reflect.TypeOf(&evmtypes.Block{}) && + b[1].Method == "eth_getBlockByNumber" && b[1].Args[0] == gas.Int64ToHex(41) && b[1].Args[1].(bool) && reflect.TypeOf(b[1].Result) == reflect.TypeOf(&evmtypes.Block{}) && + b[2].Method == "eth_getBlockByNumber" && b[2].Args[0] == gas.Int64ToHex(40) && b[1].Args[1].(bool) && reflect.TypeOf(b[2].Result) == reflect.TypeOf(&evmtypes.Block{}) + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &evmtypes.Block{ + Number: 42, + Hash: utils.NewHash(), + } + elems[1].Result = &evmtypes.Block{ + Number: 41, + Hash: utils.NewHash(), + } + elems[2].Result = &evmtypes.Block{ + Number: 40, + Hash: utils.NewHash(), + Transactions: cltest.DynamicFeeTransactionsFromTipCapsTxType(specialTxTypeCode, 1), + } + }).Once() + + err := bhe.Start(testutils.Context(t)) + require.NoError(t, err) + fee, limit, err := bhe.GetDynamicFee(testutils.Context(t), 100000, assets.NewWeiI(200)) + require.NoError(t, err) + + assert.Equal(t, gas.DynamicFee{FeeCap: assets.NewWeiI(114), TipCap: geCfg.TipCapDefault()}, fee) + assert.Equal(t, 100000, int(limit)) + }) +} + +func TestBlockHistoryEstimator_GetDynamicFee(t *testing.T) { + t.Parallel() + + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + maxGasPrice := assets.NewWeiI(1000000) + bhCfg.EIP1559FeeCapBufferBlocksF = uint16(4) + bhCfg.TransactionPercentileF = uint16(35) + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + geCfg.LimitMultiplierF = float32(1) + geCfg.PriceMaxF = maxGasPrice + geCfg.TipCapMinF = assets.NewWeiI(0) + geCfg.PriceMinF = assets.NewWeiI(0) + + bhe := newBlockHistoryEstimator(t, nil, cfg, geCfg, bhCfg) + + blocks := []evmtypes.Block{ + evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(88889), + Number: 0, + Hash: utils.NewHash(), + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(5000, 6000, 6000), + }, + evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(100000), + Number: 1, + Hash: 
utils.NewHash(), + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(10000), + }, + } + gas.SetRollingBlockHistory(bhe, blocks) + + bhe.Recalculate(cltest.Head(1)) + gas.SimulateStart(t, bhe) + + t.Run("if estimator is missing base fee and gas bumping is enabled", func(t *testing.T) { + geCfg.BumpThresholdF = uint64(1) + + _, _, err := bhe.GetDynamicFee(testutils.Context(t), 100000, maxGasPrice) + require.Error(t, err) + assert.Contains(t, err.Error(), "BlockHistoryEstimator: no value for latest block base fee; cannot estimate EIP-1559 base fee. Are you trying to run with EIP1559 enabled on a non-EIP1559 chain?") + }) + + t.Run("if estimator is missing base fee and gas bumping is disabled", func(t *testing.T) { + geCfg.BumpThresholdF = uint64(0) + + fee, limit, err := bhe.GetDynamicFee(testutils.Context(t), 100000, maxGasPrice) + require.NoError(t, err) + assert.Equal(t, gas.DynamicFee{FeeCap: maxGasPrice, TipCap: assets.NewWeiI(6000)}, fee) + assert.Equal(t, 100000, int(limit)) + }) + + h := cltest.Head(1) + h.BaseFeePerGas = assets.NewWeiI(112500) + bhe.OnNewLongestChain(testutils.Context(t), h) + + t.Run("if gas bumping is enabled", func(t *testing.T) { + geCfg.BumpThresholdF = uint64(1) + + fee, limit, err := bhe.GetDynamicFee(testutils.Context(t), 100000, maxGasPrice) + require.NoError(t, err) + + assert.Equal(t, gas.DynamicFee{FeeCap: assets.NewWeiI(186203), TipCap: assets.NewWeiI(6000)}, fee) + assert.Equal(t, 100000, int(limit)) + }) + + t.Run("if gas bumping is disabled", func(t *testing.T) { + geCfg.BumpThresholdF = uint64(0) + + fee, limit, err := bhe.GetDynamicFee(testutils.Context(t), 100000, maxGasPrice) + require.NoError(t, err) + + assert.Equal(t, gas.DynamicFee{FeeCap: maxGasPrice, TipCap: assets.NewWeiI(6000)}, fee) + assert.Equal(t, 100000, int(limit)) + }) + + t.Run("if gas bumping is enabled and local max gas price set", func(t *testing.T) { + geCfg.BumpThresholdF = uint64(1) + + fee, limit, err := bhe.GetDynamicFee(testutils.Context(t), 
100000, assets.NewWeiI(180000)) + require.NoError(t, err) + + assert.Equal(t, gas.DynamicFee{FeeCap: assets.NewWeiI(180000), TipCap: assets.NewWeiI(6000)}, fee) + assert.Equal(t, 100000, int(limit)) + }) + + t.Run("if bump threshold is 0 and local max gas price set", func(t *testing.T) { + geCfg.BumpThresholdF = uint64(0) + + fee, limit, err := bhe.GetDynamicFee(testutils.Context(t), 100000, assets.NewWeiI(100)) + require.NoError(t, err) + + assert.Equal(t, gas.DynamicFee{FeeCap: assets.NewWeiI(100), TipCap: assets.NewWeiI(6000)}, fee) + assert.Equal(t, 100000, int(limit)) + }) + + h = cltest.Head(1) + h.BaseFeePerGas = assets.NewWeiI(900000) + bhe.OnNewLongestChain(testutils.Context(t), h) + + t.Run("if gas bumping is enabled and global max gas price lower than local max gas price", func(t *testing.T) { + geCfg.BumpThresholdF = uint64(1) + + fee, limit, err := bhe.GetDynamicFee(testutils.Context(t), 100000, assets.NewWeiI(1200000)) + require.NoError(t, err) + + assert.Equal(t, gas.DynamicFee{FeeCap: assets.NewWeiI(1000000), TipCap: assets.NewWeiI(6000)}, fee) + assert.Equal(t, 100000, int(limit)) + }) +} + +func TestBlockHistoryEstimator_CheckConnectivity(t *testing.T) { + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + bhCfg.CheckInclusionBlocksF = uint16(4) + lggr, obs := logger.TestObserved(t, zapcore.DebugLevel) + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + + bhe := gas.BlockHistoryEstimatorFromInterface( + gas.NewBlockHistoryEstimator(lggr, nil, cfg, geCfg, bhCfg, *testutils.NewRandomEVMChainID()), + ) + + attempts := []gas.EvmPriorAttempt{ + {TxType: 0x0, TxHash: NewEvmHash()}, + } + + t.Run("skips connectivity check if latest block is not present", func(t *testing.T) { + err := bhe.CheckConnectivity(attempts) + require.NoError(t, err) + + testutils.WaitForLogMessage(t, obs, "Latest block is unknown; skipping inclusion check") + }) + + h := cltest.Head(1) + h.BaseFeePerGas = assets.NewWeiI(112500) + 
bhe.OnNewLongestChain(testutils.Context(t), h) + + b0 := evmtypes.Block{ + Number: 0, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(), + } + gas.SetRollingBlockHistory(bhe, []evmtypes.Block{b0}) + + t.Run("skips connectivity check if block history has insufficient size", func(t *testing.T) { + err := bhe.CheckConnectivity(attempts) + require.NoError(t, err) + + testutils.WaitForLogMessage(t, obs, "Block history in memory with length 1 is insufficient to determine whether transaction should have been included within the past 4 blocks") + }) + + t.Run("skips connectivity check if attempts is nil or empty", func(t *testing.T) { + err := bhe.CheckConnectivity(nil) + require.NoError(t, err) + }) + + b1 := evmtypes.Block{ + Number: 1, + Hash: utils.NewHash(), + ParentHash: b0.Hash, + Transactions: cltest.LegacyTransactionsFromGasPrices(), + } + b2 := evmtypes.Block{ + Number: 2, + Hash: utils.NewHash(), + ParentHash: b1.Hash, + Transactions: cltest.LegacyTransactionsFromGasPrices(), + } + b3 := evmtypes.Block{ + Number: 3, + Hash: utils.NewHash(), + ParentHash: b2.Hash, + Transactions: cltest.LegacyTransactionsFromGasPrices(), + } + gas.SetRollingBlockHistory(bhe, []evmtypes.Block{b0, b1, b2, b3}) + h = cltest.Head(5) + h.BaseFeePerGas = assets.NewWeiI(112500) + bhe.OnNewLongestChain(testutils.Context(t), h) + + t.Run("returns error if one of the supplied attempts is missing BroadcastBeforeBlockNum", func(t *testing.T) { + err := bhe.CheckConnectivity(attempts) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("BroadcastBeforeBlockNum was unexpectedly nil for attempt %s", attempts[0].TxHash)) + }) + + num := int64(0) + hash := utils.NewHash() + attempts = []gas.EvmPriorAttempt{ + {TxType: 0x3, BroadcastBeforeBlockNum: &num, TxHash: hash}, + } + + t.Run("returns error if one of the supplied attempts has an unknown transaction type", func(t *testing.T) { + err := bhe.CheckConnectivity(attempts) + require.Error(t, err) + 
assert.Contains(t, err.Error(), fmt.Sprintf("attempt %s has unknown transaction type 0x3", hash)) + }) + + attempts = []gas.EvmPriorAttempt{ + {TxType: 0x0, BroadcastBeforeBlockNum: &num, TxHash: hash}, + } + + t.Run("skips connectivity check if no transactions are suitable", func(t *testing.T) { + err := bhe.CheckConnectivity(attempts) + require.NoError(t, err) + + testutils.WaitForLogMessage(t, obs, fmt.Sprintf("no suitable transactions found to verify if transaction %s has been included within expected inclusion blocks of 4", hash)) + }) + + t.Run("in legacy mode", func(t *testing.T) { + b0 = evmtypes.Block{ + Number: 0, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(1000), + } + b1 = evmtypes.Block{ + Number: 1, + Hash: utils.NewHash(), + ParentHash: b0.Hash, + Transactions: cltest.LegacyTransactionsFromGasPrices(2, 3, 4, 5, 6), + } + b2 = evmtypes.Block{ + Number: 2, + Hash: utils.NewHash(), + ParentHash: b1.Hash, + Transactions: cltest.LegacyTransactionsFromGasPrices(4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9), + } + b3 = evmtypes.Block{ + Number: 3, + Hash: utils.NewHash(), + ParentHash: b2.Hash, + Transactions: cltest.LegacyTransactionsFromGasPrices(3, 4, 5, 6, 7), + } + gas.SetRollingBlockHistory(bhe, []evmtypes.Block{b0, b1, b2, b3}) + + attempts = []gas.EvmPriorAttempt{ + {TxType: 0x0, TxHash: NewEvmHash(), GasPrice: assets.NewWeiI(1000), BroadcastBeforeBlockNum: testutils.Ptr(int64(4))}, // This is very expensive but will be ignored due to BroadcastBeforeBlockNum being too recent + {TxType: 0x0, TxHash: NewEvmHash(), GasPrice: assets.NewWeiI(3), BroadcastBeforeBlockNum: testutils.Ptr(int64(0))}, + {TxType: 0x0, TxHash: NewEvmHash(), GasPrice: assets.NewWeiI(5), BroadcastBeforeBlockNum: testutils.Ptr(int64(1))}, + {TxType: 0x0, TxHash: NewEvmHash(), GasPrice: assets.NewWeiI(7), BroadcastBeforeBlockNum: testutils.Ptr(int64(1))}, + } + + t.Run("passes check if all blocks have percentile price higher or exactly at the highest 
transaction gas price", func(t *testing.T) { + bhCfg.CheckInclusionBlocksF = 3 + bhCfg.CheckInclusionPercentileF = 80 // percentile price is 7 wei + + err := bhe.CheckConnectivity(attempts) + require.NoError(t, err) + }) + t.Run("fails check if one or more blocks has percentile price higher than highest transaction gas price", func(t *testing.T) { + bhCfg.CheckInclusionBlocksF = 3 + bhCfg.CheckInclusionPercentileF = 40 // percentile price is 5 wei + + err := bhe.CheckConnectivity(attempts) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("transaction %s has gas price of 7 wei, which is above percentile=40%% (percentile price: 5 wei) for blocks 2 thru 0 (checking 3 blocks)", attempts[3].TxHash)) + require.ErrorIs(t, err, commonfee.ErrConnectivity) + }) + + t.Run("fails check if one or more blocks has percentile price higher than any transaction gas price", func(t *testing.T) { + bhCfg.CheckInclusionBlocksF = 3 + bhCfg.CheckInclusionPercentileF = 30 // percentile price is 4 wei + + err := bhe.CheckConnectivity(attempts) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("transaction %s has gas price of 5 wei, which is above percentile=30%% (percentile price: 4 wei) for blocks 2 thru 0 (checking 3 blocks)", attempts[2].TxHash)) + + bhCfg.CheckInclusionBlocksF = 3 + bhCfg.CheckInclusionPercentileF = 5 // percentile price is 2 wei + + err = bhe.CheckConnectivity(attempts) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("transaction %s has gas price of 3 wei, which is above percentile=5%% (percentile price: 2 wei) for blocks 2 thru 0 (checking 3 blocks)", attempts[1].TxHash)) + require.ErrorIs(t, err, commonfee.ErrConnectivity) + }) + }) + + t.Run("handles mixed legacy and EIP-1559 transactions", func(t *testing.T) { + b0 = evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(1), + Number: 3, + Hash: utils.NewHash(), + Transactions: append(cltest.LegacyTransactionsFromGasPrices(1, 2, 3, 4, 5), 
cltest.DynamicFeeTransactionsFromTipCaps(6, 7, 8, 9, 10)...), + } + gas.SetRollingBlockHistory(bhe, []evmtypes.Block{b0}) + + attempts = []gas.EvmPriorAttempt{ + {TxType: 0x2, TxHash: NewEvmHash(), DynamicFee: gas.DynamicFee{FeeCap: assets.NewWeiI(1), TipCap: assets.NewWeiI(3)}, BroadcastBeforeBlockNum: testutils.Ptr(int64(0))}, + {TxType: 0x0, TxHash: NewEvmHash(), GasPrice: assets.NewWeiI(10), BroadcastBeforeBlockNum: testutils.Ptr(int64(0))}, + } + + t.Run("passes check if both transactions are ok", func(t *testing.T) { + bhCfg.CheckInclusionBlocksF = 1 + bhCfg.CheckInclusionPercentileF = 90 // percentile price is 5 wei + + err := bhe.CheckConnectivity(attempts) + require.NoError(t, err) + }) + t.Run("fails check if legacy transaction fails", func(t *testing.T) { + bhCfg.CheckInclusionBlocksF = 1 + bhCfg.CheckInclusionPercentileF = 60 + + err := bhe.CheckConnectivity(attempts) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("transaction %s has gas price of 10 wei, which is above percentile=60%% (percentile price: 7 wei) for blocks 3 thru 3 (checking 1 blocks)", attempts[1].TxHash)) + require.ErrorIs(t, err, commonfee.ErrConnectivity) + }) + + attempts = []gas.EvmPriorAttempt{ + {TxType: 0x2, TxHash: NewEvmHash(), DynamicFee: gas.DynamicFee{FeeCap: assets.NewWeiI(11), TipCap: assets.NewWeiI(10)}, BroadcastBeforeBlockNum: testutils.Ptr(int64(0))}, + {TxType: 0x0, TxHash: NewEvmHash(), GasPrice: assets.NewWeiI(3), BroadcastBeforeBlockNum: testutils.Ptr(int64(0))}, + } + + t.Run("fails check if dynamic fee transaction fails", func(t *testing.T) { + gas.SetRollingBlockHistory(bhe, []evmtypes.Block{b0}) + bhCfg.CheckInclusionBlocksF = 1 + bhCfg.CheckInclusionPercentileF = 60 + + err := bhe.CheckConnectivity(attempts) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("transaction %s has tip cap of 10 wei, which is above percentile=60%% (percentile tip cap: 6 wei) for blocks 3 thru 3 (checking 1 blocks)", attempts[0].TxHash)) + 
require.ErrorIs(t, err, commonfee.ErrConnectivity) + }) + + }) + + t.Run("in EIP-1559 mode", func(t *testing.T) { + b0 = evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(5), + Number: 0, + Hash: utils.NewHash(), + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(1000), + } + b1 = evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(8), + Number: 1, + Hash: utils.NewHash(), + ParentHash: b0.Hash, + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(2, 3, 4, 5, 6), + } + b2 = evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(13), + Number: 2, + Hash: utils.NewHash(), + ParentHash: b1.Hash, + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9), + } + b3 = evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(21), + Number: 3, + Hash: utils.NewHash(), + ParentHash: b2.Hash, + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(3, 4, 5, 6, 7), + } + blocks := []evmtypes.Block{b0, b1, b2, b3} + gas.SetRollingBlockHistory(bhe, blocks) + + attempts = []gas.EvmPriorAttempt{ + {TxType: 0x2, TxHash: NewEvmHash(), DynamicFee: gas.DynamicFee{FeeCap: assets.NewWeiI(30), TipCap: assets.NewWeiI(1000)}, BroadcastBeforeBlockNum: testutils.Ptr(int64(4))}, // This is very expensive but will be ignored due to BroadcastBeforeBlockNum being too recent + {TxType: 0x2, TxHash: NewEvmHash(), DynamicFee: gas.DynamicFee{FeeCap: assets.NewWeiI(30), TipCap: assets.NewWeiI(3)}, BroadcastBeforeBlockNum: testutils.Ptr(int64(0))}, + {TxType: 0x2, TxHash: NewEvmHash(), DynamicFee: gas.DynamicFee{FeeCap: assets.NewWeiI(30), TipCap: assets.NewWeiI(5)}, BroadcastBeforeBlockNum: testutils.Ptr(int64(1))}, + {TxType: 0x2, TxHash: NewEvmHash(), DynamicFee: gas.DynamicFee{FeeCap: assets.NewWeiI(30), TipCap: assets.NewWeiI(7)}, BroadcastBeforeBlockNum: testutils.Ptr(int64(1))}, + } + + t.Run("passes check if all blocks have 90th percentile price higher than highest transaction tip cap", func(t *testing.T) { + bhCfg.CheckInclusionBlocksF = 3 + 
bhCfg.CheckInclusionPercentileF = 80 + + err := bhe.CheckConnectivity(attempts) + require.NoError(t, err) + }) + + t.Run("fails check if one or more blocks has percentile tip cap higher than any transaction tip cap, and base fee higher than the block base fee", func(t *testing.T) { + bhCfg.CheckInclusionBlocksF = 3 + bhCfg.CheckInclusionPercentileF = 20 + + err := bhe.CheckConnectivity(attempts) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("transaction %s has tip cap of 5 wei, which is above percentile=20%% (percentile tip cap: 4 wei) for blocks 2 thru 0 (checking 3 blocks)", attempts[2].TxHash)) + require.ErrorIs(t, err, commonfee.ErrConnectivity) + + bhCfg.CheckInclusionBlocksF = 3 + bhCfg.CheckInclusionPercentileF = 5 + + err = bhe.CheckConnectivity(attempts) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("transaction %s has tip cap of 3 wei, which is above percentile=5%% (percentile tip cap: 2 wei) for blocks 2 thru 0 (checking 3 blocks)", attempts[1].TxHash)) + require.ErrorIs(t, err, commonfee.ErrConnectivity) + }) + + t.Run("passes check if, for at least one block, feecap < tipcap+basefee, even if percentile is not reached", func(t *testing.T) { + bhCfg.CheckInclusionBlocksF = 3 + bhCfg.CheckInclusionPercentileF = 5 + + attempts = []gas.EvmPriorAttempt{ + {TxType: 0x2, TxHash: NewEvmHash(), DynamicFee: gas.DynamicFee{FeeCap: assets.NewWeiI(4), TipCap: assets.NewWeiI(7)}, BroadcastBeforeBlockNum: testutils.Ptr(int64(1))}, + } + + err := bhe.CheckConnectivity(attempts) + require.NoError(t, err) + }) + }) +} + +func TestBlockHistoryEstimator_Bumps(t *testing.T) { + t.Parallel() + maxGasPrice := assets.NewWeiI(1000000) + bhCfg := newBlockHistoryConfig() + + t.Run("BumpLegacyGas checks connectivity", func(t *testing.T) { + cfg := gas.NewMockConfig() + bhCfg.CheckInclusionBlocksF = 1 + bhCfg.CheckInclusionPercentileF = 10 + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + 
geCfg.BumpPercentF = 10 + geCfg.BumpMinF = assets.NewWeiI(150) + geCfg.PriceMaxF = maxGasPrice + geCfg.LimitMultiplierF = float32(1.1) + + bhe := newBlockHistoryEstimator(t, nil, cfg, geCfg, bhCfg) + + b1 := evmtypes.Block{ + Number: 1, + Hash: utils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(1), + } + gas.SetRollingBlockHistory(bhe, []evmtypes.Block{b1}) + head := cltest.Head(1) + bhe.OnNewLongestChain(testutils.Context(t), head) + + attempts := []gas.EvmPriorAttempt{ + {TxType: 0x0, TxHash: NewEvmHash(), GasPrice: assets.NewWeiI(1000), BroadcastBeforeBlockNum: testutils.Ptr(int64(0))}, + } + + _, _, err := bhe.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(42), 100000, maxGasPrice, attempts) + require.Error(t, err) + assert.True(t, errors.Is(err, commonfee.ErrConnectivity)) + assert.Contains(t, err.Error(), fmt.Sprintf("transaction %s has gas price of 1 kwei, which is above percentile=10%% (percentile price: 1 wei) for blocks 1 thru 1 (checking 1 blocks)", attempts[0].TxHash)) + }) + + t.Run("BumpLegacyGas calls BumpLegacyGasPriceOnly with proper current gas price", func(t *testing.T) { + cfg := gas.NewMockConfig() + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = false + geCfg.BumpPercentF = 10 + geCfg.BumpMinF = assets.NewWeiI(150) + geCfg.PriceMaxF = maxGasPrice + geCfg.LimitMultiplierF = float32(1.1) + + bhe := newBlockHistoryEstimator(t, nil, cfg, geCfg, bhCfg) + + t.Run("ignores nil current gas price", func(t *testing.T) { + gasPrice, gasLimit, err := bhe.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(42), 100000, maxGasPrice, nil) + require.NoError(t, err) + + expectedGasPrice, expectedGasLimit, err := gas.BumpLegacyGasPriceOnly(geCfg, logger.TestSugared(t), nil, assets.NewWeiI(42), 100000, maxGasPrice) + require.NoError(t, err) + + assert.Equal(t, expectedGasLimit, gasLimit) + assert.Equal(t, expectedGasPrice, gasPrice) + }) + + t.Run("ignores current gas price > max gas price", func(t *testing.T) { + 
gasPrice, gasLimit, err := bhe.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(42), 100000, maxGasPrice, nil) + require.NoError(t, err) + + massive := assets.NewWeiI(100000000000000) + gas.SetGasPrice(bhe, massive) + + expectedGasPrice, expectedGasLimit, err := gas.BumpLegacyGasPriceOnly(geCfg, logger.TestSugared(t), massive, assets.NewWeiI(42), 100000, maxGasPrice) + require.NoError(t, err) + + assert.Equal(t, expectedGasLimit, gasLimit) + assert.Equal(t, expectedGasPrice, gasPrice) + }) + + t.Run("ignores current gas price < bumped gas price", func(t *testing.T) { + gas.SetGasPrice(bhe, assets.NewWeiI(191)) + + gasPrice, gasLimit, err := bhe.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(42), 100000, maxGasPrice, nil) + require.NoError(t, err) + + assert.Equal(t, 110000, int(gasLimit)) + assert.Equal(t, assets.NewWeiI(192), gasPrice) + }) + + t.Run("uses current gas price > bumped gas price", func(t *testing.T) { + gas.SetGasPrice(bhe, assets.NewWeiI(193)) + + gasPrice, gasLimit, err := bhe.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(42), 100000, maxGasPrice, nil) + require.NoError(t, err) + + assert.Equal(t, 110000, int(gasLimit)) + assert.Equal(t, assets.NewWeiI(193), gasPrice) + }) + + t.Run("bumped gas price > max gas price", func(t *testing.T) { + gas.SetGasPrice(bhe, assets.NewWeiI(191)) + + gasPrice, gasLimit, err := bhe.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(42), 100000, assets.NewWeiI(100), nil) + require.Error(t, err) + + assert.Nil(t, gasPrice) + assert.Equal(t, 0, int(gasLimit)) + assert.Contains(t, err.Error(), "bumped gas price of 192 wei would exceed configured max gas price of 100 wei (original price was 42 wei).") + }) + + t.Run("current gas price > max gas price", func(t *testing.T) { + gas.SetGasPrice(bhe, assets.NewWeiI(193)) + + gasPrice, gasLimit, err := bhe.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(42), 100000, assets.NewWeiI(100), nil) + require.Error(t, err) + + assert.Nil(t, gasPrice) + assert.Equal(t, 
0, int(gasLimit)) + assert.Contains(t, err.Error(), "bumped gas price of 192 wei would exceed configured max gas price of 100 wei (original price was 42 wei).") + }) + }) + + t.Run("BumpDynamicFee checks connectivity", func(t *testing.T) { + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + bhCfg.CheckInclusionBlocksF = 1 + bhCfg.CheckInclusionPercentileF = 10 + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + geCfg.BumpPercentF = 10 + geCfg.BumpMinF = assets.NewWeiI(150) + geCfg.PriceMaxF = maxGasPrice + geCfg.LimitMultiplierF = float32(1.1) + + bhe := newBlockHistoryEstimator(t, nil, cfg, geCfg, bhCfg) + + b1 := evmtypes.Block{ + BaseFeePerGas: assets.NewWeiI(1), + Number: 1, + Hash: utils.NewHash(), + Transactions: cltest.DynamicFeeTransactionsFromTipCaps(1), + } + gas.SetRollingBlockHistory(bhe, []evmtypes.Block{b1}) + head := cltest.Head(1) + bhe.OnNewLongestChain(testutils.Context(t), head) + + originalFee := gas.DynamicFee{FeeCap: assets.NewWeiI(100), TipCap: assets.NewWeiI(25)} + attempts := []gas.EvmPriorAttempt{ + {TxType: 0x2, TxHash: NewEvmHash(), DynamicFee: gas.DynamicFee{TipCap: originalFee.TipCap, FeeCap: originalFee.FeeCap}, BroadcastBeforeBlockNum: testutils.Ptr(int64(0))}} + + _, _, err := bhe.BumpDynamicFee(testutils.Context(t), originalFee, 100000, maxGasPrice, attempts) + require.Error(t, err) + assert.True(t, errors.Is(err, commonfee.ErrConnectivity)) + assert.Contains(t, err.Error(), fmt.Sprintf("transaction %s has tip cap of 25 wei, which is above percentile=10%% (percentile tip cap: 1 wei) for blocks 1 thru 1 (checking 1 blocks)", attempts[0].TxHash)) + }) + + t.Run("BumpDynamicFee bumps the fee", func(t *testing.T) { + cfg := gas.NewMockConfig() + bhCfg := newBlockHistoryConfig() + + geCfg := &gas.MockGasEstimatorConfig{} + geCfg.EIP1559DynamicFeesF = true + geCfg.BumpPercentF = 10 + geCfg.BumpMinF = assets.NewWeiI(150) + geCfg.PriceMaxF = maxGasPrice + geCfg.LimitMultiplierF = float32(1.1) + 
geCfg.TipCapDefaultF = assets.NewWeiI(52) + + bhe := newBlockHistoryEstimator(t, nil, cfg, geCfg, bhCfg) + + t.Run("when current tip cap is nil", func(t *testing.T) { + originalFee := gas.DynamicFee{FeeCap: assets.NewWeiI(100), TipCap: assets.NewWeiI(25)} + fee, gasLimit, err := bhe.BumpDynamicFee(testutils.Context(t), originalFee, 100000, maxGasPrice, nil) + require.NoError(t, err) + + assert.Equal(t, 110000, int(gasLimit)) + assert.Equal(t, gas.DynamicFee{FeeCap: assets.NewWeiI(250), TipCap: assets.NewWeiI(202)}, fee) + }) + t.Run("ignores current tip cap that is smaller than original fee with bump applied", func(t *testing.T) { + gas.SetTipCap(bhe, assets.NewWeiI(201)) + + originalFee := gas.DynamicFee{FeeCap: assets.NewWeiI(100), TipCap: assets.NewWeiI(25)} + fee, gasLimit, err := bhe.BumpDynamicFee(testutils.Context(t), originalFee, 100000, maxGasPrice, nil) + require.NoError(t, err) + + assert.Equal(t, 110000, int(gasLimit)) + assert.Equal(t, gas.DynamicFee{FeeCap: assets.NewWeiI(250), TipCap: assets.NewWeiI(202)}, fee) + }) + t.Run("uses current tip cap that is larger than original fee with bump applied", func(t *testing.T) { + gas.SetTipCap(bhe, assets.NewWeiI(203)) + + originalFee := gas.DynamicFee{FeeCap: assets.NewWeiI(100), TipCap: assets.NewWeiI(25)} + fee, gasLimit, err := bhe.BumpDynamicFee(testutils.Context(t), originalFee, 100000, maxGasPrice, nil) + require.NoError(t, err) + + assert.Equal(t, 110000, int(gasLimit)) + assert.Equal(t, gas.DynamicFee{FeeCap: assets.NewWeiI(250), TipCap: assets.NewWeiI(203)}, fee) + }) + t.Run("ignores absurdly large current tip cap", func(t *testing.T) { + gas.SetTipCap(bhe, assets.NewWeiI(1000000000000000)) + + originalFee := gas.DynamicFee{FeeCap: assets.NewWeiI(100), TipCap: assets.NewWeiI(25)} + fee, gasLimit, err := bhe.BumpDynamicFee(testutils.Context(t), originalFee, 100000, maxGasPrice, nil) + require.NoError(t, err) + + assert.Equal(t, 110000, int(gasLimit)) + assert.Equal(t, gas.DynamicFee{FeeCap: 
assets.NewWeiI(250), TipCap: assets.NewWeiI(202)}, fee) + }) + + t.Run("bumped tip cap price > max gas price", func(t *testing.T) { + gas.SetTipCap(bhe, assets.NewWeiI(203)) + + originalFee := gas.DynamicFee{FeeCap: assets.NewWeiI(100), TipCap: assets.NewWeiI(990000)} + fee, gasLimit, err := bhe.BumpDynamicFee(testutils.Context(t), originalFee, 100000, maxGasPrice, nil) + require.Error(t, err) + + assert.Equal(t, 0, int(gasLimit)) + assert.Equal(t, gas.DynamicFee{}, fee) + assert.Contains(t, err.Error(), "bumped tip cap of 1.089 mwei would exceed configured max gas price of 1 mwei (original fee: tip cap 990 kwei, fee cap 100 wei)") + }) + + t.Run("bumped fee cap price > max gas price", func(t *testing.T) { + gas.SetTipCap(bhe, assets.NewWeiI(203)) + + originalFee := gas.DynamicFee{FeeCap: assets.NewWeiI(990000), TipCap: assets.NewWeiI(25)} + fee, gasLimit, err := bhe.BumpDynamicFee(testutils.Context(t), originalFee, 100000, maxGasPrice, nil) + require.Error(t, err) + + assert.Equal(t, 0, int(gasLimit)) + assert.Equal(t, gas.DynamicFee{}, fee) + assert.Contains(t, err.Error(), "bumped fee cap of 1.089 mwei would exceed configured max gas price of 1 mwei (original fee: tip cap 25 wei, fee cap 990 kwei)") + }) + }) +} diff --git a/core/chains/evm/gas/chain_specific.go b/core/chains/evm/gas/chain_specific.go new file mode 100644 index 00000000..daa1ebb3 --- /dev/null +++ b/core/chains/evm/gas/chain_specific.go @@ -0,0 +1,60 @@ +package gas + +import ( + "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +// chainSpecificIsUsable allows for additional logic specific to a particular +// Config that determines whether a transaction should be used for gas estimation +func chainSpecificIsUsable(tx evmtypes.Transaction, baseFee *assets.Wei, chainType config.ChainType, minGasPriceWei *assets.Wei) bool { + if chainType == config.ChainXDai { + // 
GasPrice 0 on most chains is great since it indicates cheap/free transactions. + // However, xDai reserves a special type of "bridge" transaction with 0 gas + // price that is always processed at top priority. Ordinary transactions + // must be priced at least 1GWei, so we have to discard anything priced + // below that (unless the contract is whitelisted). + if tx.GasPrice != nil && tx.GasPrice.Cmp(minGasPriceWei) < 0 { + return false + } + } + if chainType == config.ChainOptimismBedrock || chainType == config.ChainKroma { + // This is a special deposit transaction type introduced in Bedrock upgrade. + // This is a system transaction that it will occur at least one time per block. + // We should discard this type before even processing it to avoid flooding the + // logs with warnings. + // https://github.com/ethereum-optimism/optimism/blob/develop/specs/deposits.md + if tx.Type == 0x7e { + return false + } + } + if chainType == config.ChainCelo { + // Celo specific transaction types that utilize the feeCurrency field. + if tx.Type == 0x7c || tx.Type == 0x7b { + return false + } + // Celo has not yet fully migrated to the 0x7c type for special feeCurrency transactions + // and uses the standard 0x0, 0x2 types instead. We need to discard any invalid transactions + // and not throw an error since this can happen from time to time and it's an expected behavior + // until they fully migrate to 0x7c. + if baseFee != nil && tx.GasPrice.Cmp(baseFee) < 0 { + return false + } + } + if chainType == config.ChainWeMix { + // WeMix specific transaction types that enables fee delegation. 
+ // https://docs.wemix.com/v/en/design/fee-delegation + if tx.Type == 0x16 { + return false + } + } + if chainType == config.ChainZkSync { + // zKSync specific type for contract deployment & priority transactions + // https://era.zksync.io/docs/reference/concepts/transactions.html#eip-712-0x71 + if tx.Type == 0x71 || tx.Type == 0xff { + return false + } + } + return true +} diff --git a/core/chains/evm/gas/cmd/arbgas/main.go b/core/chains/evm/gas/cmd/arbgas/main.go new file mode 100644 index 00000000..fd660cc5 --- /dev/null +++ b/core/chains/evm/gas/cmd/arbgas/main.go @@ -0,0 +1,85 @@ +// arbgas takes a single URL argument and prints the result of three GetLegacyGas calls to the Arbitrum gas estimator. +package main + +import ( + "context" + "fmt" + "log" + "os" + + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/goplugin/plugin-common/pkg/logger" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" +) + +func main() { + if l := len(os.Args); l != 2 { + log.Fatal("Expected one URL argument but got", l-1) + } + url := os.Args[1] + lggr, err := logger.New() + if err != nil { + log.Fatal("Failed to create logger:", err) + } + + ctx := context.Background() + withEstimator(ctx, logger.Sugared(lggr), url, func(e gas.EvmEstimator) { + printGetLegacyGas(ctx, e, make([]byte, 10), 500_000, assets.GWei(1)) + printGetLegacyGas(ctx, e, make([]byte, 10), 500_000, assets.GWei(1), feetypes.OptForceRefetch) + printGetLegacyGas(ctx, e, make([]byte, 10), max, assets.GWei(1)) + }) +} + +func printGetLegacyGas(ctx context.Context, e gas.EvmEstimator, calldata []byte, l2GasLimit uint32, maxGasPrice *assets.Wei, opts ...feetypes.Opt) { + price, limit, err := e.GetLegacyGas(ctx, calldata, l2GasLimit, maxGasPrice, opts...) 
+ if err != nil { + log.Println("failed to get legacy gas:", err) + return + } + fmt.Println("Price:", price) + fmt.Println("Limit:", limit) +} + +const max = 50_000_000 + +func withEstimator(ctx context.Context, lggr logger.SugaredLogger, url string, f func(e gas.EvmEstimator)) { + rc, err := rpc.Dial(url) + if err != nil { + log.Fatal(err) + } + ec := ethclient.NewClient(rc) + e := gas.NewArbitrumEstimator(lggr, &config{max: max}, rc, ec) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + err = e.Start(ctx) + if err != nil { + log.Fatal(err) + } + defer lggr.ErrorIfFn(e.Close, "Error closing ArbitrumEstimator") + + f(e) +} + +var _ gas.ArbConfig = &config{} + +type config struct { + max uint32 + bumpPercent uint16 + bumpMin *assets.Wei +} + +func (c *config) LimitMax() uint32 { + return c.max +} + +func (c *config) BumpPercent() uint16 { + return c.bumpPercent +} + +func (c *config) BumpMin() *assets.Wei { + return c.bumpMin +} diff --git a/core/chains/evm/gas/fixed_price_estimator.go b/core/chains/evm/gas/fixed_price_estimator.go new file mode 100644 index 00000000..865c539c --- /dev/null +++ b/core/chains/evm/gas/fixed_price_estimator.go @@ -0,0 +1,148 @@ +package gas + +import ( + "context" + + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + commonfee "github.com/goplugin/pluginv3.0/v2/common/fee" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +var _ EvmEstimator = (*fixedPriceEstimator)(nil) + +type fixedPriceEstimator struct { + config fixedPriceEstimatorConfig + bhConfig fixedPriceEstimatorBlockHistoryConfig + lggr logger.SugaredLogger +} +type bumpConfig interface { + LimitMultiplier() float32 + PriceMax() *assets.Wei + BumpPercent() uint16 + BumpMin() *assets.Wei + TipCapDefault() *assets.Wei +} + +type fixedPriceEstimatorConfig interface { + BumpThreshold() uint64 + 
FeeCapDefault() *assets.Wei + LimitMultiplier() float32 + PriceDefault() *assets.Wei + TipCapDefault() *assets.Wei + PriceMax() *assets.Wei + Mode() string + bumpConfig +} + +type fixedPriceEstimatorBlockHistoryConfig interface { + EIP1559FeeCapBufferBlocks() uint16 +} + +// NewFixedPriceEstimator returns a new "FixedPrice" estimator which will +// always use the config default values for gas prices and limits +func NewFixedPriceEstimator(cfg fixedPriceEstimatorConfig, bhCfg fixedPriceEstimatorBlockHistoryConfig, lggr logger.Logger) EvmEstimator { + return &fixedPriceEstimator{cfg, bhCfg, logger.Sugared(logger.Named(lggr, "FixedPriceEstimator"))} +} + +func (f *fixedPriceEstimator) Start(context.Context) error { + if f.config.BumpThreshold() == 0 && f.config.Mode() == "FixedPrice" { + // EvmGasFeeCapDefault is ignored if fixed estimator mode is on and gas bumping is disabled + if f.config.FeeCapDefault().Cmp(f.config.PriceMax()) != 0 { + f.lggr.Infof("You are using FixedPrice estimator with gas bumping disabled. 
EVM.GasEstimator.PriceMax (value: %s) will be used as the FeeCap for transactions", f.config.PriceMax()) + } + } + return nil +} + +func (f *fixedPriceEstimator) GetLegacyGas(_ context.Context, _ []byte, gasLimit uint32, maxGasPriceWei *assets.Wei, _ ...feetypes.Opt) (*assets.Wei, uint32, error) { + gasPrice := commonfee.CalculateFee(f.config.PriceDefault().ToInt(), maxGasPriceWei.ToInt(), f.config.PriceMax().ToInt()) + chainSpecificGasLimit, err := commonfee.ApplyMultiplier(gasLimit, f.config.LimitMultiplier()) + if err != nil { + return nil, 0, err + } + return assets.NewWei(gasPrice), chainSpecificGasLimit, nil +} + +func (f *fixedPriceEstimator) BumpLegacyGas( + _ context.Context, + originalGasPrice *assets.Wei, + originalGasLimit uint32, + maxGasPriceWei *assets.Wei, + _ []EvmPriorAttempt, +) (*assets.Wei, uint32, error) { + gasPrice, err := commonfee.CalculateBumpedFee( + f.lggr, + f.config.PriceDefault().ToInt(), + originalGasPrice.ToInt(), + maxGasPriceWei.ToInt(), + f.config.PriceMax().ToInt(), + f.config.BumpMin().ToInt(), + f.config.BumpPercent(), + assets.FormatWei, + ) + if err != nil { + return nil, 0, err + } + + chainSpecificGasLimit, err := commonfee.ApplyMultiplier(originalGasLimit, f.config.LimitMultiplier()) + if err != nil { + return nil, 0, err + } + return assets.NewWei(gasPrice), chainSpecificGasLimit, err +} + +func (f *fixedPriceEstimator) GetDynamicFee(_ context.Context, originalGasLimit uint32, maxGasPriceWei *assets.Wei) (d DynamicFee, chainSpecificGasLimit uint32, err error) { + gasTipCap := f.config.TipCapDefault() + + if gasTipCap == nil { + return d, 0, errors.New("cannot calculate dynamic fee: EthGasTipCapDefault was not set") + } + chainSpecificGasLimit, err = commonfee.ApplyMultiplier(originalGasLimit, f.config.LimitMultiplier()) + if err != nil { + return d, 0, err + } + + var feeCap *assets.Wei + if f.config.BumpThreshold() == 0 { + // Gas bumping is disabled, just use the max fee cap + feeCap = getMaxGasPrice(maxGasPriceWei, 
f.config.PriceMax()) + } else { + // Need to leave headroom for bumping so we fallback to the default value here + feeCap = f.config.FeeCapDefault() + } + + return DynamicFee{ + FeeCap: feeCap, + TipCap: gasTipCap, + }, chainSpecificGasLimit, nil +} + +func (f *fixedPriceEstimator) BumpDynamicFee( + _ context.Context, + originalFee DynamicFee, + originalGasLimit uint32, + maxGasPriceWei *assets.Wei, + _ []EvmPriorAttempt, +) (bumped DynamicFee, chainSpecificGasLimit uint32, err error) { + + return BumpDynamicFeeOnly( + f.config, + f.bhConfig.EIP1559FeeCapBufferBlocks(), + f.lggr, + f.config.TipCapDefault(), + nil, + originalFee, + originalGasLimit, + maxGasPriceWei, + ) +} + +func (f *fixedPriceEstimator) Name() string { return f.lggr.Name() } +func (f *fixedPriceEstimator) Ready() error { return nil } +func (f *fixedPriceEstimator) HealthReport() map[string]error { return map[string]error{} } +func (f *fixedPriceEstimator) Close() error { return nil } +func (f *fixedPriceEstimator) OnNewLongestChain(_ context.Context, _ *evmtypes.Head) {} diff --git a/core/chains/evm/gas/fixed_price_estimator_test.go b/core/chains/evm/gas/fixed_price_estimator_test.go new file mode 100644 index 00000000..5d431e24 --- /dev/null +++ b/core/chains/evm/gas/fixed_price_estimator_test.go @@ -0,0 +1,146 @@ +package gas_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +type blockHistoryConfig struct { + v uint16 +} + +func (b *blockHistoryConfig) EIP1559FeeCapBufferBlocks() uint16 { + return b.v +} + +func Test_FixedPriceEstimator(t *testing.T) { + t.Parallel() + maxGasPrice := assets.NewWeiI(1000000) + + t.Run("GetLegacyGas returns EvmGasPriceDefault from config, with multiplier applied", func(t 
*testing.T) { + config := &gas.MockGasEstimatorConfig{} + f := gas.NewFixedPriceEstimator(config, &blockHistoryConfig{}, logger.Test(t)) + + config.PriceDefaultF = assets.NewWeiI(42) + config.LimitMultiplierF = float32(1.1) + config.PriceMaxF = maxGasPrice + + gasPrice, gasLimit, err := f.GetLegacyGas(testutils.Context(t), nil, 100000, maxGasPrice) + require.NoError(t, err) + assert.Equal(t, 110000, int(gasLimit)) + assert.Equal(t, assets.NewWeiI(42), gasPrice) + }) + + t.Run("GetLegacyGas returns user specified maximum gas price", func(t *testing.T) { + config := &gas.MockGasEstimatorConfig{} + config.PriceDefaultF = assets.NewWeiI(42) + config.LimitMultiplierF = float32(1.1) + config.PriceMaxF = assets.NewWeiI(35) + f := gas.NewFixedPriceEstimator(config, &blockHistoryConfig{}, logger.Test(t)) + + gasPrice, gasLimit, err := f.GetLegacyGas(testutils.Context(t), nil, 100000, assets.NewWeiI(30)) + require.NoError(t, err) + assert.Equal(t, 110000, int(gasLimit)) + assert.Equal(t, assets.NewWeiI(30), gasPrice) + }) + + t.Run("GetLegacyGas returns global maximum gas price", func(t *testing.T) { + config := &gas.MockGasEstimatorConfig{} + config.PriceDefaultF = assets.NewWeiI(42) + config.LimitMultiplierF = float32(1.1) + config.PriceMaxF = assets.NewWeiI(20) + + f := gas.NewFixedPriceEstimator(config, &blockHistoryConfig{}, logger.Test(t)) + gasPrice, gasLimit, err := f.GetLegacyGas(testutils.Context(t), nil, 100000, assets.NewWeiI(30)) + require.NoError(t, err) + assert.Equal(t, 110000, int(gasLimit)) + assert.Equal(t, assets.NewWeiI(20), gasPrice) + }) + + t.Run("BumpLegacyGas calls BumpLegacyGasPriceOnly", func(t *testing.T) { + config := &gas.MockGasEstimatorConfig{} + config.PriceDefaultF = assets.NewWeiI(42) + config.LimitMultiplierF = float32(1.1) + config.PriceMaxF = maxGasPrice + config.BumpPercentF = uint16(10) + config.BumpMinF = assets.NewWeiI(150) + + lggr := logger.TestSugared(t) + f := gas.NewFixedPriceEstimator(config, &blockHistoryConfig{}, lggr) + + 
gasPrice, gasLimit, err := f.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(42), 100000, maxGasPrice, nil) + require.NoError(t, err) + + expectedGasPrice, expectedGasLimit, err := gas.BumpLegacyGasPriceOnly(config, lggr, nil, assets.NewWeiI(42), 100000, maxGasPrice) + require.NoError(t, err) + + assert.Equal(t, expectedGasLimit, gasLimit) + assert.Equal(t, expectedGasPrice, gasPrice) + }) + + t.Run("GetDynamicFee returns defaults from config, with multiplier applied", func(t *testing.T) { + config := &gas.MockGasEstimatorConfig{} + config.LimitMultiplierF = float32(1.1) + config.PriceMaxF = maxGasPrice + config.TipCapDefaultF = assets.NewWeiI(52) + config.FeeCapDefaultF = assets.NewWeiI(100) + config.BumpThresholdF = uint64(3) + + lggr := logger.Test(t) + f := gas.NewFixedPriceEstimator(config, &blockHistoryConfig{}, lggr) + + fee, gasLimit, err := f.GetDynamicFee(testutils.Context(t), 100000, maxGasPrice) + require.NoError(t, err) + assert.Equal(t, 110000, int(gasLimit)) + + assert.Equal(t, assets.NewWeiI(52), fee.TipCap) + assert.Equal(t, assets.NewWeiI(100), fee.FeeCap) + + // Gas bumping disabled + config.BumpThresholdF = uint64(0) + + fee, gasLimit, err = f.GetDynamicFee(testutils.Context(t), 100000, maxGasPrice) + require.NoError(t, err) + assert.Equal(t, 110000, int(gasLimit)) + + assert.Equal(t, assets.NewWeiI(52), fee.TipCap) + assert.Equal(t, maxGasPrice, fee.FeeCap) + + // override max gas price + fee, gasLimit, err = f.GetDynamicFee(testutils.Context(t), 100000, assets.NewWeiI(10)) + require.NoError(t, err) + assert.Equal(t, 110000, int(gasLimit)) + + assert.Equal(t, assets.NewWeiI(52), fee.TipCap) + assert.Equal(t, assets.NewWeiI(10), fee.FeeCap) + }) + + t.Run("BumpDynamicFee calls BumpDynamicFeeOnly", func(t *testing.T) { + config := &gas.MockGasEstimatorConfig{} + config.LimitMultiplierF = float32(1.1) + config.PriceMaxF = maxGasPrice + config.TipCapDefaultF = assets.NewWeiI(52) + config.BumpMinF = assets.NewWeiI(150) + config.BumpPercentF = 
uint16(10) + + lggr := logger.TestSugared(t) + f := gas.NewFixedPriceEstimator(config, &blockHistoryConfig{}, lggr) + + originalFee := gas.DynamicFee{FeeCap: assets.NewWeiI(100), TipCap: assets.NewWeiI(25)} + fee, gasLimit, err := f.BumpDynamicFee(testutils.Context(t), originalFee, 100000, maxGasPrice, nil) + require.NoError(t, err) + + expectedFee, expectedGasLimit, err := gas.BumpDynamicFeeOnly(config, 0, lggr, nil, nil, originalFee, 100000, maxGasPrice) + require.NoError(t, err) + + assert.Equal(t, expectedGasLimit, gasLimit) + assert.Equal(t, expectedFee, fee) + }) +} diff --git a/core/chains/evm/gas/gas_test.go b/core/chains/evm/gas/gas_test.go new file mode 100644 index 00000000..d9fb63a2 --- /dev/null +++ b/core/chains/evm/gas/gas_test.go @@ -0,0 +1,349 @@ +package gas_test + +import ( + "fmt" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" +) + +func Test_BumpLegacyGasPriceOnly(t *testing.T) { + t.Parallel() + + for _, test := range []struct { + name string + currentGasPrice *assets.Wei + originalGasPrice *assets.Wei + bumpPercent uint16 + bumpMin *assets.Wei + priceMax *assets.Wei + expectedGasPrice *assets.Wei + originalLimit uint32 + limitMultiplierPercent float32 + expectedLimit uint64 + }{ + { + name: "defaults", + currentGasPrice: toWei("2e10"), // 20 GWei + originalGasPrice: toWei("3e10"), // 30 GWei + bumpPercent: 20, + bumpMin: toWei("5e9"), // 0.5 GWei + priceMax: toWei("5e11"), // 0.5 uEther + expectedGasPrice: toWei("3.6e10"), // 36 GWei + originalLimit: 100000, + limitMultiplierPercent: 1.0, + expectedLimit: 100000, + }, + { + name: "defaults with nil currentGasPrice", + currentGasPrice: nil, + originalGasPrice: toWei("3e10"), // 30 GWei + bumpPercent: 20, + bumpMin: toWei("5e9"), // 0.5 GWei + priceMax: toWei("5e11"), 
// 0.5 uEther + expectedGasPrice: toWei("3.6e10"), // 36 GWei + originalLimit: 100000, + limitMultiplierPercent: 1.0, + expectedLimit: 100000, + }, + { + name: "original + percentage wins", + currentGasPrice: toWei("2e10"), // 20 GWei + originalGasPrice: toWei("3e10"), // 30 GWei + bumpPercent: 30, + bumpMin: toWei("5e9"), // 0.5 GWei + priceMax: toWei("5e11"), // 0.5 uEther + expectedGasPrice: toWei("3.9e10"), // 39 GWei + originalLimit: 100000, + limitMultiplierPercent: 1.1, + expectedLimit: 110000, + }, + { + name: "original + fixed wins", + currentGasPrice: toWei("2e10"), // 20 GWei + originalGasPrice: toWei("3e10"), // 30 GWei + bumpPercent: 20, + bumpMin: toWei("8e9"), // 0.8 GWei + priceMax: toWei("5e11"), // 0.5 uEther + expectedGasPrice: toWei("3.8e10"), // 38 GWei + originalLimit: 100000, + limitMultiplierPercent: 0.8, + expectedLimit: 80000, + }, + { + name: "current wins", + currentGasPrice: toWei("4e10"), + originalGasPrice: toWei("3e10"), // 30 GWei + bumpPercent: 20, + bumpMin: toWei("9e9"), // 0.9 GWei + priceMax: toWei("5e11"), // 0.5 uEther + expectedGasPrice: toWei("4e10"), // 40 GWei + originalLimit: 100000, + limitMultiplierPercent: 1.0, + expectedLimit: 100000, + }, + } { + t.Run(test.name, func(t *testing.T) { + cfg := &gas.MockGasEstimatorConfig{} + cfg.BumpPercentF = test.bumpPercent + cfg.BumpMinF = test.bumpMin + cfg.PriceMaxF = test.priceMax + cfg.LimitMultiplierF = test.limitMultiplierPercent + actual, limit, err := gas.BumpLegacyGasPriceOnly(cfg, logger.TestSugared(t), test.currentGasPrice, test.originalGasPrice, test.originalLimit, test.priceMax) + require.NoError(t, err) + if actual.Cmp(test.expectedGasPrice) != 0 { + t.Fatalf("Expected %s but got %s", test.expectedGasPrice.String(), actual.String()) + } + assert.Equal(t, int(test.expectedLimit), int(limit)) + }) + } +} + +func Test_BumpLegacyGasPriceOnly_HitsMaxError(t *testing.T) { + t.Parallel() + + priceMax := assets.GWei(40) + cfg := &gas.MockGasEstimatorConfig{} + 
cfg.BumpPercentF = uint16(50) + cfg.BumpMinF = assets.NewWeiI(5000000000) + cfg.PriceMaxF = priceMax + + originalGasPrice := toWei("3e10") // 30 GWei + _, _, err := gas.BumpLegacyGasPriceOnly(cfg, logger.TestSugared(t), nil, originalGasPrice, 42, priceMax) + require.Error(t, err) + require.Contains(t, err.Error(), "bumped gas price of 45 gwei would exceed configured max gas price of 40 gwei (original price was 30 gwei)") +} + +func Test_BumpLegacyGasPriceOnly_NoBumpError(t *testing.T) { + t.Parallel() + + priceMax := assets.GWei(40) + lggr := logger.TestSugared(t) + + cfg := &gas.MockGasEstimatorConfig{} + cfg.BumpPercentF = uint16(0) + cfg.BumpMinF = assets.NewWeiI(0) + cfg.PriceMaxF = priceMax + + originalGasPrice := toWei("3e10") // 30 GWei + _, _, err := gas.BumpLegacyGasPriceOnly(cfg, lggr, nil, originalGasPrice, 42, priceMax) + require.Error(t, err) + require.Contains(t, err.Error(), "bumped gas price of 30 gwei is equal to original gas price of 30 gwei. ACTION REQUIRED: This is a configuration error, you must increase either EVM.GasEstimator.BumpPercent or EVM.GasEstimator.BumpMin") + + // Even if it's exactly the maximum + originalGasPrice = toWei("4e10") // 40 GWei + _, _, err = gas.BumpLegacyGasPriceOnly(cfg, lggr, nil, originalGasPrice, 42, priceMax) + require.Error(t, err) + require.Contains(t, err.Error(), "bumped gas price of 40 gwei is equal to original gas price of 40 gwei. 
ACTION REQUIRED: This is a configuration error, you must increase either EVM.GasEstimator.BumpPercent or EVM.GasEstimator.BumpMin") +} + +func Test_BumpDynamicFeeOnly(t *testing.T) { + t.Parallel() + + for _, test := range []struct { + name string + currentTipCap *assets.Wei + currentBaseFee *assets.Wei + originalFee gas.DynamicFee + tipCapDefault *assets.Wei + bumpPercent uint16 + bumpMin *assets.Wei + priceMax *assets.Wei + expectedFee gas.DynamicFee + originalLimit uint32 + limitMultiplierPercent float32 + expectedLimit uint64 + }{ + { + name: "defaults", + currentTipCap: nil, + currentBaseFee: nil, + originalFee: gas.DynamicFee{TipCap: assets.GWei(30), FeeCap: assets.GWei(4000)}, + tipCapDefault: assets.GWei(20), + bumpPercent: 20, + bumpMin: toWei("5e9"), // 0.5 GWei + priceMax: assets.GWei(5000), + expectedFee: gas.DynamicFee{TipCap: assets.GWei(36), FeeCap: assets.GWei(4800)}, + originalLimit: 100000, + limitMultiplierPercent: 1.0, + expectedLimit: 100000, + }, + { + name: "original + percentage wins", + currentTipCap: nil, + currentBaseFee: nil, + originalFee: gas.DynamicFee{TipCap: assets.GWei(30), FeeCap: assets.GWei(100)}, + tipCapDefault: assets.GWei(20), + bumpPercent: 30, + bumpMin: toWei("5e9"), // 0.5 GWei + priceMax: toWei("5e11"), // 500GWei + expectedFee: gas.DynamicFee{TipCap: assets.GWei(39), FeeCap: assets.GWei(130)}, + originalLimit: 100000, + limitMultiplierPercent: 1.1, + expectedLimit: 110000, + }, + { + name: "original + fixed wins", + currentTipCap: nil, + currentBaseFee: nil, + originalFee: gas.DynamicFee{TipCap: assets.GWei(30), FeeCap: assets.GWei(400)}, + tipCapDefault: assets.GWei(20), + bumpPercent: 20, + bumpMin: toWei("8e9"), // 0.8 GWei + priceMax: toWei("5e11"), // 500GWei + expectedFee: gas.DynamicFee{TipCap: assets.GWei(38), FeeCap: assets.GWei(480)}, + originalLimit: 100000, + limitMultiplierPercent: 0.8, + expectedLimit: 80000, + }, + { + name: "default + percentage wins", + currentTipCap: nil, + currentBaseFee: nil, + 
originalFee: gas.DynamicFee{TipCap: assets.GWei(30), FeeCap: assets.GWei(400)}, + tipCapDefault: assets.GWei(40), + bumpPercent: 20, + bumpMin: toWei("5e9"), // 0.5 GWei + priceMax: toWei("5e11"), // 500GWei + expectedFee: gas.DynamicFee{TipCap: assets.GWei(48), FeeCap: assets.GWei(480)}, + originalLimit: 100000, + limitMultiplierPercent: 1.0, + expectedLimit: 100000, + }, + { + name: "default + fixed wins", + currentTipCap: assets.GWei(48), + currentBaseFee: nil, + originalFee: gas.DynamicFee{TipCap: assets.GWei(30), FeeCap: assets.GWei(400)}, + tipCapDefault: assets.GWei(40), + bumpPercent: 20, + bumpMin: toWei("9e9"), // 0.9 GWei + priceMax: toWei("5e11"), // 500GWei + expectedFee: gas.DynamicFee{TipCap: assets.GWei(49), FeeCap: assets.GWei(480)}, + originalLimit: 100000, + limitMultiplierPercent: 1.0, + expectedLimit: 100000, + }, + { + name: "higher current tip cap wins", + currentTipCap: assets.GWei(50), + currentBaseFee: nil, + originalFee: gas.DynamicFee{TipCap: assets.GWei(30), FeeCap: assets.GWei(400)}, + tipCapDefault: assets.GWei(40), + bumpPercent: 20, + bumpMin: toWei("9e9"), // 0.9 GWei + priceMax: toWei("5e11"), // 500GWei + expectedFee: gas.DynamicFee{TipCap: assets.GWei(50), FeeCap: assets.GWei(480)}, + originalLimit: 100000, + limitMultiplierPercent: 1.0, + expectedLimit: 100000, + }, + { + name: "if bumped tip cap would exceed bumped fee cap, adds fixed value to expectedFee", + currentTipCap: nil, + currentBaseFee: nil, + originalFee: gas.DynamicFee{TipCap: assets.GWei(10), FeeCap: assets.GWei(20)}, + tipCapDefault: assets.GWei(5), + bumpPercent: 5, + bumpMin: assets.GWei(50), + priceMax: toWei("5e11"), // 500GWei + expectedFee: gas.DynamicFee{TipCap: assets.GWei(60), FeeCap: assets.GWei(70)}, + originalLimit: 100000, + limitMultiplierPercent: 1.0, + expectedLimit: 100000, + }, + { + name: "ignores current base fee and uses previous fee cap if calculated fee cap would be lower", + currentTipCap: assets.GWei(20), + currentBaseFee: 
assets.GWei(100), + originalFee: gas.DynamicFee{TipCap: assets.GWei(30), FeeCap: assets.GWei(400)}, + tipCapDefault: assets.GWei(20), + bumpPercent: 20, + bumpMin: toWei("5e9"), // 0.5 GWei + priceMax: assets.GWei(5000), + expectedFee: gas.DynamicFee{TipCap: assets.GWei(36), FeeCap: assets.GWei(480)}, + originalLimit: 100000, + limitMultiplierPercent: 1.0, + expectedLimit: 100000, + }, + { + name: "uses current base fee to calculate fee cap if that would be higher than the existing one", + currentTipCap: assets.GWei(20), + currentBaseFee: assets.GWei(1000), + originalFee: gas.DynamicFee{TipCap: assets.GWei(30), FeeCap: assets.GWei(400)}, + tipCapDefault: assets.GWei(20), + bumpPercent: 20, + bumpMin: toWei("5e9"), // 0.5 GWei + priceMax: assets.GWei(5000), + // base fee * 4 blocks * 1.125 % plus new tip cap to give max + // 1000 * (1.125 ^ 4) + 36 ~= 1637 + expectedFee: gas.DynamicFee{TipCap: assets.GWei(36), FeeCap: assets.NewWeiI(1637806640625)}, + originalLimit: 100000, + limitMultiplierPercent: 1.0, + expectedLimit: 100000, + }, + } { + t.Run(test.name, func(t *testing.T) { + cfg := &gas.MockGasEstimatorConfig{} + cfg.BumpPercentF = test.bumpPercent + cfg.TipCapDefaultF = test.tipCapDefault + cfg.BumpMinF = test.bumpMin + cfg.PriceMaxF = test.priceMax + cfg.LimitMultiplierF = test.limitMultiplierPercent + + bufferBlocks := uint16(4) + actual, limit, err := gas.BumpDynamicFeeOnly(cfg, bufferBlocks, logger.TestSugared(t), test.currentTipCap, test.currentBaseFee, test.originalFee, test.originalLimit, test.priceMax) + require.NoError(t, err) + if actual.TipCap.Cmp(test.expectedFee.TipCap) != 0 { + t.Fatalf("TipCap not equal, expected %s but got %s", test.expectedFee.TipCap.String(), actual.TipCap.String()) + } + if actual.FeeCap.Cmp(test.expectedFee.FeeCap) != 0 { + t.Fatalf("FeeCap not equal, expected %s but got %s", test.expectedFee.FeeCap.String(), actual.FeeCap.String()) + } + assert.Equal(t, int(test.expectedLimit), int(limit)) + }) + } +} + +func 
Test_BumpDynamicFeeOnly_HitsMaxError(t *testing.T) { + t.Parallel() + + priceMax := assets.GWei(40) + + cfg := &gas.MockGasEstimatorConfig{} + cfg.BumpPercentF = uint16(50) + cfg.TipCapDefaultF = assets.GWei(0) + cfg.BumpMinF = assets.NewWeiI(5000000000) + cfg.PriceMaxF = priceMax + + t.Run("tip cap hits max", func(t *testing.T) { + originalFee := gas.DynamicFee{TipCap: assets.GWei(30), FeeCap: assets.GWei(100)} + _, _, err := gas.BumpDynamicFeeOnly(cfg, 0, logger.TestSugared(t), nil, nil, originalFee, 42, priceMax) + require.Error(t, err) + require.Contains(t, err.Error(), "bumped tip cap of 45 gwei would exceed configured max gas price of 40 gwei (original fee: tip cap 30 gwei, fee cap 100 gwei)") + }) + + t.Run("fee cap hits max", func(t *testing.T) { + originalFee := gas.DynamicFee{TipCap: assets.GWei(10), FeeCap: assets.GWei(100)} + _, _, err := gas.BumpDynamicFeeOnly(cfg, 0, logger.TestSugared(t), nil, nil, originalFee, 42, priceMax) + require.Error(t, err) + require.Contains(t, err.Error(), "bumped fee cap of 150 gwei would exceed configured max gas price of 40 gwei (original fee: tip cap 10 gwei, fee cap 100 gwei)") + }) +} + +// toWei is used to convert scientific notation string to a *assets.Wei +func toWei(input string) *assets.Wei { + flt, _, err := big.ParseFloat(input, 10, 0, big.ToNearestEven) + if err != nil { + panic(fmt.Sprintf("unable to parse '%s' into a big.Float: %v", input, err)) + } + var i = new(big.Int) + i, _ = flt.Int(i) + return assets.NewWei(i) +} diff --git a/core/chains/evm/gas/helpers_test.go b/core/chains/evm/gas/helpers_test.go new file mode 100644 index 00000000..9e0db437 --- /dev/null +++ b/core/chains/evm/gas/helpers_test.go @@ -0,0 +1,200 @@ +package gas + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +func init() 
{ + // No need to wait 10 seconds in tests + MaxStartTime = 1 * time.Second +} + +func (b *BlockHistoryEstimator) CheckConnectivity(attempts []EvmPriorAttempt) error { + return b.checkConnectivity(attempts) +} + +func BlockHistoryEstimatorFromInterface(bhe EvmEstimator) *BlockHistoryEstimator { + return bhe.(*BlockHistoryEstimator) +} + +func SetRollingBlockHistory(bhe EvmEstimator, blocks []evmtypes.Block) { + bhe.(*BlockHistoryEstimator).blocksMu.Lock() + defer bhe.(*BlockHistoryEstimator).blocksMu.Unlock() + bhe.(*BlockHistoryEstimator).blocks = blocks +} + +func GetRollingBlockHistory(bhe EvmEstimator) []evmtypes.Block { + return bhe.(*BlockHistoryEstimator).getBlocks() +} + +func SetGasPrice(b *BlockHistoryEstimator, gp *assets.Wei) { + b.priceMu.Lock() + defer b.priceMu.Unlock() + b.gasPrice = gp +} + +func SetTipCap(b *BlockHistoryEstimator, gp *assets.Wei) { + b.priceMu.Lock() + defer b.priceMu.Unlock() + b.tipCap = gp +} + +func GetGasPrice(b *BlockHistoryEstimator) *assets.Wei { + b.priceMu.RLock() + defer b.priceMu.RUnlock() + return b.gasPrice +} + +func GetTipCap(b *BlockHistoryEstimator) *assets.Wei { + b.priceMu.RLock() + defer b.priceMu.RUnlock() + return b.tipCap +} + +func GetLatestBaseFee(b *BlockHistoryEstimator) *assets.Wei { + b.latestMu.RLock() + defer b.latestMu.RUnlock() + if b.latest == nil { + return nil + } + return b.latest.BaseFeePerGas +} + +func SimulateStart(t *testing.T, b *BlockHistoryEstimator) { + require.NoError(t, b.StartOnce("BlockHistoryEstimatorSimulatedStart", func() error { return nil })) +} + +type MockBlockHistoryConfig struct { + BatchSizeF uint32 + BlockDelayF uint16 + BlockHistorySizeF uint16 + CheckInclusionBlocksF uint16 + CheckInclusionPercentileF uint16 + EIP1559FeeCapBufferBlocksF uint16 + TransactionPercentileF uint16 + FinalityTagEnabledF bool +} + +func (m *MockBlockHistoryConfig) BatchSize() uint32 { + return m.BatchSizeF +} + +func (m *MockBlockHistoryConfig) BlockDelay() uint16 { + return m.BlockDelayF +} 
+ +func (m *MockBlockHistoryConfig) BlockHistorySize() uint16 { + return m.BlockHistorySizeF +} + +func (m *MockBlockHistoryConfig) CheckInclusionPercentile() uint16 { + return m.CheckInclusionPercentileF +} + +func (m *MockBlockHistoryConfig) CheckInclusionBlocks() uint16 { + return m.CheckInclusionBlocksF +} + +func (m *MockBlockHistoryConfig) EIP1559FeeCapBufferBlocks() uint16 { + return m.EIP1559FeeCapBufferBlocksF +} + +func (m *MockBlockHistoryConfig) TransactionPercentile() uint16 { + return m.TransactionPercentileF +} + +type MockConfig struct { + ChainTypeF string + FinalityTagEnabledF bool +} + +func NewMockConfig() *MockConfig { + return &MockConfig{} +} + +func (m *MockConfig) ChainType() config.ChainType { + return config.ChainType(m.ChainTypeF) +} + +func (m *MockConfig) FinalityDepth() uint32 { + panic("not implemented") // TODO: Implement +} + +func (m *MockConfig) FinalityTagEnabled() bool { + return m.FinalityTagEnabledF +} + +type MockGasEstimatorConfig struct { + EIP1559DynamicFeesF bool + BumpPercentF uint16 + BumpThresholdF uint64 + BumpMinF *assets.Wei + LimitMultiplierF float32 + TipCapDefaultF *assets.Wei + TipCapMinF *assets.Wei + PriceMaxF *assets.Wei + PriceMinF *assets.Wei + PriceDefaultF *assets.Wei + FeeCapDefaultF *assets.Wei + LimitMaxF uint32 + ModeF string +} + +func (m *MockGasEstimatorConfig) BumpPercent() uint16 { + return m.BumpPercentF +} + +func (m *MockGasEstimatorConfig) BumpThreshold() uint64 { + return m.BumpThresholdF +} + +func (m *MockGasEstimatorConfig) BumpMin() *assets.Wei { + return m.BumpMinF +} + +func (m *MockGasEstimatorConfig) EIP1559DynamicFees() bool { + return m.EIP1559DynamicFeesF +} + +func (m *MockGasEstimatorConfig) LimitMultiplier() float32 { + return m.LimitMultiplierF +} + +func (m *MockGasEstimatorConfig) PriceDefault() *assets.Wei { + return m.PriceDefaultF +} + +func (m *MockGasEstimatorConfig) TipCapDefault() *assets.Wei { + return m.TipCapDefaultF +} + +func (m *MockGasEstimatorConfig) 
TipCapMin() *assets.Wei { + return m.TipCapMinF +} + +func (m *MockGasEstimatorConfig) PriceMax() *assets.Wei { + return m.PriceMaxF +} + +func (m *MockGasEstimatorConfig) PriceMin() *assets.Wei { + return m.PriceMinF +} + +func (m *MockGasEstimatorConfig) FeeCapDefault() *assets.Wei { + return m.FeeCapDefaultF +} + +func (m *MockGasEstimatorConfig) LimitMax() uint32 { + return m.LimitMaxF +} + +func (m *MockGasEstimatorConfig) Mode() string { + return m.ModeF +} diff --git a/core/chains/evm/gas/mocks/config.go b/core/chains/evm/gas/mocks/config.go new file mode 100644 index 00000000..c47f1fd1 --- /dev/null +++ b/core/chains/evm/gas/mocks/config.go @@ -0,0 +1,82 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + config "github.com/goplugin/pluginv3.0/v2/common/config" + + mock "github.com/stretchr/testify/mock" +) + +// Config is an autogenerated mock type for the Config type +type Config struct { + mock.Mock +} + +// ChainType provides a mock function with given fields: +func (_m *Config) ChainType() config.ChainType { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ChainType") + } + + var r0 config.ChainType + if rf, ok := ret.Get(0).(func() config.ChainType); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(config.ChainType) + } + + return r0 +} + +// FinalityDepth provides a mock function with given fields: +func (_m *Config) FinalityDepth() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalityDepth") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// FinalityTagEnabled provides a mock function with given fields: +func (_m *Config) FinalityTagEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalityTagEnabled") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 
= ret.Get(0).(bool) + } + + return r0 +} + +// NewConfig creates a new instance of Config. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConfig(t interface { + mock.TestingT + Cleanup(func()) +}) *Config { + mock := &Config{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/gas/mocks/eth_client.go b/core/chains/evm/gas/mocks/eth_client.go new file mode 100644 index 00000000..bb0784f8 --- /dev/null +++ b/core/chains/evm/gas/mocks/eth_client.go @@ -0,0 +1,61 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + ethereum "github.com/ethereum/go-ethereum" + + mock "github.com/stretchr/testify/mock" +) + +// ETHClient is an autogenerated mock type for the ethClient type +type ETHClient struct { + mock.Mock +} + +// CallContract provides a mock function with given fields: ctx, msg, blockNumber +func (_m *ETHClient) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, msg, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)); ok { + return rf(ctx, msg, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) []byte); ok { + r0 = rf(ctx, msg, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg, *big.Int) error); ok { + r1 = rf(ctx, msg, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewETHClient creates a new instance of ETHClient. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewETHClient(t interface { + mock.TestingT + Cleanup(func()) +}) *ETHClient { + mock := ÐClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/gas/mocks/evm_estimator.go b/core/chains/evm/gas/mocks/evm_estimator.go new file mode 100644 index 00000000..65cacb06 --- /dev/null +++ b/core/chains/evm/gas/mocks/evm_estimator.go @@ -0,0 +1,284 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + assets "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + + gas "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + + mock "github.com/stretchr/testify/mock" + + types "github.com/goplugin/pluginv3.0/v2/common/fee/types" +) + +// EvmEstimator is an autogenerated mock type for the EvmEstimator type +type EvmEstimator struct { + mock.Mock +} + +// BumpDynamicFee provides a mock function with given fields: ctx, original, gasLimit, maxGasPriceWei, attempts +func (_m *EvmEstimator) BumpDynamicFee(ctx context.Context, original gas.DynamicFee, gasLimit uint32, maxGasPriceWei *assets.Wei, attempts []gas.EvmPriorAttempt) (gas.DynamicFee, uint32, error) { + ret := _m.Called(ctx, original, gasLimit, maxGasPriceWei, attempts) + + if len(ret) == 0 { + panic("no return value specified for BumpDynamicFee") + } + + var r0 gas.DynamicFee + var r1 uint32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, gas.DynamicFee, uint32, *assets.Wei, []gas.EvmPriorAttempt) (gas.DynamicFee, uint32, error)); ok { + return rf(ctx, original, gasLimit, maxGasPriceWei, attempts) + } + if rf, ok := ret.Get(0).(func(context.Context, gas.DynamicFee, uint32, *assets.Wei, []gas.EvmPriorAttempt) gas.DynamicFee); ok { + r0 = 
rf(ctx, original, gasLimit, maxGasPriceWei, attempts) + } else { + r0 = ret.Get(0).(gas.DynamicFee) + } + + if rf, ok := ret.Get(1).(func(context.Context, gas.DynamicFee, uint32, *assets.Wei, []gas.EvmPriorAttempt) uint32); ok { + r1 = rf(ctx, original, gasLimit, maxGasPriceWei, attempts) + } else { + r1 = ret.Get(1).(uint32) + } + + if rf, ok := ret.Get(2).(func(context.Context, gas.DynamicFee, uint32, *assets.Wei, []gas.EvmPriorAttempt) error); ok { + r2 = rf(ctx, original, gasLimit, maxGasPriceWei, attempts) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// BumpLegacyGas provides a mock function with given fields: ctx, originalGasPrice, gasLimit, maxGasPriceWei, attempts +func (_m *EvmEstimator) BumpLegacyGas(ctx context.Context, originalGasPrice *assets.Wei, gasLimit uint32, maxGasPriceWei *assets.Wei, attempts []gas.EvmPriorAttempt) (*assets.Wei, uint32, error) { + ret := _m.Called(ctx, originalGasPrice, gasLimit, maxGasPriceWei, attempts) + + if len(ret) == 0 { + panic("no return value specified for BumpLegacyGas") + } + + var r0 *assets.Wei + var r1 uint32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, *assets.Wei, uint32, *assets.Wei, []gas.EvmPriorAttempt) (*assets.Wei, uint32, error)); ok { + return rf(ctx, originalGasPrice, gasLimit, maxGasPriceWei, attempts) + } + if rf, ok := ret.Get(0).(func(context.Context, *assets.Wei, uint32, *assets.Wei, []gas.EvmPriorAttempt) *assets.Wei); ok { + r0 = rf(ctx, originalGasPrice, gasLimit, maxGasPriceWei, attempts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *assets.Wei, uint32, *assets.Wei, []gas.EvmPriorAttempt) uint32); ok { + r1 = rf(ctx, originalGasPrice, gasLimit, maxGasPriceWei, attempts) + } else { + r1 = ret.Get(1).(uint32) + } + + if rf, ok := ret.Get(2).(func(context.Context, *assets.Wei, uint32, *assets.Wei, []gas.EvmPriorAttempt) error); ok { + r2 = rf(ctx, originalGasPrice, gasLimit, 
maxGasPriceWei, attempts) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Close provides a mock function with given fields: +func (_m *EvmEstimator) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetDynamicFee provides a mock function with given fields: ctx, gasLimit, maxGasPriceWei +func (_m *EvmEstimator) GetDynamicFee(ctx context.Context, gasLimit uint32, maxGasPriceWei *assets.Wei) (gas.DynamicFee, uint32, error) { + ret := _m.Called(ctx, gasLimit, maxGasPriceWei) + + if len(ret) == 0 { + panic("no return value specified for GetDynamicFee") + } + + var r0 gas.DynamicFee + var r1 uint32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, *assets.Wei) (gas.DynamicFee, uint32, error)); ok { + return rf(ctx, gasLimit, maxGasPriceWei) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, *assets.Wei) gas.DynamicFee); ok { + r0 = rf(ctx, gasLimit, maxGasPriceWei) + } else { + r0 = ret.Get(0).(gas.DynamicFee) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, *assets.Wei) uint32); ok { + r1 = rf(ctx, gasLimit, maxGasPriceWei) + } else { + r1 = ret.Get(1).(uint32) + } + + if rf, ok := ret.Get(2).(func(context.Context, uint32, *assets.Wei) error); ok { + r2 = rf(ctx, gasLimit, maxGasPriceWei) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// GetLegacyGas provides a mock function with given fields: ctx, calldata, gasLimit, maxGasPriceWei, opts +func (_m *EvmEstimator) GetLegacyGas(ctx context.Context, calldata []byte, gasLimit uint32, maxGasPriceWei *assets.Wei, opts ...types.Opt) (*assets.Wei, uint32, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, calldata, gasLimit, maxGasPriceWei) + _ca = append(_ca, 
_va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetLegacyGas") + } + + var r0 *assets.Wei + var r1 uint32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint32, *assets.Wei, ...types.Opt) (*assets.Wei, uint32, error)); ok { + return rf(ctx, calldata, gasLimit, maxGasPriceWei, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint32, *assets.Wei, ...types.Opt) *assets.Wei); ok { + r0 = rf(ctx, calldata, gasLimit, maxGasPriceWei, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, uint32, *assets.Wei, ...types.Opt) uint32); ok { + r1 = rf(ctx, calldata, gasLimit, maxGasPriceWei, opts...) + } else { + r1 = ret.Get(1).(uint32) + } + + if rf, ok := ret.Get(2).(func(context.Context, []byte, uint32, *assets.Wei, ...types.Opt) error); ok { + r2 = rf(ctx, calldata, gasLimit, maxGasPriceWei, opts...) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// HealthReport provides a mock function with given fields: +func (_m *EvmEstimator) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *EvmEstimator) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// OnNewLongestChain provides a mock function with given fields: ctx, head +func (_m *EvmEstimator) OnNewLongestChain(ctx context.Context, head *evmtypes.Head) { + _m.Called(ctx, head) +} + +// Ready 
provides a mock function with given fields: +func (_m *EvmEstimator) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *EvmEstimator) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewEvmEstimator creates a new instance of EvmEstimator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEvmEstimator(t interface { + mock.TestingT + Cleanup(func()) +}) *EvmEstimator { + mock := &EvmEstimator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/gas/mocks/evm_fee_estimator.go b/core/chains/evm/gas/mocks/evm_fee_estimator.go new file mode 100644 index 00000000..89cca458 --- /dev/null +++ b/core/chains/evm/gas/mocks/evm_fee_estimator.go @@ -0,0 +1,271 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + assets "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + + context "context" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + + gas "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + + mock "github.com/stretchr/testify/mock" + + rollups "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas/rollups" + + types "github.com/goplugin/pluginv3.0/v2/common/fee/types" +) + +// EvmFeeEstimator is an autogenerated mock type for the EvmFeeEstimator type +type EvmFeeEstimator struct { + mock.Mock +} + +// BumpFee provides a mock function with given fields: ctx, originalFee, feeLimit, maxFeePrice, attempts +func (_m *EvmFeeEstimator) BumpFee(ctx context.Context, originalFee gas.EvmFee, feeLimit uint32, maxFeePrice *assets.Wei, attempts []gas.EvmPriorAttempt) (gas.EvmFee, uint32, error) { + ret := _m.Called(ctx, originalFee, feeLimit, maxFeePrice, attempts) + + if len(ret) == 0 { + panic("no return value specified for BumpFee") + } + + var r0 gas.EvmFee + var r1 uint32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, gas.EvmFee, uint32, *assets.Wei, []gas.EvmPriorAttempt) (gas.EvmFee, uint32, error)); ok { + return rf(ctx, originalFee, feeLimit, maxFeePrice, attempts) + } + if rf, ok := ret.Get(0).(func(context.Context, gas.EvmFee, uint32, *assets.Wei, []gas.EvmPriorAttempt) gas.EvmFee); ok { + r0 = rf(ctx, originalFee, feeLimit, maxFeePrice, attempts) + } else { + r0 = ret.Get(0).(gas.EvmFee) + } + + if rf, ok := ret.Get(1).(func(context.Context, gas.EvmFee, uint32, *assets.Wei, []gas.EvmPriorAttempt) uint32); ok { + r1 = rf(ctx, originalFee, feeLimit, maxFeePrice, attempts) + } else { + r1 = ret.Get(1).(uint32) + } + + if rf, ok := ret.Get(2).(func(context.Context, gas.EvmFee, uint32, *assets.Wei, []gas.EvmPriorAttempt) error); ok { + r2 = rf(ctx, originalFee, feeLimit, maxFeePrice, attempts) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Close 
provides a mock function with given fields: +func (_m *EvmFeeEstimator) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetFee provides a mock function with given fields: ctx, calldata, feeLimit, maxFeePrice, opts +func (_m *EvmFeeEstimator) GetFee(ctx context.Context, calldata []byte, feeLimit uint32, maxFeePrice *assets.Wei, opts ...types.Opt) (gas.EvmFee, uint32, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, calldata, feeLimit, maxFeePrice) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetFee") + } + + var r0 gas.EvmFee + var r1 uint32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint32, *assets.Wei, ...types.Opt) (gas.EvmFee, uint32, error)); ok { + return rf(ctx, calldata, feeLimit, maxFeePrice, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint32, *assets.Wei, ...types.Opt) gas.EvmFee); ok { + r0 = rf(ctx, calldata, feeLimit, maxFeePrice, opts...) + } else { + r0 = ret.Get(0).(gas.EvmFee) + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, uint32, *assets.Wei, ...types.Opt) uint32); ok { + r1 = rf(ctx, calldata, feeLimit, maxFeePrice, opts...) + } else { + r1 = ret.Get(1).(uint32) + } + + if rf, ok := ret.Get(2).(func(context.Context, []byte, uint32, *assets.Wei, ...types.Opt) error); ok { + r2 = rf(ctx, calldata, feeLimit, maxFeePrice, opts...) 
+ } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// GetMaxCost provides a mock function with given fields: ctx, amount, calldata, feeLimit, maxFeePrice, opts +func (_m *EvmFeeEstimator) GetMaxCost(ctx context.Context, amount assets.Eth, calldata []byte, feeLimit uint32, maxFeePrice *assets.Wei, opts ...types.Opt) (*big.Int, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, amount, calldata, feeLimit, maxFeePrice) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetMaxCost") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, assets.Eth, []byte, uint32, *assets.Wei, ...types.Opt) (*big.Int, error)); ok { + return rf(ctx, amount, calldata, feeLimit, maxFeePrice, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, assets.Eth, []byte, uint32, *assets.Wei, ...types.Opt) *big.Int); ok { + r0 = rf(ctx, amount, calldata, feeLimit, maxFeePrice, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, assets.Eth, []byte, uint32, *assets.Wei, ...types.Opt) error); ok { + r1 = rf(ctx, amount, calldata, feeLimit, maxFeePrice, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HealthReport provides a mock function with given fields: +func (_m *EvmFeeEstimator) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// L1Oracle provides a mock function with given fields: +func (_m *EvmFeeEstimator) L1Oracle() rollups.L1Oracle { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for L1Oracle") + } + + var r0 rollups.L1Oracle + if rf, ok := ret.Get(0).(func() rollups.L1Oracle); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(rollups.L1Oracle) + } + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *EvmFeeEstimator) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// OnNewLongestChain provides a mock function with given fields: ctx, head +func (_m *EvmFeeEstimator) OnNewLongestChain(ctx context.Context, head *evmtypes.Head) { + _m.Called(ctx, head) +} + +// Ready provides a mock function with given fields: +func (_m *EvmFeeEstimator) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *EvmFeeEstimator) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewEvmFeeEstimator creates a new instance of EvmFeeEstimator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEvmFeeEstimator(t interface { + mock.TestingT + Cleanup(func()) +}) *EvmFeeEstimator { + mock := &EvmFeeEstimator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/gas/mocks/rpc_client.go b/core/chains/evm/gas/mocks/rpc_client.go new file mode 100644 index 00000000..d1262665 --- /dev/null +++ b/core/chains/evm/gas/mocks/rpc_client.go @@ -0,0 +1,49 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// RPCClient is an autogenerated mock type for the rpcClient type +type RPCClient struct { + mock.Mock +} + +// CallContext provides a mock function with given fields: ctx, result, method, args +func (_m *RPCClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + var _ca []interface{} + _ca = append(_ca, ctx, result, method) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CallContext") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string, ...interface{}) error); ok { + r0 = rf(ctx, result, method, args...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewRPCClient creates a new instance of RPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewRPCClient(t interface { + mock.TestingT + Cleanup(func()) +}) *RPCClient { + mock := &RPCClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/gas/models.go b/core/chains/evm/gas/models.go new file mode 100644 index 00000000..e02c1bdc --- /dev/null +++ b/core/chains/evm/gas/models.go @@ -0,0 +1,484 @@ +package gas + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + bigmath "github.com/goplugin/plugin-common/pkg/utils/big_math" + + "github.com/goplugin/pluginv3.0/v2/common/config" + commonfee "github.com/goplugin/pluginv3.0/v2/common/fee" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + commontypes "github.com/goplugin/pluginv3.0/v2/common/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas/rollups" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/label" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +// EvmFeeEstimator provides a unified interface that wraps EvmEstimator and can determine if legacy or dynamic fee estimation should be used +// +//go:generate mockery --quiet --name EvmFeeEstimator --output ./mocks/ --case=underscore +type EvmFeeEstimator interface { + services.Service + commontypes.HeadTrackable[*evmtypes.Head, common.Hash] + + // L1Oracle returns the L1 gas price oracle only if the chain has one, e.g. OP stack L2s and Arbitrum. 
+ L1Oracle() rollups.L1Oracle + GetFee(ctx context.Context, calldata []byte, feeLimit uint32, maxFeePrice *assets.Wei, opts ...feetypes.Opt) (fee EvmFee, chainSpecificFeeLimit uint32, err error) + BumpFee(ctx context.Context, originalFee EvmFee, feeLimit uint32, maxFeePrice *assets.Wei, attempts []EvmPriorAttempt) (bumpedFee EvmFee, chainSpecificFeeLimit uint32, err error) + + // GetMaxCost returns the total value = max price x fee units + transferred value + GetMaxCost(ctx context.Context, amount assets.Eth, calldata []byte, feeLimit uint32, maxFeePrice *assets.Wei, opts ...feetypes.Opt) (*big.Int, error) +} + +// NewEstimator returns the estimator for a given config +func NewEstimator(lggr logger.Logger, ethClient evmclient.Client, cfg Config, geCfg evmconfig.GasEstimator) EvmFeeEstimator { + bh := geCfg.BlockHistory() + s := geCfg.Mode() + lggr.Infow(fmt.Sprintf("Initializing EVM gas estimator in mode: %s", s), + "estimatorMode", s, + "batchSize", bh.BatchSize(), + "blockDelay", bh.BlockDelay(), + "blockHistorySize", bh.BlockHistorySize(), + "eip1559FeeCapBufferBlocks", bh.EIP1559FeeCapBufferBlocks(), + "transactionPercentile", bh.TransactionPercentile(), + "eip1559DynamicFees", geCfg.EIP1559DynamicFees(), + "gasBumpPercent", geCfg.BumpPercent(), + "gasBumpThreshold", geCfg.BumpThreshold(), + "bumpMin", geCfg.BumpMin(), + "feeCapDefault", geCfg.FeeCapDefault(), + "limitMultiplier", geCfg.LimitMultiplier(), + "priceDefault", geCfg.PriceDefault(), + "tipCapDefault", geCfg.TipCapDefault(), + "tipCapMin", geCfg.TipCapMin(), + "priceMax", geCfg.PriceMax(), + "priceMin", geCfg.PriceMin(), + ) + df := geCfg.EIP1559DynamicFees() + + // create l1Oracle only if it is supported for the chain + var l1Oracle rollups.L1Oracle + if rollups.IsRollupWithL1Support(cfg.ChainType()) { + l1Oracle = rollups.NewL1GasOracle(lggr, ethClient, cfg.ChainType()) + } + var newEstimator func(logger.Logger) EvmEstimator + switch s { + case "Arbitrum": + newEstimator = func(l logger.Logger) 
EvmEstimator { + return NewArbitrumEstimator(lggr, geCfg, ethClient, ethClient) + } + case "BlockHistory": + newEstimator = func(l logger.Logger) EvmEstimator { + return NewBlockHistoryEstimator(lggr, ethClient, cfg, geCfg, bh, *ethClient.ConfiguredChainID()) + } + case "FixedPrice": + newEstimator = func(l logger.Logger) EvmEstimator { + return NewFixedPriceEstimator(geCfg, bh, lggr) + } + case "L2Suggested", "SuggestedPrice": + newEstimator = func(l logger.Logger) EvmEstimator { + return NewSuggestedPriceEstimator(lggr, ethClient, geCfg) + } + default: + lggr.Warnf("GasEstimator: unrecognised mode '%s', falling back to FixedPriceEstimator", s) + newEstimator = func(l logger.Logger) EvmEstimator { + return NewFixedPriceEstimator(geCfg, bh, lggr) + } + } + return NewWrappedEvmEstimator(lggr, newEstimator, df, l1Oracle) +} + +// DynamicFee encompasses both FeeCap and TipCap for EIP1559 transactions +type DynamicFee struct { + FeeCap *assets.Wei + TipCap *assets.Wei +} + +type EvmPriorAttempt struct { + ChainSpecificFeeLimit uint32 + BroadcastBeforeBlockNum *int64 + TxHash common.Hash + TxType int + GasPrice *assets.Wei + DynamicFee DynamicFee +} + +// Estimator provides an interface for estimating gas price and limit +// +//go:generate mockery --quiet --name EvmEstimator --output ./mocks/ --case=underscore +type EvmEstimator interface { + commontypes.HeadTrackable[*evmtypes.Head, common.Hash] + services.Service + + // GetLegacyGas Calculates initial gas fee for non-EIP1559 transaction + // maxGasPriceWei parameter is the highest possible gas fee cap that the function will return + GetLegacyGas(ctx context.Context, calldata []byte, gasLimit uint32, maxGasPriceWei *assets.Wei, opts ...feetypes.Opt) (gasPrice *assets.Wei, chainSpecificGasLimit uint32, err error) + // BumpLegacyGas Increases gas price and/or limit for non-EIP1559 transactions + // if the bumped gas fee is greater than maxGasPriceWei, the method returns an error + // attempts must: + // - be sorted in 
order from highest price to lowest price + // - all be of transaction type 0x0 or 0x1 + BumpLegacyGas(ctx context.Context, originalGasPrice *assets.Wei, gasLimit uint32, maxGasPriceWei *assets.Wei, attempts []EvmPriorAttempt) (bumpedGasPrice *assets.Wei, chainSpecificGasLimit uint32, err error) + // GetDynamicFee Calculates initial gas fee for gas for EIP1559 transactions + // maxGasPriceWei parameter is the highest possible gas fee cap that the function will return + GetDynamicFee(ctx context.Context, gasLimit uint32, maxGasPriceWei *assets.Wei) (fee DynamicFee, chainSpecificGasLimit uint32, err error) + // BumpDynamicFee Increases gas price and/or limit for non-EIP1559 transactions + // if the bumped gas fee or tip caps are greater than maxGasPriceWei, the method returns an error + // attempts must: + // - be sorted in order from highest price to lowest price + // - all be of transaction type 0x2 + BumpDynamicFee(ctx context.Context, original DynamicFee, gasLimit uint32, maxGasPriceWei *assets.Wei, attempts []EvmPriorAttempt) (bumped DynamicFee, chainSpecificGasLimit uint32, err error) +} + +var _ feetypes.Fee = (*EvmFee)(nil) + +type EvmFee struct { + // legacy fees + Legacy *assets.Wei + + // dynamic/EIP1559 fees + DynamicFeeCap *assets.Wei + DynamicTipCap *assets.Wei +} + +func (fee EvmFee) String() string { + return fmt.Sprintf("{Legacy: %s, DynamicFeeCap: %s, DynamicTipCap: %s}", fee.Legacy, fee.DynamicFeeCap, fee.DynamicTipCap) +} + +func (fee EvmFee) ValidDynamic() bool { + return fee.DynamicFeeCap != nil && fee.DynamicTipCap != nil +} + +// WrappedEvmEstimator provides a struct that wraps the EVM specific dynamic and legacy estimators into one estimator that conforms to the generic FeeEstimator +type WrappedEvmEstimator struct { + services.StateMachine + lggr logger.Logger + EvmEstimator + EIP1559Enabled bool + l1Oracle rollups.L1Oracle +} + +var _ EvmFeeEstimator = (*WrappedEvmEstimator)(nil) + +func NewWrappedEvmEstimator(lggr logger.Logger, 
newEstimator func(logger.Logger) EvmEstimator, eip1559Enabled bool, l1Oracle rollups.L1Oracle) EvmFeeEstimator { + lggr = logger.Named(lggr, "WrappedEvmEstimator") + return &WrappedEvmEstimator{ + lggr: lggr, + EvmEstimator: newEstimator(lggr), + EIP1559Enabled: eip1559Enabled, + l1Oracle: l1Oracle, + } +} + +func (e *WrappedEvmEstimator) Name() string { + return e.lggr.Name() +} + +func (e *WrappedEvmEstimator) Start(ctx context.Context) error { + return e.StartOnce(e.Name(), func() error { + if err := e.EvmEstimator.Start(ctx); err != nil { + return errors.Wrap(err, "failed to start EVMEstimator") + } + if e.l1Oracle != nil { + if err := e.l1Oracle.Start(ctx); err != nil { + return errors.Wrap(err, "failed to start L1Oracle") + } + } + return nil + }) +} +func (e *WrappedEvmEstimator) Close() error { + return e.StopOnce(e.Name(), func() error { + var errEVM, errOracle error + + errEVM = errors.Wrap(e.EvmEstimator.Close(), "failed to stop EVMEstimator") + if e.l1Oracle != nil { + errOracle = errors.Wrap(e.l1Oracle.Close(), "failed to stop L1Oracle") + } + + if errEVM != nil { + return errEVM + } + return errOracle + }) +} + +func (e *WrappedEvmEstimator) Ready() error { + var errEVM, errOracle error + + errEVM = e.EvmEstimator.Ready() + if e.l1Oracle != nil { + errOracle = e.l1Oracle.Ready() + } + + if errEVM != nil { + return errEVM + } + return errOracle +} + +func (e *WrappedEvmEstimator) HealthReport() map[string]error { + report := map[string]error{e.Name(): e.Healthy()} + services.CopyHealth(report, e.EvmEstimator.HealthReport()) + if e.l1Oracle != nil { + services.CopyHealth(report, e.l1Oracle.HealthReport()) + } + + return report +} + +func (e *WrappedEvmEstimator) L1Oracle() rollups.L1Oracle { + return e.l1Oracle +} + +func (e *WrappedEvmEstimator) GetFee(ctx context.Context, calldata []byte, feeLimit uint32, maxFeePrice *assets.Wei, opts ...feetypes.Opt) (fee EvmFee, chainSpecificFeeLimit uint32, err error) { + // get dynamic fee + if e.EIP1559Enabled { 
+ var dynamicFee DynamicFee + dynamicFee, chainSpecificFeeLimit, err = e.EvmEstimator.GetDynamicFee(ctx, feeLimit, maxFeePrice) + fee.DynamicFeeCap = dynamicFee.FeeCap + fee.DynamicTipCap = dynamicFee.TipCap + return + } + + // get legacy fee + fee.Legacy, chainSpecificFeeLimit, err = e.EvmEstimator.GetLegacyGas(ctx, calldata, feeLimit, maxFeePrice, opts...) + return +} + +func (e *WrappedEvmEstimator) GetMaxCost(ctx context.Context, amount assets.Eth, calldata []byte, feeLimit uint32, maxFeePrice *assets.Wei, opts ...feetypes.Opt) (*big.Int, error) { + fees, gasLimit, err := e.GetFee(ctx, calldata, feeLimit, maxFeePrice, opts...) + if err != nil { + return nil, err + } + + var gasPrice *assets.Wei + if e.EIP1559Enabled { + gasPrice = fees.DynamicFeeCap + } else { + gasPrice = fees.Legacy + } + + fee := new(big.Int).Mul(gasPrice.ToInt(), big.NewInt(int64(gasLimit))) + amountWithFees := new(big.Int).Add(amount.ToInt(), fee) + return amountWithFees, nil +} + +func (e *WrappedEvmEstimator) BumpFee(ctx context.Context, originalFee EvmFee, feeLimit uint32, maxFeePrice *assets.Wei, attempts []EvmPriorAttempt) (bumpedFee EvmFee, chainSpecificFeeLimit uint32, err error) { + // validate only 1 fee type is present + if (!originalFee.ValidDynamic() && originalFee.Legacy == nil) || (originalFee.ValidDynamic() && originalFee.Legacy != nil) { + err = errors.New("only one dynamic or legacy fee can be defined") + return + } + + // bump fee based on what fee the tx has previously used (not based on config) + // bump dynamic original + if originalFee.ValidDynamic() { + var bumpedDynamic DynamicFee + bumpedDynamic, chainSpecificFeeLimit, err = e.EvmEstimator.BumpDynamicFee(ctx, + DynamicFee{ + TipCap: originalFee.DynamicTipCap, + FeeCap: originalFee.DynamicFeeCap, + }, feeLimit, maxFeePrice, attempts) + bumpedFee.DynamicFeeCap = bumpedDynamic.FeeCap + bumpedFee.DynamicTipCap = bumpedDynamic.TipCap + return + } + + // bump legacy fee + bumpedFee.Legacy, chainSpecificFeeLimit, err = 
e.EvmEstimator.BumpLegacyGas(ctx, originalFee.Legacy, feeLimit, maxFeePrice, attempts) + return +} + +// Config defines an interface for configuration in the gas package +// +//go:generate mockery --quiet --name Config --output ./mocks/ --case=underscore +type Config interface { + ChainType() config.ChainType + FinalityDepth() uint32 + FinalityTagEnabled() bool +} + +type GasEstimatorConfig interface { + EIP1559DynamicFees() bool + BumpPercent() uint16 + BumpThreshold() uint64 + BumpMin() *assets.Wei + FeeCapDefault() *assets.Wei + LimitMax() uint32 + LimitMultiplier() float32 + PriceDefault() *assets.Wei + TipCapDefault() *assets.Wei + TipCapMin() *assets.Wei + PriceMin() *assets.Wei + PriceMax() *assets.Wei + Mode() string +} + +type BlockHistoryConfig interface { + evmconfig.BlockHistory +} + +// Int64ToHex converts an int64 into go-ethereum's hex representation +func Int64ToHex(n int64) string { + return hexutil.EncodeBig(big.NewInt(n)) +} + +// HexToInt64 performs the inverse of Int64ToHex +// Returns 0 on invalid input +func HexToInt64(input interface{}) int64 { + switch v := input.(type) { + case string: + big, err := hexutil.DecodeBig(v) + if err != nil { + return 0 + } + return big.Int64() + case []byte: + big, err := hexutil.DecodeBig(string(v)) + if err != nil { + return 0 + } + return big.Int64() + default: + return 0 + } +} + +// BumpLegacyGasPriceOnly will increase the price and apply multiplier to the gas limit +func BumpLegacyGasPriceOnly(cfg bumpConfig, lggr logger.SugaredLogger, currentGasPrice, originalGasPrice *assets.Wei, originalGasLimit uint32, maxGasPriceWei *assets.Wei) (gasPrice *assets.Wei, chainSpecificGasLimit uint32, err error) { + gasPrice, err = bumpGasPrice(cfg, lggr, currentGasPrice, originalGasPrice, maxGasPriceWei) + if err != nil { + return nil, 0, err + } + chainSpecificGasLimit, err = commonfee.ApplyMultiplier(originalGasLimit, cfg.LimitMultiplier()) + return +} + +// bumpGasPrice computes the next gas price to attempt as the 
largest of: +// - A configured percentage bump (EVM.GasEstimator.BumpPercent) on top of the baseline price. +// - A configured fixed amount of Wei (ETH_GAS_PRICE_WEI) on top of the baseline price. +// The baseline price is the maximum of the previous gas price attempt and the node's current gas price. +func bumpGasPrice(cfg bumpConfig, lggr logger.SugaredLogger, currentGasPrice, originalGasPrice, maxGasPriceWei *assets.Wei) (*assets.Wei, error) { + maxGasPrice := getMaxGasPrice(maxGasPriceWei, cfg.PriceMax()) + bumpedGasPrice := bumpFeePrice(originalGasPrice, cfg.BumpPercent(), cfg.BumpMin()) + + // Update bumpedGasPrice if currentGasPrice is higher than bumpedGasPrice and within maxGasPrice + bumpedGasPrice = maxBumpedFee(lggr, currentGasPrice, bumpedGasPrice, maxGasPrice, "gas price") + + if bumpedGasPrice.Cmp(maxGasPrice) > 0 { + return maxGasPrice, errors.Wrapf(commonfee.ErrBumpFeeExceedsLimit, "bumped gas price of %s would exceed configured max gas price of %s (original price was %s). 
%s", + bumpedGasPrice.String(), maxGasPrice, originalGasPrice.String(), label.NodeConnectivityProblemWarning) + } else if bumpedGasPrice.Cmp(originalGasPrice) == 0 { + // NOTE: This really shouldn't happen since we enforce minimums for + // EVM.GasEstimator.BumpPercent and EVM.GasEstimator.BumpMin in the config validation, + // but it's here anyway for a "belts and braces" approach + return bumpedGasPrice, errors.Wrapf(commonfee.ErrBump, "bumped gas price of %s is equal to original gas price of %s."+ + " ACTION REQUIRED: This is a configuration error, you must increase either "+ + "EVM.GasEstimator.BumpPercent or EVM.GasEstimator.BumpMin", bumpedGasPrice.String(), originalGasPrice.String()) + } + return bumpedGasPrice, nil +} + +// BumpDynamicFeeOnly bumps the tip cap and max gas price if necessary +func BumpDynamicFeeOnly(config bumpConfig, feeCapBufferBlocks uint16, lggr logger.SugaredLogger, currentTipCap, currentBaseFee *assets.Wei, originalFee DynamicFee, originalGasLimit uint32, maxGasPriceWei *assets.Wei) (bumped DynamicFee, chainSpecificGasLimit uint32, err error) { + bumped, err = bumpDynamicFee(config, feeCapBufferBlocks, lggr, currentTipCap, currentBaseFee, originalFee, maxGasPriceWei) + if err != nil { + return bumped, 0, err + } + chainSpecificGasLimit, err = commonfee.ApplyMultiplier(originalGasLimit, config.LimitMultiplier()) + return +} + +// bumpDynamicFee computes the next tip cap to attempt as the largest of: +// - A configured percentage bump (EVM.GasEstimator.BumpPercent) on top of the baseline tip cap. +// - A configured fixed amount of Wei (ETH_GAS_PRICE_WEI) on top of the baseline tip cap. +// The baseline tip cap is the maximum of the previous tip cap attempt and the node's current tip cap. +// It increases the max fee cap by BumpPercent +// +// NOTE: We would prefer to have set a large FeeCap and leave it fixed, bumping +// the Tip only. 
Unfortunately due to a flaw of how EIP-1559 is implemented we +// have to bump FeeCap by at least 10% each time we bump the tip cap. +// See: https://github.com/ethereum/go-ethereum/issues/24284 +func bumpDynamicFee(cfg bumpConfig, feeCapBufferBlocks uint16, lggr logger.SugaredLogger, currentTipCap, currentBaseFee *assets.Wei, originalFee DynamicFee, maxGasPriceWei *assets.Wei) (bumpedFee DynamicFee, err error) { + maxGasPrice := getMaxGasPrice(maxGasPriceWei, cfg.PriceMax()) + baselineTipCap := assets.MaxWei(originalFee.TipCap, cfg.TipCapDefault()) + bumpedTipCap := bumpFeePrice(baselineTipCap, cfg.BumpPercent(), cfg.BumpMin()) + + // Update bumpedTipCap if currentTipCap is higher than bumpedTipCap and within maxGasPrice + bumpedTipCap = maxBumpedFee(lggr, currentTipCap, bumpedTipCap, maxGasPrice, "tip cap") + + if bumpedTipCap.Cmp(maxGasPrice) > 0 { + return bumpedFee, errors.Wrapf(commonfee.ErrBumpFeeExceedsLimit, "bumped tip cap of %s would exceed configured max gas price of %s (original fee: tip cap %s, fee cap %s). 
%s", + bumpedTipCap.String(), maxGasPrice, originalFee.TipCap.String(), originalFee.FeeCap.String(), label.NodeConnectivityProblemWarning) + } else if bumpedTipCap.Cmp(originalFee.TipCap) <= 0 { + // NOTE: This really shouldn't happen since we enforce minimums for + // EVM.GasEstimator.BumpPercent and EVM.GasEstimator.BumpMin in the config validation, + // but it's here anyway for a "belts and braces" approach + return bumpedFee, errors.Wrapf(commonfee.ErrBump, "bumped gas tip cap of %s is less than or equal to original gas tip cap of %s."+ + " ACTION REQUIRED: This is a configuration error, you must increase either "+ + "EVM.GasEstimator.BumpPercent or EVM.GasEstimator.BumpMin", bumpedTipCap.String(), originalFee.TipCap.String()) + } + + // Always bump the FeeCap by at least the bump percentage (should be greater than or + // equal to than geth's configured bump minimum which is 10%) + // See: https://github.com/ethereum/go-ethereum/blob/bff330335b94af3643ac2fb809793f77de3069d4/core/tx_list.go#L298 + bumpedFeeCap := bumpFeePrice(originalFee.FeeCap, cfg.BumpPercent(), cfg.BumpMin()) + + if currentBaseFee != nil { + if currentBaseFee.Cmp(maxGasPrice) > 0 { + lggr.Warnf("Ignoring current base fee of %s which is greater than max gas price of %s", currentBaseFee.String(), maxGasPrice.String()) + } else { + currentFeeCap := calcFeeCap(currentBaseFee, int(feeCapBufferBlocks), bumpedTipCap, maxGasPrice) + bumpedFeeCap = assets.WeiMax(bumpedFeeCap, currentFeeCap) + } + } + + if bumpedFeeCap.Cmp(maxGasPrice) > 0 { + return bumpedFee, errors.Wrapf(commonfee.ErrBumpFeeExceedsLimit, "bumped fee cap of %s would exceed configured max gas price of %s (original fee: tip cap %s, fee cap %s). 
%s", + bumpedFeeCap.String(), maxGasPrice, originalFee.TipCap.String(), originalFee.FeeCap.String(), label.NodeConnectivityProblemWarning) + } + + return DynamicFee{FeeCap: bumpedFeeCap, TipCap: bumpedTipCap}, nil +} + +func bumpFeePrice(originalFeePrice *assets.Wei, feeBumpPercent uint16, feeBumpUnits *assets.Wei) *assets.Wei { + bumpedFeePrice := assets.MaxWei( + originalFeePrice.AddPercentage(feeBumpPercent), + originalFeePrice.Add(feeBumpUnits), + ) + return bumpedFeePrice +} + +func maxBumpedFee(lggr logger.SugaredLogger, currentFeePrice, bumpedFeePrice, maxGasPrice *assets.Wei, feeType string) *assets.Wei { + if currentFeePrice != nil { + if currentFeePrice.Cmp(maxGasPrice) > 0 { + // Shouldn't happen because the estimator should not be allowed to + // estimate a higher gas than the maximum allowed + lggr.AssumptionViolationf("Ignoring current %s of %s that would exceed max %s of %s", feeType, currentFeePrice.String(), feeType, maxGasPrice.String()) + } else if bumpedFeePrice.Cmp(currentFeePrice) < 0 { + // If the current gas price is higher than the old price bumped, use that instead + bumpedFeePrice = currentFeePrice + } + } + return bumpedFeePrice +} + +func getMaxGasPrice(userSpecifiedMax, maxGasPriceWei *assets.Wei) *assets.Wei { + return assets.NewWei(bigmath.Min(userSpecifiedMax.ToInt(), maxGasPriceWei.ToInt())) +} + +func capGasPrice(calculatedGasPrice, userSpecifiedMax, maxGasPriceWei *assets.Wei) *assets.Wei { + maxGasPrice := commonfee.CalculateFee(calculatedGasPrice.ToInt(), userSpecifiedMax.ToInt(), maxGasPriceWei.ToInt()) + return assets.NewWei(maxGasPrice) +} diff --git a/core/chains/evm/gas/models_test.go b/core/chains/evm/gas/models_test.go new file mode 100644 index 00000000..c7a9e9cd --- /dev/null +++ b/core/chains/evm/gas/models_test.go @@ -0,0 +1,224 @@ +package gas_test + +import ( + "math/big" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + 
"github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas/mocks" + rollupMocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas/rollups/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestWrappedEvmEstimator(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + // fee values + gasLimit := uint32(10) + legacyFee := assets.NewWeiI(10) + dynamicFee := gas.DynamicFee{ + FeeCap: assets.NewWeiI(20), + TipCap: assets.NewWeiI(1), + } + est := mocks.NewEvmEstimator(t) + est.On("GetDynamicFee", mock.Anything, mock.Anything, mock.Anything). + Return(dynamicFee, gasLimit, nil).Twice() + est.On("GetLegacyGas", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(legacyFee, gasLimit, nil).Twice() + est.On("BumpDynamicFee", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(dynamicFee, gasLimit, nil).Once() + est.On("BumpLegacyGas", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(legacyFee, gasLimit, nil).Once() + getRootEst := func(logger.Logger) gas.EvmEstimator { return est } + + mockEstimatorName := "WrappedEvmEstimator" + mockEvmEstimatorName := "WrappedEvmEstimator.MockEstimator" + + // L1Oracle returns the correct L1Oracle interface + t.Run("L1Oracle", func(t *testing.T) { + lggr := logger.Test(t) + // expect nil + estimator := gas.NewWrappedEvmEstimator(lggr, getRootEst, false, nil) + l1Oracle := estimator.L1Oracle() + assert.Nil(t, l1Oracle) + + // expect l1Oracle + oracle := rollupMocks.NewL1Oracle(t) + estimator = gas.NewWrappedEvmEstimator(lggr, getRootEst, false, oracle) + l1Oracle = estimator.L1Oracle() + assert.Equal(t, oracle, l1Oracle) + }) + + // GetFee returns gas estimation based on configuration value + t.Run("GetFee", func(t *testing.T) { + lggr := logger.Test(t) + // expect legacy fee data + dynamicFees := false + estimator := gas.NewWrappedEvmEstimator(lggr, getRootEst, dynamicFees, nil) + fee, max, err := estimator.GetFee(ctx, nil, 0, nil) + require.NoError(t, err) + assert.Equal(t, gasLimit, max) + assert.True(t, legacyFee.Equal(fee.Legacy)) + assert.Nil(t, fee.DynamicTipCap) + assert.Nil(t, fee.DynamicFeeCap) + + // expect dynamic fee data + dynamicFees = true + estimator = gas.NewWrappedEvmEstimator(lggr, getRootEst, dynamicFees, nil) + fee, max, err = estimator.GetFee(ctx, nil, 0, nil) + require.NoError(t, err) + assert.Equal(t, gasLimit, max) + assert.True(t, dynamicFee.FeeCap.Equal(fee.DynamicFeeCap)) + assert.True(t, dynamicFee.TipCap.Equal(fee.DynamicTipCap)) + assert.Nil(t, fee.Legacy) + }) + + // BumpFee returns bumped fee type based on original fee calculation + t.Run("BumpFee", func(t *testing.T) { + lggr := logger.Test(t) + dynamicFees := false + estimator := gas.NewWrappedEvmEstimator(lggr, getRootEst, dynamicFees, nil) + + // expect legacy fee data + fee, max, err := estimator.BumpFee(ctx, gas.EvmFee{Legacy: assets.NewWeiI(0)}, 0, nil, nil) + require.NoError(t, err) + assert.Equal(t, 
gasLimit, max) + assert.True(t, legacyFee.Equal(fee.Legacy)) + assert.Nil(t, fee.DynamicTipCap) + assert.Nil(t, fee.DynamicFeeCap) + + // expect dynamic fee data + fee, max, err = estimator.BumpFee(ctx, gas.EvmFee{ + DynamicFeeCap: assets.NewWeiI(0), + DynamicTipCap: assets.NewWeiI(0), + }, 0, nil, nil) + require.NoError(t, err) + assert.Equal(t, gasLimit, max) + assert.True(t, dynamicFee.FeeCap.Equal(fee.DynamicFeeCap)) + assert.True(t, dynamicFee.TipCap.Equal(fee.DynamicTipCap)) + assert.Nil(t, fee.Legacy) + + // expect error + _, _, err = estimator.BumpFee(ctx, gas.EvmFee{}, 0, nil, nil) + assert.Error(t, err) + _, _, err = estimator.BumpFee(ctx, gas.EvmFee{ + Legacy: legacyFee, + DynamicFeeCap: dynamicFee.FeeCap, + DynamicTipCap: dynamicFee.TipCap, + }, 0, nil, nil) + assert.Error(t, err) + }) + + t.Run("GetMaxCost", func(t *testing.T) { + lggr := logger.Test(t) + val := assets.NewEthValue(1) + + // expect legacy fee data + dynamicFees := false + estimator := gas.NewWrappedEvmEstimator(lggr, getRootEst, dynamicFees, nil) + total, err := estimator.GetMaxCost(ctx, val, nil, gasLimit, nil) + require.NoError(t, err) + fee := new(big.Int).Mul(legacyFee.ToInt(), big.NewInt(int64(gasLimit))) + assert.Equal(t, new(big.Int).Add(val.ToInt(), fee), total) + + // expect dynamic fee data + dynamicFees = true + estimator = gas.NewWrappedEvmEstimator(lggr, getRootEst, dynamicFees, nil) + total, err = estimator.GetMaxCost(ctx, val, nil, gasLimit, nil) + require.NoError(t, err) + fee = new(big.Int).Mul(dynamicFee.FeeCap.ToInt(), big.NewInt(int64(gasLimit))) + assert.Equal(t, new(big.Int).Add(val.ToInt(), fee), total) + }) + + t.Run("Name", func(t *testing.T) { + lggr := logger.Test(t) + + oracle := rollupMocks.NewL1Oracle(t) + evmEstimator := mocks.NewEvmEstimator(t) + evmEstimator.On("Name").Return(mockEvmEstimatorName, nil).Once() + + estimator := gas.NewWrappedEvmEstimator(lggr, func(logger.Logger) gas.EvmEstimator { + return evmEstimator + }, false, oracle) + + 
require.Equal(t, mockEstimatorName, estimator.Name()) + require.Equal(t, mockEvmEstimatorName, evmEstimator.Name()) + }) + + t.Run("Start and stop calls both EVM estimator and L1Oracle", func(t *testing.T) { + lggr := logger.Test(t) + oracle := rollupMocks.NewL1Oracle(t) + evmEstimator := mocks.NewEvmEstimator(t) + + evmEstimator.On("Start", mock.Anything).Return(nil).Twice() + evmEstimator.On("Close").Return(nil).Twice() + oracle.On("Start", mock.Anything).Return(nil).Once() + oracle.On("Close").Return(nil).Once() + getEst := func(logger.Logger) gas.EvmEstimator { return evmEstimator } + + estimator := gas.NewWrappedEvmEstimator(lggr, getEst, false, nil) + err := estimator.Start(ctx) + require.NoError(t, err) + err = estimator.Close() + require.NoError(t, err) + + estimator = gas.NewWrappedEvmEstimator(lggr, getEst, false, oracle) + err = estimator.Start(ctx) + require.NoError(t, err) + err = estimator.Close() + require.NoError(t, err) + }) + + t.Run("Read calls both EVM estimator and L1Oracle", func(t *testing.T) { + lggr := logger.Test(t) + evmEstimator := mocks.NewEvmEstimator(t) + oracle := rollupMocks.NewL1Oracle(t) + + evmEstimator.On("Ready").Return(nil).Twice() + oracle.On("Ready").Return(nil).Once() + getEst := func(logger.Logger) gas.EvmEstimator { return evmEstimator } + + estimator := gas.NewWrappedEvmEstimator(lggr, getEst, false, nil) + err := estimator.Ready() + require.NoError(t, err) + + estimator = gas.NewWrappedEvmEstimator(lggr, getEst, false, oracle) + err = estimator.Ready() + require.NoError(t, err) + }) + + t.Run("HealthReport merges report from EVM estimator and L1Oracle", func(t *testing.T) { + lggr := logger.Test(t) + evmEstimator := mocks.NewEvmEstimator(t) + oracle := rollupMocks.NewL1Oracle(t) + + evmEstimatorKey := "evm" + evmEstimatorError := errors.New("evm error") + oracleKey := "oracle" + oracleError := errors.New("oracle error") + + evmEstimator.On("HealthReport").Return(map[string]error{evmEstimatorKey: 
evmEstimatorError}).Twice() + oracle.On("HealthReport").Return(map[string]error{oracleKey: oracleError}).Once() + getEst := func(logger.Logger) gas.EvmEstimator { return evmEstimator } + + estimator := gas.NewWrappedEvmEstimator(lggr, getEst, false, nil) + report := estimator.HealthReport() + require.True(t, errors.Is(report[evmEstimatorKey], evmEstimatorError)) + require.Nil(t, report[oracleKey]) + require.NotNil(t, report[mockEstimatorName]) + + estimator = gas.NewWrappedEvmEstimator(lggr, getEst, false, oracle) + report = estimator.HealthReport() + require.True(t, errors.Is(report[evmEstimatorKey], evmEstimatorError)) + require.True(t, errors.Is(report[oracleKey], oracleError)) + require.NotNil(t, report[mockEstimatorName]) + }) +} diff --git a/core/chains/evm/gas/rollups/l1_oracle.go b/core/chains/evm/gas/rollups/l1_oracle.go new file mode 100644 index 00000000..924831df --- /dev/null +++ b/core/chains/evm/gas/rollups/l1_oracle.go @@ -0,0 +1,331 @@ +package rollups + +import ( + "context" + "fmt" + "math/big" + "slices" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + + gethtypes "github.com/ethereum/go-ethereum/core/types" + + "github.com/goplugin/pluginv3.0/v2/common/client" + "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" +) + +//go:generate mockery --quiet --name ethClient --output ./mocks/ --case=underscore --structname ETHClient +type ethClient interface { + CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) +} + +type priceEntry struct { + price *assets.Wei + timestamp time.Time +} + +// Reads L2-specific precompiles and 
caches the l1GasPrice set by the L2. +type l1Oracle struct { + services.StateMachine + client ethClient + pollPeriod time.Duration + logger logger.SugaredLogger + chainType config.ChainType + + l1GasPriceAddress string + gasPriceMethod string + l1GasPriceMethodAbi abi.ABI + l1GasPriceMu sync.RWMutex + l1GasPrice priceEntry + + l1GasCostAddress string + gasCostMethod string + l1GasCostMethodAbi abi.ABI + + chInitialised chan struct{} + chStop services.StopChan + chDone chan struct{} +} + +const ( + // ArbGasInfoAddress is the address of the "Precompiled contract that exists in every Arbitrum chain." + // https://github.com/OffchainLabs/nitro/blob/f7645453cfc77bf3e3644ea1ac031eff629df325/contracts/src/precompiles/ArbGasInfo.sol + ArbGasInfoAddress = "0x000000000000000000000000000000000000006C" + // ArbGasInfo_getL1BaseFeeEstimate is the a hex encoded call to: + // `function getL1BaseFeeEstimate() external view returns (uint256);` + ArbGasInfo_getL1BaseFeeEstimate = "getL1BaseFeeEstimate" + // NodeInterfaceAddress is the address of the precompiled contract that is only available through RPC + // https://github.com/OffchainLabs/nitro/blob/e815395d2e91fb17f4634cad72198f6de79c6e61/nodeInterface/NodeInterface.go#L37 + ArbNodeInterfaceAddress = "0x00000000000000000000000000000000000000C8" + // ArbGasInfo_getPricesInArbGas is the a hex encoded call to: + // `function gasEstimateL1Component(address to, bool contractCreation, bytes calldata data) external payable returns (uint64 gasEstimateForL1, uint256 baseFee, uint256 l1BaseFeeEstimate);` + ArbNodeInterface_gasEstimateL1Component = "gasEstimateL1Component" + + // OPGasOracleAddress is the address of the precompiled contract that exists on OP stack chain. + // This is the case for Optimism and Base. 
+ OPGasOracleAddress = "0x420000000000000000000000000000000000000F" + // OPGasOracle_l1BaseFee is a hex encoded call to: + // `function l1BaseFee() external view returns (uint256);` + OPGasOracle_l1BaseFee = "l1BaseFee" + // OPGasOracle_getL1Fee is a hex encoded call to: + // `function getL1Fee(bytes) external view returns (uint256);` + OPGasOracle_getL1Fee = "getL1Fee" + + // ScrollGasOracleAddress is the address of the precompiled contract that exists on Scroll chain. + ScrollGasOracleAddress = "0x5300000000000000000000000000000000000002" + // ScrollGasOracle_l1BaseFee is a hex encoded call to: + // `function l1BaseFee() external view returns (uint256);` + ScrollGasOracle_l1BaseFee = "l1BaseFee" + // ScrollGasOracle_getL1Fee is a hex encoded call to: + // `function getL1Fee(bytes) external view returns (uint256);` + ScrollGasOracle_getL1Fee = "getL1Fee" + + // GasOracleAddress is the address of the precompiled contract that exists on Kroma chain. + // This is the case for Kroma. + KromaGasOracleAddress = "0x4200000000000000000000000000000000000005" + // GasOracle_l1BaseFee is the a hex encoded call to: + // `function l1BaseFee() external view returns (uint256);` + KromaGasOracle_l1BaseFee = "l1BaseFee" + + // Interval at which to poll for L1BaseFee. A good starting point is the L1 block time. 
+ PollPeriod = 6 * time.Second +) + +var supportedChainTypes = []config.ChainType{config.ChainArbitrum, config.ChainOptimismBedrock, config.ChainKroma, config.ChainScroll} + +func IsRollupWithL1Support(chainType config.ChainType) bool { + return slices.Contains(supportedChainTypes, chainType) +} + +func NewL1GasOracle(lggr logger.Logger, ethClient ethClient, chainType config.ChainType) L1Oracle { + var l1GasPriceAddress, gasPriceMethod, l1GasCostAddress, gasCostMethod string + var l1GasPriceMethodAbi, l1GasCostMethodAbi abi.ABI + var gasPriceErr, gasCostErr error + switch chainType { + case config.ChainArbitrum: + l1GasPriceAddress = ArbGasInfoAddress + gasPriceMethod = ArbGasInfo_getL1BaseFeeEstimate + l1GasPriceMethodAbi, gasPriceErr = abi.JSON(strings.NewReader(GetL1BaseFeeEstimateAbiString)) + l1GasCostAddress = ArbNodeInterfaceAddress + gasCostMethod = ArbNodeInterface_gasEstimateL1Component + l1GasCostMethodAbi, gasCostErr = abi.JSON(strings.NewReader(GasEstimateL1ComponentAbiString)) + case config.ChainOptimismBedrock: + l1GasPriceAddress = OPGasOracleAddress + gasPriceMethod = OPGasOracle_l1BaseFee + l1GasPriceMethodAbi, gasPriceErr = abi.JSON(strings.NewReader(L1BaseFeeAbiString)) + l1GasCostAddress = OPGasOracleAddress + gasCostMethod = OPGasOracle_getL1Fee + l1GasCostMethodAbi, gasCostErr = abi.JSON(strings.NewReader(GetL1FeeAbiString)) + case config.ChainKroma: + l1GasPriceAddress = KromaGasOracleAddress + gasPriceMethod = KromaGasOracle_l1BaseFee + l1GasPriceMethodAbi, gasPriceErr = abi.JSON(strings.NewReader(L1BaseFeeAbiString)) + l1GasCostAddress = "" + gasCostMethod = "" + case config.ChainScroll: + l1GasPriceAddress = ScrollGasOracleAddress + gasPriceMethod = ScrollGasOracle_l1BaseFee + l1GasPriceMethodAbi, gasPriceErr = abi.JSON(strings.NewReader(L1BaseFeeAbiString)) + l1GasCostAddress = ScrollGasOracleAddress + gasCostMethod = ScrollGasOracle_getL1Fee + l1GasCostMethodAbi, gasCostErr = abi.JSON(strings.NewReader(GetL1FeeAbiString)) + default: + 
panic(fmt.Sprintf("Received unsupported chaintype %s", chainType)) + } + + if gasPriceErr != nil { + panic(fmt.Sprintf("Failed to parse L1 gas price method ABI for chain: %s", chainType)) + } + if gasCostErr != nil { + panic(fmt.Sprintf("Failed to parse L1 gas cost method ABI for chain: %s", chainType)) + } + + return &l1Oracle{ + client: ethClient, + pollPeriod: PollPeriod, + logger: logger.Sugared(logger.Named(lggr, fmt.Sprintf("L1GasOracle(%s)", chainType))), + chainType: chainType, + + l1GasPriceAddress: l1GasPriceAddress, + gasPriceMethod: gasPriceMethod, + l1GasPriceMethodAbi: l1GasPriceMethodAbi, + l1GasCostAddress: l1GasCostAddress, + gasCostMethod: gasCostMethod, + l1GasCostMethodAbi: l1GasCostMethodAbi, + + chInitialised: make(chan struct{}), + chStop: make(chan struct{}), + chDone: make(chan struct{}), + } +} + +func (o *l1Oracle) Name() string { + return o.logger.Name() +} + +func (o *l1Oracle) Start(ctx context.Context) error { + return o.StartOnce(o.Name(), func() error { + go o.run() + <-o.chInitialised + return nil + }) +} +func (o *l1Oracle) Close() error { + return o.StopOnce(o.Name(), func() error { + close(o.chStop) + <-o.chDone + return nil + }) +} + +func (o *l1Oracle) HealthReport() map[string]error { + return map[string]error{o.Name(): o.Healthy()} +} + +func (o *l1Oracle) run() { + defer close(o.chDone) + + t := o.refresh() + close(o.chInitialised) + + for { + select { + case <-o.chStop: + return + case <-t.C: + t = o.refresh() + } + } +} +func (o *l1Oracle) refresh() (t *time.Timer) { + t, err := o.refreshWithError() + if err != nil { + o.SvcErrBuffer.Append(err) + } + return +} + +func (o *l1Oracle) refreshWithError() (t *time.Timer, err error) { + t = time.NewTimer(utils.WithJitter(o.pollPeriod)) + + ctx, cancel := o.chStop.CtxCancel(evmclient.ContextWithDefaultTimeout()) + defer cancel() + + var callData, b []byte + precompile := common.HexToAddress(o.l1GasPriceAddress) + callData, err = o.l1GasPriceMethodAbi.Pack(o.gasPriceMethod) + if 
err != nil { + errMsg := fmt.Sprintf("failed to pack calldata for %s L1 gas price method", o.chainType) + o.logger.Errorf(errMsg) + return t, fmt.Errorf("%s: %w", errMsg, err) + } + b, err = o.client.CallContract(ctx, ethereum.CallMsg{ + To: &precompile, + Data: callData, + }, nil) + if err != nil { + errMsg := "gas oracle contract call failed" + o.logger.Errorf(errMsg) + return t, fmt.Errorf("%s: %w", errMsg, err) + } + + if len(b) != 32 { // returns uint256; + errMsg := fmt.Sprintf("return data length (%d) different than expected (%d)", len(b), 32) + o.logger.Criticalf(errMsg) + return t, fmt.Errorf(errMsg) + } + price := new(big.Int).SetBytes(b) + + o.l1GasPriceMu.Lock() + defer o.l1GasPriceMu.Unlock() + o.l1GasPrice = priceEntry{price: assets.NewWei(price), timestamp: time.Now()} + return +} + +func (o *l1Oracle) GasPrice(_ context.Context) (l1GasPrice *assets.Wei, err error) { + var timestamp time.Time + ok := o.IfStarted(func() { + o.l1GasPriceMu.RLock() + l1GasPrice = o.l1GasPrice.price + timestamp = o.l1GasPrice.timestamp + o.l1GasPriceMu.RUnlock() + }) + if !ok { + return l1GasPrice, fmt.Errorf("L1GasOracle is not started; cannot estimate gas") + } + if l1GasPrice == nil { + return l1GasPrice, fmt.Errorf("failed to get l1 gas price; gas price not set") + } + // Validate the price has been updated within the pollPeriod * 2 + // Allowing double the poll period before declaring the price stale to give ample time for the refresh to process + if time.Since(timestamp) > o.pollPeriod*2 { + return l1GasPrice, fmt.Errorf("gas price is stale") + } + return +} + +// Gets the L1 gas cost for the provided transaction at the specified block num +// If block num is not provided, the value on the latest block num is used +func (o *l1Oracle) GetGasCost(ctx context.Context, tx *gethtypes.Transaction, blockNum *big.Int) (*assets.Wei, error) { + ctx, cancel := context.WithTimeout(ctx, client.QueryTimeout) + defer cancel() + var callData, b []byte + var err error + if 
o.chainType == config.ChainOptimismBedrock || o.chainType == config.ChainScroll { + // Append rlp-encoded tx + var encodedtx []byte + if encodedtx, err = tx.MarshalBinary(); err != nil { + return nil, fmt.Errorf("failed to marshal tx for gas cost estimation: %w", err) + } + if callData, err = o.l1GasCostMethodAbi.Pack(o.gasCostMethod, encodedtx); err != nil { + return nil, fmt.Errorf("failed to pack calldata for %s L1 gas cost estimation method: %w", o.chainType, err) + } + } else if o.chainType == config.ChainArbitrum { + if callData, err = o.l1GasCostMethodAbi.Pack(o.gasCostMethod, tx.To(), false, tx.Data()); err != nil { + return nil, fmt.Errorf("failed to pack calldata for %s L1 gas cost estimation method: %w", o.chainType, err) + } + } else { + return nil, fmt.Errorf("L1 gas cost not supported for this chain: %s", o.chainType) + } + + precompile := common.HexToAddress(o.l1GasCostAddress) + b, err = o.client.CallContract(ctx, ethereum.CallMsg{ + To: &precompile, + Data: callData, + }, blockNum) + if err != nil { + errorMsg := fmt.Sprintf("gas oracle contract call failed: %v", err) + o.logger.Errorf(errorMsg) + return nil, fmt.Errorf(errorMsg) + } + + var l1GasCost *big.Int + if o.chainType == config.ChainOptimismBedrock || o.chainType == config.ChainScroll { + if len(b) != 32 { // returns uint256; + errorMsg := fmt.Sprintf("return data length (%d) different than expected (%d)", len(b), 32) + o.logger.Critical(errorMsg) + return nil, fmt.Errorf(errorMsg) + } + l1GasCost = new(big.Int).SetBytes(b) + } else if o.chainType == config.ChainArbitrum { + if len(b) != 8+2*32 { // returns (uint64 gasEstimateForL1, uint256 baseFee, uint256 l1BaseFeeEstimate); + errorMsg := fmt.Sprintf("return data length (%d) different than expected (%d)", len(b), 8+2*32) + o.logger.Critical(errorMsg) + return nil, fmt.Errorf(errorMsg) + } + l1GasCost = new(big.Int).SetBytes(b[:8]) + } + + return assets.NewWei(l1GasCost), nil +} diff --git a/core/chains/evm/gas/rollups/l1_oracle_abi.go 
b/core/chains/evm/gas/rollups/l1_oracle_abi.go new file mode 100644 index 00000000..77ef4d49 --- /dev/null +++ b/core/chains/evm/gas/rollups/l1_oracle_abi.go @@ -0,0 +1,13 @@ +package rollups + +/* ABIs for Arbitrum Gas Info and Node Interface precompile contract methods needed for the L1 oracle */ +// ABI found at https://arbiscan.io/address/0x000000000000000000000000000000000000006C#code +const GetL1BaseFeeEstimateAbiString = `[{"inputs":[],"name":"getL1BaseFeeEstimate","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]` + +// ABI found at https://arbiscan.io/address/0x00000000000000000000000000000000000000C8#code +const GasEstimateL1ComponentAbiString = `[{"inputs":[{"internalType":"address","name":"to","type":"address"},{"internalType":"bool","name":"contractCreation","type":"bool"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"gasEstimateL1Component","outputs":[{"internalType":"uint64","name":"gasEstimateForL1","type":"uint64"},{"internalType":"uint256","name":"baseFee","type":"uint256"},{"internalType":"uint256","name":"l1BaseFeeEstimate","type":"uint256"}],"stateMutability":"payable","type":"function"}]` + +/* ABIs for Optimism, Scroll, and Kroma precompile contract methods needed for the L1 oracle */ +// All ABIs found at https://optimistic.etherscan.io/address/0xc0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d3000f#code +const L1BaseFeeAbiString = `[{"inputs":[],"name":"l1BaseFee","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]` +const GetL1FeeAbiString = `[{"inputs":[{"internalType":"bytes","name":"_data","type":"bytes"}],"name":"getL1Fee","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]` diff --git a/core/chains/evm/gas/rollups/l1_oracle_test.go b/core/chains/evm/gas/rollups/l1_oracle_test.go new file mode 100644 index 00000000..6d01ae47 --- /dev/null +++ 
b/core/chains/evm/gas/rollups/l1_oracle_test.go @@ -0,0 +1,270 @@ +package rollups + +import ( + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + + "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas/rollups/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestL1Oracle(t *testing.T) { + t.Parallel() + + t.Run("Unsupported ChainType returns nil", func(t *testing.T) { + ethClient := mocks.NewETHClient(t) + + assert.Panicsf(t, func() { NewL1GasOracle(logger.Test(t), ethClient, config.ChainCelo) }, "Received unspported chaintype %s", config.ChainCelo) + }) +} + +func TestL1Oracle_GasPrice(t *testing.T) { + t.Parallel() + + t.Run("Calling GasPrice on unstarted L1Oracle returns error", func(t *testing.T) { + ethClient := mocks.NewETHClient(t) + + oracle := NewL1GasOracle(logger.Test(t), ethClient, config.ChainOptimismBedrock) + + _, err := oracle.GasPrice(testutils.Context(t)) + assert.EqualError(t, err, "L1GasOracle is not started; cannot estimate gas") + }) + + t.Run("Calling GasPrice on started Arbitrum L1Oracle returns Arbitrum l1GasPrice", func(t *testing.T) { + l1BaseFee := big.NewInt(100) + l1GasPriceMethodAbi, err := abi.JSON(strings.NewReader(GetL1BaseFeeEstimateAbiString)) + require.NoError(t, err) + + ethClient := mocks.NewETHClient(t) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { 
+ callMsg := args.Get(1).(ethereum.CallMsg) + blockNumber := args.Get(2).(*big.Int) + var payload []byte + payload, err = l1GasPriceMethodAbi.Pack("getL1BaseFeeEstimate") + require.NoError(t, err) + require.Equal(t, payload, callMsg.Data) + assert.Nil(t, blockNumber) + }).Return(common.BigToHash(l1BaseFee).Bytes(), nil) + + oracle := NewL1GasOracle(logger.Test(t), ethClient, config.ChainArbitrum) + servicetest.RunHealthy(t, oracle) + + gasPrice, err := oracle.GasPrice(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, assets.NewWei(l1BaseFee), gasPrice) + }) + + t.Run("Calling GasPrice on started Kroma L1Oracle returns Kroma l1GasPrice", func(t *testing.T) { + l1BaseFee := big.NewInt(100) + l1GasPriceMethodAbi, err := abi.JSON(strings.NewReader(L1BaseFeeAbiString)) + require.NoError(t, err) + + ethClient := mocks.NewETHClient(t) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { + callMsg := args.Get(1).(ethereum.CallMsg) + blockNumber := args.Get(2).(*big.Int) + var payload []byte + payload, err = l1GasPriceMethodAbi.Pack("l1BaseFee") + require.NoError(t, err) + require.Equal(t, payload, callMsg.Data) + assert.Nil(t, blockNumber) + }).Return(common.BigToHash(l1BaseFee).Bytes(), nil) + + oracle := NewL1GasOracle(logger.Test(t), ethClient, config.ChainKroma) + servicetest.RunHealthy(t, oracle) + + gasPrice, err := oracle.GasPrice(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, assets.NewWei(l1BaseFee), gasPrice) + }) + + t.Run("Calling GasPrice on started OPStack L1Oracle returns OPStack l1GasPrice", func(t *testing.T) { + l1BaseFee := big.NewInt(100) + l1GasPriceMethodAbi, err := abi.JSON(strings.NewReader(L1BaseFeeAbiString)) + require.NoError(t, err) + + ethClient := mocks.NewETHClient(t) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { + callMsg := 
args.Get(1).(ethereum.CallMsg) + blockNumber := args.Get(2).(*big.Int) + var payload []byte + payload, err = l1GasPriceMethodAbi.Pack("l1BaseFee") + require.NoError(t, err) + require.Equal(t, payload, callMsg.Data) + assert.Nil(t, blockNumber) + }).Return(common.BigToHash(l1BaseFee).Bytes(), nil) + + oracle := NewL1GasOracle(logger.Test(t), ethClient, config.ChainOptimismBedrock) + servicetest.RunHealthy(t, oracle) + + gasPrice, err := oracle.GasPrice(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, assets.NewWei(l1BaseFee), gasPrice) + }) + + t.Run("Calling GasPrice on started Scroll L1Oracle returns Scroll l1GasPrice", func(t *testing.T) { + l1BaseFee := big.NewInt(200) + l1GasPriceMethodAbi, err := abi.JSON(strings.NewReader(L1BaseFeeAbiString)) + require.NoError(t, err) + + ethClient := mocks.NewETHClient(t) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { + callMsg := args.Get(1).(ethereum.CallMsg) + blockNumber := args.Get(2).(*big.Int) + var payload []byte + payload, err = l1GasPriceMethodAbi.Pack("l1BaseFee") + require.NoError(t, err) + require.Equal(t, payload, callMsg.Data) + assert.Nil(t, blockNumber) + }).Return(common.BigToHash(l1BaseFee).Bytes(), nil) + + oracle := NewL1GasOracle(logger.Test(t), ethClient, config.ChainScroll) + require.NoError(t, oracle.Start(testutils.Context(t))) + t.Cleanup(func() { assert.NoError(t, oracle.Close()) }) + + gasPrice, err := oracle.GasPrice(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, assets.NewWei(l1BaseFee), gasPrice) + }) +} + +func TestL1Oracle_GetGasCost(t *testing.T) { + t.Parallel() + + t.Run("Calling GetGasCost on started Arbitrum L1Oracle returns Arbitrum getL1Fee", func(t *testing.T) { + l1GasCost := big.NewInt(100) + baseFee := utils.Uint256ToBytes32(big.NewInt(1000)) + l1BaseFeeEstimate := utils.Uint256ToBytes32(big.NewInt(500)) + blockNum := big.NewInt(1000) + toAddress := 
utils.RandomAddress() + callData := []byte{1, 2, 3, 4, 5, 6, 7} + l1GasCostMethodAbi, err := abi.JSON(strings.NewReader(GasEstimateL1ComponentAbiString)) + require.NoError(t, err) + + tx := types.NewTx(&types.LegacyTx{ + Nonce: 42, + To: &toAddress, + Data: callData, + }) + result := common.LeftPadBytes(l1GasCost.Bytes(), 8) + result = append(result, baseFee...) + result = append(result, l1BaseFeeEstimate...) + + ethClient := mocks.NewETHClient(t) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { + callMsg := args.Get(1).(ethereum.CallMsg) + blockNumber := args.Get(2).(*big.Int) + var payload []byte + payload, err = l1GasCostMethodAbi.Pack("gasEstimateL1Component", toAddress, false, callData) + require.NoError(t, err) + require.Equal(t, payload, callMsg.Data) + require.Equal(t, blockNum, blockNumber) + }).Return(result, nil) + + oracle := NewL1GasOracle(logger.Test(t), ethClient, config.ChainArbitrum) + + gasCost, err := oracle.GetGasCost(testutils.Context(t), tx, blockNum) + require.NoError(t, err) + require.Equal(t, assets.NewWei(l1GasCost), gasCost) + }) + + t.Run("Calling GetGasCost on started Kroma L1Oracle returns error", func(t *testing.T) { + blockNum := big.NewInt(1000) + tx := types.NewTx(&types.LegacyTx{}) + + ethClient := mocks.NewETHClient(t) + oracle := NewL1GasOracle(logger.Test(t), ethClient, config.ChainKroma) + + _, err := oracle.GetGasCost(testutils.Context(t), tx, blockNum) + require.Error(t, err, "L1 gas cost not supported for this chain: kroma") + }) + + t.Run("Calling GetGasCost on started OPStack L1Oracle returns OPStack getL1Fee", func(t *testing.T) { + l1GasCost := big.NewInt(100) + blockNum := big.NewInt(1000) + toAddress := utils.RandomAddress() + callData := []byte{1, 2, 3} + l1GasCostMethodAbi, err := abi.JSON(strings.NewReader(GetL1FeeAbiString)) + require.NoError(t, err) + + tx := types.NewTx(&types.LegacyTx{ + Nonce: 42, + To: &toAddress, + Data: 
callData, + }) + + encodedTx, err := tx.MarshalBinary() + require.NoError(t, err) + + ethClient := mocks.NewETHClient(t) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { + callMsg := args.Get(1).(ethereum.CallMsg) + blockNumber := args.Get(2).(*big.Int) + var payload []byte + payload, err = l1GasCostMethodAbi.Pack("getL1Fee", encodedTx) + require.NoError(t, err) + require.Equal(t, payload, callMsg.Data) + require.Equal(t, blockNum, blockNumber) + }).Return(common.BigToHash(l1GasCost).Bytes(), nil) + + oracle := NewL1GasOracle(logger.Test(t), ethClient, config.ChainOptimismBedrock) + + gasCost, err := oracle.GetGasCost(testutils.Context(t), tx, blockNum) + require.NoError(t, err) + require.Equal(t, assets.NewWei(l1GasCost), gasCost) + }) + + t.Run("Calling GetGasCost on started Scroll L1Oracle returns Scroll getL1Fee", func(t *testing.T) { + l1GasCost := big.NewInt(100) + blockNum := big.NewInt(1000) + toAddress := utils.RandomAddress() + callData := []byte{1, 2, 3} + l1GasCostMethodAbi, err := abi.JSON(strings.NewReader(GetL1FeeAbiString)) + require.NoError(t, err) + + tx := types.NewTx(&types.LegacyTx{ + Nonce: 42, + To: &toAddress, + Data: callData, + }) + + encodedTx, err := tx.MarshalBinary() + require.NoError(t, err) + + ethClient := mocks.NewETHClient(t) + ethClient.On("CallContract", mock.Anything, mock.IsType(ethereum.CallMsg{}), mock.IsType(&big.Int{})).Run(func(args mock.Arguments) { + callMsg := args.Get(1).(ethereum.CallMsg) + blockNumber := args.Get(2).(*big.Int) + var payload []byte + payload, err = l1GasCostMethodAbi.Pack("getL1Fee", encodedTx) + require.NoError(t, err) + require.Equal(t, payload, callMsg.Data) + require.Equal(t, blockNum, blockNumber) + }).Return(common.BigToHash(l1GasCost).Bytes(), nil) + + oracle := NewL1GasOracle(logger.Test(t), ethClient, config.ChainScroll) + + gasCost, err := oracle.GetGasCost(testutils.Context(t), tx, blockNum) + 
require.NoError(t, err) + require.Equal(t, assets.NewWei(l1GasCost), gasCost) + }) +} diff --git a/core/chains/evm/gas/rollups/mocks/eth_client.go b/core/chains/evm/gas/rollups/mocks/eth_client.go new file mode 100644 index 00000000..bb0784f8 --- /dev/null +++ b/core/chains/evm/gas/rollups/mocks/eth_client.go @@ -0,0 +1,61 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + ethereum "github.com/ethereum/go-ethereum" + + mock "github.com/stretchr/testify/mock" +) + +// ETHClient is an autogenerated mock type for the ethClient type +type ETHClient struct { + mock.Mock +} + +// CallContract provides a mock function with given fields: ctx, msg, blockNumber +func (_m *ETHClient) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, msg, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)); ok { + return rf(ctx, msg, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) []byte); ok { + r0 = rf(ctx, msg, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg, *big.Int) error); ok { + r1 = rf(ctx, msg, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewETHClient creates a new instance of ETHClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewETHClient(t interface { + mock.TestingT + Cleanup(func()) +}) *ETHClient { + mock := &ETHClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/gas/rollups/mocks/l1_oracle.go b/core/chains/evm/gas/rollups/mocks/l1_oracle.go new file mode 100644 index 00000000..69eba72c --- /dev/null +++ b/core/chains/evm/gas/rollups/mocks/l1_oracle.go @@ -0,0 +1,186 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + assets "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// L1Oracle is an autogenerated mock type for the L1Oracle type +type L1Oracle struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *L1Oracle) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GasPrice provides a mock function with given fields: ctx +func (_m *L1Oracle) GasPrice(ctx context.Context) (*assets.Wei, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GasPrice") + } + + var r0 *assets.Wei + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*assets.Wei, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *assets.Wei); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetGasCost provides a mock function with given fields: ctx, tx, blockNum +func (_m *L1Oracle) GetGasCost(ctx context.Context, tx *types.Transaction, blockNum *big.Int) 
(*assets.Wei, error) { + ret := _m.Called(ctx, tx, blockNum) + + if len(ret) == 0 { + panic("no return value specified for GetGasCost") + } + + var r0 *assets.Wei + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction, *big.Int) (*assets.Wei, error)); ok { + return rf(ctx, tx, blockNum) + } + if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction, *big.Int) *assets.Wei); ok { + r0 = rf(ctx, tx, blockNum) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *types.Transaction, *big.Int) error); ok { + r1 = rf(ctx, tx, blockNum) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HealthReport provides a mock function with given fields: +func (_m *L1Oracle) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *L1Oracle) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Ready provides a mock function with given fields: +func (_m *L1Oracle) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *L1Oracle) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok 
:= ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewL1Oracle creates a new instance of L1Oracle. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewL1Oracle(t interface { + mock.TestingT + Cleanup(func()) +}) *L1Oracle { + mock := &L1Oracle{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/gas/rollups/models.go b/core/chains/evm/gas/rollups/models.go new file mode 100644 index 00000000..aec5bb7b --- /dev/null +++ b/core/chains/evm/gas/rollups/models.go @@ -0,0 +1,22 @@ +package rollups + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum/core/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/services" +) + +// L1Oracle provides interface for fetching L1-specific fee components if the chain is an L2. 
+// For example, on Optimistic Rollups, this oracle can return rollup-specific l1BaseFee +// +//go:generate mockery --quiet --name L1Oracle --output ./mocks/ --case=underscore +type L1Oracle interface { + services.ServiceCtx + + GasPrice(ctx context.Context) (*assets.Wei, error) + GetGasCost(ctx context.Context, tx *types.Transaction, blockNum *big.Int) (*assets.Wei, error) +} diff --git a/core/chains/evm/gas/suggested_price_estimator.go b/core/chains/evm/gas/suggested_price_estimator.go new file mode 100644 index 00000000..6c47fc10 --- /dev/null +++ b/core/chains/evm/gas/suggested_price_estimator.go @@ -0,0 +1,238 @@ +package gas + +import ( + "context" + "fmt" + "slices" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + bigmath "github.com/goplugin/plugin-common/pkg/utils/big_math" + + "github.com/goplugin/pluginv3.0/v2/common/fee" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +var ( + _ EvmEstimator = &SuggestedPriceEstimator{} +) + +type suggestedPriceConfig interface { + BumpPercent() uint16 + BumpMin() *assets.Wei +} + +//go:generate mockery --quiet --name rpcClient --output ./mocks/ --case=underscore --structname RPCClient +type rpcClient interface { + CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error +} + +// SuggestedPriceEstimator is an Estimator which uses the suggested gas price from eth_gasPrice. 
+type SuggestedPriceEstimator struct { + services.StateMachine + + cfg suggestedPriceConfig + client rpcClient + pollPeriod time.Duration + logger logger.Logger + + gasPriceMu sync.RWMutex + GasPrice *assets.Wei + + chForceRefetch chan (chan struct{}) + chInitialised chan struct{} + chStop services.StopChan + chDone chan struct{} +} + +// NewSuggestedPriceEstimator returns a new Estimator which uses the suggested gas price. +func NewSuggestedPriceEstimator(lggr logger.Logger, client rpcClient, cfg suggestedPriceConfig) EvmEstimator { + return &SuggestedPriceEstimator{ + client: client, + pollPeriod: 10 * time.Second, + logger: logger.Named(lggr, "SuggestedPriceEstimator"), + cfg: cfg, + chForceRefetch: make(chan (chan struct{})), + chInitialised: make(chan struct{}), + chStop: make(chan struct{}), + chDone: make(chan struct{}), + } +} + +func (o *SuggestedPriceEstimator) Name() string { + return o.logger.Name() +} + +func (o *SuggestedPriceEstimator) Start(context.Context) error { + return o.StartOnce("SuggestedPriceEstimator", func() error { + go o.run() + <-o.chInitialised + return nil + }) +} +func (o *SuggestedPriceEstimator) Close() error { + return o.StopOnce("SuggestedPriceEstimator", func() error { + close(o.chStop) + <-o.chDone + return nil + }) +} + +func (o *SuggestedPriceEstimator) HealthReport() map[string]error { + return map[string]error{o.Name(): o.Healthy()} +} + +func (o *SuggestedPriceEstimator) run() { + defer close(o.chDone) + + t := o.refreshPrice() + close(o.chInitialised) + + for { + select { + case <-o.chStop: + return + case ch := <-o.chForceRefetch: + t.Stop() + t = o.refreshPrice() + close(ch) + case <-t.C: + t = o.refreshPrice() + } + } +} + +func (o *SuggestedPriceEstimator) refreshPrice() (t *time.Timer) { + t = time.NewTimer(utils.WithJitter(o.pollPeriod)) + + var res hexutil.Big + ctx, cancel := o.chStop.CtxCancel(evmclient.ContextWithDefaultTimeout()) + defer cancel() + + if err := o.client.CallContext(ctx, &res, "eth_gasPrice"); 
err != nil { + o.logger.Warnf("Failed to refresh prices, got error: %s", err) + return + } + bi := (*assets.Wei)(&res) + + o.logger.Debugw("refreshPrice", "GasPrice", bi) + + o.gasPriceMu.Lock() + defer o.gasPriceMu.Unlock() + o.GasPrice = bi + return +} + +// Uses the force refetch chan to trigger a price update and blocks until complete +func (o *SuggestedPriceEstimator) forceRefresh(ctx context.Context) (err error) { + ch := make(chan struct{}) + select { + case o.chForceRefetch <- ch: + case <-o.chStop: + return errors.New("estimator stopped") + case <-ctx.Done(): + return ctx.Err() + } + select { + case <-ch: + case <-o.chStop: + return errors.New("estimator stopped") + case <-ctx.Done(): + return ctx.Err() + } + return +} + +func (o *SuggestedPriceEstimator) OnNewLongestChain(context.Context, *evmtypes.Head) {} + +func (*SuggestedPriceEstimator) GetDynamicFee(_ context.Context, _ uint32, _ *assets.Wei) (fee DynamicFee, chainSpecificGasLimit uint32, err error) { + err = errors.New("dynamic fees are not implemented for this estimator") + return +} + +func (*SuggestedPriceEstimator) BumpDynamicFee(_ context.Context, _ DynamicFee, _ uint32, _ *assets.Wei, _ []EvmPriorAttempt) (bumped DynamicFee, chainSpecificGasLimit uint32, err error) { + err = errors.New("dynamic fees are not implemented for this estimator") + return +} + +func (o *SuggestedPriceEstimator) GetLegacyGas(ctx context.Context, _ []byte, GasLimit uint32, maxGasPriceWei *assets.Wei, opts ...feetypes.Opt) (gasPrice *assets.Wei, chainSpecificGasLimit uint32, err error) { + chainSpecificGasLimit = GasLimit + ok := o.IfStarted(func() { + if slices.Contains(opts, feetypes.OptForceRefetch) { + err = o.forceRefresh(ctx) + } + if gasPrice = o.getGasPrice(); gasPrice == nil { + err = errors.New("failed to estimate gas; gas price not set") + return + } + o.logger.Debugw("GetLegacyGas", "GasPrice", gasPrice, "GasLimit", GasLimit) + }) + if !ok { + return nil, 0, errors.New("estimator is not started") + } else 
if err != nil { + return + } + // For L2 chains, submitting a transaction that is not priced high enough will cause the call to fail, so if the cap is lower than the RPC suggested gas price, this transaction cannot succeed + if gasPrice != nil && gasPrice.Cmp(maxGasPriceWei) > 0 { + return nil, 0, errors.Errorf("estimated gas price: %s is greater than the maximum gas price configured: %s", gasPrice.String(), maxGasPriceWei.String()) + } + return +} + +// Refreshes the gas price by making a call to the RPC in case the current one has gone stale. +// Adds the larger of BumpPercent and BumpMin configs as a buffer on top of the price returned from the RPC. +// The only reason bumping logic would be called on the SuggestedPriceEstimator is if there was a significant price spike +// between the last price update and when the tx was submitted. Refreshing the price helps ensure the latest market changes are accounted for. +func (o *SuggestedPriceEstimator) BumpLegacyGas(ctx context.Context, originalFee *assets.Wei, feeLimit uint32, maxGasPriceWei *assets.Wei, _ []EvmPriorAttempt) (newGasPrice *assets.Wei, chainSpecificGasLimit uint32, err error) { + chainSpecificGasLimit = feeLimit + ok := o.IfStarted(func() { + // Immediately return error if original fee is greater than or equal to the max gas price + // Prevents a loop of resubmitting the attempt with the max gas price + if originalFee.Cmp(maxGasPriceWei) >= 0 { + err = fmt.Errorf("original fee (%s) greater than or equal to max gas price (%s) so cannot be bumped further", originalFee.String(), maxGasPriceWei.String()) + return + } + err = o.forceRefresh(ctx) + if newGasPrice = o.getGasPrice(); newGasPrice == nil { + err = errors.New("failed to refresh and return gas; gas price not set") + return + } + o.logger.Debugw("BumpLegacyGas", "GasPrice", newGasPrice, "GasLimit", feeLimit) + }) + if !ok { + return nil, 0, errors.New("estimator is not started") + } else if err != nil { + return + } + if newGasPrice != nil && 
newGasPrice.Cmp(maxGasPriceWei) > 0 { + return nil, 0, errors.Errorf("estimated gas price: %s is greater than the maximum gas price configured: %s", newGasPrice.String(), maxGasPriceWei.String()) + } + // Add a buffer on top of the gas price returned by the RPC. + // Bump logic when using the suggested gas price from an RPC is realistically only needed when there is increased volatility in gas price. + // This buffer is a precaution to increase the chance of getting this tx on chain + bufferedPrice := fee.MaxBumpedFee(newGasPrice.ToInt(), o.cfg.BumpPercent(), o.cfg.BumpMin().ToInt()) + // If the new suggested price is less than or equal to the max and the buffer puts the new price over the max, return the max price instead + // The buffer is added on top of the suggested price during bumping as just a precaution. It is better to resubmit the transaction with the max gas price instead of erroring. + newGasPrice = assets.NewWei(bigmath.Min(bufferedPrice, maxGasPriceWei.ToInt())) + + // Return the original price if the refreshed price with the buffer is lower to ensure the bumped gas price is always equal or higher to the previous attempt + if originalFee != nil && originalFee.Cmp(newGasPrice) > 0 { + return originalFee, chainSpecificGasLimit, nil + } + return +} + +func (o *SuggestedPriceEstimator) getGasPrice() (GasPrice *assets.Wei) { + o.gasPriceMu.RLock() + defer o.gasPriceMu.RUnlock() + return o.GasPrice +} diff --git a/core/chains/evm/gas/suggested_price_estimator_test.go b/core/chains/evm/gas/suggested_price_estimator_test.go new file mode 100644 index 00000000..98983cd3 --- /dev/null +++ b/core/chains/evm/gas/suggested_price_estimator_test.go @@ -0,0 +1,231 @@ +package gas_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + 
"github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestSuggestedPriceEstimator(t *testing.T) { + t.Parallel() + + maxGasPrice := assets.NewWeiI(100) + + calldata := []byte{0x00, 0x00, 0x01, 0x02, 0x03} + const gasLimit uint32 = 80000 + + cfg := &gas.MockGasEstimatorConfig{BumpPercentF: 10, BumpMinF: assets.NewWei(big.NewInt(1)), BumpThresholdF: 1} + + t.Run("calling GetLegacyGas on unstarted estimator returns error", func(t *testing.T) { + client := mocks.NewRPCClient(t) + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + _, _, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, maxGasPrice) + assert.EqualError(t, err, "estimator is not started") + }) + + t.Run("calling GetLegacyGas on started estimator returns prices", func(t *testing.T) { + client := mocks.NewRPCClient(t) + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(42) + }) + + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, maxGasPrice) + require.NoError(t, err) + assert.Equal(t, assets.NewWeiI(42), gasPrice) + assert.Equal(t, gasLimit, chainSpecificGasLimit) + }) + + t.Run("gas price is lower than user specified max gas price", func(t *testing.T) { + client := mocks.NewRPCClient(t) + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(42) + }) + + 
servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, assets.NewWeiI(40)) + require.Error(t, err) + assert.EqualError(t, err, "estimated gas price: 42 wei is greater than the maximum gas price configured: 40 wei") + assert.Nil(t, gasPrice) + assert.Equal(t, uint32(0), chainSpecificGasLimit) + }) + + t.Run("gas price is lower than global max gas price", func(t *testing.T) { + client := mocks.NewRPCClient(t) + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(120) + }) + + servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, assets.NewWeiI(110)) + assert.EqualError(t, err, "estimated gas price: 120 wei is greater than the maximum gas price configured: 110 wei") + assert.Nil(t, gasPrice) + assert.Equal(t, uint32(0), chainSpecificGasLimit) + }) + + t.Run("calling GetLegacyGas on started estimator if initial call failed returns error", func(t *testing.T) { + client := mocks.NewRPCClient(t) + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(errors.New("kaboom")) + + servicetest.RunHealthy(t, o) + + _, _, err := o.GetLegacyGas(testutils.Context(t), calldata, gasLimit, maxGasPrice) + assert.EqualError(t, err, "failed to estimate gas; gas price not set") + }) + + t.Run("calling GetDynamicFee always returns error", func(t *testing.T) { + client := mocks.NewRPCClient(t) + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + _, _, err := o.GetDynamicFee(testutils.Context(t), gasLimit, maxGasPrice) + assert.EqualError(t, err, "dynamic fees are not implemented for this estimator") + }) + + t.Run("calling BumpLegacyGas on 
unstarted estimator returns error", func(t *testing.T) { + client := mocks.NewRPCClient(t) + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + _, _, err := o.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(42), gasLimit, maxGasPrice, nil) + assert.EqualError(t, err, "estimator is not started") + }) + + t.Run("calling BumpDynamicFee always returns error", func(t *testing.T) { + client := mocks.NewRPCClient(t) + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + fee := gas.DynamicFee{ + FeeCap: assets.NewWeiI(42), + TipCap: assets.NewWeiI(5), + } + _, _, err := o.BumpDynamicFee(testutils.Context(t), fee, gasLimit, maxGasPrice, nil) + assert.EqualError(t, err, "dynamic fees are not implemented for this estimator") + }) + + t.Run("calling BumpLegacyGas on started estimator returns new price buffered with bumpPercent", func(t *testing.T) { + client := mocks.NewRPCClient(t) + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(40) + }) + + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(10), gasLimit, maxGasPrice, nil) + require.NoError(t, err) + assert.Equal(t, assets.NewWeiI(44), gasPrice) + assert.Equal(t, gasLimit, chainSpecificGasLimit) + }) + + t.Run("calling BumpLegacyGas on started estimator returns new price buffered with bumpMin", func(t *testing.T) { + client := mocks.NewRPCClient(t) + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(40) + }) + + testCfg := &gas.MockGasEstimatorConfig{BumpPercentF: 1, BumpMinF: assets.NewWei(big.NewInt(1)), BumpThresholdF: 1} + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, testCfg) + 
servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(10), gasLimit, maxGasPrice, nil) + require.NoError(t, err) + assert.Equal(t, assets.NewWeiI(41), gasPrice) + assert.Equal(t, gasLimit, chainSpecificGasLimit) + }) + + t.Run("calling BumpLegacyGas on started estimator returns original price when lower than previous", func(t *testing.T) { + client := mocks.NewRPCClient(t) + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(5) + }) + + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(10), gasLimit, maxGasPrice, nil) + require.NoError(t, err) + assert.Equal(t, assets.NewWeiI(10), gasPrice) + assert.Equal(t, gasLimit, chainSpecificGasLimit) + }) + + t.Run("calling BumpLegacyGas on started estimator returns error, suggested gas price is higher than max gas price", func(t *testing.T) { + client := mocks.NewRPCClient(t) + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(42) + }) + + servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(10), gasLimit, assets.NewWeiI(40), nil) + require.Error(t, err) + assert.EqualError(t, err, "estimated gas price: 42 wei is greater than the maximum gas price configured: 40 wei") + assert.Nil(t, gasPrice) + assert.Equal(t, uint32(0), chainSpecificGasLimit) + }) + + t.Run("calling BumpLegacyGas on started estimator returns max gas price when suggested price under max but the buffer exceeds it", func(t *testing.T) { + client := 
mocks.NewRPCClient(t) + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(39) + }) + + servicetest.RunHealthy(t, o) + gasPrice, chainSpecificGasLimit, err := o.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(10), gasLimit, assets.NewWeiI(40), nil) + require.NoError(t, err) + assert.Equal(t, assets.NewWeiI(40), gasPrice) + assert.Equal(t, gasLimit, chainSpecificGasLimit) + }) + + t.Run("calling BumpLegacyGas on started estimator if initial call failed returns error", func(t *testing.T) { + client := mocks.NewRPCClient(t) + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(errors.New("kaboom")) + + servicetest.RunHealthy(t, o) + + _, _, err := o.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(10), gasLimit, maxGasPrice, nil) + assert.EqualError(t, err, "failed to refresh and return gas; gas price not set") + }) + + t.Run("calling BumpLegacyGas on started estimator if refresh call failed returns price from previous update", func(t *testing.T) { + client := mocks.NewRPCClient(t) + o := gas.NewSuggestedPriceEstimator(logger.Test(t), client, cfg) + + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(nil).Run(func(args mock.Arguments) { + res := args.Get(1).(*hexutil.Big) + (*big.Int)(res).SetInt64(40) + }).Once() + client.On("CallContext", mock.Anything, mock.Anything, "eth_gasPrice").Return(errors.New("kaboom")) + + servicetest.RunHealthy(t, o) + + gasPrice, chainSpecificGasLimit, err := o.BumpLegacyGas(testutils.Context(t), assets.NewWeiI(10), gasLimit, maxGasPrice, nil) + require.NoError(t, err) + assert.Equal(t, assets.NewWeiI(44), gasPrice) + assert.Equal(t, gasLimit, chainSpecificGasLimit) + }) +} diff --git 
a/core/chains/evm/headtracker/config.go b/core/chains/evm/headtracker/config.go new file mode 100644 index 00000000..f2a044b2 --- /dev/null +++ b/core/chains/evm/headtracker/config.go @@ -0,0 +1,19 @@ +package headtracker + +import ( + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" +) + +//go:generate mockery --quiet --name Config --output ./mocks/ --case=underscore + +// Config represents a subset of options needed by head tracker +type Config interface { + BlockEmissionIdleWarningThreshold() time.Duration + FinalityDepth() uint32 +} + +type HeadTrackerConfig interface { + config.HeadTracker +} diff --git a/core/chains/evm/headtracker/head_broadcaster.go b/core/chains/evm/headtracker/head_broadcaster.go new file mode 100644 index 00000000..4e6441de --- /dev/null +++ b/core/chains/evm/headtracker/head_broadcaster.go @@ -0,0 +1,20 @@ +package headtracker + +import ( + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/common/headtracker" + commontypes "github.com/goplugin/pluginv3.0/v2/common/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +type headBroadcaster = headtracker.HeadBroadcaster[*evmtypes.Head, common.Hash] + +var _ commontypes.HeadBroadcaster[*evmtypes.Head, common.Hash] = &headBroadcaster{} + +func NewHeadBroadcaster( + lggr logger.Logger, +) *headBroadcaster { + return headtracker.NewHeadBroadcaster[*evmtypes.Head, common.Hash](lggr) +} diff --git a/core/chains/evm/headtracker/head_broadcaster_test.go b/core/chains/evm/headtracker/head_broadcaster_test.go new file mode 100644 index 00000000..aeff6fc6 --- /dev/null +++ b/core/chains/evm/headtracker/head_broadcaster_test.go @@ -0,0 +1,191 @@ +package headtracker_test + +import ( + "context" + "testing" + "time" + + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + commonconfig 
"github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/plugin-common/pkg/utils/mailbox/mailboxtest" + commonhtrk "github.com/goplugin/pluginv3.0/v2/common/headtracker" + commonmocks "github.com/goplugin/pluginv3.0/v2/common/types/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func waitHeadBroadcasterToStart(t *testing.T, hb types.HeadBroadcaster) { + t.Helper() + + subscriber := &cltest.MockHeadTrackable{} + _, unsubscribe := hb.Subscribe(subscriber) + defer unsubscribe() + + hb.BroadcastNewLongestChain(cltest.Head(1)) + g := gomega.NewWithT(t) + g.Eventually(subscriber.OnNewLongestChainCount).Should(gomega.Equal(int32(1))) +} + +func TestHeadBroadcaster_Subscribe(t *testing.T) { + t.Parallel() + g := gomega.NewWithT(t) + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].HeadTracker.SamplingInterval = &commonconfig.Duration{} + }) + evmCfg := evmtest.NewChainScopedConfig(t, cfg) + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + + sub := commonmocks.NewSubscription(t) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + chchHeaders := make(chan chan<- *evmtypes.Head, 1) + ethClient.On("SubscribeNewHead", 
mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + chchHeaders <- args.Get(1).(chan<- *evmtypes.Head) + }). + Return(sub, nil) + ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(1), nil).Once() + ethClient.On("HeadByHash", mock.Anything, mock.Anything).Return(cltest.Head(1), nil) + + sub.On("Unsubscribe").Return() + sub.On("Err").Return(nil) + + checker1 := &cltest.MockHeadTrackable{} + checker2 := &cltest.MockHeadTrackable{} + + orm := headtracker.NewORM(db, logger, cfg.Database(), *ethClient.ConfiguredChainID()) + hs := headtracker.NewHeadSaver(logger, orm, evmCfg.EVM(), evmCfg.EVM().HeadTracker()) + mailMon := mailboxtest.NewMonitor(t) + servicetest.Run(t, mailMon) + hb := headtracker.NewHeadBroadcaster(logger) + servicetest.Run(t, hb) + ht := headtracker.NewHeadTracker(logger, ethClient, evmCfg.EVM(), evmCfg.EVM().HeadTracker(), hb, hs, mailMon) + servicetest.Run(t, ht) + + latest1, unsubscribe1 := hb.Subscribe(checker1) + // "latest head" is nil here because we didn't receive any yet + assert.Equal(t, (*evmtypes.Head)(nil), latest1) + + headers := <-chchHeaders + h := evmtypes.Head{Number: 1, Hash: utils.NewHash(), ParentHash: utils.NewHash(), EVMChainID: big.New(&cltest.FixtureChainID)} + headers <- &h + g.Eventually(checker1.OnNewLongestChainCount).Should(gomega.Equal(int32(1))) + + latest2, _ := hb.Subscribe(checker2) + // "latest head" is set here to the most recent head received + assert.NotNil(t, latest2) + assert.Equal(t, h.Number, latest2.Number) + + unsubscribe1() + + headers <- &evmtypes.Head{Number: 2, Hash: utils.NewHash(), ParentHash: h.Hash, EVMChainID: big.New(&cltest.FixtureChainID)} + g.Eventually(checker2.OnNewLongestChainCount).Should(gomega.Equal(int32(1))) +} + +func TestHeadBroadcaster_BroadcastNewLongestChain(t *testing.T) { + t.Parallel() + g := gomega.NewWithT(t) + + lggr := logger.Test(t) + broadcaster := headtracker.NewHeadBroadcaster(lggr) + + err := 
broadcaster.Start(testutils.Context(t)) + require.NoError(t, err) + + waitHeadBroadcasterToStart(t, broadcaster) + + subscriber1 := &cltest.MockHeadTrackable{} + subscriber2 := &cltest.MockHeadTrackable{} + _, unsubscribe1 := broadcaster.Subscribe(subscriber1) + _, unsubscribe2 := broadcaster.Subscribe(subscriber2) + + broadcaster.BroadcastNewLongestChain(cltest.Head(1)) + g.Eventually(subscriber1.OnNewLongestChainCount).Should(gomega.Equal(int32(1))) + + unsubscribe1() + + broadcaster.BroadcastNewLongestChain(cltest.Head(2)) + g.Eventually(subscriber2.OnNewLongestChainCount).Should(gomega.Equal(int32(2))) + + unsubscribe2() + + subscriber3 := &cltest.MockHeadTrackable{} + _, unsubscribe3 := broadcaster.Subscribe(subscriber3) + broadcaster.BroadcastNewLongestChain(cltest.Head(1)) + g.Eventually(subscriber3.OnNewLongestChainCount).Should(gomega.Equal(int32(1))) + + unsubscribe3() + + // no subscribers - shall do nothing + broadcaster.BroadcastNewLongestChain(cltest.Head(0)) + + err = broadcaster.Close() + require.NoError(t, err) + + require.Equal(t, int32(1), subscriber3.OnNewLongestChainCount()) +} + +func TestHeadBroadcaster_TrackableCallbackTimeout(t *testing.T) { + t.Parallel() + + lggr := logger.Test(t) + broadcaster := headtracker.NewHeadBroadcaster(lggr) + + err := broadcaster.Start(testutils.Context(t)) + require.NoError(t, err) + + waitHeadBroadcasterToStart(t, broadcaster) + + slowAwaiter := cltest.NewAwaiter() + fastAwaiter := cltest.NewAwaiter() + slow := &sleepySubscriber{awaiter: slowAwaiter, delay: commonhtrk.TrackableCallbackTimeout * 2} + fast := &sleepySubscriber{awaiter: fastAwaiter, delay: commonhtrk.TrackableCallbackTimeout / 2} + _, unsubscribe1 := broadcaster.Subscribe(slow) + _, unsubscribe2 := broadcaster.Subscribe(fast) + + broadcaster.BroadcastNewLongestChain(cltest.Head(1)) + slowAwaiter.AwaitOrFail(t, testutils.WaitTimeout(t)) + fastAwaiter.AwaitOrFail(t, testutils.WaitTimeout(t)) + + require.True(t, slow.contextDone) + require.False(t, 
fast.contextDone) + + unsubscribe1() + unsubscribe2() + + err = broadcaster.Close() + require.NoError(t, err) +} + +type sleepySubscriber struct { + awaiter cltest.Awaiter + delay time.Duration + contextDone bool +} + +func (ss *sleepySubscriber) OnNewLongestChain(ctx context.Context, head *evmtypes.Head) { + time.Sleep(ss.delay) + select { + case <-ctx.Done(): + ss.contextDone = true + default: + } + ss.awaiter.ItHappened() +} diff --git a/core/chains/evm/headtracker/head_listener.go b/core/chains/evm/headtracker/head_listener.go new file mode 100644 index 00000000..7a42f0a1 --- /dev/null +++ b/core/chains/evm/headtracker/head_listener.go @@ -0,0 +1,29 @@ +package headtracker + +import ( + "math/big" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/common/headtracker" + commontypes "github.com/goplugin/pluginv3.0/v2/common/types" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +type headListener = headtracker.HeadListener[*evmtypes.Head, ethereum.Subscription, *big.Int, common.Hash] + +var _ commontypes.HeadListener[*evmtypes.Head, common.Hash] = (*headListener)(nil) + +func NewHeadListener( + lggr logger.Logger, + ethClient evmclient.Client, + config Config, chStop chan struct{}, +) *headListener { + return headtracker.NewHeadListener[ + *evmtypes.Head, + ethereum.Subscription, *big.Int, common.Hash, + ](lggr, ethClient, config, chStop) +} diff --git a/core/chains/evm/headtracker/head_listener_test.go b/core/chains/evm/headtracker/head_listener_test.go new file mode 100644 index 00000000..2a1818cd --- /dev/null +++ b/core/chains/evm/headtracker/head_listener_test.go @@ -0,0 +1,253 @@ +package headtracker_test + +import ( + "context" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/pkg/errors" + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/logger" + commonmocks "github.com/goplugin/pluginv3.0/v2/common/types/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func Test_HeadListener_HappyPath(t *testing.T) { + // Logic: + // - spawn a listener instance + // - mock SubscribeNewHead/Err/Unsubscribe to track these calls + // - send 3 heads + // - ask listener to stop + // Asserts: + // - check Connected()/ReceivingHeads() are updated + // - 3 heads is passed to callback + // - ethClient methods are invoked + + lggr := logger.Test(t) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + // no need to test head timeouts here + c.EVM[0].NoNewHeadsThreshold = &commonconfig.Duration{} + }) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + chStop := make(chan struct{}) + hl := headtracker.NewHeadListener(lggr, ethClient, evmcfg.EVM(), chStop) + + var headCount atomic.Int32 + handler := func(context.Context, *evmtypes.Head) error { + headCount.Add(1) + return nil + } + + subscribeAwaiter := cltest.NewAwaiter() + unsubscribeAwaiter := cltest.NewAwaiter() + var chHeads chan<- *evmtypes.Head + var chErr = make(chan error) + var chSubErr <-chan error = chErr + sub := commonmocks.NewSubscription(t) + ethClient.On("SubscribeNewHead", mock.Anything, mock.AnythingOfType("chan<- *types.Head")).Return(sub, nil).Once().Run(func(args 
mock.Arguments) { + chHeads = args.Get(1).(chan<- *evmtypes.Head) + subscribeAwaiter.ItHappened() + }) + sub.On("Err").Return(chSubErr) + sub.On("Unsubscribe").Return().Once().Run(func(mock.Arguments) { + unsubscribeAwaiter.ItHappened() + close(chHeads) + close(chErr) + }) + + doneAwaiter := cltest.NewAwaiter() + done := func() { + doneAwaiter.ItHappened() + } + go hl.ListenForNewHeads(handler, done) + + subscribeAwaiter.AwaitOrFail(t, testutils.WaitTimeout(t)) + require.Eventually(t, hl.Connected, testutils.WaitTimeout(t), testutils.TestInterval) + + chHeads <- cltest.Head(0) + chHeads <- cltest.Head(1) + chHeads <- cltest.Head(2) + + require.True(t, hl.ReceivingHeads()) + + close(chStop) + doneAwaiter.AwaitOrFail(t) + + unsubscribeAwaiter.AwaitOrFail(t) + require.Equal(t, int32(3), headCount.Load()) +} + +func Test_HeadListener_NotReceivingHeads(t *testing.T) { + // Logic: + // - same as Test_HeadListener_HappyPath, but + // - send one head, make sure ReceivingHeads() is true + // - do not send any heads within BlockEmissionIdleWarningThreshold and check ReceivingHeads() is false + + lggr := logger.Test(t) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NoNewHeadsThreshold = commonconfig.MustNewDuration(time.Second) + }) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + chStop := make(chan struct{}) + hl := headtracker.NewHeadListener(lggr, ethClient, evmcfg.EVM(), chStop) + + firstHeadAwaiter := cltest.NewAwaiter() + handler := func(context.Context, *evmtypes.Head) error { + firstHeadAwaiter.ItHappened() + return nil + } + + subscribeAwaiter := cltest.NewAwaiter() + var chHeads chan<- *evmtypes.Head + var chErr = make(chan error) + var chSubErr <-chan error = chErr + sub := commonmocks.NewSubscription(t) + ethClient.On("SubscribeNewHead", mock.Anything, mock.AnythingOfType("chan<- *types.Head")).Return(sub, nil).Once().Run(func(args mock.Arguments) { + 
chHeads = args.Get(1).(chan<- *evmtypes.Head) + subscribeAwaiter.ItHappened() + }) + sub.On("Err").Return(chSubErr) + sub.On("Unsubscribe").Return().Once().Run(func(_ mock.Arguments) { + close(chHeads) + close(chErr) + }) + + doneAwaiter := cltest.NewAwaiter() + done := func() { + doneAwaiter.ItHappened() + } + go hl.ListenForNewHeads(handler, done) + + subscribeAwaiter.AwaitOrFail(t, testutils.WaitTimeout(t)) + + chHeads <- cltest.Head(0) + firstHeadAwaiter.AwaitOrFail(t) + + require.True(t, hl.ReceivingHeads()) + + time.Sleep(time.Second * 2) + + require.False(t, hl.ReceivingHeads()) + + close(chStop) + doneAwaiter.AwaitOrFail(t) +} + +func Test_HeadListener_SubscriptionErr(t *testing.T) { + tests := []struct { + name string + err error + closeErr bool + }{ + {"nil error", nil, false}, + {"socket error", errors.New("close 1006 (abnormal closure): unexpected EOF"), false}, + {"close Err channel", nil, true}, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + l := logger.Test(t) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := configtest.NewGeneralConfig(t, nil) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + chStop := make(chan struct{}) + hl := headtracker.NewHeadListener(l, ethClient, evmcfg.EVM(), chStop) + + hnhCalled := make(chan *evmtypes.Head) + hnh := func(_ context.Context, header *evmtypes.Head) error { + hnhCalled <- header + return nil + } + doneAwaiter := cltest.NewAwaiter() + done := doneAwaiter.ItHappened + + chSubErrTest := make(chan error) + var chSubErr <-chan error = chSubErrTest + sub := commonmocks.NewSubscription(t) + // sub.Err is called twice because we enter the select loop two times: once + // initially and once again after exactly one head has been received + sub.On("Err").Return(chSubErr).Twice() + + subscribeAwaiter := cltest.NewAwaiter() + var headsCh chan<- *evmtypes.Head + // Initial subscribe + ethClient.On("SubscribeNewHead", mock.Anything, 
mock.AnythingOfType("chan<- *types.Head")).Return(sub, nil).Once().Run(func(args mock.Arguments) { + headsCh = args.Get(1).(chan<- *evmtypes.Head) + subscribeAwaiter.ItHappened() + }) + go func() { + hl.ListenForNewHeads(hnh, done) + }() + + // Put a head on the channel to ensure we test all code paths + subscribeAwaiter.AwaitOrFail(t, testutils.WaitTimeout(t)) + head := cltest.Head(0) + headsCh <- head + + h := <-hnhCalled + assert.Equal(t, head, h) + + // Expect a call to unsubscribe on error + sub.On("Unsubscribe").Once().Run(func(_ mock.Arguments) { + close(headsCh) + // geth guarantees that Unsubscribe closes the errors channel + if !test.closeErr { + close(chSubErrTest) + } + }) + // Expect a resubscribe + chSubErrTest2 := make(chan error) + var chSubErr2 <-chan error = chSubErrTest2 + sub2 := commonmocks.NewSubscription(t) + sub2.On("Err").Return(chSubErr2) + subscribeAwaiter2 := cltest.NewAwaiter() + + var headsCh2 chan<- *evmtypes.Head + ethClient.On("SubscribeNewHead", mock.Anything, mock.AnythingOfType("chan<- *types.Head")).Return(sub2, nil).Once().Run(func(args mock.Arguments) { + headsCh2 = args.Get(1).(chan<- *evmtypes.Head) + subscribeAwaiter2.ItHappened() + }) + + // Sending test error + if test.closeErr { + close(chSubErrTest) + } else { + chSubErrTest <- test.err + } + + // Wait for it to resubscribe + subscribeAwaiter2.AwaitOrFail(t, testutils.WaitTimeout(t)) + + head2 := cltest.Head(1) + headsCh2 <- head2 + + h2 := <-hnhCalled + assert.Equal(t, head2, h2) + + // Second call to unsubscribe on close + sub2.On("Unsubscribe").Once().Run(func(_ mock.Arguments) { + close(headsCh2) + // geth guarantees that Unsubscribe closes the errors channel + close(chSubErrTest2) + }) + close(chStop) + doneAwaiter.AwaitOrFail(t) + }) + } +} diff --git a/core/chains/evm/headtracker/head_saver.go b/core/chains/evm/headtracker/head_saver.go new file mode 100644 index 00000000..23ad8722 --- /dev/null +++ b/core/chains/evm/headtracker/head_saver.go @@ -0,0 +1,83 @@ 
+package headtracker + +import ( + "context" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/plugin-common/pkg/logger" + commontypes "github.com/goplugin/pluginv3.0/v2/common/types" + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +type headSaver struct { + orm ORM + config Config + htConfig HeadTrackerConfig + logger logger.Logger + heads Heads +} + +var _ commontypes.HeadSaver[*evmtypes.Head, common.Hash] = (*headSaver)(nil) + +func NewHeadSaver(lggr logger.Logger, orm ORM, config Config, htConfig HeadTrackerConfig) httypes.HeadSaver { + return &headSaver{ + orm: orm, + config: config, + htConfig: htConfig, + logger: logger.Named(lggr, "HeadSaver"), + heads: NewHeads(), + } +} + +func (hs *headSaver) Save(ctx context.Context, head *evmtypes.Head) error { + if err := hs.orm.IdempotentInsertHead(ctx, head); err != nil { + return err + } + + historyDepth := uint(hs.htConfig.HistoryDepth()) + hs.heads.AddHeads(historyDepth, head) + + return hs.orm.TrimOldHeads(ctx, historyDepth) +} + +func (hs *headSaver) Load(ctx context.Context) (chain *evmtypes.Head, err error) { + historyDepth := uint(hs.htConfig.HistoryDepth()) + heads, err := hs.orm.LatestHeads(ctx, historyDepth) + if err != nil { + return nil, err + } + + hs.heads.AddHeads(historyDepth, heads...) 
+ return hs.heads.LatestHead(), nil +} + +func (hs *headSaver) LatestHeadFromDB(ctx context.Context) (head *evmtypes.Head, err error) { + return hs.orm.LatestHead(ctx) +} + +func (hs *headSaver) LatestChain() *evmtypes.Head { + head := hs.heads.LatestHead() + if head == nil { + return nil + } + if head.ChainLength() < hs.config.FinalityDepth() { + hs.logger.Debugw("chain shorter than FinalityDepth", "chainLen", head.ChainLength(), "evmFinalityDepth", hs.config.FinalityDepth()) + } + return head +} + +func (hs *headSaver) Chain(hash common.Hash) *evmtypes.Head { + return hs.heads.HeadByHash(hash) +} + +var NullSaver httypes.HeadSaver = &nullSaver{} + +type nullSaver struct{} + +func (*nullSaver) Save(ctx context.Context, head *evmtypes.Head) error { return nil } +func (*nullSaver) Load(ctx context.Context) (*evmtypes.Head, error) { return nil, nil } +func (*nullSaver) LatestHeadFromDB(ctx context.Context) (*evmtypes.Head, error) { return nil, nil } +func (*nullSaver) LatestChain() *evmtypes.Head { return nil } +func (*nullSaver) Chain(hash common.Hash) *evmtypes.Head { return nil } diff --git a/core/chains/evm/headtracker/head_saver_test.go b/core/chains/evm/headtracker/head_saver_test.go new file mode 100644 index 00000000..722eba25 --- /dev/null +++ b/core/chains/evm/headtracker/head_saver_test.go @@ -0,0 +1,94 @@ +package headtracker_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker" + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" +) + +type headTrackerConfig struct { + historyDepth uint32 +} + +func (h *headTrackerConfig) 
HistoryDepth() uint32 { + return h.historyDepth +} + +func (h *headTrackerConfig) SamplingInterval() time.Duration { + return time.Duration(0) +} + +func (h *headTrackerConfig) MaxBufferSize() uint32 { + return uint32(0) +} + +type config struct { + finalityDepth uint32 + blockEmissionIdleWarningThreshold time.Duration +} + +func (c *config) FinalityDepth() uint32 { return c.finalityDepth } +func (c *config) BlockEmissionIdleWarningThreshold() time.Duration { + return c.blockEmissionIdleWarningThreshold +} + +func configureSaver(t *testing.T) (httypes.HeadSaver, headtracker.ORM) { + db := pgtest.NewSqlxDB(t) + lggr := logger.Test(t) + cfg := configtest.NewGeneralConfig(t, nil) + htCfg := &config{finalityDepth: uint32(1)} + orm := headtracker.NewORM(db, lggr, cfg.Database(), cltest.FixtureChainID) + saver := headtracker.NewHeadSaver(lggr, orm, htCfg, &headTrackerConfig{historyDepth: 6}) + return saver, orm +} + +func TestHeadSaver_Save(t *testing.T) { + t.Parallel() + + saver, _ := configureSaver(t) + + head := cltest.Head(1) + err := saver.Save(testutils.Context(t), head) + require.NoError(t, err) + + latest, err := saver.LatestHeadFromDB(testutils.Context(t)) + require.NoError(t, err) + require.Equal(t, int64(1), latest.Number) + + latest = saver.LatestChain() + require.NotNil(t, latest) + require.Equal(t, int64(1), latest.Number) + + latest = saver.Chain(head.Hash) + require.NotNil(t, latest) + require.Equal(t, int64(1), latest.Number) +} + +func TestHeadSaver_Load(t *testing.T) { + t.Parallel() + + saver, orm := configureSaver(t) + + for i := 0; i < 5; i++ { + err := orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(i)) + require.NoError(t, err) + } + + latestHead, err := saver.Load(testutils.Context(t)) + require.NoError(t, err) + require.NotNil(t, latestHead) + require.Equal(t, int64(4), latestHead.Number) + + latestChain := saver.LatestChain() + require.NotNil(t, latestChain) + require.Equal(t, int64(4), latestChain.Number) +} diff --git 
a/core/chains/evm/headtracker/head_tracker.go b/core/chains/evm/headtracker/head_tracker.go new file mode 100644 index 00000000..7006446a --- /dev/null +++ b/core/chains/evm/headtracker/head_tracker.go @@ -0,0 +1,59 @@ +package headtracker + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/common/headtracker" + commontypes "github.com/goplugin/pluginv3.0/v2/common/types" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +type headTracker = headtracker.HeadTracker[*evmtypes.Head, ethereum.Subscription, *big.Int, common.Hash] + +var _ commontypes.HeadTracker[*evmtypes.Head, common.Hash] = (*headTracker)(nil) + +func NewHeadTracker( + lggr logger.Logger, + ethClient evmclient.Client, + config Config, + htConfig HeadTrackerConfig, + headBroadcaster httypes.HeadBroadcaster, + headSaver httypes.HeadSaver, + mailMon *mailbox.Monitor, +) httypes.HeadTracker { + return headtracker.NewHeadTracker[*evmtypes.Head, ethereum.Subscription, *big.Int, common.Hash]( + lggr, + ethClient, + config, + htConfig, + headBroadcaster, + headSaver, + mailMon, + func() *evmtypes.Head { return nil }, + ) +} + +var NullTracker httypes.HeadTracker = &nullTracker{} + +type nullTracker struct{} + +func (*nullTracker) Start(context.Context) error { return nil } +func (*nullTracker) Close() error { return nil } +func (*nullTracker) Ready() error { return nil } +func (*nullTracker) HealthReport() map[string]error { return map[string]error{} } +func (*nullTracker) Name() string { return "" } +func (*nullTracker) SetLogLevel(zapcore.Level) {} +func (*nullTracker) Backfill(ctx context.Context, 
headWithChain *evmtypes.Head, depth uint) (err error) { + return nil +} +func (*nullTracker) LatestChain() *evmtypes.Head { return nil } diff --git a/core/chains/evm/headtracker/head_tracker_test.go b/core/chains/evm/headtracker/head_tracker_test.go new file mode 100644 index 00000000..aa4658e0 --- /dev/null +++ b/core/chains/evm/headtracker/head_tracker_test.go @@ -0,0 +1,1086 @@ +package headtracker_test + +import ( + "context" + "errors" + "math/big" + "slices" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + + "github.com/jmoiron/sqlx" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + "github.com/goplugin/plugin-common/pkg/utils/mailbox/mailboxtest" + + commonmocks "github.com/goplugin/pluginv3.0/v2/common/types/mocks" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker" + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + 
"github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func firstHead(t *testing.T, db *sqlx.DB) (h evmtypes.Head) { + if err := db.Get(&h, `SELECT * FROM evm.heads ORDER BY number ASC LIMIT 1`); err != nil { + t.Fatal(err) + } + return h +} + +func TestHeadTracker_New(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + config := configtest.NewGeneralConfig(t, nil) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil) + + orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + assert.Nil(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(1))) + last := cltest.Head(16) + assert.Nil(t, orm.IdempotentInsertHead(testutils.Context(t), last)) + assert.Nil(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(10))) + + evmcfg := cltest.NewTestChainScopedConfig(t) + ht := createHeadTracker(t, ethClient, evmcfg.EVM(), evmcfg.EVM().HeadTracker(), orm) + ht.Start(t) + + latest := ht.headSaver.LatestChain() + require.NotNil(t, latest) + assert.Equal(t, last.Number, latest.Number) +} + +func TestHeadTracker_Save_InsertsAndTrimsTable(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + config := cltest.NewTestChainScopedConfig(t) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + + for idx := 0; idx < 200; idx++ { + assert.Nil(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(idx))) + } + + ht := createHeadTracker(t, ethClient, config.EVM(), config.EVM().HeadTracker(), orm) + + h := cltest.Head(200) + require.NoError(t, ht.headSaver.Save(testutils.Context(t), h)) + assert.Equal(t, big.NewInt(200), ht.headSaver.LatestChain().ToInt()) + + firstHead := firstHead(t, db) + assert.Equal(t, big.NewInt(101), firstHead.ToInt()) + + lastHead, err := 
orm.LatestHead(testutils.Context(t)) + require.NoError(t, err) + assert.Equal(t, int64(200), lastHead.Number) +} + +func TestHeadTracker_Get(t *testing.T) { + t.Parallel() + + start := cltest.Head(5) + + tests := []struct { + name string + initial *evmtypes.Head + toSave *evmtypes.Head + want *big.Int + }{ + {"greater", start, cltest.Head(6), big.NewInt(6)}, + {"less than", start, cltest.Head(1), big.NewInt(5)}, + {"zero", start, cltest.Head(0), big.NewInt(5)}, + {"nil", start, nil, big.NewInt(5)}, + {"nil no initial", nil, nil, big.NewInt(0)}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + config := cltest.NewTestChainScopedConfig(t) + orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + chStarted := make(chan struct{}) + mockEth := &evmtest.MockEth{ + EthClient: ethClient, + } + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + Maybe(). 
+ Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + defer close(chStarted) + return mockEth.NewSub(t) + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil) + + fnCall := ethClient.On("HeadByNumber", mock.Anything, mock.Anything) + fnCall.RunFn = func(args mock.Arguments) { + num := args.Get(1).(*big.Int) + fnCall.ReturnArguments = mock.Arguments{cltest.Head(num.Int64()), nil} + } + + if test.initial != nil { + assert.Nil(t, orm.IdempotentInsertHead(testutils.Context(t), test.initial)) + } + + ht := createHeadTracker(t, ethClient, config.EVM(), config.EVM().HeadTracker(), orm) + ht.Start(t) + + if test.toSave != nil { + err := ht.headSaver.Save(testutils.Context(t), test.toSave) + assert.NoError(t, err) + } + + assert.Equal(t, test.want, ht.headSaver.LatestChain().ToInt()) + }) + } +} + +func TestHeadTracker_Start_NewHeads(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + config := cltest.NewTestChainScopedConfig(t) + orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + chStarted := make(chan struct{}) + mockEth := &evmtest.MockEth{EthClient: ethClient} + sub := mockEth.NewSub(t) + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil) + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + Run(func(mock.Arguments) { + close(chStarted) + }). 
+ Return(sub, nil) + + ht := createHeadTracker(t, ethClient, config.EVM(), config.EVM().HeadTracker(), orm) + ht.Start(t) + + <-chStarted +} + +func TestHeadTracker_Start_CancelContext(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + config := cltest.NewTestChainScopedConfig(t) + orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + chStarted := make(chan struct{}) + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Run(func(args mock.Arguments) { + ctx := args.Get(0).(context.Context) + select { + case <-ctx.Done(): + return + case <-time.After(10 * time.Second): + assert.FailNow(t, "context was not cancelled within 10s") + } + }).Return(cltest.Head(0), nil) + mockEth := &evmtest.MockEth{EthClient: ethClient} + sub := mockEth.NewSub(t) + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + Run(func(mock.Arguments) { + close(chStarted) + }). + Return(sub, nil). + Maybe() + + ht := createHeadTracker(t, ethClient, config.EVM(), config.EVM().HeadTracker(), orm) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + go func() { + time.Sleep(1 * time.Second) + cancel() + }() + err := ht.headTracker.Start(ctx) + require.NoError(t, err) + require.NoError(t, ht.headTracker.Close()) +} + +func TestHeadTracker_CallsHeadTrackableCallbacks(t *testing.T) { + t.Parallel() + g := gomega.NewWithT(t) + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + config := cltest.NewTestChainScopedConfig(t) + orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) + mockEth := &evmtest.MockEth{EthClient: ethClient} + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). 
+ Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) + ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(0), nil) + ethClient.On("HeadByHash", mock.Anything, mock.Anything).Return(cltest.Head(0), nil).Maybe() + + checker := &cltest.MockHeadTrackable{} + ht := createHeadTrackerWithChecker(t, ethClient, config.EVM(), config.EVM().HeadTracker(), orm, checker) + + ht.Start(t) + assert.Equal(t, int32(0), checker.OnNewLongestChainCount()) + + headers := <-chchHeaders + headers.TrySend(&evmtypes.Head{Number: 1, Hash: utils.NewHash(), EVMChainID: ubig.New(&cltest.FixtureChainID)}) + g.Eventually(checker.OnNewLongestChainCount).Should(gomega.Equal(int32(1))) + + ht.Stop(t) + assert.Equal(t, int32(1), checker.OnNewLongestChainCount()) +} + +func TestHeadTracker_ReconnectOnError(t *testing.T) { + t.Parallel() + g := gomega.NewWithT(t) + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + config := cltest.NewTestChainScopedConfig(t) + orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + mockEth := &evmtest.MockEth{EthClient: ethClient} + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { return mockEth.NewSub(t) }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, errors.New("cannot reconnect")) + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). 
+ Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { return mockEth.NewSub(t) }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(cltest.Head(0), nil) + + checker := &cltest.MockHeadTrackable{} + ht := createHeadTrackerWithChecker(t, ethClient, config.EVM(), config.EVM().HeadTracker(), orm, checker) + + // connect + ht.Start(t) + assert.Equal(t, int32(0), checker.OnNewLongestChainCount()) + + // trigger reconnect loop + mockEth.SubsErr(errors.New("test error to force reconnect")) + g.Eventually(checker.OnNewLongestChainCount).Should(gomega.Equal(int32(1))) +} + +func TestHeadTracker_ResubscribeOnSubscriptionError(t *testing.T) { + t.Parallel() + g := gomega.NewWithT(t) + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + config := cltest.NewTestChainScopedConfig(t) + orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) + mockEth := &evmtest.MockEth{EthClient: ethClient} + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). 
+ Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) + ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Return(cltest.Head(0), nil).Once() + ethClient.On("HeadByHash", mock.Anything, mock.Anything).Return(cltest.Head(0), nil).Maybe() + + checker := &cltest.MockHeadTrackable{} + ht := createHeadTrackerWithChecker(t, ethClient, config.EVM(), config.EVM().HeadTracker(), orm, checker) + + ht.Start(t) + assert.Equal(t, int32(0), checker.OnNewLongestChainCount()) + + headers := <-chchHeaders + go func() { + headers.TrySend(cltest.Head(1)) + }() + + g.Eventually(func() bool { + report := ht.headTracker.HealthReport() + return !slices.ContainsFunc(maps.Values(report), func(e error) bool { return e != nil }) + }, 5*time.Second, testutils.TestInterval).Should(gomega.Equal(true)) + + // trigger reconnect loop + headers.CloseCh() + + // wait for full disconnect and a new subscription + g.Eventually(checker.OnNewLongestChainCount, 5*time.Second, testutils.TestInterval).Should(gomega.Equal(int32(1))) +} + +func TestHeadTracker_Start_LoadsLatestChain(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + config := cltest.NewTestChainScopedConfig(t) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + heads := []*evmtypes.Head{ + cltest.Head(0), + cltest.Head(1), + cltest.Head(2), + cltest.Head(3), + } + var parentHash gethCommon.Hash + for i := 0; i < len(heads); i++ { + if parentHash != (gethCommon.Hash{}) { + heads[i].ParentHash = parentHash + } + parentHash = heads[i].Hash + } + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(heads[3], nil).Maybe() + ethClient.On("HeadByHash", mock.Anything, heads[2].Hash).Return(heads[2], nil).Maybe() + ethClient.On("HeadByHash", mock.Anything, 
heads[1].Hash).Return(heads[1], nil).Maybe() + ethClient.On("HeadByHash", mock.Anything, heads[0].Hash).Return(heads[0], nil).Maybe() + + chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) + mockEth := &evmtest.MockEth{EthClient: ethClient} + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) + + orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + trackable := &cltest.MockHeadTrackable{} + ht := createHeadTrackerWithChecker(t, ethClient, config.EVM(), config.EVM().HeadTracker(), orm, trackable) + + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), heads[2])) + + ht.Start(t) + + assert.Equal(t, int32(0), trackable.OnNewLongestChainCount()) + + headers := <-chchHeaders + go func() { + headers.TrySend(cltest.Head(1)) + }() + + gomega.NewWithT(t).Eventually(func() bool { + report := ht.headTracker.HealthReport() + services.CopyHealth(report, ht.headBroadcaster.HealthReport()) + return !slices.ContainsFunc(maps.Values(report), func(e error) bool { return e != nil }) + }, 5*time.Second, testutils.TestInterval).Should(gomega.Equal(true)) + + h, err := orm.LatestHead(testutils.Context(t)) + require.NoError(t, err) + require.NotNil(t, h) + assert.Equal(t, h.Number, int64(3)) +} + +func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingEnabled(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].FinalityDepth = ptr[uint32](50) + // Need to set the buffer to something large since we inject a lot of heads at once and otherwise they will be dropped + c.EVM[0].HeadTracker.MaxBufferSize = ptr[uint32](100) + 
c.EVM[0].HeadTracker.SamplingInterval = commonconfig.MustNewDuration(2500 * time.Millisecond) + }) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + checker := commonmocks.NewHeadTrackable[*evmtypes.Head, gethCommon.Hash](t) + orm := headtracker.NewORM(db, logger, config.Database(), *evmtest.MustGetDefaultChainID(t, config.EVMConfigs())) + csCfg := evmtest.NewChainScopedConfig(t, config) + ht := createHeadTrackerWithChecker(t, ethClient, csCfg.EVM(), csCfg.EVM().HeadTracker(), orm, checker) + + chchHeaders := make(chan evmtest.RawSub[*evmtypes.Head], 1) + mockEth := &evmtest.MockEth{EthClient: ethClient} + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) + + // --------------------- + blocks := cltest.NewBlocks(t, 10) + + head0 := blocks.Head(0) + // Initial query + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(head0, nil) + ht.Start(t) + + headSeq := cltest.NewHeadBuffer(t) + headSeq.Append(blocks.Head(0)) + headSeq.Append(blocks.Head(1)) + + // Blocks 2 and 3 are out of order + headSeq.Append(blocks.Head(3)) + headSeq.Append(blocks.Head(2)) + + // Block 4 comes in + headSeq.Append(blocks.Head(4)) + + // Another block at level 4 comes in, that will be uncled + headSeq.Append(blocks.NewHead(4)) + + // Reorg happened forking from block 2 + blocksForked := blocks.ForkAt(t, 2, 5) + headSeq.Append(blocksForked.Head(2)) + headSeq.Append(blocksForked.Head(3)) + headSeq.Append(blocksForked.Head(4)) + headSeq.Append(blocksForked.Head(5)) // Now the new chain is longer + + lastLongestChainAwaiter := cltest.NewAwaiter() + + // the callback is only called for head number 5 because of head sampling + checker.On("OnNewLongestChain", mock.Anything, mock.Anything). 
+ Run(func(args mock.Arguments) { + h := args.Get(1).(*evmtypes.Head) + + assert.Equal(t, int64(5), h.Number) + assert.Equal(t, blocksForked.Head(5).Hash, h.Hash) + + // This is the new longest chain, check that it came with its parents + if !assert.NotNil(t, h.Parent) { + return + } + assert.Equal(t, h.Parent.Hash, blocksForked.Head(4).Hash) + if !assert.NotNil(t, h.Parent.Parent) { + return + } + assert.Equal(t, h.Parent.Parent.Hash, blocksForked.Head(3).Hash) + if !assert.NotNil(t, h.Parent.Parent.Parent) { + return + } + assert.Equal(t, h.Parent.Parent.Parent.Hash, blocksForked.Head(2).Hash) + if !assert.NotNil(t, h.Parent.Parent.Parent.Parent) { + return + } + assert.Equal(t, h.Parent.Parent.Parent.Parent.Hash, blocksForked.Head(1).Hash) + lastLongestChainAwaiter.ItHappened() + }).Return().Once() + + headers := <-chchHeaders + + // This grotesque construction is the only way to do dynamic return values using + // the mock package. We need dynamic returns because we're simulating reorgs. 
+ latestHeadByHash := make(map[gethCommon.Hash]*evmtypes.Head) + latestHeadByHashMu := new(sync.Mutex) + + fnCall := ethClient.On("HeadByHash", mock.Anything, mock.Anything).Maybe() + fnCall.RunFn = func(args mock.Arguments) { + latestHeadByHashMu.Lock() + defer latestHeadByHashMu.Unlock() + hash := args.Get(1).(gethCommon.Hash) + head := latestHeadByHash[hash] + fnCall.ReturnArguments = mock.Arguments{head, nil} + } + + for _, h := range headSeq.Heads { + latestHeadByHashMu.Lock() + latestHeadByHash[h.Hash] = h + latestHeadByHashMu.Unlock() + headers.TrySend(h) + } + + // default 10s may not be sufficient, so using testutils.WaitTimeout(t) + lastLongestChainAwaiter.AwaitOrFail(t, testutils.WaitTimeout(t)) + ht.Stop(t) + assert.Equal(t, int64(5), ht.headSaver.LatestChain().Number) + + for _, h := range headSeq.Heads { + c := ht.headSaver.Chain(h.Hash) + require.NotNil(t, c) + assert.Equal(t, c.ParentHash, h.ParentHash) + assert.Equal(t, c.Timestamp.Unix(), h.Timestamp.UTC().Unix()) + assert.Equal(t, c.Number, h.Number) + } +} + +func TestHeadTracker_SwitchesToLongestChainWithHeadSamplingDisabled(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].FinalityDepth = ptr[uint32](50) + // Need to set the buffer to something large since we inject a lot of heads at once and otherwise they will be dropped + c.EVM[0].HeadTracker.MaxBufferSize = ptr[uint32](100) + c.EVM[0].HeadTracker.SamplingInterval = commonconfig.MustNewDuration(0) + }) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + checker := commonmocks.NewHeadTrackable[*evmtypes.Head, gethCommon.Hash](t) + orm := headtracker.NewORM(db, logger, config.Database(), cltest.FixtureChainID) + evmcfg := evmtest.NewChainScopedConfig(t, config) + ht := createHeadTrackerWithChecker(t, ethClient, evmcfg.EVM(), evmcfg.EVM().HeadTracker(), orm, checker) + + chchHeaders := make(chan 
evmtest.RawSub[*evmtypes.Head], 1) + mockEth := &evmtest.MockEth{EthClient: ethClient} + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchHeaders <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) + + // --------------------- + blocks := cltest.NewBlocks(t, 10) + + head0 := blocks.Head(0) // evmtypes.Head{Number: 0, Hash: utils.NewHash(), ParentHash: utils.NewHash(), Timestamp: time.Unix(0, 0)} + // Initial query + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(head0, nil) + + headSeq := cltest.NewHeadBuffer(t) + headSeq.Append(blocks.Head(0)) + headSeq.Append(blocks.Head(1)) + + // Blocks 2 and 3 are out of order + headSeq.Append(blocks.Head(3)) + headSeq.Append(blocks.Head(2)) + + // Block 4 comes in + headSeq.Append(blocks.Head(4)) + + // Another block at level 4 comes in, that will be uncled + headSeq.Append(blocks.NewHead(4)) + + // Reorg happened forking from block 2 + blocksForked := blocks.ForkAt(t, 2, 5) + headSeq.Append(blocksForked.Head(2)) + headSeq.Append(blocksForked.Head(3)) + headSeq.Append(blocksForked.Head(4)) + headSeq.Append(blocksForked.Head(5)) // Now the new chain is longer + + lastLongestChainAwaiter := cltest.NewAwaiter() + + checker.On("OnNewLongestChain", mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + h := args.Get(1).(*evmtypes.Head) + require.Equal(t, int64(0), h.Number) + require.Equal(t, blocks.Head(0).Hash, h.Hash) + }).Return().Once() + + checker.On("OnNewLongestChain", mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + h := args.Get(1).(*evmtypes.Head) + require.Equal(t, int64(1), h.Number) + require.Equal(t, blocks.Head(1).Hash, h.Hash) + }).Return().Once() + + checker.On("OnNewLongestChain", mock.Anything, mock.Anything). 
+ Run(func(args mock.Arguments) { + h := args.Get(1).(*evmtypes.Head) + require.Equal(t, int64(3), h.Number) + require.Equal(t, blocks.Head(3).Hash, h.Hash) + }).Return().Once() + + checker.On("OnNewLongestChain", mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + h := args.Get(1).(*evmtypes.Head) + require.Equal(t, int64(4), h.Number) + require.Equal(t, blocks.Head(4).Hash, h.Hash) + + // Check that the block came with its parents + require.NotNil(t, h.Parent) + require.Equal(t, h.Parent.Hash, blocks.Head(3).Hash) + require.NotNil(t, h.Parent.Parent.Hash) + require.Equal(t, h.Parent.Parent.Hash, blocks.Head(2).Hash) + require.NotNil(t, h.Parent.Parent.Parent) + require.Equal(t, h.Parent.Parent.Parent.Hash, blocks.Head(1).Hash) + }).Return().Once() + + checker.On("OnNewLongestChain", mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + h := args.Get(1).(*evmtypes.Head) + + require.Equal(t, int64(5), h.Number) + require.Equal(t, blocksForked.Head(5).Hash, h.Hash) + + // This is the new longest chain, check that it came with its parents + require.NotNil(t, h.Parent) + require.Equal(t, h.Parent.Hash, blocksForked.Head(4).Hash) + require.NotNil(t, h.Parent.Parent) + require.Equal(t, h.Parent.Parent.Hash, blocksForked.Head(3).Hash) + require.NotNil(t, h.Parent.Parent.Parent) + require.Equal(t, h.Parent.Parent.Parent.Hash, blocksForked.Head(2).Hash) + require.NotNil(t, h.Parent.Parent.Parent.Parent) + require.Equal(t, h.Parent.Parent.Parent.Parent.Hash, blocksForked.Head(1).Hash) + lastLongestChainAwaiter.ItHappened() + }).Return().Once() + + ht.Start(t) + + headers := <-chchHeaders + + // This grotesque construction is the only way to do dynamic return values using + // the mock package. We need dynamic returns because we're simulating reorgs. 
+ latestHeadByHash := make(map[gethCommon.Hash]*evmtypes.Head) + latestHeadByHashMu := new(sync.Mutex) + + fnCall := ethClient.On("HeadByHash", mock.Anything, mock.Anything).Maybe() + fnCall.RunFn = func(args mock.Arguments) { + latestHeadByHashMu.Lock() + defer latestHeadByHashMu.Unlock() + hash := args.Get(1).(gethCommon.Hash) + head := latestHeadByHash[hash] + fnCall.ReturnArguments = mock.Arguments{head, nil} + } + + for _, h := range headSeq.Heads { + latestHeadByHashMu.Lock() + latestHeadByHash[h.Hash] = h + latestHeadByHashMu.Unlock() + headers.TrySend(h) + time.Sleep(testutils.TestInterval) + } + + // default 10s may not be sufficient, so using testutils.WaitTimeout(t) + lastLongestChainAwaiter.AwaitOrFail(t, testutils.WaitTimeout(t)) + ht.Stop(t) + assert.Equal(t, int64(5), ht.headSaver.LatestChain().Number) + + for _, h := range headSeq.Heads { + c := ht.headSaver.Chain(h.Hash) + require.NotNil(t, c) + assert.Equal(t, c.ParentHash, h.ParentHash) + assert.Equal(t, c.Timestamp.Unix(), h.Timestamp.UTC().Unix()) + assert.Equal(t, c.Number, h.Number) + } +} + +func TestHeadTracker_Backfill(t *testing.T) { + t.Parallel() + + // Heads are arranged as follows: + // headN indicates an unpersisted ethereum header + // hN indicates a persisted head record + // + // (1)->(H0) + // + // (14Orphaned)-+ + // +->(13)->(12)->(11)->(H10)->(9)->(H8) + // (15)->(14)---------+ + + now := uint64(time.Now().UTC().Unix()) + + gethHead0 := &gethTypes.Header{ + Number: big.NewInt(0), + ParentHash: gethCommon.BigToHash(big.NewInt(0)), + Time: now, + } + head0 := evmtypes.NewHead(gethHead0.Number, utils.NewHash(), gethHead0.ParentHash, gethHead0.Time, ubig.New(&cltest.FixtureChainID)) + + h1 := *cltest.Head(1) + h1.ParentHash = head0.Hash + + gethHead8 := &gethTypes.Header{ + Number: big.NewInt(8), + ParentHash: utils.NewHash(), + Time: now, + } + head8 := evmtypes.NewHead(gethHead8.Number, utils.NewHash(), gethHead8.ParentHash, gethHead8.Time, ubig.New(&cltest.FixtureChainID)) + + 
h9 := *cltest.Head(9) + h9.ParentHash = head8.Hash + + gethHead10 := &gethTypes.Header{ + Number: big.NewInt(10), + ParentHash: h9.Hash, + Time: now, + } + head10 := evmtypes.NewHead(gethHead10.Number, utils.NewHash(), gethHead10.ParentHash, gethHead10.Time, ubig.New(&cltest.FixtureChainID)) + + h11 := *cltest.Head(11) + h11.ParentHash = head10.Hash + + h12 := *cltest.Head(12) + h12.ParentHash = h11.Hash + + h13 := *cltest.Head(13) + h13.ParentHash = h12.Hash + + h14Orphaned := *cltest.Head(14) + h14Orphaned.ParentHash = h13.Hash + + h14 := *cltest.Head(14) + h14.ParentHash = h13.Hash + + h15 := *cltest.Head(15) + h15.ParentHash = h14.Hash + + heads := []evmtypes.Head{ + h9, + h11, + h12, + h13, + h14Orphaned, + h14, + h15, + } + + ctx := testutils.Context(t) + + t.Run("does nothing if all the heads are in database", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + logger := logger.Test(t) + orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + for i := range heads { + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) + } + + ethClient := evmtest.NewEthClientMock(t) + ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) + ht := createHeadTrackerWithNeverSleeper(t, ethClient, cfg, orm) + + err := ht.Backfill(ctx, &h12, 2) + require.NoError(t, err) + }) + + t.Run("fetches a missing head", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + logger := logger.Test(t) + orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + for i := range heads { + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) + } + + ethClient := evmtest.NewEthClientMock(t) + ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) + ethClient.On("HeadByHash", mock.Anything, head10.Hash). 
+ Return(&head10, nil) + + ht := createHeadTrackerWithNeverSleeper(t, ethClient, cfg, orm) + + var depth uint = 3 + + err := ht.Backfill(ctx, &h12, depth) + require.NoError(t, err) + + h := ht.headSaver.Chain(h12.Hash) + + assert.Equal(t, int64(12), h.Number) + require.NotNil(t, h.Parent) + assert.Equal(t, int64(11), h.Parent.Number) + require.NotNil(t, h.Parent.Parent) + assert.Equal(t, int64(10), h.Parent.Parent.Number) + require.NotNil(t, h.Parent.Parent.Parent) + assert.Equal(t, int64(9), h.Parent.Parent.Parent.Number) + + writtenHead, err := orm.HeadByHash(testutils.Context(t), head10.Hash) + require.NoError(t, err) + assert.Equal(t, int64(10), writtenHead.Number) + }) + + t.Run("fetches only heads that are missing", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + logger := logger.Test(t) + orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + for i := range heads { + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) + } + + ethClient := evmtest.NewEthClientMock(t) + ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) + + ht := createHeadTrackerWithNeverSleeper(t, ethClient, cfg, orm) + + ethClient.On("HeadByHash", mock.Anything, head10.Hash). + Return(&head10, nil) + ethClient.On("HeadByHash", mock.Anything, head8.Hash). 
+ Return(&head8, nil) + + // Needs to be 8 because there are 8 heads in chain (15,14,13,12,11,10,9,8) + var depth uint = 8 + + err := ht.Backfill(ctx, &h15, depth) + require.NoError(t, err) + + h := ht.headSaver.Chain(h15.Hash) + + require.Equal(t, uint32(8), h.ChainLength()) + earliestInChain := h.EarliestInChain() + assert.Equal(t, head8.Number, earliestInChain.BlockNumber()) + assert.Equal(t, head8.Hash, earliestInChain.BlockHash()) + }) + + t.Run("does not backfill if chain length is already greater than or equal to depth", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + logger := logger.Test(t) + orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + for i := range heads { + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) + } + + ethClient := evmtest.NewEthClientMock(t) + ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) + + ht := createHeadTrackerWithNeverSleeper(t, ethClient, cfg, orm) + + err := ht.Backfill(ctx, &h15, 3) + require.NoError(t, err) + + err = ht.Backfill(ctx, &h15, 5) + require.NoError(t, err) + }) + + t.Run("only backfills to height 0 if chain length would otherwise cause it to try and fetch a negative head", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + logger := logger.Test(t) + orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + + ethClient := evmtest.NewEthClientMock(t) + ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) + ethClient.On("HeadByHash", mock.Anything, head0.Hash). 
+ Return(&head0, nil) + + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &h1)) + + ht := createHeadTrackerWithNeverSleeper(t, ethClient, cfg, orm) + + err := ht.Backfill(ctx, &h1, 400) + require.NoError(t, err) + + h := ht.headSaver.Chain(h1.Hash) + require.NotNil(t, h) + + require.Equal(t, uint32(2), h.ChainLength()) + require.Equal(t, int64(0), h.EarliestInChain().BlockNumber()) + }) + + t.Run("abandons backfill and returns error if the eth node returns not found", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + logger := logger.Test(t) + orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + for i := range heads { + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) + } + + ethClient := evmtest.NewEthClientMock(t) + ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) + ethClient.On("HeadByHash", mock.Anything, head10.Hash). + Return(&head10, nil). + Once() + ethClient.On("HeadByHash", mock.Anything, head8.Hash). + Return(nil, ethereum.NotFound). 
+ Once() + + ht := createHeadTrackerWithNeverSleeper(t, ethClient, cfg, orm) + + err := ht.Backfill(ctx, &h12, 400) + require.Error(t, err) + require.EqualError(t, err, "fetchAndSaveHead failed: not found") + + h := ht.headSaver.Chain(h12.Hash) + + // Should contain 12, 11, 10, 9 + assert.Equal(t, 4, int(h.ChainLength())) + assert.Equal(t, int64(9), h.EarliestInChain().BlockNumber()) + }) + + t.Run("abandons backfill and returns error if the context time budget is exceeded", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + logger := logger.Test(t) + orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + for i := range heads { + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), &heads[i])) + } + + ethClient := evmtest.NewEthClientMock(t) + ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) + ethClient.On("HeadByHash", mock.Anything, head10.Hash). + Return(&head10, nil) + ethClient.On("HeadByHash", mock.Anything, head8.Hash). 
+ Return(nil, context.DeadlineExceeded) + + ht := createHeadTrackerWithNeverSleeper(t, ethClient, cfg, orm) + + err := ht.Backfill(ctx, &h12, 400) + require.Error(t, err) + require.EqualError(t, err, "fetchAndSaveHead failed: context deadline exceeded") + + h := ht.headSaver.Chain(h12.Hash) + + // Should contain 12, 11, 10, 9 + assert.Equal(t, 4, int(h.ChainLength())) + assert.Equal(t, int64(9), h.EarliestInChain().BlockNumber()) + }) + + t.Run("abandons backfill and returns error when fetching a block by hash fails, indicating a reorg", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + logger := logger.Test(t) + orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + ethClient := evmtest.NewEthClientMock(t) + ethClient.On("ConfiguredChainID", mock.Anything).Return(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) + ethClient.On("HeadByHash", mock.Anything, h14.Hash).Return(&h14, nil).Once() + ethClient.On("HeadByHash", mock.Anything, h13.Hash).Return(&h13, nil).Once() + ethClient.On("HeadByHash", mock.Anything, h12.Hash).Return(nil, errors.New("not found")).Once() + + ht := createHeadTrackerWithNeverSleeper(t, ethClient, cfg, orm) + + err := ht.Backfill(ctx, &h15, 400) + + require.Error(t, err) + require.EqualError(t, err, "fetchAndSaveHead failed: not found") + + h := ht.headSaver.Chain(h14.Hash) + + // Should contain 14, 13 (15 was never added). When trying to get the parent of h13 by hash, a reorg happened and backfill exited. 
+ assert.Equal(t, 2, int(h.ChainLength())) + assert.Equal(t, int64(13), h.EarliestInChain().BlockNumber()) + }) +} + +func createHeadTracker(t *testing.T, ethClient evmclient.Client, config headtracker.Config, htConfig headtracker.HeadTrackerConfig, orm headtracker.ORM) *headTrackerUniverse { + lggr := logger.Test(t) + hb := headtracker.NewHeadBroadcaster(lggr) + hs := headtracker.NewHeadSaver(lggr, orm, config, htConfig) + mailMon := mailboxtest.NewMonitor(t) + return &headTrackerUniverse{ + mu: new(sync.Mutex), + headTracker: headtracker.NewHeadTracker(lggr, ethClient, config, htConfig, hb, hs, mailMon), + headBroadcaster: hb, + headSaver: hs, + mailMon: mailMon, + } +} + +func createHeadTrackerWithNeverSleeper(t *testing.T, ethClient evmclient.Client, cfg plugin.GeneralConfig, orm headtracker.ORM) *headTrackerUniverse { + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + lggr := logger.Test(t) + hb := headtracker.NewHeadBroadcaster(lggr) + hs := headtracker.NewHeadSaver(lggr, orm, evmcfg.EVM(), evmcfg.EVM().HeadTracker()) + mailMon := mailboxtest.NewMonitor(t) + ht := headtracker.NewHeadTracker(lggr, ethClient, evmcfg.EVM(), evmcfg.EVM().HeadTracker(), hb, hs, mailMon) + _, err := hs.Load(testutils.Context(t)) + require.NoError(t, err) + return &headTrackerUniverse{ + mu: new(sync.Mutex), + headTracker: ht, + headBroadcaster: hb, + headSaver: hs, + mailMon: mailMon, + } +} + +func createHeadTrackerWithChecker(t *testing.T, ethClient evmclient.Client, config headtracker.Config, htConfig headtracker.HeadTrackerConfig, orm headtracker.ORM, checker httypes.HeadTrackable) *headTrackerUniverse { + lggr := logger.Test(t) + hb := headtracker.NewHeadBroadcaster(lggr) + hs := headtracker.NewHeadSaver(lggr, orm, config, htConfig) + hb.Subscribe(checker) + mailMon := mailboxtest.NewMonitor(t) + ht := headtracker.NewHeadTracker(lggr, ethClient, config, htConfig, hb, hs, mailMon) + return &headTrackerUniverse{ + mu: new(sync.Mutex), + headTracker: ht, + headBroadcaster: hb, + 
headSaver: hs, + mailMon: mailMon, + } +} + +type headTrackerUniverse struct { + mu *sync.Mutex + stopped bool + headTracker httypes.HeadTracker + headBroadcaster httypes.HeadBroadcaster + headSaver httypes.HeadSaver + mailMon *mailbox.Monitor +} + +func (u *headTrackerUniverse) Backfill(ctx context.Context, head *evmtypes.Head, depth uint) error { + return u.headTracker.Backfill(ctx, head, depth) +} + +func (u *headTrackerUniverse) Start(t *testing.T) { + u.mu.Lock() + defer u.mu.Unlock() + ctx := testutils.Context(t) + require.NoError(t, u.headBroadcaster.Start(ctx)) + require.NoError(t, u.headTracker.Start(ctx)) + require.NoError(t, u.mailMon.Start(ctx)) + + g := gomega.NewWithT(t) + g.Eventually(func() bool { + report := u.headBroadcaster.HealthReport() + return !slices.ContainsFunc(maps.Values(report), func(e error) bool { return e != nil }) + }, 5*time.Second, testutils.TestInterval).Should(gomega.Equal(true)) + + t.Cleanup(func() { + u.Stop(t) + }) +} + +func (u *headTrackerUniverse) Stop(t *testing.T) { + u.mu.Lock() + defer u.mu.Unlock() + if u.stopped { + return + } + u.stopped = true + require.NoError(t, u.headBroadcaster.Close()) + require.NoError(t, u.headTracker.Close()) + require.NoError(t, u.mailMon.Close()) +} + +func ptr[T any](t T) *T { return &t } diff --git a/core/chains/evm/headtracker/heads.go b/core/chains/evm/headtracker/heads.go new file mode 100644 index 00000000..f356deeb --- /dev/null +++ b/core/chains/evm/headtracker/heads.go @@ -0,0 +1,113 @@ +package headtracker + +import ( + "sort" + "sync" + + "github.com/ethereum/go-ethereum/common" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +// Heads is a collection of heads. All methods are thread-safe. +type Heads interface { + // LatestHead returns the block header with the highest number that has been seen, or nil. + LatestHead() *evmtypes.Head + // HeadByHash returns a head for the specified hash, or nil. 
+ HeadByHash(hash common.Hash) *evmtypes.Head + // AddHeads adds newHeads to the collection, eliminates duplicates, + // sorts by head number, fixes parents and cuts off old heads (historyDepth). + AddHeads(historyDepth uint, newHeads ...*evmtypes.Head) + // Count returns number of heads in the collection. + Count() int +} + +type heads struct { + heads []*evmtypes.Head + mu sync.RWMutex +} + +func NewHeads() Heads { + return &heads{} +} + +func (h *heads) LatestHead() *evmtypes.Head { + h.mu.RLock() + defer h.mu.RUnlock() + + if len(h.heads) == 0 { + return nil + } + return h.heads[0] +} + +func (h *heads) HeadByHash(hash common.Hash) *evmtypes.Head { + h.mu.RLock() + defer h.mu.RUnlock() + + for _, head := range h.heads { + if head.Hash == hash { + return head + } + } + return nil +} + +func (h *heads) Count() int { + h.mu.RLock() + defer h.mu.RUnlock() + + return len(h.heads) +} + +func (h *heads) AddHeads(historyDepth uint, newHeads ...*evmtypes.Head) { + h.mu.Lock() + defer h.mu.Unlock() + + headsMap := make(map[common.Hash]*evmtypes.Head, len(h.heads)+len(newHeads)) + for _, head := range append(h.heads, newHeads...) 
{ + if head.Hash == head.ParentHash { + // shouldn't happen but it is untrusted input + continue + } + // copy all head objects to avoid races when a previous head chain is used + // elsewhere (since we mutate Parent here) + headCopy := *head + headCopy.Parent = nil // always build it from scratch in case it points to a head too old to be included + // map eliminates duplicates + headsMap[head.Hash] = &headCopy + } + + heads := make([]*evmtypes.Head, len(headsMap)) + // unsorted unique heads + { + var i int + for _, head := range headsMap { + heads[i] = head + i++ + } + } + + // sort the heads + sort.SliceStable(heads, func(i, j int) bool { + // sorting from the highest number to lowest + return heads[i].Number > heads[j].Number + }) + + // cut off the oldest + if uint(len(heads)) > historyDepth { + heads = heads[:historyDepth] + } + + // assign parents + for i := 0; i < len(heads)-1; i++ { + head := heads[i] + parent, exists := headsMap[head.ParentHash] + if exists { + head.Parent = parent + } + } + + // set + h.heads = heads +} diff --git a/core/chains/evm/headtracker/heads_test.go b/core/chains/evm/headtracker/heads_test.go new file mode 100644 index 00000000..8c134af4 --- /dev/null +++ b/core/chains/evm/headtracker/heads_test.go @@ -0,0 +1,110 @@ +package headtracker_test + +import ( + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" +) + +func TestHeads_LatestHead(t *testing.T) { + t.Parallel() + + heads := headtracker.NewHeads() + heads.AddHeads(3, cltest.Head(100), cltest.Head(200), cltest.Head(300)) + + latest := heads.LatestHead() + require.NotNil(t, latest) + 
require.Equal(t, int64(300), latest.Number) + + heads.AddHeads(3, cltest.Head(250)) + latest = heads.LatestHead() + require.NotNil(t, latest) + require.Equal(t, int64(300), latest.Number) + + heads.AddHeads(3, cltest.Head(400)) + latest = heads.LatestHead() + require.NotNil(t, latest) + require.Equal(t, int64(400), latest.Number) +} + +func TestHeads_HeadByHash(t *testing.T) { + t.Parallel() + + var testHeads = []*evmtypes.Head{ + cltest.Head(100), + cltest.Head(200), + cltest.Head(300), + } + heads := headtracker.NewHeads() + heads.AddHeads(3, testHeads...) + + head := heads.HeadByHash(testHeads[1].Hash) + require.NotNil(t, head) + require.Equal(t, int64(200), head.Number) + + head = heads.HeadByHash(utils.NewHash()) + require.Nil(t, head) +} + +func TestHeads_Count(t *testing.T) { + t.Parallel() + + heads := headtracker.NewHeads() + require.Zero(t, heads.Count()) + + heads.AddHeads(3, cltest.Head(100), cltest.Head(200), cltest.Head(300)) + require.Equal(t, 3, heads.Count()) + + heads.AddHeads(1, cltest.Head(400)) + require.Equal(t, 1, heads.Count()) +} + +func TestHeads_AddHeads(t *testing.T) { + t.Parallel() + + uncleHash := utils.NewHash() + heads := headtracker.NewHeads() + + var testHeads []*evmtypes.Head + var parentHash common.Hash + for i := 0; i < 5; i++ { + hash := utils.NewHash() + h := evmtypes.NewHead(big.NewInt(int64(i)), hash, parentHash, uint64(time.Now().Unix()), ubig.NewI(0)) + testHeads = append(testHeads, &h) + if i == 2 { + // uncled block + h := evmtypes.NewHead(big.NewInt(int64(i)), uncleHash, parentHash, uint64(time.Now().Unix()), ubig.NewI(0)) + testHeads = append(testHeads, &h) + } + parentHash = hash + } + + heads.AddHeads(6, testHeads...) + // Add duplicates (should be ignored) + heads.AddHeads(6, testHeads[2:5]...) 
+ require.Equal(t, 6, heads.Count()) + + head := heads.LatestHead() + require.NotNil(t, head) + require.Equal(t, 5, int(head.ChainLength())) + + head = heads.HeadByHash(uncleHash) + require.NotNil(t, head) + require.Equal(t, 3, int(head.ChainLength())) + + // Adding beyond the limit truncates + heads.AddHeads(2, testHeads...) + require.Equal(t, 2, heads.Count()) + head = heads.LatestHead() + require.NotNil(t, head) + require.Equal(t, 2, int(head.ChainLength())) +} diff --git a/core/chains/evm/headtracker/mocks/config.go b/core/chains/evm/headtracker/mocks/config.go new file mode 100644 index 00000000..74376a71 --- /dev/null +++ b/core/chains/evm/headtracker/mocks/config.go @@ -0,0 +1,64 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" +) + +// Config is an autogenerated mock type for the Config type +type Config struct { + mock.Mock +} + +// BlockEmissionIdleWarningThreshold provides a mock function with given fields: +func (_m *Config) BlockEmissionIdleWarningThreshold() time.Duration { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BlockEmissionIdleWarningThreshold") + } + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + +// FinalityDepth provides a mock function with given fields: +func (_m *Config) FinalityDepth() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalityDepth") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// NewConfig creates a new instance of Config. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewConfig(t interface { + mock.TestingT + Cleanup(func()) +}) *Config { + mock := &Config{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/headtracker/orm.go b/core/chains/evm/headtracker/orm.go new file mode 100644 index 00000000..6dc814af --- /dev/null +++ b/core/chains/evm/headtracker/orm.go @@ -0,0 +1,94 @@ +package headtracker + +import ( + "context" + "database/sql" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/logger" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type ORM interface { + // IdempotentInsertHead inserts a head only if the hash is new. Will do nothing if hash exists already. + // No advisory lock required because this is thread safe. 
+ IdempotentInsertHead(ctx context.Context, head *evmtypes.Head) error + // TrimOldHeads deletes heads such that only the top N block numbers remain + TrimOldHeads(ctx context.Context, n uint) (err error) + // LatestHead returns the highest seen head + LatestHead(ctx context.Context) (head *evmtypes.Head, err error) + // LatestHeads returns the latest heads up to given limit + LatestHeads(ctx context.Context, limit uint) (heads []*evmtypes.Head, err error) + // HeadByHash fetches the head with the given hash from the db, returns nil if none exists + HeadByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) +} + +type orm struct { + q pg.Q + chainID ubig.Big +} + +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig, chainID big.Int) ORM { + return &orm{pg.NewQ(db, logger.Named(lggr, "HeadTrackerORM"), cfg), ubig.Big(chainID)} +} + +func (orm *orm) IdempotentInsertHead(ctx context.Context, head *evmtypes.Head) error { + // listener guarantees head.EVMChainID to be equal to orm.chainID + q := orm.q.WithOpts(pg.WithParentCtx(ctx)) + query := ` + INSERT INTO evm.heads (hash, number, parent_hash, created_at, timestamp, l1_block_number, evm_chain_id, base_fee_per_gas) VALUES ( + :hash, :number, :parent_hash, :created_at, :timestamp, :l1_block_number, :evm_chain_id, :base_fee_per_gas) + ON CONFLICT (evm_chain_id, hash) DO NOTHING` + err := q.ExecQNamed(query, head) + return errors.Wrap(err, "IdempotentInsertHead failed to insert head") +} + +func (orm *orm) TrimOldHeads(ctx context.Context, n uint) (err error) { + q := orm.q.WithOpts(pg.WithParentCtx(ctx)) + return q.ExecQ(` + DELETE FROM evm.heads + WHERE evm_chain_id = $1 AND number < ( + SELECT min(number) FROM ( + SELECT number + FROM evm.heads + WHERE evm_chain_id = $1 + ORDER BY number DESC + LIMIT $2 + ) numbers + )`, orm.chainID, n) +} + +func (orm *orm) LatestHead(ctx context.Context) (head *evmtypes.Head, err error) { + head = new(evmtypes.Head) + q := 
orm.q.WithOpts(pg.WithParentCtx(ctx)) + err = q.Get(head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT 1`, orm.chainID) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + err = errors.Wrap(err, "LatestHead failed") + return +} + +func (orm *orm) LatestHeads(ctx context.Context, limit uint) (heads []*evmtypes.Head, err error) { + q := orm.q.WithOpts(pg.WithParentCtx(ctx)) + err = q.Select(&heads, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 ORDER BY number DESC, created_at DESC, id DESC LIMIT $2`, orm.chainID, limit) + err = errors.Wrap(err, "LatestHeads failed") + return +} + +func (orm *orm) HeadByHash(ctx context.Context, hash common.Hash) (head *evmtypes.Head, err error) { + q := orm.q.WithOpts(pg.WithParentCtx(ctx)) + head = new(evmtypes.Head) + err = q.Get(head, `SELECT * FROM evm.heads WHERE evm_chain_id = $1 AND hash = $2`, orm.chainID, hash) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return head, err +} diff --git a/core/chains/evm/headtracker/orm_test.go b/core/chains/evm/headtracker/orm_test.go new file mode 100644 index 00000000..a409fec4 --- /dev/null +++ b/core/chains/evm/headtracker/orm_test.go @@ -0,0 +1,121 @@ +package headtracker_test + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + + "github.com/ethereum/go-ethereum/common" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" +) + +func TestORM_IdempotentInsertHead(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + cfg := configtest.NewGeneralConfig(t, nil) + orm := headtracker.NewORM(db, 
logger, cfg.Database(), cltest.FixtureChainID) + + // Returns nil when inserting first head + head := cltest.Head(0) + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), head)) + + // Head is inserted + foundHead, err := orm.LatestHead(testutils.Context(t)) + require.NoError(t, err) + assert.Equal(t, head.Hash, foundHead.Hash) + + // Returns nil when inserting same head again + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), head)) + + // Head is still inserted + foundHead, err = orm.LatestHead(testutils.Context(t)) + require.NoError(t, err) + assert.Equal(t, head.Hash, foundHead.Hash) +} + +func TestORM_TrimOldHeads(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + cfg := configtest.NewGeneralConfig(t, nil) + orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + + for i := 0; i < 10; i++ { + head := cltest.Head(i) + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), head)) + } + + err := orm.TrimOldHeads(testutils.Context(t), 5) + require.NoError(t, err) + + heads, err := orm.LatestHeads(testutils.Context(t), 10) + require.NoError(t, err) + + require.Equal(t, 5, len(heads)) + for i := 0; i < 5; i++ { + require.LessOrEqual(t, int64(5), heads[i].Number) + } +} + +func TestORM_HeadByHash(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + cfg := configtest.NewGeneralConfig(t, nil) + orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + + var hash common.Hash + for i := 0; i < 10; i++ { + head := cltest.Head(i) + if i == 5 { + hash = head.Hash + } + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), head)) + } + + head, err := orm.HeadByHash(testutils.Context(t), hash) + require.NoError(t, err) + require.Equal(t, hash, head.Hash) + require.Equal(t, int64(5), head.Number) +} + +func TestORM_HeadByHash_NotFound(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := 
logger.Test(t) + cfg := configtest.NewGeneralConfig(t, nil) + orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + + hash := cltest.Head(123).Hash + head, err := orm.HeadByHash(testutils.Context(t), hash) + + require.Nil(t, head) + require.NoError(t, err) +} + +func TestORM_LatestHeads_NoRows(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logger := logger.Test(t) + cfg := configtest.NewGeneralConfig(t, nil) + orm := headtracker.NewORM(db, logger, cfg.Database(), cltest.FixtureChainID) + + heads, err := orm.LatestHeads(testutils.Context(t), 100) + + require.Zero(t, len(heads)) + require.NoError(t, err) +} diff --git a/core/chains/evm/headtracker/types/types.go b/core/chains/evm/headtracker/types/types.go new file mode 100644 index 00000000..e279acf5 --- /dev/null +++ b/core/chains/evm/headtracker/types/types.go @@ -0,0 +1,26 @@ +package types + +import ( + "context" + + "github.com/ethereum/go-ethereum/common" + + commontypes "github.com/goplugin/pluginv3.0/v2/common/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +// HeadSaver maintains chains persisted in DB. All methods are thread-safe. +type HeadSaver interface { + commontypes.HeadSaver[*evmtypes.Head, common.Hash] + // LatestHeadFromDB returns the highest seen head from DB. 
+ LatestHeadFromDB(ctx context.Context) (*evmtypes.Head, error) +} + +// Type Alias for EVM Head Tracker Components +type ( + HeadBroadcasterRegistry = commontypes.HeadBroadcasterRegistry[*evmtypes.Head, common.Hash] + HeadTracker = commontypes.HeadTracker[*evmtypes.Head, common.Hash] + HeadTrackable = commontypes.HeadTrackable[*evmtypes.Head, common.Hash] + HeadListener = commontypes.HeadListener[*evmtypes.Head, common.Hash] + HeadBroadcaster = commontypes.HeadBroadcaster[*evmtypes.Head, common.Hash] +) diff --git a/core/chains/evm/label/label.go b/core/chains/evm/label/label.go new file mode 100644 index 00000000..7dd207e1 --- /dev/null +++ b/core/chains/evm/label/label.go @@ -0,0 +1,9 @@ +package label + +// nolint +const ( + MaxInFlightTransactionsWarning = `WARNING: If this happens a lot, you may need to increase EVM.Transactions.MaxInFlight to boost your node's transaction throughput, however you do this at your own risk. You MUST first ensure your ethereum node is configured not to ever evict local transactions that exceed this number otherwise the node can get permanently stuck. See the performance guide for more details: https://docs.chain.link/docs/evm-performance-configuration/` + MaxQueuedTransactionsWarning = `WARNING: Hitting EVM.Transactions.MaxQueued is a sanity limit and should never happen under normal operation. Unless you are operating with very high throughput, this error is unlikely to be a problem with your Plugin node configuration, and instead more likely to be caused by a problem with your eth node's connectivity. Check your eth node: it may not be broadcasting transactions to the network, or it might be overloaded and evicting Plugin's transactions from its mempool. It is recommended to run Plugin with multiple primary and sendonly nodes for redundancy and to ensure fast and reliable transaction propagation. 
Increasing EVM.Transactions.MaxQueued will allow Plugin to buffer more unsent transactions, but you should only do this if you need very high burst transmission rates. If you don't need very high burst throughput, increasing this limit is not the correct action to take here and will probably make things worse. See the performance guide for more details: https://docs.chain.link/docs/evm-performance-configuration/` + NodeConnectivityProblemWarning = `WARNING: If this happens a lot, it may be a sign that your eth node has a connectivity problem, and your transactions are not making it to any miners. It is recommended to run Plugin with multiple primary and sendonly nodes for redundancy and to ensure fast and reliable transaction propagation. See the performance guide for more details: https://docs.chain.link/docs/evm-performance-configuration/` + RPCTxFeeCapConfiguredIncorrectlyWarning = `WARNING: Gas price was rejected by the eth node for being too high. By default, go-ethereum (and clones) have a built-in upper limit for gas price. It is preferable to disable this and rely Plugin's internal gas limits instead. Your RPC node's RPCTxFeeCap needs to be disabled or increased (recommended configuration: --rpc.gascap=0 --rpc.txfeecap=0). If you want to limit Plugin's max gas price, you may do so by setting EVM.GasEstimator.PriceMax on the Plugin node. Plugin will never send a transaction with a total cost higher than EVM.GasEstimator.PriceMax. 
See the performance guide for more details: https://docs.chain.link/docs/evm-performance-configuration/` +) diff --git a/core/chains/evm/log/broadcaster.go b/core/chains/evm/log/broadcaster.go new file mode 100644 index 00000000..9c1ce1b5 --- /dev/null +++ b/core/chains/evm/log/broadcaster.go @@ -0,0 +1,805 @@ +package log + +import ( + "context" + "fmt" + "math/big" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + "github.com/goplugin/pluginv3.0/v2/core/null" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +//go:generate mockery --quiet --name Broadcaster --output ./mocks/ --case=underscore --structname Broadcaster --filename broadcaster.go + +type ( + // The Broadcaster manages log subscription requests for the Plugin node. Instead + // of creating a new subscription for each request, it multiplexes all subscriptions + // to all of the relevant contracts over a single connection and forwards the logs to the + // relevant subscribers. + // + // In case of node crash and/or restart, the logs will be backfilled for subscribers that are added before all + // dependents of LogBroadcaster are done. + // + // The backfill starts from the earliest block of either: + // - Latest DB head minus BlockBackfillDepth and the maximum number of confirmations. 
+ // - Earliest pending or unconsumed log broadcast from DB. + // + // If a subscriber is added after the LogBroadcaster does the initial backfill, + // then it's possible/likely that the backfill fill only have depth: 1 (from latest head) + // + // Of course, these backfilled logs + any new logs will only be sent after the NumConfirmations for given subscriber. + Broadcaster interface { + utils.DependentAwaiter + services.Service + httypes.HeadTrackable + + // ReplayFromBlock enqueues a replay from the provided block number. If forceBroadcast is + // set to true, the broadcaster will broadcast logs that were already marked consumed + // previously by any subscribers. + ReplayFromBlock(number int64, forceBroadcast bool) + + IsConnected() bool + Register(listener Listener, opts ListenerOpts) (unsubscribe func()) + + WasAlreadyConsumed(lb Broadcast, qopts ...pg.QOpt) (bool, error) + MarkConsumed(lb Broadcast, qopts ...pg.QOpt) error + + // MarkManyConsumed marks all the provided log broadcasts as consumed. + MarkManyConsumed(lbs []Broadcast, qopts ...pg.QOpt) error + + // NOTE: WasAlreadyConsumed, MarkConsumed and MarkManyConsumed MUST be used within a single goroutine in order for WasAlreadyConsumed to be accurate + } + + BroadcasterInTest interface { + Broadcaster + BackfillBlockNumber() null.Int64 + TrackedAddressesCount() uint32 + // Pause pauses the eventLoop until Resume is called. + Pause() + // Resume resumes the eventLoop after calling Pause. 
+ Resume() + LogsFromBlock(bh common.Hash) int + } + + subscriberStatus int + + changeSubscriberStatus struct { + newStatus subscriberStatus + sub *subscriber + } + + replayRequest struct { + fromBlock int64 + forceBroadcast bool + } + + broadcaster struct { + services.StateMachine + orm ORM + config Config + connected atomic.Bool + evmChainID big.Int + + // a block number to start backfill from + backfillBlockNumber null.Int64 + + ethSubscriber *ethSubscriber + registrations *registrations + logPool *logPool + + mailMon *mailbox.Monitor + // Use the same channel for subs/unsubs so ordering is preserved + // (unsubscribe must happen after subscribe) + changeSubscriberStatus *mailbox.Mailbox[changeSubscriberStatus] + newHeads *mailbox.Mailbox[*evmtypes.Head] + + utils.DependentAwaiter + + chStop services.StopChan + wgDone sync.WaitGroup + trackedAddressesCount atomic.Uint32 + replayChannel chan replayRequest + highestSavedHead *evmtypes.Head + lastSeenHeadNumber atomic.Int64 + logger logger.Logger + + // used for testing only + testPause, testResume chan struct{} + } + + Config interface { + BlockBackfillDepth() uint64 + BlockBackfillSkip() bool + FinalityDepth() uint32 + LogBackfillBatchSize() uint32 + } + + ListenerOpts struct { + Contract common.Address + + // Event types to receive, with value filter for each field in the event + // No filter or an empty filter for a given field position mean: all values allowed + // the key should be a result of AbigenLog.Topic() call + // topic => topicValueFilters + LogsWithTopics map[common.Hash][][]Topic + + ParseLog ParseLogFunc + + // Minimum number of block confirmations before the log is received + MinIncomingConfirmations uint32 + + // ReplayStartedCallback is called by the log broadcaster once a replay request is received. 
+ ReplayStartedCallback func() + } + + ParseLogFunc func(log types.Log) (generated.AbigenLog, error) + + subscriber struct { + listener Listener + opts ListenerOpts + } + + Topic common.Hash +) + +const ( + subscriberStatusSubscribe = iota + subscriberStatusUnsubscribe +) + +var _ Broadcaster = (*broadcaster)(nil) + +// NewBroadcaster creates a new instance of the broadcaster +func NewBroadcaster(orm ORM, ethClient evmclient.Client, config Config, lggr logger.Logger, highestSavedHead *evmtypes.Head, mailMon *mailbox.Monitor) *broadcaster { + chStop := make(chan struct{}) + lggr = logger.Named(lggr, "LogBroadcaster") + chainId := ethClient.ConfiguredChainID() + return &broadcaster{ + orm: orm, + config: config, + logger: lggr, + evmChainID: *chainId, + ethSubscriber: newEthSubscriber(ethClient, config, lggr, chStop), + registrations: newRegistrations(lggr, *chainId), + logPool: newLogPool(lggr), + mailMon: mailMon, + changeSubscriberStatus: mailbox.NewHighCapacity[changeSubscriberStatus](), + newHeads: mailbox.NewSingle[*evmtypes.Head](), + DependentAwaiter: utils.NewDependentAwaiter(), + chStop: chStop, + highestSavedHead: highestSavedHead, + replayChannel: make(chan replayRequest, 1), + } +} + +func (b *broadcaster) Start(context.Context) error { + return b.StartOnce("LogBroadcaster", func() error { + b.wgDone.Add(1) + go b.awaitInitialSubscribers() + b.mailMon.Monitor(b.changeSubscriberStatus, "LogBroadcaster", "ChangeSubscriber", b.evmChainID.String()) + return nil + }) +} + +// ReplayFromBlock implements the Broadcaster interface. 
+func (b *broadcaster) ReplayFromBlock(number int64, forceBroadcast bool) { + b.logger.Infow("Replay requested", "block number", number, "force", forceBroadcast) + select { + case b.replayChannel <- replayRequest{ + fromBlock: number, + forceBroadcast: forceBroadcast, + }: + default: + } +} + +func (b *broadcaster) Close() error { + return b.StopOnce("LogBroadcaster", func() error { + close(b.chStop) + b.wgDone.Wait() + return b.changeSubscriberStatus.Close() + }) +} + +func (b *broadcaster) Name() string { + return b.logger.Name() +} + +func (b *broadcaster) HealthReport() map[string]error { + return map[string]error{b.Name(): b.Healthy()} +} + +func (b *broadcaster) awaitInitialSubscribers() { + defer b.wgDone.Done() + b.logger.Debug("Starting to await initial subscribers until all dependents are ready...") + for { + select { + case <-b.changeSubscriberStatus.Notify(): + b.onChangeSubscriberStatus() + + case <-b.DependentAwaiter.AwaitDependents(): + // ensure that any queued dependent subscriptions are registered first + b.onChangeSubscriberStatus() + b.wgDone.Add(1) + go b.startResubscribeLoop() + return + + case <-b.chStop: + return + } + } +} + +func (b *broadcaster) Register(listener Listener, opts ListenerOpts) (unsubscribe func()) { + // IfNotStopped RLocks the state mutex so LB cannot be closed until this + // returns (no need to worry about listening for b.chStop) + // + // NOTE: We do not use IfStarted here because it is explicitly ok to + // register listeners before starting, this allows us to register many + // listeners then subscribe once on start, avoiding thrashing + ok := b.IfNotStopped(func() { + if len(opts.LogsWithTopics) == 0 { + b.logger.Panic("Must supply at least 1 LogsWithTopics element to Register") + } + if opts.MinIncomingConfirmations <= 0 { + b.logger.Warnw(fmt.Sprintf("LogBroadcaster requires that MinIncomingConfirmations must be at least 1 (got %v). 
Logs must have been confirmed in at least 1 block, it does not support reading logs from the mempool before they have been mined. MinIncomingConfirmations will be set to 1.", opts.MinIncomingConfirmations), "addr", opts.Contract.Hex(), "jobID", listener.JobID()) + opts.MinIncomingConfirmations = 1 + } + + sub := &subscriber{listener, opts} + b.logger.Debugf("Registering subscriber %p with job ID %v", sub, sub.listener.JobID()) + wasOverCapacity := b.changeSubscriberStatus.Deliver(changeSubscriberStatus{subscriberStatusSubscribe, sub}) + if wasOverCapacity { + b.logger.Panicf("LogBroadcaster subscribe: cannot subscribe %p with job ID %v; changeSubscriberStatus channel was full", sub, sub.listener.JobID()) + } + + // this is asynchronous but it shouldn't matter, since the channel is + // ordered then it will work properly as long as you call unsubscribe + // before subscribing a new listener with the same job/addr (e.g. on + // replacement of the same job) + unsubscribe = func() { + b.logger.Debugf("Unregistering subscriber %p with job ID %v", sub, sub.listener.JobID()) + wasOverCapacity := b.changeSubscriberStatus.Deliver(changeSubscriberStatus{subscriberStatusUnsubscribe, sub}) + if wasOverCapacity { + b.logger.Panicf("LogBroadcaster unsubscribe: cannot unsubscribe %p with job ID %v; changeSubscriberStatus channel was full", sub, sub.listener.JobID()) + } + } + }) + if !ok { + b.logger.Panic("Register cannot be called on a stopped log broadcaster (this is an invariant violation because all dependent services should have unregistered themselves before logbroadcaster.Close was called)") + } + return +} + +func (b *broadcaster) OnNewLongestChain(ctx context.Context, head *evmtypes.Head) { + wasOverCapacity := b.newHeads.Deliver(head) + if wasOverCapacity { + b.logger.Debugw("Dropped the older head in the mailbox, while inserting latest (which is fine)", "latestBlockNumber", head.Number) + } +} + +func (b *broadcaster) IsConnected() bool { + return b.connected.Load() 
}

// The subscription is closed in two cases:
//   - intentionally, when the set of contracts we're listening to changes
//   - on a connection error
//
// This method recreates the subscription in both cases. In the event of a connection
// error, it attempts to reconnect. Any time there's a change in connection state, it
// notifies its subscribers.
func (b *broadcaster) startResubscribeLoop() {
	defer b.wgDone.Done()

	// Start with a no-op subscription so the deferred Unsubscribe is always safe.
	var subscription managedSubscription = newNoopSubscription()
	defer func() { subscription.Unsubscribe() }()

	if b.config.BlockBackfillSkip() && b.highestSavedHead != nil {
		b.logger.Warn("BlockBackfillSkip is set to true, preventing a deep backfill - some earlier chain events might be missed.")
	} else if b.highestSavedHead != nil {
		// The backfill needs to start at an earlier block than the one last saved in DB, to account for:
		// - keeping logs in the in-memory buffers in registration.go
		//   (which will be lost on node restart) for MAX(NumConfirmations of subscribers)
		// - HeadTracker saving the heads to DB asynchronously versus LogBroadcaster, where a head
		//   (or more heads on fast chains) may be saved but not yet processed by LB
		// using BlockBackfillDepth makes sure the backfill will be dependent on the per-chain configuration
		from := b.highestSavedHead.Number -
			int64(b.registrations.highestNumConfirmations) -
			int64(b.config.BlockBackfillDepth())
		if from < 0 {
			from = 0
		}
		b.backfillBlockNumber = null.NewInt64(from, true)
	}

	// Remove leftover unconsumed logs, maybe update pending broadcasts, and backfill sooner if necessary.
	if backfillStart, abort := b.reinitialize(); abort {
		return
	} else if backfillStart != nil {
		// No need to worry about r.highestNumConfirmations here because it's
		// already at minimum this deep due to the latest seen head check above
		if !b.backfillBlockNumber.Valid || *backfillStart < b.backfillBlockNumber.Int64 {
			b.backfillBlockNumber.SetValid(*backfillStart)
		}
	}

	if b.backfillBlockNumber.Valid {
		b.logger.Debugw("Using an override as a start of the backfill",
			"blockNumber", b.backfillBlockNumber.Int64,
			"highestNumConfirmations", b.registrations.highestNumConfirmations,
			"blockBackfillDepth", b.config.BlockBackfillDepth(),
		)
	}

	var chRawLogs chan types.Log
	for {
		b.logger.Infow("Resubscribing and backfilling logs...")
		addresses, topics := b.registrations.addressesAndTopics()

		newSubscription, abort := b.ethSubscriber.createSubscription(addresses, topics)
		if abort {
			return
		}

		chBackfilledLogs, abort := b.ethSubscriber.backfillLogs(b.backfillBlockNumber, addresses, topics)
		if abort {
			return
		}

		// The backfill override is one-shot: clear it so the next iteration
		// backfills from the invalidated-pool block (if any) instead.
		b.backfillBlockNumber.Valid = false

		// Each time this loop runs, chRawLogs is reconstituted as:
		// "remaining logs from last subscription <- backfilled logs <- logs from new subscription"
		// There will be duplicated logs in this channel. It is the responsibility of subscribers
		// to account for this using the helpers on the Broadcast type.
		chRawLogs = b.appendLogChannel(chRawLogs, chBackfilledLogs)
		chRawLogs = b.appendLogChannel(chRawLogs, newSubscription.Logs())
		subscription.Unsubscribe()
		subscription = newSubscription

		b.connected.Store(true)

		b.trackedAddressesCount.Store(uint32(len(addresses)))

		shouldResubscribe, err := b.eventLoop(chRawLogs, subscription.Err())
		if err != nil {
			b.logger.Warnw("Error in the event loop - will reconnect", "err", err)
			b.connected.Store(false)
			continue
		} else if !shouldResubscribe {
			b.connected.Store(false)
			return
		}
	}
}

// reinitialize cleans up stale broadcast state in the DB (with retries) and
// returns an optional earlier block to start the backfill from. abort is true
// when the broadcaster is shutting down.
func (b *broadcaster) reinitialize() (backfillStart *int64, abort bool) {
	ctx, cancel := b.chStop.NewCtx()
	defer cancel()

	evmutils.RetryWithBackoff(ctx, func() bool {
		var err error
		backfillStart, err = b.orm.Reinitialize(pg.WithParentCtx(ctx))
		if err != nil {
			b.logger.Errorw("Failed to reinitialize database", "err", err)
			return true
		}
		return false
	})

	select {
	case <-b.chStop:
		abort = true
	default:
	}
	return
}

// eventLoop is the broadcaster's main loop: it consumes raw logs, new heads,
// subscriber changes and replay requests until either an error forces a
// resubscribe (shouldResubscribe=true) or shutdown is requested.
func (b *broadcaster) eventLoop(chRawLogs <-chan types.Log, chErr <-chan error) (shouldResubscribe bool, _ error) {
	// We debounce requests to subscribe and unsubscribe to avoid making too many
	// RPC calls to the Ethereum node, particularly on startup.
	var needsResubscribe bool
	debounceResubscribe := time.NewTicker(1 * time.Second)
	defer debounceResubscribe.Stop()

	b.logger.Debug("Starting the event loop")
	for {
		// Replay requests take priority.
		select {
		case req := <-b.replayChannel:
			b.onReplayRequest(req)
			return true, nil
		default:
		}

		select {
		case rawLog := <-chRawLogs:
			b.logger.Debugw("Received a log",
				"blockNumber", rawLog.BlockNumber, "blockHash", rawLog.BlockHash, "address", rawLog.Address)
			b.onNewLog(rawLog)

		case <-b.newHeads.Notify():
			b.onNewHeads()

		case err := <-chErr:
			// The eth node connection was terminated so we need to backfill after resubscribing.
			lggr := b.logger
			// Do we have logs in the pool?
			// They are invalid, since we may have missed 'removed' logs.
			if blockNum := b.invalidatePool(); blockNum > 0 {
				lggr = logger.With(lggr, "blockNumber", blockNum)
			}
			lggr.Debugw("Subscription terminated. Backfilling after resubscribing")
			return true, err

		case <-b.changeSubscriberStatus.Notify():
			needsResubscribe = b.onChangeSubscriberStatus() || needsResubscribe

		case req := <-b.replayChannel:
			b.onReplayRequest(req)
			return true, nil

		case <-debounceResubscribe.C:
			if needsResubscribe {
				b.logger.Debug("Returning from the event loop to resubscribe")
				return true, nil
			}

		case <-b.chStop:
			return false, nil

		// testing only
		case <-b.testPause:
			select {
			case <-b.testResume:
			case <-b.chStop:
				return false, nil
			}
		}
	}
}

// onReplayRequest clears the pool and sets the block backfill number.
func (b *broadcaster) onReplayRequest(replayReq replayRequest) {
	// notify subscribers that we are about to replay.
	for subscriber := range b.registrations.registeredSubs {
		if subscriber.opts.ReplayStartedCallback != nil {
			subscriber.opts.ReplayStartedCallback()
		}
	}

	_ = b.invalidatePool()
	// NOTE: This ignores r.highestNumConfirmations, but it is
	// generally assumed that this will only be performed rarely and
	// manually by someone who knows what he is doing
	b.backfillBlockNumber.SetValid(replayReq.fromBlock)
	if replayReq.forceBroadcast {
		ctx, cancel := b.chStop.NewCtx()
		defer cancel()

		// Use a longer timeout in the event that a very large amount of logs need to be marked
		// as consumed.
+ err := b.orm.MarkBroadcastsUnconsumed(replayReq.fromBlock, pg.WithParentCtx(ctx), pg.WithLongQueryTimeout()) + if err != nil { + b.logger.Errorw("Error marking broadcasts as unconsumed", + "err", err, "fromBlock", replayReq.fromBlock) + } + } + b.logger.Debugw( + "Returning from the event loop to replay logs from specific block number", + "fromBlock", replayReq.fromBlock, + "forceBroadcast", replayReq.forceBroadcast, + ) +} + +func (b *broadcaster) invalidatePool() int64 { + if min := b.logPool.heap.FindMin(); min != nil { + b.logPool = newLogPool(b.logger) + // Note: even if we crash right now, PendingMinBlock is preserved in the database and we will backfill the same. + blockNum := int64(min.(Uint64)) + b.backfillBlockNumber.SetValid(blockNum) + return blockNum + } + return -1 +} + +func (b *broadcaster) onNewLog(log types.Log) { + b.maybeWarnOnLargeBlockNumberDifference(int64(log.BlockNumber)) + + if log.Removed { + // Remove the whole block that contained this log. + b.logger.Debugw("Found reverted log", "log", log) + b.logPool.removeBlock(log.BlockHash, log.BlockNumber) + return + } else if !b.registrations.isAddressRegistered(log.Address) { + b.logger.Debugw("Found unregistered address", "address", log.Address) + return + } + if b.logPool.addLog(log) { + // First or new lowest block number + ctx, cancel := b.chStop.NewCtx() + defer cancel() + blockNumber := int64(log.BlockNumber) + if err := b.orm.SetPendingMinBlock(&blockNumber, pg.WithParentCtx(ctx)); err != nil { + b.logger.Errorw("Failed to set pending broadcasts number", "blockNumber", log.BlockNumber, "err", err) + } + } +} + +func (b *broadcaster) onNewHeads() { + var latestHead *evmtypes.Head + for { + // We only care about the most recent head + head := b.newHeads.RetrieveLatestAndClear() + if head == nil { + break + } + latestHead = head + } + + // latestHead may sometimes be nil on high rate of heads, + // when 'b.newHeads.Notify()' receives more times that the number of items in the mailbox + // 
Some heads may be missed (which is fine for LogBroadcaster logic) but the latest one in a burst will be received + if latestHead != nil { + b.logger.Debugw("Received head", "blockNumber", latestHead.Number, + "blockHash", latestHead.Hash, "parentHash", latestHead.ParentHash, "chainLen", latestHead.ChainLength()) + + b.lastSeenHeadNumber.Store(latestHead.Number) + + keptLogsDepth := b.config.FinalityDepth() + if b.registrations.highestNumConfirmations > keptLogsDepth { + keptLogsDepth = b.registrations.highestNumConfirmations + } + + latestBlockNum := latestHead.Number + keptDepth := latestBlockNum - int64(keptLogsDepth) + if keptDepth < 0 { + keptDepth = 0 + } + + ctx, cancel := b.chStop.NewCtx() + defer cancel() + + // if all subscribers requested 0 confirmations, we always get and delete all logs from the pool, + // without comparing their block numbers to the current head's block number. + if b.registrations.highestNumConfirmations == 0 { + logs, lowest, highest := b.logPool.getAndDeleteAll() + if len(logs) > 0 { + broadcasts, err := b.orm.FindBroadcasts(lowest, highest) + if err != nil { + b.logger.Errorf("Failed to query for log broadcasts, %v", err) + return + } + b.registrations.sendLogs(logs, *latestHead, broadcasts, b.orm) + if err := b.orm.SetPendingMinBlock(nil, pg.WithParentCtx(ctx)); err != nil { + b.logger.Errorw("Failed to set pending broadcasts number null", "err", err) + } + } + } else { + logs, minBlockNum := b.logPool.getLogsToSend(latestBlockNum) + + if len(logs) > 0 { + broadcasts, err := b.orm.FindBroadcasts(minBlockNum, latestBlockNum) + if err != nil { + b.logger.Errorf("Failed to query for log broadcasts, %v", err) + return + } + + b.registrations.sendLogs(logs, *latestHead, broadcasts, b.orm) + } + newMin := b.logPool.deleteOlderLogs(keptDepth) + if err := b.orm.SetPendingMinBlock(newMin); err != nil { + b.logger.Errorw("Failed to set pending broadcasts number", "blockNumber", keptDepth, "err", err) + } + } + } +} + +func (b *broadcaster) 
onChangeSubscriberStatus() (needsResubscribe bool) { + for { + change, exists := b.changeSubscriberStatus.Retrieve() + if !exists { + break + } + sub := change.sub + + if change.newStatus == subscriberStatusSubscribe { + b.logger.Debugw("Subscribing listener", "requiredBlockConfirmations", sub.opts.MinIncomingConfirmations, "address", sub.opts.Contract, "jobID", sub.listener.JobID()) + needsResub := b.registrations.addSubscriber(sub) + if needsResub { + needsResubscribe = true + } + } else { + b.logger.Debugw("Unsubscribing listener", "requiredBlockConfirmations", sub.opts.MinIncomingConfirmations, "address", sub.opts.Contract, "jobID", sub.listener.JobID()) + needsResub := b.registrations.removeSubscriber(sub) + if needsResub { + needsResubscribe = true + } + } + } + return +} + +func (b *broadcaster) appendLogChannel(ch1, ch2 <-chan types.Log) chan types.Log { + if ch1 == nil && ch2 == nil { + return nil + } + + chCombined := make(chan types.Log) + + go func() { + defer close(chCombined) + if ch1 != nil { + for rawLog := range ch1 { + select { + case chCombined <- rawLog: + case <-b.chStop: + return + } + } + } + if ch2 != nil { + for rawLog := range ch2 { + select { + case chCombined <- rawLog: + case <-b.chStop: + return + } + } + } + }() + + return chCombined +} + +func (b *broadcaster) maybeWarnOnLargeBlockNumberDifference(logBlockNumber int64) { + lastSeenHeadNumber := b.lastSeenHeadNumber.Load() + diff := logBlockNumber - lastSeenHeadNumber + if diff < 0 { + diff = -diff + } + + if lastSeenHeadNumber > 0 && diff > 1000 { + b.logger.Warnw("Detected a large block number difference between a log and recently seen head. 
"+ + "This may indicate a problem with data received from the chain or major network delays.", + "lastSeenHeadNumber", lastSeenHeadNumber, "logBlockNumber", logBlockNumber, "diff", diff) + } +} + +// WasAlreadyConsumed reports whether the given consumer had already consumed the given log +func (b *broadcaster) WasAlreadyConsumed(lb Broadcast, qopts ...pg.QOpt) (bool, error) { + return b.orm.WasBroadcastConsumed(lb.RawLog().BlockHash, lb.RawLog().Index, lb.JobID(), qopts...) +} + +// MarkConsumed marks the log as having been successfully consumed by the subscriber +func (b *broadcaster) MarkConsumed(lb Broadcast, qopts ...pg.QOpt) error { + return b.orm.MarkBroadcastConsumed(lb.RawLog().BlockHash, lb.RawLog().BlockNumber, lb.RawLog().Index, lb.JobID(), qopts...) +} + +// MarkManyConsumed marks the logs as having been successfully consumed by the subscriber +func (b *broadcaster) MarkManyConsumed(lbs []Broadcast, qopts ...pg.QOpt) (err error) { + var ( + blockHashes = make([]common.Hash, len(lbs)) + blockNumbers = make([]uint64, len(lbs)) + logIndexes = make([]uint, len(lbs)) + jobIDs = make([]int32, len(lbs)) + ) + for i := range lbs { + blockHashes[i] = lbs[i].RawLog().BlockHash + blockNumbers[i] = lbs[i].RawLog().BlockNumber + logIndexes[i] = lbs[i].RawLog().Index + jobIDs[i] = lbs[i].JobID() + } + return b.orm.MarkBroadcastsConsumed(blockHashes, blockNumbers, logIndexes, jobIDs, qopts...) 
+} + +// test only +func (b *broadcaster) TrackedAddressesCount() uint32 { + return b.trackedAddressesCount.Load() +} + +// test only +func (b *broadcaster) BackfillBlockNumber() null.Int64 { + return b.backfillBlockNumber +} + +// test only +func (b *broadcaster) Pause() { + select { + case b.testPause <- struct{}{}: + case <-b.chStop: + } +} + +// test only +func (b *broadcaster) Resume() { + select { + case b.testResume <- struct{}{}: + case <-b.chStop: + } +} + +// test only +func (b *broadcaster) LogsFromBlock(bh common.Hash) int { + return b.logPool.testOnly_getNumLogsForBlock(bh) +} + +func topicsToHex(topics [][]Topic) [][]common.Hash { + var topicsInHex [][]common.Hash + for i := range topics { + var hexes []common.Hash + for j := range topics[i] { + hexes = append(hexes, common.Hash(topics[i][j])) + } + topicsInHex = append(topicsInHex, hexes) + } + return topicsInHex +} + +var _ BroadcasterInTest = &NullBroadcaster{} + +type NullBroadcaster struct{ ErrMsg string } + +func (n *NullBroadcaster) IsConnected() bool { return false } +func (n *NullBroadcaster) Register(listener Listener, opts ListenerOpts) (unsubscribe func()) { + return func() {} +} + +// ReplayFromBlock implements the Broadcaster interface. 
+func (n *NullBroadcaster) ReplayFromBlock(number int64, forceBroadcast bool) {} + +func (n *NullBroadcaster) BackfillBlockNumber() null.Int64 { + return null.NewInt64(0, false) +} +func (n *NullBroadcaster) TrackedAddressesCount() uint32 { + return 0 +} +func (n *NullBroadcaster) WasAlreadyConsumed(lb Broadcast, qopts ...pg.QOpt) (bool, error) { + return false, errors.New(n.ErrMsg) +} +func (n *NullBroadcaster) MarkConsumed(lb Broadcast, qopts ...pg.QOpt) error { + return errors.New(n.ErrMsg) +} +func (n *NullBroadcaster) MarkManyConsumed(lbs []Broadcast, qopts ...pg.QOpt) error { + return errors.New(n.ErrMsg) +} + +func (n *NullBroadcaster) AddDependents(int) {} +func (n *NullBroadcaster) AwaitDependents() <-chan struct{} { + ch := make(chan struct{}) + close(ch) + return ch +} + +// DependentReady does noop for NullBroadcaster. +func (n *NullBroadcaster) DependentReady() {} + +func (n *NullBroadcaster) Name() string { return "NullBroadcaster" } + +// Start does noop for NullBroadcaster. 
+func (n *NullBroadcaster) Start(context.Context) error { return nil } +func (n *NullBroadcaster) Close() error { return nil } +func (n *NullBroadcaster) Ready() error { return nil } +func (n *NullBroadcaster) HealthReport() map[string]error { return nil } +func (n *NullBroadcaster) OnNewLongestChain(context.Context, *evmtypes.Head) {} +func (n *NullBroadcaster) Pause() {} +func (n *NullBroadcaster) Resume() {} +func (n *NullBroadcaster) LogsFromBlock(common.Hash) int { return -1 } diff --git a/core/chains/evm/log/eth_subscriber.go b/core/chains/evm/log/eth_subscriber.go new file mode 100644 index 00000000..c2124a65 --- /dev/null +++ b/core/chains/evm/log/eth_subscriber.go @@ -0,0 +1,274 @@ +package log + +import ( + "context" + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/null" +) + +type ( + ethSubscriber struct { + ethClient evmclient.Client + config Config + logger logger.Logger + chStop services.StopChan + } +) + +func newEthSubscriber(ethClient evmclient.Client, config Config, lggr logger.Logger, chStop chan struct{}) *ethSubscriber { + return ðSubscriber{ + ethClient: ethClient, + config: config, + logger: logger.Named(lggr, "EthSubscriber"), + chStop: chStop, + } +} + +// backfillLogs - fetches earlier logs either from a relatively recent block (latest minus BlockBackfillDepth) or from the given fromBlockOverride +// note that the whole operation has no timeout - it relies on BlockBackfillSkip (set outside) to optionally prevent very deep, long backfills +// Max runtime is: (10 sec + 1 min * numBlocks/batchSize) * 3 retries +func (sub *ethSubscriber) 
backfillLogs(fromBlockOverride null.Int64, addresses []common.Address, topics []common.Hash) (chBackfilledLogs chan types.Log, abort bool) {
	sub.logger.Infow("backfilling logs", "from", fromBlockOverride, "addresses", addresses)
	if len(addresses) == 0 {
		// Nothing to backfill: hand back a closed channel so the caller's
		// channel concatenation terminates immediately.
		sub.logger.Debug("LogBroadcaster: No addresses to backfill for, returning")
		ch := make(chan types.Log)
		close(ch)
		return ch, false
	}

	ctxParent, cancel := sub.chStop.NewCtx()
	defer cancel()

	// latestHeight < 0 means "not fetched yet"; it is fetched once and reused
	// across retries.
	var latestHeight int64 = -1
	retryCount := 0
	utils.RetryWithBackoff(ctxParent, func() (retry bool) {
		// Cap the whole backfill at a handful of attempts.
		if retryCount > 3 {
			return false
		}
		retryCount++

		if latestHeight < 0 {
			latestBlock, err := sub.ethClient.HeadByNumber(ctxParent, nil)
			if err != nil {
				sub.logger.Warnw("LogBroadcaster: Backfill - could not fetch latest block header, will retry", "err", err)
				return true
			} else if latestBlock == nil {
				sub.logger.Warn("LogBroadcaster: Got nil block header, will retry")
				return true
			}
			latestHeight = latestBlock.Number
		}

		// Backfill from `backfillDepth` blocks ago. It's up to the subscribers to
		// filter out logs they've already dealt with.
		fromBlock := uint64(latestHeight) - sub.config.BlockBackfillDepth()
		if fromBlock > uint64(latestHeight) {
			fromBlock = 0 // Overflow protection
		}

		if fromBlockOverride.Valid {
			fromBlock = uint64(fromBlockOverride.Int64)
		}

		if fromBlock <= uint64(latestHeight) {
			sub.logger.Infow(fmt.Sprintf("LogBroadcaster: Starting backfill of logs from %v blocks...", uint64(latestHeight)-fromBlock), "fromBlock", fromBlock, "latestHeight", latestHeight)
		} else {
			sub.logger.Infow("LogBroadcaster: Backfilling will be nop because fromBlock is above latestHeight",
				"fromBlock", fromBlock, "latestHeight", latestHeight)
		}

		q := ethereum.FilterQuery{
			FromBlock: big.NewInt(int64(fromBlock)),
			Addresses: addresses,
			Topics:    [][]common.Hash{topics},
		}

		logs := make([]types.Log, 0)
		start := time.Now()
		// If we are significantly behind the latest head, there could be a very large (1000s)
		// of blocks to check for logs. We read the blocks in batches to avoid hitting the websocket
		// request data limit.
		// On matic its 5MB [https://github.com/maticnetwork/bor/blob/3de2110886522ab17e0b45f3c4a6722da72b7519/rpc/http.go#L35]
		// On ethereum its 15MB [https://github.com/ethereum/go-ethereum/blob/master/rpc/websocket.go#L40]
		batchSize := int64(sub.config.LogBackfillBatchSize())
		for from := q.FromBlock.Int64(); from <= latestHeight; from += batchSize {

			to := from + batchSize - 1
			if to > latestHeight {
				to = latestHeight
			}
			q.FromBlock = big.NewInt(from)
			q.ToBlock = big.NewInt(to)

			// Each batch gets its own one-minute deadline, nested in ctxParent.
			ctx, cancel := context.WithTimeout(ctxParent, time.Minute)
			batchLogs, err := sub.fetchLogBatch(ctx, q, start)
			cancel()

			elapsed := time.Since(start)

			var elapsedMessage string
			if elapsed > time.Minute {
				elapsedMessage = " (backfill is taking a long time, delaying processing of newest logs - if it's an issue, consider setting the EVM.BlockBackfillSkip configuration variable to \"true\")"
			}
			if err != nil {
				if ctx.Err() != nil {
					sub.logger.Errorw("LogBroadcaster: Deadline exceeded, unable to backfill a batch of logs. Consider setting EVM.LogBackfillBatchSize to a lower value", "err", err, "elapsed", elapsed, "fromBlock", q.FromBlock.String(), "toBlock", q.ToBlock.String())
				} else {
					sub.logger.Errorw("LogBroadcaster: Unable to backfill a batch of logs after retries", "err", err, "fromBlock", q.FromBlock.String(), "toBlock", q.ToBlock.String())
				}
				return true
			}

			sub.logger.Infow(fmt.Sprintf("LogBroadcaster: Fetched a batch of %v logs from %v to %v%s", len(batchLogs), from, to, elapsedMessage), "len", len(batchLogs), "fromBlock", from, "toBlock", to, "remaining", latestHeight-to)

			select {
			case <-sub.chStop:
				return false
			default:
				logs = append(logs, batchLogs...)
			}
		}

		sub.logger.Infof("LogBroadcaster: Fetched a total of %v logs for backfill", len(logs))

		// unbufferred channel, as it will be filled in the goroutine,
		// while the broadcaster's eventLoop is reading from it
		chBackfilledLogs = make(chan types.Log)
		go func() {
			defer close(chBackfilledLogs)
			for _, log := range logs {
				select {
				case chBackfilledLogs <- log:
				case <-sub.chStop:
					return
				}
			}
			sub.logger.Infof("LogBroadcaster: Finished async backfill of %v logs", len(logs))
		}()
		return false
	})
	select {
	case <-sub.chStop:
		abort = true
	default:
		abort = false
	}
	return
}

// fetchLogBatch fetches one FilterLogs batch, retrying with backoff until it
// succeeds or ctx expires; the last error (if any) is returned to the caller.
func (sub *ethSubscriber) fetchLogBatch(ctx context.Context, query ethereum.FilterQuery, start time.Time) ([]types.Log, error) {
	var errOuter error
	var result []types.Log
	utils.RetryWithBackoff(ctx, func() (retry bool) {
		batchLogs, err := sub.ethClient.FilterLogs(ctx, query)

		errOuter = err

		if err != nil {
			if ctx.Err() != nil {
				sub.logger.Errorw("LogBroadcaster: Inner deadline exceeded, unable to backfill a batch of logs. Consider setting EVM.LogBackfillBatchSize to a lower value", "err", err, "elapsed", time.Since(start),
					"fromBlock", query.FromBlock.String(), "toBlock", query.ToBlock.String())
			} else {
				sub.logger.Errorw("LogBroadcaster: Unable to backfill a batch of logs", "err", err,
					"fromBlock", query.FromBlock.String(), "toBlock", query.ToBlock.String())
			}
			return true
		}
		result = batchLogs
		return false
	})
	return result, errOuter
}

// createSubscription creates a new log subscription starting at the current block. If previous logs
// are needed, they must be obtained through backfilling, as subscriptions can only be started from
// the current head.
+func (sub *ethSubscriber) createSubscription(addresses []common.Address, topics []common.Hash) (subscr managedSubscription, abort bool) { + if len(addresses) == 0 { + return newNoopSubscription(), false + } + + ctx, cancel := sub.chStop.NewCtx() + defer cancel() + + utils.RetryWithBackoff(ctx, func() (retry bool) { + + filterQuery := ethereum.FilterQuery{ + Addresses: addresses, + Topics: [][]common.Hash{topics}, + } + chRawLogs := make(chan types.Log) + + sub.logger.Debugw("Calling SubscribeFilterLogs with params", "addresses", addresses, "topics", topics) + + innerSub, err := sub.ethClient.SubscribeFilterLogs(ctx, filterQuery, chRawLogs) + if err != nil { + sub.logger.Errorw("Log subscriber could not create subscription to Ethereum node", "err", err) + return true + } + + subscr = managedSubscriptionImpl{ + subscription: innerSub, + chRawLogs: chRawLogs, + } + return false + }) + select { + case <-sub.chStop: + abort = true + default: + abort = false + } + return +} + +// A managedSubscription acts as wrapper for the Subscription. 
Specifically, the +// managedSubscription closes the log channel as soon as the unsubscribe request is made +type managedSubscription interface { + Err() <-chan error + Logs() chan types.Log + Unsubscribe() +} + +type managedSubscriptionImpl struct { + subscription ethereum.Subscription + chRawLogs chan types.Log +} + +func (sub managedSubscriptionImpl) Err() <-chan error { + return sub.subscription.Err() +} + +func (sub managedSubscriptionImpl) Logs() chan types.Log { + return sub.chRawLogs +} + +func (sub managedSubscriptionImpl) Unsubscribe() { + sub.subscription.Unsubscribe() + <-sub.Err() // ensure sending has stopped before closing the chan + close(sub.chRawLogs) +} + +type noopSubscription struct { + chRawLogs chan types.Log +} + +func newNoopSubscription() noopSubscription { + return noopSubscription{make(chan types.Log)} +} + +func (b noopSubscription) Err() <-chan error { return nil } +func (b noopSubscription) Logs() chan types.Log { return b.chRawLogs } +func (b noopSubscription) Unsubscribe() { close(b.chRawLogs) } diff --git a/core/chains/evm/log/helpers_internal_test.go b/core/chains/evm/log/helpers_internal_test.go new file mode 100644 index 00000000..e862c604 --- /dev/null +++ b/core/chains/evm/log/helpers_internal_test.go @@ -0,0 +1,22 @@ +package log + +import ( + "github.com/ethereum/go-ethereum/core/types" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +// NewTestBroadcaster creates a broadcaster with Pause/Resume enabled. 
+func NewTestBroadcaster(orm ORM, ethClient evmclient.Client, config Config, lggr logger.Logger, highestSavedHead *evmtypes.Head, mailMon *mailbox.Monitor) *broadcaster { + b := NewBroadcaster(orm, ethClient, config, lggr, highestSavedHead, mailMon) + b.testPause, b.testResume = make(chan struct{}), make(chan struct{}) + return b +} + +func (b *broadcaster) ExportedAppendLogChannel(ch1, ch2 <-chan types.Log) chan types.Log { + return b.appendLogChannel(ch1, ch2) +} diff --git a/core/chains/evm/log/helpers_test.go b/core/chains/evm/log/helpers_test.go new file mode 100644 index 00000000..7c63a62a --- /dev/null +++ b/core/chains/evm/log/helpers_test.go @@ -0,0 +1,406 @@ +package log_test + +import ( + "context" + "fmt" + "math/big" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/google/uuid" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/plugin-common/pkg/utils/mailbox/mailboxtest" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + logmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log/mocks" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +type broadcasterHelper struct { + t *testing.T + lb log.BroadcasterInTest + db *sqlx.DB + mockEth *evmtest.MockEth + globalConfig config.AppConfig + config evmconfig.ChainScopedConfig + + // each received channel corresponds to one eth subscription + chchRawLogs chan evmtest.RawSub[types.Log] + toUnsubscribe []func() + pipelineHelper cltest.JobPipelineV2TestHelper +} + +func newBroadcasterHelper(t *testing.T, blockHeight int64, timesSubscribe int, filterLogsResult []types.Log, overridesFn func(*plugin.Config, *plugin.Secrets)) *broadcasterHelper { + // ensure we check before registering any mock Cleanup assertions + testutils.SkipShortDB(t) + + expectedCalls := mockEthClientExpectedCalls{ + SubscribeFilterLogs: timesSubscribe, + HeaderByNumber: 1, + FilterLogs: 1, + FilterLogsResult: filterLogsResult, + } + + chchRawLogs := make(chan evmtest.RawSub[types.Log], timesSubscribe) + mockEth := newMockEthClient(t, chchRawLogs, blockHeight, expectedCalls) + helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, nil, overridesFn) + helper.chchRawLogs = chchRawLogs + helper.mockEth = mockEth + return helper +} + +func newBroadcasterHelperWithEthClient(t *testing.T, ethClient evmclient.Client, highestSeenHead *evmtypes.Head, overridesFn func(*plugin.Config, *plugin.Secrets)) *broadcasterHelper { + globalConfig := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + 
c.Database.LogQueries = ptr(true) + finality := uint32(10) + c.EVM[0].FinalityDepth = &finality + + if overridesFn != nil { + overridesFn(c, s) + } + }) + config := evmtest.NewChainScopedConfig(t, globalConfig) + lggr := logger.Test(t) + mailMon := servicetest.Run(t, mailboxtest.NewMonitor(t)) + + db := pgtest.NewSqlxDB(t) + orm := log.NewORM(db, lggr, config.Database(), cltest.FixtureChainID) + lb := log.NewTestBroadcaster(orm, ethClient, config.EVM(), lggr, highestSeenHead, mailMon) + kst := cltest.NewKeyStore(t, db, globalConfig.Database()) + + cc := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{ + Client: ethClient, + GeneralConfig: globalConfig, + DB: db, + KeyStore: kst.Eth(), + LogBroadcaster: &log.NullBroadcaster{}, + MailMon: mailMon, + }) + + m := make(map[string]legacyevm.Chain) + for _, r := range cc.Slice() { + m[r.Chain().ID().String()] = r.Chain() + } + legacyChains := legacyevm.NewLegacyChains(m, cc.AppConfig().EVMConfigs()) + pipelineHelper := cltest.NewJobPipelineV2(t, config.WebServer(), config.JobPipeline(), config.Database(), legacyChains, db, kst, nil, nil) + + return &broadcasterHelper{ + t: t, + lb: lb, + db: db, + globalConfig: globalConfig, + config: config, + pipelineHelper: pipelineHelper, + toUnsubscribe: make([]func(), 0), + } +} + +func (helper *broadcasterHelper) start() { + err := helper.lb.Start(testutils.Context(helper.t)) + require.NoError(helper.t, err) +} + +func (helper *broadcasterHelper) register(listener log.Listener, contract log.AbigenContract, numConfirmations uint32) { + logs := []generated.AbigenLog{ + flux_aggregator_wrapper.FluxAggregatorNewRound{}, + flux_aggregator_wrapper.FluxAggregatorAnswerUpdated{}, + } + helper.registerWithTopics(listener, contract, logs, numConfirmations) +} + +func (helper *broadcasterHelper) registerWithTopics(listener log.Listener, contract log.AbigenContract, logs []generated.AbigenLog, numConfirmations uint32) { + logsWithTopics := make(map[common.Hash][][]log.Topic) + for _, 
log := range logs { + logsWithTopics[log.Topic()] = nil + } + helper.registerWithTopicValues(listener, contract, numConfirmations, logsWithTopics) +} + +func (helper *broadcasterHelper) registerWithTopicValues(listener log.Listener, contract log.AbigenContract, numConfirmations uint32, + topics map[common.Hash][][]log.Topic) { + + unsubscribe := helper.lb.Register(listener, log.ListenerOpts{ + Contract: contract.Address(), + ParseLog: contract.ParseLog, + LogsWithTopics: topics, + MinIncomingConfirmations: numConfirmations, + }) + + helper.toUnsubscribe = append(helper.toUnsubscribe, unsubscribe) +} + +func (helper *broadcasterHelper) requireBroadcastCount(expectedCount int) { + helper.t.Helper() + g := gomega.NewGomegaWithT(helper.t) + + comparisonFunc := func() (int, error) { + var count struct{ Count int } + err := helper.db.Get(&count, `SELECT count(*) FROM log_broadcasts`) + return count.Count, err + } + + g.Eventually(comparisonFunc, testutils.WaitTimeout(helper.t), time.Second).Should(gomega.Equal(expectedCount)) + g.Consistently(comparisonFunc, 1*time.Second, 200*time.Millisecond).Should(gomega.Equal(expectedCount)) +} + +func (helper *broadcasterHelper) unsubscribeAll() { + for _, unsubscribe := range helper.toUnsubscribe { + unsubscribe() + } + time.Sleep(100 * time.Millisecond) +} +func (helper *broadcasterHelper) stop() { + err := helper.lb.Close() + assert.NoError(helper.t, err) +} + +func newMockContract(t *testing.T) *logmocks.AbigenContract { + addr := testutils.NewAddress() + contract := logmocks.NewAbigenContract(t) + contract.On("Address").Return(addr).Maybe() + return contract +} + +type logOnBlock struct { + logBlockNumber uint64 + blockNumber uint64 + blockHash common.Hash +} + +func (l logOnBlock) String() string { + return fmt.Sprintf("blockInfo(log:%v received on: %v %s)", l.logBlockNumber, l.blockNumber, l.blockHash) +} + +type received struct { + uniqueLogs []types.Log + logs []types.Log + broadcasts []log.Broadcast + sync.Mutex +} + 
+func newReceived(logs []types.Log) *received { + var rec received + rec.logs = logs + rec.uniqueLogs = logs + return &rec +} + +func (rec *received) getLogs() []types.Log { + rec.Lock() + defer rec.Unlock() + r := make([]types.Log, len(rec.logs)) + copy(r, rec.logs) + return r +} + +func (rec *received) getUniqueLogs() []types.Log { + rec.Lock() + defer rec.Unlock() + r := make([]types.Log, len(rec.uniqueLogs)) + copy(r, rec.uniqueLogs) + return r +} + +func (rec *received) logsOnBlocks() []logOnBlock { + rec.Lock() + defer rec.Unlock() + var blocks []logOnBlock + for _, broadcast := range rec.broadcasts { + blocks = append(blocks, logOnBlock{ + logBlockNumber: broadcast.RawLog().BlockNumber, + blockNumber: broadcast.LatestBlockNumber(), + blockHash: broadcast.LatestBlockHash(), + }) + } + return blocks +} + +type simpleLogListener struct { + name string + lggr logger.SugaredLogger + cfg pg.QConfig + received *received + t *testing.T + db *sqlx.DB + jobID int32 + skipMarkingConsumed atomic.Bool +} + +func (helper *broadcasterHelper) newLogListenerWithJob(name string) *simpleLogListener { + t := helper.t + db := helper.db + jb := &job.Job{ + Type: job.Cron, + SchemaVersion: 1, + CronSpec: &job.CronSpec{CronSchedule: "@every 1s"}, + PipelineSpec: &pipeline.Spec{}, + ExternalJobID: uuid.New(), + } + err := helper.pipelineHelper.Jrm.CreateJob(jb) + require.NoError(t, err) + + var rec received + return &simpleLogListener{ + db: db, + lggr: logger.Sugared(logger.Test(t)), + cfg: helper.config.Database(), + name: name, + received: &rec, + t: t, + jobID: jb.ID, + } +} + +func (listener *simpleLogListener) SkipMarkingConsumed(skip bool) { + listener.skipMarkingConsumed.Store(skip) +} + +func (listener *simpleLogListener) HandleLog(lb log.Broadcast) { + listener.received.Lock() + defer listener.received.Unlock() + listener.lggr.Tracef("Listener %v HandleLog for block %v %v received at %v %v", listener.name, lb.RawLog().BlockNumber, lb.RawLog().BlockHash, 
lb.LatestBlockNumber(), lb.LatestBlockHash()) + + listener.received.logs = append(listener.received.logs, lb.RawLog()) + listener.received.broadcasts = append(listener.received.broadcasts, lb) + consumed := listener.handleLogBroadcast(lb) + + if !consumed { + listener.received.uniqueLogs = append(listener.received.uniqueLogs, lb.RawLog()) + } else { + listener.lggr.Warnf("Listener %v: Log was already consumed!", listener.name) + } +} + +func (listener *simpleLogListener) JobID() int32 { + return listener.jobID +} + +func (listener *simpleLogListener) getUniqueLogs() []types.Log { + return listener.received.getUniqueLogs() +} + +func (listener *simpleLogListener) getUniqueLogsBlockNumbers() []uint64 { + var blockNums []uint64 + for _, uniqueLog := range listener.received.getUniqueLogs() { + blockNums = append(blockNums, uniqueLog.BlockNumber) + } + return blockNums +} + +func (listener *simpleLogListener) requireAllReceived(t *testing.T, expectedState *received) { + received := listener.received + defer func() { assert.EqualValues(t, expectedState.getUniqueLogs(), received.getUniqueLogs()) }() + require.Eventually(t, func() bool { + return len(received.getUniqueLogs()) == len(expectedState.getUniqueLogs()) + }, testutils.WaitTimeout(t), time.Second, "len(received.uniqueLogs): %v is not equal len(expectedState.uniqueLogs): %v", len(received.getUniqueLogs()), len(expectedState.getUniqueLogs())) +} + +func (listener *simpleLogListener) handleLogBroadcast(lb log.Broadcast) bool { + t := listener.t + consumed, err := listener.WasAlreadyConsumed(lb) + if !assert.NoError(t, err) { + return false + } + if !consumed && !listener.skipMarkingConsumed.Load() { + + err = listener.MarkConsumed(lb) + if assert.NoError(t, err) { + + consumed2, err := listener.WasAlreadyConsumed(lb) + if assert.NoError(t, err) { + assert.True(t, consumed2) + } + } + } + return consumed +} + +func (listener *simpleLogListener) WasAlreadyConsumed(broadcast log.Broadcast) (bool, error) { + return 
log.NewORM(listener.db, listener.lggr, listener.cfg, cltest.FixtureChainID).WasBroadcastConsumed(broadcast.RawLog().BlockHash, broadcast.RawLog().Index, listener.jobID) +} + +func (listener *simpleLogListener) MarkConsumed(broadcast log.Broadcast) error { + return log.NewORM(listener.db, listener.lggr, listener.cfg, cltest.FixtureChainID).MarkBroadcastConsumed(broadcast.RawLog().BlockHash, broadcast.RawLog().BlockNumber, broadcast.RawLog().Index, listener.jobID) +} + +type mockListener struct { + jobID int32 +} + +func (l *mockListener) JobID() int32 { return l.jobID } +func (l *mockListener) HandleLog(log.Broadcast) {} + +type mockEthClientExpectedCalls struct { + SubscribeFilterLogs int + HeaderByNumber int + FilterLogs int + + FilterLogsResult []types.Log +} + +func newMockEthClient(t *testing.T, chchRawLogs chan<- evmtest.RawSub[types.Log], blockHeight int64, expectedCalls mockEthClientExpectedCalls) *evmtest.MockEth { + ethClient := evmclimocks.NewClient(t) + mockEth := &evmtest.MockEth{EthClient: ethClient} + mockEth.EthClient.On("ConfiguredChainID", mock.Anything).Return(&cltest.FixtureChainID) + mockEth.EthClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything). + Return( + func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchRawLogs <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) error { + return nil + }, + ). + Times(expectedCalls.SubscribeFilterLogs) + + mockEth.EthClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)). + Return(&evmtypes.Head{Number: blockHeight}, nil). + Times(expectedCalls.HeaderByNumber) + + if expectedCalls.FilterLogs > 0 { + mockEth.EthClient.On("FilterLogs", mock.Anything, mock.Anything). 
+ Run(func(args mock.Arguments) { + filterQuery := args.Get(1).(ethereum.FilterQuery) + fromBlock := filterQuery.FromBlock.Int64() + toBlock := filterQuery.ToBlock.Int64() + if mockEth.CheckFilterLogs != nil { + mockEth.CheckFilterLogs(fromBlock, toBlock) + } + }). + Return(expectedCalls.FilterLogsResult, nil). + Times(expectedCalls.FilterLogs) + } + + return mockEth +} diff --git a/core/chains/evm/log/integration_test.go b/core/chains/evm/log/integration_test.go new file mode 100644 index 00000000..b1d8c730 --- /dev/null +++ b/core/chains/evm/log/integration_test.go @@ -0,0 +1,1634 @@ +package log_test + +import ( + "context" + "math/big" + "slices" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/plugin-common/pkg/utils/mailbox/mailboxtest" + + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + logmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log/mocks" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func TestBroadcaster_AwaitsInitialSubscribersOnStartup(t *testing.T) { + g := gomega.NewWithT(t) 
+ + const blockHeight int64 = 123 + helper := newBroadcasterHelper(t, blockHeight, 1, nil, nil) + helper.lb.AddDependents(2) + + var listener = helper.newLogListenerWithJob("A") + helper.register(listener, newMockContract(t), 1) + + helper.start() + defer helper.stop() + + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 0 }, testutils.WaitTimeout(t), 100*time.Millisecond) + g.Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(0))) + + helper.lb.DependentReady() + + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 0 }, testutils.WaitTimeout(t), 100*time.Millisecond) + g.Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(0))) + + helper.lb.DependentReady() + + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 1 }, testutils.WaitTimeout(t), 100*time.Millisecond) + g.Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) + + helper.unsubscribeAll() + + require.Eventually(t, func() bool { return helper.mockEth.UnsubscribeCallCount() == 1 }, testutils.WaitTimeout(t), 100*time.Millisecond) + g.Consistently(func() int32 { return helper.mockEth.UnsubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) +} + +func TestBroadcaster_ResubscribesOnAddOrRemoveContract(t *testing.T) { + testutils.SkipShortDB(t) + const ( + numConfirmations = 1 + numContracts = 3 + blockHeight int64 = 123 + lastStoredBlockHeight = blockHeight - 25 + ) + + backfillTimes := 2 + expectedCalls := mockEthClientExpectedCalls{ + SubscribeFilterLogs: backfillTimes, + HeaderByNumber: backfillTimes, + FilterLogs: backfillTimes, + } + + chchRawLogs := make(chan evmtest.RawSub[types.Log], backfillTimes) + 
mockEth := newMockEthClient(t, chchRawLogs, blockHeight, expectedCalls) + helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, cltest.Head(lastStoredBlockHeight), nil) + helper.mockEth = mockEth + + blockBackfillDepth := helper.config.EVM().BlockBackfillDepth() + + var backfillCount atomic.Int64 + + // the first backfill should use the height of last head saved to the db, + // minus maxNumConfirmations of subscribers and minus blockBackfillDepth + mockEth.CheckFilterLogs = func(fromBlock int64, toBlock int64) { + backfillCount.Store(1) + require.Equal(t, lastStoredBlockHeight-numConfirmations-int64(blockBackfillDepth), fromBlock) + } + + listener := helper.newLogListenerWithJob("initial") + + helper.register(listener, newMockContract(t), numConfirmations) + + for i := 0; i < numContracts; i++ { + listener := helper.newLogListenerWithJob("") + helper.register(listener, newMockContract(t), 1) + } + + helper.start() + defer helper.stop() + + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 1 }, testutils.WaitTimeout(t), time.Second) + gomega.NewWithT(t).Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) + gomega.NewWithT(t).Consistently(func() int32 { return helper.mockEth.UnsubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(0))) + + require.Eventually(t, func() bool { return backfillCount.Load() == 1 }, testutils.WaitTimeout(t), 100*time.Millisecond) + helper.unsubscribeAll() + + // now the backfill must use the blockBackfillDepth + mockEth.CheckFilterLogs = func(fromBlock int64, toBlock int64) { + require.Equal(t, blockHeight-int64(blockBackfillDepth), fromBlock) + backfillCount.Store(2) + } + + listenerLast := helper.newLogListenerWithJob("last") + helper.register(listenerLast, newMockContract(t), 1) + + require.Eventually(t, func() bool { return helper.mockEth.UnsubscribeCallCount() >= 
1 }, testutils.WaitTimeout(t), time.Second) + gomega.NewWithT(t).Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(2))) + gomega.NewWithT(t).Consistently(func() int32 { return helper.mockEth.UnsubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) + + require.Eventually(t, func() bool { return backfillCount.Load() == 2 }, testutils.WaitTimeout(t), time.Second) +} + +func TestBroadcaster_BackfillOnNodeStartAndOnReplay(t *testing.T) { + testutils.SkipShortDB(t) + const ( + lastStoredBlockHeight = 100 + blockHeight int64 = 125 + replayFrom int64 = 40 + ) + + backfillTimes := 2 + expectedCalls := mockEthClientExpectedCalls{ + SubscribeFilterLogs: backfillTimes, + HeaderByNumber: backfillTimes, + FilterLogs: 2, + } + + chchRawLogs := make(chan evmtest.RawSub[types.Log], backfillTimes) + mockEth := newMockEthClient(t, chchRawLogs, blockHeight, expectedCalls) + helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, cltest.Head(lastStoredBlockHeight), nil) + helper.mockEth = mockEth + + maxNumConfirmations := int64(10) + + var backfillCount atomic.Int64 + + listener := helper.newLogListenerWithJob("one") + helper.register(listener, newMockContract(t), uint32(maxNumConfirmations)) + + listener2 := helper.newLogListenerWithJob("two") + helper.register(listener2, newMockContract(t), uint32(2)) + + blockBackfillDepth := helper.config.EVM().BlockBackfillDepth() + + // the first backfill should use the height of last head saved to the db, + // minus maxNumConfirmations of subscribers and minus blockBackfillDepth + mockEth.CheckFilterLogs = func(fromBlock int64, toBlock int64) { + times := backfillCount.Add(1) - 1 + if times == 0 { + require.Equal(t, lastStoredBlockHeight-maxNumConfirmations-int64(blockBackfillDepth), fromBlock) + } else if times == 1 { + require.Equal(t, replayFrom, fromBlock) + } + } + + func() { + helper.start() + defer 
helper.stop() + + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 1 }, testutils.WaitTimeout(t), time.Second) + require.Eventually(t, func() bool { return backfillCount.Load() == 1 }, testutils.WaitTimeout(t), time.Second) + + helper.lb.ReplayFromBlock(replayFrom, false) + + require.Eventually(t, func() bool { return backfillCount.Load() >= 2 }, testutils.WaitTimeout(t), time.Second) + }() + + require.Eventually(t, func() bool { return helper.mockEth.UnsubscribeCallCount() >= 1 }, testutils.WaitTimeout(t), time.Second) +} + +func TestBroadcaster_ReplaysLogs(t *testing.T) { + testutils.SkipShortDB(t) + const ( + blockHeight = 10 + ) + + blocks := cltest.NewBlocks(t, blockHeight+3) + contract, err := flux_aggregator_wrapper.NewFluxAggregator(testutils.NewAddress(), nil) + require.NoError(t, err) + sentLogs := []types.Log{ + blocks.LogOnBlockNum(3, contract.Address()), + blocks.LogOnBlockNum(7, contract.Address()), + } + + mockEth := newMockEthClient(t, make(chan evmtest.RawSub[types.Log], 4), blockHeight, mockEthClientExpectedCalls{ + FilterLogs: 4, + FilterLogsResult: sentLogs, + }) + helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, cltest.Head(blockHeight), nil) + helper.mockEth = mockEth + + listener := helper.newLogListenerWithJob("listener") + helper.register(listener, contract, 2) + + func() { + helper.start() + defer helper.stop() + + // To start, no logs are sent + require.Eventually(t, func() bool { return len(listener.getUniqueLogs()) == 0 }, testutils.WaitTimeout(t), time.Second, + "expected unique logs to be 0 but was %d", len(listener.getUniqueLogs())) + + // Replay from block 2, the logs should be delivered. An incoming head must be simulated to + // trigger log delivery. 
+ helper.lb.ReplayFromBlock(2, false) + <-cltest.SimulateIncomingHeads(t, blocks.Slice(10, 11), helper.lb) + require.Eventually(t, func() bool { return len(listener.getUniqueLogs()) == 2 }, testutils.WaitTimeout(t), time.Second, + "expected unique logs to be 2 but was %d", len(listener.getUniqueLogs())) + + // Replay again, the logs are already marked consumed, so they should not be included in + // getUniqueLogs. + helper.lb.ReplayFromBlock(2, false) + <-cltest.SimulateIncomingHeads(t, blocks.Slice(11, 12), helper.lb) + require.Eventually(t, func() bool { return len(listener.getUniqueLogs()) == 2 }, testutils.WaitTimeout(t), time.Second, + "expected unique logs to be 2 but was %d", len(listener.getUniqueLogs())) + + // Replay again with forceBroadcast. The logs are consumed again. + helper.lb.ReplayFromBlock(2, true) + <-cltest.SimulateIncomingHeads(t, blocks.Slice(12, 13), helper.lb) + require.Eventually(t, func() bool { return len(listener.getUniqueLogs()) == 4 }, testutils.WaitTimeout(t), time.Second, + "expected unique logs to be 4 but was %d", len(listener.getUniqueLogs())) + + }() + + require.Eventually(t, func() bool { return helper.mockEth.UnsubscribeCallCount() >= 1 }, testutils.WaitTimeout(t), time.Second) +} + +func TestBroadcaster_BackfillUnconsumedAfterCrash(t *testing.T) { + contract1 := newMockContract(t) + contract2 := newMockContract(t) + + blocks := cltest.NewBlocks(t, 10) + const ( + log1Block = 1 + log2Block = 4 + + confs = 2 + ) + log1 := blocks.LogOnBlockNum(log1Block, contract1.Address()) + log2 := blocks.LogOnBlockNum(log2Block, contract2.Address()) + logs := []types.Log{log1, log2} + + contract1.On("ParseLog", log1).Return(flux_aggregator_wrapper.FluxAggregatorNewRound{}, nil) + contract2.On("ParseLog", log2).Return(flux_aggregator_wrapper.FluxAggregatorAnswerUpdated{}, nil) + t.Run("pool two logs from subscription, then shut down", func(t *testing.T) { + helper := newBroadcasterHelper(t, 0, 1, logs, func(c *plugin.Config, s 
*plugin.Secrets) { + c.EVM[0].FinalityDepth = ptr[uint32](confs) + }) + lggr := logger.Test(t) + orm := log.NewORM(helper.db, lggr, helper.config.Database(), cltest.FixtureChainID) + + listener := helper.newLogListenerWithJob("one") + listener.SkipMarkingConsumed(true) + listener2 := helper.newLogListenerWithJob("two") + listener2.SkipMarkingConsumed(true) + expBlock := int64(log1.BlockNumber) + helper.simulateHeads(t, listener, listener2, contract1, contract2, confs, blocks.Slice(0, 2), orm, &expBlock, func() { + chRawLogs := <-helper.chchRawLogs + chRawLogs.TrySend(log1) + chRawLogs.TrySend(log2) + }) + // Pool min block in DB and neither listener received a broadcast + blockNum, err := orm.GetPendingMinBlock() + require.NoError(t, err) + require.NotNil(t, blockNum) + require.Equal(t, int64(log1.BlockNumber), *blockNum) + require.Empty(t, listener.getUniqueLogs()) + require.Empty(t, listener2.getUniqueLogs()) + helper.requireBroadcastCount(0) + }) + t.Run("backfill pool with both, then broadcast one, but don't consume", func(t *testing.T) { + helper := newBroadcasterHelper(t, 2, 1, logs, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].FinalityDepth = ptr[uint32](confs) + }) + lggr := logger.Test(t) + orm := log.NewORM(helper.db, lggr, helper.config.Database(), cltest.FixtureChainID) + + listener := helper.newLogListenerWithJob("one") + listener.SkipMarkingConsumed(true) + listener2 := helper.newLogListenerWithJob("two") + listener2.SkipMarkingConsumed(true) + expBlock := int64(log2.BlockNumber) + helper.simulateHeads(t, listener, listener2, contract1, contract2, confs, blocks.Slice(2, 5), orm, &expBlock, nil) + + // Pool min block in DB and one listener received but didn't consume + blockNum, err := orm.GetPendingMinBlock() + require.NoError(t, err) + require.NotNil(t, blockNum) + require.Equal(t, int64(log2.BlockNumber), *blockNum) + require.NotEmpty(t, listener.getUniqueLogs()) + require.Empty(t, listener2.getUniqueLogs()) + c, err := 
orm.WasBroadcastConsumed(log1.BlockHash, log1.Index, listener.JobID()) + require.NoError(t, err) + require.False(t, c) + }) + t.Run("backfill pool and broadcast two, but only consume one", func(t *testing.T) { + helper := newBroadcasterHelper(t, 4, 1, logs, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].FinalityDepth = ptr[uint32](confs) + }) + lggr := logger.Test(t) + orm := log.NewORM(helper.db, lggr, helper.config.Database(), cltest.FixtureChainID) + + listener := helper.newLogListenerWithJob("one") + listener2 := helper.newLogListenerWithJob("two") + listener2.SkipMarkingConsumed(true) + helper.simulateHeads(t, listener, listener2, contract1, contract2, confs, blocks.Slice(5, 8), orm, nil, nil) + + // Pool empty and one consumed but other didn't + blockNum, err := orm.GetPendingMinBlock() + require.NoError(t, err) + require.Nil(t, blockNum) + require.NotEmpty(t, listener.getUniqueLogs()) + require.NotEmpty(t, listener2.getUniqueLogs()) + c, err := orm.WasBroadcastConsumed(log1.BlockHash, log1.Index, listener.JobID()) + require.NoError(t, err) + require.True(t, c) + c, err = orm.WasBroadcastConsumed(log2.BlockHash, log2.Index, listener2.JobID()) + require.NoError(t, err) + require.False(t, c) + }) + t.Run("backfill pool, broadcast and consume one", func(t *testing.T) { + helper := newBroadcasterHelper(t, 7, 1, logs[1:], func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].FinalityDepth = ptr[uint32](confs) + }) + lggr := logger.Test(t) + orm := log.NewORM(helper.db, lggr, helper.config.Database(), cltest.FixtureChainID) + listener := helper.newLogListenerWithJob("one") + listener2 := helper.newLogListenerWithJob("two") + helper.simulateHeads(t, listener, listener2, contract1, contract2, confs, blocks.Slice(8, 9), orm, nil, nil) + + // Pool empty, one broadcasted and consumed + blockNum, err := orm.GetPendingMinBlock() + require.NoError(t, err) + require.Nil(t, blockNum) + require.Empty(t, listener.getUniqueLogs()) + require.NotEmpty(t, 
listener2.getUniqueLogs())
		c, err := orm.WasBroadcastConsumed(log2.BlockHash, log2.Index, listener2.JobID())
		require.NoError(t, err)
		require.True(t, c)
	})
}

// simulateHeads starts the broadcaster, registers both listeners (each against its own
// contract, with `confs` confirmations), feeds `heads` through the head tracker, and then
// waits until the ORM's pending-min-block matches assertBlock (nil means "no pending block").
// If `do` is non-nil it is invoked after the head simulation has been kicked off — callers
// use it to push raw logs into the subscription while heads are arriving.
func (helper *broadcasterHelper) simulateHeads(t *testing.T, listener, listener2 *simpleLogListener,
	contract1, contract2 *logmocks.AbigenContract, confs uint32, heads []*evmtypes.Head, orm log.ORM, assertBlock *int64, do func()) {
	// Two dependents: broadcaster won't fully start until both listeners are registered.
	helper.lb.AddDependents(2)
	helper.start()
	defer helper.stop()
	helper.register(listener, contract1, confs)
	helper.register(listener2, contract2, confs)
	helper.lb.DependentReady()
	helper.lb.DependentReady()

	headsDone := cltest.SimulateIncomingHeads(t, heads, helper.lb)

	if do != nil {
		do()
	}

	<-headsDone

	// Poll until the pending-min-block recorded in the DB agrees with the expectation.
	require.Eventually(t, func() bool {
		blockNum, err := orm.GetPendingMinBlock()
		if !assert.NoError(t, err) {
			return false
		}
		if assertBlock == nil {
			return blockNum == nil
		} else if blockNum == nil {
			return false
		}
		return *assertBlock == *blockNum
	}, testutils.WaitTimeout(t), time.Second)
}

// TestBroadcaster_ShallowBackfillOnNodeStart verifies that when BlockBackfillSkip is set,
// the backfill ignores the height persisted in the DB (lastStoredBlockHeight) and instead
// starts from the current chain height minus BlockBackfillDepth.
func TestBroadcaster_ShallowBackfillOnNodeStart(t *testing.T) {
	testutils.SkipShortDB(t)
	const (
		lastStoredBlockHeight       = 100
		blockHeight           int64 = 125
		backfillDepth               = 15
	)

	backfillTimes := 1
	expectedCalls := mockEthClientExpectedCalls{
		SubscribeFilterLogs: backfillTimes,
		HeaderByNumber:      backfillTimes,
		FilterLogs:          backfillTimes,
	}

	chchRawLogs := make(chan evmtest.RawSub[types.Log], backfillTimes)
	mockEth := newMockEthClient(t, chchRawLogs, blockHeight, expectedCalls)
	helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, cltest.Head(lastStoredBlockHeight), func(c *plugin.Config, s *plugin.Secrets) {
		c.EVM[0].BlockBackfillSkip = ptr(true)
		c.EVM[0].BlockBackfillDepth = ptr[uint32](15)
	})
	helper.mockEth = mockEth

	var backfillCount atomic.Int64

	listener := helper.newLogListenerWithJob("one")
	helper.register(listener, newMockContract(t), uint32(10))

	listener2 := helper.newLogListenerWithJob("two")
	helper.register(listener2, newMockContract(t), uint32(2))

	// the backfill does not use the height from DB because BlockBackfillSkip is true
	mockEth.CheckFilterLogs = func(fromBlock int64, toBlock int64) {
		backfillCount.Store(1)
		require.Equal(t, blockHeight-int64(backfillDepth), fromBlock)
	}

	func() {
		helper.start()
		defer helper.stop()

		require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 1 }, testutils.WaitTimeout(t), time.Second)
		require.Eventually(t, func() bool { return backfillCount.Load() == 1 }, testutils.WaitTimeout(t), time.Second)
	}()

	require.Eventually(t, func() bool { return helper.mockEth.UnsubscribeCallCount() >= 1 }, testutils.WaitTimeout(t), time.Second)
}

// TestBroadcaster_BackfillInBatches verifies that a backfill spanning many blocks is
// split into FilterLogs calls of LogBackfillBatchSize blocks each, and asserts the exact
// from/to range of every batch (the last batch covers a single block).
func TestBroadcaster_BackfillInBatches(t *testing.T) {
	testutils.SkipShortDB(t)
	const (
		numConfirmations            = 1
		blockHeight           int64 = 120
		lastStoredBlockHeight       = blockHeight - 29
		backfillTimes               = 1
		batchSize             int64 = 5
		expectedBatches             = 9
	)

	expectedCalls := mockEthClientExpectedCalls{
		SubscribeFilterLogs: backfillTimes,
		HeaderByNumber:      backfillTimes,
		FilterLogs:          expectedBatches,
	}

	chchRawLogs := make(chan evmtest.RawSub[types.Log], backfillTimes)
	mockEth := newMockEthClient(t, chchRawLogs, blockHeight, expectedCalls)
	helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, cltest.Head(lastStoredBlockHeight), func(c *plugin.Config, s *plugin.Secrets) {
		c.EVM[0].LogBackfillBatchSize = ptr(uint32(batchSize))
	})
	helper.mockEth = mockEth

	blockBackfillDepth := helper.config.EVM().BlockBackfillDepth()

	var backfillCount atomic.Int64

	lggr := logger.Test(t)
	backfillStart := lastStoredBlockHeight - numConfirmations - int64(blockBackfillDepth)
	// the first backfill should start from before the last stored head
	mockEth.CheckFilterLogs = func(fromBlock int64, toBlock int64) {
		times := backfillCount.Add(1) - 1
		lggr.Infof("Log Batch: --------- times %v - %v, %v", times, fromBlock, toBlock)

		if times <= 7 {
			require.Equal(t, backfillStart+batchSize*times, fromBlock)
			require.Equal(t, backfillStart+batchSize*(times+1)-1, toBlock)
		} else {
			// last batch is for a range of 1
			require.Equal(t, int64(120), fromBlock)
			require.Equal(t, int64(120), toBlock)
		}
	}

	listener := helper.newLogListenerWithJob("initial")
	helper.register(listener, newMockContract(t), numConfirmations)
	helper.start()

	defer helper.stop()

	require.Eventually(t, func() bool { return backfillCount.Load() == expectedBatches }, testutils.WaitTimeout(t), time.Second)

	helper.unsubscribeAll()

	require.Eventually(t, func() bool { return helper.mockEth.UnsubscribeCallCount() >= 1 }, testutils.WaitTimeout(t), time.Second)
}

// TestBroadcaster_BackfillALargeNumberOfLogs verifies that backfilling ~3000 blocks in
// batches of 50 issues the expected number of FilterLogs batches (61) and completes.
func TestBroadcaster_BackfillALargeNumberOfLogs(t *testing.T) {
	testutils.SkipShortDB(t)
	g := gomega.NewWithT(t)
	const (
		lastStoredBlockHeight int64 = 10

		// a large number of blocks since lastStoredBlockHeight
		blockHeight int64 = 3000

		backfillTimes        = 1
		batchSize     uint32 = 50
		expectedBatches      = 61
	)

	contract1, err := flux_aggregator_wrapper.NewFluxAggregator(testutils.NewAddress(), nil)
	require.NoError(t, err)

	blocks := cltest.NewBlocks(t, 7)
	backfilledLogs := make([]types.Log, 0)
	for i := 0; i < 50; i++ {
		aLog := blocks.LogOnBlockNum(0, contract1.Address())
		backfilledLogs = append(backfilledLogs, aLog)
	}

	expectedCalls := mockEthClientExpectedCalls{
		SubscribeFilterLogs: backfillTimes,
		HeaderByNumber:      backfillTimes,
		FilterLogs:          expectedBatches,

		FilterLogsResult: backfilledLogs,
	}

	chchRawLogs := make(chan evmtest.RawSub[types.Log], backfillTimes)
	mockEth := newMockEthClient(t, chchRawLogs, blockHeight, expectedCalls)
	helper := newBroadcasterHelperWithEthClient(t, mockEth.EthClient, cltest.Head(lastStoredBlockHeight), func(c *plugin.Config, s *plugin.Secrets) {
		c.EVM[0].LogBackfillBatchSize = ptr(batchSize)
	})
	helper.mockEth = mockEth

	var backfillCount atomic.Int64

	lggr := logger.Test(t)
	mockEth.CheckFilterLogs = func(fromBlock int64, toBlock int64) {
		times := backfillCount.Add(1) - 1
		lggr.Warnf("Log Batch: --------- times %v - %v, %v", times, fromBlock, toBlock)
	}

	listener := helper.newLogListenerWithJob("initial")
	helper.register(listener, newMockContract(t), 1)
	helper.start()
	defer helper.stop()
	g.Eventually(func() int64 { return backfillCount.Load() }, testutils.WaitTimeout(t), time.Second).Should(gomega.Equal(int64(expectedBatches)))

	helper.unsubscribeAll()
	g.Eventually(func() int32 { return helper.mockEth.UnsubscribeCallCount() }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeNumerically(">=", int32(1)))
}

// TestBroadcaster_BroadcastsToCorrectRecipients verifies that each registered listener
// receives only the logs emitted by the contract address it subscribed to.
func TestBroadcaster_BroadcastsToCorrectRecipients(t *testing.T) {
	const blockHeight int64 = 0
	helper := newBroadcasterHelper(t, blockHeight, 1, nil, nil)

	contract1, err := flux_aggregator_wrapper.NewFluxAggregator(testutils.NewAddress(), nil)
	require.NoError(t, err)
	contract2, err := flux_aggregator_wrapper.NewFluxAggregator(testutils.NewAddress(), nil)
	require.NoError(t, err)

	blocks := cltest.NewBlocks(t, 10)
	addr1SentLogs := []types.Log{
		blocks.LogOnBlockNum(1, contract1.Address()),
		blocks.LogOnBlockNum(2, contract1.Address()),
		blocks.LogOnBlockNum(3, contract1.Address()),
	}
	addr2SentLogs := []types.Log{
		blocks.LogOnBlockNum(4, contract2.Address()),
		blocks.LogOnBlockNum(5, contract2.Address()),
		blocks.LogOnBlockNum(6, contract2.Address()),
	}

	listener1 := helper.newLogListenerWithJob("listener 1")
	listener2 := helper.newLogListenerWithJob("listener 2")
	listener3 := helper.newLogListenerWithJob("listener 3")
	listener4 := helper.newLogListenerWithJob("listener 4")

	helper.register(listener1, contract1, 1)
	helper.register(listener2, contract1, 1)
	helper.register(listener3, contract2, 1)
	helper.register(listener4, contract2, 1)

	func() {
		helper.start()
		defer helper.stop()

		headsDone := cltest.SimulateIncomingHeads(t, blocks.Slice(0, 10), helper.lb)

		defer helper.unsubscribeAll()

		chRawLogs := <-helper.chchRawLogs

		for _, log := range addr1SentLogs {
			chRawLogs.TrySend(log)
		}
		for _, log := range addr2SentLogs {
			chRawLogs.TrySend(log)
		}

		<-headsDone
		// 3 logs x 2 listeners per contract x 2 contracts = 12 broadcasts.
		helper.requireBroadcastCount(12)

		requireEqualLogs(t, addr1SentLogs, listener1.received.getUniqueLogs())
		requireEqualLogs(t, addr1SentLogs, listener2.received.getUniqueLogs())

		requireEqualLogs(t, addr2SentLogs, listener3.received.getUniqueLogs())
		requireEqualLogs(t, addr2SentLogs, listener4.received.getUniqueLogs())
	}()
}

// TestBroadcaster_BroadcastsAtCorrectHeights verifies that a listener only receives a log
// once its requested number of confirmations has elapsed: listener2 (8 confirmations)
// receives only the logs from blocks 1 and 2 within the 10 simulated heads, and each
// delivery happens at the head where the confirmation count is satisfied.
func TestBroadcaster_BroadcastsAtCorrectHeights(t *testing.T) {
	const blockHeight int64 = 0
	helper := newBroadcasterHelper(t, blockHeight, 1, nil, nil)
	helper.start()

	contract1, err := flux_aggregator_wrapper.NewFluxAggregator(testutils.NewAddress(), nil)
	require.NoError(t, err)

	blocks := cltest.NewBlocks(t, 10)
	addr1SentLogs := []types.Log{
		blocks.LogOnBlockNum(1, contract1.Address()),
		blocks.LogOnBlockNum(2, contract1.Address()),
		blocks.LogOnBlockNum(3, contract1.Address()),
	}

	listener1 := helper.newLogListenerWithJob("listener 1")
	listener2 := helper.newLogListenerWithJob("listener 2")

	helper.register(listener1, contract1, 1)
	helper.register(listener2, contract1, 8)

	_ = cltest.SimulateIncomingHeads(t, blocks.Slice(0, 10), helper.lb)

	chRawLogs := <-helper.chchRawLogs

	for _, log := range addr1SentLogs {
		chRawLogs.TrySend(log)
	}

	// 3 broadcasts to listener1 + 2 to listener2 (log on block 3 never reaches 8 confs).
	helper.requireBroadcastCount(5)
	helper.stop()

	require.Equal(t, []uint64{1, 2, 3}, listener1.getUniqueLogsBlockNumbers())
	require.Equal(t, []uint64{1, 2}, listener2.getUniqueLogsBlockNumbers())

	requireEqualLogs(t,
		addr1SentLogs,
		listener1.received.getUniqueLogs(),
	)
	requireEqualLogs(t,
		[]types.Log{
			addr1SentLogs[0],
			addr1SentLogs[1],
		},
		listener2.received.getUniqueLogs(),
	)

	// unique sends should be equal to sends overall
	requireEqualLogs(t,
		listener1.received.getUniqueLogs(),
		listener1.received.getLogs(),
	)
	requireEqualLogs(t,
		listener2.received.getUniqueLogs(),
		listener2.received.getLogs(),
	)

	// the logs should have been received at much later heights
	logsOnBlocks := listener2.received.logsOnBlocks()
	expectedLogsOnBlocks := []logOnBlock{
		{
			logBlockNumber: 1,
			blockNumber:    8,
			blockHash:      blocks.Hashes[8],
		},
		{
			logBlockNumber: 2,
			blockNumber:    9,
			blockHash:      blocks.Hashes[9],
		},
	}

	assert.Equal(t, len(logsOnBlocks), len(expectedLogsOnBlocks))
	require.Equal(t, logsOnBlocks, expectedLogsOnBlocks)
}

// TestBroadcaster_DeletesOldLogsAfterNumberOfHeads verifies that with FinalityDepth=1,
// logs are pruned after enough heads pass: a listener registered later receives only the
// still-retained logs, and one registered later still receives none.
func TestBroadcaster_DeletesOldLogsAfterNumberOfHeads(t *testing.T) {
	const blockHeight int64 = 0
	helper := newBroadcasterHelper(t, blockHeight, 1, nil, func(c *plugin.Config, s *plugin.Secrets) {
		c.EVM[0].FinalityDepth = ptr[uint32](1)
	})
	helper.start()
	defer helper.stop()

	contract1, err := flux_aggregator_wrapper.NewFluxAggregator(testutils.NewAddress(), nil)
	require.NoError(t, err)

	blocks := cltest.NewBlocks(t, 20)
	addr1SentLogs := []types.Log{
		blocks.LogOnBlockNum(1, contract1.Address()),
		blocks.LogOnBlockNum(2, contract1.Address()),
		blocks.LogOnBlockNum(3, contract1.Address()),
	}

	listener1 := helper.newLogListenerWithJob("listener 1")
	listener2 := helper.newLogListenerWithJob("listener 2")
	listener3 := helper.newLogListenerWithJob("listener 3")
	listener4 := helper.newLogListenerWithJob("listener 4")

	helper.register(listener1, contract1, 1)
	helper.register(listener2, contract1, 3)

	headsDone := cltest.SimulateIncomingHeads(t, blocks.Slice(0, 6), helper.lb)

	chRawLogs := <-helper.chchRawLogs

	for _, log := range addr1SentLogs {
		chRawLogs.TrySend(log)
	}

	helper.requireBroadcastCount(6)
	<-headsDone

	helper.register(listener3, contract1, 1)
	<-cltest.SimulateIncomingHeads(t, blocks.Slice(6, 9), helper.lb)

	// the new listener should still receive
2 of the 3 logs
	helper.requireBroadcastCount(8)
	require.Equal(t, 2, len(listener3.received.getUniqueLogs()))

	helper.register(listener4, contract1, 1)
	<-cltest.SimulateIncomingHeads(t, blocks.Slice(9, 12), helper.lb)

	// but this one should receive none
	require.Equal(t, 0, len(listener4.received.getUniqueLogs()))
}

// TestBroadcaster_DeletesOldLogsOnlyAfterFinalityDepth verifies that with FinalityDepth=4
// (higher than any listener's NumConfirmations), logs are retained longer: a listener
// registered after the initial heads still receives all 3 logs, while one registered even
// later receives none.
func TestBroadcaster_DeletesOldLogsOnlyAfterFinalityDepth(t *testing.T) {
	const blockHeight int64 = 0
	helper := newBroadcasterHelper(t, blockHeight, 1, nil, func(c *plugin.Config, s *plugin.Secrets) {
		c.EVM[0].FinalityDepth = ptr[uint32](4)
	})
	helper.start()
	defer helper.stop()

	contract1, err := flux_aggregator_wrapper.NewFluxAggregator(testutils.NewAddress(), nil)
	require.NoError(t, err)

	blocks := cltest.NewBlocks(t, 20)
	addr1SentLogs := []types.Log{
		blocks.LogOnBlockNum(1, contract1.Address()),
		blocks.LogOnBlockNum(2, contract1.Address()),
		blocks.LogOnBlockNum(3, contract1.Address()),
	}

	listener1 := helper.newLogListenerWithJob("listener 1")
	listener2 := helper.newLogListenerWithJob("listener 2")
	listener3 := helper.newLogListenerWithJob("listener 3")
	listener4 := helper.newLogListenerWithJob("listener 4")

	helper.register(listener1, contract1, 1)
	helper.register(listener2, contract1, 3)

	headsDone := cltest.SimulateIncomingHeads(t, blocks.Slice(0, 6), helper.lb)

	chRawLogs := <-helper.chchRawLogs

	for _, log := range addr1SentLogs {
		chRawLogs.TrySend(log)
	}

	<-headsDone
	helper.requireBroadcastCount(6)

	helper.register(listener3, contract1, 1)
	<-cltest.SimulateIncomingHeads(t, blocks.Slice(7, 9), helper.lb)

	// the new listener should still receive 3 logs because of finality depth being higher than max NumConfirmations
	helper.requireBroadcastCount(9)
	require.Equal(t, 3, len(listener3.received.getUniqueLogs()))

	helper.register(listener4, contract1, 1)
	<-cltest.SimulateIncomingHeads(t, blocks.Slice(10, 12), helper.lb)

	// but this one should receive none
	require.Equal(t, 0, len(listener4.received.getUniqueLogs()))
}

// TestBroadcaster_FilterByTopicValues verifies per-listener topic-value filtering: five
// listeners subscribe to the same NewRound topic with different [][]log.Topic filters
// (empty = wildcard), and each receives exactly the subset of the 4 emitted logs whose
// indexed fields match its filter.
func TestBroadcaster_FilterByTopicValues(t *testing.T) {
	const blockHeight int64 = 0
	helper := newBroadcasterHelper(t, blockHeight, 1, nil, func(c *plugin.Config, s *plugin.Secrets) {
		c.EVM[0].FinalityDepth = ptr[uint32](3)
	})
	helper.start()
	defer helper.stop()

	contract1, err := flux_aggregator_wrapper.NewFluxAggregator(testutils.NewAddress(), nil)
	require.NoError(t, err)

	blocks := cltest.NewBlocks(t, 20)

	topic := (flux_aggregator_wrapper.FluxAggregatorNewRound{}).Topic()
	field1Value1 := utils.NewHash()
	field1Value2 := utils.NewHash()
	field2Value1 := utils.NewHash()
	field2Value2 := utils.NewHash()
	addr1SentLogs := []types.Log{
		blocks.LogOnBlockNumWithTopics(1, 0, contract1.Address(), []common.Hash{topic, field1Value1, field2Value1}),
		blocks.LogOnBlockNumWithTopics(1, 1, contract1.Address(), []common.Hash{topic, field1Value2, field2Value2}),
		blocks.LogOnBlockNumWithTopics(2, 0, contract1.Address(), []common.Hash{topic, utils.NewHash(), field2Value2}),
		blocks.LogOnBlockNumWithTopics(2, 1, contract1.Address(), []common.Hash{topic, field1Value2, utils.NewHash()}),
	}

	listener0 := helper.newLogListenerWithJob("listener 0")
	listener1 := helper.newLogListenerWithJob("listener 1")
	listener2 := helper.newLogListenerWithJob("listener 2")
	listener3 := helper.newLogListenerWithJob("listener 3")
	listener4 := helper.newLogListenerWithJob("listener 4")

	helper.registerWithTopicValues(listener0, contract1, 1,
		map[common.Hash][][]log.Topic{
			topic: {}, // no filters, so all values allowed
		},
	)
	helper.registerWithTopicValues(listener1, contract1, 1,
		map[common.Hash][][]log.Topic{
			topic: {{} /**/, {}}, // two empty filters, so all values allowed
		},
	)
	helper.registerWithTopicValues(listener2, contract1, 1,
		map[common.Hash][][]log.Topic{
			topic: {
				{log.Topic(field1Value1), log.Topic(field1Value2)} /**/, {log.Topic(field2Value1), log.Topic(field2Value2)}, // two values for each field allowed
			},
		},
	)
	helper.registerWithTopicValues(listener3, contract1, 1,
		map[common.Hash][][]log.Topic{
			topic: {
				{log.Topic(field1Value1), log.Topic(field1Value2)} /**/, {}, // two values allowed for field 1, and any values for field 2
			},
		},
	)
	helper.registerWithTopicValues(listener4, contract1, 1,
		map[common.Hash][][]log.Topic{
			topic: {
				{log.Topic(field1Value1)} /**/, {log.Topic(field2Value1)}, // some values allowed
			},
		},
	)

	headsDone := cltest.SimulateIncomingHeads(t, blocks.Slice(0, 6), helper.lb)

	chRawLogs := <-helper.chchRawLogs

	for _, log := range addr1SentLogs {
		chRawLogs.TrySend(log)
	}

	<-headsDone

	require.Eventually(t, func() bool { return len(listener0.received.getUniqueLogs()) == 4 }, testutils.WaitTimeout(t), 500*time.Millisecond)
	require.Eventually(t, func() bool { return len(listener1.received.getUniqueLogs()) == 4 }, testutils.WaitTimeout(t), 500*time.Millisecond)
	require.Eventually(t, func() bool { return len(listener2.received.getUniqueLogs()) == 2 }, testutils.WaitTimeout(t), 500*time.Millisecond)
	require.Eventually(t, func() bool { return len(listener3.received.getUniqueLogs()) == 3 }, testutils.WaitTimeout(t), 500*time.Millisecond)
	require.Eventually(t, func() bool { return len(listener4.received.getUniqueLogs()) == 1 }, testutils.WaitTimeout(t), 500*time.Millisecond)
}

// TestBroadcaster_BroadcastsWithOneDelayedLog verifies that a log arriving after its
// block's head (and a sibling log) were already processed is still broadcast once
// subsequent heads come in.
func TestBroadcaster_BroadcastsWithOneDelayedLog(t *testing.T) {
	const blockHeight int64 = 0
	helper := newBroadcasterHelper(t, blockHeight, 1, nil, func(c *plugin.Config, s *plugin.Secrets) {
		c.EVM[0].FinalityDepth = ptr[uint32](2)
	})
	helper.start()

	contract1, err := flux_aggregator_wrapper.NewFluxAggregator(testutils.NewAddress(), nil)
	require.NoError(t, err)

	blocks := cltest.NewBlocks(t, 12)
	addr1SentLogs := []types.Log{
		blocks.LogOnBlockNum(1, contract1.Address()),
		blocks.LogOnBlockNum(2, contract1.Address()),
		blocks.LogOnBlockNum(3, contract1.Address()),

		// this log will arrive after head with block number 3 and a previous log for it were already processed
		blocks.LogOnBlockNumWithIndex(3, 1, contract1.Address()),
	}

	listener1 := helper.newLogListenerWithJob("listener 1")
	helper.register(listener1, contract1, 1)

	chRawLogs := <-helper.chchRawLogs

	chRawLogs.TrySend(addr1SentLogs[0])
	chRawLogs.TrySend(addr1SentLogs[1])
	chRawLogs.TrySend(addr1SentLogs[2])

	<-cltest.SimulateIncomingHeads(t, blocks.Slice(0, 4), helper.lb)

	chRawLogs.TrySend(addr1SentLogs[3])

	<-cltest.SimulateIncomingHeads(t, blocks.Slice(4, 9), helper.lb)

	helper.requireBroadcastCount(4)
	helper.stop()
}

// TestBroadcaster_BroadcastsAtCorrectHeightsWithLogsEarlierThanHeads verifies that logs
// sent before any head arrives are all broadcast (exactly once) as heads come in.
func TestBroadcaster_BroadcastsAtCorrectHeightsWithLogsEarlierThanHeads(t *testing.T) {
	const blockHeight int64 = 0
	helper := newBroadcasterHelper(t, blockHeight, 1, nil, nil)
	helper.start()

	contract1, err := flux_aggregator_wrapper.NewFluxAggregator(testutils.NewAddress(), nil)
	require.NoError(t, err)

	blocks := cltest.NewBlocks(t, 10)
	addr1SentLogs := []types.Log{
		blocks.LogOnBlockNum(1, contract1.Address()),
		blocks.LogOnBlockNum(2, contract1.Address()),
		blocks.LogOnBlockNum(3, contract1.Address()),
	}

	listener1 := helper.newLogListenerWithJob("listener 1")
	helper.register(listener1, contract1, 1)

	chRawLogs := <-helper.chchRawLogs

	for _, log := range addr1SentLogs {
		chRawLogs.TrySend(log)
	}

	<-cltest.SimulateIncomingHeads(t, blocks.Slice(0, 10), helper.lb)

	helper.requireBroadcastCount(3)
	helper.stop()

	requireEqualLogs(t,
		addr1SentLogs,
		listener1.received.getUniqueLogs(),
	)

	// unique sends should be equal to sends overall
	requireEqualLogs(t,
		listener1.received.getUniqueLogs(),
		listener1.received.getLogs(),
	)
}

// TestBroadcaster_BroadcastsAtCorrectHeightsWithHeadsEarlierThanLogs verifies the inverse
// ordering: heads processed before the logs arrive still result in each log being
// broadcast exactly once after later heads confirm them.
func TestBroadcaster_BroadcastsAtCorrectHeightsWithHeadsEarlierThanLogs(t *testing.T) {
	const blockHeight int64 = 0
	helper := newBroadcasterHelper(t, blockHeight, 1, nil, func(c *plugin.Config, s *plugin.Secrets) {
		c.EVM[0].FinalityDepth = ptr[uint32](2)
	})
	helper.start()

	contract1, err := flux_aggregator_wrapper.NewFluxAggregator(testutils.NewAddress(), nil)
	require.NoError(t, err)

	blocks := cltest.NewBlocks(t, 12)
	addr1SentLogs := []types.Log{
		blocks.LogOnBlockNum(1, contract1.Address()),
		blocks.LogOnBlockNum(2, contract1.Address()),
		blocks.LogOnBlockNum(3, contract1.Address()),
	}

	listener1 := helper.newLogListenerWithJob("listener 1")
	helper.register(listener1, contract1, 1)

	chRawLogs := <-helper.chchRawLogs

	<-cltest.SimulateIncomingHeads(t, blocks.Slice(0, 7), helper.lb)

	for _, log := range addr1SentLogs {
		chRawLogs.TrySend(log)
	}

	<-cltest.SimulateIncomingHeads(t, blocks.Slice(7, 9), helper.lb)

	helper.requireBroadcastCount(3)
	helper.stop()

	requireEqualLogs(t,
		addr1SentLogs,
		listener1.received.getUniqueLogs(),
	)

	// unique sends should be equal to sends overall
	requireEqualLogs(t,
		listener1.received.getUniqueLogs(),
		listener1.received.getLogs(),
	)
}

// TestBroadcaster_Register_ResubscribesToMostRecentlySeenBlock verifies that each new
// listener registration triggers a resubscription whose backfill FilterLogs query starts
// from the most recently seen block (expectedBlock) and includes the addresses of all
// currently registered contracts.
func TestBroadcaster_Register_ResubscribesToMostRecentlySeenBlock(t *testing.T) {
	testutils.SkipShortDB(t)
	const (
		backfillTimes = 1
		blockHeight   = 15
		expectedBlock = 5
	)
	var (
		ethClient = evmclimocks.NewClient(t)
		contract0 = newMockContract(t)
		contract1 = newMockContract(t)
		contract2 = newMockContract(t)
	)
	mockEth := &evmtest.MockEth{EthClient: ethClient}
	chchRawLogs := make(chan evmtest.RawSub[types.Log], backfillTimes)
	chStarted := make(chan struct{})
	ethClient.On("ConfiguredChainID", mock.Anything).Return(&cltest.FixtureChainID)
	// First subscription: signal startup via chStarted.
	ethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).
		Return(
			func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) ethereum.Subscription {
				defer close(chStarted)
				sub := mockEth.NewSub(t)
				chchRawLogs <- evmtest.NewRawSub(ch, sub.Err())
				return sub
			},
			func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) error {
				return nil
			},
		).
		Once()

	// Three further resubscriptions: one per additional registration plus the replay.
	ethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).
		Return(
			func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) ethereum.Subscription {
				sub := mockEth.NewSub(t)
				chchRawLogs <- evmtest.NewRawSub(ch, sub.Err())
				return sub
			},
			func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) error {
				return nil
			},
		).
		Times(3)

	ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).
		Return(&evmtypes.Head{Number: blockHeight}, nil)

	// Backfill after registration #0: only contract0's address is in the query.
	ethClient.On("FilterLogs", mock.Anything, mock.Anything).
		Run(func(args mock.Arguments) {
			query := args.Get(1).(ethereum.FilterQuery)
			require.Equal(t, big.NewInt(expectedBlock), query.FromBlock)
			require.Contains(t, query.Addresses, contract0.Address())
			require.Len(t, query.Addresses, 1)
		}).
		Return(nil, nil).
		Times(backfillTimes)

	// Backfill after registration #1: contract0 and contract1.
	ethClient.On("FilterLogs", mock.Anything, mock.Anything).
		Run(func(args mock.Arguments) {
			query := args.Get(1).(ethereum.FilterQuery)
			require.Equal(t, big.NewInt(expectedBlock), query.FromBlock)
			require.Contains(t, query.Addresses, contract0.Address())
			require.Contains(t, query.Addresses, contract1.Address())
			require.Len(t, query.Addresses, 2)
		}).
		Return(nil, nil).
		Once()

	ethClient.On("FilterLogs", mock.Anything, mock.Anything).
		// Backfill after registration #2: all three contract addresses.
		Run(func(args mock.Arguments) {
			query := args.Get(1).(ethereum.FilterQuery)
			require.Equal(t, big.NewInt(expectedBlock), query.FromBlock)
			require.Contains(t, query.Addresses, contract0.Address())
			require.Contains(t, query.Addresses, contract1.Address())
			require.Contains(t, query.Addresses, contract2.Address())
			require.Len(t, query.Addresses, 3)
		}).
		Return(nil, nil).
		Once()

	helper := newBroadcasterHelperWithEthClient(t, ethClient, nil, nil)
	helper.lb.AddDependents(1)
	helper.start()
	defer helper.stop()

	listener0 := helper.newLogListenerWithJob("0")
	listener1 := helper.newLogListenerWithJob("1")
	listener2 := helper.newLogListenerWithJob("2")

	// Subscribe #0
	helper.register(listener0, contract0, 1)
	defer helper.unsubscribeAll()
	helper.lb.DependentReady()

	// Await startup
	select {
	case <-chStarted:
	case <-time.After(testutils.WaitTimeout(t)):
		t.Fatal("never started")
	}

	select {
	case <-chchRawLogs:
	case <-time.After(testutils.WaitTimeout(t)):
		t.Fatal("did not subscribe")
	}

	// Subscribe #1
	helper.register(listener1, contract1, 1)

	select {
	case <-chchRawLogs:
	case <-time.After(testutils.WaitTimeout(t)):
		t.Fatal("did not subscribe")
	}

	// Subscribe #2
	helper.register(listener2, contract2, 1)

	select {
	case <-chchRawLogs:
	case <-time.After(testutils.WaitTimeout(t)):
		t.Fatal("did not subscribe")
	}

	// ReplayFrom will not lead to backfill because the number is above current height
	helper.lb.ReplayFromBlock(125, false)

	select {
	case <-chchRawLogs:
	case <-time.After(testutils.WaitTimeout(t)):
		t.Fatal("did not subscribe")
	}

	cltest.EventuallyExpectationsMet(t, ethClient, testutils.WaitTimeout(t), time.Second)
}

// TestBroadcaster_ReceivesAllLogsWhenResubscribing is a table-driven test covering the
// interplay between live logs received before a re-subscription, logs returned by the
// backfill after the re-subscription, and live logs received afterwards — including
// overlapping ranges; listeners must see each expected log exactly once.
func TestBroadcaster_ReceivesAllLogsWhenResubscribing(t *testing.T) {
	addrA := common.HexToAddress("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	addrB := common.HexToAddress("0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")

	blocks := cltest.NewBlocks(t, 20)

	logsA := make(map[uint]types.Log)
	logsB := make(map[uint]types.Log)
	for n := 1; n < 18; n++ {
		logsA[uint(n)] = blocks.LogOnBlockNumWithIndex(uint64(n), 0, addrA)
		logsB[uint(n)] = blocks.LogOnBlockNumWithIndex(uint64(n), 1, addrB)
	}

	tests := []struct {
		name              string
		blockHeight1      int
		blockHeight2      int
		batch1            []uint
		backfillableLogs  []uint
		batch2            []uint
		expectedFilteredA []uint
		expectedFilteredB []uint
	}{
		{
			name: "no backfilled logs, no overlap",

			blockHeight1: 0,
			batch1:       []uint{1, 2},

			blockHeight2:     3,
			backfillableLogs: nil,
			batch2:           []uint{7, 8},

			expectedFilteredA: []uint{1, 2, 7, 8},
			expectedFilteredB: []uint{7, 8},
		},
		{
			name: "no backfilled logs, overlap",

			blockHeight1: 0,
			batch1:       []uint{1, 2},

			blockHeight2:     2,
			backfillableLogs: nil,
			batch2:           []uint{2, 3},

			expectedFilteredA: []uint{1, 2, 3},
			expectedFilteredB: []uint{2, 3},
		},
		{
			name: "backfilled logs, no overlap",

			blockHeight1: 0,
			batch1:       []uint{1, 2},

			blockHeight2:     15,
			backfillableLogs: []uint{11, 12, 15},
			batch2:           []uint{16, 17},

			expectedFilteredA: []uint{1, 2, 11, 12, 15, 16, 17},
			expectedFilteredB: []uint{11, 12, 15, 16, 17},
		},
		{
			name: "backfilled logs, overlap",

			blockHeight1: 0,
			batch1:       []uint{1, 11},

			blockHeight2:     15,
			backfillableLogs: []uint{11, 12, 15},
			batch2:           []uint{16, 17},

			expectedFilteredA: []uint{1, 11, 12, 15, 16, 17},
			expectedFilteredB: []uint{11, 12, 15, 16, 17},
		},
	}

	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			const backfillDepth = 5
			helper := newBroadcasterHelper(t, int64(test.blockHeight1), 2, nil, func(c *plugin.Config, s *plugin.Secrets) {
				// something other than default
				c.EVM[0].BlockBackfillDepth = ptr[uint32](backfillDepth)
			})

			helper.start()
			defer helper.stop()

			logListenerA := helper.newLogListenerWithJob("logListenerA")
			logListenerB := helper.newLogListenerWithJob("logListenerB")

			contractA, err := flux_aggregator_wrapper.NewFluxAggregator(addrA, nil)
			require.NoError(t, err)
			contractB, err := flux_aggregator_wrapper.NewFluxAggregator(addrB, nil)
			require.NoError(t, err)

			// Register listener A
			helper.register(logListenerA, contractA, 1)

			// Send initial logs
			chRawLogs1 := <-helper.chchRawLogs
			headsDone := cltest.SimulateIncomingHeads(t, blocks.Slice(test.blockHeight1, test.blockHeight2+2),
				helper.lb, cltest.HeadTrackableFunc(func(_ context.Context, head *evmtypes.Head) {
					n := uint(head.Number)
					if l, ok := logsA[n]; ok && slices.Contains(test.batch1, n) {
						chRawLogs1.TrySend(l)
					}
				}))

			helper.requireBroadcastCount(len(test.batch1))
			expectedA := newReceived(pickLogs(logsA, test.batch1))
			logListenerA.requireAllReceived(t, expectedA)

			<-headsDone
			helper.mockEth.EthClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(&evmtypes.Head{Number: int64(test.blockHeight2)}, nil).Once()

			combinedLogs := append(pickLogs(logsA, test.backfillableLogs), pickLogs(logsB, test.backfillableLogs)...)
			call := helper.mockEth.EthClient.On("FilterLogs", mock.Anything, mock.Anything).Return(combinedLogs, nil).Once()
			call.Run(func(args mock.Arguments) {
				// Validate that the ethereum.FilterQuery is specified correctly for the backfill that we expect
				fromBlock := args.Get(1).(ethereum.FilterQuery).FromBlock
				expected := big.NewInt(0)

				blockNumber := helper.lb.BackfillBlockNumber()
				if blockNumber.Valid && blockNumber.Int64 > int64(test.blockHeight2-backfillDepth) {
					expected = big.NewInt(blockNumber.Int64)
				} else if test.blockHeight2 > backfillDepth {
					expected = big.NewInt(int64(test.blockHeight2) - backfillDepth)
				}
				require.Equal(t, expected, fromBlock)
			})

			// Register listener B (triggers re-subscription)
			helper.register(logListenerB, contractB, 1)

			// Send second batch of new logs
			chRawLogs2 := <-helper.chchRawLogs
			headsDone = cltest.SimulateIncomingHeads(t, blocks.Slice(test.blockHeight2, -1),
				helper.lb, cltest.HeadTrackableFunc(func(_ context.Context, head *evmtypes.Head) {
					n := uint(head.Number)
					if l, ok := logsA[n]; ok && slices.Contains(test.batch2, n) {
						chRawLogs2.TrySend(l)
					}
					if l, ok := logsB[n]; ok && slices.Contains(test.batch2, n) {
						chRawLogs2.TrySend(l)
					}
				}))

			defer func() { <-headsDone }()

			expectedA = newReceived(pickLogs(logsA, test.expectedFilteredA))
			expectedB := newReceived(pickLogs(logsB, test.expectedFilteredB))
			logListenerA.requireAllReceived(t, expectedA)
			logListenerB.requireAllReceived(t, expectedB)
			helper.requireBroadcastCount(len(test.expectedFilteredA) + len(test.expectedFilteredB))
		})
	}
}

// TestBroadcaster_AppendLogChannel verifies that ExportedAppendLogChannel merges multiple
// log channels into one combined channel that yields every log and closes once all inputs
// are closed (order here matches the staggered sends).
func TestBroadcaster_AppendLogChannel(t *testing.T) {
	logs1 := []types.Log{
		{BlockNumber: 1},
		{BlockNumber: 2},
		{BlockNumber: 3},
		{BlockNumber: 4},
		{BlockNumber: 5},
	}

	logs2 := []types.Log{
		{BlockNumber: 6},
		{BlockNumber: 7},
		{BlockNumber: 8},
		{BlockNumber: 9},
		{BlockNumber: 10},
	}

	logs3 := []types.Log{
		{BlockNumber: 11},
		{BlockNumber: 12},
		{BlockNumber: 13},
		{BlockNumber: 14},
		{BlockNumber: 15},
	}

	ch1 := make(chan types.Log)
	ch2 := make(chan types.Log)
	ch3 := make(chan types.Log)

	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
	mailMon := servicetest.RunHealthy(t, mailboxtest.NewMonitor(t))
	lb := log.NewBroadcaster(nil, ethClient, nil, logger.Test(t), nil, mailMon)
	chCombined := lb.ExportedAppendLogChannel(ch1, ch2)
	chCombined = lb.ExportedAppendLogChannel(chCombined, ch3)

	go func() {
		defer close(ch1)
		for _, log := range logs1 {
			ch1 <- log
		}
	}()
	go func() {
		defer close(ch2)
		for _, log := range logs2 {
			ch2 <- log
		}
	}()
	go func() {
		defer close(ch3)
		for _, log := range logs3 {
			ch3 <- log
		}
	}()

	expected := append(logs1, logs2...)
	expected = append(expected, logs3...)

	var i int
	for log := range chCombined {
		require.Equal(t, expected[i], log)
		i++
	}
}

// TestBroadcaster_InjectsBroadcastRecordFunctions verifies that broadcast logs are routed
// through the contract's ParseLog before delivery and that both logs are broadcast once
// confirmed.
func TestBroadcaster_InjectsBroadcastRecordFunctions(t *testing.T) {
	const blockHeight int64 = 0
	helper := newBroadcasterHelper(t, blockHeight, 1, nil, nil)
	helper.start()
	defer helper.stop()

	blocks := cltest.NewBlocks(t, 20)

	logListener := helper.newLogListenerWithJob("logListener")

	contract := newMockContract(t)
	log1, log2 := blocks.LogOnBlockNum(0, contract.Address()), blocks.LogOnBlockNum(1, contract.Address())
	contract.On("ParseLog", log1).Return(flux_aggregator_wrapper.FluxAggregatorNewRound{}, nil)
	contract.On("ParseLog", log2).Return(flux_aggregator_wrapper.FluxAggregatorAnswerUpdated{}, nil)

	helper.register(logListener, contract, uint32(5))

	headsDone := cltest.SimulateIncomingHeads(t, blocks.Slice(3, 20), helper.lb)

	chRawLogs := <-helper.chchRawLogs

	chRawLogs.TrySend(log1)
	chRawLogs.TrySend(log2)

	<-headsDone
	require.Eventually(t, func() bool { return len(logListener.received.getUniqueLogs()) >= 2 }, testutils.WaitTimeout(t), time.Second)
	helper.requireBroadcastCount(2)
}

// TestBroadcaster_ProcessesLogsFromReorgsAndMissedHead replays an event sequence containing
// a missed head, a fork (with removed logs), and a reorg back to the original chain, and
// asserts the exact delivery order for a 1-confirmation and a 3-confirmation listener.
func TestBroadcaster_ProcessesLogsFromReorgsAndMissedHead(t *testing.T) {
	g := gomega.NewWithT(t)

	const startBlockHeight int64 = 0
	helper := newBroadcasterHelper(t, startBlockHeight, 1, nil, nil)
	helper.start()
	defer helper.stop()

	blocks := cltest.NewBlocks(t, 10)
	blocksForked := blocks.ForkAt(t, 1, 5)

	var (
		addr = testutils.NewAddress()

		log0        = blocks.LogOnBlockNum(0, addr)
		log1        = blocks.LogOnBlockNum(1, addr)
		log2        = blocks.LogOnBlockNum(2, addr)
		log1Removed = blocks.LogOnBlockNumRemoved(1, addr)
		log2Removed = blocks.LogOnBlockNumRemoved(2, addr)
		log1R       = blocksForked.LogOnBlockNum(1, addr)
		log2R       = blocksForked.LogOnBlockNum(2, addr)
		log3R1      = blocksForked.LogOnBlockNumWithIndex(3, 0, addr)
		log3R2      = blocksForked.LogOnBlockNumWithIndex(3, 1, addr) // second log on the same block

		log1RRemoved  = blocksForked.LogOnBlockNumRemoved(1, addr)
		log2RRemoved  = blocksForked.LogOnBlockNumRemoved(2, addr)
		log3R1Removed = blocksForked.LogOnBlockNumWithIndexRemoved(3, 0, addr)
		log3R2Removed = blocksForked.LogOnBlockNumWithIndexRemoved(3, 1, addr)

		events = []interface{}{
			blocks.Head(0), log0,
			log1, // head1 missing
			blocks.Head(2), log2,
			blocks.Head(3),
			blocksForked.Head(1), log1Removed, log2Removed, log1R,
			blocksForked.Head(2), log2R,
			log3R1, blocksForked.Head(3), log3R2,
			blocksForked.Head(4),
			log1RRemoved, log0, log1, blocks.Head(4), log2, log2RRemoved, log3R1Removed, log3R2Removed, // a reorg back to the previous chain
			blocks.Head(5),
			blocks.Head(6),
			blocks.Head(7),
		}

		expectedA = []types.Log{log0, log1, log2, log1R, log2R, log3R1, log3R2}

		// listenerB needs 3 confirmations, so log2 is not sent to after the first reorg,
		// but is later - after the second reorg (back to the previous chain)
		expectedB = []types.Log{log0, log1, log1R, log2R, log2}
	)

	contract, err := flux_aggregator_wrapper.NewFluxAggregator(addr, nil)
	require.NoError(t, err)

	listenerA := helper.newLogListenerWithJob("listenerA")
	listenerB := helper.newLogListenerWithJob("listenerB")
	helper.register(listenerA, contract, 1)
	helper.register(listenerB, contract, 3)

	chRawLogs := <-helper.chchRawLogs

	ctx := testutils.Context(t)
	// Feed heads and logs interleaved, in order, pacing them so the broadcaster
	// processes each event before the next arrives.
	for _, event := range events {
		switch x := event.(type) {
		case *evmtypes.Head:
			helper.lb.OnNewLongestChain(ctx, x)
		case types.Log:
			chRawLogs.TrySend(x)
		}
		time.Sleep(250 * time.Millisecond)
	}

	g.Eventually(func() []uint64 { return listenerA.getUniqueLogsBlockNumbers() }, testutils.WaitTimeout(t), time.Second).
		Should(gomega.Equal([]uint64{0, 1, 2, 1, 2, 3, 3}))
	g.Eventually(func() []uint64 { return listenerB.getUniqueLogsBlockNumbers() }, testutils.WaitTimeout(t), time.Second).
		Should(gomega.Equal([]uint64{0, 1, 1, 2, 2}))

	helper.unsubscribeAll()

	require.Equal(t, expectedA, listenerA.getUniqueLogs())
	require.Equal(t, expectedB, listenerB.getUniqueLogs())
}

func TestBroadcaster_BackfillsForNewListeners(t *testing.T) {
	g := gomega.NewWithT(t)

	const blockHeight int64 = 0
	helper := newBroadcasterHelper(t, blockHeight, 2, nil, nil)
	helper.mockEth.EthClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(&evmtypes.Head{Number: blockHeight}, nil).Times(1)
	helper.mockEth.EthClient.On("FilterLogs", mock.Anything, mock.Anything).Return(nil, nil).Times(1)

	helper.start()
	defer helper.stop()

	addr1 := testutils.NewAddress()
	contract, err := flux_aggregator_wrapper.NewFluxAggregator(addr1, nil)
	require.NoError(t, err)

	listener1 := helper.newLogListenerWithJob("1")
	listener2 := helper.newLogListenerWithJob("2")

	topics1 := []generated.AbigenLog{
		flux_aggregator_wrapper.FluxAggregatorAnswerUpdated{},
	}
	helper.registerWithTopics(listener1, contract, topics1, 1)
	require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 1 }, testutils.WaitTimeout(t), 100*time.Millisecond)
	g.Consistently(func() int32 { return
helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(1))) + + <-helper.chchRawLogs + + topics2 := []generated.AbigenLog{ + flux_aggregator_wrapper.FluxAggregatorNewRound{}, + } + helper.registerWithTopics(listener2, contract, topics2, 1) + require.Eventually(t, func() bool { return helper.mockEth.SubscribeCallCount() == 2 }, testutils.WaitTimeout(t), 100*time.Millisecond) + g.Consistently(func() int32 { return helper.mockEth.SubscribeCallCount() }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(int32(2))) + + helper.unsubscribeAll() +} + +func pickLogs(allLogs map[uint]types.Log, indices []uint) []types.Log { + var picked []types.Log + for _, idx := range indices { + picked = append(picked, allLogs[idx]) + } + return picked +} + +func requireEqualLogs(t *testing.T, expectedLogs, actualLogs []types.Log) { + t.Helper() + require.Equalf(t, len(expectedLogs), len(actualLogs), "log slices are not equal (len %v vs %v): expected(%v), actual(%v)", len(expectedLogs), len(actualLogs), expectedLogs, actualLogs) + for i := range expectedLogs { + require.Equalf(t, expectedLogs[i], actualLogs[i], "log slices are not equal (len %v vs %v): expected(%v), actual(%v)", len(expectedLogs), len(actualLogs), expectedLogs, actualLogs) + } +} + +func TestBroadcaster_BroadcastsWithZeroConfirmations(t *testing.T) { + testutils.SkipShortDB(t) + gm := gomega.NewWithT(t) + + ethClient := evmclimocks.NewClient(t) + mockEth := &evmtest.MockEth{EthClient: ethClient} + ethClient.On("ConfiguredChainID").Return(big.NewInt(0)).Maybe() + logsChCh := make(chan evmtest.RawSub[types.Log]) + ethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything). 
+ Return( + func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) ethereum.Subscription { + sub := mockEth.NewSub(t) + logsChCh <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) error { + return nil + }, + ). + Once() + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)). + Return(&evmtypes.Head{Number: 1}, nil) + ethClient.On("FilterLogs", mock.Anything, mock.Anything). + Return(nil, nil) + + helper := newBroadcasterHelperWithEthClient(t, ethClient, nil, nil) + helper.start() + defer helper.stop() + + addr := common.HexToAddress("0xf0d54349aDdcf704F77AE15b96510dEA15cb7952") + contract1, err := flux_aggregator_wrapper.NewFluxAggregator(addr, nil) + require.NoError(t, err) + + // 3 logs all in the same block + bh := utils.NewHash() + addr1SentLogs := []types.Log{ + { + Address: addr, + BlockHash: bh, + BlockNumber: 2, + Index: 0, + Topics: []common.Hash{ + (flux_aggregator_wrapper.FluxAggregatorNewRound{}).Topic(), + utils.NewHash(), + utils.NewHash(), + }, + Data: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + }, + { + Address: addr, + BlockHash: bh, + BlockNumber: 2, + Index: 1, + Topics: []common.Hash{ + (flux_aggregator_wrapper.FluxAggregatorNewRound{}).Topic(), + utils.NewHash(), + utils.NewHash(), + }, + Data: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + }, + { + Address: addr, + BlockHash: bh, + BlockNumber: 2, + Index: 2, + Topics: []common.Hash{ + (flux_aggregator_wrapper.FluxAggregatorNewRound{}).Topic(), + utils.NewHash(), + utils.NewHash(), + }, + Data: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + }, + } + + listener1 := helper.newLogListenerWithJob("1") + helper.register(listener1, contract1, 0) + listener2 := helper.newLogListenerWithJob("2") + helper.register(listener2, contract1, 0) + + logs := <-logsChCh + + for _, log := range addr1SentLogs { + logs.TrySend(log) + } + // Wait until the logpool has the 3 logs + gm.Eventually(func() bool { + 
helper.lb.Pause() + defer helper.lb.Resume() + return helper.lb.LogsFromBlock(bh) == len(addr1SentLogs) + }, 2*time.Second, 100*time.Millisecond).Should(gomega.BeTrue()) + + // Send a block to trigger sending the logs from the pool + // to the subscribers + helper.lb.OnNewLongestChain(testutils.Context(t), &evmtypes.Head{Number: 2}) + + // The subs should each get exactly 3 broadcasts each + // If we do not receive a broadcast for 1 second + // we assume the log broadcaster is done sending. + gm.Eventually(func() bool { + return len(listener1.getUniqueLogs()) == len(addr1SentLogs) && len(listener2.getUniqueLogs()) == len(addr1SentLogs) + }, 2*time.Second, cltest.DBPollingInterval).Should(gomega.BeTrue()) + gm.Consistently(func() bool { + return len(listener1.getUniqueLogs()) == len(addr1SentLogs) && len(listener2.getUniqueLogs()) == len(addr1SentLogs) + }, 1*time.Second, cltest.DBPollingInterval).Should(gomega.BeTrue()) +} + +func ptr[T any](t T) *T { return &t } diff --git a/core/chains/evm/log/mocks/abigen_contract.go b/core/chains/evm/log/mocks/abigen_contract.go new file mode 100644 index 00000000..3f510c9e --- /dev/null +++ b/core/chains/evm/log/mocks/abigen_contract.go @@ -0,0 +1,81 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + common "github.com/ethereum/go-ethereum/common" + generated "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// AbigenContract is an autogenerated mock type for the AbigenContract type +type AbigenContract struct { + mock.Mock +} + +// Address provides a mock function with given fields: +func (_m *AbigenContract) Address() common.Address { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 common.Address + if rf, ok := ret.Get(0).(func() common.Address); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + return r0 +} + +// ParseLog provides a mock function with given fields: _a0 +func (_m *AbigenContract) ParseLog(_a0 types.Log) (generated.AbigenLog, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ParseLog") + } + + var r0 generated.AbigenLog + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (generated.AbigenLog, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(types.Log) generated.AbigenLog); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(generated.AbigenLog) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewAbigenContract creates a new instance of AbigenContract. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewAbigenContract(t interface { + mock.TestingT + Cleanup(func()) +}) *AbigenContract { + mock := &AbigenContract{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/log/mocks/broadcast.go b/core/chains/evm/log/mocks/broadcast.go new file mode 100644 index 00000000..6d9a8371 --- /dev/null +++ b/core/chains/evm/log/mocks/broadcast.go @@ -0,0 +1,222 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// Broadcast is an autogenerated mock type for the Broadcast type +type Broadcast struct { + mock.Mock +} + +// DecodedLog provides a mock function with given fields: +func (_m *Broadcast) DecodedLog() interface{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DecodedLog") + } + + var r0 interface{} + if rf, ok := ret.Get(0).(func() interface{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + return r0 +} + +// EVMChainID provides a mock function with given fields: +func (_m *Broadcast) EVMChainID() big.Int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EVMChainID") + } + + var r0 big.Int + if rf, ok := ret.Get(0).(func() big.Int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(big.Int) + } + + return r0 +} + +// JobID provides a mock function with given fields: +func (_m *Broadcast) JobID() int32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for JobID") + } + + var r0 int32 + if rf, ok := ret.Get(0).(func() int32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int32) + } + + return r0 +} + +// LatestBlockHash provides a mock function with given fields: +func (_m *Broadcast) LatestBlockHash() common.Hash { + ret := _m.Called() + + if 
len(ret) == 0 { + panic("no return value specified for LatestBlockHash") + } + + var r0 common.Hash + if rf, ok := ret.Get(0).(func() common.Hash); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + return r0 +} + +// LatestBlockNumber provides a mock function with given fields: +func (_m *Broadcast) LatestBlockNumber() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestBlockNumber") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// RawLog provides a mock function with given fields: +func (_m *Broadcast) RawLog() types.Log { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RawLog") + } + + var r0 types.Log + if rf, ok := ret.Get(0).(func() types.Log); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(types.Log) + } + + return r0 +} + +// ReceiptsRoot provides a mock function with given fields: +func (_m *Broadcast) ReceiptsRoot() common.Hash { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ReceiptsRoot") + } + + var r0 common.Hash + if rf, ok := ret.Get(0).(func() common.Hash); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + return r0 +} + +// StateRoot provides a mock function with given fields: +func (_m *Broadcast) StateRoot() common.Hash { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for StateRoot") + } + + var r0 common.Hash + if rf, ok := ret.Get(0).(func() common.Hash); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + return r0 +} + +// String provides a mock function with given fields: +func (_m *Broadcast) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() 
string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// TransactionsRoot provides a mock function with given fields: +func (_m *Broadcast) TransactionsRoot() common.Hash { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TransactionsRoot") + } + + var r0 common.Hash + if rf, ok := ret.Get(0).(func() common.Hash); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + return r0 +} + +// NewBroadcast creates a new instance of Broadcast. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBroadcast(t interface { + mock.TestingT + Cleanup(func()) +}) *Broadcast { + mock := &Broadcast{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/log/mocks/broadcaster.go b/core/chains/evm/log/mocks/broadcaster.go new file mode 100644 index 00000000..fab3b9a5 --- /dev/null +++ b/core/chains/evm/log/mocks/broadcaster.go @@ -0,0 +1,288 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + log "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + types "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +// Broadcaster is an autogenerated mock type for the Broadcaster type +type Broadcaster struct { + mock.Mock +} + +// AddDependents provides a mock function with given fields: n +func (_m *Broadcaster) AddDependents(n int) { + _m.Called(n) +} + +// AwaitDependents provides a mock function with given fields: +func (_m *Broadcaster) AwaitDependents() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AwaitDependents") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Close provides a mock function with given fields: +func (_m *Broadcaster) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DependentReady provides a mock function with given fields: +func (_m *Broadcaster) DependentReady() { + _m.Called() +} + +// HealthReport provides a mock function with given fields: +func (_m *Broadcaster) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// IsConnected provides a mock function with given fields: +func (_m *Broadcaster) IsConnected() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value 
specified for IsConnected") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MarkConsumed provides a mock function with given fields: lb, qopts +func (_m *Broadcaster) MarkConsumed(lb log.Broadcast, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, lb) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for MarkConsumed") + } + + var r0 error + if rf, ok := ret.Get(0).(func(log.Broadcast, ...pg.QOpt) error); ok { + r0 = rf(lb, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MarkManyConsumed provides a mock function with given fields: lbs, qopts +func (_m *Broadcaster) MarkManyConsumed(lbs []log.Broadcast, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, lbs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for MarkManyConsumed") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]log.Broadcast, ...pg.QOpt) error); ok { + r0 = rf(lbs, qopts...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *Broadcaster) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// OnNewLongestChain provides a mock function with given fields: ctx, head +func (_m *Broadcaster) OnNewLongestChain(ctx context.Context, head *types.Head) { + _m.Called(ctx, head) +} + +// Ready provides a mock function with given fields: +func (_m *Broadcaster) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Register provides a mock function with given fields: listener, opts +func (_m *Broadcaster) Register(listener log.Listener, opts log.ListenerOpts) func() { + ret := _m.Called(listener, opts) + + if len(ret) == 0 { + panic("no return value specified for Register") + } + + var r0 func() + if rf, ok := ret.Get(0).(func(log.Listener, log.ListenerOpts) func()); ok { + r0 = rf(listener, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(func()) + } + } + + return r0 +} + +// ReplayFromBlock provides a mock function with given fields: number, forceBroadcast +func (_m *Broadcaster) ReplayFromBlock(number int64, forceBroadcast bool) { + _m.Called(number, forceBroadcast) +} + +// Start provides a mock function with given fields: _a0 +func (_m *Broadcaster) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// WasAlreadyConsumed provides a mock function with given 
fields: lb, qopts +func (_m *Broadcaster) WasAlreadyConsumed(lb log.Broadcast, qopts ...pg.QOpt) (bool, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, lb) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for WasAlreadyConsumed") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(log.Broadcast, ...pg.QOpt) (bool, error)); ok { + return rf(lb, qopts...) + } + if rf, ok := ret.Get(0).(func(log.Broadcast, ...pg.QOpt) bool); ok { + r0 = rf(lb, qopts...) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(log.Broadcast, ...pg.QOpt) error); ok { + r1 = rf(lb, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewBroadcaster creates a new instance of Broadcaster. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewBroadcaster(t interface { + mock.TestingT + Cleanup(func()) +}) *Broadcaster { + mock := &Broadcaster{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/log/models.go b/core/chains/evm/log/models.go new file mode 100644 index 00000000..29633fdc --- /dev/null +++ b/core/chains/evm/log/models.go @@ -0,0 +1,108 @@ +package log + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +//go:generate mockery --quiet --name Broadcast --output ./mocks/ --case=underscore --structname Broadcast --filename broadcast.go + +type ( + // The Broadcast type wraps a types.Log but provides additional functionality + // for determining whether or not the log has been consumed and for marking + // the log as consumed + Broadcast interface { + DecodedLog() interface{} + RawLog() types.Log + String() string + LatestBlockNumber() uint64 + LatestBlockHash() common.Hash + ReceiptsRoot() common.Hash + TransactionsRoot() common.Hash + StateRoot() common.Hash + JobID() int32 + EVMChainID() big.Int + } + + broadcast struct { + latestBlockNumber uint64 + latestBlockHash common.Hash + receiptsRoot common.Hash + transactionsRoot common.Hash + stateRoot common.Hash + decodedLog interface{} + rawLog types.Log + jobID int32 + evmChainID big.Int + } +) + +func (b *broadcast) DecodedLog() interface{} { + return b.decodedLog +} + +func (b *broadcast) LatestBlockNumber() uint64 { + return b.latestBlockNumber +} + +func (b *broadcast) LatestBlockHash() common.Hash { + return b.latestBlockHash +} + +func (b *broadcast) ReceiptsRoot() common.Hash { + return b.receiptsRoot +} + +func (b *broadcast) TransactionsRoot() common.Hash { + return b.transactionsRoot +} + +func (b *broadcast) StateRoot() common.Hash { + return b.stateRoot +} + +func (b *broadcast) RawLog() types.Log { + return 
b.rawLog +} + +func (b *broadcast) SetDecodedLog(newLog interface{}) { + b.decodedLog = newLog +} + +func (b *broadcast) JobID() int32 { + return b.jobID +} + +func (b *broadcast) String() string { + return fmt.Sprintf("Broadcast(JobID:%v,LogAddress:%v,Topics(%d):%v)", b.jobID, b.rawLog.Address, len(b.rawLog.Topics), b.rawLog.Topics) +} + +func (b *broadcast) EVMChainID() big.Int { + return b.evmChainID +} + +func NewLogBroadcast(rawLog types.Log, evmChainID big.Int, decodedLog interface{}) Broadcast { + return &broadcast{ + latestBlockNumber: 0, + latestBlockHash: common.Hash{}, + receiptsRoot: common.Hash{}, + transactionsRoot: common.Hash{}, + stateRoot: common.Hash{}, + decodedLog: decodedLog, + rawLog: rawLog, + jobID: 0, + evmChainID: evmChainID, + } +} + +//go:generate mockery --quiet --name AbigenContract --output ./mocks --case=underscore + +type AbigenContract interface { + Address() common.Address + ParseLog(log types.Log) (generated.AbigenLog, error) +} diff --git a/core/chains/evm/log/orm.go b/core/chains/evm/log/orm.go new file mode 100644 index 00000000..eddf5eea --- /dev/null +++ b/core/chains/evm/log/orm.go @@ -0,0 +1,278 @@ +package log + +import ( + "database/sql" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/utils" + + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// ORM is the interface for log broadcasts. +// - Unconsumed broadcasts are created just before notifying subscribers, who are responsible for marking them consumed. +// - Pending broadcast block numbers are synced to the min from the pool (or deleted when empty) +// - On reboot, backfill considers the min block number from unconsumed and pending broadcasts. 
Additionally, unconsumed +// entries are removed and the pending broadcasts number updated. +type ORM interface { + // FindBroadcasts returns broadcasts for a range of block numbers, both consumed and unconsumed. + FindBroadcasts(fromBlockNum int64, toBlockNum int64) ([]LogBroadcast, error) + // CreateBroadcast inserts an unconsumed log broadcast for jobID. + CreateBroadcast(blockHash common.Hash, blockNumber uint64, logIndex uint, jobID int32, qopts ...pg.QOpt) error + // WasBroadcastConsumed returns true if jobID consumed the log broadcast. + WasBroadcastConsumed(blockHash common.Hash, logIndex uint, jobID int32, qopts ...pg.QOpt) (bool, error) + // MarkBroadcastConsumed marks the log broadcast as consumed by jobID. + MarkBroadcastConsumed(blockHash common.Hash, blockNumber uint64, logIndex uint, jobID int32, qopts ...pg.QOpt) error + // MarkBroadcastsConsumed marks the log broadcasts as consumed by jobID. + MarkBroadcastsConsumed(blockHashes []common.Hash, blockNumbers []uint64, logIndexes []uint, jobIDs []int32, qopts ...pg.QOpt) error + // MarkBroadcastsUnconsumed marks all log broadcasts from all jobs on or after fromBlock as + // unconsumed. + MarkBroadcastsUnconsumed(fromBlock int64, qopts ...pg.QOpt) error + + // SetPendingMinBlock sets the minimum block number for which there are pending broadcasts in the pool, or nil if empty. + SetPendingMinBlock(blockNum *int64, qopts ...pg.QOpt) error + // GetPendingMinBlock returns the minimum block number for which there were pending broadcasts in the pool, or nil if it was empty. + GetPendingMinBlock(qopts ...pg.QOpt) (blockNumber *int64, err error) + + // Reinitialize cleans up the database by removing any unconsumed broadcasts, then updating (if necessary) and + // returning the pending minimum block number. 
+ Reinitialize(qopts ...pg.QOpt) (blockNumber *int64, err error) +} + +type orm struct { + q pg.Q + evmChainID ubig.Big +} + +var _ ORM = (*orm)(nil) + +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig, evmChainID big.Int) *orm { + return &orm{pg.NewQ(db, lggr, cfg), *ubig.New(&evmChainID)} +} + +func (o *orm) WasBroadcastConsumed(blockHash common.Hash, logIndex uint, jobID int32, qopts ...pg.QOpt) (consumed bool, err error) { + query := ` + SELECT consumed FROM log_broadcasts + WHERE block_hash = $1 + AND log_index = $2 + AND job_id = $3 + AND evm_chain_id = $4 + ` + args := []interface{}{ + blockHash, + logIndex, + jobID, + o.evmChainID, + } + q := o.q.WithOpts(qopts...) + err = q.Get(&consumed, query, args...) + if errors.Is(err, sql.ErrNoRows) { + return false, nil + } + return consumed, err +} + +func (o *orm) FindBroadcasts(fromBlockNum int64, toBlockNum int64) ([]LogBroadcast, error) { + var broadcasts []LogBroadcast + query := ` + SELECT block_hash, consumed, log_index, job_id FROM log_broadcasts + WHERE block_number >= $1 + AND block_number <= $2 + AND evm_chain_id = $3 + ` + err := o.q.Select(&broadcasts, query, fromBlockNum, toBlockNum, o.evmChainID) + if err != nil { + return nil, errors.Wrap(err, "failed to find log broadcasts") + } + return broadcasts, err +} + +func (o *orm) CreateBroadcast(blockHash common.Hash, blockNumber uint64, logIndex uint, jobID int32, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + err := q.ExecQ(` + INSERT INTO log_broadcasts (block_hash, block_number, log_index, job_id, created_at, updated_at, consumed, evm_chain_id) + VALUES ($1, $2, $3, $4, NOW(), NOW(), false, $5) + `, blockHash, blockNumber, logIndex, jobID, o.evmChainID) + return errors.Wrap(err, "failed to create log broadcast") +} + +func (o *orm) MarkBroadcastConsumed(blockHash common.Hash, blockNumber uint64, logIndex uint, jobID int32, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) 
+ err := q.ExecQ(` + INSERT INTO log_broadcasts (block_hash, block_number, log_index, job_id, created_at, updated_at, consumed, evm_chain_id) + VALUES ($1, $2, $3, $4, NOW(), NOW(), true, $5) + ON CONFLICT (job_id, block_hash, log_index, evm_chain_id) DO UPDATE + SET consumed = true, updated_at = NOW() + `, blockHash, blockNumber, logIndex, jobID, o.evmChainID) + return errors.Wrap(err, "failed to mark log broadcast as consumed") +} + +// MarkBroadcastsConsumed marks many broadcasts as consumed. +// The lengths of all the provided slices must be equal, otherwise an error is returned. +func (o *orm) MarkBroadcastsConsumed(blockHashes []common.Hash, blockNumbers []uint64, logIndexes []uint, jobIDs []int32, qopts ...pg.QOpt) error { + if !utils.AllEqual(len(blockHashes), len(blockNumbers), len(logIndexes), len(jobIDs)) { + return fmt.Errorf("all arg slice lengths must be equal, got: %d %d %d %d", + len(blockHashes), len(blockNumbers), len(logIndexes), len(jobIDs), + ) + } + + type input struct { + BlockHash common.Hash `db:"blockHash"` + BlockNumber uint64 `db:"blockNumber"` + LogIndex uint `db:"logIndex"` + JobID int32 `db:"jobID"` + ChainID ubig.Big `db:"chainID"` + } + inputs := make([]input, len(blockHashes)) + query := ` +INSERT INTO log_broadcasts (block_hash, block_number, log_index, job_id, created_at, updated_at, consumed, evm_chain_id) +VALUES (:blockHash, :blockNumber, :logIndex, :jobID, NOW(), NOW(), true, :chainID) +ON CONFLICT (job_id, block_hash, log_index, evm_chain_id) DO UPDATE +SET consumed = true, updated_at = NOW(); + ` + for i := range blockHashes { + inputs[i] = input{ + BlockHash: blockHashes[i], + BlockNumber: blockNumbers[i], + LogIndex: logIndexes[i], + JobID: jobIDs[i], + ChainID: o.evmChainID, + } + } + q := o.q.WithOpts(qopts...) + _, err := q.NamedExec(query, inputs) + return errors.Wrap(err, "mark broadcasts consumed") +} + +// MarkBroadcastsUnconsumed implements the ORM interface. 
+func (o *orm) MarkBroadcastsUnconsumed(fromBlock int64, qopts ...pg.QOpt) error {
+	q := o.q.WithOpts(qopts...)
+	err := q.ExecQ(`
+        UPDATE log_broadcasts
+        SET consumed = false
+        WHERE block_number >= $1
+        AND evm_chain_id = $2
+    `, fromBlock, o.evmChainID)
+	return errors.Wrap(err, "failed to mark broadcasts unconsumed")
+}
+
+// Reinitialize restores the pending minimum block number and deletes stale
+// unconsumed rows. It returns the effective pending minimum block number
+// (nil when there is nothing pending).
+func (o *orm) Reinitialize(qopts ...pg.QOpt) (*int64, error) {
+	// Minimum block number from the set of unconsumed logs, which we'll remove later.
+	minUnconsumed, err := o.getUnconsumedMinBlock(qopts...)
+	if err != nil {
+		return nil, err
+	}
+	// Minimum block number from the set of pending logs in the pool.
+	minPending, err := o.GetPendingMinBlock(qopts...)
+	if err != nil {
+		return nil, err
+	}
+	if minUnconsumed == nil {
+		// Nothing unconsumed to consider or cleanup, and pending minimum block number still stands.
+		return minPending, nil
+	}
+	if minPending == nil || *minUnconsumed < *minPending {
+		// Use the lesser minUnconsumed.
+		minPending = minUnconsumed
+		// Update the db so that we can safely delete the unconsumed entries.
+		if err := o.SetPendingMinBlock(minPending, qopts...); err != nil {
+			return nil, err
+		}
+	}
+	// Safe to delete old unconsumed entries since the pending minimum block covers this range.
+	if err := o.removeUnconsumed(qopts...); err != nil {
+		return nil, err
+	}
+	return minPending, nil
+}
+
+// SetPendingMinBlock upserts the pending broadcast block number for this
+// chain. A nil blockNumber clears the stored value.
+func (o *orm) SetPendingMinBlock(blockNumber *int64, qopts ...pg.QOpt) error {
+	q := o.q.WithOpts(qopts...)
+	// EXCLUDED refers to the row proposed for insertion, so the block number
+	// only needs to be bound once.
+	err := q.ExecQ(`
+        INSERT INTO log_broadcasts_pending (evm_chain_id, block_number, created_at, updated_at) VALUES ($1, $2, NOW(), NOW())
+        ON CONFLICT (evm_chain_id) DO UPDATE SET block_number = EXCLUDED.block_number, updated_at = NOW()
+    `, o.evmChainID, blockNumber)
+	return errors.Wrap(err, "failed to set pending broadcast block number")
+}
+
+// GetPendingMinBlock returns the stored pending broadcast block number, or
+// nil when no row exists for this chain.
+func (o *orm) GetPendingMinBlock(qopts ...pg.QOpt) (*int64, error) {
+	q := o.q.WithOpts(qopts...)
+	var blockNumber *int64
+	err := q.Get(&blockNumber, `
+        SELECT block_number FROM log_broadcasts_pending WHERE evm_chain_id = $1
+    `, o.evmChainID)
+	if errors.Is(err, sql.ErrNoRows) {
+		return nil, nil
+	} else if err != nil {
+		return nil, errors.Wrap(err, "failed to get broadcasts pending number")
+	}
+	return blockNumber, nil
+}
+
+// getUnconsumedMinBlock returns the minimum block number among unconsumed
+// broadcasts for this chain, or nil when there are none.
+func (o *orm) getUnconsumedMinBlock(qopts ...pg.QOpt) (*int64, error) {
+	q := o.q.WithOpts(qopts...)
+	var blockNumber *int64
+	err := q.Get(&blockNumber, `
+        SELECT min(block_number) FROM log_broadcasts
+            WHERE evm_chain_id = $1
+            AND consumed = false
+            AND block_number IS NOT NULL
+    `, o.evmChainID)
+	if errors.Is(err, sql.ErrNoRows) {
+		return nil, nil
+	} else if err != nil {
+		return nil, errors.Wrap(err, "failed to get unconsumed broadcasts min block number")
+	}
+	return blockNumber, nil
+}
+
+// removeUnconsumed deletes all unconsumed broadcasts for this chain.
+func (o *orm) removeUnconsumed(qopts ...pg.QOpt) error {
+	q := o.q.WithOpts(qopts...)
+	err := q.ExecQ(`
+        DELETE FROM log_broadcasts
+            WHERE evm_chain_id = $1
+            AND consumed = false
+            AND block_number IS NOT NULL
+    `, o.evmChainID)
+	return errors.Wrap(err, "failed to delete unconsumed broadcasts")
+}
+
+// LogBroadcast - data from log_broadcasts table columns
+type LogBroadcast struct {
+	BlockHash common.Hash
+	Consumed  bool
+	LogIndex  uint
+	JobID     int32
+}
+
+// AsKey converts the broadcast into its unique map key.
+func (b LogBroadcast) AsKey() LogBroadcastAsKey {
+	return LogBroadcastAsKey{
+		b.BlockHash,
+		b.LogIndex,
+		b.JobID,
+	}
+}
+
+// LogBroadcastAsKey - used as key in a map to filter out already consumed logs
+type LogBroadcastAsKey struct {
+	BlockHash common.Hash
+	LogIndex  uint
+	JobId     int32
+}
+
+// NewLogBroadcastAsKey builds the map key for a raw log and the listener it
+// would be delivered to.
+func NewLogBroadcastAsKey(log types.Log, listener Listener) LogBroadcastAsKey {
+	return LogBroadcastAsKey{
+		log.BlockHash,
+		log.Index,
+		listener.JobID(),
+	}
+}
diff --git a/core/chains/evm/log/orm_test.go b/core/chains/evm/log/orm_test.go
new file mode 100644
index 00000000..97a35560
--- /dev/null
+++ b/core/chains/evm/log/orm_test.go
@@ -0,0 +1,297 @@
+package log_test
+
+import (
+	"math/big"
+	"math/rand"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"gopkg.in/guregu/null.v4"
+
+	"github.com/goplugin/plugin-common/pkg/logger"
+	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/log"
+	"github.com/goplugin/pluginv3.0/v2/core/internal/cltest"
+	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest"
+	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest"
+	"github.com/goplugin/pluginv3.0/v2/core/services/pg"
+)
+
+// TestORM_broadcasts exercises the create/was-consumed/mark-consumed life
+// cycle of a single broadcast, plus the batch MarkBroadcastsConsumed happy
+// path and its mismatched-slice-lengths failure path.
+// NOTE: subtests share the one rawLog and run in order; later subtests
+// depend on the state left behind by earlier ones.
+func TestORM_broadcasts(t *testing.T) {
+	db := pgtest.NewSqlxDB(t)
+	cfg := configtest.NewGeneralConfig(t, nil)
+	lggr := logger.Test(t)
+	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
+
+	orm := log.NewORM(db, lggr, cfg.Database(), cltest.FixtureChainID)
+
+	_, addr := cltest.MustInsertRandomKey(t, ethKeyStore)
+	specV2 := cltest.MustInsertV2JobSpec(t, db, addr)
+
+	const selectQuery = `SELECT consumed FROM log_broadcasts
+		WHERE block_hash = $1 AND block_number = $2 AND log_index = $3 AND job_id = $4 AND evm_chain_id = $5`
+
+	listener := &mockListener{specV2.ID}
+
+	rawLog := cltest.RandomLog(t)
+	queryArgs := []interface{}{rawLog.BlockHash, rawLog.BlockNumber, rawLog.Index, listener.JobID(), cltest.FixtureChainID.String()}
+
+	// No rows
+	res, err := db.Exec(selectQuery, queryArgs...)
+	require.NoError(t, err)
+	rowsAffected, err := res.RowsAffected()
+	require.NoError(t, err)
+	require.Zero(t, rowsAffected)
+
+	t.Run("WasBroadcastConsumed_DNE", func(t *testing.T) {
+		_, err := orm.WasBroadcastConsumed(rawLog.BlockHash, rawLog.Index, listener.JobID())
+		require.NoError(t, err)
+	})
+
+	// require.True on t.Run aborts the whole test if this setup subtest fails,
+	// since everything below depends on the broadcast existing.
+	require.True(t, t.Run("CreateBroadcast", func(t *testing.T) {
+		err := orm.CreateBroadcast(rawLog.BlockHash, rawLog.BlockNumber, rawLog.Index, listener.JobID())
+		require.NoError(t, err)
+
+		var consumed null.Bool
+		err = db.Get(&consumed, selectQuery, queryArgs...)
+		require.NoError(t, err)
+		require.Equal(t, null.BoolFrom(false), consumed)
+	}))
+
+	t.Run("WasBroadcastConsumed_false", func(t *testing.T) {
+		was, err := orm.WasBroadcastConsumed(rawLog.BlockHash, rawLog.Index, listener.JobID())
+		require.NoError(t, err)
+		require.False(t, was)
+	})
+
+	require.True(t, t.Run("MarkBroadcastConsumed", func(t *testing.T) {
+		err := orm.MarkBroadcastConsumed(rawLog.BlockHash, rawLog.BlockNumber, rawLog.Index, listener.JobID())
+		require.NoError(t, err)
+
+		var consumed null.Bool
+		err = db.Get(&consumed, selectQuery, queryArgs...)
+		require.NoError(t, err)
+		require.Equal(t, null.BoolFrom(true), consumed)
+	}))
+
+	t.Run("MarkBroadcastsConsumed Success", func(t *testing.T) {
+		var (
+			err          error
+			blockHashes  []common.Hash
+			blockNumbers []uint64
+			logIndexes   []uint
+			jobIDs       []int32
+		)
+		for i := 0; i < 3; i++ {
+			l := cltest.RandomLog(t)
+			err = orm.CreateBroadcast(l.BlockHash, l.BlockNumber, l.Index, listener.JobID())
+			require.NoError(t, err)
+			blockHashes = append(blockHashes, l.BlockHash)
+			blockNumbers = append(blockNumbers, l.BlockNumber)
+			logIndexes = append(logIndexes, l.Index)
+			jobIDs = append(jobIDs, listener.JobID())
+
+		}
+		err = orm.MarkBroadcastsConsumed(blockHashes, blockNumbers, logIndexes, jobIDs, pg.WithLongQueryTimeout())
+		require.NoError(t, err)
+
+		for i := range blockHashes {
+			was, err := orm.WasBroadcastConsumed(blockHashes[i], logIndexes[i], jobIDs[i])
+			require.NoError(t, err)
+			require.True(t, was)
+		}
+	})
+
+	t.Run("MarkBroadcastsConsumed Failure", func(t *testing.T) {
+		var (
+			err          error
+			blockHashes  []common.Hash
+			blockNumbers []uint64
+			logIndexes   []uint
+			jobIDs       []int32
+		)
+		for i := 0; i < 5; i++ {
+			l := cltest.RandomLog(t)
+			err = orm.CreateBroadcast(l.BlockHash, l.BlockNumber, l.Index, listener.JobID())
+			require.NoError(t, err)
+			blockHashes = append(blockHashes, l.BlockHash)
+			blockNumbers = append(blockNumbers, l.BlockNumber)
+			logIndexes = append(logIndexes, l.Index)
+			jobIDs = append(jobIDs, listener.JobID())
+		}
+		// Truncated blockHashes makes the slice lengths unequal, which must error.
+		err = orm.MarkBroadcastsConsumed(blockHashes[:len(blockHashes)-2], blockNumbers, logIndexes, jobIDs, pg.WithLongQueryTimeout())
+		require.Error(t, err)
+	})
+
+	t.Run("WasBroadcastConsumed_true", func(t *testing.T) {
+		was, err := orm.WasBroadcastConsumed(rawLog.BlockHash, rawLog.Index, listener.JobID())
+		require.NoError(t, err)
+		require.True(t, was)
+	})
+}
+
+// TestORM_pending round-trips the pending min block number: nil initially,
+// set to a value, then cleared back to nil.
+func TestORM_pending(t *testing.T) {
+	db := pgtest.NewSqlxDB(t)
+	cfg := configtest.NewGeneralConfig(t, nil)
+	lggr := logger.Test(t)
+	orm := log.NewORM(db, lggr, cfg.Database(), cltest.FixtureChainID)
+
+	num, err := orm.GetPendingMinBlock()
+	require.NoError(t, err)
+	require.Nil(t, num)
+
+	var num10 int64 = 10
+	err = orm.SetPendingMinBlock(&num10)
+	require.NoError(t, err)
+
+	num, err = orm.GetPendingMinBlock()
+	require.NoError(t, err)
+	require.Equal(t, num10, *num)
+
+	err = orm.SetPendingMinBlock(nil)
+	require.NoError(t, err)
+
+	num, err = orm.GetPendingMinBlock()
+	require.NoError(t, err)
+	require.Nil(t, num)
+}
+
+// TestORM_MarkUnconsumed checks that MarkBroadcastsUnconsumed(38) flips
+// consumed back to false for broadcasts at block >= 38 only.
+func TestORM_MarkUnconsumed(t *testing.T) {
+	db := pgtest.NewSqlxDB(t)
+	cfg := configtest.NewGeneralConfig(t, nil)
+	lggr := logger.Test(t)
+	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
+
+	orm := log.NewORM(db, lggr, cfg.Database(), cltest.FixtureChainID)
+
+	_, addr1 := cltest.MustInsertRandomKey(t, ethKeyStore)
+	job1 := cltest.MustInsertV2JobSpec(t, db, addr1)
+
+	_, addr2 := cltest.MustInsertRandomKey(t, ethKeyStore)
+	job2 := cltest.MustInsertV2JobSpec(t, db, addr2)
+
+	// Block 34: strictly below the cutoff, must stay consumed.
+	logBefore := cltest.RandomLog(t)
+	logBefore.BlockNumber = 34
+	require.NoError(t,
+		orm.CreateBroadcast(logBefore.BlockHash, logBefore.BlockNumber, logBefore.Index, job1.ID))
+	require.NoError(t,
+		orm.MarkBroadcastConsumed(logBefore.BlockHash, logBefore.BlockNumber, logBefore.Index, job1.ID))
+
+	// Block 38: exactly at the cutoff (inclusive).
+	logAt := cltest.RandomLog(t)
+	logAt.BlockNumber = 38
+	require.NoError(t,
+		orm.CreateBroadcast(logAt.BlockHash, logAt.BlockNumber, logAt.Index, job1.ID))
+	require.NoError(t,
+		orm.MarkBroadcastConsumed(logAt.BlockHash, logAt.BlockNumber, logAt.Index, job1.ID))
+
+	// Block 40: above the cutoff.
+	logAfter := cltest.RandomLog(t)
+	logAfter.BlockNumber = 40
+	require.NoError(t,
+		orm.CreateBroadcast(logAfter.BlockHash, logAfter.BlockNumber, logAfter.Index, job2.ID))
+	require.NoError(t,
+		orm.MarkBroadcastConsumed(logAfter.BlockHash, logAfter.BlockNumber, logAfter.Index, job2.ID))
+
+	// logAt and logAfter should now be marked unconsumed. logBefore is still consumed.
+	require.NoError(t, orm.MarkBroadcastsUnconsumed(38))
+
+	consumed, err := orm.WasBroadcastConsumed(logBefore.BlockHash, logBefore.Index, job1.ID)
+	require.NoError(t, err)
+	require.True(t, consumed)
+
+	consumed, err = orm.WasBroadcastConsumed(logAt.BlockHash, logAt.Index, job1.ID)
+	require.NoError(t, err)
+	require.False(t, consumed)
+
+	consumed, err = orm.WasBroadcastConsumed(logAfter.BlockHash, logAfter.Index, job2.ID)
+	require.NoError(t, err)
+	require.False(t, consumed)
+}
+
+// TestORM_Reinitialize table-tests Reinitialize across combinations of a
+// stored pending block number and consumed/unconsumed broadcasts, asserting
+// the resulting pending number and that unconsumed rows are removed.
+func TestORM_Reinitialize(t *testing.T) {
+	type TestLogBroadcast struct {
+		BlockNumber big.Int
+		log.LogBroadcast
+	}
+	var unconsumed = func(blockNum int64) TestLogBroadcast {
+		hash := common.BigToHash(big.NewInt(rand.Int63()))
+		return TestLogBroadcast{*big.NewInt(blockNum),
+			log.LogBroadcast{hash, false, uint(rand.Uint32()), 0},
+		}
+	}
+	var consumed = func(blockNum int64) TestLogBroadcast {
+		hash := common.BigToHash(big.NewInt(rand.Int63()))
+		return TestLogBroadcast{*big.NewInt(blockNum),
+			log.LogBroadcast{hash, true, uint(rand.Uint32()), 0},
+		}
+	}
+
+	tests := []struct {
+		name               string
+		pendingBlockNum    *int64
+		expPendingBlockNum *int64
+		broadcasts         []TestLogBroadcast
+	}{
+		{name: "empty", expPendingBlockNum: nil},
+		{name: "both-delete", expPendingBlockNum: null.IntFrom(10).Ptr(),
+			pendingBlockNum: null.IntFrom(10).Ptr(), broadcasts: []TestLogBroadcast{
+				unconsumed(11), unconsumed(12),
+				consumed(9),
+			}},
+		{name: "both-update", expPendingBlockNum: null.IntFrom(9).Ptr(),
+			pendingBlockNum: null.IntFrom(10).Ptr(), broadcasts: []TestLogBroadcast{
+				unconsumed(9), unconsumed(10),
+				consumed(8),
+			}},
+		{name: "broadcasts-update", expPendingBlockNum: null.IntFrom(9).Ptr(),
+			pendingBlockNum: nil, broadcasts: []TestLogBroadcast{
+				unconsumed(9), unconsumed(10),
+				consumed(8),
+			}},
+		{name: "pending-noop", expPendingBlockNum: null.IntFrom(10).Ptr(),
+			pendingBlockNum: null.IntFrom(10).Ptr(), broadcasts: []TestLogBroadcast{
+				consumed(8), consumed(9),
+			}},
+	}
+
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			db := pgtest.NewSqlxDB(t)
+			cfg := configtest.NewGeneralConfig(t, nil)
+			lggr := logger.Test(t)
+			orm := log.NewORM(db, lggr, cfg.Database(), cltest.FixtureChainID)
+
+			jobID := cltest.MustInsertV2JobSpec(t, db, common.BigToAddress(big.NewInt(rand.Int63()))).ID
+
+			for _, b := range tt.broadcasts {
+				if b.Consumed {
+					err := orm.MarkBroadcastConsumed(b.BlockHash, b.BlockNumber.Uint64(), b.LogIndex, jobID)
+					require.NoError(t, err)
+				} else {
+					err := orm.CreateBroadcast(b.BlockHash, b.BlockNumber.Uint64(), b.LogIndex, jobID)
+					require.NoError(t, err)
+				}
+			}
+			if tt.pendingBlockNum != nil {
+				require.NoError(t, orm.SetPendingMinBlock(tt.pendingBlockNum))
+			}
+
+			pendingBlockNum, err := orm.Reinitialize()
+			require.NoError(t, err)
+			assert.Equal(t, tt.expPendingBlockNum, pendingBlockNum)
+
+			pendingBlockNum, err = orm.GetPendingMinBlock()
+			if assert.NoError(t, err) {
+				assert.Equal(t, tt.expPendingBlockNum, pendingBlockNum)
+			}
+
+			// After Reinitialize, only consumed broadcasts may remain.
+			bs, err := orm.FindBroadcasts(0, 20)
+			if assert.NoError(t, err) {
+				for _, b := range bs {
+					assert.True(t, b.Consumed)
+				}
+			}
+		})
+	}
+}
diff --git a/core/chains/evm/log/pool.go b/core/chains/evm/log/pool.go
new file mode 100644
index 00000000..c9cecbb6
--- /dev/null
+++ b/core/chains/evm/log/pool.go
@@ -0,0 +1,198 @@
+package log
+
+import (
+	"math"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	heaps
"github.com/theodesp/go-heaps"
+	pairingHeap "github.com/theodesp/go-heaps/pairing"
+
+	"github.com/goplugin/plugin-common/pkg/logger"
+)
+
+// The Log Pool interface.
+type iLogPool interface {
+
+	// AddLog adds log to the pool and returns true if its block number is a new minimum.
+	addLog(log types.Log) bool
+
+	// GetAndDeleteAll purges the pool completely, returns all logs, and also the minimum and
+	// maximum block numbers retrieved.
+	getAndDeleteAll() ([]logsOnBlock, int64, int64)
+
+	// GetLogsToSend returns all logs upto the block number specified in latestBlockNum.
+	// Also returns the minimum block number in the result.
+	// In case the pool is empty, returns empty results, and min block number=0
+	getLogsToSend(latestBlockNum int64) ([]logsOnBlock, int64)
+
+	// DeleteOlderLogs deletes all logs in blocks that are less than specific block number keptDepth.
+	// Also returns the remaining minimum block number in pool after these deletions.
+	// Returns nil if this ends up emptying the pool.
+	deleteOlderLogs(keptDepth int64) *int64
+
+	// RemoveBlock removes all logs for the block identified by provided Block hash and number.
+	removeBlock(hash common.Hash, number uint64)
+
+	// TestOnly_getNumLogsForBlock FOR TESTING USE ONLY.
+	// Returns all logs for the provided block hash.
+	testOnly_getNumLogsForBlock(bh common.Hash) int
+}
+
+// logPool is the in-memory implementation of iLogPool.
+// NOTE(review): methods are not synchronized; presumably the caller
+// serializes access — confirm against the broadcaster's usage.
+type logPool struct {
+	// A mapping of block numbers to a set of block hashes for all
+	// the logs in the pool.
+	hashesByBlockNumbers map[uint64]map[common.Hash]struct{}
+
+	// A mapping of block hashes, to tx index within block, to log index, to logs
+	logsByBlockHash map[common.Hash]map[uint]map[uint]types.Log
+
+	// This min-heap maintains block numbers of logs in the pool.
+	// it helps us easily determine the minimum log block number
+	// in the pool (while the set of log block numbers is dynamically changing).
+	// NOTE: the heap may contain duplicate entries for the same block number;
+	// consumers tolerate this by checking hashesByBlockNumbers for existence.
+	heap   *pairingHeap.PairHeap
+	logger logger.Logger
+}
+
+// newLogPool constructs an empty pool with a namespaced logger.
+func newLogPool(lggr logger.Logger) *logPool {
+	return &logPool{
+		hashesByBlockNumbers: make(map[uint64]map[common.Hash]struct{}),
+		logsByBlockHash:      make(map[common.Hash]map[uint]map[uint]types.Log),
+		heap:                 pairingHeap.New(),
+		logger:               logger.Named(lggr, "LogPool"),
+	}
+}
+
+// addLog stores the log under (blockHash, txIndex, logIndex) and pushes its
+// block number onto the min-heap. Returns true when the pool was empty or the
+// log's block number is strictly below the previous minimum.
+func (pool *logPool) addLog(log types.Log) bool {
+	_, exists := pool.hashesByBlockNumbers[log.BlockNumber]
+	if !exists {
+		pool.hashesByBlockNumbers[log.BlockNumber] = make(map[common.Hash]struct{})
+	}
+	pool.hashesByBlockNumbers[log.BlockNumber][log.BlockHash] = struct{}{}
+	if _, exists := pool.logsByBlockHash[log.BlockHash]; !exists {
+		pool.logsByBlockHash[log.BlockHash] = make(map[uint]map[uint]types.Log)
+	}
+	if _, exists := pool.logsByBlockHash[log.BlockHash][log.TxIndex]; !exists {
+		pool.logsByBlockHash[log.BlockHash][log.TxIndex] = make(map[uint]types.Log)
+	}
+	pool.logsByBlockHash[log.BlockHash][log.TxIndex][log.Index] = log
+	min := pool.heap.FindMin()
+	pool.heap.Insert(Uint64(log.BlockNumber))
+	pool.logger.Debugw("Inserted block to log pool", "blockNumber", log.BlockNumber, "blockHash", log.BlockHash, "index", log.Index, "prevMinBlockNumber", min)
+	// first or new min
+	return min == nil || log.BlockNumber < uint64(min.(Uint64))
+}
+
+// getAndDeleteAll drains the heap and both maps, returning every stored log
+// grouped by block, plus the lowest and highest block numbers seen.
+// NOTE: on an empty pool this returns (empty, math.MaxInt64, 0).
+func (pool *logPool) getAndDeleteAll() ([]logsOnBlock, int64, int64) {
+	logsToReturn := make([]logsOnBlock, 0)
+	lowest := int64(math.MaxInt64)
+	highest := int64(0)
+
+	for {
+		item := pool.heap.DeleteMin()
+		if item == nil {
+			break
+		}
+
+		blockNum := uint64(item.(Uint64))
+		hashes, exists := pool.hashesByBlockNumbers[blockNum]
+		if exists {
+			// exists is false for duplicate heap entries already processed.
+			if int64(blockNum) < lowest {
+				lowest = int64(blockNum)
+			}
+			if int64(blockNum) > highest {
+				highest = int64(blockNum)
+			}
+			for hash := range hashes {
+				logsToReturn = append(logsToReturn, newLogsOnBlock(blockNum, pool.logsByBlockHash[hash]))
+				delete(pool.hashesByBlockNumbers[blockNum], hash)
+				delete(pool.logsByBlockHash, hash)
+			}
+		}
+
+		delete(pool.hashesByBlockNumbers, blockNum)
+	}
+	return logsToReturn, lowest, highest
+}
+
+// getLogsToSend returns (without removing) all logs from the minimum block in
+// the pool up to and including latestBlockNum.
+func (pool *logPool) getLogsToSend(latestBlockNum int64) ([]logsOnBlock, int64) {
+	logsToReturn := make([]logsOnBlock, 0)
+
+	// gathering logs to return - from min block number kept, to latestBlockNum
+	minBlockNumToSendItem := pool.heap.FindMin()
+	if minBlockNumToSendItem == nil {
+		return logsToReturn, 0
+	}
+	minBlockNumToSend := int64(minBlockNumToSendItem.(Uint64))
+
+	for num := minBlockNumToSend; num <= latestBlockNum; num++ {
+		for hash := range pool.hashesByBlockNumbers[uint64(num)] {
+			logsToReturn = append(logsToReturn, newLogsOnBlock(uint64(num), pool.logsByBlockHash[hash]))
+		}
+	}
+	return logsToReturn, minBlockNumToSend
+}
+
+// deleteOlderLogs pops heap minimums below keptDepth and deletes their logs,
+// returning the new minimum block number (or nil if the pool emptied).
+func (pool *logPool) deleteOlderLogs(keptDepth int64) *int64 {
+	min := pool.heap.FindMin
+	for item := min(); item != nil; item = min() {
+		blockNum := uint64(item.(Uint64))
+		if i := int64(blockNum); i >= keptDepth {
+			// Everything at or above keptDepth is retained.
+			return &i
+		}
+		pool.heap.DeleteMin()
+
+		for hash := range pool.hashesByBlockNumbers[blockNum] {
+			delete(pool.logsByBlockHash, hash)
+		}
+		delete(pool.hashesByBlockNumbers, blockNum)
+	}
+	return nil
+}
+
+// removeBlock drops every log stored under the given block hash, and the
+// block-number entry too once no other hashes remain at that height.
+// NOTE: the heap entry for this block number is left behind; readers handle
+// such stale entries by checking hashesByBlockNumbers.
+func (pool *logPool) removeBlock(hash common.Hash, number uint64) {
+	// deleting all logs for this log's block hash
+	delete(pool.logsByBlockHash, hash)
+	delete(pool.hashesByBlockNumbers[number], hash)
+	if len(pool.hashesByBlockNumbers[number]) == 0 {
+		delete(pool.hashesByBlockNumbers, number)
+	}
+}
+
+// testOnly_getNumLogsForBlock counts logs stored for a block hash (test helper).
+func (pool *logPool) testOnly_getNumLogsForBlock(bh common.Hash) int {
+	var numLogs int
+	for _, txLogs := range pool.logsByBlockHash[bh] {
+		numLogs += len(txLogs)
+	}
+	return numLogs
+}
+
+// Uint64 adapts uint64 to the go-heaps Item interface.
+type Uint64 uint64
+
+// Compare implements heaps.Item ordering for Uint64.
+func (a Uint64) Compare(b heaps.Item) int {
+	a1 := a
+	a2 := b.(Uint64)
+	switch {
+	case a1 > a2:
+		return 1
+	case a1 < a2:
+		return -1
+	default:
+		return 0
+	}
+}
+
+// logsOnBlock groups the logs belonging to a single block number.
+type logsOnBlock struct {
+	BlockNumber uint64
+	Logs        []types.Log
+}
+
+// newLogsOnBlock flattens a txIndex->logIndex->Log map into a logsOnBlock.
+func newLogsOnBlock(num uint64, logsMap
map[uint]map[uint]types.Log) logsOnBlock {
+	logs := make([]types.Log, 0, len(logsMap))
+	for _, txLogs := range logsMap {
+		for _, l := range txLogs {
+			logs = append(logs, l)
+		}
+	}
+	return logsOnBlock{num, logs}
+}
diff --git a/core/chains/evm/log/pool_test.go b/core/chains/evm/log/pool_test.go
new file mode 100644
index 00000000..79e59079
--- /dev/null
+++ b/core/chains/evm/log/pool_test.go
@@ -0,0 +1,360 @@
+package log
+
+import (
+	"math/big"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/goplugin/plugin-common/pkg/logger"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/stretchr/testify/require"
+)
+
+// Shared fixture logs: the variable name encodes block number then log index
+// (e.g. L21 = block 2, index 21).
+var (
+	L1 = types.Log{
+		BlockHash:   common.HexToHash("1"),
+		Index:       1,
+		BlockNumber: 1,
+	}
+
+	L21 = types.Log{
+		BlockHash:   common.HexToHash("2"),
+		Index:       21,
+		BlockNumber: 2,
+	}
+
+	// L21 and L22 differ only in index
+	L22 = types.Log{
+		BlockHash:   common.HexToHash("2"),
+		Index:       22,
+		BlockNumber: 2,
+	}
+
+	// L23 is a different BlockHash than L21 and L22
+	L23 = types.Log{
+		BlockHash:   common.HexToHash("23"),
+		Index:       21,
+		BlockNumber: 2,
+	}
+
+	L3 = types.Log{
+		BlockHash:   common.HexToHash("3"),
+		Index:       3,
+		BlockNumber: 3,
+	}
+)
+
+// TestUnit_AddLog checks addLog's new-minimum return value and deduplication
+// of identical (hash, txIndex, logIndex) entries.
+func TestUnit_AddLog(t *testing.T) {
+	t.Parallel()
+	var p iLogPool = newLogPool(logger.Test(t))
+
+	blockHash := common.BigToHash(big.NewInt(1))
+	l1 := types.Log{
+		BlockHash:   blockHash,
+		TxIndex:     37,
+		Index:       42,
+		BlockNumber: 1,
+	}
+	// 1st log added should be the minimum
+	assert.True(t, p.addLog(l1), "AddLog should have returned true for first log added")
+	require.Equal(t, 1, p.testOnly_getNumLogsForBlock(blockHash))
+
+	// Reattempting to add same log should work, but shouldn't be the minimum
+	assert.False(t, p.addLog(l1), "AddLog should have returned false for a 2nd reattempt")
+	require.Equal(t, 1, p.testOnly_getNumLogsForBlock(blockHash))
+
+	// 2nd log with higher logIndex but same blockhash should add a new log, which shouldn't be minimum
+	l2 := l1
+	l2.Index = 43
+	assert.False(t, p.addLog(l2), "AddLog should have returned false for later log added")
+	require.Equal(t, 2, p.testOnly_getNumLogsForBlock(blockHash))
+
+	// New log with same logIndex but lower txIndex should add a new log, which should be a minimum
+	l2 = l1
+	l2.TxIndex = 13
+	assert.False(t, p.addLog(l2), "AddLog should have returned false for earlier log added")
+	require.Equal(t, 3, p.testOnly_getNumLogsForBlock(blockHash))
+
+	// New log with different larger BlockNumber should add a new log, not as minimum
+	l3 := l1
+	l3.BlockNumber = 3
+	l3.BlockHash = common.BigToHash(big.NewInt(3))
+	assert.False(t, p.addLog(l3), "AddLog should have returned false for same log added")
+	assert.Equal(t, 3, p.testOnly_getNumLogsForBlock(blockHash))
+	require.Equal(t, 1, p.testOnly_getNumLogsForBlock(l3.BlockHash))
+
+	// New log with different smaller BlockNumber should add a new log, as minimum
+	l4 := l1
+	l4.BlockNumber = 0 // New minimum block number
+	l4.BlockHash = common.BigToHash(big.NewInt(0))
+	assert.True(t, p.addLog(l4), "AddLog should have returned true for smallest BlockNumber")
+	assert.Equal(t, 3, p.testOnly_getNumLogsForBlock(blockHash))
+	assert.Equal(t, 1, p.testOnly_getNumLogsForBlock(l3.BlockHash))
+	require.Equal(t, 1, p.testOnly_getNumLogsForBlock(l4.BlockHash))
+
+	// Adding duplicate log should not increase number of logs in pool
+	l5 := l1
+	assert.False(t, p.addLog(l5), "AddLog should have returned false for smallest BlockNumber")
+	assert.Equal(t, 3, p.testOnly_getNumLogsForBlock(blockHash))
+	assert.Equal(t, 1, p.testOnly_getNumLogsForBlock(l3.BlockHash))
+	require.Equal(t, 1, p.testOnly_getNumLogsForBlock(l4.BlockHash))
+}
+
+// TestUnit_GetAndDeleteAll verifies a full drain returns every stored log,
+// the correct min/max block numbers, and empties the pool.
+func TestUnit_GetAndDeleteAll(t *testing.T) {
+	t.Parallel()
+	var p iLogPool = newLogPool(logger.Test(t))
+	p.addLog(L1)
+	p.addLog(L1) // duplicate an add
+	p.addLog(L21)
+	p.addLog(L22)
+	p.addLog(L3)
+
+	logsOnBlock, lowest, highest := p.getAndDeleteAll()
+
+	assert.Equal(t, int64(1), lowest)
+	assert.Equal(t, int64(3), highest)
+	assert.Len(t, logsOnBlock, 3)
+	for _, logs := range logsOnBlock {
+		switch logs.BlockNumber {
+		case 1:
+			l1s := [1]types.Log{L1}
+			assert.ElementsMatch(t, l1s, logs.Logs)
+		case 2:
+			l2s := [2]types.Log{L21, L22}
+			assert.ElementsMatch(t, l2s, logs.Logs)
+		case 3:
+			l3s := [1]types.Log{L3}
+			assert.ElementsMatch(t, l3s, logs.Logs)
+		default:
+			t.Errorf("Received unexpected BlockNumber in results: %d", logs.BlockNumber)
+		}
+	}
+	assert.Equal(t, 0, p.testOnly_getNumLogsForBlock(L1.BlockHash))
+	assert.Equal(t, 0, p.testOnly_getNumLogsForBlock(L21.BlockHash))
+	assert.Equal(t, 0, p.testOnly_getNumLogsForBlock(L3.BlockHash))
+}
+
+// TestUnit_GetLogsToSendWhenEmptyPool pins the documented empty-pool result:
+// no logs and min block number 0.
+func TestUnit_GetLogsToSendWhenEmptyPool(t *testing.T) {
+	t.Parallel()
+	var p iLogPool = newLogPool(logger.Test(t))
+	logsOnBlocks, minBlockNumToSend := p.getLogsToSend(1)
+	assert.Equal(t, int64(0), minBlockNumToSend)
+	assert.ElementsMatch(t, []logsOnBlock{}, logsOnBlocks)
+}
+
+// TestUnit_GetLogsToSend checks the latestBlockNum cut-off against a pool
+// holding logs on blocks 1, 2 and 3.
+func TestUnit_GetLogsToSend(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name                      string
+		latestBlockNumber         int64
+		expectedMinBlockNumToSend int64
+		expectedLogs              []logsOnBlock
+	}{
+		{
+			name:                      "NoLogsToSend",
+			latestBlockNumber:         0,
+			expectedMinBlockNumToSend: 1,
+			expectedLogs:              []logsOnBlock{},
+		},
+		{
+			name:                      "PartialLogsToSend",
+			latestBlockNumber:         2,
+			expectedMinBlockNumToSend: 1,
+			expectedLogs: []logsOnBlock{
+				{
+					BlockNumber: 1,
+					Logs: []types.Log{
+						L1,
+					},
+				},
+				{
+					BlockNumber: 2,
+					Logs: []types.Log{
+						L21,
+					},
+				},
+			},
+		},
+		{
+			name:                      "AllLogsToSend",
+			latestBlockNumber:         4,
+			expectedMinBlockNumToSend: 1,
+			expectedLogs: []logsOnBlock{
+				{
+					BlockNumber: 1,
+					Logs: []types.Log{
+						L1,
+					},
+				},
+				{
+					BlockNumber: 2,
+					Logs: []types.Log{
+						L21,
+					},
+				},
+				{
+					BlockNumber: 3,
+					Logs: []types.Log{
+						L3,
+					},
+				},
+			},
+		},
+	}
+
+	var p iLogPool = newLogPool(logger.Test(t))
+	p.addLog(L1)
+	p.addLog(L21)
+	p.addLog(L3)
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			logsOnBlocks, minBlockNumToSend := p.getLogsToSend(test.latestBlockNumber)
+			assert.Equal(t, test.expectedMinBlockNumToSend, minBlockNumToSend)
+			assert.ElementsMatch(t, test.expectedLogs, logsOnBlocks)
+		})
+	}
+}
+
+// TestUnit_DeleteOlderLogsWhenEmptyPool pins the nil return on an empty pool.
+func TestUnit_DeleteOlderLogsWhenEmptyPool(t *testing.T) {
+	t.Parallel()
+	var p iLogPool = newLogPool(logger.Test(t))
+	keptDepth := p.deleteOlderLogs(1)
+	var expectedKeptDepth *int64
+	require.Equal(t, expectedKeptDepth, keptDepth)
+}
+
+// TestUnit_DeleteOlderLogs verifies pruning below keptDepth and the returned
+// oldest remaining block number.
+func TestUnit_DeleteOlderLogs(t *testing.T) {
+	t.Parallel()
+	keptDepth3 := int64(3)
+	keptDepth1 := int64(1)
+	tests := []struct {
+		name                string
+		keptDepth           int64
+		expectedOldestBlock *int64
+		expectedKeptLogs    []logsOnBlock
+	}{
+		{
+			name:                "AllLogsDeleted",
+			keptDepth:           4,
+			expectedOldestBlock: nil,
+			expectedKeptLogs:    []logsOnBlock{},
+		},
+		{
+			name:                "PartialLogsDeleted",
+			keptDepth:           3,
+			expectedOldestBlock: &keptDepth3,
+			expectedKeptLogs: []logsOnBlock{
+				{
+					BlockNumber: 3,
+					Logs: []types.Log{
+						L3,
+					},
+				},
+			},
+		},
+		{
+			name:                "NoLogsDeleted",
+			keptDepth:           0,
+			expectedOldestBlock: &keptDepth1,
+			expectedKeptLogs: []logsOnBlock{
+				{
+					BlockNumber: 3,
+					Logs: []types.Log{
+						L3,
+					},
+				},
+				{
+					BlockNumber: 2,
+					Logs: []types.Log{
+						L21,
+					},
+				},
+				{
+					BlockNumber: 1,
+					Logs: []types.Log{
+						L1,
+					},
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			var p iLogPool = newLogPool(logger.Test(t))
+			p.addLog(L1)
+			p.addLog(L21)
+			p.addLog(L3)
+
+			oldestKeptBlock := p.deleteOlderLogs(test.keptDepth)
+
+			assert.Equal(t, test.expectedOldestBlock, oldestKeptBlock)
+			keptLogs, _ := p.getLogsToSend(4)
+			assert.ElementsMatch(t, test.expectedKeptLogs, keptLogs)
+		})
+	}
+}
+
+// TestUnit_RemoveBlockWhenEmptyPool checks removeBlock is a safe no-op on an
+// empty pool.
+func TestUnit_RemoveBlockWhenEmptyPool(t *testing.T) {
+	t.Parallel()
+	var p iLogPool = newLogPool(logger.Test(t))
+	p.removeBlock(L1.BlockHash, L1.BlockNumber)
+}
+
+// TestUnit_RemoveBlock verifies removal by (hash, number), including the case
+// where another hash shares the same block number and must survive.
+func TestUnit_RemoveBlock(t *testing.T) {
+	t.Parallel()
+	tests := []struct {
+		name                  string
+		blockHash             common.Hash
+		blockNumber           uint64
+		expectedRemainingLogs []logsOnBlock
+	}{
+		{
+			name:                  "BlockNotFound",
+			blockHash:             L1.BlockHash,
+			blockNumber:           L1.BlockNumber,
+			expectedRemainingLogs: []logsOnBlock{},
+		},
+		{
+			name:                  "BlockNumberWasUnique",
+			blockHash:             L3.BlockHash,
+			blockNumber:           L3.BlockNumber,
+			expectedRemainingLogs: []logsOnBlock{},
+		},
+		{
+			name:        "MultipleBlocksWithSameBlockNumber",
+			blockHash:   L21.BlockHash,
+			blockNumber: L21.BlockNumber,
+			expectedRemainingLogs: []logsOnBlock{
+				{
+					BlockNumber: L23.BlockNumber,
+					Logs: []types.Log{
+						L23,
+					},
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			var p iLogPool = newLogPool(logger.Test(t))
+			p.addLog(L21)
+			p.addLog(L22)
+			p.addLog(L23)
+			p.addLog(L3)
+
+			p.removeBlock(test.blockHash, test.blockNumber)
+
+			assert.Equal(t, 0, p.testOnly_getNumLogsForBlock(test.blockHash))
+			p.deleteOlderLogs(int64(test.blockNumber)) // Pruning logs for easier testing next line
+			logsOnBlock, _ := p.getLogsToSend(int64(test.blockNumber))
+			assert.ElementsMatch(t, test.expectedRemainingLogs, logsOnBlock)
+		})
+	}
+}
diff --git a/core/chains/evm/log/registrations.go b/core/chains/evm/log/registrations.go
new file mode 100644
index 00000000..2bc9f506
--- /dev/null
+++ b/core/chains/evm/log/registrations.go
@@ -0,0 +1,459 @@
+package log
+
+import (
+	"fmt"
+	"math/big"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/pkg/errors"
+
+	"github.com/goplugin/plugin-common/pkg/logger"
+	evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types"
+	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers"
+	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated"
+	"github.com/goplugin/pluginv3.0/v2/core/services/pg"
+)
+
+// 1. Each listener being registered can specify a custom NumConfirmations - number of block confirmations required for any log being sent to it.
+//
+// 2.
All received logs are kept in an array and deleted ONLY after they are outside the confirmation range for all subscribers
+// (when given log height is lower than (latest height - max(highestNumConfirmations, EVM.FinalityDepth)) ) -> see: pool.go
+//
+// 3. Information about already consumed logs is fetched from the database and used as a filter
+//
+// 4. The logs are attempted to be sent after every new head arrival:
+//	Each stored log is checked against every matched listener and is sent unless:
+//	A) is too young for that listener
+//	B) matches a log already consumed (via the database information from log_broadcasts table)
+//
+// A log might be sent multiple times, if a consumer processes logs asynchronously (e.g. via a queue or a Mailbox), in which case the log
+// may not be marked as consumed before the next sending operation. That's why customers must still check the state via WasAlreadyConsumed
+// before processing the log.
+//
+// The registrations' methods are NOT thread-safe.
+type (
+	registrations struct {
+		// Map only used for invariant checking:
+		// registeredSubs is used to sanity check adding/removing the exact same subscriber twice
+		registeredSubs map[*subscriber]struct{}
+		// Map only used for invariant checking:
+		// jobIDAddr enforces that no two listeners can share the same jobID and contract address
+		// This is because log_broadcasts table can only be consumed once and
+		// assumes one listener per job per log event
+		jobIDAddrs map[int32]map[common.Address]struct{}
+
+		// handlersByConfs maps numConfirmations => *handler
+		handlersByConfs map[uint32]*handler
+		logger          logger.SugaredLogger
+		evmChainID      big.Int
+
+		// highest 'NumConfirmations' per all listeners, used to decide about deleting older logs if it's higher than EvmFinalityDepth
+		// it's: max(listeners.map(l => l.num_confirmations)
+		highestNumConfirmations uint32
+	}
+
+	handler struct {
+		lookupSubs map[common.Address]map[common.Hash]subscribers // contractAddress => logTopic => *subscriber => topicValueFilters
+		evmChainID big.Int
+		logger     logger.SugaredLogger
+	}
+
+	// The Listener responds to log events through HandleLog.
+	Listener interface {
+		HandleLog(b Broadcast)
+		JobID() int32
+	}
+
+	// subscribers type for convenience and readability
+	subscribers map[*subscriber][][]Topic
+)
+
+// newRegistrations constructs an empty registrations set for the given chain.
+func newRegistrations(lggr logger.Logger, evmChainID big.Int) *registrations {
+	return &registrations{
+		registeredSubs:  make(map[*subscriber]struct{}),
+		jobIDAddrs:      make(map[int32]map[common.Address]struct{}),
+		handlersByConfs: make(map[uint32]*handler),
+		evmChainID:      evmChainID,
+		logger:          logger.Sugared(logger.Named(lggr, "Registrations")),
+	}
+}
+
+// addSubscriber registers sub under its MinIncomingConfirmations handler and
+// reports whether the eth subscription must be re-established to pick up new
+// addresses/topics. Panics (via logger) on invariant violations.
+func (r *registrations) addSubscriber(sub *subscriber) (needsResubscribe bool) {
+	if err := r.checkAddSubscriber(sub); err != nil {
+		r.logger.Panicw(err.Error(), "err", err, "addr", sub.opts.Contract.Hex(), "jobID", sub.listener.JobID())
+	}
+
+	r.logger.Tracef("Added subscription %p with job ID %v", sub, sub.listener.JobID())
+
+	handler, exists := r.handlersByConfs[sub.opts.MinIncomingConfirmations]
+	if !exists {
+		handler = newHandler(r.logger, r.evmChainID)
+		r.handlersByConfs[sub.opts.MinIncomingConfirmations] = handler
+	}
+
+	needsResubscribe = handler.addSubscriber(sub, r.handlersWithGreaterConfs(sub.opts.MinIncomingConfirmations))
+
+	// increase the variable for highest number of confirmations among all subscribers,
+	// if the new subscriber has a higher value
+	if sub.opts.MinIncomingConfirmations > r.highestNumConfirmations {
+		r.highestNumConfirmations = sub.opts.MinIncomingConfirmations
+	}
+	return
+}
+
+// handlersWithGreaterConfs allows for an optimisation - in the case that we
+// are already listening on this topic for a handler with a GREATER
+// MinIncomingConfirmations, it is not necessary to subscribe again
+func (r *registrations) handlersWithGreaterConfs(confs uint32) (handlersWithGreaterConfs []*handler) {
+	for hConfs, handler := range r.handlersByConfs {
+		if hConfs > confs {
+			handlersWithGreaterConfs = append(handlersWithGreaterConfs, handler)
+		}
+	}
+	return
+}
+
+// checkAddSubscriber registers the subsciber and makes sure we aren't violating any assumptions
+// maps modified are only used for checks
+func (r *registrations) checkAddSubscriber(sub *subscriber) error {
+	if sub.opts.MinIncomingConfirmations <= 0 {
+		return errors.Errorf("LogBroadcaster requires that MinIncomingConfirmations must be at least 1 (got %v). Logs must have been confirmed in at least 1 block, it does not support reading logs from the mempool before they have been mined", sub.opts.MinIncomingConfirmations)
+	}
+
+	jobID := sub.listener.JobID()
+	if _, exists := r.registeredSubs[sub]; exists {
+		return errors.Errorf("Cannot add subscriber %p for job ID %v: already added", sub, jobID)
+	}
+	r.registeredSubs[sub] = struct{}{}
+	addrs, exists := r.jobIDAddrs[jobID]
+	if !exists {
+		r.jobIDAddrs[jobID] = make(map[common.Address]struct{})
+	}
+	if _, exists := addrs[sub.opts.Contract]; exists {
+		return errors.Errorf("Cannot add subscriber %p: only one subscription is allowed per jobID/contract address. There is already a subscription with job ID %v listening on %s", sub, jobID, sub.opts.Contract.Hex())
+	}
+	r.jobIDAddrs[jobID][sub.opts.Contract] = struct{}{}
+	return nil
+}
+
+// removeSubscriber removes sub and reports whether the eth subscription needs
+// to be re-established (when the removed sub was the last one for a topic).
+// Panics (via logger) on invariant violations.
+func (r *registrations) removeSubscriber(sub *subscriber) (needsResubscribe bool) {
+	if err := r.checkRemoveSubscriber(sub); err != nil {
+		r.logger.Panicw(err.Error(), "err", err, "addr", sub.opts.Contract.Hex(), "jobID", sub.listener.JobID())
+	}
+	r.logger.Tracef("Removed subscription %p with job ID %v", sub, sub.listener.JobID())
+
+	handlers, exists := r.handlersByConfs[sub.opts.MinIncomingConfirmations]
+	if !exists {
+		return
+	}
+
+	needsResubscribe = handlers.removeSubscriber(sub, r.handlersByConfs)
+
+	if len(r.handlersByConfs[sub.opts.MinIncomingConfirmations].lookupSubs) == 0 {
+		delete(r.handlersByConfs, sub.opts.MinIncomingConfirmations)
+		r.resetHighestNumConfirmationsValue()
+	}
+
+	return
+}
+
+// checkRemoveSubscriber deregisters the subscriber and validates we aren't
+// violating any assumptions
+// maps modified are only used for checks
+func (r *registrations) checkRemoveSubscriber(sub *subscriber) error {
+	jobID := sub.listener.JobID()
+	if _, exists := r.registeredSubs[sub]; !exists {
+		return errors.Errorf("Cannot remove subscriber %p for job ID %v: not registered", sub, jobID)
+	}
+	delete(r.registeredSubs, sub)
+	addrs, exists := r.jobIDAddrs[jobID]
+	if !exists {
+		return errors.Errorf("Cannot remove subscriber %p: jobIDAddrs was missing job ID %v", sub, jobID)
+	}
+	_, exists = addrs[sub.opts.Contract]
+	if !exists {
+		return errors.Errorf("Cannot remove subscriber %p: jobIDAddrs was missing address %s", sub, sub.opts.Contract.Hex())
+	}
+	delete(r.jobIDAddrs[jobID], sub.opts.Contract)
+	if len(r.jobIDAddrs[jobID]) == 0 {
+		delete(r.jobIDAddrs, jobID)
+	}
+	return nil
+}
+
+// reset the number tracking highest num confirmations among all subscribers
+func (r *registrations) resetHighestNumConfirmationsValue() {
+	highestNumConfirmations := uint32(0)
+
+	for numConfirmations := range r.handlersByConfs {
+		if numConfirmations > highestNumConfirmations {
+			highestNumConfirmations = numConfirmations
+		}
+	}
+	r.highestNumConfirmations = highestNumConfirmations
+}
+
+// addressesAndTopics aggregates the contract addresses and topics across all
+// handlers (may contain duplicates).
+func (r *registrations) addressesAndTopics() ([]common.Address, []common.Hash) {
+	var addresses []common.Address
+	var topics []common.Hash
+	for _, sub := range r.handlersByConfs {
+		add, t := sub.addressesAndTopics()
+		addresses = append(addresses, add...)
+		topics = append(topics, t...)
+	}
+	return addresses, topics
+}
+
+// isAddressRegistered reports whether any handler listens on the address.
+func (r *registrations) isAddressRegistered(address common.Address) bool {
+	for _, sub := range r.handlersByConfs {
+		if sub.isAddressRegistered(address) {
+			return true
+		}
+	}
+	return false
+}
+
+// sendLogs delivers each log to every handler whose confirmation depth is
+// satisfied at latestHead, skipping logs already recorded as consumed.
+func (r *registrations) sendLogs(logsToSend []logsOnBlock, latestHead evmtypes.Head, broadcasts []LogBroadcast, bc broadcastCreator) {
+	broadcastsExisting := make(map[LogBroadcastAsKey]bool)
+	for _, b := range broadcasts {
+		broadcastsExisting[b.AsKey()] = b.Consumed
+	}
+
+	latestBlockNumber := uint64(latestHead.Number)
+
+	for _, logsPerBlock := range logsToSend {
+		for numConfirmations, handlers := range r.handlersByConfs {
+
+			if numConfirmations != 0 && latestBlockNumber < uint64(numConfirmations) {
+				// Skipping send because the block is definitely too young
+				continue
+			}
+
+			// We attempt the send multiple times per log
+			// so here we need to see if this particular listener actually should receive it at this depth
+			isOldEnough := numConfirmations == 0 || (logsPerBlock.BlockNumber+uint64(numConfirmations)-1) <= latestBlockNumber
+			if !isOldEnough {
+				continue
+			}
+
+			for _, log := range logsPerBlock.Logs {
+				handlers.sendLog(log, latestHead, broadcastsExisting, bc, r.logger)
+			}
+		}
+	}
+}
+
+// Returns true if there is at least one filter value (or no filters at all) that matches an actual received value for every index i, or false otherwise
+func filtersContainValues(topicValues []common.Hash, filters [][]Topic) bool {
+	for i := 0; i < len(topicValues) && i < len(filters); i++ {
+		filterValues := filters[i]
+		valueFound := len(filterValues) == 0 // empty filter for given index means: all values allowed
+		for _, filterValue := range filterValues {
+			if common.Hash(filterValue) == topicValues[i] {
+				valueFound = true
+				break
+			}
+		}
+		if !valueFound {
+			return false
+		}
+	}
+	return true
+}
+
+func newHandler(lggr logger.SugaredLogger, evmChainID big.Int) *handler {
+	return &handler{
+		lookupSubs:
make(map[common.Address]map[common.Hash]subscribers), + evmChainID: evmChainID, + logger: lggr, + } +} + +func (r *handler) addSubscriber(sub *subscriber, handlersWithGreaterConfs []*handler) (needsResubscribe bool) { + addr := sub.opts.Contract + + if sub.opts.MinIncomingConfirmations <= 0 { + r.logger.Panicw(fmt.Sprintf("LogBroadcaster requires that MinIncomingConfirmations must be at least 1 (got %v). Logs must have been confirmed in at least 1 block, it does not support reading logs from the mempool before they have been mined.", sub.opts.MinIncomingConfirmations), "addr", sub.opts.Contract.Hex(), "jobID", sub.listener.JobID()) + } + + if _, exists := r.lookupSubs[addr]; !exists { + r.lookupSubs[addr] = make(map[common.Hash]subscribers) + } + + for topic, topicValueFilters := range sub.opts.LogsWithTopics { + if _, exists := r.lookupSubs[addr][topic]; !exists { + r.logger.Tracef("No existing sub for addr %s and topic %s at this MinIncomingConfirmations of %v", addr.Hex(), topic.Hex(), sub.opts.MinIncomingConfirmations) + r.lookupSubs[addr][topic] = make(subscribers) + + func() { + if !needsResubscribe { + // NOTE: This is an optimization; if we already have a + // subscription to this addr/topic at a higher + // MinIncomingConfirmations then we don't need to resubscribe + // again since even the worst case lookback is already covered + for _, existingHandler := range handlersWithGreaterConfs { + if _, exists := existingHandler.lookupSubs[addr][topic]; exists { + r.logger.Tracef("Sub already exists for addr %s and topic %s at greater than this MinIncomingConfirmations of %v. Resubscribe is not required", addr.Hex(), topic.Hex(), sub.opts.MinIncomingConfirmations) + return + } + } + r.logger.Tracef("No sub exists for addr %s and topic %s at this or greater MinIncomingConfirmations of %v. 
Resubscribe is required", addr.Hex(), topic.Hex(), sub.opts.MinIncomingConfirmations) + needsResubscribe = true + } + }() + } + r.lookupSubs[addr][topic][sub] = topicValueFilters + } + return +} + +func (r *handler) removeSubscriber(sub *subscriber, allHandlers map[uint32]*handler) (needsResubscribe bool) { + addr := sub.opts.Contract + + for topic := range sub.opts.LogsWithTopics { + // OK to panic on missing addr/topic here, since that would be an invariant violation: + // Both addr and topic will always have been added on addSubscriber + // LogsWithTopics should never be mutated + // Only removeSubscriber should ever remove anything from this map + addrTopics, exists := r.lookupSubs[addr] + if !exists { + r.logger.Panicf("AssumptionViolation: expected lookupSubs to contain addr %s for subscriber %p with job ID %v", addr.Hex(), sub, sub.listener.JobID()) + } + topicMap, exists := addrTopics[topic] + if !exists { + r.logger.Panicf("AssumptionViolation: expected addrTopics to contain topic %v for subscriber %p with job ID %v", topic, sub, sub.listener.JobID()) + } + if _, exists = topicMap[sub]; !exists { + r.logger.Panicf("AssumptionViolation: expected topicMap to contain subscriber %p with job ID %v", sub, sub.listener.JobID()) + } + delete(topicMap, sub) + + // cleanup and resubscribe if necessary + if len(topicMap) == 0 { + r.logger.Tracef("No subs left for addr %s and topic %s at this MinIncomingConfirmations of %v", addr.Hex(), topic.Hex(), sub.opts.MinIncomingConfirmations) + + func() { + if !needsResubscribe { + // NOTE: This is an optimization. Resub not necessary if there + // are still any other handlers listening on this addr/topic. + for confs, otherHandler := range allHandlers { + if confs == sub.opts.MinIncomingConfirmations { + // no need to check ourself, already did this above + continue + } + if _, exists := otherHandler.lookupSubs[addr][topic]; exists { + r.logger.Tracef("Sub still exists for addr %s and topic %s. 
Resubscribe will not be performed", addr.Hex(), topic.Hex()) + return + } + } + + r.logger.Tracef("No sub exists for addr %s and topic %s. Resubscribe will be performed", addr.Hex(), topic.Hex()) + needsResubscribe = true + } + }() + delete(r.lookupSubs[addr], topic) + } + if len(r.lookupSubs[addr]) == 0 { + delete(r.lookupSubs, addr) + } + } + return +} + +func (r *handler) addressesAndTopics() ([]common.Address, []common.Hash) { + var addresses []common.Address + var topics []common.Hash + for addr := range r.lookupSubs { + addresses = append(addresses, addr) + for topic := range r.lookupSubs[addr] { + topics = append(topics, topic) + } + } + return addresses, topics +} + +func (r *handler) isAddressRegistered(addr common.Address) bool { + _, exists := r.lookupSubs[addr] + return exists +} + +var _ broadcastCreator = &orm{} + +type broadcastCreator interface { + CreateBroadcast(blockHash common.Hash, blockNumber uint64, logIndex uint, jobID int32, pqOpts ...pg.QOpt) error +} + +func (r *handler) sendLog(log types.Log, latestHead evmtypes.Head, + broadcasts map[LogBroadcastAsKey]bool, + bc broadcastCreator, + logger logger.Logger) { + + topic := log.Topics[0] + + latestBlockNumber := uint64(latestHead.Number) + var wg sync.WaitGroup + for sub, filters := range r.lookupSubs[log.Address][topic] { + currentBroadcast := NewLogBroadcastAsKey(log, sub.listener) + consumed, exists := broadcasts[currentBroadcast] + if exists && consumed { + continue + } + + if len(filters) > 0 && len(log.Topics) > 1 { + topicValues := log.Topics[1:] + if !filtersContainValues(topicValues, filters) { + logger.Debugw("Filters did not contain expected topic", + "blockNumber", log.BlockNumber, "blockHash", log.BlockHash, + "address", log.Address, "latestBlockNumber", latestBlockNumber, + "topicValues", topicValues, "filters", topicsToHex(filters)) + continue + } + } + + logCopy := gethwrappers.DeepCopyLog(log) + + var decodedLog generated.AbigenLog + var err error + decodedLog, err = 
sub.opts.ParseLog(logCopy) + if err != nil { + logger.Errorw("Could not parse contract log", "err", err) + continue + } + + jobID := sub.listener.JobID() + if !exists { + // Create unconsumed broadcast + if err := bc.CreateBroadcast(log.BlockHash, log.BlockNumber, log.Index, jobID); err != nil { + logger.Errorw("Could not create broadcast log", "blockNumber", log.BlockNumber, + "blockHash", log.BlockHash, "address", log.Address, "jobID", jobID, "err", err) + continue + } + } + + logger.Debugw("LogBroadcaster: Sending out log", + "blockNumber", log.BlockNumber, "blockHash", log.BlockHash, + "address", log.Address, "latestBlockNumber", latestBlockNumber, "jobID", jobID) + + // must copy function pointer here since range pointer (sub) may not be + // used in goroutine below + handleLog := sub.listener.HandleLog + wg.Add(1) + go func() { + defer wg.Done() + handleLog(&broadcast{ + latestBlockNumber, + latestHead.Hash, + latestHead.ReceiptsRoot, + latestHead.TransactionsRoot, + latestHead.StateRoot, + decodedLog, + logCopy, + jobID, + r.evmChainID, + }) + }() + } + wg.Wait() +} diff --git a/core/chains/evm/log/registrations_test.go b/core/chains/evm/log/registrations_test.go new file mode 100644 index 00000000..102806ab --- /dev/null +++ b/core/chains/evm/log/registrations_test.go @@ -0,0 +1,250 @@ +package log + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +var _ Listener = testListener{} + +type testListener struct { + jobID int32 +} + +func (tl testListener) JobID() int32 { return tl.jobID } +func (tl testListener) HandleLog(Broadcast) { panic("not implemented") } + +func newTestListener(t *testing.T, jobID int32) testListener { + return testListener{jobID} +} + +func newTestRegistrations(t 
*testing.T) *registrations { + return newRegistrations(logger.Test(t), *testutils.FixtureChainID) +} + +func newTopic() Topic { + return Topic(utils.NewHash()) +} + +func TestUnit_Registrations_InvariantViolations(t *testing.T) { + l := newTestListener(t, 1) + r := newTestRegistrations(t) + + contractAddr := testutils.NewAddress() + opts := ListenerOpts{Contract: contractAddr, MinIncomingConfirmations: 1} + sub := &subscriber{l, opts} + + r.addSubscriber(sub) + + // Different subscriber same job ID different contract address is ok + subB := &subscriber{l, ListenerOpts{Contract: testutils.NewAddress(), MinIncomingConfirmations: 1}} + r.addSubscriber(subB) + + // Different subscriber same jobID/contract address is not ok + assert.Panics(t, func() { + subError := &subscriber{l, ListenerOpts{Contract: contractAddr, MinIncomingConfirmations: 1}} + + r.addSubscriber(subError) + }) + + l2 := newTestListener(t, 2) + sub2 := &subscriber{l2, opts} + + // Different subscriber different job ID same contract address is ok + r.addSubscriber(sub2) + + // Adding same subscriber twice is not ok + assert.Panics(t, func() { + r.addSubscriber(sub2) + }, "expected adding same subscription twice to panic") + + r.removeSubscriber(sub) + + // Removing subscriber twice also panics + assert.Panics(t, func() { + r.removeSubscriber(sub) + }, "expected removing a subscriber twice to panic") + + // Now we can add it again + r.addSubscriber(sub) +} + +func TestUnit_Registrations_addSubscriber_removeSubscriber(t *testing.T) { + contractAddr := testutils.NewAddress() + r := newTestRegistrations(t) + + l := newTestListener(t, 1) + topic1 := utils.NewHash() + topicValueFilters1 := [][]Topic{{newTopic(), newTopic()}, {newTopic()}, {}} + topic2 := utils.NewHash() + topicValueFilters2 := [][]Topic{{newTopic()}} + topic3 := utils.NewHash() + topicValueFilters3 := [][]Topic{} + logsWithTopics := make(map[common.Hash][][]Topic) + logsWithTopics[topic1] = topicValueFilters1 + logsWithTopics[topic2] = 
topicValueFilters2 + logsWithTopics[topic3] = topicValueFilters3 + opts := ListenerOpts{Contract: contractAddr, LogsWithTopics: logsWithTopics, MinIncomingConfirmations: 1} + sub := &subscriber{l, opts} + + // same contract, same topics + l2 := newTestListener(t, 2) + opts2 := opts + sub2 := &subscriber{l2, opts2} + + // same contract, different topics + l3 := newTestListener(t, 3) + topic4 := utils.NewHash() + topicValueFilters4 := [][]Topic{{newTopic()}} + logsWithTopics3 := make(map[common.Hash][][]Topic) + logsWithTopics3[topic4] = topicValueFilters4 + opts3 := opts + opts3.LogsWithTopics = logsWithTopics3 + sub3 := &subscriber{l3, opts3} + + // same contract, same topics, greater MinIncomingConfirmations + l4 := newTestListener(t, 4) + opts4 := opts3 + opts4.MinIncomingConfirmations = 42 + sub4 := &subscriber{l4, opts4} + + // same contract, same topics, midrange MinIncomingConfirmations + l5 := newTestListener(t, 5) + opts5 := opts3 + opts5.MinIncomingConfirmations = 21 + sub5 := &subscriber{l5, opts5} + + t.Run("addSubscriber", func(t *testing.T) { + needsResub := r.addSubscriber(sub) + assert.True(t, needsResub) + + // same contract, same topics + needsResub = r.addSubscriber(sub2) + assert.False(t, needsResub) + + // same contract, different topics + needsResub = r.addSubscriber(sub3) + assert.True(t, needsResub) + + assert.Equal(t, 1, int(r.highestNumConfirmations)) + + // same contract, same topics, different MinIncomingConfirmations + needsResub = r.addSubscriber(sub4) + // resub required because confirmations went higher + assert.True(t, needsResub) + assert.Equal(t, 42, int(r.highestNumConfirmations)) + + // same contract, same topics, midrange MinIncomingConfirmations + needsResub = r.addSubscriber(sub5) + // resub NOT required because confirmations is lower than the highest + assert.False(t, needsResub) + assert.Equal(t, 42, int(r.highestNumConfirmations)) + + assert.Len(t, r.registeredSubs, 5) + assert.Contains(t, r.registeredSubs, sub) + 
assert.Contains(t, r.registeredSubs, sub2) + assert.Contains(t, r.registeredSubs, sub3) + assert.Contains(t, r.registeredSubs, sub4) + assert.Contains(t, r.registeredSubs, sub5) + + assert.Len(t, r.handlersByConfs, 3) + require.Contains(t, r.handlersByConfs, uint32(1)) + require.Contains(t, r.handlersByConfs, uint32(21)) + require.Contains(t, r.handlersByConfs, uint32(42)) + + // contractAddress => logTopic => Listener + handlers1 := r.handlersByConfs[1].lookupSubs + assert.Len(t, handlers1, 1) + assert.Contains(t, handlers1, contractAddr) + h1 := handlers1[contractAddr] + // 4 topics on this contract addr + assert.Len(t, h1, 4) + assert.Contains(t, h1, topic1) + assert.Contains(t, h1, topic2) + assert.Contains(t, h1, topic3) + assert.Contains(t, h1, topic4) + // topics map to their subscribers + assert.Len(t, h1[topic1], 2) // listeners 1 and 2 + assert.Contains(t, h1[topic1], sub) + assert.Contains(t, h1[topic1], sub2) + assert.Len(t, h1[topic2], 2) // listeners 1 and 2 + assert.Contains(t, h1[topic2], sub) + assert.Contains(t, h1[topic2], sub2) + assert.Len(t, h1[topic3], 2) // listeners 1 and 2 + assert.Contains(t, h1[topic3], sub) + assert.Contains(t, h1[topic3], sub2) + assert.Len(t, h1[topic4], 1) // listener 3 + assert.Contains(t, h1[topic4], sub3) + + handlers42 := r.handlersByConfs[42].lookupSubs + assert.Len(t, handlers42, 1) + assert.Contains(t, handlers1, contractAddr) + h42 := handlers42[contractAddr] + // 1 topic on this contract addr + assert.Len(t, h42, 1) + assert.Contains(t, h1, topic4) + // topic maps to its subscriber + assert.Len(t, h42[topic4], 1) // listener 4 + assert.Contains(t, h42[topic4], sub4) + + handlers21 := r.handlersByConfs[21].lookupSubs + assert.Len(t, handlers21, 1) + assert.Contains(t, handlers1, contractAddr) + h21 := handlers21[contractAddr] + // 1 topic on this contract addr + assert.Len(t, h21, 1) + assert.Contains(t, h1, topic4) + // topic maps to its subscriber + assert.Len(t, h21[topic4], 1) // listener 5 + 
assert.Contains(t, h21[topic4], sub5) + }) + + t.Run("removeSubscriber", func(t *testing.T) { + needsResub := r.removeSubscriber(sub) + // No resub necessary: sub2 also needs all these topics + assert.False(t, needsResub) + + assert.Len(t, r.registeredSubs, 4) + assert.NotContains(t, r.registeredSubs, sub) + assert.Contains(t, r.registeredSubs, sub2) + assert.Contains(t, r.registeredSubs, sub3) + assert.Contains(t, r.registeredSubs, sub4) + assert.Contains(t, r.registeredSubs, sub5) + + needsResub = r.removeSubscriber(sub2) + // sub2 has topics in it that other subs don't cover + assert.True(t, needsResub) + assert.Len(t, r.registeredSubs, 3) + assert.NotContains(t, r.registeredSubs, sub2) + assert.Contains(t, r.registeredSubs, sub3) + assert.Contains(t, r.registeredSubs, sub4) + assert.Contains(t, r.registeredSubs, sub4) + + needsResub = r.removeSubscriber(sub3) + // sub5 and sub4 cover everything that sub3 does already, resub not necessary + assert.False(t, needsResub) + assert.Len(t, r.registeredSubs, 2) + assert.NotContains(t, r.registeredSubs, sub3) + assert.Contains(t, r.registeredSubs, sub4) + assert.Contains(t, r.registeredSubs, sub4) + + needsResub = r.removeSubscriber(sub4) + // sub5 covers everything that sub4 does already, resub not necessary + assert.False(t, needsResub) + assert.Len(t, r.registeredSubs, 1) + assert.NotContains(t, r.registeredSubs, sub4) + assert.Contains(t, r.registeredSubs, sub5) + + needsResub = r.removeSubscriber(sub5) + // Nothing left, need to refresh subscriptions + assert.True(t, needsResub) + assert.Len(t, r.registeredSubs, 0) + }) +} diff --git a/core/chains/evm/logpoller/disabled.go b/core/chains/evm/logpoller/disabled.go new file mode 100644 index 00000000..adf6491f --- /dev/null +++ b/core/chains/evm/logpoller/disabled.go @@ -0,0 +1,112 @@ +package logpoller + +import ( + "context" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + 
+var ( + ErrDisabled = errors.New("log poller disabled") + LogPollerDisabled LogPoller = disabled{} +) + +type disabled struct{} + +func (disabled) Name() string { return "disabledLogPoller" } + +func (disabled) Start(ctx context.Context) error { return ErrDisabled } + +func (disabled) Close() error { return ErrDisabled } + +func (disabled) Ready() error { return ErrDisabled } + +func (disabled) HealthReport() map[string]error { + return map[string]error{"disabledLogPoller": ErrDisabled} +} + +func (disabled) Replay(ctx context.Context, fromBlock int64) error { return ErrDisabled } + +func (disabled) ReplayAsync(fromBlock int64) {} + +func (disabled) RegisterFilter(filter Filter, qopts ...pg.QOpt) error { return ErrDisabled } + +func (disabled) UnregisterFilter(name string, qopts ...pg.QOpt) error { return ErrDisabled } + +func (disabled) HasFilter(name string) bool { return false } + +func (disabled) LatestBlock(qopts ...pg.QOpt) (LogPollerBlock, error) { + return LogPollerBlock{}, ErrDisabled +} + +func (disabled) GetBlocksRange(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]LogPollerBlock, error) { + return nil, ErrDisabled +} + +func (disabled) Logs(start, end int64, eventSig common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} + +func (disabled) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} + +func (disabled) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations, qopts ...pg.QOpt) (*Log, error) { + return nil, ErrDisabled +} + +func (disabled) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} + +func (disabled) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs 
Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} + +func (disabled) IndexedLogsByBlockRange(start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} + +func (d disabled) IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} + +func (disabled) IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} + +func (disabled) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} + +func (disabled) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} + +func (disabled) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} + +func (d disabled) IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} + +func (d disabled) LogsCreatedAfter(eventSig common.Hash, address common.Address, time time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} + +func (d disabled) IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + 
return nil, ErrDisabled +} + +func (d disabled) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) { + return 0, ErrDisabled +} + +func (d disabled) LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return nil, ErrDisabled +} diff --git a/core/chains/evm/logpoller/doc.go b/core/chains/evm/logpoller/doc.go new file mode 100644 index 00000000..81c6cecf --- /dev/null +++ b/core/chains/evm/logpoller/doc.go @@ -0,0 +1,22 @@ +// Package logpoller is a service for querying EVM log data. +// +// It can be thought of as a more performant and sophisticated version +// of eth_getLogs https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getlogs. +// Having a local table of relevant, continually canonical logs allows us to 2 main advantages: +// - Have hundreds of jobs/clients querying for logs without overloading the underlying RPC provider. +// - Do more sophisticated querying (filter by confirmations/time/log contents, efficiently join between the logs table +// and other tables on the node, etc.) +// +// Guarantees provided by the poller: +// - Queries always return the logs from the _current_ canonical chain (same as eth_getLogs). In particular +// that means that querying unfinalized logs may change between queries but finalized logs remain stable. +// The threshold between unfinalized and finalized logs is the finalityDepth parameter, chosen such that with +// exceedingly high probability logs finalityDepth deep cannot be reorged. +// - After calling RegisterFilter with a particular event, it will never miss logs for that event +// despite node crashes and reorgs. The granularity of the filter is always at least one block (more when backfilling). 
+// - Old logs stored in the db will only be deleted if all filters matching them have explicit retention periods set, and all +// of them have expired. Default retention of 0 on any matching filter guarantees permanent retention. +// - After calling Replay(fromBlock), all blocks including that one to the latest chain tip will be polled +// with the current filter. This can be used on first time job add to specify a start block from which you wish to capture +// existing logs. +package logpoller diff --git a/core/chains/evm/logpoller/helper_test.go b/core/chains/evm/logpoller/helper_test.go new file mode 100644 index 00000000..3b3b8d55 --- /dev/null +++ b/core/chains/evm/logpoller/helper_test.go @@ -0,0 +1,113 @@ +package logpoller_test + +import ( + "context" + "database/sql" + "math/big" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_emitter" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +var ( + EmitterABI, _ = abi.JSON(strings.NewReader(log_emitter.LogEmitterABI)) +) + +type TestHarness struct { + Lggr logger.Logger + // Chain2/ORM2 is just a dummy second chain, doesn't have a client. 
+ ChainID, ChainID2 *big.Int + ORM, ORM2 *logpoller.DbORM + LogPoller logpoller.LogPollerTest + Client *backends.SimulatedBackend + Owner *bind.TransactOpts + Emitter1, Emitter2 *log_emitter.LogEmitter + EmitterAddress1, EmitterAddress2 common.Address + EthDB ethdb.Database +} + +func SetupTH(t testing.TB, useFinalityTag bool, finalityDepth, backfillBatchSize, rpcBatchSize, keepFinalizedBlocksDepth int64) TestHarness { + lggr := logger.Test(t) + chainID := testutils.NewRandomEVMChainID() + chainID2 := testutils.NewRandomEVMChainID() + db := pgtest.NewSqlxDB(t) + + o := logpoller.NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + o2 := logpoller.NewORM(chainID2, db, lggr, pgtest.NewQConfig(true)) + owner := testutils.MustNewSimTransactor(t) + ethDB := rawdb.NewMemoryDatabase() + ec := backends.NewSimulatedBackendWithDatabase(ethDB, map[common.Address]core.GenesisAccount{ + owner.From: { + Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)), + }, + }, 10e6) + // Poll period doesn't matter, we intend to call poll and save logs directly in the test. + // Set it to some insanely high value to not interfere with any tests. 
+ esc := client.NewSimulatedBackendClient(t, ec, chainID) + // Mark genesis block as finalized to avoid any nulls in the tests + head := esc.Backend().Blockchain().CurrentHeader() + esc.Backend().Blockchain().SetFinalized(head) + lp := logpoller.NewLogPoller(o, esc, lggr, 1*time.Hour, useFinalityTag, finalityDepth, backfillBatchSize, rpcBatchSize, keepFinalizedBlocksDepth) + emitterAddress1, _, emitter1, err := log_emitter.DeployLogEmitter(owner, ec) + require.NoError(t, err) + emitterAddress2, _, emitter2, err := log_emitter.DeployLogEmitter(owner, ec) + require.NoError(t, err) + ec.Commit() + return TestHarness{ + Lggr: lggr, + ChainID: chainID, + ChainID2: chainID2, + ORM: o, + ORM2: o2, + LogPoller: lp, + Client: ec, + Owner: owner, + Emitter1: emitter1, + Emitter2: emitter2, + EmitterAddress1: emitterAddress1, + EmitterAddress2: emitterAddress2, + EthDB: ethDB, + } +} + +func (th *TestHarness) PollAndSaveLogs(ctx context.Context, currentBlockNumber int64) int64 { + th.LogPoller.PollAndSaveLogs(ctx, currentBlockNumber) + latest, _ := th.LogPoller.LatestBlock(pg.WithParentCtx(ctx)) + return latest.BlockNumber + 1 +} + +func (th *TestHarness) assertDontHave(t *testing.T, start, end int) { + for i := start; i < end; i++ { + _, err := th.ORM.SelectBlockByNumber(int64(i)) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + } +} + +func (th *TestHarness) assertHaveCanonical(t *testing.T, start, end int) { + for i := start; i < end; i++ { + blk, err := th.ORM.SelectBlockByNumber(int64(i)) + require.NoError(t, err, "block %v", i) + chainBlk, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(int64(i))) + require.NoError(t, err) + assert.Equal(t, chainBlk.Hash().Bytes(), blk.BlockHash.Bytes(), "block %v", i) + } +} diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go new file mode 100644 index 00000000..268b6cac --- /dev/null +++ b/core/chains/evm/logpoller/log_poller.go @@ -0,0 +1,1196 @@ +package logpoller + +import 
( + "bytes" + "context" + "database/sql" + "encoding/binary" + "fmt" + "math/big" + "sort" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + "golang.org/x/exp/maps" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/plugin-common/pkg/utils/mathutil" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +//go:generate mockery --quiet --name LogPoller --output ./mocks/ --case=underscore --structname LogPoller --filename log_poller.go +type LogPoller interface { + services.Service + Replay(ctx context.Context, fromBlock int64) error + ReplayAsync(fromBlock int64) + RegisterFilter(filter Filter, qopts ...pg.QOpt) error + UnregisterFilter(name string, qopts ...pg.QOpt) error + HasFilter(name string) bool + LatestBlock(qopts ...pg.QOpt) (LogPollerBlock, error) + GetBlocksRange(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]LogPollerBlock, error) + + // General querying + Logs(start, end int64, eventSig common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) + LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) + LogsCreatedAfter(eventSig common.Hash, address common.Address, time time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) + LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations, qopts ...pg.QOpt) (*Log, error) + LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs 
[]common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) + LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) + + // Content based querying + IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) + IndexedLogsByBlockRange(start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]Log, error) + IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) + IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) + IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) + IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) + IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) + LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) + LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) + LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) +} + 
+type Confirmations int + +const ( + Finalized = Confirmations(-1) + Unconfirmed = Confirmations(0) +) + +type LogPollerTest interface { + LogPoller + PollAndSaveLogs(ctx context.Context, currentBlockNumber int64) + BackupPollAndSaveLogs(ctx context.Context, backupPollerBlockDelay int64) + Filter(from, to *big.Int, bh *common.Hash) ethereum.FilterQuery + GetReplayFromBlock(ctx context.Context, requested int64) (int64, error) + PruneOldBlocks(ctx context.Context) error +} + +type Client interface { + HeadByNumber(ctx context.Context, n *big.Int) (*evmtypes.Head, error) + HeadByHash(ctx context.Context, n common.Hash) (*evmtypes.Head, error) + BatchCallContext(ctx context.Context, b []rpc.BatchElem) error + FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) + ConfiguredChainID() *big.Int +} + +var ( + _ LogPollerTest = &logPoller{} + ErrReplayRequestAborted = errors.New("aborted, replay request cancelled") + ErrReplayInProgress = errors.New("replay request cancelled, but replay is already in progress") + ErrLogPollerShutdown = errors.New("replay aborted due to log poller shutdown") +) + +type logPoller struct { + services.StateMachine + ec Client + orm ORM + lggr logger.SugaredLogger + pollPeriod time.Duration // poll period set by block production rate + useFinalityTag bool // indicates whether logPoller should use chain's finality or pick a fixed depth for finality + finalityDepth int64 // finality depth is taken to mean that block (head - finality) is finalized. 
If `useFinalityTag` is set to true, this value is ignored, because finalityDepth is fetched from chain + keepFinalizedBlocksDepth int64 // the number of blocks behind the last finalized block we keep in database + backfillBatchSize int64 // batch size to use when backfilling finalized logs + rpcBatchSize int64 // batch size to use for fallback RPC calls made in GetBlocks + backupPollerNextBlock int64 + + filterMu sync.RWMutex + filters map[string]Filter + filterDirty bool + cachedAddresses []common.Address + cachedEventSigs []common.Hash + + replayStart chan int64 + replayComplete chan error + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup +} + +// NewLogPoller creates a log poller. Note there is an assumption +// that blocks can be processed faster than they are produced for the given chain, or the poller will fall behind. +// Block processing involves the following calls in steady state (without reorgs): +// - eth_getBlockByNumber - headers only (transaction hashes, not full transaction objects), +// - eth_getLogs - get the logs for the block +// - 1 db read latest block - for checking reorgs +// - 1 db tx including block write and logs write to logs. 
+// +// How fast that can be done depends largely on network speed and DB, but even for the fastest +// support chain, polygon, which has 2s block times, we need RPCs roughly with <= 500ms latency +func NewLogPoller(orm ORM, ec Client, lggr logger.Logger, pollPeriod time.Duration, + useFinalityTag bool, finalityDepth int64, backfillBatchSize int64, rpcBatchSize int64, keepFinalizedBlocksDepth int64) *logPoller { + ctx, cancel := context.WithCancel(context.Background()) + return &logPoller{ + ctx: ctx, + cancel: cancel, + ec: ec, + orm: orm, + lggr: logger.Sugared(logger.Named(lggr, "LogPoller")), + replayStart: make(chan int64), + replayComplete: make(chan error), + pollPeriod: pollPeriod, + finalityDepth: finalityDepth, + useFinalityTag: useFinalityTag, + backfillBatchSize: backfillBatchSize, + rpcBatchSize: rpcBatchSize, + keepFinalizedBlocksDepth: keepFinalizedBlocksDepth, + filters: make(map[string]Filter), + filterDirty: true, // Always build Filter on first call to cache an empty filter if nothing registered yet. + } +} + +type Filter struct { + Name string // see FilterName(id, args) below + EventSigs evmtypes.HashArray + Addresses evmtypes.AddressArray + Retention time.Duration +} + +// FilterName is a suggested convenience function for clients to construct unique filter names +// to populate Name field of struct Filter +func FilterName(id string, args ...any) string { + if len(args) == 0 { + return id + } + s := &strings.Builder{} + s.WriteString(id) + s.WriteString(" - ") + fmt.Fprintf(s, "%s", args[0]) + for _, a := range args[1:] { + fmt.Fprintf(s, ":%s", a) + } + return s.String() +} + +// Contains returns true if this filter already fully Contains a +// filter passed to it. 
+func (filter *Filter) Contains(other *Filter) bool { + if other == nil { + return true + } + addresses := make(map[common.Address]interface{}) + for _, addr := range filter.Addresses { + addresses[addr] = struct{}{} + } + events := make(map[common.Hash]interface{}) + for _, ev := range filter.EventSigs { + events[ev] = struct{}{} + } + + for _, addr := range other.Addresses { + if _, ok := addresses[addr]; !ok { + return false + } + } + for _, ev := range other.EventSigs { + if _, ok := events[ev]; !ok { + return false + } + } + return true +} + +// RegisterFilter adds the provided EventSigs and Addresses to the log poller's log filter query. +// If any eventSig is emitted from any address, it will be captured by the log poller. +// If an event matching any of the given event signatures is emitted from any of the provided Addresses, +// the log poller will pick those up and save them. For topic specific queries see content based querying. +// Clients may choose to MergeFilter and then Replay in order to ensure desired logs are present. +// NOTE: due to constraints of the eth filter, there is "leakage" between successive MergeFilter calls, for example +// +// RegisterFilter(event1, addr1) +// RegisterFilter(event2, addr2) +// +// will result in the poller saving (event1, addr2) or (event2, addr1) as well, should it exist. +// Generally speaking this is harmless. We enforce that EventSigs and Addresses are non-empty, +// which means that anonymous events are not supported and log.Topics >= 1 always (log.Topics[0] is the event signature). +// The filter may be unregistered later by Filter.Name +// Warnings/debug information is keyed by filter name. 
+func (lp *logPoller) RegisterFilter(filter Filter, qopts ...pg.QOpt) error { + if len(filter.Addresses) == 0 { + return errors.Errorf("at least one address must be specified") + } + if len(filter.EventSigs) == 0 { + return errors.Errorf("at least one event must be specified") + } + + for _, eventSig := range filter.EventSigs { + if eventSig == [common.HashLength]byte{} { + return errors.Errorf("empty event sig") + } + } + for _, addr := range filter.Addresses { + if addr == [common.AddressLength]byte{} { + return errors.Errorf("empty address") + } + } + + lp.filterMu.Lock() + defer lp.filterMu.Unlock() + + if existingFilter, ok := lp.filters[filter.Name]; ok { + if existingFilter.Contains(&filter) { + // Nothing new in this Filter + lp.lggr.Warnw("Filter already present, no-op", "name", filter.Name, "filter", filter) + return nil + } + lp.lggr.Warnw("Updating existing filter with more events or addresses", "name", filter.Name, "filter", filter) + } + + if err := lp.orm.InsertFilter(filter, qopts...); err != nil { + return errors.Wrap(err, "error inserting filter") + } + lp.filters[filter.Name] = filter + lp.filterDirty = true + return nil +} + +// UnregisterFilter will remove the filter with the given name. +// If the name does not exist, it will log an error but not return an error. +// Warnings/debug information is keyed by filter name. +func (lp *logPoller) UnregisterFilter(name string, qopts ...pg.QOpt) error { + lp.filterMu.Lock() + defer lp.filterMu.Unlock() + + _, ok := lp.filters[name] + if !ok { + lp.lggr.Warnw("Filter not found", "name", name) + return nil + } + + if err := lp.orm.DeleteFilter(name, qopts...); err != nil { + return errors.Wrap(err, "error deleting filter") + } + delete(lp.filters, name) + lp.filterDirty = true + return nil +} + +// HasFilter returns true if the log poller has an active filter with the given name. 
+func (lp *logPoller) HasFilter(name string) bool { + lp.filterMu.RLock() + defer lp.filterMu.RUnlock() + + _, ok := lp.filters[name] + return ok +} + +func (lp *logPoller) Filter(from, to *big.Int, bh *common.Hash) ethereum.FilterQuery { + lp.filterMu.Lock() + defer lp.filterMu.Unlock() + if !lp.filterDirty { + return ethereum.FilterQuery{FromBlock: from, ToBlock: to, BlockHash: bh, Topics: [][]common.Hash{lp.cachedEventSigs}, Addresses: lp.cachedAddresses} + } + var ( + addresses []common.Address + eventSigs []common.Hash + addressMp = make(map[common.Address]struct{}) + eventSigMp = make(map[common.Hash]struct{}) + ) + // Merge filters. + for _, filter := range lp.filters { + for _, addr := range filter.Addresses { + addressMp[addr] = struct{}{} + } + for _, eventSig := range filter.EventSigs { + eventSigMp[eventSig] = struct{}{} + } + } + for addr := range addressMp { + addresses = append(addresses, addr) + } + sort.Slice(addresses, func(i, j int) bool { + return bytes.Compare(addresses[i][:], addresses[j][:]) < 0 + }) + for eventSig := range eventSigMp { + eventSigs = append(eventSigs, eventSig) + } + sort.Slice(eventSigs, func(i, j int) bool { + return bytes.Compare(eventSigs[i][:], eventSigs[j][:]) < 0 + }) + if len(eventSigs) == 0 && len(addresses) == 0 { + // If no filter specified, ignore everything. + // This allows us to keep the log poller up and running with no filters present (e.g. no jobs on the node), + // then as jobs are added dynamically start using their filters. + addresses = []common.Address{common.HexToAddress("0x0000000000000000000000000000000000000000")} + eventSigs = []common.Hash{} + } + lp.cachedAddresses = addresses + lp.cachedEventSigs = eventSigs + lp.filterDirty = false + return ethereum.FilterQuery{FromBlock: from, ToBlock: to, BlockHash: bh, Topics: [][]common.Hash{eventSigs}, Addresses: addresses} +} + +// Replay signals that the poller should resume from a new block. +// Blocks until the replay is complete. 
+// Replay can be used to ensure that filter modification has been applied for all blocks from "fromBlock" up to latest. +// If ctx is cancelled before the replay request has been initiated, ErrReplayRequestAborted is returned. If the replay +// is already in progress, the replay will continue and ErrReplayInProgress will be returned. If the client needs a +// guarantee that the replay is complete before proceeding, it should either avoid cancelling or retry until nil is returned +func (lp *logPoller) Replay(ctx context.Context, fromBlock int64) error { + lp.lggr.Debugf("Replaying from block %d", fromBlock) + latest, err := lp.ec.HeadByNumber(ctx, nil) + if err != nil { + return err + } + if fromBlock < 1 || fromBlock > latest.Number { + return errors.Errorf("Invalid replay block number %v, acceptable range [1, %v]", fromBlock, latest.Number) + } + // Block until replay notification accepted or cancelled. + select { + case lp.replayStart <- fromBlock: + case <-ctx.Done(): + return errors.Wrap(ErrReplayRequestAborted, ctx.Err().Error()) + } + // Block until replay complete or cancelled. 
+ select { + case err = <-lp.replayComplete: + return err + case <-ctx.Done(): + // Note: this will not abort the actual replay, it just means the client gave up on waiting for it to complete + lp.wg.Add(1) + go lp.recvReplayComplete() + return ErrReplayInProgress + } +} + +func (lp *logPoller) recvReplayComplete() { + err := <-lp.replayComplete + if err != nil { + lp.lggr.Error(err) + } + lp.wg.Done() +} + +// Asynchronous wrapper for Replay() +func (lp *logPoller) ReplayAsync(fromBlock int64) { + lp.wg.Add(1) + go func() { + if err := lp.Replay(lp.ctx, fromBlock); err != nil { + lp.lggr.Error(err) + } + lp.wg.Done() + }() +} + +func (lp *logPoller) Start(context.Context) error { + return lp.StartOnce("LogPoller", func() error { + lp.wg.Add(1) + go lp.run() + return nil + }) +} + +func (lp *logPoller) Close() error { + return lp.StopOnce("LogPoller", func() error { + select { + case lp.replayComplete <- ErrLogPollerShutdown: + default: + } + lp.cancel() + lp.wg.Wait() + return nil + }) +} + +func (lp *logPoller) Name() string { + return lp.lggr.Name() +} + +func (lp *logPoller) HealthReport() map[string]error { + return map[string]error{lp.Name(): lp.Healthy()} +} + +func (lp *logPoller) GetReplayFromBlock(ctx context.Context, requested int64) (int64, error) { + lastProcessed, err := lp.orm.SelectLatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + // Real DB error + return 0, err + } + // Nothing in db, use requested + return requested, nil + } + // We have lastProcessed, take min(requested, lastProcessed). + // This is to avoid replaying from a block later than what we have in the DB + // and skipping blocks. 
+ return mathutil.Min(requested, lastProcessed.BlockNumber), nil +} + +func (lp *logPoller) run() { + defer lp.wg.Done() + logPollTick := time.After(0) + // stagger these somewhat, so they don't all run back-to-back + backupLogPollTick := time.After(100 * time.Millisecond) + blockPruneTick := time.After(3 * time.Second) + logPruneTick := time.After(5 * time.Second) + filtersLoaded := false + + loadFilters := func() error { + lp.filterMu.Lock() + defer lp.filterMu.Unlock() + filters, err := lp.orm.LoadFilters(pg.WithParentCtx(lp.ctx)) + + if err != nil { + return errors.Wrapf(err, "Failed to load initial filters from db, retrying") + } + + lp.filters = filters + lp.filterDirty = true + filtersLoaded = true + return nil + } + + for { + select { + case <-lp.ctx.Done(): + return + case fromBlockReq := <-lp.replayStart: + fromBlock, err := lp.GetReplayFromBlock(lp.ctx, fromBlockReq) + if err == nil { + if !filtersLoaded { + lp.lggr.Warnw("Received replayReq before filters loaded", "fromBlock", fromBlock, "requested", fromBlockReq) + if err = loadFilters(); err != nil { + lp.lggr.Errorw("Failed loading filters during Replay", "err", err, "fromBlock", fromBlock) + } + } + if err == nil { + // Serially process replay requests. 
+ lp.lggr.Infow("Executing replay", "fromBlock", fromBlock, "requested", fromBlockReq) + lp.PollAndSaveLogs(lp.ctx, fromBlock) + lp.lggr.Infow("Executing replay finished", "fromBlock", fromBlock, "requested", fromBlockReq) + } + } else { + lp.lggr.Errorw("Error executing replay, could not get fromBlock", "err", err) + } + select { + case <-lp.ctx.Done(): + // We're shutting down, notify client and exit + select { + case lp.replayComplete <- ErrReplayRequestAborted: + default: + } + return + case lp.replayComplete <- err: + } + case <-logPollTick: + logPollTick = time.After(utils.WithJitter(lp.pollPeriod)) + if !filtersLoaded { + if err := loadFilters(); err != nil { + lp.lggr.Errorw("Failed loading filters in main logpoller loop, retrying later", "err", err) + continue + } + } + + // Always start from the latest block in the db. + var start int64 + lastProcessed, err := lp.orm.SelectLatestBlock(pg.WithParentCtx(lp.ctx)) + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + // Assume transient db reading issue, retry forever. + lp.lggr.Errorw("unable to get starting block", "err", err) + continue + } + // Otherwise this is the first poll _ever_ on a new chain. + // Only safe thing to do is to start at the first finalized block. + latestBlock, latestFinalizedBlockNumber, err := lp.latestBlocks(lp.ctx) + if err != nil { + lp.lggr.Warnw("Unable to get latest for first poll", "err", err) + continue + } + // Do not support polling chains which don't even have finality depth worth of blocks. + // Could conceivably support this but not worth the effort. + // Need last finalized block number to be higher than 0 + if latestFinalizedBlockNumber <= 0 { + lp.lggr.Warnw("Insufficient number of blocks on chain, waiting for finality depth", "err", err, "latest", latestBlock.Number) + continue + } + // Starting at the first finalized block. We do not backfill the first finalized block. 
+ start = latestFinalizedBlockNumber + } else { + start = lastProcessed.BlockNumber + 1 + } + lp.PollAndSaveLogs(lp.ctx, start) + case <-backupLogPollTick: + // Backup log poller: this serves as an emergency backup to protect against eventual-consistency behavior + // of an rpc node (seen occasionally on optimism, but possibly could happen on other chains?). If the first + // time we request a block, no logs or incomplete logs come back, this ensures that every log is eventually + // re-requested after it is finalized. This doesn't add much overhead, because we can request all of them + // in one shot, since we don't need to worry about re-orgs after finality depth, and it runs 100x less + // frequently than the primary log poller. + + // If pollPeriod is set to 1 block time, backup log poller will run once every 100 blocks + const backupPollerBlockDelay = 100 + + backupLogPollTick = time.After(utils.WithJitter(backupPollerBlockDelay * lp.pollPeriod)) + if !filtersLoaded { + lp.lggr.Warnw("Backup log poller ran before filters loaded, skipping") + continue + } + lp.BackupPollAndSaveLogs(lp.ctx, backupPollerBlockDelay) + case <-blockPruneTick: + blockPruneTick = time.After(utils.WithJitter(lp.pollPeriod * 1000)) + if err := lp.PruneOldBlocks(lp.ctx); err != nil { + lp.lggr.Errorw("Unable to prune old blocks", "err", err) + } + case <-logPruneTick: + logPruneTick = time.After(utils.WithJitter(lp.pollPeriod * 2401)) // = 7^5 avoids common factors with 1000 + if err := lp.orm.DeleteExpiredLogs(pg.WithParentCtx(lp.ctx)); err != nil { + lp.lggr.Error(err) + } + } + } +} + +func (lp *logPoller) BackupPollAndSaveLogs(ctx context.Context, backupPollerBlockDelay int64) { + if lp.backupPollerNextBlock == 0 { + lastProcessed, err := lp.orm.SelectLatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + lp.lggr.Warnw("Backup log poller ran before first successful log poller run, skipping") + } else { + lp.lggr.Errorw("Backup log poller unable to 
get starting block", "err", err) + } + return + } + // If this is our first run, start from block min(lastProcessed.FinalizedBlockNumber-1, lastProcessed.BlockNumber-backupPollerBlockDelay) + backupStartBlock := mathutil.Min(lastProcessed.FinalizedBlockNumber-1, lastProcessed.BlockNumber-backupPollerBlockDelay) + // (or at block 0 if whole blockchain is too short) + lp.backupPollerNextBlock = mathutil.Max(backupStartBlock, 0) + } + + _, latestFinalizedBlockNumber, err := lp.latestBlocks(ctx) + if err != nil { + lp.lggr.Warnw("Backup logpoller failed to get latest block", "err", err) + return + } + + lastSafeBackfillBlock := latestFinalizedBlockNumber - 1 + if lastSafeBackfillBlock >= lp.backupPollerNextBlock { + lp.lggr.Infow("Backup poller started backfilling logs", "start", lp.backupPollerNextBlock, "end", lastSafeBackfillBlock) + if err = lp.backfill(ctx, lp.backupPollerNextBlock, lastSafeBackfillBlock); err != nil { + // If there's an error backfilling, we can just return and retry from the last block saved + // since we don't save any blocks on backfilling. We may re-insert the same logs but thats ok. + lp.lggr.Warnw("Backup poller failed", "err", err) + return + } + lp.lggr.Infow("Backup poller finished backfilling", "start", lp.backupPollerNextBlock, "end", lastSafeBackfillBlock) + lp.backupPollerNextBlock = lastSafeBackfillBlock + 1 + } +} + +// convertLogs converts an array of geth logs ([]type.Log) to an array of logpoller logs ([]Log) +// +// Block timestamps are extracted from blocks param. If len(blocks) == 1, the same timestamp from this block +// will be used for all logs. If len(blocks) == len(logs) then the block number of each block is used for the +// corresponding log. Any other length for blocks is invalid. 
+func convertLogs(logs []types.Log, blocks []LogPollerBlock, lggr logger.Logger, chainID *big.Int) []Log { + var lgs []Log + blockTimestamp := time.Now() + if len(logs) == 0 { + return lgs + } + if len(blocks) != 1 && len(blocks) != len(logs) { + lggr.Errorf("AssumptionViolation: invalid params passed to convertLogs, length of blocks must either be 1 or match length of logs") + return lgs + } + + for i, l := range logs { + if i == 0 || len(blocks) == len(logs) { + blockTimestamp = blocks[i].BlockTimestamp + } + lgs = append(lgs, Log{ + EvmChainId: ubig.New(chainID), + LogIndex: int64(l.Index), + BlockHash: l.BlockHash, + // We assume block numbers fit in int64 + // in many places. + BlockNumber: int64(l.BlockNumber), + BlockTimestamp: blockTimestamp, + EventSig: l.Topics[0], // First topic is always event signature. + Topics: convertTopics(l.Topics), + Address: l.Address, + TxHash: l.TxHash, + Data: l.Data, + }) + } + return lgs +} + +func convertTopics(topics []common.Hash) [][]byte { + var topicsForDB [][]byte + for _, t := range topics { + topicsForDB = append(topicsForDB, t.Bytes()) + } + return topicsForDB +} + +func (lp *logPoller) blocksFromLogs(ctx context.Context, logs []types.Log) (blocks []LogPollerBlock, err error) { + var numbers []uint64 + for _, log := range logs { + numbers = append(numbers, log.BlockNumber) + } + + return lp.GetBlocksRange(ctx, numbers) +} + +const jsonRpcLimitExceeded = -32005 // See https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1474.md + +// backfill will query FilterLogs in batches for logs in the +// block range [start, end] and save them to the db. +// Retries until ctx cancelled. Will return an error if cancelled +// or if there is an error backfilling. 
func (lp *logPoller) backfill(ctx context.Context, start, end int64) error {
	batchSize := lp.backfillBatchSize
	for from := start; from <= end; from += batchSize {
		to := mathutil.Min(from+batchSize-1, end)
		gethLogs, err := lp.ec.FilterLogs(ctx, lp.Filter(big.NewInt(from), big.NewInt(to), nil))
		if err != nil {
			var rpcErr client.JsonError
			if errors.As(err, &rpcErr) {
				// Only the "limit exceeded" RPC error is recoverable by shrinking the batch;
				// anything else is a hard failure for this backfill attempt.
				if rpcErr.Code != jsonRpcLimitExceeded {
					lp.lggr.Errorw("Unable to query for logs", "err", err, "from", from, "to", to)
					return err
				}
			}
			if batchSize == 1 {
				// Even a single block exceeds the node's result limit; nothing more we can do.
				lp.lggr.Criticalw("Too many log results in a single block, failed to retrieve logs! Node may be running in a degraded state.", "err", err, "from", from, "to", to, "LogBackfillBatchSize", lp.backfillBatchSize)
				return err
			}
			batchSize /= 2
			lp.lggr.Warnw("Too many log results, halving block range batch size.  Consider increasing LogBackfillBatchSize if this happens frequently", "err", err, "from", from, "to", to, "newBatchSize", batchSize, "LogBackfillBatchSize", lp.backfillBatchSize)
			from -= batchSize // counteract +=batchSize on next loop iteration, so starting block does not change
			continue
		}
		if len(gethLogs) == 0 {
			continue
		}
		blocks, err := lp.blocksFromLogs(ctx, gethLogs)
		if err != nil {
			return err
		}

		lp.lggr.Debugw("Backfill found logs", "from", from, "to", to, "logs", len(gethLogs), "blocks", blocks)
		// Log insertion is idempotent, so re-running a batch after a partial failure is safe.
		err = lp.orm.InsertLogsWithBlock(convertLogs(gethLogs, blocks, lp.lggr, lp.ec.ConfiguredChainID()), blocks[len(blocks)-1], pg.WithParentCtx(ctx))
		if err != nil {
			lp.lggr.Warnw("Unable to insert logs, retrying", "err", err, "from", from, "to", to)
			return err
		}
	}
	return nil
}

// getCurrentBlockMaybeHandleReorg accepts a block number
// and will return that block if its parent points to our last saved block.
// One can optionally pass the block header if it has already been queried to avoid an extra RPC call.
+// If its parent does not point to our last saved block we know a reorg has occurred, +// so we: +// 1. Find the LCA by following parent hashes. +// 2. Delete all logs and blocks after the LCA +// 3. Return the LCA+1, i.e. our new current (unprocessed) block. +func (lp *logPoller) getCurrentBlockMaybeHandleReorg(ctx context.Context, currentBlockNumber int64, currentBlock *evmtypes.Head) (*evmtypes.Head, error) { + var err1 error + if currentBlock == nil { + // If we don't have the current block already, lets get it. + currentBlock, err1 = lp.ec.HeadByNumber(ctx, big.NewInt(currentBlockNumber)) + if err1 != nil { + lp.lggr.Warnw("Unable to get currentBlock", "err", err1, "currentBlockNumber", currentBlockNumber) + return nil, err1 + } + // Additional sanity checks, don't necessarily trust the RPC. + if currentBlock == nil { + lp.lggr.Errorf("Unexpected nil block from RPC", "currentBlockNumber", currentBlockNumber) + return nil, errors.Errorf("Got nil block for %d", currentBlockNumber) + } + if currentBlock.Number != currentBlockNumber { + lp.lggr.Warnw("Unable to get currentBlock, rpc returned incorrect block", "currentBlockNumber", currentBlockNumber, "got", currentBlock.Number) + return nil, errors.Errorf("Block mismatch have %d want %d", currentBlock.Number, currentBlockNumber) + } + } + // Does this currentBlock point to the same parent that we have saved? + // If not, there was a reorg, so we need to rewind. + expectedParent, err1 := lp.orm.SelectBlockByNumber(currentBlockNumber-1, pg.WithParentCtx(ctx)) + if err1 != nil && !errors.Is(err1, sql.ErrNoRows) { + // If err is not a 'no rows' error, assume transient db issue and retry + lp.lggr.Warnw("Unable to read latestBlockNumber currentBlock saved", "err", err1, "currentBlockNumber", currentBlockNumber) + return nil, errors.New("Unable to read latestBlockNumber currentBlock saved") + } + // We will not have the previous currentBlock on initial poll. 
+ havePreviousBlock := err1 == nil + if !havePreviousBlock { + lp.lggr.Infow("Do not have previous block, first poll ever on new chain or after backfill", "currentBlockNumber", currentBlockNumber) + return currentBlock, nil + } + // Check for reorg. + if currentBlock.ParentHash != expectedParent.BlockHash { + // There can be another reorg while we're finding the LCA. + // That is ok, since we'll detect it on the next iteration. + // Since we go currentBlock by currentBlock for unfinalized logs, the mismatch starts at currentBlockNumber - 1. + blockAfterLCA, err2 := lp.findBlockAfterLCA(ctx, currentBlock, expectedParent.FinalizedBlockNumber) + if err2 != nil { + lp.lggr.Warnw("Unable to find LCA after reorg, retrying", "err", err2) + return nil, errors.New("Unable to find LCA after reorg, retrying") + } + + lp.lggr.Infow("Reorg detected", "blockAfterLCA", blockAfterLCA.Number, "currentBlockNumber", currentBlockNumber) + // We truncate all the blocks and logs after the LCA. + // We could preserve the logs for forensics, since its possible + // that applications see them and take action upon it, however that + // results in significantly slower reads since we must then compute + // the canonical set per read. Typically, if an application took action on a log + // it would be saved elsewhere e.g. evm.txes, so it seems better to just support the fast reads. + // Its also nicely analogous to reading from the chain itself. + err2 = lp.orm.DeleteLogsAndBlocksAfter(blockAfterLCA.Number, pg.WithParentCtx(ctx)) + if err2 != nil { + // If we error on db commit, we can't know if the tx went through or not. + // We return an error here which will cause us to restart polling from lastBlockSaved + 1 + return nil, err2 + } + return blockAfterLCA, nil + } + // No reorg, return current block. + return currentBlock, nil +} + +// PollAndSaveLogs On startup/crash current is the first block after the last processed block. 
// PollAndSaveLogs polls the chain for logs starting at currentBlockNumber and saves
// them (together with their blocks, for reorg detection) via the ORM.
// currentBlockNumber is the block from where new logs are to be polled & saved. Under normal
// conditions this would be equal to lastProcessed.BlockNumber + 1.
//
// On any RPC or DB error the method simply returns; the caller's run loop retries from
// the last saved block. Log insertion is idempotent, so partial progress is safe.
func (lp *logPoller) PollAndSaveLogs(ctx context.Context, currentBlockNumber int64) {
	lp.lggr.Debugw("Polling for logs", "currentBlockNumber", currentBlockNumber)
	// Intentionally not using logPoller.finalityDepth directly but the latestFinalizedBlockNumber returned from lp.latestBlocks()
	// latestBlocks knows how to pick a proper latestFinalizedBlockNumber based on the logPoller's configuration
	latestBlock, latestFinalizedBlockNumber, err := lp.latestBlocks(ctx)
	if err != nil {
		lp.lggr.Warnw("Unable to get latestBlockNumber block", "err", err, "currentBlockNumber", currentBlockNumber)
		return
	}
	latestBlockNumber := latestBlock.Number
	if currentBlockNumber > latestBlockNumber {
		// Note there can also be a reorg "shortening" i.e. chain height decreases but TDD increases. In that case
		// we also just wait until the new tip is longer and then detect the reorg.
		lp.lggr.Debugw("No new blocks since last poll", "currentBlockNumber", currentBlockNumber, "latestBlockNumber", latestBlockNumber)
		return
	}
	var currentBlock *evmtypes.Head
	if currentBlockNumber == latestBlockNumber {
		// Can re-use our currentBlock and avoid an extra RPC call.
		currentBlock = latestBlock
	}
	// Possibly handle a reorg. For example if we crash, we'll be in the middle of processing unfinalized blocks.
	// Returns (currentBlock || LCA+1 if reorg detected, error)
	currentBlock, err = lp.getCurrentBlockMaybeHandleReorg(ctx, currentBlockNumber, currentBlock)
	if err != nil {
		// If there's an error handling the reorg, we can't be sure what state the db was left in.
		// Resume from the latest block saved and retry.
		lp.lggr.Errorw("Unable to get current block, retrying", "err", err)
		return
	}
	currentBlockNumber = currentBlock.Number

	// backfill finalized blocks if we can for performance. If we crash during backfill, we
	// may reprocess logs. Log insertion is idempotent so this is ok.
	// E.g. 1<-2<-3(currentBlockNumber)<-4<-5<-6<-7(latestBlockNumber), finality is 2. So 3,4 can be batched.
	// Although 5 is finalized, we still need to save it to the db for reorg detection if 6 is a reorg.
	// start = currentBlockNumber = 3, end = latestBlockNumber - finality - 1 = 7-2-1 = 4 (inclusive range).
	lastSafeBackfillBlock := latestFinalizedBlockNumber - 1
	if lastSafeBackfillBlock >= currentBlockNumber {
		lp.lggr.Infow("Backfilling logs", "start", currentBlockNumber, "end", lastSafeBackfillBlock)
		if err = lp.backfill(ctx, currentBlockNumber, lastSafeBackfillBlock); err != nil {
			// If there's an error backfilling, we can just return and retry from the last block saved
			// since we don't save any blocks on backfilling. We may re-insert the same logs but that's ok.
			lp.lggr.Warnw("Unable to backfill finalized logs, retrying later", "err", err)
			return
		}
		currentBlockNumber = lastSafeBackfillBlock + 1
	}

	if currentBlockNumber > currentBlock.Number {
		// If we successfully backfilled we have logs up to and including lastSafeBackfillBlock,
		// now load the first unfinalized block.
		currentBlock, err = lp.getCurrentBlockMaybeHandleReorg(ctx, currentBlockNumber, nil)
		if err != nil {
			// If there's an error handling the reorg, we can't be sure what state the db was left in.
			// Resume from the latest block saved.
			lp.lggr.Errorw("Unable to get current block", "err", err)
			return
		}
	}

	// Process unfinalized blocks one at a time, saving the block alongside its logs so
	// reorgs against these blocks can be detected on subsequent polls.
	for {
		h := currentBlock.Hash
		var logs []types.Log
		logs, err = lp.ec.FilterLogs(ctx, lp.Filter(nil, nil, &h))
		if err != nil {
			lp.lggr.Warnw("Unable to query for logs, retrying", "err", err, "block", currentBlockNumber)
			return
		}
		lp.lggr.Debugw("Unfinalized log query", "logs", len(logs), "currentBlockNumber", currentBlockNumber, "blockHash", currentBlock.Hash, "timestamp", currentBlock.Timestamp.Unix())
		block := NewLogPollerBlock(h, currentBlockNumber, currentBlock.Timestamp, latestFinalizedBlockNumber)
		err = lp.orm.InsertLogsWithBlock(
			convertLogs(logs, []LogPollerBlock{block}, lp.lggr, lp.ec.ConfiguredChainID()),
			block,
		)
		if err != nil {
			lp.lggr.Warnw("Unable to save logs resuming from last saved block + 1", "err", err, "block", currentBlockNumber)
			return
		}
		// Update current block.
		// Same reorg detection on unfinalized blocks.
		currentBlockNumber++
		if currentBlockNumber > latestBlockNumber {
			break
		}
		currentBlock, err = lp.getCurrentBlockMaybeHandleReorg(ctx, currentBlockNumber, nil)
		if err != nil {
			// If there's an error handling the reorg, we can't be sure what state the db was left in.
			// Resume from the latest block saved.
			lp.lggr.Errorw("Unable to get current block", "err", err)
			return
		}
		currentBlockNumber = currentBlock.Number
	}
}

// latestBlocks returns information about latestBlock, latestFinalizedBlockNumber
// If finality tag is not enabled, latestFinalizedBlockNumber is calculated as latestBlockNumber - lp.finalityDepth (configured param)
// Otherwise, we return last finalized block number returned from chain
func (lp *logPoller) latestBlocks(ctx context.Context) (*evmtypes.Head, int64, error) {
	// If finality is not enabled, we only need to fetch the latest block
	if !lp.useFinalityTag {
		// Example:
		// finalityDepth = 2
		// Blocks: 1->2->3->4->5(latestBlock)
		// latestFinalizedBlockNumber would be 3
		latestBlock, err := lp.ec.HeadByNumber(ctx, nil)
		if err != nil {
			return nil, 0, err
		}
		// If chain has fewer blocks than finalityDepth, return 0
		return latestBlock, mathutil.Max(latestBlock.Number-lp.finalityDepth, 0), nil
	}

	// If finality is enabled, we need to get the latest and finalized blocks.
	// NOTE: relies on batchFetchBlocks preserving request order (blocks[0]=latest, blocks[1]=finalized).
	blocks, err := lp.batchFetchBlocks(ctx, []string{rpc.LatestBlockNumber.String(), rpc.FinalizedBlockNumber.String()}, 2)
	if err != nil {
		return nil, 0, err
	}
	latest := blocks[0]
	finalized := blocks[1]
	return latest, finalized.Number, nil
}

// findBlockAfterLCA finds the first place where our chain and their chain have the same block;
// that block number is the LCA (last common ancestor). Return the block after that, where we
// want to resume polling.
func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.Head, latestFinalizedBlockNumber int64) (*evmtypes.Head, error) {
	// Current is where the mismatch starts.
	// Check its parent to see if it's the same as ours saved.
	parent, err := lp.ec.HeadByHash(ctx, current.ParentHash)
	if err != nil {
		return nil, err
	}
	blockAfterLCA := *current
	// We expect reorgs up to the block after latestFinalizedBlock
	// We loop via parent instead of current so current always holds the LCA+1.
	// If the parent block number becomes < the first finalized block our reorg is too deep.
	// This can happen only if finalityTag is not enabled and fixed finalityDepth is provided via config.
	for parent.Number >= latestFinalizedBlockNumber {
		ourParentBlockHash, err := lp.orm.SelectBlockByNumber(parent.Number, pg.WithParentCtx(ctx))
		if err != nil {
			return nil, err
		}
		if parent.Hash == ourParentBlockHash.BlockHash {
			// If we do have the blockhash, return blockAfterLCA
			return &blockAfterLCA, nil
		}
		// Otherwise get a new parent and update blockAfterLCA.
		blockAfterLCA = *parent
		parent, err = lp.ec.HeadByHash(ctx, parent.ParentHash)
		if err != nil {
			return nil, err
		}
	}
	// Reorg deeper than finality: unrecoverable automatically — surface loudly and push to the
	// service error buffer so operators are alerted.
	lp.lggr.Criticalw("Reorg greater than finality depth detected", "finalityTag", lp.useFinalityTag, "current", current.Number, "latestFinalized", latestFinalizedBlockNumber)
	rerr := errors.New("Reorg greater than finality depth")
	lp.SvcErrBuffer.Append(rerr)
	return nil, rerr
}

// PruneOldBlocks removes blocks that are > lp.keepFinalizedBlocksDepth behind the latest finalized block.
func (lp *logPoller) PruneOldBlocks(ctx context.Context) error {
	latestBlock, err := lp.orm.SelectLatestBlock(pg.WithParentCtx(ctx))
	if err != nil {
		return err
	}
	if latestBlock == nil {
		// No blocks saved yet.
		return nil
	}
	if latestBlock.FinalizedBlockNumber <= lp.keepFinalizedBlocksDepth {
		// No-op, keep all blocks
		return nil
	}
	// 1-2-3-4-5(finalized)-6-7(latest), keepFinalizedBlocksDepth=3
	// Remove <= 2
	return lp.orm.DeleteBlocksBefore(latestBlock.FinalizedBlockNumber-lp.keepFinalizedBlocksDepth, pg.WithParentCtx(ctx))
}

// Logs returns logs matching topics and address (exactly) in the given block range,
// which are canonical at time of query.
func (lp *logPoller) Logs(start, end int64, eventSig common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectLogs(start, end, address, eventSig, qopts...)
}

// LogsWithSigs returns logs in the [start, end] block range for address matching any of eventSigs.
func (lp *logPoller) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectLogsWithSigs(start, end, address, eventSigs, qopts...)
}

// LogsCreatedAfter returns logs for address/eventSig created after the given time,
// with at least confs confirmations.
func (lp *logPoller) LogsCreatedAfter(eventSig common.Hash, address common.Address, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectLogsCreatedAfter(address, eventSig, after, confs, qopts...)
}

// IndexedLogs finds all the logs that have a topic value in topicValues at index topicIndex.
func (lp *logPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectIndexedLogs(address, eventSig, topicIndex, topicValues, confs, qopts...)
}

// IndexedLogsByBlockRange finds all the logs that have a topic value in topicValues at index topicIndex within the block range
func (lp *logPoller) IndexedLogsByBlockRange(start, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectIndexedLogsByBlockRange(start, end, address, eventSig, topicIndex, topicValues, qopts...)
}

// IndexedLogsCreatedAfter is IndexedLogs further restricted to logs created after the given time.
func (lp *logPoller) IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectIndexedLogsCreatedAfter(address, eventSig, topicIndex, topicValues, after, confs, qopts...)
}

// IndexedLogsByTxHash returns logs for address/eventSig emitted by the given transaction.
func (lp *logPoller) IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectIndexedLogsByTxHash(address, eventSig, txHash, qopts...)
}

// LogsDataWordGreaterThan note index is 0 based.
func (lp *logPoller) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectLogsDataWordGreaterThan(address, eventSig, wordIndex, wordValueMin, confs, qopts...)
}

// LogsDataWordRange note index is 0 based.
func (lp *logPoller) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectLogsDataWordRange(address, eventSig, wordIndex, wordValueMin, wordValueMax, confs, qopts...)
}

// IndexedLogsTopicGreaterThan finds all the logs that have a topic value greater than topicValueMin at index topicIndex.
// Only works for integer topics.
func (lp *logPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectIndexedLogsTopicGreaterThan(address, eventSig, topicIndex, topicValueMin, confs, qopts...)
}

// IndexedLogsTopicRange finds logs whose topic at topicIndex lies in [topicValueMin, topicValueMax].
func (lp *logPoller) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectIndexedLogsTopicRange(address, eventSig, topicIndex, topicValueMin, topicValueMax, confs, qopts...)
}

// LatestBlock returns the latest block the log poller is on. It tracks blocks to be able
// to detect reorgs.
func (lp *logPoller) LatestBlock(qopts ...pg.QOpt) (LogPollerBlock, error) {
	b, err := lp.orm.SelectLatestBlock(qopts...)
	if err != nil {
		return LogPollerBlock{}, err
	}

	return *b, nil
}

// BlockByNumber returns the saved block for the given block number, if any.
func (lp *logPoller) BlockByNumber(n int64, qopts ...pg.QOpt) (*LogPollerBlock, error) {
	return lp.orm.SelectBlockByNumber(n, qopts...)
}

// LatestLogByEventSigWithConfs finds the latest log that has confs number of blocks on top of the log.
func (lp *logPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations, qopts ...pg.QOpt) (*Log, error) {
	return lp.orm.SelectLatestLogByEventSigWithConfs(eventSig, address, confs, qopts...)
}

// LatestLogEventSigsAddrsWithConfs returns the latest log for each (eventSig, address) pair from fromBlock onwards.
// NOTE(review): addresses/eventSigs are passed to the ORM in the opposite order to
// LatestBlockByEventSigsAddrsWithConfs below — verify both match their ORM signatures.
func (lp *logPoller) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectLatestLogEventSigsAddrsWithConfs(fromBlock, addresses, eventSigs, confs, qopts...)
}

// LatestBlockByEventSigsAddrsWithConfs returns the highest block number containing a matching log
// from fromBlock onwards, with at least confs confirmations.
func (lp *logPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) {
	return lp.orm.SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock, eventSigs, addresses, confs, qopts...)
}

// LogsDataWordBetween retrieves a slice of Log records that match specific criteria.
// Besides generic filters like eventSig, address and confs, it also verifies data content against wordValue
// data[wordIndexMin] <= wordValue <= data[wordIndexMax].
//
// Passing the same value for wordIndexMin and wordIndexMax will check the equality of the wordValue at that index.
// Leading to returning logs matching: data[wordIndexMin] == wordValue.
//
// This function is particularly useful for filtering logs by data word values and their positions within the event data.
// It returns an empty slice if no logs match the provided criteria.
func (lp *logPoller) LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin, wordIndexMax int, wordValue common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectLogsDataWordBetween(address, eventSig, wordIndexMin, wordIndexMax, wordValue, confs, qopts...)
}

// GetBlocksRange tries to get the specified block numbers from the log pollers
// blocks table. It falls back to the RPC for any unfulfilled requested blocks.
// The returned slice is in the same order as numbers; an error is returned if any
// requested block could be found neither in the DB nor via RPC.
func (lp *logPoller) GetBlocksRange(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]LogPollerBlock, error) {
	var blocks []LogPollerBlock

	// Do nothing if no blocks are requested.
	if len(numbers) == 0 {
		return blocks, nil
	}

	// Assign the requested blocks to a mapping.
	blocksRequested := make(map[uint64]struct{})
	for _, b := range numbers {
		blocksRequested[b] = struct{}{}
	}

	// Retrieve all blocks within this range from the log poller.
	// A DB error here is non-fatal: we log and fall back to the RPC below.
	blocksFound := make(map[uint64]LogPollerBlock)
	qopts = append(qopts, pg.WithParentCtx(ctx))
	minRequestedBlock := int64(mathutil.Min(numbers[0], numbers[1:]...))
	maxRequestedBlock := int64(mathutil.Max(numbers[0], numbers[1:]...))
	lpBlocks, err := lp.orm.GetBlocksRange(minRequestedBlock, maxRequestedBlock, qopts...)
	if err != nil {
		lp.lggr.Warnw("Error while retrieving blocks from log pollers blocks table. Falling back to RPC...", "requestedBlocks", numbers, "err", err)
	} else {
		for _, b := range lpBlocks {
			if _, ok := blocksRequested[uint64(b.BlockNumber)]; ok {
				// Only fill requested blocks.
				blocksFound[uint64(b.BlockNumber)] = b
			}
		}
		lp.lggr.Debugw("Got blocks from log poller", "blockNumbers", maps.Keys(blocksFound))
	}

	// Fill any remaining blocks from the client.
	blocksFoundFromRPC, err := lp.fillRemainingBlocksFromRPC(ctx, blocksRequested, blocksFound)
	if err != nil {
		return nil, err
	}
	for num, b := range blocksFoundFromRPC {
		blocksFound[num] = b
	}

	// Re-assemble results in the caller's requested order, tracking anything still missing.
	var blocksNotFound []uint64
	for _, num := range numbers {
		b, ok := blocksFound[num]
		if !ok {
			blocksNotFound = append(blocksNotFound, num)
		}
		blocks = append(blocks, b)
	}

	if len(blocksNotFound) > 0 {
		return nil, errors.Errorf("blocks were not found in db or RPC call: %v", blocksNotFound)
	}

	return blocks, nil
}

// fillRemainingBlocksFromRPC fetches, via batched RPC, every block in blocksRequested
// that is not already present in blocksFound, returning them keyed by block number.
func (lp *logPoller) fillRemainingBlocksFromRPC(
	ctx context.Context,
	blocksRequested map[uint64]struct{},
	blocksFound map[uint64]LogPollerBlock,
) (map[uint64]LogPollerBlock, error) {
	var remainingBlocks []string
	for num := range blocksRequested {
		if _, ok := blocksFound[num]; !ok {
			remainingBlocks = append(remainingBlocks, hexutil.EncodeBig(new(big.Int).SetUint64(num)))
		}
	}

	if len(remainingBlocks) > 0 {
		lp.lggr.Debugw("Falling back to RPC for blocks not found in log poller blocks table",
			"remainingBlocks", remainingBlocks)
	}

	evmBlocks, err := lp.batchFetchBlocks(ctx, remainingBlocks, lp.rpcBatchSize)
	if err != nil {
		return nil, err
	}

	logPollerBlocks := make(map[uint64]LogPollerBlock)
	for _, head := range evmBlocks {
		logPollerBlocks[uint64(head.Number)] = LogPollerBlock{
			EvmChainId:     head.EVMChainID,
			BlockHash:      head.Hash,
			BlockNumber:    head.Number,
			BlockTimestamp: head.Timestamp,
			CreatedAt:      head.Timestamp,
		}
	}
	return logPollerBlocks, nil
}

// batchFetchBlocks fetches the given blocks (hex block numbers or "latest"/"finalized" tags)
// via eth_getBlockByNumber in batches of batchSize, validating each returned head.
// Results are returned in the same order as blocksRequested.
func (lp *logPoller) batchFetchBlocks(ctx context.Context, blocksRequested []string, batchSize int64) ([]*evmtypes.Head, error) {
	reqs := make([]rpc.BatchElem, 0, len(blocksRequested))
	for _, num := range blocksRequested {
		req := rpc.BatchElem{
			Method: "eth_getBlockByNumber",
			Args:   []interface{}{num, false}, // false: transaction bodies not needed
			Result: &evmtypes.Head{},
		}
		reqs = append(reqs, req)
	}

	// Issue the requests in chunks of batchSize to bound each RPC payload.
	for i := 0; i < len(reqs); i += int(batchSize) {
		j := i + int(batchSize)
		if j > len(reqs) {
			j = len(reqs)
		}

		err := lp.ec.BatchCallContext(ctx, reqs[i:j])
		if err != nil {
			return nil, err
		}
	}

	// Validate every element: any per-element error, nil block, zero hash or negative
	// number fails the whole fetch.
	var blocks = make([]*evmtypes.Head, 0, len(reqs))
	for _, r := range reqs {
		if r.Error != nil {
			return nil, r.Error
		}
		block, is := r.Result.(*evmtypes.Head)

		if !is {
			return nil, errors.Errorf("expected result to be a %T, got %T", &evmtypes.Head{}, r.Result)
		}
		if block == nil {
			return nil, errors.New("invariant violation: got nil block")
		}
		if block.Hash == (common.Hash{}) {
			return nil, errors.Errorf("missing block hash for block number: %d", block.Number)
		}
		if block.Number < 0 {
			return nil, errors.Errorf("expected block number to be >= to 0, got %d", block.Number)
		}
		blocks = append(blocks, block)
	}

	return blocks, nil
}

// IndexedLogsWithSigsExcluding returns the set difference(A-B) of logs with signature sigA and sigB, matching is done on the topics index
//
// For example, query to retrieve unfulfilled requests by querying request log events without matching fulfillment log events.
// The order of events is not significant. Both logs must be inside the block range and have the minimum number of confirmations
func (lp *logPoller) IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) {
	return lp.orm.SelectIndexedLogsWithSigsExcluding(eventSigA, eventSigB, topicIndex, address, fromBlock, toBlock, confs, qopts...)
+} + +func EvmWord(i uint64) common.Hash { + var b = make([]byte, 8) + binary.BigEndian.PutUint64(b, i) + return common.BytesToHash(b) +} diff --git a/core/chains/evm/logpoller/log_poller_internal_test.go b/core/chains/evm/logpoller/log_poller_internal_test.go new file mode 100644 index 00000000..4d33e6fa --- /dev/null +++ b/core/chains/evm/logpoller/log_poller_internal_test.go @@ -0,0 +1,536 @@ +package logpoller + +import ( + "context" + "fmt" + "math/big" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_emitter" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +var ( + EmitterABI, _ = abi.JSON(strings.NewReader(log_emitter.LogEmitterABI)) +) + +// Validate that filters stored in log_filters_table match the filters stored in memory +func validateFiltersTable(t *testing.T, lp *logPoller, orm *DbORM) { + filters, err := orm.LoadFilters() + require.NoError(t, err) + require.Equal(t, len(filters), len(lp.filters)) + for name, dbFilter := range filters { + dbFilter := dbFilter + memFilter, ok := lp.filters[name] + require.True(t, ok) + 
assert.Truef(t, memFilter.Contains(&dbFilter), + "in-memory Filter %s is missing some addresses or events from db Filter table", name) + assert.Truef(t, dbFilter.Contains(&memFilter), "db Filter table %s is missing some addresses or events from in-memory Filter", name) + } +} + +func TestLogPoller_RegisterFilter(t *testing.T) { + t.Parallel() + a1 := common.HexToAddress("0x2ab9a2dc53736b361b72d900cdf9f78f9406fbbb") + a2 := common.HexToAddress("0x2ab9a2dc53736b361b72d900cdf9f78f9406fbbc") + + lggr, observedLogs := logger.TestObserved(t, zapcore.WarnLevel) + chainID := testutils.NewRandomEVMChainID() + db := pgtest.NewSqlxDB(t) + + orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + + // Set up a test chain with a log emitting contract deployed. + lp := NewLogPoller(orm, nil, lggr, time.Hour, false, 1, 1, 2, 1000) + + // We expect a zero Filter if nothing registered yet. + f := lp.Filter(nil, nil, nil) + require.Equal(t, 1, len(f.Addresses)) + assert.Equal(t, common.HexToAddress("0x0000000000000000000000000000000000000000"), f.Addresses[0]) + + err := lp.RegisterFilter(Filter{"Emitter Log 1", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{a1}, 0}) + require.NoError(t, err) + assert.Equal(t, []common.Address{a1}, lp.Filter(nil, nil, nil).Addresses) + assert.Equal(t, [][]common.Hash{{EmitterABI.Events["Log1"].ID}}, lp.Filter(nil, nil, nil).Topics) + validateFiltersTable(t, lp, orm) + + // Should de-dupe EventSigs + err = lp.RegisterFilter(Filter{"Emitter Log 1 + 2", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{a2}, 0}) + require.NoError(t, err) + assert.Equal(t, []common.Address{a1, a2}, lp.Filter(nil, nil, nil).Addresses) + assert.Equal(t, [][]common.Hash{{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}}, lp.Filter(nil, nil, nil).Topics) + validateFiltersTable(t, lp, orm) + + // Should de-dupe Addresses + err = lp.RegisterFilter(Filter{"Emitter Log 1 + 2 dupe", 
[]common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{a2}, 0}) + require.NoError(t, err) + assert.Equal(t, []common.Address{a1, a2}, lp.Filter(nil, nil, nil).Addresses) + assert.Equal(t, [][]common.Hash{{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}}, lp.Filter(nil, nil, nil).Topics) + validateFiltersTable(t, lp, orm) + + // Address required. + err = lp.RegisterFilter(Filter{"no address", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{}, 0}) + require.Error(t, err) + // Event required + err = lp.RegisterFilter(Filter{"No event", []common.Hash{}, []common.Address{a1}, 0}) + require.Error(t, err) + validateFiltersTable(t, lp, orm) + + // Removing non-existence Filter should log error but return nil + err = lp.UnregisterFilter("Filter doesn't exist") + require.NoError(t, err) + require.Equal(t, observedLogs.Len(), 1) + require.Contains(t, observedLogs.TakeAll()[0].Entry.Message, "not found") + + // Check that all filters are still there + _, ok := lp.filters["Emitter Log 1"] + require.True(t, ok, "'Emitter Log 1 Filter' missing") + _, ok = lp.filters["Emitter Log 1 + 2"] + require.True(t, ok, "'Emitter Log 1 + 2' Filter missing") + _, ok = lp.filters["Emitter Log 1 + 2 dupe"] + require.True(t, ok, "'Emitter Log 1 + 2 dupe' Filter missing") + + // Removing an existing Filter should remove it from both memory and db + err = lp.UnregisterFilter("Emitter Log 1 + 2") + require.NoError(t, err) + _, ok = lp.filters["Emitter Log 1 + 2"] + require.False(t, ok, "'Emitter Log 1 Filter' should have been removed by UnregisterFilter()") + require.Len(t, lp.filters, 2) + validateFiltersTable(t, lp, orm) + + err = lp.UnregisterFilter("Emitter Log 1 + 2 dupe") + require.NoError(t, err) + err = lp.UnregisterFilter("Emitter Log 1") + require.NoError(t, err) + assert.Len(t, lp.filters, 0) + filters, err := lp.orm.LoadFilters() + require.NoError(t, err) + assert.Len(t, filters, 0) + + // Make sure cache was invalidated + 
assert.Len(t, lp.Filter(nil, nil, nil).Addresses, 1) + assert.Equal(t, lp.Filter(nil, nil, nil).Addresses[0], common.HexToAddress("0x0000000000000000000000000000000000000000")) + assert.Len(t, lp.Filter(nil, nil, nil).Topics, 1) + assert.Len(t, lp.Filter(nil, nil, nil).Topics[0], 0) +} + +func TestLogPoller_ConvertLogs(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + + topics := []common.Hash{EmitterABI.Events["Log1"].ID} + + var cases = []struct { + name string + logs []types.Log + blocks []LogPollerBlock + expected int + }{ + {"SingleBlock", + []types.Log{{Topics: topics}, {Topics: topics}}, + []LogPollerBlock{{BlockTimestamp: time.Now()}}, + 2}, + {"BlockList", + []types.Log{{Topics: topics}, {Topics: topics}, {Topics: topics}}, + []LogPollerBlock{{BlockTimestamp: time.Now()}}, + 3}, + {"EmptyList", + []types.Log{}, + []LogPollerBlock{}, + 0}, + {"TooManyBlocks", + []types.Log{{}}, + []LogPollerBlock{{}, {}}, + 0}, + {"TooFewBlocks", + []types.Log{{}, {}, {}}, + []LogPollerBlock{{}, {}}, + 0}, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + logs := convertLogs(c.logs, c.blocks, lggr, big.NewInt(53)) + require.Len(t, logs, c.expected) + for i := 0; i < c.expected; i++ { + if len(c.blocks) == 1 { + assert.Equal(t, c.blocks[0].BlockTimestamp, logs[i].BlockTimestamp) + } else { + assert.Equal(t, logs[i].BlockTimestamp, c.blocks[i].BlockTimestamp) + } + } + }) + } +} + +func TestFilterName(t *testing.T) { + t.Parallel() + assert.Equal(t, "a - b:c:d", FilterName("a", "b", "c", "d")) + assert.Equal(t, "empty args test", FilterName("empty args test")) +} + +func TestLogPoller_BackupPollerStartup(t *testing.T) { + addr := common.HexToAddress("0x2ab9a2dc53736b361b72d900cdf9f78f9406fbbc") + lggr, observedLogs := logger.TestObserved(t, zapcore.WarnLevel) + chainID := testutils.FixtureChainID + db := pgtest.NewSqlxDB(t) + orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + + head := evmtypes.Head{Number: 3} + events := 
[]common.Hash{EmitterABI.Events["Log1"].ID} + log1 := types.Log{ + Index: 0, + BlockHash: common.Hash{}, + BlockNumber: uint64(3), + Topics: events, + Address: addr, + TxHash: common.HexToHash("0x1234"), + Data: EvmWord(uint64(300)).Bytes(), + } + + ec := evmclimocks.NewClient(t) + ec.On("HeadByNumber", mock.Anything, mock.Anything).Return(&head, nil) + ec.On("FilterLogs", mock.Anything, mock.Anything).Return([]types.Log{log1}, nil) + ec.On("ConfiguredChainID").Return(chainID, nil) + + ctx := testutils.Context(t) + + lp := NewLogPoller(orm, ec, lggr, 1*time.Hour, false, 2, 3, 2, 1000) + lp.BackupPollAndSaveLogs(ctx, 100) + assert.Equal(t, int64(0), lp.backupPollerNextBlock) + assert.Equal(t, 1, observedLogs.FilterMessageSnippet("ran before first successful log poller run").Len()) + + lp.PollAndSaveLogs(ctx, 3) + + lastProcessed, err := lp.orm.SelectLatestBlock(pg.WithParentCtx(ctx)) + require.NoError(t, err) + require.Equal(t, int64(3), lastProcessed.BlockNumber) + + lp.BackupPollAndSaveLogs(ctx, 100) + assert.Equal(t, int64(1), lp.backupPollerNextBlock) // Ensure non-negative! 
+} + +func TestLogPoller_Replay(t *testing.T) { + t.Parallel() + addr := common.HexToAddress("0x2ab9a2dc53736b361b72d900cdf9f78f9406fbbc") + + lggr, observedLogs := logger.TestObserved(t, zapcore.ErrorLevel) + chainID := testutils.FixtureChainID + db := pgtest.NewSqlxDB(t) + orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + + head := evmtypes.Head{Number: 4} + events := []common.Hash{EmitterABI.Events["Log1"].ID} + log1 := types.Log{ + Index: 0, + BlockHash: common.Hash{}, + BlockNumber: uint64(head.Number), + Topics: events, + Address: addr, + TxHash: common.HexToHash("0x1234"), + Data: EvmWord(uint64(300)).Bytes(), + } + + ec := evmclimocks.NewClient(t) + ec.On("HeadByNumber", mock.Anything, mock.Anything).Return(&head, nil) + ec.On("FilterLogs", mock.Anything, mock.Anything).Return([]types.Log{log1}, nil).Once() + ec.On("ConfiguredChainID").Return(chainID, nil) + lp := NewLogPoller(orm, ec, lggr, time.Hour, false, 3, 3, 3, 20) + + // process 1 log in block 3 + lp.PollAndSaveLogs(testutils.Context(t), 4) + latest, err := lp.LatestBlock() + require.NoError(t, err) + require.Equal(t, int64(4), latest.BlockNumber) + + t.Run("abort before replayStart received", func(t *testing.T) { + // Replay() should abort immediately if caller's context is cancelled before request signal is read + ctx, cancel := context.WithCancel(testutils.Context(t)) + cancel() + err = lp.Replay(ctx, 3) + assert.ErrorIs(t, err, ErrReplayRequestAborted) + }) + + recvStartReplay := func(ctx context.Context, block int64) { + select { + case fromBlock := <-lp.replayStart: + assert.Equal(t, block, fromBlock) + case <-ctx.Done(): + assert.NoError(t, ctx.Err(), "Timed out waiting to receive replay request from lp.replayStart") + } + } + + // Replay() should return error code received from replayComplete + t.Run("returns error code on replay complete", func(t *testing.T) { + ctx := testutils.Context(t) + anyErr := errors.New("any error") + done := make(chan struct{}) + go func() { + defer 
close(done) + recvStartReplay(ctx, 1) + lp.replayComplete <- anyErr + }() + assert.ErrorIs(t, lp.Replay(ctx, 1), anyErr) + <-done + }) + + // Replay() should return ErrReplayInProgress if caller's context is cancelled after replay has begun + t.Run("late abort returns ErrReplayInProgress", func(t *testing.T) { + ctx, cancel := context.WithTimeout(testutils.Context(t), time.Second) // Intentionally abort replay after 1s + done := make(chan struct{}) + go func() { + defer close(done) + recvStartReplay(ctx, 4) + cancel() + }() + assert.ErrorIs(t, lp.Replay(ctx, 4), ErrReplayInProgress) + <-done + lp.replayComplete <- nil + lp.wg.Wait() + }) + + // Main lp.run() loop shouldn't get stuck if client aborts + t.Run("client abort doesnt hang run loop", func(t *testing.T) { + lp.backupPollerNextBlock = 0 + + ctx := testutils.Context(t) + + pass := make(chan struct{}) + cancelled := make(chan struct{}) + + rctx, rcancel := context.WithCancel(testutils.Context(t)) + var wg sync.WaitGroup + defer func() { wg.Wait() }() + ec.On("FilterLogs", mock.Anything, mock.Anything).Once().Return([]types.Log{log1}, nil).Run(func(args mock.Arguments) { + wg.Add(1) + go func() { + defer wg.Done() + assert.ErrorIs(t, lp.Replay(rctx, 4), ErrReplayInProgress) + close(cancelled) + }() + }) + ec.On("FilterLogs", mock.Anything, mock.Anything).Once().Return([]types.Log{log1}, nil).Run(func(args mock.Arguments) { + rcancel() + wg.Add(1) + go func() { + defer wg.Done() + select { + case lp.replayStart <- 4: + close(pass) + case <-ctx.Done(): + return + } + }() + // We cannot return until we're sure that Replay() received the cancellation signal, + // otherwise replayComplete<- might be sent first + <-cancelled + }) + + ec.On("FilterLogs", mock.Anything, mock.Anything).Return([]types.Log{log1}, nil).Maybe() // in case task gets delayed by >= 100ms + + t.Cleanup(lp.reset) + servicetest.Run(t, lp) + + select { + case <-ctx.Done(): + t.Errorf("timed out waiting for lp.run() to respond to second replay 
event") + case <-pass: + } + }) + + // remove Maybe expectation from prior subtest, as it will override all expected calls in future subtests + ec.On("FilterLogs", mock.Anything, mock.Anything).Unset() + + // run() should abort if log poller shuts down while replay is in progress + t.Run("shutdown during replay", func(t *testing.T) { + lp.backupPollerNextBlock = 0 + + pass := make(chan struct{}) + done := make(chan struct{}) + defer func() { <-done }() + + ctx := testutils.Context(t) + ec.On("FilterLogs", mock.Anything, mock.Anything).Once().Return([]types.Log{log1}, nil).Run(func(args mock.Arguments) { + go func() { + defer close(done) + select { + case lp.replayStart <- 4: + case <-ctx.Done(): + } + }() + }) + ec.On("FilterLogs", mock.Anything, mock.Anything).Once().Return([]types.Log{log1}, nil).Run(func(args mock.Arguments) { + lp.cancel() + close(pass) + }) + ec.On("FilterLogs", mock.Anything, mock.Anything).Return([]types.Log{log1}, nil).Maybe() // in case task gets delayed by >= 100ms + + t.Cleanup(lp.reset) + servicetest.Run(t, lp) + + select { + case <-ctx.Done(): + t.Error("timed out waiting for lp.run() to respond to shutdown event during replay") + case <-pass: + } + }) + + // ReplayAsync should return as soon as replayStart is received + t.Run("ReplayAsync success", func(t *testing.T) { + t.Cleanup(lp.reset) + servicetest.Run(t, lp) + + lp.ReplayAsync(1) + + recvStartReplay(testutils.Context(t), 1) + }) + + t.Run("ReplayAsync error", func(t *testing.T) { + t.Cleanup(lp.reset) + servicetest.Run(t, lp) + + anyErr := errors.New("async error") + observedLogs.TakeAll() + + lp.ReplayAsync(4) + recvStartReplay(testutils.Context(t), 4) + + select { + case lp.replayComplete <- anyErr: + time.Sleep(2 * time.Second) + case <-lp.ctx.Done(): + t.Error("timed out waiting to send replaceComplete") + } + require.Equal(t, 1, observedLogs.Len()) + assert.Equal(t, observedLogs.All()[0].Message, anyErr.Error()) + }) +} + +func (lp *logPoller) reset() { + lp.StateMachine = 
services.StateMachine{} + lp.ctx, lp.cancel = context.WithCancel(context.Background()) +} + +func Test_latestBlockAndFinalityDepth(t *testing.T) { + lggr := logger.Test(t) + chainID := testutils.FixtureChainID + db := pgtest.NewSqlxDB(t) + orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + + t.Run("pick latest block from chain and use finality from config with finality disabled", func(t *testing.T) { + head := evmtypes.Head{Number: 4} + finalityDepth := int64(3) + ec := evmclimocks.NewClient(t) + ec.On("HeadByNumber", mock.Anything, mock.Anything).Return(&head, nil) + + lp := NewLogPoller(orm, ec, lggr, time.Hour, false, finalityDepth, 3, 3, 20) + latestBlock, lastFinalizedBlockNumber, err := lp.latestBlocks(testutils.Context(t)) + require.NoError(t, err) + require.Equal(t, latestBlock.Number, head.Number) + require.Equal(t, finalityDepth, latestBlock.Number-lastFinalizedBlockNumber) + }) + + t.Run("finality tags in use", func(t *testing.T) { + t.Run("client returns data properly", func(t *testing.T) { + expectedLatestBlockNumber := int64(20) + expectedLastFinalizedBlockNumber := int64(12) + ec := evmclimocks.NewClient(t) + ec.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + reflect.DeepEqual(b[0].Args, []interface{}{"latest", false}) && + reflect.DeepEqual(b[1].Args, []interface{}{"finalized", false}) + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // Latest block details + *(elems[0].Result.(*evmtypes.Head)) = evmtypes.Head{Number: expectedLatestBlockNumber, Hash: utils.RandomBytes32()} + // Finalized block details + *(elems[1].Result.(*evmtypes.Head)) = evmtypes.Head{Number: expectedLastFinalizedBlockNumber, Hash: utils.RandomBytes32()} + }) + + lp := NewLogPoller(orm, ec, lggr, time.Hour, true, 3, 3, 3, 20) + + latestBlock, lastFinalizedBlockNumber, err := lp.latestBlocks(testutils.Context(t)) + require.NoError(t, err) + require.Equal(t, 
expectedLatestBlockNumber, latestBlock.Number) + require.Equal(t, expectedLastFinalizedBlockNumber, lastFinalizedBlockNumber) + }) + + t.Run("client returns error for at least one of the calls", func(t *testing.T) { + ec := evmclimocks.NewClient(t) + ec.On("BatchCallContext", mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // Latest block details + *(elems[0].Result.(*evmtypes.Head)) = evmtypes.Head{Number: 10} + // Finalized block details + elems[1].Error = fmt.Errorf("some error") + }) + + lp := NewLogPoller(orm, ec, lggr, time.Hour, true, 3, 3, 3, 20) + _, _, err := lp.latestBlocks(testutils.Context(t)) + require.Error(t, err) + }) + + t.Run("BatchCall returns an error", func(t *testing.T) { + ec := evmclimocks.NewClient(t) + ec.On("BatchCallContext", mock.Anything, mock.Anything).Return(fmt.Errorf("some error")) + + lp := NewLogPoller(orm, ec, lggr, time.Hour, true, 3, 3, 3, 20) + _, _, err := lp.latestBlocks(testutils.Context(t)) + require.Error(t, err) + }) + }) +} + +func benchmarkFilter(b *testing.B, nFilters, nAddresses, nEvents int) { + lggr := logger.Test(b) + lp := NewLogPoller(nil, nil, lggr, 1*time.Hour, false, 2, 3, 2, 1000) + for i := 0; i < nFilters; i++ { + var addresses []common.Address + var events []common.Hash + for j := 0; j < nAddresses; j++ { + addresses = append(addresses, common.BigToAddress(big.NewInt(int64(j+1)))) + } + for j := 0; j < nEvents; j++ { + events = append(events, common.BigToHash(big.NewInt(int64(j+1)))) + } + err := lp.RegisterFilter(Filter{Name: "my Filter", EventSigs: events, Addresses: addresses}) + require.NoError(b, err) + } + b.ResetTimer() + for n := 0; n < b.N; n++ { + lp.Filter(nil, nil, nil) + } +} + +func BenchmarkFilter10_1(b *testing.B) { + benchmarkFilter(b, 10, 1, 1) +} +func BenchmarkFilter100_10(b *testing.B) { + benchmarkFilter(b, 100, 10, 10) +} +func BenchmarkFilter1000_100(b *testing.B) { + benchmarkFilter(b, 1000, 100, 100) +} diff 
--git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go new file mode 100644 index 00000000..3a69263c --- /dev/null +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -0,0 +1,1674 @@ +package logpoller_test + +import ( + "context" + "fmt" + "math" + "math/big" + "testing" + "time" + + "github.com/cometbft/cometbft/libs/rand" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/plugin-common/pkg/logger" + commonutils "github.com/goplugin/plugin-common/pkg/utils" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_emitter" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +func logRuntime(t testing.TB, start time.Time) { + t.Log("runtime", time.Since(start)) +} + +func populateDatabase(t testing.TB, o *logpoller.DbORM, chainID 
*big.Int) (common.Hash, common.Address, common.Address) { + event1 := EmitterABI.Events["Log1"].ID + address1 := common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406fbbb") + address2 := common.HexToAddress("0x6E225058950f237371261C985Db6bDe26df2200E") + startDate := time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC) + + for j := 1; j < 100; j++ { + var logs []logpoller.Log + // Max we can insert per batch + for i := 0; i < 1000; i++ { + addr := address1 + if (i+(1000*j))%2 == 0 { + addr = address2 + } + blockNumber := int64(i + (1000 * j)) + blockTimestamp := startDate.Add(time.Duration(j*1000) * time.Hour) + + logs = append(logs, logpoller.Log{ + EvmChainId: ubig.New(chainID), + LogIndex: 1, + BlockHash: common.HexToHash(fmt.Sprintf("0x%d", i+(1000*j))), + BlockNumber: blockNumber, + BlockTimestamp: blockTimestamp, + EventSig: event1, + Topics: [][]byte{event1[:], logpoller.EvmWord(uint64(i + 1000*j)).Bytes()}, + Address: addr, + TxHash: utils.RandomHash(), + Data: logpoller.EvmWord(uint64(i + 1000*j)).Bytes(), + CreatedAt: blockTimestamp, + }) + + } + require.NoError(t, o.InsertLogs(logs)) + require.NoError(t, o.InsertBlock(utils.RandomHash(), int64((j+1)*1000-1), startDate.Add(time.Duration(j*1000)*time.Hour), 0)) + } + + return event1, address1, address2 +} + +func BenchmarkSelectLogsCreatedAfter(b *testing.B) { + chainId := big.NewInt(137) + _, db := heavyweight.FullTestDBV2(b, nil) + o := logpoller.NewORM(chainId, db, logger.Test(b), pgtest.NewQConfig(false)) + event, address, _ := populateDatabase(b, o, chainId) + + // Setting searchDate to pick around 5k logs + searchDate := time.Date(2020, 1, 1, 12, 12, 12, 0, time.UTC) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + logs, err := o.SelectLogsCreatedAfter(address, event, searchDate, 500) + require.NotZero(b, len(logs)) + require.NoError(b, err) + } +} + +func TestPopulateLoadedDB(t *testing.T) { + t.Skip("Only for local load testing and query analysis") + _, db := heavyweight.FullTestDBV2(t, nil) + 
chainID := big.NewInt(137) + + o := logpoller.NewORM(big.NewInt(137), db, logger.Test(t), pgtest.NewQConfig(true)) + event1, address1, address2 := populateDatabase(t, o, chainID) + + func() { + defer logRuntime(t, time.Now()) + _, err1 := o.SelectLogs(750000, 800000, address1, event1) + require.NoError(t, err1) + }() + func() { + defer logRuntime(t, time.Now()) + _, err1 := o.SelectLatestLogEventSigsAddrsWithConfs(0, []common.Address{address1}, []common.Hash{event1}, 0) + require.NoError(t, err1) + }() + + // Confirm all the logs. + require.NoError(t, o.InsertBlock(common.HexToHash("0x10"), 1000000, time.Now(), 0)) + func() { + defer logRuntime(t, time.Now()) + lgs, err1 := o.SelectLogsDataWordRange(address1, event1, 0, logpoller.EvmWord(500000), logpoller.EvmWord(500020), 0) + require.NoError(t, err1) + // 10 since every other log is for address1 + assert.Equal(t, 10, len(lgs)) + }() + + func() { + defer logRuntime(t, time.Now()) + lgs, err1 := o.SelectIndexedLogs(address2, event1, 1, []common.Hash{logpoller.EvmWord(500000), logpoller.EvmWord(500020)}, 0) + require.NoError(t, err1) + assert.Equal(t, 2, len(lgs)) + }() + + func() { + defer logRuntime(t, time.Now()) + lgs, err1 := o.SelectIndexedLogsTopicRange(address1, event1, 1, logpoller.EvmWord(500000), logpoller.EvmWord(500020), 0) + require.NoError(t, err1) + assert.Equal(t, 10, len(lgs)) + }() +} + +func TestLogPoller_Integration(t *testing.T) { + th := SetupTH(t, false, 2, 3, 2, 1000) + th.Client.Commit() // Block 2. 
Ensure we have finality number of blocks + + require.NoError(t, th.LogPoller.RegisterFilter(logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{th.EmitterAddress1}, 0})) + require.Len(t, th.LogPoller.Filter(nil, nil, nil).Addresses, 1) + require.Len(t, th.LogPoller.Filter(nil, nil, nil).Topics, 1) + + require.Len(t, th.LogPoller.Filter(nil, nil, nil).Addresses, 1) + require.Len(t, th.LogPoller.Filter(nil, nil, nil).Topics, 1) + + // Emit some logs in blocks 3->7. + for i := 0; i < 5; i++ { + _, err1 := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + _, err1 = th.Emitter1.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + th.Client.Commit() + } + // Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db. + require.NoError(t, th.LogPoller.Start(testutils.Context(t))) + + // The poller starts on a new chain at latest-finality (5 in this case), + // Replaying from block 4 should guarantee we have block 4 immediately. (We will also get + // block 3 once the backup poller runs, since it always starts 100 blocks behind.) + require.NoError(t, th.LogPoller.Replay(testutils.Context(t), 4)) + + // We should immediately have at least logs 4-7 + logs, err := th.LogPoller.Logs(4, 7, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + require.Equal(t, 4, len(logs)) + + // Once the backup poller runs we should also have the log from block 3 + testutils.AssertEventually(t, func() bool { + l, err2 := th.LogPoller.Logs(3, 3, EmitterABI.Events["Log1"].ID, th.EmitterAddress1) + require.NoError(t, err2) + return len(l) == 1 + }) + + // Now let's update the Filter and replay to get Log2 logs. 
+ err = th.LogPoller.RegisterFilter(logpoller.Filter{ + "Emitter - log2", []common.Hash{EmitterABI.Events["Log2"].ID}, + []common.Address{th.EmitterAddress1}, 0, + }) + require.NoError(t, err) + // Replay an invalid block should error + assert.Error(t, th.LogPoller.Replay(testutils.Context(t), 0)) + assert.Error(t, th.LogPoller.Replay(testutils.Context(t), 20)) + + // Still shouldn't have any Log2 logs yet + logs, err = th.LogPoller.Logs(2, 7, EmitterABI.Events["Log2"].ID, th.EmitterAddress1) + require.NoError(t, err) + require.Len(t, logs, 0) + + // Replay only from block 4, so we should see logs in block 4,5,6,7 (4 logs) + require.NoError(t, th.LogPoller.Replay(testutils.Context(t), 4)) + + // We should immediately see 4 logs2 logs. + logs, err = th.LogPoller.Logs(2, 7, EmitterABI.Events["Log2"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 4, len(logs)) + + assert.NoError(t, th.LogPoller.Close()) + + // Cancelling a replay should return an error synchronously. + ctx, cancel := context.WithCancel(testutils.Context(t)) + cancel() + assert.ErrorIs(t, th.LogPoller.Replay(ctx, 4), logpoller.ErrReplayRequestAborted) +} + +// Simulate a badly behaving rpc server, where unfinalized blocks can return different logs +// for the same block hash. We should be able to handle this without missing any logs, as +// long as the logs returned for finalized blocks are consistent. 
+func Test_BackupLogPoller(t *testing.T) { + tests := []struct { + name string + finalityDepth int64 + finalityTag bool + }{ + { + name: "fixed finality depth without finality tag", + finalityDepth: 2, + finalityTag: false, + }, + { + name: "chain finality in use", + finalityDepth: 0, + finalityTag: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2, 1000) + // later, we will need at least 32 blocks filled with logs for cache invalidation + for i := int64(0); i < 32; i++ { + // to invalidate geth's internal read-cache, a matching log must be found in the bloom Filter + // for each of the 32 blocks + tx, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(i + 7)}) + require.NoError(t, err) + require.NotNil(t, tx) + th.Client.Commit() + } + + ctx := testutils.Context(t) + + filter1 := logpoller.Filter{"filter1", []common.Hash{ + EmitterABI.Events["Log1"].ID, + EmitterABI.Events["Log2"].ID}, + []common.Address{th.EmitterAddress1}, + 0} + err := th.LogPoller.RegisterFilter(filter1) + require.NoError(t, err) + + filters, err := th.ORM.LoadFilters(pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + require.Equal(t, 1, len(filters)) + require.Equal(t, filter1, filters["filter1"]) + + err = th.LogPoller.RegisterFilter( + logpoller.Filter{"filter2", + []common.Hash{EmitterABI.Events["Log1"].ID}, + []common.Address{th.EmitterAddress2}, 0}) + require.NoError(t, err) + + defer func() { + assert.NoError(t, th.LogPoller.UnregisterFilter("filter1")) + }() + defer func() { + assert.NoError(t, th.LogPoller.UnregisterFilter("filter2")) + }() + + // generate some tx's with logs + tx1, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err) + require.NotNil(t, tx1) + + tx2, err := th.Emitter1.EmitLog2(th.Owner, []*big.Int{big.NewInt(2)}) + require.NoError(t, err) + require.NotNil(t, tx2) + + tx3, err := th.Emitter2.EmitLog1(th.Owner, 
[]*big.Int{big.NewInt(3)}) + require.NoError(t, err) + require.NotNil(t, tx3) + + th.Client.Commit() // commit block 34 with 3 tx's included + + h := th.Client.Blockchain().CurrentHeader() // get latest header + require.Equal(t, uint64(34), h.Number.Uint64()) + + // save these 3 receipts for later + receipts := rawdb.ReadReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), uint64(time.Now().Unix()), params.AllEthashProtocolChanges) + require.NotZero(t, receipts.Len()) + + // Simulate a situation where the rpc server has a block, but no logs available for it yet + // this can't happen with geth itself, but can with other clients. + rawdb.WriteReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), types.Receipts{}) // wipes out all logs for block 34 + + body := rawdb.ReadBody(th.EthDB, h.Hash(), h.Number.Uint64()) + require.Equal(t, 3, len(body.Transactions)) + txs := body.Transactions // save transactions for later + body.Transactions = types.Transactions{} // number of tx's must match # of logs for GetLogs() to succeed + rawdb.WriteBody(th.EthDB, h.Hash(), h.Number.Uint64(), body) + + currentBlockNumber := th.PollAndSaveLogs(ctx, 1) + assert.Equal(t, int64(35), currentBlockNumber) + + // simulate logs becoming available + rawdb.WriteReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), receipts) + require.True(t, rawdb.HasReceipts(th.EthDB, h.Hash(), h.Number.Uint64())) + body.Transactions = txs + rawdb.WriteBody(th.EthDB, h.Hash(), h.Number.Uint64(), body) + + // flush out cached block 34 by reading logs from first 32 blocks + query := ethereum.FilterQuery{ + FromBlock: big.NewInt(int64(2)), + ToBlock: big.NewInt(int64(33)), + Addresses: []common.Address{th.EmitterAddress1}, + Topics: [][]common.Hash{{EmitterABI.Events["Log1"].ID}}, + } + fLogs, err := th.Client.FilterLogs(ctx, query) + require.NoError(t, err) + require.Equal(t, 32, len(fLogs)) + + // logs shouldn't show up yet + logs, err := th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, + 
pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 0, len(logs)) + + th.Client.Commit() + th.Client.Commit() + markBlockAsFinalized(t, th, 34) + + // Run ordinary poller + backup poller at least once + currentBlock, _ := th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + th.LogPoller.PollAndSaveLogs(ctx, currentBlock.BlockNumber+1) + th.LogPoller.BackupPollAndSaveLogs(ctx, 100) + currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + + require.Equal(t, int64(37), currentBlock.BlockNumber+1) + + // logs still shouldn't show up, because we don't want to backfill the last finalized log + // to help with reorg detection + logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 0, len(logs)) + th.Client.Commit() + markBlockAsFinalized(t, th, 35) + + // Run ordinary poller + backup poller at least once more + th.LogPoller.PollAndSaveLogs(ctx, currentBlockNumber+1) + th.LogPoller.BackupPollAndSaveLogs(ctx, 100) + currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + + require.Equal(t, int64(38), currentBlock.BlockNumber+1) + + // all 3 logs in block 34 should show up now, thanks to backup logger + logs, err = th.LogPoller.Logs(30, 37, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 5, len(logs)) + logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log2"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 1, len(logs)) + logs, err = th.LogPoller.Logs(32, 36, EmitterABI.Events["Log1"].ID, th.EmitterAddress2, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 1, len(logs)) + }) + } +} + +func TestLogPoller_BackupPollAndSaveLogsWithPollerNotWorking(t *testing.T) { + 
emittedLogs := 30 + // Intentionally use very low backupLogPollerDelay to verify if finality is used properly + backupLogPollerDelay := int64(0) + ctx := testutils.Context(t) + th := SetupTH(t, true, 0, 3, 2, 1000) + + header, err := th.Client.HeaderByNumber(ctx, nil) + require.NoError(t, err) + + // Emit some logs in blocks + for i := 0; i < emittedLogs; i++ { + _, err2 := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err2) + th.Client.Commit() + } + + // First PollAndSave, no filters are registered + // 0 (finalized) -> 1 -> 2 -> ... + currentBlock := th.PollAndSaveLogs(ctx, 1) + // currentBlock should be blockChain start + number of emitted logs + 1 + assert.Equal(t, int64(emittedLogs)+header.Number.Int64()+1, currentBlock) + + // LogPoller not working, but chain in the meantime has progressed + // 0 -> 1 -> 2 -> ... -> currentBlock - 10 (finalized) -> .. -> currentBlock + markBlockAsFinalized(t, th, currentBlock-10) + + err = th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) + require.NoError(t, err) + + // LogPoller should backfill starting from the last finalized block stored in db (genesis block) + // till the latest finalized block reported by chain. 
+ th.LogPoller.BackupPollAndSaveLogs(ctx, backupLogPollerDelay) + require.NoError(t, err) + + logs, err := th.LogPoller.Logs( + 0, + currentBlock, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs-10) + + // Progressing even more, move blockchain forward by 1 block and mark it as finalized + th.Client.Commit() + markBlockAsFinalized(t, th, currentBlock) + th.LogPoller.BackupPollAndSaveLogs(ctx, backupLogPollerDelay) + + // All emitted logs should be backfilled + logs, err = th.LogPoller.Logs( + 0, + currentBlock+1, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs) +} + +func TestLogPoller_BackupPollAndSaveLogsWithDeepBlockDelay(t *testing.T) { + emittedLogs := 30 + ctx := testutils.Context(t) + th := SetupTH(t, true, 0, 3, 2, 1000) + + // Emit some logs in blocks + for i := 0; i < emittedLogs; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + // Emit one more empty block + th.Client.Commit() + + header, err := th.Client.HeaderByNumber(ctx, nil) + require.NoError(t, err) + // Mark everything as finalized + markBlockAsFinalized(t, th, header.Number.Int64()) + + // First PollAndSave, no filters are registered, but finalization is the same as the latest block + // 1 -> 2 -> ... 
+ th.PollAndSaveLogs(ctx, 1) + + // Check that latest block has the same properties as the head + latestBlock, err := th.LogPoller.LatestBlock() + require.NoError(t, err) + assert.Equal(t, latestBlock.BlockNumber, header.Number.Int64()) + assert.Equal(t, latestBlock.FinalizedBlockNumber, header.Number.Int64()) + assert.Equal(t, latestBlock.BlockHash, header.Hash()) + + // Register filter + err = th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) + require.NoError(t, err) + + // Should fallback to the backupPollerBlockDelay when finalization was very high in a previous PollAndSave + th.LogPoller.BackupPollAndSaveLogs(ctx, int64(emittedLogs)) + require.NoError(t, err) + + // All emitted logs should be backfilled + logs, err := th.LogPoller.Logs( + 0, + header.Number.Int64()+1, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs) +} + +func TestLogPoller_BackupPollAndSaveLogsSkippingLogsThatAreTooOld(t *testing.T) { + logsBatch := 10 + // Intentionally use very low backupLogPollerDelay to verify if finality is used properly + ctx := testutils.Context(t) + th := SetupTH(t, true, 0, 3, 2, 1000) + + //header, err := th.Client.HeaderByNumber(ctx, nil) + //require.NoError(t, err) + + // Emit some logs in blocks + for i := 1; i <= logsBatch; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + + // First PollAndSave, no filters are registered, but finalization is the same as the latest block + // 1 -> 2 -> ... 
-> firstBatchBlock + firstBatchBlock := th.PollAndSaveLogs(ctx, 1) + // Mark current tip of the chain as finalized (after emitting 10 logs) + markBlockAsFinalized(t, th, firstBatchBlock) + + // Emit 2nd batch of block + for i := 1; i <= logsBatch; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(100 + i))}) + require.NoError(t, err) + th.Client.Commit() + } + + // 1 -> 2 -> ... -> firstBatchBlock (finalized) -> .. -> firstBatchBlock + emitted logs + secondBatchBlock := th.PollAndSaveLogs(ctx, firstBatchBlock) + // Mark current tip of the block as finalized (after emitting 20 logs) + markBlockAsFinalized(t, th, secondBatchBlock) + + // Register filter + err := th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) + require.NoError(t, err) + + // Should pick logs starting from one block behind the latest finalized block + th.LogPoller.BackupPollAndSaveLogs(ctx, 0) + require.NoError(t, err) + + // Only the 2nd batch + 1 log from a previous batch should be backfilled, because we perform backfill starting + // from one block behind the latest finalized block + logs, err := th.LogPoller.Logs( + 0, + secondBatchBlock, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, logsBatch+1) + require.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000009`), logs[0].Data) +} + +func TestLogPoller_BlockTimestamps(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + th := SetupTH(t, false, 2, 3, 2, 1000) + + addresses := []common.Address{th.EmitterAddress1, th.EmitterAddress2} + topics := []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID} + + err := th.LogPoller.RegisterFilter(logpoller.Filter{"convertLogs", topics, addresses, 0}) + require.NoError(t, err) + + 
blk, err := th.Client.BlockByNumber(ctx, nil) + require.NoError(t, err) + require.Equal(t, big.NewInt(1), blk.Number()) + start := blk.Time() + + // There is automatically a 10s delay between each block. To make sure it's including the correct block timestamps, + // we introduce irregularities by inserting two additional block delays. We can't control the block times for + // blocks produced by the log emitter, but we can adjust the time on empty blocks in between. Simulated time + // sequence: [ #1 ] ..(10s + delay1).. [ #2 ] ..10s.. [ #3 (LOG1) ] ..(10s + delay2).. [ #4 ] ..10s.. [ #5 (LOG2) ] + const delay1 = 589 + const delay2 = 643 + time1 := start + 20 + delay1 + time2 := time1 + 20 + delay2 + + require.NoError(t, th.Client.AdjustTime(delay1*time.Second)) + hash := th.Client.Commit() + + blk, err = th.Client.BlockByHash(ctx, hash) + require.NoError(t, err) + require.Equal(t, big.NewInt(2), blk.Number()) + assert.Equal(t, time1-10, blk.Time()) + + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err) + hash = th.Client.Commit() + + blk, err = th.Client.BlockByHash(ctx, hash) + require.NoError(t, err) + require.Equal(t, big.NewInt(3), blk.Number()) + assert.Equal(t, time1, blk.Time()) + + require.NoError(t, th.Client.AdjustTime(delay2*time.Second)) + th.Client.Commit() + _, err = th.Emitter2.EmitLog2(th.Owner, []*big.Int{big.NewInt(2)}) + require.NoError(t, err) + hash = th.Client.Commit() + + blk, err = th.Client.BlockByHash(ctx, hash) + require.NoError(t, err) + require.Equal(t, big.NewInt(5), blk.Number()) + assert.Equal(t, time2, blk.Time()) + + query := ethereum.FilterQuery{ + FromBlock: big.NewInt(2), + ToBlock: big.NewInt(5), + Topics: [][]common.Hash{topics}, + Addresses: []common.Address{th.EmitterAddress1, th.EmitterAddress2}} + + gethLogs, err := th.Client.FilterLogs(ctx, query) + require.NoError(t, err) + require.Len(t, gethLogs, 2) + + lb, _ := th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + 
th.PollAndSaveLogs(ctx, lb.BlockNumber+1) + lg1, err := th.LogPoller.Logs(0, 20, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, + pg.WithParentCtx(ctx)) + require.NoError(t, err) + lg2, err := th.LogPoller.Logs(0, 20, EmitterABI.Events["Log2"].ID, th.EmitterAddress2, + pg.WithParentCtx(ctx)) + require.NoError(t, err) + + // Logs should have correct timestamps + b, _ := th.Client.BlockByHash(ctx, lg1[0].BlockHash) + t.Log(len(lg1), lg1[0].BlockTimestamp) + assert.Equal(t, int64(b.Time()), lg1[0].BlockTimestamp.UTC().Unix(), time1) + b2, _ := th.Client.BlockByHash(ctx, lg2[0].BlockHash) + assert.Equal(t, int64(b2.Time()), lg2[0].BlockTimestamp.UTC().Unix(), time2) +} + +func TestLogPoller_SynchronizedWithGeth(t *testing.T) { + t.Parallel() + // The log poller's blocks table should remain synchronized + // with the canonical chain of geth's despite arbitrary mixes of mining and reorgs. + testParams := gopter.DefaultTestParameters() + testParams.MinSuccessfulTests = 100 + p := gopter.NewProperties(testParams) + numChainInserts := 3 + finalityDepth := 5 + lggr := logger.Test(t) + db := pgtest.NewSqlxDB(t) + + owner := testutils.MustNewSimTransactor(t) + owner.GasPrice = big.NewInt(10e9) + p.Property("synchronized with geth", prop.ForAll(func(mineOrReorg []uint64) bool { + // After the set of reorgs, we should have the same canonical blocks that geth does. + t.Log("Starting test", mineOrReorg) + chainID := testutils.NewRandomEVMChainID() + // Set up a test chain with a log emitting contract deployed. + orm := logpoller.NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + // Note this property test is run concurrently and the sim is not threadsafe. 
+ ec := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{ + owner.From: { + Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)), + }, + }, 10e6) + _, _, emitter1, err := log_emitter.DeployLogEmitter(owner, ec) + require.NoError(t, err) + lp := logpoller.NewLogPoller(orm, client.NewSimulatedBackendClient(t, ec, chainID), lggr, 15*time.Second, false, int64(finalityDepth), 3, 2, 1000) + for i := 0; i < finalityDepth; i++ { // Have enough blocks that we could reorg the full finalityDepth-1. + ec.Commit() + } + currentBlockNumber := int64(1) + lp.PollAndSaveLogs(testutils.Context(t), currentBlockNumber) + currentBlock, err := lp.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + matchesGeth := func() bool { + // Check every block is identical + latest, err1 := ec.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err1) + for i := 1; i < int(latest.NumberU64()); i++ { + ourBlock, err1 := lp.BlockByNumber(int64(i)) + require.NoError(t, err1) + gethBlock, err1 := ec.BlockByNumber(testutils.Context(t), big.NewInt(int64(i))) + require.NoError(t, err1) + if ourBlock.BlockHash != gethBlock.Hash() { + t.Logf("Initial poll our block differs at height %d got %x want %x\n", i, ourBlock.BlockHash, gethBlock.Hash()) + return false + } + } + return true + } + if !matchesGeth() { + return false + } + // Randomly pick to mine or reorg + for i := 0; i < numChainInserts; i++ { + if rand.Bool() { + // Mine blocks + for j := 0; j < int(mineOrReorg[i]); j++ { + ec.Commit() + latest, err1 := ec.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err1) + t.Log("mined block", latest.Hash()) + } + } else { + // Reorg blocks + latest, err1 := ec.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err1) + reorgedBlock := big.NewInt(0).Sub(latest.Number(), big.NewInt(int64(mineOrReorg[i]))) + reorg, err1 := ec.BlockByNumber(testutils.Context(t), reorgedBlock) + require.NoError(t, err1) + require.NoError(t, 
ec.Fork(testutils.Context(t), reorg.Hash())) + t.Logf("Reorging from (%v, %x) back to (%v, %x)\n", latest.NumberU64(), latest.Hash(), reorgedBlock.Uint64(), reorg.Hash()) + // Actually need to change the block here to trigger the reorg. + _, err1 = emitter1.EmitLog1(owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err1) + for j := 0; j < int(mineOrReorg[i]+1); j++ { // Need +1 to make it actually longer height so we detect it. + ec.Commit() + } + latest, err1 = ec.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err1) + t.Logf("New latest (%v, %x), latest parent %x)\n", latest.NumberU64(), latest.Hash(), latest.ParentHash()) + } + lp.PollAndSaveLogs(testutils.Context(t), currentBlock.BlockNumber) + currentBlock, err = lp.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + } + return matchesGeth() + }, gen.SliceOfN(numChainInserts, gen.UInt64Range(1, uint64(finalityDepth-1))))) // Max reorg depth is finality depth - 1 + p.TestingRun(t) +} + +func TestLogPoller_PollAndSaveLogs(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + finalityDepth int64 + finalityTag bool + }{ + { + name: "fixed finality depth without finality tag", + finalityDepth: 3, + finalityTag: false, + }, + { + name: "chain finality in use", + finalityDepth: 0, + finalityTag: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2, 1000) + + // Set up a log poller listening for log emitter logs. 
+ err := th.LogPoller.RegisterFilter(logpoller.Filter{ + "Test Emitter 1 & 2", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, + []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0, + }) + require.NoError(t, err) + + b, err := th.Client.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + require.Equal(t, uint64(1), b.NumberU64()) + require.Equal(t, uint64(10), b.Time()) + + // Test scenario: single block in chain, no logs. + // Chain genesis <- 1 + // DB: empty + newStart := th.PollAndSaveLogs(testutils.Context(t), 1) + assert.Equal(t, int64(2), newStart) + + // We expect to have saved block 1. + lpb, err := th.ORM.SelectBlockByNumber(1) + require.NoError(t, err) + assert.Equal(t, lpb.BlockHash, b.Hash()) + assert.Equal(t, lpb.BlockNumber, int64(b.NumberU64())) + assert.Equal(t, int64(1), int64(b.NumberU64())) + assert.Equal(t, uint64(10), b.Time()) + + // No logs. + lgs, err := th.ORM.SelectLogsByBlockRange(1, 1) + require.NoError(t, err) + assert.Equal(t, 0, len(lgs)) + th.assertHaveCanonical(t, 1, 1) + + // Polling again should be a noop, since we are at the latest. + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(2), newStart) + latest, err := th.ORM.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(1), latest.BlockNumber) + th.assertHaveCanonical(t, 1, 1) + + // Test scenario: one log 2 block chain. + // Chain gen <- 1 <- 2 (L1) + // DB: 1 + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err) + th.Client.Commit() + + // Polling should get us the L1 log. 
+ newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(3), newStart) + latest, err = th.ORM.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(2), latest.BlockNumber) + lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) + require.NoError(t, err) + require.Equal(t, 1, len(lgs)) + assert.Equal(t, th.EmitterAddress1, lgs[0].Address) + assert.Equal(t, latest.BlockHash, lgs[0].BlockHash) + assert.Equal(t, latest.BlockTimestamp, lgs[0].BlockTimestamp) + assert.Equal(t, hexutil.Encode(lgs[0].Topics[0]), EmitterABI.Events["Log1"].ID.String()) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), + lgs[0].Data) + + // Test scenario: single block reorg with log. + // Chain gen <- 1 <- 2 (L1_1) + // \ 2'(L1_2) <- 3 + // DB: 1, 2 + // - Detect a reorg, + // - Update the block 2's hash + // - Save L1' + // - L1_1 deleted + reorgedOutBlock, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(2)) + require.NoError(t, err) + lca, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(1)) + require.NoError(t, err) + require.NoError(t, th.Client.Fork(testutils.Context(t), lca.Hash())) + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(2)}) + require.NoError(t, err) + // Create 2' + th.Client.Commit() + // Create 3 (we need a new block for us to do any polling and detect the reorg). + th.Client.Commit() + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(4), newStart) + latest, err = th.ORM.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(3), latest.BlockNumber) + lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) + require.NoError(t, err) + require.Equal(t, 1, len(lgs)) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000002`), lgs[0].Data) + th.assertHaveCanonical(t, 1, 3) + + // Test scenario: reorg back to previous tip. 
+ // Chain gen <- 1 <- 2 (L1_1) <- 3' (L1_3) <- 4 + // \ 2'(L1_2) <- 3 + require.NoError(t, th.Client.Fork(testutils.Context(t), reorgedOutBlock.Hash())) + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(3)}) + require.NoError(t, err) + // Create 3' + th.Client.Commit() + // Create 4 + th.Client.Commit() + // Mark block 1 as finalized + markBlockAsFinalized(t, th, 1) + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(5), newStart) + latest, err = th.ORM.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(4), latest.BlockNumber) + lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) + require.NoError(t, err) + // We expect ONLY L1_1 and L1_3 since L1_2 is reorg'd out. + assert.Equal(t, 2, len(lgs)) + assert.Equal(t, int64(2), lgs[0].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), lgs[0].Data) + assert.Equal(t, int64(3), lgs[1].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000003`), lgs[1].Data) + th.assertHaveCanonical(t, 1, 1) + th.assertHaveCanonical(t, 3, 4) + th.assertDontHave(t, 2, 2) // 2 gets backfilled + + // Test scenario: multiple logs per block for many blocks (also after reorg). 
+ // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) + // \ 2'(L1_2) <- 3 + // DB: 1, 2', 3' + // - Should save 4, 5, 6 blocks + // - Should obtain logs L1_3, L2_5, L1_6 + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(4)}) + require.NoError(t, err) + _, err = th.Emitter2.EmitLog1(th.Owner, []*big.Int{big.NewInt(5)}) + require.NoError(t, err) + // Create 4 + th.Client.Commit() + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(6)}) + require.NoError(t, err) + // Create 5 + th.Client.Commit() + // Mark block 2 as finalized + markBlockAsFinalized(t, th, 3) + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(7), newStart) + lgs, err = th.ORM.SelectLogsByBlockRange(4, 6) + require.NoError(t, err) + require.Equal(t, 3, len(lgs)) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000004`), lgs[0].Data) + assert.Equal(t, th.EmitterAddress1, lgs[0].Address) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000005`), lgs[1].Data) + assert.Equal(t, th.EmitterAddress2, lgs[1].Address) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000006`), lgs[2].Data) + assert.Equal(t, th.EmitterAddress1, lgs[2].Address) + th.assertHaveCanonical(t, 1, 1) + th.assertDontHave(t, 2, 2) // 2 gets backfilled + th.assertHaveCanonical(t, 3, 6) + + // Test scenario: node down for exactly finality + 2 blocks + // Note we only backfill up to finalized - 1 blocks, because we need to save the + // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) <- 7 (L1_7) <- 8 (L1_8) <- 9 (L1_9) <- 10 (L1_10) + // \ 2'(L1_2) <- 3 + // DB: 1, 2, 3, 4, 5, 6 + // - We expect block 7 to backfilled (treated as finalized) + // - Then block 8-10 to be handled block by block (treated as unfinalized). 
+ for i := 7; i < 11; i++ { + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + // Mark block 7 as finalized + markBlockAsFinalized(t, th, 7) + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(11), newStart) + lgs, err = th.ORM.SelectLogsByBlockRange(7, 9) + require.NoError(t, err) + require.Equal(t, 3, len(lgs)) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000007`), lgs[0].Data) + assert.Equal(t, int64(7), lgs[0].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000008`), lgs[1].Data) + assert.Equal(t, int64(8), lgs[1].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000009`), lgs[2].Data) + assert.Equal(t, int64(9), lgs[2].BlockNumber) + th.assertDontHave(t, 7, 7) // Do not expect to save backfilled blocks. 
+ th.assertHaveCanonical(t, 8, 10) + + // Test scenario large backfill (multiple batches) + // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) <- 7 (L1_7) <- 8 (L1_8) <- 9 (L1_9) <- 10..16 + // \ 2'(L1_2) <- 3 + // DB: 1, 2, 3, 4, 5, 6, (backfilled 7), 8, 9, 10 + // - 11, 12, 13 backfilled in batch 1 + // - 14 backfilled in batch 2 + // - 15, 16, 17 to be treated as unfinalized + for i := 11; i < 18; i++ { + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + // Mark block 14 as finalized + markBlockAsFinalized(t, th, 14) + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(18), newStart) + lgs, err = th.ORM.SelectLogsByBlockRange(11, 17) + require.NoError(t, err) + assert.Equal(t, 7, len(lgs)) + th.assertHaveCanonical(t, 14, 16) // Should have last finalized block plus unfinalized blocks + th.assertDontHave(t, 11, 13) // Should not have older finalized blocks + + // Verify that a custom block timestamp will get written to db correctly also + b, err = th.Client.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + require.Equal(t, uint64(17), b.NumberU64()) + require.Equal(t, uint64(170), b.Time()) + require.NoError(t, th.Client.AdjustTime(1*time.Hour)) + th.Client.Commit() + + b, err = th.Client.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + require.Equal(t, uint64(180+time.Hour.Seconds()), b.Time()) + }) + } +} + +func TestLogPoller_PollAndSaveLogsDeepReorg(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + finalityDepth int64 + finalityTag bool + }{ + { + name: "fixed finality depth without finality tag", + finalityDepth: 3, + finalityTag: false, + }, + { + name: "chain finality in use", + finalityDepth: 0, + finalityTag: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2, 1000) + + 
// Set up a log poller listening for log emitter logs. + err := th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) + require.NoError(t, err) + + // Test scenario: one log 2 block chain. + // Chain gen <- 1 <- 2 (L1_1) + // DB: 1 + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err) + th.Client.Commit() + markBlockAsFinalized(t, th, 1) + + // Polling should get us the L1 log. + newStart := th.PollAndSaveLogs(testutils.Context(t), 1) + assert.Equal(t, int64(3), newStart) + // Check that L1_1 has a proper data payload + lgs, err := th.ORM.SelectLogsByBlockRange(2, 2) + require.NoError(t, err) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), lgs[0].Data) + + // Single block reorg and log poller not working for a while, mine blocks and progress with finalization + // Chain gen <- 1 <- 2 (L1_1) + // \ 2'(L1_2) <- 3 <- 4 <- 5 <- 6 (finalized on chain) <- 7 <- 8 <- 9 <- 10 + lca, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(1)) + require.NoError(t, err) + require.NoError(t, th.Client.Fork(testutils.Context(t), lca.Hash())) + // Create 2' + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(2)}) + require.NoError(t, err) + th.Client.Commit() + // Create 3-10 + for i := 3; i < 10; i++ { + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + markBlockAsFinalized(t, th, 6) + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(10), newStart) + + // Expect L1_2 to be properly updated + lgs, err = th.ORM.SelectLogsByBlockRange(2, 2) + require.NoError(t, err) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000002`), lgs[0].Data) + th.assertHaveCanonical(t, 1, 1) + 
th.assertDontHave(t, 2, 3) // These blocks are backfilled + th.assertHaveCanonical(t, 5, 10) + }) + } +} + +func TestLogPoller_LoadFilters(t *testing.T) { + t.Parallel() + th := SetupTH(t, false, 2, 3, 2, 1000) + + filter1 := logpoller.Filter{"first Filter", []common.Hash{ + EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0} + filter2 := logpoller.Filter{"second Filter", []common.Hash{ + EmitterABI.Events["Log2"].ID, EmitterABI.Events["Log3"].ID}, []common.Address{th.EmitterAddress2}, 0} + filter3 := logpoller.Filter{"third Filter", []common.Hash{ + EmitterABI.Events["Log1"].ID}, []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0} + + assert.True(t, filter1.Contains(nil)) + assert.False(t, filter1.Contains(&filter2)) + assert.False(t, filter2.Contains(&filter1)) + assert.True(t, filter1.Contains(&filter3)) + + err := th.LogPoller.RegisterFilter(filter1) + require.NoError(t, err) + err = th.LogPoller.RegisterFilter(filter2) + require.NoError(t, err) + err = th.LogPoller.RegisterFilter(filter3) + require.NoError(t, err) + + filters, err := th.ORM.LoadFilters() + require.NoError(t, err) + require.NotNil(t, filters) + require.Len(t, filters, 3) + + filter, ok := filters["first Filter"] + require.True(t, ok) + assert.True(t, filter.Contains(&filter1)) + assert.True(t, filter1.Contains(&filter)) + + filter, ok = filters["second Filter"] + require.True(t, ok) + assert.True(t, filter.Contains(&filter2)) + assert.True(t, filter2.Contains(&filter)) + + filter, ok = filters["third Filter"] + require.True(t, ok) + assert.True(t, filter.Contains(&filter3)) + assert.True(t, filter3.Contains(&filter)) + + t.Run("HasFilter", func(t *testing.T) { + assert.True(t, th.LogPoller.HasFilter("first Filter")) + assert.True(t, th.LogPoller.HasFilter("second Filter")) + assert.True(t, th.LogPoller.HasFilter("third Filter")) + assert.False(t, th.LogPoller.HasFilter("fourth Filter")) + }) +} + +func 
TestLogPoller_GetBlocks_Range(t *testing.T) { + t.Parallel() + th := SetupTH(t, false, 2, 3, 2, 1000) + + err := th.LogPoller.RegisterFilter(logpoller.Filter{"GetBlocks Test", []common.Hash{ + EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0}, + ) + require.NoError(t, err) + + // LP retrieves 0 blocks + blockNums := []uint64{} + blocks, err := th.LogPoller.GetBlocksRange(testutils.Context(t), blockNums) + require.NoError(t, err) + assert.Equal(t, 0, len(blocks)) + + // LP retrieves block 1 + blockNums = []uint64{1} + blocks, err = th.LogPoller.GetBlocksRange(testutils.Context(t), blockNums) + require.NoError(t, err) + assert.Equal(t, 1, len(blocks)) + assert.Equal(t, 1, int(blocks[0].BlockNumber)) + + // LP fails to retrieve block 2 because it's neither in DB nor returned by RPC + blockNums = []uint64{2} + _, err = th.LogPoller.GetBlocksRange(testutils.Context(t), blockNums) + require.Error(t, err) + assert.Equal(t, "blocks were not found in db or RPC call: [2]", err.Error()) + + // Emit a log and mine block #2 + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err) + th.Client.Commit() + + // Assert block 2 is not yet in DB + _, err = th.ORM.SelectBlockByNumber(2) + require.Error(t, err) + + // getBlocksRange is able to retrieve block 2 by calling RPC + rpcBlocks, err := th.LogPoller.GetBlocksRange(testutils.Context(t), blockNums) + require.NoError(t, err) + assert.Equal(t, 1, len(rpcBlocks)) + assert.Equal(t, 2, int(rpcBlocks[0].BlockNumber)) + + // Emit a log and mine block #3 + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(2)}) + require.NoError(t, err) + th.Client.Commit() + + // Assert block 3 is not yet in DB + _, err = th.ORM.SelectBlockByNumber(3) + require.Error(t, err) + + // getBlocksRange is able to retrieve blocks 1 and 3, without retrieving block 2 + blockNums2 := []uint64{1, 3} + rpcBlocks2, err := 
th.LogPoller.GetBlocksRange(testutils.Context(t), blockNums2) + require.NoError(t, err) + assert.Equal(t, 2, len(rpcBlocks2)) + assert.Equal(t, 1, int(rpcBlocks2[0].BlockNumber)) + assert.Equal(t, 3, int(rpcBlocks2[1].BlockNumber)) + + // after calling PollAndSaveLogs, block 2 & 3 are persisted in DB + th.LogPoller.PollAndSaveLogs(testutils.Context(t), 1) + block, err := th.ORM.SelectBlockByNumber(2) + require.NoError(t, err) + assert.Equal(t, 2, int(block.BlockNumber)) + block, err = th.ORM.SelectBlockByNumber(3) + require.NoError(t, err) + assert.Equal(t, 3, int(block.BlockNumber)) + + // getBlocksRange should still be able to return block 2 by fetching from DB + lpBlocks, err := th.LogPoller.GetBlocksRange(testutils.Context(t), blockNums) + require.NoError(t, err) + assert.Equal(t, 1, len(lpBlocks)) + assert.Equal(t, rpcBlocks[0].BlockNumber, lpBlocks[0].BlockNumber) + assert.Equal(t, rpcBlocks[0].BlockHash, lpBlocks[0].BlockHash) + + // getBlocksRange return multiple blocks + blockNums = []uint64{1, 2} + blocks, err = th.LogPoller.GetBlocksRange(testutils.Context(t), blockNums) + require.NoError(t, err) + assert.Equal(t, 1, int(blocks[0].BlockNumber)) + assert.NotEmpty(t, blocks[0].BlockHash) + assert.Equal(t, 2, int(blocks[1].BlockNumber)) + assert.NotEmpty(t, blocks[1].BlockHash) + + // getBlocksRange return blocks in requested order + blockNums = []uint64{2, 1} + reversedBlocks, err := th.LogPoller.GetBlocksRange(testutils.Context(t), blockNums) + require.NoError(t, err) + assert.Equal(t, blocks[0].BlockNumber, reversedBlocks[1].BlockNumber) + assert.Equal(t, blocks[0].BlockHash, reversedBlocks[1].BlockHash) + assert.Equal(t, blocks[1].BlockNumber, reversedBlocks[0].BlockNumber) + assert.Equal(t, blocks[1].BlockHash, reversedBlocks[0].BlockHash) + + // test RPC context cancellation + ctx, cancel := context.WithCancel(testutils.Context(t)) + cancel() + _, err = th.LogPoller.GetBlocksRange(ctx, blockNums) + require.Error(t, err) + assert.Contains(t, 
err.Error(), "context canceled") + + // test still works when qopts is cancelled + // but context object is not + ctx, cancel = context.WithCancel(testutils.Context(t)) + qopts := pg.WithParentCtx(ctx) + cancel() + _, err = th.LogPoller.GetBlocksRange(testutils.Context(t), blockNums, qopts) + require.NoError(t, err) +} + +func TestGetReplayFromBlock(t *testing.T) { + t.Parallel() + th := SetupTH(t, false, 2, 3, 2, 1000) + // Commit a few blocks + for i := 0; i < 10; i++ { + th.Client.Commit() + } + + // Nothing in the DB yet, should use whatever we specify. + requested := int64(5) + fromBlock, err := th.LogPoller.GetReplayFromBlock(testutils.Context(t), requested) + require.NoError(t, err) + assert.Equal(t, requested, fromBlock) + + // Do a poll, then we should have up to block 11 (blocks 0 & 1 are contract deployments, 2-10 logs). + nextBlock := th.PollAndSaveLogs(testutils.Context(t), 1) + require.Equal(t, int64(12), nextBlock) + + // Commit a few more so chain is ahead. + for i := 0; i < 3; i++ { + th.Client.Commit() + } + // Should take min(latest, requested), in this case latest. + requested = int64(15) + fromBlock, err = th.LogPoller.GetReplayFromBlock(testutils.Context(t), requested) + require.NoError(t, err) + latest, err := th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, latest.BlockNumber, fromBlock) + + // Should take min(latest, requested) in this case requested. 
+ requested = int64(7) + fromBlock, err = th.LogPoller.GetReplayFromBlock(testutils.Context(t), requested) + require.NoError(t, err) + assert.Equal(t, requested, fromBlock) +} + +func TestLogPoller_DBErrorHandling(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + lggr, observedLogs := logger.TestObserved(t, zapcore.WarnLevel) + chainID1 := testutils.NewRandomEVMChainID() + chainID2 := testutils.NewRandomEVMChainID() + db := pgtest.NewSqlxDB(t) + o := logpoller.NewORM(chainID1, db, lggr, pgtest.NewQConfig(true)) + + owner := testutils.MustNewSimTransactor(t) + ethDB := rawdb.NewMemoryDatabase() + ec := backends.NewSimulatedBackendWithDatabase(ethDB, map[common.Address]core.GenesisAccount{ + owner.From: { + Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)), + }, + }, 10e6) + _, _, emitter, err := log_emitter.DeployLogEmitter(owner, ec) + require.NoError(t, err) + _, err = emitter.EmitLog1(owner, []*big.Int{big.NewInt(9)}) + require.NoError(t, err) + _, err = emitter.EmitLog1(owner, []*big.Int{big.NewInt(7)}) + require.NoError(t, err) + ec.Commit() + ec.Commit() + ec.Commit() + + lp := logpoller.NewLogPoller(o, client.NewSimulatedBackendClient(t, ec, chainID2), lggr, 1*time.Hour, false, 2, 3, 2, 1000) + + err = lp.Replay(ctx, 5) // block number too high + require.ErrorContains(t, err, "Invalid replay block number") + + // Force a db error while loading the filters (tx aborted, already rolled back) + require.Error(t, commonutils.JustError(db.Exec(`invalid query`))) + go func() { + err = lp.Replay(ctx, 2) + assert.ErrorContains(t, err, "current transaction is aborted") + }() + + time.Sleep(100 * time.Millisecond) + require.NoError(t, lp.Start(ctx)) + require.Eventually(t, func() bool { + return observedLogs.Len() >= 5 + }, 2*time.Second, 20*time.Millisecond) + lp.Close() + + logMsgs := make(map[string]int) + for _, obs := range observedLogs.All() { + _, ok := logMsgs[obs.Entry.Message] + if ok { + logMsgs[(obs.Entry.Message)] = 1 + } else { + 
logMsgs[(obs.Entry.Message)]++ + } + } + + assert.Contains(t, logMsgs, "SQL ERROR") + assert.Contains(t, logMsgs, "Failed loading filters in main logpoller loop, retrying later") + assert.Contains(t, logMsgs, "Error executing replay, could not get fromBlock") + assert.Contains(t, logMsgs, "Backup log poller ran before filters loaded, skipping") +} + +type getLogErrData struct { + From string + To string + Limit int +} + +func TestTooManyLogResults(t *testing.T) { + ctx := testutils.Context(t) + ec := evmtest.NewEthClientMockWithDefaultChain(t) + lggr, obs := logger.TestObserved(t, zapcore.DebugLevel) + chainID := testutils.NewRandomEVMChainID() + db := pgtest.NewSqlxDB(t) + o := logpoller.NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + lp := logpoller.NewLogPoller(o, ec, lggr, 1*time.Hour, false, 2, 20, 10, 1000) + expected := []int64{10, 5, 2, 1} + + clientErr := client.JsonError{ + Code: -32005, + Data: getLogErrData{"0x100E698", "0x100E6D4", 10000}, + Message: "query returned more than 10000 results. 
Try with this block range [0x100E698, 0x100E6D4].", + } + + call1 := ec.On("HeadByNumber", mock.Anything, mock.Anything).Return(func(ctx context.Context, blockNumber *big.Int) (*evmtypes.Head, error) { + if blockNumber == nil { + return &evmtypes.Head{Number: 300}, nil // Simulate currentBlock = 300 + } + return &evmtypes.Head{Number: blockNumber.Int64()}, nil + }) + + call2 := ec.On("FilterLogs", mock.Anything, mock.Anything).Return(func(ctx context.Context, fq ethereum.FilterQuery) (logs []types.Log, err error) { + if fq.BlockHash != nil { + return []types.Log{}, nil // succeed when single block requested + } + from := fq.FromBlock.Uint64() + to := fq.ToBlock.Uint64() + if to-from >= 4 { + return []types.Log{}, &clientErr // return "too many results" error if block range spans 4 or more blocks + } + return logs, err + }) + + addr := testutils.NewAddress() + err := lp.RegisterFilter(logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{addr}, 0}) + require.NoError(t, err) + lp.PollAndSaveLogs(ctx, 5) + block, err2 := o.SelectLatestBlock() + require.NoError(t, err2) + assert.Equal(t, int64(298), block.BlockNumber) + + logs := obs.FilterLevelExact(zapcore.WarnLevel).FilterMessageSnippet("halving block range batch size").FilterFieldKey("newBatchSize").All() + // Should have tried again 3 times--first reducing batch size to 10, then 5, then 2 + require.Len(t, logs, 3) + for i, s := range expected[:3] { + assert.Equal(t, s, logs[i].ContextMap()["newBatchSize"]) + } + + obs.TakeAll() + call1.Unset() + call2.Unset() + + // Now jump to block 500, but return error no matter how small the block range gets. + // Should exit the loop with a critical error instead of hanging. 
+ call1.On("HeadByNumber", mock.Anything, mock.Anything).Return(func(ctx context.Context, blockNumber *big.Int) (*evmtypes.Head, error) { + if blockNumber == nil { + return &evmtypes.Head{Number: 500}, nil // Simulate currentBlock = 300 + } + return &evmtypes.Head{Number: blockNumber.Int64()}, nil + }) + call2.On("FilterLogs", mock.Anything, mock.Anything).Return(func(ctx context.Context, fq ethereum.FilterQuery) (logs []types.Log, err error) { + if fq.BlockHash != nil { + return []types.Log{}, nil // succeed when single block requested + } + return []types.Log{}, &clientErr // return "too many results" error if block range spans 4 or more blocks + }) + + lp.PollAndSaveLogs(ctx, 298) + block, err2 = o.SelectLatestBlock() + require.NoError(t, err2) + assert.Equal(t, int64(298), block.BlockNumber) + warns := obs.FilterMessageSnippet("halving block range").FilterLevelExact(zapcore.WarnLevel).All() + crit := obs.FilterMessageSnippet("failed to retrieve logs").FilterLevelExact(zapcore.DPanicLevel).All() + require.Len(t, warns, 4) + for i, s := range expected { + assert.Equal(t, s, warns[i].ContextMap()["newBatchSize"]) + } + + require.Len(t, crit, 1) + assert.Contains(t, crit[0].Message, "Too many log results in a single block") +} + +func Test_PollAndQueryFinalizedBlocks(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + firstBatchLen := 3 + secondBatchLen := 5 + + th := SetupTH(t, true, 2, 3, 2, 1000) + + eventSig := EmitterABI.Events["Log1"].ID + err := th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "GetBlocks Test", + EventSigs: []common.Hash{eventSig}, + Addresses: []common.Address{th.EmitterAddress1}}, + ) + require.NoError(t, err) + + // Generate block that will be finalized + for i := 0; i < firstBatchLen; i++ { + _, err1 := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + th.Client.Commit() + } + + // Mark current head as finalized + h := th.Client.Blockchain().CurrentHeader() + 
th.Client.Blockchain().SetFinalized(h) + + // Generate next blocks, not marked as finalized + for i := 0; i < secondBatchLen; i++ { + _, err1 := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + th.Client.Commit() + } + + currentBlock := th.PollAndSaveLogs(ctx, 1) + require.Equal(t, int(currentBlock), firstBatchLen+secondBatchLen+2) + + finalizedLogs, err := th.LogPoller.LogsDataWordGreaterThan( + eventSig, + th.EmitterAddress1, + 0, + common.Hash{}, + logpoller.Finalized, + ) + require.NoError(t, err) + require.Len(t, finalizedLogs, firstBatchLen) + + numberOfConfirmations := 1 + logsByConfs, err := th.LogPoller.LogsDataWordGreaterThan( + eventSig, + th.EmitterAddress1, + 0, + common.Hash{}, + logpoller.Confirmations(numberOfConfirmations), + ) + require.NoError(t, err) + require.Len(t, logsByConfs, firstBatchLen+secondBatchLen-numberOfConfirmations) +} + +func Test_PollAndSavePersistsFinalityInBlocks(t *testing.T) { + ctx := testutils.Context(t) + numberOfBlocks := 10 + + tests := []struct { + name string + useFinalityTag bool + finalityDepth int64 + expectedFinalizedBlock int64 + }{ + { + name: "using fixed finality depth", + useFinalityTag: false, + finalityDepth: 2, + expectedFinalizedBlock: int64(numberOfBlocks - 2), + }, + { + name: "setting last finalized block number to 0 if finality is too deep", + useFinalityTag: false, + finalityDepth: 20, + expectedFinalizedBlock: 0, + }, + { + name: "using finality from chain", + useFinalityTag: true, + finalityDepth: 0, + expectedFinalizedBlock: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.useFinalityTag, tt.finalityDepth, 3, 2, 1000) + // Should return error before the first poll and save + _, err := th.LogPoller.LatestBlock() + require.Error(t, err) + + // Mark first block as finalized + h := th.Client.Blockchain().CurrentHeader() + th.Client.Blockchain().SetFinalized(h) + + // Create a couple of blocks + for i := 0; 
i < numberOfBlocks-1; i++ { + th.Client.Commit() + } + + th.PollAndSaveLogs(ctx, 1) + + latestBlock, err := th.LogPoller.LatestBlock() + require.NoError(t, err) + require.Equal(t, int64(numberOfBlocks), latestBlock.BlockNumber) + require.Equal(t, tt.expectedFinalizedBlock, latestBlock.FinalizedBlockNumber) + }) + } +} + +func Test_CreatedAfterQueriesWithBackfill(t *testing.T) { + emittedLogs := 60 + ctx := testutils.Context(t) + + tests := []struct { + name string + finalityDepth int64 + finalityTag bool + }{ + { + name: "fixed finality depth without finality tag", + finalityDepth: 10, + finalityTag: false, + }, + { + name: "chain finality in use", + finalityDepth: 0, + finalityTag: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2, 1000) + + header, err := th.Client.HeaderByNumber(ctx, nil) + require.NoError(t, err) + + genesisBlockTime := time.UnixMilli(int64(header.Time)) + + // Emit some logs in blocks + for i := 0; i < emittedLogs; i++ { + _, err2 := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err2) + th.Client.Commit() + } + + // First PollAndSave, no filters are registered + currentBlock := th.PollAndSaveLogs(ctx, 1) + + err = th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) + require.NoError(t, err) + + // Emit blocks to cover finality depth, because backup always backfill up to the one block before last finalized + for i := 0; i < int(tt.finalityDepth)+1; i++ { + bh := th.Client.Commit() + markBlockAsFinalizedByHash(t, th, bh) + } + + // LogPoller should backfill entire history + th.LogPoller.BackupPollAndSaveLogs(ctx, 100) + require.NoError(t, err) + + // Make sure that all logs are backfilled + logs, err := th.LogPoller.Logs( + 0, + currentBlock, + EmitterABI.Events["Log1"].ID, + 
th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs) + + // We should get all the logs by the block_timestamp + logs, err = th.LogPoller.LogsCreatedAfter( + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + genesisBlockTime, + 0, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs) + }) + } +} + +func Test_PruneOldBlocks(t *testing.T) { + ctx := testutils.Context(t) + + tests := []struct { + name string + keepFinalizedBlocksDepth int64 + blockToCreate int + blocksLeft int + wantErr bool + }{ + { + name: "returns error if no blocks yet", + keepFinalizedBlocksDepth: 10, + blockToCreate: 0, + wantErr: true, + }, + { + name: "returns if there is not enough blocks in the db", + keepFinalizedBlocksDepth: 11, + blockToCreate: 10, + blocksLeft: 10, + }, + { + name: "prunes matching blocks", + keepFinalizedBlocksDepth: 1000, + blockToCreate: 2000, + blocksLeft: 1010, // last finalized block is 10 block behind + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, true, 0, 3, 2, tt.keepFinalizedBlocksDepth) + + for i := 1; i <= tt.blockToCreate; i++ { + err := th.ORM.InsertBlock(utils.RandomBytes32(), int64(i+10), time.Now(), int64(i)) + require.NoError(t, err) + } + + if tt.wantErr { + require.Error(t, th.LogPoller.PruneOldBlocks(ctx)) + return + } + + require.NoError(t, th.LogPoller.PruneOldBlocks(ctx)) + blocks, err := th.ORM.GetBlocksRange(0, math.MaxInt64, pg.WithParentCtx(ctx)) + require.NoError(t, err) + assert.Len(t, blocks, tt.blocksLeft) + }) + } +} + +func markBlockAsFinalized(t *testing.T, th TestHarness, blockNumber int64) { + b, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(blockNumber)) + require.NoError(t, err) + th.Client.Blockchain().SetFinalized(b.Header()) +} + +func markBlockAsFinalizedByHash(t *testing.T, th TestHarness, blockHash common.Hash) { + b, err := 
th.Client.BlockByHash(testutils.Context(t), blockHash) + require.NoError(t, err) + th.Client.Blockchain().SetFinalized(b.Header()) +} diff --git a/core/chains/evm/logpoller/mocks/log_poller.go b/core/chains/evm/logpoller/mocks/log_poller.go new file mode 100644 index 00000000..bc2beb10 --- /dev/null +++ b/core/chains/evm/logpoller/mocks/log_poller.go @@ -0,0 +1,880 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + logpoller "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + time "time" +) + +// LogPoller is an autogenerated mock type for the LogPoller type +type LogPoller struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *LogPoller) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetBlocksRange provides a mock function with given fields: ctx, numbers, qopts +func (_m *LogPoller) GetBlocksRange(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, numbers) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetBlocksRange") + } + + var r0 []logpoller.LogPollerBlock + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []uint64, ...pg.QOpt) ([]logpoller.LogPollerBlock, error)); ok { + return rf(ctx, numbers, qopts...) 
+ } + if rf, ok := ret.Get(0).(func(context.Context, []uint64, ...pg.QOpt) []logpoller.LogPollerBlock); ok { + r0 = rf(ctx, numbers, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.LogPollerBlock) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []uint64, ...pg.QOpt) error); ok { + r1 = rf(ctx, numbers, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HasFilter provides a mock function with given fields: name +func (_m *LogPoller) HasFilter(name string) bool { + ret := _m.Called(name) + + if len(ret) == 0 { + panic("no return value specified for HasFilter") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(string) bool); ok { + r0 = rf(name) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// HealthReport provides a mock function with given fields: +func (_m *LogPoller) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// IndexedLogs provides a mock function with given fields: eventSig, address, topicIndex, topicValues, confs, qopts +func (_m *LogPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, eventSig, address, topicIndex, topicValues, confs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for IndexedLogs") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, topicIndex, topicValues, confs, qopts...) + } + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(eventSig, address, topicIndex, topicValues, confs, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, []common.Hash, logpoller.Confirmations, ...pg.QOpt) error); ok { + r1 = rf(eventSig, address, topicIndex, topicValues, confs, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IndexedLogsByBlockRange provides a mock function with given fields: start, end, eventSig, address, topicIndex, topicValues, qopts +func (_m *LogPoller) IndexedLogsByBlockRange(start int64, end int64, eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, start, end, eventSig, address, topicIndex, topicValues) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for IndexedLogsByBlockRange") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address, int, []common.Hash, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(start, end, eventSig, address, topicIndex, topicValues, qopts...) 
+ } + if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address, int, []common.Hash, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(start, end, eventSig, address, topicIndex, topicValues, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(int64, int64, common.Hash, common.Address, int, []common.Hash, ...pg.QOpt) error); ok { + r1 = rf(start, end, eventSig, address, topicIndex, topicValues, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IndexedLogsByTxHash provides a mock function with given fields: eventSig, address, txHash, qopts +func (_m *LogPoller) IndexedLogsByTxHash(eventSig common.Hash, address common.Address, txHash common.Hash, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, eventSig, address, txHash) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for IndexedLogsByTxHash") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, common.Hash, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, txHash, qopts...) + } + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, common.Hash, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(eventSig, address, txHash, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, common.Hash, ...pg.QOpt) error); ok { + r1 = rf(eventSig, address, txHash, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IndexedLogsCreatedAfter provides a mock function with given fields: eventSig, address, topicIndex, topicValues, after, confs, qopts +func (_m *LogPoller) IndexedLogsCreatedAfter(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, after time.Time, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, eventSig, address, topicIndex, topicValues, after, confs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for IndexedLogsCreatedAfter") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, topicIndex, topicValues, after, confs, qopts...) + } + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(eventSig, address, topicIndex, topicValues, after, confs, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, []common.Hash, time.Time, logpoller.Confirmations, ...pg.QOpt) error); ok { + r1 = rf(eventSig, address, topicIndex, topicValues, after, confs, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IndexedLogsTopicGreaterThan provides a mock function with given fields: eventSig, address, topicIndex, topicValueMin, confs, qopts +func (_m *LogPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, eventSig, address, topicIndex, topicValueMin, confs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for IndexedLogsTopicGreaterThan") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, topicIndex, topicValueMin, confs, qopts...) + } + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(eventSig, address, topicIndex, topicValueMin, confs, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) error); ok { + r1 = rf(eventSig, address, topicIndex, topicValueMin, confs, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IndexedLogsTopicRange provides a mock function with given fields: eventSig, address, topicIndex, topicValueMin, topicValueMax, confs, qopts +func (_m *LogPoller) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, eventSig, address, topicIndex, topicValueMin, topicValueMax, confs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for IndexedLogsTopicRange") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs, qopts...) + } + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations, ...pg.QOpt) error); ok { + r1 = rf(eventSig, address, topicIndex, topicValueMin, topicValueMax, confs, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IndexedLogsWithSigsExcluding provides a mock function with given fields: address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs, qopts +func (_m *LogPoller) IndexedLogsWithSigsExcluding(address common.Address, eventSigA common.Hash, eventSigB common.Hash, topicIndex int, fromBlock int64, toBlock int64, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for IndexedLogsWithSigsExcluding") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs, qopts...) + } + if rf, ok := ret.Get(0).(func(common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(common.Address, common.Hash, common.Hash, int, int64, int64, logpoller.Confirmations, ...pg.QOpt) error); ok { + r1 = rf(address, eventSigA, eventSigB, topicIndex, fromBlock, toBlock, confs, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestBlock provides a mock function with given fields: qopts +func (_m *LogPoller) LatestBlock(qopts ...pg.QOpt) (logpoller.LogPollerBlock, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for LatestBlock") + } + + var r0 logpoller.LogPollerBlock + var r1 error + if rf, ok := ret.Get(0).(func(...pg.QOpt) (logpoller.LogPollerBlock, error)); ok { + return rf(qopts...) + } + if rf, ok := ret.Get(0).(func(...pg.QOpt) logpoller.LogPollerBlock); ok { + r0 = rf(qopts...) + } else { + r0 = ret.Get(0).(logpoller.LogPollerBlock) + } + + if rf, ok := ret.Get(1).(func(...pg.QOpt) error); ok { + r1 = rf(qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestBlockByEventSigsAddrsWithConfs provides a mock function with given fields: fromBlock, eventSigs, addresses, confs, qopts +func (_m *LogPoller) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs logpoller.Confirmations, qopts ...pg.QOpt) (int64, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, fromBlock, eventSigs, addresses, confs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for LatestBlockByEventSigsAddrsWithConfs") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations, ...pg.QOpt) (int64, error)); ok { + return rf(fromBlock, eventSigs, addresses, confs, qopts...) + } + if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations, ...pg.QOpt) int64); ok { + r0 = rf(fromBlock, eventSigs, addresses, confs, qopts...) 
+ } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations, ...pg.QOpt) error); ok { + r1 = rf(fromBlock, eventSigs, addresses, confs, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestLogByEventSigWithConfs provides a mock function with given fields: eventSig, address, confs, qopts +func (_m *LogPoller) LatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs logpoller.Confirmations, qopts ...pg.QOpt) (*logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, eventSig, address, confs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for LatestLogByEventSigWithConfs") + } + + var r0 *logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, logpoller.Confirmations, ...pg.QOpt) (*logpoller.Log, error)); ok { + return rf(eventSig, address, confs, qopts...) + } + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, logpoller.Confirmations, ...pg.QOpt) *logpoller.Log); ok { + r0 = rf(eventSig, address, confs, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, logpoller.Confirmations, ...pg.QOpt) error); ok { + r1 = rf(eventSig, address, confs, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestLogEventSigsAddrsWithConfs provides a mock function with given fields: fromBlock, eventSigs, addresses, confs, qopts +func (_m *LogPoller) LatestLogEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, fromBlock, eventSigs, addresses, confs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for LatestLogEventSigsAddrsWithConfs") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(fromBlock, eventSigs, addresses, confs, qopts...) + } + if rf, ok := ret.Get(0).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(fromBlock, eventSigs, addresses, confs, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(int64, []common.Hash, []common.Address, logpoller.Confirmations, ...pg.QOpt) error); ok { + r1 = rf(fromBlock, eventSigs, addresses, confs, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Logs provides a mock function with given fields: start, end, eventSig, address, qopts +func (_m *LogPoller) Logs(start int64, end int64, eventSig common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, start, end, eventSig, address) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for Logs") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(start, end, eventSig, address, qopts...) + } + if rf, ok := ret.Get(0).(func(int64, int64, common.Hash, common.Address, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(start, end, eventSig, address, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(int64, int64, common.Hash, common.Address, ...pg.QOpt) error); ok { + r1 = rf(start, end, eventSig, address, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LogsCreatedAfter provides a mock function with given fields: eventSig, address, _a2, confs, qopts +func (_m *LogPoller) LogsCreatedAfter(eventSig common.Hash, address common.Address, _a2 time.Time, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, eventSig, address, _a2, confs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for LogsCreatedAfter") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, time.Time, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, _a2, confs, qopts...) + } + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, time.Time, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(eventSig, address, _a2, confs, qopts...) 
+ } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, time.Time, logpoller.Confirmations, ...pg.QOpt) error); ok { + r1 = rf(eventSig, address, _a2, confs, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LogsDataWordBetween provides a mock function with given fields: eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs, qopts +func (_m *LogPoller) LogsDataWordBetween(eventSig common.Hash, address common.Address, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for LogsDataWordBetween") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs, qopts...) + } + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) error); ok { + r1 = rf(eventSig, address, wordIndexMin, wordIndexMax, wordValue, confs, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LogsDataWordGreaterThan provides a mock function with given fields: eventSig, address, wordIndex, wordValueMin, confs, qopts +func (_m *LogPoller) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, eventSig, address, wordIndex, wordValueMin, confs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for LogsDataWordGreaterThan") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, wordIndex, wordValueMin, confs, qopts...) + } + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(eventSig, address, wordIndex, wordValueMin, confs, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, logpoller.Confirmations, ...pg.QOpt) error); ok { + r1 = rf(eventSig, address, wordIndex, wordValueMin, confs, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LogsDataWordRange provides a mock function with given fields: eventSig, address, wordIndex, wordValueMin, wordValueMax, confs, qopts +func (_m *LogPoller) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, wordValueMax common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, eventSig, address, wordIndex, wordValueMin, wordValueMax, confs) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for LogsDataWordRange") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs, qopts...) + } + if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, common.Hash, logpoller.Confirmations, ...pg.QOpt) error); ok { + r1 = rf(eventSig, address, wordIndex, wordValueMin, wordValueMax, confs, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LogsWithSigs provides a mock function with given fields: start, end, eventSigs, address, qopts +func (_m *LogPoller) LogsWithSigs(start int64, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, start, end, eventSigs, address) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for LogsWithSigs") + } + + var r0 []logpoller.Log + var r1 error + if rf, ok := ret.Get(0).(func(int64, int64, []common.Hash, common.Address, ...pg.QOpt) ([]logpoller.Log, error)); ok { + return rf(start, end, eventSigs, address, qopts...) + } + if rf, ok := ret.Get(0).(func(int64, int64, []common.Hash, common.Address, ...pg.QOpt) []logpoller.Log); ok { + r0 = rf(start, end, eventSigs, address, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]logpoller.Log) + } + } + + if rf, ok := ret.Get(1).(func(int64, int64, []common.Hash, common.Address, ...pg.QOpt) error); ok { + r1 = rf(start, end, eventSigs, address, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Name provides a mock function with given fields: +func (_m *LogPoller) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Ready provides a mock function with given fields: +func (_m *LogPoller) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RegisterFilter provides a mock function with given fields: filter, qopts +func (_m *LogPoller) RegisterFilter(filter logpoller.Filter, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, filter) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for RegisterFilter") + } + + var r0 error + if rf, ok := ret.Get(0).(func(logpoller.Filter, ...pg.QOpt) error); ok { + r0 = rf(filter, qopts...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Replay provides a mock function with given fields: ctx, fromBlock +func (_m *LogPoller) Replay(ctx context.Context, fromBlock int64) error { + ret := _m.Called(ctx, fromBlock) + + if len(ret) == 0 { + panic("no return value specified for Replay") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok { + r0 = rf(ctx, fromBlock) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ReplayAsync provides a mock function with given fields: fromBlock +func (_m *LogPoller) ReplayAsync(fromBlock int64) { + _m.Called(fromBlock) +} + +// Start provides a mock function with given fields: _a0 +func (_m *LogPoller) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UnregisterFilter provides a mock function with given fields: name, qopts +func (_m *LogPoller) UnregisterFilter(name string, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, name) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for UnregisterFilter") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, ...pg.QOpt) error); ok { + r0 = rf(name, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewLogPoller creates a new instance of LogPoller. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewLogPoller(t interface { + mock.TestingT + Cleanup(func()) +}) *LogPoller { + mock := &LogPoller{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/logpoller/models.go b/core/chains/evm/logpoller/models.go new file mode 100644 index 00000000..66cd99d9 --- /dev/null +++ b/core/chains/evm/logpoller/models.go @@ -0,0 +1,67 @@ +package logpoller + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/lib/pq" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +// LogPollerBlock represents an unfinalized block +// used for reorg detection when polling. +type LogPollerBlock struct { + EvmChainId *big.Big + BlockHash common.Hash + // Note geth uses int64 internally https://github.com/ethereum/go-ethereum/blob/f66f1a16b3c480d3a43ac7e8a09ab3e362e96ae4/eth/filters/api.go#L340 + BlockNumber int64 + BlockTimestamp time.Time + FinalizedBlockNumber int64 + CreatedAt time.Time +} + +// Log represents an EVM log. 
+type Log struct { + EvmChainId *big.Big + LogIndex int64 + BlockHash common.Hash + BlockNumber int64 + BlockTimestamp time.Time + Topics pq.ByteaArray + EventSig common.Hash + Address common.Address + TxHash common.Hash + Data []byte + CreatedAt time.Time +} + +func (l *Log) GetTopics() []common.Hash { + var tps []common.Hash + for _, topic := range l.Topics { + tps = append(tps, common.BytesToHash(topic)) + } + return tps +} + +func (l *Log) ToGethLog() types.Log { + return types.Log{ + Data: l.Data, + Address: l.Address, + BlockHash: l.BlockHash, + BlockNumber: uint64(l.BlockNumber), + Topics: l.GetTopics(), + TxHash: l.TxHash, + Index: uint(l.LogIndex), + } +} + +func NewLogPollerBlock(blockHash common.Hash, blockNumber int64, timestamp time.Time, finalizedBlockNumber int64) LogPollerBlock { + return LogPollerBlock{ + BlockHash: blockHash, + BlockNumber: blockNumber, + BlockTimestamp: timestamp, + FinalizedBlockNumber: finalizedBlockNumber, + } +} diff --git a/core/chains/evm/logpoller/observability.go b/core/chains/evm/logpoller/observability.go new file mode 100644 index 00000000..dc37eafa --- /dev/null +++ b/core/chains/evm/logpoller/observability.go @@ -0,0 +1,300 @@ +package logpoller + +import ( + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/jmoiron/sqlx" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/logger" + + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type queryType string + +const ( + create queryType = "create" + read queryType = "read" + del queryType = "delete" +) + +var ( + sqlLatencyBuckets = []float64{ + float64(1 * time.Millisecond), + float64(5 * time.Millisecond), + float64(10 * time.Millisecond), + float64(20 * time.Millisecond), + float64(30 * time.Millisecond), + float64(40 * time.Millisecond), + float64(50 * time.Millisecond), + float64(60 * time.Millisecond), + float64(70 * 
time.Millisecond), + float64(80 * time.Millisecond), + float64(90 * time.Millisecond), + float64(100 * time.Millisecond), + float64(200 * time.Millisecond), + float64(300 * time.Millisecond), + float64(400 * time.Millisecond), + float64(500 * time.Millisecond), + float64(750 * time.Millisecond), + float64(1 * time.Second), + float64(2 * time.Second), + float64(5 * time.Second), + } + lpQueryDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "log_poller_query_duration", + Help: "Measures duration of Log Poller's queries fetching logs", + Buckets: sqlLatencyBuckets, + }, []string{"evmChainID", "query", "type"}) + lpQueryDataSets = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "log_poller_query_dataset_size", + Help: "Measures size of the datasets returned by Log Poller's queries", + }, []string{"evmChainID", "query", "type"}) + lpLogsInserted = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "log_poller_logs_inserted", + Help: "Counter to track number of logs inserted by Log Poller", + }, []string{"evmChainID"}) + lpBlockInserted = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "log_poller_blocks_inserted", + Help: "Counter to track number of blocks inserted by Log Poller", + }, []string{"evmChainID"}) +) + +// ObservedORM is a decorator layer for ORM used by LogPoller, responsible for pushing Prometheus metrics reporting duration and size of result set for the queries. 
// It doesn't change internal logic, because all calls are delegated to the origin ORM
type ObservedORM struct {
	ORM                                        // embedded origin ORM; all behavior is delegated to it
	queryDuration  *prometheus.HistogramVec   // latency per (chain, query, type)
	datasetSize    *prometheus.GaugeVec       // rows returned per (chain, query, type)
	logsInserted   *prometheus.CounterVec     // total logs inserted per chain
	blocksInserted *prometheus.CounterVec     // total blocks inserted per chain
	chainId        string                     // pre-rendered chain ID used as a metric label value
}

// NewObservedORM creates an observed version of log poller's ORM created by NewORM
// Please see ObservedLogPoller for more details on how latencies are measured
func NewObservedORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) *ObservedORM {
	return &ObservedORM{
		ORM: NewORM(chainID, db, lggr, cfg),
		// All metric vecs are package-level singletons; instances are
		// distinguished only by the chainId label below.
		queryDuration:  lpQueryDuration,
		datasetSize:    lpQueryDataSets,
		logsInserted:   lpLogsInserted,
		blocksInserted: lpBlockInserted,
		chainId:        chainID.String(),
	}
}

// InsertLogs delegates to the origin ORM and records query duration plus,
// on success, the number of logs inserted.
func (o *ObservedORM) InsertLogs(logs []Log, qopts ...pg.QOpt) error {
	err := withObservedExec(o, "InsertLogs", create, func() error {
		return o.ORM.InsertLogs(logs, qopts...)
	})
	// nil block: only the logs counter is bumped (see trackInsertedLogsAndBlock).
	trackInsertedLogsAndBlock(o, logs, nil, err)
	return err
}

// InsertLogsWithBlock delegates to the origin ORM and records query duration plus,
// on success, both the logs counter and the blocks counter.
func (o *ObservedORM) InsertLogsWithBlock(logs []Log, block LogPollerBlock, qopts ...pg.QOpt) error {
	err := withObservedExec(o, "InsertLogsWithBlock", create, func() error {
		return o.ORM.InsertLogsWithBlock(logs, block, qopts...)
	})
	trackInsertedLogsAndBlock(o, logs, &block, err)
	return err
}

// InsertFilter delegates to the origin ORM, recording duration under the "create" type.
func (o *ObservedORM) InsertFilter(filter Filter, qopts ...pg.QOpt) error {
	return withObservedExec(o, "InsertFilter", create, func() error {
		return o.ORM.InsertFilter(filter, qopts...)
	})
}

// LoadFilters delegates to the origin ORM, recording duration under the "read" type.
func (o *ObservedORM) LoadFilters(qopts ...pg.QOpt) (map[string]Filter, error) {
	return withObservedQuery(o, "LoadFilters", func() (map[string]Filter, error) {
		return o.ORM.LoadFilters(qopts...)
	})
}

// DeleteFilter delegates to the origin ORM, recording duration under the "delete" type.
func (o *ObservedORM) DeleteFilter(name string, qopts ...pg.QOpt) error {
	return withObservedExec(o, "DeleteFilter", del, func() error {
		return o.ORM.DeleteFilter(name, qopts...)
+ }) +} + +func (o *ObservedORM) DeleteBlocksBefore(end int64, qopts ...pg.QOpt) error { + return withObservedExec(o, "DeleteBlocksBefore", del, func() error { + return o.ORM.DeleteBlocksBefore(end, qopts...) + }) +} + +func (o *ObservedORM) DeleteLogsAndBlocksAfter(start int64, qopts ...pg.QOpt) error { + return withObservedExec(o, "DeleteLogsAndBlocksAfter", del, func() error { + return o.ORM.DeleteLogsAndBlocksAfter(start, qopts...) + }) +} + +func (o *ObservedORM) DeleteExpiredLogs(qopts ...pg.QOpt) error { + return withObservedExec(o, "DeleteExpiredLogs", del, func() error { + return o.ORM.DeleteExpiredLogs(qopts...) + }) +} + +func (o *ObservedORM) SelectBlockByNumber(n int64, qopts ...pg.QOpt) (*LogPollerBlock, error) { + return withObservedQuery(o, "SelectBlockByNumber", func() (*LogPollerBlock, error) { + return o.ORM.SelectBlockByNumber(n, qopts...) + }) +} + +func (o *ObservedORM) SelectLatestBlock(qopts ...pg.QOpt) (*LogPollerBlock, error) { + return withObservedQuery(o, "SelectLatestBlock", func() (*LogPollerBlock, error) { + return o.ORM.SelectLatestBlock(qopts...) + }) +} + +func (o *ObservedORM) SelectLatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations, qopts ...pg.QOpt) (*Log, error) { + return withObservedQuery(o, "SelectLatestLogByEventSigWithConfs", func() (*Log, error) { + return o.ORM.SelectLatestLogByEventSigWithConfs(eventSig, address, confs, qopts...) + }) +} + +func (o *ObservedORM) SelectLogsWithSigs(start, end int64, address common.Address, eventSigs []common.Hash, qopts ...pg.QOpt) ([]Log, error) { + return withObservedQueryAndResults(o, "SelectLogsWithSigs", func() ([]Log, error) { + return o.ORM.SelectLogsWithSigs(start, end, address, eventSigs, qopts...) 
+ }) +} + +func (o *ObservedORM) SelectLogsCreatedAfter(address common.Address, eventSig common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return withObservedQueryAndResults(o, "SelectLogsCreatedAfter", func() ([]Log, error) { + return o.ORM.SelectLogsCreatedAfter(address, eventSig, after, confs, qopts...) + }) +} + +func (o *ObservedORM) SelectIndexedLogs(address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return withObservedQueryAndResults(o, "SelectIndexedLogs", func() ([]Log, error) { + return o.ORM.SelectIndexedLogs(address, eventSig, topicIndex, topicValues, confs, qopts...) + }) +} + +func (o *ObservedORM) SelectIndexedLogsByBlockRange(start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]Log, error) { + return withObservedQueryAndResults(o, "SelectIndexedLogsByBlockRange", func() ([]Log, error) { + return o.ORM.SelectIndexedLogsByBlockRange(start, end, address, eventSig, topicIndex, topicValues, qopts...) + }) +} + +func (o *ObservedORM) SelectIndexedLogsCreatedAfter(address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return withObservedQueryAndResults(o, "SelectIndexedLogsCreatedAfter", func() ([]Log, error) { + return o.ORM.SelectIndexedLogsCreatedAfter(address, eventSig, topicIndex, topicValues, after, confs, qopts...) 
+ }) +} + +func (o *ObservedORM) SelectIndexedLogsWithSigsExcluding(sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return withObservedQueryAndResults(o, "SelectIndexedLogsWithSigsExcluding", func() ([]Log, error) { + return o.ORM.SelectIndexedLogsWithSigsExcluding(sigA, sigB, topicIndex, address, startBlock, endBlock, confs, qopts...) + }) +} + +func (o *ObservedORM) SelectLogs(start, end int64, address common.Address, eventSig common.Hash, qopts ...pg.QOpt) ([]Log, error) { + return withObservedQueryAndResults(o, "SelectLogs", func() ([]Log, error) { + return o.ORM.SelectLogs(start, end, address, eventSig, qopts...) + }) +} + +func (o *ObservedORM) SelectIndexedLogsByTxHash(address common.Address, eventSig common.Hash, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { + return withObservedQueryAndResults(o, "SelectIndexedLogsByTxHash", func() ([]Log, error) { + return o.ORM.SelectIndexedLogsByTxHash(address, eventSig, txHash, qopts...) + }) +} + +func (o *ObservedORM) GetBlocksRange(start int64, end int64, qopts ...pg.QOpt) ([]LogPollerBlock, error) { + return withObservedQueryAndResults(o, "GetBlocksRange", func() ([]LogPollerBlock, error) { + return o.ORM.GetBlocksRange(start, end, qopts...) + }) +} + +func (o *ObservedORM) SelectLatestLogEventSigsAddrsWithConfs(fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return withObservedQueryAndResults(o, "SelectLatestLogEventSigsAddrsWithConfs", func() ([]Log, error) { + return o.ORM.SelectLatestLogEventSigsAddrsWithConfs(fromBlock, addresses, eventSigs, confs, qopts...) 
+ }) +} + +func (o *ObservedORM) SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) { + return withObservedQuery(o, "SelectLatestBlockByEventSigsAddrsWithConfs", func() (int64, error) { + return o.ORM.SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock, eventSigs, addresses, confs, qopts...) + }) +} + +func (o *ObservedORM) SelectLogsDataWordRange(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return withObservedQueryAndResults(o, "SelectLogsDataWordRange", func() ([]Log, error) { + return o.ORM.SelectLogsDataWordRange(address, eventSig, wordIndex, wordValueMin, wordValueMax, confs, qopts...) + }) +} + +func (o *ObservedORM) SelectLogsDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return withObservedQueryAndResults(o, "SelectLogsDataWordGreaterThan", func() ([]Log, error) { + return o.ORM.SelectLogsDataWordGreaterThan(address, eventSig, wordIndex, wordValueMin, confs, qopts...) + }) +} + +func (o *ObservedORM) SelectLogsDataWordBetween(address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + return withObservedQueryAndResults(o, "SelectLogsDataWordBetween", func() ([]Log, error) { + return o.ORM.SelectLogsDataWordBetween(address, eventSig, wordIndexMin, wordIndexMax, wordValue, confs, qopts...) 
	})
}

// SelectIndexedLogsTopicGreaterThan delegates to the origin ORM, reporting duration and dataset size.
func (o *ObservedORM) SelectIndexedLogsTopicGreaterThan(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) {
	return withObservedQueryAndResults(o, "SelectIndexedLogsTopicGreaterThan", func() ([]Log, error) {
		return o.ORM.SelectIndexedLogsTopicGreaterThan(address, eventSig, topicIndex, topicValueMin, confs, qopts...)
	})
}

// SelectIndexedLogsTopicRange delegates to the origin ORM, reporting duration and dataset size.
func (o *ObservedORM) SelectIndexedLogsTopicRange(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) {
	return withObservedQueryAndResults(o, "SelectIndexedLogsTopicRange", func() ([]Log, error) {
		return o.ORM.SelectIndexedLogsTopicRange(address, eventSig, topicIndex, topicValueMin, topicValueMax, confs, qopts...)
	})
}

// withObservedQueryAndResults runs query via withObservedQuery (always recording
// duration) and, only on success, additionally records the result-set size gauge.
func withObservedQueryAndResults[T any](o *ObservedORM, queryName string, query func() ([]T, error)) ([]T, error) {
	results, err := withObservedQuery(o, queryName, query)
	if err == nil {
		o.datasetSize.
			WithLabelValues(o.chainId, queryName, string(read)).
			Set(float64(len(results)))
	}
	return results, err
}

// withObservedQuery runs query and records its wall-clock duration under the
// "read" type. The deferred observation fires on both success and error paths.
// NOTE: the histogram buckets are expressed in nanoseconds (float64(n*time.Millisecond)),
// matching the raw time.Since value observed here.
func withObservedQuery[T any](o *ObservedORM, queryName string, query func() (T, error)) (T, error) {
	queryStarted := time.Now()
	defer func() {
		o.queryDuration.
			WithLabelValues(o.chainId, queryName, string(read)).
			Observe(float64(time.Since(queryStarted)))
	}()
	return query()
}

// withObservedExec runs exec and records its wall-clock duration under the given
// queryType ("create"/"delete"). Duration is recorded even when exec fails.
func withObservedExec(o *ObservedORM, query string, queryType queryType, exec func() error) error {
	queryStarted := time.Now()
	defer func() {
		o.queryDuration.
			WithLabelValues(o.chainId, query, string(queryType)).
			Observe(float64(time.Since(queryStarted)))
	}()
	return exec()
}

// trackInsertedLogsAndBlock bumps the inserted-logs counter (and the
// inserted-blocks counter when block is non-nil). It is a no-op on error so
// failed inserts never inflate the counters.
func trackInsertedLogsAndBlock(o *ObservedORM, logs []Log, block *LogPollerBlock, err error) {
	if err != nil {
		return
	}
	o.logsInserted.
		WithLabelValues(o.chainId).
+ Add(float64(len(logs))) + + if block != nil { + o.blocksInserted. + WithLabelValues(o.chainId). + Inc() + } +} diff --git a/core/chains/evm/logpoller/observability_test.go b/core/chains/evm/logpoller/observability_test.go new file mode 100644 index 00000000..d0c0bfd3 --- /dev/null +++ b/core/chains/evm/logpoller/observability_test.go @@ -0,0 +1,180 @@ +package logpoller + +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/prometheus/client_golang/prometheus" + io_prometheus_client "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/prometheus/client_golang/prometheus/testutil" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +func TestMultipleMetricsArePublished(t *testing.T) { + ctx := testutils.Context(t) + orm := createObservedORM(t, 100) + t.Cleanup(func() { resetMetrics(*orm) }) + require.Equal(t, 0, testutil.CollectAndCount(orm.queryDuration)) + + _, _ = orm.SelectIndexedLogs(common.Address{}, common.Hash{}, 1, []common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsByBlockRange(0, 1, common.Address{}, common.Hash{}, 1, []common.Hash{}, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsTopicGreaterThan(common.Address{}, common.Hash{}, 1, common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsTopicRange(common.Address{}, common.Hash{}, 1, common.Hash{}, common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsWithSigsExcluding(common.Hash{}, common.Hash{}, 1, common.Address{}, 0, 1, 1, pg.WithParentCtx(ctx)) + _, _ = 
orm.SelectLogsDataWordRange(common.Address{}, common.Hash{}, 0, common.Hash{}, common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLogsDataWordGreaterThan(common.Address{}, common.Hash{}, 0, common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLogsCreatedAfter(common.Address{}, common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLatestLogByEventSigWithConfs(common.Hash{}, common.Address{}, 0, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLatestLogEventSigsAddrsWithConfs(0, []common.Address{{}}, []common.Hash{{}}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsCreatedAfter(common.Address{}, common.Hash{}, 1, []common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx)) + _ = orm.InsertLogs([]Log{}, pg.WithParentCtx(ctx)) + _ = orm.InsertLogsWithBlock([]Log{}, NewLogPollerBlock(common.Hash{}, 1, time.Now(), 0), pg.WithParentCtx(ctx)) + + require.Equal(t, 13, testutil.CollectAndCount(orm.queryDuration)) + require.Equal(t, 10, testutil.CollectAndCount(orm.datasetSize)) +} + +func TestShouldPublishDurationInCaseOfError(t *testing.T) { + ctx := testutils.Context(t) + orm := createObservedORM(t, 200) + t.Cleanup(func() { resetMetrics(*orm) }) + require.Equal(t, 0, testutil.CollectAndCount(orm.queryDuration)) + + _, err := orm.SelectLatestLogByEventSigWithConfs(common.Hash{}, common.Address{}, 0, pg.WithParentCtx(ctx)) + require.Error(t, err) + + require.Equal(t, 1, testutil.CollectAndCount(orm.queryDuration)) + require.Equal(t, 1, counterFromHistogramByLabels(t, orm.queryDuration, "200", "SelectLatestLogByEventSigWithConfs", "read")) +} + +func TestMetricsAreProperlyPopulatedWithLabels(t *testing.T) { + orm := createObservedORM(t, 420) + t.Cleanup(func() { resetMetrics(*orm) }) + expectedCount := 9 + expectedSize := 2 + + for i := 0; i < expectedCount; i++ { + _, err := withObservedQueryAndResults(orm, "query", func() ([]string, error) { return []string{"value1", "value2"}, nil }) + require.NoError(t, err) + } + + require.Equal(t, 
expectedCount, counterFromHistogramByLabels(t, orm.queryDuration, "420", "query", "read")) + require.Equal(t, expectedSize, counterFromGaugeByLabels(orm.datasetSize, "420", "query", "read")) + + require.Equal(t, 0, counterFromHistogramByLabels(t, orm.queryDuration, "420", "other_query", "read")) + require.Equal(t, 0, counterFromHistogramByLabels(t, orm.queryDuration, "5", "query", "read")) + + require.Equal(t, 0, counterFromGaugeByLabels(orm.datasetSize, "420", "other_query", "read")) + require.Equal(t, 0, counterFromGaugeByLabels(orm.datasetSize, "5", "query", "read")) +} + +func TestNotPublishingDatasetSizeInCaseOfError(t *testing.T) { + orm := createObservedORM(t, 420) + + _, err := withObservedQueryAndResults(orm, "errorQuery", func() ([]string, error) { return nil, fmt.Errorf("error") }) + require.Error(t, err) + + require.Equal(t, 1, counterFromHistogramByLabels(t, orm.queryDuration, "420", "errorQuery", "read")) + require.Equal(t, 0, counterFromGaugeByLabels(orm.datasetSize, "420", "errorQuery", "read")) +} + +func TestMetricsAreProperlyPopulatedForWrites(t *testing.T) { + orm := createObservedORM(t, 420) + require.NoError(t, withObservedExec(orm, "execQuery", create, func() error { return nil })) + require.Error(t, withObservedExec(orm, "execQuery", create, func() error { return fmt.Errorf("error") })) + + require.Equal(t, 2, counterFromHistogramByLabels(t, orm.queryDuration, "420", "execQuery", "create")) +} + +func TestCountersAreProperlyPopulatedForWrites(t *testing.T) { + orm := createObservedORM(t, 420) + logs := generateRandomLogs(420, 20) + + // First insert 10 logs + require.NoError(t, orm.InsertLogs(logs[:10])) + assert.Equal(t, float64(10), testutil.ToFloat64(orm.logsInserted.WithLabelValues("420"))) + + // Insert 5 more logs with block + require.NoError(t, orm.InsertLogsWithBlock(logs[10:15], NewLogPollerBlock(utils.RandomBytes32(), 10, time.Now(), 5))) + assert.Equal(t, float64(15), testutil.ToFloat64(orm.logsInserted.WithLabelValues("420"))) + 
assert.Equal(t, float64(1), testutil.ToFloat64(orm.blocksInserted.WithLabelValues("420"))) + + // Insert 5 more logs with block + require.NoError(t, orm.InsertLogsWithBlock(logs[15:], NewLogPollerBlock(utils.RandomBytes32(), 15, time.Now(), 5))) + assert.Equal(t, float64(20), testutil.ToFloat64(orm.logsInserted.WithLabelValues("420"))) + assert.Equal(t, float64(2), testutil.ToFloat64(orm.blocksInserted.WithLabelValues("420"))) + + // Don't update counters in case of an error + require.Error(t, orm.InsertLogsWithBlock(logs, NewLogPollerBlock(utils.RandomBytes32(), 0, time.Now(), 0))) + assert.Equal(t, float64(20), testutil.ToFloat64(orm.logsInserted.WithLabelValues("420"))) + assert.Equal(t, float64(2), testutil.ToFloat64(orm.blocksInserted.WithLabelValues("420"))) +} + +func generateRandomLogs(chainId, count int) []Log { + logs := make([]Log, count) + for i := range logs { + logs[i] = Log{ + EvmChainId: ubig.NewI(int64(chainId)), + LogIndex: int64(i + 1), + BlockHash: utils.RandomBytes32(), + BlockNumber: int64(i + 1), + BlockTimestamp: time.Now(), + Topics: [][]byte{}, + EventSig: utils.RandomBytes32(), + Address: utils.RandomAddress(), + TxHash: utils.RandomBytes32(), + Data: []byte{}, + CreatedAt: time.Now(), + } + } + return logs +} + +func createObservedORM(t *testing.T, chainId int64) *ObservedORM { + lggr, _ := logger.TestObserved(t, zapcore.ErrorLevel) + db := pgtest.NewSqlxDB(t) + return NewObservedORM( + big.NewInt(chainId), db, lggr, pgtest.NewQConfig(true), + ) +} + +func resetMetrics(lp ObservedORM) { + lp.queryDuration.Reset() + lp.datasetSize.Reset() + lp.logsInserted.Reset() + lp.blocksInserted.Reset() +} + +func counterFromGaugeByLabels(gaugeVec *prometheus.GaugeVec, labels ...string) int { + value := testutil.ToFloat64(gaugeVec.WithLabelValues(labels...)) + return int(value) +} + +func counterFromHistogramByLabels(t *testing.T, histogramVec *prometheus.HistogramVec, labels ...string) int { + observer, err := 
histogramVec.GetMetricWithLabelValues(labels...) + require.NoError(t, err) + + metricCh := make(chan prometheus.Metric, 1) + observer.(prometheus.Histogram).Collect(metricCh) + close(metricCh) + + metric := <-metricCh + pb := &io_prometheus_client.Metric{} + err = metric.Write(pb) + require.NoError(t, err) + + return int(pb.GetHistogram().GetSampleCount()) +} diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go new file mode 100644 index 00000000..6211ae85 --- /dev/null +++ b/core/chains/evm/logpoller/orm.go @@ -0,0 +1,784 @@ +package logpoller + +import ( + "context" + "database/sql" + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// ORM represents the persistent data access layer used by the log poller. At this moment, it's a bit leaky abstraction, because +// it exposes some of the database implementation details (e.g. pg.Q). Ideally it should be agnostic and could be applied to any persistence layer. +// What is more, LogPoller should not be aware of the underlying database implementation and delegate all the queries to the ORM. 
type ORM interface {
	// Writes (idempotent where noted on DbORM implementations).
	InsertLogs(logs []Log, qopts ...pg.QOpt) error
	InsertLogsWithBlock(logs []Log, block LogPollerBlock, qopts ...pg.QOpt) error
	InsertFilter(filter Filter, qopts ...pg.QOpt) error

	// Filter management.
	LoadFilters(qopts ...pg.QOpt) (map[string]Filter, error)
	DeleteFilter(name string, qopts ...pg.QOpt) error

	// Pruning.
	DeleteBlocksBefore(end int64, qopts ...pg.QOpt) error
	DeleteLogsAndBlocksAfter(start int64, qopts ...pg.QOpt) error
	DeleteExpiredLogs(qopts ...pg.QOpt) error

	// Block queries.
	GetBlocksRange(start int64, end int64, qopts ...pg.QOpt) ([]LogPollerBlock, error)
	SelectBlockByNumber(blockNumber int64, qopts ...pg.QOpt) (*LogPollerBlock, error)
	SelectLatestBlock(qopts ...pg.QOpt) (*LogPollerBlock, error)

	// Log queries by block range / signature / confirmation depth.
	SelectLogs(start, end int64, address common.Address, eventSig common.Hash, qopts ...pg.QOpt) ([]Log, error)
	SelectLogsWithSigs(start, end int64, address common.Address, eventSigs []common.Hash, qopts ...pg.QOpt) ([]Log, error)
	SelectLogsCreatedAfter(address common.Address, eventSig common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error)
	SelectLatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations, qopts ...pg.QOpt) (*Log, error)
	SelectLatestLogEventSigsAddrsWithConfs(fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error)
	SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error)

	// Log queries over indexed topics and data words.
	SelectIndexedLogs(address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error)
	SelectIndexedLogsByBlockRange(start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]Log, error)
	SelectIndexedLogsCreatedAfter(address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error)
	SelectIndexedLogsTopicGreaterThan(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error)
	SelectIndexedLogsTopicRange(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error)
	SelectIndexedLogsWithSigsExcluding(sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error)
	SelectIndexedLogsByTxHash(address common.Address, eventSig common.Hash, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error)
	SelectLogsDataWordRange(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error)
	SelectLogsDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error)
	SelectLogsDataWordBetween(address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error)
}

// DbORM is the Postgres-backed implementation of ORM. Every query is scoped to
// a single chainID; a separate DbORM must be created per chain.
type DbORM struct {
	chainID *big.Int      // chain this ORM is scoped to; also validated against incoming logs
	q       pg.Q          // query wrapper carrying DB handle, logger and config
	lggr    logger.Logger
}

// NewORM creates a DbORM scoped to chainID.
func NewORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) *DbORM {
	// NOTE(review): the logger name "Configs" looks copy-pasted from another ORM;
	// consider renaming (renaming changes log output, so it needs its own change).
	namedLogger := logger.Named(lggr, "Configs")
	q := pg.NewQ(db, namedLogger, cfg)
	return &DbORM{
		chainID: chainID,
		q:       q,
		lggr:    lggr,
	}
}

// InsertBlock is idempotent to support replays.
func (o *DbORM) InsertBlock(blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64, qopts ...pg.QOpt) error {
	args, err := newQueryArgs(o.chainID).
		withCustomHashArg("block_hash", blockHash).
		withCustomArg("block_number", blockNumber).
		withCustomArg("block_timestamp", blockTimestamp).
		withCustomArg("finalized_block_number", finalizedBlock).
		toArgs()
	if err != nil {
		return err
	}
	// ON CONFLICT DO NOTHING makes replayed blocks a no-op rather than an error.
	return o.q.WithOpts(qopts...).ExecQNamed(`
        INSERT INTO evm.log_poller_blocks
            (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at)
      	VALUES (:evm_chain_id, :block_hash, :block_number, :block_timestamp, :finalized_block_number, NOW())
			ON CONFLICT DO NOTHING`, args)
}

// InsertFilter is idempotent.
//
// Each address/event pair must have a unique job id, so it may be removed when the job is deleted.
// If a second job tries to overwrite the same pair, this should fail.
func (o *DbORM) InsertFilter(filter Filter, qopts ...pg.QOpt) (err error) {
	args, err := newQueryArgs(o.chainID).
		withCustomArg("name", filter.Name).
		withCustomArg("retention", filter.Retention).
		withAddressArray(filter.Addresses).
		withEventSigArray(filter.EventSigs).
		toArgs()
	if err != nil {
		return err
	}
	// '::' has to be escaped in the query string
	// https://github.com/jmoiron/sqlx/issues/91, https://github.com/jmoiron/sqlx/issues/428
	return o.q.WithOpts(qopts...).ExecQNamed(`
		INSERT INTO evm.log_poller_filters
	  		(name, evm_chain_id, retention, created_at, address, event)
		SELECT * FROM
			(SELECT :name, :evm_chain_id ::::NUMERIC, :retention ::::BIGINT, NOW()) x,
			(SELECT unnest(:address_array ::::BYTEA[]) addr) a,
			(SELECT unnest(:event_sig_array ::::BYTEA[]) ev) e
		ON CONFLICT (name, evm_chain_id, address, event)
		DO UPDATE SET retention=:retention ::::BIGINT`, args)
}

// DeleteFilter removes all events,address pairs associated with the Filter
func (o *DbORM) DeleteFilter(name string, qopts ...pg.QOpt) error {
	q := o.q.WithOpts(qopts...)
	return q.ExecQ(`DELETE FROM evm.log_poller_filters WHERE name = $1 AND evm_chain_id = $2`, name, ubig.New(o.chainID))
}

// LoadFiltersForChain returns all filters for this chain
func (o *DbORM) LoadFilters(qopts ...pg.QOpt) (map[string]Filter, error) {
	q := o.q.WithOpts(qopts...)
	rows := make([]Filter, 0)
	// Rows sharing a filter name are aggregated back into a single Filter;
	// MAX(retention) resolves conflicting retentions for the same name.
	err := q.Select(&rows, `SELECT name,
			ARRAY_AGG(DISTINCT address)::BYTEA[] AS addresses,
			ARRAY_AGG(DISTINCT event)::BYTEA[] AS event_sigs,
			MAX(retention) AS retention
		FROM evm.log_poller_filters WHERE evm_chain_id = $1
		GROUP BY name`, ubig.New(o.chainID))
	filters := make(map[string]Filter)
	for _, filter := range rows {
		filters[filter.Name] = filter
	}

	return filters, err
}

// SelectBlockByHash returns the block with the given hash for this chain, or an error if absent.
func (o *DbORM) SelectBlockByHash(hash common.Hash, qopts ...pg.QOpt) (*LogPollerBlock, error) {
	q := o.q.WithOpts(qopts...)
	var b LogPollerBlock
	if err := q.Get(&b, `SELECT * FROM evm.log_poller_blocks WHERE block_hash = $1 AND evm_chain_id = $2`, hash, ubig.New(o.chainID)); err != nil {
		return nil, err
	}
	return &b, nil
}

// SelectBlockByNumber returns the block with the given number for this chain, or an error if absent.
func (o *DbORM) SelectBlockByNumber(n int64, qopts ...pg.QOpt) (*LogPollerBlock, error) {
	q := o.q.WithOpts(qopts...)
	var b LogPollerBlock
	if err := q.Get(&b, `SELECT * FROM evm.log_poller_blocks WHERE block_number = $1 AND evm_chain_id = $2`, n, ubig.New(o.chainID)); err != nil {
		return nil, err
	}
	return &b, nil
}

// SelectLatestBlock returns the highest-numbered block persisted for this chain.
func (o *DbORM) SelectLatestBlock(qopts ...pg.QOpt) (*LogPollerBlock, error) {
	q := o.q.WithOpts(qopts...)
	var b LogPollerBlock
	if err := q.Get(&b, `SELECT * FROM evm.log_poller_blocks WHERE evm_chain_id = $1 ORDER BY block_number DESC LIMIT 1`, ubig.New(o.chainID)); err != nil {
		return nil, err
	}
	return &b, nil
}

// SelectLatestLogByEventSigWithConfs returns the newest log matching (address, eventSig)
// whose block is at least confs confirmations deep (see nestedBlockNumberQuery).
func (o *DbORM) SelectLatestLogByEventSigWithConfs(eventSig common.Hash, address common.Address, confs Confirmations, qopts ...pg.QOpt) (*Log, error) {
	args, err := newQueryArgsForEvent(o.chainID, address, eventSig).
		withConfs(confs).
		toArgs()
	if err != nil {
		return nil, err
	}
	query := fmt.Sprintf(`
		SELECT * FROM evm.logs
			WHERE evm_chain_id = :evm_chain_id
			AND event_sig = :event_sig
			AND address = :address
			AND block_number <= %s
			ORDER BY (block_number, log_index) DESC LIMIT 1`, nestedBlockNumberQuery(confs))
	var l Log
	if err := o.q.WithOpts(qopts...).GetNamed(query, &l, args); err != nil {
		return nil, err
	}
	return &l, nil
}

// DeleteBlocksBefore delete all blocks before and including end.
func (o *DbORM) DeleteBlocksBefore(end int64, qopts ...pg.QOpt) error {
	q := o.q.WithOpts(qopts...)
	_, err := q.Exec(`DELETE FROM evm.log_poller_blocks WHERE block_number <= $1 AND evm_chain_id = $2`, end, ubig.New(o.chainID))
	return err
}

// DeleteLogsAndBlocksAfter removes all blocks and logs at block_number >= start
// (used to unwind a reorg). Both deletes run in a single transaction.
func (o *DbORM) DeleteLogsAndBlocksAfter(start int64, qopts ...pg.QOpt) error {
	return o.q.WithOpts(qopts...).Transaction(func(tx pg.Queryer) error {
		args, err := newQueryArgs(o.chainID).
			withStartBlock(start).
			toArgs()
		if err != nil {
			o.lggr.Error("Cant build args for DeleteLogsAndBlocksAfter queries", "err", err)
			return err
		}

		// Applying upper bound filter is critical for Postgres performance (especially for evm.logs table)
		// because it allows the planner to properly estimate the number of rows to be scanned.
		// If not applied, these queries can become very slow. After some critical number
		// of logs, Postgres will try to scan all the logs in the index by block_number.
		// Latency without upper bound filter can be orders of magnitude higher for large number of logs.
		_, err = tx.NamedExec(`DELETE FROM evm.log_poller_blocks
       						WHERE evm_chain_id = :evm_chain_id
       						AND block_number >= :start_block
       						AND block_number <= (SELECT MAX(block_number) FROM evm.log_poller_blocks WHERE evm_chain_id = :evm_chain_id)`, args)
		if err != nil {
			o.lggr.Warnw("Unable to clear reorged blocks, retrying", "err", err)
			return err
		}

		_, err = tx.NamedExec(`DELETE FROM evm.logs
       						WHERE evm_chain_id = :evm_chain_id
       						AND block_number >= :start_block
       						AND block_number <= (SELECT MAX(block_number) FROM evm.logs WHERE evm_chain_id = :evm_chain_id)`, args)
		if err != nil {
			o.lggr.Warnw("Unable to clear reorged logs, retrying", "err", err)
			return err
		}
		return nil
	})
}

// Exp models a row used when reasoning about log expiration.
// NOTE(review): Exp does not appear to be referenced in this file's visible
// code — confirm it is used elsewhere before removing.
type Exp struct {
	Address      common.Address
	EventSig     common.Hash
	Expiration   time.Time
	TimeNow      time.Time
	ShouldDelete bool
}

// DeleteExpiredLogs prunes logs older than each filter's retention. Filters with
// any zero retention for an (address, event) pair are excluded, which keeps those
// logs forever. Runs with an extended query timeout.
func (o *DbORM) DeleteExpiredLogs(qopts ...pg.QOpt) error {
	qopts = append(qopts, pg.WithLongQueryTimeout())
	q := o.q.WithOpts(qopts...)

	return q.ExecQ(`WITH r AS
		( SELECT address, event, MAX(retention) AS retention
			FROM evm.log_poller_filters WHERE evm_chain_id=$1
			GROUP BY evm_chain_id,address, event HAVING NOT 0 = ANY(ARRAY_AGG(retention))
		) DELETE FROM evm.logs l USING r
			WHERE l.evm_chain_id = $1 AND l.address=r.address AND l.event_sig=r.event
			AND l.created_at <= STATEMENT_TIMESTAMP() - (r.retention / 10^9 * interval '1 second')`, // retention is in nanoseconds (time.Duration aka BIGINT)
		ubig.New(o.chainID))
}

// InsertLogs is idempotent to support replays.
func (o *DbORM) InsertLogs(logs []Log, qopts ...pg.QOpt) error {
	// Reject logs tagged with a different chain ID before touching the DB.
	if err := o.validateLogs(logs); err != nil {
		return err
	}

	return o.q.WithOpts(qopts...).Transaction(func(tx pg.Queryer) error {
		return o.insertLogsWithinTx(logs, tx)
	})
}

// InsertLogsWithBlock persists the block and its logs atomically.
func (o *DbORM) InsertLogsWithBlock(logs []Log, block LogPollerBlock, qopts ...pg.QOpt) error {
	// Optimization, don't open TX when there is only a block to be persisted
	if len(logs) == 0 {
		return o.InsertBlock(block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber, qopts...)
	}

	if err := o.validateLogs(logs); err != nil {
		return err
	}

	// Block and logs goes with the same TX to ensure atomicity
	return o.q.WithOpts(qopts...).Transaction(func(tx pg.Queryer) error {
		if err := o.InsertBlock(block.BlockHash, block.BlockNumber, block.BlockTimestamp, block.FinalizedBlockNumber, pg.WithQueryer(tx)); err != nil {
			return err
		}
		return o.insertLogsWithinTx(logs, tx)
	})
}

// insertLogsWithinTx batch-inserts logs inside an existing transaction.
// Batches start at 4000 rows; on a context.DeadlineExceeded the batch size is
// halved (down to a floor of >500) and the same range is retried. Any other
// error aborts the insert.
func (o *DbORM) insertLogsWithinTx(logs []Log, tx pg.Queryer) error {
	batchInsertSize := 4000
	for i := 0; i < len(logs); i += batchInsertSize {
		start, end := i, i+batchInsertSize
		if end > len(logs) {
			end = len(logs)
		}

		// ON CONFLICT DO NOTHING keeps the insert idempotent under replays.
		_, err := tx.NamedExec(`
			INSERT INTO evm.logs
				(evm_chain_id, log_index, block_hash, block_number, block_timestamp, address, event_sig, topics, tx_hash, data, created_at)
			VALUES
				(:evm_chain_id, :log_index, :block_hash, :block_number, :block_timestamp, :address, :event_sig, :topics, :tx_hash, :data, NOW())
			ON CONFLICT DO NOTHING`,
			logs[start:end],
		)

		if err != nil {
			if errors.Is(err, context.DeadlineExceeded) && batchInsertSize > 500 {
				// In case of DB timeouts, try to insert again with a smaller batch upto a limit
				batchInsertSize /= 2
				// Subtracting the (already halved) batch size here cancels out the
				// loop's post-statement `i += batchInsertSize`, so the next
				// iteration retries from the same `i` with the smaller batch.
				i -= batchInsertSize // counteract +=batchInsertSize on next loop iteration
				continue
			}
			return err
		}
	}
	return nil
}

// validateLogs ensures every log belongs to this ORM's chain.
func (o *DbORM) validateLogs(logs []Log) error {
	for _, log := range logs {
if o.chainID.Cmp(log.EvmChainId.ToInt()) != 0 { + return errors.Errorf("invalid chainID in log got %v want %v", log.EvmChainId.ToInt(), o.chainID) + } + } + return nil +} + +func (o *DbORM) SelectLogsByBlockRange(start, end int64) ([]Log, error) { + args, err := newQueryArgs(o.chainID). + withStartBlock(start). + withEndBlock(end). + toArgs() + if err != nil { + return nil, err + } + + var logs []Log + err = o.q.SelectNamed(&logs, ` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND block_number >= :start_block + AND block_number <= :end_block + ORDER BY (block_number, log_index, created_at)`, args) + if err != nil { + return nil, err + } + return logs, nil +} + +// SelectLogsByBlockRangeFilter finds the logs in a given block range. +func (o *DbORM) SelectLogs(start, end int64, address common.Address, eventSig common.Hash, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withStartBlock(start). + withEndBlock(end). + toArgs() + if err != nil { + return nil, err + } + var logs []Log + err = o.q.WithOpts(qopts...).SelectNamed(&logs, ` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND block_number >= :start_block + AND block_number <= :end_block + ORDER BY (block_number, log_index)`, args) + if err != nil { + return nil, err + } + return logs, nil +} + +// SelectLogsCreatedAfter finds logs created after some timestamp. +func (o *DbORM) SelectLogsCreatedAfter(address common.Address, eventSig common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withBlockTimestampAfter(after). + withConfs(confs). 
+ toArgs() + if err != nil { + return nil, err + } + + query := fmt.Sprintf(` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND block_timestamp > :block_timestamp_after + AND block_number <= %s + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + + var logs []Log + if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + return nil, err + } + return logs, nil +} + +// SelectLogsWithSigsByBlockRangeFilter finds the logs in the given block range with the given event signatures +// emitted from the given address. +func (o *DbORM) SelectLogsWithSigs(start, end int64, address common.Address, eventSigs []common.Hash, qopts ...pg.QOpt) (logs []Log, err error) { + args, err := newQueryArgs(o.chainID). + withAddress(address). + withEventSigArray(eventSigs). + withStartBlock(start). + withEndBlock(end). + toArgs() + if err != nil { + return nil, err + } + + q := o.q.WithOpts(qopts...) + err = q.SelectNamed(&logs, ` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = ANY(:event_sig_array) + AND block_number BETWEEN :start_block AND :end_block + ORDER BY (block_number, log_index)`, args) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return logs, err +} + +func (o *DbORM) GetBlocksRange(start int64, end int64, qopts ...pg.QOpt) ([]LogPollerBlock, error) { + args, err := newQueryArgs(o.chainID). + withStartBlock(start). + withEndBlock(end). 
+ toArgs() + if err != nil { + return nil, err + } + var blocks []LogPollerBlock + err = o.q.WithOpts(qopts...).SelectNamed(&blocks, ` + SELECT * FROM evm.log_poller_blocks + WHERE block_number >= :start_block + AND block_number <= :end_block + AND evm_chain_id = :evm_chain_id + ORDER BY block_number ASC`, args) + if err != nil { + return nil, err + } + return blocks, nil +} + +// SelectLatestLogEventSigsAddrsWithConfs finds the latest log by (address, event) combination that matches a list of Addresses and list of events +func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(fromBlock int64, addresses []common.Address, eventSigs []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgs(o.chainID). + withAddressArray(addresses). + withEventSigArray(eventSigs). + withStartBlock(fromBlock). + withConfs(confs). + toArgs() + if err != nil { + return nil, err + } + query := fmt.Sprintf(` + SELECT * FROM evm.logs WHERE (block_number, address, event_sig) IN ( + SELECT MAX(block_number), address, event_sig FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND event_sig = ANY(:event_sig_array) + AND address = ANY(:address_array) + AND block_number > :start_block + AND block_number <= %s + GROUP BY event_sig, address + ) + ORDER BY block_number ASC`, nestedBlockNumberQuery(confs)) + var logs []Log + if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + return nil, errors.Wrap(err, "failed to execute query") + } + return logs, nil +} + +// SelectLatestBlockNumberEventSigsAddrsWithConfs finds the latest block number that matches a list of Addresses and list of events. It returns 0 if there is no matching block +func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) { + args, err := newQueryArgs(o.chainID). + withEventSigArray(eventSigs). + withAddressArray(addresses). 
+ withStartBlock(fromBlock). + withConfs(confs). + toArgs() + if err != nil { + return 0, err + } + query := fmt.Sprintf(` + SELECT COALESCE(MAX(block_number), 0) FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND event_sig = ANY(:event_sig_array) + AND address = ANY(:address_array) + AND block_number > :start_block + AND block_number <= %s`, nestedBlockNumberQuery(confs)) + var blockNumber int64 + if err := o.q.WithOpts(qopts...).GetNamed(query, &blockNumber, args); err != nil { + return 0, err + } + return blockNumber, nil +} + +func (o *DbORM) SelectLogsDataWordRange(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withWordIndex(wordIndex). + withWordValueMin(wordValueMin). + withWordValueMax(wordValueMax). + withConfs(confs). + toArgs() + if err != nil { + return nil, err + } + query := fmt.Sprintf(`SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND substring(data from 32*:word_index+1 for 32) >= :word_value_min + AND substring(data from 32*:word_index+1 for 32) <= :word_value_max + AND block_number <= %s + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + var logs []Log + if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + return nil, err + } + return logs, nil +} + +func (o *DbORM) SelectLogsDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withWordIndex(wordIndex). + withWordValueMin(wordValueMin). + withConfs(confs). 
+ toArgs() + if err != nil { + return nil, err + } + query := fmt.Sprintf(` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND substring(data from 32*:word_index+1 for 32) >= :word_value_min + AND block_number <= %s + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + var logs []Log + if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + return nil, err + } + return logs, nil +} + +func (o *DbORM) SelectLogsDataWordBetween(address common.Address, eventSig common.Hash, wordIndexMin int, wordIndexMax int, wordValue common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withWordIndexMin(wordIndexMin). + withWordIndexMax(wordIndexMax). + withWordValue(wordValue). + withConfs(confs). + toArgs() + if err != nil { + return nil, err + } + query := fmt.Sprintf(` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND substring(data from 32*:word_index_min+1 for 32) <= :word_value + AND substring(data from 32*:word_index_max+1 for 32) >= :word_value + AND block_number <= %s + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + var logs []Log + if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + return nil, err + } + return logs, nil +} + +func (o *DbORM) SelectIndexedLogsTopicGreaterThan(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withTopicIndex(topicIndex). + withTopicValueMin(topicValueMin). + withConfs(confs). 
+ toArgs() + if err != nil { + return nil, err + } + query := fmt.Sprintf(` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND topics[:topic_index] >= :topic_value_min + AND block_number <= %s + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + var logs []Log + if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + return nil, err + } + return logs, nil +} + +func (o *DbORM) SelectIndexedLogsTopicRange(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withTopicIndex(topicIndex). + withTopicValueMin(topicValueMin). + withTopicValueMax(topicValueMax). + withConfs(confs). + toArgs() + if err != nil { + return nil, err + } + query := fmt.Sprintf(` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND topics[:topic_index] >= :topic_value_min + AND topics[:topic_index] <= :topic_value_max + AND block_number <= %s + ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery(confs)) + var logs []Log + if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + return nil, err + } + return logs, nil +} + +func (o *DbORM) SelectIndexedLogs(address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withTopicIndex(topicIndex). + withTopicValues(topicValues). + withConfs(confs). 
+ toArgs() + if err != nil { + return nil, err + } + query := fmt.Sprintf(` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND topics[:topic_index] = ANY(:topic_values) + AND block_number <= %s + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + var logs []Log + if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + return nil, err + } + return logs, nil +} + +// SelectIndexedLogsByBlockRangeFilter finds the indexed logs in a given block range. +func (o *DbORM) SelectIndexedLogsByBlockRange(start, end int64, address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withTopicIndex(topicIndex). + withTopicValues(topicValues). + withStartBlock(start). + withEndBlock(end). + toArgs() + if err != nil { + return nil, err + } + var logs []Log + err = o.q.WithOpts(qopts...).SelectNamed(&logs, ` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND topics[:topic_index] = ANY(:topic_values) + AND block_number >= :start_block + AND block_number <= :end_block + ORDER BY (block_number, log_index)`, args) + if err != nil { + return nil, err + } + return logs, nil +} + +func (o *DbORM) SelectIndexedLogsCreatedAfter(address common.Address, eventSig common.Hash, topicIndex int, topicValues []common.Hash, after time.Time, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgsForEvent(o.chainID, address, eventSig). + withBlockTimestampAfter(after). + withConfs(confs). + withTopicIndex(topicIndex). + withTopicValues(topicValues). 
+ toArgs() + if err != nil { + return nil, err + } + + query := fmt.Sprintf(` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND topics[:topic_index] = ANY(:topic_values) + AND block_timestamp > :block_timestamp_after + AND block_number <= %s + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) + + var logs []Log + if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + return nil, err + } + return logs, nil +} + +func (o *DbORM) SelectIndexedLogsByTxHash(address common.Address, eventSig common.Hash, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgs(o.chainID). + withTxHash(txHash). + withAddress(address). + withEventSig(eventSig). + toArgs() + if err != nil { + return nil, err + } + var logs []Log + err = o.q.WithOpts(qopts...).SelectNamed(&logs, ` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :event_sig + AND tx_hash = :tx_hash + ORDER BY (block_number, log_index)`, args) + if err != nil { + return nil, err + } + return logs, nil +} + +// SelectIndexedLogsWithSigsExcluding query's for logs that have signature A and exclude logs that have a corresponding signature B, matching is done based on the topic index both logs should be inside the block range and have the minimum number of confirmations +func (o *DbORM) SelectIndexedLogsWithSigsExcluding(sigA, sigB common.Hash, topicIndex int, address common.Address, startBlock, endBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { + args, err := newQueryArgs(o.chainID). + withAddress(address). + withTopicIndex(topicIndex). + withStartBlock(startBlock). + withEndBlock(endBlock). + withCustomHashArg("sigA", sigA). + withCustomHashArg("sigB", sigB). + withConfs(confs). 
+ toArgs() + if err != nil { + return nil, err + } + + nestedQuery := nestedBlockNumberQuery(confs) + query := fmt.Sprintf(` + SELECT * FROM evm.logs + WHERE evm_chain_id = :evm_chain_id + AND address = :address + AND event_sig = :sigA + AND block_number BETWEEN :start_block AND :end_block + AND block_number <= %s + EXCEPT + SELECT a.* FROM evm.logs AS a + INNER JOIN evm.logs B + ON a.evm_chain_id = b.evm_chain_id + AND a.address = b.address + AND a.topics[:topic_index] = b.topics[:topic_index] + AND a.event_sig = :sigA + AND b.event_sig = :sigB + AND b.block_number BETWEEN :start_block AND :end_block + AND b.block_number <= %s + ORDER BY block_number,log_index ASC`, nestedQuery, nestedQuery) + var logs []Log + if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { + return nil, err + } + return logs, nil +} + +func nestedBlockNumberQuery(confs Confirmations) string { + if confs == Finalized { + return ` + (SELECT finalized_block_number + FROM evm.log_poller_blocks + WHERE evm_chain_id = :evm_chain_id + ORDER BY block_number DESC LIMIT 1) ` + } + // Intentionally wrap with greatest() function and don't return negative block numbers when :confs > :block_number + // It doesn't impact logic of the outer query, because block numbers are never less or equal to 0 (guarded by log_poller_blocks_block_number_check) + return ` + (SELECT greatest(block_number - :confs, 0) + FROM evm.log_poller_blocks + WHERE evm_chain_id = :evm_chain_id + ORDER BY block_number DESC LIMIT 1) ` + +} diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go new file mode 100644 index 00000000..62f05608 --- /dev/null +++ b/core/chains/evm/logpoller/orm_test.go @@ -0,0 +1,1567 @@ +package logpoller_test + +import ( + "bytes" + "database/sql" + "fmt" + "math" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + 
+ "github.com/goplugin/plugin-common/pkg/logger" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type block struct { + number int64 + hash common.Hash + timestamp int64 +} + +func GenLog(chainID *big.Int, logIndex int64, blockNum int64, blockHash string, topic1 []byte, address common.Address) logpoller.Log { + return GenLogWithTimestamp(chainID, logIndex, blockNum, blockHash, topic1, address, time.Now()) +} + +func GenLogWithTimestamp(chainID *big.Int, logIndex int64, blockNum int64, blockHash string, topic1 []byte, address common.Address, blockTimestamp time.Time) logpoller.Log { + return logpoller.Log{ + EvmChainId: ubig.New(chainID), + LogIndex: logIndex, + BlockHash: common.HexToHash(blockHash), + BlockNumber: blockNum, + EventSig: common.BytesToHash(topic1), + Topics: [][]byte{topic1, topic1}, + Address: address, + TxHash: common.HexToHash("0x1234"), + Data: append([]byte("hello "), byte(blockNum)), + BlockTimestamp: blockTimestamp, + } +} + +func GenLogWithData(chainID *big.Int, address common.Address, eventSig common.Hash, logIndex int64, blockNum int64, data []byte) logpoller.Log { + return logpoller.Log{ + EvmChainId: ubig.New(chainID), + LogIndex: logIndex, + BlockHash: utils.RandomBytes32(), + BlockNumber: blockNum, + EventSig: eventSig, + Topics: [][]byte{}, + Address: address, + TxHash: utils.RandomBytes32(), + Data: data, + BlockTimestamp: time.Now(), + } +} + +func TestLogPoller_Batching(t *testing.T) { + t.Parallel() + th := SetupTH(t, false, 2, 3, 2, 1000) + var logs []logpoller.Log + // 
Inserts are limited to 65535 parameters. A log being 10 parameters this results in + // a maximum of 6553 log inserts per tx. As inserting more than 6553 would result in + // an error without batching, this test makes sure batching is enabled. + for i := 0; i < 15000; i++ { + logs = append(logs, GenLog(th.ChainID, int64(i+1), 1, "0x3", EmitterABI.Events["Log1"].ID.Bytes(), th.EmitterAddress1)) + } + require.NoError(t, th.ORM.InsertLogs(logs)) + lgs, err := th.ORM.SelectLogsByBlockRange(1, 1) + require.NoError(t, err) + // Make sure all logs are inserted + require.Equal(t, len(logs), len(lgs)) +} + +func TestORM_GetBlocks_From_Range(t *testing.T) { + th := SetupTH(t, false, 2, 3, 2, 1000) + o1 := th.ORM + // Insert many blocks and read them back together + blocks := []block{ + { + number: 10, + hash: common.HexToHash("0x111"), + timestamp: 0, + }, + { + number: 11, + hash: common.HexToHash("0x112"), + timestamp: 10, + }, + { + number: 12, + hash: common.HexToHash("0x113"), + timestamp: 20, + }, + { + number: 13, + hash: common.HexToHash("0x114"), + timestamp: 30, + }, + { + number: 14, + hash: common.HexToHash("0x115"), + timestamp: 40, + }, + } + for _, b := range blocks { + require.NoError(t, o1.InsertBlock(b.hash, b.number, time.Unix(b.timestamp, 0).UTC(), 0)) + } + + var blockNumbers []int64 + for _, b := range blocks { + blockNumbers = append(blockNumbers, b.number) + } + + lpBlocks, err := o1.GetBlocksRange(blockNumbers[0], blockNumbers[len(blockNumbers)-1]) + require.NoError(t, err) + assert.Len(t, lpBlocks, len(blocks)) + + // Ignores non-existent block + lpBlocks2, err := o1.GetBlocksRange(blockNumbers[0], 15) + require.NoError(t, err) + assert.Len(t, lpBlocks2, len(blocks)) + + // Only non-existent blocks + lpBlocks3, err := o1.GetBlocksRange(15, 15) + require.NoError(t, err) + assert.Len(t, lpBlocks3, 0) +} + +func TestORM_GetBlocks_From_Range_Recent_Blocks(t *testing.T) { + th := SetupTH(t, false, 2, 3, 2, 1000) + o1 := th.ORM + // Insert many blocks and 
read them back together + var recentBlocks []block + for i := 1; i <= 256; i++ { + recentBlocks = append(recentBlocks, block{number: int64(i), hash: common.HexToHash(fmt.Sprintf("0x%d", i))}) + } + for _, b := range recentBlocks { + require.NoError(t, o1.InsertBlock(b.hash, b.number, time.Now(), 0)) + } + + var blockNumbers []int64 + for _, b := range recentBlocks { + blockNumbers = append(blockNumbers, b.number) + } + + lpBlocks, err := o1.GetBlocksRange(blockNumbers[0], blockNumbers[len(blockNumbers)-1]) + require.NoError(t, err) + assert.Len(t, lpBlocks, len(recentBlocks)) + + // Ignores non-existent block + lpBlocks2, err := o1.GetBlocksRange(blockNumbers[0], 257) + require.NoError(t, err) + assert.Len(t, lpBlocks2, len(recentBlocks)) + + // Only non-existent blocks + lpBlocks3, err := o1.GetBlocksRange(257, 257) + require.NoError(t, err) + assert.Len(t, lpBlocks3, 0) +} + +func TestORM(t *testing.T) { + th := SetupTH(t, false, 2, 3, 2, 1000) + o1 := th.ORM + o2 := th.ORM2 + // Insert and read back a block. 
+ require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10, time.Now(), 0)) + b, err := o1.SelectBlockByHash(common.HexToHash("0x1234")) + require.NoError(t, err) + assert.Equal(t, b.BlockNumber, int64(10)) + assert.Equal(t, b.BlockHash.Bytes(), common.HexToHash("0x1234").Bytes()) + assert.Equal(t, b.EvmChainId.String(), th.ChainID.String()) + + // Insert blocks from a different chain + require.NoError(t, o2.InsertBlock(common.HexToHash("0x1234"), 11, time.Now(), 0)) + require.NoError(t, o2.InsertBlock(common.HexToHash("0x1235"), 12, time.Now(), 0)) + b2, err := o2.SelectBlockByHash(common.HexToHash("0x1234")) + require.NoError(t, err) + assert.Equal(t, b2.BlockNumber, int64(11)) + assert.Equal(t, b2.BlockHash.Bytes(), common.HexToHash("0x1234").Bytes()) + assert.Equal(t, b2.EvmChainId.String(), th.ChainID2.String()) + + latest, err := o1.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(10), latest.BlockNumber) + + latest, err = o2.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(12), latest.BlockNumber) + + // Delete a block (only 10 on chain). + require.NoError(t, o1.DeleteLogsAndBlocksAfter(10)) + _, err = o1.SelectBlockByHash(common.HexToHash("0x1234")) + require.Error(t, err) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + + // Delete blocks from another chain. + require.NoError(t, o2.DeleteLogsAndBlocksAfter(11)) + _, err = o2.SelectBlockByHash(common.HexToHash("0x1234")) + require.Error(t, err) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + // Delete blocks after should also delete block 12. + _, err = o2.SelectBlockByHash(common.HexToHash("0x1235")) + require.Error(t, err) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + + // Should be able to insert and read back a log. 
+ topic := common.HexToHash("0x1599") + topic2 := common.HexToHash("0x1600") + require.NoError(t, o1.InsertLogs([]logpoller.Log{ + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 1, + BlockHash: common.HexToHash("0x1234"), + BlockNumber: int64(10), + EventSig: topic, + Topics: [][]byte{topic[:]}, + Address: common.HexToAddress("0x1234"), + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello"), + }, + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 2, + BlockHash: common.HexToHash("0x1234"), + BlockNumber: int64(11), + EventSig: topic, + Topics: [][]byte{topic[:]}, + Address: common.HexToAddress("0x1234"), + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello"), + }, + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 3, + BlockHash: common.HexToHash("0x1234"), + BlockNumber: int64(12), + EventSig: topic, + Topics: [][]byte{topic[:]}, + Address: common.HexToAddress("0x1235"), + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello"), + }, + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 4, + BlockHash: common.HexToHash("0x1234"), + BlockNumber: int64(13), + EventSig: topic, + Topics: [][]byte{topic[:]}, + Address: common.HexToAddress("0x1235"), + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello"), + }, + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 5, + BlockHash: common.HexToHash("0x1234"), + BlockNumber: int64(14), + EventSig: topic2, + Topics: [][]byte{topic2[:]}, + Address: common.HexToAddress("0x1234"), + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello2"), + }, + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 6, + BlockHash: common.HexToHash("0x1234"), + BlockNumber: int64(15), + EventSig: topic2, + Topics: [][]byte{topic2[:]}, + Address: common.HexToAddress("0x1235"), + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello2"), + }, + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 7, + BlockHash: common.HexToHash("0x1237"), + BlockNumber: int64(16), + EventSig: topic, + Topics: [][]byte{topic[:]}, + 
Address: common.HexToAddress("0x1236"), + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello short retention"), + }, + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 8, + BlockHash: common.HexToHash("0x1238"), + BlockNumber: int64(17), + EventSig: topic2, + Topics: [][]byte{topic2[:]}, + Address: common.HexToAddress("0x1236"), + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello2 long retention"), + }, + })) + + t.Log(latest.BlockNumber) + logs, err := o1.SelectLogsByBlockRange(1, 17) + require.NoError(t, err) + require.Len(t, logs, 8) + + logs, err = o1.SelectLogsByBlockRange(10, 10) + require.NoError(t, err) + require.Equal(t, 1, len(logs)) + assert.Equal(t, []byte("hello"), logs[0].Data) + + logs, err = o1.SelectLogs(1, 1, common.HexToAddress("0x1234"), topic) + require.NoError(t, err) + assert.Equal(t, 0, len(logs)) + logs, err = o1.SelectLogs(10, 10, common.HexToAddress("0x1234"), topic) + require.NoError(t, err) + require.Equal(t, 1, len(logs)) + + // With no blocks, should be an error + _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) + require.Error(t, err) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + // With block 10, only 0 confs should work + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10, time.Now(), 0)) + log, err := o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) + require.NoError(t, err) + assert.Equal(t, int64(10), log.BlockNumber) + _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 1) + require.Error(t, err) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + // With block 12, anything <=2 should work + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 11, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 12, time.Now(), 0)) + _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) + require.NoError(t, err) + _, err = 
o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 1) + require.NoError(t, err) + _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 2) + require.NoError(t, err) + _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 3) + require.Error(t, err) + assert.True(t, errors.Is(err, sql.ErrNoRows)) + + // Required for confirmations to work + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 13, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 14, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1236"), 15, time.Now(), 0)) + + // Latest log for topic for addr "0x1234" is @ block 11 + lgs, err := o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234")}, []common.Hash{topic}, 0) + require.NoError(t, err) + + require.Equal(t, 1, len(lgs)) + require.Equal(t, int64(11), lgs[0].BlockNumber) + + // should return two entries one for each address with the latest update + lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234"), common.HexToAddress("0x1235")}, []common.Hash{topic}, 0) + require.NoError(t, err) + require.Equal(t, 2, len(lgs)) + + // should return two entries one for each topic for addr 0x1234 + lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234")}, []common.Hash{topic, topic2}, 0) + require.NoError(t, err) + require.Equal(t, 2, len(lgs)) + + // should return 4 entries one for each (address,topic) combination + lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234"), common.HexToAddress("0x1235")}, []common.Hash{topic, topic2}, 0) + require.NoError(t, err) + require.Equal(t, 4, len(lgs)) + + // should return 3 entries of logs with atleast 1 confirmation + lgs, err = 
o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234"), common.HexToAddress("0x1235")}, []common.Hash{topic, topic2}, 1) + require.NoError(t, err) + require.Equal(t, 3, len(lgs)) + + // should return 2 entries of logs with atleast 2 confirmation + lgs, err = o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234"), common.HexToAddress("0x1235")}, []common.Hash{topic, topic2}, 2) + require.NoError(t, err) + require.Equal(t, 2, len(lgs)) + + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1237"), 16, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1238"), 17, time.Now(), 0)) + + filter0 := logpoller.Filter{ + Name: "permanent retention filter", + Addresses: []common.Address{common.HexToAddress("0x1234")}, + EventSigs: types.HashArray{topic, topic2}, + } + + filter12 := logpoller.Filter{ // retain both topic1 and topic2 on contract3 for at least 1ms + Name: "short retention filter", + Addresses: []common.Address{common.HexToAddress("0x1236")}, + EventSigs: types.HashArray{topic, topic2}, + Retention: time.Millisecond, + } + filter2 := logpoller.Filter{ // retain topic2 on contract3 for at least 1 hour + Name: "long retention filter", + Addresses: []common.Address{common.HexToAddress("0x1236")}, + EventSigs: types.HashArray{topic2}, + Retention: time.Hour, + } + + // Test inserting filters and reading them back + require.NoError(t, o1.InsertFilter(filter0)) + require.NoError(t, o1.InsertFilter(filter12)) + require.NoError(t, o1.InsertFilter(filter2)) + + filters, err := o1.LoadFilters() + require.NoError(t, err) + require.Len(t, filters, 3) + assert.Equal(t, filter0, filters["permanent retention filter"]) + assert.Equal(t, filter12, filters["short retention filter"]) + assert.Equal(t, filter2, filters["long retention filter"]) + + latest, err = o1.SelectLatestBlock() + require.NoError(t, err) + require.Equal(t, int64(17), 
latest.BlockNumber)
	logs, err = o1.SelectLogsByBlockRange(1, latest.BlockNumber)
	require.NoError(t, err)
	require.Len(t, logs, 8)

	// Delete expired logs
	time.Sleep(2 * time.Millisecond) // just in case we haven't reached the end of the 1ms retention period
	err = o1.DeleteExpiredLogs(pg.WithParentCtx(testutils.Context(t)))
	require.NoError(t, err)
	logs, err = o1.SelectLogsByBlockRange(1, latest.BlockNumber)
	require.NoError(t, err)
	// The only log which should be deleted is the one which matches filter1 (ret=1ms) but not filter12 (ret=1 hour)
	// Importantly, it shouldn't delete any logs matching only filter0 (ret=0 meaning permanent retention). Anything
	// matching filter12 should be kept regardless of what other filters it matches.
	assert.Len(t, logs, 7)

	// Delete logs after should delete all logs.
	err = o1.DeleteLogsAndBlocksAfter(1)
	require.NoError(t, err)
	logs, err = o1.SelectLogsByBlockRange(1, latest.BlockNumber)
	require.NoError(t, err)
	require.Zero(t, len(logs))
}

// insertLogsTopicValueRange inserts one log per value i in [start, stop] for the given
// block/address/event. Each value is encoded both as the log index and as topic index 1
// (via EvmWord), so topic-based range queries can be checked against exact counts.
func insertLogsTopicValueRange(t *testing.T, chainID *big.Int, o *logpoller.DbORM, addr common.Address, blockNumber int, eventSig common.Hash, start, stop int) {
	var lgs []logpoller.Log
	for i := start; i <= stop; i++ {
		lgs = append(lgs, logpoller.Log{
			EvmChainId:  ubig.New(chainID),
			LogIndex:    int64(i),
			BlockHash:   common.HexToHash("0x1234"),
			BlockNumber: int64(blockNumber),
			EventSig:    eventSig,
			Topics:      [][]byte{eventSig[:], logpoller.EvmWord(uint64(i)).Bytes()},
			Address:     addr,
			TxHash:      common.HexToHash("0x1888"),
			Data:        []byte("hello"),
		})
	}
	require.NoError(t, o.InsertLogs(lgs))
}

// TestORM_IndexedLogs exercises the topic-indexed log queries (exact match, block range,
// greater-than, value range), topic-index validation, and confirmation filtering.
func TestORM_IndexedLogs(t *testing.T) {
	th := SetupTH(t, false, 2, 3, 2, 1000)
	o1 := th.ORM
	eventSig := common.HexToHash("0x1599")
	addr := common.HexToAddress("0x1234")
	require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0))
	// Topic values 1-3 in block 1 (confirmed), value 4 in block 2 (unconfirmed).
	insertLogsTopicValueRange(t, th.ChainID, o1, addr, 1, eventSig, 1, 3)
	insertLogsTopicValueRange(t, th.ChainID, o1, addr, 2, eventSig, 4, 4) // unconfirmed

	lgs, err := o1.SelectIndexedLogs(addr, eventSig, 1, []common.Hash{logpoller.EvmWord(1)}, 0)
	require.NoError(t, err)
	require.Equal(t, 1, len(lgs))
	assert.Equal(t, logpoller.EvmWord(1).Bytes(), lgs[0].GetTopics()[1].Bytes())

	lgs, err = o1.SelectIndexedLogs(addr, eventSig, 1, []common.Hash{logpoller.EvmWord(1), logpoller.EvmWord(2)}, 0)
	require.NoError(t, err)
	assert.Equal(t, 2, len(lgs))

	lgs, err = o1.SelectIndexedLogsByBlockRange(1, 1, addr, eventSig, 1, []common.Hash{logpoller.EvmWord(1)})
	require.NoError(t, err)
	assert.Equal(t, 1, len(lgs))

	lgs, err = o1.SelectIndexedLogsByBlockRange(1, 2, addr, eventSig, 1, []common.Hash{logpoller.EvmWord(2)})
	require.NoError(t, err)
	assert.Equal(t, 1, len(lgs))

	lgs, err = o1.SelectIndexedLogsByBlockRange(1, 2, addr, eventSig, 1, []common.Hash{logpoller.EvmWord(1)})
	require.NoError(t, err)
	assert.Equal(t, 1, len(lgs))

	// Valid topic indexes are 1-3; 0 is the event sig and 4 is out of range.
	_, err = o1.SelectIndexedLogsByBlockRange(1, 2, addr, eventSig, 0, []common.Hash{logpoller.EvmWord(1)})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "invalid index for topic: 0")
	_, err = o1.SelectIndexedLogsByBlockRange(1, 2, addr, eventSig, 4, []common.Hash{logpoller.EvmWord(1)})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "invalid index for topic: 4")

	lgs, err = o1.SelectIndexedLogsTopicGreaterThan(addr, eventSig, 1, logpoller.EvmWord(2), 0)
	require.NoError(t, err)
	assert.Equal(t, 2, len(lgs))

	lgs, err = o1.SelectIndexedLogsTopicRange(addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0)
	require.NoError(t, err)
	assert.Equal(t, 1, len(lgs))
	assert.Equal(t, logpoller.EvmWord(3).Bytes(), lgs[0].GetTopics()[1].Bytes())

	lgs, err = o1.SelectIndexedLogsTopicRange(addr, eventSig, 1, logpoller.EvmWord(1), logpoller.EvmWord(3), 0)
	require.NoError(t, err)
	assert.Equal(t, 3, len(lgs))

	// Check confirmations work as expected.
	require.NoError(t, o1.InsertBlock(common.HexToHash("0x2"), 2, time.Now(), 0))
	lgs, err = o1.SelectIndexedLogsTopicRange(addr, eventSig, 1, logpoller.EvmWord(4), logpoller.EvmWord(4), 1)
	require.NoError(t, err)
	assert.Equal(t, 0, len(lgs))
	// One more block gives the block-2 log its single required confirmation.
	require.NoError(t, o1.InsertBlock(common.HexToHash("0x3"), 3, time.Now(), 0))
	lgs, err = o1.SelectIndexedLogsTopicRange(addr, eventSig, 1, logpoller.EvmWord(4), logpoller.EvmWord(4), 1)
	require.NoError(t, err)
	assert.Equal(t, 1, len(lgs))
}

// TestORM_SelectIndexedLogsByTxHash verifies that lookup by (address, event sig, tx hash)
// returns only the logs matching all three, ordered by log index.
func TestORM_SelectIndexedLogsByTxHash(t *testing.T) {
	th := SetupTH(t, false, 0, 3, 2, 1000)
	o1 := th.ORM
	eventSig := common.HexToHash("0x1599")
	txHash := common.HexToHash("0x1888")
	addr := common.HexToAddress("0x1234")

	require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0))
	logs := []logpoller.Log{
		{
			EvmChainId:  ubig.New(th.ChainID),
			LogIndex:    int64(0),
			BlockHash:   common.HexToHash("0x1"),
			BlockNumber: int64(1),
			EventSig:    eventSig,
			Topics:      [][]byte{eventSig[:]},
			Address:     addr,
			TxHash:      txHash,
			Data:        logpoller.EvmWord(1).Bytes(),
		},
		{
			EvmChainId:  ubig.New(th.ChainID),
			LogIndex:    int64(1),
			BlockHash:   common.HexToHash("0x1"),
			BlockNumber: int64(1),
			EventSig:    eventSig,
			Topics:      [][]byte{eventSig[:]},
			Address:     addr,
			TxHash:      txHash,
			Data:        append(logpoller.EvmWord(2).Bytes(), logpoller.EvmWord(3).Bytes()...),
		},
		// Different txHash
		{
			EvmChainId:  ubig.New(th.ChainID),
			LogIndex:    int64(2),
			BlockHash:   common.HexToHash("0x1"),
			BlockNumber: int64(1),
			EventSig:    eventSig,
			Topics:      [][]byte{eventSig[:]},
			Address:     addr,
			TxHash:      common.HexToHash("0x1889"),
			Data:        append(logpoller.EvmWord(2).Bytes(), logpoller.EvmWord(3).Bytes()...),
		},
		// Different eventSig
		{
			EvmChainId:  ubig.New(th.ChainID),
			LogIndex:    int64(3),
			BlockHash:   common.HexToHash("0x1"),
			BlockNumber: int64(1),
			EventSig:    common.HexToHash("0x1600"),
			Topics:      [][]byte{eventSig[:]},
			Address:     addr,
			TxHash:      txHash,
			Data:        append(logpoller.EvmWord(2).Bytes(), logpoller.EvmWord(3).Bytes()...),
		},
	}
	require.NoError(t, o1.InsertLogs(logs))

	retrievedLogs, err := o1.SelectIndexedLogsByTxHash(addr, eventSig, txHash)
	require.NoError(t, err)

	// Only the first two logs match addr+eventSig+txHash.
	require.Equal(t, 2, len(retrievedLogs))
	require.Equal(t, retrievedLogs[0].LogIndex, logs[0].LogIndex)
	require.Equal(t, retrievedLogs[1].LogIndex, logs[1].LogIndex)
}

// TestORM_DataWords exercises queries over 32-byte words of the log Data payload,
// including confirmation filtering for the unconfirmed block-2 log.
func TestORM_DataWords(t *testing.T) {
	th := SetupTH(t, false, 2, 3, 2, 1000)
	o1 := th.ORM
	eventSig := common.HexToHash("0x1599")
	addr := common.HexToAddress("0x1234")
	require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0))
	require.NoError(t, o1.InsertLogs([]logpoller.Log{
		{
			EvmChainId:  ubig.New(th.ChainID),
			LogIndex:    int64(0),
			BlockHash:   common.HexToHash("0x1"),
			BlockNumber: int64(1),
			EventSig:    eventSig,
			Topics:      [][]byte{eventSig[:]},
			Address:     addr,
			TxHash:      common.HexToHash("0x1888"),
			Data:        logpoller.EvmWord(1).Bytes(),
		},
		{
			// In block 2, unconfirmed to start
			EvmChainId:  ubig.New(th.ChainID),
			LogIndex:    int64(1),
			BlockHash:   common.HexToHash("0x2"),
			BlockNumber: int64(2),
			EventSig:    eventSig,
			Topics:      [][]byte{eventSig[:]},
			Address:     addr,
			TxHash:      common.HexToHash("0x1888"),
			Data:        append(logpoller.EvmWord(2).Bytes(), logpoller.EvmWord(3).Bytes()...),
		},
	}))
	// Outside range should fail.
+ lgs, err := o1.SelectLogsDataWordRange(addr, eventSig, 0, logpoller.EvmWord(2), logpoller.EvmWord(2), 0) + require.NoError(t, err) + assert.Equal(t, 0, len(lgs)) + + // Range including log should succeed + lgs, err = o1.SelectLogsDataWordRange(addr, eventSig, 0, logpoller.EvmWord(1), logpoller.EvmWord(2), 0) + require.NoError(t, err) + assert.Equal(t, 1, len(lgs)) + + // Range only covering log should succeed + lgs, err = o1.SelectLogsDataWordRange(addr, eventSig, 0, logpoller.EvmWord(1), logpoller.EvmWord(1), 0) + require.NoError(t, err) + assert.Equal(t, 1, len(lgs)) + + // Cannot query for unconfirmed second log. + lgs, err = o1.SelectLogsDataWordRange(addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0) + require.NoError(t, err) + assert.Equal(t, 0, len(lgs)) + // Confirm it, then can query. + require.NoError(t, o1.InsertBlock(common.HexToHash("0x2"), 2, time.Now(), 0)) + lgs, err = o1.SelectLogsDataWordRange(addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0) + require.NoError(t, err) + assert.Equal(t, 1, len(lgs)) + assert.Equal(t, lgs[0].Data, append(logpoller.EvmWord(2).Bytes(), logpoller.EvmWord(3).Bytes()...)) + + // Check greater than 1 yields both logs. + lgs, err = o1.SelectLogsDataWordGreaterThan(addr, eventSig, 0, logpoller.EvmWord(1), 0) + require.NoError(t, err) + assert.Equal(t, 2, len(lgs)) +} + +func TestORM_SelectLogsWithSigsByBlockRangeFilter(t *testing.T) { + th := SetupTH(t, false, 2, 3, 2, 1000) + o1 := th.ORM + + // Insert logs on different topics, should be able to read them + // back using SelectLogsWithSigs and specifying + // said topics. 
+ topic := common.HexToHash("0x1599") + topic2 := common.HexToHash("0x1600") + sourceAddr := common.HexToAddress("0x12345") + inputLogs := []logpoller.Log{ + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 1, + BlockHash: common.HexToHash("0x1234"), + BlockNumber: int64(10), + EventSig: topic, + Topics: [][]byte{topic[:]}, + Address: sourceAddr, + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello1"), + }, + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 2, + BlockHash: common.HexToHash("0x1235"), + BlockNumber: int64(11), + EventSig: topic, + Topics: [][]byte{topic[:]}, + Address: sourceAddr, + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello2"), + }, + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 3, + BlockHash: common.HexToHash("0x1236"), + BlockNumber: int64(12), + EventSig: topic, + Topics: [][]byte{topic[:]}, + Address: common.HexToAddress("0x1235"), + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello3"), + }, + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 4, + BlockHash: common.HexToHash("0x1237"), + BlockNumber: int64(13), + EventSig: topic, + Topics: [][]byte{topic[:]}, + Address: common.HexToAddress("0x1235"), + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello4"), + }, + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 5, + BlockHash: common.HexToHash("0x1238"), + BlockNumber: int64(14), + EventSig: topic2, + Topics: [][]byte{topic2[:]}, + Address: sourceAddr, + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello5"), + }, + { + EvmChainId: ubig.New(th.ChainID), + LogIndex: 6, + BlockHash: common.HexToHash("0x1239"), + BlockNumber: int64(15), + EventSig: topic2, + Topics: [][]byte{topic2[:]}, + Address: sourceAddr, + TxHash: common.HexToHash("0x1888"), + Data: []byte("hello6"), + }, + } + require.NoError(t, o1.InsertLogs(inputLogs)) + + startBlock, endBlock := int64(10), int64(15) + logs, err := o1.SelectLogsWithSigs(startBlock, endBlock, sourceAddr, []common.Hash{ + topic, + topic2, + }) + 
require.NoError(t, err) + assert.Len(t, logs, 4) + for _, l := range logs { + assert.Equal(t, sourceAddr, l.Address, "wrong log address") + assert.True(t, bytes.Equal(topic.Bytes(), l.EventSig.Bytes()) || bytes.Equal(topic2.Bytes(), l.EventSig.Bytes()), "wrong log topic") + assert.True(t, l.BlockNumber >= startBlock && l.BlockNumber <= endBlock) + } +} + +func TestORM_DeleteBlocksBefore(t *testing.T) { + th := SetupTH(t, false, 2, 3, 2, 1000) + o1 := th.ORM + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 1, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 2, time.Now(), 0)) + require.NoError(t, o1.DeleteBlocksBefore(1)) + // 1 should be gone. + _, err := o1.SelectBlockByNumber(1) + require.Equal(t, err, sql.ErrNoRows) + b, err := o1.SelectBlockByNumber(2) + require.NoError(t, err) + assert.Equal(t, int64(2), b.BlockNumber) + // Clear multiple + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1236"), 3, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1237"), 4, time.Now(), 0)) + require.NoError(t, o1.DeleteBlocksBefore(3)) + _, err = o1.SelectBlockByNumber(2) + require.Equal(t, err, sql.ErrNoRows) + _, err = o1.SelectBlockByNumber(3) + require.Equal(t, err, sql.ErrNoRows) +} + +func TestLogPoller_Logs(t *testing.T) { + t.Parallel() + th := SetupTH(t, false, 2, 3, 2, 1000) + event1 := EmitterABI.Events["Log1"].ID + event2 := EmitterABI.Events["Log2"].ID + address1 := common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406fbbb") + address2 := common.HexToAddress("0x6E225058950f237371261C985Db6bDe26df2200E") + + // Block 1-3 + require.NoError(t, th.ORM.InsertLogs([]logpoller.Log{ + GenLog(th.ChainID, 1, 1, "0x3", event1[:], address1), + GenLog(th.ChainID, 2, 1, "0x3", event2[:], address2), + GenLog(th.ChainID, 1, 2, "0x4", event1[:], address2), + GenLog(th.ChainID, 2, 2, "0x4", event2[:], address1), + GenLog(th.ChainID, 1, 3, "0x5", event1[:], address1), + GenLog(th.ChainID, 2, 3, "0x5", 
event2[:], address2), + })) + + // Select for all Addresses + lgs, err := th.ORM.SelectLogsByBlockRange(1, 3) + require.NoError(t, err) + require.Equal(t, 6, len(lgs)) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000003", lgs[0].BlockHash.String()) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000003", lgs[1].BlockHash.String()) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000004", lgs[2].BlockHash.String()) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000004", lgs[3].BlockHash.String()) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000005", lgs[4].BlockHash.String()) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000005", lgs[5].BlockHash.String()) + + // Filter by Address and topic + lgs, err = th.ORM.SelectLogs(1, 3, address1, event1) + require.NoError(t, err) + require.Equal(t, 2, len(lgs)) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000003", lgs[0].BlockHash.String()) + assert.Equal(t, address1, lgs[0].Address) + assert.Equal(t, event1.Bytes(), lgs[0].Topics[0]) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000005", lgs[1].BlockHash.String()) + assert.Equal(t, address1, lgs[1].Address) + + // Filter by block + lgs, err = th.ORM.SelectLogs(2, 2, address2, event1) + require.NoError(t, err) + require.Equal(t, 1, len(lgs)) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000004", lgs[0].BlockHash.String()) + assert.Equal(t, int64(1), lgs[0].LogIndex) + assert.Equal(t, address2, lgs[0].Address) + assert.Equal(t, event1.Bytes(), lgs[0].Topics[0]) +} + +func BenchmarkLogs(b *testing.B) { + th := SetupTH(b, false, 2, 3, 2, 1000) + o := th.ORM + var lgs []logpoller.Log + addr := common.HexToAddress("0x1234") + for i := 0; i < 10_000; i++ { + lgs = append(lgs, logpoller.Log{ + 
			EvmChainId:  ubig.New(th.ChainID),
			LogIndex:    int64(i),
			BlockHash:   common.HexToHash("0x1"),
			BlockNumber: 1,
			EventSig:    EmitterABI.Events["Log1"].ID,
			Topics:      [][]byte{},
			Address:     addr,
			TxHash:      common.HexToHash("0x1234"),
			Data:        common.HexToHash(fmt.Sprintf("0x%d", i)).Bytes(),
		})
	}
	require.NoError(b, o.InsertLogs(lgs))
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_, err := o.SelectLogsDataWordRange(addr, EmitterABI.Events["Log1"].ID, 0, logpoller.EvmWord(8000), logpoller.EvmWord(8002), 0)
		require.NoError(b, err)
	}
}

// TestSelectLogsWithSigsExcluding models a request/response flow: a "request" log is
// unfulfilled until a matching "response" log (paired on a chosen topic index) exists.
// It checks matching on topic index 1 and 3, confirmation depth, and block-range limits.
func TestSelectLogsWithSigsExcluding(t *testing.T) {
	th := SetupTH(t, false, 2, 3, 2, 1000)
	orm := th.ORM
	addressA := common.HexToAddress("0x11111")
	addressB := common.HexToAddress("0x22222")
	addressC := common.HexToAddress("0x33333")

	requestSigA := common.HexToHash("0x01")
	responseSigA := common.HexToHash("0x02")
	requestSigB := common.HexToHash("0x03")
	responseSigB := common.HexToHash("0x04")

	topicA := common.HexToHash("0x000a")
	topicB := common.HexToHash("0x000b")
	topicC := common.HexToHash("0x000c")
	topicD := common.HexToHash("0x000d")

	// Insert two logs that mimic an oracle request from 2 different addresses (matching will be on topic index 1)
	require.NoError(t, orm.InsertLogs([]logpoller.Log{
		{
			EvmChainId:     (*ubig.Big)(th.ChainID),
			LogIndex:       1,
			BlockHash:      common.HexToHash("0x1"),
			BlockNumber:    1,
			BlockTimestamp: time.Now(),
			Topics:         [][]byte{requestSigA.Bytes(), topicA.Bytes(), topicB.Bytes()},
			EventSig:       requestSigA,
			Address:        addressA,
			TxHash:         common.HexToHash("0x0001"),
			Data:           []byte("requestID-A1"),
		},
		{
			EvmChainId:     (*ubig.Big)(th.ChainID),
			LogIndex:       2,
			BlockHash:      common.HexToHash("0x1"),
			BlockNumber:    1,
			BlockTimestamp: time.Now(),
			Topics:         [][]byte{requestSigB.Bytes(), topicA.Bytes(), topicB.Bytes()},
			EventSig:       requestSigB,
			Address:        addressB,
			TxHash:         common.HexToHash("0x0002"),
			Data:           []byte("requestID-B1"),
		},
	}))
	require.NoError(t, orm.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0))

	// Get any requestSigA from addressA that do not have an equivalent responseSigA
	logs, err := orm.SelectIndexedLogsWithSigsExcluding(requestSigA, responseSigA, 1, addressA, 0, 3, 0)
	require.NoError(t, err)
	require.Len(t, logs, 1)
	require.Equal(t, logs[0].Data, []byte("requestID-A1"))

	// Get any requestSigB from addressB that do not have an equivalent responseSigB
	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 1, addressB, 0, 3, 0)
	require.NoError(t, err)
	require.Len(t, logs, 1)
	require.Equal(t, logs[0].Data, []byte("requestID-B1"))

	// Insert a log that mimics a response for requestID-A1
	require.NoError(t, orm.InsertLogs([]logpoller.Log{
		{
			EvmChainId:     (*ubig.Big)(th.ChainID),
			LogIndex:       3,
			BlockHash:      common.HexToHash("0x2"),
			BlockNumber:    2,
			BlockTimestamp: time.Now(),
			Topics:         [][]byte{responseSigA.Bytes(), topicA.Bytes(), topicC.Bytes(), topicD.Bytes()},
			EventSig:       responseSigA,
			Address:        addressA,
			TxHash:         common.HexToHash("0x0002"),
			Data:           []byte("responseID-A1"),
		},
	}))
	require.NoError(t, orm.InsertBlock(common.HexToHash("0x2"), 2, time.Now(), 0))

	// Should return nothing as requestID-A1 has been fulfilled
	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigA, responseSigA, 1, addressA, 0, 3, 0)
	require.NoError(t, err)
	require.Len(t, logs, 0)

	// requestID-B1 should still be unfulfilled
	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 1, addressB, 0, 3, 0)
	require.NoError(t, err)
	require.Len(t, logs, 1)
	require.Equal(t, logs[0].Data, []byte("requestID-B1"))

	// Insert 3 requests from addressC (matching will be on topic index 3)
	require.NoError(t, orm.InsertLogs([]logpoller.Log{
		{
			EvmChainId:     (*ubig.Big)(th.ChainID),
			LogIndex:       5,
			BlockHash:      common.HexToHash("0x2"),
			BlockNumber:    3,
			BlockTimestamp: time.Now(),
			Topics:         [][]byte{requestSigB.Bytes(), topicD.Bytes(), topicB.Bytes(), topicC.Bytes()},
			EventSig:       requestSigB,
			Address:        addressC,
			TxHash:         common.HexToHash("0x0002"),
			Data:           []byte("requestID-C1"),
		},
		{
			EvmChainId:     (*ubig.Big)(th.ChainID),
			LogIndex:       6,
			BlockHash:      common.HexToHash("0x2"),
			BlockNumber:    3,
			BlockTimestamp: time.Now(),
			Topics:         [][]byte{requestSigB.Bytes(), topicD.Bytes(), topicB.Bytes(), topicA.Bytes()},
			EventSig:       requestSigB,
			Address:        addressC,
			TxHash:         common.HexToHash("0x0002"),
			Data:           []byte("requestID-C2"),
		}, {
			EvmChainId:     (*ubig.Big)(th.ChainID),
			LogIndex:       7,
			BlockHash:      common.HexToHash("0x2"),
			BlockNumber:    3,
			BlockTimestamp: time.Now(),
			Topics:         [][]byte{requestSigB.Bytes(), topicD.Bytes(), topicB.Bytes(), topicD.Bytes()},
			EventSig:       requestSigB,
			Address:        addressC,
			TxHash:         common.HexToHash("0x0002"),
			Data:           []byte("requestID-C3"),
		},
	}))
	require.NoError(t, orm.InsertBlock(common.HexToHash("0x3"), 3, time.Now(), 0))

	// Get all unfulfilled requests from addressC, match on topic index 3
	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 4, 0)
	require.NoError(t, err)
	require.Len(t, logs, 3)
	require.Equal(t, logs[0].Data, []byte("requestID-C1"))
	require.Equal(t, logs[1].Data, []byte("requestID-C2"))
	require.Equal(t, logs[2].Data, []byte("requestID-C3"))

	// Fulfill requestID-C2 (topic index 3 = topicA, matching C2's request)
	require.NoError(t, orm.InsertLogs([]logpoller.Log{
		{
			EvmChainId:     (*ubig.Big)(th.ChainID),
			LogIndex:       8,
			BlockHash:      common.HexToHash("0x3"),
			BlockNumber:    3,
			BlockTimestamp: time.Now(),
			Topics:         [][]byte{responseSigB.Bytes(), topicC.Bytes(), topicD.Bytes(), topicA.Bytes()},
			EventSig:       responseSigB,
			Address:        addressC,
			TxHash:         common.HexToHash("0x0002"),
			Data:           []byte("responseID-C2"),
		},
	}))

	// Verify that requestID-C2 is now fulfilled (not returned)
	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 4, 0)
	require.NoError(t, err)
	require.Len(t, logs, 2)
	require.Equal(t, logs[0].Data, []byte("requestID-C1"))
	require.Equal(t, logs[1].Data, []byte("requestID-C3"))

	// Fulfill requestID-C3 (topic index 3 = topicD, matching C3's request)
	require.NoError(t, orm.InsertLogs([]logpoller.Log{
		{
			EvmChainId:     (*ubig.Big)(th.ChainID),
			LogIndex:       9,
			BlockHash:      common.HexToHash("0x3"),
			BlockNumber:    3,
			BlockTimestamp: time.Now(),
			Topics:         [][]byte{responseSigB.Bytes(), topicC.Bytes(), topicD.Bytes(), topicD.Bytes()},
			EventSig:       responseSigB,
			Address:        addressC,
			TxHash:         common.HexToHash("0x0002"),
			Data:           []byte("responseID-C3"),
		},
	}))

	// Verify that requestID-C3 is now fulfilled (not returned)
	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 4, 0)
	require.NoError(t, err)
	require.Len(t, logs, 1)
	require.Equal(t, logs[0].Data, []byte("requestID-C1"))

	// Should return no logs as the number of confirmations is not satisfied
	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 4, 3)
	require.NoError(t, err)
	require.Len(t, logs, 0)

	require.NoError(t, orm.InsertBlock(common.HexToHash("0x4"), 4, time.Now(), 0))
	require.NoError(t, orm.InsertBlock(common.HexToHash("0x5"), 5, time.Now(), 0))
	require.NoError(t, orm.InsertBlock(common.HexToHash("0x6"), 6, time.Now(), 0))
	require.NoError(t, orm.InsertBlock(common.HexToHash("0x7"), 7, time.Now(), 0))
	require.NoError(t, orm.InsertBlock(common.HexToHash("0x8"), 8, time.Now(), 0))
	require.NoError(t, orm.InsertBlock(common.HexToHash("0x9"), 9, time.Now(), 0))
	require.NoError(t, orm.InsertBlock(common.HexToHash("0x10"), 10, time.Now(), 0))

	// Fulfill requestID-C1 (original comment said C3, but topic index 3 here is topicC,
	// which pairs with requestID-C1's request log; Data is responseID-C1)
	require.NoError(t, orm.InsertLogs([]logpoller.Log{
		{
			EvmChainId:     (*ubig.Big)(th.ChainID),
			LogIndex:       10,
			BlockHash:      common.HexToHash("0x2"),
			BlockNumber:    10,
			BlockTimestamp: time.Now(),
			Topics:         [][]byte{responseSigB.Bytes(), topicD.Bytes(), topicB.Bytes(), topicC.Bytes()},
			EventSig:       responseSigB,
			Address:        addressC,
			TxHash:         common.HexToHash("0x0002"),
			Data:           []byte("responseID-C1"),
		},
	}))

	// All logs for addressC should be fulfilled, query should return 0 logs
	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 10, 0)
	require.NoError(t, err)
	require.Len(t, logs, 0)

	// Should return 1 log as it does not satisfy the required number of confirmations
	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 10, 3)
	require.NoError(t, err)
	require.Len(t, logs, 1)
	require.Equal(t, logs[0].Data, []byte("requestID-C1"))

	// Insert 3 more blocks so that the requestID-C1 has enough confirmations
	require.NoError(t, orm.InsertBlock(common.HexToHash("0x11"), 11, time.Now(), 0))
	require.NoError(t, orm.InsertBlock(common.HexToHash("0x12"), 12, time.Now(), 0))
	require.NoError(t, orm.InsertBlock(common.HexToHash("0x13"), 13, time.Now(), 0))

	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 10, 0)
	require.NoError(t, err)
	require.Len(t, logs, 0)

	// AddressB should still have an unfulfilled log (requestID-B1)
	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 1, addressB, 0, 3, 0)
	require.NoError(t, err)
	require.Len(t, logs, 1)
	require.Equal(t, logs[0].Data, []byte("requestID-B1"))

	// Should return requestID-A1 as the fulfillment event is out of the block range
	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigA, responseSigA, 1, addressA, 0, 1, 10)
	require.NoError(t, err)
	require.Len(t, logs, 1)
	require.Equal(t, logs[0].Data, []byte("requestID-A1"))

	// Should return nothing as requestID-B1 is before the block range
	logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 1, addressB, 2, 13, 0)
	require.NoError(t, err)
	require.Len(t, logs, 0)
}

// TestSelectLatestBlockNumberEventSigsAddrsWithConfs checks the max-block query across
// event sig / address combinations under varying confirmation requirements.
func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) {
	th :=
SetupTH(t, false, 2, 3, 2, 1000)
	event1 := EmitterABI.Events["Log1"].ID
	event2 := EmitterABI.Events["Log2"].ID
	address1 := utils.RandomAddress()
	address2 := utils.RandomAddress()

	// event1@block1 for address1; event2@blocks 1,2,3 for address2. Latest block is 3,
	// with finalized height 1.
	require.NoError(t, th.ORM.InsertLogs([]logpoller.Log{
		GenLog(th.ChainID, 1, 1, utils.RandomAddress().String(), event1[:], address1),
		GenLog(th.ChainID, 2, 1, utils.RandomAddress().String(), event2[:], address2),
		GenLog(th.ChainID, 2, 2, utils.RandomAddress().String(), event2[:], address2),
		GenLog(th.ChainID, 2, 3, utils.RandomAddress().String(), event2[:], address2),
	}))
	require.NoError(t, th.ORM.InsertBlock(utils.RandomHash(), 3, time.Now(), 1))

	tests := []struct {
		name                string
		events              []common.Hash
		addrs               []common.Address
		confs               logpoller.Confirmations
		fromBlock           int64
		expectedBlockNumber int64
	}{
		{
			name:                "no matching logs returns 0 block number",
			events:              []common.Hash{event2},
			addrs:               []common.Address{address1},
			confs:               0,
			fromBlock:           0,
			expectedBlockNumber: 0,
		},
		{
			name:                "not enough confirmations block returns 0 block number",
			events:              []common.Hash{event2},
			addrs:               []common.Address{address2},
			confs:               5,
			fromBlock:           0,
			expectedBlockNumber: 0,
		},
		{
			name:                "single matching event and address returns last block",
			events:              []common.Hash{event1},
			addrs:               []common.Address{address1},
			confs:               0,
			fromBlock:           0,
			expectedBlockNumber: 1,
		},
		{
			name:                "only finalized log is picked",
			events:              []common.Hash{event1, event2},
			addrs:               []common.Address{address1, address2},
			confs:               logpoller.Finalized,
			fromBlock:           0,
			expectedBlockNumber: 1,
		},
		{
			name:                "picks max block from two events",
			events:              []common.Hash{event1, event2},
			addrs:               []common.Address{address1, address2},
			confs:               0,
			fromBlock:           0,
			expectedBlockNumber: 3,
		},
		{
			name:                "picks previous block number for confirmations set to 1",
			events:              []common.Hash{event2},
			addrs:               []common.Address{address2},
			confs:               1,
			fromBlock:           0,
			expectedBlockNumber: 2,
		},
		{
			name:                "returns 0 if from block is not matching",
			events:              []common.Hash{event1, event2},
			addrs:               []common.Address{address1, address2},
			confs:               0,
			fromBlock:           3,
			expectedBlockNumber: 0,
		},
		{
			name:                "picks max block from two events when from block is lower",
			events:              []common.Hash{event1, event2},
			addrs:               []common.Address{address1, address2},
			confs:               0,
			fromBlock:           2,
			expectedBlockNumber: 3,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			blockNumber, err := th.ORM.SelectLatestBlockByEventSigsAddrsWithConfs(tt.fromBlock, tt.events, tt.addrs, tt.confs)
			require.NoError(t, err)
			assert.Equal(t, tt.expectedBlockNumber, blockNumber)
		})
	}
}

// TestSelectLogsCreatedAfter verifies block_timestamp-based selection (plain and
// topic-indexed variants) combined with confirmation filtering.
func TestSelectLogsCreatedAfter(t *testing.T) {
	th := SetupTH(t, false, 2, 3, 2, 1000)
	event := EmitterABI.Events["Log1"].ID
	address := utils.RandomAddress()

	// Widely spaced timestamps so "after" cutoffs are unambiguous.
	block1ts := time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC)
	block2ts := time.Date(2020, 1, 1, 12, 12, 12, 0, time.UTC)
	block3ts := time.Date(2030, 1, 1, 12, 12, 12, 0, time.UTC)

	require.NoError(t, th.ORM.InsertLogs([]logpoller.Log{
		GenLogWithTimestamp(th.ChainID, 1, 1, utils.RandomAddress().String(), event[:], address, block1ts),
		GenLogWithTimestamp(th.ChainID, 1, 2, utils.RandomAddress().String(), event[:], address, block2ts),
		GenLogWithTimestamp(th.ChainID, 2, 2, utils.RandomAddress().String(), event[:], address, block2ts),
		GenLogWithTimestamp(th.ChainID, 1, 3, utils.RandomAddress().String(), event[:], address, block3ts),
	}))
	require.NoError(t, th.ORM.InsertBlock(utils.RandomHash(), 1, block1ts, 0))
	require.NoError(t, th.ORM.InsertBlock(utils.RandomHash(), 2, block2ts, 1))
	require.NoError(t, th.ORM.InsertBlock(utils.RandomHash(), 3, block3ts, 2))

	// expectedLog identifies a log by (block number, log index).
	type expectedLog struct {
		block int64
		log   int64
	}

	tests := []struct {
		name         string
		confs        logpoller.Confirmations
		after        time.Time
		expectedLogs []expectedLog
	}{
		{
			name:  "picks logs after block 1",
			confs: 0,
			after: block1ts,
			expectedLogs: []expectedLog{
				{block: 2, log: 1},
				{block: 2, log: 2},
				{block: 3, log: 1},
			},
		},
		{
			name:  "skips blocks with not enough confirmations",
			confs: 1,
			after: block1ts,
			expectedLogs: []expectedLog{
				{block: 2, log: 1},
				{block: 2, log: 2},
			},
		},
		{
			name:  "limits number of blocks by block_timestamp",
			confs: 0,
			after: block2ts,
			expectedLogs: []expectedLog{
				{block: 3, log: 1},
			},
		},
		{
			name:         "returns empty dataset for future timestamp",
			confs:        0,
			after:        block3ts,
			expectedLogs: []expectedLog{},
		},
		{
			name:         "returns empty dataset when too many confirmations are required",
			confs:        3,
			after:        block1ts,
			expectedLogs: []expectedLog{},
		},
		{
			name:  "returns only finalized log",
			confs: logpoller.Finalized,
			after: block1ts,
			expectedLogs: []expectedLog{
				{block: 2, log: 1},
				{block: 2, log: 2},
			},
		},
	}
	for _, tt := range tests {
		t.Run("SelectLogsCreatedAfter"+tt.name, func(t *testing.T) {
			logs, err := th.ORM.SelectLogsCreatedAfter(address, event, tt.after, tt.confs)
			require.NoError(t, err)
			require.Len(t, logs, len(tt.expectedLogs))

			for i, log := range logs {
				require.Equal(t, tt.expectedLogs[i].block, log.BlockNumber)
				require.Equal(t, tt.expectedLogs[i].log, log.LogIndex)
			}
		})

		t.Run("SelectIndexedLogsCreatedAfter"+tt.name, func(t *testing.T) {
			logs, err := th.ORM.SelectIndexedLogsCreatedAfter(address, event, 1, []common.Hash{event}, tt.after, tt.confs)
			require.NoError(t, err)
			require.Len(t, logs, len(tt.expectedLogs))

			for i, log := range logs {
				require.Equal(t, tt.expectedLogs[i].block, log.BlockNumber)
				require.Equal(t, tt.expectedLogs[i].log, log.LogIndex)
			}
		})
	}
}

// TestNestedLogPollerBlocksQuery verifies that log queries return nothing until the
// corresponding block row exists, and that confirmation depth is applied against it.
func TestNestedLogPollerBlocksQuery(t *testing.T) {
	th := SetupTH(t, false, 2, 3, 2, 1000)
	event := EmitterABI.Events["Log1"].ID
	address := utils.RandomAddress()

	require.NoError(t, th.ORM.InsertLogs([]logpoller.Log{
		GenLog(th.ChainID, 1, 8, utils.RandomAddress().String(), event[:], address),
	}))

	// Empty logs when block are not persisted
	logs, err := th.ORM.SelectIndexedLogs(address, event, 1, []common.Hash{event}, logpoller.Unconfirmed)
	require.NoError(t, err)
	require.Len(t, logs, 0)

	// Persist block
	require.NoError(t, th.ORM.InsertBlock(utils.RandomHash(), 10, time.Now(), 0))

	// Check if query actually works well with provided dataset
	logs, err = th.ORM.SelectIndexedLogs(address, event, 1, []common.Hash{event}, logpoller.Unconfirmed)
	require.NoError(t, err)
	require.Len(t, logs, 1)

	// Empty logs when number of confirmations is too deep
	logs, err = th.ORM.SelectIndexedLogs(address, event, 1, []common.Hash{event}, logpoller.Confirmations(4))
	require.NoError(t, err)
	require.Len(t, logs, 0)
}

// TestInsertLogsWithBlock checks that InsertLogsWithBlock is transactional: if either
// the block or any log is invalid, neither logs nor block are persisted.
func TestInsertLogsWithBlock(t *testing.T) {
	chainID := testutils.NewRandomEVMChainID()
	event := utils.RandomBytes32()
	address := utils.RandomAddress()

	// We need full db here, because we want to test transaction rollbacks.
	// Using pgtest.NewSqlxDB(t) will run all tests in TXs which is not desired for this type of test
	// (inner tx rollback will rollback outer tx, blocking rest of execution)
	_, db := heavyweight.FullTestDBV2(t, nil)
	o := logpoller.NewORM(chainID, db, logger.Test(t), pgtest.NewQConfig(true))

	correctLog := GenLog(chainID, 1, 1, utils.RandomAddress().String(), event[:], address)
	invalidLog := GenLog(chainID, -10, -10, utils.RandomAddress().String(), event[:], address)
	correctBlock := logpoller.NewLogPollerBlock(utils.RandomBytes32(), 20, time.Now(), 10)
	invalidBlock := logpoller.NewLogPollerBlock(utils.RandomBytes32(), -10, time.Now(), -10)

	tests := []struct {
		name           string
		logs           []logpoller.Log
		block          logpoller.LogPollerBlock
		shouldRollback bool
	}{
		{
			name:           "properly persist all data",
			logs:           []logpoller.Log{correctLog},
			block:          correctBlock,
			shouldRollback: false,
		},
		{
			name:           "rollbacks transaction when block is invalid",
			logs:           []logpoller.Log{correctLog},
			block:          invalidBlock,
			shouldRollback: true,
		},
		{
			name:           "rollbacks transaction when log is invalid",
			logs:           []logpoller.Log{invalidLog},
			block:          correctBlock,
			shouldRollback: true,
		},
		{
			name:           "rollback when only some logs are invalid",
			logs:           []logpoller.Log{correctLog, invalidLog},
			block:          correctBlock,
			shouldRollback: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// clean all logs and blocks between test cases
			defer func() { _ = o.DeleteLogsAndBlocksAfter(0) }()
			insertError := o.InsertLogsWithBlock(tt.logs, tt.block)

			logs, logsErr := o.SelectLogs(0, math.MaxInt, address, event)
			block, blockErr := o.SelectLatestBlock()

			if tt.shouldRollback {
				assert.Error(t, insertError)

				assert.NoError(t, logsErr)
				assert.Len(t, logs, 0)

				assert.Error(t, blockErr)
			} else {
				assert.NoError(t, insertError)

				assert.NoError(t, logsErr)
				assert.Len(t, logs, len(tt.logs))

				assert.NoError(t, blockErr)
				assert.Equal(t, block.BlockNumber, tt.block.BlockNumber)
			}
		})
	}
}

// TestInsertLogsInTx checks that a large batch insert (spanning multiple internal
// statements) is all-or-nothing when one log in the batch is invalid.
func TestInsertLogsInTx(t *testing.T) {
	chainID := testutils.NewRandomEVMChainID()
	event := utils.RandomBytes32()
	address := utils.RandomAddress()
	maxLogsSize := 9000

	// We need full db here, because we want to test transaction rollbacks.
	_, db := heavyweight.FullTestDBV2(t, nil)
	o := logpoller.NewORM(chainID, db, logger.Test(t), pgtest.NewQConfig(true))

	logs := make([]logpoller.Log, maxLogsSize, maxLogsSize+1)
	for i := 0; i < maxLogsSize; i++ {
		logs[i] = GenLog(chainID, int64(i+1), int64(i+1), utils.RandomAddress().String(), event[:], address)
	}
	invalidLog := GenLog(chainID, -10, -10, utils.RandomAddress().String(), event[:], address)

	tests := []struct {
		name           string
		logs           []logpoller.Log
		shouldRollback bool
	}{
		{
			name:           "all logs persisted",
			logs:           logs,
			shouldRollback: false,
		},
		{
			name:           "rollback when invalid log is passed",
			logs:           append(logs, invalidLog),
			shouldRollback: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// clean all logs and blocks between test cases
			defer func() { _, _ = db.Exec("truncate evm.logs") }()

			insertErr := o.InsertLogs(tt.logs)
			logsFromDb, err := o.SelectLogs(0, math.MaxInt, address, event)
			assert.NoError(t, err)

			if tt.shouldRollback {
				assert.Error(t, insertErr)
				assert.Len(t, logsFromDb, 0)
			} else {
				assert.NoError(t, insertErr)
				assert.Len(t, logsFromDb, len(tt.logs))
			}
		})
	}
}

// TestSelectLogsDataWordBetween checks selection of logs whose [min, max] data words
// (words 0 and 1) bracket a given value.
func TestSelectLogsDataWordBetween(t *testing.T) {
	address := utils.RandomAddress()
	eventSig := utils.RandomBytes32()
	th := SetupTH(t, false, 2, 3, 2, 1000)

	// First log covers [1, 10], second covers [5, 20].
	firstLogData := make([]byte, 0, 64)
	firstLogData = append(firstLogData, logpoller.EvmWord(1).Bytes()...)
	firstLogData = append(firstLogData, logpoller.EvmWord(10).Bytes()...)

	secondLogData := make([]byte, 0, 64)
	secondLogData = append(secondLogData, logpoller.EvmWord(5).Bytes()...)
+ secondLogData = append(secondLogData, logpoller.EvmWord(20).Bytes()...) + + err := th.ORM.InsertLogsWithBlock( + []logpoller.Log{ + GenLogWithData(th.ChainID, address, eventSig, 1, 1, firstLogData), + GenLogWithData(th.ChainID, address, eventSig, 2, 2, secondLogData), + }, + logpoller.NewLogPollerBlock(utils.RandomBytes32(), 10, time.Now(), 1), + ) + require.NoError(t, err) + + tests := []struct { + name string + wordValue uint64 + expectedLogs []int64 + }{ + { + name: "returns only first log", + wordValue: 2, + expectedLogs: []int64{1}, + }, + { + name: "returns only second log", + wordValue: 11, + expectedLogs: []int64{2}, + }, + { + name: "returns both logs if word value is between", + wordValue: 5, + expectedLogs: []int64{1, 2}, + }, + { + name: "returns no logs if word value is outside of the range", + wordValue: 21, + expectedLogs: []int64{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + logs, err1 := th.ORM.SelectLogsDataWordBetween(address, eventSig, 0, 1, logpoller.EvmWord(tt.wordValue), logpoller.Unconfirmed) + assert.NoError(t, err1) + assert.Len(t, logs, len(tt.expectedLogs)) + + for index := range logs { + assert.Equal(t, tt.expectedLogs[index], logs[index].BlockNumber) + } + }) + } +} + +func Benchmark_LogsDataWordBetween(b *testing.B) { + chainId := big.NewInt(137) + _, db := heavyweight.FullTestDBV2(b, nil) + o := logpoller.NewORM(chainId, db, logger.Test(b), pgtest.NewQConfig(false)) + + numberOfReports := 100_000 + numberOfMessagesPerReport := 256 + + commitStoreAddress := utils.RandomAddress() + commitReportAccepted := utils.RandomBytes32() + + var dbLogs []logpoller.Log + for i := 0; i < numberOfReports; i++ { + data := make([]byte, 64) + // MinSeqNr + data = append(data, logpoller.EvmWord(uint64(numberOfMessagesPerReport*i+1)).Bytes()...) + // MaxSeqNr + data = append(data, logpoller.EvmWord(uint64(numberOfMessagesPerReport*(i+1))).Bytes()...) 
+ + dbLogs = append(dbLogs, logpoller.Log{ + EvmChainId: ubig.New(chainId), + LogIndex: int64(i + 1), + BlockHash: utils.RandomBytes32(), + BlockNumber: int64(i + 1), + BlockTimestamp: time.Now(), + EventSig: commitReportAccepted, + Topics: [][]byte{}, + Address: commitStoreAddress, + TxHash: utils.RandomHash(), + Data: data, + CreatedAt: time.Now(), + }) + } + require.NoError(b, o.InsertBlock(utils.RandomHash(), int64(numberOfReports*numberOfMessagesPerReport), time.Now(), int64(numberOfReports*numberOfMessagesPerReport))) + require.NoError(b, o.InsertLogs(dbLogs)) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + logs, err := o.SelectLogsDataWordBetween( + commitStoreAddress, + commitReportAccepted, + 2, + 3, + logpoller.EvmWord(uint64(numberOfReports*numberOfMessagesPerReport/2)), // Pick the middle report + logpoller.Unconfirmed, + ) + assert.NoError(b, err) + assert.Len(b, logs, 1) + } +} diff --git a/core/chains/evm/logpoller/query.go b/core/chains/evm/logpoller/query.go new file mode 100644 index 00000000..b31210d3 --- /dev/null +++ b/core/chains/evm/logpoller/query.go @@ -0,0 +1,144 @@ +package logpoller + +import ( + "errors" + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +type bytesProducer interface { + Bytes() []byte +} + +func concatBytes[T bytesProducer](byteSlice []T) pq.ByteaArray { + var output [][]byte + for _, b := range byteSlice { + output = append(output, b.Bytes()) + } + return output +} + +// queryArgs is a helper for building the arguments to a postgres query created by DbORM +// Besides the convenience methods, it also keeps track of arguments validation and sanitization. 
+type queryArgs struct { + args map[string]interface{} + err []error +} + +func newQueryArgs(chainId *big.Int) *queryArgs { + return &queryArgs{ + args: map[string]interface{}{ + "evm_chain_id": ubig.New(chainId), + }, + err: []error{}, + } +} + +func newQueryArgsForEvent(chainId *big.Int, address common.Address, eventSig common.Hash) *queryArgs { + return newQueryArgs(chainId). + withAddress(address). + withEventSig(eventSig) +} + +func (q *queryArgs) withEventSig(eventSig common.Hash) *queryArgs { + return q.withCustomHashArg("event_sig", eventSig) +} + +func (q *queryArgs) withEventSigArray(eventSigs []common.Hash) *queryArgs { + return q.withCustomArg("event_sig_array", concatBytes(eventSigs)) +} + +func (q *queryArgs) withAddress(address common.Address) *queryArgs { + return q.withCustomArg("address", address) +} + +func (q *queryArgs) withAddressArray(addresses []common.Address) *queryArgs { + return q.withCustomArg("address_array", concatBytes(addresses)) +} + +func (q *queryArgs) withStartBlock(startBlock int64) *queryArgs { + return q.withCustomArg("start_block", startBlock) +} + +func (q *queryArgs) withEndBlock(endBlock int64) *queryArgs { + return q.withCustomArg("end_block", endBlock) +} + +func (q *queryArgs) withWordIndex(wordIndex int) *queryArgs { + return q.withCustomArg("word_index", wordIndex) +} + +func (q *queryArgs) withWordValueMin(wordValueMin common.Hash) *queryArgs { + return q.withCustomHashArg("word_value_min", wordValueMin) +} + +func (q *queryArgs) withWordValueMax(wordValueMax common.Hash) *queryArgs { + return q.withCustomHashArg("word_value_max", wordValueMax) +} + +func (q *queryArgs) withWordIndexMin(wordIndex int) *queryArgs { + return q.withCustomArg("word_index_min", wordIndex) +} + +func (q *queryArgs) withWordIndexMax(wordIndex int) *queryArgs { + return q.withCustomArg("word_index_max", wordIndex) +} + +func (q *queryArgs) withWordValue(wordValue common.Hash) *queryArgs { + return q.withCustomHashArg("word_value", 
wordValue) +} + +func (q *queryArgs) withConfs(confs Confirmations) *queryArgs { + return q.withCustomArg("confs", confs) +} + +func (q *queryArgs) withTopicIndex(index int) *queryArgs { + // Only topicIndex 1 through 3 is valid. 0 is the event sig and only 4 total topics are allowed + if !(index == 1 || index == 2 || index == 3) { + q.err = append(q.err, fmt.Errorf("invalid index for topic: %d", index)) + } + // Add 1 since postgresql arrays are 1-indexed. + return q.withCustomArg("topic_index", index+1) +} + +func (q *queryArgs) withTopicValueMin(valueMin common.Hash) *queryArgs { + return q.withCustomHashArg("topic_value_min", valueMin) +} + +func (q *queryArgs) withTopicValueMax(valueMax common.Hash) *queryArgs { + return q.withCustomHashArg("topic_value_max", valueMax) +} + +func (q *queryArgs) withTopicValues(values []common.Hash) *queryArgs { + return q.withCustomArg("topic_values", concatBytes(values)) +} + +func (q *queryArgs) withBlockTimestampAfter(after time.Time) *queryArgs { + return q.withCustomArg("block_timestamp_after", after) +} + +func (q *queryArgs) withTxHash(hash common.Hash) *queryArgs { + return q.withCustomHashArg("tx_hash", hash) +} + +func (q *queryArgs) withCustomHashArg(name string, arg common.Hash) *queryArgs { + return q.withCustomArg(name, arg.Bytes()) +} + +func (q *queryArgs) withCustomArg(name string, arg any) *queryArgs { + q.args[name] = arg + return q +} + +func (q *queryArgs) toArgs() (map[string]interface{}, error) { + if len(q.err) > 0 { + return nil, errors.Join(q.err...) 
+ } + return q.args, nil +} diff --git a/core/chains/evm/logpoller/query_test.go b/core/chains/evm/logpoller/query_test.go new file mode 100644 index 00000000..a6715af8 --- /dev/null +++ b/core/chains/evm/logpoller/query_test.go @@ -0,0 +1,83 @@ +package logpoller + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +func Test_QueryArgs(t *testing.T) { + tests := []struct { + name string + queryArgs *queryArgs + want map[string]interface{} + wantErr bool + }{ + { + name: "valid arguments", + queryArgs: newQueryArgs(big.NewInt(20)).withAddress(utils.ZeroAddress), + want: map[string]interface{}{ + "evm_chain_id": ubig.NewI(20), + "address": utils.ZeroAddress, + }, + }, + { + name: "invalid topic index", + queryArgs: newQueryArgs(big.NewInt(20)).withTopicIndex(0), + wantErr: true, + }, + { + name: "custom argument", + queryArgs: newEmptyArgs().withCustomArg("arg", "value"), + want: map[string]interface{}{ + "arg": "value", + }, + }, + { + name: "hash converted to bytes", + queryArgs: newEmptyArgs().withCustomHashArg("hash", common.Hash{}), + want: map[string]interface{}{ + "hash": make([]byte, 32), + }, + }, + { + name: "hash array converted to bytes array", + queryArgs: newEmptyArgs().withEventSigArray([]common.Hash{{}, {}}), + want: map[string]interface{}{ + "event_sig_array": pq.ByteaArray{make([]byte, 32), make([]byte, 32)}, + }, + }, + { + name: "topic index incremented", + queryArgs: newEmptyArgs().withTopicIndex(2), + want: map[string]interface{}{ + "topic_index": 3, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + args, err := tt.queryArgs.toArgs() + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.want, args) + } + }) + } +} + +func newEmptyArgs() 
*queryArgs { + return &queryArgs{ + args: map[string]interface{}{}, + err: []error{}, + } +} diff --git a/core/chains/evm/mocks/balance_monitor.go b/core/chains/evm/mocks/balance_monitor.go new file mode 100644 index 00000000..05b030cf --- /dev/null +++ b/core/chains/evm/mocks/balance_monitor.go @@ -0,0 +1,150 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + common "github.com/ethereum/go-ethereum/common" + assets "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +// BalanceMonitor is an autogenerated mock type for the BalanceMonitor type +type BalanceMonitor struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *BalanceMonitor) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetEthBalance provides a mock function with given fields: _a0 +func (_m *BalanceMonitor) GetEthBalance(_a0 common.Address) *assets.Eth { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetEthBalance") + } + + var r0 *assets.Eth + if rf, ok := ret.Get(0).(func(common.Address) *assets.Eth); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Eth) + } + } + + return r0 +} + +// HealthReport provides a mock function with given fields: +func (_m *BalanceMonitor) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// Name provides a mock 
function with given fields: +func (_m *BalanceMonitor) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// OnNewLongestChain provides a mock function with given fields: ctx, head +func (_m *BalanceMonitor) OnNewLongestChain(ctx context.Context, head *types.Head) { + _m.Called(ctx, head) +} + +// Ready provides a mock function with given fields: +func (_m *BalanceMonitor) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *BalanceMonitor) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewBalanceMonitor creates a new instance of BalanceMonitor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBalanceMonitor(t interface { + mock.TestingT + Cleanup(func()) +}) *BalanceMonitor { + mock := &BalanceMonitor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/mocks/node.go b/core/chains/evm/mocks/node.go new file mode 100644 index 00000000..0de3d491 --- /dev/null +++ b/core/chains/evm/mocks/node.go @@ -0,0 +1,881 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + client "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + + context "context" + + ethereum "github.com/ethereum/go-ethereum" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + + mock "github.com/stretchr/testify/mock" + + rpc "github.com/ethereum/go-ethereum/rpc" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// Node is an autogenerated mock type for the Node type +type Node struct { + mock.Mock +} + +// BalanceAt provides a mock function with given fields: ctx, account, blockNumber +func (_m *Node) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) { + ret := _m.Called(ctx, account, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for BalanceAt") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) (*big.Int, error)); ok { + return rf(ctx, account, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) *big.Int); ok { + r0 = rf(ctx, account, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, account, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BatchCallContext provides a mock function with given fields: ctx, b +func (_m *Node) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + ret := _m.Called(ctx, b) + + if len(ret) == 0 { + panic("no return value specified for BatchCallContext") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []rpc.BatchElem) error); ok { + r0 = rf(ctx, b) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *Node) BlockByHash(ctx context.Context, hash 
common.Hash) (*types.Block, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for BlockByHash") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Block, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Block); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockByNumber provides a mock function with given fields: ctx, number +func (_m *Node) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for BlockByNumber") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Block, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Block); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockNumber provides a mock function with given fields: ctx +func (_m *Node) BlockNumber(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + +// CallContext provides a mock function with given fields: ctx, result, method, args +func (_m *Node) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { + var _ca []interface{} + _ca = append(_ca, ctx, result, method) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CallContext") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}, string, ...interface{}) error); ok { + r0 = rf(ctx, result, method, args...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CallContract provides a mock function with given fields: ctx, msg, blockNumber +func (_m *Node) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, msg, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)); ok { + return rf(ctx, msg, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) []byte); ok { + r0 = rf(ctx, msg, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg, *big.Int) error); ok { + r1 = rf(ctx, msg, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ChainID provides a mock function with given fields: +func (_m *Node) ChainID() *big.Int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 *big.Int + if rf, ok := ret.Get(0).(func() *big.Int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + return r0 +} + +// Close provides a mock function with given fields: +func (_m *Node) Close() error { + ret := _m.Called() + + if 
len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CodeAt provides a mock function with given fields: ctx, account, blockNumber +func (_m *Node) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, account, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]byte, error)); ok { + return rf(ctx, account, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []byte); ok { + r0 = rf(ctx, account, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, account, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EstimateGas provides a mock function with given fields: ctx, call +func (_m *Node) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { + ret := _m.Called(ctx, call) + + if len(ret) == 0 { + panic("no return value specified for EstimateGas") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) (uint64, error)); ok { + return rf(ctx, call) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) uint64); ok { + r0 = rf(ctx, call) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg) error); ok { + r1 = rf(ctx, call) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthSubscribe provides a mock function with given fields: ctx, channel, args +func (_m *Node) EthSubscribe(ctx context.Context, channel chan<- *evmtypes.Head, args ...interface{}) (ethereum.Subscription, 
error) { + var _ca []interface{} + _ca = append(_ca, ctx, channel) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for EthSubscribe") + } + + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, chan<- *evmtypes.Head, ...interface{}) (ethereum.Subscription, error)); ok { + return rf(ctx, channel, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, chan<- *evmtypes.Head, ...interface{}) ethereum.Subscription); ok { + r0 = rf(ctx, channel, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, chan<- *evmtypes.Head, ...interface{}) error); ok { + r1 = rf(ctx, channel, args...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterLogs provides a mock function with given fields: ctx, q +func (_m *Node) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + ret := _m.Called(ctx, q) + + if len(ret) == 0 { + panic("no return value specified for FilterLogs") + } + + var r0 []types.Log + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]types.Log, error)); ok { + return rf(ctx, q) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []types.Log); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Log) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HeaderByHash provides a mock function with given fields: _a0, _a1 +func (_m *Node) HeaderByHash(_a0 context.Context, _a1 common.Hash) (*types.Header, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for HeaderByHash") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) 
(*types.Header, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Header); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HeaderByNumber provides a mock function with given fields: _a0, _a1 +func (_m *Node) HeaderByNumber(_a0 context.Context, _a1 *big.Int) (*types.Header, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Name provides a mock function with given fields: +func (_m *Node) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// NonceAt provides a mock function with given fields: ctx, account, blockNumber +func (_m *Node) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) { + ret := _m.Called(ctx, account, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for NonceAt") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) (uint64, error)); ok { + return rf(ctx, account, blockNumber) + } + if rf, ok := 
ret.Get(0).(func(context.Context, common.Address, *big.Int) uint64); ok { + r0 = rf(ctx, account, blockNumber) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, account, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Order provides a mock function with given fields: +func (_m *Node) Order() int32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Order") + } + + var r0 int32 + if rf, ok := ret.Get(0).(func() int32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int32) + } + + return r0 +} + +// PendingCallContract provides a mock function with given fields: ctx, msg +func (_m *Node) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) { + ret := _m.Called(ctx, msg) + + if len(ret) == 0 { + panic("no return value specified for PendingCallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) ([]byte, error)); ok { + return rf(ctx, msg) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) []byte); ok { + r0 = rf(ctx, msg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg) error); ok { + r1 = rf(ctx, msg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PendingCodeAt provides a mock function with given fields: ctx, account +func (_m *Node) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingCodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) ([]byte, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) []byte); ok { + r0 = rf(ctx, account) + } else { + if ret.Get(0) != 
nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PendingNonceAt provides a mock function with given fields: ctx, account +func (_m *Node) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingNonceAt") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) (uint64, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) uint64); ok { + r0 = rf(ctx, account) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendTransaction provides a mock function with given fields: ctx, tx +func (_m *Node) SendTransaction(ctx context.Context, tx *types.Transaction) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: ctx +func (_m *Node) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// State provides a mock function with given fields: +func (_m *Node) State() client.NodeState { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for State") + } + + var r0 client.NodeState + if rf, ok := ret.Get(0).(func() 
client.NodeState); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.NodeState) + } + + return r0 +} + +// StateAndLatest provides a mock function with given fields: +func (_m *Node) StateAndLatest() (client.NodeState, int64, *big.Int) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for StateAndLatest") + } + + var r0 client.NodeState + var r1 int64 + var r2 *big.Int + if rf, ok := ret.Get(0).(func() (client.NodeState, int64, *big.Int)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() client.NodeState); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.NodeState) + } + + if rf, ok := ret.Get(1).(func() int64); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(int64) + } + + if rf, ok := ret.Get(2).(func() *big.Int); ok { + r2 = rf() + } else { + if ret.Get(2) != nil { + r2 = ret.Get(2).(*big.Int) + } + } + + return r0, r1, r2 +} + +// String provides a mock function with given fields: +func (_m *Node) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch +func (_m *Node) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + ret := _m.Called(ctx, q, ch) + + if len(ret) == 0 { + panic("no return value specified for SubscribeFilterLogs") + } + + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)); ok { + return rf(ctx, q, ch) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) ethereum.Subscription); ok { + r0 = rf(ctx, q, ch) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) error); ok { + r1 = rf(ctx, q, ch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribersCount provides a mock function with given fields: +func (_m *Node) SubscribersCount() int32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SubscribersCount") + } + + var r0 int32 + if rf, ok := ret.Get(0).(func() int32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int32) + } + + return r0 +} + +// SuggestGasPrice provides a mock function with given fields: ctx +func (_m *Node) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SuggestGasPrice") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SuggestGasTipCap provides a mock function with given fields: ctx +func (_m *Node) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SuggestGasTipCap") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionByHash provides a mock function with given fields: ctx, txHash +func (_m *Node) TransactionByHash(ctx context.Context, 
txHash common.Hash) (*types.Transaction, error) { + ret := _m.Called(ctx, txHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionByHash") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Transaction, error)); ok { + return rf(ctx, txHash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Transaction); ok { + r0 = rf(ctx, txHash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, txHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionReceipt provides a mock function with given fields: ctx, txHash +func (_m *Node) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { + ret := _m.Called(ctx, txHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionReceipt") + } + + var r0 *types.Receipt + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Receipt, error)); ok { + return rf(ctx, txHash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Receipt); ok { + r0 = rf(ctx, txHash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Receipt) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, txHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UnsubscribeAllExceptAliveLoop provides a mock function with given fields: +func (_m *Node) UnsubscribeAllExceptAliveLoop() { + _m.Called() +} + +// NewNode creates a new instance of Node. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewNode(t interface { + mock.TestingT + Cleanup(func()) +}) *Node { + mock := &Node{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/mocks/send_only_node.go b/core/chains/evm/mocks/send_only_node.go new file mode 100644 index 00000000..352c618e --- /dev/null +++ b/core/chains/evm/mocks/send_only_node.go @@ -0,0 +1,181 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + client "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + + mock "github.com/stretchr/testify/mock" + + rpc "github.com/ethereum/go-ethereum/rpc" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// SendOnlyNode is an autogenerated mock type for the SendOnlyNode type +type SendOnlyNode struct { + mock.Mock +} + +// BatchCallContext provides a mock function with given fields: ctx, b +func (_m *SendOnlyNode) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + ret := _m.Called(ctx, b) + + if len(ret) == 0 { + panic("no return value specified for BatchCallContext") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []rpc.BatchElem) error); ok { + r0 = rf(ctx, b) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ChainID provides a mock function with given fields: +func (_m *SendOnlyNode) ChainID() *big.Int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 *big.Int + if rf, ok := ret.Get(0).(func() *big.Int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + return r0 +} + +// Close provides a mock function with given fields: +func (_m *SendOnlyNode) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// 
Name provides a mock function with given fields: +func (_m *SendOnlyNode) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// SendTransaction provides a mock function with given fields: ctx, tx +func (_m *SendOnlyNode) SendTransaction(ctx context.Context, tx *types.Transaction) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *SendOnlyNode) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// State provides a mock function with given fields: +func (_m *SendOnlyNode) State() client.NodeState { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for State") + } + + var r0 client.NodeState + if rf, ok := ret.Get(0).(func() client.NodeState); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.NodeState) + } + + return r0 +} + +// String provides a mock function with given fields: +func (_m *SendOnlyNode) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// NewSendOnlyNode creates a new instance of SendOnlyNode. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSendOnlyNode(t interface { + mock.TestingT + Cleanup(func()) +}) *SendOnlyNode { + mock := &SendOnlyNode{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/monitor/balance.go b/core/chains/evm/monitor/balance.go new file mode 100644 index 00000000..db7622c3 --- /dev/null +++ b/core/chains/evm/monitor/balance.go @@ -0,0 +1,237 @@ +package monitor + +import ( + "context" + "fmt" + "math" + "math/big" + "sync" + "time" + + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" +) + +//go:generate mockery --quiet --name BalanceMonitor --output ../mocks/ --case=underscore +type ( + // BalanceMonitor checks the balance for each key on every new head + BalanceMonitor interface { + httypes.HeadTrackable + GetEthBalance(gethCommon.Address) *assets.Eth + services.Service + } + + balanceMonitor struct { + services.StateMachine + logger logger.Logger + ethClient evmclient.Client + chainID *big.Int + chainIDStr string + ethKeyStore keystore.Eth + ethBalances map[gethCommon.Address]*assets.Eth + ethBalancesMtx *sync.RWMutex + sleeperTask *utils.SleeperTask + } + + NullBalanceMonitor struct{} +) + +var _ BalanceMonitor = 
(*balanceMonitor)(nil) + +// NewBalanceMonitor returns a new balanceMonitor +func NewBalanceMonitor(ethClient evmclient.Client, ethKeyStore keystore.Eth, lggr logger.Logger) *balanceMonitor { + chainId := ethClient.ConfiguredChainID() + bm := &balanceMonitor{ + services.StateMachine{}, + logger.Named(lggr, "BalanceMonitor"), + ethClient, + chainId, + chainId.String(), + ethKeyStore, + make(map[gethCommon.Address]*assets.Eth), + new(sync.RWMutex), + nil, + } + bm.sleeperTask = utils.NewSleeperTask(&worker{bm: bm}) + return bm +} + +func (bm *balanceMonitor) Start(ctx context.Context) error { + return bm.StartOnce("BalanceMonitor", func() error { + // Always query latest balance on start + (&worker{bm}).WorkCtx(ctx) + return nil + }) +} + +// Close shuts down the BalanceMonitor, should not be used after this +func (bm *balanceMonitor) Close() error { + return bm.StopOnce("BalanceMonitor", func() error { + return bm.sleeperTask.Stop() + }) +} + +func (bm *balanceMonitor) Ready() error { + return nil +} + +func (bm *balanceMonitor) Name() string { + return bm.logger.Name() +} + +func (bm *balanceMonitor) HealthReport() map[string]error { + return map[string]error{bm.Name(): bm.Healthy()} +} + +// OnNewLongestChain checks the balance for each key +func (bm *balanceMonitor) OnNewLongestChain(_ context.Context, head *evmtypes.Head) { + ok := bm.IfStarted(func() { + bm.checkBalance(head) + }) + if !ok { + bm.logger.Debugw("BalanceMonitor: ignoring OnNewLongestChain call, balance monitor is not started", "state", bm.State()) + } + +} + +func (bm *balanceMonitor) checkBalance(head *evmtypes.Head) { + bm.logger.Debugw("BalanceMonitor: signalling balance worker") + bm.sleeperTask.WakeUp() +} + +func (bm *balanceMonitor) updateBalance(ethBal assets.Eth, address gethCommon.Address) { + bm.promUpdateEthBalance(ðBal, address) + + bm.ethBalancesMtx.Lock() + oldBal := bm.ethBalances[address] + bm.ethBalances[address] = ðBal + bm.ethBalancesMtx.Unlock() + + lgr := 
logger.Named(bm.logger, "BalanceLog") + lgr = logger.With(lgr, + "address", address.Hex(), + "ethBalance", ethBal.String(), + "weiBalance", ethBal.ToInt()) + + if oldBal == nil { + lgr.Infof("ETH balance for %s: %s", address.Hex(), ethBal.String()) + return + } + + if ethBal.Cmp(oldBal) != 0 { + lgr.Infof("New ETH balance for %s: %s", address.Hex(), ethBal.String()) + } +} + +func (bm *balanceMonitor) GetEthBalance(address gethCommon.Address) *assets.Eth { + bm.ethBalancesMtx.RLock() + defer bm.ethBalancesMtx.RUnlock() + return bm.ethBalances[address] +} + +var promETHBalance = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "eth_balance", + Help: "Each Ethereum account's balance", + }, + []string{"account", "evmChainID"}, +) + +func (bm *balanceMonitor) promUpdateEthBalance(balance *assets.Eth, from gethCommon.Address) { + balanceFloat, err := ApproximateFloat64(balance) + + if err != nil { + bm.logger.Error(fmt.Errorf("updatePrometheusEthBalance: %v", err)) + return + } + + promETHBalance.WithLabelValues(from.Hex(), bm.chainIDStr).Set(balanceFloat) +} + +type worker struct { + bm *balanceMonitor +} + +func (*worker) Name() string { + return "BalanceMonitorWorker" +} + +func (w *worker) Work() { + // Used with SleeperTask + w.WorkCtx(context.Background()) +} + +func (w *worker) WorkCtx(ctx context.Context) { + enabledAddresses, err := w.bm.ethKeyStore.EnabledAddressesForChain(w.bm.chainID) + if err != nil { + w.bm.logger.Error("BalanceMonitor: error getting keys", err) + } + + var wg sync.WaitGroup + + wg.Add(len(enabledAddresses)) + for _, address := range enabledAddresses { + go func(k gethCommon.Address) { + defer wg.Done() + w.checkAccountBalance(ctx, k) + }(address) + } + wg.Wait() +} + +// Approximately ETH block time +const ethFetchTimeout = 15 * time.Second + +func (w *worker) checkAccountBalance(ctx context.Context, address gethCommon.Address) { + ctx, cancel := context.WithTimeout(ctx, ethFetchTimeout) + defer cancel() + + bal, err := 
w.bm.ethClient.BalanceAt(ctx, address, nil) + if err != nil { + w.bm.logger.Errorw(fmt.Sprintf("BalanceMonitor: error getting balance for key %s", address.Hex()), + "err", err, + "address", address, + ) + } else if bal == nil { + w.bm.logger.Errorw(fmt.Sprintf("BalanceMonitor: error getting balance for key %s: invariant violation, bal may not be nil", address.Hex()), + "err", err, + "address", address, + ) + } else { + ethBal := assets.Eth(*bal) + w.bm.updateBalance(ethBal, address) + } +} + +func (*NullBalanceMonitor) GetEthBalance(gethCommon.Address) *assets.Eth { + return nil +} + +// Start does noop for NullBalanceMonitor. +func (*NullBalanceMonitor) Start(context.Context) error { return nil } +func (*NullBalanceMonitor) Close() error { return nil } +func (*NullBalanceMonitor) Ready() error { return nil } +func (*NullBalanceMonitor) OnNewLongestChain(ctx context.Context, head *evmtypes.Head) {} + +func ApproximateFloat64(e *assets.Eth) (float64, error) { + ef := new(big.Float).SetInt(e.ToInt()) + weif := new(big.Float).SetInt(evmtypes.WeiPerEth) + bf := new(big.Float).Quo(ef, weif) + f64, _ := bf.Float64() + if f64 == math.Inf(1) || f64 == math.Inf(-1) { + return math.Inf(1), errors.New("assets.Eth.Float64: Could not approximate Eth value into float") + } + return f64, nil +} diff --git a/core/chains/evm/monitor/balance_helpers_test.go b/core/chains/evm/monitor/balance_helpers_test.go new file mode 100644 index 00000000..ed949882 --- /dev/null +++ b/core/chains/evm/monitor/balance_helpers_test.go @@ -0,0 +1,5 @@ +package monitor + +func (bm *balanceMonitor) WorkDone() <-chan struct{} { + return bm.sleeperTask.WorkDone() +} diff --git a/core/chains/evm/monitor/balance_test.go b/core/chains/evm/monitor/balance_test.go new file mode 100644 index 00000000..b4de5e9e --- /dev/null +++ b/core/chains/evm/monitor/balance_test.go @@ -0,0 +1,262 @@ +package monitor_test + +import ( + "context" + "math/big" + "sync/atomic" + "testing" + "time" + + "github.com/onsi/gomega" 
+ "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/monitor" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" +) + +var nilBigInt *big.Int + +func newEthClientMock(t *testing.T) *evmclimocks.Client { + mockEth := evmclimocks.NewClient(t) + mockEth.On("ConfiguredChainID").Maybe().Return(big.NewInt(0)) + return mockEth +} + +func TestBalanceMonitor_Start(t *testing.T) { + t.Parallel() + + cfg := configtest.NewGeneralConfig(t, nil) + + t.Run("updates balance from nil for multiple keys", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := newEthClientMock(t) + _, k1Addr := cltest.MustInsertRandomKey(t, ethKeyStore) + _, k0Addr := cltest.MustInsertRandomKey(t, ethKeyStore) + + bm := monitor.NewBalanceMonitor(ethClient, ethKeyStore, logger.Test(t)) + + k0bal := big.NewInt(42) + k1bal := big.NewInt(43) + assert.Nil(t, bm.GetEthBalance(k0Addr)) + assert.Nil(t, bm.GetEthBalance(k1Addr)) + + ethClient.On("BalanceAt", mock.Anything, k0Addr, nilBigInt).Once().Return(k0bal, nil) + ethClient.On("BalanceAt", mock.Anything, k1Addr, nilBigInt).Once().Return(k1bal, nil) + + servicetest.RunHealthy(t, bm) + + gomega.NewWithT(t).Eventually(func() *big.Int { + return bm.GetEthBalance(k0Addr).ToInt() + }).Should(gomega.Equal(k0bal)) + gomega.NewWithT(t).Eventually(func() *big.Int { + return 
bm.GetEthBalance(k1Addr).ToInt() + }).Should(gomega.Equal(k1bal)) + }) + + t.Run("handles nil head", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := newEthClientMock(t) + + _, k0Addr := cltest.MustInsertRandomKey(t, ethKeyStore) + + bm := monitor.NewBalanceMonitor(ethClient, ethKeyStore, logger.Test(t)) + k0bal := big.NewInt(42) + + ethClient.On("BalanceAt", mock.Anything, k0Addr, nilBigInt).Once().Return(k0bal, nil) + + servicetest.RunHealthy(t, bm) + + gomega.NewWithT(t).Eventually(func() *big.Int { + return bm.GetEthBalance(k0Addr).ToInt() + }).Should(gomega.Equal(k0bal)) + }) + + t.Run("cancelled context", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := newEthClientMock(t) + + _, k0Addr := cltest.MustInsertRandomKey(t, ethKeyStore) + + bm := monitor.NewBalanceMonitor(ethClient, ethKeyStore, logger.Test(t)) + ctxCancelledAwaiter := cltest.NewAwaiter() + + ethClient.On("BalanceAt", mock.Anything, k0Addr, nilBigInt).Once().Run(func(args mock.Arguments) { + ctx := args.Get(0).(context.Context) + select { + case <-time.After(testutils.WaitTimeout(t)): + case <-ctx.Done(): + ctxCancelledAwaiter.ItHappened() + } + }).Return(nil, nil) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + go func() { + <-time.After(time.Second) + cancel() + }() + assert.NoError(t, bm.Start(ctx)) + + ctxCancelledAwaiter.AwaitOrFail(t) + }) + + t.Run("recovers on error", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := newEthClientMock(t) + + _, k0Addr := cltest.MustInsertRandomKey(t, ethKeyStore) + + bm := monitor.NewBalanceMonitor(ethClient, ethKeyStore, logger.Test(t)) + + ethClient.On("BalanceAt", mock.Anything, k0Addr, nilBigInt). + Once(). 
+ Return(nil, errors.New("a little easter egg for the 4chan link marines error")) + + servicetest.RunHealthy(t, bm) + + gomega.NewWithT(t).Consistently(func() *big.Int { + return bm.GetEthBalance(k0Addr).ToInt() + }).Should(gomega.BeNil()) + }) +} + +func TestBalanceMonitor_OnNewLongestChain_UpdatesBalance(t *testing.T) { + t.Parallel() + + cfg := configtest.NewGeneralConfig(t, nil) + + t.Run("updates balance for multiple keys", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := newEthClientMock(t) + + _, k0Addr := cltest.MustInsertRandomKey(t, ethKeyStore) + _, k1Addr := cltest.MustInsertRandomKey(t, ethKeyStore) + + bm := monitor.NewBalanceMonitor(ethClient, ethKeyStore, logger.Test(t)) + k0bal := big.NewInt(42) + // Deliberately larger than a 64 bit unsigned integer to test overflow + k1bal := big.NewInt(0) + k1bal.SetString("19223372036854776000", 10) + + head := cltest.Head(0) + + ethClient.On("BalanceAt", mock.Anything, k0Addr, nilBigInt).Once().Return(k0bal, nil) + ethClient.On("BalanceAt", mock.Anything, k1Addr, nilBigInt).Once().Return(k1bal, nil) + + servicetest.RunHealthy(t, bm) + + ethClient.On("BalanceAt", mock.Anything, k0Addr, nilBigInt).Once().Return(k0bal, nil) + ethClient.On("BalanceAt", mock.Anything, k1Addr, nilBigInt).Once().Return(k1bal, nil) + + // Do the thing + bm.OnNewLongestChain(testutils.Context(t), head) + + <-bm.WorkDone() + assert.Equal(t, k0bal, bm.GetEthBalance(k0Addr).ToInt()) + assert.Equal(t, k1bal, bm.GetEthBalance(k1Addr).ToInt()) + + // Do it again + k0bal2 := big.NewInt(142) + k1bal2 := big.NewInt(142) + + head = cltest.Head(1) + + ethClient.On("BalanceAt", mock.Anything, k0Addr, nilBigInt).Once().Return(k0bal2, nil) + ethClient.On("BalanceAt", mock.Anything, k1Addr, nilBigInt).Once().Return(k1bal2, nil) + + bm.OnNewLongestChain(testutils.Context(t), head) + + <-bm.WorkDone() + assert.Equal(t, k0bal2, bm.GetEthBalance(k0Addr).ToInt()) + 
assert.Equal(t, k1bal2, bm.GetEthBalance(k1Addr).ToInt()) + }) +} + +func TestBalanceMonitor_FewerRPCCallsWhenBehind(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + cltest.MustInsertRandomKey(t, ethKeyStore) + + ethClient := newEthClientMock(t) + + bm := monitor.NewBalanceMonitor(ethClient, ethKeyStore, logger.Test(t)) + ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything). + Once(). + Return(big.NewInt(1), nil) + servicetest.RunHealthy(t, bm) + + head := cltest.Head(0) + + // Only expect this twice, even though 10 heads will come in + mockUnblocker := make(chan time.Time) + ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything). + WaitUntil(mockUnblocker). + Once(). + Return(big.NewInt(42), nil) + // This second call is Maybe because the SleeperTask may not have started + // before we call `OnNewLongestChain` 10 times, in which case it's only + // executed once + var callCount atomic.Int32 + ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything). + Run(func(mock.Arguments) { callCount.Add(1) }). + Maybe(). 
+ Return(big.NewInt(42), nil) + + // Do the thing multiple times + for i := 0; i < 10; i++ { + bm.OnNewLongestChain(testutils.Context(t), head) + } + + // Unblock the first mock + cltest.CallbackOrTimeout(t, "FewerRPCCallsWhenBehind unblock BalanceAt", func() { + mockUnblocker <- time.Time{} + }) + + // Make sure the BalanceAt mock wasn't called more than once + assert.LessOrEqual(t, callCount.Load(), int32(1)) +} + +func Test_ApproximateFloat64(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + want float64 + wantError bool + }{ + {"zero", "0", 0, false}, + {"small", "1", 0.000000000000000001, false}, + {"rounding", "12345678901234567890", 12.345678901234567, false}, + {"large", "123456789012345678901234567890", 123456789012.34567, false}, + {"extreme", "1234567890123456789012345678901234567890123456789012345678901234567890", 1.2345678901234568e+51, false}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + eth := assets.NewEth(0) + eth.SetString(test.input, 10) + float, err := monitor.ApproximateFloat64(eth) + require.NoError(t, err) + require.Equal(t, test.want, float) + }) + } +} diff --git a/core/chains/evm/txmgr/attempts.go b/core/chains/evm/txmgr/attempts.go new file mode 100644 index 00000000..4674228a --- /dev/null +++ b/core/chains/evm/txmgr/attempts.go @@ -0,0 +1,333 @@ +package txmgr + +import ( + "bytes" + "context" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + feetypes "github.com/goplugin/pluginv3.0/v2/common/fee/types" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + commontypes "github.com/goplugin/pluginv3.0/v2/common/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +type 
TxAttemptSigner[ADDR commontypes.Hashable] interface { + SignTx(fromAddress ADDR, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) +} + +var _ TxAttemptBuilder = (*evmTxAttemptBuilder)(nil) + +type evmTxAttemptBuilder struct { + chainID big.Int + feeConfig evmTxAttemptBuilderFeeConfig + keystore TxAttemptSigner[common.Address] + gas.EvmFeeEstimator +} + +type evmTxAttemptBuilderFeeConfig interface { + EIP1559DynamicFees() bool + TipCapMin() *assets.Wei + PriceMin() *assets.Wei + PriceMaxKey(common.Address) *assets.Wei +} + +func NewEvmTxAttemptBuilder(chainID big.Int, feeConfig evmTxAttemptBuilderFeeConfig, keystore TxAttemptSigner[common.Address], estimator gas.EvmFeeEstimator) *evmTxAttemptBuilder { + return &evmTxAttemptBuilder{chainID, feeConfig, keystore, estimator} +} + +// NewTxAttempt builds an new attempt using the configured fee estimator + using the EIP1559 config to determine tx type +// used for when a brand new transaction is being created in the txm +func (c *evmTxAttemptBuilder) NewTxAttempt(ctx context.Context, etx Tx, lggr logger.Logger, opts ...feetypes.Opt) (attempt TxAttempt, fee gas.EvmFee, feeLimit uint32, retryable bool, err error) { + txType := 0x0 + if c.feeConfig.EIP1559DynamicFees() { + txType = 0x2 + } + return c.NewTxAttemptWithType(ctx, etx, lggr, txType, opts...) 
+} + +// NewTxAttemptWithType builds a new attempt with a new fee estimation where the txType can be specified by the caller +// used for L2 re-estimation on broadcasting (note EIP1559 must be disabled otherwise this will fail with mismatched fees + tx type) +func (c *evmTxAttemptBuilder) NewTxAttemptWithType(ctx context.Context, etx Tx, lggr logger.Logger, txType int, opts ...feetypes.Opt) (attempt TxAttempt, fee gas.EvmFee, feeLimit uint32, retryable bool, err error) { + keySpecificMaxGasPriceWei := c.feeConfig.PriceMaxKey(etx.FromAddress) + fee, feeLimit, err = c.EvmFeeEstimator.GetFee(ctx, etx.EncodedPayload, etx.FeeLimit, keySpecificMaxGasPriceWei, opts...) + if err != nil { + return attempt, fee, feeLimit, true, errors.Wrap(err, "failed to get fee") // estimator errors are retryable + } + + attempt, retryable, err = c.NewCustomTxAttempt(etx, fee, feeLimit, txType, lggr) + return attempt, fee, feeLimit, retryable, err +} + +// NewBumpTxAttempt builds a new attempt with a bumped fee - based on the previous attempt tx type +// used in the txm broadcaster + confirmer when tx ix rejected for too low fee or is not included in a timely manner +func (c *evmTxAttemptBuilder) NewBumpTxAttempt(ctx context.Context, etx Tx, previousAttempt TxAttempt, priorAttempts []TxAttempt, lggr logger.Logger) (attempt TxAttempt, bumpedFee gas.EvmFee, bumpedFeeLimit uint32, retryable bool, err error) { + keySpecificMaxGasPriceWei := c.feeConfig.PriceMaxKey(etx.FromAddress) + + bumpedFee, bumpedFeeLimit, err = c.EvmFeeEstimator.BumpFee(ctx, previousAttempt.TxFee, etx.FeeLimit, keySpecificMaxGasPriceWei, newEvmPriorAttempts(priorAttempts)) + if err != nil { + return attempt, bumpedFee, bumpedFeeLimit, true, errors.Wrap(err, "failed to bump fee") // estimator errors are retryable + } + + attempt, retryable, err = c.NewCustomTxAttempt(etx, bumpedFee, bumpedFeeLimit, previousAttempt.TxType, lggr) + return attempt, bumpedFee, bumpedFeeLimit, retryable, err +} + +// NewCustomTxAttempt is the 
lowest level func where the fee parameters + tx type must be passed in +// used in the txm for force rebroadcast where fees and tx type are pre-determined without an estimator +func (c *evmTxAttemptBuilder) NewCustomTxAttempt(etx Tx, fee gas.EvmFee, gasLimit uint32, txType int, lggr logger.Logger) (attempt TxAttempt, retryable bool, err error) { + switch txType { + case 0x0: // legacy + if fee.Legacy == nil { + err = errors.Errorf("Attempt %v is a type 0 transaction but estimator did not return legacy fee bump", attempt.ID) + logger.Sugared(lggr).AssumptionViolation(err.Error()) + return attempt, false, err // not retryable + } + attempt, err = c.newLegacyAttempt(etx, fee.Legacy, gasLimit) + return attempt, true, err + case 0x2: // dynamic, EIP1559 + if !fee.ValidDynamic() { + err = errors.Errorf("Attempt %v is a type 2 transaction but estimator did not return dynamic fee bump", attempt.ID) + logger.Sugared(lggr).AssumptionViolation(err.Error()) + return attempt, false, err // not retryable + } + attempt, err = c.newDynamicFeeAttempt(etx, gas.DynamicFee{ + FeeCap: fee.DynamicFeeCap, + TipCap: fee.DynamicTipCap, + }, gasLimit) + return attempt, true, err + default: + err = errors.Errorf("invariant violation: Attempt %v had unrecognised transaction type %v"+ + "This is a bug! 
Please report to https://github.com/goplugin/pluginv3.0/issues", attempt.ID, attempt.TxType) + logger.Sugared(lggr).AssumptionViolation(err.Error()) + return attempt, false, err // not retryable + } +} + +// NewEmptyTxAttempt is used in ForceRebroadcast to create a signed tx with zero value sent to the zero address +func (c *evmTxAttemptBuilder) NewEmptyTxAttempt(nonce evmtypes.Nonce, feeLimit uint32, fee gas.EvmFee, fromAddress common.Address) (attempt TxAttempt, err error) { + value := big.NewInt(0) + payload := []byte{} + + if fee.Legacy == nil { + return attempt, errors.New("NewEmptyTranscation: legacy fee cannot be nil") + } + + tx := newLegacyTransaction( + uint64(nonce), + fromAddress, + value, + feeLimit, + fee.Legacy, + payload, + ) + + transaction := types.NewTx(&tx) + hash, signedTxBytes, err := c.SignTx(fromAddress, transaction) + if err != nil { + return attempt, errors.Wrapf(err, "error using account %s to sign empty transaction", fromAddress.String()) + } + + attempt.SignedRawTx = signedTxBytes + attempt.Hash = hash + return attempt, nil + +} + +func (c *evmTxAttemptBuilder) newDynamicFeeAttempt(etx Tx, fee gas.DynamicFee, gasLimit uint32) (attempt TxAttempt, err error) { + if err = validateDynamicFeeGas(c.feeConfig, c.feeConfig.TipCapMin(), fee, gasLimit, etx); err != nil { + return attempt, errors.Wrap(err, "error validating gas") + } + + d := newDynamicFeeTransaction( + uint64(*etx.Sequence), + etx.ToAddress, + &etx.Value, + gasLimit, + &c.chainID, + fee.TipCap, + fee.FeeCap, + etx.EncodedPayload, + ) + tx := types.NewTx(&d) + attempt, err = c.newSignedAttempt(etx, tx) + if err != nil { + return attempt, err + } + attempt.TxFee = gas.EvmFee{ + DynamicFeeCap: fee.FeeCap, + DynamicTipCap: fee.TipCap, + } + attempt.ChainSpecificFeeLimit = gasLimit + attempt.TxType = 2 + return attempt, nil +} + +var Max256BitUInt = big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), nil) + +type keySpecificEstimator interface { + PriceMaxKey(addr common.Address) 
*assets.Wei
+}
+
+// validateDynamicFeeGas is a sanity check - we have other checks elsewhere, but this
+// makes sure we _never_ create an invalid attempt
+func validateDynamicFeeGas(kse keySpecificEstimator, tipCapMinimum *assets.Wei, fee gas.DynamicFee, gasLimit uint32, etx Tx) error {
+	gasTipCap, gasFeeCap := fee.TipCap, fee.FeeCap
+
+	// nil caps indicate a programmer error upstream; panic per the sanity-check convention.
+	if gasTipCap == nil {
+		panic("gas tip cap missing")
+	}
+	if gasFeeCap == nil {
+		panic("gas fee cap missing")
+	}
+	// Assertions from: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md
+	// Prevent impossibly large numbers
+	if gasFeeCap.ToInt().Cmp(Max256BitUInt) > 0 {
+		return errors.New("impossibly large fee cap")
+	}
+	if gasTipCap.ToInt().Cmp(Max256BitUInt) > 0 {
+		return errors.New("impossibly large tip cap")
+	}
+	// The total must be at least as large as the tip
+	if gasFeeCap.Cmp(gasTipCap) < 0 {
+		return errors.Errorf("gas fee cap must be greater than or equal to gas tip cap (fee cap: %s, tip cap: %s)", gasFeeCap.String(), gasTipCap.String())
+	}
+
+	// Configuration sanity-check. (Local renamed from "max" to avoid shadowing the Go 1.21 builtin.)
+	maxGasPrice := kse.PriceMaxKey(etx.FromAddress)
+	if gasFeeCap.Cmp(maxGasPrice) > 0 {
+		return errors.Errorf("cannot create tx attempt: specified gas fee cap of %s would exceed max configured gas price of %s for key %s", gasFeeCap.String(), maxGasPrice.String(), etx.FromAddress.String())
+	}
+	// Tip must be above minimum
+	if gasTipCap.Cmp(tipCapMinimum) < 0 {
+		return errors.Errorf("cannot create tx attempt: specified gas tip cap of %s is below min configured gas tip of %s for key %s", gasTipCap.String(), tipCapMinimum.String(), etx.FromAddress.String())
+	}
+	return nil
+}
+
+// newDynamicFeeTransaction assembles an unsigned EIP-1559 (type 2) geth transaction.
+func newDynamicFeeTransaction(nonce uint64, to common.Address, value *big.Int, gasLimit uint32, chainID *big.Int, gasTipCap, gasFeeCap *assets.Wei, data []byte) types.DynamicFeeTx {
+	return types.DynamicFeeTx{
+		ChainID:   chainID,
+		Nonce:     nonce,
+		GasTipCap: gasTipCap.ToInt(),
+		GasFeeCap: gasFeeCap.ToInt(),
+		Gas:       uint64(gasLimit),
+		To:        &to,
+		Value:     value,
+		Data:      data,
+	}
+}
+
+// newLegacyAttempt builds and signs a legacy (type 0) attempt for etx,
+// validating the gas price against the configured min/max first.
+func (c *evmTxAttemptBuilder) newLegacyAttempt(etx Tx, gasPrice *assets.Wei, gasLimit uint32) (attempt TxAttempt, err error) {
+	if err = validateLegacyGas(c.feeConfig, c.feeConfig.PriceMin(), gasPrice, gasLimit, etx); err != nil {
+		return attempt, errors.Wrap(err, "error validating gas")
+	}
+
+	tx := newLegacyTransaction(
+		uint64(*etx.Sequence),
+		etx.ToAddress,
+		&etx.Value,
+		gasLimit,
+		gasPrice,
+		etx.EncodedPayload,
+	)
+
+	transaction := types.NewTx(&tx)
+	hash, signedTxBytes, err := c.SignTx(etx.FromAddress, transaction)
+	if err != nil {
+		return attempt, errors.Wrapf(err, "error using account %s to sign transaction %v", etx.FromAddress, etx.ID)
+	}
+
+	attempt.State = txmgrtypes.TxAttemptInProgress
+	attempt.SignedRawTx = signedTxBytes
+	attempt.TxID = etx.ID
+	attempt.TxFee = gas.EvmFee{Legacy: gasPrice}
+	attempt.Hash = hash
+	attempt.TxType = 0
+	attempt.ChainSpecificFeeLimit = gasLimit
+	attempt.Tx = etx
+
+	return attempt, nil
+}
+
+// validateLegacyGas is a sanity check - we have other checks elsewhere, but this
+// makes sure we _never_ create an invalid attempt
+func validateLegacyGas(kse keySpecificEstimator, minGasPriceWei, gasPrice *assets.Wei, gasLimit uint32, etx Tx) error {
+	// A nil price indicates a programmer error upstream; panic per the sanity-check convention.
+	if gasPrice == nil {
+		panic("gas price missing")
+	}
+	// Locals renamed from "max"/"min" to avoid shadowing the Go 1.21 builtins.
+	maxGasPrice := kse.PriceMaxKey(etx.FromAddress)
+	if gasPrice.Cmp(maxGasPrice) > 0 {
+		return errors.Errorf("cannot create tx attempt: specified gas price of %s would exceed max configured gas price of %s for key %s", gasPrice.String(), maxGasPrice.String(), etx.FromAddress.String())
+	}
+	if gasPrice.Cmp(minGasPriceWei) < 0 {
+		return errors.Errorf("cannot create tx attempt: specified gas price of %s is below min configured gas price of %s for key %s", gasPrice.String(), minGasPriceWei.String(), etx.FromAddress.String())
+	}
+	return nil
+}
+
+// newSignedAttempt signs tx and populates the common attempt fields shared by
+// legacy and dynamic-fee attempts; callers fill in fee-specific fields.
+func (c *evmTxAttemptBuilder) newSignedAttempt(etx Tx, tx *types.Transaction) (attempt TxAttempt, err error) {
+	hash, signedTxBytes, err := c.SignTx(etx.FromAddress, tx) 
+	if err != nil {
+		return attempt, errors.Wrapf(err, "error using account %s to sign transaction %v", etx.FromAddress.String(), etx.ID)
+	}
+
+	attempt.State = txmgrtypes.TxAttemptInProgress
+	attempt.SignedRawTx = signedTxBytes
+	attempt.TxID = etx.ID
+	attempt.Tx = etx
+	attempt.Hash = hash
+
+	return attempt, nil
+}
+
+// newLegacyTransaction assembles an unsigned legacy (type 0) geth transaction.
+func newLegacyTransaction(nonce uint64, to common.Address, value *big.Int, gasLimit uint32, gasPrice *assets.Wei, data []byte) types.LegacyTx {
+	return types.LegacyTx{
+		Nonce:    nonce,
+		To:       &to,
+		Value:    value,
+		Gas:      uint64(gasLimit),
+		GasPrice: gasPrice.ToInt(),
+		Data:     data,
+	}
+}
+
+// SignTx signs tx with the key for address and returns the tx hash together
+// with the RLP-encoded signed transaction bytes.
+func (c *evmTxAttemptBuilder) SignTx(address common.Address, tx *types.Transaction) (common.Hash, []byte, error) {
+	signedTx, err := c.keystore.SignTx(address, tx, &c.chainID)
+	if err != nil {
+		return common.Hash{}, nil, fmt.Errorf("failed to sign tx: %w", err)
+	}
+	rlp := new(bytes.Buffer)
+	if err := signedTx.EncodeRLP(rlp); err != nil {
+		return common.Hash{}, nil, errors.Wrap(err, "SignTx failed")
+	}
+	txHash := signedTx.Hash()
+	return txHash, rlp.Bytes(), nil
+}
+
+// newEvmPriorAttempts converts tx attempts into the fee estimator's
+// prior-attempt form, preserving input order.
+func newEvmPriorAttempts(attempts []TxAttempt) (prior []gas.EvmPriorAttempt) {
+	if len(attempts) == 0 {
+		// Preserve nil return for empty input.
+		return nil
+	}
+	// Pre-size to avoid repeated slice growth.
+	prior = make([]gas.EvmPriorAttempt, 0, len(attempts))
+	for i := range attempts {
+		priorAttempt := gas.EvmPriorAttempt{
+			ChainSpecificFeeLimit:   attempts[i].ChainSpecificFeeLimit,
+			BroadcastBeforeBlockNum: attempts[i].BroadcastBeforeBlockNum,
+			TxHash:                  attempts[i].Hash,
+			TxType:                  attempts[i].TxType,
+			GasPrice:                attempts[i].TxFee.Legacy,
+			DynamicFee: gas.DynamicFee{
+				FeeCap: attempts[i].TxFee.DynamicFeeCap,
+				TipCap: attempts[i].TxFee.DynamicTipCap,
+			},
+		}
+		prior = append(prior, priorAttempt)
+	}
+	return
+}
diff --git a/core/chains/evm/txmgr/attempts_test.go b/core/chains/evm/txmgr/attempts_test.go
new file mode 100644
index 00000000..b0b747c9
--- /dev/null
+++ b/core/chains/evm/txmgr/attempts_test.go
@@ -0,0 +1,286 @@
+package txmgr_test
+
+import (
+	"fmt"
+	"math/big"
+	"testing"
+
+	gethcommon "github.com/ethereum/go-ethereum/common"
+	
"github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + gethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + gasmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + ksmocks "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" +) + +func NewEvmAddress() gethcommon.Address { + return testutils.NewAddress() +} + +type feeConfig struct { + eip1559DynamicFees bool + tipCapMin *assets.Wei + priceMin *assets.Wei + priceMax *assets.Wei +} + +func newFeeConfig() *feeConfig { + return &feeConfig{ + tipCapMin: assets.NewWeiI(0), + priceMin: assets.NewWeiI(0), + priceMax: assets.NewWeiI(0), + } +} + +func (g *feeConfig) EIP1559DynamicFees() bool { return g.eip1559DynamicFees } +func (g *feeConfig) TipCapMin() *assets.Wei { return g.tipCapMin } +func (g *feeConfig) PriceMin() *assets.Wei { return g.priceMin } +func (g *feeConfig) PriceMaxKey(addr gethcommon.Address) *assets.Wei { return g.priceMax } + +func TestTxm_SignTx(t *testing.T) { + t.Parallel() + + addr := gethcommon.HexToAddress("0xb921F7763960b296B9cbAD586ff066A18D749724") + to := gethcommon.HexToAddress("0xb921F7763960b296B9cbAD586ff066A18D749724") + tx := gethtypes.NewTx(&gethtypes.LegacyTx{ + Nonce: 42, + To: &to, + Value: big.NewInt(142), + Gas: 
242, + GasPrice: big.NewInt(342), + Data: []byte{1, 2, 3}, + }) + + t.Run("returns correct hash for non-okex chains", func(t *testing.T) { + chainID := big.NewInt(1) + kst := ksmocks.NewEth(t) + kst.On("SignTx", to, tx, chainID).Return(tx, nil).Once() + cks := txmgr.NewEvmTxAttemptBuilder(*chainID, newFeeConfig(), kst, nil) + hash, rawBytes, err := cks.SignTx(addr, tx) + require.NoError(t, err) + require.NotNil(t, rawBytes) + require.Equal(t, "0xdd68f554373fdea7ec6713a6e437e7646465d553a6aa0b43233093366cc87ef0", hash.String()) + }) + // okex used to have a custom hash but now this just verifies that is it the same + t.Run("returns correct hash for okex chains", func(t *testing.T) { + chainID := big.NewInt(1) + kst := ksmocks.NewEth(t) + kst.On("SignTx", to, tx, chainID).Return(tx, nil).Once() + cks := txmgr.NewEvmTxAttemptBuilder(*chainID, newFeeConfig(), kst, nil) + hash, rawBytes, err := cks.SignTx(addr, tx) + require.NoError(t, err) + require.NotNil(t, rawBytes) + require.Equal(t, "0xdd68f554373fdea7ec6713a6e437e7646465d553a6aa0b43233093366cc87ef0", hash.String()) + }) + t.Run("can properly encoded and decode raw transaction for LegacyTx", func(t *testing.T) { + chainID := big.NewInt(1) + kst := ksmocks.NewEth(t) + kst.On("SignTx", to, tx, chainID).Return(tx, nil).Once() + cks := txmgr.NewEvmTxAttemptBuilder(*chainID, newFeeConfig(), kst, nil) + + _, rawBytes, err := cks.SignTx(addr, tx) + require.NoError(t, err) + require.NotNil(t, rawBytes) + require.Equal(t, "0xe42a82015681f294b921f7763960b296b9cbad586ff066a18d749724818e83010203808080", hexutil.Encode(rawBytes)) + + var decodedTx *gethtypes.Transaction + decodedTx, err = txmgr.GetGethSignedTx(rawBytes) + require.NoError(t, err) + require.Equal(t, tx.Hash(), decodedTx.Hash()) + }) + t.Run("can properly encoded and decode raw transaction for DynamicFeeTx", func(t *testing.T) { + chainID := big.NewInt(1) + kst := ksmocks.NewEth(t) + typedTx := gethtypes.NewTx(&gethtypes.DynamicFeeTx{ + Nonce: 42, + To: &to, + 
Value: big.NewInt(142), + Gas: 242, + Data: []byte{1, 2, 3}, + }) + kst.On("SignTx", to, typedTx, chainID).Return(typedTx, nil).Once() + cks := txmgr.NewEvmTxAttemptBuilder(*chainID, newFeeConfig(), kst, nil) + _, rawBytes, err := cks.SignTx(addr, typedTx) + require.NoError(t, err) + require.NotNil(t, rawBytes) + require.Equal(t, "0xa702e5802a808081f294b921f7763960b296b9cbad586ff066a18d749724818e83010203c0808080", hexutil.Encode(rawBytes)) + + var decodedTx *gethtypes.Transaction + decodedTx, err = txmgr.GetGethSignedTx(rawBytes) + require.NoError(t, err) + require.Equal(t, typedTx.Hash(), decodedTx.Hash()) + }) +} + +func TestTxm_NewDynamicFeeTx(t *testing.T) { + addr := NewEvmAddress() + tx := types.NewTx(&types.DynamicFeeTx{}) + kst := ksmocks.NewEth(t) + kst.On("SignTx", addr, mock.Anything, big.NewInt(1)).Return(tx, nil) + var n evmtypes.Nonce + lggr := logger.Test(t) + + t.Run("creates attempt with fields", func(t *testing.T) { + feeCfg := newFeeConfig() + feeCfg.priceMax = assets.GWei(200) + cks := txmgr.NewEvmTxAttemptBuilder(*big.NewInt(1), feeCfg, kst, nil) + dynamicFee := gas.DynamicFee{TipCap: assets.GWei(100), FeeCap: assets.GWei(200)} + a, _, err := cks.NewCustomTxAttempt(txmgr.Tx{Sequence: &n, FromAddress: addr}, gas.EvmFee{ + DynamicTipCap: dynamicFee.TipCap, + DynamicFeeCap: dynamicFee.FeeCap, + }, 100, 0x2, lggr) + require.NoError(t, err) + assert.Equal(t, 100, int(a.ChainSpecificFeeLimit)) + assert.Nil(t, a.TxFee.Legacy) + assert.NotNil(t, a.TxFee.DynamicTipCap) + assert.Equal(t, assets.GWei(100).String(), a.TxFee.DynamicTipCap.String()) + assert.NotNil(t, a.TxFee.DynamicFeeCap) + assert.Equal(t, assets.GWei(200).String(), a.TxFee.DynamicFeeCap.String()) + }) + + t.Run("verifies gas tip and fees", func(t *testing.T) { + tests := []struct { + name string + tipcap *assets.Wei + feecap *assets.Wei + setCfg func(*plugin.Config, *plugin.Secrets) + expectError string + }{ + {"gas tip = fee cap", assets.GWei(5), assets.GWei(5), nil, ""}, + {"gas tip < 
fee cap", assets.GWei(4), assets.GWei(5), nil, ""}, + {"gas tip > fee cap", assets.GWei(6), assets.GWei(5), nil, "gas fee cap must be greater than or equal to gas tip cap (fee cap: 5 gwei, tip cap: 6 gwei)"}, + {"fee cap exceeds max allowed", assets.GWei(5), assets.GWei(5), func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.PriceMax = assets.GWei(4) + }, "specified gas fee cap of 5 gwei would exceed max configured gas price of 4 gwei"}, + {"ignores global min gas price", assets.GWei(5), assets.GWei(5), func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.PriceMin = assets.GWei(6) + }, ""}, + {"tip cap below min allowed", assets.GWei(5), assets.GWei(5), func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.TipCapMin = assets.GWei(6) + }, "specified gas tip cap of 5 gwei is below min configured gas tip of 6 gwei"}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + gcfg := configtest.NewGeneralConfig(t, test.setCfg) + cfg := evmtest.NewChainScopedConfig(t, gcfg) + cks := txmgr.NewEvmTxAttemptBuilder(*big.NewInt(1), cfg.EVM().GasEstimator(), kst, nil) + dynamicFee := gas.DynamicFee{TipCap: test.tipcap, FeeCap: test.feecap} + _, _, err := cks.NewCustomTxAttempt(txmgr.Tx{Sequence: &n, FromAddress: addr}, gas.EvmFee{ + DynamicTipCap: dynamicFee.TipCap, + DynamicFeeCap: dynamicFee.FeeCap, + }, 100, 0x2, lggr) + if test.expectError == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, test.expectError) + } + }) + } + }) +} + +func TestTxm_NewLegacyAttempt(t *testing.T) { + addr := NewEvmAddress() + kst := ksmocks.NewEth(t) + tx := types.NewTx(&types.LegacyTx{}) + kst.On("SignTx", addr, mock.Anything, big.NewInt(1)).Return(tx, nil) + gc := newFeeConfig() + gc.priceMin = assets.NewWeiI(10) + gc.priceMax = assets.NewWeiI(50) + cks := txmgr.NewEvmTxAttemptBuilder(*big.NewInt(1), gc, kst, nil) + lggr := logger.Test(t) + + t.Run("creates attempt with fields", func(t 
*testing.T) { + var n evmtypes.Nonce + a, _, err := cks.NewCustomTxAttempt(txmgr.Tx{Sequence: &n, FromAddress: addr}, gas.EvmFee{Legacy: assets.NewWeiI(25)}, 100, 0x0, lggr) + require.NoError(t, err) + assert.Equal(t, 100, int(a.ChainSpecificFeeLimit)) + assert.NotNil(t, a.TxFee.Legacy) + assert.Equal(t, "25 wei", a.TxFee.Legacy.String()) + assert.Nil(t, a.TxFee.DynamicTipCap) + assert.Nil(t, a.TxFee.DynamicFeeCap) + }) + + t.Run("verifies max gas price", func(t *testing.T) { + _, _, err := cks.NewCustomTxAttempt(txmgr.Tx{FromAddress: addr}, gas.EvmFee{Legacy: assets.NewWeiI(100)}, 100, 0x0, lggr) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("specified gas price of 100 wei would exceed max configured gas price of 50 wei for key %s", addr.String())) + }) +} + +func TestTxm_NewCustomTxAttempt_NonRetryableErrors(t *testing.T) { + t.Parallel() + + kst := ksmocks.NewEth(t) + lggr := logger.Test(t) + cks := txmgr.NewEvmTxAttemptBuilder(*big.NewInt(1), newFeeConfig(), kst, nil) + + dynamicFee := gas.DynamicFee{TipCap: assets.GWei(100), FeeCap: assets.GWei(200)} + legacyFee := assets.NewWeiI(100) + + t.Run("dynamic fee with legacy tx type", func(t *testing.T) { + _, retryable, err := cks.NewCustomTxAttempt(txmgr.Tx{}, gas.EvmFee{ + DynamicTipCap: dynamicFee.TipCap, + DynamicFeeCap: dynamicFee.FeeCap, + }, 100, 0x0, lggr) + require.Error(t, err) + assert.False(t, retryable) + }) + t.Run("legacy fee with dynamic tx type", func(t *testing.T) { + _, retryable, err := cks.NewCustomTxAttempt(txmgr.Tx{}, gas.EvmFee{Legacy: legacyFee}, 100, 0x2, lggr) + require.Error(t, err) + assert.False(t, retryable) + }) + + t.Run("invalid type", func(t *testing.T) { + _, retryable, err := cks.NewCustomTxAttempt(txmgr.Tx{}, gas.EvmFee{}, 100, 0xA, lggr) + require.Error(t, err) + assert.False(t, retryable) + }) +} + +func TestTxm_EvmTxAttemptBuilder_RetryableEstimatorError(t *testing.T) { + est := gasmocks.NewEvmFeeEstimator(t) + est.On("GetFee", mock.Anything, 
mock.Anything, mock.Anything, mock.Anything).Return(gas.EvmFee{}, uint32(0), errors.New("fail")) + est.On("BumpFee", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(gas.EvmFee{}, uint32(0), errors.New("fail")) + + kst := ksmocks.NewEth(t) + lggr := logger.Test(t) + ctx := testutils.Context(t) + cks := txmgr.NewEvmTxAttemptBuilder(*big.NewInt(1), &feeConfig{eip1559DynamicFees: true}, kst, est) + + t.Run("NewAttempt", func(t *testing.T) { + _, _, _, retryable, err := cks.NewTxAttempt(ctx, txmgr.Tx{}, lggr) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to get fee") + assert.True(t, retryable) + }) + t.Run("NewAttemptWithType", func(t *testing.T) { + _, _, _, retryable, err := cks.NewTxAttemptWithType(ctx, txmgr.Tx{}, lggr, 0x0) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to get fee") + assert.True(t, retryable) + }) + t.Run("NewBumpAttempt", func(t *testing.T) { + _, _, _, retryable, err := cks.NewBumpTxAttempt(ctx, txmgr.Tx{}, txmgr.TxAttempt{}, nil, lggr) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to bump fee") + assert.True(t, retryable) + }) +} diff --git a/core/chains/evm/txmgr/broadcaster_test.go b/core/chains/evm/txmgr/broadcaster_test.go new file mode 100644 index 00000000..1d3ccfd8 --- /dev/null +++ b/core/chains/evm/txmgr/broadcaster_test.go @@ -0,0 +1,2042 @@ +package txmgr_test + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math/big" + "math/rand" + "strconv" + "testing" + "time" + + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/google/uuid" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/logger" + 
"github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/plugin-common/pkg/sqlutil" + commonutils "github.com/goplugin/plugin-common/pkg/utils" + + commonclient "github.com/goplugin/pluginv3.0/v2/common/client" + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + gasmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + ksmocks "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" +) + +// NewEthBroadcaster creates a new txmgr.EthBroadcaster for use in testing. 
+func NewTestEthBroadcaster( + t testing.TB, + txStore txmgr.TestEvmTxStore, + ethClient client.Client, + keyStore keystore.Eth, + config evmconfig.ChainScopedConfig, + checkerFactory txmgr.TransmitCheckerFactory, + nonceAutoSync bool, +) *txmgr.Broadcaster { + t.Helper() + + lggr := logger.Test(t) + ge := config.EVM().GasEstimator() + estimator := gas.NewWrappedEvmEstimator(lggr, func(lggr logger.Logger) gas.EvmEstimator { + return gas.NewFixedPriceEstimator(config.EVM().GasEstimator(), ge.BlockHistory(), lggr) + }, ge.EIP1559DynamicFees(), nil) + txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), ge, keyStore, estimator) + txNonceSyncer := txmgr.NewNonceSyncer(txStore, lggr, ethClient) + ethBroadcaster := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), txmgr.NewEvmTxmConfig(config.EVM()), txmgr.NewEvmTxmFeeConfig(config.EVM().GasEstimator()), config.EVM().Transactions(), config.Database().Listener(), keyStore, txBuilder, txNonceSyncer, lggr, checkerFactory, nonceAutoSync) + + // Mark instance as test + ethBroadcaster.XXXTestDisableUnstartedTxAutoProcessing() + servicetest.Run(t, ethBroadcaster) + return ethBroadcaster +} + +func TestEthBroadcaster_Lifecycle(t *testing.T) { + cfg, db := heavyweight.FullTestDBV2(t, nil) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + estimator := gasmocks.NewEvmFeeEstimator(t) + txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), evmcfg.EVM().GasEstimator(), ethKeyStore, estimator) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + eb := txmgr.NewEvmBroadcaster( + txStore, + txmgr.NewEvmTxmClient(ethClient), + txmgr.NewEvmTxmConfig(evmcfg.EVM()), + 
txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), + evmcfg.EVM().Transactions(), + evmcfg.Database().Listener(), + ethKeyStore, + txBuilder, + nil, + logger.Test(t), + &testCheckerFactory{}, + false, + ) + + // Can't close an unstarted instance + err := eb.Close() + require.Error(t, err) + ctx := testutils.Context(t) + + // Can start a new instance + err = eb.Start(ctx) + require.NoError(t, err) + + // Can successfully close once + err = eb.Close() + require.NoError(t, err) + + // Can't start more than once (Broadcaster uses services.StateMachine) + err = eb.Start(ctx) + require.Error(t, err) + // Can't close more than once (Broadcaster uses services.StateMachine) + err = eb.Close() + require.Error(t, err) + + // Can't closeInternal unstarted instance + require.Error(t, eb.XXXTestCloseInternal()) + + // Can successfully startInternal a previously closed instance + require.NoError(t, eb.XXXTestStartInternal(ctx)) + // Can't startInternal already started instance + require.Error(t, eb.XXXTestStartInternal(ctx)) + // Can successfully closeInternal again + require.NoError(t, eb.XXXTestCloseInternal()) +} + +// Failure to load next sequnce map should not fail Broadcaster startup +func TestEthBroadcaster_LoadNextSequenceMapFailure_StartupSuccess(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + estimator := gasmocks.NewEvmFeeEstimator(t) + txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), evmcfg.EVM().GasEstimator(), ethKeyStore, estimator) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), errors.New("Getting on-chain nonce failed")) + eb := txmgr.NewEvmBroadcaster( + txStore, + 
txmgr.NewEvmTxmClient(ethClient), + txmgr.NewEvmTxmConfig(evmcfg.EVM()), + txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), + evmcfg.EVM().Transactions(), + evmcfg.Database().Listener(), + ethKeyStore, + txBuilder, + nil, + logger.Test(t), + &testCheckerFactory{}, + false, + ) + + // Instance starts without error even if loading next sequence map fails + err := eb.Start(testutils.Context(t)) + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, eb.Close()) }) +} + +func TestEthBroadcaster_ProcessUnstartedEthTxs_Success(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + _, otherAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + checkerFactory := &txmgr.CheckerFactory{Client: ethClient} + + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() + ethClient.On("PendingNonceAt", mock.Anything, otherAddress).Return(uint64(0), nil).Once() + eb := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg, checkerFactory, false) + + toAddress := gethCommon.HexToAddress("0x6C03DDA95a2AEd917EeCc6eddD4b9D16E6380411") + timeNow := time.Now() + + encodedPayload := []byte{1, 2, 3} + value := big.Int(assets.NewEthValue(142)) + gasLimit := uint32(242) + checker := txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeSimulate, + } + + t.Run("no eth_txes at all", func(t *testing.T) { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + }) + + t.Run("eth_txes exist for a different from address", func(t *testing.T) { + mustCreateUnstartedTx(t, txStore, otherAddress, toAddress, encodedPayload, 
gasLimit, value, &cltest.FixtureChainID) + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + }) + + t.Run("existing eth_txes with broadcast_at or error", func(t *testing.T) { + nonce := evmtypes.Nonce(342) + errStr := "some error" + + etxUnconfirmed := txmgr.Tx{ + Sequence: &nonce, + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: encodedPayload, + Value: value, + FeeLimit: gasLimit, + BroadcastAt: &timeNow, + InitialBroadcastAt: &timeNow, + Error: null.String{}, + State: txmgrcommon.TxUnconfirmed, + } + etxWithError := txmgr.Tx{ + Sequence: nil, + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: encodedPayload, + Value: value, + FeeLimit: gasLimit, + Error: null.StringFrom(errStr), + State: txmgrcommon.TxFatalError, + } + + require.NoError(t, txStore.InsertTx(&etxUnconfirmed)) + require.NoError(t, txStore.InsertTx(&etxWithError)) + + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + }) + + t.Run("sends 3 EthTxs in order with higher value last, and lower values starting from the earliest", func(t *testing.T) { + // Higher value + expensiveEthTx := txmgr.Tx{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: []byte{42, 42, 0}, + Value: big.Int(assets.NewEthValue(242)), + FeeLimit: gasLimit, + CreatedAt: time.Unix(0, 0), + State: txmgrcommon.TxUnstarted, + } + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == uint64(2) && tx.Value().Cmp(big.NewInt(242)) == 0 + }), fromAddress).Return(commonclient.Successful, nil).Once() + + // Earlier + tr := int32(99) + b, err := json.Marshal(txmgr.TxMeta{JobID: &tr}) + require.NoError(t, err) + meta := sqlutil.JSON(b) + earlierEthTx := txmgr.Tx{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: []byte{42, 42, 
0}, + Value: value, + FeeLimit: gasLimit, + CreatedAt: time.Unix(0, 1), + State: txmgrcommon.TxUnstarted, + Meta: &meta, + } + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + if tx.Nonce() != uint64(0) { + return false + } + require.Equal(t, evmcfg.EVM().ChainID(), tx.ChainId()) + require.Equal(t, uint64(gasLimit), tx.Gas()) + require.Equal(t, evmcfg.EVM().GasEstimator().PriceDefault().ToInt(), tx.GasPrice()) + require.Equal(t, toAddress, *tx.To()) + require.Equal(t, value.String(), tx.Value().String()) + require.Equal(t, earlierEthTx.EncodedPayload, tx.Data()) + return true + }), fromAddress).Return(commonclient.Successful, nil).Once() + + // Later + laterEthTx := txmgr.Tx{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: []byte{42, 42, 1}, + Value: value, + FeeLimit: gasLimit, + CreatedAt: time.Unix(1, 0), + State: txmgrcommon.TxUnstarted, + } + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + if tx.Nonce() != uint64(1) { + return false + } + require.Equal(t, evmcfg.EVM().ChainID(), tx.ChainId()) + require.Equal(t, uint64(gasLimit), tx.Gas()) + require.Equal(t, evmcfg.EVM().GasEstimator().PriceDefault().ToInt(), tx.GasPrice()) + require.Equal(t, toAddress, *tx.To()) + require.Equal(t, value.String(), tx.Value().String()) + require.Equal(t, laterEthTx.EncodedPayload, tx.Data()) + return true + }), fromAddress).Return(commonclient.Successful, nil).Once() + + // Insertion order deliberately reversed to test ordering + require.NoError(t, txStore.InsertTx(&expensiveEthTx)) + require.NoError(t, txStore.InsertTx(&laterEthTx)) + require.NoError(t, txStore.InsertTx(&earlierEthTx)) + + // Do the thing + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + + // Check earlierEthTx and it's attempt + // This was the earlier one sent so it has the 
lower nonce + earlierTransaction, err := txStore.FindTxWithAttempts(earlierEthTx.ID) + require.NoError(t, err) + assert.False(t, earlierTransaction.Error.Valid) + require.NotNil(t, earlierTransaction.FromAddress) + assert.Equal(t, fromAddress, earlierTransaction.FromAddress) + require.NotNil(t, earlierTransaction.Sequence) + assert.Equal(t, evmtypes.Nonce(0), *earlierTransaction.Sequence) + assert.NotNil(t, earlierTransaction.BroadcastAt) + assert.NotNil(t, earlierTransaction.InitialBroadcastAt) + assert.Len(t, earlierTransaction.TxAttempts, 1) + var m txmgr.TxMeta + err = json.Unmarshal(*earlierEthTx.Meta, &m) + require.NoError(t, err) + assert.NotNil(t, m.JobID) + assert.Equal(t, tr, *m.JobID) + + attempt := earlierTransaction.TxAttempts[0] + + assert.Equal(t, earlierTransaction.ID, attempt.TxID) + assert.NotNil(t, attempt.TxFee.Legacy) + assert.Nil(t, attempt.TxFee.DynamicTipCap) + assert.Nil(t, attempt.TxFee.DynamicFeeCap) + assert.Equal(t, evmcfg.EVM().GasEstimator().PriceDefault(), attempt.TxFee.Legacy) + + _, err = txmgr.GetGethSignedTx(attempt.SignedRawTx) + require.NoError(t, err) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State) + require.Len(t, attempt.Receipts, 0) + + // Check laterEthTx and it's attempt + // This was the later one sent so it has the higher nonce + laterTransaction, err := txStore.FindTxWithAttempts(laterEthTx.ID) + require.NoError(t, err) + assert.False(t, earlierTransaction.Error.Valid) + require.NotNil(t, laterTransaction.FromAddress) + assert.Equal(t, fromAddress, laterTransaction.FromAddress) + require.NotNil(t, laterTransaction.Sequence) + assert.Equal(t, evmtypes.Nonce(1), *laterTransaction.Sequence) + assert.NotNil(t, laterTransaction.BroadcastAt) + assert.NotNil(t, earlierTransaction.InitialBroadcastAt) + assert.Len(t, laterTransaction.TxAttempts, 1) + + attempt = laterTransaction.TxAttempts[0] + + assert.Equal(t, laterTransaction.ID, attempt.TxID) + assert.Equal(t, evmcfg.EVM().GasEstimator().PriceDefault(), 
attempt.TxFee.Legacy) + + _, err = txmgr.GetGethSignedTx(attempt.SignedRawTx) + require.NoError(t, err) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State) + require.Len(t, attempt.Receipts, 0) + }) + + rnd := int64(1000000000 + rand.Intn(5000)) + cfg = configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true) + c.EVM[0].GasEstimator.TipCapDefault = assets.NewWeiI(rnd) + c.EVM[0].GasEstimator.FeeCapDefault = assets.NewWeiI(rnd + 1) + c.EVM[0].GasEstimator.PriceMax = assets.NewWeiI(rnd + 2) + }) + evmcfg = evmtest.NewChainScopedConfig(t, cfg) + ethClient.On("PendingNonceAt", mock.Anything, otherAddress).Return(uint64(1), nil).Once() + eb = NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg, checkerFactory, false) + + t.Run("sends transactions with type 0x2 in EIP-1559 mode", func(t *testing.T) { + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == uint64(343) && tx.Value().Cmp(big.NewInt(242)) == 0 + }), fromAddress).Return(commonclient.Successful, nil).Once() + + etx := mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, []byte{42, 42, 0}, gasLimit, big.Int(assets.NewEthValue(242)), &cltest.FixtureChainID) + // Do the thing + { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } + + // Check eipTxWithAl and it's attempt + // This was the earlier one sent so it has the lower nonce + etx, err := txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + assert.False(t, etx.Error.Valid) + require.NotNil(t, etx.FromAddress) + assert.Equal(t, fromAddress, etx.FromAddress) + require.NotNil(t, etx.Sequence) + assert.Equal(t, evmtypes.Nonce(343), *etx.Sequence) + assert.NotNil(t, etx.BroadcastAt) + assert.NotNil(t, etx.InitialBroadcastAt) + assert.Len(t, etx.TxAttempts, 1) + + attempt := 
etx.TxAttempts[0] + + assert.Equal(t, etx.ID, attempt.TxID) + assert.Nil(t, attempt.TxFee.Legacy) + assert.Equal(t, rnd, attempt.TxFee.DynamicTipCap.ToInt().Int64()) + assert.Equal(t, rnd+1, attempt.TxFee.DynamicFeeCap.ToInt().Int64()) + + _, err = txmgr.GetGethSignedTx(attempt.SignedRawTx) + require.NoError(t, err) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State) + require.Len(t, attempt.Receipts, 0) + }) + + t.Run("transaction simulation", func(t *testing.T) { + t.Run("when simulation succeeds, sends tx as normal", func(t *testing.T) { + txRequest := txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: []byte{42, 0, 0}, + Value: big.Int(assets.NewEthValue(442)), + FeeLimit: gasLimit, + Strategy: txmgrcommon.NewSendEveryStrategy(), + Checker: txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeSimulate, + }, + } + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == uint64(344) && tx.Value().Cmp(big.NewInt(442)) == 0 + }), fromAddress).Return(commonclient.Successful, nil).Once() + ethClient.On("CallContext", mock.Anything, mock.AnythingOfType("*hexutil.Bytes"), "eth_call", mock.MatchedBy(func(callarg map[string]interface{}) bool { + if fmt.Sprintf("%s", callarg["value"]) == "0x1ba" { // 442 + assert.Equal(t, txRequest.FromAddress, callarg["from"]) + assert.Equal(t, &txRequest.ToAddress, callarg["to"]) + assert.Equal(t, hexutil.Uint64(txRequest.FeeLimit), callarg["gas"]) + assert.Nil(t, callarg["gasPrice"]) + assert.Nil(t, callarg["maxFeePerGas"]) + assert.Nil(t, callarg["maxPriorityFeePerGas"]) + assert.Equal(t, (*hexutil.Big)(&txRequest.Value), callarg["value"]) + assert.Equal(t, hexutil.Bytes(txRequest.EncodedPayload), callarg["data"]) + return true + } + return false + }), "latest").Return(nil).Once() + + ethTx := mustCreateUnstartedTxFromEvmTxRequest(t, txStore, txRequest, &cltest.FixtureChainID) + + { + retryable, 
err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } + + // Check ethtx was sent + ethTx, err := txStore.FindTxWithAttempts(ethTx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, ethTx.State) + }) + + t.Run("with unknown error, sends tx as normal", func(t *testing.T) { + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == uint64(345) && tx.Value().Cmp(big.NewInt(542)) == 0 + }), fromAddress).Return(commonclient.Successful, nil).Once() + ethClient.On("CallContext", mock.Anything, mock.AnythingOfType("*hexutil.Bytes"), "eth_call", mock.MatchedBy(func(callarg map[string]interface{}) bool { + return fmt.Sprintf("%s", callarg["value"]) == "0x21e" // 542 + }), "latest").Return(errors.New("this is not a revert, something unexpected went wrong")).Once() + + ethTx := mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID, + txRequestWithChecker(checker), + txRequestWithValue(big.Int(assets.NewEthValue(542)))) + + { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } + + ethTx, err := txStore.FindTxWithAttempts(ethTx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, ethTx.State) + }) + + t.Run("on revert, marks tx as fatally errored and does not send", func(t *testing.T) { + jerr := client.JsonError{ + Code: 42, + Message: "oh no, it reverted", + Data: []byte{42, 166, 34}, + } + ethClient.On("CallContext", mock.Anything, mock.AnythingOfType("*hexutil.Bytes"), "eth_call", mock.MatchedBy(func(callarg map[string]interface{}) bool { + return fmt.Sprintf("%s", callarg["value"]) == "0x282" // 642 + }), "latest").Return(&jerr).Once() + + ethTx := mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID, + txRequestWithChecker(checker), + 
txRequestWithValue(big.Int(assets.NewEthValue(642)))) + { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } + + ethTx, err := txStore.FindTxWithAttempts(ethTx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxFatalError, ethTx.State) + assert.True(t, ethTx.Error.Valid) + assert.Equal(t, "transaction reverted during simulation: json-rpc error { Code = 42, Message = 'oh no, it reverted', Data = 'KqYi' }", ethTx.Error.String) + }) + }) +} + +func TestEthBroadcaster_TransmitChecking(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + checkerFactory := &testCheckerFactory{} + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() + eb := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg, checkerFactory, false) + + checker := txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeSimulate, + } + t.Run("when transmit checking times out, sends tx as normal", func(t *testing.T) { + // Checker will return a canceled error + checkerFactory.err = context.Canceled + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == 0 && tx.Value().Cmp(big.NewInt(442)) == 0 + }), fromAddress).Return(commonclient.Successful, nil).Once() + + ethTx := mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID, + txRequestWithValue(big.Int(assets.NewEthValue(442))), + txRequestWithChecker(checker)) + { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, 
err) + assert.False(t, retryable) + } + + // Check ethtx was sent + ethTx, err := txStore.FindTxWithAttempts(ethTx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, ethTx.State) + }) + + t.Run("when transmit checking succeeds, sends tx as normal", func(t *testing.T) { + // Checker will return no error + checkerFactory.err = nil + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == 1 && tx.Value().Cmp(big.NewInt(442)) == 0 + }), fromAddress).Return(commonclient.Successful, nil).Once() + + ethTx := mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID, + txRequestWithValue(big.Int(assets.NewEthValue(442))), + txRequestWithChecker(checker)) + { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } + + // Check ethtx was sent + ethTx, err := txStore.FindTxWithAttempts(ethTx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, ethTx.State) + }) + + t.Run("when transmit errors, fatally error transaction", func(t *testing.T) { + // Checker will return a fatal error + checkerFactory.err = errors.New("fatal checker error") + + ethTx := mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID, txRequestWithChecker(checker)) + { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } + + // Check ethtx was sent + ethTx, err := txStore.FindTxWithAttempts(ethTx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxFatalError, ethTx.State) + assert.True(t, ethTx.Error.Valid) + assert.Equal(t, "fatal checker error", ethTx.Error.String) + }) +} + +func TestEthBroadcaster_ProcessUnstartedEthTxs_OptimisticLockingOnEthTx(t *testing.T) { + // non-transactional DB needed because we deliberately test for FK violation + cfg, db := 
heavyweight.FullTestDBV2(t, nil) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ccfg := evmtest.NewChainScopedConfig(t, cfg) + evmcfg := txmgr.NewEvmTxmConfig(ccfg.EVM()) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + estimator := gasmocks.NewEvmFeeEstimator(t) + txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), ccfg.EVM().GasEstimator(), ethKeyStore, estimator) + + chStartEstimate := make(chan struct{}) + chBlock := make(chan struct{}) + + estimator.On("GetFee", mock.Anything, mock.Anything, mock.Anything, ccfg.EVM().GasEstimator().PriceMaxKey(fromAddress)).Return(gas.EvmFee{Legacy: assets.GWei(32)}, uint32(500), nil).Run(func(_ mock.Arguments) { + close(chStartEstimate) + <-chBlock + }).Once() + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil) + eb := txmgr.NewEvmBroadcaster( + txStore, + txmgr.NewEvmTxmClient(ethClient), + evmcfg, + txmgr.NewEvmTxmFeeConfig(ccfg.EVM().GasEstimator()), + ccfg.EVM().Transactions(), + cfg.Database().Listener(), + ethKeyStore, + txBuilder, + nil, + logger.Test(t), + &testCheckerFactory{}, + false, + ) + eb.XXXTestDisableUnstartedTxAutoProcessing() + + // Start instance of broadcaster + servicetest.Run(t, eb) + + mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID) + + go func() { + select { + case <-chStartEstimate: + case <-time.After(5 * time.Second): + t.Log("timed out waiting for estimator to be called") + return + } + + // Simulate a "PruneQueue" call + assert.NoError(t, commonutils.JustError(db.Exec(`DELETE FROM evm.txes WHERE state = 'unstarted'`))) + close(chBlock) + }() + + { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } +} + +func 
TestEthBroadcaster_ProcessUnstartedEthTxs_Success_WithMultiplier(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + // Configured gas price changed + lm := decimal.RequireFromString("1.3") + c.EVM[0].GasEstimator.LimitMultiplier = &lm + }) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() + eb := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg, &testCheckerFactory{}, false) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + assert.Equal(t, int(1600), int(tx.Gas())) + return true + }), fromAddress).Return(commonclient.Successful, nil).Once() + + txRequest := txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: gethCommon.HexToAddress("0x6C03DDA95a2AEd917EeCc6eddD4b9D16E6380411"), + EncodedPayload: []byte{42, 42, 0}, + Value: big.Int(assets.NewEthValue(242)), + FeeLimit: 1231, + Strategy: txmgrcommon.NewSendEveryStrategy(), + } + mustCreateUnstartedTxFromEvmTxRequest(t, txStore, txRequest, &cltest.FixtureChainID) + + // Do the thing + { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } +} + +func TestEthBroadcaster_ProcessUnstartedEthTxs_ResumingFromCrash(t *testing.T) { + toAddress := gethCommon.HexToAddress("0x6C03DDA95a2AEd917EeCc6eddD4b9D16E6380411") + value := big.Int(assets.NewEthValue(142)) + gasLimit := uint32(242) + encodedPayload := []byte{0, 1} + nextNonce := evmtypes.Nonce(916714082576372851) + firstNonce := nextNonce + secondNonce := nextNonce + 1 + cfg := 
configtest.NewGeneralConfig(t, nil) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + t.Run("cannot be more than one transaction per address in an unfinished state", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.RandomKey{Nonce: nextNonce.Int64()}.MustInsertWithState(t, ethKeyStore) + + firstInProgress := txmgr.Tx{ + FromAddress: fromAddress, + Sequence: &firstNonce, + ToAddress: toAddress, + EncodedPayload: encodedPayload, + Value: value, + FeeLimit: gasLimit, + Error: null.String{}, + State: txmgrcommon.TxInProgress, + } + + secondInProgress := txmgr.Tx{ + FromAddress: fromAddress, + Sequence: &secondNonce, + ToAddress: toAddress, + EncodedPayload: encodedPayload, + Value: value, + FeeLimit: gasLimit, + Error: null.String{}, + State: txmgrcommon.TxInProgress, + } + + require.NoError(t, txStore.InsertTx(&firstInProgress)) + err := txStore.InsertTx(&secondInProgress) + require.Error(t, err) + assert.Contains(t, err.Error(), "ERROR: duplicate key value violates unique constraint \"idx_only_one_in_progress_tx_per_account_id_per_evm_chain_id\" (SQLSTATE 23505)") + }) + + t.Run("previous run assigned nonce but never broadcast", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.RandomKey{Nonce: nextNonce.Int64()}.MustInsertWithState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() + eb := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg, &testCheckerFactory{}, false) + + // Crashed right after we commit the database transaction that saved + // the nonce to the eth_tx so evm.key_states.next_nonce has not been + // incremented yet + 
inProgressEthTx := mustInsertInProgressEthTxWithAttempt(t, txStore, firstNonce, fromAddress) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == uint64(firstNonce) + }), fromAddress).Return(commonclient.Successful, nil).Once() + + // Do the thing + { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } + + // Check it was saved correctly with its attempt + etx, err := txStore.FindTxWithAttempts(inProgressEthTx.ID) + require.NoError(t, err) + + assert.NotNil(t, etx.BroadcastAt) + assert.NotNil(t, etx.InitialBroadcastAt) + assert.False(t, etx.Error.Valid) + assert.Len(t, etx.TxAttempts, 1) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, etx.TxAttempts[0].State) + }) + + t.Run("previous run assigned nonce and broadcast but it fatally errored before we could save", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.RandomKey{Nonce: nextNonce.Int64()}.MustInsertWithState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() + eb := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg, &testCheckerFactory{}, false) + + // Crashed right after we commit the database transaction that saved the nonce to the eth_tx + inProgressEthTx := mustInsertInProgressEthTxWithAttempt(t, txStore, firstNonce, fromAddress) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == uint64(firstNonce) + }), fromAddress).Return(commonclient.Fatal, errors.New("exceeds block gas limit")).Once() + + // Do the thing + { + retryable, err := 
eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } + + // Check it was saved correctly with its attempt + etx, err := txStore.FindTxWithAttempts(inProgressEthTx.ID) + require.NoError(t, err) + + assert.Nil(t, etx.BroadcastAt) + assert.Nil(t, etx.InitialBroadcastAt) + assert.True(t, etx.Error.Valid) + assert.Equal(t, "exceeds block gas limit", etx.Error.String) + assert.Len(t, etx.TxAttempts, 0) + }) + + t.Run("previous run assigned nonce and broadcast and is now in mempool", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.RandomKey{Nonce: nextNonce.Int64()}.MustInsertWithState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() + eb := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg, &testCheckerFactory{}, false) + + // Crashed right after we commit the database transaction that saved the nonce to the eth_tx + inProgressEthTx := mustInsertInProgressEthTxWithAttempt(t, txStore, firstNonce, fromAddress) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == uint64(firstNonce) + }), fromAddress).Return(commonclient.Successful, errors.New("known transaction: a1313bd99a81fb4d8ad1d2e90b67c6b3fa77545c990d6251444b83b70b6f8980")).Once() + + // Do the thing + { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } + + // Check it was saved correctly with its attempt + etx, err := txStore.FindTxWithAttempts(inProgressEthTx.ID) + require.NoError(t, err) + + assert.NotNil(t, etx.BroadcastAt) + assert.NotNil(t, etx.InitialBroadcastAt) + assert.False(t, etx.Error.Valid) + 
assert.Len(t, etx.TxAttempts, 1) + }) + + t.Run("previous run assigned nonce and broadcast and now the transaction has been confirmed", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.RandomKey{Nonce: nextNonce.Int64()}.MustInsertWithState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() + eb := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg, &testCheckerFactory{}, false) + + // Crashed right after we commit the database transaction that saved the nonce to the eth_tx + inProgressEthTx := mustInsertInProgressEthTxWithAttempt(t, txStore, firstNonce, fromAddress) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == uint64(firstNonce) + }), fromAddress).Return(commonclient.TransactionAlreadyKnown, errors.New("nonce too low")).Once() + + // Do the thing + { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } + + // Check it was saved correctly with its attempt + etx, err := txStore.FindTxWithAttempts(inProgressEthTx.ID) + require.NoError(t, err) + + require.NotNil(t, etx.BroadcastAt) + assert.Equal(t, *etx.BroadcastAt, etx.CreatedAt) + assert.NotNil(t, etx.InitialBroadcastAt) + assert.False(t, etx.Error.Valid) + assert.Len(t, etx.TxAttempts, 1) + }) + + t.Run("previous run assigned nonce and then failed to reach node for some reason and node is still down", func(t *testing.T) { + failedToReachNodeError := context.DeadlineExceeded + db := pgtest.NewSqlxDB(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := 
cltest.RandomKey{Nonce: nextNonce.Int64()}.MustInsertWithState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() + eb := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg, &testCheckerFactory{}, false) + + // Crashed right after we commit the database transaction that saved the nonce to the eth_tx + inProgressEthTx := mustInsertInProgressEthTxWithAttempt(t, txStore, firstNonce, fromAddress) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == uint64(firstNonce) + }), fromAddress).Return(commonclient.Retryable, failedToReachNodeError).Once() + + // Do the thing + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + require.Error(t, err) + assert.Contains(t, err.Error(), failedToReachNodeError.Error()) + assert.True(t, retryable) + + // Check it was left in the unfinished state + etx, err := txStore.FindTxWithAttempts(inProgressEthTx.ID) + require.NoError(t, err) + + assert.Nil(t, etx.BroadcastAt) + assert.Nil(t, etx.InitialBroadcastAt) + assert.Equal(t, nextNonce, *etx.Sequence) + assert.False(t, etx.Error.Valid) + assert.Len(t, etx.TxAttempts, 1) + }) + + t.Run("previous run assigned nonce and broadcast transaction then crashed and rebooted with a different configured gas price", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.RandomKey{Nonce: nextNonce.Int64()}.MustInsertWithState(t, ethKeyStore) + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + // Configured gas price changed + c.EVM[0].GasEstimator.PriceDefault = assets.NewWeiI(500000000000) + }) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + ethClient := 
evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() + eb := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg, &testCheckerFactory{}, false) + + // Crashed right after we commit the database transaction that saved the nonce to the eth_tx + inProgressEthTx := mustInsertInProgressEthTxWithAttempt(t, txStore, firstNonce, fromAddress) + require.Len(t, inProgressEthTx.TxAttempts, 1) + attempt := inProgressEthTx.TxAttempts[0] + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + // Ensure that the gas price is the same as the original attempt + s, e := txmgr.GetGethSignedTx(attempt.SignedRawTx) + require.NoError(t, e) + return tx.Nonce() == uint64(firstNonce) && tx.GasPrice().Int64() == s.GasPrice().Int64() + }), fromAddress).Return(commonclient.Successful, errors.New("known transaction: a1313bd99a81fb4d8ad1d2e90b67c6b3fa77545c990d6251444b83b70b6f8980")).Once() + + // Do the thing + { + retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } + + // Check it was saved correctly with its attempt + etx, err := txStore.FindTxWithAttempts(inProgressEthTx.ID) + require.NoError(t, err) + + assert.NotNil(t, etx.BroadcastAt) + assert.NotNil(t, etx.InitialBroadcastAt) + assert.False(t, etx.Error.Valid) + assert.Len(t, etx.TxAttempts, 1) + attempt = etx.TxAttempts[0] + s, err := txmgr.GetGethSignedTx(attempt.SignedRawTx) + require.NoError(t, err) + assert.Equal(t, int64(342), s.GasPrice().Int64()) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State) + }) +} + +func getLocalNextNonce(t *testing.T, eb *txmgr.Broadcaster, fromAddress gethCommon.Address) uint64 { + n, err := eb.GetNextSequence(testutils.Context(t), fromAddress) + require.NoError(t, err) + require.NotNil(t, n) + return uint64(n) +} + +// Note that all of these tests share 
the same database, and ordering matters. +// This in order to more deeply test ProcessUnstartedEthTxs over +// multiple runs with previous errors in the database. +func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) { + toAddress := gethCommon.HexToAddress("0x6C03DDA95a2AEd917EeCc6eddD4b9D16E6380411") + value := big.Int(assets.NewEthValue(142)) + gasLimit := uint32(242) + encodedPayload := []byte{0, 1} + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() + eb := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg, &testCheckerFactory{}, false) + ctx := testutils.Context(t) + + require.NoError(t, commonutils.JustError(db.Exec(`SET CONSTRAINTS pipeline_runs_pipeline_spec_id_fkey DEFERRED`))) + + t.Run("if external wallet sent a transaction from the account and now the nonce is one higher than it should be and we got replacement underpriced then we assume a previous transaction of ours was the one that succeeded, and hand off to EthConfirmer", func(t *testing.T) { + mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID) + // First send, replacement underpriced + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == uint64(0) + }), fromAddress).Return(commonclient.Successful, errors.New("replacement transaction underpriced")).Once() + + // Do the thing + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + + // Check that the transaction 
was saved correctly with its attempt + // We assume success and hand off to eth confirmer to eventually mark it as failed + var latestID int64 + var etx1 txmgr.Tx + require.NoError(t, db.Get(&latestID, "SELECT max(id) FROM evm.txes")) + etx1, err = txStore.FindTxWithAttempts(latestID) + require.NoError(t, err) + require.NotNil(t, etx1.BroadcastAt) + assert.NotEqual(t, etx1.CreatedAt, *etx1.BroadcastAt) + assert.NotNil(t, etx1.InitialBroadcastAt) + require.NotNil(t, etx1.Sequence) + assert.Equal(t, evmtypes.Nonce(0), *etx1.Sequence) + assert.False(t, etx1.Error.Valid) + assert.Len(t, etx1.TxAttempts, 1) + + // Check that the local nonce was incremented by one + finalNextNonce := getLocalNextNonce(t, eb, fromAddress) + require.NoError(t, err) + require.NotNil(t, finalNextNonce) + require.Equal(t, int64(1), int64(finalNextNonce)) + }) + + t.Run("geth Client returns an error in the fatal errors category", func(t *testing.T) { + fatalErrorExample := "exceeds block gas limit" + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + + t.Run("without callback", func(t *testing.T) { + etx := mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID) + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.Fatal, errors.New(fatalErrorExample)).Once() + + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + + // Check it was saved correctly with its attempt + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + assert.Nil(t, etx.BroadcastAt) + assert.Nil(t, etx.InitialBroadcastAt) + require.Nil(t, etx.Sequence) + assert.True(t, etx.Error.Valid) + assert.Contains(t, etx.Error.String, "exceeds block gas limit") + assert.Len(t, etx.TxAttempts, 0) + + // Check that the key had its nonce reset + var nonce 
evmtypes.Nonce + nonce, err = eb.GetNextSequence(ctx, fromAddress) + require.NoError(t, err) + // Saved NextNonce must be the same as before because this transaction + // was not accepted by the eth node and never can be + require.Equal(t, int64(localNextNonce), int64(nonce)) + + }) + + t.Run("with callback", func(t *testing.T) { + run := cltest.MustInsertPipelineRun(t, db) + tr := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run.ID) + etx := txmgr.Tx{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: encodedPayload, + Value: value, + FeeLimit: gasLimit, + State: txmgrcommon.TxUnstarted, + PipelineTaskRunID: uuid.NullUUID{UUID: tr.ID, Valid: true}, + SignalCallback: true, + } + + t.Run("with erroring callback bails out", func(t *testing.T) { + require.NoError(t, txStore.InsertTx(&etx)) + fn := func(id uuid.UUID, result interface{}, err error) error { + return errors.New("something exploded in the callback") + } + + eb.SetResumeCallback(fn) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.Fatal, errors.New(fatalErrorExample)).Once() + + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + require.Error(t, err) + require.Contains(t, err.Error(), "something exploded in the callback") + assert.True(t, retryable) + }) + + t.Run("calls resume with error", func(t *testing.T) { + fn := func(id uuid.UUID, result interface{}, err error) error { + require.Equal(t, id, tr.ID) + require.Nil(t, result) + require.Error(t, err) + require.Contains(t, err.Error(), "fatal error while sending transaction: exceeds block gas limit") + return nil + } + + eb.SetResumeCallback(fn) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.Fatal, errors.New(fatalErrorExample)).Once() + + 
{ + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + } + + // same as the parent test, but callback is set by ctor + t.Run("callback set by ctor", func(t *testing.T) { + lggr := logger.Test(t) + estimator := gas.NewWrappedEvmEstimator(lggr, func(lggr logger.Logger) gas.EvmEstimator { + return gas.NewFixedPriceEstimator(evmcfg.EVM().GasEstimator(), evmcfg.EVM().GasEstimator().BlockHistory(), lggr) + }, evmcfg.EVM().GasEstimator().EIP1559DynamicFees(), nil) + txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), evmcfg.EVM().GasEstimator(), ethKeyStore, estimator) + localNextNonce = getLocalNextNonce(t, eb, fromAddress) + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(localNextNonce, nil).Once() + eb2 := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), txmgr.NewEvmTxmConfig(evmcfg.EVM()), txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), evmcfg.EVM().Transactions(), evmcfg.Database().Listener(), ethKeyStore, txBuilder, nil, lggr, &testCheckerFactory{}, false) + retryable, err := eb2.ProcessUnstartedTxs(ctx, fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + }) + }) + }) + }) + + eb.SetResumeCallback(nil) + + t.Run("geth Client fails with error indicating that the transaction was too expensive", func(t *testing.T) { + TxFeeExceedsCapError := "tx fee (1.10 ether) exceeds the configured cap (1.00 ether)" + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + etx := mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID) + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.ExceedsMaxFee, errors.New(TxFeeExceedsCapError)).Twice() + // In the first case, the tx was NOT accepted into the mempool. 
In the case + // of multiple RPC nodes, it is possible that it can be accepted by + // another node even if the primary one returns "exceeds the configured + // cap" + + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + require.Error(t, err) + assert.Contains(t, err.Error(), "tx fee (1.10 ether) exceeds the configured cap (1.00 ether)") + assert.Contains(t, err.Error(), "error while sending transaction") + assert.True(t, retryable) + + // Check it was saved with its attempt + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + assert.Nil(t, etx.BroadcastAt) + assert.Nil(t, etx.InitialBroadcastAt) // Note that InitialBroadcastAt really means "InitialDefinitelySuccessfulBroadcastAt" + assert.Equal(t, evmtypes.Nonce(localNextNonce), *etx.Sequence) + assert.False(t, etx.Error.Valid) + assert.Len(t, etx.TxAttempts, 1) + attempt := etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptInProgress, attempt.State) + + // Check that the key had its nonce reset + var nonce evmtypes.Nonce + nonce, err = eb.GetNextSequence(ctx, fromAddress) + require.NoError(t, err) + // Saved NextNonce must be the same as before because this transaction + // was not accepted by the eth node and never can be + require.Equal(t, int64(localNextNonce), int64(nonce)) + + // On the second try, the tx has been accepted into the mempool + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(localNextNonce+1, nil).Once() + + retryable, err = eb.ProcessUnstartedTxs(ctx, fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + + // Check it was saved with its attempt + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + assert.NotNil(t, etx.BroadcastAt) + assert.NotNil(t, etx.InitialBroadcastAt) // Note that InitialBroadcastAt really means "InitialDefinitelySuccessfulBroadcastAt" + assert.Equal(t, evmtypes.Nonce(localNextNonce), *etx.Sequence) + assert.False(t, etx.Error.Valid) + assert.Len(t, etx.TxAttempts, 1) + 
attempt = etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State) + }) + + t.Run("eth Client call fails with an unexpected random error, and transaction was not accepted into mempool", func(t *testing.T) { + retryableErrorExample := "some unknown error" + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + etx := mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID) + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.Unknown, errors.New(retryableErrorExample)).Once() + // Nonce is the same as localNextNonce, implying that this sent transaction has not been accepted + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(localNextNonce, nil).Once() + + // Do the thing + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + require.Error(t, err) + require.Contains(t, err.Error(), retryableErrorExample) + assert.True(t, retryable) + + // Check it was saved correctly with its attempt + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + assert.Nil(t, etx.BroadcastAt) + assert.Nil(t, etx.InitialBroadcastAt) + require.NotNil(t, etx.Sequence) + assert.False(t, etx.Error.Valid) + assert.Equal(t, txmgrcommon.TxInProgress, etx.State) + assert.Len(t, etx.TxAttempts, 1) + attempt := etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptInProgress, attempt.State) + + // Now on the second run, it is successful + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.Successful, nil).Once() + + retryable, err = eb.ProcessUnstartedTxs(ctx, fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + + // Check it was saved correctly with its attempt + etx, err = 
txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + assert.NotNil(t, etx.BroadcastAt) + assert.NotNil(t, etx.InitialBroadcastAt) + require.NotNil(t, etx.Sequence) + assert.False(t, etx.Error.Valid) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx.State) + assert.Len(t, etx.TxAttempts, 1) + attempt = etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State) + }) + + t.Run("eth client call fails with an unexpected random error, and the nonce check also subsequently fails", func(t *testing.T) { + retryableErrorExample := "some unknown error" + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + etx := mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID) + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.Unknown, errors.New(retryableErrorExample)).Once() + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), errors.New("pending nonce fetch failed")).Once() + + // Do the thing + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + require.Error(t, err) + require.Contains(t, err.Error(), retryableErrorExample) + require.Contains(t, err.Error(), "pending nonce fetch failed") + assert.True(t, retryable) + + // Check it was saved correctly with its attempt + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + assert.Nil(t, etx.BroadcastAt) + assert.Nil(t, etx.InitialBroadcastAt) + require.NotNil(t, etx.Sequence) + assert.False(t, etx.Error.Valid) + assert.Equal(t, txmgrcommon.TxInProgress, etx.State) + assert.Len(t, etx.TxAttempts, 1) + attempt := etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptInProgress, attempt.State) + + // Now on the second run, it is successful + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) 
bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.Successful, nil).Once() + + retryable, err = eb.ProcessUnstartedTxs(ctx, fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + + // Check it was saved correctly with its attempt + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + assert.NotNil(t, etx.BroadcastAt) + assert.NotNil(t, etx.InitialBroadcastAt) + require.NotNil(t, etx.Sequence) + assert.False(t, etx.Error.Valid) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx.State) + assert.Len(t, etx.TxAttempts, 1) + attempt = etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State) + }) + + t.Run("eth Client call fails with an unexpected random error, and transaction was accepted into mempool", func(t *testing.T) { + retryableErrorExample := "some strange RPC returns an unexpected thing" + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + etx := mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID) + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.Unknown, errors.New(retryableErrorExample)).Once() + // Nonce is one higher than localNextNonce, implying that despite the error, this sent transaction has been accepted into the mempool + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(localNextNonce+1, nil).Once() + + // Do the thing + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + require.NoError(t, err) + assert.False(t, retryable) + + // Check it was saved correctly with its attempt, in a broadcast state + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + assert.NotNil(t, etx.BroadcastAt) + assert.NotNil(t, etx.InitialBroadcastAt) + require.NotNil(t, etx.Sequence) + assert.False(t, etx.Error.Valid) + 
assert.Equal(t, txmgrcommon.TxUnconfirmed, etx.State) + assert.Len(t, etx.TxAttempts, 1) + attempt := etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State) + }) + + t.Run("eth node returns underpriced transaction", func(t *testing.T) { + // This happens if a transaction's gas price is below the minimum + // configured for the transaction pool. + // This is a configuration error by the node operator, since it means they set the base gas level too low. + underpricedError := "transaction underpriced" + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + etx := mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID) + + // First was underpriced + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce && tx.GasPrice().Cmp(evmcfg.EVM().GasEstimator().PriceDefault().ToInt()) == 0 + }), fromAddress).Return(commonclient.Underpriced, errors.New(underpricedError)).Once() + + // Second with gas bump was still underpriced + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce && tx.GasPrice().Cmp(big.NewInt(25000000000)) == 0 + }), fromAddress).Return(commonclient.Underpriced, errors.New(underpricedError)).Once() + + // Third succeeded + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce && tx.GasPrice().Cmp(big.NewInt(30000000000)) == 0 + }), fromAddress).Return(commonclient.Successful, nil).Once() + + // Do the thing + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + require.NoError(t, err) + assert.False(t, retryable) + + // Check it was saved correctly with its attempt + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + assert.NotNil(t, etx.BroadcastAt) + 
assert.NotNil(t, etx.InitialBroadcastAt) + require.NotNil(t, etx.Sequence) + assert.False(t, etx.Error.Valid) + assert.Len(t, etx.TxAttempts, 1) + attempt := etx.TxAttempts[0] + assert.Equal(t, "30 gwei", attempt.TxFee.Legacy.String()) + }) + + etxUnfinished := txmgr.Tx{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: encodedPayload, + Value: value, + FeeLimit: gasLimit, + State: txmgrcommon.TxUnstarted, + } + require.NoError(t, txStore.InsertTx(&etxUnfinished)) + + t.Run("failed to reach node for some reason", func(t *testing.T) { + failedToReachNodeError := context.DeadlineExceeded + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.Retryable, failedToReachNodeError).Once() + + // Do the thing + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + require.Error(t, err) + assert.Contains(t, err.Error(), "context deadline exceeded") + assert.True(t, retryable) + + // Check it was left in the unfinished state + etx, err := txStore.FindTxWithAttempts(etxUnfinished.ID) + require.NoError(t, err) + + assert.Nil(t, etx.BroadcastAt) + assert.Nil(t, etx.InitialBroadcastAt) + assert.NotNil(t, etx.Sequence) + assert.False(t, etx.Error.Valid) + assert.Equal(t, txmgrcommon.TxInProgress, etx.State) + assert.Len(t, etx.TxAttempts, 1) + assert.Equal(t, txmgrtypes.TxAttemptInProgress, etx.TxAttempts[0].State) + }) + + t.Run("eth node returns temporarily underpriced transaction", func(t *testing.T) { + // This happens if parity is rejecting transactions that are not priced high enough to even get into the mempool at all + // It should pretend it was accepted into the mempool and hand off to ethConfirmer to bump gas as normal + temporarilyUnderpricedError := "There are too many transactions in the queue. Your transaction was dropped due to limit. 
Try increasing the fee." + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + + // Re-use the previously unfinished transaction, no need to insert new + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.Successful, errors.New(temporarilyUnderpricedError)).Once() + + // Do the thing + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + assert.NoError(t, err) + assert.False(t, retryable) + + // Check it was saved correctly with its attempt + etx, err := txStore.FindTxWithAttempts(etxUnfinished.ID) + require.NoError(t, err) + + assert.NotNil(t, etx.BroadcastAt) + assert.NotNil(t, etx.InitialBroadcastAt) + require.NotNil(t, etx.Sequence) + assert.False(t, etx.Error.Valid) + assert.Len(t, etx.TxAttempts, 1) + attempt := etx.TxAttempts[0] + assert.Equal(t, "20 gwei", attempt.TxFee.Legacy.String()) + }) + + t.Run("eth node returns underpriced transaction and bumping gas doesn't increase it", func(t *testing.T) { + // This happens if a transaction's gas price is below the minimum + // configured for the transaction pool. + // This is a configuration error by the node operator, since it means they set the base gas level too low. 
+ underpricedError := "transaction underpriced" + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + // In this scenario the node operator REALLY fucked up and set the bump + // to zero (even though that should not be possible due to config + // validation) + evmcfg2 := evmtest.NewChainScopedConfig(t, configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.BumpMin = assets.NewWeiI(0) + c.EVM[0].GasEstimator.BumpPercent = ptr[uint16](0) + })) + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(localNextNonce, nil).Once() + eb2 := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg2, &testCheckerFactory{}, false) + mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID) + + // First was underpriced + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce && tx.GasPrice().Cmp(evmcfg2.EVM().GasEstimator().PriceDefault().ToInt()) == 0 + }), fromAddress).Return(commonclient.Underpriced, errors.New(underpricedError)).Once() + + // Do the thing + retryable, err := eb2.ProcessUnstartedTxs(ctx, fromAddress) + require.Error(t, err) + require.Contains(t, err.Error(), "bumped fee price of 20 gwei is equal to original fee price of 20 gwei. 
ACTION REQUIRED: This is a configuration error, you must increase either FeeEstimator.BumpPercent or FeeEstimator.BumpMin") + assert.True(t, retryable) + + // TEARDOWN: Clear out the unsent tx before the next test + pgtest.MustExec(t, db, `DELETE FROM evm.txes WHERE nonce = $1`, localNextNonce) + }) + + t.Run("eth tx is left in progress if eth node returns insufficient eth", func(t *testing.T) { + insufficientEthError := "insufficient funds for transfer" + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + etx := mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID) + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.InsufficientFunds, errors.New(insufficientEthError)).Once() + + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + require.Error(t, err) + assert.Contains(t, err.Error(), "insufficient funds for transfer") + assert.True(t, retryable) + + // Check it was saved correctly with its attempt + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + assert.Nil(t, etx.BroadcastAt) + assert.Nil(t, etx.InitialBroadcastAt) + require.NotNil(t, etx.Sequence) + assert.False(t, etx.Error.Valid) + assert.Equal(t, txmgrcommon.TxInProgress, etx.State) + require.Len(t, etx.TxAttempts, 1) + attempt := etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptInProgress, attempt.State) + assert.Nil(t, attempt.BroadcastBeforeBlockNum) + }) + + pgtest.MustExec(t, db, `DELETE FROM evm.txes`) + + t.Run("eth tx is left in progress if nonce is too high", func(t *testing.T) { + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + nonceGapError := "NonceGap, Future nonce. 
Expected nonce: " + strconv.FormatUint(localNextNonce, 10) + etx := mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID) + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce + }), fromAddress).Return(commonclient.Retryable, errors.New(nonceGapError)).Once() + + retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress) + require.Error(t, err) + assert.Contains(t, err.Error(), nonceGapError) + assert.True(t, retryable) + + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + assert.Nil(t, etx.BroadcastAt) + assert.Nil(t, etx.InitialBroadcastAt) + require.NotNil(t, etx.Sequence) + assert.False(t, etx.Error.Valid) + assert.Equal(t, txmgrcommon.TxInProgress, etx.State) + require.Len(t, etx.TxAttempts, 1) + attempt := etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptInProgress, attempt.State) + assert.Nil(t, attempt.BroadcastBeforeBlockNum) + + pgtest.MustExec(t, db, `DELETE FROM evm.txes`) + }) + + t.Run("eth node returns underpriced transaction and bumping gas doesn't increase it in EIP-1559 mode", func(t *testing.T) { + // This happens if a transaction's gas price is below the minimum + // configured for the transaction pool. + // This is a configuration error by the node operator, since it means they set the base gas level too low. 
+ + // In this scenario the node operator REALLY fucked up and set the bump + // to zero (even though that should not be possible due to config + // validation) + evmcfg2 := evmtest.NewChainScopedConfig(t, configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true) + c.EVM[0].GasEstimator.BumpMin = assets.NewWeiI(0) + c.EVM[0].GasEstimator.BumpPercent = ptr[uint16](0) + })) + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(localNextNonce, nil).Once() + eb2 := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg2, &testCheckerFactory{}, false) + mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID) + underpricedError := "transaction underpriced" + localNextNonce = getLocalNextNonce(t, eb, fromAddress) + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce && tx.GasTipCap().Cmp(big.NewInt(1)) == 0 + }), fromAddress).Return(commonclient.Underpriced, errors.New(underpricedError)).Once() + + // Check gas tip cap verification + retryable, err := eb2.ProcessUnstartedTxs(ctx, fromAddress) + require.Error(t, err) + require.Contains(t, err.Error(), "bumped gas tip cap of 1 wei is less than or equal to original gas tip cap of 1 wei") + assert.True(t, retryable) + + pgtest.MustExec(t, db, `DELETE FROM evm.txes`) + }) + + t.Run("eth node returns underpriced transaction in EIP-1559 mode, bumps until inclusion", func(t *testing.T) { + // This happens if a transaction's gas price is below the minimum + // configured for the transaction pool. + // This is a configuration error by the node operator, since it means they set the base gas level too low. 
+ underpricedError := "transaction underpriced" + localNextNonce := getLocalNextNonce(t, eb, fromAddress) + mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID) + + // Check gas tip cap verification + evmcfg2 := evmtest.NewChainScopedConfig(t, configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true) + c.EVM[0].GasEstimator.TipCapDefault = assets.NewWeiI(0) + })) + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(localNextNonce, nil).Once() + eb2 := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg2, &testCheckerFactory{}, false) + + retryable, err := eb2.ProcessUnstartedTxs(ctx, fromAddress) + require.Error(t, err) + require.Contains(t, err.Error(), "specified gas tip cap of 0 is below min configured gas tip of 1 wei for key") + assert.True(t, retryable) + + gasTipCapDefault := assets.NewWeiI(42) + + evmcfg2 = evmtest.NewChainScopedConfig(t, configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true) + c.EVM[0].GasEstimator.TipCapDefault = gasTipCapDefault + })) + localNextNonce = getLocalNextNonce(t, eb, fromAddress) + eb2 = NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg2, &testCheckerFactory{}, false) + + // Second was underpriced but above minimum + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce && tx.GasTipCap().Cmp(gasTipCapDefault.ToInt()) == 0 + }), fromAddress).Return(commonclient.Underpriced, errors.New(underpricedError)).Once() + // Resend at the bumped price + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == localNextNonce && tx.GasTipCap().Cmp(big.NewInt(0).Add(gasTipCapDefault.ToInt(), 
evmcfg2.EVM().GasEstimator().BumpMin().ToInt())) == 0
		}), fromAddress).Return(commonclient.Underpriced, errors.New(underpricedError)).Once()
		// Final bump succeeds
		ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool {
			return tx.Nonce() == localNextNonce && tx.GasTipCap().Cmp(big.NewInt(0).Add(gasTipCapDefault.ToInt(), big.NewInt(0).Mul(evmcfg2.EVM().GasEstimator().BumpMin().ToInt(), big.NewInt(2)))) == 0
		}), fromAddress).Return(commonclient.Successful, nil).Once()

		retryable, err = eb2.ProcessUnstartedTxs(ctx, fromAddress)
		require.NoError(t, err)
		assert.False(t, retryable)

		// TEARDOWN: Clear out the unsent tx before the next test
		pgtest.MustExec(t, db, `DELETE FROM evm.txes WHERE nonce = $1`, localNextNonce)
	})

}

// TestEthBroadcaster_ProcessUnstartedEthTxs_KeystoreErrors verifies broadcaster behavior
// when the keystore fails: a SignTx error must leave the tx in the unstarted state with
// no attempts, report the error as retryable, and must NOT increment the local next nonce.
func TestEthBroadcaster_ProcessUnstartedEthTxs_KeystoreErrors(t *testing.T) {
	toAddress := gethCommon.HexToAddress("0x6C03DDA95a2AEd917EeCc6eddD4b9D16E6380411")
	value := big.Int(assets.NewEthValue(142))
	gasLimit := uint32(242)
	encodedPayload := []byte{0, 1}
	// Expected next nonce; must remain unchanged after the signing failure below.
	localNonce := 0

	db := pgtest.NewSqlxDB(t)
	cfg := configtest.NewGeneralConfig(t, nil)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())

	// A real keystore is used only to produce a valid enabled address; signing itself
	// is mocked via ksmocks below.
	realKeystore := cltest.NewKeyStore(t, db, cfg.Database())
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, realKeystore.Eth())

	evmcfg := evmtest.NewChainScopedConfig(t, cfg)
	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)

	kst := ksmocks.NewEth(t)
	addresses := []gethCommon.Address{fromAddress}
	kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Once()
	// The broadcaster seeds its next-sequence bookkeeping from PendingNonceAt (0 here).
	ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
	eb := NewTestEthBroadcaster(t, txStore, ethClient, kst, evmcfg, &testCheckerFactory{}, false)
	ctx := testutils.Context(t)
	_, err := eb.GetNextSequence(ctx, fromAddress)
	require.NoError(t, err)

	t.Run("tx signing fails", func(t *testing.T) {
		etx := mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, gasLimit, value, &cltest.FixtureChainID)
		tx := *gethTypes.NewTx(&gethTypes.LegacyTx{})
		// SignTx fails for any tx on the configured chain ID.
		kst.On("SignTx",
			fromAddress,
			mock.AnythingOfType("*types.Transaction"),
			mock.MatchedBy(func(chainID *big.Int) bool {
				return chainID.Cmp(evmcfg.EVM().ChainID()) == 0
			})).Return(&tx, errors.New("could not sign transaction"))

		// Do the thing
		retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
		require.Error(t, err)
		require.Contains(t, err.Error(), "could not sign transaction")
		assert.True(t, retryable)

		// Check that the transaction is left in unstarted state
		etx, err = txStore.FindTxWithAttempts(etx.ID)
		require.NoError(t, err)

		assert.Equal(t, txmgrcommon.TxUnstarted, etx.State)
		assert.Len(t, etx.TxAttempts, 0)

		// Check that the key did not have its nonce incremented
		var nonce types.Nonce
		nonce, err = eb.GetNextSequence(ctx, fromAddress)
		require.NoError(t, err)
		require.Equal(t, int64(localNonce), int64(nonce))
	})
}

// TestEthBroadcaster_GetNextNonce checks that a freshly constructed broadcaster
// reports the next nonce seeded from the client's PendingNonceAt (0 here).
func TestEthBroadcaster_GetNextNonce(t *testing.T) {
	db := pgtest.NewSqlxDB(t)
	cfg := configtest.NewGeneralConfig(t, nil)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	fromAddress := testutils.NewAddress()
	evmcfg := evmtest.NewChainScopedConfig(t, cfg)
	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)

	kst := ksmocks.NewEth(t)
	addresses := []gethCommon.Address{fromAddress}
	kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Once()
	ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
	eb := NewTestEthBroadcaster(t, txStore, ethClient, kst, evmcfg, &testCheckerFactory{}, false)
	nonce := getLocalNextNonce(t, eb, fromAddress)
	require.NotNil(t, nonce)
	assert.Equal(t, int64(0), int64(nonce))
}

// TestEthBroadcaster_IncrementNextNonce checks that IncrementNextSequence bumps
// the locally tracked nonce by exactly one.
func TestEthBroadcaster_IncrementNextNonce(t *testing.T) {
	db := pgtest.NewSqlxDB(t)
	cfg := configtest.NewGeneralConfig(t, nil)
txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	kst := ksmocks.NewEth(t)
	fromAddress := testutils.NewAddress()
	evmcfg := evmtest.NewChainScopedConfig(t, cfg)
	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)

	addresses := []gethCommon.Address{fromAddress}
	kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Once()
	// Initial next nonce comes from the client: 0.
	ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
	eb := NewTestEthBroadcaster(t, txStore, ethClient, kst, evmcfg, &testCheckerFactory{}, false)

	ctx := testutils.Context(t)
	nonce, err := eb.GetNextSequence(ctx, fromAddress)
	require.NoError(t, err)
	eb.IncrementNextSequence(fromAddress, nonce)

	// Nonce bumped to 1
	nonce, err = eb.GetNextSequence(ctx, fromAddress)
	require.NoError(t, err)
	require.Equal(t, int64(1), int64(nonce))
}

// TestEthBroadcaster_Trigger is a smoke test: Trigger must be callable with
// arbitrary addresses without blocking.
func TestEthBroadcaster_Trigger(t *testing.T) {
	t.Parallel()

	// Simple sanity check to make sure it doesn't block
	db := pgtest.NewSqlxDB(t)

	cfg := configtest.NewGeneralConfig(t, nil)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	evmcfg := evmtest.NewChainScopedConfig(t, cfg)
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	eb := NewTestEthBroadcaster(t, txStore, evmtest.NewEthClientMockWithDefaultChain(t), ethKeyStore, evmcfg, &testCheckerFactory{}, false)

	eb.Trigger(testutils.NewAddress())
	eb.Trigger(testutils.NewAddress())
	eb.Trigger(testutils.NewAddress())
}

// TestEthBroadcaster_SyncNonce exercises nonce auto-sync at broadcaster startup:
// skipped when the feature flag is off, fast-forwarded from the node's pending
// nonce when on, and retried after a transient client error. Observed log output
// is used to detect which path was taken.
func TestEthBroadcaster_SyncNonce(t *testing.T) {
	db := pgtest.NewSqlxDB(t)
	ctx := testutils.Context(t)

	lggr, observed := logger.TestObserved(t, zapcore.DebugLevel)
	cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) {
		c.EVM[0].NonceAutoSync = ptr(true)
	})
	evmcfg := evmtest.NewChainScopedConfig(t, cfg)
	evmTxmCfg := txmgr.NewEvmTxmConfig(evmcfg.EVM())
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())

	kst := cltest.NewKeyStore(t, db,
		cfg.Database()).Eth()
	// One enabled key (should sync) and one disabled key (should be ignored).
	_, fromAddress := cltest.RandomKey{Disabled: false}.MustInsertWithState(t, kst)
	_, disabledAddress := cltest.RandomKey{Disabled: true}.MustInsertWithState(t, kst)

	ethNodeNonce := uint64(22)

	estimator := gas.NewWrappedEvmEstimator(lggr, func(lggr logger.Logger) gas.EvmEstimator {
		return gas.NewFixedPriceEstimator(evmcfg.EVM().GasEstimator(), evmcfg.EVM().GasEstimator().BlockHistory(), lggr)
	}, evmcfg.EVM().GasEstimator().EIP1559DynamicFees(), nil)
	checkerFactory := &testCheckerFactory{}

	ge := evmcfg.EVM().GasEstimator()

	t.Run("does nothing if nonce sync is disabled", func(t *testing.T) {
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
		txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), ge, kst, estimator)

		// Shadows the outer keystore with a mock so expectations stay local to this subtest.
		kst := ksmocks.NewEth(t)
		addresses := []gethCommon.Address{fromAddress}
		kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Once()
		ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
		// nil nonce syncer + final flag false: sync is skipped (see log assertion below).
		eb := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), evmTxmCfg, txmgr.NewEvmTxmFeeConfig(ge), evmcfg.EVM().Transactions(), cfg.Database().Listener(), kst, txBuilder, nil, lggr, checkerFactory, false)
		err := eb.Start(ctx)
		assert.NoError(t, err)

		defer func() { assert.NoError(t, eb.Close()) }()

		testutils.WaitForLogMessage(t, observed, "Skipping sequence auto-sync")
	})

	t.Run("when nonce syncer returns new nonce, successfully sets nonce", func(t *testing.T) {
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
		txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), ge, kst, estimator)

		txNonceSyncer := txmgr.NewNonceSyncer(txStore, lggr, ethClient)
		kst := ksmocks.NewEth(t)
		addresses := []gethCommon.Address{fromAddress}
		kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Once()
		ethClient.On("PendingNonceAt", mock.Anything,
fromAddress).Return(uint64(0), nil).Once()
		// Final arg true enables nonce auto-sync; the syncer fast-forwards from the node.
		eb := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), evmTxmCfg, txmgr.NewEvmTxmFeeConfig(ge), evmcfg.EVM().Transactions(), cfg.Database().Listener(), kst, txBuilder, txNonceSyncer, lggr, checkerFactory, true)

		ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(ethNodeNonce, nil).Once()
		servicetest.Run(t, eb)

		testutils.WaitForLogMessage(t, observed, "Fast-forward sequence")

		// Check nextSequenceMap to make sure it has correct nonce assigned
		nonce, err := eb.GetNextSequence(ctx, fromAddress)
		require.NoError(t, err)
		require.Equal(t, strconv.FormatUint(ethNodeNonce, 10), nonce.String())

		// The disabled key did not get updated
		_, err = eb.GetNextSequence(ctx, disabledAddress)
		require.Error(t, err)
	})

	// Bump the expected node nonce and drop the logs captured so far, so the next
	// subtest observes only its own "Fast-forward sequence" message.
	ethNodeNonce++
	observed.TakeAll()

	t.Run("when nonce syncer returns error, retries and successfully sets nonce", func(t *testing.T) {
		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
		txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), ge, kst, estimator)
		txNonceSyncer := txmgr.NewNonceSyncer(txStore, lggr, ethClient)

		kst := ksmocks.NewEth(t)
		addresses := []gethCommon.Address{fromAddress}
		kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Once()
		ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()

		eb := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), evmTxmCfg, txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), evmcfg.EVM().Transactions(), cfg.Database().Listener(), kst, txBuilder, txNonceSyncer, lggr, checkerFactory, true)
		eb.XXXTestDisableUnstartedTxAutoProcessing()

		// First sync attempt errors; the follow-up call succeeds with the node nonce.
		ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), errors.New("something exploded")).Once()
		ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(ethNodeNonce, nil)

		servicetest.Run(t, eb)

		testutils.WaitForLogMessage(t, observed, "Fast-forward sequence")

		// Check keyState to make sure it has correct nonce assigned
		nonce, err := eb.GetNextSequence(ctx, fromAddress)
		require.NoError(t, err)
		assert.Equal(t, int64(ethNodeNonce), int64(nonce))

		// The disabled key did not get updated
		_, err = eb.GetNextSequence(ctx, disabledAddress)
		require.Error(t, err)
	})
}

// Test_LoadSequenceMap verifies how the broadcaster seeds its in-memory nonce map:
// from existing tx-table entries when present (nonces 0 and 1 yield next nonce 2),
// otherwise from the client's PendingNonceAt.
func Test_LoadSequenceMap(t *testing.T) {
	t.Parallel()
	ctx := testutils.Context(t)
	t.Run("set next nonce using entries from tx table", func(t *testing.T) {
		db := pgtest.NewSqlxDB(t)
		cfg := configtest.NewTestGeneralConfig(t)
		txStore := cltest.NewTestTxStore(t, db, cfg.Database())
		ks := cltest.NewKeyStore(t, db, cfg.Database()).Eth()

		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
		evmcfg := evmtest.NewChainScopedConfig(t, cfg)
		checkerFactory := &txmgr.CheckerFactory{Client: ethClient}
		_, fromAddress := cltest.MustInsertRandomKey(t, ks)
		// Unconfirmed txes at nonces 0 and 1 => next nonce must resolve to 2,
		// with no PendingNonceAt expectation set on the client.
		cltest.MustInsertUnconfirmedEthTx(t, txStore, int64(0), fromAddress)
		cltest.MustInsertUnconfirmedEthTx(t, txStore, int64(1), fromAddress)
		eb := NewTestEthBroadcaster(t, txStore, ethClient, ks, evmcfg, checkerFactory, false)

		nonce, err := eb.GetNextSequence(ctx, fromAddress)
		require.NoError(t, err)
		require.Equal(t, int64(2), int64(nonce))
	})

	t.Run("set next nonce using client when not found in tx table", func(t *testing.T) {
		db := pgtest.NewSqlxDB(t)
		cfg := configtest.NewTestGeneralConfig(t)
		txStore := cltest.NewTestTxStore(t, db, cfg.Database())
		ks := cltest.NewKeyStore(t, db, cfg.Database()).Eth()

		ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
		evmcfg := evmtest.NewChainScopedConfig(t, cfg)
		checkerFactory := &txmgr.CheckerFactory{Client: ethClient}
		_, fromAddress := cltest.MustInsertRandomKey(t, ks)
		// Empty tx table => nonce comes from the node (10).
		ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(10), nil).Once()
		eb := NewTestEthBroadcaster(t, txStore, ethClient, ks, evmcfg, checkerFactory,
false)

		nonce, err := eb.GetNextSequence(ctx, fromAddress)
		require.NoError(t, err)
		require.Equal(t, int64(10), int64(nonce))
	})
}

// Test_NextNonce checks GetNextSequence: an enabled key returns the nonce seeded
// from PendingNonceAt, while addresses never registered with the broadcaster
// return an "address disabled" error.
func Test_NextNonce(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := configtest.NewTestGeneralConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ks := cltest.NewKeyStore(t, db, cfg.Database()).Eth()

	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
	evmcfg := evmtest.NewChainScopedConfig(t, cfg)
	checkerFactory := &txmgr.CheckerFactory{Client: ethClient}
	randNonce := testutils.NewRandomPositiveInt64()
	_, addr1 := cltest.MustInsertRandomKey(t, ks)
	ethClient.On("PendingNonceAt", mock.Anything, addr1).Return(uint64(randNonce), nil).Once()
	eb := NewTestEthBroadcaster(t, txStore, ethClient, ks, evmcfg, checkerFactory, false)
	ctx := testutils.Context(t)
	// Extra key on the fixture chain; must not affect addr1's bookkeeping.
	cltest.MustInsertRandomKey(t, ks, *ubig.New(testutils.FixtureChainID))

	nonce, err := eb.GetNextSequence(ctx, addr1)
	require.NoError(t, err)
	require.Equal(t, randNonce, int64(nonce))

	randAddr1 := utils.RandomAddress()
	_, err = eb.GetNextSequence(ctx, randAddr1)
	require.Error(t, err)
	require.Contains(t, err.Error(), fmt.Sprintf("address disabled: %s", randAddr1.Hex()))

	randAddr2 := utils.RandomAddress()
	_, err = eb.GetNextSequence(ctx, randAddr2)
	require.Error(t, err)
	require.Contains(t, err.Error(), fmt.Sprintf("address disabled: %s", randAddr2.Hex()))

}

// Test_SetNonceAfterInit verifies that a failed initial PendingNonceAt is retried
// (two one-shot mock expectations below) and that the fetched nonce is then served
// from the in-memory map — no further client calls are expected.
func Test_SetNonceAfterInit(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := configtest.NewTestGeneralConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ks := cltest.NewKeyStore(t, db, cfg.Database()).Eth()

	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
	evmcfg := evmtest.NewChainScopedConfig(t, cfg)
	checkerFactory := &txmgr.CheckerFactory{Client: ethClient}
	randNonce := testutils.NewRandomPositiveInt64()
	_, addr1 := cltest.MustInsertRandomKey(t, ks)
	// First nonce fetch fails, the second succeeds; both are .Once().
	ethClient.On("PendingNonceAt", mock.Anything, addr1).Return(uint64(0), errors.New("failed to retrieve nonce at startup")).Once()
	ethClient.On("PendingNonceAt", mock.Anything, addr1).Return(uint64(randNonce), nil).Once()
	eb := NewTestEthBroadcaster(t, txStore, ethClient, ks, evmcfg, checkerFactory, false)

	ctx := testutils.Context(t)
	nonce, err := eb.GetNextSequence(ctx, addr1)
	require.NoError(t, err)
	require.Equal(t, randNonce, int64(nonce))

	// Test that the new nonce is set in the map and does not need a client call to retrieve on subsequent calls
	nonce, err = eb.GetNextSequence(ctx, addr1)
	require.NoError(t, err)
	require.Equal(t, randNonce, int64(nonce))
}

// Test_IncrementNextNonce verifies that IncrementNextSequence bumps the cached
// nonce by one per call, and that a failed lookup for an unknown address leaves
// the cached value untouched.
func Test_IncrementNextNonce(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := configtest.NewTestGeneralConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ks := cltest.NewKeyStore(t, db, cfg.Database()).Eth()

	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
	evmcfg := evmtest.NewChainScopedConfig(t, cfg)
	checkerFactory := &txmgr.CheckerFactory{Client: ethClient}
	randNonce := testutils.NewRandomPositiveInt64()
	_, addr1 := cltest.MustInsertRandomKey(t, ks)
	ethClient.On("PendingNonceAt", mock.Anything, addr1).Return(uint64(randNonce), nil).Once()
	eb := NewTestEthBroadcaster(t, txStore, ethClient, ks, evmcfg, checkerFactory, false)

	ctx := testutils.Context(t)
	nonce, err := eb.GetNextSequence(ctx, addr1)
	require.NoError(t, err)
	eb.IncrementNextSequence(addr1, nonce)

	nonce, err = eb.GetNextSequence(ctx, addr1)
	require.NoError(t, err)
	assert.Equal(t, randNonce+1, int64(nonce))

	eb.IncrementNextSequence(addr1, nonce)
	nonce, err = eb.GetNextSequence(ctx, addr1)
	require.NoError(t, err)
	assert.Equal(t, randNonce+2, int64(nonce))

	randAddr1 := utils.RandomAddress()
	_, err = eb.GetNextSequence(ctx, randAddr1)
	require.Error(t, err)
	assert.Contains(t, err.Error(), fmt.Sprintf("address disabled: %s",
randAddr1.Hex()))

	// verify it didnt get changed by any erroring calls
	nonce, err = eb.GetNextSequence(ctx, addr1)
	require.NoError(t, err)
	assert.Equal(t, randNonce+2, int64(nonce))
}

// Test_SetNextNonce verifies that SetNextSequence overwrites the cached next nonce.
func Test_SetNextNonce(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := configtest.NewTestGeneralConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ks := cltest.NewKeyStore(t, db, cfg.Database()).Eth()

	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
	evmcfg := evmtest.NewChainScopedConfig(t, cfg)
	checkerFactory := &txmgr.CheckerFactory{Client: ethClient}
	_, fromAddress := cltest.MustInsertRandomKey(t, ks)
	ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
	eb := NewTestEthBroadcaster(t, txStore, ethClient, ks, evmcfg, checkerFactory, false)
	ctx := testutils.Context(t)

	t.Run("update next nonce", func(t *testing.T) {
		// Starts at 0 (from PendingNonceAt), then is explicitly set to 24.
		nonce, err := eb.GetNextSequence(ctx, fromAddress)
		require.NoError(t, err)
		assert.Equal(t, int64(0), int64(nonce))
		eb.SetNextSequence(fromAddress, evmtypes.Nonce(24))

		newNextNonce, err := eb.GetNextSequence(ctx, fromAddress)
		require.NoError(t, err)
		assert.Equal(t, int64(24), int64(newNextNonce))
	})
}

// testCheckerFactory is a test stub whose checkers always return the configured err.
type testCheckerFactory struct {
	err error // error every built checker will return from Check (nil => pass)
}

// BuildChecker returns a testChecker carrying the factory's error; it never fails itself.
func (t *testCheckerFactory) BuildChecker(spec txmgr.TransmitCheckerSpec) (txmgr.TransmitChecker, error) {
	return &testChecker{t.err}, nil
}

// testChecker is a stub transmit checker returning a fixed error.
type testChecker struct {
	err error
}

// Check ignores all inputs and returns the configured error.
func (t *testChecker) Check(
	_ context.Context,
	_ logger.SugaredLogger,
	_ txmgr.Tx,
	_ txmgr.TxAttempt,
) error {
	return t.err
}
diff --git a/core/chains/evm/txmgr/builder.go b/core/chains/evm/txmgr/builder.go
new file mode 100644
index 00000000..b6f4b09e
--- /dev/null
+++ b/core/chains/evm/txmgr/builder.go
@@ -0,0 +1,145 @@
package txmgr

import (
	"math/big"
	"time"

	"github.com/jmoiron/sqlx"

	"github.com/goplugin/plugin-common/pkg/logger"
"github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" +) + +// NewTxm constructs the necessary dependencies for the EvmTxm (broadcaster, confirmer, etc) and returns a new EvmTxManager +func NewTxm( + db *sqlx.DB, + chainConfig ChainConfig, + fCfg FeeConfig, + txConfig config.Transactions, + dbConfig DatabaseConfig, + listenerConfig ListenerConfig, + client evmclient.Client, + lggr logger.Logger, + logPoller logpoller.LogPoller, + keyStore keystore.Eth, + estimator gas.EvmFeeEstimator, +) (txm TxManager, + err error, +) { + var fwdMgr FwdMgr + + if txConfig.ForwardersEnabled() { + fwdMgr = forwarders.NewFwdMgr(db, client, logPoller, lggr, chainConfig, dbConfig) + } else { + lggr.Info("EvmForwarderManager: Disabled") + } + checker := &CheckerFactory{Client: client} + // create tx attempt builder + txAttemptBuilder := NewEvmTxAttemptBuilder(*client.ConfiguredChainID(), fCfg, keyStore, estimator) + txStore := NewTxStore(db, lggr, dbConfig) + txNonceSyncer := NewNonceSyncer(txStore, lggr, client) + + txmCfg := NewEvmTxmConfig(chainConfig) // wrap Evm specific config + feeCfg := NewEvmTxmFeeConfig(fCfg) // wrap Evm specific config + txmClient := NewEvmTxmClient(client) // wrap Evm specific client + chainID := txmClient.ConfiguredChainID() + evmBroadcaster := NewEvmBroadcaster(txStore, txmClient, txmCfg, feeCfg, txConfig, listenerConfig, keyStore, txAttemptBuilder, txNonceSyncer, lggr, checker, chainConfig.NonceAutoSync()) + evmTracker := NewEvmTracker(txStore, 
keyStore, chainID, lggr) + evmConfirmer := NewEvmConfirmer(txStore, txmClient, txmCfg, feeCfg, txConfig, dbConfig, keyStore, txAttemptBuilder, lggr) + var evmResender *Resender + if txConfig.ResendAfterThreshold() > 0 { + evmResender = NewEvmResender(lggr, txStore, txmClient, evmTracker, keyStore, txmgr.DefaultResenderPollInterval, chainConfig, txConfig) + } + txm = NewEvmTxm(chainID, txmCfg, txConfig, keyStore, lggr, checker, fwdMgr, txAttemptBuilder, txStore, txNonceSyncer, evmBroadcaster, evmConfirmer, evmResender, evmTracker) + return txm, nil +} + +// NewEvmTxm creates a new concrete EvmTxm +func NewEvmTxm( + chainId *big.Int, + cfg txmgrtypes.TransactionManagerChainConfig, + txCfg txmgrtypes.TransactionManagerTransactionsConfig, + keyStore KeyStore, + lggr logger.Logger, + checkerFactory TransmitCheckerFactory, + fwdMgr FwdMgr, + txAttemptBuilder TxAttemptBuilder, + txStore TxStore, + nonceSyncer NonceSyncer, + broadcaster *Broadcaster, + confirmer *Confirmer, + resender *Resender, + tracker *Tracker, +) *Txm { + return txmgr.NewTxm(chainId, cfg, txCfg, keyStore, lggr, checkerFactory, fwdMgr, txAttemptBuilder, txStore, nonceSyncer, broadcaster, confirmer, resender, tracker) +} + +// NewEvmResender creates a new concrete EvmResender +func NewEvmResender( + lggr logger.Logger, + txStore TransactionStore, + client TransactionClient, + tracker *Tracker, + ks KeyStore, + pollInterval time.Duration, + config EvmResenderConfig, + txConfig txmgrtypes.ResenderTransactionsConfig, +) *Resender { + return txmgr.NewResender(lggr, txStore, client, tracker, ks, pollInterval, config, txConfig) +} + +// NewEvmReaper instantiates a new EVM-specific reaper object +func NewEvmReaper(lggr logger.Logger, store txmgrtypes.TxHistoryReaper[*big.Int], config EvmReaperConfig, txConfig txmgrtypes.ReaperTransactionsConfig, chainID *big.Int) *Reaper { + return txmgr.NewReaper(lggr, store, config, txConfig, chainID) +} + +// NewEvmConfirmer instantiates a new EVM confirmer +func 
NewEvmConfirmer( + txStore TxStore, + client TxmClient, + chainConfig txmgrtypes.ConfirmerChainConfig, + feeConfig txmgrtypes.ConfirmerFeeConfig, + txConfig txmgrtypes.ConfirmerTransactionsConfig, + dbConfig txmgrtypes.ConfirmerDatabaseConfig, + keystore KeyStore, + txAttemptBuilder TxAttemptBuilder, + lggr logger.Logger, +) *Confirmer { + return txmgr.NewConfirmer(txStore, client, chainConfig, feeConfig, txConfig, dbConfig, keystore, txAttemptBuilder, lggr, func(r *evmtypes.Receipt) bool { return r == nil }) +} + +// NewEvmTracker instantiates a new EVM tracker for abandoned transactions +func NewEvmTracker( + txStore TxStore, + keyStore KeyStore, + chainID *big.Int, + lggr logger.Logger, +) *Tracker { + return txmgr.NewTracker(txStore, keyStore, chainID, lggr) +} + +// NewEvmBroadcaster returns a new concrete EvmBroadcaster +func NewEvmBroadcaster( + txStore TransactionStore, + client TransactionClient, + chainConfig txmgrtypes.BroadcasterChainConfig, + feeConfig txmgrtypes.BroadcasterFeeConfig, + txConfig txmgrtypes.BroadcasterTransactionsConfig, + listenerConfig txmgrtypes.BroadcasterListenerConfig, + keystore KeyStore, + txAttemptBuilder TxAttemptBuilder, + nonceSyncer NonceSyncer, + logger logger.Logger, + checkerFactory TransmitCheckerFactory, + autoSyncNonce bool, +) *Broadcaster { + return txmgr.NewBroadcaster(txStore, client, chainConfig, feeConfig, txConfig, listenerConfig, keystore, txAttemptBuilder, nonceSyncer, logger, checkerFactory, autoSyncNonce, evmtypes.GenerateNextNonce) +} diff --git a/core/chains/evm/txmgr/client.go b/core/chains/evm/txmgr/client.go new file mode 100644 index 00000000..f8c32d32 --- /dev/null +++ b/core/chains/evm/txmgr/client.go @@ -0,0 +1,183 @@ +package txmgr + +import ( + "context" + "errors" + "fmt" + "math" + "math/big" + "sync" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/goplugin/plugin-common/pkg/logger" + 
"github.com/goplugin/plugin-common/pkg/utils" + + commonclient "github.com/goplugin/pluginv3.0/v2/common/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +var _ TxmClient = (*evmTxmClient)(nil) + +type evmTxmClient struct { + client client.Client +} + +func NewEvmTxmClient(c client.Client) *evmTxmClient { + return &evmTxmClient{client: c} +} + +func (c *evmTxmClient) PendingSequenceAt(ctx context.Context, addr common.Address) (evmtypes.Nonce, error) { + return c.PendingNonceAt(ctx, addr) +} + +func (c *evmTxmClient) ConfiguredChainID() *big.Int { + return c.client.ConfiguredChainID() +} + +func (c *evmTxmClient) BatchSendTransactions( + ctx context.Context, + attempts []TxAttempt, + batchSize int, + lggr logger.SugaredLogger, +) ( + codes []commonclient.SendTxReturnCode, + txErrs []error, + broadcastTime time.Time, + successfulTxIDs []int64, + err error, +) { + // preallocate + codes = make([]commonclient.SendTxReturnCode, len(attempts)) + txErrs = make([]error, len(attempts)) + + reqs, broadcastTime, successfulTxIDs, batchErr := batchSendTransactions(ctx, attempts, batchSize, lggr, c.client) + err = errors.Join(err, batchErr) // this error does not block processing + + // safety check - exits before processing + if len(reqs) != len(attempts) { + lenErr := fmt.Errorf("Returned request data length (%d) != number of tx attempts (%d)", len(reqs), len(attempts)) + err = errors.Join(err, lenErr) + lggr.Criticalw("Mismatched length", "err", err) + return + } + + // for each batched tx convert response to standard error code + var wg sync.WaitGroup + wg.Add(len(reqs)) + processingErr := make([]error, len(attempts)) + for index := range reqs { + go func(i int) { + defer wg.Done() + + // convert to tx for logging purposes - exits early if error occurs + tx, signedErr := GetGethSignedTx(attempts[i].SignedRawTx) + if signedErr 
!= nil { + signedErrMsg := fmt.Sprintf("failed to process tx (index %d)", i) + lggr.Errorw(signedErrMsg, "err", signedErr) + processingErr[i] = fmt.Errorf("%s: %w", signedErrMsg, signedErr) + return + } + sendErr := reqs[i].Error + codes[i] = client.ClassifySendError(sendErr, lggr, tx, attempts[i].Tx.FromAddress, c.client.IsL2()) + txErrs[i] = sendErr + }(index) + } + wg.Wait() + err = errors.Join(err, errors.Join(processingErr...)) // merge errors together + return +} + +func (c *evmTxmClient) SendTransactionReturnCode(ctx context.Context, etx Tx, attempt TxAttempt, lggr logger.SugaredLogger) (commonclient.SendTxReturnCode, error) { + signedTx, err := GetGethSignedTx(attempt.SignedRawTx) + if err != nil { + lggr.Criticalw("Fatal error signing transaction", "err", err, "etx", etx) + return commonclient.Fatal, err + } + return c.client.SendTransactionReturnCode(ctx, signedTx, etx.FromAddress) +} + +func (c *evmTxmClient) PendingNonceAt(ctx context.Context, fromAddress common.Address) (n evmtypes.Nonce, err error) { + nextNonce, err := c.client.PendingNonceAt(ctx, fromAddress) + if err != nil { + return n, err + } + + if nextNonce > math.MaxInt64 { + return n, fmt.Errorf("nonce overflow, got: %v", nextNonce) + } + return evmtypes.Nonce(nextNonce), nil +} + +func (c *evmTxmClient) SequenceAt(ctx context.Context, addr common.Address, blockNum *big.Int) (evmtypes.Nonce, error) { + return c.client.SequenceAt(ctx, addr, blockNum) +} + +func (c *evmTxmClient) BatchGetReceipts(ctx context.Context, attempts []TxAttempt) (txReceipt []*evmtypes.Receipt, txErr []error, funcErr error) { + var reqs []rpc.BatchElem + for _, attempt := range attempts { + res := &evmtypes.Receipt{} + req := rpc.BatchElem{ + Method: "eth_getTransactionReceipt", + Args: []interface{}{attempt.Hash}, + Result: res, + } + txReceipt = append(txReceipt, res) + reqs = append(reqs, req) + } + + if err := c.client.BatchCallContext(ctx, reqs); err != nil { + return nil, nil, 
fmt.Errorf("EthConfirmer#batchFetchReceipts error fetching receipts with BatchCallContext: %w", err) + } + + for _, req := range reqs { + txErr = append(txErr, req.Error) + } + return txReceipt, txErr, nil +} + +// sendEmptyTransaction sends a transaction with 0 Eth and an empty payload to the burn address +// May be useful for clearing stuck nonces +func (c *evmTxmClient) SendEmptyTransaction( + ctx context.Context, + newTxAttempt func(seq evmtypes.Nonce, feeLimit uint32, fee gas.EvmFee, fromAddress common.Address) (attempt TxAttempt, err error), + seq evmtypes.Nonce, + gasLimit uint32, + fee gas.EvmFee, + fromAddress common.Address, +) (txhash string, err error) { + defer utils.WrapIfError(&err, "sendEmptyTransaction failed") + + attempt, err := newTxAttempt(seq, gasLimit, fee, fromAddress) + if err != nil { + return txhash, err + } + + signedTx, err := GetGethSignedTx(attempt.SignedRawTx) + if err != nil { + return txhash, err + } + + _, err = c.client.SendTransactionReturnCode(ctx, signedTx, fromAddress) + return signedTx.Hash().String(), err +} + +func (c *evmTxmClient) CallContract(ctx context.Context, a TxAttempt, blockNumber *big.Int) (rpcErr fmt.Stringer, extractErr error) { + _, errCall := c.client.CallContract(ctx, ethereum.CallMsg{ + From: a.Tx.FromAddress, + To: &a.Tx.ToAddress, + Gas: uint64(a.Tx.FeeLimit), + GasPrice: a.TxFee.Legacy.ToInt(), + GasFeeCap: a.TxFee.DynamicFeeCap.ToInt(), + GasTipCap: a.TxFee.DynamicTipCap.ToInt(), + Value: nil, + Data: a.Tx.EncodedPayload, + AccessList: nil, + }, blockNumber) + return client.ExtractRPCError(errCall) +} diff --git a/core/chains/evm/txmgr/common.go b/core/chains/evm/txmgr/common.go new file mode 100644 index 00000000..6d606539 --- /dev/null +++ b/core/chains/evm/txmgr/common.go @@ -0,0 +1,82 @@ +package txmgr + +import ( + "context" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + 
"github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" +) + +// Tries to send transactions in batches. Even if some batch(es) fail to get sent, it tries all remaining batches, +// before returning with error for the latest batch send. If a batch send fails, this sets the error on all +// elements in that batch. +func batchSendTransactions( + ctx context.Context, + attempts []TxAttempt, + batchSize int, + logger logger.Logger, + ethClient evmclient.Client, +) ( + []rpc.BatchElem, + time.Time, // batch broadcast time + []int64, // successfully broadcast tx IDs + error) { + if len(attempts) == 0 { + return nil, time.Now(), nil, nil + } + + reqs := make([]rpc.BatchElem, len(attempts)) + ethTxIDs := make([]int64, len(attempts)) + hashes := make([]string, len(attempts)) + now := time.Now() + successfulBroadcast := []int64{} + for i, attempt := range attempts { + ethTxIDs[i] = attempt.TxID + hashes[i] = attempt.Hash.String() + // Decode the signed raw tx back into a Transaction object + signedTx, decodeErr := GetGethSignedTx(attempt.SignedRawTx) + if decodeErr != nil { + return reqs, now, successfulBroadcast, fmt.Errorf("failed to decode signed raw tx into Transaction object: %w", decodeErr) + } + // Get the canonical encoding of the Transaction object needed for the eth_sendRawTransaction request + // The signed raw tx cannot be used directly because it uses a different encoding + txBytes, marshalErr := signedTx.MarshalBinary() + if marshalErr != nil { + return reqs, now, successfulBroadcast, fmt.Errorf("failed to marshal tx into canonical encoding: %w", marshalErr) + } + req := rpc.BatchElem{ + Method: "eth_sendRawTransaction", + Args: []interface{}{hexutil.Encode(txBytes)}, + Result: &common.Hash{}, + } + reqs[i] = req + } + + logger.Debugw(fmt.Sprintf("Batch sending %d unconfirmed transactions.", len(attempts)), "n", len(attempts), "ethTxIDs", ethTxIDs, "hashes", hashes) + + if 
batchSize == 0 { + batchSize = len(reqs) + } + + for i := 0; i < len(reqs); i += batchSize { + j := i + batchSize + if j > len(reqs) { + j = len(reqs) + } + + logger.Debugw(fmt.Sprintf("Batch sending transactions %v thru %v", i, j)) + + if err := ethClient.BatchCallContextAll(ctx, reqs[i:j]); err != nil { + return reqs, now, successfulBroadcast, errors.Wrap(err, "failed to batch send transactions") + } + successfulBroadcast = append(successfulBroadcast, ethTxIDs[i:j]...) + } + return reqs, now, successfulBroadcast, nil +} diff --git a/core/chains/evm/txmgr/config.go b/core/chains/evm/txmgr/config.go new file mode 100644 index 00000000..a3bcd23c --- /dev/null +++ b/core/chains/evm/txmgr/config.go @@ -0,0 +1,80 @@ +package txmgr + +import ( + "time" + + gethcommon "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/common/config" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" +) + +// ChainConfig encompasses config used by txmgr package +// Unless otherwise specified, these should support changing at runtime +// +//go:generate mockery --quiet --recursive --name ChainConfig --output ./mocks/ --case=underscore --structname Config --filename config.go +type ChainConfig interface { + ChainType() config.ChainType + FinalityDepth() uint32 + FinalityTagEnabled() bool + NonceAutoSync() bool + RPCDefaultBatchSize() uint32 +} + +type FeeConfig interface { + EIP1559DynamicFees() bool + BumpPercent() uint16 + BumpThreshold() uint64 + BumpTxDepth() uint32 + LimitDefault() uint32 + PriceDefault() *assets.Wei + TipCapMin() *assets.Wei + PriceMax() *assets.Wei + PriceMin() *assets.Wei + PriceMaxKey(gethcommon.Address) *assets.Wei +} + +type DatabaseConfig interface { + DefaultQueryTimeout() time.Duration + LogSQL() bool +} + +type ListenerConfig interface { + FallbackPollInterval() time.Duration +} + +type ( + EvmTxmConfig txmgrtypes.TransactionManagerChainConfig + 
EvmTxmFeeConfig txmgrtypes.TransactionManagerFeeConfig + EvmBroadcasterConfig txmgrtypes.BroadcasterChainConfig + EvmConfirmerConfig txmgrtypes.ConfirmerChainConfig + EvmResenderConfig txmgrtypes.ResenderChainConfig + EvmReaperConfig txmgrtypes.ReaperChainConfig +) + +var _ EvmTxmConfig = (*evmTxmConfig)(nil) + +type evmTxmConfig struct { + ChainConfig +} + +func NewEvmTxmConfig(c ChainConfig) *evmTxmConfig { + return &evmTxmConfig{c} +} + +func (c evmTxmConfig) IsL2() bool { return c.ChainType().IsL2() } + +var _ EvmTxmFeeConfig = (*evmTxmFeeConfig)(nil) + +type evmTxmFeeConfig struct { + FeeConfig +} + +func NewEvmTxmFeeConfig(c FeeConfig) *evmTxmFeeConfig { + return &evmTxmFeeConfig{c} +} + +func (c evmTxmFeeConfig) MaxFeePrice() string { return c.PriceMax().String() } + +func (c evmTxmFeeConfig) FeePriceDefault() string { return c.PriceDefault().String() } diff --git a/core/chains/evm/txmgr/confirmer_test.go b/core/chains/evm/txmgr/confirmer_test.go new file mode 100644 index 00000000..ad999a9f --- /dev/null +++ b/core/chains/evm/txmgr/confirmer_test.go @@ -0,0 +1,3105 @@ +package txmgr_test + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "reflect" + "testing" + "time" + + "github.com/google/uuid" + pkgerrors "github.com/pkg/errors" + + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + commonclient "github.com/goplugin/pluginv3.0/v2/common/client" + commonfee "github.com/goplugin/pluginv3.0/v2/common/fee" + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + gasmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + ksmocks "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" +) + +func newTestChainScopedConfig(t *testing.T) evmconfig.ChainScopedConfig { + cfg := configtest.NewTestGeneralConfig(t) + return evmtest.NewChainScopedConfig(t, cfg) +} + +func newBroadcastLegacyEthTxAttempt(t *testing.T, etxID int64, gasPrice ...int64) txmgr.TxAttempt { + attempt := cltest.NewLegacyEthTxAttempt(t, etxID) + attempt.State = txmgrtypes.TxAttemptBroadcast + if len(gasPrice) > 0 { + gp := gasPrice[0] + attempt.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(gp)} + } + return attempt +} + +func mustTxBeInState(t *testing.T, txStore txmgr.TestEvmTxStore, tx txmgr.Tx, expectedState txmgrtypes.TxState) { + etx, err := txStore.FindTxWithAttempts(tx.ID) + require.NoError(t, err) + require.Equal(t, expectedState, etx.State) +} + +func newTxReceipt(hash gethCommon.Hash, blockNumber int, txIndex uint) evmtypes.Receipt { + return evmtypes.Receipt{ + TxHash: hash, + BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(int64(blockNumber)), + 
TransactionIndex: txIndex, + Status: uint64(1), + } +} + +func newInProgressLegacyEthTxAttempt(t *testing.T, etxID int64, gasPrice ...int64) txmgr.TxAttempt { + attempt := cltest.NewLegacyEthTxAttempt(t, etxID) + attempt.State = txmgrtypes.TxAttemptInProgress + if len(gasPrice) > 0 { + gp := gasPrice[0] + attempt.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(gp)} + } + return attempt +} + +func mustInsertInProgressEthTx(t *testing.T, txStore txmgr.TestEvmTxStore, nonce int64, fromAddress gethCommon.Address) txmgr.Tx { + etx := cltest.NewEthTx(fromAddress) + etx.State = txmgrcommon.TxInProgress + n := evmtypes.Nonce(nonce) + etx.Sequence = &n + require.NoError(t, txStore.InsertTx(&etx)) + + return etx +} + +func mustInsertConfirmedEthTx(t *testing.T, txStore txmgr.TestEvmTxStore, nonce int64, fromAddress gethCommon.Address) txmgr.Tx { + etx := cltest.NewEthTx(fromAddress) + etx.State = txmgrcommon.TxConfirmed + n := evmtypes.Nonce(nonce) + etx.Sequence = &n + now := time.Now() + etx.BroadcastAt = &now + etx.InitialBroadcastAt = &now + require.NoError(t, txStore.InsertTx(&etx)) + + return etx +} + +func TestEthConfirmer_Lifecycle(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + config := newTestChainScopedConfig(t) + txStore := newTxStore(t, db, config.Database()) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + // Add some fromAddresses + cltest.MustInsertRandomKey(t, ethKeyStore) + cltest.MustInsertRandomKey(t, ethKeyStore) + estimator := gasmocks.NewEvmEstimator(t) + newEst := func(logger.Logger) gas.EvmEstimator { return estimator } + lggr := logger.Test(t) + ge := config.EVM().GasEstimator() + feeEstimator := gas.NewWrappedEvmEstimator(lggr, newEst, ge.EIP1559DynamicFees(), nil) + txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), ge, ethKeyStore, feeEstimator) + ec := txmgr.NewEvmConfirmer(txStore, txmgr.NewEvmTxmClient(ethClient), 
txmgr.NewEvmTxmConfig(config.EVM()), txmgr.NewEvmTxmFeeConfig(ge), config.EVM().Transactions(), config.Database(), ethKeyStore, txBuilder, lggr) + ctx := testutils.Context(t) + + // Can't close unstarted instance + err := ec.Close() + require.Error(t, err) + + // Can successfully start once + err = ec.Start(ctx) + require.NoError(t, err) + + // Can't start an already started instance + err = ec.Start(ctx) + require.Error(t, err) + head := evmtypes.Head{ + Hash: utils.NewHash(), + Number: 10, + Parent: &evmtypes.Head{ + Hash: utils.NewHash(), + Number: 9, + Parent: &evmtypes.Head{ + Number: 8, + Hash: utils.NewHash(), + Parent: nil, + }, + }, + } + err = ec.ProcessHead(ctx, &head) + require.NoError(t, err) + // Can successfully close once + err = ec.Close() + require.NoError(t, err) + + // Can't start more than once (Confirmer uses services.StateMachine) + err = ec.Start(ctx) + require.Error(t, err) + // Can't close more than once (Confirmer use services.StateMachine) + err = ec.Close() + require.Error(t, err) + + // Can't closeInternal unstarted instance + require.Error(t, ec.XXXTestCloseInternal()) + + // Can successfully startInternal a previously closed instance + require.NoError(t, ec.XXXTestStartInternal()) + // Can't startInternal already started instance + require.Error(t, ec.XXXTestStartInternal()) + // Can successfully closeInternal again + require.NoError(t, ec.XXXTestCloseInternal()) +} + +func TestEthConfirmer_CheckForReceipts(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + config := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, config.Database()) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + + ec := newEthConfirmer(t, txStore, ethClient, config, ethKeyStore, nil) + + nonce := int64(0) + ctx := testutils.Context(t) + blockNum := int64(0) + + t.Run("only finds eth_txes 
in unconfirmed state with at least one broadcast attempt", func(t *testing.T) { + mustInsertFatalErrorEthTx(t, txStore, fromAddress) + mustInsertInProgressEthTx(t, txStore, nonce, fromAddress) + nonce++ + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, nonce, 1, fromAddress) + nonce++ + mustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, nonce, fromAddress) + nonce++ + mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, config.EVM().ChainID()) + + // Do the thing + require.NoError(t, ec.CheckForReceipts(ctx, blockNum)) + }) + + etx1 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + nonce++ + require.Len(t, etx1.TxAttempts, 1) + attempt1_1 := etx1.TxAttempts[0] + hashAttempt1_1 := attempt1_1.Hash + require.Len(t, attempt1_1.Receipts, 0) + + t.Run("fetches receipt for one unconfirmed eth_tx", func(t *testing.T) { + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + // Transaction not confirmed yet, receipt is nil + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && cltest.BatchElemMatchesParams(b[0], hashAttempt1_1, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &evmtypes.Receipt{} + }).Once() + + // Do the thing + require.NoError(t, ec.CheckForReceipts(ctx, blockNum)) + + var err error + etx1, err = txStore.FindTxWithAttempts(etx1.ID) + assert.NoError(t, err) + require.Len(t, etx1.TxAttempts, 1) + attempt1_1 = etx1.TxAttempts[0] + require.NoError(t, err) + require.Len(t, attempt1_1.Receipts, 0) + }) + + t.Run("saves nothing if returned receipt does not match the attempt", func(t *testing.T) { + txmReceipt := evmtypes.Receipt{ + TxHash: utils.NewHash(), + BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(42), + TransactionIndex: uint(1), + } + + ethClient.On("SequenceAt", 
mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + // First transaction confirmed + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && cltest.BatchElemMatchesParams(b[0], hashAttempt1_1, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + *(elems[0].Result.(*evmtypes.Receipt)) = txmReceipt + }).Once() + + // No error because it is merely logged + require.NoError(t, ec.CheckForReceipts(ctx, blockNum)) + + etx, err := txStore.FindTxWithAttempts(etx1.ID) + require.NoError(t, err) + require.Len(t, etx.TxAttempts, 1) + + require.Len(t, etx.TxAttempts[0].Receipts, 0) + }) + + t.Run("saves nothing if query returns error", func(t *testing.T) { + txmReceipt := evmtypes.Receipt{ + TxHash: attempt1_1.Hash, + BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(42), + TransactionIndex: uint(1), + } + + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + // First transaction confirmed + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && cltest.BatchElemMatchesParams(b[0], hashAttempt1_1, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + *(elems[0].Result.(*evmtypes.Receipt)) = txmReceipt + elems[0].Error = errors.New("foo") + }).Once() + + // No error because it is merely logged + require.NoError(t, ec.CheckForReceipts(ctx, blockNum)) + + etx, err := txStore.FindTxWithAttempts(etx1.ID) + require.NoError(t, err) + require.Len(t, etx.TxAttempts, 1) + require.Len(t, etx.TxAttempts[0].Receipts, 0) + }) + + etx2 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + nonce++ + require.Len(t, etx2.TxAttempts, 1) + attempt2_1 := etx2.TxAttempts[0] + require.Len(t, attempt2_1.Receipts, 0) + + 
t.Run("saves eth_receipt and marks eth_tx as confirmed when geth client returns valid receipt", func(t *testing.T) { + txmReceipt := evmtypes.Receipt{ + TxHash: attempt1_1.Hash, + BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(42), + TransactionIndex: uint(1), + Status: uint64(1), + } + + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + cltest.BatchElemMatchesParams(b[0], attempt1_1.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[1], attempt2_1.Hash, "eth_getTransactionReceipt") + + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // First transaction confirmed + *(elems[0].Result.(*evmtypes.Receipt)) = txmReceipt + // Second transaction still unconfirmed + elems[1].Result = &evmtypes.Receipt{} + }).Once() + + // Do the thing + require.NoError(t, ec.CheckForReceipts(ctx, blockNum)) + + // Check that the receipt was saved + etx, err := txStore.FindTxWithAttempts(etx1.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxConfirmed, etx.State) + assert.Len(t, etx.TxAttempts, 1) + attempt1_1 = etx.TxAttempts[0] + require.Len(t, attempt1_1.Receipts, 1) + + ethReceipt := attempt1_1.Receipts[0] + + assert.Equal(t, txmReceipt.TxHash, ethReceipt.GetTxHash()) + assert.Equal(t, txmReceipt.BlockHash, ethReceipt.GetBlockHash()) + assert.Equal(t, txmReceipt.BlockNumber.Int64(), ethReceipt.GetBlockNumber().Int64()) + assert.Equal(t, txmReceipt.TransactionIndex, ethReceipt.GetTransactionIndex()) + + receiptJSON, err := json.Marshal(txmReceipt) + require.NoError(t, err) + + j, err := json.Marshal(ethReceipt) + require.NoError(t, err) + assert.JSONEq(t, string(receiptJSON), string(j)) + }) + + t.Run("fetches and saves receipts for several attempts in gas price order", func(t *testing.T) { + attempt2_2 := 
newBroadcastLegacyEthTxAttempt(t, etx2.ID) + attempt2_2.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(10)} + + attempt2_3 := newBroadcastLegacyEthTxAttempt(t, etx2.ID) + attempt2_3.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(20)} + + // Insert order deliberately reversed to test sorting by gas price + require.NoError(t, txStore.InsertTxAttempt(&attempt2_3)) + require.NoError(t, txStore.InsertTxAttempt(&attempt2_2)) + + txmReceipt := evmtypes.Receipt{ + TxHash: attempt2_2.Hash, + BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(42), + TransactionIndex: uint(1), + Status: uint64(1), + } + + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 3 && + cltest.BatchElemMatchesParams(b[2], attempt2_1.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[1], attempt2_2.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[0], attempt2_3.Hash, "eth_getTransactionReceipt") + + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // Most expensive attempt still unconfirmed + elems[2].Result = &evmtypes.Receipt{} + // Second most expensive attempt is confirmed + *(elems[1].Result.(*evmtypes.Receipt)) = txmReceipt + // Cheapest attempt still unconfirmed + elems[0].Result = &evmtypes.Receipt{} + }).Once() + + // Do the thing + require.NoError(t, ec.CheckForReceipts(ctx, blockNum)) + + // Check that the state was updated + etx, err := txStore.FindTxWithAttempts(etx2.ID) + require.NoError(t, err) + + require.Equal(t, txmgrcommon.TxConfirmed, etx.State) + require.Len(t, etx.TxAttempts, 3) + }) + + etx3 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + attempt3_1 := etx3.TxAttempts[0] + nonce++ + + t.Run("ignores receipt missing BlockHash that comes from querying parity too early", func(t *testing.T) { 
+ ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + receipt := evmtypes.Receipt{ + TxHash: attempt3_1.Hash, + Status: uint64(1), + } + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && cltest.BatchElemMatchesParams(b[0], attempt3_1.Hash, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + *(elems[0].Result.(*evmtypes.Receipt)) = receipt + }).Once() + + // Do the thing + require.NoError(t, ec.CheckForReceipts(ctx, blockNum)) + + // No receipt, but no error either + etx, err := txStore.FindTxWithAttempts(etx3.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx.State) + assert.Len(t, etx.TxAttempts, 1) + attempt3_1 = etx.TxAttempts[0] + require.Len(t, attempt3_1.Receipts, 0) + }) + + t.Run("does not panic if receipt has BlockHash but is missing some other fields somehow", func(t *testing.T) { + // NOTE: This should never happen, but we shouldn't panic regardless + receipt := evmtypes.Receipt{ + TxHash: attempt3_1.Hash, + BlockHash: utils.NewHash(), + Status: uint64(1), + } + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && cltest.BatchElemMatchesParams(b[0], attempt3_1.Hash, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + *(elems[0].Result.(*evmtypes.Receipt)) = receipt + }).Once() + + // Do the thing + require.NoError(t, ec.CheckForReceipts(ctx, blockNum)) + + // No receipt, but no error either + etx, err := txStore.FindTxWithAttempts(etx3.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx.State) + assert.Len(t, etx.TxAttempts, 1) + attempt3_1 = etx.TxAttempts[0] + require.Len(t, attempt3_1.Receipts, 0) + }) + t.Run("handles case where eth_receipt already exists somehow", func(t 
*testing.T) { + ethReceipt := mustInsertEthReceipt(t, txStore, 42, utils.NewHash(), attempt3_1.Hash) + txmReceipt := evmtypes.Receipt{ + TxHash: attempt3_1.Hash, + BlockHash: ethReceipt.BlockHash, + BlockNumber: big.NewInt(ethReceipt.BlockNumber), + TransactionIndex: ethReceipt.TransactionIndex, + Status: uint64(1), + } + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && cltest.BatchElemMatchesParams(b[0], attempt3_1.Hash, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + *(elems[0].Result.(*evmtypes.Receipt)) = txmReceipt + }).Once() + + // Do the thing + require.NoError(t, ec.CheckForReceipts(ctx, blockNum)) + + // Check that the receipt was unchanged + etx, err := txStore.FindTxWithAttempts(etx3.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxConfirmed, etx.State) + assert.Len(t, etx.TxAttempts, 1) + attempt3_1 = etx.TxAttempts[0] + require.Len(t, attempt3_1.Receipts, 1) + + ethReceipt3_1 := attempt3_1.Receipts[0] + + assert.Equal(t, txmReceipt.TxHash, ethReceipt3_1.GetTxHash()) + assert.Equal(t, txmReceipt.BlockHash, ethReceipt3_1.GetBlockHash()) + assert.Equal(t, txmReceipt.BlockNumber.Int64(), ethReceipt3_1.GetBlockNumber().Int64()) + assert.Equal(t, txmReceipt.TransactionIndex, ethReceipt3_1.GetTransactionIndex()) + }) + + etx4 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + attempt4_1 := etx4.TxAttempts[0] + nonce++ + + t.Run("on receipt fetch marks in_progress eth_tx_attempt as broadcast", func(t *testing.T) { + attempt4_2 := newInProgressLegacyEthTxAttempt(t, etx4.ID) + attempt4_2.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(10)} + + require.NoError(t, txStore.InsertTxAttempt(&attempt4_2)) + + txmReceipt := evmtypes.Receipt{ + TxHash: attempt4_2.Hash, + 
BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(42), + TransactionIndex: uint(1), + Status: uint64(1), + } + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + // Second attempt is confirmed + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + cltest.BatchElemMatchesParams(b[0], attempt4_2.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[1], attempt4_1.Hash, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // First attempt still unconfirmed + elems[1].Result = &evmtypes.Receipt{} + // Second attempt is confirmed + *(elems[0].Result.(*evmtypes.Receipt)) = txmReceipt + }).Once() + + // Do the thing + require.NoError(t, ec.CheckForReceipts(ctx, blockNum)) + + // Check that the state was updated + var err error + etx4, err = txStore.FindTxWithAttempts(etx4.ID) + require.NoError(t, err) + + attempt4_1 = etx4.TxAttempts[1] + attempt4_2 = etx4.TxAttempts[0] + + // And the attempts + require.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt4_1.State) + require.Nil(t, attempt4_1.BroadcastBeforeBlockNum) + require.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt4_2.State) + require.Equal(t, int64(42), *attempt4_2.BroadcastBeforeBlockNum) + + // Check receipts + require.Len(t, attempt4_1.Receipts, 0) + require.Len(t, attempt4_2.Receipts, 1) + }) + + etx5 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + attempt5_1 := etx5.TxAttempts[0] + nonce++ + + t.Run("simulate on revert", func(t *testing.T) { + txmReceipt := evmtypes.Receipt{ + TxHash: attempt5_1.Hash, + BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(42), + TransactionIndex: uint(1), + Status: uint64(0), + } + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + // First attempt is confirmed and 
reverted + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && + cltest.BatchElemMatchesParams(b[0], attempt5_1.Hash, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // First attempt still unconfirmed + *(elems[0].Result.(*evmtypes.Receipt)) = txmReceipt + }).Once() + data, err := utils.ABIEncode(`[{"type":"uint256"}]`, big.NewInt(10)) + require.NoError(t, err) + sig := utils.Keccak256Fixed([]byte(`MyError(uint256)`)) + ethClient.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(nil, &client.JsonError{ + Code: 1, + Message: "reverted", + Data: utils.ConcatBytes(sig[:4], data), + }).Once() + + // Do the thing + require.NoError(t, ec.CheckForReceipts(ctx, blockNum)) + + // Check that the state was updated + etx5, err = txStore.FindTxWithAttempts(etx5.ID) + require.NoError(t, err) + + attempt5_1 = etx5.TxAttempts[0] + + // And the attempts + require.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt5_1.State) + require.NotNil(t, attempt5_1.BroadcastBeforeBlockNum) + // Check receipts + require.Len(t, attempt5_1.Receipts, 1) + }) +} + +func TestEthConfirmer_CheckForReceipts_batching(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].RPCDefaultBatchSize = ptr[uint32](2) + }) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, nil) + ctx := testutils.Context(t) + + etx := cltest.MustInsertUnconfirmedEthTx(t, txStore, 0, fromAddress) + var attempts []txmgr.TxAttempt + + // Total of 5 
attempts should lead to 3 batched fetches (2, 2, 1) + for i := 0; i < 5; i++ { + attempt := newBroadcastLegacyEthTxAttempt(t, etx.ID, int64(i+2)) + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + attempts = append(attempts, attempt) + } + + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + cltest.BatchElemMatchesParams(b[0], attempts[4].Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[1], attempts[3].Hash, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &evmtypes.Receipt{} + elems[1].Result = &evmtypes.Receipt{} + }).Once() + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + cltest.BatchElemMatchesParams(b[0], attempts[2].Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[1], attempts[1].Hash, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &evmtypes.Receipt{} + elems[1].Result = &evmtypes.Receipt{} + }).Once() + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && + cltest.BatchElemMatchesParams(b[0], attempts[0].Hash, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &evmtypes.Receipt{} + }).Once() + + require.NoError(t, ec.CheckForReceipts(ctx, 42)) +} + +func TestEthConfirmer_CheckForReceipts_HandlesNonFwdTxsWithForwardingEnabled(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].RPCDefaultBatchSize = ptr[uint32](1) + 
c.EVM[0].Transactions.ForwardersEnabled = ptr(true) + }) + + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, nil) + ctx := testutils.Context(t) + // tx is not forwarded and doesn't have meta set. EthConfirmer should handle nil meta values + etx := cltest.MustInsertUnconfirmedEthTx(t, txStore, 0, fromAddress) + attempt := newBroadcastLegacyEthTxAttempt(t, etx.ID, 2) + attempt.Tx.Meta = nil + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + dbtx, err := txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + require.Equal(t, 0, len(dbtx.TxAttempts[0].Receipts)) + + txmReceipt := evmtypes.Receipt{ + TxHash: attempt.Hash, + BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(42), + TransactionIndex: uint(1), + Status: uint64(1), + } + + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && + cltest.BatchElemMatchesParams(b[0], attempt.Hash, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + *(elems[0].Result.(*evmtypes.Receipt)) = txmReceipt // confirmed + }).Once() + + require.NoError(t, ec.CheckForReceipts(ctx, 42)) + + // Check receipt is inserted correctly. 
+ dbtx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + require.Equal(t, 1, len(dbtx.TxAttempts[0].Receipts)) +} + +func TestEthConfirmer_CheckForReceipts_only_likely_confirmed(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].RPCDefaultBatchSize = ptr[uint32](6) + }) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, nil) + ctx := testutils.Context(t) + + var attempts []txmgr.TxAttempt + // inserting in DESC nonce order to test DB ASC ordering + etx2 := cltest.MustInsertUnconfirmedEthTx(t, txStore, 1, fromAddress) + for i := 0; i < 4; i++ { + attempt := newBroadcastLegacyEthTxAttempt(t, etx2.ID, int64(100-i)) + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + } + etx := cltest.MustInsertUnconfirmedEthTx(t, txStore, 0, fromAddress) + for i := 0; i < 4; i++ { + attempt := newBroadcastLegacyEthTxAttempt(t, etx.ID, int64(100-i)) + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + + // only adding these because a batch for only those attempts should be sent + attempts = append(attempts, attempt) + } + + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(0), nil) + + var captured []rpc.BatchElem + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 4 + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + captured = append(captured, elems...) 
+ elems[0].Result = &evmtypes.Receipt{} + elems[1].Result = &evmtypes.Receipt{} + elems[2].Result = &evmtypes.Receipt{} + elems[3].Result = &evmtypes.Receipt{} + }).Once() + + require.NoError(t, ec.CheckForReceipts(ctx, 42)) + + cltest.BatchElemMustMatchParams(t, captured[0], attempts[0].Hash, "eth_getTransactionReceipt") + cltest.BatchElemMustMatchParams(t, captured[1], attempts[1].Hash, "eth_getTransactionReceipt") + cltest.BatchElemMustMatchParams(t, captured[2], attempts[2].Hash, "eth_getTransactionReceipt") + cltest.BatchElemMustMatchParams(t, captured[3], attempts[3].Hash, "eth_getTransactionReceipt") +} + +func TestEthConfirmer_CheckForReceipts_should_not_check_for_likely_unconfirmed(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + config := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, config.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + ec := newEthConfirmer(t, txStore, ethClient, config, ethKeyStore, nil) + ctx := testutils.Context(t) + + etx := cltest.MustInsertUnconfirmedEthTx(t, txStore, 1, fromAddress) + for i := 0; i < 4; i++ { + attempt := newBroadcastLegacyEthTxAttempt(t, etx.ID, int64(100-i)) + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + } + + // latest nonce is lower that all attempts' nonces + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(0), nil) + + require.NoError(t, ec.CheckForReceipts(ctx, 42)) +} + +func TestEthConfirmer_CheckForReceipts_confirmed_missing_receipt_scoped_to_key(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress1_1 := 
cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + _, fromAddress1_2 := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + _, fromAddress2_1 := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(20), nil) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, nil) + ctx := testutils.Context(t) + + // STATE + // key 1, tx with nonce 0 is unconfirmed + // key 1, tx with nonce 1 is unconfirmed + // key 2, tx with nonce 9 is unconfirmed and gets a receipt in block 10 + etx1_0 := cltest.MustInsertUnconfirmedEthTx(t, txStore, 0, fromAddress1_1) + etx1_1 := cltest.MustInsertUnconfirmedEthTx(t, txStore, 1, fromAddress1_1) + etx2_9 := cltest.MustInsertUnconfirmedEthTx(t, txStore, 3, fromAddress1_2) + // there also happens to be a confirmed tx with a higher nonce from a different chain in the DB + etx_other_chain := cltest.MustInsertUnconfirmedEthTx(t, txStore, 8, fromAddress2_1) + pgtest.MustExec(t, db, `UPDATE evm.txes SET state='confirmed' WHERE id = $1`, etx_other_chain.ID) + + attempt2_9 := newBroadcastLegacyEthTxAttempt(t, etx2_9.ID, int64(1)) + require.NoError(t, txStore.InsertTxAttempt(&attempt2_9)) + txmReceipt2_9 := newTxReceipt(attempt2_9.Hash, 10, 1) + + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && cltest.BatchElemMatchesParams(b[0], attempt2_9.Hash, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + *(elems[0].Result.(*evmtypes.Receipt)) = txmReceipt2_9 + }).Once() + + require.NoError(t, ec.CheckForReceipts(ctx, 10)) + + mustTxBeInState(t, txStore, etx1_0, txmgrcommon.TxUnconfirmed) + mustTxBeInState(t, txStore, etx1_1, txmgrcommon.TxUnconfirmed) + mustTxBeInState(t, txStore, 
etx2_9, txmgrcommon.TxConfirmed) + + // Now etx1_1 gets a receipt in block 11, which should mark etx1_0 as confirmed_missing_receipt + attempt1_1 := newBroadcastLegacyEthTxAttempt(t, etx1_1.ID, int64(2)) + require.NoError(t, txStore.InsertTxAttempt(&attempt1_1)) + txmReceipt1_1 := newTxReceipt(attempt1_1.Hash, 11, 1) + + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && cltest.BatchElemMatchesParams(b[0], attempt1_1.Hash, "eth_getTransactionReceipt") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + *(elems[0].Result.(*evmtypes.Receipt)) = txmReceipt1_1 + }).Once() + + require.NoError(t, ec.CheckForReceipts(ctx, 11)) + + mustTxBeInState(t, txStore, etx1_0, txmgrcommon.TxConfirmedMissingReceipt) + mustTxBeInState(t, txStore, etx1_1, txmgrcommon.TxConfirmed) + mustTxBeInState(t, txStore, etx2_9, txmgrcommon.TxConfirmed) +} + +func TestEthConfirmer_CheckForReceipts_confirmed_missing_receipt(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].FinalityDepth = ptr[uint32](50) + }) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, nil) + ctx := testutils.Context(t) + + // STATE + // eth_txes with nonce 0 has two attempts (broadcast before block 21 and 41) the first of which will get a receipt + // eth_txes with nonce 1 has two attempts (broadcast before block 21 and 41) neither of which will ever get a receipt + // eth_txes with nonce 2 has an attempt (broadcast before block 41) that will not get a receipt on the first try but will 
get one later + // eth_txes with nonce 3 has an attempt (broadcast before block 41) that has been confirmed in block 42 + // All other attempts were broadcast before block 41 + b := int64(21) + + etx0 := cltest.MustInsertUnconfirmedEthTx(t, txStore, 0, fromAddress) + attempt0_1 := newBroadcastLegacyEthTxAttempt(t, etx0.ID, int64(1)) + attempt0_2 := newBroadcastLegacyEthTxAttempt(t, etx0.ID, int64(2)) + attempt0_2.BroadcastBeforeBlockNum = &b + require.NoError(t, txStore.InsertTxAttempt(&attempt0_1)) + require.NoError(t, txStore.InsertTxAttempt(&attempt0_2)) + + etx1 := cltest.MustInsertUnconfirmedEthTx(t, txStore, 1, fromAddress) + attempt1_1 := newBroadcastLegacyEthTxAttempt(t, etx1.ID, int64(1)) + attempt1_2 := newBroadcastLegacyEthTxAttempt(t, etx1.ID, int64(2)) + attempt1_2.BroadcastBeforeBlockNum = &b + require.NoError(t, txStore.InsertTxAttempt(&attempt1_1)) + require.NoError(t, txStore.InsertTxAttempt(&attempt1_2)) + + etx2 := cltest.MustInsertUnconfirmedEthTx(t, txStore, 2, fromAddress) + attempt2_1 := newBroadcastLegacyEthTxAttempt(t, etx2.ID, int64(1)) + require.NoError(t, txStore.InsertTxAttempt(&attempt2_1)) + + etx3 := cltest.MustInsertUnconfirmedEthTx(t, txStore, 3, fromAddress) + attempt3_1 := newBroadcastLegacyEthTxAttempt(t, etx3.ID, int64(1)) + require.NoError(t, txStore.InsertTxAttempt(&attempt3_1)) + + pgtest.MustExec(t, db, `UPDATE evm.tx_attempts SET broadcast_before_block_num = 41 WHERE broadcast_before_block_num IS NULL`) + + t.Run("marks buried eth_txes as 'confirmed_missing_receipt'", func(t *testing.T) { + txmReceipt0 := evmtypes.Receipt{ + TxHash: attempt0_2.Hash, + BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(42), + TransactionIndex: uint(1), + Status: uint64(1), + } + txmReceipt3 := evmtypes.Receipt{ + TxHash: attempt3_1.Hash, + BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(42), + TransactionIndex: uint(1), + Status: uint64(1), + } + ethClient.On("SequenceAt", mock.Anything, mock.Anything, 
mock.Anything).Return(evmtypes.Nonce(4), nil) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 6 && + cltest.BatchElemMatchesParams(b[0], attempt0_2.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[1], attempt0_1.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[2], attempt1_2.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[3], attempt1_1.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[4], attempt2_1.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[5], attempt3_1.Hash, "eth_getTransactionReceipt") + + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // First transaction confirmed + *(elems[0].Result.(*evmtypes.Receipt)) = txmReceipt0 + elems[1].Result = &evmtypes.Receipt{} + // Second transaction stil unconfirmed + elems[2].Result = &evmtypes.Receipt{} + elems[3].Result = &evmtypes.Receipt{} + // Third transaction still unconfirmed + elems[4].Result = &evmtypes.Receipt{} + // Fourth transaction is confirmed + *(elems[5].Result.(*evmtypes.Receipt)) = txmReceipt3 + }).Once() + + // PERFORM + // Block num of 43 is one higher than the receipt (as would generally be expected) + require.NoError(t, ec.CheckForReceipts(ctx, 43)) + + // Expected state is that the "top" eth_tx is now confirmed, with the + // two below it "confirmed_missing_receipt" and the "bottom" eth_tx also confirmed + var err error + etx3, err = txStore.FindTxWithAttempts(etx3.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmed, etx3.State) + + ethReceipt := etx3.TxAttempts[0].Receipts[0] + require.Equal(t, txmReceipt3.BlockHash, ethReceipt.GetBlockHash()) + + etx2, err = txStore.FindTxWithAttempts(etx2.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmedMissingReceipt, etx2.State) + etx1, err = txStore.FindTxWithAttempts(etx1.ID) + 
require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmedMissingReceipt, etx1.State) + + etx0, err = txStore.FindTxWithAttempts(etx0.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmed, etx0.State) + + require.Len(t, etx0.TxAttempts, 2) + require.Len(t, etx0.TxAttempts[0].Receipts, 1) + ethReceipt = etx0.TxAttempts[0].Receipts[0] + require.Equal(t, txmReceipt0.BlockHash, ethReceipt.GetBlockHash()) + }) + + // STATE + // eth_txes with nonce 0 is confirmed + // eth_txes with nonce 1 is confirmed_missing_receipt + // eth_txes with nonce 2 is confirmed_missing_receipt + // eth_txes with nonce 3 is confirmed + + t.Run("marks eth_txes with state 'confirmed_missing_receipt' as 'confirmed' if a receipt finally shows up", func(t *testing.T) { + txmReceipt := evmtypes.Receipt{ + TxHash: attempt2_1.Hash, + BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(43), + TransactionIndex: uint(1), + Status: uint64(1), + } + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 3 && + cltest.BatchElemMatchesParams(b[0], attempt1_2.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[1], attempt1_1.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[2], attempt2_1.Hash, "eth_getTransactionReceipt") + + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // First transaction still unconfirmed + elems[0].Result = &evmtypes.Receipt{} + elems[1].Result = &evmtypes.Receipt{} + // Second transaction confirmed + *(elems[2].Result.(*evmtypes.Receipt)) = txmReceipt + }).Once() + + // PERFORM + // Block num of 44 is one higher than the receipt (as would generally be expected) + require.NoError(t, ec.CheckForReceipts(ctx, 44)) + + // Expected state is that the "top" two eth_txes are now confirmed, with the + // one below it 
still "confirmed_missing_receipt" and the bottom one remains confirmed + var err error + etx3, err = txStore.FindTxWithAttempts(etx3.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmed, etx3.State) + etx2, err = txStore.FindTxWithAttempts(etx2.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmed, etx2.State) + + ethReceipt := etx2.TxAttempts[0].Receipts[0] + require.Equal(t, txmReceipt.BlockHash, ethReceipt.GetBlockHash()) + + etx1, err = txStore.FindTxWithAttempts(etx1.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmedMissingReceipt, etx1.State) + etx0, err = txStore.FindTxWithAttempts(etx0.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmed, etx0.State) + }) + + // STATE + // eth_txes with nonce 0 is confirmed + // eth_txes with nonce 1 is confirmed_missing_receipt + // eth_txes with nonce 2 is confirmed + // eth_txes with nonce 3 is confirmed + + t.Run("continues to leave eth_txes with state 'confirmed_missing_receipt' unchanged if at least one attempt is above EVM.FinalityDepth", func(t *testing.T) { + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + cltest.BatchElemMatchesParams(b[0], attempt1_2.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[1], attempt1_1.Hash, "eth_getTransactionReceipt") + + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // Both attempts still unconfirmed + elems[0].Result = &evmtypes.Receipt{} + elems[1].Result = &evmtypes.Receipt{} + }).Once() + + // PERFORM + // Block num of 80 puts the first attempt (21) below threshold but second attempt (41) still above + require.NoError(t, ec.CheckForReceipts(ctx, 80)) + + // Expected state is that the "top" two eth_txes are now confirmed, with the + // one below it still 
"confirmed_missing_receipt" and the bottom one remains confirmed + var err error + etx3, err = txStore.FindTxWithAttempts(etx3.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmed, etx3.State) + etx2, err = txStore.FindTxWithAttempts(etx2.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmed, etx2.State) + etx1, err = txStore.FindTxWithAttempts(etx1.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmedMissingReceipt, etx1.State) + etx0, err = txStore.FindTxWithAttempts(etx0.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmed, etx0.State) + }) + + // STATE + // eth_txes with nonce 0 is confirmed + // eth_txes with nonce 1 is confirmed_missing_receipt + // eth_txes with nonce 2 is confirmed + // eth_txes with nonce 3 is confirmed + + t.Run("marks eth_Txes with state 'confirmed_missing_receipt' as 'errored' if a receipt fails to show up and all attempts are buried deeper than EVM.FinalityDepth", func(t *testing.T) { + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(10), nil) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + cltest.BatchElemMatchesParams(b[0], attempt1_2.Hash, "eth_getTransactionReceipt") && + cltest.BatchElemMatchesParams(b[1], attempt1_1.Hash, "eth_getTransactionReceipt") + + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // Both attempts still unconfirmed + elems[0].Result = &evmtypes.Receipt{} + elems[1].Result = &evmtypes.Receipt{} + }).Once() + + // PERFORM + // Block num of 100 puts the first attempt (21) and second attempt (41) below threshold + require.NoError(t, ec.CheckForReceipts(ctx, 100)) + + // Expected state is that the "top" two eth_txes are now confirmed, with the + // one below it marked as "fatal_error" and the bottom one remains confirmed + var err error + etx3, err = 
txStore.FindTxWithAttempts(etx3.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmed, etx3.State) + etx2, err = txStore.FindTxWithAttempts(etx2.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmed, etx2.State) + etx1, err = txStore.FindTxWithAttempts(etx1.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxFatalError, etx1.State) + etx0, err = txStore.FindTxWithAttempts(etx0.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxConfirmed, etx0.State) + }) +} + +func TestEthConfirmer_CheckConfirmedMissingReceipt(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].FinalityDepth = ptr[uint32](50) + }) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, nil) + ctx := testutils.Context(t) + + // STATE + // eth_txes with nonce 0 has two attempts, the later attempt with higher gas fees + // eth_txes with nonce 1 has two attempts, the later attempt with higher gas fees + // eth_txes with nonce 2 has one attempt + originalBroadcastAt := time.Unix(1616509100, 0) + etx0 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 0, 1, originalBroadcastAt, fromAddress) + attempt0_2 := newBroadcastLegacyEthTxAttempt(t, etx0.ID, int64(2)) + require.NoError(t, txStore.InsertTxAttempt(&attempt0_2)) + etx1 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 1, 1, originalBroadcastAt, fromAddress) + attempt1_2 := newBroadcastLegacyEthTxAttempt(t, etx1.ID, int64(2)) + require.NoError(t, txStore.InsertTxAttempt(&attempt1_2)) + etx2 := 
mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 2, 1, originalBroadcastAt, fromAddress) + attempt2_1 := etx2.TxAttempts[0] + etx3 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 3, 1, originalBroadcastAt, fromAddress) + attempt3_1 := etx3.TxAttempts[0] + + ethClient.On("BatchCallContextAll", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 4 && + cltest.BatchElemMatchesParams(b[0], hexutil.Encode(attempt0_2.SignedRawTx), "eth_sendRawTransaction") && + cltest.BatchElemMatchesParams(b[1], hexutil.Encode(attempt1_2.SignedRawTx), "eth_sendRawTransaction") && + cltest.BatchElemMatchesParams(b[2], hexutil.Encode(attempt2_1.SignedRawTx), "eth_sendRawTransaction") && + cltest.BatchElemMatchesParams(b[3], hexutil.Encode(attempt3_1.SignedRawTx), "eth_sendRawTransaction") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // First transaction confirmed + elems[0].Error = errors.New("nonce too low") + elems[1].Error = errors.New("transaction underpriced") + elems[2].Error = nil + elems[3].Error = errors.New("transaction already finalized") + }).Once() + + // PERFORM + require.NoError(t, ec.CheckConfirmedMissingReceipt(ctx)) + + // Expected state is that the "top" eth_tx is untouched but the other two + // are marked as unconfirmed + var err error + etx0, err = txStore.FindTxWithAttempts(etx0.ID) + assert.NoError(t, err) + assert.Equal(t, txmgrcommon.TxConfirmedMissingReceipt, etx0.State) + assert.Greater(t, etx0.BroadcastAt.Unix(), originalBroadcastAt.Unix()) + etx1, err = txStore.FindTxWithAttempts(etx1.ID) + assert.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx1.State) + assert.Greater(t, etx1.BroadcastAt.Unix(), originalBroadcastAt.Unix()) + etx2, err = txStore.FindTxWithAttempts(etx2.ID) + assert.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx2.State) + assert.Greater(t, etx2.BroadcastAt.Unix(), 
originalBroadcastAt.Unix()) + etx3, err = txStore.FindTxWithAttempts(etx3.ID) + assert.NoError(t, err) + assert.Equal(t, txmgrcommon.TxConfirmedMissingReceipt, etx3.State) + assert.Greater(t, etx3.BroadcastAt.Unix(), originalBroadcastAt.Unix()) +} + +func TestEthConfirmer_CheckConfirmedMissingReceipt_batchSendTransactions_fails(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].FinalityDepth = ptr[uint32](50) + }) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, nil) + ctx := testutils.Context(t) + + // STATE + // eth_txes with nonce 0 has two attempts, the later attempt with higher gas fees + // eth_txes with nonce 1 has two attempts, the later attempt with higher gas fees + // eth_txes with nonce 2 has one attempt + originalBroadcastAt := time.Unix(1616509100, 0) + etx0 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 0, 1, originalBroadcastAt, fromAddress) + attempt0_2 := newBroadcastLegacyEthTxAttempt(t, etx0.ID, int64(2)) + require.NoError(t, txStore.InsertTxAttempt(&attempt0_2)) + etx1 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 1, 1, originalBroadcastAt, fromAddress) + attempt1_2 := newBroadcastLegacyEthTxAttempt(t, etx1.ID, int64(2)) + require.NoError(t, txStore.InsertTxAttempt(&attempt1_2)) + etx2 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 2, 1, originalBroadcastAt, fromAddress) + attempt2_1 := etx2.TxAttempts[0] + + ethClient.On("BatchCallContextAll", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 3 && + 
cltest.BatchElemMatchesParams(b[0], hexutil.Encode(attempt0_2.SignedRawTx), "eth_sendRawTransaction") && + cltest.BatchElemMatchesParams(b[1], hexutil.Encode(attempt1_2.SignedRawTx), "eth_sendRawTransaction") && + cltest.BatchElemMatchesParams(b[2], hexutil.Encode(attempt2_1.SignedRawTx), "eth_sendRawTransaction") + })).Return(errors.New("Timed out")).Once() + + // PERFORM + require.NoError(t, ec.CheckConfirmedMissingReceipt(ctx)) + + // Expected state is that all txes are marked as unconfirmed, since the batch call had failed + var err error + etx0, err = txStore.FindTxWithAttempts(etx0.ID) + assert.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx0.State) + assert.Equal(t, etx0.BroadcastAt.Unix(), originalBroadcastAt.Unix()) + etx1, err = txStore.FindTxWithAttempts(etx1.ID) + assert.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx1.State) + assert.Equal(t, etx1.BroadcastAt.Unix(), originalBroadcastAt.Unix()) + etx2, err = txStore.FindTxWithAttempts(etx2.ID) + assert.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx2.State) + assert.Equal(t, etx2.BroadcastAt.Unix(), originalBroadcastAt.Unix()) +} + +func TestEthConfirmer_CheckConfirmedMissingReceipt_smallEvmRPCBatchSize_middleBatchSendTransactionFails(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].FinalityDepth = ptr[uint32](50) + c.EVM[0].RPCDefaultBatchSize = ptr[uint32](1) + }) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, nil) + ctx := testutils.Context(t) + + // STATE + // eth_txes with nonce 0 has two attempts, the later 
attempt with higher gas fees + // eth_txes with nonce 1 has two attempts, the later attempt with higher gas fees + // eth_txes with nonce 2 has one attempt + originalBroadcastAt := time.Unix(1616509100, 0) + etx0 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 0, 1, originalBroadcastAt, fromAddress) + attempt0_2 := newBroadcastLegacyEthTxAttempt(t, etx0.ID, int64(2)) + require.NoError(t, txStore.InsertTxAttempt(&attempt0_2)) + etx1 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 1, 1, originalBroadcastAt, fromAddress) + attempt1_2 := newBroadcastLegacyEthTxAttempt(t, etx1.ID, int64(2)) + require.NoError(t, txStore.InsertTxAttempt(&attempt1_2)) + etx2 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 2, 1, originalBroadcastAt, fromAddress) + + // Expect eth_sendRawTransaction in 3 batches. First batch will pass, 2nd will fail, 3rd never attempted. + ethClient.On("BatchCallContextAll", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && + cltest.BatchElemMatchesParams(b[0], hexutil.Encode(attempt0_2.SignedRawTx), "eth_sendRawTransaction") + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // First transaction confirmed + elems[0].Error = errors.New("nonce too low") + }).Once() + ethClient.On("BatchCallContextAll", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && + cltest.BatchElemMatchesParams(b[0], hexutil.Encode(attempt1_2.SignedRawTx), "eth_sendRawTransaction") + })).Return(errors.New("Timed out")).Once() + + // PERFORM + require.NoError(t, ec.CheckConfirmedMissingReceipt(ctx)) + + // Expected state is that all transactions since failed batch will be unconfirmed + var err error + etx0, err = txStore.FindTxWithAttempts(etx0.ID) + assert.NoError(t, err) + assert.Equal(t, txmgrcommon.TxConfirmedMissingReceipt, etx0.State) + assert.Greater(t, etx0.BroadcastAt.Unix(), 
originalBroadcastAt.Unix()) + etx1, err = txStore.FindTxWithAttempts(etx1.ID) + assert.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx1.State) + assert.Equal(t, etx1.BroadcastAt.Unix(), originalBroadcastAt.Unix()) + etx2, err = txStore.FindTxWithAttempts(etx2.ID) + assert.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx2.State) + assert.Equal(t, etx2.BroadcastAt.Unix(), originalBroadcastAt.Unix()) +} + +func TestEthConfirmer_FindTxsRequiringRebroadcast(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + evmFromAddress := fromAddress + currentHead := int64(30) + gasBumpThreshold := int64(10) + tooNew := int64(21) + onTheMoney := int64(20) + oldEnough := int64(19) + nonce := int64(0) + + mustInsertConfirmedEthTx(t, txStore, nonce, fromAddress) + nonce++ + + _, otherAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + evmOtherAddress := otherAddress + + lggr := logger.Test(t) + + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, nil) + + t.Run("returns nothing when there are no transactions", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 10, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + assert.Len(t, etxs, 0) + }) + + mustInsertInProgressEthTx(t, txStore, nonce, fromAddress) + nonce++ + + t.Run("returns nothing when the transaction is in_progress", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 10, 0, &cltest.FixtureChainID) + 
require.NoError(t, err) + + assert.Len(t, etxs, 0) + }) + + // This one has BroadcastBeforeBlockNum set as nil... which can happen, but it should be ignored + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + nonce++ + + t.Run("ignores unconfirmed transactions with nil BroadcastBeforeBlockNum", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 10, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + assert.Len(t, etxs, 0) + }) + + etx1 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + nonce++ + attempt1_1 := etx1.TxAttempts[0] + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&attempt1_1) + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, tooNew, attempt1_1.ID)) + attempt1_2 := newBroadcastLegacyEthTxAttempt(t, etx1.ID) + attempt1_2.BroadcastBeforeBlockNum = &onTheMoney + attempt1_2.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(30000)} + require.NoError(t, txStore.InsertTxAttempt(&attempt1_2)) + + t.Run("returns nothing when the transaction is unconfirmed with an attempt that is recent", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 10, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + assert.Len(t, etxs, 0) + }) + + etx2 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + nonce++ + attempt2_1 := etx2.TxAttempts[0] + dbAttempt = txmgr.DbEthTxAttempt{} + dbAttempt.FromTxAttempt(&attempt2_1) + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, tooNew, attempt2_1.ID)) + + t.Run("returns nothing when the transaction has attempts that are too new", func(t *testing.T) { + etxs, err := 
ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 10, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + assert.Len(t, etxs, 0) + }) + + etxWithoutAttempts := cltest.NewEthTx(fromAddress) + { + n := evmtypes.Nonce(nonce) + etxWithoutAttempts.Sequence = &n + } + now := time.Now() + etxWithoutAttempts.BroadcastAt = &now + etxWithoutAttempts.InitialBroadcastAt = &now + etxWithoutAttempts.State = txmgrcommon.TxUnconfirmed + require.NoError(t, txStore.InsertTx(&etxWithoutAttempts)) + nonce++ + + t.Run("does nothing if the transaction is from a different address than the one given", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmOtherAddress, currentHead, gasBumpThreshold, 10, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + assert.Len(t, etxs, 0) + }) + + t.Run("returns the transaction if it is unconfirmed and has no attempts (note that this is an invariant violation, but we handle it anyway)", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 10, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + require.Len(t, etxs, 1) + assert.Equal(t, etxWithoutAttempts.ID, etxs[0].ID) + }) + + t.Run("returns nothing for different chain id", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 10, 0, big.NewInt(42)) + require.NoError(t, err) + + require.Len(t, etxs, 0) + }) + + etx3 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + nonce++ + attempt3_1 := etx3.TxAttempts[0] + dbAttempt = txmgr.DbEthTxAttempt{} + dbAttempt.FromTxAttempt(&attempt3_1) + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt3_1.ID)) + + // NOTE: It should ignore 
qualifying eth_txes from a different address + etxOther := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 0, otherAddress) + attemptOther1 := etxOther.TxAttempts[0] + dbAttempt = txmgr.DbEthTxAttempt{} + dbAttempt.FromTxAttempt(&attemptOther1) + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attemptOther1.ID)) + + t.Run("returns the transaction if it is unconfirmed with an attempt that is older than gasBumpThreshold blocks", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 10, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + require.Len(t, etxs, 2) + assert.Equal(t, etxWithoutAttempts.ID, etxs[0].ID) + assert.Equal(t, etx3.ID, etxs[1].ID) + }) + + t.Run("returns nothing if threshold is zero", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, 0, 10, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + require.Len(t, etxs, 0) + }) + + t.Run("does not return more transactions for gas bumping than gasBumpThreshold", func(t *testing.T) { + // Unconfirmed txes in DB are: + // (unnamed) (nonce 2) + // etx1 (nonce 3) + // etx2 (nonce 4) + // etxWithoutAttempts (nonce 5) + // etx3 (nonce 6) - ready for bump + // etx4 (nonce 7) - ready for bump + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 4, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + require.Len(t, etxs, 1) // returns etxWithoutAttempts only - eligible for gas bumping because it technically doesn't have any attempts within gasBumpThreshold blocks + assert.Equal(t, etxWithoutAttempts.ID, etxs[0].ID) + + etxs, err = ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 5, 0, &cltest.FixtureChainID) + 
require.NoError(t, err) + + require.Len(t, etxs, 2) // includes etxWithoutAttempts, etx3 and etx4 + assert.Equal(t, etxWithoutAttempts.ID, etxs[0].ID) + assert.Equal(t, etx3.ID, etxs[1].ID) + + // Zero limit disables it + etxs, err = ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 0, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + require.Len(t, etxs, 2) // includes etxWithoutAttempts, etx3 and etx4 + }) + + etx4 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + nonce++ + attempt4_1 := etx4.TxAttempts[0] + dbAttempt = txmgr.DbEthTxAttempt{} + dbAttempt.FromTxAttempt(&attempt4_1) + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt4_1.ID)) + + t.Run("ignores pending transactions for another key", func(t *testing.T) { + // Re-use etx3 nonce for another key, it should not affect the results for this key + etxOther := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, (*etx3.Sequence).Int64(), otherAddress) + aOther := etxOther.TxAttempts[0] + dbAttempt = txmgr.DbEthTxAttempt{} + dbAttempt.FromTxAttempt(&aOther) + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, aOther.ID)) + + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 6, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + require.Len(t, etxs, 3) // includes etxWithoutAttempts, etx3 and etx4 + assert.Equal(t, etxWithoutAttempts.ID, etxs[0].ID) + assert.Equal(t, etx3.ID, etxs[1].ID) + assert.Equal(t, etx4.ID, etxs[2].ID) + }) + + attempt3_2 := newBroadcastLegacyEthTxAttempt(t, etx3.ID) + attempt3_2.BroadcastBeforeBlockNum = &oldEnough + attempt3_2.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(30000)} + require.NoError(t, 
txStore.InsertTxAttempt(&attempt3_2)) + + t.Run("returns the transaction if it is unconfirmed with two attempts that are older than gasBumpThreshold blocks", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 10, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + require.Len(t, etxs, 3) + assert.Equal(t, etxWithoutAttempts.ID, etxs[0].ID) + assert.Equal(t, etx3.ID, etxs[1].ID) + assert.Equal(t, etx4.ID, etxs[2].ID) + }) + + attempt3_3 := newBroadcastLegacyEthTxAttempt(t, etx3.ID) + attempt3_3.BroadcastBeforeBlockNum = &tooNew + attempt3_3.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(40000)} + require.NoError(t, txStore.InsertTxAttempt(&attempt3_3)) + + t.Run("does not return the transaction if it has some older but one newer attempt", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 10, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + require.Len(t, etxs, 2) + assert.Equal(t, etxWithoutAttempts.ID, etxs[0].ID) + assert.Equal(t, *etxWithoutAttempts.Sequence, *(etxs[0].Sequence)) + require.Equal(t, evmtypes.Nonce(5), *etxWithoutAttempts.Sequence) + assert.Equal(t, etx4.ID, etxs[1].ID) + assert.Equal(t, *etx4.Sequence, *(etxs[1].Sequence)) + require.Equal(t, evmtypes.Nonce(7), *etx4.Sequence) + }) + + attempt0_1 := newBroadcastLegacyEthTxAttempt(t, etxWithoutAttempts.ID) + attempt0_1.State = txmgrtypes.TxAttemptInsufficientFunds + require.NoError(t, txStore.InsertTxAttempt(&attempt0_1)) + + // This attempt has insufficient_eth, but there is also another attempt4_1 + // which is old enough, so this will be caught by both queries and should + // not be duplicated + attempt4_2 := cltest.NewLegacyEthTxAttempt(t, etx4.ID) + attempt4_2.State = txmgrtypes.TxAttemptInsufficientFunds + attempt4_2.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(40000)} + require.NoError(t, 
txStore.InsertTxAttempt(&attempt4_2)) + + etx5 := mustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, nonce, fromAddress) + nonce++ + + // This etx has one attempt that is too new, which would exclude it from + // the gas bumping query, but it should still be caught by the insufficient + // eth query + etx6 := mustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, nonce, fromAddress) + attempt6_2 := newBroadcastLegacyEthTxAttempt(t, etx3.ID) + attempt6_2.BroadcastBeforeBlockNum = &tooNew + attempt6_2.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(30001)} + require.NoError(t, txStore.InsertTxAttempt(&attempt6_2)) + + t.Run("returns unique attempts requiring resubmission due to insufficient eth, ordered by nonce asc", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 10, 0, &cltest.FixtureChainID) + require.NoError(t, err) + + require.Len(t, etxs, 4) + assert.Equal(t, etxWithoutAttempts.ID, etxs[0].ID) + assert.Equal(t, *etxWithoutAttempts.Sequence, *(etxs[0].Sequence)) + assert.Equal(t, etx4.ID, etxs[1].ID) + assert.Equal(t, *etx4.Sequence, *(etxs[1].Sequence)) + assert.Equal(t, etx5.ID, etxs[2].ID) + assert.Equal(t, *etx5.Sequence, *(etxs[2].Sequence)) + assert.Equal(t, etx6.ID, etxs[3].ID) + assert.Equal(t, *etx6.Sequence, *(etxs[3].Sequence)) + }) + + t.Run("applies limit", func(t *testing.T) { + etxs, err := ec.FindTxsRequiringRebroadcast(testutils.Context(t), lggr, evmFromAddress, currentHead, gasBumpThreshold, 10, 2, &cltest.FixtureChainID) + require.NoError(t, err) + + require.Len(t, etxs, 2) + assert.Equal(t, etxWithoutAttempts.ID, etxs[0].ID) + assert.Equal(t, *etxWithoutAttempts.Sequence, *(etxs[0].Sequence)) + assert.Equal(t, etx4.ID, etxs[1].ID) + assert.Equal(t, *etx4.Sequence, *(etxs[1].Sequence)) + }) +} + +func TestEthConfirmer_RebroadcastWhereNecessary_WithConnectivityCheck(t *testing.T) { + t.Parallel() + lggr := logger.Test(t) + + db := 
pgtest.NewSqlxDB(t) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + t.Run("should retry previous attempt if connectivity check failed for legacy transactions", func(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(false) + c.EVM[0].GasEstimator.BlockHistory.BlockHistorySize = ptr[uint16](2) + c.EVM[0].GasEstimator.BlockHistory.CheckInclusionBlocks = ptr[uint16](4) + }) + ccfg := evmtest.NewChainScopedConfig(t, cfg) + + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + kst := ksmocks.NewEth(t) + + estimator := gasmocks.NewEvmEstimator(t) + newEst := func(logger.Logger) gas.EvmEstimator { return estimator } + estimator.On("BumpLegacyGas", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, uint32(0), pkgerrors.Wrapf(commonfee.ErrConnectivity, "transaction...")) + ge := ccfg.EVM().GasEstimator() + feeEstimator := gas.NewWrappedEvmEstimator(lggr, newEst, ge.EIP1559DynamicFees(), nil) + txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), ge, kst, feeEstimator) + addresses := []gethCommon.Address{fromAddress} + kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Maybe() + // Create confirmer with necessary state + ec := txmgr.NewEvmConfirmer(txStore, txmgr.NewEvmTxmClient(ethClient), ccfg.EVM(), txmgr.NewEvmTxmFeeConfig(ccfg.EVM().GasEstimator()), ccfg.EVM().Transactions(), cfg.Database(), kst, txBuilder, lggr) + servicetest.Run(t, ec) + currentHead := int64(30) + oldEnough := int64(15) + nonce := int64(0) + originalBroadcastAt := time.Unix(1616509100, 0) + + etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress, originalBroadcastAt) + attempt1 := etx.TxAttempts[0] + var dbAttempt 
txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&attempt1) + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt1.ID)) + + // Send transaction and assume success. + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(commonclient.Successful, nil).Once() + + err := ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead) + require.NoError(t, err) + + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + require.Len(t, etx.TxAttempts, 1) + }) + + t.Run("should retry previous attempt if connectivity check failed for dynamic transactions", func(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true) + c.EVM[0].GasEstimator.BlockHistory.BlockHistorySize = ptr[uint16](2) + c.EVM[0].GasEstimator.BlockHistory.CheckInclusionBlocks = ptr[uint16](4) + }) + ccfg := evmtest.NewChainScopedConfig(t, cfg) + + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + kst := ksmocks.NewEth(t) + + estimator := gasmocks.NewEvmEstimator(t) + estimator.On("BumpDynamicFee", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(gas.DynamicFee{}, uint32(0), pkgerrors.Wrapf(commonfee.ErrConnectivity, "transaction...")) + newEst := func(logger.Logger) gas.EvmEstimator { return estimator } + // Create confirmer with necessary state + ge := ccfg.EVM().GasEstimator() + feeEstimator := gas.NewWrappedEvmEstimator(lggr, newEst, ge.EIP1559DynamicFees(), nil) + txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), ge, kst, feeEstimator) + addresses := []gethCommon.Address{fromAddress} + kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, 
nil).Maybe() + ec := txmgr.NewEvmConfirmer(txStore, txmgr.NewEvmTxmClient(ethClient), ccfg.EVM(), txmgr.NewEvmTxmFeeConfig(ccfg.EVM().GasEstimator()), ccfg.EVM().Transactions(), cfg.Database(), kst, txBuilder, lggr) + servicetest.Run(t, ec) + currentHead := int64(30) + oldEnough := int64(15) + nonce := int64(0) + originalBroadcastAt := time.Unix(1616509100, 0) + + etx := mustInsertUnconfirmedEthTxWithBroadcastDynamicFeeAttempt(t, txStore, nonce, fromAddress, originalBroadcastAt) + attempt1 := etx.TxAttempts[0] + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&attempt1) + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt1.ID)) + + // Send transaction and assume success. + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return(commonclient.Successful, nil).Once() + + err := ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead) + require.NoError(t, err) + + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + require.Len(t, etx.TxAttempts, 1) + }) +} + +func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.PriceMax = assets.GWei(500) + }) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + _, _ = cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + kst := ksmocks.NewEth(t) + addresses := []gethCommon.Address{fromAddress} + kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Maybe() + // Use a mock keystore for this test + ec := 
newEthConfirmer(t, txStore, ethClient, evmcfg, kst, nil) + currentHead := int64(30) + oldEnough := int64(19) + nonce := int64(0) + + t.Run("does nothing if no transactions require bumping", func(t *testing.T) { + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + }) + + originalBroadcastAt := time.Unix(1616509100, 0) + etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress, originalBroadcastAt) + nonce++ + attempt1_1 := etx.TxAttempts[0] + var dbAttempt txmgr.DbEthTxAttempt + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt1_1.ID)) + + t.Run("re-sends previous transaction on keystore error", func(t *testing.T) { + // simulate bumped transaction that is somehow impossible to sign + kst.On("SignTx", fromAddress, + mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Nonce() == uint64(*etx.Sequence) + }), + mock.Anything).Return(nil, errors.New("signing error")).Once() + + // Do the thing + err := ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead) + require.Error(t, err) + require.Contains(t, err.Error(), "signing error") + + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + require.Equal(t, txmgrcommon.TxUnconfirmed, etx.State) + + require.Len(t, etx.TxAttempts, 1) + }) + + t.Run("does nothing and continues on fatal error", func(t *testing.T) { + ethTx := *types.NewTx(&types.LegacyTx{}) + kst.On("SignTx", + fromAddress, + mock.MatchedBy(func(tx *types.Transaction) bool { + if tx.Nonce() != uint64(*etx.Sequence) { + return false + } + ethTx = *tx + return true + }), + mock.MatchedBy(func(chainID *big.Int) bool { + return chainID.Cmp(evmcfg.EVM().ChainID()) == 0 + })).Return(ðTx, nil).Once() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Nonce() == uint64(*etx.Sequence) + }), 
fromAddress).Return(commonclient.Fatal, errors.New("exceeds block gas limit")).Once() + + // Do the thing + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + require.Len(t, etx.TxAttempts, 1) + }) + + ethClient = evmtest.NewEthClientMockWithDefaultChain(t) + ec.XXXTestSetClient(txmgr.NewEvmTxmClient(ethClient)) + + t.Run("does nothing and continues if bumped attempt transaction was too expensive", func(t *testing.T) { + ethTx := *types.NewTx(&types.LegacyTx{}) + kst.On("SignTx", + fromAddress, + mock.MatchedBy(func(tx *types.Transaction) bool { + if tx.Nonce() != uint64(*etx.Sequence) { + return false + } + ethTx = *tx + return true + }), + mock.MatchedBy(func(chainID *big.Int) bool { + return chainID.Cmp(evmcfg.EVM().ChainID()) == 0 + })).Return(ðTx, nil).Once() + + // Once for the bumped attempt which exceeds limit + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Nonce() == uint64(*etx.Sequence) && tx.GasPrice().Int64() == int64(20000000000) + }), fromAddress).Return(commonclient.ExceedsMaxFee, errors.New("tx fee (1.10 ether) exceeds the configured cap (1.00 ether)")).Once() + + // Do the thing + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + // Did not create an additional attempt + require.Len(t, etx.TxAttempts, 1) + + // broadcast_at did not change + require.Equal(t, etx.BroadcastAt.Unix(), originalBroadcastAt.Unix()) + require.Equal(t, etx.InitialBroadcastAt.Unix(), originalBroadcastAt.Unix()) + }) + + var attempt1_2 txmgr.TxAttempt + ethClient = evmtest.NewEthClientMockWithDefaultChain(t) + ec.XXXTestSetClient(txmgr.NewEvmTxmClient(ethClient)) + + t.Run("creates new attempt with higher gas price if transaction has an attempt older 
than threshold", func(t *testing.T) { + expectedBumpedGasPrice := big.NewInt(20000000000) + require.Greater(t, expectedBumpedGasPrice.Int64(), attempt1_1.TxFee.Legacy.ToInt().Int64()) + + ethTx := *types.NewTx(&types.LegacyTx{}) + kst.On("SignTx", + fromAddress, + mock.MatchedBy(func(tx *types.Transaction) bool { + if expectedBumpedGasPrice.Cmp(tx.GasPrice()) != 0 { + return false + } + ethTx = *tx + return true + }), + mock.MatchedBy(func(chainID *big.Int) bool { + return chainID.Cmp(evmcfg.EVM().ChainID()) == 0 + })).Return(ðTx, nil).Once() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.Successful, nil).Once() + + // Do the thing + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + require.Len(t, etx.TxAttempts, 2) + require.Equal(t, attempt1_1.ID, etx.TxAttempts[1].ID) + + // Got the new attempt + attempt1_2 = etx.TxAttempts[0] + assert.Equal(t, expectedBumpedGasPrice.Int64(), attempt1_2.TxFee.Legacy.ToInt().Int64()) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt1_2.State) + }) + + t.Run("does nothing if there is an attempt without BroadcastBeforeBlockNum set", func(t *testing.T) { + // Do the thing + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + require.Len(t, etx.TxAttempts, 2) + }) + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt1_2.ID)) + var attempt1_3 txmgr.TxAttempt + + t.Run("creates new attempt with higher gas price if transaction is already in mempool (e.g. 
due to previous crash before we could save the new attempt)", func(t *testing.T) { + expectedBumpedGasPrice := big.NewInt(25000000000) + require.Greater(t, expectedBumpedGasPrice.Int64(), attempt1_2.TxFee.Legacy.ToInt().Int64()) + + ethTx := *types.NewTx(&types.LegacyTx{}) + kst.On("SignTx", + fromAddress, + mock.MatchedBy(func(tx *types.Transaction) bool { + if evmtypes.Nonce(tx.Nonce()) != *etx.Sequence || expectedBumpedGasPrice.Cmp(tx.GasPrice()) != 0 { + return false + } + ethTx = *tx + return true + }), + mock.Anything).Return(ðTx, nil).Once() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.Successful, fmt.Errorf("known transaction: %s", ethTx.Hash().Hex())).Once() + + // Do the thing + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + require.Len(t, etx.TxAttempts, 3) + require.Equal(t, attempt1_1.ID, etx.TxAttempts[2].ID) + require.Equal(t, attempt1_2.ID, etx.TxAttempts[1].ID) + + // Got the new attempt + attempt1_3 = etx.TxAttempts[0] + assert.Equal(t, expectedBumpedGasPrice.Int64(), attempt1_3.TxFee.Legacy.ToInt().Int64()) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt1_3.State) + }) + + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt1_3.ID)) + var attempt1_4 txmgr.TxAttempt + + t.Run("saves new attempt even for transaction that has already been confirmed (nonce already used)", func(t *testing.T) { + expectedBumpedGasPrice := big.NewInt(30000000000) + require.Greater(t, expectedBumpedGasPrice.Int64(), attempt1_2.TxFee.Legacy.ToInt().Int64()) + + ethTx := *types.NewTx(&types.LegacyTx{}) + receipt := evmtypes.Receipt{BlockNumber: big.NewInt(40)} + kst.On("SignTx", + fromAddress, 
+ mock.MatchedBy(func(tx *types.Transaction) bool { + if evmtypes.Nonce(tx.Nonce()) != *etx.Sequence || expectedBumpedGasPrice.Cmp(tx.GasPrice()) != 0 { + return false + } + ethTx = *tx + receipt.TxHash = tx.Hash() + return true + }), + mock.Anything).Return(ðTx, nil).Once() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.TransactionAlreadyKnown, errors.New("nonce too low")).Once() + + // Do the thing + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxConfirmedMissingReceipt, etx.State) + + // Got the new attempt + attempt1_4 = etx.TxAttempts[0] + assert.Equal(t, expectedBumpedGasPrice.Int64(), attempt1_4.TxFee.Legacy.ToInt().Int64()) + + require.Len(t, etx.TxAttempts, 4) + require.Equal(t, attempt1_1.ID, etx.TxAttempts[3].ID) + require.Equal(t, attempt1_2.ID, etx.TxAttempts[2].ID) + require.Equal(t, attempt1_3.ID, etx.TxAttempts[1].ID) + require.Equal(t, attempt1_4.ID, etx.TxAttempts[0].ID) + require.Equal(t, txmgrtypes.TxAttemptBroadcast, etx.TxAttempts[0].State) + require.Equal(t, txmgrtypes.TxAttemptBroadcast, etx.TxAttempts[1].State) + require.Equal(t, txmgrtypes.TxAttemptBroadcast, etx.TxAttempts[2].State) + require.Equal(t, txmgrtypes.TxAttemptBroadcast, etx.TxAttempts[3].State) + }) + + // Mark original tx as confirmed, so we won't pick it up anymore + pgtest.MustExec(t, db, `UPDATE evm.txes SET state = 'confirmed'`) + + etx2 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + nonce++ + attempt2_1 := etx2.TxAttempts[0] + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt2_1.ID)) + var attempt2_2 txmgr.TxAttempt + + 
t.Run("saves in_progress attempt on temporary error and returns error", func(t *testing.T) { + expectedBumpedGasPrice := big.NewInt(20000000000) + require.Greater(t, expectedBumpedGasPrice.Int64(), attempt2_1.TxFee.Legacy.ToInt().Int64()) + + ethTx := *types.NewTx(&types.LegacyTx{}) + n := *etx2.Sequence + kst.On("SignTx", + fromAddress, + mock.MatchedBy(func(tx *types.Transaction) bool { + if evmtypes.Nonce(tx.Nonce()) != n || expectedBumpedGasPrice.Cmp(tx.GasPrice()) != 0 { + return false + } + ethTx = *tx + return true + }), + mock.Anything).Return(ðTx, nil).Once() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return evmtypes.Nonce(tx.Nonce()) == n && expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.Unknown, errors.New("some network error")).Once() + + // Do the thing + err := ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead) + require.Error(t, err) + require.Contains(t, err.Error(), "some network error") + + etx2, err = txStore.FindTxWithAttempts(etx2.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx2.State) + + // Old attempt is untouched + require.Len(t, etx2.TxAttempts, 2) + require.Equal(t, attempt2_1.ID, etx2.TxAttempts[1].ID) + attempt2_1 = etx2.TxAttempts[1] + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt2_1.State) + assert.Equal(t, oldEnough, *attempt2_1.BroadcastBeforeBlockNum) + + // New in_progress attempt saved + attempt2_2 = etx2.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptInProgress, attempt2_2.State) + assert.Nil(t, attempt2_2.BroadcastBeforeBlockNum) + + // Do it again and move the attempt into "broadcast" + n = *etx2.Sequence + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return evmtypes.Nonce(tx.Nonce()) == n && expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.Successful, nil).Once() 
+ + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + + // Attempt marked "broadcast" + etx2, err = txStore.FindTxWithAttempts(etx2.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx2.State) + + // New in_progress attempt saved + require.Len(t, etx2.TxAttempts, 2) + require.Equal(t, attempt2_2.ID, etx2.TxAttempts[0].ID) + attempt2_2 = etx2.TxAttempts[0] + require.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt2_2.State) + assert.Nil(t, attempt2_2.BroadcastBeforeBlockNum) + }) + + // Set BroadcastBeforeBlockNum again so the next test will pick it up + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt2_2.ID)) + + t.Run("assumes that 'nonce too low' error means confirmed_missing_receipt", func(t *testing.T) { + expectedBumpedGasPrice := big.NewInt(25000000000) + require.Greater(t, expectedBumpedGasPrice.Int64(), attempt2_1.TxFee.Legacy.ToInt().Int64()) + + ethTx := *types.NewTx(&types.LegacyTx{}) + n := *etx2.Sequence + kst.On("SignTx", + fromAddress, + mock.MatchedBy(func(tx *types.Transaction) bool { + if evmtypes.Nonce(tx.Nonce()) != n || expectedBumpedGasPrice.Cmp(tx.GasPrice()) != 0 { + return false + } + ethTx = *tx + return true + }), + mock.Anything).Return(ðTx, nil).Once() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return evmtypes.Nonce(tx.Nonce()) == n && expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.TransactionAlreadyKnown, errors.New("nonce too low")).Once() + + // Creates new attempt as normal if currentHead is not high enough + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx2, err = txStore.FindTxWithAttempts(etx2.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxConfirmedMissingReceipt, etx2.State) + + // One new attempt 
saved + require.Len(t, etx2.TxAttempts, 3) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, etx2.TxAttempts[0].State) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, etx2.TxAttempts[1].State) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, etx2.TxAttempts[2].State) + }) + + // Original tx is confirmed, so we won't pick it up anymore + etx3 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + nonce++ + attempt3_1 := etx3.TxAttempts[0] + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1, gas_price=$2 WHERE id=$3 RETURNING *`, oldEnough, assets.NewWeiI(35000000000), attempt3_1.ID)) + + var attempt3_2 txmgr.TxAttempt + + t.Run("saves attempt anyway if replacement transaction is underpriced because the bumped gas price is insufficiently higher than the previous one", func(t *testing.T) { + expectedBumpedGasPrice := big.NewInt(42000000000) + require.Greater(t, expectedBumpedGasPrice.Int64(), attempt3_1.TxFee.Legacy.ToInt().Int64()) + + ethTx := *types.NewTx(&types.LegacyTx{}) + kst.On("SignTx", + fromAddress, + mock.MatchedBy(func(tx *types.Transaction) bool { + if evmtypes.Nonce(tx.Nonce()) != *etx3.Sequence || expectedBumpedGasPrice.Cmp(tx.GasPrice()) != 0 { + return false + } + ethTx = *tx + return true + }), + mock.Anything).Return(ðTx, nil).Once() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return evmtypes.Nonce(tx.Nonce()) == *etx3.Sequence && expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.Successful, errors.New("replacement transaction underpriced")).Once() + + // Do the thing + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx3, err = txStore.FindTxWithAttempts(etx3.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx3.State) + + require.Len(t, etx3.TxAttempts, 2) + require.Equal(t, 
attempt3_1.ID, etx3.TxAttempts[1].ID) + attempt3_2 = etx3.TxAttempts[0] + + assert.Equal(t, expectedBumpedGasPrice.Int64(), attempt3_2.TxFee.Legacy.ToInt().Int64()) + }) + + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt3_2.ID)) + var attempt3_3 txmgr.TxAttempt + + t.Run("handles case where transaction is already known somehow", func(t *testing.T) { + expectedBumpedGasPrice := big.NewInt(50400000000) + require.Greater(t, expectedBumpedGasPrice.Int64(), attempt3_1.TxFee.Legacy.ToInt().Int64()) + + ethTx := *types.NewTx(&types.LegacyTx{}) + kst.On("SignTx", + fromAddress, + mock.MatchedBy(func(tx *types.Transaction) bool { + if evmtypes.Nonce(tx.Nonce()) != *etx3.Sequence || expectedBumpedGasPrice.Cmp(tx.GasPrice()) != 0 { + return false + } + ethTx = *tx + return true + }), + mock.Anything).Return(ðTx, nil).Once() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return evmtypes.Nonce(tx.Nonce()) == *etx3.Sequence && expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.Successful, fmt.Errorf("known transaction: %s", ethTx.Hash().Hex())).Once() + + // Do the thing + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx3, err = txStore.FindTxWithAttempts(etx3.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx3.State) + + require.Len(t, etx3.TxAttempts, 3) + attempt3_3 = etx3.TxAttempts[0] + assert.Equal(t, expectedBumpedGasPrice.Int64(), attempt3_3.TxFee.Legacy.ToInt().Int64()) + }) + + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt3_3.ID)) + var attempt3_4 txmgr.TxAttempt + + t.Run("pretends it was accepted and continues the cycle if rejected for being temporarily underpriced", func(t *testing.T) { + // This 
happens if parity is rejecting transactions that are not priced high enough to even get into the mempool at all + // It should pretend it was accepted into the mempool and hand off to the next cycle to continue bumping gas as normal + temporarilyUnderpricedError := "There are too many transactions in the queue. Your transaction was dropped due to limit. Try increasing the fee." + + expectedBumpedGasPrice := big.NewInt(60480000000) + require.Greater(t, expectedBumpedGasPrice.Int64(), attempt3_2.TxFee.Legacy.ToInt().Int64()) + + ethTx := *types.NewTx(&types.LegacyTx{}) + kst.On("SignTx", + fromAddress, + mock.MatchedBy(func(tx *types.Transaction) bool { + if evmtypes.Nonce(tx.Nonce()) != *etx3.Sequence || expectedBumpedGasPrice.Cmp(tx.GasPrice()) != 0 { + return false + } + ethTx = *tx + return true + }), + mock.Anything).Return(ðTx, nil).Once() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return evmtypes.Nonce(tx.Nonce()) == *etx3.Sequence && expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.Successful, errors.New(temporarilyUnderpricedError)).Once() + + // Do the thing + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx3, err = txStore.FindTxWithAttempts(etx3.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx3.State) + + require.Len(t, etx3.TxAttempts, 4) + attempt3_4 = etx3.TxAttempts[0] + assert.Equal(t, expectedBumpedGasPrice.Int64(), attempt3_4.TxFee.Legacy.ToInt().Int64()) + }) + + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt3_4.ID)) + + t.Run("resubmits at the old price and does not create a new attempt if one of the bumped transactions would exceed EVM.GasEstimator.PriceMax", func(t *testing.T) { + // Set price such that the next bump will exceed EVM.GasEstimator.PriceMax + // 
Existing gas price is: 60480000000 + gasPrice := attempt3_4.TxFee.Legacy.ToInt() + gcfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.PriceMax = assets.NewWeiI(60500000000) + }) + newCfg := evmtest.NewChainScopedConfig(t, gcfg) + ec2 := newEthConfirmer(t, txStore, ethClient, newCfg, ethKeyStore, nil) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return evmtypes.Nonce(tx.Nonce()) == *etx3.Sequence && gasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.Successful, errors.New("already known")).Once() // we already submitted at this price, now it's time to bump and submit again but since we simply resubmitted rather than increasing gas price, geth already knows about this tx + + // Do the thing + require.NoError(t, ec2.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx3, err = txStore.FindTxWithAttempts(etx3.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx3.State) + + // No new tx attempts + require.Len(t, etx3.TxAttempts, 4) + attempt3_4 = etx3.TxAttempts[0] + assert.Equal(t, gasPrice.Int64(), attempt3_4.TxFee.Legacy.ToInt().Int64()) + }) + + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt3_4.ID)) + + t.Run("resubmits at the old price and does not create a new attempt if the current price is exactly EVM.GasEstimator.PriceMax", func(t *testing.T) { + // Set price such that the current price is already at EVM.GasEstimator.PriceMax + // Existing gas price is: 60480000000 + gasPrice := attempt3_4.TxFee.Legacy.ToInt() + gcfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.PriceMax = assets.NewWeiI(60480000000) + }) + newCfg := evmtest.NewChainScopedConfig(t, gcfg) + ec2 := newEthConfirmer(t, txStore, ethClient, newCfg, 
ethKeyStore, nil) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return evmtypes.Nonce(tx.Nonce()) == *etx3.Sequence && gasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.Successful, errors.New("already known")).Once() // we already submitted at this price, now it's time to bump and submit again but since we simply resubmitted rather than increasing gas price, geth already knows about this tx + + // Do the thing + require.NoError(t, ec2.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx3, err = txStore.FindTxWithAttempts(etx3.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx3.State) + + // No new tx attempts + require.Len(t, etx3.TxAttempts, 4) + attempt3_4 = etx3.TxAttempts[0] + assert.Equal(t, gasPrice.Int64(), attempt3_4.TxFee.Legacy.ToInt().Int64()) + }) + + // The EIP-1559 etx and attempt + etx4 := mustInsertUnconfirmedEthTxWithBroadcastDynamicFeeAttempt(t, txStore, nonce, fromAddress) + attempt4_1 := etx4.TxAttempts[0] + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1, gas_tip_cap=$2, gas_fee_cap=$3 WHERE id=$4 RETURNING *`, + oldEnough, assets.GWei(35), assets.GWei(100), attempt4_1.ID)) + var attempt4_2 txmgr.TxAttempt + + t.Run("EIP-1559: bumps using EIP-1559 rules when existing attempts are of type 0x2", func(t *testing.T) { + ethTx := *types.NewTx(&types.DynamicFeeTx{}) + kst.On("SignTx", + fromAddress, + mock.MatchedBy(func(tx *types.Transaction) bool { + if evmtypes.Nonce(tx.Nonce()) != *etx4.Sequence { + return false + } + ethTx = *tx + return true + }), + mock.Anything).Return(ðTx, nil).Once() + // This is the new, EIP-1559 attempt + gasTipCap := assets.GWei(42) + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return evmtypes.Nonce(tx.Nonce()) == *etx4.Sequence && 
gasTipCap.ToInt().Cmp(tx.GasTipCap()) == 0 + }), fromAddress).Return(commonclient.Successful, nil).Once() + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx4, err = txStore.FindTxWithAttempts(etx4.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx4.State) + + // A new, bumped attempt + require.Len(t, etx4.TxAttempts, 2) + attempt4_2 = etx4.TxAttempts[0] + assert.Nil(t, attempt4_2.TxFee.Legacy) + assert.Equal(t, assets.GWei(42).String(), attempt4_2.TxFee.DynamicTipCap.String()) + assert.Equal(t, assets.GWei(120).String(), attempt4_2.TxFee.DynamicFeeCap.String()) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt4_2.State) // fixed: was attempt1_2 (copy-paste from the legacy case); this subtest's bumped attempt is attempt4_2 + }) + + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1, gas_tip_cap=$2, gas_fee_cap=$3 WHERE id=$4 RETURNING *`, + oldEnough, assets.GWei(999), assets.GWei(1000), attempt4_2.ID)) + + t.Run("EIP-1559: resubmits at the old price and does not create a new attempt if one of the bumped EIP-1559 transactions would have its tip cap exceed EVM.GasEstimator.PriceMax", func(t *testing.T) { + gcfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.PriceMax = assets.GWei(1000) + }) + newCfg := evmtest.NewChainScopedConfig(t, gcfg) + ec2 := newEthConfirmer(t, txStore, ethClient, newCfg, ethKeyStore, nil) + + // Third attempt failed to bump, resubmits old one instead + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return evmtypes.Nonce(tx.Nonce()) == *etx4.Sequence && attempt4_2.Hash.String() == tx.Hash().String() + }), fromAddress).Return(commonclient.Successful, nil).Once() + + require.NoError(t, ec2.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx4, err = txStore.FindTxWithAttempts(etx4.ID) + require.NoError(t, err) + + assert.Equal(t, txmgrcommon.TxUnconfirmed, 
etx4.State) + + // No new tx attempts + require.Len(t, etx4.TxAttempts, 2) + assert.Equal(t, assets.GWei(999).Int64(), etx4.TxAttempts[0].TxFee.DynamicTipCap.ToInt().Int64()) + assert.Equal(t, assets.GWei(1000).Int64(), etx4.TxAttempts[0].TxFee.DynamicFeeCap.ToInt().Int64()) + }) + + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1, gas_tip_cap=$2, gas_fee_cap=$3 WHERE id=$4 RETURNING *`, + oldEnough, assets.GWei(45), assets.GWei(100), attempt4_2.ID)) + + t.Run("EIP-1559: saves attempt anyway if replacement transaction is underpriced because the bumped gas price is insufficiently higher than the previous one", func(t *testing.T) { + // NOTE: This test case was empirically impossible when I tried it on eth mainnet (any EIP1559 transaction with a higher tip cap is accepted even if it's only 1 wei more) but appears to be possible on Polygon/Matic, probably due to poor design that applies the 10% minimum to the overall value (base fee + tip cap) + expectedBumpedTipCap := assets.GWei(54) + require.Greater(t, expectedBumpedTipCap.Int64(), attempt4_2.TxFee.DynamicTipCap.ToInt().Int64()) + + ethTx := *types.NewTx(&types.LegacyTx{}) + kst.On("SignTx", + fromAddress, + mock.MatchedBy(func(tx *types.Transaction) bool { + if evmtypes.Nonce(tx.Nonce()) != *etx4.Sequence || expectedBumpedTipCap.ToInt().Cmp(tx.GasTipCap()) != 0 { + return false + } + ethTx = *tx + return true + }), + mock.Anything).Return(ðTx, nil).Once() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return evmtypes.Nonce(tx.Nonce()) == *etx4.Sequence && expectedBumpedTipCap.ToInt().Cmp(tx.GasTipCap()) == 0 + }), fromAddress).Return(commonclient.Successful, errors.New("replacement transaction underpriced")).Once() + + // Do it + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + var err error + etx4, err = txStore.FindTxWithAttempts(etx4.ID) + require.NoError(t, err) + 
+ assert.Equal(t, txmgrcommon.TxUnconfirmed, etx4.State) + + require.Len(t, etx4.TxAttempts, 3) + require.Equal(t, attempt4_1.ID, etx4.TxAttempts[2].ID) + require.Equal(t, attempt4_2.ID, etx4.TxAttempts[1].ID) + attempt4_3 := etx4.TxAttempts[0] + + assert.Equal(t, expectedBumpedTipCap.Int64(), attempt4_3.TxFee.DynamicTipCap.ToInt().Int64()) + }) +} + +func TestEthConfirmer_RebroadcastWhereNecessary_TerminallyUnderpriced_ThenGoesThrough(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.PriceMax = assets.GWei(500) + }) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + + _, _ = cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + // Use a mock keystore for this test + kst := ksmocks.NewEth(t) + addresses := []gethCommon.Address{fromAddress} + kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Maybe() + currentHead := int64(30) + oldEnough := 5 + nonce := int64(0) + + t.Run("terminally underpriced transaction with in_progress attempt is retried with more gas", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, kst, nil) + + originalBroadcastAt := time.Unix(1616509100, 0) + etx := mustInsertUnconfirmedEthTxWithAttemptState(t, txStore, nonce, fromAddress, txmgrtypes.TxAttemptInProgress, originalBroadcastAt) + require.Equal(t, originalBroadcastAt, *etx.BroadcastAt) + nonce++ + attempt := etx.TxAttempts[0] + signedTx, err := txmgr.GetGethSignedTx(attempt.SignedRawTx) + require.NoError(t, err) + + // Fail the first time with terminally underpriced. 
+ ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return( + commonclient.Underpriced, errors.New("Transaction gas price is too low. It does not satisfy your node's minimal gas price")).Once() + // Succeed the second time after bumping gas. + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return( + commonclient.Successful, nil).Once() + kst.On("SignTx", mock.Anything, mock.Anything, mock.Anything).Return( + signedTx, nil, + ).Once() + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + }) + + t.Run("multiple gas bumps with existing broadcast attempts are retried with more gas until success in legacy mode", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, kst, nil) + + etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + nonce++ + legacyAttempt := etx.TxAttempts[0] + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&legacyAttempt) + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, legacyAttempt.ID)) + + // Fail a few times with terminally underpriced + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return( + commonclient.Underpriced, errors.New("Transaction gas price is too low. It does not satisfy your node's minimal gas price")).Times(3) + // Succeed on the fourth attempt, after three gas bumps. 
+ + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return( + commonclient.Successful, nil).Once() + signedLegacyTx := new(types.Transaction) + kst.On("SignTx", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Type() == 0x0 && tx.Nonce() == uint64(*etx.Sequence) + }), mock.Anything).Return( + signedLegacyTx, nil, + ).Run(func(args mock.Arguments) { + unsignedLegacyTx := args.Get(1).(*types.Transaction) + // Use the real keystore to do the actual signing + thisSignedLegacyTx, err := ethKeyStore.SignTx(fromAddress, unsignedLegacyTx, testutils.FixtureChainID) + require.NoError(t, err) + *signedLegacyTx = *thisSignedLegacyTx + }).Times(4) // 3 failures 1 success + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + }) + + t.Run("multiple gas bumps with existing broadcast attempts are retried with more gas until success in EIP-1559 mode", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, kst, nil) + + etx := mustInsertUnconfirmedEthTxWithBroadcastDynamicFeeAttempt(t, txStore, nonce, fromAddress) + nonce++ + dxFeeAttempt := etx.TxAttempts[0] + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&dxFeeAttempt) + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, dxFeeAttempt.ID)) + + // Fail a few times with terminally underpriced + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return( + commonclient.Underpriced, errors.New("transaction underpriced")).Times(3) + // Succeed on the fourth attempt, after three gas bumps. 
+ ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return( + commonclient.Successful, nil).Once() + signedDxFeeTx := new(types.Transaction) + kst.On("SignTx", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Type() == 0x2 && tx.Nonce() == uint64(*etx.Sequence) + }), mock.Anything).Return( + signedDxFeeTx, nil, + ).Run(func(args mock.Arguments) { + unsignedDxFeeTx := args.Get(1).(*types.Transaction) + // Use the real keystore to do the actual signing + thisSignedDxFeeTx, err := ethKeyStore.SignTx(fromAddress, unsignedDxFeeTx, testutils.FixtureChainID) + require.NoError(t, err) + *signedDxFeeTx = *thisSignedDxFeeTx + }).Times(4) // 3 failures 1 success + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + }) +} + +func TestEthConfirmer_RebroadcastWhereNecessary_WhenOutOfEth(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + _, err := ethKeyStore.EnabledKeysForChain(testutils.FixtureChainID) + require.NoError(t, err) + require.NoError(t, err) + // keyStates, err := ethKeyStore.GetStatesForKeys(keys) + // require.NoError(t, err) + + config := newTestChainScopedConfig(t) + currentHead := int64(30) + oldEnough := int64(19) + nonce := int64(0) + + etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, nonce, fromAddress) + nonce++ + attempt1_1 := etx.TxAttempts[0] + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&attempt1_1) + require.NoError(t, db.Get(&dbAttempt, `UPDATE evm.tx_attempts SET broadcast_before_block_num=$1 WHERE id=$2 RETURNING *`, oldEnough, attempt1_1.ID)) + var attempt1_2 txmgr.TxAttempt + + 
insufficientEthError := errors.New("insufficient funds for gas * price + value") + + t.Run("saves attempt with state 'insufficient_eth' if eth node returns this error", func(t *testing.T) { + ec := newEthConfirmer(t, txStore, ethClient, config, ethKeyStore, nil) + + expectedBumpedGasPrice := big.NewInt(20000000000) + require.Greater(t, expectedBumpedGasPrice.Int64(), attempt1_1.TxFee.Legacy.ToInt().Int64()) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.InsufficientFunds, insufficientEthError).Once() + + // Do the thing + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + require.Len(t, etx.TxAttempts, 2) + require.Equal(t, attempt1_1.ID, etx.TxAttempts[1].ID) + + // Got the new attempt + attempt1_2 = etx.TxAttempts[0] + assert.Equal(t, expectedBumpedGasPrice.Int64(), attempt1_2.TxFee.Legacy.ToInt().Int64()) + assert.Equal(t, txmgrtypes.TxAttemptInsufficientFunds, attempt1_2.State) + assert.Nil(t, attempt1_2.BroadcastBeforeBlockNum) + }) + + t.Run("does not bump gas when previous error was 'out of eth', instead resubmits existing transaction", func(t *testing.T) { + ec := newEthConfirmer(t, txStore, ethClient, config, ethKeyStore, nil) + + expectedBumpedGasPrice := big.NewInt(20000000000) + require.Greater(t, expectedBumpedGasPrice.Int64(), attempt1_1.TxFee.Legacy.ToInt().Int64()) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.InsufficientFunds, insufficientEthError).Once() + + // Do the thing + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + + etx, err = txStore.FindTxWithAttempts(etx.ID) + 
require.NoError(t, err) + + // New attempt was NOT created + require.Len(t, etx.TxAttempts, 2) + + // The attempt is still "out of eth" + attempt1_2 = etx.TxAttempts[0] + assert.Equal(t, expectedBumpedGasPrice.Int64(), attempt1_2.TxFee.Legacy.ToInt().Int64()) + assert.Equal(t, txmgrtypes.TxAttemptInsufficientFunds, attempt1_2.State) + }) + + t.Run("saves the attempt as broadcast after node wallet has been topped up with sufficient balance", func(t *testing.T) { + ec := newEthConfirmer(t, txStore, ethClient, config, ethKeyStore, nil) + + expectedBumpedGasPrice := big.NewInt(20000000000) + require.Greater(t, expectedBumpedGasPrice.Int64(), attempt1_1.TxFee.Legacy.ToInt().Int64()) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return expectedBumpedGasPrice.Cmp(tx.GasPrice()) == 0 + }), fromAddress).Return(commonclient.Successful, nil).Once() + + // Do the thing + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + + // New attempt was NOT created + require.Len(t, etx.TxAttempts, 2) + + // Attempt is now 'broadcast' + attempt1_2 = etx.TxAttempts[0] + assert.Equal(t, expectedBumpedGasPrice.Int64(), attempt1_2.TxFee.Legacy.ToInt().Int64()) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt1_2.State) + }) + + t.Run("resubmitting due to insufficient eth is not limited by EVM.GasEstimator.BumpTxDepth", func(t *testing.T) { + depth := 2 + etxCount := 4 + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.BumpTxDepth = ptr(uint32(depth)) + }) + evmcfg := evmtest.NewChainScopedConfig(t, cfg) + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, nil) + + for i := 0; i < etxCount; i++ { + n := nonce + mustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, nonce, fromAddress) + ethClient.On("SendTransactionReturnCode", 
mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Nonce() == uint64(n) + }), fromAddress).Return(commonclient.Successful, nil).Once() + + nonce++ + } + + require.NoError(t, ec.RebroadcastWhereNecessary(testutils.Context(t), currentHead)) + + var dbAttempts []txmgr.DbEthTxAttempt + + require.NoError(t, db.Select(&dbAttempts, "SELECT * FROM evm.tx_attempts WHERE state = 'insufficient_eth'")) + require.Len(t, dbAttempts, 0) + }) +} + +func TestEthConfirmer_EnsureConfirmedTransactionsInLongestChain(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + config := newTestChainScopedConfig(t) + ec := newEthConfirmer(t, txStore, ethClient, config, ethKeyStore, nil) + + head := evmtypes.Head{ + Hash: utils.NewHash(), + Number: 10, + Parent: &evmtypes.Head{ + Hash: utils.NewHash(), + Number: 9, + Parent: &evmtypes.Head{ + Number: 8, + Hash: utils.NewHash(), + Parent: nil, + }, + }, + } + + t.Run("does nothing if there aren't any transactions", func(t *testing.T) { + require.NoError(t, ec.EnsureConfirmedTransactionsInLongestChain(testutils.Context(t), &head)) + }) + + t.Run("does nothing to unconfirmed transactions", func(t *testing.T) { + etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 0, fromAddress) + + // Do the thing + require.NoError(t, ec.EnsureConfirmedTransactionsInLongestChain(testutils.Context(t), &head)) + + etx, err := txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx.State) + }) + + t.Run("does nothing to confirmed transactions with receipts within head height of the chain and included in the chain", func(t *testing.T) { + etx := 
cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 2, 1, fromAddress) + mustInsertEthReceipt(t, txStore, head.Number, head.Hash, etx.TxAttempts[0].Hash) + + // Do the thing + require.NoError(t, ec.EnsureConfirmedTransactionsInLongestChain(testutils.Context(t), &head)) + + etx, err := txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxConfirmed, etx.State) + }) + + t.Run("does nothing to confirmed transactions that only have receipts older than the start of the chain", func(t *testing.T) { + etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 3, 1, fromAddress) + // Add receipt that is older than the lowest block of the chain + mustInsertEthReceipt(t, txStore, head.Parent.Parent.Number-1, utils.NewHash(), etx.TxAttempts[0].Hash) + + // Do the thing + require.NoError(t, ec.EnsureConfirmedTransactionsInLongestChain(testutils.Context(t), &head)) + + etx, err := txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxConfirmed, etx.State) + }) + + t.Run("unconfirms and rebroadcasts transactions that have receipts within head height of the chain but not included in the chain", func(t *testing.T) { + etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 4, 1, fromAddress) + attempt := etx.TxAttempts[0] + // Include one within head height but a different block hash + mustInsertEthReceipt(t, txStore, head.Parent.Number, utils.NewHash(), attempt.Hash) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + atx, err := txmgr.GetGethSignedTx(attempt.SignedRawTx) + require.NoError(t, err) + // Keeps gas price and nonce the same + return atx.GasPrice().Cmp(tx.GasPrice()) == 0 && atx.Nonce() == tx.Nonce() + }), fromAddress).Return(commonclient.Successful, nil).Once() + + // Do the thing + require.NoError(t, ec.EnsureConfirmedTransactionsInLongestChain(testutils.Context(t), &head)) + + etx, err := 
txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx.State) + require.Len(t, etx.TxAttempts, 1) + attempt = etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State) + }) + + t.Run("unconfirms and rebroadcasts transactions that have receipts within head height of chain but not included in the chain even if a receipt exists older than the start of the chain", func(t *testing.T) { + etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 5, 1, fromAddress) + attempt := etx.TxAttempts[0] + attemptHash := attempt.Hash + // Add receipt that is older than the lowest block of the chain + mustInsertEthReceipt(t, txStore, head.Parent.Parent.Number-1, utils.NewHash(), attemptHash) + // Include one within head height but a different block hash + mustInsertEthReceipt(t, txStore, head.Parent.Number, utils.NewHash(), attemptHash) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, fromAddress).Return( + commonclient.Successful, nil).Once() + + // Do the thing + require.NoError(t, ec.EnsureConfirmedTransactionsInLongestChain(testutils.Context(t), &head)) + + etx, err := txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx.State) + require.Len(t, etx.TxAttempts, 1) + attempt = etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State) + }) + + t.Run("if more than one attempt has a receipt (should not be possible but isn't prevented by database constraints) unconfirms and rebroadcasts only the attempt with the highest gas price", func(t *testing.T) { + etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 6, 1, fromAddress) + require.Len(t, etx.TxAttempts, 1) + // Sanity check to assert the included attempt has the lowest gas price + require.Less(t, etx.TxAttempts[0].TxFee.Legacy.ToInt().Int64(), int64(30000)) + + attempt2 := newBroadcastLegacyEthTxAttempt(t, etx.ID, 30000) 
+ attempt2.SignedRawTx = hexutil.MustDecode("0xf88c8301f3a98503b9aca000832ab98094f5fff180082d6017036b771ba883025c654bc93580a4daa6d556000000000000000000000000000000000000000000000000000000000000000026a0f25601065ee369b6470c0399a2334afcfbeb0b5c8f3d9a9042e448ed29b5bcbda05b676e00248b85faf4dd889f0e2dcf91eb867e23ac9eeb14a73f9e4c14972cdf") + attempt3 := newBroadcastLegacyEthTxAttempt(t, etx.ID, 40000) + attempt3.SignedRawTx = hexutil.MustDecode("0xf88c8301f3a88503b9aca0008316e36094151445852b0cfdf6a4cc81440f2af99176e8ad0880a4daa6d556000000000000000000000000000000000000000000000000000000000000000026a0dcb5a7ad52b96a866257134429f944c505820716567f070e64abb74899803855a04c13eff2a22c218e68da80111e1bb6dc665d3dea7104ab40ff8a0275a99f630d") + require.NoError(t, txStore.InsertTxAttempt(&attempt2)) + require.NoError(t, txStore.InsertTxAttempt(&attempt3)) + + // Receipt is within head height but a different block hash + mustInsertEthReceipt(t, txStore, head.Parent.Number, utils.NewHash(), attempt2.Hash) + // Receipt is within head height but a different block hash + mustInsertEthReceipt(t, txStore, head.Parent.Number, utils.NewHash(), attempt3.Hash) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + s, err := txmgr.GetGethSignedTx(attempt3.SignedRawTx) + require.NoError(t, err) + return tx.Hash() == s.Hash() + }), fromAddress).Return(commonclient.Successful, nil).Once() + + // Do the thing + require.NoError(t, ec.EnsureConfirmedTransactionsInLongestChain(testutils.Context(t), &head)) + + etx, err := txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx.State) + require.Len(t, etx.TxAttempts, 3) + attempt1 := etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt1.State) + attempt2 = etx.TxAttempts[1] + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt2.State) + attempt3 = etx.TxAttempts[2] + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, 
attempt3.State) + }) + + t.Run("if receipt has a block number that is in the future, does not mark for rebroadcast (the safe thing to do is simply wait until heads catches up)", func(t *testing.T) { + etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 7, 1, fromAddress) + attempt := etx.TxAttempts[0] + // Add receipt that is higher than head + mustInsertEthReceipt(t, txStore, head.Number+1, utils.NewHash(), attempt.Hash) + + require.NoError(t, ec.EnsureConfirmedTransactionsInLongestChain(testutils.Context(t), &head)) + + etx, err := txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxConfirmed, etx.State) + require.Len(t, etx.TxAttempts, 1) + attempt = etx.TxAttempts[0] + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State) + assert.Len(t, attempt.Receipts, 1) + }) +} + +func TestEthConfirmer_ForceRebroadcast(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + config := newTestChainScopedConfig(t) + mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, config.EVM().ChainID()) + mustInsertInProgressEthTx(t, txStore, 0, fromAddress) + etx1 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 1, fromAddress) + etx2 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 2, fromAddress) + + gasPriceWei := gas.EvmFee{Legacy: assets.GWei(52)} + overrideGasLimit := uint32(20000) + + t.Run("rebroadcasts one eth_tx if it falls within in nonce range", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ec := newEthConfirmer(t, txStore, ethClient, config, ethKeyStore, nil) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + 
return tx.Nonce() == uint64(*etx1.Sequence) && + tx.GasPrice().Int64() == gasPriceWei.Legacy.Int64() && + tx.Gas() == uint64(overrideGasLimit) && + reflect.DeepEqual(tx.Data(), etx1.EncodedPayload) && + tx.To().String() == etx1.ToAddress.String() + }), mock.Anything).Return(commonclient.Successful, nil).Once() + + require.NoError(t, ec.ForceRebroadcast(testutils.Context(t), []evmtypes.Nonce{1}, gasPriceWei, fromAddress, overrideGasLimit)) + }) + + t.Run("uses default gas limit if overrideGasLimit is 0", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ec := newEthConfirmer(t, txStore, ethClient, config, ethKeyStore, nil) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Nonce() == uint64(*etx1.Sequence) && + tx.GasPrice().Int64() == gasPriceWei.Legacy.Int64() && + tx.Gas() == uint64(etx1.FeeLimit) && + reflect.DeepEqual(tx.Data(), etx1.EncodedPayload) && + tx.To().String() == etx1.ToAddress.String() + }), mock.Anything).Return(commonclient.Successful, nil).Once() + + require.NoError(t, ec.ForceRebroadcast(testutils.Context(t), []evmtypes.Nonce{(1)}, gasPriceWei, fromAddress, 0)) + }) + + t.Run("rebroadcasts several eth_txes in nonce range", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ec := newEthConfirmer(t, txStore, ethClient, config, ethKeyStore, nil) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Nonce() == uint64(*etx1.Sequence) && tx.GasPrice().Int64() == gasPriceWei.Legacy.Int64() && tx.Gas() == uint64(overrideGasLimit) + }), mock.Anything).Return(commonclient.Successful, nil).Once() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Nonce() == uint64(*etx2.Sequence) && tx.GasPrice().Int64() == gasPriceWei.Legacy.Int64() && tx.Gas() == uint64(overrideGasLimit) + }), 
mock.Anything).Return(commonclient.Successful, nil).Once() + + require.NoError(t, ec.ForceRebroadcast(testutils.Context(t), []evmtypes.Nonce{(1), (2)}, gasPriceWei, fromAddress, overrideGasLimit)) + }) + + t.Run("broadcasts zero transactions if eth_tx doesn't exist for that nonce", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ec := newEthConfirmer(t, txStore, ethClient, config, ethKeyStore, nil) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Nonce() == uint64(1) + }), mock.Anything).Return(commonclient.Successful, nil).Once() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Nonce() == uint64(2) + }), mock.Anything).Return(commonclient.Successful, nil).Once() + for i := 3; i <= 5; i++ { + nonce := i + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Nonce() == uint64(nonce) && + tx.GasPrice().Int64() == gasPriceWei.Legacy.Int64() && + tx.Gas() == uint64(overrideGasLimit) && + *tx.To() == fromAddress && + tx.Value().Cmp(big.NewInt(0)) == 0 && + len(tx.Data()) == 0 + }), mock.Anything).Return(commonclient.Successful, nil).Once() + } + nonces := []evmtypes.Nonce{(1), (2), (3), (4), (5)} + + require.NoError(t, ec.ForceRebroadcast(testutils.Context(t), nonces, gasPriceWei, fromAddress, overrideGasLimit)) + }) + + t.Run("zero transactions use default gas limit if override wasn't specified", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ec := newEthConfirmer(t, txStore, ethClient, config, ethKeyStore, nil) + + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.MatchedBy(func(tx *types.Transaction) bool { + return tx.Nonce() == uint64(0) && tx.GasPrice().Int64() == gasPriceWei.Legacy.Int64() && uint32(tx.Gas()) == config.EVM().GasEstimator().LimitDefault() + }), 
mock.Anything).Return(commonclient.Successful, nil).Once() + + require.NoError(t, ec.ForceRebroadcast(testutils.Context(t), []evmtypes.Nonce{(0)}, gasPriceWei, fromAddress, 0)) + }) +} + +func TestEthConfirmer_ResumePendingRuns(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + config := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, config.Database()) + + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + evmcfg := evmtest.NewChainScopedConfig(t, config) + + head := evmtypes.Head{ + Hash: utils.NewHash(), + Number: 10, + Parent: &evmtypes.Head{ + Hash: utils.NewHash(), + Number: 9, + Parent: &evmtypes.Head{ + Number: 8, + Hash: utils.NewHash(), + Parent: nil, + }, + }, + } + + minConfirmations := int64(2) + + pgtest.MustExec(t, db, `SET CONSTRAINTS pipeline_runs_pipeline_spec_id_fkey DEFERRED`) + + t.Run("doesn't process task runs that are not suspended (possibly already previously resumed)", func(t *testing.T) { + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, func(uuid.UUID, interface{}, error) error { + t.Fatal("No value expected") + return nil + }) + + run := cltest.MustInsertPipelineRun(t, db) + tr := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run.ID) + + etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 1, 1, fromAddress) + mustInsertEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, etx.TxAttempts[0].Hash) + // Setting both signal_callback and callback_completed to TRUE to simulate a completed pipeline task + // It would only be in a state past suspended if the resume callback was called and callback_completed was set to TRUE + pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE, callback_completed = TRUE WHERE id = $3`, &tr.ID, minConfirmations, 
etx.ID) + + err := ec.ResumePendingTaskRuns(testutils.Context(t), &head) + require.NoError(t, err) + }) + + t.Run("doesn't process task runs where the receipt is younger than minConfirmations", func(t *testing.T) { + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, func(uuid.UUID, interface{}, error) error { + t.Fatal("No value expected") + return nil + }) + + run := cltest.MustInsertPipelineRun(t, db) + tr := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run.ID) + + etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 2, 1, fromAddress) + mustInsertEthReceipt(t, txStore, head.Number, head.Hash, etx.TxAttempts[0].Hash) + + pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE WHERE id = $3`, &tr.ID, minConfirmations, etx.ID) + + err := ec.ResumePendingTaskRuns(testutils.Context(t), &head) + require.NoError(t, err) + }) + + t.Run("processes eth_txes with receipts older than minConfirmations", func(t *testing.T) { + ch := make(chan interface{}) + nonce := evmtypes.Nonce(3) + var err error + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, func(id uuid.UUID, value interface{}, thisErr error) error { + err = thisErr + ch <- value + return nil + }) + + run := cltest.MustInsertPipelineRun(t, db) + tr := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run.ID) + pgtest.MustExec(t, db, `UPDATE pipeline_runs SET state = 'suspended' WHERE id = $1`, run.ID) + + etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, int64(nonce), 1, fromAddress) + pgtest.MustExec(t, db, `UPDATE evm.txes SET meta='{"FailOnRevert": true}'`) + receipt := mustInsertEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, etx.TxAttempts[0].Hash) + + pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE WHERE id = $3`, &tr.ID, minConfirmations, etx.ID) + + done := make(chan struct{}) + 
t.Cleanup(func() { <-done }) + go func() { + defer close(done) + err2 := ec.ResumePendingTaskRuns(testutils.Context(t), &head) + if !assert.NoError(t, err2) { + return + } + // Retrieve Tx to check if callback completed flag was set to true + updateTx, err3 := txStore.FindTxWithSequence(testutils.Context(t), fromAddress, nonce) + if assert.NoError(t, err3) { + assert.Equal(t, true, updateTx.CallbackCompleted) + } + }() + + select { + case data := <-ch: + assert.NoError(t, err) + + require.IsType(t, &evmtypes.Receipt{}, data) + r := data.(*evmtypes.Receipt) + require.Equal(t, receipt.TxHash, r.TxHash) + + case <-time.After(time.Second): + t.Fatal("no value received") + } + }) + + pgtest.MustExec(t, db, `DELETE FROM pipeline_runs`) + + t.Run("processes eth_txes with receipt older than minConfirmations that reverted", func(t *testing.T) { + type data struct { + value any + error + } + ch := make(chan data) + nonce := evmtypes.Nonce(4) + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, func(id uuid.UUID, value interface{}, err error) error { + ch <- data{value, err} + return nil + }) + + run := cltest.MustInsertPipelineRun(t, db) + tr := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run.ID) + pgtest.MustExec(t, db, `UPDATE pipeline_runs SET state = 'suspended' WHERE id = $1`, run.ID) + + etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, int64(nonce), 1, fromAddress) + pgtest.MustExec(t, db, `UPDATE evm.txes SET meta='{"FailOnRevert": true}'`) + + // receipt is not passed through as a value since it reverted and caused an error + mustInsertRevertedEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, etx.TxAttempts[0].Hash) + + pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE WHERE id = $3`, &tr.ID, minConfirmations, etx.ID) + + done := make(chan struct{}) + t.Cleanup(func() { <-done }) + go func() { + defer close(done) + err2 := 
ec.ResumePendingTaskRuns(testutils.Context(t), &head) + if !assert.NoError(t, err2) { + return + } + // Retrieve Tx to check if callback completed flag was set to true + updateTx, err3 := txStore.FindTxWithSequence(testutils.Context(t), fromAddress, nonce) + if assert.NoError(t, err3) { + assert.Equal(t, true, updateTx.CallbackCompleted) + } + }() + + select { + case data := <-ch: + assert.Error(t, data.error) + + assert.EqualError(t, data.error, fmt.Sprintf("transaction %s reverted on-chain", etx.TxAttempts[0].Hash.String())) + + assert.Nil(t, data.value) + + case <-testutils.AfterWaitTimeout(t): + t.Fatal("no value received") + } + }) + + t.Run("does not mark callback complete if callback fails", func(t *testing.T) { + nonce := evmtypes.Nonce(5) + ec := newEthConfirmer(t, txStore, ethClient, evmcfg, ethKeyStore, func(uuid.UUID, interface{}, error) error { + return errors.New("error") + }) + + run := cltest.MustInsertPipelineRun(t, db) + tr := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run.ID) + + etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, int64(nonce), 1, fromAddress) + mustInsertEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, etx.TxAttempts[0].Hash) + pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE WHERE id = $3`, &tr.ID, minConfirmations, etx.ID) + + err := ec.ResumePendingTaskRuns(testutils.Context(t), &head) + require.Error(t, err) + + // Retrieve Tx to check if callback completed flag was left unchanged + updateTx, err := txStore.FindTxWithSequence(testutils.Context(t), fromAddress, nonce) + require.NoError(t, err) + require.Equal(t, false, updateTx.CallbackCompleted) + }) +} + +func ptr[T any](t T) *T { return &t } + +func newEthConfirmer(t testing.TB, txStore txmgr.EvmTxStore, ethClient client.Client, config evmconfig.ChainScopedConfig, ks keystore.Eth, fn txmgrcommon.ResumeCallback) *txmgr.Confirmer { + lggr := logger.Test(t) + ge := 
config.EVM().GasEstimator() + estimator := gas.NewWrappedEvmEstimator(lggr, func(lggr logger.Logger) gas.EvmEstimator { + return gas.NewFixedPriceEstimator(ge, ge.BlockHistory(), lggr) + }, ge.EIP1559DynamicFees(), nil) + txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), ge, ks, estimator) + ec := txmgr.NewEvmConfirmer(txStore, txmgr.NewEvmTxmClient(ethClient), txmgr.NewEvmTxmConfig(config.EVM()), txmgr.NewEvmTxmFeeConfig(ge), config.EVM().Transactions(), config.Database(), ks, txBuilder, lggr) + ec.SetResumeCallback(fn) + servicetest.Run(t, ec) + return ec +} diff --git a/core/chains/evm/txmgr/evm_tx_store.go b/core/chains/evm/txmgr/evm_tx_store.go new file mode 100644 index 00000000..95a433e7 --- /dev/null +++ b/core/chains/evm/txmgr/evm_tx_store.go @@ -0,0 +1,2085 @@ +package txmgr + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/jackc/pgconn" + "github.com/jmoiron/sqlx" + "github.com/lib/pq" + pkgerrors "github.com/pkg/errors" + nullv4 "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/sqlutil" + "github.com/goplugin/plugin-common/pkg/utils/null" + + "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/label" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +var ( + ErrKeyNotUpdated = errors.New("evmTxStore: Key not updated") + ErrInvalidQOpt = errors.New("evmTxStore: Invalid QOpt") + + // ErrCouldNotGetReceipt is the error string we save if we 
reach our finality depth for a confirmed transaction without ever getting a receipt
+	// This most likely happened because an external wallet used the account for this nonce
+	ErrCouldNotGetReceipt = "could not get receipt"
+)
+
+// EvmTxStore combines the txmgr tx store interface and the interface needed for the API to read from the tx DB
+
+//go:generate mockery --quiet --name EvmTxStore --output ./mocks/ --case=underscore
+type EvmTxStore interface {
+	// redeclare TxStore for mockery
+	txmgrtypes.TxStore[common.Address, *big.Int, common.Hash, common.Hash, *evmtypes.Receipt, evmtypes.Nonce, gas.EvmFee]
+	TxStoreWebApi
+}
+
+// TxStoreWebApi encapsulates the methods that are not used by the txmgr and only used by the various web controllers and readers
+type TxStoreWebApi interface {
+	FindTxAttemptConfirmedByTxIDs(ids []int64) ([]TxAttempt, error)
+	FindTxByHash(hash common.Hash) (*Tx, error)
+	Transactions(offset, limit int) ([]Tx, int, error)
+	TxAttempts(offset, limit int) ([]TxAttempt, int, error)
+	TransactionsWithAttempts(offset, limit int) ([]Tx, int, error)
+	FindTxAttempt(hash common.Hash) (*TxAttempt, error)
+	FindTxWithAttempts(etxID int64) (etx Tx, err error)
+}
+
+type TestEvmTxStore interface {
+	EvmTxStore
+
+	// methods only used for testing purposes
+	InsertReceipt(receipt *evmtypes.Receipt) (int64, error)
+	InsertTx(etx *Tx) error
+	FindTxAttemptsByTxIDs(ids []int64) ([]TxAttempt, error)
+	InsertTxAttempt(attempt *TxAttempt) error
+	LoadTxesAttempts(etxs []*Tx, qopts ...pg.QOpt) error
+	GetFatalTransactions(ctx context.Context) (txes []*Tx, err error)
+	GetAllTxes(ctx context.Context) (txes []*Tx, err error)
+	GetAllTxAttempts(ctx context.Context) (attempts []TxAttempt, err error)
+	CountTxesByStateAndSubject(ctx context.Context, state txmgrtypes.TxState, subject uuid.UUID) (count int, err error)
+	FindTxesByFromAddressAndState(ctx context.Context, fromAddress common.Address, state string) (txes []*Tx, err error)
+	
UpdateTxAttemptBroadcastBeforeBlockNum(ctx context.Context, id int64, blockNum uint) error +} + +type evmTxStore struct { + q pg.Q + logger logger.SugaredLogger + ctx context.Context + ctxCancel context.CancelFunc +} + +var _ EvmTxStore = (*evmTxStore)(nil) +var _ TestEvmTxStore = (*evmTxStore)(nil) + +// Directly maps to columns of database table "evm.receipts". +// Do not modify type unless you +// intend to modify the database schema +type dbReceipt struct { + ID int64 + TxHash common.Hash + BlockHash common.Hash + BlockNumber int64 + TransactionIndex uint + Receipt evmtypes.Receipt + CreatedAt time.Time +} + +func DbReceiptFromEvmReceipt(evmReceipt *evmtypes.Receipt) dbReceipt { + return dbReceipt{ + TxHash: evmReceipt.TxHash, + BlockHash: evmReceipt.BlockHash, + BlockNumber: evmReceipt.BlockNumber.Int64(), + TransactionIndex: evmReceipt.TransactionIndex, + Receipt: *evmReceipt, + } +} + +func DbReceiptToEvmReceipt(receipt *dbReceipt) *evmtypes.Receipt { + return &receipt.Receipt +} + +// Directly maps to onchain receipt schema. +type rawOnchainReceipt = evmtypes.Receipt + +// Directly maps to some columns of few database tables. +// Does not map to a single database table. +// It's comprised of fields from different tables. 
+type dbReceiptPlus struct { + ID uuid.UUID `db:"pipeline_task_run_id"` + Receipt evmtypes.Receipt `db:"receipt"` + FailOnRevert bool `db:"FailOnRevert"` +} + +func fromDBReceipts(rs []dbReceipt) []*evmtypes.Receipt { + receipts := make([]*evmtypes.Receipt, len(rs)) + for i := 0; i < len(rs); i++ { + receipts[i] = DbReceiptToEvmReceipt(&rs[i]) + } + return receipts +} + +func fromDBReceiptsPlus(rs []dbReceiptPlus) []ReceiptPlus { + receipts := make([]ReceiptPlus, len(rs)) + for i := 0; i < len(rs); i++ { + receipts[i] = ReceiptPlus{ + ID: rs[i].ID, + Receipt: &rs[i].Receipt, + FailOnRevert: rs[i].FailOnRevert, + } + } + return receipts +} + +func toOnchainReceipt(rs []*evmtypes.Receipt) []rawOnchainReceipt { + receipts := make([]rawOnchainReceipt, len(rs)) + for i := 0; i < len(rs); i++ { + receipts[i] = *rs[i] + } + return receipts +} + +// Directly maps to columns of database table "evm.txes". +// This is exported, as tests and other external code still directly reads DB using this schema. +type DbEthTx struct { + ID int64 + IdempotencyKey *string + Nonce *int64 + FromAddress common.Address + ToAddress common.Address + EncodedPayload []byte + Value assets.Eth + // GasLimit on the EthTx is always the conceptual gas limit, which is not + // necessarily the same as the on-chain encoded value (i.e. Optimism) + GasLimit uint32 + Error nullv4.String + // BroadcastAt is updated every time an attempt for this eth_tx is re-sent + // In almost all cases it will be within a second or so of the actual send time. + BroadcastAt *time.Time + // InitialBroadcastAt is recorded once, the first ever time this eth_tx is sent + CreatedAt time.Time + State txmgrtypes.TxState + // Marshalled EvmTxMeta + // Used for additional context around transactions which you want to log + // at send time. 
+ Meta *sqlutil.JSON + Subject uuid.NullUUID + PipelineTaskRunID uuid.NullUUID + MinConfirmations null.Uint32 + EVMChainID ubig.Big + // TransmitChecker defines the check that should be performed before a transaction is submitted on + // chain. + TransmitChecker *sqlutil.JSON + InitialBroadcastAt *time.Time + // Marks tx requiring callback + SignalCallback bool + // Marks tx callback as signaled + CallbackCompleted bool +} + +func (db *DbEthTx) FromTx(tx *Tx) { + db.ID = tx.ID + db.IdempotencyKey = tx.IdempotencyKey + db.FromAddress = tx.FromAddress + db.ToAddress = tx.ToAddress + db.EncodedPayload = tx.EncodedPayload + db.Value = assets.Eth(tx.Value) + db.GasLimit = tx.FeeLimit + db.Error = tx.Error + db.BroadcastAt = tx.BroadcastAt + db.CreatedAt = tx.CreatedAt + db.State = tx.State + db.Meta = tx.Meta + db.Subject = tx.Subject + db.PipelineTaskRunID = tx.PipelineTaskRunID + db.MinConfirmations = tx.MinConfirmations + db.TransmitChecker = tx.TransmitChecker + db.InitialBroadcastAt = tx.InitialBroadcastAt + db.SignalCallback = tx.SignalCallback + db.CallbackCompleted = tx.CallbackCompleted + + if tx.ChainID != nil { + db.EVMChainID = *ubig.New(tx.ChainID) + } + if tx.Sequence != nil { + n := tx.Sequence.Int64() + db.Nonce = &n + } +} + +func (db DbEthTx) ToTx(tx *Tx) { + tx.ID = db.ID + if db.Nonce != nil { + n := evmtypes.Nonce(*db.Nonce) + tx.Sequence = &n + } + tx.IdempotencyKey = db.IdempotencyKey + tx.FromAddress = db.FromAddress + tx.ToAddress = db.ToAddress + tx.EncodedPayload = db.EncodedPayload + tx.Value = *db.Value.ToInt() + tx.FeeLimit = db.GasLimit + tx.Error = db.Error + tx.BroadcastAt = db.BroadcastAt + tx.CreatedAt = db.CreatedAt + tx.State = db.State + tx.Meta = db.Meta + tx.Subject = db.Subject + tx.PipelineTaskRunID = db.PipelineTaskRunID + tx.MinConfirmations = db.MinConfirmations + tx.ChainID = db.EVMChainID.ToInt() + tx.TransmitChecker = db.TransmitChecker + tx.InitialBroadcastAt = db.InitialBroadcastAt + tx.SignalCallback = db.SignalCallback 
+ tx.CallbackCompleted = db.CallbackCompleted +} + +func dbEthTxsToEvmEthTxs(dbEthTxs []DbEthTx) []Tx { + evmEthTxs := make([]Tx, len(dbEthTxs)) + for i, dbTx := range dbEthTxs { + dbTx.ToTx(&evmEthTxs[i]) + } + return evmEthTxs +} + +func dbEthTxsToEvmEthTxPtrs(dbEthTxs []DbEthTx, evmEthTxs []*Tx) { + for i, dbTx := range dbEthTxs { + evmEthTxs[i] = &Tx{} + dbTx.ToTx(evmEthTxs[i]) + } +} + +// Directly maps to columns of database table "evm.tx_attempts". +// This is exported, as tests and other external code still directly reads DB using this schema. +type DbEthTxAttempt struct { + ID int64 + EthTxID int64 + GasPrice *assets.Wei + SignedRawTx []byte + Hash common.Hash + BroadcastBeforeBlockNum *int64 + State string + CreatedAt time.Time + ChainSpecificGasLimit uint32 + TxType int + GasTipCap *assets.Wei + GasFeeCap *assets.Wei +} + +func (db *DbEthTxAttempt) FromTxAttempt(attempt *TxAttempt) { + db.ID = attempt.ID + db.EthTxID = attempt.TxID + db.GasPrice = attempt.TxFee.Legacy + db.SignedRawTx = attempt.SignedRawTx + db.Hash = attempt.Hash + db.BroadcastBeforeBlockNum = attempt.BroadcastBeforeBlockNum + db.CreatedAt = attempt.CreatedAt + db.ChainSpecificGasLimit = attempt.ChainSpecificFeeLimit + db.TxType = attempt.TxType + db.GasTipCap = attempt.TxFee.DynamicTipCap + db.GasFeeCap = attempt.TxFee.DynamicFeeCap + + // handle state naming difference between generic + EVM + if attempt.State == txmgrtypes.TxAttemptInsufficientFunds { + db.State = "insufficient_eth" + } else { + db.State = attempt.State.String() + } +} + +func DbEthTxAttemptStateToTxAttemptState(state string) txmgrtypes.TxAttemptState { + if state == "insufficient_eth" { + return txmgrtypes.TxAttemptInsufficientFunds + } + return txmgrtypes.NewTxAttemptState(state) +} + +func (db DbEthTxAttempt) ToTxAttempt(attempt *TxAttempt) { + attempt.ID = db.ID + attempt.TxID = db.EthTxID + attempt.SignedRawTx = db.SignedRawTx + attempt.Hash = db.Hash + attempt.BroadcastBeforeBlockNum = db.BroadcastBeforeBlockNum 
+ attempt.State = DbEthTxAttemptStateToTxAttemptState(db.State) + attempt.CreatedAt = db.CreatedAt + attempt.ChainSpecificFeeLimit = db.ChainSpecificGasLimit + attempt.TxType = db.TxType + attempt.TxFee = gas.EvmFee{ + Legacy: db.GasPrice, + DynamicTipCap: db.GasTipCap, + DynamicFeeCap: db.GasFeeCap, + } +} + +func dbEthTxAttemptsToEthTxAttempts(dbEthTxAttempt []DbEthTxAttempt) []TxAttempt { + evmEthTxAttempt := make([]TxAttempt, len(dbEthTxAttempt)) + for i, dbTxAttempt := range dbEthTxAttempt { + dbTxAttempt.ToTxAttempt(&evmEthTxAttempt[i]) + } + return evmEthTxAttempt +} + +func NewTxStore( + db *sqlx.DB, + lggr logger.Logger, + cfg pg.QConfig, +) *evmTxStore { + namedLogger := logger.Named(lggr, "TxmStore") + ctx, cancel := context.WithCancel(context.Background()) + q := pg.NewQ(db, namedLogger, cfg, pg.WithParentCtx(ctx)) + return &evmTxStore{ + q: q, + logger: logger.Sugared(namedLogger), + ctx: ctx, + ctxCancel: cancel, + } +} + +const insertIntoEthTxAttemptsQuery = ` +INSERT INTO evm.tx_attempts (eth_tx_id, gas_price, signed_raw_tx, hash, broadcast_before_block_num, state, created_at, chain_specific_gas_limit, tx_type, gas_tip_cap, gas_fee_cap) +VALUES (:eth_tx_id, :gas_price, :signed_raw_tx, :hash, :broadcast_before_block_num, :state, NOW(), :chain_specific_gas_limit, :tx_type, :gas_tip_cap, :gas_fee_cap) +RETURNING *; +` + +// TODO: create method to pass in new context to evmTxStore (which will also create a new pg.Q) + +func (o *evmTxStore) Close() { + o.ctxCancel() +} + +func (o *evmTxStore) preloadTxAttempts(txs []Tx) error { + // Preload TxAttempts + var ids []int64 + for _, tx := range txs { + ids = append(ids, tx.ID) + } + if len(ids) == 0 { + return nil + } + var dbAttempts []DbEthTxAttempt + sql := `SELECT * FROM evm.tx_attempts WHERE eth_tx_id IN (?) 
ORDER BY id desc;` + query, args, err := sqlx.In(sql, ids) + if err != nil { + return err + } + query = o.q.Rebind(query) + if err = o.q.Select(&dbAttempts, query, args...); err != nil { + return err + } + // fill in attempts + for _, dbAttempt := range dbAttempts { + for i, tx := range txs { + if tx.ID == dbAttempt.EthTxID { + var attempt TxAttempt + dbAttempt.ToTxAttempt(&attempt) + txs[i].TxAttempts = append(txs[i].TxAttempts, attempt) + } + } + } + return nil +} + +func (o *evmTxStore) PreloadTxes(ctx context.Context, attempts []TxAttempt) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + return o.preloadTxesAtomic(attempts, pg.WithParentCtx(ctx)) +} + +// Only to be used for atomic transactions internal to the tx store +func (o *evmTxStore) preloadTxesAtomic(attempts []TxAttempt, qopts ...pg.QOpt) error { + ethTxM := make(map[int64]Tx) + for _, attempt := range attempts { + ethTxM[attempt.TxID] = Tx{} + } + ethTxIDs := make([]int64, len(ethTxM)) + var i int + for id := range ethTxM { + ethTxIDs[i] = id + i++ + } + dbEthTxs := make([]DbEthTx, len(ethTxIDs)) + qq := o.q.WithOpts(qopts...) + if err := qq.Select(&dbEthTxs, `SELECT * FROM evm.txes WHERE id = ANY($1)`, pq.Array(ethTxIDs)); err != nil { + return pkgerrors.Wrap(err, "loadEthTxes failed") + } + for _, dbEtx := range dbEthTxs { + etx := ethTxM[dbEtx.ID] + dbEtx.ToTx(&etx) + ethTxM[etx.ID] = etx + } + for i, attempt := range attempts { + attempts[i].Tx = ethTxM[attempt.TxID] + } + return nil +} + +// Transactions returns all eth transactions without loaded relations +// limited by passed parameters. 
+func (o *evmTxStore) Transactions(offset, limit int) (txs []Tx, count int, err error) { + sql := `SELECT count(*) FROM evm.txes WHERE id IN (SELECT DISTINCT eth_tx_id FROM evm.tx_attempts)` + if err = o.q.Get(&count, sql); err != nil { + return + } + + sql = `SELECT * FROM evm.txes WHERE id IN (SELECT DISTINCT eth_tx_id FROM evm.tx_attempts) ORDER BY id desc LIMIT $1 OFFSET $2` + var dbEthTxs []DbEthTx + if err = o.q.Select(&dbEthTxs, sql, limit, offset); err != nil { + return + } + txs = dbEthTxsToEvmEthTxs(dbEthTxs) + return +} + +// TransactionsWithAttempts returns all eth transactions with at least one attempt +// limited by passed parameters. Attempts are sorted by id. +func (o *evmTxStore) TransactionsWithAttempts(offset, limit int) (txs []Tx, count int, err error) { + sql := `SELECT count(*) FROM evm.txes WHERE id IN (SELECT DISTINCT eth_tx_id FROM evm.tx_attempts)` + if err = o.q.Get(&count, sql); err != nil { + return + } + + sql = `SELECT * FROM evm.txes WHERE id IN (SELECT DISTINCT eth_tx_id FROM evm.tx_attempts) ORDER BY id desc LIMIT $1 OFFSET $2` + var dbTxs []DbEthTx + if err = o.q.Select(&dbTxs, sql, limit, offset); err != nil { + return + } + txs = dbEthTxsToEvmEthTxs(dbTxs) + err = o.preloadTxAttempts(txs) + return +} + +// TxAttempts returns the last tx attempts sorted by created_at descending. 
+func (o *evmTxStore) TxAttempts(offset, limit int) (txs []TxAttempt, count int, err error) { + sql := `SELECT count(*) FROM evm.tx_attempts` + if err = o.q.Get(&count, sql); err != nil { + return + } + + sql = `SELECT * FROM evm.tx_attempts ORDER BY created_at DESC, id DESC LIMIT $1 OFFSET $2` + var dbTxs []DbEthTxAttempt + if err = o.q.Select(&dbTxs, sql, limit, offset); err != nil { + return + } + txs = dbEthTxAttemptsToEthTxAttempts(dbTxs) + err = o.preloadTxesAtomic(txs) + return +} + +// FindTxAttempt returns an individual TxAttempt +func (o *evmTxStore) FindTxAttempt(hash common.Hash) (*TxAttempt, error) { + dbTxAttempt := DbEthTxAttempt{} + sql := `SELECT * FROM evm.tx_attempts WHERE hash = $1` + if err := o.q.Get(&dbTxAttempt, sql, hash); err != nil { + return nil, err + } + // reuse the preload + var attempt TxAttempt + dbTxAttempt.ToTxAttempt(&attempt) + attempts := []TxAttempt{attempt} + err := o.preloadTxesAtomic(attempts) + return &attempts[0], err +} + +// FindTxAttemptsByTxIDs returns a list of attempts by ETH Tx IDs +func (o *evmTxStore) FindTxAttemptsByTxIDs(ids []int64) ([]TxAttempt, error) { + sql := `SELECT * FROM evm.tx_attempts WHERE eth_tx_id = ANY($1)` + var dbTxAttempts []DbEthTxAttempt + if err := o.q.Select(&dbTxAttempts, sql, ids); err != nil { + return nil, err + } + return dbEthTxAttemptsToEthTxAttempts(dbTxAttempts), nil +} + +func (o *evmTxStore) FindTxByHash(hash common.Hash) (*Tx, error) { + var dbEtx DbEthTx + err := o.q.Transaction(func(tx pg.Queryer) error { + sql := `SELECT evm.txes.* FROM evm.txes WHERE id IN (SELECT DISTINCT eth_tx_id FROM evm.tx_attempts WHERE hash = $1)` + if err := tx.Get(&dbEtx, sql, hash); err != nil { + return pkgerrors.Wrapf(err, "failed to find eth_tx with hash %d", hash) + } + return nil + }, pg.OptReadOnlyTx()) + + var etx Tx + dbEtx.ToTx(&etx) + return &etx, pkgerrors.Wrap(err, "FindEthTxByHash failed") +} + +// InsertTx inserts a new evm tx into the database +func (o *evmTxStore) InsertTx(etx 
*Tx) error { + if etx.CreatedAt == (time.Time{}) { + etx.CreatedAt = time.Now() + } + const insertEthTxSQL = `INSERT INTO evm.txes (nonce, from_address, to_address, encoded_payload, value, gas_limit, error, broadcast_at, initial_broadcast_at, created_at, state, meta, subject, pipeline_task_run_id, min_confirmations, evm_chain_id, transmit_checker, idempotency_key, signal_callback, callback_completed) VALUES ( +:nonce, :from_address, :to_address, :encoded_payload, :value, :gas_limit, :error, :broadcast_at, :initial_broadcast_at, :created_at, :state, :meta, :subject, :pipeline_task_run_id, :min_confirmations, :evm_chain_id, :transmit_checker, :idempotency_key, :signal_callback, :callback_completed +) RETURNING *` + var dbTx DbEthTx + dbTx.FromTx(etx) + err := o.q.GetNamed(insertEthTxSQL, &dbTx, &dbTx) + dbTx.ToTx(etx) + return pkgerrors.Wrap(err, "InsertTx failed") +} + +// InsertTxAttempt inserts a new txAttempt into the database +func (o *evmTxStore) InsertTxAttempt(attempt *TxAttempt) error { + var dbTxAttempt DbEthTxAttempt + dbTxAttempt.FromTxAttempt(attempt) + err := o.q.GetNamed(insertIntoEthTxAttemptsQuery, &dbTxAttempt, &dbTxAttempt) + dbTxAttempt.ToTxAttempt(attempt) + return pkgerrors.Wrap(err, "InsertTxAttempt failed") +} + +// InsertReceipt only used in tests. 
Use SaveFetchedReceipts instead +func (o *evmTxStore) InsertReceipt(receipt *evmtypes.Receipt) (int64, error) { + // convert to database representation + r := DbReceiptFromEvmReceipt(receipt) + + const insertEthReceiptSQL = `INSERT INTO evm.receipts (tx_hash, block_hash, block_number, transaction_index, receipt, created_at) VALUES ( +:tx_hash, :block_hash, :block_number, :transaction_index, :receipt, NOW() +) RETURNING *` + err := o.q.GetNamed(insertEthReceiptSQL, &r, &r) + + return r.ID, pkgerrors.Wrap(err, "InsertReceipt failed") +} + +func (o *evmTxStore) GetFatalTransactions(ctx context.Context) (txes []*Tx, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + err = qq.Transaction(func(tx pg.Queryer) error { + stmt := `SELECT * FROM evm.txes WHERE state = 'fatal_error'` + var dbEtxs []DbEthTx + if err = tx.Select(&dbEtxs, stmt); err != nil { + return fmt.Errorf("failed to load evm.txes: %w", err) + } + txes = make([]*Tx, len(dbEtxs)) + dbEthTxsToEvmEthTxPtrs(dbEtxs, txes) + err = o.LoadTxesAttempts(txes, pg.WithParentCtx(ctx), pg.WithQueryer(tx)) + if err != nil { + return fmt.Errorf("failed to load evm.tx_attempts: %w", err) + } + return nil + }, pg.OptReadOnlyTx()) + + return txes, nil +} + +// FindTxWithAttempts finds the Tx with its attempts and receipts preloaded +func (o *evmTxStore) FindTxWithAttempts(etxID int64) (etx Tx, err error) { + err = o.q.Transaction(func(tx pg.Queryer) error { + var dbEtx DbEthTx + if err = tx.Get(&dbEtx, `SELECT * FROM evm.txes WHERE id = $1 ORDER BY created_at ASC, id ASC`, etxID); err != nil { + return pkgerrors.Wrapf(err, "failed to find evm.tx with id %d", etxID) + } + dbEtx.ToTx(&etx) + if err = o.loadTxAttemptsAtomic(&etx, pg.WithQueryer(tx)); err != nil { + return pkgerrors.Wrapf(err, "failed to load evm.tx_attempts for evm.tx with id %d", etxID) + } + if err = loadEthTxAttemptsReceipts(tx, &etx); err != nil { + return 
pkgerrors.Wrapf(err, "failed to load evm.receipts for evm.tx with id %d", etxID) + } + return nil + }, pg.OptReadOnlyTx()) + return etx, pkgerrors.Wrap(err, "FindTxWithAttempts failed") +} + +func (o *evmTxStore) FindTxAttemptConfirmedByTxIDs(ids []int64) ([]TxAttempt, error) { + var txAttempts []TxAttempt + err := o.q.Transaction(func(tx pg.Queryer) error { + var dbAttempts []DbEthTxAttempt + if err := tx.Select(&dbAttempts, `SELECT eta.* + FROM evm.tx_attempts eta + join evm.receipts er on eta.hash = er.tx_hash where eta.eth_tx_id = ANY($1) ORDER BY eta.gas_price DESC, eta.gas_tip_cap DESC`, ids); err != nil { + return err + } + txAttempts = dbEthTxAttemptsToEthTxAttempts(dbAttempts) + return loadConfirmedAttemptsReceipts(tx, txAttempts) + }, pg.OptReadOnlyTx()) + return txAttempts, pkgerrors.Wrap(err, "FindTxAttemptConfirmedByTxIDs failed") +} + +// Only used internally for atomic transactions +func (o *evmTxStore) LoadTxesAttempts(etxs []*Tx, qopts ...pg.QOpt) error { + qq := o.q.WithOpts(qopts...) 
+ ethTxIDs := make([]int64, len(etxs)) + ethTxesM := make(map[int64]*Tx, len(etxs)) + for i, etx := range etxs { + etx.TxAttempts = nil // this will overwrite any previous preload + ethTxIDs[i] = etx.ID + ethTxesM[etx.ID] = etxs[i] + } + var dbTxAttempts []DbEthTxAttempt + if err := qq.Select(&dbTxAttempts, `SELECT * FROM evm.tx_attempts WHERE eth_tx_id = ANY($1) ORDER BY evm.tx_attempts.gas_price DESC, evm.tx_attempts.gas_tip_cap DESC`, pq.Array(ethTxIDs)); err != nil { + return pkgerrors.Wrap(err, "loadEthTxesAttempts failed to load evm.tx_attempts") + } + for _, dbAttempt := range dbTxAttempts { + etx := ethTxesM[dbAttempt.EthTxID] + var attempt TxAttempt + dbAttempt.ToTxAttempt(&attempt) + etx.TxAttempts = append(etx.TxAttempts, attempt) + } + return nil +} + +func (o *evmTxStore) LoadTxAttempts(ctx context.Context, etx *Tx) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + return o.loadTxAttemptsAtomic(etx, pg.WithParentCtx(ctx)) +} + +// Only to be used for atomic transactions internal to the tx store +func (o *evmTxStore) loadTxAttemptsAtomic(etx *Tx, qopts ...pg.QOpt) error { + return o.LoadTxesAttempts([]*Tx{etx}, qopts...) 
+} + +func loadEthTxAttemptsReceipts(q pg.Queryer, etx *Tx) (err error) { + return loadEthTxesAttemptsReceipts(q, []*Tx{etx}) +} + +func loadEthTxesAttemptsReceipts(q pg.Queryer, etxs []*Tx) (err error) { + if len(etxs) == 0 { + return nil + } + attemptHashM := make(map[common.Hash]*TxAttempt, len(etxs)) // len here is lower bound + attemptHashes := make([][]byte, len(etxs)) // len here is lower bound + for _, etx := range etxs { + for i, attempt := range etx.TxAttempts { + attemptHashM[attempt.Hash] = &etx.TxAttempts[i] + attemptHashes = append(attemptHashes, attempt.Hash.Bytes()) + } + } + var rs []dbReceipt + if err = q.Select(&rs, `SELECT * FROM evm.receipts WHERE tx_hash = ANY($1)`, pq.Array(attemptHashes)); err != nil { + return pkgerrors.Wrap(err, "loadEthTxesAttemptsReceipts failed to load evm.receipts") + } + + var receipts []*evmtypes.Receipt = fromDBReceipts(rs) + + for _, receipt := range receipts { + attempt := attemptHashM[receipt.TxHash] + // Although the attempts struct supports multiple receipts, the expectation for EVM is that there is only one receipt + // per tx and therefore attempt too. 
+ attempt.Receipts = append(attempt.Receipts, receipt) + } + return nil +} + +func loadConfirmedAttemptsReceipts(q pg.Queryer, attempts []TxAttempt) error { + byHash := make(map[string]*TxAttempt, len(attempts)) + hashes := make([][]byte, len(attempts)) + for i, attempt := range attempts { + byHash[attempt.Hash.String()] = &attempts[i] + hashes = append(hashes, attempt.Hash.Bytes()) + } + var rs []dbReceipt + if err := q.Select(&rs, `SELECT * FROM evm.receipts WHERE tx_hash = ANY($1)`, pq.Array(hashes)); err != nil { + return pkgerrors.Wrap(err, "loadConfirmedAttemptsReceipts failed to load evm.receipts") + } + var receipts []*evmtypes.Receipt = fromDBReceipts(rs) + for _, receipt := range receipts { + attempt := byHash[receipt.TxHash.String()] + attempt.Receipts = append(attempt.Receipts, receipt) + } + return nil +} + +// FindTxAttemptsRequiringResend returns the highest priced attempt for each +// eth_tx that was last sent before or at the given time (up to limit) +func (o *evmTxStore) FindTxAttemptsRequiringResend(ctx context.Context, olderThan time.Time, maxInFlightTransactions uint32, chainID *big.Int, address common.Address) (attempts []TxAttempt, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + var limit null.Uint32 + if maxInFlightTransactions > 0 { + limit = null.Uint32From(maxInFlightTransactions) + } + var dbAttempts []DbEthTxAttempt + // this select distinct works because of unique index on evm.txes + // (evm_chain_id, from_address, nonce) + err = qq.Select(&dbAttempts, ` +SELECT DISTINCT ON (evm.txes.nonce) evm.tx_attempts.* +FROM evm.tx_attempts +JOIN evm.txes ON evm.txes.id = evm.tx_attempts.eth_tx_id AND evm.txes.state IN ('unconfirmed', 'confirmed_missing_receipt') +WHERE evm.tx_attempts.state <> 'in_progress' AND evm.txes.broadcast_at <= $1 AND evm_chain_id = $2 AND from_address = $3 +ORDER BY evm.txes.nonce ASC, evm.tx_attempts.gas_price DESC, 
evm.tx_attempts.gas_tip_cap DESC +LIMIT $4 +`, olderThan, chainID.String(), address, limit) + + attempts = dbEthTxAttemptsToEthTxAttempts(dbAttempts) + return attempts, pkgerrors.Wrap(err, "FindEthTxAttemptsRequiringResend failed to load evm.tx_attempts") +} + +func (o *evmTxStore) UpdateBroadcastAts(ctx context.Context, now time.Time, etxIDs []int64) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + // Deliberately do nothing on NULL broadcast_at because that indicates the + // tx has been moved into a state where broadcast_at is not relevant, e.g. + // fatally errored. + // + // Since EthConfirmer/EthResender can race (totally OK since highest + // priced transaction always wins) we only want to update broadcast_at if + // our version is later. + _, err := qq.Exec(`UPDATE evm.txes SET broadcast_at = $1 WHERE id = ANY($2) AND broadcast_at < $1`, now, pq.Array(etxIDs)) + return pkgerrors.Wrap(err, "updateBroadcastAts failed to update evm.txes") +} + +// SetBroadcastBeforeBlockNum updates already broadcast attempts with the +// current block number. This is safe no matter how old the head is because if +// the attempt is already broadcast it _must_ have been before this head. 
+func (o *evmTxStore) SetBroadcastBeforeBlockNum(ctx context.Context, blockNum int64, chainID *big.Int) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + _, err := qq.Exec( + `UPDATE evm.tx_attempts +SET broadcast_before_block_num = $1 +FROM evm.txes +WHERE evm.tx_attempts.broadcast_before_block_num IS NULL AND evm.tx_attempts.state = 'broadcast' +AND evm.txes.id = evm.tx_attempts.eth_tx_id AND evm.txes.evm_chain_id = $2`, + blockNum, chainID.String(), + ) + return pkgerrors.Wrap(err, "SetBroadcastBeforeBlockNum failed") +} + +func (o *evmTxStore) FindTxAttemptsConfirmedMissingReceipt(ctx context.Context, chainID *big.Int) (attempts []TxAttempt, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + var dbAttempts []DbEthTxAttempt + err = qq.Select(&dbAttempts, + `SELECT DISTINCT ON (evm.tx_attempts.eth_tx_id) evm.tx_attempts.* + FROM evm.tx_attempts + JOIN evm.txes ON evm.txes.id = evm.tx_attempts.eth_tx_id AND evm.txes.state = 'confirmed_missing_receipt' + WHERE evm_chain_id = $1 + ORDER BY evm.tx_attempts.eth_tx_id ASC, evm.tx_attempts.gas_price DESC, evm.tx_attempts.gas_tip_cap DESC`, + chainID.String()) + if err != nil { + err = pkgerrors.Wrap(err, "FindEtxAttemptsConfirmedMissingReceipt failed to query") + } + attempts = dbEthTxAttemptsToEthTxAttempts(dbAttempts) + return +} + +func (o *evmTxStore) UpdateTxsUnconfirmed(ctx context.Context, ids []int64) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + _, err := qq.Exec(`UPDATE evm.txes SET state='unconfirmed' WHERE id = ANY($1)`, pq.Array(ids)) + + if err != nil { + return pkgerrors.Wrap(err, "UpdateEthTxsUnconfirmed failed to execute") + } + return nil +} + +func (o *evmTxStore) FindTxAttemptsRequiringReceiptFetch(ctx context.Context, chainID 
*big.Int) (attempts []TxAttempt, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + err = qq.Transaction(func(tx pg.Queryer) error { + var dbAttempts []DbEthTxAttempt + err = tx.Select(&dbAttempts, ` +SELECT evm.tx_attempts.* FROM evm.tx_attempts +JOIN evm.txes ON evm.txes.id = evm.tx_attempts.eth_tx_id AND evm.txes.state IN ('unconfirmed', 'confirmed_missing_receipt') AND evm.txes.evm_chain_id = $1 +WHERE evm.tx_attempts.state != 'insufficient_eth' +ORDER BY evm.txes.nonce ASC, evm.tx_attempts.gas_price DESC, evm.tx_attempts.gas_tip_cap DESC +`, chainID.String()) + if err != nil { + return pkgerrors.Wrap(err, "FindEthTxAttemptsRequiringReceiptFetch failed to load evm.tx_attempts") + } + attempts = dbEthTxAttemptsToEthTxAttempts(dbAttempts) + err = o.preloadTxesAtomic(attempts, pg.WithParentCtx(ctx), pg.WithQueryer(tx)) + return pkgerrors.Wrap(err, "FindEthTxAttemptsRequiringReceiptFetch failed to load evm.txes") + }, pg.OptReadOnlyTx()) + return +} + +func (o *evmTxStore) SaveFetchedReceipts(ctx context.Context, r []*evmtypes.Receipt, chainID *big.Int) (err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + receipts := toOnchainReceipt(r) + if len(receipts) == 0 { + return nil + } + + // Notes on this query: + // + // # Receipts insert + // Conflict on (tx_hash, block_hash) shouldn't be possible because there + // should only ever be one receipt for an eth_tx. + // + // ASIDE: This is because we mark confirmed atomically with receipt insert + // in this query, and delete receipts upon marking unconfirmed - see + // markForRebroadcast. + // + // If a receipt with the same (tx_hash, block_hash) exists then the + // transaction is marked confirmed which means we _should_ never get here. + // However, even so, it still shouldn't be an error to upsert a receipt we + // already have. 
+ // + // # EthTxAttempts update + // It should always be safe to mark the attempt as broadcast here because + // if it were not successfully broadcast how could it possibly have a + // receipt? + // + // This state is reachable for example if the eth node errors so the + // attempt was left in_progress but the transaction was actually accepted + // and mined. + // + // # EthTxes update + // Should be self-explanatory. If we got a receipt, the eth_tx is confirmed. + // + var valueStrs []string + var valueArgs []interface{} + for _, r := range receipts { + var receiptJSON []byte + receiptJSON, err = json.Marshal(r) + if err != nil { + return pkgerrors.Wrap(err, "saveFetchedReceipts failed to marshal JSON") + } + valueStrs = append(valueStrs, "(?,?,?,?,?,NOW())") + valueArgs = append(valueArgs, r.TxHash, r.BlockHash, r.BlockNumber.Int64(), r.TransactionIndex, receiptJSON) + } + valueArgs = append(valueArgs, chainID.String()) + + /* #nosec G201 */ + sql := ` + WITH inserted_receipts AS ( + INSERT INTO evm.receipts (tx_hash, block_hash, block_number, transaction_index, receipt, created_at) + VALUES %s + ON CONFLICT (tx_hash, block_hash) DO UPDATE SET + block_number = EXCLUDED.block_number, + transaction_index = EXCLUDED.transaction_index, + receipt = EXCLUDED.receipt + RETURNING evm.receipts.tx_hash, evm.receipts.block_number + ), + updated_eth_tx_attempts AS ( + UPDATE evm.tx_attempts + SET + state = 'broadcast', + broadcast_before_block_num = COALESCE(evm.tx_attempts.broadcast_before_block_num, inserted_receipts.block_number) + FROM inserted_receipts + WHERE inserted_receipts.tx_hash = evm.tx_attempts.hash + RETURNING evm.tx_attempts.eth_tx_id + ) + UPDATE evm.txes + SET state = 'confirmed' + FROM updated_eth_tx_attempts + WHERE updated_eth_tx_attempts.eth_tx_id = evm.txes.id + AND evm_chain_id = ? + ` + + stmt := fmt.Sprintf(sql, strings.Join(valueStrs, ",")) + + stmt = sqlx.Rebind(sqlx.DOLLAR, stmt) + + err = qq.ExecQ(stmt, valueArgs...) 
+ return pkgerrors.Wrap(err, "SaveFetchedReceipts failed to save receipts") +} + +// MarkAllConfirmedMissingReceipt +// It is possible that we can fail to get a receipt for all evm.tx_attempts +// even though a transaction with this nonce has long since been confirmed (we +// know this because transactions with higher nonces HAVE returned a receipt). +// +// This can probably only happen if an external wallet used the account (or +// conceivably because of some bug in the remote eth node that prevents it +// from returning a receipt for a valid transaction). +// +// In this case we mark these transactions as 'confirmed_missing_receipt' to +// prevent gas bumping. +// +// NOTE: We continue to attempt to resend evm.txes in this state on +// every head to guard against the extremely rare scenario of nonce gap due to +// reorg that excludes the transaction (from another wallet) that had this +// nonce (until finality depth is reached, after which we make the explicit +// decision to give up). This is done in the EthResender. +// +// We will continue to try to fetch a receipt for these attempts until all +// attempts are below the finality depth from current head. 
+func (o *evmTxStore) MarkAllConfirmedMissingReceipt(ctx context.Context, chainID *big.Int) (err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + res, err := qq.Exec(` +UPDATE evm.txes +SET state = 'confirmed_missing_receipt' +FROM ( + SELECT from_address, MAX(nonce) as max_nonce + FROM evm.txes + WHERE state = 'confirmed' AND evm_chain_id = $1 + GROUP BY from_address +) AS max_table +WHERE state = 'unconfirmed' + AND evm_chain_id = $1 + AND nonce < max_table.max_nonce + AND evm.txes.from_address = max_table.from_address + `, chainID.String()) + if err != nil { + return pkgerrors.Wrap(err, "markAllConfirmedMissingReceipt failed") + } + rowsAffected, err := res.RowsAffected() + if err != nil { + return pkgerrors.Wrap(err, "markAllConfirmedMissingReceipt RowsAffected failed") + } + if rowsAffected > 0 { + o.logger.Infow(fmt.Sprintf("%d transactions missing receipt", rowsAffected), "n", rowsAffected) + } + return +} + +func (o *evmTxStore) GetInProgressTxAttempts(ctx context.Context, address common.Address, chainID *big.Int) (attempts []TxAttempt, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + err = qq.Transaction(func(tx pg.Queryer) error { + var dbAttempts []DbEthTxAttempt + err = tx.Select(&dbAttempts, ` +SELECT evm.tx_attempts.* FROM evm.tx_attempts +INNER JOIN evm.txes ON evm.txes.id = evm.tx_attempts.eth_tx_id AND evm.txes.state in ('confirmed', 'confirmed_missing_receipt', 'unconfirmed') +WHERE evm.tx_attempts.state = 'in_progress' AND evm.txes.from_address = $1 AND evm.txes.evm_chain_id = $2 +`, address, chainID.String()) + if err != nil { + return pkgerrors.Wrap(err, "getInProgressEthTxAttempts failed to load evm.tx_attempts") + } + attempts = dbEthTxAttemptsToEthTxAttempts(dbAttempts) + err = o.preloadTxesAtomic(attempts, pg.WithParentCtx(ctx), pg.WithQueryer(tx)) + 
return pkgerrors.Wrap(err, "getInProgressEthTxAttempts failed to load evm.txes") + }, pg.OptReadOnlyTx()) + return attempts, pkgerrors.Wrap(err, "getInProgressEthTxAttempts failed") +} + +// Find confirmed txes requiring callback but have not yet been signaled +func (o *evmTxStore) FindTxesPendingCallback(ctx context.Context, blockNum int64, chainID *big.Int) (receiptsPlus []ReceiptPlus, err error) { + var rs []dbReceiptPlus + + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + err = o.q.SelectContext(ctx, &rs, ` + SELECT evm.txes.pipeline_task_run_id, evm.receipts.receipt, COALESCE((evm.txes.meta->>'FailOnRevert')::boolean, false) "FailOnRevert" FROM evm.txes + INNER JOIN evm.tx_attempts ON evm.txes.id = evm.tx_attempts.eth_tx_id + INNER JOIN evm.receipts ON evm.tx_attempts.hash = evm.receipts.tx_hash + WHERE evm.txes.pipeline_task_run_id IS NOT NULL AND evm.txes.signal_callback = TRUE AND evm.txes.callback_completed = FALSE + AND evm.receipts.block_number <= ($1 - evm.txes.min_confirmations) AND evm.txes.evm_chain_id = $2 + `, blockNum, chainID.String()) + if err != nil { + return nil, fmt.Errorf("failed to retrieve transactions pending pipeline resume callback: %w", err) + } + receiptsPlus = fromDBReceiptsPlus(rs) + return +} + +// Update tx to mark that its callback has been signaled +func (o *evmTxStore) UpdateTxCallbackCompleted(ctx context.Context, pipelineTaskRunId uuid.UUID, chainId *big.Int) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + _, err := qq.Exec(`UPDATE evm.txes SET callback_completed = TRUE WHERE pipeline_task_run_id = $1 AND evm_chain_id = $2`, pipelineTaskRunId, chainId.String()) + if err != nil { + return fmt.Errorf("failed to mark callback completed for transaction: %w", err) + } + return nil +} + +func (o *evmTxStore) FindLatestSequence(ctx context.Context, fromAddress common.Address, chainId *big.Int) (nonce 
evmtypes.Nonce, err error) {
	var cancel context.CancelFunc
	ctx, cancel = o.mergeContexts(ctx)
	defer cancel()
	qq := o.q.WithOpts(pg.WithParentCtx(ctx))
	stmt := `SELECT nonce FROM evm.txes WHERE from_address = $1 AND evm_chain_id = $2 AND nonce IS NOT NULL ORDER BY nonce DESC LIMIT 1`
	err = qq.Get(&nonce, stmt, fromAddress, chainId.String())
	return
}

// FindTxWithIdempotencyKey returns any broadcast ethtx with the given idempotencyKey and chainID
func (o *evmTxStore) FindTxWithIdempotencyKey(ctx context.Context, idempotencyKey string, chainID *big.Int) (etx *Tx, err error) {
	var cancel context.CancelFunc
	ctx, cancel = o.mergeContexts(ctx)
	defer cancel()
	qq := o.q.WithOpts(pg.WithParentCtx(ctx))
	var dbEtx DbEthTx
	err = qq.Get(&dbEtx, `SELECT * FROM evm.txes WHERE idempotency_key = $1 and evm_chain_id = $2`, idempotencyKey, chainID.String())
	if err != nil {
		// No matching row is not an error: return (nil, nil).
		if errors.Is(err, sql.ErrNoRows) {
			return nil, nil
		}
		return nil, pkgerrors.Wrap(err, "FindTxWithIdempotencyKey failed to load evm.txes")
	}
	etx = new(Tx)
	dbEtx.ToTx(etx)
	return
}

// FindTxWithSequence returns any broadcast ethtx with the given nonce
func (o *evmTxStore) FindTxWithSequence(ctx context.Context, fromAddress common.Address, nonce evmtypes.Nonce) (etx *Tx, err error) {
	var cancel context.CancelFunc
	ctx, cancel = o.mergeContexts(ctx)
	defer cancel()
	qq := o.q.WithOpts(pg.WithParentCtx(ctx))
	etx = new(Tx)
	err = qq.Transaction(func(tx pg.Queryer) error {
		var dbEtx DbEthTx
		err = tx.Get(&dbEtx, `
SELECT * FROM evm.txes WHERE from_address = $1 AND nonce = $2 AND state IN ('confirmed', 'confirmed_missing_receipt', 'unconfirmed')
`, fromAddress, nonce.Int64())
		if err != nil {
			return pkgerrors.Wrap(err, "FindEthTxWithNonce failed to load evm.txes")
		}
		dbEtx.ToTx(etx)
		err = o.loadTxAttemptsAtomic(etx, pg.WithParentCtx(ctx), pg.WithQueryer(tx))
		return pkgerrors.Wrap(err, "FindEthTxWithNonce failed to load evm.tx_attempts")
	}, pg.OptReadOnlyTx())
	// A missing row is not an error here either.
	if errors.Is(err, sql.ErrNoRows) {
		return nil, nil
	}
	return
}

// updateEthTxAttemptUnbroadcast reverts a broadcast attempt to in_progress
// and clears its broadcast block number.
func updateEthTxAttemptUnbroadcast(q pg.Queryer, attempt TxAttempt) error {
	if attempt.State != txmgrtypes.TxAttemptBroadcast {
		return errors.New("expected eth_tx_attempt to be broadcast")
	}
	_, err := q.Exec(`UPDATE evm.tx_attempts SET broadcast_before_block_num = NULL, state = 'in_progress' WHERE id = $1`, attempt.ID)
	return pkgerrors.Wrap(err, "updateEthTxAttemptUnbroadcast failed")
}

// updateEthTxUnconfirm moves a confirmed tx back to unconfirmed.
func updateEthTxUnconfirm(q pg.Queryer, etx Tx) error {
	if etx.State != txmgr.TxConfirmed {
		return errors.New("expected eth_tx state to be confirmed")
	}
	_, err := q.Exec(`UPDATE evm.txes SET state = 'unconfirmed' WHERE id = $1`, etx.ID)
	return pkgerrors.Wrap(err, "updateEthTxUnconfirm failed")
}

// deleteEthReceipts removes all receipts belonging to the tx's attempts.
func deleteEthReceipts(q pg.Queryer, etxID int64) (err error) {
	_, err = q.Exec(`
DELETE FROM evm.receipts
USING evm.tx_attempts
WHERE evm.receipts.tx_hash = evm.tx_attempts.hash
AND evm.tx_attempts.eth_tx_id = $1
	`, etxID)
	return pkgerrors.Wrap(err, "deleteEthReceipts failed")
}

// UpdateTxForRebroadcast atomically deletes the tx's receipts, unconfirms the
// tx and unbroadcasts the given attempt.
func (o *evmTxStore) UpdateTxForRebroadcast(ctx context.Context, etx Tx, etxAttempt TxAttempt) error {
	var cancel context.CancelFunc
	ctx, cancel = o.mergeContexts(ctx)
	defer cancel()
	qq := o.q.WithOpts(pg.WithParentCtx(ctx))
	return qq.Transaction(func(tx pg.Queryer) error {
		if err := deleteEthReceipts(tx, etx.ID); err != nil {
			return pkgerrors.Wrapf(err, "deleteEthReceipts failed for etx %v", etx.ID)
		}
		if err := updateEthTxUnconfirm(tx, etx); err != nil {
			return pkgerrors.Wrapf(err, "updateEthTxUnconfirm failed for etx %v", etx.ID)
		}
		return updateEthTxAttemptUnbroadcast(tx, etxAttempt)
	})
}

func (o *evmTxStore) FindTransactionsConfirmedInBlockRange(ctx context.Context, highBlockNumber, lowBlockNumber int64, chainID *big.Int) (etxs []*Tx, err error) {
	var cancel context.CancelFunc
	ctx, cancel = o.mergeContexts(ctx)
	defer cancel()
	qq :=
o.q.WithOpts(pg.WithParentCtx(ctx))
	err = qq.Transaction(func(tx pg.Queryer) error {
		var dbEtxs []DbEthTx
		err = tx.Select(&dbEtxs, `
SELECT DISTINCT evm.txes.* FROM evm.txes
INNER JOIN evm.tx_attempts ON evm.txes.id = evm.tx_attempts.eth_tx_id AND evm.tx_attempts.state = 'broadcast'
INNER JOIN evm.receipts ON evm.receipts.tx_hash = evm.tx_attempts.hash
WHERE evm.txes.state IN ('confirmed', 'confirmed_missing_receipt') AND block_number BETWEEN $1 AND $2 AND evm_chain_id = $3
ORDER BY nonce ASC
`, lowBlockNumber, highBlockNumber, chainID.String())
		if err != nil {
			return pkgerrors.Wrap(err, "FindTransactionsConfirmedInBlockRange failed to load evm.txes")
		}
		etxs = make([]*Tx, len(dbEtxs))
		dbEthTxsToEvmEthTxPtrs(dbEtxs, etxs)
		if err = o.LoadTxesAttempts(etxs, pg.WithParentCtx(ctx), pg.WithQueryer(tx)); err != nil {
			return pkgerrors.Wrap(err, "FindTransactionsConfirmedInBlockRange failed to load evm.tx_attempts")
		}
		err = loadEthTxesAttemptsReceipts(tx, etxs)
		return pkgerrors.Wrap(err, "FindTransactionsConfirmedInBlockRange failed to load evm.receipts")
	}, pg.OptReadOnlyTx())
	return etxs, pkgerrors.Wrap(err, "FindTransactionsConfirmedInBlockRange failed")
}

// FindEarliestUnconfirmedBroadcastTime returns the earliest
// initial_broadcast_at among unconfirmed txes on the given chain.
func (o *evmTxStore) FindEarliestUnconfirmedBroadcastTime(ctx context.Context, chainID *big.Int) (broadcastAt nullv4.Time, err error) {
	var cancel context.CancelFunc
	ctx, cancel = o.mergeContexts(ctx)
	defer cancel()
	qq := o.q.WithOpts(pg.WithParentCtx(ctx))
	err = qq.Transaction(func(tx pg.Queryer) error {
		// BUGFIX: query through tx (the transaction's queryer), not qq, so
		// the SELECT actually executes inside the read-only transaction.
		if err = tx.QueryRowContext(ctx, `SELECT min(initial_broadcast_at) FROM evm.txes WHERE state = 'unconfirmed' AND evm_chain_id = $1`, chainID.String()).Scan(&broadcastAt); err != nil {
			return fmt.Errorf("failed to query for unconfirmed eth_tx count: %w", err)
		}
		return nil
	}, pg.OptReadOnlyTx())
	return broadcastAt, err
}

// FindEarliestUnconfirmedTxAttemptBlock returns the lowest
// broadcast_before_block_num among attempts of unconfirmed txes.
func (o *evmTxStore) FindEarliestUnconfirmedTxAttemptBlock(ctx context.Context, chainID *big.Int) (earliestUnconfirmedTxBlock nullv4.Int, err error) {
	var cancel context.CancelFunc
	ctx, cancel = o.mergeContexts(ctx)
	defer cancel()
	qq := o.q.WithOpts(pg.WithParentCtx(ctx))
	err = qq.Transaction(func(tx pg.Queryer) error {
		// BUGFIX: use tx instead of qq — see FindEarliestUnconfirmedBroadcastTime.
		err = tx.QueryRowContext(ctx, `
SELECT MIN(broadcast_before_block_num) FROM evm.tx_attempts
JOIN evm.txes ON evm.txes.id = evm.tx_attempts.eth_tx_id
WHERE evm.txes.state = 'unconfirmed'
AND evm_chain_id = $1`, chainID.String()).Scan(&earliestUnconfirmedTxBlock)
		if err != nil {
			return fmt.Errorf("failed to query for earliest unconfirmed tx block: %w", err)
		}
		return nil
	}, pg.OptReadOnlyTx())
	return earliestUnconfirmedTxBlock, err
}

// IsTxFinalized reports whether the tx has a receipt at least
// min_confirmations blocks below blockHeight.
func (o *evmTxStore) IsTxFinalized(ctx context.Context, blockHeight int64, txID int64, chainID *big.Int) (finalized bool, err error) {
	var cancel context.CancelFunc
	ctx, cancel = o.mergeContexts(ctx)
	defer cancel()

	var count int32
	qq := o.q.WithOpts(pg.WithParentCtx(ctx))
	err = qq.GetContext(ctx, &count, `
	SELECT COUNT(evm.receipts.receipt) FROM evm.txes
	INNER JOIN evm.tx_attempts ON evm.txes.id = evm.tx_attempts.eth_tx_id
	INNER JOIN evm.receipts ON evm.tx_attempts.hash = evm.receipts.tx_hash
	WHERE evm.receipts.block_number <= ($1 - evm.txes.min_confirmations)
	AND evm.txes.id = $2 AND evm.txes.evm_chain_id = $3`, blockHeight, txID, chainID.String())
	if err != nil {
		// BUGFIX: corrected "reciepts" typo in the error message.
		return false, fmt.Errorf("failed to retrieve transaction receipts: %w", err)
	}
	return count > 0, nil
}

// saveAttemptWithNewState persists a new attempt state and, when possible,
// advances the parent tx's broadcast_at, atomically.
func saveAttemptWithNewState(ctx context.Context, q pg.Queryer, logger logger.Logger, attempt TxAttempt, broadcastAt time.Time) error {
	var dbAttempt DbEthTxAttempt
	dbAttempt.FromTxAttempt(&attempt)
	return pg.SqlxTransaction(ctx, q, logger, func(tx pg.Queryer) error {
		// In case of null broadcast_at (shouldn't happen) we don't want to
		// update anyway because it indicates a state where broadcast_at makes
		// no sense e.g.
fatal_error + if _, err := tx.Exec(`UPDATE evm.txes SET broadcast_at = $1 WHERE id = $2 AND broadcast_at < $1`, broadcastAt, dbAttempt.EthTxID); err != nil { + return pkgerrors.Wrap(err, "saveAttemptWithNewState failed to update evm.txes") + } + _, err := tx.Exec(`UPDATE evm.tx_attempts SET state=$1 WHERE id=$2`, dbAttempt.State, dbAttempt.ID) + return pkgerrors.Wrap(err, "saveAttemptWithNewState failed to update evm.tx_attempts") + }) +} + +func (o *evmTxStore) SaveInsufficientFundsAttempt(ctx context.Context, timeout time.Duration, attempt *TxAttempt, broadcastAt time.Time) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + if !(attempt.State == txmgrtypes.TxAttemptInProgress || attempt.State == txmgrtypes.TxAttemptInsufficientFunds) { + return errors.New("expected state to be either in_progress or insufficient_eth") + } + attempt.State = txmgrtypes.TxAttemptInsufficientFunds + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + return pkgerrors.Wrap(saveAttemptWithNewState(ctx, qq, o.logger, *attempt, broadcastAt), "saveInsufficientEthAttempt failed") +} + +func saveSentAttempt(ctx context.Context, q pg.Queryer, timeout time.Duration, logger logger.Logger, attempt *TxAttempt, broadcastAt time.Time) error { + if attempt.State != txmgrtypes.TxAttemptInProgress { + return errors.New("expected state to be in_progress") + } + attempt.State = txmgrtypes.TxAttemptBroadcast + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + return pkgerrors.Wrap(saveAttemptWithNewState(ctx, q, logger, *attempt, broadcastAt), "saveSentAttempt failed") +} + +func (o *evmTxStore) SaveSentAttempt(ctx context.Context, timeout time.Duration, attempt *TxAttempt, broadcastAt time.Time) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + return saveSentAttempt(ctx, qq, timeout, o.logger, 
attempt, broadcastAt) +} + +func (o *evmTxStore) SaveConfirmedMissingReceiptAttempt(ctx context.Context, timeout time.Duration, attempt *TxAttempt, broadcastAt time.Time) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + err := qq.Transaction(func(tx pg.Queryer) error { + if err := saveSentAttempt(ctx, tx, timeout, o.logger, attempt, broadcastAt); err != nil { + return err + } + if _, err := tx.Exec(`UPDATE evm.txes SET state = 'confirmed_missing_receipt' WHERE id = $1`, attempt.TxID); err != nil { + return pkgerrors.Wrap(err, "failed to update evm.txes") + + } + return nil + }) + return pkgerrors.Wrap(err, "SaveConfirmedMissingReceiptAttempt failed") +} + +func (o *evmTxStore) DeleteInProgressAttempt(ctx context.Context, attempt TxAttempt) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + if attempt.State != txmgrtypes.TxAttemptInProgress { + return errors.New("DeleteInProgressAttempt: expected attempt state to be in_progress") + } + if attempt.ID == 0 { + return errors.New("DeleteInProgressAttempt: expected attempt to have an id") + } + _, err := qq.Exec(`DELETE FROM evm.tx_attempts WHERE id = $1`, attempt.ID) + return pkgerrors.Wrap(err, "DeleteInProgressAttempt failed") +} + +// SaveInProgressAttempt inserts or updates an attempt +func (o *evmTxStore) SaveInProgressAttempt(ctx context.Context, attempt *TxAttempt) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + if attempt.State != txmgrtypes.TxAttemptInProgress { + return errors.New("SaveInProgressAttempt failed: attempt state must be in_progress") + } + var dbAttempt DbEthTxAttempt + dbAttempt.FromTxAttempt(attempt) + // Insert is the usual mode because the attempt is new + if attempt.ID == 0 { + query, args, e := 
qq.BindNamed(insertIntoEthTxAttemptsQuery, &dbAttempt)
		if e != nil {
			return pkgerrors.Wrap(e, "SaveInProgressAttempt failed to BindNamed")
		}
		e = qq.Get(&dbAttempt, query, args...)
		dbAttempt.ToTxAttempt(attempt)
		return pkgerrors.Wrap(e, "SaveInProgressAttempt failed to insert into evm.tx_attempts")
	}
	// Update only applies to case of insufficient eth and simply changes the state to in_progress
	res, err := qq.Exec(`UPDATE evm.tx_attempts SET state=$1, broadcast_before_block_num=$2 WHERE id=$3`, dbAttempt.State, dbAttempt.BroadcastBeforeBlockNum, dbAttempt.ID)
	if err != nil {
		return pkgerrors.Wrap(err, "SaveInProgressAttempt failed to update evm.tx_attempts")
	}
	rowsAffected, err := res.RowsAffected()
	if err != nil {
		return pkgerrors.Wrap(err, "SaveInProgressAttempt failed to get RowsAffected")
	}
	if rowsAffected == 0 {
		return pkgerrors.Wrapf(sql.ErrNoRows, "SaveInProgressAttempt tried to update evm.tx_attempts but no rows matched id %d", attempt.ID)
	}
	return nil
}

// GetNonFatalTransactions returns all transactions on the given chain that are
// not in the 'fatal_error' state, with their attempts loaded.
func (o *evmTxStore) GetNonFatalTransactions(ctx context.Context, chainID *big.Int) (txes []*Tx, err error) {
	var cancel context.CancelFunc
	ctx, cancel = o.mergeContexts(ctx)
	defer cancel()
	qq := o.q.WithOpts(pg.WithParentCtx(ctx))
	err = qq.Transaction(func(tx pg.Queryer) error {
		stmt := `SELECT * FROM evm.txes WHERE state <> 'fatal_error' AND evm_chain_id = $1`
		var dbEtxs []DbEthTx
		if err = tx.Select(&dbEtxs, stmt, chainID.String()); err != nil {
			return fmt.Errorf("failed to load evm.txes: %w", err)
		}
		txes = make([]*Tx, len(dbEtxs))
		dbEthTxsToEvmEthTxPtrs(dbEtxs, txes)
		err = o.LoadTxesAttempts(txes, pg.WithParentCtx(ctx), pg.WithQueryer(tx))
		if err != nil {
			// BUGFIX: this message previously (incorrectly) said "failed to load evm.txes".
			return fmt.Errorf("failed to load evm.tx_attempts: %w", err)
		}
		return nil
	}, pg.OptReadOnlyTx())

	// BUGFIX: previously returned a hard-coded nil error, silently swallowing
	// any transaction failure and handing back a possibly-empty result set.
	return txes, err
}

// GetTxByID loads a single transaction (with its attempts) by primary key.
// It returns an error if no transaction with the given id exists.
func (o *evmTxStore) GetTxByID(ctx context.Context, id int64) (txe *Tx, err error) {
	var cancel context.CancelFunc
	ctx, cancel = o.mergeContexts(ctx)
	defer cancel()
	qq := o.q.WithOpts(pg.WithParentCtx(ctx))

	err = qq.Transaction(func(tx pg.Queryer) error {
		stmt := `SELECT * FROM evm.txes WHERE id = $1`
		var dbEtxs []DbEthTx
		if err = tx.Select(&dbEtxs, stmt, id); err != nil {
			return fmt.Errorf("failed to load evm.txes: %w", err)
		}
		txes := make([]*Tx, len(dbEtxs))
		dbEthTxsToEvmEthTxPtrs(dbEtxs, txes)
		if len(txes) != 1 {
			return fmt.Errorf("failed to get tx with id %v", id)
		}
		txe = txes[0]
		err = o.LoadTxesAttempts(txes, pg.WithParentCtx(ctx), pg.WithQueryer(tx))
		if err != nil {
			return fmt.Errorf("failed to load evm.tx_attempts: %w", err)
		}
		return nil
	}, pg.OptReadOnlyTx())

	// BUGFIX: previously returned a hard-coded nil error, so a failed lookup
	// was indistinguishable from success (with a nil *Tx).
	return txe, err
}

// FindTxsRequiringGasBump returns transactions that have all
// attempts which are unconfirmed for at least gasBumpThreshold blocks,
// limited by limit pending transactions
//
// It also returns evm.txes that are unconfirmed with no evm.tx_attempts
func (o *evmTxStore) FindTxsRequiringGasBump(ctx context.Context, address common.Address, blockNum, gasBumpThreshold, depth int64, chainID *big.Int) (etxs []*Tx, err error) {
	if gasBumpThreshold == 0 {
		return
	}
	var cancel context.CancelFunc
	ctx, cancel = o.mergeContexts(ctx)
	defer cancel()
	qq := o.q.WithOpts(pg.WithParentCtx(ctx))
	err = qq.Transaction(func(tx pg.Queryer) error {
		stmt := `
SELECT evm.txes.* FROM evm.txes
LEFT JOIN evm.tx_attempts ON evm.txes.id = evm.tx_attempts.eth_tx_id AND (broadcast_before_block_num > $4 OR broadcast_before_block_num IS NULL OR evm.tx_attempts.state != 'broadcast')
WHERE evm.txes.state = 'unconfirmed' AND evm.tx_attempts.id IS NULL AND evm.txes.from_address = $1 AND evm.txes.evm_chain_id = $2
	AND (($3 = 0) OR (evm.txes.id IN (SELECT id FROM evm.txes WHERE state = 'unconfirmed' AND from_address = $1 ORDER BY nonce ASC LIMIT $3)))
ORDER BY nonce ASC
`
		var dbEtxs []DbEthTx
		if err = tx.Select(&dbEtxs, stmt, address, chainID.String(), depth, blockNum-gasBumpThreshold); err 
!= nil { + return pkgerrors.Wrap(err, "FindEthTxsRequiringGasBump failed to load evm.txes") + } + etxs = make([]*Tx, len(dbEtxs)) + dbEthTxsToEvmEthTxPtrs(dbEtxs, etxs) + err = o.LoadTxesAttempts(etxs, pg.WithParentCtx(ctx), pg.WithQueryer(tx)) + return pkgerrors.Wrap(err, "FindEthTxsRequiringGasBump failed to load evm.tx_attempts") + }, pg.OptReadOnlyTx()) + return +} + +// FindTxsRequiringResubmissionDueToInsufficientFunds returns transactions +// that need to be re-sent because they hit an out-of-eth error on a previous +// block +func (o *evmTxStore) FindTxsRequiringResubmissionDueToInsufficientFunds(ctx context.Context, address common.Address, chainID *big.Int) (etxs []*Tx, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + err = qq.Transaction(func(tx pg.Queryer) error { + var dbEtxs []DbEthTx + err = tx.Select(&dbEtxs, ` +SELECT DISTINCT evm.txes.* FROM evm.txes +INNER JOIN evm.tx_attempts ON evm.txes.id = evm.tx_attempts.eth_tx_id AND evm.tx_attempts.state = 'insufficient_eth' +WHERE evm.txes.from_address = $1 AND evm.txes.state = 'unconfirmed' AND evm.txes.evm_chain_id = $2 +ORDER BY nonce ASC +`, address, chainID.String()) + if err != nil { + return pkgerrors.Wrap(err, "FindEthTxsRequiringResubmissionDueToInsufficientEth failed to load evm.txes") + } + etxs = make([]*Tx, len(dbEtxs)) + dbEthTxsToEvmEthTxPtrs(dbEtxs, etxs) + err = o.LoadTxesAttempts(etxs, pg.WithParentCtx(ctx), pg.WithQueryer(tx)) + return pkgerrors.Wrap(err, "FindEthTxsRequiringResubmissionDueToInsufficientEth failed to load evm.tx_attempts") + }, pg.OptReadOnlyTx()) + return +} + +// markOldTxesMissingReceiptAsErrored +// +// Once eth_tx has all of its attempts broadcast before some cutoff threshold +// without receiving any receipts, we mark it as fatally errored (never sent). 
+// +// The job run will also be marked as errored in this case since we never got a +// receipt and thus cannot pass on any transaction hash +func (o *evmTxStore) MarkOldTxesMissingReceiptAsErrored(ctx context.Context, blockNum int64, finalityDepth uint32, chainID *big.Int) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + // cutoffBlockNum is a block height + // Any 'confirmed_missing_receipt' eth_tx with all attempts older than this block height will be marked as errored + // We will not try to query for receipts for this transaction any more + cutoff := blockNum - int64(finalityDepth) + if cutoff <= 0 { + return nil + } + if cutoff <= 0 { + return nil + } + // note: if QOpt passes in a sql.Tx this will reuse it + return qq.Transaction(func(q pg.Queryer) error { + type etx struct { + ID int64 + Nonce int64 + } + var data []etx + err := q.Select(&data, ` +UPDATE evm.txes +SET state='fatal_error', nonce=NULL, error=$1, broadcast_at=NULL, initial_broadcast_at=NULL +FROM ( + SELECT e1.id, e1.nonce, e1.from_address FROM evm.txes AS e1 WHERE id IN ( + SELECT e2.id FROM evm.txes AS e2 + INNER JOIN evm.tx_attempts ON e2.id = evm.tx_attempts.eth_tx_id + WHERE e2.state = 'confirmed_missing_receipt' + AND e2.evm_chain_id = $3 + GROUP BY e2.id + HAVING max(evm.tx_attempts.broadcast_before_block_num) < $2 + ) + FOR UPDATE OF e1 +) e0 +WHERE e0.id = evm.txes.id +RETURNING e0.id, e0.nonce`, ErrCouldNotGetReceipt, cutoff, chainID.String()) + + if err != nil { + return pkgerrors.Wrap(err, "markOldTxesMissingReceiptAsErrored failed to query") + } + + // We need this little lookup table because we have to have the nonce + // from the first query, BEFORE it was updated/nullified + lookup := make(map[int64]etx) + for _, d := range data { + lookup[d.ID] = d + } + etxIDs := make([]int64, len(data)) + for i := 0; i < len(data); i++ { + etxIDs[i] = data[i].ID + } + + type result struct { + ID int64 
+ FromAddress common.Address + MaxBroadcastBeforeBlockNum int64 + TxHashes pq.ByteaArray + } + + var results []result + err = q.Select(&results, ` +SELECT e.id, e.from_address, max(a.broadcast_before_block_num) AS max_broadcast_before_block_num, array_agg(a.hash) AS tx_hashes +FROM evm.txes e +INNER JOIN evm.tx_attempts a ON e.id = a.eth_tx_id +WHERE e.id = ANY($1) +GROUP BY e.id +`, etxIDs) + + if err != nil { + return pkgerrors.Wrap(err, "markOldTxesMissingReceiptAsErrored failed to load additional data") + } + + for _, r := range results { + nonce := lookup[r.ID].Nonce + txHashesHex := make([]common.Address, len(r.TxHashes)) + for i := 0; i < len(r.TxHashes); i++ { + txHashesHex[i] = common.BytesToAddress(r.TxHashes[i]) + } + + o.logger.Criticalw(fmt.Sprintf("eth_tx with ID %v expired without ever getting a receipt for any of our attempts. "+ + "Current block height is %v, transaction was broadcast before block height %v. This transaction may not have not been sent and will be marked as fatally errored. 
"+ + "This can happen if there is another instance of plugin running that is using the same private key, or if "+ + "an external wallet has been used to send a transaction from account %s with nonce %v."+ + " Please note that Plugin requires exclusive ownership of it's private keys and sharing keys across multiple"+ + " plugin instances, or using the plugin keys with an external wallet is NOT SUPPORTED and WILL lead to missed transactions", + r.ID, blockNum, r.MaxBroadcastBeforeBlockNum, r.FromAddress, nonce), "ethTxID", r.ID, "nonce", nonce, "fromAddress", r.FromAddress, "txHashes", txHashesHex) + } + + return nil + }) +} + +func (o *evmTxStore) SaveReplacementInProgressAttempt(ctx context.Context, oldAttempt TxAttempt, replacementAttempt *TxAttempt) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + if oldAttempt.State != txmgrtypes.TxAttemptInProgress || replacementAttempt.State != txmgrtypes.TxAttemptInProgress { + return errors.New("expected attempts to be in_progress") + } + if oldAttempt.ID == 0 { + return errors.New("expected oldAttempt to have an ID") + } + return qq.Transaction(func(tx pg.Queryer) error { + if _, err := tx.Exec(`DELETE FROM evm.tx_attempts WHERE id=$1`, oldAttempt.ID); err != nil { + return pkgerrors.Wrap(err, "saveReplacementInProgressAttempt failed to delete from evm.tx_attempts") + } + var dbAttempt DbEthTxAttempt + dbAttempt.FromTxAttempt(replacementAttempt) + query, args, e := tx.BindNamed(insertIntoEthTxAttemptsQuery, &dbAttempt) + if e != nil { + return pkgerrors.Wrap(e, "saveReplacementInProgressAttempt failed to BindNamed") + } + e = tx.Get(&dbAttempt, query, args...) 
+ dbAttempt.ToTxAttempt(replacementAttempt) + return pkgerrors.Wrap(e, "saveReplacementInProgressAttempt failed to insert replacement attempt") + }) +} + +// Finds earliest saved transaction that has yet to be broadcast from the given address +func (o *evmTxStore) FindNextUnstartedTransactionFromAddress(ctx context.Context, etx *Tx, fromAddress common.Address, chainID *big.Int) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + var dbEtx DbEthTx + err := qq.Get(&dbEtx, `SELECT * FROM evm.txes WHERE from_address = $1 AND state = 'unstarted' AND evm_chain_id = $2 ORDER BY value ASC, created_at ASC, id ASC`, fromAddress, chainID.String()) + dbEtx.ToTx(etx) + return pkgerrors.Wrap(err, "failed to FindNextUnstartedTransactionFromAddress") +} + +func (o *evmTxStore) UpdateTxFatalError(ctx context.Context, etx *Tx) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + if etx.State != txmgr.TxInProgress && etx.State != txmgr.TxUnstarted { + return pkgerrors.Errorf("can only transition to fatal_error from in_progress or unstarted, transaction is currently %s", etx.State) + } + if !etx.Error.Valid { + return errors.New("expected error field to be set") + } + + etx.Sequence = nil + etx.State = txmgr.TxFatalError + + return qq.Transaction(func(tx pg.Queryer) error { + if _, err := tx.Exec(`DELETE FROM evm.tx_attempts WHERE eth_tx_id = $1`, etx.ID); err != nil { + return pkgerrors.Wrapf(err, "saveFatallyErroredTransaction failed to delete eth_tx_attempt with eth_tx.ID %v", etx.ID) + } + var dbEtx DbEthTx + dbEtx.FromTx(etx) + err := pkgerrors.Wrap(tx.Get(&dbEtx, `UPDATE evm.txes SET state=$1, error=$2, broadcast_at=NULL, initial_broadcast_at=NULL, nonce=NULL WHERE id=$3 RETURNING *`, etx.State, etx.Error, etx.ID), "saveFatallyErroredTransaction failed to save eth_tx") + dbEtx.ToTx(etx) + return err + }) +} 
+ +// Updates eth attempt from in_progress to broadcast. Also updates the eth tx to unconfirmed. +func (o *evmTxStore) UpdateTxAttemptInProgressToBroadcast(ctx context.Context, etx *Tx, attempt TxAttempt, NewAttemptState txmgrtypes.TxAttemptState) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + if etx.BroadcastAt == nil { + return errors.New("unconfirmed transaction must have broadcast_at time") + } + if etx.InitialBroadcastAt == nil { + return errors.New("unconfirmed transaction must have initial_broadcast_at time") + } + if etx.State != txmgr.TxInProgress { + return pkgerrors.Errorf("can only transition to unconfirmed from in_progress, transaction is currently %s", etx.State) + } + if attempt.State != txmgrtypes.TxAttemptInProgress { + return errors.New("attempt must be in in_progress state") + } + if NewAttemptState != txmgrtypes.TxAttemptBroadcast { + return pkgerrors.Errorf("new attempt state must be broadcast, got: %s", NewAttemptState) + } + etx.State = txmgr.TxUnconfirmed + attempt.State = NewAttemptState + return qq.Transaction(func(tx pg.Queryer) error { + var dbEtx DbEthTx + dbEtx.FromTx(etx) + if err := tx.Get(&dbEtx, `UPDATE evm.txes SET state=$1, error=$2, broadcast_at=$3, initial_broadcast_at=$4 WHERE id = $5 RETURNING *`, dbEtx.State, dbEtx.Error, dbEtx.BroadcastAt, dbEtx.InitialBroadcastAt, dbEtx.ID); err != nil { + return pkgerrors.Wrap(err, "SaveEthTxAttempt failed to save eth_tx") + } + dbEtx.ToTx(etx) + var dbAttempt DbEthTxAttempt + dbAttempt.FromTxAttempt(&attempt) + if err := tx.Get(&dbAttempt, `UPDATE evm.tx_attempts SET state = $1 WHERE id = $2 RETURNING *`, dbAttempt.State, dbAttempt.ID); err != nil { + return pkgerrors.Wrap(err, "SaveEthTxAttempt failed to save eth_tx_attempt") + } + return nil + }) +} + +// Updates eth tx from unstarted to in_progress and inserts in_progress eth attempt +func (o *evmTxStore) UpdateTxUnstartedToInProgress(ctx 
context.Context, etx *Tx, attempt *TxAttempt) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + if etx.Sequence == nil { + return errors.New("in_progress transaction must have nonce") + } + if etx.State != txmgr.TxUnstarted { + return pkgerrors.Errorf("can only transition to in_progress from unstarted, transaction is currently %s", etx.State) + } + if attempt.State != txmgrtypes.TxAttemptInProgress { + return errors.New("attempt state must be in_progress") + } + etx.State = txmgr.TxInProgress + return qq.Transaction(func(tx pg.Queryer) error { + // If a replay was triggered while unconfirmed transactions were pending, they will be marked as fatal_error => abandoned. + // In this case, we must remove the abandoned attempt from evm.tx_attempts before replacing it with a new one. In any other + // case, we uphold the constraint, leaving the original tx attempt as-is and returning the constraint violation error. + // + // Note: the record of the original abandoned transaction will remain in evm.txes, only the attempt is replaced. (Any receipt + // associated with the abandoned attempt would also be lost, although this shouldn't happen since only unconfirmed transactions + // can be abandoned.) + res, err2 := tx.Exec(`DELETE FROM evm.tx_attempts a USING evm.txes t + WHERE t.id = a.eth_tx_id AND a.hash = $1 AND t.state = $2 AND t.error = 'abandoned'`, + attempt.Hash, txmgr.TxFatalError, + ) + + if err2 != nil { + // If the DELETE fails, we don't want to abort before at least attempting the INSERT. tx hash conflicts with + // abandoned transactions can only happen after a nonce reset. If the node is operating normally but there is + // some unexpected issue with the DELETE query, blocking the txmgr from sending transactions would be risky + // and could potentially get the node stuck. If the INSERT is going to succeed then we definitely want to continue. 
+ // And even if the INSERT fails, an error message showing the txmgr is having trouble inserting tx's in the db may be + // easier to understand quickly if there is a problem with the node. + o.logger.Errorw("Ignoring unexpected db error while checking for txhash conflict", "err", err2) + } else if rows, err := res.RowsAffected(); err != nil { + o.logger.Errorw("Ignoring unexpected db error reading rows affected while checking for txhash conflict", "err", err) + } else if rows > 0 { + o.logger.Debugf("Replacing abandoned tx with tx hash %s with tx_id=%d with identical tx hash", attempt.Hash, attempt.TxID) + } + + var dbAttempt DbEthTxAttempt + dbAttempt.FromTxAttempt(attempt) + query, args, e := tx.BindNamed(insertIntoEthTxAttemptsQuery, &dbAttempt) + if e != nil { + return pkgerrors.Wrap(e, "failed to BindNamed") + } + err := tx.Get(&dbAttempt, query, args...) + if err != nil { + var pqErr *pgconn.PgError + if isPqErr := errors.As(err, &pqErr); isPqErr && + pqErr.SchemaName == "evm" && + pqErr.ConstraintName == "eth_tx_attempts_eth_tx_id_fkey" { + return txmgr.ErrTxRemoved + } + if err != nil { + return pkgerrors.Wrap(err, "UpdateTxUnstartedToInProgress failed to create eth_tx_attempt") + } + } + dbAttempt.ToTxAttempt(attempt) + var dbEtx DbEthTx + dbEtx.FromTx(etx) + err = tx.Get(&dbEtx, `UPDATE evm.txes SET nonce=$1, state=$2, broadcast_at=$3, initial_broadcast_at=$4 WHERE id=$5 RETURNING *`, etx.Sequence, etx.State, etx.BroadcastAt, etx.InitialBroadcastAt, etx.ID) + dbEtx.ToTx(etx) + return pkgerrors.Wrap(err, "UpdateTxUnstartedToInProgress failed to update eth_tx") + }) +} + +// GetTxInProgress returns either 0 or 1 transaction that was left in +// an unfinished state because something went screwy the last time. Most likely +// the node crashed in the middle of the ProcessUnstartedEthTxs loop. +// It may or may not have been broadcast to an eth node. 
+func (o *evmTxStore) GetTxInProgress(ctx context.Context, fromAddress common.Address) (etx *Tx, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + etx = new(Tx) + if err != nil { + return etx, pkgerrors.Wrap(err, "getInProgressEthTx failed") + } + err = qq.Transaction(func(tx pg.Queryer) error { + var dbEtx DbEthTx + err = tx.Get(&dbEtx, `SELECT * FROM evm.txes WHERE from_address = $1 and state = 'in_progress'`, fromAddress) + if errors.Is(err, sql.ErrNoRows) { + etx = nil + return nil + } else if err != nil { + return pkgerrors.Wrap(err, "GetTxInProgress failed while loading eth tx") + } + dbEtx.ToTx(etx) + if err = o.loadTxAttemptsAtomic(etx, pg.WithParentCtx(ctx), pg.WithQueryer(tx)); err != nil { + return pkgerrors.Wrap(err, "GetTxInProgress failed while loading EthTxAttempts") + } + if len(etx.TxAttempts) != 1 || etx.TxAttempts[0].State != txmgrtypes.TxAttemptInProgress { + return pkgerrors.Errorf("invariant violation: expected in_progress transaction %v to have exactly one unsent attempt. 
"+ + "Your database is in an inconsistent state and this node will not function correctly until the problem is resolved", etx.ID) + } + return nil + }) + + return etx, pkgerrors.Wrap(err, "getInProgressEthTx failed") +} + +func (o *evmTxStore) HasInProgressTransaction(ctx context.Context, account common.Address, chainID *big.Int) (exists bool, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + err = qq.Get(&exists, `SELECT EXISTS(SELECT 1 FROM evm.txes WHERE state = 'in_progress' AND from_address = $1 AND evm_chain_id = $2)`, account, chainID.String()) + return exists, pkgerrors.Wrap(err, "hasInProgressTransaction failed") +} + +func (o *evmTxStore) UpdateKeyNextSequence(newNextNonce, currentNextNonce evmtypes.Nonce, address common.Address, chainID *big.Int, qopts ...pg.QOpt) error { + qq := o.q.WithOpts(qopts...) + return qq.Transaction(func(tx pg.Queryer) error { + // We filter by next_nonce here as an optimistic lock to make sure it + // didn't get changed out from under us. Shouldn't happen but can't hurt. 
+ res, err := tx.Exec(`UPDATE evm.key_states SET next_nonce = $1, updated_at = $2 WHERE address = $3 AND next_nonce = $4 AND evm_chain_id = $5`, newNextNonce.Int64(), time.Now(), address, currentNextNonce.Int64(), chainID.String()) + if err != nil { + return pkgerrors.Wrap(err, "NonceSyncer#fastForwardNonceIfNecessary failed to update keys.next_nonce") + } + rowsAffected, err := res.RowsAffected() + if err != nil { + return pkgerrors.Wrap(err, "NonceSyncer#fastForwardNonceIfNecessary failed to get RowsAffected") + } + if rowsAffected == 0 { + return ErrKeyNotUpdated + } + return nil + }) +} + +func (o *evmTxStore) countTransactionsWithState(ctx context.Context, fromAddress common.Address, state txmgrtypes.TxState, chainID *big.Int) (count uint32, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + err = qq.Get(&count, `SELECT count(*) FROM evm.txes WHERE from_address = $1 AND state = $2 AND evm_chain_id = $3`, + fromAddress, state, chainID.String()) + return count, pkgerrors.Wrap(err, "failed to countTransactionsWithState") +} + +// CountUnconfirmedTransactions returns the number of unconfirmed transactions +func (o *evmTxStore) CountUnconfirmedTransactions(ctx context.Context, fromAddress common.Address, chainID *big.Int) (count uint32, err error) { + return o.countTransactionsWithState(ctx, fromAddress, txmgr.TxUnconfirmed, chainID) +} + +// CountTransactionsByState returns the number of transactions with any fromAddress in the given state +func (o *evmTxStore) CountTransactionsByState(ctx context.Context, state txmgrtypes.TxState, chainID *big.Int) (count uint32, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + err = qq.Get(&count, `SELECT count(*) FROM evm.txes WHERE state = $1 AND evm_chain_id = $2`, + state, chainID.String()) + if err != nil { + return 0, fmt.Errorf("failed 
to CountTransactionsByState: %w", err) + } + return count, nil +} + +// CountUnstartedTransactions returns the number of unconfirmed transactions +func (o *evmTxStore) CountUnstartedTransactions(ctx context.Context, fromAddress common.Address, chainID *big.Int) (count uint32, err error) { + return o.countTransactionsWithState(ctx, fromAddress, txmgr.TxUnstarted, chainID) +} + +func (o *evmTxStore) CheckTxQueueCapacity(ctx context.Context, fromAddress common.Address, maxQueuedTransactions uint64, chainID *big.Int) (err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + if maxQueuedTransactions == 0 { + return nil + } + var count uint64 + err = qq.Get(&count, `SELECT count(*) FROM evm.txes WHERE from_address = $1 AND state = 'unstarted' AND evm_chain_id = $2`, fromAddress, chainID.String()) + if err != nil { + err = pkgerrors.Wrap(err, "CheckTxQueueCapacity query failed") + return + } + + if count >= maxQueuedTransactions { + err = pkgerrors.Errorf("cannot create transaction; too many unstarted transactions in the queue (%v/%v). 
%s", count, maxQueuedTransactions, label.MaxQueuedTransactionsWarning) + } + return +} + +func (o *evmTxStore) CreateTransaction(ctx context.Context, txRequest TxRequest, chainID *big.Int) (tx Tx, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + var dbEtx DbEthTx + err = qq.Transaction(func(tx pg.Queryer) error { + if txRequest.PipelineTaskRunID != nil { + + err = tx.Get(&dbEtx, `SELECT * FROM evm.txes WHERE pipeline_task_run_id = $1 AND evm_chain_id = $2`, txRequest.PipelineTaskRunID, chainID.String()) + // If no eth_tx matches (the common case) then continue + if !errors.Is(err, sql.ErrNoRows) { + if err != nil { + return pkgerrors.Wrap(err, "CreateEthTransaction") + } + // if a previous transaction for this task run exists, immediately return it + return nil + } + } + err = tx.Get(&dbEtx, ` +INSERT INTO evm.txes (from_address, to_address, encoded_payload, value, gas_limit, state, created_at, meta, subject, evm_chain_id, min_confirmations, pipeline_task_run_id, transmit_checker, idempotency_key, signal_callback) +VALUES ( +$1,$2,$3,$4,$5,'unstarted',NOW(),$6,$7,$8,$9,$10,$11,$12,$13 +) +RETURNING "txes".* +`, txRequest.FromAddress, txRequest.ToAddress, txRequest.EncodedPayload, assets.Eth(txRequest.Value), txRequest.FeeLimit, txRequest.Meta, txRequest.Strategy.Subject(), chainID.String(), txRequest.MinConfirmations, txRequest.PipelineTaskRunID, txRequest.Checker, txRequest.IdempotencyKey, txRequest.SignalCallback) + if err != nil { + return pkgerrors.Wrap(err, "CreateEthTransaction failed to insert evm tx") + } + return nil + }) + var etx Tx + dbEtx.ToTx(&etx) + return etx, err +} + +func (o *evmTxStore) PruneUnstartedTxQueue(ctx context.Context, queueSize uint32, subject uuid.UUID) (ids []int64, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + err = qq.Transaction(func(tx 
pg.Queryer) error { + err := qq.Select(&ids, ` +DELETE FROM evm.txes +WHERE state = 'unstarted' AND subject = $1 AND +id < ( + SELECT min(id) FROM ( + SELECT id + FROM evm.txes + WHERE state = 'unstarted' AND subject = $2 + ORDER BY id DESC + LIMIT $3 + ) numbers +) RETURNING id`, subject, subject, queueSize) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil + } + return fmt.Errorf("PruneUnstartedTxQueue failed: %w", err) + } + return err + }) + return +} + +func (o *evmTxStore) ReapTxHistory(ctx context.Context, minBlockNumberToKeep int64, timeThreshold time.Time, chainID *big.Int) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + // Delete old confirmed evm.txes + // NOTE that this relies on foreign key triggers automatically removing + // the evm.tx_attempts and evm.receipts linked to every eth_tx + err := pg.Batch(func(_, limit uint) (count uint, err error) { + res, err := qq.Exec(` +WITH old_enough_receipts AS ( + SELECT tx_hash FROM evm.receipts + WHERE block_number < $1 + ORDER BY block_number ASC, id ASC + LIMIT $2 +) +DELETE FROM evm.txes +USING old_enough_receipts, evm.tx_attempts +WHERE evm.tx_attempts.eth_tx_id = evm.txes.id +AND evm.tx_attempts.hash = old_enough_receipts.tx_hash +AND evm.txes.created_at < $3 +AND evm.txes.state = 'confirmed' +AND evm_chain_id = $4`, minBlockNumberToKeep, limit, timeThreshold, chainID.String()) + if err != nil { + return count, pkgerrors.Wrap(err, "ReapTxes failed to delete old confirmed evm.txes") + } + rowsAffected, err := res.RowsAffected() + if err != nil { + return count, pkgerrors.Wrap(err, "ReapTxes failed to get rows affected") + } + return uint(rowsAffected), err + }) + if err != nil { + return pkgerrors.Wrap(err, "TxmReaper#reapEthTxes batch delete of confirmed evm.txes failed") + } + // Delete old 'fatal_error' evm.txes + err = pg.Batch(func(_, limit uint) (count uint, err error) { + res, err := 
qq.Exec(` +DELETE FROM evm.txes +WHERE created_at < $1 +AND state = 'fatal_error' +AND evm_chain_id = $2`, timeThreshold, chainID.String()) + if err != nil { + return count, pkgerrors.Wrap(err, "ReapTxes failed to delete old fatally errored evm.txes") + } + rowsAffected, err := res.RowsAffected() + if err != nil { + return count, pkgerrors.Wrap(err, "ReapTxes failed to get rows affected") + } + return uint(rowsAffected), err + }) + if err != nil { + return pkgerrors.Wrap(err, "TxmReaper#reapEthTxes batch delete of fatally errored evm.txes failed") + } + + return nil +} + +func (o *evmTxStore) Abandon(ctx context.Context, chainID *big.Int, addr common.Address) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + _, err := qq.Exec(`UPDATE evm.txes SET state='fatal_error', nonce = NULL, error = 'abandoned' WHERE state IN ('unconfirmed', 'in_progress', 'unstarted') AND evm_chain_id = $1 AND from_address = $2`, chainID.String(), addr) + return err +} + +// Find transactions by a field in the TxMeta blob and transaction states +func (o *evmTxStore) FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []txmgrtypes.TxState, chainID *big.Int) ([]*Tx, error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + var dbEtxs []DbEthTx + sql := fmt.Sprintf("SELECT * FROM evm.txes WHERE evm_chain_id = $1 AND meta->>'%s' = $2 AND state = ANY($3)", metaField) + err := qq.Select(&dbEtxs, sql, chainID.String(), metaValue, pq.Array(states)) + txes := make([]*Tx, len(dbEtxs)) + dbEthTxsToEvmEthTxPtrs(dbEtxs, txes) + return txes, pkgerrors.Wrap(err, "failed to FindTxesByMetaFieldAndStates") +} + +// Find transactions with a non-null TxMeta field that was provided by transaction states +func (o *evmTxStore) FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states 
[]txmgrtypes.TxState, chainID *big.Int) (txes []*Tx, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + var dbEtxs []DbEthTx + sql := fmt.Sprintf("SELECT * FROM evm.txes WHERE meta->'%s' IS NOT NULL AND state = ANY($1) AND evm_chain_id = $2", metaField) + err = qq.Select(&dbEtxs, sql, pq.Array(states), chainID.String()) + txes = make([]*Tx, len(dbEtxs)) + dbEthTxsToEvmEthTxPtrs(dbEtxs, txes) + return txes, pkgerrors.Wrap(err, "failed to FindTxesWithMetaFieldByStates") +} + +// Find transactions with a non-null TxMeta field that was provided and a receipt block number greater than or equal to the one provided +func (o *evmTxStore) FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) (txes []*Tx, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + var dbEtxs []DbEthTx + sql := fmt.Sprintf("SELECT et.* FROM evm.txes et JOIN evm.tx_attempts eta on et.id = eta.eth_tx_id JOIN evm.receipts er on eta.hash = er.tx_hash WHERE et.meta->'%s' IS NOT NULL AND er.block_number >= $1 AND et.evm_chain_id = $2", metaField) + err = qq.Select(&dbEtxs, sql, blockNum, chainID.String()) + txes = make([]*Tx, len(dbEtxs)) + dbEthTxsToEvmEthTxPtrs(dbEtxs, txes) + return txes, pkgerrors.Wrap(err, "failed to FindTxesWithMetaFieldByReceiptBlockNum") +} + +// Find transactions loaded with transaction attempts and receipts by transaction IDs and states +func (o *evmTxStore) FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []txmgrtypes.TxState, chainID *big.Int) (txes []*Tx, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + err = qq.Transaction(func(tx pg.Queryer) error { + var dbEtxs []DbEthTx + if err = tx.Select(&dbEtxs, 
`SELECT * FROM evm.txes WHERE id = ANY($1) AND state = ANY($2) AND evm_chain_id = $3`, pq.Array(ids), pq.Array(states), chainID.String()); err != nil { + return pkgerrors.Wrapf(err, "failed to find evm.txes") + } + txes = make([]*Tx, len(dbEtxs)) + dbEthTxsToEvmEthTxPtrs(dbEtxs, txes) + if err = o.LoadTxesAttempts(txes, pg.WithQueryer(tx)); err != nil { + return pkgerrors.Wrapf(err, "failed to load evm.tx_attempts for evm.tx") + } + if err = loadEthTxesAttemptsReceipts(tx, txes); err != nil { + return pkgerrors.Wrapf(err, "failed to load evm.receipts for evm.tx") + } + return nil + }) + return txes, pkgerrors.Wrap(err, "FindTxesWithAttemptsAndReceiptsByIdsAndState failed") +} + +// For testing only, get all txes in the DB +func (o *evmTxStore) GetAllTxes(ctx context.Context) (txes []*Tx, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + var dbEtxs []DbEthTx + sql := "SELECT * FROM evm.txes" + err = qq.Select(&dbEtxs, sql) + txes = make([]*Tx, len(dbEtxs)) + dbEthTxsToEvmEthTxPtrs(dbEtxs, txes) + return txes, err +} + +// For testing only, get all tx attempts in the DB +func (o *evmTxStore) GetAllTxAttempts(ctx context.Context) (attempts []TxAttempt, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + var dbAttempts []DbEthTxAttempt + sql := "SELECT * FROM evm.tx_attempts" + err = qq.Select(&dbAttempts, sql) + attempts = dbEthTxAttemptsToEthTxAttempts(dbAttempts) + return attempts, err +} + +func (o *evmTxStore) CountTxesByStateAndSubject(ctx context.Context, state txmgrtypes.TxState, subject uuid.UUID) (count int, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + sql := "SELECT COUNT(*) FROM evm.txes WHERE state = $1 AND subject = $2" + err = qq.Get(&count, sql, state, subject) + return 
count, err +} + +func (o *evmTxStore) FindTxesByFromAddressAndState(ctx context.Context, fromAddress common.Address, state string) (txes []*Tx, err error) { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + sql := "SELECT * FROM evm.txes WHERE from_address = $1 AND state = $2" + var dbEtxs []DbEthTx + err = qq.Select(&dbEtxs, sql, fromAddress, state) + txes = make([]*Tx, len(dbEtxs)) + dbEthTxsToEvmEthTxPtrs(dbEtxs, txes) + return txes, err +} + +func (o *evmTxStore) UpdateTxAttemptBroadcastBeforeBlockNum(ctx context.Context, id int64, blockNum uint) error { + var cancel context.CancelFunc + ctx, cancel = o.mergeContexts(ctx) + defer cancel() + qq := o.q.WithOpts(pg.WithParentCtx(ctx)) + sql := "UPDATE evm.tx_attempts SET broadcast_before_block_num = $1 WHERE eth_tx_id = $2" + _, err := qq.Exec(sql, blockNum, id) + return err +} + +// Returns a context that contains the values of the provided context, +// and which is canceled when either the provided context or TxStore parent context is canceled. 
+func (o *evmTxStore) mergeContexts(ctx context.Context) (context.Context, context.CancelFunc) { + var cancel context.CancelCauseFunc + ctx, cancel = context.WithCancelCause(ctx) + stop := context.AfterFunc(o.q.ParentCtx, func() { + cancel(context.Cause(o.q.ParentCtx)) + }) + return ctx, func() { + stop() + cancel(context.Canceled) + } +} diff --git a/core/chains/evm/txmgr/evm_tx_store_test.go b/core/chains/evm/txmgr/evm_tx_store_test.go new file mode 100644 index 00000000..47b51a6b --- /dev/null +++ b/core/chains/evm/txmgr/evm_tx_store_test.go @@ -0,0 +1,1840 @@ +package txmgr_test + +import ( + "database/sql" + "fmt" + "math/big" + "testing" + "time" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/logger" + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" +) + +func TestORM_TransactionsWithAttempts(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + txStore 
:= cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, from := cltest.MustInsertRandomKey(t, ethKeyStore) + + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 0, 1, from) // tx1 + tx2 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 1, 2, from) // tx2 + + // add 2nd attempt to tx2 + blockNum := int64(3) + attempt := cltest.NewLegacyEthTxAttempt(t, tx2.ID) + attempt.State = txmgrtypes.TxAttemptBroadcast + attempt.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(3)} + attempt.BroadcastBeforeBlockNum = &blockNum + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + + // tx 3 has no attempts + mustCreateUnstartedGeneratedTx(t, txStore, from, &cltest.FixtureChainID) + + var count int + err := db.Get(&count, `SELECT count(*) FROM evm.txes`) + require.NoError(t, err) + require.Equal(t, 3, count) + + txs, count, err := txStore.TransactionsWithAttempts(0, 100) // should omit tx3 + require.NoError(t, err) + assert.Equal(t, 2, count, "only eth txs with attempts are counted") + assert.Len(t, txs, 2) + assert.Equal(t, evmtypes.Nonce(1), *txs[0].Sequence, "transactions should be sorted by nonce") + assert.Equal(t, evmtypes.Nonce(0), *txs[1].Sequence, "transactions should be sorted by nonce") + assert.Len(t, txs[0].TxAttempts, 2, "all eth tx attempts are preloaded") + assert.Len(t, txs[1].TxAttempts, 1) + assert.Equal(t, int64(3), *txs[0].TxAttempts[0].BroadcastBeforeBlockNum, "attempts should be sorted by created_at") + assert.Equal(t, int64(2), *txs[0].TxAttempts[1].BroadcastBeforeBlockNum, "attempts should be sorted by created_at") + + txs, count, err = txStore.TransactionsWithAttempts(0, 1) + require.NoError(t, err) + assert.Equal(t, 2, count, "only eth txs with attempts are counted") + assert.Len(t, txs, 1, "limit should apply to length of results") + assert.Equal(t, evmtypes.Nonce(1), *txs[0].Sequence, "transactions should be sorted by nonce") +} + +func TestORM_Transactions(t 
*testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, from := cltest.MustInsertRandomKey(t, ethKeyStore) + + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 0, 1, from) // tx1 + tx2 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 1, 2, from) // tx2 + + // add 2nd attempt to tx2 + blockNum := int64(3) + attempt := cltest.NewLegacyEthTxAttempt(t, tx2.ID) + attempt.State = txmgrtypes.TxAttemptBroadcast + attempt.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(3)} + attempt.BroadcastBeforeBlockNum = &blockNum + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + + // tx 3 has no attempts + mustCreateUnstartedGeneratedTx(t, txStore, from, &cltest.FixtureChainID) + + var count int + err := db.Get(&count, `SELECT count(*) FROM evm.txes`) + require.NoError(t, err) + require.Equal(t, 3, count) + + txs, count, err := txStore.Transactions(0, 100) + require.NoError(t, err) + assert.Equal(t, 2, count, "only eth txs with attempts are counted") + assert.Len(t, txs, 2) + assert.Equal(t, evmtypes.Nonce(1), *txs[0].Sequence, "transactions should be sorted by nonce") + assert.Equal(t, evmtypes.Nonce(0), *txs[1].Sequence, "transactions should be sorted by nonce") + assert.Len(t, txs[0].TxAttempts, 0, "eth tx attempts should not be preloaded") + assert.Len(t, txs[1].TxAttempts, 0) +} + +func TestORM(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + orm := cltest.NewTestTxStore(t, db, cfg.Database()) + _, fromAddress := cltest.MustInsertRandomKey(t, keyStore.Eth()) + + var etx txmgr.Tx + t.Run("InsertTx", func(t *testing.T) { + etx = cltest.NewEthTx(fromAddress) + require.NoError(t, orm.InsertTx(&etx)) + assert.Greater(t, int(etx.ID), 0) + cltest.AssertCount(t, db, "evm.txes", 1) + }) + 
var attemptL txmgr.TxAttempt + var attemptD txmgr.TxAttempt + t.Run("InsertTxAttempt", func(t *testing.T) { + attemptD = cltest.NewDynamicFeeEthTxAttempt(t, etx.ID) + require.NoError(t, orm.InsertTxAttempt(&attemptD)) + assert.Greater(t, int(attemptD.ID), 0) + cltest.AssertCount(t, db, "evm.tx_attempts", 1) + + attemptL = cltest.NewLegacyEthTxAttempt(t, etx.ID) + attemptL.State = txmgrtypes.TxAttemptBroadcast + attemptL.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(42)} + require.NoError(t, orm.InsertTxAttempt(&attemptL)) + assert.Greater(t, int(attemptL.ID), 0) + cltest.AssertCount(t, db, "evm.tx_attempts", 2) + }) + var r txmgr.Receipt + t.Run("InsertReceipt", func(t *testing.T) { + r = newEthReceipt(42, utils.NewHash(), attemptD.Hash, 0x1) + id, err := orm.InsertReceipt(&r.Receipt) + r.ID = id + require.NoError(t, err) + assert.Greater(t, int(r.ID), 0) + cltest.AssertCount(t, db, "evm.receipts", 1) + }) + t.Run("FindTxWithAttempts", func(t *testing.T) { + var err error + etx, err = orm.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + require.Len(t, etx.TxAttempts, 2) + assert.Equal(t, etx.TxAttempts[0].ID, attemptD.ID) + assert.Equal(t, etx.TxAttempts[1].ID, attemptL.ID) + require.Len(t, etx.TxAttempts[0].Receipts, 1) + require.Len(t, etx.TxAttempts[1].Receipts, 0) + assert.Equal(t, r.BlockHash, etx.TxAttempts[0].Receipts[0].GetBlockHash()) + }) + t.Run("FindTxByHash", func(t *testing.T) { + foundEtx, err := orm.FindTxByHash(attemptD.Hash) + require.NoError(t, err) + assert.Equal(t, etx.ID, foundEtx.ID) + assert.Equal(t, etx.ChainID, foundEtx.ChainID) + }) + t.Run("FindTxAttemptsByTxIDs", func(t *testing.T) { + attempts, err := orm.FindTxAttemptsByTxIDs([]int64{etx.ID}) + require.NoError(t, err) + require.Len(t, attempts, 2) + assert.Equal(t, etx.TxAttempts[0].ID, attemptD.ID) + assert.Equal(t, etx.TxAttempts[1].ID, attemptL.ID) + require.Len(t, etx.TxAttempts[0].Receipts, 1) + require.Len(t, etx.TxAttempts[1].Receipts, 0) + assert.Equal(t, r.BlockHash, 
etx.TxAttempts[0].Receipts[0].GetBlockHash()) + }) +} + +func TestORM_FindTxAttemptConfirmedByTxIDs(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + orm := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, from := cltest.MustInsertRandomKey(t, ethKeyStore) + + tx1 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, orm, 0, 1, from) // tx1 + tx2 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, orm, 1, 2, from) // tx2 + + // add 2nd attempt to tx2 + blockNum := int64(3) + attempt := cltest.NewLegacyEthTxAttempt(t, tx2.ID) + attempt.State = txmgrtypes.TxAttemptBroadcast + attempt.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(3)} + attempt.BroadcastBeforeBlockNum = &blockNum + require.NoError(t, orm.InsertTxAttempt(&attempt)) + + // add receipt for the second attempt + r := newEthReceipt(4, utils.NewHash(), attempt.Hash, 0x1) + _, err := orm.InsertReceipt(&r.Receipt) + require.NoError(t, err) + + // tx 3 has no attempts + mustCreateUnstartedGeneratedTx(t, orm, from, &cltest.FixtureChainID) + + cltest.MustInsertUnconfirmedEthTx(t, orm, 3, from) // tx4 + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, orm, 4, from) // tx5 + + var count int + err = db.Get(&count, `SELECT count(*) FROM evm.txes`) + require.NoError(t, err) + require.Equal(t, 5, count) + + err = db.Get(&count, `SELECT count(*) FROM evm.tx_attempts`) + require.NoError(t, err) + require.Equal(t, 4, count) + + confirmedAttempts, err := orm.FindTxAttemptConfirmedByTxIDs([]int64{tx1.ID, tx2.ID}) // should omit tx3 + require.NoError(t, err) + assert.Equal(t, 4, count, "only eth txs with attempts are counted") + require.Len(t, confirmedAttempts, 1) + assert.Equal(t, confirmedAttempts[0].ID, attempt.ID) + require.Len(t, confirmedAttempts[0].Receipts, 1, "should have only one EthRecipts for a confirmed transaction") + assert.Equal(t, confirmedAttempts[0].Receipts[0].GetBlockHash(), 
r.BlockHash) + assert.Equal(t, confirmedAttempts[0].Hash, attempt.Hash, "confirmed Recieipt Hash should match the attempt hash") +} + +func TestORM_FindTxAttemptsRequiringResend(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logCfg := pgtest.NewQConfig(true) + txStore := cltest.NewTestTxStore(t, db, logCfg) + + ethKeyStore := cltest.NewKeyStore(t, db, logCfg).Eth() + + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + + t.Run("returns nothing if there are no transactions", func(t *testing.T) { + olderThan := time.Now() + attempts, err := txStore.FindTxAttemptsRequiringResend(testutils.Context(t), olderThan, 10, &cltest.FixtureChainID, fromAddress) + require.NoError(t, err) + assert.Len(t, attempts, 0) + }) + + // Mix up the insert order to assure that they come out sorted by nonce not implicitly or by ID + e1 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 1, fromAddress, time.Unix(1616509200, 0)) + e3 := mustInsertUnconfirmedEthTxWithBroadcastDynamicFeeAttempt(t, txStore, 3, fromAddress, time.Unix(1616509400, 0)) + e0 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 0, fromAddress, time.Unix(1616509100, 0)) + e2 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 2, fromAddress, time.Unix(1616509300, 0)) + + etxs := []txmgr.Tx{ + e0, + e1, + e2, + e3, + } + attempt1_2 := newBroadcastLegacyEthTxAttempt(t, etxs[0].ID) + attempt1_2.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(10)} + require.NoError(t, txStore.InsertTxAttempt(&attempt1_2)) + + attempt3_2 := newInProgressLegacyEthTxAttempt(t, etxs[2].ID) + attempt3_2.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(10)} + require.NoError(t, txStore.InsertTxAttempt(&attempt3_2)) + + attempt4_2 := cltest.NewDynamicFeeEthTxAttempt(t, etxs[3].ID) + attempt4_2.TxFee.DynamicTipCap = assets.NewWeiI(10) + attempt4_2.TxFee.DynamicFeeCap = assets.NewWeiI(20) + attempt4_2.State = txmgrtypes.TxAttemptBroadcast + require.NoError(t, 
txStore.InsertTxAttempt(&attempt4_2)) + attempt4_4 := cltest.NewDynamicFeeEthTxAttempt(t, etxs[3].ID) + attempt4_4.TxFee.DynamicTipCap = assets.NewWeiI(30) + attempt4_4.TxFee.DynamicFeeCap = assets.NewWeiI(40) + attempt4_4.State = txmgrtypes.TxAttemptBroadcast + require.NoError(t, txStore.InsertTxAttempt(&attempt4_4)) + attempt4_3 := cltest.NewDynamicFeeEthTxAttempt(t, etxs[3].ID) + attempt4_3.TxFee.DynamicTipCap = assets.NewWeiI(20) + attempt4_3.TxFee.DynamicFeeCap = assets.NewWeiI(30) + attempt4_3.State = txmgrtypes.TxAttemptBroadcast + require.NoError(t, txStore.InsertTxAttempt(&attempt4_3)) + + t.Run("returns nothing if there are transactions from a different key", func(t *testing.T) { + olderThan := time.Now() + attempts, err := txStore.FindTxAttemptsRequiringResend(testutils.Context(t), olderThan, 10, &cltest.FixtureChainID, utils.RandomAddress()) + require.NoError(t, err) + assert.Len(t, attempts, 0) + }) + + t.Run("returns the highest price attempt for each transaction that was last broadcast before or on the given time", func(t *testing.T) { + olderThan := time.Unix(1616509200, 0) + attempts, err := txStore.FindTxAttemptsRequiringResend(testutils.Context(t), olderThan, 0, &cltest.FixtureChainID, fromAddress) + require.NoError(t, err) + assert.Len(t, attempts, 2) + assert.Equal(t, attempt1_2.ID, attempts[0].ID) + assert.Equal(t, etxs[1].TxAttempts[0].ID, attempts[1].ID) + }) + + t.Run("returns the highest price attempt for EIP-1559 transactions", func(t *testing.T) { + olderThan := time.Unix(1616509400, 0) + attempts, err := txStore.FindTxAttemptsRequiringResend(testutils.Context(t), olderThan, 0, &cltest.FixtureChainID, fromAddress) + require.NoError(t, err) + assert.Len(t, attempts, 4) + assert.Equal(t, attempt4_4.ID, attempts[3].ID) + }) + + t.Run("applies limit", func(t *testing.T) { + olderThan := time.Unix(1616509200, 0) + attempts, err := txStore.FindTxAttemptsRequiringResend(testutils.Context(t), olderThan, 1, &cltest.FixtureChainID, fromAddress) + 
require.NoError(t, err) + assert.Len(t, attempts, 1) + assert.Equal(t, attempt1_2.ID, attempts[0].ID) + }) +} + +func TestORM_UpdateBroadcastAts(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + orm := cltest.NewTestTxStore(t, db, cfg.Database()) + _, fromAddress := cltest.MustInsertRandomKey(t, keyStore.Eth()) + + t.Run("does not update when broadcast_at is NULL", func(t *testing.T) { + t.Parallel() + + etx := mustCreateUnstartedGeneratedTx(t, orm, fromAddress, &cltest.FixtureChainID) + + var nullTime *time.Time + assert.Equal(t, nullTime, etx.BroadcastAt) + + currTime := time.Now() + err := orm.UpdateBroadcastAts(testutils.Context(t), currTime, []int64{etx.ID}) + require.NoError(t, err) + etx, err = orm.FindTxWithAttempts(etx.ID) + + require.NoError(t, err) + assert.Equal(t, nullTime, etx.BroadcastAt) + }) + + t.Run("updates when broadcast_at is non-NULL", func(t *testing.T) { + t.Parallel() + + time1 := time.Now() + etx := cltest.NewEthTx(fromAddress) + etx.Sequence = new(evmtypes.Nonce) + etx.State = txmgrcommon.TxUnconfirmed + etx.BroadcastAt = &time1 + etx.InitialBroadcastAt = &time1 + err := orm.InsertTx(&etx) + require.NoError(t, err) + + time2 := time.Date(2077, 8, 14, 10, 0, 0, 0, time.UTC) + err = orm.UpdateBroadcastAts(testutils.Context(t), time2, []int64{etx.ID}) + require.NoError(t, err) + etx, err = orm.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + // assert year due to time rounding after database save + assert.Equal(t, etx.BroadcastAt.Year(), time2.Year()) + }) +} + +func TestORM_SetBroadcastBeforeBlockNum(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + _, fromAddress := 
cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 0, fromAddress) + chainID := ethClient.ConfiguredChainID() + + headNum := int64(9000) + var err error + + t.Run("saves block num to unconfirmed evm.tx_attempts without one", func(t *testing.T) { + // Do the thing + require.NoError(t, txStore.SetBroadcastBeforeBlockNum(testutils.Context(t), headNum, chainID)) + + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + require.Len(t, etx.TxAttempts, 1) + attempt := etx.TxAttempts[0] + + assert.Equal(t, int64(9000), *attempt.BroadcastBeforeBlockNum) + }) + + t.Run("does not change evm.tx_attempts that already have BroadcastBeforeBlockNum set", func(t *testing.T) { + n := int64(42) + attempt := newBroadcastLegacyEthTxAttempt(t, etx.ID, 2) + attempt.BroadcastBeforeBlockNum = &n + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + + // Do the thing + require.NoError(t, txStore.SetBroadcastBeforeBlockNum(testutils.Context(t), headNum, chainID)) + + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + require.Len(t, etx.TxAttempts, 2) + attempt = etx.TxAttempts[0] + + assert.Equal(t, int64(42), *attempt.BroadcastBeforeBlockNum) + }) + + t.Run("only updates evm.tx_attempts for the current chain", func(t *testing.T) { + require.NoError(t, ethKeyStore.Add(fromAddress, testutils.SimulatedChainID)) + require.NoError(t, ethKeyStore.Enable(fromAddress, testutils.SimulatedChainID)) + etxThisChain := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 1, fromAddress, cfg.EVM().ChainID()) + etxOtherChain := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 0, fromAddress, testutils.SimulatedChainID) + + require.NoError(t, txStore.SetBroadcastBeforeBlockNum(testutils.Context(t), headNum, chainID)) + + etxThisChain, err = txStore.FindTxWithAttempts(etxThisChain.ID) + require.NoError(t, err) + require.Len(t, 
etxThisChain.TxAttempts, 1) + attempt := etxThisChain.TxAttempts[0] + + assert.Equal(t, int64(9000), *attempt.BroadcastBeforeBlockNum) + + etxOtherChain, err = txStore.FindTxWithAttempts(etxOtherChain.ID) + require.NoError(t, err) + require.Len(t, etxOtherChain.TxAttempts, 1) + attempt = etxOtherChain.TxAttempts[0] + + assert.Nil(t, attempt.BroadcastBeforeBlockNum) + }) +} + +func TestORM_FindTxAttemptsConfirmedMissingReceipt(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + originalBroadcastAt := time.Unix(1616509100, 0) + etx0 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 0, 1, originalBroadcastAt, fromAddress) + + attempts, err := txStore.FindTxAttemptsConfirmedMissingReceipt(testutils.Context(t), ethClient.ConfiguredChainID()) + + require.NoError(t, err) + + assert.Len(t, attempts, 1) + assert.Len(t, etx0.TxAttempts, 1) + assert.Equal(t, etx0.TxAttempts[0].ID, attempts[0].ID) +} + +func TestORM_UpdateTxsUnconfirmed(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + originalBroadcastAt := time.Unix(1616509100, 0) + etx0 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 0, 1, originalBroadcastAt, fromAddress) + assert.Equal(t, etx0.State, txmgrcommon.TxConfirmedMissingReceipt) + require.NoError(t, txStore.UpdateTxsUnconfirmed(testutils.Context(t), []int64{etx0.ID})) + + etx0, err := txStore.FindTxWithAttempts(etx0.ID) + require.NoError(t, err) + assert.Equal(t, 
etx0.State, txmgrcommon.TxUnconfirmed) +} + +func TestORM_FindTxAttemptsRequiringReceiptFetch(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + originalBroadcastAt := time.Unix(1616509100, 0) + etx0 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 0, 1, originalBroadcastAt, fromAddress) + + attempts, err := txStore.FindTxAttemptsRequiringReceiptFetch(testutils.Context(t), ethClient.ConfiguredChainID()) + require.NoError(t, err) + assert.Len(t, attempts, 1) + assert.Len(t, etx0.TxAttempts, 1) + assert.Equal(t, etx0.TxAttempts[0].ID, attempts[0].ID) +} + +func TestORM_SaveFetchedReceipts(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + originalBroadcastAt := time.Unix(1616509100, 0) + etx0 := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t, txStore, 0, 1, originalBroadcastAt, fromAddress) + require.Len(t, etx0.TxAttempts, 1) + + // create receipt associated with transaction + txmReceipt := evmtypes.Receipt{ + TxHash: etx0.TxAttempts[0].Hash, + BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(42), + TransactionIndex: uint(1), + } + + err := txStore.SaveFetchedReceipts(testutils.Context(t), []*evmtypes.Receipt{&txmReceipt}, ethClient.ConfiguredChainID()) + + require.NoError(t, err) + etx0, err = txStore.FindTxWithAttempts(etx0.ID) + require.NoError(t, err) + require.Len(t, etx0.TxAttempts, 1) + require.Len(t, 
etx0.TxAttempts[0].Receipts, 1) + require.Equal(t, txmReceipt.BlockHash, etx0.TxAttempts[0].Receipts[0].GetBlockHash()) + require.Equal(t, txmgrcommon.TxConfirmed, etx0.State) +} + +func TestORM_MarkAllConfirmedMissingReceipt(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + // create transaction 0 (nonce 0) that is unconfirmed (block 7) + etx0_blocknum := int64(7) + etx0 := cltest.MustInsertUnconfirmedEthTx(t, txStore, 0, fromAddress) + etx0_attempt := newBroadcastLegacyEthTxAttempt(t, etx0.ID, int64(1)) + etx0_attempt.BroadcastBeforeBlockNum = &etx0_blocknum + require.NoError(t, txStore.InsertTxAttempt(&etx0_attempt)) + assert.Equal(t, txmgrcommon.TxUnconfirmed, etx0.State) + + // create transaction 1 (nonce 1) that is confirmed (block 77) + etx1 := mustInsertConfirmedEthTxBySaveFetchedReceipts(t, txStore, fromAddress, int64(1), int64(77), *ethClient.ConfiguredChainID()) + assert.Equal(t, etx1.State, txmgrcommon.TxConfirmed) + + // mark transaction 0 confirmed_missing_receipt + err := txStore.MarkAllConfirmedMissingReceipt(testutils.Context(t), ethClient.ConfiguredChainID()) + require.NoError(t, err) + etx0, err = txStore.FindTxWithAttempts(etx0.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxConfirmedMissingReceipt, etx0.State) +} + +func TestORM_PreloadTxes(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + t.Run("loads eth transaction", func(t *testing.T) { + // insert etx with attempt + etx := 
cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, int64(7), fromAddress)

		// Build an attempt that references the tx but has not had its Tx field loaded.
		unloadedAttempt := txmgr.TxAttempt{TxID: etx.ID}

		// Tx is still the zero value before preloading.
		assert.Equal(t, int64(0), unloadedAttempt.Tx.ID)

		attempts := []txmgr.TxAttempt{unloadedAttempt}

		err := txStore.PreloadTxes(testutils.Context(t), attempts)
		require.NoError(t, err)

		// Preloading populates the parent tx on each attempt.
		assert.Equal(t, etx.ID, attempts[0].Tx.ID)
	})

	t.Run("returns nil when attempts slice is empty", func(t *testing.T) {
		emptyAttempts := []txmgr.TxAttempt{}
		err := txStore.PreloadTxes(testutils.Context(t), emptyAttempts)
		require.NoError(t, err)
	})
}

// TestORM_GetInProgressTxAttempts verifies that in_progress attempts for a
// given address and chain ID are returned by the tx store.
func TestORM_GetInProgressTxAttempts(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	// Insert a tx whose single attempt is in_progress.
	etx := mustInsertUnconfirmedEthTxWithAttemptState(t, txStore, int64(7), fromAddress, txmgrtypes.TxAttemptInProgress)

	// Fetch it back via the exported method.
	attempts, err := txStore.GetInProgressTxAttempts(testutils.Context(t), fromAddress, ethClient.ConfiguredChainID())
	require.NoError(t, err)

	assert.Len(t, attempts, 1)
	assert.Equal(t, etx.TxAttempts[0].ID, attempts[0].ID)
}

// TestORM_FindTxesPendingCallback checks that only confirmed txes marked for a
// pipeline callback, with enough confirmations and no completed callback, are
// returned.
func TestORM_FindTxesPendingCallback(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	pgtest.MustExec(t, db, `SET CONSTRAINTS pipeline_runs_pipeline_spec_id_fkey DEFERRED`)

	// Three-block chain; head is block 10.
	head := evmtypes.Head{
		Hash:   utils.NewHash(),
		Number: 10,
		Parent: &evmtypes.Head{
			Hash:   utils.NewHash(),
			Number: 9,
			Parent: &evmtypes.Head{
				Number: 8,
				Hash:   utils.NewHash(),
				Parent: nil,
			},
		},
	}

	minConfirmations := int64(2)

	// Suspended run waiting for callback — the only row expected in the result.
	run1 := cltest.MustInsertPipelineRun(t, db)
	tr1 := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run1.ID)
	pgtest.MustExec(t, db, `UPDATE pipeline_runs SET state = 'suspended' WHERE id = $1`, run1.ID)
	etx1 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 3, 1, fromAddress)
	pgtest.MustExec(t, db, `UPDATE evm.txes SET meta='{"FailOnRevert": true}'`)
	attempt1 := etx1.TxAttempts[0]
	mustInsertEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, attempt1.Hash)
	pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE WHERE id = $3`, &tr1.ID, minConfirmations, etx1.ID)

	// Callback to pipeline service already completed. Should be ignored.
	run2 := cltest.MustInsertPipelineRunWithStatus(t, db, 0, pipeline.RunStatusCompleted)
	tr2 := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run2.ID)
	etx2 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 4, 1, fromAddress)
	pgtest.MustExec(t, db, `UPDATE evm.txes SET meta='{"FailOnRevert": false}'`)
	attempt2 := etx2.TxAttempts[0]
	mustInsertEthReceipt(t, txStore, head.Number-minConfirmations, head.Hash, attempt2.Hash)
	pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE, callback_completed = TRUE WHERE id = $3`, &tr2.ID, minConfirmations, etx2.ID)

	// Suspended run younger than minConfirmations. Should be ignored.
	run3 := cltest.MustInsertPipelineRun(t, db)
	tr3 := cltest.MustInsertUnfinishedPipelineTaskRun(t, db, run3.ID)
	pgtest.MustExec(t, db, `UPDATE pipeline_runs SET state = 'suspended' WHERE id = $1`, run3.ID)
	etx3 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 5, 1, fromAddress)
	pgtest.MustExec(t, db, `UPDATE evm.txes SET meta='{"FailOnRevert": false}'`)
	attempt3 := etx3.TxAttempts[0]
	mustInsertEthReceipt(t, txStore, head.Number, head.Hash, attempt3.Hash)
	pgtest.MustExec(t, db, `UPDATE evm.txes SET pipeline_task_run_id = $1, min_confirmations = $2, signal_callback = TRUE WHERE id = $3`, &tr3.ID, minConfirmations, etx3.ID)

	// Tx not marked for callback. Should be ignored.
	etx4 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 6, 1, fromAddress)
	attempt4 := etx4.TxAttempts[0]
	mustInsertEthReceipt(t, txStore, head.Number, head.Hash, attempt4.Hash)
	pgtest.MustExec(t, db, `UPDATE evm.txes SET min_confirmations = $1 WHERE id = $2`, minConfirmations, etx4.ID)

	// Tx without receipts. Should be ignored.
	etx5 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 7, 1, fromAddress)
	pgtest.MustExec(t, db, `UPDATE evm.txes SET min_confirmations = $1 WHERE id = $2`, minConfirmations, etx5.ID)

	// Search evm.txes for txes requiring a callback; only etx1/tr1 qualifies.
	receiptsPlus, err := txStore.FindTxesPendingCallback(testutils.Context(t), head.Number, ethClient.ConfiguredChainID())
	require.NoError(t, err)
	assert.Len(t, receiptsPlus, 1)
	assert.Equal(t, tr1.ID, receiptsPlus[0].ID)
}

// Test_FindTxWithIdempotencyKey verifies lookup of a tx by its idempotency key
// and chain ID.
func Test_FindTxWithIdempotencyKey(t *testing.T) {
	t.Parallel()
	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	t.Run("returns nil if no results", func(t *testing.T) {
		idempotencyKey := "777"
		etx, err := txStore.FindTxWithIdempotencyKey(testutils.Context(t), idempotencyKey, big.NewInt(0))
		require.NoError(t, err)
		assert.Nil(t, etx)
	})

	t.Run("returns transaction if it exists", func(t *testing.T) {
		idempotencyKey := "777"
		cfg.EVM().ChainID()
		etx := mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, big.NewInt(0),
			txRequestWithIdempotencyKey(idempotencyKey))
		require.Equal(t, idempotencyKey, *etx.IdempotencyKey)

		res, err := txStore.FindTxWithIdempotencyKey(testutils.Context(t), idempotencyKey, big.NewInt(0))
		require.NoError(t, err)
		assert.Equal(t, etx.Sequence, res.Sequence)
		require.Equal(t, idempotencyKey, *res.IdempotencyKey)
	})
}

// TestORM_FindTxWithSequence verifies lookup of a tx by sender address and
// nonce.
func TestORM_FindTxWithSequence(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	t.Run("returns nil if no results", func(t *testing.T) {
		etx, err := txStore.FindTxWithSequence(testutils.Context(t), fromAddress, evmtypes.Nonce(777))
		require.NoError(t, err)
		assert.Nil(t, etx)
	})

	t.Run("returns transaction if it exists", func(t *testing.T) {
		etx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 777, 1, fromAddress)
		require.Equal(t, evmtypes.Nonce(777), *etx.Sequence)

		res, err := txStore.FindTxWithSequence(testutils.Context(t), fromAddress, evmtypes.Nonce(777))
		require.NoError(t, err)
		assert.Equal(t, etx.Sequence, res.Sequence)
	})
}

// TestORM_UpdateTxForRebroadcast verifies that rebroadcasting deletes receipts
// and moves the tx/attempt back to unconfirmed/in_progress.
func TestORM_UpdateTxForRebroadcast(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	t.Run("delete all receipts for eth transaction", func(t *testing.T) {
		etx := mustInsertConfirmedEthTxWithReceipt(t, txStore, fromAddress, 777, 1)
		etx, err := txStore.FindTxWithAttempts(etx.ID)
		assert.NoError(t, err)
		// Assert starting attempt state.
		attempt := etx.TxAttempts[0]
		require.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State)
		// Assert starting tx state.
		assert.Equal(t, txmgrcommon.TxConfirmed, etx.State)
		// Assert a receipt exists.
		assert.Len(t, etx.TxAttempts[0].Receipts, 1)

		// Use exported method.
		err = txStore.UpdateTxForRebroadcast(testutils.Context(t), etx, attempt)
		require.NoError(t, err)

		resultTx, err := txStore.FindTxWithAttempts(etx.ID)
		require.NoError(t, err)
		require.Len(t, resultTx.TxAttempts, 1)
		resultTxAttempt := resultTx.TxAttempts[0]

		// Attempt reverted to in_progress with no broadcast block.
		assert.Equal(t, txmgrtypes.TxAttemptInProgress, resultTxAttempt.State)
		assert.Nil(t, resultTxAttempt.BroadcastBeforeBlockNum)
		// Tx reverted to unconfirmed.
		assert.Equal(t, txmgrcommon.TxUnconfirmed, resultTx.State)
		// Receipts deleted.
		assert.Len(t, resultTxAttempt.Receipts, 0)
	})
}

func
TestORM_IsTxFinalized(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)

	t.Run("confirmed tx not past finality_depth", func(t *testing.T) {
		confirmedAddr := cltest.MustGenerateRandomKey(t).Address
		tx := mustInsertConfirmedEthTxWithReceipt(t, txStore, confirmedAddr, 123, 1)
		finalized, err := txStore.IsTxFinalized(testutils.Context(t), 2, tx.ID, ethClient.ConfiguredChainID())
		require.NoError(t, err)
		require.False(t, finalized)
	})

	t.Run("confirmed tx past finality_depth", func(t *testing.T) {
		confirmedAddr := cltest.MustGenerateRandomKey(t).Address
		tx := mustInsertConfirmedEthTxWithReceipt(t, txStore, confirmedAddr, 123, 1)
		finalized, err := txStore.IsTxFinalized(testutils.Context(t), 10, tx.ID, ethClient.ConfiguredChainID())
		require.NoError(t, err)
		require.True(t, finalized)
	})
}

// TestORM_FindTransactionsConfirmedInBlockRange verifies retrieval of confirmed
// txes whose receipts fall within a block range, ordered by nonce.
func TestORM_FindTransactionsConfirmedInBlockRange(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	// Three-block chain; head is block 10.
	head := evmtypes.Head{
		Hash:   utils.NewHash(),
		Number: 10,
		Parent: &evmtypes.Head{
			Hash:   utils.NewHash(),
			Number: 9,
			Parent: &evmtypes.Head{
				Number: 8,
				Hash:   utils.NewHash(),
				Parent: nil,
			},
		},
	}

	t.Run("find all transactions confirmed in range", func(t *testing.T) {
		etx_8 := mustInsertConfirmedEthTxWithReceipt(t, txStore, fromAddress, 700, 8)
		etx_9 := mustInsertConfirmedEthTxWithReceipt(t, txStore, fromAddress, 777, 9)

		etxes, err := txStore.FindTransactionsConfirmedInBlockRange(testutils.Context(t), head.Number, 8, ethClient.ConfiguredChainID())
		require.NoError(t, err)
		assert.Len(t, etxes, 2)
		assert.Equal(t, etxes[0].Sequence, etx_8.Sequence)
		assert.Equal(t, etxes[1].Sequence, etx_9.Sequence)
	})
}

// TestORM_FindEarliestUnconfirmedBroadcastTime verifies the earliest broadcast
// time among unconfirmed txes, or an invalid null value when none exist.
func TestORM_FindEarliestUnconfirmedBroadcastTime(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	t.Run("no unconfirmed eth txes", func(t *testing.T) {
		broadcastAt, err := txStore.FindEarliestUnconfirmedBroadcastTime(testutils.Context(t), ethClient.ConfiguredChainID())
		require.NoError(t, err)
		require.False(t, broadcastAt.Valid)
	})

	t.Run("verify broadcast time", func(t *testing.T) {
		tx := cltest.MustInsertUnconfirmedEthTx(t, txStore, 123, fromAddress)
		broadcastAt, err := txStore.FindEarliestUnconfirmedBroadcastTime(testutils.Context(t), ethClient.ConfiguredChainID())
		require.NoError(t, err)
		require.True(t, broadcastAt.Ptr().Equal(*tx.BroadcastAt))
	})
}

// TestORM_FindEarliestUnconfirmedTxAttemptBlock verifies the earliest
// broadcast_before_block_num among unconfirmed tx attempts.
func TestORM_FindEarliestUnconfirmedTxAttemptBlock(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)
	_, fromAddress2 := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	t.Run("no earliest unconfirmed tx block", func(t *testing.T) {
		earliestBlock, err := txStore.FindEarliestUnconfirmedTxAttemptBlock(testutils.Context(t), ethClient.ConfiguredChainID())
		require.NoError(t, err)
		require.False(t, earliestBlock.Valid)
	})

	t.Run("verify earliest unconfirmed tx block", func(t *testing.T) {
		var blockNum int64 = 2
		tx := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt(t, txStore, 123, blockNum, time.Now(), fromAddress)
		_ = mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt(t, txStore, 123, blockNum, time.Now().Add(time.Minute), fromAddress2)
		err := txStore.UpdateTxsUnconfirmed(testutils.Context(t), []int64{tx.ID})
		require.NoError(t, err)

		earliestBlock, err := txStore.FindEarliestUnconfirmedTxAttemptBlock(testutils.Context(t), ethClient.ConfiguredChainID())
		require.NoError(t, err)
		require.True(t, earliestBlock.Valid)
		require.Equal(t, blockNum, earliestBlock.Int64)
	})
}

// TestORM_SaveInsufficientEthAttempt verifies an attempt is moved to the
// insufficient_funds state.
func TestORM_SaveInsufficientEthAttempt(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)
	defaultDuration, err := time.ParseDuration("5s")
	require.NoError(t, err)

	t.Run("updates attempt state", func(t *testing.T) {
		etx := mustInsertInProgressEthTxWithAttempt(t, txStore, 1, fromAddress)
		now := time.Now()

		err = txStore.SaveInsufficientFundsAttempt(testutils.Context(t), defaultDuration, &etx.TxAttempts[0], now)
		require.NoError(t, err)

		attempt, err := txStore.FindTxAttempt(etx.TxAttempts[0].Hash)
		require.NoError(t, err)
		assert.Equal(t, txmgrtypes.TxAttemptInsufficientFunds, attempt.State)
	})
}

// TestORM_SaveSentAttempt verifies an attempt is moved to the broadcast state.
func TestORM_SaveSentAttempt(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)
	defaultDuration, err := time.ParseDuration("5s")
	require.NoError(t, err)

	t.Run("updates attempt state to 'broadcast'", func(t *testing.T) {
		etx := mustInsertInProgressEthTxWithAttempt(t, txStore, 1, fromAddress)
		require.Nil(t, etx.BroadcastAt)
		now := time.Now()

		err = txStore.SaveSentAttempt(testutils.Context(t), defaultDuration, &etx.TxAttempts[0], now)
		require.NoError(t, err)

		attempt, err := txStore.FindTxAttempt(etx.TxAttempts[0].Hash)
		require.NoError(t, err)
		assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempt.State)
	})
}

// TestORM_SaveConfirmedMissingReceiptAttempt verifies the attempt is broadcast
// and the tx enters confirmed_missing_receipt.
func TestORM_SaveConfirmedMissingReceiptAttempt(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)
	defaultDuration, err := time.ParseDuration("5s")
	require.NoError(t, err)

	t.Run("updates attempt to 'broadcast' and transaction to 'confirm_missing_receipt'", func(t *testing.T) {
		etx := mustInsertUnconfirmedEthTxWithAttemptState(t, txStore, 1, fromAddress, txmgrtypes.TxAttemptInProgress)
		now := time.Now()

		err = txStore.SaveConfirmedMissingReceiptAttempt(testutils.Context(t), defaultDuration, &etx.TxAttempts[0], now)
		require.NoError(t, err)

		etx, err := txStore.FindTxWithAttempts(etx.ID)
		require.NoError(t, err)
		assert.Equal(t, txmgrcommon.TxConfirmedMissingReceipt, etx.State)
		assert.Equal(t, txmgrtypes.TxAttemptBroadcast, etx.TxAttempts[0].State)
	})
}

// TestORM_DeleteInProgressAttempt verifies an in_progress attempt is deleted.
func TestORM_DeleteInProgressAttempt(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	t.Run("deletes in_progress attempt", func(t *testing.T) {
		etx := mustInsertInProgressEthTxWithAttempt(t, txStore, 1, fromAddress)
		attempt := etx.TxAttempts[0]

		err := txStore.DeleteInProgressAttempt(testutils.Context(t), etx.TxAttempts[0])
		require.NoError(t, err)

		// Deleted attempt must no longer be findable.
		nilResult, err := txStore.FindTxAttempt(attempt.Hash)
		assert.Nil(t, nilResult)
		require.Error(t, err)
	})
}

// TestORM_SaveInProgressAttempt verifies inserting a brand-new in_progress
// attempt and upgrading an existing insufficient_funds attempt.
func TestORM_SaveInProgressAttempt(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	t.Run("saves new in_progress attempt if attempt is new", func(t *testing.T) {
		etx := cltest.MustInsertUnconfirmedEthTx(t, txStore, 1, fromAddress)

		attempt := cltest.NewLegacyEthTxAttempt(t, etx.ID)
		require.Equal(t, int64(0), attempt.ID)

		err := txStore.SaveInProgressAttempt(testutils.Context(t), &attempt)
		require.NoError(t, err)

		attemptResult, err := txStore.FindTxAttempt(attempt.Hash)
		require.NoError(t, err)
		assert.Equal(t, txmgrtypes.TxAttemptInProgress, attemptResult.State)
	})

	t.Run("updates old attempt to in_progress when insufficient_eth", func(t *testing.T) {
		etx := mustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, 23, fromAddress)
		attempt := etx.TxAttempts[0]
		require.Equal(t, txmgrtypes.TxAttemptInsufficientFunds, attempt.State)
		require.NotEqual(t, 0, attempt.ID)

		attempt.BroadcastBeforeBlockNum = nil
		attempt.State = txmgrtypes.TxAttemptInProgress
		err := txStore.SaveInProgressAttempt(testutils.Context(t), &attempt)

		require.NoError(t, err)
		attemptResult, err := txStore.FindTxAttempt(attempt.Hash)
		require.NoError(t, err)
		assert.Equal(t, txmgrtypes.TxAttemptInProgress, attemptResult.State)
	})
}

// TestORM_FindTxsRequiringGasBump verifies that txes broadcast at or before the
// bump threshold block are selected for gas bumping.
func TestORM_FindTxsRequiringGasBump(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	currentBlockNum := int64(10)

	t.Run("gets txs requiring gas bump", func(t *testing.T) {
		etx := mustInsertUnconfirmedEthTxWithAttemptState(t, txStore, 1, fromAddress, txmgrtypes.TxAttemptBroadcast)
		err := txStore.SetBroadcastBeforeBlockNum(testutils.Context(t), currentBlockNum, ethClient.ConfiguredChainID())
		require.NoError(t, err)

		// This tx will require a gas bump.
		etx, err = txStore.FindTxWithAttempts(etx.ID)
		attempts := etx.TxAttempts
		require.NoError(t, err)
		assert.Len(t, attempts, 1)
		assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attempts[0].State)
		assert.Equal(t, currentBlockNum, *attempts[0].BroadcastBeforeBlockNum)

		// This tx will not require a gas bump.
		mustInsertUnconfirmedEthTxWithAttemptState(t, txStore, 2, fromAddress, txmgrtypes.TxAttemptBroadcast)
		err = txStore.SetBroadcastBeforeBlockNum(testutils.Context(t), currentBlockNum+1, ethClient.ConfiguredChainID())
		require.NoError(t, err)

		// Any tx broadcast <= 10 will require a gas bump.
		newBlock := int64(12)
		gasBumpThreshold := int64(2)
		etxs, err := txStore.FindTxsRequiringGasBump(testutils.Context(t), fromAddress, newBlock, gasBumpThreshold, int64(0), ethClient.ConfiguredChainID())
		require.NoError(t, err)
		assert.Len(t, etxs, 1)
		assert.Equal(t, etx.ID, etxs[0].ID)
	})
}

// TestEthConfirmer_FindTxsRequiringResubmissionDueToInsufficientEth verifies
// selection and nonce ordering of txes with insufficient_eth attempts.
func TestEthConfirmer_FindTxsRequiringResubmissionDueToInsufficientEth(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := configtest.NewGeneralConfig(t, nil)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())

	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()

	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)
	_, otherAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	// Insert order is mixed up to test sorting.
	etx2 := mustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, 1, fromAddress)
	etx3 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 2, fromAddress)
	attempt3_2 := cltest.NewLegacyEthTxAttempt(t, etx3.ID)
	attempt3_2.State = txmgrtypes.TxAttemptInsufficientFunds
	attempt3_2.TxFee.Legacy = assets.NewWeiI(100)
	require.NoError(t, txStore.InsertTxAttempt(&attempt3_2))
	etx1 := mustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, 0, fromAddress)

	// These should never be returned.
	cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 3, fromAddress)
	cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 4, 100, fromAddress)
	mustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, 0, otherAddress)

	t.Run("returns all eth_txes with at least one attempt that is in insufficient_eth state", func(t *testing.T) {
		etxs, err := txStore.FindTxsRequiringResubmissionDueToInsufficientFunds(testutils.Context(t), fromAddress, &cltest.FixtureChainID)
		require.NoError(t, err)

		assert.Len(t, etxs, 3)

		// Results are ordered by nonce ascending.
		assert.Equal(t, *etx1.Sequence, *etxs[0].Sequence)
		assert.Equal(t, etx1.ID, etxs[0].ID)
		assert.Equal(t, *etx2.Sequence, *etxs[1].Sequence)
		assert.Equal(t, etx2.ID, etxs[1].ID)
		assert.Equal(t, *etx3.Sequence, *etxs[2].Sequence)
		assert.Equal(t, etx3.ID, etxs[2].ID)
	})

	t.Run("does not return eth_txes with different chain ID", func(t *testing.T) {
		etxs, err := txStore.FindTxsRequiringResubmissionDueToInsufficientFunds(testutils.Context(t), fromAddress, big.NewInt(42))
		require.NoError(t, err)

		assert.Len(t, etxs, 0)
	})

	t.Run("does not return confirmed or fatally errored eth_txes", func(t *testing.T) {
		pgtest.MustExec(t, db, `UPDATE evm.txes SET state='confirmed' WHERE id = $1`, etx1.ID)
		pgtest.MustExec(t, db, `UPDATE evm.txes SET state='fatal_error', nonce=NULL, error='foo', broadcast_at=NULL, initial_broadcast_at=NULL WHERE id = $1`, etx2.ID)

		etxs, err := txStore.FindTxsRequiringResubmissionDueToInsufficientFunds(testutils.Context(t), fromAddress, &cltest.FixtureChainID)
		require.NoError(t, err)

		assert.Len(t, etxs, 1)

		assert.Equal(t, *etx3.Sequence, *etxs[0].Sequence)
		assert.Equal(t, etx3.ID, etxs[0].ID)
	})
}

// TestORM_MarkOldTxesMissingReceiptAsErrored verifies that old
// confirmed_missing_receipt txes are moved to fatal_error.
func TestORM_MarkOldTxesMissingReceiptAsErrored(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
	ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
	_, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)

	// Tx state should be confirmed_missing_receipt and the attempt broadcast
	// before the cutoff time.
	t.Run("successfully mark errored transactions", func(t *testing.T) {
		etx := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt(t, txStore, 1, 7, time.Now(), fromAddress)

		err := txStore.MarkOldTxesMissingReceiptAsErrored(testutils.Context(t), 10, 2, ethClient.ConfiguredChainID())
		require.NoError(t, err)

		etx, err = txStore.FindTxWithAttempts(etx.ID)
		require.NoError(t, err)
		assert.Equal(t, txmgrcommon.TxFatalError, etx.State)
	})

	t.Run("successfully mark errored transactions w/ qopt passing in sql.Tx", func(t *testing.T) {
		etx := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt(t, txStore, 1, 7, time.Now(), fromAddress)
		err := txStore.MarkOldTxesMissingReceiptAsErrored(testutils.Context(t), 10, 2, ethClient.ConfiguredChainID())
		require.NoError(t, err)

		// Must run other query outside of postgres transaction so changes are committed.
		etx, err = txStore.FindTxWithAttempts(etx.ID)
		require.NoError(t, err)
		assert.Equal(t, txmgrcommon.TxFatalError, etx.State)
	})
}

// TestORM_LoadEthTxesAttempts verifies loading of attempts onto txes, both
// committed and within an open postgres transaction.
func TestORM_LoadEthTxesAttempts(t *testing.T) {
	t.Parallel()

	db := pgtest.NewSqlxDB(t)
	cfg := newTestChainScopedConfig(t)
	txStore := cltest.NewTestTxStore(t, db, cfg.Database())
+ ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + t.Run("load eth tx attempt", func(t *testing.T) { + etx := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt(t, txStore, 1, 7, time.Now(), fromAddress) + etx.TxAttempts = []txmgr.TxAttempt{} + + err := txStore.LoadTxesAttempts([]*txmgr.Tx{&etx}) + require.NoError(t, err) + assert.Len(t, etx.TxAttempts, 1) + }) + + t.Run("load new attempt inserted in current postgres transaction", func(t *testing.T) { + etx := mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt(t, txStore, 3, 9, time.Now(), fromAddress) + etx.TxAttempts = []txmgr.TxAttempt{} + + q := pg.NewQ(db, logger.Test(t), cfg.Database()) + + newAttempt := cltest.NewDynamicFeeEthTxAttempt(t, etx.ID) + var dbAttempt txmgr.DbEthTxAttempt + dbAttempt.FromTxAttempt(&newAttempt) + err := q.Transaction(func(tx pg.Queryer) error { + const insertEthTxAttemptSQL = `INSERT INTO evm.tx_attempts (eth_tx_id, gas_price, signed_raw_tx, hash, broadcast_before_block_num, state, created_at, chain_specific_gas_limit, tx_type, gas_tip_cap, gas_fee_cap) VALUES ( + :eth_tx_id, :gas_price, :signed_raw_tx, :hash, :broadcast_before_block_num, :state, NOW(), :chain_specific_gas_limit, :tx_type, :gas_tip_cap, :gas_fee_cap + ) RETURNING *` + _, err := tx.NamedExec(insertEthTxAttemptSQL, dbAttempt) + require.NoError(t, err) + + err = txStore.LoadTxesAttempts([]*txmgr.Tx{&etx}, pg.WithQueryer(tx)) + require.NoError(t, err) + assert.Len(t, etx.TxAttempts, 2) + + return nil + }) + require.NoError(t, err) + // also check after postgres transaction is committed + etx.TxAttempts = []txmgr.TxAttempt{} + err = txStore.LoadTxesAttempts([]*txmgr.Tx{&etx}) + require.NoError(t, err) + assert.Len(t, etx.TxAttempts, 2) + }) +} + +func TestORM_SaveReplacementInProgressAttempt(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := 
cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + t.Run("replace eth tx attempt", func(t *testing.T) { + etx := mustInsertInProgressEthTxWithAttempt(t, txStore, 123, fromAddress) + oldAttempt := etx.TxAttempts[0] + + newAttempt := cltest.NewDynamicFeeEthTxAttempt(t, etx.ID) + err := txStore.SaveReplacementInProgressAttempt(testutils.Context(t), oldAttempt, &newAttempt) + require.NoError(t, err) + + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + assert.Len(t, etx.TxAttempts, 1) + require.Equal(t, etx.TxAttempts[0].Hash, newAttempt.Hash) + }) +} + +func TestORM_FindNextUnstartedTransactionFromAddress(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + t.Run("cannot find unstarted tx", func(t *testing.T) { + mustInsertInProgressEthTxWithAttempt(t, txStore, 13, fromAddress) + + resultEtx := new(txmgr.Tx) + err := txStore.FindNextUnstartedTransactionFromAddress(testutils.Context(t), resultEtx, fromAddress, ethClient.ConfiguredChainID()) + assert.ErrorIs(t, err, sql.ErrNoRows) + }) + + t.Run("finds unstarted tx", func(t *testing.T) { + mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID) + resultEtx := new(txmgr.Tx) + err := txStore.FindNextUnstartedTransactionFromAddress(testutils.Context(t), resultEtx, fromAddress, ethClient.ConfiguredChainID()) + require.NoError(t, err) + }) +} + +func TestORM_UpdateTxFatalError(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + 
ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + t.Run("update successful", func(t *testing.T) { + etx := mustInsertInProgressEthTxWithAttempt(t, txStore, 13, fromAddress) + etxPretendError := null.StringFrom("no more toilet paper") + etx.Error = etxPretendError + + err := txStore.UpdateTxFatalError(testutils.Context(t), &etx) + require.NoError(t, err) + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + assert.Len(t, etx.TxAttempts, 0) + assert.Equal(t, txmgrcommon.TxFatalError, etx.State) + }) +} + +func TestORM_UpdateTxAttemptInProgressToBroadcast(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + t.Run("update successful", func(t *testing.T) { + etx := mustInsertInProgressEthTxWithAttempt(t, txStore, 13, fromAddress) + attempt := etx.TxAttempts[0] + require.Equal(t, txmgrtypes.TxAttemptInProgress, attempt.State) + + time1 := time.Now() + i := int16(0) + etx.BroadcastAt = &time1 + etx.InitialBroadcastAt = &time1 + err := txStore.UpdateTxAttemptInProgressToBroadcast(testutils.Context(t), &etx, attempt, txmgrtypes.TxAttemptBroadcast) + require.NoError(t, err) + // Increment sequence + i++ + + attemptResult, err := txStore.FindTxAttempt(attempt.Hash) + require.NoError(t, err) + require.Equal(t, attempt.Hash, attemptResult.Hash) + assert.Equal(t, txmgrtypes.TxAttemptBroadcast, attemptResult.State) + assert.Equal(t, int16(1), i) + }) +} + +func TestORM_UpdateTxUnstartedToInProgress(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, 
fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + q := pg.NewQ(db, logger.Test(t), cfg.Database()) + nonce := evmtypes.Nonce(123) + + t.Run("update successful", func(t *testing.T) { + etx := mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID) + etx.Sequence = &nonce + attempt := cltest.NewLegacyEthTxAttempt(t, etx.ID) + + err := txStore.UpdateTxUnstartedToInProgress(testutils.Context(t), &etx, &attempt) + require.NoError(t, err) + + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + assert.Equal(t, txmgrcommon.TxInProgress, etx.State) + assert.Len(t, etx.TxAttempts, 1) + }) + + t.Run("update fails because tx is removed", func(t *testing.T) { + etx := mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID) + etx.Sequence = &nonce + + attempt := cltest.NewLegacyEthTxAttempt(t, etx.ID) + + err := q.ExecQ("DELETE FROM evm.txes WHERE id = $1", etx.ID) + require.NoError(t, err) + + err = txStore.UpdateTxUnstartedToInProgress(testutils.Context(t), &etx, &attempt) + require.ErrorContains(t, err, "tx removed") + }) + + db = pgtest.NewSqlxDB(t) + cfg = newTestChainScopedConfig(t) + txStore = cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore = cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress = cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + q = pg.NewQ(db, logger.Test(t), cfg.Database()) + + t.Run("update replaces abandoned tx with same hash", func(t *testing.T) { + etx := mustInsertInProgressEthTxWithAttempt(t, txStore, nonce, fromAddress) + require.Len(t, etx.TxAttempts, 1) + + zero := commonconfig.MustNewDuration(time.Duration(0)) + evmCfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Chain.Transactions.ReaperInterval = zero + c.EVM[0].Chain.Transactions.ReaperThreshold = zero + c.EVM[0].Chain.Transactions.ResendAfterThreshold = zero + }) + + ccfg := evmtest.NewChainScopedConfig(t, evmCfg) + 
evmTxmCfg := txmgr.NewEvmTxmConfig(ccfg.EVM()) + ec := evmtest.NewEthClientMockWithDefaultChain(t) + txMgr := txmgr.NewEvmTxm(ec.ConfiguredChainID(), evmTxmCfg, ccfg.EVM().Transactions(), nil, logger.Test(t), nil, nil, + nil, txStore, nil, nil, nil, nil, nil) + err := txMgr.XXXTestAbandon(fromAddress) // mark transaction as abandoned + require.NoError(t, err) + + etx2 := mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID) + etx2.Sequence = &nonce + attempt2 := cltest.NewLegacyEthTxAttempt(t, etx2.ID) + attempt2.Hash = etx.TxAttempts[0].Hash + + // Even though this will initially fail due to idx_eth_tx_attempts_hash constraint, because the conflicting tx has been abandoned + // it should succeed after removing the abandoned attempt and retrying the insert + err = txStore.UpdateTxUnstartedToInProgress(testutils.Context(t), &etx2, &attempt2) + require.NoError(t, err) + }) + + _, fromAddress = cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + // Same flow as previous test, but without calling txMgr.Abandon() + t.Run("duplicate tx hash disallowed in tx_eth_attempts", func(t *testing.T) { + etx := mustInsertInProgressEthTxWithAttempt(t, txStore, nonce, fromAddress) + require.Len(t, etx.TxAttempts, 1) + + etx.State = txmgrcommon.TxUnstarted + + // Should fail due to idx_eth_tx_attempt_hash constraint + err := txStore.UpdateTxUnstartedToInProgress(testutils.Context(t), &etx, &etx.TxAttempts[0]) + assert.ErrorContains(t, err, "idx_eth_tx_attempts_hash") + txStore = cltest.NewTestTxStore(t, db, cfg.Database()) // current txStore is poisened now, next test will need fresh one + }) +} + +func TestORM_GetTxInProgress(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + t.Run("gets 0 in progress 
eth transaction", func(t *testing.T) { + etxResult, err := txStore.GetTxInProgress(testutils.Context(t), fromAddress) + require.NoError(t, err) + require.Nil(t, etxResult) + }) + + t.Run("get 1 in progress eth transaction", func(t *testing.T) { + etx := mustInsertInProgressEthTxWithAttempt(t, txStore, 123, fromAddress) + + etxResult, err := txStore.GetTxInProgress(testutils.Context(t), fromAddress) + require.NoError(t, err) + assert.Equal(t, etxResult.ID, etx.ID) + }) +} + +func TestORM_GetNonFatalTransactions(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + t.Run("gets 0 non finalized eth transaction", func(t *testing.T) { + txes, err := txStore.GetNonFatalTransactions(testutils.Context(t), ethClient.ConfiguredChainID()) + require.NoError(t, err) + require.Empty(t, txes) + }) + + t.Run("get in progress, unstarted, and unconfirmed eth transactions", func(t *testing.T) { + inProgressTx := mustInsertInProgressEthTxWithAttempt(t, txStore, 123, fromAddress) + unstartedTx := mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, ethClient.ConfiguredChainID()) + + txes, err := txStore.GetNonFatalTransactions(testutils.Context(t), ethClient.ConfiguredChainID()) + require.NoError(t, err) + + for _, tx := range txes { + require.True(t, tx.ID == inProgressTx.ID || tx.ID == unstartedTx.ID) + } + }) +} + +func TestORM_GetTxByID(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + t.Run("no transaction", func(t *testing.T) { + 
tx, err := txStore.GetTxByID(testutils.Context(t), int64(0)) + require.NoError(t, err) + require.Nil(t, tx) + }) + + t.Run("get transaction by ID", func(t *testing.T) { + insertedTx := mustInsertInProgressEthTxWithAttempt(t, txStore, 123, fromAddress) + tx, err := txStore.GetTxByID(testutils.Context(t), insertedTx.ID) + require.NoError(t, err) + require.NotNil(t, tx) + }) +} + +func TestORM_GetFatalTransactions(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + t.Run("gets 0 fatal eth transactions", func(t *testing.T) { + txes, err := txStore.GetFatalTransactions(testutils.Context(t)) + require.NoError(t, err) + require.Empty(t, txes) + }) + + t.Run("get fatal transactions", func(t *testing.T) { + fatalTx := mustInsertFatalErrorEthTx(t, txStore, fromAddress) + txes, err := txStore.GetFatalTransactions(testutils.Context(t)) + require.NoError(t, err) + require.Equal(t, txes[0].ID, fatalTx.ID) + }) +} + +func TestORM_HasInProgressTransaction(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + t.Run("no in progress eth transaction", func(t *testing.T) { + exists, err := txStore.HasInProgressTransaction(testutils.Context(t), fromAddress, ethClient.ConfiguredChainID()) + require.NoError(t, err) + require.False(t, exists) + }) + + t.Run("has in progress eth transaction", func(t *testing.T) { + mustInsertInProgressEthTxWithAttempt(t, txStore, 123, fromAddress) + + exists, err := 
txStore.HasInProgressTransaction(testutils.Context(t), fromAddress, ethClient.ConfiguredChainID()) + require.NoError(t, err) + require.True(t, exists) + }) +} + +func TestORM_CountUnconfirmedTransactions(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + _, otherAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 0, otherAddress) + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 0, fromAddress) + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 1, fromAddress) + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 2, fromAddress) + + count, err := txStore.CountUnconfirmedTransactions(testutils.Context(t), fromAddress, &cltest.FixtureChainID) + require.NoError(t, err) + assert.Equal(t, int(count), 3) +} + +func TestORM_CountTransactionsByState(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress1 := cltest.MustInsertRandomKey(t, ethKeyStore) + _, fromAddress2 := cltest.MustInsertRandomKey(t, ethKeyStore) + _, fromAddress3 := cltest.MustInsertRandomKey(t, ethKeyStore) + + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 0, fromAddress1) + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 1, fromAddress2) + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 2, fromAddress3) + + count, err := txStore.CountTransactionsByState(testutils.Context(t), txmgrcommon.TxUnconfirmed, &cltest.FixtureChainID) + require.NoError(t, err) + 
assert.Equal(t, int(count), 3) +} + +func TestORM_CountUnstartedTransactions(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + _, otherAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + + mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID) + mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID) + mustCreateUnstartedGeneratedTx(t, txStore, otherAddress, &cltest.FixtureChainID) + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 2, fromAddress) + + count, err := txStore.CountUnstartedTransactions(testutils.Context(t), fromAddress, &cltest.FixtureChainID) + require.NoError(t, err) + assert.Equal(t, int(count), 2) +} + +func TestORM_CheckTxQueueCapacity(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + _, otherAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + + toAddress := testutils.NewAddress() + encodedPayload := []byte{1, 2, 3} + feeLimit := uint32(1000000000) + value := big.Int(assets.NewEthValue(142)) + var maxUnconfirmedTransactions uint64 = 2 + + t.Run("with no eth_txes returns nil", func(t *testing.T) { + err := txStore.CheckTxQueueCapacity(testutils.Context(t), fromAddress, maxUnconfirmedTransactions, &cltest.FixtureChainID) + require.NoError(t, err) + }) + + // deliberately one extra to exceed limit + for i := 0; i <= int(maxUnconfirmedTransactions); i++ { + mustCreateUnstartedTx(t, txStore, otherAddress, toAddress, encodedPayload, feeLimit, value, &cltest.FixtureChainID) + } + + 
t.Run("with eth_txes from another address returns nil", func(t *testing.T) { + err := txStore.CheckTxQueueCapacity(testutils.Context(t), fromAddress, maxUnconfirmedTransactions, &cltest.FixtureChainID) + require.NoError(t, err) + }) + + for i := 0; i <= int(maxUnconfirmedTransactions); i++ { + mustInsertFatalErrorEthTx(t, txStore, otherAddress) + } + + t.Run("ignores fatally_errored transactions", func(t *testing.T) { + err := txStore.CheckTxQueueCapacity(testutils.Context(t), fromAddress, maxUnconfirmedTransactions, &cltest.FixtureChainID) + require.NoError(t, err) + }) + + var n int64 + mustInsertInProgressEthTxWithAttempt(t, txStore, evmtypes.Nonce(n), fromAddress) + n++ + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, n, fromAddress) + n++ + + t.Run("unconfirmed and in_progress transactions do not count", func(t *testing.T) { + err := txStore.CheckTxQueueCapacity(testutils.Context(t), fromAddress, 1, &cltest.FixtureChainID) + require.NoError(t, err) + }) + + // deliberately one extra to exceed limit + for i := 0; i <= int(maxUnconfirmedTransactions); i++ { + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, n, 42, fromAddress) + n++ + } + + t.Run("with many confirmed eth_txes from the same address returns nil", func(t *testing.T) { + err := txStore.CheckTxQueueCapacity(testutils.Context(t), fromAddress, maxUnconfirmedTransactions, &cltest.FixtureChainID) + require.NoError(t, err) + }) + + for i := 0; i < int(maxUnconfirmedTransactions)-1; i++ { + mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, feeLimit, value, &cltest.FixtureChainID) + } + + t.Run("with fewer unstarted eth_txes than limit returns nil", func(t *testing.T) { + err := txStore.CheckTxQueueCapacity(testutils.Context(t), fromAddress, maxUnconfirmedTransactions, &cltest.FixtureChainID) + require.NoError(t, err) + }) + + mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, feeLimit, value, &cltest.FixtureChainID) + + 
t.Run("with equal or more unstarted eth_txes than limit returns error", func(t *testing.T) { + err := txStore.CheckTxQueueCapacity(testutils.Context(t), fromAddress, maxUnconfirmedTransactions, &cltest.FixtureChainID) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("cannot create transaction; too many unstarted transactions in the queue (2/%d). WARNING: Hitting EVM.Transactions.MaxQueued", maxUnconfirmedTransactions)) + + mustCreateUnstartedTx(t, txStore, fromAddress, toAddress, encodedPayload, feeLimit, value, &cltest.FixtureChainID) + err = txStore.CheckTxQueueCapacity(testutils.Context(t), fromAddress, maxUnconfirmedTransactions, &cltest.FixtureChainID) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("cannot create transaction; too many unstarted transactions in the queue (3/%d). WARNING: Hitting EVM.Transactions.MaxQueued", maxUnconfirmedTransactions)) + }) + + t.Run("with different chain ID ignores txes", func(t *testing.T) { + err := txStore.CheckTxQueueCapacity(testutils.Context(t), fromAddress, maxUnconfirmedTransactions, big.NewInt(42)) + require.NoError(t, err) + }) + + t.Run("disables check with 0 limit", func(t *testing.T) { + err := txStore.CheckTxQueueCapacity(testutils.Context(t), fromAddress, 0, &cltest.FixtureChainID) + require.NoError(t, err) + }) +} + +func TestORM_CreateTransaction(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + txStore := newTxStore(t, db, cfg.Database()) + kst := cltest.NewKeyStore(t, db, cfg.Database()) + + _, fromAddress := cltest.MustInsertRandomKey(t, kst.Eth()) + toAddress := testutils.NewAddress() + gasLimit := uint32(1000) + payload := []byte{1, 2, 3} + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + t.Run("with queue under capacity inserts eth_tx", func(t *testing.T) { + subject := uuid.New() + strategy := newMockTxStrategy(t) + strategy.On("Subject").Return(uuid.NullUUID{UUID: subject, Valid: true}) 
+ etx, err := txStore.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + Meta: nil, + Strategy: strategy, + }, ethClient.ConfiguredChainID()) + assert.NoError(t, err) + + assert.Greater(t, etx.ID, int64(0)) + assert.Equal(t, etx.State, txmgrcommon.TxUnstarted) + assert.Equal(t, gasLimit, etx.FeeLimit) + assert.Equal(t, fromAddress, etx.FromAddress) + assert.Equal(t, toAddress, etx.ToAddress) + assert.Equal(t, payload, etx.EncodedPayload) + assert.Equal(t, big.Int(assets.NewEthValue(0)), etx.Value) + assert.Equal(t, subject, etx.Subject.UUID) + + cltest.AssertCount(t, db, "evm.txes", 1) + + var dbEthTx txmgr.DbEthTx + require.NoError(t, db.Get(&dbEthTx, `SELECT * FROM evm.txes ORDER BY id ASC LIMIT 1`)) + + assert.Equal(t, dbEthTx.State, txmgrcommon.TxUnstarted) + assert.Equal(t, gasLimit, dbEthTx.GasLimit) + assert.Equal(t, fromAddress, dbEthTx.FromAddress) + assert.Equal(t, toAddress, dbEthTx.ToAddress) + assert.Equal(t, payload, dbEthTx.EncodedPayload) + assert.Equal(t, assets.NewEthValue(0), dbEthTx.Value) + assert.Equal(t, subject, dbEthTx.Subject.UUID) + }) + + t.Run("doesn't insert eth_tx if a matching tx already exists for that pipeline_task_run_id", func(t *testing.T) { + id := uuid.New() + txRequest := txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: testutils.NewAddress(), + EncodedPayload: []byte{1, 2, 3}, + FeeLimit: 21000, + PipelineTaskRunID: &id, + Strategy: txmgrcommon.NewSendEveryStrategy(), + } + tx1, err := txStore.CreateTransaction(testutils.Context(t), txRequest, ethClient.ConfiguredChainID()) + assert.NoError(t, err) + + tx2, err := txStore.CreateTransaction(testutils.Context(t), txRequest, ethClient.ConfiguredChainID()) + assert.NoError(t, err) + + assert.Equal(t, tx1.GetID(), tx2.GetID()) + }) + + t.Run("sets signal callback flag", func(t *testing.T) { + subject := uuid.New() + strategy := newMockTxStrategy(t) + 
strategy.On("Subject").Return(uuid.NullUUID{UUID: subject, Valid: true}) + etx, err := txStore.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + Meta: nil, + Strategy: strategy, + SignalCallback: true, + }, ethClient.ConfiguredChainID()) + assert.NoError(t, err) + + assert.Greater(t, etx.ID, int64(0)) + assert.Equal(t, fromAddress, etx.FromAddress) + assert.Equal(t, true, etx.SignalCallback) + + cltest.AssertCount(t, db, "evm.txes", 3) + + var dbEthTx txmgr.DbEthTx + require.NoError(t, db.Get(&dbEthTx, `SELECT * FROM evm.txes ORDER BY id DESC LIMIT 1`)) + + assert.Equal(t, fromAddress, dbEthTx.FromAddress) + assert.Equal(t, true, dbEthTx.SignalCallback) + }) +} + +func TestORM_PruneUnstartedTxQueue(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := txmgr.NewTxStore(db, logger.Test(t), cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + evmtest.NewEthClientMockWithDefaultChain(t) + _, fromAddress := cltest.MustInsertRandomKeyReturningState(t, ethKeyStore) + + t.Run("does not prune if queue has not exceeded capacity-1", func(t *testing.T) { + subject1 := uuid.New() + strategy1 := txmgrcommon.NewDropOldestStrategy(subject1, uint32(5), cfg.Database().DefaultQueryTimeout()) + for i := 0; i < 5; i++ { + mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID, txRequestWithStrategy(strategy1)) + } + AssertCountPerSubject(t, txStore, int64(4), subject1) + }) + + t.Run("prunes if queue has exceeded capacity-1", func(t *testing.T) { + subject2 := uuid.New() + strategy2 := txmgrcommon.NewDropOldestStrategy(subject2, uint32(3), cfg.Database().DefaultQueryTimeout()) + for i := 0; i < 5; i++ { + mustCreateUnstartedGeneratedTx(t, txStore, fromAddress, &cltest.FixtureChainID, txRequestWithStrategy(strategy2)) + } + AssertCountPerSubject(t, txStore, int64(2), 
subject2) + }) +} + +func AssertCountPerSubject(t *testing.T, txStore txmgr.TestEvmTxStore, expected int64, subject uuid.UUID) { + t.Helper() + count, err := txStore.CountTxesByStateAndSubject(testutils.Context(t), "unstarted", subject) + require.NoError(t, err) + require.Equal(t, int(expected), count) +} diff --git a/core/chains/evm/txmgr/mocks/config.go b/core/chains/evm/txmgr/mocks/config.go new file mode 100644 index 00000000..60aea3ee --- /dev/null +++ b/core/chains/evm/txmgr/mocks/config.go @@ -0,0 +1,117 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + config "github.com/goplugin/pluginv3.0/v2/common/config" + mock "github.com/stretchr/testify/mock" +) + +// Config is an autogenerated mock type for the ChainConfig type +type Config struct { + mock.Mock +} + +// ChainType provides a mock function with given fields: +func (_m *Config) ChainType() config.ChainType { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ChainType") + } + + var r0 config.ChainType + if rf, ok := ret.Get(0).(func() config.ChainType); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(config.ChainType) + } + + return r0 +} + +// FinalityDepth provides a mock function with given fields: +func (_m *Config) FinalityDepth() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalityDepth") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// FinalityTagEnabled provides a mock function with given fields: +func (_m *Config) FinalityTagEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalityTagEnabled") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NonceAutoSync provides a mock function with given fields: +func (_m *Config) NonceAutoSync() bool { + 
ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for NonceAutoSync") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// RPCDefaultBatchSize provides a mock function with given fields: +func (_m *Config) RPCDefaultBatchSize() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RPCDefaultBatchSize") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// NewConfig creates a new instance of Config. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConfig(t interface { + mock.TestingT + Cleanup(func()) +}) *Config { + mock := &Config{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/txmgr/mocks/evm_tx_store.go b/core/chains/evm/txmgr/mocks/evm_tx_store.go new file mode 100644 index 00000000..2f4adf0b --- /dev/null +++ b/core/chains/evm/txmgr/mocks/evm_tx_store.go @@ -0,0 +1,1483 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + + gas "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + + mock "github.com/stretchr/testify/mock" + + null "gopkg.in/guregu/null.v4" + + time "time" + + types "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + + uuid "github.com/google/uuid" +) + +// EvmTxStore is an autogenerated mock type for the EvmTxStore type +type EvmTxStore struct { + mock.Mock +} + +// Abandon provides a mock function with given fields: ctx, id, addr +func (_m *EvmTxStore) Abandon(ctx context.Context, id *big.Int, addr common.Address) error { + ret := _m.Called(ctx, id, addr) + + if len(ret) == 0 { + panic("no return value specified for Abandon") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int, common.Address) error); ok { + r0 = rf(ctx, id, addr) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CheckTxQueueCapacity provides a mock function with given fields: ctx, fromAddress, maxQueuedTransactions, chainID +func (_m *EvmTxStore) CheckTxQueueCapacity(ctx context.Context, fromAddress common.Address, maxQueuedTransactions uint64, chainID *big.Int) error { + ret := _m.Called(ctx, fromAddress, maxQueuedTransactions, chainID) + + if len(ret) == 0 { + panic("no return value specified for CheckTxQueueCapacity") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, uint64, *big.Int) error); ok { + r0 = rf(ctx, fromAddress, maxQueuedTransactions, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Close provides a mock function with given fields: +func (_m *EvmTxStore) Close() { + _m.Called() +} + +// CountTransactionsByState provides a mock function with given fields: ctx, state, chainID +func (_m *EvmTxStore) CountTransactionsByState(ctx context.Context, state types.TxState, chainID *big.Int) (uint32, error) { 
+ ret := _m.Called(ctx, state, chainID) + + if len(ret) == 0 { + panic("no return value specified for CountTransactionsByState") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.TxState, *big.Int) (uint32, error)); ok { + return rf(ctx, state, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, types.TxState, *big.Int) uint32); ok { + r0 = rf(ctx, state, chainID) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.TxState, *big.Int) error); ok { + r1 = rf(ctx, state, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CountUnconfirmedTransactions provides a mock function with given fields: ctx, fromAddress, chainID +func (_m *EvmTxStore) CountUnconfirmedTransactions(ctx context.Context, fromAddress common.Address, chainID *big.Int) (uint32, error) { + ret := _m.Called(ctx, fromAddress, chainID) + + if len(ret) == 0 { + panic("no return value specified for CountUnconfirmedTransactions") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) (uint32, error)); ok { + return rf(ctx, fromAddress, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) uint32); ok { + r0 = rf(ctx, fromAddress, chainID) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, fromAddress, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CountUnstartedTransactions provides a mock function with given fields: ctx, fromAddress, chainID +func (_m *EvmTxStore) CountUnstartedTransactions(ctx context.Context, fromAddress common.Address, chainID *big.Int) (uint32, error) { + ret := _m.Called(ctx, fromAddress, chainID) + + if len(ret) == 0 { + panic("no return value specified for CountUnstartedTransactions") + } + + var r0 uint32 + var r1 error + if rf, ok := 
ret.Get(0).(func(context.Context, common.Address, *big.Int) (uint32, error)); ok { + return rf(ctx, fromAddress, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) uint32); ok { + r0 = rf(ctx, fromAddress, chainID) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, fromAddress, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateTransaction provides a mock function with given fields: ctx, txRequest, chainID +func (_m *EvmTxStore) CreateTransaction(ctx context.Context, txRequest types.TxRequest[common.Address, common.Hash], chainID *big.Int) (types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, txRequest, chainID) + + if len(ret) == 0 { + panic("no return value specified for CreateTransaction") + } + + var r0 types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.TxRequest[common.Address, common.Hash], *big.Int) (types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, txRequest, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, types.TxRequest[common.Address, common.Hash], *big.Int) types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, txRequest, chainID) + } else { + r0 = ret.Get(0).(types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + + if rf, ok := ret.Get(1).(func(context.Context, types.TxRequest[common.Address, common.Hash], *big.Int) error); ok { + r1 = rf(ctx, txRequest, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteInProgressAttempt provides a mock function with given fields: ctx, attempt +func (_m *EvmTxStore) 
DeleteInProgressAttempt(ctx context.Context, attempt types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error { + ret := _m.Called(ctx, attempt) + + if len(ret) == 0 { + panic("no return value specified for DeleteInProgressAttempt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error); ok { + r0 = rf(ctx, attempt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FindEarliestUnconfirmedBroadcastTime provides a mock function with given fields: ctx, chainID +func (_m *EvmTxStore) FindEarliestUnconfirmedBroadcastTime(ctx context.Context, chainID *big.Int) (null.Time, error) { + ret := _m.Called(ctx, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindEarliestUnconfirmedBroadcastTime") + } + + var r0 null.Time + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (null.Time, error)); ok { + return rf(ctx, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) null.Time); ok { + r0 = rf(ctx, chainID) + } else { + r0 = ret.Get(0).(null.Time) + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindEarliestUnconfirmedTxAttemptBlock provides a mock function with given fields: ctx, chainID +func (_m *EvmTxStore) FindEarliestUnconfirmedTxAttemptBlock(ctx context.Context, chainID *big.Int) (null.Int, error) { + ret := _m.Called(ctx, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindEarliestUnconfirmedTxAttemptBlock") + } + + var r0 null.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (null.Int, error)); ok { + return rf(ctx, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) null.Int); ok { + r0 = rf(ctx, chainID) + } else { + r0 = ret.Get(0).(null.Int) + } + + 
if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindLatestSequence provides a mock function with given fields: ctx, fromAddress, chainId +func (_m *EvmTxStore) FindLatestSequence(ctx context.Context, fromAddress common.Address, chainId *big.Int) (evmtypes.Nonce, error) { + ret := _m.Called(ctx, fromAddress, chainId) + + if len(ret) == 0 { + panic("no return value specified for FindLatestSequence") + } + + var r0 evmtypes.Nonce + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) (evmtypes.Nonce, error)); ok { + return rf(ctx, fromAddress, chainId) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) evmtypes.Nonce); ok { + r0 = rf(ctx, fromAddress, chainId) + } else { + r0 = ret.Get(0).(evmtypes.Nonce) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, fromAddress, chainId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindNextUnstartedTransactionFromAddress provides a mock function with given fields: ctx, etx, fromAddress, chainID +func (_m *EvmTxStore) FindNextUnstartedTransactionFromAddress(ctx context.Context, etx *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], fromAddress common.Address, chainID *big.Int) error { + ret := _m.Called(ctx, etx, fromAddress, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindNextUnstartedTransactionFromAddress") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], common.Address, *big.Int) error); ok { + r0 = rf(ctx, etx, fromAddress, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FindTransactionsConfirmedInBlockRange provides a mock function with given fields: ctx, highBlockNumber, lowBlockNumber, 
chainID +func (_m *EvmTxStore) FindTransactionsConfirmedInBlockRange(ctx context.Context, highBlockNumber int64, lowBlockNumber int64, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, highBlockNumber, lowBlockNumber, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTransactionsConfirmedInBlockRange") + } + + var r0 []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int64, int64, *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, highBlockNumber, lowBlockNumber, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, int64, int64, *big.Int) []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, highBlockNumber, lowBlockNumber, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, int64, int64, *big.Int) error); ok { + r1 = rf(ctx, highBlockNumber, lowBlockNumber, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxAttempt provides a mock function with given fields: hash +func (_m *EvmTxStore) FindTxAttempt(hash common.Hash) (*types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(hash) + + if len(ret) == 0 { + panic("no return value specified for FindTxAttempt") + } + + var r0 *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash) (*types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + 
return rf(hash) + } + if rf, ok := ret.Get(0).(func(common.Hash) *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash) error); ok { + r1 = rf(hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxAttemptConfirmedByTxIDs provides a mock function with given fields: ids +func (_m *EvmTxStore) FindTxAttemptConfirmedByTxIDs(ids []int64) ([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ids) + + if len(ret) == 0 { + panic("no return value specified for FindTxAttemptConfirmedByTxIDs") + } + + var r0 []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func([]int64) ([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ids) + } + if rf, ok := ret.Get(0).(func([]int64) []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func([]int64) error); ok { + r1 = rf(ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxAttemptsConfirmedMissingReceipt provides a mock function with given fields: ctx, chainID +func (_m *EvmTxStore) FindTxAttemptsConfirmedMissingReceipt(ctx context.Context, chainID *big.Int) ([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, chainID) + + if len(ret) == 0 { + panic("no return value 
specified for FindTxAttemptsConfirmedMissingReceipt") + } + + var r0 []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) ([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxAttemptsRequiringReceiptFetch provides a mock function with given fields: ctx, chainID +func (_m *EvmTxStore) FindTxAttemptsRequiringReceiptFetch(ctx context.Context, chainID *big.Int) ([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxAttemptsRequiringReceiptFetch") + } + + var r0 []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) ([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, 
gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxAttemptsRequiringResend provides a mock function with given fields: ctx, olderThan, maxInFlightTransactions, chainID, address +func (_m *EvmTxStore) FindTxAttemptsRequiringResend(ctx context.Context, olderThan time.Time, maxInFlightTransactions uint32, chainID *big.Int, address common.Address) ([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, olderThan, maxInFlightTransactions, chainID, address) + + if len(ret) == 0 { + panic("no return value specified for FindTxAttemptsRequiringResend") + } + + var r0 []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, time.Time, uint32, *big.Int, common.Address) ([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, olderThan, maxInFlightTransactions, chainID, address) + } + if rf, ok := ret.Get(0).(func(context.Context, time.Time, uint32, *big.Int, common.Address) []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, olderThan, maxInFlightTransactions, chainID, address) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, time.Time, uint32, *big.Int, common.Address) error); ok { + r1 = rf(ctx, olderThan, maxInFlightTransactions, chainID, address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxByHash provides a mock function with given fields: hash +func (_m *EvmTxStore) FindTxByHash(hash common.Hash) (*types.Tx[*big.Int, common.Address, 
common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(hash) + + if len(ret) == 0 { + panic("no return value specified for FindTxByHash") + } + + var r0 *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash) (*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(hash) + } + if rf, ok := ret.Get(0).(func(common.Hash) *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash) error); ok { + r1 = rf(hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxWithAttempts provides a mock function with given fields: etxID +func (_m *EvmTxStore) FindTxWithAttempts(etxID int64) (types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(etxID) + + if len(ret) == 0 { + panic("no return value specified for FindTxWithAttempts") + } + + var r0 types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(int64) (types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(etxID) + } + if rf, ok := ret.Get(0).(func(int64) types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(etxID) + } else { + r0 = ret.Get(0).(types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(etxID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxWithIdempotencyKey provides a mock function with given 
fields: ctx, idempotencyKey, chainID +func (_m *EvmTxStore) FindTxWithIdempotencyKey(ctx context.Context, idempotencyKey string, chainID *big.Int) (*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, idempotencyKey, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxWithIdempotencyKey") + } + + var r0 *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, *big.Int) (*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, idempotencyKey, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, *big.Int) *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, idempotencyKey, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, *big.Int) error); ok { + r1 = rf(ctx, idempotencyKey, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxWithSequence provides a mock function with given fields: ctx, fromAddress, seq +func (_m *EvmTxStore) FindTxWithSequence(ctx context.Context, fromAddress common.Address, seq evmtypes.Nonce) (*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, fromAddress, seq) + + if len(ret) == 0 { + panic("no return value specified for FindTxWithSequence") + } + + var r0 *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, evmtypes.Nonce) (*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + 
return rf(ctx, fromAddress, seq) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, evmtypes.Nonce) *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, fromAddress, seq) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, evmtypes.Nonce) error); ok { + r1 = rf(ctx, fromAddress, seq) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxesByMetaFieldAndStates provides a mock function with given fields: ctx, metaField, metaValue, states, chainID +func (_m *EvmTxStore) FindTxesByMetaFieldAndStates(ctx context.Context, metaField string, metaValue string, states []types.TxState, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, metaField, metaValue, states, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxesByMetaFieldAndStates") + } + + var r0 []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, []types.TxState, *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, metaField, metaValue, states, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, []types.TxState, *big.Int) []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, metaField, metaValue, states, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, []types.TxState, *big.Int) 
error); ok { + r1 = rf(ctx, metaField, metaValue, states, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxesPendingCallback provides a mock function with given fields: ctx, blockNum, chainID +func (_m *EvmTxStore) FindTxesPendingCallback(ctx context.Context, blockNum int64, chainID *big.Int) ([]types.ReceiptPlus[*evmtypes.Receipt], error) { + ret := _m.Called(ctx, blockNum, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxesPendingCallback") + } + + var r0 []types.ReceiptPlus[*evmtypes.Receipt] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int64, *big.Int) ([]types.ReceiptPlus[*evmtypes.Receipt], error)); ok { + return rf(ctx, blockNum, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, int64, *big.Int) []types.ReceiptPlus[*evmtypes.Receipt]); ok { + r0 = rf(ctx, blockNum, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.ReceiptPlus[*evmtypes.Receipt]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, int64, *big.Int) error); ok { + r1 = rf(ctx, blockNum, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxesWithAttemptsAndReceiptsByIdsAndState provides a mock function with given fields: ctx, ids, states, chainID +func (_m *EvmTxStore) FindTxesWithAttemptsAndReceiptsByIdsAndState(ctx context.Context, ids []big.Int, states []types.TxState, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, ids, states, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxesWithAttemptsAndReceiptsByIdsAndState") + } + + var r0 []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []big.Int, []types.TxState, *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + 
return rf(ctx, ids, states, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, []big.Int, []types.TxState, *big.Int) []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, ids, states, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []big.Int, []types.TxState, *big.Int) error); ok { + r1 = rf(ctx, ids, states, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxesWithMetaFieldByReceiptBlockNum provides a mock function with given fields: ctx, metaField, blockNum, chainID +func (_m *EvmTxStore) FindTxesWithMetaFieldByReceiptBlockNum(ctx context.Context, metaField string, blockNum int64, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, metaField, blockNum, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxesWithMetaFieldByReceiptBlockNum") + } + + var r0 []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, int64, *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, metaField, blockNum, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, int64, *big.Int) []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, metaField, blockNum, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, int64, *big.Int) error); ok { + r1 = rf(ctx, metaField, blockNum, chainID) + } 
else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxesWithMetaFieldByStates provides a mock function with given fields: ctx, metaField, states, chainID +func (_m *EvmTxStore) FindTxesWithMetaFieldByStates(ctx context.Context, metaField string, states []types.TxState, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, metaField, states, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxesWithMetaFieldByStates") + } + + var r0 []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, []types.TxState, *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, metaField, states, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, []types.TxState, *big.Int) []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, metaField, states, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, []types.TxState, *big.Int) error); ok { + r1 = rf(ctx, metaField, states, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxsRequiringGasBump provides a mock function with given fields: ctx, address, blockNum, gasBumpThreshold, depth, chainID +func (_m *EvmTxStore) FindTxsRequiringGasBump(ctx context.Context, address common.Address, blockNum int64, gasBumpThreshold int64, depth int64, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, address, blockNum, gasBumpThreshold, depth, chainID) + + if len(ret) == 0 { + panic("no 
return value specified for FindTxsRequiringGasBump") + } + + var r0 []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, int64, int64, int64, *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, address, blockNum, gasBumpThreshold, depth, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, int64, int64, int64, *big.Int) []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, address, blockNum, gasBumpThreshold, depth, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, int64, int64, int64, *big.Int) error); ok { + r1 = rf(ctx, address, blockNum, gasBumpThreshold, depth, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTxsRequiringResubmissionDueToInsufficientFunds provides a mock function with given fields: ctx, address, chainID +func (_m *EvmTxStore) FindTxsRequiringResubmissionDueToInsufficientFunds(ctx context.Context, address common.Address, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, address, chainID) + + if len(ret) == 0 { + panic("no return value specified for FindTxsRequiringResubmissionDueToInsufficientFunds") + } + + var r0 []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, address, chainID) + } + if rf, ok := 
ret.Get(0).(func(context.Context, common.Address, *big.Int) []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, address, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, address, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetInProgressTxAttempts provides a mock function with given fields: ctx, address, chainID +func (_m *EvmTxStore) GetInProgressTxAttempts(ctx context.Context, address common.Address, chainID *big.Int) ([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, address, chainID) + + if len(ret) == 0 { + panic("no return value specified for GetInProgressTxAttempts") + } + + var r0 []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, address, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, address, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, address, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetNonFatalTransactions provides a mock function with given fields: ctx, chainID +func (_m *EvmTxStore) 
GetNonFatalTransactions(ctx context.Context, chainID *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, chainID) + + if len(ret) == 0 { + panic("no return value specified for GetNonFatalTransactions") + } + + var r0 []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) ([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) []*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTxByID provides a mock function with given fields: ctx, id +func (_m *EvmTxStore) GetTxByID(ctx context.Context, id int64) (*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetTxByID") + } + + var r0 *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int64) (*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, int64) *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Tx[*big.Int, 
common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTxInProgress provides a mock function with given fields: ctx, fromAddress +func (_m *EvmTxStore) GetTxInProgress(ctx context.Context, fromAddress common.Address) (*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error) { + ret := _m.Called(ctx, fromAddress) + + if len(ret) == 0 { + panic("no return value specified for GetTxInProgress") + } + + var r0 *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) (*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], error)); ok { + return rf(ctx, fromAddress) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(ctx, fromAddress) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, fromAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HasInProgressTransaction provides a mock function with given fields: ctx, account, chainID +func (_m *EvmTxStore) HasInProgressTransaction(ctx context.Context, account common.Address, chainID *big.Int) (bool, error) { + ret := _m.Called(ctx, account, chainID) + + if len(ret) == 0 { + panic("no return value specified for HasInProgressTransaction") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) (bool, error)); ok { + return rf(ctx, account, chainID) + } + if rf, ok := 
ret.Get(0).(func(context.Context, common.Address, *big.Int) bool); ok { + r0 = rf(ctx, account, chainID) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, account, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsTxFinalized provides a mock function with given fields: ctx, blockHeight, txID, chainID +func (_m *EvmTxStore) IsTxFinalized(ctx context.Context, blockHeight int64, txID int64, chainID *big.Int) (bool, error) { + ret := _m.Called(ctx, blockHeight, txID, chainID) + + if len(ret) == 0 { + panic("no return value specified for IsTxFinalized") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int64, int64, *big.Int) (bool, error)); ok { + return rf(ctx, blockHeight, txID, chainID) + } + if rf, ok := ret.Get(0).(func(context.Context, int64, int64, *big.Int) bool); ok { + r0 = rf(ctx, blockHeight, txID, chainID) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, int64, int64, *big.Int) error); ok { + r1 = rf(ctx, blockHeight, txID, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadTxAttempts provides a mock function with given fields: ctx, etx +func (_m *EvmTxStore) LoadTxAttempts(ctx context.Context, etx *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error { + ret := _m.Called(ctx, etx) + + if len(ret) == 0 { + panic("no return value specified for LoadTxAttempts") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error); ok { + r0 = rf(ctx, etx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MarkAllConfirmedMissingReceipt provides a mock function with given fields: ctx, chainID +func (_m *EvmTxStore) MarkAllConfirmedMissingReceipt(ctx context.Context, chainID *big.Int) error { + ret := 
_m.Called(ctx, chainID) + + if len(ret) == 0 { + panic("no return value specified for MarkAllConfirmedMissingReceipt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) error); ok { + r0 = rf(ctx, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MarkOldTxesMissingReceiptAsErrored provides a mock function with given fields: ctx, blockNum, finalityDepth, chainID +func (_m *EvmTxStore) MarkOldTxesMissingReceiptAsErrored(ctx context.Context, blockNum int64, finalityDepth uint32, chainID *big.Int) error { + ret := _m.Called(ctx, blockNum, finalityDepth, chainID) + + if len(ret) == 0 { + panic("no return value specified for MarkOldTxesMissingReceiptAsErrored") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64, uint32, *big.Int) error); ok { + r0 = rf(ctx, blockNum, finalityDepth, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PreloadTxes provides a mock function with given fields: ctx, attempts +func (_m *EvmTxStore) PreloadTxes(ctx context.Context, attempts []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error { + ret := _m.Called(ctx, attempts) + + if len(ret) == 0 { + panic("no return value specified for PreloadTxes") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error); ok { + r0 = rf(ctx, attempts) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PruneUnstartedTxQueue provides a mock function with given fields: ctx, queueSize, subject +func (_m *EvmTxStore) PruneUnstartedTxQueue(ctx context.Context, queueSize uint32, subject uuid.UUID) ([]int64, error) { + ret := _m.Called(ctx, queueSize, subject) + + if len(ret) == 0 { + panic("no return value specified for PruneUnstartedTxQueue") + } + + var r0 []int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, uuid.UUID) 
([]int64, error)); ok { + return rf(ctx, queueSize, subject) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, uuid.UUID) []int64); ok { + r0 = rf(ctx, queueSize, subject) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int64) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, uuid.UUID) error); ok { + r1 = rf(ctx, queueSize, subject) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReapTxHistory provides a mock function with given fields: ctx, minBlockNumberToKeep, timeThreshold, chainID +func (_m *EvmTxStore) ReapTxHistory(ctx context.Context, minBlockNumberToKeep int64, timeThreshold time.Time, chainID *big.Int) error { + ret := _m.Called(ctx, minBlockNumberToKeep, timeThreshold, chainID) + + if len(ret) == 0 { + panic("no return value specified for ReapTxHistory") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64, time.Time, *big.Int) error); ok { + r0 = rf(ctx, minBlockNumberToKeep, timeThreshold, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveConfirmedMissingReceiptAttempt provides a mock function with given fields: ctx, timeout, attempt, broadcastAt +func (_m *EvmTxStore) SaveConfirmedMissingReceiptAttempt(ctx context.Context, timeout time.Duration, attempt *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], broadcastAt time.Time) error { + ret := _m.Called(ctx, timeout, attempt, broadcastAt) + + if len(ret) == 0 { + panic("no return value specified for SaveConfirmedMissingReceiptAttempt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, time.Duration, *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], time.Time) error); ok { + r0 = rf(ctx, timeout, attempt, broadcastAt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveFetchedReceipts provides a mock function with given fields: ctx, receipts, chainID +func (_m *EvmTxStore) 
SaveFetchedReceipts(ctx context.Context, receipts []*evmtypes.Receipt, chainID *big.Int) error { + ret := _m.Called(ctx, receipts, chainID) + + if len(ret) == 0 { + panic("no return value specified for SaveFetchedReceipts") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []*evmtypes.Receipt, *big.Int) error); ok { + r0 = rf(ctx, receipts, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveInProgressAttempt provides a mock function with given fields: ctx, attempt +func (_m *EvmTxStore) SaveInProgressAttempt(ctx context.Context, attempt *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error { + ret := _m.Called(ctx, attempt) + + if len(ret) == 0 { + panic("no return value specified for SaveInProgressAttempt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error); ok { + r0 = rf(ctx, attempt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveInsufficientFundsAttempt provides a mock function with given fields: ctx, timeout, attempt, broadcastAt +func (_m *EvmTxStore) SaveInsufficientFundsAttempt(ctx context.Context, timeout time.Duration, attempt *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], broadcastAt time.Time) error { + ret := _m.Called(ctx, timeout, attempt, broadcastAt) + + if len(ret) == 0 { + panic("no return value specified for SaveInsufficientFundsAttempt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, time.Duration, *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], time.Time) error); ok { + r0 = rf(ctx, timeout, attempt, broadcastAt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveReplacementInProgressAttempt provides a mock function with given fields: ctx, oldAttempt, replacementAttempt +func (_m 
*EvmTxStore) SaveReplacementInProgressAttempt(ctx context.Context, oldAttempt types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], replacementAttempt *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error { + ret := _m.Called(ctx, oldAttempt, replacementAttempt) + + if len(ret) == 0 { + panic("no return value specified for SaveReplacementInProgressAttempt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error); ok { + r0 = rf(ctx, oldAttempt, replacementAttempt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveSentAttempt provides a mock function with given fields: ctx, timeout, attempt, broadcastAt +func (_m *EvmTxStore) SaveSentAttempt(ctx context.Context, timeout time.Duration, attempt *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], broadcastAt time.Time) error { + ret := _m.Called(ctx, timeout, attempt, broadcastAt) + + if len(ret) == 0 { + panic("no return value specified for SaveSentAttempt") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, time.Duration, *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], time.Time) error); ok { + r0 = rf(ctx, timeout, attempt, broadcastAt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetBroadcastBeforeBlockNum provides a mock function with given fields: ctx, blockNum, chainID +func (_m *EvmTxStore) SetBroadcastBeforeBlockNum(ctx context.Context, blockNum int64, chainID *big.Int) error { + ret := _m.Called(ctx, blockNum, chainID) + + if len(ret) == 0 { + panic("no return value specified for SetBroadcastBeforeBlockNum") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context, int64, *big.Int) error); ok { + r0 = rf(ctx, blockNum, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Transactions provides a mock function with given fields: offset, limit +func (_m *EvmTxStore) Transactions(offset int, limit int) ([]types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], int, error) { + ret := _m.Called(offset, limit) + + if len(ret) == 0 { + panic("no return value specified for Transactions") + } + + var r0 []types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(int, int) ([]types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], int, error)); ok { + return rf(offset, limit) + } + if rf, ok := ret.Get(0).(func(int, int) []types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(offset, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(int, int) int); ok { + r1 = rf(offset, limit) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(int, int) error); ok { + r2 = rf(offset, limit) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// TransactionsWithAttempts provides a mock function with given fields: offset, limit +func (_m *EvmTxStore) TransactionsWithAttempts(offset int, limit int) ([]types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], int, error) { + ret := _m.Called(offset, limit) + + if len(ret) == 0 { + panic("no return value specified for TransactionsWithAttempts") + } + + var r0 []types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(int, int) ([]types.Tx[*big.Int, 
common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], int, error)); ok { + return rf(offset, limit) + } + if rf, ok := ret.Get(0).(func(int, int) []types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(offset, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(int, int) int); ok { + r1 = rf(offset, limit) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(int, int) error); ok { + r2 = rf(offset, limit) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// TxAttempts provides a mock function with given fields: offset, limit +func (_m *EvmTxStore) TxAttempts(offset int, limit int) ([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], int, error) { + ret := _m.Called(offset, limit) + + if len(ret) == 0 { + panic("no return value specified for TxAttempts") + } + + var r0 []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(int, int) ([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], int, error)); ok { + return rf(offset, limit) + } + if rf, ok := ret.Get(0).(func(int, int) []types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf(offset, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + if rf, ok := ret.Get(1).(func(int, int) int); ok { + r1 = rf(offset, limit) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(int, int) error); ok { + r2 = rf(offset, limit) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// 
UpdateBroadcastAts provides a mock function with given fields: ctx, now, etxIDs +func (_m *EvmTxStore) UpdateBroadcastAts(ctx context.Context, now time.Time, etxIDs []int64) error { + ret := _m.Called(ctx, now, etxIDs) + + if len(ret) == 0 { + panic("no return value specified for UpdateBroadcastAts") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, time.Time, []int64) error); ok { + r0 = rf(ctx, now, etxIDs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTxAttemptInProgressToBroadcast provides a mock function with given fields: ctx, etx, attempt, NewAttemptState +func (_m *EvmTxStore) UpdateTxAttemptInProgressToBroadcast(ctx context.Context, etx *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], attempt types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], NewAttemptState types.TxAttemptState) error { + ret := _m.Called(ctx, etx, attempt, NewAttemptState) + + if len(ret) == 0 { + panic("no return value specified for UpdateTxAttemptInProgressToBroadcast") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], types.TxAttemptState) error); ok { + r0 = rf(ctx, etx, attempt, NewAttemptState) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTxCallbackCompleted provides a mock function with given fields: ctx, pipelineTaskRunRid, chainId +func (_m *EvmTxStore) UpdateTxCallbackCompleted(ctx context.Context, pipelineTaskRunRid uuid.UUID, chainId *big.Int) error { + ret := _m.Called(ctx, pipelineTaskRunRid, chainId) + + if len(ret) == 0 { + panic("no return value specified for UpdateTxCallbackCompleted") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uuid.UUID, *big.Int) error); ok { + r0 = rf(ctx, pipelineTaskRunRid, 
chainId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTxFatalError provides a mock function with given fields: ctx, etx +func (_m *EvmTxStore) UpdateTxFatalError(ctx context.Context, etx *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error { + ret := _m.Called(ctx, etx) + + if len(ret) == 0 { + panic("no return value specified for UpdateTxFatalError") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error); ok { + r0 = rf(ctx, etx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTxForRebroadcast provides a mock function with given fields: ctx, etx, etxAttempt +func (_m *EvmTxStore) UpdateTxForRebroadcast(ctx context.Context, etx types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], etxAttempt types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error { + ret := _m.Called(ctx, etx, etxAttempt) + + if len(ret) == 0 { + panic("no return value specified for UpdateTxForRebroadcast") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error); ok { + r0 = rf(ctx, etx, etxAttempt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTxUnstartedToInProgress provides a mock function with given fields: ctx, etx, attempt +func (_m *EvmTxStore) UpdateTxUnstartedToInProgress(ctx context.Context, etx *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], attempt *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error { + ret := _m.Called(ctx, etx, attempt) + + if len(ret) == 0 { + panic("no return value specified for 
UpdateTxUnstartedToInProgress") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee], *types.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) error); ok { + r0 = rf(ctx, etx, attempt) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateTxsUnconfirmed provides a mock function with given fields: ctx, ids +func (_m *EvmTxStore) UpdateTxsUnconfirmed(ctx context.Context, ids []int64) error { + ret := _m.Called(ctx, ids) + + if len(ret) == 0 { + panic("no return value specified for UpdateTxsUnconfirmed") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []int64) error); ok { + r0 = rf(ctx, ids) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewEvmTxStore creates a new instance of EvmTxStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewEvmTxStore(t interface { + mock.TestingT + Cleanup(func()) +}) *EvmTxStore { + mock := &EvmTxStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/evm/txmgr/mocks/utils.go b/core/chains/evm/txmgr/mocks/utils.go new file mode 100644 index 00000000..8a09275d --- /dev/null +++ b/core/chains/evm/txmgr/mocks/utils.go @@ -0,0 +1,18 @@ +package mocks + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + + txmgrmocks "github.com/goplugin/pluginv3.0/v2/common/txmgr/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +type MockEvmTxManager = txmgrmocks.TxManager[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + +func NewMockEvmTxManager(t *testing.T) *MockEvmTxManager { + return txmgrmocks.NewTxManager[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee](t) +} diff --git a/core/chains/evm/txmgr/models.go b/core/chains/evm/txmgr/models.go new file mode 100644 index 00000000..1706695f --- /dev/null +++ b/core/chains/evm/txmgr/models.go @@ -0,0 +1,74 @@ +package txmgr + +import ( + "bytes" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + + "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" +) + +// Type aliases for EVM +type ( + Confirmer = txmgr.Confirmer[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, *evmtypes.Receipt, evmtypes.Nonce, gas.EvmFee] + Broadcaster = txmgr.Broadcaster[*big.Int, *evmtypes.Head, common.Address, 
common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + Resender = txmgr.Resender[*big.Int, common.Address, common.Hash, common.Hash, *evmtypes.Receipt, evmtypes.Nonce, gas.EvmFee] + Tracker = txmgr.Tracker[*big.Int, common.Address, common.Hash, common.Hash, *evmtypes.Receipt, evmtypes.Nonce, gas.EvmFee] + Reaper = txmgr.Reaper[*big.Int] + TxStore = txmgrtypes.TxStore[common.Address, *big.Int, common.Hash, common.Hash, *evmtypes.Receipt, evmtypes.Nonce, gas.EvmFee] + TransactionStore = txmgrtypes.TransactionStore[common.Address, *big.Int, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + KeyStore = txmgrtypes.KeyStore[common.Address, *big.Int, evmtypes.Nonce] + TxAttemptBuilder = txmgrtypes.TxAttemptBuilder[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + NonceSyncer = txmgr.SequenceSyncer[common.Address, common.Hash, common.Hash, evmtypes.Nonce] + TransmitCheckerFactory = txmgr.TransmitCheckerFactory[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + Txm = txmgr.Txm[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, *evmtypes.Receipt, evmtypes.Nonce, gas.EvmFee] + TxManager = txmgr.TxManager[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + NullTxManager = txmgr.NullTxManager[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + FwdMgr = txmgrtypes.ForwarderManager[common.Address] + TxRequest = txmgrtypes.TxRequest[common.Address, common.Hash] + Tx = txmgrtypes.Tx[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + TxMeta = txmgrtypes.TxMeta[common.Address, common.Hash] + TxAttempt = txmgrtypes.TxAttempt[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + Receipt = dbReceipt // EvmReceipt is the exported DB table model for receipts + ReceiptPlus = txmgrtypes.ReceiptPlus[*evmtypes.Receipt] + TxmClient = 
txmgrtypes.TxmClient[*big.Int, common.Address, common.Hash, common.Hash, *evmtypes.Receipt, evmtypes.Nonce, gas.EvmFee] + TransactionClient = txmgrtypes.TransactionClient[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + ChainReceipt = txmgrtypes.ChainReceipt[common.Hash, common.Hash] +) + +var _ KeyStore = (keystore.Eth)(nil) // check interface in txmgr to avoid circular import + +const ( + // TransmitCheckerTypeSimulate is a checker that simulates the transaction before executing on + // chain. + TransmitCheckerTypeSimulate = txmgrtypes.TransmitCheckerType("simulate") + + // TransmitCheckerTypeVRFV1 is a checker that will not submit VRF V1 fulfillment requests that + // have already been fulfilled. This could happen if the request was fulfilled by another node. + TransmitCheckerTypeVRFV1 = txmgrtypes.TransmitCheckerType("vrf_v1") + + // TransmitCheckerTypeVRFV2 is a checker that will not submit VRF V2 fulfillment requests that + // have already been fulfilled. This could happen if the request was fulfilled by another node. + TransmitCheckerTypeVRFV2 = txmgrtypes.TransmitCheckerType("vrf_v2") + + // TransmitCheckerTypeVRFV2Plus is a checker that will not submit VRF V2 plus fulfillment requests that + // have already been fulfilled. This could happen if the request was fulfilled by another node. 
+ TransmitCheckerTypeVRFV2Plus = txmgrtypes.TransmitCheckerType("vrf_v2plus") +) + +// GetGethSignedTx decodes the SignedRawTx into a types.Transaction struct +func GetGethSignedTx(signedRawTx []byte) (*types.Transaction, error) { + s := rlp.NewStream(bytes.NewReader(signedRawTx), 0) + signedTx := new(types.Transaction) + if err := signedTx.DecodeRLP(s); err != nil { + return nil, err + } + return signedTx, nil +} diff --git a/core/chains/evm/txmgr/nonce_syncer.go b/core/chains/evm/txmgr/nonce_syncer.go new file mode 100644 index 00000000..82250072 --- /dev/null +++ b/core/chains/evm/txmgr/nonce_syncer.go @@ -0,0 +1,106 @@ +package txmgr + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/common/txmgr" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +// NonceSyncer manages the delicate task of syncing the local nonce with the +// chain nonce in case of divergence. +// +// On startup, we check each key for the nonce value on chain and compare +// it to our local value. +// +// Usually the on-chain nonce will be the same as (or lower than) the +// highest sequence in the DB, in which case we do nothing. +// +// If we are restoring from a backup however, or another wallet has used the +// account, the chain nonce might be higher than our local one. In this +// scenario, we must fastforward the local nonce to match the chain nonce. +// +// The problem with doing this is that now Plugin does not have any +// ownership or control over potentially pending transactions with nonces +// between our local highest nonce and the chain nonce. If one of those +// transactions is pushed out of the mempool or re-org'd out of the chain, +// we run the risk of being stuck with a gap in the nonce sequence that +// will never be filled. 
+// +// The solution is to query the chain for our own transactions and take +// ownership of them by writing them to the database and letting the +// EthConfirmer handle them as it would any other transaction. +// +// This is not quite as straightforward as one might expect. We cannot +// query transactions from our account to infinite depth (geth does not +// support this). The best we can do is to query for all transactions sent +// within the past EVM.FinalityDepth blocks and find the ones sent by our +// address(es). +// +// This gives us re-org protection up to EVM.FinalityDepth deep in the +// worst case, which is in line with our other guarantees. +var _ txmgr.SequenceSyncer[common.Address, common.Hash, common.Hash, types.Nonce] = &nonceSyncerImpl{} + +type nonceSyncerImpl struct { + txStore EvmTxStore + client TxmClient + chainID *big.Int + logger logger.Logger +} + +// NewNonceSyncer returns a new syncer +func NewNonceSyncer( + txStore EvmTxStore, + lggr logger.Logger, + ethClient evmclient.Client, +) NonceSyncer { + lggr = logger.Named(lggr, "NonceSyncer") + return &nonceSyncerImpl{ + txStore: txStore, + client: NewEvmTxmClient(ethClient), + chainID: ethClient.ConfiguredChainID(), + logger: lggr, + } +} + +// Sync fast-forwards the local nonce for the given address if the on-chain nonce is ahead of it. +// +// This should only be called once per address, before the EthBroadcaster has started. +// Calling it later is not safe and could lead to races. 
+func (s nonceSyncerImpl) Sync(ctx context.Context, addr common.Address, localNonce types.Nonce) (nonce types.Nonce, err error) { + nonce, err = s.fastForwardNonceIfNecessary(ctx, addr, localNonce) + return nonce, errors.Wrap(err, "NonceSyncer#fastForwardNoncesIfNecessary failed") +} + +func (s nonceSyncerImpl) fastForwardNonceIfNecessary(ctx context.Context, address common.Address, localNonce types.Nonce) (types.Nonce, error) { + chainNonce, err := s.pendingNonceFromEthClient(ctx, address) + if err != nil { + return localNonce, errors.Wrap(err, "GetNextNonce failed to loadInitialNonceFromEthClient") + } + if chainNonce == 0 { + return localNonce, nil + } + if chainNonce <= localNonce { + return localNonce, nil + } + s.logger.Warnw(fmt.Sprintf("address %s has been used before, either by an external wallet or a different Plugin node. "+ + "Local nonce is %v but the on-chain nonce for this account was %v. "+ + "It's possible that this node was restored from a backup. If so, transactions sent by the previous node will NOT be re-org protected and in rare cases may need to be manually bumped/resubmitted. "+ + "Please note that using the plugin keys with an external wallet is NOT SUPPORTED and can lead to missed or stuck transactions. 
", + address, localNonce, chainNonce), + "address", address.String(), "localNonce", localNonce, "chainNonce", chainNonce) + + return chainNonce, nil +} + +func (s nonceSyncerImpl) pendingNonceFromEthClient(ctx context.Context, account common.Address) (types.Nonce, error) { + nextNonce, err := s.client.PendingSequenceAt(ctx, account) + return nextNonce, errors.WithStack(err) +} diff --git a/core/chains/evm/txmgr/nonce_syncer_test.go b/core/chains/evm/txmgr/nonce_syncer_test.go new file mode 100644 index 00000000..f6d9fc48 --- /dev/null +++ b/core/chains/evm/txmgr/nonce_syncer_test.go @@ -0,0 +1,114 @@ +package txmgr_test + +import ( + "testing" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func Test_NonceSyncer_Sync(t *testing.T) { + t.Parallel() + + t.Run("returns error if PendingNonceAt fails", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, from := cltest.MustInsertRandomKey(t, ethKeyStore) + + ns := txmgr.NewNonceSyncer(txStore, logger.Test(t), ethClient) + + ethClient.On("PendingNonceAt", mock.Anything, from).Return(uint64(0), errors.New("something exploded")) + _, err := ns.Sync(testutils.Context(t), from, types.Nonce(0)) + 
require.Error(t, err) + assert.Contains(t, err.Error(), "something exploded") + + cltest.AssertCount(t, db, "evm.txes", 0) + cltest.AssertCount(t, db, "evm.tx_attempts", 0) + }) + + t.Run("does nothing if chain nonce reflects local nonce", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, from := cltest.MustInsertRandomKey(t, ethKeyStore) + + ns := txmgr.NewNonceSyncer(txStore, logger.Test(t), ethClient) + + ethClient.On("PendingNonceAt", mock.Anything, from).Return(uint64(0), nil) + + nonce, err := ns.Sync(testutils.Context(t), from, 0) + require.Equal(t, nonce.Int64(), int64(0)) + require.NoError(t, err) + + cltest.AssertCount(t, db, "evm.txes", 0) + cltest.AssertCount(t, db, "evm.tx_attempts", 0) + }) + + t.Run("does nothing if chain nonce is behind local nonce", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ks := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + _, fromAddress := cltest.RandomKey{Nonce: 32}.MustInsert(t, ks) + + ns := txmgr.NewNonceSyncer(txStore, logger.Test(t), ethClient) + + // Used to mock the chain nonce + ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(5), nil) + nonce, err := ns.Sync(testutils.Context(t), fromAddress, types.Nonce(32)) + require.Equal(t, nonce.Int64(), int64(32)) + require.NoError(t, err) + + cltest.AssertCount(t, db, "evm.txes", 0) + cltest.AssertCount(t, db, "evm.tx_attempts", 0) + }) + + t.Run("fast forwards if chain nonce is ahead of local nonce", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + 
ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, key1 := cltest.MustInsertRandomKey(t, ethKeyStore) + _, key2 := cltest.RandomKey{Nonce: 32}.MustInsert(t, ethKeyStore) + + key1LocalNonce := types.Nonce(0) + key2LocalNonce := types.Nonce(32) + + ns := txmgr.NewNonceSyncer(txStore, logger.Test(t), ethClient) + + // Used to mock the chain nonce + ethClient.On("PendingNonceAt", mock.Anything, key1).Return(uint64(5), nil).Once() + ethClient.On("PendingNonceAt", mock.Anything, key2).Return(uint64(32), nil).Once() + + syncerNonce, err := ns.Sync(testutils.Context(t), key1, key1LocalNonce) + require.NoError(t, err) + require.Greater(t, syncerNonce, key1LocalNonce) + + syncerNonce, err = ns.Sync(testutils.Context(t), key2, key2LocalNonce) + require.NoError(t, err) + require.Equal(t, syncerNonce, key2LocalNonce) + }) +} diff --git a/core/chains/evm/txmgr/reaper_test.go b/core/chains/evm/txmgr/reaper_test.go new file mode 100644 index 00000000..01d32060 --- /dev/null +++ b/core/chains/evm/txmgr/reaper_test.go @@ -0,0 +1,145 @@ +package txmgr_test + +import ( + "math/big" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/utils" + + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + txmgrmocks "github.com/goplugin/pluginv3.0/v2/common/txmgr/types/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" +) + +func newReaperWithChainID(t *testing.T, db txmgrtypes.TxHistoryReaper[*big.Int], cfg txmgrtypes.ReaperChainConfig, txConfig txmgrtypes.ReaperTransactionsConfig, cid *big.Int) *txmgr.Reaper { + return 
txmgr.NewEvmReaper(logger.Test(t), db, cfg, txConfig, cid) +} + +func newReaper(t *testing.T, db txmgrtypes.TxHistoryReaper[*big.Int], cfg txmgrtypes.ReaperChainConfig, txConfig txmgrtypes.ReaperTransactionsConfig) *txmgr.Reaper { + return newReaperWithChainID(t, db, cfg, txConfig, &cltest.FixtureChainID) +} + +type reaperConfig struct { + reaperInterval time.Duration + reaperThreshold time.Duration +} + +func (r *reaperConfig) ReaperInterval() time.Duration { + return r.reaperInterval +} + +func (r *reaperConfig) ReaperThreshold() time.Duration { + return r.reaperThreshold +} + +func TestReaper_ReapTxes(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, from := cltest.MustInsertRandomKey(t, ethKeyStore) + var nonce int64 + oneDayAgo := time.Now().Add(-24 * time.Hour) + + t.Run("with nothing in the database, doesn't error", func(t *testing.T) { + config := txmgrmocks.NewReaperConfig(t) + config.On("FinalityDepth").Return(uint32(10)) + + tc := &reaperConfig{reaperThreshold: 1 * time.Hour} + + r := newReaper(t, txStore, config, tc) + + err := r.ReapTxes(42) + assert.NoError(t, err) + }) + + // Confirmed in block number 5 + mustInsertConfirmedEthTxWithReceipt(t, txStore, from, nonce, 5) + + t.Run("skips if threshold=0", func(t *testing.T) { + config := txmgrmocks.NewReaperConfig(t) + + tc := &reaperConfig{reaperThreshold: 0 * time.Second} + + r := newReaper(t, txStore, config, tc) + + err := r.ReapTxes(42) + assert.NoError(t, err) + + cltest.AssertCount(t, db, "evm.txes", 1) + }) + + t.Run("doesn't touch ethtxes with different chain ID", func(t *testing.T) { + config := txmgrmocks.NewReaperConfig(t) + config.On("FinalityDepth").Return(uint32(10)) + + tc := &reaperConfig{reaperThreshold: 1 * time.Hour} + + r := newReaperWithChainID(t, txStore, config, tc, big.NewInt(42)) + + err := 
r.ReapTxes(42) + assert.NoError(t, err) + // Didn't delete because eth_tx has chain ID of 0 + cltest.AssertCount(t, db, "evm.txes", 1) + }) + + t.Run("deletes confirmed evm.txes that exceed the age threshold with at least EVM.FinalityDepth blocks above their receipt", func(t *testing.T) { + config := txmgrmocks.NewReaperConfig(t) + config.On("FinalityDepth").Return(uint32(10)) + + tc := &reaperConfig{reaperThreshold: 1 * time.Hour} + + r := newReaper(t, txStore, config, tc) + + err := r.ReapTxes(42) + assert.NoError(t, err) + // Didn't delete because eth_tx was not old enough + cltest.AssertCount(t, db, "evm.txes", 1) + + pgtest.MustExec(t, db, `UPDATE evm.txes SET created_at=$1`, oneDayAgo) + + err = r.ReapTxes(12) + assert.NoError(t, err) + // Didn't delete because eth_tx although old enough, was still within EVM.FinalityDepth of the current head + cltest.AssertCount(t, db, "evm.txes", 1) + + err = r.ReapTxes(42) + assert.NoError(t, err) + // Now it deleted because the eth_tx was past EVM.FinalityDepth + cltest.AssertCount(t, db, "evm.txes", 0) + }) + + mustInsertFatalErrorEthTx(t, txStore, from) + + t.Run("deletes errored evm.txes that exceed the age threshold", func(t *testing.T) { + config := txmgrmocks.NewReaperConfig(t) + config.On("FinalityDepth").Return(uint32(10)) + + tc := &reaperConfig{reaperThreshold: 1 * time.Hour} + + r := newReaper(t, txStore, config, tc) + + err := r.ReapTxes(42) + assert.NoError(t, err) + // Didn't delete because eth_tx was not old enough + cltest.AssertCount(t, db, "evm.txes", 1) + + require.NoError(t, utils.JustError(db.Exec(`UPDATE evm.txes SET created_at=$1`, oneDayAgo))) + + err = r.ReapTxes(42) + assert.NoError(t, err) + // Deleted because it is old enough now + cltest.AssertCount(t, db, "evm.txes", 0) + }) +} diff --git a/core/chains/evm/txmgr/resender_test.go b/core/chains/evm/txmgr/resender_test.go new file mode 100644 index 00000000..b14d9116 --- /dev/null +++ b/core/chains/evm/txmgr/resender_test.go @@ -0,0 +1,199 @@ 
+package txmgr_test + +import ( + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func Test_EthResender_resendUnconfirmed(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logCfg := pgtest.NewQConfig(true) + lggr := logger.Test(t) + ethKeyStore := cltest.NewKeyStore(t, db, logCfg).Eth() + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) {}) + ccfg := evmtest.NewChainScopedConfig(t, cfg) + + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + _, fromAddress2 := cltest.MustInsertRandomKey(t, ethKeyStore) + _, fromAddress3 := cltest.MustInsertRandomKey(t, ethKeyStore) + + txStore := cltest.NewTestTxStore(t, db, logCfg) + + originalBroadcastAt := time.Unix(1616509100, 0) + + txConfig := ccfg.EVM().Transactions() + var addr1TxesRawHex, addr2TxesRawHex, addr3TxesRawHex []string + // fewer than EvmMaxInFlightTransactions + for i := uint32(0); i < txConfig.MaxInFlight()/2; i++ { + etx := 
cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, int64(i), fromAddress, originalBroadcastAt) + addr1TxesRawHex = append(addr1TxesRawHex, hexutil.Encode(etx.TxAttempts[0].SignedRawTx)) + } + + // exactly EvmMaxInFlightTransactions + for i := uint32(0); i < txConfig.MaxInFlight(); i++ { + etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, int64(i), fromAddress2, originalBroadcastAt) + addr2TxesRawHex = append(addr2TxesRawHex, hexutil.Encode(etx.TxAttempts[0].SignedRawTx)) + } + + // more than EvmMaxInFlightTransactions + for i := uint32(0); i < txConfig.MaxInFlight()*2; i++ { + etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, int64(i), fromAddress3, originalBroadcastAt) + addr3TxesRawHex = append(addr3TxesRawHex, hexutil.Encode(etx.TxAttempts[0].SignedRawTx)) + } + + er := txmgr.NewEvmResender(lggr, txStore, txmgr.NewEvmTxmClient(ethClient), txmgr.NewEvmTracker(txStore, ethKeyStore, big.NewInt(0), lggr), ethKeyStore, 100*time.Millisecond, ccfg.EVM(), ccfg.EVM().Transactions()) + + var resentHex = make(map[string]struct{}) + ethClient.On("BatchCallContextAll", mock.Anything, mock.MatchedBy(func(elems []rpc.BatchElem) bool { + for _, elem := range elems { + resentHex[elem.Args[0].(string)] = struct{}{} + } + assert.Len(t, elems, len(addr1TxesRawHex)+len(addr2TxesRawHex)+int(txConfig.MaxInFlight())) + // All addr1TxesRawHex should be included + for _, addr := range addr1TxesRawHex { + assert.Contains(t, resentHex, addr) + } + // All addr2TxesRawHex should be included + for _, addr := range addr2TxesRawHex { + assert.Contains(t, resentHex, addr) + } + // Up to limit EvmMaxInFlightTransactions addr3TxesRawHex should be included + for i, addr := range addr3TxesRawHex { + if i >= int(txConfig.MaxInFlight()) { + // Above limit EvmMaxInFlightTransactions addr3TxesRawHex should NOT be included + assert.NotContains(t, resentHex, addr) + } else { + assert.Contains(t, resentHex, addr) + } + } + 
return true + })).Run(func(args mock.Arguments) {}).Return(nil) + + err := er.XXXTestResendUnconfirmed() + require.NoError(t, err) +} + +func Test_EthResender_alertUnconfirmed(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + logCfg := pgtest.NewQConfig(true) + lggr, o := logger.TestObserved(t, zapcore.DebugLevel) + ethKeyStore := cltest.NewKeyStore(t, db, logCfg).Eth() + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + // Set this to the smallest non-zero value possible for the attempt to be eligible for resend + delay := commonconfig.MustNewDuration(1 * time.Nanosecond) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0] = &toml.EVMConfig{ + Chain: toml.Defaults(ubig.New(big.NewInt(0)), &toml.Chain{ + Transactions: toml.Transactions{ResendAfterThreshold: delay}, + }), + } + }) + ccfg := evmtest.NewChainScopedConfig(t, cfg) + + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + + txStore := cltest.NewTestTxStore(t, db, logCfg) + + originalBroadcastAt := time.Unix(1616509100, 0) + er := txmgr.NewEvmResender(lggr, txStore, txmgr.NewEvmTxmClient(ethClient), txmgr.NewEvmTracker(txStore, ethKeyStore, big.NewInt(0), lggr), ethKeyStore, 100*time.Millisecond, ccfg.EVM(), ccfg.EVM().Transactions()) + + t.Run("alerts only once for unconfirmed transaction attempt within the unconfirmedTxAlertDelay duration", func(t *testing.T) { + _ = cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, int64(1), fromAddress, originalBroadcastAt) + + ethClient.On("BatchCallContextAll", mock.Anything, mock.Anything).Return(nil) + + // Try to resend the same unconfirmed attempt twice within the unconfirmedTxAlertDelay to only receive one alert + err1 := er.XXXTestResendUnconfirmed() + require.NoError(t, err1) + + err2 := er.XXXTestResendUnconfirmed() + require.NoError(t, err2) + testutils.WaitForLogMessageCount(t, o, "TxAttempt has been unconfirmed for more than max duration", 1) + }) +} + +func 
Test_EthResender_Start(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + // This can be anything as long as it isn't zero + c.EVM[0].Transactions.ResendAfterThreshold = commonconfig.MustNewDuration(42 * time.Hour) + // Set batch size low to test batching + c.EVM[0].RPCDefaultBatchSize = ptr[uint32](1) + }) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + ccfg := evmtest.NewChainScopedConfig(t, cfg) + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + lggr := logger.Test(t) + + t.Run("resends transactions that have been languishing unconfirmed for too long", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + er := txmgr.NewEvmResender(lggr, txStore, txmgr.NewEvmTxmClient(ethClient), txmgr.NewEvmTracker(txStore, ethKeyStore, big.NewInt(0), lggr), ethKeyStore, 100*time.Millisecond, ccfg.EVM(), ccfg.EVM().Transactions()) + + originalBroadcastAt := time.Unix(1616509100, 0) + etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 0, fromAddress, originalBroadcastAt) + etx2 := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 1, fromAddress, originalBroadcastAt) + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 2, fromAddress, time.Now().Add(1*time.Hour)) + + // First batch of 1 + ethClient.On("BatchCallContextAll", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && + b[0].Method == "eth_sendRawTransaction" && b[0].Args[0] == hexutil.Encode(etx.TxAttempts[0].SignedRawTx) + })).Return(nil) + // Second batch of 1 + ethClient.On("BatchCallContextAll", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && + b[0].Method == "eth_sendRawTransaction" && b[0].Args[0] == hexutil.Encode(etx2.TxAttempts[0].SignedRawTx) + 
})).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // It should update BroadcastAt even if there is an error here + elems[0].Error = errors.New("kaboom") + }) + + func() { + er.Start() + defer er.Stop() + + cltest.EventuallyExpectationsMet(t, ethClient, 5*time.Second, time.Second) + }() + + var dbEtx txmgr.DbEthTx + err := db.Get(&dbEtx, `SELECT * FROM evm.txes WHERE id = $1`, etx.ID) + require.NoError(t, err) + var dbEtx2 txmgr.DbEthTx + err = db.Get(&dbEtx2, `SELECT * FROM evm.txes WHERE id = $1`, etx2.ID) + require.NoError(t, err) + + assert.Greater(t, dbEtx.BroadcastAt.Unix(), originalBroadcastAt.Unix()) + assert.Greater(t, dbEtx2.BroadcastAt.Unix(), originalBroadcastAt.Unix()) + }) +} diff --git a/core/chains/evm/txmgr/strategies_test.go b/core/chains/evm/txmgr/strategies_test.go new file mode 100644 index 00000000..366ae3bb --- /dev/null +++ b/core/chains/evm/txmgr/strategies_test.go @@ -0,0 +1,55 @@ +package txmgr_test + +import ( + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" +) + +func Test_SendEveryStrategy(t *testing.T) { + t.Parallel() + + s := txmgrcommon.SendEveryStrategy{} + + assert.Equal(t, uuid.NullUUID{}, s.Subject()) + + ids, err := s.PruneQueue(testutils.Context(t), nil) + assert.NoError(t, err) + assert.Len(t, ids, 0) +} + +func Test_DropOldestStrategy_Subject(t *testing.T) { + t.Parallel() + cfg := configtest.NewGeneralConfig(t, nil) + + subject := uuid.New() + s := txmgrcommon.NewDropOldestStrategy(subject, 1, cfg.Database().DefaultQueryTimeout()) + + assert.True(t, s.Subject().Valid) + assert.Equal(t, subject, s.Subject().UUID) +} + 
+func Test_DropOldestStrategy_PruneQueue(t *testing.T) { + t.Parallel() + cfg := configtest.NewGeneralConfig(t, nil) + subject := uuid.New() + queueSize := uint32(2) + queryTimeout := cfg.Database().DefaultQueryTimeout() + mockTxStore := mocks.NewEvmTxStore(t) + + t.Run("calls PrineUnstartedTxQueue for the given subject and queueSize, ignoring fromAddress", func(t *testing.T) { + strategy1 := txmgrcommon.NewDropOldestStrategy(subject, queueSize, queryTimeout) + mockTxStore.On("PruneUnstartedTxQueue", mock.Anything, queueSize-1, subject, mock.Anything, mock.Anything).Once().Return([]int64{1, 2}, nil) + ids, err := strategy1.PruneQueue(testutils.Context(t), mockTxStore) + require.NoError(t, err) + assert.Equal(t, []int64{1, 2}, ids) + }) +} diff --git a/core/chains/evm/txmgr/test_helpers.go b/core/chains/evm/txmgr/test_helpers.go new file mode 100644 index 00000000..90cfbe32 --- /dev/null +++ b/core/chains/evm/txmgr/test_helpers.go @@ -0,0 +1,152 @@ +package txmgr + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + + commonconfig "github.com/goplugin/pluginv3.0/v2/common/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" +) + +func ptr[T any](t T) *T { return &t } + +type TestDatabaseConfig struct { + config.Database + defaultQueryTimeout time.Duration +} + +func (d *TestDatabaseConfig) DefaultQueryTimeout() time.Duration { + return d.defaultQueryTimeout +} + +func (d *TestDatabaseConfig) LogSQL() bool { + return false +} + +type TestListenerConfig struct { + config.Listener +} + +func (l *TestListenerConfig) FallbackPollInterval() time.Duration { + return 1 * time.Minute +} + +func (d *TestDatabaseConfig) Listener() config.Listener { + return &TestListenerConfig{} +} + +type TestEvmConfig struct { + evmconfig.EVM + MaxInFlight uint32 + 
ReaperInterval time.Duration + ReaperThreshold time.Duration + ResendAfterThreshold time.Duration + BumpThreshold uint64 + MaxQueued uint64 +} + +func (e *TestEvmConfig) Transactions() evmconfig.Transactions { + return &transactionsConfig{e: e} +} + +func (e *TestEvmConfig) NonceAutoSync() bool { return true } + +func (e *TestEvmConfig) FinalityDepth() uint32 { return 42 } + +type TestGasEstimatorConfig struct { + bumpThreshold uint64 +} + +func (g *TestGasEstimatorConfig) BlockHistory() evmconfig.BlockHistory { + return &TestBlockHistoryConfig{} +} + +func (g *TestGasEstimatorConfig) EIP1559DynamicFees() bool { return false } +func (g *TestGasEstimatorConfig) LimitDefault() uint32 { return 42 } +func (g *TestGasEstimatorConfig) BumpPercent() uint16 { return 42 } +func (g *TestGasEstimatorConfig) BumpThreshold() uint64 { return g.bumpThreshold } +func (g *TestGasEstimatorConfig) BumpMin() *assets.Wei { return assets.NewWeiI(42) } +func (g *TestGasEstimatorConfig) FeeCapDefault() *assets.Wei { return assets.NewWeiI(42) } +func (g *TestGasEstimatorConfig) PriceDefault() *assets.Wei { return assets.NewWeiI(42) } +func (g *TestGasEstimatorConfig) TipCapDefault() *assets.Wei { return assets.NewWeiI(42) } +func (g *TestGasEstimatorConfig) TipCapMin() *assets.Wei { return assets.NewWeiI(42) } +func (g *TestGasEstimatorConfig) LimitMax() uint32 { return 0 } +func (g *TestGasEstimatorConfig) LimitMultiplier() float32 { return 0 } +func (g *TestGasEstimatorConfig) BumpTxDepth() uint32 { return 42 } +func (g *TestGasEstimatorConfig) LimitTransfer() uint32 { return 42 } +func (g *TestGasEstimatorConfig) PriceMax() *assets.Wei { return assets.NewWeiI(42) } +func (g *TestGasEstimatorConfig) PriceMin() *assets.Wei { return assets.NewWeiI(42) } +func (g *TestGasEstimatorConfig) Mode() string { return "FixedPrice" } +func (g *TestGasEstimatorConfig) LimitJobType() evmconfig.LimitJobType { + return &TestLimitJobTypeConfig{} +} +func (g *TestGasEstimatorConfig) PriceMaxKey(addr 
common.Address) *assets.Wei { + return assets.NewWeiI(42) +} + +func (e *TestEvmConfig) GasEstimator() evmconfig.GasEstimator { + return &TestGasEstimatorConfig{bumpThreshold: e.BumpThreshold} +} + +type TestLimitJobTypeConfig struct { +} + +func (l *TestLimitJobTypeConfig) OCR() *uint32 { return ptr(uint32(0)) } +func (l *TestLimitJobTypeConfig) OCR2() *uint32 { return ptr(uint32(0)) } +func (l *TestLimitJobTypeConfig) DR() *uint32 { return ptr(uint32(0)) } +func (l *TestLimitJobTypeConfig) FM() *uint32 { return ptr(uint32(0)) } +func (l *TestLimitJobTypeConfig) Keeper() *uint32 { return ptr(uint32(0)) } +func (l *TestLimitJobTypeConfig) VRF() *uint32 { return ptr(uint32(0)) } + +type TestBlockHistoryConfig struct { + evmconfig.BlockHistory +} + +func (b *TestBlockHistoryConfig) BatchSize() uint32 { return 42 } +func (b *TestBlockHistoryConfig) BlockDelay() uint16 { return 42 } +func (b *TestBlockHistoryConfig) BlockHistorySize() uint16 { return 42 } +func (b *TestBlockHistoryConfig) EIP1559FeeCapBufferBlocks() uint16 { return 42 } +func (b *TestBlockHistoryConfig) TransactionPercentile() uint16 { return 42 } + +type transactionsConfig struct { + evmconfig.Transactions + e *TestEvmConfig +} + +func (*transactionsConfig) ForwardersEnabled() bool { return true } +func (t *transactionsConfig) MaxInFlight() uint32 { return t.e.MaxInFlight } +func (t *transactionsConfig) MaxQueued() uint64 { return t.e.MaxQueued } +func (t *transactionsConfig) ReaperInterval() time.Duration { return t.e.ReaperInterval } +func (t *transactionsConfig) ReaperThreshold() time.Duration { return t.e.ReaperThreshold } +func (t *transactionsConfig) ResendAfterThreshold() time.Duration { return t.e.ResendAfterThreshold } + +type MockConfig struct { + EvmConfig *TestEvmConfig + RpcDefaultBatchSize uint32 + finalityDepth uint32 + finalityTagEnabled bool +} + +func (c *MockConfig) EVM() evmconfig.EVM { + return c.EvmConfig +} + +func (c *MockConfig) NonceAutoSync() bool { return true } +func (c 
*MockConfig) ChainType() commonconfig.ChainType { return "" } +func (c *MockConfig) FinalityDepth() uint32 { return c.finalityDepth } +func (c *MockConfig) SetFinalityDepth(fd uint32) { c.finalityDepth = fd } +func (c *MockConfig) FinalityTagEnabled() bool { return c.finalityTagEnabled } +func (c *MockConfig) RPCDefaultBatchSize() uint32 { return c.RpcDefaultBatchSize } + +func MakeTestConfigs(t *testing.T) (*MockConfig, *TestDatabaseConfig, *TestEvmConfig) { + db := &TestDatabaseConfig{defaultQueryTimeout: pg.DefaultQueryTimeout} + ec := &TestEvmConfig{BumpThreshold: 42, MaxInFlight: uint32(42), MaxQueued: uint64(0), ReaperInterval: time.Duration(0), ReaperThreshold: time.Duration(0)} + config := &MockConfig{EvmConfig: ec} + return config, db, ec +} diff --git a/core/chains/evm/txmgr/tracker_test.go b/core/chains/evm/txmgr/tracker_test.go new file mode 100644 index 00000000..03161ffa --- /dev/null +++ b/core/chains/evm/txmgr/tracker_test.go @@ -0,0 +1,165 @@ +package txmgr_test + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +const waitTime = 5 * time.Millisecond + +func newTestEvmTrackerSetup(t *testing.T) (*txmgr.Tracker, txmgr.TestEvmTxStore, keystore.Eth, []common.Address) { + db := pgtest.NewSqlxDB(t) + cfg := newTestChainScopedConfig(t) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + chainID := big.NewInt(0) + enabledAddresses := 
generateEnabledAddresses(t, ethKeyStore, chainID) + lggr := logger.TestLogger(t) + return txmgr.NewEvmTracker(txStore, ethKeyStore, chainID, lggr), txStore, ethKeyStore, enabledAddresses +} + +func generateEnabledAddresses(t *testing.T, keyStore keystore.Eth, chainID *big.Int) []common.Address { + var enabledAddresses []common.Address + _, addr1 := cltest.MustInsertRandomKey(t, keyStore, *ubig.NewI(chainID.Int64())) + _, addr2 := cltest.MustInsertRandomKey(t, keyStore, *ubig.NewI(chainID.Int64())) + enabledAddresses = append(enabledAddresses, addr1, addr2) + return enabledAddresses +} + +func containsID(txes []*txmgr.Tx, id int64) bool { + for _, tx := range txes { + if tx.ID == id { + return true + } + } + return false +} + +func TestEvmTracker_Initialization(t *testing.T) { + t.Skip("BCI-2638 tracker disabled") + t.Parallel() + + tracker, _, _, _ := newTestEvmTrackerSetup(t) + + err := tracker.Start(context.Background()) + require.NoError(t, err) + require.True(t, tracker.IsStarted()) + + t.Run("stop tracker", func(t *testing.T) { + err := tracker.Close() + require.NoError(t, err) + require.False(t, tracker.IsStarted()) + }) +} + +func TestEvmTracker_AddressTracking(t *testing.T) { + t.Skip("BCI-2638 tracker disabled") + t.Parallel() + + t.Run("track abandoned addresses", func(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + tracker, txStore, _, _ := newTestEvmTrackerSetup(t) + inProgressAddr := cltest.MustGenerateRandomKey(t).Address + unstartedAddr := cltest.MustGenerateRandomKey(t).Address + unconfirmedAddr := cltest.MustGenerateRandomKey(t).Address + confirmedAddr := cltest.MustGenerateRandomKey(t).Address + _ = mustInsertInProgressEthTxWithAttempt(t, txStore, 123, inProgressAddr) + _ = cltest.MustInsertUnconfirmedEthTx(t, txStore, 123, unconfirmedAddr) + _ = mustInsertConfirmedEthTxWithReceipt(t, txStore, confirmedAddr, 123, 1) + _ = mustCreateUnstartedTx(t, txStore, unstartedAddr, cltest.MustGenerateRandomKey(t).Address, 
[]byte{}, 0, big.Int{}, ethClient.ConfiguredChainID()) + + err := tracker.Start(context.Background()) + require.NoError(t, err) + defer func(tracker *txmgr.Tracker) { + err = tracker.Close() + require.NoError(t, err) + }(tracker) + + addrs := tracker.GetAbandonedAddresses() + require.NotContains(t, addrs, inProgressAddr) + require.NotContains(t, addrs, unstartedAddr) + require.Contains(t, addrs, confirmedAddr) + require.Contains(t, addrs, unconfirmedAddr) + }) + + t.Run("stop tracking finalized tx", func(t *testing.T) { + t.Skip("BCI-2638 tracker disabled") + tracker, txStore, _, _ := newTestEvmTrackerSetup(t) + confirmedAddr := cltest.MustGenerateRandomKey(t).Address + _ = mustInsertConfirmedEthTxWithReceipt(t, txStore, confirmedAddr, 123, 1) + + err := tracker.Start(context.Background()) + require.NoError(t, err) + defer func(tracker *txmgr.Tracker) { + err = tracker.Close() + require.NoError(t, err) + }(tracker) + + addrs := tracker.GetAbandonedAddresses() + require.Contains(t, addrs, confirmedAddr) + + // deliver block past minConfirmations to finalize tx + tracker.XXXDeliverBlock(10) + time.Sleep(waitTime) + + addrs = tracker.GetAbandonedAddresses() + require.NotContains(t, addrs, confirmedAddr) + }) +} + +func TestEvmTracker_ExceedingTTL(t *testing.T) { + t.Skip("BCI-2638 tracker disabled") + t.Parallel() + + t.Run("confirmed but unfinalized transaction still tracked", func(t *testing.T) { + tracker, txStore, _, _ := newTestEvmTrackerSetup(t) + addr1 := cltest.MustGenerateRandomKey(t).Address + _ = mustInsertConfirmedEthTxWithReceipt(t, txStore, addr1, 123, 1) + + err := tracker.Start(context.Background()) + require.NoError(t, err) + defer func(tracker *txmgr.Tracker) { + err = tracker.Close() + require.NoError(t, err) + }(tracker) + + require.Contains(t, tracker.GetAbandonedAddresses(), addr1) + }) + + t.Run("exceeding ttl", func(t *testing.T) { + tracker, txStore, _, _ := newTestEvmTrackerSetup(t) + addr1 := cltest.MustGenerateRandomKey(t).Address + addr2 
:= cltest.MustGenerateRandomKey(t).Address + tx1 := mustInsertInProgressEthTxWithAttempt(t, txStore, 123, addr1) + tx2 := cltest.MustInsertUnconfirmedEthTx(t, txStore, 123, addr2) + + tracker.XXXTestSetTTL(time.Nanosecond) + err := tracker.Start(context.Background()) + require.NoError(t, err) + defer func(tracker *txmgr.Tracker) { + err = tracker.Close() + require.NoError(t, err) + }(tracker) + + time.Sleep(waitTime) + require.NotContains(t, tracker.GetAbandonedAddresses(), addr1, addr2) + + fatalTxes, err := txStore.GetFatalTransactions(context.Background()) + require.NoError(t, err) + require.True(t, containsID(fatalTxes, tx1.ID)) + require.True(t, containsID(fatalTxes, tx2.ID)) + }) +} diff --git a/core/chains/evm/txmgr/transmitchecker.go b/core/chains/evm/txmgr/transmitchecker.go new file mode 100644 index 00000000..f3e5b3f9 --- /dev/null +++ b/core/chains/evm/txmgr/transmitchecker.go @@ -0,0 +1,361 @@ +package txmgr + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + gethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/logger" + bigmath "github.com/goplugin/plugin-common/pkg/utils/big_math" + "github.com/goplugin/plugin-common/pkg/utils/bytes" + + "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + v1 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" + v2 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface" +) + +type ( + TransmitChecker = txmgr.TransmitChecker[*big.Int, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + TransmitCheckerSpec = txmgrtypes.TransmitCheckerSpec[common.Address] +) + +var ( + // NoChecker is a TransmitChecker that always determines a transaction should be submitted. + NoChecker TransmitChecker = noChecker{} + + _ TransmitCheckerFactory = &CheckerFactory{} + _ TransmitChecker = &SimulateChecker{} + _ TransmitChecker = &VRFV1Checker{} + _ TransmitChecker = &VRFV2Checker{} +) + +// CheckerFactory is a real implementation of TransmitCheckerFactory. +type CheckerFactory struct { + Client evmclient.Client +} + +// BuildChecker satisfies the TransmitCheckerFactory interface. +func (c *CheckerFactory) BuildChecker(spec TransmitCheckerSpec) (TransmitChecker, error) { + switch spec.CheckerType { + case TransmitCheckerTypeSimulate: + return &SimulateChecker{c.Client}, nil + case TransmitCheckerTypeVRFV1: + if spec.VRFCoordinatorAddress == nil { + return nil, errors.Errorf("malformed checker, expected non-nil VRFCoordinatorAddress, got: %v", spec) + } + coord, err := v1.NewVRFCoordinator(*spec.VRFCoordinatorAddress, c.Client) + if err != nil { + return nil, errors.Wrapf(err, + "failed to create VRF V1 coordinator at address %v", spec.VRFCoordinatorAddress) + } + return &VRFV1Checker{ + Callbacks: coord.Callbacks, + Client: c.Client, + }, nil + case TransmitCheckerTypeVRFV2: + if spec.VRFCoordinatorAddress == nil { + return nil, errors.Errorf("malformed checker, expected non-nil VRFCoordinatorAddress, got: %v", spec) + } + coord, err := v2.NewVRFCoordinatorV2(*spec.VRFCoordinatorAddress, c.Client) + if err != nil { + return nil, errors.Wrapf(err, + "failed to create VRF V2 coordinator at address %v", spec.VRFCoordinatorAddress) + } + if spec.VRFRequestBlockNumber == nil { + return nil, errors.New("VRFRequestBlockNumber parameter must be 
non-nil") + } + return &VRFV2Checker{ + GetCommitment: coord.GetCommitment, + HeadByNumber: c.Client.HeadByNumber, + RequestBlockNumber: spec.VRFRequestBlockNumber, + }, nil + case TransmitCheckerTypeVRFV2Plus: + if spec.VRFCoordinatorAddress == nil { + return nil, errors.Errorf("malformed checker, expected non-nil VRFCoordinatorAddress, got: %v", spec) + } + coord, err := vrf_coordinator_v2plus_interface.NewIVRFCoordinatorV2PlusInternal(*spec.VRFCoordinatorAddress, c.Client) + if err != nil { + return nil, errors.Wrapf(err, + "failed to create VRF V2 coordinator plus at address %v", spec.VRFCoordinatorAddress) + } + if spec.VRFRequestBlockNumber == nil { + return nil, errors.New("VRFRequestBlockNumber parameter must be non-nil") + } + return &VRFV2Checker{ + GetCommitment: coord.SRequestCommitments, + HeadByNumber: c.Client.HeadByNumber, + RequestBlockNumber: spec.VRFRequestBlockNumber, + }, nil + case "": + return NoChecker, nil + default: + return nil, errors.Errorf("unrecognized checker type: %s", spec.CheckerType) + } +} + +type noChecker struct{} + +// Check satisfies the TransmitChecker interface. +func (noChecker) Check( + _ context.Context, + _ logger.SugaredLogger, + _ Tx, + _ TxAttempt, +) error { + return nil +} + +// SimulateChecker simulates transactions, producing an error if they revert on chain. +type SimulateChecker struct { + Client evmclient.Client +} + +// Check satisfies the TransmitChecker interface. +func (s *SimulateChecker) Check( + ctx context.Context, + l logger.SugaredLogger, + tx Tx, + a TxAttempt, +) error { + // See: https://github.com/ethereum/go-ethereum/blob/acdf9238fb03d79c9b1c20c2fa476a7e6f4ac2ac/ethclient/gethclient/gethclient.go#L193 + callArg := map[string]interface{}{ + "from": tx.FromAddress, + "to": &tx.ToAddress, + "gas": hexutil.Uint64(a.ChainSpecificFeeLimit), + // NOTE: Deliberately do not include gas prices. We never want to fatally error a + // transaction just because the wallet has insufficient eth. 
+ // Relevant info regarding EIP1559 transactions: https://github.com/ethereum/go-ethereum/pull/23027 + "gasPrice": nil, + "maxFeePerGas": nil, + "maxPriorityFeePerGas": nil, + "value": (*hexutil.Big)(&tx.Value), + "data": hexutil.Bytes(tx.EncodedPayload), + } + var b hexutil.Bytes + // always run simulation on "latest" block + err := s.Client.CallContext(ctx, &b, "eth_call", callArg, evmclient.ToBlockNumArg(nil)) + if err != nil { + if jErr := evmclient.ExtractRPCErrorOrNil(err); jErr != nil { + l.Criticalw("Transaction reverted during simulation", + "ethTxAttemptID", a.ID, "txHash", a.Hash, "err", err, "rpcErr", jErr.String(), "returnValue", b.String()) + return errors.Errorf("transaction reverted during simulation: %s", jErr.String()) + } + l.Warnw("Transaction simulation failed, will attempt to send anyway", + "ethTxAttemptID", a.ID, "txHash", a.Hash, "err", err, "returnValue", b.String()) + } else { + l.Debugw("Transaction simulation succeeded", + "ethTxAttemptID", a.ID, "txHash", a.Hash, "returnValue", b.String()) + } + return nil +} + +// VRFV1Checker is an implementation of TransmitChecker that checks whether a VRF V1 fulfillment +// has already been fulfilled. +type VRFV1Checker struct { + + // Callbacks checks whether a VRF V1 request has already been fulfilled on the VRFCoordinator + // Solidity contract + Callbacks func(opts *bind.CallOpts, reqID [32]byte) (v1.Callbacks, error) + + Client evmclient.Client +} + +// Check satisfies the TransmitChecker interface. +func (v *VRFV1Checker) Check( + ctx context.Context, + l logger.SugaredLogger, + tx Tx, + _ TxAttempt, +) error { + meta, err := tx.GetMeta() + if err != nil { + l.Errorw("Failed to parse transaction meta. Attempting to transmit anyway.", + "err", err, + "ethTxID", tx.ID, + "meta", tx.Meta) + return nil + } + + if meta == nil { + l.Errorw("Expected a non-nil meta for a VRF transaction. 
Attempting to transmit anyway.", + "err", err, + "ethTxID", tx.ID, + "meta", tx.Meta) + return nil + } + + if len(meta.RequestID.Bytes()) != 32 { + l.Errorw("Unexpected request ID. Attempting to transmit anyway.", + "err", err, + "ethTxID", tx.ID, + "meta", tx.Meta) + return nil + } + + if meta.RequestTxHash == nil { + l.Errorw("Request tx hash is nil. Attempting to transmit anyway.", + "err", err, + "ethTxID", tx.ID, + "meta", tx.Meta) + return nil + } + + // Construct and execute batch call to retrieve most the recent block number and the + // block number of the request transaction. + mostRecentHead := &types.Head{} + requestTransactionReceipt := &gethtypes.Receipt{} + batch := []rpc.BatchElem{{ + Method: "eth_getBlockByNumber", + Args: []interface{}{"latest", false}, + Result: mostRecentHead, + }, { + Method: "eth_getTransactionReceipt", + Args: []interface{}{*meta.RequestTxHash}, + Result: requestTransactionReceipt, + }} + err = v.Client.BatchCallContext(ctx, batch) + if err != nil { + l.Errorw("Failed to fetch latest header and transaction receipt. Attempting to transmit anyway.", + "err", err, + "ethTxID", tx.ID, + "meta", tx.Meta, + ) + return nil + } + + // Subtract 5 since the newest block likely isn't indexed yet and will cause "header not found" + // errors. + latest := new(big.Int).Sub(big.NewInt(mostRecentHead.Number), big.NewInt(5)) + blockNumber := bigmath.Max(latest, requestTransactionReceipt.BlockNumber) + var reqID [32]byte + copy(reqID[:], meta.RequestID.Bytes()) + callback, err := v.Callbacks(&bind.CallOpts{ + Context: ctx, + BlockNumber: blockNumber, + }, reqID) + if err != nil { + l.Errorw("Unable to check if already fulfilled. 
Attempting to transmit anyway.", + "err", err, + "ethTxID", tx.ID, + "meta", tx.Meta, + "reqID", reqID) + return nil + } else if bytes.IsEmpty(callback.SeedAndBlockNum[:]) { + // Request already fulfilled + l.Infow("Request already fulfilled", + "err", err, + "ethTxID", tx.ID, + "meta", tx.Meta, + "reqID", reqID) + return errors.New("request already fulfilled") + } + // Request not fulfilled + return nil +} + +// VRFV2Checker is an implementation of TransmitChecker that checks whether a VRF V2 fulfillment +// has already been fulfilled. +type VRFV2Checker struct { + + // GetCommitment checks whether a VRF V2 request has been fulfilled on the VRFCoordinatorV2 + // Solidity contract. + GetCommitment func(opts *bind.CallOpts, requestID *big.Int) ([32]byte, error) + + // HeadByNumber fetches the head given the number. If nil is provided, + // the latest header is fetched. + HeadByNumber func(ctx context.Context, n *big.Int) (*types.Head, error) + + // RequestBlockNumber is the block number of the VRFV2 request. + RequestBlockNumber *big.Int +} + +// Check satisfies the TransmitChecker interface. +func (v *VRFV2Checker) Check( + ctx context.Context, + l logger.SugaredLogger, + tx Tx, + _ TxAttempt, +) error { + meta, err := tx.GetMeta() + if err != nil { + l.Errorw("Failed to parse transaction meta. Attempting to transmit anyway.", + "err", err, + "ethTxID", tx.ID, + "meta", tx.Meta) + return nil + } + + if meta == nil { + l.Errorw("Expected a non-nil meta for a VRF transaction. Attempting to transmit anyway.", + "err", err, + "ethTxID", tx.ID, + "meta", tx.Meta) + return nil + } + + h, err := v.HeadByNumber(ctx, nil) + if err != nil { + l.Errorw("Failed to fetch latest header. Attempting to transmit anyway.", + "err", err, + "ethTxID", tx.ID, + "meta", tx.Meta, + ) + return nil + } + + // If the request block number is not provided, transmit anyway just to be safe. + // Worst we can do is revert due to the request already being fulfilled. 
+ if v.RequestBlockNumber == nil { + l.Errorw("Was provided with a nil request block number. Attempting to transmit anyway.", + "ethTxID", tx.ID, + "meta", tx.Meta, + ) + return nil + } + + vrfRequestID := meta.RequestID.Big() + + // Subtract 5 since the newest block likely isn't indexed yet and will cause "header not found" + // errors. + latest := new(big.Int).Sub(big.NewInt(h.Number), big.NewInt(5)) + blockNumber := bigmath.Max(latest, v.RequestBlockNumber) + callback, err := v.GetCommitment(&bind.CallOpts{ + Context: ctx, + BlockNumber: blockNumber, + }, vrfRequestID) + if err != nil { + l.Errorw("Failed to check request fulfillment status, error calling GetCommitment. Attempting to transmit anyway.", + "err", err, + "ethTxID", tx.ID, + "meta", tx.Meta, + "vrfRequestId", vrfRequestID, + "blockNumber", h.Number, + ) + return nil + } else if bytes.IsEmpty(callback[:]) { + // If seedAndBlockNumber is zero then the response has been fulfilled and we should skip it. + l.Infow("Request already fulfilled.", + "ethTxID", tx.ID, + "meta", tx.Meta, + "vrfRequestId", vrfRequestID) + return errors.New("request already fulfilled") + } + l.Debugw("Request not yet fulfilled", + "ethTxID", tx.ID, + "meta", tx.Meta, + "vrfRequestId", vrfRequestID) + return nil + +} diff --git a/core/chains/evm/txmgr/transmitchecker_test.go b/core/chains/evm/txmgr/transmitchecker_test.go new file mode 100644 index 00000000..42c41fe4 --- /dev/null +++ b/core/chains/evm/txmgr/transmitchecker_test.go @@ -0,0 +1,375 @@ +package txmgr_test + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + "github.com/stretchr/testify/mock" + + "github.com/goplugin/plugin-common/pkg/logger" + + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + evmtypes 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/sqlutil" + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + v1 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" +) + +func TestFactory(t *testing.T) { + client := cltest.NewEthMocksWithDefaultChain(t) + factory := &txmgr.CheckerFactory{Client: client} + + t.Run("no checker", func(t *testing.T) { + c, err := factory.BuildChecker(txmgr.TransmitCheckerSpec{}) + require.NoError(t, err) + require.Equal(t, txmgr.NoChecker, c) + }) + + t.Run("vrf v1 checker", func(t *testing.T) { + c, err := factory.BuildChecker(txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeVRFV1, + VRFCoordinatorAddress: testutils.NewAddressPtr(), + }) + require.NoError(t, err) + require.IsType(t, &txmgr.VRFV1Checker{}, c) + }) + + t.Run("vrf v2 checker", func(t *testing.T) { + c, err := factory.BuildChecker(txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeVRFV2, + VRFCoordinatorAddress: testutils.NewAddressPtr(), + VRFRequestBlockNumber: big.NewInt(1), + }) + require.NoError(t, err) + require.IsType(t, &txmgr.VRFV2Checker{}, c) + + // request block number not provided should error out. 
+ c, err = factory.BuildChecker(txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeVRFV2, + VRFCoordinatorAddress: testutils.NewAddressPtr(), + }) + require.Error(t, err) + require.Nil(t, c) + }) + + t.Run("vrf v2 plus checker", func(t *testing.T) { + c, err := factory.BuildChecker(txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeVRFV2Plus, + VRFCoordinatorAddress: testutils.NewAddressPtr(), + VRFRequestBlockNumber: big.NewInt(1), + }) + require.NoError(t, err) + require.IsType(t, &txmgr.VRFV2Checker{}, c) + + // request block number not provided should error out. + c, err = factory.BuildChecker(txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeVRFV2Plus, + VRFCoordinatorAddress: testutils.NewAddressPtr(), + }) + require.Error(t, err) + require.Nil(t, c) + }) + + t.Run("simulate checker", func(t *testing.T) { + c, err := factory.BuildChecker(txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeSimulate, + }) + require.NoError(t, err) + require.Equal(t, &txmgr.SimulateChecker{Client: client}, c) + }) + + t.Run("invalid checker type", func(t *testing.T) { + _, err := factory.BuildChecker(txmgr.TransmitCheckerSpec{ + CheckerType: "invalid", + }) + require.EqualError(t, err, "unrecognized checker type: invalid") + }) +} + +func TestTransmitCheckers(t *testing.T) { + client := evmtest.NewEthClientMockWithDefaultChain(t) + log := logger.Sugared(logger.Test(t)) + ctx := testutils.Context(t) + + t.Run("no checker", func(t *testing.T) { + checker := txmgr.NoChecker + require.NoError(t, checker.Check(ctx, log, txmgr.Tx{}, txmgr.TxAttempt{})) + }) + + t.Run("simulate", func(t *testing.T) { + checker := txmgr.SimulateChecker{Client: client} + + tx := txmgr.Tx{ + FromAddress: common.HexToAddress("0xfe0629509E6CB8dfa7a99214ae58Ceb465d5b5A9"), + ToAddress: common.HexToAddress("0xff0Aac13eab788cb9a2D662D3FB661Aa5f58FA21"), + EncodedPayload: []byte{42, 0, 0}, + Value: big.Int(assets.NewEthValue(642)), + FeeLimit: 1e9, 
+ CreatedAt: time.Unix(0, 0), + State: txmgrcommon.TxUnstarted, + } + attempt := txmgr.TxAttempt{ + Tx: tx, + Hash: common.Hash{}, + CreatedAt: tx.CreatedAt, + State: txmgrtypes.TxAttemptInProgress, + } + + t.Run("success", func(t *testing.T) { + client.On("CallContext", mock.Anything, + mock.AnythingOfType("*hexutil.Bytes"), "eth_call", + mock.MatchedBy(func(callarg map[string]interface{}) bool { + return fmt.Sprintf("%s", callarg["value"]) == "0x282" // 642 + }), "latest").Return(nil).Once() + + require.NoError(t, checker.Check(ctx, log, tx, attempt)) + }) + + t.Run("revert", func(t *testing.T) { + jerr := evmclient.JsonError{ + Code: 42, + Message: "oh no, it reverted", + Data: []byte{42, 166, 34}, + } + client.On("CallContext", mock.Anything, + mock.AnythingOfType("*hexutil.Bytes"), "eth_call", + mock.MatchedBy(func(callarg map[string]interface{}) bool { + return fmt.Sprintf("%s", callarg["value"]) == "0x282" // 642 + }), "latest").Return(&jerr).Once() + + err := checker.Check(ctx, log, tx, attempt) + expErrMsg := "transaction reverted during simulation: json-rpc error { Code = 42, Message = 'oh no, it reverted', Data = 'KqYi' }" + require.EqualError(t, err, expErrMsg) + }) + + t.Run("non revert error", func(t *testing.T) { + client.On("CallContext", mock.Anything, + mock.AnythingOfType("*hexutil.Bytes"), "eth_call", + mock.MatchedBy(func(callarg map[string]interface{}) bool { + return fmt.Sprintf("%s", callarg["value"]) == "0x282" // 642 + }), "latest").Return(errors.New("error")).Once() + + // Non-revert errors are logged but should not prevent transmission, and do not need + // to be passed to the caller + require.NoError(t, checker.Check(ctx, log, tx, attempt)) + }) + }) + + t.Run("VRF V1", func(t *testing.T) { + testDefaultSubID := uint64(2) + testDefaultMaxLink := "1000000000000000000" + + txRequest := func(t *testing.T, vrfReqID [32]byte, nilTxHash bool) (txmgr.Tx, txmgr.TxAttempt) { + h := common.BytesToHash(vrfReqID[:]) + txHash := common.Hash{} + meta 
:= txmgr.TxMeta{ + RequestID: &h, + MaxLink: &testDefaultMaxLink, // 1 PLI + SubID: &testDefaultSubID, + RequestTxHash: &txHash, + } + + if nilTxHash { + meta.RequestTxHash = nil + } + + b, err := json.Marshal(meta) + require.NoError(t, err) + metaJson := sqlutil.JSON(b) + + tx := txmgr.Tx{ + FromAddress: common.HexToAddress("0xfe0629509E6CB8dfa7a99214ae58Ceb465d5b5A9"), + ToAddress: common.HexToAddress("0xff0Aac13eab788cb9a2D662D3FB661Aa5f58FA21"), + EncodedPayload: []byte{42, 0, 0}, + Value: big.Int(assets.NewEthValue(642)), + FeeLimit: 1e9, + CreatedAt: time.Unix(0, 0), + State: txmgrcommon.TxUnstarted, + Meta: &metaJson, + } + return tx, txmgr.TxAttempt{ + Tx: tx, + Hash: common.Hash{}, + CreatedAt: tx.CreatedAt, + State: txmgrtypes.TxAttemptInProgress, + } + } + + r1 := [32]byte{1} + r2 := [32]byte{2} + r3 := [32]byte{3} + + checker := txmgr.VRFV1Checker{ + Callbacks: func(opts *bind.CallOpts, reqID [32]byte) (v1.Callbacks, error) { + if opts.BlockNumber.Cmp(big.NewInt(6)) != 0 { + // Ensure correct logic is applied to get callbacks. 
+ return v1.Callbacks{}, errors.New("error getting callback") + } + if reqID == r1 { + // Request 1 is already fulfilled + return v1.Callbacks{ + SeedAndBlockNum: [32]byte{}, + }, nil + } else if reqID == r2 { + // Request 2 errors + return v1.Callbacks{}, errors.New("error getting commitment") + } + return v1.Callbacks{ + SeedAndBlockNum: [32]byte{1}, + }, nil + }, + Client: client, + } + + mockBatch := client.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && b[0].Method == "eth_getBlockByNumber" && b[1].Method == "eth_getTransactionReceipt" + })).Return(nil).Run(func(args mock.Arguments) { + batch := args.Get(1).([]rpc.BatchElem) + + // Return block 10 for eth_getBlockByNumber + mostRecentHead := batch[0].Result.(*evmtypes.Head) + mostRecentHead.Number = 10 + + // Return block 6 for eth_getTransactionReceipt + requestTransactionReceipt := batch[1].Result.(*types.Receipt) + requestTransactionReceipt.BlockNumber = big.NewInt(6) + }) + + t.Run("already fulfilled", func(t *testing.T) { + tx, attempt := txRequest(t, r1, false) + err := checker.Check(ctx, log, tx, attempt) + require.Error(t, err, "request already fulfilled") + }) + + t.Run("nil RequestTxHash", func(t *testing.T) { + tx, attempt := txRequest(t, r1, true) + err := checker.Check(ctx, log, tx, attempt) + require.NoError(t, err) + }) + + t.Run("not fulfilled", func(t *testing.T) { + tx, attempt := txRequest(t, r3, false) + require.NoError(t, checker.Check(ctx, log, tx, attempt)) + }) + + t.Run("error checking fulfillment, should transmit", func(t *testing.T) { + tx, attempt := txRequest(t, r2, false) + require.NoError(t, checker.Check(ctx, log, tx, attempt)) + }) + + t.Run("failure fetching tx receipt and block head", func(t *testing.T) { + tx, attempt := txRequest(t, r1, false) + mockBatch.Return(errors.New("could not fetch")) + err := checker.Check(ctx, log, tx, attempt) + require.NoError(t, err) + }) + }) + + t.Run("VRF V2", func(t *testing.T) { + 
testDefaultSubID := uint64(2) + testDefaultMaxLink := "1000000000000000000" + + txRequest := func(t *testing.T, vrfReqID *big.Int) (txmgr.Tx, txmgr.TxAttempt) { + h := common.BytesToHash(vrfReqID.Bytes()) + meta := txmgr.TxMeta{ + RequestID: &h, + MaxLink: &testDefaultMaxLink, // 1 PLI + SubID: &testDefaultSubID, + } + + b, err := json.Marshal(meta) + require.NoError(t, err) + metaJson := sqlutil.JSON(b) + + tx := txmgr.Tx{ + FromAddress: common.HexToAddress("0xfe0629509E6CB8dfa7a99214ae58Ceb465d5b5A9"), + ToAddress: common.HexToAddress("0xff0Aac13eab788cb9a2D662D3FB661Aa5f58FA21"), + EncodedPayload: []byte{42, 0, 0}, + Value: big.Int(assets.NewEthValue(642)), + FeeLimit: 1e9, + CreatedAt: time.Unix(0, 0), + State: txmgrcommon.TxUnstarted, + Meta: &metaJson, + } + return tx, txmgr.TxAttempt{ + Tx: tx, + Hash: common.Hash{}, + CreatedAt: tx.CreatedAt, + State: txmgrtypes.TxAttemptInProgress, + } + } + + checker := txmgr.VRFV2Checker{ + GetCommitment: func(_ *bind.CallOpts, requestID *big.Int) ([32]byte, error) { + if requestID.String() == "1" { + // Request 1 is already fulfilled + return [32]byte{}, nil + } else if requestID.String() == "2" { + // Request 2 errors + return [32]byte{}, errors.New("error getting commitment") + } + // All other requests are unfulfilled + return [32]byte{1}, nil + }, + HeadByNumber: func(ctx context.Context, n *big.Int) (*evmtypes.Head, error) { + return &evmtypes.Head{ + Number: 1, + }, nil + }, + RequestBlockNumber: big.NewInt(1), + } + + t.Run("already fulfilled", func(t *testing.T) { + tx, attempt := txRequest(t, big.NewInt(1)) + err := checker.Check(ctx, log, tx, attempt) + require.Error(t, err, "request already fulfilled") + }) + + t.Run("not fulfilled", func(t *testing.T) { + tx, attempt := txRequest(t, big.NewInt(3)) + require.NoError(t, checker.Check(ctx, log, tx, attempt)) + }) + + t.Run("error checking fulfillment, should transmit", func(t *testing.T) { + tx, attempt := txRequest(t, big.NewInt(2)) + require.NoError(t, 
checker.Check(ctx, log, tx, attempt)) + }) + + t.Run("can't get header", func(t *testing.T) { + checker.HeadByNumber = func(ctx context.Context, n *big.Int) (*evmtypes.Head, error) { + return nil, errors.New("can't get head") + } + tx, attempt := txRequest(t, big.NewInt(3)) + require.NoError(t, checker.Check(ctx, log, tx, attempt)) + }) + + t.Run("nil request block number", func(t *testing.T) { + checker.HeadByNumber = func(ctx context.Context, n *big.Int) (*evmtypes.Head, error) { + return &evmtypes.Head{ + Number: 1, + }, nil + } + checker.RequestBlockNumber = nil + tx, attempt := txRequest(t, big.NewInt(4)) + require.NoError(t, checker.Check(ctx, log, tx, attempt)) + }) + }) +} diff --git a/core/chains/evm/txmgr/txmgr_test.go b/core/chains/evm/txmgr/txmgr_test.go new file mode 100644 index 00000000..955e0e2a --- /dev/null +++ b/core/chains/evm/txmgr/txmgr_test.go @@ -0,0 +1,830 @@ +package txmgr_test + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + commonutils "github.com/goplugin/plugin-common/pkg/utils" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + commontxmmocks "github.com/goplugin/pluginv3.0/v2/common/txmgr/types/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" 
+ "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + ksmocks "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +func makeTestEvmTxm( + t *testing.T, db *sqlx.DB, ethClient evmclient.Client, estimator gas.EvmFeeEstimator, ccfg txmgr.ChainConfig, fcfg txmgr.FeeConfig, txConfig evmconfig.Transactions, dbConfig txmgr.DatabaseConfig, listenerConfig txmgr.ListenerConfig, keyStore keystore.Eth) (txmgr.TxManager, error) { + lggr := logger.Test(t) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) + + // logic for building components (from evm/evm_txm.go) ------- + lggr.Infow("Initializing EVM transaction manager", + "bumpTxDepth", fcfg.BumpTxDepth(), + "maxInFlightTransactions", txConfig.MaxInFlight(), + "maxQueuedTransactions", txConfig.MaxQueued(), + "nonceAutoSync", ccfg.NonceAutoSync(), + "limitDefault", fcfg.LimitDefault(), + ) + + return txmgr.NewTxm( + db, + ccfg, + fcfg, + txConfig, + dbConfig, + listenerConfig, + ethClient, + lggr, + lp, + keyStore, + estimator) +} + +func TestTxm_SendNativeToken_DoesNotSendToZero(t *testing.T) { + t.Parallel() + db := 
pgtest.NewSqlxDB(t) + + from := utils.ZeroAddress + to := utils.ZeroAddress + value := assets.NewEth(1).ToInt() + + config, dbConfig, evmConfig := txmgr.MakeTestConfigs(t) + + keyStore := cltest.NewKeyStore(t, db, dbConfig).Eth() + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + estimator := gas.NewEstimator(logger.Test(t), ethClient, config, evmConfig.GasEstimator()) + txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), keyStore) + require.NoError(t, err) + + _, err = txm.SendNativeToken(testutils.Context(t), big.NewInt(0), from, to, *value, 21000) + require.Error(t, err) + require.EqualError(t, err, "cannot send native token to zero address") +} + +func TestTxm_CreateTransaction(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + kst := cltest.NewKeyStore(t, db, cfg.Database()) + + _, fromAddress := cltest.MustInsertRandomKey(t, kst.Eth()) + toAddress := testutils.NewAddress() + gasLimit := uint32(1000) + payload := []byte{1, 2, 3} + + config, dbConfig, evmConfig := txmgr.MakeTestConfigs(t) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + + estimator := gas.NewEstimator(logger.Test(t), ethClient, config, evmConfig.GasEstimator()) + txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), kst.Eth()) + require.NoError(t, err) + + t.Run("with queue under capacity inserts eth_tx", func(t *testing.T) { + subject := uuid.New() + strategy := newMockTxStrategy(t) + strategy.On("Subject").Return(uuid.NullUUID{UUID: subject, Valid: true}) + strategy.On("PruneQueue", mock.Anything, mock.Anything).Return(nil, nil) + evmConfig.MaxQueued = uint64(1) + etx, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: fromAddress, + 
ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + Meta: nil, + Strategy: strategy, + }) + assert.NoError(t, err) + assert.Greater(t, etx.ID, int64(0)) + assert.Equal(t, etx.State, txmgrcommon.TxUnstarted) + assert.Equal(t, gasLimit, etx.FeeLimit) + assert.Equal(t, fromAddress, etx.FromAddress) + assert.Equal(t, toAddress, etx.ToAddress) + assert.Equal(t, payload, etx.EncodedPayload) + assert.Equal(t, big.Int(assets.NewEthValue(0)), etx.Value) + assert.Equal(t, subject, etx.Subject.UUID) + + cltest.AssertCount(t, db, "evm.txes", 1) + + var dbEtx txmgr.DbEthTx + require.NoError(t, db.Get(&dbEtx, `SELECT * FROM evm.txes ORDER BY id ASC LIMIT 1`)) + + assert.Equal(t, etx.State, txmgrcommon.TxUnstarted) + assert.Equal(t, gasLimit, etx.FeeLimit) + assert.Equal(t, fromAddress, etx.FromAddress) + assert.Equal(t, toAddress, etx.ToAddress) + assert.Equal(t, payload, etx.EncodedPayload) + assert.Equal(t, big.Int(assets.NewEthValue(0)), etx.Value) + assert.Equal(t, subject, etx.Subject.UUID) + }) + + mustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, 0, fromAddress) + + t.Run("with queue at capacity does not insert eth_tx", func(t *testing.T) { + evmConfig.MaxQueued = uint64(1) + _, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: testutils.NewAddress(), + EncodedPayload: []byte{1, 2, 3}, + FeeLimit: 21000, + Meta: nil, + Strategy: txmgrcommon.NewSendEveryStrategy(), + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "Txm#CreateTransaction: cannot create transaction; too many unstarted transactions in the queue (1/1). 
WARNING: Hitting EVM.Transactions.MaxQueued") + }) + + t.Run("doesn't insert eth_tx if a matching tx already exists for that pipeline_task_run_id", func(t *testing.T) { + evmConfig.MaxQueued = uint64(3) + id := uuid.New() + tx1, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: testutils.NewAddress(), + EncodedPayload: []byte{1, 2, 3}, + FeeLimit: 21000, + PipelineTaskRunID: &id, + Strategy: txmgrcommon.NewSendEveryStrategy(), + }) + assert.NoError(t, err) + + tx2, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: testutils.NewAddress(), + EncodedPayload: []byte{1, 2, 3}, + FeeLimit: 21000, + PipelineTaskRunID: &id, + Strategy: txmgrcommon.NewSendEveryStrategy(), + }) + assert.NoError(t, err) + + assert.Equal(t, tx1.GetID(), tx2.GetID()) + }) + + t.Run("returns error if eth key state is missing or doesn't match chain ID", func(t *testing.T) { + rndAddr := testutils.NewAddress() + _, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: rndAddr, + ToAddress: testutils.NewAddress(), + EncodedPayload: []byte{1, 2, 3}, + FeeLimit: 21000, + Strategy: txmgrcommon.NewSendEveryStrategy(), + }) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("no eth key exists with address %s", rndAddr.String())) + + _, otherAddress := cltest.MustInsertRandomKey(t, kst.Eth(), *ubig.NewI(1337)) + + _, err = txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: otherAddress, + ToAddress: testutils.NewAddress(), + EncodedPayload: []byte{1, 2, 3}, + FeeLimit: 21000, + Strategy: txmgrcommon.NewSendEveryStrategy(), + }) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("cannot send transaction from %s on chain ID 0: eth key with address %s exists but is has not been enabled for chain 0 (enabled only for chain IDs: 1337)", otherAddress.Hex(), otherAddress.Hex())) + }) + + t.Run("simulate 
transmit checker", func(t *testing.T) { + pgtest.MustExec(t, db, `DELETE FROM evm.txes`) + + checker := txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeSimulate, + } + evmConfig.MaxQueued = uint64(1) + etx, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + Strategy: txmgrcommon.NewSendEveryStrategy(), + Checker: checker, + }) + assert.NoError(t, err) + cltest.AssertCount(t, db, "evm.txes", 1) + var dbEtx txmgr.DbEthTx + require.NoError(t, db.Get(&dbEtx, `SELECT * FROM evm.txes ORDER BY id ASC LIMIT 1`)) + + var c txmgr.TransmitCheckerSpec + require.NotNil(t, etx.TransmitChecker) + require.NoError(t, json.Unmarshal(*etx.TransmitChecker, &c)) + require.Equal(t, checker, c) + }) + + t.Run("meta and vrf checker", func(t *testing.T) { + pgtest.MustExec(t, db, `DELETE FROM evm.txes`) + testDefaultSubID := uint64(2) + testDefaultMaxLink := "1000000000000000000" + testDefaultMaxEth := "2000000000000000000" + // max uint256 is 1.1579209e+77 + testDefaultGlobalSubID := crypto.Keccak256Hash([]byte("sub id")).String() + jobID := int32(25) + requestID := common.HexToHash("abcd") + requestTxHash := common.HexToHash("dcba") + meta := &txmgr.TxMeta{ + JobID: &jobID, + RequestID: &requestID, + RequestTxHash: &requestTxHash, + MaxLink: &testDefaultMaxLink, // 1e18 + MaxEth: &testDefaultMaxEth, // 2e18 + SubID: &testDefaultSubID, + GlobalSubID: &testDefaultGlobalSubID, + } + evmConfig.MaxQueued = uint64(1) + checker := txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeVRFV2, + VRFCoordinatorAddress: testutils.NewAddressPtr(), + } + etx, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + Meta: meta, + Strategy: txmgrcommon.NewSendEveryStrategy(), + Checker: checker, + }) + assert.NoError(t, err) + 
cltest.AssertCount(t, db, "evm.txes", 1) + var dbEtx txmgr.DbEthTx + require.NoError(t, db.Get(&dbEtx, `SELECT * FROM evm.txes ORDER BY id ASC LIMIT 1`)) + + m, err := etx.GetMeta() + require.NoError(t, err) + require.Equal(t, meta, m) + + var c txmgr.TransmitCheckerSpec + require.NotNil(t, etx.TransmitChecker) + require.NoError(t, json.Unmarshal(*etx.TransmitChecker, &c)) + require.Equal(t, checker, c) + }) + + t.Run("forwards tx when a proper forwarder is set up", func(t *testing.T) { + pgtest.MustExec(t, db, `DELETE FROM evm.txes`) + pgtest.MustExec(t, db, `DELETE FROM evm.forwarders`) + evmConfig.MaxQueued = uint64(1) + + // Create mock forwarder, mock authorizedsenders call. + form := forwarders.NewORM(db, logger.Test(t), cfg.Database()) + fwdrAddr := testutils.NewAddress() + fwdr, err := form.CreateForwarder(fwdrAddr, ubig.Big(cltest.FixtureChainID)) + require.NoError(t, err) + require.Equal(t, fwdr.Address, fwdrAddr) + + etx, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + ForwarderAddress: fwdr.Address, + Strategy: txmgrcommon.NewSendEveryStrategy(), + }) + assert.NoError(t, err) + cltest.AssertCount(t, db, "evm.txes", 1) + + var dbEtx txmgr.DbEthTx + require.NoError(t, db.Get(&dbEtx, `SELECT * FROM evm.txes ORDER BY id ASC LIMIT 1`)) + + m, err := etx.GetMeta() + require.NoError(t, err) + require.NotNil(t, m.FwdrDestAddress) + require.Equal(t, etx.ToAddress.String(), fwdrAddr.String()) + }) + + t.Run("insert Tx successfully with a IdempotencyKey", func(t *testing.T) { + evmConfig.MaxQueued = uint64(3) + id := uuid.New() + idempotencyKey := "1" + _, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + IdempotencyKey: &idempotencyKey, + FromAddress: fromAddress, + ToAddress: testutils.NewAddress(), + EncodedPayload: []byte{1, 2, 3}, + FeeLimit: 21000, + PipelineTaskRunID: &id, + Strategy: 
txmgrcommon.NewSendEveryStrategy(), + }) + assert.NoError(t, err) + }) + + t.Run("doesn't insert eth_tx if a matching tx already exists for that IdempotencyKey", func(t *testing.T) { + evmConfig.MaxQueued = uint64(3) + id := uuid.New() + idempotencyKey := "2" + tx1, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + IdempotencyKey: &idempotencyKey, + FromAddress: fromAddress, + ToAddress: testutils.NewAddress(), + EncodedPayload: []byte{1, 2, 3}, + FeeLimit: 21000, + PipelineTaskRunID: &id, + Strategy: txmgrcommon.NewSendEveryStrategy(), + }) + assert.NoError(t, err) + + tx2, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + IdempotencyKey: &idempotencyKey, + FromAddress: fromAddress, + ToAddress: testutils.NewAddress(), + EncodedPayload: []byte{1, 2, 3}, + FeeLimit: 21000, + PipelineTaskRunID: &id, + Strategy: txmgrcommon.NewSendEveryStrategy(), + }) + assert.NoError(t, err) + + assert.Equal(t, tx1.GetID(), tx2.GetID()) + }) +} + +func newMockTxStrategy(t *testing.T) *commontxmmocks.TxStrategy { + return commontxmmocks.NewTxStrategy(t) +} + +func TestTxm_CreateTransaction_OutOfEth(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + etKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + thisKey, _ := cltest.RandomKey{Nonce: 1}.MustInsert(t, etKeyStore) + otherKey, _ := cltest.RandomKey{Nonce: 1}.MustInsert(t, etKeyStore) + + fromAddress := thisKey.Address + evmFromAddress := fromAddress + gasLimit := uint32(1000) + toAddress := testutils.NewAddress() + + config, dbConfig, evmConfig := txmgr.MakeTestConfigs(t) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + estimator := gas.NewEstimator(logger.Test(t), ethClient, config, evmConfig.GasEstimator()) + txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), etKeyStore) 
+ require.NoError(t, err) + + t.Run("if another key has any transactions with insufficient eth errors, transmits as normal", func(t *testing.T) { + payload := cltest.MustRandomBytes(t, 100) + + evmConfig.MaxQueued = uint64(1) + mustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, 0, otherKey.Address) + strategy := newMockTxStrategy(t) + strategy.On("Subject").Return(uuid.NullUUID{}) + strategy.On("PruneQueue", mock.Anything, mock.Anything).Return(nil, nil) + + etx, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: evmFromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + Meta: nil, + Strategy: strategy, + }) + assert.NoError(t, err) + + require.Equal(t, payload, etx.EncodedPayload) + }) + + require.NoError(t, commonutils.JustError(db.Exec(`DELETE FROM evm.txes WHERE from_address = $1`, thisKey.Address))) + + t.Run("if this key has any transactions with insufficient eth errors, inserts it anyway", func(t *testing.T) { + payload := cltest.MustRandomBytes(t, 100) + evmConfig.MaxQueued = uint64(1) + + mustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t, txStore, 0, thisKey.Address) + strategy := newMockTxStrategy(t) + strategy.On("Subject").Return(uuid.NullUUID{}) + strategy.On("PruneQueue", mock.Anything, mock.Anything).Return(nil, nil) + + etx, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: evmFromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + Meta: nil, + Strategy: strategy, + }) + require.NoError(t, err) + require.Equal(t, payload, etx.EncodedPayload) + }) + + require.NoError(t, commonutils.JustError(db.Exec(`DELETE FROM evm.txes WHERE from_address = $1`, thisKey.Address))) + + t.Run("if this key has transactions but no insufficient eth errors, transmits as normal", func(t *testing.T) { + payload := cltest.MustRandomBytes(t, 100) + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 0, 42, 
thisKey.Address) + strategy := newMockTxStrategy(t) + strategy.On("Subject").Return(uuid.NullUUID{}) + strategy.On("PruneQueue", mock.Anything, mock.Anything).Return(nil, nil) + + evmConfig.MaxQueued = uint64(1) + etx, err := txm.CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: evmFromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + Meta: nil, + Strategy: strategy, + }) + require.NoError(t, err) + require.Equal(t, payload, etx.EncodedPayload) + }) +} + +func TestTxm_Lifecycle(t *testing.T) { + db := pgtest.NewSqlxDB(t) + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + kst := ksmocks.NewEth(t) + + config, dbConfig, evmConfig := txmgr.MakeTestConfigs(t) + config.SetFinalityDepth(uint32(42)) + config.RpcDefaultBatchSize = uint32(4) + + evmConfig.ResendAfterThreshold = 1 * time.Hour + evmConfig.ReaperThreshold = 1 * time.Hour + evmConfig.ReaperInterval = 1 * time.Hour + + kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return([]common.Address{}, nil) + + keyChangeCh := make(chan struct{}) + unsub := cltest.NewAwaiter() + kst.On("SubscribeToKeyChanges").Return(keyChangeCh, unsub.ItHappened) + estimator := gas.NewEstimator(logger.Test(t), ethClient, config, evmConfig.GasEstimator()) + txm, err := makeTestEvmTxm(t, db, ethClient, estimator, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), kst) + require.NoError(t, err) + + head := cltest.Head(42) + // It should not hang or panic + txm.OnNewLongestChain(testutils.Context(t), head) + + evmConfig.BumpThreshold = uint64(1) + + require.NoError(t, txm.Start(testutils.Context(t))) + + ctx, cancel := context.WithTimeout(testutils.Context(t), 5*time.Second) + t.Cleanup(cancel) + txm.OnNewLongestChain(ctx, head) + require.NoError(t, ctx.Err()) + + keyState := cltest.MustGenerateRandomKeyState(t) + + addr := []common.Address{keyState.Address.Address()} + kst.On("EnabledAddressesForChain", 
&cltest.FixtureChainID).Return(addr, nil) + ethClient.On("PendingNonceAt", mock.AnythingOfType("*context.cancelCtx"), common.Address{}).Return(uint64(0), nil).Maybe() + keyChangeCh <- struct{}{} + + require.NoError(t, txm.Close()) + unsub.AwaitOrFail(t, 1*time.Second) +} + +func TestTxm_Reset(t *testing.T) { + t.Parallel() + + // Lots of boilerplate setup since we actually want to test start/stop of EthBroadcaster/EthConfirmer + db := pgtest.NewSqlxDB(t) + gcfg := configtest.NewTestGeneralConfig(t) + cfg := evmtest.NewChainScopedConfig(t, gcfg) + kst := cltest.NewKeyStore(t, db, cfg.Database()) + + _, addr := cltest.RandomKey{}.MustInsert(t, kst.Eth()) + _, addr2 := cltest.RandomKey{}.MustInsert(t, kst.Eth()) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + // 4 confirmed tx from addr1 + for i := int64(0); i < 4; i++ { + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, i, i*42+1, addr) + } + // 2 confirmed from addr2 + for i := int64(0); i < 2; i++ { + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, i, i*42+1, addr2) + } + + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Return(nil, nil) + ethClient.On("BatchCallContextAll", mock.Anything, mock.Anything).Return(nil).Maybe() + ethClient.On("PendingNonceAt", mock.Anything, addr).Return(uint64(128), nil).Maybe() + ethClient.On("PendingNonceAt", mock.Anything, addr2).Return(uint64(44), nil).Maybe() + + estimator := gas.NewEstimator(logger.Test(t), ethClient, cfg.EVM(), cfg.EVM().GasEstimator()) + txm, err := makeTestEvmTxm(t, db, ethClient, estimator, cfg.EVM(), cfg.EVM().GasEstimator(), cfg.EVM().Transactions(), cfg.Database(), cfg.Database().Listener(), kst.Eth()) + require.NoError(t, err) + + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 2, addr2) + for i := 0; i < 1000; i++ { + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 4+int64(i), addr) + } + + 
t.Run("returns error if not started", func(t *testing.T) { + err := txm.Reset(addr, false) + require.Error(t, err) + assert.EqualError(t, err, "not started") + }) + + servicetest.Run(t, txm) + + t.Run("returns no error if started", func(t *testing.T) { + err := txm.Reset(addr, false) + require.NoError(t, err) + }) + + t.Run("deletes relevant evm.txes if abandon=true", func(t *testing.T) { + err := txm.Reset(addr, true) + require.NoError(t, err) + + var s string + err = db.Get(&s, `SELECT error FROM evm.txes WHERE from_address = $1 AND state = 'fatal_error'`, addr) + require.NoError(t, err) + assert.Equal(t, "abandoned", s) + + // the other address didn't get touched + var count int + err = db.Get(&count, `SELECT count(*) FROM evm.txes WHERE from_address = $1 AND state = 'fatal_error'`, addr2) + require.NoError(t, err) + assert.Equal(t, 0, count) + }) +} + +func newTxStore(t *testing.T, db *sqlx.DB, cfg pg.QConfig) txmgr.EvmTxStore { + return txmgr.NewTxStore(db, logger.Test(t), cfg) +} + +func newEthReceipt(blockNumber int64, blockHash common.Hash, txHash common.Hash, status uint64) txmgr.Receipt { + transactionIndex := uint(cltest.NewRandomPositiveInt64()) + + receipt := evmtypes.Receipt{ + BlockNumber: big.NewInt(blockNumber), + BlockHash: blockHash, + TxHash: txHash, + TransactionIndex: transactionIndex, + Status: status, + } + + r := txmgr.Receipt{ + BlockNumber: blockNumber, + BlockHash: blockHash, + TxHash: txHash, + TransactionIndex: transactionIndex, + Receipt: receipt, + } + return r +} + +func mustInsertEthReceipt(t *testing.T, txStore txmgr.TestEvmTxStore, blockNumber int64, blockHash common.Hash, txHash common.Hash) txmgr.Receipt { + r := newEthReceipt(blockNumber, blockHash, txHash, 0x1) + id, err := txStore.InsertReceipt(&r.Receipt) + require.NoError(t, err) + r.ID = id + return r +} + +func mustInsertRevertedEthReceipt(t *testing.T, txStore txmgr.TestEvmTxStore, blockNumber int64, blockHash common.Hash, txHash common.Hash) txmgr.Receipt { + r := 
newEthReceipt(blockNumber, blockHash, txHash, 0x0) + id, err := txStore.InsertReceipt(&r.Receipt) + require.NoError(t, err) + r.ID = id + return r +} + +// Inserts into evm.receipts but does not update evm.txes or evm.tx_attempts +func mustInsertConfirmedEthTxWithReceipt(t *testing.T, txStore txmgr.TestEvmTxStore, fromAddress common.Address, nonce, blockNum int64) (etx txmgr.Tx) { + etx = cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, nonce, blockNum, fromAddress) + mustInsertEthReceipt(t, txStore, blockNum, utils.NewHash(), etx.TxAttempts[0].Hash) + return etx +} + +func mustInsertConfirmedEthTxBySaveFetchedReceipts(t *testing.T, txStore txmgr.TestEvmTxStore, fromAddress common.Address, nonce int64, blockNum int64, chainID big.Int) (etx txmgr.Tx) { + etx = cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, nonce, blockNum, fromAddress) + receipt := evmtypes.Receipt{ + TxHash: etx.TxAttempts[0].Hash, + BlockHash: utils.NewHash(), + BlockNumber: big.NewInt(nonce), + TransactionIndex: uint(1), + } + err := txStore.SaveFetchedReceipts(testutils.Context(t), []*evmtypes.Receipt{&receipt}, &chainID) + require.NoError(t, err) + return etx +} + +func mustInsertFatalErrorEthTx(t *testing.T, txStore txmgr.TestEvmTxStore, fromAddress common.Address) txmgr.Tx { + etx := cltest.NewEthTx(fromAddress) + etx.Error = null.StringFrom("something exploded") + etx.State = txmgrcommon.TxFatalError + + require.NoError(t, txStore.InsertTx(&etx)) + return etx +} + +func mustInsertUnconfirmedEthTxWithAttemptState(t *testing.T, txStore txmgr.TestEvmTxStore, nonce int64, fromAddress common.Address, txAttemptState txmgrtypes.TxAttemptState, opts ...interface{}) txmgr.Tx { + etx := cltest.MustInsertUnconfirmedEthTx(t, txStore, nonce, fromAddress, opts...) 
+ attempt := cltest.NewLegacyEthTxAttempt(t, etx.ID) + + tx := cltest.NewLegacyTransaction(uint64(nonce), testutils.NewAddress(), big.NewInt(142), 242, big.NewInt(342), []byte{1, 2, 3}) + rlp := new(bytes.Buffer) + require.NoError(t, tx.EncodeRLP(rlp)) + attempt.SignedRawTx = rlp.Bytes() + + attempt.State = txAttemptState + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + var err error + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + return etx +} + +func mustInsertUnconfirmedEthTxWithBroadcastDynamicFeeAttempt(t *testing.T, txStore txmgr.TestEvmTxStore, nonce int64, fromAddress common.Address, opts ...interface{}) txmgr.Tx { + etx := cltest.MustInsertUnconfirmedEthTx(t, txStore, nonce, fromAddress, opts...) + attempt := cltest.NewDynamicFeeEthTxAttempt(t, etx.ID) + + addr := testutils.NewAddress() + dtx := types.DynamicFeeTx{ + ChainID: big.NewInt(0), + Nonce: uint64(nonce), + GasTipCap: big.NewInt(1), + GasFeeCap: big.NewInt(1), + Gas: 242, + To: &addr, + Value: big.NewInt(342), + Data: []byte{2, 3, 4}, + } + tx := types.NewTx(&dtx) + rlp := new(bytes.Buffer) + require.NoError(t, tx.EncodeRLP(rlp)) + attempt.SignedRawTx = rlp.Bytes() + + attempt.State = txmgrtypes.TxAttemptBroadcast + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + var err error + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + return etx +} + +func mustInsertUnconfirmedEthTxWithInsufficientEthAttempt(t *testing.T, txStore txmgr.TestEvmTxStore, nonce int64, fromAddress common.Address) txmgr.Tx { + timeNow := time.Now() + etx := cltest.NewEthTx(fromAddress) + + etx.BroadcastAt = &timeNow + etx.InitialBroadcastAt = &timeNow + n := evmtypes.Nonce(nonce) + etx.Sequence = &n + etx.State = txmgrcommon.TxUnconfirmed + require.NoError(t, txStore.InsertTx(&etx)) + attempt := cltest.NewLegacyEthTxAttempt(t, etx.ID) + + tx := cltest.NewLegacyTransaction(uint64(nonce), testutils.NewAddress(), big.NewInt(142), 242, big.NewInt(342), []byte{1, 
2, 3}) + rlp := new(bytes.Buffer) + require.NoError(t, tx.EncodeRLP(rlp)) + attempt.SignedRawTx = rlp.Bytes() + + attempt.State = txmgrtypes.TxAttemptInsufficientFunds + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + var err error + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + return etx +} + +func mustInsertConfirmedMissingReceiptEthTxWithLegacyAttempt( + t *testing.T, txStore txmgr.TestEvmTxStore, nonce int64, broadcastBeforeBlockNum int64, + broadcastAt time.Time, fromAddress common.Address) txmgr.Tx { + etx := cltest.NewEthTx(fromAddress) + + etx.BroadcastAt = &broadcastAt + etx.InitialBroadcastAt = &broadcastAt + n := evmtypes.Nonce(nonce) + etx.Sequence = &n + etx.State = txmgrcommon.TxConfirmedMissingReceipt + require.NoError(t, txStore.InsertTx(&etx)) + attempt := cltest.NewLegacyEthTxAttempt(t, etx.ID) + attempt.BroadcastBeforeBlockNum = &broadcastBeforeBlockNum + attempt.State = txmgrtypes.TxAttemptBroadcast + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + etx.TxAttempts = append(etx.TxAttempts, attempt) + return etx +} + +func mustInsertInProgressEthTxWithAttempt(t *testing.T, txStore txmgr.TestEvmTxStore, nonce evmtypes.Nonce, fromAddress common.Address) txmgr.Tx { + etx := cltest.NewEthTx(fromAddress) + + etx.Sequence = &nonce + etx.State = txmgrcommon.TxInProgress + require.NoError(t, txStore.InsertTx(&etx)) + attempt := cltest.NewLegacyEthTxAttempt(t, etx.ID) + tx := cltest.NewLegacyTransaction(uint64(nonce), testutils.NewAddress(), big.NewInt(142), 242, big.NewInt(342), []byte{1, 2, 3}) + rlp := new(bytes.Buffer) + require.NoError(t, tx.EncodeRLP(rlp)) + attempt.SignedRawTx = rlp.Bytes() + attempt.State = txmgrtypes.TxAttemptInProgress + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + var err error + etx, err = txStore.FindTxWithAttempts(etx.ID) + require.NoError(t, err) + return etx +} + +func mustCreateUnstartedGeneratedTx(t testing.TB, txStore txmgr.EvmTxStore, fromAddress common.Address, 
chainID *big.Int, opts ...func(*txmgr.TxRequest)) (tx txmgr.Tx) { + txRequest := txmgr.TxRequest{ + FromAddress: fromAddress, + } + + // Apply the default options + withDefaults()(&txRequest) + // Apply the optional parameters + for _, opt := range opts { + opt(&txRequest) + } + return mustCreateUnstartedTxFromEvmTxRequest(t, txStore, txRequest, chainID) +} + +func withDefaults() func(*txmgr.TxRequest) { + return func(tx *txmgr.TxRequest) { + tx.ToAddress = testutils.NewAddress() + tx.EncodedPayload = []byte{1, 2, 3} + tx.Value = big.Int(assets.NewEthValue(142)) + tx.FeeLimit = uint32(1000000000) + tx.Strategy = txmgrcommon.NewSendEveryStrategy() + // Set default values for other fields if needed + } +} + +func mustCreateUnstartedTx(t testing.TB, txStore txmgr.EvmTxStore, fromAddress common.Address, toAddress common.Address, encodedPayload []byte, gasLimit uint32, value big.Int, chainID *big.Int, opts ...interface{}) (tx txmgr.Tx) { + txRequest := txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: encodedPayload, + Value: value, + FeeLimit: gasLimit, + Strategy: txmgrcommon.NewSendEveryStrategy(), + } + + return mustCreateUnstartedTxFromEvmTxRequest(t, txStore, txRequest, chainID) +} + +func mustCreateUnstartedTxFromEvmTxRequest(t testing.TB, txStore txmgr.EvmTxStore, txRequest txmgr.TxRequest, chainID *big.Int) (tx txmgr.Tx) { + tx, err := txStore.CreateTransaction(testutils.Context(t), txRequest, chainID) + require.NoError(t, err) + + _, err = txRequest.Strategy.PruneQueue(testutils.Context(t), txStore) + require.NoError(t, err) + + return tx +} + +func txRequestWithStrategy(strategy txmgrtypes.TxStrategy) func(*txmgr.TxRequest) { + return func(tx *txmgr.TxRequest) { + tx.Strategy = strategy + } +} + +func txRequestWithChecker(checker txmgr.TransmitCheckerSpec) func(*txmgr.TxRequest) { + return func(tx *txmgr.TxRequest) { + tx.Checker = checker + } +} +func txRequestWithValue(value big.Int) func(*txmgr.TxRequest) { + return 
func(tx *txmgr.TxRequest) { + tx.Value = value + } +} + +func txRequestWithIdempotencyKey(idempotencyKey string) func(*txmgr.TxRequest) { + return func(tx *txmgr.TxRequest) { + tx.IdempotencyKey = &idempotencyKey + } +} diff --git a/core/chains/evm/types/block_json_benchmark_test.go b/core/chains/evm/types/block_json_benchmark_test.go new file mode 100644 index 00000000..68357088 --- /dev/null +++ b/core/chains/evm/types/block_json_benchmark_test.go @@ -0,0 +1,89 @@ +package types_test + +import ( + "encoding/binary" + "encoding/json" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +func makeTestBlock(nTx int) *evmtypes.Block { + txns := make([]evmtypes.Transaction, nTx) + + generateHash := func(x int64) common.Hash { + out := make([]byte, 0, 32) + + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b, uint64(x)) + + for i := 0; i < 4; i++ { + out = append(out, b...) 
+ } + return common.BytesToHash(out) + } + for i := 0; i < nTx; i++ { + wei := assets.NewWei(big.NewInt(int64(i))) + txns[i] = evmtypes.Transaction{ + GasPrice: wei, + GasLimit: uint32(i), + MaxFeePerGas: wei, + MaxPriorityFeePerGas: wei, + Type: evmtypes.TxType(i), + Hash: generateHash(int64(i)), + } + } + return &evmtypes.Block{ + Number: int64(nTx), + Hash: generateHash(int64(1024 * 1024)), + ParentHash: generateHash(int64(512 * 1024)), + BaseFeePerGas: assets.NewWei(big.NewInt(3)), + Timestamp: time.Unix(0, 0), + Transactions: txns, + } +} + +var ( + smallBlock = makeTestBlock(2) + mediumBlock = makeTestBlock(64) + largeBlock = makeTestBlock(512) + xlBlock = makeTestBlock(4 * 1024) +) + +func unmarshal_block(b *testing.B, block *evmtypes.Block) { + jsonBytes, err := json.Marshal(&block) + if err != nil { + b.Fatalf("failed to create test json %+v", err) + } + b.ResetTimer() + + var temp evmtypes.Block + for i := 0; i < b.N; i++ { + err := json.Unmarshal(jsonBytes, &temp) + if err != nil { + b.Fatalf("err %+v", err) + } + } +} + +func BenchmarkBlock_Small_JSONUnmarshal(b *testing.B) { + unmarshal_block(b, smallBlock) + +} + +func BenchmarkBlock_Medium_JSONUnmarshal(b *testing.B) { + unmarshal_block(b, mediumBlock) +} + +func BenchmarkBlock_Large_JSONUnmarshal(b *testing.B) { + unmarshal_block(b, largeBlock) +} + +func BenchmarkBlock_XL_JSONUnmarshal(b *testing.B) { + unmarshal_block(b, xlBlock) +} diff --git a/core/chains/evm/types/internal/blocks/block.go b/core/chains/evm/types/internal/blocks/block.go new file mode 100644 index 00000000..942b8d54 --- /dev/null +++ b/core/chains/evm/types/internal/blocks/block.go @@ -0,0 +1,29 @@ +package blocks + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +//go:generate codecgen -o internal_types_codecgen.go -j true -d 1709 transactions.go block.go + +// BlockInternal is JSON-serialization optimized intermediate representation between EVM blocks +// and our 
public representation +type BlockInternal struct { + Number string `json:"number"` + Hash common.Hash `json:"hash"` + ParentHash common.Hash `json:"parentHash"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas"` + Timestamp hexutil.Uint64 `json:"timestamp"` + Transactions []TransactionInternal `json:"transactions"` +} + +func (bi BlockInternal) Empty() bool { + var dflt BlockInternal + + return len(bi.Transactions) == 0 && + bi.Hash == dflt.Hash && + bi.ParentHash == dflt.ParentHash && + bi.BaseFeePerGas == dflt.BaseFeePerGas && + bi.Timestamp == dflt.Timestamp +} diff --git a/core/chains/evm/types/internal/blocks/internal_types_codecgen.go b/core/chains/evm/types/internal/blocks/internal_types_codecgen.go new file mode 100644 index 00000000..1c4baa22 --- /dev/null +++ b/core/chains/evm/types/internal/blocks/internal_types_codecgen.go @@ -0,0 +1,1124 @@ +//go:build go1.6 +// +build go1.6 + +// Code generated by codecgen - DO NOT EDIT. + +package blocks + +import ( + "errors" + pkg2_common "github.com/ethereum/go-ethereum/common" + pkg1_hexutil "github.com/ethereum/go-ethereum/common/hexutil" + codec1978 "github.com/ugorji/go/codec" + "runtime" + "sort" + "strconv" +) + +const ( + // ----- content types ---- + codecSelferCcUTF81709 = 1 + codecSelferCcRAW1709 = 255 + // ----- value types used ---- + codecSelferValueTypeArray1709 = 10 + codecSelferValueTypeMap1709 = 9 + codecSelferValueTypeString1709 = 6 + codecSelferValueTypeInt1709 = 2 + codecSelferValueTypeUint1709 = 3 + codecSelferValueTypeFloat1709 = 4 + codecSelferValueTypeNil1709 = 1 + codecSelferBitsize1709 = uint8(32 << (^uint(0) >> 63)) + codecSelferDecContainerLenNil1709 = -2147483648 +) + +var ( + errCodecSelferOnlyMapOrArrayEncodeToStruct1709 = errors.New(`only encoded map or array can be decoded into a struct`) + _ sort.Interface = nil +) + +type codecSelfer1709 struct{} + +func codecSelfer1709False() bool { return false } +func codecSelfer1709True() bool { return true } + +type 
codecSelfer1709stringSlice []string + +func (p codecSelfer1709stringSlice) Len() int { return len(p) } +func (p codecSelfer1709stringSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } +func (p codecSelfer1709stringSlice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } + +type codecSelfer1709uint64Slice []uint64 + +func (p codecSelfer1709uint64Slice) Len() int { return len(p) } +func (p codecSelfer1709uint64Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } +func (p codecSelfer1709uint64Slice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } + +type codecSelfer1709int64Slice []int64 + +func (p codecSelfer1709int64Slice) Len() int { return len(p) } +func (p codecSelfer1709int64Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } +func (p codecSelfer1709int64Slice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } + +type codecSelfer1709float64Slice []float64 + +func (p codecSelfer1709float64Slice) Len() int { return len(p) } +func (p codecSelfer1709float64Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] } +func (p codecSelfer1709float64Slice) Less(i, j int) bool { return p[uint(i)] < p[uint(j)] } + +func init() { + if codec1978.GenVersion != 28 { + _, file, _, _ := runtime.Caller(0) + ver := strconv.FormatInt(int64(codec1978.GenVersion), 10) + panic(errors.New("codecgen version mismatch: current: 28, need " + ver + ". 
Re-generate file: " + file)) + } + if false { // reference the types, but skip this branch at build/run time + var _ pkg2_common.Hash + var _ pkg1_hexutil.Big + } +} + +func (TxType) codecSelferViaCodecgen() {} +func (x TxType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1709 + z, r := codec1978.GenHelper().Encoder(e) + _, _, _ = h, z, r + if !z.EncBinary() { + z.EncTextMarshal(&x) + } else { + r.EncodeUint(uint64(x)) + } +} + +func (x *TxType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1709 + z, r := codec1978.GenHelper().Decoder(d) + _, _, _ = h, z, r + if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x) + } else { + *x = (TxType)(z.C.UintV(r.DecodeUint64(), 8)) + } +} + +func (TransactionInternal) codecSelferViaCodecgen() {} +func (x *TransactionInternal) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1709 + z, r := codec1978.GenHelper().Encoder(e) + _, _, _ = h, z, r + if z.EncBasicHandle().CheckCircularRef { + z.EncEncode(x) + return + } + if x == nil { + r.EncodeNil() + } else { + yy2arr2 := z.EncBasicHandle().StructToArray + _ = yy2arr2 + const yyr2 bool = false // struct tag has 'toArray' + var yyn3 bool = x.GasPrice == nil + var yyn4 bool = x.Gas == nil + var yyn5 bool = x.MaxFeePerGas == nil + var yyn6 bool = x.MaxPriorityFeePerGas == nil + var yyn7 bool = x.Type == nil + if yyr2 || yy2arr2 { + z.EncWriteArrayStart(6) + if yyn3 { + z.EncWriteArrayElem() + r.EncodeNil() + } else { + z.EncWriteArrayElem() + if yyxt9 := z.Extension(x.GasPrice); yyxt9 != nil { + z.EncExtension(x.GasPrice, yyxt9) + } else if !z.EncBinary() { + z.EncTextMarshal(*x.GasPrice) + } else { + z.EncFallback(x.GasPrice) + } + } + if yyn4 { + z.EncWriteArrayElem() + r.EncodeNil() + } else { + z.EncWriteArrayElem() + yy10 := *x.Gas + if yyxt11 := z.Extension(yy10); yyxt11 != nil { + z.EncExtension(yy10, yyxt11) + } else if !z.EncBinary() { + z.EncTextMarshal(yy10) + } else { + r.EncodeUint(uint64(yy10)) + } + } + if yyn5 { + 
z.EncWriteArrayElem() + r.EncodeNil() + } else { + z.EncWriteArrayElem() + if yyxt12 := z.Extension(x.MaxFeePerGas); yyxt12 != nil { + z.EncExtension(x.MaxFeePerGas, yyxt12) + } else if !z.EncBinary() { + z.EncTextMarshal(*x.MaxFeePerGas) + } else { + z.EncFallback(x.MaxFeePerGas) + } + } + if yyn6 { + z.EncWriteArrayElem() + r.EncodeNil() + } else { + z.EncWriteArrayElem() + if yyxt13 := z.Extension(x.MaxPriorityFeePerGas); yyxt13 != nil { + z.EncExtension(x.MaxPriorityFeePerGas, yyxt13) + } else if !z.EncBinary() { + z.EncTextMarshal(*x.MaxPriorityFeePerGas) + } else { + z.EncFallback(x.MaxPriorityFeePerGas) + } + } + if yyn7 { + z.EncWriteArrayElem() + r.EncodeNil() + } else { + z.EncWriteArrayElem() + yy14 := *x.Type + if yyxt15 := z.Extension(yy14); yyxt15 != nil { + z.EncExtension(yy14, yyxt15) + } else { + yy14.CodecEncodeSelf(e) + } + } + z.EncWriteArrayElem() + yy16 := &x.Hash + if yyxt17 := z.Extension(yy16); yyxt17 != nil { + z.EncExtension(yy16, yyxt17) + } else if !z.EncBinary() { + z.EncTextMarshal(*yy16) + } else { + z.F.EncSliceUint8V(([]uint8)(yy16[:]), e) + } + z.EncWriteArrayEnd() + } else { + z.EncWriteMapStart(6) + if z.EncBasicHandle().Canonical { + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"gas\"") + z.EncWriteMapElemValue() + if yyn4 { + r.EncodeNil() + } else { + yy18 := *x.Gas + if yyxt19 := z.Extension(yy18); yyxt19 != nil { + z.EncExtension(yy18, yyxt19) + } else if !z.EncBinary() { + z.EncTextMarshal(yy18) + } else { + r.EncodeUint(uint64(yy18)) + } + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"gasPrice\"") + z.EncWriteMapElemValue() + if yyn3 { + r.EncodeNil() + } else { + if yyxt20 := z.Extension(x.GasPrice); yyxt20 != nil { + z.EncExtension(x.GasPrice, yyxt20) + } else if !z.EncBinary() { + z.EncTextMarshal(*x.GasPrice) + } else { + z.EncFallback(x.GasPrice) + } + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"hash\"") + z.EncWriteMapElemValue() + yy21 := &x.Hash + if yyxt22 := z.Extension(yy21); yyxt22 != nil { + 
z.EncExtension(yy21, yyxt22) + } else if !z.EncBinary() { + z.EncTextMarshal(*yy21) + } else { + z.F.EncSliceUint8V(([]uint8)(yy21[:]), e) + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"maxFeePerGas\"") + z.EncWriteMapElemValue() + if yyn5 { + r.EncodeNil() + } else { + if yyxt23 := z.Extension(x.MaxFeePerGas); yyxt23 != nil { + z.EncExtension(x.MaxFeePerGas, yyxt23) + } else if !z.EncBinary() { + z.EncTextMarshal(*x.MaxFeePerGas) + } else { + z.EncFallback(x.MaxFeePerGas) + } + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"maxPriorityFeePerGas\"") + z.EncWriteMapElemValue() + if yyn6 { + r.EncodeNil() + } else { + if yyxt24 := z.Extension(x.MaxPriorityFeePerGas); yyxt24 != nil { + z.EncExtension(x.MaxPriorityFeePerGas, yyxt24) + } else if !z.EncBinary() { + z.EncTextMarshal(*x.MaxPriorityFeePerGas) + } else { + z.EncFallback(x.MaxPriorityFeePerGas) + } + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"type\"") + z.EncWriteMapElemValue() + if yyn7 { + r.EncodeNil() + } else { + yy25 := *x.Type + if yyxt26 := z.Extension(yy25); yyxt26 != nil { + z.EncExtension(yy25, yyxt26) + } else { + yy25.CodecEncodeSelf(e) + } + } + } else { + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"gasPrice\"") + z.EncWriteMapElemValue() + if yyn3 { + r.EncodeNil() + } else { + if yyxt27 := z.Extension(x.GasPrice); yyxt27 != nil { + z.EncExtension(x.GasPrice, yyxt27) + } else if !z.EncBinary() { + z.EncTextMarshal(*x.GasPrice) + } else { + z.EncFallback(x.GasPrice) + } + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"gas\"") + z.EncWriteMapElemValue() + if yyn4 { + r.EncodeNil() + } else { + yy28 := *x.Gas + if yyxt29 := z.Extension(yy28); yyxt29 != nil { + z.EncExtension(yy28, yyxt29) + } else if !z.EncBinary() { + z.EncTextMarshal(yy28) + } else { + r.EncodeUint(uint64(yy28)) + } + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"maxFeePerGas\"") + z.EncWriteMapElemValue() + if yyn5 { + r.EncodeNil() + } else { + if yyxt30 := z.Extension(x.MaxFeePerGas); yyxt30 != 
nil { + z.EncExtension(x.MaxFeePerGas, yyxt30) + } else if !z.EncBinary() { + z.EncTextMarshal(*x.MaxFeePerGas) + } else { + z.EncFallback(x.MaxFeePerGas) + } + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"maxPriorityFeePerGas\"") + z.EncWriteMapElemValue() + if yyn6 { + r.EncodeNil() + } else { + if yyxt31 := z.Extension(x.MaxPriorityFeePerGas); yyxt31 != nil { + z.EncExtension(x.MaxPriorityFeePerGas, yyxt31) + } else if !z.EncBinary() { + z.EncTextMarshal(*x.MaxPriorityFeePerGas) + } else { + z.EncFallback(x.MaxPriorityFeePerGas) + } + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"type\"") + z.EncWriteMapElemValue() + if yyn7 { + r.EncodeNil() + } else { + yy32 := *x.Type + if yyxt33 := z.Extension(yy32); yyxt33 != nil { + z.EncExtension(yy32, yyxt33) + } else { + yy32.CodecEncodeSelf(e) + } + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"hash\"") + z.EncWriteMapElemValue() + yy34 := &x.Hash + if yyxt35 := z.Extension(yy34); yyxt35 != nil { + z.EncExtension(yy34, yyxt35) + } else if !z.EncBinary() { + z.EncTextMarshal(*yy34) + } else { + z.F.EncSliceUint8V(([]uint8)(yy34[:]), e) + } + } + z.EncWriteMapEnd() + } + } +} + +func (x *TransactionInternal) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1709 + z, r := codec1978.GenHelper().Decoder(d) + _, _, _ = h, z, r + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeNil1709 { + *(x) = TransactionInternal{} + } else if yyct2 == codecSelferValueTypeMap1709 { + yyl2 := z.DecReadMapStart() + if yyl2 == 0 { + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + z.DecReadMapEnd() + } else if yyct2 == codecSelferValueTypeArray1709 { + yyl2 := z.DecReadArrayStart() + if yyl2 != 0 { + x.codecDecodeSelfFromArray(yyl2, d) + } + z.DecReadArrayEnd() + } else { + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct1709) + } +} + +func (x *TransactionInternal) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1709 + z, r := codec1978.GenHelper().Decoder(d) + _, _, _ = h, z, 
r + var yyhl3 bool = l >= 0 + for yyj3 := 0; z.DecContainerNext(yyj3, l, yyhl3); yyj3++ { + z.DecReadMapElemKey() + yys3 := r.DecodeStringAsBytes() + z.DecReadMapElemValue() + switch string(yys3) { + case "gasPrice": + if r.TryNil() { + if x.GasPrice != nil { // remove the if-true + x.GasPrice = nil + } + } else { + if x.GasPrice == nil { + x.GasPrice = new(pkg1_hexutil.Big) + } + if yyxt5 := z.Extension(x.GasPrice); yyxt5 != nil { + z.DecExtension(x.GasPrice, yyxt5) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.GasPrice) + } else { + z.DecFallback(x.GasPrice, false) + } + } + case "gas": + if r.TryNil() { + if x.Gas != nil { // remove the if-true + x.Gas = nil + } + } else { + if x.Gas == nil { + x.Gas = new(pkg1_hexutil.Uint64) + } + if yyxt7 := z.Extension(x.Gas); yyxt7 != nil { + z.DecExtension(x.Gas, yyxt7) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Gas) + } else { + *x.Gas = (pkg1_hexutil.Uint64)(r.DecodeUint64()) + } + } + case "maxFeePerGas": + if r.TryNil() { + if x.MaxFeePerGas != nil { // remove the if-true + x.MaxFeePerGas = nil + } + } else { + if x.MaxFeePerGas == nil { + x.MaxFeePerGas = new(pkg1_hexutil.Big) + } + if yyxt9 := z.Extension(x.MaxFeePerGas); yyxt9 != nil { + z.DecExtension(x.MaxFeePerGas, yyxt9) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxFeePerGas) + } else { + z.DecFallback(x.MaxFeePerGas, false) + } + } + case "maxPriorityFeePerGas": + if r.TryNil() { + if x.MaxPriorityFeePerGas != nil { // remove the if-true + x.MaxPriorityFeePerGas = nil + } + } else { + if x.MaxPriorityFeePerGas == nil { + x.MaxPriorityFeePerGas = new(pkg1_hexutil.Big) + } + if yyxt11 := z.Extension(x.MaxPriorityFeePerGas); yyxt11 != nil { + z.DecExtension(x.MaxPriorityFeePerGas, yyxt11) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxPriorityFeePerGas) + } else { + z.DecFallback(x.MaxPriorityFeePerGas, false) + } + } + case "type": + if r.TryNil() { + 
if x.Type != nil { // remove the if-true + x.Type = nil + } + } else { + if x.Type == nil { + x.Type = new(TxType) + } + if yyxt13 := z.Extension(x.Type); yyxt13 != nil { + z.DecExtension(x.Type, yyxt13) + } else { + x.Type.CodecDecodeSelf(d) + } + } + case "hash": + if yyxt15 := z.Extension(x.Hash); yyxt15 != nil { + z.DecExtension(&x.Hash, yyxt15) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(&x.Hash) + } else { + z.F.DecSliceUint8N(([]uint8)(x.Hash[:]), d) + } + default: + z.DecStructFieldNotFound(-1, string(yys3)) + } // end switch yys3 + } // end for yyj3 +} + +func (x *TransactionInternal) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1709 + z, r := codec1978.GenHelper().Decoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyb16 = !z.DecContainerNext(yyj16, l, yyhl16) + if yyb16 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + if r.TryNil() { + if x.GasPrice != nil { // remove the if-true + x.GasPrice = nil + } + } else { + if x.GasPrice == nil { + x.GasPrice = new(pkg1_hexutil.Big) + } + if yyxt18 := z.Extension(x.GasPrice); yyxt18 != nil { + z.DecExtension(x.GasPrice, yyxt18) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.GasPrice) + } else { + z.DecFallback(x.GasPrice, false) + } + } + yyj16++ + yyb16 = !z.DecContainerNext(yyj16, l, yyhl16) + if yyb16 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + if r.TryNil() { + if x.Gas != nil { // remove the if-true + x.Gas = nil + } + } else { + if x.Gas == nil { + x.Gas = new(pkg1_hexutil.Uint64) + } + if yyxt20 := z.Extension(x.Gas); yyxt20 != nil { + z.DecExtension(x.Gas, yyxt20) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Gas) + } else { + *x.Gas = (pkg1_hexutil.Uint64)(r.DecodeUint64()) + } + } + yyj16++ + yyb16 = !z.DecContainerNext(yyj16, l, yyhl16) + if yyb16 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + if r.TryNil() { + 
if x.MaxFeePerGas != nil { // remove the if-true + x.MaxFeePerGas = nil + } + } else { + if x.MaxFeePerGas == nil { + x.MaxFeePerGas = new(pkg1_hexutil.Big) + } + if yyxt22 := z.Extension(x.MaxFeePerGas); yyxt22 != nil { + z.DecExtension(x.MaxFeePerGas, yyxt22) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxFeePerGas) + } else { + z.DecFallback(x.MaxFeePerGas, false) + } + } + yyj16++ + yyb16 = !z.DecContainerNext(yyj16, l, yyhl16) + if yyb16 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + if r.TryNil() { + if x.MaxPriorityFeePerGas != nil { // remove the if-true + x.MaxPriorityFeePerGas = nil + } + } else { + if x.MaxPriorityFeePerGas == nil { + x.MaxPriorityFeePerGas = new(pkg1_hexutil.Big) + } + if yyxt24 := z.Extension(x.MaxPriorityFeePerGas); yyxt24 != nil { + z.DecExtension(x.MaxPriorityFeePerGas, yyxt24) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxPriorityFeePerGas) + } else { + z.DecFallback(x.MaxPriorityFeePerGas, false) + } + } + yyj16++ + yyb16 = !z.DecContainerNext(yyj16, l, yyhl16) + if yyb16 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + if r.TryNil() { + if x.Type != nil { // remove the if-true + x.Type = nil + } + } else { + if x.Type == nil { + x.Type = new(TxType) + } + if yyxt26 := z.Extension(x.Type); yyxt26 != nil { + z.DecExtension(x.Type, yyxt26) + } else { + x.Type.CodecDecodeSelf(d) + } + } + yyj16++ + yyb16 = !z.DecContainerNext(yyj16, l, yyhl16) + if yyb16 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + if yyxt28 := z.Extension(x.Hash); yyxt28 != nil { + z.DecExtension(&x.Hash, yyxt28) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(&x.Hash) + } else { + z.F.DecSliceUint8N(([]uint8)(x.Hash[:]), d) + } + yyj16++ + for ; z.DecContainerNext(yyj16, l, yyhl16); yyj16++ { + z.DecReadArrayElem() + z.DecStructFieldNotFound(yyj16-1, "") + } +} + +func (x *TransactionInternal) IsCodecEmpty() bool { + return !(x.Hash != 
pkg2_common.Hash{} || false) +} + +func (BlockInternal) codecSelferViaCodecgen() {} +func (x *BlockInternal) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1709 + z, r := codec1978.GenHelper().Encoder(e) + _, _, _ = h, z, r + if z.EncBasicHandle().CheckCircularRef { + z.EncEncode(x) + return + } + if x == nil { + r.EncodeNil() + } else { + yy2arr2 := z.EncBasicHandle().StructToArray + _ = yy2arr2 + const yyr2 bool = false // struct tag has 'toArray' + var yyn6 bool = x.BaseFeePerGas == nil + if yyr2 || yy2arr2 { + z.EncWriteArrayStart(6) + z.EncWriteArrayElem() + r.EncodeString(string(x.Number)) + z.EncWriteArrayElem() + yy10 := &x.Hash + if yyxt11 := z.Extension(yy10); yyxt11 != nil { + z.EncExtension(yy10, yyxt11) + } else if !z.EncBinary() { + z.EncTextMarshal(*yy10) + } else { + z.F.EncSliceUint8V(([]uint8)(yy10[:]), e) + } + z.EncWriteArrayElem() + yy12 := &x.ParentHash + if yyxt13 := z.Extension(yy12); yyxt13 != nil { + z.EncExtension(yy12, yyxt13) + } else if !z.EncBinary() { + z.EncTextMarshal(*yy12) + } else { + z.F.EncSliceUint8V(([]uint8)(yy12[:]), e) + } + if yyn6 { + z.EncWriteArrayElem() + r.EncodeNil() + } else { + z.EncWriteArrayElem() + if yyxt14 := z.Extension(x.BaseFeePerGas); yyxt14 != nil { + z.EncExtension(x.BaseFeePerGas, yyxt14) + } else if !z.EncBinary() { + z.EncTextMarshal(*x.BaseFeePerGas) + } else { + z.EncFallback(x.BaseFeePerGas) + } + } + z.EncWriteArrayElem() + if yyxt15 := z.Extension(x.Timestamp); yyxt15 != nil { + z.EncExtension(x.Timestamp, yyxt15) + } else if !z.EncBinary() { + z.EncTextMarshal(x.Timestamp) + } else { + r.EncodeUint(uint64(x.Timestamp)) + } + z.EncWriteArrayElem() + if x.Transactions == nil { + r.EncodeNil() + } else { + h.encSliceTransactionInternal(([]TransactionInternal)(x.Transactions), e) + } // end block: if x.Transactions slice == nil + z.EncWriteArrayEnd() + } else { + z.EncWriteMapStart(6) + if z.EncBasicHandle().Canonical { + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"baseFeePerGas\"") 
+ z.EncWriteMapElemValue() + if yyn6 { + r.EncodeNil() + } else { + if yyxt17 := z.Extension(x.BaseFeePerGas); yyxt17 != nil { + z.EncExtension(x.BaseFeePerGas, yyxt17) + } else if !z.EncBinary() { + z.EncTextMarshal(*x.BaseFeePerGas) + } else { + z.EncFallback(x.BaseFeePerGas) + } + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"hash\"") + z.EncWriteMapElemValue() + yy18 := &x.Hash + if yyxt19 := z.Extension(yy18); yyxt19 != nil { + z.EncExtension(yy18, yyxt19) + } else if !z.EncBinary() { + z.EncTextMarshal(*yy18) + } else { + z.F.EncSliceUint8V(([]uint8)(yy18[:]), e) + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"number\"") + z.EncWriteMapElemValue() + r.EncodeString(string(x.Number)) + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"parentHash\"") + z.EncWriteMapElemValue() + yy21 := &x.ParentHash + if yyxt22 := z.Extension(yy21); yyxt22 != nil { + z.EncExtension(yy21, yyxt22) + } else if !z.EncBinary() { + z.EncTextMarshal(*yy21) + } else { + z.F.EncSliceUint8V(([]uint8)(yy21[:]), e) + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"timestamp\"") + z.EncWriteMapElemValue() + if yyxt23 := z.Extension(x.Timestamp); yyxt23 != nil { + z.EncExtension(x.Timestamp, yyxt23) + } else if !z.EncBinary() { + z.EncTextMarshal(x.Timestamp) + } else { + r.EncodeUint(uint64(x.Timestamp)) + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"transactions\"") + z.EncWriteMapElemValue() + if x.Transactions == nil { + r.EncodeNil() + } else { + h.encSliceTransactionInternal(([]TransactionInternal)(x.Transactions), e) + } // end block: if x.Transactions slice == nil + } else { + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"number\"") + z.EncWriteMapElemValue() + r.EncodeString(string(x.Number)) + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"hash\"") + z.EncWriteMapElemValue() + yy26 := &x.Hash + if yyxt27 := z.Extension(yy26); yyxt27 != nil { + z.EncExtension(yy26, yyxt27) + } else if !z.EncBinary() { + z.EncTextMarshal(*yy26) + } else { + 
z.F.EncSliceUint8V(([]uint8)(yy26[:]), e) + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"parentHash\"") + z.EncWriteMapElemValue() + yy28 := &x.ParentHash + if yyxt29 := z.Extension(yy28); yyxt29 != nil { + z.EncExtension(yy28, yyxt29) + } else if !z.EncBinary() { + z.EncTextMarshal(*yy28) + } else { + z.F.EncSliceUint8V(([]uint8)(yy28[:]), e) + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"baseFeePerGas\"") + z.EncWriteMapElemValue() + if yyn6 { + r.EncodeNil() + } else { + if yyxt30 := z.Extension(x.BaseFeePerGas); yyxt30 != nil { + z.EncExtension(x.BaseFeePerGas, yyxt30) + } else if !z.EncBinary() { + z.EncTextMarshal(*x.BaseFeePerGas) + } else { + z.EncFallback(x.BaseFeePerGas) + } + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"timestamp\"") + z.EncWriteMapElemValue() + if yyxt31 := z.Extension(x.Timestamp); yyxt31 != nil { + z.EncExtension(x.Timestamp, yyxt31) + } else if !z.EncBinary() { + z.EncTextMarshal(x.Timestamp) + } else { + r.EncodeUint(uint64(x.Timestamp)) + } + z.EncWriteMapElemKey() + z.EncWr().WriteStr("\"transactions\"") + z.EncWriteMapElemValue() + if x.Transactions == nil { + r.EncodeNil() + } else { + h.encSliceTransactionInternal(([]TransactionInternal)(x.Transactions), e) + } // end block: if x.Transactions slice == nil + } + z.EncWriteMapEnd() + } + } +} + +func (x *BlockInternal) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1709 + z, r := codec1978.GenHelper().Decoder(d) + _, _, _ = h, z, r + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeNil1709 { + *(x) = BlockInternal{} + } else if yyct2 == codecSelferValueTypeMap1709 { + yyl2 := z.DecReadMapStart() + if yyl2 == 0 { + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + z.DecReadMapEnd() + } else if yyct2 == codecSelferValueTypeArray1709 { + yyl2 := z.DecReadArrayStart() + if yyl2 != 0 { + x.codecDecodeSelfFromArray(yyl2, d) + } + z.DecReadArrayEnd() + } else { + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct1709) + } +} + +func (x 
*BlockInternal) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1709 + z, r := codec1978.GenHelper().Decoder(d) + _, _, _ = h, z, r + var yyhl3 bool = l >= 0 + for yyj3 := 0; z.DecContainerNext(yyj3, l, yyhl3); yyj3++ { + z.DecReadMapElemKey() + yys3 := r.DecodeStringAsBytes() + z.DecReadMapElemValue() + switch string(yys3) { + case "number": + x.Number = (string)(z.DecStringZC(r.DecodeStringAsBytes())) + case "hash": + if yyxt6 := z.Extension(x.Hash); yyxt6 != nil { + z.DecExtension(&x.Hash, yyxt6) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(&x.Hash) + } else { + z.F.DecSliceUint8N(([]uint8)(x.Hash[:]), d) + } + case "parentHash": + if yyxt8 := z.Extension(x.ParentHash); yyxt8 != nil { + z.DecExtension(&x.ParentHash, yyxt8) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(&x.ParentHash) + } else { + z.F.DecSliceUint8N(([]uint8)(x.ParentHash[:]), d) + } + case "baseFeePerGas": + if r.TryNil() { + if x.BaseFeePerGas != nil { // remove the if-true + x.BaseFeePerGas = nil + } + } else { + if x.BaseFeePerGas == nil { + x.BaseFeePerGas = new(pkg1_hexutil.Big) + } + if yyxt10 := z.Extension(x.BaseFeePerGas); yyxt10 != nil { + z.DecExtension(x.BaseFeePerGas, yyxt10) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.BaseFeePerGas) + } else { + z.DecFallback(x.BaseFeePerGas, false) + } + } + case "timestamp": + if yyxt12 := z.Extension(x.Timestamp); yyxt12 != nil { + z.DecExtension(&x.Timestamp, yyxt12) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(&x.Timestamp) + } else { + x.Timestamp = (pkg1_hexutil.Uint64)(r.DecodeUint64()) + } + case "transactions": + h.decSliceTransactionInternal((*[]TransactionInternal)(&x.Transactions), d) + default: + z.DecStructFieldNotFound(-1, string(yys3)) + } // end switch yys3 + } // end for yyj3 +} + +func (x *BlockInternal) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1709 + z, r := 
codec1978.GenHelper().Decoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyb15 = !z.DecContainerNext(yyj15, l, yyhl15) + if yyb15 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + x.Number = (string)(z.DecStringZC(r.DecodeStringAsBytes())) + yyj15++ + yyb15 = !z.DecContainerNext(yyj15, l, yyhl15) + if yyb15 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + if yyxt18 := z.Extension(x.Hash); yyxt18 != nil { + z.DecExtension(&x.Hash, yyxt18) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(&x.Hash) + } else { + z.F.DecSliceUint8N(([]uint8)(x.Hash[:]), d) + } + yyj15++ + yyb15 = !z.DecContainerNext(yyj15, l, yyhl15) + if yyb15 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + if yyxt20 := z.Extension(x.ParentHash); yyxt20 != nil { + z.DecExtension(&x.ParentHash, yyxt20) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(&x.ParentHash) + } else { + z.F.DecSliceUint8N(([]uint8)(x.ParentHash[:]), d) + } + yyj15++ + yyb15 = !z.DecContainerNext(yyj15, l, yyhl15) + if yyb15 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + if r.TryNil() { + if x.BaseFeePerGas != nil { // remove the if-true + x.BaseFeePerGas = nil + } + } else { + if x.BaseFeePerGas == nil { + x.BaseFeePerGas = new(pkg1_hexutil.Big) + } + if yyxt22 := z.Extension(x.BaseFeePerGas); yyxt22 != nil { + z.DecExtension(x.BaseFeePerGas, yyxt22) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.BaseFeePerGas) + } else { + z.DecFallback(x.BaseFeePerGas, false) + } + } + yyj15++ + yyb15 = !z.DecContainerNext(yyj15, l, yyhl15) + if yyb15 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + if yyxt24 := z.Extension(x.Timestamp); yyxt24 != nil { + z.DecExtension(&x.Timestamp, yyxt24) + } else if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(&x.Timestamp) + } else { + x.Timestamp = (pkg1_hexutil.Uint64)(r.DecodeUint64()) + } + yyj15++ + yyb15 = 
!z.DecContainerNext(yyj15, l, yyhl15) + if yyb15 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + h.decSliceTransactionInternal((*[]TransactionInternal)(&x.Transactions), d) + yyj15++ + for ; z.DecContainerNext(yyj15, l, yyhl15); yyj15++ { + z.DecReadArrayElem() + z.DecStructFieldNotFound(yyj15-1, "") + } +} + +func (x *BlockInternal) IsCodecEmpty() bool { + return !(x.Number != "" || x.Hash != pkg2_common.Hash{} || x.ParentHash != pkg2_common.Hash{} || x.Timestamp != 0 || len(x.Transactions) != 0 || false) +} + +func (x codecSelfer1709) encSliceTransactionInternal(v []TransactionInternal, e *codec1978.Encoder) { + var h codecSelfer1709 + z, r := codec1978.GenHelper().Encoder(e) + _, _, _ = h, z, r + if v == nil { + r.EncodeNil() + return + } + z.EncWriteArrayStart(len(v)) + for yyv1 := range v { + z.EncWriteArrayElem() + yy2 := &v[yyv1] + if yyxt3 := z.Extension(yy2); yyxt3 != nil { + z.EncExtension(yy2, yyxt3) + } else { + yy2.CodecEncodeSelf(e) + } + } + z.EncWriteArrayEnd() +} + +func (x codecSelfer1709) decSliceTransactionInternal(v *[]TransactionInternal, d *codec1978.Decoder) { + var h codecSelfer1709 + z, r := codec1978.GenHelper().Decoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyh1.IsNil { + if yyv1 != nil { + yyv1 = nil + yyc1 = true + } + } else if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []TransactionInternal{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else { + yyhl1 := yyl1 > 0 + var yyrl1 int + _ = yyrl1 + if yyhl1 { + if yyl1 > cap(yyv1) { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]TransactionInternal, yyrl1) + } + yyc1 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + } + var yyj1 int + for yyj1 = 0; z.DecContainerNext(yyj1, yyl1, yyhl1); yyj1++ { + if yyj1 == 0 && yyv1 == nil { + if yyhl1 { + yyrl1 = 
z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + } else { + yyrl1 = 8 + } + yyv1 = make([]TransactionInternal, yyrl1) + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + var yydb1 bool + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, TransactionInternal{}) + yyc1 = true + } + if yydb1 { + z.DecSwallow() + } else { + if yyxt3 := z.Extension(yyv1[yyj1]); yyxt3 != nil { + z.DecExtension(&yyv1[yyj1], yyxt3) + } else { + yyv1[yyj1].CodecDecodeSelf(d) + } + } + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []TransactionInternal{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/core/chains/evm/types/internal/blocks/transactions.go b/core/chains/evm/types/internal/blocks/transactions.go new file mode 100644 index 00000000..b607e2dc --- /dev/null +++ b/core/chains/evm/types/internal/blocks/transactions.go @@ -0,0 +1,45 @@ +package blocks + +import ( + "bytes" + "math" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" +) + +type TxType uint8 + +// NOTE: Need to roll our own unmarshaller since geth's hexutil.Uint64 does not +// handle double zeroes e.g. 
0x00 +func (txt *TxType) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, []byte(`"0x00"`)) { + data = []byte(`"0x0"`) + } + var hx hexutil.Uint64 + if err := (&hx).UnmarshalJSON(data); err != nil { + return err + } + if hx > math.MaxUint8 { + return errors.Errorf("expected 'type' to fit into a single byte, got: '%s'", data) + } + *txt = TxType(hx) + return nil +} + +func (txt *TxType) MarshalText() ([]byte, error) { + hx := (hexutil.Uint64)(*txt) + return hx.MarshalText() +} + +// TransactionInternal is JSON-serialization optimized intermediate representation between EVM blocks +// and our public representation +type TransactionInternal struct { + GasPrice *hexutil.Big `json:"gasPrice"` + Gas *hexutil.Uint64 `json:"gas"` + MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"` + MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"` + Type *TxType `json:"type"` + Hash common.Hash `json:"hash"` +} diff --git a/core/chains/evm/types/models.go b/core/chains/evm/types/models.go new file mode 100644 index 00000000..97e6ddc3 --- /dev/null +++ b/core/chains/evm/types/models.go @@ -0,0 +1,613 @@ +package types + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" + "math/big" + "regexp" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + "github.com/ugorji/go/codec" + + "github.com/goplugin/plugin-common/pkg/utils/hex" + htrktypes "github.com/goplugin/pluginv3.0/v2/common/headtracker/types" + commontypes "github.com/goplugin/pluginv3.0/v2/common/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types/internal/blocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/null" +) + +// Head represents a BlockNumber, 
BlockHash. +type Head struct { + ID uint64 + Hash common.Hash + Number int64 + L1BlockNumber null.Int64 + ParentHash common.Hash + Parent *Head + EVMChainID *ubig.Big + Timestamp time.Time + CreatedAt time.Time + BaseFeePerGas *assets.Wei + ReceiptsRoot common.Hash + TransactionsRoot common.Hash + StateRoot common.Hash + Difficulty *big.Int + TotalDifficulty *big.Int +} + +var _ commontypes.Head[common.Hash] = &Head{} +var _ htrktypes.Head[common.Hash, *big.Int] = &Head{} + +// NewHead returns a Head instance. +func NewHead(number *big.Int, blockHash common.Hash, parentHash common.Hash, timestamp uint64, chainID *ubig.Big) Head { + return Head{ + Number: number.Int64(), + Hash: blockHash, + ParentHash: parentHash, + Timestamp: time.Unix(int64(timestamp), 0), + EVMChainID: chainID, + } +} + +func (h *Head) BlockNumber() int64 { + return h.Number +} + +func (h *Head) BlockHash() common.Hash { + return h.Hash +} + +func (h *Head) GetParentHash() common.Hash { + return h.ParentHash +} + +func (h *Head) GetParent() commontypes.Head[common.Hash] { + if h.Parent == nil { + return nil + } + return h.Parent +} + +func (h *Head) GetTimestamp() time.Time { + return h.Timestamp +} + +func (h *Head) BlockDifficulty() *big.Int { + return h.Difficulty +} + +// EarliestInChain recurses through parents until it finds the earliest one +func (h *Head) EarliestInChain() *Head { + for h.Parent != nil { + h = h.Parent + } + return h +} + +// EarliestHeadInChain recurses through parents until it finds the earliest one +func (h *Head) EarliestHeadInChain() commontypes.Head[common.Hash] { + return h.EarliestInChain() +} + +// IsInChain returns true if the given hash matches the hash of a head in the chain +func (h *Head) IsInChain(blockHash common.Hash) bool { + for { + if h.Hash == blockHash { + return true + } + if h.Parent == nil { + break + } + h = h.Parent + } + return false +} + +// HashAtHeight returns the hash of the block at the given height, if it is in the chain. 
+// If not in chain, returns the zero hash +func (h *Head) HashAtHeight(blockNum int64) common.Hash { + for { + if h.Number == blockNum { + return h.Hash + } + if h.Parent == nil { + break + } + h = h.Parent + } + return common.Hash{} +} + +// ChainLength returns the length of the chain followed by recursively looking up parents +func (h *Head) ChainLength() uint32 { + if h == nil { + return 0 + } + l := uint32(1) + + for { + if h.Parent == nil { + break + } + l++ + if h == h.Parent { + panic("circular reference detected") + } + h = h.Parent + } + return l +} + +// ChainHashes returns an array of block hashes by recursively looking up parents +func (h *Head) ChainHashes() []common.Hash { + var hashes []common.Hash + + for { + hashes = append(hashes, h.Hash) + if h.Parent == nil { + break + } + if h == h.Parent { + panic("circular reference detected") + } + h = h.Parent + } + return hashes +} + +func (h *Head) ChainID() *big.Int { + return h.EVMChainID.ToInt() +} + +func (h *Head) HasChainID() bool { + return h.EVMChainID != nil +} + +func (h *Head) IsValid() bool { + return h != nil +} + +func (h *Head) ChainString() string { + var sb strings.Builder + + for { + sb.WriteString(h.String()) + if h.Parent == nil { + break + } + if h == h.Parent { + panic("circular reference detected") + } + sb.WriteString("->") + h = h.Parent + } + sb.WriteString("->nil") + return sb.String() +} + +// String returns a string representation of this head +func (h *Head) String() string { + return fmt.Sprintf("Head{Number: %d, Hash: %s, ParentHash: %s}", h.ToInt(), h.Hash.Hex(), h.ParentHash.Hex()) +} + +// ToInt returns the height as a *big.Int. Also handles nil by returning nil. 
+func (h *Head) ToInt() *big.Int { + if h == nil { + return nil + } + return big.NewInt(h.Number) +} + +// GreaterThan compares BlockNumbers and returns true if the receiver BlockNumber is greater than +// the supplied BlockNumber +func (h *Head) GreaterThan(r *Head) bool { + if h == nil { + return false + } + if h != nil && r == nil { + return true + } + return h.Number > r.Number +} + +// NextInt returns the next BlockNumber as big.int, or nil if nil to represent latest. +func (h *Head) NextInt() *big.Int { + if h == nil { + return nil + } + return new(big.Int).Add(h.ToInt(), big.NewInt(1)) +} + +// AsSlice returns a slice of heads up to length k +// len(heads) may be less than k if the available chain is not long enough +func (h *Head) AsSlice(k int) (heads []*Head) { + if k < 1 || h == nil { + return + } + heads = make([]*Head, 1) + heads[0] = h + for len(heads) < k && h.Parent != nil { + h = h.Parent + heads = append(heads, h) + } + return +} + +func (h *Head) UnmarshalJSON(bs []byte) error { + type head struct { + Hash common.Hash `json:"hash"` + Number *hexutil.Big `json:"number"` + ParentHash common.Hash `json:"parentHash"` + Timestamp hexutil.Uint64 `json:"timestamp"` + L1BlockNumber *hexutil.Big `json:"l1BlockNumber"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas"` + ReceiptsRoot common.Hash `json:"receiptsRoot"` + TransactionsRoot common.Hash `json:"transactionsRoot"` + StateRoot common.Hash `json:"stateRoot"` + Difficulty *hexutil.Big `json:"difficulty"` + TotalDifficulty *hexutil.Big `json:"totalDifficulty"` + } + + var jsonHead head + err := json.Unmarshal(bs, &jsonHead) + if err != nil { + return err + } + + if jsonHead.Number == nil { + *h = Head{} + return nil + } + + h.Hash = jsonHead.Hash + h.Number = (*big.Int)(jsonHead.Number).Int64() + h.ParentHash = jsonHead.ParentHash + h.Timestamp = time.Unix(int64(jsonHead.Timestamp), 0).UTC() + h.BaseFeePerGas = assets.NewWei((*big.Int)(jsonHead.BaseFeePerGas)) + if jsonHead.L1BlockNumber != nil { + 
h.L1BlockNumber = null.Int64From((*big.Int)(jsonHead.L1BlockNumber).Int64()) + } + h.ReceiptsRoot = jsonHead.ReceiptsRoot + h.TransactionsRoot = jsonHead.TransactionsRoot + h.StateRoot = jsonHead.StateRoot + h.Difficulty = jsonHead.Difficulty.ToInt() + h.TotalDifficulty = jsonHead.TotalDifficulty.ToInt() + return nil +} + +func (h *Head) MarshalJSON() ([]byte, error) { + type head struct { + Hash *common.Hash `json:"hash,omitempty"` + Number *hexutil.Big `json:"number,omitempty"` + ParentHash *common.Hash `json:"parentHash,omitempty"` + Timestamp *hexutil.Uint64 `json:"timestamp,omitempty"` + ReceiptsRoot *common.Hash `json:"receiptsRoot,omitempty"` + TransactionsRoot *common.Hash `json:"transactionsRoot,omitempty"` + StateRoot *common.Hash `json:"stateRoot,omitempty"` + Difficulty *hexutil.Big `json:"difficulty,omitempty"` + TotalDifficulty *hexutil.Big `json:"totalDifficulty,omitempty"` + } + + var jsonHead head + if h.Hash != (common.Hash{}) { + jsonHead.Hash = &h.Hash + } + if h.ReceiptsRoot != (common.Hash{}) { + jsonHead.ReceiptsRoot = &h.ReceiptsRoot + } + if h.TransactionsRoot != (common.Hash{}) { + jsonHead.TransactionsRoot = &h.TransactionsRoot + } + if h.StateRoot != (common.Hash{}) { + jsonHead.StateRoot = &h.StateRoot + } + jsonHead.Number = (*hexutil.Big)(big.NewInt(h.Number)) + if h.ParentHash != (common.Hash{}) { + jsonHead.ParentHash = &h.ParentHash + } + if h.Timestamp != (time.Time{}) { + t := hexutil.Uint64(h.Timestamp.UTC().Unix()) + jsonHead.Timestamp = &t + } + jsonHead.Difficulty = (*hexutil.Big)(h.Difficulty) + jsonHead.TotalDifficulty = (*hexutil.Big)(h.TotalDifficulty) + return json.Marshal(jsonHead) +} + +// Block represents an ethereum block +// This type is only used for the block history estimator, and can be expensive to unmarshal. Don't add unnecessary fields here. 
+type Block struct { + Number int64 + Hash common.Hash + ParentHash common.Hash + BaseFeePerGas *assets.Wei + Timestamp time.Time + Transactions []Transaction +} + +// MarshalJSON implements json marshalling for Block +func (b Block) MarshalJSON() ([]byte, error) { + bi := &blocks.BlockInternal{ + Number: hexutil.EncodeBig(big.NewInt(b.Number)), + Hash: b.Hash, + ParentHash: b.ParentHash, + BaseFeePerGas: (*hexutil.Big)(b.BaseFeePerGas), + Timestamp: (hexutil.Uint64)(uint64(b.Timestamp.Unix())), + Transactions: toInternalTxnSlice(b.Transactions), + } + + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + enc := codec.NewEncoder(buf, &codec.JsonHandle{}) + err := enc.Encode(bi) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +var ErrMissingBlock = errors.New("missing block") + +// UnmarshalJSON unmarshals to a Block +func (b *Block) UnmarshalJSON(data []byte) error { + + var h codec.Handle = new(codec.JsonHandle) + bi := blocks.BlockInternal{} + + dec := codec.NewDecoderBytes(data, h) + err := dec.Decode(&bi) + + if err != nil { + return err + } + if bi.Empty() { + return errors.WithStack(ErrMissingBlock) + } + + n, err := hexutil.DecodeBig(bi.Number) + if err != nil { + return errors.Wrapf(err, "failed to decode block number while unmarshalling block, got: '%s' in '%s'", bi.Number, data) + } + *b = Block{ + Number: n.Int64(), + Hash: bi.Hash, + ParentHash: bi.ParentHash, + BaseFeePerGas: (*assets.Wei)(bi.BaseFeePerGas), + Timestamp: time.Unix((int64((uint64)(bi.Timestamp))), 0), + Transactions: fromInternalTxnSlice(bi.Transactions), + } + return nil +} + +// thin public wrapper for internal type of the same name +// and which has to be internal for JSON un/marshal'ing code gen consistency +type TxType uint8 + +// Transaction represents an ethereum transaction +// Use our own type because geth's type has validation failures on e.g. zero +// gas used, which can occur on other chains. 
+// This type is only used for the block history estimator, and can be expensive to unmarshal. Don't add unnecessary fields here. +type Transaction struct { + GasPrice *assets.Wei `json:"gasPrice"` + GasLimit uint32 `json:"gasLimit"` + MaxFeePerGas *assets.Wei `json:"maxFeePerGas"` + MaxPriorityFeePerGas *assets.Wei `json:"maxPriorityFeePerGas"` + Type TxType `json:"type"` + Hash common.Hash `json:"hash"` +} + +const LegacyTxType = blocks.TxType(0x0) + +// UnmarshalJSON unmarshals a Transaction +func (t *Transaction) UnmarshalJSON(data []byte) error { + + var h codec.Handle = new(codec.JsonHandle) + ti := blocks.TransactionInternal{} + + dec := codec.NewDecoderBytes(data, h) + err := dec.Decode(&ti) + + if err != nil { + return err + } + + if ti.Gas == nil { + return errors.Errorf("expected 'gas' to not be null, got: '%s'", data) + } + if ti.Type == nil { + tpe := LegacyTxType + ti.Type = &tpe + } + *t = fromInternalTxn(ti) + + return nil +} + +func (t *Transaction) MarshalJSON() ([]byte, error) { + + ti := toInternalTxn(*t) + + buf := bytes.NewBuffer(make([]byte, 0, 256)) + enc := codec.NewEncoder(buf, &codec.JsonHandle{}) + + err := enc.Encode(ti) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// WeiPerEth is amount of Wei currency units in one Eth. +var WeiPerEth = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil) + +// PluginFulfilledTopic is the signature for the event emitted after calling +// PluginClient.validatePluginCallback(requestId). See +// ../../contracts/src/v0.6/PluginClient.sol +var PluginFulfilledTopic = utils.MustHash("PluginFulfilled(bytes32)") + +// ReceiptIndicatesRunLogFulfillment returns true if this tx receipt is the result of a +// fulfilled run log. 
+func ReceiptIndicatesRunLogFulfillment(txr types.Receipt) bool { + for _, log := range txr.Logs { + if log.Topics[0] == PluginFulfilledTopic { + return true + } + } + return false +} + +// FunctionSelector is the first four bytes of the call data for a +// function call and specifies the function to be called. +type FunctionSelector [FunctionSelectorLength]byte + +// FunctionSelectorLength should always be a length of 4 as a byte. +const FunctionSelectorLength = 4 + +// BytesToFunctionSelector converts the given bytes to a FunctionSelector. +func BytesToFunctionSelector(b []byte) FunctionSelector { + var f FunctionSelector + f.SetBytes(b) + return f +} + +// HexToFunctionSelector converts the given string to a FunctionSelector. +func HexToFunctionSelector(s string) FunctionSelector { + return BytesToFunctionSelector(common.FromHex(s)) +} + +// String returns the FunctionSelector as a string type. +func (f FunctionSelector) String() string { return hexutil.Encode(f[:]) } + +// Bytes returns the FunctionSelector as a byte slice +func (f FunctionSelector) Bytes() []byte { return f[:] } + +// SetBytes sets the FunctionSelector to that of the given bytes (will trim). +func (f *FunctionSelector) SetBytes(b []byte) { copy(f[:], b[:FunctionSelectorLength]) } + +var hexRegexp = regexp.MustCompile("^[0-9a-fA-F]*$") + +func unmarshalFromString(s string, f *FunctionSelector) error { + if hex.HasPrefix(s) { + if !hexRegexp.Match([]byte(s)[2:]) { + return fmt.Errorf("function selector %s must be 0x-hex encoded", s) + } + bytes := common.FromHex(s) + if len(bytes) != FunctionSelectorLength { + return errors.New("function ID must be 4 bytes in length") + } + f.SetBytes(bytes) + } else { + bytes, err := utils.Keccak256([]byte(s)) + if err != nil { + return err + } + f.SetBytes(bytes[0:4]) + } + return nil +} + +// UnmarshalJSON parses the raw FunctionSelector and sets the FunctionSelector +// type to the given input. 
+func (f *FunctionSelector) UnmarshalJSON(input []byte) error { + var s string + err := json.Unmarshal(input, &s) + if err != nil { + return err + } + return unmarshalFromString(s, f) +} + +// MarshalJSON returns the JSON encoding of f +func (f FunctionSelector) MarshalJSON() ([]byte, error) { + return json.Marshal(f.String()) +} + +// Value returns this instance serialized for database storage +func (f FunctionSelector) Value() (driver.Value, error) { + return f.Bytes(), nil +} + +// Scan returns the selector from its serialization in the database +func (f *FunctionSelector) Scan(value interface{}) error { + temp, ok := value.([]byte) + if !ok { + return fmt.Errorf("unable to convent %v of type %T to FunctionSelector", value, value) + } + if len(temp) != FunctionSelectorLength { + return fmt.Errorf("function selector %v should have length %d, but has length %d", + temp, FunctionSelectorLength, len(temp)) + } + copy(f[:], temp) + return nil +} + +// This data can contain anything and is submitted by user on-chain, so we must +// be extra careful how we interact with it +type UntrustedBytes []byte + +// SafeByteSlice returns an error on out of bounds access to a byte array, where a +// normal slice would panic instead +func (ary UntrustedBytes) SafeByteSlice(start int, end int) ([]byte, error) { + if end > len(ary) || start > end || start < 0 || end < 0 { + var empty []byte + return empty, errors.New("out of bounds slice access") + } + return ary[start:end], nil +} + +// toInternalTxn converts a Transaction into the internal intermediate representation +func toInternalTxn(txn Transaction) blocks.TransactionInternal { + gas := (hexutil.Uint64)(uint64(txn.GasLimit)) + itype := blocks.TxType(txn.Type) + return blocks.TransactionInternal{ + GasPrice: (*hexutil.Big)(txn.GasPrice), + Gas: &gas, + MaxFeePerGas: (*hexutil.Big)(txn.MaxFeePerGas), + MaxPriorityFeePerGas: (*hexutil.Big)(txn.MaxPriorityFeePerGas), + Type: &itype, + Hash: txn.Hash, + } +} + +// toInternalTxnSlice 
converts a []Transaction into the internal intermediate representation +func toInternalTxnSlice(txns []Transaction) []blocks.TransactionInternal { + out := make([]blocks.TransactionInternal, len(txns)) + for i, txn := range txns { + out[i] = toInternalTxn(txn) + } + return out +} + +// fromInternalTxn converts an internal intermediate representation into a Transaction +func fromInternalTxn(ti blocks.TransactionInternal) Transaction { + if ti.Type == nil { + tpe := LegacyTxType + ti.Type = &tpe + } + return Transaction{ + GasPrice: (*assets.Wei)(ti.GasPrice), + GasLimit: uint32(*ti.Gas), + MaxFeePerGas: (*assets.Wei)(ti.MaxFeePerGas), + MaxPriorityFeePerGas: (*assets.Wei)(ti.MaxPriorityFeePerGas), + Type: TxType(*ti.Type), + Hash: ti.Hash, + } +} + +// fromInternalTxnSlice converts a slice of internal intermediate representation into a []Transaction +func fromInternalTxnSlice(tis []blocks.TransactionInternal) []Transaction { + out := make([]Transaction, len(tis)) + for i, ti := range tis { + out[i] = fromInternalTxn(ti) + } + return out +} diff --git a/core/chains/evm/types/models_test.go b/core/chains/evm/types/models_test.go new file mode 100644 index 00000000..cd2f40c7 --- /dev/null +++ b/core/chains/evm/types/models_test.go @@ -0,0 +1,863 @@ +package types_test + +import ( + "bytes" + "encoding/json" + "fmt" + "math" + "math/big" + "strconv" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/utils/hex" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/null" +) + +func TestHead_NewHead(t *testing.T) { + t.Parallel() + tests := []struct { + input *big.Int + want string + }{ + {big.NewInt(0), "0"}, + {big.NewInt(0xf), "f"}, + {big.NewInt(0x10), "10"}, + } + for _, test := range tests { + t.Run(test.want, func(t *testing.T) { + num := evmtypes.NewHead(test.input, utils.NewHash(), utils.NewHash(), 0, nil) + assert.Equal(t, test.want, fmt.Sprintf("%x", num.ToInt())) + }) + } +} + +func TestHead_GreaterThan(t *testing.T) { + t.Parallel() + tests := []struct { + name string + left *evmtypes.Head + right *evmtypes.Head + greater bool + }{ + {"nil nil", nil, nil, false}, + {"present nil", cltest.Head(1), nil, true}, + {"nil present", nil, cltest.Head(1), false}, + {"less", cltest.Head(1), cltest.Head(2), false}, + {"equal", cltest.Head(2), cltest.Head(2), false}, + {"greater", cltest.Head(2), cltest.Head(1), true}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + assert.Equal(t, test.greater, test.left.GreaterThan(test.right)) + }) + } +} + +func TestHead_NextInt(t *testing.T) { + t.Parallel() + tests := []struct { + name string + bn *evmtypes.Head + want *big.Int + }{ + {"nil", nil, nil}, + {"one", cltest.Head(1), big.NewInt(2)}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + assert.Equal(t, test.want, test.bn.NextInt()) + }) + } +} + +func TestEthTx_GetID(t *testing.T) { + tx := txmgr.Tx{ID: math.MinInt64} + assert.Equal(t, "-9223372036854775808", tx.GetID()) +} + +func TestEthTxAttempt_GetSignedTx(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + tx := 
cltest.NewLegacyTransaction(uint64(42), testutils.NewAddress(), big.NewInt(142), 242, big.NewInt(342), []byte{1, 2, 3}) + + chainID := big.NewInt(3) + + signedTx, err := ethKeyStore.SignTx(fromAddress, tx, chainID) + require.NoError(t, err) + rlp := new(bytes.Buffer) + require.NoError(t, signedTx.EncodeRLP(rlp)) + + attempt := txmgr.TxAttempt{SignedRawTx: rlp.Bytes()} + + gotSignedTx, err := txmgr.GetGethSignedTx(attempt.SignedRawTx) + require.NoError(t, err) + decodedEncoded := new(bytes.Buffer) + require.NoError(t, gotSignedTx.EncodeRLP(decodedEncoded)) + + require.Equal(t, signedTx.Hash(), gotSignedTx.Hash()) + require.Equal(t, attempt.SignedRawTx, decodedEncoded.Bytes()) +} + +func TestHead_ChainLength(t *testing.T) { + head := evmtypes.Head{ + Parent: &evmtypes.Head{ + Parent: &evmtypes.Head{}, + }, + } + + assert.Equal(t, uint32(3), head.ChainLength()) + + var head2 *evmtypes.Head + assert.Equal(t, uint32(0), head2.ChainLength()) +} + +func TestHead_AsSlice(t *testing.T) { + h1 := &evmtypes.Head{ + Number: 1, + } + h2 := &evmtypes.Head{ + Number: 2, + Parent: h1, + } + h3 := &evmtypes.Head{ + Number: 3, + Parent: h2, + } + + assert.Len(t, (*evmtypes.Head)(nil).AsSlice(0), 0) + assert.Len(t, (*evmtypes.Head)(nil).AsSlice(1), 0) + + assert.Len(t, h3.AsSlice(0), 0) + assert.Equal(t, []*evmtypes.Head{h3}, h3.AsSlice(1)) + assert.Equal(t, []*evmtypes.Head{h3, h2}, h3.AsSlice(2)) + assert.Equal(t, []*evmtypes.Head{h3, h2, h1}, h3.AsSlice(3)) + assert.Equal(t, []*evmtypes.Head{h3, h2, h1}, h3.AsSlice(4)) +} + +func TestModels_HexToFunctionSelector(t *testing.T) { + t.Parallel() + fid := evmtypes.HexToFunctionSelector("0xb3f98adc") + assert.Equal(t, "0xb3f98adc", fid.String()) +} + +func TestModels_HexToFunctionSelectorOverflow(t *testing.T) { + t.Parallel() + fid := evmtypes.HexToFunctionSelector("0xb3f98adc123456") + assert.Equal(t, "0xb3f98adc", fid.String()) +} + +func TestModels_FunctionSelectorUnmarshalJSON(t *testing.T) { + t.Parallel() + bytes := 
[]byte(`"0xb3f98adc"`) + var fid evmtypes.FunctionSelector + err := json.Unmarshal(bytes, &fid) + assert.NoError(t, err) + assert.Equal(t, "0xb3f98adc", fid.String()) +} + +func TestModels_FunctionSelectorUnmarshalJSONLiteral(t *testing.T) { + t.Parallel() + literalSelectorBytes := []byte(`"setBytes(bytes)"`) + var fid evmtypes.FunctionSelector + err := json.Unmarshal(literalSelectorBytes, &fid) + assert.NoError(t, err) + assert.Equal(t, "0xda359dc8", fid.String()) +} + +func TestModels_FunctionSelectorUnmarshalJSONError(t *testing.T) { + t.Parallel() + bytes := []byte(`"0xb3f98adc123456"`) + var fid evmtypes.FunctionSelector + err := json.Unmarshal(bytes, &fid) + assert.Error(t, err) +} + +func TestSafeByteSlice_Success(t *testing.T) { + tests := []struct { + ary evmtypes.UntrustedBytes + start int + end int + expected []byte + }{ + {[]byte{1, 2, 3}, 0, 0, []byte{}}, + {[]byte{1, 2, 3}, 0, 1, []byte{1}}, + {[]byte{1, 2, 3}, 1, 3, []byte{2, 3}}, + } + + for i, test := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + actual, err := test.ary.SafeByteSlice(test.start, test.end) + assert.NoError(t, err) + assert.Equal(t, test.expected, actual) + }) + } +} + +func TestSafeByteSlice_Error(t *testing.T) { + tests := []struct { + ary evmtypes.UntrustedBytes + start int + end int + }{ + {[]byte{1, 2, 3}, 2, -1}, + {[]byte{1, 2, 3}, 0, 4}, + {[]byte{1, 2, 3}, 3, 4}, + {[]byte{1, 2, 3}, 3, 2}, + {[]byte{1, 2, 3}, -1, 2}, + } + + for i, test := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + actual, err := test.ary.SafeByteSlice(test.start, test.end) + assert.EqualError(t, err, "out of bounds slice access") + var expected []byte + assert.Equal(t, expected, actual) + }) + } +} + +func TestHead_EarliestInChain(t *testing.T) { + head := evmtypes.Head{ + Number: 3, + Parent: &evmtypes.Head{ + Number: 2, + Parent: &evmtypes.Head{ + Number: 1, + }, + }, + } + + assert.Equal(t, int64(1), head.EarliestInChain().BlockNumber()) +} + +func TestHead_IsInChain(t 
*testing.T) { + hash1 := utils.NewHash() + hash2 := utils.NewHash() + hash3 := utils.NewHash() + + head := evmtypes.Head{ + Number: 3, + Hash: hash3, + Parent: &evmtypes.Head{ + Hash: hash2, + Number: 2, + Parent: &evmtypes.Head{ + Hash: hash1, + Number: 1, + }, + }, + } + + assert.True(t, head.IsInChain(hash1)) + assert.True(t, head.IsInChain(hash2)) + assert.True(t, head.IsInChain(hash3)) + assert.False(t, head.IsInChain(utils.NewHash())) + assert.False(t, head.IsInChain(common.Hash{})) +} + +func TestTxReceipt_ReceiptIndicatesRunLogFulfillment(t *testing.T) { + tests := []struct { + name string + path string + want bool + }{ + {"basic", "../../../testdata/jsonrpc/getTransactionReceipt.json", false}, + {"runlog request", "../../../testdata/jsonrpc/runlogReceipt.json", false}, + {"runlog response", "../../../testdata/jsonrpc/responseReceipt.json", true}, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + receipt := cltest.TxReceiptFromFixture(t, test.path) + require.Equal(t, test.want, evmtypes.ReceiptIndicatesRunLogFulfillment(*receipt)) + }) + } +} + +func TestHead_UnmarshalJSON(t *testing.T) { + tests := []struct { + name string + json string + expected evmtypes.Head + }{ + {"geth", + 
`{"difficulty":"0xf3a00","extraData":"0xd883010503846765746887676f312e372e318664617277696e","gasLimit":"0xffc001","gasUsed":"0x0","hash":"0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0xd1aeb42885a43b72b518182ef893125814811048","mixHash":"0x0f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","nonce":"0x0ece08ea8c49dfd9","number":"0x100","parentHash":"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x218","stateRoot":"0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b","timestamp":"0x58318da2","totalDifficulty":"0x1f3a00","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}`, + evmtypes.Head{ + Hash: common.HexToHash("0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a"), + Number: 0x100, + ParentHash: common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d"), + Timestamp: time.Unix(0x58318da2, 0).UTC(), + ReceiptsRoot: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + TransactionsRoot: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + StateRoot: 
common.HexToHash("0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b"), + }, + }, + {"parity", + `{"author":"0xd1aeb42885a43b72b518182ef893125814811048","difficulty":"0xf3a00","extraData":"0xd883010503846765746887676f312e372e318664617277696e","gasLimit":"0xffc001","gasUsed":"0x0","hash":"0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0xd1aeb42885a43b72b518182ef893125814811048","mixHash":"0x0f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","nonce":"0x0ece08ea8c49dfd9","number":"0x100","parentHash":"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":["0xa00f98b15f1a4901a7e9204f3c500a7bd527b3fb2c3340e12176a44b83e414a69e","0x880ece08ea8c49dfd9"],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x218","stateRoot":"0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b","timestamp":"0x58318da2","totalDifficulty":"0x1f3a00","transactions":[],"transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","uncles":[]}`, + evmtypes.Head{ + Hash: common.HexToHash("0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a"), + Number: 0x100, + ParentHash: common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d"), + Timestamp: time.Unix(0x58318da2, 0).UTC(), + 
ReceiptsRoot: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + TransactionsRoot: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + StateRoot: common.HexToHash("0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b"), + }, + }, + {"arbitrum", + `{"number":"0x15156","hash":"0x752dab43f7a2482db39227d46cd307623b26167841e2207e93e7566ab7ab7871","parentHash":"0x923ad1e27c1d43cb2d2fb09e26d2502ca4b4914a2e0599161d279c6c06117d34","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","transactionsRoot":"0x71448077f5ce420a8e24db62d4d58e8d8e6ad2c7e76318868e089d41f7e0faf3","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x2c292672b8fc9d223647a2569e19721f0757c96a1421753a93e141f8e56cf504","miner":"0x0000000000000000000000000000000000000000","difficulty":"0x0","totalDifficulty":"0x0","extraData":"0x","size":"0x0","gasLimit":"0x11278208","gasUsed":"0x3d1fe9","timestamp":"0x60d0952d","transactions":["0xa1ea93556b93ed3b45cb24f21c8deb584e6a9049c35209242651bf3533c23b98","0xfc6593c45ba92351d17173aa1381e84734d252ab0169887783039212c4a41024","0x85ee9d04fd0ebb5f62191eeb53cb45d9c0945d43eba444c3548de2ac8421682f","0x50d120936473e5b75f6e04829ad4eeca7a1df7d3c5026ebb5d34af936a39b29c"],"uncles":[],"l1BlockNu
mber":"0x8652f9"}`, + evmtypes.Head{ + Hash: common.HexToHash("0x752dab43f7a2482db39227d46cd307623b26167841e2207e93e7566ab7ab7871"), + Number: 0x15156, + ParentHash: common.HexToHash("0x923ad1e27c1d43cb2d2fb09e26d2502ca4b4914a2e0599161d279c6c06117d34"), + Timestamp: time.Unix(0x60d0952d, 0).UTC(), + L1BlockNumber: null.Int64From(0x8652f9), + ReceiptsRoot: common.HexToHash("0x2c292672b8fc9d223647a2569e19721f0757c96a1421753a93e141f8e56cf504"), + TransactionsRoot: common.HexToHash("0x71448077f5ce420a8e24db62d4d58e8d8e6ad2c7e76318868e089d41f7e0faf3"), + StateRoot: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + }, + }, + {"not found", + `null`, + evmtypes.Head{}, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + var head evmtypes.Head + err := head.UnmarshalJSON([]byte(test.json)) + require.NoError(t, err) + assert.Equal(t, test.expected.Hash, head.Hash) + assert.Equal(t, test.expected.Number, head.Number) + assert.Equal(t, test.expected.ParentHash, head.ParentHash) + assert.Equal(t, test.expected.Timestamp.UTC().Unix(), head.Timestamp.UTC().Unix()) + assert.Equal(t, test.expected.L1BlockNumber, head.L1BlockNumber) + assert.Equal(t, test.expected.ReceiptsRoot, head.ReceiptsRoot) + assert.Equal(t, test.expected.TransactionsRoot, head.TransactionsRoot) + assert.Equal(t, test.expected.StateRoot, head.StateRoot) + }) + } +} + +func TestHead_MarshalJSON(t *testing.T) { + tests := []struct { + name string + head evmtypes.Head + expected string + }{ + {"happy", + evmtypes.Head{ + Hash: common.HexToHash("0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a"), + Number: 0x100, + ParentHash: common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d"), + Timestamp: time.Unix(0x58318da2, 0).UTC(), + ReceiptsRoot: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + TransactionsRoot: 
common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + StateRoot: common.HexToHash("0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b"), + }, + `{"hash":"0x41800b5c3f1717687d85fc9018faac0a6e90b39deaa0b99e7fe4fe796ddeb26a","number":"0x100","parentHash":"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d","timestamp":"0x58318da2","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","stateRoot":"0xc7b01007a10da045eacb90385887dd0c38fcb5db7393006bdde24b93873c334b"}`, + }, + {"empty", + evmtypes.Head{}, + `{"number":"0x0"}`, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + bs, err := test.head.MarshalJSON() + require.NoError(t, err) + require.Equal(t, test.expected, string(bs)) + }) + } +} + +const gethSampleBlock = ` +{ + "baseFeePerGas": "0x93d0d7cd1", + "difficulty": "0x2cea10d39a7363", + "extraData": "0x706f6f6c696e2e636f6d22ed69bddc3cbc3b40", + "gasLimit": "0x1c95111", + "gasUsed": "0x1c93dad", + "hash": "0x45eb0a650b6b0b9fd1ee676b870e43fa7614f1034f7404070327a332faed05c0", + "logsBloom": "0x6fb5cf075bea78f7df6daa6f9bf477bb6fefb775ddcdae7da49dedbbbff7d9ff6ffddbeffeb72df6fe1bffb1d60f4ff3d277f67d7b17b9d8f2bda5c7ba7ff95d7ddbd5fb37fa4ffffdcbaebf35f7fbf71fef76e4dffd19e8be4f3fc5ebd7bb9ef75debf822bffeaf553d15fde5fc2dd136bfcef9dfff0dfbceb897feef7e65c7f2f1afa32fef697eb64d356f4bf8f63f5fb5f5fdb3fd763db7fdafeaeff57fb1ffce7bd737ffed777bdfffd3df5ccdff58fdb66f76d0f3bbfff5fff671edbe777ddbfb373fa5b6e7bfafeffff68ff7ddbf756fdadf9e85dd4fb7bfbbd6de7ffffff377afda5eb5bfff3f79b7fd37ffe73e7dbbf7fbbd5d6fb05b3bf7ee77f2ef", + "miner": "0x2daa35962a6d43eb54c48367b33d0b379c930e5e", + "mixHash": "0x595101379a658f7ff50495da8d9c81dacd9335662f2fd69916db5875cf99f114", + "nonce": "0xadd59e902f02fa41", + "number": "0xe5a952", + "parentHash": 
"0x653ea251c180d93296ef79378e64d7dc9a74f565a54df477faeb64d3330977dd", + "receiptsRoot": "0xa47d1aa7bd72e74d3d3770ffa64bf934de880f72a63fd80311397065a54d69b5", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x39455", + "stateRoot": "0xf8d62881bd11dc8b5054cbcd9b2650d4bf63699ee9013bd0ddd2c9dca4b3e942", + "timestamp": "0x62bdc207", + "totalDifficulty": "0xb35d9b2c89eef362797", + "transactions": [ + { + "blockHash": "0x45eb0a650b6b0b9fd1ee676b870e43fa7614f1034f7404070327a332faed05c0", + "blockNumber": "0xe5a952", + "from": "0x76e40d0a69fd81826b5eb7d18145626d46eafdef", + "gas": "0xdbba0", + "gasPrice": "0x978a846d2", + "maxFeePerGas": "0xd0892241d", + "maxPriorityFeePerGas": "0x3b9aca01", + "hash": "0x754f49f0a2ca7680806d261dd36ee95ac88a81da59fef0b5d8d691478f075d46", + "input": "0x1cff79cd000000000000000000000000343933efdf64d2d6eeaf2dcd5fbd701541d64f67000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000e44a7d794a000000000000000000000000000000000000000000000005bc94810a20626a9a0000000000000000000000000000000000000000000000000e52b79acdb06152000000000000000000000000000000000000798f836298dfb377b3deeb7ade400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000062bdc2400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000798e37fb7bc47a14e3b89fa086d600000000000000000000000000000000000000000000000000000000", + "nonce": "0xbf65", + "to": "0x4cb18386e5d1f34dc6eea834bf3534a970a3f8e7", + "transactionIndex": "0x0", + "value": "0xa907", + "type": "0x2", + "accessList": [], + "chainId": "0x1", + "v": "0x0", + "r": "0xcbdf4705610d7b20326dcd153491f37f133c34026f3e0abf72f9db03ac98de0e", + "s": "0xa2b2d625d34315e8d6d0543e0f9393d2a14dddaf3678d7f0ed432df9cb8e5c3" + }, + { + "blockHash": "0x45eb0a650b6b0b9fd1ee676b870e43fa7614f1034f7404070327a332faed05c0", + 
"blockNumber": "0xe5a952", + "from": "0xdc54b1e90a6da20b6a63ba5ffcace3e2883f641f", + "gas": "0xda8e", + "gasPrice": "0x99675abd1", + "maxFeePerGas": "0xf0fa066f3", + "maxPriorityFeePerGas": "0x59682f00", + "hash": "0xec7c12169f1f1e1fc5b0d0723dda88b175bd8d99213a9c99b6019c927931f315", + "input": "0x095ea7b300000000000000000000000068b3465833fb72a70ecdf485e0e4c7bd8665fc45ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "nonce": "0x1c", + "to": "0x106552c11272420aad5d7e94f8acab9095a6c952", + "transactionIndex": "0x191", + "value": "0x0", + "type": "0x2", + "accessList": [], + "chainId": "0x1", + "v": "0x1", + "r": "0x80039a5548aa5937d0988fc45b495296bde1449d790d2b74439149e17fc11b7a", + "s": "0x126beedbe5a2876c462f2c7e29152694c03df13e9d68273f21a2716859aec2e0" + }, + { + "blockHash": "0x45eb0a650b6b0b9fd1ee676b870e43fa7614f1034f7404070327a332faed05c0", + "blockNumber": "0xe5a952", + "from": "0x699e6d0ea5cdbaf3eeed5dc68541fbb826184586", + "gas": "0x5208", + "gasPrice": "0x99675abd1", + "maxFeePerGas": "0xebffe7460", + "maxPriorityFeePerGas": "0x59682f00", + "hash": "0x94947a2a3882da035babf81e6579f177d1fb2086e4806324359de9ae3e2b220d", + "input": "0x", + "nonce": "0x153", + "to": "0x6cb24a9e81d9da0b85d2fd22e8092ea3786b85a4", + "transactionIndex": "0x192", + "value": "0x7ad64bd6230c63a", + "type": "0x2", + "accessList": [], + "chainId": "0x1", + "v": "0x0", + "r": "0xa6a0960d3ee106436591783cc2f03da462aadc731985212231b003675b975cbe", + "s": "0x28e7801e2c24fb2ad841a087f8f6d9b02478a2f0e880453340c4c0fe182c5ef0" + }, + { + "blockHash": "0x45eb0a650b6b0b9fd1ee676b870e43fa7614f1034f7404070327a332faed05c0", + "blockNumber": "0xe5a952", + "from": "0x0807c5c8fa8a8229870cfecb6e2e71dcb6a78261", + "gas": "0x183ac", + "gasPrice": "0x99675abd1", + "maxFeePerGas": "0xe06f90913", + "maxPriorityFeePerGas": "0x59682f00", + "hash": "0x44e6a280c2caf190855435603319f75f9260d9ed706537d67aabeac0e48e9cdf", + "input": 
"0xa9059cbb00000000000000000000000085b82324f03f389b7d0d56c25ec8d1ba83fad7b800000000000000000000000000000000000000000000000000000000240284ef", + "nonce": "0x2a", + "to": "0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48", + "transactionIndex": "0x193", + "value": "0x0", + "type": "0x2", + "accessList": [], + "chainId": "0x1", + "v": "0x1", + "r": "0xe3224640b70f6bb899fd1e9695531e272f344fa2102465063d8427a2147331", + "s": "0x59a3640055651ab2db9108e55cf3dfcbe78cadc8973fe0ce95667214e0cede99" + }, + { + "blockHash": "0x45eb0a650b6b0b9fd1ee676b870e43fa7614f1034f7404070327a332faed05c0", + "blockNumber": "0xe5a952", + "from": "0xcbe6fe602ca089489675e617ba2ce6faa52c49df", + "gas": "0x7f8e", + "gasPrice": "0x99675abd1", + "maxFeePerGas": "0xe06f90913", + "maxPriorityFeePerGas": "0x59682f00", + "hash": "0x4309e69bf880efba8fb2fc10217f1b032c1711b3172e62901dc75dd646fb0412", + "input": "0x1998aeef", + "nonce": "0x61", + "to": "0xb75f09b4340aeb85cd5f2dd87d31751edc11ed39", + "transactionIndex": "0x194", + "value": "0xc249fdd32778000", + "type": "0x2", + "accessList": [], + "chainId": "0x1", + "v": "0x1", + "r": "0xb7b480f5ee42552a9da24a6d1d1f9707e52489e3ef21c39e55e38be9e7bf9d04", + "s": "0x77c6bd1ccda3b5b6940f20e847b99bb7f3389d068d3aa1f54b364b0c14196af8" + }, + { + "blockHash": "0x45eb0a650b6b0b9fd1ee676b870e43fa7614f1034f7404070327a332faed05c0", + "blockNumber": "0xe5a952", + "from": "0xcba1a597261fc24a3569a161c6d8157999b3a342", + "gas": "0x5208", + "gasPrice": "0x99675abd1", + "maxFeePerGas": "0x10a414b8d2", + "maxPriorityFeePerGas": "0x59682f00", + "hash": "0x804b80531b93cdb3f34d360b68cc1fec052269742033fcab6bc6240fbda782bd", + "input": "0x", + "nonce": "0x160", + "to": "0xc941abf0ac772d5ca490dea8d7bd0ae123ce1409", + "transactionIndex": "0x195", + "value": "0x3782dace9d900000", + "type": "0x2", + "accessList": [], + "chainId": "0x1", + "v": "0x1", + "r": "0xdef9f95429ccee8c280e8aa01076a0d69bb99a326391ac8cbc7da4ff8ea72edb", + "s": 
"0x6e56cee027e245d90e02093e1cd2126f25e9025339d09a47d65cd6692cced0ec" + }, + { + "blockHash": "0x45eb0a650b6b0b9fd1ee676b870e43fa7614f1034f7404070327a332faed05c0", + "blockNumber": "0xe5a952", + "from": "0x1e082cc52edd1558870f69f46b58d9e09c8b9954", + "gas": "0x5208", + "gasPrice": "0x99675abd1", + "maxFeePerGas": "0xe06f90913", + "maxPriorityFeePerGas": "0x59682f00", + "hash": "0x8bee254970c86f934d25c2b4256e3ae914c45298b353b03e663e55b58cfad334", + "input": "0x", + "nonce": "0x8f", + "to": "0x4733ec9cd2d5ae5bfb41fb4833e7a15e54c4a7a7", + "transactionIndex": "0x196", + "value": "0xb1a2bc2ec50000", + "type": "0x2", + "accessList": [], + "chainId": "0x1", + "v": "0x1", + "r": "0xbe6e36292dc34a4e14f720e769a27edc0434bcad94d4df4be8ba1b60fcd849c6", + "s": "0x493f73cbdf9ad54c9626c778e8dce08f2ae08895d449efbdece719d8a9115654" + } + ], + "transactionsRoot": "0xa2ee120ac64a18e32a2f8a35ff6424992caf95995faba97e5184ecbdff0af289", + "uncles": [] +} +` + +const paritySampleBlock = ` +{ + "author": "0x03801efb0efe2a25ede5dd3a003ae880c0292e4d", + "baseFeePerGas": "0x7", + "difficulty": "0xfffffffffffffffffffffffffffffffe", + "extraData": "0xdb830303038c4f70656e457468657265756d86312e34372e30826c69", + "gasLimit": "0x1c9c380", + "gasUsed": "0x27dfb2", + "hash": "0x0ec62c2a397e114d84ce932387d841787d7ec5757ceba3708386da87934b7c82", + "logsBloom": "0x00000000000000000000000000000000000000000000000000800000000100000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000400000000000000000000000000000000000000000010000000000000000000000000004004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000020000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x03801efb0efe2a25ede5dd3a003ae880c0292e4d", + "number": "0x1ef81ff", + "parentHash": 
"0x3aa1c729fb45888bc1ce777d00bad9637c0b5f7cb48b145ebacc16098e0132d4", + "receiptsRoot": "0xa9fd0285ccd79ad2bd8c6de7cd6b687a1d942b810c613acd43729d2e6cc98240", + "sealFields": [ + "0x8418af6ffb", + "0xb841206930e3357eca1bb5181a07fa4bac1e39008add26679102af0984a69d8c2d3e0d53f8636a1259a395320ac7937a280c20e736ec4c0e7f72f0656409f068de2d00" + ], + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "signature": "206930e3357eca1bb5181a07fa4bac1e39008add26679102af0984a69d8c2d3e0d53f8636a1259a395320ac7937a280c20e736ec4c0e7f72f0656409f068de2d00", + "size": "0x2f65", + "stateRoot": "0xc0c8637c447cd1f2ce3fdf55cd1f9293bac276f5ed37207d9cd6734dd858a54a", + "step": "414150651", + "timestamp": "0x62bdbfec", + "totalDifficulty": "0x1ed3ef000000000000000000000000481b7ff2f", + "transactions": [ + { + "accessList": [], + "blockHash": "0x0ec62c2a397e114d84ce932387d841787d7ec5757ceba3708386da87934b7c82", + "blockNumber": "0x1ef81ff", + "chainId": "0x2a", + "condition": null, + "creates": "0x4f6db99e4652915f1aa7d064e0821f9b35b5e4c5", + "from": "0xefc474ebbe74e0b96f51cf84a00394f7dce563b0", + "gas": "0x272cef", + "gasPrice": "0x21ffed910", + "hash": "0x6c5faccfc9d7a24710ac68ebf5a106d0b8cc3e6a0318098d4867ae0342c3cf4a", + "input": 
"0x6080604052600180546001600160a01b031990811673b27308f9f90d607463bb33ea1bebb41c27ce5ab61790915560028054909116731f98431c8ad98523631ae4a59f267346ea31f9841790553480156200005957600080fd5b5060405162002bc738038062002bc78339810160408190526200007c916200014d565b6200008733620000e0565b600480546001600160a01b03199081166001600160a01b03938416179091556005805482169483169490941790935560038054909316911617905560326006556007805461012c63ffffffff1990911617905562000197565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b80516001600160a01b03811681146200014857600080fd5b919050565b6000806000606084860312156200016357600080fd5b6200016e8462000130565b92506200017e6020850162000130565b91506200018e6040850162000130565b90509250925092565b612a2080620001a76000396000f3fe6080604052600436106100f75760003560e01c8063ad5c46481161008a578063e8e3370011610059578063e8e33700146102be578063ee882d6e146102ec578063f2fde38b1461031c578063fada19f21461033c57600080fd5b8063ad5c464814610229578063baa2abde14610249578063c6bbd5a71461027e578063d9f165dc1461029e57600080fd5b80637485706f116100c65780637485706f146101925780637cc281f4146101b25780638da5cb5b146101d2578063938cfd0e1461020957600080fd5b8063070cb2ba1461011d57806326b156311461013d578063426172d61461015d578063715018a61461017d57600080fd5b36610118576005546001600160a01b03163314610116576101166120dc565b005b600080fd5b34801561012957600080fd5b50610116610138366004612107565b61035c565b34801561014957600080fd5b5061011661015836600461212b565b6103b1565b34801561016957600080fd5b50610116610178366004612107565b610663565b34801561018957600080fd5b506101166106af565b34801561019e57600080fd5b506101166101ad366004612186565b6106e5565b3480156101be57600080fd5b506101166101cd3660046121e2565b6107e8565b3480156101de57600080fd5b506000546001600160a01b03165b6040516001600160a01b0390911681526020015b60405180910390f35b34801561021557600080fd5b506004546101ec906001600160a01b031681565b34801561023557600080fd5b506005546101ec90600
1600160a01b031681565b34801561025557600080fd5b50610269610264366004612252565b610903565b60408051928352602083019190915201610200565b34801561028a57600080fd5b506001546101ec906001600160a01b031681565b3480156102aa57600080fd5b506101166102b9366004612107565b610bb7565b3480156102ca57600080fd5b506102de6102d93660046122c4565b610c03565b604051908152602001610200565b3480156102f857600080fd5b5061030c610307366004612340565b611240565b6040519015158152602001610200565b34801561032857600080fd5b50610116610337366004612107565b611b0f565b34801561034857600080fd5b50610116610357366004612379565b611baa565b6000546001600160a01b0316331461038f5760405162461bcd60e51b8152600401610386906123ea565b60405180910390fd5b600480546001600160a01b0319166001600160a01b0392909216919091179055565b6103bb8383611240565b6103d75760405162461bcd60e51b81526004016103869061241f565b6040516370a0823160e01b815233600482015281906001600160a01b038516906370a0823190602401602060405180830381865afa15801561041d573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610441919061246e565b116104b45760405162461bcd60e51b815260206004820152603960248201527f4e6f7420456e6f75676820436f6c6c65746572616c20496e205573657227732060448201527f57616c6c657420546f2061646420746f20706f736974696f6e000000000000006064820152608401610386565b60035460405163e6a4390560e01b81526000916001600160a01b03169063e6a43905906104e79087908790600401612487565b602060405180830381865afa158015610504573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061052891906124a1565b6040516323b872dd60e01b81529091506001600160a01b038516906323b872dd9061055b908990859087906004016124be565b6020604051808303816000875af115801561057a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061059e91906124f7565b6105ea5760405162461bcd60e51b815260206004820152601f60248201527f416d6f756e7420646964206e6f74207472616e7366657220746f2070616972006044820152606401610386565b604051635ccb6d3f60e11b81526001600160a01b0387811660048301526024820187905285811660448301526064820184905282169063b996da7e906
08401600060405180830381600087803b15801561064357600080fd5b505af1158015610657573d6000803e3d6000fd5b50505050505050505050565b6000546001600160a01b0316331461068d5760405162461bcd60e51b8152600401610386906123ea565b600180546001600160a01b0319166001600160a01b0392909216919091179055565b6000546001600160a01b031633146106d95760405162461bcd60e51b8152600401610386906123ea565b6106e36000611f11565b565b6000546001600160a01b0316331461070f5760405162461bcd60e51b8152600401610386906123ea565b60035460405163e6a4390560e01b81526000916001600160a01b03169063e6a43905906107429086908690600401612487565b602060405180830381865afa15801561075f573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061078391906124a1565b604051630386595d60e11b81526001600160a01b0386811660048301529192509082169063070cb2ba90602401600060405180830381600087803b1580156107ca57600080fd5b505af11580156107de573d6000803e3d6000fd5b5050505050505050565b6107f28484611240565b61080e5760405162461bcd60e51b81526004016103869061241f565b60035460405163e6a4390560e01b81526000916001600160a01b03169063e6a43905906108419088908890600401612487565b602060405180830381865afa15801561085e573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061088291906124a1565b60405163cadcf27b60e01b81526001600160a01b038981166004830152602482018990526044820186905262ffffff851660648301529192509082169063cadcf27b90608401600060405180830381600087803b1580156108e257600080fd5b505af11580156108f6573d6000803e3d6000fd5b5050505050505050505050565b60008082428110156109575760405162461bcd60e51b815260206004820152601f60248201527f4675747572697a655631526f75746572526f757465723a2045585049524544006044820152606401610386565b60035460405163e6a4390560e01b81526000916001600160a01b03169063e6a439059061098a908e908e90600401612487565b602060405180830381865afa1580156109a7573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906109cb91906124a1565b90506109d78b8b611240565b6109f35760405162461bcd60e51b81526004016103869061241f565b6040516323b872dd60e01b81526001600160a01b038216906323b87
2dd90610a2390339085908e906004016124be565b6020604051808303816000875af1158015610a42573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610a6691906124f7565b5060405163226bf2d160e21b81526001600160a01b03878116600483015260009182918416906389afcb449060240160408051808303816000875af1158015610ab3573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610ad79190612512565b9150915089861015610b415760405162461bcd60e51b815260206004820152602d60248201527f4675747572697a655631526f75746572526f757465723a20494e53554646494360448201526c1251539517d057d05353d55395609a1b6064820152608401610386565b88851015610ba75760405162461bcd60e51b815260206004820152602d60248201527f4675747572697a655631526f75746572526f757465723a20494e53554646494360448201526c1251539517d097d05353d55395609a1b6064820152608401610386565b5050505097509795505050505050565b6000546001600160a01b03163314610be15760405162461bcd60e51b8152600401610386906123ea565b600280546001600160a01b0319166001600160a01b0392909216919091179055565b60008142811015610c565760405162461bcd60e51b815260206004820152601f60248201527f4675747572697a655631526f75746572526f757465723a2045585049524544006044820152606401610386565b610c956040518060400160405280602081526020017f616d6f756e74424465736972656420696e20726f75746572206669727374203a81525088611f61565b600254604051630b4c774160e11b81526001600160a01b038c811660048301528b81166024830152610bb860448301526000921690631698ee8290606401602060405180830381865afa158015610cf0573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d1491906124a1565b6001600160a01b03161415610d815760405162461bcd60e51b815260206004820152602d60248201527f4120556e6953776170563320506f6f6c2057697468205468697320466565204460448201526c1bd95cc8139bdd08115e1a5cdd609a1b6064820152608401610386565b60035460405163e6a4390560e01b81526000916001600160a01b03169063e6a4390590610db4908e908e90600401612487565b602060405180830381865afa158015610dd1573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610df591906124a1565b600
1600160a01b03161415610e8b5760035460048054604051630edef2e760e31b81526001600160a01b038e8116938201939093528c8316602482015290821660448201529116906376f79738906064016020604051808303816000875af1158015610e65573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610e8991906124a1565b505b610e958a8a611240565b610eb15760405162461bcd60e51b81526004016103869061241f565b896001600160a01b03166323b872dd33600360009054906101000a90046001600160a01b03166001600160a01b031663e6a439058e8e6040518363ffffffff1660e01b8152600401610f04929190612487565b602060405180830381865afa158015610f21573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610f4591906124a1565b8b6040518463ffffffff1660e01b8152600401610f64939291906124be565b6020604051808303816000875af1158015610f83573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610fa791906124f7565b50886001600160a01b03166323b872dd33600360009054906101000a90046001600160a01b03166001600160a01b031663e6a439058e8e6040518363ffffffff1660e01b8152600401610ffb929190612487565b602060405180830381865afa158015611018573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061103c91906124a1565b8a6040518463ffffffff1660e01b815260040161105b939291906124be565b6020604051808303816000875af115801561107a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061109e91906124f7565b5060035460405163e6a4390560e01b81526001600160a01b039091169063e6a43905906110d1908d908d90600401612487565b602060405180830381865afa1580156110ee573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061111291906124a1565b60015460405163f7729d4360e01b81526001600160a01b038c811660048301528d81166024830152610bb86044830152606482018b9052600060848301529283169263a647e8ec92889291169063f7729d439060a4016020604051808303816000875af1158015611187573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906111ab919061246e565b6111b5908c61254c565b6040516001600160e01b031960e085901b1681526001600160a01b03909216600483015260248201526
04481018b9052606481018a90526084016020604051808303816000875af115801561120e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611232919061246e565b9a9950505050505050505050565b60035460405163e6a4390560e01b815260009182916001600160a01b039091169063e6a43905906112779087908790600401612487565b602060405180830381865afa158015611294573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906112b891906124a1565b6001600160a01b031663e6eba1f485856040518363ffffffff1660e01b81526004016112e5929190612487565b602060405180830381865afa158015611302573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906113269190612564565b600254604051630b4c774160e11b81526001600160a01b038781166004830152868116602483015262ffffff8416604483015292935060009290911690631698ee8290606401602060405180830381865afa158015611389573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906113ad91906124a1565b60408051600280825260608201835292935060009290916020830190803683375050600754825192935063ffffffff16918391506000906113f0576113f0612597565b602002602001019063ffffffff16908163ffffffff168152505060008160018151811061141f5761141f612597565b63ffffffff9283166020918202929092010152600754600854606092919091169061144a90426125ad565b10156114cb5760098054806020026020016040519081016040528092919081815260200182805480156114bf57602002820191906000526020600020906000905b825461010083900a900460060b81526020600d830181900493840193600103600790930192909202910180841161148b5790505b50505050509050611559565b60405163883bdbfd60e01b81526001600160a01b0384169063883bdbfd906114f79085906004016125c4565b600060405180830381865afa158015611514573d6000803e3d6000fd5b505050506040513d6000823e601f3d908101601f1916820160405261153c91908101906126d7565b50805190915061155390600990602084019061200c565b50426008555b60008160008151811061156e5761156e612597565b60200260200101518260018151811061158957611589612597565b602002602001015161159b91906127a3565b6007549091506000906115b79063ffffffff1660030b83612809565b60020b905060008082126115ce57816
0060b6115db565b8160060b6115db90612847565b9050620d89e88111156116145760405162461bcd60e51b81526020600482015260016024820152601560fa1b6044820152606401610386565b60006001821661162857600160801b61163a565b6ffffcb933bd6fad37aa2d162d1a5940015b70ffffffffffffffffffffffffffffffffff1690506002821615611679576080611674826ffff97272373d413259a46990580e213a612864565b901c90505b60048216156116a357608061169e826ffff2e50f5f656932ef12357cf3c7fdcc612864565b901c90505b60088216156116cd5760806116c8826fffe5caca7e10e4e61c3624eaa0941cd0612864565b901c90505b60108216156116f75760806116f2826fffcb9843d60f6159c9db58835c926644612864565b901c90505b602082161561172157608061171c826fff973b41fa98c081472e6896dfb254c0612864565b901c90505b604082161561174b576080611746826fff2ea16466c96a3843ec78b326b52861612864565b901c90505b6080821615611775576080611770826ffe5dee046a99a2a811c461f1969c3053612864565b901c90505b6101008216156117a057608061179b826ffcbe86c7900a88aedcffc83b479aa3a4612864565b901c90505b6102008216156117cb5760806117c6826ff987a7253ac413176f2b074cf7815e54612864565b901c90505b6104008216156117f65760806117f1826ff3392b0822b70005940c7a398e4b70f3612864565b901c90505b61080082161561182157608061181c826fe7159475a2c29b7443b29c7fa6e889d9612864565b901c90505b61100082161561184c576080611847826fd097f3bdfd2022b8845ad8f792aa5825612864565b901c90505b612000821615611877576080611872826fa9f746462d870fdf8a65dc1f90e061e5612864565b901c90505b6140008216156118a257608061189d826f70d869a156d2a1b890bb3df62baf32f7612864565b901c90505b6180008216156118cd5760806118c8826f31be135f97d08fd981231505542fcfa6612864565b901c90505b620100008216156118f95760806118f4826f09aa508b5b7a84e1c677de54f3e99bc9612864565b901c90505b6202000082161561192457608061191f826e5d6af8dedb81196699c329225ee604612864565b901c90505b6204000082161561194e576080611949826d2216e584f5fa1ea926041bedfe98612864565b901c90505b62080000821615611976576080611971826b048a170391f7dc42444e8fa2612864565b901c90505b60008360060b13156119915761198e81600019612883565b90505b60006119a264010000000083612897565b156119ae5760016119b1565b6
0005b6119c29060ff16602084901c61254c565b905060006103e8826001600160a01b03166006546103e86119e3919061254c565b6119ed9190612864565b6119f79190612883565b905060006103e8836001600160a01b03166006546103e8611a1891906125ad565b611a229190612864565b611a2c9190612883565b905060008a6001600160a01b0316633850c7bd6040518163ffffffff1660e01b815260040160e060405180830381865afa158015611a6e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611a9291906128bd565b505050505050905081816001600160a01b03161080611ab9575082816001600160a01b0316115b611ac75760019c508c611acd565b60009c508c5b50611afd6040518060400160405280600d81526020016c7061737354776170436865636b60981b8152508e611faa565b50505050505050505050505092915050565b6000546001600160a01b03163314611b395760405162461bcd60e51b8152600401610386906123ea565b6001600160a01b038116611b9e5760405162461bcd60e51b815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201526564647265737360d01b6064820152608401610386565b611ba781611f11565b50565b84151580611bb757508315155b611c035760405162461bcd60e51b815260206004820152601b60248201527f4f6e6520416d6f756e74204d757374204e6f74204265205a65726f00000000006044820152606401610386565b841580611c0e575083155b611c5a5760405162461bcd60e51b815260206004820152601760248201527f4f6e6520416d6f756e74204d757374204265205a65726f0000000000000000006044820152606401610386565b60008515611c685787611c6a565b865b905060008615611c7a5787611c7c565b885b9050611c888282611240565b611ca45760405162461bcd60e51b81526004016103869061241f565b60008715611cb25787611cb4565b865b6040516370a0823160e01b815233600482015290915081906001600160a01b038516906370a0823190602401602060405180830381865afa158015611cfd573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611d21919061246e565b11611d945760405162461bcd60e51b815260206004820152603760248201527f4e6f7420456e6f75676820436f6c6c65746572616c20496e205573657227732060448201527f57616c6c657420546f204f70656e20506f736974696f6e0000000000000000006064820152608401610386565b60035
460405163e6a4390560e01b81526000916001600160a01b03169063e6a4390590611dc79087908790600401612487565b602060405180830381865afa158015611de4573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611e0891906124a1565b6040516323b872dd60e01b81529091506001600160a01b038516906323b872dd90611e3b903390859087906004016124be565b6020604051808303816000875af1158015611e5a573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611e7e91906124f7565b5060405163dd85b4d360e01b81526001600160a01b038581166004830152336024830152604482018490526064820188905260006084830152610bb860a483015260c4820187905282169063dd85b4d39060e401600060405180830381600087803b158015611eec57600080fd5b505af1158015611f00573d6000803e3d6000fd5b505050505050505050505050505050565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b611fa68282604051602401611f779291906129a4565b60408051601f198184030181529190526020810180516001600160e01b03166309710a9d60e41b179052611feb565b5050565b611fa68282604051602401611fc09291906129c6565b60408051601f198184030181529190526020810180516001600160e01b031663c3b5563560e01b1790525b80516a636f6e736f6c652e6c6f67602083016000808483855afa5050505050565b828054828255906000526020600020906003016004900481019282156120b75791602002820160005b8382111561208257835183826101000a81548166ffffffffffffff021916908360060b66ffffffffffffff1602179055509260200192600701602081600601049283019260010302612035565b80156120b55782816101000a81549066ffffffffffffff0219169055600701602081600601049283019260010302612082565b505b506120c39291506120c7565b5090565b5b808211156120c357600081556001016120c8565b634e487b7160e01b600052600160045260246000fd5b6001600160a01b0381168114611ba757600080fd5b60006020828403121561211957600080fd5b8135612124816120f2565b9392505050565b600080600080600060a0868803121561214357600080fd5b853561214e816120f2565b9450602086013593506040860135612165816120f2565b92506060860135612175816120f2565b9497939650919460800
13592915050565b60008060006060848603121561219b57600080fd5b83356121a6816120f2565b925060208401356121b6816120f2565b915060408401356121c6816120f2565b809150509250925092565b62ffffff81168114611ba757600080fd5b60008060008060008060c087890312156121fb57600080fd5b8635612206816120f2565b955060208701359450604087013561221d816120f2565b9350606087013561222d816120f2565b92506080870135915060a0870135612244816121d1565b809150509295509295509295565b600080600080600080600060e0888a03121561226d57600080fd5b8735612278816120f2565b96506020880135612288816120f2565b955060408801359450606088013593506080880135925060a08801356122ad816120f2565b8092505060c0880135905092959891949750929550565b600080600080600080600080610100898b0312156122e157600080fd5b88356122ec816120f2565b975060208901356122fc816120f2565b965060408901359550606089013594506080890135935060a0890135925060c0890135612328816120f2565b8092505060e089013590509295985092959890939650565b6000806040838503121561235357600080fd5b823561235e816120f2565b9150602083013561236e816120f2565b809150509250929050565b600080600080600080600060e0888a03121561239457600080fd5b873561239f816120f2565b965060208801356123af816120f2565b9550604088013594506060880135935060808801356123cd816120f2565b9699959850939692959460a0840135945060c09093013592915050565b6020808252818101527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604082015260600190565b6020808252602f908201527f546865726520697320612070726f626c656d207769746820747761702070726960408201526e18d9481a5b881d1a1a5cc81c1bdbdb608a1b606082015260800190565b60006020828403121561248057600080fd5b5051919050565b6001600160a01b0392831681529116602082015260400190565b6000602082840312156124b357600080fd5b8151612124816120f2565b6001600160a01b039384168152919092166020820152604081019190915260600190565b805180151581146124f257600080fd5b919050565b60006020828403121561250957600080fd5b612124826124e2565b6000806040838503121561252557600080fd5b505080516020909101519092909150565b634e487b7160e01b600052601160045260246000fd5b6000821982111561255f5761255f612536565b500190565b60006
020828403121561257657600080fd5b8151612124816121d1565b634e487b7160e01b600052604160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b6000828210156125bf576125bf612536565b500390565b6020808252825182820181905260009190848201906040850190845b8181101561260257835163ffffffff16835292840192918401916001016125e0565b50909695505050505050565b604051601f8201601f1916810167ffffffffffffffff8111828210171561263757612637612581565b604052919050565b600067ffffffffffffffff82111561265957612659612581565b5060051b60200190565b600082601f83011261267457600080fd5b815160206126896126848361263f565b61260e565b82815260059290921b840181019181810190868411156126a857600080fd5b8286015b848110156126cc5780516126bf816120f2565b83529183019183016126ac565b509695505050505050565b600080604083850312156126ea57600080fd5b825167ffffffffffffffff8082111561270257600080fd5b818501915085601f83011261271657600080fd5b815160206127266126848361263f565b82815260059290921b8401810191818101908984111561274557600080fd5b948201945b838610156127735785518060060b81146127645760008081fd5b8252948201949082019061274a565b9188015191965090935050508082111561278c57600080fd5b5061279985828601612663565b9150509250929050565b60008160060b8360060b6000811281667fffffffffffff19018312811516156127ce576127ce612536565b81667fffffffffffff0183138116156127e9576127e9612536565b5090039392505050565b634e487b7160e01b600052601260045260246000fd5b60008160060b8360060b80612820576128206127f3565b667fffffffffffff1982146000198214161561283e5761283e612536565b90059392505050565b6000600160ff1b82141561285d5761285d612536565b5060000390565b600081600019048311821515161561287e5761287e612536565b500290565b600082612892576128926127f3565b500490565b6000826128a6576128a66127f3565b500690565b805161ffff811681146124f257600080fd5b600080600080600080600060e0888a0312156128d857600080fd5b87516128e3816120f2565b8097505060208801518060020b81146128fb57600080fd5b9550612909604089016128ab565b9450612917606089016128ab565b9350612925608089016128ab565b925060a088015160ff8116811461293b57600080fd5b915061294960c089016124e2565b90509295989
1949750929550565b6000815180845260005b8181101561297d57602081850181015186830182015201612961565b8181111561298f576000602083870101525b50601f01601f19169290920160200192915050565b6040815260006129b76040830185612957565b90508260208301529392505050565b6040815260006129d96040830185612957565b90508215156020830152939250505056fea26469706673582212204807886642c533483ad98dd653bbd64355a043df65a844d3e2a67f54adfb3bc464736f6c634300080a0033000000000000000000000000d0a1e359811322d97991e03f863a0c30c2cf029c000000000000000000000000010573fa5ca52cb479337bc2a6ba4baba0b9ff86000000000000000000000000b19a40e2faa6d6ad60240af8dac105a428f353bf", + "maxFeePerGas": "0x21ffed911", + "maxPriorityFeePerGas": "0x21ffed909", + "nonce": "0xa6", + "publicKey": "0x315947dd868c7c5c77a1052ca94381db3451c67cde16b0bb3822460a5630f73807bed17c6d53eff84e862c8eeb7623d1264328122e45e7d7c4d5ae3f341521a8", + "r": "0x8aa81dd5b9caa6d950753d7cf2c0448f4c077efbae50ae1fe31028b34510192a", + "raw": "0x02f92c832a81a685021ffed90985021ffed91183272cef8080b92c276080604052600180546001600160a01b031990811673b27308f9f90d607463bb33ea1bebb41c27ce5ab61790915560028054909116731f98431c8ad98523631ae4a59f267346ea31f9841790553480156200005957600080fd5b5060405162002bc738038062002bc78339810160408190526200007c916200014d565b6200008733620000e0565b600480546001600160a01b03199081166001600160a01b03938416179091556005805482169483169490941790935560038054909316911617905560326006556007805461012c63ffffffff1990911617905562000197565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b80516001600160a01b03811681146200014857600080fd5b919050565b6000806000606084860312156200016357600080fd5b6200016e8462000130565b92506200017e6020850162000130565b91506200018e6040850162000130565b90509250925092565b612a2080620001a76000396000f3fe6080604052600436106100f75760003560e01c8063ad5c46481161008a578063e8e3370011610059578063e8e33700146102be578063ee882d6e146102ec578063f2fde38b1461031c578063fad
a19f21461033c57600080fd5b8063ad5c464814610229578063baa2abde14610249578063c6bbd5a71461027e578063d9f165dc1461029e57600080fd5b80637485706f116100c65780637485706f146101925780637cc281f4146101b25780638da5cb5b146101d2578063938cfd0e1461020957600080fd5b8063070cb2ba1461011d57806326b156311461013d578063426172d61461015d578063715018a61461017d57600080fd5b36610118576005546001600160a01b03163314610116576101166120dc565b005b600080fd5b34801561012957600080fd5b50610116610138366004612107565b61035c565b34801561014957600080fd5b5061011661015836600461212b565b6103b1565b34801561016957600080fd5b50610116610178366004612107565b610663565b34801561018957600080fd5b506101166106af565b34801561019e57600080fd5b506101166101ad366004612186565b6106e5565b3480156101be57600080fd5b506101166101cd3660046121e2565b6107e8565b3480156101de57600080fd5b506000546001600160a01b03165b6040516001600160a01b0390911681526020015b60405180910390f35b34801561021557600080fd5b506004546101ec906001600160a01b031681565b34801561023557600080fd5b506005546101ec906001600160a01b031681565b34801561025557600080fd5b50610269610264366004612252565b610903565b60408051928352602083019190915201610200565b34801561028a57600080fd5b506001546101ec906001600160a01b031681565b3480156102aa57600080fd5b506101166102b9366004612107565b610bb7565b3480156102ca57600080fd5b506102de6102d93660046122c4565b610c03565b604051908152602001610200565b3480156102f857600080fd5b5061030c610307366004612340565b611240565b6040519015158152602001610200565b34801561032857600080fd5b50610116610337366004612107565b611b0f565b34801561034857600080fd5b50610116610357366004612379565b611baa565b6000546001600160a01b0316331461038f5760405162461bcd60e51b8152600401610386906123ea565b60405180910390fd5b600480546001600160a01b0319166001600160a01b0392909216919091179055565b6103bb8383611240565b6103d75760405162461bcd60e51b81526004016103869061241f565b6040516370a0823160e01b815233600482015281906001600160a01b038516906370a0823190602401602060405180830381865afa15801561041d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508
10190610441919061246e565b116104b45760405162461bcd60e51b815260206004820152603960248201527f4e6f7420456e6f75676820436f6c6c65746572616c20496e205573657227732060448201527f57616c6c657420546f2061646420746f20706f736974696f6e000000000000006064820152608401610386565b60035460405163e6a4390560e01b81526000916001600160a01b03169063e6a43905906104e79087908790600401612487565b602060405180830381865afa158015610504573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061052891906124a1565b6040516323b872dd60e01b81529091506001600160a01b038516906323b872dd9061055b908990859087906004016124be565b6020604051808303816000875af115801561057a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061059e91906124f7565b6105ea5760405162461bcd60e51b815260206004820152601f60248201527f416d6f756e7420646964206e6f74207472616e7366657220746f2070616972006044820152606401610386565b604051635ccb6d3f60e11b81526001600160a01b0387811660048301526024820187905285811660448301526064820184905282169063b996da7e90608401600060405180830381600087803b15801561064357600080fd5b505af1158015610657573d6000803e3d6000fd5b50505050505050505050565b6000546001600160a01b0316331461068d5760405162461bcd60e51b8152600401610386906123ea565b600180546001600160a01b0319166001600160a01b0392909216919091179055565b6000546001600160a01b031633146106d95760405162461bcd60e51b8152600401610386906123ea565b6106e36000611f11565b565b6000546001600160a01b0316331461070f5760405162461bcd60e51b8152600401610386906123ea565b60035460405163e6a4390560e01b81526000916001600160a01b03169063e6a43905906107429086908690600401612487565b602060405180830381865afa15801561075f573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061078391906124a1565b604051630386595d60e11b81526001600160a01b0386811660048301529192509082169063070cb2ba90602401600060405180830381600087803b1580156107ca57600080fd5b505af11580156107de573d6000803e3d6000fd5b5050505050505050565b6107f28484611240565b61080e5760405162461bcd60e51b81526004016103869061241f565b60035460405163e6a4390560e01b8
1526000916001600160a01b03169063e6a43905906108419088908890600401612487565b602060405180830381865afa15801561085e573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061088291906124a1565b60405163cadcf27b60e01b81526001600160a01b038981166004830152602482018990526044820186905262ffffff851660648301529192509082169063cadcf27b90608401600060405180830381600087803b1580156108e257600080fd5b505af11580156108f6573d6000803e3d6000fd5b5050505050505050505050565b60008082428110156109575760405162461bcd60e51b815260206004820152601f60248201527f4675747572697a655631526f75746572526f757465723a2045585049524544006044820152606401610386565b60035460405163e6a4390560e01b81526000916001600160a01b03169063e6a439059061098a908e908e90600401612487565b602060405180830381865afa1580156109a7573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906109cb91906124a1565b90506109d78b8b611240565b6109f35760405162461bcd60e51b81526004016103869061241f565b6040516323b872dd60e01b81526001600160a01b038216906323b872dd90610a2390339085908e906004016124be565b6020604051808303816000875af1158015610a42573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610a6691906124f7565b5060405163226bf2d160e21b81526001600160a01b03878116600483015260009182918416906389afcb449060240160408051808303816000875af1158015610ab3573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610ad79190612512565b9150915089861015610b415760405162461bcd60e51b815260206004820152602d60248201527f4675747572697a655631526f75746572526f757465723a20494e53554646494360448201526c1251539517d057d05353d55395609a1b6064820152608401610386565b88851015610ba75760405162461bcd60e51b815260206004820152602d60248201527f4675747572697a655631526f75746572526f757465723a20494e53554646494360448201526c1251539517d097d05353d55395609a1b6064820152608401610386565b5050505097509795505050505050565b6000546001600160a01b03163314610be15760405162461bcd60e51b8152600401610386906123ea565b600280546001600160a01b0319166001600160a01b0392909216919091179055565b600
08142811015610c565760405162461bcd60e51b815260206004820152601f60248201527f4675747572697a655631526f75746572526f757465723a2045585049524544006044820152606401610386565b610c956040518060400160405280602081526020017f616d6f756e74424465736972656420696e20726f75746572206669727374203a81525088611f61565b600254604051630b4c774160e11b81526001600160a01b038c811660048301528b81166024830152610bb860448301526000921690631698ee8290606401602060405180830381865afa158015610cf0573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d1491906124a1565b6001600160a01b03161415610d815760405162461bcd60e51b815260206004820152602d60248201527f4120556e6953776170563320506f6f6c2057697468205468697320466565204460448201526c1bd95cc8139bdd08115e1a5cdd609a1b6064820152608401610386565b60035460405163e6a4390560e01b81526000916001600160a01b03169063e6a4390590610db4908e908e90600401612487565b602060405180830381865afa158015610dd1573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610df591906124a1565b6001600160a01b03161415610e8b5760035460048054604051630edef2e760e31b81526001600160a01b038e8116938201939093528c8316602482015290821660448201529116906376f79738906064016020604051808303816000875af1158015610e65573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610e8991906124a1565b505b610e958a8a611240565b610eb15760405162461bcd60e51b81526004016103869061241f565b896001600160a01b03166323b872dd33600360009054906101000a90046001600160a01b03166001600160a01b031663e6a439058e8e6040518363ffffffff1660e01b8152600401610f04929190612487565b602060405180830381865afa158015610f21573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610f4591906124a1565b8b6040518463ffffffff1660e01b8152600401610f64939291906124be565b6020604051808303816000875af1158015610f83573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610fa791906124f7565b50886001600160a01b03166323b872dd33600360009054906101000a90046001600160a01b03166001600160a01b031663e6a439058e8e6040518363ffffffff1660e01b815
2600401610ffb929190612487565b602060405180830381865afa158015611018573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061103c91906124a1565b8a6040518463ffffffff1660e01b815260040161105b939291906124be565b6020604051808303816000875af115801561107a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061109e91906124f7565b5060035460405163e6a4390560e01b81526001600160a01b039091169063e6a43905906110d1908d908d90600401612487565b602060405180830381865afa1580156110ee573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061111291906124a1565b60015460405163f7729d4360e01b81526001600160a01b038c811660048301528d81166024830152610bb86044830152606482018b9052600060848301529283169263a647e8ec92889291169063f7729d439060a4016020604051808303816000875af1158015611187573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906111ab919061246e565b6111b5908c61254c565b6040516001600160e01b031960e085901b1681526001600160a01b0390921660048301526024820152604481018b9052606481018a90526084016020604051808303816000875af115801561120e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611232919061246e565b9a9950505050505050505050565b60035460405163e6a4390560e01b815260009182916001600160a01b039091169063e6a43905906112779087908790600401612487565b602060405180830381865afa158015611294573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906112b891906124a1565b6001600160a01b031663e6eba1f485856040518363ffffffff1660e01b81526004016112e5929190612487565b602060405180830381865afa158015611302573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906113269190612564565b600254604051630b4c774160e11b81526001600160a01b038781166004830152868116602483015262ffffff8416604483015292935060009290911690631698ee8290606401602060405180830381865afa158015611389573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906113ad91906124a1565b6040805160028082526060820183529293506000929091602083019080368337505060075482519
2935063ffffffff16918391506000906113f0576113f0612597565b602002602001019063ffffffff16908163ffffffff168152505060008160018151811061141f5761141f612597565b63ffffffff9283166020918202929092010152600754600854606092919091169061144a90426125ad565b10156114cb5760098054806020026020016040519081016040528092919081815260200182805480156114bf57602002820191906000526020600020906000905b825461010083900a900460060b81526020600d830181900493840193600103600790930192909202910180841161148b5790505b50505050509050611559565b60405163883bdbfd60e01b81526001600160a01b0384169063883bdbfd906114f79085906004016125c4565b600060405180830381865afa158015611514573d6000803e3d6000fd5b505050506040513d6000823e601f3d908101601f1916820160405261153c91908101906126d7565b50805190915061155390600990602084019061200c565b50426008555b60008160008151811061156e5761156e612597565b60200260200101518260018151811061158957611589612597565b602002602001015161159b91906127a3565b6007549091506000906115b79063ffffffff1660030b83612809565b60020b905060008082126115ce578160060b6115db565b8160060b6115db90612847565b9050620d89e88111156116145760405162461bcd60e51b81526020600482015260016024820152601560fa1b6044820152606401610386565b60006001821661162857600160801b61163a565b6ffffcb933bd6fad37aa2d162d1a5940015b70ffffffffffffffffffffffffffffffffff1690506002821615611679576080611674826ffff97272373d413259a46990580e213a612864565b901c90505b60048216156116a357608061169e826ffff2e50f5f656932ef12357cf3c7fdcc612864565b901c90505b60088216156116cd5760806116c8826fffe5caca7e10e4e61c3624eaa0941cd0612864565b901c90505b60108216156116f75760806116f2826fffcb9843d60f6159c9db58835c926644612864565b901c90505b602082161561172157608061171c826fff973b41fa98c081472e6896dfb254c0612864565b901c90505b604082161561174b576080611746826fff2ea16466c96a3843ec78b326b52861612864565b901c90505b6080821615611775576080611770826ffe5dee046a99a2a811c461f1969c3053612864565b901c90505b6101008216156117a057608061179b826ffcbe86c7900a88aedcffc83b479aa3a4612864565b901c90505b6102008216156117cb5760806117c6826ff987a7253ac413176f2b074
cf7815e54612864565b901c90505b6104008216156117f65760806117f1826ff3392b0822b70005940c7a398e4b70f3612864565b901c90505b61080082161561182157608061181c826fe7159475a2c29b7443b29c7fa6e889d9612864565b901c90505b61100082161561184c576080611847826fd097f3bdfd2022b8845ad8f792aa5825612864565b901c90505b612000821615611877576080611872826fa9f746462d870fdf8a65dc1f90e061e5612864565b901c90505b6140008216156118a257608061189d826f70d869a156d2a1b890bb3df62baf32f7612864565b901c90505b6180008216156118cd5760806118c8826f31be135f97d08fd981231505542fcfa6612864565b901c90505b620100008216156118f95760806118f4826f09aa508b5b7a84e1c677de54f3e99bc9612864565b901c90505b6202000082161561192457608061191f826e5d6af8dedb81196699c329225ee604612864565b901c90505b6204000082161561194e576080611949826d2216e584f5fa1ea926041bedfe98612864565b901c90505b62080000821615611976576080611971826b048a170391f7dc42444e8fa2612864565b901c90505b60008360060b13156119915761198e81600019612883565b90505b60006119a264010000000083612897565b156119ae5760016119b1565b60005b6119c29060ff16602084901c61254c565b905060006103e8826001600160a01b03166006546103e86119e3919061254c565b6119ed9190612864565b6119f79190612883565b905060006103e8836001600160a01b03166006546103e8611a1891906125ad565b611a229190612864565b611a2c9190612883565b905060008a6001600160a01b0316633850c7bd6040518163ffffffff1660e01b815260040160e060405180830381865afa158015611a6e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611a9291906128bd565b505050505050905081816001600160a01b03161080611ab9575082816001600160a01b0316115b611ac75760019c508c611acd565b60009c508c5b50611afd6040518060400160405280600d81526020016c7061737354776170436865636b60981b8152508e611faa565b50505050505050505050505092915050565b6000546001600160a01b03163314611b395760405162461bcd60e51b8152600401610386906123ea565b6001600160a01b038116611b9e5760405162461bcd60e51b815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201526564647265737360d01b6064820152608401610386565b611ba781611f1
1565b50565b84151580611bb757508315155b611c035760405162461bcd60e51b815260206004820152601b60248201527f4f6e6520416d6f756e74204d757374204e6f74204265205a65726f00000000006044820152606401610386565b841580611c0e575083155b611c5a5760405162461bcd60e51b815260206004820152601760248201527f4f6e6520416d6f756e74204d757374204265205a65726f0000000000000000006044820152606401610386565b60008515611c685787611c6a565b865b905060008615611c7a5787611c7c565b885b9050611c888282611240565b611ca45760405162461bcd60e51b81526004016103869061241f565b60008715611cb25787611cb4565b865b6040516370a0823160e01b815233600482015290915081906001600160a01b038516906370a0823190602401602060405180830381865afa158015611cfd573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611d21919061246e565b11611d945760405162461bcd60e51b815260206004820152603760248201527f4e6f7420456e6f75676820436f6c6c65746572616c20496e205573657227732060448201527f57616c6c657420546f204f70656e20506f736974696f6e0000000000000000006064820152608401610386565b60035460405163e6a4390560e01b81526000916001600160a01b03169063e6a4390590611dc79087908790600401612487565b602060405180830381865afa158015611de4573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611e0891906124a1565b6040516323b872dd60e01b81529091506001600160a01b038516906323b872dd90611e3b903390859087906004016124be565b6020604051808303816000875af1158015611e5a573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611e7e91906124f7565b5060405163dd85b4d360e01b81526001600160a01b038581166004830152336024830152604482018490526064820188905260006084830152610bb860a483015260c4820187905282169063dd85b4d39060e401600060405180830381600087803b158015611eec57600080fd5b505af1158015611f00573d6000803e3d6000fd5b505050505050505050505050505050565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b611fa68282604051602401611f779291906129a4565b60408051601f1981840301815291905260208101805
16001600160e01b03166309710a9d60e41b179052611feb565b5050565b611fa68282604051602401611fc09291906129c6565b60408051601f198184030181529190526020810180516001600160e01b031663c3b5563560e01b1790525b80516a636f6e736f6c652e6c6f67602083016000808483855afa5050505050565b828054828255906000526020600020906003016004900481019282156120b75791602002820160005b8382111561208257835183826101000a81548166ffffffffffffff021916908360060b66ffffffffffffff1602179055509260200192600701602081600601049283019260010302612035565b80156120b55782816101000a81549066ffffffffffffff0219169055600701602081600601049283019260010302612082565b505b506120c39291506120c7565b5090565b5b808211156120c357600081556001016120c8565b634e487b7160e01b600052600160045260246000fd5b6001600160a01b0381168114611ba757600080fd5b60006020828403121561211957600080fd5b8135612124816120f2565b9392505050565b600080600080600060a0868803121561214357600080fd5b853561214e816120f2565b9450602086013593506040860135612165816120f2565b92506060860135612175816120f2565b949793965091946080013592915050565b60008060006060848603121561219b57600080fd5b83356121a6816120f2565b925060208401356121b6816120f2565b915060408401356121c6816120f2565b809150509250925092565b62ffffff81168114611ba757600080fd5b60008060008060008060c087890312156121fb57600080fd5b8635612206816120f2565b955060208701359450604087013561221d816120f2565b9350606087013561222d816120f2565b92506080870135915060a0870135612244816121d1565b809150509295509295509295565b600080600080600080600060e0888a03121561226d57600080fd5b8735612278816120f2565b96506020880135612288816120f2565b955060408801359450606088013593506080880135925060a08801356122ad816120f2565b8092505060c0880135905092959891949750929550565b600080600080600080600080610100898b0312156122e157600080fd5b88356122ec816120f2565b975060208901356122fc816120f2565b965060408901359550606089013594506080890135935060a0890135925060c0890135612328816120f2565b8092505060e089013590509295985092959890939650565b6000806040838503121561235357600080fd5b823561235e816120f2565b9150602083013561236e816120f2565b8091505092509
29050565b600080600080600080600060e0888a03121561239457600080fd5b873561239f816120f2565b965060208801356123af816120f2565b9550604088013594506060880135935060808801356123cd816120f2565b9699959850939692959460a0840135945060c09093013592915050565b6020808252818101527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604082015260600190565b6020808252602f908201527f546865726520697320612070726f626c656d207769746820747761702070726960408201526e18d9481a5b881d1a1a5cc81c1bdbdb608a1b606082015260800190565b60006020828403121561248057600080fd5b5051919050565b6001600160a01b0392831681529116602082015260400190565b6000602082840312156124b357600080fd5b8151612124816120f2565b6001600160a01b039384168152919092166020820152604081019190915260600190565b805180151581146124f257600080fd5b919050565b60006020828403121561250957600080fd5b612124826124e2565b6000806040838503121561252557600080fd5b505080516020909101519092909150565b634e487b7160e01b600052601160045260246000fd5b6000821982111561255f5761255f612536565b500190565b60006020828403121561257657600080fd5b8151612124816121d1565b634e487b7160e01b600052604160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b6000828210156125bf576125bf612536565b500390565b6020808252825182820181905260009190848201906040850190845b8181101561260257835163ffffffff16835292840192918401916001016125e0565b50909695505050505050565b604051601f8201601f1916810167ffffffffffffffff8111828210171561263757612637612581565b604052919050565b600067ffffffffffffffff82111561265957612659612581565b5060051b60200190565b600082601f83011261267457600080fd5b815160206126896126848361263f565b61260e565b82815260059290921b840181019181810190868411156126a857600080fd5b8286015b848110156126cc5780516126bf816120f2565b83529183019183016126ac565b509695505050505050565b600080604083850312156126ea57600080fd5b825167ffffffffffffffff8082111561270257600080fd5b818501915085601f83011261271657600080fd5b815160206127266126848361263f565b82815260059290921b8401810191818101908984111561274557600080fd5b948201945b838610156127735785518060060b811461276
45760008081fd5b8252948201949082019061274a565b9188015191965090935050508082111561278c57600080fd5b5061279985828601612663565b9150509250929050565b60008160060b8360060b6000811281667fffffffffffff19018312811516156127ce576127ce612536565b81667fffffffffffff0183138116156127e9576127e9612536565b5090039392505050565b634e487b7160e01b600052601260045260246000fd5b60008160060b8360060b80612820576128206127f3565b667fffffffffffff1982146000198214161561283e5761283e612536565b90059392505050565b6000600160ff1b82141561285d5761285d612536565b5060000390565b600081600019048311821515161561287e5761287e612536565b500290565b600082612892576128926127f3565b500490565b6000826128a6576128a66127f3565b500690565b805161ffff811681146124f257600080fd5b600080600080600080600060e0888a0312156128d857600080fd5b87516128e3816120f2565b8097505060208801518060020b81146128fb57600080fd5b9550612909604089016128ab565b9450612917606089016128ab565b9350612925608089016128ab565b925060a088015160ff8116811461293b57600080fd5b915061294960c089016124e2565b905092959891949750929550565b6000815180845260005b8181101561297d57602081850181015186830182015201612961565b8181111561298f576000602083870101525b50601f01601f19169290920160200192915050565b6040815260006129b76040830185612957565b90508260208301529392505050565b6040815260006129d96040830185612957565b90508215156020830152939250505056fea26469706673582212204807886642c533483ad98dd653bbd64355a043df65a844d3e2a67f54adfb3bc464736f6c634300080a0033000000000000000000000000d0a1e359811322d97991e03f863a0c30c2cf029c000000000000000000000000010573fa5ca52cb479337bc2a6ba4baba0b9ff86000000000000000000000000b19a40e2faa6d6ad60240af8dac105a428f353bfc001a08aa81dd5b9caa6d950753d7cf2c0448f4c077efbae50ae1fe31028b34510192aa025dce8987a6850159cdd2e39af17474f299a4f82ca3436424799168456ec2479", + "s": "0x25dce8987a6850159cdd2e39af17474f299a4f82ca3436424799168456ec2479", + "to": null, + "transactionIndex": "0x0", + "type": "0x2", + "v": "0x1", + "value": "0x0" + }, + { + "blockHash": 
"0x0ec62c2a397e114d84ce932387d841787d7ec5757ceba3708386da87934b7c82", + "blockNumber": "0x1ef81ff", + "chainId": null, + "condition": null, + "creates": null, + "from": "0xe6d63ed2c574b150a205d1d9cc7aaff1b7e4b59d", + "gas": "0x2dc6c0", + "gasPrice": "0x4f7915f5", + "hash": "0xbe6122d6aaf84fb85f4df136d4662c6dc344248e987255c0daa1193b3f17d5a9", + "input": "0xfdacd5760000000000000000000000000000000000000000000000000000000000000001", + "nonce": "0x7", + "publicKey": "0xb2a40fd8ec8703916fde9a0d3bb3c2391d7a2a1a6c1cd6492a7842d9daed24d5064847aaa242e635440f6dd06107044e1bd9387da6c32da3eaee56b928c6bdbf", + "r": "0x4b3f442f3a014468b40d2fadea1b152b36582420d28fb69695658be40ffbdffa", + "raw": "0xf88807844f7915f5832dc6c0949c97d0e47d81e0ffd0e41450427973e30ff1657b80a4fdacd57600000000000000000000000000000000000000000000000000000000000000011ba04b3f442f3a014468b40d2fadea1b152b36582420d28fb69695658be40ffbdffaa065f626f3a91ca662e56c42ea4376ee8f3db65a4ad613b9bdd2776c292869fee7", + "s": "0x65f626f3a91ca662e56c42ea4376ee8f3db65a4ad613b9bdd2776c292869fee7", + "standardV": "0x0", + "to": "0x9c97d0e47d81e0ffd0e41450427973e30ff1657b", + "transactionIndex": "0x1", + "v": "0x1b", + "value": "0x0" + } + ], + "transactionsRoot": "0x83b29816e5acaf6110a616ec1e937fa9d78dc643cd580ce06eb54195b8fc1a70", + "uncles": [] +} + +` + +func TestBlock_UnmarshalJSON(t *testing.T) { + t.Run("unmarshals parity block", func(t *testing.T) { + b := new(evmtypes.Block) + err := b.UnmarshalJSON([]byte(paritySampleBlock)) + assert.NoError(t, err) + + assert.Equal(t, int64(32473599), b.Number) + assert.Equal(t, "0x0ec62c2a397e114d84ce932387d841787d7ec5757ceba3708386da87934b7c82", b.Hash.Hex()) + assert.Equal(t, "0x3aa1c729fb45888bc1ce777d00bad9637c0b5f7cb48b145ebacc16098e0132d4", b.ParentHash.Hex()) + assert.Equal(t, assets.NewWeiI(7), b.BaseFeePerGas) + assert.Equal(t, int64(1656602604), b.Timestamp.Unix()) + assert.Len(t, b.Transactions, 2) + }) + t.Run("unmarshals geth block", func(t *testing.T) { + b := 
new(evmtypes.Block) + err := b.UnmarshalJSON([]byte(gethSampleBlock)) + assert.NoError(t, err) + + assert.Equal(t, int64(15051090), b.Number) + assert.Equal(t, "0x45eb0a650b6b0b9fd1ee676b870e43fa7614f1034f7404070327a332faed05c0", b.Hash.Hex()) + assert.Equal(t, "0x653ea251c180d93296ef79378e64d7dc9a74f565a54df477faeb64d3330977dd", b.ParentHash.Hex()) + assert.Equal(t, assets.NewWeiI(39678999761), b.BaseFeePerGas) + assert.Equal(t, int64(1656603143), b.Timestamp.Unix()) + assert.Len(t, b.Transactions, 7) + }) + t.Run("handles empty result", func(t *testing.T) { + b := new(evmtypes.Block) + err := b.UnmarshalJSON([]byte("null")) + assert.Error(t, err) + assert.Equal(t, errors.Cause(err), evmtypes.ErrMissingBlock) + assert.True(t, errors.Is(err, evmtypes.ErrMissingBlock)) + }) +} + +func TestTransaction_UnmarshalJSON(t *testing.T) { + t.Parallel() + type args struct { + data []byte + } + tests := []struct { + name string + args args + wantErr bool + want *evmtypes.Transaction + }{ + { + name: "sample geth txn", + args: args{ + []byte( + `{ + "blockHash": "0x45eb0a650b6b0b9fd1ee676b870e43fa7614f1034f7404070327a332faed05c0", + "blockNumber": "0xe5a952", + "from": "0x76e40d0a69fd81826b5eb7d18145626d46eafdef", + "gas": "0xdbba0", + "gasPrice": "0x978a846d2", + "maxFeePerGas": "0xd0892241d", + "maxPriorityFeePerGas": "0x3b9aca01", + "hash": "0x754f49f0a2ca7680806d261dd36ee95ac88a81da59fef0b5d8d691478f075d46", + "input": 
"0x1cff79cd000000000000000000000000343933efdf64d2d6eeaf2dcd5fbd701541d64f67000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000e44a7d794a000000000000000000000000000000000000000000000005bc94810a20626a9a0000000000000000000000000000000000000000000000000e52b79acdb06152000000000000000000000000000000000000798f836298dfb377b3deeb7ade400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000062bdc2400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000798e37fb7bc47a14e3b89fa086d600000000000000000000000000000000000000000000000000000000", + "nonce": "0xbf65", + "to": "0x4cb18386e5d1f34dc6eea834bf3534a970a3f8e7", + "transactionIndex": "0x0", + "value": "0xa907", + "type": "0x2", + "accessList": [], + "chainId": "0x1", + "v": "0x0", + "r": "0xcbdf4705610d7b20326dcd153491f37f133c34026f3e0abf72f9db03ac98de0e", + "s": "0xa2b2d625d34315e8d6d0543e0f9393d2a14dddaf3678d7f0ed432df9cb8e5c3" +}`, + ), + }, + want: &evmtypes.Transaction{ + GasPrice: assets.NewWei(mustHexToBig(t, "978a846d2")), + GasLimit: mustHextoUint32(t, "0xdbba0"), + MaxFeePerGas: assets.NewWei(mustHexToBig(t, "d0892241d")), + MaxPriorityFeePerGas: assets.NewWei(mustHexToBig(t, "3b9aca01")), + Type: 0x2, + Hash: common.HexToHash("0x754f49f0a2ca7680806d261dd36ee95ac88a81da59fef0b5d8d691478f075d46"), + }, + }, + { + name: "sample parity txn", + args: args{[]byte( + ` { + "blockHash": "0x0ec62c2a397e114d84ce932387d841787d7ec5757ceba3708386da87934b7c82", + "blockNumber": "0x1ef81ff", + "chainId": null, + "condition": null, + "creates": null, + "from": "0xe6d63ed2c574b150a205d1d9cc7aaff1b7e4b59d", + "gas": "0x2dc6c0", + "gasPrice": "0x4f7915f5", + "hash": "0xbe6122d6aaf84fb85f4df136d4662c6dc344248e987255c0daa1193b3f17d5a9", + "input": "0xfdacd5760000000000000000000000000000000000000000000000000000000000000001", + "nonce": "0x7", + 
"publicKey": "0xb2a40fd8ec8703916fde9a0d3bb3c2391d7a2a1a6c1cd6492a7842d9daed24d5064847aaa242e635440f6dd06107044e1bd9387da6c32da3eaee56b928c6bdbf", + "r": "0x4b3f442f3a014468b40d2fadea1b152b36582420d28fb69695658be40ffbdffa", + "raw": "0xf88807844f7915f5832dc6c0949c97d0e47d81e0ffd0e41450427973e30ff1657b80a4fdacd57600000000000000000000000000000000000000000000000000000000000000011ba04b3f442f3a014468b40d2fadea1b152b36582420d28fb69695658be40ffbdffaa065f626f3a91ca662e56c42ea4376ee8f3db65a4ad613b9bdd2776c292869fee7", + "s": "0x65f626f3a91ca662e56c42ea4376ee8f3db65a4ad613b9bdd2776c292869fee7", + "standardV": "0x0", + "to": "0x9c97d0e47d81e0ffd0e41450427973e30ff1657b", + "transactionIndex": "0x1", + "v": "0x1b", + "value": "0x0" + }`, + )}, + want: &evmtypes.Transaction{ + GasPrice: assets.NewWei(mustHexToBig(t, "4f7915f5")), + GasLimit: mustHextoUint32(t, "0x2dc6c0"), + MaxFeePerGas: nil, + MaxPriorityFeePerGas: nil, + Type: 0, + Hash: common.HexToHash("0xbe6122d6aaf84fb85f4df136d4662c6dc344248e987255c0daa1193b3f17d5a9"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := &evmtypes.Transaction{} + err := got.UnmarshalJSON(tt.args.data) + require.NoError(t, err) + require.Equal(t, tt.want, got) + + }) + } +} + +func TestTransaction_JSONRoundtrip(t *testing.T) { + t.Parallel() + want := &evmtypes.Transaction{ + GasPrice: assets.NewWei(mustHexToBig(t, "978a846d2")), + GasLimit: mustHextoUint32(t, "0xdbba0"), + MaxFeePerGas: assets.NewWei(mustHexToBig(t, "d0892241d")), + MaxPriorityFeePerGas: assets.NewWei(mustHexToBig(t, "3b9aca01")), + Type: evmtypes.TxType(2), + Hash: common.HexToHash("0x754f49f0a2ca7680806d261dd36ee95ac88a81da59fef0b5d8d691478f075d46"), + } + + d, err := json.Marshal(want) + require.NoError(t, err) + got := new(evmtypes.Transaction) + err = json.Unmarshal(d, got) + require.NoError(t, err) + assert.Equal(t, want, got) +} + +func TestBlock_JSONRoundtrip(t *testing.T) { + t.Parallel() + + d, err := 
json.Marshal(smallBlock) + require.NoError(t, err) + got := new(evmtypes.Block) + err = json.Unmarshal(d, got) + require.NoError(t, err) + assert.Equal(t, smallBlock.Hash, got.Hash) + assert.Equal(t, smallBlock.BaseFeePerGas, got.BaseFeePerGas) + assert.Equal(t, smallBlock.Number, got.Number) + assert.Equal(t, smallBlock.ParentHash, got.ParentHash) + assert.Equal(t, smallBlock.Timestamp, got.Timestamp) + + assertTxnsEqual(t, smallBlock.Transactions, got.Transactions) +} + +func assertTxnsEqual(t *testing.T, txns1, txns2 []evmtypes.Transaction) { + require.Equal(t, len(txns1), len(txns2)) + for i := range txns1 { + assert.Equal(t, txns1[i].GasLimit, txns2[i].GasLimit) + assert.True(t, txns1[i].GasPrice.Equal(txns2[i].GasPrice)) + assert.Equal(t, txns1[i].Hash, txns2[i].Hash) + assert.True(t, txns1[i].MaxFeePerGas.Equal(txns2[i].MaxFeePerGas)) + assert.True(t, txns1[i].MaxPriorityFeePerGas.Equal(txns2[i].MaxPriorityFeePerGas)) + assert.Equal(t, txns1[i].Type, txns2[i].Type) + } +} +func TestTxType_JSONRoundtrip(t *testing.T) { + + t.Run("non zero", func(t *testing.T) { + t.Parallel() + want := evmtypes.TxType(2) + d, err := json.Marshal(&want) + require.NoError(t, err) + + got := new(evmtypes.TxType) + err = json.Unmarshal(d, got) + require.NoError(t, err) + assert.Equal(t, want, *got) + }) + + t.Run("zero", func(t *testing.T) { + t.Parallel() + want := evmtypes.TxType(0) + d, err := json.Marshal(&want) + require.NoError(t, err) + + got := new(evmtypes.TxType) + err = json.Unmarshal(d, got) + require.NoError(t, err) + assert.Equal(t, want, *got) + }) +} + +func mustHextoUint32(t *testing.T, hx string) uint32 { + temp := new(hexutil.Uint64) + err := temp.UnmarshalText([]byte(hx)) + require.NoError(t, err) + return uint32(*temp) +} + +func mustHexToBig(t *testing.T, hx string) *big.Int { + n, err := hex.ParseBig(hx) + require.NoError(t, err) + return n +} diff --git a/core/chains/evm/types/nonce.go b/core/chains/evm/types/nonce.go new file mode 100644 index 
00000000..be295bdd --- /dev/null +++ b/core/chains/evm/types/nonce.go @@ -0,0 +1,23 @@ +package types + +import ( + "fmt" + "strconv" +) + +var _ fmt.Stringer = Nonce(0) + +// Nonce wraps an EVM nonce into a stringable type +type Nonce int64 + +func (n Nonce) Int64() int64 { + return int64(n) +} + +func (n Nonce) String() string { + return strconv.FormatInt(n.Int64(), 10) +} + +func GenerateNextNonce(prev Nonce) Nonce { + return prev + 1 +} diff --git a/core/chains/evm/types/types.go b/core/chains/evm/types/types.go new file mode 100644 index 00000000..9b2ff603 --- /dev/null +++ b/core/chains/evm/types/types.go @@ -0,0 +1,378 @@ +package types + +import ( + "database/sql/driver" + "encoding/json" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/jackc/pgtype" + "github.com/pkg/errors" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +type Configs interface { + Chains(ids ...string) ([]types.ChainStatus, int, error) + Node(name string) (Node, error) + Nodes(chainID string) (nodes []Node, err error) + NodeStatus(name string) (types.NodeStatus, error) +} + +type Node struct { + Name string + EVMChainID ubig.Big + WSURL null.String + HTTPURL null.String + SendOnly bool + Order int32 + + State string +} + +// Receipt represents an ethereum receipt. +// +// Copied from go-ethereum: https://github.com/ethereum/go-ethereum/blob/ce9a289fa48e0d2593c4aaa7e207c8a5dd3eaa8a/core/types/receipt.go#L50 +// +// We use our own version because Geth's version specifies various +// gencodec:"required" fields which cause unhelpful errors when unmarshalling +// from an empty JSON object which can happen in the batch fetcher. 
+type Receipt struct { + PostState []byte `json:"root"` + Status uint64 `json:"status"` + CumulativeGasUsed uint64 `json:"cumulativeGasUsed"` + Bloom gethTypes.Bloom `json:"logsBloom"` + Logs []*Log `json:"logs"` + TxHash common.Hash `json:"transactionHash"` + ContractAddress common.Address `json:"contractAddress"` + GasUsed uint64 `json:"gasUsed"` + BlockHash common.Hash `json:"blockHash,omitempty"` + BlockNumber *big.Int `json:"blockNumber,omitempty"` + TransactionIndex uint `json:"transactionIndex"` +} + +// FromGethReceipt converts a gethTypes.Receipt to a Receipt +func FromGethReceipt(gr *gethTypes.Receipt) *Receipt { + if gr == nil { + return nil + } + logs := make([]*Log, len(gr.Logs)) + for i, glog := range gr.Logs { + logs[i] = FromGethLog(glog) + } + return &Receipt{ + gr.PostState, + gr.Status, + gr.CumulativeGasUsed, + gr.Bloom, + logs, + gr.TxHash, + gr.ContractAddress, + gr.GasUsed, + gr.BlockHash, + gr.BlockNumber, + gr.TransactionIndex, + } +} + +// IsZero returns true if receipt is the zero receipt +// Batch calls to the RPC will return a pointer to an empty Receipt struct +// Easiest way to check if the receipt was missing is to see if the hash is 0x0 +// Real receipts will always have the TxHash set +func (r *Receipt) IsZero() bool { + return r.TxHash == utils.EmptyHash +} + +// IsUnmined returns true if the receipt is for a TX that has not been mined yet. +// Supposedly according to the spec this should never happen, but Parity does +// it anyway. +func (r *Receipt) IsUnmined() bool { + return r.BlockHash == utils.EmptyHash +} + +// MarshalJSON marshals Receipt as JSON. 
+// Copied from: https://github.com/ethereum/go-ethereum/blob/ce9a289fa48e0d2593c4aaa7e207c8a5dd3eaa8a/core/types/gen_receipt_json.go +func (r Receipt) MarshalJSON() ([]byte, error) { + type Receipt struct { + PostState hexutil.Bytes `json:"root"` + Status hexutil.Uint64 `json:"status"` + CumulativeGasUsed hexutil.Uint64 `json:"cumulativeGasUsed"` + Bloom gethTypes.Bloom `json:"logsBloom"` + Logs []*Log `json:"logs"` + TxHash common.Hash `json:"transactionHash"` + ContractAddress common.Address `json:"contractAddress"` + GasUsed hexutil.Uint64 `json:"gasUsed"` + BlockHash common.Hash `json:"blockHash,omitempty"` + BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` + TransactionIndex hexutil.Uint `json:"transactionIndex"` + } + var enc Receipt + enc.PostState = r.PostState + enc.Status = hexutil.Uint64(r.Status) + enc.CumulativeGasUsed = hexutil.Uint64(r.CumulativeGasUsed) + enc.Bloom = r.Bloom + enc.Logs = r.Logs + enc.TxHash = r.TxHash + enc.ContractAddress = r.ContractAddress + enc.GasUsed = hexutil.Uint64(r.GasUsed) + enc.BlockHash = r.BlockHash + enc.BlockNumber = (*hexutil.Big)(r.BlockNumber) + enc.TransactionIndex = hexutil.Uint(r.TransactionIndex) + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. 
+func (r *Receipt) UnmarshalJSON(input []byte) error { + type Receipt struct { + PostState *hexutil.Bytes `json:"root"` + Status *hexutil.Uint64 `json:"status"` + CumulativeGasUsed *hexutil.Uint64 `json:"cumulativeGasUsed"` + Bloom *gethTypes.Bloom `json:"logsBloom"` + Logs []*Log `json:"logs"` + TxHash *common.Hash `json:"transactionHash"` + ContractAddress *common.Address `json:"contractAddress"` + GasUsed *hexutil.Uint64 `json:"gasUsed"` + BlockHash *common.Hash `json:"blockHash,omitempty"` + BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` + TransactionIndex *hexutil.Uint `json:"transactionIndex"` + } + var dec Receipt + if err := json.Unmarshal(input, &dec); err != nil { + return errors.Wrap(err, "could not unmarshal receipt") + } + if dec.PostState != nil { + r.PostState = *dec.PostState + } + if dec.Status != nil { + r.Status = uint64(*dec.Status) + } + if dec.CumulativeGasUsed != nil { + r.CumulativeGasUsed = uint64(*dec.CumulativeGasUsed) + } + if dec.Bloom != nil { + r.Bloom = *dec.Bloom + } + r.Logs = dec.Logs + if dec.TxHash != nil { + r.TxHash = *dec.TxHash + } + if dec.ContractAddress != nil { + r.ContractAddress = *dec.ContractAddress + } + if dec.GasUsed != nil { + r.GasUsed = uint64(*dec.GasUsed) + } + if dec.BlockHash != nil { + r.BlockHash = *dec.BlockHash + } + if dec.BlockNumber != nil { + r.BlockNumber = (*big.Int)(dec.BlockNumber) + } + if dec.TransactionIndex != nil { + r.TransactionIndex = uint(*dec.TransactionIndex) + } + return nil +} + +func (r *Receipt) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return errors.New("type assertion to []byte failed") + } + + return json.Unmarshal(b, r) +} + +func (r *Receipt) Value() (driver.Value, error) { + return json.Marshal(r) +} + +func (r *Receipt) GetStatus() uint64 { + return r.Status +} + +func (r *Receipt) GetTxHash() common.Hash { + return r.TxHash +} + +func (r *Receipt) GetBlockNumber() *big.Int { + return r.BlockNumber +} + +func (r *Receipt) GetFeeUsed() 
uint64 { + return r.GasUsed +} + +func (r *Receipt) GetTransactionIndex() uint { + return r.TransactionIndex +} + +func (r *Receipt) GetBlockHash() common.Hash { + return r.BlockHash +} + +// Log represents a contract log event. +// +// Copied from go-ethereum: https://github.com/ethereum/go-ethereum/blob/ce9a289fa48e0d2593c4aaa7e207c8a5dd3eaa8a/core/types/log.go +// +// We use our own version because Geth's version specifies various +// gencodec:"required" fields which cause unhelpful errors when unmarshalling +// from an empty JSON object which can happen in the batch fetcher. +type Log struct { + Address common.Address `json:"address"` + Topics []common.Hash `json:"topics"` + Data []byte `json:"data"` + BlockNumber uint64 `json:"blockNumber"` + TxHash common.Hash `json:"transactionHash"` + TxIndex uint `json:"transactionIndex"` + BlockHash common.Hash `json:"blockHash"` + Index uint `json:"logIndex"` + Removed bool `json:"removed"` +} + +// FromGethLog converts a gethTypes.Log to a Log +func FromGethLog(gl *gethTypes.Log) *Log { + if gl == nil { + return nil + } + return &Log{ + gl.Address, + gl.Topics, + gl.Data, + gl.BlockNumber, + gl.TxHash, + gl.TxIndex, + gl.BlockHash, + gl.Index, + gl.Removed, + } +} + +// MarshalJSON marshals as JSON. 
+func (l Log) MarshalJSON() ([]byte, error) { + type Log struct { + Address common.Address `json:"address"` + Topics []common.Hash `json:"topics"` + Data hexutil.Bytes `json:"data"` + BlockNumber hexutil.Uint64 `json:"blockNumber"` + TxHash common.Hash `json:"transactionHash"` + TxIndex hexutil.Uint `json:"transactionIndex"` + BlockHash common.Hash `json:"blockHash"` + Index hexutil.Uint `json:"logIndex"` + Removed bool `json:"removed"` + } + var enc Log + enc.Address = l.Address + enc.Topics = l.Topics + enc.Data = l.Data + enc.BlockNumber = hexutil.Uint64(l.BlockNumber) + enc.TxHash = l.TxHash + enc.TxIndex = hexutil.Uint(l.TxIndex) + enc.BlockHash = l.BlockHash + enc.Index = hexutil.Uint(l.Index) + enc.Removed = l.Removed + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. +func (l *Log) UnmarshalJSON(input []byte) error { + type Log struct { + Address *common.Address `json:"address"` + Topics []common.Hash `json:"topics"` + Data *hexutil.Bytes `json:"data"` + BlockNumber *hexutil.Uint64 `json:"blockNumber"` + TxHash *common.Hash `json:"transactionHash"` + TxIndex *hexutil.Uint `json:"transactionIndex"` + BlockHash *common.Hash `json:"blockHash"` + Index *hexutil.Uint `json:"logIndex"` + Removed *bool `json:"removed"` + } + var dec Log + if err := json.Unmarshal(input, &dec); err != nil { + return errors.Wrap(err, "could not unmarshal log") + } + if dec.Address != nil { + l.Address = *dec.Address + } + l.Topics = dec.Topics + if dec.Data != nil { + l.Data = *dec.Data + } + if dec.BlockNumber != nil { + l.BlockNumber = uint64(*dec.BlockNumber) + } + if dec.TxHash != nil { + l.TxHash = *dec.TxHash + } + if dec.TxIndex != nil { + l.TxIndex = uint(*dec.TxIndex) + } + if dec.BlockHash != nil { + l.BlockHash = *dec.BlockHash + } + if dec.Index != nil { + l.Index = uint(*dec.Index) + } + if dec.Removed != nil { + l.Removed = *dec.Removed + } + return nil +} + +type AddressArray []common.Address + +func (a *AddressArray) Scan(src interface{}) error 
{ + baArray := pgtype.ByteaArray{} + err := baArray.Scan(src) + if err != nil { + return errors.Wrap(err, "Expected BYTEA[] column for AddressArray") + } + if baArray.Status != pgtype.Present || len(baArray.Dimensions) > 1 { + return errors.Errorf("Expected AddressArray to be 1-dimensional. Dimensions = %v", baArray.Dimensions) + } + + for i, ba := range baArray.Elements { + addr := common.Address{} + if ba.Status != pgtype.Present { + return errors.Errorf("Expected all addresses in AddressArray to be non-NULL. Got AddressArray[%d] = NULL", i) + } + err = addr.Scan(ba.Bytes) + if err != nil { + return err + } + *a = append(*a, addr) + } + + return nil +} + +type HashArray []common.Hash + +func (h *HashArray) Scan(src interface{}) error { + baArray := pgtype.ByteaArray{} + err := baArray.Scan(src) + if err != nil { + return errors.Wrap(err, "Expected BYTEA[] column for HashArray") + } + if baArray.Status != pgtype.Present || len(baArray.Dimensions) > 1 { + return errors.Errorf("Expected HashArray to be 1-dimensional. Dimensions = %v", baArray.Dimensions) + } + + for i, ba := range baArray.Elements { + hash := common.Hash{} + if ba.Status != pgtype.Present { + return errors.Errorf("Expected all addresses in HashArray to be non-NULL. 
Got HashArray[%d] = NULL", i) + } + err = hash.Scan(ba.Bytes) + if err != nil { + return err + } + *h = append(*h, hash) + } + return err +} diff --git a/core/chains/evm/types/types_test.go b/core/chains/evm/types/types_test.go new file mode 100644 index 00000000..5c6952c3 --- /dev/null +++ b/core/chains/evm/types/types_test.go @@ -0,0 +1,258 @@ +package types_test + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +var ( + testGethLog1 = &gethTypes.Log{ + Address: common.HexToAddress("0x11111111"), + Topics: []common.Hash{ + common.HexToHash("0xaaaaaaaa"), + common.HexToHash("0xbbbbbbbb"), + }, + Data: []byte{1, 2, 3, 4, 5}, + BlockNumber: 1, + BlockHash: common.HexToHash("0xffffffff"), + TxHash: common.HexToHash("0xcccccccc"), + TxIndex: 100, + Index: 200, + Removed: false, + } + + testGethLog2 = &gethTypes.Log{ + Address: common.HexToAddress("0x11111112"), + Topics: []common.Hash{ + common.HexToHash("0xaaaaaaab"), + common.HexToHash("0xbbbbbbbc"), + }, + Data: []byte{2, 3, 4, 5, 6}, + BlockNumber: 1, + BlockHash: common.HexToHash("0xfffffff0"), + TxHash: common.HexToHash("0xcccccccd"), + TxIndex: 101, + Index: 201, + Removed: true, + } + + testGethReceipt = &gethTypes.Receipt{ + PostState: []byte{1, 2, 3, 4, 5}, + Status: 1, + CumulativeGasUsed: 100, + Bloom: gethTypes.BytesToBloom([]byte{1, 3, 4}), + TxHash: common.HexToHash("0x1020304050"), + ContractAddress: common.HexToAddress("0x1122334455"), + GasUsed: 123, + BlockHash: common.HexToHash("0x11111111111111"), + BlockNumber: big.NewInt(555), + TransactionIndex: 777, + Logs: []*gethTypes.Log{ + testGethLog1, + testGethLog2, + }, + } +) + +func TestFromGethReceipt(t *testing.T) { + t.Parallel() + + receipt := types.FromGethReceipt(testGethReceipt) + + 
assert.NotNil(t, receipt) + assert.Equal(t, testGethReceipt.PostState, receipt.PostState) + assert.Equal(t, testGethReceipt.Status, receipt.Status) + assert.Equal(t, testGethReceipt.CumulativeGasUsed, receipt.CumulativeGasUsed) + assert.Equal(t, testGethReceipt.Bloom, receipt.Bloom) + assert.Equal(t, testGethReceipt.TxHash, receipt.TxHash) + assert.Equal(t, testGethReceipt.ContractAddress, receipt.ContractAddress) + assert.Equal(t, testGethReceipt.GasUsed, receipt.GasUsed) + assert.Equal(t, testGethReceipt.BlockHash, receipt.BlockHash) + assert.Equal(t, testGethReceipt.BlockNumber, receipt.BlockNumber) + assert.Equal(t, testGethReceipt.TransactionIndex, receipt.TransactionIndex) + assert.Len(t, receipt.Logs, len(testGethReceipt.Logs)) + + for i, log := range receipt.Logs { + expectedLog := testGethReceipt.Logs[i] + assert.Equal(t, expectedLog.Address, log.Address) + assert.Equal(t, expectedLog.Topics, log.Topics) + assert.Equal(t, expectedLog.Data, log.Data) + assert.Equal(t, expectedLog.BlockHash, log.BlockHash) + assert.Equal(t, expectedLog.BlockNumber, log.BlockNumber) + assert.Equal(t, expectedLog.TxHash, log.TxHash) + assert.Equal(t, expectedLog.TxIndex, log.TxIndex) + assert.Equal(t, expectedLog.Index, log.Index) + assert.Equal(t, expectedLog.Removed, log.Removed) + } +} + +func TestReceipt_IsZero(t *testing.T) { + t.Parallel() + + receipt := types.FromGethReceipt(testGethReceipt) + assert.False(t, receipt.IsZero()) + + zeroTxHash := *testGethReceipt + zeroTxHash.TxHash = common.HexToHash("0x0") + receipt = types.FromGethReceipt(&zeroTxHash) + assert.True(t, receipt.IsZero()) +} + +func TestReceipt_IsUnmined(t *testing.T) { + t.Parallel() + + receipt := types.FromGethReceipt(testGethReceipt) + assert.False(t, receipt.IsUnmined()) + + zeroBlockHash := *testGethReceipt + zeroBlockHash.BlockHash = common.HexToHash("0x0") + receipt = types.FromGethReceipt(&zeroBlockHash) + assert.True(t, receipt.IsUnmined()) +} + +func TestReceipt_MarshalUnmarshalJson(t 
*testing.T) { + t.Parallel() + + receipt := types.FromGethReceipt(testGethReceipt) + json, err := receipt.MarshalJSON() + assert.NoError(t, err) + assert.NotEmpty(t, json) + + parsedReceipt := &types.Receipt{} + err = parsedReceipt.UnmarshalJSON(json) + assert.NoError(t, err) + + assert.Equal(t, receipt, parsedReceipt) +} + +func TestLog_MarshalUnmarshalJson(t *testing.T) { + t.Parallel() + + log := types.FromGethLog(testGethLog1) + json, err := log.MarshalJSON() + assert.NoError(t, err) + assert.NotEmpty(t, json) + + parsedLog := &types.Log{} + err = parsedLog.UnmarshalJSON(json) + assert.NoError(t, err) + + assert.Equal(t, log, parsedLog) +} + +// constraint satisfied by common.Hash and common.Address +type ByteString interface { + Bytes() []byte +} + +type ScannableArrayType interface { + Scan(src any) error +} + +type HexArrayScanTestArgs struct { + b1 []byte + b2 []byte + wrongsize []byte +} + +func testHexArrayScan[T ScannableArrayType](t *testing.T, dest T, args HexArrayScanTestArgs) { + b0 := "NULL" + empty := "{}" + b1, b2, wrongsize := args.b1, args.b2, args.wrongsize + + src1 := fmt.Sprintf("{\"\\\\x%x\"}", b1) + src2 := fmt.Sprintf("{\"\\\\x%x\",\"\\\\x%x\"}", b2, b2) + src3 := fmt.Sprintf("{\"\\\\x%x\"}", wrongsize) + invalid := fmt.Sprintf("{\"\\\\x%x\", NULL}", b1) + d2 := fmt.Sprintf("[1][1]={{\"\\\\x%x\"}}", b1) + + get := func(d T, ind int) (bs ByteString) { + switch val := (ScannableArrayType(dest)).(type) { + case *types.HashArray: + bs = ([]common.Hash(*val))[ind] + case *types.AddressArray: + bs = ([]common.Address(*val))[ind] + } + return bs + } + + length := func(d T) (l int) { + switch val := (ScannableArrayType(dest)).(type) { + case *types.HashArray: + l = len([]common.Hash(*val)) + case *types.AddressArray: + l = len([]common.Address(*val)) + } + return l + } + + err := dest.Scan(b0) + require.Error(t, err) + + err = dest.Scan(empty) + assert.NoError(t, err) + + err = dest.Scan(src1) + require.NoError(t, err) + require.Equal(t, 
length(dest), 1) + assert.Equal(t, get(dest, 0).Bytes(), b1) + + err = dest.Scan(src2) + require.NoError(t, err) + require.Equal(t, length(dest), 3) + assert.Equal(t, get(dest, 1).Bytes(), b2) + assert.Equal(t, get(dest, 2).Bytes(), b2) + + err = dest.Scan(src3) + require.Error(t, err) + + err = dest.Scan(invalid) + require.Error(t, err) + + err = dest.Scan(d2) + require.Error(t, err) +} + +func Test_AddressArrayScan(t *testing.T) { + t.Parallel() + addr1, err := hex.DecodeString("2ab9a2dc53736b361b72d900cdf9f78f9406fbbb") + require.NoError(t, err) + require.Len(t, addr1, 20) + addr2, err := hex.DecodeString("56b9a2dc53736b361b72d900cdf9f78f9406fbbb") + require.NoError(t, err) + require.Len(t, addr2, 20) + toolong, err := hex.DecodeString("6b361b72d900cdf9f78f9406fbbb6b361b72d900cdf9f78f9406fbbb") + require.NoError(t, err) + require.Len(t, toolong, 28) + + a := types.AddressArray{} + args := HexArrayScanTestArgs{addr1, addr2, toolong} + testHexArrayScan[*types.AddressArray](t, &a, args) +} + +func Test_HashArrayScan(t *testing.T) { + t.Parallel() + + h1, err := hex.DecodeString("2ab9130c6b361b72d900cdf9f78f9406fbbb6b361b72d900cdf9f78f9406fbbb") + require.NoError(t, err) + require.Len(t, h1, 32) + h2, err := hex.DecodeString("56b9a2dc53736b361b72d900cdf9f78f9406fbbb06fbbb6b361b7206fbbb6b36") + require.NoError(t, err) + require.Len(t, h2, 32) + tooshort, err := hex.DecodeString("6b361b72d900cdf9f78f9406fbbb6b361b72d900cdf9f78f9406fbbb") + require.NoError(t, err) + require.Len(t, tooshort, 28) + + h := types.HashArray{} + args := HexArrayScanTestArgs{h1, h2, tooshort} + testHexArrayScan[*types.HashArray](t, &h, args) +} diff --git a/core/chains/evm/types/utils.go b/core/chains/evm/types/utils.go new file mode 100644 index 00000000..96e0a02e --- /dev/null +++ b/core/chains/evm/types/utils.go @@ -0,0 +1,15 @@ +package types + +import ( + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +func MustGetABI(json string) abi.ABI { + abi, err := 
abi.JSON(strings.NewReader(json)) + if err != nil { + panic("could not parse ABI: " + err.Error()) + } + return abi +} diff --git a/core/chains/evm/utils/big/big.go b/core/chains/evm/utils/big/big.go new file mode 100644 index 00000000..50f74c97 --- /dev/null +++ b/core/chains/evm/utils/big/big.go @@ -0,0 +1,190 @@ +package big + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common/hexutil" + + bigmath "github.com/goplugin/plugin-common/pkg/utils/big_math" + "github.com/goplugin/plugin-common/pkg/utils/hex" + + "github.com/goplugin/plugin-common/pkg/utils/bytes" +) + +const base10 = 10 + +// BigFloat accepts both string and float JSON values. +type BigFloat big.Float + +// MarshalJSON implements the json.Marshaler interface. +func (b BigFloat) MarshalJSON() ([]byte, error) { + var j = big.Float(b) + return json.Marshal(&j) +} + +// UnmarshalJSON implements the json.Unmarshal interface. +func (b *BigFloat) UnmarshalJSON(buf []byte) error { + var n json.Number + if err := json.Unmarshal(buf, &n); err == nil { + f, _, err := new(big.Float).Parse(n.String(), 0) + if err != nil { + return err + } + *b = BigFloat(*f) + return nil + } + var bf big.Float + if err := json.Unmarshal(buf, &bf); err != nil { + return err + } + *b = BigFloat(bf) + return nil +} + +// Value returns the big.Float value. +func (b *BigFloat) Value() *big.Float { + return (*big.Float)(b) +} + +// Big stores large integers and can deserialize a variety of inputs. +type Big big.Int + +// New constructs a Big from *big.Int. +func New(i *big.Int) *Big { + if i != nil { + var b big.Int + b.Set(i) + return (*Big)(&b) + } + return nil +} + +// NewI constructs a Big from int64. +func NewI(i int64) *Big { + return New(big.NewInt(i)) +} + +// MarshalText marshals this instance to base 10 number as string. 
+func (b Big) MarshalText() ([]byte, error) { + return []byte((*big.Int)(&b).Text(base10)), nil +} + +// MarshalJSON marshals this instance to base 10 number as string. +func (b Big) MarshalJSON() ([]byte, error) { + text, err := b.MarshalText() + if err != nil { + return nil, err + } + return json.Marshal(string(text)) +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (b *Big) UnmarshalText(input []byte) error { + input = bytes.TrimQuotes(input) + str := string(input) + if hex.HasPrefix(str) { + decoded, err := hexutil.DecodeBig(str) + if err != nil { + return err + } + *b = Big(*decoded) + return nil + } + + _, ok := b.setString(str, 10) + if !ok { + return fmt.Errorf("unable to convert %s to Big", str) + } + return nil +} + +func (b *Big) setString(s string, base int) (*Big, bool) { + w, ok := (*big.Int)(b).SetString(s, base) + return (*Big)(w), ok +} + +// UnmarshalJSON implements encoding.JSONUnmarshaler. +func (b *Big) UnmarshalJSON(input []byte) error { + return b.UnmarshalText(input) +} + +// Value returns this instance serialized for database storage. +func (b Big) Value() (driver.Value, error) { + return b.String(), nil +} + +// Scan reads the database value and returns an instance. +func (b *Big) Scan(value interface{}) error { + switch v := value.(type) { + case string: + decoded, ok := b.setString(v, 10) + if !ok { + return fmt.Errorf("unable to set string %v of %T to base 10 big.Int for Big", value, value) + } + *b = *decoded + case []uint8: + // The SQL library returns numeric() types as []uint8 of the string representation + decoded, ok := b.setString(string(v), 10) + if !ok { + return fmt.Errorf("unable to set string %v of %T to base 10 big.Int for Big", value, value) + } + *b = *decoded + default: + return fmt.Errorf("unable to convert %v of %T to Big", value, value) + } + + return nil +} + +// ToInt converts b to a big.Int. +func (b *Big) ToInt() *big.Int { + return (*big.Int)(b) +} + +// String returns the base 10 encoding of b. 
+func (b *Big) String() string { + return b.ToInt().String() +} + +// Bytes returns the absolute value of b as a big-endian byte slice. +func (b *Big) Hex() string { + return hexutil.EncodeBig(b.ToInt()) +} + +// Bytes returns the +func (b *Big) Bytes() []byte { + return b.ToInt().Bytes() +} + +// Cmp compares b and c as big.Ints. +func (b *Big) Cmp(c *Big) int { + return b.ToInt().Cmp(c.ToInt()) +} + +// Equal returns true if c is equal according to Cmp. +func (b *Big) Equal(c *Big) bool { + return b.Cmp(c) == 0 +} + +// Int64 casts b as an int64 type +func (b *Big) Int64() int64 { + return b.ToInt().Int64() +} + +// Add returns the sum of b and c +func (b *Big) Add(c *Big) *Big { + return New(bigmath.Add(b.ToInt(), c.ToInt())) +} + +// Sub returns the differencs between b and c +func (b *Big) Sub(c *Big) *Big { + return New(bigmath.Sub(b.ToInt(), c.ToInt())) +} + +// Sub returns b % c +func (b *Big) Mod(c *Big) *Big { + return New(bigmath.Mod(b.ToInt(), c.ToInt())) +} diff --git a/core/chains/evm/utils/big/big_test.go b/core/chains/evm/utils/big/big_test.go new file mode 100644 index 00000000..c4774cf1 --- /dev/null +++ b/core/chains/evm/utils/big/big_test.go @@ -0,0 +1,251 @@ +package big + +import ( + "encoding/json" + "math/big" + "testing" + + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBigFloatMarshal(t *testing.T) { + tests := []struct { + obj BigFloat + exp string + }{ + {BigFloat(*big.NewFloat(1)), `"1"`}, + } + + for _, tc := range tests { + buf, err := json.Marshal(tc.obj) + require.NoError(t, err) + assert.Equal(t, tc.exp, string(buf)) + } +} + +func TestBigFloatUnmarshalFloat64(t *testing.T) { + tests := []struct { + payload string + exp *big.Float + }{ + {"-1", big.NewFloat(-1)}, + {`"-1"`, big.NewFloat(-1)}, + {"100", big.NewFloat(100)}, + {`"100"`, big.NewFloat(100)}, + {"3.146", big.NewFloat(3.146)}, + {`"3.146"`, big.NewFloat(3.146)}, + } + + for _, tc := range 
tests { + tc := tc + t.Run(tc.payload, func(t *testing.T) { + var b BigFloat + err := json.Unmarshal([]byte(tc.payload), &b) + require.NoError(t, err) + assert.Equal(t, tc.exp.String(), b.Value().String()) + }) + } +} + +func TestBigFloatUnmarshalString(t *testing.T) { + tests := []struct { + payload string + exp *big.Float + }{ + {"-1", big.NewFloat(-1)}, + {"100", big.NewFloat(100)}, + {"3.146", big.NewFloat(3.146)}, + {"1.000000000000000001", decimal.RequireFromString("1.000000000000000001").BigFloat()}, + {"1000000.000000000000000001", decimal.RequireFromString("1000000.000000000000000001").BigFloat()}, + {"1000000000.000000000000000001", decimal.RequireFromString("1000000000.000000000000000001").BigFloat()}, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.payload, func(t *testing.T) { + var b BigFloat + err := json.Unmarshal([]byte(tc.payload), &b) + require.NoError(t, err) + assert.Equal(t, tc.exp.String(), b.Value().String()) + }) + } +} + +func TestBig_UnmarshalText(t *testing.T) { + t.Parallel() + + i := &Big{} + tests := []struct { + name string + input string + want *big.Int + }{ + {"number", `1234`, big.NewInt(1234)}, + {"string", `"1234"`, big.NewInt(1234)}, + {"hex number", `0x1234`, big.NewInt(4660)}, + {"hex string", `"0x1234"`, big.NewInt(4660)}, + {"single quoted", `'1234'`, big.NewInt(1234)}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := i.UnmarshalText([]byte(test.input)) + require.NoError(t, err) + assert.Equal(t, test.want, i.ToInt()) + }) + } +} + +func TestBig_UnmarshalTextErrors(t *testing.T) { + t.Parallel() + + i := &Big{} + tests := []struct { + name string + input string + want *big.Int + }{ + {"quoted word", `"word"`, big.NewInt(0)}, + {"word", `word`, big.NewInt(0)}, + {"empty", ``, big.NewInt(0)}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := i.UnmarshalText([]byte(test.input)) + require.Error(t, err) + }) + } +} + +func TestBig_MarshalJSON(t 
*testing.T) { + t.Parallel() + + plusOneTo64bit, ok := new(big.Int).SetString("9223372036854775808", 10) + require.True(t, ok) + + tests := []struct { + name string + input *big.Int + want string + }{ + {"zero", big.NewInt(0), `"0"`}, + {"number", big.NewInt(1234), `"1234"`}, + {"big number", plusOneTo64bit, `"9223372036854775808"`}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + i := (*Big)(test.input) + b, err := json.Marshal(&i) + assert.NoError(t, err) + assert.Equal(t, test.want, string(b)) + }) + } +} + +func TestBig_UnMarshalJSON(t *testing.T) { + t.Parallel() + + plusOneTo64bit, ok := new(big.Int).SetString("9223372036854775808", 10) + require.True(t, ok) + + tests := []struct { + name string + input string + want *Big + }{ + {"zero", `"0"`, (*Big)(big.NewInt(0))}, + {"number", `"1234"`, (*Big)(big.NewInt(1234))}, + {"big number", `"9223372036854775808"`, (*Big)(plusOneTo64bit)}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + i := new(Big) + err := json.Unmarshal([]byte(test.input), &i) + assert.NoError(t, err) + assert.Equal(t, test.want, i) + }) + } +} + +func TestBig_UnMarshalJSON_errors(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + }{ + {"empty", `""`}, + {"NaN", `"NaN"`}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + i := new(Big) + err := json.Unmarshal([]byte(test.input), &i) + assert.Error(t, err) + }) + } +} + +func TestBig_Scan(t *testing.T) { + t.Parallel() + + uint256Max, ok := new(big.Int).SetString("115792089237316195423570985008687907853269984665640564039457584007913129639935", 10) + require.True(t, ok) + + tests := []struct { + name string + input interface{} + want *Big + }{ + {"zero string", "0", New(big.NewInt(0))}, + {"one string", "1", New(big.NewInt(1))}, + { + "large string", + "115792089237316195423570985008687907853269984665640564039457584007913129639935", + New(uint256Max), + }, + {"zero as bytes", 
[]uint8{48}, New(big.NewInt(0))}, + {"small number as bytes", []uint8{49, 52}, New(big.NewInt(14))}, + { + "max number as bytes", + []uint8{ + 49, 49, 53, 55, 57, 50, 48, 56, 57, 50, 51, 55, 51, 49, 54, 49, 57, 53, + 52, 50, 51, 53, 55, 48, 57, 56, 53, 48, 48, 56, 54, 56, 55, 57, 48, 55, + 56, 53, 51, 50, 54, 57, 57, 56, 52, 54, 54, 53, 54, 52, 48, 53, 54, 52, + 48, 51, 57, 52, 53, 55, 53, 56, 52, 48, 48, 55, 57, 49, 51, 49, 50, 57, + 54, 51, 57, 57, 51, 53, + }, + New(uint256Max), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + big := &Big{} + err := big.Scan(test.input) + require.NoError(t, err) + assert.Equal(t, test.want, big) + }) + } +} + +func TestBig_ScanErrors(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + }{ + {"zero integer", 0}, + {"one integer", 1}, + {"zero wrapped string", `"0"`}, + {"one wrapped string", `"1"`}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + big := &Big{} + err := big.Scan(test.input) + require.Error(t, err) + }) + } +} diff --git a/core/chains/evm/utils/ethabi.go b/core/chains/evm/utils/ethabi.go new file mode 100644 index 00000000..8a05200f --- /dev/null +++ b/core/chains/evm/utils/ethabi.go @@ -0,0 +1,274 @@ +package utils + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "github.com/tidwall/gjson" + + "github.com/goplugin/plugin-common/pkg/utils/hex" +) + +const ( + // FormatBytes encodes the output as bytes + FormatBytes = "bytes" + // FormatPreformatted encodes the output, assumed to be hex, as bytes. 
+ FormatPreformatted = "preformatted" + // FormatUint256 encodes the output as bytes containing a uint256 + FormatUint256 = "uint256" + // FormatInt256 encodes the output as bytes containing an int256 + FormatInt256 = "int256" + // FormatBool encodes the output as bytes containing a bool + FormatBool = "bool" +) + +// ABIEncode is the equivalent of abi.encode. +// See a full set of examples https://github.com/ethereum/go-ethereum/blob/420b78659bef661a83c5c442121b13f13288c09f/accounts/abi/packing_test.go#L31 +func ABIEncode(abiStr string, values ...interface{}) ([]byte, error) { + // Create a dummy method with arguments + inDef := fmt.Sprintf(`[{ "name" : "method", "type": "function", "inputs": %s}]`, abiStr) + inAbi, err := abi.JSON(strings.NewReader(inDef)) + if err != nil { + return nil, err + } + res, err := inAbi.Pack("method", values...) + if err != nil { + return nil, err + } + return res[4:], nil +} + +// ABIEncode is the equivalent of abi.decode. +// See a full set of examples https://github.com/ethereum/go-ethereum/blob/420b78659bef661a83c5c442121b13f13288c09f/accounts/abi/packing_test.go#L31 +func ABIDecode(abiStr string, data []byte) ([]interface{}, error) { + inDef := fmt.Sprintf(`[{ "name" : "method", "type": "function", "outputs": %s}]`, abiStr) + inAbi, err := abi.JSON(strings.NewReader(inDef)) + if err != nil { + return nil, err + } + return inAbi.Unpack("method", data) +} + +// ConcatBytes appends a bunch of byte arrays into a single byte array +func ConcatBytes(bufs ...[]byte) []byte { + return bytes.Join(bufs, []byte{}) +} + +func roundToEVMWordBorder(length int) int { + mod := length % EVMWordByteLen + if mod == 0 { + return 0 + } + return EVMWordByteLen - mod +} + +// EVMEncodeBytes encodes arbitrary bytes as bytes expected by the EVM +func EVMEncodeBytes(input []byte) []byte { + length := len(input) + return ConcatBytes( + EVMWordUint64(uint64(length)), + input, + make([]byte, roundToEVMWordBorder(length))) +} + +// EVMTranscodeBool converts a 
json input to an EVM bool +func EVMTranscodeBool(value gjson.Result) ([]byte, error) { + var output uint64 + + switch value.Type { + case gjson.Number: + if value.Num != 0 { + output = 1 + } + + case gjson.String: + if len(value.Str) > 0 { + output = 1 + } + + case gjson.True: + output = 1 + + case gjson.JSON: + value.ForEach(func(key, value gjson.Result) bool { + output = 1 + return false + }) + + case gjson.False, gjson.Null: + + default: + panic(fmt.Errorf("unreachable/unsupported encoding for value: %s", value.Type)) + } + + return EVMWordUint64(output), nil +} + +func parseDecimalString(input string) (*big.Int, error) { + d, err := decimal.NewFromString(input) + return d.BigInt(), err +} + +func parseNumericString(input string) (*big.Int, error) { + if hex.HasPrefix(input) { + output, ok := big.NewInt(0).SetString(hex.TrimPrefix(input), 16) + if !ok { + return nil, fmt.Errorf("error parsing hex %s", input) + } + return output, nil + } + + output, ok := big.NewInt(0).SetString(input, 10) + if !ok { + return parseDecimalString(input) + } + return output, nil +} + +func parseJSONAsEVMWord(value gjson.Result) (*big.Int, error) { + output := new(big.Int) + + switch value.Type { + case gjson.String: + var err error + output, err = parseNumericString(value.Str) + if err != nil { + return nil, err + } + + case gjson.Number: + output.SetInt64(int64(value.Num)) + + case gjson.Null: + + default: + return nil, fmt.Errorf("unsupported encoding for value: %s", value.Type) + } + + return output, nil +} + +// EVMTranscodeUint256 converts a json input to an EVM uint256 +func EVMTranscodeUint256(value gjson.Result) ([]byte, error) { + output, err := parseJSONAsEVMWord(value) + if err != nil { + return nil, err + } + + if output.Cmp(big.NewInt(0)) < 0 { + return nil, fmt.Errorf("%v cannot be represented as uint256", output) + } + + return EVMWordBigInt(output) +} + +// EVMTranscodeInt256 converts a json input to an EVM int256 +func EVMTranscodeInt256(value gjson.Result) ([]byte, 
error) { + output, err := parseJSONAsEVMWord(value) + if err != nil { + return nil, err + } + + return EVMWordSignedBigInt(output) +} + +// EVMWordUint64 returns a uint64 as an EVM word byte array. +func EVMWordUint64(val uint64) []byte { + word := make([]byte, EVMWordByteLen) + binary.BigEndian.PutUint64(word[EVMWordByteLen-8:], val) + return word +} + +// EVMWordUint32 returns a uint32 as an EVM word byte array. +func EVMWordUint32(val uint32) []byte { + word := make([]byte, EVMWordByteLen) + binary.BigEndian.PutUint32(word[EVMWordByteLen-4:], val) + return word +} + +// EVMWordUint128 returns a uint128 as an EVM word byte array. +func EVMWordUint128(val *big.Int) ([]byte, error) { + bytes := val.Bytes() + if val.BitLen() > 128 { + return nil, fmt.Errorf("overflow saving uint128 to EVM word: %v", val) + } else if val.Sign() == -1 { + return nil, fmt.Errorf("invalid attempt to save negative value as uint128 to EVM word: %v", val) + } + return common.LeftPadBytes(bytes, EVMWordByteLen), nil +} + +// EVMWordSignedBigInt returns a big.Int as an EVM word byte array, with +// support for a signed representation. Returns error on overflow. +func EVMWordSignedBigInt(val *big.Int) ([]byte, error) { + bytes := val.Bytes() + if val.BitLen() > (8*EVMWordByteLen - 1) { + return nil, fmt.Errorf("overflow saving signed big.Int to EVM word: %v", val) + } + if val.Sign() == -1 { + twosComplement := new(big.Int).Add(val, MaxUint256) + bytes = new(big.Int).Add(twosComplement, big.NewInt(1)).Bytes() + } + return common.LeftPadBytes(bytes, EVMWordByteLen), nil +} + +// EVMWordBigInt returns a big.Int as an EVM word byte array, with support for +// a signed representation. Returns error on overflow. 
+func EVMWordBigInt(val *big.Int) ([]byte, error) { + if val.Sign() == -1 { + return nil, errors.New("Uint256 cannot be negative") + } + bytes := val.Bytes() + if len(bytes) > EVMWordByteLen { + return nil, fmt.Errorf("overflow saving big.Int to EVM word: %v", val) + } + return common.LeftPadBytes(bytes, EVMWordByteLen), nil +} + +// Bytes32FromString returns a 32 byte array filled from the given string, which may be of any length. +func Bytes32FromString(s string) [32]byte { + var b32 [32]byte + copy(b32[:], s) + return b32 +} + +// Bytes4FromString returns a 4 byte array filled from the given string, which may be of any length. +func Bytes4FromString(s string) [4]byte { + var b4 [4]byte + copy(b4[:], s) + return b4 +} + +func MustAbiType(ts string, components []abi.ArgumentMarshaling) abi.Type { + ty, err := abi.NewType(ts, "", components) + if err != nil { + panic(err) + } + return ty +} + +// "Constants" used by EVM words +var ( + maxUint257 = &big.Int{} + // MaxUint256 represents the largest number represented by an EVM word + MaxUint256 = &big.Int{} + // MaxInt256 represents the largest number represented by an EVM word using + // signed encoding. + MaxInt256 = &big.Int{} + // MinInt256 represents the smallest number represented by an EVM word using + // signed encoding. 
+ MinInt256 = &big.Int{} +) + +func init() { + maxUint257 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil) + MaxUint256 = new(big.Int).Sub(maxUint257, big.NewInt(1)) + MaxInt256 = new(big.Int).Div(MaxUint256, big.NewInt(2)) + MinInt256 = new(big.Int).Neg(MaxInt256) +} diff --git a/core/chains/evm/utils/ethabi_test.go b/core/chains/evm/utils/ethabi_test.go new file mode 100644 index 00000000..b6d14460 --- /dev/null +++ b/core/chains/evm/utils/ethabi_test.go @@ -0,0 +1,698 @@ +package utils + +import ( + "encoding/hex" + "fmt" + "math" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + + commonhex "github.com/goplugin/plugin-common/pkg/utils/hex" +) + +func pow2(arg int64) *big.Int { + return new(big.Int).Exp(big.NewInt(2), big.NewInt(arg), nil) +} + +func TestEVMWordUint64(t *testing.T) { + assert.Equal(t, + hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000001"), + EVMWordUint64(1)) + assert.Equal(t, + hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000100"), + EVMWordUint64(256)) + assert.Equal(t, + hexutil.MustDecode("0x000000000000000000000000000000000000000000000000ffffffffffffffff"), + EVMWordUint64(math.MaxUint64)) +} + +func TestEVMWordUint128(t *testing.T) { + tests := []struct { + name string + val *big.Int + exp string + }{ + { + name: "1", + val: big.NewInt(1), + exp: "0x0000000000000000000000000000000000000000000000000000000000000001", + }, + { + name: "256", + val: big.NewInt(256), + exp: "0x0000000000000000000000000000000000000000000000000000000000000100", + }, + { + name: "Max Uint 128", + val: new(big.Int).Sub(pow2(128), big.NewInt(1)), + exp: "0x00000000000000000000000000000000ffffffffffffffffffffffffffffffff", + }, + } + for _, test := range tests { + t.Log(test.name) + ret, err := EVMWordUint128(test.val) + 
assert.Equal(t, hexutil.MustDecode(test.exp), ret) + require.NoError(t, err) + } +} + +func TestEVMWordUint128_Error(t *testing.T) { + tests := []struct { + name string + val *big.Int + }{ + { + name: "Negative number", + val: big.NewInt(-1), + }, + { + name: "Number too large: 128", + val: pow2(128), + }, + } + for _, test := range tests { + t.Log(test.name) + _, err := EVMWordUint128(test.val) + assert.Error(t, err) + } +} + +func TestEVMWordSignedBigInt(t *testing.T) { + val, err := EVMWordSignedBigInt(&big.Int{}) + assert.NoError(t, err) + assert.Equal(t, hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000000"), val) + + val, err = EVMWordSignedBigInt(new(big.Int).SetInt64(1)) + assert.NoError(t, err) + assert.Equal(t, hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000001"), val) + + val, err = EVMWordSignedBigInt(new(big.Int).SetInt64(256)) + assert.NoError(t, err) + assert.Equal(t, hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000100"), val) + + val, err = EVMWordSignedBigInt(new(big.Int).SetInt64(-1)) + assert.NoError(t, err) + assert.Equal(t, hexutil.MustDecode("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), val) + + val, err = EVMWordSignedBigInt(MaxInt256) + assert.NoError(t, err) + assert.Equal(t, hexutil.MustDecode("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), val) + + val, err = EVMWordSignedBigInt(new(big.Int).Add(MaxInt256, big.NewInt(1))) + assert.Error(t, err) + assert.Nil(t, val) +} + +func TestEVMWordBigInt(t *testing.T) { + val, err := EVMWordBigInt(&big.Int{}) + assert.NoError(t, err) + assert.Equal(t, hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000000"), val) + + val, err = EVMWordBigInt(new(big.Int).SetInt64(1)) + assert.NoError(t, err) + assert.Equal(t, hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000001"), val) + + val, err = 
EVMWordBigInt(new(big.Int).SetInt64(256)) + assert.NoError(t, err) + assert.Equal(t, hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000100"), val) + + val, err = EVMWordBigInt(new(big.Int).SetInt64(-1)) + assert.Error(t, err) + assert.Nil(t, val) + + val, err = EVMWordBigInt(MaxUint256) + assert.NoError(t, err) + assert.Equal(t, hexutil.MustDecode("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), val) + + val, err = EVMWordBigInt(new(big.Int).Add(MaxUint256, big.NewInt(1))) + assert.Error(t, err) + assert.Nil(t, val) +} + +func TestEVMTranscodeBytes(t *testing.T) { + tests := []struct { + name string + input string + output string + }{ + { + "value is string", + `"hello world"`, + "0x" + + "000000000000000000000000000000000000000000000000000000000000000b" + + "68656c6c6f20776f726c64000000000000000000000000000000000000000000", + }, + { + "value is bool true", + `true`, + "0x" + + "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000001", + }, + { + "value is bool false", + `false`, + "0x" + + "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "value is positive integer", + `19`, + "0x" + + "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000013", + }, + { + "value is negative integer", + `-23`, + "0x" + + "0000000000000000000000000000000000000000000000000000000000000020" + + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe9", + }, + // NB: The following is undesirable behavior. 
For more details, please see + // https://www.pivotaltracker.com/n/workspaces/755483 + {"value is a number but not an integer", + `19.99`, + "0x" + + "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000013", + }, + } + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + input := gjson.Parse(test.input) + out, err := EVMTranscodeBytes(input) + assert.NoError(t, err) + assert.Equal(t, test.output, hexutil.Encode(out)) + }) + } +} + +func TestEVMTranscodeBytes_ErrorsOnOverflow(t *testing.T) { + input := gjson.Parse("1e+300") + _, err := EVMTranscodeBytes(input) + assert.Error(t, err) + assert.Contains(t, err.Error(), "overflow saving signed big.Int to EVM word") +} + +func TestEVMTranscodeBytes_UnsupportedEncoding(t *testing.T) { + input := gjson.Parse("{}") + _, err := EVMTranscodeBytes(input) + assert.Error(t, err) +} + +func TestEVMTranscodeBool(t *testing.T) { + tests := []struct { + name string + input gjson.Result + output string + }{ + { + "true", + gjson.Result{Type: gjson.True}, + "0x0000000000000000000000000000000000000000000000000000000000000001", + }, + { + "false", + gjson.Result{Type: gjson.False}, + "0x0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "null", + gjson.Result{Type: gjson.Null}, + "0x0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "empty string", + gjson.Result{Type: gjson.String, Str: ""}, + "0x0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "string", + gjson.Result{Type: gjson.String, Str: "hello world"}, + "0x0000000000000000000000000000000000000000000000000000000000000001", + }, + { + "zero", + gjson.Result{Type: gjson.Number, Num: 0.0}, + "0x0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "positive integer", + gjson.Result{Type: gjson.Number, Num: 1239812}, + 
"0x0000000000000000000000000000000000000000000000000000000000000001", + }, + { + "empty object", + gjson.Result{Type: gjson.JSON, Raw: "{}"}, + "0x0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "object with keys", + gjson.Result{Type: gjson.JSON, Raw: `{"key": "value"}`}, + "0x0000000000000000000000000000000000000000000000000000000000000001", + }, + { + "empty array", + gjson.Result{Type: gjson.JSON, Raw: "[]"}, + "0x0000000000000000000000000000000000000000000000000000000000000000", + }, + { + "array with values", + gjson.Result{Type: gjson.JSON, Raw: `["value"]`}, + "0x0000000000000000000000000000000000000000000000000000000000000001", + }, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + + out, err := EVMTranscodeBool(test.input) + assert.NoError(t, err) + assert.Equal(t, test.output, hexutil.Encode(out)) + }) + } +} + +func TestEVMTranscodeUint256(t *testing.T) { + tests := []struct { + name string + input gjson.Result + output string + wantError bool + }{ + { + "true", + gjson.Result{Type: gjson.True}, + "", + true, + }, + { + "false", + gjson.Result{Type: gjson.False}, + "", + true, + }, + { + "null", + gjson.Result{Type: gjson.Null}, + "0x0000000000000000000000000000000000000000000000000000000000000000", + false, + }, + { + "empty string", + gjson.Result{Type: gjson.String, Str: ""}, + "", + true, + }, + { + "string", + gjson.Result{Type: gjson.String, Str: "hello world"}, + "", + true, + }, + { + "string decimal", + gjson.Result{Type: gjson.String, Str: "120"}, + "0x0000000000000000000000000000000000000000000000000000000000000078", + false, + }, + { + "string hex", + gjson.Result{Type: gjson.String, Str: "0xba"}, + "0x00000000000000000000000000000000000000000000000000000000000000ba", + false, + }, + { + "zero", + gjson.Result{Type: gjson.Number, Num: 0.0}, + "0x0000000000000000000000000000000000000000000000000000000000000000", + false, + }, + { + "positive integer", + gjson.Result{Type: 
gjson.Number, Num: 231}, + "0x00000000000000000000000000000000000000000000000000000000000000e7", + false, + }, + { + "negative integer", + gjson.Result{Type: gjson.Number, Num: -912}, + "", + true, + }, + { + "unsupported encoding", + gjson.Result{Type: gjson.JSON}, + "", + true, + }, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + + out, err := EVMTranscodeUint256(test.input) + if test.wantError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.output, hexutil.Encode(out)) + } + }) + } +} + +func TestEVMTranscodeInt256(t *testing.T) { + tests := []struct { + name string + input gjson.Result + output string + wantError bool + }{ + { + "true", + gjson.Result{Type: gjson.True}, + "", + true, + }, + { + "false", + gjson.Result{Type: gjson.False}, + "", + true, + }, + { + "null", + gjson.Result{Type: gjson.Null}, + "0x0000000000000000000000000000000000000000000000000000000000000000", + false, + }, + { + "empty string", + gjson.Result{Type: gjson.String, Str: ""}, + "", + true, + }, + { + "string", + gjson.Result{Type: gjson.String, Str: "hello world"}, + "", + true, + }, + { + "string decimal", + gjson.Result{Type: gjson.String, Str: "120"}, + "0x0000000000000000000000000000000000000000000000000000000000000078", + false, + }, + { + "string hex", + gjson.Result{Type: gjson.String, Str: "0xba"}, + "0x00000000000000000000000000000000000000000000000000000000000000ba", + false, + }, + { + "zero", + gjson.Result{Type: gjson.Number, Num: 0.0}, + "0x0000000000000000000000000000000000000000000000000000000000000000", + false, + }, + { + "positive integer", + gjson.Result{Type: gjson.Number, Num: 231}, + "0x00000000000000000000000000000000000000000000000000000000000000e7", + false, + }, + { + "negative integer", + gjson.Result{Type: gjson.Number, Num: -912}, + "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc70", + false, + }, + { + "unsupported encoding", + gjson.Result{Type: gjson.JSON}, 
+ "", + true, + }, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + + out, err := EVMTranscodeInt256(test.input) + if test.wantError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.output, hexutil.Encode(out)) + } + }) + } +} + +func TestEVMTranscodeJSONWithFormat(t *testing.T) { + tests := []struct { + name string + format string + input string + output string + }{ + { + "result is string", + FormatBytes, + `{"result": "hello world"}`, + "0x" + + "000000000000000000000000000000000000000000000000000000000000000b" + + "68656c6c6f20776f726c64000000000000000000000000000000000000000000", + }, + { + "result is number", + FormatUint256, + `{"result": 31223}`, + "0x" + + "0000000000000000000000000000000000000000000000000000000000000020" + + "00000000000000000000000000000000000000000000000000000000000079f7", + }, + { + "result is negative number", + FormatInt256, + `{"result": -123481273.1}`, + "0x" + + "0000000000000000000000000000000000000000000000000000000000000020" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffff8a3d347", + }, + { + "result is true", + FormatBool, + `{"result": true}`, + "0x" + + "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000001", + }, + { + "result is preformatted", + FormatPreformatted, + `{"result": "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"}`, + "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + }, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + input := gjson.GetBytes([]byte(test.input), "result") + out, err := EVMTranscodeJSONWithFormat(input, test.format) + require.NoError(t, err) + assert.Equal(t, test.output, hexutil.Encode(out)) + }) + } +} + +func TestEVMTranscodeJSONWithFormat_UnsupportedEncoding(t *testing.T) { + _, err := EVMTranscodeJSONWithFormat(gjson.Result{}, "burgh") + 
assert.Error(t, err) +} + +func TestRoundToEVMWordBorder(t *testing.T) { + assert.Equal(t, 0, roundToEVMWordBorder(0)) + assert.Equal(t, 0, roundToEVMWordBorder(32)) + assert.Equal(t, 31, roundToEVMWordBorder(1)) + assert.Equal(t, 1, roundToEVMWordBorder(31)) +} + +func TestParseNumericString(t *testing.T) { + tests := []struct { + input string + output string + }{ + {"0x0", "0"}, + {"0xfffffffffffffffff", "295147905179352825855"}, + {"1.0", "1"}, + {"0", "0"}, + {"1", "1"}, + {"1.0E+0", "1"}, + } + + for _, test := range tests { + out, err := parseNumericString(test.input) + assert.NoError(t, err) + assert.Equal(t, test.output, out.String()) + } +} + +func TestParseNumericString_InvalidHex(t *testing.T) { + _, err := parseNumericString("0xfZ") + assert.Error(t, err) +} + +func TestParseDecimalString(t *testing.T) { + tests := []struct { + input string + output string + }{ + {"1.0", "1"}, + {"0", "0"}, + {"1", "1"}, + {"1.0E+0", "1"}, + {"1E+0", "1"}, + {"1e+0", "1"}, + {"0.01e+02", "1"}, + {"12072e-4", "1"}, + {"1.2072e+20", "120720000000000000000"}, + {"-1.2072e+20", "-120720000000000000000"}, + {"1.55555555555555555555e+20", "155555555555555555555"}, + {"1.000000000000000001e+18", "1000000000000000001"}, + {"1000000.000000000000000001e+18", "1000000000000000000000001"}, + } + + for _, test := range tests { + out, err := parseDecimalString(test.input) + assert.NoError(t, err) + assert.Equal(t, test.output, out.String()) + } +} + +// EVMTranscodeJSONWithFormat given a JSON input and a format specifier, encode the +// value for use by the EVM +func EVMTranscodeJSONWithFormat(value gjson.Result, format string) ([]byte, error) { + switch format { + case FormatBytes: + return EVMTranscodeBytes(value) + case FormatPreformatted: + return hex.DecodeString(commonhex.TrimPrefix(value.Str)) + case FormatUint256: + data, err := EVMTranscodeUint256(value) + if err != nil { + return []byte{}, err + } + return EVMEncodeBytes(data), nil + + case FormatInt256: + data, err := 
EVMTranscodeInt256(value) + if err != nil { + return []byte{}, err + } + return EVMEncodeBytes(data), nil + + case FormatBool: + data, err := EVMTranscodeBool(value) + if err != nil { + return []byte{}, err + } + return EVMEncodeBytes(data), nil + + default: + return []byte{}, fmt.Errorf("unsupported format: %s", format) + } +} + +// EVMTranscodeBytes converts a json input to an EVM bytes array +func EVMTranscodeBytes(value gjson.Result) ([]byte, error) { + switch value.Type { + case gjson.String: + return EVMEncodeBytes([]byte(value.Str)), nil + + case gjson.False: + return EVMEncodeBytes(EVMWordUint64(0)), nil + + case gjson.True: + return EVMEncodeBytes(EVMWordUint64(1)), nil + + case gjson.Number: + v := big.NewFloat(value.Num) // precision limited to float64 + vInt, _ := v.Int(nil) + word, err := EVMWordSignedBigInt(vInt) + if err != nil { + return nil, errors.Wrap(err, "while converting float to int256") + } + return EVMEncodeBytes(word), nil + default: + return []byte{}, fmt.Errorf("unsupported encoding for value: %s", value.Type) + } +} + +func TestABIEncodeDecode(t *testing.T) { + // Note this is just a sanity check test, + // ABIEncode/ABIDecode is a thin wrapper around the geth abi library + // which has its own exhaustive test suite. 
+ var tt = []struct { + abiStr string + vals []interface{} + name string + expectErr bool + }{ + { + abiStr: `[{ "type": "bool" }]`, + vals: []interface{}{true}, + name: "single value", + }, + { + abiStr: `[{"components": [{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}], "type":"tuple"}]`, + vals: []interface{}{struct { + Int1 *big.Int `json:"int1"` + Int2 *big.Int `json:"int2"` + }{big.NewInt(10), big.NewInt(12)}}, + name: "struct", + }, + { + abiStr: `[{ "type": "bool" }, {"type": "uint256"}]`, + vals: []interface{}{true, big.NewInt(10)}, + name: "multiple values", + }, + { + abiStr: `[{ "type": "bool" }, {"type": "uint256"}]`, + vals: []interface{}{big.NewInt(1), big.NewInt(10)}, + name: "mismatch", + expectErr: true, + }, + } + for _, tc := range tt { + // Round trip should remain the same. + tc := tc + t.Run(tc.name, func(t *testing.T) { + abiBytes, err := ABIEncode(tc.abiStr, tc.vals...) + if tc.expectErr { + t.Log(err) + require.Error(t, err) + return + } + require.NoError(t, err) + res, err := ABIDecode(tc.abiStr, abiBytes) + require.NoError(t, err) + assert.Equal(t, tc.vals, res) + }) + } +} diff --git a/core/chains/evm/utils/utils.go b/core/chains/evm/utils/utils.go new file mode 100644 index 00000000..197d8a5d --- /dev/null +++ b/core/chains/evm/utils/utils.go @@ -0,0 +1,263 @@ +package utils + +import ( + "context" + "crypto/rand" + "fmt" + "math/big" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/jpillora/backoff" + "golang.org/x/crypto/sha3" + + "github.com/goplugin/plugin-common/pkg/utils/hex" +) + +// EVMWordByteLen the length of an EVM Word Byte +const EVMWordByteLen = 32 + +// ZeroAddress is an address of all zeroes, otherwise in Ethereum as +// 0x0000000000000000000000000000000000000000 +var ZeroAddress = common.Address{} + +// EmptyHash is a hash of all zeroes, otherwise in Ethereum as +// 
0x0000000000000000000000000000000000000000000000000000000000000000 +var EmptyHash = common.Hash{} + +func RandomAddress() common.Address { + b := make([]byte, 20) + _, _ = rand.Read(b) // Assignment for errcheck. Only used in tests so we can ignore. + return common.BytesToAddress(b) +} + +func RandomHash() common.Hash { + b := make([]byte, 32) + _, _ = rand.Read(b) // Assignment for errcheck. Only used in tests so we can ignore. + return common.BytesToHash(b) +} + +// IsEmptyAddress checks that the address is empty, synonymous with the zero +// account/address. No logs can come from this address, as there is no contract +// present there. +// +// See https://stackoverflow.com/questions/48219716/what-is-address0-in-solidity +// for the more info on the zero address. +func IsEmptyAddress(addr common.Address) bool { + return addr == ZeroAddress +} + +func RandomBytes32() (r [32]byte) { + b := make([]byte, 32) + _, _ = rand.Read(b[:]) // Assignment for errcheck. Only used in tests so we can ignore. + copy(r[:], b) + return +} + +func Bytes32ToSlice(a [32]byte) (r []byte) { + r = append(r, a[:]...) + return +} + +// Uint256ToBytes is x represented as the bytes of a uint256 +func Uint256ToBytes(x *big.Int) (uint256 []byte, err error) { + if x.Cmp(MaxUint256) > 0 { + return nil, fmt.Errorf("too large to convert to uint256") + } + uint256 = common.LeftPadBytes(x.Bytes(), EVMWordByteLen) + if x.Cmp(big.NewInt(0).SetBytes(uint256)) != 0 { + panic("failed to round-trip uint256 back to source big.Int") + } + return uint256, err +} + +// NewHash return random Keccak256 +func NewHash() common.Hash { + b := make([]byte, 32) + _, err := rand.Read(b) + if err != nil { + panic(err) + } + return common.BytesToHash(b) +} + +// PadByteToHash returns a hash with zeros padded on the left of the given byte. 
+func PadByteToHash(b byte) common.Hash { + var h [32]byte + h[31] = b + return h +} + +// Uint256ToBytes32 returns the bytes32 encoding of the big int provided +func Uint256ToBytes32(n *big.Int) []byte { + if n.BitLen() > 256 { + panic("vrf.uint256ToBytes32: too big to marshal to uint256") + } + return common.LeftPadBytes(n.Bytes(), 32) +} + +// MustHash returns the keccak256 hash, or panics on failure. +func MustHash(in string) common.Hash { + out, err := Keccak256([]byte(in)) + if err != nil { + panic(err) + } + return common.BytesToHash(out) +} + +// HexToUint256 returns the uint256 represented by s, or an error if it doesn't +// represent one. +func HexToUint256(s string) (*big.Int, error) { + rawNum, err := hexutil.Decode(s) + if err != nil { + return nil, fmt.Errorf("error while parsing %s as hex: %w", s, err) + } + rv := big.NewInt(0).SetBytes(rawNum) // can't be negative number + if err := CheckUint256(rv); err != nil { + return nil, err + } + return rv, nil +} + +var zero = big.NewInt(0) + +// CheckUint256 returns an error if n is out of bounds for a uint256 +func CheckUint256(n *big.Int) error { + if n.Cmp(zero) < 0 || n.Cmp(MaxUint256) >= 0 { + return fmt.Errorf("number out of range for uint256") + } + return nil +} + +// Keccak256 is a simplified interface for the legacy SHA3 implementation that +// Ethereum uses. +func Keccak256(in []byte) ([]byte, error) { + hash := sha3.NewLegacyKeccak256() + _, err := hash.Write(in) + return hash.Sum(nil), err +} + +func Keccak256Fixed(in []byte) [32]byte { + hash := sha3.NewLegacyKeccak256() + // Note this Keccak256 cannot error https://github.com/golang/crypto/blob/master/sha3/sha3.go#L126 + // if we start supporting hashing algos which do, we can change this API to include an error. 
+ hash.Write(in) + var h [32]byte + copy(h[:], hash.Sum(nil)) + return h +} + +// EIP55CapitalizedAddress returns true iff possibleAddressString has the correct +// capitalization for an Ethereum address, per EIP 55 +func EIP55CapitalizedAddress(possibleAddressString string) bool { + possibleAddressString = hex.EnsurePrefix(possibleAddressString) + EIP55Capitalized := common.HexToAddress(possibleAddressString).Hex() + return possibleAddressString == EIP55Capitalized +} + +// ParseEthereumAddress returns addressString as a go-ethereum Address, or an +// error if it's invalid, e.g. if EIP 55 capitalization check fails +func ParseEthereumAddress(addressString string) (common.Address, error) { + if !common.IsHexAddress(addressString) { + return common.Address{}, fmt.Errorf( + "not a valid Ethereum address: %s", addressString) + } + address := common.HexToAddress(addressString) + if !EIP55CapitalizedAddress(addressString) { + return common.Address{}, fmt.Errorf( + "%s treated as Ethereum address, but it has an invalid capitalization! "+ + "The correctly-capitalized address would be %s, but "+ + "check carefully before copying and pasting! ", + addressString, address.Hex()) + } + return address, nil +} + +// NewRedialBackoff is a standard backoff to use for redialling or reconnecting to +// unreachable network endpoints +func NewRedialBackoff() backoff.Backoff { + return backoff.Backoff{ + Min: 1 * time.Second, + Max: 15 * time.Second, + Jitter: true, + } + +} + +// RetryWithBackoff retries the sleeper and backs off if not Done +func RetryWithBackoff(ctx context.Context, fn func() (retry bool)) { + sleeper := NewBackoffSleeper() + sleeper.Reset() + for { + retry := fn() + if !retry { + return + } + + select { + case <-ctx.Done(): + return + case <-time.After(sleeper.After()): + continue + } + } +} + +// NewBackoffSleeper returns a BackoffSleeper that is configured to +// sleep for 0 seconds initially, then backs off from 1 second minimum +// to 10 seconds maximum. 
+func NewBackoffSleeper() *BackoffSleeper { + return &BackoffSleeper{ + Backoff: backoff.Backoff{ + Min: 1 * time.Second, + Max: 10 * time.Second, + }, + } +} + +// BackoffSleeper is a sleeper that backs off on subsequent attempts. +type BackoffSleeper struct { + backoff.Backoff + beenRun atomic.Bool +} + +// Sleep waits for the given duration, incrementing the back off. +func (bs *BackoffSleeper) Sleep() { + if bs.beenRun.CompareAndSwap(false, true) { + return + } + time.Sleep(bs.Backoff.Duration()) +} + +// After returns the duration for the next stop, and increments the backoff. +func (bs *BackoffSleeper) After() time.Duration { + if bs.beenRun.CompareAndSwap(false, true) { + return 0 + } + return bs.Backoff.Duration() +} + +// Duration returns the current duration value. +func (bs *BackoffSleeper) Duration() time.Duration { + if !bs.beenRun.Load() { + return 0 + } + return bs.ForAttempt(bs.Attempt()) +} + +// Reset resets the backoff intervals. +func (bs *BackoffSleeper) Reset() { + bs.beenRun.Store(false) + bs.Backoff.Reset() +} + +// RandUint256 generates a random bigNum up to 2 ** 256 - 1 +func RandUint256() *big.Int { + n, err := rand.Int(rand.Reader, MaxUint256) + if err != nil { + panic(err) + } + return n +} diff --git a/core/chains/evm/utils/utils_test.go b/core/chains/evm/utils/utils_test.go new file mode 100644 index 00000000..d403df79 --- /dev/null +++ b/core/chains/evm/utils/utils_test.go @@ -0,0 +1,231 @@ +package utils_test + +import ( + "context" + "math/big" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/assert" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestKeccak256(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + want string + }{ + {"basic", "0xf00b", 
"0x2433bb36d5f9b14e4fea87c2d32d79abfe34e56808b891e471f4400fca2a336c"}, + {"long input", "0xf00b2433bb36d5f9b14e4fea87c2d32d79abfe34e56808b891e471f4400fca2a336c", "0x6b917c56ad7bea7d09132b9e1e29bb5d9aa7d32d067c638dfa886bbbf6874cdf"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + input, err := hexutil.Decode(test.input) + assert.NoError(t, err) + result, err := utils.Keccak256(input) + assert.NoError(t, err) + + assert.Equal(t, test.want, hexutil.Encode(result)) + }) + } +} + +func TestUtils_IsEmptyAddress(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + addr common.Address + want bool + }{ + {"zero address", common.Address{}, true}, + {"non-zero address", testutils.NewAddress(), false}, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + actual := utils.IsEmptyAddress(test.addr) + assert.Equal(t, test.want, actual) + }) + } +} + +// From https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md#test-cases +var testAddresses = []string{ + "0x52908400098527886E0F7030069857D2E4169EE7", + "0x8617E340B3D01FA5F11F306F4090FD50E238070D", + "0xde709f2102306220921060314715629080e2fb77", + "0x27b1fdb04752bbc536007a920d24acb045561c26", + "0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed", + "0xfB6916095ca1df60bB79Ce92cE3Ea74c37c5d359", + "0xdbF03B407c01E7cD3CBea99509d93f8DDDC8C6FB", + "0xD1220A0cf47c7B9Be7A2E6BA89F429762e7b9aDb", +} + +func TestClient_EIP55CapitalizedAddress(t *testing.T) { + t.Parallel() + + valid := utils.EIP55CapitalizedAddress + for _, address := range testAddresses { + assert.True(t, valid(address)) + assert.False(t, valid(strings.ToLower(address)) && + valid(strings.ToUpper(address))) + } +} + +func TestClient_ParseEthereumAddress(t *testing.T) { + t.Parallel() + + parse := utils.ParseEthereumAddress + for _, address := range testAddresses { + a1, err := parse(address) + assert.NoError(t, err) + no0xPrefix := address[2:] + a2, err := 
parse(no0xPrefix) + assert.NoError(t, err) + assert.True(t, a1 == a2) + _, lowerErr := parse(strings.ToLower(address)) + _, upperErr := parse(strings.ToUpper(address)) + shouldBeError := multierr.Combine(lowerErr, upperErr) + assert.Error(t, shouldBeError) + assert.True(t, strings.Contains(shouldBeError.Error(), no0xPrefix)) + } + _, notHexErr := parse("0xCeci n'est pas une chaîne hexadécimale") + assert.Error(t, notHexErr) + _, tooLongErr := parse("0x0123456789abcdef0123456789abcdef0123456789abcdef") + assert.Error(t, tooLongErr) +} + +func TestUint256ToBytes(t *testing.T) { + t.Parallel() + + v := big.NewInt(0).Sub(utils.MaxUint256, big.NewInt(1)) + uint256, err := utils.Uint256ToBytes(v) + assert.NoError(t, err) + + b32 := utils.Uint256ToBytes32(v) + assert.Equal(t, uint256, b32) + + large := big.NewInt(0).Add(utils.MaxUint256, big.NewInt(1)) + _, err = utils.Uint256ToBytes(large) + assert.Error(t, err, "too large to convert to uint256") + + negative := big.NewInt(-1) + assert.Panics(t, func() { + _, _ = utils.Uint256ToBytes(negative) + }, "failed to round-trip uint256 back to source big.Int") +} + +func TestCheckUint256(t *testing.T) { + t.Parallel() + + large := big.NewInt(0).Add(utils.MaxUint256, big.NewInt(1)) + err := utils.CheckUint256(large) + assert.Error(t, err, "number out of range for uint256") + + negative := big.NewInt(-123) + err = utils.CheckUint256(negative) + assert.Error(t, err, "number out of range for uint256") + + err = utils.CheckUint256(big.NewInt(123)) + assert.NoError(t, err) +} + +func TestRandUint256(t *testing.T) { + t.Parallel() + + for i := 0; i < 1000; i++ { + uint256 := utils.RandUint256() + assert.NoError(t, utils.CheckUint256(uint256)) + } +} + +func TestHexToUint256(t *testing.T) { + t.Parallel() + + b, err := utils.HexToUint256("0x00") + assert.NoError(t, err) + assert.Zero(t, b.Cmp(big.NewInt(0))) + + b, err = utils.HexToUint256("0xFFFFFFFF") + assert.NoError(t, err) + assert.Zero(t, b.Cmp(big.NewInt(4294967295))) +} + +func 
TestNewHash(t *testing.T) { + t.Parallel() + + h1 := utils.NewHash() + h2 := utils.NewHash() + assert.NotEqual(t, h1, h2) + assert.NotEqual(t, h1, common.HexToHash("0x0")) + assert.NotEqual(t, h2, common.HexToHash("0x0")) +} + +func TestPadByteToHash(t *testing.T) { + t.Parallel() + + h := utils.PadByteToHash(1) + assert.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000001", h.String()) +} + +func TestUtils_BackoffSleeper(t *testing.T) { + t.Parallel() + + bs := utils.NewBackoffSleeper() + assert.Equal(t, time.Duration(0), bs.Duration(), "should initially return immediately") + bs.Sleep() + + d := 1 * time.Nanosecond + bs.Min = d + bs.Factor = 2 + assert.Equal(t, d, bs.Duration()) + bs.Sleep() + + d2 := 2 * time.Nanosecond + assert.Equal(t, d2, bs.Duration()) + + bs.Reset() + assert.Equal(t, time.Duration(0), bs.Duration(), "should initially return immediately") +} + +func TestRetryWithBackoff(t *testing.T) { + t.Parallel() + + var counter atomic.Int32 + ctx, cancel := context.WithCancel(testutils.Context(t)) + + utils.RetryWithBackoff(ctx, func() bool { + return false + }) + + retry := func() bool { + return counter.Add(1) < 3 + } + + go utils.RetryWithBackoff(ctx, retry) + + assert.Eventually(t, func() bool { + return counter.Load() == 3 + }, testutils.WaitTimeout(t), testutils.TestInterval) + + cancel() + + utils.RetryWithBackoff(ctx, retry) + assert.Equal(t, int32(4), counter.Load()) +} diff --git a/core/chains/legacyevm/chain.go b/core/chains/legacyevm/chain.go new file mode 100644 index 00000000..7a3012e7 --- /dev/null +++ b/core/chains/legacyevm/chain.go @@ -0,0 +1,492 @@ +package legacyevm + +import ( + "context" + "errors" + "fmt" + "math/big" + "net/url" + "time" + + gotoml "github.com/pelletier/go-toml/v2" + "go.uber.org/multierr" + + "github.com/jmoiron/sqlx" + + common "github.com/goplugin/plugin-common/pkg/chains" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/types" + 
// Chain is the service interface exposed by a single configured EVM chain:
// its identity, RPC client, scoped config, and the long-lived subsystems
// (head/log broadcasting, tx management, gas estimation) wired to it.
//
//go:generate mockery --quiet --name Chain --output ./mocks/ --case=underscore
type Chain interface {
	types.ChainService

	ID() *big.Int
	Client() evmclient.Client
	Config() evmconfig.ChainScopedConfig
	LogBroadcaster() log.Broadcaster
	HeadBroadcaster() httypes.HeadBroadcaster
	TxManager() txmgr.TxManager
	HeadTracker() httypes.HeadTracker
	Logger() logger.Logger
	BalanceMonitor() monitor.BalanceMonitor
	LogPoller() logpoller.LogPoller
	GasEstimator() gas.EvmFeeEstimator
}

var (
	_ Chain = &chain{}
	// nilBigInt and emptyString are zero-value sentinels used by
	// (*LegacyChains).Get to reject obviously-invalid chain IDs.
	nilBigInt   *big.Int
	emptyString string
)

// LegacyChains implements [LegacyChainContainer]
type LegacyChains struct {
	*chains.ChainsKV[Chain]

	cfgs toml.EVMConfigs // per-chain TOML configs, exposed via ChainNodeConfigs
}

// LegacyChainContainer is container for EVM chains.
//
//go:generate mockery --quiet --name LegacyChainContainer --output ./mocks/ --case=underscore
type LegacyChainContainer interface {
	Get(id string) (Chain, error)
	Len() int
	List(ids ...string) ([]Chain, error)
	Slice() []Chain

	// BCF-2516: this is only used for EVMORM. When we delete that
	// we can promote/move the needed funcs from it to LegacyChainContainer
	// so instead of EVMORM().XYZ() we'd have something like legacyChains.XYZ()
	ChainNodeConfigs() evmtypes.Configs
}

var _ LegacyChainContainer = &LegacyChains{}

// NewLegacyChains wraps a map of chains (keyed by chain ID string) together
// with their TOML configs into a LegacyChains container.
func NewLegacyChains(m map[string]Chain, evmCfgs toml.EVMConfigs) *LegacyChains {
	return &LegacyChains{
		ChainsKV: chains.NewChainsKV[Chain](m),
		cfgs:     evmCfgs,
	}
}

// ChainNodeConfigs returns the node configurations for all chains.
func (c *LegacyChains) ChainNodeConfigs() evmtypes.Configs {
	return c.cfgs
}

// backward compatibility.
// eth keys are represented as multiple types in the code base;
// *big.Int, string, and int64.
//
// TODO BCF-2507 unify the type system
//
// Get looks up a chain by its ID string. The empty string and "<nil>" (what
// String() on the nil *big.Int sentinel renders as, per math/big's
// nil-receiver behavior) are rejected up front as invalid IDs.
func (c *LegacyChains) Get(id string) (Chain, error) {
	if id == nilBigInt.String() || id == emptyString {
		return nil, fmt.Errorf("invalid chain id requested: %q", id)
	}
	return c.ChainsKV.Get(id)
}

// chain is the concrete Chain implementation: one struct holding every
// subsystem constructed for a single EVM chain by newChain.
type chain struct {
	services.StateMachine
	id              *big.Int
	cfg             *evmconfig.ChainScoped
	client          evmclient.Client
	txm             txmgr.TxManager
	logger          logger.Logger
	headBroadcaster httypes.HeadBroadcaster
	headTracker     httypes.HeadTracker
	logBroadcaster  log.Broadcaster
	logPoller       logpoller.LogPoller
	balanceMonitor  monitor.BalanceMonitor
	keyStore        keystore.Eth
	gasEstimator    gas.EvmFeeEstimator
}

// errChainDisabled is returned when construction of a chain is requested but
// its TOML config marks it disabled.
type errChainDisabled struct {
	ChainID *ubig.Big
}

func (e errChainDisabled) Error() string {
	return fmt.Sprintf("cannot create new chain with ID %s, the chain is disabled", e.ChainID.String())
}
+type AppConfig interface { + config.AppConfig + toml.HasEVMConfigs +} + +type ChainRelayExtenderConfig struct { + Logger logger.Logger + KeyStore keystore.Eth + ChainOpts +} + +func (c ChainRelayExtenderConfig) Validate() error { + err := c.ChainOpts.Validate() + if c.Logger == nil { + err = errors.Join(err, errors.New("nil Logger")) + } + if c.KeyStore == nil { + err = errors.Join(err, errors.New("nil Keystore")) + } + + if err != nil { + err = fmt.Errorf("invalid ChainRelayerExtenderConfig: %w", err) + } + return err +} + +type ChainOpts struct { + AppConfig AppConfig + + MailMon *mailbox.Monitor + GasEstimator gas.EvmFeeEstimator + + *sqlx.DB + + // TODO BCF-2513 remove test code from the API + // Gen-functions are useful for dependency injection by tests + GenEthClient func(*big.Int) client.Client + GenLogBroadcaster func(*big.Int) log.Broadcaster + GenLogPoller func(*big.Int) logpoller.LogPoller + GenHeadTracker func(*big.Int, httypes.HeadBroadcaster) httypes.HeadTracker + GenTxManager func(*big.Int) txmgr.TxManager + GenGasEstimator func(*big.Int) gas.EvmFeeEstimator +} + +func (o ChainOpts) Validate() error { + var err error + if o.AppConfig == nil { + err = errors.Join(err, errors.New("nil AppConfig")) + } + + if o.MailMon == nil { + err = errors.Join(err, errors.New("nil MailMon")) + } + if o.DB == nil { + err = errors.Join(err, errors.New("nil DB")) + } + if err != nil { + err = fmt.Errorf("invalid ChainOpts: %w", err) + } + return err +} + +func NewTOMLChain(ctx context.Context, chain *toml.EVMConfig, opts ChainRelayExtenderConfig) (Chain, error) { + err := opts.Validate() + if err != nil { + return nil, err + } + chainID := chain.ChainID + l := opts.Logger.With("evmChainID", chainID.String()) + if !chain.IsEnabled() { + return nil, errChainDisabled{ChainID: chainID} + } + cfg := evmconfig.NewTOMLChainScopedConfig(opts.AppConfig, chain, l) + // note: per-chain validation is not necessary at this point since everything is checked earlier on boot. 
// newChain wires together every subsystem for a single EVM chain: RPC
// client, head tracking, log polling/broadcasting, tx management, gas
// estimation and (optionally) balance monitoring. Test hooks in opts
// (Gen* functions) replace the real constructors when set. With RPC
// disabled, null implementations are substituted throughout.
func newChain(ctx context.Context, cfg *evmconfig.ChainScoped, nodes []*toml.Node, opts ChainRelayExtenderConfig) (*chain, error) {
	chainID, chainType := cfg.EVM().ChainID(), cfg.EVM().ChainType()
	l := opts.Logger
	// RPC client: null when RPC is disabled, test-injected when a generator
	// is provided, otherwise built from the TOML node configs.
	var client evmclient.Client
	if !cfg.EVMRPCEnabled() {
		client = evmclient.NewNullClient(chainID, l)
	} else if opts.GenEthClient == nil {
		client = newEthClientFromCfg(cfg.EVM().NodePool(), cfg.EVM().NodeNoNewHeadsThreshold(), l, chainID, chainType, nodes)
	} else {
		client = opts.GenEthClient(chainID)
	}

	db := opts.DB
	headBroadcaster := headtracker.NewHeadBroadcaster(l)
	headSaver := headtracker.NullSaver
	var headTracker httypes.HeadTracker
	if !cfg.EVMRPCEnabled() {
		headTracker = headtracker.NullTracker
	} else if opts.GenHeadTracker == nil {
		orm := headtracker.NewORM(db, l, cfg.Database(), *chainID)
		headSaver = headtracker.NewHeadSaver(l, orm, cfg.EVM(), cfg.EVM().HeadTracker())
		headTracker = headtracker.NewHeadTracker(l, client, cfg.EVM(), cfg.EVM().HeadTracker(), headBroadcaster, headSaver, opts.MailMon)
	} else {
		headTracker = opts.GenHeadTracker(chainID, headBroadcaster)
	}

	// Log poller stays disabled unless the feature flag is on.
	logPoller := logpoller.LogPollerDisabled
	if cfg.Feature().LogPoller() {
		if opts.GenLogPoller != nil {
			logPoller = opts.GenLogPoller(chainID)
		} else {
			logPoller = logpoller.NewLogPoller(
				logpoller.NewObservedORM(chainID, db, l, cfg.Database()),
				client,
				l,
				cfg.EVM().LogPollInterval(),
				cfg.EVM().FinalityTagEnabled(),
				int64(cfg.EVM().FinalityDepth()),
				int64(cfg.EVM().LogBackfillBatchSize()),
				int64(cfg.EVM().RPCDefaultBatchSize()),
				int64(cfg.EVM().LogKeepBlocksDepth()))
		}
	}

	// note: gas estimator is started as a part of the txm
	txm, gasEstimator, err := newEvmTxm(db, cfg.EVM(), cfg.EVMRPCEnabled(), cfg.Database(), cfg.Database().Listener(), client, l, logPoller, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to instantiate EvmTxm for chain with ID %s: %w", chainID.String(), err)
	}

	headBroadcaster.Subscribe(txm)

	// Highest seen head height is used as part of the start of LogBroadcaster backfill range
	highestSeenHead, err := headSaver.LatestHeadFromDB(ctx)
	if err != nil {
		return nil, err
	}

	// Balance monitoring only makes sense when RPC is live and the feature is
	// enabled in config.
	var balanceMonitor monitor.BalanceMonitor
	if cfg.EVMRPCEnabled() && cfg.EVM().BalanceMonitor().Enabled() {
		balanceMonitor = monitor.NewBalanceMonitor(client, opts.KeyStore, l)
		headBroadcaster.Subscribe(balanceMonitor)
	}

	var logBroadcaster log.Broadcaster
	if !cfg.EVMRPCEnabled() {
		logBroadcaster = &log.NullBroadcaster{ErrMsg: fmt.Sprintf("Ethereum is disabled for chain %d", chainID)}
	} else if opts.GenLogBroadcaster == nil {
		logORM := log.NewORM(db, l, cfg.Database(), *chainID)
		logBroadcaster = log.NewBroadcaster(logORM, client, cfg.EVM(), l, highestSeenHead, opts.MailMon)
	} else {
		logBroadcaster = opts.GenLogBroadcaster(chainID)
	}

	// AddDependent for this chain
	// log broadcaster will not start until dependent ready is called by a
	// subsequent routine (job spawner)
	logBroadcaster.AddDependents(1)

	headBroadcaster.Subscribe(logBroadcaster)

	return &chain{
		id:              chainID,
		cfg:             cfg,
		client:          client,
		txm:             txm,
		logger:          l,
		headBroadcaster: headBroadcaster,
		headTracker:     headTracker,
		logBroadcaster:  logBroadcaster,
		logPoller:       logPoller,
		balanceMonitor:  balanceMonitor,
		keyStore:        opts.KeyStore,
		gasEstimator:    gasEstimator,
	}, nil
}

// Start dials the RPC client first (subsequent services may make eth calls
// on startup) and then starts the chain's services. It is idempotent via
// StartOnce.
func (c *chain) Start(ctx context.Context) error {
	return c.StartOnce("Chain", func() error {
		c.logger.Debugf("Chain: starting with ID %s", c.ID().String())
		// Must ensure that EthClient is dialed first because subsequent
		// services may make eth calls on startup
		if err := c.client.Dial(ctx); err != nil {
			return fmt.Errorf("failed to dial ethclient: %w", err)
		}
		// Services should be able to handle a non-functional eth client and
		// not block start in this case, instead retrying in a background loop
		// until it becomes available.
		//
		// We do not start the log poller here, it gets
		// started after the jobs so they have a chance to apply their filters.
		var ms services.MultiStart
		if err := ms.Start(ctx, c.txm, c.headBroadcaster, c.headTracker, c.logBroadcaster); err != nil {
			return err
		}
		if c.balanceMonitor != nil {
			if err := ms.Start(ctx, c.balanceMonitor); err != nil {
				return err
			}
		}

		return nil
	})
}
// Close shuts the chain's services down, collecting (rather than
// short-circuiting on) errors so that every subsystem gets a chance to stop.
// The shutdown order here is deliberate; do not reorder casually.
func (c *chain) Close() error {
	return c.StopOnce("Chain", func() (merr error) {
		c.logger.Debug("Chain: stopping")

		if c.balanceMonitor != nil {
			c.logger.Debug("Chain: stopping balance monitor")
			merr = c.balanceMonitor.Close()
		}
		c.logger.Debug("Chain: stopping logBroadcaster")
		merr = multierr.Combine(merr, c.logBroadcaster.Close())
		c.logger.Debug("Chain: stopping headTracker")
		merr = multierr.Combine(merr, c.headTracker.Close())
		c.logger.Debug("Chain: stopping headBroadcaster")
		merr = multierr.Combine(merr, c.headBroadcaster.Close())
		c.logger.Debug("Chain: stopping evmTxm")
		merr = multierr.Combine(merr, c.txm.Close())
		c.logger.Debug("Chain: stopping client")
		c.client.Close()
		c.logger.Debug("Chain: stopped")
		return merr
	})
}

// Ready aggregates readiness across the state machine and all core services.
func (c *chain) Ready() (merr error) {
	merr = multierr.Combine(
		c.StateMachine.Ready(),
		c.txm.Ready(),
		c.headBroadcaster.Ready(),
		c.headTracker.Ready(),
		c.logBroadcaster.Ready(),
	)
	if c.balanceMonitor != nil {
		merr = multierr.Combine(merr, c.balanceMonitor.Ready())
	}
	return
}

// Name returns the chain-scoped logger name, also used as the health report key.
func (c *chain) Name() string {
	return c.logger.Name()
}

// HealthReport merges this chain's own health with that of each subsystem.
func (c *chain) HealthReport() map[string]error {
	report := map[string]error{c.Name(): c.Healthy()}
	services.CopyHealth(report, c.txm.HealthReport())
	services.CopyHealth(report, c.headBroadcaster.HealthReport())
	services.CopyHealth(report, c.headTracker.HealthReport())
	services.CopyHealth(report, c.logBroadcaster.HealthReport())

	if c.balanceMonitor != nil {
		services.CopyHealth(report, c.balanceMonitor.HealthReport())
	}

	return report
}
c.balanceMonitor.HealthReport()) + } + + return report +} + +func (c *chain) Transact(ctx context.Context, from, to string, amount *big.Int, balanceCheck bool) error { + return chains.ErrLOOPPUnsupported +} + +func (c *chain) SendTx(ctx context.Context, from, to string, amount *big.Int, balanceCheck bool) error { + return c.Transact(ctx, from, to, amount, balanceCheck) +} + +func (c *chain) GetChainStatus(ctx context.Context) (types.ChainStatus, error) { + toml, err := c.cfg.EVM().TOMLString() + if err != nil { + return types.ChainStatus{}, err + } + return types.ChainStatus{ + ID: c.ID().String(), + Enabled: c.cfg.EVM().IsEnabled(), + Config: toml, + }, nil +} + +// TODO BCF-2602 statuses are static for non-evm chain and should be dynamic +func (c *chain) listNodeStatuses(start, end int) ([]types.NodeStatus, int, error) { + nodes := c.cfg.Nodes() + total := len(nodes) + if start >= total { + return nil, total, common.ErrOutOfRange + } + if end > total { + end = total + } + stats := make([]types.NodeStatus, 0) + + states := c.Client().NodeStates() + for _, n := range nodes[start:end] { + var ( + nodeState string + exists bool + ) + toml, err := gotoml.Marshal(n) + if err != nil { + return nil, -1, err + } + if states == nil { + nodeState = "Unknown" + } else { + nodeState, exists = states[*n.Name] + if !exists { + // The node is in the DB and the chain is enabled but it's not running + nodeState = "NotLoaded" + } + } + stats = append(stats, types.NodeStatus{ + ChainID: c.ID().String(), + Name: *n.Name, + Config: string(toml), + State: nodeState, + }) + } + return stats, total, nil +} + +func (c *chain) ListNodeStatuses(ctx context.Context, pageSize int32, pageToken string) (stats []types.NodeStatus, nextPageToken string, total int, err error) { + return common.ListNodeStatuses(int(pageSize), pageToken, c.listNodeStatuses) +} + +func (c *chain) ID() *big.Int { return c.id } +func (c *chain) Client() evmclient.Client { return c.client } +func (c *chain) Config() 
// Trivial accessors for the chain's subsystems.
func (c *chain) ID() *big.Int                             { return c.id }
func (c *chain) Client() evmclient.Client                 { return c.client }
func (c *chain) Config() evmconfig.ChainScopedConfig      { return c.cfg }
func (c *chain) LogBroadcaster() log.Broadcaster          { return c.logBroadcaster }
func (c *chain) LogPoller() logpoller.LogPoller           { return c.logPoller }
func (c *chain) HeadBroadcaster() httypes.HeadBroadcaster { return c.headBroadcaster }
func (c *chain) TxManager() txmgr.TxManager               { return c.txm }
func (c *chain) HeadTracker() httypes.HeadTracker         { return c.headTracker }
func (c *chain) Logger() logger.Logger                    { return c.logger }
func (c *chain) BalanceMonitor() monitor.BalanceMonitor   { return c.balanceMonitor }
func (c *chain) GasEstimator() gas.EvmFeeEstimator        { return c.gasEstimator }

// newEthClientFromCfg builds the chain's RPC client from node configs:
// nodes flagged SendOnly become secondary, broadcast-only endpoints (WS URL
// left empty), all others become primaries with both WS and HTTP URLs.
// (RPCCLient is the project interface's spelling — intentional here.)
func newEthClientFromCfg(cfg evmconfig.NodePool, noNewHeadsThreshold time.Duration, lggr logger.Logger, chainID *big.Int, chainType commonconfig.ChainType, nodes []*toml.Node) evmclient.Client {
	var empty url.URL
	var primaries []commonclient.Node[*big.Int, *evmtypes.Head, evmclient.RPCCLient]
	var sendonlys []commonclient.SendOnlyNode[*big.Int, evmclient.RPCCLient]
	for i, node := range nodes {
		if node.SendOnly != nil && *node.SendOnly {
			// Send-only nodes have no WS subscription; pass an empty WS URL.
			rpc := evmclient.NewRPCClient(lggr, empty, (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID,
				commonclient.Secondary)
			sendonly := commonclient.NewSendOnlyNode[*big.Int, evmclient.RPCCLient](lggr, (url.URL)(*node.HTTPURL),
				*node.Name, chainID, rpc)
			sendonlys = append(sendonlys, sendonly)
		} else {
			rpc := evmclient.NewRPCClient(lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i),
				chainID, commonclient.Primary)
			primaryNode := commonclient.NewNode[*big.Int, *evmtypes.Head, evmclient.RPCCLient](cfg, noNewHeadsThreshold,
				lggr, (url.URL)(*node.WSURL), (*url.URL)(node.HTTPURL), *node.Name, int32(i), chainID, *node.Order,
				rpc, "EVM")
			primaries = append(primaries, primaryNode)
		}
	}
	return evmclient.NewChainClient(lggr, cfg.SelectionMode(), cfg.LeaseDuration(), noNewHeadsThreshold, primaries, sendonlys, chainID, chainType)
}
a/core/chains/legacyevm/chain_test.go b/core/chains/legacyevm/chain_test.go new file mode 100644 index 00000000..5b029daa --- /dev/null +++ b/core/chains/legacyevm/chain_test.go @@ -0,0 +1,75 @@ +package legacyevm_test + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" +) + +func TestLegacyChains(t *testing.T) { + legacyevmCfg := configtest.NewGeneralConfig(t, nil) + + c := mocks.NewChain(t) + c.On("ID").Return(big.NewInt(7)) + m := map[string]legacyevm.Chain{c.ID().String(): c} + + l := legacyevm.NewLegacyChains(m, legacyevmCfg.EVMConfigs()) + assert.NotNil(t, l.ChainNodeConfigs()) + got, err := l.Get(c.ID().String()) + assert.NoError(t, err) + assert.Equal(t, c, got) + +} + +func TestChainOpts_Validate(t *testing.T) { + type fields struct { + AppConfig legacyevm.AppConfig + MailMon *mailbox.Monitor + DB *sqlx.DB + } + tests := []struct { + name string + fields fields + wantErr bool + }{ + { + name: "valid", + fields: fields{ + AppConfig: configtest.NewTestGeneralConfig(t), + MailMon: &mailbox.Monitor{}, + DB: pgtest.NewSqlxDB(t), + }, + }, + { + name: "invalid", + fields: fields{ + AppConfig: nil, + MailMon: nil, + DB: nil, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := legacyevm.ChainOpts{ + AppConfig: tt.fields.AppConfig, + MailMon: tt.fields.MailMon, + DB: tt.fields.DB, + } + if err := o.Validate(); (err != nil) != tt.wantErr { + t.Errorf("ChainOpts.Validate() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/core/chains/legacyevm/evm_txm.go b/core/chains/legacyevm/evm_txm.go new file mode 100644 index 
00000000..71e588cb --- /dev/null +++ b/core/chains/legacyevm/evm_txm.go @@ -0,0 +1,69 @@ +package legacyevm + +import ( + "fmt" + + "github.com/jmoiron/sqlx" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func newEvmTxm( + db *sqlx.DB, + cfg evmconfig.EVM, + evmRPCEnabled bool, + databaseConfig txmgr.DatabaseConfig, + listenerConfig txmgr.ListenerConfig, + client evmclient.Client, + lggr logger.Logger, + logPoller logpoller.LogPoller, + opts ChainRelayExtenderConfig, +) (txm txmgr.TxManager, + estimator gas.EvmFeeEstimator, + err error, +) { + chainID := cfg.ChainID() + if !evmRPCEnabled { + txm = &txmgr.NullTxManager{ErrMsg: fmt.Sprintf("Ethereum is disabled for chain %d", chainID)} + return txm, nil, nil + } + + lggr = lggr.Named("Txm") + lggr.Infow("Initializing EVM transaction manager", + "bumpTxDepth", cfg.GasEstimator().BumpTxDepth(), + "maxInFlightTransactions", cfg.Transactions().MaxInFlight(), + "maxQueuedTransactions", cfg.Transactions().MaxQueued(), + "nonceAutoSync", cfg.NonceAutoSync(), + "limitDefault", cfg.GasEstimator().LimitDefault(), + ) + + // build estimator from factory + if opts.GenGasEstimator == nil { + estimator = gas.NewEstimator(lggr, client, cfg, cfg.GasEstimator()) + } else { + estimator = opts.GenGasEstimator(chainID) + } + + if opts.GenTxManager == nil { + txm, err = txmgr.NewTxm( + db, + cfg, + txmgr.NewEvmTxmFeeConfig(cfg.GasEstimator()), + cfg.Transactions(), + databaseConfig, + listenerConfig, + client, + lggr, + logPoller, + opts.KeyStore, + estimator) + } else { + txm = opts.GenTxManager(chainID) + } + return +} diff --git a/core/chains/legacyevm/mocks/chain.go b/core/chains/legacyevm/mocks/chain.go 
new file mode 100644 index 00000000..6fe9debe --- /dev/null +++ b/core/chains/legacyevm/mocks/chain.go @@ -0,0 +1,455 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + client "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + + commontypes "github.com/goplugin/pluginv3.0/v2/common/types" + + config "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + + context "context" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + + gas "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + + log "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + + logger "github.com/goplugin/pluginv3.0/v2/core/logger" + + logpoller "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + + mock "github.com/stretchr/testify/mock" + + monitor "github.com/goplugin/pluginv3.0/v2/core/chains/evm/monitor" + + txmgr "github.com/goplugin/pluginv3.0/v2/common/txmgr" + + types "github.com/goplugin/plugin-common/pkg/types" +) + +// Chain is an autogenerated mock type for the Chain type +type Chain struct { + mock.Mock +} + +// BalanceMonitor provides a mock function with given fields: +func (_m *Chain) BalanceMonitor() monitor.BalanceMonitor { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BalanceMonitor") + } + + var r0 monitor.BalanceMonitor + if rf, ok := ret.Get(0).(func() monitor.BalanceMonitor); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(monitor.BalanceMonitor) + } + } + + return r0 +} + +// Client provides a mock function with given fields: +func (_m *Chain) Client() client.Client { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Client") + } + + var r0 client.Client + if rf, ok := ret.Get(0).(func() client.Client); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Client) + } + } + + return r0 +} + +// Close 
provides a mock function with given fields: +func (_m *Chain) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Config provides a mock function with given fields: +func (_m *Chain) Config() config.ChainScopedConfig { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Config") + } + + var r0 config.ChainScopedConfig + if rf, ok := ret.Get(0).(func() config.ChainScopedConfig); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.ChainScopedConfig) + } + } + + return r0 +} + +// GasEstimator provides a mock function with given fields: +func (_m *Chain) GasEstimator() gas.EvmFeeEstimator { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GasEstimator") + } + + var r0 gas.EvmFeeEstimator + if rf, ok := ret.Get(0).(func() gas.EvmFeeEstimator); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(gas.EvmFeeEstimator) + } + } + + return r0 +} + +// GetChainStatus provides a mock function with given fields: ctx +func (_m *Chain) GetChainStatus(ctx context.Context) (types.ChainStatus, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetChainStatus") + } + + var r0 types.ChainStatus + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (types.ChainStatus, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) types.ChainStatus); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(types.ChainStatus) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HeadBroadcaster provides a mock function with given fields: +func (_m *Chain) HeadBroadcaster() commontypes.HeadBroadcaster[*evmtypes.Head, common.Hash] { + 
ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HeadBroadcaster") + } + + var r0 commontypes.HeadBroadcaster[*evmtypes.Head, common.Hash] + if rf, ok := ret.Get(0).(func() commontypes.HeadBroadcaster[*evmtypes.Head, common.Hash]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(commontypes.HeadBroadcaster[*evmtypes.Head, common.Hash]) + } + } + + return r0 +} + +// HeadTracker provides a mock function with given fields: +func (_m *Chain) HeadTracker() commontypes.HeadTracker[*evmtypes.Head, common.Hash] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HeadTracker") + } + + var r0 commontypes.HeadTracker[*evmtypes.Head, common.Hash] + if rf, ok := ret.Get(0).(func() commontypes.HeadTracker[*evmtypes.Head, common.Hash]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(commontypes.HeadTracker[*evmtypes.Head, common.Hash]) + } + } + + return r0 +} + +// HealthReport provides a mock function with given fields: +func (_m *Chain) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// ID provides a mock function with given fields: +func (_m *Chain) ID() *big.Int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 *big.Int + if rf, ok := ret.Get(0).(func() *big.Int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + return r0 +} + +// ListNodeStatuses provides a mock function with given fields: ctx, pageSize, pageToken +func (_m *Chain) ListNodeStatuses(ctx context.Context, pageSize int32, pageToken string) ([]types.NodeStatus, string, int, error) { + ret := _m.Called(ctx, pageSize, 
pageToken) + + if len(ret) == 0 { + panic("no return value specified for ListNodeStatuses") + } + + var r0 []types.NodeStatus + var r1 string + var r2 int + var r3 error + if rf, ok := ret.Get(0).(func(context.Context, int32, string) ([]types.NodeStatus, string, int, error)); ok { + return rf(ctx, pageSize, pageToken) + } + if rf, ok := ret.Get(0).(func(context.Context, int32, string) []types.NodeStatus); ok { + r0 = rf(ctx, pageSize, pageToken) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.NodeStatus) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, int32, string) string); ok { + r1 = rf(ctx, pageSize, pageToken) + } else { + r1 = ret.Get(1).(string) + } + + if rf, ok := ret.Get(2).(func(context.Context, int32, string) int); ok { + r2 = rf(ctx, pageSize, pageToken) + } else { + r2 = ret.Get(2).(int) + } + + if rf, ok := ret.Get(3).(func(context.Context, int32, string) error); ok { + r3 = rf(ctx, pageSize, pageToken) + } else { + r3 = ret.Error(3) + } + + return r0, r1, r2, r3 +} + +// LogBroadcaster provides a mock function with given fields: +func (_m *Chain) LogBroadcaster() log.Broadcaster { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LogBroadcaster") + } + + var r0 log.Broadcaster + if rf, ok := ret.Get(0).(func() log.Broadcaster); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(log.Broadcaster) + } + } + + return r0 +} + +// LogPoller provides a mock function with given fields: +func (_m *Chain) LogPoller() logpoller.LogPoller { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LogPoller") + } + + var r0 logpoller.LogPoller + if rf, ok := ret.Get(0).(func() logpoller.LogPoller); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(logpoller.LogPoller) + } + } + + return r0 +} + +// Logger provides a mock function with given fields: +func (_m *Chain) Logger() logger.Logger { + ret := _m.Called() + + if len(ret) == 0 { + 
panic("no return value specified for Logger") + } + + var r0 logger.Logger + if rf, ok := ret.Get(0).(func() logger.Logger); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(logger.Logger) + } + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *Chain) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Ready provides a mock function with given fields: +func (_m *Chain) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *Chain) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Transact provides a mock function with given fields: ctx, from, to, amount, balanceCheck +func (_m *Chain) Transact(ctx context.Context, from string, to string, amount *big.Int, balanceCheck bool) error { + ret := _m.Called(ctx, from, to, amount, balanceCheck) + + if len(ret) == 0 { + panic("no return value specified for Transact") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, *big.Int, bool) error); ok { + r0 = rf(ctx, from, to, amount, balanceCheck) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TxManager provides a mock function with given fields: +func (_m *Chain) TxManager() txmgr.TxManager[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] { + ret := 
_m.Called() + + if len(ret) == 0 { + panic("no return value specified for TxManager") + } + + var r0 txmgr.TxManager[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] + if rf, ok := ret.Get(0).(func() txmgr.TxManager[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(txmgr.TxManager[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee]) + } + } + + return r0 +} + +// NewChain creates a new instance of Chain. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewChain(t interface { + mock.TestingT + Cleanup(func()) +}) *Chain { + mock := &Chain{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/chains/legacyevm/mocks/legacy_chain_container.go b/core/chains/legacyevm/mocks/legacy_chain_container.go new file mode 100644 index 00000000..47be4a8d --- /dev/null +++ b/core/chains/legacyevm/mocks/legacy_chain_container.go @@ -0,0 +1,153 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + legacyevm "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + mock "github.com/stretchr/testify/mock" + + types "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +// LegacyChainContainer is an autogenerated mock type for the LegacyChainContainer type +type LegacyChainContainer struct { + mock.Mock +} + +// ChainNodeConfigs provides a mock function with given fields: +func (_m *LegacyChainContainer) ChainNodeConfigs() types.Configs { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ChainNodeConfigs") + } + + var r0 types.Configs + if rf, ok := ret.Get(0).(func() types.Configs); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Configs) + } + } + + return r0 +} + +// Get provides a mock function with given fields: id +func (_m *LegacyChainContainer) Get(id string) (legacyevm.Chain, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 legacyevm.Chain + var r1 error + if rf, ok := ret.Get(0).(func(string) (legacyevm.Chain, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) legacyevm.Chain); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(legacyevm.Chain) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Len provides a mock function with given fields: +func (_m *LegacyChainContainer) Len() int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Len") + } + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// List provides a mock function with given fields: ids +func (_m *LegacyChainContainer) List(ids ...string) ([]legacyevm.Chain, error) { + _va := make([]interface{}, len(ids)) + for _i := range ids { + _va[_i] = ids[_i] + } + var _ca []interface{} + 
_ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for List") + } + + var r0 []legacyevm.Chain + var r1 error + if rf, ok := ret.Get(0).(func(...string) ([]legacyevm.Chain, error)); ok { + return rf(ids...) + } + if rf, ok := ret.Get(0).(func(...string) []legacyevm.Chain); ok { + r0 = rf(ids...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]legacyevm.Chain) + } + } + + if rf, ok := ret.Get(1).(func(...string) error); ok { + r1 = rf(ids...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Slice provides a mock function with given fields: +func (_m *LegacyChainContainer) Slice() []legacyevm.Chain { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Slice") + } + + var r0 []legacyevm.Chain + if rf, ok := ret.Get(0).(func() []legacyevm.Chain); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]legacyevm.Chain) + } + } + + return r0 +} + +// NewLegacyChainContainer creates a new instance of LegacyChainContainer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewLegacyChainContainer(t interface { + mock.TestingT + Cleanup(func()) +}) *LegacyChainContainer { + mock := &LegacyChainContainer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/cmd/admin_commands.go b/core/cmd/admin_commands.go new file mode 100644 index 00000000..551aa27a --- /dev/null +++ b/core/cmd/admin_commands.go @@ -0,0 +1,414 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/manyminds/api2go/jsonapi" + "github.com/urfave/cli" + "go.uber.org/multierr" + + cutils "github.com/goplugin/plugin-common/pkg/utils" + + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initAdminSubCmds(s *Shell) []cli.Command { + return []cli.Command{ + { + Name: "chpass", + Usage: "Change your API password remotely", + Action: s.ChangePassword, + }, + { + Name: "login", + Usage: "Login to remote client by creating a session cookie", + Action: s.RemoteLogin, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "file, f", + Usage: "text file holding the API email and password needed to create a session cookie", + }, + cli.BoolFlag{ + Name: "bypass-version-check", + Usage: "Bypass versioning check for compatibility of remote node", + }, + }, + }, + { + Name: "logout", + Usage: "Delete any local sessions", + Action: s.Logout, + }, + { + Name: "profile", + Usage: "Collects profile metrics from the node.", + Action: s.Profile, + Flags: []cli.Flag{ + cli.Uint64Flag{ + Name: "seconds, s", + Usage: "duration of profile capture", + Value: 8, + }, + cli.StringFlag{ + Name: "output_dir, o", + Usage: "output directory of the captured profile", + Value: "/tmp/", + }, + }, + }, + { + Name: "status", + Usage: "Displays the health of various services running inside the node.", + Action: s.Status, + Flags: []cli.Flag{}, + }, + { + Name: "users", + Usage: 
"Create, edit permissions, or delete API users", + Subcommands: cli.Commands{ + { + Name: "list", + Usage: "Lists all API users and their roles", + Action: s.ListUsers, + }, + { + Name: "create", + Usage: "Create a new API user", + Action: s.CreateUser, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "email", + Usage: "Email of new user to create", + Required: true, + }, + cli.StringFlag{ + Name: "role", + Usage: "Permission level of new user. Options: 'admin', 'edit', 'run', 'view'.", + Required: true, + }, + }, + }, + { + Name: "chrole", + Usage: "Changes an API user's role", + Action: s.ChangeRole, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "email", + Usage: "email of user to be edited", + Required: true, + }, + cli.StringFlag{ + Name: "new-role, newrole", + Usage: "new permission level role to set for user. Options: 'admin', 'edit', 'run', 'view'.", + Required: true, + }, + }, + }, + { + Name: "delete", + Usage: "Delete an API user", + Action: s.DeleteUser, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "email", + Usage: "Email of API user to delete", + Required: true, + }, + }, + }, + }, + }, + } +} + +type AdminUsersPresenter struct { + JAID + presenters.UserResource +} + +var adminUsersTableHeaders = []string{"Email", "Role", "Has API token", "Created at", "Updated at"} + +func (p *AdminUsersPresenter) ToRow() []string { + row := []string{ + p.ID, + string(p.Role), + p.HasActiveApiToken, + p.CreatedAt.String(), + p.UpdatedAt.String(), + } + return row +} + +// RenderTable implements TableRenderer +func (p *AdminUsersPresenter) RenderTable(rt RendererTable) error { + rows := [][]string{p.ToRow()} + + renderList(adminUsersTableHeaders, rows, rt.Writer) + + return cutils.JustError(rt.Write([]byte("\n"))) +} + +type AdminUsersPresenters []AdminUsersPresenter + +// RenderTable implements TableRenderer +func (ps AdminUsersPresenters) RenderTable(rt RendererTable) error { + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + 
if _, err := rt.Write([]byte("Users\n")); err != nil { + return err + } + renderList(adminUsersTableHeaders, rows, rt.Writer) + + return cutils.JustError(rt.Write([]byte("\n"))) +} + +// ListUsers renders all API users and their roles +func (s *Shell) ListUsers(_ *cli.Context) (err error) { + resp, err := s.HTTP.Get(s.ctx(), "/v2/users/", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &AdminUsersPresenters{}) +} + +// CreateUser creates a new user by prompting for email, password, and role +func (s *Shell) CreateUser(c *cli.Context) (err error) { + resp, err := s.HTTP.Get(s.ctx(), "/v2/users/", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + var links jsonapi.Links + var users AdminUsersPresenters + if err = s.deserializeAPIResponse(resp, &users, &links); err != nil { + return s.errorOut(err) + } + for _, user := range users { + if strings.EqualFold(user.Email, c.String("email")) { + return s.errorOut(fmt.Errorf("user with email %s already exists", user.Email)) + } + } + + fmt.Println("Password of new user:") + pwd := s.PasswordPrompter.Prompt() + + request := struct { + Email string `json:"email"` + Role string `json:"role"` + Password string `json:"password"` + }{ + Email: c.String("email"), + Role: c.String("role"), + Password: pwd, + } + + requestData, err := json.Marshal(request) + if err != nil { + return s.errorOut(err) + } + + buf := bytes.NewBuffer(requestData) + response, err := s.HTTP.Post(s.ctx(), "/v2/users", buf) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := response.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully created new API user") +} + +// ChangeRole can 
change a user's role +func (s *Shell) ChangeRole(c *cli.Context) (err error) { + request := struct { + Email string `json:"email"` + NewRole string `json:"newRole"` + }{ + Email: c.String("email"), + NewRole: c.String("new-role"), + } + + requestData, err := json.Marshal(request) + if err != nil { + return s.errorOut(err) + } + + buf := bytes.NewBuffer(requestData) + response, err := s.HTTP.Patch(s.ctx(), "/v2/users", buf) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := response.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully updated API user") +} + +// DeleteUser deletes an API user by email +func (s *Shell) DeleteUser(c *cli.Context) (err error) { + email := c.String("email") + if email == "" { + return s.errorOut(errors.New("email flag is empty, must specify an email")) + } + + response, err := s.HTTP.Delete(s.ctx(), fmt.Sprintf("/v2/users/%s", email)) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := response.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(response, &AdminUsersPresenter{}, "Successfully deleted API user") +} + +// Status will display the health of various services +func (s *Shell) Status(c *cli.Context) error { + resp, err := s.HTTP.Get(s.ctx(), "/health?full=1", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &HealthCheckPresenters{}) +} + +// Profile will collect pprof metrics and store them in a folder. 
+func (s *Shell) Profile(c *cli.Context) error { + ctx := s.ctx() + seconds := c.Uint("seconds") + baseDir := c.String("output_dir") + + genDir := filepath.Join(baseDir, fmt.Sprintf("debuginfo-%s", time.Now().Format(time.RFC3339))) + + if err := os.Mkdir(genDir, 0o755); err != nil { + return s.errorOut(err) + } + var wgPprof sync.WaitGroup + vitals := []string{ + "allocs", // A sampling of all past memory allocations + "block", // Stack traces that led to blocking on synchronization primitives + "cmdline", // The command line invocation of the current program + "goroutine", // Stack traces of all current goroutines + "heap", // A sampling of memory allocations of live objects. + "mutex", // Stack traces of holders of contended mutexes + "profile", // CPU profile. + "threadcreate", // Stack traces that led to the creation of new OS threads + "trace", // A trace of execution of the current program. + } + wgPprof.Add(len(vitals)) + s.Logger.Infof("Collecting profiles: %v", vitals) + s.Logger.Infof("writing debug info to %s", genDir) + + errs := make(chan error, len(vitals)) + for _, vt := range vitals { + go func(vt string) { + defer wgPprof.Done() + uri := fmt.Sprintf("/v2/debug/pprof/%s?seconds=%d", vt, seconds) + resp, err := s.HTTP.Get(ctx, uri) + if err != nil { + errs <- fmt.Errorf("error collecting %s: %w", vt, err) + return + } + defer func() { + if resp.Body != nil { + resp.Body.Close() + } + }() + if resp.StatusCode == http.StatusUnauthorized { + errs <- fmt.Errorf("error collecting %s: %w", vt, errUnauthorized) + return + } + if resp.StatusCode == http.StatusBadRequest { + // best effort to interpret the underlying problem + pprofVersion := resp.Header.Get("X-Go-Pprof") + if pprofVersion == "1" { + b, err2 := io.ReadAll(resp.Body) + if err2 != nil { + errs <- fmt.Errorf("error collecting %s: %w", vt, err2) + return + } + respContent := string(b) + // taken from pprof.Profile 
https://github.com/golang/go/blob/release-branch.go1.20/src/net/http/pprof/pprof.go#L133 + if strings.Contains(respContent, "profile duration exceeds server's WriteTimeout") { + errs <- fmt.Errorf("%w: %s", ErrProfileTooLong, respContent) + } else { + errs <- fmt.Errorf("error collecting %s: %w: %s", vt, errBadRequest, respContent) + } + } else { + errs <- fmt.Errorf("error collecting %s: %w", vt, errBadRequest) + } + return + } + // write to file + f, err := os.Create(filepath.Join(genDir, vt)) + if err != nil { + errs <- fmt.Errorf("error creating file for %s: %w", vt, err) + return + } + wc := utils.NewDeferableWriteCloser(f) + defer wc.Close() + + _, err = io.Copy(wc, resp.Body) + if err != nil { + errs <- fmt.Errorf("error writing to file for %s: %w", vt, err) + return + } + err = wc.Close() + if err != nil { + errs <- fmt.Errorf("error closing file for %s: %w", vt, err) + return + } + }(vt) + } + wgPprof.Wait() + close(errs) + // Atmost one err is emitted per vital. + s.Logger.Infof("collected %d/%d profiles", len(vitals)-len(errs), len(vitals)) + if len(errs) > 0 { + var merr error + for err := range errs { + merr = errors.Join(merr, err) + } + return s.errorOut(fmt.Errorf("profile collection failed:\n%v", merr)) + } + return nil +} diff --git a/core/cmd/admin_commands_test.go b/core/cmd/admin_commands_test.go new file mode 100644 index 00000000..0fde3f13 --- /dev/null +++ b/core/cmd/admin_commands_test.go @@ -0,0 +1,189 @@ +package cmd_test + +import ( + "bytes" + "flag" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestShell_CreateUser(t *testing.T) { + app := startNewApplicationV2(t, nil) + client, _ 
:= app.NewShellAndRenderer() + client.PasswordPrompter = cltest.MockPasswordPrompter{ + Password: cltest.Password, + } + + tests := []struct { + name string + email string + role string + err string + }{ + {"Invalid request", "//", "", "parseResponse error"}, + {"No params", "", "", "Invalid role"}, + {"No email", "", "view", "Must enter an email"}, + {"User exists", cltest.APIEmailAdmin, "admin", fmt.Sprintf(`user with email %s already exists`, cltest.APIEmailAdmin)}, + {"Valid params", cltest.MustRandomUser(t).Email, "view", ""}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.CreateUser, set, "") + + require.NoError(t, set.Set("email", test.email)) + require.NoError(t, set.Set("role", test.role)) + + c := cli.NewContext(nil, set, nil) + if test.err != "" { + assert.ErrorContains(t, client.CreateUser(c), test.err) + } else { + assert.NoError(t, client.CreateUser(c)) + } + }) + } +} + +func TestShell_ChangeRole(t *testing.T) { + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + user := cltest.MustRandomUser(t) + require.NoError(t, app.AuthenticationProvider().CreateUser(&user)) + + tests := []struct { + name string + email string + role string + err string + }{ + {"Invalid request", "//", "", "parseResponse error"}, + {"No params", "", "", "must specify an email"}, + {"No email", "", "view", "must specify an email"}, + {"No role", user.Email, "", "must specify a new role"}, + {"Unknown role", user.Email, "foo", "new role does not exist"}, + {"Unknown user", cltest.MustRandomUser(t).Email, "admin", "error updating API user"}, + {"Valid params", user.Email, "view", ""}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.ChangeRole, set, "") + + require.NoError(t, set.Set("email", test.email)) + require.NoError(t, 
set.Set("new-role", test.role)) + c := cli.NewContext(nil, set, nil) + if test.err != "" { + assert.ErrorContains(t, client.ChangeRole(c), test.err) + } else { + assert.NoError(t, client.ChangeRole(c)) + } + }) + } +} + +func TestShell_DeleteUser(t *testing.T) { + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + user := cltest.MustRandomUser(t) + require.NoError(t, app.BasicAdminUsersORM().CreateUser(&user)) + + tests := []struct { + name string + email string + err string + }{ + {"Invalid request", "//", "parseResponse error"}, + {"No email", "", "must specify an email"}, + {"Unknown email", "foo", "specified user not found"}, + {"Valid params", user.Email, ""}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteUser, set, "") + + require.NoError(t, set.Set("email", test.email)) + c := cli.NewContext(nil, set, nil) + if test.err != "" { + assert.ErrorContains(t, client.DeleteUser(c), test.err) + } else { + assert.NoError(t, client.DeleteUser(c)) + } + }) + } +} + +func TestShell_ListUsers(t *testing.T) { + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + user := cltest.MustRandomUser(t) + require.NoError(t, app.AuthenticationProvider().CreateUser(&user)) + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.ListUsers, set, "") + c := cli.NewContext(nil, set, nil) + + buffer := bytes.NewBufferString("") + client.Renderer = cmd.RendererTable{Writer: buffer} + + assert.NoError(t, client.ListUsers(c), user.Email) + + output := buffer.String() + assert.Contains(t, output, user.Email) + assert.Contains(t, output, user.Role) + assert.Contains(t, output, user.TokenKey.String) + assert.Contains(t, output, user.CreatedAt.String()) + assert.Contains(t, output, user.UpdatedAt.String()) +} + +func TestAdminUsersPresenter_RenderTable(t *testing.T) { + user := sessions.User{ + Email: 
"foo@bar.com", + Role: "admin", + CreatedAt: time.Now(), + TokenKey: null.StringFrom("tokenKey"), + UpdatedAt: time.Now().Add(time.Duration(rand.Intn(10000)) * time.Second), + } + + presenter := cmd.AdminUsersPresenter{ + JAID: cmd.JAID{ID: user.Email}, + UserResource: presenters.UserResource{ + JAID: presenters.JAID{ID: user.Email}, + Email: user.Email, + Role: user.Role, + HasActiveApiToken: user.TokenKey.String, + CreatedAt: user.CreatedAt, + UpdatedAt: user.UpdatedAt, + }, + } + + buffer := bytes.NewBufferString("") + r := cmd.RendererTable{Writer: buffer} + + require.NoError(t, presenter.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, user.Email) + assert.Contains(t, output, user.Role) + assert.Contains(t, output, user.TokenKey.String) + assert.Contains(t, output, user.CreatedAt.String()) + assert.Contains(t, output, user.UpdatedAt.String()) +} diff --git a/core/cmd/app.go b/core/cmd/app.go new file mode 100644 index 00000000..17e9efcf --- /dev/null +++ b/core/cmd/app.go @@ -0,0 +1,329 @@ +package cmd + +import ( + "fmt" + "net/url" + "os" + "path/filepath" + "regexp" + + "github.com/pkg/errors" + "github.com/urfave/cli" + + "github.com/goplugin/pluginv3.0/v2/core/build" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/static" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func removeHidden(cmds ...cli.Command) []cli.Command { + var ret []cli.Command + for _, cmd := range cmds { + if cmd.Hidden { + continue + } + ret = append(ret, cmd) + } + return ret +} + +// NewApp returns the command-line parser/function-router for the given client +func NewApp(s *Shell) *cli.App { + app := cli.NewApp() + app.Usage = "CLI for Plugin" + app.Version = fmt.Sprintf("%v@%v", static.Version, static.Sha) + // TOML + var opts plugin.GeneralConfigOpts + + app.Flags = []cli.Flag{ + cli.BoolFlag{ + Name: "json, j", + Usage: "json output as opposed 
to table", + }, + cli.StringFlag{ + Name: "admin-credentials-file", + Usage: fmt.Sprintf("optional, applies only in client mode when making remote API calls. If provided, `FILE` containing admin credentials will be used for logging in, allowing to avoid an additional login step. If `FILE` is missing, it will be ignored. Defaults to %s", filepath.Join("", "apicredentials")), + }, + cli.StringFlag{ + Name: "remote-node-url", + Usage: "optional, applies only in client mode when making remote API calls. If provided, `URL` will be used as the remote Plugin API endpoint", + Value: "http://localhost:6688", + }, + cli.BoolFlag{ + Name: "insecure-skip-verify", + Usage: "optional, applies only in client mode when making remote API calls. If turned on, SSL certificate verification will be disabled. This is mostly useful for people who want to use Plugin with a self-signed TLS certificate", + }, + cli.StringSliceFlag{ + Name: "config, c", + Usage: "TOML configuration file(s) via flag, or raw TOML via env var. If used, legacy env vars must not be set. Multiple files can be used (-c configA.toml -c configB.toml), and they are applied in order with duplicated fields overriding any earlier values. If the 'CL_CONFIG' env var is specified, it is always processed last with the effect of being the final override. [$CL_CONFIG]", + // Note: we cannot use the EnvVar field since it will combine with the flags. + Hidden: true, + }, + cli.StringSliceFlag{ + Name: "secrets, s", + Usage: "TOML configuration file for secrets. Must be set if and only if config is set. Multiple files can be used (-s secretsA.toml -s secretsB.toml), and they are applied in order. No overrides are allowed.", + Hidden: true, + }, + } + app.Before = func(c *cli.Context) error { + s.configFiles = c.StringSlice("config") + s.configFilesIsSet = c.IsSet("config") + s.secretsFiles = c.StringSlice("secrets") + s.secretsFileIsSet = c.IsSet("secrets") + + // Default to using a stdout logger only. 
+ // This is overidden for server commands which may start a rotating + // logger instead. + lggr, closeFn := logger.NewLogger() + + cfg, err := opts.New() + if err != nil { + return err + } + + s.Logger = lggr + s.CloseLogger = closeFn + s.Config = cfg + + if c.Bool("json") { + s.Renderer = RendererJSON{Writer: os.Stdout} + } + + cookieJar, err := NewUserCache("cookies", func() logger.Logger { return s.Logger }) + if err != nil { + return fmt.Errorf("error initialize plugin cookie cache: %w", err) + } + + urlStr := c.String("remote-node-url") + remoteNodeURL, err := url.Parse(urlStr) + if err != nil { + return errors.Wrapf(err, "%s is not a valid URL", urlStr) + } + + insecureSkipVerify := c.Bool("insecure-skip-verify") + clientOpts := ClientOpts{RemoteNodeURL: *remoteNodeURL, InsecureSkipVerify: insecureSkipVerify} + cookieAuth := NewSessionCookieAuthenticator(clientOpts, DiskCookieStore{Config: cookieJar}, s.Logger) + sessionRequestBuilder := NewFileSessionRequestBuilder(s.Logger) + + credentialsFile := c.String("admin-credentials-file") + sr, err := sessionRequestBuilder.Build(credentialsFile) + if err != nil && !errors.Is(errors.Cause(err), ErrNoCredentialFile) && !os.IsNotExist(err) { + return errors.Wrapf(err, "failed to load API credentials from file %s", credentialsFile) + } + + s.HTTP = NewAuthenticatedHTTPClient(s.Logger, clientOpts, cookieAuth, sr) + s.CookieAuthenticator = cookieAuth + s.FileSessionRequestBuilder = sessionRequestBuilder + + // Allow for initServerConfig to be called if the flag is provided. 
+ if c.Bool("applyInitServerConfig") { + cfg, err = initServerConfig(&opts, s.configFiles, s.secretsFiles) + if err != nil { + return err + } + s.Config = cfg + } + + return nil + + } + app.After = func(c *cli.Context) error { + if s.CloseLogger != nil { + return s.CloseLogger() + } + return nil + } + app.Commands = removeHidden([]cli.Command{ + { + Name: "admin", + Usage: "Commands for remotely taking admin related actions", + Subcommands: initAdminSubCmds(s), + }, + { + Name: "attempts", + Aliases: []string{"txas"}, + Usage: "Commands for managing Ethereum Transaction Attempts", + Subcommands: initAttemptsSubCmds(s), + }, + { + Name: "blocks", + Aliases: []string{}, + Usage: "Commands for managing blocks", + Subcommands: initBlocksSubCmds(s), + }, + { + Name: "bridges", + Usage: "Commands for Bridges communicating with External Adapters", + Subcommands: initBrideSubCmds(s), + }, + { + Name: "config", + Usage: "Commands for the node's configuration", + Subcommands: initRemoteConfigSubCmds(s), + }, + { + Name: "health", + Usage: "Prints a health report", + Action: s.Health, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "json, j", + Usage: "json output", + }, + }, + }, + { + Name: "jobs", + Usage: "Commands for managing Jobs", + Subcommands: initJobsSubCmds(s), + }, + { + Name: "keys", + Usage: "Commands for managing various types of keys used by the Plugin node", + Subcommands: []cli.Command{ + // TODO unify init vs keysCommand + // out of scope for initial refactor because it breaks usage messages. 
+ initEthKeysSubCmd(s), + initP2PKeysSubCmd(s), + initCSAKeysSubCmd(s), + initOCRKeysSubCmd(s), + initOCR2KeysSubCmd(s), + + keysCommand("Cosmos", NewCosmosKeysClient(s)), + keysCommand("Solana", NewSolanaKeysClient(s)), + keysCommand("StarkNet", NewStarkNetKeysClient(s)), + keysCommand("DKGSign", NewDKGSignKeysClient(s)), + keysCommand("DKGEncrypt", NewDKGEncryptKeysClient(s)), + + initVRFKeysSubCmd(s), + }, + }, + { + Name: "node", + Aliases: []string{"local"}, + Usage: "Commands for admin actions that must be run locally", + Description: "Commands can only be run from on the same machine as the Plugin node.", + Subcommands: initLocalSubCmds(s, build.IsProd()), + Flags: []cli.Flag{ + cli.StringSliceFlag{ + Name: "config, c", + Usage: "TOML configuration file(s) via flag, or raw TOML via env var. If used, legacy env vars must not be set. Multiple files can be used (-c configA.toml -c configB.toml), and they are applied in order with duplicated fields overriding any earlier values. If the 'CL_CONFIG' env var is specified, it is always processed last with the effect of being the final override. [$CL_CONFIG]", + }, + cli.StringSliceFlag{ + Name: "secrets, s", + Usage: "TOML configuration file for secrets. Must be set if and only if config is set. Multiple files can be used (-s secretsA.toml -s secretsB.toml), and fields from the files will be merged. No overrides are allowed.", + }, + }, + Before: func(c *cli.Context) error { + errNoDuplicateFlags := fmt.Errorf("multiple commands with --config or --secrets flags. only one command may specify these flags. 
when secrets are used, they must be specific together in the same command") + if c.IsSet("config") { + if s.configFilesIsSet || s.secretsFileIsSet { + return errNoDuplicateFlags + } + s.configFiles = c.StringSlice("config") + } + + if c.IsSet("secrets") { + if s.configFilesIsSet || s.secretsFileIsSet { + return errNoDuplicateFlags + } + s.secretsFiles = c.StringSlice("secrets") + } + + // flags here, or ENV VAR only + cfg, err := initServerConfig(&opts, s.configFiles, s.secretsFiles) + if err != nil { + return err + } + s.Config = cfg + + logFileMaxSizeMB := s.Config.Log().File().MaxSize() / utils.MB + if logFileMaxSizeMB > 0 { + err = utils.EnsureDirAndMaxPerms(s.Config.Log().File().Dir(), os.FileMode(0700)) + if err != nil { + return err + } + } + + // Swap out the logger, replacing the old one. + err = s.CloseLogger() + if err != nil { + return err + } + + lggrCfg := logger.Config{ + LogLevel: s.Config.Log().Level(), + Dir: s.Config.Log().File().Dir(), + JsonConsole: s.Config.Log().JSONConsole(), + UnixTS: s.Config.Log().UnixTimestamps(), + FileMaxSizeMB: int(logFileMaxSizeMB), + FileMaxAgeDays: int(s.Config.Log().File().MaxAgeDays()), + FileMaxBackups: int(s.Config.Log().File().MaxBackups()), + } + l, closeFn := lggrCfg.New() + + s.Logger = l + s.CloseLogger = closeFn + + return nil + }, + }, + { + Name: "initiators", + Usage: "Commands for managing External Initiators", + Subcommands: initInitiatorsSubCmds(s), + }, + { + Name: "txs", + Usage: "Commands for handling transactions", + Subcommands: []cli.Command{ + initEVMTxSubCmd(s), + initCosmosTxSubCmd(s), + initSolanaTxSubCmd(s), + }, + }, + { + Name: "chains", + Usage: "Commands for handling chain configuration", + Subcommands: cli.Commands{ + chainCommand("EVM", EVMChainClient(s), cli.Int64Flag{Name: "id", Usage: "chain ID"}), + chainCommand("Cosmos", CosmosChainClient(s), cli.StringFlag{Name: "id", Usage: "chain ID"}), + chainCommand("Solana", SolanaChainClient(s), + cli.StringFlag{Name: "id", Usage: "chain 
ID, options: [mainnet, testnet, devnet, localnet]"}), + chainCommand("StarkNet", StarkNetChainClient(s), cli.StringFlag{Name: "id", Usage: "chain ID"}), + }, + }, + { + Name: "nodes", + Usage: "Commands for handling node configuration", + Subcommands: cli.Commands{ + initEVMNodeSubCmd(s), + initCosmosNodeSubCmd(s), + initSolanaNodeSubCmd(s), + initStarkNetNodeSubCmd(s), + }, + }, + { + Name: "forwarders", + Usage: "Commands for managing forwarder addresses.", + Subcommands: initFowardersSubCmds(s), + }, + }...) + return app +} + +var whitespace = regexp.MustCompile(`\s+`) + +// format returns result of replacing all whitespace in s with a single space +func format(s string) string { + return string(whitespace.ReplaceAll([]byte(s), []byte(" "))) +} + +func initServerConfig(opts *plugin.GeneralConfigOpts, configFiles []string, secretsFiles []string) (plugin.GeneralConfig, error) { + err := opts.Setup(configFiles, secretsFiles) + if err != nil { + return nil, err + } + return opts.New() +} diff --git a/core/cmd/app_test.go b/core/cmd/app_test.go new file mode 100644 index 00000000..258f73d1 --- /dev/null +++ b/core/cmd/app_test.go @@ -0,0 +1,246 @@ +package cmd + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +var ( + setInFile = "set in config file" + setInEnv = "set in env" + + testEnvContents = fmt.Sprintf("P2P.V2.AnnounceAddresses = ['%s']", setInEnv) + + testConfigFileContents = plugin.Config{ + Core: toml.Core{ + RootDir: &setInFile, + P2P: toml.P2P{ + V2: toml.P2PV2{ + AnnounceAddresses: &[]string{setInFile}, + ListenAddresses: &[]string{setInFile}, + }, + }, + }, + } + + testSecretsFileContents = 
plugin.Secrets{ + Secrets: toml.Secrets{ + Prometheus: toml.PrometheusSecrets{ + AuthToken: models.NewSecret("PROM_TOKEN"), + }, + }, + } + + testSecretsRedactedContents = plugin.Secrets{ + Secrets: toml.Secrets{ + Prometheus: toml.PrometheusSecrets{ + AuthToken: models.NewSecret("xxxxx"), + }, + }, + } +) + +func withDefaults(t *testing.T, c plugin.Config, s plugin.Secrets) plugin.GeneralConfig { + cfg, err := plugin.GeneralConfigOpts{Config: c, Secrets: s}.New() + require.NoError(t, err) + return cfg +} + +func Test_initServerConfig(t *testing.T) { + type args struct { + opts *plugin.GeneralConfigOpts + fileNames []string + secretsFiles []string + envVar string + } + tests := []struct { + name string + args args + wantErr bool + wantCfg plugin.GeneralConfig + }{ + { + name: "env only", + args: args{ + opts: new(plugin.GeneralConfigOpts), + envVar: testEnvContents, + }, + wantCfg: withDefaults(t, plugin.Config{ + Core: toml.Core{ + P2P: toml.P2P{ + V2: toml.P2PV2{ + AnnounceAddresses: &[]string{setInEnv}, + }, + }, + }, + }, plugin.Secrets{}), + }, + { + name: "files only", + args: args{ + opts: new(plugin.GeneralConfigOpts), + fileNames: []string{configtest.WriteTOMLFile(t, testConfigFileContents, "test.toml")}, + }, + wantCfg: withDefaults(t, testConfigFileContents, plugin.Secrets{}), + }, + { + name: "file error", + args: args{ + opts: new(plugin.GeneralConfigOpts), + fileNames: []string{"notexist"}, + }, + wantErr: true, + }, + { + name: "env overlay of file", + args: args{ + opts: new(plugin.GeneralConfigOpts), + fileNames: []string{configtest.WriteTOMLFile(t, testConfigFileContents, "test.toml")}, + envVar: testEnvContents, + }, + wantCfg: withDefaults(t, plugin.Config{ + Core: toml.Core{ + RootDir: &setInFile, + P2P: toml.P2P{ + V2: toml.P2PV2{ + // env should override this specific field + AnnounceAddresses: &[]string{setInEnv}, + ListenAddresses: &[]string{setInFile}, + }, + }, + }, + }, plugin.Secrets{}), + }, + { + name: "failed to read secrets", + 
args: args{ + opts: new(plugin.GeneralConfigOpts), + fileNames: []string{configtest.WriteTOMLFile(t, testConfigFileContents, "test.toml")}, + secretsFiles: []string{"/doesnt-exist"}, + }, + wantErr: true, + }, + { + name: "reading secrets", + args: args{ + opts: new(plugin.GeneralConfigOpts), + fileNames: []string{configtest.WriteTOMLFile(t, testConfigFileContents, "test.toml")}, + secretsFiles: []string{configtest.WriteTOMLFile(t, testSecretsFileContents, "test_secrets.toml")}, + }, + wantCfg: withDefaults(t, testConfigFileContents, testSecretsRedactedContents), + }, + { + name: "reading multiple secrets", + args: args{ + opts: new(plugin.GeneralConfigOpts), + fileNames: []string{configtest.WriteTOMLFile(t, testConfigFileContents, "test.toml")}, + secretsFiles: []string{ + "../services/plugin/testdata/mergingsecretsdata/secrets-database.toml", + "../services/plugin/testdata/mergingsecretsdata/secrets-password.toml", + "../services/plugin/testdata/mergingsecretsdata/secrets-pyroscope.toml", + "../services/plugin/testdata/mergingsecretsdata/secrets-prometheus.toml", + "../services/plugin/testdata/mergingsecretsdata/secrets-mercury-split-one.toml", + "../services/plugin/testdata/mergingsecretsdata/secrets-mercury-split-two.toml", + "../services/plugin/testdata/mergingsecretsdata/secrets-threshold.toml", + "../services/plugin/testdata/mergingsecretsdata/secrets-webserver-ldap.toml", + }, + }, + wantErr: false, + }, + { + name: "reading multiple secrets with overrides: Database", + args: args{ + opts: new(plugin.GeneralConfigOpts), + fileNames: []string{configtest.WriteTOMLFile(t, testConfigFileContents, "test.toml")}, + secretsFiles: []string{ + "../testdata/mergingsecretsdata/secrets-database.toml", + "../testdata/mergingsecretsdata/secrets-database.toml", + }, + }, + wantErr: true, + }, + { + name: "reading multiple secrets with overrides: Password", + args: args{ + opts: new(plugin.GeneralConfigOpts), + fileNames: []string{configtest.WriteTOMLFile(t, 
testConfigFileContents, "test.toml")}, + secretsFiles: []string{ + "../testdata/mergingsecretsdata/secrets-password.toml", + "../testdata/mergingsecretsdata/secrets-password.toml", + }, + }, + wantErr: true, + }, + { + name: "reading multiple secrets with overrides: Pyroscope", + args: args{ + opts: new(plugin.GeneralConfigOpts), + fileNames: []string{configtest.WriteTOMLFile(t, testConfigFileContents, "test.toml")}, + secretsFiles: []string{ + "../testdata/mergingsecretsdata/secrets-pyroscope.toml", + "../testdata/mergingsecretsdata/secrets-pyroscope.toml", + }, + }, + wantErr: true, + }, + { + name: "reading multiple secrets with overrides: Prometheus", + args: args{ + opts: new(plugin.GeneralConfigOpts), + fileNames: []string{configtest.WriteTOMLFile(t, testConfigFileContents, "test.toml")}, + secretsFiles: []string{ + "../testdata/mergingsecretsdata/secrets-prometheus.toml", + "../testdata/mergingsecretsdata/secrets-prometheus.toml", + }, + }, + wantErr: true, + }, + { + name: "reading multiple secrets with overrides: Mercury", + args: args{ + opts: new(plugin.GeneralConfigOpts), + fileNames: []string{configtest.WriteTOMLFile(t, testConfigFileContents, "test.toml")}, + secretsFiles: []string{ + "../testdata/mergingsecretsdata/secrets-mercury-split-one.toml", + "../testdata/mergingsecretsdata/secrets-mercury-split-one.toml", + }, + }, + wantErr: true, + }, + { + name: "reading multiple secrets with overrides: Threshold", + args: args{ + opts: new(plugin.GeneralConfigOpts), + fileNames: []string{configtest.WriteTOMLFile(t, testConfigFileContents, "test.toml")}, + secretsFiles: []string{ + "../testdata/mergingsecretsdata/secrets-threshold.toml", + "../testdata/mergingsecretsdata/secrets-threshold.toml", + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.args.envVar != "" { + t.Setenv(string(env.Config), tt.args.envVar) + } + cfg, err := initServerConfig(tt.args.opts, tt.args.fileNames, 
tt.args.secretsFiles) + if (err != nil) != tt.wantErr { + t.Errorf("loadOpts() error = %v, wantErr %v", err, tt.wantErr) + } + if tt.wantCfg != nil { + assert.Equal(t, tt.wantCfg, cfg) + } + }) + } +} diff --git a/core/cmd/attempts_commands.go b/core/cmd/attempts_commands.go new file mode 100644 index 00000000..96468d33 --- /dev/null +++ b/core/cmd/attempts_commands.go @@ -0,0 +1,25 @@ +package cmd + +import "github.com/urfave/cli" + +func initAttemptsSubCmds(s *Shell) []cli.Command { + return []cli.Command{ + { + Name: "list", + Usage: "List the Transaction Attempts in descending order", + Action: s.IndexTxAttempts, + Flags: []cli.Flag{ + cli.IntFlag{ + Name: "page", + Usage: "page of results to display", + }, + }, + }, + } +} + +// IndexTxAttempts returns the list of transactions in descending order, +// taking an optional page parameter +func (s *Shell) IndexTxAttempts(c *cli.Context) error { + return s.getPage("/v2/tx_attempts/evm", c.Int("page"), &EthTxPresenters{}) +} diff --git a/core/cmd/blocks_commands.go b/core/cmd/blocks_commands.go new file mode 100644 index 00000000..72b0523e --- /dev/null +++ b/core/cmd/blocks_commands.go @@ -0,0 +1,77 @@ +package cmd + +import ( + "bytes" + "fmt" + "net/url" + "strconv" + + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" +) + +func initBlocksSubCmds(s *Shell) []cli.Command { + return []cli.Command{ + { + Name: "replay", + Usage: "Replays block data from the given number", + Action: s.ReplayFromBlock, + Flags: []cli.Flag{ + cli.IntFlag{ + Name: "block-number", + Usage: "Block number to replay from", + Required: true, + }, + cli.BoolFlag{ + Name: "force", + Usage: "Whether to force broadcasting logs which were already consumed and that would otherwise be skipped", + }, + cli.Int64Flag{ + Name: "evm-chain-id", + Usage: "Chain ID of the EVM-based blockchain", + Required: false, + }, + }, + }, + } +} + +// ReplayFromBlock replays chain data from the given block number until the most recent +func 
(s *Shell) ReplayFromBlock(c *cli.Context) (err error) { + blockNumber := c.Int64("block-number") + if blockNumber <= 0 { + return s.errorOut(errors.New("Must pass a positive value in '--block-number' parameter")) + } + + v := url.Values{} + v.Add("force", strconv.FormatBool(c.Bool("force"))) + + if c.IsSet("evm-chain-id") { + v.Add("evmChainID", fmt.Sprintf("%d", c.Int64("evm-chain-id"))) + } + + buf := bytes.NewBufferString("{}") + resp, err := s.HTTP.Post(s.ctx(), + fmt.Sprintf( + "/v2/replay_from_block/%v?%s", + blockNumber, + v.Encode(), + ), buf) + if err != nil { + return s.errorOut(err) + } + + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + _, err = s.parseResponse(resp) + if err != nil { + return s.errorOut(err) + } + fmt.Println("Replay started") + return nil +} diff --git a/core/cmd/blocks_commands_test.go b/core/cmd/blocks_commands_test.go new file mode 100644 index 00000000..9e1a6f59 --- /dev/null +++ b/core/cmd/blocks_commands_test.go @@ -0,0 +1,43 @@ +package cmd_test + +import ( + "flag" + "math/big" + "testing" + + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func Test_ReplayFromBlock(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].ChainID = (*ubig.Big)(big.NewInt(5)) + c.EVM[0].Enabled = ptr(true) + }) + + client, _ := app.NewShellAndRenderer() + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.ReplayFromBlock, set, "") + + //Incorrect block number + require.NoError(t, set.Set("block-number", "0")) + c := cli.NewContext(nil, set, nil) + require.ErrorContains(t, client.ReplayFromBlock(c), "Must pass a positive value in") + + //Incorrect chain ID + require.NoError(t, set.Set("block-number", "1")) + require.NoError(t, set.Set("evm-chain-id", 
"1")) + c = cli.NewContext(nil, set, nil) + require.ErrorContains(t, client.ReplayFromBlock(c), "does not match any local chains") + + //Correct chain ID + require.NoError(t, set.Set("evm-chain-id", "5")) + c = cli.NewContext(nil, set, nil) + require.NoError(t, client.ReplayFromBlock(c)) +} diff --git a/core/cmd/bridge_commands.go b/core/cmd/bridge_commands.go new file mode 100644 index 00000000..f363dae6 --- /dev/null +++ b/core/cmd/bridge_commands.go @@ -0,0 +1,148 @@ +package cmd + +import ( + "errors" + "strconv" + + "github.com/urfave/cli" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initBrideSubCmds(s *Shell) []cli.Command { + return []cli.Command{ + { + Name: "create", + Usage: "Create a new Bridge to an External Adapter", + Action: s.CreateBridge, + }, + { + Name: "destroy", + Usage: "Destroys the Bridge for an External Adapter", + Action: s.RemoveBridge, + }, + { + Name: "list", + Usage: "List all Bridges to External Adapters", + Action: s.IndexBridges, + Flags: []cli.Flag{ + cli.IntFlag{ + Name: "page", + Usage: "page of results to display", + }, + }, + }, + { + Name: "show", + Usage: "Show a Bridge's details", + Action: s.ShowBridge, + }, + } +} + +type BridgePresenter struct { + presenters.BridgeResource +} + +// FriendlyConfirmations converts the confirmations to a string +func (p *BridgePresenter) FriendlyConfirmations() string { + return strconv.FormatUint(uint64(p.Confirmations), 10) +} + +// RenderTable implements TableRenderer +func (p *BridgePresenter) RenderTable(rt RendererTable) error { + table := rt.newTable([]string{"Name", "URL", "Default Confirmations", "Outgoing Token"}) + table.Append([]string{ + p.Name, + p.URL, + p.FriendlyConfirmations(), + p.OutgoingToken, + }) + render("Bridge", table) + return nil +} + +type BridgePresenters []BridgePresenter + +// RenderTable implements TableRenderer +func (ps BridgePresenters) RenderTable(rt RendererTable) error { + table := 
rt.newTable([]string{"Name", "URL", "Confirmations"}) + for _, p := range ps { + table.Append([]string{ + p.Name, + p.URL, + p.FriendlyConfirmations(), + }) + } + + render("Bridges", table) + return nil +} + +// IndexBridges returns all bridges. +func (s *Shell) IndexBridges(c *cli.Context) (err error) { + return s.getPage("/v2/bridge_types", c.Int("page"), &BridgePresenters{}) +} + +// ShowBridge returns the info for the given Bridge name. +func (s *Shell) ShowBridge(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("must pass the name of the bridge to be shown")) + } + bridgeName := c.Args().First() + resp, err := s.HTTP.Get(s.ctx(), "/v2/bridge_types/"+bridgeName) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &BridgePresenter{}) +} + +// CreateBridge adds a new bridge to the plugin node +func (s *Shell) CreateBridge(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("must pass in the bridge's parameters [JSON blob | JSON filepath]")) + } + + buf, err := getBufferFromJSON(c.Args().First()) + if err != nil { + return s.errorOut(err) + } + + resp, err := s.HTTP.Post(s.ctx(), "/v2/bridge_types", buf) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &BridgePresenter{}) +} + +// RemoveBridge removes a specific Bridge by name. 
+func (s *Shell) RemoveBridge(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("must pass the name of the bridge to be removed")) + } + bridgeName := c.Args().First() + resp, err := s.HTTP.Delete(s.ctx(), "/v2/bridge_types/"+bridgeName) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &BridgePresenter{}) +} diff --git a/core/cmd/bridge_commands_test.go b/core/cmd/bridge_commands_test.go new file mode 100644 index 00000000..3c5fc67c --- /dev/null +++ b/core/cmd/bridge_commands_test.go @@ -0,0 +1,192 @@ +package cmd_test + +import ( + "bytes" + "flag" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestBridgePresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + name = "Bridge 1" + url = "http://example.com" + createdAt = time.Now() + outgoingToken = "anoutgoingtoken" + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.BridgePresenter{ + BridgeResource: presenters.BridgeResource{ + JAID: presenters.NewJAID(name), + Name: name, + URL: url, + Confirmations: 10, + OutgoingToken: outgoingToken, + CreatedAt: createdAt, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, name) + assert.Contains(t, output, url) + assert.Contains(t, output, "10") + assert.Contains(t, output, outgoingToken) + + // Render many resources + buffer.Reset() + ps := cmd.BridgePresenters{p} + require.NoError(t, 
ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, name) + assert.Contains(t, output, url) + assert.Contains(t, output, "10") + assert.NotContains(t, output, outgoingToken) +} + +func TestShell_IndexBridges(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, r := app.NewShellAndRenderer() + + bt1 := &bridges.BridgeType{ + Name: bridges.MustParseBridgeName("cliindexbridges1"), + URL: cltest.WebURL(t, "https://testing.com/bridges"), + Confirmations: 0, + } + err := app.BridgeORM().CreateBridgeType(bt1) + require.NoError(t, err) + + bt2 := &bridges.BridgeType{ + Name: bridges.MustParseBridgeName("cliindexbridges2"), + URL: cltest.WebURL(t, "https://testing.com/bridges"), + Confirmations: 0, + } + err = app.BridgeORM().CreateBridgeType(bt2) + require.NoError(t, err) + + require.Nil(t, client.IndexBridges(cltest.EmptyCLIContext())) + bridges := *r.Renders[0].(*cmd.BridgePresenters) + require.Equal(t, 2, len(bridges)) + p := bridges[0] + assert.Equal(t, bt1.Name.String(), p.Name) + assert.Equal(t, bt1.URL.String(), p.URL) + assert.Equal(t, bt1.Confirmations, p.Confirmations) + + p = bridges[1] + assert.Equal(t, bt2.Name.String(), p.Name) + assert.Equal(t, bt2.URL.String(), p.URL) + assert.Equal(t, bt2.Confirmations, p.Confirmations) +} + +func TestShell_ShowBridge(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, r := app.NewShellAndRenderer() + + bt := &bridges.BridgeType{ + Name: bridges.MustParseBridgeName(testutils.RandomizeName("showbridge")), + URL: cltest.WebURL(t, "https://testing.com/bridges"), + Confirmations: 0, + } + require.NoError(t, app.BridgeORM().CreateBridgeType(bt)) + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.ShowBridge, set, "") + + require.NoError(t, set.Parse([]string{bt.Name.String()})) + + c := cli.NewContext(nil, set, nil) + + require.NoError(t, client.ShowBridge(c)) + require.Len(t, r.Renders, 1) + p := 
r.Renders[0].(*cmd.BridgePresenter) + assert.Equal(t, bt.Name.String(), p.Name) + assert.Equal(t, bt.URL.String(), p.URL) + assert.Equal(t, bt.Confirmations, p.Confirmations) +} + +func TestShell_CreateBridge(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + + tests := []struct { + name string + param string + errored bool + }{ + {"EmptyString", "", true}, + {"ValidString", `{ "name": "TestBridge", "url": "http://localhost:3000/randomNumber" }`, false}, + {"InvalidString", `{ "noname": "", "nourl": "" }`, true}, + {"InvalidChar", `{ "badname": "path/bridge", "nourl": "" }`, true}, + {"ValidPath", "../testdata/apiresponses/create_random_number_bridge_type.json", false}, + {"InvalidPath", "bad/filepath/", true}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + set := flag.NewFlagSet("bridge", 0) + flagSetApplyFromAction(client.CreateBridge, set, "") + + require.NoError(t, set.Parse([]string{test.param})) + + c := cli.NewContext(nil, set, nil) + if test.errored { + assert.Error(t, client.CreateBridge(c)) + } else { + assert.Nil(t, client.CreateBridge(c)) + } + }) + } +} + +func TestShell_RemoveBridge(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, r := app.NewShellAndRenderer() + + bt := &bridges.BridgeType{ + Name: bridges.MustParseBridgeName(testutils.RandomizeName("removebridge")), + URL: cltest.WebURL(t, "https://testing.com/bridges"), + Confirmations: 0, + } + err := app.BridgeORM().CreateBridgeType(bt) + require.NoError(t, err) + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RemoveBridge, set, "") + + require.NoError(t, set.Parse([]string{bt.Name.String()})) + + c := cli.NewContext(nil, set, nil) + require.NoError(t, client.RemoveBridge(c)) + + require.Len(t, r.Renders, 1) + p := r.Renders[0].(*cmd.BridgePresenter) + assert.Equal(t, bt.Name.String(), p.Name) + assert.Equal(t, bt.URL.String(), p.URL) + 
assert.Equal(t, bt.Confirmations, p.Confirmations)
+}
diff --git a/core/cmd/chains_commands.go b/core/cmd/chains_commands.go
new file mode 100644
index 00000000..6edb5afc
--- /dev/null
+++ b/core/cmd/chains_commands.go
+package cmd
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/urfave/cli"
+)
+
+// chainHeaders are the column titles shared by every chain-listing table renderer.
+var chainHeaders = []string{"ID", "Enabled", "Config"}
+
+// chainCommand returns a cli.Command with subcommands for the given ChainClient.
+// The chainId cli.Flag must be named "id", but may be String or Int.
+func chainCommand(typ string, client ChainClient, chainID cli.Flag) cli.Command {
+	// Guard against wiring mistakes at startup: the subcommands look the flag up
+	// by the literal name "id", so any other name would silently break them.
+	if flagName := chainID.GetName(); flagName != "id" {
+		panic(fmt.Errorf("chainID flag name must be 'id', got: %s", flagName))
+	}
+	lower := strings.ToLower(typ)
+	return cli.Command{
+		Name:  lower,
+		Usage: fmt.Sprintf("Commands for handling %s chains", typ),
+		Subcommands: cli.Commands{
+			{
+				Name:   "list",
+				Usage:  fmt.Sprintf("List all existing %s chains", typ),
+				Action: client.IndexChains,
+			},
+		},
+	}
+}
+
+// ChainClient is a generic client interface for any type of chain.
+type ChainClient interface {
+	IndexChains(c *cli.Context) error
+}
+
+// chainClient is the concrete ChainClient: it fetches pages of chain configs
+// from the node API under path and renders them with presenter type P.
+type chainClient[P TableRenderer] struct {
+	*Shell
+	path string
+}
+
+// newChainClient returns a new ChainClient for a particular type of chains.Config.
+// P is the slice-of-presenters TableRenderer used to render the page of chain
+// configs returned by IndexChains (e.g. CosmosChainPresenters).
+func newChainClient[P TableRenderer](s *Shell, name string) ChainClient {
+	return &chainClient[P]{
+		Shell: s,
+		path:  "/v2/chains/" + name,
+	}
+}
+
+// IndexChains returns all chains.
+// NOTE(review): the receiver is named "cli", shadowing the urfave/cli package
+// inside the body (the signature itself still resolves *cli.Context to the
+// package). A different receiver name would be clearer — confirm before renaming.
+func (cli *chainClient[P]) IndexChains(c *cli.Context) (err error) {
+	// Fetch one page of chain configs (page number from the "page" flag)
+	// and render it into the presenter slice P.
+	var p P
+	return cli.getPage(cli.path, c.Int("page"), &p)
+}
diff --git a/core/cmd/cosmos_chains_commands.go b/core/cmd/cosmos_chains_commands.go
new file mode 100644
index 00000000..58c4f8ab
--- /dev/null
+++ b/core/cmd/cosmos_chains_commands.go
+package cmd
+
+import (
+	"strconv"
+
+	"github.com/goplugin/pluginv3.0/v2/core/web/presenters"
+)
+
+// CosmosChainPresenter implements TableRenderer for a CosmosChainResource
+type CosmosChainPresenter struct {
+	presenters.CosmosChainResource
+}
+
+// ToRow presents the CosmosChainResource as a slice of strings.
+// Column order must match chainHeaders: ID, Enabled, Config.
+func (p *CosmosChainPresenter) ToRow() []string {
+	return []string{p.GetID(), strconv.FormatBool(p.Enabled), p.Config}
+}
+
+// RenderTable implements TableRenderer
+// Just renders a single row
+func (p CosmosChainPresenter) RenderTable(rt RendererTable) error {
+	rows := [][]string{}
+	rows = append(rows, p.ToRow())
+
+	renderList(chainHeaders, rows, rt.Writer)
+
+	return nil
+}
+
+// CosmosChainPresenters implements TableRenderer for a slice of CosmosChainPresenters.
+type CosmosChainPresenters []CosmosChainPresenter + +// RenderTable implements TableRenderer +func (ps CosmosChainPresenters) RenderTable(rt RendererTable) error { + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(chainHeaders, rows, rt.Writer) + + return nil +} + +func CosmosChainClient(s *Shell) ChainClient { + return newChainClient[CosmosChainPresenters](s, "cosmos") +} diff --git a/core/cmd/cosmos_chains_commands_test.go b/core/cmd/cosmos_chains_commands_test.go new file mode 100644 index 00000000..3c7a846f --- /dev/null +++ b/core/cmd/cosmos_chains_commands_test.go @@ -0,0 +1,33 @@ +package cmd_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + coscfg "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/cosmostest" +) + +func TestShell_IndexCosmosChains(t *testing.T) { + t.Parallel() + + chainID := cosmostest.RandomChainID() + chain := coscfg.TOMLConfig{ + ChainID: ptr(chainID), + Enabled: ptr(true), + } + app := cosmosStartNewApplication(t, &chain) + client, r := app.NewShellAndRenderer() + + require.Nil(t, cmd.CosmosChainClient(client).IndexChains(cltest.EmptyCLIContext())) + chains := *r.Renders[0].(*cmd.CosmosChainPresenters) + require.Len(t, chains, 1) + c := chains[0] + assert.Equal(t, chainID, c.ID) + assertTableRenders(t, r) +} diff --git a/core/cmd/cosmos_keys_commands.go b/core/cmd/cosmos_keys_commands.go new file mode 100644 index 00000000..e98296ad --- /dev/null +++ b/core/cmd/cosmos_keys_commands.go @@ -0,0 +1,57 @@ +package cmd + +import ( + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/cosmoskey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +type CosmosKeyPresenter struct { + JAID + 
presenters.CosmosKeyResource +} + +// RenderTable implements TableRenderer +func (p CosmosKeyPresenter) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Public key"} + rows := [][]string{p.ToRow()} + + if _, err := rt.Write([]byte("🔑 Cosmos Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return utils.JustError(rt.Write([]byte("\n"))) +} + +func (p *CosmosKeyPresenter) ToRow() []string { + row := []string{ + p.ID, + p.PubKey, + } + + return row +} + +type CosmosKeyPresenters []CosmosKeyPresenter + +// RenderTable implements TableRenderer +func (ps CosmosKeyPresenters) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Public key"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + if _, err := rt.Write([]byte("🔑 Cosmos Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return utils.JustError(rt.Write([]byte("\n"))) +} + +func NewCosmosKeysClient(s *Shell) KeysClient { + return newKeysClient[cosmoskey.Key, CosmosKeyPresenter, CosmosKeyPresenters]("Cosmos", s) +} diff --git a/core/cmd/cosmos_keys_commands_test.go b/core/cmd/cosmos_keys_commands_test.go new file mode 100644 index 00000000..a9bf6e1c --- /dev/null +++ b/core/cmd/cosmos_keys_commands_test.go @@ -0,0 +1,171 @@ +package cmd_test + +import ( + "bytes" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/cosmoskey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestCosmosKeyPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + id = "1" + pubKey = "somepubkey" + buffer = 
bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.CosmosKeyPresenter{ + JAID: cmd.JAID{ID: id}, + CosmosKeyResource: presenters.CosmosKeyResource{ + JAID: presenters.NewJAID(id), + PubKey: pubKey, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, pubKey) + + // Render many resources + buffer.Reset() + ps := cmd.CosmosKeyPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, pubKey) +} + +func TestShell_CosmosKeys(t *testing.T) { + app := startNewApplicationV2(t, nil) + ks := app.GetKeyStore().Cosmos() + cleanup := func() { + keys, err := ks.GetAll() + require.NoError(t, err) + for _, key := range keys { + require.NoError(t, utils.JustError(ks.Delete(key.ID()))) + } + requireCosmosKeyCount(t, app, 0) + } + + t.Run("ListCosmosKeys", func(tt *testing.T) { + defer cleanup() + client, r := app.NewShellAndRenderer() + key, err := app.GetKeyStore().Cosmos().Create() + require.NoError(t, err) + requireCosmosKeyCount(t, app, 1) + assert.Nil(t, cmd.NewCosmosKeysClient(client).ListKeys(cltest.EmptyCLIContext())) + require.Equal(t, 1, len(r.Renders)) + keys := *r.Renders[0].(*cmd.CosmosKeyPresenters) + assert.True(t, key.PublicKeyStr() == keys[0].PubKey) + + }) + + t.Run("CreateCosmosKey", func(tt *testing.T) { + defer cleanup() + client, _ := app.NewShellAndRenderer() + require.NoError(t, cmd.NewCosmosKeysClient(client).CreateKey(nilContext)) + keys, err := app.GetKeyStore().Cosmos().GetAll() + require.NoError(t, err) + require.Len(t, keys, 1) + }) + + t.Run("DeleteCosmosKey", func(tt *testing.T) { + defer cleanup() + client, _ := app.NewShellAndRenderer() + key, err := app.GetKeyStore().Cosmos().Create() + require.NoError(t, err) + requireCosmosKeyCount(t, app, 1) + set := flag.NewFlagSet("test", 0) + 
flagSetApplyFromAction(cmd.NewCosmosKeysClient(client).DeleteKey, set, "cosmos") + + strID := key.ID() + require.NoError(tt, set.Set("yes", "true")) + require.NoError(tt, set.Parse([]string{strID})) + + c := cli.NewContext(nil, set, nil) + err = cmd.NewCosmosKeysClient(client).DeleteKey(c) + require.NoError(t, err) + requireCosmosKeyCount(t, app, 0) + }) + + t.Run("ImportExportCosmosKey", func(tt *testing.T) { + defer cleanup() + defer deleteKeyExportFile(t) + client, _ := app.NewShellAndRenderer() + + _, err := app.GetKeyStore().Cosmos().Create() + require.NoError(t, err) + + keys := requireCosmosKeyCount(t, app, 1) + key := keys[0] + keyName := keyNameForTest(t) + + // Export test invalid id + set := flag.NewFlagSet("test Cosmos export", 0) + flagSetApplyFromAction(cmd.NewCosmosKeysClient(client).ExportKey, set, "cosmos") + + require.NoError(tt, set.Parse([]string{"0"})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c := cli.NewContext(nil, set, nil) + tclient := cmd.NewCosmosKeysClient(client) + err = tclient.ExportKey(c) + require.Error(t, err, "Error exporting") + require.Error(t, utils.JustError(os.Stat(keyName))) + + // Export test + set = flag.NewFlagSet("test Cosmos export", 0) + flagSetApplyFromAction(cmd.NewCosmosKeysClient(client).ExportKey, set, "cosmos") + + require.NoError(tt, set.Parse([]string{fmt.Sprint(key.ID())})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c = cli.NewContext(nil, set, nil) + + require.NoError(t, tclient.ExportKey(c)) + require.NoError(t, utils.JustError(os.Stat(keyName))) + + require.NoError(t, utils.JustError(app.GetKeyStore().Cosmos().Delete(key.ID()))) + requireCosmosKeyCount(t, app, 0) + + set = flag.NewFlagSet("test Cosmos import", 0) + flagSetApplyFromAction(cmd.NewCosmosKeysClient(client).ImportKey, set, "cosmos") + + 
require.NoError(tt, set.Parse([]string{keyName})) + require.NoError(tt, set.Set("old-password", "../internal/fixtures/incorrect_password.txt")) + + c = cli.NewContext(nil, set, nil) + require.NoError(t, tclient.ImportKey(c)) + + requireCosmosKeyCount(t, app, 1) + }) +} + +func requireCosmosKeyCount(t *testing.T, app plugin.Application, length int) []cosmoskey.Key { + t.Helper() + keys, err := app.GetKeyStore().Cosmos().GetAll() + require.NoError(t, err) + require.Len(t, keys, length) + return keys +} diff --git a/core/cmd/cosmos_node_commands.go b/core/cmd/cosmos_node_commands.go new file mode 100644 index 00000000..ca148bdc --- /dev/null +++ b/core/cmd/cosmos_node_commands.go @@ -0,0 +1,44 @@ +package cmd + +import ( + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// CosmosNodePresenter implements TableRenderer for a CosmosNodeResource. +type CosmosNodePresenter struct { + presenters.CosmosNodeResource +} + +// ToRow presents the CosmosNodeResource as a slice of strings. +func (p *CosmosNodePresenter) ToRow() []string { + return []string{p.Name, p.ChainID, p.State, p.Config} +} + +// RenderTable implements TableRenderer +func (p CosmosNodePresenter) RenderTable(rt RendererTable) error { + var rows [][]string + rows = append(rows, p.ToRow()) + renderList(nodeHeaders, rows, rt.Writer) + + return nil +} + +// CosmosNodePresenters implements TableRenderer for a slice of CosmosNodePresenter. 
+type CosmosNodePresenters []CosmosNodePresenter + +// RenderTable implements TableRenderer +func (ps CosmosNodePresenters) RenderTable(rt RendererTable) error { + var rows [][]string + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(nodeHeaders, rows, rt.Writer) + + return nil +} + +func NewCosmosNodeClient(s *Shell) NodeClient { + return newNodeClient[CosmosNodePresenters](s, "cosmos") +} diff --git a/core/cmd/cosmos_node_commands_test.go b/core/cmd/cosmos_node_commands_test.go new file mode 100644 index 00000000..5d0f8931 --- /dev/null +++ b/core/cmd/cosmos_node_commands_test.go @@ -0,0 +1,71 @@ +package cmd_test + +import ( + "bytes" + "strings" + "testing" + + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/config" + coscfg "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/cosmostest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func cosmosStartNewApplication(t *testing.T, cfgs ...*coscfg.TOMLConfig) *cltest.TestApplication { + for i := range cfgs { + cfgs[i].SetDefaults() + } + return startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Cosmos = cfgs + c.EVM = nil + }) +} + +func TestShell_IndexCosmosNodes(t *testing.T) { + t.Parallel() + + chainID := cosmostest.RandomChainID() + node := coscfg.Node{ + Name: ptr("second"), + TendermintURL: config.MustParseURL("http://tender.mint.test/bombay-12"), + } + chain := coscfg.TOMLConfig{ + ChainID: ptr(chainID), + Enabled: ptr(true), + Nodes: coscfg.Nodes{&node}, + } + app := cosmosStartNewApplication(t, &chain) + client, r := app.NewShellAndRenderer() + require.Nil(t, cmd.NewCosmosNodeClient(client).IndexNodes(cltest.EmptyCLIContext())) + require.NotEmpty(t, r.Renders) + 
nodes := *r.Renders[0].(*cmd.CosmosNodePresenters) + require.Len(t, nodes, 1) + n := nodes[0] + assert.Equal(t, cltest.FormatWithPrefixedChainID(chainID, *node.Name), n.ID) + assert.Equal(t, chainID, n.ChainID) + assert.Equal(t, *node.Name, n.Name) + wantConfig, err := toml.Marshal(node) + require.NoError(t, err) + assert.Equal(t, string(wantConfig), n.Config) + assertTableRenders(t, r) + + //Render table and check the fields order + b := new(bytes.Buffer) + rt := cmd.RendererTable{b} + require.NoError(t, nodes.RenderTable(rt)) + renderLines := strings.Split(b.String(), "\n") + assert.Equal(t, 10, len(renderLines)) + assert.Contains(t, renderLines[2], "Name") + assert.Contains(t, renderLines[2], n.Name) + assert.Contains(t, renderLines[3], "Chain ID") + assert.Contains(t, renderLines[3], n.ChainID) + assert.Contains(t, renderLines[4], "State") + assert.Contains(t, renderLines[4], n.State) +} diff --git a/core/cmd/cosmos_transaction_commands.go b/core/cmd/cosmos_transaction_commands.go new file mode 100644 index 00000000..3cdd332a --- /dev/null +++ b/core/cmd/cosmos_transaction_commands.go @@ -0,0 +1,130 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/urfave/cli" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/store/models/cosmos" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initCosmosTxSubCmd(s *Shell) cli.Command { + return cli.Command{ + Name: "cosmos", + Usage: "Commands for handling Cosmos transactions", + Subcommands: []cli.Command{ + { + Name: "create", + Usage: "Send of from node Cosmos account to destination .", + Action: s.CosmosSendNativeToken, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "force", + Usage: "allows to send a higher amount than the account's balance", + }, + cli.StringFlag{ + Name: "id", + Usage: "chain ID", + }, + }, + }, + }, + } +} + +type CosmosMsgPresenter struct { + JAID + presenters.CosmosMsgResource +} + 
+// RenderTable implements TableRenderer +func (p *CosmosMsgPresenter) RenderTable(rt RendererTable) error { + table := rt.newTable([]string{"Chain ID", "Contract ID", "State", "Tx Hash"}) + var hash string + if p.TxHash != nil { + hash = *p.TxHash + } + table.Append([]string{ + p.ChainID, + p.ContractID, + p.State, + hash, + }) + + render(fmt.Sprintf("Cosmos Message %v", p.ID), table) + return nil +} + +// CosmosSendNativeToken transfers coins from the node's account to a specified address. +func (s *Shell) CosmosSendNativeToken(c *cli.Context) (err error) { + if c.NArg() < 3 { + return s.errorOut(errors.New("four arguments expected: token, amount, fromAddress and toAddress")) + } + + err = sdk.ValidateDenom(c.Args().Get(0)) + if err != nil { + return s.errorOut(fmt.Errorf("invalid native token: %w", err)) + } + + amount, err := sdk.NewDecFromStr(c.Args().Get(1)) + if err != nil { + return s.errorOut(multierr.Combine( + fmt.Errorf("invalid coin: %w", err))) + } + + unparsedFromAddress := c.Args().Get(2) + fromAddress, err := sdk.AccAddressFromBech32(unparsedFromAddress) + if err != nil { + return s.errorOut(multierr.Combine( + fmt.Errorf("while parsing withdrawal source address %v", + unparsedFromAddress), err)) + } + + unparsedDestinationAddress := c.Args().Get(3) + destinationAddress, err := sdk.AccAddressFromBech32(unparsedDestinationAddress) + if err != nil { + return s.errorOut(multierr.Combine( + fmt.Errorf("while parsing withdrawal destination address %v", + unparsedDestinationAddress), err)) + } + + chainID := c.String("id") + if chainID == "" { + return s.errorOut(errors.New("missing id")) + } + + request := cosmos.SendRequest{ + DestinationAddress: destinationAddress, + FromAddress: fromAddress, + Amount: amount, + CosmosChainID: chainID, + Token: c.Args().Get(0), + AllowHigherAmounts: c.IsSet("force"), + } + + requestData, err := json.Marshal(request) + if err != nil { + return s.errorOut(err) + } + + buf := bytes.NewBuffer(requestData) + + resp, err := 
s.HTTP.Post(s.ctx(), "/v2/transfers/cosmos", buf) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + err = s.renderAPIResponse(resp, &CosmosMsgPresenter{}) + return err +} diff --git a/core/cmd/cosmos_transaction_commands_test.go b/core/cmd/cosmos_transaction_commands_test.go new file mode 100644 index 00000000..d6e2b433 --- /dev/null +++ b/core/cmd/cosmos_transaction_commands_test.go @@ -0,0 +1,131 @@ +//go:build integration + +package cmd_test + +import ( + "flag" + "os" + "testing" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/goplugin/plugin-common/pkg/config" + cosmosclient "github.com/goplugin/plugin-cosmos/pkg/cosmos/client" + coscfg "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + cosmosdb "github.com/goplugin/plugin-cosmos/pkg/cosmos/db" + "github.com/goplugin/plugin-cosmos/pkg/cosmos/denom" + "github.com/goplugin/plugin-cosmos/pkg/cosmos/params" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/cosmostest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/cosmoskey" +) + +var nativeToken = "cosm" + +func TestMain(m *testing.M) { + + params.InitCosmosSdk( + /* bech32Prefix= */ "wasm", + /* token= */ nativeToken, + ) + + code := m.Run() + os.Exit(code) +} + +func TestShell_SendCosmosCoins(t *testing.T) { + // TODO(BCI-978): cleanup once SetupLocalCosmosNode is updated + chainID := cosmostest.RandomChainID() + cosmosChain := coscfg.Chain{} + cosmosChain.SetDefaults() + accounts, _, url := cosmosclient.SetupLocalCosmosNode(t, chainID, *cosmosChain.GasToken) + require.Greater(t, len(accounts), 1) + nodes := coscfg.Nodes{ + &coscfg.Node{ + Name: ptr("random"), + TendermintURL: 
config.MustParseURL(url), + }, + } + chainConfig := coscfg.TOMLConfig{ChainID: &chainID, Enabled: ptr(true), Chain: cosmosChain, Nodes: nodes} + app := cosmosStartNewApplication(t, &chainConfig) + + from := accounts[0] + to := accounts[1] + require.NoError(t, app.GetKeyStore().Cosmos().Add(cosmoskey.Raw(from.PrivateKey.Bytes()).Key())) + chain, err := app.GetRelayers().LegacyCosmosChains().Get(chainID) + require.NoError(t, err) + + reader, err := chain.Reader("") + require.NoError(t, err) + + require.Eventually(t, func() bool { + coin, err := reader.Balance(from.Address, *cosmosChain.GasToken) + if !assert.NoError(t, err) { + return false + } + return coin.IsPositive() + }, time.Minute, 5*time.Second) + + client, r := app.NewShellAndRenderer() + cliapp := cli.NewApp() + + for _, tt := range []struct { + amount string + expErr string + }{ + {amount: "0.000001"}, + {amount: "1"}, + {amount: "30.000001"}, + {amount: "1000", expErr: "is too low for this transaction to be executed:"}, + {amount: "0", expErr: "amount must be greater than zero:"}, + {amount: "asdf", expErr: "invalid coin: failed to set decimal string"}, + } { + tt := tt + t.Run(tt.amount, func(t *testing.T) { + startBal, err := reader.Balance(from.Address, *cosmosChain.GasToken) + require.NoError(t, err) + + set := flag.NewFlagSet("sendcosmoscoins", 0) + flagSetApplyFromAction(client.CosmosSendNativeToken, set, "cosmos") + + require.NoError(t, set.Set("id", chainID)) + require.NoError(t, set.Parse([]string{nativeToken, tt.amount, from.Address.String(), to.Address.String()})) + + c := cli.NewContext(cliapp, set, nil) + err = client.CosmosSendNativeToken(c) + if tt.expErr == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expErr) + return + } + + // Check CLI output + require.Greater(t, len(r.Renders), 0) + renderer := r.Renders[len(r.Renders)-1] + renderedMsg := renderer.(*cmd.CosmosMsgPresenter) + require.NotEmpty(t, renderedMsg.ID) + assert.Equal(t, 
string(cosmosdb.Unstarted), renderedMsg.State) + assert.Nil(t, renderedMsg.TxHash) + + // Check balance + sent, err := denom.ConvertDecCoinToDenom(sdk.NewDecCoinFromDec(nativeToken, sdk.MustNewDecFromStr(tt.amount)), *cosmosChain.GasToken) + require.NoError(t, err) + expBal := startBal.Sub(sent) + + testutils.AssertEventually(t, func() bool { + endBal, err := reader.Balance(from.Address, *cosmosChain.GasToken) + require.NoError(t, err) + t.Logf("%s <= %s", endBal, expBal) + return endBal.IsLTE(expBal) + }) + }) + } +} diff --git a/core/cmd/csa_keys_commands.go b/core/cmd/csa_keys_commands.go new file mode 100644 index 00000000..3d3d465f --- /dev/null +++ b/core/cmd/csa_keys_commands.go @@ -0,0 +1,241 @@ +package cmd + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "os" + + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + + cutils "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initCSAKeysSubCmd(s *Shell) cli.Command { + return cli.Command{ + Name: "csa", + Usage: "Remote commands for administering the node's CSA keys", + Subcommands: cli.Commands{ + { + Name: "create", + Usage: format(`Create a CSA key, encrypted with password from the password file, and store it in the database.`), + Action: s.CreateCSAKey, + }, + { + Name: "list", + Usage: format(`List available CSA keys`), + Action: s.ListCSAKeys, + }, + { + Name: "import", + Usage: format(`Imports a CSA key from a JSON file.`), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "old-password, oldpassword, p", + Usage: "`FILE` containing the password used to encrypt the key in the JSON file", + }, + }, + Action: s.ImportCSAKey, + }, + { + Name: "export", + Usage: format(`Exports an existing CSA key by its ID.`), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "new-password, newpassword, p", + Usage: "`FILE` containing the password to encrypt the key 
(required)", + }, + cli.StringFlag{ + Name: "output, o", + Usage: "`FILE` where the JSON file will be saved (required)", + }, + }, + Action: s.ExportCSAKey, + }, + }, + } +} + +type CSAKeyPresenter struct { + JAID + presenters.CSAKeyResource +} + +// RenderTable implements TableRenderer +func (p *CSAKeyPresenter) RenderTable(rt RendererTable) error { + headers := []string{"Public key"} + rows := [][]string{p.ToRow()} + + if _, err := rt.Write([]byte("🔑 CSA Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return nil +} + +func (p *CSAKeyPresenter) ToRow() []string { + row := []string{ + p.PubKey, + } + + return row +} + +type CSAKeyPresenters []CSAKeyPresenter + +// RenderTable implements TableRenderer +func (ps CSAKeyPresenters) RenderTable(rt RendererTable) error { + headers := []string{"Public key"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + if _, err := rt.Write([]byte("🔑 CSA Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + return cutils.JustError(rt.Write([]byte("\n"))) +} + +// ListCSAKeys retrieves a list of all CSA keys +func (s *Shell) ListCSAKeys(_ *cli.Context) (err error) { + resp, err := s.HTTP.Get(s.ctx(), "/v2/keys/csa", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &CSAKeyPresenters{}) +} + +// CreateCSAKey creates a new CSA key +func (s *Shell) CreateCSAKey(_ *cli.Context) (err error) { + resp, err := s.HTTP.Post(s.ctx(), "/v2/keys/csa", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &CSAKeyPresenter{}, "Created CSA key") +} + +// ImportCSAKey imports and stores a CSA key. Path to key must be passed. 
+func (s *Shell) ImportCSAKey(c *cli.Context) (err error) {
+	// First positional arg is the path to the exported key JSON.
+	if !c.Args().Present() {
+		return s.errorOut(errors.New("Must pass the filepath of the key to be imported"))
+	}
+
+	// The password that the key file was encrypted with is read from the
+	// file named by --old-password; it is never taken directly on the CLI.
+	oldPasswordFile := c.String("old-password")
+	if len(oldPasswordFile) == 0 {
+		return s.errorOut(errors.New("Must specify --old-password/-p flag"))
+	}
+	oldPassword, err := os.ReadFile(oldPasswordFile)
+	if err != nil {
+		return s.errorOut(errors.Wrap(err, "Could not read password file"))
+	}
+
+	filepath := c.Args().Get(0)
+	keyJSON, err := os.ReadFile(filepath)
+	if err != nil {
+		return s.errorOut(err)
+	}
+
+	// NOTE(review): "exportUrl" is a copy-paste name — this is the *import*
+	// endpoint. Consider renaming to importUrl.
+	exportUrl := url.URL{
+		Path: "/v2/keys/csa/import",
+	}
+
+	// Pass the decryption password as a query parameter, normalized the same
+	// way the server expects (see normalizePassword).
+	query := exportUrl.Query()
+	query.Set("oldpassword", normalizePassword(string(oldPassword)))
+
+	exportUrl.RawQuery = query.Encode()
+	resp, err := s.HTTP.Post(s.ctx(), exportUrl.String(), bytes.NewReader(keyJSON))
+	if err != nil {
+		return s.errorOut(err)
+	}
+	// Fold any Close error into the named return so it is not lost.
+	defer func() {
+		if cerr := resp.Body.Close(); cerr != nil {
+			err = multierr.Append(err, cerr)
+		}
+	}()
+
+	return s.renderAPIResponse(resp, &CSAKeyPresenter{}, "🔑 Imported CSA key")
+}
+
+// ExportCSAKey exports a CSA key. Key ID must be passed.
+func (s *Shell) ExportCSAKey(c *cli.Context) (err error) {
+	// First positional arg is the ID of the key to export.
+	if !c.Args().Present() {
+		return s.errorOut(errors.New("Must pass the ID of the key to export"))
+	}
+
+	// The password used to encrypt the exported JSON is read from the file
+	// named by --new-password; it is never taken directly on the CLI.
+	newPasswordFile := c.String("new-password")
+	if len(newPasswordFile) == 0 {
+		return s.errorOut(errors.New("Must specify --new-password/-p flag"))
+	}
+
+	newPassword, err := os.ReadFile(newPasswordFile)
+	if err != nil {
+		return s.errorOut(errors.Wrap(err, "Could not read password file"))
+	}
+
+	filepath := c.String("output")
+	if len(filepath) == 0 {
+		return s.errorOut(errors.New("Must specify --output/-o flag"))
+	}
+
+	ID := c.Args().Get(0)
+	exportUrl := url.URL{
+		Path: "/v2/keys/csa/export/" + ID,
+	}
+
+	query := exportUrl.Query()
+	query.Set("newpassword", normalizePassword(string(newPassword)))
+
+	exportUrl.RawQuery = query.Encode()
+	resp, err := s.HTTP.Post(s.ctx(), exportUrl.String(), nil)
+	if err != nil {
+		return s.errorOut(errors.Wrap(err, "Could not make HTTP request"))
+	}
+	// Fold any Close error into the named return so it is not lost.
+	defer func() {
+		if cerr := resp.Body.Close(); cerr != nil {
+			err = multierr.Append(err, cerr)
+		}
+	}()
+
+	if resp.StatusCode != http.StatusOK {
+		return s.errorOut(fmt.Errorf("error exporting: %w", httpError(resp)))
+	}
+
+	keyJSON, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return s.errorOut(errors.Wrap(err, "Could not read response body"))
+	}
+
+	// Write with restrictive permissions: the file contains encrypted key material.
+	err = utils.WriteFileWithMaxPerms(filepath, keyJSON, 0o600)
+	if err != nil {
+		return s.errorOut(errors.Wrapf(err, "Could not write %v", filepath))
+	}
+
+	// Fixed message: this command exports a CSA key, not a P2P key
+	// (the old text was copy-pasted from the P2P export command).
+	_, err = os.Stderr.WriteString(fmt.Sprintf("🔑 Exported CSA key %s to %s\n", ID, filepath))
+	if err != nil {
+		return s.errorOut(err)
+	}
+
+	return nil
+}
diff --git a/core/cmd/csa_keys_commands_test.go b/core/cmd/csa_keys_commands_test.go
new file mode 100644
index 00000000..2bd324a1
--- /dev/null
+++ b/core/cmd/csa_keys_commands_test.go
+package cmd_test
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	
"github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestCSAKeyPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + pubKey = "somepubkey" + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.CSAKeyPresenter{ + JAID: cmd.JAID{ID: pubKey}, + CSAKeyResource: presenters.CSAKeyResource{ + JAID: presenters.NewJAID(pubKey), + PubKey: pubKey, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, pubKey) + + // Render many resources + buffer.Reset() + ps := cmd.CSAKeyPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, pubKey) +} + +func TestShell_ListCSAKeys(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + key, err := app.GetKeyStore().CSA().Create() + require.NoError(t, err) + + requireCSAKeyCount(t, app, 1) + + client, r := app.NewShellAndRenderer() + + assert.Nil(t, client.ListCSAKeys(cltest.EmptyCLIContext())) + require.Equal(t, 1, len(r.Renders)) + keys := *r.Renders[0].(*cmd.CSAKeyPresenters) + assert.Equal(t, fmt.Sprintf("csa_%s", key.PublicKeyString()), keys[0].PubKey) +} + +func TestShell_CreateCSAKey(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + + requireCSAKeyCount(t, app, 0) + + require.NoError(t, client.CreateCSAKey(nilContext)) + + requireCSAKeyCount(t, app, 1) +} + +func TestShell_ImportExportCsaKey(t *testing.T) { + t.Parallel() + + defer deleteKeyExportFile(t) + + app := startNewApplicationV2(t, nil) + + 
client, _ := app.NewShellAndRenderer() + _, err := app.GetKeyStore().CSA().Create() + require.NoError(t, err) + + keys := requireCSAKeyCount(t, app, 1) + key := keys[0] + keyName := keyNameForTest(t) + + // Export test invalid id + set := flag.NewFlagSet("test CSA export", 0) + flagSetApplyFromAction(client.ExportCSAKey, set, "") + + require.NoError(t, set.Parse([]string{"0"})) + require.NoError(t, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(t, set.Set("output", keyName)) + + c := cli.NewContext(nil, set, nil) + err = client.ExportCSAKey(c) + require.Error(t, err, "Error exporting") + require.Error(t, utils.JustError(os.Stat(keyName))) + + // Export test + set = flag.NewFlagSet("test CSA export", 0) + flagSetApplyFromAction(client.ExportCSAKey, set, "") + + require.NoError(t, set.Parse([]string{fmt.Sprint(key.ID())})) + require.NoError(t, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(t, set.Set("output", keyName)) + + c = cli.NewContext(nil, set, nil) + + require.NoError(t, client.ExportCSAKey(c)) + require.NoError(t, utils.JustError(os.Stat(keyName))) + + require.NoError(t, utils.JustError(app.GetKeyStore().CSA().Delete(key.ID()))) + requireCSAKeyCount(t, app, 0) + + //Import test + set = flag.NewFlagSet("test CSA import", 0) + flagSetApplyFromAction(client.ImportCSAKey, set, "") + + require.NoError(t, set.Parse([]string{keyName})) + require.NoError(t, set.Set("old-password", "../internal/fixtures/incorrect_password.txt")) + + c = cli.NewContext(nil, set, nil) + require.NoError(t, client.ImportCSAKey(c)) + + requireCSAKeyCount(t, app, 1) +} + +func requireCSAKeyCount(t *testing.T, app plugin.Application, length int) []csakey.KeyV2 { + t.Helper() + + keys, err := app.GetKeyStore().CSA().GetAll() + require.NoError(t, err) + require.Equal(t, length, len(keys)) + return keys +} diff --git a/core/cmd/direct-request-spec-template.yml b/core/cmd/direct-request-spec-template.yml new 
file mode 100644 index 00000000..5774e9a7 --- /dev/null +++ b/core/cmd/direct-request-spec-template.yml @@ -0,0 +1,13 @@ +type = "directrequest" +schemaVersion = 1 +evmChainID = "0" +name = "%s" +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +externalJobID = "%s" +observationSource = """ + ds1 [type=http method=GET url="http://example.com" allowunrestrictednetworkaccess="true"]; + ds1_merge [type=merge left="{}"] + ds1_parse [type=jsonparse path="USD"]; + ds1_multiply [type=multiply times=100]; + ds1 -> ds1_parse -> ds1_multiply; +""" diff --git a/core/cmd/dkgencrypt_keys_commands.go b/core/cmd/dkgencrypt_keys_commands.go new file mode 100644 index 00000000..e51fbf6d --- /dev/null +++ b/core/cmd/dkgencrypt_keys_commands.go @@ -0,0 +1,60 @@ +package cmd + +import ( + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgencryptkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +type DKGEncryptKeyPresenter struct { + JAID + presenters.DKGEncryptKeyResource +} + +var _ TableRenderer = DKGEncryptKeyPresenter{} +var _ TableRenderer = DKGEncryptKeyPresenters{} + +// RenderTable implements TableRenderer +func (p DKGEncryptKeyPresenter) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Public key"} + rows := [][]string{p.ToRow()} + + if _, err := rt.Write([]byte("🔑 DKGEncrypt Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return utils.JustError(rt.Write([]byte("\n"))) +} + +func (p *DKGEncryptKeyPresenter) ToRow() []string { + row := []string{ + p.ID, + p.PublicKey, + } + + return row +} + +type DKGEncryptKeyPresenters []DKGEncryptKeyPresenter + +// RenderTable implements TableRenderer +func (ps DKGEncryptKeyPresenters) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Public key"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + if _, err := rt.Write([]byte("🔑 DKGEncrypt Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return utils.JustError(rt.Write([]byte("\n"))) +} + +// NewDKGEncryptKeysClient must be parameterized with the encrypt-key type; it previously used dkgsignkey.Key (copy-paste from the DKGSign commands), mismatching the DKGEncrypt keystore whose keys are dkgencryptkey.Key (see the adjacent test file's requireDKGEncryptKeyCount). +func NewDKGEncryptKeysClient(s *Shell) KeysClient { + return newKeysClient[dkgencryptkey.Key, DKGEncryptKeyPresenter, DKGEncryptKeyPresenters]("DKGEncrypt", s) +} diff --git a/core/cmd/dkgencrypt_keys_commands_test.go b/core/cmd/dkgencrypt_keys_commands_test.go new file mode 100644 index 00000000..cfbfdbcd --- /dev/null +++ b/core/cmd/dkgencrypt_keys_commands_test.go @@ -0,0 +1,172 @@ +package cmd_test + +import ( + "bytes" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgencryptkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestDKGEncryptKeyPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + id = "1" + pubKey = "somepubkey" + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.DKGEncryptKeyPresenter{ + JAID: cmd.JAID{ID: id}, + DKGEncryptKeyResource: presenters.DKGEncryptKeyResource{ + JAID: presenters.NewJAID(id), + PublicKey: pubKey, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, pubKey) + + // Render many resources + buffer.Reset() + ps := cmd.DKGEncryptKeyPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, pubKey) +} + +func TestShell_DKGEncryptKeys(t *testing.T) { + app := startNewApplicationV2(t, nil) + ks := app.GetKeyStore().DKGEncrypt() + cleanup := func() { + 
keys, err := ks.GetAll() + assert.NoError(t, err) + for _, key := range keys { + assert.NoError(t, utils.JustError(ks.Delete(key.ID()))) + } + requireDKGEncryptKeyCount(t, app, 0) + } + + t.Run("ListDKGEncryptKeys", func(tt *testing.T) { + defer cleanup() + client, r := app.NewShellAndRenderer() + key, err := app.GetKeyStore().DKGEncrypt().Create() + assert.NoError(tt, err) + requireDKGEncryptKeyCount(t, app, 1) + assert.Nil(t, cmd.NewDKGEncryptKeysClient(client).ListKeys(cltest.EmptyCLIContext())) + assert.Equal(t, 1, len(r.Renders)) + keys := *r.Renders[0].(*cmd.DKGEncryptKeyPresenters) + assert.True(t, key.PublicKeyString() == keys[0].PublicKey) + }) + + t.Run("CreateDKGEncryptKey", func(tt *testing.T) { + defer cleanup() + client, _ := app.NewShellAndRenderer() + assert.NoError(tt, cmd.NewDKGEncryptKeysClient(client).CreateKey(nilContext)) + keys, err := app.GetKeyStore().DKGEncrypt().GetAll() + assert.NoError(tt, err) + assert.Len(t, keys, 1) + }) + + t.Run("DeleteDKGEncryptKey", func(tt *testing.T) { + defer cleanup() + client, _ := app.NewShellAndRenderer() + key, err := app.GetKeyStore().DKGEncrypt().Create() + assert.NoError(tt, err) + requireDKGEncryptKeyCount(tt, app, 1) + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(cmd.NewDKGEncryptKeysClient(client).DeleteKey, set, "") + + require.NoError(tt, set.Set("yes", "true")) + + strID := key.ID() + err = set.Parse([]string{strID}) + require.NoError(t, err) + c := cli.NewContext(nil, set, nil) + err = cmd.NewDKGEncryptKeysClient(client).DeleteKey(c) + assert.NoError(tt, err) + requireDKGEncryptKeyCount(tt, app, 0) + }) + + t.Run("ImportExportDKGEncryptKey", func(tt *testing.T) { + defer cleanup() + defer deleteKeyExportFile(tt) + client, _ := app.NewShellAndRenderer() + + _, err := app.GetKeyStore().DKGEncrypt().Create() + require.NoError(tt, err) + + keys := requireDKGEncryptKeyCount(tt, app, 1) + key := keys[0] + t.Log("key id:", key.ID()) + keyName := keyNameForTest(t) + + // Export test 
invalid id + set := flag.NewFlagSet("test DKGEncrypt export", 0) + flagSetApplyFromAction(cmd.NewDKGEncryptKeysClient(client).ExportKey, set, "") + + require.NoError(tt, set.Parse([]string{"0"})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c := cli.NewContext(nil, set, nil) + err = cmd.NewDKGEncryptKeysClient(client).ExportKey(c) + require.Error(tt, err, "Error exporting") + require.Error(tt, utils.JustError(os.Stat(keyName))) + + // Export test + set = flag.NewFlagSet("test DKGEncrypt export", 0) + flagSetApplyFromAction(cmd.NewDKGEncryptKeysClient(client).ExportKey, set, "") + + require.NoError(tt, set.Parse([]string{fmt.Sprint(key.ID())})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c = cli.NewContext(nil, set, nil) + + require.NoError(tt, cmd.NewDKGEncryptKeysClient(client).ExportKey(c)) + require.NoError(tt, utils.JustError(os.Stat(keyName))) + + require.NoError(tt, utils.JustError(app.GetKeyStore().DKGEncrypt().Delete(key.ID()))) + requireDKGEncryptKeyCount(tt, app, 0) + + //Import test + set = flag.NewFlagSet("test DKGEncrypt import", 0) + flagSetApplyFromAction(cmd.NewDKGEncryptKeysClient(client).ImportKey, set, "") + + require.NoError(tt, set.Parse([]string{keyName})) + require.NoError(tt, set.Set("old-password", "../internal/fixtures/incorrect_password.txt")) + + c = cli.NewContext(nil, set, nil) + require.NoError(tt, cmd.NewDKGEncryptKeysClient(client).ImportKey(c)) + + requireDKGEncryptKeyCount(tt, app, 1) + }) +} + +func requireDKGEncryptKeyCount(t *testing.T, app plugin.Application, length int) []dkgencryptkey.Key { + t.Helper() + keys, err := app.GetKeyStore().DKGEncrypt().GetAll() + require.NoError(t, err) + require.Len(t, keys, length) + return keys +} diff --git a/core/cmd/dkgsign_keys_commands.go b/core/cmd/dkgsign_keys_commands.go new file 
mode 100644 index 00000000..071e302d --- /dev/null +++ b/core/cmd/dkgsign_keys_commands.go @@ -0,0 +1,60 @@ +package cmd + +import ( + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgsignkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +type DKGSignKeyPresenter struct { + JAID + presenters.DKGSignKeyResource +} + +var _ TableRenderer = DKGSignKeyPresenter{} +var _ TableRenderer = DKGSignKeyPresenters{} + +// RenderTable implements TableRenderer +func (p DKGSignKeyPresenter) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Public key"} + rows := [][]string{p.ToRow()} + + if _, err := rt.Write([]byte("🔑 DKGSign Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return utils.JustError(rt.Write([]byte("\n"))) +} + +func (p *DKGSignKeyPresenter) ToRow() []string { + row := []string{ + p.ID, + p.PublicKey, + } + + return row +} + +type DKGSignKeyPresenters []DKGSignKeyPresenter + +// RenderTable implements TableRenderer +func (ps DKGSignKeyPresenters) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Public key"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + if _, err := rt.Write([]byte("🔑 DKGSign Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return utils.JustError(rt.Write([]byte("\n"))) +} + +func NewDKGSignKeysClient(s *Shell) KeysClient { + return newKeysClient[dkgsignkey.Key, DKGSignKeyPresenter, DKGSignKeyPresenters]("DKGSign", s) +} diff --git a/core/cmd/dkgsign_keys_commands_test.go b/core/cmd/dkgsign_keys_commands_test.go new file mode 100644 index 00000000..25deded0 --- /dev/null +++ b/core/cmd/dkgsign_keys_commands_test.go @@ -0,0 +1,170 @@ +package cmd_test + +import ( + "bytes" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + 
"github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgsignkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestDKGSignKeyPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + id = "1" + pubKey = "somepubkey" + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.DKGSignKeyPresenter{ + JAID: cmd.JAID{ID: id}, + DKGSignKeyResource: presenters.DKGSignKeyResource{ + JAID: presenters.NewJAID(id), + PublicKey: pubKey, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, pubKey) + + // Render many resources + buffer.Reset() + ps := cmd.DKGSignKeyPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, pubKey) +} + +func TestShell_DKGSignKeys(t *testing.T) { + app := startNewApplicationV2(t, nil) + ks := app.GetKeyStore().DKGSign() + cleanup := func() { + keys, err := ks.GetAll() + assert.NoError(t, err) + for _, key := range keys { + assert.NoError(t, utils.JustError(ks.Delete(key.ID()))) + } + requireDKGSignKeyCount(t, app, 0) + } + + t.Run("ListDKGSignKeys", func(tt *testing.T) { + defer cleanup() + client, r := app.NewShellAndRenderer() + key, err := app.GetKeyStore().DKGSign().Create() + assert.NoError(tt, err) + requireDKGSignKeyCount(t, app, 1) + assert.Nil(t, cmd.NewDKGSignKeysClient(client).ListKeys(cltest.EmptyCLIContext())) + assert.Equal(t, 1, len(r.Renders)) + keys := *r.Renders[0].(*cmd.DKGSignKeyPresenters) + assert.True(t, key.PublicKeyString() == keys[0].PublicKey) + }) + + t.Run("CreateDKGSignKey", func(tt *testing.T) { + defer cleanup() + client, _ := 
app.NewShellAndRenderer() + assert.NoError(tt, cmd.NewDKGSignKeysClient(client).CreateKey(nilContext)) + keys, err := app.GetKeyStore().DKGSign().GetAll() + assert.NoError(tt, err) + assert.Len(t, keys, 1) + }) + + t.Run("DeleteDKGSignKey", func(tt *testing.T) { + defer cleanup() + client, _ := app.NewShellAndRenderer() + key, err := app.GetKeyStore().DKGSign().Create() + assert.NoError(tt, err) + requireDKGSignKeyCount(tt, app, 1) + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(cmd.NewDKGSignKeysClient(client).DeleteKey, set, "") + + require.NoError(tt, set.Set("yes", "true")) + strID := key.ID() + err = set.Parse([]string{strID}) + require.NoError(t, err) + c := cli.NewContext(nil, set, nil) + err = cmd.NewDKGSignKeysClient(client).DeleteKey(c) + assert.NoError(tt, err) + requireDKGSignKeyCount(tt, app, 0) + }) + + t.Run("ImportExportDKGSignKey", func(tt *testing.T) { + defer cleanup() + defer deleteKeyExportFile(tt) + client, _ := app.NewShellAndRenderer() + + _, err := app.GetKeyStore().DKGSign().Create() + require.NoError(tt, err) + + keys := requireDKGSignKeyCount(tt, app, 1) + key := keys[0] + t.Log("key id:", key.ID()) + keyName := keyNameForTest(t) + + // Export test invalid id + set := flag.NewFlagSet("test DKGSign export", 0) + flagSetApplyFromAction(cmd.NewDKGSignKeysClient(client).ExportKey, set, "") + + require.NoError(tt, set.Parse([]string{"0"})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c := cli.NewContext(nil, set, nil) + err = cmd.NewDKGSignKeysClient(client).ExportKey(c) + require.Error(tt, err) + require.Error(tt, utils.JustError(os.Stat(keyName))) + + // Export test + set = flag.NewFlagSet("test DKGSign export", 0) + flagSetApplyFromAction(cmd.NewDKGSignKeysClient(client).ExportKey, set, "") + + require.NoError(tt, set.Parse([]string{fmt.Sprint(key.ID())})) + require.NoError(tt, set.Set("new-password", 
"../internal/fixtures/incorrect_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c = cli.NewContext(nil, set, nil) + + require.NoError(tt, cmd.NewDKGSignKeysClient(client).ExportKey(c)) + require.NoError(tt, utils.JustError(os.Stat(keyName))) + + require.NoError(tt, utils.JustError(app.GetKeyStore().DKGSign().Delete(key.ID()))) + requireDKGSignKeyCount(tt, app, 0) + + set = flag.NewFlagSet("test DKGSign import", 0) + flagSetApplyFromAction(cmd.NewDKGSignKeysClient(client).ImportKey, set, "") + + require.NoError(tt, set.Parse([]string{keyName})) + require.NoError(tt, set.Set("old-password", "../internal/fixtures/incorrect_password.txt")) + + c = cli.NewContext(nil, set, nil) + require.NoError(tt, cmd.NewDKGSignKeysClient(client).ImportKey(c)) + + requireDKGSignKeyCount(tt, app, 1) + }) +} + +func requireDKGSignKeyCount(t *testing.T, app plugin.Application, length int) []dkgsignkey.Key { + t.Helper() + keys, err := app.GetKeyStore().DKGSign().GetAll() + require.NoError(t, err) + require.Len(t, keys, length) + return keys +} diff --git a/core/cmd/doc.go b/core/cmd/doc.go new file mode 100644 index 00000000..464cc423 --- /dev/null +++ b/core/cmd/doc.go @@ -0,0 +1,25 @@ +// Package cmd is the front-end interface for the application +// as a command-line utility. +// +// # KeyStoreAuthenticator +// +// KeyStoreAuthenticator prompts the user for their password, which +// is used to unlock their keystore file to interact with the +// Ethereum blockchain. Since multiple keystore files can exist +// at the configured directory, the KeyStoreAuthenticator will try the +// password on all keystore files present. +// +// # Shell +// +// Shell is how the application is invoked from the command +// line. When you run the binary, for example `./plugin n`, +// Shell.RunNode is called to start the Plugin core. 
+// Similarly, running `./plugin j` returns information on +// all jobs in the node, and `./plugin s` with another +// argument as a JobID gives information specific to that job. +// +// # Renderer +// +// Renderer helps format and display data (based on the kind +// of data it is) to the command line. +package cmd diff --git a/core/cmd/errors.go b/core/cmd/errors.go new file mode 100644 index 00000000..24f38421 --- /dev/null +++ b/core/cmd/errors.go @@ -0,0 +1,15 @@ +package cmd + +import ( + "fmt" + "io" + "net/http" +) + +func httpError(resp *http.Response) error { + errResult, err2 := io.ReadAll(resp.Body) + if err2 != nil { + return fmt.Errorf("status %d %q: error reading body %w", resp.StatusCode, http.StatusText(resp.StatusCode), err2) + } + return fmt.Errorf("status %d %q: %s", resp.StatusCode, http.StatusText(resp.StatusCode), string(errResult)) +} diff --git a/core/cmd/eth_keys_commands.go b/core/cmd/eth_keys_commands.go new file mode 100644 index 00000000..12fbabc1 --- /dev/null +++ b/core/cmd/eth_keys_commands.go @@ -0,0 +1,403 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + + cutils "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initEthKeysSubCmd(s *Shell) cli.Command { + return cli.Command{ + Name: "eth", + Usage: "Remote commands for administering the node's Ethereum keys", + Subcommands: cli.Commands{ + { + Name: "create", + Usage: "Create a key in the node's keystore alongside the existing key; to create an original key, just run the node", + Action: s.CreateETHKey, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "evm-chain-id, evmChainID", + Usage: "Chain ID for the key. 
If left blank, default chain will be used.", + }, + cli.Uint64Flag{ + Name: "max-gas-price-gwei, maxGasPriceGWei", + Usage: "Optional maximum gas price (GWei) for the creating key.", + }, + }, + }, + { + Name: "list", + Usage: "List available Ethereum accounts with their ETH & PLI balances and other metadata", + Action: s.ListETHKeys, + }, + { + Name: "delete", + Usage: format(`Delete the ETH key by address (irreversible!)`), + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "yes, y", + Usage: "skip the confirmation prompt", + }, + }, + Action: s.DeleteETHKey, + }, + { + Name: "import", + Usage: format(`Import an ETH key from a JSON file`), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "old-password, oldpassword, p", + Usage: "`FILE` containing the password used to encrypt the key in the JSON file", + }, + cli.StringFlag{ + Name: "evm-chain-id, evmChainID", + Usage: "Chain ID for the key. If left blank, default chain will be used.", + }, + }, + Action: s.ImportETHKey, + }, + { + Name: "export", + Usage: format(`Exports an ETH key to a JSON file`), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "new-password, newpassword, p", + Usage: "`FILE` containing the password to encrypt the key (required)", + }, + cli.StringFlag{ + Name: "output, o", + Usage: "Path where the JSON file will be saved (required)", + }, + }, + Action: s.ExportETHKey, + }, + { + Name: "chain", + Usage: "Update an EVM key for the given chain", + Action: s.UpdateChainEVMKey, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "address", + Usage: "address of the key", + Required: true, + }, + cli.StringFlag{ + Name: "evm-chain-id, evmChainID", + Usage: "chain ID of the key", + Required: true, + }, + cli.BoolFlag{ + Name: "enable", + Usage: "enable the key for the given chain", + }, + cli.BoolFlag{ + Name: "disable", + Usage: "disable the key for the given chain", + }, + cli.BoolFlag{ + Name: "abandon", + Usage: "if set, will abandon all pending and unconfirmed transactions and mark them as fatally errored. 
Use with caution, this can result in nonce gaps or 'stuck' transactions", + }, + }, + }, + }, + } +} + +type EthKeyPresenter struct { + presenters.ETHKeyResource +} + +func (p *EthKeyPresenter) ToRow() []string { + eth := "Unknown" + if p.EthBalance != nil { + eth = p.EthBalance.String() + } + link := "Unknown" + if p.LinkBalance != nil { + link = p.LinkBalance.String() + } + gas := "None" + if p.MaxGasPriceWei != nil { + gas = p.MaxGasPriceWei.String() + } + return []string{ + p.Address, + p.EVMChainID.String(), + eth, + link, + fmt.Sprintf("%v", p.Disabled), + p.CreatedAt.String(), + p.UpdatedAt.String(), + gas, + } +} + +var ethKeysTableHeaders = []string{"Address", "EVM Chain ID", "ETH", "PLI", "Disabled", "Created", "Updated", "Max Gas Price Wei"} + +// RenderTable implements TableRenderer +func (p *EthKeyPresenter) RenderTable(rt RendererTable) error { + rows := [][]string{p.ToRow()} + + renderList(ethKeysTableHeaders, rows, rt.Writer) + + return cutils.JustError(rt.Write([]byte("\n"))) +} + +type EthKeyPresenters []EthKeyPresenter + +// RenderTable implements TableRenderer +func (ps EthKeyPresenters) RenderTable(rt RendererTable) error { + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(ethKeysTableHeaders, rows, rt.Writer) + + return nil +} + +// ListETHKeys renders the active account address with its ETH & PLI balance +func (s *Shell) ListETHKeys(_ *cli.Context) (err error) { + resp, err := s.HTTP.Get(s.ctx(), "/v2/keys/evm") + + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &EthKeyPresenters{}, "🔑 ETH keys") +} + +// CreateETHKey creates a new ethereum key with the same password +// as the one used to unlock the existing key. 
+func (s *Shell) CreateETHKey(c *cli.Context) (err error) { + createUrl := url.URL{ + Path: "/v2/keys/evm", + } + query := createUrl.Query() + + if c.IsSet("evm-chain-id") { + query.Set("evmChainID", c.String("evm-chain-id")) + } + if c.IsSet("max-gas-price-gwei") { + query.Set("maxGasPriceGWei", c.String("max-gas-price-gwei")) + } + + createUrl.RawQuery = query.Encode() + resp, err := s.HTTP.Post(s.ctx(), createUrl.String(), nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &EthKeyPresenter{}, "ETH key created.\n\n🔑 New key") +} + +// DeleteETHKey hard deletes an Ethereum key, +// address of key must be passed +func (s *Shell) DeleteETHKey(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the address of the key to be deleted")) + } + address := c.Args().Get(0) + + if !confirmAction(c) { + return nil + } + + resp, err := s.HTTP.Delete(s.ctx(), "/v2/keys/evm/"+address) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return s.errorOut(errors.Wrap(err, "Failed to read request response")) + } + var result *models.JSONAPIErrors + err = json.Unmarshal(body, &result) + if err != nil { + return s.errorOut(errors.Wrapf(err, "Unable to unmarshal json from body '%s'", string(body))) + } + return s.errorOut(errors.Errorf("Delete ETH key failed: %s", result.Error())) + } + return s.renderAPIResponse(resp, &EthKeyPresenter{}, fmt.Sprintf("🔑 Deleted ETH key: %s\n", address)) +} + +// ImportETHKey imports an Ethereum key, +// file path must be passed +func (s *Shell) ImportETHKey(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the 
filepath of the key to be imported")) + } + + oldPasswordFile := c.String("old-password") + if len(oldPasswordFile) == 0 { + return s.errorOut(errors.New("Must specify --old-password/-p flag")) + } + oldPassword, err := os.ReadFile(oldPasswordFile) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read password file")) + } + + filepath := c.Args().Get(0) + keyJSON, err := os.ReadFile(filepath) + if err != nil { + return s.errorOut(err) + } + + importUrl := url.URL{ + Path: "/v2/keys/evm/import", + } + query := importUrl.Query() + + query.Set("oldpassword", strings.TrimSpace(string(oldPassword))) + + if c.IsSet("evmChainID") { + query.Set("evmChainID", c.String("evmChainID")) + } + + importUrl.RawQuery = query.Encode() + resp, err := s.HTTP.Post(s.ctx(), importUrl.String(), bytes.NewReader(keyJSON)) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &EthKeyPresenter{}, "🔑 Imported ETH key") +} + +// ExportETHKey exports an ETH key, +// address must be passed +func (s *Shell) ExportETHKey(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the address of the key to export")) + } + + newPasswordFile := c.String("new-password") + if len(newPasswordFile) == 0 { + return s.errorOut(errors.New("Must specify --new-password/-p flag")) + } + newPassword, err := os.ReadFile(newPasswordFile) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read password file")) + } + + // Validate the output path itself; this previously re-checked len(newPassword), + // so a missing --output flag was never reported. + filepath := c.String("output") + if len(filepath) == 0 { + return s.errorOut(errors.New("Must specify --output/-o flag")) + } + + address := c.Args().Get(0) + exportUrl := url.URL{ + Path: "/v2/keys/evm/export/" + address, + } + query := exportUrl.Query() + query.Set("newpassword", strings.TrimSpace(string(newPassword))) + + exportUrl.RawQuery = query.Encode() + resp, err := 
s.HTTP.Post(s.ctx(), exportUrl.String(), nil) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not make HTTP request")) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + if resp.StatusCode != http.StatusOK { + return s.errorOut(fmt.Errorf("error exporting: %w", httpError(resp))) + } + + keyJSON, err := io.ReadAll(resp.Body) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read response body")) + } + + err = utils.WriteFileWithMaxPerms(filepath, keyJSON, 0o600) + if err != nil { + return s.errorOut(errors.Wrapf(err, "Could not write %v", filepath)) + } + + _, err = os.Stderr.WriteString("🔑 Exported ETH key " + address + " to " + filepath + "\n") + if err != nil { + return s.errorOut(err) + } + + return nil +} + +// UpdateChainEVMKey updates settings for the given key on the given chain +func (s *Shell) UpdateChainEVMKey(c *cli.Context) (err error) { + chainURL := url.URL{Path: "/v2/keys/evm/chain"} + query := chainURL.Query() + + addr := c.String("address") + query.Set("address", addr) + cid := c.String("evmChainID") + query.Set("evmChainID", cid) + abandon := c.String("abandon") + query.Set("abandon", abandon) + + if c.IsSet("enable") && c.IsSet("disable") { + return s.errorOut(errors.New("cannot set both --enable and --disable simultaneously")) + } else if c.Bool("enable") { + query.Set("enabled", "true") + } else if c.Bool("disable") { + query.Set("enabled", "false") + } + + chainURL.RawQuery = query.Encode() + resp, err := s.HTTP.Post(s.ctx(), chainURL.String(), nil) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not make HTTP request")) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + if resp.StatusCode != http.StatusOK { + return s.errorOut(fmt.Errorf("error resetting key: %w", httpError(resp))) + } + + return s.renderAPIResponse(resp, &EthKeyPresenter{}, "🔑 Updated ETH key") +} diff 
--git a/core/cmd/eth_keys_commands_test.go b/core/cmd/eth_keys_commands_test.go new file mode 100644 index 00000000..389b982e --- /dev/null +++ b/core/cmd/eth_keys_commands_test.go @@ -0,0 +1,454 @@ +package cmd_test + +import ( + "bytes" + "flag" + "math/big" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/pkg/errors" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/utils" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" +) + +func ptr[T any](t T) *T { return &t } + +func TestEthKeysPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + address = "0x5431F5F973781809D18643b87B44921b11355d81" + ethBalance = assets.NewEth(1) + linkBalance = commonassets.NewLinkFromJuels(2) + isDisabled = true + createdAt = time.Now() + updatedAt = time.Now().Add(time.Second) + maxGasPriceWei = ubig.NewI(12345) + bundleID = cltest.DefaultOCRKeyBundleID + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.EthKeyPresenter{ + ETHKeyResource: presenters.ETHKeyResource{ + JAID: presenters.NewJAID(bundleID), + Address: address, + EthBalance: ethBalance, + LinkBalance: linkBalance, + Disabled: isDisabled, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + MaxGasPriceWei: maxGasPriceWei, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, address) + assert.Contains(t, output, 
ethBalance.String()) + assert.Contains(t, output, linkBalance.String()) + assert.Contains(t, output, strconv.FormatBool(isDisabled)) + assert.Contains(t, output, createdAt.String()) + assert.Contains(t, output, updatedAt.String()) + assert.Contains(t, output, maxGasPriceWei.String()) + + // Render many resources + buffer.Reset() + ps := cmd.EthKeyPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, address) + assert.Contains(t, output, ethBalance.String()) + assert.Contains(t, output, linkBalance.String()) + assert.Contains(t, output, strconv.FormatBool(isDisabled)) + assert.Contains(t, output, createdAt.String()) + assert.Contains(t, output, updatedAt.String()) + assert.Contains(t, output, maxGasPriceWei.String()) +} + +func TestShell_ListETHKeys(t *testing.T) { + t.Parallel() + + ethClient := newEthMock(t) + ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(42), nil) + ethClient.On("PLIBalance", mock.Anything, mock.Anything, mock.Anything).Return(commonassets.NewLinkFromJuels(13), nil) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }, + withKey(), + withMocks(ethClient), + ) + client, r := app.NewShellAndRenderer() + + assert.Nil(t, client.ListETHKeys(cltest.EmptyCLIContext())) + require.Equal(t, 1, len(r.Renders)) + balances := *r.Renders[0].(*cmd.EthKeyPresenters) + assert.Equal(t, app.Keys[0].Address.Hex(), balances[0].Address) + assert.Equal(t, "0.000000000000000042", balances[0].EthBalance.String()) + assert.Equal(t, "13", balances[0].LinkBalance.String()) +} + +func TestShell_ListETHKeys_Error(t *testing.T) { + t.Parallel() + + ethClient := newEthMock(t) + ethClient.On("BalanceAt", mock.Anything, mock.Anything, 
mock.Anything).Return(nil, errors.New("fake error")) + ethClient.On("PLIBalance", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("fake error")) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }, + withKey(), + withMocks(ethClient), + ) + client, r := app.NewShellAndRenderer() + + assert.Nil(t, client.ListETHKeys(cltest.EmptyCLIContext())) + require.Equal(t, 1, len(r.Renders)) + balances := *r.Renders[0].(*cmd.EthKeyPresenters) + assert.Equal(t, app.Keys[0].Address.Hex(), balances[0].Address) + assert.Nil(t, balances[0].EthBalance) + assert.Nil(t, balances[0].LinkBalance) +} + +func TestShell_ListETHKeys_Disabled(t *testing.T) { + t.Parallel() + + ethClient := newEthMock(t) + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(false) + }, + withKey(), + withMocks(ethClient), + ) + client, r := app.NewShellAndRenderer() + keys, err := app.KeyStore.Eth().GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + k := keys[0] + + assert.Nil(t, client.ListETHKeys(cltest.EmptyCLIContext())) + require.Equal(t, 1, len(r.Renders)) + balances := *r.Renders[0].(*cmd.EthKeyPresenters) + assert.Equal(t, app.Keys[0].Address.Hex(), balances[0].Address) + assert.Nil(t, balances[0].EthBalance) + assert.Nil(t, balances[0].LinkBalance) + assert.Nil(t, balances[0].MaxGasPriceWei) + assert.Equal(t, []string{ + k.Address.String(), "0", "Unknown", "Unknown", "false", + balances[0].UpdatedAt.String(), balances[0].CreatedAt.String(), "None", + }, balances[0].ToRow()) +} + +func TestShell_CreateETHKey(t *testing.T) { + t.Parallel() + + ethClient := newEthMock(t) + ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(42), nil) + 
ethClient.On("PLIBalance", mock.Anything, mock.Anything, mock.Anything).Return(commonassets.NewLinkFromJuels(42), nil) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }, + withKey(), + withMocks(ethClient), + ) + db := app.GetSqlxDB() + client, _ := app.NewShellAndRenderer() + + cltest.AssertCount(t, db, "evm.key_states", 1) // The initial funding key + keys, err := app.KeyStore.Eth().GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + + id := big.NewInt(0) + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.CreateETHKey, set, "") + + require.NoError(t, set.Set("evm-chain-id", testutils.FixtureChainID.String())) + + c := cli.NewContext(nil, set, nil) + require.NoError(t, set.Parse([]string{"-evm-chain-id", id.String()})) + assert.NoError(t, client.CreateETHKey(c)) + + cltest.AssertCount(t, db, "evm.key_states", 2) + keys, err = app.KeyStore.Eth().GetAll() + require.NoError(t, err) + require.Equal(t, 2, len(keys)) +} + +func TestShell_DeleteETHKey(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }, + withKey(), + ) + ethKeyStore := app.GetKeyStore().Eth() + client, _ := app.NewShellAndRenderer() + + // Create the key + key, err := ethKeyStore.Create(&cltest.FixtureChainID) + require.NoError(t, err) + + // Delete the key + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteETHKey, set, "") + + require.NoError(t, set.Set("yes", "true")) + require.NoError(t, set.Parse([]string{key.Address.Hex()})) + + c := cli.NewContext(nil, set, nil) + err = client.DeleteETHKey(c) + require.NoError(t, err) + + _, err = 
ethKeyStore.Get(key.Address.Hex()) + assert.Error(t, err) +} + +func TestShell_ImportExportETHKey_NoChains(t *testing.T) { + t.Parallel() + + t.Cleanup(func() { deleteKeyExportFile(t) }) + + ethClient := newEthMock(t) + ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(42), nil) + ethClient.On("PLIBalance", mock.Anything, mock.Anything, mock.Anything).Return(commonassets.NewLinkFromJuels(42), nil) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }, + withMocks(ethClient), + ) + client, r := app.NewShellAndRenderer() + ethKeyStore := app.GetKeyStore().Eth() + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RemoteLogin, set, "") + + require.NoError(t, set.Set("file", "internal/fixtures/apicredentials")) + require.NoError(t, set.Set("bypass-version-check", "true")) + + c := cli.NewContext(nil, set, nil) + err := client.RemoteLogin(c) + require.NoError(t, err) + + err = client.ListETHKeys(c) + require.NoError(t, err) + keys := *r.Renders[0].(*cmd.EthKeyPresenters) + require.Len(t, keys, 1) + address := keys[0].Address + + r.Renders = nil + + // Export the key + testdir := filepath.Join(os.TempDir(), t.Name()) + err = os.MkdirAll(testdir, 0700|os.ModeDir) + require.NoError(t, err) + defer os.RemoveAll(testdir) + keyfilepath := filepath.Join(testdir, "key") + set = flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.ExportETHKey, set, "") + + require.NoError(t, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(t, set.Set("output", keyfilepath)) + require.NoError(t, set.Parse([]string{address})) + + c = cli.NewContext(nil, set, nil) + err = client.ExportETHKey(c) + require.NoError(t, err) + + // Delete the key + set = 
flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteETHKey, set, "") + + require.NoError(t, set.Set("yes", "true")) + require.NoError(t, set.Parse([]string{address})) + + c = cli.NewContext(nil, set, nil) + err = client.DeleteETHKey(c) + require.NoError(t, err) + _, err = ethKeyStore.Get(address) + require.Error(t, err) + + cltest.AssertCount(t, app.GetSqlxDB(), "evm.key_states", 0) + + // Import the key + set = flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.ImportETHKey, set, "") + + require.NoError(t, set.Set("evmChainID", testutils.FixtureChainID.String())) + require.NoError(t, set.Set("old-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(t, set.Parse([]string{keyfilepath})) + + c = cli.NewContext(nil, set, nil) + err = client.ImportETHKey(c) + require.NoError(t, err) + + r.Renders = nil + + set = flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.ListETHKeys, set, "") + c = cli.NewContext(nil, set, nil) + err = client.ListETHKeys(c) + require.NoError(t, err) + require.Len(t, *r.Renders[0].(*cmd.EthKeyPresenters), 1) + _, err = ethKeyStore.Get(address) + require.NoError(t, err) + + // Export test invalid id + keyName := keyNameForTest(t) + set = flag.NewFlagSet("test Eth export invalid id", 0) + flagSetApplyFromAction(client.ExportETHKey, set, "") + + require.NoError(t, set.Parse([]string{"999"})) + require.NoError(t, set.Set("new-password", "../internal/fixtures/apicredentials")) + require.NoError(t, set.Set("output", "keyName")) + + c = cli.NewContext(nil, set, nil) + err = client.ExportETHKey(c) + require.Error(t, err, "Error exporting") + require.Error(t, utils.JustError(os.Stat(keyName))) +} +func TestShell_ImportExportETHKey_WithChains(t *testing.T) { + t.Parallel() + + t.Cleanup(func() { deleteKeyExportFile(t) }) + + ethClient := newEthMock(t) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + app := startNewApplicationV2(t, func(c *plugin.Config, s 
*plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }, + withMocks(ethClient), + ) + client, r := app.NewShellAndRenderer() + ethKeyStore := app.GetKeyStore().Eth() + + ethClient.On("Dial", mock.Anything).Maybe() + ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(42), nil) + ethClient.On("PLIBalance", mock.Anything, mock.Anything, mock.Anything).Return(commonassets.NewLinkFromJuels(42), nil) + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RemoteLogin, set, "") + + require.NoError(t, set.Set("file", "internal/fixtures/apicredentials")) + require.NoError(t, set.Set("bypass-version-check", "true")) + + c := cli.NewContext(nil, set, nil) + err := client.RemoteLogin(c) + require.NoError(t, err) + + err = client.ListETHKeys(c) + require.NoError(t, err) + keys := *r.Renders[0].(*cmd.EthKeyPresenters) + require.Len(t, keys, 1) + address := keys[0].Address + + r.Renders = nil + + // Export the key + testdir := filepath.Join(os.TempDir(), t.Name()) + err = os.MkdirAll(testdir, 0700|os.ModeDir) + require.NoError(t, err) + defer os.RemoveAll(testdir) + keyfilepath := filepath.Join(testdir, "key") + set = flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.ExportETHKey, set, "") + + require.NoError(t, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(t, set.Set("output", keyfilepath)) + require.NoError(t, set.Parse([]string{address})) + + c = cli.NewContext(nil, set, nil) + err = client.ExportETHKey(c) + require.NoError(t, err) + + // Delete the key + set = flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteETHKey, set, "") + + require.NoError(t, set.Set("yes", "true")) + require.NoError(t, set.Parse([]string{address})) + + c = cli.NewContext(nil, set, nil) + err = client.DeleteETHKey(c) + require.NoError(t, err) + _, err = ethKeyStore.Get(address) + require.Error(t, err) + 
+ // Import the key + set = flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.ImportETHKey, set, "") + + require.NoError(t, set.Set("evmChainID", testutils.FixtureChainID.String())) + require.NoError(t, set.Set("evmChainID", testutils.FixtureChainID.String())) + require.NoError(t, set.Set("old-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(t, set.Parse([]string{keyfilepath})) + + c = cli.NewContext(nil, set, nil) + err = client.ImportETHKey(c) + require.NoError(t, err) + + r.Renders = nil + + set = flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.ListETHKeys, set, "") + c = cli.NewContext(nil, set, nil) + err = client.ListETHKeys(c) + require.NoError(t, err) + require.Len(t, *r.Renders[0].(*cmd.EthKeyPresenters), 1) + _, err = ethKeyStore.Get(address) + require.NoError(t, err) + + // Export test invalid id + keyName := keyNameForTest(t) + set = flag.NewFlagSet("test Eth export invalid id", 0) + flagSetApplyFromAction(client.ExportETHKey, set, "") + + require.NoError(t, set.Parse([]string{"999"})) + require.NoError(t, set.Set("new-password", "../internal/fixtures/apicredentials")) + require.NoError(t, set.Set("output", keyName)) + + c = cli.NewContext(nil, set, nil) + err = client.ExportETHKey(c) + require.Error(t, err, "Error exporting") + require.Error(t, utils.JustError(os.Stat(keyName))) +} diff --git a/core/cmd/evm_chains_commands.go b/core/cmd/evm_chains_commands.go new file mode 100644 index 00000000..90b30f44 --- /dev/null +++ b/core/cmd/evm_chains_commands.go @@ -0,0 +1,48 @@ +package cmd + +import ( + "strconv" + + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// EVMChainPresenter implements TableRenderer for an EVMChainResource. +type EVMChainPresenter struct { + presenters.EVMChainResource +} + +// ToRow presents the EVMChainResource as a slice of strings. 
+func (p *EVMChainPresenter) ToRow() []string { + return []string{p.GetID(), strconv.FormatBool(p.Enabled), p.Config} +} + +// RenderTable implements TableRenderer +// Just renders a single row +func (p EVMChainPresenter) RenderTable(rt RendererTable) error { + rows := [][]string{} + rows = append(rows, p.ToRow()) + + renderList(chainHeaders, rows, rt.Writer) + + return nil +} + +// EVMChainPresenters implements TableRenderer for a slice of EVMChainPresenters. +type EVMChainPresenters []EVMChainPresenter + +// RenderTable implements TableRenderer +func (ps EVMChainPresenters) RenderTable(rt RendererTable) error { + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(chainHeaders, rows, rt.Writer) + + return nil +} + +func EVMChainClient(s *Shell) ChainClient { + return newChainClient[EVMChainPresenters](s, "evm") +} diff --git a/core/cmd/evm_chains_commands_test.go b/core/cmd/evm_chains_commands_test.go new file mode 100644 index 00000000..1c0e3f17 --- /dev/null +++ b/core/cmd/evm_chains_commands_test.go @@ -0,0 +1,38 @@ +package cmd_test + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + client2 "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func newRandChainID() *big.Big { + return big.New(testutils.NewRandomEVMChainID()) +} + +func TestShell_IndexEVMChains(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + client, r := app.NewShellAndRenderer() + + require.Nil(t, 
cmd.EVMChainClient(client).IndexChains(cltest.EmptyCLIContext())) + chains := *r.Renders[0].(*cmd.EVMChainPresenters) + require.Len(t, chains, 1) + c := chains[0] + assert.Equal(t, strconv.Itoa(client2.NullClientChainID), c.ID) + assertTableRenders(t, r) +} diff --git a/core/cmd/evm_node_commands.go b/core/cmd/evm_node_commands.go new file mode 100644 index 00000000..7cdf3c2b --- /dev/null +++ b/core/cmd/evm_node_commands.go @@ -0,0 +1,44 @@ +package cmd + +import ( + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// EVMNodePresenter implements TableRenderer for an EVMNodeResource. +type EVMNodePresenter struct { + presenters.EVMNodeResource +} + +// ToRow presents the EVMNodeResource as a slice of strings. +func (p *EVMNodePresenter) ToRow() []string { + return []string{p.Name, p.ChainID, p.State, p.Config} +} + +// RenderTable implements TableRenderer +func (p EVMNodePresenter) RenderTable(rt RendererTable) error { + var rows [][]string + rows = append(rows, p.ToRow()) + renderList(nodeHeaders, rows, rt.Writer) + + return nil +} + +// EVMNodePresenters implements TableRenderer for a slice of EVMNodePresenter. 
+type EVMNodePresenters []EVMNodePresenter + +// RenderTable implements TableRenderer +func (ps EVMNodePresenters) RenderTable(rt RendererTable) error { + var rows [][]string + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(nodeHeaders, rows, rt.Writer) + + return nil +} + +func NewEVMNodeClient(s *Shell) NodeClient { + return newNodeClient[EVMNodePresenters](s, "evm") +} diff --git a/core/cmd/evm_node_commands_test.go b/core/cmd/evm_node_commands_test.go new file mode 100644 index 00000000..5fcf1fd6 --- /dev/null +++ b/core/cmd/evm_node_commands_test.go @@ -0,0 +1,94 @@ +package cmd_test + +import ( + "bytes" + "strings" + "testing" + + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func assertTableRenders(t *testing.T, r *cltest.RendererMock) { + // Should be no error rendering any of the responses as tables + b := bytes.NewBuffer([]byte{}) + tb := cmd.RendererTable{b} + for _, rn := range r.Renders { + require.NoError(t, tb.Render(rn)) + } +} + +func TestShell_IndexEVMNodes(t *testing.T) { + t.Parallel() + + chainID := newRandChainID() + node1 := evmcfg.Node{ + Name: ptr("Test node 1"), + WSURL: commonconfig.MustParseURL("ws://localhost:8546"), + HTTPURL: commonconfig.MustParseURL("http://localhost:8546"), + SendOnly: ptr(false), + Order: ptr(int32(15)), + } + node2 := evmcfg.Node{ + Name: ptr("Test node 2"), + WSURL: commonconfig.MustParseURL("ws://localhost:8547"), + HTTPURL: commonconfig.MustParseURL("http://localhost:8547"), + SendOnly: ptr(false), + Order: ptr(int32(36)), + } + chain := evmcfg.EVMConfig{ + ChainID: chainID, + Chain: evmcfg.Defaults(chainID), + 
Nodes: evmcfg.EVMNodes{&node1, &node2}, + } + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM = evmcfg.EVMConfigs{&chain} + }) + client, r := app.NewShellAndRenderer() + + require.Nil(t, cmd.NewEVMNodeClient(client).IndexNodes(cltest.EmptyCLIContext())) + require.NotEmpty(t, r.Renders) + nodes := *r.Renders[0].(*cmd.EVMNodePresenters) + require.Len(t, nodes, 2) + n1 := nodes[0] + n2 := nodes[1] + assert.Equal(t, chainID.String(), n1.ChainID) + assert.Equal(t, cltest.FormatWithPrefixedChainID(chainID.String(), *node1.Name), n1.ID) + assert.Equal(t, *node1.Name, n1.Name) + wantConfig, err := toml.Marshal(node1) + require.NoError(t, err) + assert.Equal(t, string(wantConfig), n1.Config) + assert.Equal(t, chainID.String(), n2.ChainID) + assert.Equal(t, cltest.FormatWithPrefixedChainID(chainID.String(), *node2.Name), n2.ID) + assert.Equal(t, *node2.Name, n2.Name) + wantConfig2, err := toml.Marshal(node2) + require.NoError(t, err) + assert.Equal(t, string(wantConfig2), n2.Config) + assertTableRenders(t, r) + + //Render table and check the fields order + b := new(bytes.Buffer) + rt := cmd.RendererTable{b} + require.NoError(t, nodes.RenderTable(rt)) + renderLines := strings.Split(b.String(), "\n") + assert.Equal(t, 23, len(renderLines)) + assert.Contains(t, renderLines[2], "Name") + assert.Contains(t, renderLines[2], n1.Name) + assert.Contains(t, renderLines[3], "Chain ID") + assert.Contains(t, renderLines[3], n1.ChainID) + assert.Contains(t, renderLines[4], "State") + assert.Contains(t, renderLines[4], n1.State) + assert.Contains(t, renderLines[12], "Name") + assert.Contains(t, renderLines[12], n2.Name) + assert.Contains(t, renderLines[13], "Chain ID") + assert.Contains(t, renderLines[13], n2.ChainID) + assert.Contains(t, renderLines[14], "State") + assert.Contains(t, renderLines[14], n2.State) +} diff --git a/core/cmd/evm_transaction_commands.go b/core/cmd/evm_transaction_commands.go new file mode 100644 index 00000000..2fe9be82 --- 
/dev/null +++ b/core/cmd/evm_transaction_commands.go @@ -0,0 +1,213 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math/big" + + "github.com/urfave/cli" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initEVMTxSubCmd(s *Shell) cli.Command { + return cli.Command{ + Name: "evm", + Usage: "Commands for handling EVM transactions", + Subcommands: []cli.Command{ + { + Name: "create", + Usage: "Send ETH (or wei) from node ETH account to destination .", + Action: s.SendEther, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "force", + Usage: "allows to send a higher amount than the account's balance", + }, + cli.BoolFlag{ + Name: "eth", + Usage: "allows to send ETH amounts (Default behavior)", + }, + cli.BoolFlag{ + Name: "wei", + Usage: "allows to send WEI amounts", + }, + cli.Int64Flag{ + Name: "id", + Usage: "chain ID", + }, + }, + }, + { + Name: "list", + Usage: "List the Ethereum Transactions in descending order", + Action: s.IndexTransactions, + Flags: []cli.Flag{ + cli.IntFlag{ + Name: "page", + Usage: "page of results to display", + }, + }, + }, + { + Name: "show", + Usage: "get information on a specific Ethereum Transaction", + Action: s.ShowTransaction, + }, + }, + } +} + +type EthTxPresenter struct { + JAID + presenters.EthTxResource +} + +// RenderTable implements TableRenderer +func (p *EthTxPresenter) RenderTable(rt RendererTable) error { + table := rt.newTable([]string{"From", "Nonce", "To", "State"}) + table.Append([]string{ + p.From.Hex(), + p.Nonce, + p.To.Hex(), + fmt.Sprint(p.State), + }) + + render(fmt.Sprintf("Ethereum Transaction %v", p.Hash.Hex()), table) + return nil +} + 
+type EthTxPresenters []EthTxPresenter + +// RenderTable implements TableRenderer +func (ps EthTxPresenters) RenderTable(rt RendererTable) error { + table := rt.newTable([]string{"Hash", "Nonce", "From", "GasPrice", "SentAt", "State"}) + for _, p := range ps { + table.Append([]string{ + p.Hash.Hex(), + p.Nonce, + p.From.Hex(), + p.GasPrice, + p.SentAt, + fmt.Sprint(p.State), + }) + } + + render("Ethereum Transactions", table) + return nil +} + +// IndexTransactions returns the list of transactions in descending order, +// taking an optional page parameter +func (s *Shell) IndexTransactions(c *cli.Context) error { + return s.getPage("/v2/transactions/evm", c.Int("page"), &EthTxPresenters{}) +} + +// ShowTransaction returns the info for the given transaction hash +func (s *Shell) ShowTransaction(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("must pass the hash of the transaction")) + } + hash := c.Args().First() + resp, err := s.HTTP.Get(s.ctx(), "/v2/transactions/evm/"+hash) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + err = s.renderAPIResponse(resp, &EthTxPresenter{}) + return err +} + +// SendEther transfers ETH from the node's account to a specified address. 
+func (s *Shell) SendEther(c *cli.Context) (err error) { + if c.NArg() < 3 { + return s.errorOut(errors.New("the following arguments expected: (chain) id (in multi-chain setup), amount, fromAddress and toAddress")) + } + + var amount assets.Eth + + if c.IsSet("wei") { + var value int64 + + value, err = stringutils.ToInt64(c.Args().Get(0)) + if err != nil { + return s.errorOut(multierr.Combine( + errors.New("while parsing WEI transfer amount"), err)) + } + + amount = assets.NewEthValue(value) + } else { + amount, err = assets.NewEthValueS(c.Args().Get(0)) + if err != nil { + return s.errorOut(multierr.Combine( + errors.New("while parsing ETH transfer amount"), err)) + } + } + + unparsedFromAddress := c.Args().Get(1) + fromAddress, err := utils.ParseEthereumAddress(unparsedFromAddress) + if err != nil { + return s.errorOut(multierr.Combine( + fmt.Errorf("while parsing withdrawal source address %v", + unparsedFromAddress), err)) + } + + unparsedDestinationAddress := c.Args().Get(2) + destinationAddress, err := utils.ParseEthereumAddress(unparsedDestinationAddress) + if err != nil { + return s.errorOut(multierr.Combine( + fmt.Errorf("while parsing withdrawal destination address %v", + unparsedDestinationAddress), err)) + } + + var evmChainID *big.Int + if c.IsSet("id") { + str := c.String("id") + var ok bool + evmChainID, ok = new(big.Int).SetString(str, 10) + if !ok { + return s.errorOut(errors.New("")) + } + } + + request := models.SendEtherRequest{ + DestinationAddress: destinationAddress, + FromAddress: fromAddress, + Amount: amount, + EVMChainID: (*ubig.Big)(evmChainID), + AllowHigherAmounts: c.IsSet("force"), + } + + requestData, err := json.Marshal(request) + if err != nil { + return s.errorOut(err) + } + + buf := bytes.NewBuffer(requestData) + + resp, err := s.HTTP.Post(s.ctx(), "/v2/transfers/evm", buf) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + 
err = s.renderAPIResponse(resp, &EthTxPresenter{}) + return err +} diff --git a/core/cmd/evm_transaction_commands_test.go b/core/cmd/evm_transaction_commands_test.go new file mode 100644 index 00000000..5d775087 --- /dev/null +++ b/core/cmd/evm_transaction_commands_test.go @@ -0,0 +1,268 @@ +package cmd_test + +import ( + "flag" + "fmt" + "math/big" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func TestShell_IndexTransactions(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, r := app.NewShellAndRenderer() + + _, from := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + txStore := cltest.NewTestTxStore(t, app.GetSqlxDB(), app.GetConfig().Database()) + tx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 0, 1, from) + attempt := tx.TxAttempts[0] + + // page 1 + set := flag.NewFlagSet("test transactions", 0) + flagSetApplyFromAction(client.IndexTransactions, set, "") + + require.NoError(t, set.Set("page", "1")) + + c := cli.NewContext(nil, set, nil) + require.Equal(t, 1, c.Int("page")) + assert.NoError(t, client.IndexTransactions(c)) + + renderedTxs := *r.Renders[0].(*cmd.EthTxPresenters) + assert.Equal(t, 1, len(renderedTxs)) + 
assert.Equal(t, attempt.Hash.String(), renderedTxs[0].Hash.Hex()) + + // page 2 which doesn't exist + set = flag.NewFlagSet("test txattempts", 0) + flagSetApplyFromAction(client.IndexTransactions, set, "") + + require.NoError(t, set.Set("page", "2")) + + c = cli.NewContext(nil, set, nil) + require.Equal(t, 2, c.Int("page")) + assert.NoError(t, client.IndexTransactions(c)) + + renderedTxs = *r.Renders[1].(*cmd.EthTxPresenters) + assert.Equal(t, 0, len(renderedTxs)) +} + +func TestShell_ShowTransaction(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, r := app.NewShellAndRenderer() + + db := app.GetSqlxDB() + _, from := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + txStore := cltest.NewTestTxStore(t, db, app.GetConfig().Database()) + tx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 0, 1, from) + attempt := tx.TxAttempts[0] + + set := flag.NewFlagSet("test get tx", 0) + flagSetApplyFromAction(client.ShowTransaction, set, "") + + require.NoError(t, set.Parse([]string{attempt.Hash.String()})) + + c := cli.NewContext(nil, set, nil) + require.NoError(t, client.ShowTransaction(c)) + + renderedTx := *r.Renders[0].(*cmd.EthTxPresenter) + assert.Equal(t, &tx.FromAddress, renderedTx.From) +} + +func TestShell_IndexTxAttempts(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, r := app.NewShellAndRenderer() + + _, from := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + txStore := cltest.NewTestTxStore(t, app.GetSqlxDB(), app.GetConfig().Database()) + tx := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 0, 1, from) + + // page 1 + set := flag.NewFlagSet("test txattempts", 0) + flagSetApplyFromAction(client.IndexTxAttempts, set, "") + + require.NoError(t, set.Set("page", "1")) + + c := cli.NewContext(nil, set, nil) + require.Equal(t, 1, c.Int("page")) + require.NoError(t, client.IndexTxAttempts(c)) + + renderedAttempts := *r.Renders[0].(*cmd.EthTxPresenters) + 
require.Len(t, tx.TxAttempts, 1) + assert.Equal(t, tx.TxAttempts[0].Hash.String(), renderedAttempts[0].Hash.Hex()) + + // page 2 which doesn't exist + set = flag.NewFlagSet("test transactions", 0) + flagSetApplyFromAction(client.IndexTxAttempts, set, "") + + require.NoError(t, set.Set("page", "2")) + + c = cli.NewContext(nil, set, nil) + require.Equal(t, 2, c.Int("page")) + assert.NoError(t, client.IndexTxAttempts(c)) + + renderedAttempts = *r.Renders[1].(*cmd.EthTxPresenters) + assert.Equal(t, 0, len(renderedAttempts)) +} + +func TestShell_SendEther_From_Txm(t *testing.T) { + t.Parallel() + + key := cltest.MustGenerateRandomKey(t) + fromAddress := key.Address + + balance, err := assets.NewEthValueS("200") + require.NoError(t, err) + + ethMock := newEthMockWithTransactionsOnBlocksAssertions(t) + + ethMock.On("BalanceAt", mock.Anything, key.Address, (*big.Int)(nil)).Return(balance.ToInt(), nil) + ethMock.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(0), nil).Maybe() + ethMock.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + + // NOTE: FallbackPollInterval is used in this test to quickly create TxAttempts + // Testing triggers requires committing transactions and does not work with transactional tests + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(time.Second) + }, + withKey(), + withMocks(ethMock, key), + ) + client, r := app.NewShellAndRenderer() + db := app.GetSqlxDB() + cfg := pgtest.NewQConfig(false) + txStore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg) + set := flag.NewFlagSet("sendether", 0) + flagSetApplyFromAction(client.SendEther, set, "") + + amount := "100.5" + to := "0x342156c8d3bA54Abc67920d35ba1d1e67201aC9C" + require.NoError(t, set.Parse([]string{amount, 
fromAddress.Hex(), to})) + require.NoError(t, set.Set("id", evmtest.MustGetDefaultChainID(t, app.Config.EVMConfigs()).String())) + + cliapp := cli.NewApp() + c := cli.NewContext(cliapp, set, nil) + + assert.NoError(t, client.SendEther(c)) + + evmTxes, err := txStore.GetAllTxes(testutils.Context(t)) + require.NoError(t, err) + require.Len(t, evmTxes, 1) + evmTx := evmTxes[0] + value := assets.Eth(evmTx.Value) + require.Equal(t, "100.500000000000000000", value.String()) + require.Equal(t, fromAddress, evmTx.FromAddress) + require.Equal(t, to, evmTx.ToAddress.String()) + + output := *r.Renders[0].(*cmd.EthTxPresenter) + assert.Equal(t, &evmTx.FromAddress, output.From) + assert.Equal(t, &evmTx.ToAddress, output.To) + assert.Equal(t, value.String(), output.Value) + assert.Equal(t, fmt.Sprintf("%d", *evmTx.Sequence), output.Nonce) + + attempts, err := txStore.GetAllTxAttempts(testutils.Context(t)) + require.NoError(t, err) + require.Len(t, attempts, 1) + assert.Equal(t, attempts[0].Hash, output.Hash) + +} + +func TestShell_SendEther_From_Txm_WEI(t *testing.T) { + t.Parallel() + + key := cltest.MustGenerateRandomKey(t) + fromAddress := key.Address + + balance, err := assets.NewEthValueS("200") + require.NoError(t, err) + + ethMock := newEthMockWithTransactionsOnBlocksAssertions(t) + + ethMock.On("BalanceAt", mock.Anything, key.Address, (*big.Int)(nil)).Return(balance.ToInt(), nil) + ethMock.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(0), nil).Maybe() + ethMock.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once() + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + + // NOTE: FallbackPollInterval is used in this test to quickly create TxAttempts + // Testing triggers requires committing transactions and does not work with transactional tests + 
c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(time.Second) + }, + withKey(), + withMocks(ethMock, key), + ) + client, r := app.NewShellAndRenderer() + db := app.GetSqlxDB() + cfg := pgtest.NewQConfig(false) + txStore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg) + + set := flag.NewFlagSet("sendether", 0) + flagSetApplyFromAction(client.SendEther, set, "") + + require.NoError(t, set.Set("id", testutils.FixtureChainID.String())) + require.NoError(t, set.Set("wei", "false")) + + amount := "1000000000000000000" + to := "0x342156c8d3bA54Abc67920d35ba1d1e67201aC9C" + err = set.Parse([]string{amount, fromAddress.Hex(), to}) + require.NoError(t, err) + + err = set.Set("wei", "true") + require.NoError(t, err) + + cliapp := cli.NewApp() + c := cli.NewContext(cliapp, set, nil) + + assert.NoError(t, client.SendEther(c)) + + evmTxes, err := txStore.GetAllTxes(testutils.Context(t)) + require.NoError(t, err) + require.Len(t, evmTxes, 1) + evmTx := evmTxes[0] + value := assets.Eth(evmTx.Value) + require.Equal(t, "1.000000000000000000", value.String()) + require.Equal(t, fromAddress, evmTx.FromAddress) + require.Equal(t, to, evmTx.ToAddress.String()) + + output := *r.Renders[0].(*cmd.EthTxPresenter) + assert.Equal(t, &evmTx.FromAddress, output.From) + assert.Equal(t, &evmTx.ToAddress, output.To) + assert.Equal(t, value.String(), output.Value) + assert.Equal(t, fmt.Sprintf("%d", *evmTx.Sequence), output.Nonce) + + attempts, err := txStore.GetAllTxAttempts(testutils.Context(t)) + require.NoError(t, err) + require.Len(t, attempts, 1) + assert.Equal(t, attempts[0].Hash, output.Hash) +} diff --git a/core/cmd/external_initiator_commands.go b/core/cmd/external_initiator_commands.go new file mode 100644 index 00000000..f09cbee2 --- /dev/null +++ b/core/cmd/external_initiator_commands.go @@ -0,0 +1,71 @@ +package cmd + +import ( + "github.com/urfave/cli" + + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initInitiatorsSubCmds(s *Shell) 
[]cli.Command { + return []cli.Command{ + { + Name: "create", + Usage: "Create an authentication key for a user of External Initiators", + Action: s.CreateExternalInitiator, + }, + { + Name: "destroy", + Usage: "Remove an external initiator by name", + Action: s.DeleteExternalInitiator, + }, + { + Name: "list", + Usage: "List all external initiators", + Action: s.IndexExternalInitiators, + }, + } +} + +type ExternalInitiatorPresenter struct { + JAID + presenters.ExternalInitiatorResource +} + +func (eip *ExternalInitiatorPresenter) RenderTable(rt RendererTable) error { + table := rt.newTable([]string{"ID", "Name", "URL", "AccessKey", "OutgoingToken", "CreatedAt", "UpdatedAt"}) + table.Append(eip.ToRow()) + render("External Initiator:", table) + return nil +} + +func (eip *ExternalInitiatorPresenter) ToRow() []string { + var urlS string + if eip.URL != nil { + urlS = eip.URL.String() + } + return []string{ + eip.ID, + eip.Name, + urlS, + eip.AccessKey, + eip.OutgoingToken, + eip.CreatedAt.String(), + eip.UpdatedAt.String(), + } +} + +type ExternalInitiatorPresenters []ExternalInitiatorPresenter + +func (eips *ExternalInitiatorPresenters) RenderTable(rt RendererTable) error { + table := rt.newTable([]string{"ID", "Name", "URL", "AccessKey", "OutgoingToken", "CreatedAt", "UpdatedAt"}) + for _, eip := range *eips { + table.Append(eip.ToRow()) + } + render("External Initiators:", table) + return nil +} + +// IndexExternalInitiators lists external initiators +func (s *Shell) IndexExternalInitiators(c *cli.Context) (err error) { + return s.getPage("/v2/external_initiators", c.Int("page"), &ExternalInitiatorPresenters{}) +} diff --git a/core/cmd/external_initiator_commands_test.go b/core/cmd/external_initiator_commands_test.go new file mode 100644 index 00000000..fc75e424 --- /dev/null +++ b/core/cmd/external_initiator_commands_test.go @@ -0,0 +1,61 @@ +package cmd_test + +import ( + "bytes" + "testing" + "time" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestExternalInitiatorPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + name = "ExternalInitiator 1" + url = cltest.MustWebURL(t, "http://example.com") + createdAt = time.Now() + updatedAt = time.Now() + outgoingToken = "anoutgoingtoken" + accessKey = "anaccesskey" + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.ExternalInitiatorPresenter{ + ExternalInitiatorResource: presenters.ExternalInitiatorResource{ + JAID: presenters.NewJAID(name), + Name: name, + URL: url, + AccessKey: accessKey, + OutgoingToken: outgoingToken, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, name) + assert.Contains(t, output, url.String()) + assert.Contains(t, output, accessKey) + assert.Contains(t, output, outgoingToken) + + // Render many resources + buffer.Reset() + ps := cmd.ExternalInitiatorPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, name) + assert.Contains(t, output, url.String()) + assert.Contains(t, output, accessKey) + assert.Contains(t, output, outgoingToken) +} diff --git a/core/cmd/forwarders_commands.go b/core/cmd/forwarders_commands.go new file mode 100644 index 00000000..814caea9 --- /dev/null +++ b/core/cmd/forwarders_commands.go @@ -0,0 +1,168 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "math/big" + "time" + + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + 
"github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initFowardersSubCmds(s *Shell) []cli.Command { + return []cli.Command{ + { + Name: "list", + Usage: "List all stored forwarders addresses", + Action: s.ListForwarders, + }, + { + Name: "track", + Usage: "Track a new forwarder", + Action: s.TrackForwarder, + Flags: []cli.Flag{ + cli.Int64Flag{ + Name: "evm-chain-id, evmChainID, c", + Usage: "chain ID, if left empty, EVM.ChainID will be used", + }, + cli.StringFlag{ + Name: "address, a", + Usage: "The forwarding address (in hex format)", + }, + }, + }, + { + Name: "delete", + Usage: "Delete a forwarder address", + Action: s.DeleteForwarder, + }, + } +} + +type EVMForwarderPresenter struct { + JAID // This is needed to render the id for a JSONAPI Resource as normal JSON + presenters.EVMForwarderResource +} + +var evmFwdsHeaders = []string{"ID", "Address", "Chain ID", "Created At"} + +// ToRow presents the EVMForwarderResource as a slice of strings. +func (p *EVMForwarderPresenter) ToRow() []string { + row := []string{ + p.GetID(), + p.Address.String(), + p.EVMChainID.ToInt().String(), + p.CreatedAt.Format(time.RFC3339), + } + return row +} + +// RenderTable implements TableRenderer +func (p *EVMForwarderPresenter) RenderTable(rt RendererTable) error { + var rows [][]string + rows = append(rows, p.ToRow()) + renderList(evmFwdsHeaders, rows, rt.Writer) + + return nil +} + +// EVMForwarderPresenters implements TableRenderer for a slice of EVMForwarderPresenter. 
+type EVMForwarderPresenters []EVMForwarderPresenter + +// RenderTable implements TableRenderer +func (ps EVMForwarderPresenters) RenderTable(rt RendererTable) error { + var rows [][]string + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(evmFwdsHeaders, rows, rt.Writer) + + return nil +} + +// ListForwarders list all forwarder addresses tracked by node +func (s *Shell) ListForwarders(c *cli.Context) (err error) { + return s.getPage("/v2/nodes/evm/forwarders", c.Int("page"), &EVMForwarderPresenters{}) +} + +// DeleteForwarder deletes forwarder address from node db by id. +func (s *Shell) DeleteForwarder(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("must pass the forwarder id to be archived")) + } + resp, err := s.HTTP.Delete(s.ctx(), "/v2/nodes/evm/forwarders/"+c.Args().First()) + if err != nil { + return s.errorOut(err) + } + _, err = s.parseResponse(resp) + if err != nil { + return s.errorOut(err) + } + + fmt.Printf("Forwarder %v Deleted\n", c.Args().First()) + return nil +} + +// TrackForwarder tracks forwarder address in db. 
+func (s *Shell) TrackForwarder(c *cli.Context) (err error) { + addressHex := c.String("address") + chainIDStr := c.String("evm-chain-id") + + addressBytes, err := hexutil.Decode(addressHex) + if err != nil { + return s.errorOut(errors.Wrap(err, "could not decode address")) + } + address := gethCommon.BytesToAddress(addressBytes) + + var chainID *big.Int + if chainIDStr != "" { + var ok bool + chainID, ok = big.NewInt(0).SetString(chainIDStr, 10) + if !ok { + return s.errorOut(errors.Wrap(err, "invalid evm-chain-id")) + } + } + + request, err := json.Marshal(web.TrackEVMForwarderRequest{ + EVMChainID: (*ubig.Big)(chainID), + Address: address, + }) + if err != nil { + return s.errorOut(err) + } + + resp, err := s.HTTP.Post(s.ctx(), "/v2/nodes/evm/forwarders/track", bytes.NewReader(request)) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + if resp.StatusCode >= 400 { + body, rerr := io.ReadAll(resp.Body) + if err != nil { + err = multierr.Append(err, rerr) + return s.errorOut(err) + } + fmt.Printf("Response: '%v', Status: %d\n", string(body), resp.StatusCode) + return s.errorOut(err) + } + + err = s.renderAPIResponse(resp, &EVMForwarderPresenter{}, "Forwarder created") + return err +} diff --git a/core/cmd/forwarders_commands_test.go b/core/cmd/forwarders_commands_test.go new file mode 100644 index 00000000..32fa8715 --- /dev/null +++ b/core/cmd/forwarders_commands_test.go @@ -0,0 +1,145 @@ +package cmd_test + +import ( + "bytes" + "flag" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + 
"github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestEVMForwarderPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + id = "1" + address = utils.RandomAddress() + evmChainID = big.NewI(4) + createdAt = time.Now() + updatedAt = time.Now().Add(time.Second) + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.EVMForwarderPresenter{ + EVMForwarderResource: presenters.EVMForwarderResource{ + JAID: presenters.NewJAID(id), + Address: address, + EVMChainID: *evmChainID, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, address.String()) + assert.Contains(t, output, evmChainID.ToInt().String()) + assert.Contains(t, output, createdAt.Format(time.RFC3339)) + + // Render many resources + buffer.Reset() + ps := cmd.EVMForwarderPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, address.String()) + assert.Contains(t, output, evmChainID.ToInt().String()) + assert.Contains(t, output, createdAt.Format(time.RFC3339)) +} + +func TestShell_TrackEVMForwarder(t *testing.T) { + t.Parallel() + + id := newRandChainID() + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].ChainID = id + c.EVM[0].Enabled = ptr(true) + }) + client, r := app.NewShellAndRenderer() + + // Create the fwdr + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.TrackForwarder, set, "") + + require.NoError(t, set.Set("address", utils.RandomAddress().Hex())) + require.NoError(t, set.Set("evm-chain-id", id.String())) + + err := client.TrackForwarder(cli.NewContext(nil, set, nil)) + require.NoError(t, err) + require.Len(t, r.Renders, 1) + createOutput, ok := r.Renders[0].(*cmd.EVMForwarderPresenter) + require.True(t, ok, "Expected Renders[0] 
to be *cmd.EVMForwarderPresenter, got %T", r.Renders[0]) + + // Assert fwdr is listed + require.Nil(t, client.ListForwarders(cltest.EmptyCLIContext())) + fwds := *r.Renders[1].(*cmd.EVMForwarderPresenters) + require.Equal(t, 1, len(fwds)) + assert.Equal(t, createOutput.ID, fwds[0].ID) + + // Delete fwdr + set = flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteForwarder, set, "") + + require.NoError(t, set.Parse([]string{createOutput.ID})) + + c := cli.NewContext(nil, set, nil) + require.NoError(t, client.DeleteForwarder(c)) + + // Assert fwdr is not listed + require.Nil(t, client.ListForwarders(cltest.EmptyCLIContext())) + require.Len(t, r.Renders, 3) + fwds = *r.Renders[2].(*cmd.EVMForwarderPresenters) + require.Equal(t, 0, len(fwds)) +} + +func TestShell_TrackEVMForwarder_BadAddress(t *testing.T) { + t.Parallel() + + id := newRandChainID() + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].ChainID = id + c.EVM[0].Enabled = ptr(true) + }) + client, _ := app.NewShellAndRenderer() + + // Create the fwdr + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.TrackForwarder, set, "") + + require.NoError(t, set.Set("address", "0xWrongFormatAddress")) + require.NoError(t, set.Set("evm-chain-id", id.String())) + + err := client.TrackForwarder(cli.NewContext(nil, set, nil)) + require.Contains(t, err.Error(), "could not decode address: invalid hex string") +} + +func TestShell_DeleteEVMForwarders_MissingFwdId(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + }) + client, _ := app.NewShellAndRenderer() + + // Delete fwdr without id + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteForwarder, set, "") + + c := cli.NewContext(nil, set, nil) + require.Equal(t, "must pass the forwarder id to be archived", client.DeleteForwarder(c).Error()) +} diff --git a/core/cmd/helpers_test.go 
b/core/cmd/helpers_test.go new file mode 100644 index 00000000..fc129523 --- /dev/null +++ b/core/cmd/helpers_test.go @@ -0,0 +1,13 @@ +package cmd + +import "github.com/goplugin/pluginv3.0/v2/core/logger" + +// CheckRemoteBuildCompatibility exposes checkRemoteBuildCompatibility for testing. +func (s *Shell) CheckRemoteBuildCompatibility(lggr logger.Logger, onlyWarn bool, cliVersion, cliSha string) error { + return s.checkRemoteBuildCompatibility(lggr, onlyWarn, cliVersion, cliSha) +} + +// ConfigV2Str exposes configV2Str for testing. +func (s *Shell) ConfigV2Str(userOnly bool) (string, error) { + return s.configV2Str(userOnly) +} diff --git a/core/cmd/jobs_commands.go b/core/cmd/jobs_commands.go new file mode 100644 index 00000000..2bd15607 --- /dev/null +++ b/core/cmd/jobs_commands.go @@ -0,0 +1,307 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initJobsSubCmds(s *Shell) []cli.Command { + return []cli.Command{ + { + Name: "list", + Usage: "List all jobs", + Action: s.ListJobs, + Flags: []cli.Flag{ + cli.IntFlag{ + Name: "page", + Usage: "page of results to display", + }, + }, + }, + { + Name: "show", + Usage: "Show a job", + Action: s.ShowJob, + }, + { + Name: "create", + Usage: "Create a job", + Action: s.CreateJob, + }, + { + Name: "delete", + Usage: "Delete a job", + Action: s.DeleteJob, + }, + { + Name: "run", + Usage: "Trigger a job run", + Action: s.TriggerPipelineRun, + }, + } +} + +// JobPresenter wraps the JSONAPI Job Resource and adds rendering functionality +type JobPresenter struct { + JAID // This is needed to render the id for a JSONAPI Resource as normal JSON + presenters.JobResource +} + +// ToRows returns the job as a multiple rows per task +func (p 
JobPresenter) ToRows() [][]string { + row := [][]string{} + + // Produce a row when there are no tasks + if len(p.FriendlyTasks()) == 0 { + row = append(row, p.toRow("")) + + return row + } + + for _, t := range p.FriendlyTasks() { + row = append(row, p.toRow(t)) + } + + return row +} + +// ToRow generates a row for a task +func (p JobPresenter) toRow(task string) []string { + return []string{ + p.GetID(), + p.Name, + p.Type.String(), + task, + p.FriendlyCreatedAt(), + } +} + +// GetTasks extracts the tasks from the dependency graph +func (p JobPresenter) GetTasks() ([]string, error) { + if strings.TrimSpace(p.PipelineSpec.DotDAGSource) == "" { + return nil, nil + } + var types []string + pipeline, err := pipeline.Parse(p.PipelineSpec.DotDAGSource) + if err != nil { + return nil, err + } + + for _, t := range pipeline.Tasks { + types = append(types, fmt.Sprintf("%s %s", t.DotID(), t.Type())) + } + + return types, nil +} + +// FriendlyTasks returns the tasks +func (p JobPresenter) FriendlyTasks() []string { + taskTypes, err := p.GetTasks() + if err != nil { + return []string{"error parsing DAG"} + } + + return taskTypes +} + +// FriendlyCreatedAt returns the created at timestamp of the spec which matches the +// type in RFC3339 format. 
+func (p JobPresenter) FriendlyCreatedAt() string { + switch p.Type { + case presenters.DirectRequestJobSpec: + if p.DirectRequestSpec != nil { + return p.DirectRequestSpec.CreatedAt.Format(time.RFC3339) + } + case presenters.FluxMonitorJobSpec: + if p.FluxMonitorSpec != nil { + return p.FluxMonitorSpec.CreatedAt.Format(time.RFC3339) + } + case presenters.OffChainReportingJobSpec: + if p.OffChainReportingSpec != nil { + return p.OffChainReportingSpec.CreatedAt.Format(time.RFC3339) + } + case presenters.KeeperJobSpec: + if p.KeeperSpec != nil { + return p.KeeperSpec.CreatedAt.Format(time.RFC3339) + } + case presenters.CronJobSpec: + if p.CronSpec != nil { + return p.CronSpec.CreatedAt.Format(time.RFC3339) + } + case presenters.VRFJobSpec: + if p.VRFSpec != nil { + return p.VRFSpec.CreatedAt.Format(time.RFC3339) + } + case presenters.WebhookJobSpec: + if p.WebhookSpec != nil { + return p.WebhookSpec.CreatedAt.Format(time.RFC3339) + } + case presenters.BlockhashStoreJobSpec: + if p.BlockhashStoreSpec != nil { + return p.BlockhashStoreSpec.CreatedAt.Format(time.RFC3339) + } + case presenters.BlockHeaderFeederJobSpec: + if p.BlockHeaderFeederSpec != nil { + return p.BlockHeaderFeederSpec.CreatedAt.Format(time.RFC3339) + } + case presenters.BootstrapJobSpec: + if p.BootstrapSpec != nil { + return p.BootstrapSpec.CreatedAt.Format(time.RFC3339) + } + case presenters.GatewayJobSpec: + if p.GatewaySpec != nil { + return p.GatewaySpec.CreatedAt.Format(time.RFC3339) + } + default: + return "unknown" + } + + // This should never occur since the job should always have a spec matching + // the type + return "N/A" +} + +// RenderTable implements TableRenderer +func (p *JobPresenter) RenderTable(rt RendererTable) error { + table := rt.newTable([]string{"ID", "Name", "Type", "Tasks", "Created At"}) + table.SetAutoMergeCells(true) + for _, r := range p.ToRows() { + table.Append(r) + } + + render("Jobs", table) + return nil +} + +type JobPresenters []JobPresenter + +// RenderTable 
implements TableRenderer +func (ps JobPresenters) RenderTable(rt RendererTable) error { + table := rt.newTable([]string{"ID", "Name", "Type", "Tasks", "Created At"}) + table.SetAutoMergeCells(true) + for _, p := range ps { + for _, r := range p.ToRows() { + table.Append(r) + } + } + + render("Jobs (V2)", table) + return nil +} + +// ListJobs lists all jobs +func (s *Shell) ListJobs(c *cli.Context) (err error) { + return s.getPage("/v2/jobs", c.Int("page"), &JobPresenters{}) +} + +// ShowJob displays the details of a job +func (s *Shell) ShowJob(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("must provide the id of the job")) + } + id := c.Args().First() + resp, err := s.HTTP.Get(s.ctx(), "/v2/jobs/"+id) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &JobPresenter{}) +} + +// CreateJob creates a job +// Valid input is a TOML string or a path to TOML file +func (s *Shell) CreateJob(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("must pass in TOML or filepath")) + } + + tomlString, err := getTOMLString(c.Args().First()) + if err != nil { + return s.errorOut(err) + } + + request, err := json.Marshal(web.CreateJobRequest{ + TOML: tomlString, + }) + if err != nil { + return s.errorOut(err) + } + + resp, err := s.HTTP.Post(s.ctx(), "/v2/jobs", bytes.NewReader(request)) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + if resp.StatusCode >= 400 { + body, rerr := io.ReadAll(resp.Body) + if err != nil { + err = multierr.Append(err, rerr) + return s.errorOut(err) + } + fmt.Printf("Response: '%v', Status: %d\n", string(body), resp.StatusCode) + return s.errorOut(err) + } + + err = s.renderAPIResponse(resp, &JobPresenter{}, "Job created") + return 
err +} + +// DeleteJob deletes a job +func (s *Shell) DeleteJob(c *cli.Context) error { + if !c.Args().Present() { + return s.errorOut(errors.New("must pass the job id to be archived")) + } + resp, err := s.HTTP.Delete(s.ctx(), "/v2/jobs/"+c.Args().First()) + if err != nil { + return s.errorOut(err) + } + _, err = s.parseResponse(resp) + if err != nil { + return s.errorOut(err) + } + + fmt.Printf("Job %v Deleted\n", c.Args().First()) + return nil +} + +// TriggerPipelineRun triggers a job run based on a job ID +func (s *Shell) TriggerPipelineRun(c *cli.Context) error { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the job id to trigger a run")) + } + resp, err := s.HTTP.Post(s.ctx(), "/v2/jobs/"+c.Args().First()+"/runs", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var run presenters.PipelineRunResource + err = s.renderAPIResponse(resp, &run, "Pipeline run successfully triggered") + return err +} diff --git a/core/cmd/jobs_commands_test.go b/core/cmd/jobs_commands_test.go new file mode 100644 index 00000000..9712afab --- /dev/null +++ b/core/cmd/jobs_commands_test.go @@ -0,0 +1,457 @@ +package cmd_test + +import ( + "bytes" + _ "embed" + "flag" + "fmt" + "testing" + "time" + + "github.com/google/uuid" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestJobPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + id = 
"1" + name = "Job 1" + jobSpecType = "fluxmonitor" + schemaVersion = uint32(1) + maxTaskDuration = models.Interval(1 * time.Second) + + createdAt = time.Now() + updatedAt = time.Now().Add(time.Second) + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.JobPresenter{ + JobResource: presenters.JobResource{ + JAID: presenters.NewJAID(id), + Name: name, + Type: presenters.JobSpecType(jobSpecType), + SchemaVersion: schemaVersion, + MaxTaskDuration: maxTaskDuration, + DirectRequestSpec: nil, + FluxMonitorSpec: &presenters.FluxMonitorSpec{ + CreatedAt: createdAt, + UpdatedAt: updatedAt, + }, + OffChainReportingSpec: nil, + KeeperSpec: nil, + PipelineSpec: presenters.PipelineSpec{ + ID: 1, + DotDAGSource: "ds1 [type=http method=GET url=\"example.com\" allowunrestrictednetworkaccess=\"true\"];\n ds1_parse [type=jsonparse path=\"USD\"];\n ds1_multiply [type=multiply times=100];\n ds1 -\u003e ds1_parse -\u003e ds1_multiply;\n", + }, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, name) + assert.Contains(t, output, jobSpecType) + assert.Contains(t, output, "ds1 http") + assert.Contains(t, output, "ds1_parse jsonparse") + assert.Contains(t, output, "ds1_multiply multiply") + assert.Contains(t, output, createdAt.Format(time.RFC3339)) + + // Render many resources + buffer.Reset() + ps := cmd.JobPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, name) + assert.Contains(t, output, jobSpecType) + assert.Contains(t, output, "ds1 http") + assert.Contains(t, output, "ds1_parse jsonparse") + assert.Contains(t, output, "ds1_multiply multiply") + assert.Contains(t, output, createdAt.Format(time.RFC3339)) +} + +func TestJobRenderer_GetTasks(t *testing.T) { + t.Parallel() + + r := &cmd.JobPresenter{} + + t.Run("gets the tasks from the 
DAG in reverse order", func(t *testing.T) { + r.PipelineSpec = presenters.PipelineSpec{ + DotDAGSource: "ds1 [type=http method=GET url=\"example.com\" allowunrestrictednetworkaccess=\"true\"];\n ds1_parse [type=jsonparse path=\"USD\"];\n ds1_multiply [type=multiply times=100];\n ds1 -\u003e ds1_parse -\u003e ds1_multiply;\n", + } + + tasks, err := r.GetTasks() + + assert.NoError(t, err) + assert.Equal(t, []string{ + "ds1 http", + "ds1_parse jsonparse", + "ds1_multiply multiply", + }, tasks) + }) + + t.Run("parse error", func(t *testing.T) { + r.PipelineSpec = presenters.PipelineSpec{ + DotDAGSource: "invalid dot", + } + + tasks, err := r.GetTasks() + + assert.Error(t, err) + assert.Nil(t, tasks) + }) +} + +func TestJob_FriendlyTasks(t *testing.T) { + t.Parallel() + + r := &cmd.JobPresenter{} + + t.Run("gets the tasks in a printable format", func(t *testing.T) { + r.PipelineSpec = presenters.PipelineSpec{ + DotDAGSource: " ds1 [type=http method=GET url=\"example.com\" allowunrestrictednetworkaccess=\"true\"];\n ds1_parse [type=jsonparse path=\"USD\"];\n ds1_multiply [type=multiply times=100];\n ds1 -\u003e ds1_parse -\u003e ds1_multiply;\n", + } + + assert.Equal(t, []string{ + "ds1 http", + "ds1_parse jsonparse", + "ds1_multiply multiply", + }, r.FriendlyTasks()) + }) + + t.Run("parse error", func(t *testing.T) { + r.PipelineSpec = presenters.PipelineSpec{ + DotDAGSource: "invalid dot", + } + + assert.Equal(t, []string{"error parsing DAG"}, r.FriendlyTasks()) + }) +} + +func TestJob_FriendlyCreatedAt(t *testing.T) { + t.Parallel() + + now := time.Now() + + testCases := []struct { + name string + job *cmd.JobPresenter + result string + }{ + { + "gets the direct request spec created at timestamp", + &cmd.JobPresenter{ + JobResource: presenters.JobResource{ + Type: presenters.DirectRequestJobSpec, + DirectRequestSpec: &presenters.DirectRequestSpec{ + CreatedAt: now, + }, + }, + }, + now.Format(time.RFC3339), + }, + { + "gets the flux monitor spec created at timestamp", 
+ &cmd.JobPresenter{ + JobResource: presenters.JobResource{ + Type: presenters.FluxMonitorJobSpec, + FluxMonitorSpec: &presenters.FluxMonitorSpec{ + CreatedAt: now, + }, + }, + }, + now.Format(time.RFC3339), + }, + { + "gets the cron spec created at timestamp", + &cmd.JobPresenter{ + JobResource: presenters.JobResource{ + Type: presenters.CronJobSpec, + CronSpec: &presenters.CronSpec{ + CreatedAt: now, + }, + }, + }, + now.Format(time.RFC3339), + }, + { + "gets the vrf spec created at timestamp", + &cmd.JobPresenter{ + JobResource: presenters.JobResource{ + Type: presenters.VRFJobSpec, + VRFSpec: &presenters.VRFSpec{ + CreatedAt: now, + }, + }, + }, + now.Format(time.RFC3339), + }, + { + "gets the off chain reporting spec created at timestamp", + &cmd.JobPresenter{ + JobResource: presenters.JobResource{ + Type: presenters.OffChainReportingJobSpec, + OffChainReportingSpec: &presenters.OffChainReportingSpec{ + CreatedAt: now, + }, + }, + }, + now.Format(time.RFC3339), + }, + { + "gets the blockhash store spec created at timestamp", + &cmd.JobPresenter{ + JobResource: presenters.JobResource{ + Type: presenters.BlockhashStoreJobSpec, + BlockhashStoreSpec: &presenters.BlockhashStoreSpec{ + CreatedAt: now, + }, + }, + }, + now.Format(time.RFC3339), + }, + { + "gets the blockheaderfeeder spec created at timestamp", + &cmd.JobPresenter{ + JobResource: presenters.JobResource{ + Type: presenters.BlockHeaderFeederJobSpec, + BlockHeaderFeederSpec: &presenters.BlockHeaderFeederSpec{ + CreatedAt: now, + }, + }, + }, + now.Format(time.RFC3339), + }, + { + "invalid type", + &cmd.JobPresenter{ + JobResource: presenters.JobResource{ + Type: "invalid type", + }, + }, + "unknown", + }, + { + "no spec exists", + &cmd.JobPresenter{ + JobResource: presenters.JobResource{ + Type: presenters.DirectRequestJobSpec, + }, + }, + "N/A", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.result, tc.job.FriendlyCreatedAt()) + }) + } +} + +func 
TestJob_ToRows(t *testing.T) { + t.Parallel() + + now := time.Now() + + job := &cmd.JobPresenter{ + JAID: cmd.NewJAID("1"), + JobResource: presenters.JobResource{ + Name: "Test Job", + Type: presenters.DirectRequestJobSpec, + DirectRequestSpec: &presenters.DirectRequestSpec{ + CreatedAt: now, + }, + PipelineSpec: presenters.PipelineSpec{ + DotDAGSource: " ds1 [type=http method=GET url=\"example.com\" allowunrestrictednetworkaccess=\"true\"];\n ds1_parse [type=jsonparse path=\"USD\"];\n ds1_multiply [type=multiply times=100];\n ds1 -\u003e ds1_parse -\u003e ds1_multiply;\n", + }, + }, + } + + assert.Equal(t, [][]string{ + {"1", "Test Job", "directrequest", "ds1 http", now.Format(time.RFC3339)}, + {"1", "Test Job", "directrequest", "ds1_parse jsonparse", now.Format(time.RFC3339)}, + {"1", "Test Job", "directrequest", "ds1_multiply multiply", now.Format(time.RFC3339)}, + }, job.ToRows()) + + // Produce a single row even if there is not DAG + job.PipelineSpec.DotDAGSource = "" + assert.Equal(t, [][]string{ + {"1", "Test Job", "directrequest", "", now.Format(time.RFC3339)}, + }, job.ToRows()) +} + +//go:embed direct-request-spec-template.yml +var directRequestSpecTemplate string + +func getDirectRequestSpec() string { + return fmt.Sprintf(directRequestSpecTemplate, uuid.New(), uuid.New()) +} + +func TestShell_ListFindJobs(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + }) + client, r := app.NewShellAndRenderer() + + // Create the job + fs := flag.NewFlagSet("", flag.ExitOnError) + flagSetApplyFromAction(client.CreateJob, fs, "") + + require.NoError(t, fs.Parse([]string{getDirectRequestSpec()})) + + err := client.CreateJob(cli.NewContext(nil, fs, nil)) + require.NoError(t, err) + require.Len(t, r.Renders, 1) + createOutput, ok := r.Renders[0].(*cmd.JobPresenter) + require.True(t, ok, "Expected Renders[0] to be *cmd.JobPresenter, got %T", r.Renders[0]) + + require.Nil(t, 
client.ListJobs(cltest.EmptyCLIContext())) + jobs := *r.Renders[1].(*cmd.JobPresenters) + require.Equal(t, 1, len(jobs)) + assert.Equal(t, createOutput.ID, jobs[0].ID) +} + +func TestShell_ShowJob(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + }) + client, r := app.NewShellAndRenderer() + + // Create the job + fs := flag.NewFlagSet("", flag.ExitOnError) + flagSetApplyFromAction(client.CreateJob, fs, "") + + require.NoError(t, fs.Parse([]string{getDirectRequestSpec()})) + + err := client.CreateJob(cli.NewContext(nil, fs, nil)) + require.NoError(t, err) + require.Len(t, r.Renders, 1) + createOutput, ok := r.Renders[0].(*cmd.JobPresenter) + require.True(t, ok, "Expected Renders[0] to be *cmd.JobPresenter, got %T", r.Renders[0]) + + set := flag.NewFlagSet("test", 0) + err = set.Parse([]string{createOutput.ID}) + require.NoError(t, err) + c := cli.NewContext(nil, set, nil) + + require.NoError(t, client.ShowJob(c)) + job := *r.Renders[0].(*cmd.JobPresenter) + assert.Equal(t, createOutput.ID, job.ID) +} + +//go:embed ocr-bootstrap-spec.yml +var ocrBootstrapSpec string + +func TestShell_CreateJobV2(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(100 * time.Millisecond) + c.OCR.Enabled = ptr(true) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + c.P2P.PeerID = &cltest.DefaultP2PPeerID + c.EVM[0].Enabled = ptr(true) + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + c.EVM[0].GasEstimator.Mode = ptr("FixedPrice") + }, func(opts *startOptions) { + opts.FlagsAndDeps = append(opts.FlagsAndDeps, cltest.DefaultP2PKey) + }) + client, r := app.NewShellAndRenderer() + + requireJobsCount(t, app.JobORM(), 0) + + fs := flag.NewFlagSet("", 
flag.ExitOnError) + flagSetApplyFromAction(client.CreateJob, fs, "") + + nameAndExternalJobID := uuid.New() + spec := fmt.Sprintf(ocrBootstrapSpec, nameAndExternalJobID, nameAndExternalJobID) + require.NoError(t, fs.Parse([]string{spec})) + + err := client.CreateJob(cli.NewContext(nil, fs, nil)) + require.NoError(t, err) + + requireJobsCount(t, app.JobORM(), 1) + + output := *r.Renders[0].(*cmd.JobPresenter) + assert.Equal(t, presenters.JobSpecType("offchainreporting"), output.Type) + assert.Equal(t, uint32(1), output.SchemaVersion) + assert.Equal(t, "0x27548a32b9aD5D64c5945EaE9Da5337bc3169D15", output.OffChainReportingSpec.ContractAddress.String()) +} + +func TestShell_DeleteJob(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(100 * time.Millisecond) + c.EVM[0].Enabled = ptr(true) + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + c.EVM[0].GasEstimator.Mode = ptr("FixedPrice") + }) + client, r := app.NewShellAndRenderer() + + // Create the job + fs := flag.NewFlagSet("", flag.ExitOnError) + flagSetApplyFromAction(client.CreateJob, fs, "") + + require.NoError(t, fs.Parse([]string{getDirectRequestSpec()})) + + err := client.CreateJob(cli.NewContext(nil, fs, nil)) + require.NoError(t, err) + require.NotEmpty(t, r.Renders) + + output := *r.Renders[0].(*cmd.JobPresenter) + + requireJobsCount(t, app.JobORM(), 1) + + jobs, _, err := app.JobORM().FindJobs(0, 1000) + require.NoError(t, err) + jobID := jobs[0].ID + cltest.AwaitJobActive(t, app.JobSpawner(), jobID, 3*time.Second) + + // Must supply job id + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteJob, set, "") + c := cli.NewContext(nil, set, nil) + require.Equal(t, "must pass the job id to be archived", client.DeleteJob(c).Error()) + + set = flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteJob, set, "") + + 
require.NoError(t, set.Parse([]string{output.ID})) + + c = cli.NewContext(nil, set, nil) + require.NoError(t, client.DeleteJob(c)) + + requireJobsCount(t, app.JobORM(), 0) +} + +func requireJobsCount(t *testing.T, orm job.ORM, expected int) { + jobs, _, err := orm.FindJobs(0, 1000) + require.NoError(t, err) + require.Len(t, jobs, expected) +} diff --git a/core/cmd/key_store_authenticator.go b/core/cmd/key_store_authenticator.go new file mode 100644 index 00000000..d690a8e8 --- /dev/null +++ b/core/cmd/key_store_authenticator.go @@ -0,0 +1,80 @@ +package cmd + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// TerminalKeyStoreAuthenticator contains fields for prompting the user and an +// exit code. +type TerminalKeyStoreAuthenticator struct { + Prompter Prompter +} + +type keystorePassword interface { + Keystore() string +} + +func (auth TerminalKeyStoreAuthenticator) authenticate(keyStore keystore.Master, password keystorePassword) error { + isEmpty, err := keyStore.IsEmpty() + if err != nil { + return errors.Wrap(err, "error determining if keystore is empty") + } + pw := password.Keystore() + + if len(pw) != 0 { + // Because we changed password requirements to increase complexity, to + // not break backward compatibility we enforce this only for empty key + // stores. 
+ if err = auth.validatePasswordStrength(pw); err != nil && isEmpty { + return err + } + return keyStore.Unlock(pw) + } + interactive := auth.Prompter.IsTerminal() + if !interactive { + return errors.New("no password provided") + } else if !isEmpty { + pw = auth.promptExistingPassword() + } else { + pw, err = auth.promptNewPassword() + } + if err != nil { + return err + } + return keyStore.Unlock(pw) +} + +func (auth TerminalKeyStoreAuthenticator) validatePasswordStrength(password string) error { + return utils.VerifyPasswordComplexity(password) +} + +func (auth TerminalKeyStoreAuthenticator) promptExistingPassword() string { + password := auth.Prompter.PasswordPrompt("Enter key store password:") + return password +} + +func (auth TerminalKeyStoreAuthenticator) promptNewPassword() (string, error) { + for { + password := auth.Prompter.PasswordPrompt("New key store password: ") + if err := auth.validatePasswordStrength(password); err != nil { + return "", err + } + if strings.TrimSpace(password) != password { + return "", utils.ErrPasswordWhitespace + } + clearLine() + passwordConfirmation := auth.Prompter.PasswordPrompt("Confirm password: ") + clearLine() + if password != passwordConfirmation { + fmt.Printf("Passwords don't match. Please try again... ") + continue + } + return password, nil + } +} diff --git a/core/cmd/keys_commands.go b/core/cmd/keys_commands.go new file mode 100644 index 00000000..b4d4916d --- /dev/null +++ b/core/cmd/keys_commands.go @@ -0,0 +1,260 @@ +package cmd + +import ( + "bytes" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// KeysClient is a generic client interface for any type of key. 
+type KeysClient interface { + CreateKey(*cli.Context) error + ImportKey(*cli.Context) error + ExportKey(*cli.Context) error + DeleteKey(*cli.Context) error + ListKeys(*cli.Context) error +} + +// keysCommand returns a cli.Command with subcommands for the given KeysClient. +func keysCommand(typ string, c KeysClient) cli.Command { + lower := strings.ToLower(typ) + return cli.Command{ + Name: lower, + Usage: fmt.Sprintf("Remote commands for administering the node's %s keys", typ), + Subcommands: cli.Commands{ + { + Name: "create", + Usage: fmt.Sprintf("Create a %s key", typ), + Action: c.CreateKey, + }, + { + Name: "import", + Usage: fmt.Sprintf("Import %s key from keyfile", typ), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "old-password, oldpassword, p", + Usage: "`FILE` containing the password used to encrypt the key in the JSON file", + }, + }, + Action: c.ImportKey, + }, + { + Name: "export", + Usage: fmt.Sprintf("Export %s key to keyfile", typ), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "new-password, newpassword, p", + Usage: "`FILE` containing the password to encrypt the key (required)", + }, + cli.StringFlag{ + Name: "output, o", + Usage: "`FILE` where the JSON file will be saved (required)", + }, + }, + Action: c.ExportKey, + }, + { + Name: "delete", + Usage: fmt.Sprintf("Delete %s key if present", typ), + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "yes, y", + Usage: "skip the confirmation prompt", + }, + cli.BoolFlag{ + Name: "hard", + Usage: "hard-delete the key instead of archiving (irreversible!)", + }, + }, + Action: c.DeleteKey, + }, + { + Name: "list", Usage: fmt.Sprintf("List the %s keys", typ), + Action: c.ListKeys, + }, + }, + } +} + +type keysClient[K keystore.Key, P TableRenderer, P2 ~[]P] struct { + *Shell + typ string + path string +} + +// newKeysClient returns a new KeysClient for a particular type of keystore.Key. +// P is a TableRenderer corresponding to K, and P2 is the slice variant. 
+func newKeysClient[K keystore.Key, P TableRenderer, P2 ~[]P](typ string, s *Shell) KeysClient { + lower := strings.ToLower(typ) + return &keysClient[K, P, P2]{ + Shell: s, + typ: typ, + path: "/v2/keys/" + lower, + } +} + +// ListKeys retrieves a list of all keys +func (cli *keysClient[K, P, P2]) ListKeys(_ *cli.Context) (err error) { + resp, err := cli.HTTP.Get(cli.ctx(), cli.path, nil) + if err != nil { + return cli.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var p2 P2 + return cli.renderAPIResponse(resp, &p2) +} + +// CreateKey creates a new key +func (cli *keysClient[K, P, P2]) CreateKey(_ *cli.Context) (err error) { + resp, err := cli.HTTP.Post(cli.ctx(), cli.path, nil) + if err != nil { + return cli.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var p P + return cli.renderAPIResponse(resp, &p, fmt.Sprintf("Created %s keypair", cli.typ)) +} + +// DeleteKey deletes a key, +// key ID must be passed +func (cli *keysClient[K, P, P2]) DeleteKey(c *cli.Context) (err error) { + if !c.Args().Present() { + return cli.errorOut(errors.New("Must pass the key ID to be deleted")) + } + id := c.Args().Get(0) + + if !confirmAction(c) { + return nil + } + + var queryStr string + if c.Bool("hard") { + queryStr = "?hard=true" + } + + resp, err := cli.HTTP.Delete(cli.ctx(), fmt.Sprintf(cli.path+"/%s%s", id, queryStr)) + if err != nil { + return cli.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var p P + return cli.renderAPIResponse(resp, &p, " key deleted") +} + +// ImportKey imports and stores a key, +// path to key must be passed +func (cli *keysClient[K, P, P2]) ImportKey(c *cli.Context) (err error) { + if !c.Args().Present() { + return cli.errorOut(errors.New("Must pass the filepath of the key to be imported")) + } + + 
oldPasswordFile := c.String("old-password") + if len(oldPasswordFile) == 0 { + return cli.errorOut(errors.New("Must specify --old-password/-p flag")) + } + oldPassword, err := os.ReadFile(oldPasswordFile) + if err != nil { + return cli.errorOut(errors.Wrap(err, "Could not read password file")) + } + + filepath := c.Args().Get(0) + keyJSON, err := os.ReadFile(filepath) + if err != nil { + return cli.errorOut(err) + } + + normalizedPassword := normalizePassword(string(oldPassword)) + resp, err := cli.HTTP.Post(cli.ctx(), cli.path+"/import?oldpassword="+normalizedPassword, bytes.NewReader(keyJSON)) + if err != nil { + return cli.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var p P + return cli.renderAPIResponse(resp, &p, fmt.Sprintf("🔑 Imported %s key", cli.typ)) +} + +// ExportKey exports a key, +// key ID must be passed +func (cli *keysClient[K, P, P2]) ExportKey(c *cli.Context) (err error) { + if !c.Args().Present() { + return cli.errorOut(errors.New("Must pass the ID of the key to export")) + } + + newPasswordFile := c.String("new-password") + if len(newPasswordFile) == 0 { + return cli.errorOut(errors.New("Must specify --new-password/-p flag")) + } + newPassword, err := os.ReadFile(newPasswordFile) + if err != nil { + return cli.errorOut(errors.Wrap(err, "Could not read password file")) + } + + filepath := c.String("output") + if len(filepath) == 0 { + return cli.errorOut(errors.New("Must specify --output/-o flag")) + } + + ID := c.Args().Get(0) + + normalizedPassword := normalizePassword(string(newPassword)) + resp, err := cli.HTTP.Post(cli.ctx(), cli.path+"/export/"+ID+"?newpassword="+normalizedPassword, nil) + if err != nil { + return cli.errorOut(errors.Wrap(err, "Could not make HTTP request")) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + if resp.StatusCode != http.StatusOK { + return 
cli.errorOut(fmt.Errorf("error exporting: %w", httpError(resp))) + } + + keyJSON, err := io.ReadAll(resp.Body) + if err != nil { + return cli.errorOut(errors.Wrap(err, "Could not read response body")) + } + + err = utils.WriteFileWithMaxPerms(filepath, keyJSON, 0600) + if err != nil { + return cli.errorOut(errors.Wrapf(err, "Could not write %v", filepath)) + } + + _, err = os.Stderr.WriteString(fmt.Sprintf("🔑 Exported %s key %s to %s\n", cli.typ, ID, filepath)) + if err != nil { + return cli.errorOut(err) + } + + return nil +} diff --git a/core/cmd/mocks/prompter.go b/core/cmd/mocks/prompter.go new file mode 100644 index 00000000..a05d24d6 --- /dev/null +++ b/core/cmd/mocks/prompter.go @@ -0,0 +1,78 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Prompter is an autogenerated mock type for the Prompter type +type Prompter struct { + mock.Mock +} + +// IsTerminal provides a mock function with given fields: +func (_m *Prompter) IsTerminal() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsTerminal") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// PasswordPrompt provides a mock function with given fields: _a0 +func (_m *Prompter) PasswordPrompt(_a0 string) string { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for PasswordPrompt") + } + + var r0 string + if rf, ok := ret.Get(0).(func(string) string); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Prompt provides a mock function with given fields: _a0 +func (_m *Prompter) Prompt(_a0 string) string { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Prompt") + } + + var r0 string + if rf, ok := ret.Get(0).(func(string) string); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(string) + } + + return 
r0 +} + +// NewPrompter creates a new instance of Prompter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPrompter(t interface { + mock.TestingT + Cleanup(func()) +}) *Prompter { + mock := &Prompter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/cmd/nodes_commands.go b/core/cmd/nodes_commands.go new file mode 100644 index 00000000..efee10bb --- /dev/null +++ b/core/cmd/nodes_commands.go @@ -0,0 +1,68 @@ +package cmd + +import ( + "fmt" + "strings" + + "github.com/urfave/cli" +) + +func initCosmosNodeSubCmd(s *Shell) cli.Command { + return nodeCommand("Cosmos", NewCosmosNodeClient(s)) +} + +func initStarkNetNodeSubCmd(s *Shell) cli.Command { + return nodeCommand("StarkNet", NewStarkNetNodeClient(s)) +} + +func initEVMNodeSubCmd(s *Shell) cli.Command { + return nodeCommand("EVM", NewEVMNodeClient(s)) +} + +func initSolanaNodeSubCmd(s *Shell) cli.Command { + return nodeCommand("Solana", NewSolanaNodeClient(s)) +} + +// nodeCommand returns a cli.Command with subcommands for the given NodeClient. +// A string cli.Flag for "name" is automatically included. +func nodeCommand(typ string, client NodeClient) cli.Command { + lower := strings.ToLower(typ) + return cli.Command{ + Name: lower, + Usage: fmt.Sprintf("Commands for handling %s node configuration", typ), + Subcommands: cli.Commands{ + { + Name: "list", + Usage: fmt.Sprintf("List all existing %s nodes", typ), + Action: client.IndexNodes, + }, + }, + } +} + +// NodeClient is a generic client interface for any of node. +type NodeClient interface { + IndexNodes(c *cli.Context) error +} + +type nodeClient[P TableRenderer] struct { + *Shell + path string +} + +// newNodeClient returns a new NodeClient for a particular type of NodeStatus. +// P is a TableRenderer for []types.NodeStatus. 
+func newNodeClient[P TableRenderer](s *Shell, name string) NodeClient { + return &nodeClient[P]{ + Shell: s, + path: "/v2/nodes/" + name, + } +} + +// IndexNodes returns all nodes. +func (cli *nodeClient[P2]) IndexNodes(c *cli.Context) (err error) { + var p P2 + return cli.getPage(cli.path, c.Int("page"), &p) +} + +var nodeHeaders = []string{"Name", "Chain ID", "State", "Config"} diff --git a/core/cmd/ocr-bootstrap-spec.yml b/core/cmd/ocr-bootstrap-spec.yml new file mode 100644 index 00000000..058fdeb2 --- /dev/null +++ b/core/cmd/ocr-bootstrap-spec.yml @@ -0,0 +1,8 @@ +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x27548a32b9aD5D64c5945EaE9Da5337bc3169D15" +externalJobID = "%s" +name = "%s" +evmChainID = "0" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = true diff --git a/core/cmd/ocr2_keys_commands.go b/core/cmd/ocr2_keys_commands.go new file mode 100644 index 00000000..a1ad1948 --- /dev/null +++ b/core/cmd/ocr2_keys_commands.go @@ -0,0 +1,288 @@ +package cmd + +import ( + "bytes" + "fmt" + "io" + "net/http" + "os" + + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + + cutils "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initOCR2KeysSubCmd(s *Shell) cli.Command { + return cli.Command{ + Name: "ocr2", + Usage: "Remote commands for administering the node's off chain reporting keys", + Subcommands: cli.Commands{ + { + Name: "create", + Usage: format(`Create an OCR2 key bundle, encrypted with password from the password file, and store it in the database`), + Action: s.CreateOCR2KeyBundle, + }, + { + Name: "delete", + Usage: format(`Deletes the encrypted OCR2 key bundle matching the given ID`), + Flags: 
[]cli.Flag{ + cli.BoolFlag{ + Name: "yes, y", + Usage: "skip the confirmation prompt", + }, + cli.BoolFlag{ + Name: "hard", + Usage: "hard-delete the key instead of archiving (irreversible!)", + }, + }, + Action: s.DeleteOCR2KeyBundle, + }, + { + Name: "list", + Usage: format(`List available OCR2 key bundles`), + Action: s.ListOCR2KeyBundles, + }, + { + Name: "import", + Usage: format(`Imports an OCR2 key bundle from a JSON file`), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "old-password, oldpassword, p", + Usage: "`FILE` containing the password used to encrypt the key in the JSON file", + }, + }, + Action: s.ImportOCR2Key, + }, + { + Name: "export", + Usage: format(`Exports an OCR2 key bundle to a JSON file`), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "new-password, newpassword, p", + Usage: "`FILE` containing the password to encrypt the key (required)", + }, + cli.StringFlag{ + Name: "output, o", + Usage: "`FILE` where the JSON file will be saved (required)", + }, + }, + Action: s.ExportOCR2Key, + }, + }, + } +} + +type OCR2KeyBundlePresenter struct { + JAID // Include this to overwrite the presenter JAID so it can correctly render the ID in JSON + presenters.OCR2KeysBundleResource +} + +// RenderTable implements TableRenderer +func (p *OCR2KeyBundlePresenter) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Type", "On-chain pubkey", "Off-chain pubkey", "Config pubkey"} + rows := [][]string{p.ToRow()} + + if _, err := rt.Write([]byte("🔑 OCR Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return cutils.JustError(rt.Write([]byte("\n"))) +} + +func (p *OCR2KeyBundlePresenter) ToRow() []string { + return []string{ + p.ID, + p.ChainType, + p.OnchainPublicKey, + p.OffChainPublicKey, + p.ConfigPublicKey, + } +} + +type OCR2KeyBundlePresenters []OCR2KeyBundlePresenter + +// RenderTable implements TableRenderer +func (ps OCR2KeyBundlePresenters) RenderTable(rt RendererTable) error { + headers := []string{"ID", 
"Type", "On-chain pubkey", "Off-chain pubkey", "Config pubkey"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + if _, err := rt.Write([]byte("🔑 OCR Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return cutils.JustError(rt.Write([]byte("\n"))) +} + +// ListOCR2KeyBundles lists the available OCR2 Key Bundles +func (s *Shell) ListOCR2KeyBundles(_ *cli.Context) error { + resp, err := s.HTTP.Get(s.ctx(), "/v2/keys/ocr2", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var presenters OCR2KeyBundlePresenters + return s.renderAPIResponse(resp, &presenters) +} + +// CreateOCR2KeyBundle creates an OCR2 key bundle and saves it to the keystore +func (s *Shell) CreateOCR2KeyBundle(c *cli.Context) error { + if !c.Args().Present() { + return s.errorOut( + errors.Errorf(`must pass the type to create, options are: %s`, chaintype.SupportedChainTypes.String()), + ) + } + chainType := c.Args().Get(0) + resp, err := s.HTTP.Post(s.ctx(), fmt.Sprintf("/v2/keys/ocr2/%s", chainType), nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var presenter OCR2KeyBundlePresenter + return s.renderAPIResponse(resp, &presenter, "Created OCR key bundle") +} + +// DeleteOCR2KeyBundle deletes an OCR2 key bundle +func (s *Shell) DeleteOCR2KeyBundle(c *cli.Context) error { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the key ID to be deleted")) + } + id, err := models.Sha256HashFromHex(c.Args().Get(0)) + if err != nil { + return s.errorOut(err) + } + + if !confirmAction(c) { + return nil + } + + var queryStr string + if c.Bool("hard") { + queryStr = "?hard=true" + } + + resp, err := s.HTTP.Delete(s.ctx(), fmt.Sprintf("/v2/keys/ocr2/%s%s", id, queryStr)) + if err != nil { + return 
s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var presenter OCR2KeyBundlePresenter + return s.renderAPIResponse(resp, &presenter, "OCR key bundle deleted") +} + +// ImportOCR2Key imports OCR2 key bundle +func (s *Shell) ImportOCR2Key(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the filepath of the key to be imported")) + } + + oldPasswordFile := c.String("old-password") + if len(oldPasswordFile) == 0 { + return s.errorOut(errors.New("Must specify --old-password/-p flag")) + } + oldPassword, err := os.ReadFile(oldPasswordFile) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read password file")) + } + + filepath := c.Args().Get(0) + keyJSON, err := os.ReadFile(filepath) + if err != nil { + return s.errorOut(err) + } + + normalizedPassword := normalizePassword(string(oldPassword)) + resp, err := s.HTTP.Post(s.ctx(), "/v2/keys/ocr2/import?oldpassword="+normalizedPassword, bytes.NewReader(keyJSON)) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var presenter OCR2KeyBundlePresenter + return s.renderAPIResponse(resp, &presenter, "Imported OCR key bundle") +} + +// ExportOCR2Key exports an OCR2 key bundle by ID +func (s *Shell) ExportOCR2Key(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the ID of the key to export")) + } + + newPasswordFile := c.String("new-password") + if len(newPasswordFile) == 0 { + return s.errorOut(errors.New("Must specify --new-password/-p flag")) + } + newPassword, err := os.ReadFile(newPasswordFile) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read password file")) + } + + filepath := c.String("output") + if len(filepath) == 0 { + return s.errorOut(errors.New("Must specify --output/-o flag")) + } + + ID := 
c.Args().Get(0) + + normalizedPassword := normalizePassword(string(newPassword)) + resp, err := s.HTTP.Post(s.ctx(), "/v2/keys/ocr2/export/"+ID+"?newpassword="+normalizedPassword, nil) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not make HTTP request")) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + if resp.StatusCode != http.StatusOK { + return s.errorOut(fmt.Errorf("error exporting: %w", httpError(resp))) + } + + keyJSON, err := io.ReadAll(resp.Body) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read response body")) + } + + err = utils.WriteFileWithMaxPerms(filepath, keyJSON, 0o600) + if err != nil { + return s.errorOut(errors.Wrapf(err, "Could not write %v", filepath)) + } + + _, err = os.Stderr.WriteString(fmt.Sprintf("Exported OCR key bundle %s to %s", ID, filepath)) + if err != nil { + return s.errorOut(err) + } + + return nil +} diff --git a/core/cmd/ocr2_keys_commands_test.go b/core/cmd/ocr2_keys_commands_test.go new file mode 100644 index 00000000..a5c91389 --- /dev/null +++ b/core/cmd/ocr2_keys_commands_test.go @@ -0,0 +1,195 @@ +package cmd_test + +import ( + "bytes" + "encoding/hex" + "flag" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestOCR2KeyBundlePresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + bundleID = "f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5" + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + key := cltest.DefaultOCR2Key + 
pubKeyConfig := key.ConfigEncryptionPublicKey() + pubKey := key.OffchainPublicKey() + p := cmd.OCR2KeyBundlePresenter{ + JAID: cmd.NewJAID(bundleID), + OCR2KeysBundleResource: presenters.OCR2KeysBundleResource{ + JAID: presenters.NewJAID(key.ID()), + ChainType: "evm", + OnchainPublicKey: key.OnChainPublicKey(), + OffChainPublicKey: hex.EncodeToString(pubKey[:]), + ConfigPublicKey: hex.EncodeToString(pubKeyConfig[:]), + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, bundleID) + assert.Contains(t, output, key.ChainType()) + assert.Contains(t, output, key.OnChainPublicKey()) + assert.Contains(t, output, hex.EncodeToString(pubKey[:])) + assert.Contains(t, output, hex.EncodeToString(pubKeyConfig[:])) + + // Render many resources + buffer.Reset() + ps := cmd.OCR2KeyBundlePresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, bundleID) + assert.Contains(t, output, key.OnChainPublicKey()) + assert.Contains(t, output, hex.EncodeToString(pubKey[:])) + pubKeyConfig = key.ConfigEncryptionPublicKey() + assert.Contains(t, output, hex.EncodeToString(pubKeyConfig[:])) +} + +func TestShell_OCR2Keys(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + ks := app.GetKeyStore().OCR2() + cleanup := func() { + keys, err := app.GetKeyStore().OCR2().GetAll() + require.NoError(t, err) + for _, key := range keys { + require.NoError(t, ks.Delete(key.ID())) + } + requireOCR2KeyCount(t, app, 0) + } + + t.Run("ListOCR2KeyBundles", func(tt *testing.T) { + defer cleanup() + client, r := app.NewShellAndRenderer() + + key, err := app.GetKeyStore().OCR2().Create("evm") + require.NoError(t, err) + requireOCR2KeyCount(t, app, 1) + assert.Nil(t, client.ListOCR2KeyBundles(cltest.EmptyCLIContext())) + require.Equal(t, 1, len(r.Renders)) + output := *r.Renders[0].(*cmd.OCR2KeyBundlePresenters) + require.Equal(t, key.ID(), output[0].ID) + }) 
+ + t.Run("CreateOCR2KeyBundle", func(tt *testing.T) { + defer cleanup() + client, r := app.NewShellAndRenderer() + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.CreateOCR2KeyBundle, set, "") + + require.NoError(tt, set.Parse([]string{"evm"})) + + c := cli.NewContext(nil, set, nil) + require.NoError(t, client.CreateOCR2KeyBundle(c)) + keys, err := app.GetKeyStore().OCR2().GetAll() + require.NoError(t, err) + require.Len(t, keys, 1) + require.Equal(t, 1, len(r.Renders)) + output := (*r.Renders[0].(*cmd.OCR2KeyBundlePresenter)) + require.Equal(t, output.ID, keys[0].ID()) + }) + + t.Run("DeleteOCR2KeyBundle", func(tt *testing.T) { + defer cleanup() + client, r := app.NewShellAndRenderer() + + key, err := app.GetKeyStore().OCR2().Create("evm") + require.NoError(t, err) + requireOCR2KeyCount(t, app, 1) + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteOCR2KeyBundle, set, "") + + require.NoError(tt, set.Parse([]string{key.ID()})) + require.NoError(tt, set.Set("yes", "true")) + + c := cli.NewContext(nil, set, nil) + require.NoError(t, client.DeleteOCR2KeyBundle(c)) + requireOCR2KeyCount(t, app, 0) + require.Equal(t, 1, len(r.Renders)) + output := *r.Renders[0].(*cmd.OCR2KeyBundlePresenter) + assert.Equal(t, key.ID(), output.ID) + + }) + + t.Run("ImportExportOCR2Key", func(tt *testing.T) { + defer cleanup() + defer deleteKeyExportFile(t) + client, _ := app.NewShellAndRenderer() + + err := app.KeyStore.OCR2().Add(cltest.DefaultOCR2Key) + require.NoError(t, err) + + keys := requireOCR2KeyCount(t, app, 1) + key := keys[0] + keyName := keyNameForTest(t) + + // Export test invalid id + set := flag.NewFlagSet("test OCR2 export", 0) + flagSetApplyFromAction(client.ExportOCR2Key, set, "") + + require.NoError(tt, set.Parse([]string{"0"})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/new_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c := cli.NewContext(nil, set, nil) + err = 
client.ExportOCR2Key(c) + require.Error(t, err, "Error exporting") + require.Error(t, utils.JustError(os.Stat(keyName))) + + // Export + set = flag.NewFlagSet("test OCR2 export", 0) + flagSetApplyFromAction(client.ExportOCR2Key, set, "") + + require.NoError(tt, set.Parse([]string{key.ID()})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/new_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c = cli.NewContext(nil, set, nil) + + require.NoError(t, client.ExportOCR2Key(c)) + require.NoError(t, utils.JustError(os.Stat(keyName))) + + require.NoError(t, app.GetKeyStore().OCR2().Delete(key.ID())) + requireOCR2KeyCount(t, app, 0) + + set = flag.NewFlagSet("test OCR2 import", 0) + flagSetApplyFromAction(client.ImportOCR2Key, set, "") + + require.NoError(tt, set.Parse([]string{keyName})) + require.NoError(tt, set.Set("old-password", "../internal/fixtures/new_password.txt")) + + c = cli.NewContext(nil, set, nil) + require.NoError(t, client.ImportOCR2Key(c)) + + requireOCR2KeyCount(t, app, 1) + }) +} + +func requireOCR2KeyCount(t *testing.T, app plugin.Application, length int) []ocr2key.KeyBundle { + keys, err := app.GetKeyStore().OCR2().GetAll() + require.NoError(t, err) + require.Len(t, keys, length) + return keys +} diff --git a/core/cmd/ocr2vrf_configure_commands.go b/core/cmd/ocr2vrf_configure_commands.go new file mode 100644 index 00000000..572af09d --- /dev/null +++ b/core/cmd/ocr2vrf_configure_commands.go @@ -0,0 +1,509 @@ +package cmd + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/pelletier/go-toml" + "github.com/pkg/errors" + "github.com/urfave/cli" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_forwarder" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/static" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type SetupOCR2VRFNodePayload struct { + OnChainPublicKey string + OffChainPublicKey string + ConfigPublicKey string + PeerID string + Transmitter string + DkgEncrypt string + DkgSign string + SendingKeys []string +} + +type dkgTemplateArgs struct { + contractID string + ocrKeyBundleID string + p2pv2BootstrapperPeerID string + p2pv2BootstrapperPort string + transmitterID string + useForwarder bool + chainID int64 + encryptionPublicKey string + keyID string + signingPublicKey string +} + +type ocr2vrfTemplateArgs struct { + dkgTemplateArgs + vrfBeaconAddress string + vrfCoordinatorAddress string + linkEthFeedAddress string + sendingKeys []string +} + +const DKGTemplate = ` +# DKGSpec +type = "offchainreporting2" +schemaVersion = 1 +name = "ocr2" +maxTaskDuration = "30s" +contractID = "%s" +ocrKeyBundleID = "%s" +relay = "evm" +pluginType = "dkg" +transmitterID = "%s" +forwardingAllowed = %t +%s + +[relayConfig] +chainID = %d + +[pluginConfig] +EncryptionPublicKey = "%s" +KeyID = "%s" +SigningPublicKey = "%s" +` + +const OCR2VRFTemplate = ` +type = "offchainreporting2" +schemaVersion = 1 +name = "ocr2vrf-chainID-%d" +maxTaskDuration = "30s" +contractID = "%s" +ocrKeyBundleID = "%s" +relay = "evm" +pluginType = "ocr2vrf" +transmitterID = "%s" +forwardingAllowed = %t +%s + +[relayConfig] +chainID = %d +sendingKeys = [%s] + +[pluginConfig] +dkgEncryptionPublicKey = 
"%s" +dkgSigningPublicKey = "%s" +dkgKeyID = "%s" +dkgContractAddress = "%s" + +vrfCoordinatorAddress = "%s" +linkEthFeedAddress = "%s" +` + +const BootstrapTemplate = ` +type = "bootstrap" +schemaVersion = 1 +name = "bootstrap-chainID-%d" +id = "1" +contractID = "%s" +relay = "evm" + +[relayConfig] +chainID = %d +` + +const forwarderAdditionalEOACount = 4 + +func (s *Shell) ConfigureOCR2VRFNode(c *cli.Context, owner *bind.TransactOpts, ec *ethclient.Client) (*SetupOCR2VRFNodePayload, error) { + ctx := s.ctx() + lggr := logger.Sugared(s.Logger.Named("ConfigureOCR2VRFNode")) + lggr.Infow( + fmt.Sprintf("Configuring Plugin Node for job type %s %s at commit %s", c.String("job-type"), static.Version, static.Sha), + "Version", static.Version, "SHA", static.Sha) + + var pwd, vrfpwd *string + if passwordFile := c.String("password"); passwordFile != "" { + p, err := utils.PasswordFromFile(passwordFile) + if err != nil { + return nil, errors.Wrap(err, "error reading password from file") + } + pwd = &p + } + if vrfPasswordFile := c.String("vrfpassword"); len(vrfPasswordFile) != 0 { + p, err := utils.PasswordFromFile(vrfPasswordFile) + if err != nil { + return nil, errors.Wrapf(err, "error reading VRF password from vrfpassword file \"%s\"", vrfPasswordFile) + } + vrfpwd = &p + } + + s.Config.SetPasswords(pwd, vrfpwd) + + err := s.Config.Validate() + if err != nil { + return nil, s.errorOut(errors.Wrap(err, "config validation failed")) + } + + cfg := s.Config + ldb := pg.NewLockedDB(cfg.AppID(), cfg.Database(), cfg.Database().Lock(), lggr) + + if err = ldb.Open(ctx); err != nil { + return nil, s.errorOut(errors.Wrap(err, "opening db")) + } + defer lggr.ErrorIfFn(ldb.Close, "Error closing db") + + app, err := s.AppFactory.NewApplication(ctx, s.Config, lggr, ldb.DB()) + if err != nil { + return nil, s.errorOut(errors.Wrap(err, "fatal error instantiating application")) + } + + chainID := c.Int64("chainID") + + // Initialize keystore and generate keys. 
+ keyStore := app.GetKeyStore() + err = setupKeystore(s, app, keyStore) + if err != nil { + return nil, s.errorOut(err) + } + + // Start application. + err = app.Start(ctx) + if err != nil { + return nil, s.errorOut(err) + } + + // Close application. + defer lggr.ErrorIfFn(app.Stop, "Failed to Stop application") + + // Initialize transmitter settings. + var sendingKeys []string + var sendingKeysAddresses []common.Address + useForwarder := c.Bool("use-forwarder") + ethKeys, err := app.GetKeyStore().Eth().EnabledKeysForChain(big.NewInt(chainID)) + if err != nil { + return nil, s.errorOut(err) + } + transmitterID := ethKeys[0].Address.String() + + // Populate sendingKeys with current ETH keys. + for _, k := range ethKeys { + sendingKeys = append(sendingKeys, k.Address.String()) + sendingKeysAddresses = append(sendingKeysAddresses, k.Address) + } + + if useForwarder { + // Add extra sending keys if using a forwarder. + sendingKeys, sendingKeysAddresses, err = s.appendForwarders(chainID, app.GetKeyStore().Eth(), sendingKeys, sendingKeysAddresses) + if err != nil { + return nil, err + } + err = s.authorizeForwarder(c, ldb.DB(), lggr, chainID, ec, owner, sendingKeysAddresses) + if err != nil { + return nil, err + } + } + + // Get all configuration parameters. + keyID := c.String("keyID") + dkgEncrypt, _ := app.GetKeyStore().DKGEncrypt().GetAll() + dkgSign, _ := app.GetKeyStore().DKGSign().GetAll() + dkgEncryptKey := dkgEncrypt[0].PublicKeyString() + dkgSignKey := dkgSign[0].PublicKeyString() + p2p, _ := app.GetKeyStore().P2P().GetAll() + ocr2List, _ := app.GetKeyStore().OCR2().GetAll() + peerID := p2p[0].PeerID().Raw() + if !c.Bool("isBootstrapper") { + peerID = c.String("bootstrapperPeerID") + } + + // Find the EVM OCR2 bundle. 
+ var ocr2 ocr2key.KeyBundle + for _, ocr2Item := range ocr2List { + if ocr2Item.ChainType() == chaintype.EVM { + ocr2 = ocr2Item + } + } + if ocr2 == nil { + return nil, s.errorOut(errors.Wrap(job.ErrNoSuchKeyBundle, "evm OCR2 key bundle not found")) + } + offChainPublicKey := ocr2.OffchainPublicKey() + configPublicKey := ocr2.ConfigEncryptionPublicKey() + + if c.Bool("isBootstrapper") { + // Set up bootstrapper job if bootstrapper. + err = createBootstrapperJob(ctx, lggr, c, app) + } else if c.String("job-type") == "DKG" { + // Set up DKG job. + err = createDKGJob(ctx, lggr, app, dkgTemplateArgs{ + contractID: c.String("contractID"), + ocrKeyBundleID: ocr2.ID(), + p2pv2BootstrapperPeerID: peerID, + p2pv2BootstrapperPort: c.String("bootstrapPort"), + transmitterID: transmitterID, + useForwarder: useForwarder, + chainID: chainID, + encryptionPublicKey: dkgEncryptKey, + keyID: keyID, + signingPublicKey: dkgSignKey, + }) + } else if c.String("job-type") == "OCR2VRF" { + // Set up OCR2VRF job. 
+ err = createOCR2VRFJob(ctx, lggr, app, ocr2vrfTemplateArgs{ + dkgTemplateArgs: dkgTemplateArgs{ + contractID: c.String("dkg-address"), + ocrKeyBundleID: ocr2.ID(), + p2pv2BootstrapperPeerID: peerID, + p2pv2BootstrapperPort: c.String("bootstrapPort"), + transmitterID: transmitterID, + useForwarder: useForwarder, + chainID: chainID, + encryptionPublicKey: dkgEncryptKey, + keyID: keyID, + signingPublicKey: dkgSignKey, + }, + vrfBeaconAddress: c.String("vrf-beacon-address"), + vrfCoordinatorAddress: c.String("vrf-coordinator-address"), + linkEthFeedAddress: c.String("link-eth-feed-address"), + sendingKeys: sendingKeys, + }) + } else { + err = fmt.Errorf("unknown job type: %s", c.String("job-type")) + } + + if err != nil { + return nil, err + } + + return &SetupOCR2VRFNodePayload{ + OnChainPublicKey: ocr2.OnChainPublicKey(), + OffChainPublicKey: hex.EncodeToString(offChainPublicKey[:]), + ConfigPublicKey: hex.EncodeToString(configPublicKey[:]), + PeerID: p2p[0].PeerID().Raw(), + Transmitter: transmitterID, + DkgEncrypt: dkgEncryptKey, + DkgSign: dkgSignKey, + SendingKeys: sendingKeys, + }, nil +} + +func (s *Shell) appendForwarders(chainID int64, ks keystore.Eth, sendingKeys []string, sendingKeysAddresses []common.Address) ([]string, []common.Address, error) { + for i := 0; i < forwarderAdditionalEOACount; i++ { + // Create the sending key in the keystore. + k, err := ks.Create() + if err != nil { + return nil, nil, err + } + + // Enable the sending key for the current chain. 
// authorizeForwarder registers sendingKeysAddresses as authorized senders on
// the forwarder contract given by the "forwarder-address" CLI flag, waits for
// the transaction to be mined, then records the forwarder in the local DB so
// the forwarder manager can track it. Blocks up to 300s for mining.
func (s *Shell) authorizeForwarder(c *cli.Context, db *sqlx.DB, lggr logger.Logger, chainID int64, ec *ethclient.Client, owner *bind.TransactOpts, sendingKeysAddresses []common.Address) error {
	ctx := s.ctx()
	// Replace the transmitter ID with the forwarder address.
	forwarderAddress := c.String("forwarder-address")

	// We have to set the authorized senders on-chain here, otherwise the job spawner will fail as the
	// forwarder will not be recognized.
	ctx, cancel := context.WithTimeout(ctx, 300*time.Second)
	defer cancel()
	f, err := authorized_forwarder.NewAuthorizedForwarder(common.HexToAddress(forwarderAddress), ec)
	if err != nil {
		return err
	}
	// Submit the authorization tx signed by `owner` (presumably the
	// forwarder's owner account — the call reverts otherwise; TODO confirm).
	tx, err := f.SetAuthorizedSenders(owner, sendingKeysAddresses)
	if err != nil {
		return err
	}
	// Block until the tx is mined (or the 300s context expires).
	_, err = bind.WaitMined(ctx, ec, tx)
	if err != nil {
		return err
	}

	// Create forwarder for management in forwarder_manager.go.
	orm := forwarders.NewORM(db, lggr, s.Config.Database())
	_, err = orm.CreateForwarder(common.HexToAddress(forwarderAddress), *ubig.NewI(chainID))
	if err != nil {
		return err
	}

	return nil
}
+ orm := forwarders.NewORM(db, lggr, s.Config.Database()) + _, err = orm.CreateForwarder(common.HexToAddress(forwarderAddress), *ubig.NewI(chainID)) + if err != nil { + return err + } + + return nil +} + +func setupKeystore(cli *Shell, app plugin.Application, keyStore keystore.Master) error { + if err := cli.KeyStoreAuthenticator.authenticate(keyStore, cli.Config.Password()); err != nil { + return errors.Wrap(err, "error authenticating keystore") + } + + if cli.Config.EVMEnabled() { + chains, err := app.GetRelayers().LegacyEVMChains().List() + if err != nil { + return fmt.Errorf("failed to get legacy evm chains") + } + for _, ch := range chains { + if err = keyStore.Eth().EnsureKeys(ch.ID()); err != nil { + return errors.Wrap(err, "failed to ensure keystore keys") + } + } + } + + var enabledChains []chaintype.ChainType + if cli.Config.EVMEnabled() { + enabledChains = append(enabledChains, chaintype.EVM) + } + if cli.Config.CosmosEnabled() { + enabledChains = append(enabledChains, chaintype.Cosmos) + } + if cli.Config.SolanaEnabled() { + enabledChains = append(enabledChains, chaintype.Solana) + } + if cli.Config.StarkNetEnabled() { + enabledChains = append(enabledChains, chaintype.StarkNet) + } + + if err := keyStore.OCR2().EnsureKeys(enabledChains...); err != nil { + return errors.Wrap(err, "failed to ensure ocr key") + } + + if err := keyStore.DKGSign().EnsureKey(); err != nil { + return errors.Wrap(err, "failed to ensure dkgsign key") + } + + if err := keyStore.DKGEncrypt().EnsureKey(); err != nil { + return errors.Wrap(err, "failed to ensure dkgencrypt key") + } + + if err := keyStore.P2P().EnsureKey(); err != nil { + return errors.Wrap(err, "failed to ensure p2p key") + } + + return nil +} + +func createBootstrapperJob(ctx context.Context, lggr logger.Logger, c *cli.Context, app plugin.Application) error { + sp := fmt.Sprintf(BootstrapTemplate, + c.Int64("chainID"), + c.String("contractID"), + c.Int64("chainID"), + ) + var jb job.Job + err := 
toml.Unmarshal([]byte(sp), &jb) + if err != nil { + return errors.Wrap(err, "failed to unmarshal job spec") + } + var os job.BootstrapSpec + err = toml.Unmarshal([]byte(sp), &os) + if err != nil { + return errors.Wrap(err, "failed to unmarshal job spec") + } + jb.BootstrapSpec = &os + + err = app.AddJobV2(ctx, &jb) + if err != nil { + return errors.Wrap(err, "failed to add job") + } + lggr.Info("bootstrap spec:", sp) + + // Give a cooldown + time.Sleep(time.Second) + + return nil +} + +func createDKGJob(ctx context.Context, lggr logger.Logger, app plugin.Application, args dkgTemplateArgs) error { + sp := fmt.Sprintf(DKGTemplate, + args.contractID, + args.ocrKeyBundleID, + args.transmitterID, + args.useForwarder, + fmt.Sprintf(`p2pv2Bootstrappers = ["%s@127.0.0.1:%s"]`, args.p2pv2BootstrapperPeerID, args.p2pv2BootstrapperPort), + args.chainID, + args.encryptionPublicKey, + args.keyID, + args.signingPublicKey, + ) + + var jb job.Job + err := toml.Unmarshal([]byte(sp), &jb) + if err != nil { + return errors.Wrap(err, "failed to unmarshal job spec") + } + var os job.OCR2OracleSpec + err = toml.Unmarshal([]byte(sp), &os) + if err != nil { + return errors.Wrap(err, "failed to unmarshal job spec") + } + jb.OCR2OracleSpec = &os + + err = app.AddJobV2(ctx, &jb) + if err != nil { + return errors.Wrap(err, "failed to add job") + } + lggr.Info("dkg spec:", sp) + + return nil +} + +func createOCR2VRFJob(ctx context.Context, lggr logger.Logger, app plugin.Application, args ocr2vrfTemplateArgs) error { + var sendingKeysString = fmt.Sprintf(`"%s"`, args.sendingKeys[0]) + for x := 1; x < len(args.sendingKeys); x++ { + sendingKeysString = fmt.Sprintf(`%s,"%s"`, sendingKeysString, args.sendingKeys[x]) + } + sp := fmt.Sprintf(OCR2VRFTemplate, + args.chainID, + args.vrfBeaconAddress, + args.ocrKeyBundleID, + args.transmitterID, + args.useForwarder, + fmt.Sprintf(`p2pv2Bootstrappers = ["%s@127.0.0.1:%s"]`, args.p2pv2BootstrapperPeerID, args.p2pv2BootstrapperPort), + args.chainID, + 
sendingKeysString, + args.encryptionPublicKey, + args.signingPublicKey, + args.keyID, + args.contractID, + args.vrfCoordinatorAddress, + args.linkEthFeedAddress, + ) + + var jb job.Job + err := toml.Unmarshal([]byte(sp), &jb) + if err != nil { + return errors.Wrap(err, "failed to unmarshal job spec") + } + var os job.OCR2OracleSpec + err = toml.Unmarshal([]byte(sp), &os) + if err != nil { + return errors.Wrap(err, "failed to unmarshal job spec") + } + jb.OCR2OracleSpec = &os + + err = app.AddJobV2(ctx, &jb) + if err != nil { + return errors.Wrap(err, "failed to add job") + } + lggr.Info("ocr2vrf spec:", sp) + + return nil +} diff --git a/core/cmd/ocr_keys_commands.go b/core/cmd/ocr_keys_commands.go new file mode 100644 index 00000000..4daec8c6 --- /dev/null +++ b/core/cmd/ocr_keys_commands.go @@ -0,0 +1,280 @@ +package cmd + +import ( + "bytes" + "fmt" + "io" + "net/http" + "os" + + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + + cutils "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initOCRKeysSubCmd(s *Shell) cli.Command { + return cli.Command{ + Name: "ocr", + Usage: "Remote commands for administering the node's legacy off chain reporting keys", + Subcommands: cli.Commands{ + { + Name: "create", + Usage: format(`Create an OCR key bundle, encrypted with password from the password file, and store it in the database`), + Action: s.CreateOCRKeyBundle, + }, + { + Name: "delete", + Usage: format(`Deletes the encrypted OCR key bundle matching the given ID`), + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "yes, y", + Usage: "skip the confirmation prompt", + }, + cli.BoolFlag{ + Name: "hard", + Usage: "hard-delete the key instead of archiving (irreversible!)", + }, + }, + Action: s.DeleteOCRKeyBundle, + }, + { + Name: "list", + Usage: format(`List available OCR key 
bundles`), + Action: s.ListOCRKeyBundles, + }, + { + Name: "import", + Usage: format(`Imports an OCR key bundle from a JSON file`), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "old-password, oldpassword, p", + Usage: "`FILE` containing the password used to encrypt the key in the JSON file", + }, + }, + Action: s.ImportOCRKey, + }, + { + Name: "export", + Usage: format(`Exports an OCR key bundle to a JSON file`), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "new-password, newpassword, p", + Usage: "`FILE` containing the password to encrypt the key (required)", + }, + cli.StringFlag{ + Name: "output, o", + Usage: "`FILE` where the JSON file will be saved (required)", + }, + }, + Action: s.ExportOCRKey, + }, + }, + } +} + +type OCRKeyBundlePresenter struct { + JAID // Include this to overwrite the presenter JAID so it can correctly render the ID in JSON + presenters.OCRKeysBundleResource +} + +// RenderTable implements TableRenderer +func (p *OCRKeyBundlePresenter) RenderTable(rt RendererTable) error { + headers := []string{"ID", "On-chain signing addr", "Off-chain pubkey", "Config pubkey"} + rows := [][]string{p.ToRow()} + + if _, err := rt.Write([]byte("🔑 Legacy OCR Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return cutils.JustError(rt.Write([]byte("\n"))) +} + +func (p *OCRKeyBundlePresenter) ToRow() []string { + return []string{ + p.ID, + p.OnChainSigningAddress.String(), + p.OffChainPublicKey.String(), + p.ConfigPublicKey.String(), + } +} + +type OCRKeyBundlePresenters []OCRKeyBundlePresenter + +// ListOCRKeyBundles lists the available OCR Key Bundles +func (s *Shell) ListOCRKeyBundles(_ *cli.Context) error { + resp, err := s.HTTP.Get(s.ctx(), "/v2/keys/ocr", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var presenters OCRKeyBundlePresenters + return s.renderAPIResponse(resp, &presenters) +} + +// 
RenderTable implements TableRenderer +func (ps OCRKeyBundlePresenters) RenderTable(rt RendererTable) error { + headers := []string{"ID", "On-chain signing addr", "Off-chain pubkey", "Config pubkey"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + if _, err := rt.Write([]byte("🔑 Legacy OCR Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return cutils.JustError(rt.Write([]byte("\n"))) +} + +// CreateOCR2KeyBundle creates an OCR key bundle and saves it to the keystore +func (s *Shell) CreateOCRKeyBundle(_ *cli.Context) error { + resp, err := s.HTTP.Post(s.ctx(), "/v2/keys/ocr", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var presenter OCRKeyBundlePresenter + return s.renderAPIResponse(resp, &presenter, "Created OCR key bundle") +} + +// DeleteOCR2KeyBundle deletes an OCR key bundle +func (s *Shell) DeleteOCRKeyBundle(c *cli.Context) error { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the key ID to be deleted")) + } + id, err := models.Sha256HashFromHex(c.Args().Get(0)) + if err != nil { + return s.errorOut(err) + } + + if !confirmAction(c) { + return nil + } + + var queryStr string + if c.Bool("hard") { + queryStr = "?hard=true" + } + + resp, err := s.HTTP.Delete(s.ctx(), fmt.Sprintf("/v2/keys/ocr/%s%s", id, queryStr)) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var presenter OCRKeyBundlePresenter + return s.renderAPIResponse(resp, &presenter, "OCR key bundle deleted") +} + +// ImportOCR2Key imports OCR key bundle +func (s *Shell) ImportOCRKey(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the filepath of the key to be imported")) + } + + oldPasswordFile := c.String("old-password") + 
if len(oldPasswordFile) == 0 { + return s.errorOut(errors.New("Must specify --old-password/-p flag")) + } + oldPassword, err := os.ReadFile(oldPasswordFile) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read password file")) + } + + filepath := c.Args().Get(0) + keyJSON, err := os.ReadFile(filepath) + if err != nil { + return s.errorOut(err) + } + + normalizedPassword := normalizePassword(string(oldPassword)) + resp, err := s.HTTP.Post(s.ctx(), "/v2/keys/ocr/import?oldpassword="+normalizedPassword, bytes.NewReader(keyJSON)) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var presenter OCRKeyBundlePresenter + return s.renderAPIResponse(resp, &presenter, "Imported OCR key bundle") +} + +// ExportOCR2Key exports an OCR key bundle by ID +func (s *Shell) ExportOCRKey(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the ID of the key to export")) + } + + newPasswordFile := c.String("new-password") + if len(newPasswordFile) == 0 { + return s.errorOut(errors.New("Must specify --new-password/-p flag")) + } + newPassword, err := os.ReadFile(newPasswordFile) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read password file")) + } + + filepath := c.String("output") + if len(filepath) == 0 { + return s.errorOut(errors.New("Must specify --output/-o flag")) + } + + ID := c.Args().Get(0) + + normalizedPassword := normalizePassword(string(newPassword)) + resp, err := s.HTTP.Post(s.ctx(), "/v2/keys/ocr/export/"+ID+"?newpassword="+normalizedPassword, nil) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not make HTTP request")) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + if resp.StatusCode != http.StatusOK { + return s.errorOut(fmt.Errorf("error exporting: %w", httpError(resp))) + } + + keyJSON, err := 
// TestOCRKeyBundlePresenter_RenderTable checks that both the single-bundle
// and list presenters include the bundle ID, on-chain address, off-chain
// public key, and config public key in their rendered output.
func TestOCRKeyBundlePresenter_RenderTable(t *testing.T) {
	t.Parallel()

	var (
		bundleID = "f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5"
		buffer   = bytes.NewBufferString("")
		r        = cmd.RendererTable{Writer: buffer}
	)

	key := cltest.DefaultOCRKey

	p := cmd.OCRKeyBundlePresenter{
		JAID: cmd.JAID{ID: bundleID},
		OCRKeysBundleResource: presenters.OCRKeysBundleResource{
			JAID:                  presenters.NewJAID(key.ID()),
			OnChainSigningAddress: key.OnChainSigning.Address(),
			OffChainPublicKey:     key.OffChainSigning.PublicKey(),
			ConfigPublicKey:       key.PublicKeyConfig(),
		},
	}

	// Render a single resource
	require.NoError(t, p.RenderTable(r))

	output := buffer.String()
	assert.Contains(t, output, bundleID)
	assert.Contains(t, output, key.OnChainSigning.Address().String())
	assert.Contains(t, output, hex.EncodeToString(key.PublicKeyOffChain()))
	// Config key is an array type; slice it before hex-encoding.
	pubKeyConfig := key.PublicKeyConfig()
	assert.Contains(t, output, hex.EncodeToString(pubKeyConfig[:]))

	// Render many resources
	buffer.Reset()
	ps := cmd.OCRKeyBundlePresenters{p}
	require.NoError(t, ps.RenderTable(r))

	output = buffer.String()
	assert.Contains(t, output, bundleID)
	assert.Contains(t, output, key.OnChainSigning.Address().String())
	assert.Contains(t, output, hex.EncodeToString(key.PublicKeyOffChain()))
	pubKeyConfig = key.PublicKeyConfig()
	assert.Contains(t, output, hex.EncodeToString(pubKeyConfig[:]))
}

// TestShell_ListOCRKeyBundles creates one key and checks the list command
// renders it with the matching ID.
func TestShell_ListOCRKeyBundles(t *testing.T) {
	t.Parallel()

	app := startNewApplicationV2(t, nil)
	client, r := app.NewShellAndRenderer()

	key, err := app.GetKeyStore().OCR().Create()
	require.NoError(t, err)

	requireOCRKeyCount(t, app, 1)

	assert.Nil(t, client.ListOCRKeyBundles(cltest.EmptyCLIContext()))
	require.Equal(t, 1, len(r.Renders))
	output := *r.Renders[0].(*cmd.OCRKeyBundlePresenters)
	require.Equal(t, key.ID(), output[0].ID)
}

// TestShell_CreateOCRKeyBundle checks the create command adds exactly one
// key to the keystore and renders its ID.
func TestShell_CreateOCRKeyBundle(t *testing.T) {
	t.Parallel()

	app := startNewApplicationV2(t, nil)
	client, r := app.NewShellAndRenderer()

	requireOCRKeyCount(t, app, 0)

	require.NoError(t, client.CreateOCRKeyBundle(nilContext))

	keys, err := app.GetKeyStore().OCR().GetAll()
	require.NoError(t, err)
	require.Len(t, keys, 1)

	require.Equal(t, 1, len(r.Renders))
	output := *r.Renders[0].(*cmd.OCRKeyBundlePresenter)
	require.Equal(t, output.ID, keys[0].ID())
}
// TestShell_DeleteOCRKeyBundle creates a key, deletes it via the CLI with
// --yes, and checks the keystore is empty and the deleted ID was rendered.
func TestShell_DeleteOCRKeyBundle(t *testing.T) {
	t.Parallel()

	app := startNewApplicationV2(t, nil)
	client, r := app.NewShellAndRenderer()

	key, err := app.GetKeyStore().OCR().Create()
	require.NoError(t, err)

	requireOCRKeyCount(t, app, 1)

	set := flag.NewFlagSet("test", 0)
	flagSetApplyFromAction(client.DeleteOCRKeyBundle, set, "")

	require.NoError(t, set.Parse([]string{key.ID()}))
	require.NoError(t, set.Set("yes", "true")) // skip the confirmation prompt

	c := cli.NewContext(nil, set, nil)

	require.NoError(t, client.DeleteOCRKeyBundle(c))
	requireOCRKeyCount(t, app, 0) // the only key was deleted; none remain

	require.Equal(t, 1, len(r.Renders))
	output := *r.Renders[0].(*cmd.OCRKeyBundlePresenter)
	assert.Equal(t, key.ID(), output.ID)
}

// TestShell_ImportExportOCRKey round-trips an OCR key: export with an
// invalid ID fails, export with a valid ID writes the file, and after
// deleting the key an import restores it.
func TestShell_ImportExportOCRKey(t *testing.T) {
	defer deleteKeyExportFile(t)

	app := startNewApplicationV2(t, nil)
	client, _ := app.NewShellAndRenderer()

	require.NoError(t, app.KeyStore.OCR().Add(cltest.DefaultOCRKey))

	keys := requireOCRKeyCount(t, app, 1)
	key := keys[0]
	keyName := keyNameForTest(t)

	// Export test invalid id
	set := flag.NewFlagSet("test OCR export", 0)
	flagSetApplyFromAction(client.ExportOCRKey, set, "")

	require.NoError(t, set.Parse([]string{"0"}))
	require.NoError(t, set.Set("new-password", "../internal/fixtures/new_password.txt"))
	require.NoError(t, set.Set("output", keyName))

	c := cli.NewContext(nil, set, nil)
	err := client.ExportOCRKey(c)
	require.Error(t, err, "Error exporting")
	// No output file should have been written on failure.
	require.Error(t, utils.JustError(os.Stat(keyName)))

	// Export
	set = flag.NewFlagSet("test OCR export", 0)
	flagSetApplyFromAction(client.ExportOCRKey, set, "")

	require.NoError(t, set.Parse([]string{key.ID()}))
	require.NoError(t, set.Set("new-password", "../internal/fixtures/new_password.txt"))
	require.NoError(t, set.Set("output", keyName))

	c = cli.NewContext(nil, set, nil)

	require.NoError(t, client.ExportOCRKey(c))
	require.NoError(t, utils.JustError(os.Stat(keyName)))

	require.NoError(t, utils.JustError(app.GetKeyStore().OCR().Delete(key.ID())))
	requireOCRKeyCount(t, app, 0)

	set = flag.NewFlagSet("test OCR import", 0)
	flagSetApplyFromAction(client.ImportOCRKey, set, "")

	require.NoError(t, set.Parse([]string{keyName}))
	// Import decrypts with the password the export step used to encrypt.
	require.NoError(t, set.Set("old-password", "../internal/fixtures/new_password.txt"))

	c = cli.NewContext(nil, set, nil)
	require.NoError(t, client.ImportOCRKey(c))

	requireOCRKeyCount(t, app, 1)
}
require.NoError(t, client.ImportOCRKey(c)) + + requireOCRKeyCount(t, app, 1) +} + +func requireOCRKeyCount(t *testing.T, app plugin.Application, length int) []ocrkey.KeyV2 { + keys, err := app.GetKeyStore().OCR().GetAll() + require.NoError(t, err) + require.Len(t, keys, length) + return keys +} diff --git a/core/cmd/p2p_keys_commands.go b/core/cmd/p2p_keys_commands.go new file mode 100644 index 00000000..d0c62e24 --- /dev/null +++ b/core/cmd/p2p_keys_commands.go @@ -0,0 +1,276 @@ +package cmd + +import ( + "bytes" + "fmt" + "io" + "net/http" + "os" + + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + + cutils "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initP2PKeysSubCmd(s *Shell) cli.Command { + return cli.Command{ + Name: "p2p", + Usage: "Remote commands for administering the node's p2p keys", + Subcommands: cli.Commands{ + { + Name: "create", + Usage: format(`Create a p2p key, encrypted with password from the password file, and store it in the database.`), + Action: s.CreateP2PKey, + }, + { + Name: "delete", + Usage: format(`Delete the encrypted P2P key by id`), + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "yes, y", + Usage: "skip the confirmation prompt", + }, + cli.BoolFlag{ + Name: "hard", + Usage: "hard-delete the key instead of archiving (irreversible!)", + }, + }, + Action: s.DeleteP2PKey, + }, + { + Name: "list", + Usage: format(`List available P2P keys`), + Action: s.ListP2PKeys, + }, + { + Name: "import", + Usage: format(`Imports a P2P key from a JSON file`), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "old-password, oldpassword, p", + Usage: "`FILE` containing the password used to encrypt the key in the JSON file", + }, + }, + Action: s.ImportP2PKey, + }, + { + Name: "export", + Usage: format(`Exports a P2P key to a JSON file`), + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "new-password, newpassword, 
p", + Usage: "`FILE` containing the password to encrypt the key (required)", + }, + cli.StringFlag{ + Name: "output, o", + Usage: "`FILE` where the JSON file will be saved (required)", + }, + }, + Action: s.ExportP2PKey, + }, + }, + } +} + +type P2PKeyPresenter struct { + JAID + presenters.P2PKeyResource +} + +// RenderTable implements TableRenderer +func (p *P2PKeyPresenter) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Peer ID", "Public key"} + rows := [][]string{p.ToRow()} + + if _, err := rt.Write([]byte("🔑 P2P Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return cutils.JustError(rt.Write([]byte("\n"))) +} + +func (p *P2PKeyPresenter) ToRow() []string { + row := []string{ + p.ID, + p.PeerID, + p.PubKey, + } + + return row +} + +type P2PKeyPresenters []P2PKeyPresenter + +// RenderTable implements TableRenderer +func (ps P2PKeyPresenters) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Peer ID", "Public key"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + if _, err := rt.Write([]byte("🔑 P2P Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return cutils.JustError(rt.Write([]byte("\n"))) +} + +// ListP2PKeys retrieves a list of all P2P keys +func (s *Shell) ListP2PKeys(_ *cli.Context) (err error) { + resp, err := s.HTTP.Get(s.ctx(), "/v2/keys/p2p", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &P2PKeyPresenters{}) +} + +// CreateP2PKey creates a new P2P key +func (s *Shell) CreateP2PKey(_ *cli.Context) (err error) { + resp, err := s.HTTP.Post(s.ctx(), "/v2/keys/p2p", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, 
&P2PKeyPresenter{}, "Created P2P keypair") +} + +// DeleteP2PKey deletes a P2P key, +// key ID must be passed +func (s *Shell) DeleteP2PKey(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the key ID to be deleted")) + } + id := c.Args().Get(0) + + if !confirmAction(c) { + return nil + } + + var queryStr string + if c.Bool("hard") { + queryStr = "?hard=true" + } + + resp, err := s.HTTP.Delete(s.ctx(), fmt.Sprintf("/v2/keys/p2p/%s%s", id, queryStr)) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &P2PKeyPresenter{}, "P2P key deleted") +} + +// ImportP2PKey imports and stores a P2P key, +// path to key must be passed +func (s *Shell) ImportP2PKey(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the filepath of the key to be imported")) + } + + oldPasswordFile := c.String("old-password") + if len(oldPasswordFile) == 0 { + return s.errorOut(errors.New("Must specify --old-password/-p flag")) + } + oldPassword, err := os.ReadFile(oldPasswordFile) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read password file")) + } + + filepath := c.Args().Get(0) + keyJSON, err := os.ReadFile(filepath) + if err != nil { + return s.errorOut(err) + } + + normalizedPassword := normalizePassword(string(oldPassword)) + resp, err := s.HTTP.Post(s.ctx(), "/v2/keys/p2p/import?oldpassword="+normalizedPassword, bytes.NewReader(keyJSON)) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &P2PKeyPresenter{}, "🔑 Imported P2P key") +} + +// ExportP2PKey exports a P2P key, +// key ID must be passed +func (s *Shell) ExportP2PKey(c *cli.Context) (err error) { + if !c.Args().Present() { + return 
s.errorOut(errors.New("Must pass the ID of the key to export")) + } + + newPasswordFile := c.String("new-password") + if len(newPasswordFile) == 0 { + return s.errorOut(errors.New("Must specify --new-password/-p flag")) + } + newPassword, err := os.ReadFile(newPasswordFile) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read password file")) + } + + filepath := c.String("output") + if len(filepath) == 0 { + return s.errorOut(errors.New("Must specify --output/-o flag")) + } + + ID := c.Args().Get(0) + + normalizedPassword := normalizePassword(string(newPassword)) + resp, err := s.HTTP.Post(s.ctx(), "/v2/keys/p2p/export/"+ID+"?newpassword="+normalizedPassword, nil) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not make HTTP request")) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + if resp.StatusCode != http.StatusOK { + return s.errorOut(fmt.Errorf("error exporting: %w", httpError(resp))) + } + + keyJSON, err := io.ReadAll(resp.Body) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read response body")) + } + + err = utils.WriteFileWithMaxPerms(filepath, keyJSON, 0o600) + if err != nil { + return s.errorOut(errors.Wrapf(err, "Could not write %v", filepath)) + } + + _, err = os.Stderr.WriteString(fmt.Sprintf("🔑 Exported P2P key %s to %s\n", ID, filepath)) + if err != nil { + return s.errorOut(err) + } + + return nil +} diff --git a/core/cmd/p2p_keys_commands_test.go b/core/cmd/p2p_keys_commands_test.go new file mode 100644 index 00000000..9762eb4d --- /dev/null +++ b/core/cmd/p2p_keys_commands_test.go @@ -0,0 +1,181 @@ +package cmd_test + +import ( + "bytes" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestP2PKeyPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + id = "1" + peerID = configtest.DefaultPeerID + pubKey = "somepubkey" + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.P2PKeyPresenter{ + JAID: cmd.JAID{ID: id}, + P2PKeyResource: presenters.P2PKeyResource{ + JAID: presenters.NewJAID(id), + PeerID: peerID, + PubKey: pubKey, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, peerID) + assert.Contains(t, output, pubKey) + + // Render many resources + buffer.Reset() + ps := cmd.P2PKeyPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, peerID) + assert.Contains(t, output, pubKey) +} + +func TestShell_ListP2PKeys(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + key, err := app.GetKeyStore().P2P().Create() + require.NoError(t, err) + + requireP2PKeyCount(t, app, 1) + + client, r := app.NewShellAndRenderer() + + assert.Nil(t, client.ListP2PKeys(cltest.EmptyCLIContext())) + require.Equal(t, 1, len(r.Renders)) + keys := *r.Renders[0].(*cmd.P2PKeyPresenters) + assert.True(t, key.PublicKeyHex() == keys[0].PubKey) +} + +func TestShell_CreateP2PKey(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + + require.NoError(t, client.CreateP2PKey(nilContext)) + + keys, err := app.GetKeyStore().P2P().GetAll() + require.NoError(t, err) + + require.Len(t, keys, 1) +} + +func TestShell_DeleteP2PKey(t 
*testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + + key, err := app.GetKeyStore().P2P().Create() + require.NoError(t, err) + + requireP2PKeyCount(t, app, 1) + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteP2PKey, set, "") + + require.NoError(t, set.Set("yes", "true")) + + strID := key.ID() + err = set.Parse([]string{strID}) + require.NoError(t, err) + c := cli.NewContext(nil, set, nil) + err = client.DeleteP2PKey(c) + require.NoError(t, err) + + requireP2PKeyCount(t, app, 0) +} + +func TestShell_ImportExportP2PKeyBundle(t *testing.T) { + t.Parallel() + + defer deleteKeyExportFile(t) + + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + _, err := app.GetKeyStore().P2P().Create() + require.NoError(t, err) + + keys := requireP2PKeyCount(t, app, 1) + key := keys[0] + keyName := keyNameForTest(t) + + // Export test invalid id + set := flag.NewFlagSet("test P2P export", 0) + flagSetApplyFromAction(client.ExportP2PKey, set, "") + + require.NoError(t, set.Parse([]string{"0"})) + require.NoError(t, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(t, set.Set("output", keyName)) + + c := cli.NewContext(nil, set, nil) + err = client.ExportP2PKey(c) + require.Error(t, err, "Error exporting") + require.Error(t, utils.JustError(os.Stat(keyName))) + + // Export test + set = flag.NewFlagSet("test P2P export", 0) + flagSetApplyFromAction(client.ExportP2PKey, set, "") + + require.NoError(t, set.Parse([]string{fmt.Sprint(key.ID())})) + require.NoError(t, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(t, set.Set("output", keyName)) + + c = cli.NewContext(nil, set, nil) + + require.NoError(t, client.ExportP2PKey(c)) + require.NoError(t, utils.JustError(os.Stat(keyName))) + + require.NoError(t, utils.JustError(app.GetKeyStore().P2P().Delete(key.PeerID()))) + requireP2PKeyCount(t, app, 
0) + + set = flag.NewFlagSet("test P2P import", 0) + flagSetApplyFromAction(client.ImportP2PKey, set, "") + + require.NoError(t, set.Parse([]string{keyName})) + require.NoError(t, set.Set("old-password", "../internal/fixtures/incorrect_password.txt")) + + c = cli.NewContext(nil, set, nil) + require.NoError(t, client.ImportP2PKey(c)) + + requireP2PKeyCount(t, app, 1) +} + +func requireP2PKeyCount(t *testing.T, app plugin.Application, length int) []p2pkey.KeyV2 { + t.Helper() + + keys, err := app.GetKeyStore().P2P().GetAll() + require.NoError(t, err) + require.Len(t, keys, length) + return keys +} diff --git a/core/cmd/presenters.go b/core/cmd/presenters.go new file mode 100644 index 00000000..417251ef --- /dev/null +++ b/core/cmd/presenters.go @@ -0,0 +1,35 @@ +package cmd + +// JAID represents a JSON API ID. +// +// It implements the api2go MarshalIdentifier and UnmarshalIdentitier interface. +// +// When you embed a JSONAPI resource into a presenter, it will not render the +// ID into the JSON object when you perform a json.Marshal. Instead we use this +// to override the ID field of the resource with a JSON tag that will render. +// +// Embed this into a Presenter to render the ID. For example +// +// type JobPresenter struct { +// JAID +// presenters.JobResource +// } +type JAID struct { + ID string `json:"id"` +} + +func NewJAID(id string) JAID { + return JAID{ID: id} +} + +// GetID implements the api2go MarshalIdentifier interface. +func (jaid JAID) GetID() string { + return jaid.ID +} + +// SetID implements the api2go UnmarshalIdentitier interface. 
+func (jaid *JAID) SetID(value string) error { + jaid.ID = value + + return nil +} diff --git a/core/cmd/presenters_test.go b/core/cmd/presenters_test.go new file mode 100644 index 00000000..9dc13822 --- /dev/null +++ b/core/cmd/presenters_test.go @@ -0,0 +1,23 @@ +package cmd_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" +) + +func TestJAID(t *testing.T) { + t.Parallel() + + jaid := cmd.JAID{ID: "1"} + + t.Run("GetID", func(t *testing.T) { assert.Equal(t, "1", jaid.GetID()) }) + t.Run("SetID", func(t *testing.T) { + err := jaid.SetID("2") + require.NoError(t, err) + assert.Equal(t, "2", jaid.GetID()) + }) +} diff --git a/core/cmd/prompter.go b/core/cmd/prompter.go new file mode 100644 index 00000000..6e98fd92 --- /dev/null +++ b/core/cmd/prompter.go @@ -0,0 +1,98 @@ +package cmd + +import ( + "bufio" + "fmt" + "os" + "os/signal" + "strings" + "syscall" + + "golang.org/x/term" +) + +//go:generate mockery --quiet --name Prompter --output ./mocks/ --case=underscore + +// Prompter implements the Prompt function to be used to display at +// the console. +type Prompter interface { + Prompt(string) string + PasswordPrompt(string) string + IsTerminal() bool +} + +// terminalPrompter is used to display and read input from the user. +type terminalPrompter struct{} + +// NewTerminalPrompter prompts the user via terminal. +func NewTerminalPrompter() Prompter { + return terminalPrompter{} +} + +// Prompt displays the prompt for the user to enter the password and +// reads their input. +func (tp terminalPrompter) Prompt(prompt string) string { + reader := bufio.NewReader(os.Stdin) + fmt.Print(prompt) + line, err := reader.ReadString('\n') + if err != nil { + fmt.Print(err) + os.Exit(1) + } + clearLine() + return strings.TrimSpace(line) +} + +// PasswordPrompt displays the prompt for the user to enter the password and +// reads their input. 
+func (tp terminalPrompter) PasswordPrompt(prompt string) string { + var rval string + withTerminalResetter(func() { + fmt.Print(prompt) + bytePwd, err := term.ReadPassword(int(os.Stdin.Fd())) + if err != nil { + fmt.Print(err) + os.Exit(1) + } + clearLine() + rval = string(bytePwd) + }) + return rval +} + +// IsTerminal checks if the current process is executing in a terminal, this +// should be used to decide when to use PasswordPrompt. +func (tp terminalPrompter) IsTerminal() bool { + return term.IsTerminal(int(os.Stdout.Fd())) +} + +// Explicitly reset terminal state in the event of a signal (CTRL+C) +// to ensure typed characters are echoed in terminal: +// https://groups.google.com/forum/#!topic/Golang-nuts/kTVAbtee9UA +func withTerminalResetter(f func()) { + osSafeStdin := int(os.Stdin.Fd()) + + initialTermState, err := term.GetState(osSafeStdin) + if err != nil { + fmt.Print(err) + os.Exit(1) + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + err := term.Restore(osSafeStdin, initialTermState) + if err != nil { + fmt.Printf("Error restoring terminal: %v", err) + } + os.Exit(1) + }() + + f() + signal.Stop(c) +} + +func clearLine() { + fmt.Printf("\r" + strings.Repeat(" ", 60) + "\r") +} diff --git a/core/cmd/renderer.go b/core/cmd/renderer.go new file mode 100644 index 00000000..94a01b70 --- /dev/null +++ b/core/cmd/renderer.go @@ -0,0 +1,181 @@ +package cmd + +import ( + "fmt" + "io" + "strings" + + "github.com/olekukonko/tablewriter" + + "github.com/goplugin/pluginv3.0/v2/core/utils" + webpresenters "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// Renderer implements the Render method. +type Renderer interface { + Render(interface{}, ...string) error +} + +// RendererJSON is used to render JSON data. +type RendererJSON struct { + io.Writer +} + +// Render writes the given input as a JSON string. 
+func (rj RendererJSON) Render(v interface{}, _ ...string) error { + b, err := utils.FormatJSON(v) + if err != nil { + return err + } + + // Append a new line + b = append(b, []byte("\n")...) + if _, err = rj.Write(b); err != nil { + return err + } + return nil +} + +// RendererTable is used for data to be rendered as a table. +type RendererTable struct { + io.Writer +} + +type TableRenderer interface { + RenderTable(rt RendererTable) error +} + +// Render returns a formatted table of text for a given Job or presenter +// and relevant information. +func (rt RendererTable) Render(v interface{}, headers ...string) error { + for _, h := range headers { + fmt.Println(h) + } + + switch typed := v.(type) { + case *webpresenters.ExternalInitiatorAuthentication: + return rt.renderExternalInitiatorAuthentication(*typed) + case *webpresenters.PipelineRunResource: + return rt.renderPipelineRun(*typed) + case *webpresenters.ServiceLogConfigResource: + return rt.renderLogPkgConfig(*typed) + case *[]VRFKeyPresenter: + return rt.renderVRFKeys(*typed) + case TableRenderer: + return typed.RenderTable(rt) + default: + return fmt.Errorf("unable to render object of type %T: %v", typed, typed) + } +} + +func (rt RendererTable) renderLogPkgConfig(serviceLevelLog webpresenters.ServiceLogConfigResource) error { + table := rt.newTable([]string{"ID", "Service", "LogLevel"}) + for i, svcName := range serviceLevelLog.ServiceName { + table.Append([]string{ + serviceLevelLog.ID, + svcName, + serviceLevelLog.LogLevel[i], + }) + } + + render("ServiceLogConfig", table) + return nil +} + +func (rt RendererTable) renderVRFKeys(keys []VRFKeyPresenter) error { + var rows [][]string + + for _, key := range keys { + rows = append(rows, []string{ + key.Compressed, + key.Uncompressed, + key.Hash, + }) + } + + renderList([]string{"Compressed", "Uncompressed", "Hash"}, rows, rt.Writer) + + return nil +} + +func render(name string, table *tablewriter.Table) { + table.SetRowLine(true) + 
table.SetColumnSeparator("║") + table.SetRowSeparator("═") + table.SetCenterSeparator("╬") + + fmt.Println("╔ " + name) + table.Render() +} + +func renderList(fields []string, items [][]string, writer io.Writer) { + var maxLabelLength int + for _, field := range fields { + if len(field) > maxLabelLength { + maxLabelLength = len(field) + } + } + var itemsRendered []string + var maxLineLength int + for _, row := range items { + var lines []string + for i, field := range fields { + diff := maxLabelLength - len(field) + spaces := strings.Repeat(" ", diff) + line := fmt.Sprintf("%v: %v%v", field, spaces, row[i]) + for _, l := range strings.Split(line, "\n") { + if len(l) > maxLineLength { + maxLineLength = len(l) + } + } + lines = append(lines, line) + } + itemsRendered = append(itemsRendered, strings.Join(lines, "\n")) + } + divider := "\n" + strings.Repeat("-", maxLineLength) + "\n" + listRendered := divider + strings.Join(itemsRendered, divider) + divider + _, err := writer.Write([]byte(listRendered)) + if err != nil { + // Handles errcheck + return + } +} + +func (rt RendererTable) renderExternalInitiatorAuthentication(eia webpresenters.ExternalInitiatorAuthentication) error { + table := rt.newTable([]string{"Name", "URL", "AccessKey", "Secret", "OutgoingToken", "OutgoingSecret"}) + table.Append([]string{ + eia.Name, + eia.URL.String(), + eia.AccessKey, + eia.Secret, + eia.OutgoingToken, + eia.OutgoingSecret, + }) + render("External Initiator Credentials:", table) + return nil +} + +func (rt RendererTable) newTable(headers []string) *tablewriter.Table { + table := tablewriter.NewWriter(rt) + table.SetHeader(headers) + return table +} + +func (rt RendererTable) renderPipelineRun(run webpresenters.PipelineRunResource) error { + table := rt.newTable([]string{"ID", "Created At", "Finished At"}) + + var finishedAt string + if !run.FinishedAt.IsZero() { + finishedAt = run.FinishedAt.ValueOrZero().String() + } + + row := []string{ + run.GetID(), + run.CreatedAt.String(), + 
finishedAt, + } + table.Append(row) + + render("Pipeline Run", table) + return nil +} diff --git a/core/cmd/renderer_test.go b/core/cmd/renderer_test.go new file mode 100644 index 00000000..8a5f422c --- /dev/null +++ b/core/cmd/renderer_test.go @@ -0,0 +1,112 @@ +package cmd_test + +import ( + "bytes" + "io" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/web" + webpresenters "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRendererJSON_RenderVRFKeys(t *testing.T) { + t.Parallel() + + r := cmd.RendererJSON{Writer: io.Discard} + keys := []cmd.VRFKeyPresenter{ + { + VRFKeyResource: webpresenters.VRFKeyResource{ + Compressed: "0xe2c659dd73ded1663c0caf02304aac5ccd247047b3993d273a8920bba0402f4d01", + Uncompressed: "0xe2c659dd73ded1663c0caf02304aac5ccd247047b3993d273a8920bba0402f4db44652a69526181101d4aa9a58ecf43b1be972330de99ea5e540f56f4e0a672f", + Hash: "0x9926c5f19ec3b3ce005e1c183612f05cfc042966fcdd82ec6e78bf128d91695a", + }, + }, + } + assert.NoError(t, r.Render(&keys)) +} + +func TestRendererTable_RenderConfigurationV2(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + wantUser, wantEffective := app.Config.ConfigTOML() + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + + t.Run("effective", func(t *testing.T) { + resp, cleanup := client.Get("/v2/config/v2") + t.Cleanup(cleanup) + var effective web.ConfigV2Resource + require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &effective)) + + assert.Equal(t, wantEffective, effective.Config) + }) + + t.Run("user", func(t *testing.T) { + resp, cleanup := client.Get("/v2/config/v2?userOnly=true") + t.Cleanup(cleanup) + var user web.ConfigV2Resource + require.NoError(t, 
cltest.ParseJSONAPIResponse(t, resp, &user)) + + assert.Equal(t, wantUser, user.Config) + }) +} + +type testWriter struct { + expected string + t testing.TB + found bool +} + +func (w *testWriter) Write(actual []byte) (int, error) { + if bytes.Contains(actual, []byte(w.expected)) { + w.found = true + } + return len(actual), nil +} + +func TestRendererTable_RenderExternalInitiatorAuthentication(t *testing.T) { + t.Parallel() + + eia := webpresenters.ExternalInitiatorAuthentication{ + Name: "bitcoin", + URL: cltest.WebURL(t, "http://localhost:8888"), + AccessKey: "accesskey", + Secret: "secret", + OutgoingToken: "outgoingToken", + OutgoingSecret: "outgoingSecret", + } + tests := []struct { + name, content string + }{ + {"Name", eia.Name}, + {"URL", eia.URL.String()}, + {"AccessKey", eia.AccessKey}, + {"Secret", eia.Secret}, + {"OutgoingToken", eia.OutgoingToken}, + {"OutgoingSecret", eia.OutgoingSecret}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + tw := &testWriter{test.content, t, false} + r := cmd.RendererTable{Writer: tw} + + assert.NoError(t, r.Render(&eia)) + assert.True(t, tw.found) + }) + } +} + +func TestRendererTable_RenderUnknown(t *testing.T) { + t.Parallel() + r := cmd.RendererTable{Writer: io.Discard} + anon := struct{ Name string }{"Romeo"} + assert.Error(t, r.Render(&anon)) +} diff --git a/core/cmd/shell.go b/core/cmd/shell.go new file mode 100644 index 00000000..4a66151e --- /dev/null +++ b/core/cmd/shell.go @@ -0,0 +1,1030 @@ +package cmd + +import ( + "bytes" + "context" + "crypto/tls" + "database/sql" + "encoding/json" + "fmt" + "io" + "log/slog" + "net" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/Depado/ginprom" + "github.com/Masterminds/semver/v3" + "github.com/getsentry/sentry-go" + "github.com/gin-gonic/gin" + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + "go.uber.org/zap/zapcore" + 
"golang.org/x/sync/errgroup" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/core/build" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/periodicbackup" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/cache" + "github.com/goplugin/pluginv3.0/v2/core/services/versioning" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/static" + "github.com/goplugin/pluginv3.0/v2/core/store/migrate" + "github.com/goplugin/pluginv3.0/v2/core/utils" + clhttp "github.com/goplugin/pluginv3.0/v2/core/utils/http" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/plugins" +) + +func init() { + // hack to undo geth's disruption of the std default logger + // remove with geth v1.13.10 + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, nil))) +} + +var ( + initGlobalsOnce sync.Once + prometheus *ginprom.Prometheus + grpcOpts loop.GRPCOpts +) + +func initGlobals(cfgProm config.Prometheus, cfgTracing config.Tracing, logger logger.Logger) error { + // Avoid double initializations, but does not prevent relay methods from being called multiple times. 
+ var err error + initGlobalsOnce.Do(func() { + prometheus = ginprom.New(ginprom.Namespace("service"), ginprom.Token(cfgProm.AuthToken())) + grpcOpts = loop.NewGRPCOpts(nil) // default prometheus.Registerer + err = loop.SetupTracing(loop.TracingConfig{ + Enabled: cfgTracing.Enabled(), + CollectorTarget: cfgTracing.CollectorTarget(), + NodeAttributes: cfgTracing.Attributes(), + SamplingRatio: cfgTracing.SamplingRatio(), + OnDialError: func(error) { logger.Errorw("Failed to dial", "err", err) }, + }) + }) + return err +} + +var ( + // ErrorNoAPICredentialsAvailable is returned when not run from a terminal + // and no API credentials have been provided + ErrorNoAPICredentialsAvailable = errors.New("API credentials must be supplied") +) + +// Shell for the node, local commands and remote commands. +type Shell struct { + Renderer + Config plugin.GeneralConfig // initialized in Before + Logger logger.Logger // initialized in Before + CloseLogger func() error // called in After + AppFactory AppFactory + KeyStoreAuthenticator TerminalKeyStoreAuthenticator + FallbackAPIInitializer APIInitializer + Runner Runner + HTTP HTTPClient + CookieAuthenticator CookieAuthenticator + FileSessionRequestBuilder SessionRequestBuilder + PromptingSessionRequestBuilder SessionRequestBuilder + ChangePasswordPrompter ChangePasswordPrompter + PasswordPrompter PasswordPrompter + + configFiles []string + configFilesIsSet bool + secretsFiles []string + secretsFileIsSet bool +} + +func (s *Shell) errorOut(err error) cli.ExitCoder { + if err != nil { + return cli.NewExitError(err.Error(), 1) + } + return nil +} + +// exitOnConfigError is helper that executes as validation func and +// pretty-prints errors +func (s *Shell) configExitErr(validateFn func() error) cli.ExitCoder { + err := validateFn() + if err != nil { + fmt.Println("Invalid configuration:", err) + fmt.Println() + return s.errorOut(errors.New("invalid configuration")) + } + return nil +} + +// AppFactory implements the NewApplication 
method. +type AppFactory interface { + NewApplication(ctx context.Context, cfg plugin.GeneralConfig, appLggr logger.Logger, db *sqlx.DB) (plugin.Application, error) +} + +// PluginAppFactory is used to create a new Application. +type PluginAppFactory struct{} + +// NewApplication returns a new instance of the node with the given config. +func (n PluginAppFactory) NewApplication(ctx context.Context, cfg plugin.GeneralConfig, appLggr logger.Logger, db *sqlx.DB) (app plugin.Application, err error) { + err = initGlobals(cfg.Prometheus(), cfg.Tracing(), appLggr) + if err != nil { + appLggr.Errorf("Failed to initialize globals: %v", err) + } + + err = migrate.SetMigrationENVVars(cfg) + if err != nil { + return nil, err + } + + err = handleNodeVersioning(ctx, db, appLggr, cfg.RootDir(), cfg.Database(), cfg.WebServer().HTTPPort()) + if err != nil { + return nil, err + } + + keyStore := keystore.New(db, utils.GetScryptParams(cfg), appLggr, cfg.Database()) + mailMon := mailbox.NewMonitor(cfg.AppID().String(), appLggr.Named("Mailbox")) + + loopRegistry := plugins.NewLoopRegistry(appLggr, cfg.Tracing()) + + mercuryPool := wsrpc.NewPool(appLggr, cache.Config{ + LatestReportTTL: cfg.Mercury().Cache().LatestReportTTL(), + MaxStaleAge: cfg.Mercury().Cache().MaxStaleAge(), + LatestReportDeadline: cfg.Mercury().Cache().LatestReportDeadline(), + }) + + // create the relayer-chain interoperators from application configuration + relayerFactory := plugin.RelayerFactory{ + Logger: appLggr, + LoopRegistry: loopRegistry, + GRPCOpts: grpcOpts, + MercuryPool: mercuryPool, + } + + evmFactoryCfg := plugin.EVMFactoryConfig{ + CSAETHKeystore: keyStore, + ChainOpts: legacyevm.ChainOpts{AppConfig: cfg, MailMon: mailMon, DB: db}, + } + // evm always enabled for backward compatibility + // TODO BCF-2510 this needs to change in order to clear the path for EVM extraction + initOps := []plugin.CoreRelayerChainInitFunc{plugin.InitEVM(ctx, relayerFactory, evmFactoryCfg)} + + if cfg.CosmosEnabled() { + 
cosmosCfg := plugin.CosmosFactoryConfig{ + Keystore: keyStore.Cosmos(), + TOMLConfigs: cfg.CosmosConfigs(), + DB: db, + QConfig: cfg.Database(), + } + initOps = append(initOps, plugin.InitCosmos(ctx, relayerFactory, cosmosCfg)) + } + if cfg.SolanaEnabled() { + solanaCfg := plugin.SolanaFactoryConfig{ + Keystore: keyStore.Solana(), + TOMLConfigs: cfg.SolanaConfigs(), + } + initOps = append(initOps, plugin.InitSolana(ctx, relayerFactory, solanaCfg)) + } + if cfg.StarkNetEnabled() { + starkCfg := plugin.StarkNetFactoryConfig{ + Keystore: keyStore.StarkNet(), + TOMLConfigs: cfg.StarknetConfigs(), + } + initOps = append(initOps, plugin.InitStarknet(ctx, relayerFactory, starkCfg)) + + } + + relayChainInterops, err := plugin.NewCoreRelayerChainInteroperators(initOps...) + if err != nil { + return nil, err + } + + // Configure and optionally start the audit log forwarder service + auditLogger, err := audit.NewAuditLogger(appLggr, cfg.AuditLogger()) + if err != nil { + return nil, err + } + + restrictedClient := clhttp.NewRestrictedHTTPClient(cfg.Database(), appLggr) + unrestrictedClient := clhttp.NewUnrestrictedHTTPClient() + externalInitiatorManager := webhook.NewExternalInitiatorManager(db, unrestrictedClient, appLggr, cfg.Database()) + return plugin.NewApplication(plugin.ApplicationOpts{ + Config: cfg, + SqlxDB: db, + KeyStore: keyStore, + RelayerChainInteroperators: relayChainInterops, + MailMon: mailMon, + Logger: appLggr, + AuditLogger: auditLogger, + ExternalInitiatorManager: externalInitiatorManager, + Version: static.Version, + RestrictedHTTPClient: restrictedClient, + UnrestrictedHTTPClient: unrestrictedClient, + SecretGenerator: plugin.FilePersistedSecretGenerator{}, + LoopRegistry: loopRegistry, + GRPCOpts: grpcOpts, + MercuryPool: mercuryPool, + }) +} + +// handleNodeVersioning is a setup-time helper to encapsulate version changes and db migration +func handleNodeVersioning(ctx context.Context, db *sqlx.DB, appLggr logger.Logger, rootDir string, cfg 
config.Database, healthReportPort uint16) error { + var err error + // Set up the versioning Configs + verORM := versioning.NewORM(db, appLggr, cfg.DefaultQueryTimeout()) + + if static.Version != static.Unset { + var appv, dbv *semver.Version + appv, dbv, err = versioning.CheckVersion(db, appLggr, static.Version) + if err != nil { + // Exit immediately and don't touch the database if the app version is too old + return fmt.Errorf("CheckVersion: %w", err) + } + + // Take backup if app version is newer than DB version + // Need to do this BEFORE migration + backupCfg := cfg.Backup() + if backupCfg.Mode() != config.DatabaseBackupModeNone && backupCfg.OnVersionUpgrade() { + if err = takeBackupIfVersionUpgrade(cfg.URL(), rootDir, cfg.Backup(), appLggr, appv, dbv, healthReportPort); err != nil { + if errors.Is(err, sql.ErrNoRows) { + appLggr.Debugf("Failed to find any node version in the DB: %w", err) + } else if strings.Contains(err.Error(), "relation \"node_versions\" does not exist") { + appLggr.Debugf("Failed to find any node version in the DB, the node_versions table does not exist yet: %w", err) + } else { + return fmt.Errorf("initializeORM#FindLatestNodeVersion: %w", err) + } + } + } + } + + // Migrate the database + if cfg.MigrateDatabase() { + if err = migrate.Migrate(ctx, db.DB, appLggr); err != nil { + return fmt.Errorf("initializeORM#Migrate: %w", err) + } + } + + // Update to latest version + if static.Version != static.Unset { + version := versioning.NewNodeVersion(static.Version) + if err = verORM.UpsertNodeVersion(version); err != nil { + return fmt.Errorf("UpsertNodeVersion: %w", err) + } + } + return nil +} + +func takeBackupIfVersionUpgrade(dbUrl url.URL, rootDir string, cfg periodicbackup.BackupConfig, lggr logger.Logger, appv, dbv *semver.Version, healthReportPort uint16) (err error) { + if appv == nil { + lggr.Debug("Application version is missing, skipping automatic DB backup.") + return nil + } + if dbv == nil { + lggr.Debug("Database version is 
missing, skipping automatic DB backup.") + return nil + } + if !appv.GreaterThan(dbv) { + lggr.Debugf("Application version %s is older or equal to database version %s, skipping automatic DB backup.", appv.String(), dbv.String()) + return nil + } + lggr.Infof("Upgrade detected: application version %s is newer than database version %s, taking automatic DB backup. To skip automatic database backup before version upgrades, set Database.Backup.OnVersionUpgrade=false. To disable backups entirely set Database.Backup.Mode=none.", appv.String(), dbv.String()) + + databaseBackup, err := periodicbackup.NewDatabaseBackup(dbUrl, rootDir, cfg, lggr) + if err != nil { + return errors.Wrap(err, "takeBackupIfVersionUpgrade failed") + } + + //Because backups can take a long time we must start a "fake" health report to prevent + //node shutdown because of healthcheck fail/timeout + ibhr := services.NewInBackupHealthReport(healthReportPort, lggr) + ibhr.Start() + defer ibhr.Stop() + err = databaseBackup.RunBackup(appv.String()) + return err +} + +// Runner implements the Run method. +type Runner interface { + Run(context.Context, plugin.Application) error +} + +// PluginRunner is used to run the node application. +type PluginRunner struct{} + +// Run sets the log level based on config and starts the web router to listen +// for input and return data. 
+func (n PluginRunner) Run(ctx context.Context, app plugin.Application) error { + config := app.GetConfig() + + mode := gin.ReleaseMode + if !build.IsProd() && config.Log().Level() < zapcore.InfoLevel { + mode = gin.DebugMode + } + gin.SetMode(mode) + gin.DebugPrintRouteFunc = func(httpMethod, absolutePath, handlerName string, nuHandlers int) { + app.GetLogger().Debugf("%-6s %-25s --> %s (%d handlers)", httpMethod, absolutePath, handlerName, nuHandlers) + } + + if err := sentryInit(config.Sentry()); err != nil { + return errors.Wrap(err, "failed to initialize sentry") + } + + ws := config.WebServer() + if ws.HTTPPort() == 0 && ws.TLS().HTTPSPort() == 0 { + return errors.New("You must specify at least one port to listen on") + } + + handler, err := web.NewRouter(app, prometheus) + if err != nil { + return errors.Wrap(err, "failed to create web router") + } + server := server{handler: handler, lggr: app.GetLogger()} + + g, gCtx := errgroup.WithContext(ctx) + serverStartTimeoutDuration := config.WebServer().StartTimeout() + if ws.HTTPPort() != 0 { + go tryRunServerUntilCancelled(gCtx, app.GetLogger(), serverStartTimeoutDuration, func() error { + return server.run(ws.ListenIP(), ws.HTTPPort(), config.WebServer().HTTPWriteTimeout()) + }) + } + + tls := config.WebServer().TLS() + if tls.HTTPSPort() != 0 { + go tryRunServerUntilCancelled(gCtx, app.GetLogger(), serverStartTimeoutDuration, func() error { + return server.runTLS( + tls.ListenIP(), + tls.HTTPSPort(), + tls.CertFile(), + tls.KeyFile(), + config.WebServer().HTTPWriteTimeout()) + }) + } + + g.Go(func() error { + <-gCtx.Done() + var err error + if server.httpServer != nil { + err = errors.WithStack(server.httpServer.Shutdown(context.Background())) + } + if server.tlsServer != nil { + err = multierr.Combine(err, errors.WithStack(server.tlsServer.Shutdown(context.Background()))) + } + return err + }) + + return errors.WithStack(g.Wait()) +} + +func sentryInit(cfg config.Sentry) error { + sentrydsn := cfg.DSN() + if 
sentrydsn == "" { + // Do not initialize sentry at all if the DSN is missing + return nil + } + + var sentryenv string + if env := cfg.Environment(); env != "" { + sentryenv = env + } else if !build.IsProd() { + sentryenv = "dev" + } else { + sentryenv = "prod" + } + + var sentryrelease string + if release := cfg.Release(); release != "" { + sentryrelease = release + } else { + sentryrelease = static.Version + } + + return sentry.Init(sentry.ClientOptions{ + // AttachStacktrace is needed to send stacktrace alongside panics + AttachStacktrace: true, + Dsn: sentrydsn, + Environment: sentryenv, + Release: sentryrelease, + Debug: cfg.Debug(), + }) +} + +func tryRunServerUntilCancelled(ctx context.Context, lggr logger.Logger, timeout time.Duration, runServer func() error) { + for { + // try calling runServer() and log error if any + if err := runServer(); err != nil { + if !errors.Is(err, http.ErrServerClosed) { + lggr.Criticalf("Error starting server: %v", err) + } + } + // if ctx is cancelled, we must leave the loop + select { + case <-ctx.Done(): + return + case <-time.After(timeout): + // pause between attempts, default 15s + } + } +} + +type server struct { + httpServer *http.Server + tlsServer *http.Server + handler *gin.Engine + lggr logger.Logger +} + +func (s *server) run(ip net.IP, port uint16, writeTimeout time.Duration) error { + addr := fmt.Sprintf("%s:%d", ip.String(), port) + s.lggr.Infow(fmt.Sprintf("Listening and serving HTTP on %s", addr), "ip", ip, "port", port) + s.httpServer = createServer(s.handler, addr, writeTimeout) + err := s.httpServer.ListenAndServe() + return errors.Wrap(err, "failed to run plaintext HTTP server") +} + +func (s *server) runTLS(ip net.IP, port uint16, certFile, keyFile string, requestTimeout time.Duration) error { + addr := fmt.Sprintf("%s:%d", ip.String(), port) + s.lggr.Infow(fmt.Sprintf("Listening and serving HTTPS on %s", addr), "ip", ip, "port", port) + s.tlsServer = createServer(s.handler, addr, requestTimeout) + err := 
s.tlsServer.ListenAndServeTLS(certFile, keyFile) + return errors.Wrap(err, "failed to run TLS server (NOTE: you can disable TLS server completely and silence these errors by setting WebServer.TLS.HTTPSPort=0 in your config)") +} + +func createServer(handler *gin.Engine, addr string, requestTimeout time.Duration) *http.Server { + s := &http.Server{ + Addr: addr, + Handler: handler, + ReadTimeout: requestTimeout, + WriteTimeout: requestTimeout, + IdleTimeout: 60 * time.Second, + MaxHeaderBytes: 1 << 20, + } + return s +} + +// HTTPClient encapsulates all methods used to interact with a plugin node API. +type HTTPClient interface { + Get(context.Context, string, ...map[string]string) (*http.Response, error) + Post(context.Context, string, io.Reader) (*http.Response, error) + Put(context.Context, string, io.Reader) (*http.Response, error) + Patch(context.Context, string, io.Reader, ...map[string]string) (*http.Response, error) + Delete(context.Context, string) (*http.Response, error) +} + +type authenticatedHTTPClient struct { + client *http.Client + cookieAuth CookieAuthenticator + sessionRequest sessions.SessionRequest + remoteNodeURL url.URL +} + +// NewAuthenticatedHTTPClient uses the CookieAuthenticator to generate a sessionID +// which is then used for all subsequent HTTP API requests. +func NewAuthenticatedHTTPClient(lggr logger.Logger, clientOpts ClientOpts, cookieAuth CookieAuthenticator, sessionRequest sessions.SessionRequest) HTTPClient { + return &authenticatedHTTPClient{ + client: newHttpClient(lggr, clientOpts.InsecureSkipVerify), + cookieAuth: cookieAuth, + sessionRequest: sessionRequest, + remoteNodeURL: clientOpts.RemoteNodeURL, + } +} + +func newHttpClient(lggr logger.Logger, insecureSkipVerify bool) *http.Client { + tr := &http.Transport{ + // User enables this at their own risk! 
+ // #nosec G402 + TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify}, + } + if insecureSkipVerify { + lggr.Warn("InsecureSkipVerify is on, skipping SSL certificate verification.") + } + return &http.Client{Transport: tr} +} + +// Get performs an HTTP Get using the authenticated HTTP client's cookie. +func (h *authenticatedHTTPClient) Get(ctx context.Context, path string, headers ...map[string]string) (*http.Response, error) { + return h.doRequest(ctx, "GET", path, nil, headers...) +} + +// Post performs an HTTP Post using the authenticated HTTP client's cookie. +func (h *authenticatedHTTPClient) Post(ctx context.Context, path string, body io.Reader) (*http.Response, error) { + return h.doRequest(ctx, "POST", path, body) +} + +// Put performs an HTTP Put using the authenticated HTTP client's cookie. +func (h *authenticatedHTTPClient) Put(ctx context.Context, path string, body io.Reader) (*http.Response, error) { + return h.doRequest(ctx, "PUT", path, body) +} + +// Patch performs an HTTP Patch using the authenticated HTTP client's cookie. +func (h *authenticatedHTTPClient) Patch(ctx context.Context, path string, body io.Reader, headers ...map[string]string) (*http.Response, error) { + return h.doRequest(ctx, "PATCH", path, body, headers...) +} + +// Delete performs an HTTP Delete using the authenticated HTTP client's cookie. 
+func (h *authenticatedHTTPClient) Delete(ctx context.Context, path string) (*http.Response, error) { + return h.doRequest(ctx, "DELETE", path, nil) +} + +func (h *authenticatedHTTPClient) doRequest(ctx context.Context, verb, path string, body io.Reader, headerArgs ...map[string]string) (*http.Response, error) { + var headers map[string]string + if len(headerArgs) > 0 { + headers = headerArgs[0] + } else { + headers = map[string]string{} + } + + request, err := http.NewRequestWithContext(ctx, verb, h.remoteNodeURL.String()+path, body) + if err != nil { + return nil, err + } + + request.Header.Set("Content-Type", "application/json") + for key, value := range headers { + request.Header.Add(key, value) + } + cookie, err := h.cookieAuth.Cookie() + if err != nil { + return nil, err + } else if cookie != nil { + request.AddCookie(cookie) + } + + response, err := h.client.Do(request) + if err != nil { + return response, err + } + if response.StatusCode == http.StatusUnauthorized && (h.sessionRequest.Email != "" || h.sessionRequest.Password != "") { + var cookieerr error + cookie, cookieerr = h.cookieAuth.Authenticate(ctx, h.sessionRequest) + if cookieerr != nil { + return response, err + } + request.Header.Set("Cookie", "") + request.AddCookie(cookie) + response, err = h.client.Do(request) + if err != nil { + return response, err + } + } + return response, nil +} + +// CookieAuthenticator is the interface to generating a cookie to authenticate +// future HTTP requests. +type CookieAuthenticator interface { + Cookie() (*http.Cookie, error) + Authenticate(context.Context, sessions.SessionRequest) (*http.Cookie, error) + Logout() error +} + +type ClientOpts struct { + RemoteNodeURL url.URL + InsecureSkipVerify bool +} + +// SessionCookieAuthenticator is a concrete implementation of CookieAuthenticator +// that retrieves a session id for the user with credentials from the session request. 
+type SessionCookieAuthenticator struct { + config ClientOpts + store CookieStore + lggr logger.SugaredLogger +} + +// NewSessionCookieAuthenticator creates a SessionCookieAuthenticator using the passed config +// and builder. +func NewSessionCookieAuthenticator(config ClientOpts, store CookieStore, lggr logger.Logger) CookieAuthenticator { + return &SessionCookieAuthenticator{config: config, store: store, lggr: logger.Sugared(lggr)} +} + +// Cookie Returns the previously saved authentication cookie. +func (t *SessionCookieAuthenticator) Cookie() (*http.Cookie, error) { + return t.store.Retrieve() +} + +// Authenticate retrieves a session ID via a cookie and saves it to disk. +func (t *SessionCookieAuthenticator) Authenticate(ctx context.Context, sessionRequest sessions.SessionRequest) (*http.Cookie, error) { + b := new(bytes.Buffer) + err := json.NewEncoder(b).Encode(sessionRequest) + if err != nil { + return nil, err + } + url := t.config.RemoteNodeURL.String() + "/sessions" + req, err := http.NewRequestWithContext(ctx, "POST", url, b) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + client := newHttpClient(t.lggr, t.config.InsecureSkipVerify) + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer t.lggr.ErrorIfFn(resp.Body.Close, "Error closing Authenticate response body") + + _, err = parseResponse(resp) + if err != nil { + return nil, err + } + + cookies := resp.Cookies() + if len(cookies) == 0 { + return nil, errors.New("did not receive cookie with session id") + } + sc := web.FindSessionCookie(cookies) + return sc, t.store.Save(sc) +} + +// Deletes any stored session +func (t *SessionCookieAuthenticator) Logout() error { + return t.store.Reset() +} + +// CookieStore is a place to store and retrieve cookies. 
+type CookieStore interface { + Save(cookie *http.Cookie) error + Retrieve() (*http.Cookie, error) + Reset() error +} + +// MemoryCookieStore keeps a single cookie in memory +type MemoryCookieStore struct { + Cookie *http.Cookie +} + +// Save stores a cookie. +func (m *MemoryCookieStore) Save(cookie *http.Cookie) error { + m.Cookie = cookie + return nil +} + +// Removes any stored cookie. +func (m *MemoryCookieStore) Reset() error { + m.Cookie = nil + return nil +} + +// Retrieve returns any Saved cookies. +func (m *MemoryCookieStore) Retrieve() (*http.Cookie, error) { + return m.Cookie, nil +} + +type DiskCookieConfig interface { + RootDir() string +} + +// DiskCookieStore saves a single cookie in the local cli working directory. +type DiskCookieStore struct { + Config DiskCookieConfig +} + +// Save stores a cookie. +func (d DiskCookieStore) Save(cookie *http.Cookie) error { + return os.WriteFile(d.cookiePath(), []byte(cookie.String()), 0600) +} + +// Removes any stored cookie. +func (d DiskCookieStore) Reset() error { + // Write empty bytes + return os.WriteFile(d.cookiePath(), []byte(""), 0600) +} + +// Retrieve returns any Saved cookies. 
+func (d DiskCookieStore) Retrieve() (*http.Cookie, error) { + b, err := os.ReadFile(d.cookiePath()) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, multierr.Append(errors.New("unable to retrieve credentials, you must first login through the CLI"), err) + } + header := http.Header{} + header.Add("Cookie", string(b)) + request := http.Request{Header: header} + cookies := request.Cookies() + if len(cookies) == 0 { + return nil, errors.New("Cookie not in file, you must first login through the CLI") + } + return request.Cookies()[0], nil +} + +func (d DiskCookieStore) cookiePath() string { + return path.Join(d.Config.RootDir(), "cookie") +} + +type UserCache struct { + dir string + lggr func() logger.Logger // func b/c we don't have the final logger at construction time + ensureOnce sync.Once +} + +func NewUserCache(subdir string, lggr func() logger.Logger) (*UserCache, error) { + cd, err := os.UserCacheDir() + if err != nil { + return nil, err + } + return &UserCache{dir: filepath.Join(cd, "plugin", subdir), lggr: lggr}, nil +} + +func (cs *UserCache) ensure() { + if err := os.MkdirAll(cs.dir, 0700); err != nil { + cs.lggr().Errorw("Failed to make user cache dir", "dir", cs.dir, "err", err) + } +} + +func (cs *UserCache) RootDir() string { + cs.ensureOnce.Do(cs.ensure) + return cs.dir +} + +// SessionRequestBuilder is an interface that returns a SessionRequest, +// abstracting how session requests are generated, whether they be from +// the prompt or from a file. +type SessionRequestBuilder interface { + Build(flag string) (sessions.SessionRequest, error) +} + +type promptingSessionRequestBuilder struct { + prompter Prompter +} + +// NewPromptingSessionRequestBuilder uses a prompter, often via terminal, +// to solicit information from a user to generate the SessionRequest. 
+func NewPromptingSessionRequestBuilder(prompter Prompter) SessionRequestBuilder { + return promptingSessionRequestBuilder{prompter} +} + +func (p promptingSessionRequestBuilder) Build(string) (sessions.SessionRequest, error) { + email := p.prompter.Prompt("Enter email: ") + pwd := p.prompter.PasswordPrompt("Enter password: ") + return sessions.SessionRequest{Email: email, Password: pwd}, nil +} + +type fileSessionRequestBuilder struct { + lggr logger.Logger +} + +// NewFileSessionRequestBuilder pulls credentials from a file to generate a SessionRequest. +func NewFileSessionRequestBuilder(lggr logger.Logger) SessionRequestBuilder { + return &fileSessionRequestBuilder{lggr: lggr} +} + +func (f *fileSessionRequestBuilder) Build(file string) (sessions.SessionRequest, error) { + return credentialsFromFile(file, f.lggr.With("file", file)) +} + +// APIInitializer is the interface used to create the API User credentials +// needed to access the API. Does nothing if API user already exists. +type APIInitializer interface { + // Initialize creates a new local Admin user for API access, or does nothing if one exists. + Initialize(orm sessions.BasicAdminUsersORM, lggr logger.Logger) (sessions.User, error) +} + +type promptingAPIInitializer struct { + prompter Prompter +} + +// NewPromptingAPIInitializer creates a concrete instance of APIInitializer +// that uses the terminal to solicit credentials from the user. +func NewPromptingAPIInitializer(prompter Prompter) APIInitializer { + return &promptingAPIInitializer{prompter: prompter} +} + +// Initialize uses the terminal to get credentials that it then saves in the store. 
+func (t *promptingAPIInitializer) Initialize(orm sessions.BasicAdminUsersORM, lggr logger.Logger) (sessions.User, error) { + // Load list of users to determine which to assume, or if a user needs to be created + dbUsers, err := orm.ListUsers() + if err != nil { + return sessions.User{}, errors.Wrap(err, "Unable to List users for initialization") + } + + // If there are no users in the database, prompt for initial admin user creation + if len(dbUsers) == 0 { + if !t.prompter.IsTerminal() { + return sessions.User{}, ErrorNoAPICredentialsAvailable + } + + for { + email := t.prompter.Prompt("Enter API Email: ") + pwd := t.prompter.PasswordPrompt("Enter API Password: ") + // On a fresh DB, create an admin user + user, err2 := sessions.NewUser(email, pwd, sessions.UserRoleAdmin) + if err2 != nil { + lggr.Errorw("Error creating API user", "err", err2) + continue + } + if err = orm.CreateUser(&user); err != nil { + lggr.Errorf("Error creating API user: ", err, "err") + } + return user, err + } + } + + // Attempt to contextually return the correct admin user, CLI access here implies admin + if adminUser, found := attemptAssumeAdminUser(dbUsers, lggr); found { + return adminUser, nil + } + + // Otherwise, multiple admin users exist, prompt for which to use + email := t.prompter.Prompt("Enter email of API user account to assume: ") + user, err := orm.FindUser(email) + + if err != nil { + return sessions.User{}, err + } + return user, nil +} + +type fileAPIInitializer struct { + file string +} + +// NewFileAPIInitializer creates a concrete instance of APIInitializer +// that pulls API user credentials from the passed file path. 
+func NewFileAPIInitializer(file string) APIInitializer { + return fileAPIInitializer{file: file} +} + +func (f fileAPIInitializer) Initialize(orm sessions.BasicAdminUsersORM, lggr logger.Logger) (sessions.User, error) { + request, err := credentialsFromFile(f.file, lggr) + if err != nil { + return sessions.User{}, err + } + + // Load list of users to determine which to assume, or if a user needs to be created + dbUsers, err := orm.ListUsers() + if err != nil { + return sessions.User{}, errors.Wrap(err, "Unable to List users for initialization") + } + + // If there are no users in the database, create initial admin user from session request from file creds + if len(dbUsers) == 0 { + user, err2 := sessions.NewUser(request.Email, request.Password, sessions.UserRoleAdmin) + if err2 != nil { + return user, errors.Wrap(err2, "failed to instantiate new user") + } + return user, orm.CreateUser(&user) + } + + // Attempt to contextually return the correct admin user, CLI access here implies admin + if adminUser, found := attemptAssumeAdminUser(dbUsers, lggr); found { + return adminUser, nil + } + + // Otherwise, multiple admin users exist, attempt to load email specified in session request + user, err := orm.FindUser(request.Email) + if err != nil { + return sessions.User{}, err + } + return user, nil +} + +func attemptAssumeAdminUser(users []sessions.User, lggr logger.Logger) (sessions.User, bool) { + if len(users) == 0 { + return sessions.User{}, false + } + + // If there is only a single DB user, select it within the context of CLI + if len(users) == 1 { + lggr.Infow("Defaulted to assume single DB API User", "email", users[0].Email) + return users[0], true + } + + // If there is only one admin user, use it within the context of CLI + var singleAdmin sessions.User + populatedUser := false + for _, user := range users { + if user.Role == sessions.UserRoleAdmin { + // If multiple admin users found, don't use assume any and clear to continue to prompt + if populatedUser { + 
// Clear flag to skip return + populatedUser = false + break + } + singleAdmin = user + populatedUser = true + } + } + if populatedUser { + lggr.Infow("Defaulted to assume single DB admin API User", "email", singleAdmin) + return singleAdmin, true + } + + return sessions.User{}, false +} + +var ErrNoCredentialFile = errors.New("no API user credential file was passed") + +func credentialsFromFile(file string, lggr logger.Logger) (sessions.SessionRequest, error) { + if len(file) == 0 { + return sessions.SessionRequest{}, ErrNoCredentialFile + } + + lggr.Debug("Initializing API credentials") + dat, err := os.ReadFile(file) + if err != nil { + return sessions.SessionRequest{}, err + } + lines := strings.Split(string(dat), "\n") + if len(lines) < 2 { + return sessions.SessionRequest{}, fmt.Errorf("malformed API credentials file does not have at least two lines at %s", file) + } + credentials := sessions.SessionRequest{ + Email: strings.TrimSpace(lines[0]), + Password: strings.TrimSpace(lines[1]), + } + return credentials, nil +} + +// ChangePasswordPrompter is an interface primarily used for DI to obtain a +// password change request from the User. 
+type ChangePasswordPrompter interface { + Prompt() (web.UpdatePasswordRequest, error) +} + +// NewChangePasswordPrompter returns the production password change request prompter +func NewChangePasswordPrompter() ChangePasswordPrompter { + prompter := NewTerminalPrompter() + return changePasswordPrompter{prompter: prompter} +} + +type changePasswordPrompter struct { + prompter Prompter +} + +func (c changePasswordPrompter) Prompt() (web.UpdatePasswordRequest, error) { + fmt.Println("Changing your plugin account password.") + fmt.Println("NOTE: This will terminate any other sessions.") + oldPassword := c.prompter.PasswordPrompt("Password:") + + fmt.Println("Now enter your **NEW** password") + newPassword := c.prompter.PasswordPrompt("Password:") + confirmPassword := c.prompter.PasswordPrompt("Confirmation:") + + if newPassword != confirmPassword { + return web.UpdatePasswordRequest{}, errors.New("new password and confirmation did not match") + } + + return web.UpdatePasswordRequest{ + OldPassword: oldPassword, + NewPassword: newPassword, + }, nil +} + +// PasswordPrompter is an interface primarily used for DI to obtain a password +// from the User. +type PasswordPrompter interface { + Prompt() string +} + +// NewPasswordPrompter returns the production password change request prompter +func NewPasswordPrompter() PasswordPrompter { + prompter := NewTerminalPrompter() + return passwordPrompter{prompter: prompter} +} + +type passwordPrompter struct { + prompter Prompter +} + +func (c passwordPrompter) Prompt() string { + return c.prompter.PasswordPrompt("Password:") +} + +func confirmAction(c *cli.Context) bool { + if len(c.String("yes")) > 0 { + yes, err := strconv.ParseBool(c.String("yes")) + if err == nil && yes { + return true + } + } + + prompt := NewTerminalPrompter() + var answer string + for { + answer = prompt.Prompt("Are you sure? This action is irreversible! 
(yes/no) ") + if answer == "yes" { + return true + } else if answer == "no" { + return false + } + fmt.Printf("%s is not valid. Please type yes or no\n", answer) + } +} diff --git a/core/cmd/shell_local.go b/core/cmd/shell_local.go new file mode 100644 index 00000000..a1843e92 --- /dev/null +++ b/core/cmd/shell_local.go @@ -0,0 +1,1175 @@ +package cmd + +import ( + "context" + crand "crypto/rand" + "database/sql" + "fmt" + "log" + "math/big" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/fatih/color" + "github.com/lib/pq" + + "github.com/kylelemons/godebug/diff" + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + "golang.org/x/sync/errgroup" + "gopkg.in/guregu/null.v4" + + "github.com/jmoiron/sqlx" + + cutils "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/build" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/shutdown" + "github.com/goplugin/pluginv3.0/v2/core/static" + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" + "github.com/goplugin/pluginv3.0/v2/core/store/migrate" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web" + webPresenters "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + 
"github.com/goplugin/pluginv3.0/v2/internal/testdb" +) + +var ErrProfileTooLong = errors.New("requested profile duration too large") + +func initLocalSubCmds(s *Shell, safe bool) []cli.Command { + return []cli.Command{ + { + Name: "start", + Aliases: []string{"node", "n"}, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "api, a", + Usage: "text file holding the API email and password, each on a line", + }, + cli.BoolFlag{ + Name: "debug, d", + Usage: "set logger level to debug", + }, + cli.StringFlag{ + Name: "password, p", + Usage: "text file holding the password for the node's account", + }, + cli.StringFlag{ + Name: "vrfpassword, vp", + Usage: "text file holding the password for the vrf keys; enables Plugin VRF oracle", + }, + }, + Usage: "Run the Plugin node", + Action: s.RunNode, + }, + { + Name: "rebroadcast-transactions", + Usage: "Manually rebroadcast txs matching nonce range with the specified gas price. This is useful in emergencies e.g. high gas prices and/or network congestion to forcibly clear out the pending TX queue", + Action: s.RebroadcastTransactions, + Flags: []cli.Flag{ + cli.Uint64Flag{ + Name: "beginningNonce, beginning-nonce, b", + Usage: "beginning of nonce range to rebroadcast", + }, + cli.Uint64Flag{ + Name: "endingNonce, ending-nonce, e", + Usage: "end of nonce range to rebroadcast (inclusive)", + }, + cli.Uint64Flag{ + Name: "gasPriceWei, gas-price-wei, g", + Usage: "gas price (in Wei) to rebroadcast transactions at", + }, + cli.StringFlag{ + Name: "password, p", + Usage: "text file holding the password for the node's account", + }, + cli.StringFlag{ + Name: "address, a", + Usage: "The address (in hex format) for the key which we want to rebroadcast transactions", + Required: true, + }, + cli.StringFlag{ + Name: "evmChainID, evm-chain-id", + Usage: "Chain ID for which to rebroadcast transactions. 
If left blank, EVM.ChainID will be used.", + }, + cli.Uint64Flag{ + Name: "gasLimit, gas-limit", + Usage: "OPTIONAL: gas limit to use for each transaction ", + }, + }, + }, + { + Name: "status", + Usage: "Displays the health of various services running inside the node.", + Action: s.Status, + Flags: []cli.Flag{}, + Hidden: true, + Before: func(_ *cli.Context) error { + s.Logger.Warnf("Command deprecated. Use `admin status` instead.") + return nil + }, + }, + { + Name: "profile", + Usage: "Collects profile metrics from the node.", + Action: s.Profile, + Flags: []cli.Flag{ + cli.Uint64Flag{ + Name: "seconds, s", + Usage: "duration of profile capture", + Value: 8, + }, + cli.StringFlag{ + Name: "output_dir, o", + Usage: "output directory of the captured profile", + Value: "/tmp/", + }, + }, + Hidden: true, + Before: func(_ *cli.Context) error { + s.Logger.Warnf("Command deprecated. Use `admin profile` instead.") + return nil + }, + }, + { + Name: "validate", + Usage: "Validate the TOML configuration and secrets that are passed as flags to the `node` command. Prints the full effective configuration, with defaults included", + Action: s.ConfigFileValidate, + }, + { + Name: "db", + Usage: "Commands for managing the database.", + Description: "Potentially destructive commands for managing the database.", + Subcommands: []cli.Command{ + { + Name: "reset", + Usage: "Drop, create and migrate database. Useful for setting up the database in order to run tests or resetting the dev database. 
WARNING: This will ERASE ALL DATA for the specified database, referred to by CL_DATABASE_URL env variable or by the Database.URL field in a secrets TOML config.", + Hidden: safe, + Action: s.ResetDatabase, + Before: s.validateDB, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "dangerWillRobinson", + Usage: "set to true to enable dropping non-test databases", + }, + }, + }, + { + Name: "preparetest", + Usage: "Reset database and load fixtures.", + Hidden: safe, + Action: s.PrepareTestDatabase, + Before: s.validateDB, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "user-only", + Usage: "only include test user fixture", + }, + }, + }, + { + Name: "version", + Usage: "Display the current database version.", + Action: s.VersionDatabase, + Before: s.validateDB, + Flags: []cli.Flag{}, + }, + { + Name: "status", + Usage: "Display the current database migration status.", + Action: s.StatusDatabase, + Before: s.validateDB, + Flags: []cli.Flag{}, + }, + { + Name: "migrate", + Usage: "Migrate the database to the latest version.", + Action: s.MigrateDatabase, + Before: s.validateDB, + Flags: []cli.Flag{}, + }, + { + Name: "rollback", + Usage: "Roll back the database to a previous . Rolls back a single migration if no version specified.", + Action: s.RollbackDatabase, + Before: s.validateDB, + Flags: []cli.Flag{}, + }, + { + Name: "create-migration", + Usage: "Create a new migration.", + Hidden: safe, + Action: s.CreateMigration, + Before: s.validateDB, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "type", + Usage: "set to `go` to generate a .go migration (instead of .sql)", + }, + }, + }, + { + Name: "delete-chain", + Aliases: []string{}, + Usage: "Commands for cleaning up chain specific db tables. 
WARNING: This will ERASE ALL chain specific data referred to by --type and --id options for the specified database, referred to by CL_DATABASE_URL env variable or by the Database.URL field in a secrets TOML config.", + Action: s.CleanupChainTables, + Before: s.validateDB, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "id", + Usage: "chain id based on which chain specific table cleanup will be done", + Required: true, + }, + cli.StringFlag{ + Name: "type", + Usage: "chain type based on which table cleanup will be done, eg. EVM", + Required: true, + }, + cli.BoolFlag{ + Name: "danger", + Usage: "set to true to enable dropping non-test databases", + }, + }, + }, + }, + }, + } +} + +// ownerPermsMask are the file permission bits reserved for owner. +const ownerPermsMask = os.FileMode(0o700) + +// RunNode starts the Plugin core. +func (s *Shell) RunNode(c *cli.Context) error { + if err := s.runNode(c); err != nil { + return s.errorOut(err) + } + return nil +} + +func (s *Shell) runNode(c *cli.Context) error { + lggr := logger.Sugared(s.Logger.Named("RunNode")) + + var pwd, vrfpwd *string + if passwordFile := c.String("password"); passwordFile != "" { + p, err := utils.PasswordFromFile(passwordFile) + if err != nil { + return errors.Wrap(err, "error reading password from file") + } + pwd = &p + } + if vrfPasswordFile := c.String("vrfpassword"); len(vrfPasswordFile) != 0 { + p, err := utils.PasswordFromFile(vrfPasswordFile) + if err != nil { + return errors.Wrapf(err, "error reading VRF password from vrfpassword file \"%s\"", vrfPasswordFile) + } + vrfpwd = &p + } + + s.Config.SetPasswords(pwd, vrfpwd) + + s.Config.LogConfiguration(lggr.Debugf, lggr.Warnf) + + if err := s.Config.Validate(); err != nil { + return errors.Wrap(err, "config validation failed") + } + + lggr.Infow(fmt.Sprintf("Starting Plugin Node %s at commit %s", static.Version, static.Sha), "Version", static.Version, "SHA", static.Sha) + + if build.IsDev() { + lggr.Warn("Plugin is running in DEVELOPMENT 
mode. This is a security risk if enabled in production.") + } + + if err := utils.EnsureDirAndMaxPerms(s.Config.RootDir(), os.FileMode(0700)); err != nil { + return fmt.Errorf("failed to create root directory %q: %w", s.Config.RootDir(), err) + } + + cfg := s.Config + ldb := pg.NewLockedDB(cfg.AppID(), cfg.Database(), cfg.Database().Lock(), lggr) + + // rootCtx will be cancelled when SIGINT|SIGTERM is received + rootCtx, cancelRootCtx := context.WithCancel(context.Background()) + + // cleanExit is used to skip "fail fast" routine + cleanExit := make(chan struct{}) + var shutdownStartTime time.Time + defer func() { + close(cleanExit) + if !shutdownStartTime.IsZero() { + log.Printf("Graceful shutdown time: %s", time.Since(shutdownStartTime)) + } + }() + + go shutdown.HandleShutdown(func(sig string) { + lggr.Infof("Shutting down due to %s signal received...", sig) + + shutdownStartTime = time.Now() + cancelRootCtx() + + select { + case <-cleanExit: + return + case <-time.After(s.Config.ShutdownGracePeriod()): + } + + lggr.Criticalf("Shutdown grace period of %v exceeded, closing DB and exiting...", s.Config.ShutdownGracePeriod()) + // LockedDB.Close() will release DB locks and close DB connection + // Executing this explicitly because defers are not executed in case of os.Exit() + if err := ldb.Close(); err != nil { + lggr.Criticalf("Failed to close LockedDB: %v", err) + } + if err := s.CloseLogger(); err != nil { + log.Printf("Failed to close Logger: %v", err) + } + + os.Exit(-1) + }) + + // Try opening DB connection and acquiring DB locks at once + if err := ldb.Open(rootCtx); err != nil { + // If not successful, we know neither locks nor connection remains opened + return s.errorOut(errors.Wrap(err, "opening db")) + } + defer lggr.ErrorIfFn(ldb.Close, "Error closing db") + + // From now on, DB locks and DB connection will be released on every return. + // Keep watching on logger.Fatal* calls and os.Exit(), because defer will not be executed. 
+ + app, err := s.AppFactory.NewApplication(rootCtx, s.Config, s.Logger, ldb.DB()) + if err != nil { + return s.errorOut(errors.Wrap(err, "fatal error instantiating application")) + } + + // Local shell initialization always uses local auth users table for admin auth + authProviderORM := app.BasicAdminUsersORM() + keyStore := app.GetKeyStore() + err = s.KeyStoreAuthenticator.authenticate(keyStore, s.Config.Password()) + if err != nil { + return errors.Wrap(err, "error authenticating keystore") + } + + legacyEVMChains := app.GetRelayers().LegacyEVMChains() + + if s.Config.EVMEnabled() { + chainList, err2 := legacyEVMChains.List() + if err2 != nil { + return fmt.Errorf("error listing legacy evm chains: %w", err2) + } + for _, ch := range chainList { + if ch.Config().EVM().AutoCreateKey() { + lggr.Debugf("AutoCreateKey=true, will ensure EVM key for chain %s", ch.ID()) + err2 := app.GetKeyStore().Eth().EnsureKeys(ch.ID()) + if err2 != nil { + return errors.Wrap(err2, "failed to ensure keystore keys") + } + } else { + lggr.Debugf("AutoCreateKey=false, will not ensure EVM key for chain %s", ch.ID()) + } + } + } + + if s.Config.OCR().Enabled() { + err2 := app.GetKeyStore().OCR().EnsureKey() + if err2 != nil { + return errors.Wrap(err2, "failed to ensure ocr key") + } + } + if s.Config.OCR2().Enabled() { + var enabledChains []chaintype.ChainType + if s.Config.EVMEnabled() { + enabledChains = append(enabledChains, chaintype.EVM) + } + if s.Config.CosmosEnabled() { + enabledChains = append(enabledChains, chaintype.Cosmos) + } + if s.Config.SolanaEnabled() { + enabledChains = append(enabledChains, chaintype.Solana) + } + if s.Config.StarkNetEnabled() { + enabledChains = append(enabledChains, chaintype.StarkNet) + } + err2 := app.GetKeyStore().OCR2().EnsureKeys(enabledChains...) 
+ if err2 != nil { + return errors.Wrap(err2, "failed to ensure ocr key") + } + } + if s.Config.P2P().Enabled() { + err2 := app.GetKeyStore().P2P().EnsureKey() + if err2 != nil { + return errors.Wrap(err2, "failed to ensure p2p key") + } + } + if s.Config.CosmosEnabled() { + err2 := app.GetKeyStore().Cosmos().EnsureKey() + if err2 != nil { + return errors.Wrap(err2, "failed to ensure cosmos key") + } + } + if s.Config.SolanaEnabled() { + err2 := app.GetKeyStore().Solana().EnsureKey() + if err2 != nil { + return errors.Wrap(err2, "failed to ensure solana key") + } + } + if s.Config.StarkNetEnabled() { + err2 := app.GetKeyStore().StarkNet().EnsureKey() + if err2 != nil { + return errors.Wrap(err2, "failed to ensure starknet key") + } + } + + err2 := app.GetKeyStore().CSA().EnsureKey() + if err2 != nil { + return errors.Wrap(err2, "failed to ensure CSA key") + } + + if e := checkFilePermissions(lggr, s.Config.RootDir()); e != nil { + lggr.Warn(e) + } + + var user sessions.User + if user, err = NewFileAPIInitializer(c.String("api")).Initialize(authProviderORM, lggr); err != nil { + if !errors.Is(err, ErrNoCredentialFile) { + return errors.Wrap(err, "error creating api initializer") + } + if user, err = s.FallbackAPIInitializer.Initialize(authProviderORM, lggr); err != nil { + if errors.Is(err, ErrorNoAPICredentialsAvailable) { + return errors.WithStack(err) + } + return errors.Wrap(err, "error creating fallback initializer") + } + } + + lggr.Info("API exposed for user ", user.Email) + + if err = app.Start(rootCtx); err != nil { + // We do not try stopping any sub-services that might be started, + // because the app will exit immediately upon return. + // But LockedDB will be released by defer in above. 
+ return errors.Wrap(err, "error starting app") + } + + grp, grpCtx := errgroup.WithContext(rootCtx) + + grp.Go(func() error { + <-grpCtx.Done() + if errInternal := app.Stop(); errInternal != nil { + return errors.Wrap(errInternal, "error stopping app") + } + return nil + }) + + lggr.Infow(fmt.Sprintf("Plugin booted in %.2fs", time.Since(static.InitTime).Seconds()), "appID", app.ID()) + + grp.Go(func() error { + errInternal := s.Runner.Run(grpCtx, app) + if errors.Is(errInternal, http.ErrServerClosed) { + errInternal = nil + } + // In tests we have custom runners that stop the app gracefully, + // therefore we need to cancel rootCtx when the Runner has quit. + cancelRootCtx() + return errInternal + }) + + return grp.Wait() +} + +func checkFilePermissions(lggr logger.Logger, rootDir string) error { + // Ensure tls sub directory (and children) permissions are <= `ownerPermsMask`` + tlsDir := filepath.Join(rootDir, "tls") + if _, err := os.Stat(tlsDir); err != nil && !os.IsNotExist(err) { + lggr.Errorf("error checking perms of 'tls' directory: %v", err) + } else if err == nil { + err := utils.EnsureDirAndMaxPerms(tlsDir, ownerPermsMask) + if err != nil { + return err + } + + err = filepath.Walk(tlsDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + lggr.Errorf(`error checking perms of "%v": %v`, path, err) + return err + } + if utils.TooPermissive(info.Mode().Perm(), ownerPermsMask) { + newPerms := info.Mode().Perm() & ownerPermsMask + lggr.Warnf("%s has overly permissive file permissions, reducing them from %s to %s", path, info.Mode().Perm(), newPerms) + return utils.EnsureFilepathMaxPerms(path, newPerms) + } + return nil + }) + if err != nil { + return err + } + } + + // Ensure {secret,cookie} files' permissions are <= `ownerPermsMask`` + protectedFiles := []string{"secret", "cookie", ".password", ".env", ".api"} + for _, fileName := range protectedFiles { + path := filepath.Join(rootDir, fileName) + fileInfo, err := os.Stat(path) + if 
os.IsNotExist(err) { + continue + } else if err != nil { + return err + } + if utils.TooPermissive(fileInfo.Mode().Perm(), ownerPermsMask) { + newPerms := fileInfo.Mode().Perm() & ownerPermsMask + lggr.Warnf("%s has overly permissive file permissions, reducing them from %s to %s", path, fileInfo.Mode().Perm(), newPerms) + err = utils.EnsureFilepathMaxPerms(path, newPerms) + if err != nil { + return err + } + } + owned, err := utils.IsFileOwnedByPlugin(fileInfo) + if err != nil { + lggr.Warn(err) + continue + } + if !owned { + lggr.Warnf("The file %v is not owned by the user running plugin. This will be made mandatory in the future.", path) + } + } + return nil +} + +// RebroadcastTransactions run locally to force manual rebroadcasting of +// transactions in a given nonce range. +func (s *Shell) RebroadcastTransactions(c *cli.Context) (err error) { + ctx := s.ctx() + beginningNonce := c.Int64("beginningNonce") + endingNonce := c.Int64("endingNonce") + gasPriceWei := c.Uint64("gasPriceWei") + overrideGasLimit := c.Uint("gasLimit") + addressHex := c.String("address") + chainIDStr := c.String("evmChainID") + + addressBytes, err := hexutil.Decode(addressHex) + if err != nil { + return s.errorOut(errors.Wrap(err, "could not decode address")) + } + address := gethCommon.BytesToAddress(addressBytes) + + var chainID *big.Int + if chainIDStr != "" { + var ok bool + chainID, ok = big.NewInt(0).SetString(chainIDStr, 10) + if !ok { + return s.errorOut(errors.New("invalid evmChainID")) + } + } + + lggr := logger.Sugared(s.Logger.Named("RebroadcastTransactions")) + db, err := pg.OpenUnlockedDB(s.Config.AppID(), s.Config.Database()) + if err != nil { + return s.errorOut(errors.Wrap(err, "opening DB")) + } + defer lggr.ErrorIfFn(db.Close, "Error closing db") + + app, err := s.AppFactory.NewApplication(ctx, s.Config, lggr, db) + if err != nil { + return s.errorOut(errors.Wrap(err, "fatal error instantiating application")) + } + + // TODO: BCF-2511 once the dust settles on BCF-2440/1 
evaluate how the + // [loop.Relayer] interface needs to be extended to support programming similar to + // this pattern but in a chain-agnostic way + chain, err := app.GetRelayers().LegacyEVMChains().Get(chainID.String()) + if err != nil { + return s.errorOut(err) + } + keyStore := app.GetKeyStore() + + ethClient := chain.Client() + + err = ethClient.Dial(ctx) + if err != nil { + return err + } + + if c.IsSet("password") { + pwd, err2 := utils.PasswordFromFile(c.String("password")) + if err2 != nil { + return s.errorOut(fmt.Errorf("error reading password: %+v", err2)) + } + s.Config.SetPasswords(&pwd, nil) + } + + err = s.Config.Validate() + if err != nil { + return s.errorOut(fmt.Errorf("error validating configuration: %+v", err)) + } + + err = keyStore.Unlock(s.Config.Password().Keystore()) + if err != nil { + return s.errorOut(errors.Wrap(err, "error authenticating keystore")) + } + + if err = keyStore.Eth().CheckEnabled(address, chain.ID()); err != nil { + return s.errorOut(err) + } + + s.Logger.Infof("Rebroadcasting transactions from %v to %v", beginningNonce, endingNonce) + + orm := txmgr.NewTxStore(app.GetSqlxDB(), lggr, s.Config.Database()) + txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), chain.Config().EVM().GasEstimator(), keyStore.Eth(), nil) + cfg := txmgr.NewEvmTxmConfig(chain.Config().EVM()) + feeCfg := txmgr.NewEvmTxmFeeConfig(chain.Config().EVM().GasEstimator()) + ec := txmgr.NewEvmConfirmer(orm, txmgr.NewEvmTxmClient(ethClient), cfg, feeCfg, chain.Config().EVM().Transactions(), chain.Config().Database(), keyStore.Eth(), txBuilder, chain.Logger()) + totalNonces := endingNonce - beginningNonce + 1 + nonces := make([]evmtypes.Nonce, totalNonces) + for i := int64(0); i < totalNonces; i++ { + nonces[i] = evmtypes.Nonce(beginningNonce + i) + } + err = ec.ForceRebroadcast(ctx, nonces, gas.EvmFee{Legacy: assets.NewWeiI(int64(gasPriceWei))}, address, uint32(overrideGasLimit)) + return s.errorOut(err) +} + +type 
HealthCheckPresenter struct { + webPresenters.Check +} + +func (p *HealthCheckPresenter) ToRow() []string { + red := color.New(color.FgRed).SprintFunc() + green := color.New(color.FgGreen).SprintFunc() + + var status string + + switch p.Status { + case web.HealthStatusFailing: + status = red(p.Status) + case web.HealthStatusPassing: + status = green(p.Status) + } + + return []string{ + p.Name, + status, + p.Output, + } +} + +type HealthCheckPresenters []HealthCheckPresenter + +// RenderTable implements TableRenderer +func (ps HealthCheckPresenters) RenderTable(rt RendererTable) error { + headers := []string{"Name", "Status", "Output"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(headers, rows, rt.Writer) + + return nil +} + +var errDBURLMissing = errors.New("You must set CL_DATABASE_URL env variable or provide a secrets TOML with Database.URL set. HINT: If you are running this to set up your local test database, try CL_DATABASE_URL=postgresql://postgres@localhost:5432/plugin_test?sslmode=disable") + +// ConfigValidate validate the client configuration and pretty-prints results +func (s *Shell) ConfigFileValidate(_ *cli.Context) error { + fn := func(f string, params ...any) { fmt.Printf(f, params...) 
} + s.Config.LogConfiguration(fn, fn) + if err := s.configExitErr(s.Config.Validate); err != nil { + return err + } + fmt.Println("Valid configuration.") + return nil +} + +// ValidateDB is a BeforeFunc to run prior to database sub commands +// the ctx must be that of the last subcommand to be validated +func (s *Shell) validateDB(c *cli.Context) error { + return s.configExitErr(s.Config.ValidateDB) +} + +// ctx returns a context.Context that will be cancelled when SIGINT|SIGTERM is received +func (s *Shell) ctx() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + go shutdown.HandleShutdown(func(_ string) { cancel() }) + return ctx +} + +// ResetDatabase drops, creates and migrates the database specified by CL_DATABASE_URL or Database.URL +// in secrets TOML. This is useful to set up the database for testing +func (s *Shell) ResetDatabase(c *cli.Context) error { + ctx := s.ctx() + cfg := s.Config.Database() + parsed := cfg.URL() + if parsed.String() == "" { + return s.errorOut(errDBURLMissing) + } + + dangerMode := c.Bool("dangerWillRobinson") + + dbname := parsed.Path[1:] + if !dangerMode && !strings.HasSuffix(dbname, "_test") { + return s.errorOut(fmt.Errorf("cannot reset database named `%s`. This command can only be run against databases with a name that ends in `_test`, to prevent accidental data loss. 
If you REALLY want to reset this database, pass in the -dangerWillRobinson option", dbname)) + } + lggr := s.Logger + lggr.Infof("Resetting database: %#v", parsed.String()) + lggr.Debugf("Dropping and recreating database: %#v", parsed.String()) + if err := dropAndCreateDB(parsed); err != nil { + return s.errorOut(err) + } + lggr.Debugf("Migrating database: %#v", parsed.String()) + if err := migrateDB(ctx, cfg, lggr); err != nil { + return s.errorOut(err) + } + schema, err := dumpSchema(parsed) + if err != nil { + return s.errorOut(err) + } + lggr.Debugf("Testing rollback and re-migrate for database: %#v", parsed.String()) + var baseVersionID int64 = 54 + if err := downAndUpDB(ctx, cfg, lggr, baseVersionID); err != nil { + return s.errorOut(err) + } + if err := checkSchema(parsed, schema); err != nil { + return s.errorOut(err) + } + return nil +} + +// PrepareTestDatabase calls ResetDatabase then loads fixtures required for tests +func (s *Shell) PrepareTestDatabase(c *cli.Context) error { + if err := s.ResetDatabase(c); err != nil { + return s.errorOut(err) + } + cfg := s.Config + + // Creating pristine DB copy to speed up FullTestDB + dbUrl := cfg.Database().URL() + db, err := sqlx.Open(string(dialects.Postgres), dbUrl.String()) + if err != nil { + return s.errorOut(err) + } + defer db.Close() + templateDB := strings.Trim(dbUrl.Path, "/") + if err = dropAndCreatePristineDB(db, templateDB); err != nil { + return s.errorOut(err) + } + + userOnly := c.Bool("user-only") + fixturePath := "../store/fixtures/fixtures.sql" + if userOnly { + fixturePath = "../store/fixtures/users_only_fixture.sql" + } + if err = insertFixtures(dbUrl, fixturePath); err != nil { + return s.errorOut(err) + } + if err = dropDanglingTestDBs(s.Logger, db); err != nil { + return s.errorOut(err) + } + return s.errorOut(randomizeTestDBSequences(db)) +} + +func dropDanglingTestDBs(lggr logger.Logger, db *sqlx.DB) (err error) { + // Drop all old dangling databases + var dbs []string + if err = 
db.Select(&dbs, `SELECT datname FROM pg_database WHERE datistemplate = false;`); err != nil { + return err + } + + // dropping database is very slow in postgres so we parallelise it here + nWorkers := 25 + ch := make(chan string) + var wg sync.WaitGroup + wg.Add(nWorkers) + errCh := make(chan error, len(dbs)) + for i := 0; i < nWorkers; i++ { + go func() { + defer wg.Done() + for dbname := range ch { + lggr.Infof("Dropping old, dangling test database: %q", dbname) + gerr := cutils.JustError(db.Exec(fmt.Sprintf(`DROP DATABASE IF EXISTS %s`, dbname))) + errCh <- gerr + } + }() + } + for _, dbname := range dbs { + if strings.HasPrefix(dbname, testdb.TestDBNamePrefix) && !strings.HasSuffix(dbname, "_pristine") { + ch <- dbname + } + } + close(ch) + wg.Wait() + close(errCh) + for gerr := range errCh { + err = multierr.Append(err, gerr) + } + return +} + +type failedToRandomizeTestDBSequencesError struct{} + +func (m *failedToRandomizeTestDBSequencesError) Error() string { + return "failed to randomize test db sequences" +} + +// randomizeTestDBSequences randomizes sequenced table columns sequence +// This is necessary as to avoid false positives in some test cases. 
+func randomizeTestDBSequences(db *sqlx.DB) error { + // not ideal to hard code this, but also not safe to do it programmatically :( + schemas := pq.Array([]string{"public", "evm"}) + seqRows, err := db.Query(`SELECT sequence_schema, sequence_name, minimum_value FROM information_schema.sequences WHERE sequence_schema IN ($1)`, schemas) + if err != nil { + return fmt.Errorf("%s: error fetching sequences: %s", failedToRandomizeTestDBSequencesError{}, err) + } + + defer seqRows.Close() + for seqRows.Next() { + var sequenceSchema, sequenceName string + var minimumSequenceValue int64 + if err = seqRows.Scan(&sequenceSchema, &sequenceName, &minimumSequenceValue); err != nil { + return fmt.Errorf("%s: failed scanning sequence rows: %s", failedToRandomizeTestDBSequencesError{}, err) + } + + if sequenceName == "goose_migrations_id_seq" || sequenceName == "configurations_id_seq" { + continue + } + + var randNum *big.Int + randNum, err = crand.Int(crand.Reader, ubig.NewI(10000).ToInt()) + if err != nil { + return fmt.Errorf("%s: failed to generate random number", failedToRandomizeTestDBSequencesError{}) + } + randNum.Add(randNum, big.NewInt(minimumSequenceValue)) + + if _, err = db.Exec(fmt.Sprintf("ALTER SEQUENCE %s.%s RESTART WITH %d", sequenceSchema, sequenceName, randNum)); err != nil { + return fmt.Errorf("%s: failed to alter and restart %s sequence: %w", failedToRandomizeTestDBSequencesError{}, sequenceName, err) + } + } + + if err = seqRows.Err(); err != nil { + return fmt.Errorf("%s: failed to iterate through sequences: %w", failedToRandomizeTestDBSequencesError{}, err) + } + + return nil +} + +// PrepareTestDatabaseUserOnly calls ResetDatabase then loads only user fixtures required for local +// testing against testnets. Does not include fake chain fixtures. 
+func (s *Shell) PrepareTestDatabaseUserOnly(c *cli.Context) error { + if err := s.ResetDatabase(c); err != nil { + return s.errorOut(err) + } + cfg := s.Config + if err := insertFixtures(cfg.Database().URL(), "../store/fixtures/users_only_fixtures.sql"); err != nil { + return s.errorOut(err) + } + return nil +} + +// MigrateDatabase migrates the database +func (s *Shell) MigrateDatabase(_ *cli.Context) error { + ctx := s.ctx() + cfg := s.Config.Database() + parsed := cfg.URL() + if parsed.String() == "" { + return s.errorOut(errDBURLMissing) + } + + err := migrate.SetMigrationENVVars(s.Config) + if err != nil { + return err + } + + s.Logger.Infof("Migrating database: %#v", parsed.String()) + if err := migrateDB(ctx, cfg, s.Logger); err != nil { + return s.errorOut(err) + } + return nil +} + +// RollbackDatabase rolls back the database via down migrations. +func (s *Shell) RollbackDatabase(c *cli.Context) error { + ctx := s.ctx() + var version null.Int + if c.Args().Present() { + arg := c.Args().First() + numVersion, err := strconv.ParseInt(arg, 10, 64) + if err != nil { + return s.errorOut(errors.Errorf("Unable to parse %v as integer", arg)) + } + version = null.IntFrom(numVersion) + } + + db, err := newConnection(s.Config.Database()) + if err != nil { + return fmt.Errorf("failed to initialize orm: %v", err) + } + + if err := migrate.Rollback(ctx, db.DB, s.Logger, version); err != nil { + return fmt.Errorf("migrateDB failed: %v", err) + } + + return nil +} + +// VersionDatabase displays the current database version. 
+func (s *Shell) VersionDatabase(_ *cli.Context) error { + ctx := s.ctx() + db, err := newConnection(s.Config.Database()) + if err != nil { + return fmt.Errorf("failed to initialize orm: %v", err) + } + + version, err := migrate.Current(ctx, db.DB, s.Logger) + if err != nil { + return fmt.Errorf("migrateDB failed: %v", err) + } + + s.Logger.Infof("Database version: %v", version) + return nil +} + +// StatusDatabase displays the database migration status +func (s *Shell) StatusDatabase(_ *cli.Context) error { + ctx := s.ctx() + db, err := newConnection(s.Config.Database()) + if err != nil { + return fmt.Errorf("failed to initialize orm: %v", err) + } + + if err = migrate.Status(ctx, db.DB, s.Logger); err != nil { + return fmt.Errorf("Status failed: %v", err) + } + return nil +} + +// CreateMigration displays the database migration status +func (s *Shell) CreateMigration(c *cli.Context) error { + if !c.Args().Present() { + return s.errorOut(errors.New("You must specify a migration name")) + } + db, err := newConnection(s.Config.Database()) + if err != nil { + return fmt.Errorf("failed to initialize orm: %v", err) + } + + migrationType := c.String("type") + if migrationType != "go" { + migrationType = "sql" + } + + if err = migrate.Create(db.DB, c.Args().First(), migrationType); err != nil { + return fmt.Errorf("Status failed: %v", err) + } + return nil +} + +// CleanupChainTables deletes database table rows based on chain type and chain id input. +func (s *Shell) CleanupChainTables(c *cli.Context) error { + cfg := s.Config.Database() + parsed := cfg.URL() + if parsed.String() == "" { + return s.errorOut(errDBURLMissing) + } + + dbname := parsed.Path[1:] + if !c.Bool("danger") && !strings.HasSuffix(dbname, "_test") { + return s.errorOut(fmt.Errorf("cannot reset database named `%s`. This command can only be run against databases with a name that ends in `_test`, to prevent accidental data loss. 
If you really want to delete chain specific data from this database, pass in the --danger option", dbname)) + } + + db, err := newConnection(cfg) + if err != nil { + return s.errorOut(errors.Wrap(err, "error connecting to the database")) + } + defer db.Close() + + // some tables with evm_chain_id (mostly job specs) are in public schema + tablesToDeleteFromQuery := `SELECT table_name, table_schema FROM information_schema.columns WHERE "column_name"=$1;` + // Delete rows from each table based on the chain_id. + if !strings.EqualFold("EVM", c.String("type")) { + return s.errorOut(errors.New("unknown chain type")) + } + rows, err := db.Query(tablesToDeleteFromQuery, "evm_chain_id") + if err != nil { + return err + } + defer rows.Close() + + var tablesToDeleteFrom []string + for rows.Next() { + var name string + var schema string + if err = rows.Scan(&name, &schema); err != nil { + return err + } + tablesToDeleteFrom = append(tablesToDeleteFrom, schema+"."+name) + } + if rows.Err() != nil { + return rows.Err() + } + + for _, tableName := range tablesToDeleteFrom { + query := fmt.Sprintf(`DELETE FROM %s WHERE "evm_chain_id"=$1;`, tableName) + _, err = db.Exec(query, c.String("id")) + if err != nil { + fmt.Printf("Error deleting rows containing evm_chain_id from %s: %v\n", tableName, err) + } else { + fmt.Printf("Rows with evm_chain_id %s deleted from %s.\n", c.String("id"), tableName) + } + } + return nil +} + +type dbConfig interface { + DefaultIdleInTxSessionTimeout() time.Duration + DefaultLockTimeout() time.Duration + MaxOpenConns() int + MaxIdleConns() int + URL() url.URL + Dialect() dialects.DialectName +} + +func newConnection(cfg dbConfig) (*sqlx.DB, error) { + parsed := cfg.URL() + if parsed.String() == "" { + return nil, errDBURLMissing + } + return pg.NewConnection(parsed.String(), cfg.Dialect(), cfg) +} + +func dropAndCreateDB(parsed url.URL) (err error) { + // Cannot drop the database if we are connected to it, so we must connect + // to a different one. 
template1 should be present on all postgres installations + dbname := parsed.Path[1:] + parsed.Path = "/template1" + db, err := sql.Open(string(dialects.Postgres), parsed.String()) + if err != nil { + return fmt.Errorf("unable to open postgres database for creating test db: %+v", err) + } + defer func() { + if cerr := db.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + _, err = db.Exec(fmt.Sprintf(`DROP DATABASE IF EXISTS "%s"`, dbname)) + if err != nil { + return fmt.Errorf("unable to drop postgres database: %v", err) + } + _, err = db.Exec(fmt.Sprintf(`CREATE DATABASE "%s"`, dbname)) + if err != nil { + return fmt.Errorf("unable to create postgres database: %v", err) + } + return nil +} + +func dropAndCreatePristineDB(db *sqlx.DB, template string) (err error) { + _, err = db.Exec(fmt.Sprintf(`DROP DATABASE IF EXISTS "%s"`, testdb.PristineDBName)) + if err != nil { + return fmt.Errorf("unable to drop postgres database: %v", err) + } + _, err = db.Exec(fmt.Sprintf(`CREATE DATABASE "%s" WITH TEMPLATE "%s"`, testdb.PristineDBName, template)) + if err != nil { + return fmt.Errorf("unable to create postgres database: %v", err) + } + return nil +} + +func migrateDB(ctx context.Context, config dbConfig, lggr logger.Logger) error { + db, err := newConnection(config) + if err != nil { + return fmt.Errorf("failed to initialize orm: %v", err) + } + + if err = migrate.Migrate(ctx, db.DB, lggr); err != nil { + return fmt.Errorf("migrateDB failed: %v", err) + } + return db.Close() +} + +func downAndUpDB(ctx context.Context, cfg dbConfig, lggr logger.Logger, baseVersionID int64) error { + db, err := newConnection(cfg) + if err != nil { + return fmt.Errorf("failed to initialize orm: %v", err) + } + if err = migrate.Rollback(ctx, db.DB, lggr, null.IntFrom(baseVersionID)); err != nil { + return fmt.Errorf("test rollback failed: %v", err) + } + if err = migrate.Migrate(ctx, db.DB, lggr); err != nil { + return fmt.Errorf("second migrateDB failed: %v", err) + } 
+ return db.Close() +} + +func dumpSchema(dbURL url.URL) (string, error) { + args := []string{ + dbURL.String(), + "--schema-only", + } + cmd := exec.Command( + "pg_dump", args..., + ) + + schema, err := cmd.Output() + if err != nil { + var ee *exec.ExitError + if errors.As(err, &ee) { + return "", fmt.Errorf("failed to dump schema: %v\n%s", err, string(ee.Stderr)) + } + return "", fmt.Errorf("failed to dump schema: %v", err) + } + return string(schema), nil +} + +func checkSchema(dbURL url.URL, prevSchema string) error { + newSchema, err := dumpSchema(dbURL) + if err != nil { + return err + } + df := diff.Diff(prevSchema, newSchema) + if len(df) > 0 { + fmt.Println(df) + return errors.New("schema pre- and post- rollback does not match (ctrl+f for '+' or '-' to find the changed lines)") + } + return nil +} + +func insertFixtures(dbURL url.URL, pathToFixtures string) (err error) { + db, err := sql.Open(string(dialects.Postgres), dbURL.String()) + if err != nil { + return fmt.Errorf("unable to open postgres database for creating test db: %+v", err) + } + defer func() { + if cerr := db.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + _, filename, _, ok := runtime.Caller(1) + if !ok { + return errors.New("could not get runtime.Caller(1)") + } + filepath := path.Join(path.Dir(filename), pathToFixtures) + fixturesSQL, err := os.ReadFile(filepath) + if err != nil { + return err + } + _, err = db.Exec(string(fixturesSQL)) + return err +} diff --git a/core/cmd/shell_local_test.go b/core/cmd/shell_local_test.go new file mode 100644 index 00000000..16255f0f --- /dev/null +++ b/core/cmd/shell_local_test.go @@ -0,0 +1,514 @@ +package cmd_test + +import ( + "flag" + "math/big" + "os" + "strconv" + "testing" + "time" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/common/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + 
"github.com/goplugin/pluginv3.0/v2/core/cmd" + cmdMocks "github.com/goplugin/pluginv3.0/v2/core/cmd/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + pluginmocks "github.com/goplugin/pluginv3.0/v2/core/services/plugin/mocks" + evmrelayer "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/sessions/localauth" + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/plugins" + + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" +) + +func genTestEVMRelayers(t *testing.T, opts legacyevm.ChainRelayExtenderConfig, ks evmrelayer.CSAETHKeystore) *plugin.CoreRelayerChainInteroperators { + f := plugin.RelayerFactory{ + Logger: opts.Logger, + LoopRegistry: plugins.NewLoopRegistry(opts.Logger, opts.AppConfig.Tracing()), + } + + relayers, err := plugin.NewCoreRelayerChainInteroperators(plugin.InitEVM(testutils.Context(t), f, plugin.EVMFactoryConfig{ + ChainOpts: opts.ChainOpts, + CSAETHKeystore: ks, + })) + if err != nil { + t.Fatal(err) + } + return relayers + +} + +func TestShell_RunNodeWithPasswords(t *testing.T) { + tests := []struct { + 
name string + pwdfile string + wantUnlocked bool + }{ + {"correct", "../internal/fixtures/correct_password.txt", true}, + {"incorrect", "../internal/fixtures/incorrect_password.txt", false}, + {"wrongfile", "doesntexist.txt", false}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + s.Password.Keystore = models.NewSecret("dummy") + c.EVM[0].Nodes[0].Name = ptr("fake") + c.EVM[0].Nodes[0].HTTPURL = commonconfig.MustParseURL("http://fake.com") + c.EVM[0].Nodes[0].WSURL = commonconfig.MustParseURL("WSS://fake.com/ws") + // seems to be needed for config validate + c.Insecure.OCRDevelopmentMode = nil + }) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + authProviderORM := localauth.NewORM(db, time.Minute, logger.TestLogger(t), cfg.Database(), audit.NoopLogger) + + lggr := logger.TestLogger(t) + + opts := legacyevm.ChainRelayExtenderConfig{ + Logger: lggr, + KeyStore: keyStore.Eth(), + ChainOpts: legacyevm.ChainOpts{ + AppConfig: cfg, + MailMon: &mailbox.Monitor{}, + DB: db, + }, + } + testRelayers := genTestEVMRelayers(t, opts, keyStore) + + // Purge the fixture users to test assumption of single admin + // initialUser user created above + pgtest.MustExec(t, db, "DELETE FROM users;") + + app := mocks.NewApplication(t) + app.On("AuthenticationProvider").Return(authProviderORM).Maybe() + app.On("BasicAdminUsersORM").Return(authProviderORM).Maybe() + app.On("GetKeyStore").Return(keyStore).Maybe() + app.On("GetRelayers").Return(testRelayers).Maybe() + app.On("Start", mock.Anything).Maybe().Return(nil) + app.On("Stop").Maybe().Return(nil) + app.On("ID").Maybe().Return(uuid.New()) + + ethClient := evmtest.NewEthClientMock(t) + ethClient.On("Dial", mock.Anything).Return(nil).Maybe() + ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(10), nil).Maybe() + + cltest.MustInsertRandomKey(t, 
keyStore.Eth()) + apiPrompt := cltest.NewMockAPIInitializer(t) + + client := cmd.Shell{ + Config: cfg, + FallbackAPIInitializer: apiPrompt, + Runner: cltest.EmptyRunner{}, + AppFactory: cltest.InstanceAppFactory{App: app}, + Logger: lggr, + } + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RunNode, set, "") + + require.NoError(t, set.Set("password", test.pwdfile)) + + c := cli.NewContext(nil, set, nil) + + run := func() error { + cli := cmd.NewApp(&client) + if err := cli.Before(c); err != nil { + return err + } + return client.RunNode(c) + } + + if test.wantUnlocked { + assert.NoError(t, run()) + assert.Equal(t, 1, apiPrompt.Count) + } else { + assert.Error(t, run()) + assert.Equal(t, 0, apiPrompt.Count) + } + }) + } +} + +func TestShell_RunNodeWithAPICredentialsFile(t *testing.T) { + tests := []struct { + name string + apiFile string + wantPrompt bool + wantError bool + }{ + {"correct", "../internal/fixtures/apicredentials", false, false}, + {"no file", "", true, false}, + {"wrong file", "doesntexist.txt", false, true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + s.Password.Keystore = models.NewSecret("16charlengthp4SsW0rD1!@#_") + c.EVM[0].Nodes[0].Name = ptr("fake") + c.EVM[0].Nodes[0].WSURL = commonconfig.MustParseURL("WSS://fake.com/ws") + c.EVM[0].Nodes[0].HTTPURL = commonconfig.MustParseURL("http://fake.com") + // seems to be needed for config validate + c.Insecure.OCRDevelopmentMode = nil + }) + db := pgtest.NewSqlxDB(t) + authProviderORM := localauth.NewORM(db, time.Minute, logger.TestLogger(t), cfg.Database(), audit.NoopLogger) + + // Clear out fixture users/users created from the other test cases + // This asserts that on initial run with an empty users table that the credentials file will instantiate and + // create/run with a new admin user + pgtest.MustExec(t, db, "DELETE FROM users;") + + keyStore := 
cltest.NewKeyStore(t, db, cfg.Database()) + _, err := keyStore.Eth().Create(&cltest.FixtureChainID) + require.NoError(t, err) + + ethClient := evmtest.NewEthClientMock(t) + ethClient.On("Dial", mock.Anything).Return(nil).Maybe() + ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(10), nil).Maybe() + + lggr := logger.TestLogger(t) + opts := legacyevm.ChainRelayExtenderConfig{ + Logger: lggr, + KeyStore: keyStore.Eth(), + ChainOpts: legacyevm.ChainOpts{ + AppConfig: cfg, + MailMon: &mailbox.Monitor{}, + DB: db, + }, + } + testRelayers := genTestEVMRelayers(t, opts, keyStore) + app := mocks.NewApplication(t) + app.On("BasicAdminUsersORM").Return(authProviderORM) + app.On("GetKeyStore").Return(keyStore) + app.On("GetRelayers").Return(testRelayers).Maybe() + app.On("Start", mock.Anything).Maybe().Return(nil) + app.On("Stop").Maybe().Return(nil) + app.On("ID").Maybe().Return(uuid.New()) + + prompter := cmdMocks.NewPrompter(t) + + apiPrompt := cltest.NewMockAPIInitializer(t) + + client := cmd.Shell{ + Config: cfg, + AppFactory: cltest.InstanceAppFactory{App: app}, + KeyStoreAuthenticator: cmd.TerminalKeyStoreAuthenticator{prompter}, + FallbackAPIInitializer: apiPrompt, + Runner: cltest.EmptyRunner{}, + Logger: lggr, + } + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RunNode, set, "") + + require.NoError(t, set.Set("api", test.apiFile)) + + c := cli.NewContext(nil, set, nil) + + if test.wantError { + err = client.RunNode(c) + assert.ErrorContains(t, err, "error creating api initializer: open doesntexist.txt: no such file or directory") + } else { + assert.NoError(t, client.RunNode(c)) + } + + assert.Equal(t, test.wantPrompt, apiPrompt.Count > 0) + }) + } +} + +func TestShell_DiskMaxSizeBeforeRotateOptionDisablesAsExpected(t *testing.T) { + tests := []struct { + name string + logFileSize func(t *testing.T) utils.FileSize + fileShouldExist bool + }{ + {"DiskMaxSizeBeforeRotate = 0 => no log on disk", func(t 
*testing.T) utils.FileSize { + return 0 + }, false}, + {"DiskMaxSizeBeforeRotate > 0 => log on disk (positive control)", func(t *testing.T) utils.FileSize { + var logFileSize utils.FileSize + err := logFileSize.UnmarshalText([]byte("100mb")) + assert.NoError(t, err) + + return logFileSize + }, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := logger.Config{ + Dir: t.TempDir(), + FileMaxSizeMB: int(tt.logFileSize(t) / utils.MB), + } + assert.NoError(t, os.MkdirAll(cfg.Dir, os.FileMode(0700))) + + lggr, closeFn := cfg.New() + t.Cleanup(func() { assert.NoError(t, closeFn()) }) + + // Tries to create a log file by logging. The log file won't be created if there's no logging happening. + lggr.Debug("Trying to create a log file by logging.") + + _, err := os.Stat(cfg.LogsFile()) + require.Equal(t, os.IsNotExist(err), !tt.fileShouldExist) + }) + } +} + +func TestShell_RebroadcastTransactions_Txm(t *testing.T) { + // Use a non-transactional db for this test because we need to + // test multiple connections to the database, and changes made within + // the transaction cannot be seen from another connection. + config, sqlxDB := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Database.Dialect = dialects.Postgres + // evm config is used in this test. but if set, it must be pass config validation. 
+ // simplest to make it nil + c.EVM = nil + // seems to be needed for config validate + c.Insecure.OCRDevelopmentMode = nil + }) + keyStore := cltest.NewKeyStore(t, sqlxDB, config.Database()) + _, fromAddress := cltest.MustInsertRandomKey(t, keyStore.Eth()) + + txStore := cltest.NewTestTxStore(t, sqlxDB, config.Database()) + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 7, 42, fromAddress) + + lggr := logger.TestLogger(t) + + app := mocks.NewApplication(t) + app.On("GetSqlxDB").Return(sqlxDB) + app.On("GetKeyStore").Return(keyStore) + app.On("ID").Maybe().Return(uuid.New()) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + legacy := cltest.NewLegacyChainsWithMockChain(t, ethClient, config) + + mockRelayerChainInteroperators := &pluginmocks.FakeRelayerChainInteroperators{EVMChains: legacy} + app.On("GetRelayers").Return(mockRelayerChainInteroperators).Maybe() + ethClient.On("Dial", mock.Anything).Return(nil) + + c := cmd.Shell{ + Config: config, + AppFactory: cltest.InstanceAppFactory{App: app}, + FallbackAPIInitializer: cltest.NewMockAPIInitializer(t), + Runner: cltest.EmptyRunner{}, + Logger: lggr, + } + + beginningNonce := uint64(7) + endingNonce := uint64(10) + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(c.RebroadcastTransactions, set, "") + + require.NoError(t, set.Set("evmChainID", testutils.FixtureChainID.String())) + require.NoError(t, set.Set("beginningNonce", strconv.FormatUint(beginningNonce, 10))) + require.NoError(t, set.Set("endingNonce", strconv.FormatUint(endingNonce, 10))) + require.NoError(t, set.Set("gasPriceWei", "100000000000")) + require.NoError(t, set.Set("gasLimit", "3000000")) + require.NoError(t, set.Set("address", fromAddress.Hex())) + require.NoError(t, set.Set("password", "../internal/fixtures/correct_password.txt")) + + ctx := cli.NewContext(nil, set, nil) + + for i := beginningNonce; i <= endingNonce; i++ { + n := i + ethClient.On("SendTransactionReturnCode", mock.Anything, 
mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return tx.Nonce() == n + }), mock.Anything).Once().Return(client.Successful, nil) + } + + assert.NoError(t, c.RebroadcastTransactions(ctx)) +} + +func TestShell_RebroadcastTransactions_OutsideRange_Txm(t *testing.T) { + beginningNonce := uint(7) + endingNonce := uint(10) + gasPrice := big.NewInt(100000000000) + gasLimit := uint64(3000000) + + tests := []struct { + name string + nonce uint + }{ + {"below beginning", beginningNonce - 1}, + {"above ending", endingNonce + 1}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Use the non-transactional db for this test because we need to + // test multiple connections to the database, and changes made within + // the transaction cannot be seen from another connection. + config, sqlxDB := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Database.Dialect = dialects.Postgres + // evm config is used in this test. but if set, it must be pass config validation. 
+ // simplest to make it nil + c.EVM = nil + // seems to be needed for config validate + c.Insecure.OCRDevelopmentMode = nil + }) + + keyStore := cltest.NewKeyStore(t, sqlxDB, config.Database()) + + _, fromAddress := cltest.MustInsertRandomKey(t, keyStore.Eth()) + + txStore := cltest.NewTestTxStore(t, sqlxDB, config.Database()) + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, int64(test.nonce), 42, fromAddress) + + lggr := logger.TestLogger(t) + + app := mocks.NewApplication(t) + app.On("GetSqlxDB").Return(sqlxDB) + app.On("GetKeyStore").Return(keyStore) + app.On("ID").Maybe().Return(uuid.New()) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("Dial", mock.Anything).Return(nil) + legacy := cltest.NewLegacyChainsWithMockChain(t, ethClient, config) + + mockRelayerChainInteroperators := &pluginmocks.FakeRelayerChainInteroperators{EVMChains: legacy} + app.On("GetRelayers").Return(mockRelayerChainInteroperators).Maybe() + + c := cmd.Shell{ + Config: config, + AppFactory: cltest.InstanceAppFactory{App: app}, + FallbackAPIInitializer: cltest.NewMockAPIInitializer(t), + Runner: cltest.EmptyRunner{}, + Logger: lggr, + } + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(c.RebroadcastTransactions, set, "") + + require.NoError(t, set.Set("evmChainID", testutils.FixtureChainID.String())) + require.NoError(t, set.Set("beginningNonce", strconv.FormatUint(uint64(beginningNonce), 10))) + require.NoError(t, set.Set("endingNonce", strconv.FormatUint(uint64(endingNonce), 10))) + require.NoError(t, set.Set("gasPriceWei", gasPrice.String())) + require.NoError(t, set.Set("gasLimit", strconv.FormatUint(gasLimit, 10))) + require.NoError(t, set.Set("address", fromAddress.Hex())) + + require.NoError(t, set.Set("password", "../internal/fixtures/correct_password.txt")) + ctx := cli.NewContext(nil, set, nil) + + for i := beginningNonce; i <= endingNonce; i++ { + n := i + ethClient.On("SendTransactionReturnCode", mock.Anything, 
mock.MatchedBy(func(tx *gethTypes.Transaction) bool { + return uint(tx.Nonce()) == n + }), mock.Anything).Once().Return(client.Successful, nil) + } + + assert.NoError(t, c.RebroadcastTransactions(ctx)) + + cltest.AssertEthTxAttemptCountStays(t, txStore, 1) + }) + } +} + +func TestShell_RebroadcastTransactions_AddressCheck(t *testing.T) { + tests := []struct { + name string + enableAddress bool + shouldError bool + errorContains string + }{ + {"Rebroadcast: enabled address", true, false, ""}, + {"Rebroadcast: disabled address", false, true, "exists but is disabled for chain"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + + config, sqlxDB := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Database.Dialect = dialects.Postgres + + c.EVM = nil + // seems to be needed for config validate + c.Insecure.OCRDevelopmentMode = nil + }) + + keyStore := cltest.NewKeyStore(t, sqlxDB, config.Database()) + + _, fromAddress := cltest.MustInsertRandomKey(t, keyStore.Eth()) + + if !test.enableAddress { + err := keyStore.Eth().Disable(fromAddress, testutils.FixtureChainID) + require.NoError(t, err, "failed to disable test key") + } + + lggr := logger.TestLogger(t) + + app := mocks.NewApplication(t) + app.On("GetSqlxDB").Maybe().Return(sqlxDB) + app.On("GetKeyStore").Return(keyStore) + app.On("ID").Maybe().Return(uuid.New()) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + ethClient.On("Dial", mock.Anything).Return(nil) + legacy := cltest.NewLegacyChainsWithMockChain(t, ethClient, config) + + mockRelayerChainInteroperators := &pluginmocks.FakeRelayerChainInteroperators{EVMChains: legacy} + app.On("GetRelayers").Return(mockRelayerChainInteroperators).Maybe() + ethClient.On("SendTransactionReturnCode", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(client.Successful, nil) + + client := cmd.Shell{ + Config: config, + AppFactory: cltest.InstanceAppFactory{App: app}, + FallbackAPIInitializer: 
cltest.NewMockAPIInitializer(t), + Runner: cltest.EmptyRunner{}, + Logger: lggr, + } + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RebroadcastTransactions, set, "") + + require.NoError(t, set.Set("evmChainID", testutils.FixtureChainID.String())) + require.NoError(t, set.Set("address", fromAddress.Hex())) + require.NoError(t, set.Set("password", "../internal/fixtures/correct_password.txt")) + c := cli.NewContext(nil, set, nil) + if test.shouldError { + require.ErrorContains(t, client.RebroadcastTransactions(c), test.errorContains) + } else { + require.NoError(t, client.RebroadcastTransactions(c)) + } + + }) + } +} + +func TestShell_CleanupChainTables(t *testing.T) { + // Just check if it doesn't error, command itself shouldn't be changed unless major schema changes were made. + // It would be really hard to write a test that accounts for schema changes, so this should be enough to alarm us that something broke. + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { c.Database.Dialect = dialects.Postgres }) + client := cmd.Shell{ + Config: config, + Logger: logger.TestLogger(t), + } + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.CleanupChainTables, set, "") + require.NoError(t, set.Set("id", testutils.FixtureChainID.String())) + require.NoError(t, set.Set("type", "EVM")) + // heavyweight creates test db named plugin_test_uid, while usual naming is plugin_test + // CleanupChainTables handles test db name with plugin_test, but because of heavyweight test db naming we have to set danger flag + require.NoError(t, set.Set("danger", "true")) + c := cli.NewContext(nil, set, nil) + require.NoError(t, client.CleanupChainTables(c)) +} diff --git a/core/cmd/shell_remote.go b/core/cmd/shell_remote.go new file mode 100644 index 00000000..c3b9992c --- /dev/null +++ b/core/cmd/shell_remote.go @@ -0,0 +1,540 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + 
"os" + "strconv" + "strings" + + "github.com/gin-gonic/gin" + "github.com/manyminds/api2go/jsonapi" + "github.com/mitchellh/go-homedir" + "github.com/pelletier/go-toml" + "github.com/pkg/errors" + "github.com/tidwall/gjson" + "github.com/urfave/cli" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/static" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/web" + webpresenters "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initRemoteConfigSubCmds(s *Shell) []cli.Command { + return []cli.Command{ + { + Name: "show", + Usage: "Show the application configuration", + Action: s.ConfigV2, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "user-only", + Usage: "If set, show only the user-provided TOML configuration, omitting application defaults", + }, + }, + }, + { + Name: "loglevel", + Usage: "Set log level", + Action: s.SetLogLevel, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "level", + Usage: "set log level for node (debug||info||warn||error)", + }, + }, + }, + { + Name: "logsql", + Usage: "Enable/disable SQL statement logging", + Action: s.SetLogSQL, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "enable", + Usage: "enable SQL logging", + }, + cli.BoolFlag{ + Name: "disable", + Usage: "disable SQL logging", + }, + }, + }, + { + Name: "validate", + Usage: "DEPRECATED. 
Use `plugin node validate`", + Before: func(c *cli.Context) error { + return s.errorOut(fmt.Errorf("Deprecated, use `plugin node validate`")) + }, + Hidden: true, + }, + } +} + +var ( + errUnauthorized = errors.New(http.StatusText(http.StatusUnauthorized)) + errForbidden = errors.New(http.StatusText(http.StatusForbidden)) + errBadRequest = errors.New(http.StatusText(http.StatusBadRequest)) +) + +// CreateExternalInitiator adds an external initiator +func (s *Shell) CreateExternalInitiator(c *cli.Context) (err error) { + if c.NArg() != 1 && c.NArg() != 2 { + return s.errorOut(errors.New("create expects 1 - 2 arguments: a name and a url (optional)")) + } + + var request bridges.ExternalInitiatorRequest + request.Name = c.Args().Get(0) + + // process optional URL + if c.NArg() == 2 { + var reqURL *url.URL + reqURL, err = url.ParseRequestURI(c.Args().Get(1)) + if err != nil { + return s.errorOut(err) + } + request.URL = (*models.WebURL)(reqURL) + } + + requestData, err := json.Marshal(request) + if err != nil { + return s.errorOut(err) + } + + buf := bytes.NewBuffer(requestData) + resp, err := s.HTTP.Post(s.ctx(), "/v2/external_initiators", buf) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var ei webpresenters.ExternalInitiatorAuthentication + err = s.renderAPIResponse(resp, &ei) + return err +} + +// DeleteExternalInitiator removes an external initiator +func (s *Shell) DeleteExternalInitiator(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the name of the external initiator to delete")) + } + + resp, err := s.HTTP.Delete(s.ctx(), "/v2/external_initiators/"+c.Args().First()) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + _, err = s.parseResponse(resp) + return err +} + +func (s *Shell) 
getPage(requestURI string, page int, model interface{}) (err error) { + uri, err := url.Parse(requestURI) + if err != nil { + return err + } + q := uri.Query() + if page > 0 { + q.Set("page", strconv.Itoa(page)) + } + uri.RawQuery = q.Encode() + + resp, err := s.HTTP.Get(s.ctx(), uri.String()) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + err = s.deserializeAPIResponse(resp, model, &jsonapi.Links{}) + if err != nil { + return err + } + err = s.errorOut(s.Render(model)) + return err +} + +// RemoteLogin creates a cookie session to run remote commands. +func (s *Shell) RemoteLogin(c *cli.Context) error { + lggr := s.Logger.Named("RemoteLogin") + sessionRequest, err := s.buildSessionRequest(c.String("file")) + if err != nil { + return s.errorOut(err) + } + _, err = s.CookieAuthenticator.Authenticate(s.ctx(), sessionRequest) + if err != nil { + return s.errorOut(err) + } + err = s.checkRemoteBuildCompatibility(lggr, c.Bool("bypass-version-check"), static.Version, static.Sha) + if err != nil { + return s.errorOut(err) + } + fmt.Println("Successfully Logged In.") + return nil +} + +// Logout removes local and remote session. +func (s *Shell) Logout(_ *cli.Context) (err error) { + resp, err := s.HTTP.Delete(s.ctx(), "/sessions") + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + err = s.CookieAuthenticator.Logout() + if err != nil { + return s.errorOut(err) + } + return nil +} + +// ChangePassword prompts the user for the old password and a new one, then +// posts it to Plugin to change the password. 
+func (s *Shell) ChangePassword(_ *cli.Context) (err error) { + req, err := s.ChangePasswordPrompter.Prompt() + if err != nil { + return s.errorOut(err) + } + + requestData, err := json.Marshal(req) + if err != nil { + return s.errorOut(err) + } + + buf := bytes.NewBuffer(requestData) + resp, err := s.HTTP.Patch(s.ctx(), "/v2/user/password", buf) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + switch resp.StatusCode { + case http.StatusOK: + fmt.Println("Password updated.") + case http.StatusConflict: + fmt.Println("Old password did not match.") + default: + return s.printResponseBody(resp) + } + return nil +} + +func (s *Shell) buildSessionRequest(flag string) (sessions.SessionRequest, error) { + if len(flag) > 0 { + return s.FileSessionRequestBuilder.Build(flag) + } + return s.PromptingSessionRequestBuilder.Build("") +} + +func getTOMLString(s string) (string, error) { + var val interface{} + err := toml.Unmarshal([]byte(s), &val) + if err == nil { + return s, nil + } + + buf, err := fromFile(s) + if os.IsNotExist(err) { + return "", fmt.Errorf("invalid TOML or file not found '%s'", s) + } else if err != nil { + return "", fmt.Errorf("error reading from file '%s': %v", s, err) + } + return buf.String(), nil +} + +func (s *Shell) parseResponse(resp *http.Response) ([]byte, error) { + b, err := parseResponse(resp) + if errors.Is(err, errUnauthorized) { + return nil, s.errorOut(multierr.Append(err, fmt.Errorf("your credentials may be missing, invalid or you may need to login first using the CLI via 'plugin admin login'"))) + } + + if errors.Is(err, errForbidden) { + return nil, s.errorOut(multierr.Append(err, fmt.Errorf("this action requires %s privileges. 
The current user %s has '%s' role and cannot perform this action, login with a user that has '%s' role via 'plugin admin login'", resp.Header.Get("forbidden-required-role"), resp.Header.Get("forbidden-provided-email"), resp.Header.Get("forbidden-provided-role"), resp.Header.Get("forbidden-required-role")))) + } + if err != nil { + return nil, s.errorOut(err) + } + return b, err +} + +func (s *Shell) printResponseBody(resp *http.Response) error { + b, err := parseResponse(resp) + if err != nil { + return s.errorOut(err) + } + + fmt.Println(string(b)) + return nil +} + +func (s *Shell) renderAPIResponse(resp *http.Response, dst interface{}, headers ...string) error { + var links jsonapi.Links + if err := s.deserializeAPIResponse(resp, dst, &links); err != nil { + return s.errorOut(err) + } + + return s.errorOut(s.Render(dst, headers...)) +} + +func (s *Shell) ConfigV2(c *cli.Context) error { + userOnly := c.Bool("user-only") + str, err := s.configV2Str(userOnly) + if err != nil { + return err + } + fmt.Println(str) + return nil +} + +func (s *Shell) configV2Str(userOnly bool) (string, error) { + resp, err := s.HTTP.Get(s.ctx(), fmt.Sprintf("/v2/config/v2?userOnly=%t", userOnly)) + if err != nil { + return "", s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + respPayload, err := io.ReadAll(resp.Body) + if err != nil { + return "", s.errorOut(err) + } + if resp.StatusCode != 200 { + return "", s.errorOut(errors.Errorf("got HTTP status %d: %s", resp.StatusCode, respPayload)) + } + var configV2Resource web.ConfigV2Resource + err = web.ParseJSONAPIResponse(respPayload, &configV2Resource) + if err != nil { + return "", s.errorOut(err) + } + return configV2Resource.Config, nil +} + +func normalizePassword(password string) string { + return url.QueryEscape(strings.TrimSpace(password)) +} + +// SetLogLevel sets the log level on the node +func (s *Shell) SetLogLevel(c *cli.Context) (err error) { + 
logLevel := c.String("level") + request := web.LogPatchRequest{Level: logLevel} + requestData, err := json.Marshal(request) + if err != nil { + return s.errorOut(err) + } + + buf := bytes.NewBuffer(requestData) + resp, err := s.HTTP.Patch(s.ctx(), "/v2/log", buf) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var svcLogConfig webpresenters.ServiceLogConfigResource + err = s.renderAPIResponse(resp, &svcLogConfig) + return err +} + +// SetLogSQL enables or disables the log sql statements +func (s *Shell) SetLogSQL(c *cli.Context) (err error) { + // Enforces selection of --enable or --disable + if !c.Bool("enable") && !c.Bool("disable") { + return s.errorOut(errors.New("Must set logSql --enabled || --disable")) + } + + // Sets logSql to true || false based on the --enabled flag + logSql := c.Bool("enable") + + request := web.LogPatchRequest{SqlEnabled: &logSql} + requestData, err := json.Marshal(request) + if err != nil { + return s.errorOut(err) + } + + buf := bytes.NewBuffer(requestData) + resp, err := s.HTTP.Patch(s.ctx(), "/v2/log", buf) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var svcLogConfig webpresenters.ServiceLogConfigResource + err = s.renderAPIResponse(resp, &svcLogConfig) + return err +} + +func getBufferFromJSON(s string) (*bytes.Buffer, error) { + if gjson.Valid(s) { + return bytes.NewBufferString(s), nil + } + + buf, err := fromFile(s) + if os.IsNotExist(err) { + return nil, fmt.Errorf("invalid JSON or file not found '%s'", s) + } else if err != nil { + return nil, fmt.Errorf("error reading from file '%s': %v", s, err) + } + return buf, nil +} + +func fromFile(arg string) (*bytes.Buffer, error) { + dir, err := homedir.Expand(arg) + if err != nil { + return nil, err + } + file, err := os.ReadFile(dir) + if err != nil { + return 
nil, err + } + return bytes.NewBuffer(file), nil +} + +// deserializeAPIResponse is distinct from deserializeResponse in that it supports JSONAPI responses with Links +func (s *Shell) deserializeAPIResponse(resp *http.Response, dst interface{}, links *jsonapi.Links) error { + b, err := s.parseResponse(resp) + if err != nil { + return errors.Wrap(err, "parseResponse error") + } + if err = web.ParsePaginatedResponse(b, dst, links); err != nil { + return s.errorOut(err) + } + return nil +} + +// parseErrorResponseBody parses response body from web API and returns a single string containing all errors +func parseErrorResponseBody(responseBody []byte) (string, error) { + if responseBody == nil { + return "Empty error message", nil + } + + var errors models.JSONAPIErrors + err := json.Unmarshal(responseBody, &errors) + if err != nil || len(errors.Errors) == 0 { + return "", err + } + + var errorDetails strings.Builder + errorDetails.WriteString(errors.Errors[0].Detail) + for _, errorDetail := range errors.Errors[1:] { + fmt.Fprintf(&errorDetails, "\n%s", errorDetail.Detail) + } + return errorDetails.String(), nil +} + +func parseResponse(resp *http.Response) ([]byte, error) { + b, err := io.ReadAll(resp.Body) + if err != nil { + return b, multierr.Append(errors.New(resp.Status), err) + } + if resp.StatusCode == http.StatusUnauthorized { + return b, errUnauthorized + } else if resp.StatusCode == http.StatusForbidden { + return b, errForbidden + } else if resp.StatusCode >= http.StatusBadRequest { + errorMessage, err2 := parseErrorResponseBody(b) + if err2 != nil { + return b, err2 + } + return b, errors.New(errorMessage) + } + return b, err +} + +func (s *Shell) checkRemoteBuildCompatibility(lggr logger.Logger, onlyWarn bool, cliVersion, cliSha string) error { + resp, err := s.HTTP.Get(s.ctx(), "/v2/build_info") + if err != nil { + lggr.Warnw("Got error querying for version. 
Remote node version is unknown and CLI may behave in unexpected ways.", "err", err) + return nil + } + b, err := parseResponse(resp) + if err != nil { + lggr.Warnw("Got error parsing http response for remote version. Remote node version is unknown and CLI may behave in unexpected ways.", "resp", resp, "err", err) + return nil + } + + var remoteBuildInfo map[string]string + if err := json.Unmarshal(b, &remoteBuildInfo); err != nil { + lggr.Warnw("Got error json parsing bytes from remote version response. Remote node version is unknown and CLI may behave in unexpected ways.", "bytes", b, "err", err) + return nil + } + remoteVersion, remoteSha := remoteBuildInfo["version"], remoteBuildInfo["commitSHA"] + + remoteSemverUnset := remoteVersion == static.Unset || remoteVersion == "" || remoteSha == static.Unset || remoteSha == "" + cliRemoteSemverMismatch := remoteVersion != cliVersion || remoteSha != cliSha + + if remoteSemverUnset || cliRemoteSemverMismatch { + // Show a warning but allow mismatch + if onlyWarn { + lggr.Warnf("CLI build (%s@%s) mismatches remote node build (%s@%s), it might behave in unexpected ways", remoteVersion, remoteSha, cliVersion, cliSha) + return nil + } + // Don't allow usage of CLI by unsetting the session cookie to prevent further requests + if err2 := s.CookieAuthenticator.Logout(); err2 != nil { + s.Logger.Debugw("CookieAuthenticator failed to logout", "err", err2) + } + return ErrIncompatible{CLIVersion: cliVersion, CLISha: cliSha, RemoteVersion: remoteVersion, RemoteSha: remoteSha} + } + return nil +} + +func (s *Shell) Health(c *cli.Context) error { + mime := gin.MIMEPlain + if c.Bool("json") { + mime = gin.MIMEJSON + } + resp, err := s.HTTP.Get(s.ctx(), "/health", map[string]string{"Accept": mime}) + if err != nil { + return s.errorOut(err) + } + b, err := parseResponse(resp) + if err != nil { + return s.errorOut(err) + } + fmt.Println(string(b)) + return nil +} + +// ErrIncompatible is returned when the cli and remote versions are not 
compatible. +type ErrIncompatible struct { + CLIVersion, CLISha string + RemoteVersion, RemoteSha string +} + +func (e ErrIncompatible) Error() string { + return fmt.Sprintf("error: CLI build (%s@%s) mismatches remote node build (%s@%s). You can set flag --bypass-version-check to bypass this", e.CLIVersion, e.CLISha, e.RemoteVersion, e.RemoteSha) +} diff --git a/core/cmd/shell_remote_test.go b/core/cmd/shell_remote_test.go new file mode 100644 index 00000000..1e13cb43 --- /dev/null +++ b/core/cmd/shell_remote_test.go @@ -0,0 +1,752 @@ +package cmd_test + +import ( + "bytes" + "context" + "errors" + "flag" + "fmt" + "io" + "net/http" + "os" + "strconv" + "testing" + "time" + + "github.com/google/uuid" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/kylelemons/godebug/diff" + "github.com/pelletier/go-toml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/static" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" + "github.com/goplugin/pluginv3.0/v2/core/web" +) + +var ( + nilContext = cli.NewContext(nil, nil, nil) +) + +type startOptions struct { + // Use to set up mocks on the app + 
FlagsAndDeps []interface{} + // Add a key on start up + WithKey bool +} + +func startNewApplicationV2(t *testing.T, overrideFn func(c *plugin.Config, s *plugin.Secrets), setup ...func(opts *startOptions)) *cltest.TestApplication { + t.Helper() + + sopts := &startOptions{ + FlagsAndDeps: []interface{}{}, + } + for _, fn := range setup { + fn(sopts) + } + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.HTTPRequest.DefaultTimeout = commonconfig.MustNewDuration(30 * time.Millisecond) + f := false + c.EVM[0].Enabled = &f + c.P2P.V2.Enabled = &f + + if overrideFn != nil { + overrideFn(c, s) + } + }) + + app := cltest.NewApplicationWithConfigAndKey(t, config, sopts.FlagsAndDeps...) + require.NoError(t, app.Start(testutils.Context(t))) + + return app +} + +func withMocks(mks ...interface{}) func(opts *startOptions) { + return func(opts *startOptions) { + opts.FlagsAndDeps = mks + } +} + +func withKey() func(opts *startOptions) { + return func(opts *startOptions) { + opts.WithKey = true + } +} + +func newEthMock(t *testing.T) *evmclimocks.Client { + t.Helper() + return cltest.NewEthMocksWithStartupAssertions(t) +} + +func newEthMockWithTransactionsOnBlocksAssertions(t *testing.T) *evmclimocks.Client { + t.Helper() + + return cltest.NewEthMocksWithTransactionsOnBlocksAssertions(t) +} + +func keyNameForTest(t *testing.T) string { + return fmt.Sprintf("%s/%s_test_key.json", t.TempDir(), t.Name()) +} + +func deleteKeyExportFile(t *testing.T) { + keyName := keyNameForTest(t) + err := os.Remove(keyName) + if err == nil || os.IsNotExist(err) { + return + } + require.NoError(t, err) +} + +func TestShell_ReplayBlocks(t *testing.T) { + t.Parallel() + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + c.EVM[0].GasEstimator.Mode = ptr("FixedPrice") + }) + client, _ := 
app.NewShellAndRenderer() + + set := flag.NewFlagSet("flagset", 0) + flagSetApplyFromAction(client.ReplayFromBlock, set, "") + + require.NoError(t, set.Set("block-number", "42")) + require.NoError(t, set.Set("evm-chain-id", "12345678")) + c := cli.NewContext(nil, set, nil) + assert.ErrorContains(t, client.ReplayFromBlock(c), "chain id does not match any local chains") + + require.NoError(t, set.Set("evm-chain-id", "0")) + c = cli.NewContext(nil, set, nil) + assert.NoError(t, client.ReplayFromBlock(c)) +} + +func TestShell_CreateExternalInitiator(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + args []string + }{ + {"create external initiator", []string{"exi", "http://testing.com/external_initiators"}}, + {"create external initiator w/ query params", []string{"exiqueryparams", "http://testing.com/external_initiators?query=param"}}, + {"create external initiator w/o url", []string{"exi_no_url"}}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.ExternalInitiatorsEnabled = ptr(true) + }) + client, _ := app.NewShellAndRenderer() + + set := flag.NewFlagSet("create", 0) + flagSetApplyFromAction(client.CreateExternalInitiator, set, "") + assert.NoError(t, set.Parse(test.args)) + c := cli.NewContext(nil, set, nil) + + err := client.CreateExternalInitiator(c) + require.NoError(t, err) + + var exi bridges.ExternalInitiator + err = app.GetSqlxDB().Get(&exi, `SELECT * FROM external_initiators WHERE name = $1`, test.args[0]) + require.NoError(t, err) + + if len(test.args) > 1 { + assert.Equal(t, test.args[1], exi.URL.String()) + } + }) + } +} + +func TestShell_CreateExternalInitiator_Errors(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + args []string + }{ + {"no arguments", []string{}}, + {"too many arguments", []string{"bitcoin", "https://valid.url", "extra arg"}}, + {"invalid url", 
[]string{"bitcoin", "not a url"}}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.ExternalInitiatorsEnabled = ptr(true) + }) + client, _ := app.NewShellAndRenderer() + + initialExis := len(cltest.AllExternalInitiators(t, app.GetSqlxDB())) + + set := flag.NewFlagSet("create", 0) + flagSetApplyFromAction(client.CreateExternalInitiator, set, "") + + assert.NoError(t, set.Parse(test.args)) + c := cli.NewContext(nil, set, nil) + + err := client.CreateExternalInitiator(c) + assert.Error(t, err) + + exis := cltest.AllExternalInitiators(t, app.GetSqlxDB()) + assert.Len(t, exis, initialExis) + }) + } +} + +func TestShell_DestroyExternalInitiator(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.ExternalInitiatorsEnabled = ptr(true) + }) + client, r := app.NewShellAndRenderer() + + token := auth.NewToken() + exi, err := bridges.NewExternalInitiator(token, + &bridges.ExternalInitiatorRequest{Name: uuid.New().String()}, + ) + require.NoError(t, err) + err = app.BridgeORM().CreateExternalInitiator(exi) + require.NoError(t, err) + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteExternalInitiator, set, "") + + require.NoError(t, set.Parse([]string{exi.Name})) + + c := cli.NewContext(nil, set, nil) + assert.NoError(t, client.DeleteExternalInitiator(c)) + assert.Empty(t, r.Renders) +} + +func TestShell_DestroyExternalInitiator_NotFound(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.ExternalInitiatorsEnabled = ptr(true) + }) + client, r := app.NewShellAndRenderer() + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteExternalInitiator, set, "") + + require.NoError(t, set.Parse([]string{"bogus-ID"})) + + c := cli.NewContext(nil, set, nil) + 
assert.Error(t, client.DeleteExternalInitiator(c)) + assert.Empty(t, r.Renders) +} + +func TestShell_RemoteLogin(t *testing.T) { + + app := startNewApplicationV2(t, nil) + orm := app.AuthenticationProvider() + + u := cltest.NewUserWithSession(t, orm) + + tests := []struct { + name, file string + email, pwd string + wantError bool + }{ + {"success prompt", "", u.Email, cltest.Password, false}, + {"success file", "../internal/fixtures/apicredentials", "", "", false}, + {"failure prompt", "", "wrong@email.com", "wrongpwd", true}, + {"failure file", "/tmp/doesntexist", "", "", true}, + {"failure file w correct prompt", "/tmp/doesntexist", u.Email, cltest.Password, true}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + enteredStrings := []string{test.email, test.pwd} + prompter := &cltest.MockCountingPrompter{T: t, EnteredStrings: enteredStrings} + client := app.NewAuthenticatingShell(prompter) + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RemoteLogin, set, "") + + require.NoError(t, set.Set("file", test.file)) + require.NoError(t, set.Set("bypass-version-check", "true")) + + c := cli.NewContext(nil, set, nil) + + err := client.RemoteLogin(c) + if test.wantError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestShell_RemoteBuildCompatibility(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + u := cltest.NewUserWithSession(t, app.AuthenticationProvider()) + enteredStrings := []string{u.Email, cltest.Password} + prompter := &cltest.MockCountingPrompter{T: t, EnteredStrings: append(enteredStrings, enteredStrings...)} + client := app.NewAuthenticatingShell(prompter) + + remoteVersion, remoteSha := "test"+static.Version, "abcd"+static.Sha + client.HTTP = &mockHTTPClient{client.HTTP, remoteVersion, remoteSha} + + expErr := cmd.ErrIncompatible{ + CLIVersion: static.Version, + CLISha: static.Sha, + RemoteVersion: remoteVersion, + RemoteSha: remoteSha, + }.Error() + 
+ // Fails without bypass + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RemoteLogin, set, "") + + require.NoError(t, set.Set("bypass-version-check", "false")) + + c := cli.NewContext(nil, set, nil) + err := client.RemoteLogin(c) + assert.Error(t, err) + assert.EqualError(t, err, expErr) + + // Defaults to false + set = flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RemoteLogin, set, "") + c = cli.NewContext(nil, set, nil) + err = client.RemoteLogin(c) + assert.Error(t, err) + assert.EqualError(t, err, expErr) +} + +func TestShell_CheckRemoteBuildCompatibility(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + u := cltest.NewUserWithSession(t, app.AuthenticationProvider()) + tests := []struct { + name string + remoteVersion, remoteSha string + cliVersion, cliSha string + bypassVersionFlag, wantError bool + }{ + {"success match", "1.1.1", "53120d5", "1.1.1", "53120d5", false, false}, + {"cli unset fails", "1.1.1", "53120d5", "unset", "unset", false, true}, + {"remote unset fails", "unset", "unset", "1.1.1", "53120d5", false, true}, + {"mismatch fail", "1.1.1", "53120d5", "1.6.9", "13230sas", false, true}, + {"mismatch but using bypass_version_flag", "1.1.1", "53120d5", "1.6.9", "13230sas", true, false}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + enteredStrings := []string{u.Email, cltest.Password} + prompter := &cltest.MockCountingPrompter{T: t, EnteredStrings: enteredStrings} + client := app.NewAuthenticatingShell(prompter) + + client.HTTP = &mockHTTPClient{client.HTTP, test.remoteVersion, test.remoteSha} + + err := client.CheckRemoteBuildCompatibility(logger.TestLogger(t), test.bypassVersionFlag, test.cliVersion, test.cliSha) + if test.wantError { + assert.Error(t, err) + assert.ErrorIs(t, err, cmd.ErrIncompatible{ + RemoteVersion: test.remoteVersion, + RemoteSha: test.remoteSha, + CLIVersion: test.cliVersion, + CLISha: test.cliSha, + }) + } else { + assert.NoError(t, err) + 
} + }) + } +} + +type mockHTTPClient struct { + HTTP cmd.HTTPClient + mockVersion string + mockSha string +} + +func (h *mockHTTPClient) Get(ctx context.Context, path string, headers ...map[string]string) (*http.Response, error) { + if path == "/v2/build_info" { + // Return mocked response here + json := fmt.Sprintf(`{"version":"%s","commitSHA":"%s"}`, h.mockVersion, h.mockSha) + r := io.NopCloser(bytes.NewReader([]byte(json))) + return &http.Response{ + StatusCode: 200, + Body: r, + }, nil + } + return h.HTTP.Get(ctx, path, headers...) +} + +func (h *mockHTTPClient) Post(ctx context.Context, path string, body io.Reader) (*http.Response, error) { + return h.HTTP.Post(ctx, path, body) +} + +func (h *mockHTTPClient) Put(ctx context.Context, path string, body io.Reader) (*http.Response, error) { + return h.HTTP.Put(ctx, path, body) +} + +func (h *mockHTTPClient) Patch(ctx context.Context, path string, body io.Reader, headers ...map[string]string) (*http.Response, error) { + return h.HTTP.Patch(ctx, path, body, headers...) 
+} + +func (h *mockHTTPClient) Delete(ctx context.Context, path string) (*http.Response, error) { + return h.HTTP.Delete(ctx, path) +} + +func TestShell_ChangePassword(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + u := cltest.NewUserWithSession(t, app.AuthenticationProvider()) + + enteredStrings := []string{u.Email, cltest.Password} + prompter := &cltest.MockCountingPrompter{T: t, EnteredStrings: enteredStrings} + + client := app.NewAuthenticatingShell(prompter) + otherClient := app.NewAuthenticatingShell(prompter) + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RemoteLogin, set, "") + + require.NoError(t, set.Set("file", "../internal/fixtures/apicredentials")) + require.NoError(t, set.Set("bypass-version-check", "true")) + + c := cli.NewContext(nil, set, nil) + err := client.RemoteLogin(c) + require.NoError(t, err) + + err = otherClient.RemoteLogin(c) + require.NoError(t, err) + + client.ChangePasswordPrompter = cltest.MockChangePasswordPrompter{ + UpdatePasswordRequest: web.UpdatePasswordRequest{ + OldPassword: testutils.Password, + NewPassword: "12345", + }, + } + err = client.ChangePassword(cli.NewContext(nil, nil, nil)) + require.Error(t, err) + assert.ErrorContains(t, err, "Expected password complexity") + + client.ChangePasswordPrompter = cltest.MockChangePasswordPrompter{ + UpdatePasswordRequest: web.UpdatePasswordRequest{ + OldPassword: testutils.Password, + NewPassword: testutils.Password + "foo", + }, + } + err = client.ChangePassword(cli.NewContext(nil, nil, nil)) + assert.NoError(t, err) + + // otherClient should now be logged out + err = otherClient.IndexBridges(c) + require.Error(t, err) + require.Contains(t, err.Error(), "Unauthorized") +} + +func TestShell_Profile_InvalidSecondsParam(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + u := cltest.NewUserWithSession(t, app.AuthenticationProvider()) + enteredStrings := []string{u.Email, cltest.Password} + prompter := 
&cltest.MockCountingPrompter{T: t, EnteredStrings: enteredStrings} + + client := app.NewAuthenticatingShell(prompter) + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RemoteLogin, set, "") + + require.NoError(t, set.Set("file", "../internal/fixtures/apicredentials")) + require.NoError(t, set.Set("bypass-version-check", "true")) + + c := cli.NewContext(nil, set, nil) + err := client.RemoteLogin(c) + require.NoError(t, err) + + // pick a value larger than the default http service write timeout + d := app.Config.WebServer().HTTPWriteTimeout() + 2*time.Second + set.Uint("seconds", uint(d.Seconds()), "") + tDir := t.TempDir() + set.String("output_dir", tDir, "") + err = client.Profile(cli.NewContext(nil, set, nil)) + wantErr := cmd.ErrProfileTooLong + require.ErrorAs(t, err, &wantErr) + +} + +func TestShell_Profile(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + u := cltest.NewUserWithSession(t, app.AuthenticationProvider()) + enteredStrings := []string{u.Email, cltest.Password} + prompter := &cltest.MockCountingPrompter{T: t, EnteredStrings: enteredStrings} + + client := app.NewAuthenticatingShell(prompter) + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RemoteLogin, set, "") + + require.NoError(t, set.Set("file", "../internal/fixtures/apicredentials")) + require.NoError(t, set.Set("bypass-version-check", "true")) + + c := cli.NewContext(nil, set, nil) + err := client.RemoteLogin(c) + require.NoError(t, err) + + set.Uint("seconds", 1, "") + tDir := t.TempDir() + set.String("output_dir", tDir, "") + + // we don't care about the cli behavior, i.e. 
the before func, + // so call the client func directly + err = client.Profile(cli.NewContext(nil, set, nil)) + require.NoError(t, err) + + ents, err := os.ReadDir(tDir) + require.NoError(t, err) + require.Greater(t, len(ents), 0, "ents %+v", ents) +} + +func TestShell_Profile_Unauthenticated(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + + client := app.NewAuthenticatingShell(&cltest.MockCountingPrompter{T: t, EnteredStrings: []string{}}) + + set := flag.NewFlagSet("test", 0) + set.Uint("seconds", 1, "") + set.String("output_dir", t.TempDir(), "") + + err := client.Profile(cli.NewContext(nil, set, nil)) + require.ErrorContains(t, err, "profile collection failed:") + require.ErrorContains(t, err, "Unauthorized") +} + +func TestShell_ConfigV2(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + user, effective := app.Config.ConfigTOML() + + t.Run("user", func(t *testing.T) { + got, err := client.ConfigV2Str(true) + require.NoError(t, err) + assert.Equal(t, user, got, diff.Diff(user, got)) + }) + t.Run("effective", func(t *testing.T) { + got, err := client.ConfigV2Str(false) + require.NoError(t, err) + assert.Equal(t, effective, got, diff.Diff(effective, got)) + }) +} + +func TestShell_RunOCRJob_HappyPath(t *testing.T) { + t.Parallel() + app := startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(true) + c.OCR.Enabled = ptr(true) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + c.P2P.PeerID = &cltest.DefaultP2PPeerID + c.EVM[0].GasEstimator.Mode = ptr("FixedPrice") + }, func(opts *startOptions) { + opts.FlagsAndDeps = append(opts.FlagsAndDeps, cltest.DefaultP2PKey) + }) + client, _ := app.NewShellAndRenderer() + + require.NoError(t, app.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + + _, bridge := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{}, 
app.GetConfig().Database()) + _, bridge2 := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{}, app.GetConfig().Database()) + + var jb job.Job + ocrspec := testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{DS1BridgeName: bridge.Name.String(), DS2BridgeName: bridge2.Name.String()}) + err := toml.Unmarshal([]byte(ocrspec.Toml()), &jb) + require.NoError(t, err) + var ocrSpec job.OCROracleSpec + err = toml.Unmarshal([]byte(ocrspec.Toml()), &ocrSpec) + require.NoError(t, err) + jb.OCROracleSpec = &ocrSpec + key, _ := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + jb.OCROracleSpec.TransmitterAddress = &key.EIP55Address + + err = app.AddJobV2(testutils.Context(t), &jb) + require.NoError(t, err) + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RemoteLogin, set, "") + + require.NoError(t, set.Set("bypass-version-check", "true")) + require.NoError(t, set.Parse([]string{strconv.FormatInt(int64(jb.ID), 10)})) + + c := cli.NewContext(nil, set, nil) + + require.NoError(t, client.RemoteLogin(c)) + require.NoError(t, client.TriggerPipelineRun(c)) +} + +func TestShell_RunOCRJob_MissingJobID(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RemoteLogin, set, "") + + require.NoError(t, set.Set("bypass-version-check", "true")) + + c := cli.NewContext(nil, set, nil) + + require.NoError(t, client.RemoteLogin(c)) + assert.EqualError(t, client.TriggerPipelineRun(c), "Must pass the job id to trigger a run") +} + +func TestShell_RunOCRJob_JobNotFound(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.RemoteLogin, set, "") + + require.NoError(t, set.Parse([]string{"1"})) + require.NoError(t, set.Set("bypass-version-check", "true")) + + c := cli.NewContext(nil, set, nil) + + require.NoError(t, 
client.RemoteLogin(c)) + err := client.TriggerPipelineRun(c) + assert.Contains(t, err.Error(), "findJob failed: failed to load job") +} + +func TestShell_AutoLogin(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + + user := cltest.MustRandomUser(t) + require.NoError(t, app.BasicAdminUsersORM().CreateUser(&user)) + + sr := sessions.SessionRequest{ + Email: user.Email, + Password: cltest.Password, + } + client, _ := app.NewShellAndRenderer() + client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.NewClientOpts(), &cmd.MemoryCookieStore{}, logger.TestLogger(t)) + client.HTTP = cmd.NewAuthenticatedHTTPClient(app.Logger, app.NewClientOpts(), client.CookieAuthenticator, sr) + + fs := flag.NewFlagSet("", flag.ExitOnError) + flagSetApplyFromAction(client.ListJobs, fs, "") + + err := client.ListJobs(cli.NewContext(nil, fs, nil)) + require.NoError(t, err) + + // Expire the session and then try again + pgtest.MustExec(t, app.GetSqlxDB(), "delete from sessions where email = $1", user.Email) + err = client.ListJobs(cli.NewContext(nil, fs, nil)) + require.NoError(t, err) +} + +func TestShell_AutoLogin_AuthFails(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + + user := cltest.MustRandomUser(t) + require.NoError(t, app.BasicAdminUsersORM().CreateUser(&user)) + + sr := sessions.SessionRequest{ + Email: user.Email, + Password: cltest.Password, + } + client, _ := app.NewShellAndRenderer() + client.CookieAuthenticator = FailingAuthenticator{} + client.HTTP = cmd.NewAuthenticatedHTTPClient(app.Logger, app.NewClientOpts(), client.CookieAuthenticator, sr) + + fs := flag.NewFlagSet("", flag.ExitOnError) + flagSetApplyFromAction(client.ListJobs, fs, "") + err := client.ListJobs(cli.NewContext(nil, fs, nil)) + require.Error(t, err) +} + +type FailingAuthenticator struct{} + +func (FailingAuthenticator) Cookie() (*http.Cookie, error) { + return &http.Cookie{}, nil +} + +// Authenticate retrieves a session ID via a cookie and 
saves it to disk. +func (FailingAuthenticator) Authenticate(context.Context, sessions.SessionRequest) (*http.Cookie, error) { + return nil, errors.New("no luck") +} + +// Remove a session ID from disk +func (FailingAuthenticator) Logout() error { + return errors.New("no luck") +} + +func TestShell_SetLogConfig(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + + logLevel := "warn" + set := flag.NewFlagSet("loglevel", 0) + flagSetApplyFromAction(client.SetLogLevel, set, "") + + require.NoError(t, set.Set("level", logLevel)) + + c := cli.NewContext(nil, set, nil) + + err := client.SetLogLevel(c) + require.NoError(t, err) + assert.Equal(t, logLevel, app.Config.Log().Level().String()) + + sqlEnabled := true + set = flag.NewFlagSet("logsql", 0) + flagSetApplyFromAction(client.SetLogSQL, set, "") + + require.NoError(t, set.Set("enable", strconv.FormatBool(sqlEnabled))) + c = cli.NewContext(nil, set, nil) + + err = client.SetLogSQL(c) + assert.NoError(t, err) + assert.Equal(t, sqlEnabled, app.Config.Database().LogSQL()) + + sqlEnabled = false + set = flag.NewFlagSet("logsql", 0) + flagSetApplyFromAction(client.SetLogSQL, set, "") + + require.NoError(t, set.Set("disable", "true")) + c = cli.NewContext(nil, set, nil) + + err = client.SetLogSQL(c) + assert.NoError(t, err) + assert.Equal(t, sqlEnabled, app.Config.Database().LogSQL()) +} diff --git a/core/cmd/shell_test.go b/core/cmd/shell_test.go new file mode 100644 index 00000000..23e13e64 --- /dev/null +++ b/core/cmd/shell_test.go @@ -0,0 +1,623 @@ +package cmd_test + +import ( + "crypto/rand" + "flag" + "fmt" + "math/big" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/goplugin/plugin-solana/pkg/solana" + solcfg "github.com/goplugin/plugin-solana/pkg/solana/config" + 
"github.com/goplugin/plugin-starknet/relayer/pkg/plugin/config" + stkcfg "github.com/goplugin/plugin-starknet/relayer/pkg/plugin/config" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/sessions/localauth" + "github.com/goplugin/pluginv3.0/v2/plugins" +) + +func TestTerminalCookieAuthenticator_AuthenticateWithoutSession(t *testing.T) { + t.Parallel() + + ctx := testutils.Context(t) + app := cltest.NewApplicationEVMDisabled(t) + u := cltest.NewUserWithSession(t, app.AuthenticationProvider()) + + tests := []struct { + name, email, pwd string + }{ + {"bad email", "notreal", cltest.Password}, + {"bad pwd", u.Email, "mostcommonwrongpwdever"}, + {"bad both", "notreal", "mostcommonwrongpwdever"}, + {"correct", u.Email, cltest.Password}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sr := sessions.SessionRequest{Email: test.email, Password: test.pwd} + store := &cmd.MemoryCookieStore{} + tca := cmd.NewSessionCookieAuthenticator(cmd.ClientOpts{}, store, logger.TestLogger(t)) + cookie, err := tca.Authenticate(ctx, sr) + + assert.Error(t, err) + assert.Nil(t, cookie) + cookie, err = store.Retrieve() + assert.NoError(t, err) + assert.Nil(t, cookie) + }) + } +} + +func TestTerminalCookieAuthenticator_AuthenticateWithSession(t *testing.T) { + t.Parallel() + + ctx := testutils.Context(t) + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(ctx)) + + u 
:= cltest.NewUserWithSession(t, app.AuthenticationProvider()) + + tests := []struct { + name, email, pwd string + wantError bool + }{ + {"bad email", "notreal", cltest.Password, true}, + {"bad pwd", u.Email, "mostcommonwrongpwdever", true}, + {"bad both", "notreal", "mostcommonwrongpwdever", true}, + {"success", u.Email, cltest.Password, false}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sr := sessions.SessionRequest{Email: test.email, Password: test.pwd} + store := &cmd.MemoryCookieStore{} + tca := cmd.NewSessionCookieAuthenticator(app.NewClientOpts(), store, logger.TestLogger(t)) + cookie, err := tca.Authenticate(ctx, sr) + + if test.wantError { + assert.Error(t, err) + assert.Nil(t, cookie) + + cookie, err = store.Retrieve() + assert.NoError(t, err) + assert.Nil(t, cookie) + } else { + assert.NoError(t, err) + assert.NotNil(t, cookie) + + retrievedCookie, err := store.Retrieve() + assert.NoError(t, err) + assert.Equal(t, cookie, retrievedCookie) + } + }) + } +} + +type diskCookieStoreConfig struct{ rootdir string } + +func (d diskCookieStoreConfig) RootDir() string { + return d.rootdir +} + +func TestDiskCookieStore_Retrieve(t *testing.T) { + t.Parallel() + + cfg := diskCookieStoreConfig{} + + t.Run("missing cookie file", func(t *testing.T) { + store := cmd.DiskCookieStore{Config: cfg} + cookie, err := store.Retrieve() + assert.NoError(t, err) + assert.Nil(t, cookie) + }) + + t.Run("invalid cookie file", func(t *testing.T) { + cfg.rootdir = "../internal/fixtures/badcookie" + store := cmd.DiskCookieStore{Config: cfg} + cookie, err := store.Retrieve() + assert.Error(t, err) + assert.Nil(t, cookie) + }) + + t.Run("valid cookie file", func(t *testing.T) { + cfg.rootdir = "../internal/fixtures" + store := cmd.DiskCookieStore{Config: cfg} + cookie, err := store.Retrieve() + assert.NoError(t, err) + assert.NotNil(t, cookie) + }) +} + +func TestTerminalAPIInitializer_InitializeWithoutAPIUser(t *testing.T) { + email := "good@email.com" + 
+ tests := []struct { + name string + enteredStrings []string + isTerminal bool + isError bool + }{ + {"correct", []string{email, cltest.Password}, true, false}, + {"bad pwd then correct", []string{email, "p4SsW0r", email, cltest.Password}, true, false}, + {"bad email then correct", []string{"", cltest.Password, email, cltest.Password}, true, false}, + {"not a terminal", []string{}, false, true}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + lggr := logger.TestLogger(t) + orm := localauth.NewORM(db, time.Minute, lggr, pgtest.NewQConfig(true), audit.NoopLogger) + + mock := &cltest.MockCountingPrompter{T: t, EnteredStrings: test.enteredStrings, NotTerminal: !test.isTerminal} + tai := cmd.NewPromptingAPIInitializer(mock) + + // Clear out fixture users/users created from the other test cases + // This asserts that on initial run with an empty users table that the credentials file will instantiate and + // create/run with a new admin user + pgtest.MustExec(t, db, "DELETE FROM users;") + + user, err := tai.Initialize(orm, lggr) + if test.isError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, len(test.enteredStrings), mock.Count) + + persistedUser, err := orm.FindUser(email) + assert.NoError(t, err) + + assert.Equal(t, user.Email, persistedUser.Email) + assert.Equal(t, user.HashedPassword, persistedUser.HashedPassword) + } + }) + } +} + +func TestTerminalAPIInitializer_InitializeWithExistingAPIUser(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + lggr := logger.TestLogger(t) + orm := localauth.NewORM(db, time.Minute, lggr, cfg.Database(), audit.NoopLogger) + + // Clear out fixture users/users created from the other test cases + // This asserts that on initial run with an empty users table that the credentials file will instantiate and + // create/run with a new admin user + _, err := db.Exec("DELETE FROM users;") + require.NoError(t, err) + 
+ initialUser := cltest.MustRandomUser(t) + require.NoError(t, orm.CreateUser(&initialUser)) + + mock := &cltest.MockCountingPrompter{T: t} + tai := cmd.NewPromptingAPIInitializer(mock) + + // If there is an existing user, and we are in the Terminal prompt, no input prompts required + user, err := tai.Initialize(orm, lggr) + assert.NoError(t, err) + assert.Equal(t, 0, mock.Count) + + assert.Equal(t, initialUser.Email, user.Email) + assert.Equal(t, initialUser.HashedPassword, user.HashedPassword) +} + +func TestFileAPIInitializer_InitializeWithoutAPIUser(t *testing.T) { + tests := []struct { + name string + file string + wantError bool + }{ + {"correct", "../internal/fixtures/apicredentials", false}, + {"no file", "", true}, + {"incorrect file", "/tmp/doesnotexist", true}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + lggr := logger.TestLogger(t) + orm := localauth.NewORM(db, time.Minute, lggr, pgtest.NewQConfig(true), audit.NoopLogger) + + // Clear out fixture users/users created from the other test cases + // This asserts that on initial run with an empty users table that the credentials file will instantiate and + // create/run with a new admin user + pgtest.MustExec(t, db, "DELETE FROM users;") + + tfi := cmd.NewFileAPIInitializer(test.file) + user, err := tfi.Initialize(orm, lggr) + if test.wantError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, cltest.APIEmailAdmin, user.Email) + persistedUser, err := orm.FindUser(user.Email) + assert.NoError(t, err) + assert.Equal(t, persistedUser.Email, user.Email) + } + }) + } +} + +func TestFileAPIInitializer_InitializeWithExistingAPIUser(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + orm := localauth.NewORM(db, time.Minute, logger.TestLogger(t), cfg.Database(), audit.NoopLogger) + + tests := []struct { + name string + file string + wantError bool + }{ + {"correct", 
"../internal/fixtures/apicredentials", false}, + {"no file", "", true}, + {"incorrect file", "/tmp/doesnotexist", true}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + lggr := logger.TestLogger(t) + tfi := cmd.NewFileAPIInitializer(test.file) + user, err := tfi.Initialize(orm, lggr) + if test.wantError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, cltest.APIEmailAdmin, user.Email) + } + }) + } +} + +func TestPromptingSessionRequestBuilder(t *testing.T) { + t.Parallel() + + tests := []struct { + email, pwd string + }{ + {"correct@input.com", "mypwd"}, + } + + for _, test := range tests { + t.Run(test.email, func(t *testing.T) { + enteredStrings := []string{test.email, test.pwd} + prompter := &cltest.MockCountingPrompter{T: t, EnteredStrings: enteredStrings} + builder := cmd.NewPromptingSessionRequestBuilder(prompter) + + sr, err := builder.Build("") + require.NoError(t, err) + assert.Equal(t, test.email, sr.Email) + assert.Equal(t, test.pwd, sr.Password) + }) + } +} + +func TestFileSessionRequestBuilder(t *testing.T) { + t.Parallel() + + builder := cmd.NewFileSessionRequestBuilder(logger.TestLogger(t)) + tests := []struct { + name, file, wantEmail string + wantError bool + }{ + {"empty", "", "", true}, + {"correct file", "../internal/fixtures/apicredentials", cltest.APIEmailAdmin, false}, + {"incorrect file", "/tmp/dontexist", "", true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sr, err := builder.Build(test.file) + assert.Equal(t, test.wantEmail, sr.Email) + if test.wantError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestNewUserCache(t *testing.T) { + + r, err := rand.Int(rand.Reader, big.NewInt(256*1024*1024)) + require.NoError(t, err) + // NewUserCache owns it's Dir. 
+ // invent a unique subdir that we can cleanup + // because test.TempDir and ioutil.TempDir don't work well here + subDir := filepath.Base(fmt.Sprintf("%s-%d", t.Name(), r.Int64())) + lggr := logger.TestLogger(t) + c, err := cmd.NewUserCache(subDir, func() logger.Logger { return lggr }) + require.NoError(t, err) + defer func() { + require.NoError(t, os.Remove(c.RootDir())) + }() + + assert.DirExists(t, c.RootDir()) + +} + +func TestSetupSolanaRelayer(t *testing.T) { + lggr := logger.TestLogger(t) + reg := plugins.NewLoopRegistry(lggr, nil) + ks := mocks.NewSolana(t) + + // config 3 chains but only enable 2 => should only be 2 relayer + nEnabledChains := 2 + tConfig := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Solana = solana.TOMLConfigs{ + &solana.TOMLConfig{ + ChainID: ptr[string]("solana-id-1"), + Enabled: ptr(true), + Chain: solcfg.Chain{}, + Nodes: []*solcfg.Node{}, + }, + &solana.TOMLConfig{ + ChainID: ptr[string]("solana-id-2"), + Enabled: ptr(true), + Chain: solcfg.Chain{}, + Nodes: []*solcfg.Node{}, + }, + &solana.TOMLConfig{ + ChainID: ptr[string]("disabled-solana-id-1"), + Enabled: ptr(false), + Chain: solcfg.Chain{}, + Nodes: []*solcfg.Node{}, + }, + } + }) + + t2Config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Solana = solana.TOMLConfigs{ + &solana.TOMLConfig{ + ChainID: ptr[string]("solana-id-1"), + Enabled: ptr(true), + Chain: solcfg.Chain{}, + Nodes: []*solcfg.Node{}, + }, + } + }) + + rf := plugin.RelayerFactory{ + Logger: lggr, + LoopRegistry: reg, + } + + // not parallel; shared state + t.Run("no plugin", func(t *testing.T) { + relayers, err := rf.NewSolana(ks, tConfig.SolanaConfigs()) + require.NoError(t, err) + require.NotNil(t, relayers) + require.Len(t, relayers, nEnabledChains) + // no using plugin, so registry should be empty + require.Len(t, reg.List(), 0) + }) + + t.Run("plugin", func(t *testing.T) { + t.Setenv("CL_SOLANA_CMD", "phony_solana_cmd") + + relayers, 
err := rf.NewSolana(ks, tConfig.SolanaConfigs()) + require.NoError(t, err) + require.NotNil(t, relayers) + require.Len(t, relayers, nEnabledChains) + // make sure registry has the plugin + require.Len(t, reg.List(), nEnabledChains) + }) + + // test that duplicate enabled chains is an error when + duplicateConfig := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Solana = solana.TOMLConfigs{ + &solana.TOMLConfig{ + ChainID: ptr[string]("dupe"), + Enabled: ptr(true), + Chain: solcfg.Chain{}, + Nodes: []*solcfg.Node{}, + }, + &solana.TOMLConfig{ + ChainID: ptr[string]("dupe"), + Enabled: ptr(true), + Chain: solcfg.Chain{}, + Nodes: []*solcfg.Node{}, + }, + } + }) + + // not parallel; shared state + t.Run("no plugin, duplicate chains", func(t *testing.T) { + _, err := rf.NewSolana(ks, duplicateConfig.SolanaConfigs()) + require.Error(t, err) + }) + + t.Run("plugin, duplicate chains", func(t *testing.T) { + t.Setenv("CL_SOLANA_CMD", "phony_solana_cmd") + _, err := rf.NewSolana(ks, duplicateConfig.SolanaConfigs()) + require.Error(t, err) + }) + + t.Run("plugin env parsing fails", func(t *testing.T) { + t.Setenv("CL_SOLANA_CMD", "phony_solana_cmd") + t.Setenv("CL_SOLANA_ENV", "fake_path") + + _, err := rf.NewSolana(ks, t2Config.SolanaConfigs()) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to parse Solana env file") + }) + + t.Run("plugin already registered", func(t *testing.T) { + t.Setenv("CL_SOLANA_CMD", "phony_solana_cmd") + + _, err := rf.NewSolana(ks, tConfig.SolanaConfigs()) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to create Solana LOOP command") + }) +} + +func TestSetupStarkNetRelayer(t *testing.T) { + lggr := logger.TestLogger(t) + reg := plugins.NewLoopRegistry(lggr, nil) + ks := mocks.NewStarkNet(t) + // config 3 chains but only enable 2 => should only be 2 relayer + nEnabledChains := 2 + tConfig := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + 
c.Starknet = stkcfg.TOMLConfigs{ + &stkcfg.TOMLConfig{ + ChainID: ptr[string]("starknet-id-1"), + Enabled: ptr(true), + Chain: stkcfg.Chain{}, + Nodes: []*config.Node{}, + }, + &stkcfg.TOMLConfig{ + ChainID: ptr[string]("starknet-id-2"), + Enabled: ptr(true), + Chain: stkcfg.Chain{}, + Nodes: []*config.Node{}, + }, + &stkcfg.TOMLConfig{ + ChainID: ptr[string]("disabled-starknet-id-1"), + Enabled: ptr(false), + Chain: stkcfg.Chain{}, + Nodes: []*config.Node{}, + }, + } + }) + + t2Config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Starknet = stkcfg.TOMLConfigs{ + &stkcfg.TOMLConfig{ + ChainID: ptr[string]("starknet-id-3"), + Enabled: ptr(true), + Chain: stkcfg.Chain{}, + Nodes: []*config.Node{}, + }, + } + }) + rf := plugin.RelayerFactory{ + Logger: lggr, + LoopRegistry: reg, + } + + // not parallel; shared state + t.Run("no plugin", func(t *testing.T) { + relayers, err := rf.NewStarkNet(ks, tConfig.StarknetConfigs()) + require.NoError(t, err) + require.NotNil(t, relayers) + require.Len(t, relayers, nEnabledChains) + // no using plugin, so registry should be empty + require.Len(t, reg.List(), 0) + }) + + t.Run("plugin", func(t *testing.T) { + t.Setenv("CL_STARKNET_CMD", "phony_starknet_cmd") + + relayers, err := rf.NewStarkNet(ks, tConfig.StarknetConfigs()) + require.NoError(t, err) + require.NotNil(t, relayers) + require.Len(t, relayers, nEnabledChains) + // make sure registry has the plugin + require.Len(t, reg.List(), nEnabledChains) + }) + + // test that duplicate enabled chains is an error when + duplicateConfig := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Starknet = stkcfg.TOMLConfigs{ + &stkcfg.TOMLConfig{ + ChainID: ptr[string]("dupe"), + Enabled: ptr(true), + Chain: stkcfg.Chain{}, + Nodes: []*config.Node{}, + }, + &stkcfg.TOMLConfig{ + ChainID: ptr[string]("dupe"), + Enabled: ptr(true), + Chain: stkcfg.Chain{}, + Nodes: []*config.Node{}, + }, + } + }) + + // not parallel; shared 
state + t.Run("no plugin, duplicate chains", func(t *testing.T) { + _, err := rf.NewStarkNet(ks, duplicateConfig.StarknetConfigs()) + require.Error(t, err) + }) + + t.Run("plugin, duplicate chains", func(t *testing.T) { + t.Setenv("CL_STARKNET_CMD", "phony_starknet_cmd") + _, err := rf.NewStarkNet(ks, duplicateConfig.StarknetConfigs()) + require.Error(t, err) + }) + + t.Run("plugin env parsing fails", func(t *testing.T) { + t.Setenv("CL_STARKNET_CMD", "phony_starknet_cmd") + t.Setenv("CL_STARKNET_ENV", "fake_path") + + _, err := rf.NewStarkNet(ks, t2Config.StarknetConfigs()) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to parse Starknet env file") + }) + + t.Run("plugin already registered", func(t *testing.T) { + t.Setenv("CL_STARKNET_CMD", "phony_starknet_cmd") + + _, err := rf.NewStarkNet(ks, tConfig.StarknetConfigs()) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to create StarkNet LOOP command") + }) +} + +// flagSetApplyFromAction applies the flags from action to the flagSet. 
+// `parentCommand` will filter the app commands and only applies the flags if the command/subcommand has a parent with that name, if left empty no filtering is done +func flagSetApplyFromAction(action interface{}, flagSet *flag.FlagSet, parentCommand string) { + cliApp := cmd.Shell{} + app := cmd.NewApp(&cliApp) + + foundName := parentCommand == "" + actionFuncName := getFuncName(action) + + for _, command := range app.Commands { + flags := recursiveFindFlagsWithName(actionFuncName, command, parentCommand, foundName) + + for _, flag := range flags { + flag.Apply(flagSet) + } + } + +} + +func recursiveFindFlagsWithName(actionFuncName string, command cli.Command, parent string, foundName bool) []cli.Flag { + + if command.Action != nil { + if actionFuncName == getFuncName(command.Action) && foundName { + return command.Flags + } + } + + for _, subcommand := range command.Subcommands { + if !foundName { + foundName = strings.EqualFold(subcommand.Name, parent) + } + + found := recursiveFindFlagsWithName(actionFuncName, subcommand, parent, foundName) + if found != nil { + return found + } + } + return nil +} + +func getFuncName(i interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() +} diff --git a/core/cmd/solana_chains_commands.go b/core/cmd/solana_chains_commands.go new file mode 100644 index 00000000..b25f6c2e --- /dev/null +++ b/core/cmd/solana_chains_commands.go @@ -0,0 +1,48 @@ +package cmd + +import ( + "strconv" + + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// SolanaChainPresenter implements TableRenderer for a SolanaChainResource +type SolanaChainPresenter struct { + presenters.SolanaChainResource +} + +// ToRow presents the SolanaChainResource as a slice of strings. 
+func (p *SolanaChainPresenter) ToRow() []string { + return []string{p.GetID(), strconv.FormatBool(p.Enabled), p.Config} +} + +// RenderTable implements TableRenderer +// Just renders a single row +func (p SolanaChainPresenter) RenderTable(rt RendererTable) error { + rows := [][]string{} + rows = append(rows, p.ToRow()) + + renderList(chainHeaders, rows, rt.Writer) + + return nil +} + +// SolanaChainPresenters implements TableRenderer for a slice of SolanaChainPresenters. +type SolanaChainPresenters []SolanaChainPresenter + +// RenderTable implements TableRenderer +func (ps SolanaChainPresenters) RenderTable(rt RendererTable) error { + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(chainHeaders, rows, rt.Writer) + + return nil +} + +func SolanaChainClient(s *Shell) ChainClient { + return newChainClient[SolanaChainPresenters](s, "solana") +} diff --git a/core/cmd/solana_chains_commands_test.go b/core/cmd/solana_chains_commands_test.go new file mode 100644 index 00000000..3e26c3b3 --- /dev/null +++ b/core/cmd/solana_chains_commands_test.go @@ -0,0 +1,32 @@ +package cmd_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-solana/pkg/solana" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/solanatest" +) + +func TestShell_IndexSolanaChains(t *testing.T) { + t.Parallel() + + id := solanatest.RandomChainID() + cfg := solana.TOMLConfig{ + ChainID: &id, + Enabled: ptr(true), + } + app := solanaStartNewApplication(t, &cfg) + client, r := app.NewShellAndRenderer() + + require.Nil(t, cmd.SolanaChainClient(client).IndexChains(cltest.EmptyCLIContext())) + chains := *r.Renders[0].(*cmd.SolanaChainPresenters) + require.Len(t, chains, 1) + c := chains[0] + assert.Equal(t, id, c.ID) + assertTableRenders(t, r) +} diff --git 
a/core/cmd/solana_keys_commands.go b/core/cmd/solana_keys_commands.go new file mode 100644 index 00000000..b6d9d30c --- /dev/null +++ b/core/cmd/solana_keys_commands.go @@ -0,0 +1,57 @@ +package cmd + +import ( + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +type SolanaKeyPresenter struct { + JAID + presenters.SolanaKeyResource +} + +// RenderTable implements TableRenderer +func (p SolanaKeyPresenter) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Public key"} + rows := [][]string{p.ToRow()} + + if _, err := rt.Write([]byte("🔑 Solana Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return utils.JustError(rt.Write([]byte("\n"))) +} + +func (p *SolanaKeyPresenter) ToRow() []string { + row := []string{ + p.ID, + p.PubKey, + } + + return row +} + +type SolanaKeyPresenters []SolanaKeyPresenter + +// RenderTable implements TableRenderer +func (ps SolanaKeyPresenters) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Public key"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + if _, err := rt.Write([]byte("🔑 Solana Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return utils.JustError(rt.Write([]byte("\n"))) +} + +func NewSolanaKeysClient(s *Shell) KeysClient { + return newKeysClient[solkey.Key, SolanaKeyPresenter, SolanaKeyPresenters]("Solana", s) +} diff --git a/core/cmd/solana_keys_commands_test.go b/core/cmd/solana_keys_commands_test.go new file mode 100644 index 00000000..454dfb8c --- /dev/null +++ b/core/cmd/solana_keys_commands_test.go @@ -0,0 +1,170 @@ +package cmd_test + +import ( + "bytes" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/goplugin/plugin-common/pkg/utils" 
+ "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestSolanaKeyPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + id = "1" + pubKey = "somepubkey" + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.SolanaKeyPresenter{ + JAID: cmd.JAID{ID: id}, + SolanaKeyResource: presenters.SolanaKeyResource{ + JAID: presenters.NewJAID(id), + PubKey: pubKey, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, pubKey) + + // Render many resources + buffer.Reset() + ps := cmd.SolanaKeyPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, pubKey) +} + +func TestShell_SolanaKeys(t *testing.T) { + app := startNewApplicationV2(t, nil) + ks := app.GetKeyStore().Solana() + cleanup := func() { + keys, err := ks.GetAll() + require.NoError(t, err) + for _, key := range keys { + require.NoError(t, utils.JustError(ks.Delete(key.ID()))) + } + requireSolanaKeyCount(t, app, 0) + } + + t.Run("ListSolanaKeys", func(tt *testing.T) { + defer cleanup() + client, r := app.NewShellAndRenderer() + key, err := app.GetKeyStore().Solana().Create() + require.NoError(t, err) + requireSolanaKeyCount(t, app, 1) + assert.Nil(t, cmd.NewSolanaKeysClient(client).ListKeys(cltest.EmptyCLIContext())) + require.Equal(t, 1, len(r.Renders)) + keys := *r.Renders[0].(*cmd.SolanaKeyPresenters) + assert.True(t, key.PublicKeyStr() == keys[0].PubKey) + + }) + + t.Run("CreateSolanaKey", func(tt *testing.T) { + defer cleanup() + client, _ := app.NewShellAndRenderer() + require.NoError(t, 
cmd.NewSolanaKeysClient(client).CreateKey(nilContext)) + keys, err := app.GetKeyStore().Solana().GetAll() + require.NoError(t, err) + require.Len(t, keys, 1) + }) + + t.Run("DeleteSolanaKey", func(tt *testing.T) { + defer cleanup() + client, _ := app.NewShellAndRenderer() + key, err := app.GetKeyStore().Solana().Create() + require.NoError(t, err) + requireSolanaKeyCount(t, app, 1) + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(cmd.NewSolanaKeysClient(client).DeleteKey, set, "solana") + + require.NoError(tt, set.Set("yes", "true")) + + strID := key.ID() + err = set.Parse([]string{strID}) + require.NoError(t, err) + c := cli.NewContext(nil, set, nil) + err = cmd.NewSolanaKeysClient(client).DeleteKey(c) + require.NoError(t, err) + requireSolanaKeyCount(t, app, 0) + }) + + t.Run("ImportExportSolanaKey", func(tt *testing.T) { + defer cleanup() + defer deleteKeyExportFile(t) + client, _ := app.NewShellAndRenderer() + + _, err := app.GetKeyStore().Solana().Create() + require.NoError(t, err) + + keys := requireSolanaKeyCount(t, app, 1) + key := keys[0] + keyName := keyNameForTest(t) + + // Export test invalid id + set := flag.NewFlagSet("test Solana export", 0) + flagSetApplyFromAction(cmd.NewSolanaKeysClient(client).ExportKey, set, "solana") + + require.NoError(tt, set.Parse([]string{"0"})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c := cli.NewContext(nil, set, nil) + err = cmd.NewSolanaKeysClient(client).ExportKey(c) + require.Error(t, err, "Error exporting") + require.Error(t, utils.JustError(os.Stat(keyName))) + + // Export test + set = flag.NewFlagSet("test Solana export", 0) + flagSetApplyFromAction(cmd.NewSolanaKeysClient(client).ExportKey, set, "solana") + + require.NoError(tt, set.Parse([]string{fmt.Sprint(key.ID())})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(tt, 
set.Set("output", keyName)) + + c = cli.NewContext(nil, set, nil) + + require.NoError(t, cmd.NewSolanaKeysClient(client).ExportKey(c)) + require.NoError(t, utils.JustError(os.Stat(keyName))) + + require.NoError(t, utils.JustError(app.GetKeyStore().Solana().Delete(key.ID()))) + requireSolanaKeyCount(t, app, 0) + + set = flag.NewFlagSet("test Solana import", 0) + flagSetApplyFromAction(cmd.NewSolanaKeysClient(client).ImportKey, set, "solana") + + require.NoError(tt, set.Parse([]string{keyName})) + require.NoError(tt, set.Set("old-password", "../internal/fixtures/incorrect_password.txt")) + c = cli.NewContext(nil, set, nil) + require.NoError(t, cmd.NewSolanaKeysClient(client).ImportKey(c)) + + requireSolanaKeyCount(t, app, 1) + }) +} + +func requireSolanaKeyCount(t *testing.T, app plugin.Application, length int) []solkey.Key { + t.Helper() + keys, err := app.GetKeyStore().Solana().GetAll() + require.NoError(t, err) + require.Len(t, keys, length) + return keys +} diff --git a/core/cmd/solana_node_commands.go b/core/cmd/solana_node_commands.go new file mode 100644 index 00000000..9d6c2e20 --- /dev/null +++ b/core/cmd/solana_node_commands.go @@ -0,0 +1,44 @@ +package cmd + +import ( + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// SolanaNodePresenter implements TableRenderer for a SolanaNodeResource. +type SolanaNodePresenter struct { + presenters.SolanaNodeResource +} + +// ToRow presents the SolanaNodeResource as a slice of strings. +func (p *SolanaNodePresenter) ToRow() []string { + return []string{p.Name, p.ChainID, p.State, p.Config} +} + +// RenderTable implements TableRenderer +func (p SolanaNodePresenter) RenderTable(rt RendererTable) error { + var rows [][]string + rows = append(rows, p.ToRow()) + renderList(nodeHeaders, rows, rt.Writer) + + return nil +} + +// SolanaNodePresenters implements TableRenderer for a slice of SolanaNodePresenter. 
+type SolanaNodePresenters []SolanaNodePresenter + +// RenderTable implements TableRenderer +func (ps SolanaNodePresenters) RenderTable(rt RendererTable) error { + var rows [][]string + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(nodeHeaders, rows, rt.Writer) + + return nil +} + +func NewSolanaNodeClient(s *Shell) NodeClient { + return newNodeClient[SolanaNodePresenters](s, "solana") +} diff --git a/core/cmd/solana_node_commands_test.go b/core/cmd/solana_node_commands_test.go new file mode 100644 index 00000000..870801ef --- /dev/null +++ b/core/cmd/solana_node_commands_test.go @@ -0,0 +1,89 @@ +package cmd_test + +import ( + "bytes" + "strings" + "testing" + + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/config" + solcfg "github.com/goplugin/plugin-solana/pkg/solana/config" + + "github.com/goplugin/plugin-solana/pkg/solana" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/solanatest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func solanaStartNewApplication(t *testing.T, cfgs ...*solana.TOMLConfig) *cltest.TestApplication { + for i := range cfgs { + cfgs[i].SetDefaults() + } + return startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Solana = cfgs + c.EVM = nil + }) +} + +func TestShell_IndexSolanaNodes(t *testing.T) { + t.Parallel() + + id := solanatest.RandomChainID() + node1 := solcfg.Node{ + Name: ptr("first"), + URL: config.MustParseURL("https://solana1.example"), + } + node2 := solcfg.Node{ + Name: ptr("second"), + URL: config.MustParseURL("https://solana2.example"), + } + chain := solana.TOMLConfig{ + ChainID: &id, + Nodes: solana.SolanaNodes{&node1, &node2}, + } + app := solanaStartNewApplication(t, &chain) + client, r := app.NewShellAndRenderer() + + 
require.Nil(t, cmd.NewSolanaNodeClient(client).IndexNodes(cltest.EmptyCLIContext())) + require.NotEmpty(t, r.Renders) + nodes := *r.Renders[0].(*cmd.SolanaNodePresenters) + require.Len(t, nodes, 2) + n1 := nodes[0] + n2 := nodes[1] + assert.Equal(t, id, n1.ChainID) + assert.Equal(t, cltest.FormatWithPrefixedChainID(id, *node1.Name), n1.ID) + assert.Equal(t, *node1.Name, n1.Name) + wantConfig, err := toml.Marshal(node1) + require.NoError(t, err) + assert.Equal(t, string(wantConfig), n1.Config) + assert.Equal(t, id, n2.ChainID) + assert.Equal(t, cltest.FormatWithPrefixedChainID(id, *node2.Name), n2.ID) + assert.Equal(t, *node2.Name, n2.Name) + wantConfig2, err := toml.Marshal(node2) + require.NoError(t, err) + assert.Equal(t, string(wantConfig2), n2.Config) + assertTableRenders(t, r) + + //Render table and check the fields order + b := new(bytes.Buffer) + rt := cmd.RendererTable{b} + require.NoError(t, nodes.RenderTable(rt)) + renderLines := strings.Split(b.String(), "\n") + assert.Equal(t, 17, len(renderLines)) + assert.Contains(t, renderLines[2], "Name") + assert.Contains(t, renderLines[2], n1.Name) + assert.Contains(t, renderLines[3], "Chain ID") + assert.Contains(t, renderLines[3], n1.ChainID) + assert.Contains(t, renderLines[4], "State") + assert.Contains(t, renderLines[4], n1.State) + assert.Contains(t, renderLines[9], "Name") + assert.Contains(t, renderLines[9], n2.Name) + assert.Contains(t, renderLines[10], "Chain ID") + assert.Contains(t, renderLines[10], n2.ChainID) + assert.Contains(t, renderLines[11], "State") + assert.Contains(t, renderLines[11], n2.State) +} diff --git a/core/cmd/solana_transaction_commands.go b/core/cmd/solana_transaction_commands.go new file mode 100644 index 00000000..f12ca989 --- /dev/null +++ b/core/cmd/solana_transaction_commands.go @@ -0,0 +1,120 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + + solanaGo "github.com/gagliardetto/solana-go" + "github.com/pkg/errors" + "github.com/urfave/cli" + 
"go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/store/models/solana" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initSolanaTxSubCmd(s *Shell) cli.Command { + return cli.Command{ + Name: "solana", + Usage: "Commands for handling Solana transactions", + Subcommands: []cli.Command{ + { + Name: "create", + Usage: "Send lamports from node Solana account to destination .", + Action: s.SolanaSendSol, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "force", + Usage: "allows to send a higher amount than the account's balance", + }, + cli.StringFlag{ + Name: "id", + Usage: "chain ID, options: [mainnet, testnet, devnet, localnet]", + }, + }, + }, + }, + } +} + +type SolanaMsgPresenter struct { + JAID + presenters.SolanaMsgResource +} + +// RenderTable implements TableRenderer +func (p *SolanaMsgPresenter) RenderTable(rt RendererTable) error { + table := rt.newTable([]string{"Chain ID", "From", "To", "Amount"}) + table.Append([]string{ + p.ChainID, + p.From, + p.To, + strconv.FormatUint(p.Amount, 10), + }) + + render(fmt.Sprintf("Solana Message %v", p.ID), table) + return nil +} + +// SolanaSendSol transfers sol from the node's account to a specified address. 
+func (s *Shell) SolanaSendSol(c *cli.Context) (err error) { + if c.NArg() < 3 { + return s.errorOut(errors.New("three arguments expected: amount, fromAddress and toAddress")) + } + + amount, err := strconv.ParseUint(c.Args().Get(0), 10, 64) + if err != nil { + return s.errorOut(fmt.Errorf("invalid amount: %w", err)) + } + + unparsedFromAddress := c.Args().Get(1) + fromAddress, err := solanaGo.PublicKeyFromBase58(unparsedFromAddress) + if err != nil { + return s.errorOut(multierr.Combine( + errors.Errorf("while parsing withdrawal source address %v", + unparsedFromAddress), err)) + } + + unparsedDestinationAddress := c.Args().Get(2) + destinationAddress, err := solanaGo.PublicKeyFromBase58(unparsedDestinationAddress) + if err != nil { + return s.errorOut(multierr.Combine( + errors.Errorf("while parsing withdrawal destination address %v", + unparsedDestinationAddress), err)) + } + + chainID := c.String("id") + if chainID == "" { + return s.errorOut(errors.New("missing id")) + } + + request := solana.SendRequest{ + To: destinationAddress, + From: fromAddress, + Amount: amount, + SolanaChainID: chainID, + AllowHigherAmounts: c.IsSet("force"), + } + + requestData, err := json.Marshal(request) + if err != nil { + return s.errorOut(err) + } + + buf := bytes.NewBuffer(requestData) + + resp, err := s.HTTP.Post(s.ctx(), "/v2/transfers/solana", buf) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + err = s.renderAPIResponse(resp, &SolanaMsgPresenter{}) + return err +} diff --git a/core/cmd/solana_transaction_commands_test.go b/core/cmd/solana_transaction_commands_test.go new file mode 100644 index 00000000..f47cdf82 --- /dev/null +++ b/core/cmd/solana_transaction_commands_test.go @@ -0,0 +1,139 @@ +//go:build integration + +package cmd_test + +import ( + "bytes" + "flag" + "os/exec" + "strconv" + "testing" + "time" + + solanago "github.com/gagliardetto/solana-go" + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-solana/pkg/solana" + solanaClient "github.com/goplugin/plugin-solana/pkg/solana/client" + solcfg "github.com/goplugin/plugin-solana/pkg/solana/config" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" +) + +func TestShell_SolanaSendSol(t *testing.T) { + chainID := "localnet" + url := solanaClient.SetupLocalSolNode(t) + node := solcfg.Node{ + Name: ptr(t.Name()), + URL: config.MustParseURL(url), + } + cfg := solana.TOMLConfig{ + ChainID: &chainID, + Nodes: solana.SolanaNodes{&node}, + Enabled: ptr(true), + } + app := solanaStartNewApplication(t, &cfg) + from, err := app.GetKeyStore().Solana().Create() + require.NoError(t, err) + to, err := solanago.NewRandomPrivateKey() + require.NoError(t, err) + solanaClient.FundTestAccounts(t, []solanago.PublicKey{from.PublicKey()}, url) + + require.Eventually(t, func() bool { + coin, err := balance(from.PublicKey(), url) + if err != nil { + return false + } + return coin == 100*solanago.LAMPORTS_PER_SOL + }, time.Minute, 5*time.Second) + + client, r := app.NewShellAndRenderer() + cliapp := cli.NewApp() + + for _, tt := range []struct { + amount string + expErr string + }{ + {amount: "1000000000"}, + {amount: "100000000000", expErr: "is too low for this transaction to be executed:"}, + {amount: "0", expErr: "amount must be greater than zero"}, + {amount: "asdf", expErr: "invalid amount:"}, + } { + tt := tt + t.Run(tt.amount, func(t *testing.T) { + startBal, err := balance(from.PublicKey(), url) + require.NoError(t, err) + + set := flag.NewFlagSet("sendsolcoins", 0) + flagSetApplyFromAction(client.SolanaSendSol, set, "solana") + + require.NoError(t, set.Set("id", chainID)) + require.NoError(t, set.Parse([]string{tt.amount, from.PublicKey().String(), to.PublicKey().String()})) + + c := cli.NewContext(cliapp, set, nil) + err = client.SolanaSendSol(c) + 
if tt.expErr == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expErr) + return + } + + // Check CLI output + require.Greater(t, len(r.Renders), 0) + renderer := r.Renders[len(r.Renders)-1] + renderedMsg := renderer.(*cmd.SolanaMsgPresenter) + t.Logf("%+v\n", renderedMsg) + require.NotEmpty(t, renderedMsg.ID) + assert.Equal(t, chainID, renderedMsg.ChainID) + assert.Equal(t, from.PublicKey().String(), renderedMsg.From) + assert.Equal(t, to.PublicKey().String(), renderedMsg.To) + assert.Equal(t, tt.amount, strconv.FormatUint(renderedMsg.Amount, 10)) + + // wait for updated balance + updated := false + endBal := uint64(0) + for i := 0; i < 5; i++ { + time.Sleep(time.Second) // wait for tx execution + + // Check balance + endBal, err = balance(from.PublicKey(), url) + require.NoError(t, err) + require.NoError(t, err) + + // exit if difference found + if endBal != startBal { + updated = true + break + } + } + require.True(t, updated, "end bal == start bal, transaction likely not succeeded") + + // Check balance + if assert.NotEqual(t, 0, startBal) && assert.NotEqual(t, 0, endBal) { + diff := startBal - endBal + receiveBal, err := balance(to.PublicKey(), url) + require.NoError(t, err) + assert.Equal(t, tt.amount, strconv.FormatUint(receiveBal, 10)) + assert.Greater(t, diff, receiveBal) + } + }) + } +} + +func balance(key solanago.PublicKey, url string) (uint64, error) { + b, err := exec.Command("solana", "balance", "--lamports", key.String(), "--url", url).Output() + if err != nil { + return 0, err + } + b = bytes.TrimSuffix(b, []byte(" lamports\n")) + i, err := strconv.ParseUint(string(b), 10, 64) + if err != nil { + return 0, err + } + return i, nil +} diff --git a/core/cmd/starknet_chains_commands.go b/core/cmd/starknet_chains_commands.go new file mode 100644 index 00000000..c42af19b --- /dev/null +++ b/core/cmd/starknet_chains_commands.go @@ -0,0 +1,48 @@ +package cmd + +import ( + "strconv" + + 
"github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// StarkNetChainPresenter implements TableRenderer for a StarkNetChainResource +type StarkNetChainPresenter struct { + presenters.StarkNetChainResource +} + +// ToRow presents the StarkNetChainResource as a slice of strings. +func (p *StarkNetChainPresenter) ToRow() []string { + return []string{p.GetID(), strconv.FormatBool(p.Enabled), p.Config} +} + +// RenderTable implements TableRenderer +// Just renders a single row +func (p StarkNetChainPresenter) RenderTable(rt RendererTable) error { + rows := [][]string{} + rows = append(rows, p.ToRow()) + + renderList(chainHeaders, rows, rt.Writer) + + return nil +} + +// StarkNetChainPresenters implements TableRenderer for a slice of StarkNetChainPresenters. +type StarkNetChainPresenters []StarkNetChainPresenter + +// RenderTable implements TableRenderer +func (ps StarkNetChainPresenters) RenderTable(rt RendererTable) error { + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(chainHeaders, rows, rt.Writer) + + return nil +} + +func StarkNetChainClient(s *Shell) ChainClient { + return newChainClient[StarkNetChainPresenters](s, "starknet") +} diff --git a/core/cmd/starknet_keys_commands.go b/core/cmd/starknet_keys_commands.go new file mode 100644 index 00000000..769c7398 --- /dev/null +++ b/core/cmd/starknet_keys_commands.go @@ -0,0 +1,57 @@ +package cmd + +import ( + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/starkkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +type StarkNetKeyPresenter struct { + JAID + presenters.StarkNetKeyResource +} + +// RenderTable implements TableRenderer +func (p StarkNetKeyPresenter) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Stark Public Key"} + rows := [][]string{p.ToRow()} + + if _, err := rt.Write([]byte("🔑 StarkNet Keys\n")); err != nil { + return err + } + 
renderList(headers, rows, rt.Writer) + + return utils.JustError(rt.Write([]byte("\n"))) +} + +func (p *StarkNetKeyPresenter) ToRow() []string { + row := []string{ + p.ID, + p.StarkKey, + } + + return row +} + +type StarkNetKeyPresenters []StarkNetKeyPresenter + +// RenderTable implements TableRenderer +func (ps StarkNetKeyPresenters) RenderTable(rt RendererTable) error { + headers := []string{"ID", "Stark Public Key"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + if _, err := rt.Write([]byte("🔑 StarkNet Keys\n")); err != nil { + return err + } + renderList(headers, rows, rt.Writer) + + return utils.JustError(rt.Write([]byte("\n"))) +} + +func NewStarkNetKeysClient(s *Shell) KeysClient { + return newKeysClient[starkkey.Key, StarkNetKeyPresenter, StarkNetKeyPresenters]("StarkNet", s) +} diff --git a/core/cmd/starknet_keys_commands_test.go b/core/cmd/starknet_keys_commands_test.go new file mode 100644 index 00000000..373870fc --- /dev/null +++ b/core/cmd/starknet_keys_commands_test.go @@ -0,0 +1,170 @@ +package cmd_test + +import ( + "bytes" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/starkkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestStarkNetKeyPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + id = "1" + starkKey = "somestarkkey" + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.StarkNetKeyPresenter{ + JAID: cmd.JAID{ID: id}, + StarkNetKeyResource: presenters.StarkNetKeyResource{ + JAID: presenters.NewJAID(id), + StarkKey: starkKey, + }, + } + + // Render a 
single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, id) + + // Render many resources + buffer.Reset() + ps := cmd.StarkNetKeyPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, id) + assert.Contains(t, output, starkKey) +} + +func TestShell_StarkNetKeys(t *testing.T) { + app := startNewApplicationV2(t, nil) + ks := app.GetKeyStore().StarkNet() + cleanup := func() { + keys, err := ks.GetAll() + require.NoError(t, err) + for _, key := range keys { + require.NoError(t, utils.JustError(ks.Delete(key.ID()))) + } + requireStarkNetKeyCount(t, app, 0) + } + + t.Run("ListStarkNetKeys", func(tt *testing.T) { + defer cleanup() + client, r := app.NewShellAndRenderer() + key, err := app.GetKeyStore().StarkNet().Create() + require.NoError(t, err) + requireStarkNetKeyCount(t, app, 1) + assert.Nil(t, cmd.NewStarkNetKeysClient(client).ListKeys(cltest.EmptyCLIContext())) + require.Equal(t, 1, len(r.Renders)) + keys := *r.Renders[0].(*cmd.StarkNetKeyPresenters) + assert.True(t, key.StarkKeyStr() == keys[0].StarkKey) + + }) + + t.Run("CreateStarkNetKey", func(tt *testing.T) { + defer cleanup() + client, _ := app.NewShellAndRenderer() + require.NoError(t, cmd.NewStarkNetKeysClient(client).CreateKey(nilContext)) + keys, err := app.GetKeyStore().StarkNet().GetAll() + require.NoError(t, err) + require.Len(t, keys, 1) + }) + + t.Run("DeleteStarkNetKey", func(tt *testing.T) { + defer cleanup() + client, _ := app.NewShellAndRenderer() + key, err := app.GetKeyStore().StarkNet().Create() + require.NoError(t, err) + requireStarkNetKeyCount(t, app, 1) + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(cmd.NewStarkNetKeysClient(client).DeleteKey, set, "starknet") + + require.NoError(tt, set.Set("yes", "true")) + + strID := key.ID() + err = set.Parse([]string{strID}) + require.NoError(t, err) + c := cli.NewContext(nil, set, nil) + err = 
cmd.NewStarkNetKeysClient(client).DeleteKey(c) + require.NoError(t, err) + requireStarkNetKeyCount(t, app, 0) + }) + + t.Run("ImportExportStarkNetKey", func(tt *testing.T) { + defer cleanup() + defer deleteKeyExportFile(t) + client, _ := app.NewShellAndRenderer() + + _, err := app.GetKeyStore().StarkNet().Create() + require.NoError(t, err) + + keys := requireStarkNetKeyCount(t, app, 1) + key := keys[0] + keyName := keyNameForTest(t) + + // Export test invalid id + set := flag.NewFlagSet("test StarkNet export", 0) + flagSetApplyFromAction(cmd.NewStarkNetKeysClient(client).ExportKey, set, "starknet") + + require.NoError(tt, set.Parse([]string{"0"})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c := cli.NewContext(nil, set, nil) + err = cmd.NewStarkNetKeysClient(client).ExportKey(c) + require.Error(t, err, "Error exporting") + require.Error(t, utils.JustError(os.Stat(keyName))) + + // Export test + set = flag.NewFlagSet("test StarkNet export", 0) + flagSetApplyFromAction(cmd.NewStarkNetKeysClient(client).ExportKey, set, "starknet") + + require.NoError(tt, set.Parse([]string{fmt.Sprint(key.ID())})) + require.NoError(tt, set.Set("new-password", "../internal/fixtures/incorrect_password.txt")) + require.NoError(tt, set.Set("output", keyName)) + + c = cli.NewContext(nil, set, nil) + + require.NoError(t, cmd.NewStarkNetKeysClient(client).ExportKey(c)) + require.NoError(t, utils.JustError(os.Stat(keyName))) + + require.NoError(t, utils.JustError(app.GetKeyStore().StarkNet().Delete(key.ID()))) + requireStarkNetKeyCount(t, app, 0) + + set = flag.NewFlagSet("test StarkNet import", 0) + flagSetApplyFromAction(cmd.NewStarkNetKeysClient(client).ImportKey, set, "starknet") + + require.NoError(tt, set.Parse([]string{keyName})) + require.NoError(tt, set.Set("old-password", "../internal/fixtures/incorrect_password.txt")) + + c = cli.NewContext(nil, set, nil) + require.NoError(t, 
cmd.NewStarkNetKeysClient(client).ImportKey(c)) + + requireStarkNetKeyCount(t, app, 1) + }) +} + +func requireStarkNetKeyCount(t *testing.T, app plugin.Application, length int) []starkkey.Key { + t.Helper() + keys, err := app.GetKeyStore().StarkNet().GetAll() + require.NoError(t, err) + require.Len(t, keys, length) + return keys +} diff --git a/core/cmd/starknet_node_commands.go b/core/cmd/starknet_node_commands.go new file mode 100644 index 00000000..b3d5c0c2 --- /dev/null +++ b/core/cmd/starknet_node_commands.go @@ -0,0 +1,44 @@ +package cmd + +import ( + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// StarkNetNodePresenter implements TableRenderer for a StarkNetNodeResource. +type StarkNetNodePresenter struct { + presenters.StarkNetNodeResource +} + +// ToRow presents the StarkNetNodeResource as a slice of strings. +func (p *StarkNetNodePresenter) ToRow() []string { + return []string{p.Name, p.ChainID, p.State, p.Config} +} + +// RenderTable implements TableRenderer +func (p StarkNetNodePresenter) RenderTable(rt RendererTable) error { + var rows [][]string + rows = append(rows, p.ToRow()) + renderList(nodeHeaders, rows, rt.Writer) + + return nil +} + +// StarkNetNodePresenters implements TableRenderer for a slice of StarkNetNodePresenter. 
+type StarkNetNodePresenters []StarkNetNodePresenter + +// RenderTable implements TableRenderer +func (ps StarkNetNodePresenters) RenderTable(rt RendererTable) error { + var rows [][]string + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(nodeHeaders, rows, rt.Writer) + + return nil +} + +func NewStarkNetNodeClient(s *Shell) NodeClient { + return newNodeClient[StarkNetNodePresenters](s, "starknet") +} diff --git a/core/cmd/starknet_node_commands_test.go b/core/cmd/starknet_node_commands_test.go new file mode 100644 index 00000000..0050db31 --- /dev/null +++ b/core/cmd/starknet_node_commands_test.go @@ -0,0 +1,88 @@ +package cmd_test + +import ( + "bytes" + "strings" + "testing" + + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + commoncfg "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-starknet/relayer/pkg/plugin/config" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func starknetStartNewApplication(t *testing.T, cfgs ...*config.TOMLConfig) *cltest.TestApplication { + for i := range cfgs { + cfgs[i].SetDefaults() + } + return startNewApplicationV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Starknet = cfgs + c.EVM = nil + c.Solana = nil + }) +} + +func TestShell_IndexStarkNetNodes(t *testing.T) { + t.Parallel() + + id := "starknet chain ID" + node1 := config.Node{ + Name: ptr("first"), + URL: commoncfg.MustParseURL("https://starknet1.example"), + } + node2 := config.Node{ + Name: ptr("second"), + URL: commoncfg.MustParseURL("https://starknet2.example"), + } + chain := config.TOMLConfig{ + ChainID: &id, + Nodes: config.Nodes{&node1, &node2}, + } + app := starknetStartNewApplication(t, &chain) + client, r := app.NewShellAndRenderer() + + require.Nil(t, 
cmd.NewStarkNetNodeClient(client).IndexNodes(cltest.EmptyCLIContext())) + require.NotEmpty(t, r.Renders) + nodes := *r.Renders[0].(*cmd.StarkNetNodePresenters) + require.Len(t, nodes, 2) + n1 := nodes[0] + n2 := nodes[1] + assert.Equal(t, id, n1.ChainID) + assert.Equal(t, cltest.FormatWithPrefixedChainID(id, *node1.Name), n1.ID) + assert.Equal(t, *node1.Name, n1.Name) + wantConfig, err := toml.Marshal(node1) + require.NoError(t, err) + assert.Equal(t, string(wantConfig), n1.Config) + assert.Equal(t, id, n2.ChainID) + assert.Equal(t, cltest.FormatWithPrefixedChainID(id, *node2.Name), n2.ID) + assert.Equal(t, *node2.Name, n2.Name) + wantConfig2, err := toml.Marshal(node2) + require.NoError(t, err) + assert.Equal(t, string(wantConfig2), n2.Config) + assertTableRenders(t, r) + + //Render table and check the fields order + b := new(bytes.Buffer) + rt := cmd.RendererTable{b} + require.NoError(t, nodes.RenderTable(rt)) + renderLines := strings.Split(b.String(), "\n") + assert.Equal(t, 17, len(renderLines)) + assert.Contains(t, renderLines[2], "Name") + assert.Contains(t, renderLines[2], n1.Name) + assert.Contains(t, renderLines[3], "Chain ID") + assert.Contains(t, renderLines[3], n1.ChainID) + assert.Contains(t, renderLines[4], "State") + assert.Contains(t, renderLines[4], n1.State) + assert.Contains(t, renderLines[9], "Name") + assert.Contains(t, renderLines[9], n2.Name) + assert.Contains(t, renderLines[10], "Chain ID") + assert.Contains(t, renderLines[10], n2.ChainID) + assert.Contains(t, renderLines[11], "State") + assert.Contains(t, renderLines[11], n2.State) +} diff --git a/core/cmd/vrf_keys_commands.go b/core/cmd/vrf_keys_commands.go new file mode 100644 index 00000000..33b6f89d --- /dev/null +++ b/core/cmd/vrf_keys_commands.go @@ -0,0 +1,291 @@ +package cmd + +import ( + "bytes" + "fmt" + "io" + "net/http" + "os" + + "github.com/pkg/errors" + "github.com/urfave/cli" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" 
+ "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func initVRFKeysSubCmd(s *Shell) cli.Command { + return cli.Command{ + Name: "vrf", + Usage: "Remote commands for administering the node's vrf keys", + Subcommands: cli.Commands{ + { + Name: "create", + Usage: "Create a VRF key", + Action: s.CreateVRFKey, + }, + { + Name: "import", + Usage: "Import VRF key from keyfile", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "old-password, oldpassword, p", + Usage: "`FILE` containing the password used to encrypt the key in the JSON file", + }, + }, + Action: s.ImportVRFKey, + }, + { + Name: "export", + Usage: "Export VRF key to keyfile", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "new-password, newpassword, p", + Usage: "`FILE` containing the password to encrypt the key (required)", + }, + cli.StringFlag{ + Name: "output, o", + Usage: "`FILE` where the JSON file will be saved (required)", + }, + }, + Action: s.ExportVRFKey, + }, + { + Name: "delete", + Usage: "Archive or delete VRF key from memory and the database, if present. 
" + + "Note that jobs referencing the removed key will also be removed.", + Flags: []cli.Flag{ + cli.StringFlag{Name: "publicKey, pk"}, + cli.BoolFlag{ + Name: "yes, y", + Usage: "skip the confirmation prompt", + }, + cli.BoolFlag{ + Name: "hard", + Usage: "hard-delete the key instead of archiving (irreversible!)", + }, + }, + Action: s.DeleteVRFKey, + }, + { + Name: "list", Usage: "List the VRF keys", + Action: s.ListVRFKeys, + }, + }, + } +} + +type VRFKeyPresenter struct { + JAID // Include this to overwrite the presenter JAID so it can correctly render the ID in JSON + presenters.VRFKeyResource +} + +// RenderTable implements TableRenderer +func (p *VRFKeyPresenter) RenderTable(rt RendererTable) error { + headers := []string{"Compressed", "Uncompressed", "Hash"} + rows := [][]string{p.ToRow()} + renderList(headers, rows, rt.Writer) + _, err := rt.Write([]byte("\n")) + return err +} + +func (p *VRFKeyPresenter) ToRow() []string { + return []string{ + p.Compressed, + p.Uncompressed, + p.Hash, + } +} + +type VRFKeyPresenters []VRFKeyPresenter + +// RenderTable implements TableRenderer +func (ps VRFKeyPresenters) RenderTable(rt RendererTable) error { + headers := []string{"Compressed", "Uncompressed", "Hash"} + rows := [][]string{} + + for _, p := range ps { + rows = append(rows, p.ToRow()) + } + + renderList(headers, rows, rt.Writer) + _, err := rt.Write([]byte("\n")) + return err +} + +// CreateVRFKey creates a key in the VRF keystore, protected by the password in +// the vrf password file provided when starting the plugin node. 
+func (s *Shell) CreateVRFKey(_ *cli.Context) error { + resp, err := s.HTTP.Post(s.ctx(), "/v2/keys/vrf", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var presenter VRFKeyPresenter + return s.renderAPIResponse(resp, &presenter) +} + +// ImportVRFKey reads a file into an EncryptedVRFKey in the db +func (s *Shell) ImportVRFKey(c *cli.Context) error { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the filepath of the key to be imported")) + } + + oldPasswordFile := c.String("old-password") + if len(oldPasswordFile) == 0 { + return s.errorOut(errors.New("Must specify --old-password/-p flag")) + } + oldPassword, err := os.ReadFile(oldPasswordFile) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read password file")) + } + + filepath := c.Args().Get(0) + keyJSON, err := os.ReadFile(filepath) + if err != nil { + return s.errorOut(err) + } + + normalizedPassword := normalizePassword(string(oldPassword)) + resp, err := s.HTTP.Post(s.ctx(), "/v2/keys/vrf/import?oldpassword="+normalizedPassword, bytes.NewReader(keyJSON)) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var presenter VRFKeyPresenter + return s.renderAPIResponse(resp, &presenter, "Imported VRF key") +} + +// ExportVRFKey saves encrypted copy of VRF key with given public key to +// requested file path. 
+func (s *Shell) ExportVRFKey(c *cli.Context) error { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the ID (compressed public key) of the key to export")) + } + + newPasswordFile := c.String("new-password") + if len(newPasswordFile) == 0 { + return s.errorOut(errors.New("Must specify --new-password/-p flag")) + } + newPassword, err := os.ReadFile(newPasswordFile) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read password file")) + } + + filepath := c.String("output") + if len(filepath) == 0 { + return s.errorOut(errors.New("Must specify --output/-o flag")) + } + + pk, err := getPublicKey(c) + if err != nil { + return s.errorOut(err) + } + + normalizedPassword := normalizePassword(string(newPassword)) + resp, err := s.HTTP.Post(s.ctx(), "/v2/keys/vrf/export/"+pk.String()+"?newpassword="+normalizedPassword, nil) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not make HTTP request")) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + if resp.StatusCode != http.StatusOK { + return s.errorOut(fmt.Errorf("error exporting: %w", httpError(resp))) + } + + keyJSON, err := io.ReadAll(resp.Body) + if err != nil { + return s.errorOut(errors.Wrap(err, "Could not read response body")) + } + + err = utils.WriteFileWithMaxPerms(filepath, keyJSON, 0o600) + if err != nil { + return s.errorOut(errors.Wrapf(err, "Could not write %v", filepath)) + } + + _, err = os.Stderr.WriteString(fmt.Sprintf("Exported VRF key %s to %s\n", pk.String(), filepath)) + if err != nil { + return s.errorOut(err) + } + + return nil +} + +// DeleteVRFKey deletes (hard or soft) the VRF key with given public key from the db +// and memory. V2 jobs referencing the VRF key will be removed if the key is deleted +// (no such protection for the V1 jobs exists). 
+func (s *Shell) DeleteVRFKey(c *cli.Context) error { + if !c.Args().Present() { + return s.errorOut(errors.New("Must pass the key ID (compressed public key) to be deleted")) + } + id, err := getPublicKey(c) + if err != nil { + return s.errorOut(err) + } + + if !confirmAction(c) { + return nil + } + + var queryStr string + if c.Bool("hard") { + queryStr = "?hard=true" + } + + resp, err := s.HTTP.Delete(s.ctx(), fmt.Sprintf("/v2/keys/vrf/%s%s", id, queryStr)) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var presenter VRFKeyPresenter + return s.renderAPIResponse(resp, &presenter, "VRF key deleted") +} + +func getPublicKey(c *cli.Context) (secp256k1.PublicKey, error) { + pkHexString := c.Args().Get(0) + if pkHexString == "" { + return secp256k1.PublicKey{}, fmt.Errorf("must specify public key") + } + publicKey, err := secp256k1.NewPublicKeyFromHex(pkHexString) + if err != nil { + return secp256k1.PublicKey{}, errors.Wrap(err, "failed to parse public key") + } + return publicKey, nil +} + +// ListKeys Lists the keys in the db +func (s *Shell) ListVRFKeys(_ *cli.Context) error { + resp, err := s.HTTP.Get(s.ctx(), "/v2/keys/vrf", nil) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var presenters VRFKeyPresenters + return s.renderAPIResponse(resp, &presenters, "🔑 VRF Keys") +} diff --git a/core/cmd/vrf_keys_commands_test.go b/core/cmd/vrf_keys_commands_test.go new file mode 100644 index 00000000..67dd7b54 --- /dev/null +++ b/core/cmd/vrf_keys_commands_test.go @@ -0,0 +1,183 @@ +package cmd_test + +import ( + "bytes" + "flag" + "os" + "testing" + + "github.com/urfave/cli" + + "github.com/goplugin/plugin-common/pkg/utils" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestVRFKeyPresenter_RenderTable(t *testing.T) { + t.Parallel() + + var ( + compressed = "0xe2c659dd73ded1663c0caf02304aac5ccd247047b3993d273a8920bba0402f4d01" + uncompressed = "0xe2c659dd73ded1663c0caf02304aac5ccd247047b3993d273a8920bba0402f4db44652a69526181101d4aa9a58ecf43b1be972330de99ea5e540f56f4e0a672f" + hash = "0x9926c5f19ec3b3ce005e1c183612f05cfc042966fcdd82ec6e78bf128d91695a" + buffer = bytes.NewBufferString("") + r = cmd.RendererTable{Writer: buffer} + ) + + p := cmd.VRFKeyPresenter{ + VRFKeyResource: presenters.VRFKeyResource{ + Compressed: compressed, + Uncompressed: uncompressed, + Hash: hash, + }, + } + + // Render a single resource + require.NoError(t, p.RenderTable(r)) + + output := buffer.String() + assert.Contains(t, output, compressed) + assert.Contains(t, output, uncompressed) + assert.Contains(t, output, hash) + + // Render many resources + buffer.Reset() + ps := cmd.VRFKeyPresenters{p} + require.NoError(t, ps.RenderTable(r)) + + output = buffer.String() + assert.Contains(t, output, compressed) + assert.Contains(t, output, uncompressed) + assert.Contains(t, output, hash) +} + +func AssertKeysEqual(t *testing.T, k1, k2 cmd.VRFKeyPresenter) { + AssertKeysEqualNoTimestamps(t, k1, k2) +} + +func AssertKeysEqualNoTimestamps(t *testing.T, k1, k2 cmd.VRFKeyPresenter) { + assert.Equal(t, k1.Compressed, k2.Compressed) + assert.Equal(t, k1.Hash, k2.Hash) + assert.Equal(t, k1.Uncompressed, k2.Uncompressed) +} + +func TestShellVRF_CRUD(t *testing.T) { + t.Parallel() + + // Test application boots with vrf password loaded in memory. + // i.e. 
as if a user had booted with --vrfpassword= + app := startNewApplicationV2(t, nil) + client, r := app.NewShellAndRenderer() + + require.NoError(t, client.ListVRFKeys(cltest.EmptyCLIContext())) + require.Equal(t, 1, len(r.Renders)) + keys := *r.Renders[0].(*cmd.VRFKeyPresenters) + // No keys yet + require.Equal(t, 0, len(keys)) + + // Create a VRF key + require.NoError(t, client.CreateVRFKey(cltest.EmptyCLIContext())) + require.Equal(t, 2, len(r.Renders)) + k1 := *r.Renders[1].(*cmd.VRFKeyPresenter) + + // List the key and ensure it matches + require.NoError(t, client.ListVRFKeys(cltest.EmptyCLIContext())) + require.Equal(t, 3, len(r.Renders)) + keys = *r.Renders[2].(*cmd.VRFKeyPresenters) + AssertKeysEqual(t, k1, keys[0]) + + // Create another key + require.NoError(t, client.CreateVRFKey(cltest.EmptyCLIContext())) + require.Equal(t, 4, len(r.Renders)) + k2 := *r.Renders[3].(*cmd.VRFKeyPresenter) + + // Ensure the list is valid + require.NoError(t, client.ListVRFKeys(cltest.EmptyCLIContext())) + require.Equal(t, 5, len(r.Renders)) + keys = *r.Renders[4].(*cmd.VRFKeyPresenters) + require.Contains(t, []string{keys[0].ID, keys[1].ID}, k1.ID) + require.Contains(t, []string{keys[0].ID, keys[1].ID}, k2.ID) + + // Now do a hard delete and ensure its completely removes the key + set := flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteVRFKey, set, "") + + require.NoError(t, set.Parse([]string{k2.Compressed})) + require.NoError(t, set.Set("hard", "true")) + require.NoError(t, set.Set("yes", "true")) + + c := cli.NewContext(nil, set, nil) + err := client.DeleteVRFKey(c) + require.NoError(t, err) + // Should return the deleted key + require.Equal(t, 6, len(r.Renders)) + deletedKey := *r.Renders[5].(*cmd.VRFKeyPresenter) + AssertKeysEqual(t, k2, deletedKey) + // Should NOT be in the DB as archived + allKeys, err := app.KeyStore.VRF().GetAll() + require.NoError(t, err) + assert.Equal(t, 1, len(allKeys)) +} + +func TestVRF_ImportExport(t *testing.T) { + 
t.Parallel() + // Test application boots with vrf password loaded in memory. + // i.e. as if a user had booted with --vrfpassword= + app := startNewApplicationV2(t, nil) + client, r := app.NewShellAndRenderer() + t.Log(client, r) + + // Create a key (encrypted with cltest.VRFPassword) + require.NoError(t, client.CreateVRFKey(cltest.EmptyCLIContext())) + require.Equal(t, 1, len(r.Renders)) + k1 := *r.Renders[0].(*cmd.VRFKeyPresenter) + t.Log(k1.Compressed) + + // Export it, encrypted with cltest.Password instead + keyName := "vrfkey1" + set := flag.NewFlagSet("test VRF export", 0) + flagSetApplyFromAction(client.ExportVRFKey, set, "") + + require.NoError(t, set.Parse([]string{k1.Compressed})) // Arguments + require.NoError(t, set.Set("new-password", "../internal/fixtures/correct_password.txt")) + require.NoError(t, set.Set("output", keyName)) + + c := cli.NewContext(nil, set, nil) + require.NoError(t, client.ExportVRFKey(c)) + // File exists + require.NoError(t, utils.JustError(os.Stat(keyName))) + t.Cleanup(func() { + os.Remove(keyName) + }) + + // Should error if we try to import a duplicate key + importSet := flag.NewFlagSet("test VRF import", 0) + flagSetApplyFromAction(client.ImportVRFKey, importSet, "") + + require.NoError(t, importSet.Parse([]string{keyName})) + require.NoError(t, importSet.Set("old-password", "../internal/fixtures/correct_password.txt")) + + importCli := cli.NewContext(nil, importSet, nil) + require.Error(t, client.ImportVRFKey(importCli)) + + // Lets delete the key and import it + set = flag.NewFlagSet("test", 0) + flagSetApplyFromAction(client.DeleteVRFKey, set, "") + + require.NoError(t, set.Parse([]string{k1.Compressed})) + require.NoError(t, set.Set("hard", "true")) + require.NoError(t, set.Set("yes", "true")) + + require.NoError(t, client.DeleteVRFKey(cli.NewContext(nil, set, nil))) + // Should succeed + require.NoError(t, client.ImportVRFKey(importCli)) + require.NoError(t, client.ListVRFKeys(cltest.EmptyCLIContext())) + 
require.Equal(t, 4, len(r.Renders)) + keys := *r.Renders[3].(*cmd.VRFKeyPresenters) + AssertKeysEqualNoTimestamps(t, k1, keys[0]) +} diff --git a/core/config/app_config.go b/core/config/app_config.go new file mode 100644 index 00000000..648939b8 --- /dev/null +++ b/core/config/app_config.go @@ -0,0 +1,65 @@ +package config + +import ( + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" + "go.uber.org/zap/zapcore" +) + +// nolint +var ( + ErrEnvUnset = errors.New("env var unset") +) + +type LogfFn func(string, ...any) + +type AppConfig interface { + AppID() uuid.UUID + RootDir() string + ShutdownGracePeriod() time.Duration + InsecureFastScrypt() bool + EVMEnabled() bool + EVMRPCEnabled() bool + CosmosEnabled() bool + SolanaEnabled() bool + StarkNetEnabled() bool + + Validate() error + ValidateDB() error + LogConfiguration(log, warn LogfFn) + SetLogLevel(lvl zapcore.Level) error + SetLogSQL(logSQL bool) + SetPasswords(keystore, vrf *string) + + AuditLogger() AuditLogger + AutoPprof() AutoPprof + Database() Database + Feature() Feature + FluxMonitor() FluxMonitor + Insecure() Insecure + JobPipeline() JobPipeline + Keeper() Keeper + Log() Log + Mercury() Mercury + OCR() OCR + OCR2() OCR2 + P2P() P2P + Password() Password + Prometheus() Prometheus + Pyroscope() Pyroscope + Sentry() Sentry + TelemetryIngress() TelemetryIngress + Threshold() Threshold + WebServer() WebServer + Tracing() Tracing +} + +type DatabaseBackupMode string + +var ( + DatabaseBackupModeNone DatabaseBackupMode = "none" + DatabaseBackupModeLite DatabaseBackupMode = "lite" + DatabaseBackupModeFull DatabaseBackupMode = "full" +) diff --git a/core/config/audit_logger_config.go b/core/config/audit_logger_config.go new file mode 100644 index 00000000..5f8c2812 --- /dev/null +++ b/core/config/audit_logger_config.go @@ -0,0 +1,14 @@ +package config + +import ( + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +type 
AuditLogger interface { + Enabled() bool + ForwardToUrl() (commonconfig.URL, error) + Environment() string + JsonWrapperKey() string + Headers() (models.ServiceHeaders, error) +} diff --git a/core/config/auto_pprof_config.go b/core/config/auto_pprof_config.go new file mode 100644 index 00000000..785ee660 --- /dev/null +++ b/core/config/auto_pprof_config.go @@ -0,0 +1,21 @@ +package config + +import ( + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type AutoPprof interface { + BlockProfileRate() int + CPUProfileRate() int + Enabled() bool + GatherDuration() commonconfig.Duration + GatherTraceDuration() commonconfig.Duration + GoroutineThreshold() int + MaxProfileSize() utils.FileSize + MemProfileRate() int + MemThreshold() utils.FileSize + MutexProfileFraction() int + PollInterval() commonconfig.Duration + ProfileRoot() string +} diff --git a/core/config/config_internal_test.go b/core/config/config_internal_test.go new file mode 100644 index 00000000..2a7f522c --- /dev/null +++ b/core/config/config_internal_test.go @@ -0,0 +1,85 @@ +package config + +import ( + "math/big" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/pluginv3.0/v2/core/config/parse" +) + +func TestStore_bigIntParser(t *testing.T) { + val, err := parse.BigInt("0") + assert.NoError(t, err) + assert.Equal(t, new(big.Int).SetInt64(0), val) + + val, err = parse.BigInt("15") + assert.NoError(t, err) + assert.Equal(t, new(big.Int).SetInt64(15), val) + + val, err = parse.BigInt("x") + assert.Error(t, err) + assert.Nil(t, val) + + val, err = parse.BigInt("") + assert.Error(t, err) + assert.Nil(t, val) +} + +func TestStore_levelParser(t *testing.T) { + val, err := parse.LogLevel("ERROR") + assert.NoError(t, err) + assert.Equal(t, zapcore.ErrorLevel, val) + + val, err = parse.LogLevel("") + assert.NoError(t, err) + assert.Equal(t, 
zapcore.InfoLevel, val) + + val, err = parse.LogLevel("primus sucks") + assert.Error(t, err) + assert.Equal(t, val, zapcore.Level(0)) +} + +func TestStore_urlParser(t *testing.T) { + tests := []struct { + name string + input string + wantError bool + }{ + {"valid URL", "http://localhost:3000", false}, + {"invalid URL", ":", true}, + {"empty URL", "", false}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + i, err := parse.URL(test.input) + + if test.wantError { + assert.Error(t, err) + } else { + require.NoError(t, err) + w, ok := i.(*url.URL) + require.True(t, ok) + assert.Equal(t, test.input, w.String()) + } + }) + } +} + +func TestStore_boolParser(t *testing.T) { + val, err := parse.Bool("true") + assert.NoError(t, err) + assert.Equal(t, true, val) + + val, err = parse.Bool("false") + assert.NoError(t, err) + assert.Equal(t, false, val) + + _, err = parse.Bool("") + assert.Error(t, err) +} diff --git a/core/config/database_config.go b/core/config/database_config.go new file mode 100644 index 00000000..5350d4af --- /dev/null +++ b/core/config/database_config.go @@ -0,0 +1,44 @@ +package config + +import ( + "net/url" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" +) + +type Backup interface { + Dir() string + Frequency() time.Duration + Mode() DatabaseBackupMode + OnVersionUpgrade() bool + URL() *url.URL +} + +type Lock interface { + LockingMode() string + LeaseDuration() time.Duration + LeaseRefreshInterval() time.Duration +} + +type Listener interface { + MaxReconnectDuration() time.Duration + MinReconnectInterval() time.Duration + FallbackPollInterval() time.Duration +} + +type Database interface { + Backup() Backup + Listener() Listener + Lock() Lock + + DefaultIdleInTxSessionTimeout() time.Duration + DefaultLockTimeout() time.Duration + DefaultQueryTimeout() time.Duration + Dialect() dialects.DialectName + LogSQL() bool + MaxIdleConns() int + MaxOpenConns() int + MigrateDatabase() bool + URL() url.URL +} 
diff --git a/core/config/docs/README.md b/core/config/docs/README.md
new file mode 100644
index 00000000..0179e0d3
--- /dev/null
+++ b/core/config/docs/README.md
@@ -0,0 +1,7 @@
+# Configuration Documentation
+
+1. [core.toml](core.toml) & `chains-*.toml` files are sources of truth for config docs. *Update/add documentation here*.
+
+2. [CONFIG.md](/docs/CONFIG.md) is _generated_ from (1) by `config.GenerateDocs()` via [docs/main.go](docs/main.go). *Do not edit this file. Run `make config-docs` from the root of the repo*.
+
+See [example.toml](testdata/example.toml) and [example.md](testdata/example.md) for a minimal example.
diff --git a/core/config/docs/chains-cosmos.toml b/core/config/docs/chains-cosmos.toml
new file mode 100644
index 00000000..bcdb040a
--- /dev/null
+++ b/core/config/docs/chains-cosmos.toml
@@ -0,0 +1,33 @@
+[[Cosmos]]
+# ChainID is the Cosmos chain ID. Mandatory.
+ChainID = 'Malaga-420' # Example
+# Enabled enables this chain.
+Enabled = true # Default
+# Bech32Prefix is the human-readable prefix for addresses on this Cosmos chain. See https://docs.cosmos.network/v0.47/spec/addresses/bech32.
+Bech32Prefix = 'wasm' # Default
+# BlockRate is the average time between blocks.
+BlockRate = '6s' # Default
+# BlocksUntilTxTimeout is the number of blocks to wait before giving up on the tx getting confirmed.
+BlocksUntilTxTimeout = 30 # Default
+# ConfirmPollPeriod sets how often to check for tx confirmation.
+ConfirmPollPeriod = '1s' # Default
+# FallbackGasPrice sets a fallback gas price to use when the estimator is not available.
+FallbackGasPrice = '0.015' # Default
+# GasToken is the token denomination which is being used to pay gas fees on this chain.
+GasToken = 'ucosm' # Default
+# GasLimitMultiplier scales the estimated gas limit.
+GasLimitMultiplier = '1.5' # Default
+# MaxMsgsPerBatch limits the number of messages per transaction batch.
+MaxMsgsPerBatch = 100 # Default
+# OCR2CachePollPeriod is the rate to poll for the OCR2 state cache.
+OCR2CachePollPeriod = '4s' # Default
+# OCR2CacheTTL is the stale OCR2 cache deadline.
+OCR2CacheTTL = '1m' # Default
+# TxMsgTimeout is the maximum age for resending transactions before they expire.
+TxMsgTimeout = '10m' # Default
+
+[[Cosmos.Nodes]]
+# Name is a unique (per-chain) identifier for this node.
+Name = 'primary' # Example
+# TendermintURL is the HTTP(S) tendermint endpoint for this node.
+TendermintURL = 'http://tender.mint' # Example
diff --git a/core/config/docs/chains-evm.toml b/core/config/docs/chains-evm.toml
new file mode 100644
index 00000000..61571f74
--- /dev/null
+++ b/core/config/docs/chains-evm.toml
@@ -0,0 +1,371 @@
+# EVM defaults depend on ChainID:
+#
+# **EXTENDED**
+[[EVM]]
+# ChainID is the EVM chain ID. Mandatory.
+ChainID = '1' # Example
+# Enabled enables this chain.
+Enabled = true # Default
+# AutoCreateKey, if set to true, will ensure that there is always at least one transmit key for the given chain.
+AutoCreateKey = true # Default
+# **ADVANCED**
+# BlockBackfillDepth specifies the number of blocks before the current HEAD that the log broadcaster will try to re-consume logs from.
+BlockBackfillDepth = 10 # Default
+# BlockBackfillSkip enables skipping of very long backfills.
+BlockBackfillSkip = false # Default
+# ChainType is automatically detected from chain ID. Set this to force a certain chain type regardless of chain ID.
+# Available types: arbitrum, metis, optimismBedrock, xdai, celo, kroma, wemix, zksync, scroll
+ChainType = 'arbitrum' # Example
+# FinalityDepth is the number of blocks after which an ethereum transaction is considered "final". Note that the default is automatically set based on chain ID so it should not be necessary to change this under normal operation.
+# BlocksConsideredFinal determines how deeply we look back to ensure that transactions are confirmed onto the longest chain +# There is not a large performance penalty to setting this relatively high (on the order of hundreds) +# It is practically limited by the number of heads we store in the database and should be less than this with a comfortable margin. +# If a transaction is mined in a block more than this many blocks ago, and is reorged out, we will NOT retransmit this transaction and undefined behaviour can occur including gaps in the nonce sequence that require manual intervention to fix. +# Therefore this number represents a number of blocks we consider large enough that no re-org this deep will ever feasibly happen. +# +# Special cases: +# `FinalityDepth`=0 would imply that transactions can be final even before they were mined into a block. This is not supported. +# `FinalityDepth`=1 implies that transactions are final after we see them in one block. +# +# Examples: +# +# Transaction sending: +# A transaction is sent at block height 42 +# +# `FinalityDepth` is set to 5 +# A re-org occurs at height 44 starting at block 41, transaction is marked for rebroadcast +# A re-org occurs at height 46 starting at block 41, transaction is marked for rebroadcast +# A re-org occurs at height 47 starting at block 41, transaction is NOT marked for rebroadcast +FinalityDepth = 50 # Default +# FinalityTagEnabled means that the chain supports the finalized block tag when querying for a block. If FinalityTagEnabled is set to true for a chain, then FinalityDepth field is ignored. +# Finality for a block is solely defined by the finality related tags provided by the chain's RPC API. This is a placeholder and hasn't been implemented yet. +FinalityTagEnabled = false # Default +# **ADVANCED** +# FlagsContractAddress can optionally point to a [Flags contract](../contracts/src/v0.8/Flags.sol). 
If set, the node will lookup that contract for each job that supports flags contracts (currently OCR and FM jobs are supported). If the job's contractAddress is set as hibernating in the FlagsContractAddress address, it overrides the standard update parameters (such as heartbeat/threshold). +FlagsContractAddress = '0xae4E781a6218A8031764928E88d457937A954fC3' # Example +# LinkContractAddress is the canonical ERC-677 PLI token contract address on the given chain. Note that this is usually autodetected from chain ID. +LinkContractAddress = '0x538aAaB4ea120b2bC2fe5D296852D948F07D849e' # Example +# **ADVANCED** +# LogBackfillBatchSize sets the batch size for calling FilterLogs when we backfill missing logs. +LogBackfillBatchSize = 1000 # Default +# **ADVANCED** +# LogPollInterval works in conjunction with Feature.LogPoller. Controls how frequently the log poller polls for logs. Defaults to the block production rate. +LogPollInterval = '15s' # Default +# **ADVANCED** +# LogKeepBlocksDepth works in conjunction with Feature.LogPoller. Controls how many blocks the poller will keep, must be greater than FinalityDepth+1. +LogKeepBlocksDepth = 100000 # Default +# MinContractPayment is the minimum payment in PLI required to execute a direct request job. This can be overridden on a per-job basis. +MinContractPayment = '10000000000000 juels' # Default +# MinIncomingConfirmations is the minimum required confirmations before a log event will be consumed. +MinIncomingConfirmations = 3 # Default +# NonceAutoSync enables automatic nonce syncing on startup. Plugin nodes will automatically try to sync its local nonce with the remote chain on startup and fast forward if necessary. This is almost always safe but can be disabled in exceptional cases by setting this value to false. +NonceAutoSync = true # Default +# NoNewHeadsThreshold controls how long to wait after receiving no new heads before `NodePool` marks rpc endpoints as +# out-of-sync, and `HeadTracker` logs warnings. 
+# +# Set to zero to disable out-of-sync checking. +NoNewHeadsThreshold = '3m' # Default +# OperatorFactoryAddress is the address of the canonical operator forwarder contract on the given chain. Note that this is usually autodetected from chain ID. +OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' # Example +# RPCDefaultBatchSize is the default batch size for batched RPC calls. +RPCDefaultBatchSize = 250 # Default +# **ADVANCED** +# RPCBlockQueryDelay controls the number of blocks to trail behind head in the block history estimator and balance monitor. +# For example, if this is set to 3, and we receive block 10, block history estimator will fetch block 7. +# +# CAUTION: You might be tempted to set this to 0 to use the latest possible +# block, but it is possible to receive a head BEFORE that block is actually +# available from the connected node via RPC, due to race conditions in the code of the remote ETH node. In this case you will get false +# "zero" blocks that are missing transactions. +RPCBlockQueryDelay = 1 # Default + +[EVM.Transactions] +# ForwardersEnabled enables or disables sending transactions through forwarder contracts. +ForwardersEnabled = false # Default +# MaxInFlight controls how many transactions are allowed to be "in-flight" i.e. broadcast but unconfirmed at any one time. You can consider this a form of transaction throttling. +# +# The default is set conservatively at 16 because this is a pessimistic minimum that both geth and parity will hold without evicting local transactions. If your node is falling behind and you need higher throughput, you can increase this setting, but you MUST make sure that your ETH node is configured properly otherwise you can get nonce gapped and your node will get stuck. +# +# 0 value disables the limit. Use with caution. 
+MaxInFlight = 16 # Default +# MaxQueued is the maximum number of unbroadcast transactions per key that are allowed to be enqueued before jobs will start failing and rejecting send of any further transactions. This represents a sanity limit and generally indicates a problem with your ETH node (transactions are not getting mined). +# +# Do NOT blindly increase this value thinking it will fix things if you start hitting this limit because transactions are not getting mined, you will instead only make things worse. +# +# In deployments with very high burst rates, or on chains with large re-orgs, you _may_ consider increasing this. +# +# 0 value disables any limit on queue size. Use with caution. +MaxQueued = 250 # Default +# ReaperInterval controls how often the EthTx reaper will run. +ReaperInterval = '1h' # Default +# ReaperThreshold indicates how old an EthTx ought to be before it can be reaped. +ReaperThreshold = '168h' # Default +# ResendAfterThreshold controls how long to wait before re-broadcasting a transaction that has not yet been confirmed. +ResendAfterThreshold = '1m' # Default + +[EVM.BalanceMonitor] +# Enabled balance monitoring for all keys. +Enabled = true # Default + +[EVM.GasEstimator] +# Mode controls what type of gas estimator is used. +# +# - `FixedPrice` uses static configured values for gas price (can be set via API call). +# - `BlockHistory` dynamically adjusts default gas price based on heuristics from mined blocks. +# - `L2Suggested` mode is deprecated and replaced with `SuggestedPrice`. +# - `SuggestedPrice` is a mode which uses the gas price suggested by the rpc endpoint via `eth_gasPrice`. +# - `Arbitrum` is a special mode only for use with Arbitrum blockchains. It uses the suggested gas price (up to `ETH_MAX_GAS_PRICE_WEI`, with `1000 gwei` default) as well as an estimated gas limit (up to `ETH_GAS_LIMIT_MAX`, with `1,000,000,000` default). +# +# Plugin nodes decide what gas price to use using an `Estimator`. 
It ships with several simple and battle-hardened built-in estimators that should work well for almost all use-cases. Note that estimators will change their behaviour slightly depending on if you are in EIP-1559 mode or not. +# +# You can also use your own estimator for gas price by selecting the `FixedPrice` estimator and using the exposed API to set the price. +# +# An important point to note is that the Plugin node does _not_ ship with built-in support for go-ethereum's `estimateGas` call. This is for several reasons, including security and reliability. We have found empirically that it is not generally safe to rely on the remote ETH node's idea of what gas price should be. +Mode = 'BlockHistory' # Default +# PriceDefault is the default gas price to use when submitting transactions to the blockchain. Will be overridden by the built-in `BlockHistoryEstimator` if enabled, and might be increased if gas bumping is enabled. +# +# (Only applies to legacy transactions) +# +# Can be used with the `plugin setgasprice` to be updated while the node is still running. +PriceDefault = '20 gwei' # Default +# PriceMax is the maximum gas price. Plugin nodes will never pay more than this for a transaction. +# This applies to both legacy and EIP1559 transactions. +# Note that it is impossible to disable the maximum limit. Setting this value to zero will prevent paying anything for any transaction (which can be useful in some rare cases). +# Most chains by default have the maximum set to 2**256-1 Wei which is the maximum allowed gas price on EVM-compatible chains, and is so large it may as well be unlimited. +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' # Default +# PriceMin is the minimum gas price. Plugin nodes will never pay less than this for a transaction. +# +# (Only applies to legacy transactions) +# +# It is possible to force the Plugin node to use a fixed gas price by setting a combination of these, e.g. 
+# +# ```toml +# EIP1559DynamicFees = false +# PriceMax = 100 +# PriceMin = 100 +# PriceDefault = 100 +# BumpThreshold = 0 +# Mode = 'FixedPrice' +# ``` +PriceMin = '1 gwei' # Default +# LimitDefault sets default gas limit for outgoing transactions. This should not need to be changed in most cases. +# Some job types, such as Keeper jobs, might set their own gas limit unrelated to this value. +LimitDefault = 500_000 # Default +# LimitMax sets a maximum for _estimated_ gas limits. This currently only applies to `Arbitrum` `GasEstimatorMode`. +LimitMax = 500_000 # Default +# LimitMultiplier is the factor by which a transaction's GasLimit is multiplied before transmission. So if the value is 1.1, and the GasLimit for a transaction is 10, 10% will be added before transmission. +# +# This factor is always applied, so includes L2 transactions which uses a default gas limit of 1 and is also applied to `LimitDefault`. +LimitMultiplier = '1.0' # Default +# LimitTransfer is the gas limit used for an ordinary ETH transfer. +LimitTransfer = 21_000 # Default +# BumpMin is the minimum fixed amount of wei by which gas is bumped on each transaction attempt. +BumpMin = '5 gwei' # Default +# BumpPercent is the percentage by which to bump gas on a transaction that has exceeded `BumpThreshold`. The larger of `BumpPercent` and `BumpMin` is taken for gas bumps. +# +# The `SuggestedPriceEstimator` adds the larger of `BumpPercent` and `BumpMin` on top of the price provided by the RPC when bumping a transaction's gas. +BumpPercent = 20 # Default +# BumpThreshold is the number of blocks to wait for a transaction stuck in the mempool before automatically bumping the gas price. Set to 0 to disable gas bumping completely. +BumpThreshold = 3 # Default +# BumpTxDepth is the number of transactions to gas bump starting from oldest. Set to 0 for no limit (i.e. bump all). Can not be greater than EVM.Transactions.MaxInFlight. If not set, defaults to EVM.Transactions.MaxInFlight. 
+BumpTxDepth = 16 # Example
+# EIP1559DynamicFees forces EIP-1559 transaction mode. Enabling EIP-1559 mode can help reduce gas costs on chains that support it. This is supported only on official Ethereum mainnet and testnets. It is not recommended to enable this setting on Polygon because the EIP-1559 fee market appears to be broken on all Polygon chains and EIP-1559 transactions are less likely to be included than legacy transactions.
+#
+# #### Technical details
+#
+# Plugin nodes include experimental support for submitting transactions using type 0x2 (EIP-1559) envelope.
+#
+# EIP-1559 mode is enabled by default on the Ethereum Mainnet, but can be enabled on a per-chain basis or globally.
+#
+# This might help to save gas on spikes. Plugin nodes should react faster on the upleg and avoid overpaying on the downleg. It might also be possible to set `EVM.GasEstimator.BlockHistory.BatchSize` to a smaller value such as 12 or even 6 because tip cap should be a more consistent indicator of inclusion time than total gas price. This would make Plugin nodes more responsive and should reduce response time variance. Some experimentation is required to find optimum settings.
+#
+# Set with caution, if you set this on a chain that does not actually support EIP-1559 your node will be broken.
+#
+# In EIP-1559 mode, the total price for the transaction is the minimum of base fee + tip cap and fee cap. More information can be found on the [official EIP](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md).
+#
+# Plugin's implementation of EIP-1559 works as follows:
+#
+# If you are using FixedPriceEstimator:
+# - With gas bumping disabled, it will submit all transactions with `feecap=PriceMax` and `tipcap=GasTipCapDefault`
+# - With gas bumping enabled, it will submit all transactions initially with `feecap=GasFeeCapDefault` and `tipcap=GasTipCapDefault`.
+# +# If you are using BlockHistoryEstimator (default for most chains): +# - With gas bumping disabled, it will submit all transactions with `feecap=PriceMax` and `tipcap=` +# - With gas bumping enabled (default for most chains) it will submit all transactions initially with `feecap = ( current block base fee * (1.125 ^ N) + tipcap )` where N is configurable by setting `EVM.GasEstimator.BlockHistory.EIP1559FeeCapBufferBlocks` but defaults to `gas bump threshold+1` and `tipcap=` +# +# Bumping works as follows: +# +# - Increase tipcap by `max(tipcap * (1 + BumpPercent), tipcap + BumpMin)` +# - Increase feecap by `max(feecap * (1 + BumpPercent), feecap + BumpMin)` +# +# A quick note on terminology - Plugin nodes use the same terms used internally by go-ethereum source code to describe various prices. This is not the same as the externally used terms. For reference: +# +# - Base Fee Per Gas = BaseFeePerGas +# - Max Fee Per Gas = FeeCap +# - Max Priority Fee Per Gas = TipCap +# +# In EIP-1559 mode, the following changes occur to how configuration works: +# +# - All new transactions will be sent as type 0x2 transactions specifying a TipCap and FeeCap. Be aware that existing pending legacy transactions will continue to be gas bumped in legacy mode. 
+# - `BlockHistoryEstimator` will apply its calculations (gas percentile etc) to the TipCap and this value will be used for new transactions (GasPrice will be ignored)
+# - `FixedPriceEstimator` will use `GasTipCapDefault` instead of `GasPriceDefault` for the tip cap
+# - `FixedPriceEstimator` will use `GasFeeCapDefault` instead of `GasPriceDefault` for the fee cap
+# - `PriceMin` is ignored for new transactions and `GasTipCapMinimum` is used instead (default 0)
+# - `PriceMax` still represents the absolute upper limit that Plugin will ever spend (total) on a single tx
+# - `Keeper.GasPriceBufferPercent` is ignored in EIP-1559 mode and `Keeper.GasTipCapBufferPercent` is used instead
+EIP1559DynamicFees = false # Default
+# FeeCapDefault controls the fixed initial fee cap, if EIP1559 mode is enabled and `FixedPrice` gas estimator is used.
+FeeCapDefault = '100 gwei' # Default
+# TipCapDefault is the default gas tip to use when submitting transactions to the blockchain. Will be overridden by the built-in `BlockHistoryEstimator` if enabled, and might be increased if gas bumping is enabled.
+#
+# (Only applies to EIP-1559 transactions)
+TipCapDefault = '1 wei' # Default
+# TipCapMin is the minimum gas tip to use when submitting transactions to the blockchain.
+#
+# (Only applies to EIP-1559 transactions)
+TipCapMin = '1 wei' # Default
+
+[EVM.GasEstimator.LimitJobType]
+# OCR overrides LimitDefault for OCR jobs.
+OCR = 100_000 # Example
+# OCR2 overrides LimitDefault for OCR2 jobs.
+OCR2 = 100_000 # Example
+# DR overrides LimitDefault for Direct Request jobs.
+DR = 100_000 # Example
+# VRF overrides LimitDefault for VRF jobs.
+VRF = 100_000 # Example
+# FM overrides LimitDefault for Flux Monitor jobs.
+FM = 100_000 # Example
+# Keeper overrides LimitDefault for Keeper jobs.
+Keeper = 100_000 # Example
+
+
+# These settings allow you to configure how your node calculates gas prices when using the block history estimator.
+# In most cases, leaving these values at their defaults should give good results.
+[EVM.GasEstimator.BlockHistory]
+# BatchSize sets the maximum number of blocks to fetch in one batch in the block history estimator.
+# If the `BatchSize` variable is set to 0, it defaults to `EVM.RPCDefaultBatchSize`.
+BatchSize = 25 # Default
+# BlockHistorySize controls the number of past blocks to keep in memory to use as a basis for calculating a percentile gas price.
+BlockHistorySize = 8 # Default
+# CheckInclusionBlocks is the number of recent blocks to use to detect if there is a transaction propagation/connectivity issue, and to prevent bumping in these cases.
+# This can help avoid the situation where RPC nodes are not propagating transactions for some non-price-related reason (e.g. go-ethereum bug, networking issue etc) and bumping gas would not help.
+#
+# Set to zero to disable connectivity checking completely.
+CheckInclusionBlocks = 12 # Default
+# CheckInclusionPercentile controls the percentile that a transaction must have been higher than for all the blocks in the inclusion check window in order to register as a connectivity issue.
+#
+# For example, if CheckInclusionBlocks=12 and CheckInclusionPercentile=90 then further bumping will be prevented for any transaction with any attempt that has a higher price than the 90th percentile for the most recent 12 blocks.
+CheckInclusionPercentile = 90 # Default
+# **ADVANCED**
+# EIP1559FeeCapBufferBlocks controls the buffer blocks to add to the current base fee when sending a transaction. By default, the gas bumping threshold + 1 block is used.
+#
+# (Only applies to EIP-1559 transactions)
+EIP1559FeeCapBufferBlocks = 13 # Example
+# TransactionPercentile specifies gas price to choose. E.g. if the block history contains four transactions with gas prices `[100, 200, 300, 400]` then picking 25 for this number will give a value of 200. 
If the calculated gas price is higher than `GasPriceDefault` then the higher price will be used as the base price for new transactions.
+#
+# Must be in range 0-100.
+#
+# Only has an effect if gas updater is enabled.
+#
+# Think of this number as an indicator of how aggressive you want your node to price its transactions.
+#
+# Setting this number higher will cause the Plugin node to select higher gas prices.
+#
+# Setting it lower will tend to set lower gas prices.
+TransactionPercentile = 60 # Default
+
+# The head tracker continually listens for new heads from the chain.
+#
+# In addition to these settings, it logs warnings if `EVM.NoNewHeadsThreshold` is exceeded without any new blocks being emitted.
+[EVM.HeadTracker]
+# HistoryDepth tracks the top N block numbers to keep in the `heads` database table.
+# Note that this can easily result in MORE than N records since in the case of re-orgs we keep multiple heads for a particular block height.
+# This number should be at least as large as `FinalityDepth`.
+# There may be a small performance penalty to setting this to something very large (10,000+)
+HistoryDepth = 100 # Default
+# MaxBufferSize is the maximum number of heads that may be
+# buffered in front of the head tracker before older heads start to be
+# dropped. You may think of it as something like the maximum permittable "lag"
+# for the head tracker before we start dropping heads to keep up.
+MaxBufferSize = 3 # Default
+# **ADVANCED**
+# SamplingInterval means that head tracker callbacks will at maximum be made once in every window of this duration. This is a performance optimisation for fast chains. Set to 0 to disable sampling entirely.
+SamplingInterval = '1s' # Default
+
+[[EVM.KeySpecific]]
+# Key is the account to apply these settings to
+Key = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example
+# GasEstimator.PriceMax overrides the maximum gas price for this key. See EVM.GasEstimator.PriceMax. 
+GasEstimator.PriceMax = '79 gwei' # Example + +# The node pool manages multiple RPC endpoints. +# +# In addition to these settings, `EVM.NoNewHeadsThreshold` controls how long to wait after receiving no new heads before marking the node as out-of-sync. +[EVM.NodePool] +# PollFailureThreshold indicates how many consecutive polls must fail in order to mark a node as unreachable. +# +# Set to zero to disable poll checking. +PollFailureThreshold = 5 # Default +# PollInterval controls how often to poll the node to check for liveness. +# +# Set to zero to disable poll checking. +PollInterval = '10s' # Default +# SelectionMode controls node selection strategy: +# - HighestHead: use the node with the highest head number +# - RoundRobin: rotate through nodes, per-request +# - PriorityLevel: use the node with the smallest order number +# - TotalDifficulty: use the node with the greatest total difficulty +SelectionMode = 'HighestHead' # Default +# SyncThreshold controls how far a node may lag behind the best node before being marked out-of-sync. +# Depending on `SelectionMode`, this represents a difference in the number of blocks (`HighestHead`, `RoundRobin`, `PriorityLevel`), or total difficulty (`TotalDifficulty`). +# +# Set to 0 to disable this check. +SyncThreshold = 5 # Default +# LeaseDuration is the minimum duration that the selected "best" node (as defined by SelectionMode) will be used, +# before switching to a better one if available. It also controls how often the lease check is done. +# Setting this to a low value (under 1m) might cause RPC to switch too aggressively. +# Recommended value is over 5m +# +# Set to '0s' to disable +LeaseDuration = '0s' # Default + +[EVM.OCR] +# ContractConfirmations sets `OCR.ContractConfirmations` for this EVM chain. +ContractConfirmations = 4 # Default +# ContractTransmitterTransmitTimeout sets `OCR.ContractTransmitterTransmitTimeout` for this EVM chain. 
+ContractTransmitterTransmitTimeout = '10s' # Default
+# DatabaseTimeout sets `OCR.DatabaseTimeout` for this EVM chain.
+DatabaseTimeout = '10s' # Default
+# **ADVANCED**
+# DeltaCOverride (and `DeltaCJitterOverride`) determine the config override DeltaC.
+# DeltaC is the maximum age of the latest report in the contract. If the maximum age is exceeded, a new report will be
+# created by the report generation protocol.
+DeltaCOverride = "168h" # Default
+# **ADVANCED**
+# DeltaCJitterOverride is the range for jitter to add to `DeltaCOverride`.
+DeltaCJitterOverride = "1h" # Default
+# ObservationGracePeriod sets `OCR.ObservationGracePeriod` for this EVM chain.
+ObservationGracePeriod = '1s' # Default
+
+[[EVM.Nodes]]
+# Name is a unique (per-chain) identifier for this node.
+Name = 'foo' # Example
+# WSURL is the WS(S) endpoint for this node. Required for primary nodes.
+WSURL = 'wss://web.socket/test' # Example
+# HTTPURL is the HTTP(S) endpoint for this node. Required for all nodes.
+HTTPURL = 'https://foo.web' # Example
+# SendOnly limits usage to sending transaction broadcasts only. With this enabled, only HTTPURL is required, and WSURL is not used.
+SendOnly = false # Default
+# Order of the node in the pool, takes effect if `SelectionMode` is `PriorityLevel` or will be used as a tie-breaker for `HighestHead` and `TotalDifficulty`
+Order = 100 # Default
+
+[EVM.OCR2.Automation]
+# GasLimit controls the gas limit for transmit transactions from ocr2automation job.
+GasLimit = 5400000 # Default
+
+[EVM.ChainWriter]
+# FromAddress is the address of the transmitter key to use for workflow writes.
+FromAddress = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example
+# ForwarderAddress is the keystone forwarder contract address on chain. 
+ForwarderAddress = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example diff --git a/core/config/docs/chains-solana.toml b/core/config/docs/chains-solana.toml new file mode 100644 index 00000000..d0aae876 --- /dev/null +++ b/core/config/docs/chains-solana.toml @@ -0,0 +1,42 @@ +[[Solana]] +# ChainID is the Solana chain ID. Must be one of: mainnet, testnet, devnet, localnet. Mandatory. +ChainID = 'mainnet' # Example +# Enabled enables this chain. +Enabled = false # Default +# BalancePollPeriod is the rate to poll for SOL balance and update Prometheus metrics. +BalancePollPeriod = '5s' # Default +# ConfirmPollPeriod is the rate to poll for signature confirmation. +ConfirmPollPeriod = '500ms' # Default +# OCR2CachePollPeriod is the rate to poll for the OCR2 state cache. +OCR2CachePollPeriod = '1s' # Default +# OCR2CacheTTL is the stale OCR2 cache deadline. +OCR2CacheTTL = '1m' # Default +# TxTimeout is the timeout for sending txes to an RPC endpoint. +TxTimeout = '1m' # Default +# TxRetryTimeout is the duration for tx manager to attempt rebroadcasting to RPC, before giving up. +TxRetryTimeout = '10s' # Default +# TxConfirmTimeout is the duration to wait when confirming a tx signature, before discarding as unconfirmed. +TxConfirmTimeout = '30s' # Default +# SkipPreflight enables or disables preflight checks when sending txs. +SkipPreflight = true # Default +# Commitment is the confirmation level for solana state and transactions. ([documentation](https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment)) +Commitment = 'confirmed' # Default +# MaxRetries is the maximum number of times the RPC node will automatically rebroadcast a tx. +# The default is 0 for custom txm rebroadcasting method, set to -1 to use the RPC node's default retry strategy. 
+MaxRetries = 0 # Default
+# FeeEstimatorMode is the method used to determine the base fee
+FeeEstimatorMode = 'fixed' # Default
+# ComputeUnitPriceMax is the maximum price per compute unit that a transaction can be bumped to
+ComputeUnitPriceMax = 1000 # Default
+# ComputeUnitPriceMin is the minimum price per compute unit that transaction can have
+ComputeUnitPriceMin = 0 # Default
+# ComputeUnitPriceDefault is the default price per compute unit price, and the starting base fee when FeeEstimatorMode = 'fixed'
+ComputeUnitPriceDefault = 0 # Default
+# FeeBumpPeriod is the amount of time before a tx is retried with a fee bump
+FeeBumpPeriod = '3s' # Default
+
+[[Solana.Nodes]]
+# Name is a unique (per-chain) identifier for this node.
+Name = 'primary' # Example
+# URL is the HTTP(S) endpoint for this node.
+URL = 'http://solana.web' # Example
diff --git a/core/config/docs/chains-starknet.toml b/core/config/docs/chains-starknet.toml
new file mode 100644
index 00000000..8694290a
--- /dev/null
+++ b/core/config/docs/chains-starknet.toml
@@ -0,0 +1,21 @@
+[[Starknet]]
+# ChainID is the Starknet chain ID.
+ChainID = 'foobar' # Example
+# Enabled enables this chain.
+Enabled = true # Default
+# OCR2CachePollPeriod is the rate to poll for the OCR2 state cache.
+OCR2CachePollPeriod = '5s' # Default
+# OCR2CacheTTL is the stale OCR2 cache deadline.
+OCR2CacheTTL = '1m' # Default
+# RequestTimeout is the RPC client timeout.
+RequestTimeout = '10s' # Default
+# TxTimeout is the timeout for sending txes to an RPC endpoint.
+TxTimeout = '10s' # Default
+# ConfirmationPoll is how often the confirmer checks for tx inclusion on chain.
+ConfirmationPoll = '5s' # Default
+
+[[Starknet.Nodes]]
+# Name is a unique (per-chain) identifier for this node.
+Name = 'primary' # Example
+# URL is the base HTTP(S) endpoint for this node. 
+URL = 'http://stark.node' # Example
diff --git a/core/config/docs/cmd/generate/main.go b/core/config/docs/cmd/generate/main.go
new file mode 100644
index 00000000..8e0eadfc
--- /dev/null
+++ b/core/config/docs/cmd/generate/main.go
@@ -0,0 +1,39 @@
+// Docs prints core node documentation and/or a list of errors.
+// The docs are Markdown generated from Toml - see config.GenerateConfig & config.GenerateSecrets.
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/goplugin/pluginv3.0/v2/core/config/docs"
+)
+
+// outDir is the directory where CONFIG.md and SECRETS.md are written.
+var outDir = flag.String("o", "", "output directory")
+
+func main() {
+	flag.Parse()
+
+	c, err := docs.GenerateConfig()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "invalid config docs: %v\n", err)
+		os.Exit(1)
+	}
+	s, err := docs.GenerateSecrets()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "invalid secrets docs: %v\n", err)
+		os.Exit(1)
+	}
+	// filepath.Join builds an OS-correct file path; path.Join is only for slash-separated (URL-style) paths.
+	if err = os.WriteFile(filepath.Join(*outDir, "CONFIG.md"), []byte(c), 0600); err != nil {
+		fmt.Fprintf(os.Stderr, "failed to write config docs: %v\n", err)
+		os.Exit(1)
+	}
+	if err = os.WriteFile(filepath.Join(*outDir, "SECRETS.md"), []byte(s), 0600); err != nil {
+		fmt.Fprintf(os.Stderr, "failed to write secrets docs: %v\n", err)
+		os.Exit(1)
+	}
+}
diff --git a/core/config/docs/core.toml b/core/config/docs/core.toml
new file mode 100644
index 00000000..ed06f825
--- /dev/null
+++ b/core/config/docs/core.toml
@@ -0,0 +1,577 @@
+# **ADVANCED**
+# InsecureFastScrypt causes all key stores to encrypt using "fast" scrypt params instead. This is insecure and only useful for local testing. DO NOT ENABLE THIS IN PRODUCTION.
+InsecureFastScrypt = false # Default
+# RootDir is the Plugin node's root directory. This is the default directory for logging, database backups, cookies, and other misc Plugin node files. Plugin nodes will always ensure this directory has 700 permissions because it might contain sensitive data.
+RootDir = '~/.plugin' # Default +# ShutdownGracePeriod is the maximum time allowed to shut down gracefully. If exceeded, the node will terminate immediately to avoid being SIGKILLed. +ShutdownGracePeriod = '5s' # Default + +[Feature] +# FeedsManager enables the feeds manager service. +FeedsManager = true # Default +# LogPoller enables the log poller, an experimental approach to processing logs, required if also using Evm.UseForwarders or OCR2. +LogPoller = false # Default +# UICSAKeys enables CSA Keys in the UI. +UICSAKeys = false # Default + +[Database] +# DefaultIdleInTxSessionTimeout is the maximum time allowed for a transaction to be open and idle before timing out. See Postgres `idle_in_transaction_session_timeout` for more details. +DefaultIdleInTxSessionTimeout = '1h' # Default +# DefaultLockTimeout is the maximum time allowed to wait for database lock of any kind before timing out. See Postgres `lock_timeout` for more details. +DefaultLockTimeout = '15s' # Default +# DefaultQueryTimeout is the maximum time allowed for standard queries before timing out. +DefaultQueryTimeout = '10s' # Default +# LogQueries tells the Plugin node to log database queries made using the default logger. SQL statements will be logged at `debug` level. Not all statements can be logged. The best way to get a true log of all SQL statements is to enable SQL statement logging on Postgres. +LogQueries = false # Default +# MaxIdleConns configures the maximum number of idle database connections that the Plugin node will keep open. Think of this as the baseline number of database connections per Plugin node instance. Increasing this number can help to improve performance under database-heavy workloads. +# +# Postgres has connection limits, so you must use caution when increasing this value. If you are running several instances of a Plugin node or another application on a single database server, you might run out of Postgres connection slots if you raise this value too high. 
+MaxIdleConns = 10 # Default +# MaxOpenConns configures the maximum number of database connections that a Plugin node will have open at any one time. Think of this as the maximum burst upper bound limit of database connections per Plugin node instance. Increasing this number can help to improve performance under database-heavy workloads. +# +# Postgres has connection limits, so you must use caution when increasing this value. If you are running several instances of a Plugin node or another application on a single database server, you might run out of Postgres connection slots if you raise this value too high. +MaxOpenConns = 20 # Default +# MigrateOnStartup controls whether a Plugin node will attempt to automatically migrate the database on boot. If you want more control over your database migration process, set this variable to `false` and manually migrate the database using the CLI `migrate` command instead. +MigrateOnStartup = true # Default + +# As a best practice, take regular database backups in case of accidental data loss. This best practice is especially important when you upgrade your Plugin node to a new version. Plugin nodes support automated database backups to make this process easier. +# +# NOTE: Dumps can cause high load and massive database latencies, which will negatively impact the normal functioning of the Plugin node. For this reason, it is recommended to set a `URL` and point it to a read replica if you enable automatic backups. +[Database.Backup] +# Mode sets the type of automatic database backup, which can be one of _none_, `lite`, or `full`. If enabled, the Plugin node will always dump a backup on every boot before running migrations. Additionally, it will automatically take database backups that overwrite the backup file for the given version at regular intervals if `Frequency` is set to a non-zero interval. +# +# _none_ - Disables backups. 
+# `lite` - Dumps small tables including configuration and keys that are essential for the node to function, which excludes historical data like job runs, transaction history, etc.
+# `full` - Dumps the entire database.
+#
+# It will write to a file like `'Dir'/backup/cl_backup_.dump`. There is one backup dump file per version of the Plugin node. If you upgrade the node, it will keep the backup taken right before the upgrade migration so you can restore to an older version if necessary.
+Mode = 'none' # Default
+# Dir sets the directory to use for saving the backup file. Use this if you want to save the backup file in a directory other than the default ROOT directory.
+Dir = 'test/backup/dir' # Example
+# OnVersionUpgrade enables automatic backups of the database before running migrations, when you are upgrading to a new version.
+OnVersionUpgrade = true # Default
+# Frequency sets the interval for database dumps, if set to a positive duration and `Mode` is not _none_.
+#
+# Set to `0` to disable periodic backups.
+Frequency = '1h' # Default
+
+# **ADVANCED**
+# These settings control the postgres event listener.
+[Database.Listener]
+# MaxReconnectDuration is the maximum duration to wait between reconnect attempts.
+MaxReconnectDuration = '10m' # Default
+# MinReconnectInterval controls the duration to wait before trying to re-establish the database connection after connection loss. After each consecutive failure this interval is doubled, until MaxReconnectDuration is reached. Successfully completing the connection establishment procedure resets the interval back to MinReconnectInterval.
+# FallbackPollInterval controls how often clients should manually poll as a fallback in case the postgres event was missed/dropped. 
+FallbackPollInterval = '30s' # Default + +# **ADVANCED** +# Ideally, you should use a container orchestration system like [Kubernetes](https://kubernetes.io/) to ensure that only one Plugin node instance can ever use a specific Postgres database. However, some node operators do not have the technical capacity to do this. Common use cases run multiple Plugin node instances in failover mode as recommended by our official documentation. The first instance takes a lock on the database and subsequent instances will wait trying to take this lock in case the first instance fails. +# +# - If your nodes or applications hold locks open for several hours or days, Postgres is unable to complete internal cleanup tasks. The Postgres maintainers explicitly discourage holding locks open for long periods of time. +# +# Because of the complications with advisory locks, Plugin nodes with v2.0 and later only support `lease` locking mode. The `lease` locking mode works using the following process: +# +# - Node A creates one row in the database with the client ID and updates it once per second. +# - Node B spinlocks and checks periodically to see if the client ID is too old. If the client ID is not updated after a period of time, node B assumes that node A failed and takes over. Node B becomes the owner of the row and updates the client ID once per second. +# - If node A comes back, it attempts to take out a lease, realizes that the database has been leased to another process, and exits the entire application immediately. +[Database.Lock] +# Enabled enables the database lock. +Enabled = true # Default +# LeaseDuration is how long the lease lock will last before expiring. +LeaseDuration = '10s' # Default +# LeaseRefreshInterval determines how often to refresh the lease lock. Also controls how often a standby node will check to see if it can grab the lease. +LeaseRefreshInterval = '1s' # Default + +[TelemetryIngress] +# UniConn toggles which ws connection style is used. 
+UniConn = true # Default +# Logging toggles verbose logging of the raw telemetry messages being sent. +Logging = false # Default +# BufferSize is the number of telemetry messages to buffer before dropping new ones. +BufferSize = 100 # Default +# MaxBatchSize is the maximum number of messages to batch into one telemetry request. +MaxBatchSize = 50 # Default +# SendInterval determines how often batched telemetry is sent to the ingress server. +SendInterval = '500ms' # Default +# SendTimeout is the max duration to wait for the request to complete when sending batch telemetry. +SendTimeout = '10s' # Default +# UseBatchSend toggles sending telemetry to the ingress server using the batch client. +UseBatchSend = true # Default + +[[TelemetryIngress.Endpoints]] # Example +# Network aka EVM, Solana, Starknet +Network = 'EVM' # Example +# ChainID of the network +ChainID = '111551111' # Example +# ServerPubKey is the public key of the telemetry server. +ServerPubKey = 'test-pub-key-111551111-evm' # Example +# URL is where to send telemetry. +URL = 'localhost-111551111-evm:9000' # Example + +[AuditLogger] +# Enabled determines if this logger should be configured at all +Enabled = false # Default +# ForwardToUrl is where you want to forward logs to +ForwardToUrl = 'http://localhost:9898' # Example +# JsonWrapperKey if set wraps the map of data under another single key to make parsing easier +JsonWrapperKey = 'event' # Example +# Headers is the set of headers you wish to pass along with each request +Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and a bar+*'] # Example + +[Log] +# Level determines both what is printed on the screen and what is written to the log file. +# +# The available levels are: +# - "debug": Useful for forensic debugging of issues. +# - "info": High-level informational messages. (default) +# - "warn": A mild error occurred that might require non-urgent action. 
Check these warnings semi-regularly to see if any of them require attention. These warnings usually happen due to factors outside of the control of the node operator. Examples: Unexpected responses from a remote API or misleading networking errors. +# - "error": An unexpected error occurred during the regular operation of a well-maintained node. Node operators might need to take action to remedy this error. Check these regularly to see if any of them require attention. Examples: Use of deprecated configuration options or incorrectly configured settings that cause a job to fail. +# - "crit": A critical error occurred. The node might be unable to function. Node operators should take immediate action to fix these errors. Examples: The node could not boot because a network socket could not be opened or the database became inaccessible. +# - "panic": An exceptional error occurred that could not be handled. If the node is unresponsive, node operators should try to restart their nodes and notify the Plugin team of a potential bug. +# - "fatal": The node encountered an unrecoverable problem and had to exit. +Level = 'info' # Default +# JSONConsole enables JSON logging. Otherwise, the log is saved in a human-friendly console format. +JSONConsole = false # Default +# UnixTS enables legacy unix timestamps. +# +# Previous versions of Plugin nodes wrote JSON logs with a unix timestamp. As of v1.1.0 and up, the default has changed to use ISO8601 timestamps for better readability. +UnixTS = false # Default + +[Log.File] +# Dir sets the log directory. By default, Plugin nodes write log data to `$ROOT/log.jsonl`. +Dir = '/my/log/directory' # Example +# MaxSize determines the log file's max size in megabytes before file rotation. Having this not set will disable logging to disk. If your disk doesn't have enough disk space, the logging will pause and the application will log errors until space is available again. 
+# +# Values must have suffixes with a unit like: `5120mb` (5,120 megabytes). If no unit suffix is provided, the value defaults to `b` (bytes). The list of valid unit suffixes are: +# +# - b (bytes) +# - kb (kilobytes) +# - mb (megabytes) +# - gb (gigabytes) +# - tb (terabytes) +MaxSize = '5120mb' # Default +# MaxAgeDays determines the log file's max age in days before file rotation. Keeping this config with the default value will not remove log files based on age. +MaxAgeDays = 0 # Default +# MaxBackups determines the maximum number of old log files to retain. Keeping this config with the default value retains all old log files. The `MaxAgeDays` variable can still cause them to get deleted. +MaxBackups = 1 # Default + +[WebServer] +# AuthenticationMethod defines which pluggable auth interface to use for user login and role assumption. Options include 'local' and 'ldap'. See docs for more details +AuthenticationMethod = 'local' # Default +# AllowOrigins controls the URLs Plugin nodes emit in the `Allow-Origins` header of its API responses. The setting can be a comma-separated list with no spaces. You might experience CORS issues if this is not set correctly. +# +# You should set this to the external URL that you use to access the Plugin UI. +# +# You can set `AllowOrigins = '*'` to allow the UI to work from any URL, but it is recommended for security reasons to make it explicit instead. +AllowOrigins = 'http://localhost:3000,http://localhost:6688' # Default +# BridgeCacheTTL controls the cache TTL for all bridge tasks to use old values in newer observations in case of intermittent failure. It's disabled by default. +BridgeCacheTTL = '0s' # Default +# BridgeResponseURL defines the URL for bridges to send a response to. This _must_ be set when using async external adapters. +# +# Usually this will be the same as the URL/IP and port you use to connect to the Plugin UI. 
+BridgeResponseURL = 'https://my-plugin-node.example.com:6688' # Example +# **ADVANCED** +# HTTPWriteTimeout controls how long the Plugin node's API server can hold a socket open for writing a response to an HTTP request. Sometimes, this must be increased for pprof. +HTTPWriteTimeout = '10s' # Default +# HTTPPort is the port used for the Plugin Node API, [CLI](/docs/configuration-variables/#cli-client), and GUI. +HTTPPort = 6688 # Default +# SecureCookies requires the use of secure cookies for authentication. Set to false to enable standard HTTP requests along with `TLSPort = 0`. +SecureCookies = true # Default +# SessionTimeout determines the amount of idle time to elapse before session cookies expire. This signs out GUI users from their sessions. +SessionTimeout = '15m' # Default +# SessionReaperExpiration represents how long an API session lasts before expiring and requiring a new login. +SessionReaperExpiration = '240h' # Default +# HTTPMaxSize defines the maximum size for HTTP requests and responses made by the node server. +HTTPMaxSize = '32768b' # Default +# StartTimeout defines the maximum amount of time the node will wait for a server to start. +StartTimeout = '15s' # Default +# ListenIP specifies the IP to bind the HTTP server to +ListenIP = '0.0.0.0' # Default + +# Optional LDAP config if WebServer.AuthenticationMethod is set to 'ldap' +# LDAP queries are all parameterized to support custom LDAP 'dn', 'cn', and attributes +[WebServer.LDAP] +# ServerTLS defines the option to require the secure ldaps +ServerTLS = true # Default +# SessionTimeout determines the amount of idle time to elapse before session cookies expire. This signs out GUI users from their sessions. 
+SessionTimeout = '15m0s' # Default +# QueryTimeout defines how long queries should wait before timing out, defined in seconds +QueryTimeout = '2m0s' # Default +# BaseUserAttr defines the base attribute used to populate LDAP queries such as "uid=$", default is example +BaseUserAttr = 'uid' # Default +# BaseDN defines the base LDAP 'dn' search filter to apply to every LDAP query, replace example,com with the appropriate LDAP server's structure +BaseDN = 'dc=custom,dc=example,dc=com' # Example +# UsersDN defines the 'dn' query to use when querying for the 'users' 'ou' group +UsersDN = 'ou=users' # Default +# GroupsDN defines the 'dn' query to use when querying for the 'groups' 'ou' group +GroupsDN = 'ou=groups' # Default +# ActiveAttribute is an optional user field to check truthiness for if a user is valid/active. This is only required if the LDAP provider lists inactive users as members of groups +ActiveAttribute = '' # Default +# ActiveAttributeAllowedValue is the value to check against for the above optional user attribute +ActiveAttributeAllowedValue = '' # Default +# AdminUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Admin' role +AdminUserGroupCN = 'NodeAdmins' # Default +# EditUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Edit' role +EditUserGroupCN = 'NodeEditors' # Default +# RunUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Run' role +RunUserGroupCN = 'NodeRunners' # Default +# ReadUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Read' role +ReadUserGroupCN = 'NodeReadOnly' # Default +# UserApiTokenEnabled enables the users to issue API tokens with the same access of their role +UserApiTokenEnabled = false # Default +# UserAPITokenDuration is the duration of time an API token is active for before expiring +UserAPITokenDuration = '240h0m0s' # Default +# UpstreamSyncInterval is the interval at which the background LDAP sync task will be called. 
A '0s' value disables the background sync being run on an interval. This check is already performed during login/logout actions, all sessions and API tokens stored in the local ldap tables are updated to match the remote server +UpstreamSyncInterval = '0s' # Default +# UpstreamSyncRateLimit defines a duration to limit the number of query/API calls to the upstream LDAP provider. It prevents the sync functionality from being called multiple times within the defined duration +UpstreamSyncRateLimit = '2m0s' # Default + +[WebServer.RateLimit] +# Authenticated defines the threshold to which authenticated requests get limited. More than this many authenticated requests per `AuthenticatedRateLimitPeriod` will be rejected. +Authenticated = 1000 # Default +# AuthenticatedPeriod defines the period to which authenticated requests get limited. +AuthenticatedPeriod = '1m' # Default +# Unauthenticated defines the threshold to which authenticated requests get limited. More than this many unauthenticated requests per `UnAuthenticatedRateLimitPeriod` will be rejected. +Unauthenticated = 5 # Default +# UnauthenticatedPeriod defines the period to which unauthenticated requests get limited. +UnauthenticatedPeriod = '20s' # Default + +# The Operator UI frontend supports enabling Multi Factor Authentication via Webauthn per account. When enabled, logging in will require the account password and a hardware or OS security key such as Yubikey. To enroll, log in to the operator UI and click the circle purple profile button at the top right and then click **Register MFA Token**. Tap your hardware security key or use the OS public key management feature to enroll a key. Next time you log in, this key will be required to authenticate. +[WebServer.MFA] +# RPID is the FQDN of where the Operator UI is served. When serving locally, the value should be `localhost`. +RPID = 'localhost' # Example +# RPOrigin is the origin URL where WebAuthn requests initiate, including scheme and port. 
When serving locally, the value should be `http://localhost:6688/`. +RPOrigin = 'http://localhost:6688/' # Example + +# The TLS settings apply only if you want to enable TLS security on your Plugin node. +[WebServer.TLS] +# CertPath is the location of the TLS certificate file. +CertPath = '~/.cl/certs' # Example +# Host is the hostname configured for TLS to be used by the Plugin node. This is useful if you configured a domain name specific for your Plugin node. +Host = 'tls-host' # Example +# KeyPath is the location of the TLS private key file. +KeyPath = '/home/$USER/.plugin/tls/server.key' # Example +# HTTPSPort is the port used for HTTPS connections. Set this to `0` to disable HTTPS. Disabling HTTPS also relieves Plugin nodes of the requirement for a TLS certificate. +HTTPSPort = 6689 # Default +# ForceRedirect forces TLS redirect for unencrypted connections. +ForceRedirect = false # Default +# ListenIP specifies the IP to bind the HTTPS server to +ListenIP = '0.0.0.0' # Default + +[JobPipeline] +# ExternalInitiatorsEnabled enables the External Initiator feature. If disabled, `webhook` jobs can ONLY be initiated by a logged-in user. If enabled, `webhook` jobs can be initiated by a whitelisted external initiator. +ExternalInitiatorsEnabled = false # Default +# MaxRunDuration is the maximum time allowed for a single job run. If it takes longer, it will exit early and be marked errored. If set to zero, disables the time limit completely. +MaxRunDuration = '10m' # Default +# MaxSuccessfulRuns caps the number of completed successful runs per pipeline +# spec in the database. You can set it to zero as a performance optimisation; +# this will avoid saving any successful run. +# +# Note this is not a hard cap, it can drift slightly larger than this but not +# by more than 5% or so. 
+MaxSuccessfulRuns = 10000 # Default +# ReaperInterval controls how often the job pipeline reaper will run to delete completed jobs older than ReaperThreshold, in order to keep database size manageable. +# +# Set to `0` to disable the periodic reaper. +ReaperInterval = '1h' # Default +# ReaperThreshold determines the age limit for job runs. Completed job runs older than this will be automatically purged from the database. +ReaperThreshold = '24h' # Default +# **ADVANCED** +# ResultWriteQueueDepth controls how many writes will be buffered before subsequent writes are dropped, for jobs that write results asynchronously for performance reasons, such as OCR. +ResultWriteQueueDepth = 100 # Default + +[JobPipeline.HTTPRequest] +# DefaultTimeout defines the default timeout for HTTP requests made by `http` and `bridge` adapters. +DefaultTimeout = '15s' # Default +# MaxSize defines the maximum size for HTTP requests and responses made by `http` and `bridge` adapters. +MaxSize = '32768' # Default + +[FluxMonitor] +# **ADVANCED** +# DefaultTransactionQueueDepth controls the queue size for `DropOldestStrategy` in Flux Monitor. Set to 0 to use `SendEvery` strategy instead. +DefaultTransactionQueueDepth = 1 # Default +# SimulateTransactions enables transaction simulation for Flux Monitor. +SimulateTransactions = false # Default + +[OCR2] +# Enabled enables OCR2 jobs. +Enabled = false # Default +# ContractConfirmations is the number of block confirmations to wait for before enacting an on-chain +# configuration change. This value doesn't need to be very high (in +# particular, it does not need to protect against malicious re-orgs). +# Since configuration changes create some overhead, and mini-reorgs +# are fairly common, recommended values are between two and ten. 
+#
+# Malicious re-orgs are not any more of concern here than they are in
+# blockchain applications in general: Since nodes check the contract for the
+# latest config every ContractConfigTrackerPollInterval.Seconds(), they will
+# come to a common view of the current config within any interval longer than
+# that, as long as the latest setConfig transaction in the longest chain is
+# stable. They will thus be able to continue reporting after the poll
+# interval, unless an adversary is able to repeatedly re-org the transaction
+# out during every poll interval, which would amount to the capability to
+# censor any transaction.
+#
+# Note that 1 confirmation implies that the transaction/event has been mined in one block.
+# 0 confirmations would imply that the event would be recognised before it has even been mined, which is not currently supported.
+# e.g.
+# Current block height: 42
+# Changed in block height: 43
+# Contract config confirmations: 1
+# STILL PENDING
+#
+# Current block height: 43
+# Changed in block height: 43
+# Contract config confirmations: 1
+# CONFIRMED
+ContractConfirmations = 3 # Default
+# BlockchainTimeout is the timeout for blockchain queries (mediated through
+# ContractConfigTracker and ContractTransmitter).
+# (This is necessary because an oracle's operations are serialized, so
+# blocking forever on a chain interaction would break the oracle.)
+BlockchainTimeout = '20s' # Default
+# ContractPollInterval is the polling interval at which ContractConfigTracker is queried for updated on-chain configurations. Recommended values are between
+# fifteen seconds and two minutes.
+ContractPollInterval = '1m' # Default
+# ContractSubscribeInterval is the interval at which we try to establish a subscription on ContractConfigTracker
+# if one doesn't exist. Recommended values are between two and five minutes.
+ContractSubscribeInterval = '2m' # Default
+# ContractTransmitterTransmitTimeout is the timeout for ContractTransmitter.Transmit calls. 
+ContractTransmitterTransmitTimeout = '10s' # Default
+# DatabaseTimeout is the timeout for database interactions.
+# (This is necessary because an oracle's operations are serialized, so
+# blocking forever on an observation would break the oracle.)
+DatabaseTimeout = '10s' # Default
+# KeyBundleID is a sha256 hexadecimal hash identifier.
+KeyBundleID = '7a5f66bbe6594259325bf2b4f5b1a9c900000000000000000000000000000000' # Example
+# CaptureEATelemetry toggles collecting extra information from External Adapters
+CaptureEATelemetry = false # Default
+# CaptureAutomationCustomTelemetry toggles collecting automation specific telemetry
+CaptureAutomationCustomTelemetry = true # Default
+# DefaultTransactionQueueDepth controls the queue size for `DropOldestStrategy` in OCR2. Set to 0 to use `SendEvery` strategy instead.
+DefaultTransactionQueueDepth = 1 # Default
+# SimulateTransactions enables transaction simulation for OCR2.
+SimulateTransactions = false # Default
+# TraceLogging enables trace level logging.
+TraceLogging = false # Default
+
+# This section applies only if you are running off-chain reporting jobs.
+[OCR]
+# Enabled enables OCR jobs.
+Enabled = false # Default
+# ObservationTimeout is the timeout for making observations using the DataSource.Observe method.
+# (This is necessary because an oracle's operations are serialized, so
+# blocking forever on an observation would break the oracle.)
+ObservationTimeout = '5s' # Default
+# BlockchainTimeout is the timeout for blockchain queries (mediated through
+# ContractConfigTracker and ContractTransmitter).
+# (This is necessary because an oracle's operations are serialized, so
+# blocking forever on a chain interaction would break the oracle.)
+BlockchainTimeout = '20s' # Default
+# ContractPollInterval is the polling interval at which ContractConfigTracker is queried for
+# updated on-chain configurations. Recommended values are between
+# fifteen seconds and two minutes. 
+ContractPollInterval = '1m' # Default
+# ContractSubscribeInterval is the interval at which we try to establish a subscription on ContractConfigTracker
+# if one doesn't exist. Recommended values are between two and five minutes.
+ContractSubscribeInterval = '2m' # Default
+# **ADVANCED**
+# DefaultTransactionQueueDepth controls the queue size for `DropOldestStrategy` in OCR. Set to 0 to use `SendEvery` strategy instead.
+DefaultTransactionQueueDepth = 1 # Default
+# KeyBundleID is the default key bundle ID to use for OCR jobs. If you have an OCR job that does not explicitly specify a key bundle ID, it will fall back to this value.
+KeyBundleID = 'acdd42797a8b921b2910497badc5000600000000000000000000000000000000' # Example
+# SimulateTransactions enables transaction simulation for OCR.
+SimulateTransactions = false # Default
+# TransmitterAddress is the default sending address to use for OCR. If you have an OCR job that does not explicitly specify a transmitter address, it will fall back to this value.
+TransmitterAddress = '0xa0788FC17B1dEe36f057c42B6F373A34B014687e' # Example
+# CaptureEATelemetry toggles collecting extra information from External Adapters
+CaptureEATelemetry = false # Default
+# TraceLogging enables trace level logging.
+TraceLogging = false # Default
+
+# P2P has a versioned networking stack. Currently only `[P2P.V2]` is supported.
+# All nodes in the OCR network should share the same networking stack.
+[P2P]
+# IncomingMessageBufferSize is the per-remote number of incoming
+# messages to buffer. Any additional messages received on top of those
+# already in the queue will be dropped.
+IncomingMessageBufferSize = 10 # Default
+# OutgoingMessageBufferSize is the per-remote number of outgoing
+# messages to buffer. Any additional messages sent on top of those
+# already in the queue will displace the oldest. 
+# NOTE: OutgoingMessageBufferSize should be comfortably smaller than remote's +# IncomingMessageBufferSize to give the remote enough space to process +# them all in case we regained connection and now send a bunch at once +OutgoingMessageBufferSize = 10 # Default +# PeerID is the default peer ID to use for OCR jobs. If unspecified, uses the first available peer ID. +PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw' # Example +# TraceLogging enables trace level logging. +TraceLogging = false # Default + +[P2P.V2] +# Enabled enables P2P V2. +# Note: V1.Enabled is true by default, so it must be set false in order to run V2 only. +Enabled = true # Default +# AnnounceAddresses is the addresses the peer will advertise on the network in `host:port` form as accepted by the TCP version of Go’s `net.Dial`. +# The addresses should be reachable by other nodes on the network. When attempting to connect to another node, +# a node will attempt to dial all of the other node’s AnnounceAddresses in round-robin fashion. +AnnounceAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example +# DefaultBootstrappers is the default bootstrapper peers for libocr's v2 networking stack. +# +# Oracle nodes typically only know each other’s PeerIDs, but not their hostnames, IP addresses, or ports. +# DefaultBootstrappers are special nodes that help other nodes discover each other’s `AnnounceAddresses` so they can communicate. +# Nodes continuously attempt to connect to bootstrappers configured in here. When a node wants to connect to another node +# (which it knows only by PeerID, but not by address), it discovers the other node’s AnnounceAddresses from communications +# received from its DefaultBootstrappers or other discovered nodes. To facilitate discovery, +# nodes will regularly broadcast signed announcements containing their PeerID and AnnounceAddresses. 
+DefaultBootstrappers = ['12D3KooWMHMRLQkgPbFSYHwD3NBuwtS1AmxhvKVUrcfyaGDASR4U@1.2.3.4:9999', '12D3KooWM55u5Swtpw9r8aFLQHEtw7HR4t44GdNs654ej5gRs2Dh@example.com:1234'] # Example +# DeltaDial controls how far apart Dial attempts are +DeltaDial = '15s' # Default +# DeltaReconcile controls how often a Reconcile message is sent to every peer. +DeltaReconcile = '1m' # Default +# ListenAddresses is the addresses the peer will listen to on the network in `host:port` form as accepted by `net.Listen()`, +# but the host and port must be fully specified and cannot be empty. You can specify `0.0.0.0` (IPv4) or `::` (IPv6) to listen on all interfaces, but that is not recommended. +ListenAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example + +[Keeper] +# **ADVANCED** +# DefaultTransactionQueueDepth controls the queue size for `DropOldestStrategy` in Keeper. Set to 0 to use `SendEvery` strategy instead. +DefaultTransactionQueueDepth = 1 # Default +# **ADVANCED** +# GasPriceBufferPercent specifies the percentage to add to the gas price used for checking whether to perform an upkeep. Only applies in legacy mode (EIP-1559 off). +GasPriceBufferPercent = 20 # Default +# **ADVANCED** +# GasTipCapBufferPercent specifies the percentage to add to the gas price used for checking whether to perform an upkeep. Only applies in EIP-1559 mode. +GasTipCapBufferPercent = 20 # Default +# **ADVANCED** +# BaseFeeBufferPercent specifies the percentage to add to the base fee used for checking whether to perform an upkeep. Applies only in EIP-1559 mode. +BaseFeeBufferPercent = 20 # Default +# **ADVANCED** +# MaxGracePeriod is the maximum number of blocks that a keeper will wait after performing an upkeep before it resumes checking that upkeep +MaxGracePeriod = 100 # Default +# TurnLookBack is the number of blocks in the past to look back when getting a block for a turn. 
+TurnLookBack = 1_000 # Default
+
+[Keeper.Registry]
+# **ADVANCED**
+# CheckGasOverhead is the amount of extra gas to provide checkUpkeep() calls to account for the gas consumed by the keeper registry.
+CheckGasOverhead = 200_000 # Default
+# **ADVANCED**
+# PerformGasOverhead is the amount of extra gas to provide performUpkeep() calls to account for the gas consumed by the keeper registry.
+PerformGasOverhead = 300_000 # Default
+# **ADVANCED**
+# SyncInterval is the interval in which the RegistrySynchronizer performs a full sync of the keeper registry contract it is tracking.
+SyncInterval = '30m' # Default
+# **ADVANCED**
+# MaxPerformDataSize is the max size of perform data.
+MaxPerformDataSize = 5_000 # Default
+# **ADVANCED**
+# SyncUpkeepQueueSize represents the maximum number of upkeeps that can be synced in parallel.
+SyncUpkeepQueueSize = 10 # Default
+
+# The Plugin node is equipped with an internal "nurse" service that can perform automatic `pprof` profiling when certain resource thresholds are exceeded, such as memory and goroutine count. These profiles are saved to disk to facilitate fine-grained debugging of performance-related issues. In general, if you notice that your node has begun to accumulate profiles, forward them to the Plugin team.
+#
+# To learn more about these profiles, read the [Profiling Go programs with pprof](https://jvns.ca/blog/2017/09/24/profiling-go-with-pprof/) guide.
+[AutoPprof]
+# Enabled enables the automatic profiling service.
+Enabled = false # Default
+# ProfileRoot sets the location on disk where pprof profiles will be stored. Defaults to `RootDir`.
+ProfileRoot = 'prof/root' # Example
+# PollInterval is the interval at which the node's resources are checked.
+PollInterval = '10s' # Default
+# GatherDuration is the duration for which profiles are gathered when profiling starts.
+GatherDuration = '10s' # Default
+# GatherTraceDuration is the duration for which traces are gathered when profiling is kicked off. 
This is separately configurable because traces are significantly larger than other types of profiles. +GatherTraceDuration = '5s' # Default +# MaxProfileSize is the maximum amount of disk space that profiles may consume before profiling is disabled. +MaxProfileSize = '100mb' # Default +# CPUProfileRate sets the rate for CPU profiling. See https://pkg.go.dev/runtime#SetCPUProfileRate. +CPUProfileRate = 1 # Default +# MemProfileRate sets the rate for memory profiling. See https://pkg.go.dev/runtime#pkg-variables. +MemProfileRate = 1 # Default +# BlockProfileRate sets the fraction of blocking events for goroutine profiling. See https://pkg.go.dev/runtime#SetBlockProfileRate. +BlockProfileRate = 1 # Default +# MutexProfileFraction sets the fraction of contention events for mutex profiling. See https://pkg.go.dev/runtime#SetMutexProfileFraction. +MutexProfileFraction = 1 # Default +# MemThreshold sets the maximum amount of memory the node can actively consume before profiling begins. +MemThreshold = '4gb' # Default +# GoroutineThreshold is the maximum number of actively-running goroutines the node can spawn before profiling begins. +GoroutineThreshold = 5000 # Default + +[Pyroscope] +# ServerAddress sets the address that will receive the profile logs. It enables the profiling service. +ServerAddress = 'http://localhost:4040' # Example +# Environment sets the target environment tag in which profiles will be added to. +Environment = 'mainnet' # Default + +[Sentry] +# **ADVANCED** +# Debug enables printing of Sentry SDK debug messages. +Debug = false # Default +# DSN is the data source name where events will be sent. Sentry is completely disabled if this is left blank. +DSN = 'sentry-dsn' # Example +# Environment overrides the Sentry environment to the given value. Otherwise autodetects between dev/prod. +Environment = 'my-custom-env' # Example +# Release overrides the Sentry release to the given value. Otherwise uses the compiled-in version number. 
+Release = 'v1.2.3' # Example
+
+
+# Insecure config family is only allowed in development builds.
+[Insecure]
+# **ADVANCED**
+# DevWebServer skips secure configuration for webserver AllowedHosts, SSL, etc.
+DevWebServer = false # Default
+# OCRDevelopmentMode runs OCR in development mode.
+OCRDevelopmentMode = false # Default
+# InfiniteDepthQueries skips graphql query depth limit checks.
+InfiniteDepthQueries = false # Default
+# DisableRateLimiting skips ratelimiting on asset requests.
+DisableRateLimiting = false # Default
+
+[Tracing]
+# Enabled turns trace collection on or off. On requires an OTEL Tracing Collector.
+Enabled = false # Default
+# CollectorTarget is the logical address of the OTEL Tracing Collector.
+CollectorTarget = 'localhost:4317' # Example
+# NodeID is a unique name for this node relative to any other node traces are collected for.
+NodeID = 'NodeID' # Example
+# SamplingRatio is the ratio of traces to sample for this node.
+SamplingRatio = 1.0 # Example
+# Mode is a string value. `tls` or `unencrypted` are the only values allowed. If set to `unencrypted`, `TLSCertPath` can be unset, meaning traces will be sent over plaintext to the collector.
+Mode = 'tls' # Default
+# TLSCertPath is the file path to the TLS certificate used for secure communication with an OTEL Tracing Collector.
+TLSCertPath = '/path/to/cert.pem' # Example
+
+# Tracing.Attributes are user specified key-value pairs to associate in the context of the traces
+[Tracing.Attributes]
+# env is an example user specified key-value pair
+env = 'test' # Example
+
+[Mercury]
+
+# Mercury.Cache controls settings for the price retrieval cache querying a mercury server
+[Mercury.Cache]
+# LatestReportTTL controls how "stale" we will allow a price to be e.g. if
+# set to 1s, a new price will always be fetched if the last result was
+# from 1 second ago or older. 
+# +# Another way of looking at it is such: the cache will _never_ return a +# price that was queried from now-LatestReportTTL or before. +# +# Setting to zero disables caching entirely. +LatestReportTTL = "1s" # Default +# MaxStaleAge is that maximum amount of time that a value can be stale +# before it is deleted from the cache (a form of garbage collection). +# +# This should generally be set to something much larger than +# LatestReportTTL. Setting to zero disables garbage collection. +MaxStaleAge = "1h" # Default +# LatestReportDeadline controls how long to wait for a response from the +# mercury server before retrying. Setting this to zero will wait indefinitely. +LatestReportDeadline = "5s" # Default + +# Mercury.TLS controls client settings for when the node talks to traditional web servers or load balancers. +[Mercury.TLS] +# CertFile is the path to a PEM file of trusted root certificate authority certificates +CertFile = "/path/to/client/certs.pem" # Example diff --git a/core/config/docs/defaults.go b/core/config/docs/defaults.go new file mode 100644 index 00000000..c130829a --- /dev/null +++ b/core/config/docs/defaults.go @@ -0,0 +1,28 @@ +package docs + +import ( + "log" + "strings" + + "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin/cfgtest" + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" +) + +var ( + defaults toml.Core +) + +func init() { + if err := cfgtest.DocDefaultsOnly(strings.NewReader(coreTOML), &defaults, config.DecodeTOML); err != nil { + log.Fatalf("Failed to initialize defaults from docs: %v", err) + } +} + +func CoreDefaults() (c toml.Core) { + c.SetFrom(&defaults) + c.Database.Dialect = dialects.Postgres // not user visible - overridden for tests only + c.Tracing.Attributes = make(map[string]string) + return +} diff --git a/core/config/docs/defaults_test.go b/core/config/docs/defaults_test.go new file mode 100644 
index 00000000..2d6b9f6e --- /dev/null +++ b/core/config/docs/defaults_test.go @@ -0,0 +1,11 @@ +package docs + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin/cfgtest" +) + +func TestCoreDefaults_notNil(t *testing.T) { + cfgtest.AssertFieldsNotNil(t, CoreDefaults()) +} diff --git a/core/config/docs/docs.go b/core/config/docs/docs.go new file mode 100644 index 00000000..00fd42f8 --- /dev/null +++ b/core/config/docs/docs.go @@ -0,0 +1,275 @@ +package docs + +import ( + _ "embed" + "fmt" + "log" + "strings" + + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const ( + fieldDefault = "# Default" + fieldExample = "# Example" + + tokenAdvanced = "**ADVANCED**" + tokenExtended = "**EXTENDED**" +) + +var ( + //go:embed secrets.toml + secretsTOML string + //go:embed core.toml + coreTOML string + //go:embed chains-evm.toml + chainsEVMTOML string + //go:embed chains-cosmos.toml + chainsCosmosTOML string + //go:embed chains-solana.toml + chainsSolanaTOML string + //go:embed chains-starknet.toml + chainsStarknetTOML string + + //go:embed example-config.toml + exampleConfig string + //go:embed example-secrets.toml + exampleSecrets string + + docsTOML = coreTOML + chainsEVMTOML + chainsCosmosTOML + chainsSolanaTOML + chainsStarknetTOML +) + +// GenerateConfig returns MarkDown documentation generated from core.toml & chains-*.toml. +func GenerateConfig() (string, error) { + return generateDocs(docsTOML, `[//]: # (Documentation generated from docs/*.toml - DO NOT EDIT.) + +This document describes the TOML format for configuration. + +See also [SECRETS.md](SECRETS.md) +`, exampleConfig) +} + +// GenerateSecrets returns MarkDown documentation generated from secrets.toml. +func GenerateSecrets() (string, error) { + return generateDocs(secretsTOML, `[//]: # (Documentation generated from docs/secrets.toml - DO NOT EDIT.) + +This document describes the TOML format for secrets. 
+ +Each secret has an alternative corresponding environment variable. + +See also [CONFIG.md](CONFIG.md) +`, exampleSecrets) +} + +// generateDocs returns MarkDown documentation generated from the TOML string. +func generateDocs(toml, header, example string) (string, error) { + items, err := parseTOMLDocs(toml) + var sb strings.Builder + + sb.WriteString(header) + sb.WriteString(` +## Example + +`) + sb.WriteString("```toml\n") + sb.WriteString(example) + sb.WriteString("```\n\n") + + for _, item := range items { + sb.WriteString(item.String()) + sb.WriteString("\n\n") + } + + return sb.String(), err +} + +func advancedWarning(msg string) string { + return fmt.Sprintf(":warning: **_ADVANCED_**: _%s_\n", msg) +} + +// lines holds a set of contiguous lines +type lines []string + +func (d lines) String() string { + return strings.Join(d, "\n") +} + +type table struct { + name string + codes lines + adv bool + desc lines + ext bool +} + +func newTable(line string, desc lines) *table { + t := &table{ + name: strings.Trim(line, "[]"), + codes: []string{line}, + desc: desc, + } + if len(desc) > 0 { + if strings.HasPrefix(strings.TrimSpace(desc[0]), tokenAdvanced) { + t.adv = true + t.desc = t.desc[1:] + } else if strings.HasPrefix(strings.TrimSpace(desc[len(desc)-1]), tokenExtended) { + t.ext = true + t.desc = t.desc[:len(desc)-1] + } + } + return t +} + +func newArrayOfTables(line string, desc lines) *table { + t := &table{ + name: strings.Trim(strings.Trim(line, fieldExample), "[]"), + codes: []string{line}, + desc: desc, + } + if len(desc) > 0 { + if strings.HasPrefix(strings.TrimSpace(desc[0]), tokenAdvanced) { + t.adv = true + t.desc = t.desc[1:] + } else if strings.HasPrefix(strings.TrimSpace(desc[len(desc)-1]), tokenExtended) { + t.ext = true + t.desc = t.desc[:len(desc)-1] + } + } + return t +} + +func (t table) advanced() string { + if t.adv { + return advancedWarning("Do not change these settings unless you know what you are doing.") + } + return "" +} + +func 
(t table) code() string { + if !t.ext { + return fmt.Sprint("```toml\n", t.codes, "\n```\n") + } + return "" +} + +func (t table) extended() string { + if t.ext { + if t.name != "EVM" { + log.Fatalf("%s: no extended description available", t.name) + } + s, err := evmChainDefaults() + if err != nil { + log.Fatalf("%s: failed to generate evm chain defaults: %v", t.name, err) + } + return s + } + return "" +} + +// String prints a table as an H2, followed by a code block and description. +func (t *table) String() string { + return fmt.Sprint("## ", t.name, "\n", + t.advanced(), + t.code(), + t.desc, + t.extended()) +} + +type keyval struct { + name string + code string + adv bool + desc lines +} + +func newKeyval(line string, desc lines) keyval { + line = strings.TrimSpace(line) + kv := keyval{ + name: line[:strings.Index(line, " ")], + code: line, + desc: desc, + } + if len(desc) > 0 && strings.HasPrefix(strings.TrimSpace(desc[0]), tokenAdvanced) { + kv.adv = true + kv.desc = kv.desc[1:] + } + return kv +} + +func (k keyval) advanced() string { + if k.adv { + return advancedWarning("Do not change this setting unless you know what you are doing.") + } + return "" +} + +// String prints a keyval as an H3, followed by a code block and description. 
+func (k keyval) String() string { + name := k.name + if i := strings.LastIndex(name, "."); i > -1 { + name = name[i+1:] + } + return fmt.Sprint("### ", name, "\n", + k.advanced(), + "```toml\n", + k.code, + "\n```\n", + k.desc) +} + +func parseTOMLDocs(s string) (items []fmt.Stringer, err error) { + defer func() { _, err = utils.MultiErrorList(err) }() + globalTable := table{name: "Global"} + currentTable := &globalTable + items = append(items, currentTable) + var desc lines + for _, line := range strings.Split(s, "\n") { + if strings.HasPrefix(line, "#") { + // comment + desc = append(desc, strings.TrimSpace(line[1:])) + } else if strings.TrimSpace(line) == "" { + // empty + if len(desc) > 0 { + items = append(items, desc) + desc = nil + } + } else if strings.HasPrefix(line, "[[") { + currentTable = newArrayOfTables(line, desc) + items = append(items, currentTable) + desc = nil + } else if strings.HasPrefix(line, "[") { + currentTable = newTable(line, desc) + items = append(items, currentTable) + desc = nil + } else { + kv := newKeyval(line, desc) + shortName := kv.name + if currentTable != &globalTable { + // update to full name + kv.name = currentTable.name + "." 
+ kv.name + } + if len(kv.desc) == 0 { + err = multierr.Append(err, fmt.Errorf("%s: missing description", kv.name)) + } else if !strings.HasPrefix(kv.desc[0], shortName) { + err = multierr.Append(err, fmt.Errorf("%s: description does not begin with %q", kv.name, shortName)) + } + if !strings.HasSuffix(line, fieldDefault) && !strings.HasSuffix(line, fieldExample) { + err = multierr.Append(err, fmt.Errorf(`%s: is not one of %v`, kv.name, []string{fieldDefault, fieldExample})) + } + + items = append(items, kv) + currentTable.codes = append(currentTable.codes, kv.code) + desc = nil + } + } + if len(globalTable.codes) == 0 { + //drop it + items = items[1:] + } + if len(desc) > 0 { + items = append(items, desc) + } + return +} diff --git a/core/config/docs/docs_test.go b/core/config/docs/docs_test.go new file mode 100644 index 00000000..d8016470 --- /dev/null +++ b/core/config/docs/docs_test.go @@ -0,0 +1,123 @@ +package docs_test + +import ( + "strings" + "testing" + + "github.com/kylelemons/godebug/diff" + gotoml "github.com/pelletier/go-toml/v2" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + coscfg "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + "github.com/goplugin/plugin-solana/pkg/solana" + stkcfg "github.com/goplugin/plugin-starknet/relayer/pkg/plugin/config" + + "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/config/docs" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin/cfgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +func TestDoc(t *testing.T) { + d := gotoml.NewDecoder(strings.NewReader(docs.DocsTOML)) + d.DisallowUnknownFields() // Ensure no extra fields + var c plugin.Config + err := d.Decode(&c) + var strict 
*gotoml.StrictMissingError + if err != nil && strings.Contains(err.Error(), "undecoded keys: ") { + t.Errorf("Docs contain extra fields: %v", err) + } else if errors.As(err, &strict) { + t.Fatal("StrictMissingError:", strict.String()) + } else { + require.NoError(t, err) + } + + cfgtest.AssertFieldsNotNil(t, c) + + var defaults plugin.Config + require.NoError(t, cfgtest.DocDefaultsOnly(strings.NewReader(docs.DocsTOML), &defaults, config.DecodeTOML)) + + t.Run("EVM", func(t *testing.T) { + fallbackDefaults := evmcfg.Defaults(nil) + docDefaults := defaults.EVM[0].Chain + + require.Equal(t, "", *docDefaults.ChainType) + docDefaults.ChainType = nil + + // clean up KeySpecific as a special case + require.Equal(t, 1, len(docDefaults.KeySpecific)) + ks := evmcfg.KeySpecific{Key: new(ethkey.EIP55Address), + GasEstimator: evmcfg.KeySpecificGasEstimator{PriceMax: new(assets.Wei)}} + require.Equal(t, ks, docDefaults.KeySpecific[0]) + docDefaults.KeySpecific = nil + + // EVM.GasEstimator.BumpTxDepth doesn't have a constant default - it is derived from another field + require.Zero(t, *docDefaults.GasEstimator.BumpTxDepth) + docDefaults.GasEstimator.BumpTxDepth = nil + + // per-job limits are nilable + require.Zero(t, *docDefaults.GasEstimator.LimitJobType.OCR) + require.Zero(t, *docDefaults.GasEstimator.LimitJobType.OCR2) + require.Zero(t, *docDefaults.GasEstimator.LimitJobType.DR) + require.Zero(t, *docDefaults.GasEstimator.LimitJobType.Keeper) + require.Zero(t, *docDefaults.GasEstimator.LimitJobType.VRF) + require.Zero(t, *docDefaults.GasEstimator.LimitJobType.FM) + docDefaults.GasEstimator.LimitJobType = evmcfg.GasLimitJobType{} + + // EIP1559FeeCapBufferBlocks doesn't have a constant default - it is derived from another field + require.Zero(t, *docDefaults.GasEstimator.BlockHistory.EIP1559FeeCapBufferBlocks) + docDefaults.GasEstimator.BlockHistory.EIP1559FeeCapBufferBlocks = nil + + // addresses w/o global values + require.Zero(t, *docDefaults.FlagsContractAddress) + 
require.Zero(t, *docDefaults.LinkContractAddress) + require.Zero(t, *docDefaults.OperatorFactoryAddress) + docDefaults.FlagsContractAddress = nil + docDefaults.LinkContractAddress = nil + docDefaults.OperatorFactoryAddress = nil + require.Empty(t, docDefaults.ChainWriter.FromAddress) + require.Empty(t, docDefaults.ChainWriter.ForwarderAddress) + docDefaults.ChainWriter.FromAddress = nil + docDefaults.ChainWriter.ForwarderAddress = nil + + assertTOML(t, fallbackDefaults, docDefaults) + }) + + t.Run("Cosmos", func(t *testing.T) { + var fallbackDefaults coscfg.TOMLConfig + fallbackDefaults.SetDefaults() + + assertTOML(t, fallbackDefaults.Chain, defaults.Cosmos[0].Chain) + }) + + t.Run("Solana", func(t *testing.T) { + var fallbackDefaults solana.TOMLConfig + fallbackDefaults.SetDefaults() + + assertTOML(t, fallbackDefaults.Chain, defaults.Solana[0].Chain) + }) + + t.Run("Starknet", func(t *testing.T) { + var fallbackDefaults stkcfg.TOMLConfig + fallbackDefaults.SetDefaults() + + assertTOML(t, fallbackDefaults.Chain, defaults.Starknet[0].Chain) + }) +} + +func assertTOML[T any](t *testing.T, fallback, docs T) { + t.Helper() + t.Logf("fallback: %#v", fallback) + t.Logf("docs: %#v", docs) + fb, err := gotoml.Marshal(fallback) + require.NoError(t, err) + db, err := gotoml.Marshal(docs) + require.NoError(t, err) + fs, ds := string(fb), string(db) + assert.Equal(t, fs, ds, diff.Diff(fs, ds)) +} diff --git a/core/config/docs/example-config.toml b/core/config/docs/example-config.toml new file mode 100644 index 00000000..b90af810 --- /dev/null +++ b/core/config/docs/example-config.toml @@ -0,0 +1,9 @@ +Log.Level = 'debug' + +[[EVM]] +ChainID = '1' # Required + +[[EVM.Nodes]] +Name = 'fake' # Required +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' # Required diff --git a/core/config/docs/example-secrets.toml b/core/config/docs/example-secrets.toml new file mode 100644 index 00000000..71b35bb7 --- /dev/null +++ b/core/config/docs/example-secrets.toml @@ -0,0 +1,5 @@ 
+[Database] +URL = 'postgresql://user:pass@localhost:5432/dbname?sslmode=disable' # Required + +[Password] +Keystore = 'keystore_pass' # Required diff --git a/core/config/docs/extended.go b/core/config/docs/extended.go new file mode 100644 index 00000000..8613f4e0 --- /dev/null +++ b/core/config/docs/extended.go @@ -0,0 +1,30 @@ +package docs + +import ( + "bytes" + "fmt" + "strings" + + "github.com/pelletier/go-toml/v2" + + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" +) + +// evmChainDefaults returns generated Markdown for the EVM per-chain defaults. See v2.Defaults. +func evmChainDefaults() (string, error) { + var sb strings.Builder + for _, id := range evmcfg.DefaultIDs { + config, name := evmcfg.DefaultsNamed(id) + fmt.Fprintf(&sb, "\n
%s (%s)

\n\n", name, id) + sb.WriteString("```toml\n") + b, err := toml.Marshal(config) + if err != nil { + return "", err + } + sb.Write(bytes.TrimSpace(b)) + sb.WriteString("\n```\n\n") + sb.WriteString("

\n") + } + + return sb.String(), nil +} diff --git a/core/config/docs/helpers_test.go b/core/config/docs/helpers_test.go new file mode 100644 index 00000000..96f96ee7 --- /dev/null +++ b/core/config/docs/helpers_test.go @@ -0,0 +1,3 @@ +package docs + +var DocsTOML = docsTOML diff --git a/core/config/docs/secrets.toml b/core/config/docs/secrets.toml new file mode 100644 index 00000000..fe35aa7c --- /dev/null +++ b/core/config/docs/secrets.toml @@ -0,0 +1,60 @@ +[Database] +# URL is the PostgreSQL URI to connect to your database. Plugin nodes require Postgres versions >= 11. See +# [Running a Plugin Node](https://docs.chain.link/docs/running-a-plugin-node/#set-the-remote-database_url-config) for an example. +# +# Environment variable: `CL_DATABASE_URL` +URL = "postgresql://user:pass@localhost:5432/dbname?sslmode=disable" # Example +# BackupURL is where the automatic database backup will pull from, rather than the main CL_DATABASE_URL. It is recommended +# to set this value to a read replica if you have one to avoid excessive load on the main database. +# +# Environment variable: `CL_DATABASE_BACKUP_URL` +BackupURL = "postgresql://user:pass@read-replica.example.com:5432/dbname?sslmode=disable" # Example +# AllowSimplePasswords skips the password complexity check normally enforced on URL & BackupURL. +# +# Environment variable: `CL_DATABASE_ALLOW_SIMPLE_PASSWORDS` +AllowSimplePasswords = false # Default + +# Optional LDAP config +[WebServer.LDAP] +# ServerAddress is the full ldaps:// address of the ldap server to authenticate with and query +ServerAddress = 'ldaps://127.0.0.1' # Example +# ReadOnlyUserLogin is the username of the read only root user used to authenticate the requested LDAP queries +ReadOnlyUserLogin = 'viewer@example.com' # Example +# ReadOnlyUserPass is the password for the above account +ReadOnlyUserPass = 'password' # Example + +[Password] +# Keystore is the password for the node's account. 
+# +# Environment variable: `CL_PASSWORD_KEYSTORE` +Keystore = "keystore_pass" # Example +# VRF is the password for the vrf keys. +# +# Environment variable: `CL_PASSWORD_VRF` +VRF = "VRF_pass" # Example + +[Pyroscope] +# AuthToken is the API key for the Pyroscope server. +# +# Environment variable: `CL_PYROSCOPE_AUTH_TOKEN` +AuthToken = "pyroscope-token" # Example + +[Prometheus] +# AuthToken is the authorization key for the Prometheus metrics endpoint. +# +# Environment variable: `CL_PROMETHEUS_AUTH_TOKEN` +AuthToken = "prometheus-token" # Example + +[Mercury.Credentials.Name] +# Username is used for basic auth of the Mercury endpoint +Username = "A-Mercury-Username" # Example +# Password is used for basic auth of the Mercury endpoint +Password = "A-Mercury-Password" # Example +# URL is the Mercury endpoint base URL used to access Mercury price feed +URL = "https://example.com" # Example +# LegacyURL is the Mercury legacy endpoint base URL used to access Mercury v0.2 price feed +LegacyURL = "https://example.v1.com" # Example + +[Threshold] +# ThresholdKeyShare used by the threshold decryption OCR plugin +ThresholdKeyShare = "A-Threshold-Decryption-Key-Share" # Example diff --git a/core/config/docs/testdata/example.md b/core/config/docs/testdata/example.md new file mode 100644 index 00000000..54dd613e --- /dev/null +++ b/core/config/docs/testdata/example.md @@ -0,0 +1,52 @@ +[//]: # (Generated - DO NOT EDIT.) + +## Example + +```toml +Bar = 7 # Required +``` + +## Global +```toml +FieldName = 'foo' # Default +``` + + +This example demonstrates some of the features: + +### FieldName +```toml +FieldName = 'foo' # Default +``` +FieldName is a string with a default value. Every field **must** be documented with a comment that begins with the field name. + +This is a loose comment. +Comments can span multiple lines. + +## TableName +```toml +[TableName] +Bar = 10 # Example +TrickyField = true # Default +``` +TableName holds settings that do something... 
+#### Details + +We can include a long description here: +1. some +2. list +3. items + +### Bar +```toml +Bar = 10 # Example +``` +Bar doesn't have a default value, so an example **must** be included. + +### TrickyField +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +TrickyField = true # Default +``` +TrickyField should only be used by advanced users, so it includes the special line `**ADVANCED**` to include a common warning tag. + diff --git a/core/config/docs/testdata/example.toml b/core/config/docs/testdata/example.toml new file mode 100644 index 00000000..d6bb40fc --- /dev/null +++ b/core/config/docs/testdata/example.toml @@ -0,0 +1,21 @@ +# This example demonstrates some of the features: + +# FieldName is a string with a default value. Every field **must** be documented with a comment that begins with the field name. +FieldName = 'foo' # Default + +# This is a loose comment. +# Comments can span multiple lines. + +# TableName holds settings that do something... +# #### Details +# +# We can include a long description here: +# 1. some +# 2. list +# 3. items +[TableName] +# Bar doesn't have a default value, so an example **must** be included. +Bar = 10 # Example +# **ADVANCED** +# TrickyField should only be used by advanced users, so it includes the special line `**ADVANCED**` to include a common warning tag. 
+TrickyField = true # Default \ No newline at end of file diff --git a/core/config/feature_config.go b/core/config/feature_config.go new file mode 100644 index 00000000..fbb3a4ea --- /dev/null +++ b/core/config/feature_config.go @@ -0,0 +1,7 @@ +package config + +type Feature interface { + FeedsManager() bool + UICSAKeys() bool + LogPoller() bool +} diff --git a/core/config/flux_monitor_config.go b/core/config/flux_monitor_config.go new file mode 100644 index 00000000..e0ac6e7c --- /dev/null +++ b/core/config/flux_monitor_config.go @@ -0,0 +1,6 @@ +package config + +type FluxMonitor interface { + DefaultTransactionQueueDepth() uint32 + SimulateTransactions() bool +} diff --git a/core/config/insecure_config.go b/core/config/insecure_config.go new file mode 100644 index 00000000..3912062b --- /dev/null +++ b/core/config/insecure_config.go @@ -0,0 +1,8 @@ +package config + +type Insecure interface { + DevWebServer() bool + OCRDevelopmentMode() bool + DisableRateLimiting() bool + InfiniteDepthQueries() bool +} diff --git a/core/config/job_pipeline_config.go b/core/config/job_pipeline_config.go new file mode 100644 index 00000000..15043cc8 --- /dev/null +++ b/core/config/job_pipeline_config.go @@ -0,0 +1,18 @@ +package config + +import ( + "time" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" +) + +type JobPipeline interface { + DefaultHTTPLimit() int64 + DefaultHTTPTimeout() commonconfig.Duration + MaxRunDuration() time.Duration + MaxSuccessfulRuns() uint64 + ReaperInterval() time.Duration + ReaperThreshold() time.Duration + ResultWriteQueueDepth() uint64 + ExternalInitiatorsEnabled() bool +} diff --git a/core/config/keeper_config.go b/core/config/keeper_config.go new file mode 100644 index 00000000..565e70dc --- /dev/null +++ b/core/config/keeper_config.go @@ -0,0 +1,21 @@ +package config + +import "time" + +type Registry interface { + CheckGasOverhead() uint32 + PerformGasOverhead() uint32 + MaxPerformDataSize() uint32 + SyncInterval() time.Duration 
+ SyncUpkeepQueueSize() uint32 +} + +type Keeper interface { + DefaultTransactionQueueDepth() uint32 + GasPriceBufferPercent() uint16 + GasTipCapBufferPercent() uint16 + BaseFeeBufferPercent() uint16 + MaxGracePeriod() int64 + TurnLookBack() int64 + Registry() Registry +} diff --git a/core/config/logging_config.go b/core/config/logging_config.go new file mode 100644 index 00000000..429de514 --- /dev/null +++ b/core/config/logging_config.go @@ -0,0 +1,23 @@ +package config + +import ( + "go.uber.org/zap/zapcore" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type File interface { + Dir() string + MaxSize() utils.FileSize + MaxAgeDays() int64 + MaxBackups() int64 +} + +type Log interface { + DefaultLevel() zapcore.Level + JSONConsole() bool + Level() zapcore.Level + UnixTimestamps() bool + + File() File +} diff --git a/core/config/mercury_config.go b/core/config/mercury_config.go new file mode 100644 index 00000000..573cc840 --- /dev/null +++ b/core/config/mercury_config.go @@ -0,0 +1,23 @@ +package config + +import ( + "time" + + "github.com/goplugin/plugin-common/pkg/types" +) + +type MercuryCache interface { + LatestReportTTL() time.Duration + MaxStaleAge() time.Duration + LatestReportDeadline() time.Duration +} + +type MercuryTLS interface { + CertFile() string +} + +type Mercury interface { + Credentials(credName string) *types.MercuryCredentials + Cache() MercuryCache + TLS() MercuryTLS +} diff --git a/core/config/mocks/telemetry_ingress.go b/core/config/mocks/telemetry_ingress.go new file mode 100644 index 00000000..91962016 --- /dev/null +++ b/core/config/mocks/telemetry_ingress.go @@ -0,0 +1,175 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + config "github.com/goplugin/pluginv3.0/v2/core/config" + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// TelemetryIngress is an autogenerated mock type for the TelemetryIngress type +type TelemetryIngress struct { + mock.Mock +} + +// BufferSize provides a mock function with given fields: +func (_m *TelemetryIngress) BufferSize() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BufferSize") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// Endpoints provides a mock function with given fields: +func (_m *TelemetryIngress) Endpoints() []config.TelemetryIngressEndpoint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Endpoints") + } + + var r0 []config.TelemetryIngressEndpoint + if rf, ok := ret.Get(0).(func() []config.TelemetryIngressEndpoint); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]config.TelemetryIngressEndpoint) + } + } + + return r0 +} + +// Logging provides a mock function with given fields: +func (_m *TelemetryIngress) Logging() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Logging") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// MaxBatchSize provides a mock function with given fields: +func (_m *TelemetryIngress) MaxBatchSize() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for MaxBatchSize") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// SendInterval provides a mock function with given fields: +func (_m *TelemetryIngress) SendInterval() time.Duration { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for 
SendInterval") + } + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + +// SendTimeout provides a mock function with given fields: +func (_m *TelemetryIngress) SendTimeout() time.Duration { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SendTimeout") + } + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + +// UniConn provides a mock function with given fields: +func (_m *TelemetryIngress) UniConn() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for UniConn") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// UseBatchSend provides a mock function with given fields: +func (_m *TelemetryIngress) UseBatchSend() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for UseBatchSend") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NewTelemetryIngress creates a new instance of TelemetryIngress. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTelemetryIngress(t interface { + mock.TestingT + Cleanup(func()) +}) *TelemetryIngress { + mock := &TelemetryIngress{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/config/mocks/telemetry_ingress_endpoint.go b/core/config/mocks/telemetry_ingress_endpoint.go new file mode 100644 index 00000000..08432cfe --- /dev/null +++ b/core/config/mocks/telemetry_ingress_endpoint.go @@ -0,0 +1,102 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + url "net/url" + + mock "github.com/stretchr/testify/mock" +) + +// TelemetryIngressEndpoint is an autogenerated mock type for the TelemetryIngressEndpoint type +type TelemetryIngressEndpoint struct { + mock.Mock +} + +// ChainID provides a mock function with given fields: +func (_m *TelemetryIngressEndpoint) ChainID() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Network provides a mock function with given fields: +func (_m *TelemetryIngressEndpoint) Network() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Network") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// ServerPubKey provides a mock function with given fields: +func (_m *TelemetryIngressEndpoint) ServerPubKey() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ServerPubKey") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// URL provides a mock function with given fields: +func (_m *TelemetryIngressEndpoint) URL() *url.URL { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for URL") + } + + var r0 *url.URL + if rf, ok := ret.Get(0).(func() *url.URL); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*url.URL) + } + } + + return r0 +} + +// NewTelemetryIngressEndpoint creates a new instance of TelemetryIngressEndpoint. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewTelemetryIngressEndpoint(t interface { + mock.TestingT + Cleanup(func()) +}) *TelemetryIngressEndpoint { + mock := &TelemetryIngressEndpoint{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/config/ocr2_config.go b/core/config/ocr2_config.go new file mode 100644 index 00000000..a2ea7974 --- /dev/null +++ b/core/config/ocr2_config.go @@ -0,0 +1,24 @@ +package config + +import ( + "time" +) + +// OCR2 is a subset of global config relevant to OCR v2. +type OCR2 interface { + Enabled() bool + // OCR2 config, can override in jobs, all chains + ContractConfirmations() uint16 + ContractTransmitterTransmitTimeout() time.Duration + BlockchainTimeout() time.Duration + DatabaseTimeout() time.Duration + ContractPollInterval() time.Duration + ContractSubscribeInterval() time.Duration + KeyBundleID() (string, error) + // OCR2 config, cannot override in jobs + TraceLogging() bool + CaptureEATelemetry() bool + DefaultTransactionQueueDepth() uint32 + SimulateTransactions() bool + CaptureAutomationCustomTelemetry() bool +} diff --git a/core/config/ocr_config.go b/core/config/ocr_config.go new file mode 100644 index 00000000..bea4f072 --- /dev/null +++ b/core/config/ocr_config.go @@ -0,0 +1,24 @@ +package config + +import ( + "time" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +// OCR is a subset of global config relevant to OCR v1. +type OCR interface { + Enabled() bool + // OCR1 config, can override in jobs, only ethereum. 
+ BlockchainTimeout() time.Duration + ContractPollInterval() time.Duration + ContractSubscribeInterval() time.Duration + KeyBundleID() (string, error) + ObservationTimeout() time.Duration + SimulateTransactions() bool + TransmitterAddress() (ethkey.EIP55Address, error) // OCR2 can support non-evm changes + // OCR1 config, cannot override in jobs + TraceLogging() bool + DefaultTransactionQueueDepth() uint32 + CaptureEATelemetry() bool +} diff --git a/core/config/p2p_config.go b/core/config/p2p_config.go new file mode 100644 index 00000000..84f438ad --- /dev/null +++ b/core/config/p2p_config.go @@ -0,0 +1,14 @@ +package config + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" +) + +type P2P interface { + V2() V2 + PeerID() p2pkey.PeerID + IncomingMessageBufferSize() int + OutgoingMessageBufferSize() int + TraceLogging() bool + Enabled() bool +} diff --git a/core/config/p2p_v2_config.go b/core/config/p2p_v2_config.go new file mode 100644 index 00000000..53ead49b --- /dev/null +++ b/core/config/p2p_v2_config.go @@ -0,0 +1,16 @@ +package config + +import ( + commonconfig "github.com/goplugin/plugin-common/pkg/config" + + ocrcommontypes "github.com/goplugin/libocr/commontypes" +) + +type V2 interface { + Enabled() bool + AnnounceAddresses() []string + DefaultBootstrappers() (locators []ocrcommontypes.BootstrapperLocator) + DeltaDial() commonconfig.Duration + DeltaReconcile() commonconfig.Duration + ListenAddresses() []string +} diff --git a/core/config/parse/parsers.go b/core/config/parse/parsers.go new file mode 100644 index 00000000..ded44c1b --- /dev/null +++ b/core/config/parse/parsers.go @@ -0,0 +1,123 @@ +package parse + +import ( + "fmt" + "math/big" + "net" + "net/url" + "path/filepath" + "strconv" + "time" + + "github.com/mitchellh/go-homedir" + "github.com/pkg/errors" + "go.uber.org/zap/zapcore" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + 
"github.com/goplugin/pluginv3.0/v2/core/static" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func String(str string) (string, error) { + return str, nil +} + +func Link(str string) (*commonassets.Link, error) { + i, ok := new(commonassets.Link).SetString(str, 10) + if !ok { + return i, fmt.Errorf("unable to parse '%s'", str) + } + return i, nil +} + +func LogLevel(str string) (zapcore.Level, error) { + var lvl zapcore.Level + err := lvl.Set(str) + return lvl, err +} + +func Uint16(s string) (uint16, error) { + v, err := strconv.ParseUint(s, 10, 16) + return uint16(v), err +} + +func Uint32(s string) (uint32, error) { + v, err := strconv.ParseUint(s, 10, 32) + return uint32(v), err +} + +func Uint64(s string) (uint64, error) { + v, err := strconv.ParseUint(s, 10, 64) + return v, err +} + +func Int64(s string) (int64, error) { + v, err := strconv.ParseInt(s, 10, 64) + return v, err +} + +func F32(s string) (float32, error) { + v, err := strconv.ParseFloat(s, 32) + return float32(v), err +} + +// URL converts string to parsed URL type +func URL(s string) (interface{}, error) { + return url.Parse(s) +} + +// IP converts string to parsed IP type +func IP(s string) (interface{}, error) { + return net.ParseIP(s), nil +} + +// Duration converts string to parsed Duratin type +func Duration(s string) (interface{}, error) { + return time.ParseDuration(s) +} + +func FileSize(s string) (utils.FileSize, error) { + var fs utils.FileSize + err := fs.UnmarshalText([]byte(s)) + return fs, err +} + +// Bool parses string as a bool type +func Bool(s string) (interface{}, error) { + return strconv.ParseBool(s) +} + +func BigInt(str string) (*big.Int, error) { + i, ok := new(big.Int).SetString(str, 10) + if !ok { + return i, fmt.Errorf("unable to parse %v into *big.Int(base 10)", str) + } + return i, nil +} + +func Wei(str string) (w *assets.Wei, err error) { + w = new(assets.Wei) + err = w.UnmarshalText([]byte(str)) + return w, err +} + +func HomeDir(str string) (string, 
error) { + exp, err := homedir.Expand(str) + if err != nil { + return "", err + } + return filepath.ToSlash(exp), nil +} + +func DatabaseURL(s string) (url.URL, error) { + uri, err := url.Parse(s) + if err != nil { + return url.URL{}, errors.Wrapf(err, "invalid database url %s", s) + } + if uri.String() == "" { + return *uri, nil + } + static.SetConsumerName(uri, "Default", nil) + return *uri, nil +} diff --git a/core/config/password_config.go b/core/config/password_config.go new file mode 100644 index 00000000..f89ba1d3 --- /dev/null +++ b/core/config/password_config.go @@ -0,0 +1,6 @@ +package config + +type Password interface { + Keystore() string + VRF() string +} diff --git a/core/config/prometheus.go b/core/config/prometheus.go new file mode 100644 index 00000000..848627ac --- /dev/null +++ b/core/config/prometheus.go @@ -0,0 +1,5 @@ +package config + +type Prometheus interface { + AuthToken() string +} diff --git a/core/config/pyroscope_config.go b/core/config/pyroscope_config.go new file mode 100644 index 00000000..e20fa3c0 --- /dev/null +++ b/core/config/pyroscope_config.go @@ -0,0 +1,7 @@ +package config + +type Pyroscope interface { + AuthToken() string + ServerAddress() string + Environment() string +} diff --git a/core/config/sentry_config.go b/core/config/sentry_config.go new file mode 100644 index 00000000..494fb188 --- /dev/null +++ b/core/config/sentry_config.go @@ -0,0 +1,8 @@ +package config + +type Sentry interface { + DSN() string + Debug() bool + Environment() string + Release() string +} diff --git a/core/config/telemetry_ingress_config.go b/core/config/telemetry_ingress_config.go new file mode 100644 index 00000000..a4923391 --- /dev/null +++ b/core/config/telemetry_ingress_config.go @@ -0,0 +1,27 @@ +package config + +import ( + "net/url" + "time" +) + +//go:generate mockery --quiet --name TelemetryIngress --output ./mocks/ --case=underscore --filename telemetry_ingress.go + +type TelemetryIngress interface { + Logging() bool + UniConn() 
bool + BufferSize() uint + MaxBatchSize() uint + SendInterval() time.Duration + SendTimeout() time.Duration + UseBatchSend() bool + Endpoints() []TelemetryIngressEndpoint +} + +//go:generate mockery --quiet --name TelemetryIngressEndpoint --output ./mocks/ --case=underscore --filename telemetry_ingress_endpoint.go +type TelemetryIngressEndpoint interface { + Network() string + ChainID() string + ServerPubKey() string + URL() *url.URL +} diff --git a/core/config/threshold_config.go b/core/config/threshold_config.go new file mode 100644 index 00000000..0ce11e26 --- /dev/null +++ b/core/config/threshold_config.go @@ -0,0 +1,5 @@ +package config + +type Threshold interface { + ThresholdKeyShare() string +} diff --git a/core/config/toml/types.go b/core/config/toml/types.go new file mode 100644 index 00000000..98e83aea --- /dev/null +++ b/core/config/toml/types.go @@ -0,0 +1,1534 @@ +package toml + +import ( + "errors" + "fmt" + "net" + "net/url" + "regexp" + "strings" + + "github.com/google/uuid" + "go.uber.org/multierr" + "go.uber.org/zap/zapcore" + + ocrcommontypes "github.com/goplugin/libocr/commontypes" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/build" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/parse" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" + configutils "github.com/goplugin/pluginv3.0/v2/core/utils/config" +) + +var ErrUnsupported = errors.New("unsupported with config v2") + +// Core holds the core configuration. See plugin.Config for more information. 
+type Core struct { + // General/misc + AppID uuid.UUID `toml:"-"` // random or test + InsecureFastScrypt *bool + RootDir *string + ShutdownGracePeriod *commonconfig.Duration + + Feature Feature `toml:",omitempty"` + Database Database `toml:",omitempty"` + TelemetryIngress TelemetryIngress `toml:",omitempty"` + AuditLogger AuditLogger `toml:",omitempty"` + Log Log `toml:",omitempty"` + WebServer WebServer `toml:",omitempty"` + JobPipeline JobPipeline `toml:",omitempty"` + FluxMonitor FluxMonitor `toml:",omitempty"` + OCR2 OCR2 `toml:",omitempty"` + OCR OCR `toml:",omitempty"` + P2P P2P `toml:",omitempty"` + Keeper Keeper `toml:",omitempty"` + AutoPprof AutoPprof `toml:",omitempty"` + Pyroscope Pyroscope `toml:",omitempty"` + Sentry Sentry `toml:",omitempty"` + Insecure Insecure `toml:",omitempty"` + Tracing Tracing `toml:",omitempty"` + Mercury Mercury `toml:",omitempty"` +} + +// SetFrom updates c with any non-nil values from f. (currently TOML field only!) +func (c *Core) SetFrom(f *Core) { + if v := f.InsecureFastScrypt; v != nil { + c.InsecureFastScrypt = v + } + if v := f.RootDir; v != nil { + c.RootDir = v + } + if v := f.ShutdownGracePeriod; v != nil { + c.ShutdownGracePeriod = v + } + + c.Feature.setFrom(&f.Feature) + c.Database.setFrom(&f.Database) + c.TelemetryIngress.setFrom(&f.TelemetryIngress) + c.AuditLogger.SetFrom(&f.AuditLogger) + c.Log.setFrom(&f.Log) + + c.WebServer.setFrom(&f.WebServer) + c.JobPipeline.setFrom(&f.JobPipeline) + + c.FluxMonitor.setFrom(&f.FluxMonitor) + c.OCR2.setFrom(&f.OCR2) + c.OCR.setFrom(&f.OCR) + c.P2P.setFrom(&f.P2P) + c.Keeper.setFrom(&f.Keeper) + c.Mercury.setFrom(&f.Mercury) + + c.AutoPprof.setFrom(&f.AutoPprof) + c.Pyroscope.setFrom(&f.Pyroscope) + c.Sentry.setFrom(&f.Sentry) + c.Insecure.setFrom(&f.Insecure) + c.Tracing.setFrom(&f.Tracing) +} + +func (c *Core) ValidateConfig() (err error) { + _, verr := parse.HomeDir(*c.RootDir) + if verr != nil { + err = multierr.Append(err, configutils.ErrInvalid{Name: "RootDir", 
Value: true, Msg: fmt.Sprintf("Failed to expand RootDir. Please use an explicit path: %s", verr)}) + } + + return err +} + +type Secrets struct { + Database DatabaseSecrets `toml:",omitempty"` + Password Passwords `toml:",omitempty"` + WebServer WebServerSecrets `toml:",omitempty"` + Pyroscope PyroscopeSecrets `toml:",omitempty"` + Prometheus PrometheusSecrets `toml:",omitempty"` + Mercury MercurySecrets `toml:",omitempty"` + Threshold ThresholdKeyShareSecrets `toml:",omitempty"` +} + +func dbURLPasswordComplexity(err error) string { + return fmt.Sprintf("missing or insufficiently complex password: %s. Database should be secured by a password matching the following complexity requirements: "+utils.PasswordComplexityRequirements, err) +} + +type DatabaseSecrets struct { + URL *models.SecretURL + BackupURL *models.SecretURL + AllowSimplePasswords *bool +} + +func validateDBURL(dbURI url.URL) error { + if strings.Contains(dbURI.Redacted(), "_test") { + return nil + } + + // url params take priority if present, multiple params are ignored by postgres (it picks the first) + q := dbURI.Query() + // careful, this is a raw database password + pw := q.Get("password") + if pw == "" { + // fallback to user info + userInfo := dbURI.User + if userInfo == nil { + return fmt.Errorf("DB URL must be authenticated; plaintext URLs are not allowed") + } + var pwSet bool + pw, pwSet = userInfo.Password() + if !pwSet { + return fmt.Errorf("DB URL must be authenticated; password is required") + } + } + + return utils.VerifyPasswordComplexity(pw) +} + +func (d *DatabaseSecrets) ValidateConfig() (err error) { + return d.validateConfig(build.Mode()) +} + +func (d *DatabaseSecrets) validateConfig(buildMode string) (err error) { + if d.URL == nil || (*url.URL)(d.URL).String() == "" { + err = multierr.Append(err, configutils.ErrEmpty{Name: "URL", Msg: "must be provided and non-empty"}) + } else if *d.AllowSimplePasswords && buildMode == build.Prod { + err = multierr.Append(err, 
configutils.ErrInvalid{Name: "AllowSimplePasswords", Value: true, Msg: "insecure configs are not allowed on secure builds"}) + } else if !*d.AllowSimplePasswords { + if verr := validateDBURL((url.URL)(*d.URL)); verr != nil { + err = multierr.Append(err, configutils.ErrInvalid{Name: "URL", Value: "*****", Msg: dbURLPasswordComplexity(verr)}) + } + } + if d.BackupURL != nil && !*d.AllowSimplePasswords { + if verr := validateDBURL((url.URL)(*d.BackupURL)); verr != nil { + err = multierr.Append(err, configutils.ErrInvalid{Name: "BackupURL", Value: "*****", Msg: dbURLPasswordComplexity(verr)}) + } + } + return err +} + +func (d *DatabaseSecrets) SetFrom(f *DatabaseSecrets) (err error) { + err = d.validateMerge(f) + if err != nil { + return err + } + + if v := f.AllowSimplePasswords; v != nil { + d.AllowSimplePasswords = v + } + if v := f.BackupURL; v != nil { + d.BackupURL = v + } + if v := f.URL; v != nil { + d.URL = v + } + return nil +} + +func (d *DatabaseSecrets) validateMerge(f *DatabaseSecrets) (err error) { + if d.AllowSimplePasswords != nil && f.AllowSimplePasswords != nil { + err = multierr.Append(err, configutils.ErrOverride{Name: "AllowSimplePasswords"}) + } + + if d.BackupURL != nil && f.BackupURL != nil { + err = multierr.Append(err, configutils.ErrOverride{Name: "BackupURL"}) + } + + if d.URL != nil && f.URL != nil { + err = multierr.Append(err, configutils.ErrOverride{Name: "URL"}) + } + + return err +} + +type Passwords struct { + Keystore *models.Secret + VRF *models.Secret +} + +func (p *Passwords) SetFrom(f *Passwords) (err error) { + err = p.validateMerge(f) + if err != nil { + return err + } + + if v := f.Keystore; v != nil { + p.Keystore = v + } + if v := f.VRF; v != nil { + p.VRF = v + } + + return nil +} + +func (p *Passwords) validateMerge(f *Passwords) (err error) { + if p.Keystore != nil && f.Keystore != nil { + err = multierr.Append(err, configutils.ErrOverride{Name: "Keystore"}) + } + + if p.VRF != nil && f.VRF != nil { + err = 
multierr.Append(err, configutils.ErrOverride{Name: "VRF"}) + } + + return err +} + +func (p *Passwords) ValidateConfig() (err error) { + if p.Keystore == nil || *p.Keystore == "" { + err = multierr.Append(err, configutils.ErrEmpty{Name: "Keystore", Msg: "must be provided and non-empty"}) + } + return err +} + +type PyroscopeSecrets struct { + AuthToken *models.Secret +} + +func (p *PyroscopeSecrets) SetFrom(f *PyroscopeSecrets) (err error) { + err = p.validateMerge(f) + if err != nil { + return err + } + + if v := f.AuthToken; v != nil { + p.AuthToken = v + } + + return nil +} + +func (p *PyroscopeSecrets) validateMerge(f *PyroscopeSecrets) (err error) { + if p.AuthToken != nil && f.AuthToken != nil { + err = multierr.Append(err, configutils.ErrOverride{Name: "AuthToken"}) + } + + return err +} + +type PrometheusSecrets struct { + AuthToken *models.Secret +} + +func (p *PrometheusSecrets) SetFrom(f *PrometheusSecrets) (err error) { + err = p.validateMerge(f) + if err != nil { + return err + } + + if v := f.AuthToken; v != nil { + p.AuthToken = v + } + + return nil +} + +func (p *PrometheusSecrets) validateMerge(f *PrometheusSecrets) (err error) { + if p.AuthToken != nil && f.AuthToken != nil { + err = multierr.Append(err, configutils.ErrOverride{Name: "AuthToken"}) + } + + return err +} + +type Feature struct { + FeedsManager *bool + LogPoller *bool + UICSAKeys *bool +} + +func (f *Feature) setFrom(f2 *Feature) { + if v := f2.FeedsManager; v != nil { + f.FeedsManager = v + } + if v := f2.LogPoller; v != nil { + f.LogPoller = v + } + if v := f2.UICSAKeys; v != nil { + f.UICSAKeys = v + } +} + +type Database struct { + DefaultIdleInTxSessionTimeout *commonconfig.Duration + DefaultLockTimeout *commonconfig.Duration + DefaultQueryTimeout *commonconfig.Duration + Dialect dialects.DialectName `toml:"-"` + LogQueries *bool + MaxIdleConns *int64 + MaxOpenConns *int64 + MigrateOnStartup *bool + + Backup DatabaseBackup `toml:",omitempty"` + Listener DatabaseListener 
`toml:",omitempty"` + Lock DatabaseLock `toml:",omitempty"` +} + +func (d *Database) setFrom(f *Database) { + if v := f.DefaultIdleInTxSessionTimeout; v != nil { + d.DefaultIdleInTxSessionTimeout = v + } + if v := f.DefaultLockTimeout; v != nil { + d.DefaultLockTimeout = v + } + if v := f.DefaultQueryTimeout; v != nil { + d.DefaultQueryTimeout = v + } + if v := f.LogQueries; v != nil { + d.LogQueries = v + } + if v := f.MigrateOnStartup; v != nil { + d.MigrateOnStartup = v + } + if v := f.MaxIdleConns; v != nil { + d.MaxIdleConns = v + } + if v := f.MaxOpenConns; v != nil { + d.MaxOpenConns = v + } + + d.Backup.setFrom(&f.Backup) + d.Listener.setFrom(&f.Listener) + d.Lock.setFrom(&f.Lock) +} + +type DatabaseListener struct { + MaxReconnectDuration *commonconfig.Duration + MinReconnectInterval *commonconfig.Duration + FallbackPollInterval *commonconfig.Duration +} + +func (d *DatabaseListener) setFrom(f *DatabaseListener) { + if v := f.MaxReconnectDuration; v != nil { + d.MaxReconnectDuration = v + } + if v := f.MinReconnectInterval; v != nil { + d.MinReconnectInterval = v + } + if v := f.FallbackPollInterval; v != nil { + d.FallbackPollInterval = v + } +} + +type DatabaseLock struct { + Enabled *bool + LeaseDuration *commonconfig.Duration + LeaseRefreshInterval *commonconfig.Duration +} + +func (l *DatabaseLock) Mode() string { + if *l.Enabled { + return "lease" + } + return "none" +} + +func (l *DatabaseLock) ValidateConfig() (err error) { + if l.LeaseRefreshInterval.Duration() > l.LeaseDuration.Duration()/2 { + err = multierr.Append(err, configutils.ErrInvalid{Name: "LeaseRefreshInterval", Value: l.LeaseRefreshInterval.String(), + Msg: fmt.Sprintf("must be less than or equal to half of LeaseDuration (%s)", l.LeaseDuration.String())}) + } + return +} + +func (l *DatabaseLock) setFrom(f *DatabaseLock) { + if v := f.Enabled; v != nil { + l.Enabled = v + } + if v := f.LeaseDuration; v != nil { + l.LeaseDuration = v + } + if v := f.LeaseRefreshInterval; v != nil { + 
l.LeaseRefreshInterval = v + } +} + +// DatabaseBackup +// +// Note: url is stored in Secrets.DatabaseBackupURL +type DatabaseBackup struct { + Dir *string + Frequency *commonconfig.Duration + Mode *config.DatabaseBackupMode + OnVersionUpgrade *bool +} + +func (d *DatabaseBackup) setFrom(f *DatabaseBackup) { + if v := f.Dir; v != nil { + d.Dir = v + } + if v := f.Frequency; v != nil { + d.Frequency = v + } + if v := f.Mode; v != nil { + d.Mode = v + } + if v := f.OnVersionUpgrade; v != nil { + d.OnVersionUpgrade = v + } +} + +type TelemetryIngress struct { + UniConn *bool + Logging *bool + BufferSize *uint16 + MaxBatchSize *uint16 + SendInterval *commonconfig.Duration + SendTimeout *commonconfig.Duration + UseBatchSend *bool + Endpoints []TelemetryIngressEndpoint `toml:",omitempty"` +} + +type TelemetryIngressEndpoint struct { + Network *string + ChainID *string + URL *commonconfig.URL + ServerPubKey *string +} + +func (t *TelemetryIngress) setFrom(f *TelemetryIngress) { + if v := f.UniConn; v != nil { + t.UniConn = v + } + if v := f.Logging; v != nil { + t.Logging = v + } + if v := f.BufferSize; v != nil { + t.BufferSize = v + } + if v := f.MaxBatchSize; v != nil { + t.MaxBatchSize = v + } + if v := f.SendInterval; v != nil { + t.SendInterval = v + } + if v := f.SendTimeout; v != nil { + t.SendTimeout = v + } + if v := f.UseBatchSend; v != nil { + t.UseBatchSend = v + } + if v := f.Endpoints; v != nil { + t.Endpoints = v + } +} + +type AuditLogger struct { + Enabled *bool + ForwardToUrl *commonconfig.URL + JsonWrapperKey *string + Headers *[]models.ServiceHeader +} + +func (p *AuditLogger) SetFrom(f *AuditLogger) { + if v := f.Enabled; v != nil { + p.Enabled = v + } + if v := f.ForwardToUrl; v != nil { + p.ForwardToUrl = v + } + if v := f.JsonWrapperKey; v != nil { + p.JsonWrapperKey = v + } + if v := f.Headers; v != nil { + p.Headers = v + } + +} + +// LogLevel replaces dpanic with crit/CRIT +type LogLevel zapcore.Level + +func (l LogLevel) String() string { + zl 
:= zapcore.Level(l) + if zl == zapcore.DPanicLevel { + return "crit" + } + return zl.String() +} + +func (l LogLevel) CapitalString() string { + zl := zapcore.Level(l) + if zl == zapcore.DPanicLevel { + return "CRIT" + } + return zl.CapitalString() +} + +func (l LogLevel) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +func (l *LogLevel) UnmarshalText(text []byte) error { + switch string(text) { + case "crit", "CRIT": + *l = LogLevel(zapcore.DPanicLevel) + return nil + } + return (*zapcore.Level)(l).UnmarshalText(text) +} + +type Log struct { + Level *LogLevel + JSONConsole *bool + UnixTS *bool + + File LogFile `toml:",omitempty"` +} + +func (l *Log) setFrom(f *Log) { + if v := f.Level; v != nil { + l.Level = v + } + if v := f.JSONConsole; v != nil { + l.JSONConsole = v + } + if v := f.UnixTS; v != nil { + l.UnixTS = v + } + l.File.setFrom(&f.File) +} + +type LogFile struct { + Dir *string + MaxSize *utils.FileSize + MaxAgeDays *int64 + MaxBackups *int64 +} + +func (l *LogFile) setFrom(f *LogFile) { + if v := f.Dir; v != nil { + l.Dir = v + } + if v := f.MaxSize; v != nil { + l.MaxSize = v + } + if v := f.MaxAgeDays; v != nil { + l.MaxAgeDays = v + } + if v := f.MaxBackups; v != nil { + l.MaxBackups = v + } +} + +type WebServer struct { + AuthenticationMethod *string + AllowOrigins *string + BridgeResponseURL *commonconfig.URL + BridgeCacheTTL *commonconfig.Duration + HTTPWriteTimeout *commonconfig.Duration + HTTPPort *uint16 + SecureCookies *bool + SessionTimeout *commonconfig.Duration + SessionReaperExpiration *commonconfig.Duration + HTTPMaxSize *utils.FileSize + StartTimeout *commonconfig.Duration + ListenIP *net.IP + + LDAP WebServerLDAP `toml:",omitempty"` + MFA WebServerMFA `toml:",omitempty"` + RateLimit WebServerRateLimit `toml:",omitempty"` + TLS WebServerTLS `toml:",omitempty"` +} + +func (w *WebServer) setFrom(f *WebServer) { + if v := f.AuthenticationMethod; v != nil { + w.AuthenticationMethod = v + } + if v := f.AllowOrigins; v 
!= nil { + w.AllowOrigins = v + } + if v := f.BridgeResponseURL; v != nil { + w.BridgeResponseURL = v + } + if v := f.BridgeCacheTTL; v != nil { + w.BridgeCacheTTL = v + } + if v := f.HTTPWriteTimeout; v != nil { + w.HTTPWriteTimeout = v + } + if v := f.ListenIP; v != nil { + w.ListenIP = v + } + if v := f.HTTPPort; v != nil { + w.HTTPPort = v + } + if v := f.SecureCookies; v != nil { + w.SecureCookies = v + } + if v := f.SessionTimeout; v != nil { + w.SessionTimeout = v + } + if v := f.SessionReaperExpiration; v != nil { + w.SessionReaperExpiration = v + } + if v := f.StartTimeout; v != nil { + w.StartTimeout = v + } + if v := f.HTTPMaxSize; v != nil { + w.HTTPMaxSize = v + } + + w.LDAP.setFrom(&f.LDAP) + w.MFA.setFrom(&f.MFA) + w.RateLimit.setFrom(&f.RateLimit) + w.TLS.setFrom(&f.TLS) +} + +func (w *WebServer) ValidateConfig() (err error) { + // Validate LDAP fields when authentication method is LDAPAuth + if *w.AuthenticationMethod != string(sessions.LDAPAuth) { + return + } + + // Assert LDAP fields when AuthMethod set to LDAP + if *w.LDAP.BaseDN == "" { + err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.BaseDN", Msg: "LDAP BaseDN can not be empty"}) + } + if *w.LDAP.BaseUserAttr == "" { + err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.BaseUserAttr", Msg: "LDAP BaseUserAttr can not be empty"}) + } + if *w.LDAP.UsersDN == "" { + err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.UsersDN", Msg: "LDAP UsersDN can not be empty"}) + } + if *w.LDAP.GroupsDN == "" { + err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.GroupsDN", Msg: "LDAP GroupsDN can not be empty"}) + } + if *w.LDAP.AdminUserGroupCN == "" { + err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.AdminUserGroupCN", Msg: "LDAP AdminUserGroupCN can not be empty"}) + } + if *w.LDAP.EditUserGroupCN == "" { + err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.EditUserGroupCN", Msg: "LDAP EditUserGroupCN can not be empty"}) + } + if 
*w.LDAP.RunUserGroupCN == "" { + err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.RunUserGroupCN", Msg: "LDAP RunUserGroupCN can not be empty"}) + } + if *w.LDAP.ReadUserGroupCN == "" { + err = multierr.Append(err, configutils.ErrInvalid{Name: "LDAP.ReadUserGroupCN", Msg: "LDAP ReadUserGroupCN can not be empty"}) + } + return err +} + +type WebServerMFA struct { + RPID *string + RPOrigin *string +} + +func (w *WebServerMFA) setFrom(f *WebServerMFA) { + if v := f.RPID; v != nil { + w.RPID = v + } + if v := f.RPOrigin; v != nil { + w.RPOrigin = v + } +} + +type WebServerRateLimit struct { + Authenticated *int64 + AuthenticatedPeriod *commonconfig.Duration + Unauthenticated *int64 + UnauthenticatedPeriod *commonconfig.Duration +} + +func (w *WebServerRateLimit) setFrom(f *WebServerRateLimit) { + if v := f.Authenticated; v != nil { + w.Authenticated = v + } + if v := f.AuthenticatedPeriod; v != nil { + w.AuthenticatedPeriod = v + } + if v := f.Unauthenticated; v != nil { + w.Unauthenticated = v + } + if v := f.UnauthenticatedPeriod; v != nil { + w.UnauthenticatedPeriod = v + } +} + +type WebServerTLS struct { + CertPath *string + ForceRedirect *bool + Host *string + HTTPSPort *uint16 + KeyPath *string + ListenIP *net.IP +} + +func (w *WebServerTLS) setFrom(f *WebServerTLS) { + if v := f.CertPath; v != nil { + w.CertPath = v + } + if v := f.ForceRedirect; v != nil { + w.ForceRedirect = v + } + if v := f.Host; v != nil { + w.Host = v + } + if v := f.HTTPSPort; v != nil { + w.HTTPSPort = v + } + if v := f.KeyPath; v != nil { + w.KeyPath = v + } + if v := f.ListenIP; v != nil { + w.ListenIP = v + } +} + +type WebServerLDAP struct { + ServerTLS *bool + SessionTimeout *commonconfig.Duration + QueryTimeout *commonconfig.Duration + BaseUserAttr *string + BaseDN *string + UsersDN *string + GroupsDN *string + ActiveAttribute *string + ActiveAttributeAllowedValue *string + AdminUserGroupCN *string + EditUserGroupCN *string + RunUserGroupCN *string + ReadUserGroupCN 
*string + UserApiTokenEnabled *bool + UserAPITokenDuration *commonconfig.Duration + UpstreamSyncInterval *commonconfig.Duration + UpstreamSyncRateLimit *commonconfig.Duration +} + +func (w *WebServerLDAP) setFrom(f *WebServerLDAP) { + if v := f.ServerTLS; v != nil { + w.ServerTLS = v + } + if v := f.SessionTimeout; v != nil { + w.SessionTimeout = v + } + if v := f.SessionTimeout; v != nil { + w.SessionTimeout = v + } + if v := f.QueryTimeout; v != nil { + w.QueryTimeout = v + } + if v := f.BaseUserAttr; v != nil { + w.BaseUserAttr = v + } + if v := f.BaseDN; v != nil { + w.BaseDN = v + } + if v := f.UsersDN; v != nil { + w.UsersDN = v + } + if v := f.GroupsDN; v != nil { + w.GroupsDN = v + } + if v := f.ActiveAttribute; v != nil { + w.ActiveAttribute = v + } + if v := f.ActiveAttributeAllowedValue; v != nil { + w.ActiveAttributeAllowedValue = v + } + if v := f.AdminUserGroupCN; v != nil { + w.AdminUserGroupCN = v + } + if v := f.EditUserGroupCN; v != nil { + w.EditUserGroupCN = v + } + if v := f.RunUserGroupCN; v != nil { + w.RunUserGroupCN = v + } + if v := f.ReadUserGroupCN; v != nil { + w.ReadUserGroupCN = v + } + if v := f.UserApiTokenEnabled; v != nil { + w.UserApiTokenEnabled = v + } + if v := f.UserAPITokenDuration; v != nil { + w.UserAPITokenDuration = v + } + if v := f.UpstreamSyncInterval; v != nil { + w.UpstreamSyncInterval = v + } + if v := f.UpstreamSyncRateLimit; v != nil { + w.UpstreamSyncRateLimit = v + } +} + +type WebServerLDAPSecrets struct { + ServerAddress *models.SecretURL + ReadOnlyUserLogin *models.Secret + ReadOnlyUserPass *models.Secret +} + +func (w *WebServerLDAPSecrets) setFrom(f *WebServerLDAPSecrets) { + if v := f.ServerAddress; v != nil { + w.ServerAddress = v + } + if v := f.ReadOnlyUserLogin; v != nil { + w.ReadOnlyUserLogin = v + } + if v := f.ReadOnlyUserPass; v != nil { + w.ReadOnlyUserPass = v + } +} + +type WebServerSecrets struct { + LDAP WebServerLDAPSecrets `toml:",omitempty"` +} + +func (w *WebServerSecrets) SetFrom(f 
*WebServerSecrets) error { + w.LDAP.setFrom(&f.LDAP) + return nil +} + +type JobPipeline struct { + ExternalInitiatorsEnabled *bool + MaxRunDuration *commonconfig.Duration + MaxSuccessfulRuns *uint64 + ReaperInterval *commonconfig.Duration + ReaperThreshold *commonconfig.Duration + ResultWriteQueueDepth *uint32 + + HTTPRequest JobPipelineHTTPRequest `toml:",omitempty"` +} + +func (j *JobPipeline) setFrom(f *JobPipeline) { + if v := f.ExternalInitiatorsEnabled; v != nil { + j.ExternalInitiatorsEnabled = v + } + if v := f.MaxRunDuration; v != nil { + j.MaxRunDuration = v + } + if v := f.MaxSuccessfulRuns; v != nil { + j.MaxSuccessfulRuns = v + } + if v := f.ReaperInterval; v != nil { + j.ReaperInterval = v + } + if v := f.ReaperThreshold; v != nil { + j.ReaperThreshold = v + } + if v := f.ResultWriteQueueDepth; v != nil { + j.ResultWriteQueueDepth = v + } + j.HTTPRequest.setFrom(&f.HTTPRequest) + +} + +type JobPipelineHTTPRequest struct { + DefaultTimeout *commonconfig.Duration + MaxSize *utils.FileSize +} + +func (j *JobPipelineHTTPRequest) setFrom(f *JobPipelineHTTPRequest) { + if v := f.DefaultTimeout; v != nil { + j.DefaultTimeout = v + } + if v := f.MaxSize; v != nil { + j.MaxSize = v + } +} + +type FluxMonitor struct { + DefaultTransactionQueueDepth *uint32 + SimulateTransactions *bool +} + +func (m *FluxMonitor) setFrom(f *FluxMonitor) { + if v := f.DefaultTransactionQueueDepth; v != nil { + m.DefaultTransactionQueueDepth = v + } + if v := f.SimulateTransactions; v != nil { + m.SimulateTransactions = v + } +} + +type OCR2 struct { + Enabled *bool + ContractConfirmations *uint32 + BlockchainTimeout *commonconfig.Duration + ContractPollInterval *commonconfig.Duration + ContractSubscribeInterval *commonconfig.Duration + ContractTransmitterTransmitTimeout *commonconfig.Duration + DatabaseTimeout *commonconfig.Duration + KeyBundleID *models.Sha256Hash + CaptureEATelemetry *bool + CaptureAutomationCustomTelemetry *bool + DefaultTransactionQueueDepth *uint32 + 
SimulateTransactions *bool + TraceLogging *bool +} + +func (o *OCR2) setFrom(f *OCR2) { + if v := f.Enabled; v != nil { + o.Enabled = v + } + if v := f.ContractConfirmations; v != nil { + o.ContractConfirmations = v + } + if v := f.BlockchainTimeout; v != nil { + o.BlockchainTimeout = v + } + if v := f.ContractPollInterval; v != nil { + o.ContractPollInterval = v + } + if v := f.ContractSubscribeInterval; v != nil { + o.ContractSubscribeInterval = v + } + if v := f.ContractTransmitterTransmitTimeout; v != nil { + o.ContractTransmitterTransmitTimeout = v + } + if v := f.DatabaseTimeout; v != nil { + o.DatabaseTimeout = v + } + if v := f.KeyBundleID; v != nil { + o.KeyBundleID = v + } + if v := f.CaptureEATelemetry; v != nil { + o.CaptureEATelemetry = v + } + if v := f.CaptureAutomationCustomTelemetry; v != nil { + o.CaptureAutomationCustomTelemetry = v + } + if v := f.DefaultTransactionQueueDepth; v != nil { + o.DefaultTransactionQueueDepth = v + } + if v := f.SimulateTransactions; v != nil { + o.SimulateTransactions = v + } + if v := f.TraceLogging; v != nil { + o.TraceLogging = v + } +} + +type OCR struct { + Enabled *bool + ObservationTimeout *commonconfig.Duration + BlockchainTimeout *commonconfig.Duration + ContractPollInterval *commonconfig.Duration + ContractSubscribeInterval *commonconfig.Duration + DefaultTransactionQueueDepth *uint32 + // Optional + KeyBundleID *models.Sha256Hash + SimulateTransactions *bool + TransmitterAddress *ethkey.EIP55Address + CaptureEATelemetry *bool + TraceLogging *bool +} + +func (o *OCR) setFrom(f *OCR) { + if v := f.Enabled; v != nil { + o.Enabled = v + } + if v := f.ObservationTimeout; v != nil { + o.ObservationTimeout = v + } + if v := f.BlockchainTimeout; v != nil { + o.BlockchainTimeout = v + } + if v := f.ContractPollInterval; v != nil { + o.ContractPollInterval = v + } + if v := f.ContractSubscribeInterval; v != nil { + o.ContractSubscribeInterval = v + } + if v := f.DefaultTransactionQueueDepth; v != nil { + 
o.DefaultTransactionQueueDepth = v + } + if v := f.KeyBundleID; v != nil { + o.KeyBundleID = v + } + if v := f.SimulateTransactions; v != nil { + o.SimulateTransactions = v + } + if v := f.TransmitterAddress; v != nil { + o.TransmitterAddress = v + } + if v := f.CaptureEATelemetry; v != nil { + o.CaptureEATelemetry = v + } + if v := f.TraceLogging; v != nil { + o.TraceLogging = v + } +} + +type P2P struct { + IncomingMessageBufferSize *int64 + OutgoingMessageBufferSize *int64 + PeerID *p2pkey.PeerID + TraceLogging *bool + + V2 P2PV2 `toml:",omitempty"` +} + +func (p *P2P) setFrom(f *P2P) { + if v := f.IncomingMessageBufferSize; v != nil { + p.IncomingMessageBufferSize = v + } + if v := f.OutgoingMessageBufferSize; v != nil { + p.OutgoingMessageBufferSize = v + } + if v := f.PeerID; v != nil { + p.PeerID = v + } + if v := f.TraceLogging; v != nil { + p.TraceLogging = v + } + + p.V2.setFrom(&f.V2) +} + +type P2PV2 struct { + Enabled *bool + AnnounceAddresses *[]string + DefaultBootstrappers *[]ocrcommontypes.BootstrapperLocator + DeltaDial *commonconfig.Duration + DeltaReconcile *commonconfig.Duration + ListenAddresses *[]string +} + +func (p *P2PV2) setFrom(f *P2PV2) { + if v := f.Enabled; v != nil { + p.Enabled = v + } + if v := f.AnnounceAddresses; v != nil { + p.AnnounceAddresses = v + } + if v := f.DefaultBootstrappers; v != nil { + p.DefaultBootstrappers = v + } + if v := f.DeltaDial; v != nil { + p.DeltaDial = v + } + if v := f.DeltaReconcile; v != nil { + p.DeltaReconcile = v + } + if v := f.ListenAddresses; v != nil { + p.ListenAddresses = v + } +} + +type Keeper struct { + DefaultTransactionQueueDepth *uint32 + GasPriceBufferPercent *uint16 + GasTipCapBufferPercent *uint16 + BaseFeeBufferPercent *uint16 + MaxGracePeriod *int64 + TurnLookBack *int64 + + Registry KeeperRegistry `toml:",omitempty"` +} + +func (k *Keeper) setFrom(f *Keeper) { + if v := f.DefaultTransactionQueueDepth; v != nil { + k.DefaultTransactionQueueDepth = v + } + if v := 
f.GasPriceBufferPercent; v != nil { + k.GasPriceBufferPercent = v + } + if v := f.GasTipCapBufferPercent; v != nil { + k.GasTipCapBufferPercent = v + } + if v := f.BaseFeeBufferPercent; v != nil { + k.BaseFeeBufferPercent = v + } + if v := f.MaxGracePeriod; v != nil { + k.MaxGracePeriod = v + } + if v := f.TurnLookBack; v != nil { + k.TurnLookBack = v + } + + k.Registry.setFrom(&f.Registry) + +} + +type KeeperRegistry struct { + CheckGasOverhead *uint32 + PerformGasOverhead *uint32 + MaxPerformDataSize *uint32 + SyncInterval *commonconfig.Duration + SyncUpkeepQueueSize *uint32 +} + +func (k *KeeperRegistry) setFrom(f *KeeperRegistry) { + if v := f.CheckGasOverhead; v != nil { + k.CheckGasOverhead = v + } + if v := f.PerformGasOverhead; v != nil { + k.PerformGasOverhead = v + } + if v := f.MaxPerformDataSize; v != nil { + k.MaxPerformDataSize = v + } + if v := f.SyncInterval; v != nil { + k.SyncInterval = v + } + if v := f.SyncUpkeepQueueSize; v != nil { + k.SyncUpkeepQueueSize = v + } +} + +type AutoPprof struct { + Enabled *bool + ProfileRoot *string + PollInterval *commonconfig.Duration + GatherDuration *commonconfig.Duration + GatherTraceDuration *commonconfig.Duration + MaxProfileSize *utils.FileSize + CPUProfileRate *int64 // runtime.SetCPUProfileRate + MemProfileRate *int64 // runtime.MemProfileRate + BlockProfileRate *int64 // runtime.SetBlockProfileRate + MutexProfileFraction *int64 // runtime.SetMutexProfileFraction + MemThreshold *utils.FileSize + GoroutineThreshold *int64 +} + +func (p *AutoPprof) setFrom(f *AutoPprof) { + if v := f.Enabled; v != nil { + p.Enabled = v + } + if v := f.ProfileRoot; v != nil { + p.ProfileRoot = v + } + if v := f.PollInterval; v != nil { + p.PollInterval = v + } + if v := f.GatherDuration; v != nil { + p.GatherDuration = v + } + if v := f.GatherTraceDuration; v != nil { + p.GatherTraceDuration = v + } + if v := f.MaxProfileSize; v != nil { + p.MaxProfileSize = v + } + if v := f.CPUProfileRate; v != nil { + p.CPUProfileRate = 
v + } + if v := f.MemProfileRate; v != nil { + p.MemProfileRate = v + } + if v := f.BlockProfileRate; v != nil { + p.BlockProfileRate = v + } + if v := f.MutexProfileFraction; v != nil { + p.MutexProfileFraction = v + } + if v := f.MemThreshold; v != nil { + p.MemThreshold = v + } + if v := f.GoroutineThreshold; v != nil { + p.GoroutineThreshold = v + } +} + +type Pyroscope struct { + ServerAddress *string + Environment *string +} + +func (p *Pyroscope) setFrom(f *Pyroscope) { + if v := f.ServerAddress; v != nil { + p.ServerAddress = v + } + if v := f.Environment; v != nil { + p.Environment = v + } +} + +type Sentry struct { + Debug *bool + DSN *string + Environment *string + Release *string +} + +func (s *Sentry) setFrom(f *Sentry) { + if v := f.Debug; v != nil { + s.Debug = f.Debug + } + if v := f.DSN; v != nil { + s.DSN = f.DSN + } + if v := f.Environment; v != nil { + s.Environment = f.Environment + } + if v := f.Release; v != nil { + s.Release = f.Release + } +} + +type Insecure struct { + DevWebServer *bool + OCRDevelopmentMode *bool + InfiniteDepthQueries *bool + DisableRateLimiting *bool +} + +func (ins *Insecure) ValidateConfig() (err error) { + return ins.validateConfig(build.Mode()) +} + +func (ins *Insecure) validateConfig(buildMode string) (err error) { + if buildMode == build.Dev { + return + } + if ins.DevWebServer != nil && *ins.DevWebServer { + err = multierr.Append(err, configutils.ErrInvalid{Name: "DevWebServer", Value: *ins.DevWebServer, Msg: "insecure configs are not allowed on secure builds"}) + } + // OCRDevelopmentMode is allowed on dev/test builds. 
+ if ins.OCRDevelopmentMode != nil && *ins.OCRDevelopmentMode && buildMode == build.Prod { + err = multierr.Append(err, configutils.ErrInvalid{Name: "OCRDevelopmentMode", Value: *ins.OCRDevelopmentMode, Msg: "insecure configs are not allowed on secure builds"}) + } + if ins.InfiniteDepthQueries != nil && *ins.InfiniteDepthQueries { + err = multierr.Append(err, configutils.ErrInvalid{Name: "InfiniteDepthQueries", Value: *ins.InfiniteDepthQueries, Msg: "insecure configs are not allowed on secure builds"}) + } + if ins.DisableRateLimiting != nil && *ins.DisableRateLimiting { + err = multierr.Append(err, configutils.ErrInvalid{Name: "DisableRateLimiting", Value: *ins.DisableRateLimiting, Msg: "insecure configs are not allowed on secure builds"}) + } + return err +} + +func (ins *Insecure) setFrom(f *Insecure) { + if v := f.DevWebServer; v != nil { + ins.DevWebServer = f.DevWebServer + } + if v := f.InfiniteDepthQueries; v != nil { + ins.InfiniteDepthQueries = f.InfiniteDepthQueries + } + if v := f.DisableRateLimiting; v != nil { + ins.DisableRateLimiting = f.DisableRateLimiting + } + if v := f.OCRDevelopmentMode; v != nil { + ins.OCRDevelopmentMode = f.OCRDevelopmentMode + } +} + +type MercuryCache struct { + LatestReportTTL *commonconfig.Duration + MaxStaleAge *commonconfig.Duration + LatestReportDeadline *commonconfig.Duration +} + +func (mc *MercuryCache) setFrom(f *MercuryCache) { + if v := f.LatestReportTTL; v != nil { + mc.LatestReportTTL = v + } + if v := f.MaxStaleAge; v != nil { + mc.MaxStaleAge = v + } + if v := f.LatestReportDeadline; v != nil { + mc.LatestReportDeadline = v + } +} + +type MercuryTLS struct { + CertFile *string +} + +func (m *MercuryTLS) setFrom(f *MercuryTLS) { + if v := f.CertFile; v != nil { + m.CertFile = v + } +} + +func (m *MercuryTLS) ValidateConfig() (err error) { + if *m.CertFile != "" { + if !isValidFilePath(*m.CertFile) { + err = multierr.Append(err, configutils.ErrInvalid{Name: "CertFile", Value: *m.CertFile, Msg: "must be a 
valid file path"}) + } + } + return +} + +type Mercury struct { + Cache MercuryCache `toml:",omitempty"` + TLS MercuryTLS `toml:",omitempty"` +} + +func (m *Mercury) setFrom(f *Mercury) { + m.Cache.setFrom(&f.Cache) + m.TLS.setFrom(&f.TLS) +} + +func (m *Mercury) ValidateConfig() (err error) { + return m.TLS.ValidateConfig() +} + +type MercuryCredentials struct { + // LegacyURL is the legacy base URL for mercury v0.2 API + LegacyURL *models.SecretURL + // URL is the base URL for mercury v0.3 API + URL *models.SecretURL + // Username is the user id for mercury credential + Username *models.Secret + // Password is the user secret key for mercury credential + Password *models.Secret +} + +type MercurySecrets struct { + Credentials map[string]MercuryCredentials +} + +func (m *MercurySecrets) SetFrom(f *MercurySecrets) (err error) { + err = m.validateMerge(f) + if err != nil { + return err + } + + if m.Credentials != nil && f.Credentials != nil { + for k, v := range f.Credentials { + m.Credentials[k] = v + } + } else if v := f.Credentials; v != nil { + m.Credentials = v + } + + return nil +} + +func (m *MercurySecrets) validateMerge(f *MercurySecrets) (err error) { + if m.Credentials != nil && f.Credentials != nil { + for k := range f.Credentials { + if _, exists := m.Credentials[k]; exists { + err = multierr.Append(err, configutils.ErrOverride{Name: fmt.Sprintf("Credentials[\"%s\"]", k)}) + } + } + } + + return err +} + +func (m *MercurySecrets) ValidateConfig() (err error) { + urls := make(map[string]struct{}, len(m.Credentials)) + for name, creds := range m.Credentials { + if name == "" { + err = multierr.Append(err, configutils.ErrEmpty{Name: "Name", Msg: "must be provided and non-empty"}) + } + if creds.URL == nil || creds.URL.URL() == nil { + err = multierr.Append(err, configutils.ErrMissing{Name: "URL", Msg: "must be provided and non-empty"}) + continue + } + if creds.LegacyURL != nil && creds.LegacyURL.URL() == nil { + err = multierr.Append(err, 
configutils.ErrMissing{Name: "Legacy URL", Msg: "must be a valid URL"})
			continue
		}
		s := creds.URL.URL().String()
		// URLs must be unique across all credentials.
		if _, exists := urls[s]; exists {
			err = multierr.Append(err, configutils.NewErrDuplicate("URL", s))
		}
		urls[s] = struct{}{}
	}
	return err
}

// ThresholdKeyShareSecrets holds the threshold decryption key share secret.
type ThresholdKeyShareSecrets struct {
	ThresholdKeyShare *models.Secret
}

// SetFrom merges f into t. It fails without mutating t if the key share is
// already set in both t and f.
func (t *ThresholdKeyShareSecrets) SetFrom(f *ThresholdKeyShareSecrets) (err error) {
	err = t.validateMerge(f)
	if err != nil {
		return err
	}

	if v := f.ThresholdKeyShare; v != nil {
		t.ThresholdKeyShare = v
	}

	return nil
}

// validateMerge reports an override error when both t and f set the share.
func (t *ThresholdKeyShareSecrets) validateMerge(f *ThresholdKeyShareSecrets) (err error) {
	if t.ThresholdKeyShare != nil && f.ThresholdKeyShare != nil {
		err = multierr.Append(err, configutils.ErrOverride{Name: "ThresholdKeyShare"})
	}

	return err
}

// Tracing configures trace export; all fields are optional pointers so that
// setFrom can distinguish "unset" from a zero value.
type Tracing struct {
	Enabled         *bool
	CollectorTarget *string
	NodeID          *string
	SamplingRatio   *float64
	Mode            *string
	TLSCertPath     *string
	Attributes      map[string]string `toml:",omitempty"`
}

// setFrom overlays the non-nil fields of f onto t; nil fields in f leave the
// corresponding fields of t untouched.
func (t *Tracing) setFrom(f *Tracing) {
	if v := f.Enabled; v != nil {
		t.Enabled = f.Enabled
	}
	if v := f.CollectorTarget; v != nil {
		t.CollectorTarget = f.CollectorTarget
	}
	if v := f.NodeID; v != nil {
		t.NodeID = f.NodeID
	}
	if v := f.Attributes; v != nil {
		t.Attributes = f.Attributes
	}
	if v := f.SamplingRatio; v != nil {
		t.SamplingRatio = f.SamplingRatio
	}
	if v := f.Mode; v != nil {
		t.Mode = f.Mode
	}
	if v := f.TLSCertPath; v != nil {
		t.TLSCertPath = f.TLSCertPath
	}
}

// ValidateConfig checks SamplingRatio's range, Mode's validity, and the
// Mode-dependent requirements on TLSCertPath and CollectorTarget. All checks
// are skipped unless tracing is enabled.
func (t *Tracing) ValidateConfig() (err error) {
	if t.Enabled == nil || !*t.Enabled {
		return err
	}

	if t.SamplingRatio != nil {
		if *t.SamplingRatio < 0 || *t.SamplingRatio > 1 {
			err = multierr.Append(err, configutils.ErrInvalid{Name: "SamplingRatio", Value: *t.SamplingRatio, Msg: "must be between 0 and 1"})
		}
	}

	if t.Mode != nil {
		switch *t.Mode {
		case "tls":
			// TLSCertPath must be set
			if
t.TLSCertPath == nil {
				err = multierr.Append(err, configutils.ErrMissing{Name: "TLSCertPath", Msg: "must be set when Tracing.Mode is tls"})
			} else {
				ok := isValidFilePath(*t.TLSCertPath)
				if !ok {
					err = multierr.Append(err, configutils.ErrInvalid{Name: "TLSCertPath", Value: *t.TLSCertPath, Msg: "must be a valid file path"})
				}
			}
		case "unencrypted":
			// no-op
		default:
			// Mode must be either "tls" or "unencrypted"
			err = multierr.Append(err, configutils.ErrInvalid{Name: "Mode", Value: *t.Mode, Msg: "must be either 'tls' or 'unencrypted'"})
		}
	}

	if t.CollectorTarget != nil && t.Mode != nil {
		switch *t.Mode {
		case "tls":
			// TLS collectors may be any URI, external or local.
			if !isValidURI(*t.CollectorTarget) {
				err = multierr.Append(err, configutils.ErrInvalid{Name: "CollectorTarget", Value: *t.CollectorTarget, Msg: "must be a valid URI"})
			}
		case "unencrypted":
			// Unencrypted traces can not be sent to external networks
			if !isValidLocalURI(*t.CollectorTarget) {
				err = multierr.Append(err, configutils.ErrInvalid{Name: "CollectorTarget", Value: *t.CollectorTarget, Msg: "must be a valid local URI"})
			}
		default:
			// no-op
		}
	}

	return err
}

// hostnameRegex accepts one or more dot-separated labels of ASCII letters,
// digits, and hyphens.
var hostnameRegex = regexp.MustCompile(`^[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*$`)

// Validates uri is valid external or local URI
func isValidURI(uri string) bool {
	// Anything containing a scheme separator is treated as an external URI
	// and only has to parse.
	if strings.Contains(uri, "://") {
		_, err := url.ParseRequestURI(uri)
		return err == nil
	}

	return isValidLocalURI(uri)
}

// isValidLocalURI returns true if uri is a valid local URI
// External URIs (e.g. http://) are not valid local URIs, and will return false.
func isValidLocalURI(uri string) bool {
	// A local URI must be exactly "host:port".
	parts := strings.Split(uri, ":")
	if len(parts) == 2 {
		host, port := parts[0], parts[1]

		// Validating hostname
		if !isValidHostname(host) {
			return false
		}

		// Validating port
		if _, err := net.LookupPort("tcp", port); err != nil {
			return false
		}

		return true
	}
	return false
}

// isValidHostname reports whether hostname matches hostnameRegex.
func isValidHostname(hostname string) bool {
	return hostnameRegex.MatchString(hostname)
}

// isValidFilePath is a cheap sanity check — non-empty and shorter than 4096
// characters; it does not touch the filesystem.
func isValidFilePath(path string) bool {
	return len(path) > 0 && len(path) < 4096
}
diff --git a/core/config/toml/types_test.go b/core/config/toml/types_test.go
new file mode 100644
index 00000000..9f33a17a
--- /dev/null
+++ b/core/config/toml/types_test.go
@@ -0,0 +1,581 @@
package toml

import (
	"fmt"
	"net/url"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"

	commonconfig "github.com/goplugin/plugin-common/pkg/config"
	"github.com/goplugin/pluginv3.0/v2/core/build"
	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
	"github.com/goplugin/pluginv3.0/v2/core/store/models"
	"github.com/goplugin/pluginv3.0/v2/core/utils"
	configutils "github.com/goplugin/pluginv3.0/v2/core/utils/config"
)

// TestMercurySecrets_valid checks that distinct, well-formed URLs pass
// validation, including one credential with a LegacyURL.
func TestMercurySecrets_valid(t *testing.T) {
	ms := MercurySecrets{
		Credentials: map[string]MercuryCredentials{
			"cred1": {
				URL:      models.MustSecretURL("https://facebook.com"),
				Username: models.NewSecret("new user1"),
				Password: models.NewSecret("new password1"),
			},
			"cred2": {
				URL:      models.MustSecretURL("HTTPS://GOOGLE.COM"),
				Username: models.NewSecret("new user1"),
				Password: models.NewSecret("new password2"),
			},
			"cred3": {
				LegacyURL: models.MustSecretURL("https://abc.com"),
				URL:       models.MustSecretURL("HTTPS://GOOGLE1.COM"),
				Username:  models.NewSecret("new user1"),
				Password:  models.NewSecret("new password2"),
			},
		},
	}

	err := ms.ValidateConfig()
	assert.NoError(t, err)
}

// TestMercurySecrets_duplicateURLs checks that two credentials sharing a URL
// are rejected.
func TestMercurySecrets_duplicateURLs(t *testing.T) {
	ms := MercurySecrets{
Credentials: map[string]MercuryCredentials{ + "cred1": { + URL: models.MustSecretURL("HTTPS://GOOGLE.COM"), + Username: models.NewSecret("new user1"), + Password: models.NewSecret("new password1"), + }, + "cred2": { + URL: models.MustSecretURL("HTTPS://GOOGLE.COM"), + Username: models.NewSecret("new user2"), + Password: models.NewSecret("new password2"), + }, + }, + } + + err := ms.ValidateConfig() + assert.Error(t, err) + assert.Equal(t, "URL: invalid value (https://GOOGLE.COM): duplicate - must be unique", err.Error()) +} + +func TestMercurySecrets_emptyURL(t *testing.T) { + ms := MercurySecrets{ + Credentials: map[string]MercuryCredentials{ + "cred1": { + URL: nil, + Username: models.NewSecret("new user1"), + Password: models.NewSecret("new password1"), + }, + }, + } + + err := ms.ValidateConfig() + assert.Error(t, err) + assert.Equal(t, "URL: missing: must be provided and non-empty", err.Error()) +} + +func Test_validateDBURL(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + url string + wantErr string + }{ + {"no user or password", "postgresql://foo.example.com:5432/plugin?application_name=Test+Application", "DB URL must be authenticated; plaintext URLs are not allowed"}, + {"with user and no password", "postgresql://myuser@foo.example.com:5432/plugin?application_name=Test+Application", "DB URL must be authenticated; password is required"}, + {"with user and password of insufficient length", "postgresql://myuser:shortpw@foo.example.com:5432/plugin?application_name=Test+Application", fmt.Sprintf("%s %s\n", utils.ErrMsgHeader, "password is less than 16 characters long")}, + {"with no user and password of sufficient length", "postgresql://:thisisareallylongpassword@foo.example.com:5432/plugin?application_name=Test+Application", ""}, + {"with user and password of sufficient length", "postgresql://myuser:thisisareallylongpassword@foo.example.com:5432/plugin?application_name=Test+Application", ""}, + {"with user and password of insufficient 
length as params", "postgresql://foo.example.com:5432/plugin?application_name=Test+Application&password=shortpw&user=myuser", fmt.Sprintf("%s %s\n", utils.ErrMsgHeader, "password is less than 16 characters long")}, + {"with no user and password of sufficient length as params", "postgresql://foo.example.com:5432/plugin?application_name=Test+Application&password=thisisareallylongpassword", ""}, + {"with user and password of sufficient length as params", "postgresql://foo.example.com:5432/plugin?application_name=Test+Application&password=thisisareallylongpassword&user=myuser", ""}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + url := testutils.MustParseURL(t, test.url) + err := validateDBURL(*url) + if test.wantErr == "" { + assert.Nil(t, err) + } else { + assert.EqualError(t, err, test.wantErr) + } + }) + } +} + +func TestDatabaseSecrets_ValidateConfig(t *testing.T) { + validUrl := commonconfig.URL(url.URL{Scheme: "https", Host: "localhost"}) + validSecretURL := *models.NewSecretURL(&validUrl) + + invalidEmptyUrl := commonconfig.URL(url.URL{}) + invalidEmptySecretURL := *models.NewSecretURL(&invalidEmptyUrl) + + invalidBackupURL := commonconfig.URL(url.URL{Scheme: "http", Host: "localhost"}) + invalidBackupSecretURL := *models.NewSecretURL(&invalidBackupURL) + + tests := []struct { + name string + input *DatabaseSecrets + buildMode string + expectedErrContains []string + }{ + { + name: "Nil URL", + input: &DatabaseSecrets{ + URL: nil, + }, + expectedErrContains: []string{"URL: empty: must be provided and non-empty"}, + }, + { + name: "Empty URL", + input: &DatabaseSecrets{ + URL: &invalidEmptySecretURL, + }, + expectedErrContains: []string{"URL: empty: must be provided and non-empty"}, + }, + { + name: "Insecure Password in Production", + input: &DatabaseSecrets{ + URL: &validSecretURL, + AllowSimplePasswords: &[]bool{true}[0], + }, + buildMode: build.Prod, + expectedErrContains: []string{"insecure configs are not allowed on secure 
builds"}, + }, + { + name: "Invalid Backup URL with Simple Passwords Not Allowed", + input: &DatabaseSecrets{ + URL: &validSecretURL, + BackupURL: &invalidBackupSecretURL, + AllowSimplePasswords: &[]bool{false}[0], + }, + expectedErrContains: []string{"missing or insufficiently complex password"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buildMode := build.Mode() + if tt.buildMode != "" { + buildMode = tt.buildMode + } + err := tt.input.validateConfig(buildMode) + if err == nil && len(tt.expectedErrContains) > 0 { + t.Errorf("expected errors but got none") + return + } + + if err != nil { + errStr := err.Error() + for _, expectedErrSubStr := range tt.expectedErrContains { + if !strings.Contains(errStr, expectedErrSubStr) { + t.Errorf("expected error to contain substring %q but got %v", expectedErrSubStr, errStr) + } + } + } + }) + } +} +func TestTracing_ValidateCollectorTarget(t *testing.T) { + tests := []struct { + name string + collectorTarget *string + mode *string + wantErr bool + errMsg string + }{ + { + name: "valid http address in tls mode", + collectorTarget: ptr("https://testing.collector.dev"), + mode: ptr("tls"), + wantErr: false, + }, + { + name: "valid http address in unencrypted mode", + collectorTarget: ptr("https://localhost:4317"), + mode: ptr("unencrypted"), + wantErr: true, + errMsg: "CollectorTarget: invalid value (https://localhost:4317): must be a valid local URI", + }, + // Tracing.Mode = 'tls' + { + name: "valid localhost address", + collectorTarget: ptr("localhost:4317"), + mode: ptr("tls"), + wantErr: false, + }, + { + name: "valid docker address", + collectorTarget: ptr("otel-collector:4317"), + mode: ptr("tls"), + wantErr: false, + }, + { + name: "valid IP address", + collectorTarget: ptr("192.168.1.1:4317"), + mode: ptr("tls"), + wantErr: false, + }, + { + name: "invalid port", + collectorTarget: ptr("localhost:invalid"), + wantErr: true, + mode: ptr("tls"), + errMsg: "CollectorTarget: invalid 
value (localhost:invalid): must be a valid URI", + }, + { + name: "invalid address", + collectorTarget: ptr("invalid address"), + wantErr: true, + mode: ptr("tls"), + errMsg: "CollectorTarget: invalid value (invalid address): must be a valid URI", + }, + { + name: "nil CollectorTarget", + collectorTarget: ptr(""), + wantErr: true, + mode: ptr("tls"), + errMsg: "CollectorTarget: invalid value (): must be a valid URI", + }, + // Tracing.Mode = 'unencrypted' + { + name: "valid localhost address", + collectorTarget: ptr("localhost:4317"), + mode: ptr("unencrypted"), + wantErr: false, + }, + { + name: "valid docker address", + collectorTarget: ptr("otel-collector:4317"), + mode: ptr("unencrypted"), + wantErr: false, + }, + { + name: "valid IP address", + collectorTarget: ptr("192.168.1.1:4317"), + mode: ptr("unencrypted"), + wantErr: false, + }, + { + name: "invalid port", + collectorTarget: ptr("localhost:invalid"), + wantErr: true, + mode: ptr("unencrypted"), + errMsg: "CollectorTarget: invalid value (localhost:invalid): must be a valid local URI", + }, + { + name: "invalid address", + collectorTarget: ptr("invalid address"), + wantErr: true, + mode: ptr("unencrypted"), + errMsg: "CollectorTarget: invalid value (invalid address): must be a valid local URI", + }, + { + name: "nil CollectorTarget", + collectorTarget: ptr(""), + wantErr: true, + mode: ptr("unencrypted"), + errMsg: "CollectorTarget: invalid value (): must be a valid local URI", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var tlsCertPath string + if *tt.mode == "tls" { + tlsCertPath = "/path/to/cert.pem" + } + tracing := &Tracing{ + Enabled: ptr(true), + TLSCertPath: &tlsCertPath, + Mode: tt.mode, + CollectorTarget: tt.collectorTarget, + } + + err := tracing.ValidateConfig() + + if tt.wantErr { + assert.Error(t, err) + assert.Equal(t, tt.errMsg, err.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestTracing_ValidateSamplingRatio(t *testing.T) { + 
tests := []struct { + name string + samplingRatio *float64 + wantErr bool + errMsg string + }{ + { + name: "valid lower bound", + samplingRatio: ptr(0.0), + wantErr: false, + }, + { + name: "valid upper bound", + samplingRatio: ptr(1.0), + wantErr: false, + }, + { + name: "valid value", + samplingRatio: ptr(0.5), + wantErr: false, + }, + { + name: "invalid negative value", + samplingRatio: ptr(-0.1), + wantErr: true, + errMsg: configutils.ErrInvalid{Name: "SamplingRatio", Value: -0.1, Msg: "must be between 0 and 1"}.Error(), + }, + { + name: "invalid value greater than 1", + samplingRatio: ptr(1.1), + wantErr: true, + errMsg: configutils.ErrInvalid{Name: "SamplingRatio", Value: 1.1, Msg: "must be between 0 and 1"}.Error(), + }, + { + name: "nil SamplingRatio", + samplingRatio: nil, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tracing := Tracing{ + SamplingRatio: tt.samplingRatio, + Enabled: ptr(true), + } + + err := tracing.ValidateConfig() + + if tt.wantErr { + assert.Error(t, err) + assert.Equal(t, tt.errMsg, err.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestTracing_ValidateTLSCertPath(t *testing.T) { + // tests for Tracing.Mode = 'tls' + tls_tests := []struct { + name string + tlsCertPath *string + wantErr bool + errMsg string + }{ + { + name: "valid file path", + tlsCertPath: ptr("/etc/ssl/certs/cert.pem"), + wantErr: false, + }, + { + name: "relative file path", + tlsCertPath: ptr("certs/cert.pem"), + wantErr: false, + }, + { + name: "excessively long file path", + tlsCertPath: ptr(strings.Repeat("z", 4097)), + wantErr: true, + errMsg: "TLSCertPath: invalid value (" + strings.Repeat("z", 4097) + "): must be a valid file path", + }, + { + name: "empty file path", + tlsCertPath: ptr(""), + wantErr: true, + errMsg: "TLSCertPath: invalid value (): must be a valid file path", + }, + } + + // tests for Tracing.Mode = 'unencrypted' + unencrypted_tests := []struct { + name string + 
tlsCertPath *string + wantErr bool + errMsg string + }{ + { + name: "valid file path", + tlsCertPath: ptr("/etc/ssl/certs/cert.pem"), + wantErr: false, + }, + { + name: "relative file path", + tlsCertPath: ptr("certs/cert.pem"), + wantErr: false, + }, + { + name: "excessively long file path", + tlsCertPath: ptr(strings.Repeat("z", 4097)), + wantErr: false, + }, + { + name: "empty file path", + tlsCertPath: ptr(""), + wantErr: false, + }, + } + + for _, tt := range tls_tests { + t.Run(tt.name, func(t *testing.T) { + tracing := &Tracing{ + Mode: ptr("tls"), + TLSCertPath: tt.tlsCertPath, + Enabled: ptr(true), + } + + err := tracing.ValidateConfig() + + if tt.wantErr { + assert.Error(t, err) + assert.Equal(t, tt.errMsg, err.Error()) + } else { + assert.NoError(t, err) + } + }) + } + + for _, tt := range unencrypted_tests { + t.Run(tt.name, func(t *testing.T) { + tracing := &Tracing{ + Mode: ptr("unencrypted"), + TLSCertPath: tt.tlsCertPath, + Enabled: ptr(true), + } + + err := tracing.ValidateConfig() + + if tt.wantErr { + assert.Error(t, err) + assert.Equal(t, tt.errMsg, err.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestTracing_ValidateMode(t *testing.T) { + tests := []struct { + name string + mode *string + tlsCertPath *string + wantErr bool + errMsg string + }{ + { + name: "tls mode with valid TLS path", + mode: ptr("tls"), + tlsCertPath: ptr("/path/to/cert.pem"), + wantErr: false, + }, + { + name: "tls mode without TLS path", + mode: ptr("tls"), + tlsCertPath: nil, + wantErr: true, + errMsg: "TLSCertPath: missing: must be set when Tracing.Mode is tls", + }, + { + name: "unencrypted mode with TLS path", + mode: ptr("unencrypted"), + tlsCertPath: ptr("/path/to/cert.pem"), + wantErr: false, + }, + { + name: "unencrypted mode without TLS path", + mode: ptr("unencrypted"), + tlsCertPath: nil, + wantErr: false, + }, + { + name: "invalid mode", + mode: ptr("unknown"), + tlsCertPath: nil, + wantErr: true, + errMsg: "Mode: invalid value 
(unknown): must be either 'tls' or 'unencrypted'", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tracing := &Tracing{ + Enabled: ptr(true), + Mode: tt.mode, + TLSCertPath: tt.tlsCertPath, + } + + err := tracing.ValidateConfig() + + if tt.wantErr { + assert.Error(t, err) + assert.Equal(t, tt.errMsg, err.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestMercuryTLS_ValidateTLSCertPath(t *testing.T) { + tests := []struct { + name string + tlsCertPath *string + wantErr bool + errMsg string + }{ + { + name: "valid file path", + tlsCertPath: ptr("/etc/ssl/certs/cert.pem"), + wantErr: false, + }, + { + name: "relative file path", + tlsCertPath: ptr("certs/cert.pem"), + wantErr: false, + }, + { + name: "excessively long file path", + tlsCertPath: ptr(strings.Repeat("z", 4097)), + wantErr: true, + errMsg: "CertFile: invalid value (" + strings.Repeat("z", 4097) + "): must be a valid file path", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mercury := &Mercury{ + TLS: MercuryTLS{ + CertFile: tt.tlsCertPath, + }, + } + + err := mercury.ValidateConfig() + + if tt.wantErr { + assert.Error(t, err) + assert.Equal(t, tt.errMsg, err.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} + +// ptr is a utility function for converting a value to a pointer to the value. 
func ptr[T any](t T) *T { return &t }
diff --git a/core/config/tracing_config.go b/core/config/tracing_config.go
new file mode 100644
index 00000000..307a010c
--- /dev/null
+++ b/core/config/tracing_config.go
@@ -0,0 +1,11 @@
package config

// Tracing exposes read-only accessors for trace-export configuration.
type Tracing interface {
	Enabled() bool
	CollectorTarget() string
	NodeID() string
	SamplingRatio() float64
	TLSCertPath() string
	Mode() string
	Attributes() map[string]string
}
diff --git a/core/config/web_config.go b/core/config/web_config.go
new file mode 100644
index 00000000..747d819b
--- /dev/null
+++ b/core/config/web_config.go
@@ -0,0 +1,77 @@
package config

import (
	"net"
	"net/url"
	"time"

	"github.com/gin-contrib/sessions"

	commonconfig "github.com/goplugin/plugin-common/pkg/config"
)

// TLS exposes HTTPS listener settings for the web server.
type TLS interface {
	Dir() string
	Host() string
	ForceRedirect() bool
	CertFile() string
	KeyFile() string
	HTTPSPort() uint16
	ListenIP() net.IP
}

// RateLimit exposes request-rate limits for authenticated and
// unauthenticated callers.
type RateLimit interface {
	Authenticated() int64
	AuthenticatedPeriod() time.Duration
	Unauthenticated() int64
	UnauthenticatedPeriod() time.Duration
}

// MFA exposes relying-party settings for multi-factor authentication.
type MFA interface {
	RPID() string
	RPOrigin() string
}

// LDAP exposes settings for LDAP-backed authentication and group-to-role
// mapping.
type LDAP interface {
	ServerAddress() string
	ReadOnlyUserLogin() string
	ReadOnlyUserPass() string
	ServerTLS() bool
	SessionTimeout() commonconfig.Duration
	QueryTimeout() time.Duration
	BaseUserAttr() string
	BaseDN() string
	UsersDN() string
	GroupsDN() string
	ActiveAttribute() string
	ActiveAttributeAllowedValue() string
	AdminUserGroupCN() string
	EditUserGroupCN() string
	RunUserGroupCN() string
	ReadUserGroupCN() string
	UserApiTokenEnabled() bool
	UserAPITokenDuration() commonconfig.Duration
	UpstreamSyncInterval() commonconfig.Duration
	UpstreamSyncRateLimit() commonconfig.Duration
}

// WebServer exposes top-level HTTP server settings plus the nested TLS,
// RateLimit, MFA, and LDAP sections.
type WebServer interface {
	AuthenticationMethod() string
	AllowOrigins() string
	BridgeCacheTTL() time.Duration
	BridgeResponseURL() *url.URL
	HTTPMaxSize() int64
	StartTimeout() time.Duration
HTTPWriteTimeout() time.Duration + HTTPPort() uint16 + SessionReaperExpiration() commonconfig.Duration + SecureCookies() bool + SessionOptions() sessions.Options + SessionTimeout() commonconfig.Duration + ListenIP() net.IP + + TLS() TLS + RateLimit() RateLimit + MFA() MFA + LDAP() LDAP +} diff --git a/core/gethwrappers/OffchainAggregator/OffchainAggregator.abi b/core/gethwrappers/OffchainAggregator/OffchainAggregator.abi new file mode 100644 index 00000000..d1a10667 --- /dev/null +++ b/core/gethwrappers/OffchainAggregator/OffchainAggregator.abi @@ -0,0 +1 @@ +[{"inputs":[{"internalType":"uint32","name":"_maximumGasPrice","type":"uint32"},{"internalType":"uint32","name":"_reasonableGasPrice","type":"uint32"},{"internalType":"uint32","name":"_microLinkPerEth","type":"uint32"},{"internalType":"uint32","name":"_linkGweiPerObservation","type":"uint32"},{"internalType":"uint32","name":"_linkGweiPerTransmission","type":"uint32"},{"internalType":"address","name":"_link","type":"address"},{"internalType":"address","name":"_validator","type":"address"},{"internalType":"int192","name":"_minAnswer","type":"int192"},{"internalType":"int192","name":"_maxAnswer","type":"int192"},{"internalType":"contract AccessControllerInterface","name":"_billingAccessController","type":"address"},{"internalType":"contract AccessControllerInterface","name":"_requesterAccessController","type":"address"},{"internalType":"uint8","name":"_decimals","type":"uint8"},{"internalType":"string","name":"_description","type":"string"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"int256","name":"current","type":"int256"},{"indexed":true,"internalType":"uint256","name":"roundId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"updatedAt","type":"uint256"}],"name":"AnswerUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"contract 
AccessControllerInterface","name":"old","type":"address"},{"indexed":false,"internalType":"contract AccessControllerInterface","name":"current","type":"address"}],"name":"BillingAccessControllerSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint32","name":"maximumGasPrice","type":"uint32"},{"indexed":false,"internalType":"uint32","name":"reasonableGasPrice","type":"uint32"},{"indexed":false,"internalType":"uint32","name":"microLinkPerEth","type":"uint32"},{"indexed":false,"internalType":"uint32","name":"linkGweiPerObservation","type":"uint32"},{"indexed":false,"internalType":"uint32","name":"linkGweiPerTransmission","type":"uint32"}],"name":"BillingSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint32","name":"previousConfigBlockNumber","type":"uint32"},{"indexed":false,"internalType":"uint64","name":"configCount","type":"uint64"},{"indexed":false,"internalType":"address[]","name":"signers","type":"address[]"},{"indexed":false,"internalType":"address[]","name":"transmitters","type":"address[]"},{"indexed":false,"internalType":"uint8","name":"threshold","type":"uint8"},{"indexed":false,"internalType":"uint64","name":"encodedConfigVersion","type":"uint64"},{"indexed":false,"internalType":"bytes","name":"encoded","type":"bytes"}],"name":"ConfigSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"roundId","type":"uint256"},{"indexed":true,"internalType":"address","name":"startedBy","type":"address"},{"indexed":false,"internalType":"uint256","name":"startedAt","type":"uint256"}],"name":"NewRound","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint32","name":"aggregatorRoundId","type":"uint32"},{"indexed":false,"internalType":"int192","name":"answer","type":"int192"},{"indexed":false,"internalType":"address","name":"transmitter","type":"address"},{"indexed":false,"internalType":"int192[]","name":"observations","type":"int192[]
"},{"indexed":false,"internalType":"bytes","name":"observers","type":"bytes"},{"indexed":false,"internalType":"bytes32","name":"rawReportContext","type":"bytes32"}],"name":"NewTransmission","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"transmitter","type":"address"},{"indexed":false,"internalType":"address","name":"payee","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"OraclePaid","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"OwnershipTransferRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transmitter","type":"address"},{"indexed":true,"internalType":"address","name":"current","type":"address"},{"indexed":true,"internalType":"address","name":"proposed","type":"address"}],"name":"PayeeshipTransferRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transmitter","type":"address"},{"indexed":true,"internalType":"address","name":"previous","type":"address"},{"indexed":true,"internalType":"address","name":"current","type":"address"}],"name":"PayeeshipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"contract AccessControllerInterface","name":"old","type":"address"},{"indexed":false,"internalType":"contract 
AccessControllerInterface","name":"current","type":"address"}],"name":"RequesterAccessControllerSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"requester","type":"address"},{"indexed":false,"internalType":"bytes16","name":"configDigest","type":"bytes16"},{"indexed":false,"internalType":"uint32","name":"epoch","type":"uint32"},{"indexed":false,"internalType":"uint8","name":"round","type":"uint8"}],"name":"RoundRequested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"previous","type":"address"},{"indexed":true,"internalType":"address","name":"current","type":"address"}],"name":"ValidatorUpdated","type":"event"},{"inputs":[],"name":"PLI","outputs":[{"internalType":"contract LinkTokenInterface","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"acceptOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_transmitter","type":"address"}],"name":"acceptPayeeship","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"billingAccessController","outputs":[{"internalType":"contract 
AccessControllerInterface","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"description","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_roundId","type":"uint256"}],"name":"getAnswer","outputs":[{"internalType":"int256","name":"","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getBilling","outputs":[{"internalType":"uint32","name":"maximumGasPrice","type":"uint32"},{"internalType":"uint32","name":"reasonableGasPrice","type":"uint32"},{"internalType":"uint32","name":"microLinkPerEth","type":"uint32"},{"internalType":"uint32","name":"linkGweiPerObservation","type":"uint32"},{"internalType":"uint32","name":"linkGweiPerTransmission","type":"uint32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint80","name":"_roundId","type":"uint80"}],"name":"getRoundData","outputs":[{"internalType":"uint80","name":"roundId","type":"uint80"},{"internalType":"int256","name":"answer","type":"int256"},{"internalType":"uint256","name":"startedAt","type":"uint256"},{"internalType":"uint256","name":"updatedAt","type":"uint256"},{"internalType":"uint80","name":"answeredInRound","type":"uint80"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_roundId","type":"uint256"}],"name":"getTimestamp","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestAnswer","outputs":[{"internalType":"int256","name":"","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestConfigDetails","outputs":[{"internalType":"uint32","name":"configCount","type":"uint32"},{"internalType":"uint32","name":"blockNumber","type":
"uint32"},{"internalType":"bytes16","name":"configDigest","type":"bytes16"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestRound","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestRoundData","outputs":[{"internalType":"uint80","name":"roundId","type":"uint80"},{"internalType":"int256","name":"answer","type":"int256"},{"internalType":"uint256","name":"startedAt","type":"uint256"},{"internalType":"uint256","name":"updatedAt","type":"uint256"},{"internalType":"uint80","name":"answeredInRound","type":"uint80"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestTimestamp","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"latestTransmissionDetails","outputs":[{"internalType":"bytes16","name":"configDigest","type":"bytes16"},{"internalType":"uint32","name":"epoch","type":"uint32"},{"internalType":"uint8","name":"round","type":"uint8"},{"internalType":"int192","name":"latestAnswer","type":"int192"},{"internalType":"uint64","name":"latestTimestamp","type":"uint64"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"linkAvailableForPayment","outputs":[{"internalType":"int256","name":"availableBalance","type":"int256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"maxAnswer","outputs":[{"internalType":"int192","name":"","type":"int192"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"minAnswer","outputs":[{"internalType":"int192","name":"","type":"int192"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_signerOrTransmitter","type":"address"}],"name":"oracleObservationCount","outputs":[{"internalType":"uint16","name":"","type":"uint16"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_transmitter","type":"address"}],"name":"owedPaym
ent","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"owner","outputs":[{"internalType":"address payable","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"requestNewRound","outputs":[{"internalType":"uint80","name":"","type":"uint80"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"requesterAccessController","outputs":[{"internalType":"contract AccessControllerInterface","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint32","name":"_maximumGasPrice","type":"uint32"},{"internalType":"uint32","name":"_reasonableGasPrice","type":"uint32"},{"internalType":"uint32","name":"_microLinkPerEth","type":"uint32"},{"internalType":"uint32","name":"_linkGweiPerObservation","type":"uint32"},{"internalType":"uint32","name":"_linkGweiPerTransmission","type":"uint32"}],"name":"setBilling","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"contract AccessControllerInterface","name":"_billingAccessController","type":"address"}],"name":"setBillingAccessController","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address[]","name":"_signers","type":"address[]"},{"internalType":"address[]","name":"_transmitters","type":"address[]"},{"internalType":"uint8","name":"_threshold","type":"uint8"},{"internalType":"uint64","name":"_encodedConfigVersion","type":"uint64"},{"internalType":"bytes","name":"_encoded","type":"bytes"}],"name":"setConfig","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address[]","name":"_transmitters","type":"address[]"},{"internalType":"address[]","name":"_payees","type":"address[]"}],"name":"setPayees","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"contract 
AccessControllerInterface","name":"_requesterAccessController","type":"address"}],"name":"setRequesterAccessController","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_newValidator","type":"address"}],"name":"setValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_to","type":"address"}],"name":"transferOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_transmitter","type":"address"},{"internalType":"address","name":"_proposed","type":"address"}],"name":"transferPayeeship","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"_report","type":"bytes"},{"internalType":"bytes32[]","name":"_rs","type":"bytes32[]"},{"internalType":"bytes32[]","name":"_ss","type":"bytes32[]"},{"internalType":"bytes32","name":"_rawVs","type":"bytes32"}],"name":"transmit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"transmitters","outputs":[{"internalType":"address[]","name":"","type":"address[]"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"validator","outputs":[{"internalType":"contract AggregatorValidatorInterface","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"version","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_recipient","type":"address"},{"internalType":"uint256","name":"_amount","type":"uint256"}],"name":"withdrawFunds","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_transmitter","type":"address"}],"name":"withdrawPayment","outputs":[],"stateMutability":"nonpayable","type":"function"}] diff --git a/core/gethwrappers/README.md b/core/gethwrappers/README.md new file 
mode 100644 index 00000000..f209a491 --- /dev/null +++ b/core/gethwrappers/README.md @@ -0,0 +1,42 @@ +To run these commands, you must either install docker, or the correct version +of abigen. + +The latter can be installed with these commands, at least on linux: + +``` + git clone https://github.com/ethereum/go-ethereum + cd go-ethereum/cmd/abigen + git checkout v + go install +``` + +Here, is the version of go-ethereum specified in plugin's +go.mod. This will install abigen in `"$GOPATH/bin"`, which you should add to +your $PATH. + +To reduce explicit dependencies, and in case the system does not have the +correct version of abigen installed , the above commands spin up docker +containers. In my hands, total running time including compilation is about +13s. If you're modifying solidity code and testing against go code a lot, it +might be worthwhile to generate the the wrappers using a static container +with abigen and solc, which will complete much faster. E.g. + +``` + abigen -sol ../../contracts/src/v0.6/VRFAll.sol -pkg vrf -out solidity_interfaces.go +``` + +where VRFAll.sol simply contains `import "contract_path";` instructions for +all the contracts you wish to target. This runs in about 0.25 seconds in my +hands. + +If you're on linux, you can copy the correct version of solc out of the +appropriate docker container. At least, the following works on ubuntu: + +``` + $ docker run --name solc ethereum/solc:0.6.2 + $ sudo docker cp solc:/usr/bin/solc /usr/bin + $ docker rm solc +``` + +If you need to point abigen at your solc executable, you can specify the path +with the abigen --solc option. 
\ No newline at end of file diff --git a/core/gethwrappers/abigen.go b/core/gethwrappers/abigen.go new file mode 100644 index 00000000..a4aedb79 --- /dev/null +++ b/core/gethwrappers/abigen.go @@ -0,0 +1,468 @@ +package gethwrappers + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + gethParams "github.com/ethereum/go-ethereum/params" + "golang.org/x/tools/go/ast/astutil" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" +) + +const headerComment = `// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +` + +// AbigenArgs is the arguments to the abigen executable. E.g., Bin is the -bin +// arg. +type AbigenArgs struct { + Bin, ABI, Out, Type, Pkg string +} + +// Abigen calls Abigen with the given arguments +// +// It might seem like a shame, to shell out to another golang program like +// this, but the abigen executable is the stable public interface to the +// geth contract-wrapper machinery. 
+// +// Check whether native abigen is installed, and has correct version +func Abigen(a AbigenArgs) { + var versionResponse bytes.Buffer + abigenExecutablePath := filepath.Join(GetProjectRoot(), "tools/bin/abigen") + abigenVersionCheck := exec.Command(abigenExecutablePath, "--version") + abigenVersionCheck.Stdout = &versionResponse + if err := abigenVersionCheck.Run(); err != nil { + Exit("no native abigen; you must install it (`make abigen` in the "+ + "plugin root dir)", err) + } + version := string(regexp.MustCompile(`[0-9]+\.[0-9]+\.[0-9]+`).Find( + versionResponse.Bytes())) + if version != gethParams.Version { + Exit(fmt.Sprintf("wrong version (%s) of abigen; install the correct one "+ + "(%s) with `make abigen` in the plugin root dir", version, + gethParams.Version), + nil) + } + args := []string{ + "-abi", a.ABI, + "-out", a.Out, + "-type", a.Type, + "-pkg", a.Pkg, + } + if a.Bin != "-" { + args = append(args, "-bin", a.Bin) + } + buildCommand := exec.Command(abigenExecutablePath, args...) 
+ var buildResponse bytes.Buffer + buildCommand.Stderr = &buildResponse + if err := buildCommand.Run(); err != nil { + Exit("failure while building "+a.Pkg+" wrapper, stderr: "+buildResponse.String(), err) + } + + ImproveAbigenOutput(a.Out, a.ABI) +} + +func ImproveAbigenOutput(path string, abiPath string) { + abiBytes, err := os.ReadFile(abiPath) + if err != nil { + Exit("Error while improving abigen output", err) + } + abi, err := abi.JSON(strings.NewReader(string(abiBytes))) + if err != nil { + Exit("Error while improving abigen output", err) + } + + bs, err := os.ReadFile(path) + if err != nil { + Exit("Error while improving abigen output", err) + } + + fset, fileNode := parseFile(bs) + logNames := getLogNames(fileNode) + if len(logNames) > 0 { + astutil.AddImport(fset, fileNode, "fmt") + astutil.AddImport(fset, fileNode, "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated") + } + contractName := getContractName(fileNode) + fileNode = addContractStructFields(contractName, fileNode) + fileNode = replaceAnonymousStructs(contractName, fileNode) + bs = generateCode(fset, fileNode) + bs = writeAdditionalMethods(contractName, logNames, abi, bs) + err = os.WriteFile(path, bs, 0600) + if err != nil { + Exit("Error while writing improved abigen source", err) + } + + fset, fileNode = parseFile(bs) + fileNode = writeInterface(contractName, fileNode) + bs = generateCode(fset, fileNode) + bs = addHeader(bs) + + err = os.WriteFile(path, bs, 0600) + if err != nil { + Exit("Error while writing improved abigen source", err) + } +} + +func parseFile(bs []byte) (*token.FileSet, *ast.File) { + fset := token.NewFileSet() + fileNode, err := parser.ParseFile(fset, "", string(bs), parser.AllErrors) + if err != nil { + Exit("Error while improving abigen output", err) + } + return fset, fileNode +} + +func generateCode(fset *token.FileSet, fileNode *ast.File) []byte { + var buf bytes.Buffer + err := format.Node(&buf, fset, fileNode) + if err != nil { + Exit("Error while 
writing improved abigen source", err) + } + return buf.Bytes() +} + +func getContractName(fileNode *ast.File) string { + // Search for the ABI const e.g. VRFCoordinatorV2ABI = "0x..." + var contractName string + astutil.Apply(fileNode, func(cursor *astutil.Cursor) bool { + x, is := cursor.Node().(*ast.ValueSpec) + if !is { + return true + } + if len(x.Names) > 0 { + for _, n := range x.Names { + if len(n.Name) < 3 { + return true + } + if n.Name[len(n.Name)-3:] != "ABI" { + return true + } + contractName = n.Name[:len(n.Name)-3] + } + } + return false + }, nil) + return contractName +} + +// Add the `.address` and `.abi` fields to the contract struct. +func addContractStructFields(contractName string, fileNode *ast.File) *ast.File { + fileNode = addContractStructFieldsToStruct(contractName, fileNode) + fileNode = addContractStructFieldsToConstructor(contractName, fileNode) + fileNode = addContractStructFieldsToDeployMethod(contractName, fileNode) + return fileNode +} + +// Add the fields to the contract struct. +func addContractStructFieldsToStruct(contractName string, fileNode *ast.File) *ast.File { + return astutil.Apply(fileNode, func(cursor *astutil.Cursor) bool { + x, is := cursor.Node().(*ast.StructType) + if !is { + return true + } + theType, is := cursor.Parent().(*ast.TypeSpec) + if !is { + return false + } else if theType.Name.Name != contractName { + return false + } + + addrField := &ast.Field{ + Names: []*ast.Ident{ast.NewIdent("address")}, + Type: &ast.SelectorExpr{ + X: ast.NewIdent("common"), + Sel: ast.NewIdent("Address"), + }, + } + + abiField := &ast.Field{ + Names: []*ast.Ident{ast.NewIdent("abi")}, + Type: &ast.SelectorExpr{ + X: ast.NewIdent("abi"), + Sel: ast.NewIdent("ABI"), + }, + } + x.Fields.List = append([]*ast.Field{addrField, abiField}, x.Fields.List...) + return false + }, nil).(*ast.File) +} + +// Add the fields to the return value of the constructor. 
+func addContractStructFieldsToConstructor(contractName string, fileNode *ast.File) *ast.File { + return astutil.Apply(fileNode, func(cursor *astutil.Cursor) bool { + x, is := cursor.Node().(*ast.FuncDecl) + if !is { + return true + } else if x.Name.Name != "New"+contractName { + return false + } + + for _, stmt := range x.Body.List { + returnStmt, is := stmt.(*ast.ReturnStmt) + if !is { + continue + } + lit, is := returnStmt.Results[0].(*ast.UnaryExpr).X.(*ast.CompositeLit) + if !is { + continue + } + addressExpr := &ast.KeyValueExpr{ + Key: ast.NewIdent("address"), + Value: ast.NewIdent("address"), + } + abiExpr := &ast.KeyValueExpr{ + Key: ast.NewIdent("abi"), + Value: ast.NewIdent("abi"), + } + lit.Elts = append([]ast.Expr{addressExpr, abiExpr}, lit.Elts...) + } + + parseABIStmt := &ast.AssignStmt{ + Lhs: []ast.Expr{ast.NewIdent("abi"), ast.NewIdent("err")}, + Tok: token.DEFINE, + Rhs: []ast.Expr{ + &ast.CallExpr{ + Fun: &ast.SelectorExpr{ + X: ast.NewIdent("abi"), + Sel: ast.NewIdent("JSON"), + }, + Args: []ast.Expr{ + &ast.CallExpr{ + Fun: &ast.SelectorExpr{ + X: ast.NewIdent("strings"), + Sel: ast.NewIdent("NewReader"), + }, + Args: []ast.Expr{ast.NewIdent(contractName + "ABI")}, + }, + }, + }, + }, + } + checkParseABIErrStmt := &ast.IfStmt{ + Cond: &ast.BinaryExpr{ + X: ast.NewIdent("err"), + Op: token.NEQ, + Y: ast.NewIdent("nil"), + }, + Body: &ast.BlockStmt{ + List: []ast.Stmt{ + &ast.ReturnStmt{ + Results: []ast.Expr{ast.NewIdent("nil"), ast.NewIdent("err")}, + }, + }, + }, + } + + x.Body.List = append([]ast.Stmt{parseABIStmt, checkParseABIErrStmt}, x.Body.List...) + return false + }, nil).(*ast.File) +} + +// Add the fields to the returned struct in the 'Deploy' method. 
+func addContractStructFieldsToDeployMethod(contractName string, fileNode *ast.File) *ast.File { + return astutil.Apply(fileNode, func(cursor *astutil.Cursor) bool { + x, is := cursor.Node().(*ast.FuncDecl) + if !is { + return true + } else if x.Name.Name != "Deploy"+contractName { + return false + } + + for _, stmt := range x.Body.List { + returnStmt, is := stmt.(*ast.ReturnStmt) + if !is { + continue + } + if len(returnStmt.Results) < 3 { + continue + } + rs, is := returnStmt.Results[2].(*ast.UnaryExpr) + if !is { + return true + } + lit, is := rs.X.(*ast.CompositeLit) + if !is { + continue + } + addressExpr := &ast.KeyValueExpr{ + Key: ast.NewIdent("address"), + Value: ast.NewIdent("address"), + } + abiExpr := &ast.KeyValueExpr{ + Key: ast.NewIdent("abi"), + Value: ast.NewIdent("*parsed"), + } + lit.Elts = append([]ast.Expr{addressExpr, abiExpr}, lit.Elts...) + } + return false + }, nil).(*ast.File) +} + +func getLogNames(fileNode *ast.File) []string { + var logNames []string + astutil.Apply(fileNode, func(cursor *astutil.Cursor) bool { + x, is := cursor.Node().(*ast.FuncDecl) + if !is { + return true + } else if !strings.HasPrefix(x.Name.Name, "Parse") { + return false + } + logNames = append(logNames, x.Name.Name[len("Parse"):]) + return false + }, nil) + return logNames +} + +func replaceAnonymousStructs(contractName string, fileNode *ast.File) *ast.File { + done := map[string]bool{} + return astutil.Apply(fileNode, func(cursor *astutil.Cursor) bool { + // Replace all anonymous structs with named structs + x, is := cursor.Node().(*ast.FuncDecl) + if !is { + return true + } else if len(x.Type.Results.List) == 0 { + return false + } + theStruct, is := x.Type.Results.List[0].Type.(*ast.StructType) + if !is { + return false + } + + methodName := x.Name.Name + x.Type.Results.List[0].Type = ast.NewIdent(methodName) + + x.Body = astutil.Apply(x.Body, func(cursor *astutil.Cursor) bool { + if _, is := cursor.Node().(*ast.StructType); !is { + return true + } + if call, 
is := cursor.Parent().(*ast.CallExpr); is { + for i, arg := range call.Args { + if arg == cursor.Node() { + call.Args[i] = ast.NewIdent(methodName) + break + } + } + } + return true + }, nil).(*ast.BlockStmt) + + if done[contractName+methodName] { + return true + } + + // Add the named structs to the bottom of the file + fileNode.Decls = append(fileNode.Decls, &ast.GenDecl{ + Tok: token.TYPE, + Specs: []ast.Spec{ + &ast.TypeSpec{ + Name: ast.NewIdent(methodName), + Type: theStruct, + }, + }, + }) + + done[contractName+methodName] = true + return false + }, nil).(*ast.File) +} + +func writeAdditionalMethods(contractName string, logNames []string, abi abi.ABI, bs []byte) []byte { + // Write the ParseLog method + if len(logNames) > 0 { + var logSwitchBody string + for _, logName := range logNames { + logSwitchBody += fmt.Sprintf(`case _%v.abi.Events["%v"].ID: + return _%v.Parse%v(log) +`, contractName, logName, contractName, logName) + } + + bs = append(bs, []byte(fmt.Sprintf(` +func (_%v *%v) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + %v + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %%v", log.Topics[0]) + } +} +`, contractName, contractName, logSwitchBody))...) + } + + // Write the Topic method + for _, logName := range logNames { + bs = append(bs, []byte(fmt.Sprintf(` +func (%v%v) Topic() common.Hash { + return common.HexToHash("%v") +} +`, contractName, logName, abi.Events[logName].ID.Hex()))...) + } + + // Write the Address method to the bottom of the file + bs = append(bs, []byte(fmt.Sprintf(` +func (_%v *%v) Address() common.Address { + return _%v.address +} +`, contractName, contractName, contractName))...) 
+ + return bs +} + +func writeInterface(contractName string, fileNode *ast.File) *ast.File { + // Generate an interface for the contract + var methods []*ast.Field + ast.Inspect(fileNode, func(n ast.Node) bool { + switch x := n.(type) { + case *ast.FuncDecl: + if x.Recv == nil { + return true + } + star, is := x.Recv.List[0].Type.(*ast.StarExpr) + if !is { + return false + } + + typeName := star.X.(*ast.Ident).String() + if typeName != contractName && typeName != contractName+"Caller" && typeName != contractName+"Transactor" && typeName != contractName+"Filterer" { + return true + } + + methods = append(methods, &ast.Field{ + Names: []*ast.Ident{x.Name}, + Type: x.Type, + }) + } + return true + }) + + fileNode.Decls = append(fileNode.Decls, &ast.GenDecl{ + Tok: token.TYPE, + Specs: []ast.Spec{ + &ast.TypeSpec{ + Name: ast.NewIdent(contractName + "Interface"), + Type: &ast.InterfaceType{ + Methods: &ast.FieldList{ + List: methods, + }, + }, + }, + }, + }) + + return fileNode +} + +func addHeader(code []byte) []byte { + return utils.ConcatBytes([]byte(headerComment), code) +} diff --git a/core/gethwrappers/abigen_test.go b/core/gethwrappers/abigen_test.go new file mode 100644 index 00000000..1b3a5105 --- /dev/null +++ b/core/gethwrappers/abigen_test.go @@ -0,0 +1,29 @@ +package gethwrappers + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_emitter" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +// Test that the generated Deploy method fill all the required fields and returns the correct address. +// We perform this test using the generated LogEmitter wrapper. 
+func TestGeneratedDeployMethodAddressField(t *testing.T) { + owner := testutils.MustNewSimTransactor(t) + ec := backends.NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), map[common.Address]core.GenesisAccount{ + owner.From: { + Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)), + }, + }, 10e6) + emitterAddr, _, emitter, err := log_emitter.DeployLogEmitter(owner, ec) + require.NoError(t, err) + require.Equal(t, emitterAddr, emitter.Address()) +} diff --git a/core/gethwrappers/doc.go b/core/gethwrappers/doc.go new file mode 100644 index 00000000..292c148f --- /dev/null +++ b/core/gethwrappers/doc.go @@ -0,0 +1,4 @@ +// Package gethwrappers provides infrastructure for generating and verifying +// go-ethereum wrapper packages for smart contracts. See go_generate.go for more +// information. +package gethwrappers diff --git a/core/gethwrappers/functions/generated/functions/functions.go b/core/gethwrappers/functions/generated/functions/functions.go new file mode 100644 index 00000000..0c35806b --- /dev/null +++ b/core/gethwrappers/functions/generated/functions/functions.go @@ -0,0 +1,202 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package functions + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var FunctionsMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"EmptyArgs\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySource\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoInlineSecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"REQUEST_DATA_VERSION\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: "0x6063610038600b82828239805160001a607314602b57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe730000000000000000000000000000000000000000301460806040526004361060335760003560e01c80635d641dfc146038575b600080fd5b603f600181565b60405161ffff909116815260200160405180910390f3fea164736f6c6343000813000a", +} + +var FunctionsABI = FunctionsMetaData.ABI + +var FunctionsBin = FunctionsMetaData.Bin + +func DeployFunctions(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Functions, error) { + parsed, err := FunctionsMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(FunctionsBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Functions{address: 
address, abi: *parsed, FunctionsCaller: FunctionsCaller{contract: contract}, FunctionsTransactor: FunctionsTransactor{contract: contract}, FunctionsFilterer: FunctionsFilterer{contract: contract}}, nil +} + +type Functions struct { + address common.Address + abi abi.ABI + FunctionsCaller + FunctionsTransactor + FunctionsFilterer +} + +type FunctionsCaller struct { + contract *bind.BoundContract +} + +type FunctionsTransactor struct { + contract *bind.BoundContract +} + +type FunctionsFilterer struct { + contract *bind.BoundContract +} + +type FunctionsSession struct { + Contract *Functions + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type FunctionsCallerSession struct { + Contract *FunctionsCaller + CallOpts bind.CallOpts +} + +type FunctionsTransactorSession struct { + Contract *FunctionsTransactor + TransactOpts bind.TransactOpts +} + +type FunctionsRaw struct { + Contract *Functions +} + +type FunctionsCallerRaw struct { + Contract *FunctionsCaller +} + +type FunctionsTransactorRaw struct { + Contract *FunctionsTransactor +} + +func NewFunctions(address common.Address, backend bind.ContractBackend) (*Functions, error) { + abi, err := abi.JSON(strings.NewReader(FunctionsABI)) + if err != nil { + return nil, err + } + contract, err := bindFunctions(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Functions{address: address, abi: abi, FunctionsCaller: FunctionsCaller{contract: contract}, FunctionsTransactor: FunctionsTransactor{contract: contract}, FunctionsFilterer: FunctionsFilterer{contract: contract}}, nil +} + +func NewFunctionsCaller(address common.Address, caller bind.ContractCaller) (*FunctionsCaller, error) { + contract, err := bindFunctions(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FunctionsCaller{contract: contract}, nil +} + +func NewFunctionsTransactor(address common.Address, transactor bind.ContractTransactor) (*FunctionsTransactor, error) { + contract, err := 
bindFunctions(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &FunctionsTransactor{contract: contract}, nil +} + +func NewFunctionsFilterer(address common.Address, filterer bind.ContractFilterer) (*FunctionsFilterer, error) { + contract, err := bindFunctions(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FunctionsFilterer{contract: contract}, nil +} + +func bindFunctions(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FunctionsMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_Functions *FunctionsRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Functions.Contract.FunctionsCaller.contract.Call(opts, result, method, params...) +} + +func (_Functions *FunctionsRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Functions.Contract.FunctionsTransactor.contract.Transfer(opts) +} + +func (_Functions *FunctionsRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Functions.Contract.FunctionsTransactor.contract.Transact(opts, method, params...) +} + +func (_Functions *FunctionsCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Functions.Contract.contract.Call(opts, result, method, params...) +} + +func (_Functions *FunctionsTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Functions.Contract.contract.Transfer(opts) +} + +func (_Functions *FunctionsTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Functions.Contract.contract.Transact(opts, method, params...) 
+} + +func (_Functions *FunctionsCaller) REQUESTDATAVERSION(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _Functions.contract.Call(opts, &out, "REQUEST_DATA_VERSION") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_Functions *FunctionsSession) REQUESTDATAVERSION() (uint16, error) { + return _Functions.Contract.REQUESTDATAVERSION(&_Functions.CallOpts) +} + +func (_Functions *FunctionsCallerSession) REQUESTDATAVERSION() (uint16, error) { + return _Functions.Contract.REQUESTDATAVERSION(&_Functions.CallOpts) +} + +func (_Functions *Functions) Address() common.Address { + return _Functions.address +} + +type FunctionsInterface interface { + REQUESTDATAVERSION(opts *bind.CallOpts) (uint16, error) + + Address() common.Address +} diff --git a/core/gethwrappers/functions/generated/functions_allow_list/functions_allow_list.go b/core/gethwrappers/functions/generated/functions_allow_list/functions_allow_list.go new file mode 100644 index 00000000..d52fb074 --- /dev/null +++ b/core/gethwrappers/functions/generated/functions_allow_list/functions_allow_list.go @@ -0,0 +1,1355 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package functions_allow_list + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type TermsOfServiceAllowListConfig struct { + Enabled bool + SignerPublicKey common.Address +} + +var TermsOfServiceAllowListMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"components\":[{\"internalType\":\"bool\",\"name\":\"enabled\",\"type\":\"bool\"},{\"internalType\":\"address\",\"name\":\"signerPublicKey\",\"type\":\"address\"}],\"internalType\":\"structTermsOfServiceAllowListConfig\",\"name\":\"config\",\"type\":\"tuple\"},{\"internalType\":\"address[]\",\"name\":\"initialAllowedSenders\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"initialBlockedSenders\",\"type\":\"address[]\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSignature\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidUsage\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RecipientIsBlocked\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"user\",\"type\":\"address\"}],\"name\":\"AddedAccess\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"user\",\"type\":\"address\"}],\"name\":\"BlockedAccess\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"bool\",\"nam
e\":\"enabled\",\"type\":\"bool\"},{\"internalType\":\"address\",\"name\":\"signerPublicKey\",\"type\":\"address\"}],\"indexed\":false,\"internalType\":\"structTermsOfServiceAllowListConfig\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"ConfigUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"user\",\"type\":\"address\"}],\"name\":\"UnblockedAccess\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"acceptor\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"r\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"s\",\"type\":\"bytes32\"},{\"internalType\":\"uint8\",\"name\":\"v\",\"type\":\"uint8\"}],\"name\":\"acceptTermsOfService\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"blockSender\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAllAllowedSenders\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAllowedSendersCount\",\"outputs\":[{\"int
ernalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"allowedSenderIdxStart\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"allowedSenderIdxEnd\",\"type\":\"uint64\"}],\"name\":\"getAllowedSendersInRange\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"allowedSenders\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getBlockedSendersCount\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"blockedSenderIdxStart\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"blockedSenderIdxEnd\",\"type\":\"uint64\"}],\"name\":\"getBlockedSendersInRange\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"blockedSenders\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"components\":[{\"internalType\":\"bool\",\"name\":\"enabled\",\"type\":\"bool\"},{\"internalType\":\"address\",\"name\":\"signerPublicKey\",\"type\":\"address\"}],\"internalType\":\"structTermsOfServiceAllowListConfig\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"acceptor\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"}],\"name\":\"getMessage\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"user\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"hasAccess\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"funct
ion\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"isBlockedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"unblockSender\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bool\",\"name\":\"enabled\",\"type\":\"bool\"},{\"internalType\":\"address\",\"name\":\"signerPublicKey\",\"type\":\"address\"}],\"internalType\":\"structTermsOfServiceAllowListConfig\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"updateConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60806040523480156200001157600080fd5b5060405162001a3038038062001a308339810160408190526200003491620004d9565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620001d4565b505050620000d2836200027f60201b60201c565b60005b8251811015620001255762000111838281518110620000f857620000f8620005a8565b602002602001015160026200030660201b90919060201c565b506200011d81620005be565b9050620000d5565b5060005b8151811015620001ca57620001658282815181106200014c576200014c620005a8565b602002602001015160026200032660201b90919060201c565b156200018457604051638129bbcd60e01b815260040160405180910390fd5b620001b68282815181106200019d576200019d620005a8565b602002602001015160046200030660201b90919060201c565b50620001c281620005be565b905062000129565b50505050620005e6565b336001600160a01b038216036200022e5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6200028962000349565b805160068054602080850180516001600160a81b0319909316941515610100600160a81b03198116959095176101006001600160a01b039485160217909355604080519485529251909116908301527f0d22b8a99f411b3dd338c961284f608489ca0dab9cdad17366a343c361bcf80a910160405180910390a150565b60006200031d836001600160a01b038416620003a7565b90505b92915050565b6001600160a01b038116600090815260018301602052604081205415156200031d565b6000546001600160a01b03163314620003a55760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000082565b565b6000818152600183016020526040812054620003f057508154600181810184556000848152602080822090930184905584548482528286019093526040902
09190915562000320565b50600062000320565b634e487b7160e01b600052604160045260246000fd5b80516001600160a01b03811681146200042757600080fd5b919050565b600082601f8301126200043e57600080fd5b815160206001600160401b03808311156200045d576200045d620003f9565b8260051b604051601f19603f83011681018181108482111715620004855762000485620003f9565b604052938452858101830193838101925087851115620004a457600080fd5b83870191505b84821015620004ce57620004be826200040f565b83529183019190830190620004aa565b979650505050505050565b60008060008385036080811215620004f057600080fd5b6040811215620004ff57600080fd5b50604080519081016001600160401b038082118383101715620005265762000526620003f9565b816040528651915081151582146200053d57600080fd5b8183526200054e602088016200040f565b60208401526040870151929550808311156200056957600080fd5b62000577888489016200042c565b945060608701519250808311156200058e57600080fd5b50506200059e868287016200042c565b9150509250925092565b634e487b7160e01b600052603260045260246000fd5b600060018201620005df57634e487b7160e01b600052601160045260246000fd5b5060010190565b61143a80620005f66000396000f3fe608060405234801561001057600080fd5b506004361061011b5760003560e01c8063817ef62e116100b2578063a39b06e311610081578063c3f909d411610066578063c3f909d4146102c3578063cc7ebf4914610322578063f2fde38b1461032a57600080fd5b8063a39b06e314610237578063a5e1d61d146102b057600080fd5b8063817ef62e146101e157806382184c7b146101e957806389f9a2c4146101fc5780638da5cb5b1461020f57600080fd5b80633908c4d4116100ee5780633908c4d41461018e57806347663acb146101a35780636b14daf8146101b657806379ba5097146101d957600080fd5b806301a05958146101205780630a8c9c2414610146578063181f5a771461016657806320229a861461017b575b600080fd5b61012861033d565b60405167ffffffffffffffff90911681526020015b60405180910390f35b610159610154366004610fd6565b61034e565b60405161013d9190611009565b61016e6104bc565b60405161013d9190611063565b610159610189366004610fd6565b6104d8565b6101a161019c3660046110f3565b61063e565b005b6101a16101b1366004611154565b6108e9565b6101c96101c436600461116f565b61094a565b604051901515815260200161013d5
65b6101a1610974565b610159610a76565b6101a16101f7366004611154565b610a82565b6101a161020a366004611221565b610ae8565b60005460405173ffffffffffffffffffffffffffffffffffffffff909116815260200161013d565b6102a26102453660046112aa565b6040517fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606084811b8216602084015283901b16603482015260009060480160405160208183030381529060405280519060200120905092915050565b60405190815260200161013d565b6101c96102be366004611154565b610ba3565b60408051808201825260008082526020918201528151808301835260065460ff8116151580835273ffffffffffffffffffffffffffffffffffffffff61010090920482169284019283528451908152915116918101919091520161013d565b610128610bc3565b6101a1610338366004611154565b610bcf565b60006103496004610be3565b905090565b60608167ffffffffffffffff168367ffffffffffffffff16118061038557506103776002610be3565b8267ffffffffffffffff1610155b8061039757506103956002610be3565b155b156103ce576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6103d88383611303565b6103e3906001611324565b67ffffffffffffffff1667ffffffffffffffff811115610405576104056111f2565b60405190808252806020026020018201604052801561042e578160200160208202803683370190505b50905060005b61043e8484611303565b67ffffffffffffffff1681116104b45761046d6104658267ffffffffffffffff8716611345565b600290610bed565b82828151811061047f5761047f611358565b73ffffffffffffffffffffffffffffffffffffffff909216602092830291909101909101526104ad81611387565b9050610434565b505b92915050565b6040518060600160405280602c8152602001611402602c913981565b60608167ffffffffffffffff168367ffffffffffffffff16118061050f57506105016004610be3565b8267ffffffffffffffff1610155b80610521575061051f6004610be3565b155b15610558576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6105628383611303565b61056d906001611324565b67ffffffffffffffff1667ffffffffffffffff81111561058f5761058f6111f2565b6040519080825280602002602001820160405280156105b8578160200160208202803683370190505b50905060005b610
5c88484611303565b67ffffffffffffffff1681116104b4576105f76105ef8267ffffffffffffffff8716611345565b600490610bed565b82828151811061060957610609611358565b73ffffffffffffffffffffffffffffffffffffffff9092166020928302919091019091015261063781611387565b90506105be565b610649600485610bf9565b15610680576040517f62b7a34d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60408051606087811b7fffffffffffffffffffffffffffffffffffffffff0000000000000000000000009081166020808501919091529188901b16603483015282516028818403018152604890920190925280519101206000906040517f19457468657265756d205369676e6564204d6573736167653a0a3332000000006020820152603c810191909152605c01604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001815282825280516020918201206006546000855291840180845281905260ff8616928401929092526060830187905260808301869052909250610100900473ffffffffffffffffffffffffffffffffffffffff169060019060a0016020604051602081039080840390855afa1580156107b4573d6000803e3d6000fd5b5050506020604051035173ffffffffffffffffffffffffffffffffffffffff161461080b576040517f8baa579f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff861614158061085057503373ffffffffffffffffffffffffffffffffffffffff8716148015906108505750333b155b15610887576040517f381cfcbd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610892600286610c28565b156108e15760405173ffffffffffffffffffffffffffffffffffffffff861681527f87286ad1f399c8e82bf0c4ef4fcdc570ea2e1e92176e5c848b6413545b885db49060200160405180910390a15b505050505050565b6108f1610c4a565b6108fc600482610ccd565b5060405173ffffffffffffffffffffffffffffffffffffffff821681527f28bbd0761309a99e8fb5e5d02ada0b7b2db2e5357531ff5dbfc205c3f5b6592b906020015b60405180910390a150565b60065460009060ff1661095f5750600161096d565b61096a600285610bf9565b90505b9392505050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146109fa576040517f08c379a00000000000000000000000000
0000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60606103496002610cef565b610a8a610c4a565b610a95600282610ccd565b50610aa1600482610c28565b5060405173ffffffffffffffffffffffffffffffffffffffff821681527f337cd0f3f594112b6d830afb510072d3b08556b446514f73b8109162fd1151e19060200161093f565b610af0610c4a565b805160068054602080850180517fffffffffffffffffffffff0000000000000000000000000000000000000000009093169415157fffffffffffffffffffffff0000000000000000000000000000000000000000ff81169590951761010073ffffffffffffffffffffffffffffffffffffffff9485160217909355604080519485529251909116908301527f0d22b8a99f411b3dd338c961284f608489ca0dab9cdad17366a343c361bcf80a910161093f565b60065460009060ff16610bb857506000919050565b6104b6600483610bf9565b60006103496002610be3565b610bd7610c4a565b610be081610cfc565b50565b60006104b6825490565b600061096d8383610df1565b73ffffffffffffffffffffffffffffffffffffffff81166000908152600183016020526040812054151561096d565b600061096d8373ffffffffffffffffffffffffffffffffffffffff8416610e1b565b60005473ffffffffffffffffffffffffffffffffffffffff163314610ccb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016109f1565b565b600061096d8373ffffffffffffffffffffffffffffffffffffffff8416610e6a565b6060600061096d83610f5d565b3373ffffffffffffffffffffffffffffffffffffffff821603610d7b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016109f1565b600180547fffffffffffffffffffffffff0000000
0000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000826000018281548110610e0857610e08611358565b9060005260206000200154905092915050565b6000818152600183016020526040812054610e62575081546001818101845560008481526020808220909301849055845484825282860190935260409020919091556104b6565b5060006104b6565b60008181526001830160205260408120548015610f53576000610e8e6001836113bf565b8554909150600090610ea2906001906113bf565b9050818114610f07576000866000018281548110610ec257610ec2611358565b9060005260206000200154905080876000018481548110610ee557610ee5611358565b6000918252602080832090910192909255918252600188019052604090208390555b8554869080610f1857610f186113d2565b6001900381819060005260206000200160009055905585600101600086815260200190815260200160002060009055600193505050506104b6565b60009150506104b6565b606081600001805480602002602001604051908101604052809291908181526020018280548015610fad57602002820191906000526020600020905b815481526020019060010190808311610f99575b50505050509050919050565b803567ffffffffffffffff81168114610fd157600080fd5b919050565b60008060408385031215610fe957600080fd5b610ff283610fb9565b915061100060208401610fb9565b90509250929050565b6020808252825182820181905260009190848201906040850190845b8181101561105757835173ffffffffffffffffffffffffffffffffffffffff1683529284019291840191600101611025565b50909695505050505050565b600060208083528351808285015260005b8181101561109057858101830151858201604001528201611074565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b803573ffffffffffffffffffffffffffffffffffffffff81168114610fd157600080fd5b600080600080600060a0868803121561110b57600080fd5b611114866110cf565b9450611122602087016110cf565b93506040860135925060608601359150608086013560ff8116811461114657600080fd5b809150509295509295909350565b60006020828403121561116657600080fd5b61096d826110cf565b60008060006040848
603121561118457600080fd5b61118d846110cf565b9250602084013567ffffffffffffffff808211156111aa57600080fd5b818601915086601f8301126111be57600080fd5b8135818111156111cd57600080fd5b8760208285010111156111df57600080fd5b6020830194508093505050509250925092565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60006040828403121561123357600080fd5b6040516040810181811067ffffffffffffffff8211171561127d577f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040528235801515811461129057600080fd5b815261129e602084016110cf565b60208201529392505050565b600080604083850312156112bd57600080fd5b6112c6836110cf565b9150611000602084016110cf565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b67ffffffffffffffff8281168282160390808211156104b4576104b46112d4565b67ffffffffffffffff8181168382160190808211156104b4576104b46112d4565b808201808211156104b6576104b66112d4565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036113b8576113b86112d4565b5060010190565b818103818111156104b6576104b66112d4565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fdfe46756e6374696f6e73205465726d73206f66205365727669636520416c6c6f77204c6973742076312e312e30a164736f6c6343000813000a", +} + +var TermsOfServiceAllowListABI = TermsOfServiceAllowListMetaData.ABI + +var TermsOfServiceAllowListBin = TermsOfServiceAllowListMetaData.Bin + +func DeployTermsOfServiceAllowList(auth *bind.TransactOpts, backend bind.ContractBackend, config TermsOfServiceAllowListConfig, initialAllowedSenders []common.Address, initialBlockedSenders []common.Address) (common.Address, *types.Transaction, *TermsOfServiceAllowList, error) { + parsed, err := TermsOfServiceAllowListMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return 
common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(TermsOfServiceAllowListBin), backend, config, initialAllowedSenders, initialBlockedSenders) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &TermsOfServiceAllowList{address: address, abi: *parsed, TermsOfServiceAllowListCaller: TermsOfServiceAllowListCaller{contract: contract}, TermsOfServiceAllowListTransactor: TermsOfServiceAllowListTransactor{contract: contract}, TermsOfServiceAllowListFilterer: TermsOfServiceAllowListFilterer{contract: contract}}, nil +} + +type TermsOfServiceAllowList struct { + address common.Address + abi abi.ABI + TermsOfServiceAllowListCaller + TermsOfServiceAllowListTransactor + TermsOfServiceAllowListFilterer +} + +type TermsOfServiceAllowListCaller struct { + contract *bind.BoundContract +} + +type TermsOfServiceAllowListTransactor struct { + contract *bind.BoundContract +} + +type TermsOfServiceAllowListFilterer struct { + contract *bind.BoundContract +} + +type TermsOfServiceAllowListSession struct { + Contract *TermsOfServiceAllowList + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type TermsOfServiceAllowListCallerSession struct { + Contract *TermsOfServiceAllowListCaller + CallOpts bind.CallOpts +} + +type TermsOfServiceAllowListTransactorSession struct { + Contract *TermsOfServiceAllowListTransactor + TransactOpts bind.TransactOpts +} + +type TermsOfServiceAllowListRaw struct { + Contract *TermsOfServiceAllowList +} + +type TermsOfServiceAllowListCallerRaw struct { + Contract *TermsOfServiceAllowListCaller +} + +type TermsOfServiceAllowListTransactorRaw struct { + Contract *TermsOfServiceAllowListTransactor +} + +func NewTermsOfServiceAllowList(address common.Address, backend bind.ContractBackend) (*TermsOfServiceAllowList, error) { + abi, err := abi.JSON(strings.NewReader(TermsOfServiceAllowListABI)) + if err != nil { + return 
nil, err + } + contract, err := bindTermsOfServiceAllowList(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &TermsOfServiceAllowList{address: address, abi: abi, TermsOfServiceAllowListCaller: TermsOfServiceAllowListCaller{contract: contract}, TermsOfServiceAllowListTransactor: TermsOfServiceAllowListTransactor{contract: contract}, TermsOfServiceAllowListFilterer: TermsOfServiceAllowListFilterer{contract: contract}}, nil +} + +func NewTermsOfServiceAllowListCaller(address common.Address, caller bind.ContractCaller) (*TermsOfServiceAllowListCaller, error) { + contract, err := bindTermsOfServiceAllowList(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &TermsOfServiceAllowListCaller{contract: contract}, nil +} + +func NewTermsOfServiceAllowListTransactor(address common.Address, transactor bind.ContractTransactor) (*TermsOfServiceAllowListTransactor, error) { + contract, err := bindTermsOfServiceAllowList(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &TermsOfServiceAllowListTransactor{contract: contract}, nil +} + +func NewTermsOfServiceAllowListFilterer(address common.Address, filterer bind.ContractFilterer) (*TermsOfServiceAllowListFilterer, error) { + contract, err := bindTermsOfServiceAllowList(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &TermsOfServiceAllowListFilterer{contract: contract}, nil +} + +func bindTermsOfServiceAllowList(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := TermsOfServiceAllowListMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return 
_TermsOfServiceAllowList.Contract.TermsOfServiceAllowListCaller.contract.Call(opts, result, method, params...) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.TermsOfServiceAllowListTransactor.contract.Transfer(opts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.TermsOfServiceAllowListTransactor.contract.Transact(opts, method, params...) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _TermsOfServiceAllowList.Contract.contract.Call(opts, result, method, params...) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.contract.Transfer(opts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.contract.Transact(opts, method, params...) 
+} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCaller) GetAllAllowedSenders(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _TermsOfServiceAllowList.contract.Call(opts, &out, "getAllAllowedSenders") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) GetAllAllowedSenders() ([]common.Address, error) { + return _TermsOfServiceAllowList.Contract.GetAllAllowedSenders(&_TermsOfServiceAllowList.CallOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCallerSession) GetAllAllowedSenders() ([]common.Address, error) { + return _TermsOfServiceAllowList.Contract.GetAllAllowedSenders(&_TermsOfServiceAllowList.CallOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCaller) GetAllowedSendersCount(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _TermsOfServiceAllowList.contract.Call(opts, &out, "getAllowedSendersCount") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) GetAllowedSendersCount() (uint64, error) { + return _TermsOfServiceAllowList.Contract.GetAllowedSendersCount(&_TermsOfServiceAllowList.CallOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCallerSession) GetAllowedSendersCount() (uint64, error) { + return _TermsOfServiceAllowList.Contract.GetAllowedSendersCount(&_TermsOfServiceAllowList.CallOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCaller) GetAllowedSendersInRange(opts *bind.CallOpts, allowedSenderIdxStart uint64, allowedSenderIdxEnd uint64) ([]common.Address, error) { + var out []interface{} + err := _TermsOfServiceAllowList.contract.Call(opts, &out, "getAllowedSendersInRange", 
allowedSenderIdxStart, allowedSenderIdxEnd) + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) GetAllowedSendersInRange(allowedSenderIdxStart uint64, allowedSenderIdxEnd uint64) ([]common.Address, error) { + return _TermsOfServiceAllowList.Contract.GetAllowedSendersInRange(&_TermsOfServiceAllowList.CallOpts, allowedSenderIdxStart, allowedSenderIdxEnd) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCallerSession) GetAllowedSendersInRange(allowedSenderIdxStart uint64, allowedSenderIdxEnd uint64) ([]common.Address, error) { + return _TermsOfServiceAllowList.Contract.GetAllowedSendersInRange(&_TermsOfServiceAllowList.CallOpts, allowedSenderIdxStart, allowedSenderIdxEnd) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCaller) GetBlockedSendersCount(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _TermsOfServiceAllowList.contract.Call(opts, &out, "getBlockedSendersCount") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) GetBlockedSendersCount() (uint64, error) { + return _TermsOfServiceAllowList.Contract.GetBlockedSendersCount(&_TermsOfServiceAllowList.CallOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCallerSession) GetBlockedSendersCount() (uint64, error) { + return _TermsOfServiceAllowList.Contract.GetBlockedSendersCount(&_TermsOfServiceAllowList.CallOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCaller) GetBlockedSendersInRange(opts *bind.CallOpts, blockedSenderIdxStart uint64, blockedSenderIdxEnd uint64) ([]common.Address, error) { + var out []interface{} + err := _TermsOfServiceAllowList.contract.Call(opts, &out, "getBlockedSendersInRange", 
blockedSenderIdxStart, blockedSenderIdxEnd) + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) GetBlockedSendersInRange(blockedSenderIdxStart uint64, blockedSenderIdxEnd uint64) ([]common.Address, error) { + return _TermsOfServiceAllowList.Contract.GetBlockedSendersInRange(&_TermsOfServiceAllowList.CallOpts, blockedSenderIdxStart, blockedSenderIdxEnd) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCallerSession) GetBlockedSendersInRange(blockedSenderIdxStart uint64, blockedSenderIdxEnd uint64) ([]common.Address, error) { + return _TermsOfServiceAllowList.Contract.GetBlockedSendersInRange(&_TermsOfServiceAllowList.CallOpts, blockedSenderIdxStart, blockedSenderIdxEnd) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCaller) GetConfig(opts *bind.CallOpts) (TermsOfServiceAllowListConfig, error) { + var out []interface{} + err := _TermsOfServiceAllowList.contract.Call(opts, &out, "getConfig") + + if err != nil { + return *new(TermsOfServiceAllowListConfig), err + } + + out0 := *abi.ConvertType(out[0], new(TermsOfServiceAllowListConfig)).(*TermsOfServiceAllowListConfig) + + return out0, err + +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) GetConfig() (TermsOfServiceAllowListConfig, error) { + return _TermsOfServiceAllowList.Contract.GetConfig(&_TermsOfServiceAllowList.CallOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCallerSession) GetConfig() (TermsOfServiceAllowListConfig, error) { + return _TermsOfServiceAllowList.Contract.GetConfig(&_TermsOfServiceAllowList.CallOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCaller) GetMessage(opts *bind.CallOpts, acceptor common.Address, recipient common.Address) ([32]byte, error) { + var out []interface{} + err := _TermsOfServiceAllowList.contract.Call(opts, &out, 
"getMessage", acceptor, recipient) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) GetMessage(acceptor common.Address, recipient common.Address) ([32]byte, error) { + return _TermsOfServiceAllowList.Contract.GetMessage(&_TermsOfServiceAllowList.CallOpts, acceptor, recipient) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCallerSession) GetMessage(acceptor common.Address, recipient common.Address) ([32]byte, error) { + return _TermsOfServiceAllowList.Contract.GetMessage(&_TermsOfServiceAllowList.CallOpts, acceptor, recipient) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCaller) HasAccess(opts *bind.CallOpts, user common.Address, arg1 []byte) (bool, error) { + var out []interface{} + err := _TermsOfServiceAllowList.contract.Call(opts, &out, "hasAccess", user, arg1) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) HasAccess(user common.Address, arg1 []byte) (bool, error) { + return _TermsOfServiceAllowList.Contract.HasAccess(&_TermsOfServiceAllowList.CallOpts, user, arg1) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCallerSession) HasAccess(user common.Address, arg1 []byte) (bool, error) { + return _TermsOfServiceAllowList.Contract.HasAccess(&_TermsOfServiceAllowList.CallOpts, user, arg1) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCaller) IsBlockedSender(opts *bind.CallOpts, sender common.Address) (bool, error) { + var out []interface{} + err := _TermsOfServiceAllowList.contract.Call(opts, &out, "isBlockedSender", sender) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_TermsOfServiceAllowList 
*TermsOfServiceAllowListSession) IsBlockedSender(sender common.Address) (bool, error) { + return _TermsOfServiceAllowList.Contract.IsBlockedSender(&_TermsOfServiceAllowList.CallOpts, sender) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCallerSession) IsBlockedSender(sender common.Address) (bool, error) { + return _TermsOfServiceAllowList.Contract.IsBlockedSender(&_TermsOfServiceAllowList.CallOpts, sender) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _TermsOfServiceAllowList.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) Owner() (common.Address, error) { + return _TermsOfServiceAllowList.Contract.Owner(&_TermsOfServiceAllowList.CallOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCallerSession) Owner() (common.Address, error) { + return _TermsOfServiceAllowList.Contract.Owner(&_TermsOfServiceAllowList.CallOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _TermsOfServiceAllowList.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) TypeAndVersion() (string, error) { + return _TermsOfServiceAllowList.Contract.TypeAndVersion(&_TermsOfServiceAllowList.CallOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListCallerSession) TypeAndVersion() (string, error) { + return _TermsOfServiceAllowList.Contract.TypeAndVersion(&_TermsOfServiceAllowList.CallOpts) +} + +func (_TermsOfServiceAllowList 
*TermsOfServiceAllowListTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _TermsOfServiceAllowList.contract.Transact(opts, "acceptOwnership") +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) AcceptOwnership() (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.AcceptOwnership(&_TermsOfServiceAllowList.TransactOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.AcceptOwnership(&_TermsOfServiceAllowList.TransactOpts) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactor) AcceptTermsOfService(opts *bind.TransactOpts, acceptor common.Address, recipient common.Address, r [32]byte, s [32]byte, v uint8) (*types.Transaction, error) { + return _TermsOfServiceAllowList.contract.Transact(opts, "acceptTermsOfService", acceptor, recipient, r, s, v) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) AcceptTermsOfService(acceptor common.Address, recipient common.Address, r [32]byte, s [32]byte, v uint8) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.AcceptTermsOfService(&_TermsOfServiceAllowList.TransactOpts, acceptor, recipient, r, s, v) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactorSession) AcceptTermsOfService(acceptor common.Address, recipient common.Address, r [32]byte, s [32]byte, v uint8) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.AcceptTermsOfService(&_TermsOfServiceAllowList.TransactOpts, acceptor, recipient, r, s, v) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactor) BlockSender(opts *bind.TransactOpts, sender common.Address) (*types.Transaction, error) { + return _TermsOfServiceAllowList.contract.Transact(opts, "blockSender", sender) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) BlockSender(sender 
common.Address) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.BlockSender(&_TermsOfServiceAllowList.TransactOpts, sender) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactorSession) BlockSender(sender common.Address) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.BlockSender(&_TermsOfServiceAllowList.TransactOpts, sender) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _TermsOfServiceAllowList.contract.Transact(opts, "transferOwnership", to) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.TransferOwnership(&_TermsOfServiceAllowList.TransactOpts, to) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.TransferOwnership(&_TermsOfServiceAllowList.TransactOpts, to) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactor) UnblockSender(opts *bind.TransactOpts, sender common.Address) (*types.Transaction, error) { + return _TermsOfServiceAllowList.contract.Transact(opts, "unblockSender", sender) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) UnblockSender(sender common.Address) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.UnblockSender(&_TermsOfServiceAllowList.TransactOpts, sender) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactorSession) UnblockSender(sender common.Address) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.UnblockSender(&_TermsOfServiceAllowList.TransactOpts, sender) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactor) UpdateConfig(opts *bind.TransactOpts, config 
TermsOfServiceAllowListConfig) (*types.Transaction, error) { + return _TermsOfServiceAllowList.contract.Transact(opts, "updateConfig", config) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListSession) UpdateConfig(config TermsOfServiceAllowListConfig) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.UpdateConfig(&_TermsOfServiceAllowList.TransactOpts, config) +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListTransactorSession) UpdateConfig(config TermsOfServiceAllowListConfig) (*types.Transaction, error) { + return _TermsOfServiceAllowList.Contract.UpdateConfig(&_TermsOfServiceAllowList.TransactOpts, config) +} + +type TermsOfServiceAllowListAddedAccessIterator struct { + Event *TermsOfServiceAllowListAddedAccess + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TermsOfServiceAllowListAddedAccessIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(TermsOfServiceAllowListAddedAccess) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(TermsOfServiceAllowListAddedAccess) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *TermsOfServiceAllowListAddedAccessIterator) Error() error { + return it.fail +} + +func (it *TermsOfServiceAllowListAddedAccessIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TermsOfServiceAllowListAddedAccess struct { + User common.Address + Raw types.Log +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) 
FilterAddedAccess(opts *bind.FilterOpts) (*TermsOfServiceAllowListAddedAccessIterator, error) { + + logs, sub, err := _TermsOfServiceAllowList.contract.FilterLogs(opts, "AddedAccess") + if err != nil { + return nil, err + } + return &TermsOfServiceAllowListAddedAccessIterator{contract: _TermsOfServiceAllowList.contract, event: "AddedAccess", logs: logs, sub: sub}, nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) WatchAddedAccess(opts *bind.WatchOpts, sink chan<- *TermsOfServiceAllowListAddedAccess) (event.Subscription, error) { + + logs, sub, err := _TermsOfServiceAllowList.contract.WatchLogs(opts, "AddedAccess") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TermsOfServiceAllowListAddedAccess) + if err := _TermsOfServiceAllowList.contract.UnpackLog(event, "AddedAccess", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) ParseAddedAccess(log types.Log) (*TermsOfServiceAllowListAddedAccess, error) { + event := new(TermsOfServiceAllowListAddedAccess) + if err := _TermsOfServiceAllowList.contract.UnpackLog(event, "AddedAccess", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type TermsOfServiceAllowListBlockedAccessIterator struct { + Event *TermsOfServiceAllowListBlockedAccess + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TermsOfServiceAllowListBlockedAccessIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(TermsOfServiceAllowListBlockedAccess) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(TermsOfServiceAllowListBlockedAccess) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *TermsOfServiceAllowListBlockedAccessIterator) Error() error { + return it.fail +} + +func (it *TermsOfServiceAllowListBlockedAccessIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TermsOfServiceAllowListBlockedAccess struct { + User common.Address + Raw types.Log +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) FilterBlockedAccess(opts *bind.FilterOpts) (*TermsOfServiceAllowListBlockedAccessIterator, error) { + + logs, sub, err := _TermsOfServiceAllowList.contract.FilterLogs(opts, "BlockedAccess") + if err != nil { + return nil, err + } + return &TermsOfServiceAllowListBlockedAccessIterator{contract: _TermsOfServiceAllowList.contract, event: "BlockedAccess", logs: logs, sub: sub}, nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) WatchBlockedAccess(opts *bind.WatchOpts, sink chan<- *TermsOfServiceAllowListBlockedAccess) (event.Subscription, error) { + + logs, sub, err := _TermsOfServiceAllowList.contract.WatchLogs(opts, "BlockedAccess") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TermsOfServiceAllowListBlockedAccess) + if err := _TermsOfServiceAllowList.contract.UnpackLog(event, "BlockedAccess", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) ParseBlockedAccess(log types.Log) (*TermsOfServiceAllowListBlockedAccess, error) { + event := new(TermsOfServiceAllowListBlockedAccess) + if err := _TermsOfServiceAllowList.contract.UnpackLog(event, "BlockedAccess", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type TermsOfServiceAllowListConfigUpdatedIterator struct { + Event *TermsOfServiceAllowListConfigUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TermsOfServiceAllowListConfigUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(TermsOfServiceAllowListConfigUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(TermsOfServiceAllowListConfigUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *TermsOfServiceAllowListConfigUpdatedIterator) Error() error { + return it.fail +} + +func (it *TermsOfServiceAllowListConfigUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TermsOfServiceAllowListConfigUpdated struct { + Config TermsOfServiceAllowListConfig + Raw types.Log +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) FilterConfigUpdated(opts *bind.FilterOpts) (*TermsOfServiceAllowListConfigUpdatedIterator, error) { + + logs, sub, err := 
_TermsOfServiceAllowList.contract.FilterLogs(opts, "ConfigUpdated") + if err != nil { + return nil, err + } + return &TermsOfServiceAllowListConfigUpdatedIterator{contract: _TermsOfServiceAllowList.contract, event: "ConfigUpdated", logs: logs, sub: sub}, nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) WatchConfigUpdated(opts *bind.WatchOpts, sink chan<- *TermsOfServiceAllowListConfigUpdated) (event.Subscription, error) { + + logs, sub, err := _TermsOfServiceAllowList.contract.WatchLogs(opts, "ConfigUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TermsOfServiceAllowListConfigUpdated) + if err := _TermsOfServiceAllowList.contract.UnpackLog(event, "ConfigUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) ParseConfigUpdated(log types.Log) (*TermsOfServiceAllowListConfigUpdated, error) { + event := new(TermsOfServiceAllowListConfigUpdated) + if err := _TermsOfServiceAllowList.contract.UnpackLog(event, "ConfigUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type TermsOfServiceAllowListOwnershipTransferRequestedIterator struct { + Event *TermsOfServiceAllowListOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TermsOfServiceAllowListOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(TermsOfServiceAllowListOwnershipTransferRequested) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(TermsOfServiceAllowListOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *TermsOfServiceAllowListOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *TermsOfServiceAllowListOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TermsOfServiceAllowListOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*TermsOfServiceAllowListOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _TermsOfServiceAllowList.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &TermsOfServiceAllowListOwnershipTransferRequestedIterator{contract: _TermsOfServiceAllowList.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *TermsOfServiceAllowListOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = 
append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _TermsOfServiceAllowList.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TermsOfServiceAllowListOwnershipTransferRequested) + if err := _TermsOfServiceAllowList.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) ParseOwnershipTransferRequested(log types.Log) (*TermsOfServiceAllowListOwnershipTransferRequested, error) { + event := new(TermsOfServiceAllowListOwnershipTransferRequested) + if err := _TermsOfServiceAllowList.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type TermsOfServiceAllowListOwnershipTransferredIterator struct { + Event *TermsOfServiceAllowListOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TermsOfServiceAllowListOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(TermsOfServiceAllowListOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(TermsOfServiceAllowListOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *TermsOfServiceAllowListOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *TermsOfServiceAllowListOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TermsOfServiceAllowListOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*TermsOfServiceAllowListOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _TermsOfServiceAllowList.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &TermsOfServiceAllowListOwnershipTransferredIterator{contract: _TermsOfServiceAllowList.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *TermsOfServiceAllowListOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _TermsOfServiceAllowList.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TermsOfServiceAllowListOwnershipTransferred) + if err := _TermsOfServiceAllowList.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) ParseOwnershipTransferred(log types.Log) (*TermsOfServiceAllowListOwnershipTransferred, error) { + event := new(TermsOfServiceAllowListOwnershipTransferred) + if err := _TermsOfServiceAllowList.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type TermsOfServiceAllowListUnblockedAccessIterator struct { + Event *TermsOfServiceAllowListUnblockedAccess + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TermsOfServiceAllowListUnblockedAccessIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(TermsOfServiceAllowListUnblockedAccess) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(TermsOfServiceAllowListUnblockedAccess) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *TermsOfServiceAllowListUnblockedAccessIterator) Error() error { + return it.fail +} + +func 
(it *TermsOfServiceAllowListUnblockedAccessIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TermsOfServiceAllowListUnblockedAccess struct { + User common.Address + Raw types.Log +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) FilterUnblockedAccess(opts *bind.FilterOpts) (*TermsOfServiceAllowListUnblockedAccessIterator, error) { + + logs, sub, err := _TermsOfServiceAllowList.contract.FilterLogs(opts, "UnblockedAccess") + if err != nil { + return nil, err + } + return &TermsOfServiceAllowListUnblockedAccessIterator{contract: _TermsOfServiceAllowList.contract, event: "UnblockedAccess", logs: logs, sub: sub}, nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) WatchUnblockedAccess(opts *bind.WatchOpts, sink chan<- *TermsOfServiceAllowListUnblockedAccess) (event.Subscription, error) { + + logs, sub, err := _TermsOfServiceAllowList.contract.WatchLogs(opts, "UnblockedAccess") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TermsOfServiceAllowListUnblockedAccess) + if err := _TermsOfServiceAllowList.contract.UnpackLog(event, "UnblockedAccess", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowListFilterer) ParseUnblockedAccess(log types.Log) (*TermsOfServiceAllowListUnblockedAccess, error) { + event := new(TermsOfServiceAllowListUnblockedAccess) + if err := _TermsOfServiceAllowList.contract.UnpackLog(event, "UnblockedAccess", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_TermsOfServiceAllowList *TermsOfServiceAllowList) ParseLog(log types.Log) (generated.AbigenLog, 
error) { + switch log.Topics[0] { + case _TermsOfServiceAllowList.abi.Events["AddedAccess"].ID: + return _TermsOfServiceAllowList.ParseAddedAccess(log) + case _TermsOfServiceAllowList.abi.Events["BlockedAccess"].ID: + return _TermsOfServiceAllowList.ParseBlockedAccess(log) + case _TermsOfServiceAllowList.abi.Events["ConfigUpdated"].ID: + return _TermsOfServiceAllowList.ParseConfigUpdated(log) + case _TermsOfServiceAllowList.abi.Events["OwnershipTransferRequested"].ID: + return _TermsOfServiceAllowList.ParseOwnershipTransferRequested(log) + case _TermsOfServiceAllowList.abi.Events["OwnershipTransferred"].ID: + return _TermsOfServiceAllowList.ParseOwnershipTransferred(log) + case _TermsOfServiceAllowList.abi.Events["UnblockedAccess"].ID: + return _TermsOfServiceAllowList.ParseUnblockedAccess(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (TermsOfServiceAllowListAddedAccess) Topic() common.Hash { + return common.HexToHash("0x87286ad1f399c8e82bf0c4ef4fcdc570ea2e1e92176e5c848b6413545b885db4") +} + +func (TermsOfServiceAllowListBlockedAccess) Topic() common.Hash { + return common.HexToHash("0x337cd0f3f594112b6d830afb510072d3b08556b446514f73b8109162fd1151e1") +} + +func (TermsOfServiceAllowListConfigUpdated) Topic() common.Hash { + return common.HexToHash("0x0d22b8a99f411b3dd338c961284f608489ca0dab9cdad17366a343c361bcf80a") +} + +func (TermsOfServiceAllowListOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (TermsOfServiceAllowListOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (TermsOfServiceAllowListUnblockedAccess) Topic() common.Hash { + return common.HexToHash("0x28bbd0761309a99e8fb5e5d02ada0b7b2db2e5357531ff5dbfc205c3f5b6592b") +} + +func (_TermsOfServiceAllowList 
*TermsOfServiceAllowList) Address() common.Address { + return _TermsOfServiceAllowList.address +} + +type TermsOfServiceAllowListInterface interface { + GetAllAllowedSenders(opts *bind.CallOpts) ([]common.Address, error) + + GetAllowedSendersCount(opts *bind.CallOpts) (uint64, error) + + GetAllowedSendersInRange(opts *bind.CallOpts, allowedSenderIdxStart uint64, allowedSenderIdxEnd uint64) ([]common.Address, error) + + GetBlockedSendersCount(opts *bind.CallOpts) (uint64, error) + + GetBlockedSendersInRange(opts *bind.CallOpts, blockedSenderIdxStart uint64, blockedSenderIdxEnd uint64) ([]common.Address, error) + + GetConfig(opts *bind.CallOpts) (TermsOfServiceAllowListConfig, error) + + GetMessage(opts *bind.CallOpts, acceptor common.Address, recipient common.Address) ([32]byte, error) + + HasAccess(opts *bind.CallOpts, user common.Address, arg1 []byte) (bool, error) + + IsBlockedSender(opts *bind.CallOpts, sender common.Address) (bool, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptTermsOfService(opts *bind.TransactOpts, acceptor common.Address, recipient common.Address, r [32]byte, s [32]byte, v uint8) (*types.Transaction, error) + + BlockSender(opts *bind.TransactOpts, sender common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + UnblockSender(opts *bind.TransactOpts, sender common.Address) (*types.Transaction, error) + + UpdateConfig(opts *bind.TransactOpts, config TermsOfServiceAllowListConfig) (*types.Transaction, error) + + FilterAddedAccess(opts *bind.FilterOpts) (*TermsOfServiceAllowListAddedAccessIterator, error) + + WatchAddedAccess(opts *bind.WatchOpts, sink chan<- *TermsOfServiceAllowListAddedAccess) (event.Subscription, error) + + ParseAddedAccess(log types.Log) (*TermsOfServiceAllowListAddedAccess, error) + + 
FilterBlockedAccess(opts *bind.FilterOpts) (*TermsOfServiceAllowListBlockedAccessIterator, error) + + WatchBlockedAccess(opts *bind.WatchOpts, sink chan<- *TermsOfServiceAllowListBlockedAccess) (event.Subscription, error) + + ParseBlockedAccess(log types.Log) (*TermsOfServiceAllowListBlockedAccess, error) + + FilterConfigUpdated(opts *bind.FilterOpts) (*TermsOfServiceAllowListConfigUpdatedIterator, error) + + WatchConfigUpdated(opts *bind.WatchOpts, sink chan<- *TermsOfServiceAllowListConfigUpdated) (event.Subscription, error) + + ParseConfigUpdated(log types.Log) (*TermsOfServiceAllowListConfigUpdated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*TermsOfServiceAllowListOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *TermsOfServiceAllowListOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*TermsOfServiceAllowListOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*TermsOfServiceAllowListOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *TermsOfServiceAllowListOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*TermsOfServiceAllowListOwnershipTransferred, error) + + FilterUnblockedAccess(opts *bind.FilterOpts) (*TermsOfServiceAllowListUnblockedAccessIterator, error) + + WatchUnblockedAccess(opts *bind.WatchOpts, sink chan<- *TermsOfServiceAllowListUnblockedAccess) (event.Subscription, error) + + ParseUnblockedAccess(log types.Log) (*TermsOfServiceAllowListUnblockedAccess, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git 
a/core/gethwrappers/functions/generated/functions_client/functions_client.go b/core/gethwrappers/functions/generated/functions_client/functions_client.go new file mode 100644 index 00000000..f37ad18b --- /dev/null +++ b/core/gethwrappers/functions/generated/functions_client/functions_client.go @@ -0,0 +1,463 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package functions_client + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var FunctionsClientMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"OnlyRouterCanFulfill\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestSent\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"}],\"name\":\"handleOracleFulfillment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +var FunctionsClientABI = FunctionsClientMetaData.ABI + +type FunctionsClient struct { + address common.Address + abi abi.ABI + 
FunctionsClientCaller + FunctionsClientTransactor + FunctionsClientFilterer +} + +type FunctionsClientCaller struct { + contract *bind.BoundContract +} + +type FunctionsClientTransactor struct { + contract *bind.BoundContract +} + +type FunctionsClientFilterer struct { + contract *bind.BoundContract +} + +type FunctionsClientSession struct { + Contract *FunctionsClient + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type FunctionsClientCallerSession struct { + Contract *FunctionsClientCaller + CallOpts bind.CallOpts +} + +type FunctionsClientTransactorSession struct { + Contract *FunctionsClientTransactor + TransactOpts bind.TransactOpts +} + +type FunctionsClientRaw struct { + Contract *FunctionsClient +} + +type FunctionsClientCallerRaw struct { + Contract *FunctionsClientCaller +} + +type FunctionsClientTransactorRaw struct { + Contract *FunctionsClientTransactor +} + +func NewFunctionsClient(address common.Address, backend bind.ContractBackend) (*FunctionsClient, error) { + abi, err := abi.JSON(strings.NewReader(FunctionsClientABI)) + if err != nil { + return nil, err + } + contract, err := bindFunctionsClient(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &FunctionsClient{address: address, abi: abi, FunctionsClientCaller: FunctionsClientCaller{contract: contract}, FunctionsClientTransactor: FunctionsClientTransactor{contract: contract}, FunctionsClientFilterer: FunctionsClientFilterer{contract: contract}}, nil +} + +func NewFunctionsClientCaller(address common.Address, caller bind.ContractCaller) (*FunctionsClientCaller, error) { + contract, err := bindFunctionsClient(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FunctionsClientCaller{contract: contract}, nil +} + +func NewFunctionsClientTransactor(address common.Address, transactor bind.ContractTransactor) (*FunctionsClientTransactor, error) { + contract, err := bindFunctionsClient(address, nil, transactor, nil) + if err != 
nil { + return nil, err + } + return &FunctionsClientTransactor{contract: contract}, nil +} + +func NewFunctionsClientFilterer(address common.Address, filterer bind.ContractFilterer) (*FunctionsClientFilterer, error) { + contract, err := bindFunctionsClient(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FunctionsClientFilterer{contract: contract}, nil +} + +func bindFunctionsClient(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FunctionsClientMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_FunctionsClient *FunctionsClientRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsClient.Contract.FunctionsClientCaller.contract.Call(opts, result, method, params...) +} + +func (_FunctionsClient *FunctionsClientRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsClient.Contract.FunctionsClientTransactor.contract.Transfer(opts) +} + +func (_FunctionsClient *FunctionsClientRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsClient.Contract.FunctionsClientTransactor.contract.Transact(opts, method, params...) +} + +func (_FunctionsClient *FunctionsClientCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsClient.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_FunctionsClient *FunctionsClientTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsClient.Contract.contract.Transfer(opts) +} + +func (_FunctionsClient *FunctionsClientTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsClient.Contract.contract.Transact(opts, method, params...) +} + +func (_FunctionsClient *FunctionsClientTransactor) HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _FunctionsClient.contract.Transact(opts, "handleOracleFulfillment", requestId, response, err) +} + +func (_FunctionsClient *FunctionsClientSession) HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _FunctionsClient.Contract.HandleOracleFulfillment(&_FunctionsClient.TransactOpts, requestId, response, err) +} + +func (_FunctionsClient *FunctionsClientTransactorSession) HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _FunctionsClient.Contract.HandleOracleFulfillment(&_FunctionsClient.TransactOpts, requestId, response, err) +} + +type FunctionsClientRequestFulfilledIterator struct { + Event *FunctionsClientRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsClientRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsClientRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsClientRequestFulfilled) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsClientRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *FunctionsClientRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsClientRequestFulfilled struct { + Id [32]byte + Raw types.Log +} + +func (_FunctionsClient *FunctionsClientFilterer) FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*FunctionsClientRequestFulfilledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _FunctionsClient.contract.FilterLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return &FunctionsClientRequestFulfilledIterator{contract: _FunctionsClient.contract, event: "RequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_FunctionsClient *FunctionsClientFilterer) WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *FunctionsClientRequestFulfilled, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _FunctionsClient.contract.WatchLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsClientRequestFulfilled) + if err := _FunctionsClient.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsClient 
*FunctionsClientFilterer) ParseRequestFulfilled(log types.Log) (*FunctionsClientRequestFulfilled, error) { + event := new(FunctionsClientRequestFulfilled) + if err := _FunctionsClient.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsClientRequestSentIterator struct { + Event *FunctionsClientRequestSent + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsClientRequestSentIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsClientRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsClientRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsClientRequestSentIterator) Error() error { + return it.fail +} + +func (it *FunctionsClientRequestSentIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsClientRequestSent struct { + Id [32]byte + Raw types.Log +} + +func (_FunctionsClient *FunctionsClientFilterer) FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*FunctionsClientRequestSentIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _FunctionsClient.contract.FilterLogs(opts, "RequestSent", idRule) + if err != nil { + return nil, err + } + return &FunctionsClientRequestSentIterator{contract: _FunctionsClient.contract, event: "RequestSent", logs: logs, sub: 
sub}, nil +} + +func (_FunctionsClient *FunctionsClientFilterer) WatchRequestSent(opts *bind.WatchOpts, sink chan<- *FunctionsClientRequestSent, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _FunctionsClient.contract.WatchLogs(opts, "RequestSent", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsClientRequestSent) + if err := _FunctionsClient.contract.UnpackLog(event, "RequestSent", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsClient *FunctionsClientFilterer) ParseRequestSent(log types.Log) (*FunctionsClientRequestSent, error) { + event := new(FunctionsClientRequestSent) + if err := _FunctionsClient.contract.UnpackLog(event, "RequestSent", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_FunctionsClient *FunctionsClient) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _FunctionsClient.abi.Events["RequestFulfilled"].ID: + return _FunctionsClient.ParseRequestFulfilled(log) + case _FunctionsClient.abi.Events["RequestSent"].ID: + return _FunctionsClient.ParseRequestSent(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (FunctionsClientRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0x85e1543bf2f84fe80c6badbce3648c8539ad1df4d2b3d822938ca0538be727e6") +} + +func (FunctionsClientRequestSent) Topic() common.Hash { + return common.HexToHash("0x1131472297a800fee664d1d89cfa8f7676ff07189ecc53f80bbb5f4969099db8") +} + 
+func (_FunctionsClient *FunctionsClient) Address() common.Address { + return _FunctionsClient.address +} + +type FunctionsClientInterface interface { + HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) + + FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*FunctionsClientRequestFulfilledIterator, error) + + WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *FunctionsClientRequestFulfilled, id [][32]byte) (event.Subscription, error) + + ParseRequestFulfilled(log types.Log) (*FunctionsClientRequestFulfilled, error) + + FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*FunctionsClientRequestSentIterator, error) + + WatchRequestSent(opts *bind.WatchOpts, sink chan<- *FunctionsClientRequestSent, id [][32]byte) (event.Subscription, error) + + ParseRequestSent(log types.Log) (*FunctionsClientRequestSent, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/functions/generated/functions_client_example/functions_client_example.go b/core/gethwrappers/functions/generated/functions_client_example/functions_client_example.go new file mode 100644 index 00000000..2afd2b9a --- /dev/null +++ b/core/gethwrappers/functions/generated/functions_client_example/functions_client_example.go @@ -0,0 +1,988 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package functions_client_example + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var FunctionsClientExampleMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"router\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"EmptyArgs\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySource\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoInlineSecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyRouterCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"UnexpectedRequestID\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestFulfilled\",\"type\":\"eve
nt\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestSent\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"MAX_CALLBACK_GAS\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"}],\"name\":\"handleOracleFulfillment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_lastError\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_lastErrorLength\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_lastRequestId\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_lastResponse\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_lastResponseLength\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"source\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"encryptedSecretsReferences\",\"type\":
\"bytes\"},{\"internalType\":\"string[]\",\"name\":\"args\",\"type\":\"string[]\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"jobId\",\"type\":\"bytes32\"}],\"name\":\"sendRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60a06040523480156200001157600080fd5b5060405162001a4838038062001a48833981016040819052620000349162000180565b6001600160a01b0381166080523380600081620000985760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000cb57620000cb81620000d5565b50505050620001b2565b336001600160a01b038216036200012f5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016200008f565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000602082840312156200019357600080fd5b81516001600160a01b0381168114620001ab57600080fd5b9392505050565b608051611873620001d5600039600081816101c601526109f301526118736000f3fe608060405234801561001057600080fd5b50600436106100c95760003560e01c80636d9809a011610081578063b1e217491161005b578063b1e2174914610182578063f2fde38b1461018b578063f7b4c06f1461019e57600080fd5b80636d9809a01461014857806379ba5097146101525780638da5cb5b1461015a57600080fd5b806342748b2a116100b257806342748b2a146100ff5780634b0795a81461012c5780635fa353e71461013557600080fd5b80630ca76175146100ce5780633944ea3a146100e3575b600080fd5b6100e16100dc3660046112bf565b6101ae565b005b6100ec60035481565b6040519081526020015b60405180910390f35b60055461011790640100
000000900463ffffffff1681565b60405163ffffffff90911681526020016100f6565b6100ec60045481565b6100e1610143366004611392565b610258565b6101176201117081565b6100e161036a565b60005460405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100f6565b6100ec60025481565b6100e1610199366004611476565b61046c565b6005546101179063ffffffff1681565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461021d576040517fc6829f8300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610228838383610480565b60405183907f85e1543bf2f84fe80c6badbce3648c8539ad1df4d2b3d822938ca0538be727e690600090a2505050565b61026061054e565b6102a16040805160e0810190915280600081526020016000815260200160008152602001606081526020016060815260200160608152602001606081525090565b6102e389898080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525085939250506105d19050565b851561032b5761032b87878080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525085939250506105e29050565b83156103455761034561033e85876114ac565b829061062c565b61035c6103518261066f565b8462011170856109ee565b600255505050505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146103f0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b61047461054e565b61047d81610acd565b50565b82600254146104be576040517fd068bf5b000000000000000000000000000000000000000000000000000000008152600481018490526024016103e7565b6104c782610bc2565b6003558151600580547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000
1663ffffffff90921691909117905561050981610bc2565b600455516005805463ffffffff909216640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff9092169190911790555050565b60005473ffffffffffffffffffffffffffffffffffffffff1633146105cf576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016103e7565b565b6105de8260008084610c44565b5050565b805160000361061d576040517fe889636f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60016020830152608090910152565b8051600003610667576040517ffe936cb700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60a090910152565b6060600061067e610100610cdb565b90506106c86040518060400160405280600c81526020017f636f64654c6f636174696f6e000000000000000000000000000000000000000081525082610cfc90919063ffffffff16565b82516106e69060028111156106df576106df611544565b8290610d1a565b60408051808201909152600881527f6c616e67756167650000000000000000000000000000000000000000000000006020820152610725908290610cfc565b604083015161073c9080156106df576106df611544565b60408051808201909152600681527f736f757263650000000000000000000000000000000000000000000000000000602082015261077b908290610cfc565b606083015161078b908290610cfc565b60a083015151156108385760408051808201909152600481527f617267730000000000000000000000000000000000000000000000000000000060208201526107d5908290610cfc565b6107de81610d53565b60005b8360a001515181101561082e5761081e8460a00151828151811061080757610807611573565b602002602001015183610cfc90919063ffffffff16565b610827816115d1565b90506107e1565b5061083881610d77565b608083015151156109395760008360200151600281111561085b5761085b611544565b03610892576040517fa80d31f700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60408051808201909152600f81527f736563726574734c6f636174696f6e000000000000000000000000000000000060208201526108d1908290610cfc565b6108ea8360200151
60028111156106df576106df611544565b60408051808201909152600781527f73656372657473000000000000000000000000000000000000000000000000006020820152610929908290610cfc565b6080830151610939908290610d95565b60c083015151156109e65760408051808201909152600981527f62797465734172677300000000000000000000000000000000000000000000006020820152610983908290610cfc565b61098c81610d53565b60005b8360c00151518110156109dc576109cc8460c0015182815181106109b5576109b5611573565b602002602001015183610d9590919063ffffffff16565b6109d5816115d1565b905061098f565b506109e681610d77565b515192915050565b6000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663461d27628688600188886040518663ffffffff1660e01b8152600401610a53959493929190611609565b6020604051808303816000875af1158015610a72573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610a9691906116a9565b60405190915081907f1131472297a800fee664d1d89cfa8f7676ff07189ecc53f80bbb5f4969099db890600090a295945050505050565b3373ffffffffffffffffffffffffffffffffffffffff821603610b4c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016103e7565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60008060209050602083511015610bd7575081515b60005b81811015610c3d57610bed8160086116c2565b848281518110610bff57610bff611573565b01602001517fff0000000000000000000000000000000000000000000000000000000000000016901c9290921791610c36816115d1565b9050610bda565b5050919050565b8051600003610c7f576040517f22ce3edd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b83836002811115610c9257610c92611544565b90816002811115610ca557610ca5611544565b90525060408401828015610cbb57610cbb611544565b90818015610c
cb57610ccb611544565b9052506060909301929092525050565b610ce3611176565b8051610cef9083610da2565b5060006020820152919050565b610d098260038351610e1c565b8151610d159082610f43565b505050565b8151610d279060c2610f6b565b506105de8282604051602001610d3f91815260200190565b604051602081830303815290604052610d95565b610d5e816004610fd4565b600181602001818151610d7191906116d9565b90525050565b610d82816007610fd4565b600181602001818151610d7191906116ec565b610d098260028351610e1c565b604080518082019091526060815260006020820152610dc26020836116ff565b15610dea57610dd26020836116ff565b610ddd9060206116ec565b610de790836116d9565b91505b602080840183905260405180855260008152908184010181811015610e0e57600080fd5b604052508290505b92915050565b60178167ffffffffffffffff1611610e49578251610e439060e0600585901b168317610f6b565b50505050565b60ff8167ffffffffffffffff1611610e8b578251610e72906018611fe0600586901b1617610f6b565b508251610e439067ffffffffffffffff83166001610feb565b61ffff8167ffffffffffffffff1611610ece578251610eb5906019611fe0600586901b1617610f6b565b508251610e439067ffffffffffffffff83166002610feb565b63ffffffff8167ffffffffffffffff1611610f13578251610efa90601a611fe0600586901b1617610f6b565b508251610e439067ffffffffffffffff83166004610feb565b8251610f2a90601b611fe0600586901b1617610f6b565b508251610e439067ffffffffffffffff83166008610feb565b604080518082019091526060815260006020820152610f6483838451611070565b9392505050565b6040805180820190915260608152600060208201528251516000610f908260016116d9565b905084602001518210610fb157610fb185610fac8360026116c2565b61115f565b8451602083820101858153508051821115610fca578181525b5093949350505050565b8151610d1590601f611fe0600585901b1617610f6b565b604080518082019091526060815260006020820152835151600061100f82856116d9565b9050856020015181111561102c5761102c86610fac8360026116c2565b6000600161103c8661010061185a565b61104691906116ec565b90508651828101878319825116178152508051831115611064578281525b50959695505050505050565b604080518082019091526060815260006020820152825182111561109357600080fd5b83515160006110a284836116d9565b90508560200151
8111156110bf576110bf86610fac8360026116c2565b8551805183820160200191600091808511156110d9578482525b505050602086015b6020861061111957805182526110f86020836116d9565b91506111056020826116d9565b90506111126020876116ec565b95506110e1565b5181517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60208890036101000a0190811690199190911617905250849150509392505050565b815161116b8383610da2565b50610e438382610f43565b604051806040016040528061119e604051806040016040528060608152602001600081525090565b8152602001600081525090565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715611221576112216111ab565b604052919050565b600067ffffffffffffffff831115611243576112436111ab565b61127460207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f860116016111da565b905082815283838301111561128857600080fd5b828260208301376000602084830101529392505050565b600082601f8301126112b057600080fd5b610f6483833560208501611229565b6000806000606084860312156112d457600080fd5b83359250602084013567ffffffffffffffff808211156112f357600080fd5b6112ff8783880161129f565b9350604086013591508082111561131557600080fd5b506113228682870161129f565b9150509250925092565b60008083601f84011261133e57600080fd5b50813567ffffffffffffffff81111561135657600080fd5b60208301915083602082850101111561136e57600080fd5b9250929050565b803567ffffffffffffffff8116811461138d57600080fd5b919050565b60008060008060008060008060a0898b0312156113ae57600080fd5b883567ffffffffffffffff808211156113c657600080fd5b6113d28c838d0161132c565b909a50985060208b01359150808211156113eb57600080fd5b6113f78c838d0161132c565b909850965060408b013591508082111561141057600080fd5b818b0191508b601f83011261142457600080fd5b81358181111561143357600080fd5b8c60208260051b850101111561144857600080fd5b60208301965080955050505061146060608a01611375565b9150608089013590509295985092959890939650565b60006020828403121561148857600080fd5b813573ffffffffffff
ffffffffffffffffffffffffffff81168114610f6457600080fd5b600067ffffffffffffffff808411156114c7576114c76111ab565b8360051b60206114d88183016111da565b8681529185019181810190368411156114f057600080fd5b865b848110156115385780358681111561150a5760008081fd5b880136601f82011261151c5760008081fd5b61152a368235878401611229565b8452509183019183016114f2565b50979650505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611602576116026115a2565b5060010190565b67ffffffffffffffff861681526000602060a08184015286518060a085015260005b818110156116475788810183015185820160c00152820161162b565b50600060c0828601015260c07fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010192505050611690604083018661ffff169052565b63ffffffff939093166060820152608001529392505050565b6000602082840312156116bb57600080fd5b5051919050565b8082028115828204841417610e1657610e166115a2565b80820180821115610e1657610e166115a2565b81810381811115610e1657610e166115a2565b600082611735577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500690565b600181815b8085111561179357817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04821115611779576117796115a2565b8085161561178657918102915b93841c939080029061173f565b509250929050565b6000826117aa57506001610e16565b816117b757506000610e16565b81600181146117cd57600281146117d7576117f3565b6001915050610e16565b60ff8411156117e8576117e86115a2565b50506001821b610e16565b5060208310610133831016604e8410600b8410161715611816575081810a610e16565b611820838361173a565b807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04821115611852576118526115a2565b029392505050565b6000610f64838361179b56fea164736f6c6343000813000a", +} + +var 
FunctionsClientExampleABI = FunctionsClientExampleMetaData.ABI + +var FunctionsClientExampleBin = FunctionsClientExampleMetaData.Bin + +func DeployFunctionsClientExample(auth *bind.TransactOpts, backend bind.ContractBackend, router common.Address) (common.Address, *types.Transaction, *FunctionsClientExample, error) { + parsed, err := FunctionsClientExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(FunctionsClientExampleBin), backend, router) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &FunctionsClientExample{address: address, abi: *parsed, FunctionsClientExampleCaller: FunctionsClientExampleCaller{contract: contract}, FunctionsClientExampleTransactor: FunctionsClientExampleTransactor{contract: contract}, FunctionsClientExampleFilterer: FunctionsClientExampleFilterer{contract: contract}}, nil +} + +type FunctionsClientExample struct { + address common.Address + abi abi.ABI + FunctionsClientExampleCaller + FunctionsClientExampleTransactor + FunctionsClientExampleFilterer +} + +type FunctionsClientExampleCaller struct { + contract *bind.BoundContract +} + +type FunctionsClientExampleTransactor struct { + contract *bind.BoundContract +} + +type FunctionsClientExampleFilterer struct { + contract *bind.BoundContract +} + +type FunctionsClientExampleSession struct { + Contract *FunctionsClientExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type FunctionsClientExampleCallerSession struct { + Contract *FunctionsClientExampleCaller + CallOpts bind.CallOpts +} + +type FunctionsClientExampleTransactorSession struct { + Contract *FunctionsClientExampleTransactor + TransactOpts bind.TransactOpts +} + +type FunctionsClientExampleRaw struct { + Contract *FunctionsClientExample +} + +type 
FunctionsClientExampleCallerRaw struct { + Contract *FunctionsClientExampleCaller +} + +type FunctionsClientExampleTransactorRaw struct { + Contract *FunctionsClientExampleTransactor +} + +func NewFunctionsClientExample(address common.Address, backend bind.ContractBackend) (*FunctionsClientExample, error) { + abi, err := abi.JSON(strings.NewReader(FunctionsClientExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindFunctionsClientExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &FunctionsClientExample{address: address, abi: abi, FunctionsClientExampleCaller: FunctionsClientExampleCaller{contract: contract}, FunctionsClientExampleTransactor: FunctionsClientExampleTransactor{contract: contract}, FunctionsClientExampleFilterer: FunctionsClientExampleFilterer{contract: contract}}, nil +} + +func NewFunctionsClientExampleCaller(address common.Address, caller bind.ContractCaller) (*FunctionsClientExampleCaller, error) { + contract, err := bindFunctionsClientExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FunctionsClientExampleCaller{contract: contract}, nil +} + +func NewFunctionsClientExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*FunctionsClientExampleTransactor, error) { + contract, err := bindFunctionsClientExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &FunctionsClientExampleTransactor{contract: contract}, nil +} + +func NewFunctionsClientExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*FunctionsClientExampleFilterer, error) { + contract, err := bindFunctionsClientExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FunctionsClientExampleFilterer{contract: contract}, nil +} + +func bindFunctionsClientExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) 
(*bind.BoundContract, error) { + parsed, err := FunctionsClientExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_FunctionsClientExample *FunctionsClientExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsClientExample.Contract.FunctionsClientExampleCaller.contract.Call(opts, result, method, params...) +} + +func (_FunctionsClientExample *FunctionsClientExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsClientExample.Contract.FunctionsClientExampleTransactor.contract.Transfer(opts) +} + +func (_FunctionsClientExample *FunctionsClientExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsClientExample.Contract.FunctionsClientExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_FunctionsClientExample *FunctionsClientExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsClientExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_FunctionsClientExample *FunctionsClientExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsClientExample.Contract.contract.Transfer(opts) +} + +func (_FunctionsClientExample *FunctionsClientExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsClientExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_FunctionsClientExample *FunctionsClientExampleCaller) MAXCALLBACKGAS(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _FunctionsClientExample.contract.Call(opts, &out, "MAX_CALLBACK_GAS") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_FunctionsClientExample *FunctionsClientExampleSession) MAXCALLBACKGAS() (uint32, error) { + return _FunctionsClientExample.Contract.MAXCALLBACKGAS(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCallerSession) MAXCALLBACKGAS() (uint32, error) { + return _FunctionsClientExample.Contract.MAXCALLBACKGAS(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _FunctionsClientExample.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FunctionsClientExample *FunctionsClientExampleSession) Owner() (common.Address, error) { + return _FunctionsClientExample.Contract.Owner(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCallerSession) Owner() (common.Address, error) { + return _FunctionsClientExample.Contract.Owner(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCaller) SLastError(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _FunctionsClientExample.contract.Call(opts, &out, "s_lastError") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_FunctionsClientExample *FunctionsClientExampleSession) SLastError() ([32]byte, error) { + return 
_FunctionsClientExample.Contract.SLastError(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCallerSession) SLastError() ([32]byte, error) { + return _FunctionsClientExample.Contract.SLastError(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCaller) SLastErrorLength(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _FunctionsClientExample.contract.Call(opts, &out, "s_lastErrorLength") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_FunctionsClientExample *FunctionsClientExampleSession) SLastErrorLength() (uint32, error) { + return _FunctionsClientExample.Contract.SLastErrorLength(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCallerSession) SLastErrorLength() (uint32, error) { + return _FunctionsClientExample.Contract.SLastErrorLength(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCaller) SLastRequestId(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _FunctionsClientExample.contract.Call(opts, &out, "s_lastRequestId") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_FunctionsClientExample *FunctionsClientExampleSession) SLastRequestId() ([32]byte, error) { + return _FunctionsClientExample.Contract.SLastRequestId(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCallerSession) SLastRequestId() ([32]byte, error) { + return _FunctionsClientExample.Contract.SLastRequestId(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCaller) SLastResponse(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := 
_FunctionsClientExample.contract.Call(opts, &out, "s_lastResponse") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_FunctionsClientExample *FunctionsClientExampleSession) SLastResponse() ([32]byte, error) { + return _FunctionsClientExample.Contract.SLastResponse(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCallerSession) SLastResponse() ([32]byte, error) { + return _FunctionsClientExample.Contract.SLastResponse(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCaller) SLastResponseLength(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _FunctionsClientExample.contract.Call(opts, &out, "s_lastResponseLength") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_FunctionsClientExample *FunctionsClientExampleSession) SLastResponseLength() (uint32, error) { + return _FunctionsClientExample.Contract.SLastResponseLength(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleCallerSession) SLastResponseLength() (uint32, error) { + return _FunctionsClientExample.Contract.SLastResponseLength(&_FunctionsClientExample.CallOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsClientExample.contract.Transact(opts, "acceptOwnership") +} + +func (_FunctionsClientExample *FunctionsClientExampleSession) AcceptOwnership() (*types.Transaction, error) { + return _FunctionsClientExample.Contract.AcceptOwnership(&_FunctionsClientExample.TransactOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return 
_FunctionsClientExample.Contract.AcceptOwnership(&_FunctionsClientExample.TransactOpts) +} + +func (_FunctionsClientExample *FunctionsClientExampleTransactor) HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _FunctionsClientExample.contract.Transact(opts, "handleOracleFulfillment", requestId, response, err) +} + +func (_FunctionsClientExample *FunctionsClientExampleSession) HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _FunctionsClientExample.Contract.HandleOracleFulfillment(&_FunctionsClientExample.TransactOpts, requestId, response, err) +} + +func (_FunctionsClientExample *FunctionsClientExampleTransactorSession) HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _FunctionsClientExample.Contract.HandleOracleFulfillment(&_FunctionsClientExample.TransactOpts, requestId, response, err) +} + +func (_FunctionsClientExample *FunctionsClientExampleTransactor) SendRequest(opts *bind.TransactOpts, source string, encryptedSecretsReferences []byte, args []string, subscriptionId uint64, jobId [32]byte) (*types.Transaction, error) { + return _FunctionsClientExample.contract.Transact(opts, "sendRequest", source, encryptedSecretsReferences, args, subscriptionId, jobId) +} + +func (_FunctionsClientExample *FunctionsClientExampleSession) SendRequest(source string, encryptedSecretsReferences []byte, args []string, subscriptionId uint64, jobId [32]byte) (*types.Transaction, error) { + return _FunctionsClientExample.Contract.SendRequest(&_FunctionsClientExample.TransactOpts, source, encryptedSecretsReferences, args, subscriptionId, jobId) +} + +func (_FunctionsClientExample *FunctionsClientExampleTransactorSession) SendRequest(source string, encryptedSecretsReferences []byte, args []string, subscriptionId uint64, jobId [32]byte) (*types.Transaction, error) { + return 
_FunctionsClientExample.Contract.SendRequest(&_FunctionsClientExample.TransactOpts, source, encryptedSecretsReferences, args, subscriptionId, jobId) +} + +func (_FunctionsClientExample *FunctionsClientExampleTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _FunctionsClientExample.contract.Transact(opts, "transferOwnership", to) +} + +func (_FunctionsClientExample *FunctionsClientExampleSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _FunctionsClientExample.Contract.TransferOwnership(&_FunctionsClientExample.TransactOpts, to) +} + +func (_FunctionsClientExample *FunctionsClientExampleTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _FunctionsClientExample.Contract.TransferOwnership(&_FunctionsClientExample.TransactOpts, to) +} + +type FunctionsClientExampleOwnershipTransferRequestedIterator struct { + Event *FunctionsClientExampleOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsClientExampleOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsClientExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsClientExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsClientExampleOwnershipTransferRequestedIterator) Error() error { + 
return it.fail +} + +func (it *FunctionsClientExampleOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsClientExampleOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsClientExample *FunctionsClientExampleFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsClientExampleOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsClientExample.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsClientExampleOwnershipTransferRequestedIterator{contract: _FunctionsClientExample.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_FunctionsClientExample *FunctionsClientExampleFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsClientExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsClientExample.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsClientExampleOwnershipTransferRequested) + if err := _FunctionsClientExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + 
event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsClientExample *FunctionsClientExampleFilterer) ParseOwnershipTransferRequested(log types.Log) (*FunctionsClientExampleOwnershipTransferRequested, error) { + event := new(FunctionsClientExampleOwnershipTransferRequested) + if err := _FunctionsClientExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsClientExampleOwnershipTransferredIterator struct { + Event *FunctionsClientExampleOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsClientExampleOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsClientExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsClientExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsClientExampleOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *FunctionsClientExampleOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsClientExampleOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsClientExample 
*FunctionsClientExampleFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsClientExampleOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsClientExample.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsClientExampleOwnershipTransferredIterator{contract: _FunctionsClientExample.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_FunctionsClientExample *FunctionsClientExampleFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsClientExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsClientExample.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsClientExampleOwnershipTransferred) + if err := _FunctionsClientExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsClientExample *FunctionsClientExampleFilterer) ParseOwnershipTransferred(log types.Log) (*FunctionsClientExampleOwnershipTransferred, 
error) { + event := new(FunctionsClientExampleOwnershipTransferred) + if err := _FunctionsClientExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsClientExampleRequestFulfilledIterator struct { + Event *FunctionsClientExampleRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsClientExampleRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsClientExampleRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsClientExampleRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsClientExampleRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *FunctionsClientExampleRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsClientExampleRequestFulfilled struct { + Id [32]byte + Raw types.Log +} + +func (_FunctionsClientExample *FunctionsClientExampleFilterer) FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*FunctionsClientExampleRequestFulfilledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _FunctionsClientExample.contract.FilterLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return 
&FunctionsClientExampleRequestFulfilledIterator{contract: _FunctionsClientExample.contract, event: "RequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_FunctionsClientExample *FunctionsClientExampleFilterer) WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *FunctionsClientExampleRequestFulfilled, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _FunctionsClientExample.contract.WatchLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsClientExampleRequestFulfilled) + if err := _FunctionsClientExample.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsClientExample *FunctionsClientExampleFilterer) ParseRequestFulfilled(log types.Log) (*FunctionsClientExampleRequestFulfilled, error) { + event := new(FunctionsClientExampleRequestFulfilled) + if err := _FunctionsClientExample.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsClientExampleRequestSentIterator struct { + Event *FunctionsClientExampleRequestSent + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsClientExampleRequestSentIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsClientExampleRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err 
!= nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsClientExampleRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsClientExampleRequestSentIterator) Error() error { + return it.fail +} + +func (it *FunctionsClientExampleRequestSentIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsClientExampleRequestSent struct { + Id [32]byte + Raw types.Log +} + +func (_FunctionsClientExample *FunctionsClientExampleFilterer) FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*FunctionsClientExampleRequestSentIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _FunctionsClientExample.contract.FilterLogs(opts, "RequestSent", idRule) + if err != nil { + return nil, err + } + return &FunctionsClientExampleRequestSentIterator{contract: _FunctionsClientExample.contract, event: "RequestSent", logs: logs, sub: sub}, nil +} + +func (_FunctionsClientExample *FunctionsClientExampleFilterer) WatchRequestSent(opts *bind.WatchOpts, sink chan<- *FunctionsClientExampleRequestSent, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _FunctionsClientExample.contract.WatchLogs(opts, "RequestSent", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsClientExampleRequestSent) + if err := _FunctionsClientExample.contract.UnpackLog(event, "RequestSent", log); err != nil { + 
return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsClientExample *FunctionsClientExampleFilterer) ParseRequestSent(log types.Log) (*FunctionsClientExampleRequestSent, error) { + event := new(FunctionsClientExampleRequestSent) + if err := _FunctionsClientExample.contract.UnpackLog(event, "RequestSent", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_FunctionsClientExample *FunctionsClientExample) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _FunctionsClientExample.abi.Events["OwnershipTransferRequested"].ID: + return _FunctionsClientExample.ParseOwnershipTransferRequested(log) + case _FunctionsClientExample.abi.Events["OwnershipTransferred"].ID: + return _FunctionsClientExample.ParseOwnershipTransferred(log) + case _FunctionsClientExample.abi.Events["RequestFulfilled"].ID: + return _FunctionsClientExample.ParseRequestFulfilled(log) + case _FunctionsClientExample.abi.Events["RequestSent"].ID: + return _FunctionsClientExample.ParseRequestSent(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (FunctionsClientExampleOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (FunctionsClientExampleOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (FunctionsClientExampleRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0x85e1543bf2f84fe80c6badbce3648c8539ad1df4d2b3d822938ca0538be727e6") +} + +func (FunctionsClientExampleRequestSent) Topic() common.Hash { + return 
common.HexToHash("0x1131472297a800fee664d1d89cfa8f7676ff07189ecc53f80bbb5f4969099db8") +} + +func (_FunctionsClientExample *FunctionsClientExample) Address() common.Address { + return _FunctionsClientExample.address +} + +type FunctionsClientExampleInterface interface { + MAXCALLBACKGAS(opts *bind.CallOpts) (uint32, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SLastError(opts *bind.CallOpts) ([32]byte, error) + + SLastErrorLength(opts *bind.CallOpts) (uint32, error) + + SLastRequestId(opts *bind.CallOpts) ([32]byte, error) + + SLastResponse(opts *bind.CallOpts) ([32]byte, error) + + SLastResponseLength(opts *bind.CallOpts) (uint32, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) + + SendRequest(opts *bind.TransactOpts, source string, encryptedSecretsReferences []byte, args []string, subscriptionId uint64, jobId [32]byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsClientExampleOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsClientExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*FunctionsClientExampleOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsClientExampleOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsClientExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) 
(*FunctionsClientExampleOwnershipTransferred, error) + + FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*FunctionsClientExampleRequestFulfilledIterator, error) + + WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *FunctionsClientExampleRequestFulfilled, id [][32]byte) (event.Subscription, error) + + ParseRequestFulfilled(log types.Log) (*FunctionsClientExampleRequestFulfilled, error) + + FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*FunctionsClientExampleRequestSentIterator, error) + + WatchRequestSent(opts *bind.WatchOpts, sink chan<- *FunctionsClientExampleRequestSent, id [][32]byte) (event.Subscription, error) + + ParseRequestSent(log types.Log) (*FunctionsClientExampleRequestSent, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/functions/generated/functions_coordinator/functions_coordinator.go b/core/gethwrappers/functions/generated/functions_coordinator/functions_coordinator.go new file mode 100644 index 00000000..64b3734d --- /dev/null +++ b/core/gethwrappers/functions/generated/functions_coordinator/functions_coordinator.go @@ -0,0 +1,1965 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package functions_coordinator + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type FunctionsBillingConfig struct { + FulfillmentGasPriceOverEstimationBP uint32 + FeedStalenessSeconds uint32 + GasOverheadBeforeCallback uint32 + GasOverheadAfterCallback uint32 + DonFee *big.Int + MinimumEstimateGasPriceWei *big.Int + MaxSupportedRequestDataVersion uint16 + FallbackNativePerUnitLink *big.Int + RequestTimeoutSeconds uint32 +} + +type FunctionsResponseCommitment struct { + RequestId [32]byte + Coordinator common.Address + EstimatedTotalCostJuels *big.Int + Client common.Address + SubscriptionId uint64 + CallbackGasLimit uint32 + AdminFee *big.Int + DonFee *big.Int + GasOverheadBeforeCallback *big.Int + GasOverheadAfterCallback *big.Int + TimeoutTimestamp uint32 +} + +type FunctionsResponseRequestMeta struct { + Data []byte + Flags [32]byte + RequestingContract common.Address + AvailableBalance *big.Int + AdminFee *big.Int + SubscriptionId uint64 + InitiatedRequests uint64 + CallbackGasLimit uint32 + DataVersion uint16 + CompletedRequests uint64 + SubscriptionOwner common.Address +} + +var FunctionsCoordinatorMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"router\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentGasPriceOverEstimationBP\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"feedStalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"minimumEstimateGasPriceWei\",\"type\":\"uint40\"},{\"internalType\":\"uint16\",\"name\":\"maxSupportedRequestDataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint224\",\"name\":\"fallbackNativePerUnitLink\",\"type\":\"uint224\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeoutSeconds\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsBillingConfig\",\"name\":\"config\",\"type\":\"tuple\"},{\"internalType\":\"address\",\"name\":\"linkToNativeFeed\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"EmptyPublicKey\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InconsistentReportData\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientBalance\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"message\",\"type\":\"string\"}],\"name\":\"InvalidConfig\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"linkWei\",\"type\":\"int256\"}],\"name\":\"InvalidLinkWeiPrice\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSubscription\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"MustBeSubOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoTransmittersSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByRouter\",\"type\":\"error\"}
,{\"inputs\":[],\"name\":\"OnlyCallableByRouterOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentTooLarge\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"message\",\"type\":\"string\"}],\"name\":\"ReportInvalid\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RouterMustBeSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedPublicKeyChange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedSender\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnsupportedRequestDataVersion\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"CommitmentDeleted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentGasPriceOverEstimationBP\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"feedStalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint32\"},{\"in
ternalType\":\"uint32\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"minimumEstimateGasPriceWei\",\"type\":\"uint40\"},{\"internalType\":\"uint16\",\"name\":\"maxSupportedRequestDataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint224\",\"name\":\"fallbackNativePerUnitLink\",\"type\":\"uint224\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeoutSeconds\",\"type\":\"uint32\"}],\"indexed\":false,\"internalType\":\"structFunctionsBillingConfig\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"ConfigUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestInitiator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"subscriptionOwner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"dataVersion\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"flags\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"callbackGasLimit\",\"type\":\"uint64\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"estimatedTotalCostJuels\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\"
:\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint32\",\"name\":\"timeoutTimestamp\",\"type\":\"uint32\"}],\"indexed\":false,\"internalType\":\"structFunctionsResponse.Commitment\",\"name\":\"commitment\",\"type\":\"tuple\"}],\"name\":\"OracleRequest\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"OracleResponse\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"juelsPerGas\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1FeeShareWei\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"callbackCostJuels\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalCostJuels\",\"type\":\"uint96\"}],\"name\":\"RequestBilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"in
dexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"deleteCommitment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPriceWei\",\"type\":\"uint256\"}],\"name\":\"estimateCost\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAdminFee\",\"outputs\":[{\"internalType\":\"uint72\",\"name\":\"\",\"type\":\"uint72\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentGasPriceOverEstimationBP\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"feedStalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"minimumEstimateGasPriceWei\",\"type\":\"uint40\"},{\"internalType\":\"uint16\",\"name\":\"maxSupportedRequestDataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint224\",\"name\":\"fallbackNativePerUnitLink\",\"type\":\"uint224\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeou
tSeconds\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsBillingConfig\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"getDONFee\",\"outputs\":[{\"internalType\":\"uint72\",\"name\":\"\",\"type\":\"uint72\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDONPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getThresholdPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getWeiPerUnitLink\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"oracleWithdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"oracleWithdrawAll\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\
"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"_f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"_offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"donPublicKey\",\"type\":\"bytes\"}],\"name\":\"setDONPublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"thresholdPublicKey\",\"type\":\"bytes\"}],\"name\":\"setThresholdPublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"flags\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"availableBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"initiatedRequests\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"dataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint64\",\"name\":\"completedRequests\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"subscriptionOwner\",\"type\":\"address\"}],\"
internalType\":\"structFunctionsResponse.RequestMeta\",\"name\":\"request\",\"type\":\"tuple\"}],\"name\":\"startRequest\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"estimatedTotalCostJuels\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint32\",\"name\":\"timeoutTimestamp\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsResponse.Commitment\",\"name\":\"commitment\",\"type\":\"tuple\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"report\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"transmitters\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"
type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentGasPriceOverEstimationBP\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"feedStalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"minimumEstimateGasPriceWei\",\"type\":\"uint40\"},{\"internalType\":\"uint16\",\"name\":\"maxSupportedRequestDataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint224\",\"name\":\"fallbackNativePerUnitLink\",\"type\":\"uint224\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeoutSeconds\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsBillingConfig\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"updateConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60a06040523480156200001157600080fd5b50604051620057a9380380620057a983398101604081905262000034916200046d565b8282828233806000816200008f5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000c257620000c28162000139565b5050506001600160a01b038116620000ed57604051632530e88560e11b815260040160405180910390fd5b6001600160a01b03908116608052600b80549183166c01000000000000000000000000026001600160601b039092169190911790556200012d82620001e4565b5050505050506200062c565b336001600160a01b03821603620001935760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000086565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b620001ee62000342565b80516008805460208401516040808601516060870151608088015160a089015160c08a015161ffff16600160f01b026001600160f01b0364ffffffffff909216600160c81b0264ffffffffff60c81b196001600160481b03909416600160801b0293909316600160801b600160f01b031963ffffffff9586166c010000000000000000000000000263ffffffff60601b19978716680100000000000000000297909716600160401b600160801b0319998716640100000000026001600160401b0319909b169c87169c909c1799909917979097169990991793909317959095169390931793909317929092169390931790915560e0830151610100840151909216600160e01b026001600160e01b0390921691909117600955517f5f32d06f5e83eda3a68e0e964ef2e6af5cb613e8117aa103c2d6bca5f5184862906200033790839062000576565b60405180910390a150565b6200034c6200034e565b565b6000546001600160a01b031633146200034c5760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000086565b80516001600160a01b0381168114620003c257600080fd5b919050565b60405161012081016001600160401b0381118282101715620003f957634e4
87b7160e01b600052604160045260246000fd5b60405290565b805163ffffffff81168114620003c257600080fd5b80516001600160481b0381168114620003c257600080fd5b805164ffffffffff81168114620003c257600080fd5b805161ffff81168114620003c257600080fd5b80516001600160e01b0381168114620003c257600080fd5b60008060008385036101608112156200048557600080fd5b6200049085620003aa565b935061012080601f1983011215620004a757600080fd5b620004b1620003c7565b9150620004c160208701620003ff565b8252620004d160408701620003ff565b6020830152620004e460608701620003ff565b6040830152620004f760808701620003ff565b60608301526200050a60a0870162000414565b60808301526200051d60c087016200042c565b60a08301526200053060e0870162000442565b60c08301526101006200054581880162000455565b60e084015262000557828801620003ff565b908301525091506200056d6101408501620003aa565b90509250925092565b815163ffffffff908116825260208084015182169083015260408084015182169083015260608084015191821690830152610120820190506080830151620005c960808401826001600160481b03169052565b5060a0830151620005e360a084018264ffffffffff169052565b5060c0830151620005fa60c084018261ffff169052565b5060e08301516200061660e08401826001600160e01b03169052565b506101009283015163ffffffff16919092015290565b6080516151376200067260003960008181610845015281816109d301528181610ca601528181610f3a0152818161104501528181611790015261357001526151376000f3fe608060405234801561001057600080fd5b506004361061018d5760003560e01c806381ff7048116100e3578063c3f909d41161008c578063e3d0e71211610066578063e3d0e71214610560578063e4ddcea614610573578063f2fde38b1461058957600080fd5b8063c3f909d4146103b0578063d227d24514610528578063d328a91e1461055857600080fd5b8063a631571e116100bd578063a631571e1461035d578063afcb95d71461037d578063b1dc65a41461039d57600080fd5b806381ff7048146102b557806385b214cf146103225780638da5cb5b1461033557600080fd5b806366316d8d116101455780637f15e1661161011f5780637f15e16614610285578063814118341461029857806381f1b938146102ad57600080fd5b806366316d8d1461026257806379ba5097146102755780637d4807871461027d57600080fd5b8063181f5a7711610176578063181f5a77146101ba578
0632a905ccc1461020c57806359b5b7ac1461022e57600080fd5b8063083a5466146101925780631112dadc146101a7575b600080fd5b6101a56101a0366004613aad565b61059c565b005b6101a56101b5366004613c56565b6105f1565b6101f66040518060400160405280601c81526020017f46756e6374696f6e7320436f6f7264696e61746f722076312e322e300000000081525081565b6040516102039190613d7a565b60405180910390f35b610214610841565b60405168ffffffffffffffffff9091168152602001610203565b61021461023c366004613e1b565b50600854700100000000000000000000000000000000900468ffffffffffffffffff1690565b6101a5610270366004613eaa565b6108d7565b6101a5610a90565b6101a5610b92565b6101a5610293366004613aad565b610d92565b6102a0610de2565b6040516102039190613f34565b6101f6610e51565b6102ff60015460025463ffffffff74010000000000000000000000000000000000000000830481169378010000000000000000000000000000000000000000000000009093041691565b6040805163ffffffff948516815293909216602084015290820152606001610203565b6101a5610330366004613f47565b610f22565b60005460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610203565b61037061036b366004613f60565b610fd4565b60405161020391906140b5565b604080516001815260006020820181905291810191909152606001610203565b6101a56103ab366004614109565b611175565b61051b6040805161012081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e0810182905261010081019190915250604080516101208101825260085463ffffffff80821683526401000000008204811660208401526801000000000000000082048116938301939093526c01000000000000000000000000810483166060830152700100000000000000000000000000000000810468ffffffffffffffffff166080830152790100000000000000000000000000000000000000000000000000810464ffffffffff1660a08301527e01000000000000000000000000000000000000000000000000000000000000900461ffff1660c08201526009547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff811660e08301527c0100000000000000000000000000000000000000000000000000000000900490911661010082015290565b60405161020391906141c0565b61053b6105363660046142b0565b61178c565b6040516bfffffffff
fffffffffffffff9091168152602001610203565b6101f66118ec565b6101a561056e3660046143c9565b611943565b61057b6124bf565b604051908152602001610203565b6101a5610597366004614496565b612718565b6105a461272c565b60008190036105df576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600d6105ec82848361454c565b505050565b6105f96127af565b80516008805460208401516040808601516060870151608088015160a089015160c08a015161ffff167e01000000000000000000000000000000000000000000000000000000000000027dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff64ffffffffff909216790100000000000000000000000000000000000000000000000000027fffff0000000000ffffffffffffffffffffffffffffffffffffffffffffffffff68ffffffffffffffffff90941670010000000000000000000000000000000002939093167fffff0000000000000000000000000000ffffffffffffffffffffffffffffffff63ffffffff9586166c01000000000000000000000000027fffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffff9787166801000000000000000002979097167fffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff998716640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000909b169c87169c909c1799909917979097169990991793909317959095169390931793909317929092169390931790915560e08301516101008401519092167c0100000000000000000000000000000000000000000000000000000000027bffffffffffffffffffffffffffffffffffffffffffffffffffffffff90921691909117600955517f5f32d06f5e83eda3a68e0e964ef2e6af5cb613e8117aa103c2d6bca5f5184862906108369083906141c0565b60405180910390a150565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16632a905ccc6040518163ffffffff1660e01b8152600401602060405180830381865afa1580156108ae573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108d29190614672565b905090565b6108df6127b7565b806bffffffffffffffffffffffff166000036109195750336000908152600a60205260409020546bffffffffffffffffffffffff16610973565b336000908152600a60205260409020546bfff
fffffffffffffffffffff80831691161015610973576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336000908152600a6020526040812080548392906109a09084906bffffffffffffffffffffffff166146be565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055506109f57f000000000000000000000000000000000000000000000000000000000000000090565b6040517f66316d8d00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff84811660048301526bffffffffffffffffffffffff8416602483015291909116906366316d8d90604401600060405180830381600087803b158015610a7457600080fd5b505af1158015610a88573d6000803e3d6000fd5b505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610b16576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610b9a6127af565b610ba26127b7565b6000610bac610de2565b905060005b8151811015610d8e576000600a6000848481518110610bd257610bd26146e3565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252810191909152604001600020546bffffffffffffffffffffffff1690508015610d7d576000600a6000858581518110610c3157610c316146e3565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550610cc87f000000000000000000000000000000000000000000000000000000000000000090565b73ffffffffffffffffffffffffffffffffffffffff166366316d8d848481518110610cf557610cf56146e3565b6020026020010151836040518363ffffffff1660e01b8152600401610d4a9
2919073ffffffffffffffffffffffffffffffffffffffff9290921682526bffffffffffffffffffffffff16602082015260400190565b600060405180830381600087803b158015610d6457600080fd5b505af1158015610d78573d6000803e3d6000fd5b505050505b50610d8781614712565b9050610bb1565b5050565b610d9a61272c565b6000819003610dd5576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600c6105ec82848361454c565b60606006805480602002602001604051908101604052809291908181526020018280548015610e4757602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610e1c575b5050505050905090565b6060600d8054610e60906144b3565b9050600003610e9b576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600d8054610ea8906144b3565b80601f0160208091040260200160405190810160405280929190818152602001828054610ed4906144b3565b8015610e475780601f10610ef657610100808354040283529160200191610e47565b820191906000526020600020905b815481529060010190602001808311610f0457509395945050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610f91576040517fc41a5b0900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008181526007602052604080822091909155517f8a4b97add3359bd6bcf5e82874363670eb5ad0f7615abddbd0ed0a3a98f0f416906108369083815260200190565b6040805161016081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e08101829052610100810182905261012081018290526101408101919091523373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461109c576040517fc41a5b0900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6110ad6110a88361474a565b612963565b90506110bf6060830160408401614496565b815173ffffffffffffffffffffffffffffffffffffffff91909116907fbf50768ccf13bd0110ca6d53a9c4f1f3271abdd4c24a56878863ed25b20598f
f3261110d60c0870160a08801614837565b61111f61016088016101408901614496565b6111298880614854565b61113b6101208b016101008c016148b9565b60208b01356111516101008d0160e08e016148d4565b8b604051611167999897969594939291906148f1565b60405180910390a35b919050565b6000806111828989612e01565b915091508115611193575050611782565b604080518b3580825262ffffff6020808f0135600881901c9290921690840152909290917fb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62910160405180910390a16111f08b8b8b8b8b8b612f8a565b60035460009060029061120e9060ff80821691610100900416614999565b61121891906149e1565b611223906001614999565b60ff169050888114611291576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f77726f6e67206e756d626572206f66207369676e6174757265730000000000006044820152606401610b0d565b888714611320576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152602860248201527f7265706f727420727320616e64207373206d757374206265206f66206571756160448201527f6c206c656e6774680000000000000000000000000000000000000000000000006064820152608401610b0d565b3360009081526004602090815260408083208151808301909252805460ff8082168452929391929184019161010090910416600281111561136357611363614a03565b600281111561137457611374614a03565b905250905060028160200151600281111561139157611391614a03565b141580156113da57506006816000015160ff16815481106113b4576113b46146e3565b60009182526020909120015473ffffffffffffffffffffffffffffffffffffffff163314155b15611441576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f756e617574686f72697a6564207472616e736d697474657200000000000000006044820152606401610b0d565b5050505061144d613a4c565b60008a8a60405161145f929190614a32565b604051908190038120611476918e90602001614a42565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181528282528051602091820120838301909252600080845290830152915060005b898110156117725760006001848984602081106114df576114df6146e3565b6114ec919
01a601b614999565b8e8e868181106114fe576114fe6146e3565b905060200201358d8d87818110611517576115176146e3565b9050602002013560405160008152602001604052604051611554949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa158015611576573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015173ffffffffffffffffffffffffffffffffffffffff811660009081526004602090815290849020838501909452835460ff808216855292965092945084019161010090041660028111156115f6576115f6614a03565b600281111561160757611607614a03565b905250925060018360200151600281111561162457611624614a03565b1461168b576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f61646472657373206e6f7420617574686f72697a656420746f207369676e00006044820152606401610b0d565b8251600090869060ff16601f81106116a5576116a56146e3565b602002015173ffffffffffffffffffffffffffffffffffffffff1614611727576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f6e6f6e2d756e69717565207369676e61747572650000000000000000000000006044820152606401610b0d565b8085846000015160ff16601f8110611741576117416146e3565b73ffffffffffffffffffffffffffffffffffffffff90921660209290920201525061176b81614712565b90506114c0565b50505061177e82613041565b5050505b5050505050505050565b60007f00000000000000000000000000000000000000000000000000000000000000006040517f10fc49c100000000000000000000000000000000000000000000000000000000815267ffffffffffffffff8816600482015263ffffffff8516602482015273ffffffffffffffffffffffffffffffffffffffff91909116906310fc49c19060440160006040518083038186803b15801561182c57600080fd5b505afa158015611840573d6000803e3d6000fd5b5050505066038d7ea4c68000821115611885576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061188f610841565b905060006118d287878080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061023
c92505050565b90506118e085858385613190565b98975050505050505050565b6060600c80546118fb906144b3565b9050600003611936576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600c8054610ea8906144b3565b855185518560ff16601f8311156119b6576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f746f6f206d616e79207369676e657273000000000000000000000000000000006044820152606401610b0d565b80600003611a20576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f66206d75737420626520706f73697469766500000000000000000000000000006044820152606401610b0d565b818314611aae576040517f89a61989000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f6f7261636c6520616464726573736573206f7574206f6620726567697374726160448201527f74696f6e000000000000000000000000000000000000000000000000000000006064820152608401610b0d565b611ab9816003614a56565b8311611b21576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f6661756c74792d6f7261636c65206620746f6f206869676800000000000000006044820152606401610b0d565b611b2961272c565b6040805160c0810182528a8152602081018a905260ff89169181018290526060810188905267ffffffffffffffff8716608082015260a0810186905290611b7090886132fd565b60055415611d2557600554600090611b8a90600190614a6d565b9050600060058281548110611ba157611ba16146e3565b60009182526020822001546006805473ffffffffffffffffffffffffffffffffffffffff90921693509084908110611bdb57611bdb6146e3565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff85811684526004909252604080842080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000090811690915592909116808452922080549091169055600580549192509080611c5b57611c5b614a80565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff00000000000000000000000000000000000000001690550190556
006805480611cc457611cc4614a80565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905501905550611b70915050565b60005b8151518110156122dc57815180516000919083908110611d4a57611d4a6146e3565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1603611dcf576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f7369676e6572206d757374206e6f7420626520656d70747900000000000000006044820152606401610b0d565b600073ffffffffffffffffffffffffffffffffffffffff1682602001518281518110611dfd57611dfd6146e3565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1603611e82576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f7472616e736d6974746572206d757374206e6f7420626520656d7074790000006044820152606401610b0d565b60006004600084600001518481518110611e9e57611e9e6146e3565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff166002811115611ee857611ee8614a03565b14611f4f576040517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265706561746564207369676e657220616464726573730000000000000000006044820152606401610b0d565b6040805180820190915260ff82168152600160208201528251805160049160009185908110611f8057611f806146e3565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000161761010083600281111561202157612021614a03565b0217905550600091506120319050565b600460008460200151848151811061204b5761204b6146e3565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff16600281111561209557612095614a03565b146120fc57604
0517f89a6198900000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f7265706561746564207472616e736d69747465722061646472657373000000006044820152606401610b0d565b6040805180820190915260ff82168152602081016002815250600460008460200151848151811061212f5761212f6146e3565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000016176101008360028111156121d0576121d0614a03565b0217905550508251805160059250839081106121ee576121ee6146e3565b602090810291909101810151825460018101845560009384529282902090920180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909316929092179091558201518051600691908390811061226a5761226a6146e3565b60209081029190910181015182546001810184556000938452919092200180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909216919091179055806122d481614712565b915050611d28565b506040810151600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff909216919091179055600180547fffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffff8116780100000000000000000000000000000000000000000000000063ffffffff438116820292909217808555920481169291829160149161239491849174010000000000000000000000000000000000000000900416614aaf565b92506101000a81548163ffffffff021916908363ffffffff1602179055506123f34630600160149054906101000a900463ffffffff1663ffffffff16856000015186602001518760400151886060015189608001518a60a00151613316565b600281905582518051600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff1661010060ff9093169290920291909117905560015460208501516040808701516060880151608089015160a08a015193517f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05986124aa988b9891977401000000000
000000000000000000000000000000090920463ffffffff16969095919491939192614acc565b60405180910390a15050505050505050505050565b604080516101208101825260085463ffffffff80821683526401000000008204811660208401526801000000000000000082048116838501526c0100000000000000000000000080830482166060850152700100000000000000000000000000000000830468ffffffffffffffffff166080850152790100000000000000000000000000000000000000000000000000830464ffffffffff1660a0808601919091527e0100000000000000000000000000000000000000000000000000000000000090930461ffff1660c08501526009547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff811660e08601527c01000000000000000000000000000000000000000000000000000000009004909116610100840152600b5484517ffeaf968c00000000000000000000000000000000000000000000000000000000815294516000958694859490930473ffffffffffffffffffffffffffffffffffffffff169263feaf968c926004808401938290030181865afa15801561264d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906126719190614b7c565b5093505092505080426126849190614a6d565b836020015163ffffffff161080156126a657506000836020015163ffffffff16115b156126d457505060e001517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff16919050565b60008213612711576040517f43d4cf6600000000000000000000000000000000000000000000000000000000815260048101839052602401610b0d565b5092915050565b61272061272c565b612729816133c1565b50565b60005473ffffffffffffffffffffffffffffffffffffffff1633146127ad576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610b0d565b565b6127ad61272c565b600b546bffffffffffffffffffffffff166000036127d157565b60006127db610de2565b8051909150600081900361281b576040517f30274b3a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600b5460009061283a9083906bffffffffffffffffffffffff16614bcc565b905060005b828110156129055781600a600086848151811061285e5761285e6146e3565b602002602001015173fffffffff
fffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282829054906101000a90046bffffffffffffffffffffffff166128c69190614bf7565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550806128fe90614712565b905061283f565b506129108282614c1c565b600b80546000906129309084906bffffffffffffffffffffffff166146be565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550505050565b6040805161016081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e0810182905261010081018290526101208101829052610140810191909152604080516101208101825260085463ffffffff80821683526401000000008204811660208401526801000000000000000082048116938301939093526c0100000000000000000000000081048316606083015268ffffffffffffffffff700100000000000000000000000000000000820416608083015264ffffffffff79010000000000000000000000000000000000000000000000000082041660a083015261ffff7e01000000000000000000000000000000000000000000000000000000000000909104811660c083018190526009547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff811660e08501527c0100000000000000000000000000000000000000000000000000000000900490931661010080840191909152850151919291161115612b1e576040517fdada758700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600854600090700100000000000000000000000000000000900468ffffffffffffffffff1690506000612b5b8560e001513a848860800151613190565b9050806bffffffffffffffffffffffff1685606001516bffffffffffffffffffffffff161015612bb7576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600083610100015163ffffffff1642612bd09190614c44565b905060003087604001518860a001518960c001516001612bf09190614c57565b8a5180516020918201206101008d015160e08e0151604051612ca498979695948c918c9132910173ffffffffffffffffffffffffffffffffffffffff9a8b168152988a1660208a015267ffffffffffffffff97881660408a01529590961660608801526
08087019390935261ffff9190911660a086015263ffffffff90811660c08601526bffffffffffffffffffffffff9190911660e0850152919091166101008301529091166101208201526101400190565b6040516020818303038152906040528051906020012090506040518061016001604052808281526020013073ffffffffffffffffffffffffffffffffffffffff168152602001846bffffffffffffffffffffffff168152602001886040015173ffffffffffffffffffffffffffffffffffffffff1681526020018860a0015167ffffffffffffffff1681526020018860e0015163ffffffff168152602001886080015168ffffffffffffffffff1681526020018568ffffffffffffffffff168152602001866040015163ffffffff1664ffffffffff168152602001866060015163ffffffff1664ffffffffff1681526020018363ffffffff16815250955085604051602001612db391906140b5565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152918152815160209283012060009384526007909252909120555092949350505050565b6000612e356040518060a0016040528060608152602001606081526020016060815260200160608152602001606081525090565b600080808080612e47888a018a614d53565b84519499509297509095509350915060ff16801580612e67575084518114155b80612e73575083518114155b80612e7f575082518114155b80612e8b575081518114155b15612ef2576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f4669656c6473206d75737420626520657175616c206c656e67746800000000006044820152606401610b0d565b60005b81811015612f5857612f2e878281518110612f1257612f126146e3565b6020026020010151600090815260076020526040902054151590565b612f5857612f3d600183614a6d565b8103612f4857600198505b612f5181614712565b9050612ef5565b50506040805160a0810182529586526020860194909452928401919091526060830152608082015290505b9250929050565b6000612f97826020614a56565b612fa2856020614a56565b612fae88610144614c44565b612fb89190614c44565b612fc29190614c44565b612fcd906000614c44565b9050368114613038576040517f660bd4ba00000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f63616c6c64617461206c656e677468206d69736d6174636800000000000000006044820152606401610b0d565b5050505
0505050565b80515160ff1660005b818110156105ec5760006130f38460000151838151811061306d5761306d6146e3565b60200260200101518560200151848151811061308b5761308b6146e3565b6020026020010151866040015185815181106130a9576130a96146e3565b6020026020010151876060015186815181106130c7576130c76146e3565b6020026020010151886080015187815181106130e5576130e56146e3565b6020026020010151886134b6565b9050600081600681111561310957613109614a03565b14806131265750600181600681111561312457613124614a03565b145b1561317f57835180518390811061313f5761313f6146e3565b60209081029190910181015160405133815290917fc708e0440951fd63499c0f7a73819b469ee5dd3ecc356c0ab4eb7f18389009d9910160405180910390a25b5061318981614712565b905061304a565b600854600090790100000000000000000000000000000000000000000000000000900464ffffffffff168410156131eb57600854790100000000000000000000000000000000000000000000000000900464ffffffffff1693505b600854600090612710906132059063ffffffff1687614a56565b61320f9190614e25565b6132199086614c44565b60085490915060009087906132529063ffffffff6c01000000000000000000000000820481169168010000000000000000900416614aaf565b61325c9190614aaf565b63ffffffff16905060006132a66000368080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506137ca92505050565b905060006132c7826132b88587614a56565b6132c29190614c44565b61390c565b905060006132e368ffffffffffffffffff808916908a16614bf7565b90506132ef8183614bf7565b9a9950505050505050505050565b6000613307610de2565b511115610d8e57610d8e6127b7565b6000808a8a8a8a8a8a8a8a8a60405160200161333a99989796959493929190614e39565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e01000000000000000000000000000000000000000000000000000000000000179150509998505050505050505050565b3373ffffffffffffffffffffffffffffffffffffffff821603613440576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e73666572207
46f2073656c660000000000000000006044820152606401610b0d565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600080848060200190518101906134cd9190614f05565b905060003a8261012001518361010001516134e89190614fcd565b64ffffffffff166134f99190614a56565b905060008460ff166135416000368080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506137ca92505050565b61354b9190614e25565b9050600061355c6132c28385614c44565b905060006135693a61390c565b90506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663330605298e8e868b60e0015168ffffffffffffffffff16896135c89190614bf7565b338d6040518763ffffffff1660e01b81526004016135eb96959493929190614feb565b60408051808303816000875af1158015613609573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061362d9190615067565b9092509050600082600681111561364657613646614a03565b14806136635750600182600681111561366157613661614a03565b145b156137b95760008e8152600760205260408120556136818185614bf7565b336000908152600a6020526040812080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff93841617905560e0890151600b805468ffffffffffffffffff909216939092916136ed91859116614bf7565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508d7f90815c2e624694e8010bffad2bcefaf96af282ef1bc2ebc0042d1b89a585e0468487848b60c0015168ffffffffffffffffff168c60e0015168ffffffffffffffffff16878b61376c9190614bf7565b6137769190614bf7565b6137809190614bf7565b604080516bffffffffffffffffffffffff9586168152602081019490945291841683830152909216606082015290519081900360800190a25b509c9b505050505050505050505050565b6000466137d681613940565b1561385257606c73ffffffffffffffffffffffffffffffffffffffff1663c6f7de0e6040518163ffffffff1660e01b8152600401602
060405180830381865afa158015613827573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061384b919061509a565b9392505050565b61385b81613963565b156139035773420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff166349948e0e846040518060800160405280604881526020016150e3604891396040516020016138bb9291906150b3565b6040516020818303038152906040526040518263ffffffff1660e01b81526004016138e69190613d7a565b602060405180830381865afa158015613827573d6000803e3d6000fd5b50600092915050565b600061393a6139196124bf565b61392b84670de0b6b3a7640000614a56565b6139359190614e25565b6139aa565b92915050565b600061a4b1821480613954575062066eed82145b8061393a57505062066eee1490565b6000600a82148061397557506101a482145b80613982575062aa37dc82145b8061398e575061210582145b8061399b575062014a3382145b8061393a57505062014a341490565b60006bffffffffffffffffffffffff821115613a48576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f53616665436173743a2076616c756520646f65736e27742066697420696e203960448201527f36206269747300000000000000000000000000000000000000000000000000006064820152608401610b0d565b5090565b604051806103e00160405280601f906020820280368337509192915050565b60008083601f840112613a7d57600080fd5b50813567ffffffffffffffff811115613a9557600080fd5b602083019150836020828501011115612f8357600080fd5b60008060208385031215613ac057600080fd5b823567ffffffffffffffff811115613ad757600080fd5b613ae385828601613a6b565b90969095509350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051610120810167ffffffffffffffff81118282101715613b4257613b42613aef565b60405290565b604051610160810167ffffffffffffffff81118282101715613b4257613b42613aef565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715613bb357613bb3613aef565b604052919050565b63ffffffff8116811461272957600080fd5b803561117081613bbb565b68ffffffffffffffffff8116811461272957600080fd5b803561117
081613bd8565b64ffffffffff8116811461272957600080fd5b803561117081613bfa565b803561ffff8116811461117057600080fd5b80357bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8116811461117057600080fd5b60006101208284031215613c6957600080fd5b613c71613b1e565b613c7a83613bcd565b8152613c8860208401613bcd565b6020820152613c9960408401613bcd565b6040820152613caa60608401613bcd565b6060820152613cbb60808401613bef565b6080820152613ccc60a08401613c0d565b60a0820152613cdd60c08401613c18565b60c0820152613cee60e08401613c2a565b60e0820152610100613d01818501613bcd565b908201529392505050565b60005b83811015613d27578181015183820152602001613d0f565b50506000910152565b60008151808452613d48816020860160208601613d0c565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061384b6020830184613d30565b600082601f830112613d9e57600080fd5b813567ffffffffffffffff811115613db857613db8613aef565b613de960207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601613b6c565b818152846020838601011115613dfe57600080fd5b816020850160208301376000918101602001919091529392505050565b600060208284031215613e2d57600080fd5b813567ffffffffffffffff811115613e4457600080fd5b613e5084828501613d8d565b949350505050565b73ffffffffffffffffffffffffffffffffffffffff8116811461272957600080fd5b803561117081613e58565b6bffffffffffffffffffffffff8116811461272957600080fd5b803561117081613e85565b60008060408385031215613ebd57600080fd5b8235613ec881613e58565b91506020830135613ed881613e85565b809150509250929050565b600081518084526020808501945080840160005b83811015613f2957815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101613ef7565b509495945050505050565b60208152600061384b6020830184613ee3565b600060208284031215613f5957600080fd5b5035919050565b600060208284031215613f7257600080fd5b813567ffffffffffffffff811115613f8957600080fd5b8201610160818503121561384b57600080fd5b805182526020810151613fc7602084018273ffffffffffffffffffffffffffffffffffffffff169052565b506040810151613fe760408401826bfffffffffff
fffffffffffff169052565b50606081015161400f606084018273ffffffffffffffffffffffffffffffffffffffff169052565b50608081015161402b608084018267ffffffffffffffff169052565b5060a081015161404360a084018263ffffffff169052565b5060c081015161406060c084018268ffffffffffffffffff169052565b5060e081015161407d60e084018268ffffffffffffffffff169052565b506101008181015164ffffffffff9081169184019190915261012080830151909116908301526101409081015163ffffffff16910152565b610160810161393a8284613f9c565b60008083601f8401126140d657600080fd5b50813567ffffffffffffffff8111156140ee57600080fd5b6020830191508360208260051b8501011115612f8357600080fd5b60008060008060008060008060e0898b03121561412557600080fd5b606089018a81111561413657600080fd5b8998503567ffffffffffffffff8082111561415057600080fd5b61415c8c838d01613a6b565b909950975060808b013591508082111561417557600080fd5b6141818c838d016140c4565b909750955060a08b013591508082111561419a57600080fd5b506141a78b828c016140c4565b999c989b50969995989497949560c00135949350505050565b815163ffffffff908116825260208084015182169083015260408084015182169083015260608084015191821690830152610120820190506080830151614214608084018268ffffffffffffffffff169052565b5060a083015161422d60a084018264ffffffffff169052565b5060c083015161424360c084018261ffff169052565b5060e083015161427360e08401827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff169052565b506101008381015163ffffffff8116848301525b505092915050565b67ffffffffffffffff8116811461272957600080fd5b80356111708161428f565b6000806000806000608086880312156142c857600080fd5b85356142d38161428f565b9450602086013567ffffffffffffffff8111156142ef57600080fd5b6142fb88828901613a6b565b909550935050604086013561430f81613bbb565b949793965091946060013592915050565b600067ffffffffffffffff82111561433a5761433a613aef565b5060051b60200190565b600082601f83011261435557600080fd5b8135602061436a61436583614320565b613b6c565b82815260059290921b8401810191818101908684111561438957600080fd5b8286015b848110156143ad5780356143a081613e58565b835291830191830161438d565b509695505050505050565b803560ff811681146111705
7600080fd5b60008060008060008060c087890312156143e257600080fd5b863567ffffffffffffffff808211156143fa57600080fd5b6144068a838b01614344565b9750602089013591508082111561441c57600080fd5b6144288a838b01614344565b965061443660408a016143b8565b9550606089013591508082111561444c57600080fd5b6144588a838b01613d8d565b945061446660808a016142a5565b935060a089013591508082111561447c57600080fd5b5061448989828a01613d8d565b9150509295509295509295565b6000602082840312156144a857600080fd5b813561384b81613e58565b600181811c908216806144c757607f821691505b602082108103614500577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f8211156105ec57600081815260208120601f850160051c8101602086101561452d5750805b601f850160051c820191505b81811015610a8857828155600101614539565b67ffffffffffffffff83111561456457614564613aef565b6145788361457283546144b3565b83614506565b6000601f8411600181146145ca57600085156145945750838201355b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600387901b1c1916600186901b178355614660565b6000838152602090207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0861690835b8281101561461957868501358255602094850194600190920191016145f9565b5086821015614654577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60f88860031b161c19848701351681555b505060018560011b0183555b5050505050565b805161117081613bd8565b60006020828403121561468457600080fd5b815161384b81613bd8565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6bffffffffffffffffffffffff8281168282160390808211156127115761271161468f565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036147435761474361468f565b5060010190565b6000610160823603121561475d57600080fd5b614765613b48565b823567ffffffffffffffff81111561477c57600080fd5b61478836828601613d8d565b825250602083013560208201526147a160408401613e7a565b60408201526147b260608
401613e9f565b60608201526147c360808401613bef565b60808201526147d460a084016142a5565b60a08201526147e560c084016142a5565b60c08201526147f660e08401613bcd565b60e0820152610100614809818501613c18565b9082015261012061481b8482016142a5565b9082015261014061482d848201613e7a565b9082015292915050565b60006020828403121561484957600080fd5b813561384b8161428f565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe184360301811261488957600080fd5b83018035915067ffffffffffffffff8211156148a457600080fd5b602001915036819003821315612f8357600080fd5b6000602082840312156148cb57600080fd5b61384b82613c18565b6000602082840312156148e657600080fd5b813561384b81613bbb565b73ffffffffffffffffffffffffffffffffffffffff8a8116825267ffffffffffffffff8a166020830152881660408201526102406060820181905281018690526000610260878982850137600083890182015261ffff8716608084015260a0830186905263ffffffff851660c0840152601f88017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01683010190506132ef60e0830184613f9c565b60ff818116838216019081111561393a5761393a61468f565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b600060ff8316806149f4576149f46149b2565b8060ff84160491505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b8183823760009101908152919050565b828152606082602083013760800192915050565b808202811582820484141761393a5761393a61468f565b8181038181111561393a5761393a61468f565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b63ffffffff8181168382160190808211156127115761271161468f565b600061012063ffffffff808d1684528b6020850152808b16604085015250806060840152614afc8184018a613ee3565b90508281036080840152614b108189613ee3565b905060ff871660a084015282810360c0840152614b2d8187613d30565b905067ffffffffffffffff851660e0840152828103610100840152614b528185613d30565b9c9b505050505050505050505050565b805169ffffffffffffffffffff8116811461117057600080fd5b600080600080600060a08688031215614b9457600080fd5
b614b9d86614b62565b9450602086015193506040860151925060608601519150614bc060808701614b62565b90509295509295909350565b60006bffffffffffffffffffffffff80841680614beb57614beb6149b2565b92169190910492915050565b6bffffffffffffffffffffffff8181168382160190808211156127115761271161468f565b6bffffffffffffffffffffffff8181168382160280821691908281146142875761428761468f565b8082018082111561393a5761393a61468f565b67ffffffffffffffff8181168382160190808211156127115761271161468f565b600082601f830112614c8957600080fd5b81356020614c9961436583614320565b82815260059290921b84018101918181019086841115614cb857600080fd5b8286015b848110156143ad5780358352918301918301614cbc565b600082601f830112614ce457600080fd5b81356020614cf461436583614320565b82815260059290921b84018101918181019086841115614d1357600080fd5b8286015b848110156143ad57803567ffffffffffffffff811115614d375760008081fd5b614d458986838b0101613d8d565b845250918301918301614d17565b600080600080600060a08688031215614d6b57600080fd5b853567ffffffffffffffff80821115614d8357600080fd5b614d8f89838a01614c78565b96506020880135915080821115614da557600080fd5b614db189838a01614cd3565b95506040880135915080821115614dc757600080fd5b614dd389838a01614cd3565b94506060880135915080821115614de957600080fd5b614df589838a01614cd3565b93506080880135915080821115614e0b57600080fd5b50614e1888828901614cd3565b9150509295509295909350565b600082614e3457614e346149b2565b500490565b60006101208b835273ffffffffffffffffffffffffffffffffffffffff8b16602084015267ffffffffffffffff808b166040850152816060850152614e808285018b613ee3565b91508382036080850152614e94828a613ee3565b915060ff881660a085015283820360c0850152614eb18288613d30565b90861660e08501528381036101008501529050614b528185613d30565b805161117081613e58565b805161117081613e85565b80516111708161428f565b805161117081613bbb565b805161117081613bfa565b60006101608284031215614f1857600080fd5b614f20613b48565b82518152614f3060208401614ece565b6020820152614f4160408401614ed9565b6040820152614f5260608401614ece565b6060820152614f6360808401614ee4565b6080820152614f7460a08401614eef565b60a0820152614f8
560c08401614667565b60c0820152614f9660e08401614667565b60e0820152610100614fa9818501614efa565b90820152610120614fbb848201614efa565b90820152610140613d01848201614eef565b64ffffffffff8181168382160190808211156127115761271161468f565b6000610200808352614fff8184018a613d30565b905082810360208401526150138189613d30565b6bffffffffffffffffffffffff88811660408601528716606085015273ffffffffffffffffffffffffffffffffffffffff86166080850152915061505c905060a0830184613f9c565b979650505050505050565b6000806040838503121561507a57600080fd5b82516007811061508957600080fd5b6020840151909250613ed881613e85565b6000602082840312156150ac57600080fd5b5051919050565b600083516150c5818460208801613d0c565b8351908301906150d9818360208801613d0c565b0194935050505056fe307866666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000813000a", +} + +var FunctionsCoordinatorABI = FunctionsCoordinatorMetaData.ABI + +var FunctionsCoordinatorBin = FunctionsCoordinatorMetaData.Bin + +func DeployFunctionsCoordinator(auth *bind.TransactOpts, backend bind.ContractBackend, router common.Address, config FunctionsBillingConfig, linkToNativeFeed common.Address) (common.Address, *types.Transaction, *FunctionsCoordinator, error) { + parsed, err := FunctionsCoordinatorMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(FunctionsCoordinatorBin), backend, router, config, linkToNativeFeed) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &FunctionsCoordinator{address: address, abi: *parsed, FunctionsCoordinatorCaller: FunctionsCoordinatorCaller{contract: contract}, FunctionsCoordinatorTransactor: FunctionsCoordinatorTransactor{contract: contract}, FunctionsCoordinatorFilterer: 
FunctionsCoordinatorFilterer{contract: contract}}, nil +} + +type FunctionsCoordinator struct { + address common.Address + abi abi.ABI + FunctionsCoordinatorCaller + FunctionsCoordinatorTransactor + FunctionsCoordinatorFilterer +} + +type FunctionsCoordinatorCaller struct { + contract *bind.BoundContract +} + +type FunctionsCoordinatorTransactor struct { + contract *bind.BoundContract +} + +type FunctionsCoordinatorFilterer struct { + contract *bind.BoundContract +} + +type FunctionsCoordinatorSession struct { + Contract *FunctionsCoordinator + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type FunctionsCoordinatorCallerSession struct { + Contract *FunctionsCoordinatorCaller + CallOpts bind.CallOpts +} + +type FunctionsCoordinatorTransactorSession struct { + Contract *FunctionsCoordinatorTransactor + TransactOpts bind.TransactOpts +} + +type FunctionsCoordinatorRaw struct { + Contract *FunctionsCoordinator +} + +type FunctionsCoordinatorCallerRaw struct { + Contract *FunctionsCoordinatorCaller +} + +type FunctionsCoordinatorTransactorRaw struct { + Contract *FunctionsCoordinatorTransactor +} + +func NewFunctionsCoordinator(address common.Address, backend bind.ContractBackend) (*FunctionsCoordinator, error) { + abi, err := abi.JSON(strings.NewReader(FunctionsCoordinatorABI)) + if err != nil { + return nil, err + } + contract, err := bindFunctionsCoordinator(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &FunctionsCoordinator{address: address, abi: abi, FunctionsCoordinatorCaller: FunctionsCoordinatorCaller{contract: contract}, FunctionsCoordinatorTransactor: FunctionsCoordinatorTransactor{contract: contract}, FunctionsCoordinatorFilterer: FunctionsCoordinatorFilterer{contract: contract}}, nil +} + +func NewFunctionsCoordinatorCaller(address common.Address, caller bind.ContractCaller) (*FunctionsCoordinatorCaller, error) { + contract, err := bindFunctionsCoordinator(address, caller, nil, nil) + if err != nil { + 
return nil, err + } + return &FunctionsCoordinatorCaller{contract: contract}, nil +} + +func NewFunctionsCoordinatorTransactor(address common.Address, transactor bind.ContractTransactor) (*FunctionsCoordinatorTransactor, error) { + contract, err := bindFunctionsCoordinator(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &FunctionsCoordinatorTransactor{contract: contract}, nil +} + +func NewFunctionsCoordinatorFilterer(address common.Address, filterer bind.ContractFilterer) (*FunctionsCoordinatorFilterer, error) { + contract, err := bindFunctionsCoordinator(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FunctionsCoordinatorFilterer{contract: contract}, nil +} + +func bindFunctionsCoordinator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FunctionsCoordinatorMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsCoordinator.Contract.FunctionsCoordinatorCaller.contract.Call(opts, result, method, params...) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.FunctionsCoordinatorTransactor.contract.Transfer(opts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.FunctionsCoordinatorTransactor.contract.Transact(opts, method, params...) 
+} + +func (_FunctionsCoordinator *FunctionsCoordinatorCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsCoordinator.Contract.contract.Call(opts, result, method, params...) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.contract.Transfer(opts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.contract.Transact(opts, method, params...) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCaller) EstimateCost(opts *bind.CallOpts, subscriptionId uint64, data []byte, callbackGasLimit uint32, gasPriceWei *big.Int) (*big.Int, error) { + var out []interface{} + err := _FunctionsCoordinator.contract.Call(opts, &out, "estimateCost", subscriptionId, data, callbackGasLimit, gasPriceWei) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) EstimateCost(subscriptionId uint64, data []byte, callbackGasLimit uint32, gasPriceWei *big.Int) (*big.Int, error) { + return _FunctionsCoordinator.Contract.EstimateCost(&_FunctionsCoordinator.CallOpts, subscriptionId, data, callbackGasLimit, gasPriceWei) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCallerSession) EstimateCost(subscriptionId uint64, data []byte, callbackGasLimit uint32, gasPriceWei *big.Int) (*big.Int, error) { + return _FunctionsCoordinator.Contract.EstimateCost(&_FunctionsCoordinator.CallOpts, subscriptionId, data, callbackGasLimit, gasPriceWei) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCaller) GetAdminFee(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := 
_FunctionsCoordinator.contract.Call(opts, &out, "getAdminFee") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) GetAdminFee() (*big.Int, error) { + return _FunctionsCoordinator.Contract.GetAdminFee(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCallerSession) GetAdminFee() (*big.Int, error) { + return _FunctionsCoordinator.Contract.GetAdminFee(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCaller) GetConfig(opts *bind.CallOpts) (FunctionsBillingConfig, error) { + var out []interface{} + err := _FunctionsCoordinator.contract.Call(opts, &out, "getConfig") + + if err != nil { + return *new(FunctionsBillingConfig), err + } + + out0 := *abi.ConvertType(out[0], new(FunctionsBillingConfig)).(*FunctionsBillingConfig) + + return out0, err + +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) GetConfig() (FunctionsBillingConfig, error) { + return _FunctionsCoordinator.Contract.GetConfig(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCallerSession) GetConfig() (FunctionsBillingConfig, error) { + return _FunctionsCoordinator.Contract.GetConfig(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCaller) GetDONFee(opts *bind.CallOpts, arg0 []byte) (*big.Int, error) { + var out []interface{} + err := _FunctionsCoordinator.contract.Call(opts, &out, "getDONFee", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) GetDONFee(arg0 []byte) (*big.Int, error) { + return _FunctionsCoordinator.Contract.GetDONFee(&_FunctionsCoordinator.CallOpts, arg0) +} + +func (_FunctionsCoordinator 
*FunctionsCoordinatorCallerSession) GetDONFee(arg0 []byte) (*big.Int, error) { + return _FunctionsCoordinator.Contract.GetDONFee(&_FunctionsCoordinator.CallOpts, arg0) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCaller) GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _FunctionsCoordinator.contract.Call(opts, &out, "getDONPublicKey") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) GetDONPublicKey() ([]byte, error) { + return _FunctionsCoordinator.Contract.GetDONPublicKey(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCallerSession) GetDONPublicKey() ([]byte, error) { + return _FunctionsCoordinator.Contract.GetDONPublicKey(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCaller) GetThresholdPublicKey(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _FunctionsCoordinator.contract.Call(opts, &out, "getThresholdPublicKey") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) GetThresholdPublicKey() ([]byte, error) { + return _FunctionsCoordinator.Contract.GetThresholdPublicKey(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCallerSession) GetThresholdPublicKey() ([]byte, error) { + return _FunctionsCoordinator.Contract.GetThresholdPublicKey(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCaller) GetWeiPerUnitLink(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FunctionsCoordinator.contract.Call(opts, &out, "getWeiPerUnitLink") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], 
new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) GetWeiPerUnitLink() (*big.Int, error) { + return _FunctionsCoordinator.Contract.GetWeiPerUnitLink(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCallerSession) GetWeiPerUnitLink() (*big.Int, error) { + return _FunctionsCoordinator.Contract.GetWeiPerUnitLink(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := _FunctionsCoordinator.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _FunctionsCoordinator.Contract.LatestConfigDetails(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _FunctionsCoordinator.Contract.LatestConfigDetails(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) { + var out []interface{} + err := _FunctionsCoordinator.contract.Call(opts, &out, "latestConfigDigestAndEpoch") + + outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = 
*abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _FunctionsCoordinator.Contract.LatestConfigDigestAndEpoch(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCallerSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _FunctionsCoordinator.Contract.LatestConfigDigestAndEpoch(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _FunctionsCoordinator.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) Owner() (common.Address, error) { + return _FunctionsCoordinator.Contract.Owner(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCallerSession) Owner() (common.Address, error) { + return _FunctionsCoordinator.Contract.Owner(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCaller) Transmitters(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _FunctionsCoordinator.contract.Call(opts, &out, "transmitters") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) Transmitters() ([]common.Address, error) { + return _FunctionsCoordinator.Contract.Transmitters(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCallerSession) Transmitters() ([]common.Address, error) { + return 
_FunctionsCoordinator.Contract.Transmitters(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _FunctionsCoordinator.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) TypeAndVersion() (string, error) { + return _FunctionsCoordinator.Contract.TypeAndVersion(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorCallerSession) TypeAndVersion() (string, error) { + return _FunctionsCoordinator.Contract.TypeAndVersion(&_FunctionsCoordinator.CallOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsCoordinator.contract.Transact(opts, "acceptOwnership") +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) AcceptOwnership() (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.AcceptOwnership(&_FunctionsCoordinator.TransactOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.AcceptOwnership(&_FunctionsCoordinator.TransactOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactor) DeleteCommitment(opts *bind.TransactOpts, requestId [32]byte) (*types.Transaction, error) { + return _FunctionsCoordinator.contract.Transact(opts, "deleteCommitment", requestId) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) DeleteCommitment(requestId [32]byte) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.DeleteCommitment(&_FunctionsCoordinator.TransactOpts, requestId) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorSession) 
DeleteCommitment(requestId [32]byte) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.DeleteCommitment(&_FunctionsCoordinator.TransactOpts, requestId) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactor) OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsCoordinator.contract.Transact(opts, "oracleWithdraw", recipient, amount) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) OracleWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.OracleWithdraw(&_FunctionsCoordinator.TransactOpts, recipient, amount) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorSession) OracleWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.OracleWithdraw(&_FunctionsCoordinator.TransactOpts, recipient, amount) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactor) OracleWithdrawAll(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsCoordinator.contract.Transact(opts, "oracleWithdrawAll") +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) OracleWithdrawAll() (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.OracleWithdrawAll(&_FunctionsCoordinator.TransactOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorSession) OracleWithdrawAll() (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.OracleWithdrawAll(&_FunctionsCoordinator.TransactOpts) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactor) SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _FunctionsCoordinator.contract.Transact(opts, "setConfig", _signers, _transmitters, _f, 
_onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.SetConfig(&_FunctionsCoordinator.TransactOpts, _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.SetConfig(&_FunctionsCoordinator.TransactOpts, _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactor) SetDONPublicKey(opts *bind.TransactOpts, donPublicKey []byte) (*types.Transaction, error) { + return _FunctionsCoordinator.contract.Transact(opts, "setDONPublicKey", donPublicKey) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) SetDONPublicKey(donPublicKey []byte) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.SetDONPublicKey(&_FunctionsCoordinator.TransactOpts, donPublicKey) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorSession) SetDONPublicKey(donPublicKey []byte) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.SetDONPublicKey(&_FunctionsCoordinator.TransactOpts, donPublicKey) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactor) SetThresholdPublicKey(opts *bind.TransactOpts, thresholdPublicKey []byte) (*types.Transaction, error) { + return _FunctionsCoordinator.contract.Transact(opts, "setThresholdPublicKey", thresholdPublicKey) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) 
SetThresholdPublicKey(thresholdPublicKey []byte) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.SetThresholdPublicKey(&_FunctionsCoordinator.TransactOpts, thresholdPublicKey) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorSession) SetThresholdPublicKey(thresholdPublicKey []byte) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.SetThresholdPublicKey(&_FunctionsCoordinator.TransactOpts, thresholdPublicKey) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactor) StartRequest(opts *bind.TransactOpts, request FunctionsResponseRequestMeta) (*types.Transaction, error) { + return _FunctionsCoordinator.contract.Transact(opts, "startRequest", request) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) StartRequest(request FunctionsResponseRequestMeta) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.StartRequest(&_FunctionsCoordinator.TransactOpts, request) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorSession) StartRequest(request FunctionsResponseRequestMeta) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.StartRequest(&_FunctionsCoordinator.TransactOpts, request) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _FunctionsCoordinator.contract.Transact(opts, "transferOwnership", to) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.TransferOwnership(&_FunctionsCoordinator.TransactOpts, to) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.TransferOwnership(&_FunctionsCoordinator.TransactOpts, to) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactor) 
Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _FunctionsCoordinator.contract.Transact(opts, "transmit", reportContext, report, rs, ss, rawVs) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.Transmit(&_FunctionsCoordinator.TransactOpts, reportContext, report, rs, ss, rawVs) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorSession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.Transmit(&_FunctionsCoordinator.TransactOpts, reportContext, report, rs, ss, rawVs) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactor) UpdateConfig(opts *bind.TransactOpts, config FunctionsBillingConfig) (*types.Transaction, error) { + return _FunctionsCoordinator.contract.Transact(opts, "updateConfig", config) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorSession) UpdateConfig(config FunctionsBillingConfig) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.UpdateConfig(&_FunctionsCoordinator.TransactOpts, config) +} + +func (_FunctionsCoordinator *FunctionsCoordinatorTransactorSession) UpdateConfig(config FunctionsBillingConfig) (*types.Transaction, error) { + return _FunctionsCoordinator.Contract.UpdateConfig(&_FunctionsCoordinator.TransactOpts, config) +} + +type FunctionsCoordinatorCommitmentDeletedIterator struct { + Event *FunctionsCoordinatorCommitmentDeleted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsCoordinatorCommitmentDeletedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { 
+ select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorCommitmentDeleted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorCommitmentDeleted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsCoordinatorCommitmentDeletedIterator) Error() error { + return it.fail +} + +func (it *FunctionsCoordinatorCommitmentDeletedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsCoordinatorCommitmentDeleted struct { + RequestId [32]byte + Raw types.Log +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) FilterCommitmentDeleted(opts *bind.FilterOpts) (*FunctionsCoordinatorCommitmentDeletedIterator, error) { + + logs, sub, err := _FunctionsCoordinator.contract.FilterLogs(opts, "CommitmentDeleted") + if err != nil { + return nil, err + } + return &FunctionsCoordinatorCommitmentDeletedIterator{contract: _FunctionsCoordinator.contract, event: "CommitmentDeleted", logs: logs, sub: sub}, nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) WatchCommitmentDeleted(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorCommitmentDeleted) (event.Subscription, error) { + + logs, sub, err := _FunctionsCoordinator.contract.WatchLogs(opts, "CommitmentDeleted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsCoordinatorCommitmentDeleted) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "CommitmentDeleted", log); err != nil { + return err + } + event.Raw = 
log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) ParseCommitmentDeleted(log types.Log) (*FunctionsCoordinatorCommitmentDeleted, error) { + event := new(FunctionsCoordinatorCommitmentDeleted) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "CommitmentDeleted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsCoordinatorConfigSetIterator struct { + Event *FunctionsCoordinatorConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsCoordinatorConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsCoordinatorConfigSetIterator) Error() error { + return it.fail +} + +func (it *FunctionsCoordinatorConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsCoordinatorConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func 
(_FunctionsCoordinator *FunctionsCoordinatorFilterer) FilterConfigSet(opts *bind.FilterOpts) (*FunctionsCoordinatorConfigSetIterator, error) { + + logs, sub, err := _FunctionsCoordinator.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &FunctionsCoordinatorConfigSetIterator{contract: _FunctionsCoordinator.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorConfigSet) (event.Subscription, error) { + + logs, sub, err := _FunctionsCoordinator.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsCoordinatorConfigSet) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) ParseConfigSet(log types.Log) (*FunctionsCoordinatorConfigSet, error) { + event := new(FunctionsCoordinatorConfigSet) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsCoordinatorConfigUpdatedIterator struct { + Event *FunctionsCoordinatorConfigUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsCoordinatorConfigUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorConfigUpdated) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorConfigUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsCoordinatorConfigUpdatedIterator) Error() error { + return it.fail +} + +func (it *FunctionsCoordinatorConfigUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsCoordinatorConfigUpdated struct { + Config FunctionsBillingConfig + Raw types.Log +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) FilterConfigUpdated(opts *bind.FilterOpts) (*FunctionsCoordinatorConfigUpdatedIterator, error) { + + logs, sub, err := _FunctionsCoordinator.contract.FilterLogs(opts, "ConfigUpdated") + if err != nil { + return nil, err + } + return &FunctionsCoordinatorConfigUpdatedIterator{contract: _FunctionsCoordinator.contract, event: "ConfigUpdated", logs: logs, sub: sub}, nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) WatchConfigUpdated(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorConfigUpdated) (event.Subscription, error) { + + logs, sub, err := _FunctionsCoordinator.contract.WatchLogs(opts, "ConfigUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsCoordinatorConfigUpdated) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "ConfigUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err 
+ case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) ParseConfigUpdated(log types.Log) (*FunctionsCoordinatorConfigUpdated, error) { + event := new(FunctionsCoordinatorConfigUpdated) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "ConfigUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsCoordinatorOracleRequestIterator struct { + Event *FunctionsCoordinatorOracleRequest + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsCoordinatorOracleRequestIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsCoordinatorOracleRequestIterator) Error() error { + return it.fail +} + +func (it *FunctionsCoordinatorOracleRequestIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsCoordinatorOracleRequest struct { + RequestId [32]byte + RequestingContract common.Address + RequestInitiator common.Address + SubscriptionId uint64 + SubscriptionOwner common.Address + Data []byte + DataVersion uint16 + Flags [32]byte + CallbackGasLimit uint64 + Commitment FunctionsResponseCommitment + Raw types.Log +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) FilterOracleRequest(opts *bind.FilterOpts, 
requestId [][32]byte, requestingContract []common.Address) (*FunctionsCoordinatorOracleRequestIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var requestingContractRule []interface{} + for _, requestingContractItem := range requestingContract { + requestingContractRule = append(requestingContractRule, requestingContractItem) + } + + logs, sub, err := _FunctionsCoordinator.contract.FilterLogs(opts, "OracleRequest", requestIdRule, requestingContractRule) + if err != nil { + return nil, err + } + return &FunctionsCoordinatorOracleRequestIterator{contract: _FunctionsCoordinator.contract, event: "OracleRequest", logs: logs, sub: sub}, nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) WatchOracleRequest(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorOracleRequest, requestId [][32]byte, requestingContract []common.Address) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var requestingContractRule []interface{} + for _, requestingContractItem := range requestingContract { + requestingContractRule = append(requestingContractRule, requestingContractItem) + } + + logs, sub, err := _FunctionsCoordinator.contract.WatchLogs(opts, "OracleRequest", requestIdRule, requestingContractRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsCoordinatorOracleRequest) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "OracleRequest", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), 
nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) ParseOracleRequest(log types.Log) (*FunctionsCoordinatorOracleRequest, error) { + event := new(FunctionsCoordinatorOracleRequest) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "OracleRequest", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsCoordinatorOracleResponseIterator struct { + Event *FunctionsCoordinatorOracleResponse + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsCoordinatorOracleResponseIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorOracleResponse) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorOracleResponse) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsCoordinatorOracleResponseIterator) Error() error { + return it.fail +} + +func (it *FunctionsCoordinatorOracleResponseIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsCoordinatorOracleResponse struct { + RequestId [32]byte + Transmitter common.Address + Raw types.Log +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) FilterOracleResponse(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsCoordinatorOracleResponseIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := 
_FunctionsCoordinator.contract.FilterLogs(opts, "OracleResponse", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsCoordinatorOracleResponseIterator{contract: _FunctionsCoordinator.contract, event: "OracleResponse", logs: logs, sub: sub}, nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) WatchOracleResponse(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorOracleResponse, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsCoordinator.contract.WatchLogs(opts, "OracleResponse", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsCoordinatorOracleResponse) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "OracleResponse", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) ParseOracleResponse(log types.Log) (*FunctionsCoordinatorOracleResponse, error) { + event := new(FunctionsCoordinatorOracleResponse) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "OracleResponse", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsCoordinatorOwnershipTransferRequestedIterator struct { + Event *FunctionsCoordinatorOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsCoordinatorOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return 
false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsCoordinatorOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *FunctionsCoordinatorOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsCoordinatorOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsCoordinatorOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsCoordinator.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsCoordinatorOwnershipTransferRequestedIterator{contract: _FunctionsCoordinator.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorOwnershipTransferRequested, from []common.Address, to []common.Address) 
(event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsCoordinator.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsCoordinatorOwnershipTransferRequested) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) ParseOwnershipTransferRequested(log types.Log) (*FunctionsCoordinatorOwnershipTransferRequested, error) { + event := new(FunctionsCoordinatorOwnershipTransferRequested) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsCoordinatorOwnershipTransferredIterator struct { + Event *FunctionsCoordinatorOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsCoordinatorOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { 
+ case log := <-it.logs: + it.Event = new(FunctionsCoordinatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsCoordinatorOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *FunctionsCoordinatorOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsCoordinatorOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsCoordinatorOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsCoordinator.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsCoordinatorOwnershipTransferredIterator{contract: _FunctionsCoordinator.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsCoordinator.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsCoordinatorOwnershipTransferred) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) ParseOwnershipTransferred(log types.Log) (*FunctionsCoordinatorOwnershipTransferred, error) { + event := new(FunctionsCoordinatorOwnershipTransferred) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsCoordinatorRequestBilledIterator struct { + Event *FunctionsCoordinatorRequestBilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsCoordinatorRequestBilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorRequestBilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorRequestBilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsCoordinatorRequestBilledIterator) Error() error { + return it.fail +} + +func (it *FunctionsCoordinatorRequestBilledIterator) 
Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsCoordinatorRequestBilled struct { + RequestId [32]byte + JuelsPerGas *big.Int + L1FeeShareWei *big.Int + CallbackCostJuels *big.Int + TotalCostJuels *big.Int + Raw types.Log +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) FilterRequestBilled(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsCoordinatorRequestBilledIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsCoordinator.contract.FilterLogs(opts, "RequestBilled", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsCoordinatorRequestBilledIterator{contract: _FunctionsCoordinator.contract, event: "RequestBilled", logs: logs, sub: sub}, nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) WatchRequestBilled(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorRequestBilled, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsCoordinator.contract.WatchLogs(opts, "RequestBilled", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsCoordinatorRequestBilled) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "RequestBilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) ParseRequestBilled(log types.Log) (*FunctionsCoordinatorRequestBilled, error) { + 
event := new(FunctionsCoordinatorRequestBilled) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "RequestBilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsCoordinatorTransmittedIterator struct { + Event *FunctionsCoordinatorTransmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsCoordinatorTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsCoordinatorTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsCoordinatorTransmittedIterator) Error() error { + return it.fail +} + +func (it *FunctionsCoordinatorTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsCoordinatorTransmitted struct { + ConfigDigest [32]byte + Epoch uint32 + Raw types.Log +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) FilterTransmitted(opts *bind.FilterOpts) (*FunctionsCoordinatorTransmittedIterator, error) { + + logs, sub, err := _FunctionsCoordinator.contract.FilterLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return &FunctionsCoordinatorTransmittedIterator{contract: _FunctionsCoordinator.contract, event: "Transmitted", logs: logs, sub: sub}, nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) WatchTransmitted(opts *bind.WatchOpts, sink chan<- 
*FunctionsCoordinatorTransmitted) (event.Subscription, error) { + + logs, sub, err := _FunctionsCoordinator.contract.WatchLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsCoordinatorTransmitted) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "Transmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsCoordinator *FunctionsCoordinatorFilterer) ParseTransmitted(log types.Log) (*FunctionsCoordinatorTransmitted, error) { + event := new(FunctionsCoordinatorTransmitted) + if err := _FunctionsCoordinator.contract.UnpackLog(event, "Transmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LatestConfigDetails struct { + ConfigCount uint32 + BlockNumber uint32 + ConfigDigest [32]byte +} +type LatestConfigDigestAndEpoch struct { + ScanLogs bool + ConfigDigest [32]byte + Epoch uint32 +} + +func (_FunctionsCoordinator *FunctionsCoordinator) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _FunctionsCoordinator.abi.Events["CommitmentDeleted"].ID: + return _FunctionsCoordinator.ParseCommitmentDeleted(log) + case _FunctionsCoordinator.abi.Events["ConfigSet"].ID: + return _FunctionsCoordinator.ParseConfigSet(log) + case _FunctionsCoordinator.abi.Events["ConfigUpdated"].ID: + return _FunctionsCoordinator.ParseConfigUpdated(log) + case _FunctionsCoordinator.abi.Events["OracleRequest"].ID: + return _FunctionsCoordinator.ParseOracleRequest(log) + case _FunctionsCoordinator.abi.Events["OracleResponse"].ID: + return _FunctionsCoordinator.ParseOracleResponse(log) + case 
_FunctionsCoordinator.abi.Events["OwnershipTransferRequested"].ID: + return _FunctionsCoordinator.ParseOwnershipTransferRequested(log) + case _FunctionsCoordinator.abi.Events["OwnershipTransferred"].ID: + return _FunctionsCoordinator.ParseOwnershipTransferred(log) + case _FunctionsCoordinator.abi.Events["RequestBilled"].ID: + return _FunctionsCoordinator.ParseRequestBilled(log) + case _FunctionsCoordinator.abi.Events["Transmitted"].ID: + return _FunctionsCoordinator.ParseTransmitted(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (FunctionsCoordinatorCommitmentDeleted) Topic() common.Hash { + return common.HexToHash("0x8a4b97add3359bd6bcf5e82874363670eb5ad0f7615abddbd0ed0a3a98f0f416") +} + +func (FunctionsCoordinatorConfigSet) Topic() common.Hash { + return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05") +} + +func (FunctionsCoordinatorConfigUpdated) Topic() common.Hash { + return common.HexToHash("0x5f32d06f5e83eda3a68e0e964ef2e6af5cb613e8117aa103c2d6bca5f5184862") +} + +func (FunctionsCoordinatorOracleRequest) Topic() common.Hash { + return common.HexToHash("0xbf50768ccf13bd0110ca6d53a9c4f1f3271abdd4c24a56878863ed25b20598ff") +} + +func (FunctionsCoordinatorOracleResponse) Topic() common.Hash { + return common.HexToHash("0xc708e0440951fd63499c0f7a73819b469ee5dd3ecc356c0ab4eb7f18389009d9") +} + +func (FunctionsCoordinatorOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (FunctionsCoordinatorOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (FunctionsCoordinatorRequestBilled) Topic() common.Hash { + return common.HexToHash("0x90815c2e624694e8010bffad2bcefaf96af282ef1bc2ebc0042d1b89a585e046") +} + +func (FunctionsCoordinatorTransmitted) Topic() 
common.Hash { + return common.HexToHash("0xb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62") +} + +func (_FunctionsCoordinator *FunctionsCoordinator) Address() common.Address { + return _FunctionsCoordinator.address +} + +type FunctionsCoordinatorInterface interface { + EstimateCost(opts *bind.CallOpts, subscriptionId uint64, data []byte, callbackGasLimit uint32, gasPriceWei *big.Int) (*big.Int, error) + + GetAdminFee(opts *bind.CallOpts) (*big.Int, error) + + GetConfig(opts *bind.CallOpts) (FunctionsBillingConfig, error) + + GetDONFee(opts *bind.CallOpts, arg0 []byte) (*big.Int, error) + + GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) + + GetThresholdPublicKey(opts *bind.CallOpts) ([]byte, error) + + GetWeiPerUnitLink(opts *bind.CallOpts) (*big.Int, error) + + LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) + + LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Transmitters(opts *bind.CallOpts) ([]common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + DeleteCommitment(opts *bind.TransactOpts, requestId [32]byte) (*types.Transaction, error) + + OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) + + OracleWithdrawAll(opts *bind.TransactOpts) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) + + SetDONPublicKey(opts *bind.TransactOpts, donPublicKey []byte) (*types.Transaction, error) + + SetThresholdPublicKey(opts *bind.TransactOpts, thresholdPublicKey []byte) (*types.Transaction, error) + + StartRequest(opts *bind.TransactOpts, request FunctionsResponseRequestMeta) 
(*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) + + UpdateConfig(opts *bind.TransactOpts, config FunctionsBillingConfig) (*types.Transaction, error) + + FilterCommitmentDeleted(opts *bind.FilterOpts) (*FunctionsCoordinatorCommitmentDeletedIterator, error) + + WatchCommitmentDeleted(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorCommitmentDeleted) (event.Subscription, error) + + ParseCommitmentDeleted(log types.Log) (*FunctionsCoordinatorCommitmentDeleted, error) + + FilterConfigSet(opts *bind.FilterOpts) (*FunctionsCoordinatorConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*FunctionsCoordinatorConfigSet, error) + + FilterConfigUpdated(opts *bind.FilterOpts) (*FunctionsCoordinatorConfigUpdatedIterator, error) + + WatchConfigUpdated(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorConfigUpdated) (event.Subscription, error) + + ParseConfigUpdated(log types.Log) (*FunctionsCoordinatorConfigUpdated, error) + + FilterOracleRequest(opts *bind.FilterOpts, requestId [][32]byte, requestingContract []common.Address) (*FunctionsCoordinatorOracleRequestIterator, error) + + WatchOracleRequest(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorOracleRequest, requestId [][32]byte, requestingContract []common.Address) (event.Subscription, error) + + ParseOracleRequest(log types.Log) (*FunctionsCoordinatorOracleRequest, error) + + FilterOracleResponse(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsCoordinatorOracleResponseIterator, error) + + WatchOracleResponse(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorOracleResponse, requestId [][32]byte) (event.Subscription, error) + + ParseOracleResponse(log 
types.Log) (*FunctionsCoordinatorOracleResponse, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsCoordinatorOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*FunctionsCoordinatorOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsCoordinatorOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*FunctionsCoordinatorOwnershipTransferred, error) + + FilterRequestBilled(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsCoordinatorRequestBilledIterator, error) + + WatchRequestBilled(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorRequestBilled, requestId [][32]byte) (event.Subscription, error) + + ParseRequestBilled(log types.Log) (*FunctionsCoordinatorRequestBilled, error) + + FilterTransmitted(opts *bind.FilterOpts) (*FunctionsCoordinatorTransmittedIterator, error) + + WatchTransmitted(opts *bind.WatchOpts, sink chan<- *FunctionsCoordinatorTransmitted) (event.Subscription, error) + + ParseTransmitted(log types.Log) (*FunctionsCoordinatorTransmitted, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/functions/generated/functions_load_test_client/functions_load_test_client.go b/core/gethwrappers/functions/generated/functions_load_test_client/functions_load_test_client.go new file mode 100644 index 00000000..c95fa845 --- /dev/null +++ 
b/core/gethwrappers/functions/generated/functions_load_test_client/functions_load_test_client.go @@ -0,0 +1,1108 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package functions_load_test_client + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var FunctionsLoadTestClientMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"router\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"EmptyArgs\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySource\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoInlineSecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyRouterCanFulfill\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\
",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestSent\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"MAX_CALLBACK_GAS\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getStats\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"}],\"name\":\"handleOracleFulfillment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastError\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastRequestID\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastResponse\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalTyp
e\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"resetStats\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"times\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"cborEncodedRequest\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"donId\",\"type\":\"bytes32\"}],\"name\":\"sendEncodedRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"times\",\"type\":\"uint32\"},{\"internalType\":\"string\",\"name\":\"source\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"encryptedSecretsReferences\",\"type\":\"bytes\"},{\"internalType\":\"string[]\",\"name\":\"args\",\"type\":\"string[]\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"donId\",\"type\":\"bytes32\"}],\"name\":\"sendRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"times\",\"type\":\"uint32\"},{\"internalType\":\"string\",\"name\":\"source\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"slotId\",\"type\":\"uint8\"},{\"internalType\":\"uint64\",\"name\":\"slotVersion\",\"type\":\"uint64\"},{\"internalType\":\"string[]\",\"name\":\"args\",\"type\":\"string[]\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"donId\",\"type\":\"bytes32\"}],\"name\":\"sendRequestWithDONHostedSecrets\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"totalEmptyResponses\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"funct
ion\"},{\"inputs\":[],\"name\":\"totalFailedResponses\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"totalRequests\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"totalSucceededResponses\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60a06040523480156200001157600080fd5b50604051620023ff380380620023ff833981016040819052620000349162000180565b6001600160a01b0381166080523380600081620000985760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000cb57620000cb81620000d5565b50505050620001b2565b336001600160a01b038216036200012f5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016200008f565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000602082840312156200019357600080fd5b81516001600160a01b0381168114620001ab57600080fd5b9392505050565b60805161222a620001d5600039600081816102ac0152610fa0015261222a6000f3fe608060405234801561001057600080fd5b506004361061011b5760003560e01c806379ba5097116100b2578063954491c111610081578063c59d484711610066578063c59d484714610246578063c9429e2a14610261578063f2fde38b1461028157600080fd5b8063954491c114610220578063b2518e0e1461023357600080fd5b806379ba5097146101cd578063887efe94146101d55780638aea61dc146101e
85780638da5cb5b146101f857600080fd5b80635c1d92e9116100ee5780635c1d92e91461019b57806362747e42146101b35780636d9809a0146101bb578063724ec8a2146101c557600080fd5b80630ca761751461012057806329f0de3f146101355780632ab424da1461015357806347c0318614610184575b600080fd5b61013361012e3660046118b4565b610294565b005b61013d61033e565b60405161014a9190611985565b60405180910390f35b60055461016f9068010000000000000000900463ffffffff1681565b60405163ffffffff909116815260200161014a565b61018d60025481565b60405190815260200161014a565b60055461016f90640100000000900463ffffffff1681565b61013d6103cc565b61016f6203d09081565b6101336103d9565b61013361044b565b6101336101e3366004611a57565b61054d565b60055461016f9063ffffffff1681565b60005460405173ffffffffffffffffffffffffffffffffffffffff909116815260200161014a565b61013361022e366004611b1b565b6106bf565b610133610241366004611be1565b6107d5565b61024e610854565b60405161014a9796959493929190611c47565b60055461016f906c01000000000000000000000000900463ffffffff1681565b61013361028f366004611ca4565b6109dc565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610303576040517fc6829f8300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61030e8383836109f0565b60405183907f85e1543bf2f84fe80c6badbce3648c8539ad1df4d2b3d822938ca0538be727e690600090a2505050565b6004805461034b90611cda565b80601f016020809104026020016040519081016040528092919081815260200182805461037790611cda565b80156103c45780601f10610399576101008083540402835291602001916103c4565b820191906000526020600020905b8154815290600101906020018083116103a757829003601f168201915b505050505081565b6003805461034b90611cda565b6103e1610afb565b6000600281905560408051602081019091529081526003906104039082611d7b565b506040805160208101909152600081526004906104209082611d7b565b50600580547fffffffffffffffffffffffffffffffff00000000000000000000000000000000169055565b60015473ffffffffffffffffffffffffffffffffffffffff1633146104d1576040517f08c379a0000000000000000000000000000000000000000000000
00000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610555610afb565b6105966040805160e0810190915280600081526020016000815260200160008152602001606081526020016060815260200160608152602001606081525090565b6105d889898080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508593925050610b7e9050565b85156106205761062087878080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508593925050610b8f9050565b831561063a5761063a6106338587611e95565b8290610bd9565b60005b8a63ffffffff168110156106b25761066261065783610c1c565b856203d09086610f9b565b600255600580546001919060009061068190849063ffffffff16611f5c565b92506101000a81548163ffffffff021916908363ffffffff16021790555080806106aa90611f80565b91505061063d565b5050505050505050505050565b6106c7610afb565b6107086040805160e0810190915280600081526020016000815260200160008152602001606081526020016060815260200160608152602001606081525090565b61074a89898080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508593925050610b7e9050565b61075581888861107a565b8315610768576107686106338587611e95565b60005b8a63ffffffff168110156106b25761078561065783610c1c565b60025560058054600191906000906107a490849063ffffffff16611f5c565b92506101000a81548163ffffffff021916908363ffffffff16021790555080806107cd90611f80565b91505061076b565b6107dd610afb565b60005b8463ffffffff1681101561084d576107fd84846203d09085610f9b565b600255600580546001919060009061081c90849063ffffffff16611f5c565b92506101000a81548163ffffffff021916908363ffffffff160217905550808061084590611f80565b9150506107e0565b5050505050565b6000606080600080600080610867610afb565b6002546005546003805
4909160049163ffffffff808316926801000000000000000081048216926c0100000000000000000000000082048316926401000000009092049091169086906108b990611cda565b80601f01602080910402602001604051908101604052809291908181526020018280546108e590611cda565b80156109325780601f1061090757610100808354040283529160200191610932565b820191906000526020600020905b81548152906001019060200180831161091557829003601f168201915b5050505050955084805461094590611cda565b80601f016020809104026020016040519081016040528092919081815260200182805461097190611cda565b80156109be5780601f10610993576101008083540402835291602001916109be565b820191906000526020600020905b8154815290600101906020018083116109a157829003601f168201915b50505050509450965096509650965096509650965090919293949596565b6109e4610afb565b6109ed8161113d565b50565b60028390556003610a018382611d7b565b506004610a0e8282611d7b565b508151600003610a59576001600560048282829054906101000a900463ffffffff16610a3a9190611f5c565b92506101000a81548163ffffffff021916908363ffffffff1602179055505b805115610aa15760016005600c8282829054906101000a900463ffffffff16610a829190611f5c565b92506101000a81548163ffffffff021916908363ffffffff1602179055505b815115801590610ab057508051155b15610af6576001600560088282829054906101000a900463ffffffff16610ad79190611f5c565b92506101000a81548163ffffffff021916908363ffffffff1602179055505b505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610b7c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016104c8565b565b610b8b8260008084611232565b5050565b8051600003610bca576040517fe889636f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60016020830152608090910152565b8051600003610c14576040517ffe936cb700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60a090910152565b60606000610c2b6101006112c9565b9050610c756040518060400160405280600c81526020017f636f64654c6f636174696f6e0000000000000000000
000000000000000000000815250826112ea90919063ffffffff16565b8251610c93906002811115610c8c57610c8c611fb8565b8290611303565b60408051808201909152600881527f6c616e67756167650000000000000000000000000000000000000000000000006020820152610cd29082906112ea565b6040830151610ce9908015610c8c57610c8c611fb8565b60408051808201909152600681527f736f7572636500000000000000000000000000000000000000000000000000006020820152610d289082906112ea565b6060830151610d389082906112ea565b60a08301515115610de55760408051808201909152600481527f61726773000000000000000000000000000000000000000000000000000000006020820152610d829082906112ea565b610d8b8161133c565b60005b8360a0015151811015610ddb57610dcb8460a001518281518110610db457610db4611fe7565b6020026020010151836112ea90919063ffffffff16565b610dd481611f80565b9050610d8e565b50610de581611360565b60808301515115610ee657600083602001516002811115610e0857610e08611fb8565b03610e3f576040517fa80d31f700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60408051808201909152600f81527f736563726574734c6f636174696f6e00000000000000000000000000000000006020820152610e7e9082906112ea565b610e9783602001516002811115610c8c57610c8c611fb8565b60408051808201909152600781527f73656372657473000000000000000000000000000000000000000000000000006020820152610ed69082906112ea565b6080830151610ee690829061137e565b60c08301515115610f935760408051808201909152600981527f62797465734172677300000000000000000000000000000000000000000000006020820152610f309082906112ea565b610f398161133c565b60005b8360c0015151811015610f8957610f798460c001518281518110610f6257610f62611fe7565b60200260200101518361137e90919063ffffffff16565b610f8281611f80565b9050610f3c565b50610f9381611360565b515192915050565b6000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663461d27628688600188886040518663ffffffff1660e01b8152600401611000959493929190612016565b6020604051808303816000875af115801561101f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611043919061206
0565b60405190915081907f1131472297a800fee664d1d89cfa8f7676ff07189ecc53f80bbb5f4969099db890600090a295945050505050565b60006110876101006112c9565b90506110d16040518060400160405280600681526020017f736c6f7449440000000000000000000000000000000000000000000000000000815250826112ea90919063ffffffff16565b6110de8160ff851661138b565b60408051808201909152600781527f76657273696f6e00000000000000000000000000000000000000000000000000602082015261111d9082906112ea565b611127818361138b565b6002602085015251516080909301929092525050565b3373ffffffffffffffffffffffffffffffffffffffff8216036111bc576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016104c8565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b805160000361126d576040517f22ce3edd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8383600281111561128057611280611fb8565b9081600281111561129357611293611fb8565b905250604084018280156112a9576112a9611fb8565b908180156112b9576112b9611fb8565b9052506060909301929092525050565b6112d161176b565b80516112dd9083611397565b5060006020820152919050565b6112f78260038351611411565b8151610af69082611538565b81516113109060c2611560565b50610b8b828260405160200161132891815260200190565b60405160208183030381529060405261137e565b6113478160046115c9565b60018160200181815161135a9190612079565b90525050565b61136b8160076115c9565b60018160200181815161135a919061208c565b6112f78260028351611411565b610b8b82600083611411565b6040805180820190915260608152600060208201526113b760208361209f565b156113df576113c760208361209f565b6113d290602061208c565b6113dc9083612079565b91505b60208084018390526040518085526000815290818401018181101561140357600080fd5b604052508290505b92915050565b60178167ffffffffffffffff161161143e5782516114389
060e0600585901b168317611560565b50505050565b60ff8167ffffffffffffffff1611611480578251611467906018611fe0600586901b1617611560565b5082516114389067ffffffffffffffff831660016115e0565b61ffff8167ffffffffffffffff16116114c35782516114aa906019611fe0600586901b1617611560565b5082516114389067ffffffffffffffff831660026115e0565b63ffffffff8167ffffffffffffffff16116115085782516114ef90601a611fe0600586901b1617611560565b5082516114389067ffffffffffffffff831660046115e0565b825161151f90601b611fe0600586901b1617611560565b5082516114389067ffffffffffffffff831660086115e0565b60408051808201909152606081526000602082015261155983838451611665565b9392505050565b6040805180820190915260608152600060208201528251516000611585826001612079565b9050846020015182106115a6576115a6856115a18360026120da565b611754565b84516020838201018581535080518211156115bf578181525b5093949350505050565b8151610af690601f611fe0600585901b1617611560565b60408051808201909152606081526000602082015283515160006116048285612079565b9050856020015181111561162157611621866115a18360026120da565b6000600161163186610100612211565b61163b919061208c565b90508651828101878319825116178152508051831115611659578281525b50959695505050505050565b604080518082019091526060815260006020820152825182111561168857600080fd5b83515160006116978483612079565b905085602001518111156116b4576116b4866115a18360026120da565b8551805183820160200191600091808511156116ce578482525b505050602086015b6020861061170e57805182526116ed602083612079565b91506116fa602082612079565b905061170760208761208c565b95506116d6565b5181517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60208890036101000a0190811690199190911617905250849150509392505050565b81516117608383611397565b506114388382611538565b6040518060400160405280611793604051806040016040528060608152602001600081525090565b8152602001600081525090565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156118165761181
66117a0565b604052919050565b600067ffffffffffffffff831115611838576118386117a0565b61186960207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f860116016117cf565b905082815283838301111561187d57600080fd5b828260208301376000602084830101529392505050565b600082601f8301126118a557600080fd5b6115598383356020850161181e565b6000806000606084860312156118c957600080fd5b83359250602084013567ffffffffffffffff808211156118e857600080fd5b6118f487838801611894565b9350604086013591508082111561190a57600080fd5b5061191786828701611894565b9150509250925092565b6000815180845260005b818110156119475760208185018101518683018201520161192b565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b6020815260006115596020830184611921565b803563ffffffff811681146119ac57600080fd5b919050565b60008083601f8401126119c357600080fd5b50813567ffffffffffffffff8111156119db57600080fd5b6020830191508360208285010111156119f357600080fd5b9250929050565b60008083601f840112611a0c57600080fd5b50813567ffffffffffffffff811115611a2457600080fd5b6020830191508360208260051b85010111156119f357600080fd5b803567ffffffffffffffff811681146119ac57600080fd5b600080600080600080600080600060c08a8c031215611a7557600080fd5b611a7e8a611998565b985060208a013567ffffffffffffffff80821115611a9b57600080fd5b611aa78d838e016119b1565b909a50985060408c0135915080821115611ac057600080fd5b611acc8d838e016119b1565b909850965060608c0135915080821115611ae557600080fd5b50611af28c828d016119fa565b9095509350611b05905060808b01611a3f565b915060a08a013590509295985092959850929598565b600080600080600080600080600060e08a8c031215611b3957600080fd5b611b428a611998565b985060208a013567ffffffffffffffff80821115611b5f57600080fd5b611b6b8d838e016119b1565b909a50985060408c0135915060ff82168214611b8657600080fd5b819750611b9560608d01611a3f565b965060808c0135915080821115611bab57600080fd5b50611bb88c828d016119fa565b9095509350611bcb905060a08b01611a3f565b915060c08a013590509295985092959850929598565b60008060008060808587031215611bf757600080fd5b611
c0085611998565b9350602085013567ffffffffffffffff811115611c1c57600080fd5b611c2887828801611894565b935050611c3760408601611a3f565b9396929550929360600135925050565b87815260e060208201526000611c6060e0830189611921565b8281036040840152611c728189611921565b63ffffffff97881660608501529587166080840152505091841660a083015290921660c0909201919091529392505050565b600060208284031215611cb657600080fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811461155957600080fd5b600181811c90821680611cee57607f821691505b602082108103611d27577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f821115610af657600081815260208120601f850160051c81016020861015611d545750805b601f850160051c820191505b81811015611d7357828155600101611d60565b505050505050565b815167ffffffffffffffff811115611d9557611d956117a0565b611da981611da38454611cda565b84611d2d565b602080601f831160018114611dfc5760008415611dc65750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555611d73565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b82811015611e4957888601518255948401946001909101908401611e2a565b5085821015611e8557878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b600067ffffffffffffffff80841115611eb057611eb06117a0565b8360051b6020611ec18183016117cf565b868152918501918181019036841115611ed957600080fd5b865b84811015611f2157803586811115611ef35760008081fd5b880136601f820112611f055760008081fd5b611f1336823587840161181e565b845250918301918301611edb565b50979650505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b63ffffffff818116838216019080821115611f7957611f79611f2d565b5092915050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611fb157611fb1611f2d565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045
260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b67ffffffffffffffff8616815260a06020820152600061203960a0830187611921565b61ffff9590951660408301525063ffffffff92909216606083015260809091015292915050565b60006020828403121561207257600080fd5b5051919050565b8082018082111561140b5761140b611f2d565b8181038181111561140b5761140b611f2d565b6000826120d5577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500690565b808202811582820484141761140b5761140b611f2d565b600181815b8085111561214a57817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0482111561213057612130611f2d565b8085161561213d57918102915b93841c93908002906120f6565b509250929050565b6000826121615750600161140b565b8161216e5750600061140b565b8160018114612184576002811461218e576121aa565b600191505061140b565b60ff84111561219f5761219f611f2d565b50506001821b61140b565b5060208310610133831016604e8410600b84101617156121cd575081810a61140b565b6121d783836120f1565b807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0482111561220957612209611f2d565b029392505050565b6000611559838361215256fea164736f6c6343000813000a", +} + +var FunctionsLoadTestClientABI = FunctionsLoadTestClientMetaData.ABI + +var FunctionsLoadTestClientBin = FunctionsLoadTestClientMetaData.Bin + +func DeployFunctionsLoadTestClient(auth *bind.TransactOpts, backend bind.ContractBackend, router common.Address) (common.Address, *types.Transaction, *FunctionsLoadTestClient, error) { + parsed, err := FunctionsLoadTestClientMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(FunctionsLoadTestClientBin), backend, router) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &FunctionsLoadTestClient{address: address, abi: *parsed, 
FunctionsLoadTestClientCaller: FunctionsLoadTestClientCaller{contract: contract}, FunctionsLoadTestClientTransactor: FunctionsLoadTestClientTransactor{contract: contract}, FunctionsLoadTestClientFilterer: FunctionsLoadTestClientFilterer{contract: contract}}, nil +} + +type FunctionsLoadTestClient struct { + address common.Address + abi abi.ABI + FunctionsLoadTestClientCaller + FunctionsLoadTestClientTransactor + FunctionsLoadTestClientFilterer +} + +type FunctionsLoadTestClientCaller struct { + contract *bind.BoundContract +} + +type FunctionsLoadTestClientTransactor struct { + contract *bind.BoundContract +} + +type FunctionsLoadTestClientFilterer struct { + contract *bind.BoundContract +} + +type FunctionsLoadTestClientSession struct { + Contract *FunctionsLoadTestClient + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type FunctionsLoadTestClientCallerSession struct { + Contract *FunctionsLoadTestClientCaller + CallOpts bind.CallOpts +} + +type FunctionsLoadTestClientTransactorSession struct { + Contract *FunctionsLoadTestClientTransactor + TransactOpts bind.TransactOpts +} + +type FunctionsLoadTestClientRaw struct { + Contract *FunctionsLoadTestClient +} + +type FunctionsLoadTestClientCallerRaw struct { + Contract *FunctionsLoadTestClientCaller +} + +type FunctionsLoadTestClientTransactorRaw struct { + Contract *FunctionsLoadTestClientTransactor +} + +func NewFunctionsLoadTestClient(address common.Address, backend bind.ContractBackend) (*FunctionsLoadTestClient, error) { + abi, err := abi.JSON(strings.NewReader(FunctionsLoadTestClientABI)) + if err != nil { + return nil, err + } + contract, err := bindFunctionsLoadTestClient(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &FunctionsLoadTestClient{address: address, abi: abi, FunctionsLoadTestClientCaller: FunctionsLoadTestClientCaller{contract: contract}, FunctionsLoadTestClientTransactor: FunctionsLoadTestClientTransactor{contract: contract}, 
FunctionsLoadTestClientFilterer: FunctionsLoadTestClientFilterer{contract: contract}}, nil +} + +func NewFunctionsLoadTestClientCaller(address common.Address, caller bind.ContractCaller) (*FunctionsLoadTestClientCaller, error) { + contract, err := bindFunctionsLoadTestClient(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FunctionsLoadTestClientCaller{contract: contract}, nil +} + +func NewFunctionsLoadTestClientTransactor(address common.Address, transactor bind.ContractTransactor) (*FunctionsLoadTestClientTransactor, error) { + contract, err := bindFunctionsLoadTestClient(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &FunctionsLoadTestClientTransactor{contract: contract}, nil +} + +func NewFunctionsLoadTestClientFilterer(address common.Address, filterer bind.ContractFilterer) (*FunctionsLoadTestClientFilterer, error) { + contract, err := bindFunctionsLoadTestClient(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FunctionsLoadTestClientFilterer{contract: contract}, nil +} + +func bindFunctionsLoadTestClient(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FunctionsLoadTestClientMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsLoadTestClient.Contract.FunctionsLoadTestClientCaller.contract.Call(opts, result, method, params...) 
+} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.FunctionsLoadTestClientTransactor.contract.Transfer(opts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.FunctionsLoadTestClientTransactor.contract.Transact(opts, method, params...) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsLoadTestClient.Contract.contract.Call(opts, result, method, params...) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.contract.Transfer(opts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.contract.Transact(opts, method, params...) 
+} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCaller) MAXCALLBACKGAS(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _FunctionsLoadTestClient.contract.Call(opts, &out, "MAX_CALLBACK_GAS") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) MAXCALLBACKGAS() (uint32, error) { + return _FunctionsLoadTestClient.Contract.MAXCALLBACKGAS(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCallerSession) MAXCALLBACKGAS() (uint32, error) { + return _FunctionsLoadTestClient.Contract.MAXCALLBACKGAS(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCaller) GetStats(opts *bind.CallOpts) ([32]byte, []byte, []byte, uint32, uint32, uint32, uint32, error) { + var out []interface{} + err := _FunctionsLoadTestClient.contract.Call(opts, &out, "getStats") + + if err != nil { + return *new([32]byte), *new([]byte), *new([]byte), *new(uint32), *new(uint32), *new(uint32), *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + out1 := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + out2 := *abi.ConvertType(out[2], new([]byte)).(*[]byte) + out3 := *abi.ConvertType(out[3], new(uint32)).(*uint32) + out4 := *abi.ConvertType(out[4], new(uint32)).(*uint32) + out5 := *abi.ConvertType(out[5], new(uint32)).(*uint32) + out6 := *abi.ConvertType(out[6], new(uint32)).(*uint32) + + return out0, out1, out2, out3, out4, out5, out6, err + +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) GetStats() ([32]byte, []byte, []byte, uint32, uint32, uint32, uint32, error) { + return _FunctionsLoadTestClient.Contract.GetStats(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCallerSession) GetStats() ([32]byte, []byte, []byte, uint32, 
uint32, uint32, uint32, error) { + return _FunctionsLoadTestClient.Contract.GetStats(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCaller) LastError(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _FunctionsLoadTestClient.contract.Call(opts, &out, "lastError") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) LastError() ([]byte, error) { + return _FunctionsLoadTestClient.Contract.LastError(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCallerSession) LastError() ([]byte, error) { + return _FunctionsLoadTestClient.Contract.LastError(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCaller) LastRequestID(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _FunctionsLoadTestClient.contract.Call(opts, &out, "lastRequestID") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) LastRequestID() ([32]byte, error) { + return _FunctionsLoadTestClient.Contract.LastRequestID(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCallerSession) LastRequestID() ([32]byte, error) { + return _FunctionsLoadTestClient.Contract.LastRequestID(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCaller) LastResponse(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _FunctionsLoadTestClient.contract.Call(opts, &out, "lastResponse") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func 
(_FunctionsLoadTestClient *FunctionsLoadTestClientSession) LastResponse() ([]byte, error) { + return _FunctionsLoadTestClient.Contract.LastResponse(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCallerSession) LastResponse() ([]byte, error) { + return _FunctionsLoadTestClient.Contract.LastResponse(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _FunctionsLoadTestClient.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) Owner() (common.Address, error) { + return _FunctionsLoadTestClient.Contract.Owner(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCallerSession) Owner() (common.Address, error) { + return _FunctionsLoadTestClient.Contract.Owner(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCaller) TotalEmptyResponses(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _FunctionsLoadTestClient.contract.Call(opts, &out, "totalEmptyResponses") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) TotalEmptyResponses() (uint32, error) { + return _FunctionsLoadTestClient.Contract.TotalEmptyResponses(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCallerSession) TotalEmptyResponses() (uint32, error) { + return _FunctionsLoadTestClient.Contract.TotalEmptyResponses(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient 
*FunctionsLoadTestClientCaller) TotalFailedResponses(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _FunctionsLoadTestClient.contract.Call(opts, &out, "totalFailedResponses") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) TotalFailedResponses() (uint32, error) { + return _FunctionsLoadTestClient.Contract.TotalFailedResponses(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCallerSession) TotalFailedResponses() (uint32, error) { + return _FunctionsLoadTestClient.Contract.TotalFailedResponses(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCaller) TotalRequests(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _FunctionsLoadTestClient.contract.Call(opts, &out, "totalRequests") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) TotalRequests() (uint32, error) { + return _FunctionsLoadTestClient.Contract.TotalRequests(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCallerSession) TotalRequests() (uint32, error) { + return _FunctionsLoadTestClient.Contract.TotalRequests(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCaller) TotalSucceededResponses(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _FunctionsLoadTestClient.contract.Call(opts, &out, "totalSucceededResponses") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) TotalSucceededResponses() 
(uint32, error) { + return _FunctionsLoadTestClient.Contract.TotalSucceededResponses(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientCallerSession) TotalSucceededResponses() (uint32, error) { + return _FunctionsLoadTestClient.Contract.TotalSucceededResponses(&_FunctionsLoadTestClient.CallOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsLoadTestClient.contract.Transact(opts, "acceptOwnership") +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) AcceptOwnership() (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.AcceptOwnership(&_FunctionsLoadTestClient.TransactOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.AcceptOwnership(&_FunctionsLoadTestClient.TransactOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactor) HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _FunctionsLoadTestClient.contract.Transact(opts, "handleOracleFulfillment", requestId, response, err) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.HandleOracleFulfillment(&_FunctionsLoadTestClient.TransactOpts, requestId, response, err) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactorSession) HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.HandleOracleFulfillment(&_FunctionsLoadTestClient.TransactOpts, requestId, response, err) +} + +func (_FunctionsLoadTestClient 
*FunctionsLoadTestClientTransactor) ResetStats(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsLoadTestClient.contract.Transact(opts, "resetStats") +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) ResetStats() (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.ResetStats(&_FunctionsLoadTestClient.TransactOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactorSession) ResetStats() (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.ResetStats(&_FunctionsLoadTestClient.TransactOpts) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactor) SendEncodedRequest(opts *bind.TransactOpts, times uint32, cborEncodedRequest []byte, subscriptionId uint64, donId [32]byte) (*types.Transaction, error) { + return _FunctionsLoadTestClient.contract.Transact(opts, "sendEncodedRequest", times, cborEncodedRequest, subscriptionId, donId) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) SendEncodedRequest(times uint32, cborEncodedRequest []byte, subscriptionId uint64, donId [32]byte) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.SendEncodedRequest(&_FunctionsLoadTestClient.TransactOpts, times, cborEncodedRequest, subscriptionId, donId) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactorSession) SendEncodedRequest(times uint32, cborEncodedRequest []byte, subscriptionId uint64, donId [32]byte) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.SendEncodedRequest(&_FunctionsLoadTestClient.TransactOpts, times, cborEncodedRequest, subscriptionId, donId) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactor) SendRequest(opts *bind.TransactOpts, times uint32, source string, encryptedSecretsReferences []byte, args []string, subscriptionId uint64, donId [32]byte) (*types.Transaction, error) { + return _FunctionsLoadTestClient.contract.Transact(opts, "sendRequest", 
times, source, encryptedSecretsReferences, args, subscriptionId, donId) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) SendRequest(times uint32, source string, encryptedSecretsReferences []byte, args []string, subscriptionId uint64, donId [32]byte) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.SendRequest(&_FunctionsLoadTestClient.TransactOpts, times, source, encryptedSecretsReferences, args, subscriptionId, donId) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactorSession) SendRequest(times uint32, source string, encryptedSecretsReferences []byte, args []string, subscriptionId uint64, donId [32]byte) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.SendRequest(&_FunctionsLoadTestClient.TransactOpts, times, source, encryptedSecretsReferences, args, subscriptionId, donId) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactor) SendRequestWithDONHostedSecrets(opts *bind.TransactOpts, times uint32, source string, slotId uint8, slotVersion uint64, args []string, subscriptionId uint64, donId [32]byte) (*types.Transaction, error) { + return _FunctionsLoadTestClient.contract.Transact(opts, "sendRequestWithDONHostedSecrets", times, source, slotId, slotVersion, args, subscriptionId, donId) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) SendRequestWithDONHostedSecrets(times uint32, source string, slotId uint8, slotVersion uint64, args []string, subscriptionId uint64, donId [32]byte) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.SendRequestWithDONHostedSecrets(&_FunctionsLoadTestClient.TransactOpts, times, source, slotId, slotVersion, args, subscriptionId, donId) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactorSession) SendRequestWithDONHostedSecrets(times uint32, source string, slotId uint8, slotVersion uint64, args []string, subscriptionId uint64, donId [32]byte) (*types.Transaction, error) { 
+ return _FunctionsLoadTestClient.Contract.SendRequestWithDONHostedSecrets(&_FunctionsLoadTestClient.TransactOpts, times, source, slotId, slotVersion, args, subscriptionId, donId) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _FunctionsLoadTestClient.contract.Transact(opts, "transferOwnership", to) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.TransferOwnership(&_FunctionsLoadTestClient.TransactOpts, to) +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _FunctionsLoadTestClient.Contract.TransferOwnership(&_FunctionsLoadTestClient.TransactOpts, to) +} + +type FunctionsLoadTestClientOwnershipTransferRequestedIterator struct { + Event *FunctionsLoadTestClientOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsLoadTestClientOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsLoadTestClientOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsLoadTestClientOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*FunctionsLoadTestClientOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *FunctionsLoadTestClientOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsLoadTestClientOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsLoadTestClientOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsLoadTestClient.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsLoadTestClientOwnershipTransferRequestedIterator{contract: _FunctionsLoadTestClient.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsLoadTestClientOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsLoadTestClient.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsLoadTestClientOwnershipTransferRequested) + if err := 
_FunctionsLoadTestClient.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientFilterer) ParseOwnershipTransferRequested(log types.Log) (*FunctionsLoadTestClientOwnershipTransferRequested, error) { + event := new(FunctionsLoadTestClientOwnershipTransferRequested) + if err := _FunctionsLoadTestClient.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsLoadTestClientOwnershipTransferredIterator struct { + Event *FunctionsLoadTestClientOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsLoadTestClientOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsLoadTestClientOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsLoadTestClientOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsLoadTestClientOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *FunctionsLoadTestClientOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
FunctionsLoadTestClientOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsLoadTestClientOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsLoadTestClient.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsLoadTestClientOwnershipTransferredIterator{contract: _FunctionsLoadTestClient.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsLoadTestClientOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsLoadTestClient.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsLoadTestClientOwnershipTransferred) + if err := _FunctionsLoadTestClient.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), 
nil +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientFilterer) ParseOwnershipTransferred(log types.Log) (*FunctionsLoadTestClientOwnershipTransferred, error) { + event := new(FunctionsLoadTestClientOwnershipTransferred) + if err := _FunctionsLoadTestClient.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsLoadTestClientRequestFulfilledIterator struct { + Event *FunctionsLoadTestClientRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsLoadTestClientRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsLoadTestClientRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsLoadTestClientRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsLoadTestClientRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *FunctionsLoadTestClientRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsLoadTestClientRequestFulfilled struct { + Id [32]byte + Raw types.Log +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientFilterer) FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*FunctionsLoadTestClientRequestFulfilledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_FunctionsLoadTestClient.contract.FilterLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return &FunctionsLoadTestClientRequestFulfilledIterator{contract: _FunctionsLoadTestClient.contract, event: "RequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientFilterer) WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *FunctionsLoadTestClientRequestFulfilled, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _FunctionsLoadTestClient.contract.WatchLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsLoadTestClientRequestFulfilled) + if err := _FunctionsLoadTestClient.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientFilterer) ParseRequestFulfilled(log types.Log) (*FunctionsLoadTestClientRequestFulfilled, error) { + event := new(FunctionsLoadTestClientRequestFulfilled) + if err := _FunctionsLoadTestClient.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsLoadTestClientRequestSentIterator struct { + Event *FunctionsLoadTestClientRequestSent + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsLoadTestClientRequestSentIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + 
select { + case log := <-it.logs: + it.Event = new(FunctionsLoadTestClientRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsLoadTestClientRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsLoadTestClientRequestSentIterator) Error() error { + return it.fail +} + +func (it *FunctionsLoadTestClientRequestSentIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsLoadTestClientRequestSent struct { + Id [32]byte + Raw types.Log +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientFilterer) FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*FunctionsLoadTestClientRequestSentIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _FunctionsLoadTestClient.contract.FilterLogs(opts, "RequestSent", idRule) + if err != nil { + return nil, err + } + return &FunctionsLoadTestClientRequestSentIterator{contract: _FunctionsLoadTestClient.contract, event: "RequestSent", logs: logs, sub: sub}, nil +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientFilterer) WatchRequestSent(opts *bind.WatchOpts, sink chan<- *FunctionsLoadTestClientRequestSent, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _FunctionsLoadTestClient.contract.WatchLogs(opts, "RequestSent", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log 
:= <-logs: + + event := new(FunctionsLoadTestClientRequestSent) + if err := _FunctionsLoadTestClient.contract.UnpackLog(event, "RequestSent", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClientFilterer) ParseRequestSent(log types.Log) (*FunctionsLoadTestClientRequestSent, error) { + event := new(FunctionsLoadTestClientRequestSent) + if err := _FunctionsLoadTestClient.contract.UnpackLog(event, "RequestSent", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClient) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _FunctionsLoadTestClient.abi.Events["OwnershipTransferRequested"].ID: + return _FunctionsLoadTestClient.ParseOwnershipTransferRequested(log) + case _FunctionsLoadTestClient.abi.Events["OwnershipTransferred"].ID: + return _FunctionsLoadTestClient.ParseOwnershipTransferred(log) + case _FunctionsLoadTestClient.abi.Events["RequestFulfilled"].ID: + return _FunctionsLoadTestClient.ParseRequestFulfilled(log) + case _FunctionsLoadTestClient.abi.Events["RequestSent"].ID: + return _FunctionsLoadTestClient.ParseRequestSent(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (FunctionsLoadTestClientOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (FunctionsLoadTestClientOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (FunctionsLoadTestClientRequestFulfilled) Topic() common.Hash { + return 
common.HexToHash("0x85e1543bf2f84fe80c6badbce3648c8539ad1df4d2b3d822938ca0538be727e6") +} + +func (FunctionsLoadTestClientRequestSent) Topic() common.Hash { + return common.HexToHash("0x1131472297a800fee664d1d89cfa8f7676ff07189ecc53f80bbb5f4969099db8") +} + +func (_FunctionsLoadTestClient *FunctionsLoadTestClient) Address() common.Address { + return _FunctionsLoadTestClient.address +} + +type FunctionsLoadTestClientInterface interface { + MAXCALLBACKGAS(opts *bind.CallOpts) (uint32, error) + + GetStats(opts *bind.CallOpts) ([32]byte, []byte, []byte, uint32, uint32, uint32, uint32, error) + + LastError(opts *bind.CallOpts) ([]byte, error) + + LastRequestID(opts *bind.CallOpts) ([32]byte, error) + + LastResponse(opts *bind.CallOpts) ([]byte, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TotalEmptyResponses(opts *bind.CallOpts) (uint32, error) + + TotalFailedResponses(opts *bind.CallOpts) (uint32, error) + + TotalRequests(opts *bind.CallOpts) (uint32, error) + + TotalSucceededResponses(opts *bind.CallOpts) (uint32, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) + + ResetStats(opts *bind.TransactOpts) (*types.Transaction, error) + + SendEncodedRequest(opts *bind.TransactOpts, times uint32, cborEncodedRequest []byte, subscriptionId uint64, donId [32]byte) (*types.Transaction, error) + + SendRequest(opts *bind.TransactOpts, times uint32, source string, encryptedSecretsReferences []byte, args []string, subscriptionId uint64, donId [32]byte) (*types.Transaction, error) + + SendRequestWithDONHostedSecrets(opts *bind.TransactOpts, times uint32, source string, slotId uint8, slotVersion uint64, args []string, subscriptionId uint64, donId [32]byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + 
FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsLoadTestClientOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsLoadTestClientOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*FunctionsLoadTestClientOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsLoadTestClientOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsLoadTestClientOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*FunctionsLoadTestClientOwnershipTransferred, error) + + FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*FunctionsLoadTestClientRequestFulfilledIterator, error) + + WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *FunctionsLoadTestClientRequestFulfilled, id [][32]byte) (event.Subscription, error) + + ParseRequestFulfilled(log types.Log) (*FunctionsLoadTestClientRequestFulfilled, error) + + FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*FunctionsLoadTestClientRequestSentIterator, error) + + WatchRequestSent(opts *bind.WatchOpts, sink chan<- *FunctionsLoadTestClientRequestSent, id [][32]byte) (event.Subscription, error) + + ParseRequestSent(log types.Log) (*FunctionsLoadTestClientRequestSent, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/functions/generated/functions_router/functions_router.go b/core/gethwrappers/functions/generated/functions_router/functions_router.go new file mode 100644 index 00000000..c95dfe95 --- /dev/null +++ b/core/gethwrappers/functions/generated/functions_router/functions_router.go @@ 
-0,0 +1,3668 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package functions_router + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type FunctionsResponseCommitment struct { + RequestId [32]byte + Coordinator common.Address + EstimatedTotalCostJuels *big.Int + Client common.Address + SubscriptionId uint64 + CallbackGasLimit uint32 + AdminFee *big.Int + DonFee *big.Int + GasOverheadBeforeCallback *big.Int + GasOverheadAfterCallback *big.Int + TimeoutTimestamp uint32 +} + +type FunctionsRouterConfig struct { + MaxConsumersPerSubscription uint16 + AdminFee *big.Int + HandleOracleFulfillmentSelector [4]byte + GasForCallExactCheck uint16 + MaxCallbackGasLimits []uint32 + SubscriptionDepositMinimumRequests uint16 + SubscriptionDepositJuels *big.Int +} + +type IFunctionsSubscriptionsConsumer struct { + Allowed bool + InitiatedRequests uint64 + CompletedRequests uint64 +} + +type IFunctionsSubscriptionsSubscription struct { + Balance *big.Int + Owner common.Address + BlockedBalance *big.Int + ProposedOwner common.Address + Consumers []common.Address + Flags [32]byte +} + +var FunctionsRouterMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"linkToken\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint16\",\"name\":\"maxConsumersPerSubscription\",\"type\":\"uint16\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"bytes4\",\"name\":\"handleOracleFulfillmentSelector\",\"type\":\"bytes4\"},{\"internalType\":\"uint16\",\"name\":\"gasForCallExactCheck\",\"type\":\"uint16\"},{\"internalType\":\"uint32[]\",\"name\":\"maxCallbackGasLimits\",\"type\":\"uint32[]\"},{\"internalType\":\"uint16\",\"name\":\"subscriptionDepositMinimumRequests\",\"type\":\"uint16\"},{\"internalType\":\"uint72\",\"name\":\"subscriptionDepositJuels\",\"type\":\"uint72\"}],\"internalType\":\"structFunctionsRouter.Config\",\"name\":\"config\",\"type\":\"tuple\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"CannotRemoveWithPendingRequests\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"DuplicateRequestId\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyRequestData\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"limit\",\"type\":\"uint32\"}],\"name\":\"GasLimitTooBig\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"IdentifierIsReserved\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"currentBalanceJuels\",\"type\":\"uint96\"}],\"name\":\"InsufficientBalance\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidConsumer\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"value\",\"type\":\"uint8\"}],\"name\":\"InvalidGasFlagValue\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidProposal\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSubscription\",\"type\":\"error\"},{\"inputs\":[{\"internalTyp
e\":\"address\",\"name\":\"proposedOwner\",\"type\":\"address\"}],\"name\":\"MustBeProposedOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MustBeSubscriptionOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableFromCoordinator\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableFromLink\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RouteNotFound\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"SenderMustAcceptTermsOfService\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TimeoutNotExceeded\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"maximumConsumers\",\"type\":\"uint16\"}],\"name\":\"TooManyConsumers\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"uint16\",\"name\":\"maxConsumersPerSubscription\",\"type\":\"uint16\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"bytes4\",\"name\":\"handleOracleFulfillmentSelector\",\"type\":\"bytes4\"},{\"internalType\":\"uint16\",\"name\":\"gasForCallExactCheck\",\"type\":\"uint16\"},{\"internalType\":\"uint32[]\",\"name\":\"maxCallbackGasLimits\",\"type\":\"uint32[]\"},{\"internalType\":\"uint16\",\"name\":\"subscriptionDepositMinimumRequests\",\"type\":\"uint16\"},{\"internalType\":\"uint72\",\"name\":\"subscriptionDepositJuels\",\"type\":\"uint72\"}],\"indexed\":false,\"internalType\":\"structFunctionsRouter.Config\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"ConfigUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"proposedContractSetId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"proposedContractSetFromAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"proposedContractSetToAddress\",\"type\":\"addr
ess\"}],\"name\":\"ContractProposed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"ContractUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"FundsRecovered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"enumFunctionsResponse.FulfillResult\",\"name\":\"resultCode\",\"type\":\"uint8\"}],\"name\":\"RequestNotProcessed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\
"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalCostJuels\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"enumFunctionsResponse.FulfillResult\",\"name\":\"resultCode\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"callbackReturnData\",\"type\":\"bytes\"}],\"name\":\"RequestProcessed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"donId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"subscriptionOwner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestInitiator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"dataVersion\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"estimatedTotalCostJuels\",\"type\":\"uint96\"}],\"name\":\"RequestStart\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"RequestTimedOut\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscri
ptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"fundsRecipient\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fundsAmount\",\"type\":\"uint256\"}],\"name\":\"SubscriptionCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"SubscriptionCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"SubscriptionFunded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\
"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"MAX_CALLBACK_RETURN_BYTES\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"acceptSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"addConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"cancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"createSubscription\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"createSubscriptionWithConsumer\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"internalTyp
e\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"juelsPerGas\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"costWithoutFulfillment\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"estimatedTotalCostJuels\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint32\",\"name\":\"timeoutTimestamp\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsResponse.Commitment\",\"name\":\"commitment\",\"type\":\"tuple\"}],\"name\":\"fulfill\",\"outputs\":[{\"internalType\":\"enumFunctionsResponse.FulfillResult\",\"name\":\"resultCode\",\"type\":\"uint8\"},{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAdminFee\",\"outputs\":[{\"internalType\":\"uint72\",\"name\":\"\",\"type\":\"uint72\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAllowListId\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"components\":[{\"internalType\":\"uint16\",\"name\":\"maxConsumers
PerSubscription\",\"type\":\"uint16\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"bytes4\",\"name\":\"handleOracleFulfillmentSelector\",\"type\":\"bytes4\"},{\"internalType\":\"uint16\",\"name\":\"gasForCallExactCheck\",\"type\":\"uint16\"},{\"internalType\":\"uint32[]\",\"name\":\"maxCallbackGasLimits\",\"type\":\"uint32[]\"},{\"internalType\":\"uint16\",\"name\":\"subscriptionDepositMinimumRequests\",\"type\":\"uint16\"},{\"internalType\":\"uint72\",\"name\":\"subscriptionDepositJuels\",\"type\":\"uint72\"}],\"internalType\":\"structFunctionsRouter.Config\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"getConsumer\",\"outputs\":[{\"components\":[{\"internalType\":\"bool\",\"name\":\"allowed\",\"type\":\"bool\"},{\"internalType\":\"uint64\",\"name\":\"initiatedRequests\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"completedRequests\",\"type\":\"uint64\"}],\"internalType\":\"structIFunctionsSubscriptions.Consumer\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"getContractById\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"getFlags\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"getProposedContractById\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutabil
ity\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getProposedContractSet\",\"outputs\":[{\"internalType\":\"bytes32[]\",\"name\":\"\",\"type\":\"bytes32[]\"},{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"getSubscription\",\"outputs\":[{\"components\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"blockedBalance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"proposedOwner\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"},{\"internalType\":\"bytes32\",\"name\":\"flags\",\"type\":\"bytes32\"}],\"internalType\":\"structIFunctionsSubscriptions.Subscription\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getSubscriptionCount\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionIdStart\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionIdEnd\",\"type\":\"uint64\"}],\"name\":\"getSubscriptionsInRange\",\"outputs\":[{\"components\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"blockedBalance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"proposedOwner\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"},{\"internalType\":\"bytes32\",\"name\":\"flags\",\"type\":\"bytes32\"}],\"internalType\":\"structIFunctionsSubscriptions.Subscription[]\",\"name\":\"su
bscriptions\",\"type\":\"tuple[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTotalBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"}],\"name\":\"isValidCallbackGasLimit\",\"outputs\":[],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"oracleWithdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"ownerCancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"ownerWithdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":
\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"pendingRequestExists\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[]\",\"name\":\"proposedContractSetIds\",\"type\":\"bytes32[]\"},{\"internalType\":\"address[]\",\"name\":\"proposedContractSetAddresses\",\"type\":\"address[]\"}],\"name\":\"proposeContractsUpdate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"proposeSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"removeConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"uint16\",\"name\":\"dataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"donId\",\"type\":\"bytes32\"}],\"name\":\"sendRequest\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"ty
pe\":\"bytes\"},{\"internalType\":\"uint16\",\"name\":\"dataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"donId\",\"type\":\"bytes32\"}],\"name\":\"sendRequestToProposed\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"allowListId\",\"type\":\"bytes32\"}],\"name\":\"setAllowListId\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"flags\",\"type\":\"bytes32\"}],\"name\":\"setFlags\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"estimatedTotalCostJuels\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"uint72\",\"name\":\"donFee\",\"type\":\"uint72\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadBeforeCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint40\",\"name\":\"gasOverheadAfterCallback\",\"type\":\"uint40\"},{\"internalType\":\"uint32\",\"name\":\"timeoutTimestamp\",\"type\":\"uint32\"}],\"internalType\":\"structFunctionsResponse.Commitment[]\",\"name\":\"requestsToTimeoutByCommitment\",\"type\":\"tuple[]\"}],\"name\":\"timeoutRequests\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs
\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint16\",\"name\":\"maxConsumersPerSubscription\",\"type\":\"uint16\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"bytes4\",\"name\":\"handleOracleFulfillmentSelector\",\"type\":\"bytes4\"},{\"internalType\":\"uint16\",\"name\":\"gasForCallExactCheck\",\"type\":\"uint16\"},{\"internalType\":\"uint32[]\",\"name\":\"maxCallbackGasLimits\",\"type\":\"uint32[]\"},{\"internalType\":\"uint16\",\"name\":\"subscriptionDepositMinimumRequests\",\"type\":\"uint16\"},{\"internalType\":\"uint72\",\"name\":\"subscriptionDepositJuels\",\"type\":\"uint72\"}],\"internalType\":\"structFunctionsRouter.Config\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"updateConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"updateContracts\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60a06040523480156200001157600080fd5b506040516200673c3803806200673c833981016040819052620000349162000549565b6001600160a01b0382166080526006805460ff191690553380600081620000a25760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600680546001600160a01b0380851661010002610100600160a81b031990921691909117909155811615620000dc57620000dc81620000f8565b505050620000f081620001aa60201b60201c565b50506200071a565b336001600160a01b03821603620001525760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000099565b600780546001600160a01b0319166001600160a01b03838116918217909255600654604051919261010090910416907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b620001b4620002c0565b8051600a80546020808501516040860151606087015161ffff908116600160781b0261ffff60781b1960e09390931c6b010000000000000000000000029290921665ffffffffffff60581b196001600160481b0390941662010000026001600160581b031990961691909716179390931716939093171781556080830151805184936200024792600b9291019062000323565b5060a08201516002909101805460c0909301516001600160481b031662010000026001600160581b031990931661ffff909216919091179190911790556040517ea5832bf95f66c7814294cc4db681f20ee79608bfb8912a5321d66cfed5e98590620002b590839062000652565b60405180910390a150565b60065461010090046001600160a01b03163314620003215760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000099565b565b82805482825590600052602060002090600701600890048101928215620003c75791602002820160005b838211156200039357835183826101000a81548163ffffffff021916908363ffffffff16021790555092602001926004016020816003010492830192600103026200034d565b8015620003c55782816101000a81549063ffffffff021916905560040160208160030104928301926001030262000393565b505b50620003d5929150620003d9565b5090565b5b80821115620003d55760008
155600101620003da565b634e487b7160e01b600052604160045260246000fd5b60405160e081016001600160401b03811182821017156200042b576200042b620003f0565b60405290565b604051601f8201601f191681016001600160401b03811182821017156200045c576200045c620003f0565b604052919050565b805161ffff811681146200047757600080fd5b919050565b80516001600160481b03811681146200047757600080fd5b80516001600160e01b0319811681146200047757600080fd5b600082601f830112620004bf57600080fd5b815160206001600160401b03821115620004dd57620004dd620003f0565b8160051b620004ee82820162000431565b92835284810182019282810190878511156200050957600080fd5b83870192505b848310156200053e57825163ffffffff811681146200052e5760008081fd5b825291830191908301906200050f565b979650505050505050565b600080604083850312156200055d57600080fd5b82516001600160a01b03811681146200057557600080fd5b60208401519092506001600160401b03808211156200059357600080fd5b9084019060e08287031215620005a857600080fd5b620005b262000406565b620005bd8362000464565b8152620005cd602084016200047c565b6020820152620005e06040840162000494565b6040820152620005f36060840162000464565b60608201526080830151828111156200060b57600080fd5b6200061988828601620004ad565b6080830152506200062d60a0840162000464565b60a08201526200064060c084016200047c565b60c08201528093505050509250929050565b6020808252825161ffff90811683830152838201516001600160481b03166040808501919091528401516001600160e01b0319166060808501919091528401511660808084019190915283015160e060a0840152805161010084018190526000929182019083906101208601905b80831015620006e857835163ffffffff168252928401926001929092019190840190620006c0565b5060a087015161ffff811660c0880152935060c08701516001600160481b03811660e088015293509695505050505050565b608051615fea62000752600039600081816111cd0152818161208c015281816129b801528181612a7c01526135d30152615fea6000f3fe608060405234801561001057600080fd5b50600436106102e95760003560e01c80637341c10c11610191578063b734c0f4116100e3578063e72f6e3011610097578063ea320e0b11610071578063ea320e0b146106dd578063ec2454e5146106f0578063f2fde38b1461071057600080fd5b8063e72f6e30146106a45
78063e82622aa146106b7578063e82ad7d4146106ca57600080fd5b8063c3f909d4116100c8578063c3f909d414610669578063cc77470a1461067e578063d7ae1d301461069157600080fd5b8063b734c0f41461064b578063badc3eb61461065357600080fd5b80639f87fad711610145578063a4c0ed361161011f578063a4c0ed361461061d578063a9c9a91814610630578063aab396bd1461064357600080fd5b80639f87fad7146105e2578063a21a23e4146105f5578063a47c7696146105fd57600080fd5b8063823597401161017657806382359740146105a45780638456cb59146105b75780638da5cb5b146105bf57600080fd5b80637341c10c1461058957806379ba50971461059c57600080fd5b806341db4ca31161024a5780635ed6dfba116101fe57806366419970116101d857806366419970146104e1578063674603d0146105085780636a2215de1461055157600080fd5b80635ed6dfba146104a85780636162a323146104bb57806366316d8d146104ce57600080fd5b80634b8832d31161022f5780634b8832d31461045057806355fedefa146104635780635c975abb1461049157600080fd5b806341db4ca31461041c578063461d27621461043d57600080fd5b80631ded3b36116102a1578063330605291161028657806333060529146103e05780633e871e4d146104015780633f4ba83a1461041457600080fd5b80631ded3b361461039f5780632a905ccc146103b257600080fd5b806310fc49c1116102d257806310fc49c11461032357806312b5834914610336578063181f5a771461035657600080fd5b806302bcc5b6146102ee5780630c5d49cb14610303575b600080fd5b6103016102fc366004614ba6565b610723565b005b61030b608481565b60405161ffff90911681526020015b60405180910390f35b610301610331366004614be7565b610783565b6000546040516bffffffffffffffffffffffff909116815260200161031a565b6103926040518060400160405280601781526020017f46756e6374696f6e7320526f757465722076322e302e3000000000000000000081525081565b60405161031a9190614c8e565b6103016103ad366004614ca1565b61087f565b600a5462010000900468ffffffffffffffffff1660405168ffffffffffffffffff909116815260200161031a565b6103f36103ee366004614f8c565b6108b1565b60405161031a929190615074565b61030161040f366004615135565b610c7c565b610301610e91565b61042f61042a366004615249565b610ea3565b60405190815260200161031a565b61042f61044b366004615249565b610f03565b61030161045e3660046152cd565b610f0f565b6
1042f610471366004614ba6565b67ffffffffffffffff166000908152600360208190526040909120015490565b60065460ff165b604051901515815260200161031a565b6103016104b63660046152fb565b61105d565b6103016104c93660046153bd565b611216565b6103016104dc3660046152fb565b611396565b60025467ffffffffffffffff165b60405167ffffffffffffffff909116815260200161031a565b61051b610516366004615490565b61147f565b6040805182511515815260208084015167ffffffffffffffff90811691830191909152928201519092169082015260600161031a565b61056461055f3660046154be565b61150f565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161031a565b6103016105973660046152cd565b6115ce565b610301611781565b6103016105b2366004614ba6565b6118a8565b6103016119ef565b600654610100900473ffffffffffffffffffffffffffffffffffffffff16610564565b6103016105f03660046152cd565b6119ff565b6104ef611daa565b61061061060b366004614ba6565b611f37565b60405161031a91906155a7565b61030161062b3660046155ba565b61206c565b61056461063e3660046154be565b6122b8565b60095461042f565b610301612317565b61065b612463565b60405161031a929190615616565b610671612533565b60405161031a919061566d565b6104ef61068c366004615749565b61269a565b61030161069f3660046152cd565b61291a565b6103016106b2366004615749565b61297f565b6103016106c5366004615766565b612af8565b6104986106d8366004614ba6565b612db7565b6103016106eb3660046154be565b612f06565b6107036106fe3660046157dc565b612f13565b60405161031a91906157fa565b61030161071e366004615749565b6131a8565b61072b6131b9565b610734816131c1565b67ffffffffffffffff81166000908152600360205260408120546107809183916c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1690613237565b50565b67ffffffffffffffff8216600090815260036020819052604082200154600b54911a9081106107e8576040517f45c108ce00000000000000000000000000000000000000000000000000000000815260ff821660048201526024015b60405180910390fd5b6000600a6001018260ff16815481106108035761080361587a565b90600052602060002090600891828204019190066004029054906101000a900463ffffffff1690508063ffffffff168363ffffffff161115610879576040517f1d70f87a000000000
00000000000000000000000000000000000000000000000815263ffffffff821660048201526024016107df565b50505050565b6108876131b9565b610890826131c1565b67ffffffffffffffff90911660009081526003602081905260409091200155565b6000806108bc613689565b826020015173ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610925576040517f8bec23e700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b82516000908152600560205260409020548061098a5783516020850151604051600295507f1a90e9a50793db2e394cf581e7c522e10c358a81e70acf6b5a0edd620c08dee19161097891899088906158a9565b60405180910390a25060009050610c71565b808460405160200161099c91906158db565b60405160208183030381529060405280519060200120146109f45783516020850151604051600695507f1a90e9a50793db2e394cf581e7c522e10c358a81e70acf6b5a0edd620c08dee19161097891899088906158a9565b8361012001518460a0015163ffffffff16610a0f9190615a37565b64ffffffffff165a1015610a5a5783516020850151604051600495507f1a90e9a50793db2e394cf581e7c522e10c358a81e70acf6b5a0edd620c08dee19161097891899088906158a9565b506000610a708460a0015163ffffffff16613691565b610a7a9088615a55565b9050600081878660c0015168ffffffffffffffffff16610a9a9190615a7d565b610aa49190615a7d565b9050610ab38560800151611f37565b600001516bffffffffffffffffffffffff16816bffffffffffffffffffffffff161115610b2b5784516020860151604051600596507f1a90e9a50793db2e394cf581e7c522e10c358a81e70acf6b5a0edd620c08dee191610b17918a9089906158a9565b60405180910390a25060009150610c719050565b84604001516bffffffffffffffffffffffff16816bffffffffffffffffffffffff161115610b905784516020860151604051600396507f1a90e9a50793db2e394cf581e7c522e10c358a81e70acf6b5a0edd620c08dee191610b17918a9089906158a9565b505082516000908152600560205260408120819055835160a08501516060860151610bc092918c918c9190613733565b8051909150610bd0576001610bd3565b60005b92506000610c0d8560800151866040015187606001518860c0015168ffffffffffffffffff168c610c078860200151613691565b8d6138f1565b9050846080015167ffffffffffffffff1685600001517f64778f26c70b60a8d7e29e2451b3844
302d959448401c0535b768ed88c6b505e836020015189888f8f8960400151604051610c6496959493929190615aa2565b60405180910390a3519150505b965096945050505050565b610c84613c17565b8151815181141580610c965750600881115b15610ccd576040517fee03280800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b81811015610e47576000848281518110610cec57610cec61587a565b602002602001015190506000848381518110610d0a57610d0a61587a565b60200260200101519050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161480610d75575060008281526008602052604090205473ffffffffffffffffffffffffffffffffffffffff8281169116145b15610dac576040517fee03280800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260086020526040908190205490517f8b052f0f4bf82fede7daffea71592b29d5ef86af1f3c7daaa0345dbb2f52f48191610e2c91859173ffffffffffffffffffffffffffffffffffffffff1690859092835273ffffffffffffffffffffffffffffffffffffffff918216602084015216604082015260600190565b60405180910390a1505080610e4090615b25565b9050610cd0565b506040805180820190915283815260208082018490528451600d91610e709183918801906149e6565b506020828101518051610e899260018501920190614a2d565b505050505050565b610e99613c17565b610ea1613c9d565b565b600080610eaf8361150f565b9050610ef783828a8a8a8080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508c92508b9150613d1a9050565b98975050505050505050565b600080610eaf836122b8565b610f17613689565b610f20826140ef565b610f286141b5565b73ffffffffffffffffffffffffffffffffffffffff81161580610f8f575067ffffffffffffffff821660009081526003602052604090206001015473ffffffffffffffffffffffffffffffffffffffff8281166c0100000000000000000000000090920416145b15610fc6576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff821660008181526003602090815260409182902060010180546bffffffffffffffffffffffff166c0100000000000000000000000073ffffffffffffffffffffffffffffffffffffffff87169
08102919091179091558251338152918201527f69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be910160405180910390a25050565b6110656131b9565b806bffffffffffffffffffffffff1660000361109b5750306000908152600160205260409020546bffffffffffffffffffffffff165b306000908152600160205260409020546bffffffffffffffffffffffff908116908216811015611107576040517f6b0fe56f0000000000000000000000000000000000000000000000000000000081526bffffffffffffffffffffffff821660048201526024016107df565b30600090815260016020526040812080548492906111349084906bffffffffffffffffffffffff16615b5d565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550816000808282829054906101000a90046bffffffffffffffffffffffff1661118a9190615b5d565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555061121183836bffffffffffffffffffffffff167f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff166142bf9092919063ffffffff16565b505050565b61121e613c17565b8051600a80546020808501516040860151606087015161ffff9081166f01000000000000000000000000000000027fffffffffffffffffffffffffffffff0000ffffffffffffffffffffffffffffff60e09390931c6b01000000000000000000000002929092167fffffffffffffffffffffffffffffff000000000000ffffffffffffffffffffff68ffffffffffffffffff90941662010000027fffffffffffffffffffffffffffffffffffffffffff0000000000000000000000909616919097161793909317169390931717815560808301518051849361130592600b92910190614aa7565b5060a08201516002909101805460c09093015168ffffffffffffffffff1662010000027fffffffffffffffffffffffffffffffffffffffffff000000000000000000000090931661ffff909216919091179190911790556040517ea5832bf95f66c7814294cc4db681f20ee79608bfb8912a5321d66cfed5e9859061138b90839061566d565b60405180910390a150565b61139e613689565b806bffffffffffffffffffffffff166000036113e6576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336000908152600160205260409020546bffffffffffffffffffffffff9
08116908216811015611452576040517f6b0fe56f0000000000000000000000000000000000000000000000000000000081526bffffffffffffffffffffffff821660048201526024016107df565b33600090815260016020526040812080548492906111349084906bffffffffffffffffffffffff16615b5d565b60408051606080820183526000808352602080840182905292840181905273ffffffffffffffffffffffffffffffffffffffff861681526004835283812067ffffffffffffffff868116835290845290849020845192830185525460ff81161515835261010081048216938301939093526901000000000000000000909204909116918101919091525b92915050565b6000805b600d5460ff8216101561159857600d805460ff83169081106115375761153761587a565b9060005260206000200154830361158857600e805460ff831690811061155f5761155f61587a565b60009182526020909120015473ffffffffffffffffffffffffffffffffffffffff169392505050565b61159181615b82565b9050611513565b506040517f80833e33000000000000000000000000000000000000000000000000000000008152600481018390526024016107df565b6115d6613689565b6115df826140ef565b6115e76141b5565b60006115f6600a5461ffff1690565b67ffffffffffffffff841660009081526003602052604090206002015490915061ffff821611611658576040517fb72bc70300000000000000000000000000000000000000000000000000000000815261ffff821660048201526024016107df565b73ffffffffffffffffffffffffffffffffffffffff8216600090815260046020908152604080832067ffffffffffffffff8716845290915290205460ff16156116a057505050565b73ffffffffffffffffffffffffffffffffffffffff8216600081815260046020908152604080832067ffffffffffffffff881680855290835281842080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001908117909155600384528285206002018054918201815585529383902090930180547fffffffffffffffffffffffff000000000000000000000000000000000000000016851790555192835290917f43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e091015b60405180910390a2505050565b60075473ffffffffffffffffffffffffffffffffffffffff163314611802576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720
000000000000000000060448201526064016107df565b600680547fffffffffffffffffffffff0000000000000000000000000000000000000000ff81166101003381810292909217909355600780547fffffffffffffffffffffffff00000000000000000000000000000000000000001690556040519290910473ffffffffffffffffffffffffffffffffffffffff169182907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a350565b6118b0613689565b6118b86141b5565b67ffffffffffffffff81166000908152600360205260409020805460019091015473ffffffffffffffffffffffffffffffffffffffff6c010000000000000000000000009283900481169290910416338114611958576040517f4e1d9f1800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821660048201526024016107df565b67ffffffffffffffff831660008181526003602090815260409182902080546c01000000000000000000000000339081026bffffffffffffffffffffffff928316178355600190920180549091169055825173ffffffffffffffffffffffffffffffffffffffff87168152918201527f6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f09101611774565b6119f7613c17565b610ea161434c565b611a07613689565b611a10826140ef565b611a186141b5565b73ffffffffffffffffffffffffffffffffffffffff8116600090815260046020908152604080832067ffffffffffffffff8087168552908352928190208151606081018352905460ff8116151582526101008104851693820193909352690100000000000000000090920490921691810191909152611a9782846143a7565b806040015167ffffffffffffffff16816020015167ffffffffffffffff1614611aec576040517f06eb10c800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff8316600090815260036020908152604080832060020180548251818502810185019093528083529192909190830182828015611b6757602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311611b3c575b5050505050905060005b8151811015611d0f578373ffffffffffffffffffffffffffffffffffffffff16828281518110611ba357611ba361587a565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1603611cff578160018351611bd59190615ba15
65b81518110611be557611be561587a565b6020026020010151600360008767ffffffffffffffff1667ffffffffffffffff1681526020019081526020016000206002018281548110611c2857611c2861587a565b600091825260208083209190910180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff949094169390931790925567ffffffffffffffff87168152600390915260409020600201805480611ca257611ca2615bb4565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055019055611d0f565b611d0881615b25565b9050611b71565b5073ffffffffffffffffffffffffffffffffffffffff8316600081815260046020908152604080832067ffffffffffffffff89168085529083529281902080547fffffffffffffffffffffffffffffff00000000000000000000000000000000001690555192835290917f182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b910160405180910390a250505050565b6000611db4613689565b611dbc6141b5565b60028054600090611dd69067ffffffffffffffff16615be3565b825467ffffffffffffffff8083166101009490940a93840293021916919091179091556040805160c0810182526000808252336020830152918101829052606081018290529192506080820190604051908082528060200260200182016040528015611e4c578160200160208202803683370190505b5081526000602091820181905267ffffffffffffffff841681526003825260409081902083518484015173ffffffffffffffffffffffffffffffffffffffff9081166c010000000000000000000000009081026bffffffffffffffffffffffff9384161784559386015160608701519091169093029216919091176001820155608083015180519192611ee792600285019290910190614a2d565b5060a0919091015160039091015560405133815267ffffffffffffffff8216907f464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf9060200160405180910390a290565b6040805160c0810182526000808252602082018190529181018290526060808201839052608082015260a0810191909152611f71826131c1565b67ffffffffffffffff8216600090815260036020908152604091829020825160c08101845281546bffffffffffffffffffffffff808216835273fffffffffffffffffffffffffffffffff
fffffff6c0100000000000000000000000092839004811684870152600185015491821684880152919004166060820152600282018054855181860281018601909652808652919492936080860193929083018282801561205257602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311612027575b505050505081526020016003820154815250509050919050565b612074613689565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146120e3576040517f44b0e3c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6020811461211d576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061212b82840184614ba6565b67ffffffffffffffff81166000908152600360205260409020549091506c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff166121a4576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff8116600090815260036020526040812080546bffffffffffffffffffffffff16918691906121db8385615a7d565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550846000808282829054906101000a90046bffffffffffffffffffffffff166122319190615a7d565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508167ffffffffffffffff167fd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f88287846122989190615c0a565b6040805192835260208301919091520160405180910390a2505050505050565b60008181526008602052604081205473ffffffffffffffffffffffffffffffffffffffff1680611509576040517f80833e33000000000000000000000000000000000000000000000000000000008152600481018490526024016107df565b61231f613c17565b60005b600d54811015612442576000600d60000182815481106123445761234461587a565b906000526020600020015490506000600d60010183815481106123695761236961587a565b6000918252602080832091909101548483526008825260409283902054835186815273ffffffffffffffffffffffffffffffffffffffff9182169
3810193909352169181018290529091507ff8a6175bca1ba37d682089187edc5e20a859989727f10ca6bd9a5bc0de8caf949060600160405180910390a160009182526008602052604090912080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff90921691909117905561243b81615b25565b9050612322565b50600d60006124518282614b51565b61245f600183016000614b51565b5050565b606080600d600001600d600101818054806020026020016040519081016040528092919081815260200182805480156124bb57602002820191906000526020600020905b8154815260200190600101908083116124a7575b505050505091508080548060200260200160405190810160405280929190818152602001828054801561252457602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116124f9575b50505050509050915091509091565b6040805160e0810182526000808252602082018190529181018290526060808201839052608082015260a0810182905260c08101919091526040805160e08082018352600a805461ffff808216855262010000820468ffffffffffffffffff166020808701919091526b010000000000000000000000830490941b7fffffffff0000000000000000000000000000000000000000000000000000000016858701526f01000000000000000000000000000000909104166060840152600b805485518185028101850190965280865293949193608086019383018282801561266557602002820191906000526020600020906000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116126285790505b50505091835250506002919091015461ffff8116602083015262010000900468ffffffffffffffffff16604090910152919050565b60006126a4613689565b6126ac6141b5565b600280546000906126c69067ffffffffffffffff16615be3565b825467ffffffffffffffff8083166101009490940a93840293021916919091179091556040805160c081018252600080825233602083015291810182905260608101829052919250608082019060405190808252806020026020018201604052801561273c578160200160208202803683370190505b5081526000602091820181905267ffffffffffffffff841681526003825260409081902083518484015173ffffffffffffffffffffffffffffffffffffffff9081166c0100000000000000000000000
09081026bffffffffffffffffffffffff93841617845593860151606087015190911690930292169190911760018201556080830151805191926127d792600285019290910190614a2d565b5060a0919091015160039182015567ffffffffffffffff82166000818152602092835260408082206002018054600180820183559184528584200180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff891690811790915583526004855281832084845285529181902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169092179091555133815290917f464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf910160405180910390a260405173ffffffffffffffffffffffffffffffffffffffff8316815267ffffffffffffffff8216907f43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e09060200160405180910390a2919050565b612922613689565b61292b826140ef565b6129336141b5565b61293c82612db7565b15612973576040517f06eb10c800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61245f82826001613237565b6129876131b9565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201526000907f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906370a0823190602401602060405180830381865afa158015612a14573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612a389190615c1d565b6000549091506bffffffffffffffffffffffff1681811015611211576000612a608284615ba1565b9050612aa373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001685836142bf565b6040805173ffffffffffffffffffffffffffffffffffffffff86168152602081018390527f59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600910160405180910390a150505050565b612b00613689565b60005b81811015611211576000838383818110612b1f57612b1f61587a565b90506101600201803603810190612b369190615c36565b80516080820151600082815260056020908152604091829020549151949550929391929091612b67918691016158db565b6040516020818303038152906
040528051906020012014612bb4576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b82610140015163ffffffff16421015612bf9576040517fa2376fe800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60208301516040517f85b214cf0000000000000000000000000000000000000000000000000000000081526004810184905273ffffffffffffffffffffffffffffffffffffffff909116906385b214cf90602401600060405180830381600087803b158015612c6757600080fd5b505af1158015612c7b573d6000803e3d6000fd5b50505060408085015167ffffffffffffffff84166000908152600360205291822060010180549193509190612cbf9084906bffffffffffffffffffffffff16615b5d565b82546bffffffffffffffffffffffff9182166101009390930a928302919092021990911617905550606083015173ffffffffffffffffffffffffffffffffffffffff16600090815260046020908152604080832067ffffffffffffffff808616855292529091208054600192600991612d479185916901000000000000000000900416615c53565b825467ffffffffffffffff9182166101009390930a9283029190920219909116179055506000828152600560205260408082208290555183917ff1ca1e9147be737b04a2b018a79405f687a97de8dd8a2559bbe62357343af41491a250505080612db090615b25565b9050612b03565b67ffffffffffffffff8116600090815260036020908152604080832060020180548251818502810185019093528083528493830182828015612e2f57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311612e04575b5050505050905060005b8151811015612efc57600060046000848481518110612e5a57612e5a61587a565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040908101600090812067ffffffffffffffff808a168352908452908290208251606081018452905460ff8116151582526101008104831694820185905269010000000000000000009004909116918101829052925014612eeb57506001949350505050565b50612ef581615b25565b9050612e39565b5060009392505050565b612f0e613c17565b600955565b60608167ffffffffffffffff168367ffffffffffffffff161180612f46575060025467ffffffffffffffff908116908316115b80612f5b575060025467ffffffffffffffff16155b1
5612f92576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b612f9c8383615c74565b612fa7906001615c53565b67ffffffffffffffff1667ffffffffffffffff811115612fc957612fc9614ccd565b60405190808252806020026020018201604052801561304657816020015b6040805160c081018252600080825260208083018290529282018190526060808301829052608083015260a082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff909201910181612fe75790505b50905060005b6130568484615c74565b67ffffffffffffffff1681116131a1576003600061307e8367ffffffffffffffff8816615c0a565b67ffffffffffffffff1681526020808201929092526040908101600020815160c08101835281546bffffffffffffffffffffffff808216835273ffffffffffffffffffffffffffffffffffffffff6c010000000000000000000000009283900481168488015260018501549182168487015291900416606082015260028201805484518187028101870190955280855291949293608086019390929083018282801561316057602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311613135575b505050505081526020016003820154815250508282815181106131855761318561587a565b60200260200101819052508061319a90615b25565b905061304c565b5092915050565b6131b0613c17565b6107808161441b565b610ea1613c17565b67ffffffffffffffff81166000908152600360205260409020546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff16610780576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff83166000908152600360209081526040808320815160c08101835281546bffffffffffffffffffffffff808216835273ffffffffffffffffffffffffffffffffffffffff6c010000000000000000000000009283900481168488015260018501549182168487015291900416606082015260028201805484518187028101870190955280855291949293608086019390929083018282801561331857602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116132ed575b50505091835250506003919091015460209091015280519091506000805b83608001515181101561342e576
0008460800151828151811061335b5761335b61587a565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff8116600090815260048352604080822067ffffffffffffffff808e16845294529020549092506133bb9169010000000000000000009091041684615c53565b73ffffffffffffffffffffffffffffffffffffffff909116600090815260046020908152604080832067ffffffffffffffff8c168452909152902080547fffffffffffffffffffffffffffffff0000000000000000000000000000000000169055915061342781615b25565b9050613336565b5067ffffffffffffffff8616600090815260036020526040812081815560018101829055906134606002830182614b51565b50600060039190910155600c5461ffff81169062010000900468ffffffffffffffffff1685801561349e57508161ffff168367ffffffffffffffff16105b1561355a576000846bffffffffffffffffffffffff168268ffffffffffffffffff16116134d6578168ffffffffffffffffff166134d8565b845b90506bffffffffffffffffffffffff81161561355857306000908152600160205260408120805483929061351b9084906bffffffffffffffffffffffff16615a7d565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555080856135559190615b5d565b94505b505b6bffffffffffffffffffffffff841615613617576000805485919081906135909084906bffffffffffffffffffffffff16615b5d565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555061361787856bffffffffffffffffffffffff167f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff166142bf9092919063ffffffff16565b6040805173ffffffffffffffffffffffffffffffffffffffff891681526bffffffffffffffffffffffff8616602082015267ffffffffffffffff8a16917fe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd49815910160405180910390a25050505050505050565b610ea1614517565b60006bffffffffffffffffffffffff82111561372f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f53616665436173743a2076616c756520646f65736e27742066697420696e203960448201527f36206269747300000000000000000000000000000000000000000000000000006064820152608401610
7df565b5090565b60408051606080820183526000808352602083015291810191909152813b1580156137865750506040805160608101825260008082526020808301829052835191825281018352918101919091526138e8565b600a546040516000916b010000000000000000000000900460e01b906137b4908a908a908a90602401615c95565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152918152602080830180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009590951694909417909352600a548151608480825260c0820190935292945061ffff6f01000000000000000000000000000000909104169260009283928392820181803683370190505090505a8481101561388257600080fd5b8490036040810481038a1061389657600080fd5b505a60008087516020890160008d8ff193505a900391503d60848111156138bb575060845b808252806000602084013e5060408051606081018252931515845260208401929092529082015293505050505b95945050505050565b604080518082019091526000808252602082015260006139118486615a55565b90506000816139208886615a7d565b61392a9190615a7d565b67ffffffffffffffff8b166000908152600360205260409020549091506bffffffffffffffffffffffff80831691161080613991575067ffffffffffffffff8a166000908152600360205260409020600101546bffffffffffffffffffffffff808b169116105b156139f45767ffffffffffffffff8a16600090815260036020526040908190205490517f6b0fe56f0000000000000000000000000000000000000000000000000000000081526bffffffffffffffffffffffff90911660048201526024016107df565b67ffffffffffffffff8a1660009081526003602052604081208054839290613a2b9084906bffffffffffffffffffffffff16615b5d565b82546101009290920a6bffffffffffffffffffffffff81810219909316918316021790915567ffffffffffffffff8c16600090815260036020526040812060010180548d94509092613a7f91859116615b5d565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508184613ab99190615a7d565b3360009081526001602052604081208054909190613ae69084906bffffffffffffffffffffffff16615a7d565b82546101009290920a6bffffffffffffffffffffffff81810219909316918316021790915530600090815260016
020526040812080548b94509092613b2d91859116615a7d565b82546bffffffffffffffffffffffff9182166101009390930a92830291909202199091161790555073ffffffffffffffffffffffffffffffffffffffff8816600090815260046020908152604080832067ffffffffffffffff808f16855292529091208054600192600991613bb19185916901000000000000000000900416615c53565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055506040518060400160405280836bffffffffffffffffffffffff168152602001826bffffffffffffffffffffffff1681525092505050979650505050505050565b600654610100900473ffffffffffffffffffffffffffffffffffffffff163314610ea1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016107df565b613ca5614584565b600680547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa335b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390a1565b6000613d24613689565b613d2d856131c1565b613d3733866143a7565b613d418583610783565b8351600003613d7b576040517ec1cfc000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000613d8686611f37565b90506000613d94338861147f565b600a54604080516101608101825289815267ffffffffffffffff8b1660009081526003602081815293822001549495506201000090930468ffffffffffffffffff169373ffffffffffffffffffffffffffffffffffffffff8d169263a631571e929190820190815233602082015260408881015189519190920191613e1891615b5d565b6bffffffffffffffffffffffff1681526020018568ffffffffffffffffff1681526020018c67ffffffffffffffff168152602001866020015167ffffffffffffffff1681526020018963ffffffff1681526020018a61ffff168152602001866040015167ffffffffffffffff168152602001876020015173ffffffffffffffffffffffffffffffffffffffff168152506040518263ffffffff1660e01b8152600401613ec49190615cc0565b610160604051808303816000875af1158015613ee4573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508
10190613f089190615e25565b805160009081526005602052604090205490915015613f595780516040517f304f32e800000000000000000000000000000000000000000000000000000000815260048101919091526024016107df565b604051806101600160405280826000015181526020018b73ffffffffffffffffffffffffffffffffffffffff16815260200182604001516bffffffffffffffffffffffff1681526020013373ffffffffffffffffffffffffffffffffffffffff1681526020018a67ffffffffffffffff1681526020018763ffffffff1681526020018368ffffffffffffffffff1681526020018260e0015168ffffffffffffffffff16815260200182610100015164ffffffffff16815260200182610120015164ffffffffff16815260200182610140015163ffffffff1681525060405160200161404491906158db565b60405160208183030381529060405280519060200120600560008360000151815260200190815260200160002081905550614084338a83604001516145f0565b8867ffffffffffffffff168b82600001517ff67aec45c9a7ede407974a3e0c3a743dffeab99ee3f2d4c9a8144c2ebf2c7ec9876020015133328e8e8e8a604001516040516140d89796959493929190615ef8565b60405180910390a4519a9950505050505050505050565b67ffffffffffffffff81166000908152600360205260409020546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1680614166576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff82161461245f576040517f5a68151d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60095460009081526008602052604090205473ffffffffffffffffffffffffffffffffffffffff16806141e55750565b604080516000815260208101918290527f6b14daf80000000000000000000000000000000000000000000000000000000090915273ffffffffffffffffffffffffffffffffffffffff821690636b14daf89061424690339060248101615f70565b602060405180830381865afa158015614263573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906142879190615f9f565b610780576040517f229062630000000000000000000000000000000000000000000000000000000081523360048201526024016107df565b6040805173ffffffffffffffffffffffffffffffffffffffff84166024820152604480820
18490528251808303909101815260649091019091526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fa9059cbb000000000000000000000000000000000000000000000000000000001790526112119084906146cb565b614354614517565b600680547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258613cf03390565b73ffffffffffffffffffffffffffffffffffffffff8216600090815260046020908152604080832067ffffffffffffffff8516845290915290205460ff1661245f576040517f71e8313700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff82160361449a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016107df565b600780547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff838116918217909255600654604051919261010090910416907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b60065460ff1615610ea1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f5061757361626c653a207061757365640000000000000000000000000000000060448201526064016107df565b60065460ff16610ea1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f5061757361626c653a206e6f742070617573656400000000000000000000000060448201526064016107df565b67ffffffffffffffff82166000908152600360205260408120600101805483929061462a9084906bffffffffffffffffffffffff16615a7d565b82546bffffffffffffffffffffffff91821661010093840a908102920219161790915573ffffffffffffffffffffffffffffffffffffffff8516600090815260046020908152604080832067ffffffffffffffff80891685529252909120805460019450909284926146a0928492900416615c53565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff160217905550505050565b6
00061472d826040518060400160405280602081526020017f5361666545524332303a206c6f772d6c6576656c2063616c6c206661696c65648152508573ffffffffffffffffffffffffffffffffffffffff166147d79092919063ffffffff16565b805190915015611211578080602001905181019061474b9190615f9f565b611211576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f5361666545524332303a204552433230206f7065726174696f6e20646964206e60448201527f6f7420737563636565640000000000000000000000000000000000000000000060648201526084016107df565b60606147e684846000856147ee565b949350505050565b606082471015614880576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f416464726573733a20696e73756666696369656e742062616c616e636520666f60448201527f722063616c6c000000000000000000000000000000000000000000000000000060648201526084016107df565b6000808673ffffffffffffffffffffffffffffffffffffffff1685876040516148a99190615fc1565b60006040518083038185875af1925050503d80600081146148e6576040519150601f19603f3d011682016040523d82523d6000602084013e6148eb565b606091505b50915091506148fc87838387614907565b979650505050505050565b6060831561499d5782516000036149965773ffffffffffffffffffffffffffffffffffffffff85163b614996576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e747261637400000060448201526064016107df565b50816147e6565b6147e683838151156149b25781518083602001fd5b806040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016107df9190614c8e565b828054828255906000526020600020908101928215614a21579160200282015b82811115614a21578251825591602001919060010190614a06565b5061372f929150614b6b565b828054828255906000526020600020908101928215614a21579160200282015b82811115614a2157825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190614a4d565b8280548282559060005260206
0002090600701600890048101928215614a215791602002820160005b83821115614b1457835183826101000a81548163ffffffff021916908363ffffffff1602179055509260200192600401602081600301049283019260010302614ad0565b8015614b445782816101000a81549063ffffffff0219169055600401602081600301049283019260010302614b14565b505061372f929150614b6b565b508054600082559060005260206000209081019061078091905b5b8082111561372f5760008155600101614b6c565b67ffffffffffffffff8116811461078057600080fd5b8035614ba181614b80565b919050565b600060208284031215614bb857600080fd5b8135614bc381614b80565b9392505050565b63ffffffff8116811461078057600080fd5b8035614ba181614bca565b60008060408385031215614bfa57600080fd5b8235614c0581614b80565b91506020830135614c1581614bca565b809150509250929050565b60005b83811015614c3b578181015183820152602001614c23565b50506000910152565b60008151808452614c5c816020860160208601614c20565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000614bc36020830184614c44565b60008060408385031215614cb457600080fd5b8235614cbf81614b80565b946020939093013593505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051610160810167ffffffffffffffff81118282101715614d2057614d20614ccd565b60405290565b60405160e0810167ffffffffffffffff81118282101715614d2057614d20614ccd565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715614d9057614d90614ccd565b604052919050565b600082601f830112614da957600080fd5b813567ffffffffffffffff811115614dc357614dc3614ccd565b614df460207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601614d49565b818152846020838601011115614e0957600080fd5b816020850160208301376000918101602001919091529392505050565b6bffffffffffffffffffffffff8116811461078057600080fd5b8035614ba181614e26565b73ffffffffffffffffffffffffffffffffffffffff8116811461078057600080fd5b8035614ba181614e4b565b68ffffffffffffffffff8116811461078057600080fd5b8035614ba181614e78565b64f
fffffffff8116811461078057600080fd5b8035614ba181614e9a565b60006101608284031215614ecb57600080fd5b614ed3614cfc565b905081358152614ee560208301614e6d565b6020820152614ef660408301614e40565b6040820152614f0760608301614e6d565b6060820152614f1860808301614b96565b6080820152614f2960a08301614bdc565b60a0820152614f3a60c08301614e8f565b60c0820152614f4b60e08301614e8f565b60e0820152610100614f5e818401614ead565b90820152610120614f70838201614ead565b90820152610140614f82838201614bdc565b9082015292915050565b6000806000806000806102008789031215614fa657600080fd5b863567ffffffffffffffff80821115614fbe57600080fd5b614fca8a838b01614d98565b97506020890135915080821115614fe057600080fd5b50614fed89828a01614d98565b9550506040870135614ffe81614e26565b9350606087013561500e81614e26565b9250608087013561501e81614e4b565b915061502d8860a08901614eb8565b90509295509295509295565b60078110615070577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b9052565b604081016150828285615039565b6bffffffffffffffffffffffff831660208301529392505050565b600067ffffffffffffffff8211156150b7576150b7614ccd565b5060051b60200190565b600082601f8301126150d257600080fd5b813560206150e76150e28361509d565b614d49565b82815260059290921b8401810191818101908684111561510657600080fd5b8286015b8481101561512a57803561511d81614e4b565b835291830191830161510a565b509695505050505050565b6000806040838503121561514857600080fd5b823567ffffffffffffffff8082111561516057600080fd5b818501915085601f83011261517457600080fd5b813560206151846150e28361509d565b82815260059290921b840181019181810190898411156151a357600080fd5b948201945b838610156151c1578535825294820194908201906151a8565b965050860135925050808211156151d757600080fd5b506151e4858286016150c1565b9150509250929050565b60008083601f84011261520057600080fd5b50813567ffffffffffffffff81111561521857600080fd5b60208301915083602082850101111561523057600080fd5b9250929050565b803561ffff81168114614ba157600080fd5b60008060008060008060a0878903121561526257600080fd5b863561526d81614b80565b9550602087013567ffffffffffffffff8111156152895
7600080fd5b61529589828a016151ee565b90965094506152a8905060408801615237565b925060608701356152b881614bca565b80925050608087013590509295509295509295565b600080604083850312156152e057600080fd5b82356152eb81614b80565b91506020830135614c1581614e4b565b6000806040838503121561530e57600080fd5b823561531981614e4b565b91506020830135614c1581614e26565b80357fffffffff0000000000000000000000000000000000000000000000000000000081168114614ba157600080fd5b600082601f83011261536a57600080fd5b8135602061537a6150e28361509d565b82815260059290921b8401810191818101908684111561539957600080fd5b8286015b8481101561512a5780356153b081614bca565b835291830191830161539d565b6000602082840312156153cf57600080fd5b813567ffffffffffffffff808211156153e757600080fd5b9083019060e082860312156153fb57600080fd5b615403614d26565b61540c83615237565b815261541a60208401614e8f565b602082015261542b60408401615329565b604082015261543c60608401615237565b606082015260808301358281111561545357600080fd5b61545f87828601615359565b60808301525061547160a08401615237565b60a082015261548260c08401614e8f565b60c082015295945050505050565b600080604083850312156154a357600080fd5b82356154ae81614e4b565b91506020830135614c1581614b80565b6000602082840312156154d057600080fd5b5035919050565b600081518084526020808501945080840160005b8381101561551d57815173ffffffffffffffffffffffffffffffffffffffff16875295820195908201906001016154eb565b509495945050505050565b60006bffffffffffffffffffffffff808351168452602083015173ffffffffffffffffffffffffffffffffffffffff8082166020870152826040860151166040870152806060860151166060870152505050608082015160c0608085015261559360c08501826154d7565b60a093840151949093019390935250919050565b602081526000614bc36020830184615528565b600080600080606085870312156155d057600080fd5b84356155db81614e4b565b935060208501359250604085013567ffffffffffffffff8111156155fe57600080fd5b61560a878288016151ee565b95989497509550505050565b604080825283519082018190526000906020906060840190828701845b8281101561564f57815184529284019290840190600101615633565b5050508381038285015261566381866154d7565b96955050505050505
65b60006020808352610100830161ffff808651168386015268ffffffffffffffffff838701511660408601527fffffffff00000000000000000000000000000000000000000000000000000000604087015116606086015280606087015116608086015250608085015160e060a0860152818151808452610120870191508483019350600092505b8083101561571a57835163ffffffff1682529284019260019290920191908401906156f4565b5060a087015161ffff811660c0880152935060c087015168ffffffffffffffffff811660e08801529350615663565b60006020828403121561575b57600080fd5b8135614bc381614e4b565b6000806020838503121561577957600080fd5b823567ffffffffffffffff8082111561579157600080fd5b818501915085601f8301126157a557600080fd5b8135818111156157b457600080fd5b866020610160830285010111156157ca57600080fd5b60209290920196919550909350505050565b600080604083850312156157ef57600080fd5b82356154ae81614b80565b6000602080830181845280855180835260408601915060408160051b870101925083870160005b8281101561586d577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc088860301845261585b858351615528565b94509285019290850190600101615821565b5092979650505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b73ffffffffffffffffffffffffffffffffffffffff848116825283166020820152606081016147e66040830184615039565b8151815260208083015161016083019161590c9084018273ffffffffffffffffffffffffffffffffffffffff169052565b50604083015161592c60408401826bffffffffffffffffffffffff169052565b506060830151615954606084018273ffffffffffffffffffffffffffffffffffffffff169052565b506080830151615970608084018267ffffffffffffffff169052565b5060a083015161598860a084018263ffffffff169052565b5060c08301516159a560c084018268ffffffffffffffffff169052565b5060e08301516159c260e084018268ffffffffffffffffff169052565b506101008381015164ffffffffff81168483015250506101208381015164ffffffffff81168483015250506101408381015163ffffffff8116848301525b505092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b64ffffffffff8181168382160190808211156131a1576131a1615a085
65b6bffffffffffffffffffffffff818116838216028082169190828114615a0057615a00615a08565b6bffffffffffffffffffffffff8181168382160190808211156131a1576131a1615a08565b6bffffffffffffffffffffffff8716815273ffffffffffffffffffffffffffffffffffffffff86166020820152615adc6040820186615039565b60c060608201526000615af260c0830186614c44565b8281036080840152615b048186614c44565b905082810360a0840152615b188185614c44565b9998505050505050505050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203615b5657615b56615a08565b5060010190565b6bffffffffffffffffffffffff8281168282160390808211156131a1576131a1615a08565b600060ff821660ff8103615b9857615b98615a08565b60010192915050565b8181038181111561150957611509615a08565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b600067ffffffffffffffff808316818103615c0057615c00615a08565b6001019392505050565b8082018082111561150957611509615a08565b600060208284031215615c2f57600080fd5b5051919050565b60006101608284031215615c4957600080fd5b614bc38383614eb8565b67ffffffffffffffff8181168382160190808211156131a1576131a1615a08565b67ffffffffffffffff8281168282160390808211156131a1576131a1615a08565b838152606060208201526000615cae6060830185614c44565b82810360408401526156638185614c44565b6020815260008251610160806020850152615cdf610180850183614c44565b9150602085015160408501526040850151615d12606086018273ffffffffffffffffffffffffffffffffffffffff169052565b5060608501516bffffffffffffffffffffffff8116608086015250608085015168ffffffffffffffffff811660a08601525060a085015167ffffffffffffffff811660c08601525060c085015167ffffffffffffffff811660e08601525060e0850151610100615d898187018363ffffffff169052565b8601519050610120615da08682018361ffff169052565b8601519050610140615dbd8682018367ffffffffffffffff169052565b9095015173ffffffffffffffffffffffffffffffffffffffff1693019290925250919050565b8051614ba181614e4b565b8051614ba181614e26565b8051614ba181614b80565b8051614ba181614bca565b8051614ba181614e78565b8051614ba181614e9a565b60006101608284031215615e3857600080fd5b615e406
14cfc565b82518152615e5060208401615de3565b6020820152615e6160408401615dee565b6040820152615e7260608401615de3565b6060820152615e8360808401615df9565b6080820152615e9460a08401615e04565b60a0820152615ea560c08401615e0f565b60c0820152615eb660e08401615e0f565b60e0820152610100615ec9818501615e1a565b90820152610120615edb848201615e1a565b90820152610140615eed848201615e04565b908201529392505050565b600073ffffffffffffffffffffffffffffffffffffffff808a168352808916602084015280881660408401525060e06060830152615f3960e0830187614c44565b61ffff9590951660808301525063ffffffff9290921660a08301526bffffffffffffffffffffffff1660c090910152949350505050565b73ffffffffffffffffffffffffffffffffffffffff831681526040602082015260006147e66040830184614c44565b600060208284031215615fb157600080fd5b81518015158114614bc357600080fd5b60008251615fd3818460208701614c20565b919091019291505056fea164736f6c6343000813000a", +} + +var FunctionsRouterABI = FunctionsRouterMetaData.ABI + +var FunctionsRouterBin = FunctionsRouterMetaData.Bin + +func DeployFunctionsRouter(auth *bind.TransactOpts, backend bind.ContractBackend, linkToken common.Address, config FunctionsRouterConfig) (common.Address, *types.Transaction, *FunctionsRouter, error) { + parsed, err := FunctionsRouterMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(FunctionsRouterBin), backend, linkToken, config) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &FunctionsRouter{address: address, abi: *parsed, FunctionsRouterCaller: FunctionsRouterCaller{contract: contract}, FunctionsRouterTransactor: FunctionsRouterTransactor{contract: contract}, FunctionsRouterFilterer: FunctionsRouterFilterer{contract: contract}}, nil +} + +type FunctionsRouter struct { + address common.Address + abi abi.ABI + FunctionsRouterCaller + FunctionsRouterTransactor 
+ FunctionsRouterFilterer +} + +type FunctionsRouterCaller struct { + contract *bind.BoundContract +} + +type FunctionsRouterTransactor struct { + contract *bind.BoundContract +} + +type FunctionsRouterFilterer struct { + contract *bind.BoundContract +} + +type FunctionsRouterSession struct { + Contract *FunctionsRouter + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type FunctionsRouterCallerSession struct { + Contract *FunctionsRouterCaller + CallOpts bind.CallOpts +} + +type FunctionsRouterTransactorSession struct { + Contract *FunctionsRouterTransactor + TransactOpts bind.TransactOpts +} + +type FunctionsRouterRaw struct { + Contract *FunctionsRouter +} + +type FunctionsRouterCallerRaw struct { + Contract *FunctionsRouterCaller +} + +type FunctionsRouterTransactorRaw struct { + Contract *FunctionsRouterTransactor +} + +func NewFunctionsRouter(address common.Address, backend bind.ContractBackend) (*FunctionsRouter, error) { + abi, err := abi.JSON(strings.NewReader(FunctionsRouterABI)) + if err != nil { + return nil, err + } + contract, err := bindFunctionsRouter(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &FunctionsRouter{address: address, abi: abi, FunctionsRouterCaller: FunctionsRouterCaller{contract: contract}, FunctionsRouterTransactor: FunctionsRouterTransactor{contract: contract}, FunctionsRouterFilterer: FunctionsRouterFilterer{contract: contract}}, nil +} + +func NewFunctionsRouterCaller(address common.Address, caller bind.ContractCaller) (*FunctionsRouterCaller, error) { + contract, err := bindFunctionsRouter(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FunctionsRouterCaller{contract: contract}, nil +} + +func NewFunctionsRouterTransactor(address common.Address, transactor bind.ContractTransactor) (*FunctionsRouterTransactor, error) { + contract, err := bindFunctionsRouter(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return 
&FunctionsRouterTransactor{contract: contract}, nil +} + +func NewFunctionsRouterFilterer(address common.Address, filterer bind.ContractFilterer) (*FunctionsRouterFilterer, error) { + contract, err := bindFunctionsRouter(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FunctionsRouterFilterer{contract: contract}, nil +} + +func bindFunctionsRouter(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FunctionsRouterMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_FunctionsRouter *FunctionsRouterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsRouter.Contract.FunctionsRouterCaller.contract.Call(opts, result, method, params...) +} + +func (_FunctionsRouter *FunctionsRouterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsRouter.Contract.FunctionsRouterTransactor.contract.Transfer(opts) +} + +func (_FunctionsRouter *FunctionsRouterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsRouter.Contract.FunctionsRouterTransactor.contract.Transact(opts, method, params...) +} + +func (_FunctionsRouter *FunctionsRouterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsRouter.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_FunctionsRouter *FunctionsRouterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsRouter.Contract.contract.Transfer(opts) +} + +func (_FunctionsRouter *FunctionsRouterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsRouter.Contract.contract.Transact(opts, method, params...) +} + +func (_FunctionsRouter *FunctionsRouterCaller) MAXCALLBACKRETURNBYTES(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "MAX_CALLBACK_RETURN_BYTES") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) MAXCALLBACKRETURNBYTES() (uint16, error) { + return _FunctionsRouter.Contract.MAXCALLBACKRETURNBYTES(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) MAXCALLBACKRETURNBYTES() (uint16, error) { + return _FunctionsRouter.Contract.MAXCALLBACKRETURNBYTES(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCaller) GetAdminFee(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "getAdminFee") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) GetAdminFee() (*big.Int, error) { + return _FunctionsRouter.Contract.GetAdminFee(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) GetAdminFee() (*big.Int, error) { + return _FunctionsRouter.Contract.GetAdminFee(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCaller) GetAllowListId(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, 
&out, "getAllowListId") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) GetAllowListId() ([32]byte, error) { + return _FunctionsRouter.Contract.GetAllowListId(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) GetAllowListId() ([32]byte, error) { + return _FunctionsRouter.Contract.GetAllowListId(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCaller) GetConfig(opts *bind.CallOpts) (FunctionsRouterConfig, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "getConfig") + + if err != nil { + return *new(FunctionsRouterConfig), err + } + + out0 := *abi.ConvertType(out[0], new(FunctionsRouterConfig)).(*FunctionsRouterConfig) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) GetConfig() (FunctionsRouterConfig, error) { + return _FunctionsRouter.Contract.GetConfig(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) GetConfig() (FunctionsRouterConfig, error) { + return _FunctionsRouter.Contract.GetConfig(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCaller) GetConsumer(opts *bind.CallOpts, client common.Address, subscriptionId uint64) (IFunctionsSubscriptionsConsumer, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "getConsumer", client, subscriptionId) + + if err != nil { + return *new(IFunctionsSubscriptionsConsumer), err + } + + out0 := *abi.ConvertType(out[0], new(IFunctionsSubscriptionsConsumer)).(*IFunctionsSubscriptionsConsumer) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) GetConsumer(client common.Address, subscriptionId uint64) (IFunctionsSubscriptionsConsumer, error) { + return _FunctionsRouter.Contract.GetConsumer(&_FunctionsRouter.CallOpts, client, subscriptionId) +} 
+ +func (_FunctionsRouter *FunctionsRouterCallerSession) GetConsumer(client common.Address, subscriptionId uint64) (IFunctionsSubscriptionsConsumer, error) { + return _FunctionsRouter.Contract.GetConsumer(&_FunctionsRouter.CallOpts, client, subscriptionId) +} + +func (_FunctionsRouter *FunctionsRouterCaller) GetContractById(opts *bind.CallOpts, id [32]byte) (common.Address, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "getContractById", id) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) GetContractById(id [32]byte) (common.Address, error) { + return _FunctionsRouter.Contract.GetContractById(&_FunctionsRouter.CallOpts, id) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) GetContractById(id [32]byte) (common.Address, error) { + return _FunctionsRouter.Contract.GetContractById(&_FunctionsRouter.CallOpts, id) +} + +func (_FunctionsRouter *FunctionsRouterCaller) GetFlags(opts *bind.CallOpts, subscriptionId uint64) ([32]byte, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "getFlags", subscriptionId) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) GetFlags(subscriptionId uint64) ([32]byte, error) { + return _FunctionsRouter.Contract.GetFlags(&_FunctionsRouter.CallOpts, subscriptionId) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) GetFlags(subscriptionId uint64) ([32]byte, error) { + return _FunctionsRouter.Contract.GetFlags(&_FunctionsRouter.CallOpts, subscriptionId) +} + +func (_FunctionsRouter *FunctionsRouterCaller) GetProposedContractById(opts *bind.CallOpts, id [32]byte) (common.Address, error) { + var out []interface{} + err := 
_FunctionsRouter.contract.Call(opts, &out, "getProposedContractById", id) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) GetProposedContractById(id [32]byte) (common.Address, error) { + return _FunctionsRouter.Contract.GetProposedContractById(&_FunctionsRouter.CallOpts, id) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) GetProposedContractById(id [32]byte) (common.Address, error) { + return _FunctionsRouter.Contract.GetProposedContractById(&_FunctionsRouter.CallOpts, id) +} + +func (_FunctionsRouter *FunctionsRouterCaller) GetProposedContractSet(opts *bind.CallOpts) ([][32]byte, []common.Address, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "getProposedContractSet") + + if err != nil { + return *new([][32]byte), *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([][32]byte)).(*[][32]byte) + out1 := *abi.ConvertType(out[1], new([]common.Address)).(*[]common.Address) + + return out0, out1, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) GetProposedContractSet() ([][32]byte, []common.Address, error) { + return _FunctionsRouter.Contract.GetProposedContractSet(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) GetProposedContractSet() ([][32]byte, []common.Address, error) { + return _FunctionsRouter.Contract.GetProposedContractSet(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCaller) GetSubscription(opts *bind.CallOpts, subscriptionId uint64) (IFunctionsSubscriptionsSubscription, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "getSubscription", subscriptionId) + + if err != nil { + return *new(IFunctionsSubscriptionsSubscription), err + } + + out0 := *abi.ConvertType(out[0], 
new(IFunctionsSubscriptionsSubscription)).(*IFunctionsSubscriptionsSubscription) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) GetSubscription(subscriptionId uint64) (IFunctionsSubscriptionsSubscription, error) { + return _FunctionsRouter.Contract.GetSubscription(&_FunctionsRouter.CallOpts, subscriptionId) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) GetSubscription(subscriptionId uint64) (IFunctionsSubscriptionsSubscription, error) { + return _FunctionsRouter.Contract.GetSubscription(&_FunctionsRouter.CallOpts, subscriptionId) +} + +func (_FunctionsRouter *FunctionsRouterCaller) GetSubscriptionCount(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "getSubscriptionCount") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) GetSubscriptionCount() (uint64, error) { + return _FunctionsRouter.Contract.GetSubscriptionCount(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) GetSubscriptionCount() (uint64, error) { + return _FunctionsRouter.Contract.GetSubscriptionCount(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCaller) GetSubscriptionsInRange(opts *bind.CallOpts, subscriptionIdStart uint64, subscriptionIdEnd uint64) ([]IFunctionsSubscriptionsSubscription, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "getSubscriptionsInRange", subscriptionIdStart, subscriptionIdEnd) + + if err != nil { + return *new([]IFunctionsSubscriptionsSubscription), err + } + + out0 := *abi.ConvertType(out[0], new([]IFunctionsSubscriptionsSubscription)).(*[]IFunctionsSubscriptionsSubscription) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) GetSubscriptionsInRange(subscriptionIdStart uint64, subscriptionIdEnd uint64) 
([]IFunctionsSubscriptionsSubscription, error) { + return _FunctionsRouter.Contract.GetSubscriptionsInRange(&_FunctionsRouter.CallOpts, subscriptionIdStart, subscriptionIdEnd) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) GetSubscriptionsInRange(subscriptionIdStart uint64, subscriptionIdEnd uint64) ([]IFunctionsSubscriptionsSubscription, error) { + return _FunctionsRouter.Contract.GetSubscriptionsInRange(&_FunctionsRouter.CallOpts, subscriptionIdStart, subscriptionIdEnd) +} + +func (_FunctionsRouter *FunctionsRouterCaller) GetTotalBalance(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "getTotalBalance") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) GetTotalBalance() (*big.Int, error) { + return _FunctionsRouter.Contract.GetTotalBalance(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) GetTotalBalance() (*big.Int, error) { + return _FunctionsRouter.Contract.GetTotalBalance(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCaller) IsValidCallbackGasLimit(opts *bind.CallOpts, subscriptionId uint64, callbackGasLimit uint32) error { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "isValidCallbackGasLimit", subscriptionId, callbackGasLimit) + + if err != nil { + return err + } + + return err + +} + +func (_FunctionsRouter *FunctionsRouterSession) IsValidCallbackGasLimit(subscriptionId uint64, callbackGasLimit uint32) error { + return _FunctionsRouter.Contract.IsValidCallbackGasLimit(&_FunctionsRouter.CallOpts, subscriptionId, callbackGasLimit) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) IsValidCallbackGasLimit(subscriptionId uint64, callbackGasLimit uint32) error { + return 
_FunctionsRouter.Contract.IsValidCallbackGasLimit(&_FunctionsRouter.CallOpts, subscriptionId, callbackGasLimit) +} + +func (_FunctionsRouter *FunctionsRouterCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) Owner() (common.Address, error) { + return _FunctionsRouter.Contract.Owner(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) Owner() (common.Address, error) { + return _FunctionsRouter.Contract.Owner(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCaller) Paused(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "paused") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) Paused() (bool, error) { + return _FunctionsRouter.Contract.Paused(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) Paused() (bool, error) { + return _FunctionsRouter.Contract.Paused(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCaller) PendingRequestExists(opts *bind.CallOpts, subscriptionId uint64) (bool, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "pendingRequestExists", subscriptionId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) PendingRequestExists(subscriptionId uint64) (bool, error) { + return _FunctionsRouter.Contract.PendingRequestExists(&_FunctionsRouter.CallOpts, subscriptionId) +} + +func 
(_FunctionsRouter *FunctionsRouterCallerSession) PendingRequestExists(subscriptionId uint64) (bool, error) { + return _FunctionsRouter.Contract.PendingRequestExists(&_FunctionsRouter.CallOpts, subscriptionId) +} + +func (_FunctionsRouter *FunctionsRouterCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _FunctionsRouter.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_FunctionsRouter *FunctionsRouterSession) TypeAndVersion() (string, error) { + return _FunctionsRouter.Contract.TypeAndVersion(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterCallerSession) TypeAndVersion() (string, error) { + return _FunctionsRouter.Contract.TypeAndVersion(&_FunctionsRouter.CallOpts) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "acceptOwnership") +} + +func (_FunctionsRouter *FunctionsRouterSession) AcceptOwnership() (*types.Transaction, error) { + return _FunctionsRouter.Contract.AcceptOwnership(&_FunctionsRouter.TransactOpts) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _FunctionsRouter.Contract.AcceptOwnership(&_FunctionsRouter.TransactOpts) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subscriptionId uint64) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "acceptSubscriptionOwnerTransfer", subscriptionId) +} + +func (_FunctionsRouter *FunctionsRouterSession) AcceptSubscriptionOwnerTransfer(subscriptionId uint64) (*types.Transaction, error) { + return _FunctionsRouter.Contract.AcceptSubscriptionOwnerTransfer(&_FunctionsRouter.TransactOpts, subscriptionId) +} + +func 
(_FunctionsRouter *FunctionsRouterTransactorSession) AcceptSubscriptionOwnerTransfer(subscriptionId uint64) (*types.Transaction, error) { + return _FunctionsRouter.Contract.AcceptSubscriptionOwnerTransfer(&_FunctionsRouter.TransactOpts, subscriptionId) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) AddConsumer(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "addConsumer", subscriptionId, consumer) +} + +func (_FunctionsRouter *FunctionsRouterSession) AddConsumer(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.AddConsumer(&_FunctionsRouter.TransactOpts, subscriptionId, consumer) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) AddConsumer(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.AddConsumer(&_FunctionsRouter.TransactOpts, subscriptionId, consumer) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) CancelSubscription(opts *bind.TransactOpts, subscriptionId uint64, to common.Address) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "cancelSubscription", subscriptionId, to) +} + +func (_FunctionsRouter *FunctionsRouterSession) CancelSubscription(subscriptionId uint64, to common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.CancelSubscription(&_FunctionsRouter.TransactOpts, subscriptionId, to) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) CancelSubscription(subscriptionId uint64, to common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.CancelSubscription(&_FunctionsRouter.TransactOpts, subscriptionId, to) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, 
"createSubscription") +} + +func (_FunctionsRouter *FunctionsRouterSession) CreateSubscription() (*types.Transaction, error) { + return _FunctionsRouter.Contract.CreateSubscription(&_FunctionsRouter.TransactOpts) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) CreateSubscription() (*types.Transaction, error) { + return _FunctionsRouter.Contract.CreateSubscription(&_FunctionsRouter.TransactOpts) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) CreateSubscriptionWithConsumer(opts *bind.TransactOpts, consumer common.Address) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "createSubscriptionWithConsumer", consumer) +} + +func (_FunctionsRouter *FunctionsRouterSession) CreateSubscriptionWithConsumer(consumer common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.CreateSubscriptionWithConsumer(&_FunctionsRouter.TransactOpts, consumer) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) CreateSubscriptionWithConsumer(consumer common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.CreateSubscriptionWithConsumer(&_FunctionsRouter.TransactOpts, consumer) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) Fulfill(opts *bind.TransactOpts, response []byte, err []byte, juelsPerGas *big.Int, costWithoutFulfillment *big.Int, transmitter common.Address, commitment FunctionsResponseCommitment) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "fulfill", response, err, juelsPerGas, costWithoutFulfillment, transmitter, commitment) +} + +func (_FunctionsRouter *FunctionsRouterSession) Fulfill(response []byte, err []byte, juelsPerGas *big.Int, costWithoutFulfillment *big.Int, transmitter common.Address, commitment FunctionsResponseCommitment) (*types.Transaction, error) { + return _FunctionsRouter.Contract.Fulfill(&_FunctionsRouter.TransactOpts, response, err, juelsPerGas, costWithoutFulfillment, transmitter, commitment) +} + +func 
(_FunctionsRouter *FunctionsRouterTransactorSession) Fulfill(response []byte, err []byte, juelsPerGas *big.Int, costWithoutFulfillment *big.Int, transmitter common.Address, commitment FunctionsResponseCommitment) (*types.Transaction, error) { + return _FunctionsRouter.Contract.Fulfill(&_FunctionsRouter.TransactOpts, response, err, juelsPerGas, costWithoutFulfillment, transmitter, commitment) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "onTokenTransfer", arg0, amount, data) +} + +func (_FunctionsRouter *FunctionsRouterSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _FunctionsRouter.Contract.OnTokenTransfer(&_FunctionsRouter.TransactOpts, arg0, amount, data) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _FunctionsRouter.Contract.OnTokenTransfer(&_FunctionsRouter.TransactOpts, arg0, amount, data) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "oracleWithdraw", recipient, amount) +} + +func (_FunctionsRouter *FunctionsRouterSession) OracleWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsRouter.Contract.OracleWithdraw(&_FunctionsRouter.TransactOpts, recipient, amount) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) OracleWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsRouter.Contract.OracleWithdraw(&_FunctionsRouter.TransactOpts, recipient, amount) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) 
OwnerCancelSubscription(opts *bind.TransactOpts, subscriptionId uint64) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "ownerCancelSubscription", subscriptionId) +} + +func (_FunctionsRouter *FunctionsRouterSession) OwnerCancelSubscription(subscriptionId uint64) (*types.Transaction, error) { + return _FunctionsRouter.Contract.OwnerCancelSubscription(&_FunctionsRouter.TransactOpts, subscriptionId) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) OwnerCancelSubscription(subscriptionId uint64) (*types.Transaction, error) { + return _FunctionsRouter.Contract.OwnerCancelSubscription(&_FunctionsRouter.TransactOpts, subscriptionId) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) OwnerWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "ownerWithdraw", recipient, amount) +} + +func (_FunctionsRouter *FunctionsRouterSession) OwnerWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsRouter.Contract.OwnerWithdraw(&_FunctionsRouter.TransactOpts, recipient, amount) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) OwnerWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsRouter.Contract.OwnerWithdraw(&_FunctionsRouter.TransactOpts, recipient, amount) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "pause") +} + +func (_FunctionsRouter *FunctionsRouterSession) Pause() (*types.Transaction, error) { + return _FunctionsRouter.Contract.Pause(&_FunctionsRouter.TransactOpts) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) Pause() (*types.Transaction, error) { + return _FunctionsRouter.Contract.Pause(&_FunctionsRouter.TransactOpts) +} + +func (_FunctionsRouter 
*FunctionsRouterTransactor) ProposeContractsUpdate(opts *bind.TransactOpts, proposedContractSetIds [][32]byte, proposedContractSetAddresses []common.Address) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "proposeContractsUpdate", proposedContractSetIds, proposedContractSetAddresses) +} + +func (_FunctionsRouter *FunctionsRouterSession) ProposeContractsUpdate(proposedContractSetIds [][32]byte, proposedContractSetAddresses []common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.ProposeContractsUpdate(&_FunctionsRouter.TransactOpts, proposedContractSetIds, proposedContractSetAddresses) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) ProposeContractsUpdate(proposedContractSetIds [][32]byte, proposedContractSetAddresses []common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.ProposeContractsUpdate(&_FunctionsRouter.TransactOpts, proposedContractSetIds, proposedContractSetAddresses) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) ProposeSubscriptionOwnerTransfer(opts *bind.TransactOpts, subscriptionId uint64, newOwner common.Address) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "proposeSubscriptionOwnerTransfer", subscriptionId, newOwner) +} + +func (_FunctionsRouter *FunctionsRouterSession) ProposeSubscriptionOwnerTransfer(subscriptionId uint64, newOwner common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.ProposeSubscriptionOwnerTransfer(&_FunctionsRouter.TransactOpts, subscriptionId, newOwner) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) ProposeSubscriptionOwnerTransfer(subscriptionId uint64, newOwner common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.ProposeSubscriptionOwnerTransfer(&_FunctionsRouter.TransactOpts, subscriptionId, newOwner) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) RecoverFunds(opts *bind.TransactOpts, to 
// NOTE(review): auto-generated abigen bindings for the FunctionsRouter contract, embedded in a diff hunk
// whose line structure was mangled by extraction (many file lines crammed per physical line, separated by " + ").
// Do not hand-edit — regenerate with abigen. This span: Transactor/Session/TransactorSession wrappers for
// recoverFunds (tail, head cut off above), removeConsumer, sendRequest, sendRequestToProposed, setAllowListId,
// setFlags, timeoutRequests, and the head of transferOwnership. Each Session/TransactorSession method simply
// delegates to the Transactor method using the stored TransactOpts.
common.Address) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "recoverFunds", to) +} + +func (_FunctionsRouter *FunctionsRouterSession) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.RecoverFunds(&_FunctionsRouter.TransactOpts, to) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.RecoverFunds(&_FunctionsRouter.TransactOpts, to) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) RemoveConsumer(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "removeConsumer", subscriptionId, consumer) +} + +func (_FunctionsRouter *FunctionsRouterSession) RemoveConsumer(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.RemoveConsumer(&_FunctionsRouter.TransactOpts, subscriptionId, consumer) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) RemoveConsumer(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.RemoveConsumer(&_FunctionsRouter.TransactOpts, subscriptionId, consumer) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) SendRequest(opts *bind.TransactOpts, subscriptionId uint64, data []byte, dataVersion uint16, callbackGasLimit uint32, donId [32]byte) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "sendRequest", subscriptionId, data, dataVersion, callbackGasLimit, donId) +} + +func (_FunctionsRouter *FunctionsRouterSession) SendRequest(subscriptionId uint64, data []byte, dataVersion uint16, callbackGasLimit uint32, donId [32]byte) (*types.Transaction, error) { + return _FunctionsRouter.Contract.SendRequest(&_FunctionsRouter.TransactOpts, subscriptionId, data, dataVersion, callbackGasLimit, donId) +} + 
+func (_FunctionsRouter *FunctionsRouterTransactorSession) SendRequest(subscriptionId uint64, data []byte, dataVersion uint16, callbackGasLimit uint32, donId [32]byte) (*types.Transaction, error) { + return _FunctionsRouter.Contract.SendRequest(&_FunctionsRouter.TransactOpts, subscriptionId, data, dataVersion, callbackGasLimit, donId) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) SendRequestToProposed(opts *bind.TransactOpts, subscriptionId uint64, data []byte, dataVersion uint16, callbackGasLimit uint32, donId [32]byte) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "sendRequestToProposed", subscriptionId, data, dataVersion, callbackGasLimit, donId) +} + +func (_FunctionsRouter *FunctionsRouterSession) SendRequestToProposed(subscriptionId uint64, data []byte, dataVersion uint16, callbackGasLimit uint32, donId [32]byte) (*types.Transaction, error) { + return _FunctionsRouter.Contract.SendRequestToProposed(&_FunctionsRouter.TransactOpts, subscriptionId, data, dataVersion, callbackGasLimit, donId) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) SendRequestToProposed(subscriptionId uint64, data []byte, dataVersion uint16, callbackGasLimit uint32, donId [32]byte) (*types.Transaction, error) { + return _FunctionsRouter.Contract.SendRequestToProposed(&_FunctionsRouter.TransactOpts, subscriptionId, data, dataVersion, callbackGasLimit, donId) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) SetAllowListId(opts *bind.TransactOpts, allowListId [32]byte) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "setAllowListId", allowListId) +} + +func (_FunctionsRouter *FunctionsRouterSession) SetAllowListId(allowListId [32]byte) (*types.Transaction, error) { + return _FunctionsRouter.Contract.SetAllowListId(&_FunctionsRouter.TransactOpts, allowListId) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) SetAllowListId(allowListId [32]byte) (*types.Transaction, error) { + 
return _FunctionsRouter.Contract.SetAllowListId(&_FunctionsRouter.TransactOpts, allowListId) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) SetFlags(opts *bind.TransactOpts, subscriptionId uint64, flags [32]byte) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "setFlags", subscriptionId, flags) +} + +func (_FunctionsRouter *FunctionsRouterSession) SetFlags(subscriptionId uint64, flags [32]byte) (*types.Transaction, error) { + return _FunctionsRouter.Contract.SetFlags(&_FunctionsRouter.TransactOpts, subscriptionId, flags) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) SetFlags(subscriptionId uint64, flags [32]byte) (*types.Transaction, error) { + return _FunctionsRouter.Contract.SetFlags(&_FunctionsRouter.TransactOpts, subscriptionId, flags) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) TimeoutRequests(opts *bind.TransactOpts, requestsToTimeoutByCommitment []FunctionsResponseCommitment) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "timeoutRequests", requestsToTimeoutByCommitment) +} + +func (_FunctionsRouter *FunctionsRouterSession) TimeoutRequests(requestsToTimeoutByCommitment []FunctionsResponseCommitment) (*types.Transaction, error) { + return _FunctionsRouter.Contract.TimeoutRequests(&_FunctionsRouter.TransactOpts, requestsToTimeoutByCommitment) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) TimeoutRequests(requestsToTimeoutByCommitment []FunctionsResponseCommitment) (*types.Transaction, error) { + return _FunctionsRouter.Contract.TimeoutRequests(&_FunctionsRouter.TransactOpts, requestsToTimeoutByCommitment) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "transferOwnership", to) +} + +func (_FunctionsRouter *FunctionsRouterSession) TransferOwnership(to common.Address) (*types.Transaction, 
// NOTE(review): abigen-generated bindings (mangled diff hunk — see note at top of this section; regenerate,
// do not hand-edit). This span: tail of TransferOwnership, then Unpause/UpdateConfig/UpdateContracts wrapper
// triplets; then the ConfigUpdated event plumbing — iterator type, Next/Error/Close, the event struct
// (single unnamed Config arg, hence field name Arg0), FilterConfigUpdated/WatchConfigUpdated (head only;
// body continues on the next span). Next() drains buffered logs after the subscription has ended (done==true)
// before reporting exhaustion; on a subscription error it marks done and retries once via it.Next().
error) { + return _FunctionsRouter.Contract.TransferOwnership(&_FunctionsRouter.TransactOpts, to) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _FunctionsRouter.Contract.TransferOwnership(&_FunctionsRouter.TransactOpts, to) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "unpause") +} + +func (_FunctionsRouter *FunctionsRouterSession) Unpause() (*types.Transaction, error) { + return _FunctionsRouter.Contract.Unpause(&_FunctionsRouter.TransactOpts) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) Unpause() (*types.Transaction, error) { + return _FunctionsRouter.Contract.Unpause(&_FunctionsRouter.TransactOpts) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) UpdateConfig(opts *bind.TransactOpts, config FunctionsRouterConfig) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "updateConfig", config) +} + +func (_FunctionsRouter *FunctionsRouterSession) UpdateConfig(config FunctionsRouterConfig) (*types.Transaction, error) { + return _FunctionsRouter.Contract.UpdateConfig(&_FunctionsRouter.TransactOpts, config) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) UpdateConfig(config FunctionsRouterConfig) (*types.Transaction, error) { + return _FunctionsRouter.Contract.UpdateConfig(&_FunctionsRouter.TransactOpts, config) +} + +func (_FunctionsRouter *FunctionsRouterTransactor) UpdateContracts(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsRouter.contract.Transact(opts, "updateContracts") +} + +func (_FunctionsRouter *FunctionsRouterSession) UpdateContracts() (*types.Transaction, error) { + return _FunctionsRouter.Contract.UpdateContracts(&_FunctionsRouter.TransactOpts) +} + +func (_FunctionsRouter *FunctionsRouterTransactorSession) UpdateContracts() 
(*types.Transaction, error) { + return _FunctionsRouter.Contract.UpdateContracts(&_FunctionsRouter.TransactOpts) +} + +type FunctionsRouterConfigUpdatedIterator struct { + Event *FunctionsRouterConfigUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterConfigUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterConfigUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterConfigUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterConfigUpdatedIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterConfigUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterConfigUpdated struct { + Arg0 FunctionsRouterConfig + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterConfigUpdated(opts *bind.FilterOpts) (*FunctionsRouterConfigUpdatedIterator, error) { + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "ConfigUpdated") + if err != nil { + return nil, err + } + return &FunctionsRouterConfigUpdatedIterator{contract: _FunctionsRouter.contract, event: "ConfigUpdated", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchConfigUpdated(opts *bind.WatchOpts, sink chan<- *FunctionsRouterConfigUpdated) (event.Subscription, error) { + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "ConfigUpdated") + if err != nil { + 
return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterConfigUpdated) + if err := _FunctionsRouter.contract.UnpackLog(event, "ConfigUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseConfigUpdated(log types.Log) (*FunctionsRouterConfigUpdated, error) { + event := new(FunctionsRouterConfigUpdated) + if err := _FunctionsRouter.contract.UnpackLog(event, "ConfigUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterContractProposedIterator struct { + Event *FunctionsRouterContractProposed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterContractProposedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterContractProposed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterContractProposed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterContractProposedIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterContractProposedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
// NOTE(review): abigen-generated bindings (mangled diff hunk; regenerate, do not hand-edit). This span:
// the ContractProposed event struct plus Filter/Watch/Parse; the full ContractUpdated event plumbing
// (iterator, Next/Error/Close, struct with Id/From/To, Filter/Watch); and the tail of WatchContractUpdated
// plus ParseContractUpdated; ends with the head of the FundsRecovered iterator. The Watch* subscription
// loops unpack each log, forward to the sink, and exit on subscription error or quit signal.
FunctionsRouterContractProposed struct { + ProposedContractSetId [32]byte + ProposedContractSetFromAddress common.Address + ProposedContractSetToAddress common.Address + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterContractProposed(opts *bind.FilterOpts) (*FunctionsRouterContractProposedIterator, error) { + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "ContractProposed") + if err != nil { + return nil, err + } + return &FunctionsRouterContractProposedIterator{contract: _FunctionsRouter.contract, event: "ContractProposed", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchContractProposed(opts *bind.WatchOpts, sink chan<- *FunctionsRouterContractProposed) (event.Subscription, error) { + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "ContractProposed") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterContractProposed) + if err := _FunctionsRouter.contract.UnpackLog(event, "ContractProposed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseContractProposed(log types.Log) (*FunctionsRouterContractProposed, error) { + event := new(FunctionsRouterContractProposed) + if err := _FunctionsRouter.contract.UnpackLog(event, "ContractProposed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterContractUpdatedIterator struct { + Event *FunctionsRouterContractUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*FunctionsRouterContractUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterContractUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterContractUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterContractUpdatedIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterContractUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterContractUpdated struct { + Id [32]byte + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterContractUpdated(opts *bind.FilterOpts) (*FunctionsRouterContractUpdatedIterator, error) { + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "ContractUpdated") + if err != nil { + return nil, err + } + return &FunctionsRouterContractUpdatedIterator{contract: _FunctionsRouter.contract, event: "ContractUpdated", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchContractUpdated(opts *bind.WatchOpts, sink chan<- *FunctionsRouterContractUpdated) (event.Subscription, error) { + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "ContractUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterContractUpdated) + if err := _FunctionsRouter.contract.UnpackLog(event, "ContractUpdated", log); err != nil 
{ + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseContractUpdated(log types.Log) (*FunctionsRouterContractUpdated, error) { + event := new(FunctionsRouterContractUpdated) + if err := _FunctionsRouter.contract.UnpackLog(event, "ContractUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterFundsRecoveredIterator struct { + Event *FunctionsRouterFundsRecovered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterFundsRecoveredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterFundsRecoveredIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterFundsRecoveredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterFundsRecovered struct { + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterFundsRecovered(opts *bind.FilterOpts) (*FunctionsRouterFundsRecoveredIterator, error) { + + logs, sub, err := 
// NOTE(review): abigen-generated bindings (mangled diff hunk; regenerate, do not hand-edit). This span:
// tail of FilterFundsRecovered, WatchFundsRecovered/ParseFundsRecovered; then the OwnershipTransferRequested
// event plumbing — iterator, Next/Error/Close, struct (From/To), and Filter/Watch, which build []interface{}
// topic-filter rules from the indexed from/to address slices; ends inside the OwnershipTransferred iterator.
_FunctionsRouter.contract.FilterLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return &FunctionsRouterFundsRecoveredIterator{contract: _FunctionsRouter.contract, event: "FundsRecovered", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *FunctionsRouterFundsRecovered) (event.Subscription, error) { + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterFundsRecovered) + if err := _FunctionsRouter.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseFundsRecovered(log types.Log) (*FunctionsRouterFundsRecovered, error) { + event := new(FunctionsRouterFundsRecovered) + if err := _FunctionsRouter.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterOwnershipTransferRequestedIterator struct { + Event *FunctionsRouterOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + 
default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsRouterOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsRouterOwnershipTransferRequestedIterator{contract: _FunctionsRouter.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsRouterOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err 
!= nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterOwnershipTransferRequested) + if err := _FunctionsRouter.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseOwnershipTransferRequested(log types.Log) (*FunctionsRouterOwnershipTransferRequested, error) { + event := new(FunctionsRouterOwnershipTransferRequested) + if err := _FunctionsRouter.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterOwnershipTransferredIterator struct { + Event *FunctionsRouterOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterOwnershipTransferredIterator) Error() error { + return it.fail +} + +func 
// NOTE(review): abigen-generated bindings (mangled diff hunk; regenerate, do not hand-edit). This span:
// OwnershipTransferred Close/struct/Filter/Watch (indexed from/to topic rules) and Parse; the full Paused
// event plumbing (iterator, struct with Account, Filter/Watch/Parse — Paused has no indexed args, so no
// topic rules); ends inside the RequestNotProcessed iterator's Next().
(it *FunctionsRouterOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsRouterOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsRouterOwnershipTransferredIterator{contract: _FunctionsRouter.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsRouterOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterOwnershipTransferred) + if err := _FunctionsRouter.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + 
} + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseOwnershipTransferred(log types.Log) (*FunctionsRouterOwnershipTransferred, error) { + event := new(FunctionsRouterOwnershipTransferred) + if err := _FunctionsRouter.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterPausedIterator struct { + Event *FunctionsRouterPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterPausedIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterPaused struct { + Account common.Address + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterPaused(opts *bind.FilterOpts) (*FunctionsRouterPausedIterator, error) { + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &FunctionsRouterPausedIterator{contract: _FunctionsRouter.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- 
*FunctionsRouterPaused) (event.Subscription, error) { + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterPaused) + if err := _FunctionsRouter.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParsePaused(log types.Log) (*FunctionsRouterPaused, error) { + event := new(FunctionsRouterPaused) + if err := _FunctionsRouter.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterRequestNotProcessedIterator struct { + Event *FunctionsRouterRequestNotProcessed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterRequestNotProcessedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterRequestNotProcessed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterRequestNotProcessed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterRequestNotProcessedIterator) Error() error { + return it.fail 
// NOTE(review): abigen-generated bindings (mangled diff hunk; regenerate, do not hand-edit). This span:
// RequestNotProcessed Close/struct/Filter/Watch/Parse (indexed requestId topic rule) and the full
// RequestProcessed event plumbing (iterator, struct with RequestId/SubscriptionId/TotalCostJuels/
// Transmitter/ResultCode/Response/Err/CallbackReturnData, Filter/Watch with indexed requestId and
// subscriptionId topic rules, Parse).
+} + +func (it *FunctionsRouterRequestNotProcessedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterRequestNotProcessed struct { + RequestId [32]byte + Coordinator common.Address + Transmitter common.Address + ResultCode uint8 + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterRequestNotProcessed(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsRouterRequestNotProcessedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "RequestNotProcessed", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsRouterRequestNotProcessedIterator{contract: _FunctionsRouter.contract, event: "RequestNotProcessed", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchRequestNotProcessed(opts *bind.WatchOpts, sink chan<- *FunctionsRouterRequestNotProcessed, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "RequestNotProcessed", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterRequestNotProcessed) + if err := _FunctionsRouter.contract.UnpackLog(event, "RequestNotProcessed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseRequestNotProcessed(log types.Log) 
(*FunctionsRouterRequestNotProcessed, error) { + event := new(FunctionsRouterRequestNotProcessed) + if err := _FunctionsRouter.contract.UnpackLog(event, "RequestNotProcessed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterRequestProcessedIterator struct { + Event *FunctionsRouterRequestProcessed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterRequestProcessedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterRequestProcessed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterRequestProcessed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterRequestProcessedIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterRequestProcessedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterRequestProcessed struct { + RequestId [32]byte + SubscriptionId uint64 + TotalCostJuels *big.Int + Transmitter common.Address + ResultCode uint8 + Response []byte + Err []byte + CallbackReturnData []byte + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterRequestProcessed(opts *bind.FilterOpts, requestId [][32]byte, subscriptionId []uint64) (*FunctionsRouterRequestProcessedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var 
subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "RequestProcessed", requestIdRule, subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsRouterRequestProcessedIterator{contract: _FunctionsRouter.contract, event: "RequestProcessed", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchRequestProcessed(opts *bind.WatchOpts, sink chan<- *FunctionsRouterRequestProcessed, requestId [][32]byte, subscriptionId []uint64) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "RequestProcessed", requestIdRule, subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterRequestProcessed) + if err := _FunctionsRouter.contract.UnpackLog(event, "RequestProcessed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseRequestProcessed(log types.Log) (*FunctionsRouterRequestProcessed, error) { + event := new(FunctionsRouterRequestProcessed) + if err := _FunctionsRouter.contract.UnpackLog(event, "RequestProcessed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + 
// NOTE(review): abigen-generated bindings (mangled diff hunk; regenerate, do not hand-edit). This span:
// the full RequestStart event plumbing (iterator, struct with RequestId/DonId/SubscriptionId/
// SubscriptionOwner/RequestingContract/RequestInitiator/Data/DataVersion/CallbackGasLimit/
// EstimatedTotalCostJuels, Filter/Watch with three indexed topic rules — requestId, donId,
// subscriptionId — and Parse); then the RequestTimedOut iterator/struct/Filter (indexed requestId).
// The final WatchRequestTimedOut signature continues past this chunk and is preserved as-is.
+type FunctionsRouterRequestStartIterator struct { + Event *FunctionsRouterRequestStart + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterRequestStartIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterRequestStart) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterRequestStart) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterRequestStartIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterRequestStartIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterRequestStart struct { + RequestId [32]byte + DonId [32]byte + SubscriptionId uint64 + SubscriptionOwner common.Address + RequestingContract common.Address + RequestInitiator common.Address + Data []byte + DataVersion uint16 + CallbackGasLimit uint32 + EstimatedTotalCostJuels *big.Int + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterRequestStart(opts *bind.FilterOpts, requestId [][32]byte, donId [][32]byte, subscriptionId []uint64) (*FunctionsRouterRequestStartIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var donIdRule []interface{} + for _, donIdItem := range donId { + donIdRule = append(donIdRule, donIdItem) + } + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + 
subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "RequestStart", requestIdRule, donIdRule, subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsRouterRequestStartIterator{contract: _FunctionsRouter.contract, event: "RequestStart", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchRequestStart(opts *bind.WatchOpts, sink chan<- *FunctionsRouterRequestStart, requestId [][32]byte, donId [][32]byte, subscriptionId []uint64) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var donIdRule []interface{} + for _, donIdItem := range donId { + donIdRule = append(donIdRule, donIdItem) + } + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "RequestStart", requestIdRule, donIdRule, subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterRequestStart) + if err := _FunctionsRouter.contract.UnpackLog(event, "RequestStart", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseRequestStart(log types.Log) (*FunctionsRouterRequestStart, error) { + event := new(FunctionsRouterRequestStart) + if err := _FunctionsRouter.contract.UnpackLog(event, "RequestStart", log); err != nil { + return nil, err + } + event.Raw = log + return 
event, nil +} + +type FunctionsRouterRequestTimedOutIterator struct { + Event *FunctionsRouterRequestTimedOut + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterRequestTimedOutIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterRequestTimedOut) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterRequestTimedOut) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterRequestTimedOutIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterRequestTimedOutIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterRequestTimedOut struct { + RequestId [32]byte + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterRequestTimedOut(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsRouterRequestTimedOutIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "RequestTimedOut", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsRouterRequestTimedOutIterator{contract: _FunctionsRouter.contract, event: "RequestTimedOut", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchRequestTimedOut(opts *bind.WatchOpts, sink chan<- *FunctionsRouterRequestTimedOut, requestId [][32]byte) 
(event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "RequestTimedOut", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterRequestTimedOut) + if err := _FunctionsRouter.contract.UnpackLog(event, "RequestTimedOut", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseRequestTimedOut(log types.Log) (*FunctionsRouterRequestTimedOut, error) { + event := new(FunctionsRouterRequestTimedOut) + if err := _FunctionsRouter.contract.UnpackLog(event, "RequestTimedOut", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterSubscriptionCanceledIterator struct { + Event *FunctionsRouterSubscriptionCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterSubscriptionCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = 
log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterSubscriptionCanceledIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterSubscriptionCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterSubscriptionCanceled struct { + SubscriptionId uint64 + FundsRecipient common.Address + FundsAmount *big.Int + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterSubscriptionCanceled(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsRouterSubscriptionCanceledIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "SubscriptionCanceled", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsRouterSubscriptionCanceledIterator{contract: _FunctionsRouter.contract, event: "SubscriptionCanceled", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionCanceled, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "SubscriptionCanceled", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterSubscriptionCanceled) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + 
case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseSubscriptionCanceled(log types.Log) (*FunctionsRouterSubscriptionCanceled, error) { + event := new(FunctionsRouterSubscriptionCanceled) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterSubscriptionConsumerAddedIterator struct { + Event *FunctionsRouterSubscriptionConsumerAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterSubscriptionConsumerAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterSubscriptionConsumerAddedIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterSubscriptionConsumerAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterSubscriptionConsumerAdded struct { + SubscriptionId uint64 + Consumer common.Address + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subscriptionId []uint64) 
(*FunctionsRouterSubscriptionConsumerAddedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "SubscriptionConsumerAdded", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsRouterSubscriptionConsumerAddedIterator{contract: _FunctionsRouter.contract, event: "SubscriptionConsumerAdded", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionConsumerAdded, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "SubscriptionConsumerAdded", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterSubscriptionConsumerAdded) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseSubscriptionConsumerAdded(log types.Log) (*FunctionsRouterSubscriptionConsumerAdded, error) { + event := new(FunctionsRouterSubscriptionConsumerAdded) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
FunctionsRouterSubscriptionConsumerRemovedIterator struct { + Event *FunctionsRouterSubscriptionConsumerRemoved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterSubscriptionConsumerRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterSubscriptionConsumerRemovedIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterSubscriptionConsumerRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterSubscriptionConsumerRemoved struct { + SubscriptionId uint64 + Consumer common.Address + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsRouterSubscriptionConsumerRemovedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "SubscriptionConsumerRemoved", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsRouterSubscriptionConsumerRemovedIterator{contract: _FunctionsRouter.contract, event: "SubscriptionConsumerRemoved", 
logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionConsumerRemoved, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "SubscriptionConsumerRemoved", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterSubscriptionConsumerRemoved) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseSubscriptionConsumerRemoved(log types.Log) (*FunctionsRouterSubscriptionConsumerRemoved, error) { + event := new(FunctionsRouterSubscriptionConsumerRemoved) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterSubscriptionCreatedIterator struct { + Event *FunctionsRouterSubscriptionCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterSubscriptionCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); 
err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterSubscriptionCreatedIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterSubscriptionCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterSubscriptionCreated struct { + SubscriptionId uint64 + Owner common.Address + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterSubscriptionCreated(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsRouterSubscriptionCreatedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "SubscriptionCreated", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsRouterSubscriptionCreatedIterator{contract: _FunctionsRouter.contract, event: "SubscriptionCreated", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionCreated, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "SubscriptionCreated", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { 
+ defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterSubscriptionCreated) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseSubscriptionCreated(log types.Log) (*FunctionsRouterSubscriptionCreated, error) { + event := new(FunctionsRouterSubscriptionCreated) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterSubscriptionFundedIterator struct { + Event *FunctionsRouterSubscriptionFunded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterSubscriptionFundedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterSubscriptionFundedIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterSubscriptionFundedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterSubscriptionFunded 
struct { + SubscriptionId uint64 + OldBalance *big.Int + NewBalance *big.Int + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterSubscriptionFunded(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsRouterSubscriptionFundedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "SubscriptionFunded", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsRouterSubscriptionFundedIterator{contract: _FunctionsRouter.contract, event: "SubscriptionFunded", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionFunded, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "SubscriptionFunded", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterSubscriptionFunded) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseSubscriptionFunded(log types.Log) (*FunctionsRouterSubscriptionFunded, error) { + event := new(FunctionsRouterSubscriptionFunded) + if err := 
_FunctionsRouter.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterSubscriptionOwnerTransferRequestedIterator struct { + Event *FunctionsRouterSubscriptionOwnerTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterSubscriptionOwnerTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterSubscriptionOwnerTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterSubscriptionOwnerTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterSubscriptionOwnerTransferRequested struct { + SubscriptionId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsRouterSubscriptionOwnerTransferRequestedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := 
_FunctionsRouter.contract.FilterLogs(opts, "SubscriptionOwnerTransferRequested", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsRouterSubscriptionOwnerTransferRequestedIterator{contract: _FunctionsRouter.contract, event: "SubscriptionOwnerTransferRequested", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionOwnerTransferRequested, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "SubscriptionOwnerTransferRequested", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterSubscriptionOwnerTransferRequested) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseSubscriptionOwnerTransferRequested(log types.Log) (*FunctionsRouterSubscriptionOwnerTransferRequested, error) { + event := new(FunctionsRouterSubscriptionOwnerTransferRequested) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterSubscriptionOwnerTransferredIterator struct { + Event *FunctionsRouterSubscriptionOwnerTransferred + + contract *bind.BoundContract + 
event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterSubscriptionOwnerTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterSubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsRouterSubscriptionOwnerTransferredIterator) Error() error { + return it.fail +} + +func (it *FunctionsRouterSubscriptionOwnerTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsRouterSubscriptionOwnerTransferred struct { + SubscriptionId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsRouter *FunctionsRouterFilterer) FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsRouterSubscriptionOwnerTransferredIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "SubscriptionOwnerTransferred", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsRouterSubscriptionOwnerTransferredIterator{contract: _FunctionsRouter.contract, event: "SubscriptionOwnerTransferred", logs: logs, sub: sub}, nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) WatchSubscriptionOwnerTransferred(opts 
*bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionOwnerTransferred, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "SubscriptionOwnerTransferred", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsRouterSubscriptionOwnerTransferred) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsRouter *FunctionsRouterFilterer) ParseSubscriptionOwnerTransferred(log types.Log) (*FunctionsRouterSubscriptionOwnerTransferred, error) { + event := new(FunctionsRouterSubscriptionOwnerTransferred) + if err := _FunctionsRouter.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsRouterUnpausedIterator struct { + Event *FunctionsRouterUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsRouterUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsRouterUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + 
// NOTE(review): abigen-generated binding code — do not hand-edit; regenerate
// from the FunctionsRouter ABI instead. Comments below are review aids only.

// (continuation of FunctionsRouterUnpausedIterator.Next: live-subscription
// branch — decode the incoming log or record the subscription error.)
it.Event = new(FunctionsRouterUnpaused)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		// Mark done and retry once so already-buffered logs are not lost.
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any error accumulated during iteration.
func (it *FunctionsRouterUnpausedIterator) Error() error {
	return it.fail
}

// Close terminates the underlying subscription; always returns nil.
func (it *FunctionsRouterUnpausedIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// FunctionsRouterUnpaused is the decoded Unpaused event payload plus the raw
// log it was unpacked from. Unpaused has no indexed parameters.
type FunctionsRouterUnpaused struct {
	Account common.Address
	Raw     types.Log
}

// FilterUnpaused opens a bounded historical filter over Unpaused logs.
func (_FunctionsRouter *FunctionsRouterFilterer) FilterUnpaused(opts *bind.FilterOpts) (*FunctionsRouterUnpausedIterator, error) {

	logs, sub, err := _FunctionsRouter.contract.FilterLogs(opts, "Unpaused")
	if err != nil {
		return nil, err
	}
	return &FunctionsRouterUnpausedIterator{contract: _FunctionsRouter.contract, event: "Unpaused", logs: logs, sub: sub}, nil
}

// WatchUnpaused subscribes to future Unpaused logs, decoding each and
// delivering it to sink until the subscription errs or is unsubscribed.
func (_FunctionsRouter *FunctionsRouterFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *FunctionsRouterUnpaused) (event.Subscription, error) {

	logs, sub, err := _FunctionsRouter.contract.WatchLogs(opts, "Unpaused")
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:
				// Decode the raw log into the typed event before delivery.
				event := new(FunctionsRouterUnpaused)
				if err := _FunctionsRouter.contract.UnpackLog(event, "Unpaused", log); err != nil {
					return err
				}
				event.Raw = log

				// Block on sink, but stay responsive to errors/cancellation.
				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseUnpaused decodes a single raw Unpaused log into its typed event,
// attaching the raw log for provenance.
func (_FunctionsRouter *FunctionsRouterFilterer) ParseUnpaused(log types.Log) (*FunctionsRouterUnpaused, error) {
	event := new(FunctionsRouterUnpaused)
	if err := _FunctionsRouter.contract.UnpackLog(event, "Unpaused", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// ParseLog routes a raw log to the matching typed parser by its topic0 hash.
// (Signature continues on the following chunk.)
func
// NOTE(review): abigen-generated binding code — do not hand-edit; regenerate
// from the FunctionsRouter ABI instead. Comments below are review aids only.

// (body of ParseLog; its `func` keyword sits at the end of the previous
// chunk.) Dispatches on log.Topics[0] — the event signature hash — to the
// matching typed Parse* method; unknown topics are an error.
(_FunctionsRouter *FunctionsRouter) ParseLog(log types.Log) (generated.AbigenLog, error) {
	switch log.Topics[0] {
	case _FunctionsRouter.abi.Events["ConfigUpdated"].ID:
		return _FunctionsRouter.ParseConfigUpdated(log)
	case _FunctionsRouter.abi.Events["ContractProposed"].ID:
		return _FunctionsRouter.ParseContractProposed(log)
	case _FunctionsRouter.abi.Events["ContractUpdated"].ID:
		return _FunctionsRouter.ParseContractUpdated(log)
	case _FunctionsRouter.abi.Events["FundsRecovered"].ID:
		return _FunctionsRouter.ParseFundsRecovered(log)
	case _FunctionsRouter.abi.Events["OwnershipTransferRequested"].ID:
		return _FunctionsRouter.ParseOwnershipTransferRequested(log)
	case _FunctionsRouter.abi.Events["OwnershipTransferred"].ID:
		return _FunctionsRouter.ParseOwnershipTransferred(log)
	case _FunctionsRouter.abi.Events["Paused"].ID:
		return _FunctionsRouter.ParsePaused(log)
	case _FunctionsRouter.abi.Events["RequestNotProcessed"].ID:
		return _FunctionsRouter.ParseRequestNotProcessed(log)
	case _FunctionsRouter.abi.Events["RequestProcessed"].ID:
		return _FunctionsRouter.ParseRequestProcessed(log)
	case _FunctionsRouter.abi.Events["RequestStart"].ID:
		return _FunctionsRouter.ParseRequestStart(log)
	case _FunctionsRouter.abi.Events["RequestTimedOut"].ID:
		return _FunctionsRouter.ParseRequestTimedOut(log)
	case _FunctionsRouter.abi.Events["SubscriptionCanceled"].ID:
		return _FunctionsRouter.ParseSubscriptionCanceled(log)
	case _FunctionsRouter.abi.Events["SubscriptionConsumerAdded"].ID:
		return _FunctionsRouter.ParseSubscriptionConsumerAdded(log)
	case _FunctionsRouter.abi.Events["SubscriptionConsumerRemoved"].ID:
		return _FunctionsRouter.ParseSubscriptionConsumerRemoved(log)
	case _FunctionsRouter.abi.Events["SubscriptionCreated"].ID:
		return _FunctionsRouter.ParseSubscriptionCreated(log)
	case _FunctionsRouter.abi.Events["SubscriptionFunded"].ID:
		return _FunctionsRouter.ParseSubscriptionFunded(log)
	case _FunctionsRouter.abi.Events["SubscriptionOwnerTransferRequested"].ID:
		return _FunctionsRouter.ParseSubscriptionOwnerTransferRequested(log)
	case _FunctionsRouter.abi.Events["SubscriptionOwnerTransferred"].ID:
		return _FunctionsRouter.ParseSubscriptionOwnerTransferred(log)
	case _FunctionsRouter.abi.Events["Unpaused"].ID:
		return _FunctionsRouter.ParseUnpaused(log)

	default:
		return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0])
	}
}

// The Topic methods below return each event's hard-coded keccak256 signature
// hash (topic0), generated from the ABI — they must match the hashes the
// contract emits; do not edit by hand.

func (FunctionsRouterConfigUpdated) Topic() common.Hash {
	return common.HexToHash("0x00a5832bf95f66c7814294cc4db681f20ee79608bfb8912a5321d66cfed5e985")
}

func (FunctionsRouterContractProposed) Topic() common.Hash {
	return common.HexToHash("0x8b052f0f4bf82fede7daffea71592b29d5ef86af1f3c7daaa0345dbb2f52f481")
}

func (FunctionsRouterContractUpdated) Topic() common.Hash {
	return common.HexToHash("0xf8a6175bca1ba37d682089187edc5e20a859989727f10ca6bd9a5bc0de8caf94")
}

func (FunctionsRouterFundsRecovered) Topic() common.Hash {
	return common.HexToHash("0x59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600")
}

func (FunctionsRouterOwnershipTransferRequested) Topic() common.Hash {
	return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278")
}

func (FunctionsRouterOwnershipTransferred) Topic() common.Hash {
	return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0")
}

func (FunctionsRouterPaused) Topic() common.Hash {
	return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258")
}

func (FunctionsRouterRequestNotProcessed) Topic() common.Hash {
	return common.HexToHash("0x1a90e9a50793db2e394cf581e7c522e10c358a81e70acf6b5a0edd620c08dee1")
}

func (FunctionsRouterRequestProcessed) Topic() common.Hash {
	return common.HexToHash("0x64778f26c70b60a8d7e29e2451b3844302d959448401c0535b768ed88c6b505e")
}

// (continues on the following chunk)
func (FunctionsRouterRequestStart) Topic()
common.Hash { + return common.HexToHash("0xf67aec45c9a7ede407974a3e0c3a743dffeab99ee3f2d4c9a8144c2ebf2c7ec9") +} + +func (FunctionsRouterRequestTimedOut) Topic() common.Hash { + return common.HexToHash("0xf1ca1e9147be737b04a2b018a79405f687a97de8dd8a2559bbe62357343af414") +} + +func (FunctionsRouterSubscriptionCanceled) Topic() common.Hash { + return common.HexToHash("0xe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd49815") +} + +func (FunctionsRouterSubscriptionConsumerAdded) Topic() common.Hash { + return common.HexToHash("0x43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e0") +} + +func (FunctionsRouterSubscriptionConsumerRemoved) Topic() common.Hash { + return common.HexToHash("0x182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b") +} + +func (FunctionsRouterSubscriptionCreated) Topic() common.Hash { + return common.HexToHash("0x464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf") +} + +func (FunctionsRouterSubscriptionFunded) Topic() common.Hash { + return common.HexToHash("0xd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f8") +} + +func (FunctionsRouterSubscriptionOwnerTransferRequested) Topic() common.Hash { + return common.HexToHash("0x69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be") +} + +func (FunctionsRouterSubscriptionOwnerTransferred) Topic() common.Hash { + return common.HexToHash("0x6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f0") +} + +func (FunctionsRouterUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (_FunctionsRouter *FunctionsRouter) Address() common.Address { + return _FunctionsRouter.address +} + +type FunctionsRouterInterface interface { + MAXCALLBACKRETURNBYTES(opts *bind.CallOpts) (uint16, error) + + GetAdminFee(opts *bind.CallOpts) (*big.Int, error) + + GetAllowListId(opts *bind.CallOpts) ([32]byte, error) + + GetConfig(opts *bind.CallOpts) 
(FunctionsRouterConfig, error) + + GetConsumer(opts *bind.CallOpts, client common.Address, subscriptionId uint64) (IFunctionsSubscriptionsConsumer, error) + + GetContractById(opts *bind.CallOpts, id [32]byte) (common.Address, error) + + GetFlags(opts *bind.CallOpts, subscriptionId uint64) ([32]byte, error) + + GetProposedContractById(opts *bind.CallOpts, id [32]byte) (common.Address, error) + + GetProposedContractSet(opts *bind.CallOpts) ([][32]byte, []common.Address, error) + + GetSubscription(opts *bind.CallOpts, subscriptionId uint64) (IFunctionsSubscriptionsSubscription, error) + + GetSubscriptionCount(opts *bind.CallOpts) (uint64, error) + + GetSubscriptionsInRange(opts *bind.CallOpts, subscriptionIdStart uint64, subscriptionIdEnd uint64) ([]IFunctionsSubscriptionsSubscription, error) + + GetTotalBalance(opts *bind.CallOpts) (*big.Int, error) + + IsValidCallbackGasLimit(opts *bind.CallOpts, subscriptionId uint64, callbackGasLimit uint32) error + + Owner(opts *bind.CallOpts) (common.Address, error) + + Paused(opts *bind.CallOpts) (bool, error) + + PendingRequestExists(opts *bind.CallOpts, subscriptionId uint64) (bool, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subscriptionId uint64) (*types.Transaction, error) + + AddConsumer(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) + + CancelSubscription(opts *bind.TransactOpts, subscriptionId uint64, to common.Address) (*types.Transaction, error) + + CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) + + CreateSubscriptionWithConsumer(opts *bind.TransactOpts, consumer common.Address) (*types.Transaction, error) + + Fulfill(opts *bind.TransactOpts, response []byte, err []byte, juelsPerGas *big.Int, costWithoutFulfillment *big.Int, transmitter common.Address, commitment 
FunctionsResponseCommitment) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) + + OwnerCancelSubscription(opts *bind.TransactOpts, subscriptionId uint64) (*types.Transaction, error) + + OwnerWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) (*types.Transaction, error) + + ProposeContractsUpdate(opts *bind.TransactOpts, proposedContractSetIds [][32]byte, proposedContractSetAddresses []common.Address) (*types.Transaction, error) + + ProposeSubscriptionOwnerTransfer(opts *bind.TransactOpts, subscriptionId uint64, newOwner common.Address) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + RemoveConsumer(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) + + SendRequest(opts *bind.TransactOpts, subscriptionId uint64, data []byte, dataVersion uint16, callbackGasLimit uint32, donId [32]byte) (*types.Transaction, error) + + SendRequestToProposed(opts *bind.TransactOpts, subscriptionId uint64, data []byte, dataVersion uint16, callbackGasLimit uint32, donId [32]byte) (*types.Transaction, error) + + SetAllowListId(opts *bind.TransactOpts, allowListId [32]byte) (*types.Transaction, error) + + SetFlags(opts *bind.TransactOpts, subscriptionId uint64, flags [32]byte) (*types.Transaction, error) + + TimeoutRequests(opts *bind.TransactOpts, requestsToTimeoutByCommitment []FunctionsResponseCommitment) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + UpdateConfig(opts *bind.TransactOpts, config FunctionsRouterConfig) 
(*types.Transaction, error) + + UpdateContracts(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterConfigUpdated(opts *bind.FilterOpts) (*FunctionsRouterConfigUpdatedIterator, error) + + WatchConfigUpdated(opts *bind.WatchOpts, sink chan<- *FunctionsRouterConfigUpdated) (event.Subscription, error) + + ParseConfigUpdated(log types.Log) (*FunctionsRouterConfigUpdated, error) + + FilterContractProposed(opts *bind.FilterOpts) (*FunctionsRouterContractProposedIterator, error) + + WatchContractProposed(opts *bind.WatchOpts, sink chan<- *FunctionsRouterContractProposed) (event.Subscription, error) + + ParseContractProposed(log types.Log) (*FunctionsRouterContractProposed, error) + + FilterContractUpdated(opts *bind.FilterOpts) (*FunctionsRouterContractUpdatedIterator, error) + + WatchContractUpdated(opts *bind.WatchOpts, sink chan<- *FunctionsRouterContractUpdated) (event.Subscription, error) + + ParseContractUpdated(log types.Log) (*FunctionsRouterContractUpdated, error) + + FilterFundsRecovered(opts *bind.FilterOpts) (*FunctionsRouterFundsRecoveredIterator, error) + + WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *FunctionsRouterFundsRecovered) (event.Subscription, error) + + ParseFundsRecovered(log types.Log) (*FunctionsRouterFundsRecovered, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsRouterOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsRouterOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*FunctionsRouterOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsRouterOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsRouterOwnershipTransferred, from 
[]common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*FunctionsRouterOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*FunctionsRouterPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *FunctionsRouterPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*FunctionsRouterPaused, error) + + FilterRequestNotProcessed(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsRouterRequestNotProcessedIterator, error) + + WatchRequestNotProcessed(opts *bind.WatchOpts, sink chan<- *FunctionsRouterRequestNotProcessed, requestId [][32]byte) (event.Subscription, error) + + ParseRequestNotProcessed(log types.Log) (*FunctionsRouterRequestNotProcessed, error) + + FilterRequestProcessed(opts *bind.FilterOpts, requestId [][32]byte, subscriptionId []uint64) (*FunctionsRouterRequestProcessedIterator, error) + + WatchRequestProcessed(opts *bind.WatchOpts, sink chan<- *FunctionsRouterRequestProcessed, requestId [][32]byte, subscriptionId []uint64) (event.Subscription, error) + + ParseRequestProcessed(log types.Log) (*FunctionsRouterRequestProcessed, error) + + FilterRequestStart(opts *bind.FilterOpts, requestId [][32]byte, donId [][32]byte, subscriptionId []uint64) (*FunctionsRouterRequestStartIterator, error) + + WatchRequestStart(opts *bind.WatchOpts, sink chan<- *FunctionsRouterRequestStart, requestId [][32]byte, donId [][32]byte, subscriptionId []uint64) (event.Subscription, error) + + ParseRequestStart(log types.Log) (*FunctionsRouterRequestStart, error) + + FilterRequestTimedOut(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsRouterRequestTimedOutIterator, error) + + WatchRequestTimedOut(opts *bind.WatchOpts, sink chan<- *FunctionsRouterRequestTimedOut, requestId [][32]byte) (event.Subscription, error) + + ParseRequestTimedOut(log types.Log) (*FunctionsRouterRequestTimedOut, error) + + FilterSubscriptionCanceled(opts *bind.FilterOpts, subscriptionId 
[]uint64) (*FunctionsRouterSubscriptionCanceledIterator, error) + + WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionCanceled, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionCanceled(log types.Log) (*FunctionsRouterSubscriptionCanceled, error) + + FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsRouterSubscriptionConsumerAddedIterator, error) + + WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionConsumerAdded, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerAdded(log types.Log) (*FunctionsRouterSubscriptionConsumerAdded, error) + + FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsRouterSubscriptionConsumerRemovedIterator, error) + + WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionConsumerRemoved, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerRemoved(log types.Log) (*FunctionsRouterSubscriptionConsumerRemoved, error) + + FilterSubscriptionCreated(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsRouterSubscriptionCreatedIterator, error) + + WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionCreated, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionCreated(log types.Log) (*FunctionsRouterSubscriptionCreated, error) + + FilterSubscriptionFunded(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsRouterSubscriptionFundedIterator, error) + + WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionFunded, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionFunded(log types.Log) (*FunctionsRouterSubscriptionFunded, error) + + FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subscriptionId []uint64) 
(*FunctionsRouterSubscriptionOwnerTransferRequestedIterator, error) + + WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionOwnerTransferRequested, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferRequested(log types.Log) (*FunctionsRouterSubscriptionOwnerTransferRequested, error) + + FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsRouterSubscriptionOwnerTransferredIterator, error) + + WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsRouterSubscriptionOwnerTransferred, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferred(log types.Log) (*FunctionsRouterSubscriptionOwnerTransferred, error) + + FilterUnpaused(opts *bind.FilterOpts) (*FunctionsRouterUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *FunctionsRouterUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*FunctionsRouterUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/functions/generated/functions_v1_events_mock/functions_v1_events_mock.go b/core/gethwrappers/functions/generated/functions_v1_events_mock/functions_v1_events_mock.go new file mode 100644 index 00000000..e62cb9c8 --- /dev/null +++ b/core/gethwrappers/functions/generated/functions_v1_events_mock/functions_v1_events_mock.go @@ -0,0 +1,3122 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package functions_v1_events_mock + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type FunctionsV1EventsMockConfig struct { + MaxConsumersPerSubscription uint16 + AdminFee *big.Int + HandleOracleFulfillmentSelector [4]byte + GasForCallExactCheck uint16 + MaxCallbackGasLimits []uint32 +} + +var FunctionsV1EventsMockMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"uint16\",\"name\":\"maxConsumersPerSubscription\",\"type\":\"uint16\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"bytes4\",\"name\":\"handleOracleFulfillmentSelector\",\"type\":\"bytes4\"},{\"internalType\":\"uint16\",\"name\":\"gasForCallExactCheck\",\"type\":\"uint16\"},{\"internalType\":\"uint32[]\",\"name\":\"maxCallbackGasLimits\",\"type\":\"uint32[]\"}],\"indexed\":false,\"internalType\":\"structFunctionsV1EventsMock.Config\",\"name\":\"param1\",\"type\":\"tuple\"}],\"name\":\"ConfigUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"proposedContractSetId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"proposedContractSetFromAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"proposedContractSetToAddress\",\"type\":\"address\"}],\"name\":\"ContractProposed\",\"type\":\"event\"},{\"anonymous\":false,\"i
nputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"ContractUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"FundsRecovered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"resultCode\",\"type\":\"uint8\"}],\"name\":\"RequestNotProcessed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name
\":\"totalCostJuels\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"resultCode\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"callbackReturnData\",\"type\":\"bytes\"}],\"name\":\"RequestProcessed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"donId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"subscriptionOwner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestInitiator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"dataVersion\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"estimatedTotalCostJuels\",\"type\":\"uint96\"}],\"name\":\"RequestStart\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"RequestTimedOut\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"fundsRecipient\",\"type\":\"address\"},{\"indexed\":
false,\"internalType\":\"uint256\",\"name\":\"fundsAmount\",\"type\":\"uint256\"}],\"name\":\"SubscriptionCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"SubscriptionCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"SubscriptionFunded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\
"address\"}],\"name\":\"SubscriptionOwnerTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint16\",\"name\":\"maxConsumersPerSubscription\",\"type\":\"uint16\"},{\"internalType\":\"uint72\",\"name\":\"adminFee\",\"type\":\"uint72\"},{\"internalType\":\"bytes4\",\"name\":\"handleOracleFulfillmentSelector\",\"type\":\"bytes4\"},{\"internalType\":\"uint16\",\"name\":\"gasForCallExactCheck\",\"type\":\"uint16\"},{\"internalType\":\"uint32[]\",\"name\":\"maxCallbackGasLimits\",\"type\":\"uint32[]\"}],\"internalType\":\"structFunctionsV1EventsMock.Config\",\"name\":\"param1\",\"type\":\"tuple\"}],\"name\":\"emitConfigUpdated\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"proposedContractSetId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"proposedContractSetFromAddress\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposedContractSetToAddress\",\"type\":\"address\"}],\"name\":\"emitContractProposed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitContractUpdated\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"emitFundsRecovered\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",
\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferred\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"emitPaused\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"resultCode\",\"type\":\"uint8\"}],\"name\":\"emitRequestNotProcessed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint96\",\"name\":\"totalCostJuels\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"resultCode\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"callbackReturnData\",\"type\":\"bytes\"}],\"name\":\"emitRequestProcessed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"donId\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\
"address\",\"name\":\"subscriptionOwner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"requestInitiator\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"uint16\",\"name\":\"dataVersion\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"estimatedTotalCostJuels\",\"type\":\"uint96\"}],\"name\":\"emitRequestStart\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"emitRequestTimedOut\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"fundsRecipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"fundsAmount\",\"type\":\"uint256\"}],\"name\":\"emitSubscriptionCanceled\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"emitSubscriptionConsumerAdded\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"emitSubscriptionConsumerRemoved\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"emitSubscriptionCreated\",\"outputs\":[],\"stateMutability\":\"
nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"oldBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"emitSubscriptionFunded\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitSubscriptionOwnerTransferRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitSubscriptionOwnerTransferred\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"emitUnpaused\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b506111a8806100206000396000f3fe608060405234801561001057600080fd5b50600436106101515760003560e01c8063a5257226116100cd578063e0f6eff111610081578063e9bfcd1811610066578063e9bfcd1814610288578063f7420bc21461029b578063fa7dd96b146102ae57600080fd5b8063e0f6eff114610262578063e2cab57b1461027557600080fd5b8063b24a02cb116100b2578063b24a02cb14610229578063ce150ef11461023c578063dde69b3f1461024f57600080fd5b8063a525722614610203578063b019b4e81461021657600080fd5b8063689300ea116101245780637e1b44c0116101095780637e1b44c0146101ca57806389d38eb4146101dd5780639ec3ce4b146101f057600080fd5b8063689300ea146101a45780637be5c756146101b757600080fd5b8063027d7d22146101565780633f70afb61461016b5780634bf6a80d1461017e578063675b924414610191575b600080fd5b610169610164366004610919565b6102c1565b005b61016961017936600461097e565b610323565b61016961018c3660046109b1565b61037d565b61016961019f36600461097e565b6103df565b6101696101b23660046109f4565b610431565b6101696101c5366004610a1e565b610484565b6101696101d8366004610a40565b6104d1565b6101696101eb366004610bd0565b6104ff565b6101696101fe366004610a1e565b61055b565b61016961021136600461097e565b6105a1565b610169610224366004610c97565b6105f3565b610169610237366004610cb3565b610651565b61016961024a366004610cd8565b6106b1565b61016961025d366004610dad565b610708565b6101696102703660046109b1565b610760565b610169610283366004610de9565b6107b9565b610169610296366004610cb3565b6107fb565b6101696102a9366004610c97565b610852565b6101696102bc366004610ea3565b6108b0565b6040805173ffffffffffffffffffffffffffffffffffffffff85811682528416602082015260ff831681830152905185917f1a90e9a50793db2e394cf581e7c522e10c358a81e70acf6b5a0edd620c08dee1919081900360600190a250505050565b60405173ffffffffffffffffffffffffffffffffffffffff8216815267ffffffffffffffff8316907f464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf906020015b60405180910390a25050565b6040805173ffffffffffffffffffffffffffffffffffffffff80851682528316602082015267ffffffffffffffff8516917f6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6
c26ba27cedfe87802f091015b60405180910390a2505050565b60405173ffffffffffffffffffffffffffffffffffffffff8216815267ffffffffffffffff8316907f43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e090602001610371565b6040805173ffffffffffffffffffffffffffffffffffffffff84168152602081018390527f59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600910160405180910390a15050565b60405173ffffffffffffffffffffffffffffffffffffffff821681527f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258906020015b60405180910390a150565b60405181907ff1ca1e9147be737b04a2b018a79405f687a97de8dd8a2559bbe62357343af41490600090a250565b8767ffffffffffffffff16898b7ff67aec45c9a7ede407974a3e0c3a743dffeab99ee3f2d4c9a8144c2ebf2c7ec98a8a8a8a8a8a8a6040516105479796959493929190610fef565b60405180910390a450505050505050505050565b60405173ffffffffffffffffffffffffffffffffffffffff821681527f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa906020016104c6565b60405173ffffffffffffffffffffffffffffffffffffffff8216815267ffffffffffffffff8316907f182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b90602001610371565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b6040805184815273ffffffffffffffffffffffffffffffffffffffff80851660208301528316918101919091527f8b052f0f4bf82fede7daffea71592b29d5ef86af1f3c7daaa0345dbb2f52f481906060015b60405180910390a1505050565b8667ffffffffffffffff16887f64778f26c70b60a8d7e29e2451b3844302d959448401c0535b768ed88c6b505e8888888888886040516106f696959493929190611067565b60405180910390a35050505050505050565b6040805173ffffffffffffffffffffffffffffffffffffffff841681526020810183905267ffffffffffffffff8516917fe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd4981591016103d2565b6040805173ffffffffffffffffffffffffffffffffffffffff80851682528316602082015267ffffffffffffffff8516917f69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a35
5c4a879be91016103d2565b604080518381526020810183905267ffffffffffffffff8516917fd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f891016103d2565b6040805184815273ffffffffffffffffffffffffffffffffffffffff80851660208301528316918101919091527ff8a6175bca1ba37d682089187edc5e20a859989727f10ca6bd9a5bc0de8caf94906060016106a4565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127860405160405180910390a35050565b7f049ce2e6e1420eb4b07b425e90129186833eb346bda40b37d5d921aad482f71c816040516104c691906110e6565b803573ffffffffffffffffffffffffffffffffffffffff8116811461090357600080fd5b919050565b803560ff8116811461090357600080fd5b6000806000806080858703121561092f57600080fd5b8435935061093f602086016108df565b925061094d604086016108df565b915061095b60608601610908565b905092959194509250565b803567ffffffffffffffff8116811461090357600080fd5b6000806040838503121561099157600080fd5b61099a83610966565b91506109a8602084016108df565b90509250929050565b6000806000606084860312156109c657600080fd5b6109cf84610966565b92506109dd602085016108df565b91506109eb604085016108df565b90509250925092565b60008060408385031215610a0757600080fd5b610a10836108df565b946020939093013593505050565b600060208284031215610a3057600080fd5b610a39826108df565b9392505050565b600060208284031215610a5257600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60405160a0810167ffffffffffffffff81118282101715610aab57610aab610a59565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610af857610af8610a59565b604052919050565b600082601f830112610b1157600080fd5b813567ffffffffffffffff811115610b2b57610b2b610a59565b610b5c60207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601610ab1565b818152846020838601011115610b7157600080fd5b816020850160208301376000918101602001919091529392505050565b803561ffff81168
11461090357600080fd5b803563ffffffff8116811461090357600080fd5b80356bffffffffffffffffffffffff8116811461090357600080fd5b6000806000806000806000806000806101408b8d031215610bf057600080fd5b8a35995060208b01359850610c0760408c01610966565b9750610c1560608c016108df565b9650610c2360808c016108df565b9550610c3160a08c016108df565b945060c08b013567ffffffffffffffff811115610c4d57600080fd5b610c598d828e01610b00565b945050610c6860e08c01610b8e565b9250610c776101008c01610ba0565b9150610c866101208c01610bb4565b90509295989b9194979a5092959850565b60008060408385031215610caa57600080fd5b61099a836108df565b600080600060608486031215610cc857600080fd5b833592506109dd602085016108df565b600080600080600080600080610100898b031215610cf557600080fd5b88359750610d0560208a01610966565b9650610d1360408a01610bb4565b9550610d2160608a016108df565b9450610d2f60808a01610908565b935060a089013567ffffffffffffffff80821115610d4c57600080fd5b610d588c838d01610b00565b945060c08b0135915080821115610d6e57600080fd5b610d7a8c838d01610b00565b935060e08b0135915080821115610d9057600080fd5b50610d9d8b828c01610b00565b9150509295985092959890939650565b600080600060608486031215610dc257600080fd5b610dcb84610966565b9250610dd9602085016108df565b9150604084013590509250925092565b600080600060608486031215610dfe57600080fd5b610e0784610966565b95602085013595506040909401359392505050565b600082601f830112610e2d57600080fd5b8135602067ffffffffffffffff821115610e4957610e49610a59565b8160051b610e58828201610ab1565b9283528481018201928281019087851115610e7257600080fd5b83870192505b84831015610e9857610e8983610ba0565b82529183019190830190610e78565b979650505050505050565b600060208284031215610eb557600080fd5b813567ffffffffffffffff80821115610ecd57600080fd5b9083019060a08286031215610ee157600080fd5b610ee9610a88565b610ef283610b8e565b8152602083013568ffffffffffffffffff81168114610f1057600080fd5b602082015260408301357fffffffff0000000000000000000000000000000000000000000000000000000081168114610f4857600080fd5b6040820152610f5960608401610b8e565b6060820152608083013582811115610f7057600080fd5b610f7c87828601610e1c565b608
08301525095945050505050565b6000815180845260005b81811015610fb157602081850181015186830182015201610f95565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b600073ffffffffffffffffffffffffffffffffffffffff808a168352808916602084015280881660408401525060e0606083015261103060e0830187610f8b565b61ffff9590951660808301525063ffffffff9290921660a08301526bffffffffffffffffffffffff1660c090910152949350505050565b6bffffffffffffffffffffffff8716815273ffffffffffffffffffffffffffffffffffffffff8616602082015260ff8516604082015260c0606082015260006110b360c0830186610f8b565b82810360808401526110c58186610f8b565b905082810360a08401526110d98185610f8b565b9998505050505050505050565b6000602080835260c0830161ffff808651168386015268ffffffffffffffffff838701511660408601527fffffffff00000000000000000000000000000000000000000000000000000000604087015116606086015280606087015116608086015250608085015160a08086015281815180845260e0870191508483019350600092505b8083101561119057835163ffffffff16825292840192600192909201919084019061116a565b50969550505050505056fea164736f6c6343000813000a", +} + +var FunctionsV1EventsMockABI = FunctionsV1EventsMockMetaData.ABI + +var FunctionsV1EventsMockBin = FunctionsV1EventsMockMetaData.Bin + +func DeployFunctionsV1EventsMock(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *FunctionsV1EventsMock, error) { + parsed, err := FunctionsV1EventsMockMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(FunctionsV1EventsMockBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &FunctionsV1EventsMock{address: address, abi: *parsed, FunctionsV1EventsMockCaller: FunctionsV1EventsMockCaller{contract: contract}, FunctionsV1EventsMockTransactor: 
FunctionsV1EventsMockTransactor{contract: contract}, FunctionsV1EventsMockFilterer: FunctionsV1EventsMockFilterer{contract: contract}}, nil +} + +type FunctionsV1EventsMock struct { + address common.Address + abi abi.ABI + FunctionsV1EventsMockCaller + FunctionsV1EventsMockTransactor + FunctionsV1EventsMockFilterer +} + +type FunctionsV1EventsMockCaller struct { + contract *bind.BoundContract +} + +type FunctionsV1EventsMockTransactor struct { + contract *bind.BoundContract +} + +type FunctionsV1EventsMockFilterer struct { + contract *bind.BoundContract +} + +type FunctionsV1EventsMockSession struct { + Contract *FunctionsV1EventsMock + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type FunctionsV1EventsMockCallerSession struct { + Contract *FunctionsV1EventsMockCaller + CallOpts bind.CallOpts +} + +type FunctionsV1EventsMockTransactorSession struct { + Contract *FunctionsV1EventsMockTransactor + TransactOpts bind.TransactOpts +} + +type FunctionsV1EventsMockRaw struct { + Contract *FunctionsV1EventsMock +} + +type FunctionsV1EventsMockCallerRaw struct { + Contract *FunctionsV1EventsMockCaller +} + +type FunctionsV1EventsMockTransactorRaw struct { + Contract *FunctionsV1EventsMockTransactor +} + +func NewFunctionsV1EventsMock(address common.Address, backend bind.ContractBackend) (*FunctionsV1EventsMock, error) { + abi, err := abi.JSON(strings.NewReader(FunctionsV1EventsMockABI)) + if err != nil { + return nil, err + } + contract, err := bindFunctionsV1EventsMock(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMock{address: address, abi: abi, FunctionsV1EventsMockCaller: FunctionsV1EventsMockCaller{contract: contract}, FunctionsV1EventsMockTransactor: FunctionsV1EventsMockTransactor{contract: contract}, FunctionsV1EventsMockFilterer: FunctionsV1EventsMockFilterer{contract: contract}}, nil +} + +func NewFunctionsV1EventsMockCaller(address common.Address, caller bind.ContractCaller) 
(*FunctionsV1EventsMockCaller, error) { + contract, err := bindFunctionsV1EventsMock(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockCaller{contract: contract}, nil +} + +func NewFunctionsV1EventsMockTransactor(address common.Address, transactor bind.ContractTransactor) (*FunctionsV1EventsMockTransactor, error) { + contract, err := bindFunctionsV1EventsMock(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockTransactor{contract: contract}, nil +} + +func NewFunctionsV1EventsMockFilterer(address common.Address, filterer bind.ContractFilterer) (*FunctionsV1EventsMockFilterer, error) { + contract, err := bindFunctionsV1EventsMock(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockFilterer{contract: contract}, nil +} + +func bindFunctionsV1EventsMock(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FunctionsV1EventsMockMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsV1EventsMock.Contract.FunctionsV1EventsMockCaller.contract.Call(opts, result, method, params...) 
+} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.FunctionsV1EventsMockTransactor.contract.Transfer(opts) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.FunctionsV1EventsMockTransactor.contract.Transact(opts, method, params...) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsV1EventsMock.Contract.contract.Call(opts, result, method, params...) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.contract.Transfer(opts) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.contract.Transact(opts, method, params...) 
+} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitConfigUpdated(opts *bind.TransactOpts, param1 FunctionsV1EventsMockConfig) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitConfigUpdated", param1) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitConfigUpdated(param1 FunctionsV1EventsMockConfig) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitConfigUpdated(&_FunctionsV1EventsMock.TransactOpts, param1) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitConfigUpdated(param1 FunctionsV1EventsMockConfig) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitConfigUpdated(&_FunctionsV1EventsMock.TransactOpts, param1) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitContractProposed(opts *bind.TransactOpts, proposedContractSetId [32]byte, proposedContractSetFromAddress common.Address, proposedContractSetToAddress common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitContractProposed", proposedContractSetId, proposedContractSetFromAddress, proposedContractSetToAddress) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitContractProposed(proposedContractSetId [32]byte, proposedContractSetFromAddress common.Address, proposedContractSetToAddress common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitContractProposed(&_FunctionsV1EventsMock.TransactOpts, proposedContractSetId, proposedContractSetFromAddress, proposedContractSetToAddress) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitContractProposed(proposedContractSetId [32]byte, proposedContractSetFromAddress common.Address, proposedContractSetToAddress common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitContractProposed(&_FunctionsV1EventsMock.TransactOpts, 
proposedContractSetId, proposedContractSetFromAddress, proposedContractSetToAddress) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitContractUpdated(opts *bind.TransactOpts, id [32]byte, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitContractUpdated", id, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitContractUpdated(id [32]byte, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitContractUpdated(&_FunctionsV1EventsMock.TransactOpts, id, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitContractUpdated(id [32]byte, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitContractUpdated(&_FunctionsV1EventsMock.TransactOpts, id, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitFundsRecovered(opts *bind.TransactOpts, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitFundsRecovered", to, amount) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitFundsRecovered(to common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitFundsRecovered(&_FunctionsV1EventsMock.TransactOpts, to, amount) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitFundsRecovered(to common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitFundsRecovered(&_FunctionsV1EventsMock.TransactOpts, to, amount) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return 
_FunctionsV1EventsMock.contract.Transact(opts, "emitOwnershipTransferRequested", from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitOwnershipTransferRequested(&_FunctionsV1EventsMock.TransactOpts, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitOwnershipTransferRequested(&_FunctionsV1EventsMock.TransactOpts, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitOwnershipTransferred", from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitOwnershipTransferred(&_FunctionsV1EventsMock.TransactOpts, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitOwnershipTransferred(&_FunctionsV1EventsMock.TransactOpts, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitPaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitPaused", account) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitPaused(account common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitPaused(&_FunctionsV1EventsMock.TransactOpts, account) +} + +func 
(_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitPaused(account common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitPaused(&_FunctionsV1EventsMock.TransactOpts, account) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitRequestNotProcessed(opts *bind.TransactOpts, requestId [32]byte, coordinator common.Address, transmitter common.Address, resultCode uint8) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitRequestNotProcessed", requestId, coordinator, transmitter, resultCode) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitRequestNotProcessed(requestId [32]byte, coordinator common.Address, transmitter common.Address, resultCode uint8) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitRequestNotProcessed(&_FunctionsV1EventsMock.TransactOpts, requestId, coordinator, transmitter, resultCode) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitRequestNotProcessed(requestId [32]byte, coordinator common.Address, transmitter common.Address, resultCode uint8) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitRequestNotProcessed(&_FunctionsV1EventsMock.TransactOpts, requestId, coordinator, transmitter, resultCode) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitRequestProcessed(opts *bind.TransactOpts, requestId [32]byte, subscriptionId uint64, totalCostJuels *big.Int, transmitter common.Address, resultCode uint8, response []byte, err []byte, callbackReturnData []byte) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitRequestProcessed", requestId, subscriptionId, totalCostJuels, transmitter, resultCode, response, err, callbackReturnData) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitRequestProcessed(requestId [32]byte, subscriptionId uint64, totalCostJuels *big.Int, 
transmitter common.Address, resultCode uint8, response []byte, err []byte, callbackReturnData []byte) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitRequestProcessed(&_FunctionsV1EventsMock.TransactOpts, requestId, subscriptionId, totalCostJuels, transmitter, resultCode, response, err, callbackReturnData) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitRequestProcessed(requestId [32]byte, subscriptionId uint64, totalCostJuels *big.Int, transmitter common.Address, resultCode uint8, response []byte, err []byte, callbackReturnData []byte) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitRequestProcessed(&_FunctionsV1EventsMock.TransactOpts, requestId, subscriptionId, totalCostJuels, transmitter, resultCode, response, err, callbackReturnData) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitRequestStart(opts *bind.TransactOpts, requestId [32]byte, donId [32]byte, subscriptionId uint64, subscriptionOwner common.Address, requestingContract common.Address, requestInitiator common.Address, data []byte, dataVersion uint16, callbackGasLimit uint32, estimatedTotalCostJuels *big.Int) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitRequestStart", requestId, donId, subscriptionId, subscriptionOwner, requestingContract, requestInitiator, data, dataVersion, callbackGasLimit, estimatedTotalCostJuels) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitRequestStart(requestId [32]byte, donId [32]byte, subscriptionId uint64, subscriptionOwner common.Address, requestingContract common.Address, requestInitiator common.Address, data []byte, dataVersion uint16, callbackGasLimit uint32, estimatedTotalCostJuels *big.Int) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitRequestStart(&_FunctionsV1EventsMock.TransactOpts, requestId, donId, subscriptionId, subscriptionOwner, requestingContract, 
requestInitiator, data, dataVersion, callbackGasLimit, estimatedTotalCostJuels) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitRequestStart(requestId [32]byte, donId [32]byte, subscriptionId uint64, subscriptionOwner common.Address, requestingContract common.Address, requestInitiator common.Address, data []byte, dataVersion uint16, callbackGasLimit uint32, estimatedTotalCostJuels *big.Int) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitRequestStart(&_FunctionsV1EventsMock.TransactOpts, requestId, donId, subscriptionId, subscriptionOwner, requestingContract, requestInitiator, data, dataVersion, callbackGasLimit, estimatedTotalCostJuels) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitRequestTimedOut(opts *bind.TransactOpts, requestId [32]byte) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitRequestTimedOut", requestId) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitRequestTimedOut(requestId [32]byte) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitRequestTimedOut(&_FunctionsV1EventsMock.TransactOpts, requestId) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitRequestTimedOut(requestId [32]byte) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitRequestTimedOut(&_FunctionsV1EventsMock.TransactOpts, requestId) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitSubscriptionCanceled(opts *bind.TransactOpts, subscriptionId uint64, fundsRecipient common.Address, fundsAmount *big.Int) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitSubscriptionCanceled", subscriptionId, fundsRecipient, fundsAmount) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitSubscriptionCanceled(subscriptionId uint64, fundsRecipient common.Address, fundsAmount *big.Int) 
(*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitSubscriptionCanceled(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, fundsRecipient, fundsAmount) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitSubscriptionCanceled(subscriptionId uint64, fundsRecipient common.Address, fundsAmount *big.Int) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitSubscriptionCanceled(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, fundsRecipient, fundsAmount) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitSubscriptionConsumerAdded(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitSubscriptionConsumerAdded", subscriptionId, consumer) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitSubscriptionConsumerAdded(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitSubscriptionConsumerAdded(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, consumer) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitSubscriptionConsumerAdded(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitSubscriptionConsumerAdded(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, consumer) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitSubscriptionConsumerRemoved(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitSubscriptionConsumerRemoved", subscriptionId, consumer) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitSubscriptionConsumerRemoved(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return 
_FunctionsV1EventsMock.Contract.EmitSubscriptionConsumerRemoved(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, consumer) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitSubscriptionConsumerRemoved(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitSubscriptionConsumerRemoved(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, consumer) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitSubscriptionCreated(opts *bind.TransactOpts, subscriptionId uint64, owner common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitSubscriptionCreated", subscriptionId, owner) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitSubscriptionCreated(subscriptionId uint64, owner common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitSubscriptionCreated(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, owner) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitSubscriptionCreated(subscriptionId uint64, owner common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitSubscriptionCreated(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, owner) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitSubscriptionFunded(opts *bind.TransactOpts, subscriptionId uint64, oldBalance *big.Int, newBalance *big.Int) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitSubscriptionFunded", subscriptionId, oldBalance, newBalance) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitSubscriptionFunded(subscriptionId uint64, oldBalance *big.Int, newBalance *big.Int) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitSubscriptionFunded(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, oldBalance, 
newBalance) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitSubscriptionFunded(subscriptionId uint64, oldBalance *big.Int, newBalance *big.Int) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitSubscriptionFunded(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, oldBalance, newBalance) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitSubscriptionOwnerTransferRequested(opts *bind.TransactOpts, subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitSubscriptionOwnerTransferRequested", subscriptionId, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitSubscriptionOwnerTransferRequested(subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitSubscriptionOwnerTransferRequested(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitSubscriptionOwnerTransferRequested(subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitSubscriptionOwnerTransferRequested(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitSubscriptionOwnerTransferred(opts *bind.TransactOpts, subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitSubscriptionOwnerTransferred", subscriptionId, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitSubscriptionOwnerTransferred(subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) { + return 
_FunctionsV1EventsMock.Contract.EmitSubscriptionOwnerTransferred(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitSubscriptionOwnerTransferred(subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitSubscriptionOwnerTransferred(&_FunctionsV1EventsMock.TransactOpts, subscriptionId, from, to) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactor) EmitUnpaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.contract.Transact(opts, "emitUnpaused", account) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockSession) EmitUnpaused(account common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitUnpaused(&_FunctionsV1EventsMock.TransactOpts, account) +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockTransactorSession) EmitUnpaused(account common.Address) (*types.Transaction, error) { + return _FunctionsV1EventsMock.Contract.EmitUnpaused(&_FunctionsV1EventsMock.TransactOpts, account) +} + +type FunctionsV1EventsMockConfigUpdatedIterator struct { + Event *FunctionsV1EventsMockConfigUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockConfigUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockConfigUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockConfigUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = 
err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockConfigUpdatedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockConfigUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockConfigUpdated struct { + Param1 FunctionsV1EventsMockConfig + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterConfigUpdated(opts *bind.FilterOpts) (*FunctionsV1EventsMockConfigUpdatedIterator, error) { + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "ConfigUpdated") + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockConfigUpdatedIterator{contract: _FunctionsV1EventsMock.contract, event: "ConfigUpdated", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchConfigUpdated(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockConfigUpdated) (event.Subscription, error) { + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "ConfigUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockConfigUpdated) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "ConfigUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseConfigUpdated(log types.Log) (*FunctionsV1EventsMockConfigUpdated, error) { + event := new(FunctionsV1EventsMockConfigUpdated) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, 
"ConfigUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockContractProposedIterator struct { + Event *FunctionsV1EventsMockContractProposed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockContractProposedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockContractProposed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockContractProposed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockContractProposedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockContractProposedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockContractProposed struct { + ProposedContractSetId [32]byte + ProposedContractSetFromAddress common.Address + ProposedContractSetToAddress common.Address + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterContractProposed(opts *bind.FilterOpts) (*FunctionsV1EventsMockContractProposedIterator, error) { + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "ContractProposed") + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockContractProposedIterator{contract: _FunctionsV1EventsMock.contract, event: "ContractProposed", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock 
*FunctionsV1EventsMockFilterer) WatchContractProposed(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockContractProposed) (event.Subscription, error) { + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "ContractProposed") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockContractProposed) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "ContractProposed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseContractProposed(log types.Log) (*FunctionsV1EventsMockContractProposed, error) { + event := new(FunctionsV1EventsMockContractProposed) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "ContractProposed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockContractUpdatedIterator struct { + Event *FunctionsV1EventsMockContractUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockContractUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockContractUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockContractUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + 
return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockContractUpdatedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockContractUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockContractUpdated struct { + Id [32]byte + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterContractUpdated(opts *bind.FilterOpts) (*FunctionsV1EventsMockContractUpdatedIterator, error) { + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "ContractUpdated") + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockContractUpdatedIterator{contract: _FunctionsV1EventsMock.contract, event: "ContractUpdated", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchContractUpdated(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockContractUpdated) (event.Subscription, error) { + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "ContractUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockContractUpdated) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "ContractUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseContractUpdated(log types.Log) (*FunctionsV1EventsMockContractUpdated, error) { + event := new(FunctionsV1EventsMockContractUpdated) + if err := 
_FunctionsV1EventsMock.contract.UnpackLog(event, "ContractUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockFundsRecoveredIterator struct { + Event *FunctionsV1EventsMockFundsRecovered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockFundsRecoveredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockFundsRecoveredIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockFundsRecoveredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockFundsRecovered struct { + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterFundsRecovered(opts *bind.FilterOpts) (*FunctionsV1EventsMockFundsRecoveredIterator, error) { + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockFundsRecoveredIterator{contract: _FunctionsV1EventsMock.contract, event: "FundsRecovered", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- 
*FunctionsV1EventsMockFundsRecovered) (event.Subscription, error) { + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockFundsRecovered) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseFundsRecovered(log types.Log) (*FunctionsV1EventsMockFundsRecovered, error) { + event := new(FunctionsV1EventsMockFundsRecovered) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockOwnershipTransferRequestedIterator struct { + Event *FunctionsV1EventsMockOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return 
true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsV1EventsMockOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockOwnershipTransferRequestedIterator{contract: _FunctionsV1EventsMock.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(FunctionsV1EventsMockOwnershipTransferRequested) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseOwnershipTransferRequested(log types.Log) (*FunctionsV1EventsMockOwnershipTransferRequested, error) { + event := new(FunctionsV1EventsMockOwnershipTransferRequested) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockOwnershipTransferredIterator struct { + Event *FunctionsV1EventsMockOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + 
return nil +} + +type FunctionsV1EventsMockOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsV1EventsMockOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockOwnershipTransferredIterator{contract: _FunctionsV1EventsMock.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockOwnershipTransferred) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil 
+} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseOwnershipTransferred(log types.Log) (*FunctionsV1EventsMockOwnershipTransferred, error) { + event := new(FunctionsV1EventsMockOwnershipTransferred) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockPausedIterator struct { + Event *FunctionsV1EventsMockPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockPausedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockPaused struct { + Account common.Address + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterPaused(opts *bind.FilterOpts) (*FunctionsV1EventsMockPausedIterator, error) { + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockPausedIterator{contract: _FunctionsV1EventsMock.contract, event: "Paused", logs: logs, sub: sub}, nil +} + 
+func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockPaused) (event.Subscription, error) { + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockPaused) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParsePaused(log types.Log) (*FunctionsV1EventsMockPaused, error) { + event := new(FunctionsV1EventsMockPaused) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockRequestNotProcessedIterator struct { + Event *FunctionsV1EventsMockRequestNotProcessed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockRequestNotProcessedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockRequestNotProcessed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockRequestNotProcessed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + 
return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockRequestNotProcessedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockRequestNotProcessedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockRequestNotProcessed struct { + RequestId [32]byte + Coordinator common.Address + Transmitter common.Address + ResultCode uint8 + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterRequestNotProcessed(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsV1EventsMockRequestNotProcessedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "RequestNotProcessed", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockRequestNotProcessedIterator{contract: _FunctionsV1EventsMock.contract, event: "RequestNotProcessed", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchRequestNotProcessed(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockRequestNotProcessed, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "RequestNotProcessed", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockRequestNotProcessed) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "RequestNotProcessed", log); err != nil { + return err + } + event.Raw = log + + select { + case 
sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseRequestNotProcessed(log types.Log) (*FunctionsV1EventsMockRequestNotProcessed, error) { + event := new(FunctionsV1EventsMockRequestNotProcessed) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "RequestNotProcessed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockRequestProcessedIterator struct { + Event *FunctionsV1EventsMockRequestProcessed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockRequestProcessedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockRequestProcessed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockRequestProcessed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockRequestProcessedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockRequestProcessedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockRequestProcessed struct { + RequestId [32]byte + SubscriptionId uint64 + TotalCostJuels *big.Int + Transmitter common.Address + ResultCode uint8 + Response []byte + Err []byte + CallbackReturnData []byte + Raw types.Log +} + +func 
(_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterRequestProcessed(opts *bind.FilterOpts, requestId [][32]byte, subscriptionId []uint64) (*FunctionsV1EventsMockRequestProcessedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "RequestProcessed", requestIdRule, subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockRequestProcessedIterator{contract: _FunctionsV1EventsMock.contract, event: "RequestProcessed", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchRequestProcessed(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockRequestProcessed, requestId [][32]byte, subscriptionId []uint64) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "RequestProcessed", requestIdRule, subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockRequestProcessed) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "RequestProcessed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseRequestProcessed(log types.Log) (*FunctionsV1EventsMockRequestProcessed, error) { + event := new(FunctionsV1EventsMockRequestProcessed) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "RequestProcessed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockRequestStartIterator struct { + Event *FunctionsV1EventsMockRequestStart + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockRequestStartIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockRequestStart) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockRequestStart) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockRequestStartIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockRequestStartIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockRequestStart struct { + RequestId [32]byte + DonId [32]byte + SubscriptionId uint64 + SubscriptionOwner common.Address + RequestingContract common.Address + RequestInitiator common.Address + Data []byte + DataVersion uint16 + CallbackGasLimit uint32 + EstimatedTotalCostJuels *big.Int + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) 
FilterRequestStart(opts *bind.FilterOpts, requestId [][32]byte, donId [][32]byte, subscriptionId []uint64) (*FunctionsV1EventsMockRequestStartIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var donIdRule []interface{} + for _, donIdItem := range donId { + donIdRule = append(donIdRule, donIdItem) + } + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "RequestStart", requestIdRule, donIdRule, subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockRequestStartIterator{contract: _FunctionsV1EventsMock.contract, event: "RequestStart", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchRequestStart(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockRequestStart, requestId [][32]byte, donId [][32]byte, subscriptionId []uint64) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var donIdRule []interface{} + for _, donIdItem := range donId { + donIdRule = append(donIdRule, donIdItem) + } + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "RequestStart", requestIdRule, donIdRule, subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockRequestStart) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, 
"RequestStart", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseRequestStart(log types.Log) (*FunctionsV1EventsMockRequestStart, error) { + event := new(FunctionsV1EventsMockRequestStart) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "RequestStart", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockRequestTimedOutIterator struct { + Event *FunctionsV1EventsMockRequestTimedOut + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockRequestTimedOutIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockRequestTimedOut) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockRequestTimedOut) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockRequestTimedOutIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockRequestTimedOutIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockRequestTimedOut struct { + RequestId [32]byte + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterRequestTimedOut(opts *bind.FilterOpts, 
requestId [][32]byte) (*FunctionsV1EventsMockRequestTimedOutIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "RequestTimedOut", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockRequestTimedOutIterator{contract: _FunctionsV1EventsMock.contract, event: "RequestTimedOut", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchRequestTimedOut(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockRequestTimedOut, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "RequestTimedOut", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockRequestTimedOut) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "RequestTimedOut", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseRequestTimedOut(log types.Log) (*FunctionsV1EventsMockRequestTimedOut, error) { + event := new(FunctionsV1EventsMockRequestTimedOut) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "RequestTimedOut", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockSubscriptionCanceledIterator struct { + Event 
*FunctionsV1EventsMockSubscriptionCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockSubscriptionCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockSubscriptionCanceledIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockSubscriptionCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockSubscriptionCanceled struct { + SubscriptionId uint64 + FundsRecipient common.Address + FundsAmount *big.Int + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterSubscriptionCanceled(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionCanceledIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "SubscriptionCanceled", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockSubscriptionCanceledIterator{contract: _FunctionsV1EventsMock.contract, event: "SubscriptionCanceled", logs: logs, sub: sub}, nil +} + +func 
(_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionCanceled, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "SubscriptionCanceled", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockSubscriptionCanceled) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseSubscriptionCanceled(log types.Log) (*FunctionsV1EventsMockSubscriptionCanceled, error) { + event := new(FunctionsV1EventsMockSubscriptionCanceled) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockSubscriptionConsumerAddedIterator struct { + Event *FunctionsV1EventsMockSubscriptionConsumerAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockSubscriptionConsumerAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, 
it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockSubscriptionConsumerAddedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockSubscriptionConsumerAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockSubscriptionConsumerAdded struct { + SubscriptionId uint64 + Consumer common.Address + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionConsumerAddedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "SubscriptionConsumerAdded", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockSubscriptionConsumerAddedIterator{contract: _FunctionsV1EventsMock.contract, event: "SubscriptionConsumerAdded", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionConsumerAdded, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := 
_FunctionsV1EventsMock.contract.WatchLogs(opts, "SubscriptionConsumerAdded", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockSubscriptionConsumerAdded) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseSubscriptionConsumerAdded(log types.Log) (*FunctionsV1EventsMockSubscriptionConsumerAdded, error) { + event := new(FunctionsV1EventsMockSubscriptionConsumerAdded) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockSubscriptionConsumerRemovedIterator struct { + Event *FunctionsV1EventsMockSubscriptionConsumerRemoved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockSubscriptionConsumerRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log 
+ return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockSubscriptionConsumerRemovedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockSubscriptionConsumerRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockSubscriptionConsumerRemoved struct { + SubscriptionId uint64 + Consumer common.Address + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionConsumerRemovedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "SubscriptionConsumerRemoved", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockSubscriptionConsumerRemovedIterator{contract: _FunctionsV1EventsMock.contract, event: "SubscriptionConsumerRemoved", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionConsumerRemoved, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "SubscriptionConsumerRemoved", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockSubscriptionConsumerRemoved) + if err := 
_FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseSubscriptionConsumerRemoved(log types.Log) (*FunctionsV1EventsMockSubscriptionConsumerRemoved, error) { + event := new(FunctionsV1EventsMockSubscriptionConsumerRemoved) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockSubscriptionCreatedIterator struct { + Event *FunctionsV1EventsMockSubscriptionCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockSubscriptionCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockSubscriptionCreatedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockSubscriptionCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockSubscriptionCreated 
struct { + SubscriptionId uint64 + Owner common.Address + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterSubscriptionCreated(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionCreatedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "SubscriptionCreated", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockSubscriptionCreatedIterator{contract: _FunctionsV1EventsMock.contract, event: "SubscriptionCreated", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionCreated, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "SubscriptionCreated", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockSubscriptionCreated) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseSubscriptionCreated(log types.Log) (*FunctionsV1EventsMockSubscriptionCreated, error) { + event := 
new(FunctionsV1EventsMockSubscriptionCreated) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockSubscriptionFundedIterator struct { + Event *FunctionsV1EventsMockSubscriptionFunded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockSubscriptionFundedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockSubscriptionFundedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockSubscriptionFundedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockSubscriptionFunded struct { + SubscriptionId uint64 + OldBalance *big.Int + NewBalance *big.Int + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterSubscriptionFunded(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionFundedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, 
"SubscriptionFunded", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockSubscriptionFundedIterator{contract: _FunctionsV1EventsMock.contract, event: "SubscriptionFunded", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionFunded, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "SubscriptionFunded", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockSubscriptionFunded) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseSubscriptionFunded(log types.Log) (*FunctionsV1EventsMockSubscriptionFunded, error) { + event := new(FunctionsV1EventsMockSubscriptionFunded) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockSubscriptionOwnerTransferRequestedIterator struct { + Event *FunctionsV1EventsMockSubscriptionOwnerTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*FunctionsV1EventsMockSubscriptionOwnerTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockSubscriptionOwnerTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockSubscriptionOwnerTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockSubscriptionOwnerTransferRequested struct { + SubscriptionId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionOwnerTransferRequestedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "SubscriptionOwnerTransferRequested", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockSubscriptionOwnerTransferRequestedIterator{contract: _FunctionsV1EventsMock.contract, event: "SubscriptionOwnerTransferRequested", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock 
*FunctionsV1EventsMockFilterer) WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionOwnerTransferRequested, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "SubscriptionOwnerTransferRequested", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockSubscriptionOwnerTransferRequested) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseSubscriptionOwnerTransferRequested(log types.Log) (*FunctionsV1EventsMockSubscriptionOwnerTransferRequested, error) { + event := new(FunctionsV1EventsMockSubscriptionOwnerTransferRequested) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockSubscriptionOwnerTransferredIterator struct { + Event *FunctionsV1EventsMockSubscriptionOwnerTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockSubscriptionOwnerTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + 
it.Event = new(FunctionsV1EventsMockSubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockSubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockSubscriptionOwnerTransferredIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockSubscriptionOwnerTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockSubscriptionOwnerTransferred struct { + SubscriptionId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionOwnerTransferredIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "SubscriptionOwnerTransferred", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockSubscriptionOwnerTransferredIterator{contract: _FunctionsV1EventsMock.contract, event: "SubscriptionOwnerTransferred", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionOwnerTransferred, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, 
subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "SubscriptionOwnerTransferred", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockSubscriptionOwnerTransferred) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseSubscriptionOwnerTransferred(log types.Log) (*FunctionsV1EventsMockSubscriptionOwnerTransferred, error) { + event := new(FunctionsV1EventsMockSubscriptionOwnerTransferred) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsV1EventsMockUnpausedIterator struct { + Event *FunctionsV1EventsMockUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsV1EventsMockUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsV1EventsMockUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); 
err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsV1EventsMockUnpausedIterator) Error() error { + return it.fail +} + +func (it *FunctionsV1EventsMockUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsV1EventsMockUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) FilterUnpaused(opts *bind.FilterOpts) (*FunctionsV1EventsMockUnpausedIterator, error) { + + logs, sub, err := _FunctionsV1EventsMock.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &FunctionsV1EventsMockUnpausedIterator{contract: _FunctionsV1EventsMock.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockUnpaused) (event.Subscription, error) { + + logs, sub, err := _FunctionsV1EventsMock.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsV1EventsMockUnpaused) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMockFilterer) ParseUnpaused(log types.Log) (*FunctionsV1EventsMockUnpaused, error) { + event := new(FunctionsV1EventsMockUnpaused) + if err := _FunctionsV1EventsMock.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return 
event, nil +} + +func (_FunctionsV1EventsMock *FunctionsV1EventsMock) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _FunctionsV1EventsMock.abi.Events["ConfigUpdated"].ID: + return _FunctionsV1EventsMock.ParseConfigUpdated(log) + case _FunctionsV1EventsMock.abi.Events["ContractProposed"].ID: + return _FunctionsV1EventsMock.ParseContractProposed(log) + case _FunctionsV1EventsMock.abi.Events["ContractUpdated"].ID: + return _FunctionsV1EventsMock.ParseContractUpdated(log) + case _FunctionsV1EventsMock.abi.Events["FundsRecovered"].ID: + return _FunctionsV1EventsMock.ParseFundsRecovered(log) + case _FunctionsV1EventsMock.abi.Events["OwnershipTransferRequested"].ID: + return _FunctionsV1EventsMock.ParseOwnershipTransferRequested(log) + case _FunctionsV1EventsMock.abi.Events["OwnershipTransferred"].ID: + return _FunctionsV1EventsMock.ParseOwnershipTransferred(log) + case _FunctionsV1EventsMock.abi.Events["Paused"].ID: + return _FunctionsV1EventsMock.ParsePaused(log) + case _FunctionsV1EventsMock.abi.Events["RequestNotProcessed"].ID: + return _FunctionsV1EventsMock.ParseRequestNotProcessed(log) + case _FunctionsV1EventsMock.abi.Events["RequestProcessed"].ID: + return _FunctionsV1EventsMock.ParseRequestProcessed(log) + case _FunctionsV1EventsMock.abi.Events["RequestStart"].ID: + return _FunctionsV1EventsMock.ParseRequestStart(log) + case _FunctionsV1EventsMock.abi.Events["RequestTimedOut"].ID: + return _FunctionsV1EventsMock.ParseRequestTimedOut(log) + case _FunctionsV1EventsMock.abi.Events["SubscriptionCanceled"].ID: + return _FunctionsV1EventsMock.ParseSubscriptionCanceled(log) + case _FunctionsV1EventsMock.abi.Events["SubscriptionConsumerAdded"].ID: + return _FunctionsV1EventsMock.ParseSubscriptionConsumerAdded(log) + case _FunctionsV1EventsMock.abi.Events["SubscriptionConsumerRemoved"].ID: + return _FunctionsV1EventsMock.ParseSubscriptionConsumerRemoved(log) + case _FunctionsV1EventsMock.abi.Events["SubscriptionCreated"].ID: + 
return _FunctionsV1EventsMock.ParseSubscriptionCreated(log) + case _FunctionsV1EventsMock.abi.Events["SubscriptionFunded"].ID: + return _FunctionsV1EventsMock.ParseSubscriptionFunded(log) + case _FunctionsV1EventsMock.abi.Events["SubscriptionOwnerTransferRequested"].ID: + return _FunctionsV1EventsMock.ParseSubscriptionOwnerTransferRequested(log) + case _FunctionsV1EventsMock.abi.Events["SubscriptionOwnerTransferred"].ID: + return _FunctionsV1EventsMock.ParseSubscriptionOwnerTransferred(log) + case _FunctionsV1EventsMock.abi.Events["Unpaused"].ID: + return _FunctionsV1EventsMock.ParseUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (FunctionsV1EventsMockConfigUpdated) Topic() common.Hash { + return common.HexToHash("0x049ce2e6e1420eb4b07b425e90129186833eb346bda40b37d5d921aad482f71c") +} + +func (FunctionsV1EventsMockContractProposed) Topic() common.Hash { + return common.HexToHash("0x8b052f0f4bf82fede7daffea71592b29d5ef86af1f3c7daaa0345dbb2f52f481") +} + +func (FunctionsV1EventsMockContractUpdated) Topic() common.Hash { + return common.HexToHash("0xf8a6175bca1ba37d682089187edc5e20a859989727f10ca6bd9a5bc0de8caf94") +} + +func (FunctionsV1EventsMockFundsRecovered) Topic() common.Hash { + return common.HexToHash("0x59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600") +} + +func (FunctionsV1EventsMockOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (FunctionsV1EventsMockOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (FunctionsV1EventsMockPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (FunctionsV1EventsMockRequestNotProcessed) Topic() common.Hash { + return 
common.HexToHash("0x1a90e9a50793db2e394cf581e7c522e10c358a81e70acf6b5a0edd620c08dee1") +} + +func (FunctionsV1EventsMockRequestProcessed) Topic() common.Hash { + return common.HexToHash("0x64778f26c70b60a8d7e29e2451b3844302d959448401c0535b768ed88c6b505e") +} + +func (FunctionsV1EventsMockRequestStart) Topic() common.Hash { + return common.HexToHash("0xf67aec45c9a7ede407974a3e0c3a743dffeab99ee3f2d4c9a8144c2ebf2c7ec9") +} + +func (FunctionsV1EventsMockRequestTimedOut) Topic() common.Hash { + return common.HexToHash("0xf1ca1e9147be737b04a2b018a79405f687a97de8dd8a2559bbe62357343af414") +} + +func (FunctionsV1EventsMockSubscriptionCanceled) Topic() common.Hash { + return common.HexToHash("0xe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd49815") +} + +func (FunctionsV1EventsMockSubscriptionConsumerAdded) Topic() common.Hash { + return common.HexToHash("0x43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e0") +} + +func (FunctionsV1EventsMockSubscriptionConsumerRemoved) Topic() common.Hash { + return common.HexToHash("0x182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b") +} + +func (FunctionsV1EventsMockSubscriptionCreated) Topic() common.Hash { + return common.HexToHash("0x464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf") +} + +func (FunctionsV1EventsMockSubscriptionFunded) Topic() common.Hash { + return common.HexToHash("0xd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f8") +} + +func (FunctionsV1EventsMockSubscriptionOwnerTransferRequested) Topic() common.Hash { + return common.HexToHash("0x69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be") +} + +func (FunctionsV1EventsMockSubscriptionOwnerTransferred) Topic() common.Hash { + return common.HexToHash("0x6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f0") +} + +func (FunctionsV1EventsMockUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + 
+func (_FunctionsV1EventsMock *FunctionsV1EventsMock) Address() common.Address { + return _FunctionsV1EventsMock.address +} + +type FunctionsV1EventsMockInterface interface { + EmitConfigUpdated(opts *bind.TransactOpts, param1 FunctionsV1EventsMockConfig) (*types.Transaction, error) + + EmitContractProposed(opts *bind.TransactOpts, proposedContractSetId [32]byte, proposedContractSetFromAddress common.Address, proposedContractSetToAddress common.Address) (*types.Transaction, error) + + EmitContractUpdated(opts *bind.TransactOpts, id [32]byte, from common.Address, to common.Address) (*types.Transaction, error) + + EmitFundsRecovered(opts *bind.TransactOpts, to common.Address, amount *big.Int) (*types.Transaction, error) + + EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitPaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) + + EmitRequestNotProcessed(opts *bind.TransactOpts, requestId [32]byte, coordinator common.Address, transmitter common.Address, resultCode uint8) (*types.Transaction, error) + + EmitRequestProcessed(opts *bind.TransactOpts, requestId [32]byte, subscriptionId uint64, totalCostJuels *big.Int, transmitter common.Address, resultCode uint8, response []byte, err []byte, callbackReturnData []byte) (*types.Transaction, error) + + EmitRequestStart(opts *bind.TransactOpts, requestId [32]byte, donId [32]byte, subscriptionId uint64, subscriptionOwner common.Address, requestingContract common.Address, requestInitiator common.Address, data []byte, dataVersion uint16, callbackGasLimit uint32, estimatedTotalCostJuels *big.Int) (*types.Transaction, error) + + EmitRequestTimedOut(opts *bind.TransactOpts, requestId [32]byte) (*types.Transaction, error) + + EmitSubscriptionCanceled(opts *bind.TransactOpts, subscriptionId uint64, 
fundsRecipient common.Address, fundsAmount *big.Int) (*types.Transaction, error) + + EmitSubscriptionConsumerAdded(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) + + EmitSubscriptionConsumerRemoved(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) + + EmitSubscriptionCreated(opts *bind.TransactOpts, subscriptionId uint64, owner common.Address) (*types.Transaction, error) + + EmitSubscriptionFunded(opts *bind.TransactOpts, subscriptionId uint64, oldBalance *big.Int, newBalance *big.Int) (*types.Transaction, error) + + EmitSubscriptionOwnerTransferRequested(opts *bind.TransactOpts, subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) + + EmitSubscriptionOwnerTransferred(opts *bind.TransactOpts, subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) + + EmitUnpaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) + + FilterConfigUpdated(opts *bind.FilterOpts) (*FunctionsV1EventsMockConfigUpdatedIterator, error) + + WatchConfigUpdated(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockConfigUpdated) (event.Subscription, error) + + ParseConfigUpdated(log types.Log) (*FunctionsV1EventsMockConfigUpdated, error) + + FilterContractProposed(opts *bind.FilterOpts) (*FunctionsV1EventsMockContractProposedIterator, error) + + WatchContractProposed(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockContractProposed) (event.Subscription, error) + + ParseContractProposed(log types.Log) (*FunctionsV1EventsMockContractProposed, error) + + FilterContractUpdated(opts *bind.FilterOpts) (*FunctionsV1EventsMockContractUpdatedIterator, error) + + WatchContractUpdated(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockContractUpdated) (event.Subscription, error) + + ParseContractUpdated(log types.Log) (*FunctionsV1EventsMockContractUpdated, error) + + 
FilterFundsRecovered(opts *bind.FilterOpts) (*FunctionsV1EventsMockFundsRecoveredIterator, error) + + WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockFundsRecovered) (event.Subscription, error) + + ParseFundsRecovered(log types.Log) (*FunctionsV1EventsMockFundsRecovered, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsV1EventsMockOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*FunctionsV1EventsMockOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsV1EventsMockOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*FunctionsV1EventsMockOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*FunctionsV1EventsMockPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*FunctionsV1EventsMockPaused, error) + + FilterRequestNotProcessed(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsV1EventsMockRequestNotProcessedIterator, error) + + WatchRequestNotProcessed(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockRequestNotProcessed, requestId [][32]byte) (event.Subscription, error) + + ParseRequestNotProcessed(log types.Log) (*FunctionsV1EventsMockRequestNotProcessed, error) + + FilterRequestProcessed(opts *bind.FilterOpts, requestId [][32]byte, subscriptionId []uint64) 
(*FunctionsV1EventsMockRequestProcessedIterator, error) + + WatchRequestProcessed(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockRequestProcessed, requestId [][32]byte, subscriptionId []uint64) (event.Subscription, error) + + ParseRequestProcessed(log types.Log) (*FunctionsV1EventsMockRequestProcessed, error) + + FilterRequestStart(opts *bind.FilterOpts, requestId [][32]byte, donId [][32]byte, subscriptionId []uint64) (*FunctionsV1EventsMockRequestStartIterator, error) + + WatchRequestStart(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockRequestStart, requestId [][32]byte, donId [][32]byte, subscriptionId []uint64) (event.Subscription, error) + + ParseRequestStart(log types.Log) (*FunctionsV1EventsMockRequestStart, error) + + FilterRequestTimedOut(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsV1EventsMockRequestTimedOutIterator, error) + + WatchRequestTimedOut(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockRequestTimedOut, requestId [][32]byte) (event.Subscription, error) + + ParseRequestTimedOut(log types.Log) (*FunctionsV1EventsMockRequestTimedOut, error) + + FilterSubscriptionCanceled(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionCanceledIterator, error) + + WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionCanceled, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionCanceled(log types.Log) (*FunctionsV1EventsMockSubscriptionCanceled, error) + + FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionConsumerAddedIterator, error) + + WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionConsumerAdded, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerAdded(log types.Log) (*FunctionsV1EventsMockSubscriptionConsumerAdded, error) + + FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, 
subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionConsumerRemovedIterator, error) + + WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionConsumerRemoved, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerRemoved(log types.Log) (*FunctionsV1EventsMockSubscriptionConsumerRemoved, error) + + FilterSubscriptionCreated(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionCreatedIterator, error) + + WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionCreated, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionCreated(log types.Log) (*FunctionsV1EventsMockSubscriptionCreated, error) + + FilterSubscriptionFunded(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionFundedIterator, error) + + WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionFunded, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionFunded(log types.Log) (*FunctionsV1EventsMockSubscriptionFunded, error) + + FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionOwnerTransferRequestedIterator, error) + + WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionOwnerTransferRequested, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferRequested(log types.Log) (*FunctionsV1EventsMockSubscriptionOwnerTransferRequested, error) + + FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsV1EventsMockSubscriptionOwnerTransferredIterator, error) + + WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockSubscriptionOwnerTransferred, subscriptionId []uint64) (event.Subscription, error) + + 
ParseSubscriptionOwnerTransferred(log types.Log) (*FunctionsV1EventsMockSubscriptionOwnerTransferred, error) + + FilterUnpaused(opts *bind.FilterOpts) (*FunctionsV1EventsMockUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *FunctionsV1EventsMockUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*FunctionsV1EventsMockUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/functions/generated/ocr2dr/ocr2dr.go b/core/gethwrappers/functions/generated/ocr2dr/ocr2dr.go new file mode 100644 index 00000000..c2b3c3a5 --- /dev/null +++ b/core/gethwrappers/functions/generated/ocr2dr/ocr2dr.go @@ -0,0 +1,178 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package ocr2dr + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var OCR2DRMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"EmptyArgs\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySource\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyUrl\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoInlineSecrets\",\"type\":\"error\"}]", + Bin: "0x602d6037600b82828239805160001a607314602a57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe73000000000000000000000000000000000000000030146080604052600080fdfea164736f6c6343000806000a", +} + +var OCR2DRABI = 
OCR2DRMetaData.ABI + +var OCR2DRBin = OCR2DRMetaData.Bin + +func DeployOCR2DR(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *OCR2DR, error) { + parsed, err := OCR2DRMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OCR2DRBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OCR2DR{address: address, abi: *parsed, OCR2DRCaller: OCR2DRCaller{contract: contract}, OCR2DRTransactor: OCR2DRTransactor{contract: contract}, OCR2DRFilterer: OCR2DRFilterer{contract: contract}}, nil +} + +type OCR2DR struct { + address common.Address + abi abi.ABI + OCR2DRCaller + OCR2DRTransactor + OCR2DRFilterer +} + +type OCR2DRCaller struct { + contract *bind.BoundContract +} + +type OCR2DRTransactor struct { + contract *bind.BoundContract +} + +type OCR2DRFilterer struct { + contract *bind.BoundContract +} + +type OCR2DRSession struct { + Contract *OCR2DR + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OCR2DRCallerSession struct { + Contract *OCR2DRCaller + CallOpts bind.CallOpts +} + +type OCR2DRTransactorSession struct { + Contract *OCR2DRTransactor + TransactOpts bind.TransactOpts +} + +type OCR2DRRaw struct { + Contract *OCR2DR +} + +type OCR2DRCallerRaw struct { + Contract *OCR2DRCaller +} + +type OCR2DRTransactorRaw struct { + Contract *OCR2DRTransactor +} + +func NewOCR2DR(address common.Address, backend bind.ContractBackend) (*OCR2DR, error) { + abi, err := abi.JSON(strings.NewReader(OCR2DRABI)) + if err != nil { + return nil, err + } + contract, err := bindOCR2DR(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OCR2DR{address: address, abi: abi, OCR2DRCaller: OCR2DRCaller{contract: contract}, OCR2DRTransactor: 
OCR2DRTransactor{contract: contract}, OCR2DRFilterer: OCR2DRFilterer{contract: contract}}, nil +} + +func NewOCR2DRCaller(address common.Address, caller bind.ContractCaller) (*OCR2DRCaller, error) { + contract, err := bindOCR2DR(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OCR2DRCaller{contract: contract}, nil +} + +func NewOCR2DRTransactor(address common.Address, transactor bind.ContractTransactor) (*OCR2DRTransactor, error) { + contract, err := bindOCR2DR(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OCR2DRTransactor{contract: contract}, nil +} + +func NewOCR2DRFilterer(address common.Address, filterer bind.ContractFilterer) (*OCR2DRFilterer, error) { + contract, err := bindOCR2DR(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OCR2DRFilterer{contract: contract}, nil +} + +func bindOCR2DR(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OCR2DRMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OCR2DR *OCR2DRRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DR.Contract.OCR2DRCaller.contract.Call(opts, result, method, params...) +} + +func (_OCR2DR *OCR2DRRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DR.Contract.OCR2DRTransactor.contract.Transfer(opts) +} + +func (_OCR2DR *OCR2DRRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DR.Contract.OCR2DRTransactor.contract.Transact(opts, method, params...) 
+} + +func (_OCR2DR *OCR2DRCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DR.Contract.contract.Call(opts, result, method, params...) +} + +func (_OCR2DR *OCR2DRTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DR.Contract.contract.Transfer(opts) +} + +func (_OCR2DR *OCR2DRTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DR.Contract.contract.Transact(opts, method, params...) +} + +func (_OCR2DR *OCR2DR) Address() common.Address { + return _OCR2DR.address +} + +type OCR2DRInterface interface { + Address() common.Address +} diff --git a/core/gethwrappers/functions/generated/ocr2dr_client/ocr2dr_client.go b/core/gethwrappers/functions/generated/ocr2dr_client/ocr2dr_client.go new file mode 100644 index 00000000..9b6065f0 --- /dev/null +++ b/core/gethwrappers/functions/generated/ocr2dr_client/ocr2dr_client.go @@ -0,0 +1,520 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package ocr2dr_client + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type FunctionsRequest struct { + CodeLocation uint8 + SecretsLocation uint8 + Language uint8 + Source string + Secrets []byte + Args []string +} + +var OCR2DRClientMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"NoInlineSecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RequestIsAlreadyPending\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RequestIsNotPending\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"SenderIsNotRegistry\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestSent\",\"type\":\"event\"},{\"inputs\":[{\"components\":[{\"internalType\":\"enumFunctions.Location\",\"name\":\"codeLocation\",\"type\":\"uint8\"},{\"internalType\":\"enumFunctions.Location\",\"name\":\"secretsLocation\",\"type\":\"uint8\"},{\"internalType\":\"enumFunctions.CodeLanguage\",\"name\":\"language\",\"type\":\"uint8\"},{\"internalType\":\"string\",\"name\":\"source\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"secrets\",\"type\":\"bytes\"},{\"internalType\":\"string[]\",\"name\":\"args\",\"type\":\"string[]\"}],\"internalType\":\"structFunctions.Reque
st\",\"name\":\"req\",\"type\":\"tuple\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"}],\"name\":\"estimateCost\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDONPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"}],\"name\":\"handleOracleFulfillment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +var OCR2DRClientABI = OCR2DRClientMetaData.ABI + +type OCR2DRClient struct { + address common.Address + abi abi.ABI + OCR2DRClientCaller + OCR2DRClientTransactor + OCR2DRClientFilterer +} + +type OCR2DRClientCaller struct { + contract *bind.BoundContract +} + +type OCR2DRClientTransactor struct { + contract *bind.BoundContract +} + +type OCR2DRClientFilterer struct { + contract *bind.BoundContract +} + +type OCR2DRClientSession struct { + Contract *OCR2DRClient + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OCR2DRClientCallerSession struct { + Contract *OCR2DRClientCaller + CallOpts bind.CallOpts +} + +type OCR2DRClientTransactorSession struct { + Contract *OCR2DRClientTransactor + TransactOpts bind.TransactOpts +} + +type OCR2DRClientRaw struct { + Contract *OCR2DRClient +} + +type OCR2DRClientCallerRaw struct { + Contract *OCR2DRClientCaller +} + +type OCR2DRClientTransactorRaw struct { + Contract *OCR2DRClientTransactor +} + +func NewOCR2DRClient(address common.Address, backend bind.ContractBackend) (*OCR2DRClient, 
error) { + abi, err := abi.JSON(strings.NewReader(OCR2DRClientABI)) + if err != nil { + return nil, err + } + contract, err := bindOCR2DRClient(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OCR2DRClient{address: address, abi: abi, OCR2DRClientCaller: OCR2DRClientCaller{contract: contract}, OCR2DRClientTransactor: OCR2DRClientTransactor{contract: contract}, OCR2DRClientFilterer: OCR2DRClientFilterer{contract: contract}}, nil +} + +func NewOCR2DRClientCaller(address common.Address, caller bind.ContractCaller) (*OCR2DRClientCaller, error) { + contract, err := bindOCR2DRClient(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OCR2DRClientCaller{contract: contract}, nil +} + +func NewOCR2DRClientTransactor(address common.Address, transactor bind.ContractTransactor) (*OCR2DRClientTransactor, error) { + contract, err := bindOCR2DRClient(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OCR2DRClientTransactor{contract: contract}, nil +} + +func NewOCR2DRClientFilterer(address common.Address, filterer bind.ContractFilterer) (*OCR2DRClientFilterer, error) { + contract, err := bindOCR2DRClient(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OCR2DRClientFilterer{contract: contract}, nil +} + +func bindOCR2DRClient(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OCR2DRClientMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OCR2DRClient *OCR2DRClientRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DRClient.Contract.OCR2DRClientCaller.contract.Call(opts, result, method, params...) 
+} + +func (_OCR2DRClient *OCR2DRClientRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRClient.Contract.OCR2DRClientTransactor.contract.Transfer(opts) +} + +func (_OCR2DRClient *OCR2DRClientRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DRClient.Contract.OCR2DRClientTransactor.contract.Transact(opts, method, params...) +} + +func (_OCR2DRClient *OCR2DRClientCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DRClient.Contract.contract.Call(opts, result, method, params...) +} + +func (_OCR2DRClient *OCR2DRClientTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRClient.Contract.contract.Transfer(opts) +} + +func (_OCR2DRClient *OCR2DRClientTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DRClient.Contract.contract.Transact(opts, method, params...) 
+} + +func (_OCR2DRClient *OCR2DRClientCaller) EstimateCost(opts *bind.CallOpts, req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + var out []interface{} + err := _OCR2DRClient.contract.Call(opts, &out, "estimateCost", req, subscriptionId, gasLimit, gasPrice) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DRClient *OCR2DRClientSession) EstimateCost(req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + return _OCR2DRClient.Contract.EstimateCost(&_OCR2DRClient.CallOpts, req, subscriptionId, gasLimit, gasPrice) +} + +func (_OCR2DRClient *OCR2DRClientCallerSession) EstimateCost(req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + return _OCR2DRClient.Contract.EstimateCost(&_OCR2DRClient.CallOpts, req, subscriptionId, gasLimit, gasPrice) +} + +func (_OCR2DRClient *OCR2DRClientCaller) GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _OCR2DRClient.contract.Call(opts, &out, "getDONPublicKey") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_OCR2DRClient *OCR2DRClientSession) GetDONPublicKey() ([]byte, error) { + return _OCR2DRClient.Contract.GetDONPublicKey(&_OCR2DRClient.CallOpts) +} + +func (_OCR2DRClient *OCR2DRClientCallerSession) GetDONPublicKey() ([]byte, error) { + return _OCR2DRClient.Contract.GetDONPublicKey(&_OCR2DRClient.CallOpts) +} + +func (_OCR2DRClient *OCR2DRClientTransactor) HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _OCR2DRClient.contract.Transact(opts, "handleOracleFulfillment", requestId, response, err) +} + +func (_OCR2DRClient *OCR2DRClientSession) 
HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _OCR2DRClient.Contract.HandleOracleFulfillment(&_OCR2DRClient.TransactOpts, requestId, response, err) +} + +func (_OCR2DRClient *OCR2DRClientTransactorSession) HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _OCR2DRClient.Contract.HandleOracleFulfillment(&_OCR2DRClient.TransactOpts, requestId, response, err) +} + +type OCR2DRClientRequestFulfilledIterator struct { + Event *OCR2DRClientRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRClientRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRClientRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *OCR2DRClientRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRClientRequestFulfilled struct { + Id [32]byte + Raw types.Log +} + +func (_OCR2DRClient *OCR2DRClientFilterer) FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientRequestFulfilledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_OCR2DRClient.contract.FilterLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return &OCR2DRClientRequestFulfilledIterator{contract: _OCR2DRClient.contract, event: "RequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_OCR2DRClient *OCR2DRClientFilterer) WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *OCR2DRClientRequestFulfilled, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClient.contract.WatchLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRClientRequestFulfilled) + if err := _OCR2DRClient.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRClient *OCR2DRClientFilterer) ParseRequestFulfilled(log types.Log) (*OCR2DRClientRequestFulfilled, error) { + event := new(OCR2DRClientRequestFulfilled) + if err := _OCR2DRClient.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRClientRequestSentIterator struct { + Event *OCR2DRClientRequestSent + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRClientRequestSentIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRClientRequestSentIterator) Error() error { + return it.fail +} + +func (it *OCR2DRClientRequestSentIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRClientRequestSent struct { + Id [32]byte + Raw types.Log +} + +func (_OCR2DRClient *OCR2DRClientFilterer) FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientRequestSentIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClient.contract.FilterLogs(opts, "RequestSent", idRule) + if err != nil { + return nil, err + } + return &OCR2DRClientRequestSentIterator{contract: _OCR2DRClient.contract, event: "RequestSent", logs: logs, sub: sub}, nil +} + +func (_OCR2DRClient *OCR2DRClientFilterer) WatchRequestSent(opts *bind.WatchOpts, sink chan<- *OCR2DRClientRequestSent, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClient.contract.WatchLogs(opts, "RequestSent", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRClientRequestSent) + if err := _OCR2DRClient.contract.UnpackLog(event, "RequestSent", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } 
+ } + }), nil +} + +func (_OCR2DRClient *OCR2DRClientFilterer) ParseRequestSent(log types.Log) (*OCR2DRClientRequestSent, error) { + event := new(OCR2DRClientRequestSent) + if err := _OCR2DRClient.contract.UnpackLog(event, "RequestSent", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_OCR2DRClient *OCR2DRClient) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _OCR2DRClient.abi.Events["RequestFulfilled"].ID: + return _OCR2DRClient.ParseRequestFulfilled(log) + case _OCR2DRClient.abi.Events["RequestSent"].ID: + return _OCR2DRClient.ParseRequestSent(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OCR2DRClientRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0x85e1543bf2f84fe80c6badbce3648c8539ad1df4d2b3d822938ca0538be727e6") +} + +func (OCR2DRClientRequestSent) Topic() common.Hash { + return common.HexToHash("0x1131472297a800fee664d1d89cfa8f7676ff07189ecc53f80bbb5f4969099db8") +} + +func (_OCR2DRClient *OCR2DRClient) Address() common.Address { + return _OCR2DRClient.address +} + +type OCR2DRClientInterface interface { + EstimateCost(opts *bind.CallOpts, req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) + + GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) + + HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) + + FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientRequestFulfilledIterator, error) + + WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *OCR2DRClientRequestFulfilled, id [][32]byte) (event.Subscription, error) + + ParseRequestFulfilled(log types.Log) (*OCR2DRClientRequestFulfilled, error) + + FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientRequestSentIterator, error) + + WatchRequestSent(opts *bind.WatchOpts, 
sink chan<- *OCR2DRClientRequestSent, id [][32]byte) (event.Subscription, error) + + ParseRequestSent(log types.Log) (*OCR2DRClientRequestSent, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/functions/generated/ocr2dr_client_example/ocr2dr_client_example.go b/core/gethwrappers/functions/generated/ocr2dr_client_example/ocr2dr_client_example.go new file mode 100644 index 00000000..7ab26abc --- /dev/null +++ b/core/gethwrappers/functions/generated/ocr2dr_client_example/ocr2dr_client_example.go @@ -0,0 +1,1045 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package ocr2dr_client_example + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type FunctionsRequest struct { + CodeLocation uint8 + SecretsLocation uint8 + Language uint8 + Source string + Secrets []byte + Args []string +} + +var OCR2DRClientExampleMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"EmptyArgs\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySource\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoInlineSecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RequestIsAlreadyPending\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RequestIsNotPending\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"SenderIsNotRegistry\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"UnexpectedRequestID\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestSent\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"MAX_CALLBACK_GAS\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"source\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"secrets\",\"type\":\"bytes\"},{\"internalType\":\"string[]\",\"name\":\"args\",\"type\":\"string[]\"},
{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"SendRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"enumFunctions.Location\",\"name\":\"codeLocation\",\"type\":\"uint8\"},{\"internalType\":\"enumFunctions.Location\",\"name\":\"secretsLocation\",\"type\":\"uint8\"},{\"internalType\":\"enumFunctions.CodeLanguage\",\"name\":\"language\",\"type\":\"uint8\"},{\"internalType\":\"string\",\"name\":\"source\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"secrets\",\"type\":\"bytes\"},{\"internalType\":\"string[]\",\"name\":\"args\",\"type\":\"string[]\"}],\"internalType\":\"structFunctions.Request\",\"name\":\"req\",\"type\":\"tuple\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"}],\"name\":\"estimateCost\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDONPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"}],\"name\":\"handleOracleFulfillment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastError\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastErrorLength\",\"out
puts\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastRequestId\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastResponse\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastResponseLength\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60806040523480156200001157600080fd5b5060405162001f0338038062001f03833981016040819052620000349162000199565b600080546001600160a01b0319166001600160a01b038316178155339081906001600160a01b038216620000af5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600280546001600160a01b0319166001600160a01b0384811691909117909155811615620000e257620000e281620000ec565b50505050620001cb565b6001600160a01b038116331415620001475760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620000a6565b600380546001600160a01b0319166001600160a01b03838116918217909255600254604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b600060208284031215620001ac57600080fd5b81516001600160a01b0381168114620001c457600080fd5b9392505050565b611d2880620001db6000396000f3fe608060405234801561001057600080fd5b50600436106100df5760003560e01c80638da5cb5b1161008c578063d4b3917511610066578063d4b39175146101aa578063d769717e146101da578063f2fde38b146101ed578063fc2a88c31461020057600080fd5b80638da5cb5b1461015d578063b48cffea14610185578063d328a91e1461019557600080fd5b806362747e42116100bd57806362747e42146101425780636d9809a01461014b57806379ba50971461015557600080fd5b80630ca76175146100e457806329f0de3f146100f95780632c29166b14610115575b600080fd5b6100f76100f2366004611577565b610209565b005b61010260065481565b6040519081526020015b60405180910390f35b60075461012d90640100000000900463ffffffff1681565b60405163ffffffff909116815260200161010c565b61010260055481565b61012d6201117081565b6100f76102d4565b60025460405173ffffffffffffffffffffffffffffffffffffffff909116815260200161010c565b60075461012d9063ffffffff1681565b61019d6103da565b60405161010c91906118b7565b6101bd6101b836600461172d565b6104a3565b6040516bffffffffffffffffffffffff909116815260200161010c565b6100f76101e8366004611652565b610546565b6100f76101fb366004611524565b610647565b61010260045481565b600
083815260016020526040902054839073ffffffffffffffffffffffffffffffffffffffff163314610268576040517fa0c5ec6300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008181526001602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001690555182917f85e1543bf2f84fe80c6badbce3648c8539ad1df4d2b3d822938ca0538be727e691a26102ce84848461065b565b50505050565b60035473ffffffffffffffffffffffffffffffffffffffff16331461035a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000008082163390811790935560038054909116905560405173ffffffffffffffffffffffffffffffffffffffff909116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a350565b60008054604080517fd328a91e000000000000000000000000000000000000000000000000000000008152905160609373ffffffffffffffffffffffffffffffffffffffff9093169263d328a91e9260048082019391829003018186803b15801561044457600080fd5b505afa158015610458573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261049e91908101906115e4565b905090565b6000805473ffffffffffffffffffffffffffffffffffffffff1663d227d245856104cc88610729565b86866040518563ffffffff1660e01b81526004016104ed9493929190611903565b60206040518083038186803b15801561050557600080fd5b505afa158015610519573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061053d919061183f565b95945050505050565b61054e6109f5565b6105886040805160c08101909152806000815260200160008152602001600081526020016060815260200160608152602001606081525090565b6105ca88888080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508593925050610a789050565b84156106125761061286868080601f016020809104026020016040519081016040528093929190818
1526020018383808284376000920191909152508593925050610a899050565b821561062c5761062c6106258486611b90565b8290610ad0565b61063a818362011170610b10565b6004555050505050505050565b61064f6109f5565b61065881610cce565b50565b8260045414610699576040517fd068bf5b00000000000000000000000000000000000000000000000000000000815260048101849052602401610351565b6106a282610dc5565b6005558151600780547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff9092169190911790556106e481610dc5565b600655516007805463ffffffff909216640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff9092169190911790555050565b6060610733611355565b805161074190610100610e4d565b50610781816040518060400160405280600c81526020017f636f64654c6f636174696f6e0000000000000000000000000000000000000000815250610ec7565b6107a0818460000151600281111561079b5761079b611c6c565b610ee5565b6107df816040518060400160405280600881526020017f6c616e6775616765000000000000000000000000000000000000000000000000815250610ec7565b6107f9818460400151600081111561079b5761079b611c6c565b610838816040518060400160405280600681526020017f736f757263650000000000000000000000000000000000000000000000000000815250610ec7565b610846818460600151610ec7565b60a083015151156108ec57610890816040518060400160405280600481526020017f6172677300000000000000000000000000000000000000000000000000000000815250610ec7565b61089981610f1e565b60005b8360a00151518110156108e2576108d0828560a0015183815181106108c3576108c3611c9b565b6020026020010151610ec7565b806108da81611bc9565b91505061089c565b506108ec81610f42565b608083015151156109ed5760008360200151600281111561090f5761090f611c6c565b1415610947576040517fa80d31f700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610986816040518060400160405280600f81526020017f736563726574734c6f636174696f6e0000000000000000000000000000000000815250610ec7565b6109a0818460200151600281111561079b5761079b611c6c565b6109df816040518060400160405280600781526020017f73656372657473000000000000000000000000000000000000000000000000008152506
10ec7565b6109ed818460800151610f60565b515192915050565b60025473ffffffffffffffffffffffffffffffffffffffff163314610a76576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610351565b565b610a858260008084610f6d565b5050565b8051610ac1576040517fe889636f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60016020830152608090910152565b8051610b08576040517ffe936cb700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60a090910152565b60008054819073ffffffffffffffffffffffffffffffffffffffff166328242b0485610b3b88610729565b866040518463ffffffff1660e01b8152600401610b5a939291906118ca565b602060405180830381600087803b158015610b7457600080fd5b505af1158015610b88573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610bac919061155e565b905060008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16635ab1bd536040518163ffffffff1660e01b815260040160206040518083038186803b158015610c1457600080fd5b505afa158015610c28573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610c4c9190611541565b60008281526001602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9490941693909317909255905182917f1131472297a800fee664d1d89cfa8f7676ff07189ecc53f80bbb5f4969099db891a2949350505050565b73ffffffffffffffffffffffffffffffffffffffff8116331415610d4e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610351565b600380547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff838116918217909255600254604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a35
0565b600080600060209050602084511015610ddc575082515b60005b81811015610e4457610df2816008611b3c565b858281518110610e0457610e04611c9b565b01602001517fff0000000000000000000000000000000000000000000000000000000000000016901c929092179180610e3c81611bc9565b915050610ddf565b50909392505050565b604080518082019091526060815260006020820152610e6d602083611c02565b15610e9557610e7d602083611c02565b610e88906020611b79565b610e9290836119fd565b91505b602080840183905260405180855260008152908184010181811015610eb957600080fd5b604052508290505b92915050565b610ed48260038351611001565b8151610ee09082611122565b505050565b8151610ef29060c261114a565b50610a858282604051602001610f0a91815260200190565b604051602081830303815290604052610f60565b610f298160046111b3565b600181602001818151610f3c91906119fd565b90525050565b610f4d8160076111b3565b600181602001818151610f3c9190611b79565b610ed48260028351611001565b8051610fa5576040517f22ce3edd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b83836002811115610fb857610fb8611c6c565b90816002811115610fcb57610fcb611c6c565b90525060408401828015610fe157610fe1611c6c565b90818015610ff157610ff1611c6c565b9052506060909301929092525050565b60178167ffffffffffffffff16116110285782516102ce9060e0600585901b16831761114a565b60ff8167ffffffffffffffff161161106a578251611051906018611fe0600586901b161761114a565b5082516102ce9067ffffffffffffffff831660016111ca565b61ffff8167ffffffffffffffff16116110ad578251611094906019611fe0600586901b161761114a565b5082516102ce9067ffffffffffffffff831660026111ca565b63ffffffff8167ffffffffffffffff16116110f25782516110d990601a611fe0600586901b161761114a565b5082516102ce9067ffffffffffffffff831660046111ca565b825161110990601b611fe0600586901b161761114a565b5082516102ce9067ffffffffffffffff831660086111ca565b6040805180820190915260608152600060208201526111438383845161124f565b9392505050565b604080518082019091526060815260006020820152825151600061116f8260016119fd565b905084602001518210611190576111908561118b836002611b3c565b61133e565b84516020838201018581535080518211156111a9578181525b509
3949350505050565b8151610ee090601f611fe0600585901b161761114a565b60408051808201909152606081526000602082015283515160006111ee82856119fd565b9050856020015181111561120b5761120b8661118b836002611b3c565b6000600161121b86610100611a76565b6112259190611b79565b90508651828101878319825116178152508051831115611243578281525b50959695505050505050565b604080518082019091526060815260006020820152825182111561127257600080fd5b835151600061128184836119fd565b9050856020015181111561129e5761129e8661118b836002611b3c565b8551805183820160200191600091808511156112b8578482525b505050602086015b602086106112f857805182526112d76020836119fd565b91506112e46020826119fd565b90506112f1602087611b79565b95506112c0565b5181517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60208890036101000a0190811690199190911617905250849150509392505050565b815161134a8383610e4d565b506102ce8382611122565b604051806040016040528061137d604051806040016040528060608152602001600081525090565b8152602001600081525090565b600067ffffffffffffffff808411156113a5576113a5611cca565b8360051b60206113b6818301611968565b8681529350808401858381018910156113ce57600080fd5b60009350835b88811015611409578135868111156113ea578586fd5b6113f68b828b0161147f565b84525091830191908301906001016113d4565b5050505050509392505050565b600082601f83011261142757600080fd5b6111438383356020850161138a565b60008083601f84011261144857600080fd5b50813567ffffffffffffffff81111561146057600080fd5b60208301915083602082850101111561147857600080fd5b9250929050565b600082601f83011261149057600080fd5b81356114a361149e826119b7565b611968565b8181528460208386010111156114b857600080fd5b816020850160208301376000918101602001919091529392505050565b8035600181106114e457600080fd5b919050565b8035600381106114e457600080fd5b803563ffffffff811681146114e457600080fd5b803567ffffffffffffffff811681146114e457600080fd5b60006020828403121561153657600080fd5b813561114381611cf9565b60006020828403121561155357600080fd5b815161114381611cf9565b60006020828403121561157057600080fd5b5051919050565b60008060006060848603121561158c57600080fd5b8335925060208
4013567ffffffffffffffff808211156115ab57600080fd5b6115b78783880161147f565b935060408601359150808211156115cd57600080fd5b506115da8682870161147f565b9150509250925092565b6000602082840312156115f657600080fd5b815167ffffffffffffffff81111561160d57600080fd5b8201601f8101841361161e57600080fd5b805161162c61149e826119b7565b81815285602083850101111561164157600080fd5b61053d826020830160208601611b9d565b60008060008060008060006080888a03121561166d57600080fd5b873567ffffffffffffffff8082111561168557600080fd5b6116918b838c01611436565b909950975060208a01359150808211156116aa57600080fd5b6116b68b838c01611436565b909750955060408a01359150808211156116cf57600080fd5b818a0191508a601f8301126116e357600080fd5b8135818111156116f257600080fd5b8b60208260051b850101111561170757600080fd5b60208301955080945050505061171f6060890161150c565b905092959891949750929550565b6000806000806080858703121561174357600080fd5b843567ffffffffffffffff8082111561175b57600080fd5b9086019060c0828903121561176f57600080fd5b61177761193f565b611780836114e9565b815261178e602084016114e9565b602082015261179f604084016114d5565b60408201526060830135828111156117b657600080fd5b6117c28a82860161147f565b6060830152506080830135828111156117da57600080fd5b6117e68a82860161147f565b60808301525060a0830135828111156117fe57600080fd5b61180a8a828601611416565b60a08301525095506118219150506020860161150c565b925061182f604086016114f8565b9396929550929360600135925050565b60006020828403121561185157600080fd5b81516bffffffffffffffffffffffff8116811461114357600080fd5b60008151808452611885816020860160208601611b9d565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000611143602083018461186d565b67ffffffffffffffff841681526060602082015260006118ed606083018561186d565b905063ffffffff83166040830152949350505050565b67ffffffffffffffff85168152608060208201526000611926608083018661186d565b63ffffffff949094166040830152506060015292915050565b60405160c0810167ffffffffffffffff8111828210171561196257611962611cca565b60405290565b604051601f82017ffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156119af576119af611cca565b604052919050565b600067ffffffffffffffff8211156119d1576119d1611cca565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b60008219821115611a1057611a10611c3d565b500190565b600181815b80851115611a6e57817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04821115611a5457611a54611c3d565b80851615611a6157918102915b93841c9390800290611a1a565b509250929050565b60006111438383600082611a8c57506001610ec1565b81611a9957506000610ec1565b8160018114611aaf5760028114611ab957611ad5565b6001915050610ec1565b60ff841115611aca57611aca611c3d565b50506001821b610ec1565b5060208310610133831016604e8410600b8410161715611af8575081810a610ec1565b611b028383611a15565b807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04821115611b3457611b34611c3d565b029392505050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615611b7457611b74611c3d565b500290565b600082821015611b8b57611b8b611c3d565b500390565b600061114336848461138a565b60005b83811015611bb8578181015183820152602001611ba0565b838111156102ce5750506000910152565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415611bfb57611bfb611c3d565b5060010190565b600082611c38577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500690565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b73ffffffffffffffffffffffffffffffffffffffff8116811461065857600080fdfea164736f6c6343000806000a", +} + +var OCR2DRClientExampleABI = OCR2DRClientExampleMetaData.ABI + +var OCR2DRClientExampleBin = 
OCR2DRClientExampleMetaData.Bin + +func DeployOCR2DRClientExample(auth *bind.TransactOpts, backend bind.ContractBackend, oracle common.Address) (common.Address, *types.Transaction, *OCR2DRClientExample, error) { + parsed, err := OCR2DRClientExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OCR2DRClientExampleBin), backend, oracle) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OCR2DRClientExample{address: address, abi: *parsed, OCR2DRClientExampleCaller: OCR2DRClientExampleCaller{contract: contract}, OCR2DRClientExampleTransactor: OCR2DRClientExampleTransactor{contract: contract}, OCR2DRClientExampleFilterer: OCR2DRClientExampleFilterer{contract: contract}}, nil +} + +type OCR2DRClientExample struct { + address common.Address + abi abi.ABI + OCR2DRClientExampleCaller + OCR2DRClientExampleTransactor + OCR2DRClientExampleFilterer +} + +type OCR2DRClientExampleCaller struct { + contract *bind.BoundContract +} + +type OCR2DRClientExampleTransactor struct { + contract *bind.BoundContract +} + +type OCR2DRClientExampleFilterer struct { + contract *bind.BoundContract +} + +type OCR2DRClientExampleSession struct { + Contract *OCR2DRClientExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OCR2DRClientExampleCallerSession struct { + Contract *OCR2DRClientExampleCaller + CallOpts bind.CallOpts +} + +type OCR2DRClientExampleTransactorSession struct { + Contract *OCR2DRClientExampleTransactor + TransactOpts bind.TransactOpts +} + +type OCR2DRClientExampleRaw struct { + Contract *OCR2DRClientExample +} + +type OCR2DRClientExampleCallerRaw struct { + Contract *OCR2DRClientExampleCaller +} + +type OCR2DRClientExampleTransactorRaw struct { + Contract *OCR2DRClientExampleTransactor +} + +func 
NewOCR2DRClientExample(address common.Address, backend bind.ContractBackend) (*OCR2DRClientExample, error) { + abi, err := abi.JSON(strings.NewReader(OCR2DRClientExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindOCR2DRClientExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OCR2DRClientExample{address: address, abi: abi, OCR2DRClientExampleCaller: OCR2DRClientExampleCaller{contract: contract}, OCR2DRClientExampleTransactor: OCR2DRClientExampleTransactor{contract: contract}, OCR2DRClientExampleFilterer: OCR2DRClientExampleFilterer{contract: contract}}, nil +} + +func NewOCR2DRClientExampleCaller(address common.Address, caller bind.ContractCaller) (*OCR2DRClientExampleCaller, error) { + contract, err := bindOCR2DRClientExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OCR2DRClientExampleCaller{contract: contract}, nil +} + +func NewOCR2DRClientExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*OCR2DRClientExampleTransactor, error) { + contract, err := bindOCR2DRClientExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OCR2DRClientExampleTransactor{contract: contract}, nil +} + +func NewOCR2DRClientExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*OCR2DRClientExampleFilterer, error) { + contract, err := bindOCR2DRClientExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OCR2DRClientExampleFilterer{contract: contract}, nil +} + +func bindOCR2DRClientExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OCR2DRClientExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleRaw) 
Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DRClientExample.Contract.OCR2DRClientExampleCaller.contract.Call(opts, result, method, params...) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.OCR2DRClientExampleTransactor.contract.Transfer(opts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.OCR2DRClientExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DRClientExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.contract.Transfer(opts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) MAXCALLBACKGAS(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "MAX_CALLBACK_GAS") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) MAXCALLBACKGAS() (uint32, error) { + return _OCR2DRClientExample.Contract.MAXCALLBACKGAS(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) MAXCALLBACKGAS() (uint32, error) { + return _OCR2DRClientExample.Contract.MAXCALLBACKGAS(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) EstimateCost(opts *bind.CallOpts, req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "estimateCost", req, subscriptionId, gasLimit, gasPrice) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) EstimateCost(req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + return _OCR2DRClientExample.Contract.EstimateCost(&_OCR2DRClientExample.CallOpts, req, subscriptionId, gasLimit, gasPrice) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) EstimateCost(req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + return _OCR2DRClientExample.Contract.EstimateCost(&_OCR2DRClientExample.CallOpts, req, subscriptionId, gasLimit, gasPrice) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, 
"getDONPublicKey") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) GetDONPublicKey() ([]byte, error) { + return _OCR2DRClientExample.Contract.GetDONPublicKey(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) GetDONPublicKey() ([]byte, error) { + return _OCR2DRClientExample.Contract.GetDONPublicKey(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) LastError(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "lastError") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) LastError() ([32]byte, error) { + return _OCR2DRClientExample.Contract.LastError(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) LastError() ([32]byte, error) { + return _OCR2DRClientExample.Contract.LastError(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) LastErrorLength(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "lastErrorLength") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) LastErrorLength() (uint32, error) { + return _OCR2DRClientExample.Contract.LastErrorLength(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) LastErrorLength() (uint32, error) { + return _OCR2DRClientExample.Contract.LastErrorLength(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample 
*OCR2DRClientExampleCaller) LastRequestId(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "lastRequestId") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) LastRequestId() ([32]byte, error) { + return _OCR2DRClientExample.Contract.LastRequestId(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) LastRequestId() ([32]byte, error) { + return _OCR2DRClientExample.Contract.LastRequestId(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) LastResponse(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "lastResponse") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) LastResponse() ([32]byte, error) { + return _OCR2DRClientExample.Contract.LastResponse(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) LastResponse() ([32]byte, error) { + return _OCR2DRClientExample.Contract.LastResponse(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) LastResponseLength(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "lastResponseLength") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) LastResponseLength() (uint32, error) { + return _OCR2DRClientExample.Contract.LastResponseLength(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample 
*OCR2DRClientExampleCallerSession) LastResponseLength() (uint32, error) { + return _OCR2DRClientExample.Contract.LastResponseLength(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) Owner() (common.Address, error) { + return _OCR2DRClientExample.Contract.Owner(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) Owner() (common.Address, error) { + return _OCR2DRClientExample.Contract.Owner(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactor) SendRequest(opts *bind.TransactOpts, source string, secrets []byte, args []string, subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRClientExample.contract.Transact(opts, "SendRequest", source, secrets, args, subscriptionId) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) SendRequest(source string, secrets []byte, args []string, subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.SendRequest(&_OCR2DRClientExample.TransactOpts, source, secrets, args, subscriptionId) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactorSession) SendRequest(source string, secrets []byte, args []string, subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.SendRequest(&_OCR2DRClientExample.TransactOpts, source, secrets, args, subscriptionId) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRClientExample.contract.Transact(opts, 
"acceptOwnership") +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) AcceptOwnership() (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.AcceptOwnership(&_OCR2DRClientExample.TransactOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.AcceptOwnership(&_OCR2DRClientExample.TransactOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactor) HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _OCR2DRClientExample.contract.Transact(opts, "handleOracleFulfillment", requestId, response, err) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.HandleOracleFulfillment(&_OCR2DRClientExample.TransactOpts, requestId, response, err) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactorSession) HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.HandleOracleFulfillment(&_OCR2DRClientExample.TransactOpts, requestId, response, err) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _OCR2DRClientExample.contract.Transact(opts, "transferOwnership", to) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.TransferOwnership(&_OCR2DRClientExample.TransactOpts, to) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return 
_OCR2DRClientExample.Contract.TransferOwnership(&_OCR2DRClientExample.TransactOpts, to) +} + +type OCR2DRClientExampleOwnershipTransferRequestedIterator struct { + Event *OCR2DRClientExampleOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRClientExampleOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRClientExampleOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRClientExampleOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRClientExampleOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRClientExampleOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) 
+ if err != nil { + return nil, err + } + return &OCR2DRClientExampleOwnershipTransferRequestedIterator{contract: _OCR2DRClientExample.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRClientExampleOwnershipTransferRequested) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) ParseOwnershipTransferRequested(log types.Log) (*OCR2DRClientExampleOwnershipTransferRequested, error) { + event := new(OCR2DRClientExampleOwnershipTransferRequested) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRClientExampleOwnershipTransferredIterator struct { + Event *OCR2DRClientExampleOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + 
+func (it *OCR2DRClientExampleOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRClientExampleOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *OCR2DRClientExampleOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRClientExampleOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRClientExampleOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &OCR2DRClientExampleOwnershipTransferredIterator{contract: _OCR2DRClientExample.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleOwnershipTransferred, from []common.Address, to 
[]common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRClientExampleOwnershipTransferred) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) ParseOwnershipTransferred(log types.Log) (*OCR2DRClientExampleOwnershipTransferred, error) { + event := new(OCR2DRClientExampleOwnershipTransferred) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRClientExampleRequestFulfilledIterator struct { + Event *OCR2DRClientExampleRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRClientExampleRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(OCR2DRClientExampleRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRClientExampleRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *OCR2DRClientExampleRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRClientExampleRequestFulfilled struct { + Id [32]byte + Raw types.Log +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientExampleRequestFulfilledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.FilterLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return &OCR2DRClientExampleRequestFulfilledIterator{contract: _OCR2DRClientExample.contract, event: "RequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleRequestFulfilled, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.WatchLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRClientExampleRequestFulfilled) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case 
err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) ParseRequestFulfilled(log types.Log) (*OCR2DRClientExampleRequestFulfilled, error) { + event := new(OCR2DRClientExampleRequestFulfilled) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRClientExampleRequestSentIterator struct { + Event *OCR2DRClientExampleRequestSent + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRClientExampleRequestSentIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRClientExampleRequestSentIterator) Error() error { + return it.fail +} + +func (it *OCR2DRClientExampleRequestSentIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRClientExampleRequestSent struct { + Id [32]byte + Raw types.Log +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientExampleRequestSentIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.FilterLogs(opts, 
"RequestSent", idRule) + if err != nil { + return nil, err + } + return &OCR2DRClientExampleRequestSentIterator{contract: _OCR2DRClientExample.contract, event: "RequestSent", logs: logs, sub: sub}, nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) WatchRequestSent(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleRequestSent, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.WatchLogs(opts, "RequestSent", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRClientExampleRequestSent) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "RequestSent", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) ParseRequestSent(log types.Log) (*OCR2DRClientExampleRequestSent, error) { + event := new(OCR2DRClientExampleRequestSent) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "RequestSent", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_OCR2DRClientExample *OCR2DRClientExample) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _OCR2DRClientExample.abi.Events["OwnershipTransferRequested"].ID: + return _OCR2DRClientExample.ParseOwnershipTransferRequested(log) + case _OCR2DRClientExample.abi.Events["OwnershipTransferred"].ID: + return _OCR2DRClientExample.ParseOwnershipTransferred(log) + case _OCR2DRClientExample.abi.Events["RequestFulfilled"].ID: + return _OCR2DRClientExample.ParseRequestFulfilled(log) + case 
_OCR2DRClientExample.abi.Events["RequestSent"].ID: + return _OCR2DRClientExample.ParseRequestSent(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OCR2DRClientExampleOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (OCR2DRClientExampleOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (OCR2DRClientExampleRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0x85e1543bf2f84fe80c6badbce3648c8539ad1df4d2b3d822938ca0538be727e6") +} + +func (OCR2DRClientExampleRequestSent) Topic() common.Hash { + return common.HexToHash("0x1131472297a800fee664d1d89cfa8f7676ff07189ecc53f80bbb5f4969099db8") +} + +func (_OCR2DRClientExample *OCR2DRClientExample) Address() common.Address { + return _OCR2DRClientExample.address +} + +type OCR2DRClientExampleInterface interface { + MAXCALLBACKGAS(opts *bind.CallOpts) (uint32, error) + + EstimateCost(opts *bind.CallOpts, req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) + + GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) + + LastError(opts *bind.CallOpts) ([32]byte, error) + + LastErrorLength(opts *bind.CallOpts) (uint32, error) + + LastRequestId(opts *bind.CallOpts) ([32]byte, error) + + LastResponse(opts *bind.CallOpts) ([32]byte, error) + + LastResponseLength(opts *bind.CallOpts) (uint32, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SendRequest(opts *bind.TransactOpts, source string, secrets []byte, args []string, subscriptionId uint64) (*types.Transaction, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) + + 
TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRClientExampleOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*OCR2DRClientExampleOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRClientExampleOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*OCR2DRClientExampleOwnershipTransferred, error) + + FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientExampleRequestFulfilledIterator, error) + + WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleRequestFulfilled, id [][32]byte) (event.Subscription, error) + + ParseRequestFulfilled(log types.Log) (*OCR2DRClientExampleRequestFulfilled, error) + + FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientExampleRequestSentIterator, error) + + WatchRequestSent(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleRequestSent, id [][32]byte) (event.Subscription, error) + + ParseRequestSent(log types.Log) (*OCR2DRClientExampleRequestSent, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/functions/generated/ocr2dr_oracle/ocr2dr_oracle.go b/core/gethwrappers/functions/generated/ocr2dr_oracle/ocr2dr_oracle.go new file mode 100644 index 00000000..114e560e --- /dev/null +++ 
b/core/gethwrappers/functions/generated/ocr2dr_oracle/ocr2dr_oracle.go @@ -0,0 +1,2699 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package ocr2dr_oracle + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type IFunctionsBillingRegistryRequestBilling struct { + SubscriptionId uint64 + Client common.Address + GasLimit uint32 + GasPrice *big.Int +} + +var OCR2DROracleMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AlreadySet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotSelfTransfer\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyBillingRegistry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyPublicKey\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyRequestData\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySendersList\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InconsistentReportData\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAllowedToSetSenders\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotProposedOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OwnerMustBeSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReportInvalid\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedPublicKeyChange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedSender\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersActive\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersDeactive\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":fal
se,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"InvalidRequestID\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestInitiator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"subscriptionOwner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"OracleRequest\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"OracleResponse\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransfer
Requested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"ResponseTransmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"reason\",\"type\":\"string\"}],\"name\":\"UserCallbackError\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"lowLevelData\",\"type\":\"bytes\"}],\"name\":\"UserCallbackRawError\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"activateAuthorizedReceiver\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"addAuthorizedSenders\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"authorizedReceiverActive\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[
],\"name\":\"deactivateAuthorizedReceiver\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"node\",\"type\":\"address\"}],\"name\":\"deleteNodePublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"}],\"name\":\"estimateCost\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAllNodePublicKeys\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"},{\"internalType\":\"bytes[]\",\"name\":\"\",\"type\":\"bytes[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAuthorizedSenders\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDONPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRegistry\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"}],\"internalType\":\"structIFunctionsBillingRegistry.RequestBillin
g\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"getRequiredFee\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getThresholdPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"isAuthorizedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"removeAuthorizedSenders\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint3
2\"}],\"name\":\"sendRequest\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"_f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"_offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"donPublicKey\",\"type\":\"bytes\"}],\"name\":\"setDONPublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"node\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"}],\"name\":\"setNodePublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"registryAddress\",\"type\":\"address\"}],\"name\":\"setRegistry\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"thresholdPublicKey\",\"type\":\"bytes\"}],\"name\":\"setThresholdPublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"report\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"byt
es32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"transmitters\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: "0x60806040523480156200001157600080fd5b506200001c62000022565b620003ae565b600054610100900460ff1615808015620000435750600054600160ff909116105b8062000073575062000060306200016260201b620026d71760201c565b15801562000073575060005460ff166001145b620000dc5760405162461bcd60e51b815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201526d191e481a5b9a5d1a585b1a5e995960921b60648201526084015b60405180910390fd5b6000805460ff19166001179055801562000100576000805461ff0019166101001790555b6200010c600162000171565b620001186001620001f8565b80156200015f576000805461ff0019169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b50565b6001600160a01b03163b151590565b600054610100900460ff16620001cd5760405162461bcd60e51b815260206004820152602b602482015260008051602062004a6183398151915260448201526a6e697469616c697a696e6760a81b6064820152608401620000d3565b620001da33600062000267565b60018054911515600160a01b0260ff60a01b19909216919091179055565b600054610100900460ff16620002545760405162461bcd60e51b815260206004820152602b602482015260008051602062004a6183398151915260448201526a6e697469616c697a696e6760a81b6064820152608401620000d3565b6008805460ff1916911515919091179055565b600054610100900460ff16620002c35760405162461bcd60e51b815260206004820152602b602482015260008051602062004a6183398151915260448201526a6e697469616c697a696e6760a81b
6064820152608401620000d3565b6001600160a01b038216620002eb57604051635b5a8afd60e11b815260040160405180910390fd5b600080546001600160a01b03808516620100000262010000600160b01b031990921691909117909155811615620003275762000327816200032b565b5050565b6001600160a01b038116331415620003565760405163282010c360e01b815260040160405180910390fd5b600180546001600160a01b0319166001600160a01b038381169182179092556000805460405192936201000090910416917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6146a380620003be6000396000f3fe608060405234801561001057600080fd5b50600436106101cf5760003560e01c80638129fc1c11610104578063afcb95d7116100a2578063e3d0e71211610071578063e3d0e71214610437578063f1e14a211461044a578063f2fde38b14610461578063fa00763a1461047457600080fd5b8063afcb95d7146103cc578063b1dc65a4146103ec578063d227d245146103ff578063d328a91e1461042f57600080fd5b806381ff7048116100de57806381ff70481461035d5780638da5cb5b1461038d57806391bb64eb146103b1578063a91ee0dc146103b957600080fd5b80638129fc1c14610345578063814118341461034d57806381f1b9381461035557600080fd5b80634b4fa0c1116101715780635ab1bd531161014b5780635ab1bd53146102d857806379ba5097146103175780637f15e1661461031f578063807560311461033257600080fd5b80634b4fa0c1146102985780634dcef404146102af57806353398987146102c257600080fd5b8063181f5a77116101ad578063181f5a77146102045780632408afaa1461024f57806326ceabac1461026457806328242b041461027757600080fd5b806303e1bf23146101d4578063083a5466146101e9578063110254c8146101fc575b600080fd5b6101e76101e23660046138de565b610487565b005b6101e76101f7366004613c42565b6105a5565b6101e76105f5565b60408051808201909152601581527f46756e6374696f6e734f7261636c6520302e302e30000000000000000000000060208201525b604051610246919061422d565b60405180910390f35b6102576106ea565b604051610246919061407d565b6101e761027236600461384f565b6106fb565b61028a610285366004613d74565b6107cf565b604051908152602001610246565b60085460ff165b6040519015158152602001610246565b6101e76102bd3660046138de565b610a35565b6102ca610b13565b604051610246929190614090565b600d5473ffffff
ffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610246565b6101e7610d33565b6101e761032d366004613c42565b610e26565b6101e7610340366004613889565b610e71565b6101e7610f5a565b6102576110fc565b61023961116b565b6004546002546040805163ffffffff80851682526401000000009094049093166020840152820152606001610246565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff166102f2565b6101e76111f4565b6101e76103c736600461384f565b6112df565b604080516001815260006020820181905291810191909152606001610246565b6101e76103fa366004613a8e565b61137b565b61041261040d366004613dd9565b611aaa565b6040516bffffffffffffffffffffffff9091168152602001610246565b610239611cb1565b6101e76104453660046139c1565b611cc0565b610412610458366004613c78565b60009392505050565b6101e761046f36600461384f565b61269b565b61029f61048236600461384f565b6126ac565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff1633146104de576040517fad77f06100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80610515576040517f75158c3b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b818110156105655761055283838381811061053557610535614616565b905060200201602081019061054a919061384f565b6009906126f3565b508061055d81614521565b915050610518565b507ff263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a082823360405161059993929190614003565b60405180910390a15050565b6105ad61271c565b806105e4576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6105f0600f83836134d4565b505050565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff16331461064c576040517fad77f06100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60085460ff1615610689576040517fa741a04500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600880547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790556040513381527fae51766a982895b0c444fc99
fc1a560762b464d709e6c78376c85617f7eeb5ce906020015b60405180910390a1565b60606106f66009612775565b905090565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16148061076857503373ffffffffffffffffffffffffffffffffffffffff8216145b61079e576040517fed6dd19b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff81166000908152600e602052604081206107cc91613576565b50565b600d5460009073ffffffffffffffffffffffffffffffffffffffff16610821576040517f9c5fe32400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610829612782565b8261085f576040517ec1cfc000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600d546040805160808101825267ffffffffffffffff8816815233602082015263ffffffff8516818301523a606082015290517fa9d03c0500000000000000000000000000000000000000000000000000000000815260009273ffffffffffffffffffffffffffffffffffffffff169163a9d03c05916108e69189918991906004016141c4565b602060405180830381600087803b15801561090057600080fd5b505af1158015610914573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906109389190613c29565b600d546040517fb2a489ff00000000000000000000000000000000000000000000000000000000815267ffffffffffffffff8916600482015291925082917fa1ec73989d79578cd6f67d4f593ac3e0a4d1020e5c0164db52108d7ff785406c91339132918b9173ffffffffffffffffffffffffffffffffffffffff9091169063b2a489ff9060240160206040518083038186803b1580156109d857600080fd5b505afa1580156109ec573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610a10919061386c565b8a8a604051610a2496959493929190613fa5565b60405180910390a295945050505050565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff163314610a8c576040517fad77f06100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80610ac3576040517f75158c3b00000000000000000000000000000000000000000000
000000000000815260040160405180910390fd5b60005b8181101561056557610b00838383818110610ae357610ae3614616565b9050602002016020810190610af8919061384f565b6009906127c1565b5080610b0b81614521565b915050610ac6565b60608060003073ffffffffffffffffffffffffffffffffffffffff1663814118346040518163ffffffff1660e01b815260040160006040518083038186803b158015610b5e57600080fd5b505afa158015610b72573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052610bb89190810190613920565b90506000815167ffffffffffffffff811115610bd657610bd6614645565b604051908082528060200260200182016040528015610c0957816020015b6060815260200190600190039081610bf45790505b50905060005b8251811015610d2957600e6000848381518110610c2e57610c2e614616565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000208054610c7b906144cd565b80601f0160208091040260200160405190810160405280929190818152602001828054610ca7906144cd565b8015610cf45780601f10610cc957610100808354040283529160200191610cf4565b820191906000526020600020905b815481529060010190602001808311610cd757829003601f168201915b5050505050828281518110610d0b57610d0b614616565b60200260200101819052508080610d2190614521565b915050610c0f565b5090939092509050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610d84576040517f0f22ca5f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000805433620100008181027fffffffffffffffffffff0000000000000000000000000000000000000000ffff8416178455600180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905560405173ffffffffffffffffffffffffffffffffffffffff919093041692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610e2e61271c565b80610e65576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6105f0600c83836134d4565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffff
ffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161480610eee5750610ecd336127e3565b8015610eee57503373ffffffffffffffffffffffffffffffffffffffff8416145b610f24576040517fed6dd19b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff83166000908152600e60205260409020610f549083836134d4565b50505050565b600054610100900460ff1615808015610f7a5750600054600160ff909116105b80610f945750303b158015610f94575060005460ff166001145b611025576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a656400000000000000000000000000000000000060648201526084015b60405180910390fd5b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055801561108357600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b61108d6001612907565b61109760016129f3565b80156107cc57600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a150565b6060600780548060200260200160405190810160405280929190818152602001828054801561116157602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311611136575b5050505050905090565b6060600f805461117a906144cd565b80601f01602080910402602001604051908101604052809291908181526020018280546111a6906144cd565b80156111615780601f106111c857610100808354040283529160200191611161565b820191906000526020600020905b8154815290600101906020018083116111d657509395945050505050565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff16331461124b576040517fad77f06100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60085460ff16611287576040517fa741a04500000000000000000000000000000000000000000000000000000000815260040160
405180910390fd5b600880547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690556040513381527fea3828816a323b8d7ff49d755efd105e7719166d6c76fad97a28eee5eccc3d9a906020016106e0565b6112e761271c565b73ffffffffffffffffffffffffffffffffffffffff8116611334576040517f9c5fe32400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600d80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60005a604080516020601f8b018190048102820181019092528981529192508a3591818c0135916113d191849163ffffffff851691908e908e9081908401838280828437600092019190915250612abb92505050565b611407576040517f0be3632800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805183815262ffffff600884901c1660208201527fb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62910160405180910390a16040805160608101825260025480825260035460ff808216602085015261010090910416928201929092529083146114dc576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f636f6e666967446967657374206d69736d617463680000000000000000000000604482015260640161101c565b6114ea8b8b8b8b8b8b612ac4565b60015460009074010000000000000000000000000000000000000000900460ff161561154557600282602001518360400151611526919061441e565b6115309190614457565b61153b90600161441e565b60ff16905061155b565b602082015161155590600161441e565b60ff1690505b8881146115c4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f77726f6e67206e756d626572206f66207369676e617475726573000000000000604482015260640161101c565b88871461162d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f7369676e617475726573206f7574206f6620726567697374726174696f6e0000604482015260640161101c565b3360009081526005602090815260408083208151808301909252805460ff80821684529293919291840191610100909104166002811115611670
576116706145b8565b6002811115611681576116816145b8565b905250905060028160200151600281111561169e5761169e6145b8565b1480156116e557506007816000015160ff16815481106116c0576116c0614616565b60009182526020909120015473ffffffffffffffffffffffffffffffffffffffff1633145b61174b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f756e617574686f72697a6564207472616e736d69747465720000000000000000604482015260640161101c565b50505050506117586135b0565b6000808a8a60405161176b929190613f95565b604051908190038120611782918e90602001613f79565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181528282528051602091820120838301909252600080845290830152915060005b89811015611a8c5760006001848984602081106117eb576117eb614616565b6117f891901a601b61441e565b8e8e8681811061180a5761180a614616565b905060200201358d8d8781811061182357611823614616565b9050602002013560405160008152602001604052604051611860949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa158015611882573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015173ffffffffffffffffffffffffffffffffffffffff811660009081526005602090815290849020838501909452835460ff80821685529296509294508401916101009004166002811115611902576119026145b8565b6002811115611913576119136145b8565b9052509250600183602001516002811115611930576119306145b8565b14611997576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f61646472657373206e6f7420617574686f72697a656420746f207369676e0000604482015260640161101c565b8251600090879060ff16601f81106119b1576119b1614616565b602002015173ffffffffffffffffffffffffffffffffffffffff1614611a33576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f6e6f6e2d756e69717565207369676e6174757265000000000000000000000000604482015260640161101c565b8086846000015160ff16601f8110611a4d57611a4d614616565b73ff
ffffffffffffffffffffffffffffffffffffff9092166020929092020152611a7860018661441e565b94505080611a8590614521565b90506117cc565b505050611a9d833383858e8e612b7b565b5050505050505050505050565b600d5460009073ffffffffffffffffffffffffffffffffffffffff16611afc576040517f9c5fe32400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805160808101825267ffffffffffffffff8816815233602082015263ffffffff85168183015260608101849052600d5491517ff1e14a210000000000000000000000000000000000000000000000000000000081529091600091829173ffffffffffffffffffffffffffffffffffffffff169063f1e14a2190611b89908b908b9088906004016141c4565b60206040518083038186803b158015611ba157600080fd5b505afa158015611bb5573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611bd99190613e46565b600d546040517fa1a6d04100000000000000000000000000000000000000000000000000000000815263ffffffff89166004820152602481018890526bffffffffffffffffffffffff80861660448301528316606482015291925073ffffffffffffffffffffffffffffffffffffffff169063a1a6d0419060840160206040518083038186803b158015611c6c57600080fd5b505afa158015611c80573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611ca49190613e46565b9998505050505050505050565b6060600c805461117a906144cd565b855185518560ff16601f831115611d33576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f746f6f206d616e79207369676e65727300000000000000000000000000000000604482015260640161101c565b60008111611d9d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f66206d75737420626520706f7369746976650000000000000000000000000000604482015260640161101c565b818314611e2b576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f6f7261636c6520616464726573736573206f7574206f6620726567697374726160448201527f74696f6e00000000000000000000000000000000000000000000000000000000606482015260840161101c565b611e36816003614479565b83
11611e9e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f6661756c74792d6f7261636c65206620746f6f20686967680000000000000000604482015260640161101c565b611ea661271c565b6040805160c0810182528a8152602081018a905260ff8916918101919091526060810187905267ffffffffffffffff8616608082015260a081018590525b6006541561209957600654600090611efe906001906144b6565b9050600060068281548110611f1557611f15614616565b60009182526020822001546007805473ffffffffffffffffffffffffffffffffffffffff90921693509084908110611f4f57611f4f614616565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff85811684526005909252604080842080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000090811690915592909116808452922080549091169055600680549192509080611fcf57611fcf6145e7565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff00000000000000000000000000000000000000001690550190556007805480612038576120386145e7565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905501905550611ee4915050565b60005b8151518110156124fe57600060056000846000015184815181106120c2576120c2614616565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff16600281111561210c5761210c6145b8565b14612173576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265706561746564207369676e65722061646472657373000000000000000000604482015260640161101c565b6040805180820190915260ff821681526001602082015282518051600591600091859081106121a4576121a4614616565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffff00001617610100836002811115612245576122456145b8565b0217905550600091506122559050565b600560008460200151848151811061226f5761226f614616565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff1660028111156122b9576122b96145b8565b14612320576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f7265706561746564207472616e736d6974746572206164647265737300000000604482015260640161101c565b6040805180820190915260ff82168152602081016002815250600560008460200151848151811061235357612353614616565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000016176101008360028111156123f4576123f46145b8565b02179055505082518051600692508390811061241257612412614616565b602090810291909101810151825460018101845560009384529282902090920180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909316929092179091558201518051600791908390811061248e5761248e614616565b60209081029190910181015182546001810184556000938452919092200180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9092169190911790556124f781614521565b905061209c565b506040810151600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff909216919091179055600480547fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff811664010000000063ffffffff4381168202928317855590830481169360019390926000926125909286929082169116176143f6565b92506101000a81548163ffffffff021916908363ffffffff1602179055506125ef4630600460009054906101000a900463ffffffff1663ffffffff16856000015186602001518760400151886060015189608001518a60a0015161304f565b6002819055825180516003805460ff909216610100027fff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff90921691909117905560045460208501516040808701516060880151608089015160a08a015193517f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e059861268e988b98919763ffffffff9092169690959194919391926142e5565b60405180910390a1611a9d565b6126a361271c565b6107cc816130fa565b60006126ba60085460ff1690565b6126c657506001919050565b6126d16009836131c7565b92915050565b73ffffffffffffffffffffffffffffffffffffffff163b151590565b60006127158373ffffffffffffffffffffffffffffffffffffffff84166131f6565b9392505050565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff163314612773576040517f2b5c74de00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b60606000612715836132e9565b61278b326126ac565b612773576040517f0809490800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006127158373ffffffffffffffffffffffffffffffffffffffff8416613345565b6000803073ffffffffffffffffffffffffffffffffffffffff1663814118346040518163ffffffff1660e01b815260040160006040518083038186803b15801561282c57600080fd5b505afa158015612840573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526128869190810190613920565b905060005b81518110156128fd578373ffffffffffffffffffffffffffffffffffffffff168282815181106128bd576128bd614616565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1614156128eb575060019392505050565b806128f581614521565b91505061288b565b5060009392505050565b600054610100900460ff1661299e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e67000000000000000000000000000000000000000000606482015260840161101c565b6129a9336000613394565b6001805491151574010000000000000000000000000000000000000000027fffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff90921691909117
9055565b600054610100900460ff16612a8a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e67000000000000000000000000000000000000000000606482015260840161101c565b600880547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016911515919091179055565b60019392505050565b6000612ad1826020614479565b612adc856020614479565b612ae8886101446143de565b612af291906143de565b612afc91906143de565b612b079060006143de565b9050368114612b72576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f63616c6c64617461206c656e677468206d69736d617463680000000000000000604482015260640161101c565b50505050505050565b600d5473ffffffffffffffffffffffffffffffffffffffff16612bca576040517f9c5fe32400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60608080612bda84860186613b45565b825192955090935091501580612bf257508151835114155b80612bff57508051835114155b15612c36576040517f0be3632800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600083515a612c45908c6144b6565b612c4f9190614443565b905060005b8451811015611a9d57600d54855173ffffffffffffffffffffffffffffffffffffffff90911690630739e4f190879084908110612c9357612c93614616565b6020026020010151868481518110612cad57612cad614616565b6020026020010151868581518110612cc757612cc7614616565b60200260200101518e8d8f895a6040518963ffffffff1660e01b8152600401612cf7989796959493929190614120565b602060405180830381600087803b158015612d1157600080fd5b505af1925050508015612d5f575060408051601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201909252612d5c91810190613d53565b60015b612e57573d808015612d8d576040519150601f19603f3d011682016040523d82523d6000602084013e612d92565b606091505b50858281518110612da557612da5614616565b60200260200101517fe0b838ffe6ee22a0d3acf19a85db6a41b34a1ab739e2d6c759a2e42d95bdccb282604051612ddc9190
61422d565b60405180910390a2858281518110612df657612df6614616565b60200260200101517fdc941eddab34a6109ab77798299c6b1f035b125fd6f774d266ecbf9541d630a68c604051612e49919073ffffffffffffffffffffffffffffffffffffffff91909116815260200190565b60405180910390a250613039565b6000816002811115612e6b57612e6b6145b8565b1415612f2957858281518110612e8357612e83614616565b60200260200101517f9e9bc7616d42c2835d05ae617e508454e63b30b934be8aa932ebc125e0e58a6460405160405180910390a2858281518110612ec957612ec9614616565b60200260200101517fdc941eddab34a6109ab77798299c6b1f035b125fd6f774d266ecbf9541d630a68c604051612f1c919073ffffffffffffffffffffffffffffffffffffffff91909116815260200190565b60405180910390a2613037565b6001816002811115612f3d57612f3d6145b8565b1415612fd657858281518110612f5557612f55614616565b60200260200101517fb2931868c372fe17a25643458add467d60ec5c51125a99b7309f41f5bcd2da6c604051612fbc9060208082526011908201527f6572726f7220696e2063616c6c6261636b000000000000000000000000000000604082015260600190565b60405180910390a2858281518110612ec957612ec9614616565b6002816002811115612fea57612fea6145b8565b14156130375785828151811061300257613002614616565b60200260200101517fa1c120e327c9ad8b075793878c88d59b8934b97ae37117faa3bb21616237f7be60405160405180910390a25b505b8061304381614521565b915050612c54565b5050565b6000808a8a8a8a8a8a8a8a8a60405160200161307399989796959493929190614240565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e01000000000000000000000000000000000000000000000000000000000000179150509998505050505050505050565b73ffffffffffffffffffffffffffffffffffffffff811633141561314a576040517f282010c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8381169182179092556000805460405192936201000090910416917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae
12789190a350565b73ffffffffffffffffffffffffffffffffffffffff811660009081526001830160205260408120541515612715565b600081815260018301602052604081205480156132df57600061321a6001836144b6565b855490915060009061322e906001906144b6565b905081811461329357600086600001828154811061324e5761324e614616565b906000526020600020015490508087600001848154811061327157613271614616565b6000918252602080832090910192909255918252600188019052604090208390555b85548690806132a4576132a46145e7565b6001900381819060005260206000200160009055905585600101600086815260200190815260200160002060009055600193505050506126d1565b60009150506126d1565b60608160000180548060200260200160405190810160405280929190818152602001828054801561333957602002820191906000526020600020905b815481526020019060010190808311613325575b50505050509050919050565b600081815260018301602052604081205461338c575081546001818101845560008481526020808220909301849055845484825282860190935260409020919091556126d1565b5060006126d1565b600054610100900460ff1661342b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e67000000000000000000000000000000000000000000606482015260840161101c565b73ffffffffffffffffffffffffffffffffffffffff8216613478576040517fb6b515fa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000805473ffffffffffffffffffffffffffffffffffffffff80851662010000027fffffffffffffffffffff0000000000000000000000000000000000000000ffff9092169190911790915581161561304b5761304b816130fa565b8280546134e0906144cd565b90600052602060002090601f0160209004810192826135025760008555613566565b82601f10613539578280017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00823516178555613566565b82800160010185558215613566579182015b8281111561356657823582559160200191906001019061354b565b506135729291506135cf565b5090565b508054613582906144cd565b6000825580601f10613592575050565b601f01602090049060005260206000209081019061
07cc91906135cf565b604051806103e00160405280601f906020820280368337509192915050565b5b8082111561357257600081556001016135d0565b60008083601f8401126135f657600080fd5b50813567ffffffffffffffff81111561360e57600080fd5b6020830191508360208260051b850101111561362957600080fd5b9250929050565b600082601f83011261364157600080fd5b81356020613656613651836143ba565b61436b565b80838252828201915082860187848660051b890101111561367657600080fd5b60005b8581101561369e57813561368c81614674565b84529284019290840190600101613679565b5090979650505050505050565b600082601f8301126136bc57600080fd5b813560206136cc613651836143ba565b80838252828201915082860187848660051b89010111156136ec57600080fd5b6000805b8681101561372f57823567ffffffffffffffff81111561370e578283fd5b61371c8b88838d010161377f565b86525093850193918501916001016136f0565b509198975050505050505050565b60008083601f84011261374f57600080fd5b50813567ffffffffffffffff81111561376757600080fd5b60208301915083602082850101111561362957600080fd5b600082601f83011261379057600080fd5b813567ffffffffffffffff8111156137aa576137aa614645565b6137db60207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8401160161436b565b8181528460208386010111156137f057600080fd5b816020850160208301376000918101602001919091529392505050565b803563ffffffff8116811461382157600080fd5b919050565b803567ffffffffffffffff8116811461382157600080fd5b803560ff8116811461382157600080fd5b60006020828403121561386157600080fd5b813561271581614674565b60006020828403121561387e57600080fd5b815161271581614674565b60008060006040848603121561389e57600080fd5b83356138a981614674565b9250602084013567ffffffffffffffff8111156138c557600080fd5b6138d18682870161373d565b9497909650939450505050565b600080602083850312156138f157600080fd5b823567ffffffffffffffff81111561390857600080fd5b613914858286016135e4565b90969095509350505050565b6000602080838503121561393357600080fd5b825167ffffffffffffffff81111561394a57600080fd5b8301601f8101851361395b57600080fd5b8051613969613651826143ba565b80828252848201915084840188868560051b870101111561398957600080fd5b600094505b83
8510156139b55780516139a181614674565b83526001949094019391850191850161398e565b50979650505050505050565b60008060008060008060c087890312156139da57600080fd5b863567ffffffffffffffff808211156139f257600080fd5b6139fe8a838b01613630565b97506020890135915080821115613a1457600080fd5b613a208a838b01613630565b9650613a2e60408a0161383e565b95506060890135915080821115613a4457600080fd5b613a508a838b0161377f565b9450613a5e60808a01613826565b935060a0890135915080821115613a7457600080fd5b50613a8189828a0161377f565b9150509295509295509295565b60008060008060008060008060e0898b031215613aaa57600080fd5b606089018a811115613abb57600080fd5b8998503567ffffffffffffffff80821115613ad557600080fd5b613ae18c838d0161373d565b909950975060808b0135915080821115613afa57600080fd5b613b068c838d016135e4565b909750955060a08b0135915080821115613b1f57600080fd5b50613b2c8b828c016135e4565b999c989b50969995989497949560c00135949350505050565b600080600060608486031215613b5a57600080fd5b833567ffffffffffffffff80821115613b7257600080fd5b818601915086601f830112613b8657600080fd5b81356020613b96613651836143ba565b8083825282820191508286018b848660051b8901011115613bb657600080fd5b600096505b84871015613bd9578035835260019690960195918301918301613bbb565b5097505087013592505080821115613bf057600080fd5b613bfc878388016136ab565b93506040860135915080821115613c1257600080fd5b50613c1f868287016136ab565b9150509250925092565b600060208284031215613c3b57600080fd5b5051919050565b60008060208385031215613c5557600080fd5b823567ffffffffffffffff811115613c6c57600080fd5b6139148582860161373d565b600080600083850360a0811215613c8e57600080fd5b843567ffffffffffffffff80821115613ca657600080fd5b613cb28883890161373d565b909650945084915060807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe084011215613cea57600080fd5b60405192506080830191508282108183111715613d0957613d09614645565b50604052613d1960208601613826565b81526040850135613d2981614674565b6020820152613d3a6060860161380d565b6040820152608094909401356060850152509093909250565b600060208284031215613d6557600080fd5b81516003811061271557600080fd5b600080
60008060608587031215613d8a57600080fd5b613d9385613826565b9350602085013567ffffffffffffffff811115613daf57600080fd5b613dbb8782880161373d565b9094509250613dce90506040860161380d565b905092959194509250565b600080600080600060808688031215613df157600080fd5b613dfa86613826565b9450602086013567ffffffffffffffff811115613e1657600080fd5b613e228882890161373d565b9095509350613e3590506040870161380d565b949793965091946060013592915050565b600060208284031215613e5857600080fd5b81516bffffffffffffffffffffffff8116811461271557600080fd5b600081518084526020808501945080840160005b83811015613eba57815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101613e88565b509495945050505050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b6000815180845260005b81811015613f3457602081850181015186830182015201613f18565b81811115613f46576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b8281526060826020830137600060809190910190815292915050565b8183823760009101908152919050565b600073ffffffffffffffffffffffffffffffffffffffff8089168352808816602084015267ffffffffffffffff8716604084015280861660608401525060a06080830152613ff760a083018486613ec5565b98975050505050505050565b6040808252810183905260008460608301825b8681101561405357823561402981614674565b73ffffffffffffffffffffffffffffffffffffffff16825260209283019290910190600101614016565b50809250505073ffffffffffffffffffffffffffffffffffffffff83166020830152949350505050565b6020815260006127156020830184613e74565b6040815260006140a36040830185613e74565b6020838203818501528185518084528284019150828160051b85010183880160005b83811015614111577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08784030185526140ff838351613f0e565b948601949250908501906001016140c5565b50909998505050505050505050565b60006104c08a83526020818185015261413b8285018c613f0e565b9150838203604085015261414f828b613f0e565b925073ffffffffffffffffffffffffff
ffffffffffffff91508189166060850152608084018860005b601f811015614197578151851683529183019190830190600101614178565b50505050506141ac61046083018660ff169052565b6104808201939093526104a001529695505050505050565b60a0815260006141d860a083018587613ec5565b905067ffffffffffffffff835116602083015273ffffffffffffffffffffffffffffffffffffffff602084015116604083015263ffffffff604084015116606083015260608301516080830152949350505050565b6020815260006127156020830184613f0e565b60006101208b835273ffffffffffffffffffffffffffffffffffffffff8b16602084015267ffffffffffffffff808b1660408501528160608501526142878285018b613e74565b9150838203608085015261429b828a613e74565b915060ff881660a085015283820360c08501526142b88288613f0e565b90861660e085015283810361010085015290506142d58185613f0e565b9c9b505050505050505050505050565b600061012063ffffffff808d1684528b6020850152808b166040850152508060608401526143158184018a613e74565b905082810360808401526143298189613e74565b905060ff871660a084015282810360c08401526143468187613f0e565b905067ffffffffffffffff851660e08401528281036101008401526142d58185613f0e565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156143b2576143b2614645565b604052919050565b600067ffffffffffffffff8211156143d4576143d4614645565b5060051b60200190565b600082198211156143f1576143f161455a565b500190565b600063ffffffff8083168185168083038211156144155761441561455a565b01949350505050565b600060ff821660ff84168060ff0382111561443b5761443b61455a565b019392505050565b60008261445257614452614589565b500490565b600060ff83168061446a5761446a614589565b8060ff84160491505092915050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156144b1576144b161455a565b500290565b6000828210156144c8576144c861455a565b500390565b600181811c908216806144e157607f821691505b6020821081141561451b577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415
6145535761455361455a565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b73ffffffffffffffffffffffffffffffffffffffff811681146107cc57600080fdfea164736f6c6343000806000a496e697469616c697a61626c653a20636f6e7472616374206973206e6f742069", +} + +var OCR2DROracleABI = OCR2DROracleMetaData.ABI + +var OCR2DROracleBin = OCR2DROracleMetaData.Bin + +func DeployOCR2DROracle(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *OCR2DROracle, error) { + parsed, err := OCR2DROracleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OCR2DROracleBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OCR2DROracle{address: address, abi: *parsed, OCR2DROracleCaller: OCR2DROracleCaller{contract: contract}, OCR2DROracleTransactor: OCR2DROracleTransactor{contract: contract}, OCR2DROracleFilterer: OCR2DROracleFilterer{contract: contract}}, nil +} + +type OCR2DROracle struct { + address common.Address + abi abi.ABI + OCR2DROracleCaller + OCR2DROracleTransactor + OCR2DROracleFilterer +} + +type OCR2DROracleCaller struct { + contract *bind.BoundContract +} + +type OCR2DROracleTransactor struct { + contract *bind.BoundContract +} + +type OCR2DROracleFilterer struct { + contract 
*bind.BoundContract +} + +type OCR2DROracleSession struct { + Contract *OCR2DROracle + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OCR2DROracleCallerSession struct { + Contract *OCR2DROracleCaller + CallOpts bind.CallOpts +} + +type OCR2DROracleTransactorSession struct { + Contract *OCR2DROracleTransactor + TransactOpts bind.TransactOpts +} + +type OCR2DROracleRaw struct { + Contract *OCR2DROracle +} + +type OCR2DROracleCallerRaw struct { + Contract *OCR2DROracleCaller +} + +type OCR2DROracleTransactorRaw struct { + Contract *OCR2DROracleTransactor +} + +func NewOCR2DROracle(address common.Address, backend bind.ContractBackend) (*OCR2DROracle, error) { + abi, err := abi.JSON(strings.NewReader(OCR2DROracleABI)) + if err != nil { + return nil, err + } + contract, err := bindOCR2DROracle(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OCR2DROracle{address: address, abi: abi, OCR2DROracleCaller: OCR2DROracleCaller{contract: contract}, OCR2DROracleTransactor: OCR2DROracleTransactor{contract: contract}, OCR2DROracleFilterer: OCR2DROracleFilterer{contract: contract}}, nil +} + +func NewOCR2DROracleCaller(address common.Address, caller bind.ContractCaller) (*OCR2DROracleCaller, error) { + contract, err := bindOCR2DROracle(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OCR2DROracleCaller{contract: contract}, nil +} + +func NewOCR2DROracleTransactor(address common.Address, transactor bind.ContractTransactor) (*OCR2DROracleTransactor, error) { + contract, err := bindOCR2DROracle(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OCR2DROracleTransactor{contract: contract}, nil +} + +func NewOCR2DROracleFilterer(address common.Address, filterer bind.ContractFilterer) (*OCR2DROracleFilterer, error) { + contract, err := bindOCR2DROracle(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OCR2DROracleFilterer{contract: contract}, nil 
+} + +func bindOCR2DROracle(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OCR2DROracleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OCR2DROracle *OCR2DROracleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DROracle.Contract.OCR2DROracleCaller.contract.Call(opts, result, method, params...) +} + +func (_OCR2DROracle *OCR2DROracleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DROracle.Contract.OCR2DROracleTransactor.contract.Transfer(opts) +} + +func (_OCR2DROracle *OCR2DROracleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DROracle.Contract.OCR2DROracleTransactor.contract.Transact(opts, method, params...) +} + +func (_OCR2DROracle *OCR2DROracleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DROracle.Contract.contract.Call(opts, result, method, params...) +} + +func (_OCR2DROracle *OCR2DROracleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DROracle.Contract.contract.Transfer(opts) +} + +func (_OCR2DROracle *OCR2DROracleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DROracle.Contract.contract.Transact(opts, method, params...) 
+} + +func (_OCR2DROracle *OCR2DROracleCaller) AuthorizedReceiverActive(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "authorizedReceiverActive") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) AuthorizedReceiverActive() (bool, error) { + return _OCR2DROracle.Contract.AuthorizedReceiverActive(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) AuthorizedReceiverActive() (bool, error) { + return _OCR2DROracle.Contract.AuthorizedReceiverActive(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) EstimateCost(opts *bind.CallOpts, subscriptionId uint64, data []byte, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "estimateCost", subscriptionId, data, gasLimit, gasPrice) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) EstimateCost(subscriptionId uint64, data []byte, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + return _OCR2DROracle.Contract.EstimateCost(&_OCR2DROracle.CallOpts, subscriptionId, data, gasLimit, gasPrice) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) EstimateCost(subscriptionId uint64, data []byte, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + return _OCR2DROracle.Contract.EstimateCost(&_OCR2DROracle.CallOpts, subscriptionId, data, gasLimit, gasPrice) +} + +func (_OCR2DROracle *OCR2DROracleCaller) GetAllNodePublicKeys(opts *bind.CallOpts) ([]common.Address, [][]byte, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "getAllNodePublicKeys") + + if err != nil { + return *new([]common.Address), *new([][]byte), err + } + + out0 := 
*abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + out1 := *abi.ConvertType(out[1], new([][]byte)).(*[][]byte) + + return out0, out1, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) GetAllNodePublicKeys() ([]common.Address, [][]byte, error) { + return _OCR2DROracle.Contract.GetAllNodePublicKeys(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) GetAllNodePublicKeys() ([]common.Address, [][]byte, error) { + return _OCR2DROracle.Contract.GetAllNodePublicKeys(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "getAuthorizedSenders") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) GetAuthorizedSenders() ([]common.Address, error) { + return _OCR2DROracle.Contract.GetAuthorizedSenders(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) GetAuthorizedSenders() ([]common.Address, error) { + return _OCR2DROracle.Contract.GetAuthorizedSenders(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "getDONPublicKey") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) GetDONPublicKey() ([]byte, error) { + return _OCR2DROracle.Contract.GetDONPublicKey(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) GetDONPublicKey() ([]byte, error) { + return _OCR2DROracle.Contract.GetDONPublicKey(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) GetRegistry(opts 
*bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "getRegistry") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) GetRegistry() (common.Address, error) { + return _OCR2DROracle.Contract.GetRegistry(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) GetRegistry() (common.Address, error) { + return _OCR2DROracle.Contract.GetRegistry(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) GetRequiredFee(opts *bind.CallOpts, arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "getRequiredFee", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) GetRequiredFee(arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) { + return _OCR2DROracle.Contract.GetRequiredFee(&_OCR2DROracle.CallOpts, arg0, arg1) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) GetRequiredFee(arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) { + return _OCR2DROracle.Contract.GetRequiredFee(&_OCR2DROracle.CallOpts, arg0, arg1) +} + +func (_OCR2DROracle *OCR2DROracleCaller) GetThresholdPublicKey(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "getThresholdPublicKey") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) GetThresholdPublicKey() ([]byte, error) { + return _OCR2DROracle.Contract.GetThresholdPublicKey(&_OCR2DROracle.CallOpts) +} + +func 
(_OCR2DROracle *OCR2DROracleCallerSession) GetThresholdPublicKey() ([]byte, error) { + return _OCR2DROracle.Contract.GetThresholdPublicKey(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "isAuthorizedSender", sender) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _OCR2DROracle.Contract.IsAuthorizedSender(&_OCR2DROracle.CallOpts, sender) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _OCR2DROracle.Contract.IsAuthorizedSender(&_OCR2DROracle.CallOpts, sender) +} + +func (_OCR2DROracle *OCR2DROracleCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _OCR2DROracle.Contract.LatestConfigDetails(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _OCR2DROracle.Contract.LatestConfigDetails(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) { + var out 
[]interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "latestConfigDigestAndEpoch") + + outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _OCR2DROracle.Contract.LatestConfigDigestAndEpoch(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _OCR2DROracle.Contract.LatestConfigDigestAndEpoch(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) Owner() (common.Address, error) { + return _OCR2DROracle.Contract.Owner(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) Owner() (common.Address, error) { + return _OCR2DROracle.Contract.Owner(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) Transmitters(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "transmitters") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) Transmitters() ([]common.Address, error) { + return 
_OCR2DROracle.Contract.Transmitters(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) Transmitters() ([]common.Address, error) { + return _OCR2DROracle.Contract.Transmitters(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) TypeAndVersion() (string, error) { + return _OCR2DROracle.Contract.TypeAndVersion(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) TypeAndVersion() (string, error) { + return _OCR2DROracle.Contract.TypeAndVersion(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "acceptOwnership") +} + +func (_OCR2DROracle *OCR2DROracleSession) AcceptOwnership() (*types.Transaction, error) { + return _OCR2DROracle.Contract.AcceptOwnership(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _OCR2DROracle.Contract.AcceptOwnership(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) ActivateAuthorizedReceiver(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "activateAuthorizedReceiver") +} + +func (_OCR2DROracle *OCR2DROracleSession) ActivateAuthorizedReceiver() (*types.Transaction, error) { + return _OCR2DROracle.Contract.ActivateAuthorizedReceiver(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) ActivateAuthorizedReceiver() (*types.Transaction, error) { + return 
_OCR2DROracle.Contract.ActivateAuthorizedReceiver(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) AddAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "addAuthorizedSenders", senders) +} + +func (_OCR2DROracle *OCR2DROracleSession) AddAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.AddAuthorizedSenders(&_OCR2DROracle.TransactOpts, senders) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) AddAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.AddAuthorizedSenders(&_OCR2DROracle.TransactOpts, senders) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) DeactivateAuthorizedReceiver(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "deactivateAuthorizedReceiver") +} + +func (_OCR2DROracle *OCR2DROracleSession) DeactivateAuthorizedReceiver() (*types.Transaction, error) { + return _OCR2DROracle.Contract.DeactivateAuthorizedReceiver(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) DeactivateAuthorizedReceiver() (*types.Transaction, error) { + return _OCR2DROracle.Contract.DeactivateAuthorizedReceiver(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) DeleteNodePublicKey(opts *bind.TransactOpts, node common.Address) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "deleteNodePublicKey", node) +} + +func (_OCR2DROracle *OCR2DROracleSession) DeleteNodePublicKey(node common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.DeleteNodePublicKey(&_OCR2DROracle.TransactOpts, node) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) DeleteNodePublicKey(node common.Address) (*types.Transaction, error) { + return 
_OCR2DROracle.Contract.DeleteNodePublicKey(&_OCR2DROracle.TransactOpts, node) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) Initialize(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "initialize") +} + +func (_OCR2DROracle *OCR2DROracleSession) Initialize() (*types.Transaction, error) { + return _OCR2DROracle.Contract.Initialize(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) Initialize() (*types.Transaction, error) { + return _OCR2DROracle.Contract.Initialize(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) RemoveAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "removeAuthorizedSenders", senders) +} + +func (_OCR2DROracle *OCR2DROracleSession) RemoveAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.RemoveAuthorizedSenders(&_OCR2DROracle.TransactOpts, senders) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) RemoveAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.RemoveAuthorizedSenders(&_OCR2DROracle.TransactOpts, senders) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) SendRequest(opts *bind.TransactOpts, subscriptionId uint64, data []byte, gasLimit uint32) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "sendRequest", subscriptionId, data, gasLimit) +} + +func (_OCR2DROracle *OCR2DROracleSession) SendRequest(subscriptionId uint64, data []byte, gasLimit uint32) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SendRequest(&_OCR2DROracle.TransactOpts, subscriptionId, data, gasLimit) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) SendRequest(subscriptionId uint64, data []byte, gasLimit uint32) (*types.Transaction, error) { + return 
_OCR2DROracle.Contract.SendRequest(&_OCR2DROracle.TransactOpts, subscriptionId, data, gasLimit) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "setConfig", _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_OCR2DROracle *OCR2DROracleSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetConfig(&_OCR2DROracle.TransactOpts, _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetConfig(&_OCR2DROracle.TransactOpts, _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) SetDONPublicKey(opts *bind.TransactOpts, donPublicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "setDONPublicKey", donPublicKey) +} + +func (_OCR2DROracle *OCR2DROracleSession) SetDONPublicKey(donPublicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetDONPublicKey(&_OCR2DROracle.TransactOpts, donPublicKey) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) SetDONPublicKey(donPublicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetDONPublicKey(&_OCR2DROracle.TransactOpts, donPublicKey) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) 
SetNodePublicKey(opts *bind.TransactOpts, node common.Address, publicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "setNodePublicKey", node, publicKey) +} + +func (_OCR2DROracle *OCR2DROracleSession) SetNodePublicKey(node common.Address, publicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetNodePublicKey(&_OCR2DROracle.TransactOpts, node, publicKey) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) SetNodePublicKey(node common.Address, publicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetNodePublicKey(&_OCR2DROracle.TransactOpts, node, publicKey) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) SetRegistry(opts *bind.TransactOpts, registryAddress common.Address) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "setRegistry", registryAddress) +} + +func (_OCR2DROracle *OCR2DROracleSession) SetRegistry(registryAddress common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetRegistry(&_OCR2DROracle.TransactOpts, registryAddress) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) SetRegistry(registryAddress common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetRegistry(&_OCR2DROracle.TransactOpts, registryAddress) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) SetThresholdPublicKey(opts *bind.TransactOpts, thresholdPublicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "setThresholdPublicKey", thresholdPublicKey) +} + +func (_OCR2DROracle *OCR2DROracleSession) SetThresholdPublicKey(thresholdPublicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetThresholdPublicKey(&_OCR2DROracle.TransactOpts, thresholdPublicKey) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) SetThresholdPublicKey(thresholdPublicKey []byte) (*types.Transaction, error) { + return 
_OCR2DROracle.Contract.SetThresholdPublicKey(&_OCR2DROracle.TransactOpts, thresholdPublicKey) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "transferOwnership", to) +} + +func (_OCR2DROracle *OCR2DROracleSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.TransferOwnership(&_OCR2DROracle.TransactOpts, to) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.TransferOwnership(&_OCR2DROracle.TransactOpts, to) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "transmit", reportContext, report, rs, ss, rawVs) +} + +func (_OCR2DROracle *OCR2DROracleSession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.Transmit(&_OCR2DROracle.TransactOpts, reportContext, report, rs, ss, rawVs) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.Transmit(&_OCR2DROracle.TransactOpts, reportContext, report, rs, ss, rawVs) +} + +type OCR2DROracleAuthorizedSendersActiveIterator struct { + Event *OCR2DROracleAuthorizedSendersActive + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleAuthorizedSendersActiveIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + 
it.Event = new(OCR2DROracleAuthorizedSendersActive) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleAuthorizedSendersActive) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleAuthorizedSendersActiveIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleAuthorizedSendersActiveIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleAuthorizedSendersActive struct { + Account common.Address + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterAuthorizedSendersActive(opts *bind.FilterOpts) (*OCR2DROracleAuthorizedSendersActiveIterator, error) { + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "AuthorizedSendersActive") + if err != nil { + return nil, err + } + return &OCR2DROracleAuthorizedSendersActiveIterator{contract: _OCR2DROracle.contract, event: "AuthorizedSendersActive", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchAuthorizedSendersActive(opts *bind.WatchOpts, sink chan<- *OCR2DROracleAuthorizedSendersActive) (event.Subscription, error) { + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "AuthorizedSendersActive") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleAuthorizedSendersActive) + if err := _OCR2DROracle.contract.UnpackLog(event, "AuthorizedSendersActive", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + 
case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseAuthorizedSendersActive(log types.Log) (*OCR2DROracleAuthorizedSendersActive, error) { + event := new(OCR2DROracleAuthorizedSendersActive) + if err := _OCR2DROracle.contract.UnpackLog(event, "AuthorizedSendersActive", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleAuthorizedSendersChangedIterator struct { + Event *OCR2DROracleAuthorizedSendersChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleAuthorizedSendersChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleAuthorizedSendersChangedIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleAuthorizedSendersChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleAuthorizedSendersChanged struct { + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*OCR2DROracleAuthorizedSendersChangedIterator, error) { + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, 
"AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return &OCR2DROracleAuthorizedSendersChangedIterator{contract: _OCR2DROracle.contract, event: "AuthorizedSendersChanged", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *OCR2DROracleAuthorizedSendersChanged) (event.Subscription, error) { + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleAuthorizedSendersChanged) + if err := _OCR2DROracle.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseAuthorizedSendersChanged(log types.Log) (*OCR2DROracleAuthorizedSendersChanged, error) { + event := new(OCR2DROracleAuthorizedSendersChanged) + if err := _OCR2DROracle.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleAuthorizedSendersDeactiveIterator struct { + Event *OCR2DROracleAuthorizedSendersDeactive + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleAuthorizedSendersDeactiveIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleAuthorizedSendersDeactive) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + 
return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleAuthorizedSendersDeactive) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleAuthorizedSendersDeactiveIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleAuthorizedSendersDeactiveIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleAuthorizedSendersDeactive struct { + Account common.Address + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterAuthorizedSendersDeactive(opts *bind.FilterOpts) (*OCR2DROracleAuthorizedSendersDeactiveIterator, error) { + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "AuthorizedSendersDeactive") + if err != nil { + return nil, err + } + return &OCR2DROracleAuthorizedSendersDeactiveIterator{contract: _OCR2DROracle.contract, event: "AuthorizedSendersDeactive", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchAuthorizedSendersDeactive(opts *bind.WatchOpts, sink chan<- *OCR2DROracleAuthorizedSendersDeactive) (event.Subscription, error) { + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "AuthorizedSendersDeactive") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleAuthorizedSendersDeactive) + if err := _OCR2DROracle.contract.UnpackLog(event, "AuthorizedSendersDeactive", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle 
*OCR2DROracleFilterer) ParseAuthorizedSendersDeactive(log types.Log) (*OCR2DROracleAuthorizedSendersDeactive, error) { + event := new(OCR2DROracleAuthorizedSendersDeactive) + if err := _OCR2DROracle.contract.UnpackLog(event, "AuthorizedSendersDeactive", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleConfigSetIterator struct { + Event *OCR2DROracleConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleConfigSetIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterConfigSet(opts *bind.FilterOpts) (*OCR2DROracleConfigSetIterator, error) { + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &OCR2DROracleConfigSetIterator{contract: 
_OCR2DROracle.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OCR2DROracleConfigSet) (event.Subscription, error) { + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleConfigSet) + if err := _OCR2DROracle.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseConfigSet(log types.Log) (*OCR2DROracleConfigSet, error) { + event := new(OCR2DROracleConfigSet) + if err := _OCR2DROracle.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleInitializedIterator struct { + Event *OCR2DROracleInitialized + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleInitializedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + 
return it.Next() + } +} + +func (it *OCR2DROracleInitializedIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleInitializedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleInitialized struct { + Version uint8 + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterInitialized(opts *bind.FilterOpts) (*OCR2DROracleInitializedIterator, error) { + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return &OCR2DROracleInitializedIterator{contract: _OCR2DROracle.contract, event: "Initialized", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchInitialized(opts *bind.WatchOpts, sink chan<- *OCR2DROracleInitialized) (event.Subscription, error) { + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleInitialized) + if err := _OCR2DROracle.contract.UnpackLog(event, "Initialized", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseInitialized(log types.Log) (*OCR2DROracleInitialized, error) { + event := new(OCR2DROracleInitialized) + if err := _OCR2DROracle.contract.UnpackLog(event, "Initialized", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleInvalidRequestIDIterator struct { + Event *OCR2DROracleInvalidRequestID + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleInvalidRequestIDIterator) 
Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleInvalidRequestID) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleInvalidRequestID) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleInvalidRequestIDIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleInvalidRequestIDIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleInvalidRequestID struct { + RequestId [32]byte + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterInvalidRequestID(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleInvalidRequestIDIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "InvalidRequestID", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DROracleInvalidRequestIDIterator{contract: _OCR2DROracle.contract, event: "InvalidRequestID", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchInvalidRequestID(opts *bind.WatchOpts, sink chan<- *OCR2DROracleInvalidRequestID, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "InvalidRequestID", requestIdRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleInvalidRequestID) + if err := _OCR2DROracle.contract.UnpackLog(event, "InvalidRequestID", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseInvalidRequestID(log types.Log) (*OCR2DROracleInvalidRequestID, error) { + event := new(OCR2DROracleInvalidRequestID) + if err := _OCR2DROracle.contract.UnpackLog(event, "InvalidRequestID", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleOracleRequestIterator struct { + Event *OCR2DROracleOracleRequest + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleOracleRequestIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleOracleRequestIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleOracleRequestIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleOracleRequest struct { + RequestId [32]byte + RequestingContract 
common.Address + RequestInitiator common.Address + SubscriptionId uint64 + SubscriptionOwner common.Address + Data []byte + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterOracleRequest(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleOracleRequestIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "OracleRequest", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DROracleOracleRequestIterator{contract: _OCR2DROracle.contract, event: "OracleRequest", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchOracleRequest(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOracleRequest, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "OracleRequest", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleOracleRequest) + if err := _OCR2DROracle.contract.UnpackLog(event, "OracleRequest", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseOracleRequest(log types.Log) (*OCR2DROracleOracleRequest, error) { + event := new(OCR2DROracleOracleRequest) + if err := _OCR2DROracle.contract.UnpackLog(event, "OracleRequest", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
OCR2DROracleOracleResponseIterator struct { + Event *OCR2DROracleOracleResponse + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleOracleResponseIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOracleResponse) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOracleResponse) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleOracleResponseIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleOracleResponseIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleOracleResponse struct { + RequestId [32]byte + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterOracleResponse(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleOracleResponseIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "OracleResponse", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DROracleOracleResponseIterator{contract: _OCR2DROracle.contract, event: "OracleResponse", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchOracleResponse(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOracleResponse, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range 
requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "OracleResponse", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleOracleResponse) + if err := _OCR2DROracle.contract.UnpackLog(event, "OracleResponse", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseOracleResponse(log types.Log) (*OCR2DROracleOracleResponse, error) { + event := new(OCR2DROracleOracleResponse) + if err := _OCR2DROracle.contract.UnpackLog(event, "OracleResponse", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleOwnershipTransferRequestedIterator struct { + Event *OCR2DROracleOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + 
+func (it *OCR2DROracleOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DROracleOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &OCR2DROracleOwnershipTransferRequestedIterator{contract: _OCR2DROracle.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleOwnershipTransferRequested) + if err := _OCR2DROracle.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseOwnershipTransferRequested(log types.Log) (*OCR2DROracleOwnershipTransferRequested, error) { + event := new(OCR2DROracleOwnershipTransferRequested) + if err := _OCR2DROracle.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleOwnershipTransferredIterator struct { + Event *OCR2DROracleOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DROracleOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for 
_, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &OCR2DROracleOwnershipTransferredIterator{contract: _OCR2DROracle.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleOwnershipTransferred) + if err := _OCR2DROracle.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseOwnershipTransferred(log types.Log) (*OCR2DROracleOwnershipTransferred, error) { + event := new(OCR2DROracleOwnershipTransferred) + if err := _OCR2DROracle.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleResponseTransmittedIterator struct { + Event *OCR2DROracleResponseTransmitted + + contract *bind.BoundContract 
+ event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleResponseTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleResponseTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleResponseTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleResponseTransmittedIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleResponseTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleResponseTransmitted struct { + RequestId [32]byte + Transmitter common.Address + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterResponseTransmitted(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleResponseTransmittedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "ResponseTransmitted", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DROracleResponseTransmittedIterator{contract: _OCR2DROracle.contract, event: "ResponseTransmitted", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchResponseTransmitted(opts *bind.WatchOpts, sink chan<- *OCR2DROracleResponseTransmitted, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + 
requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "ResponseTransmitted", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleResponseTransmitted) + if err := _OCR2DROracle.contract.UnpackLog(event, "ResponseTransmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseResponseTransmitted(log types.Log) (*OCR2DROracleResponseTransmitted, error) { + event := new(OCR2DROracleResponseTransmitted) + if err := _OCR2DROracle.contract.UnpackLog(event, "ResponseTransmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleTransmittedIterator struct { + Event *OCR2DROracleTransmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleTransmittedIterator) Error() error 
{ + return it.fail +} + +func (it *OCR2DROracleTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleTransmitted struct { + ConfigDigest [32]byte + Epoch uint32 + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterTransmitted(opts *bind.FilterOpts) (*OCR2DROracleTransmittedIterator, error) { + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return &OCR2DROracleTransmittedIterator{contract: _OCR2DROracle.contract, event: "Transmitted", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchTransmitted(opts *bind.WatchOpts, sink chan<- *OCR2DROracleTransmitted) (event.Subscription, error) { + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleTransmitted) + if err := _OCR2DROracle.contract.UnpackLog(event, "Transmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseTransmitted(log types.Log) (*OCR2DROracleTransmitted, error) { + event := new(OCR2DROracleTransmitted) + if err := _OCR2DROracle.contract.UnpackLog(event, "Transmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleUserCallbackErrorIterator struct { + Event *OCR2DROracleUserCallbackError + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleUserCallbackErrorIterator) Next() bool { + + if it.fail != nil { + return false + } + + if 
it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleUserCallbackError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleUserCallbackError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleUserCallbackErrorIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleUserCallbackErrorIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleUserCallbackError struct { + RequestId [32]byte + Reason string + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterUserCallbackError(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleUserCallbackErrorIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "UserCallbackError", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DROracleUserCallbackErrorIterator{contract: _OCR2DROracle.contract, event: "UserCallbackError", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchUserCallbackError(opts *bind.WatchOpts, sink chan<- *OCR2DROracleUserCallbackError, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "UserCallbackError", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error 
{ + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleUserCallbackError) + if err := _OCR2DROracle.contract.UnpackLog(event, "UserCallbackError", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseUserCallbackError(log types.Log) (*OCR2DROracleUserCallbackError, error) { + event := new(OCR2DROracleUserCallbackError) + if err := _OCR2DROracle.contract.UnpackLog(event, "UserCallbackError", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleUserCallbackRawErrorIterator struct { + Event *OCR2DROracleUserCallbackRawError + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleUserCallbackRawErrorIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleUserCallbackRawError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleUserCallbackRawError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleUserCallbackRawErrorIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleUserCallbackRawErrorIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleUserCallbackRawError struct { + RequestId [32]byte + LowLevelData 
[]byte + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterUserCallbackRawError(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleUserCallbackRawErrorIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "UserCallbackRawError", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DROracleUserCallbackRawErrorIterator{contract: _OCR2DROracle.contract, event: "UserCallbackRawError", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchUserCallbackRawError(opts *bind.WatchOpts, sink chan<- *OCR2DROracleUserCallbackRawError, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "UserCallbackRawError", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleUserCallbackRawError) + if err := _OCR2DROracle.contract.UnpackLog(event, "UserCallbackRawError", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseUserCallbackRawError(log types.Log) (*OCR2DROracleUserCallbackRawError, error) { + event := new(OCR2DROracleUserCallbackRawError) + if err := _OCR2DROracle.contract.UnpackLog(event, "UserCallbackRawError", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LatestConfigDetails struct { + ConfigCount 
uint32 + BlockNumber uint32 + ConfigDigest [32]byte +} +type LatestConfigDigestAndEpoch struct { + ScanLogs bool + ConfigDigest [32]byte + Epoch uint32 +} + +func (_OCR2DROracle *OCR2DROracle) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _OCR2DROracle.abi.Events["AuthorizedSendersActive"].ID: + return _OCR2DROracle.ParseAuthorizedSendersActive(log) + case _OCR2DROracle.abi.Events["AuthorizedSendersChanged"].ID: + return _OCR2DROracle.ParseAuthorizedSendersChanged(log) + case _OCR2DROracle.abi.Events["AuthorizedSendersDeactive"].ID: + return _OCR2DROracle.ParseAuthorizedSendersDeactive(log) + case _OCR2DROracle.abi.Events["ConfigSet"].ID: + return _OCR2DROracle.ParseConfigSet(log) + case _OCR2DROracle.abi.Events["Initialized"].ID: + return _OCR2DROracle.ParseInitialized(log) + case _OCR2DROracle.abi.Events["InvalidRequestID"].ID: + return _OCR2DROracle.ParseInvalidRequestID(log) + case _OCR2DROracle.abi.Events["OracleRequest"].ID: + return _OCR2DROracle.ParseOracleRequest(log) + case _OCR2DROracle.abi.Events["OracleResponse"].ID: + return _OCR2DROracle.ParseOracleResponse(log) + case _OCR2DROracle.abi.Events["OwnershipTransferRequested"].ID: + return _OCR2DROracle.ParseOwnershipTransferRequested(log) + case _OCR2DROracle.abi.Events["OwnershipTransferred"].ID: + return _OCR2DROracle.ParseOwnershipTransferred(log) + case _OCR2DROracle.abi.Events["ResponseTransmitted"].ID: + return _OCR2DROracle.ParseResponseTransmitted(log) + case _OCR2DROracle.abi.Events["Transmitted"].ID: + return _OCR2DROracle.ParseTransmitted(log) + case _OCR2DROracle.abi.Events["UserCallbackError"].ID: + return _OCR2DROracle.ParseUserCallbackError(log) + case _OCR2DROracle.abi.Events["UserCallbackRawError"].ID: + return _OCR2DROracle.ParseUserCallbackRawError(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OCR2DROracleAuthorizedSendersActive) Topic() common.Hash { + return 
common.HexToHash("0xae51766a982895b0c444fc99fc1a560762b464d709e6c78376c85617f7eeb5ce") +} + +func (OCR2DROracleAuthorizedSendersChanged) Topic() common.Hash { + return common.HexToHash("0xf263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0") +} + +func (OCR2DROracleAuthorizedSendersDeactive) Topic() common.Hash { + return common.HexToHash("0xea3828816a323b8d7ff49d755efd105e7719166d6c76fad97a28eee5eccc3d9a") +} + +func (OCR2DROracleConfigSet) Topic() common.Hash { + return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05") +} + +func (OCR2DROracleInitialized) Topic() common.Hash { + return common.HexToHash("0x7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498") +} + +func (OCR2DROracleInvalidRequestID) Topic() common.Hash { + return common.HexToHash("0xa1c120e327c9ad8b075793878c88d59b8934b97ae37117faa3bb21616237f7be") +} + +func (OCR2DROracleOracleRequest) Topic() common.Hash { + return common.HexToHash("0xa1ec73989d79578cd6f67d4f593ac3e0a4d1020e5c0164db52108d7ff785406c") +} + +func (OCR2DROracleOracleResponse) Topic() common.Hash { + return common.HexToHash("0x9e9bc7616d42c2835d05ae617e508454e63b30b934be8aa932ebc125e0e58a64") +} + +func (OCR2DROracleOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (OCR2DROracleOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (OCR2DROracleResponseTransmitted) Topic() common.Hash { + return common.HexToHash("0xdc941eddab34a6109ab77798299c6b1f035b125fd6f774d266ecbf9541d630a6") +} + +func (OCR2DROracleTransmitted) Topic() common.Hash { + return common.HexToHash("0xb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62") +} + +func (OCR2DROracleUserCallbackError) Topic() common.Hash { + return 
common.HexToHash("0xb2931868c372fe17a25643458add467d60ec5c51125a99b7309f41f5bcd2da6c") +} + +func (OCR2DROracleUserCallbackRawError) Topic() common.Hash { + return common.HexToHash("0xe0b838ffe6ee22a0d3acf19a85db6a41b34a1ab739e2d6c759a2e42d95bdccb2") +} + +func (_OCR2DROracle *OCR2DROracle) Address() common.Address { + return _OCR2DROracle.address +} + +type OCR2DROracleInterface interface { + AuthorizedReceiverActive(opts *bind.CallOpts) (bool, error) + + EstimateCost(opts *bind.CallOpts, subscriptionId uint64, data []byte, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) + + GetAllNodePublicKeys(opts *bind.CallOpts) ([]common.Address, [][]byte, error) + + GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) + + GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) + + GetRegistry(opts *bind.CallOpts) (common.Address, error) + + GetRequiredFee(opts *bind.CallOpts, arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) + + GetThresholdPublicKey(opts *bind.CallOpts) ([]byte, error) + + IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) + + LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) + + LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Transmitters(opts *bind.CallOpts) ([]common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + ActivateAuthorizedReceiver(opts *bind.TransactOpts) (*types.Transaction, error) + + AddAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) + + DeactivateAuthorizedReceiver(opts *bind.TransactOpts) (*types.Transaction, error) + + DeleteNodePublicKey(opts *bind.TransactOpts, node common.Address) (*types.Transaction, error) + + Initialize(opts *bind.TransactOpts) (*types.Transaction, error) + + 
RemoveAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) + + SendRequest(opts *bind.TransactOpts, subscriptionId uint64, data []byte, gasLimit uint32) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) + + SetDONPublicKey(opts *bind.TransactOpts, donPublicKey []byte) (*types.Transaction, error) + + SetNodePublicKey(opts *bind.TransactOpts, node common.Address, publicKey []byte) (*types.Transaction, error) + + SetRegistry(opts *bind.TransactOpts, registryAddress common.Address) (*types.Transaction, error) + + SetThresholdPublicKey(opts *bind.TransactOpts, thresholdPublicKey []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) + + FilterAuthorizedSendersActive(opts *bind.FilterOpts) (*OCR2DROracleAuthorizedSendersActiveIterator, error) + + WatchAuthorizedSendersActive(opts *bind.WatchOpts, sink chan<- *OCR2DROracleAuthorizedSendersActive) (event.Subscription, error) + + ParseAuthorizedSendersActive(log types.Log) (*OCR2DROracleAuthorizedSendersActive, error) + + FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*OCR2DROracleAuthorizedSendersChangedIterator, error) + + WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *OCR2DROracleAuthorizedSendersChanged) (event.Subscription, error) + + ParseAuthorizedSendersChanged(log types.Log) (*OCR2DROracleAuthorizedSendersChanged, error) + + FilterAuthorizedSendersDeactive(opts *bind.FilterOpts) (*OCR2DROracleAuthorizedSendersDeactiveIterator, error) + + WatchAuthorizedSendersDeactive(opts *bind.WatchOpts, sink chan<- 
*OCR2DROracleAuthorizedSendersDeactive) (event.Subscription, error) + + ParseAuthorizedSendersDeactive(log types.Log) (*OCR2DROracleAuthorizedSendersDeactive, error) + + FilterConfigSet(opts *bind.FilterOpts) (*OCR2DROracleConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OCR2DROracleConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*OCR2DROracleConfigSet, error) + + FilterInitialized(opts *bind.FilterOpts) (*OCR2DROracleInitializedIterator, error) + + WatchInitialized(opts *bind.WatchOpts, sink chan<- *OCR2DROracleInitialized) (event.Subscription, error) + + ParseInitialized(log types.Log) (*OCR2DROracleInitialized, error) + + FilterInvalidRequestID(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleInvalidRequestIDIterator, error) + + WatchInvalidRequestID(opts *bind.WatchOpts, sink chan<- *OCR2DROracleInvalidRequestID, requestId [][32]byte) (event.Subscription, error) + + ParseInvalidRequestID(log types.Log) (*OCR2DROracleInvalidRequestID, error) + + FilterOracleRequest(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleOracleRequestIterator, error) + + WatchOracleRequest(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOracleRequest, requestId [][32]byte) (event.Subscription, error) + + ParseOracleRequest(log types.Log) (*OCR2DROracleOracleRequest, error) + + FilterOracleResponse(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleOracleResponseIterator, error) + + WatchOracleResponse(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOracleResponse, requestId [][32]byte) (event.Subscription, error) + + ParseOracleResponse(log types.Log) (*OCR2DROracleOracleResponse, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DROracleOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOwnershipTransferRequested, from []common.Address, to []common.Address) 
(event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*OCR2DROracleOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DROracleOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*OCR2DROracleOwnershipTransferred, error) + + FilterResponseTransmitted(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleResponseTransmittedIterator, error) + + WatchResponseTransmitted(opts *bind.WatchOpts, sink chan<- *OCR2DROracleResponseTransmitted, requestId [][32]byte) (event.Subscription, error) + + ParseResponseTransmitted(log types.Log) (*OCR2DROracleResponseTransmitted, error) + + FilterTransmitted(opts *bind.FilterOpts) (*OCR2DROracleTransmittedIterator, error) + + WatchTransmitted(opts *bind.WatchOpts, sink chan<- *OCR2DROracleTransmitted) (event.Subscription, error) + + ParseTransmitted(log types.Log) (*OCR2DROracleTransmitted, error) + + FilterUserCallbackError(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleUserCallbackErrorIterator, error) + + WatchUserCallbackError(opts *bind.WatchOpts, sink chan<- *OCR2DROracleUserCallbackError, requestId [][32]byte) (event.Subscription, error) + + ParseUserCallbackError(log types.Log) (*OCR2DROracleUserCallbackError, error) + + FilterUserCallbackRawError(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleUserCallbackRawErrorIterator, error) + + WatchUserCallbackRawError(opts *bind.WatchOpts, sink chan<- *OCR2DROracleUserCallbackRawError, requestId [][32]byte) (event.Subscription, error) + + ParseUserCallbackRawError(log types.Log) (*OCR2DROracleUserCallbackRawError, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git 
a/core/gethwrappers/functions/generated/ocr2dr_registry/ocr2dr_registry.go b/core/gethwrappers/functions/generated/ocr2dr_registry/ocr2dr_registry.go new file mode 100644 index 00000000..6db80961 --- /dev/null +++ b/core/gethwrappers/functions/generated/ocr2dr_registry/ocr2dr_registry.go @@ -0,0 +1,3350 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package ocr2dr_registry + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type FunctionsBillingRegistryCommitment struct { + SubscriptionId uint64 + Client common.Address + GasLimit uint32 + GasPrice *big.Int + Don common.Address + DonFee *big.Int + RegistryFee *big.Int + EstimatedCost *big.Int + Timestamp *big.Int +} + +type IFunctionsBillingRegistryRequestBilling struct { + SubscriptionId uint64 + Client common.Address + GasLimit uint32 + GasPrice *big.Int +} + +var OCR2DRRegistryMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkEthFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"internalBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"externalBalance\",\"type\":\"uint256\"}],\"name\":\"BalanceInvariantViolated\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotSelfTransfer\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySendersList\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"have\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"want\",\"type\":\"uint32\"}],\"name\":\"GasLimitTooBig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientBalance\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"InvalidConsumer\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"linkWei\",\"type\":\"int256\"}],\"name\":\"InvalidLinkWeiPrice\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSubscription\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"proposedOwner\",\"type\":\"address\"}],\"name\":\"MustBeRequestedOwner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"MustBeSubOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAllowedToSetSenders\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotProposedOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableFromLink\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OwnerMus
tBeSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentTooLarge\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PendingRequestExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"Reentrant\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyConsumers\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedSender\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"signerPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"transmitterPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalCost\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"name\":\"BillingEnd\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"don\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"donFee\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"registryFee\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"estimatedCost\",\"type\":\"uint96\"},{\
"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structFunctionsBillingRegistry.Commitment\",\"name\":\"commitment\",\"type\":\"tuple\"}],\"name\":\"BillingStart\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasOverhead\",\"type\":\"uint32\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"FundsRecovered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalTyp
e\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"RequestTimedOut\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"SubscriptionCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"SubscriptionCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"SubscriptionFunded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOw
nerTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"MAX_CONSUMERS\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"acceptSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"addConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"cancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"createSubscription\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"donFee\",\"type
\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"registryFee\",\"type\":\"uint96\"}],\"name\":\"estimateCost\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address[31]\",\"name\":\"signers\",\"type\":\"address[31]\"},{\"internalType\":\"uint8\",\"name\":\"signerCount\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"reportValidationGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"initialGas\",\"type\":\"uint256\"}],\"name\":\"fulfillAndBill\",\"outputs\":[{\"internalType\":\"enumIFunctionsBillingRegistry.FulfillResult\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAuthorizedSenders\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint256\"},{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"internalType\":\"uint32\",\"name\":\"gasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"linkAddress\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkPriceFeed\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCurrentsubscriptionId\",\"outputs\":[{\"internalType\":
\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRequestConfig\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"},{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"}],\"internalType\":\"structIFunctionsBillingRegistry.RequestBilling\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"getRequiredFee\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"getSubscription\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"getSubscriptionOwner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTotalBalance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkEthFeed\",\"type\":\"address\"}
,{\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"isAuthorizedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"oracleWithdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"ownerCancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"pendingRequestExists\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"recoverFunds\",\"outputs
\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"removeConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"requestSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"setAuthorizedSenders\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint256\"},{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"internalType\":\"uint32\",\"name\":\"gasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeoutSeconds\",\"type\":\"uint32\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"}],\"internalType\":\"structIFunctionsBillingRegistry.RequestBilling\",\"name\":\"billing\",\"type\":\"tuple\"}],\"name\":\"startBilling\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"
\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[]\",\"name\":\"requestIdsToTimeout\",\"type\":\"bytes32[]\"}],\"name\":\"timeoutRequests\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60806040523480156200001157600080fd5b5060405162005d3838038062005d3883398101604081905262000034916200040c565b620000418383836200004a565b50505062000456565b600054610100900460ff16158080156200006b5750600054600160ff909116105b806200009b57506200008830620001c960201b62003bd41760201c565b1580156200009b575060005460ff166001145b620001045760405162461bcd60e51b815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201526d191e481a5b9a5d1a585b1a5e995960921b60648201526084015b60405180910390fd5b6000805460ff19166001179055801562000128576000805461ff0019166101001790555b62000132620001d8565b6200013f33600062000240565b606980546001600160a01b038087166001600160a01b031992831617909255606a8054868416908316179055606b8054928516929091169190911790558015620001c3576000805461ff0019169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b50505050565b6001600160a01b03163b151590565b600054610100900460ff16620002345760405162461bcd60e51b815260206004820152602b602482015260008051602062005d1883398151915260448201526a6e697469616c697a696e6760a81b6064820152608401620000fb565b6200023e62000304565b565b600054610100900460ff166200029c5760405162461bcd60e51b815260206004820152602b602482015260008051602062005d1883398151915260448201526a6e697469616c697a696e6760a81b6064820152608401620000fb565b6001600160a01b038216620002c457604051635b5a8afd60e11b8152600401604
05180910390fd5b600080546001600160a01b03808516620100000262010000600160b01b031990921691909117909155811615620003005762000300816200036c565b5050565b600054610100900460ff16620003605760405162461bcd60e51b815260206004820152602b602482015260008051602062005d1883398151915260448201526a6e697469616c697a696e6760a81b6064820152608401620000fb565b6034805460ff19169055565b6001600160a01b038116331415620003975760405163282010c360e01b815260040160405180910390fd5b600180546001600160a01b0319166001600160a01b038381169182179092556000805460405192936201000090910416917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b03811681146200040757600080fd5b919050565b6000806000606084860312156200042257600080fd5b6200042d84620003ef565b92506200043d60208501620003ef565b91506200044d60408501620003ef565b90509250925092565b6158b280620004666000396000f3fe608060405234801561001057600080fd5b50600436106102255760003560e01c80638da5cb5b1161012a578063c0c53b8b116100bd578063e82ad7d41161008c578063f1e14a2111610071578063f1e14a2114610561578063f2fde38b14610578578063fa00763a1461058b57600080fd5b8063e82ad7d41461053b578063ee56997b1461054e57600080fd5b8063c0c53b8b1461048d578063c3f909d4146104a0578063d7ae1d3014610515578063e72f6e301461052857600080fd5b8063a47c7696116100f9578063a47c769614610432578063a4c0ed3614610454578063a9d03c0514610467578063b2a489ff1461047a57600080fd5b80638da5cb5b146103a25780639f87fad7146103e7578063a1a6d041146103fa578063a21a23e41461042a57600080fd5b80633f4ba83a116101bd578063665871ec1161018c57806379ba50971161017157806379ba50971461037f57806382359740146103875780638456cb591461039a57600080fd5b8063665871ec146103595780637341c10c1461036c57600080fd5b80633f4ba83a1461030c5780635c975abb1461031457806364d51a2a1461032b57806366316d8d1461034657600080fd5b806312b58349116101f957806312b58349146102915780632408afaa146102bd57806327923e41146102d257806333652e3e146102e557600080fd5b80620122911461022a57806302bcc5b61461024957806304c357cb1461025e5780630739e4f114610271575b600080fd5b61023261059e565b6040516102409291906155a15
65b60405180910390f35b61025c6102573660046151d9565b6105bd565b005b61025c61026c3660046151f4565b61063a565b61028461027f366004614edf565b610835565b6040516102409190615470565b606f546801000000000000000090046bffffffffffffffffffffffff165b604051908152602001610240565b6102c5610ef7565b60405161024091906153a6565b61025c6102e0366004615173565b610f66565b606f5467ffffffffffffffff165b60405167ffffffffffffffff9091168152602001610240565b61025c6110b9565b60345460ff165b6040519015158152602001610240565b610333606481565b60405161ffff9091168152602001610240565b61025c610354366004614e44565b6110cb565b61025c610367366004614e7b565b611331565b61025c61037a3660046151f4565b6115fd565b61025c611890565b61025c6103953660046151d9565b611983565b61025c611d04565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610240565b61025c6103f53660046151f4565b611d14565b61040d610408366004615122565b61219b565b6040516bffffffffffffffffffffffff9091168152602001610240565b6102f36122bf565b6104456104403660046151d9565b612657565b604051610240939291906155c8565b61025c610462366004614dea565b612788565b6102af610475366004615009565b6129e1565b6103c26104883660046151d9565b6131dc565b61025c61049b366004614da7565b613275565b607354607454607254607554606954606a546040805163ffffffff808916825265010000000000909804881660208201529081019590955260608501939093529316608083015273ffffffffffffffffffffffffffffffffffffffff92831660a08301529190911660c082015260e001610240565b61025c6105233660046151f4565b613477565b61025c610536366004614d8c565b6135de565b61031b6105493660046151d9565b6137fb565b61025c61055c366004614e7b565b613a3a565b61040d61056f366004615086565b60009392505050565b61025c610586366004614d8c565b613bad565b61031b610599366004614d8c565b613bc1565b60735460009060609063ffffffff166105b5610ef7565b915091509091565b6105c5613bf0565b67ffffffffffffffff81166000908152606d602052604090205473ffffffffffffffffffffffffffffffffffffffff168061062c576040517f1f6a65b60000000000000000000000000000000000000000000000000000000081526004016040518
0910390fd5b6106368282613c47565b5050565b67ffffffffffffffff82166000908152606d6020526040902054829073ffffffffffffffffffffffffffffffffffffffff16806106a3576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff82161461070f576040517fd8a3fb5200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821660048201526024015b60405180910390fd5b607354640100000000900460ff1615610754576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61075c61404e565b67ffffffffffffffff84166000908152606d602052604090206001015473ffffffffffffffffffffffffffffffffffffffff84811691161461082f5767ffffffffffffffff84166000818152606d602090815260409182902060010180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff88169081179091558251338152918201527f69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be91015b60405180910390a25b50505050565b600061083f6140bb565b607354640100000000900460ff1615610884576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61088c61404e565b60008b815260716020908152604091829020825161012081018452815467ffffffffffffffff8116825268010000000000000000810473ffffffffffffffffffffffffffffffffffffffff908116948301949094527c0100000000000000000000000000000000000000000000000000000000900463ffffffff169381019390935260018101546060840152600281015491821660808401819052740100000000000000000000000000000000000000009092046bffffffffffffffffffffffff90811660a0850152600382015480821660c08601526c0100000000000000000000000090041660e084015260040154610100830152610990576002915050610ee9565b60008c81526071602052604080822082815560018101839055600281018390556003810180547fffffffffffffffff000000000000000000000000000000000000000000000000169055600401829055517f0ca761750000000000000000000000000000000000000000000000000000000090610a19908f908f908f908f9
08f906024016153b9565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152918152602080830180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff0000000000000000000000000000000000000000000000000000000090951694909417909352607380547fffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffff1664010000000017905584015191840151909250600091610ae69163ffffffff90911690846140fa565b607380547fffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffff16905560745460a085015160c0860151929350600092610b3292899290918c908c3a614146565b604080820151865167ffffffffffffffff166000908152606e60205291909120549192506bffffffffffffffffffffffff90811691161015610ba0576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b604080820151855167ffffffffffffffff166000908152606e602052918220805491929091610bde9084906bffffffffffffffffffffffff16615712565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555060005b8860ff16811015610cd8578151607060008c84601f8110610c3257610c3261582d565b602002015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282829054906101000a90046bffffffffffffffffffffffff16610c979190615658565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508080610cd09061573f565b915050610c0f565b508360c0015160706000610d0860005473ffffffffffffffffffffffffffffffffffffffff620100009091041690565b73ffffffffffffffffffffffffffffffffffffffff168152602081019190915260400160009081208054909190610d4e9084906bffffffffffffffffffffffff16615658565b82546101009290920a6bffffffffffffffffffffffff81810219909316918316021790915560208381015173ffffffffffffffffffffffffffffffffffffffff8e166000908152607090925260408220805491945092610db091859116615658565b82546101009290920a6bffffffffffffffffffffffff81810219909316918316021790915560e0860151865167ffffffffffffffff166000908152606e6020526040902080549
1935091600c91610e199185916c01000000000000000000000000900416615712565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508e7fc8dc973332de19a5f71b6026983110e9c2e04b0c98b87eb771ccb78607fd114f856000015183600001518460200151856040015187604051610ecb95949392919067ffffffffffffffff9590951685526bffffffffffffffffffffffff9384166020860152918316604085015290911660608301521515608082015260a00190565b60405180910390a281610edf576001610ee2565b60005b9450505050505b9a9950505050505050505050565b60606068805480602002602001604051908101604052809291908181526020018280548015610f5c57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610f31575b5050505050905090565b610f6e613bf0565b60008313610fab576040517f43d4cf6600000000000000000000000000000000000000000000000000000000815260048101849052602401610706565b6040805160c08101825263ffffffff888116808352600060208085019190915289831684860181905260608086018b9052888516608080880182905295891660a0978801819052607380547fffffffffffffffffffffffffffffffffffffffffffffff00000000000000000016871765010000000000860217905560748d9055607580547fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000016831764010000000090920291909117905560728b9055875194855292840191909152948201899052938101879052908101929092527f24d3d934adfef9b9029d6ffa463c07d0139ed47d26ee23506f85ece2879d2bd4910160405180910390a1505050505050565b6110c1613bf0565b6110c96142c6565b565b607354640100000000900460ff1615611110576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61111861404e565b6bffffffffffffffffffffffff811661114b5750336000908152607060205260409020546bffffffffffffffffffffffff165b336000908152607060205260409020546bffffffffffffffffffffffff808316911610156111a5576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b33600090815260706020526040812080548392906111d29084906bffffffffffffffffffffffff16615712565b92506101000a8154816bfffff
fffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555080606f60088282829054906101000a90046bffffffffffffffffffffffff166112299190615712565b82546101009290920a6bffffffffffffffffffffffff8181021990931691831602179091556069546040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff868116600483015292851660248201529116915063a9059cbb90604401602060405180830381600087803b1580156112c357600080fd5b505af11580156112d7573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906112fb9190614ebd565b610636576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61133961404e565b60005b818110156115f85760008383838181106113585761135861582d565b602090810292909201356000818152607184526040808220815161012081018352815467ffffffffffffffff811680835268010000000000000000820473ffffffffffffffffffffffffffffffffffffffff908116848b01527c010000000000000000000000000000000000000000000000000000000090920463ffffffff168386015260018401546060840152600284015480831660808501527401000000000000000000000000000000000000000090046bffffffffffffffffffffffff90811660a0850152600385015480821660c08601526c0100000000000000000000000090041660e0840152600490930154610100830152918452606d90965291205491945016331490506114cd57805167ffffffffffffffff166000908152606d6020526040908190205490517fd8a3fb5200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091166004820152602401610706565b60755461010082015142916114f19164010000000090910463ffffffff1690615614565b11156115e35760e0810151815167ffffffffffffffff166000908152606e602052604090208054600c906115449084906c0100000000000000000000000090046bffffffffffffffffffffffff16615712565b82546bffffffffffffffffffffffff9182166101009390930a92830291909202199091161790555060008281526071602052604080822082815560018101839055600281018390556003810180547fffffffffffffffff0000000000000000000000000000000000000000000000001690556004018290555183917ff1ca1e9147be7
37b04a2b018a79405f687a97de8dd8a2559bbe62357343af41491a25b505080806115f09061573f565b91505061133c565b505050565b67ffffffffffffffff82166000908152606d6020526040902054829073ffffffffffffffffffffffffffffffffffffffff1680611666576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff8216146116cd576040517fd8a3fb5200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610706565b607354640100000000900460ff1615611712576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61171a61404e565b67ffffffffffffffff84166000908152606d602052604090206002015460641415611771576040517f05a48e0f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff83166000908152606c6020908152604080832067ffffffffffffffff808916855292529091205416156117b85761082f565b73ffffffffffffffffffffffffffffffffffffffff83166000818152606c6020908152604080832067ffffffffffffffff891680855290835281842080547fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000166001908117909155606d84528285206002018054918201815585529383902090930180547fffffffffffffffffffffffff000000000000000000000000000000000000000016851790555192835290917f43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e09101610826565b60015473ffffffffffffffffffffffffffffffffffffffff1633146118e1576040517f0f22ca5f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000805433620100008181027fffffffffffffffffffff0000000000000000000000000000000000000000ffff8416178455600180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905560405173ffffffffffffffffffffffffffffffffffffffff919093041692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b607354640100000000900460ff16156119c8576040517fed3ba6a6000000000000000000000000000000000
00000000000000000000000815260040160405180910390fd5b6119d061404e565b606b60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16634b4fa0c16040518163ffffffff1660e01b815260040160206040518083038186803b158015611a3857600080fd5b505afa158015611a4c573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611a709190614ebd565b8015611b1a5750606b546040517ffa00763a00000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff9091169063fa00763a9060240160206040518083038186803b158015611ae057600080fd5b505afa158015611af4573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611b189190614ebd565b155b15611b51576040517f0809490800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff81166000908152606d602052604090205473ffffffffffffffffffffffffffffffffffffffff16611bb7576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff81166000908152606d602052604090206001015473ffffffffffffffffffffffffffffffffffffffff163314611c595767ffffffffffffffff81166000908152606d6020526040908190206001015490517fd084e97500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091166004820152602401610706565b67ffffffffffffffff81166000818152606d60209081526040918290208054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560019093018054909316909255835173ffffffffffffffffffffffffffffffffffffffff909116808252928101919091529092917f6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f0910160405180910390a25050565b611d0c613bf0565b6110c9614343565b67ffffffffffffffff82166000908152606d6020526040902054829073ffffffffffffffffffffffffffffffffffffffff1680611d7d576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff8216146
11de4576040517fd8a3fb5200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610706565b607354640100000000900460ff1615611e29576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b611e3161404e565b73ffffffffffffffffffffffffffffffffffffffff83166000908152606c6020908152604080832067ffffffffffffffff808916855292529091205416611ecc576040517ff0019fe600000000000000000000000000000000000000000000000000000000815267ffffffffffffffff8516600482015273ffffffffffffffffffffffffffffffffffffffff84166024820152604401610706565b67ffffffffffffffff84166000908152606d6020908152604080832060020180548251818502810185019093528083529192909190830182828015611f4757602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311611f1c575b50505050509050600060018251611f5e91906156fb565b905060005b82518110156120fd578573ffffffffffffffffffffffffffffffffffffffff16838281518110611f9557611f9561582d565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1614156120eb576000838381518110611fcd57611fcd61582d565b6020026020010151905080606d60008a67ffffffffffffffff1667ffffffffffffffff16815260200190815260200160002060020183815481106120135761201361582d565b600091825260208083209190910180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff949094169390931790925567ffffffffffffffff8a168152606d9091526040902060020180548061208d5761208d6157fe565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055019055506120fd565b806120f58161573f565b915050611f63565b5073ffffffffffffffffffffffffffffffffffffffff85166000818152606c6020908152604080832067ffffffffffffffff8b168085529083529281902080547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001690555192835290917f182bff9831466789164ca77075fffd84916d35a8180ba
73c27e45634549b445b91015b60405180910390a2505050505050565b6000806121a661439e565b9050600081136121e5576040517f43d4cf6600000000000000000000000000000000000000000000000000000000815260048101829052602401610706565b60745460755460009163ffffffff808a1692612202929116615614565b61220c9190615614565b90506000828261222489670de0b6b3a76400006156be565b61222e91906156be565b612238919061567f565b905060006122576bffffffffffffffffffffffff808816908916615614565b905061226f816b033b2e3c9fd0803ce80000006156fb565b8211156122a8576040517fe80fa38100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6122b28183615614565b9998505050505050505050565b607354600090640100000000900460ff1615612307576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61230f61404e565b606b60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16634b4fa0c16040518163ffffffff1660e01b815260040160206040518083038186803b15801561237757600080fd5b505afa15801561238b573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906123af9190614ebd565b80156124595750606b546040517ffa00763a00000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff9091169063fa00763a9060240160206040518083038186803b15801561241f57600080fd5b505afa158015612433573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906124579190614ebd565b155b15612490576040517f0809490800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606f805467ffffffffffffffff169060006124aa83615778565b82546101009290920a67ffffffffffffffff818102199093169183160217909155606f541690506000806040519080825280602002602001820160405280156124fd578160200160208202803683370190505b506040805180820182526000808252602080830182815267ffffffffffffffff8816808452606e83528584209451855492516bffffffffffffffffffffffff9081166c01000000000000000000000000027fffffffffffffffff0000000000000000000000000000000
00000000000000000909416911617919091179093558351606081018552338152808201838152818601878152948452606d8352949092208251815473ffffffffffffffffffffffffffffffffffffffff9182167fffffffffffffffffffffffff000000000000000000000000000000000000000091821617835595516001830180549190921696169590951790945591518051949550909361260f9260028501920190614ad8565b505060405133815267ffffffffffffffff841691507f464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf9060200160405180910390a250905090565b67ffffffffffffffff81166000908152606d6020526040812054819060609073ffffffffffffffffffffffffffffffffffffffff166126c2576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff84166000908152606e6020908152604080832054606d8352928190208054600290910180548351818602810186019094528084526bffffffffffffffffffffffff9095169473ffffffffffffffffffffffffffffffffffffffff90921693909291839183018282801561277457602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311612749575b505050505090509250925092509193909250565b607354640100000000900460ff16156127cd576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6127d561404e565b60695473ffffffffffffffffffffffffffffffffffffffff163314612826576040517f44b0e3c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60208114612860576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061286e828401846151d9565b67ffffffffffffffff81166000908152606d602052604090205490915073ffffffffffffffffffffffffffffffffffffffff166128d7576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff81166000908152606e6020526040812080546bffffffffffffffffffffffff169186919061290e8385615658565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555084606f60088282829054906101000a90046bf
fffffffffffffffffffffff166129659190615658565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508167ffffffffffffffff167fd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f88287846129cc9190615614565b6040805192835260208301919091520161218b565b60006129eb6140bb565b607354640100000000900460ff1615612a30576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b612a3861404e565b6000606d81612a4a60208601866151d9565b67ffffffffffffffff16815260208101919091526040016000205473ffffffffffffffffffffffffffffffffffffffff161415612ab3576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000606c81612ac86040860160208701614d8c565b73ffffffffffffffffffffffffffffffffffffffff168152602080820192909252604001600090812091612afe908601866151d9565b67ffffffffffffffff908116825260208201929092526040016000205416905080612b9a57612b3060208401846151d9565b612b406040850160208601614d8c565b6040517ff0019fe600000000000000000000000000000000000000000000000000000000815267ffffffffffffffff909216600483015273ffffffffffffffffffffffffffffffffffffffff166024820152604401610706565b60735463ffffffff16612bb36060850160408601615107565b63ffffffff161115612c1457612bcf6060840160408501615107565b6073546040517ff5d7e01e00000000000000000000000000000000000000000000000000000000815263ffffffff928316600482015291166024820152604401610706565b6040517ff1e14a21000000000000000000000000000000000000000000000000000000008152600090339063f1e14a2190612c57908990899089906004016153f2565b60206040518083038186803b158015612c6f57600080fd5b505afa158015612c83573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612ca79190615277565b90506000612cbf878761056f368990038901896150d2565b90506000612ce2612cd66060880160408901615107565b8760600135858561219b565b90506000606e81612cf660208a018a6151d9565b67ffffffffffffffff1681526020808201929092526040016000908120546c0100000000000000000000000090046bffffffffffffffffffffffff1691606e919
0612d43908b018b6151d9565b67ffffffffffffffff168152602081019190915260400160002054612d7691906bffffffffffffffffffffffff16615712565b9050816bffffffffffffffffffffffff16816bffffffffffffffffffffffff161015612dce576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000612ddb86600161562c565b90506000612e6333612df360408c0160208d01614d8c565b612e0060208d018d6151d9565b856040805173ffffffffffffffffffffffffffffffffffffffff958616602080830191909152949095168582015267ffffffffffffffff928316606086015291166080808501919091528151808503909101815260a09093019052815191012090565b60408051610120810190915290915060009080612e8360208d018d6151d9565b67ffffffffffffffff1681526020018b6020016020810190612ea59190614d8c565b73ffffffffffffffffffffffffffffffffffffffff168152602001612ed060608d0160408e01615107565b63ffffffff90811682526060808e0135602080850191909152336040808601919091526bffffffffffffffffffffffff808e16848701528c81166080808801919091528c821660a0808901919091524260c09889015260008b8152607186528481208a5181548c890151978d0151909a167c0100000000000000000000000000000000000000000000000000000000027bffffffffffffffffffffffffffffffffffffffffffffffffffffffff73ffffffffffffffffffffffffffffffffffffffff98891668010000000000000000027fffffffff00000000000000000000000000000000000000000000000000000000909c1667ffffffffffffffff909316929092179a909a171698909817885595890151600188015590880151908801518216740100000000000000000000000000000000000000000292169190911760028501559385015160038401805460e088015187166c01000000000000000000000000027fffffffffffffffff0000000000000000000000000000000000000000000000009091169290961691909117949094179093556101008401516004909201919091559192508691606e9161308a908e018e6151d9565b67ffffffffffffffff16815260208101919091526040016000208054600c906130d29084906c0100000000000000000000000090046bffffffffffffffffffffffff16615658565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550817f99f7f4e65b4b9fbabd4e357c47ed3099b36e57ecd3a43e84662f34c20
7d0ebe48260405161313091906154b1565b60405180910390a282606c600061314d60408e0160208f01614d8c565b73ffffffffffffffffffffffffffffffffffffffff168152602080820192909252604001600090812091613183908e018e6151d9565b67ffffffffffffffff9081168252602082019290925260400160002080547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001692909116919091179055509a9950505050505050505050565b67ffffffffffffffff81166000908152606d602052604081205473ffffffffffffffffffffffffffffffffffffffff16613242576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5067ffffffffffffffff166000908152606d602052604090205473ffffffffffffffffffffffffffffffffffffffff1690565b600054610100900460ff16158080156132955750600054600160ff909116105b806132af5750303b1580156132af575060005460ff166001145b61333b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a65640000000000000000000000000000000000006064820152608401610706565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055801561339957600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b6133a1614491565b6133ac336000614530565b6069805473ffffffffffffffffffffffffffffffffffffffff8087167fffffffffffffffffffffffff000000000000000000000000000000000000000092831617909255606a8054868416908316179055606b805492851692909116919091179055801561082f57600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498906020015b60405180910390a150505050565b67ffffffffffffffff82166000908152606d6020526040902054829073ffffffffffffffffffffffffffffffffffffffff16806134e0576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff821614613547576040517fd8a3fb52000
00000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610706565b607354640100000000900460ff161561358c576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61359461404e565b61359d846137fb565b156135d4576040517fb42f66e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61082f8484613c47565b6135e6613bf0565b6069546040517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015260009173ffffffffffffffffffffffffffffffffffffffff16906370a082319060240160206040518083038186803b15801561365057600080fd5b505afa158015613664573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061368891906150ee565b606f549091506801000000000000000090046bffffffffffffffffffffffff16818111156136ec576040517fa99da3020000000000000000000000000000000000000000000000000000000081526004810182905260248101839052604401610706565b818110156115f857600061370082846156fb565b6069546040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff87811660048301526024820184905292935091169063a9059cbb90604401602060405180830381600087803b15801561377657600080fd5b505af115801561378a573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906137ae9190614ebd565b506040805173ffffffffffffffffffffffffffffffffffffffff86168152602081018390527f59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b4366009101613469565b67ffffffffffffffff81166000908152606d602090815260408083206002018054825181850281018501909352808352849383018282801561387357602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311613848575b505050505090506000613884610ef7565b905060005b8251811015613a2f5760005b8251811015613a1c5760006139cc8483815181106138b5576138b561582d565b60200260200101518685815181106138cf576138cf61582d565b602002602001015189606c60008a89815181106138ee576138ee61582d565b6
02002602001015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008c67ffffffffffffffff1667ffffffffffffffff16815260200190815260200160002060009054906101000a900467ffffffffffffffff166040805173ffffffffffffffffffffffffffffffffffffffff958616602080830191909152949095168582015267ffffffffffffffff928316606086015291166080808501919091528151808503909101815260a09093019052815191012090565b60008181526071602052604090206002015490915073ffffffffffffffffffffffffffffffffffffffff1615613a09575060019695505050505050565b5080613a148161573f565b915050613895565b5080613a278161573f565b915050613889565b506000949350505050565b613a42614670565b613a78576040517fad77f06100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80613aaf576040517f75158c3b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b606854811015613b0f57613afc60688281548110613ad257613ad261582d565b60009182526020909120015460669073ffffffffffffffffffffffffffffffffffffffff16614680565b5080613b078161573f565b915050613ab2565b5060005b81811015613b6057613b4d838383818110613b3057613b3061582d565b9050602002016020810190613b459190614d8c565b6066906146a9565b5080613b588161573f565b915050613b13565b50613b6d60688383614b5e565b507ff263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0828233604051613ba19392919061532e565b60405180910390a15050565b613bb5613bf0565b613bbe816146cb565b50565b6000613bce606683614798565b92915050565b73ffffffffffffffffffffffffffffffffffffffff163b151590565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff1633146110c9576040517f2b5c74de00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b607354640100000000900460ff1615613c8c576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff82166000908152606d602090815260408083208151606081018352815473ffffffffffffffffffffffffffffffffffffffff9081168252600183015416818501526
00282018054845181870281018701865281815292959394860193830182828015613d3757602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311613d0c575b5050509190925250505067ffffffffffffffff84166000908152606e60205260408120549192506bffffffffffffffffffffffff909116905b826040015151811015613e1657606c600084604001518381518110613d9757613d9761582d565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040908101600090812067ffffffffffffffff89168252909252902080547fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000016905580613e0e8161573f565b915050613d70565b5067ffffffffffffffff84166000908152606d6020526040812080547fffffffffffffffffffffffff00000000000000000000000000000000000000009081168255600182018054909116905590613e716002830182614bd6565b505067ffffffffffffffff84166000908152606e6020526040902080547fffffffffffffffff000000000000000000000000000000000000000000000000169055606f8054829190600890613ee19084906801000000000000000090046bffffffffffffffffffffffff16615712565b82546101009290920a6bffffffffffffffffffffffff8181021990931691831602179091556069546040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff878116600483015292851660248201529116915063a9059cbb90604401602060405180830381600087803b158015613f7b57600080fd5b505af1158015613f8f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613fb39190614ebd565b613fe9576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805173ffffffffffffffffffffffffffffffffffffffff851681526bffffffffffffffffffffffff8316602082015267ffffffffffffffff8616917fe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd498159101610826565b60345460ff16156110c9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f5061757361626c653a20706175736564000000000000000000000000000000006044820152606401610706565b6140c43
3613bc1565b6110c9576040517f0809490800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005a61138881101561410c57600080fd5b61138881039050846040820482031161412457600080fd5b50823b61413057600080fd5b60008083516020850160008789f1949350505050565b604080516060810182526000808252602082018190529181018290529061416b61439e565b9050600081136141aa576040517f43d4cf6600000000000000000000000000000000000000000000000000000000815260048101829052602401610706565b6000815a8b6141b98c89615614565b6141c39190615614565b6141cd91906156fb565b6141df86670de0b6b3a76400006156be565b6141e991906156be565b6141f3919061567f565b905060006142126bffffffffffffffffffffffff808916908b16615614565b905061422a816b033b2e3c9fd0803ce80000006156fb565b821115614263576040517fe80fa38100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061427260ff8a168b615693565b90508260006142896142848584615614565b6147c7565b604080516060810182526bffffffffffffffffffffffff958616815293851660208501529316928201929092529c9b505050505050505050505050565b6142ce614869565b603480547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa335b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390a1565b61434b61404e565b603480547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a2586143193390565b607354606a54604080517ffeaf968c000000000000000000000000000000000000000000000000000000008152905160009365010000000000900463ffffffff1692831515928592839273ffffffffffffffffffffffffffffffffffffffff169163feaf968c9160048083019260a0929190829003018186803b15801561442457600080fd5b505afa158015614438573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061445c9190615227565b5093505092505082801561447e575061447581426156fb565b8463ffffffff16105b156144895760725491505b509392505050565b600054610100900460ff16614528576040517f08c37
9a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e670000000000000000000000000000000000000000006064820152608401610706565b6110c96148d5565b600054610100900460ff166145c7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e670000000000000000000000000000000000000000006064820152608401610706565b73ffffffffffffffffffffffffffffffffffffffff8216614614576040517fb6b515fa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000805473ffffffffffffffffffffffffffffffffffffffff80851662010000027fffffffffffffffffffff0000000000000000000000000000000000000000ffff9092169190911790915581161561063657610636816146cb565b600061467a613bf0565b50600190565b60006146a28373ffffffffffffffffffffffffffffffffffffffff8416614996565b9392505050565b60006146a28373ffffffffffffffffffffffffffffffffffffffff8416614a89565b73ffffffffffffffffffffffffffffffffffffffff811633141561471b576040517f282010c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8381169182179092556000805460405192936201000090910416917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b73ffffffffffffffffffffffffffffffffffffffff8116600090815260018301602052604081205415156146a2565b60006bffffffffffffffffffffffff821115614865576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f53616665436173743a2076616c756520646f65736e27742066697420696e203960448201527f36206269747300000000000000000000000000000000000000000000000000006064820152608401610706565b5090565b60345460ff166110c9576040517f08c379a0000000000000000000000000000000000000000
00000000000000000815260206004820152601460248201527f5061757361626c653a206e6f74207061757365640000000000000000000000006044820152606401610706565b600054610100900460ff1661496c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e670000000000000000000000000000000000000000006064820152608401610706565b603480547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055565b60008181526001830160205260408120548015614a7f5760006149ba6001836156fb565b85549091506000906149ce906001906156fb565b9050818114614a335760008660000182815481106149ee576149ee61582d565b9060005260206000200154905080876000018481548110614a1157614a1161582d565b6000918252602080832090910192909255918252600188019052604090208390555b8554869080614a4457614a446157fe565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505050613bce565b6000915050613bce565b6000818152600183016020526040812054614ad057508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155613bce565b506000613bce565b828054828255906000526020600020908101928215614b52579160200282015b82811115614b5257825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190614af8565b50614865929150614bf0565b828054828255906000526020600020908101928215614b52579160200282015b82811115614b525781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff843516178255602090920191600190910190614b7e565b5080546000825590600052602060002090810190613bbe91905b5b808211156148655760008155600101614bf1565b803573ffffffffffffffffffffffffffffffffffffffff81168114614c2957600080fd5b919050565b60008083601f840112614c4057600080fd5b50813567ffffffffffffffff811115614c5857600080fd5b6020830191508360208260051b8501011115614c7357600080fd5b9250929
050565b60008083601f840112614c8c57600080fd5b50813567ffffffffffffffff811115614ca457600080fd5b602083019150836020828501011115614c7357600080fd5b600060808284031215614cce57600080fd5b6040516080810181811067ffffffffffffffff82111715614cf157614cf161585c565b604052905080614d0083614d49565b8152614d0e60208401614c05565b6020820152614d1f60408401614d35565b6040820152606083013560608201525092915050565b803563ffffffff81168114614c2957600080fd5b803567ffffffffffffffff81168114614c2957600080fd5b803560ff81168114614c2957600080fd5b805169ffffffffffffffffffff81168114614c2957600080fd5b600060208284031215614d9e57600080fd5b6146a282614c05565b600080600060608486031215614dbc57600080fd5b614dc584614c05565b9250614dd360208501614c05565b9150614de160408501614c05565b90509250925092565b60008060008060608587031215614e0057600080fd5b614e0985614c05565b935060208501359250604085013567ffffffffffffffff811115614e2c57600080fd5b614e3887828801614c7a565b95989497509550505050565b60008060408385031215614e5757600080fd5b614e6083614c05565b91506020830135614e708161588b565b809150509250929050565b60008060208385031215614e8e57600080fd5b823567ffffffffffffffff811115614ea557600080fd5b614eb185828601614c2e565b90969095509350505050565b600060208284031215614ecf57600080fd5b815180151581146146a257600080fd5b6000806000806000806000806000806104c08b8d031215614eff57600080fd5b8a35995060208b013567ffffffffffffffff80821115614f1e57600080fd5b614f2a8e838f01614c7a565b909b50995060408d0135915080821115614f4357600080fd5b614f4f8e838f01614c7a565b9099509750879150614f6360608e01614c05565b96508d609f8e0112614f7457600080fd5b60405191506103e082018281108282111715614f9257614f9261585c565b604052508060808d016104608e018f811115614fad57600080fd5b60005b601f811015614fd757614fc283614c05565b84526020938401939290920191600101614fb0565b50839750614fe481614d61565b9650505050506104808b013591506104a08b013590509295989b9194979a5092959850565b600080600083850360a081121561501f57600080fd5b843567ffffffffffffffff81111561503657600080fd5b61504287828801614c7a565b90955093505060807ffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffe08201121561507857600080fd5b506020840190509250925092565b600080600060a0848603121561509b57600080fd5b833567ffffffffffffffff8111156150b257600080fd5b6150be86828701614c7a565b9094509250614de190508560208601614cbc565b6000608082840312156150e457600080fd5b6146a28383614cbc565b60006020828403121561510057600080fd5b5051919050565b60006020828403121561511957600080fd5b6146a282614d35565b6000806000806080858703121561513857600080fd5b61514185614d35565b93506020850135925060408501356151588161588b565b915060608501356151688161588b565b939692955090935050565b60008060008060008060c0878903121561518c57600080fd5b61519587614d35565b95506151a360208801614d35565b945060408701359350606087013592506151bf60808801614d35565b91506151cd60a08801614d35565b90509295509295509295565b6000602082840312156151eb57600080fd5b6146a282614d49565b6000806040838503121561520757600080fd5b61521083614d49565b915061521e60208401614c05565b90509250929050565b600080600080600060a0868803121561523f57600080fd5b61524886614d72565b945060208601519350604086015192506060860151915061526b60808701614d72565b90509295509295909350565b60006020828403121561528957600080fd5b81516146a28161588b565b600081518084526020808501945080840160005b838110156152da57815173ffffffffffffffffffffffffffffffffffffffff16875295820195908201906001016152a8565b509495945050505050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b6040808252810183905260008460608301825b8681101561537c5773ffffffffffffffffffffffffffffffffffffffff61536784614c05565b16825260209283019290910190600101615341565b50809250505073ffffffffffffffffffffffffffffffffffffffff83166020830152949350505050565b6020815260006146a26020830184615294565b8581526060602082015260006153d36060830186886152e5565b82810360408401526153e68185876152e5565b98975050505050505050565b60a08152600061540660a0830185876152e5565b905067ffffffffffffffff61541a84614d49565b16602083015273ffffffffffffffffffffffffffffffffffffffff61544160208501614c05565b16604083015
263ffffffff61545860408501614d35565b16606083015260608301356080830152949350505050565b60208101600383106154ab577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b91905290565b60006101208201905067ffffffffffffffff835116825273ffffffffffffffffffffffffffffffffffffffff602084015116602083015260408301516154ff604084018263ffffffff169052565b50606083015160608301526080830151615531608084018273ffffffffffffffffffffffffffffffffffffffff169052565b5060a083015161555160a08401826bffffffffffffffffffffffff169052565b5060c083015161557160c08401826bffffffffffffffffffffffff169052565b5060e083015161559160e08401826bffffffffffffffffffffffff169052565b5061010092830151919092015290565b63ffffffff831681526040602082015260006155c06040830184615294565b949350505050565b6bffffffffffffffffffffffff8416815273ffffffffffffffffffffffffffffffffffffffff8316602082015260606040820152600061560b6060830184615294565b95945050505050565b60008219821115615627576156276157a0565b500190565b600067ffffffffffffffff80831681851680830382111561564f5761564f6157a0565b01949350505050565b60006bffffffffffffffffffffffff80831681851680830382111561564f5761564f6157a0565b60008261568e5761568e6157cf565b500490565b60006bffffffffffffffffffffffff808416806156b2576156b26157cf565b92169190910492915050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156156f6576156f66157a0565b500290565b60008282101561570d5761570d6157a0565b500390565b60006bffffffffffffffffffffffff83811690831681811015615737576157376157a0565b039392505050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415615771576157716157a0565b5060010190565b600067ffffffffffffffff80831681811415615796576157966157a0565b6001019392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4
e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6bffffffffffffffffffffffff81168114613bbe57600080fdfea164736f6c6343000806000a496e697469616c697a61626c653a20636f6e7472616374206973206e6f742069", +} + +var OCR2DRRegistryABI = OCR2DRRegistryMetaData.ABI + +var OCR2DRRegistryBin = OCR2DRRegistryMetaData.Bin + +func DeployOCR2DRRegistry(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, linkEthFeed common.Address, oracle common.Address) (common.Address, *types.Transaction, *OCR2DRRegistry, error) { + parsed, err := OCR2DRRegistryMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OCR2DRRegistryBin), backend, link, linkEthFeed, oracle) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OCR2DRRegistry{address: address, abi: *parsed, OCR2DRRegistryCaller: OCR2DRRegistryCaller{contract: contract}, OCR2DRRegistryTransactor: OCR2DRRegistryTransactor{contract: contract}, OCR2DRRegistryFilterer: OCR2DRRegistryFilterer{contract: contract}}, nil +} + +type OCR2DRRegistry struct { + address common.Address + abi abi.ABI + OCR2DRRegistryCaller + OCR2DRRegistryTransactor + OCR2DRRegistryFilterer +} + +type OCR2DRRegistryCaller struct { + contract *bind.BoundContract +} + +type OCR2DRRegistryTransactor struct { + contract *bind.BoundContract +} + +type OCR2DRRegistryFilterer struct { + contract *bind.BoundContract +} + +type OCR2DRRegistrySession struct { + Contract *OCR2DRRegistry + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OCR2DRRegistryCallerSession struct { + Contract *OCR2DRRegistryCaller + CallOpts bind.CallOpts +} + +type OCR2DRRegistryTransactorSession 
struct { + Contract *OCR2DRRegistryTransactor + TransactOpts bind.TransactOpts +} + +type OCR2DRRegistryRaw struct { + Contract *OCR2DRRegistry +} + +type OCR2DRRegistryCallerRaw struct { + Contract *OCR2DRRegistryCaller +} + +type OCR2DRRegistryTransactorRaw struct { + Contract *OCR2DRRegistryTransactor +} + +func NewOCR2DRRegistry(address common.Address, backend bind.ContractBackend) (*OCR2DRRegistry, error) { + abi, err := abi.JSON(strings.NewReader(OCR2DRRegistryABI)) + if err != nil { + return nil, err + } + contract, err := bindOCR2DRRegistry(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OCR2DRRegistry{address: address, abi: abi, OCR2DRRegistryCaller: OCR2DRRegistryCaller{contract: contract}, OCR2DRRegistryTransactor: OCR2DRRegistryTransactor{contract: contract}, OCR2DRRegistryFilterer: OCR2DRRegistryFilterer{contract: contract}}, nil +} + +func NewOCR2DRRegistryCaller(address common.Address, caller bind.ContractCaller) (*OCR2DRRegistryCaller, error) { + contract, err := bindOCR2DRRegistry(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OCR2DRRegistryCaller{contract: contract}, nil +} + +func NewOCR2DRRegistryTransactor(address common.Address, transactor bind.ContractTransactor) (*OCR2DRRegistryTransactor, error) { + contract, err := bindOCR2DRRegistry(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OCR2DRRegistryTransactor{contract: contract}, nil +} + +func NewOCR2DRRegistryFilterer(address common.Address, filterer bind.ContractFilterer) (*OCR2DRRegistryFilterer, error) { + contract, err := bindOCR2DRRegistry(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OCR2DRRegistryFilterer{contract: contract}, nil +} + +func bindOCR2DRRegistry(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := 
OCR2DRRegistryMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DRRegistry.Contract.OCR2DRRegistryCaller.contract.Call(opts, result, method, params...) +} + +func (_OCR2DRRegistry *OCR2DRRegistryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OCR2DRRegistryTransactor.contract.Transfer(opts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OCR2DRRegistryTransactor.contract.Transact(opts, method, params...) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DRRegistry.Contract.contract.Call(opts, result, method, params...) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.contract.Transfer(opts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.contract.Transact(opts, method, params...) 
+} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "MAX_CONSUMERS") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) MAXCONSUMERS() (uint16, error) { + return _OCR2DRRegistry.Contract.MAXCONSUMERS(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) MAXCONSUMERS() (uint16, error) { + return _OCR2DRRegistry.Contract.MAXCONSUMERS(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) EstimateCost(opts *bind.CallOpts, gasLimit uint32, gasPrice *big.Int, donFee *big.Int, registryFee *big.Int) (*big.Int, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "estimateCost", gasLimit, gasPrice, donFee, registryFee) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) EstimateCost(gasLimit uint32, gasPrice *big.Int, donFee *big.Int, registryFee *big.Int) (*big.Int, error) { + return _OCR2DRRegistry.Contract.EstimateCost(&_OCR2DRRegistry.CallOpts, gasLimit, gasPrice, donFee, registryFee) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) EstimateCost(gasLimit uint32, gasPrice *big.Int, donFee *big.Int, registryFee *big.Int) (*big.Int, error) { + return _OCR2DRRegistry.Contract.EstimateCost(&_OCR2DRRegistry.CallOpts, gasLimit, gasPrice, donFee, registryFee) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getAuthorizedSenders") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], 
new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetAuthorizedSenders() ([]common.Address, error) { + return _OCR2DRRegistry.Contract.GetAuthorizedSenders(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetAuthorizedSenders() ([]common.Address, error) { + return _OCR2DRRegistry.Contract.GetAuthorizedSenders(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetConfig(opts *bind.CallOpts) (GetConfig, + + error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getConfig") + + outstruct := new(GetConfig) + if err != nil { + return *outstruct, err + } + + outstruct.MaxGasLimit = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.StalenessSeconds = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.GasAfterPaymentCalculation = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.FallbackWeiPerUnitLink = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.GasOverhead = *abi.ConvertType(out[4], new(uint32)).(*uint32) + outstruct.LinkAddress = *abi.ConvertType(out[5], new(common.Address)).(*common.Address) + outstruct.LinkPriceFeed = *abi.ConvertType(out[6], new(common.Address)).(*common.Address) + + return *outstruct, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetConfig() (GetConfig, + + error) { + return _OCR2DRRegistry.Contract.GetConfig(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetConfig() (GetConfig, + + error) { + return _OCR2DRRegistry.Contract.GetConfig(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetCurrentsubscriptionId(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getCurrentsubscriptionId") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + 
return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetCurrentsubscriptionId() (uint64, error) { + return _OCR2DRRegistry.Contract.GetCurrentsubscriptionId(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetCurrentsubscriptionId() (uint64, error) { + return _OCR2DRRegistry.Contract.GetCurrentsubscriptionId(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetRequestConfig(opts *bind.CallOpts) (uint32, []common.Address, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getRequestConfig") + + if err != nil { + return *new(uint32), *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + out1 := *abi.ConvertType(out[1], new([]common.Address)).(*[]common.Address) + + return out0, out1, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetRequestConfig() (uint32, []common.Address, error) { + return _OCR2DRRegistry.Contract.GetRequestConfig(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetRequestConfig() (uint32, []common.Address, error) { + return _OCR2DRRegistry.Contract.GetRequestConfig(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetRequiredFee(opts *bind.CallOpts, arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getRequiredFee", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetRequiredFee(arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) { + return _OCR2DRRegistry.Contract.GetRequiredFee(&_OCR2DRRegistry.CallOpts, arg0, arg1) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetRequiredFee(arg0 []byte, arg1 
IFunctionsBillingRegistryRequestBilling) (*big.Int, error) { + return _OCR2DRRegistry.Contract.GetRequiredFee(&_OCR2DRRegistry.CallOpts, arg0, arg1) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetSubscription(opts *bind.CallOpts, subscriptionId uint64) (GetSubscription, + + error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getSubscription", subscriptionId) + + outstruct := new(GetSubscription) + if err != nil { + return *outstruct, err + } + + outstruct.Balance = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Owner = *abi.ConvertType(out[1], new(common.Address)).(*common.Address) + outstruct.Consumers = *abi.ConvertType(out[2], new([]common.Address)).(*[]common.Address) + + return *outstruct, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetSubscription(subscriptionId uint64) (GetSubscription, + + error) { + return _OCR2DRRegistry.Contract.GetSubscription(&_OCR2DRRegistry.CallOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetSubscription(subscriptionId uint64) (GetSubscription, + + error) { + return _OCR2DRRegistry.Contract.GetSubscription(&_OCR2DRRegistry.CallOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetSubscriptionOwner(opts *bind.CallOpts, subscriptionId uint64) (common.Address, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getSubscriptionOwner", subscriptionId) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetSubscriptionOwner(subscriptionId uint64) (common.Address, error) { + return _OCR2DRRegistry.Contract.GetSubscriptionOwner(&_OCR2DRRegistry.CallOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetSubscriptionOwner(subscriptionId uint64) (common.Address, error) { + return 
_OCR2DRRegistry.Contract.GetSubscriptionOwner(&_OCR2DRRegistry.CallOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetTotalBalance(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getTotalBalance") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetTotalBalance() (*big.Int, error) { + return _OCR2DRRegistry.Contract.GetTotalBalance(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetTotalBalance() (*big.Int, error) { + return _OCR2DRRegistry.Contract.GetTotalBalance(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "isAuthorizedSender", sender) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _OCR2DRRegistry.Contract.IsAuthorizedSender(&_OCR2DRRegistry.CallOpts, sender) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _OCR2DRRegistry.Contract.IsAuthorizedSender(&_OCR2DRRegistry.CallOpts, sender) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) Owner() (common.Address, error) { + return 
_OCR2DRRegistry.Contract.Owner(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) Owner() (common.Address, error) { + return _OCR2DRRegistry.Contract.Owner(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) Paused(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "paused") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) Paused() (bool, error) { + return _OCR2DRRegistry.Contract.Paused(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) Paused() (bool, error) { + return _OCR2DRRegistry.Contract.Paused(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) PendingRequestExists(opts *bind.CallOpts, subscriptionId uint64) (bool, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "pendingRequestExists", subscriptionId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) PendingRequestExists(subscriptionId uint64) (bool, error) { + return _OCR2DRRegistry.Contract.PendingRequestExists(&_OCR2DRRegistry.CallOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) PendingRequestExists(subscriptionId uint64) (bool, error) { + return _OCR2DRRegistry.Contract.PendingRequestExists(&_OCR2DRRegistry.CallOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "acceptOwnership") +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) AcceptOwnership() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.AcceptOwnership(&_OCR2DRRegistry.TransactOpts) +} 
+ +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.AcceptOwnership(&_OCR2DRRegistry.TransactOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "acceptSubscriptionOwnerTransfer", subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) AcceptSubscriptionOwnerTransfer(subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.AcceptSubscriptionOwnerTransfer(&_OCR2DRRegistry.TransactOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) AcceptSubscriptionOwnerTransfer(subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.AcceptSubscriptionOwnerTransfer(&_OCR2DRRegistry.TransactOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) AddConsumer(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "addConsumer", subscriptionId, consumer) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) AddConsumer(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.AddConsumer(&_OCR2DRRegistry.TransactOpts, subscriptionId, consumer) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) AddConsumer(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.AddConsumer(&_OCR2DRRegistry.TransactOpts, subscriptionId, consumer) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) CancelSubscription(opts *bind.TransactOpts, subscriptionId uint64, to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "cancelSubscription", subscriptionId, to) +} + +func 
(_OCR2DRRegistry *OCR2DRRegistrySession) CancelSubscription(subscriptionId uint64, to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.CancelSubscription(&_OCR2DRRegistry.TransactOpts, subscriptionId, to) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) CancelSubscription(subscriptionId uint64, to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.CancelSubscription(&_OCR2DRRegistry.TransactOpts, subscriptionId, to) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "createSubscription") +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) CreateSubscription() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.CreateSubscription(&_OCR2DRRegistry.TransactOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) CreateSubscription() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.CreateSubscription(&_OCR2DRRegistry.TransactOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) FulfillAndBill(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte, transmitter common.Address, signers [31]common.Address, signerCount uint8, reportValidationGas *big.Int, initialGas *big.Int) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "fulfillAndBill", requestId, response, err, transmitter, signers, signerCount, reportValidationGas, initialGas) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) FulfillAndBill(requestId [32]byte, response []byte, err []byte, transmitter common.Address, signers [31]common.Address, signerCount uint8, reportValidationGas *big.Int, initialGas *big.Int) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.FulfillAndBill(&_OCR2DRRegistry.TransactOpts, requestId, response, err, transmitter, signers, signerCount, reportValidationGas, initialGas) +} + +func 
(_OCR2DRRegistry *OCR2DRRegistryTransactorSession) FulfillAndBill(requestId [32]byte, response []byte, err []byte, transmitter common.Address, signers [31]common.Address, signerCount uint8, reportValidationGas *big.Int, initialGas *big.Int) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.FulfillAndBill(&_OCR2DRRegistry.TransactOpts, requestId, response, err, transmitter, signers, signerCount, reportValidationGas, initialGas) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) Initialize(opts *bind.TransactOpts, link common.Address, linkEthFeed common.Address, oracle common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "initialize", link, linkEthFeed, oracle) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) Initialize(link common.Address, linkEthFeed common.Address, oracle common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.Initialize(&_OCR2DRRegistry.TransactOpts, link, linkEthFeed, oracle) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) Initialize(link common.Address, linkEthFeed common.Address, oracle common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.Initialize(&_OCR2DRRegistry.TransactOpts, link, linkEthFeed, oracle) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "onTokenTransfer", arg0, amount, data) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OnTokenTransfer(&_OCR2DRRegistry.TransactOpts, arg0, amount, data) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return 
_OCR2DRRegistry.Contract.OnTokenTransfer(&_OCR2DRRegistry.TransactOpts, arg0, amount, data) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "oracleWithdraw", recipient, amount) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) OracleWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OracleWithdraw(&_OCR2DRRegistry.TransactOpts, recipient, amount) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) OracleWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OracleWithdraw(&_OCR2DRRegistry.TransactOpts, recipient, amount) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) OwnerCancelSubscription(opts *bind.TransactOpts, subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "ownerCancelSubscription", subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) OwnerCancelSubscription(subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OwnerCancelSubscription(&_OCR2DRRegistry.TransactOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) OwnerCancelSubscription(subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OwnerCancelSubscription(&_OCR2DRRegistry.TransactOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "pause") +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) Pause() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.Pause(&_OCR2DRRegistry.TransactOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) Pause() (*types.Transaction, error) { + 
return _OCR2DRRegistry.Contract.Pause(&_OCR2DRRegistry.TransactOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "recoverFunds", to) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.RecoverFunds(&_OCR2DRRegistry.TransactOpts, to) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.RecoverFunds(&_OCR2DRRegistry.TransactOpts, to) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) RemoveConsumer(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "removeConsumer", subscriptionId, consumer) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) RemoveConsumer(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.RemoveConsumer(&_OCR2DRRegistry.TransactOpts, subscriptionId, consumer) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) RemoveConsumer(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.RemoveConsumer(&_OCR2DRRegistry.TransactOpts, subscriptionId, consumer) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subscriptionId uint64, newOwner common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "requestSubscriptionOwnerTransfer", subscriptionId, newOwner) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) RequestSubscriptionOwnerTransfer(subscriptionId uint64, newOwner common.Address) (*types.Transaction, error) { + return 
_OCR2DRRegistry.Contract.RequestSubscriptionOwnerTransfer(&_OCR2DRRegistry.TransactOpts, subscriptionId, newOwner) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) RequestSubscriptionOwnerTransfer(subscriptionId uint64, newOwner common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.RequestSubscriptionOwnerTransfer(&_OCR2DRRegistry.TransactOpts, subscriptionId, newOwner) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "setAuthorizedSenders", senders) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.SetAuthorizedSenders(&_OCR2DRRegistry.TransactOpts, senders) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.SetAuthorizedSenders(&_OCR2DRRegistry.TransactOpts, senders) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) SetConfig(opts *bind.TransactOpts, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation *big.Int, fallbackWeiPerUnitLink *big.Int, gasOverhead uint32, requestTimeoutSeconds uint32) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "setConfig", maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, gasOverhead, requestTimeoutSeconds) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) SetConfig(maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation *big.Int, fallbackWeiPerUnitLink *big.Int, gasOverhead uint32, requestTimeoutSeconds uint32) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.SetConfig(&_OCR2DRRegistry.TransactOpts, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, 
gasOverhead, requestTimeoutSeconds) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) SetConfig(maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation *big.Int, fallbackWeiPerUnitLink *big.Int, gasOverhead uint32, requestTimeoutSeconds uint32) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.SetConfig(&_OCR2DRRegistry.TransactOpts, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, gasOverhead, requestTimeoutSeconds) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) StartBilling(opts *bind.TransactOpts, data []byte, billing IFunctionsBillingRegistryRequestBilling) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "startBilling", data, billing) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) StartBilling(data []byte, billing IFunctionsBillingRegistryRequestBilling) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.StartBilling(&_OCR2DRRegistry.TransactOpts, data, billing) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) StartBilling(data []byte, billing IFunctionsBillingRegistryRequestBilling) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.StartBilling(&_OCR2DRRegistry.TransactOpts, data, billing) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) TimeoutRequests(opts *bind.TransactOpts, requestIdsToTimeout [][32]byte) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "timeoutRequests", requestIdsToTimeout) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) TimeoutRequests(requestIdsToTimeout [][32]byte) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.TimeoutRequests(&_OCR2DRRegistry.TransactOpts, requestIdsToTimeout) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) TimeoutRequests(requestIdsToTimeout [][32]byte) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.TimeoutRequests(&_OCR2DRRegistry.TransactOpts, requestIdsToTimeout) +} + 
+func (_OCR2DRRegistry *OCR2DRRegistryTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "transferOwnership", to) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.TransferOwnership(&_OCR2DRRegistry.TransactOpts, to) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.TransferOwnership(&_OCR2DRRegistry.TransactOpts, to) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "unpause") +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) Unpause() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.Unpause(&_OCR2DRRegistry.TransactOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) Unpause() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.Unpause(&_OCR2DRRegistry.TransactOpts) +} + +type OCR2DRRegistryAuthorizedSendersChangedIterator struct { + Event *OCR2DRRegistryAuthorizedSendersChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryAuthorizedSendersChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return 
false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryAuthorizedSendersChangedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryAuthorizedSendersChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryAuthorizedSendersChanged struct { + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*OCR2DRRegistryAuthorizedSendersChangedIterator, error) { + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return &OCR2DRRegistryAuthorizedSendersChangedIterator{contract: _OCR2DRRegistry.contract, event: "AuthorizedSendersChanged", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryAuthorizedSendersChanged) (event.Subscription, error) { + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryAuthorizedSendersChanged) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseAuthorizedSendersChanged(log types.Log) (*OCR2DRRegistryAuthorizedSendersChanged, error) { + event := new(OCR2DRRegistryAuthorizedSendersChanged) + if err := 
_OCR2DRRegistry.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryBillingEndIterator struct { + Event *OCR2DRRegistryBillingEnd + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryBillingEndIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryBillingEnd) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryBillingEnd) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryBillingEndIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryBillingEndIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryBillingEnd struct { + RequestId [32]byte + SubscriptionId uint64 + SignerPayment *big.Int + TransmitterPayment *big.Int + TotalCost *big.Int + Success bool + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterBillingEnd(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DRRegistryBillingEndIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "BillingEnd", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistryBillingEndIterator{contract: _OCR2DRRegistry.contract, event: "BillingEnd", logs: logs, sub: sub}, nil +} 
+ +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchBillingEnd(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryBillingEnd, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "BillingEnd", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryBillingEnd) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "BillingEnd", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseBillingEnd(log types.Log) (*OCR2DRRegistryBillingEnd, error) { + event := new(OCR2DRRegistryBillingEnd) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "BillingEnd", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryBillingStartIterator struct { + Event *OCR2DRRegistryBillingStart + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryBillingStartIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryBillingStart) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryBillingStart) + if err := it.contract.UnpackLog(it.Event, it.event, log); err 
!= nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryBillingStartIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryBillingStartIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryBillingStart struct { + RequestId [32]byte + Commitment FunctionsBillingRegistryCommitment + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterBillingStart(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DRRegistryBillingStartIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "BillingStart", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistryBillingStartIterator{contract: _OCR2DRRegistry.contract, event: "BillingStart", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchBillingStart(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryBillingStart, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "BillingStart", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryBillingStart) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "BillingStart", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + 
} + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseBillingStart(log types.Log) (*OCR2DRRegistryBillingStart, error) { + event := new(OCR2DRRegistryBillingStart) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "BillingStart", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryConfigSetIterator struct { + Event *OCR2DRRegistryConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryConfigSetIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryConfigSet struct { + MaxGasLimit uint32 + StalenessSeconds uint32 + GasAfterPaymentCalculation *big.Int + FallbackWeiPerUnitLink *big.Int + GasOverhead uint32 + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterConfigSet(opts *bind.FilterOpts) (*OCR2DRRegistryConfigSetIterator, error) { + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &OCR2DRRegistryConfigSetIterator{contract: _OCR2DRRegistry.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + 
+func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryConfigSet) (event.Subscription, error) { + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryConfigSet) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseConfigSet(log types.Log) (*OCR2DRRegistryConfigSet, error) { + event := new(OCR2DRRegistryConfigSet) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryFundsRecoveredIterator struct { + Event *OCR2DRRegistryFundsRecovered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryFundsRecoveredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func 
(it *OCR2DRRegistryFundsRecoveredIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryFundsRecoveredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryFundsRecovered struct { + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterFundsRecovered(opts *bind.FilterOpts) (*OCR2DRRegistryFundsRecoveredIterator, error) { + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return &OCR2DRRegistryFundsRecoveredIterator{contract: _OCR2DRRegistry.contract, event: "FundsRecovered", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryFundsRecovered) (event.Subscription, error) { + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryFundsRecovered) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseFundsRecovered(log types.Log) (*OCR2DRRegistryFundsRecovered, error) { + event := new(OCR2DRRegistryFundsRecovered) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryInitializedIterator struct { + Event *OCR2DRRegistryInitialized + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done 
bool + fail error +} + +func (it *OCR2DRRegistryInitializedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryInitializedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryInitializedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryInitialized struct { + Version uint8 + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterInitialized(opts *bind.FilterOpts) (*OCR2DRRegistryInitializedIterator, error) { + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return &OCR2DRRegistryInitializedIterator{contract: _OCR2DRRegistry.contract, event: "Initialized", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchInitialized(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryInitialized) (event.Subscription, error) { + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryInitialized) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "Initialized", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseInitialized(log types.Log) (*OCR2DRRegistryInitialized, error) { + event := new(OCR2DRRegistryInitialized) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "Initialized", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryOwnershipTransferRequestedIterator struct { + Event *OCR2DRRegistryOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRRegistryOwnershipTransferRequestedIterator, error) { + + 
var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistryOwnershipTransferRequestedIterator{contract: _OCR2DRRegistry.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryOwnershipTransferRequested) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseOwnershipTransferRequested(log types.Log) (*OCR2DRRegistryOwnershipTransferRequested, error) { + event := new(OCR2DRRegistryOwnershipTransferRequested) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil 
+} + +type OCR2DRRegistryOwnershipTransferredIterator struct { + Event *OCR2DRRegistryOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRRegistryOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistryOwnershipTransferredIterator{contract: _OCR2DRRegistry.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func 
(_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryOwnershipTransferred) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseOwnershipTransferred(log types.Log) (*OCR2DRRegistryOwnershipTransferred, error) { + event := new(OCR2DRRegistryOwnershipTransferred) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryPausedIterator struct { + Event *OCR2DRRegistryPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } 
+ + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryPausedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryPaused struct { + Account common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterPaused(opts *bind.FilterOpts) (*OCR2DRRegistryPausedIterator, error) { + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &OCR2DRRegistryPausedIterator{contract: _OCR2DRRegistry.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryPaused) (event.Subscription, error) { + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryPaused) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParsePaused(log types.Log) (*OCR2DRRegistryPaused, error) { + event := new(OCR2DRRegistryPaused) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
OCR2DRRegistryRequestTimedOutIterator struct { + Event *OCR2DRRegistryRequestTimedOut + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryRequestTimedOutIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryRequestTimedOut) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryRequestTimedOut) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryRequestTimedOutIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryRequestTimedOutIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryRequestTimedOut struct { + RequestId [32]byte + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterRequestTimedOut(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DRRegistryRequestTimedOutIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "RequestTimedOut", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistryRequestTimedOutIterator{contract: _OCR2DRRegistry.contract, event: "RequestTimedOut", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchRequestTimedOut(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryRequestTimedOut, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule 
[]interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "RequestTimedOut", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryRequestTimedOut) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "RequestTimedOut", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseRequestTimedOut(log types.Log) (*OCR2DRRegistryRequestTimedOut, error) { + event := new(OCR2DRRegistryRequestTimedOut) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "RequestTimedOut", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistrySubscriptionCanceledIterator struct { + Event *OCR2DRRegistrySubscriptionCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = 
true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionCanceledIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionCanceled struct { + SubscriptionId uint64 + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterSubscriptionCanceled(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionCanceledIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionCanceled", subscriptionIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistrySubscriptionCanceledIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionCanceled", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionCanceled, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionCanceled", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionCanceled) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionCanceled(log types.Log) (*OCR2DRRegistrySubscriptionCanceled, error) { + event := new(OCR2DRRegistrySubscriptionCanceled) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistrySubscriptionConsumerAddedIterator struct { + Event *OCR2DRRegistrySubscriptionConsumerAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionConsumerAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionConsumerAddedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionConsumerAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionConsumerAdded struct { + SubscriptionId uint64 + Consumer common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionConsumerAddedIterator, error) { + + var subscriptionIdRule []interface{} + for _, 
subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionConsumerAdded", subscriptionIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistrySubscriptionConsumerAddedIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionConsumerAdded", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionConsumerAdded, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionConsumerAdded", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionConsumerAdded) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionConsumerAdded(log types.Log) (*OCR2DRRegistrySubscriptionConsumerAdded, error) { + event := new(OCR2DRRegistrySubscriptionConsumerAdded) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistrySubscriptionConsumerRemovedIterator struct { + Event *OCR2DRRegistrySubscriptionConsumerRemoved + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionConsumerRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionConsumerRemovedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionConsumerRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionConsumerRemoved struct { + SubscriptionId uint64 + Consumer common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionConsumerRemovedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionConsumerRemoved", subscriptionIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistrySubscriptionConsumerRemovedIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionConsumerRemoved", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink 
chan<- *OCR2DRRegistrySubscriptionConsumerRemoved, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionConsumerRemoved", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionConsumerRemoved) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionConsumerRemoved(log types.Log) (*OCR2DRRegistrySubscriptionConsumerRemoved, error) { + event := new(OCR2DRRegistrySubscriptionConsumerRemoved) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistrySubscriptionCreatedIterator struct { + Event *OCR2DRRegistrySubscriptionCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: 
+ it.Event = new(OCR2DRRegistrySubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionCreatedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionCreated struct { + SubscriptionId uint64 + Owner common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterSubscriptionCreated(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionCreatedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionCreated", subscriptionIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistrySubscriptionCreatedIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionCreated", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionCreated, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionCreated", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionCreated) + if err := 
_OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionCreated(log types.Log) (*OCR2DRRegistrySubscriptionCreated, error) { + event := new(OCR2DRRegistrySubscriptionCreated) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistrySubscriptionFundedIterator struct { + Event *OCR2DRRegistrySubscriptionFunded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionFundedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionFundedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionFundedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionFunded struct { + SubscriptionId uint64 + OldBalance *big.Int + NewBalance *big.Int + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) 
FilterSubscriptionFunded(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionFundedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionFunded", subscriptionIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistrySubscriptionFundedIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionFunded", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionFunded, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionFunded", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionFunded) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionFunded(log types.Log) (*OCR2DRRegistrySubscriptionFunded, error) { + event := new(OCR2DRRegistrySubscriptionFunded) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator struct { + Event *OCR2DRRegistrySubscriptionOwnerTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionOwnerTransferRequested struct { + SubscriptionId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionOwnerTransferRequested", subscriptionIdRule) + if err != nil { + return nil, err + } + return 
&OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionOwnerTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionOwnerTransferRequested, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionOwnerTransferRequested", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionOwnerTransferRequested) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionOwnerTransferRequested(log types.Log) (*OCR2DRRegistrySubscriptionOwnerTransferRequested, error) { + event := new(OCR2DRRegistrySubscriptionOwnerTransferRequested) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistrySubscriptionOwnerTransferredIterator struct { + Event *OCR2DRRegistrySubscriptionOwnerTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionOwnerTransferredIterator) Next() 
bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionOwnerTransferredIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionOwnerTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionOwnerTransferred struct { + SubscriptionId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionOwnerTransferredIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionOwnerTransferred", subscriptionIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistrySubscriptionOwnerTransferredIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionOwnerTransferred", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionOwnerTransferred, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, 
subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionOwnerTransferred", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionOwnerTransferred) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionOwnerTransferred(log types.Log) (*OCR2DRRegistrySubscriptionOwnerTransferred, error) { + event := new(OCR2DRRegistrySubscriptionOwnerTransferred) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryUnpausedIterator struct { + Event *OCR2DRRegistryUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case 
err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryUnpausedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterUnpaused(opts *bind.FilterOpts) (*OCR2DRRegistryUnpausedIterator, error) { + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &OCR2DRRegistryUnpausedIterator{contract: _OCR2DRRegistry.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryUnpaused) (event.Subscription, error) { + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryUnpaused) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseUnpaused(log types.Log) (*OCR2DRRegistryUnpaused, error) { + event := new(OCR2DRRegistryUnpaused) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetConfig struct { + MaxGasLimit uint32 + StalenessSeconds uint32 + GasAfterPaymentCalculation *big.Int + FallbackWeiPerUnitLink *big.Int + GasOverhead uint32 + LinkAddress common.Address + LinkPriceFeed 
common.Address +} +type GetSubscription struct { + Balance *big.Int + Owner common.Address + Consumers []common.Address +} + +func (_OCR2DRRegistry *OCR2DRRegistry) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _OCR2DRRegistry.abi.Events["AuthorizedSendersChanged"].ID: + return _OCR2DRRegistry.ParseAuthorizedSendersChanged(log) + case _OCR2DRRegistry.abi.Events["BillingEnd"].ID: + return _OCR2DRRegistry.ParseBillingEnd(log) + case _OCR2DRRegistry.abi.Events["BillingStart"].ID: + return _OCR2DRRegistry.ParseBillingStart(log) + case _OCR2DRRegistry.abi.Events["ConfigSet"].ID: + return _OCR2DRRegistry.ParseConfigSet(log) + case _OCR2DRRegistry.abi.Events["FundsRecovered"].ID: + return _OCR2DRRegistry.ParseFundsRecovered(log) + case _OCR2DRRegistry.abi.Events["Initialized"].ID: + return _OCR2DRRegistry.ParseInitialized(log) + case _OCR2DRRegistry.abi.Events["OwnershipTransferRequested"].ID: + return _OCR2DRRegistry.ParseOwnershipTransferRequested(log) + case _OCR2DRRegistry.abi.Events["OwnershipTransferred"].ID: + return _OCR2DRRegistry.ParseOwnershipTransferred(log) + case _OCR2DRRegistry.abi.Events["Paused"].ID: + return _OCR2DRRegistry.ParsePaused(log) + case _OCR2DRRegistry.abi.Events["RequestTimedOut"].ID: + return _OCR2DRRegistry.ParseRequestTimedOut(log) + case _OCR2DRRegistry.abi.Events["SubscriptionCanceled"].ID: + return _OCR2DRRegistry.ParseSubscriptionCanceled(log) + case _OCR2DRRegistry.abi.Events["SubscriptionConsumerAdded"].ID: + return _OCR2DRRegistry.ParseSubscriptionConsumerAdded(log) + case _OCR2DRRegistry.abi.Events["SubscriptionConsumerRemoved"].ID: + return _OCR2DRRegistry.ParseSubscriptionConsumerRemoved(log) + case _OCR2DRRegistry.abi.Events["SubscriptionCreated"].ID: + return _OCR2DRRegistry.ParseSubscriptionCreated(log) + case _OCR2DRRegistry.abi.Events["SubscriptionFunded"].ID: + return _OCR2DRRegistry.ParseSubscriptionFunded(log) + case 
_OCR2DRRegistry.abi.Events["SubscriptionOwnerTransferRequested"].ID: + return _OCR2DRRegistry.ParseSubscriptionOwnerTransferRequested(log) + case _OCR2DRRegistry.abi.Events["SubscriptionOwnerTransferred"].ID: + return _OCR2DRRegistry.ParseSubscriptionOwnerTransferred(log) + case _OCR2DRRegistry.abi.Events["Unpaused"].ID: + return _OCR2DRRegistry.ParseUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OCR2DRRegistryAuthorizedSendersChanged) Topic() common.Hash { + return common.HexToHash("0xf263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0") +} + +func (OCR2DRRegistryBillingEnd) Topic() common.Hash { + return common.HexToHash("0xc8dc973332de19a5f71b6026983110e9c2e04b0c98b87eb771ccb78607fd114f") +} + +func (OCR2DRRegistryBillingStart) Topic() common.Hash { + return common.HexToHash("0x99f7f4e65b4b9fbabd4e357c47ed3099b36e57ecd3a43e84662f34c207d0ebe4") +} + +func (OCR2DRRegistryConfigSet) Topic() common.Hash { + return common.HexToHash("0x24d3d934adfef9b9029d6ffa463c07d0139ed47d26ee23506f85ece2879d2bd4") +} + +func (OCR2DRRegistryFundsRecovered) Topic() common.Hash { + return common.HexToHash("0x59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600") +} + +func (OCR2DRRegistryInitialized) Topic() common.Hash { + return common.HexToHash("0x7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498") +} + +func (OCR2DRRegistryOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (OCR2DRRegistryOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (OCR2DRRegistryPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (OCR2DRRegistryRequestTimedOut) Topic() common.Hash { + return 
common.HexToHash("0xf1ca1e9147be737b04a2b018a79405f687a97de8dd8a2559bbe62357343af414") +} + +func (OCR2DRRegistrySubscriptionCanceled) Topic() common.Hash { + return common.HexToHash("0xe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd49815") +} + +func (OCR2DRRegistrySubscriptionConsumerAdded) Topic() common.Hash { + return common.HexToHash("0x43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e0") +} + +func (OCR2DRRegistrySubscriptionConsumerRemoved) Topic() common.Hash { + return common.HexToHash("0x182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b") +} + +func (OCR2DRRegistrySubscriptionCreated) Topic() common.Hash { + return common.HexToHash("0x464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf") +} + +func (OCR2DRRegistrySubscriptionFunded) Topic() common.Hash { + return common.HexToHash("0xd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f8") +} + +func (OCR2DRRegistrySubscriptionOwnerTransferRequested) Topic() common.Hash { + return common.HexToHash("0x69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be") +} + +func (OCR2DRRegistrySubscriptionOwnerTransferred) Topic() common.Hash { + return common.HexToHash("0x6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f0") +} + +func (OCR2DRRegistryUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (_OCR2DRRegistry *OCR2DRRegistry) Address() common.Address { + return _OCR2DRRegistry.address +} + +type OCR2DRRegistryInterface interface { + MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) + + EstimateCost(opts *bind.CallOpts, gasLimit uint32, gasPrice *big.Int, donFee *big.Int, registryFee *big.Int) (*big.Int, error) + + GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) + + GetConfig(opts *bind.CallOpts) (GetConfig, + + error) + + GetCurrentsubscriptionId(opts *bind.CallOpts) (uint64, error) + + GetRequestConfig(opts 
*bind.CallOpts) (uint32, []common.Address, error) + + GetRequiredFee(opts *bind.CallOpts, arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) + + GetSubscription(opts *bind.CallOpts, subscriptionId uint64) (GetSubscription, + + error) + + GetSubscriptionOwner(opts *bind.CallOpts, subscriptionId uint64) (common.Address, error) + + GetTotalBalance(opts *bind.CallOpts) (*big.Int, error) + + IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Paused(opts *bind.CallOpts) (bool, error) + + PendingRequestExists(opts *bind.CallOpts, subscriptionId uint64) (bool, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subscriptionId uint64) (*types.Transaction, error) + + AddConsumer(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) + + CancelSubscription(opts *bind.TransactOpts, subscriptionId uint64, to common.Address) (*types.Transaction, error) + + CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) + + FulfillAndBill(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte, transmitter common.Address, signers [31]common.Address, signerCount uint8, reportValidationGas *big.Int, initialGas *big.Int) (*types.Transaction, error) + + Initialize(opts *bind.TransactOpts, link common.Address, linkEthFeed common.Address, oracle common.Address) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) + + OwnerCancelSubscription(opts *bind.TransactOpts, subscriptionId uint64) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) (*types.Transaction, error) + + RecoverFunds(opts 
*bind.TransactOpts, to common.Address) (*types.Transaction, error) + + RemoveConsumer(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) + + RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subscriptionId uint64, newOwner common.Address) (*types.Transaction, error) + + SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation *big.Int, fallbackWeiPerUnitLink *big.Int, gasOverhead uint32, requestTimeoutSeconds uint32) (*types.Transaction, error) + + StartBilling(opts *bind.TransactOpts, data []byte, billing IFunctionsBillingRegistryRequestBilling) (*types.Transaction, error) + + TimeoutRequests(opts *bind.TransactOpts, requestIdsToTimeout [][32]byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*OCR2DRRegistryAuthorizedSendersChangedIterator, error) + + WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryAuthorizedSendersChanged) (event.Subscription, error) + + ParseAuthorizedSendersChanged(log types.Log) (*OCR2DRRegistryAuthorizedSendersChanged, error) + + FilterBillingEnd(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DRRegistryBillingEndIterator, error) + + WatchBillingEnd(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryBillingEnd, requestId [][32]byte) (event.Subscription, error) + + ParseBillingEnd(log types.Log) (*OCR2DRRegistryBillingEnd, error) + + FilterBillingStart(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DRRegistryBillingStartIterator, error) + + WatchBillingStart(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryBillingStart, requestId [][32]byte) (event.Subscription, error) + + ParseBillingStart(log 
types.Log) (*OCR2DRRegistryBillingStart, error) + + FilterConfigSet(opts *bind.FilterOpts) (*OCR2DRRegistryConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*OCR2DRRegistryConfigSet, error) + + FilterFundsRecovered(opts *bind.FilterOpts) (*OCR2DRRegistryFundsRecoveredIterator, error) + + WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryFundsRecovered) (event.Subscription, error) + + ParseFundsRecovered(log types.Log) (*OCR2DRRegistryFundsRecovered, error) + + FilterInitialized(opts *bind.FilterOpts) (*OCR2DRRegistryInitializedIterator, error) + + WatchInitialized(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryInitialized) (event.Subscription, error) + + ParseInitialized(log types.Log) (*OCR2DRRegistryInitialized, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRRegistryOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*OCR2DRRegistryOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRRegistryOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*OCR2DRRegistryOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*OCR2DRRegistryPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*OCR2DRRegistryPaused, error) + + 
FilterRequestTimedOut(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DRRegistryRequestTimedOutIterator, error) + + WatchRequestTimedOut(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryRequestTimedOut, requestId [][32]byte) (event.Subscription, error) + + ParseRequestTimedOut(log types.Log) (*OCR2DRRegistryRequestTimedOut, error) + + FilterSubscriptionCanceled(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionCanceledIterator, error) + + WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionCanceled, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionCanceled(log types.Log) (*OCR2DRRegistrySubscriptionCanceled, error) + + FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionConsumerAddedIterator, error) + + WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionConsumerAdded, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerAdded(log types.Log) (*OCR2DRRegistrySubscriptionConsumerAdded, error) + + FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionConsumerRemovedIterator, error) + + WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionConsumerRemoved, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerRemoved(log types.Log) (*OCR2DRRegistrySubscriptionConsumerRemoved, error) + + FilterSubscriptionCreated(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionCreatedIterator, error) + + WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionCreated, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionCreated(log types.Log) (*OCR2DRRegistrySubscriptionCreated, error) + + FilterSubscriptionFunded(opts *bind.FilterOpts, subscriptionId []uint64) 
(*OCR2DRRegistrySubscriptionFundedIterator, error) + + WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionFunded, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionFunded(log types.Log) (*OCR2DRRegistrySubscriptionFunded, error) + + FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator, error) + + WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionOwnerTransferRequested, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferRequested(log types.Log) (*OCR2DRRegistrySubscriptionOwnerTransferRequested, error) + + FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionOwnerTransferredIterator, error) + + WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionOwnerTransferred, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferred(log types.Log) (*OCR2DRRegistrySubscriptionOwnerTransferred, error) + + FilterUnpaused(opts *bind.FilterOpts) (*OCR2DRRegistryUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*OCR2DRRegistryUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/functions/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/functions/generation/generated-wrapper-dependency-versions-do-not-edit.txt new file mode 100644 index 00000000..102e3778 --- /dev/null +++ b/core/gethwrappers/functions/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -0,0 +1,16 @@ +GETH_VERSION: 1.13.8 +functions: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsRequest.abi 
../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsRequest.bin 3c972870b0afeb6d73a29ebb182f24956a2cebb127b21c4f867d1ecf19a762db +functions_allow_list: ../../../contracts/solc/v0.8.19/functions/v1_X/TermsOfServiceAllowList.abi ../../../contracts/solc/v0.8.19/functions/v1_X/TermsOfServiceAllowList.bin 0c2156289e11f884ca6e92bf851192d3917c9094a0a301bcefa61266678d0e57 +functions_billing_registry_events_mock: ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsBillingRegistryEventsMock.abi ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsBillingRegistryEventsMock.bin 50deeb883bd9c3729702be335c0388f9d8553bab4be5e26ecacac496a89e2b77 +functions_client: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsClient.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsClient.bin 2368f537a04489c720a46733f8596c4fc88a31062ecfa966d05f25dd98608aca +functions_client_example: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsClientExample.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsClientExample.bin abf32e69f268f40e8530eb8d8e96bf310b798a4c0049a58022d9d2fb527b601b +functions_coordinator: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsCoordinator.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsCoordinator.bin 9686bdf83a0ce09ad07e81f6af52889735ea5af5709ffd018bb7b75e5d284c5e +functions_load_test_client: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsLoadTestClient.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsLoadTestClient.bin c8dbbd5ebb34435800d6674700068837c3a252db60046a14b0e61e829db517de +functions_oracle_events_mock: ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsOracleEventsMock.abi ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsOracleEventsMock.bin 3ca70f966f8fe751987f0ccb50bebb6aa5be77e4a9f835d1ae99e0e9bfb7d52c +functions_router: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsRouter.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsRouter.bin 
1f6d18f9e0846ad74b37a0a6acef5942ab73ace1e84307f201899f69e732e776 +functions_v1_events_mock: ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsV1EventsMock.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsV1EventsMock.bin 0f0ba42e0cc33c7abc8b8fd4fdfce903748a169886dd5f16cfdd56e75bcf708d +ocr2dr: ../../../contracts/solc/v0.8.6/functions/v0_0_0/Functions.abi ../../../contracts/solc/v0.8.6/functions/v0_0_0/Functions.bin d9a794b33f47cc57563d216f7cf3a612309fc3062356a27e30005cf1d59e449d +ocr2dr_client: ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsClient.abi ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsClient.bin 84aa63f9dbc5c7eac240db699b09e613ca4c6cd56dab10bdc25b02461b717e21 +ocr2dr_client_example: ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsClientExample.abi ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsClientExample.bin a978d9b52a5a2da19eef0975979de256e62980a0cfb3084fe6d66a351b4ef534 +ocr2dr_oracle: ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsOracleWithInit.abi ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsOracleWithInit.bin b9084b34b0ee2e89adc72f068a868f0f22e361c96677fe20e44801e84bbd0c18 +ocr2dr_registry: ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsBillingRegistryWithInit.abi ../../../contracts/solc/v0.8.6/functions/v0_0_0/FunctionsBillingRegistryWithInit.bin be588d5036cbeb8d67bbc124fefbdc6fd354802a30b8e87093b2b94a6549741b diff --git a/core/gethwrappers/functions/go_generate.go b/core/gethwrappers/functions/go_generate.go new file mode 100644 index 00000000..46880b66 --- /dev/null +++ b/core/gethwrappers/functions/go_generate.go @@ -0,0 +1,15 @@ +// Package gethwrappers provides tools for wrapping solidity contracts with +// golang packages, using abigen. 
+package gethwrappers + +// Plugin Functions (OCR2DR) + +// Version 1 (Mainnet Preview) +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsRequest.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsRequest.bin Functions functions +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsClient.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsClient.bin FunctionsClient functions_client +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsClientExample.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsClientExample.bin FunctionsClientExample functions_client_example +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsLoadTestClient.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsLoadTestClient.bin FunctionsLoadTestClient functions_load_test_client +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsCoordinator.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsCoordinator.bin FunctionsCoordinator functions_coordinator +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsRouter.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsRouter.bin FunctionsRouter functions_router +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/functions/v1_X/TermsOfServiceAllowList.abi ../../../contracts/solc/v0.8.19/functions/v1_X/TermsOfServiceAllowList.bin TermsOfServiceAllowList functions_allow_list +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsV1EventsMock.abi ../../../contracts/solc/v0.8.19/functions/v1_X/FunctionsV1EventsMock.bin FunctionsV1EventsMock functions_v1_events_mock diff --git 
a/core/gethwrappers/generated/aggregator_v2v3_interface/aggregator_v2v3_interface.go b/core/gethwrappers/generated/aggregator_v2v3_interface/aggregator_v2v3_interface.go new file mode 100644 index 00000000..aa3bfacc --- /dev/null +++ b/core/gethwrappers/generated/aggregator_v2v3_interface/aggregator_v2v3_interface.go @@ -0,0 +1,750 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package aggregator_v2v3_interface + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var AggregatorV2V3InterfaceMetaData = &bind.MetaData{ + ABI: 
"[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int256\",\"name\":\"current\",\"type\":\"int256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"}],\"name\":\"AnswerUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"startedBy\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"}],\"name\":\"NewRound\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"description\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"}],\"name\":\"getAnswer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint80\",\"name\":\"_roundId\",\"type\":\"uint80\"}],\"name\":\"getRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"}],\"name\":\"getTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"
type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestAnswer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRound\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", +} + +var AggregatorV2V3InterfaceABI = AggregatorV2V3InterfaceMetaData.ABI + +type AggregatorV2V3Interface struct { + address common.Address + abi abi.ABI + AggregatorV2V3InterfaceCaller + AggregatorV2V3InterfaceTransactor + AggregatorV2V3InterfaceFilterer +} + +type AggregatorV2V3InterfaceCaller struct { + contract *bind.BoundContract +} + +type AggregatorV2V3InterfaceTransactor struct { + contract *bind.BoundContract +} + +type AggregatorV2V3InterfaceFilterer struct { + contract *bind.BoundContract +} + +type AggregatorV2V3InterfaceSession struct { + Contract *AggregatorV2V3Interface + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AggregatorV2V3InterfaceCallerSession struct { + Contract *AggregatorV2V3InterfaceCaller + 
CallOpts bind.CallOpts +} + +type AggregatorV2V3InterfaceTransactorSession struct { + Contract *AggregatorV2V3InterfaceTransactor + TransactOpts bind.TransactOpts +} + +type AggregatorV2V3InterfaceRaw struct { + Contract *AggregatorV2V3Interface +} + +type AggregatorV2V3InterfaceCallerRaw struct { + Contract *AggregatorV2V3InterfaceCaller +} + +type AggregatorV2V3InterfaceTransactorRaw struct { + Contract *AggregatorV2V3InterfaceTransactor +} + +func NewAggregatorV2V3Interface(address common.Address, backend bind.ContractBackend) (*AggregatorV2V3Interface, error) { + abi, err := abi.JSON(strings.NewReader(AggregatorV2V3InterfaceABI)) + if err != nil { + return nil, err + } + contract, err := bindAggregatorV2V3Interface(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AggregatorV2V3Interface{address: address, abi: abi, AggregatorV2V3InterfaceCaller: AggregatorV2V3InterfaceCaller{contract: contract}, AggregatorV2V3InterfaceTransactor: AggregatorV2V3InterfaceTransactor{contract: contract}, AggregatorV2V3InterfaceFilterer: AggregatorV2V3InterfaceFilterer{contract: contract}}, nil +} + +func NewAggregatorV2V3InterfaceCaller(address common.Address, caller bind.ContractCaller) (*AggregatorV2V3InterfaceCaller, error) { + contract, err := bindAggregatorV2V3Interface(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AggregatorV2V3InterfaceCaller{contract: contract}, nil +} + +func NewAggregatorV2V3InterfaceTransactor(address common.Address, transactor bind.ContractTransactor) (*AggregatorV2V3InterfaceTransactor, error) { + contract, err := bindAggregatorV2V3Interface(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AggregatorV2V3InterfaceTransactor{contract: contract}, nil +} + +func NewAggregatorV2V3InterfaceFilterer(address common.Address, filterer bind.ContractFilterer) (*AggregatorV2V3InterfaceFilterer, error) { + contract, err := bindAggregatorV2V3Interface(address, nil, nil, 
filterer) + if err != nil { + return nil, err + } + return &AggregatorV2V3InterfaceFilterer{contract: contract}, nil +} + +func bindAggregatorV2V3Interface(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := AggregatorV2V3InterfaceMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AggregatorV2V3Interface.Contract.AggregatorV2V3InterfaceCaller.contract.Call(opts, result, method, params...) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AggregatorV2V3Interface.Contract.AggregatorV2V3InterfaceTransactor.contract.Transfer(opts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AggregatorV2V3Interface.Contract.AggregatorV2V3InterfaceTransactor.contract.Transact(opts, method, params...) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AggregatorV2V3Interface.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AggregatorV2V3Interface.Contract.contract.Transfer(opts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AggregatorV2V3Interface.Contract.contract.Transact(opts, method, params...) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _AggregatorV2V3Interface.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceSession) Decimals() (uint8, error) { + return _AggregatorV2V3Interface.Contract.Decimals(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCallerSession) Decimals() (uint8, error) { + return _AggregatorV2V3Interface.Contract.Decimals(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCaller) Description(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _AggregatorV2V3Interface.contract.Call(opts, &out, "description") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceSession) Description() (string, error) { + return _AggregatorV2V3Interface.Contract.Description(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCallerSession) Description() (string, error) { + return _AggregatorV2V3Interface.Contract.Description(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface 
*AggregatorV2V3InterfaceCaller) GetAnswer(opts *bind.CallOpts, roundId *big.Int) (*big.Int, error) { + var out []interface{} + err := _AggregatorV2V3Interface.contract.Call(opts, &out, "getAnswer", roundId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceSession) GetAnswer(roundId *big.Int) (*big.Int, error) { + return _AggregatorV2V3Interface.Contract.GetAnswer(&_AggregatorV2V3Interface.CallOpts, roundId) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCallerSession) GetAnswer(roundId *big.Int) (*big.Int, error) { + return _AggregatorV2V3Interface.Contract.GetAnswer(&_AggregatorV2V3Interface.CallOpts, roundId) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCaller) GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) { + var out []interface{} + err := _AggregatorV2V3Interface.contract.Call(opts, &out, "getRoundData", _roundId) + + outstruct := new(GetRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _AggregatorV2V3Interface.Contract.GetRoundData(&_AggregatorV2V3Interface.CallOpts, _roundId) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCallerSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _AggregatorV2V3Interface.Contract.GetRoundData(&_AggregatorV2V3Interface.CallOpts, 
_roundId) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCaller) GetTimestamp(opts *bind.CallOpts, roundId *big.Int) (*big.Int, error) { + var out []interface{} + err := _AggregatorV2V3Interface.contract.Call(opts, &out, "getTimestamp", roundId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceSession) GetTimestamp(roundId *big.Int) (*big.Int, error) { + return _AggregatorV2V3Interface.Contract.GetTimestamp(&_AggregatorV2V3Interface.CallOpts, roundId) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCallerSession) GetTimestamp(roundId *big.Int) (*big.Int, error) { + return _AggregatorV2V3Interface.Contract.GetTimestamp(&_AggregatorV2V3Interface.CallOpts, roundId) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCaller) LatestAnswer(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _AggregatorV2V3Interface.contract.Call(opts, &out, "latestAnswer") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceSession) LatestAnswer() (*big.Int, error) { + return _AggregatorV2V3Interface.Contract.LatestAnswer(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCallerSession) LatestAnswer() (*big.Int, error) { + return _AggregatorV2V3Interface.Contract.LatestAnswer(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCaller) LatestRound(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _AggregatorV2V3Interface.contract.Call(opts, &out, "latestRound") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func 
(_AggregatorV2V3Interface *AggregatorV2V3InterfaceSession) LatestRound() (*big.Int, error) { + return _AggregatorV2V3Interface.Contract.LatestRound(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCallerSession) LatestRound() (*big.Int, error) { + return _AggregatorV2V3Interface.Contract.LatestRound(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCaller) LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) { + var out []interface{} + err := _AggregatorV2V3Interface.contract.Call(opts, &out, "latestRoundData") + + outstruct := new(LatestRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceSession) LatestRoundData() (LatestRoundData, + + error) { + return _AggregatorV2V3Interface.Contract.LatestRoundData(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCallerSession) LatestRoundData() (LatestRoundData, + + error) { + return _AggregatorV2V3Interface.Contract.LatestRoundData(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCaller) LatestTimestamp(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _AggregatorV2V3Interface.contract.Call(opts, &out, "latestTimestamp") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceSession) 
LatestTimestamp() (*big.Int, error) { + return _AggregatorV2V3Interface.Contract.LatestTimestamp(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCallerSession) LatestTimestamp() (*big.Int, error) { + return _AggregatorV2V3Interface.Contract.LatestTimestamp(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCaller) Version(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _AggregatorV2V3Interface.contract.Call(opts, &out, "version") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceSession) Version() (*big.Int, error) { + return _AggregatorV2V3Interface.Contract.Version(&_AggregatorV2V3Interface.CallOpts) +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceCallerSession) Version() (*big.Int, error) { + return _AggregatorV2V3Interface.Contract.Version(&_AggregatorV2V3Interface.CallOpts) +} + +type AggregatorV2V3InterfaceAnswerUpdatedIterator struct { + Event *AggregatorV2V3InterfaceAnswerUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AggregatorV2V3InterfaceAnswerUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AggregatorV2V3InterfaceAnswerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AggregatorV2V3InterfaceAnswerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = 
true + it.fail = err + return it.Next() + } +} + +func (it *AggregatorV2V3InterfaceAnswerUpdatedIterator) Error() error { + return it.fail +} + +func (it *AggregatorV2V3InterfaceAnswerUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AggregatorV2V3InterfaceAnswerUpdated struct { + Current *big.Int + RoundId *big.Int + UpdatedAt *big.Int + Raw types.Log +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceFilterer) FilterAnswerUpdated(opts *bind.FilterOpts, current []*big.Int, roundId []*big.Int) (*AggregatorV2V3InterfaceAnswerUpdatedIterator, error) { + + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + + logs, sub, err := _AggregatorV2V3Interface.contract.FilterLogs(opts, "AnswerUpdated", currentRule, roundIdRule) + if err != nil { + return nil, err + } + return &AggregatorV2V3InterfaceAnswerUpdatedIterator{contract: _AggregatorV2V3Interface.contract, event: "AnswerUpdated", logs: logs, sub: sub}, nil +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceFilterer) WatchAnswerUpdated(opts *bind.WatchOpts, sink chan<- *AggregatorV2V3InterfaceAnswerUpdated, current []*big.Int, roundId []*big.Int) (event.Subscription, error) { + + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + + logs, sub, err := _AggregatorV2V3Interface.contract.WatchLogs(opts, "AnswerUpdated", currentRule, roundIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AggregatorV2V3InterfaceAnswerUpdated) + if err := 
_AggregatorV2V3Interface.contract.UnpackLog(event, "AnswerUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceFilterer) ParseAnswerUpdated(log types.Log) (*AggregatorV2V3InterfaceAnswerUpdated, error) { + event := new(AggregatorV2V3InterfaceAnswerUpdated) + if err := _AggregatorV2V3Interface.contract.UnpackLog(event, "AnswerUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AggregatorV2V3InterfaceNewRoundIterator struct { + Event *AggregatorV2V3InterfaceNewRound + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AggregatorV2V3InterfaceNewRoundIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AggregatorV2V3InterfaceNewRound) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AggregatorV2V3InterfaceNewRound) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AggregatorV2V3InterfaceNewRoundIterator) Error() error { + return it.fail +} + +func (it *AggregatorV2V3InterfaceNewRoundIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AggregatorV2V3InterfaceNewRound struct { + RoundId *big.Int + StartedBy common.Address + StartedAt *big.Int + Raw types.Log +} + +func (_AggregatorV2V3Interface 
*AggregatorV2V3InterfaceFilterer) FilterNewRound(opts *bind.FilterOpts, roundId []*big.Int, startedBy []common.Address) (*AggregatorV2V3InterfaceNewRoundIterator, error) { + + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + var startedByRule []interface{} + for _, startedByItem := range startedBy { + startedByRule = append(startedByRule, startedByItem) + } + + logs, sub, err := _AggregatorV2V3Interface.contract.FilterLogs(opts, "NewRound", roundIdRule, startedByRule) + if err != nil { + return nil, err + } + return &AggregatorV2V3InterfaceNewRoundIterator{contract: _AggregatorV2V3Interface.contract, event: "NewRound", logs: logs, sub: sub}, nil +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceFilterer) WatchNewRound(opts *bind.WatchOpts, sink chan<- *AggregatorV2V3InterfaceNewRound, roundId []*big.Int, startedBy []common.Address) (event.Subscription, error) { + + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + var startedByRule []interface{} + for _, startedByItem := range startedBy { + startedByRule = append(startedByRule, startedByItem) + } + + logs, sub, err := _AggregatorV2V3Interface.contract.WatchLogs(opts, "NewRound", roundIdRule, startedByRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AggregatorV2V3InterfaceNewRound) + if err := _AggregatorV2V3Interface.contract.UnpackLog(event, "NewRound", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AggregatorV2V3Interface *AggregatorV2V3InterfaceFilterer) ParseNewRound(log types.Log) 
(*AggregatorV2V3InterfaceNewRound, error) { + event := new(AggregatorV2V3InterfaceNewRound) + if err := _AggregatorV2V3Interface.contract.UnpackLog(event, "NewRound", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRoundData struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} +type LatestRoundData struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} + +func (_AggregatorV2V3Interface *AggregatorV2V3Interface) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _AggregatorV2V3Interface.abi.Events["AnswerUpdated"].ID: + return _AggregatorV2V3Interface.ParseAnswerUpdated(log) + case _AggregatorV2V3Interface.abi.Events["NewRound"].ID: + return _AggregatorV2V3Interface.ParseNewRound(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (AggregatorV2V3InterfaceAnswerUpdated) Topic() common.Hash { + return common.HexToHash("0x0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f") +} + +func (AggregatorV2V3InterfaceNewRound) Topic() common.Hash { + return common.HexToHash("0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271") +} + +func (_AggregatorV2V3Interface *AggregatorV2V3Interface) Address() common.Address { + return _AggregatorV2V3Interface.address +} + +type AggregatorV2V3InterfaceInterface interface { + Decimals(opts *bind.CallOpts) (uint8, error) + + Description(opts *bind.CallOpts) (string, error) + + GetAnswer(opts *bind.CallOpts, roundId *big.Int) (*big.Int, error) + + GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) + + GetTimestamp(opts *bind.CallOpts, roundId *big.Int) (*big.Int, error) + + LatestAnswer(opts *bind.CallOpts) (*big.Int, error) + + LatestRound(opts *bind.CallOpts) (*big.Int, error) + + LatestRoundData(opts 
*bind.CallOpts) (LatestRoundData, + + error) + + LatestTimestamp(opts *bind.CallOpts) (*big.Int, error) + + Version(opts *bind.CallOpts) (*big.Int, error) + + FilterAnswerUpdated(opts *bind.FilterOpts, current []*big.Int, roundId []*big.Int) (*AggregatorV2V3InterfaceAnswerUpdatedIterator, error) + + WatchAnswerUpdated(opts *bind.WatchOpts, sink chan<- *AggregatorV2V3InterfaceAnswerUpdated, current []*big.Int, roundId []*big.Int) (event.Subscription, error) + + ParseAnswerUpdated(log types.Log) (*AggregatorV2V3InterfaceAnswerUpdated, error) + + FilterNewRound(opts *bind.FilterOpts, roundId []*big.Int, startedBy []common.Address) (*AggregatorV2V3InterfaceNewRoundIterator, error) + + WatchNewRound(opts *bind.WatchOpts, sink chan<- *AggregatorV2V3InterfaceNewRound, roundId []*big.Int, startedBy []common.Address) (event.Subscription, error) + + ParseNewRound(log types.Log) (*AggregatorV2V3InterfaceNewRound, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/aggregator_v3_interface/aggregator_v3_interface.go b/core/gethwrappers/generated/aggregator_v3_interface/aggregator_v3_interface.go new file mode 100644 index 00000000..5daccaf8 --- /dev/null +++ b/core/gethwrappers/generated/aggregator_v3_interface/aggregator_v3_interface.go @@ -0,0 +1,320 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package aggregator_v3_interface + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var AggregatorV3InterfaceMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"description\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint80\",\"name\":\"_roundId\",\"type\":\"uint80\"}],\"name\":\"getRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"
inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", +} + +var AggregatorV3InterfaceABI = AggregatorV3InterfaceMetaData.ABI + +type AggregatorV3Interface struct { + address common.Address + abi abi.ABI + AggregatorV3InterfaceCaller + AggregatorV3InterfaceTransactor + AggregatorV3InterfaceFilterer +} + +type AggregatorV3InterfaceCaller struct { + contract *bind.BoundContract +} + +type AggregatorV3InterfaceTransactor struct { + contract *bind.BoundContract +} + +type AggregatorV3InterfaceFilterer struct { + contract *bind.BoundContract +} + +type AggregatorV3InterfaceSession struct { + Contract *AggregatorV3Interface + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AggregatorV3InterfaceCallerSession struct { + Contract *AggregatorV3InterfaceCaller + CallOpts bind.CallOpts +} + +type AggregatorV3InterfaceTransactorSession struct { + Contract *AggregatorV3InterfaceTransactor + TransactOpts bind.TransactOpts +} + +type AggregatorV3InterfaceRaw struct { + Contract *AggregatorV3Interface +} + +type AggregatorV3InterfaceCallerRaw struct { + Contract *AggregatorV3InterfaceCaller +} + +type AggregatorV3InterfaceTransactorRaw struct { + Contract *AggregatorV3InterfaceTransactor +} + +func NewAggregatorV3Interface(address common.Address, backend bind.ContractBackend) (*AggregatorV3Interface, error) { + abi, err := abi.JSON(strings.NewReader(AggregatorV3InterfaceABI)) + if err != nil { + return nil, err + } + contract, err := bindAggregatorV3Interface(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AggregatorV3Interface{address: address, abi: abi, AggregatorV3InterfaceCaller: AggregatorV3InterfaceCaller{contract: contract}, AggregatorV3InterfaceTransactor: AggregatorV3InterfaceTransactor{contract: contract}, AggregatorV3InterfaceFilterer: AggregatorV3InterfaceFilterer{contract: contract}}, nil +} + +func 
NewAggregatorV3InterfaceCaller(address common.Address, caller bind.ContractCaller) (*AggregatorV3InterfaceCaller, error) { + contract, err := bindAggregatorV3Interface(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AggregatorV3InterfaceCaller{contract: contract}, nil +} + +func NewAggregatorV3InterfaceTransactor(address common.Address, transactor bind.ContractTransactor) (*AggregatorV3InterfaceTransactor, error) { + contract, err := bindAggregatorV3Interface(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AggregatorV3InterfaceTransactor{contract: contract}, nil +} + +func NewAggregatorV3InterfaceFilterer(address common.Address, filterer bind.ContractFilterer) (*AggregatorV3InterfaceFilterer, error) { + contract, err := bindAggregatorV3Interface(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AggregatorV3InterfaceFilterer{contract: contract}, nil +} + +func bindAggregatorV3Interface(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := AggregatorV3InterfaceMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AggregatorV3Interface.Contract.AggregatorV3InterfaceCaller.contract.Call(opts, result, method, params...) 
+} + +func (_AggregatorV3Interface *AggregatorV3InterfaceRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AggregatorV3Interface.Contract.AggregatorV3InterfaceTransactor.contract.Transfer(opts) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AggregatorV3Interface.Contract.AggregatorV3InterfaceTransactor.contract.Transact(opts, method, params...) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AggregatorV3Interface.Contract.contract.Call(opts, result, method, params...) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AggregatorV3Interface.Contract.contract.Transfer(opts) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AggregatorV3Interface.Contract.contract.Transact(opts, method, params...) 
+} + +func (_AggregatorV3Interface *AggregatorV3InterfaceCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _AggregatorV3Interface.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceSession) Decimals() (uint8, error) { + return _AggregatorV3Interface.Contract.Decimals(&_AggregatorV3Interface.CallOpts) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceCallerSession) Decimals() (uint8, error) { + return _AggregatorV3Interface.Contract.Decimals(&_AggregatorV3Interface.CallOpts) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceCaller) Description(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _AggregatorV3Interface.contract.Call(opts, &out, "description") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceSession) Description() (string, error) { + return _AggregatorV3Interface.Contract.Description(&_AggregatorV3Interface.CallOpts) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceCallerSession) Description() (string, error) { + return _AggregatorV3Interface.Contract.Description(&_AggregatorV3Interface.CallOpts) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceCaller) GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) { + var out []interface{} + err := _AggregatorV3Interface.contract.Call(opts, &out, "getRoundData", _roundId) + + outstruct := new(GetRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + 
outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _AggregatorV3Interface.Contract.GetRoundData(&_AggregatorV3Interface.CallOpts, _roundId) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceCallerSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _AggregatorV3Interface.Contract.GetRoundData(&_AggregatorV3Interface.CallOpts, _roundId) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceCaller) LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) { + var out []interface{} + err := _AggregatorV3Interface.contract.Call(opts, &out, "latestRoundData") + + outstruct := new(LatestRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceSession) LatestRoundData() (LatestRoundData, + + error) { + return _AggregatorV3Interface.Contract.LatestRoundData(&_AggregatorV3Interface.CallOpts) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceCallerSession) LatestRoundData() (LatestRoundData, + + error) { + return _AggregatorV3Interface.Contract.LatestRoundData(&_AggregatorV3Interface.CallOpts) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceCaller) Version(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _AggregatorV3Interface.contract.Call(opts, &out, "version") + + 
if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceSession) Version() (*big.Int, error) { + return _AggregatorV3Interface.Contract.Version(&_AggregatorV3Interface.CallOpts) +} + +func (_AggregatorV3Interface *AggregatorV3InterfaceCallerSession) Version() (*big.Int, error) { + return _AggregatorV3Interface.Contract.Version(&_AggregatorV3Interface.CallOpts) +} + +type GetRoundData struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} +type LatestRoundData struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} + +func (_AggregatorV3Interface *AggregatorV3Interface) Address() common.Address { + return _AggregatorV3Interface.address +} + +type AggregatorV3InterfaceInterface interface { + Decimals(opts *bind.CallOpts) (uint8, error) + + Description(opts *bind.CallOpts) (string, error) + + GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) + + LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) + + Version(opts *bind.CallOpts) (*big.Int, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/authorized_forwarder/authorized_forwarder.go b/core/gethwrappers/generated/authorized_forwarder/authorized_forwarder.go new file mode 100644 index 00000000..f0c3c483 --- /dev/null +++ b/core/gethwrappers/generated/authorized_forwarder/authorized_forwarder.go @@ -0,0 +1,983 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package authorized_forwarder + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var AuthorizedForwarderMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"OwnershipTransferRequestedWithMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inpu
ts\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"forward\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAuthorizedSenders\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"isAuthorizedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"linkToken\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"tos\",\"type\":\"address[]\"},{\"internalType\":\"bytes[]\",\"name\":\"datas\",\"type\":\"bytes[]\"}],\"name\":\"multiForward\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"ownerForward\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"setAuthorizedSenders\",\"outputs\":[]
,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"transferOwnershipWithMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: "0x60a06040523480156200001157600080fd5b50604051620016993803806200169983398101604081905262000034916200029d565b82826001600160a01b038216620000925760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000c557620000c58162000199565b50506001600160a01b0384166200012b5760405162461bcd60e51b815260206004820152602360248201527f4c696e6b20746f6b656e2063616e6e6f742062652061207a65726f206164647260448201526265737360e81b606482015260840162000089565b6001600160a01b038085166080528216156200018f57816001600160a01b0316836001600160a01b03167f4e1e878dc28d5f040db5969163ff1acd75c44c3f655da2dde9c70bbd8e56dc7e836040516200018691906200038e565b60405180910390a35b50505050620003c3565b336001600160a01b03821603620001f35760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000089565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b03811681146200025c57600080fd5b919050565b634e487b7160e01b600052604160045260246000fd5b60005b838110156200029457818101518382015
26020016200027a565b50506000910152565b60008060008060808587031215620002b457600080fd5b620002bf8562000244565b9350620002cf6020860162000244565b9250620002df6040860162000244565b60608601519092506001600160401b0380821115620002fd57600080fd5b818701915087601f8301126200031257600080fd5b81518181111562000327576200032762000261565b604051601f8201601f19908116603f0116810190838211818310171562000352576200035262000261565b816040528281528a60208487010111156200036c57600080fd5b6200037f83602083016020880162000277565b979a9699509497505050505050565b6020815260008251806020840152620003af81604085016020870162000277565b601f01601f19169190910160400192915050565b6080516112ac620003ed6000396000818161016d0152818161037501526105d301526112ac6000f3fe608060405234801561001057600080fd5b50600436106100d45760003560e01c806379ba509711610081578063ee56997b1161005b578063ee56997b14610200578063f2fde38b14610213578063fa00763a1461022657600080fd5b806379ba5097146101c75780638da5cb5b146101cf578063b64fa9e6146101ed57600080fd5b80634d3e2323116100b25780634d3e23231461015557806357970e93146101685780636fadcf72146101b457600080fd5b8063033f49f7146100d9578063181f5a77146100ee5780632408afaa14610140575b600080fd5b6100ec6100e7366004610e72565b61026f565b005b61012a6040518060400160405280601981526020017f417574686f72697a6564466f7277617264657220312e312e300000000000000081525081565b6040516101379190610ef5565b60405180910390f35b610148610287565b6040516101379190610f61565b6100ec610163366004610e72565b6102f6565b61018f7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610137565b6100ec6101c2366004610e72565b61036b565b6100ec61042d565b60005473ffffffffffffffffffffffffffffffffffffffff1661018f565b6100ec6101fb366004611007565b61052a565b6100ec61020e366004611073565b6106cb565b6100ec6102213660046110b5565b6109dc565b61025f6102343660046110b5565b73ffffffffffffffffffffffffffffffffffffffff1660009081526002602052604090205460ff1690565b6040519015158152602001610137565b6102776109f0565b610282838383610a73565b5050505
65b606060038054806020026020016040519081016040528092919081815260200182805480156102ec57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116102c1575b5050505050905090565b6102ff836109dc565b8273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f4e1e878dc28d5f040db5969163ff1acd75c44c3f655da2dde9c70bbd8e56dc7e848460405161035e9291906110d7565b60405180910390a3505050565b610373610c00565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff1603610277576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f43616e6e6f7420666f727761726420746f204c696e6b20746f6b656e0000000060448201526064015b60405180910390fd5b60015473ffffffffffffffffffffffffffffffffffffffff1633146104ae576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610424565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610532610c00565b82811461059b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f417272617973206d7573742068617665207468652073616d65206c656e6774686044820152606401610424565b60005b838110156106c45760008585838181106105ba576105ba611124565b90506020020160208101906105cf91906110b5565b90507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603610686576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f43616e6e6f7420666f727761726420746f204c696
e6b20746f6b656e000000006044820152606401610424565b6106b38185858581811061069c5761069c611124565b90506020028101906106ae9190611153565b610a73565b506106bd816111b8565b905061059e565b5050505050565b6106d3610c79565b610739576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f43616e6e6f742073657420617574686f72697a65642073656e646572730000006044820152606401610424565b806107a0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f4d7573742068617665206174206c6561737420312073656e64657200000000006044820152606401610424565b60035460005b8181101561083657600060026000600384815481106107c7576107c7611124565b60009182526020808320919091015473ffffffffffffffffffffffffffffffffffffffff168352820192909252604001902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001691151591909117905561082f816111b8565b90506107a6565b5060005b8281101561098e576002600085858481811061085857610858611124565b905060200201602081019061086d91906110b5565b73ffffffffffffffffffffffffffffffffffffffff16815260208101919091526040016000205460ff16156108fe576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f4d757374206e6f742068617665206475706c69636174652073656e64657273006044820152606401610424565b60016002600086868581811061091657610916611124565b905060200201602081019061092b91906110b5565b73ffffffffffffffffffffffffffffffffffffffff168152602081019190915260400160002080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016911515919091179055610987816111b8565b905061083a565b5061099b60038484610dac565b507ff263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a08383336040516109cf93929190611217565b60405180910390a1505050565b6109e46109f0565b6109ed81610cb7565b50565b60005473ffffffffffffffffffffffffffffffffffffffff163314610a71576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206
f776e6572000000000000000000006044820152606401610424565b565b73ffffffffffffffffffffffffffffffffffffffff83163b610af1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f4d75737420666f727761726420746f206120636f6e74726163740000000000006044820152606401610424565b6000808473ffffffffffffffffffffffffffffffffffffffff168484604051610b1b92919061128f565b6000604051808303816000865af19150503d8060008114610b58576040519150601f19603f3d011682016040523d82523d6000602084013e610b5d565b606091505b5091509150816106c4578051600003610bf8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f466f727761726465642063616c6c20726576657274656420776974686f75742060448201527f726561736f6e00000000000000000000000000000000000000000000000000006064820152608401610424565b805181602001fd5b3360009081526002602052604090205460ff16610a71576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f4e6f7420617574686f72697a65642073656e64657200000000000000000000006044820152606401610424565b600033610c9b60005473ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff1614905090565b3373ffffffffffffffffffffffffffffffffffffffff821603610d36576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610424565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b828054828255906000526020600020908101928215610e24579160200282015b82811115610e245781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff843516178255602090920191600190910190610dcc565b50610e30929150610e34565b5090565b5b80821115610e30576
0008155600101610e35565b803573ffffffffffffffffffffffffffffffffffffffff81168114610e6d57600080fd5b919050565b600080600060408486031215610e8757600080fd5b610e9084610e49565b9250602084013567ffffffffffffffff80821115610ead57600080fd5b818601915086601f830112610ec157600080fd5b813581811115610ed057600080fd5b876020828501011115610ee257600080fd5b6020830194508093505050509250925092565b600060208083528351808285015260005b81811015610f2257858101830151858201604001528201610f06565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b6020808252825182820181905260009190848201906040850190845b81811015610faf57835173ffffffffffffffffffffffffffffffffffffffff1683529284019291840191600101610f7d565b50909695505050505050565b60008083601f840112610fcd57600080fd5b50813567ffffffffffffffff811115610fe557600080fd5b6020830191508360208260051b850101111561100057600080fd5b9250929050565b6000806000806040858703121561101d57600080fd5b843567ffffffffffffffff8082111561103557600080fd5b61104188838901610fbb565b9096509450602087013591508082111561105a57600080fd5b5061106787828801610fbb565b95989497509550505050565b6000806020838503121561108657600080fd5b823567ffffffffffffffff81111561109d57600080fd5b6110a985828601610fbb565b90969095509350505050565b6000602082840312156110c757600080fd5b6110d082610e49565b9392505050565b60208152816020820152818360408301376000818301604090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160101919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe184360301811261118857600080fd5b83018035915067ffffffffffffffff8211156111a357600080fd5b60200191503681900382131561100057600080fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611210577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b60408082528101839052600084606
08301825b868110156112655773ffffffffffffffffffffffffffffffffffffffff61125084610e49565b1682526020928301929091019060010161122a565b50809250505073ffffffffffffffffffffffffffffffffffffffff83166020830152949350505050565b818382376000910190815291905056fea164736f6c6343000813000a", +} + +var AuthorizedForwarderABI = AuthorizedForwarderMetaData.ABI + +var AuthorizedForwarderBin = AuthorizedForwarderMetaData.Bin + +func DeployAuthorizedForwarder(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, owner common.Address, recipient common.Address, message []byte) (common.Address, *types.Transaction, *AuthorizedForwarder, error) { + parsed, err := AuthorizedForwarderMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(AuthorizedForwarderBin), backend, link, owner, recipient, message) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &AuthorizedForwarder{address: address, abi: *parsed, AuthorizedForwarderCaller: AuthorizedForwarderCaller{contract: contract}, AuthorizedForwarderTransactor: AuthorizedForwarderTransactor{contract: contract}, AuthorizedForwarderFilterer: AuthorizedForwarderFilterer{contract: contract}}, nil +} + +type AuthorizedForwarder struct { + address common.Address + abi abi.ABI + AuthorizedForwarderCaller + AuthorizedForwarderTransactor + AuthorizedForwarderFilterer +} + +type AuthorizedForwarderCaller struct { + contract *bind.BoundContract +} + +type AuthorizedForwarderTransactor struct { + contract *bind.BoundContract +} + +type AuthorizedForwarderFilterer struct { + contract *bind.BoundContract +} + +type AuthorizedForwarderSession struct { + Contract *AuthorizedForwarder + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AuthorizedForwarderCallerSession struct { + Contract 
*AuthorizedForwarderCaller + CallOpts bind.CallOpts +} + +type AuthorizedForwarderTransactorSession struct { + Contract *AuthorizedForwarderTransactor + TransactOpts bind.TransactOpts +} + +type AuthorizedForwarderRaw struct { + Contract *AuthorizedForwarder +} + +type AuthorizedForwarderCallerRaw struct { + Contract *AuthorizedForwarderCaller +} + +type AuthorizedForwarderTransactorRaw struct { + Contract *AuthorizedForwarderTransactor +} + +func NewAuthorizedForwarder(address common.Address, backend bind.ContractBackend) (*AuthorizedForwarder, error) { + abi, err := abi.JSON(strings.NewReader(AuthorizedForwarderABI)) + if err != nil { + return nil, err + } + contract, err := bindAuthorizedForwarder(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AuthorizedForwarder{address: address, abi: abi, AuthorizedForwarderCaller: AuthorizedForwarderCaller{contract: contract}, AuthorizedForwarderTransactor: AuthorizedForwarderTransactor{contract: contract}, AuthorizedForwarderFilterer: AuthorizedForwarderFilterer{contract: contract}}, nil +} + +func NewAuthorizedForwarderCaller(address common.Address, caller bind.ContractCaller) (*AuthorizedForwarderCaller, error) { + contract, err := bindAuthorizedForwarder(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AuthorizedForwarderCaller{contract: contract}, nil +} + +func NewAuthorizedForwarderTransactor(address common.Address, transactor bind.ContractTransactor) (*AuthorizedForwarderTransactor, error) { + contract, err := bindAuthorizedForwarder(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AuthorizedForwarderTransactor{contract: contract}, nil +} + +func NewAuthorizedForwarderFilterer(address common.Address, filterer bind.ContractFilterer) (*AuthorizedForwarderFilterer, error) { + contract, err := bindAuthorizedForwarder(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return 
&AuthorizedForwarderFilterer{contract: contract}, nil +} + +func bindAuthorizedForwarder(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := AuthorizedForwarderMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AuthorizedForwarder.Contract.AuthorizedForwarderCaller.contract.Call(opts, result, method, params...) +} + +func (_AuthorizedForwarder *AuthorizedForwarderRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.AuthorizedForwarderTransactor.contract.Transfer(opts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.AuthorizedForwarderTransactor.contract.Transact(opts, method, params...) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AuthorizedForwarder.Contract.contract.Call(opts, result, method, params...) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.contract.Transfer(opts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.contract.Transact(opts, method, params...) 
+} + +func (_AuthorizedForwarder *AuthorizedForwarderCaller) GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _AuthorizedForwarder.contract.Call(opts, &out, "getAuthorizedSenders") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) GetAuthorizedSenders() ([]common.Address, error) { + return _AuthorizedForwarder.Contract.GetAuthorizedSenders(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCallerSession) GetAuthorizedSenders() ([]common.Address, error) { + return _AuthorizedForwarder.Contract.GetAuthorizedSenders(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCaller) IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) { + var out []interface{} + err := _AuthorizedForwarder.contract.Call(opts, &out, "isAuthorizedSender", sender) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _AuthorizedForwarder.Contract.IsAuthorizedSender(&_AuthorizedForwarder.CallOpts, sender) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCallerSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _AuthorizedForwarder.Contract.IsAuthorizedSender(&_AuthorizedForwarder.CallOpts, sender) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCaller) LinkToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AuthorizedForwarder.contract.Call(opts, &out, "linkToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, 
err + +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) LinkToken() (common.Address, error) { + return _AuthorizedForwarder.Contract.LinkToken(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCallerSession) LinkToken() (common.Address, error) { + return _AuthorizedForwarder.Contract.LinkToken(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AuthorizedForwarder.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) Owner() (common.Address, error) { + return _AuthorizedForwarder.Contract.Owner(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCallerSession) Owner() (common.Address, error) { + return _AuthorizedForwarder.Contract.Owner(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _AuthorizedForwarder.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) TypeAndVersion() (string, error) { + return _AuthorizedForwarder.Contract.TypeAndVersion(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderCallerSession) TypeAndVersion() (string, error) { + return _AuthorizedForwarder.Contract.TypeAndVersion(&_AuthorizedForwarder.CallOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AuthorizedForwarder.contract.Transact(opts, 
"acceptOwnership") +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) AcceptOwnership() (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.AcceptOwnership(&_AuthorizedForwarder.TransactOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.AcceptOwnership(&_AuthorizedForwarder.TransactOpts) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) Forward(opts *bind.TransactOpts, to common.Address, data []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.contract.Transact(opts, "forward", to, data) +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) Forward(to common.Address, data []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.Forward(&_AuthorizedForwarder.TransactOpts, to, data) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) Forward(to common.Address, data []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.Forward(&_AuthorizedForwarder.TransactOpts, to, data) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) MultiForward(opts *bind.TransactOpts, tos []common.Address, datas [][]byte) (*types.Transaction, error) { + return _AuthorizedForwarder.contract.Transact(opts, "multiForward", tos, datas) +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) MultiForward(tos []common.Address, datas [][]byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.MultiForward(&_AuthorizedForwarder.TransactOpts, tos, datas) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) MultiForward(tos []common.Address, datas [][]byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.MultiForward(&_AuthorizedForwarder.TransactOpts, tos, datas) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) OwnerForward(opts *bind.TransactOpts, to common.Address, 
data []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.contract.Transact(opts, "ownerForward", to, data) +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) OwnerForward(to common.Address, data []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.OwnerForward(&_AuthorizedForwarder.TransactOpts, to, data) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) OwnerForward(to common.Address, data []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.OwnerForward(&_AuthorizedForwarder.TransactOpts, to, data) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) { + return _AuthorizedForwarder.contract.Transact(opts, "setAuthorizedSenders", senders) +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.SetAuthorizedSenders(&_AuthorizedForwarder.TransactOpts, senders) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.SetAuthorizedSenders(&_AuthorizedForwarder.TransactOpts, senders) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _AuthorizedForwarder.contract.Transact(opts, "transferOwnership", to) +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.TransferOwnership(&_AuthorizedForwarder.TransactOpts, to) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return 
_AuthorizedForwarder.Contract.TransferOwnership(&_AuthorizedForwarder.TransactOpts, to) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactor) TransferOwnershipWithMessage(opts *bind.TransactOpts, to common.Address, message []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.contract.Transact(opts, "transferOwnershipWithMessage", to, message) +} + +func (_AuthorizedForwarder *AuthorizedForwarderSession) TransferOwnershipWithMessage(to common.Address, message []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.TransferOwnershipWithMessage(&_AuthorizedForwarder.TransactOpts, to, message) +} + +func (_AuthorizedForwarder *AuthorizedForwarderTransactorSession) TransferOwnershipWithMessage(to common.Address, message []byte) (*types.Transaction, error) { + return _AuthorizedForwarder.Contract.TransferOwnershipWithMessage(&_AuthorizedForwarder.TransactOpts, to, message) +} + +type AuthorizedForwarderAuthorizedSendersChangedIterator struct { + Event *AuthorizedForwarderAuthorizedSendersChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AuthorizedForwarderAuthorizedSendersChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AuthorizedForwarderAuthorizedSendersChangedIterator) 
Error() error { + return it.fail +} + +func (it *AuthorizedForwarderAuthorizedSendersChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AuthorizedForwarderAuthorizedSendersChanged struct { + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*AuthorizedForwarderAuthorizedSendersChangedIterator, error) { + + logs, sub, err := _AuthorizedForwarder.contract.FilterLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return &AuthorizedForwarderAuthorizedSendersChangedIterator{contract: _AuthorizedForwarder.contract, event: "AuthorizedSendersChanged", logs: logs, sub: sub}, nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderAuthorizedSendersChanged) (event.Subscription, error) { + + logs, sub, err := _AuthorizedForwarder.contract.WatchLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AuthorizedForwarderAuthorizedSendersChanged) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) ParseAuthorizedSendersChanged(log types.Log) (*AuthorizedForwarderAuthorizedSendersChanged, error) { + event := new(AuthorizedForwarderAuthorizedSendersChanged) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return nil, err + } + 
event.Raw = log + return event, nil +} + +type AuthorizedForwarderOwnershipTransferRequestedIterator struct { + Event *AuthorizedForwarderOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AuthorizedForwarderOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AuthorizedForwarderOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AuthorizedForwarderOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AuthorizedForwarderOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AuthorizedForwarderOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AuthorizedForwarder.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return 
&AuthorizedForwarderOwnershipTransferRequestedIterator{contract: _AuthorizedForwarder.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AuthorizedForwarder.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AuthorizedForwarderOwnershipTransferRequested) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) ParseOwnershipTransferRequested(log types.Log) (*AuthorizedForwarderOwnershipTransferRequested, error) { + event := new(AuthorizedForwarderOwnershipTransferRequested) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator struct { + Event *AuthorizedForwarderOwnershipTransferRequestedWithMessage + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderOwnershipTransferRequestedWithMessage) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderOwnershipTransferRequestedWithMessage) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator) Error() error { + return it.fail +} + +func (it *AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AuthorizedForwarderOwnershipTransferRequestedWithMessage struct { + From common.Address + To common.Address + Message []byte + Raw types.Log +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) FilterOwnershipTransferRequestedWithMessage(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AuthorizedForwarder.contract.FilterLogs(opts, "OwnershipTransferRequestedWithMessage", fromRule, toRule) + if err != nil { + return nil, err + } + return &AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator{contract: _AuthorizedForwarder.contract, event: "OwnershipTransferRequestedWithMessage", logs: logs, sub: sub}, nil +} + 
+func (_AuthorizedForwarder *AuthorizedForwarderFilterer) WatchOwnershipTransferRequestedWithMessage(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderOwnershipTransferRequestedWithMessage, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AuthorizedForwarder.contract.WatchLogs(opts, "OwnershipTransferRequestedWithMessage", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AuthorizedForwarderOwnershipTransferRequestedWithMessage) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "OwnershipTransferRequestedWithMessage", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) ParseOwnershipTransferRequestedWithMessage(log types.Log) (*AuthorizedForwarderOwnershipTransferRequestedWithMessage, error) { + event := new(AuthorizedForwarderOwnershipTransferRequestedWithMessage) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "OwnershipTransferRequestedWithMessage", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AuthorizedForwarderOwnershipTransferredIterator struct { + Event *AuthorizedForwarderOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AuthorizedForwarderOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } 
+ + if it.done { + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AuthorizedForwarderOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AuthorizedForwarderOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *AuthorizedForwarderOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AuthorizedForwarderOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AuthorizedForwarderOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AuthorizedForwarder.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &AuthorizedForwarderOwnershipTransferredIterator{contract: _AuthorizedForwarder.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = 
append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AuthorizedForwarder.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AuthorizedForwarderOwnershipTransferred) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AuthorizedForwarder *AuthorizedForwarderFilterer) ParseOwnershipTransferred(log types.Log) (*AuthorizedForwarderOwnershipTransferred, error) { + event := new(AuthorizedForwarderOwnershipTransferred) + if err := _AuthorizedForwarder.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_AuthorizedForwarder *AuthorizedForwarder) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _AuthorizedForwarder.abi.Events["AuthorizedSendersChanged"].ID: + return _AuthorizedForwarder.ParseAuthorizedSendersChanged(log) + case _AuthorizedForwarder.abi.Events["OwnershipTransferRequested"].ID: + return _AuthorizedForwarder.ParseOwnershipTransferRequested(log) + case _AuthorizedForwarder.abi.Events["OwnershipTransferRequestedWithMessage"].ID: + return _AuthorizedForwarder.ParseOwnershipTransferRequestedWithMessage(log) + case _AuthorizedForwarder.abi.Events["OwnershipTransferred"].ID: + return _AuthorizedForwarder.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func 
(AuthorizedForwarderAuthorizedSendersChanged) Topic() common.Hash { + return common.HexToHash("0xf263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0") +} + +func (AuthorizedForwarderOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (AuthorizedForwarderOwnershipTransferRequestedWithMessage) Topic() common.Hash { + return common.HexToHash("0x4e1e878dc28d5f040db5969163ff1acd75c44c3f655da2dde9c70bbd8e56dc7e") +} + +func (AuthorizedForwarderOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_AuthorizedForwarder *AuthorizedForwarder) Address() common.Address { + return _AuthorizedForwarder.address +} + +type AuthorizedForwarderInterface interface { + GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) + + IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) + + LinkToken(opts *bind.CallOpts) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + Forward(opts *bind.TransactOpts, to common.Address, data []byte) (*types.Transaction, error) + + MultiForward(opts *bind.TransactOpts, tos []common.Address, datas [][]byte) (*types.Transaction, error) + + OwnerForward(opts *bind.TransactOpts, to common.Address, data []byte) (*types.Transaction, error) + + SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + TransferOwnershipWithMessage(opts *bind.TransactOpts, to common.Address, message []byte) (*types.Transaction, error) + + FilterAuthorizedSendersChanged(opts *bind.FilterOpts) 
(*AuthorizedForwarderAuthorizedSendersChangedIterator, error) + + WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderAuthorizedSendersChanged) (event.Subscription, error) + + ParseAuthorizedSendersChanged(log types.Log) (*AuthorizedForwarderAuthorizedSendersChanged, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AuthorizedForwarderOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*AuthorizedForwarderOwnershipTransferRequested, error) + + FilterOwnershipTransferRequestedWithMessage(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AuthorizedForwarderOwnershipTransferRequestedWithMessageIterator, error) + + WatchOwnershipTransferRequestedWithMessage(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderOwnershipTransferRequestedWithMessage, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequestedWithMessage(log types.Log) (*AuthorizedForwarderOwnershipTransferRequestedWithMessage, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AuthorizedForwarderOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AuthorizedForwarderOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*AuthorizedForwarderOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/authorized_receiver/authorized_receiver.go b/core/gethwrappers/generated/authorized_receiver/authorized_receiver.go new file mode 100644 
index 00000000..c2ce30fc --- /dev/null +++ b/core/gethwrappers/generated/authorized_receiver/authorized_receiver.go @@ -0,0 +1,363 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package authorized_receiver + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var AuthorizedReceiverMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersChanged\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"getAuthorizedSenders\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"isAuthorizedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"setAuthorizedSenders\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +var AuthorizedReceiverABI = AuthorizedReceiverMetaData.ABI + +type AuthorizedReceiver struct { + address common.Address + abi abi.ABI + 
AuthorizedReceiverCaller + AuthorizedReceiverTransactor + AuthorizedReceiverFilterer +} + +type AuthorizedReceiverCaller struct { + contract *bind.BoundContract +} + +type AuthorizedReceiverTransactor struct { + contract *bind.BoundContract +} + +type AuthorizedReceiverFilterer struct { + contract *bind.BoundContract +} + +type AuthorizedReceiverSession struct { + Contract *AuthorizedReceiver + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AuthorizedReceiverCallerSession struct { + Contract *AuthorizedReceiverCaller + CallOpts bind.CallOpts +} + +type AuthorizedReceiverTransactorSession struct { + Contract *AuthorizedReceiverTransactor + TransactOpts bind.TransactOpts +} + +type AuthorizedReceiverRaw struct { + Contract *AuthorizedReceiver +} + +type AuthorizedReceiverCallerRaw struct { + Contract *AuthorizedReceiverCaller +} + +type AuthorizedReceiverTransactorRaw struct { + Contract *AuthorizedReceiverTransactor +} + +func NewAuthorizedReceiver(address common.Address, backend bind.ContractBackend) (*AuthorizedReceiver, error) { + abi, err := abi.JSON(strings.NewReader(AuthorizedReceiverABI)) + if err != nil { + return nil, err + } + contract, err := bindAuthorizedReceiver(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AuthorizedReceiver{address: address, abi: abi, AuthorizedReceiverCaller: AuthorizedReceiverCaller{contract: contract}, AuthorizedReceiverTransactor: AuthorizedReceiverTransactor{contract: contract}, AuthorizedReceiverFilterer: AuthorizedReceiverFilterer{contract: contract}}, nil +} + +func NewAuthorizedReceiverCaller(address common.Address, caller bind.ContractCaller) (*AuthorizedReceiverCaller, error) { + contract, err := bindAuthorizedReceiver(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AuthorizedReceiverCaller{contract: contract}, nil +} + +func NewAuthorizedReceiverTransactor(address common.Address, transactor bind.ContractTransactor) 
(*AuthorizedReceiverTransactor, error) { + contract, err := bindAuthorizedReceiver(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AuthorizedReceiverTransactor{contract: contract}, nil +} + +func NewAuthorizedReceiverFilterer(address common.Address, filterer bind.ContractFilterer) (*AuthorizedReceiverFilterer, error) { + contract, err := bindAuthorizedReceiver(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AuthorizedReceiverFilterer{contract: contract}, nil +} + +func bindAuthorizedReceiver(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := AuthorizedReceiverMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AuthorizedReceiver *AuthorizedReceiverRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AuthorizedReceiver.Contract.AuthorizedReceiverCaller.contract.Call(opts, result, method, params...) +} + +func (_AuthorizedReceiver *AuthorizedReceiverRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AuthorizedReceiver.Contract.AuthorizedReceiverTransactor.contract.Transfer(opts) +} + +func (_AuthorizedReceiver *AuthorizedReceiverRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AuthorizedReceiver.Contract.AuthorizedReceiverTransactor.contract.Transact(opts, method, params...) +} + +func (_AuthorizedReceiver *AuthorizedReceiverCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AuthorizedReceiver.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_AuthorizedReceiver *AuthorizedReceiverTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AuthorizedReceiver.Contract.contract.Transfer(opts) +} + +func (_AuthorizedReceiver *AuthorizedReceiverTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AuthorizedReceiver.Contract.contract.Transact(opts, method, params...) +} + +func (_AuthorizedReceiver *AuthorizedReceiverCaller) GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _AuthorizedReceiver.contract.Call(opts, &out, "getAuthorizedSenders") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_AuthorizedReceiver *AuthorizedReceiverSession) GetAuthorizedSenders() ([]common.Address, error) { + return _AuthorizedReceiver.Contract.GetAuthorizedSenders(&_AuthorizedReceiver.CallOpts) +} + +func (_AuthorizedReceiver *AuthorizedReceiverCallerSession) GetAuthorizedSenders() ([]common.Address, error) { + return _AuthorizedReceiver.Contract.GetAuthorizedSenders(&_AuthorizedReceiver.CallOpts) +} + +func (_AuthorizedReceiver *AuthorizedReceiverCaller) IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) { + var out []interface{} + err := _AuthorizedReceiver.contract.Call(opts, &out, "isAuthorizedSender", sender) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_AuthorizedReceiver *AuthorizedReceiverSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _AuthorizedReceiver.Contract.IsAuthorizedSender(&_AuthorizedReceiver.CallOpts, sender) +} + +func (_AuthorizedReceiver *AuthorizedReceiverCallerSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return 
_AuthorizedReceiver.Contract.IsAuthorizedSender(&_AuthorizedReceiver.CallOpts, sender) +} + +func (_AuthorizedReceiver *AuthorizedReceiverTransactor) SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) { + return _AuthorizedReceiver.contract.Transact(opts, "setAuthorizedSenders", senders) +} + +func (_AuthorizedReceiver *AuthorizedReceiverSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _AuthorizedReceiver.Contract.SetAuthorizedSenders(&_AuthorizedReceiver.TransactOpts, senders) +} + +func (_AuthorizedReceiver *AuthorizedReceiverTransactorSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _AuthorizedReceiver.Contract.SetAuthorizedSenders(&_AuthorizedReceiver.TransactOpts, senders) +} + +type AuthorizedReceiverAuthorizedSendersChangedIterator struct { + Event *AuthorizedReceiverAuthorizedSendersChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AuthorizedReceiverAuthorizedSendersChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AuthorizedReceiverAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AuthorizedReceiverAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AuthorizedReceiverAuthorizedSendersChangedIterator) Error() error { + return it.fail +} + +func (it *AuthorizedReceiverAuthorizedSendersChangedIterator) 
Close() error { + it.sub.Unsubscribe() + return nil +} + +type AuthorizedReceiverAuthorizedSendersChanged struct { + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_AuthorizedReceiver *AuthorizedReceiverFilterer) FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*AuthorizedReceiverAuthorizedSendersChangedIterator, error) { + + logs, sub, err := _AuthorizedReceiver.contract.FilterLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return &AuthorizedReceiverAuthorizedSendersChangedIterator{contract: _AuthorizedReceiver.contract, event: "AuthorizedSendersChanged", logs: logs, sub: sub}, nil +} + +func (_AuthorizedReceiver *AuthorizedReceiverFilterer) WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *AuthorizedReceiverAuthorizedSendersChanged) (event.Subscription, error) { + + logs, sub, err := _AuthorizedReceiver.contract.WatchLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AuthorizedReceiverAuthorizedSendersChanged) + if err := _AuthorizedReceiver.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AuthorizedReceiver *AuthorizedReceiverFilterer) ParseAuthorizedSendersChanged(log types.Log) (*AuthorizedReceiverAuthorizedSendersChanged, error) { + event := new(AuthorizedReceiverAuthorizedSendersChanged) + if err := _AuthorizedReceiver.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_AuthorizedReceiver *AuthorizedReceiver) ParseLog(log types.Log) 
(generated.AbigenLog, error) { + switch log.Topics[0] { + case _AuthorizedReceiver.abi.Events["AuthorizedSendersChanged"].ID: + return _AuthorizedReceiver.ParseAuthorizedSendersChanged(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (AuthorizedReceiverAuthorizedSendersChanged) Topic() common.Hash { + return common.HexToHash("0xf263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0") +} + +func (_AuthorizedReceiver *AuthorizedReceiver) Address() common.Address { + return _AuthorizedReceiver.address +} + +type AuthorizedReceiverInterface interface { + GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) + + IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) + + SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) + + FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*AuthorizedReceiverAuthorizedSendersChangedIterator, error) + + WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *AuthorizedReceiverAuthorizedSendersChanged) (event.Subscription, error) + + ParseAuthorizedSendersChanged(log types.Log) (*AuthorizedReceiverAuthorizedSendersChanged, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/automation_consumer_benchmark/automation_consumer_benchmark.go b/core/gethwrappers/generated/automation_consumer_benchmark/automation_consumer_benchmark.go new file mode 100644 index 00000000..72c65594 --- /dev/null +++ b/core/gethwrappers/generated/automation_consumer_benchmark/automation_consumer_benchmark.go @@ -0,0 +1,520 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package automation_consumer_benchmark + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var AutomationConsumerBenchmarkMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"initialCall\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nextEligible\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"}],\"name\":\"PerformingUpkeep\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"range\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"firstEligibleBuffer\",\"type\":\"uint256\"}],\"name\":\"checkEligible\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"v
iew\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"count\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"dummyMap\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getCountPerforms\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"initialCall\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"nextEligible\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"reset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50436004556106ed806100246000396000f3fe608060405234801561001057600080fd5b50600436106100a35760003560e01c80637145f11b11610076578063d826f88f1161005b578063d826f88f14610177578063e81018b314610180578063f597b393146101a057600080fd5b80637145f11b14610134578063a6fe0a1e1461015757600080fd5b80633b3546c8146100a85780634585e33b146100db57806351dcee4b146100f05780636e04ff0d14610113575b600080fd5b6100c86100b63660046104dd565b60036020526000908152604090205481565b6040519081526020015b60405180910390f35b6100ee6100e93660046104f6565b6101c0565b005b6101036100fe366004610568565b610341565b60405190151581526020016100d2565b6101266101213660046104f6565b610356565b6040516100d2929190610594565b6101036101423660046104dd565b60026020526000908152604090205460ff1681565b6100c86101653660046104dd565b60016020526000908152604090205481565b6100ee43600455565b6100c861018e3660046104dd565b60009081526003602052604090205490565b6100c86101ae3660046104dd565b60006020819052908152604090205481565b600080808080806101d38789018961060a565b9550955095509550955095506101ea868583610477565b6101f357600080fd5b60005a6000888152602081905260408120549192500361021f5760008781526020819052604090204390555b610229864361067c565b6000888152600160209081526040808320939093556003905290812080549161025183610695565b90915550506000878152602081815260408083205460018352928190205481518b815232938101939093529082019290925260608101919091524360808201527f39223708d1655effd0be3f9a99a7e1d1aadd9fb456f0bfc4c2a4f50b2484a3679060a00160405180910390a160006102cb6001436106cd565b40905060005b845a6102dd90856106cd565b1015610334578080156102fe575060008281526002602052604090205460ff165b604080516020810185905230918101919091529091506060016040516020818303038152906040528051906020012091506102d1565b5050505050505050505050565b600061034e848484610477565b949350505050565b6000606081808080808061036c898b018b61060a565b95509550955095509550955060005a9050600061038a6001436106cd565b409050600080861180156103a457506103a4898886610477565b1561040d575b855a6103b690856106cd565b101561040d578080156103d
7575060008281526002602052604090205460ff165b604080516020810185905230918101919091529091506060016040516020818303038152906040528051906020012091506103aa565b610418898886610477565b8d8d81818080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050905090509a509a505050505050505050509250929050565b600083815260208190526040812054156104c55760008481526020819052604090205483906104a690436106cd565b1080156104c0575060008481526001602052604090205443115b61034e565b6004546104d2908361067c565b431015949350505050565b6000602082840312156104ef57600080fd5b5035919050565b6000806020838503121561050957600080fd5b823567ffffffffffffffff8082111561052157600080fd5b818501915085601f83011261053557600080fd5b81358181111561054457600080fd5b86602082850101111561055657600080fd5b60209290920196919550909350505050565b60008060006060848603121561057d57600080fd5b505081359360208301359350604090920135919050565b821515815260006020604081840152835180604085015260005b818110156105ca578581018301518582016060015282016105ae565b5060006060828601015260607fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f830116850101925050509392505050565b60008060008060008060c0878903121561062357600080fd5b505084359660208601359650604086013595606081013595506080810135945060a0013592509050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b8082018082111561068f5761068f61064d565b92915050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036106c6576106c661064d565b5060010190565b8181038181111561068f5761068f61064d56fea164736f6c6343000810000a", +} + +var AutomationConsumerBenchmarkABI = AutomationConsumerBenchmarkMetaData.ABI + +var AutomationConsumerBenchmarkBin = AutomationConsumerBenchmarkMetaData.Bin + +func DeployAutomationConsumerBenchmark(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *AutomationConsumerBenchmark, error) { + parsed, err := 
AutomationConsumerBenchmarkMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(AutomationConsumerBenchmarkBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &AutomationConsumerBenchmark{address: address, abi: *parsed, AutomationConsumerBenchmarkCaller: AutomationConsumerBenchmarkCaller{contract: contract}, AutomationConsumerBenchmarkTransactor: AutomationConsumerBenchmarkTransactor{contract: contract}, AutomationConsumerBenchmarkFilterer: AutomationConsumerBenchmarkFilterer{contract: contract}}, nil +} + +type AutomationConsumerBenchmark struct { + address common.Address + abi abi.ABI + AutomationConsumerBenchmarkCaller + AutomationConsumerBenchmarkTransactor + AutomationConsumerBenchmarkFilterer +} + +type AutomationConsumerBenchmarkCaller struct { + contract *bind.BoundContract +} + +type AutomationConsumerBenchmarkTransactor struct { + contract *bind.BoundContract +} + +type AutomationConsumerBenchmarkFilterer struct { + contract *bind.BoundContract +} + +type AutomationConsumerBenchmarkSession struct { + Contract *AutomationConsumerBenchmark + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AutomationConsumerBenchmarkCallerSession struct { + Contract *AutomationConsumerBenchmarkCaller + CallOpts bind.CallOpts +} + +type AutomationConsumerBenchmarkTransactorSession struct { + Contract *AutomationConsumerBenchmarkTransactor + TransactOpts bind.TransactOpts +} + +type AutomationConsumerBenchmarkRaw struct { + Contract *AutomationConsumerBenchmark +} + +type AutomationConsumerBenchmarkCallerRaw struct { + Contract *AutomationConsumerBenchmarkCaller +} + +type AutomationConsumerBenchmarkTransactorRaw struct { + Contract *AutomationConsumerBenchmarkTransactor +} + +func 
NewAutomationConsumerBenchmark(address common.Address, backend bind.ContractBackend) (*AutomationConsumerBenchmark, error) { + abi, err := abi.JSON(strings.NewReader(AutomationConsumerBenchmarkABI)) + if err != nil { + return nil, err + } + contract, err := bindAutomationConsumerBenchmark(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AutomationConsumerBenchmark{address: address, abi: abi, AutomationConsumerBenchmarkCaller: AutomationConsumerBenchmarkCaller{contract: contract}, AutomationConsumerBenchmarkTransactor: AutomationConsumerBenchmarkTransactor{contract: contract}, AutomationConsumerBenchmarkFilterer: AutomationConsumerBenchmarkFilterer{contract: contract}}, nil +} + +func NewAutomationConsumerBenchmarkCaller(address common.Address, caller bind.ContractCaller) (*AutomationConsumerBenchmarkCaller, error) { + contract, err := bindAutomationConsumerBenchmark(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AutomationConsumerBenchmarkCaller{contract: contract}, nil +} + +func NewAutomationConsumerBenchmarkTransactor(address common.Address, transactor bind.ContractTransactor) (*AutomationConsumerBenchmarkTransactor, error) { + contract, err := bindAutomationConsumerBenchmark(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AutomationConsumerBenchmarkTransactor{contract: contract}, nil +} + +func NewAutomationConsumerBenchmarkFilterer(address common.Address, filterer bind.ContractFilterer) (*AutomationConsumerBenchmarkFilterer, error) { + contract, err := bindAutomationConsumerBenchmark(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AutomationConsumerBenchmarkFilterer{contract: contract}, nil +} + +func bindAutomationConsumerBenchmark(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := 
AutomationConsumerBenchmarkMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationConsumerBenchmark.Contract.AutomationConsumerBenchmarkCaller.contract.Call(opts, result, method, params...) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationConsumerBenchmark.Contract.AutomationConsumerBenchmarkTransactor.contract.Transfer(opts) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationConsumerBenchmark.Contract.AutomationConsumerBenchmarkTransactor.contract.Transact(opts, method, params...) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationConsumerBenchmark.Contract.contract.Call(opts, result, method, params...) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationConsumerBenchmark.Contract.contract.Transfer(opts) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationConsumerBenchmark.Contract.contract.Transact(opts, method, params...) 
+} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCaller) CheckEligible(opts *bind.CallOpts, id *big.Int, arg1 *big.Int, firstEligibleBuffer *big.Int) (bool, error) { + var out []interface{} + err := _AutomationConsumerBenchmark.contract.Call(opts, &out, "checkEligible", id, arg1, firstEligibleBuffer) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkSession) CheckEligible(id *big.Int, arg1 *big.Int, firstEligibleBuffer *big.Int) (bool, error) { + return _AutomationConsumerBenchmark.Contract.CheckEligible(&_AutomationConsumerBenchmark.CallOpts, id, arg1, firstEligibleBuffer) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCallerSession) CheckEligible(id *big.Int, arg1 *big.Int, firstEligibleBuffer *big.Int) (bool, error) { + return _AutomationConsumerBenchmark.Contract.CheckEligible(&_AutomationConsumerBenchmark.CallOpts, id, arg1, firstEligibleBuffer) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCaller) CheckUpkeep(opts *bind.CallOpts, checkData []byte) (bool, []byte, error) { + var out []interface{} + err := _AutomationConsumerBenchmark.contract.Call(opts, &out, "checkUpkeep", checkData) + + if err != nil { + return *new(bool), *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out1 := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return out0, out1, err + +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkSession) CheckUpkeep(checkData []byte) (bool, []byte, error) { + return _AutomationConsumerBenchmark.Contract.CheckUpkeep(&_AutomationConsumerBenchmark.CallOpts, checkData) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCallerSession) CheckUpkeep(checkData []byte) (bool, []byte, error) { + return 
_AutomationConsumerBenchmark.Contract.CheckUpkeep(&_AutomationConsumerBenchmark.CallOpts, checkData) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCaller) Count(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _AutomationConsumerBenchmark.contract.Call(opts, &out, "count", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkSession) Count(arg0 *big.Int) (*big.Int, error) { + return _AutomationConsumerBenchmark.Contract.Count(&_AutomationConsumerBenchmark.CallOpts, arg0) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCallerSession) Count(arg0 *big.Int) (*big.Int, error) { + return _AutomationConsumerBenchmark.Contract.Count(&_AutomationConsumerBenchmark.CallOpts, arg0) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCaller) DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) { + var out []interface{} + err := _AutomationConsumerBenchmark.contract.Call(opts, &out, "dummyMap", arg0) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkSession) DummyMap(arg0 [32]byte) (bool, error) { + return _AutomationConsumerBenchmark.Contract.DummyMap(&_AutomationConsumerBenchmark.CallOpts, arg0) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCallerSession) DummyMap(arg0 [32]byte) (bool, error) { + return _AutomationConsumerBenchmark.Contract.DummyMap(&_AutomationConsumerBenchmark.CallOpts, arg0) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCaller) GetCountPerforms(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _AutomationConsumerBenchmark.contract.Call(opts, &out, "getCountPerforms", 
id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkSession) GetCountPerforms(id *big.Int) (*big.Int, error) { + return _AutomationConsumerBenchmark.Contract.GetCountPerforms(&_AutomationConsumerBenchmark.CallOpts, id) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCallerSession) GetCountPerforms(id *big.Int) (*big.Int, error) { + return _AutomationConsumerBenchmark.Contract.GetCountPerforms(&_AutomationConsumerBenchmark.CallOpts, id) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCaller) InitialCall(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _AutomationConsumerBenchmark.contract.Call(opts, &out, "initialCall", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkSession) InitialCall(arg0 *big.Int) (*big.Int, error) { + return _AutomationConsumerBenchmark.Contract.InitialCall(&_AutomationConsumerBenchmark.CallOpts, arg0) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCallerSession) InitialCall(arg0 *big.Int) (*big.Int, error) { + return _AutomationConsumerBenchmark.Contract.InitialCall(&_AutomationConsumerBenchmark.CallOpts, arg0) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCaller) NextEligible(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _AutomationConsumerBenchmark.contract.Call(opts, &out, "nextEligible", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkSession) NextEligible(arg0 *big.Int) (*big.Int, 
error) { + return _AutomationConsumerBenchmark.Contract.NextEligible(&_AutomationConsumerBenchmark.CallOpts, arg0) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkCallerSession) NextEligible(arg0 *big.Int) (*big.Int, error) { + return _AutomationConsumerBenchmark.Contract.NextEligible(&_AutomationConsumerBenchmark.CallOpts, arg0) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _AutomationConsumerBenchmark.contract.Transact(opts, "performUpkeep", performData) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _AutomationConsumerBenchmark.Contract.PerformUpkeep(&_AutomationConsumerBenchmark.TransactOpts, performData) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _AutomationConsumerBenchmark.Contract.PerformUpkeep(&_AutomationConsumerBenchmark.TransactOpts, performData) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkTransactor) Reset(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationConsumerBenchmark.contract.Transact(opts, "reset") +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkSession) Reset() (*types.Transaction, error) { + return _AutomationConsumerBenchmark.Contract.Reset(&_AutomationConsumerBenchmark.TransactOpts) +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkTransactorSession) Reset() (*types.Transaction, error) { + return _AutomationConsumerBenchmark.Contract.Reset(&_AutomationConsumerBenchmark.TransactOpts) +} + +type AutomationConsumerBenchmarkPerformingUpkeepIterator struct { + Event *AutomationConsumerBenchmarkPerformingUpkeep + + contract *bind.BoundContract + event string + + logs chan types.Log + sub 
ethereum.Subscription + done bool + fail error +} + +func (it *AutomationConsumerBenchmarkPerformingUpkeepIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationConsumerBenchmarkPerformingUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationConsumerBenchmarkPerformingUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationConsumerBenchmarkPerformingUpkeepIterator) Error() error { + return it.fail +} + +func (it *AutomationConsumerBenchmarkPerformingUpkeepIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationConsumerBenchmarkPerformingUpkeep struct { + Id *big.Int + From common.Address + InitialCall *big.Int + NextEligible *big.Int + BlockNumber *big.Int + Raw types.Log +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkFilterer) FilterPerformingUpkeep(opts *bind.FilterOpts) (*AutomationConsumerBenchmarkPerformingUpkeepIterator, error) { + + logs, sub, err := _AutomationConsumerBenchmark.contract.FilterLogs(opts, "PerformingUpkeep") + if err != nil { + return nil, err + } + return &AutomationConsumerBenchmarkPerformingUpkeepIterator{contract: _AutomationConsumerBenchmark.contract, event: "PerformingUpkeep", logs: logs, sub: sub}, nil +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkFilterer) WatchPerformingUpkeep(opts *bind.WatchOpts, sink chan<- *AutomationConsumerBenchmarkPerformingUpkeep) (event.Subscription, error) { + + logs, sub, err := _AutomationConsumerBenchmark.contract.WatchLogs(opts, 
"PerformingUpkeep") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationConsumerBenchmarkPerformingUpkeep) + if err := _AutomationConsumerBenchmark.contract.UnpackLog(event, "PerformingUpkeep", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmarkFilterer) ParsePerformingUpkeep(log types.Log) (*AutomationConsumerBenchmarkPerformingUpkeep, error) { + event := new(AutomationConsumerBenchmarkPerformingUpkeep) + if err := _AutomationConsumerBenchmark.contract.UnpackLog(event, "PerformingUpkeep", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmark) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _AutomationConsumerBenchmark.abi.Events["PerformingUpkeep"].ID: + return _AutomationConsumerBenchmark.ParsePerformingUpkeep(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (AutomationConsumerBenchmarkPerformingUpkeep) Topic() common.Hash { + return common.HexToHash("0x39223708d1655effd0be3f9a99a7e1d1aadd9fb456f0bfc4c2a4f50b2484a367") +} + +func (_AutomationConsumerBenchmark *AutomationConsumerBenchmark) Address() common.Address { + return _AutomationConsumerBenchmark.address +} + +type AutomationConsumerBenchmarkInterface interface { + CheckEligible(opts *bind.CallOpts, id *big.Int, arg1 *big.Int, firstEligibleBuffer *big.Int) (bool, error) + + CheckUpkeep(opts *bind.CallOpts, checkData []byte) (bool, []byte, error) + + Count(opts *bind.CallOpts, arg0 *big.Int) 
(*big.Int, error) + + DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) + + GetCountPerforms(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + InitialCall(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + NextEligible(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) + + Reset(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterPerformingUpkeep(opts *bind.FilterOpts) (*AutomationConsumerBenchmarkPerformingUpkeepIterator, error) + + WatchPerformingUpkeep(opts *bind.WatchOpts, sink chan<- *AutomationConsumerBenchmarkPerformingUpkeep) (event.Subscription, error) + + ParsePerformingUpkeep(log types.Log) (*AutomationConsumerBenchmarkPerformingUpkeep, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/automation_forwarder_logic/automation_forwarder_logic.go b/core/gethwrappers/generated/automation_forwarder_logic/automation_forwarder_logic.go new file mode 100644 index 00000000..8b35a68c --- /dev/null +++ b/core/gethwrappers/generated/automation_forwarder_logic/automation_forwarder_logic.go @@ -0,0 +1,240 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package automation_forwarder_logic + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var AutomationForwarderLogicMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"getRegistry\",\"outputs\":[{\"internalType\":\"contractIAutomationRegistryConsumer\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newRegistry\",\"type\":\"address\"}],\"name\":\"updateRegistry\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b506101f6806100206000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c8063181f5a77146100465780631a5da6c8146100985780635ab1bd53146100ad575b600080fd5b6100826040518060400160405280601981526020017f4175746f6d6174696f6e466f7277617264657220312e302e300000000000000081525081565b60405161008f9190610140565b60405180910390f35b6100ab6100a63660046101ac565b6100d5565b005b60005460405173ffffffffffffffffffffffffffffffffffffffff909116815260200161008f565b60005473ffffffffffffffffffffffffffffffffffffffff1633146100f957600080fd5b600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b600060208083528351808285015260005b8181101561016d57858101830151858201604001528201610151565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b6000602082840312156101be57600080fd5b813573ffffffffffffffffffffffffffffffffffffffff811681146101e257600080fd5b939250505056fea164736f6c6343000810000a", +} + +var AutomationForwarderLogicABI = AutomationForwarderLogicMetaData.ABI + +var AutomationForwarderLogicBin = AutomationForwarderLogicMetaData.Bin + +func DeployAutomationForwarderLogic(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *AutomationForwarderLogic, error) { + parsed, err := AutomationForwarderLogicMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(AutomationForwarderLogicBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &AutomationForwarderLogic{address: address, abi: *parsed, AutomationForwarderLogicCaller: AutomationForwarderLogicCaller{contract: contract}, AutomationForwarderLogicTransactor: 
AutomationForwarderLogicTransactor{contract: contract}, AutomationForwarderLogicFilterer: AutomationForwarderLogicFilterer{contract: contract}}, nil +} + +type AutomationForwarderLogic struct { + address common.Address + abi abi.ABI + AutomationForwarderLogicCaller + AutomationForwarderLogicTransactor + AutomationForwarderLogicFilterer +} + +type AutomationForwarderLogicCaller struct { + contract *bind.BoundContract +} + +type AutomationForwarderLogicTransactor struct { + contract *bind.BoundContract +} + +type AutomationForwarderLogicFilterer struct { + contract *bind.BoundContract +} + +type AutomationForwarderLogicSession struct { + Contract *AutomationForwarderLogic + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AutomationForwarderLogicCallerSession struct { + Contract *AutomationForwarderLogicCaller + CallOpts bind.CallOpts +} + +type AutomationForwarderLogicTransactorSession struct { + Contract *AutomationForwarderLogicTransactor + TransactOpts bind.TransactOpts +} + +type AutomationForwarderLogicRaw struct { + Contract *AutomationForwarderLogic +} + +type AutomationForwarderLogicCallerRaw struct { + Contract *AutomationForwarderLogicCaller +} + +type AutomationForwarderLogicTransactorRaw struct { + Contract *AutomationForwarderLogicTransactor +} + +func NewAutomationForwarderLogic(address common.Address, backend bind.ContractBackend) (*AutomationForwarderLogic, error) { + abi, err := abi.JSON(strings.NewReader(AutomationForwarderLogicABI)) + if err != nil { + return nil, err + } + contract, err := bindAutomationForwarderLogic(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AutomationForwarderLogic{address: address, abi: abi, AutomationForwarderLogicCaller: AutomationForwarderLogicCaller{contract: contract}, AutomationForwarderLogicTransactor: AutomationForwarderLogicTransactor{contract: contract}, AutomationForwarderLogicFilterer: AutomationForwarderLogicFilterer{contract: contract}}, nil +} + +func 
NewAutomationForwarderLogicCaller(address common.Address, caller bind.ContractCaller) (*AutomationForwarderLogicCaller, error) { + contract, err := bindAutomationForwarderLogic(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AutomationForwarderLogicCaller{contract: contract}, nil +} + +func NewAutomationForwarderLogicTransactor(address common.Address, transactor bind.ContractTransactor) (*AutomationForwarderLogicTransactor, error) { + contract, err := bindAutomationForwarderLogic(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AutomationForwarderLogicTransactor{contract: contract}, nil +} + +func NewAutomationForwarderLogicFilterer(address common.Address, filterer bind.ContractFilterer) (*AutomationForwarderLogicFilterer, error) { + contract, err := bindAutomationForwarderLogic(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AutomationForwarderLogicFilterer{contract: contract}, nil +} + +func bindAutomationForwarderLogic(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := AutomationForwarderLogicMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationForwarderLogic.Contract.AutomationForwarderLogicCaller.contract.Call(opts, result, method, params...) 
+} + +func (_AutomationForwarderLogic *AutomationForwarderLogicRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationForwarderLogic.Contract.AutomationForwarderLogicTransactor.contract.Transfer(opts) +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationForwarderLogic.Contract.AutomationForwarderLogicTransactor.contract.Transact(opts, method, params...) +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationForwarderLogic.Contract.contract.Call(opts, result, method, params...) +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationForwarderLogic.Contract.contract.Transfer(opts) +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationForwarderLogic.Contract.contract.Transact(opts, method, params...) 
+} + +func (_AutomationForwarderLogic *AutomationForwarderLogicCaller) GetRegistry(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationForwarderLogic.contract.Call(opts, &out, "getRegistry") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicSession) GetRegistry() (common.Address, error) { + return _AutomationForwarderLogic.Contract.GetRegistry(&_AutomationForwarderLogic.CallOpts) +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicCallerSession) GetRegistry() (common.Address, error) { + return _AutomationForwarderLogic.Contract.GetRegistry(&_AutomationForwarderLogic.CallOpts) +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _AutomationForwarderLogic.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicSession) TypeAndVersion() (string, error) { + return _AutomationForwarderLogic.Contract.TypeAndVersion(&_AutomationForwarderLogic.CallOpts) +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicCallerSession) TypeAndVersion() (string, error) { + return _AutomationForwarderLogic.Contract.TypeAndVersion(&_AutomationForwarderLogic.CallOpts) +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicTransactor) UpdateRegistry(opts *bind.TransactOpts, newRegistry common.Address) (*types.Transaction, error) { + return _AutomationForwarderLogic.contract.Transact(opts, "updateRegistry", newRegistry) +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicSession) UpdateRegistry(newRegistry common.Address) (*types.Transaction, error) { + return 
_AutomationForwarderLogic.Contract.UpdateRegistry(&_AutomationForwarderLogic.TransactOpts, newRegistry) +} + +func (_AutomationForwarderLogic *AutomationForwarderLogicTransactorSession) UpdateRegistry(newRegistry common.Address) (*types.Transaction, error) { + return _AutomationForwarderLogic.Contract.UpdateRegistry(&_AutomationForwarderLogic.TransactOpts, newRegistry) +} + +func (_AutomationForwarderLogic *AutomationForwarderLogic) Address() common.Address { + return _AutomationForwarderLogic.address +} + +type AutomationForwarderLogicInterface interface { + GetRegistry(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + UpdateRegistry(opts *bind.TransactOpts, newRegistry common.Address) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/automation_registrar_wrapper2_1/automation_registrar_wrapper2_1.go b/core/gethwrappers/generated/automation_registrar_wrapper2_1/automation_registrar_wrapper2_1.go new file mode 100644 index 00000000..da2c8fd9 --- /dev/null +++ b/core/gethwrappers/generated/automation_registrar_wrapper2_1/automation_registrar_wrapper2_1.go @@ -0,0 +1,1685 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package automation_registrar_wrapper2_1 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type AutomationRegistrar21InitialTriggerConfig struct { + TriggerType uint8 + AutoApproveType uint8 + AutoApproveMaxAllowed uint32 +} + +type AutomationRegistrar21RegistrationParams struct { + Name string + EncryptedEmail []byte + UpkeepContract common.Address + GasLimit uint32 + AdminAddress common.Address + TriggerType uint8 + CheckData []byte + TriggerConfig []byte + OffchainConfig []byte + Amount *big.Int +} + +type AutomationRegistrar21TriggerRegistrationStorage struct { + AutoApproveType uint8 + AutoApproveMaxAllowed uint32 + ApprovedCount uint32 +} + +var AutomationRegistrarMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"PLIAddress\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"},{\"components\":[{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"enumAutomationRegistrar2_1.AutoApproveType\",\"name\":\"autoApproveType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"}],\"internalType\":\"structAutomationRegistrar2_1.InitialTriggerConfig[]\",\"name\":\"triggerConfigs\",\"type\":\"tuple[]\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AmountMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FunctionNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"HashMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientPayment\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidAdminAddress\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"LinkTransferFailed\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyAdminOrOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyLink\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistrationRequestFailed\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RequestNotFound\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"SenderMismatch\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"senderAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"allowed\",\"type\":\"bool\"}],\"name\":\"AutoApproveAllowedSenderSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"indexed\":false,\
"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"}],\"name\":\"ConfigChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"displayName\",\"type\":\"string\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"RegistrationApproved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"RegistrationRejected\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"encryptedEmail\",\"type\":\"bytes\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name
\":\"offchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"RegistrationRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"enumAutomationRegistrar2_1.AutoApproveType\",\"name\":\"autoApproveType\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"}],\"name\":\"TriggerConfigSet\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"approve\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"cancel\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"senderAddress
\",\"type\":\"address\"}],\"name\":\"getAutoApproveAllowedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"minPLIJuels\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"getPendingRequest\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"}],\"name\":\"getTriggerRegistrationDetails\",\"outputs\":[{\"components\":[{\"internalType\":\"enumAutomationRegistrar2_1.AutoApproveType\",\"name\":\"autoApproveType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"approvedCount\",\"type\":\"uint32\"}],\"internalType\":\"structAutomationRegistrar2_1.TriggerRegistrationStorage\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"encryptedEmail\",\"typ
e\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"register\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"encryptedEmail\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"internalType\":\"structAutomationRegistrar2_1.RegistrationParams\",\"name\":\"requestParams\",\"type\":\"tuple\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"senderAddress\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"allowed\",\"type\":\"bool\"}],\"name\":
\"setAutoApproveAllowedSender\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"enumAutomationRegistrar2_1.AutoApproveType\",\"name\":\"autoApproveType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"}],\"name\":\"setTriggerConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x60a06040523480156200001157600080fd5b5060405162002d8238038062002d8283398101604081905262000034916200043b565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be816200017a565b5050506001600160a01b038416608052620000da838362000225565b60005b81518110156200016f576200015a82828151811062000100576200010062000598565b60200260200101516000015183838151811062000121576200012162000598565b60200260200101516020015184848151811062000142576200014262000598565b6020026020010151604001516200029e60201b60201c565b806200016681620005ae565b915050620000dd565b50505050506200062f565b336001600160a01b03821603620001d45760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6200022f6200034c565b6040805180820182526001600160a01b0384168082526001600160601b0384166020928301819052600160a01b810282176004558351918252918101919091527f39ce5d867555f0b0183e358fce5b158e7ca4fecd7c01cb7e0e19f1e23285838a910160405180910390a15050565b620002a86200034c565b60ff83166000908152600360205260409020805483919060ff19166001836002811115620002da57620002da620005d6565b021790555060ff831660009081526003602052604090819020805464ffffffff00191661010063ffffffff851602179055517f830a6d06a4e2caac67eba04323de22bdb04f032dd8b3d6a0c52b503d9a7036a3906200033f90859085908590620005ec565b60405180910390a1505050565b6000546001600160a01b03163314620003a85760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000082565b565b80516001600160a01b0381168114620003c257600080fd5b919050565b634e487b7160e01b600052604160045260246000fd5b60405160608101600
1600160401b0381118282101715620004025762000402620003c7565b60405290565b604051601f8201601f191681016001600160401b0381118282101715620004335762000433620003c7565b604052919050565b600080600080608085870312156200045257600080fd5b6200045d85620003aa565b935060206200046e818701620003aa565b604087810151919550906001600160601b03811681146200048e57600080fd5b606088810151919550906001600160401b0380821115620004ae57600080fd5b818a0191508a601f830112620004c357600080fd5b815181811115620004d857620004d8620003c7565b620004e8868260051b0162000408565b818152868101925090840283018601908c8211156200050657600080fd5b928601925b81841015620005875784848e031215620005255760008081fd5b6200052f620003dd565b845160ff81168114620005425760008081fd5b81528488015160038110620005575760008081fd5b818901528487015163ffffffff81168114620005735760008081fd5b81880152835292840192918601916200050b565b999c989b5096995050505050505050565b634e487b7160e01b600052603260045260246000fd5b600060018201620005cf57634e487b7160e01b600052601160045260246000fd5b5060010190565b634e487b7160e01b600052602160045260246000fd5b60ff8416815260608101600384106200061557634e487b7160e01b600052602160045260246000fd5b83602083015263ffffffff83166040830152949350505050565b6080516127146200066e60003960008181610177015281816105d601528181610887015281816109bd01528181610f0e015261171b01526127146000f3fe608060405234801561001057600080fd5b506004361061011b5760003560e01c8063856853e6116100b2578063b5ff5b4111610081578063c4d252f511610066578063c4d252f5146103e3578063e8d4070d146103f6578063f2fde38b1461040957600080fd5b8063b5ff5b4114610369578063c3f909d41461037c57600080fd5b8063856853e61461027857806388b12d551461028b5780638da5cb5b14610338578063a4c0ed361461035657600080fd5b80633f678e11116100ee5780633f678e11146101f35780636c4cdfc31461021457806379ba5097146102275780637e776f7f1461022f57600080fd5b8063181f5a77146101205780631b6b6d2314610172578063212d0884146101be578063367b9b4f146101de575b600080fd5b61015c6040518060400160405280601981526020017f4175746f6d6174696f6e52656769737472617220322e312e300000000000000081525081565b6040516
101699190611a74565b60405180910390f35b6101997f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610169565b6101d16101cc366004611aa4565b61041c565b6040516101699190611b29565b6101f16101ec366004611b9d565b6104a9565b005b610206610201366004611bd6565b61053b565b604051908152602001610169565b6101f1610222366004611c2e565b6106d3565b6101f161076d565b61026861023d366004611c63565b73ffffffffffffffffffffffffffffffffffffffff1660009081526005602052604090205460ff1690565b6040519015158152602001610169565b6101f1610286366004611de1565b61086f565b6102ff610299366004611f40565b60009081526002602090815260409182902082518084019093525473ffffffffffffffffffffffffffffffffffffffff8116808452740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff169290910182905291565b6040805173ffffffffffffffffffffffffffffffffffffffff90931683526bffffffffffffffffffffffff909116602083015201610169565b60005473ffffffffffffffffffffffffffffffffffffffff16610199565b6101f1610364366004611f59565b6109a5565b6101f1610377366004611fb5565b610ce3565b60408051808201825260045473ffffffffffffffffffffffffffffffffffffffff8116808352740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff16602092830181905283519182529181019190915201610169565b6101f16103f1366004611f40565b610dc2565b6101f1610404366004611ffe565b61104c565b6101f1610417366004611c63565b6112d9565b60408051606080820183526000808352602080840182905283850182905260ff86811683526003909152908490208451928301909452835492939192839116600281111561046c5761046c611abf565b600281111561047d5761047d611abf565b8152905463ffffffff610100820481166020840152650100000000009091041660409091015292915050565b6104b16112ed565b73ffffffffffffffffffffffffffffffffffffffff821660008181526005602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001685151590811790915591519182527f20c6237dac83526a849285a9f79d08a483291bdd3a056a0ef9ae94ecee1ad356910160405180910390a25050565b6004546000907401000000000
000000000000000000000000000000090046bffffffffffffffffffffffff1661057961014084016101208501612109565b6bffffffffffffffffffffffff1610156105bf576040517fcd1c886700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000166323b872dd333061060f61014087016101208801612109565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e086901b16815273ffffffffffffffffffffffffffffffffffffffff93841660048201529290911660248301526bffffffffffffffffffffffff1660448201526064016020604051808303816000875af1158015610696573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906106ba9190612124565b506106cd6106c783612141565b33611370565b92915050565b6106db6112ed565b60408051808201825273ffffffffffffffffffffffffffffffffffffffff84168082526bffffffffffffffffffffffff8416602092830181905274010000000000000000000000000000000000000000810282176004558351918252918101919091527f39ce5d867555f0b0183e358fce5b158e7ca4fecd7c01cb7e0e19f1e23285838a910160405180910390a15050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146107f3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146108de576040517f018d10be00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6109966040518061014001604052808e81526020018d8d8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525073fffff
fffffffffffffffffffffffffffffffffff808d16602083015263ffffffff8c1660408301528a16606082015260ff8916608082015260a0810188905260c0810187905260e081018690526bffffffffffffffffffffffff85166101009091015282611370565b50505050505050505050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610a14576040517f018d10be00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b81818080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050505060208101517fffffffff0000000000000000000000000000000000000000000000000000000081167f856853e60000000000000000000000000000000000000000000000000000000014610aca576040517fe3d6792100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8484846000610adc8260048186612276565b810190610ae991906122a0565b509950505050505050505050806bffffffffffffffffffffffff168414610b3c576040517f55e97b0d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8988886000610b4e8260048186612276565b810190610b5b91906122a0565b9a50505050505050505050508073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff1614610bcc576040517ff8c5638e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6004547401000000000000000000000000000000000000000090046bffffffffffffffffffffffff168d1015610c2e576040517fcd1c886700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60003073ffffffffffffffffffffffffffffffffffffffff168d8d604051610c579291906123dd565b600060405180830381855af49150503d8060008114610c92576040519150601f19603f3d011682016040523d82523d6000602084013e610c97565b606091505b5050905080610cd2576040517f649bf81000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b505050505050505050505050505050565b610ceb6112ed565b60ff8316600090815260036020526040902080548391907ffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffff00166001836002811115610d3857610d38611abf565b021790555060ff83166000908152600360205260409081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ff1661010063ffffffff851602179055517f830a6d06a4e2caac67eba04323de22bdb04f032dd8b3d6a0c52b503d9a7036a390610db5908590859085906123ed565b60405180910390a1505050565b60008181526002602090815260409182902082518084019093525473ffffffffffffffffffffffffffffffffffffffff8116808452740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff1691830191909152331480610e49575060005473ffffffffffffffffffffffffffffffffffffffff1633145b610e7f576040517f61685c2b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805173ffffffffffffffffffffffffffffffffffffffff16610ecd576040517f4b13b31e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260026020908152604080832083905583519184015190517fa9059cbb0000000000000000000000000000000000000000000000000000000081527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169263a9059cbb92610f859260040173ffffffffffffffffffffffffffffffffffffffff9290921682526bffffffffffffffffffffffff16602082015260400190565b6020604051808303816000875af1158015610fa4573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610fc89190612124565b90508061101c5781516040517fc2e4dce800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911660048201526024016107ea565b60405183907f3663fb28ebc87645eb972c9dad8521bf665c623f287e79f1c56f1eb374b82a2290600090a2505050565b6110546112ed565b60008181526002602090815260409182902082518084019093525473ffffffffffffffffffffffffffffffffffffffff8116808452740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff16918301919091526110ed576040517f4b13b31e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008b8b8b8b8b8b8b8b8b604051602001611
11099989796959493929190612461565b604051602081830303815290604052805190602001209050808314611161576040517f3f4d605300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60026000848152602001908152602001600020600080820160006101000a81549073ffffffffffffffffffffffffffffffffffffffff02191690556000820160146101000a8154906bffffffffffffffffffffffff021916905550506112c96040518061014001604052808f81526020016040518060200160405280600081525081526020018e73ffffffffffffffffffffffffffffffffffffffff1681526020018d63ffffffff1681526020018c73ffffffffffffffffffffffffffffffffffffffff1681526020018b60ff1681526020018a8a8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525060208082018a905260408051601f8a0183900483028101830182528981529201919089908990819084018382808284376000920191909152505050908252506020858101516bffffffffffffffffffffffff1691015282611647565b5050505050505050505050505050565b6112e16112ed565b6112ea81611876565b50565b60005473ffffffffffffffffffffffffffffffffffffffff16331461136e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016107ea565b565b608082015160009073ffffffffffffffffffffffffffffffffffffffff166113c4576040517f05bb467c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008360400151846060015185608001518660a001518760c001518860e0015189610100015160405160200161140097969594939291906124e7565b604051602081830303815290604052805190602001209050836040015173ffffffffffffffffffffffffffffffffffffffff16817f7684390ebb103102f7f48c71439c2408713f8d437782a6fab2756acc0e42c1b786600001518760200151886060015189608001518a60a001518b60e001518c61010001518d60c001518e610120015160405161149999989796959493929190612569565b60405180910390a360a084015160ff9081166000908152600360205260408082208151606081019092528054929361151c9383911660028111156114df576114df611abf565b60028111156114f05
76114f0611abf565b8152905463ffffffff61010082048116602084015265010000000000909104166040909101528561196b565b156115845760a085015160ff166000908152600360205260409020805465010000000000900463ffffffff1690600561155483612653565b91906101000a81548163ffffffff021916908363ffffffff1602179055505061157d8583611647565b905061163f565b61012085015160008381526002602052604081205490916115ca917401000000000000000000000000000000000000000090046bffffffffffffffffffffffff16612676565b604080518082018252608089015173ffffffffffffffffffffffffffffffffffffffff90811682526bffffffffffffffffffffffff9384166020808401918252600089815260029091529390932091519251909316740100000000000000000000000000000000000000000291909216179055505b949350505050565b600480546040808501516060860151608087015160a088015160c089015160e08a01516101008b015196517f28f32f3800000000000000000000000000000000000000000000000000000000815260009973ffffffffffffffffffffffffffffffffffffffff909916988a988a986328f32f38986116d29891979096919590949193909291016124e7565b6020604051808303816000875af11580156116f1573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061171591906126a2565b905060007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16634000aea0848861012001518560405160200161176f91815260200190565b6040516020818303038152906040526040518463ffffffff1660e01b815260040161179c939291906126bb565b6020604051808303816000875af11580156117bb573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906117df9190612124565b905080611830576040517fc2e4dce800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff841660048201526024016107ea565b81857fb9a292fb7e3edd920cd2d2829a3615a640c43fd7de0a0820aa0668feb4c37d4b88600001516040516118659190611a74565b60405180910390a350949350505050565b3373ffffffffffffffffffffffffffffffffffffffff8216036118f5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207
472616e7366657220746f2073656c6600000000000000000060448201526064016107ea565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000808351600281111561198157611981611abf565b0361198e575060006106cd565b6001835160028111156119a3576119a3611abf565b1480156119d6575073ffffffffffffffffffffffffffffffffffffffff821660009081526005602052604090205460ff16155b156119e3575060006106cd565b826020015163ffffffff16836040015163ffffffff161015611a07575060016106cd565b50600092915050565b6000815180845260005b81811015611a3657602081850181015186830182015201611a1a565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081526000611a876020830184611a10565b9392505050565b803560ff81168114611a9f57600080fd5b919050565b600060208284031215611ab657600080fd5b611a8782611a8e565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b60038110611b25577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b9052565b6000606082019050611b3c828451611aee565b602083015163ffffffff8082166020850152806040860151166040850152505092915050565b73ffffffffffffffffffffffffffffffffffffffff811681146112ea57600080fd5b8035611a9f81611b62565b80151581146112ea57600080fd5b60008060408385031215611bb057600080fd5b8235611bbb81611b62565b91506020830135611bcb81611b8f565b809150509250929050565b600060208284031215611be857600080fd5b813567ffffffffffffffff811115611bff57600080fd5b82016101408185031215611a8757600080fd5b80356bffffffffffffffffffffffff81168114611a9f57600080fd5b60008060408385031215611c4157600080fd5b8235611c4c81611b62565b9150611c5a60208401611c12565b90509250929050565b600060208284031215611c7557600080fd5b8135611a8781611b62565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60405161014081016
7ffffffffffffffff81118282101715611cd357611cd3611c80565b60405290565b600082601f830112611cea57600080fd5b813567ffffffffffffffff80821115611d0557611d05611c80565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908282118183101715611d4b57611d4b611c80565b81604052838152866020858801011115611d6457600080fd5b836020870160208301376000602085830101528094505050505092915050565b60008083601f840112611d9657600080fd5b50813567ffffffffffffffff811115611dae57600080fd5b602083019150836020828501011115611dc657600080fd5b9250929050565b803563ffffffff81168114611a9f57600080fd5b6000806000806000806000806000806000806101608d8f031215611e0457600080fd5b67ffffffffffffffff8d351115611e1a57600080fd5b611e278e8e358f01611cd9565b9b5067ffffffffffffffff60208e01351115611e4257600080fd5b611e528e60208f01358f01611d84565b909b509950611e6360408e01611b84565b9850611e7160608e01611dcd565b9750611e7f60808e01611b84565b9650611e8d60a08e01611a8e565b955067ffffffffffffffff60c08e01351115611ea857600080fd5b611eb88e60c08f01358f01611cd9565b945067ffffffffffffffff60e08e01351115611ed357600080fd5b611ee38e60e08f01358f01611cd9565b935067ffffffffffffffff6101008e01351115611eff57600080fd5b611f108e6101008f01358f01611cd9565b9250611f1f6101208e01611c12565b9150611f2e6101408e01611b84565b90509295989b509295989b509295989b565b600060208284031215611f5257600080fd5b5035919050565b60008060008060608587031215611f6f57600080fd5b8435611f7a81611b62565b935060208501359250604085013567ffffffffffffffff811115611f9d57600080fd5b611fa987828801611d84565b95989497509550505050565b600080600060608486031215611fca57600080fd5b611fd384611a8e565b9250602084013560038110611fe757600080fd5b9150611ff560408501611dcd565b90509250925092565b60008060008060008060008060008060006101208c8e03121561202057600080fd5b67ffffffffffffffff808d35111561203757600080fd5b6120448e8e358f01611cd9565b9b5061205260208e01611b84565b9a5061206060408e01611dcd565b995061206e60608e01611b84565b985061207c60808e01611a8e565b97508060a08e0135111561208f57600080fd5b61209f8e60a08f01358f01611d84565b9
09750955060c08d01358110156120b557600080fd5b6120c58e60c08f01358f01611cd9565b94508060e08e013511156120d857600080fd5b506120e98d60e08e01358e01611d84565b81945080935050506101008c013590509295989b509295989b9093969950565b60006020828403121561211b57600080fd5b611a8782611c12565b60006020828403121561213657600080fd5b8151611a8781611b8f565b6000610140823603121561215457600080fd5b61215c611caf565b823567ffffffffffffffff8082111561217457600080fd5b61218036838701611cd9565b8352602085013591508082111561219657600080fd5b6121a236838701611cd9565b60208401526121b360408601611b84565b60408401526121c460608601611dcd565b60608401526121d560808601611b84565b60808401526121e660a08601611a8e565b60a084015260c08501359150808211156121ff57600080fd5b61220b36838701611cd9565b60c084015260e085013591508082111561222457600080fd5b61223036838701611cd9565b60e08401526101009150818501358181111561224b57600080fd5b61225736828801611cd9565b8385015250505061012061226c818501611c12565b9082015292915050565b6000808585111561228657600080fd5b8386111561229357600080fd5b5050820193919092039150565b60008060008060008060008060008060006101608c8e0312156122c257600080fd5b67ffffffffffffffff808d3511156122d957600080fd5b6122e68e8e358f01611cd9565b9b508060208e013511156122f957600080fd5b6123098e60208f01358f01611cd9565b9a5061231760408e01611b84565b995061232560608e01611dcd565b985061233360808e01611b84565b975061234160a08e01611a8e565b96508060c08e0135111561235457600080fd5b6123648e60c08f01358f01611cd9565b95508060e08e0135111561237757600080fd5b6123878e60e08f01358f01611cd9565b9450806101008e0135111561239b57600080fd5b506123ad8d6101008e01358e01611cd9565b92506123bc6101208d01611c12565b91506123cb6101408d01611b84565b90509295989b509295989b9093969950565b8183823760009101908152919050565b60ff84168152606081016124046020830185611aee565b63ffffffff83166040830152949350505050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b600073ffffffffffffffffffffffffffffffffffffffff808c16835263ffffffff8b166020840
152808a1660408401525060ff8816606083015260e060808301526124b060e083018789612418565b82810360a08401526124c28187611a10565b905082810360c08401526124d7818587612418565b9c9b505050505050505050505050565b600073ffffffffffffffffffffffffffffffffffffffff808a16835263ffffffff8916602084015280881660408401525060ff8616606083015260e0608083015261253560e0830186611a10565b82810360a08401526125478186611a10565b905082810360c084015261255b8185611a10565b9a9950505050505050505050565b600061012080835261257d8184018d611a10565b90508281036020840152612591818c611a10565b905063ffffffff8a16604084015273ffffffffffffffffffffffffffffffffffffffff8916606084015260ff8816608084015282810360a08401526125d68188611a10565b905082810360c08401526125ea8187611a10565b905082810360e08401526125fe8186611a10565b9150506bffffffffffffffffffffffff83166101008301529a9950505050505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600063ffffffff80831681810361266c5761266c612624565b6001019392505050565b6bffffffffffffffffffffffff81811683821601908082111561269b5761269b612624565b5092915050565b6000602082840312156126b457600080fd5b5051919050565b73ffffffffffffffffffffffffffffffffffffffff841681526bffffffffffffffffffffffff831660208201526060604082015260006126fe6060830184611a10565b9594505050505056fea164736f6c6343000810000a", +} + +var AutomationRegistrarABI = AutomationRegistrarMetaData.ABI + +var AutomationRegistrarBin = AutomationRegistrarMetaData.Bin + +func DeployAutomationRegistrar(auth *bind.TransactOpts, backend bind.ContractBackend, PLIAddress common.Address, keeperRegistry common.Address, minPLIJuels *big.Int, triggerConfigs []AutomationRegistrar21InitialTriggerConfig) (common.Address, *types.Transaction, *AutomationRegistrar, error) { + parsed, err := AutomationRegistrarMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := 
bind.DeployContract(auth, *parsed, common.FromHex(AutomationRegistrarBin), backend, PLIAddress, keeperRegistry, minPLIJuels, triggerConfigs) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &AutomationRegistrar{address: address, abi: *parsed, AutomationRegistrarCaller: AutomationRegistrarCaller{contract: contract}, AutomationRegistrarTransactor: AutomationRegistrarTransactor{contract: contract}, AutomationRegistrarFilterer: AutomationRegistrarFilterer{contract: contract}}, nil +} + +type AutomationRegistrar struct { + address common.Address + abi abi.ABI + AutomationRegistrarCaller + AutomationRegistrarTransactor + AutomationRegistrarFilterer +} + +type AutomationRegistrarCaller struct { + contract *bind.BoundContract +} + +type AutomationRegistrarTransactor struct { + contract *bind.BoundContract +} + +type AutomationRegistrarFilterer struct { + contract *bind.BoundContract +} + +type AutomationRegistrarSession struct { + Contract *AutomationRegistrar + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AutomationRegistrarCallerSession struct { + Contract *AutomationRegistrarCaller + CallOpts bind.CallOpts +} + +type AutomationRegistrarTransactorSession struct { + Contract *AutomationRegistrarTransactor + TransactOpts bind.TransactOpts +} + +type AutomationRegistrarRaw struct { + Contract *AutomationRegistrar +} + +type AutomationRegistrarCallerRaw struct { + Contract *AutomationRegistrarCaller +} + +type AutomationRegistrarTransactorRaw struct { + Contract *AutomationRegistrarTransactor +} + +func NewAutomationRegistrar(address common.Address, backend bind.ContractBackend) (*AutomationRegistrar, error) { + abi, err := abi.JSON(strings.NewReader(AutomationRegistrarABI)) + if err != nil { + return nil, err + } + contract, err := bindAutomationRegistrar(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AutomationRegistrar{address: address, abi: abi, AutomationRegistrarCaller: 
AutomationRegistrarCaller{contract: contract}, AutomationRegistrarTransactor: AutomationRegistrarTransactor{contract: contract}, AutomationRegistrarFilterer: AutomationRegistrarFilterer{contract: contract}}, nil +} + +func NewAutomationRegistrarCaller(address common.Address, caller bind.ContractCaller) (*AutomationRegistrarCaller, error) { + contract, err := bindAutomationRegistrar(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AutomationRegistrarCaller{contract: contract}, nil +} + +func NewAutomationRegistrarTransactor(address common.Address, transactor bind.ContractTransactor) (*AutomationRegistrarTransactor, error) { + contract, err := bindAutomationRegistrar(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AutomationRegistrarTransactor{contract: contract}, nil +} + +func NewAutomationRegistrarFilterer(address common.Address, filterer bind.ContractFilterer) (*AutomationRegistrarFilterer, error) { + contract, err := bindAutomationRegistrar(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AutomationRegistrarFilterer{contract: contract}, nil +} + +func bindAutomationRegistrar(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := AutomationRegistrarMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AutomationRegistrar *AutomationRegistrarRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationRegistrar.Contract.AutomationRegistrarCaller.contract.Call(opts, result, method, params...) 
+} + +func (_AutomationRegistrar *AutomationRegistrarRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.AutomationRegistrarTransactor.contract.Transfer(opts) +} + +func (_AutomationRegistrar *AutomationRegistrarRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.AutomationRegistrarTransactor.contract.Transact(opts, method, params...) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationRegistrar.Contract.contract.Call(opts, result, method, params...) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.contract.Transfer(opts) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.contract.Transact(opts, method, params...) 
+} + +func (_AutomationRegistrar *AutomationRegistrarCaller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) PLI() (common.Address, error) { + return _AutomationRegistrar.Contract.PLI(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerSession) PLI() (common.Address, error) { + return _AutomationRegistrar.Contract.PLI(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarCaller) GetAutoApproveAllowedSender(opts *bind.CallOpts, senderAddress common.Address) (bool, error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "getAutoApproveAllowedSender", senderAddress) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) GetAutoApproveAllowedSender(senderAddress common.Address) (bool, error) { + return _AutomationRegistrar.Contract.GetAutoApproveAllowedSender(&_AutomationRegistrar.CallOpts, senderAddress) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerSession) GetAutoApproveAllowedSender(senderAddress common.Address) (bool, error) { + return _AutomationRegistrar.Contract.GetAutoApproveAllowedSender(&_AutomationRegistrar.CallOpts, senderAddress) +} + +func (_AutomationRegistrar *AutomationRegistrarCaller) GetConfig(opts *bind.CallOpts) (GetConfig, + + error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "getConfig") + + outstruct := new(GetConfig) + if err != nil { + return *outstruct, err + } + + outstruct.KeeperRegistry = *abi.ConvertType(out[0], 
new(common.Address)).(*common.Address) + outstruct.MinPLIJuels = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) GetConfig() (GetConfig, + + error) { + return _AutomationRegistrar.Contract.GetConfig(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerSession) GetConfig() (GetConfig, + + error) { + return _AutomationRegistrar.Contract.GetConfig(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarCaller) GetPendingRequest(opts *bind.CallOpts, hash [32]byte) (common.Address, *big.Int, error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "getPendingRequest", hash) + + if err != nil { + return *new(common.Address), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) GetPendingRequest(hash [32]byte) (common.Address, *big.Int, error) { + return _AutomationRegistrar.Contract.GetPendingRequest(&_AutomationRegistrar.CallOpts, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerSession) GetPendingRequest(hash [32]byte) (common.Address, *big.Int, error) { + return _AutomationRegistrar.Contract.GetPendingRequest(&_AutomationRegistrar.CallOpts, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarCaller) GetTriggerRegistrationDetails(opts *bind.CallOpts, triggerType uint8) (AutomationRegistrar21TriggerRegistrationStorage, error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "getTriggerRegistrationDetails", triggerType) + + if err != nil { + return *new(AutomationRegistrar21TriggerRegistrationStorage), err + } + + out0 := *abi.ConvertType(out[0], 
new(AutomationRegistrar21TriggerRegistrationStorage)).(*AutomationRegistrar21TriggerRegistrationStorage) + + return out0, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) GetTriggerRegistrationDetails(triggerType uint8) (AutomationRegistrar21TriggerRegistrationStorage, error) { + return _AutomationRegistrar.Contract.GetTriggerRegistrationDetails(&_AutomationRegistrar.CallOpts, triggerType) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerSession) GetTriggerRegistrationDetails(triggerType uint8) (AutomationRegistrar21TriggerRegistrationStorage, error) { + return _AutomationRegistrar.Contract.GetTriggerRegistrationDetails(&_AutomationRegistrar.CallOpts, triggerType) +} + +func (_AutomationRegistrar *AutomationRegistrarCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) Owner() (common.Address, error) { + return _AutomationRegistrar.Contract.Owner(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerSession) Owner() (common.Address, error) { + return _AutomationRegistrar.Contract.Owner(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) TypeAndVersion() (string, error) { + return _AutomationRegistrar.Contract.TypeAndVersion(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar 
*AutomationRegistrarCallerSession) TypeAndVersion() (string, error) { + return _AutomationRegistrar.Contract.TypeAndVersion(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "acceptOwnership") +} + +func (_AutomationRegistrar *AutomationRegistrarSession) AcceptOwnership() (*types.Transaction, error) { + return _AutomationRegistrar.Contract.AcceptOwnership(&_AutomationRegistrar.TransactOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _AutomationRegistrar.Contract.AcceptOwnership(&_AutomationRegistrar.TransactOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) Approve(opts *bind.TransactOpts, name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, hash [32]byte) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "approve", name, upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) Approve(name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, hash [32]byte) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.Approve(&_AutomationRegistrar.TransactOpts, name, upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) Approve(name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, hash 
[32]byte) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.Approve(&_AutomationRegistrar.TransactOpts, name, upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) Cancel(opts *bind.TransactOpts, hash [32]byte) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "cancel", hash) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) Cancel(hash [32]byte) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.Cancel(&_AutomationRegistrar.TransactOpts, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) Cancel(hash [32]byte) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.Cancel(&_AutomationRegistrar.TransactOpts, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.OnTokenTransfer(&_AutomationRegistrar.TransactOpts, sender, amount, data) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.OnTokenTransfer(&_AutomationRegistrar.TransactOpts, sender, amount, data) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) Register(opts *bind.TransactOpts, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, amount 
*big.Int, sender common.Address) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "register", name, encryptedEmail, upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig, amount, sender) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) Register(name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, amount *big.Int, sender common.Address) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.Register(&_AutomationRegistrar.TransactOpts, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig, amount, sender) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) Register(name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, amount *big.Int, sender common.Address) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.Register(&_AutomationRegistrar.TransactOpts, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig, amount, sender) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) RegisterUpkeep(opts *bind.TransactOpts, requestParams AutomationRegistrar21RegistrationParams) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "registerUpkeep", requestParams) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) RegisterUpkeep(requestParams AutomationRegistrar21RegistrationParams) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.RegisterUpkeep(&_AutomationRegistrar.TransactOpts, requestParams) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) 
RegisterUpkeep(requestParams AutomationRegistrar21RegistrationParams) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.RegisterUpkeep(&_AutomationRegistrar.TransactOpts, requestParams) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) SetAutoApproveAllowedSender(opts *bind.TransactOpts, senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "setAutoApproveAllowedSender", senderAddress, allowed) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) SetAutoApproveAllowedSender(senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.SetAutoApproveAllowedSender(&_AutomationRegistrar.TransactOpts, senderAddress, allowed) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) SetAutoApproveAllowedSender(senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.SetAutoApproveAllowedSender(&_AutomationRegistrar.TransactOpts, senderAddress, allowed) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) SetConfig(opts *bind.TransactOpts, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "setConfig", keeperRegistry, minPLIJuels) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) SetConfig(keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.SetConfig(&_AutomationRegistrar.TransactOpts, keeperRegistry, minPLIJuels) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) SetConfig(keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.SetConfig(&_AutomationRegistrar.TransactOpts, keeperRegistry, minPLIJuels) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) 
SetTriggerConfig(opts *bind.TransactOpts, triggerType uint8, autoApproveType uint8, autoApproveMaxAllowed uint32) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "setTriggerConfig", triggerType, autoApproveType, autoApproveMaxAllowed) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) SetTriggerConfig(triggerType uint8, autoApproveType uint8, autoApproveMaxAllowed uint32) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.SetTriggerConfig(&_AutomationRegistrar.TransactOpts, triggerType, autoApproveType, autoApproveMaxAllowed) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) SetTriggerConfig(triggerType uint8, autoApproveType uint8, autoApproveMaxAllowed uint32) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.SetTriggerConfig(&_AutomationRegistrar.TransactOpts, triggerType, autoApproveType, autoApproveMaxAllowed) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "transferOwnership", to) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.TransferOwnership(&_AutomationRegistrar.TransactOpts, to) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.TransferOwnership(&_AutomationRegistrar.TransactOpts, to) +} + +type AutomationRegistrarAutoApproveAllowedSenderSetIterator struct { + Event *AutomationRegistrarAutoApproveAllowedSenderSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarAutoApproveAllowedSenderSetIterator) Next() bool { + + if it.fail != nil { + 
return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarAutoApproveAllowedSenderSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarAutoApproveAllowedSenderSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarAutoApproveAllowedSenderSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarAutoApproveAllowedSenderSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarAutoApproveAllowedSenderSet struct { + SenderAddress common.Address + Allowed bool + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterAutoApproveAllowedSenderSet(opts *bind.FilterOpts, senderAddress []common.Address) (*AutomationRegistrarAutoApproveAllowedSenderSetIterator, error) { + + var senderAddressRule []interface{} + for _, senderAddressItem := range senderAddress { + senderAddressRule = append(senderAddressRule, senderAddressItem) + } + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "AutoApproveAllowedSenderSet", senderAddressRule) + if err != nil { + return nil, err + } + return &AutomationRegistrarAutoApproveAllowedSenderSetIterator{contract: _AutomationRegistrar.contract, event: "AutoApproveAllowedSenderSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchAutoApproveAllowedSenderSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarAutoApproveAllowedSenderSet, senderAddress []common.Address) (event.Subscription, error) { + + var senderAddressRule []interface{} 
+ for _, senderAddressItem := range senderAddress { + senderAddressRule = append(senderAddressRule, senderAddressItem) + } + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "AutoApproveAllowedSenderSet", senderAddressRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarAutoApproveAllowedSenderSet) + if err := _AutomationRegistrar.contract.UnpackLog(event, "AutoApproveAllowedSenderSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseAutoApproveAllowedSenderSet(log types.Log) (*AutomationRegistrarAutoApproveAllowedSenderSet, error) { + event := new(AutomationRegistrarAutoApproveAllowedSenderSet) + if err := _AutomationRegistrar.contract.UnpackLog(event, "AutoApproveAllowedSenderSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarConfigChangedIterator struct { + Event *AutomationRegistrarConfigChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarConfigChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarConfigChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarConfigChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != 
nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarConfigChangedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarConfigChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarConfigChanged struct { + KeeperRegistry common.Address + MinPLIJuels *big.Int + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterConfigChanged(opts *bind.FilterOpts) (*AutomationRegistrarConfigChangedIterator, error) { + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "ConfigChanged") + if err != nil { + return nil, err + } + return &AutomationRegistrarConfigChangedIterator{contract: _AutomationRegistrar.contract, event: "ConfigChanged", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchConfigChanged(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarConfigChanged) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "ConfigChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarConfigChanged) + if err := _AutomationRegistrar.contract.UnpackLog(event, "ConfigChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseConfigChanged(log types.Log) (*AutomationRegistrarConfigChanged, error) { + event := new(AutomationRegistrarConfigChanged) + if err := _AutomationRegistrar.contract.UnpackLog(event, "ConfigChanged", 
log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarOwnershipTransferRequestedIterator struct { + Event *AutomationRegistrarOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistrarOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err 
!= nil { + return nil, err + } + return &AutomationRegistrarOwnershipTransferRequestedIterator{contract: _AutomationRegistrar.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarOwnershipTransferRequested) + if err := _AutomationRegistrar.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseOwnershipTransferRequested(log types.Log) (*AutomationRegistrarOwnershipTransferRequested, error) { + event := new(AutomationRegistrarOwnershipTransferRequested) + if err := _AutomationRegistrar.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarOwnershipTransferredIterator struct { + Event *AutomationRegistrarOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*AutomationRegistrarOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistrarOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistrarOwnershipTransferredIterator{contract: _AutomationRegistrar.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarOwnershipTransferred, from []common.Address, to []common.Address) 
(event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarOwnershipTransferred) + if err := _AutomationRegistrar.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseOwnershipTransferred(log types.Log) (*AutomationRegistrarOwnershipTransferred, error) { + event := new(AutomationRegistrarOwnershipTransferred) + if err := _AutomationRegistrar.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarRegistrationApprovedIterator struct { + Event *AutomationRegistrarRegistrationApproved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarRegistrationApprovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarRegistrationApproved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(AutomationRegistrarRegistrationApproved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarRegistrationApprovedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarRegistrationApprovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarRegistrationApproved struct { + Hash [32]byte + DisplayName string + UpkeepId *big.Int + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterRegistrationApproved(opts *bind.FilterOpts, hash [][32]byte, upkeepId []*big.Int) (*AutomationRegistrarRegistrationApprovedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "RegistrationApproved", hashRule, upkeepIdRule) + if err != nil { + return nil, err + } + return &AutomationRegistrarRegistrationApprovedIterator{contract: _AutomationRegistrar.contract, event: "RegistrationApproved", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchRegistrationApproved(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarRegistrationApproved, hash [][32]byte, upkeepId []*big.Int) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "RegistrationApproved", hashRule, upkeepIdRule) + if err != nil { 
+ return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarRegistrationApproved) + if err := _AutomationRegistrar.contract.UnpackLog(event, "RegistrationApproved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseRegistrationApproved(log types.Log) (*AutomationRegistrarRegistrationApproved, error) { + event := new(AutomationRegistrarRegistrationApproved) + if err := _AutomationRegistrar.contract.UnpackLog(event, "RegistrationApproved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarRegistrationRejectedIterator struct { + Event *AutomationRegistrarRegistrationRejected + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarRegistrationRejectedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarRegistrationRejected) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarRegistrationRejected) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarRegistrationRejectedIterator) Error() error { + return it.fail +} 
+ +func (it *AutomationRegistrarRegistrationRejectedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarRegistrationRejected struct { + Hash [32]byte + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterRegistrationRejected(opts *bind.FilterOpts, hash [][32]byte) (*AutomationRegistrarRegistrationRejectedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "RegistrationRejected", hashRule) + if err != nil { + return nil, err + } + return &AutomationRegistrarRegistrationRejectedIterator{contract: _AutomationRegistrar.contract, event: "RegistrationRejected", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchRegistrationRejected(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarRegistrationRejected, hash [][32]byte) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "RegistrationRejected", hashRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarRegistrationRejected) + if err := _AutomationRegistrar.contract.UnpackLog(event, "RegistrationRejected", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseRegistrationRejected(log types.Log) (*AutomationRegistrarRegistrationRejected, error) { + event := 
new(AutomationRegistrarRegistrationRejected) + if err := _AutomationRegistrar.contract.UnpackLog(event, "RegistrationRejected", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarRegistrationRequestedIterator struct { + Event *AutomationRegistrarRegistrationRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarRegistrationRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarRegistrationRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarRegistrationRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarRegistrationRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarRegistrationRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarRegistrationRequested struct { + Hash [32]byte + Name string + EncryptedEmail []byte + UpkeepContract common.Address + GasLimit uint32 + AdminAddress common.Address + TriggerType uint8 + TriggerConfig []byte + OffchainConfig []byte + CheckData []byte + Amount *big.Int + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterRegistrationRequested(opts *bind.FilterOpts, hash [][32]byte, upkeepContract []common.Address) (*AutomationRegistrarRegistrationRequestedIterator, error) { + + var hashRule []interface{} + for _, hashItem 
:= range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepContractRule []interface{} + for _, upkeepContractItem := range upkeepContract { + upkeepContractRule = append(upkeepContractRule, upkeepContractItem) + } + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "RegistrationRequested", hashRule, upkeepContractRule) + if err != nil { + return nil, err + } + return &AutomationRegistrarRegistrationRequestedIterator{contract: _AutomationRegistrar.contract, event: "RegistrationRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchRegistrationRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarRegistrationRequested, hash [][32]byte, upkeepContract []common.Address) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepContractRule []interface{} + for _, upkeepContractItem := range upkeepContract { + upkeepContractRule = append(upkeepContractRule, upkeepContractItem) + } + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "RegistrationRequested", hashRule, upkeepContractRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarRegistrationRequested) + if err := _AutomationRegistrar.contract.UnpackLog(event, "RegistrationRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseRegistrationRequested(log types.Log) (*AutomationRegistrarRegistrationRequested, error) { + event := new(AutomationRegistrarRegistrationRequested) + if err := 
_AutomationRegistrar.contract.UnpackLog(event, "RegistrationRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarTriggerConfigSetIterator struct { + Event *AutomationRegistrarTriggerConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarTriggerConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarTriggerConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarTriggerConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarTriggerConfigSet struct { + TriggerType uint8 + AutoApproveType uint8 + AutoApproveMaxAllowed uint32 + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterTriggerConfigSet(opts *bind.FilterOpts) (*AutomationRegistrarTriggerConfigSetIterator, error) { + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "TriggerConfigSet") + if err != nil { + return nil, err + } + return &AutomationRegistrarTriggerConfigSetIterator{contract: _AutomationRegistrar.contract, event: "TriggerConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchTriggerConfigSet(opts 
*bind.WatchOpts, sink chan<- *AutomationRegistrarTriggerConfigSet) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "TriggerConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarTriggerConfigSet) + if err := _AutomationRegistrar.contract.UnpackLog(event, "TriggerConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseTriggerConfigSet(log types.Log) (*AutomationRegistrarTriggerConfigSet, error) { + event := new(AutomationRegistrarTriggerConfigSet) + if err := _AutomationRegistrar.contract.UnpackLog(event, "TriggerConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetConfig struct { + KeeperRegistry common.Address + MinPLIJuels *big.Int +} + +func (_AutomationRegistrar *AutomationRegistrar) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _AutomationRegistrar.abi.Events["AutoApproveAllowedSenderSet"].ID: + return _AutomationRegistrar.ParseAutoApproveAllowedSenderSet(log) + case _AutomationRegistrar.abi.Events["ConfigChanged"].ID: + return _AutomationRegistrar.ParseConfigChanged(log) + case _AutomationRegistrar.abi.Events["OwnershipTransferRequested"].ID: + return _AutomationRegistrar.ParseOwnershipTransferRequested(log) + case _AutomationRegistrar.abi.Events["OwnershipTransferred"].ID: + return _AutomationRegistrar.ParseOwnershipTransferred(log) + case _AutomationRegistrar.abi.Events["RegistrationApproved"].ID: + return _AutomationRegistrar.ParseRegistrationApproved(log) + case 
_AutomationRegistrar.abi.Events["RegistrationRejected"].ID: + return _AutomationRegistrar.ParseRegistrationRejected(log) + case _AutomationRegistrar.abi.Events["RegistrationRequested"].ID: + return _AutomationRegistrar.ParseRegistrationRequested(log) + case _AutomationRegistrar.abi.Events["TriggerConfigSet"].ID: + return _AutomationRegistrar.ParseTriggerConfigSet(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (AutomationRegistrarAutoApproveAllowedSenderSet) Topic() common.Hash { + return common.HexToHash("0x20c6237dac83526a849285a9f79d08a483291bdd3a056a0ef9ae94ecee1ad356") +} + +func (AutomationRegistrarConfigChanged) Topic() common.Hash { + return common.HexToHash("0x39ce5d867555f0b0183e358fce5b158e7ca4fecd7c01cb7e0e19f1e23285838a") +} + +func (AutomationRegistrarOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (AutomationRegistrarOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (AutomationRegistrarRegistrationApproved) Topic() common.Hash { + return common.HexToHash("0xb9a292fb7e3edd920cd2d2829a3615a640c43fd7de0a0820aa0668feb4c37d4b") +} + +func (AutomationRegistrarRegistrationRejected) Topic() common.Hash { + return common.HexToHash("0x3663fb28ebc87645eb972c9dad8521bf665c623f287e79f1c56f1eb374b82a22") +} + +func (AutomationRegistrarRegistrationRequested) Topic() common.Hash { + return common.HexToHash("0x7684390ebb103102f7f48c71439c2408713f8d437782a6fab2756acc0e42c1b7") +} + +func (AutomationRegistrarTriggerConfigSet) Topic() common.Hash { + return common.HexToHash("0x830a6d06a4e2caac67eba04323de22bdb04f032dd8b3d6a0c52b503d9a7036a3") +} + +func (_AutomationRegistrar *AutomationRegistrar) Address() common.Address { + return _AutomationRegistrar.address +} + +type 
AutomationRegistrarInterface interface { + PLI(opts *bind.CallOpts) (common.Address, error) + + GetAutoApproveAllowedSender(opts *bind.CallOpts, senderAddress common.Address) (bool, error) + + GetConfig(opts *bind.CallOpts) (GetConfig, + + error) + + GetPendingRequest(opts *bind.CallOpts, hash [32]byte) (common.Address, *big.Int, error) + + GetTriggerRegistrationDetails(opts *bind.CallOpts, triggerType uint8) (AutomationRegistrar21TriggerRegistrationStorage, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + Approve(opts *bind.TransactOpts, name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, hash [32]byte) (*types.Transaction, error) + + Cancel(opts *bind.TransactOpts, hash [32]byte) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + Register(opts *bind.TransactOpts, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, amount *big.Int, sender common.Address) (*types.Transaction, error) + + RegisterUpkeep(opts *bind.TransactOpts, requestParams AutomationRegistrar21RegistrationParams) (*types.Transaction, error) + + SetAutoApproveAllowedSender(opts *bind.TransactOpts, senderAddress common.Address, allowed bool) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) + + SetTriggerConfig(opts *bind.TransactOpts, triggerType uint8, autoApproveType uint8, autoApproveMaxAllowed uint32) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) 
(*types.Transaction, error) + + FilterAutoApproveAllowedSenderSet(opts *bind.FilterOpts, senderAddress []common.Address) (*AutomationRegistrarAutoApproveAllowedSenderSetIterator, error) + + WatchAutoApproveAllowedSenderSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarAutoApproveAllowedSenderSet, senderAddress []common.Address) (event.Subscription, error) + + ParseAutoApproveAllowedSenderSet(log types.Log) (*AutomationRegistrarAutoApproveAllowedSenderSet, error) + + FilterConfigChanged(opts *bind.FilterOpts) (*AutomationRegistrarConfigChangedIterator, error) + + WatchConfigChanged(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarConfigChanged) (event.Subscription, error) + + ParseConfigChanged(log types.Log) (*AutomationRegistrarConfigChanged, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistrarOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*AutomationRegistrarOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistrarOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*AutomationRegistrarOwnershipTransferred, error) + + FilterRegistrationApproved(opts *bind.FilterOpts, hash [][32]byte, upkeepId []*big.Int) (*AutomationRegistrarRegistrationApprovedIterator, error) + + WatchRegistrationApproved(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarRegistrationApproved, hash [][32]byte, upkeepId []*big.Int) (event.Subscription, error) + + 
ParseRegistrationApproved(log types.Log) (*AutomationRegistrarRegistrationApproved, error) + + FilterRegistrationRejected(opts *bind.FilterOpts, hash [][32]byte) (*AutomationRegistrarRegistrationRejectedIterator, error) + + WatchRegistrationRejected(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarRegistrationRejected, hash [][32]byte) (event.Subscription, error) + + ParseRegistrationRejected(log types.Log) (*AutomationRegistrarRegistrationRejected, error) + + FilterRegistrationRequested(opts *bind.FilterOpts, hash [][32]byte, upkeepContract []common.Address) (*AutomationRegistrarRegistrationRequestedIterator, error) + + WatchRegistrationRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarRegistrationRequested, hash [][32]byte, upkeepContract []common.Address) (event.Subscription, error) + + ParseRegistrationRequested(log types.Log) (*AutomationRegistrarRegistrationRequested, error) + + FilterTriggerConfigSet(opts *bind.FilterOpts) (*AutomationRegistrarTriggerConfigSetIterator, error) + + WatchTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarTriggerConfigSet) (event.Subscription, error) + + ParseTriggerConfigSet(log types.Log) (*AutomationRegistrarTriggerConfigSet, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/automation_registrar_wrapper2_2/automation_registrar_wrapper2_2.go b/core/gethwrappers/generated/automation_registrar_wrapper2_2/automation_registrar_wrapper2_2.go new file mode 100644 index 00000000..1d592672 --- /dev/null +++ b/core/gethwrappers/generated/automation_registrar_wrapper2_2/automation_registrar_wrapper2_2.go @@ -0,0 +1,1685 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package automation_registrar_wrapper2_2 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type AutomationRegistrar22InitialTriggerConfig struct { + TriggerType uint8 + AutoApproveType uint8 + AutoApproveMaxAllowed uint32 +} + +type AutomationRegistrar22RegistrationParams struct { + Name string + EncryptedEmail []byte + UpkeepContract common.Address + GasLimit uint32 + AdminAddress common.Address + TriggerType uint8 + CheckData []byte + TriggerConfig []byte + OffchainConfig []byte + Amount *big.Int +} + +type AutomationRegistrar22TriggerRegistrationStorage struct { + AutoApproveType uint8 + AutoApproveMaxAllowed uint32 + ApprovedCount uint32 +} + +var AutomationRegistrarMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"PLIAddress\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"AutomationRegistry\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"},{\"components\":[{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"enumAutomationRegistrar2_2.AutoApproveType\",\"name\":\"autoApproveType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"}],\"internalType\":\"structAutomationRegistrar2_2.InitialTriggerConfig[]\",\"name\":\"triggerConfigs\",\"type\":\"tuple[]\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AmountMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FunctionNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"HashMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientPayment\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidAdminAddress\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"LinkTransferFailed\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyAdminOrOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyLink\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistrationRequestFailed\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RequestNotFound\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"SenderMismatch\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"senderAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"allowed\",\"type\":\"bool\"}],\"name\":\"AutoApproveAllowedSenderSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"AutomationRegistry\",\"type\":\"address\"},{\"indexed\"
:false,\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"}],\"name\":\"ConfigChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"displayName\",\"type\":\"string\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"RegistrationApproved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"RegistrationRejected\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"encryptedEmail\",\"type\":\"bytes\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\
",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"RegistrationRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"enumAutomationRegistrar2_2.AutoApproveType\",\"name\":\"autoApproveType\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"}],\"name\":\"TriggerConfigSet\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"approve\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"cancel\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sende
rAddress\",\"type\":\"address\"}],\"name\":\"getAutoApproveAllowedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"AutomationRegistry\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"minPLIJuels\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"getPendingRequest\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"}],\"name\":\"getTriggerRegistrationDetails\",\"outputs\":[{\"components\":[{\"internalType\":\"enumAutomationRegistrar2_2.AutoApproveType\",\"name\":\"autoApproveType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"approvedCount\",\"type\":\"uint32\"}],\"internalType\":\"structAutomationRegistrar2_2.TriggerRegistrationStorage\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"encryptedE
mail\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"register\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"encryptedEmail\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"internalType\":\"structAutomationRegistrar2_2.RegistrationParams\",\"name\":\"requestParams\",\"type\":\"tuple\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"senderAddress\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"allowed\",\"type\":\"bool\"
}],\"name\":\"setAutoApproveAllowedSender\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"AutomationRegistry\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"enumAutomationRegistrar2_2.AutoApproveType\",\"name\":\"autoApproveType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"}],\"name\":\"setTriggerConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x60a06040523480156200001157600080fd5b5060405162002d8238038062002d8283398101604081905262000034916200043b565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be816200017a565b5050506001600160a01b038416608052620000da838362000225565b60005b81518110156200016f576200015a82828151811062000100576200010062000598565b60200260200101516000015183838151811062000121576200012162000598565b60200260200101516020015184848151811062000142576200014262000598565b6020026020010151604001516200029e60201b60201c565b806200016681620005ae565b915050620000dd565b50505050506200062f565b336001600160a01b03821603620001d45760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6200022f6200034c565b6040805180820182526001600160a01b0384168082526001600160601b0384166020928301819052600160a01b810282176004558351918252918101919091527f39ce5d867555f0b0183e358fce5b158e7ca4fecd7c01cb7e0e19f1e23285838a910160405180910390a15050565b620002a86200034c565b60ff83166000908152600360205260409020805483919060ff19166001836002811115620002da57620002da620005d6565b021790555060ff831660009081526003602052604090819020805464ffffffff00191661010063ffffffff851602179055517f830a6d06a4e2caac67eba04323de22bdb04f032dd8b3d6a0c52b503d9a7036a3906200033f90859085908590620005ec565b60405180910390a1505050565b6000546001600160a01b03163314620003a85760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000082565b565b80516001600160a01b0381168114620003c257600080fd5b919050565b634e487b7160e01b600052604160045260246000fd5b60405160608101600
1600160401b0381118282101715620004025762000402620003c7565b60405290565b604051601f8201601f191681016001600160401b0381118282101715620004335762000433620003c7565b604052919050565b600080600080608085870312156200045257600080fd5b6200045d85620003aa565b935060206200046e818701620003aa565b604087810151919550906001600160601b03811681146200048e57600080fd5b606088810151919550906001600160401b0380821115620004ae57600080fd5b818a0191508a601f830112620004c357600080fd5b815181811115620004d857620004d8620003c7565b620004e8868260051b0162000408565b818152868101925090840283018601908c8211156200050657600080fd5b928601925b81841015620005875784848e031215620005255760008081fd5b6200052f620003dd565b845160ff81168114620005425760008081fd5b81528488015160038110620005575760008081fd5b818901528487015163ffffffff81168114620005735760008081fd5b81880152835292840192918601916200050b565b999c989b5096995050505050505050565b634e487b7160e01b600052603260045260246000fd5b600060018201620005cf57634e487b7160e01b600052601160045260246000fd5b5060010190565b634e487b7160e01b600052602160045260246000fd5b60ff8416815260608101600384106200061557634e487b7160e01b600052602160045260246000fd5b83602083015263ffffffff83166040830152949350505050565b6080516127146200066e60003960008181610177015281816105d601528181610887015281816109bd01528181610f0e015261171b01526127146000f3fe608060405234801561001057600080fd5b506004361061011b5760003560e01c8063856853e6116100b2578063b5ff5b4111610081578063c4d252f511610066578063c4d252f5146103e3578063e8d4070d146103f6578063f2fde38b1461040957600080fd5b8063b5ff5b4114610369578063c3f909d41461037c57600080fd5b8063856853e61461027857806388b12d551461028b5780638da5cb5b14610338578063a4c0ed361461035657600080fd5b80633f678e11116100ee5780633f678e11146101f35780636c4cdfc31461021457806379ba5097146102275780637e776f7f1461022f57600080fd5b8063181f5a77146101205780631b6b6d2314610172578063212d0884146101be578063367b9b4f146101de575b600080fd5b61015c6040518060400160405280601981526020017f4175746f6d6174696f6e52656769737472617220322e312e300000000000000081525081565b6040516
101699190611a74565b60405180910390f35b6101997f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610169565b6101d16101cc366004611aa4565b61041c565b6040516101699190611b29565b6101f16101ec366004611b9d565b6104a9565b005b610206610201366004611bd6565b61053b565b604051908152602001610169565b6101f1610222366004611c2e565b6106d3565b6101f161076d565b61026861023d366004611c63565b73ffffffffffffffffffffffffffffffffffffffff1660009081526005602052604090205460ff1690565b6040519015158152602001610169565b6101f1610286366004611de1565b61086f565b6102ff610299366004611f40565b60009081526002602090815260409182902082518084019093525473ffffffffffffffffffffffffffffffffffffffff8116808452740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff169290910182905291565b6040805173ffffffffffffffffffffffffffffffffffffffff90931683526bffffffffffffffffffffffff909116602083015201610169565b60005473ffffffffffffffffffffffffffffffffffffffff16610199565b6101f1610364366004611f59565b6109a5565b6101f1610377366004611fb5565b610ce3565b60408051808201825260045473ffffffffffffffffffffffffffffffffffffffff8116808352740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff16602092830181905283519182529181019190915201610169565b6101f16103f1366004611f40565b610dc2565b6101f1610404366004611ffe565b61104c565b6101f1610417366004611c63565b6112d9565b60408051606080820183526000808352602080840182905283850182905260ff86811683526003909152908490208451928301909452835492939192839116600281111561046c5761046c611abf565b600281111561047d5761047d611abf565b8152905463ffffffff610100820481166020840152650100000000009091041660409091015292915050565b6104b16112ed565b73ffffffffffffffffffffffffffffffffffffffff821660008181526005602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001685151590811790915591519182527f20c6237dac83526a849285a9f79d08a483291bdd3a056a0ef9ae94ecee1ad356910160405180910390a25050565b6004546000907401000000000
000000000000000000000000000000090046bffffffffffffffffffffffff1661057961014084016101208501612109565b6bffffffffffffffffffffffff1610156105bf576040517fcd1c886700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000166323b872dd333061060f61014087016101208801612109565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e086901b16815273ffffffffffffffffffffffffffffffffffffffff93841660048201529290911660248301526bffffffffffffffffffffffff1660448201526064016020604051808303816000875af1158015610696573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906106ba9190612124565b506106cd6106c783612141565b33611370565b92915050565b6106db6112ed565b60408051808201825273ffffffffffffffffffffffffffffffffffffffff84168082526bffffffffffffffffffffffff8416602092830181905274010000000000000000000000000000000000000000810282176004558351918252918101919091527f39ce5d867555f0b0183e358fce5b158e7ca4fecd7c01cb7e0e19f1e23285838a910160405180910390a15050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146107f3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146108de576040517f018d10be00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6109966040518061014001604052808e81526020018d8d8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525073fffff
fffffffffffffffffffffffffffffffffff808d16602083015263ffffffff8c1660408301528a16606082015260ff8916608082015260a0810188905260c0810187905260e081018690526bffffffffffffffffffffffff85166101009091015282611370565b50505050505050505050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610a14576040517f018d10be00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b81818080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050505060208101517fffffffff0000000000000000000000000000000000000000000000000000000081167f856853e60000000000000000000000000000000000000000000000000000000014610aca576040517fe3d6792100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8484846000610adc8260048186612276565b810190610ae991906122a0565b509950505050505050505050806bffffffffffffffffffffffff168414610b3c576040517f55e97b0d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8988886000610b4e8260048186612276565b810190610b5b91906122a0565b9a50505050505050505050508073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff1614610bcc576040517ff8c5638e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6004547401000000000000000000000000000000000000000090046bffffffffffffffffffffffff168d1015610c2e576040517fcd1c886700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60003073ffffffffffffffffffffffffffffffffffffffff168d8d604051610c579291906123dd565b600060405180830381855af49150503d8060008114610c92576040519150601f19603f3d011682016040523d82523d6000602084013e610c97565b606091505b5050905080610cd2576040517f649bf81000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b505050505050505050505050505050565b610ceb6112ed565b60ff8316600090815260036020526040902080548391907ffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffff00166001836002811115610d3857610d38611abf565b021790555060ff83166000908152600360205260409081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ff1661010063ffffffff851602179055517f830a6d06a4e2caac67eba04323de22bdb04f032dd8b3d6a0c52b503d9a7036a390610db5908590859085906123ed565b60405180910390a1505050565b60008181526002602090815260409182902082518084019093525473ffffffffffffffffffffffffffffffffffffffff8116808452740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff1691830191909152331480610e49575060005473ffffffffffffffffffffffffffffffffffffffff1633145b610e7f576040517f61685c2b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805173ffffffffffffffffffffffffffffffffffffffff16610ecd576040517f4b13b31e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260026020908152604080832083905583519184015190517fa9059cbb0000000000000000000000000000000000000000000000000000000081527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169263a9059cbb92610f859260040173ffffffffffffffffffffffffffffffffffffffff9290921682526bffffffffffffffffffffffff16602082015260400190565b6020604051808303816000875af1158015610fa4573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610fc89190612124565b90508061101c5781516040517fc2e4dce800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911660048201526024016107ea565b60405183907f3663fb28ebc87645eb972c9dad8521bf665c623f287e79f1c56f1eb374b82a2290600090a2505050565b6110546112ed565b60008181526002602090815260409182902082518084019093525473ffffffffffffffffffffffffffffffffffffffff8116808452740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff16918301919091526110ed576040517f4b13b31e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008b8b8b8b8b8b8b8b8b604051602001611
11099989796959493929190612461565b604051602081830303815290604052805190602001209050808314611161576040517f3f4d605300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60026000848152602001908152602001600020600080820160006101000a81549073ffffffffffffffffffffffffffffffffffffffff02191690556000820160146101000a8154906bffffffffffffffffffffffff021916905550506112c96040518061014001604052808f81526020016040518060200160405280600081525081526020018e73ffffffffffffffffffffffffffffffffffffffff1681526020018d63ffffffff1681526020018c73ffffffffffffffffffffffffffffffffffffffff1681526020018b60ff1681526020018a8a8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525060208082018a905260408051601f8a0183900483028101830182528981529201919089908990819084018382808284376000920191909152505050908252506020858101516bffffffffffffffffffffffff1691015282611647565b5050505050505050505050505050565b6112e16112ed565b6112ea81611876565b50565b60005473ffffffffffffffffffffffffffffffffffffffff16331461136e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016107ea565b565b608082015160009073ffffffffffffffffffffffffffffffffffffffff166113c4576040517f05bb467c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008360400151846060015185608001518660a001518760c001518860e0015189610100015160405160200161140097969594939291906124e7565b604051602081830303815290604052805190602001209050836040015173ffffffffffffffffffffffffffffffffffffffff16817f7684390ebb103102f7f48c71439c2408713f8d437782a6fab2756acc0e42c1b786600001518760200151886060015189608001518a60a001518b60e001518c61010001518d60c001518e610120015160405161149999989796959493929190612569565b60405180910390a360a084015160ff9081166000908152600360205260408082208151606081019092528054929361151c9383911660028111156114df576114df611abf565b60028111156114f05
76114f0611abf565b8152905463ffffffff61010082048116602084015265010000000000909104166040909101528561196b565b156115845760a085015160ff166000908152600360205260409020805465010000000000900463ffffffff1690600561155483612653565b91906101000a81548163ffffffff021916908363ffffffff1602179055505061157d8583611647565b905061163f565b61012085015160008381526002602052604081205490916115ca917401000000000000000000000000000000000000000090046bffffffffffffffffffffffff16612676565b604080518082018252608089015173ffffffffffffffffffffffffffffffffffffffff90811682526bffffffffffffffffffffffff9384166020808401918252600089815260029091529390932091519251909316740100000000000000000000000000000000000000000291909216179055505b949350505050565b600480546040808501516060860151608087015160a088015160c089015160e08a01516101008b015196517f28f32f3800000000000000000000000000000000000000000000000000000000815260009973ffffffffffffffffffffffffffffffffffffffff909916988a988a986328f32f38986116d29891979096919590949193909291016124e7565b6020604051808303816000875af11580156116f1573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061171591906126a2565b905060007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16634000aea0848861012001518560405160200161176f91815260200190565b6040516020818303038152906040526040518463ffffffff1660e01b815260040161179c939291906126bb565b6020604051808303816000875af11580156117bb573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906117df9190612124565b905080611830576040517fc2e4dce800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff841660048201526024016107ea565b81857fb9a292fb7e3edd920cd2d2829a3615a640c43fd7de0a0820aa0668feb4c37d4b88600001516040516118659190611a74565b60405180910390a350949350505050565b3373ffffffffffffffffffffffffffffffffffffffff8216036118f5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207
472616e7366657220746f2073656c6600000000000000000060448201526064016107ea565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000808351600281111561198157611981611abf565b0361198e575060006106cd565b6001835160028111156119a3576119a3611abf565b1480156119d6575073ffffffffffffffffffffffffffffffffffffffff821660009081526005602052604090205460ff16155b156119e3575060006106cd565b826020015163ffffffff16836040015163ffffffff161015611a07575060016106cd565b50600092915050565b6000815180845260005b81811015611a3657602081850181015186830182015201611a1a565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081526000611a876020830184611a10565b9392505050565b803560ff81168114611a9f57600080fd5b919050565b600060208284031215611ab657600080fd5b611a8782611a8e565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b60038110611b25577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b9052565b6000606082019050611b3c828451611aee565b602083015163ffffffff8082166020850152806040860151166040850152505092915050565b73ffffffffffffffffffffffffffffffffffffffff811681146112ea57600080fd5b8035611a9f81611b62565b80151581146112ea57600080fd5b60008060408385031215611bb057600080fd5b8235611bbb81611b62565b91506020830135611bcb81611b8f565b809150509250929050565b600060208284031215611be857600080fd5b813567ffffffffffffffff811115611bff57600080fd5b82016101408185031215611a8757600080fd5b80356bffffffffffffffffffffffff81168114611a9f57600080fd5b60008060408385031215611c4157600080fd5b8235611c4c81611b62565b9150611c5a60208401611c12565b90509250929050565b600060208284031215611c7557600080fd5b8135611a8781611b62565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60405161014081016
7ffffffffffffffff81118282101715611cd357611cd3611c80565b60405290565b600082601f830112611cea57600080fd5b813567ffffffffffffffff80821115611d0557611d05611c80565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908282118183101715611d4b57611d4b611c80565b81604052838152866020858801011115611d6457600080fd5b836020870160208301376000602085830101528094505050505092915050565b60008083601f840112611d9657600080fd5b50813567ffffffffffffffff811115611dae57600080fd5b602083019150836020828501011115611dc657600080fd5b9250929050565b803563ffffffff81168114611a9f57600080fd5b6000806000806000806000806000806000806101608d8f031215611e0457600080fd5b67ffffffffffffffff8d351115611e1a57600080fd5b611e278e8e358f01611cd9565b9b5067ffffffffffffffff60208e01351115611e4257600080fd5b611e528e60208f01358f01611d84565b909b509950611e6360408e01611b84565b9850611e7160608e01611dcd565b9750611e7f60808e01611b84565b9650611e8d60a08e01611a8e565b955067ffffffffffffffff60c08e01351115611ea857600080fd5b611eb88e60c08f01358f01611cd9565b945067ffffffffffffffff60e08e01351115611ed357600080fd5b611ee38e60e08f01358f01611cd9565b935067ffffffffffffffff6101008e01351115611eff57600080fd5b611f108e6101008f01358f01611cd9565b9250611f1f6101208e01611c12565b9150611f2e6101408e01611b84565b90509295989b509295989b509295989b565b600060208284031215611f5257600080fd5b5035919050565b60008060008060608587031215611f6f57600080fd5b8435611f7a81611b62565b935060208501359250604085013567ffffffffffffffff811115611f9d57600080fd5b611fa987828801611d84565b95989497509550505050565b600080600060608486031215611fca57600080fd5b611fd384611a8e565b9250602084013560038110611fe757600080fd5b9150611ff560408501611dcd565b90509250925092565b60008060008060008060008060008060006101208c8e03121561202057600080fd5b67ffffffffffffffff808d35111561203757600080fd5b6120448e8e358f01611cd9565b9b5061205260208e01611b84565b9a5061206060408e01611dcd565b995061206e60608e01611b84565b985061207c60808e01611a8e565b97508060a08e0135111561208f57600080fd5b61209f8e60a08f01358f01611d84565b9
09750955060c08d01358110156120b557600080fd5b6120c58e60c08f01358f01611cd9565b94508060e08e013511156120d857600080fd5b506120e98d60e08e01358e01611d84565b81945080935050506101008c013590509295989b509295989b9093969950565b60006020828403121561211b57600080fd5b611a8782611c12565b60006020828403121561213657600080fd5b8151611a8781611b8f565b6000610140823603121561215457600080fd5b61215c611caf565b823567ffffffffffffffff8082111561217457600080fd5b61218036838701611cd9565b8352602085013591508082111561219657600080fd5b6121a236838701611cd9565b60208401526121b360408601611b84565b60408401526121c460608601611dcd565b60608401526121d560808601611b84565b60808401526121e660a08601611a8e565b60a084015260c08501359150808211156121ff57600080fd5b61220b36838701611cd9565b60c084015260e085013591508082111561222457600080fd5b61223036838701611cd9565b60e08401526101009150818501358181111561224b57600080fd5b61225736828801611cd9565b8385015250505061012061226c818501611c12565b9082015292915050565b6000808585111561228657600080fd5b8386111561229357600080fd5b5050820193919092039150565b60008060008060008060008060008060006101608c8e0312156122c257600080fd5b67ffffffffffffffff808d3511156122d957600080fd5b6122e68e8e358f01611cd9565b9b508060208e013511156122f957600080fd5b6123098e60208f01358f01611cd9565b9a5061231760408e01611b84565b995061232560608e01611dcd565b985061233360808e01611b84565b975061234160a08e01611a8e565b96508060c08e0135111561235457600080fd5b6123648e60c08f01358f01611cd9565b95508060e08e0135111561237757600080fd5b6123878e60e08f01358f01611cd9565b9450806101008e0135111561239b57600080fd5b506123ad8d6101008e01358e01611cd9565b92506123bc6101208d01611c12565b91506123cb6101408d01611b84565b90509295989b509295989b9093969950565b8183823760009101908152919050565b60ff84168152606081016124046020830185611aee565b63ffffffff83166040830152949350505050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b600073ffffffffffffffffffffffffffffffffffffffff808c16835263ffffffff8b166020840
152808a1660408401525060ff8816606083015260e060808301526124b060e083018789612418565b82810360a08401526124c28187611a10565b905082810360c08401526124d7818587612418565b9c9b505050505050505050505050565b600073ffffffffffffffffffffffffffffffffffffffff808a16835263ffffffff8916602084015280881660408401525060ff8616606083015260e0608083015261253560e0830186611a10565b82810360a08401526125478186611a10565b905082810360c084015261255b8185611a10565b9a9950505050505050505050565b600061012080835261257d8184018d611a10565b90508281036020840152612591818c611a10565b905063ffffffff8a16604084015273ffffffffffffffffffffffffffffffffffffffff8916606084015260ff8816608084015282810360a08401526125d68188611a10565b905082810360c08401526125ea8187611a10565b905082810360e08401526125fe8186611a10565b9150506bffffffffffffffffffffffff83166101008301529a9950505050505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600063ffffffff80831681810361266c5761266c612624565b6001019392505050565b6bffffffffffffffffffffffff81811683821601908082111561269b5761269b612624565b5092915050565b6000602082840312156126b457600080fd5b5051919050565b73ffffffffffffffffffffffffffffffffffffffff841681526bffffffffffffffffffffffff831660208201526060604082015260006126fe6060830184611a10565b9594505050505056fea164736f6c6343000813000a", +} + +var AutomationRegistrarABI = AutomationRegistrarMetaData.ABI + +var AutomationRegistrarBin = AutomationRegistrarMetaData.Bin + +func DeployAutomationRegistrar(auth *bind.TransactOpts, backend bind.ContractBackend, PLIAddress common.Address, AutomationRegistry common.Address, minPLIJuels *big.Int, triggerConfigs []AutomationRegistrar22InitialTriggerConfig) (common.Address, *types.Transaction, *AutomationRegistrar, error) { + parsed, err := AutomationRegistrarMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := 
bind.DeployContract(auth, *parsed, common.FromHex(AutomationRegistrarBin), backend, PLIAddress, AutomationRegistry, minPLIJuels, triggerConfigs) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &AutomationRegistrar{address: address, abi: *parsed, AutomationRegistrarCaller: AutomationRegistrarCaller{contract: contract}, AutomationRegistrarTransactor: AutomationRegistrarTransactor{contract: contract}, AutomationRegistrarFilterer: AutomationRegistrarFilterer{contract: contract}}, nil +} + +type AutomationRegistrar struct { + address common.Address + abi abi.ABI + AutomationRegistrarCaller + AutomationRegistrarTransactor + AutomationRegistrarFilterer +} + +type AutomationRegistrarCaller struct { + contract *bind.BoundContract +} + +type AutomationRegistrarTransactor struct { + contract *bind.BoundContract +} + +type AutomationRegistrarFilterer struct { + contract *bind.BoundContract +} + +type AutomationRegistrarSession struct { + Contract *AutomationRegistrar + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AutomationRegistrarCallerSession struct { + Contract *AutomationRegistrarCaller + CallOpts bind.CallOpts +} + +type AutomationRegistrarTransactorSession struct { + Contract *AutomationRegistrarTransactor + TransactOpts bind.TransactOpts +} + +type AutomationRegistrarRaw struct { + Contract *AutomationRegistrar +} + +type AutomationRegistrarCallerRaw struct { + Contract *AutomationRegistrarCaller +} + +type AutomationRegistrarTransactorRaw struct { + Contract *AutomationRegistrarTransactor +} + +func NewAutomationRegistrar(address common.Address, backend bind.ContractBackend) (*AutomationRegistrar, error) { + abi, err := abi.JSON(strings.NewReader(AutomationRegistrarABI)) + if err != nil { + return nil, err + } + contract, err := bindAutomationRegistrar(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AutomationRegistrar{address: address, abi: abi, AutomationRegistrarCaller: 
AutomationRegistrarCaller{contract: contract}, AutomationRegistrarTransactor: AutomationRegistrarTransactor{contract: contract}, AutomationRegistrarFilterer: AutomationRegistrarFilterer{contract: contract}}, nil +} + +func NewAutomationRegistrarCaller(address common.Address, caller bind.ContractCaller) (*AutomationRegistrarCaller, error) { + contract, err := bindAutomationRegistrar(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AutomationRegistrarCaller{contract: contract}, nil +} + +func NewAutomationRegistrarTransactor(address common.Address, transactor bind.ContractTransactor) (*AutomationRegistrarTransactor, error) { + contract, err := bindAutomationRegistrar(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AutomationRegistrarTransactor{contract: contract}, nil +} + +func NewAutomationRegistrarFilterer(address common.Address, filterer bind.ContractFilterer) (*AutomationRegistrarFilterer, error) { + contract, err := bindAutomationRegistrar(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AutomationRegistrarFilterer{contract: contract}, nil +} + +func bindAutomationRegistrar(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := AutomationRegistrarMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AutomationRegistrar *AutomationRegistrarRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationRegistrar.Contract.AutomationRegistrarCaller.contract.Call(opts, result, method, params...) 
+} + +func (_AutomationRegistrar *AutomationRegistrarRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.AutomationRegistrarTransactor.contract.Transfer(opts) +} + +func (_AutomationRegistrar *AutomationRegistrarRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.AutomationRegistrarTransactor.contract.Transact(opts, method, params...) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationRegistrar.Contract.contract.Call(opts, result, method, params...) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.contract.Transfer(opts) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.contract.Transact(opts, method, params...) 
+} + +func (_AutomationRegistrar *AutomationRegistrarCaller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) PLI() (common.Address, error) { + return _AutomationRegistrar.Contract.PLI(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerSession) PLI() (common.Address, error) { + return _AutomationRegistrar.Contract.PLI(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarCaller) GetAutoApproveAllowedSender(opts *bind.CallOpts, senderAddress common.Address) (bool, error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "getAutoApproveAllowedSender", senderAddress) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) GetAutoApproveAllowedSender(senderAddress common.Address) (bool, error) { + return _AutomationRegistrar.Contract.GetAutoApproveAllowedSender(&_AutomationRegistrar.CallOpts, senderAddress) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerSession) GetAutoApproveAllowedSender(senderAddress common.Address) (bool, error) { + return _AutomationRegistrar.Contract.GetAutoApproveAllowedSender(&_AutomationRegistrar.CallOpts, senderAddress) +} + +func (_AutomationRegistrar *AutomationRegistrarCaller) GetConfig(opts *bind.CallOpts) (GetConfig, + + error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "getConfig") + + outstruct := new(GetConfig) + if err != nil { + return *outstruct, err + } + + outstruct.AutomationRegistry = *abi.ConvertType(out[0], 
new(common.Address)).(*common.Address) + outstruct.MinPLIJuels = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) GetConfig() (GetConfig, + + error) { + return _AutomationRegistrar.Contract.GetConfig(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerSession) GetConfig() (GetConfig, + + error) { + return _AutomationRegistrar.Contract.GetConfig(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarCaller) GetPendingRequest(opts *bind.CallOpts, hash [32]byte) (common.Address, *big.Int, error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "getPendingRequest", hash) + + if err != nil { + return *new(common.Address), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) GetPendingRequest(hash [32]byte) (common.Address, *big.Int, error) { + return _AutomationRegistrar.Contract.GetPendingRequest(&_AutomationRegistrar.CallOpts, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerSession) GetPendingRequest(hash [32]byte) (common.Address, *big.Int, error) { + return _AutomationRegistrar.Contract.GetPendingRequest(&_AutomationRegistrar.CallOpts, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarCaller) GetTriggerRegistrationDetails(opts *bind.CallOpts, triggerType uint8) (AutomationRegistrar22TriggerRegistrationStorage, error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "getTriggerRegistrationDetails", triggerType) + + if err != nil { + return *new(AutomationRegistrar22TriggerRegistrationStorage), err + } + + out0 := *abi.ConvertType(out[0], 
new(AutomationRegistrar22TriggerRegistrationStorage)).(*AutomationRegistrar22TriggerRegistrationStorage) + + return out0, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) GetTriggerRegistrationDetails(triggerType uint8) (AutomationRegistrar22TriggerRegistrationStorage, error) { + return _AutomationRegistrar.Contract.GetTriggerRegistrationDetails(&_AutomationRegistrar.CallOpts, triggerType) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerSession) GetTriggerRegistrationDetails(triggerType uint8) (AutomationRegistrar22TriggerRegistrationStorage, error) { + return _AutomationRegistrar.Contract.GetTriggerRegistrationDetails(&_AutomationRegistrar.CallOpts, triggerType) +} + +func (_AutomationRegistrar *AutomationRegistrarCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) Owner() (common.Address, error) { + return _AutomationRegistrar.Contract.Owner(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarCallerSession) Owner() (common.Address, error) { + return _AutomationRegistrar.Contract.Owner(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _AutomationRegistrar.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_AutomationRegistrar *AutomationRegistrarSession) TypeAndVersion() (string, error) { + return _AutomationRegistrar.Contract.TypeAndVersion(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar 
*AutomationRegistrarCallerSession) TypeAndVersion() (string, error) { + return _AutomationRegistrar.Contract.TypeAndVersion(&_AutomationRegistrar.CallOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "acceptOwnership") +} + +func (_AutomationRegistrar *AutomationRegistrarSession) AcceptOwnership() (*types.Transaction, error) { + return _AutomationRegistrar.Contract.AcceptOwnership(&_AutomationRegistrar.TransactOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _AutomationRegistrar.Contract.AcceptOwnership(&_AutomationRegistrar.TransactOpts) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) Approve(opts *bind.TransactOpts, name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, hash [32]byte) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "approve", name, upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) Approve(name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, hash [32]byte) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.Approve(&_AutomationRegistrar.TransactOpts, name, upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) Approve(name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, hash 
[32]byte) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.Approve(&_AutomationRegistrar.TransactOpts, name, upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) Cancel(opts *bind.TransactOpts, hash [32]byte) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "cancel", hash) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) Cancel(hash [32]byte) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.Cancel(&_AutomationRegistrar.TransactOpts, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) Cancel(hash [32]byte) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.Cancel(&_AutomationRegistrar.TransactOpts, hash) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.OnTokenTransfer(&_AutomationRegistrar.TransactOpts, sender, amount, data) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.OnTokenTransfer(&_AutomationRegistrar.TransactOpts, sender, amount, data) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) Register(opts *bind.TransactOpts, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, amount 
*big.Int, sender common.Address) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "register", name, encryptedEmail, upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig, amount, sender) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) Register(name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, amount *big.Int, sender common.Address) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.Register(&_AutomationRegistrar.TransactOpts, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig, amount, sender) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) Register(name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, amount *big.Int, sender common.Address) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.Register(&_AutomationRegistrar.TransactOpts, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, triggerType, checkData, triggerConfig, offchainConfig, amount, sender) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) RegisterUpkeep(opts *bind.TransactOpts, requestParams AutomationRegistrar22RegistrationParams) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "registerUpkeep", requestParams) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) RegisterUpkeep(requestParams AutomationRegistrar22RegistrationParams) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.RegisterUpkeep(&_AutomationRegistrar.TransactOpts, requestParams) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) 
RegisterUpkeep(requestParams AutomationRegistrar22RegistrationParams) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.RegisterUpkeep(&_AutomationRegistrar.TransactOpts, requestParams) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) SetAutoApproveAllowedSender(opts *bind.TransactOpts, senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "setAutoApproveAllowedSender", senderAddress, allowed) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) SetAutoApproveAllowedSender(senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.SetAutoApproveAllowedSender(&_AutomationRegistrar.TransactOpts, senderAddress, allowed) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) SetAutoApproveAllowedSender(senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.SetAutoApproveAllowedSender(&_AutomationRegistrar.TransactOpts, senderAddress, allowed) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) SetConfig(opts *bind.TransactOpts, AutomationRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "setConfig", AutomationRegistry, minPLIJuels) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) SetConfig(AutomationRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.SetConfig(&_AutomationRegistrar.TransactOpts, AutomationRegistry, minPLIJuels) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) SetConfig(AutomationRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.SetConfig(&_AutomationRegistrar.TransactOpts, AutomationRegistry, minPLIJuels) +} + +func (_AutomationRegistrar 
*AutomationRegistrarTransactor) SetTriggerConfig(opts *bind.TransactOpts, triggerType uint8, autoApproveType uint8, autoApproveMaxAllowed uint32) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "setTriggerConfig", triggerType, autoApproveType, autoApproveMaxAllowed) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) SetTriggerConfig(triggerType uint8, autoApproveType uint8, autoApproveMaxAllowed uint32) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.SetTriggerConfig(&_AutomationRegistrar.TransactOpts, triggerType, autoApproveType, autoApproveMaxAllowed) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) SetTriggerConfig(triggerType uint8, autoApproveType uint8, autoApproveMaxAllowed uint32) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.SetTriggerConfig(&_AutomationRegistrar.TransactOpts, triggerType, autoApproveType, autoApproveMaxAllowed) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _AutomationRegistrar.contract.Transact(opts, "transferOwnership", to) +} + +func (_AutomationRegistrar *AutomationRegistrarSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.TransferOwnership(&_AutomationRegistrar.TransactOpts, to) +} + +func (_AutomationRegistrar *AutomationRegistrarTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AutomationRegistrar.Contract.TransferOwnership(&_AutomationRegistrar.TransactOpts, to) +} + +type AutomationRegistrarAutoApproveAllowedSenderSetIterator struct { + Event *AutomationRegistrarAutoApproveAllowedSenderSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarAutoApproveAllowedSenderSetIterator) Next() 
bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarAutoApproveAllowedSenderSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarAutoApproveAllowedSenderSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarAutoApproveAllowedSenderSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarAutoApproveAllowedSenderSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarAutoApproveAllowedSenderSet struct { + SenderAddress common.Address + Allowed bool + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterAutoApproveAllowedSenderSet(opts *bind.FilterOpts, senderAddress []common.Address) (*AutomationRegistrarAutoApproveAllowedSenderSetIterator, error) { + + var senderAddressRule []interface{} + for _, senderAddressItem := range senderAddress { + senderAddressRule = append(senderAddressRule, senderAddressItem) + } + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "AutoApproveAllowedSenderSet", senderAddressRule) + if err != nil { + return nil, err + } + return &AutomationRegistrarAutoApproveAllowedSenderSetIterator{contract: _AutomationRegistrar.contract, event: "AutoApproveAllowedSenderSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchAutoApproveAllowedSenderSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarAutoApproveAllowedSenderSet, senderAddress []common.Address) (event.Subscription, error) { + + var 
senderAddressRule []interface{} + for _, senderAddressItem := range senderAddress { + senderAddressRule = append(senderAddressRule, senderAddressItem) + } + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "AutoApproveAllowedSenderSet", senderAddressRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarAutoApproveAllowedSenderSet) + if err := _AutomationRegistrar.contract.UnpackLog(event, "AutoApproveAllowedSenderSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseAutoApproveAllowedSenderSet(log types.Log) (*AutomationRegistrarAutoApproveAllowedSenderSet, error) { + event := new(AutomationRegistrarAutoApproveAllowedSenderSet) + if err := _AutomationRegistrar.contract.UnpackLog(event, "AutoApproveAllowedSenderSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarConfigChangedIterator struct { + Event *AutomationRegistrarConfigChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarConfigChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarConfigChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarConfigChanged) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarConfigChangedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarConfigChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarConfigChanged struct { + AutomationRegistry common.Address + MinPLIJuels *big.Int + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterConfigChanged(opts *bind.FilterOpts) (*AutomationRegistrarConfigChangedIterator, error) { + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "ConfigChanged") + if err != nil { + return nil, err + } + return &AutomationRegistrarConfigChangedIterator{contract: _AutomationRegistrar.contract, event: "ConfigChanged", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchConfigChanged(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarConfigChanged) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "ConfigChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarConfigChanged) + if err := _AutomationRegistrar.contract.UnpackLog(event, "ConfigChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseConfigChanged(log types.Log) (*AutomationRegistrarConfigChanged, error) { + event := new(AutomationRegistrarConfigChanged) + if err := 
_AutomationRegistrar.contract.UnpackLog(event, "ConfigChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarOwnershipTransferRequestedIterator struct { + Event *AutomationRegistrarOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistrarOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := 
_AutomationRegistrar.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistrarOwnershipTransferRequestedIterator{contract: _AutomationRegistrar.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarOwnershipTransferRequested) + if err := _AutomationRegistrar.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseOwnershipTransferRequested(log types.Log) (*AutomationRegistrarOwnershipTransferRequested, error) { + event := new(AutomationRegistrarOwnershipTransferRequested) + if err := _AutomationRegistrar.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarOwnershipTransferredIterator struct { + Event *AutomationRegistrarOwnershipTransferred + + contract *bind.BoundContract + 
event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistrarOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistrarOwnershipTransferredIterator{contract: _AutomationRegistrar.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchOwnershipTransferred(opts 
*bind.WatchOpts, sink chan<- *AutomationRegistrarOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarOwnershipTransferred) + if err := _AutomationRegistrar.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseOwnershipTransferred(log types.Log) (*AutomationRegistrarOwnershipTransferred, error) { + event := new(AutomationRegistrarOwnershipTransferred) + if err := _AutomationRegistrar.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarRegistrationApprovedIterator struct { + Event *AutomationRegistrarRegistrationApproved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarRegistrationApprovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarRegistrationApproved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + 
return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarRegistrationApproved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarRegistrationApprovedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarRegistrationApprovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarRegistrationApproved struct { + Hash [32]byte + DisplayName string + UpkeepId *big.Int + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterRegistrationApproved(opts *bind.FilterOpts, hash [][32]byte, upkeepId []*big.Int) (*AutomationRegistrarRegistrationApprovedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "RegistrationApproved", hashRule, upkeepIdRule) + if err != nil { + return nil, err + } + return &AutomationRegistrarRegistrationApprovedIterator{contract: _AutomationRegistrar.contract, event: "RegistrationApproved", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchRegistrationApproved(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarRegistrationApproved, hash [][32]byte, upkeepId []*big.Int) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + + logs, sub, err := 
_AutomationRegistrar.contract.WatchLogs(opts, "RegistrationApproved", hashRule, upkeepIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarRegistrationApproved) + if err := _AutomationRegistrar.contract.UnpackLog(event, "RegistrationApproved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseRegistrationApproved(log types.Log) (*AutomationRegistrarRegistrationApproved, error) { + event := new(AutomationRegistrarRegistrationApproved) + if err := _AutomationRegistrar.contract.UnpackLog(event, "RegistrationApproved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarRegistrationRejectedIterator struct { + Event *AutomationRegistrarRegistrationRejected + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarRegistrationRejectedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarRegistrationRejected) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarRegistrationRejected) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return 
it.Next() + } +} + +func (it *AutomationRegistrarRegistrationRejectedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarRegistrationRejectedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarRegistrationRejected struct { + Hash [32]byte + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterRegistrationRejected(opts *bind.FilterOpts, hash [][32]byte) (*AutomationRegistrarRegistrationRejectedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "RegistrationRejected", hashRule) + if err != nil { + return nil, err + } + return &AutomationRegistrarRegistrationRejectedIterator{contract: _AutomationRegistrar.contract, event: "RegistrationRejected", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchRegistrationRejected(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarRegistrationRejected, hash [][32]byte) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "RegistrationRejected", hashRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarRegistrationRejected) + if err := _AutomationRegistrar.contract.UnpackLog(event, "RegistrationRejected", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseRegistrationRejected(log 
types.Log) (*AutomationRegistrarRegistrationRejected, error) { + event := new(AutomationRegistrarRegistrationRejected) + if err := _AutomationRegistrar.contract.UnpackLog(event, "RegistrationRejected", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarRegistrationRequestedIterator struct { + Event *AutomationRegistrarRegistrationRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarRegistrationRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarRegistrationRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarRegistrationRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarRegistrationRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarRegistrationRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarRegistrationRequested struct { + Hash [32]byte + Name string + EncryptedEmail []byte + UpkeepContract common.Address + GasLimit uint32 + AdminAddress common.Address + TriggerType uint8 + TriggerConfig []byte + OffchainConfig []byte + CheckData []byte + Amount *big.Int + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterRegistrationRequested(opts *bind.FilterOpts, hash [][32]byte, upkeepContract []common.Address) 
(*AutomationRegistrarRegistrationRequestedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepContractRule []interface{} + for _, upkeepContractItem := range upkeepContract { + upkeepContractRule = append(upkeepContractRule, upkeepContractItem) + } + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "RegistrationRequested", hashRule, upkeepContractRule) + if err != nil { + return nil, err + } + return &AutomationRegistrarRegistrationRequestedIterator{contract: _AutomationRegistrar.contract, event: "RegistrationRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchRegistrationRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarRegistrationRequested, hash [][32]byte, upkeepContract []common.Address) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepContractRule []interface{} + for _, upkeepContractItem := range upkeepContract { + upkeepContractRule = append(upkeepContractRule, upkeepContractItem) + } + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "RegistrationRequested", hashRule, upkeepContractRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarRegistrationRequested) + if err := _AutomationRegistrar.contract.UnpackLog(event, "RegistrationRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseRegistrationRequested(log types.Log) 
(*AutomationRegistrarRegistrationRequested, error) { + event := new(AutomationRegistrarRegistrationRequested) + if err := _AutomationRegistrar.contract.UnpackLog(event, "RegistrationRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistrarTriggerConfigSetIterator struct { + Event *AutomationRegistrarTriggerConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistrarTriggerConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistrarTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistrarTriggerConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistrarTriggerConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistrarTriggerConfigSet struct { + TriggerType uint8 + AutoApproveType uint8 + AutoApproveMaxAllowed uint32 + Raw types.Log +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) FilterTriggerConfigSet(opts *bind.FilterOpts) (*AutomationRegistrarTriggerConfigSetIterator, error) { + + logs, sub, err := _AutomationRegistrar.contract.FilterLogs(opts, "TriggerConfigSet") + if err != nil { + return nil, err + } + return &AutomationRegistrarTriggerConfigSetIterator{contract: _AutomationRegistrar.contract, event: 
"TriggerConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) WatchTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarTriggerConfigSet) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistrar.contract.WatchLogs(opts, "TriggerConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistrarTriggerConfigSet) + if err := _AutomationRegistrar.contract.UnpackLog(event, "TriggerConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistrar *AutomationRegistrarFilterer) ParseTriggerConfigSet(log types.Log) (*AutomationRegistrarTriggerConfigSet, error) { + event := new(AutomationRegistrarTriggerConfigSet) + if err := _AutomationRegistrar.contract.UnpackLog(event, "TriggerConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetConfig struct { + AutomationRegistry common.Address + MinPLIJuels *big.Int +} + +func (_AutomationRegistrar *AutomationRegistrar) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _AutomationRegistrar.abi.Events["AutoApproveAllowedSenderSet"].ID: + return _AutomationRegistrar.ParseAutoApproveAllowedSenderSet(log) + case _AutomationRegistrar.abi.Events["ConfigChanged"].ID: + return _AutomationRegistrar.ParseConfigChanged(log) + case _AutomationRegistrar.abi.Events["OwnershipTransferRequested"].ID: + return _AutomationRegistrar.ParseOwnershipTransferRequested(log) + case _AutomationRegistrar.abi.Events["OwnershipTransferred"].ID: + return _AutomationRegistrar.ParseOwnershipTransferred(log) + case 
_AutomationRegistrar.abi.Events["RegistrationApproved"].ID: + return _AutomationRegistrar.ParseRegistrationApproved(log) + case _AutomationRegistrar.abi.Events["RegistrationRejected"].ID: + return _AutomationRegistrar.ParseRegistrationRejected(log) + case _AutomationRegistrar.abi.Events["RegistrationRequested"].ID: + return _AutomationRegistrar.ParseRegistrationRequested(log) + case _AutomationRegistrar.abi.Events["TriggerConfigSet"].ID: + return _AutomationRegistrar.ParseTriggerConfigSet(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (AutomationRegistrarAutoApproveAllowedSenderSet) Topic() common.Hash { + return common.HexToHash("0x20c6237dac83526a849285a9f79d08a483291bdd3a056a0ef9ae94ecee1ad356") +} + +func (AutomationRegistrarConfigChanged) Topic() common.Hash { + return common.HexToHash("0x39ce5d867555f0b0183e358fce5b158e7ca4fecd7c01cb7e0e19f1e23285838a") +} + +func (AutomationRegistrarOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (AutomationRegistrarOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (AutomationRegistrarRegistrationApproved) Topic() common.Hash { + return common.HexToHash("0xb9a292fb7e3edd920cd2d2829a3615a640c43fd7de0a0820aa0668feb4c37d4b") +} + +func (AutomationRegistrarRegistrationRejected) Topic() common.Hash { + return common.HexToHash("0x3663fb28ebc87645eb972c9dad8521bf665c623f287e79f1c56f1eb374b82a22") +} + +func (AutomationRegistrarRegistrationRequested) Topic() common.Hash { + return common.HexToHash("0x7684390ebb103102f7f48c71439c2408713f8d437782a6fab2756acc0e42c1b7") +} + +func (AutomationRegistrarTriggerConfigSet) Topic() common.Hash { + return common.HexToHash("0x830a6d06a4e2caac67eba04323de22bdb04f032dd8b3d6a0c52b503d9a7036a3") +} + +func 
(_AutomationRegistrar *AutomationRegistrar) Address() common.Address { + return _AutomationRegistrar.address +} + +type AutomationRegistrarInterface interface { + PLI(opts *bind.CallOpts) (common.Address, error) + + GetAutoApproveAllowedSender(opts *bind.CallOpts, senderAddress common.Address) (bool, error) + + GetConfig(opts *bind.CallOpts) (GetConfig, + + error) + + GetPendingRequest(opts *bind.CallOpts, hash [32]byte) (common.Address, *big.Int, error) + + GetTriggerRegistrationDetails(opts *bind.CallOpts, triggerType uint8) (AutomationRegistrar22TriggerRegistrationStorage, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + Approve(opts *bind.TransactOpts, name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, hash [32]byte) (*types.Transaction, error) + + Cancel(opts *bind.TransactOpts, hash [32]byte) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + Register(opts *bind.TransactOpts, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte, amount *big.Int, sender common.Address) (*types.Transaction, error) + + RegisterUpkeep(opts *bind.TransactOpts, requestParams AutomationRegistrar22RegistrationParams) (*types.Transaction, error) + + SetAutoApproveAllowedSender(opts *bind.TransactOpts, senderAddress common.Address, allowed bool) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, AutomationRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) + + SetTriggerConfig(opts *bind.TransactOpts, triggerType uint8, autoApproveType uint8, 
autoApproveMaxAllowed uint32) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterAutoApproveAllowedSenderSet(opts *bind.FilterOpts, senderAddress []common.Address) (*AutomationRegistrarAutoApproveAllowedSenderSetIterator, error) + + WatchAutoApproveAllowedSenderSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarAutoApproveAllowedSenderSet, senderAddress []common.Address) (event.Subscription, error) + + ParseAutoApproveAllowedSenderSet(log types.Log) (*AutomationRegistrarAutoApproveAllowedSenderSet, error) + + FilterConfigChanged(opts *bind.FilterOpts) (*AutomationRegistrarConfigChangedIterator, error) + + WatchConfigChanged(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarConfigChanged) (event.Subscription, error) + + ParseConfigChanged(log types.Log) (*AutomationRegistrarConfigChanged, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistrarOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*AutomationRegistrarOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistrarOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*AutomationRegistrarOwnershipTransferred, error) + + FilterRegistrationApproved(opts *bind.FilterOpts, hash [][32]byte, upkeepId []*big.Int) (*AutomationRegistrarRegistrationApprovedIterator, error) + + WatchRegistrationApproved(opts *bind.WatchOpts, sink 
chan<- *AutomationRegistrarRegistrationApproved, hash [][32]byte, upkeepId []*big.Int) (event.Subscription, error) + + ParseRegistrationApproved(log types.Log) (*AutomationRegistrarRegistrationApproved, error) + + FilterRegistrationRejected(opts *bind.FilterOpts, hash [][32]byte) (*AutomationRegistrarRegistrationRejectedIterator, error) + + WatchRegistrationRejected(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarRegistrationRejected, hash [][32]byte) (event.Subscription, error) + + ParseRegistrationRejected(log types.Log) (*AutomationRegistrarRegistrationRejected, error) + + FilterRegistrationRequested(opts *bind.FilterOpts, hash [][32]byte, upkeepContract []common.Address) (*AutomationRegistrarRegistrationRequestedIterator, error) + + WatchRegistrationRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarRegistrationRequested, hash [][32]byte, upkeepContract []common.Address) (event.Subscription, error) + + ParseRegistrationRequested(log types.Log) (*AutomationRegistrarRegistrationRequested, error) + + FilterTriggerConfigSet(opts *bind.FilterOpts) (*AutomationRegistrarTriggerConfigSetIterator, error) + + WatchTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistrarTriggerConfigSet) (event.Subscription, error) + + ParseTriggerConfigSet(log types.Log) (*AutomationRegistrarTriggerConfigSet, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/automation_registry_logic_a_wrapper_2_2/automation_registry_logic_a_wrapper_2_2.go b/core/gethwrappers/generated/automation_registry_logic_a_wrapper_2_2/automation_registry_logic_a_wrapper_2_2.go new file mode 100644 index 00000000..9765d1f4 --- /dev/null +++ b/core/gethwrappers/generated/automation_registry_logic_a_wrapper_2_2/automation_registry_logic_a_wrapper_2_2.go @@ -0,0 +1,4992 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package automation_registry_logic_a_wrapper_2_2 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var AutomationRegistryLogicAMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"contractAutomationRegistryLogicB2_2\",\"name\":\"logicB\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CheckDataExceedsLimit\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ConfigDigestMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfFaultyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidReport\",\"type\":\"error\"},{\"inputs\":[],\"
name\":\"InvalidSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTransmitter\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTrigger\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTriggerType\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxCheckDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxPerformDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveTransmitters\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByUpkeepPrivilegeManager\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyPausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUnpausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReentrantCall\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistryPaused\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedTransmitter\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error
\"},{\"inputs\":[],\"name\":\"TooManyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepAlreadyExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepCancelled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"AdminPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"CancelledUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newModule\",\"type\":\"address\"}],\"name\":\"ChainSpecificModuleUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"dedupKey\",\"type\":\"bytes32\"}],\"name\":\"DedupKeyAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"Fund
sWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"InsufficientFundsUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"PayeesUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"typ
e\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"ReorgedUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"StaleUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferred\",\"type\":\"event\"},{\"anonymous\":fals
e,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"UpkeepCheckDataSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepOffchainConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepPaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"n
ame\":\"gasOverhead\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepTriggerConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepUnpaused\",\"type\":\"event\"},{\"stateMutability\":\"nonpayable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",
\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"cancelUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes[]\",\"name\":\"values\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"checkCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"enumAutomationRegistryBase2_2.UpkeepFailureReason\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"triggerData\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"enumAutomationRegistryBase2_2.UpkeepFailureReason\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fastGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkNative\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"enumAutomationRegistryB
ase2_2.UpkeepFailureReason\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fastGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkNative\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"payload\",\"type\":\"bytes\"}],\"name\":\"executeCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"enumAutomationRegistryBase2_2.UpkeepFailureReason\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"fallbackTo\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"ids\",\"type\":\"uint256[]\"},{\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"migrateUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"encodedUpkeeps\",\"type\":\"bytes\"}],\"name\":\"receiveUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType
\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"enumAutomationRegistryBase2_2.Trigger\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"}],\"name\":\"setUpkeepTriggerConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x6101406040523480156200001257600080fd5b5060405162005ee738038062005ee78339810160408190526200003591620003b1565b80816001600160a01b031663ca30e6036040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000075573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200009b9190620003b1565b826001600160a01b031663b10b673c6040518163ffffffff1660e01b8152600401602060405180830381865afa158015620000da573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001009190620003b1565b836001600160a01b0316636709d0e56040518163ffffffff1660e01b8152600401602060405180830381865afa1580156200013f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001659190620003b1565b846001600160a01b0316635425d8ac6040518163ffffffff1660e01b8152600401602060405180830381865afa158015620001a4573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001ca9190620003b1565b856001600160a01b031663a08714c06040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000209573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200022f9190620003b1565b3380600081620002865760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620002b957620002b981620002ed565b5050506001600160a01b0394851660805292841660a05290831660c052821660e052811661010052166101205250620003d8565b336001600160a01b03821603620003475760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016200027d565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6001600160a01b0381168114620003ae57600080fd5b50565b600060208284031215620003c457600080fd5b8151620003d18162000398565b9392505050565b60805160a05160c05160e051610
1005161012051615aaa6200043d6000396000818161010e01526101a9015260006131260152600081816103e10152611ffe0152600061330f015260006133f3015260008181611e40015261240c0152615aaa6000f3fe60806040523480156200001157600080fd5b50600436106200010c5760003560e01c806385c1b0ba11620000a5578063c8048022116200006f578063c804802214620002b7578063ce7dc5b414620002ce578063f2fde38b14620002e5578063f7d334ba14620002fc576200010c565b806385c1b0ba14620002535780638da5cb5b146200026a5780638e86139b1462000289578063948108f714620002a0576200010c565b80634ee88d3511620000e75780634ee88d3514620001ef5780636ded9eae146200020657806371791aa0146200021d57806379ba50971462000249576200010c565b806328f32f38146200015457806329c5efad146200017e578063349e8cca14620001a7575b7f00000000000000000000000000000000000000000000000000000000000000003660008037600080366000845af43d6000803e8080156200014d573d6000f35b3d6000fd5b005b6200016b6200016536600462004045565b62000313565b6040519081526020015b60405180910390f35b620001956200018f3660046200412b565b6200068d565b60405162000175949392919062004253565b7f00000000000000000000000000000000000000000000000000000000000000005b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200162000175565b620001526200020036600462004290565b62000931565b6200016b62000217366004620042e0565b62000999565b620002346200022e3660046200412b565b620009ff565b60405162000175979695949392919062004393565b620001526200114d565b6200015262000264366004620043e5565b62001250565b60005473ffffffffffffffffffffffffffffffffffffffff16620001c9565b620001526200029a36600462004472565b62001ec1565b62000152620002b1366004620044d5565b62002249565b62000152620002c836600462004504565b620024dc565b62000195620002df366004620045da565b62002927565b62000152620002f636600462004651565b620029ed565b620002346200030d36600462004504565b62002a05565b6000805473ffffffffffffffffffffffffffffffffffffffff1633148015906200034757506200034560093362002a43565b155b156200037f576040517fd48b678b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73fffffffffffffffffffffffffffffffff
fffffff89163b620003ce576040517f09ee12d500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b620003d98662002a77565b9050600089307f00000000000000000000000000000000000000000000000000000000000000006040516200040e9062003dd1565b73ffffffffffffffffffffffffffffffffffffffff938416815291831660208301529091166040820152606001604051809103906000f08015801562000458573d6000803e3d6000fd5b5090506200051f826040518060e001604052806000151581526020018c63ffffffff16815260200163ffffffff801681526020018473ffffffffffffffffffffffffffffffffffffffff16815260200160006bffffffffffffffffffffffff16815260200160006bffffffffffffffffffffffff168152602001600063ffffffff168152508a89898080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508b92508a915062002d239050565b6015805474010000000000000000000000000000000000000000900463ffffffff169060146200054f83620046a0565b91906101000a81548163ffffffff021916908363ffffffff16021790555050817fbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d0128a8a604051620005c892919063ffffffff92909216825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b60405180910390a2817fcba2d5723b2ee59e53a8e8a82a4a7caf4fdfe70e9f7c582950bf7e7a5c24e83d8787604051620006049291906200470f565b60405180910390a2817f2b72ac786c97e68dbab71023ed6f2bdbfc80ad9bb7808941929229d71b7d5664856040516200063e919062004725565b60405180910390a2817f3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf48508460405162000678919062004725565b60405180910390a25098975050505050505050565b600060606000806200069e6200310e565b600086815260046020908152604091829020825160e081018452815460ff811615158252610100810463ffffffff90811694830194909452650100000000008104841694820194909452690100000000000000000090930473ffffffffffffffffffffffffffffffffffffffff166060840152600101546bffffffffffffffffffffffff80821660808501526c0100000000000000000000000082041660a0840152780100000000000000000000000000000000000000000000000090041660c08201525a9150600080826060015173fffffffffffffffffffff
fffffffffffffffffff1663f00e6a2a6040518163ffffffff1660e01b8152600401602060405180830381865afa158015620007b8573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620007de919062004747565b73ffffffffffffffffffffffffffffffffffffffff166014600101600c9054906101000a900463ffffffff1663ffffffff168960405162000820919062004767565b60006040518083038160008787f1925050503d806000811462000860576040519150601f19603f3d011682016040523d82523d6000602084013e62000865565b606091505b50915091505a62000877908562004785565b935081620008a257600060405180602001604052806000815250600796509650965050505062000928565b80806020019051810190620008b89190620047f6565b909750955086620008e657600060405180602001604052806000815250600496509650965050505062000928565b601654865164010000000090910463ffffffff1610156200092457600060405180602001604052806000815250600596509650965050505062000928565b5050505b92959194509250565b6200093c8362003180565b6000838152601b6020526040902062000957828483620048eb565b50827f2b72ac786c97e68dbab71023ed6f2bdbfc80ad9bb7808941929229d71b7d566483836040516200098c9291906200470f565b60405180910390a2505050565b6000620009f388888860008989604051806020016040528060008152508a8a8080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506200031392505050565b98975050505050505050565b60006060600080600080600062000a156200310e565b600062000a228a62003236565b905060006012604051806101600160405290816000820160009054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff166bffffffffffffffffffffffff16815260200160008201600c9054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160109054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160149054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160189054906101000a900462ffffff1662ffffff1662ffffff16815260200160008201601b9054906101000a900461ffff1661ffff1661ffff16815260200160008201601d9054906101000a900460ff1660ff1660ff16815260200160008201601e9054906101000a900460ff161515151581526020016
0008201601f9054906101000a900460ff161515151581526020016001820160009054906101000a900460ff161515151581526020016001820160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505090506000600460008d81526020019081526020016000206040518060e00160405290816000820160009054906101000a900460ff161515151581526020016000820160019054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160059054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160099054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020016001820160009054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff166bffffffffffffffffffffffff16815260200160018201600c9054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff166bffffffffffffffffffffffff1681526020016001820160189054906101000a900463ffffffff1663ffffffff1663ffffffff168152505090508160e001511562000db7576000604051806020016040528060008152506009600084602001516000808263ffffffff169250995099509950995099509950995050505062001141565b604081015163ffffffff9081161462000e08576000604051806020016040528060008152506001600084602001516000808263ffffffff169250995099509950995099509950995050505062001141565b80511562000e4e576000604051806020016040528060008152506002600084602001516000808263ffffffff169250995099509950995099509950995050505062001141565b62000e5982620032ec565b8095508196505050600062000e76838584602001518989620034de565b9050806bffffffffffffffffffffffff168260a001516bffffffffffffffffffffffff16101562000ee0576000604051806020016040528060008152506006600085602001516000808263ffffffff1692509a509a509a509a509a509a509a505050505062001141565b600062000eef8e868f62003793565b90505a9850600080846060015173ffffffffffffffffffffffffffffffffffffffff1663f00e6a2a6040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000f47573d6000803e3d6000fd5b505
050506040513d601f19601f8201168201806040525081019062000f6d919062004747565b73ffffffffffffffffffffffffffffffffffffffff166014600101600c9054906101000a900463ffffffff1663ffffffff168460405162000faf919062004767565b60006040518083038160008787f1925050503d806000811462000fef576040519150601f19603f3d011682016040523d82523d6000602084013e62000ff4565b606091505b50915091505a62001006908c62004785565b9a5081620010865760165481516801000000000000000090910463ffffffff1610156200106357505060408051602080820190925260008082529490910151939c509a50600899505063ffffffff90911695506200114192505050565b602090940151939b5060039a505063ffffffff9092169650620011419350505050565b808060200190518101906200109c9190620047f6565b909e509c508d620010dd57505060408051602080820190925260008082529490910151939c509a50600499505063ffffffff90911695506200114192505050565b6016548d5164010000000090910463ffffffff1610156200112e57505060408051602080820190925260008082529490910151939c509a50600599505063ffffffff90911695506200114192505050565b505050506020015163ffffffff16945050505b92959891949750929550565b60015473ffffffffffffffffffffffffffffffffffffffff163314620011d4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b600173ffffffffffffffffffffffffffffffffffffffff82166000908152601a602052604090205460ff1660038111156200128f576200128f620041e8565b14158015620012db5750600373ffffffffffffffffffffffffffffffffffffffff82166000908152601a602052604090205460ff166003811115620012d857620012d8620041e8565b14155b1562001313576040517f0ebeec3c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6014546c01000000000000000000000000900473fffffffffffffffffff
fffffffffffffffffffff1662001373576040517fd12d7d8d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000829003620013af576040517f2c2fc94100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805160e081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c081018290526000808567ffffffffffffffff81111562001406576200140662003ecc565b60405190808252806020026020018201604052801562001430578160200160208202803683370190505b50905060008667ffffffffffffffff81111562001451576200145162003ecc565b604051908082528060200260200182016040528015620014d857816020015b6040805160e08101825260008082526020808301829052928201819052606082018190526080820181905260a0820181905260c082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff909201910181620014705790505b50905060008767ffffffffffffffff811115620014f957620014f962003ecc565b6040519080825280602002602001820160405280156200152e57816020015b6060815260200190600190039081620015185790505b50905060008867ffffffffffffffff8111156200154f576200154f62003ecc565b6040519080825280602002602001820160405280156200158457816020015b60608152602001906001900390816200156e5790505b50905060008967ffffffffffffffff811115620015a557620015a562003ecc565b604051908082528060200260200182016040528015620015da57816020015b6060815260200190600190039081620015c45790505b50905060005b8a81101562001bbe578b8b82818110620015fe57620015fe62004a13565b60209081029290920135600081815260048452604090819020815160e081018352815460ff811615158252610100810463ffffffff90811697830197909752650100000000008104871693820193909352690100000000000000000090920473ffffffffffffffffffffffffffffffffffffffff166060830152600101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a08301527801000000000000000000000000000000000000000000000000900490931660c08401529a50909850620016dd90508962003180565b60608801516040517f1a5da6c800000000000000000000000000000000000000000000000000000000815273fffffffffffffffffffffffffffffff
fffffffff8c8116600483015290911690631a5da6c890602401600060405180830381600087803b1580156200174d57600080fd5b505af115801562001762573d6000803e3d6000fd5b50505050878582815181106200177c576200177c62004a13565b6020026020010181905250600560008a815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16868281518110620017d057620017d062004a13565b73ffffffffffffffffffffffffffffffffffffffff90921660209283029190910182015260008a815260079091526040902080546200180f9062004843565b80601f01602080910402602001604051908101604052809291908181526020018280546200183d9062004843565b80156200188e5780601f1062001862576101008083540402835291602001916200188e565b820191906000526020600020905b8154815290600101906020018083116200187057829003601f168201915b5050505050848281518110620018a857620018a862004a13565b6020026020010181905250601b60008a81526020019081526020016000208054620018d39062004843565b80601f0160208091040260200160405190810160405280929190818152602001828054620019019062004843565b8015620019525780601f10620019265761010080835404028352916020019162001952565b820191906000526020600020905b8154815290600101906020018083116200193457829003601f168201915b50505050508382815181106200196c576200196c62004a13565b6020026020010181905250601c60008a81526020019081526020016000208054620019979062004843565b80601f0160208091040260200160405190810160405280929190818152602001828054620019c59062004843565b801562001a165780601f10620019ea5761010080835404028352916020019162001a16565b820191906000526020600020905b815481529060010190602001808311620019f857829003601f168201915b505050505082828151811062001a305762001a3062004a13565b60200260200101819052508760a001516bffffffffffffffffffffffff168762001a5b919062004a42565b60008a815260046020908152604080832080547fffffff000000000000000000000000000000000000000000000000000000000016815560010180547fffffffff000000000000000000000000000000000000000000000000000000001690556007909152812091985062001ad1919062003ddf565b6000898152601b6020526040812062001aea9162003ddf565b6000898152601c6020526040812062001b039162003dd
f565b600089815260066020526040902080547fffffffffffffffffffffffff000000000000000000000000000000000000000016905562001b4460028a62003983565b5060a0880151604080516bffffffffffffffffffffffff909216825273ffffffffffffffffffffffffffffffffffffffff8c1660208301528a917fb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff910160405180910390a28062001bb58162004a58565b915050620015e0565b508560195462001bcf919062004785565b60195560008b8b868167ffffffffffffffff81111562001bf35762001bf362003ecc565b60405190808252806020026020018201604052801562001c1d578160200160208202803683370190505b508988888860405160200162001c3b98979695949392919062004c1f565b60405160208183030381529060405290508973ffffffffffffffffffffffffffffffffffffffff16638e86139b6014600001600c9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663c71249ab60038e73ffffffffffffffffffffffffffffffffffffffff1663aab9edd66040518163ffffffff1660e01b8152600401602060405180830381865afa15801562001cf7573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062001d1d919062004cfe565b866040518463ffffffff1660e01b815260040162001d3e9392919062004d23565b600060405180830381865afa15801562001d5c573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405262001da4919081019062004d4a565b6040518263ffffffff1660e01b815260040162001dc2919062004725565b600060405180830381600087803b15801562001ddd57600080fd5b505af115801562001df2573d6000803e3d6000fd5b50506040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8d81166004830152602482018b90527f000000000000000000000000000000000000000000000000000000000000000016925063a9059cbb91506044016020604051808303816000875af115801562001e8c573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062001eb2919062004d83565b50505050505050505050505050565b6002336000908152601a602052604090205460ff16600381111562001eea5762001eea620041e85
65b1415801562001f2057506003336000908152601a602052604090205460ff16600381111562001f1d5762001f1d620041e8565b14155b1562001f58576040517f0ebeec3c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600080808080808062001f6e888a018a62004f6e565b965096509650965096509650965060005b87518110156200223d57600073ffffffffffffffffffffffffffffffffffffffff1687828151811062001fb65762001fb662004a13565b60200260200101516060015173ffffffffffffffffffffffffffffffffffffffff1603620020ca5785818151811062001ff35762001ff362004a13565b6020026020010151307f00000000000000000000000000000000000000000000000000000000000000006040516200202b9062003dd1565b73ffffffffffffffffffffffffffffffffffffffff938416815291831660208301529091166040820152606001604051809103906000f08015801562002075573d6000803e3d6000fd5b508782815181106200208b576200208b62004a13565b60200260200101516060019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250505b62002182888281518110620020e357620020e362004a13565b602002602001015188838151811062002100576200210062004a13565b60200260200101518784815181106200211d576200211d62004a13565b60200260200101518785815181106200213a576200213a62004a13565b602002602001015187868151811062002157576200215762004a13565b602002602001015187878151811062002174576200217462004a13565b602002602001015162002d23565b87818151811062002197576200219762004a13565b60200260200101517f74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71888381518110620021d557620021d562004a13565b602002602001015160a0015133604051620022209291906bffffffffffffffffffffffff92909216825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b60405180910390a280620022348162004a58565b91505062001f7f565b50505050505050505050565b600082815260046020908152604091829020825160e081018452815460ff81161515825263ffffffff6101008204811694830194909452650100000000008104841694820185905273ffffffffffffffffffffffffffffffffffffffff69010000000000000000009091041660608201526001909101546bffffffffffffffffffffffff8082166
0808401526c0100000000000000000000000082041660a083015278010000000000000000000000000000000000000000000000009004821660c0820152911462002347576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b818160a001516200235991906200509f565b600084815260046020526040902060010180547fffffffffffffffff000000000000000000000000ffffffffffffffffffffffff166c010000000000000000000000006bffffffffffffffffffffffff93841602179055601954620023c19184169062004a42565b6019556040517f23b872dd0000000000000000000000000000000000000000000000000000000081523360048201523060248201526bffffffffffffffffffffffff831660448201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906323b872dd906064016020604051808303816000875af11580156200246b573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062002491919062004d83565b506040516bffffffffffffffffffffffff83168152339084907fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039060200160405180910390a3505050565b6000818152600460209081526040808320815160e081018352815460ff81161515825263ffffffff6101008204811695830195909552650100000000008104851693820184905273ffffffffffffffffffffffffffffffffffffffff69010000000000000000009091041660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a083015278010000000000000000000000000000000000000000000000009004831660c08201529291141590620025c560005473ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161490506000601260010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166357e871e76040518163ffffffff1660e01b8152600401602060405180830381865afa15801562002668573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200268e9190620050c7565b9050828015620026b25750818015620026b0575080846040015163ffffffff16115b155b15620026ea576
040517ffbc0357800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b811580156200271d575060008581526005602052604090205473ffffffffffffffffffffffffffffffffffffffff163314155b1562002755576040517ffbdb8e5600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b816200276b576200276860328262004a42565b90505b6000858152600460205260409020805463ffffffff80841665010000000000027fffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffff90921691909117909155620027c79060029087906200398316565b5060145460808501516bffffffffffffffffffffffff918216916000911682111562002830576080860151620027fe9083620050e1565b90508560a001516bffffffffffffffffffffffff16816bffffffffffffffffffffffff16111562002830575060a08501515b808660a00151620028429190620050e1565b600088815260046020526040902060010180547fffffffffffffffff000000000000000000000000ffffffffffffffffffffffff166c010000000000000000000000006bffffffffffffffffffffffff93841602179055601554620028aa918391166200509f565b601580547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff9290921691909117905560405167ffffffffffffffff84169088907f91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f79118190600090a350505050505050565b600060606000806000634b56a42e60e01b8888886040516024016200294f9392919062005109565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff00000000000000000000000000000000000000000000000000000000909316929092179091529050620029da89826200068d565b929c919b50995090975095505050505050565b620029f762003991565b62002a028162003a14565b50565b60006060600080600080600062002a2c8860405180602001604052806000815250620009ff565b959e949d50929b5090995097509550909350915050565b73ffffffffffffffffffffffffffffffffffffffff8116600090815260018301602052604081205415155b90505b92915050565b6000806000601260010160019054906101000a900473fffffffffffffffffffffffffffffffffffffff
f16905060008173ffffffffffffffffffffffffffffffffffffffff166385df51fd60018473ffffffffffffffffffffffffffffffffffffffff166357e871e76040518163ffffffff1660e01b8152600401602060405180830381865afa15801562002b10573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062002b369190620050c7565b62002b42919062004785565b6040518263ffffffff1660e01b815260040162002b6191815260200190565b602060405180830381865afa15801562002b7f573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062002ba59190620050c7565b601554604080516020810193909352309083015274010000000000000000000000000000000000000000900463ffffffff166060820152608001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815282825280516020918201209083015201604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052905060045b600f81101562002cb1578382828151811062002c6d5762002c6d62004a13565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508062002ca88162004a58565b91505062002c4d565b5084600181111562002cc75762002cc7620041e8565b60f81b81600f8151811062002ce05762002ce062004a13565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535062002d1a816200513d565b95945050505050565b6012547e01000000000000000000000000000000000000000000000000000000000000900460ff161562002d83576040517f24522f3400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601654835163ffffffff909116101562002dc9576040517fae7235df00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6108fc856020015163ffffffff16108062002e075750601554602086015163ffffffff70010000000000000000000000000000000090920482169116115b1562002e3f576040517f14c237fb00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000868152600460205260409020546901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff161562002ea9576040517f6e3b930b0000000000000000000
0000000000000000000000000000000000000815260040160405180910390fd5b6000868152600460209081526040808320885181548a8501518b85015160608d01517fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000009093169315157fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ff169390931761010063ffffffff92831602177fffffff000000000000000000000000000000000000000000000000ffffffffff1665010000000000938216939093027fffffff0000000000000000000000000000000000000000ffffffffffffffffff1692909217690100000000000000000073ffffffffffffffffffffffffffffffffffffffff9283160217835560808b01516001909301805460a08d015160c08e01516bffffffffffffffffffffffff9687167fffffffffffffffff000000000000000000000000000000000000000000000000909316929092176c010000000000000000000000009690911695909502949094177fffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffff1678010000000000000000000000000000000000000000000000009490931693909302919091179091556005835281842080547fffffffffffffffffffffffff000000000000000000000000000000000000000016918916919091179055600790915290206200309c848262005180565b508460a001516bffffffffffffffffffffffff16601954620030bf919062004a42565b6019556000868152601b60205260409020620030dc838262005180565b506000868152601c60205260409020620030f7828262005180565b506200310560028762003b0b565b50505050505050565b3273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146200317e576040517fb60ac5db00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b60008181526005602052604090205473ffffffffffffffffffffffffffffffffffffffff163314620031de576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008181526004602052604090205465010000000000900463ffffffff9081161462002a02576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000818160045b600f811015620032cb577fff0000000000000000000000000000000000000000000000000000000000000082168382602081106
200327f576200327f62004a13565b1a60f81b7effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191614620032b657506000949350505050565b80620032c28162004a58565b9150506200323d565b5081600f1a6001811115620032e457620032e4620041e8565b949350505050565b6000806000836080015162ffffff1690506000808263ffffffff161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a060405180830381865afa15801562003379573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200339f9190620052c2565b5094509092505050600081131580620033b757508142105b80620033dc5750828015620033dc5750620033d3824262004785565b8463ffffffff16105b15620033ed576017549550620033f1565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a060405180830381865afa1580156200345d573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620034839190620052c2565b50945090925050506000811315806200349b57508142105b80620034c05750828015620034c05750620034b7824262004785565b8463ffffffff16105b15620034d1576018549450620034d5565b8094505b50505050915091565b60008080866001811115620034f757620034f7620041e8565b0362003507575061ea6062003561565b60018660018111156200351e576200351e620041e8565b036200352f575062014c0862003561565b6040517ff2b2d41200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008760c00151600162003576919062005317565b620035869060ff16604062005333565b601654620035a6906103a490640100000000900463ffffffff1662004a42565b620035b2919062004a42565b601354604080517fde9ee35e00000000000000000000000000000000000000000000000000000000815281519394506000938493610100900473ffffffffffffffffffffffffffffffffffffffff169263de9ee35e92600480820193918290030181865afa15801562003629573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200364f91906200534d565b9092509050818362003
66383601862004a42565b6200366f919062005333565b60c08c01516200368190600162005317565b620036929060ff166115e062005333565b6200369e919062004a42565b620036aa919062004a42565b620036b6908562004a42565b935060008a610140015173ffffffffffffffffffffffffffffffffffffffff166312544140856040518263ffffffff1660e01b8152600401620036fb91815260200190565b602060405180830381865afa15801562003719573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200373f9190620050c7565b8b60a0015161ffff1662003754919062005333565b9050600080620037718d8c63ffffffff1689868e8e600062003b19565b90925090506200378281836200509f565b9d9c50505050505050505050505050565b60606000836001811115620037ac57620037ac620041e8565b0362003879576000848152600760205260409081902090517f6e04ff0d0000000000000000000000000000000000000000000000000000000091620037f49160240162005415565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009093169290921790915290506200397c565b6001836001811115620038905762003890620041e8565b036200352f57600082806020019051810190620038ae91906200548c565b6000868152600760205260409081902090519192507f40691db40000000000000000000000000000000000000000000000000000000091620038f5918491602401620055a0565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009093169290921790915291506200397c9050565b9392505050565b600062002a6e838362003c74565b60005473ffffffffffffffffffffffffffffffffffffffff1633146200317e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401620011cb565b3373ffffffffffffffffffffffffffffffffffffffff82160362003a95576040517f08c379a00000000000000
0000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620011cb565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600062002a6e838362003d7f565b60008060008960a0015161ffff168662003b34919062005333565b905083801562003b435750803a105b1562003b4c57503a5b6000858862003b5c8b8d62004a42565b62003b68908562005333565b62003b74919062004a42565b62003b8890670de0b6b3a764000062005333565b62003b94919062005668565b905060008b6040015163ffffffff1664e8d4a5100062003bb5919062005333565b60208d0151889063ffffffff168b62003bcf8f8862005333565b62003bdb919062004a42565b62003beb90633b9aca0062005333565b62003bf7919062005333565b62003c03919062005668565b62003c0f919062004a42565b90506b033b2e3c9fd0803ce800000062003c2a828462004a42565b111562003c63576040517f2ad7547a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b909b909a5098505050505050505050565b6000818152600183016020526040812054801562003d6d57600062003c9b60018362004785565b855490915060009062003cb19060019062004785565b905081811462003d1d57600086600001828154811062003cd55762003cd562004a13565b906000526020600020015490508087600001848154811062003cfb5762003cfb62004a13565b6000918252602080832090910192909255918252600188019052604090208390555b855486908062003d315762003d31620056a4565b60019003818190600052602060002001600090559055856001016000868152602001908152602001600020600090556001935050505062002a71565b600091505062002a71565b5092915050565b600081815260018301602052604081205462003dc85750815460018181018455600084815260208082209093018490558454848252828601909352604090209190915562002a71565b50600062002a71565b6103ca80620056d483390190565b50805462003ded9062004843565b6000825580601f1062003dfe575050565b601f01602090049060005260206000209081019062002a0291905b8082111562003e2f576000815560010
162003e19565b5090565b73ffffffffffffffffffffffffffffffffffffffff8116811462002a0257600080fd5b803563ffffffff8116811462003e6b57600080fd5b919050565b80356002811062003e6b57600080fd5b60008083601f84011262003e9357600080fd5b50813567ffffffffffffffff81111562003eac57600080fd5b60208301915083602082850101111562003ec557600080fd5b9250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60405160e0810167ffffffffffffffff8111828210171562003f215762003f2162003ecc565b60405290565b604051610100810167ffffffffffffffff8111828210171562003f215762003f2162003ecc565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff8111828210171562003f985762003f9862003ecc565b604052919050565b600067ffffffffffffffff82111562003fbd5762003fbd62003ecc565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600082601f83011262003ffb57600080fd5b8135620040126200400c8262003fa0565b62003f4e565b8181528460208386010111156200402857600080fd5b816020850160208301376000918101602001919091529392505050565b60008060008060008060008060e0898b0312156200406257600080fd5b88356200406f8162003e33565b97506200407f60208a0162003e56565b96506040890135620040918162003e33565b9550620040a160608a0162003e70565b9450608089013567ffffffffffffffff80821115620040bf57600080fd5b620040cd8c838d0162003e80565b909650945060a08b0135915080821115620040e757600080fd5b620040f58c838d0162003fe9565b935060c08b01359150808211156200410c57600080fd5b506200411b8b828c0162003fe9565b9150509295985092959890939650565b600080604083850312156200413f57600080fd5b82359150602083013567ffffffffffffffff8111156200415e57600080fd5b6200416c8582860162003fe9565b9150509250929050565b60005b838110156200419357818101518382015260200162004179565b50506000910152565b60008151808452620041b681602086016020860162004176565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045
260246000fd5b600a81106200424f577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b9052565b84151581526080602082015260006200427060808301866200419c565b905062004281604083018562004217565b82606083015295945050505050565b600080600060408486031215620042a657600080fd5b83359250602084013567ffffffffffffffff811115620042c557600080fd5b620042d38682870162003e80565b9497909650939450505050565b600080600080600080600060a0888a031215620042fc57600080fd5b8735620043098162003e33565b9650620043196020890162003e56565b955060408801356200432b8162003e33565b9450606088013567ffffffffffffffff808211156200434957600080fd5b620043578b838c0162003e80565b909650945060808a01359150808211156200437157600080fd5b50620043808a828b0162003e80565b989b979a50959850939692959293505050565b871515815260e060208201526000620043b060e08301896200419c565b9050620043c1604083018862004217565b8560608301528460808301528360a08301528260c083015298975050505050505050565b600080600060408486031215620043fb57600080fd5b833567ffffffffffffffff808211156200441457600080fd5b818601915086601f8301126200442957600080fd5b8135818111156200443957600080fd5b8760208260051b85010111156200444f57600080fd5b60209283019550935050840135620044678162003e33565b809150509250925092565b600080602083850312156200448657600080fd5b823567ffffffffffffffff8111156200449e57600080fd5b620044ac8582860162003e80565b90969095509350505050565b80356bffffffffffffffffffffffff8116811462003e6b57600080fd5b60008060408385031215620044e957600080fd5b82359150620044fb60208401620044b8565b90509250929050565b6000602082840312156200451757600080fd5b5035919050565b600067ffffffffffffffff8211156200453b576200453b62003ecc565b5060051b60200190565b600082601f8301126200455757600080fd5b813560206200456a6200400c836200451e565b82815260059290921b840181019181810190868411156200458a57600080fd5b8286015b84811015620045cf57803567ffffffffffffffff811115620045b05760008081fd5b620045c08986838b010162003fe9565b8452509183019183016200458e565b509695505050505050565b60008060008060608587031215620045f157600080fd5b843593506020850
13567ffffffffffffffff808211156200461157600080fd5b6200461f8883890162004545565b945060408701359150808211156200463657600080fd5b50620046458782880162003e80565b95989497509550505050565b6000602082840312156200466457600080fd5b81356200397c8162003e33565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600063ffffffff808316818103620046bc57620046bc62004671565b6001019392505050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b602081526000620032e4602083018486620046c6565b60208152600062002a6e60208301846200419c565b805162003e6b8162003e33565b6000602082840312156200475a57600080fd5b81516200397c8162003e33565b600082516200477b81846020870162004176565b9190910192915050565b8181038181111562002a715762002a7162004671565b801515811462002a0257600080fd5b600082601f830112620047bc57600080fd5b8151620047cd6200400c8262003fa0565b818152846020838601011115620047e357600080fd5b620032e482602083016020870162004176565b600080604083850312156200480a57600080fd5b825162004817816200479b565b602084015190925067ffffffffffffffff8111156200483557600080fd5b6200416c85828601620047aa565b600181811c908216806200485857607f821691505b60208210810362004892577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f821115620048e657600081815260208120601f850160051c81016020861015620048c15750805b601f850160051c820191505b81811015620048e257828155600101620048cd565b5050505b505050565b67ffffffffffffffff83111562004906576200490662003ecc565b6200491e8362004917835462004843565b8362004898565b6000601f8411600181146200497357600085156200493c5750838201355b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600387901b1c1916600186901b17835562004a0c565b6000838152602090207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0861690835b82811015620049c45786850135825560209485019460019092019101620049a2565b508682101562004a00577ffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffff60f88860031b161c19848701351681555b505060018560011b0183555b5050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b8082018082111562002a715762002a7162004671565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820362004a8c5762004a8c62004671565b5060010190565b600081518084526020808501945080840160005b8381101562004b525781518051151588528381015163ffffffff908116858a01526040808301519091169089015260608082015173ffffffffffffffffffffffffffffffffffffffff16908901526080808201516bffffffffffffffffffffffff169089015260a08082015162004b2d828b01826bffffffffffffffffffffffff169052565b505060c09081015163ffffffff169088015260e0909601959082019060010162004aa7565b509495945050505050565b600081518084526020808501945080840160005b8381101562004b5257815173ffffffffffffffffffffffffffffffffffffffff168752958201959082019060010162004b71565b600082825180855260208086019550808260051b84010181860160005b8481101562004c12577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe086840301895262004bff8383516200419c565b9884019892509083019060010162004bc2565b5090979650505050505050565b60e081528760e082015260006101007f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8a111562004c5c57600080fd5b8960051b808c8386013783018381038201602085015262004c808282018b62004a93565b915050828103604084015262004c97818962004b5d565b9050828103606084015262004cad818862004b5d565b9050828103608084015262004cc3818762004ba5565b905082810360a084015262004cd9818662004ba5565b905082810360c084015262004cef818562004ba5565b9b9a5050505050505050505050565b60006020828403121562004d1157600080fd5b815160ff811681146200397c57600080fd5b60ff8416815260ff8316602082015260606040820152600062002d1a60608301846200419c565b60006020828403121562004d5d57600080fd5b815167ffffffffffffffff81111562004d7557600080fd5b620032e484828501620047aa565b60006020828403121562004d9657600080fd5b81516200397c816200479b565b600082601f83011262004db557600080fd5b8135602062004dc86200400c836200451e565b8
2815260059290921b8401810191818101908684111562004de857600080fd5b8286015b84811015620045cf578035835291830191830162004dec565b600082601f83011262004e1757600080fd5b8135602062004e2a6200400c836200451e565b82815260e0928302850182019282820191908785111562004e4a57600080fd5b8387015b8581101562004c125781818a03121562004e685760008081fd5b62004e7262003efb565b813562004e7f816200479b565b815262004e8e82870162003e56565b86820152604062004ea181840162003e56565b9082015260608281013562004eb68162003e33565b90820152608062004ec9838201620044b8565b9082015260a062004edc838201620044b8565b9082015260c062004eef83820162003e56565b90820152845292840192810162004e4e565b600082601f83011262004f1357600080fd5b8135602062004f266200400c836200451e565b82815260059290921b8401810191818101908684111562004f4657600080fd5b8286015b84811015620045cf57803562004f608162003e33565b835291830191830162004f4a565b600080600080600080600060e0888a03121562004f8a57600080fd5b873567ffffffffffffffff8082111562004fa357600080fd5b62004fb18b838c0162004da3565b985060208a013591508082111562004fc857600080fd5b62004fd68b838c0162004e05565b975060408a013591508082111562004fed57600080fd5b62004ffb8b838c0162004f01565b965060608a01359150808211156200501257600080fd5b620050208b838c0162004f01565b955060808a01359150808211156200503757600080fd5b620050458b838c0162004545565b945060a08a01359150808211156200505c57600080fd5b6200506a8b838c0162004545565b935060c08a01359150808211156200508157600080fd5b50620050908a828b0162004545565b91505092959891949750929550565b6bffffffffffffffffffffffff81811683821601908082111562003d785762003d7862004671565b600060208284031215620050da57600080fd5b5051919050565b6bffffffffffffffffffffffff82811682821603908082111562003d785762003d7862004671565b6040815260006200511e604083018662004ba5565b828103602084015262005133818587620046c6565b9695505050505050565b8051602080830151919081101562004892577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60209190910360031b1b16919050565b815167ffffffffffffffff8111156200519d576200519d62003ecc565b620051b581620051ae845462004843565b84620
04898565b602080601f8311600181146200520b5760008415620051d45750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555620048e2565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b828110156200525a5788860151825594840194600190910190840162005239565b50858210156200529757878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b805169ffffffffffffffffffff8116811462003e6b57600080fd5b600080600080600060a08688031215620052db57600080fd5b620052e686620052a7565b94506020860151935060408601519250606086015191506200530b60808701620052a7565b90509295509295909350565b60ff818116838216019081111562002a715762002a7162004671565b808202811582820484141762002a715762002a7162004671565b600080604083850312156200536157600080fd5b505080516020909101519092909150565b60008154620053818162004843565b808552602060018381168015620053a15760018114620053da576200540a565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff008516838901528284151560051b89010195506200540a565b866000528260002060005b85811015620054025781548a8201860152908301908401620053e5565b890184019650505b505050505092915050565b60208152600062002a6e602083018462005372565b600082601f8301126200543c57600080fd5b815160206200544f6200400c836200451e565b82815260059290921b840181019181810190868411156200546f57600080fd5b8286015b84811015620045cf578051835291830191830162005473565b6000602082840312156200549f57600080fd5b815167ffffffffffffffff80821115620054b857600080fd5b908301906101008286031215620054ce57600080fd5b620054d862003f27565b82518152602083015160208201526040830151604082015260608301516060820152608083015160808201526200551260a084016200473a565b60a082015260c0830151828111156200552a57600080fd5b62005538878286016200542a565b60c08301525060e0830151828111156200555157600080fd5b6200555f87828601620047aa565b60e08301525095945050505050565b600081518084526020808501945080840160005b8381101562004b5257815187529582019590820190600
10162005582565b60408152825160408201526020830151606082015260408301516080820152606083015160a0820152608083015160c082015273ffffffffffffffffffffffffffffffffffffffff60a08401511660e0820152600060c0840151610100808185015250620056136101408401826200556e565b905060e08501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0848303016101208501526200565182826200419c565b915050828103602084015262002d1a818562005372565b6000826200569f577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fdfe60c060405234801561001057600080fd5b506040516103ca3803806103ca83398101604081905261002f91610076565b600080546001600160a01b0319166001600160a01b039384161790559181166080521660a0526100b9565b80516001600160a01b038116811461007157600080fd5b919050565b60008060006060848603121561008b57600080fd5b6100948461005a565b92506100a26020850161005a565b91506100b06040850161005a565b90509250925092565b60805160a0516102e76100e36000396000603801526000818160c4015261011701526102e76000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806379188d161461007b578063f00e6a2a146100aa575b7f00000000000000000000000000000000000000000000000000000000000000003660008037600080366000845af43d6000803e808015610076573d6000f35b3d6000fd5b61008e6100893660046101c1565b6100ee565b6040805192151583526020830191909152015b60405180910390f35b60405173ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001681526020016100a1565b60008054819073ffffffffffffffffffffffffffffffffffffffff16331461011557600080fd5b7f00000000000000000000000000000000000000000000000000000000000000005a91505a61138881101561014957600080fd5b61138881039050856040820482031161016157600080fd5b50803b61016d57600080fd5b6000808551602087016000858af192505a610188908361029a565b9150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60008060408385031
2156101d457600080fd5b82359150602083013567ffffffffffffffff808211156101f357600080fd5b818501915085601f83011261020757600080fd5b81358181111561021957610219610192565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190838211818310171561025f5761025f610192565b8160405282815288602084870101111561027857600080fd5b8260208601602083013760006020848301015280955050505050509250929050565b818103818111156102d4577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b9291505056fea164736f6c6343000813000aa164736f6c6343000813000a", +} + +var AutomationRegistryLogicAABI = AutomationRegistryLogicAMetaData.ABI + +var AutomationRegistryLogicABin = AutomationRegistryLogicAMetaData.Bin + +func DeployAutomationRegistryLogicA(auth *bind.TransactOpts, backend bind.ContractBackend, logicB common.Address) (common.Address, *types.Transaction, *AutomationRegistryLogicA, error) { + parsed, err := AutomationRegistryLogicAMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(AutomationRegistryLogicABin), backend, logicB) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &AutomationRegistryLogicA{address: address, abi: *parsed, AutomationRegistryLogicACaller: AutomationRegistryLogicACaller{contract: contract}, AutomationRegistryLogicATransactor: AutomationRegistryLogicATransactor{contract: contract}, AutomationRegistryLogicAFilterer: AutomationRegistryLogicAFilterer{contract: contract}}, nil +} + +type AutomationRegistryLogicA struct { + address common.Address + abi abi.ABI + AutomationRegistryLogicACaller + AutomationRegistryLogicATransactor + AutomationRegistryLogicAFilterer +} + +type AutomationRegistryLogicACaller struct { + contract *bind.BoundContract +} + +type 
AutomationRegistryLogicATransactor struct { + contract *bind.BoundContract +} + +type AutomationRegistryLogicAFilterer struct { + contract *bind.BoundContract +} + +type AutomationRegistryLogicASession struct { + Contract *AutomationRegistryLogicA + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AutomationRegistryLogicACallerSession struct { + Contract *AutomationRegistryLogicACaller + CallOpts bind.CallOpts +} + +type AutomationRegistryLogicATransactorSession struct { + Contract *AutomationRegistryLogicATransactor + TransactOpts bind.TransactOpts +} + +type AutomationRegistryLogicARaw struct { + Contract *AutomationRegistryLogicA +} + +type AutomationRegistryLogicACallerRaw struct { + Contract *AutomationRegistryLogicACaller +} + +type AutomationRegistryLogicATransactorRaw struct { + Contract *AutomationRegistryLogicATransactor +} + +func NewAutomationRegistryLogicA(address common.Address, backend bind.ContractBackend) (*AutomationRegistryLogicA, error) { + abi, err := abi.JSON(strings.NewReader(AutomationRegistryLogicAABI)) + if err != nil { + return nil, err + } + contract, err := bindAutomationRegistryLogicA(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicA{address: address, abi: abi, AutomationRegistryLogicACaller: AutomationRegistryLogicACaller{contract: contract}, AutomationRegistryLogicATransactor: AutomationRegistryLogicATransactor{contract: contract}, AutomationRegistryLogicAFilterer: AutomationRegistryLogicAFilterer{contract: contract}}, nil +} + +func NewAutomationRegistryLogicACaller(address common.Address, caller bind.ContractCaller) (*AutomationRegistryLogicACaller, error) { + contract, err := bindAutomationRegistryLogicA(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicACaller{contract: contract}, nil +} + +func NewAutomationRegistryLogicATransactor(address common.Address, transactor bind.ContractTransactor) 
(*AutomationRegistryLogicATransactor, error) { + contract, err := bindAutomationRegistryLogicA(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicATransactor{contract: contract}, nil +} + +func NewAutomationRegistryLogicAFilterer(address common.Address, filterer bind.ContractFilterer) (*AutomationRegistryLogicAFilterer, error) { + contract, err := bindAutomationRegistryLogicA(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAFilterer{contract: contract}, nil +} + +func bindAutomationRegistryLogicA(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := AutomationRegistryLogicAMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicARaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationRegistryLogicA.Contract.AutomationRegistryLogicACaller.contract.Call(opts, result, method, params...) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicARaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.AutomationRegistryLogicATransactor.contract.Transfer(opts) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicARaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.AutomationRegistryLogicATransactor.contract.Transact(opts, method, params...) 
+} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicACallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationRegistryLogicA.Contract.contract.Call(opts, result, method, params...) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.contract.Transfer(opts) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.contract.Transact(opts, method, params...) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicACaller) FallbackTo(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistryLogicA.contract.Call(opts, &out, "fallbackTo") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) FallbackTo() (common.Address, error) { + return _AutomationRegistryLogicA.Contract.FallbackTo(&_AutomationRegistryLogicA.CallOpts) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicACallerSession) FallbackTo() (common.Address, error) { + return _AutomationRegistryLogicA.Contract.FallbackTo(&_AutomationRegistryLogicA.CallOpts) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicACaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistryLogicA.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) Owner() 
(common.Address, error) { + return _AutomationRegistryLogicA.Contract.Owner(&_AutomationRegistryLogicA.CallOpts) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicACallerSession) Owner() (common.Address, error) { + return _AutomationRegistryLogicA.Contract.Owner(&_AutomationRegistryLogicA.CallOpts) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.Transact(opts, "acceptOwnership") +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) AcceptOwnership() (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.AcceptOwnership(&_AutomationRegistryLogicA.TransactOpts) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.AcceptOwnership(&_AutomationRegistryLogicA.TransactOpts) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.Transact(opts, "addFunds", id, amount) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.AddFunds(&_AutomationRegistryLogicA.TransactOpts, id, amount) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.AddFunds(&_AutomationRegistryLogicA.TransactOpts, id, amount) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.Transact(opts, "cancelUpkeep", id) +} + +func 
(_AutomationRegistryLogicA *AutomationRegistryLogicASession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.CancelUpkeep(&_AutomationRegistryLogicA.TransactOpts, id) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.CancelUpkeep(&_AutomationRegistryLogicA.TransactOpts, id) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) CheckCallback(opts *bind.TransactOpts, id *big.Int, values [][]byte, extraData []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.Transact(opts, "checkCallback", id, values, extraData) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) CheckCallback(id *big.Int, values [][]byte, extraData []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.CheckCallback(&_AutomationRegistryLogicA.TransactOpts, id, values, extraData) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) CheckCallback(id *big.Int, values [][]byte, extraData []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.CheckCallback(&_AutomationRegistryLogicA.TransactOpts, id, values, extraData) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) CheckUpkeep(opts *bind.TransactOpts, id *big.Int, triggerData []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.Transact(opts, "checkUpkeep", id, triggerData) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) CheckUpkeep(id *big.Int, triggerData []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.CheckUpkeep(&_AutomationRegistryLogicA.TransactOpts, id, triggerData) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) CheckUpkeep(id *big.Int, triggerData []byte) 
(*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.CheckUpkeep(&_AutomationRegistryLogicA.TransactOpts, id, triggerData) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) CheckUpkeep0(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.Transact(opts, "checkUpkeep0", id) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) CheckUpkeep0(id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.CheckUpkeep0(&_AutomationRegistryLogicA.TransactOpts, id) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) CheckUpkeep0(id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.CheckUpkeep0(&_AutomationRegistryLogicA.TransactOpts, id) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) ExecuteCallback(opts *bind.TransactOpts, id *big.Int, payload []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.Transact(opts, "executeCallback", id, payload) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) ExecuteCallback(id *big.Int, payload []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.ExecuteCallback(&_AutomationRegistryLogicA.TransactOpts, id, payload) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) ExecuteCallback(id *big.Int, payload []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.ExecuteCallback(&_AutomationRegistryLogicA.TransactOpts, id, payload) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.Transact(opts, "migrateUpkeeps", ids, destination) +} + +func (_AutomationRegistryLogicA 
*AutomationRegistryLogicASession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.MigrateUpkeeps(&_AutomationRegistryLogicA.TransactOpts, ids, destination) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.MigrateUpkeeps(&_AutomationRegistryLogicA.TransactOpts, ids, destination) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.Transact(opts, "receiveUpkeeps", encodedUpkeeps) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.ReceiveUpkeeps(&_AutomationRegistryLogicA.TransactOpts, encodedUpkeeps) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.ReceiveUpkeeps(&_AutomationRegistryLogicA.TransactOpts, encodedUpkeeps) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.Transact(opts, "registerUpkeep", target, gasLimit, admin, triggerType, checkData, triggerConfig, offchainConfig) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) 
(*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.RegisterUpkeep(&_AutomationRegistryLogicA.TransactOpts, target, gasLimit, admin, triggerType, checkData, triggerConfig, offchainConfig) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.RegisterUpkeep(&_AutomationRegistryLogicA.TransactOpts, target, gasLimit, admin, triggerType, checkData, triggerConfig, offchainConfig) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) RegisterUpkeep0(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.Transact(opts, "registerUpkeep0", target, gasLimit, admin, checkData, offchainConfig) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) RegisterUpkeep0(target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.RegisterUpkeep0(&_AutomationRegistryLogicA.TransactOpts, target, gasLimit, admin, checkData, offchainConfig) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) RegisterUpkeep0(target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.RegisterUpkeep0(&_AutomationRegistryLogicA.TransactOpts, target, gasLimit, admin, checkData, offchainConfig) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) SetUpkeepTriggerConfig(opts *bind.TransactOpts, id *big.Int, triggerConfig []byte) (*types.Transaction, error) { + return 
_AutomationRegistryLogicA.contract.Transact(opts, "setUpkeepTriggerConfig", id, triggerConfig) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.SetUpkeepTriggerConfig(&_AutomationRegistryLogicA.TransactOpts, id, triggerConfig) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.SetUpkeepTriggerConfig(&_AutomationRegistryLogicA.TransactOpts, id, triggerConfig) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.Transact(opts, "transferOwnership", to) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.TransferOwnership(&_AutomationRegistryLogicA.TransactOpts, to) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.TransferOwnership(&_AutomationRegistryLogicA.TransactOpts, to) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.contract.RawTransact(opts, calldata) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicASession) Fallback(calldata []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.Fallback(&_AutomationRegistryLogicA.TransactOpts, calldata) +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicATransactorSession) Fallback(calldata []byte) 
(*types.Transaction, error) { + return _AutomationRegistryLogicA.Contract.Fallback(&_AutomationRegistryLogicA.TransactOpts, calldata) +} + +type AutomationRegistryLogicAAdminPrivilegeConfigSetIterator struct { + Event *AutomationRegistryLogicAAdminPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAAdminPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAAdminPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAAdminPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAAdminPrivilegeConfigSet struct { + Admin common.Address + PrivilegeConfig []byte + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*AutomationRegistryLogicAAdminPrivilegeConfigSetIterator, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err 
+ } + return &AutomationRegistryLogicAAdminPrivilegeConfigSetIterator{contract: _AutomationRegistryLogicA.contract, event: "AdminPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAAdminPrivilegeConfigSet) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseAdminPrivilegeConfigSet(log types.Log) (*AutomationRegistryLogicAAdminPrivilegeConfigSet, error) { + event := new(AutomationRegistryLogicAAdminPrivilegeConfigSet) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicACancelledUpkeepReportIterator struct { + Event *AutomationRegistryLogicACancelledUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicACancelledUpkeepReportIterator) Next() bool { + + if it.fail != nil { + 
return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicACancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicACancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicACancelledUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicACancelledUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicACancelledUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicACancelledUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicACancelledUpkeepReportIterator{contract: _AutomationRegistryLogicA.contract, event: "CancelledUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicACancelledUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_AutomationRegistryLogicA.contract.WatchLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicACancelledUpkeepReport) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseCancelledUpkeepReport(log types.Log) (*AutomationRegistryLogicACancelledUpkeepReport, error) { + event := new(AutomationRegistryLogicACancelledUpkeepReport) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAChainSpecificModuleUpdatedIterator struct { + Event *AutomationRegistryLogicAChainSpecificModuleUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAChainSpecificModuleUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAChainSpecificModuleUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAChainSpecificModuleUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + 
return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAChainSpecificModuleUpdatedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAChainSpecificModuleUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAChainSpecificModuleUpdated struct { + NewModule common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterChainSpecificModuleUpdated(opts *bind.FilterOpts) (*AutomationRegistryLogicAChainSpecificModuleUpdatedIterator, error) { + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "ChainSpecificModuleUpdated") + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAChainSpecificModuleUpdatedIterator{contract: _AutomationRegistryLogicA.contract, event: "ChainSpecificModuleUpdated", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchChainSpecificModuleUpdated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAChainSpecificModuleUpdated) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "ChainSpecificModuleUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAChainSpecificModuleUpdated) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "ChainSpecificModuleUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseChainSpecificModuleUpdated(log 
types.Log) (*AutomationRegistryLogicAChainSpecificModuleUpdated, error) { + event := new(AutomationRegistryLogicAChainSpecificModuleUpdated) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "ChainSpecificModuleUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicADedupKeyAddedIterator struct { + Event *AutomationRegistryLogicADedupKeyAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicADedupKeyAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicADedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicADedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicADedupKeyAddedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicADedupKeyAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicADedupKeyAdded struct { + DedupKey [32]byte + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*AutomationRegistryLogicADedupKeyAddedIterator, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, 
"DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicADedupKeyAddedIterator{contract: _AutomationRegistryLogicA.contract, event: "DedupKeyAdded", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicADedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicADedupKeyAdded) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseDedupKeyAdded(log types.Log) (*AutomationRegistryLogicADedupKeyAdded, error) { + event := new(AutomationRegistryLogicADedupKeyAdded) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAFundsAddedIterator struct { + Event *AutomationRegistryLogicAFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := 
<-it.logs: + it.Event = new(AutomationRegistryLogicAFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAFundsAddedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*AutomationRegistryLogicAFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAFundsAddedIterator{contract: _AutomationRegistryLogicA.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = 
append(fromRule, fromItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAFundsAdded) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseFundsAdded(log types.Log) (*AutomationRegistryLogicAFundsAdded, error) { + event := new(AutomationRegistryLogicAFundsAdded) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAFundsWithdrawnIterator struct { + Event *AutomationRegistryLogicAFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err 
+ return it.Next() + } +} + +func (it *AutomationRegistryLogicAFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAFundsWithdrawnIterator{contract: _AutomationRegistryLogicA.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAFundsWithdrawn) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) 
ParseFundsWithdrawn(log types.Log) (*AutomationRegistryLogicAFundsWithdrawn, error) { + event := new(AutomationRegistryLogicAFundsWithdrawn) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAInsufficientFundsUpkeepReportIterator struct { + Event *AutomationRegistryLogicAInsufficientFundsUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAInsufficientFundsUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAInsufficientFundsUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAInsufficientFundsUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAInsufficientFundsUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAInsufficientFundsUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + 
idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAInsufficientFundsUpkeepReportIterator{contract: _AutomationRegistryLogicA.contract, event: "InsufficientFundsUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAInsufficientFundsUpkeepReport) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseInsufficientFundsUpkeepReport(log types.Log) (*AutomationRegistryLogicAInsufficientFundsUpkeepReport, error) { + event := new(AutomationRegistryLogicAInsufficientFundsUpkeepReport) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAOwnerFundsWithdrawnIterator struct { + Event *AutomationRegistryLogicAOwnerFundsWithdrawn + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAOwnerFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*AutomationRegistryLogicAOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAOwnerFundsWithdrawnIterator{contract: _AutomationRegistryLogicA.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + 
return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAOwnerFundsWithdrawn) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*AutomationRegistryLogicAOwnerFundsWithdrawn, error) { + event := new(AutomationRegistryLogicAOwnerFundsWithdrawn) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAOwnershipTransferRequestedIterator struct { + Event *AutomationRegistryLogicAOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*AutomationRegistryLogicAOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistryLogicAOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAOwnershipTransferRequestedIterator{contract: _AutomationRegistryLogicA.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAOwnershipTransferRequested) + if err := 
_AutomationRegistryLogicA.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseOwnershipTransferRequested(log types.Log) (*AutomationRegistryLogicAOwnershipTransferRequested, error) { + event := new(AutomationRegistryLogicAOwnershipTransferRequested) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAOwnershipTransferredIterator struct { + Event *AutomationRegistryLogicAOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
AutomationRegistryLogicAOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistryLogicAOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAOwnershipTransferredIterator{contract: _AutomationRegistryLogicA.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAOwnershipTransferred) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + 
} + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseOwnershipTransferred(log types.Log) (*AutomationRegistryLogicAOwnershipTransferred, error) { + event := new(AutomationRegistryLogicAOwnershipTransferred) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAPausedIterator struct { + Event *AutomationRegistryLogicAPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAPausedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAPaused struct { + Account common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterPaused(opts *bind.FilterOpts) (*AutomationRegistryLogicAPausedIterator, error) { + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAPausedIterator{contract: 
_AutomationRegistryLogicA.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAPaused) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAPaused) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParsePaused(log types.Log) (*AutomationRegistryLogicAPaused, error) { + event := new(AutomationRegistryLogicAPaused) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAPayeesUpdatedIterator struct { + Event *AutomationRegistryLogicAPayeesUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAPayeesUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAPayeesUpdated) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAPayeesUpdatedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAPayeesUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAPayeesUpdated struct { + Transmitters []common.Address + Payees []common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterPayeesUpdated(opts *bind.FilterOpts) (*AutomationRegistryLogicAPayeesUpdatedIterator, error) { + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAPayeesUpdatedIterator{contract: _AutomationRegistryLogicA.contract, event: "PayeesUpdated", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAPayeesUpdated) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAPayeesUpdated) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParsePayeesUpdated(log types.Log) 
(*AutomationRegistryLogicAPayeesUpdated, error) { + event := new(AutomationRegistryLogicAPayeesUpdated) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAPayeeshipTransferRequestedIterator struct { + Event *AutomationRegistryLogicAPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAPayeeshipTransferRequested struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*AutomationRegistryLogicAPayeeshipTransferRequestedIterator, error) { + + var transmitterRule 
[]interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAPayeeshipTransferRequestedIterator{contract: _AutomationRegistryLogicA.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAPayeeshipTransferRequested) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + 
return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParsePayeeshipTransferRequested(log types.Log) (*AutomationRegistryLogicAPayeeshipTransferRequested, error) { + event := new(AutomationRegistryLogicAPayeeshipTransferRequested) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAPayeeshipTransferredIterator struct { + Event *AutomationRegistryLogicAPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAPayeeshipTransferred struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter 
[]common.Address, from []common.Address, to []common.Address) (*AutomationRegistryLogicAPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAPayeeshipTransferredIterator{contract: _AutomationRegistryLogicA.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAPayeeshipTransferred) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- 
event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParsePayeeshipTransferred(log types.Log) (*AutomationRegistryLogicAPayeeshipTransferred, error) { + event := new(AutomationRegistryLogicAPayeeshipTransferred) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAPaymentWithdrawnIterator struct { + Event *AutomationRegistryLogicAPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAPaymentWithdrawn struct { + Transmitter common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA 
*AutomationRegistryLogicAFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*AutomationRegistryLogicAPaymentWithdrawnIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAPaymentWithdrawnIterator{contract: _AutomationRegistryLogicA.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAPaymentWithdrawn) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "PaymentWithdrawn", log); err 
!= nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParsePaymentWithdrawn(log types.Log) (*AutomationRegistryLogicAPaymentWithdrawn, error) { + event := new(AutomationRegistryLogicAPaymentWithdrawn) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAReorgedUpkeepReportIterator struct { + Event *AutomationRegistryLogicAReorgedUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAReorgedUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAReorgedUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAReorgedUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAReorgedUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistryLogicA 
*AutomationRegistryLogicAFilterer) FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAReorgedUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAReorgedUpkeepReportIterator{contract: _AutomationRegistryLogicA.contract, event: "ReorgedUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAReorgedUpkeepReport) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseReorgedUpkeepReport(log types.Log) (*AutomationRegistryLogicAReorgedUpkeepReport, error) { + event := new(AutomationRegistryLogicAReorgedUpkeepReport) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
AutomationRegistryLogicAStaleUpkeepReportIterator struct { + Event *AutomationRegistryLogicAStaleUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAStaleUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAStaleUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAStaleUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAStaleUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAStaleUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAStaleUpkeepReportIterator{contract: _AutomationRegistryLogicA.contract, event: "StaleUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) 
WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAStaleUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAStaleUpkeepReport) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseStaleUpkeepReport(log types.Log) (*AutomationRegistryLogicAStaleUpkeepReport, error) { + event := new(AutomationRegistryLogicAStaleUpkeepReport) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUnpausedIterator struct { + Event *AutomationRegistryLogicAUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(AutomationRegistryLogicAUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUnpausedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUnpaused(opts *bind.FilterOpts) (*AutomationRegistryLogicAUnpausedIterator, error) { + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUnpausedIterator{contract: _AutomationRegistryLogicA.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUnpaused) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUnpaused) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUnpaused(log types.Log) (*AutomationRegistryLogicAUnpaused, error) { + event := 
new(AutomationRegistryLogicAUnpaused) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepAdminTransferRequestedIterator struct { + Event *AutomationRegistryLogicAUpkeepAdminTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepAdminTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepAdminTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepAdminTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepAdminTransferRequested struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*AutomationRegistryLogicAUpkeepAdminTransferRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule 
[]interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepAdminTransferRequestedIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepAdminTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepAdminTransferRequested) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepAdminTransferRequested(log types.Log) 
(*AutomationRegistryLogicAUpkeepAdminTransferRequested, error) { + event := new(AutomationRegistryLogicAUpkeepAdminTransferRequested) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepAdminTransferredIterator struct { + Event *AutomationRegistryLogicAUpkeepAdminTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepAdminTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepAdminTransferredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepAdminTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepAdminTransferred struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*AutomationRegistryLogicAUpkeepAdminTransferredIterator, error) { + + var idRule []interface{} + for _, idItem := range 
id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepAdminTransferredIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepAdminTransferred", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepAdminTransferred) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepAdminTransferred(log types.Log) 
(*AutomationRegistryLogicAUpkeepAdminTransferred, error) { + event := new(AutomationRegistryLogicAUpkeepAdminTransferred) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepCanceledIterator struct { + Event *AutomationRegistryLogicAUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*AutomationRegistryLogicAUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + 
atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepCanceledIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepCanceled) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepCanceled(log types.Log) (*AutomationRegistryLogicAUpkeepCanceled, error) { + event := new(AutomationRegistryLogicAUpkeepCanceled) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepCheckDataSetIterator struct 
{ + Event *AutomationRegistryLogicAUpkeepCheckDataSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepCheckDataSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepCheckDataSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepCheckDataSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepCheckDataSet struct { + Id *big.Int + NewCheckData []byte + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepCheckDataSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepCheckDataSetIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepCheckDataSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- 
*AutomationRegistryLogicAUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepCheckDataSet) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepCheckDataSet(log types.Log) (*AutomationRegistryLogicAUpkeepCheckDataSet, error) { + event := new(AutomationRegistryLogicAUpkeepCheckDataSet) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepGasLimitSetIterator struct { + Event *AutomationRegistryLogicAUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(AutomationRegistryLogicAUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepGasLimitSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepGasLimitSetIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepGasLimitSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepGasLimitSet) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + 
+ select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepGasLimitSet(log types.Log) (*AutomationRegistryLogicAUpkeepGasLimitSet, error) { + event := new(AutomationRegistryLogicAUpkeepGasLimitSet) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepMigratedIterator struct { + Event *AutomationRegistryLogicAUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepMigrated struct { + Id *big.Int + RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) 
FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepMigratedIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepMigrated", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepMigrated) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepMigrated(log types.Log) (*AutomationRegistryLogicAUpkeepMigrated, error) { + event := new(AutomationRegistryLogicAUpkeepMigrated) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepOffchainConfigSetIterator struct { + Event *AutomationRegistryLogicAUpkeepOffchainConfigSet + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepOffchainConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepOffchainConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepOffchainConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepOffchainConfigSet struct { + Id *big.Int + OffchainConfig []byte + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepOffchainConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepOffchainConfigSetIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepOffchainConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- 
*AutomationRegistryLogicAUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepOffchainConfigSet) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepOffchainConfigSet(log types.Log) (*AutomationRegistryLogicAUpkeepOffchainConfigSet, error) { + event := new(AutomationRegistryLogicAUpkeepOffchainConfigSet) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepPausedIterator struct { + Event *AutomationRegistryLogicAUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(AutomationRegistryLogicAUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepPaused struct { + Id *big.Int + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepPausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepPausedIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepPaused", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepPaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepPaused) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + 
return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepPaused(log types.Log) (*AutomationRegistryLogicAUpkeepPaused, error) { + event := new(AutomationRegistryLogicAUpkeepPaused) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepPerformedIterator struct { + Event *AutomationRegistryLogicAUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepPerformed struct { + Id *big.Int + Success bool + TotalPayment *big.Int + GasUsed *big.Int + GasOverhead *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success 
[]bool) (*AutomationRegistryLogicAUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepPerformedIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepPerformed) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepPerformed(log types.Log) (*AutomationRegistryLogicAUpkeepPerformed, error) { + event := new(AutomationRegistryLogicAUpkeepPerformed) + if err := 
_AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepPrivilegeConfigSetIterator struct { + Event *AutomationRegistryLogicAUpkeepPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepPrivilegeConfigSet struct { + Id *big.Int + PrivilegeConfig []byte + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepPrivilegeConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return 
&AutomationRegistryLogicAUpkeepPrivilegeConfigSetIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepPrivilegeConfigSet) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepPrivilegeConfigSet(log types.Log) (*AutomationRegistryLogicAUpkeepPrivilegeConfigSet, error) { + event := new(AutomationRegistryLogicAUpkeepPrivilegeConfigSet) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepReceivedIterator struct { + Event *AutomationRegistryLogicAUpkeepReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log 
:= <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepReceivedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepReceived struct { + Id *big.Int + StartingBalance *big.Int + ImportedFrom common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepReceivedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepReceivedIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepReceived", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepReceived, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepReceived) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepReceived(log types.Log) (*AutomationRegistryLogicAUpkeepReceived, error) { + event := new(AutomationRegistryLogicAUpkeepReceived) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepRegisteredIterator struct { + Event *AutomationRegistryLogicAUpkeepRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it 
*AutomationRegistryLogicAUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepRegistered struct { + Id *big.Int + PerformGas uint32 + Admin common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepRegisteredIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepRegistered) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepRegistered(log types.Log) (*AutomationRegistryLogicAUpkeepRegistered, error) { + event := 
new(AutomationRegistryLogicAUpkeepRegistered) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepTriggerConfigSetIterator struct { + Event *AutomationRegistryLogicAUpkeepTriggerConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepTriggerConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepTriggerConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepTriggerConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepTriggerConfigSet struct { + Id *big.Int + TriggerConfig []byte + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepTriggerConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != 
nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepTriggerConfigSetIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepTriggerConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepTriggerConfigSet) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepTriggerConfigSet(log types.Log) (*AutomationRegistryLogicAUpkeepTriggerConfigSet, error) { + event := new(AutomationRegistryLogicAUpkeepTriggerConfigSet) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicAUpkeepUnpausedIterator struct { + Event *AutomationRegistryLogicAUpkeepUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicAUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + 
select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicAUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicAUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicAUpkeepUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicAUpkeepUnpaused struct { + Id *big.Int + Raw types.Log +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepUnpausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.FilterLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicAUpkeepUnpausedIterator{contract: _AutomationRegistryLogicA.contract, event: "UpkeepUnpaused", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepUnpaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicA.contract.WatchLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + 
defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicAUpkeepUnpaused) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicAFilterer) ParseUpkeepUnpaused(log types.Log) (*AutomationRegistryLogicAUpkeepUnpaused, error) { + event := new(AutomationRegistryLogicAUpkeepUnpaused) + if err := _AutomationRegistryLogicA.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicA) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _AutomationRegistryLogicA.abi.Events["AdminPrivilegeConfigSet"].ID: + return _AutomationRegistryLogicA.ParseAdminPrivilegeConfigSet(log) + case _AutomationRegistryLogicA.abi.Events["CancelledUpkeepReport"].ID: + return _AutomationRegistryLogicA.ParseCancelledUpkeepReport(log) + case _AutomationRegistryLogicA.abi.Events["ChainSpecificModuleUpdated"].ID: + return _AutomationRegistryLogicA.ParseChainSpecificModuleUpdated(log) + case _AutomationRegistryLogicA.abi.Events["DedupKeyAdded"].ID: + return _AutomationRegistryLogicA.ParseDedupKeyAdded(log) + case _AutomationRegistryLogicA.abi.Events["FundsAdded"].ID: + return _AutomationRegistryLogicA.ParseFundsAdded(log) + case _AutomationRegistryLogicA.abi.Events["FundsWithdrawn"].ID: + return _AutomationRegistryLogicA.ParseFundsWithdrawn(log) + case _AutomationRegistryLogicA.abi.Events["InsufficientFundsUpkeepReport"].ID: + return _AutomationRegistryLogicA.ParseInsufficientFundsUpkeepReport(log) + case _AutomationRegistryLogicA.abi.Events["OwnerFundsWithdrawn"].ID: 
+ return _AutomationRegistryLogicA.ParseOwnerFundsWithdrawn(log) + case _AutomationRegistryLogicA.abi.Events["OwnershipTransferRequested"].ID: + return _AutomationRegistryLogicA.ParseOwnershipTransferRequested(log) + case _AutomationRegistryLogicA.abi.Events["OwnershipTransferred"].ID: + return _AutomationRegistryLogicA.ParseOwnershipTransferred(log) + case _AutomationRegistryLogicA.abi.Events["Paused"].ID: + return _AutomationRegistryLogicA.ParsePaused(log) + case _AutomationRegistryLogicA.abi.Events["PayeesUpdated"].ID: + return _AutomationRegistryLogicA.ParsePayeesUpdated(log) + case _AutomationRegistryLogicA.abi.Events["PayeeshipTransferRequested"].ID: + return _AutomationRegistryLogicA.ParsePayeeshipTransferRequested(log) + case _AutomationRegistryLogicA.abi.Events["PayeeshipTransferred"].ID: + return _AutomationRegistryLogicA.ParsePayeeshipTransferred(log) + case _AutomationRegistryLogicA.abi.Events["PaymentWithdrawn"].ID: + return _AutomationRegistryLogicA.ParsePaymentWithdrawn(log) + case _AutomationRegistryLogicA.abi.Events["ReorgedUpkeepReport"].ID: + return _AutomationRegistryLogicA.ParseReorgedUpkeepReport(log) + case _AutomationRegistryLogicA.abi.Events["StaleUpkeepReport"].ID: + return _AutomationRegistryLogicA.ParseStaleUpkeepReport(log) + case _AutomationRegistryLogicA.abi.Events["Unpaused"].ID: + return _AutomationRegistryLogicA.ParseUnpaused(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepAdminTransferRequested"].ID: + return _AutomationRegistryLogicA.ParseUpkeepAdminTransferRequested(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepAdminTransferred"].ID: + return _AutomationRegistryLogicA.ParseUpkeepAdminTransferred(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepCanceled"].ID: + return _AutomationRegistryLogicA.ParseUpkeepCanceled(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepCheckDataSet"].ID: + return _AutomationRegistryLogicA.ParseUpkeepCheckDataSet(log) + case 
_AutomationRegistryLogicA.abi.Events["UpkeepGasLimitSet"].ID: + return _AutomationRegistryLogicA.ParseUpkeepGasLimitSet(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepMigrated"].ID: + return _AutomationRegistryLogicA.ParseUpkeepMigrated(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepOffchainConfigSet"].ID: + return _AutomationRegistryLogicA.ParseUpkeepOffchainConfigSet(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepPaused"].ID: + return _AutomationRegistryLogicA.ParseUpkeepPaused(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepPerformed"].ID: + return _AutomationRegistryLogicA.ParseUpkeepPerformed(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepPrivilegeConfigSet"].ID: + return _AutomationRegistryLogicA.ParseUpkeepPrivilegeConfigSet(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepReceived"].ID: + return _AutomationRegistryLogicA.ParseUpkeepReceived(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepRegistered"].ID: + return _AutomationRegistryLogicA.ParseUpkeepRegistered(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepTriggerConfigSet"].ID: + return _AutomationRegistryLogicA.ParseUpkeepTriggerConfigSet(log) + case _AutomationRegistryLogicA.abi.Events["UpkeepUnpaused"].ID: + return _AutomationRegistryLogicA.ParseUpkeepUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (AutomationRegistryLogicAAdminPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x7c44b4eb59ee7873514e7e43e7718c269d872965938b288aa143befca62f99d2") +} + +func (AutomationRegistryLogicACancelledUpkeepReport) Topic() common.Hash { + return common.HexToHash("0xc3237c8807c467c1b39b8d0395eff077313e691bf0a7388106792564ebfd5636") +} + +func (AutomationRegistryLogicAChainSpecificModuleUpdated) Topic() common.Hash { + return common.HexToHash("0xdefc28b11a7980dbe0c49dbbd7055a1584bc8075097d1e8b3b57fb7283df2ad7") +} + +func 
(AutomationRegistryLogicADedupKeyAdded) Topic() common.Hash { + return common.HexToHash("0xa4a4e334c0e330143f9437484fe516c13bc560b86b5b0daf58e7084aaac228f2") +} + +func (AutomationRegistryLogicAFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (AutomationRegistryLogicAFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (AutomationRegistryLogicAInsufficientFundsUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x377c8b0c126ae5248d27aca1c76fac4608aff85673ee3caf09747e1044549e02") +} + +func (AutomationRegistryLogicAOwnerFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1") +} + +func (AutomationRegistryLogicAOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (AutomationRegistryLogicAOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (AutomationRegistryLogicAPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (AutomationRegistryLogicAPayeesUpdated) Topic() common.Hash { + return common.HexToHash("0xa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725") +} + +func (AutomationRegistryLogicAPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (AutomationRegistryLogicAPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (AutomationRegistryLogicAPaymentWithdrawn) Topic() common.Hash { + return 
common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (AutomationRegistryLogicAReorgedUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x6aa7f60c176da7af894b384daea2249497448137f5943c1237ada8bc92bdc301") +} + +func (AutomationRegistryLogicAStaleUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x405288ea7be309e16cfdf481367f90a413e1d4634fcdaf8966546db9b93012e8") +} + +func (AutomationRegistryLogicAUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (AutomationRegistryLogicAUpkeepAdminTransferRequested) Topic() common.Hash { + return common.HexToHash("0xb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b35") +} + +func (AutomationRegistryLogicAUpkeepAdminTransferred) Topic() common.Hash { + return common.HexToHash("0x5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c") +} + +func (AutomationRegistryLogicAUpkeepCanceled) Topic() common.Hash { + return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (AutomationRegistryLogicAUpkeepCheckDataSet) Topic() common.Hash { + return common.HexToHash("0xcba2d5723b2ee59e53a8e8a82a4a7caf4fdfe70e9f7c582950bf7e7a5c24e83d") +} + +func (AutomationRegistryLogicAUpkeepGasLimitSet) Topic() common.Hash { + return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c") +} + +func (AutomationRegistryLogicAUpkeepMigrated) Topic() common.Hash { + return common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff") +} + +func (AutomationRegistryLogicAUpkeepOffchainConfigSet) Topic() common.Hash { + return common.HexToHash("0x3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf4850") +} + +func (AutomationRegistryLogicAUpkeepPaused) Topic() common.Hash { + return common.HexToHash("0x8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f") +} + 
+func (AutomationRegistryLogicAUpkeepPerformed) Topic() common.Hash { + return common.HexToHash("0xad8cc9579b21dfe2c2f6ea35ba15b656e46b4f5b0cb424f52739b8ce5cac9c5b") +} + +func (AutomationRegistryLogicAUpkeepPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x2fd8d70753a007014349d4591843cc031c2dd7a260d7dd82eca8253686ae7769") +} + +func (AutomationRegistryLogicAUpkeepReceived) Topic() common.Hash { + return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71") +} + +func (AutomationRegistryLogicAUpkeepRegistered) Topic() common.Hash { + return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + +func (AutomationRegistryLogicAUpkeepTriggerConfigSet) Topic() common.Hash { + return common.HexToHash("0x2b72ac786c97e68dbab71023ed6f2bdbfc80ad9bb7808941929229d71b7d5664") +} + +func (AutomationRegistryLogicAUpkeepUnpaused) Topic() common.Hash { + return common.HexToHash("0x7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a47456") +} + +func (_AutomationRegistryLogicA *AutomationRegistryLogicA) Address() common.Address { + return _AutomationRegistryLogicA.address +} + +type AutomationRegistryLogicAInterface interface { + FallbackTo(opts *bind.CallOpts) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) + + CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + CheckCallback(opts *bind.TransactOpts, id *big.Int, values [][]byte, extraData []byte) (*types.Transaction, error) + + CheckUpkeep(opts *bind.TransactOpts, id *big.Int, triggerData []byte) (*types.Transaction, error) + + CheckUpkeep0(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + ExecuteCallback(opts *bind.TransactOpts, id *big.Int, payload []byte) (*types.Transaction, error) 
+ + MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) + + ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) + + RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) + + RegisterUpkeep0(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) + + SetUpkeepTriggerConfig(opts *bind.TransactOpts, id *big.Int, triggerConfig []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) + + FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*AutomationRegistryLogicAAdminPrivilegeConfigSetIterator, error) + + WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) + + ParseAdminPrivilegeConfigSet(log types.Log) (*AutomationRegistryLogicAAdminPrivilegeConfigSet, error) + + FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicACancelledUpkeepReportIterator, error) + + WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicACancelledUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseCancelledUpkeepReport(log types.Log) (*AutomationRegistryLogicACancelledUpkeepReport, error) + + FilterChainSpecificModuleUpdated(opts *bind.FilterOpts) (*AutomationRegistryLogicAChainSpecificModuleUpdatedIterator, error) + + WatchChainSpecificModuleUpdated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAChainSpecificModuleUpdated) (event.Subscription, error) + + 
ParseChainSpecificModuleUpdated(log types.Log) (*AutomationRegistryLogicAChainSpecificModuleUpdated, error) + + FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*AutomationRegistryLogicADedupKeyAddedIterator, error) + + WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicADedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) + + ParseDedupKeyAdded(log types.Log) (*AutomationRegistryLogicADedupKeyAdded, error) + + FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*AutomationRegistryLogicAFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*AutomationRegistryLogicAFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAFundsWithdrawn, id []*big.Int) (event.Subscription, error) + + ParseFundsWithdrawn(log types.Log) (*AutomationRegistryLogicAFundsWithdrawn, error) + + FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAInsufficientFundsUpkeepReportIterator, error) + + WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseInsufficientFundsUpkeepReport(log types.Log) (*AutomationRegistryLogicAInsufficientFundsUpkeepReport, error) + + FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*AutomationRegistryLogicAOwnerFundsWithdrawnIterator, error) + + WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAOwnerFundsWithdrawn) (event.Subscription, error) + + ParseOwnerFundsWithdrawn(log types.Log) (*AutomationRegistryLogicAOwnerFundsWithdrawn, error) + + 
FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistryLogicAOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*AutomationRegistryLogicAOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistryLogicAOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*AutomationRegistryLogicAOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*AutomationRegistryLogicAPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*AutomationRegistryLogicAPaused, error) + + FilterPayeesUpdated(opts *bind.FilterOpts) (*AutomationRegistryLogicAPayeesUpdatedIterator, error) + + WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAPayeesUpdated) (event.Subscription, error) + + ParsePayeesUpdated(log types.Log) (*AutomationRegistryLogicAPayeesUpdated, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*AutomationRegistryLogicAPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) 
(*AutomationRegistryLogicAPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*AutomationRegistryLogicAPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*AutomationRegistryLogicAPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*AutomationRegistryLogicAPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*AutomationRegistryLogicAPaymentWithdrawn, error) + + FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAReorgedUpkeepReportIterator, error) + + WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseReorgedUpkeepReport(log types.Log) (*AutomationRegistryLogicAReorgedUpkeepReport, error) + + FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAStaleUpkeepReportIterator, error) + + WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAStaleUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseStaleUpkeepReport(log types.Log) (*AutomationRegistryLogicAStaleUpkeepReport, error) + + FilterUnpaused(opts *bind.FilterOpts) (*AutomationRegistryLogicAUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUnpaused) (event.Subscription, error) + + 
ParseUnpaused(log types.Log) (*AutomationRegistryLogicAUnpaused, error) + + FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*AutomationRegistryLogicAUpkeepAdminTransferRequestedIterator, error) + + WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferRequested(log types.Log) (*AutomationRegistryLogicAUpkeepAdminTransferRequested, error) + + FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*AutomationRegistryLogicAUpkeepAdminTransferredIterator, error) + + WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferred(log types.Log) (*AutomationRegistryLogicAUpkeepAdminTransferred, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*AutomationRegistryLogicAUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*AutomationRegistryLogicAUpkeepCanceled, error) + + FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepCheckDataSetIterator, error) + + WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepCheckDataSet(log types.Log) (*AutomationRegistryLogicAUpkeepCheckDataSet, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepGasLimitSetIterator, error) + + 
WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*AutomationRegistryLogicAUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepMigrated(log types.Log) (*AutomationRegistryLogicAUpkeepMigrated, error) + + FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepOffchainConfigSetIterator, error) + + WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepOffchainConfigSet(log types.Log) (*AutomationRegistryLogicAUpkeepOffchainConfigSet, error) + + FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepPausedIterator, error) + + WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepPaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPaused(log types.Log) (*AutomationRegistryLogicAUpkeepPaused, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*AutomationRegistryLogicAUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*AutomationRegistryLogicAUpkeepPerformed, error) + + FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepPrivilegeConfigSetIterator, error) + + WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepPrivilegeConfigSet, id []*big.Int) 
(event.Subscription, error) + + ParseUpkeepPrivilegeConfigSet(log types.Log) (*AutomationRegistryLogicAUpkeepPrivilegeConfigSet, error) + + FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*AutomationRegistryLogicAUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*AutomationRegistryLogicAUpkeepRegistered, error) + + FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepTriggerConfigSetIterator, error) + + WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepTriggerConfigSet(log types.Log) (*AutomationRegistryLogicAUpkeepTriggerConfigSet, error) + + FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicAUpkeepUnpausedIterator, error) + + WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicAUpkeepUnpaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepUnpaused(log types.Log) (*AutomationRegistryLogicAUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/automation_registry_logic_b_wrapper_2_2/automation_registry_logic_b_wrapper_2_2.go b/core/gethwrappers/generated/automation_registry_logic_b_wrapper_2_2/automation_registry_logic_b_wrapper_2_2.go new file mode 100644 index 00000000..0d38fcd7 --- /dev/null +++ 
b/core/gethwrappers/generated/automation_registry_logic_b_wrapper_2_2/automation_registry_logic_b_wrapper_2_2.go @@ -0,0 +1,5933 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package automation_registry_logic_b_wrapper_2_2 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type AutomationRegistryBase22OnchainConfigLegacy struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + MaxCheckDataSize uint32 + MaxPerformDataSize uint32 + MaxRevertDataSize uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrars []common.Address + UpkeepPrivilegeManager common.Address +} + +type AutomationRegistryBase22State struct { + Nonce uint32 + OwnerLinkBalance *big.Int + ExpectedLinkBalance *big.Int + TotalPremium *big.Int + NumUpkeeps *big.Int + ConfigCount uint32 + LatestConfigBlockNumber uint32 + LatestConfigDigest [32]byte + LatestEpoch uint32 + Paused bool +} + +type AutomationRegistryBase22UpkeepInfo struct { + Target common.Address + PerformGas uint32 + CheckData []byte + Balance *big.Int + Admin common.Address + MaxValidBlocknumber uint64 + LastPerformedBlockNumber uint32 + AmountSpent *big.Int + Paused bool + OffchainConfig []byte +} + +var AutomationRegistryLogicBMetaData = 
&bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkNativeFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"fastGasFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"automationForwarderLogic\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"allowedReadOnlyAddress\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CheckDataExceedsLimit\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ConfigDigestMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfFaultyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidReport\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTransmitter\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTrigger\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTriggerType\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxCheckDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxPerformDataSizeCanOnlyIncrease\",\"type\":\"er
ror\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveTransmitters\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByUpkeepPrivilegeManager\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyPausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUnpausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReentrantCall\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistryPaused\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedTransmitter\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepAlreadyExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepCancelled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"
inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"AdminPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"CancelledUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newModule\",\"type\":\"address\"}],\"name\":\"ChainSpecificModuleUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"dedupKey\",\"type\":\"bytes32\"}],\"name\":\"DedupKeyAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"InsufficientFundsUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amou
nt\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"PayeesUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"inte
rnalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"ReorgedUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"StaleUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\
"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"UpkeepCheckDataSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepOffchainConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepPaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasOverhead\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"
name\":\"UpkeepPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepTriggerConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepUnpaused\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"acceptUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveUpkeepIDs\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\
"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"getAdminPrivilegeConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAllowedReadOnlyAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAutomationForwarderLogic\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCancellationDelay\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getChainModule\",\"outputs\":[{\"internalType\":\"contractIChainModule\",\"name\":\"chainModule\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConditionalGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFastGasFeedAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepID\",\"type\":\"uint256\"}],\"name\":\"getForwarder\",\"outputs\":[{\"internalType\":\"contractIAutomationForwarder\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkAddress\",\"outputs\":[{\"internalType\":\"address\",\
"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkNativeFeedAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLogGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"enumAutomationRegistryBase2_2.Trigger\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"getMaxPaymentForGas\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"maxPayment\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"minBalance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"}],\"name\":\"getPeerRegistryMigrationPermission\",\"outputs\":[{\"internalType\":\"enumAutomationRegistryBase2_2.MigrationPermission\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPerPerformByteGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPerSignerGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\"
:[],\"name\":\"getReorgProtectionEnabled\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"reorgProtectionEnabled\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getSignerInfo\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"index\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getState\",\"outputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"nonce\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"ownerLinkBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"expectedLinkBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"totalPremium\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"numUpkeeps\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"latestConfigBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"latestConfigDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"latestEpoch\",\"type\":\"uint32\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"}],\"internalType\":\"structAutomationRegistryBase2_2.State\",\"name\":\"state\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type
\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCheckDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxRevertDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"registrars\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"upkeepPrivilegeManager\",\"type\":\"address\"}],\"internalType\":\"structAutomationRegistryBase2_2.OnchainConfigLegacy\",\"name\":\"config\",\"type\":\"tuple\"},{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTransmitCalldataFixedBytesOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTransmitCalldataPerSignerBytesOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getTransmitterInfo\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"index\",\"type\":\"uint8\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"lastCollected\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs
\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getTriggerType\",\"outputs\":[{\"internalType\":\"enumAutomationRegistryBase2_2.Trigger\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getUpkeep\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"lastPerformedBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"amountSpent\",\"type\":\"uint96\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"internalType\":\"structAutomationRegistryBase2_2.UpkeepInfo\",\"name\":\"upkeepInfo\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepPrivilegeConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepTriggerConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"dedupKey\",\"type\":\"bytes32\"}],\"name\":\"hasDedupKey\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\
"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"pauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"newPrivilegeConfig\",\"type\":\"bytes\"}],\"name\":\"setAdminPrivilegeConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"setPayees\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"},{\"internalType\":\"enumAutomationRegistryBase2_2.MigrationPermission\",\"name\":\"permission\",\"type\":\"uint8\"}],\"name\":\"setPeerRegistryMigrationPermission\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"setUpkeepCheckData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"setUpkeepGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint25
6\"},{\"internalType\":\"bytes\",\"name\":\"config\",\"type\":\"bytes\"}],\"name\":\"setUpkeepOffchainConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"newPrivilegeConfig\",\"type\":\"bytes\"}],\"name\":\"setUpkeepPrivilegeConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"unpauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepTranscoderVersion\",\"outputs\":[{\"internalType\":\"enumUpkeepFormat\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepVersion\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdra
wFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawOwnerFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x6101206040523480156200001257600080fd5b5060405162004daf38038062004daf8339810160408190526200003591620001bf565b84848484843380600081620000915760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000c457620000c481620000f7565b5050506001600160a01b0394851660805292841660a05290831660c052821660e0521661010052506200022f9350505050565b336001600160a01b03821603620001515760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000088565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001ba57600080fd5b919050565b600080600080600060a08688031215620001d857600080fd5b620001e386620001a2565b9450620001f360208701620001a2565b93506200020360408701620001a2565b92506200021360608701620001a2565b91506200022360808701620001a2565b90509295509295909350565b60805160a05160c05160e05161010051614b0a620002a56000396000610715015260006105920152600081816105ff01526132df01526000818161077801526133b901526000818161080601528181611d490152818161201e015281816124870152818161299a0152612a1e0152614b0a6000f3fe608060405234801561001057600080fd5b50600436106103625760003560e01c80637d9b97e0116101c8578063b121e14711610104578063cd7f71b5116100a2578063ed56b3e11161007c578063ed56b3e11461086357806
3f2fde38b146108d6578063f777ff06146108e9578063faa3e996146108f057600080fd5b8063cd7f71b51461082a578063d76326481461083d578063eb5dcd6c1461085057600080fd5b8063b657bc9c116100de578063b657bc9c146107c9578063b79550be146107dc578063c7c3a19a146107e4578063ca30e6031461080457600080fd5b8063b121e1471461079c578063b148ab6b146107af578063b6511a2a146107c257600080fd5b80639e0a99ed11610171578063a72aa27e1161014b578063a72aa27e1461074c578063aab9edd61461075f578063abc76ae01461076e578063b10b673c1461077657600080fd5b80639e0a99ed1461070b578063a08714c014610713578063a710b2211461073957600080fd5b80638da5cb5b116101a25780638da5cb5b146106b75780638dcf0fe7146106d55780638ed02bab146106e857600080fd5b80637d9b97e0146106945780638456cb591461069c5780638765ecbe146106a457600080fd5b806343cc055c116102a25780635b6aa71c11610240578063671d36ed1161021a578063671d36ed14610623578063744bfe611461063657806379ba50971461064957806379ea99431461065157600080fd5b80635b6aa71c146105d75780636209e1e9146105ea5780636709d0e5146105fd57600080fd5b80634ca16c521161027c5780634ca16c52146105555780635147cd591461055d5780635165f2f51461057d5780635425d8ac1461059057600080fd5b806343cc055c1461050c57806344cb70b81461052357806348013d7b1461054657600080fd5b80631a2af0111161030f578063232c1cc5116102e9578063232c1cc5146104845780633b9cce591461048b5780633f4ba83a1461049e578063421d183b146104a657600080fd5b80631a2af011146104005780631e01043914610413578063207b65161461047157600080fd5b80631865c57d116103405780631865c57d146103b4578063187256e8146103cd57806319d97a94146103e057600080fd5b8063050ee65d1461036757806306e3b6321461037f5780630b7d33e61461039f575b600080fd5b62014c085b6040519081526020015b60405180910390f35b61039261038d366004613d35565b610936565b6040516103769190613d57565b6103b26103ad366004613de4565b610a53565b005b6103bc610b0d565b604051610376959493929190613fe7565b6103b26103db36600461411e565b610f57565b6103f36103ee36600461415b565b610fc8565b60405161037691906141d8565b6103b261040e3660046141eb565b61106a565b61045461042136600461415b565b6000908152600460205260409020600101546c01000000000000000000000
00090046bffffffffffffffffffffffff1690565b6040516bffffffffffffffffffffffff9091168152602001610376565b6103f361047f36600461415b565b611170565b601861036c565b6103b2610499366004614210565b61118d565b6103b26113e3565b6104b96104b4366004614285565b611449565b60408051951515865260ff90941660208601526bffffffffffffffffffffffff9283169385019390935216606083015273ffffffffffffffffffffffffffffffffffffffff16608082015260a001610376565b60135460ff165b6040519015158152602001610376565b61051361053136600461415b565b60009081526008602052604090205460ff1690565b600060405161037691906142d1565b61ea6061036c565b61057061056b36600461415b565b611568565b60405161037691906142eb565b6103b261058b36600461415b565b611573565b7f00000000000000000000000000000000000000000000000000000000000000005b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610376565b6104546105e5366004614318565b6116ea565b6103f36105f8366004614285565b61188f565b7f00000000000000000000000000000000000000000000000000000000000000006105b2565b6103b2610631366004614351565b6118c3565b6103b26106443660046141eb565b61199d565b6103b2611e44565b6105b261065f36600461415b565b6000908152600460205260409020546901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1690565b6103b2611f46565b6103b26120a1565b6103b26106b236600461415b565b612122565b60005473ffffffffffffffffffffffffffffffffffffffff166105b2565b6103b26106e3366004613de4565b61229c565b601354610100900473ffffffffffffffffffffffffffffffffffffffff166105b2565b6103a461036c565b7f00000000000000000000000000000000000000000000000000000000000000006105b2565b6103b261074736600461438d565b6122f1565b6103b261075a3660046143bb565b612559565b60405160038152602001610376565b6115e061036c565b7f00000000000000000000000000000000000000000000000000000000000000006105b2565b6103b26107aa366004614285565b61264e565b6103b26107bd36600461415b565b612746565b603261036c565b6104546107d736600461415b565b612934565b6103b2612961565b6107f76107f236600461415b565b612abd565b60405161037691906143de565b7f00000000000000000000000000000000000000000000000000000000000000006105b
2565b6103b2610838366004613de4565b612e90565b61045461084b36600461415b565b612f27565b6103b261085e36600461438d565b612f32565b6108bd610871366004614285565b73ffffffffffffffffffffffffffffffffffffffff166000908152600c602090815260409182902082518084019093525460ff8082161515808552610100909204169290910182905291565b60408051921515835260ff909116602083015201610376565b6103b26108e4366004614285565b613090565b604061036c565b6109296108fe366004614285565b73ffffffffffffffffffffffffffffffffffffffff166000908152601a602052604090205460ff1690565b6040516103769190614515565b6060600061094460026130a4565b905080841061097f576040517f1390f2a100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061098b8486614558565b905081811180610999575083155b6109a357806109a5565b815b905060006109b3868361456b565b67ffffffffffffffff8111156109cb576109cb61457e565b6040519080825280602002602001820160405280156109f4578160200160208202803683370190505b50905060005b8151811015610a4757610a18610a108883614558565b6002906130ae565b828281518110610a2a57610a2a6145ad565b602090810291909101015280610a3f816145dc565b9150506109fa565b50925050505b92915050565b6016546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff163314610ab4576040517f77c3599200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000838152601d60205260409020610acd8284836146b6565b50827f2fd8d70753a007014349d4591843cc031c2dd7a260d7dd82eca8253686ae77698383604051610b009291906147d1565b60405180910390a2505050565b6040805161014081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e081018290526101008101829052610120810191909152604080516101e08101825260008082526020820181905291810182905260608082018390526080820183905260a0820183905260c0820183905260e08201839052610100820183905261012082018390526101408201839052610160820183905261018082018390526101a08201526101c0810191909152604080516101408101825260155463ffffffff7401000000000000000000000000000000000000000082041682526bfffffffffffffffffffff
fff90811660208301526019549282019290925260125490911660608281019190915290819060009060808101610c4660026130a4565b8152601554780100000000000000000000000000000000000000000000000080820463ffffffff9081166020808601919091527c01000000000000000000000000000000000000000000000000000000008404821660408087019190915260115460608088019190915260125474010000000000000000000000000000000000000000810485166080808a01919091527e01000000000000000000000000000000000000000000000000000000000000820460ff16151560a0998a015283516101e0810185526c0100000000000000000000000080840488168252700100000000000000000000000000000000808504891697830197909752808a0488169582019590955296820462ffffff16928701929092527b01000000000000000000000000000000000000000000000000000000900461ffff16908501526014546bffffffffffffffffffffffff8116968501969096529304811660c083015260165480821660e08401526401000000008104821661010084015268010000000000000000900416610120820152601754610140820152601854610160820152910473ffffffffffffffffffffffffffffffffffffffff166101808201529095506101a08101610e1360096130c1565b81526016546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff16602091820152601254600d80546040805182860281018601909152818152949850899489949293600e937d01000000000000000000000000000000000000000000000000000000000090910460ff16928591830182828015610ed657602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610eab575b5050505050925081805480602002602001604051908101604052809291908181526020018280548015610f3f57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610f14575b50505050509150945094509450945094509091929394565b610f5f6130ce565b73ffffffffffffffffffffffffffffffffffffffff82166000908152601a6020526040902080548291907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001836003811115610fbf57610fbf6142a2565b02179055505050565b6000818152601d60205260409020805460609190610fe590614614565b80601f0160208091040260200160405190810
16040528092919081815260200182805461101190614614565b801561105e5780601f106110335761010080835404028352916020019161105e565b820191906000526020600020905b81548152906001019060200180831161104157829003601f168201915b50505050509050919050565b61107382613151565b3373ffffffffffffffffffffffffffffffffffffffff8216036110c2576040517f8c8728c700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526006602052604090205473ffffffffffffffffffffffffffffffffffffffff82811691161461116c5760008281526006602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff851690811790915590519091339185917fb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b3591a45b5050565b6000818152601b60205260409020805460609190610fe590614614565b6111956130ce565b600e5481146111d0576040517fcf54c06a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b600e548110156113a2576000600e82815481106111f2576111f26145ad565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff908116808452600f9092526040832054919350169085858581811061123c5761123c6145ad565b90506020020160208101906112519190614285565b905073ffffffffffffffffffffffffffffffffffffffff811615806112e4575073ffffffffffffffffffffffffffffffffffffffff8216158015906112c257508073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b80156112e4575073ffffffffffffffffffffffffffffffffffffffff81811614155b1561131b576040517fb387a23800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8181161461138c5773ffffffffffffffffffffffffffffffffffffffff8381166000908152600f6020526040902080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169183169190911790555b505050808061139a906145dc565b9150506111d3565b507fa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725600e83836040516113d79392919061481e565b60405180910390a15
050565b6113eb6130ce565b601280547fff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1690556040513381527f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa906020015b60405180910390a1565b73ffffffffffffffffffffffffffffffffffffffff81166000908152600b602090815260408083208151608081018352905460ff80821615801584526101008304909116948301949094526bffffffffffffffffffffffff6201000082048116938301939093526e01000000000000000000000000000090049091166060820152829182918291829190829061150f5760608201516012546000916114fb916bffffffffffffffffffffffff166148d0565b600e5490915061150b9082614924565b9150505b81516020830151604084015161152690849061494f565b6060949094015173ffffffffffffffffffffffffffffffffffffffff9a8b166000908152600f6020526040902054929b919a9499509750921694509092505050565b6000610a4d82613205565b61157c81613151565b600081815260046020908152604091829020825160e081018452815460ff8116151580835263ffffffff610100830481169584019590955265010000000000820485169583019590955273ffffffffffffffffffffffffffffffffffffffff69010000000000000000009091041660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a08301527801000000000000000000000000000000000000000000000000900490911660c08201529061167b576040517f1b88a78400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260046020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690556116ba6002836132b0565b5060405182907f7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a4745690600090a25050565b60408051610160810182526012546bffffffffffffffffffffffff8116825263ffffffff6c010000000000000000000000008204811660208401527001000000000000000000000000000000008204811693830193909352740100000000000000000000000000000000000000008104909216606082015262ffffff7801000000000000000000000000000000000000000000000000830416608082015261ffff7b0100000000000000000000000000000000000000000000000000000083041660a082015260ff7d01000000000000000000000000000000000000000
00000000000000000008304811660c08301527e0100000000000000000000000000000000000000000000000000000000000083048116151560e08301527f01000000000000000000000000000000000000000000000000000000000000009092048216151561010080830191909152601354928316151561012083015273ffffffffffffffffffffffffffffffffffffffff9204919091166101408201526000908180611874836132bc565b91509150611885838787858561349a565b9695505050505050565b73ffffffffffffffffffffffffffffffffffffffff81166000908152601e60205260409020805460609190610fe590614614565b6016546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff163314611924576040517f77c3599200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff83166000908152601e602052604090206119548284836146b6565b508273ffffffffffffffffffffffffffffffffffffffff167f7c44b4eb59ee7873514e7e43e7718c269d872965938b288aa143befca62f99d28383604051610b009291906147d1565b6012547f0100000000000000000000000000000000000000000000000000000000000000900460ff16156119fd576040517f37ed32e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601280547effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f010000000000000000000000000000000000000000000000000000000000000017905573ffffffffffffffffffffffffffffffffffffffff8116611a93576040517f9c8d2cd200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000828152600460209081526040808320815160e081018352815460ff81161515825263ffffffff610100820481168387015265010000000000820481168386015273ffffffffffffffffffffffffffffffffffffffff6901000000000000000000909204821660608401526001909301546bffffffffffffffffffffffff80821660808501526c0100000000000000000000000082041660a08401527801000000000000000000000000000000000000000000000000900490921660c082015286855260059093529220549091163314611b9a576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601260010160019054906101000a900473fffffffff
fffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166357e871e76040518163ffffffff1660e01b8152600401602060405180830381865afa158015611c0a573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611c2e9190614974565b816040015163ffffffff161115611c71576040517fff84e5dd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000838152600460205260409020600101546019546c010000000000000000000000009091046bffffffffffffffffffffffff1690611cb190829061456b565b60195560008481526004602081905260409182902060010180547fffffffffffffffff000000000000000000000000ffffffffffffffffffffffff16905590517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff858116928201929092526bffffffffffffffffffffffff831660248201527f00000000000000000000000000000000000000000000000000000000000000009091169063a9059cbb906044016020604051808303816000875af1158015611d94573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611db8919061498d565b50604080516bffffffffffffffffffffffff8316815273ffffffffffffffffffffffffffffffffffffffff8516602082015285917ff3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318910160405180910390a25050601280547effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1690555050565b60015473ffffffffffffffffffffffffffffffffffffffff163314611eca576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b611f4e6130ce565b6015546019546bffffffffffffffffffffffff90911690611f7090829061456b565b601955601580547fffffffffffffffffffffffffffffffffffffffff00000000000000000000000
01690556040516bffffffffffffffffffffffff821681527f1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f19060200160405180910390a16040517fa9059cbb0000000000000000000000000000000000000000000000000000000081523360048201526bffffffffffffffffffffffff821660248201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169063a9059cbb906044015b6020604051808303816000875af115801561207d573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061116c919061498d565b6120a96130ce565b601280547fff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e010000000000000000000000000000000000000000000000000000000000001790556040513381527f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a2589060200161143f565b61212b81613151565b600081815260046020908152604091829020825160e081018452815460ff8116158015835263ffffffff610100830481169584019590955265010000000000820485169583019590955273ffffffffffffffffffffffffffffffffffffffff69010000000000000000009091041660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a08301527801000000000000000000000000000000000000000000000000900490911660c08201529061222a576040517f514b6c2400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260046020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905561226c600283613722565b5060405182907f8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f90600090a25050565b6122a583613151565b6000838152601c602052604090206122be8284836146b6565b50827f3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf48508383604051610b009291906147d1565b73ffffffffffffffffffffffffffffffffffffffff811661233e576040517f9c8d2cd200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8281166000908152600f602052604090205416331461239e576040517fcebf515b00000000000000000000000
000000000000000000000000000000000815260040160405180910390fd5b601254600e546000916123c19185916bffffffffffffffffffffffff169061372e565b73ffffffffffffffffffffffffffffffffffffffff84166000908152600b6020526040902080547fffffffffffffffffffffffffffffffffffff000000000000000000000000ffff16905560195490915061242b906bffffffffffffffffffffffff83169061456b565b6019556040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff83811660048301526bffffffffffffffffffffffff831660248301527f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb906044016020604051808303816000875af11580156124d0573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906124f4919061498d565b5060405133815273ffffffffffffffffffffffffffffffffffffffff808416916bffffffffffffffffffffffff8416918616907f9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f406989060200160405180910390a4505050565b6108fc8163ffffffff16108061258e575060155463ffffffff7001000000000000000000000000000000009091048116908216115b156125c5576040517f14c237fb00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6125ce82613151565b60008281526004602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ff1661010063ffffffff861690810291909117909155915191825283917fc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c910160405180910390a25050565b73ffffffffffffffffffffffffffffffffffffffff8181166000908152601060205260409020541633146126ae576040517f6752e7aa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8181166000818152600f602090815260408083208054337fffffffffffffffffffffffff000000000000000000000000000000000000000080831682179093556010909452828520805490921690915590519416939092849290917f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b39190a45050565b600081815260046020908152604091829020825160e081018452815460ff8116151
5825263ffffffff6101008204811694830194909452650100000000008104841694820185905273ffffffffffffffffffffffffffffffffffffffff69010000000000000000009091041660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a083015278010000000000000000000000000000000000000000000000009004821660c08201529114612843576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526006602052604090205473ffffffffffffffffffffffffffffffffffffffff1633146128a0576040517f6352a85300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526005602090815260408083208054337fffffffffffffffffffffffff0000000000000000000000000000000000000000808316821790935560069094528285208054909216909155905173ffffffffffffffffffffffffffffffffffffffff90911692839186917f5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c91a4505050565b6000610a4d61294283613205565b600084815260046020526040902054610100900463ffffffff166116ea565b6129696130ce565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201526000907f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906370a0823190602401602060405180830381865afa1580156129f6573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612a1a9190614974565b90507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb3360195484612a67919061456b565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e085901b16815273ffffffffffffffffffffffffffffffffffffffff9092166004830152602482015260440161205e565b604080516101408101825260008082526020820181905260609282018390528282018190526080820181905260a0820181905260c0820181905260e082018190526101008201526101208101919091526000828152600460209081526040808320815160e081018352815460ff811615158252610100810463ffffffff9081169583019590955265010000000000810485169
3820193909352690100000000000000000090920473ffffffffffffffffffffffffffffffffffffffff16606083018190526001909101546bffffffffffffffffffffffff80821660808501526c0100000000000000000000000082041660a08401527801000000000000000000000000000000000000000000000000900490921660c0820152919015612c5557816060015173ffffffffffffffffffffffffffffffffffffffff1663f00e6a2a6040518163ffffffff1660e01b8152600401602060405180830381865afa158015612c2c573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612c5091906149af565b612c58565b60005b90506040518061014001604052808273ffffffffffffffffffffffffffffffffffffffff168152602001836020015163ffffffff168152602001600760008781526020019081526020016000208054612cb090614614565b80601f0160208091040260200160405190810160405280929190818152602001828054612cdc90614614565b8015612d295780601f10612cfe57610100808354040283529160200191612d29565b820191906000526020600020905b815481529060010190602001808311612d0c57829003601f168201915b505050505081526020018360a001516bffffffffffffffffffffffff1681526020016005600087815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001836040015163ffffffff1667ffffffffffffffff1681526020018360c0015163ffffffff16815260200183608001516bffffffffffffffffffffffff168152602001836000015115158152602001601c60008781526020019081526020016000208054612e0690614614565b80601f0160208091040260200160405190810160405280929190818152602001828054612e3290614614565b8015612e7f5780601f10612e5457610100808354040283529160200191612e7f565b820191906000526020600020905b815481529060010190602001808311612e6257829003601f168201915b505050505081525092505050919050565b612e9983613151565b60165463ffffffff16811115612edb576040517fae7235df00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000838152600760205260409020612ef48284836146b6565b50827fcba2d5723b2ee59e53a8e8a82a4a7caf4fdfe70e9f7c582950bf7e7a5c24e83d8383604051610b009291906147d1565b6000610a4d82612934565b73fffffffff
fffffffffffffffffffffffffffffff8281166000908152600f6020526040902054163314612f92576040517fcebf515b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff821603612fe1576040517f8c8728c700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff82811660009081526010602052604090205481169082161461116c5773ffffffffffffffffffffffffffffffffffffffff82811660008181526010602052604080822080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169486169485179055513392917f84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e3836791a45050565b6130986130ce565b6130a181613936565b50565b6000610a4d825490565b60006130ba8383613a2b565b9392505050565b606060006130ba83613a55565b60005473ffffffffffffffffffffffffffffffffffffffff16331461314f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401611ec1565b565b60008181526005602052604090205473ffffffffffffffffffffffffffffffffffffffff1633146131ae576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008181526004602052604090205465010000000000900463ffffffff908116146130a1576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000818160045b600f811015613292577fff00000000000000000000000000000000000000000000000000000000000000821683826020811061324a5761324a6145ad565b1a60f81b7effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19161461328057506000949350505050565b8061328a816145dc565b91505061320c565b5081600f1a60018111156132a8576132a86142a2565b949350505050565b60006130ba8383613ab0565b6000806000836080015162ffffff1690506000808263ffffffff161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163fff
fffff1660e01b815260040160a060405180830381865afa158015613348573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061336c91906149e6565b509450909250505060008113158061338357508142105b806133a457508280156133a4575061339b824261456b565b8463ffffffff16105b156133b35760175495506133b7565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a060405180830381865afa158015613422573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061344691906149e6565b509450909250505060008113158061345d57508142105b8061347e575082801561347e5750613475824261456b565b8463ffffffff16105b1561348d576018549450613491565b8094505b50505050915091565b600080808660018111156134b0576134b06142a2565b036134be575061ea60613513565b60018660018111156134d2576134d26142a2565b036134e1575062014c08613513565b6040517ff2b2d41200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008760c0015160016135269190614a36565b6135349060ff166040614a4f565b601654613552906103a490640100000000900463ffffffff16614558565b61355c9190614558565b601354604080517fde9ee35e00000000000000000000000000000000000000000000000000000000815281519394506000938493610100900473ffffffffffffffffffffffffffffffffffffffff169263de9ee35e92600480820193918290030181865afa1580156135d2573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906135f69190614a66565b90925090508183613608836018614558565b6136129190614a4f565b60c08c0151613622906001614a36565b6136319060ff166115e0614a4f565b61363b9190614558565b6136459190614558565b61364f9085614558565b935060008a610140015173ffffffffffffffffffffffffffffffffffffffff166312544140856040518263ffffffff1660e01b815260040161369391815260200190565b602060405180830381865afa1580156136b0573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906136d49190614974565b8b60a0015161ffff166136e79190614a4f565b90506000806137028d8c63ffffffff1689868e8e6000613aff565b90925090506
13711818361494f565b9d9c50505050505050505050505050565b60006130ba8383613c3b565b73ffffffffffffffffffffffffffffffffffffffff83166000908152600b602090815260408083208151608081018352905460ff80821615801584526101008304909116948301949094526bffffffffffffffffffffffff6201000082048116938301939093526e010000000000000000000000000000900490911660608201529061392a5760008160600151856137c691906148d0565b905060006137d48583614924565b905080836040018181516137e8919061494f565b6bffffffffffffffffffffffff169052506138038582614a8a565b83606001818151613814919061494f565b6bffffffffffffffffffffffff90811690915273ffffffffffffffffffffffffffffffffffffffff89166000908152600b602090815260409182902087518154928901519389015160608a015186166e010000000000000000000000000000027fffffffffffff000000000000000000000000ffffffffffffffffffffffffffff919096166201000002167fffffffffffff000000000000000000000000000000000000000000000000ffff60ff95909516610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff921515929092167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000909416939093171792909216179190911790555050505b60400151949350505050565b3373ffffffffffffffffffffffffffffffffffffffff8216036139b5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401611ec1565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000826000018281548110613a4257613a426145ad565b9060005260206000200154905092915050565b60608160000180548060200260200160405190810160405280929190818152602001828054801561105e57602002820191906000526020600020905b815481526020019060010190808311613a915750505050509050919050565b6000818152600183016020526040812054613af7575081546001818101845560008481526020808220909301849055845484825282860190935260409020919
09155610a4d565b506000610a4d565b60008060008960a0015161ffff1686613b189190614a4f565b9050838015613b265750803a105b15613b2e57503a5b60008588613b3c8b8d614558565b613b469085614a4f565b613b509190614558565b613b6290670de0b6b3a7640000614a4f565b613b6c9190614aba565b905060008b6040015163ffffffff1664e8d4a51000613b8b9190614a4f565b60208d0151889063ffffffff168b613ba38f88614a4f565b613bad9190614558565b613bbb90633b9aca00614a4f565b613bc59190614a4f565b613bcf9190614aba565b613bd99190614558565b90506b033b2e3c9fd0803ce8000000613bf28284614558565b1115613c2a576040517f2ad7547a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b909b909a5098505050505050505050565b60008181526001830160205260408120548015613d24576000613c5f60018361456b565b8554909150600090613c739060019061456b565b9050818114613cd8576000866000018281548110613c9357613c936145ad565b9060005260206000200154905080876000018481548110613cb657613cb66145ad565b6000918252602080832090910192909255918252600188019052604090208390555b8554869080613ce957613ce9614ace565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505050610a4d565b6000915050610a4d565b5092915050565b60008060408385031215613d4857600080fd5b50508035926020909101359150565b6020808252825182820181905260009190848201906040850190845b81811015613d8f57835183529284019291840191600101613d73565b50909695505050505050565b60008083601f840112613dad57600080fd5b50813567ffffffffffffffff811115613dc557600080fd5b602083019150836020828501011115613ddd57600080fd5b9250929050565b600080600060408486031215613df957600080fd5b83359250602084013567ffffffffffffffff811115613e1757600080fd5b613e2386828701613d9b565b9497909650939450505050565b600081518084526020808501945080840160005b83811015613e7657815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101613e44565b509495945050505050565b805163ffffffff16825260006101e06020830151613ea7602086018263ffffffff169052565b506040830151613ebf604086018263ffffffff169052565b506060830151613ed6606086018262ffffff169052565b506
080830151613eec608086018261ffff169052565b5060a0830151613f0c60a08601826bffffffffffffffffffffffff169052565b5060c0830151613f2460c086018263ffffffff169052565b5060e0830151613f3c60e086018263ffffffff169052565b506101008381015163ffffffff908116918601919091526101208085015190911690850152610140808401519085015261016080840151908501526101808084015173ffffffffffffffffffffffffffffffffffffffff16908501526101a080840151818601839052613fb183870182613e30565b925050506101c080840151613fdd8287018273ffffffffffffffffffffffffffffffffffffffff169052565b5090949350505050565b855163ffffffff16815260006101c0602088015161401560208501826bffffffffffffffffffffffff169052565b5060408801516040840152606088015161403f60608501826bffffffffffffffffffffffff169052565b506080880151608084015260a088015161406160a085018263ffffffff169052565b5060c088015161407960c085018263ffffffff169052565b5060e088015160e08401526101008089015161409c8286018263ffffffff169052565b50506101208881015115159084015261014083018190526140bf81840188613e81565b90508281036101608401526140d48187613e30565b90508281036101808401526140e98186613e30565b9150506118856101a083018460ff169052565b73ffffffffffffffffffffffffffffffffffffffff811681146130a157600080fd5b6000806040838503121561413157600080fd5b823561413c816140fc565b915060208301356004811061415057600080fd5b809150509250929050565b60006020828403121561416d57600080fd5b5035919050565b6000815180845260005b8181101561419a5760208185018101518683018201520161417e565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b6020815260006130ba6020830184614174565b600080604083850312156141fe57600080fd5b823591506020830135614150816140fc565b6000806020838503121561422357600080fd5b823567ffffffffffffffff8082111561423b57600080fd5b818501915085601f83011261424f57600080fd5b81358181111561425e57600080fd5b8660208260051b850101111561427357600080fd5b60209290920196919550909350505050565b60006020828403121561429757600080fd5b81356130ba816140fc565b7f4e487b710000000000000000000000000000000000000000000000000
0000000600052602160045260246000fd5b60208101600383106142e5576142e56142a2565b91905290565b60208101600283106142e5576142e56142a2565b803563ffffffff8116811461431357600080fd5b919050565b6000806040838503121561432b57600080fd5b82356002811061433a57600080fd5b9150614348602084016142ff565b90509250929050565b60008060006040848603121561436657600080fd5b8335614371816140fc565b9250602084013567ffffffffffffffff811115613e1757600080fd5b600080604083850312156143a057600080fd5b82356143ab816140fc565b91506020830135614150816140fc565b600080604083850312156143ce57600080fd5b82359150614348602084016142ff565b6020815261440560208201835173ffffffffffffffffffffffffffffffffffffffff169052565b6000602083015161441e604084018263ffffffff169052565b50604083015161014080606085015261443b610160850183614174565b9150606085015161445c60808601826bffffffffffffffffffffffff169052565b50608085015173ffffffffffffffffffffffffffffffffffffffff811660a08601525060a085015167ffffffffffffffff811660c08601525060c085015163ffffffff811660e08601525060e08501516101006144c8818701836bffffffffffffffffffffffff169052565b86015190506101206144dd8682018315159052565b8601518584037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0018387015290506118858382614174565b60208101600483106142e5576142e56142a2565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b80820180821115610a4d57610a4d614529565b81810381811115610a4d57610a4d614529565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361460d5761460d614529565b5060010190565b600181811c9082168061462857607f821691505b602082108103614661577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f8211156146b157600081815260208120601f850160051c8101602086101561468e5750805b601f850160051c820191505b818110156146ad5782815560010
161469a565b5050505b505050565b67ffffffffffffffff8311156146ce576146ce61457e565b6146e2836146dc8354614614565b83614667565b6000601f84116001811461473457600085156146fe5750838201355b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600387901b1c1916600186901b1783556147ca565b6000838152602090207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0861690835b828110156147835786850135825560209485019460019092019101614763565b50868210156147be577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60f88860031b161c19848701351681555b505060018560011b0183555b5050505050565b60208152816020820152818360408301376000818301604090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160101919050565b6000604082016040835280865480835260608501915087600052602092508260002060005b8281101561487557815473ffffffffffffffffffffffffffffffffffffffff1684529284019260019182019101614843565b505050838103828501528481528590820160005b868110156148c457823561489c816140fc565b73ffffffffffffffffffffffffffffffffffffffff1682529183019190830190600101614889565b50979650505050505050565b6bffffffffffffffffffffffff828116828216039080821115613d2e57613d2e614529565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b60006bffffffffffffffffffffffff80841680614943576149436148f5565b92169190910492915050565b6bffffffffffffffffffffffff818116838216019080821115613d2e57613d2e614529565b60006020828403121561498657600080fd5b5051919050565b60006020828403121561499f57600080fd5b815180151581146130ba57600080fd5b6000602082840312156149c157600080fd5b81516130ba816140fc565b805169ffffffffffffffffffff8116811461431357600080fd5b600080600080600060a086880312156149fe57600080fd5b614a07866149cc565b9450602086015193506040860151925060608601519150614a2a608087016149cc565b90509295509295909350565b60ff8181168382160190811115610a4d57610a4d614529565b8082028115828204841417610a4d57610a4d614529565b60008060408385031215614a7957600080fd5b505080516020909101519092909150565b6bfffffffff
fffffffffffffff818116838216028082169190828114614ab257614ab2614529565b505092915050565b600082614ac957614ac96148f5565b500490565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fdfea164736f6c6343000813000a", +} + +var AutomationRegistryLogicBABI = AutomationRegistryLogicBMetaData.ABI + +var AutomationRegistryLogicBBin = AutomationRegistryLogicBMetaData.Bin + +func DeployAutomationRegistryLogicB(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, linkNativeFeed common.Address, fastGasFeed common.Address, automationForwarderLogic common.Address, allowedReadOnlyAddress common.Address) (common.Address, *types.Transaction, *AutomationRegistryLogicB, error) { + parsed, err := AutomationRegistryLogicBMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(AutomationRegistryLogicBBin), backend, link, linkNativeFeed, fastGasFeed, automationForwarderLogic, allowedReadOnlyAddress) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &AutomationRegistryLogicB{address: address, abi: *parsed, AutomationRegistryLogicBCaller: AutomationRegistryLogicBCaller{contract: contract}, AutomationRegistryLogicBTransactor: AutomationRegistryLogicBTransactor{contract: contract}, AutomationRegistryLogicBFilterer: AutomationRegistryLogicBFilterer{contract: contract}}, nil +} + +type AutomationRegistryLogicB struct { + address common.Address + abi abi.ABI + AutomationRegistryLogicBCaller + AutomationRegistryLogicBTransactor + AutomationRegistryLogicBFilterer +} + +type AutomationRegistryLogicBCaller struct { + contract *bind.BoundContract +} + +type AutomationRegistryLogicBTransactor struct { + contract *bind.BoundContract +} + +type AutomationRegistryLogicBFilterer struct { + contract 
*bind.BoundContract +} + +type AutomationRegistryLogicBSession struct { + Contract *AutomationRegistryLogicB + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AutomationRegistryLogicBCallerSession struct { + Contract *AutomationRegistryLogicBCaller + CallOpts bind.CallOpts +} + +type AutomationRegistryLogicBTransactorSession struct { + Contract *AutomationRegistryLogicBTransactor + TransactOpts bind.TransactOpts +} + +type AutomationRegistryLogicBRaw struct { + Contract *AutomationRegistryLogicB +} + +type AutomationRegistryLogicBCallerRaw struct { + Contract *AutomationRegistryLogicBCaller +} + +type AutomationRegistryLogicBTransactorRaw struct { + Contract *AutomationRegistryLogicBTransactor +} + +func NewAutomationRegistryLogicB(address common.Address, backend bind.ContractBackend) (*AutomationRegistryLogicB, error) { + abi, err := abi.JSON(strings.NewReader(AutomationRegistryLogicBABI)) + if err != nil { + return nil, err + } + contract, err := bindAutomationRegistryLogicB(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicB{address: address, abi: abi, AutomationRegistryLogicBCaller: AutomationRegistryLogicBCaller{contract: contract}, AutomationRegistryLogicBTransactor: AutomationRegistryLogicBTransactor{contract: contract}, AutomationRegistryLogicBFilterer: AutomationRegistryLogicBFilterer{contract: contract}}, nil +} + +func NewAutomationRegistryLogicBCaller(address common.Address, caller bind.ContractCaller) (*AutomationRegistryLogicBCaller, error) { + contract, err := bindAutomationRegistryLogicB(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBCaller{contract: contract}, nil +} + +func NewAutomationRegistryLogicBTransactor(address common.Address, transactor bind.ContractTransactor) (*AutomationRegistryLogicBTransactor, error) { + contract, err := bindAutomationRegistryLogicB(address, nil, transactor, nil) + if err != nil { + 
return nil, err + } + return &AutomationRegistryLogicBTransactor{contract: contract}, nil +} + +func NewAutomationRegistryLogicBFilterer(address common.Address, filterer bind.ContractFilterer) (*AutomationRegistryLogicBFilterer, error) { + contract, err := bindAutomationRegistryLogicB(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBFilterer{contract: contract}, nil +} + +func bindAutomationRegistryLogicB(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := AutomationRegistryLogicBMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationRegistryLogicB.Contract.AutomationRegistryLogicBCaller.contract.Call(opts, result, method, params...) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.AutomationRegistryLogicBTransactor.contract.Transfer(opts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.AutomationRegistryLogicBTransactor.contract.Transact(opts, method, params...) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationRegistryLogicB.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.contract.Transfer(opts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.contract.Transact(opts, method, params...) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getActiveUpkeepIDs", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetActiveUpkeepIDs(&_AutomationRegistryLogicB.CallOpts, startIndex, maxCount) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetActiveUpkeepIDs(&_AutomationRegistryLogicB.CallOpts, startIndex, maxCount) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetAdminPrivilegeConfig(opts *bind.CallOpts, admin common.Address) ([]byte, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getAdminPrivilegeConfig", admin) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetAdminPrivilegeConfig(admin 
common.Address) ([]byte, error) { + return _AutomationRegistryLogicB.Contract.GetAdminPrivilegeConfig(&_AutomationRegistryLogicB.CallOpts, admin) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetAdminPrivilegeConfig(admin common.Address) ([]byte, error) { + return _AutomationRegistryLogicB.Contract.GetAdminPrivilegeConfig(&_AutomationRegistryLogicB.CallOpts, admin) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetAllowedReadOnlyAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getAllowedReadOnlyAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetAllowedReadOnlyAddress() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetAllowedReadOnlyAddress(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetAllowedReadOnlyAddress() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetAllowedReadOnlyAddress(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetAutomationForwarderLogic(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getAutomationForwarderLogic") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetAutomationForwarderLogic() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetAutomationForwarderLogic(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB 
*AutomationRegistryLogicBCallerSession) GetAutomationForwarderLogic() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetAutomationForwarderLogic(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getBalance", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetBalance(id *big.Int) (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetBalance(&_AutomationRegistryLogicB.CallOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetBalance(id *big.Int) (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetBalance(&_AutomationRegistryLogicB.CallOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetCancellationDelay(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getCancellationDelay") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetCancellationDelay() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetCancellationDelay(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetCancellationDelay() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetCancellationDelay(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetChainModule(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err 
:= _AutomationRegistryLogicB.contract.Call(opts, &out, "getChainModule") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetChainModule() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetChainModule(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetChainModule() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetChainModule(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetConditionalGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getConditionalGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetConditionalGasOverhead() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetConditionalGasOverhead(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetConditionalGasOverhead() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetConditionalGasOverhead(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetFastGasFeedAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getFastGasFeedAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) 
GetFastGasFeedAddress() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetFastGasFeedAddress(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetFastGasFeedAddress() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetFastGasFeedAddress(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getForwarder", upkeepID) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetForwarder(&_AutomationRegistryLogicB.CallOpts, upkeepID) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetForwarder(&_AutomationRegistryLogicB.CallOpts, upkeepID) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetLinkAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getLinkAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetLinkAddress() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetLinkAddress(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetLinkAddress() (common.Address, 
error) { + return _AutomationRegistryLogicB.Contract.GetLinkAddress(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetLinkNativeFeedAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getLinkNativeFeedAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetLinkNativeFeedAddress() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetLinkNativeFeedAddress(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetLinkNativeFeedAddress() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.GetLinkNativeFeedAddress(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetLogGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getLogGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetLogGasOverhead() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetLogGasOverhead(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetLogGasOverhead() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetLogGasOverhead(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetMaxPaymentForGas(opts *bind.CallOpts, triggerType uint8, gasLimit uint32) (*big.Int, error) { + var out []interface{} + err := 
_AutomationRegistryLogicB.contract.Call(opts, &out, "getMaxPaymentForGas", triggerType, gasLimit) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetMaxPaymentForGas(triggerType uint8, gasLimit uint32) (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetMaxPaymentForGas(&_AutomationRegistryLogicB.CallOpts, triggerType, gasLimit) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetMaxPaymentForGas(triggerType uint8, gasLimit uint32) (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetMaxPaymentForGas(&_AutomationRegistryLogicB.CallOpts, triggerType, gasLimit) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetMinBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getMinBalance", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetMinBalance(id *big.Int) (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetMinBalance(&_AutomationRegistryLogicB.CallOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetMinBalance(id *big.Int) (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetMinBalance(&_AutomationRegistryLogicB.CallOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getMinBalanceForUpkeep", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + 
return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetMinBalanceForUpkeep(&_AutomationRegistryLogicB.CallOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetMinBalanceForUpkeep(&_AutomationRegistryLogicB.CallOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getPeerRegistryMigrationPermission", peer) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return _AutomationRegistryLogicB.Contract.GetPeerRegistryMigrationPermission(&_AutomationRegistryLogicB.CallOpts, peer) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return _AutomationRegistryLogicB.Contract.GetPeerRegistryMigrationPermission(&_AutomationRegistryLogicB.CallOpts, peer) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetPerPerformByteGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getPerPerformByteGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetPerPerformByteGasOverhead() (*big.Int, error) { + return 
_AutomationRegistryLogicB.Contract.GetPerPerformByteGasOverhead(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetPerPerformByteGasOverhead() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetPerPerformByteGasOverhead(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetPerSignerGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getPerSignerGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetPerSignerGasOverhead() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetPerSignerGasOverhead(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetPerSignerGasOverhead() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetPerSignerGasOverhead(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetReorgProtectionEnabled(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getReorgProtectionEnabled") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetReorgProtectionEnabled() (bool, error) { + return _AutomationRegistryLogicB.Contract.GetReorgProtectionEnabled(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetReorgProtectionEnabled() (bool, error) { + return 
_AutomationRegistryLogicB.Contract.GetReorgProtectionEnabled(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetSignerInfo(opts *bind.CallOpts, query common.Address) (GetSignerInfo, + + error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getSignerInfo", query) + + outstruct := new(GetSignerInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.Index = *abi.ConvertType(out[1], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetSignerInfo(query common.Address) (GetSignerInfo, + + error) { + return _AutomationRegistryLogicB.Contract.GetSignerInfo(&_AutomationRegistryLogicB.CallOpts, query) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetSignerInfo(query common.Address) (GetSignerInfo, + + error) { + return _AutomationRegistryLogicB.Contract.GetSignerInfo(&_AutomationRegistryLogicB.CallOpts, query) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetState(opts *bind.CallOpts) (GetState, + + error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getState") + + outstruct := new(GetState) + if err != nil { + return *outstruct, err + } + + outstruct.State = *abi.ConvertType(out[0], new(AutomationRegistryBase22State)).(*AutomationRegistryBase22State) + outstruct.Config = *abi.ConvertType(out[1], new(AutomationRegistryBase22OnchainConfigLegacy)).(*AutomationRegistryBase22OnchainConfigLegacy) + outstruct.Signers = *abi.ConvertType(out[2], new([]common.Address)).(*[]common.Address) + outstruct.Transmitters = *abi.ConvertType(out[3], new([]common.Address)).(*[]common.Address) + outstruct.F = *abi.ConvertType(out[4], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) 
GetState() (GetState, + + error) { + return _AutomationRegistryLogicB.Contract.GetState(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetState() (GetState, + + error) { + return _AutomationRegistryLogicB.Contract.GetState(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetTransmitCalldataFixedBytesOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getTransmitCalldataFixedBytesOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetTransmitCalldataFixedBytesOverhead() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetTransmitCalldataFixedBytesOverhead(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetTransmitCalldataFixedBytesOverhead() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetTransmitCalldataFixedBytesOverhead(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetTransmitCalldataPerSignerBytesOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getTransmitCalldataPerSignerBytesOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetTransmitCalldataPerSignerBytesOverhead() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetTransmitCalldataPerSignerBytesOverhead(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB 
*AutomationRegistryLogicBCallerSession) GetTransmitCalldataPerSignerBytesOverhead() (*big.Int, error) { + return _AutomationRegistryLogicB.Contract.GetTransmitCalldataPerSignerBytesOverhead(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetTransmitterInfo(opts *bind.CallOpts, query common.Address) (GetTransmitterInfo, + + error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getTransmitterInfo", query) + + outstruct := new(GetTransmitterInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.Index = *abi.ConvertType(out[1], new(uint8)).(*uint8) + outstruct.Balance = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.LastCollected = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.Payee = *abi.ConvertType(out[4], new(common.Address)).(*common.Address) + + return *outstruct, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetTransmitterInfo(query common.Address) (GetTransmitterInfo, + + error) { + return _AutomationRegistryLogicB.Contract.GetTransmitterInfo(&_AutomationRegistryLogicB.CallOpts, query) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetTransmitterInfo(query common.Address) (GetTransmitterInfo, + + error) { + return _AutomationRegistryLogicB.Contract.GetTransmitterInfo(&_AutomationRegistryLogicB.CallOpts, query) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getTriggerType", upkeepId) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetTriggerType(upkeepId 
*big.Int) (uint8, error) { + return _AutomationRegistryLogicB.Contract.GetTriggerType(&_AutomationRegistryLogicB.CallOpts, upkeepId) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetTriggerType(upkeepId *big.Int) (uint8, error) { + return _AutomationRegistryLogicB.Contract.GetTriggerType(&_AutomationRegistryLogicB.CallOpts, upkeepId) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetUpkeep(opts *bind.CallOpts, id *big.Int) (AutomationRegistryBase22UpkeepInfo, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getUpkeep", id) + + if err != nil { + return *new(AutomationRegistryBase22UpkeepInfo), err + } + + out0 := *abi.ConvertType(out[0], new(AutomationRegistryBase22UpkeepInfo)).(*AutomationRegistryBase22UpkeepInfo) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetUpkeep(id *big.Int) (AutomationRegistryBase22UpkeepInfo, error) { + return _AutomationRegistryLogicB.Contract.GetUpkeep(&_AutomationRegistryLogicB.CallOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetUpkeep(id *big.Int) (AutomationRegistryBase22UpkeepInfo, error) { + return _AutomationRegistryLogicB.Contract.GetUpkeep(&_AutomationRegistryLogicB.CallOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getUpkeepPrivilegeConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return _AutomationRegistryLogicB.Contract.GetUpkeepPrivilegeConfig(&_AutomationRegistryLogicB.CallOpts, upkeepId) +} + +func 
(_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return _AutomationRegistryLogicB.Contract.GetUpkeepPrivilegeConfig(&_AutomationRegistryLogicB.CallOpts, upkeepId) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "getUpkeepTriggerConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _AutomationRegistryLogicB.Contract.GetUpkeepTriggerConfig(&_AutomationRegistryLogicB.CallOpts, upkeepId) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _AutomationRegistryLogicB.Contract.GetUpkeepTriggerConfig(&_AutomationRegistryLogicB.CallOpts, upkeepId) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) HasDedupKey(opts *bind.CallOpts, dedupKey [32]byte) (bool, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "hasDedupKey", dedupKey) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) HasDedupKey(dedupKey [32]byte) (bool, error) { + return _AutomationRegistryLogicB.Contract.HasDedupKey(&_AutomationRegistryLogicB.CallOpts, dedupKey) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) HasDedupKey(dedupKey [32]byte) (bool, error) { + return _AutomationRegistryLogicB.Contract.HasDedupKey(&_AutomationRegistryLogicB.CallOpts, dedupKey) +} + +func 
(_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) Owner() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.Owner(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) Owner() (common.Address, error) { + return _AutomationRegistryLogicB.Contract.Owner(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "upkeepTranscoderVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) UpkeepTranscoderVersion() (uint8, error) { + return _AutomationRegistryLogicB.Contract.UpkeepTranscoderVersion(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) UpkeepTranscoderVersion() (uint8, error) { + return _AutomationRegistryLogicB.Contract.UpkeepTranscoderVersion(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCaller) UpkeepVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _AutomationRegistryLogicB.contract.Call(opts, &out, "upkeepVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_AutomationRegistryLogicB 
*AutomationRegistryLogicBSession) UpkeepVersion() (uint8, error) { + return _AutomationRegistryLogicB.Contract.UpkeepVersion(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBCallerSession) UpkeepVersion() (uint8, error) { + return _AutomationRegistryLogicB.Contract.UpkeepVersion(&_AutomationRegistryLogicB.CallOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "acceptOwnership") +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) AcceptOwnership() (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.AcceptOwnership(&_AutomationRegistryLogicB.TransactOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.AcceptOwnership(&_AutomationRegistryLogicB.TransactOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "acceptPayeeship", transmitter) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.AcceptPayeeship(&_AutomationRegistryLogicB.TransactOpts, transmitter) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.AcceptPayeeship(&_AutomationRegistryLogicB.TransactOpts, transmitter) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { 
+ return _AutomationRegistryLogicB.contract.Transact(opts, "acceptUpkeepAdmin", id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.AcceptUpkeepAdmin(&_AutomationRegistryLogicB.TransactOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.AcceptUpkeepAdmin(&_AutomationRegistryLogicB.TransactOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "pause") +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) Pause() (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.Pause(&_AutomationRegistryLogicB.TransactOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) Pause() (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.Pause(&_AutomationRegistryLogicB.TransactOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "pauseUpkeep", id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) PauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.PauseUpkeep(&_AutomationRegistryLogicB.TransactOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) PauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.PauseUpkeep(&_AutomationRegistryLogicB.TransactOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) RecoverFunds(opts 
*bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "recoverFunds") +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) RecoverFunds() (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.RecoverFunds(&_AutomationRegistryLogicB.TransactOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) RecoverFunds() (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.RecoverFunds(&_AutomationRegistryLogicB.TransactOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) SetAdminPrivilegeConfig(opts *bind.TransactOpts, admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "setAdminPrivilegeConfig", admin, newPrivilegeConfig) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) SetAdminPrivilegeConfig(admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetAdminPrivilegeConfig(&_AutomationRegistryLogicB.TransactOpts, admin, newPrivilegeConfig) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) SetAdminPrivilegeConfig(admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetAdminPrivilegeConfig(&_AutomationRegistryLogicB.TransactOpts, admin, newPrivilegeConfig) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) SetPayees(opts *bind.TransactOpts, payees []common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "setPayees", payees) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) SetPayees(payees []common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetPayees(&_AutomationRegistryLogicB.TransactOpts, payees) 
+} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) SetPayees(payees []common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetPayees(&_AutomationRegistryLogicB.TransactOpts, payees) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "setPeerRegistryMigrationPermission", peer, permission) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetPeerRegistryMigrationPermission(&_AutomationRegistryLogicB.TransactOpts, peer, permission) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetPeerRegistryMigrationPermission(&_AutomationRegistryLogicB.TransactOpts, peer, permission) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) SetUpkeepCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "setUpkeepCheckData", id, newCheckData) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) SetUpkeepCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetUpkeepCheckData(&_AutomationRegistryLogicB.TransactOpts, id, newCheckData) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) SetUpkeepCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return 
_AutomationRegistryLogicB.Contract.SetUpkeepCheckData(&_AutomationRegistryLogicB.TransactOpts, id, newCheckData) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "setUpkeepGasLimit", id, gasLimit) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetUpkeepGasLimit(&_AutomationRegistryLogicB.TransactOpts, id, gasLimit) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetUpkeepGasLimit(&_AutomationRegistryLogicB.TransactOpts, id, gasLimit) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) SetUpkeepOffchainConfig(opts *bind.TransactOpts, id *big.Int, config []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "setUpkeepOffchainConfig", id, config) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) SetUpkeepOffchainConfig(id *big.Int, config []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetUpkeepOffchainConfig(&_AutomationRegistryLogicB.TransactOpts, id, config) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) SetUpkeepOffchainConfig(id *big.Int, config []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetUpkeepOffchainConfig(&_AutomationRegistryLogicB.TransactOpts, id, config) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) { + return 
_AutomationRegistryLogicB.contract.Transact(opts, "setUpkeepPrivilegeConfig", upkeepId, newPrivilegeConfig) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetUpkeepPrivilegeConfig(&_AutomationRegistryLogicB.TransactOpts, upkeepId, newPrivilegeConfig) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.SetUpkeepPrivilegeConfig(&_AutomationRegistryLogicB.TransactOpts, upkeepId, newPrivilegeConfig) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "transferOwnership", to) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.TransferOwnership(&_AutomationRegistryLogicB.TransactOpts, to) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.TransferOwnership(&_AutomationRegistryLogicB.TransactOpts, to) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "transferPayeeship", transmitter, proposed) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return 
_AutomationRegistryLogicB.Contract.TransferPayeeship(&_AutomationRegistryLogicB.TransactOpts, transmitter, proposed) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.TransferPayeeship(&_AutomationRegistryLogicB.TransactOpts, transmitter, proposed) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "transferUpkeepAdmin", id, proposed) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.TransferUpkeepAdmin(&_AutomationRegistryLogicB.TransactOpts, id, proposed) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.TransferUpkeepAdmin(&_AutomationRegistryLogicB.TransactOpts, id, proposed) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "unpause") +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) Unpause() (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.Unpause(&_AutomationRegistryLogicB.TransactOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) Unpause() (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.Unpause(&_AutomationRegistryLogicB.TransactOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) UnpauseUpkeep(opts 
*bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "unpauseUpkeep", id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.UnpauseUpkeep(&_AutomationRegistryLogicB.TransactOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.UnpauseUpkeep(&_AutomationRegistryLogicB.TransactOpts, id) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "withdrawFunds", id, to) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.WithdrawFunds(&_AutomationRegistryLogicB.TransactOpts, id, to) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.WithdrawFunds(&_AutomationRegistryLogicB.TransactOpts, id, to) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "withdrawOwnerFunds") +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.WithdrawOwnerFunds(&_AutomationRegistryLogicB.TransactOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return 
_AutomationRegistryLogicB.Contract.WithdrawOwnerFunds(&_AutomationRegistryLogicB.TransactOpts) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactor) WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.contract.Transact(opts, "withdrawPayment", from, to) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.WithdrawPayment(&_AutomationRegistryLogicB.TransactOpts, from, to) +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBTransactorSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _AutomationRegistryLogicB.Contract.WithdrawPayment(&_AutomationRegistryLogicB.TransactOpts, from, to) +} + +type AutomationRegistryLogicBAdminPrivilegeConfigSetIterator struct { + Event *AutomationRegistryLogicBAdminPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBAdminPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*AutomationRegistryLogicBAdminPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBAdminPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBAdminPrivilegeConfigSet struct { + Admin common.Address + PrivilegeConfig []byte + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*AutomationRegistryLogicBAdminPrivilegeConfigSetIterator, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBAdminPrivilegeConfigSetIterator{contract: _AutomationRegistryLogicB.contract, event: "AdminPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBAdminPrivilegeConfigSet) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return 
err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseAdminPrivilegeConfigSet(log types.Log) (*AutomationRegistryLogicBAdminPrivilegeConfigSet, error) { + event := new(AutomationRegistryLogicBAdminPrivilegeConfigSet) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBCancelledUpkeepReportIterator struct { + Event *AutomationRegistryLogicBCancelledUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBCancelledUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBCancelledUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBCancelledUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBCancelledUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) 
(*AutomationRegistryLogicBCancelledUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBCancelledUpkeepReportIterator{contract: _AutomationRegistryLogicB.contract, event: "CancelledUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBCancelledUpkeepReport) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseCancelledUpkeepReport(log types.Log) (*AutomationRegistryLogicBCancelledUpkeepReport, error) { + event := new(AutomationRegistryLogicBCancelledUpkeepReport) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBChainSpecificModuleUpdatedIterator struct { + Event 
*AutomationRegistryLogicBChainSpecificModuleUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBChainSpecificModuleUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBChainSpecificModuleUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBChainSpecificModuleUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBChainSpecificModuleUpdatedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBChainSpecificModuleUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBChainSpecificModuleUpdated struct { + NewModule common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterChainSpecificModuleUpdated(opts *bind.FilterOpts) (*AutomationRegistryLogicBChainSpecificModuleUpdatedIterator, error) { + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "ChainSpecificModuleUpdated") + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBChainSpecificModuleUpdatedIterator{contract: _AutomationRegistryLogicB.contract, event: "ChainSpecificModuleUpdated", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchChainSpecificModuleUpdated(opts *bind.WatchOpts, sink chan<- 
*AutomationRegistryLogicBChainSpecificModuleUpdated) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "ChainSpecificModuleUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBChainSpecificModuleUpdated) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "ChainSpecificModuleUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseChainSpecificModuleUpdated(log types.Log) (*AutomationRegistryLogicBChainSpecificModuleUpdated, error) { + event := new(AutomationRegistryLogicBChainSpecificModuleUpdated) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "ChainSpecificModuleUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBDedupKeyAddedIterator struct { + Event *AutomationRegistryLogicBDedupKeyAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBDedupKeyAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBDedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBDedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != 
nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBDedupKeyAddedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBDedupKeyAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBDedupKeyAdded struct { + DedupKey [32]byte + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*AutomationRegistryLogicBDedupKeyAddedIterator, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBDedupKeyAddedIterator{contract: _AutomationRegistryLogicB.contract, event: "DedupKeyAdded", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBDedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBDedupKeyAdded) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case 
<-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseDedupKeyAdded(log types.Log) (*AutomationRegistryLogicBDedupKeyAdded, error) { + event := new(AutomationRegistryLogicBDedupKeyAdded) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBFundsAddedIterator struct { + Event *AutomationRegistryLogicBFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBFundsAddedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*AutomationRegistryLogicBFundsAddedIterator, error) { + + var idRule 
[]interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBFundsAddedIterator{contract: _AutomationRegistryLogicB.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBFundsAdded) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseFundsAdded(log types.Log) (*AutomationRegistryLogicBFundsAdded, error) { + event := new(AutomationRegistryLogicBFundsAdded) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBFundsWithdrawnIterator struct { + 
Event *AutomationRegistryLogicBFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBFundsWithdrawnIterator{contract: _AutomationRegistryLogicB.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBFundsWithdrawn, 
id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBFundsWithdrawn) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseFundsWithdrawn(log types.Log) (*AutomationRegistryLogicBFundsWithdrawn, error) { + event := new(AutomationRegistryLogicBFundsWithdrawn) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBInsufficientFundsUpkeepReportIterator struct { + Event *AutomationRegistryLogicBInsufficientFundsUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBInsufficientFundsUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBInsufficientFundsUpkeepReport) + if 
err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBInsufficientFundsUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBInsufficientFundsUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBInsufficientFundsUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBInsufficientFundsUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBInsufficientFundsUpkeepReportIterator{contract: _AutomationRegistryLogicB.contract, event: "InsufficientFundsUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBInsufficientFundsUpkeepReport) + if err := 
_AutomationRegistryLogicB.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseInsufficientFundsUpkeepReport(log types.Log) (*AutomationRegistryLogicBInsufficientFundsUpkeepReport, error) { + event := new(AutomationRegistryLogicBInsufficientFundsUpkeepReport) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBOwnerFundsWithdrawnIterator struct { + Event *AutomationRegistryLogicBOwnerFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBOwnerFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + 
+type AutomationRegistryLogicBOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*AutomationRegistryLogicBOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBOwnerFundsWithdrawnIterator{contract: _AutomationRegistryLogicB.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBOwnerFundsWithdrawn) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*AutomationRegistryLogicBOwnerFundsWithdrawn, error) { + event := new(AutomationRegistryLogicBOwnerFundsWithdrawn) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBOwnershipTransferRequestedIterator struct { + Event *AutomationRegistryLogicBOwnershipTransferRequested + + 
contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistryLogicBOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBOwnershipTransferRequestedIterator{contract: _AutomationRegistryLogicB.contract, event: 
"OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBOwnershipTransferRequested) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseOwnershipTransferRequested(log types.Log) (*AutomationRegistryLogicBOwnershipTransferRequested, error) { + event := new(AutomationRegistryLogicBOwnershipTransferRequested) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBOwnershipTransferredIterator struct { + Event *AutomationRegistryLogicBOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBOwnershipTransferredIterator) Next() bool { + + if 
it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistryLogicBOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBOwnershipTransferredIterator{contract: _AutomationRegistryLogicB.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBOwnershipTransferred, from []common.Address, to []common.Address) 
(event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBOwnershipTransferred) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseOwnershipTransferred(log types.Log) (*AutomationRegistryLogicBOwnershipTransferred, error) { + event := new(AutomationRegistryLogicBOwnershipTransferred) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBPausedIterator struct { + Event *AutomationRegistryLogicBPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(AutomationRegistryLogicBPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBPausedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBPaused struct { + Account common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterPaused(opts *bind.FilterOpts) (*AutomationRegistryLogicBPausedIterator, error) { + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBPausedIterator{contract: _AutomationRegistryLogicB.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBPaused) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBPaused) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParsePaused(log types.Log) (*AutomationRegistryLogicBPaused, error) { + event := new(AutomationRegistryLogicBPaused) + if err := 
_AutomationRegistryLogicB.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBPayeesUpdatedIterator struct { + Event *AutomationRegistryLogicBPayeesUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBPayeesUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBPayeesUpdatedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBPayeesUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBPayeesUpdated struct { + Transmitters []common.Address + Payees []common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterPayeesUpdated(opts *bind.FilterOpts) (*AutomationRegistryLogicBPayeesUpdatedIterator, error) { + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBPayeesUpdatedIterator{contract: _AutomationRegistryLogicB.contract, event: "PayeesUpdated", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) 
WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBPayeesUpdated) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBPayeesUpdated) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParsePayeesUpdated(log types.Log) (*AutomationRegistryLogicBPayeesUpdated, error) { + event := new(AutomationRegistryLogicBPayeesUpdated) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBPayeeshipTransferRequestedIterator struct { + Event *AutomationRegistryLogicBPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, 
log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBPayeeshipTransferRequested struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*AutomationRegistryLogicBPayeeshipTransferRequestedIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBPayeeshipTransferRequestedIterator{contract: _AutomationRegistryLogicB.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) 
+ } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBPayeeshipTransferRequested) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParsePayeeshipTransferRequested(log types.Log) (*AutomationRegistryLogicBPayeeshipTransferRequested, error) { + event := new(AutomationRegistryLogicBPayeeshipTransferRequested) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBPayeeshipTransferredIterator struct { + Event *AutomationRegistryLogicBPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + 
default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBPayeeshipTransferred struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*AutomationRegistryLogicBPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBPayeeshipTransferredIterator{contract: _AutomationRegistryLogicB.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var 
transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBPayeeshipTransferred) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParsePayeeshipTransferred(log types.Log) (*AutomationRegistryLogicBPayeeshipTransferred, error) { + event := new(AutomationRegistryLogicBPayeeshipTransferred) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBPaymentWithdrawnIterator struct { + Event *AutomationRegistryLogicBPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { 
+ it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBPaymentWithdrawn struct { + Transmitter common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*AutomationRegistryLogicBPaymentWithdrawnIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBPaymentWithdrawnIterator{contract: _AutomationRegistryLogicB.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to 
[]common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBPaymentWithdrawn) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParsePaymentWithdrawn(log types.Log) (*AutomationRegistryLogicBPaymentWithdrawn, error) { + event := new(AutomationRegistryLogicBPaymentWithdrawn) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBReorgedUpkeepReportIterator struct { + Event *AutomationRegistryLogicBReorgedUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBReorgedUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBReorgedUpkeepReport) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBReorgedUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBReorgedUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBReorgedUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBReorgedUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBReorgedUpkeepReportIterator{contract: _AutomationRegistryLogicB.contract, event: "ReorgedUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + 
for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBReorgedUpkeepReport) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseReorgedUpkeepReport(log types.Log) (*AutomationRegistryLogicBReorgedUpkeepReport, error) { + event := new(AutomationRegistryLogicBReorgedUpkeepReport) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBStaleUpkeepReportIterator struct { + Event *AutomationRegistryLogicBStaleUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBStaleUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBStaleUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBStaleUpkeepReportIterator) Close() 
error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBStaleUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBStaleUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBStaleUpkeepReportIterator{contract: _AutomationRegistryLogicB.contract, event: "StaleUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBStaleUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBStaleUpkeepReport) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseStaleUpkeepReport(log types.Log) (*AutomationRegistryLogicBStaleUpkeepReport, error) { + event := new(AutomationRegistryLogicBStaleUpkeepReport) + if err := 
_AutomationRegistryLogicB.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUnpausedIterator struct { + Event *AutomationRegistryLogicBUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUnpausedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUnpaused(opts *bind.FilterOpts) (*AutomationRegistryLogicBUnpausedIterator, error) { + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUnpausedIterator{contract: _AutomationRegistryLogicB.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUnpaused) 
(event.Subscription, error) { + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUnpaused) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUnpaused(log types.Log) (*AutomationRegistryLogicBUnpaused, error) { + event := new(AutomationRegistryLogicBUnpaused) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepAdminTransferRequestedIterator struct { + Event *AutomationRegistryLogicBUpkeepAdminTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepAdminTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := 
<-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUpkeepAdminTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepAdminTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUpkeepAdminTransferRequested struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*AutomationRegistryLogicBUpkeepAdminTransferRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepAdminTransferRequestedIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepAdminTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := 
_AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepAdminTransferRequested) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepAdminTransferRequested(log types.Log) (*AutomationRegistryLogicBUpkeepAdminTransferRequested, error) { + event := new(AutomationRegistryLogicBUpkeepAdminTransferRequested) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepAdminTransferredIterator struct { + Event *AutomationRegistryLogicBUpkeepAdminTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepAdminTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = 
err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUpkeepAdminTransferredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepAdminTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUpkeepAdminTransferred struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*AutomationRegistryLogicBUpkeepAdminTransferredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepAdminTransferredIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepAdminTransferred", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := 
_AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepAdminTransferred) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepAdminTransferred(log types.Log) (*AutomationRegistryLogicBUpkeepAdminTransferred, error) { + event := new(AutomationRegistryLogicBUpkeepAdminTransferred) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepCanceledIterator struct { + Event *AutomationRegistryLogicBUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := 
<-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*AutomationRegistryLogicBUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepCanceledIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(AutomationRegistryLogicBUpkeepCanceled) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepCanceled(log types.Log) (*AutomationRegistryLogicBUpkeepCanceled, error) { + event := new(AutomationRegistryLogicBUpkeepCanceled) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepCheckDataSetIterator struct { + Event *AutomationRegistryLogicBUpkeepCheckDataSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepCheckDataSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUpkeepCheckDataSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepCheckDataSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
AutomationRegistryLogicBUpkeepCheckDataSet struct { + Id *big.Int + NewCheckData []byte + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepCheckDataSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepCheckDataSetIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepCheckDataSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepCheckDataSet) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepCheckDataSet(log types.Log) (*AutomationRegistryLogicBUpkeepCheckDataSet, error) { + event := new(AutomationRegistryLogicBUpkeepCheckDataSet) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, 
"UpkeepCheckDataSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepGasLimitSetIterator struct { + Event *AutomationRegistryLogicBUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUpkeepGasLimitSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepGasLimitSetIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepGasLimitSet", logs: 
logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepGasLimitSet) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepGasLimitSet(log types.Log) (*AutomationRegistryLogicBUpkeepGasLimitSet, error) { + event := new(AutomationRegistryLogicBUpkeepGasLimitSet) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepMigratedIterator struct { + Event *AutomationRegistryLogicBUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return 
true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUpkeepMigrated struct { + Id *big.Int + RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepMigratedIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepMigrated", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepMigrated) + if err := 
_AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepMigrated(log types.Log) (*AutomationRegistryLogicBUpkeepMigrated, error) { + event := new(AutomationRegistryLogicBUpkeepMigrated) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepOffchainConfigSetIterator struct { + Event *AutomationRegistryLogicBUpkeepOffchainConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepOffchainConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUpkeepOffchainConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepOffchainConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
AutomationRegistryLogicBUpkeepOffchainConfigSet struct { + Id *big.Int + OffchainConfig []byte + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepOffchainConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepOffchainConfigSetIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepOffchainConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepOffchainConfigSet) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepOffchainConfigSet(log types.Log) (*AutomationRegistryLogicBUpkeepOffchainConfigSet, error) { + event := new(AutomationRegistryLogicBUpkeepOffchainConfigSet) + if 
err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepPausedIterator struct { + Event *AutomationRegistryLogicBUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUpkeepPaused struct { + Id *big.Int + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepPausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepPausedIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepPaused", logs: logs, sub: sub}, nil 
+} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepPaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepPaused) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepPaused(log types.Log) (*AutomationRegistryLogicBUpkeepPaused, error) { + event := new(AutomationRegistryLogicBUpkeepPaused) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepPerformedIterator struct { + Event *AutomationRegistryLogicBUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case 
log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUpkeepPerformed struct { + Id *big.Int + Success bool + TotalPayment *big.Int + GasUsed *big.Int + GasOverhead *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*AutomationRegistryLogicBUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepPerformedIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepPerformed", 
idRule, successRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepPerformed) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepPerformed(log types.Log) (*AutomationRegistryLogicBUpkeepPerformed, error) { + event := new(AutomationRegistryLogicBUpkeepPerformed) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepPrivilegeConfigSetIterator struct { + Event *AutomationRegistryLogicBUpkeepPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func 
(it *AutomationRegistryLogicBUpkeepPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUpkeepPrivilegeConfigSet struct { + Id *big.Int + PrivilegeConfig []byte + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepPrivilegeConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepPrivilegeConfigSetIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepPrivilegeConfigSet) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} 
+ +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepPrivilegeConfigSet(log types.Log) (*AutomationRegistryLogicBUpkeepPrivilegeConfigSet, error) { + event := new(AutomationRegistryLogicBUpkeepPrivilegeConfigSet) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepReceivedIterator struct { + Event *AutomationRegistryLogicBUpkeepReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUpkeepReceivedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUpkeepReceived struct { + Id *big.Int + StartingBalance *big.Int + ImportedFrom common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepReceivedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + 
idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepReceivedIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepReceived", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepReceived, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepReceived) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepReceived(log types.Log) (*AutomationRegistryLogicBUpkeepReceived, error) { + event := new(AutomationRegistryLogicBUpkeepReceived) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepRegisteredIterator struct { + Event *AutomationRegistryLogicBUpkeepRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepRegisteredIterator) Next() bool { + 
+ if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUpkeepRegistered struct { + Id *big.Int + PerformGas uint32 + Admin common.Address + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepRegisteredIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, 
"UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepRegistered) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepRegistered(log types.Log) (*AutomationRegistryLogicBUpkeepRegistered, error) { + event := new(AutomationRegistryLogicBUpkeepRegistered) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepTriggerConfigSetIterator struct { + Event *AutomationRegistryLogicBUpkeepTriggerConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepTriggerConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + 
+func (it *AutomationRegistryLogicBUpkeepTriggerConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepTriggerConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUpkeepTriggerConfigSet struct { + Id *big.Int + TriggerConfig []byte + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepTriggerConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepTriggerConfigSetIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepTriggerConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepTriggerConfigSet) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func 
(_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepTriggerConfigSet(log types.Log) (*AutomationRegistryLogicBUpkeepTriggerConfigSet, error) { + event := new(AutomationRegistryLogicBUpkeepTriggerConfigSet) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryLogicBUpkeepUnpausedIterator struct { + Event *AutomationRegistryLogicBUpkeepUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryLogicBUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryLogicBUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryLogicBUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryLogicBUpkeepUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryLogicBUpkeepUnpaused struct { + Id *big.Int + Raw types.Log +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepUnpausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_AutomationRegistryLogicB.contract.FilterLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryLogicBUpkeepUnpausedIterator{contract: _AutomationRegistryLogicB.contract, event: "UpkeepUnpaused", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepUnpaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistryLogicB.contract.WatchLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryLogicBUpkeepUnpaused) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicBFilterer) ParseUpkeepUnpaused(log types.Log) (*AutomationRegistryLogicBUpkeepUnpaused, error) { + event := new(AutomationRegistryLogicBUpkeepUnpaused) + if err := _AutomationRegistryLogicB.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetSignerInfo struct { + Active bool + Index uint8 +} +type GetState struct { + State AutomationRegistryBase22State + Config AutomationRegistryBase22OnchainConfigLegacy + Signers []common.Address + Transmitters []common.Address + F uint8 +} +type GetTransmitterInfo struct { + Active bool + Index uint8 + Balance *big.Int + LastCollected *big.Int + Payee common.Address +} 
+ +func (_AutomationRegistryLogicB *AutomationRegistryLogicB) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _AutomationRegistryLogicB.abi.Events["AdminPrivilegeConfigSet"].ID: + return _AutomationRegistryLogicB.ParseAdminPrivilegeConfigSet(log) + case _AutomationRegistryLogicB.abi.Events["CancelledUpkeepReport"].ID: + return _AutomationRegistryLogicB.ParseCancelledUpkeepReport(log) + case _AutomationRegistryLogicB.abi.Events["ChainSpecificModuleUpdated"].ID: + return _AutomationRegistryLogicB.ParseChainSpecificModuleUpdated(log) + case _AutomationRegistryLogicB.abi.Events["DedupKeyAdded"].ID: + return _AutomationRegistryLogicB.ParseDedupKeyAdded(log) + case _AutomationRegistryLogicB.abi.Events["FundsAdded"].ID: + return _AutomationRegistryLogicB.ParseFundsAdded(log) + case _AutomationRegistryLogicB.abi.Events["FundsWithdrawn"].ID: + return _AutomationRegistryLogicB.ParseFundsWithdrawn(log) + case _AutomationRegistryLogicB.abi.Events["InsufficientFundsUpkeepReport"].ID: + return _AutomationRegistryLogicB.ParseInsufficientFundsUpkeepReport(log) + case _AutomationRegistryLogicB.abi.Events["OwnerFundsWithdrawn"].ID: + return _AutomationRegistryLogicB.ParseOwnerFundsWithdrawn(log) + case _AutomationRegistryLogicB.abi.Events["OwnershipTransferRequested"].ID: + return _AutomationRegistryLogicB.ParseOwnershipTransferRequested(log) + case _AutomationRegistryLogicB.abi.Events["OwnershipTransferred"].ID: + return _AutomationRegistryLogicB.ParseOwnershipTransferred(log) + case _AutomationRegistryLogicB.abi.Events["Paused"].ID: + return _AutomationRegistryLogicB.ParsePaused(log) + case _AutomationRegistryLogicB.abi.Events["PayeesUpdated"].ID: + return _AutomationRegistryLogicB.ParsePayeesUpdated(log) + case _AutomationRegistryLogicB.abi.Events["PayeeshipTransferRequested"].ID: + return _AutomationRegistryLogicB.ParsePayeeshipTransferRequested(log) + case _AutomationRegistryLogicB.abi.Events["PayeeshipTransferred"].ID: + return 
_AutomationRegistryLogicB.ParsePayeeshipTransferred(log) + case _AutomationRegistryLogicB.abi.Events["PaymentWithdrawn"].ID: + return _AutomationRegistryLogicB.ParsePaymentWithdrawn(log) + case _AutomationRegistryLogicB.abi.Events["ReorgedUpkeepReport"].ID: + return _AutomationRegistryLogicB.ParseReorgedUpkeepReport(log) + case _AutomationRegistryLogicB.abi.Events["StaleUpkeepReport"].ID: + return _AutomationRegistryLogicB.ParseStaleUpkeepReport(log) + case _AutomationRegistryLogicB.abi.Events["Unpaused"].ID: + return _AutomationRegistryLogicB.ParseUnpaused(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepAdminTransferRequested"].ID: + return _AutomationRegistryLogicB.ParseUpkeepAdminTransferRequested(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepAdminTransferred"].ID: + return _AutomationRegistryLogicB.ParseUpkeepAdminTransferred(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepCanceled"].ID: + return _AutomationRegistryLogicB.ParseUpkeepCanceled(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepCheckDataSet"].ID: + return _AutomationRegistryLogicB.ParseUpkeepCheckDataSet(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepGasLimitSet"].ID: + return _AutomationRegistryLogicB.ParseUpkeepGasLimitSet(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepMigrated"].ID: + return _AutomationRegistryLogicB.ParseUpkeepMigrated(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepOffchainConfigSet"].ID: + return _AutomationRegistryLogicB.ParseUpkeepOffchainConfigSet(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepPaused"].ID: + return _AutomationRegistryLogicB.ParseUpkeepPaused(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepPerformed"].ID: + return _AutomationRegistryLogicB.ParseUpkeepPerformed(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepPrivilegeConfigSet"].ID: + return _AutomationRegistryLogicB.ParseUpkeepPrivilegeConfigSet(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepReceived"].ID: + return 
_AutomationRegistryLogicB.ParseUpkeepReceived(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepRegistered"].ID: + return _AutomationRegistryLogicB.ParseUpkeepRegistered(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepTriggerConfigSet"].ID: + return _AutomationRegistryLogicB.ParseUpkeepTriggerConfigSet(log) + case _AutomationRegistryLogicB.abi.Events["UpkeepUnpaused"].ID: + return _AutomationRegistryLogicB.ParseUpkeepUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (AutomationRegistryLogicBAdminPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x7c44b4eb59ee7873514e7e43e7718c269d872965938b288aa143befca62f99d2") +} + +func (AutomationRegistryLogicBCancelledUpkeepReport) Topic() common.Hash { + return common.HexToHash("0xc3237c8807c467c1b39b8d0395eff077313e691bf0a7388106792564ebfd5636") +} + +func (AutomationRegistryLogicBChainSpecificModuleUpdated) Topic() common.Hash { + return common.HexToHash("0xdefc28b11a7980dbe0c49dbbd7055a1584bc8075097d1e8b3b57fb7283df2ad7") +} + +func (AutomationRegistryLogicBDedupKeyAdded) Topic() common.Hash { + return common.HexToHash("0xa4a4e334c0e330143f9437484fe516c13bc560b86b5b0daf58e7084aaac228f2") +} + +func (AutomationRegistryLogicBFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (AutomationRegistryLogicBFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (AutomationRegistryLogicBInsufficientFundsUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x377c8b0c126ae5248d27aca1c76fac4608aff85673ee3caf09747e1044549e02") +} + +func (AutomationRegistryLogicBOwnerFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1") +} + +func 
(AutomationRegistryLogicBOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (AutomationRegistryLogicBOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (AutomationRegistryLogicBPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (AutomationRegistryLogicBPayeesUpdated) Topic() common.Hash { + return common.HexToHash("0xa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725") +} + +func (AutomationRegistryLogicBPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (AutomationRegistryLogicBPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (AutomationRegistryLogicBPaymentWithdrawn) Topic() common.Hash { + return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (AutomationRegistryLogicBReorgedUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x6aa7f60c176da7af894b384daea2249497448137f5943c1237ada8bc92bdc301") +} + +func (AutomationRegistryLogicBStaleUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x405288ea7be309e16cfdf481367f90a413e1d4634fcdaf8966546db9b93012e8") +} + +func (AutomationRegistryLogicBUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (AutomationRegistryLogicBUpkeepAdminTransferRequested) Topic() common.Hash { + return common.HexToHash("0xb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b35") +} + +func (AutomationRegistryLogicBUpkeepAdminTransferred) Topic() common.Hash { + return 
common.HexToHash("0x5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c") +} + +func (AutomationRegistryLogicBUpkeepCanceled) Topic() common.Hash { + return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (AutomationRegistryLogicBUpkeepCheckDataSet) Topic() common.Hash { + return common.HexToHash("0xcba2d5723b2ee59e53a8e8a82a4a7caf4fdfe70e9f7c582950bf7e7a5c24e83d") +} + +func (AutomationRegistryLogicBUpkeepGasLimitSet) Topic() common.Hash { + return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c") +} + +func (AutomationRegistryLogicBUpkeepMigrated) Topic() common.Hash { + return common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff") +} + +func (AutomationRegistryLogicBUpkeepOffchainConfigSet) Topic() common.Hash { + return common.HexToHash("0x3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf4850") +} + +func (AutomationRegistryLogicBUpkeepPaused) Topic() common.Hash { + return common.HexToHash("0x8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f") +} + +func (AutomationRegistryLogicBUpkeepPerformed) Topic() common.Hash { + return common.HexToHash("0xad8cc9579b21dfe2c2f6ea35ba15b656e46b4f5b0cb424f52739b8ce5cac9c5b") +} + +func (AutomationRegistryLogicBUpkeepPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x2fd8d70753a007014349d4591843cc031c2dd7a260d7dd82eca8253686ae7769") +} + +func (AutomationRegistryLogicBUpkeepReceived) Topic() common.Hash { + return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71") +} + +func (AutomationRegistryLogicBUpkeepRegistered) Topic() common.Hash { + return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + +func (AutomationRegistryLogicBUpkeepTriggerConfigSet) Topic() common.Hash { + return common.HexToHash("0x2b72ac786c97e68dbab71023ed6f2bdbfc80ad9bb7808941929229d71b7d5664") +} + 
+func (AutomationRegistryLogicBUpkeepUnpaused) Topic() common.Hash { + return common.HexToHash("0x7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a47456") +} + +func (_AutomationRegistryLogicB *AutomationRegistryLogicB) Address() common.Address { + return _AutomationRegistryLogicB.address +} + +type AutomationRegistryLogicBInterface interface { + GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetAdminPrivilegeConfig(opts *bind.CallOpts, admin common.Address) ([]byte, error) + + GetAllowedReadOnlyAddress(opts *bind.CallOpts) (common.Address, error) + + GetAutomationForwarderLogic(opts *bind.CallOpts) (common.Address, error) + + GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetCancellationDelay(opts *bind.CallOpts) (*big.Int, error) + + GetChainModule(opts *bind.CallOpts) (common.Address, error) + + GetConditionalGasOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetFastGasFeedAddress(opts *bind.CallOpts) (common.Address, error) + + GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) + + GetLinkAddress(opts *bind.CallOpts) (common.Address, error) + + GetLinkNativeFeedAddress(opts *bind.CallOpts) (common.Address, error) + + GetLogGasOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetMaxPaymentForGas(opts *bind.CallOpts, triggerType uint8, gasLimit uint32) (*big.Int, error) + + GetMinBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) + + GetPerPerformByteGasOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetPerSignerGasOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetReorgProtectionEnabled(opts *bind.CallOpts) (bool, error) + + GetSignerInfo(opts *bind.CallOpts, query common.Address) (GetSignerInfo, + + error) + + GetState(opts *bind.CallOpts) (GetState, + + 
error) + + GetTransmitCalldataFixedBytesOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetTransmitCalldataPerSignerBytesOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetTransmitterInfo(opts *bind.CallOpts, query common.Address) (GetTransmitterInfo, + + error) + + GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) + + GetUpkeep(opts *bind.CallOpts, id *big.Int) (AutomationRegistryBase22UpkeepInfo, error) + + GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + + GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + + HasDedupKey(opts *bind.CallOpts, dedupKey [32]byte) (bool, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error) + + UpkeepVersion(opts *bind.CallOpts) (uint8, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) + + AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) (*types.Transaction, error) + + PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + SetAdminPrivilegeConfig(opts *bind.TransactOpts, admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) + + SetPayees(opts *bind.TransactOpts, payees []common.Address) (*types.Transaction, error) + + SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) + + SetUpkeepCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) + + SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) + + SetUpkeepOffchainConfig(opts *bind.TransactOpts, id *big.Int, config []byte) (*types.Transaction, 
error) + + SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) + + TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) + + WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*AutomationRegistryLogicBAdminPrivilegeConfigSetIterator, error) + + WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) + + ParseAdminPrivilegeConfigSet(log types.Log) (*AutomationRegistryLogicBAdminPrivilegeConfigSet, error) + + FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBCancelledUpkeepReportIterator, error) + + WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseCancelledUpkeepReport(log types.Log) (*AutomationRegistryLogicBCancelledUpkeepReport, error) + + FilterChainSpecificModuleUpdated(opts *bind.FilterOpts) (*AutomationRegistryLogicBChainSpecificModuleUpdatedIterator, error) + + WatchChainSpecificModuleUpdated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBChainSpecificModuleUpdated) (event.Subscription, 
error) + + ParseChainSpecificModuleUpdated(log types.Log) (*AutomationRegistryLogicBChainSpecificModuleUpdated, error) + + FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*AutomationRegistryLogicBDedupKeyAddedIterator, error) + + WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBDedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) + + ParseDedupKeyAdded(log types.Log) (*AutomationRegistryLogicBDedupKeyAdded, error) + + FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*AutomationRegistryLogicBFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*AutomationRegistryLogicBFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBFundsWithdrawn, id []*big.Int) (event.Subscription, error) + + ParseFundsWithdrawn(log types.Log) (*AutomationRegistryLogicBFundsWithdrawn, error) + + FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBInsufficientFundsUpkeepReportIterator, error) + + WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseInsufficientFundsUpkeepReport(log types.Log) (*AutomationRegistryLogicBInsufficientFundsUpkeepReport, error) + + FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*AutomationRegistryLogicBOwnerFundsWithdrawnIterator, error) + + WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBOwnerFundsWithdrawn) (event.Subscription, error) + + ParseOwnerFundsWithdrawn(log types.Log) (*AutomationRegistryLogicBOwnerFundsWithdrawn, error) + + 
FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistryLogicBOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*AutomationRegistryLogicBOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistryLogicBOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*AutomationRegistryLogicBOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*AutomationRegistryLogicBPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*AutomationRegistryLogicBPaused, error) + + FilterPayeesUpdated(opts *bind.FilterOpts) (*AutomationRegistryLogicBPayeesUpdatedIterator, error) + + WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBPayeesUpdated) (event.Subscription, error) + + ParsePayeesUpdated(log types.Log) (*AutomationRegistryLogicBPayeesUpdated, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*AutomationRegistryLogicBPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) 
(*AutomationRegistryLogicBPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*AutomationRegistryLogicBPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*AutomationRegistryLogicBPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*AutomationRegistryLogicBPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*AutomationRegistryLogicBPaymentWithdrawn, error) + + FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBReorgedUpkeepReportIterator, error) + + WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseReorgedUpkeepReport(log types.Log) (*AutomationRegistryLogicBReorgedUpkeepReport, error) + + FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBStaleUpkeepReportIterator, error) + + WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBStaleUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseStaleUpkeepReport(log types.Log) (*AutomationRegistryLogicBStaleUpkeepReport, error) + + FilterUnpaused(opts *bind.FilterOpts) (*AutomationRegistryLogicBUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUnpaused) (event.Subscription, error) + + 
ParseUnpaused(log types.Log) (*AutomationRegistryLogicBUnpaused, error) + + FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*AutomationRegistryLogicBUpkeepAdminTransferRequestedIterator, error) + + WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferRequested(log types.Log) (*AutomationRegistryLogicBUpkeepAdminTransferRequested, error) + + FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*AutomationRegistryLogicBUpkeepAdminTransferredIterator, error) + + WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferred(log types.Log) (*AutomationRegistryLogicBUpkeepAdminTransferred, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*AutomationRegistryLogicBUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*AutomationRegistryLogicBUpkeepCanceled, error) + + FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepCheckDataSetIterator, error) + + WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepCheckDataSet(log types.Log) (*AutomationRegistryLogicBUpkeepCheckDataSet, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepGasLimitSetIterator, error) + + 
WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*AutomationRegistryLogicBUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepMigrated(log types.Log) (*AutomationRegistryLogicBUpkeepMigrated, error) + + FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepOffchainConfigSetIterator, error) + + WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepOffchainConfigSet(log types.Log) (*AutomationRegistryLogicBUpkeepOffchainConfigSet, error) + + FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepPausedIterator, error) + + WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepPaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPaused(log types.Log) (*AutomationRegistryLogicBUpkeepPaused, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*AutomationRegistryLogicBUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*AutomationRegistryLogicBUpkeepPerformed, error) + + FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepPrivilegeConfigSetIterator, error) + + WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepPrivilegeConfigSet, id []*big.Int) 
(event.Subscription, error) + + ParseUpkeepPrivilegeConfigSet(log types.Log) (*AutomationRegistryLogicBUpkeepPrivilegeConfigSet, error) + + FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*AutomationRegistryLogicBUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*AutomationRegistryLogicBUpkeepRegistered, error) + + FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepTriggerConfigSetIterator, error) + + WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepTriggerConfigSet(log types.Log) (*AutomationRegistryLogicBUpkeepTriggerConfigSet, error) + + FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryLogicBUpkeepUnpausedIterator, error) + + WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryLogicBUpkeepUnpaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepUnpaused(log types.Log) (*AutomationRegistryLogicBUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/automation_registry_wrapper_2_2/automation_registry_wrapper_2_2.go b/core/gethwrappers/generated/automation_registry_wrapper_2_2/automation_registry_wrapper_2_2.go new file mode 100644 index 00000000..46c0156c --- /dev/null +++ 
b/core/gethwrappers/generated/automation_registry_wrapper_2_2/automation_registry_wrapper_2_2.go @@ -0,0 +1,5300 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package automation_registry_wrapper_2_2 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type AutomationRegistryBase22OnchainConfig struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + MaxCheckDataSize uint32 + MaxPerformDataSize uint32 + MaxRevertDataSize uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrars []common.Address + UpkeepPrivilegeManager common.Address + ChainModule common.Address + ReorgProtectionEnabled bool +} + +var AutomationRegistryMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"contractAutomationRegistryLogicB2_2\",\"name\":\"logicA\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CheckDataExceedsLimit\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ConfigDigestMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfFaultyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidReport\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTransmitter\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTrigger\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTriggerType\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxCheckDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxPerformDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveTransmitters\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"typ
e\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByUpkeepPrivilegeManager\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyPausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUnpausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReentrantCall\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistryPaused\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedTransmitter\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepAlreadyExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepCancelled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"AdminPrivilegeConfigSet\",\"type\":\"ev
ent\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"CancelledUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newModule\",\"type\":\"address\"}],\"name\":\"ChainSpecificModuleUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"dedupKey\",\"type\":\"bytes32\"}],\"name\":\"DedupKeyAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",
\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"InsufficientFundsUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"PayeesUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"t
ype\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"ReorgedUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"StaleUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"a
ddress\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"UpkeepCheckDataSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepOffchainConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\"
:\"UpkeepPaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasOverhead\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepTriggerConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"
id\",\"type\":\"uint256\"}],\"name\":\"UpkeepUnpaused\",\"type\":\"event\"},{\"stateMutability\":\"nonpayable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"fallbackTo\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"onchainConfigBytes\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"inte
rnalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCheckDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxRevertDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"registrars\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"upkeepPrivilegeManager\",\"type\":\"address\"},{\"internalType\":\"contractIChainModule\",\"name\":\"chainModule\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"reorgProtectionEnabled\",\"type\":\"bool\"}],\"internalType\":\"structAutomationRegistryBase2_2.OnchainConfig\",\"name\":\"onchainConfig\",\"type\":\"tuple\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{
\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfigTypeSafe\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"simulatePerformUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"rawReport\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x6101406040523480156200001257600080fd5b50604051620051bb380380620051bb8339810160408190526200003591620003b1565b80816001600160a01b031663ca30e6036040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000075573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200009b9190620003b1565b826001600160a01b031663b10b673c6040518163ffffffff1660e01b8152600401602060405180830381865afa158015620000da573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001009190620003b1565b836001600160a01b0316636709d0e56040518163ffffffff1660e01b8152600401602060405180830381865afa1580156200013f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001659190620003b1565b846001600160a01b0316635425d8ac6040518163ffffffff1660e01b8152600401602060405180830381865afa158015620001a4573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001ca9190620003b1565b856001600160a01b031663a08714c06040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000209573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200022f9190620003b1565b3380600081620002865760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620002b957620002b981620002ed565b5050506001600160a01b0394851660805292841660a05290831660c052821660e052811661010052166101205250620003d8565b336001600160a01b03821603620003475760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016200027d565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6001600160a01b0381168114620003ae57600080fd5b50565b600060208284031215620003c457600080fd5b8151620003d18162000398565b9392505050565b60805160a05160c05160e051610
1005161012051614d96620004256000396000818160d6015261016f01526000611b29015260005050600050506000505060006104330152614d966000f3fe608060405234801561001057600080fd5b50600436106100d45760003560e01c8063aed2e92911610081578063e3d0e7121161005b578063e3d0e712146102e0578063f2fde38b146102f3578063f75f6b1114610306576100d4565b8063aed2e92914610262578063afcb95d71461028c578063b1dc65a4146102cd576100d4565b806381ff7048116100b257806381ff7048146101bc5780638da5cb5b14610231578063a4c0ed361461024f576100d4565b8063181f5a771461011b578063349e8cca1461016d57806379ba5097146101b4575b7f00000000000000000000000000000000000000000000000000000000000000003660008037600080366000845af43d6000803e808015610114573d6000f35b3d6000fd5b005b6101576040518060400160405280601881526020017f4175746f6d6174696f6e526567697374727920322e322e30000000000000000081525081565b6040516101649190613a92565b60405180910390f35b7f00000000000000000000000000000000000000000000000000000000000000005b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610164565b610119610319565b61020e60155460115463ffffffff780100000000000000000000000000000000000000000000000083048116937c01000000000000000000000000000000000000000000000000000000009093041691565b6040805163ffffffff948516815293909216602084015290820152606001610164565b60005473ffffffffffffffffffffffffffffffffffffffff1661018f565b61011961025d366004613b20565b61041b565b610275610270366004613b7c565b610637565b604080519215158352602083019190915201610164565b601154601254604080516000815260208101939093527401000000000000000000000000000000000000000090910463ffffffff1690820152606001610164565b6101196102db366004613c0d565b6107ad565b6101196102ee366004613ede565b610ae8565b610119610301366004613fab565b610b11565b6101196103143660046141af565b610b25565b60015473ffffffffffffffffffffffffffffffffffffffff16331461039f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337ffffffff
fffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461048a576040517fc8bad78d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b602081146104c4576040517fdfe9309000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006104d28284018461423e565b60008181526004602052604090205490915065010000000000900463ffffffff9081161461052c576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000818152600460205260409020600101546105679085906c0100000000000000000000000090046bffffffffffffffffffffffff16614286565b600082815260046020526040902060010180546bffffffffffffffffffffffff929092166c01000000000000000000000000027fffffffffffffffff000000000000000000000000ffffffffffffffffffffffff9092169190911790556019546105d29085906142ab565b6019556040516bffffffffffffffffffffffff8516815273ffffffffffffffffffffffffffffffffffffffff86169082907fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039060200160405180910390a35050505050565b600080610642611b11565b6012547e01000000000000000000000000000000000000000000000000000000000000900460ff16156106a1576040517f24522f3400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600085815260046020908152604091829020825160e081018452815460ff811615158252610100810463ffffffff908116838601819052650100000000008304821684880152690100000000000000000090920473ffffffffffffffffffffffffffffffffffffffff16606084018190526001909401546bffffffffffffffffffffffff80821660808601526c0100000000000000000000000082041660a0850152780100000000000000000000000000000000000000000000000090041660c08301528451601f890185900485028101850190955287855290936107a093899089908190840183828082843760009
2019190915250611b8292505050565b9097909650945050505050565b60005a60408051610160810182526012546bffffffffffffffffffffffff8116825263ffffffff6c010000000000000000000000008204811660208401527001000000000000000000000000000000008204811693830193909352740100000000000000000000000000000000000000008104909216606082015262ffffff7801000000000000000000000000000000000000000000000000830416608082015261ffff7b0100000000000000000000000000000000000000000000000000000083041660a082015260ff7d0100000000000000000000000000000000000000000000000000000000008304811660c08301527e010000000000000000000000000000000000000000000000000000000000008304811615801560e08401527f01000000000000000000000000000000000000000000000000000000000000009093048116151561010080840191909152601354918216151561012084015273ffffffffffffffffffffffffffffffffffffffff910416610140820152919250610963576040517f24522f3400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336000908152600b602052604090205460ff166109ac576040517f1099ed7500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6011548a35146109e8576040517fdfdcf8e700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60c08101516109f89060016142ed565b60ff1686141580610a095750858414155b15610a40576040517f0244f71a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610a508a8a8a8a8a8a8a8a611dab565b6000610a5c8a8a612014565b905060208b0135600881901c63ffffffff16610a798484876120cf565b836060015163ffffffff168163ffffffff161115610ad957601280547fffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffff167401000000000000000000000000000000000000000063ffffffff8416021790555b50505050505050505050505050565b610b0986868686806020019051810190610b0291906143ac565b8686610b25565b505050505050565b610b19612a4e565b610b2281612acf565b50565b610b2d612a4e565b601f86511115610b69576040517f25d0209c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8360ff16600003610ba6576040517fe
77dba5600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b84518651141580610bc55750610bbd84600361452e565b60ff16865111155b15610bfc576040517f1d2d1c5800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601254600e546bffffffffffffffffffffffff9091169060005b816bffffffffffffffffffffffff16811015610c7e57610c6b600e8281548110610c4257610c426142be565b60009182526020909120015473ffffffffffffffffffffffffffffffffffffffff168484612bc4565b5080610c768161454a565b915050610c16565b5060008060005b836bffffffffffffffffffffffff16811015610d8757600d8181548110610cae57610cae6142be565b600091825260209091200154600e805473ffffffffffffffffffffffffffffffffffffffff90921694509082908110610ce957610ce96142be565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff8681168452600c8352604080852080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001690559116808452600b90925290912080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055915080610d7f8161454a565b915050610c85565b50610d94600d6000613971565b610da0600e6000613971565b604080516080810182526000808252602082018190529181018290526060810182905290805b8c5181101561120957600c60008e8381518110610de557610de56142be565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff1682528101919091526040016000205460ff1615610e50576040517f77cea0fa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168d8281518110610e7a57610e7a6142be565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1603610ecf576040517f815e1d6400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60405180604001604052806001151581526020018260ff16815250600c60008f8481518110610f0057610f006142be565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff1682528181019290925260400160002082518154939092015160ff16610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0
0ff921515929092167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000909316929092171790558b518c9082908110610fa857610fa86142be565b60200260200101519150600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1603611018576040517f58a70a0a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff82166000908152600b60209081526040918290208251608081018452905460ff80821615801584526101008304909116938301939093526bffffffffffffffffffffffff6201000082048116948301949094526e010000000000000000000000000000900490921660608301529093506110d3576040517f6a7281ad00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001835260ff80821660208086019182526bffffffffffffffffffffffff808b166060880190815273ffffffffffffffffffffffffffffffffffffffff87166000908152600b909352604092839020885181549551948a0151925184166e010000000000000000000000000000027fffffffffffff000000000000000000000000ffffffffffffffffffffffffffff939094166201000002929092167fffffffffffff000000000000000000000000000000000000000000000000ffff94909616610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff921515929092167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000090951694909417179190911692909217919091179055806112018161454a565b915050610dc6565b50508a5161121f9150600d9060208d019061398f565b50885161123390600e9060208c019061398f565b50604051806101600160405280856bffffffffffffffffffffffff168152602001886000015163ffffffff168152602001886020015163ffffffff168152602001600063ffffffff168152602001886060015162ffffff168152602001886080015161ffff1681526020018960ff1681526020016012600001601e9054906101000a900460ff16151581526020016012600001601f9054906101000a900460ff161515815260200188610200015115158152602001886101e0015173ffffffffffffffffffffffffffffffffffffffff16815250601260008201518160000160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555060208201518160000
1600c6101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160106101000a81548163ffffffff021916908363ffffffff16021790555060608201518160000160146101000a81548163ffffffff021916908363ffffffff16021790555060808201518160000160186101000a81548162ffffff021916908362ffffff16021790555060a082015181600001601b6101000a81548161ffff021916908361ffff16021790555060c082015181600001601d6101000a81548160ff021916908360ff16021790555060e082015181600001601e6101000a81548160ff02191690831515021790555061010082015181600001601f6101000a81548160ff0219169083151502179055506101208201518160010160006101000a81548160ff0219169083151502179055506101408201518160010160016101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050506040518061018001604052808860a001516bffffffffffffffffffffffff16815260200188610180015173ffffffffffffffffffffffffffffffffffffffff168152602001601460010160009054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff168152602001886040015163ffffffff1681526020018860c0015163ffffffff168152602001601460010160149054906101000a900463ffffffff1663ffffffff168152602001601460010160189054906101000a900463ffffffff1663ffffffff1681526020016014600101601c9054906101000a900463ffffffff1663ffffffff1681526020018860e0015163ffffffff16815260200188610100015163ffffffff16815260200188610120015163ffffffff168152602001886101c0015173ffffffffffffffffffffffffffffffffffffffff16815250601460008201518160000160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550602082015181600001600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060408201518160010160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550606082015181600101600c6101000a81548163ffffffff021916908363ffffffff16021790555060808201518160010160106101000a81548163ffffffff021916908363ffffffff16021790555060a08201518160010160146101000a81548163fffffff
f021916908363ffffffff16021790555060c08201518160010160186101000a81548163ffffffff021916908363ffffffff16021790555060e082015181600101601c6101000a81548163ffffffff021916908363ffffffff1602179055506101008201518160020160006101000a81548163ffffffff021916908363ffffffff1602179055506101208201518160020160046101000a81548163ffffffff021916908363ffffffff1602179055506101408201518160020160086101000a81548163ffffffff021916908363ffffffff16021790555061016082015181600201600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555090505086610140015160178190555086610160015160188190555060006014600101601c9054906101000a900463ffffffff169050876101e0015173ffffffffffffffffffffffffffffffffffffffff166357e871e76040518163ffffffff1660e01b8152600401602060405180830381865afa1580156118d4573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906118f89190614582565b601580547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167c010000000000000000000000000000000000000000000000000000000063ffffffff9384160217808255600192601891611973918591780100000000000000000000000000000000000000000000000090041661459b565b92506101000a81548163ffffffff021916908363ffffffff1602179055506000886040516020016119a49190614609565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0018152919052601554909150611a0d90469030907801000000000000000000000000000000000000000000000000900463ffffffff168f8f8f878f8f612dcc565b60115560005b611a1d6009612e76565b811015611a4d57611a3a611a32600983612e80565b600990612e93565b5080611a458161454a565b915050611a13565b5060005b896101a0015151811015611aa457611a918a6101a001518281518110611a7957611a796142be565b60200260200101516009612eb590919063ffffffff16565b5080611a9c8161454a565b915050611a51565b507f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e0582601154601460010160189054906101000a900463ffffffff168f8f8f878f8f604051611afb999897969594939291906147ad565b60405180910390a1505050505050505050505050565b3273fffffffffff
fffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614611b80576040517fb60ac5db00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b60125460009081907f0100000000000000000000000000000000000000000000000000000000000000900460ff1615611be7576040517f37ed32e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601280547effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f01000000000000000000000000000000000000000000000000000000000000001790556040517f4585e33b0000000000000000000000000000000000000000000000000000000090611c63908590602401613a92565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009094169390931790925290517f79188d1600000000000000000000000000000000000000000000000000000000815290935073ffffffffffffffffffffffffffffffffffffffff8616906379188d1690611d369087908790600401614843565b60408051808303816000875af1158015611d54573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611d78919061485c565b601280547effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff16905590969095509350505050565b60008787604051611dbd92919061488a565b604051908190038120611dd4918b9060200161489a565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815282825280516020918201208383019092526000808452908301819052909250906000805b88811015611fab57600185878360208110611e4057611e406142be565b611e4d91901a601b6142ed565b8c8c85818110611e5f57611e5f6142be565b905060200201358b8b86818110611e7857611e786142be565b9050602002013560405160008152602001604052604051611eb5949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa158015611ed7573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015
173ffffffffffffffffffffffffffffffffffffffff81166000908152600c602090815290849020838501909452925460ff8082161515808552610100909204169383019390935290955093509050611f85576040517f0f4c073700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b826020015160080260ff166001901b840193508080611fa39061454a565b915050611e23565b50827e01010101010101010101010101010101010101010101010101010101010101841614612006576040517fc103be2e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b505050505050505050505050565b61204d6040518060c001604052806000815260200160008152602001606081526020016060815260200160608152602001606081525090565b600061205b8385018561498b565b604081015151606082015151919250908114158061207e57508082608001515114155b8061208e5750808260a001515114155b156120c5576040517fb55ac75400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5090505b92915050565b600082604001515167ffffffffffffffff8111156120ef576120ef613cc4565b6040519080825280602002602001820160405280156121ab57816020015b604080516101c081018252600060e08201818152610100830182905261012083018290526101408301829052610160830182905261018083018290526101a0830182905282526020808301829052928201819052606082018190526080820181905260a0820181905260c082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90920191018161210d5790505b50905060006040518060800160405280600061ffff1681526020016000815260200160006bffffffffffffffffffffffff16815260200160006bffffffffffffffffffffffff168152509050600085610140015173ffffffffffffffffffffffffffffffffffffffff166357e871e76040518163ffffffff1660e01b8152600401602060405180830381865afa158015612249573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061226d9190614582565b9050600086610140015173ffffffffffffffffffffffffffffffffffffffff166318b8f6136040518163ffffffff1660e01b8152600401602060405180830381865afa1580156122c1573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906122e59190614582565b9050600
05b866040015151811015612734576004600088604001518381518110612311576123116142be565b6020908102919091018101518252818101929092526040908101600020815160e081018352815460ff811615158252610100810463ffffffff90811695830195909552650100000000008104851693820193909352690100000000000000000090920473ffffffffffffffffffffffffffffffffffffffff166060830152600101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a08301527801000000000000000000000000000000000000000000000000900490911660c082015285518690839081106123f6576123f66142be565b60200260200101516000018190525061242b8760400151828151811061241e5761241e6142be565b6020026020010151612ed7565b85828151811061243d5761243d6142be565b602002602001015160600190600181111561245a5761245a614a78565b9081600181111561246d5761246d614a78565b815250506124d18760400151828151811061248a5761248a6142be565b602002602001015184896080015184815181106124a9576124a96142be565b60200260200101518885815181106124c3576124c36142be565b60200260200101518c612f82565b8683815181106124e3576124e36142be565b6020026020010151602001878481518110612500576125006142be565b602002602001015160c001828152508215151515815250505084818151811061252b5761252b6142be565b6020026020010151602001511561255b5760018460000181815161254f9190614aa7565b61ffff16905250612560565b612722565b6125c6858281518110612575576125756142be565b602002602001015160000151606001518860600151838151811061259b5761259b6142be565b60200260200101518960a0015184815181106125b9576125b96142be565b6020026020010151611b82565b8683815181106125d8576125d86142be565b60200260200101516040018784815181106125f5576125f56142be565b602090810291909101015160800191909152901515905260c088015161261c9060016142ed565b61262a9060ff166040614ac2565b6103a48860a001518381518110612643576126436142be565b60200260200101515161265691906142ab565b61266091906142ab565b858281518110612672576126726142be565b602002602001015160a0018181525050848181518110612694576126946142be565b602002602001015160a00151846020018181516126b191906142ab565b90525084518590829081106126c8576126c86142be565b60200260200101516
0800151866126df9190614ad9565b9550612722876040015182815181106126fa576126fa6142be565b602002602001015184878481518110612715576127156142be565b60200260200101516130a1565b8061272c8161454a565b9150506122ea565b50825161ffff1660000361274b5750505050505050565b6155f0612759366010614ac2565b5a6127649088614ad9565b61276e91906142ab565b61277891906142ab565b8351909550611b589061278f9061ffff1687614b1b565b61279991906142ab565b945060008060005b88604001515181101561297d578681815181106127c0576127c06142be565b6020026020010151602001511561296b576128598a8a6040015183815181106127eb576127eb6142be565b6020026020010151898481518110612805576128056142be565b6020026020010151608001518c600001518d602001518d8c602001518e8981518110612833576128336142be565b602002602001015160a001518c61284a9190614ac2565b6128549190614b1b565b6131a6565b6060880180519295509093508391612872908390614286565b6bffffffffffffffffffffffff16905250604086018051849190612897908390614286565b6bffffffffffffffffffffffff1690525086518790829081106128bc576128bc6142be565b6020026020010151604001511515896040015182815181106128e0576128e06142be565b60200260200101517fad8cc9579b21dfe2c2f6ea35ba15b656e46b4f5b0cb424f52739b8ce5cac9c5b84866129159190614286565b8a8581518110612927576129276142be565b6020026020010151608001518c8e60800151878151811061294a5761294a6142be565b60200260200101516040516129629493929190614b2f565b60405180910390a35b806129758161454a565b9150506127a1565b505050604083810151336000908152600b6020529190912080546002906129b99084906201000090046bffffffffffffffffffffffff16614286565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508260600151601260000160008282829054906101000a90046bffffffffffffffffffffffff16612a179190614286565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555050505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314611b80576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720
00000000000000000006044820152606401610396565b3373ffffffffffffffffffffffffffffffffffffffff821603612b4e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610396565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b73ffffffffffffffffffffffffffffffffffffffff83166000908152600b602090815260408083208151608081018352905460ff80821615801584526101008304909116948301949094526bffffffffffffffffffffffff6201000082048116938301939093526e0100000000000000000000000000009004909116606082015290612dc0576000816060015185612c5c9190614b6c565b90506000612c6a8583614b91565b90508083604001818151612c7e9190614286565b6bffffffffffffffffffffffff16905250612c998582614bbc565b83606001818151612caa9190614286565b6bffffffffffffffffffffffff90811690915273ffffffffffffffffffffffffffffffffffffffff89166000908152600b602090815260409182902087518154928901519389015160608a015186166e010000000000000000000000000000027fffffffffffff000000000000000000000000ffffffffffffffffffffffffffff919096166201000002167fffffffffffff000000000000000000000000000000000000000000000000ffff60ff95909516610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff921515929092167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000909416939093171792909216179190911790555050505b60400151949350505050565b6000808a8a8a8a8a8a8a8a8a604051602001612df099989796959493929190614bec565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e01000000000000000000000000000000000000000000000000000000000000179b9a5050505050505050505050565b60006120c9825490565b6000612e8c8383613329565b9392505050565b6000612e8c8373fffffffffffff
fffffffffffffffffffffffffff8416613353565b6000612e8c8373ffffffffffffffffffffffffffffffffffffffff841661344d565b6000818160045b600f811015612f64577fff000000000000000000000000000000000000000000000000000000000000008216838260208110612f1c57612f1c6142be565b1a60f81b7effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191614612f5257506000949350505050565b80612f5c8161454a565b915050612ede565b5081600f1a6001811115612f7a57612f7a614a78565b949350505050565b600080808085606001516001811115612f9d57612f9d614a78565b03612fc357612faf888888888861349c565b612fbe57600092509050613097565b61303b565b600185606001516001811115612fdb57612fdb614a78565b03613009576000612fee89898988613627565b92509050806130035750600092509050613097565b5061303b565b6040517ff2b2d41200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b84516040015163ffffffff16871061309057877fc3237c8807c467c1b39b8d0395eff077313e691bf0a7388106792564ebfd56368760405161307d9190613a92565b60405180910390a2600092509050613097565b6001925090505b9550959350505050565b6000816060015160018111156130b9576130b9614a78565b0361311d57600083815260046020526040902060010180547fffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffff16780100000000000000000000000000000000000000000000000063ffffffff851602179055505050565b60018160600151600181111561313557613135614a78565b036131a15760c08101805160009081526008602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055915191517fa4a4e334c0e330143f9437484fe516c13bc560b86b5b0daf58e7084aaac228f29190a25b505050565b6000806131b9898886868a8a6001613835565b60008a8152600460205260408120600101549294509092506c010000000000000000000000009091046bffffffffffffffffffffffff16906131fb8385614286565b9050836bffffffffffffffffffffffff16826bffffffffffffffffffffffff16101561322f57509150600090508180613262565b806bffffffffffffffffffffffff16826bffffffffffffffffffffffff16101561326257508061325f8482614b6c565b92505b60008a81526004602052604090206001018054829190600c906132a49084906c0100000000000
000000000000090046bffffffffffffffffffffffff16614b6c565b82546101009290920a6bffffffffffffffffffffffff81810219909316918316021790915560008c8152600460205260408120600101805485945090926132ed91859116614286565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550505097509795505050505050565b6000826000018281548110613340576133406142be565b9060005260206000200154905092915050565b6000818152600183016020526040812054801561343c576000613377600183614ad9565b855490915060009061338b90600190614ad9565b90508181146133f05760008660000182815481106133ab576133ab6142be565b90600052602060002001549050808760000184815481106133ce576133ce6142be565b6000918252602080832090910192909255918252600188019052604090208390555b855486908061340157613401614c81565b6001900381819060005260206000200160009055905585600101600086815260200190815260200160002060009055600193505050506120c9565b60009150506120c9565b5092915050565b6000818152600183016020526040812054613494575081546001818101845560008481526020808220909301849055845484825282860190935260409020919091556120c9565b5060006120c9565b600080848060200190518101906134b39190614cb0565b845160c00151815191925063ffffffff9081169116101561351057867f405288ea7be309e16cfdf481367f90a413e1d4634fcdaf8966546db9b93012e8866040516134fe9190613a92565b60405180910390a2600091505061361e565b82610120015180156135d157506020810151158015906135d15750602081015161014084015182516040517f85df51fd00000000000000000000000000000000000000000000000000000000815263ffffffff909116600482015273ffffffffffffffffffffffffffffffffffffffff909116906385df51fd90602401602060405180830381865afa1580156135aa573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906135ce9190614582565b14155b806135e35750805163ffffffff168611155b1561361857867f6aa7f60c176da7af894b384daea2249497448137f5943c1237ada8bc92bdc301866040516134fe9190613a92565b60019150505b95945050505050565b6000806000848060200190518101906136409190614d08565b90506000878260000151836020015184604001516040516020016136a2949392919093845260208401929092526040830
15260e01b7fffffffff0000000000000000000000000000000000000000000000000000000016606082015260640190565b604051602081830303815290604052805190602001209050846101200151801561377e575060808201511580159061377e5750608082015161014086015160608401516040517f85df51fd00000000000000000000000000000000000000000000000000000000815263ffffffff909116600482015273ffffffffffffffffffffffffffffffffffffffff909116906385df51fd90602401602060405180830381865afa158015613757573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061377b9190614582565b14155b80613793575086826060015163ffffffff1610155b156137dd57877f6aa7f60c176da7af894b384daea2249497448137f5943c1237ada8bc92bdc301876040516137c89190613a92565b60405180910390a260009350915061382c9050565b60008181526008602052604090205460ff161561382457877f405288ea7be309e16cfdf481367f90a413e1d4634fcdaf8966546db9b93012e8876040516137c89190613a92565b600193509150505b94509492505050565b60008060008960a0015161ffff168661384e9190614ac2565b905083801561385c5750803a105b1561386457503a5b600085886138728b8d6142ab565b61387c9085614ac2565b61388691906142ab565b61389890670de0b6b3a7640000614ac2565b6138a29190614b1b565b905060008b6040015163ffffffff1664e8d4a510006138c19190614ac2565b60208d0151889063ffffffff168b6138d98f88614ac2565b6138e391906142ab565b6138f190633b9aca00614ac2565b6138fb9190614ac2565b6139059190614b1b565b61390f91906142ab565b90506b033b2e3c9fd0803ce800000061392882846142ab565b1115613960576040517f2ad7547a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b909b909a5098505050505050505050565b5080546000825590600052602060002090810190610b229190613a19565b828054828255906000526020600020908101928215613a09579160200282015b82811115613a0957825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9091161782556020909201916001909101906139af565b50613a15929150613a19565b5090565b5b80821115613a155760008155600101613a1a565b6000815180845260005b81811015613a5457602081850181015186830182015201613a38565b506000602
0828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081526000612e8c6020830184613a2e565b73ffffffffffffffffffffffffffffffffffffffff81168114610b2257600080fd5b8035613ad281613aa5565b919050565b60008083601f840112613ae957600080fd5b50813567ffffffffffffffff811115613b0157600080fd5b602083019150836020828501011115613b1957600080fd5b9250929050565b60008060008060608587031215613b3657600080fd5b8435613b4181613aa5565b935060208501359250604085013567ffffffffffffffff811115613b6457600080fd5b613b7087828801613ad7565b95989497509550505050565b600080600060408486031215613b9157600080fd5b83359250602084013567ffffffffffffffff811115613baf57600080fd5b613bbb86828701613ad7565b9497909650939450505050565b60008083601f840112613bda57600080fd5b50813567ffffffffffffffff811115613bf257600080fd5b6020830191508360208260051b8501011115613b1957600080fd5b60008060008060008060008060e0898b031215613c2957600080fd5b606089018a811115613c3a57600080fd5b8998503567ffffffffffffffff80821115613c5457600080fd5b613c608c838d01613ad7565b909950975060808b0135915080821115613c7957600080fd5b613c858c838d01613bc8565b909750955060a08b0135915080821115613c9e57600080fd5b50613cab8b828c01613bc8565b999c989b50969995989497949560c00135949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051610220810167ffffffffffffffff81118282101715613d1757613d17613cc4565b60405290565b60405160c0810167ffffffffffffffff81118282101715613d1757613d17613cc4565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715613d8757613d87613cc4565b604052919050565b600067ffffffffffffffff821115613da957613da9613cc4565b5060051b60200190565b600082601f830112613dc457600080fd5b81356020613dd9613dd483613d8f565b613d40565b82815260059290921b84018101918181019086841115613df857600080fd5b8286015b84811015613e1c578035613e0f81613aa5565b8352918301918301613dfc565b509695505050505050565b803560ff81168114613ad257600080fd5b600082601f8301126
13e4957600080fd5b813567ffffffffffffffff811115613e6357613e63613cc4565b613e9460207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601613d40565b818152846020838601011115613ea957600080fd5b816020850160208301376000918101602001919091529392505050565b803567ffffffffffffffff81168114613ad257600080fd5b60008060008060008060c08789031215613ef757600080fd5b863567ffffffffffffffff80821115613f0f57600080fd5b613f1b8a838b01613db3565b97506020890135915080821115613f3157600080fd5b613f3d8a838b01613db3565b9650613f4b60408a01613e27565b95506060890135915080821115613f6157600080fd5b613f6d8a838b01613e38565b9450613f7b60808a01613ec6565b935060a0890135915080821115613f9157600080fd5b50613f9e89828a01613e38565b9150509295509295509295565b600060208284031215613fbd57600080fd5b8135612e8c81613aa5565b63ffffffff81168114610b2257600080fd5b8035613ad281613fc8565b62ffffff81168114610b2257600080fd5b8035613ad281613fe5565b61ffff81168114610b2257600080fd5b8035613ad281614001565b6bffffffffffffffffffffffff81168114610b2257600080fd5b8035613ad28161401c565b8015158114610b2257600080fd5b8035613ad281614041565b6000610220828403121561406d57600080fd5b614075613cf3565b905061408082613fda565b815261408e60208301613fda565b602082015261409f60408301613fda565b60408201526140b060608301613ff6565b60608201526140c160808301614011565b60808201526140d260a08301614036565b60a08201526140e360c08301613fda565b60c08201526140f460e08301613fda565b60e0820152610100614107818401613fda565b90820152610120614119838201613fda565b9082015261014082810135908201526101608083013590820152610180614141818401613ac7565b908201526101a08281013567ffffffffffffffff81111561416157600080fd5b61416d85828601613db3565b8284015250506101c0614181818401613ac7565b908201526101e0614193838201613ac7565b908201526102006141a583820161404f565b9082015292915050565b60008060008060008060c087890312156141c857600080fd5b863567ffffffffffffffff808211156141e057600080fd5b6141ec8a838b01613db3565b9750602089013591508082111561420257600080fd5b61420e8a838b01613db3565b965061421c60408a01613e27565b9550606089013591508082111
561423257600080fd5b613f6d8a838b0161405a565b60006020828403121561425057600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6bffffffffffffffffffffffff81811683821601908082111561344657613446614257565b808201808211156120c9576120c9614257565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60ff81811683821601908111156120c9576120c9614257565b8051613ad281613fc8565b8051613ad281613fe5565b8051613ad281614001565b8051613ad28161401c565b8051613ad281613aa5565b600082601f83011261434e57600080fd5b8151602061435e613dd483613d8f565b82815260059290921b8401810191818101908684111561437d57600080fd5b8286015b84811015613e1c57805161439481613aa5565b8352918301918301614381565b8051613ad281614041565b6000602082840312156143be57600080fd5b815167ffffffffffffffff808211156143d657600080fd5b9083019061022082860312156143eb57600080fd5b6143f3613cf3565b6143fc83614306565b815261440a60208401614306565b602082015261441b60408401614306565b604082015261442c60608401614311565b606082015261443d6080840161431c565b608082015261444e60a08401614327565b60a082015261445f60c08401614306565b60c082015261447060e08401614306565b60e0820152610100614483818501614306565b90820152610120614495848201614306565b90820152610140838101519082015261016080840151908201526101806144bd818501614332565b908201526101a083810151838111156144d557600080fd5b6144e18882870161433d565b8284015250506101c091506144f7828401614332565b828201526101e0915061450b828401614332565b82820152610200915061451f8284016143a1565b91810191909152949350505050565b60ff818116838216029081169081811461344657613446614257565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361457b5761457b614257565b5060010190565b60006020828403121561459457600080fd5b5051919050565b63ffffffff81811683821601908082111561344657613446614257565b600081518084526020808501945080840160005b838110156145fe57815173ffffffffffffffffffffffffffffffffffffffff16875295820195908201906001016145cc565b509495945050505050565b602081526146206
0208201835163ffffffff169052565b60006020830151614639604084018263ffffffff169052565b50604083015163ffffffff8116606084015250606083015162ffffff8116608084015250608083015161ffff811660a08401525060a08301516bffffffffffffffffffffffff811660c08401525060c083015163ffffffff811660e08401525060e08301516101006146b28185018363ffffffff169052565b84015190506101206146cb8482018363ffffffff169052565b84015190506101406146e48482018363ffffffff169052565b840151610160848101919091528401516101808085019190915284015190506101a06147278185018373ffffffffffffffffffffffffffffffffffffffff169052565b808501519150506102206101c081818601526147476102408601846145b8565b908601519092506101e06147728682018373ffffffffffffffffffffffffffffffffffffffff169052565b860151905061020061479b8682018373ffffffffffffffffffffffffffffffffffffffff169052565b90950151151593019290925250919050565b600061012063ffffffff808d1684528b6020850152808b166040850152508060608401526147dd8184018a6145b8565b905082810360808401526147f181896145b8565b905060ff871660a084015282810360c084015261480e8187613a2e565b905067ffffffffffffffff851660e08401528281036101008401526148338185613a2e565b9c9b505050505050505050505050565b828152604060208201526000612f7a6040830184613a2e565b6000806040838503121561486f57600080fd5b825161487a81614041565b6020939093015192949293505050565b8183823760009101908152919050565b8281526080810160608360208401379392505050565b600082601f8301126148c157600080fd5b813560206148d1613dd483613d8f565b82815260059290921b840181019181810190868411156148f057600080fd5b8286015b84811015613e1c57803583529183019183016148f4565b600082601f83011261491c57600080fd5b8135602061492c613dd483613d8f565b82815260059290921b8401810191818101908684111561494b57600080fd5b8286015b84811015613e1c57803567ffffffffffffffff81111561496f5760008081fd5b61497d8986838b0101613e38565b84525091830191830161494f565b60006020828403121561499d57600080fd5b813567ffffffffffffffff808211156149b557600080fd5b9083019060c082860312156149c957600080fd5b6149d1613d1d565b82358152602083013560208201526040830135828111156149f157600080fd5b6149fd878286016
148b0565b604083015250606083013582811115614a1557600080fd5b614a21878286016148b0565b606083015250608083013582811115614a3957600080fd5b614a458782860161490b565b60808301525060a083013582811115614a5d57600080fd5b614a698782860161490b565b60a08301525095945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b61ffff81811683821601908082111561344657613446614257565b80820281158282048414176120c9576120c9614257565b818103818111156120c9576120c9614257565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b600082614b2a57614b2a614aec565b500490565b6bffffffffffffffffffffffff85168152836020820152826040820152608060608201526000614b626080830184613a2e565b9695505050505050565b6bffffffffffffffffffffffff82811682821603908082111561344657613446614257565b60006bffffffffffffffffffffffff80841680614bb057614bb0614aec565b92169190910492915050565b6bffffffffffffffffffffffff818116838216028082169190828114614be457614be4614257565b505092915050565b60006101208b835273ffffffffffffffffffffffffffffffffffffffff8b16602084015267ffffffffffffffff808b166040850152816060850152614c338285018b6145b8565b91508382036080850152614c47828a6145b8565b915060ff881660a085015283820360c0850152614c648288613a2e565b90861660e085015283810361010085015290506148338185613a2e565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b600060408284031215614cc257600080fd5b6040516040810181811067ffffffffffffffff82111715614ce557614ce5613cc4565b6040528251614cf381613fc8565b81526020928301519281019290925250919050565b600060a08284031215614d1a57600080fd5b60405160a0810181811067ffffffffffffffff82111715614d3d57614d3d613cc4565b806040525082518152602083015160208201526040830151614d5e81613fc8565b60408201526060830151614d7181613fc8565b6060820152608092830151928101929092525091905056fea164736f6c6343000813000a", +} + +var AutomationRegistryABI = AutomationRegistryMetaData.ABI + +var AutomationRegistryBin = AutomationRegistryMetaData.Bin + +func 
DeployAutomationRegistry(auth *bind.TransactOpts, backend bind.ContractBackend, logicA common.Address) (common.Address, *types.Transaction, *AutomationRegistry, error) { + parsed, err := AutomationRegistryMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(AutomationRegistryBin), backend, logicA) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &AutomationRegistry{address: address, abi: *parsed, AutomationRegistryCaller: AutomationRegistryCaller{contract: contract}, AutomationRegistryTransactor: AutomationRegistryTransactor{contract: contract}, AutomationRegistryFilterer: AutomationRegistryFilterer{contract: contract}}, nil +} + +type AutomationRegistry struct { + address common.Address + abi abi.ABI + AutomationRegistryCaller + AutomationRegistryTransactor + AutomationRegistryFilterer +} + +type AutomationRegistryCaller struct { + contract *bind.BoundContract +} + +type AutomationRegistryTransactor struct { + contract *bind.BoundContract +} + +type AutomationRegistryFilterer struct { + contract *bind.BoundContract +} + +type AutomationRegistrySession struct { + Contract *AutomationRegistry + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AutomationRegistryCallerSession struct { + Contract *AutomationRegistryCaller + CallOpts bind.CallOpts +} + +type AutomationRegistryTransactorSession struct { + Contract *AutomationRegistryTransactor + TransactOpts bind.TransactOpts +} + +type AutomationRegistryRaw struct { + Contract *AutomationRegistry +} + +type AutomationRegistryCallerRaw struct { + Contract *AutomationRegistryCaller +} + +type AutomationRegistryTransactorRaw struct { + Contract *AutomationRegistryTransactor +} + +func NewAutomationRegistry(address common.Address, backend bind.ContractBackend) 
(*AutomationRegistry, error) { + abi, err := abi.JSON(strings.NewReader(AutomationRegistryABI)) + if err != nil { + return nil, err + } + contract, err := bindAutomationRegistry(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AutomationRegistry{address: address, abi: abi, AutomationRegistryCaller: AutomationRegistryCaller{contract: contract}, AutomationRegistryTransactor: AutomationRegistryTransactor{contract: contract}, AutomationRegistryFilterer: AutomationRegistryFilterer{contract: contract}}, nil +} + +func NewAutomationRegistryCaller(address common.Address, caller bind.ContractCaller) (*AutomationRegistryCaller, error) { + contract, err := bindAutomationRegistry(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AutomationRegistryCaller{contract: contract}, nil +} + +func NewAutomationRegistryTransactor(address common.Address, transactor bind.ContractTransactor) (*AutomationRegistryTransactor, error) { + contract, err := bindAutomationRegistry(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AutomationRegistryTransactor{contract: contract}, nil +} + +func NewAutomationRegistryFilterer(address common.Address, filterer bind.ContractFilterer) (*AutomationRegistryFilterer, error) { + contract, err := bindAutomationRegistry(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AutomationRegistryFilterer{contract: contract}, nil +} + +func bindAutomationRegistry(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := AutomationRegistryMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AutomationRegistry *AutomationRegistryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return 
_AutomationRegistry.Contract.AutomationRegistryCaller.contract.Call(opts, result, method, params...) +} + +func (_AutomationRegistry *AutomationRegistryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistry.Contract.AutomationRegistryTransactor.contract.Transfer(opts) +} + +func (_AutomationRegistry *AutomationRegistryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationRegistry.Contract.AutomationRegistryTransactor.contract.Transact(opts, method, params...) +} + +func (_AutomationRegistry *AutomationRegistryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationRegistry.Contract.contract.Call(opts, result, method, params...) +} + +func (_AutomationRegistry *AutomationRegistryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistry.Contract.contract.Transfer(opts) +} + +func (_AutomationRegistry *AutomationRegistryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationRegistry.Contract.contract.Transact(opts, method, params...) 
+} + +func (_AutomationRegistry *AutomationRegistryCaller) FallbackTo(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistry.contract.Call(opts, &out, "fallbackTo") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistry *AutomationRegistrySession) FallbackTo() (common.Address, error) { + return _AutomationRegistry.Contract.FallbackTo(&_AutomationRegistry.CallOpts) +} + +func (_AutomationRegistry *AutomationRegistryCallerSession) FallbackTo() (common.Address, error) { + return _AutomationRegistry.Contract.FallbackTo(&_AutomationRegistry.CallOpts) +} + +func (_AutomationRegistry *AutomationRegistryCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := _AutomationRegistry.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_AutomationRegistry *AutomationRegistrySession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _AutomationRegistry.Contract.LatestConfigDetails(&_AutomationRegistry.CallOpts) +} + +func (_AutomationRegistry *AutomationRegistryCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _AutomationRegistry.Contract.LatestConfigDetails(&_AutomationRegistry.CallOpts) +} + +func (_AutomationRegistry *AutomationRegistryCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) { + var out []interface{} + err := _AutomationRegistry.contract.Call(opts, &out, "latestConfigDigestAndEpoch") + + 
outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_AutomationRegistry *AutomationRegistrySession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _AutomationRegistry.Contract.LatestConfigDigestAndEpoch(&_AutomationRegistry.CallOpts) +} + +func (_AutomationRegistry *AutomationRegistryCallerSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _AutomationRegistry.Contract.LatestConfigDigestAndEpoch(&_AutomationRegistry.CallOpts) +} + +func (_AutomationRegistry *AutomationRegistryCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _AutomationRegistry.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_AutomationRegistry *AutomationRegistrySession) Owner() (common.Address, error) { + return _AutomationRegistry.Contract.Owner(&_AutomationRegistry.CallOpts) +} + +func (_AutomationRegistry *AutomationRegistryCallerSession) Owner() (common.Address, error) { + return _AutomationRegistry.Contract.Owner(&_AutomationRegistry.CallOpts) +} + +func (_AutomationRegistry *AutomationRegistryCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _AutomationRegistry.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_AutomationRegistry *AutomationRegistrySession) TypeAndVersion() (string, error) { + return 
_AutomationRegistry.Contract.TypeAndVersion(&_AutomationRegistry.CallOpts) +} + +func (_AutomationRegistry *AutomationRegistryCallerSession) TypeAndVersion() (string, error) { + return _AutomationRegistry.Contract.TypeAndVersion(&_AutomationRegistry.CallOpts) +} + +func (_AutomationRegistry *AutomationRegistryTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationRegistry.contract.Transact(opts, "acceptOwnership") +} + +func (_AutomationRegistry *AutomationRegistrySession) AcceptOwnership() (*types.Transaction, error) { + return _AutomationRegistry.Contract.AcceptOwnership(&_AutomationRegistry.TransactOpts) +} + +func (_AutomationRegistry *AutomationRegistryTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _AutomationRegistry.Contract.AcceptOwnership(&_AutomationRegistry.TransactOpts) +} + +func (_AutomationRegistry *AutomationRegistryTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _AutomationRegistry.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +func (_AutomationRegistry *AutomationRegistrySession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _AutomationRegistry.Contract.OnTokenTransfer(&_AutomationRegistry.TransactOpts, sender, amount, data) +} + +func (_AutomationRegistry *AutomationRegistryTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _AutomationRegistry.Contract.OnTokenTransfer(&_AutomationRegistry.TransactOpts, sender, amount, data) +} + +func (_AutomationRegistry *AutomationRegistryTransactor) SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return 
_AutomationRegistry.contract.Transact(opts, "setConfig", signers, transmitters, f, onchainConfigBytes, offchainConfigVersion, offchainConfig) +} + +func (_AutomationRegistry *AutomationRegistrySession) SetConfig(signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _AutomationRegistry.Contract.SetConfig(&_AutomationRegistry.TransactOpts, signers, transmitters, f, onchainConfigBytes, offchainConfigVersion, offchainConfig) +} + +func (_AutomationRegistry *AutomationRegistryTransactorSession) SetConfig(signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _AutomationRegistry.Contract.SetConfig(&_AutomationRegistry.TransactOpts, signers, transmitters, f, onchainConfigBytes, offchainConfigVersion, offchainConfig) +} + +func (_AutomationRegistry *AutomationRegistryTransactor) SetConfigTypeSafe(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig AutomationRegistryBase22OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _AutomationRegistry.contract.Transact(opts, "setConfigTypeSafe", signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_AutomationRegistry *AutomationRegistrySession) SetConfigTypeSafe(signers []common.Address, transmitters []common.Address, f uint8, onchainConfig AutomationRegistryBase22OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _AutomationRegistry.Contract.SetConfigTypeSafe(&_AutomationRegistry.TransactOpts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_AutomationRegistry *AutomationRegistryTransactorSession) SetConfigTypeSafe(signers 
[]common.Address, transmitters []common.Address, f uint8, onchainConfig AutomationRegistryBase22OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _AutomationRegistry.Contract.SetConfigTypeSafe(&_AutomationRegistry.TransactOpts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_AutomationRegistry *AutomationRegistryTransactor) SimulatePerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) { + return _AutomationRegistry.contract.Transact(opts, "simulatePerformUpkeep", id, performData) +} + +func (_AutomationRegistry *AutomationRegistrySession) SimulatePerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _AutomationRegistry.Contract.SimulatePerformUpkeep(&_AutomationRegistry.TransactOpts, id, performData) +} + +func (_AutomationRegistry *AutomationRegistryTransactorSession) SimulatePerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _AutomationRegistry.Contract.SimulatePerformUpkeep(&_AutomationRegistry.TransactOpts, id, performData) +} + +func (_AutomationRegistry *AutomationRegistryTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _AutomationRegistry.contract.Transact(opts, "transferOwnership", to) +} + +func (_AutomationRegistry *AutomationRegistrySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AutomationRegistry.Contract.TransferOwnership(&_AutomationRegistry.TransactOpts, to) +} + +func (_AutomationRegistry *AutomationRegistryTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _AutomationRegistry.Contract.TransferOwnership(&_AutomationRegistry.TransactOpts, to) +} + +func (_AutomationRegistry *AutomationRegistryTransactor) Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, rawReport []byte, rs 
[][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _AutomationRegistry.contract.Transact(opts, "transmit", reportContext, rawReport, rs, ss, rawVs) +} + +func (_AutomationRegistry *AutomationRegistrySession) Transmit(reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _AutomationRegistry.Contract.Transmit(&_AutomationRegistry.TransactOpts, reportContext, rawReport, rs, ss, rawVs) +} + +func (_AutomationRegistry *AutomationRegistryTransactorSession) Transmit(reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _AutomationRegistry.Contract.Transmit(&_AutomationRegistry.TransactOpts, reportContext, rawReport, rs, ss, rawVs) +} + +func (_AutomationRegistry *AutomationRegistryTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _AutomationRegistry.contract.RawTransact(opts, calldata) +} + +func (_AutomationRegistry *AutomationRegistrySession) Fallback(calldata []byte) (*types.Transaction, error) { + return _AutomationRegistry.Contract.Fallback(&_AutomationRegistry.TransactOpts, calldata) +} + +func (_AutomationRegistry *AutomationRegistryTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _AutomationRegistry.Contract.Fallback(&_AutomationRegistry.TransactOpts, calldata) +} + +type AutomationRegistryAdminPrivilegeConfigSetIterator struct { + Event *AutomationRegistryAdminPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryAdminPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err 
!= nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryAdminPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryAdminPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryAdminPrivilegeConfigSet struct { + Admin common.Address + PrivilegeConfig []byte + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*AutomationRegistryAdminPrivilegeConfigSetIterator, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return &AutomationRegistryAdminPrivilegeConfigSetIterator{contract: _AutomationRegistry.contract, event: "AdminPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + 
select { + case log := <-logs: + + event := new(AutomationRegistryAdminPrivilegeConfigSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseAdminPrivilegeConfigSet(log types.Log) (*AutomationRegistryAdminPrivilegeConfigSet, error) { + event := new(AutomationRegistryAdminPrivilegeConfigSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryCancelledUpkeepReportIterator struct { + Event *AutomationRegistryCancelledUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryCancelledUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryCancelledUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryCancelledUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil 
+} + +type AutomationRegistryCancelledUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryCancelledUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryCancelledUpkeepReportIterator{contract: _AutomationRegistry.contract, event: "CancelledUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryCancelledUpkeepReport) + if err := _AutomationRegistry.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseCancelledUpkeepReport(log types.Log) (*AutomationRegistryCancelledUpkeepReport, error) { + event := new(AutomationRegistryCancelledUpkeepReport) + if err := _AutomationRegistry.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return nil, err + } + 
event.Raw = log + return event, nil +} + +type AutomationRegistryChainSpecificModuleUpdatedIterator struct { + Event *AutomationRegistryChainSpecificModuleUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryChainSpecificModuleUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryChainSpecificModuleUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryChainSpecificModuleUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryChainSpecificModuleUpdatedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryChainSpecificModuleUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryChainSpecificModuleUpdated struct { + NewModule common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterChainSpecificModuleUpdated(opts *bind.FilterOpts) (*AutomationRegistryChainSpecificModuleUpdatedIterator, error) { + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "ChainSpecificModuleUpdated") + if err != nil { + return nil, err + } + return &AutomationRegistryChainSpecificModuleUpdatedIterator{contract: _AutomationRegistry.contract, event: "ChainSpecificModuleUpdated", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchChainSpecificModuleUpdated(opts *bind.WatchOpts, sink chan<- 
*AutomationRegistryChainSpecificModuleUpdated) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "ChainSpecificModuleUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryChainSpecificModuleUpdated) + if err := _AutomationRegistry.contract.UnpackLog(event, "ChainSpecificModuleUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseChainSpecificModuleUpdated(log types.Log) (*AutomationRegistryChainSpecificModuleUpdated, error) { + event := new(AutomationRegistryChainSpecificModuleUpdated) + if err := _AutomationRegistry.contract.UnpackLog(event, "ChainSpecificModuleUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryConfigSetIterator struct { + Event *AutomationRegistryConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := 
<-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterConfigSet(opts *bind.FilterOpts) (*AutomationRegistryConfigSetIterator, error) { + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &AutomationRegistryConfigSetIterator{contract: _AutomationRegistry.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryConfigSet) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryConfigSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseConfigSet(log types.Log) (*AutomationRegistryConfigSet, error) { + event := new(AutomationRegistryConfigSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "ConfigSet", log); err 
!= nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryDedupKeyAddedIterator struct { + Event *AutomationRegistryDedupKeyAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryDedupKeyAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryDedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryDedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryDedupKeyAddedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryDedupKeyAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryDedupKeyAdded struct { + DedupKey [32]byte + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*AutomationRegistryDedupKeyAddedIterator, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return &AutomationRegistryDedupKeyAddedIterator{contract: _AutomationRegistry.contract, event: "DedupKeyAdded", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchDedupKeyAdded(opts *bind.WatchOpts, sink 
chan<- *AutomationRegistryDedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryDedupKeyAdded) + if err := _AutomationRegistry.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseDedupKeyAdded(log types.Log) (*AutomationRegistryDedupKeyAdded, error) { + event := new(AutomationRegistryDedupKeyAdded) + if err := _AutomationRegistry.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryFundsAddedIterator struct { + Event *AutomationRegistryFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + 
return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryFundsAddedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*AutomationRegistryFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &AutomationRegistryFundsAddedIterator{contract: _AutomationRegistry.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *AutomationRegistryFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryFundsAdded) + if err := _AutomationRegistry.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case 
sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseFundsAdded(log types.Log) (*AutomationRegistryFundsAdded, error) { + event := new(AutomationRegistryFundsAdded) + if err := _AutomationRegistry.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryFundsWithdrawnIterator struct { + Event *AutomationRegistryFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id 
{ + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryFundsWithdrawnIterator{contract: _AutomationRegistry.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryFundsWithdrawn) + if err := _AutomationRegistry.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseFundsWithdrawn(log types.Log) (*AutomationRegistryFundsWithdrawn, error) { + event := new(AutomationRegistryFundsWithdrawn) + if err := _AutomationRegistry.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryInsufficientFundsUpkeepReportIterator struct { + Event *AutomationRegistryInsufficientFundsUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryInsufficientFundsUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { 
+ select { + case log := <-it.logs: + it.Event = new(AutomationRegistryInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryInsufficientFundsUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryInsufficientFundsUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryInsufficientFundsUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryInsufficientFundsUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryInsufficientFundsUpkeepReportIterator{contract: _AutomationRegistry.contract, event: "InsufficientFundsUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, 
"InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryInsufficientFundsUpkeepReport) + if err := _AutomationRegistry.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseInsufficientFundsUpkeepReport(log types.Log) (*AutomationRegistryInsufficientFundsUpkeepReport, error) { + event := new(AutomationRegistryInsufficientFundsUpkeepReport) + if err := _AutomationRegistry.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryOwnerFundsWithdrawnIterator struct { + Event *AutomationRegistryOwnerFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryOwnerFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + 
+func (it *AutomationRegistryOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*AutomationRegistryOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &AutomationRegistryOwnerFundsWithdrawnIterator{contract: _AutomationRegistry.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryOwnerFundsWithdrawn) + if err := _AutomationRegistry.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*AutomationRegistryOwnerFundsWithdrawn, error) { + event := new(AutomationRegistryOwnerFundsWithdrawn) + if err := _AutomationRegistry.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
AutomationRegistryOwnershipTransferRequestedIterator struct { + Event *AutomationRegistryOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistryOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return 
&AutomationRegistryOwnershipTransferRequestedIterator{contract: _AutomationRegistry.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryOwnershipTransferRequested) + if err := _AutomationRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseOwnershipTransferRequested(log types.Log) (*AutomationRegistryOwnershipTransferRequested, error) { + event := new(AutomationRegistryOwnershipTransferRequested) + if err := _AutomationRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryOwnershipTransferredIterator struct { + Event *AutomationRegistryOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryOwnershipTransferredIterator) Next() 
bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistryOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryOwnershipTransferredIterator{contract: _AutomationRegistry.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, 
fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryOwnershipTransferred) + if err := _AutomationRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseOwnershipTransferred(log types.Log) (*AutomationRegistryOwnershipTransferred, error) { + event := new(AutomationRegistryOwnershipTransferred) + if err := _AutomationRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryPausedIterator struct { + Event *AutomationRegistryPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw 
= log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryPausedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryPaused struct { + Account common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterPaused(opts *bind.FilterOpts) (*AutomationRegistryPausedIterator, error) { + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &AutomationRegistryPausedIterator{contract: _AutomationRegistry.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryPaused) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryPaused) + if err := _AutomationRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParsePaused(log types.Log) (*AutomationRegistryPaused, error) { + event := new(AutomationRegistryPaused) + if err := _AutomationRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryPayeesUpdatedIterator struct { + Event *AutomationRegistryPayeesUpdated + + contract *bind.BoundContract + event string 
+ + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryPayeesUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryPayeesUpdatedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryPayeesUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryPayeesUpdated struct { + Transmitters []common.Address + Payees []common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterPayeesUpdated(opts *bind.FilterOpts) (*AutomationRegistryPayeesUpdatedIterator, error) { + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return &AutomationRegistryPayeesUpdatedIterator{contract: _AutomationRegistry.contract, event: "PayeesUpdated", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryPayeesUpdated) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(AutomationRegistryPayeesUpdated) + if err := _AutomationRegistry.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParsePayeesUpdated(log types.Log) (*AutomationRegistryPayeesUpdated, error) { + event := new(AutomationRegistryPayeesUpdated) + if err := _AutomationRegistry.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryPayeeshipTransferRequestedIterator struct { + Event *AutomationRegistryPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryPayeeshipTransferRequested struct { + 
Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*AutomationRegistryPayeeshipTransferRequestedIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryPayeeshipTransferRequestedIterator{contract: _AutomationRegistry.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(AutomationRegistryPayeeshipTransferRequested) + if err := _AutomationRegistry.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParsePayeeshipTransferRequested(log types.Log) (*AutomationRegistryPayeeshipTransferRequested, error) { + event := new(AutomationRegistryPayeeshipTransferRequested) + if err := _AutomationRegistry.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryPayeeshipTransferredIterator struct { + Event *AutomationRegistryPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
AutomationRegistryPayeeshipTransferred struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*AutomationRegistryPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryPayeeshipTransferredIterator{contract: _AutomationRegistry.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(AutomationRegistryPayeeshipTransferred) + if err := _AutomationRegistry.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParsePayeeshipTransferred(log types.Log) (*AutomationRegistryPayeeshipTransferred, error) { + event := new(AutomationRegistryPayeeshipTransferred) + if err := _AutomationRegistry.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryPaymentWithdrawnIterator struct { + Event *AutomationRegistryPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryPaymentWithdrawn struct { + Transmitter common.Address + Amount 
*big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*AutomationRegistryPaymentWithdrawnIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryPaymentWithdrawnIterator{contract: _AutomationRegistry.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryPaymentWithdrawn) + if err := 
_AutomationRegistry.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParsePaymentWithdrawn(log types.Log) (*AutomationRegistryPaymentWithdrawn, error) { + event := new(AutomationRegistryPaymentWithdrawn) + if err := _AutomationRegistry.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryReorgedUpkeepReportIterator struct { + Event *AutomationRegistryReorgedUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryReorgedUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryReorgedUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryReorgedUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryReorgedUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistry 
*AutomationRegistryFilterer) FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryReorgedUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryReorgedUpkeepReportIterator{contract: _AutomationRegistry.contract, event: "ReorgedUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryReorgedUpkeepReport) + if err := _AutomationRegistry.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseReorgedUpkeepReport(log types.Log) (*AutomationRegistryReorgedUpkeepReport, error) { + event := new(AutomationRegistryReorgedUpkeepReport) + if err := _AutomationRegistry.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryStaleUpkeepReportIterator struct { + Event *AutomationRegistryStaleUpkeepReport + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryStaleUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryStaleUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryStaleUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryStaleUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryStaleUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryStaleUpkeepReportIterator{contract: _AutomationRegistry.contract, event: "StaleUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryStaleUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, 
idItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryStaleUpkeepReport) + if err := _AutomationRegistry.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseStaleUpkeepReport(log types.Log) (*AutomationRegistryStaleUpkeepReport, error) { + event := new(AutomationRegistryStaleUpkeepReport) + if err := _AutomationRegistry.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryTransmittedIterator struct { + Event *AutomationRegistryTransmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*AutomationRegistryTransmittedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryTransmitted struct { + ConfigDigest [32]byte + Epoch uint32 + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterTransmitted(opts *bind.FilterOpts) (*AutomationRegistryTransmittedIterator, error) { + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return &AutomationRegistryTransmittedIterator{contract: _AutomationRegistry.contract, event: "Transmitted", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchTransmitted(opts *bind.WatchOpts, sink chan<- *AutomationRegistryTransmitted) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryTransmitted) + if err := _AutomationRegistry.contract.UnpackLog(event, "Transmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseTransmitted(log types.Log) (*AutomationRegistryTransmitted, error) { + event := new(AutomationRegistryTransmitted) + if err := _AutomationRegistry.contract.UnpackLog(event, "Transmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUnpausedIterator struct { + Event *AutomationRegistryUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub 
ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUnpausedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUnpaused(opts *bind.FilterOpts) (*AutomationRegistryUnpausedIterator, error) { + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &AutomationRegistryUnpausedIterator{contract: _AutomationRegistry.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUnpaused) (event.Subscription, error) { + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUnpaused) + if err := _AutomationRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + 
event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseUnpaused(log types.Log) (*AutomationRegistryUnpaused, error) { + event := new(AutomationRegistryUnpaused) + if err := _AutomationRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepAdminTransferRequestedIterator struct { + Event *AutomationRegistryUpkeepAdminTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepAdminTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepAdminTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepAdminTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepAdminTransferRequested struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) 
FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*AutomationRegistryUpkeepAdminTransferRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepAdminTransferRequestedIterator{contract: _AutomationRegistry.contract, event: "UpkeepAdminTransferRequested", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUpkeepAdminTransferRequested) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } 
+ case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepAdminTransferRequested(log types.Log) (*AutomationRegistryUpkeepAdminTransferRequested, error) { + event := new(AutomationRegistryUpkeepAdminTransferRequested) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepAdminTransferredIterator struct { + Event *AutomationRegistryUpkeepAdminTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepAdminTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepAdminTransferredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepAdminTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepAdminTransferred struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) 
(*AutomationRegistryUpkeepAdminTransferredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepAdminTransferredIterator{contract: _AutomationRegistry.contract, event: "UpkeepAdminTransferred", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUpkeepAdminTransferred) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) 
ParseUpkeepAdminTransferred(log types.Log) (*AutomationRegistryUpkeepAdminTransferred, error) { + event := new(AutomationRegistryUpkeepAdminTransferred) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepCanceledIterator struct { + Event *AutomationRegistryUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*AutomationRegistryUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = 
append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepCanceledIterator{contract: _AutomationRegistry.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUpkeepCanceled) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepCanceled(log types.Log) (*AutomationRegistryUpkeepCanceled, error) { + event := new(AutomationRegistryUpkeepCanceled) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepCheckDataSetIterator struct { + Event *AutomationRegistryUpkeepCheckDataSet + + contract *bind.BoundContract + event string + + logs chan 
types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepCheckDataSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepCheckDataSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepCheckDataSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepCheckDataSet struct { + Id *big.Int + NewCheckData []byte + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepCheckDataSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepCheckDataSetIterator{contract: _AutomationRegistry.contract, event: "UpkeepCheckDataSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_AutomationRegistry.contract.WatchLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUpkeepCheckDataSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepCheckDataSet(log types.Log) (*AutomationRegistryUpkeepCheckDataSet, error) { + event := new(AutomationRegistryUpkeepCheckDataSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepGasLimitSetIterator struct { + Event *AutomationRegistryUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*AutomationRegistryUpkeepGasLimitSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepGasLimitSetIterator{contract: _AutomationRegistry.contract, event: "UpkeepGasLimitSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUpkeepGasLimitSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepGasLimitSet(log types.Log) (*AutomationRegistryUpkeepGasLimitSet, error) { + event := 
new(AutomationRegistryUpkeepGasLimitSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepMigratedIterator struct { + Event *AutomationRegistryUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepMigrated struct { + Id *big.Int + RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepMigratedIterator{contract: _AutomationRegistry.contract, event: 
"UpkeepMigrated", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUpkeepMigrated) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepMigrated(log types.Log) (*AutomationRegistryUpkeepMigrated, error) { + event := new(AutomationRegistryUpkeepMigrated) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepOffchainConfigSetIterator struct { + Event *AutomationRegistryUpkeepOffchainConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepOffchainConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + 
case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepOffchainConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepOffchainConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepOffchainConfigSet struct { + Id *big.Int + OffchainConfig []byte + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepOffchainConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepOffchainConfigSetIterator{contract: _AutomationRegistry.contract, event: "UpkeepOffchainConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUpkeepOffchainConfigSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return 
err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepOffchainConfigSet(log types.Log) (*AutomationRegistryUpkeepOffchainConfigSet, error) { + event := new(AutomationRegistryUpkeepOffchainConfigSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepPausedIterator struct { + Event *AutomationRegistryUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepPaused struct { + Id *big.Int + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepPausedIterator, error) { + + var idRule 
[]interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepPausedIterator{contract: _AutomationRegistry.contract, event: "UpkeepPaused", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepPaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUpkeepPaused) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepPaused(log types.Log) (*AutomationRegistryUpkeepPaused, error) { + event := new(AutomationRegistryUpkeepPaused) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepPerformedIterator struct { + Event *AutomationRegistryUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := 
<-it.logs: + it.Event = new(AutomationRegistryUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepPerformed struct { + Id *big.Int + Success bool + TotalPayment *big.Int + GasUsed *big.Int + GasOverhead *big.Int + Trigger []byte + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*AutomationRegistryUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepPerformedIterator{contract: _AutomationRegistry.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + 
for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUpkeepPerformed) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepPerformed(log types.Log) (*AutomationRegistryUpkeepPerformed, error) { + event := new(AutomationRegistryUpkeepPerformed) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepPrivilegeConfigSetIterator struct { + Event *AutomationRegistryUpkeepPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return 
true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepPrivilegeConfigSet struct { + Id *big.Int + PrivilegeConfig []byte + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepPrivilegeConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepPrivilegeConfigSetIterator{contract: _AutomationRegistry.contract, event: "UpkeepPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUpkeepPrivilegeConfigSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + 
} + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepPrivilegeConfigSet(log types.Log) (*AutomationRegistryUpkeepPrivilegeConfigSet, error) { + event := new(AutomationRegistryUpkeepPrivilegeConfigSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepReceivedIterator struct { + Event *AutomationRegistryUpkeepReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepReceivedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepReceived struct { + Id *big.Int + StartingBalance *big.Int + ImportedFrom common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepReceivedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_AutomationRegistry.contract.FilterLogs(opts, "UpkeepReceived", idRule)
	if err != nil {
		return nil, err
	}
	return &AutomationRegistryUpkeepReceivedIterator{contract: _AutomationRegistry.contract, event: "UpkeepReceived", logs: logs, sub: sub}, nil
}

// WatchUpkeepReceived subscribes to future UpkeepReceived logs, optionally
// filtered by the indexed upkeep id values, and forwards unpacked events to sink.
func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepReceived, id []*big.Int) (event.Subscription, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepReceived", idRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:

				event := new(AutomationRegistryUpkeepReceived)
				if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepReceived", log); err != nil {
					return err
				}
				event.Raw = log

				// Deliver the event, but abort promptly on subscription error or unsubscribe.
				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseUpkeepReceived decodes a raw UpkeepReceived log into the generated
// event struct, attaching the raw log for reference.
func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepReceived(log types.Log) (*AutomationRegistryUpkeepReceived, error) {
	event := new(AutomationRegistryUpkeepReceived)
	if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepReceived", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// AutomationRegistryUpkeepRegisteredIterator is returned from FilterUpkeepRegistered
// and is used to iterate over the raw logs and unpacked data for UpkeepRegistered events.
type AutomationRegistryUpkeepRegisteredIterator struct {
	Event *AutomationRegistryUpkeepRegistered // current unpacked event; valid after a successful Next()

	contract *bind.BoundContract // generic contract used to unpack event data
	event    string              // event name used for unpacking

	logs chan types.Log        // channel receiving raw filtered logs
	sub  ethereum.Subscription // subscription delivering the logs
	done bool                  // whether the subscription has ended
	fail error                 // first error encountered, if any
}

// Next advances the iterator to the next matching log. It returns false when
// the iteration is exhausted or an error occurred (see Error).
func (it *AutomationRegistryUpkeepRegisteredIterator) Next() bool {

	// A previous failure is sticky: never advance past it.
	if it.fail != nil {
		return false
	}

	// Once the subscription has ended, only drain logs already buffered.
	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(AutomationRegistryUpkeepRegistered)
			if err :=
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepRegistered struct { + Id *big.Int + PerformGas uint32 + Admin common.Address + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepRegisteredIterator{contract: _AutomationRegistry.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUpkeepRegistered) + 
if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepRegistered(log types.Log) (*AutomationRegistryUpkeepRegistered, error) { + event := new(AutomationRegistryUpkeepRegistered) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepTriggerConfigSetIterator struct { + Event *AutomationRegistryUpkeepTriggerConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepTriggerConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepTriggerConfigSetIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepTriggerConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepTriggerConfigSet struct { + Id *big.Int + TriggerConfig []byte + Raw types.Log +} + 
+func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepTriggerConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepTriggerConfigSetIterator{contract: _AutomationRegistry.contract, event: "UpkeepTriggerConfigSet", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(AutomationRegistryUpkeepTriggerConfigSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepTriggerConfigSet(log types.Log) (*AutomationRegistryUpkeepTriggerConfigSet, error) { + event := new(AutomationRegistryUpkeepTriggerConfigSet) + if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type AutomationRegistryUpkeepUnpausedIterator struct { + Event 
*AutomationRegistryUpkeepUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *AutomationRegistryUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(AutomationRegistryUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *AutomationRegistryUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *AutomationRegistryUpkeepUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type AutomationRegistryUpkeepUnpaused struct { + Id *big.Int + Raw types.Log +} + +func (_AutomationRegistry *AutomationRegistryFilterer) FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepUnpausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _AutomationRegistry.contract.FilterLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return &AutomationRegistryUpkeepUnpausedIterator{contract: _AutomationRegistry.contract, event: "UpkeepUnpaused", logs: logs, sub: sub}, nil +} + +func (_AutomationRegistry *AutomationRegistryFilterer) WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepUnpaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } 
	logs, sub, err := _AutomationRegistry.contract.WatchLogs(opts, "UpkeepUnpaused", idRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:

				event := new(AutomationRegistryUpkeepUnpaused)
				if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil {
					return err
				}
				event.Raw = log

				// Deliver the event, but abort promptly on subscription error or unsubscribe.
				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseUpkeepUnpaused decodes a raw UpkeepUnpaused log into the generated
// event struct, attaching the raw log for reference.
func (_AutomationRegistry *AutomationRegistryFilterer) ParseUpkeepUnpaused(log types.Log) (*AutomationRegistryUpkeepUnpaused, error) {
	event := new(AutomationRegistryUpkeepUnpaused)
	if err := _AutomationRegistry.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// LatestConfigDetails is the result struct returned by the LatestConfigDetails call.
type LatestConfigDetails struct {
	ConfigCount  uint32
	BlockNumber  uint32
	ConfigDigest [32]byte
}

// LatestConfigDigestAndEpoch is the result struct returned by the LatestConfigDigestAndEpoch call.
type LatestConfigDigestAndEpoch struct {
	ScanLogs     bool
	ConfigDigest [32]byte
	Epoch        uint32
}

// ParseLog routes a raw log to the matching Parse helper by comparing
// log.Topics[0] against the ABI's event IDs; unknown topics yield an error.
func (_AutomationRegistry *AutomationRegistry) ParseLog(log types.Log) (generated.AbigenLog, error) {
	switch log.Topics[0] {
	case _AutomationRegistry.abi.Events["AdminPrivilegeConfigSet"].ID:
		return _AutomationRegistry.ParseAdminPrivilegeConfigSet(log)
	case _AutomationRegistry.abi.Events["CancelledUpkeepReport"].ID:
		return _AutomationRegistry.ParseCancelledUpkeepReport(log)
	case _AutomationRegistry.abi.Events["ChainSpecificModuleUpdated"].ID:
		return _AutomationRegistry.ParseChainSpecificModuleUpdated(log)
	case _AutomationRegistry.abi.Events["ConfigSet"].ID:
		return _AutomationRegistry.ParseConfigSet(log)
	case _AutomationRegistry.abi.Events["DedupKeyAdded"].ID:
		return _AutomationRegistry.ParseDedupKeyAdded(log)
	case _AutomationRegistry.abi.Events["FundsAdded"].ID:
return _AutomationRegistry.ParseFundsAdded(log) + case _AutomationRegistry.abi.Events["FundsWithdrawn"].ID: + return _AutomationRegistry.ParseFundsWithdrawn(log) + case _AutomationRegistry.abi.Events["InsufficientFundsUpkeepReport"].ID: + return _AutomationRegistry.ParseInsufficientFundsUpkeepReport(log) + case _AutomationRegistry.abi.Events["OwnerFundsWithdrawn"].ID: + return _AutomationRegistry.ParseOwnerFundsWithdrawn(log) + case _AutomationRegistry.abi.Events["OwnershipTransferRequested"].ID: + return _AutomationRegistry.ParseOwnershipTransferRequested(log) + case _AutomationRegistry.abi.Events["OwnershipTransferred"].ID: + return _AutomationRegistry.ParseOwnershipTransferred(log) + case _AutomationRegistry.abi.Events["Paused"].ID: + return _AutomationRegistry.ParsePaused(log) + case _AutomationRegistry.abi.Events["PayeesUpdated"].ID: + return _AutomationRegistry.ParsePayeesUpdated(log) + case _AutomationRegistry.abi.Events["PayeeshipTransferRequested"].ID: + return _AutomationRegistry.ParsePayeeshipTransferRequested(log) + case _AutomationRegistry.abi.Events["PayeeshipTransferred"].ID: + return _AutomationRegistry.ParsePayeeshipTransferred(log) + case _AutomationRegistry.abi.Events["PaymentWithdrawn"].ID: + return _AutomationRegistry.ParsePaymentWithdrawn(log) + case _AutomationRegistry.abi.Events["ReorgedUpkeepReport"].ID: + return _AutomationRegistry.ParseReorgedUpkeepReport(log) + case _AutomationRegistry.abi.Events["StaleUpkeepReport"].ID: + return _AutomationRegistry.ParseStaleUpkeepReport(log) + case _AutomationRegistry.abi.Events["Transmitted"].ID: + return _AutomationRegistry.ParseTransmitted(log) + case _AutomationRegistry.abi.Events["Unpaused"].ID: + return _AutomationRegistry.ParseUnpaused(log) + case _AutomationRegistry.abi.Events["UpkeepAdminTransferRequested"].ID: + return _AutomationRegistry.ParseUpkeepAdminTransferRequested(log) + case _AutomationRegistry.abi.Events["UpkeepAdminTransferred"].ID: + return 
_AutomationRegistry.ParseUpkeepAdminTransferred(log) + case _AutomationRegistry.abi.Events["UpkeepCanceled"].ID: + return _AutomationRegistry.ParseUpkeepCanceled(log) + case _AutomationRegistry.abi.Events["UpkeepCheckDataSet"].ID: + return _AutomationRegistry.ParseUpkeepCheckDataSet(log) + case _AutomationRegistry.abi.Events["UpkeepGasLimitSet"].ID: + return _AutomationRegistry.ParseUpkeepGasLimitSet(log) + case _AutomationRegistry.abi.Events["UpkeepMigrated"].ID: + return _AutomationRegistry.ParseUpkeepMigrated(log) + case _AutomationRegistry.abi.Events["UpkeepOffchainConfigSet"].ID: + return _AutomationRegistry.ParseUpkeepOffchainConfigSet(log) + case _AutomationRegistry.abi.Events["UpkeepPaused"].ID: + return _AutomationRegistry.ParseUpkeepPaused(log) + case _AutomationRegistry.abi.Events["UpkeepPerformed"].ID: + return _AutomationRegistry.ParseUpkeepPerformed(log) + case _AutomationRegistry.abi.Events["UpkeepPrivilegeConfigSet"].ID: + return _AutomationRegistry.ParseUpkeepPrivilegeConfigSet(log) + case _AutomationRegistry.abi.Events["UpkeepReceived"].ID: + return _AutomationRegistry.ParseUpkeepReceived(log) + case _AutomationRegistry.abi.Events["UpkeepRegistered"].ID: + return _AutomationRegistry.ParseUpkeepRegistered(log) + case _AutomationRegistry.abi.Events["UpkeepTriggerConfigSet"].ID: + return _AutomationRegistry.ParseUpkeepTriggerConfigSet(log) + case _AutomationRegistry.abi.Events["UpkeepUnpaused"].ID: + return _AutomationRegistry.ParseUpkeepUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (AutomationRegistryAdminPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x7c44b4eb59ee7873514e7e43e7718c269d872965938b288aa143befca62f99d2") +} + +func (AutomationRegistryCancelledUpkeepReport) Topic() common.Hash { + return common.HexToHash("0xc3237c8807c467c1b39b8d0395eff077313e691bf0a7388106792564ebfd5636") +} + +func (AutomationRegistryChainSpecificModuleUpdated) 
Topic() common.Hash {
	return common.HexToHash("0xdefc28b11a7980dbe0c49dbbd7055a1584bc8075097d1e8b3b57fb7283df2ad7")
}

// The Topic methods below return the fixed log topic (Topics[0]) emitted for
// each event type, so callers can match raw logs to event structs.
func (AutomationRegistryConfigSet) Topic() common.Hash {
	return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05")
}

func (AutomationRegistryDedupKeyAdded) Topic() common.Hash {
	return common.HexToHash("0xa4a4e334c0e330143f9437484fe516c13bc560b86b5b0daf58e7084aaac228f2")
}

func (AutomationRegistryFundsAdded) Topic() common.Hash {
	return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203")
}

func (AutomationRegistryFundsWithdrawn) Topic() common.Hash {
	return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318")
}

func (AutomationRegistryInsufficientFundsUpkeepReport) Topic() common.Hash {
	return common.HexToHash("0x377c8b0c126ae5248d27aca1c76fac4608aff85673ee3caf09747e1044549e02")
}

func (AutomationRegistryOwnerFundsWithdrawn) Topic() common.Hash {
	return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1")
}

func (AutomationRegistryOwnershipTransferRequested) Topic() common.Hash {
	return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278")
}

func (AutomationRegistryOwnershipTransferred) Topic() common.Hash {
	return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0")
}

func (AutomationRegistryPaused) Topic() common.Hash {
	return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258")
}

func (AutomationRegistryPayeesUpdated) Topic() common.Hash {
	return common.HexToHash("0xa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725")
}

func (AutomationRegistryPayeeshipTransferRequested) Topic() common.Hash {
	return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367")
}

func (AutomationRegistryPayeeshipTransferred) Topic() common.Hash {
	return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3")
}

func (AutomationRegistryPaymentWithdrawn) Topic() common.Hash {
	return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698")
}

func (AutomationRegistryReorgedUpkeepReport) Topic() common.Hash {
	return common.HexToHash("0x6aa7f60c176da7af894b384daea2249497448137f5943c1237ada8bc92bdc301")
}

func (AutomationRegistryStaleUpkeepReport) Topic() common.Hash {
	return common.HexToHash("0x405288ea7be309e16cfdf481367f90a413e1d4634fcdaf8966546db9b93012e8")
}

func (AutomationRegistryTransmitted) Topic() common.Hash {
	return common.HexToHash("0xb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62")
}

func (AutomationRegistryUnpaused) Topic() common.Hash {
	return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa")
}

func (AutomationRegistryUpkeepAdminTransferRequested) Topic() common.Hash {
	return common.HexToHash("0xb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b35")
}

func (AutomationRegistryUpkeepAdminTransferred) Topic() common.Hash {
	return common.HexToHash("0x5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c")
}

func (AutomationRegistryUpkeepCanceled) Topic() common.Hash {
	return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181")
}

func (AutomationRegistryUpkeepCheckDataSet) Topic() common.Hash {
	return common.HexToHash("0xcba2d5723b2ee59e53a8e8a82a4a7caf4fdfe70e9f7c582950bf7e7a5c24e83d")
}

func (AutomationRegistryUpkeepGasLimitSet) Topic() common.Hash {
	return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c")
}

func (AutomationRegistryUpkeepMigrated) Topic() common.Hash {
	return common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff")
}
func (AutomationRegistryUpkeepOffchainConfigSet) Topic() common.Hash {
	return common.HexToHash("0x3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf4850")
}

func (AutomationRegistryUpkeepPaused) Topic() common.Hash {
	return common.HexToHash("0x8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f")
}

func (AutomationRegistryUpkeepPerformed) Topic() common.Hash {
	return common.HexToHash("0xad8cc9579b21dfe2c2f6ea35ba15b656e46b4f5b0cb424f52739b8ce5cac9c5b")
}

func (AutomationRegistryUpkeepPrivilegeConfigSet) Topic() common.Hash {
	return common.HexToHash("0x2fd8d70753a007014349d4591843cc031c2dd7a260d7dd82eca8253686ae7769")
}

func (AutomationRegistryUpkeepReceived) Topic() common.Hash {
	return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71")
}

func (AutomationRegistryUpkeepRegistered) Topic() common.Hash {
	return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012")
}

func (AutomationRegistryUpkeepTriggerConfigSet) Topic() common.Hash {
	return common.HexToHash("0x2b72ac786c97e68dbab71023ed6f2bdbfc80ad9bb7808941929229d71b7d5664")
}

func (AutomationRegistryUpkeepUnpaused) Topic() common.Hash {
	return common.HexToHash("0x7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a47456")
}

// Address returns the contract address this binding is bound to.
func (_AutomationRegistry *AutomationRegistry) Address() common.Address {
	return _AutomationRegistry.address
}

// AutomationRegistryInterface lists every call, transact, filter, watch and
// parse method exposed by the generated AutomationRegistry binding.
type AutomationRegistryInterface interface {
	FallbackTo(opts *bind.CallOpts) (common.Address, error)

	LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails,

		error)

	LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch,

		error)

	Owner(opts *bind.CallOpts) (common.Address, error)

	TypeAndVersion(opts *bind.CallOpts) (string, error)

	AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error)

	OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int,
data []byte) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) + + SetConfigTypeSafe(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig AutomationRegistryBase22OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) + + SimulatePerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) + + Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) + + FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*AutomationRegistryAdminPrivilegeConfigSetIterator, error) + + WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) + + ParseAdminPrivilegeConfigSet(log types.Log) (*AutomationRegistryAdminPrivilegeConfigSet, error) + + FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryCancelledUpkeepReportIterator, error) + + WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseCancelledUpkeepReport(log types.Log) (*AutomationRegistryCancelledUpkeepReport, error) + + FilterChainSpecificModuleUpdated(opts *bind.FilterOpts) (*AutomationRegistryChainSpecificModuleUpdatedIterator, error) + + WatchChainSpecificModuleUpdated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryChainSpecificModuleUpdated) (event.Subscription, error) + + 
ParseChainSpecificModuleUpdated(log types.Log) (*AutomationRegistryChainSpecificModuleUpdated, error) + + FilterConfigSet(opts *bind.FilterOpts) (*AutomationRegistryConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*AutomationRegistryConfigSet, error) + + FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*AutomationRegistryDedupKeyAddedIterator, error) + + WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *AutomationRegistryDedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) + + ParseDedupKeyAdded(log types.Log) (*AutomationRegistryDedupKeyAdded, error) + + FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*AutomationRegistryFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *AutomationRegistryFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*AutomationRegistryFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryFundsWithdrawn, id []*big.Int) (event.Subscription, error) + + ParseFundsWithdrawn(log types.Log) (*AutomationRegistryFundsWithdrawn, error) + + FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryInsufficientFundsUpkeepReportIterator, error) + + WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseInsufficientFundsUpkeepReport(log types.Log) (*AutomationRegistryInsufficientFundsUpkeepReport, error) + + FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*AutomationRegistryOwnerFundsWithdrawnIterator, error) + + WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- 
*AutomationRegistryOwnerFundsWithdrawn) (event.Subscription, error) + + ParseOwnerFundsWithdrawn(log types.Log) (*AutomationRegistryOwnerFundsWithdrawn, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistryOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*AutomationRegistryOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*AutomationRegistryOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*AutomationRegistryOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*AutomationRegistryPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*AutomationRegistryPaused, error) + + FilterPayeesUpdated(opts *bind.FilterOpts) (*AutomationRegistryPayeesUpdatedIterator, error) + + WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryPayeesUpdated) (event.Subscription, error) + + ParsePayeesUpdated(log types.Log) (*AutomationRegistryPayeesUpdated, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*AutomationRegistryPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) 
(event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*AutomationRegistryPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*AutomationRegistryPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*AutomationRegistryPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*AutomationRegistryPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *AutomationRegistryPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*AutomationRegistryPaymentWithdrawn, error) + + FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryReorgedUpkeepReportIterator, error) + + WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseReorgedUpkeepReport(log types.Log) (*AutomationRegistryReorgedUpkeepReport, error) + + FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryStaleUpkeepReportIterator, error) + + WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *AutomationRegistryStaleUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseStaleUpkeepReport(log types.Log) (*AutomationRegistryStaleUpkeepReport, error) + + FilterTransmitted(opts *bind.FilterOpts) (*AutomationRegistryTransmittedIterator, error) + + WatchTransmitted(opts *bind.WatchOpts, sink chan<- *AutomationRegistryTransmitted) (event.Subscription, error) + + 
ParseTransmitted(log types.Log) (*AutomationRegistryTransmitted, error) + + FilterUnpaused(opts *bind.FilterOpts) (*AutomationRegistryUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*AutomationRegistryUnpaused, error) + + FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*AutomationRegistryUpkeepAdminTransferRequestedIterator, error) + + WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferRequested(log types.Log) (*AutomationRegistryUpkeepAdminTransferRequested, error) + + FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*AutomationRegistryUpkeepAdminTransferredIterator, error) + + WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferred(log types.Log) (*AutomationRegistryUpkeepAdminTransferred, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*AutomationRegistryUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*AutomationRegistryUpkeepCanceled, error) + + FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepCheckDataSetIterator, error) + + WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepCheckDataSet(log types.Log) 
(*AutomationRegistryUpkeepCheckDataSet, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepGasLimitSetIterator, error) + + WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*AutomationRegistryUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepMigrated(log types.Log) (*AutomationRegistryUpkeepMigrated, error) + + FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepOffchainConfigSetIterator, error) + + WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepOffchainConfigSet(log types.Log) (*AutomationRegistryUpkeepOffchainConfigSet, error) + + FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepPausedIterator, error) + + WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepPaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPaused(log types.Log) (*AutomationRegistryUpkeepPaused, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*AutomationRegistryUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*AutomationRegistryUpkeepPerformed, error) + + FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepPrivilegeConfigSetIterator, error) + + WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink 
chan<- *AutomationRegistryUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPrivilegeConfigSet(log types.Log) (*AutomationRegistryUpkeepPrivilegeConfigSet, error) + + FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*AutomationRegistryUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*AutomationRegistryUpkeepRegistered, error) + + FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepTriggerConfigSetIterator, error) + + WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepTriggerConfigSet(log types.Log) (*AutomationRegistryUpkeepTriggerConfigSet, error) + + FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*AutomationRegistryUpkeepUnpausedIterator, error) + + WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *AutomationRegistryUpkeepUnpaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepUnpaused(log types.Log) (*AutomationRegistryUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/automation_utils_2_1/automation_utils_2_1.go b/core/gethwrappers/generated/automation_utils_2_1/automation_utils_2_1.go new file mode 100644 index 00000000..5d6dc1e4 --- /dev/null +++ b/core/gethwrappers/generated/automation_utils_2_1/automation_utils_2_1.go @@ -0,0 +1,322 @@ +// 
Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package automation_utils_2_1 + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type KeeperRegistryBase21ConditionalTrigger struct { + BlockNum uint32 + BlockHash [32]byte +} + +type KeeperRegistryBase21LogTrigger struct { + LogBlockHash [32]byte + TxHash [32]byte + LogIndex uint32 + BlockNum uint32 + BlockHash [32]byte +} + +type KeeperRegistryBase21OnchainConfig struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + MaxCheckDataSize uint32 + MaxPerformDataSize uint32 + MaxRevertDataSize uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrars []common.Address + UpkeepPrivilegeManager common.Address +} + +type KeeperRegistryBase21Report struct { + FastGasWei *big.Int + LinkNative *big.Int + UpkeepIds []*big.Int + GasLimits []*big.Int + Triggers [][]byte + PerformDatas [][]byte +} + +type Log struct { + Index *big.Int + Timestamp *big.Int + TxHash [32]byte + BlockNumber *big.Int + BlockHash [32]byte + Source common.Address + Topics [][32]byte + Data []byte +} + +type LogTriggerConfig struct { + ContractAddress common.Address + FilterSelector uint8 + Topic0 [32]byte + Topic1 [32]byte + Topic2 [32]byte + Topic3 [32]byte +} + +var AutomationUtilsMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"blockNum\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"}],\"internalType\":\"structKeeperRegistryBase2_1.ConditionalTrigger\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_conditionalTrigger\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"txHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"source\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"topics\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"internalType\":\"structLog\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_log\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"logBlockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"txHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"logIndex\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNum\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"}],\"internalType\":\"structKeeperRegistryBase2_1.LogTrigger\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_logTrigger\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"contractAddress\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"filterSelector\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"topic0\",\"type\":
\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic1\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic2\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic3\",\"type\":\"bytes32\"}],\"internalType\":\"structLogTriggerConfig\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_logTriggerConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCheckDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxRevertDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"registrars\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"upkeepPrivilegeManager\",\"type\":\"address\"}],\"internalType\":\"structKeeperRegistryBase2_1.OnchainConfig\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_onChainConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"fastGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"l
inkNative\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256[]\",\"name\":\"gasLimits\",\"type\":\"uint256[]\"},{\"internalType\":\"bytes[]\",\"name\":\"triggers\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes[]\",\"name\":\"performDatas\",\"type\":\"bytes[]\"}],\"internalType\":\"structKeeperRegistryBase2_1.Report\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_report\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b506108ca806100206000396000f3fe608060405234801561001057600080fd5b50600436106100725760003560e01c8063776f306111610050578063776f3061146100a6578063e65d6546146100b4578063e9720a49146100c257600080fd5b806321f373d7146100775780632ff92a811461008a5780634b6df29414610098575b600080fd5b6100886100853660046101e8565b50565b005b610088610085366004610363565b6100886100853660046104bd565b610088610085366004610514565b6100886100853660046106fb565b6100886100853660046107e8565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040516101e0810167ffffffffffffffff81118282101715610123576101236100d0565b60405290565b60405160c0810167ffffffffffffffff81118282101715610123576101236100d0565b604051610100810167ffffffffffffffff81118282101715610123576101236100d0565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156101b7576101b76100d0565b604052919050565b803573ffffffffffffffffffffffffffffffffffffffff811681146101e357600080fd5b919050565b600060c082840312156101fa57600080fd5b60405160c0810181811067ffffffffffffffff8211171561021d5761021d6100d0565b604052610229836101bf565b8152602083013560ff8116811461023f57600080fd5b8060208301525060408301356040820152606083013560608201526080830135608082015260a083013560a08201528091505092915050565b803563ffffffff811681146101e357600080fd5b803562ffffff811681146101e357600080fd5b803561ffff811681146101e357600080fd5b80356bfffffff
fffffffffffffffff811681146101e357600080fd5b600067ffffffffffffffff8211156102e7576102e76100d0565b5060051b60200190565b600082601f83011261030257600080fd5b81356020610317610312836102cd565b610170565b82815260059290921b8401810191818101908684111561033657600080fd5b8286015b848110156103585761034b816101bf565b835291830191830161033a565b509695505050505050565b60006020828403121561037557600080fd5b813567ffffffffffffffff8082111561038d57600080fd5b908301906101e082860312156103a257600080fd5b6103aa6100ff565b6103b383610278565b81526103c160208401610278565b60208201526103d260408401610278565b60408201526103e36060840161028c565b60608201526103f46080840161029f565b608082015261040560a084016102b1565b60a082015261041660c08401610278565b60c082015261042760e08401610278565b60e082015261010061043a818501610278565b9082015261012061044c848201610278565b90820152610140838101359082015261016080840135908201526101806104748185016101bf565b908201526101a0838101358381111561048c57600080fd5b610498888287016102f1565b8284015250506101c091506104ae8284016101bf565b91810191909152949350505050565b6000604082840312156104cf57600080fd5b6040516040810181811067ffffffffffffffff821117156104f2576104f26100d0565b6040526104fe83610278565b8152602083013560208201528091505092915050565b600060a0828403121561052657600080fd5b60405160a0810181811067ffffffffffffffff82111715610549576105496100d0565b8060405250823581526020830135602082015261056860408401610278565b604082015261057960608401610278565b6060820152608083013560808201528091505092915050565b600082601f8301126105a357600080fd5b813560206105b3610312836102cd565b82815260059290921b840181019181810190868411156105d257600080fd5b8286015b8481101561035857803583529183019183016105d6565b600082601f8301126105fe57600080fd5b813567ffffffffffffffff811115610618576106186100d0565b61064960207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601610170565b81815284602083860101111561065e57600080fd5b816020850160208301376000918101602001919091529392505050565b600082601f83011261068c57600080fd5b8135602061069c610312836102cd565b8281526
0059290921b840181019181810190868411156106bb57600080fd5b8286015b8481101561035857803567ffffffffffffffff8111156106df5760008081fd5b6106ed8986838b01016105ed565b8452509183019183016106bf565b60006020828403121561070d57600080fd5b813567ffffffffffffffff8082111561072557600080fd5b9083019060c0828603121561073957600080fd5b610741610129565b823581526020830135602082015260408301358281111561076157600080fd5b61076d87828601610592565b60408301525060608301358281111561078557600080fd5b61079187828601610592565b6060830152506080830135828111156107a957600080fd5b6107b58782860161067b565b60808301525060a0830135828111156107cd57600080fd5b6107d98782860161067b565b60a08301525095945050505050565b6000602082840312156107fa57600080fd5b813567ffffffffffffffff8082111561081257600080fd5b90830190610100828603121561082757600080fd5b61082f61014c565b823581526020830135602082015260408301356040820152606083013560608201526080830135608082015261086760a084016101bf565b60a082015260c08301358281111561087e57600080fd5b61088a87828601610592565b60c08301525060e0830135828111156108a257600080fd5b6108ae878286016105ed565b60e0830152509594505050505056fea164736f6c6343000810000a", +} + +var AutomationUtilsABI = AutomationUtilsMetaData.ABI + +var AutomationUtilsBin = AutomationUtilsMetaData.Bin + +func DeployAutomationUtils(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *AutomationUtils, error) { + parsed, err := AutomationUtilsMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(AutomationUtilsBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &AutomationUtils{address: address, abi: *parsed, AutomationUtilsCaller: AutomationUtilsCaller{contract: contract}, AutomationUtilsTransactor: AutomationUtilsTransactor{contract: contract}, AutomationUtilsFilterer: 
AutomationUtilsFilterer{contract: contract}}, nil +} + +type AutomationUtils struct { + address common.Address + abi abi.ABI + AutomationUtilsCaller + AutomationUtilsTransactor + AutomationUtilsFilterer +} + +type AutomationUtilsCaller struct { + contract *bind.BoundContract +} + +type AutomationUtilsTransactor struct { + contract *bind.BoundContract +} + +type AutomationUtilsFilterer struct { + contract *bind.BoundContract +} + +type AutomationUtilsSession struct { + Contract *AutomationUtils + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AutomationUtilsCallerSession struct { + Contract *AutomationUtilsCaller + CallOpts bind.CallOpts +} + +type AutomationUtilsTransactorSession struct { + Contract *AutomationUtilsTransactor + TransactOpts bind.TransactOpts +} + +type AutomationUtilsRaw struct { + Contract *AutomationUtils +} + +type AutomationUtilsCallerRaw struct { + Contract *AutomationUtilsCaller +} + +type AutomationUtilsTransactorRaw struct { + Contract *AutomationUtilsTransactor +} + +func NewAutomationUtils(address common.Address, backend bind.ContractBackend) (*AutomationUtils, error) { + abi, err := abi.JSON(strings.NewReader(AutomationUtilsABI)) + if err != nil { + return nil, err + } + contract, err := bindAutomationUtils(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AutomationUtils{address: address, abi: abi, AutomationUtilsCaller: AutomationUtilsCaller{contract: contract}, AutomationUtilsTransactor: AutomationUtilsTransactor{contract: contract}, AutomationUtilsFilterer: AutomationUtilsFilterer{contract: contract}}, nil +} + +func NewAutomationUtilsCaller(address common.Address, caller bind.ContractCaller) (*AutomationUtilsCaller, error) { + contract, err := bindAutomationUtils(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AutomationUtilsCaller{contract: contract}, nil +} + +func NewAutomationUtilsTransactor(address common.Address, transactor 
bind.ContractTransactor) (*AutomationUtilsTransactor, error) { + contract, err := bindAutomationUtils(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AutomationUtilsTransactor{contract: contract}, nil +} + +func NewAutomationUtilsFilterer(address common.Address, filterer bind.ContractFilterer) (*AutomationUtilsFilterer, error) { + contract, err := bindAutomationUtils(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AutomationUtilsFilterer{contract: contract}, nil +} + +func bindAutomationUtils(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := AutomationUtilsMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AutomationUtils *AutomationUtilsRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationUtils.Contract.AutomationUtilsCaller.contract.Call(opts, result, method, params...) +} + +func (_AutomationUtils *AutomationUtilsRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationUtils.Contract.AutomationUtilsTransactor.contract.Transfer(opts) +} + +func (_AutomationUtils *AutomationUtilsRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationUtils.Contract.AutomationUtilsTransactor.contract.Transact(opts, method, params...) +} + +func (_AutomationUtils *AutomationUtilsCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationUtils.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_AutomationUtils *AutomationUtilsTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationUtils.Contract.contract.Transfer(opts) +} + +func (_AutomationUtils *AutomationUtilsTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationUtils.Contract.contract.Transact(opts, method, params...) +} + +func (_AutomationUtils *AutomationUtilsTransactor) ConditionalTrigger(opts *bind.TransactOpts, arg0 KeeperRegistryBase21ConditionalTrigger) (*types.Transaction, error) { + return _AutomationUtils.contract.Transact(opts, "_conditionalTrigger", arg0) +} + +func (_AutomationUtils *AutomationUtilsSession) ConditionalTrigger(arg0 KeeperRegistryBase21ConditionalTrigger) (*types.Transaction, error) { + return _AutomationUtils.Contract.ConditionalTrigger(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactorSession) ConditionalTrigger(arg0 KeeperRegistryBase21ConditionalTrigger) (*types.Transaction, error) { + return _AutomationUtils.Contract.ConditionalTrigger(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactor) Log(opts *bind.TransactOpts, arg0 Log) (*types.Transaction, error) { + return _AutomationUtils.contract.Transact(opts, "_log", arg0) +} + +func (_AutomationUtils *AutomationUtilsSession) Log(arg0 Log) (*types.Transaction, error) { + return _AutomationUtils.Contract.Log(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactorSession) Log(arg0 Log) (*types.Transaction, error) { + return _AutomationUtils.Contract.Log(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactor) LogTrigger(opts *bind.TransactOpts, arg0 KeeperRegistryBase21LogTrigger) (*types.Transaction, error) { + return _AutomationUtils.contract.Transact(opts, "_logTrigger", arg0) +} + +func (_AutomationUtils 
*AutomationUtilsSession) LogTrigger(arg0 KeeperRegistryBase21LogTrigger) (*types.Transaction, error) { + return _AutomationUtils.Contract.LogTrigger(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactorSession) LogTrigger(arg0 KeeperRegistryBase21LogTrigger) (*types.Transaction, error) { + return _AutomationUtils.Contract.LogTrigger(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactor) LogTriggerConfig(opts *bind.TransactOpts, arg0 LogTriggerConfig) (*types.Transaction, error) { + return _AutomationUtils.contract.Transact(opts, "_logTriggerConfig", arg0) +} + +func (_AutomationUtils *AutomationUtilsSession) LogTriggerConfig(arg0 LogTriggerConfig) (*types.Transaction, error) { + return _AutomationUtils.Contract.LogTriggerConfig(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactorSession) LogTriggerConfig(arg0 LogTriggerConfig) (*types.Transaction, error) { + return _AutomationUtils.Contract.LogTriggerConfig(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactor) OnChainConfig(opts *bind.TransactOpts, arg0 KeeperRegistryBase21OnchainConfig) (*types.Transaction, error) { + return _AutomationUtils.contract.Transact(opts, "_onChainConfig", arg0) +} + +func (_AutomationUtils *AutomationUtilsSession) OnChainConfig(arg0 KeeperRegistryBase21OnchainConfig) (*types.Transaction, error) { + return _AutomationUtils.Contract.OnChainConfig(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactorSession) OnChainConfig(arg0 KeeperRegistryBase21OnchainConfig) (*types.Transaction, error) { + return _AutomationUtils.Contract.OnChainConfig(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactor) Report(opts *bind.TransactOpts, arg0 KeeperRegistryBase21Report) (*types.Transaction, error) { + return _AutomationUtils.contract.Transact(opts, "_report", arg0) 
+} + +func (_AutomationUtils *AutomationUtilsSession) Report(arg0 KeeperRegistryBase21Report) (*types.Transaction, error) { + return _AutomationUtils.Contract.Report(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactorSession) Report(arg0 KeeperRegistryBase21Report) (*types.Transaction, error) { + return _AutomationUtils.Contract.Report(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtils) Address() common.Address { + return _AutomationUtils.address +} + +type AutomationUtilsInterface interface { + ConditionalTrigger(opts *bind.TransactOpts, arg0 KeeperRegistryBase21ConditionalTrigger) (*types.Transaction, error) + + Log(opts *bind.TransactOpts, arg0 Log) (*types.Transaction, error) + + LogTrigger(opts *bind.TransactOpts, arg0 KeeperRegistryBase21LogTrigger) (*types.Transaction, error) + + LogTriggerConfig(opts *bind.TransactOpts, arg0 LogTriggerConfig) (*types.Transaction, error) + + OnChainConfig(opts *bind.TransactOpts, arg0 KeeperRegistryBase21OnchainConfig) (*types.Transaction, error) + + Report(opts *bind.TransactOpts, arg0 KeeperRegistryBase21Report) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/automation_utils_2_2/automation_utils_2_2.go b/core/gethwrappers/generated/automation_utils_2_2/automation_utils_2_2.go new file mode 100644 index 00000000..187193cb --- /dev/null +++ b/core/gethwrappers/generated/automation_utils_2_2/automation_utils_2_2.go @@ -0,0 +1,324 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package automation_utils_2_2 + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type AutomationRegistryBase22ConditionalTrigger struct { + BlockNum uint32 + BlockHash [32]byte +} + +type AutomationRegistryBase22LogTrigger struct { + LogBlockHash [32]byte + TxHash [32]byte + LogIndex uint32 + BlockNum uint32 + BlockHash [32]byte +} + +type AutomationRegistryBase22OnchainConfig struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + MaxCheckDataSize uint32 + MaxPerformDataSize uint32 + MaxRevertDataSize uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrars []common.Address + UpkeepPrivilegeManager common.Address + ChainModule common.Address + ReorgProtectionEnabled bool +} + +type AutomationRegistryBase22Report struct { + FastGasWei *big.Int + LinkNative *big.Int + UpkeepIds []*big.Int + GasLimits []*big.Int + Triggers [][]byte + PerformDatas [][]byte +} + +type Log struct { + Index *big.Int + Timestamp *big.Int + TxHash [32]byte + BlockNumber *big.Int + BlockHash [32]byte + Source common.Address + Topics [][32]byte + Data []byte +} + +type LogTriggerConfig struct { + ContractAddress common.Address + FilterSelector uint8 + Topic0 [32]byte + Topic1 [32]byte + Topic2 [32]byte + Topic3 [32]byte +} + +var AutomationUtilsMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"blockNum\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"}],\"internalType\":\"structAutomationRegistryBase2_2.ConditionalTrigger\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_conditionalTrigger\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"txHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"source\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"topics\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"internalType\":\"structLog\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_log\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"logBlockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"txHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"logIndex\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNum\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"}],\"internalType\":\"structAutomationRegistryBase2_2.LogTrigger\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_logTrigger\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"contractAddress\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"filterSelector\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"topic0\",\
"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic1\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic2\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic3\",\"type\":\"bytes32\"}],\"internalType\":\"structLogTriggerConfig\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_logTriggerConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCheckDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxRevertDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"registrars\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"upkeepPrivilegeManager\",\"type\":\"address\"},{\"internalType\":\"contractIChainModule\",\"name\":\"chainModule\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"reorgProtectionEnabled\",\"type\":\"bool\"}],\"internalType\":\"structAutomationRegistryBase2_2.OnchainConfig\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_onChainConfig\",\"outputs\":[],\"stateMutability\":\"
nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"fastGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkNative\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256[]\",\"name\":\"gasLimits\",\"type\":\"uint256[]\"},{\"internalType\":\"bytes[]\",\"name\":\"triggers\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes[]\",\"name\":\"performDatas\",\"type\":\"bytes[]\"}],\"internalType\":\"structAutomationRegistryBase2_2.Report\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_report\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b506108f1806100206000396000f3fe608060405234801561001057600080fd5b50600436106100725760003560e01c8063a4860f2311610050578063a4860f23146100a6578063e65d6546146100b4578063e9720a49146100c257600080fd5b806321f373d7146100775780634b6df2941461008a578063776f306114610098575b600080fd5b6100886100853660046101f1565b50565b005b610088610085366004610279565b6100886100853660046102d0565b610088610085366004610437565b610088610085366004610722565b61008861008536600461080f565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60405160c0810167ffffffffffffffff81118282101715610122576101226100d0565b60405290565b604051610220810167ffffffffffffffff81118282101715610122576101226100d0565b604051610100810167ffffffffffffffff81118282101715610122576101226100d0565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156101b7576101b76100d0565b604052919050565b73ffffffffffffffffffffffffffffffffffffffff8116811461008557600080fd5b80356101ec816101bf565b919050565b600060c0828403121561020357600080fd5b61020b6100ff565b8235610216816101bf565b8152602083013560ff8116811461022c57600080fd5b8060208301525060408301356040820152606083013560608201526080830135608082015260a083013560a0820
1528091505092915050565b803563ffffffff811681146101ec57600080fd5b60006040828403121561028b57600080fd5b6040516040810181811067ffffffffffffffff821117156102ae576102ae6100d0565b6040526102ba83610265565b8152602083013560208201528091505092915050565b600060a082840312156102e257600080fd5b60405160a0810181811067ffffffffffffffff82111715610305576103056100d0565b8060405250823581526020830135602082015261032460408401610265565b604082015261033560608401610265565b6060820152608083013560808201528091505092915050565b803562ffffff811681146101ec57600080fd5b803561ffff811681146101ec57600080fd5b80356bffffffffffffffffffffffff811681146101ec57600080fd5b600067ffffffffffffffff8211156103a9576103a96100d0565b5060051b60200190565b600082601f8301126103c457600080fd5b813560206103d96103d48361038f565b610170565b82815260059290921b840181019181810190868411156103f857600080fd5b8286015b8481101561041c57803561040f816101bf565b83529183019183016103fc565b509695505050505050565b803580151581146101ec57600080fd5b60006020828403121561044957600080fd5b813567ffffffffffffffff8082111561046157600080fd5b90830190610220828603121561047657600080fd5b61047e610128565b61048783610265565b815261049560208401610265565b60208201526104a660408401610265565b60408201526104b76060840161034e565b60608201526104c860808401610361565b60808201526104d960a08401610373565b60a08201526104ea60c08401610265565b60c08201526104fb60e08401610265565b60e082015261010061050e818501610265565b90820152610120610520848201610265565b90820152610140838101359082015261016080840135908201526101806105488185016101e1565b908201526101a0838101358381111561056057600080fd5b61056c888287016103b3565b8284015250506101c091506105828284016101e1565b828201526101e091506105968284016101e1565b8282015261020091506105aa828401610427565b91810191909152949350505050565b600082601f8301126105ca57600080fd5b813560206105da6103d48361038f565b82815260059290921b840181019181810190868411156105f957600080fd5b8286015b8481101561041c57803583529183019183016105fd565b600082601f83011261062557600080fd5b813567ffffffffffffffff81111561063f5761063f6100d0565b61067
060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601610170565b81815284602083860101111561068557600080fd5b816020850160208301376000918101602001919091529392505050565b600082601f8301126106b357600080fd5b813560206106c36103d48361038f565b82815260059290921b840181019181810190868411156106e257600080fd5b8286015b8481101561041c57803567ffffffffffffffff8111156107065760008081fd5b6107148986838b0101610614565b8452509183019183016106e6565b60006020828403121561073457600080fd5b813567ffffffffffffffff8082111561074c57600080fd5b9083019060c0828603121561076057600080fd5b6107686100ff565b823581526020830135602082015260408301358281111561078857600080fd5b610794878286016105b9565b6040830152506060830135828111156107ac57600080fd5b6107b8878286016105b9565b6060830152506080830135828111156107d057600080fd5b6107dc878286016106a2565b60808301525060a0830135828111156107f457600080fd5b610800878286016106a2565b60a08301525095945050505050565b60006020828403121561082157600080fd5b813567ffffffffffffffff8082111561083957600080fd5b90830190610100828603121561084e57600080fd5b61085661014c565b823581526020830135602082015260408301356040820152606083013560608201526080830135608082015261088e60a084016101e1565b60a082015260c0830135828111156108a557600080fd5b6108b1878286016105b9565b60c08301525060e0830135828111156108c957600080fd5b6108d587828601610614565b60e0830152509594505050505056fea164736f6c6343000813000a", +} + +var AutomationUtilsABI = AutomationUtilsMetaData.ABI + +var AutomationUtilsBin = AutomationUtilsMetaData.Bin + +func DeployAutomationUtils(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *AutomationUtils, error) { + parsed, err := AutomationUtilsMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(AutomationUtilsBin), backend) + if err != nil { + return 
common.Address{}, nil, nil, err + } + return address, tx, &AutomationUtils{address: address, abi: *parsed, AutomationUtilsCaller: AutomationUtilsCaller{contract: contract}, AutomationUtilsTransactor: AutomationUtilsTransactor{contract: contract}, AutomationUtilsFilterer: AutomationUtilsFilterer{contract: contract}}, nil +} + +type AutomationUtils struct { + address common.Address + abi abi.ABI + AutomationUtilsCaller + AutomationUtilsTransactor + AutomationUtilsFilterer +} + +type AutomationUtilsCaller struct { + contract *bind.BoundContract +} + +type AutomationUtilsTransactor struct { + contract *bind.BoundContract +} + +type AutomationUtilsFilterer struct { + contract *bind.BoundContract +} + +type AutomationUtilsSession struct { + Contract *AutomationUtils + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type AutomationUtilsCallerSession struct { + Contract *AutomationUtilsCaller + CallOpts bind.CallOpts +} + +type AutomationUtilsTransactorSession struct { + Contract *AutomationUtilsTransactor + TransactOpts bind.TransactOpts +} + +type AutomationUtilsRaw struct { + Contract *AutomationUtils +} + +type AutomationUtilsCallerRaw struct { + Contract *AutomationUtilsCaller +} + +type AutomationUtilsTransactorRaw struct { + Contract *AutomationUtilsTransactor +} + +func NewAutomationUtils(address common.Address, backend bind.ContractBackend) (*AutomationUtils, error) { + abi, err := abi.JSON(strings.NewReader(AutomationUtilsABI)) + if err != nil { + return nil, err + } + contract, err := bindAutomationUtils(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &AutomationUtils{address: address, abi: abi, AutomationUtilsCaller: AutomationUtilsCaller{contract: contract}, AutomationUtilsTransactor: AutomationUtilsTransactor{contract: contract}, AutomationUtilsFilterer: AutomationUtilsFilterer{contract: contract}}, nil +} + +func NewAutomationUtilsCaller(address common.Address, caller bind.ContractCaller) 
(*AutomationUtilsCaller, error) { + contract, err := bindAutomationUtils(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &AutomationUtilsCaller{contract: contract}, nil +} + +func NewAutomationUtilsTransactor(address common.Address, transactor bind.ContractTransactor) (*AutomationUtilsTransactor, error) { + contract, err := bindAutomationUtils(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &AutomationUtilsTransactor{contract: contract}, nil +} + +func NewAutomationUtilsFilterer(address common.Address, filterer bind.ContractFilterer) (*AutomationUtilsFilterer, error) { + contract, err := bindAutomationUtils(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &AutomationUtilsFilterer{contract: contract}, nil +} + +func bindAutomationUtils(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := AutomationUtilsMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_AutomationUtils *AutomationUtilsRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationUtils.Contract.AutomationUtilsCaller.contract.Call(opts, result, method, params...) +} + +func (_AutomationUtils *AutomationUtilsRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationUtils.Contract.AutomationUtilsTransactor.contract.Transfer(opts) +} + +func (_AutomationUtils *AutomationUtilsRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationUtils.Contract.AutomationUtilsTransactor.contract.Transact(opts, method, params...) 
+} + +func (_AutomationUtils *AutomationUtilsCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _AutomationUtils.Contract.contract.Call(opts, result, method, params...) +} + +func (_AutomationUtils *AutomationUtilsTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _AutomationUtils.Contract.contract.Transfer(opts) +} + +func (_AutomationUtils *AutomationUtilsTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _AutomationUtils.Contract.contract.Transact(opts, method, params...) +} + +func (_AutomationUtils *AutomationUtilsTransactor) ConditionalTrigger(opts *bind.TransactOpts, arg0 AutomationRegistryBase22ConditionalTrigger) (*types.Transaction, error) { + return _AutomationUtils.contract.Transact(opts, "_conditionalTrigger", arg0) +} + +func (_AutomationUtils *AutomationUtilsSession) ConditionalTrigger(arg0 AutomationRegistryBase22ConditionalTrigger) (*types.Transaction, error) { + return _AutomationUtils.Contract.ConditionalTrigger(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactorSession) ConditionalTrigger(arg0 AutomationRegistryBase22ConditionalTrigger) (*types.Transaction, error) { + return _AutomationUtils.Contract.ConditionalTrigger(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactor) Log(opts *bind.TransactOpts, arg0 Log) (*types.Transaction, error) { + return _AutomationUtils.contract.Transact(opts, "_log", arg0) +} + +func (_AutomationUtils *AutomationUtilsSession) Log(arg0 Log) (*types.Transaction, error) { + return _AutomationUtils.Contract.Log(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactorSession) Log(arg0 Log) (*types.Transaction, error) { + return _AutomationUtils.Contract.Log(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils 
*AutomationUtilsTransactor) LogTrigger(opts *bind.TransactOpts, arg0 AutomationRegistryBase22LogTrigger) (*types.Transaction, error) { + return _AutomationUtils.contract.Transact(opts, "_logTrigger", arg0) +} + +func (_AutomationUtils *AutomationUtilsSession) LogTrigger(arg0 AutomationRegistryBase22LogTrigger) (*types.Transaction, error) { + return _AutomationUtils.Contract.LogTrigger(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactorSession) LogTrigger(arg0 AutomationRegistryBase22LogTrigger) (*types.Transaction, error) { + return _AutomationUtils.Contract.LogTrigger(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactor) LogTriggerConfig(opts *bind.TransactOpts, arg0 LogTriggerConfig) (*types.Transaction, error) { + return _AutomationUtils.contract.Transact(opts, "_logTriggerConfig", arg0) +} + +func (_AutomationUtils *AutomationUtilsSession) LogTriggerConfig(arg0 LogTriggerConfig) (*types.Transaction, error) { + return _AutomationUtils.Contract.LogTriggerConfig(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactorSession) LogTriggerConfig(arg0 LogTriggerConfig) (*types.Transaction, error) { + return _AutomationUtils.Contract.LogTriggerConfig(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactor) OnChainConfig(opts *bind.TransactOpts, arg0 AutomationRegistryBase22OnchainConfig) (*types.Transaction, error) { + return _AutomationUtils.contract.Transact(opts, "_onChainConfig", arg0) +} + +func (_AutomationUtils *AutomationUtilsSession) OnChainConfig(arg0 AutomationRegistryBase22OnchainConfig) (*types.Transaction, error) { + return _AutomationUtils.Contract.OnChainConfig(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactorSession) OnChainConfig(arg0 AutomationRegistryBase22OnchainConfig) (*types.Transaction, error) { + return 
_AutomationUtils.Contract.OnChainConfig(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactor) Report(opts *bind.TransactOpts, arg0 AutomationRegistryBase22Report) (*types.Transaction, error) { + return _AutomationUtils.contract.Transact(opts, "_report", arg0) +} + +func (_AutomationUtils *AutomationUtilsSession) Report(arg0 AutomationRegistryBase22Report) (*types.Transaction, error) { + return _AutomationUtils.Contract.Report(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtilsTransactorSession) Report(arg0 AutomationRegistryBase22Report) (*types.Transaction, error) { + return _AutomationUtils.Contract.Report(&_AutomationUtils.TransactOpts, arg0) +} + +func (_AutomationUtils *AutomationUtils) Address() common.Address { + return _AutomationUtils.address +} + +type AutomationUtilsInterface interface { + ConditionalTrigger(opts *bind.TransactOpts, arg0 AutomationRegistryBase22ConditionalTrigger) (*types.Transaction, error) + + Log(opts *bind.TransactOpts, arg0 Log) (*types.Transaction, error) + + LogTrigger(opts *bind.TransactOpts, arg0 AutomationRegistryBase22LogTrigger) (*types.Transaction, error) + + LogTriggerConfig(opts *bind.TransactOpts, arg0 LogTriggerConfig) (*types.Transaction, error) + + OnChainConfig(opts *bind.TransactOpts, arg0 AutomationRegistryBase22OnchainConfig) (*types.Transaction, error) + + Report(opts *bind.TransactOpts, arg0 AutomationRegistryBase22Report) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/basic_upkeep_contract/basic_upkeep_contract.go b/core/gethwrappers/generated/basic_upkeep_contract/basic_upkeep_contract.go new file mode 100644 index 00000000..43b4fce0 --- /dev/null +++ b/core/gethwrappers/generated/basic_upkeep_contract/basic_upkeep_contract.go @@ -0,0 +1,366 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package basic_upkeep_contract + +import ( + "math/big" + "strings" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// BasicUpkeepContractABI is the input ABI used to generate the binding from. +const BasicUpkeepContractABI = "[{\"inputs\":[],\"name\":\"bytesToSend\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"receivedBytes\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_bytes\",\"type\":\"bytes\"}],\"name\":\"setBytesToSend\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"_should\",\"type\":\"bool\"}],\"name\":\"setShouldPerformUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"shouldPerformUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"
bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]" + +// BasicUpkeepContractBin is the compiled bytecode used for deploying new contracts. +var BasicUpkeepContractBin = "0x608060405234801561001057600080fd5b5061072a806100206000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c80634585e33b1161005b5780634585e33b146101af5780636e04ff0d1461021f57806384aadecc14610310578063bb4462681461032f5761007d565b80630427e4b7146100825780632c3b84ac146100ff57806333437c77146101a7575b600080fd5b61008a61034b565b6040805160208082528351818301528351919283929083019185019080838360005b838110156100c45781810151838201526020016100ac565b50505050905090810190601f1680156100f15780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b6101a56004803603602081101561011557600080fd5b81019060208101813564010000000081111561013057600080fd5b82018360208201111561014257600080fd5b8035906020019184600183028401116401000000008311171561016457600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295506103f4945050505050565b005b61008a61040b565b6101a5600480360360208110156101c557600080fd5b8101906020810181356401000000008111156101e057600080fd5b8201836020820111156101f257600080fd5b8035906020019184600183028401116401000000008311171561021457600080fd5b509092509050610483565b61028f6004803603602081101561023557600080fd5b81019060208101813564010000000081111561025057600080fd5b82018360208201111561026257600080fd5b8035906020019184600183028401116401000000008311171561028457600080fd5b5090925090506104bc565b60405180831515815260200180602001828103825283818151815260200191508051906020019080838360005b838110156102d45781810151838201526020016102bc565b50505050905090810190601f1680156103015780820380516001836020036101000a031916815260200191505b50935050505060405180910390f35b6101a56004803603602081101561032657600080fd5b5035151561057f565b6103376105b0565b604080519115158252519081900360200190f35b600280546040805160206001841615610100027fffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffff01909316849004601f810184900484028201840190925281815292918301828280156103ec5780601f106103c1576101008083540402835291602001916103ec565b820191906000526020600020905b8154815290600101906020018083116103cf57829003601f168201915b505050505081565b80516104079060019060208401906105b9565b5050565b60018054604080516020600284861615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f810184900484028201840190925281815292918301828280156103ec5780601f106103c1576101008083540402835291602001916103ec565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690556104b760028383610645565b505050565b6000805460018054604080516020600261010085871615027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f810184900484028201840190925281815260609460ff1693929091839183018282801561056d5780601f106105425761010080835404028352916020019161056d565b820191906000526020600020905b81548152906001019060200180831161055057829003601f168201915b50505050509050915091509250929050565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016911515919091179055565b60005460ff1681565b828054600181600116156101000203166002900490600052602060002090601f0160209004810192826105ef5760008555610635565b82601f1061060857805160ff1916838001178555610635565b82800160010185558215610635579182015b8281111561063557825182559160200191906001019061061a565b506106419291506106df565b5090565b828054600181600116156101000203166002900490600052602060002090601f01602090048101928261067b5760008555610635565b82601f106106b2578280017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00823516178555610635565b82800160010185558215610635579182015b828111156106355782358255916020019190600101906106c4565b5b8082111561064157600081556001016106e056fea2646970667358221220d14dfba5a1efc4145905e37243ce77c189567ba18352066c0fe8a0c3ba53d27e64736f6c63430007060033" + +// DeployBasicUpkeepContract deploys a new Ethereum contract, binding an 
instance of BasicUpkeepContract to it. +func DeployBasicUpkeepContract(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *BasicUpkeepContract, error) { + parsed, err := abi.JSON(strings.NewReader(BasicUpkeepContractABI)) + if err != nil { + return common.Address{}, nil, nil, err + } + + address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(BasicUpkeepContractBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &BasicUpkeepContract{BasicUpkeepContractCaller: BasicUpkeepContractCaller{contract: contract}, BasicUpkeepContractTransactor: BasicUpkeepContractTransactor{contract: contract}, BasicUpkeepContractFilterer: BasicUpkeepContractFilterer{contract: contract}}, nil +} + +// BasicUpkeepContract is an auto generated Go binding around an Ethereum contract. +type BasicUpkeepContract struct { + BasicUpkeepContractCaller // Read-only binding to the contract + BasicUpkeepContractTransactor // Write-only binding to the contract + BasicUpkeepContractFilterer // Log filterer for contract events +} + +// BasicUpkeepContractCaller is an auto generated read-only Go binding around an Ethereum contract. +type BasicUpkeepContractCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// BasicUpkeepContractTransactor is an auto generated write-only Go binding around an Ethereum contract. +type BasicUpkeepContractTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// BasicUpkeepContractFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type BasicUpkeepContractFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// BasicUpkeepContractSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. 
+type BasicUpkeepContractSession struct { + Contract *BasicUpkeepContract // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// BasicUpkeepContractCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type BasicUpkeepContractCallerSession struct { + Contract *BasicUpkeepContractCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// BasicUpkeepContractTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type BasicUpkeepContractTransactorSession struct { + Contract *BasicUpkeepContractTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// BasicUpkeepContractRaw is an auto generated low-level Go binding around an Ethereum contract. +type BasicUpkeepContractRaw struct { + Contract *BasicUpkeepContract // Generic contract binding to access the raw methods on +} + +// BasicUpkeepContractCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type BasicUpkeepContractCallerRaw struct { + Contract *BasicUpkeepContractCaller // Generic read-only contract binding to access the raw methods on +} + +// BasicUpkeepContractTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type BasicUpkeepContractTransactorRaw struct { + Contract *BasicUpkeepContractTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewBasicUpkeepContract creates a new instance of BasicUpkeepContract, bound to a specific deployed contract. 
+func NewBasicUpkeepContract(address common.Address, backend bind.ContractBackend) (*BasicUpkeepContract, error) { + contract, err := bindBasicUpkeepContract(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &BasicUpkeepContract{BasicUpkeepContractCaller: BasicUpkeepContractCaller{contract: contract}, BasicUpkeepContractTransactor: BasicUpkeepContractTransactor{contract: contract}, BasicUpkeepContractFilterer: BasicUpkeepContractFilterer{contract: contract}}, nil +} + +// NewBasicUpkeepContractCaller creates a new read-only instance of BasicUpkeepContract, bound to a specific deployed contract. +func NewBasicUpkeepContractCaller(address common.Address, caller bind.ContractCaller) (*BasicUpkeepContractCaller, error) { + contract, err := bindBasicUpkeepContract(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &BasicUpkeepContractCaller{contract: contract}, nil +} + +// NewBasicUpkeepContractTransactor creates a new write-only instance of BasicUpkeepContract, bound to a specific deployed contract. +func NewBasicUpkeepContractTransactor(address common.Address, transactor bind.ContractTransactor) (*BasicUpkeepContractTransactor, error) { + contract, err := bindBasicUpkeepContract(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &BasicUpkeepContractTransactor{contract: contract}, nil +} + +// NewBasicUpkeepContractFilterer creates a new log filterer instance of BasicUpkeepContract, bound to a specific deployed contract. +func NewBasicUpkeepContractFilterer(address common.Address, filterer bind.ContractFilterer) (*BasicUpkeepContractFilterer, error) { + contract, err := bindBasicUpkeepContract(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &BasicUpkeepContractFilterer{contract: contract}, nil +} + +// bindBasicUpkeepContract binds a generic wrapper to an already deployed contract. 
+func bindBasicUpkeepContract(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(BasicUpkeepContractABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_BasicUpkeepContract *BasicUpkeepContractRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BasicUpkeepContract.Contract.BasicUpkeepContractCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_BasicUpkeepContract *BasicUpkeepContractRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BasicUpkeepContract.Contract.BasicUpkeepContractTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_BasicUpkeepContract *BasicUpkeepContractRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BasicUpkeepContract.Contract.BasicUpkeepContractTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. 
+func (_BasicUpkeepContract *BasicUpkeepContractCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BasicUpkeepContract.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_BasicUpkeepContract *BasicUpkeepContractTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BasicUpkeepContract.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_BasicUpkeepContract *BasicUpkeepContractTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BasicUpkeepContract.Contract.contract.Transact(opts, method, params...) +} + +// BytesToSend is a free data retrieval call binding the contract method 0x33437c77. +// +// Solidity: function bytesToSend() view returns(bytes) +func (_BasicUpkeepContract *BasicUpkeepContractCaller) BytesToSend(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _BasicUpkeepContract.contract.Call(opts, &out, "bytesToSend") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +// BytesToSend is a free data retrieval call binding the contract method 0x33437c77. +// +// Solidity: function bytesToSend() view returns(bytes) +func (_BasicUpkeepContract *BasicUpkeepContractSession) BytesToSend() ([]byte, error) { + return _BasicUpkeepContract.Contract.BytesToSend(&_BasicUpkeepContract.CallOpts) +} + +// BytesToSend is a free data retrieval call binding the contract method 0x33437c77. 
+// +// Solidity: function bytesToSend() view returns(bytes) +func (_BasicUpkeepContract *BasicUpkeepContractCallerSession) BytesToSend() ([]byte, error) { + return _BasicUpkeepContract.Contract.BytesToSend(&_BasicUpkeepContract.CallOpts) +} + +// ReceivedBytes is a free data retrieval call binding the contract method 0x0427e4b7. +// +// Solidity: function receivedBytes() view returns(bytes) +func (_BasicUpkeepContract *BasicUpkeepContractCaller) ReceivedBytes(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _BasicUpkeepContract.contract.Call(opts, &out, "receivedBytes") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +// ReceivedBytes is a free data retrieval call binding the contract method 0x0427e4b7. +// +// Solidity: function receivedBytes() view returns(bytes) +func (_BasicUpkeepContract *BasicUpkeepContractSession) ReceivedBytes() ([]byte, error) { + return _BasicUpkeepContract.Contract.ReceivedBytes(&_BasicUpkeepContract.CallOpts) +} + +// ReceivedBytes is a free data retrieval call binding the contract method 0x0427e4b7. +// +// Solidity: function receivedBytes() view returns(bytes) +func (_BasicUpkeepContract *BasicUpkeepContractCallerSession) ReceivedBytes() ([]byte, error) { + return _BasicUpkeepContract.Contract.ReceivedBytes(&_BasicUpkeepContract.CallOpts) +} + +// ShouldPerformUpkeep is a free data retrieval call binding the contract method 0xbb446268. 
+// +// Solidity: function shouldPerformUpkeep() view returns(bool) +func (_BasicUpkeepContract *BasicUpkeepContractCaller) ShouldPerformUpkeep(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _BasicUpkeepContract.contract.Call(opts, &out, "shouldPerformUpkeep") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// ShouldPerformUpkeep is a free data retrieval call binding the contract method 0xbb446268. +// +// Solidity: function shouldPerformUpkeep() view returns(bool) +func (_BasicUpkeepContract *BasicUpkeepContractSession) ShouldPerformUpkeep() (bool, error) { + return _BasicUpkeepContract.Contract.ShouldPerformUpkeep(&_BasicUpkeepContract.CallOpts) +} + +// ShouldPerformUpkeep is a free data retrieval call binding the contract method 0xbb446268. +// +// Solidity: function shouldPerformUpkeep() view returns(bool) +func (_BasicUpkeepContract *BasicUpkeepContractCallerSession) ShouldPerformUpkeep() (bool, error) { + return _BasicUpkeepContract.Contract.ShouldPerformUpkeep(&_BasicUpkeepContract.CallOpts) +} + +// CheckUpkeep is a paid mutator transaction binding the contract method 0x6e04ff0d. +// +// Solidity: function checkUpkeep(bytes data) returns(bool, bytes) +func (_BasicUpkeepContract *BasicUpkeepContractTransactor) CheckUpkeep(opts *bind.TransactOpts, data []byte) (*types.Transaction, error) { + return _BasicUpkeepContract.contract.Transact(opts, "checkUpkeep", data) +} + +// CheckUpkeep is a paid mutator transaction binding the contract method 0x6e04ff0d. +// +// Solidity: function checkUpkeep(bytes data) returns(bool, bytes) +func (_BasicUpkeepContract *BasicUpkeepContractSession) CheckUpkeep(data []byte) (*types.Transaction, error) { + return _BasicUpkeepContract.Contract.CheckUpkeep(&_BasicUpkeepContract.TransactOpts, data) +} + +// CheckUpkeep is a paid mutator transaction binding the contract method 0x6e04ff0d. 
+// +// Solidity: function checkUpkeep(bytes data) returns(bool, bytes) +func (_BasicUpkeepContract *BasicUpkeepContractTransactorSession) CheckUpkeep(data []byte) (*types.Transaction, error) { + return _BasicUpkeepContract.Contract.CheckUpkeep(&_BasicUpkeepContract.TransactOpts, data) +} + +// PerformUpkeep is a paid mutator transaction binding the contract method 0x4585e33b. +// +// Solidity: function performUpkeep(bytes data) returns() +func (_BasicUpkeepContract *BasicUpkeepContractTransactor) PerformUpkeep(opts *bind.TransactOpts, data []byte) (*types.Transaction, error) { + return _BasicUpkeepContract.contract.Transact(opts, "performUpkeep", data) +} + +// PerformUpkeep is a paid mutator transaction binding the contract method 0x4585e33b. +// +// Solidity: function performUpkeep(bytes data) returns() +func (_BasicUpkeepContract *BasicUpkeepContractSession) PerformUpkeep(data []byte) (*types.Transaction, error) { + return _BasicUpkeepContract.Contract.PerformUpkeep(&_BasicUpkeepContract.TransactOpts, data) +} + +// PerformUpkeep is a paid mutator transaction binding the contract method 0x4585e33b. +// +// Solidity: function performUpkeep(bytes data) returns() +func (_BasicUpkeepContract *BasicUpkeepContractTransactorSession) PerformUpkeep(data []byte) (*types.Transaction, error) { + return _BasicUpkeepContract.Contract.PerformUpkeep(&_BasicUpkeepContract.TransactOpts, data) +} + +// SetBytesToSend is a paid mutator transaction binding the contract method 0x2c3b84ac. +// +// Solidity: function setBytesToSend(bytes _bytes) returns() +func (_BasicUpkeepContract *BasicUpkeepContractTransactor) SetBytesToSend(opts *bind.TransactOpts, _bytes []byte) (*types.Transaction, error) { + return _BasicUpkeepContract.contract.Transact(opts, "setBytesToSend", _bytes) +} + +// SetBytesToSend is a paid mutator transaction binding the contract method 0x2c3b84ac. 
+// +// Solidity: function setBytesToSend(bytes _bytes) returns() +func (_BasicUpkeepContract *BasicUpkeepContractSession) SetBytesToSend(_bytes []byte) (*types.Transaction, error) { + return _BasicUpkeepContract.Contract.SetBytesToSend(&_BasicUpkeepContract.TransactOpts, _bytes) +} + +// SetBytesToSend is a paid mutator transaction binding the contract method 0x2c3b84ac. +// +// Solidity: function setBytesToSend(bytes _bytes) returns() +func (_BasicUpkeepContract *BasicUpkeepContractTransactorSession) SetBytesToSend(_bytes []byte) (*types.Transaction, error) { + return _BasicUpkeepContract.Contract.SetBytesToSend(&_BasicUpkeepContract.TransactOpts, _bytes) +} + +// SetShouldPerformUpkeep is a paid mutator transaction binding the contract method 0x84aadecc. +// +// Solidity: function setShouldPerformUpkeep(bool _should) returns() +func (_BasicUpkeepContract *BasicUpkeepContractTransactor) SetShouldPerformUpkeep(opts *bind.TransactOpts, _should bool) (*types.Transaction, error) { + return _BasicUpkeepContract.contract.Transact(opts, "setShouldPerformUpkeep", _should) +} + +// SetShouldPerformUpkeep is a paid mutator transaction binding the contract method 0x84aadecc. +// +// Solidity: function setShouldPerformUpkeep(bool _should) returns() +func (_BasicUpkeepContract *BasicUpkeepContractSession) SetShouldPerformUpkeep(_should bool) (*types.Transaction, error) { + return _BasicUpkeepContract.Contract.SetShouldPerformUpkeep(&_BasicUpkeepContract.TransactOpts, _should) +} + +// SetShouldPerformUpkeep is a paid mutator transaction binding the contract method 0x84aadecc. 
+// +// Solidity: function setShouldPerformUpkeep(bool _should) returns() +func (_BasicUpkeepContract *BasicUpkeepContractTransactorSession) SetShouldPerformUpkeep(_should bool) (*types.Transaction, error) { + return _BasicUpkeepContract.Contract.SetShouldPerformUpkeep(&_BasicUpkeepContract.TransactOpts, _should) +} diff --git a/core/gethwrappers/generated/batch_blockhash_store/batch_blockhash_store.go b/core/gethwrappers/generated/batch_blockhash_store/batch_blockhash_store.go new file mode 100644 index 00000000..1a43287d --- /dev/null +++ b/core/gethwrappers/generated/batch_blockhash_store/batch_blockhash_store.go @@ -0,0 +1,254 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package batch_blockhash_store + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var BatchBlockhashStoreMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"blockhashStoreAddr\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"BHS\",\"outputs\":[{\"internalType\":\"contractBlockhashStore\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"blockNumbers\",\"type\":\"uint256[]\"}],\"name\":\"getBlockhashes\",\"outputs\":[{\"internalType\":\"bytes32[]\",\"name\":\"\",\"type\":\"bytes32[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"blockNumbers\",\"type\":\"uint256[]\"}],\"name\":\"store\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"blockNumbers\",\"type\":\"uint256[]\"},{\"internalType\":\"bytes[]\",\"name\":\"headers\",\"type\":\"bytes[]\"}],\"name\":\"storeVerifyHeader\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60a060405234801561001057600080fd5b50604051610b9b380380610b9b83398101604081905261002f91610044565b60601b6001600160601b031916608052610074565b60006020828403121561005657600080fd5b81516001600160a01b038116811461006d57600080fd5b9392505050565b60805160601c610af56100a66000396000818160a7015281816101270152818161023a01526104290152610af56000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c806306bd010d146100515780631f600f86146100665780635d290e211461008f578063f745eafb146100a2575b600080fd5b61006461005f36600461066e565b6100ee565b005b61007961007436600461066e565b6101e2565b6040516100869190610819565b60405180910390f35b61006461009d3660046106ab565b6103ac565b6100c97f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610086565b60005b81518110156101de5761011c82828151811061010f5761010f6109c6565b60200260200101516104fe565b610125576101cc565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16636057361d838381518110610173576101736109c6565b60200260200101516040518263ffffffff1660e01b815260040161019991815260200190565b600060405180830381600087803b1580156101b357600080fd5b505af11580156101c7573d6000803e3d6000fd5b505050505b806101d68161095e565b9150506100f1565b5050565b60606000825167ffffffffffffffff811115610200576102006109f5565b604051908082528060200260200182016040528015610229578160200160208202803683370190505b50905060005b83518110156103a5577f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663e9413d38858381518110610286576102866109c6565b60200260200101516040518263ffffffff1660e01b81526004016102ac91815260200190565b60206040518083038186803b1580156102c457600080fd5b505afa925050508015610312575060408051601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820190925261030f91810190610800565b60015b6103725761031e610a24565b806308c379a014156103665750610333610a40565b8061033e5750610368565b600
0801b838381518110610354576103546109c6565b60200260200101818152505050610393565b505b3d6000803e3d6000fd5b80838381518110610385576103856109c6565b602002602001018181525050505b8061039d8161095e565b91505061022f565b5092915050565b805182511461041b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f696e70757420617272617920617267206c656e67746873206d69736d61746368604482015260640160405180910390fd5b60005b82518110156104f9577f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663fadff0e1848381518110610475576104756109c6565b602002602001015184848151811061048f5761048f6109c6565b60200260200101516040518363ffffffff1660e01b81526004016104b492919061085d565b600060405180830381600087803b1580156104ce57600080fd5b505af11580156104e2573d6000803e3d6000fd5b5050505080806104f19061095e565b91505061041e565b505050565b600061010061050b610537565b111561052e5761010061051c610537565b61052691906108fc565b821015610531565b60015b92915050565b600046610543816105d4565b156105cd57606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b15801561058f57600080fd5b505afa1580156105a3573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906105c79190610800565b91505090565b4391505090565b600061a4b18214806105e8575062066eed82145b8061053157505062066eee1490565b600082601f83011261060857600080fd5b81356020610615826108d8565b6040516106228282610913565b8381528281019150858301600585901b8701840188101561064257600080fd5b60005b8581101561066157813584529284019290840190600101610645565b5090979650505050505050565b60006020828403121561068057600080fd5b813567ffffffffffffffff81111561069757600080fd5b6106a3848285016105f7565b949350505050565b60008060408084860312156106bf57600080fd5b833567ffffffffffffffff808211156106d757600080fd5b6106e3878388016105f7565b94506020915081860135818111156106fa57600080fd5b8601601f8101881361070b57600080fd5b8035610716816108d8565b85516107228282610913565b82815285810
19150838601600584901b850187018c101561074257600080fd5b60005b848110156107ee5781358781111561075c57600080fd5b8601603f81018e1361076d57600080fd5b8881013588811115610781576107816109f5565b8a516107b48b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160182610913565b8181528f8c8385010111156107c857600080fd5b818c84018c83013760009181018b01919091528552509287019290870190600101610745565b50989b909a5098505050505050505050565b60006020828403121561081257600080fd5b5051919050565b6020808252825182820181905260009190848201906040850190845b8181101561085157835183529284019291840191600101610835565b50909695505050505050565b82815260006020604081840152835180604085015260005b8181101561089157858101830151858201606001528201610875565b818111156108a3576000606083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01692909201606001949350505050565b600067ffffffffffffffff8211156108f2576108f26109f5565b5060051b60200190565b60008282101561090e5761090e610997565b500390565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f830116810181811067ffffffffffffffff82111715610957576109576109f5565b6040525050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82141561099057610990610997565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600060033d1115610a3d5760046000803e5060005160e01c5b90565b600060443d1015610a4e5790565b6040517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc803d016004833e81513d67ffffffffffffffff8160248401118184111715610a9c57505050505090565b8285019150815181811115610ab45750505050505090565b843d8701016020828501011115610ace5750505050505090565b610add60208286010187610913565b50909594505050505056fea164736f6c6343000806000a", +} + +var BatchBlockhashStoreABI = 
BatchBlockhashStoreMetaData.ABI + +var BatchBlockhashStoreBin = BatchBlockhashStoreMetaData.Bin + +func DeployBatchBlockhashStore(auth *bind.TransactOpts, backend bind.ContractBackend, blockhashStoreAddr common.Address) (common.Address, *types.Transaction, *BatchBlockhashStore, error) { + parsed, err := BatchBlockhashStoreMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(BatchBlockhashStoreBin), backend, blockhashStoreAddr) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &BatchBlockhashStore{address: address, abi: *parsed, BatchBlockhashStoreCaller: BatchBlockhashStoreCaller{contract: contract}, BatchBlockhashStoreTransactor: BatchBlockhashStoreTransactor{contract: contract}, BatchBlockhashStoreFilterer: BatchBlockhashStoreFilterer{contract: contract}}, nil +} + +type BatchBlockhashStore struct { + address common.Address + abi abi.ABI + BatchBlockhashStoreCaller + BatchBlockhashStoreTransactor + BatchBlockhashStoreFilterer +} + +type BatchBlockhashStoreCaller struct { + contract *bind.BoundContract +} + +type BatchBlockhashStoreTransactor struct { + contract *bind.BoundContract +} + +type BatchBlockhashStoreFilterer struct { + contract *bind.BoundContract +} + +type BatchBlockhashStoreSession struct { + Contract *BatchBlockhashStore + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type BatchBlockhashStoreCallerSession struct { + Contract *BatchBlockhashStoreCaller + CallOpts bind.CallOpts +} + +type BatchBlockhashStoreTransactorSession struct { + Contract *BatchBlockhashStoreTransactor + TransactOpts bind.TransactOpts +} + +type BatchBlockhashStoreRaw struct { + Contract *BatchBlockhashStore +} + +type BatchBlockhashStoreCallerRaw struct { + Contract *BatchBlockhashStoreCaller +} + +type 
BatchBlockhashStoreTransactorRaw struct { + Contract *BatchBlockhashStoreTransactor +} + +func NewBatchBlockhashStore(address common.Address, backend bind.ContractBackend) (*BatchBlockhashStore, error) { + abi, err := abi.JSON(strings.NewReader(BatchBlockhashStoreABI)) + if err != nil { + return nil, err + } + contract, err := bindBatchBlockhashStore(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &BatchBlockhashStore{address: address, abi: abi, BatchBlockhashStoreCaller: BatchBlockhashStoreCaller{contract: contract}, BatchBlockhashStoreTransactor: BatchBlockhashStoreTransactor{contract: contract}, BatchBlockhashStoreFilterer: BatchBlockhashStoreFilterer{contract: contract}}, nil +} + +func NewBatchBlockhashStoreCaller(address common.Address, caller bind.ContractCaller) (*BatchBlockhashStoreCaller, error) { + contract, err := bindBatchBlockhashStore(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &BatchBlockhashStoreCaller{contract: contract}, nil +} + +func NewBatchBlockhashStoreTransactor(address common.Address, transactor bind.ContractTransactor) (*BatchBlockhashStoreTransactor, error) { + contract, err := bindBatchBlockhashStore(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &BatchBlockhashStoreTransactor{contract: contract}, nil +} + +func NewBatchBlockhashStoreFilterer(address common.Address, filterer bind.ContractFilterer) (*BatchBlockhashStoreFilterer, error) { + contract, err := bindBatchBlockhashStore(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &BatchBlockhashStoreFilterer{contract: contract}, nil +} + +func bindBatchBlockhashStore(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := BatchBlockhashStoreMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, 
caller, transactor, filterer), nil +} + +func (_BatchBlockhashStore *BatchBlockhashStoreRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BatchBlockhashStore.Contract.BatchBlockhashStoreCaller.contract.Call(opts, result, method, params...) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BatchBlockhashStore.Contract.BatchBlockhashStoreTransactor.contract.Transfer(opts) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BatchBlockhashStore.Contract.BatchBlockhashStoreTransactor.contract.Transact(opts, method, params...) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BatchBlockhashStore.Contract.contract.Call(opts, result, method, params...) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BatchBlockhashStore.Contract.contract.Transfer(opts) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BatchBlockhashStore.Contract.contract.Transact(opts, method, params...) 
+} + +func (_BatchBlockhashStore *BatchBlockhashStoreCaller) BHS(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _BatchBlockhashStore.contract.Call(opts, &out, "BHS") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_BatchBlockhashStore *BatchBlockhashStoreSession) BHS() (common.Address, error) { + return _BatchBlockhashStore.Contract.BHS(&_BatchBlockhashStore.CallOpts) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreCallerSession) BHS() (common.Address, error) { + return _BatchBlockhashStore.Contract.BHS(&_BatchBlockhashStore.CallOpts) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreCaller) GetBlockhashes(opts *bind.CallOpts, blockNumbers []*big.Int) ([][32]byte, error) { + var out []interface{} + err := _BatchBlockhashStore.contract.Call(opts, &out, "getBlockhashes", blockNumbers) + + if err != nil { + return *new([][32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([][32]byte)).(*[][32]byte) + + return out0, err + +} + +func (_BatchBlockhashStore *BatchBlockhashStoreSession) GetBlockhashes(blockNumbers []*big.Int) ([][32]byte, error) { + return _BatchBlockhashStore.Contract.GetBlockhashes(&_BatchBlockhashStore.CallOpts, blockNumbers) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreCallerSession) GetBlockhashes(blockNumbers []*big.Int) ([][32]byte, error) { + return _BatchBlockhashStore.Contract.GetBlockhashes(&_BatchBlockhashStore.CallOpts, blockNumbers) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreTransactor) Store(opts *bind.TransactOpts, blockNumbers []*big.Int) (*types.Transaction, error) { + return _BatchBlockhashStore.contract.Transact(opts, "store", blockNumbers) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreSession) Store(blockNumbers []*big.Int) (*types.Transaction, error) { + return _BatchBlockhashStore.Contract.Store(&_BatchBlockhashStore.TransactOpts, 
blockNumbers) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreTransactorSession) Store(blockNumbers []*big.Int) (*types.Transaction, error) { + return _BatchBlockhashStore.Contract.Store(&_BatchBlockhashStore.TransactOpts, blockNumbers) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreTransactor) StoreVerifyHeader(opts *bind.TransactOpts, blockNumbers []*big.Int, headers [][]byte) (*types.Transaction, error) { + return _BatchBlockhashStore.contract.Transact(opts, "storeVerifyHeader", blockNumbers, headers) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreSession) StoreVerifyHeader(blockNumbers []*big.Int, headers [][]byte) (*types.Transaction, error) { + return _BatchBlockhashStore.Contract.StoreVerifyHeader(&_BatchBlockhashStore.TransactOpts, blockNumbers, headers) +} + +func (_BatchBlockhashStore *BatchBlockhashStoreTransactorSession) StoreVerifyHeader(blockNumbers []*big.Int, headers [][]byte) (*types.Transaction, error) { + return _BatchBlockhashStore.Contract.StoreVerifyHeader(&_BatchBlockhashStore.TransactOpts, blockNumbers, headers) +} + +func (_BatchBlockhashStore *BatchBlockhashStore) Address() common.Address { + return _BatchBlockhashStore.address +} + +type BatchBlockhashStoreInterface interface { + BHS(opts *bind.CallOpts) (common.Address, error) + + GetBlockhashes(opts *bind.CallOpts, blockNumbers []*big.Int) ([][32]byte, error) + + Store(opts *bind.TransactOpts, blockNumbers []*big.Int) (*types.Transaction, error) + + StoreVerifyHeader(opts *bind.TransactOpts, blockNumbers []*big.Int, headers [][]byte) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/batch_vrf_coordinator_v2/batch_vrf_coordinator_v2.go b/core/gethwrappers/generated/batch_vrf_coordinator_v2/batch_vrf_coordinator_v2.go new file mode 100644 index 00000000..67d593fe --- /dev/null +++ b/core/gethwrappers/generated/batch_vrf_coordinator_v2/batch_vrf_coordinator_v2.go @@ -0,0 +1,528 @@ +// Code generated - DO NOT EDIT. 
+// This file is a generated binding and any manual changes will be lost. + +package batch_vrf_coordinator_v2 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type VRFTypesProof struct { + Pk [2]*big.Int + Gamma [2]*big.Int + C *big.Int + S *big.Int + Seed *big.Int + UWitness common.Address + CGammaWitness [2]*big.Int + SHashWitness [2]*big.Int + ZInv *big.Int +} + +type VRFTypesRequestCommitment struct { + BlockNum uint64 + SubId uint64 + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address +} + +var BatchVRFCoordinatorV2MetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinatorAddr\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"reason\",\"type\":\"string\"}],\"name\":\"ErrorReturned\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"lowLevelData\",\"type\":\"bytes\"}],\"name\":\"RawErrorReturned\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"COORDINATOR\",\"outputs\":[{\"internalType\":\"contractVRFCoordinatorV2\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"internalType\":\"structVRFTypes.Proof[]\",\"name\":\"proofs\",\"type\":\"tuple[]\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"blockNum\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"address\"
,\"name\":\"sender\",\"type\":\"address\"}],\"internalType\":\"structVRFTypes.RequestCommitment[]\",\"name\":\"rcs\",\"type\":\"tuple[]\"}],\"name\":\"fulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60a060405234801561001057600080fd5b50604051610bbb380380610bbb83398101604081905261002f91610044565b60601b6001600160601b031916608052610074565b60006020828403121561005657600080fd5b81516001600160a01b038116811461006d57600080fd5b9392505050565b60805160601c610b23610098600039600081816055015261011d0152610b236000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806308b2da0a1461003b5780633b2bcbf114610050575b600080fd5b61004e61004936600461057f565b6100a0565b005b6100777f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b805182511461010f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f696e70757420617272617920617267206c656e67746873206d69736d61746368604482015260640160405180910390fd5b60005b8251811015610330577f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663af198b97848381518110610169576101696109f5565b6020026020010151848481518110610183576101836109f5565b60200260200101516040518363ffffffff1660e01b81526004016101a89291906107d3565b602060405180830381600087803b1580156101c257600080fd5b505af1925050508015610210575060408051601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820190925261020d918101906106e1565b60015b61031c5761021c610a53565b806308c379a014156102a15750610231610a6e565b8061023c57506102a3565b6000610260858481518110610253576102536109f5565b6020026020010151610335565b9050807f4dcab4ce0e741a040f7e0f9b880557f8de685a9520d4bfac272a81c3c3802b2e8360405161029291906107c0565b60405180910390a2505061031e565b505b3d8080156102cd576040519150601f19603f3d011682016040523d82523d6000602084013e6102d2565b60
6091505b5060006102ea858481518110610253576102536109f5565b9050807fbfd42bb5a1bf8153ea750f66ea4944f23f7b9ae51d0462177b9769aa652b61b58360405161029291906107c0565b505b8061032881610995565b915050610112565b505050565b60008061034583600001516103a4565b9050808360800151604051602001610367929190918252602082015260400190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101209392505050565b6000816040516020016103b791906107ac565b604051602081830303815290604052805190602001209050919050565b803573ffffffffffffffffffffffffffffffffffffffff811681146103f857600080fd5b919050565b600082601f83011261040e57600080fd5b8135602061041b826108df565b60408051610429838261094a565b848152838101925086840160a0808702890186018a101561044957600080fd5b6000805b888110156104cb5782848d031215610463578182fd5b855161046e81610903565b61047785610567565b8152610484898601610567565b89820152610493878601610553565b8782015260606104a4818701610553565b9082015260806104b58682016103d4565b908201528752958701959282019260010161044d565b50929a9950505050505050505050565b600082601f8301126104ec57600080fd5b6040516040810181811067ffffffffffffffff8211171561050f5761050f610a24565b806040525080838560408601111561052657600080fd5b60005b6002811015610548578135835260209283019290910190600101610529565b509195945050505050565b803563ffffffff811681146103f857600080fd5b803567ffffffffffffffff811681146103f857600080fd5b6000806040838503121561059257600080fd5b823567ffffffffffffffff808211156105aa57600080fd5b818501915085601f8301126105be57600080fd5b813560206105cb826108df565b6040516105d8828261094a565b83815282810191508583016101a0808602880185018c10156105f957600080fd5b600097505b858810156106b15780828d03121561061557600080fd5b61061d6108d0565b6106278d846104db565b81526106368d604085016104db565b86820152608080840135604083015260a080850135606084015260c08501358284015261066560e086016103d4565b90830152506101006106798e8583016104db565b60c083015261068c8e61014086016104db565b60e08301526101808401359082015284526001979097019692840192908101906105fe565b5090975050
50860135925050808211156106ca57600080fd5b506106d7858286016103fd565b9150509250929050565b6000602082840312156106f357600080fd5b81516bffffffffffffffffffffffff8116811461070f57600080fd5b9392505050565b8060005b600281101561073957815184526020938401939091019060010161071a565b50505050565b60008151808452602060005b8281101561076657848101820151868201830152810161074b565b828111156107775760008284880101525b50807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8401168601019250505092915050565b604081016107ba8284610716565b92915050565b60208152600061070f602083018461073f565b6000610240820190506107e7828551610716565b60208401516107f96040840182610716565b5060408401516080830152606084015160a0830152608084015160c083015273ffffffffffffffffffffffffffffffffffffffff60a08501511660e083015260c084015161010061084c81850183610716565b60e08601519150610861610140850183610716565b85015161018084015250825167ffffffffffffffff9081166101a08401526020840151166101c0830152604083015163ffffffff9081166101e0840152606084015116610200830152608083015173ffffffffffffffffffffffffffffffffffffffff1661022083015261070f565b6040516108dc81610929565b90565b600067ffffffffffffffff8211156108f9576108f9610a24565b5060051b60200190565b60a0810181811067ffffffffffffffff8211171561092357610923610a24565b60405250565b610120810167ffffffffffffffff8111828210171561092357610923610a24565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f830116810181811067ffffffffffffffff8211171561098e5761098e610a24565b6040525050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8214156109ee577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600060033d11156108dc5760046000803e5060005160e01c90565b600060443d1015610a7c5790565b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffc803d016004833e81513d67ffffffffffffffff8160248401118184111715610aca57505050505090565b8285019150815181811115610ae25750505050505090565b843d8701016020828501011115610afc5750505050505090565b610b0b6020828601018761094a565b50909594505050505056fea164736f6c6343000806000a", +} + +var BatchVRFCoordinatorV2ABI = BatchVRFCoordinatorV2MetaData.ABI + +var BatchVRFCoordinatorV2Bin = BatchVRFCoordinatorV2MetaData.Bin + +func DeployBatchVRFCoordinatorV2(auth *bind.TransactOpts, backend bind.ContractBackend, coordinatorAddr common.Address) (common.Address, *types.Transaction, *BatchVRFCoordinatorV2, error) { + parsed, err := BatchVRFCoordinatorV2MetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(BatchVRFCoordinatorV2Bin), backend, coordinatorAddr) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &BatchVRFCoordinatorV2{address: address, abi: *parsed, BatchVRFCoordinatorV2Caller: BatchVRFCoordinatorV2Caller{contract: contract}, BatchVRFCoordinatorV2Transactor: BatchVRFCoordinatorV2Transactor{contract: contract}, BatchVRFCoordinatorV2Filterer: BatchVRFCoordinatorV2Filterer{contract: contract}}, nil +} + +type BatchVRFCoordinatorV2 struct { + address common.Address + abi abi.ABI + BatchVRFCoordinatorV2Caller + BatchVRFCoordinatorV2Transactor + BatchVRFCoordinatorV2Filterer +} + +type BatchVRFCoordinatorV2Caller struct { + contract *bind.BoundContract +} + +type BatchVRFCoordinatorV2Transactor struct { + contract *bind.BoundContract +} + +type BatchVRFCoordinatorV2Filterer struct { + contract *bind.BoundContract +} + +type BatchVRFCoordinatorV2Session struct { + Contract *BatchVRFCoordinatorV2 + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type BatchVRFCoordinatorV2CallerSession struct { + Contract *BatchVRFCoordinatorV2Caller + 
CallOpts bind.CallOpts +} + +type BatchVRFCoordinatorV2TransactorSession struct { + Contract *BatchVRFCoordinatorV2Transactor + TransactOpts bind.TransactOpts +} + +type BatchVRFCoordinatorV2Raw struct { + Contract *BatchVRFCoordinatorV2 +} + +type BatchVRFCoordinatorV2CallerRaw struct { + Contract *BatchVRFCoordinatorV2Caller +} + +type BatchVRFCoordinatorV2TransactorRaw struct { + Contract *BatchVRFCoordinatorV2Transactor +} + +func NewBatchVRFCoordinatorV2(address common.Address, backend bind.ContractBackend) (*BatchVRFCoordinatorV2, error) { + abi, err := abi.JSON(strings.NewReader(BatchVRFCoordinatorV2ABI)) + if err != nil { + return nil, err + } + contract, err := bindBatchVRFCoordinatorV2(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2{address: address, abi: abi, BatchVRFCoordinatorV2Caller: BatchVRFCoordinatorV2Caller{contract: contract}, BatchVRFCoordinatorV2Transactor: BatchVRFCoordinatorV2Transactor{contract: contract}, BatchVRFCoordinatorV2Filterer: BatchVRFCoordinatorV2Filterer{contract: contract}}, nil +} + +func NewBatchVRFCoordinatorV2Caller(address common.Address, caller bind.ContractCaller) (*BatchVRFCoordinatorV2Caller, error) { + contract, err := bindBatchVRFCoordinatorV2(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2Caller{contract: contract}, nil +} + +func NewBatchVRFCoordinatorV2Transactor(address common.Address, transactor bind.ContractTransactor) (*BatchVRFCoordinatorV2Transactor, error) { + contract, err := bindBatchVRFCoordinatorV2(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2Transactor{contract: contract}, nil +} + +func NewBatchVRFCoordinatorV2Filterer(address common.Address, filterer bind.ContractFilterer) (*BatchVRFCoordinatorV2Filterer, error) { + contract, err := bindBatchVRFCoordinatorV2(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return 
&BatchVRFCoordinatorV2Filterer{contract: contract}, nil +} + +func bindBatchVRFCoordinatorV2(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := BatchVRFCoordinatorV2MetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BatchVRFCoordinatorV2.Contract.BatchVRFCoordinatorV2Caller.contract.Call(opts, result, method, params...) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.Contract.BatchVRFCoordinatorV2Transactor.contract.Transfer(opts) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.Contract.BatchVRFCoordinatorV2Transactor.contract.Transact(opts, method, params...) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BatchVRFCoordinatorV2.Contract.contract.Call(opts, result, method, params...) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.Contract.contract.Transfer(opts) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.Contract.contract.Transact(opts, method, params...) 
+} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Caller) COORDINATOR(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _BatchVRFCoordinatorV2.contract.Call(opts, &out, "COORDINATOR") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Session) COORDINATOR() (common.Address, error) { + return _BatchVRFCoordinatorV2.Contract.COORDINATOR(&_BatchVRFCoordinatorV2.CallOpts) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2CallerSession) COORDINATOR() (common.Address, error) { + return _BatchVRFCoordinatorV2.Contract.COORDINATOR(&_BatchVRFCoordinatorV2.CallOpts) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Transactor) FulfillRandomWords(opts *bind.TransactOpts, proofs []VRFTypesProof, rcs []VRFTypesRequestCommitment) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.contract.Transact(opts, "fulfillRandomWords", proofs, rcs) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Session) FulfillRandomWords(proofs []VRFTypesProof, rcs []VRFTypesRequestCommitment) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.Contract.FulfillRandomWords(&_BatchVRFCoordinatorV2.TransactOpts, proofs, rcs) +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2TransactorSession) FulfillRandomWords(proofs []VRFTypesProof, rcs []VRFTypesRequestCommitment) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2.Contract.FulfillRandomWords(&_BatchVRFCoordinatorV2.TransactOpts, proofs, rcs) +} + +type BatchVRFCoordinatorV2ErrorReturnedIterator struct { + Event *BatchVRFCoordinatorV2ErrorReturned + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BatchVRFCoordinatorV2ErrorReturnedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if 
it.done { + select { + case log := <-it.logs: + it.Event = new(BatchVRFCoordinatorV2ErrorReturned) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BatchVRFCoordinatorV2ErrorReturned) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BatchVRFCoordinatorV2ErrorReturnedIterator) Error() error { + return it.fail +} + +func (it *BatchVRFCoordinatorV2ErrorReturnedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BatchVRFCoordinatorV2ErrorReturned struct { + RequestId *big.Int + Reason string + Raw types.Log +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Filterer) FilterErrorReturned(opts *bind.FilterOpts, requestId []*big.Int) (*BatchVRFCoordinatorV2ErrorReturnedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _BatchVRFCoordinatorV2.contract.FilterLogs(opts, "ErrorReturned", requestIdRule) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2ErrorReturnedIterator{contract: _BatchVRFCoordinatorV2.contract, event: "ErrorReturned", logs: logs, sub: sub}, nil +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Filterer) WatchErrorReturned(opts *bind.WatchOpts, sink chan<- *BatchVRFCoordinatorV2ErrorReturned, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _BatchVRFCoordinatorV2.contract.WatchLogs(opts, "ErrorReturned", requestIdRule) + if err != nil { + 
return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BatchVRFCoordinatorV2ErrorReturned) + if err := _BatchVRFCoordinatorV2.contract.UnpackLog(event, "ErrorReturned", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Filterer) ParseErrorReturned(log types.Log) (*BatchVRFCoordinatorV2ErrorReturned, error) { + event := new(BatchVRFCoordinatorV2ErrorReturned) + if err := _BatchVRFCoordinatorV2.contract.UnpackLog(event, "ErrorReturned", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BatchVRFCoordinatorV2RawErrorReturnedIterator struct { + Event *BatchVRFCoordinatorV2RawErrorReturned + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BatchVRFCoordinatorV2RawErrorReturnedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BatchVRFCoordinatorV2RawErrorReturned) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BatchVRFCoordinatorV2RawErrorReturned) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BatchVRFCoordinatorV2RawErrorReturnedIterator) Error() error { + return it.fail +} + +func (it 
*BatchVRFCoordinatorV2RawErrorReturnedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BatchVRFCoordinatorV2RawErrorReturned struct { + RequestId *big.Int + LowLevelData []byte + Raw types.Log +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Filterer) FilterRawErrorReturned(opts *bind.FilterOpts, requestId []*big.Int) (*BatchVRFCoordinatorV2RawErrorReturnedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _BatchVRFCoordinatorV2.contract.FilterLogs(opts, "RawErrorReturned", requestIdRule) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2RawErrorReturnedIterator{contract: _BatchVRFCoordinatorV2.contract, event: "RawErrorReturned", logs: logs, sub: sub}, nil +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Filterer) WatchRawErrorReturned(opts *bind.WatchOpts, sink chan<- *BatchVRFCoordinatorV2RawErrorReturned, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _BatchVRFCoordinatorV2.contract.WatchLogs(opts, "RawErrorReturned", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BatchVRFCoordinatorV2RawErrorReturned) + if err := _BatchVRFCoordinatorV2.contract.UnpackLog(event, "RawErrorReturned", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2Filterer) ParseRawErrorReturned(log types.Log) 
(*BatchVRFCoordinatorV2RawErrorReturned, error) { + event := new(BatchVRFCoordinatorV2RawErrorReturned) + if err := _BatchVRFCoordinatorV2.contract.UnpackLog(event, "RawErrorReturned", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _BatchVRFCoordinatorV2.abi.Events["ErrorReturned"].ID: + return _BatchVRFCoordinatorV2.ParseErrorReturned(log) + case _BatchVRFCoordinatorV2.abi.Events["RawErrorReturned"].ID: + return _BatchVRFCoordinatorV2.ParseRawErrorReturned(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (BatchVRFCoordinatorV2ErrorReturned) Topic() common.Hash { + return common.HexToHash("0x4dcab4ce0e741a040f7e0f9b880557f8de685a9520d4bfac272a81c3c3802b2e") +} + +func (BatchVRFCoordinatorV2RawErrorReturned) Topic() common.Hash { + return common.HexToHash("0xbfd42bb5a1bf8153ea750f66ea4944f23f7b9ae51d0462177b9769aa652b61b5") +} + +func (_BatchVRFCoordinatorV2 *BatchVRFCoordinatorV2) Address() common.Address { + return _BatchVRFCoordinatorV2.address +} + +type BatchVRFCoordinatorV2Interface interface { + COORDINATOR(opts *bind.CallOpts) (common.Address, error) + + FulfillRandomWords(opts *bind.TransactOpts, proofs []VRFTypesProof, rcs []VRFTypesRequestCommitment) (*types.Transaction, error) + + FilterErrorReturned(opts *bind.FilterOpts, requestId []*big.Int) (*BatchVRFCoordinatorV2ErrorReturnedIterator, error) + + WatchErrorReturned(opts *bind.WatchOpts, sink chan<- *BatchVRFCoordinatorV2ErrorReturned, requestId []*big.Int) (event.Subscription, error) + + ParseErrorReturned(log types.Log) (*BatchVRFCoordinatorV2ErrorReturned, error) + + FilterRawErrorReturned(opts *bind.FilterOpts, requestId []*big.Int) (*BatchVRFCoordinatorV2RawErrorReturnedIterator, error) + + WatchRawErrorReturned(opts *bind.WatchOpts, sink chan<- 
*BatchVRFCoordinatorV2RawErrorReturned, requestId []*big.Int) (event.Subscription, error) + + ParseRawErrorReturned(log types.Log) (*BatchVRFCoordinatorV2RawErrorReturned, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/batch_vrf_coordinator_v2plus/batch_vrf_coordinator_v2plus.go b/core/gethwrappers/generated/batch_vrf_coordinator_v2plus/batch_vrf_coordinator_v2plus.go new file mode 100644 index 00000000..12026d37 --- /dev/null +++ b/core/gethwrappers/generated/batch_vrf_coordinator_v2plus/batch_vrf_coordinator_v2plus.go @@ -0,0 +1,529 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package batch_vrf_coordinator_v2plus + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type VRFTypesProof struct { + Pk [2]*big.Int + Gamma [2]*big.Int + C *big.Int + S *big.Int + Seed *big.Int + UWitness common.Address + CGammaWitness [2]*big.Int + SHashWitness [2]*big.Int + ZInv *big.Int +} + +type VRFTypesRequestCommitmentV2Plus struct { + BlockNum uint64 + SubId *big.Int + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address + ExtraArgs []byte +} + +var BatchVRFCoordinatorV2PlusMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinatorAddr\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"reason\",\"type\":\"string\"}],\"name\":\"ErrorReturned\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"lowLevelData\",\"type\":\"bytes\"}],\"name\":\"RawErrorReturned\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"COORDINATOR\",\"outputs\":[{\"internalType\":\"contractIVRFCoordinatorV2Plus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"internalType\":\"structVRFTypes.Proof[]\",\"name\":\"proofs\",\"type\":\"tuple[]\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"blockNum\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"ad
dress\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"extraArgs\",\"type\":\"bytes\"}],\"internalType\":\"structVRFTypes.RequestCommitmentV2Plus[]\",\"name\":\"rcs\",\"type\":\"tuple[]\"}],\"name\":\"fulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60a060405234801561001057600080fd5b50604051610cd3380380610cd383398101604081905261002f91610044565b60601b6001600160601b031916608052610074565b60006020828403121561005657600080fd5b81516001600160a01b038116811461006d57600080fd5b9392505050565b60805160601c610c3b610098600039600081816040015261011d0152610c3b6000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80633b2bcbf11461003b5780636abb17211461008b575b600080fd5b6100627f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b61009e610099366004610668565b6100a0565b005b805182511461010f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f696e70757420617272617920617267206c656e67746873206d69736d61746368604482015260640160405180910390fd5b60005b8251811015610333577f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663301f42e984838151811061016957610169610b0c565b602002602001015184848151811061018357610183610b0c565b602002602001015160006040518463ffffffff1660e01b81526004016101ab93929190610933565b602060405180830381600087803b1580156101c557600080fd5b505af1925050508015610213575060408051601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201909252610210918101906107c8565b60015b61031f5761021f610b6a565b806308c379a014156102a45750610234610b86565b8061023f57506102a6565b600061026385848151811061025657610256610b0c565b6020026020010151610338565b9050807f4dcab4ce0e741a040f7e0f9b880557f8de685a9520d4bfac272a81c3c3802b2e836040516102959190610920565b60405180910390a25050610321565b
505b3d8080156102d0576040519150601f19603f3d011682016040523d82523d6000602084013e6102d5565b606091505b5060006102ed85848151811061025657610256610b0c565b9050807fbfd42bb5a1bf8153ea750f66ea4944f23f7b9ae51d0462177b9769aa652b61b5836040516102959190610920565b505b8061032b81610aac565b915050610112565b505050565b60008061034883600001516103a7565b905080836080015160405160200161036a929190918252602082015260400190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101209392505050565b6000816040516020016103ba919061090c565b604051602081830303815290604052805190602001209050919050565b803573ffffffffffffffffffffffffffffffffffffffff811681146103fb57600080fd5b919050565b600082601f83011261041157600080fd5b8135602061041e82610a17565b6040805161042c8382610a61565b8481528381019250868401600586901b8801850189101561044c57600080fd5b60005b8681101561053c57813567ffffffffffffffff8082111561046f57600080fd5b818b01915060c0807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0848f030112156104a757600080fd5b86516104b281610a3b565b8984013583811681146104c457600080fd5b8152838801358a82015260606104db818601610654565b8983015260806104ec818701610654565b8284015260a091506104ff8287016103d7565b9083015291840135918383111561051557600080fd5b6105238f8c858801016105c2565b908201528852505050938501939085019060010161044f565b509098975050505050505050565b600082601f83011261055b57600080fd5b6040516040810181811067ffffffffffffffff8211171561057e5761057e610b3b565b806040525080838560408601111561059557600080fd5b60005b60028110156105b7578135835260209283019290910190600101610598565b509195945050505050565b600082601f8301126105d357600080fd5b813567ffffffffffffffff8111156105ed576105ed610b3b565b60405161062260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501160182610a61565b81815284602083860101111561063757600080fd5b816020850160208301376000918101602001919091529392505050565b803563ffffffff811681146103fb57600080fd5b600080604080848603121561067c57600080fd5b833567ffffffffffffffff8082111561
069457600080fd5b818601915086601f8301126106a857600080fd5b813560206106b582610a17565b85516106c18282610a61565b83815282810191508583016101a0808602880185018d10156106e257600080fd5b600097505b858810156107975780828e0312156106fe57600080fd5b6107066109ed565b6107108e8461054a565b815261071e8e8b850161054a565b8682015260808301358a82015260a0830135606082015260c0830135608082015261074b60e084016103d7565b60a082015261010061075f8f82860161054a565b60c08301526107728f610140860161054a565b60e08301526101808401359082015284526001979097019692840192908101906106e7565b509098505050870135935050808311156107b057600080fd5b50506107be85828601610400565b9150509250929050565b6000602082840312156107da57600080fd5b81516bffffffffffffffffffffffff811681146107f657600080fd5b9392505050565b8060005b6002811015610820578151845260209384019390910190600101610801565b50505050565b6000815180845260005b8181101561084c57602081850181015186830182015201610830565b8181111561085e576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b67ffffffffffffffff8151168252602081015160208301526000604082015163ffffffff8082166040860152806060850151166060860152505073ffffffffffffffffffffffffffffffffffffffff608083015116608084015260a082015160c060a085015261090460c0850182610826565b949350505050565b6040810161091a82846107fd565b92915050565b6020815260006107f66020830184610826565b60006101e06109438387516107fd565b602086015161095560408501826107fd565b5060408601516080840152606086015160a0840152608086015160c084015273ffffffffffffffffffffffffffffffffffffffff60a08701511660e084015260c08601516101006109a8818601836107fd565b60e088015191506109bd6101408601836107fd565b870151610180850152506101a083018190526109db81840186610891565b9150506109046101c083018415159052565b604051610120810167ffffffffffffffff81118282101715610a1157610a11610b3b565b60405290565b600067ffffffffffffffff821115610a3157610a31610b3b565b5060051b60200190565b60c0810181811067ffffffffffffffff82111715610a5b57610a5b610b3b565b60405250565b7fffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffe0601f830116810181811067ffffffffffffffff82111715610aa557610aa5610b3b565b6040525050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415610b05577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600060033d1115610b835760046000803e5060005160e01c5b90565b600060443d1015610b945790565b6040517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc803d016004833e81513d67ffffffffffffffff8160248401118184111715610be257505050505090565b8285019150815181811115610bfa5750505050505090565b843d8701016020828501011115610c145750505050505090565b610c2360208286010187610a61565b50909594505050505056fea164736f6c6343000806000a", +} + +var BatchVRFCoordinatorV2PlusABI = BatchVRFCoordinatorV2PlusMetaData.ABI + +var BatchVRFCoordinatorV2PlusBin = BatchVRFCoordinatorV2PlusMetaData.Bin + +func DeployBatchVRFCoordinatorV2Plus(auth *bind.TransactOpts, backend bind.ContractBackend, coordinatorAddr common.Address) (common.Address, *types.Transaction, *BatchVRFCoordinatorV2Plus, error) { + parsed, err := BatchVRFCoordinatorV2PlusMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(BatchVRFCoordinatorV2PlusBin), backend, coordinatorAddr) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &BatchVRFCoordinatorV2Plus{address: address, abi: *parsed, BatchVRFCoordinatorV2PlusCaller: BatchVRFCoordinatorV2PlusCaller{contract: contract}, BatchVRFCoordinatorV2PlusTransactor: BatchVRFCoordinatorV2PlusTransactor{contract: contract}, BatchVRFCoordinatorV2PlusFilterer: 
BatchVRFCoordinatorV2PlusFilterer{contract: contract}}, nil +} + +type BatchVRFCoordinatorV2Plus struct { + address common.Address + abi abi.ABI + BatchVRFCoordinatorV2PlusCaller + BatchVRFCoordinatorV2PlusTransactor + BatchVRFCoordinatorV2PlusFilterer +} + +type BatchVRFCoordinatorV2PlusCaller struct { + contract *bind.BoundContract +} + +type BatchVRFCoordinatorV2PlusTransactor struct { + contract *bind.BoundContract +} + +type BatchVRFCoordinatorV2PlusFilterer struct { + contract *bind.BoundContract +} + +type BatchVRFCoordinatorV2PlusSession struct { + Contract *BatchVRFCoordinatorV2Plus + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type BatchVRFCoordinatorV2PlusCallerSession struct { + Contract *BatchVRFCoordinatorV2PlusCaller + CallOpts bind.CallOpts +} + +type BatchVRFCoordinatorV2PlusTransactorSession struct { + Contract *BatchVRFCoordinatorV2PlusTransactor + TransactOpts bind.TransactOpts +} + +type BatchVRFCoordinatorV2PlusRaw struct { + Contract *BatchVRFCoordinatorV2Plus +} + +type BatchVRFCoordinatorV2PlusCallerRaw struct { + Contract *BatchVRFCoordinatorV2PlusCaller +} + +type BatchVRFCoordinatorV2PlusTransactorRaw struct { + Contract *BatchVRFCoordinatorV2PlusTransactor +} + +func NewBatchVRFCoordinatorV2Plus(address common.Address, backend bind.ContractBackend) (*BatchVRFCoordinatorV2Plus, error) { + abi, err := abi.JSON(strings.NewReader(BatchVRFCoordinatorV2PlusABI)) + if err != nil { + return nil, err + } + contract, err := bindBatchVRFCoordinatorV2Plus(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2Plus{address: address, abi: abi, BatchVRFCoordinatorV2PlusCaller: BatchVRFCoordinatorV2PlusCaller{contract: contract}, BatchVRFCoordinatorV2PlusTransactor: BatchVRFCoordinatorV2PlusTransactor{contract: contract}, BatchVRFCoordinatorV2PlusFilterer: BatchVRFCoordinatorV2PlusFilterer{contract: contract}}, nil +} + +func NewBatchVRFCoordinatorV2PlusCaller(address common.Address, 
caller bind.ContractCaller) (*BatchVRFCoordinatorV2PlusCaller, error) { + contract, err := bindBatchVRFCoordinatorV2Plus(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2PlusCaller{contract: contract}, nil +} + +func NewBatchVRFCoordinatorV2PlusTransactor(address common.Address, transactor bind.ContractTransactor) (*BatchVRFCoordinatorV2PlusTransactor, error) { + contract, err := bindBatchVRFCoordinatorV2Plus(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2PlusTransactor{contract: contract}, nil +} + +func NewBatchVRFCoordinatorV2PlusFilterer(address common.Address, filterer bind.ContractFilterer) (*BatchVRFCoordinatorV2PlusFilterer, error) { + contract, err := bindBatchVRFCoordinatorV2Plus(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2PlusFilterer{contract: contract}, nil +} + +func bindBatchVRFCoordinatorV2Plus(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := BatchVRFCoordinatorV2PlusMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BatchVRFCoordinatorV2Plus.Contract.BatchVRFCoordinatorV2PlusCaller.contract.Call(opts, result, method, params...) 
+} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2Plus.Contract.BatchVRFCoordinatorV2PlusTransactor.contract.Transfer(opts) +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2Plus.Contract.BatchVRFCoordinatorV2PlusTransactor.contract.Transact(opts, method, params...) +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BatchVRFCoordinatorV2Plus.Contract.contract.Call(opts, result, method, params...) +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2Plus.Contract.contract.Transfer(opts) +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2Plus.Contract.contract.Transact(opts, method, params...) 
+} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusCaller) COORDINATOR(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _BatchVRFCoordinatorV2Plus.contract.Call(opts, &out, "COORDINATOR") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusSession) COORDINATOR() (common.Address, error) { + return _BatchVRFCoordinatorV2Plus.Contract.COORDINATOR(&_BatchVRFCoordinatorV2Plus.CallOpts) +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusCallerSession) COORDINATOR() (common.Address, error) { + return _BatchVRFCoordinatorV2Plus.Contract.COORDINATOR(&_BatchVRFCoordinatorV2Plus.CallOpts) +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusTransactor) FulfillRandomWords(opts *bind.TransactOpts, proofs []VRFTypesProof, rcs []VRFTypesRequestCommitmentV2Plus) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2Plus.contract.Transact(opts, "fulfillRandomWords", proofs, rcs) +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusSession) FulfillRandomWords(proofs []VRFTypesProof, rcs []VRFTypesRequestCommitmentV2Plus) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2Plus.Contract.FulfillRandomWords(&_BatchVRFCoordinatorV2Plus.TransactOpts, proofs, rcs) +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusTransactorSession) FulfillRandomWords(proofs []VRFTypesProof, rcs []VRFTypesRequestCommitmentV2Plus) (*types.Transaction, error) { + return _BatchVRFCoordinatorV2Plus.Contract.FulfillRandomWords(&_BatchVRFCoordinatorV2Plus.TransactOpts, proofs, rcs) +} + +type BatchVRFCoordinatorV2PlusErrorReturnedIterator struct { + Event *BatchVRFCoordinatorV2PlusErrorReturned + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*BatchVRFCoordinatorV2PlusErrorReturnedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BatchVRFCoordinatorV2PlusErrorReturned) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BatchVRFCoordinatorV2PlusErrorReturned) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BatchVRFCoordinatorV2PlusErrorReturnedIterator) Error() error { + return it.fail +} + +func (it *BatchVRFCoordinatorV2PlusErrorReturnedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BatchVRFCoordinatorV2PlusErrorReturned struct { + RequestId *big.Int + Reason string + Raw types.Log +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusFilterer) FilterErrorReturned(opts *bind.FilterOpts, requestId []*big.Int) (*BatchVRFCoordinatorV2PlusErrorReturnedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _BatchVRFCoordinatorV2Plus.contract.FilterLogs(opts, "ErrorReturned", requestIdRule) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2PlusErrorReturnedIterator{contract: _BatchVRFCoordinatorV2Plus.contract, event: "ErrorReturned", logs: logs, sub: sub}, nil +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusFilterer) WatchErrorReturned(opts *bind.WatchOpts, sink chan<- *BatchVRFCoordinatorV2PlusErrorReturned, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + 
requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _BatchVRFCoordinatorV2Plus.contract.WatchLogs(opts, "ErrorReturned", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BatchVRFCoordinatorV2PlusErrorReturned) + if err := _BatchVRFCoordinatorV2Plus.contract.UnpackLog(event, "ErrorReturned", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusFilterer) ParseErrorReturned(log types.Log) (*BatchVRFCoordinatorV2PlusErrorReturned, error) { + event := new(BatchVRFCoordinatorV2PlusErrorReturned) + if err := _BatchVRFCoordinatorV2Plus.contract.UnpackLog(event, "ErrorReturned", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BatchVRFCoordinatorV2PlusRawErrorReturnedIterator struct { + Event *BatchVRFCoordinatorV2PlusRawErrorReturned + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BatchVRFCoordinatorV2PlusRawErrorReturnedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BatchVRFCoordinatorV2PlusRawErrorReturned) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BatchVRFCoordinatorV2PlusRawErrorReturned) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return 
true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BatchVRFCoordinatorV2PlusRawErrorReturnedIterator) Error() error { + return it.fail +} + +func (it *BatchVRFCoordinatorV2PlusRawErrorReturnedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BatchVRFCoordinatorV2PlusRawErrorReturned struct { + RequestId *big.Int + LowLevelData []byte + Raw types.Log +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusFilterer) FilterRawErrorReturned(opts *bind.FilterOpts, requestId []*big.Int) (*BatchVRFCoordinatorV2PlusRawErrorReturnedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _BatchVRFCoordinatorV2Plus.contract.FilterLogs(opts, "RawErrorReturned", requestIdRule) + if err != nil { + return nil, err + } + return &BatchVRFCoordinatorV2PlusRawErrorReturnedIterator{contract: _BatchVRFCoordinatorV2Plus.contract, event: "RawErrorReturned", logs: logs, sub: sub}, nil +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusFilterer) WatchRawErrorReturned(opts *bind.WatchOpts, sink chan<- *BatchVRFCoordinatorV2PlusRawErrorReturned, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _BatchVRFCoordinatorV2Plus.contract.WatchLogs(opts, "RawErrorReturned", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BatchVRFCoordinatorV2PlusRawErrorReturned) + if err := _BatchVRFCoordinatorV2Plus.contract.UnpackLog(event, "RawErrorReturned", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + 
return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2PlusFilterer) ParseRawErrorReturned(log types.Log) (*BatchVRFCoordinatorV2PlusRawErrorReturned, error) { + event := new(BatchVRFCoordinatorV2PlusRawErrorReturned) + if err := _BatchVRFCoordinatorV2Plus.contract.UnpackLog(event, "RawErrorReturned", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2Plus) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _BatchVRFCoordinatorV2Plus.abi.Events["ErrorReturned"].ID: + return _BatchVRFCoordinatorV2Plus.ParseErrorReturned(log) + case _BatchVRFCoordinatorV2Plus.abi.Events["RawErrorReturned"].ID: + return _BatchVRFCoordinatorV2Plus.ParseRawErrorReturned(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (BatchVRFCoordinatorV2PlusErrorReturned) Topic() common.Hash { + return common.HexToHash("0x4dcab4ce0e741a040f7e0f9b880557f8de685a9520d4bfac272a81c3c3802b2e") +} + +func (BatchVRFCoordinatorV2PlusRawErrorReturned) Topic() common.Hash { + return common.HexToHash("0xbfd42bb5a1bf8153ea750f66ea4944f23f7b9ae51d0462177b9769aa652b61b5") +} + +func (_BatchVRFCoordinatorV2Plus *BatchVRFCoordinatorV2Plus) Address() common.Address { + return _BatchVRFCoordinatorV2Plus.address +} + +type BatchVRFCoordinatorV2PlusInterface interface { + COORDINATOR(opts *bind.CallOpts) (common.Address, error) + + FulfillRandomWords(opts *bind.TransactOpts, proofs []VRFTypesProof, rcs []VRFTypesRequestCommitmentV2Plus) (*types.Transaction, error) + + FilterErrorReturned(opts *bind.FilterOpts, requestId []*big.Int) (*BatchVRFCoordinatorV2PlusErrorReturnedIterator, error) + + WatchErrorReturned(opts *bind.WatchOpts, sink chan<- *BatchVRFCoordinatorV2PlusErrorReturned, requestId 
[]*big.Int) (event.Subscription, error) + + ParseErrorReturned(log types.Log) (*BatchVRFCoordinatorV2PlusErrorReturned, error) + + FilterRawErrorReturned(opts *bind.FilterOpts, requestId []*big.Int) (*BatchVRFCoordinatorV2PlusRawErrorReturnedIterator, error) + + WatchRawErrorReturned(opts *bind.WatchOpts, sink chan<- *BatchVRFCoordinatorV2PlusRawErrorReturned, requestId []*big.Int) (event.Subscription, error) + + ParseRawErrorReturned(log types.Log) (*BatchVRFCoordinatorV2PlusRawErrorReturned, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/blockhash_store/blockhash_store.go b/core/gethwrappers/generated/blockhash_store/blockhash_store.go new file mode 100644 index 00000000..e43f9f45 --- /dev/null +++ b/core/gethwrappers/generated/blockhash_store/blockhash_store.go @@ -0,0 +1,244 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package blockhash_store + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var BlockhashStoreMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"}],\"name\":\"getBlockhash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"}],\"name\":\"store\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"storeEarliest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"header\",\"type\":\"bytes\"}],\"name\":\"storeVerifyHeader\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b506105d3806100206000396000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c80636057361d1461005157806383b6d6b714610066578063e9413d381461006e578063fadff0e114610093575b600080fd5b61006461005f366004610447565b6100a6565b005b610064610131565b61008161007c366004610447565b61014b565b60405190815260200160405180910390f35b6100646100a1366004610460565b6101c7565b60006100b182610269565b90508061011f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f626c6f636b68617368286e29206661696c65640000000000000000000000000060448201526064015b60405180910390fd5b60009182526020829052604090912055565b61014961010061013f61036e565b61005f9190610551565b565b600081815260208190526040812054806101c1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f626c6f636b68617368206e6f7420666f756e6420696e2073746f7265000000006044820152606401610116565b92915050565b6000806101d5846001610539565b815260200190815260200160002054818051906020012014610253576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f6865616465722068617320756e6b6e6f776e206
26c6f636b68617368000000006044820152606401610116565b6024015160009182526020829052604090912055565b6000466102758161040b565b1561035e576101008367ffffffffffffffff1661029061036e565b61029a9190610551565b11806102b757506102a961036e565b8367ffffffffffffffff1610155b156102c55750600092915050565b6040517f2b407a8200000000000000000000000000000000000000000000000000000000815267ffffffffffffffff84166004820152606490632b407a829060240160206040518083038186803b15801561031f57600080fd5b505afa158015610333573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610357919061042e565b9392505050565b505067ffffffffffffffff164090565b60004661037a8161040b565b1561040457606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b1580156103c657600080fd5b505afa1580156103da573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103fe919061042e565b91505090565b4391505090565b600061a4b182148061041f575062066eed82145b806101c157505062066eee1490565b60006020828403121561044057600080fd5b5051919050565b60006020828403121561045957600080fd5b5035919050565b6000806040838503121561047357600080fd5b82359150602083013567ffffffffffffffff8082111561049257600080fd5b818501915085601f8301126104a657600080fd5b8135818111156104b8576104b8610597565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156104fe576104fe610597565b8160405282815288602084870101111561051757600080fd5b8260208601602083013760006020848301015280955050505050509250929050565b6000821982111561054c5761054c610568565b500190565b60008282101561056357610563610568565b500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var BlockhashStoreABI = BlockhashStoreMetaData.ABI + +var BlockhashStoreBin = BlockhashStoreMetaData.Bin + +func DeployBlockhashStore(auth 
*bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *BlockhashStore, error) { + parsed, err := BlockhashStoreMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(BlockhashStoreBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &BlockhashStore{address: address, abi: *parsed, BlockhashStoreCaller: BlockhashStoreCaller{contract: contract}, BlockhashStoreTransactor: BlockhashStoreTransactor{contract: contract}, BlockhashStoreFilterer: BlockhashStoreFilterer{contract: contract}}, nil +} + +type BlockhashStore struct { + address common.Address + abi abi.ABI + BlockhashStoreCaller + BlockhashStoreTransactor + BlockhashStoreFilterer +} + +type BlockhashStoreCaller struct { + contract *bind.BoundContract +} + +type BlockhashStoreTransactor struct { + contract *bind.BoundContract +} + +type BlockhashStoreFilterer struct { + contract *bind.BoundContract +} + +type BlockhashStoreSession struct { + Contract *BlockhashStore + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type BlockhashStoreCallerSession struct { + Contract *BlockhashStoreCaller + CallOpts bind.CallOpts +} + +type BlockhashStoreTransactorSession struct { + Contract *BlockhashStoreTransactor + TransactOpts bind.TransactOpts +} + +type BlockhashStoreRaw struct { + Contract *BlockhashStore +} + +type BlockhashStoreCallerRaw struct { + Contract *BlockhashStoreCaller +} + +type BlockhashStoreTransactorRaw struct { + Contract *BlockhashStoreTransactor +} + +func NewBlockhashStore(address common.Address, backend bind.ContractBackend) (*BlockhashStore, error) { + abi, err := abi.JSON(strings.NewReader(BlockhashStoreABI)) + if err != nil { + return nil, err + } + contract, err := bindBlockhashStore(address, backend, 
backend, backend) + if err != nil { + return nil, err + } + return &BlockhashStore{address: address, abi: abi, BlockhashStoreCaller: BlockhashStoreCaller{contract: contract}, BlockhashStoreTransactor: BlockhashStoreTransactor{contract: contract}, BlockhashStoreFilterer: BlockhashStoreFilterer{contract: contract}}, nil +} + +func NewBlockhashStoreCaller(address common.Address, caller bind.ContractCaller) (*BlockhashStoreCaller, error) { + contract, err := bindBlockhashStore(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &BlockhashStoreCaller{contract: contract}, nil +} + +func NewBlockhashStoreTransactor(address common.Address, transactor bind.ContractTransactor) (*BlockhashStoreTransactor, error) { + contract, err := bindBlockhashStore(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &BlockhashStoreTransactor{contract: contract}, nil +} + +func NewBlockhashStoreFilterer(address common.Address, filterer bind.ContractFilterer) (*BlockhashStoreFilterer, error) { + contract, err := bindBlockhashStore(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &BlockhashStoreFilterer{contract: contract}, nil +} + +func bindBlockhashStore(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := BlockhashStoreMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_BlockhashStore *BlockhashStoreRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BlockhashStore.Contract.BlockhashStoreCaller.contract.Call(opts, result, method, params...) 
+} + +func (_BlockhashStore *BlockhashStoreRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BlockhashStore.Contract.BlockhashStoreTransactor.contract.Transfer(opts) +} + +func (_BlockhashStore *BlockhashStoreRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BlockhashStore.Contract.BlockhashStoreTransactor.contract.Transact(opts, method, params...) +} + +func (_BlockhashStore *BlockhashStoreCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BlockhashStore.Contract.contract.Call(opts, result, method, params...) +} + +func (_BlockhashStore *BlockhashStoreTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BlockhashStore.Contract.contract.Transfer(opts) +} + +func (_BlockhashStore *BlockhashStoreTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BlockhashStore.Contract.contract.Transact(opts, method, params...) 
+} + +func (_BlockhashStore *BlockhashStoreCaller) GetBlockhash(opts *bind.CallOpts, n *big.Int) ([32]byte, error) { + var out []interface{} + err := _BlockhashStore.contract.Call(opts, &out, "getBlockhash", n) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_BlockhashStore *BlockhashStoreSession) GetBlockhash(n *big.Int) ([32]byte, error) { + return _BlockhashStore.Contract.GetBlockhash(&_BlockhashStore.CallOpts, n) +} + +func (_BlockhashStore *BlockhashStoreCallerSession) GetBlockhash(n *big.Int) ([32]byte, error) { + return _BlockhashStore.Contract.GetBlockhash(&_BlockhashStore.CallOpts, n) +} + +func (_BlockhashStore *BlockhashStoreTransactor) Store(opts *bind.TransactOpts, n *big.Int) (*types.Transaction, error) { + return _BlockhashStore.contract.Transact(opts, "store", n) +} + +func (_BlockhashStore *BlockhashStoreSession) Store(n *big.Int) (*types.Transaction, error) { + return _BlockhashStore.Contract.Store(&_BlockhashStore.TransactOpts, n) +} + +func (_BlockhashStore *BlockhashStoreTransactorSession) Store(n *big.Int) (*types.Transaction, error) { + return _BlockhashStore.Contract.Store(&_BlockhashStore.TransactOpts, n) +} + +func (_BlockhashStore *BlockhashStoreTransactor) StoreEarliest(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BlockhashStore.contract.Transact(opts, "storeEarliest") +} + +func (_BlockhashStore *BlockhashStoreSession) StoreEarliest() (*types.Transaction, error) { + return _BlockhashStore.Contract.StoreEarliest(&_BlockhashStore.TransactOpts) +} + +func (_BlockhashStore *BlockhashStoreTransactorSession) StoreEarliest() (*types.Transaction, error) { + return _BlockhashStore.Contract.StoreEarliest(&_BlockhashStore.TransactOpts) +} + +func (_BlockhashStore *BlockhashStoreTransactor) StoreVerifyHeader(opts *bind.TransactOpts, n *big.Int, header []byte) (*types.Transaction, error) { + return 
_BlockhashStore.contract.Transact(opts, "storeVerifyHeader", n, header) +} + +func (_BlockhashStore *BlockhashStoreSession) StoreVerifyHeader(n *big.Int, header []byte) (*types.Transaction, error) { + return _BlockhashStore.Contract.StoreVerifyHeader(&_BlockhashStore.TransactOpts, n, header) +} + +func (_BlockhashStore *BlockhashStoreTransactorSession) StoreVerifyHeader(n *big.Int, header []byte) (*types.Transaction, error) { + return _BlockhashStore.Contract.StoreVerifyHeader(&_BlockhashStore.TransactOpts, n, header) +} + +func (_BlockhashStore *BlockhashStore) Address() common.Address { + return _BlockhashStore.address +} + +type BlockhashStoreInterface interface { + GetBlockhash(opts *bind.CallOpts, n *big.Int) ([32]byte, error) + + Store(opts *bind.TransactOpts, n *big.Int) (*types.Transaction, error) + + StoreEarliest(opts *bind.TransactOpts) (*types.Transaction, error) + + StoreVerifyHeader(opts *bind.TransactOpts, n *big.Int, header []byte) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/chain_reader_example/chain_reader_example.go b/core/gethwrappers/generated/chain_reader_example/chain_reader_example.go new file mode 100644 index 00000000..0d73686c --- /dev/null +++ b/core/gethwrappers/generated/chain_reader_example/chain_reader_example.go @@ -0,0 +1,830 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package chain_reader_example + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type InnerTestStruct struct { + IntVal int64 + S string +} + +type MidLevelTestStruct struct { + FixedBytes [2]byte + Inner InnerTestStruct +} + +type TestStruct struct { + Field int32 + DifferentField string + OracleId uint8 + OracleIds [32]uint8 + Account common.Address + Accounts []common.Address + BigField *big.Int + NestedStruct MidLevelTestStruct +} + +var LatestValueHolderMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"indexed\":false,\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"Triggered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"string\",\"name\":\"fieldHash\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"field\",\"type\":\"string\"}],\"name\":\"TriggeredEventWithDynamicTopic\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field1\",\"type\":\"int32\"},{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field2\",\"type\":\"int32\"},{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field3\",\"type\":\"int32\"}],\"name\":\"TriggeredWithFourTopics\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\
"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"addTestStruct\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDifferentPrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"i\",\"type\":\"uint256\"}],\"name\":\"getElementAtIndex\",\"outputs\":[{\"components\":[{\"internalType\":\"int32\",\"name\":\"Field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"DifferentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"OracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"OracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"BigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalTyp
e\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"NestedStruct\",\"type\":\"tuple\"}],\"internalType\":\"structTestStruct\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getSliceValue\",\"outputs\":[{\"internalType\":\"uint64[]\",\"name\":\"\",\"type\":\"uint64[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"returnSeen\",\"outputs\":[{\"components\":[{\"internalType\":\"int32\",\"name\":\"Field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"DifferentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"OracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"OracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"ad
dress\"},{\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"BigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"NestedStruct\",\"type\":\"tuple\"}],\"internalType\":\"structTestStruct\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"triggerEvent\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"field\",\"type\":\"string\"}],\"name\":\"triggerEventWithDynamicTopic\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"f
ield1\",\"type\":\"int32\"},{\"internalType\":\"int32\",\"name\":\"field2\",\"type\":\"int32\"},{\"internalType\":\"int32\",\"name\":\"field3\",\"type\":\"int32\"}],\"name\":\"triggerWithFourTopics\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b50600180548082018255600082905260048082047fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6908101805460086003958616810261010090810a8088026001600160401b0391820219909416939093179093558654808801909755848704909301805496909516909202900a91820291021990921691909117905561176c806100a96000396000f3fe608060405234801561001057600080fd5b50600436106100a35760003560e01c80637f002d6711610076578063dbfd73321161005b578063dbfd73321461013e578063ef4e1ced14610151578063f6f871c81461015857600080fd5b80637f002d671461010e578063ab5e0b381461012157600080fd5b80632c45576f146100a85780633272b66c146100d157806349eac2ac146100e6578063679004a4146100f9575b600080fd5b6100bb6100b6366004610baa565b61016b565b6040516100c89190610d09565b60405180910390f35b6100e46100df366004610e48565b610446565b005b6100e46100f4366004610f5d565b61049b565b61010161079e565b6040516100c8919061104f565b6100e461011c366004610f5d565b61082a565b6107c65b60405167ffffffffffffffff90911681526020016100c8565b6100e461014c36600461109d565b610881565b6003610125565b6100bb610166366004610f5d565b6108be565b6101736109c7565b60006101806001846110e0565b815481106101905761019061111a565b6000918252602091829020604080516101008101909152600a90920201805460030b825260018101805492939192918401916101cb90611149565b80601f01602080910402602001604051908101604052809291908181526020018280546101f790611149565b80156102445780601f1061021957610100808354040283529160200191610244565b820191906000526020600020905b81548152906001019060200180831161022757829003601f168201915b5050509183525050600282015460ff166020808301919091526040805161040081018083529190930192916003850191826000855b825461010083900a900460ff168152602060019283018181049485019490930390920291018084116102795750505092845250505060
0482015473ffffffffffffffffffffffffffffffffffffffff16602080830191909152600583018054604080518285028101850182528281529401939283018282801561033257602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610307575b5050509183525050600682015460170b6020808301919091526040805180820182526007808601805460f01b7fffff0000000000000000000000000000000000000000000000000000000000001683528351808501855260088801805490930b815260098801805495909701969395919486830194919392840191906103b790611149565b80601f01602080910402602001604051908101604052809291908181526020018280546103e390611149565b80156104305780601f1061040557610100808354040283529160200191610430565b820191906000526020600020905b81548152906001019060200180831161041357829003601f168201915b5050509190925250505090525090525092915050565b8181604051610456929190611196565b60405180910390207f3d969732b1bbbb9f1d7eb9f3f14e4cb50a74d950b3ef916a397b85dfbab93c67838360405161048f9291906111ef565b60405180910390a25050565b60006040518061010001604052808c60030b81526020018b8b8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525060ff8a166020808301919091526040805161040081810183529190930192918b9183908390808284376000920191909152505050815273ffffffffffffffffffffffffffffffffffffffff8816602080830191909152604080518883028181018401835289825291909301929189918991829190850190849080828437600092019190915250505090825250601785900b602082015260400161058d846112ec565b905281546001808201845560009384526020938490208351600a9093020180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff9093169290921782559282015191929091908201906105f39082611446565b5060408201516002820180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff90921691909117905560608201516106419060038301906020610a16565b5060808201516004820180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff90921691909117905560a082015180516106a8
916005840191602090910190610aa9565b5060c08201516006820180547fffffffffffffffff0000000000000000000000000000000000000000000000001677ffffffffffffffffffffffffffffffffffffffffffffffff90921691909117905560e082015180516007830180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001660f09290921c91909117815560208083015180516008860180547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff90921691909117815591810151909190600986019061078b9082611446565b5050505050505050505050505050505050565b6060600180548060200260200160405190810160405280929190818152602001828054801561082057602002820191906000526020600020906000905b82829054906101000a900467ffffffffffffffff1667ffffffffffffffff16815260200190600801906020826007010492830192600103820291508084116107db5790505b5050505050905090565b8960030b7f7188419dcd8b51877b71766f075f3626586c0ff190e7d056aa65ce9acb649a3d8a8a8a8a8a8a8a8a8a60405161086d999897969594939291906116a5565b60405180910390a250505050505050505050565b8060030b8260030b8460030b7f91c80dc390f3d041b3a04b0099b19634499541ea26972250986ee4b24a12fac560405160405180910390a4505050565b6108c66109c7565b6040518061010001604052808c60030b81526020018b8b8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525060ff8a166020808301919091526040805161040081810183529190930192918b9183908390808284376000920191909152505050815273ffffffffffffffffffffffffffffffffffffffff8816602080830191909152604080518883028181018401835289825291909301929189918991829190850190849080828437600092019190915250505090825250601785900b60208201526040016109b6846112ec565b90529b9a5050505050505050505050565b60408051610100810182526000808252606060208301819052928201529081016109ef610b23565b8152600060208201819052606060408301819052820152608001610a11610b42565b905290565b600183019183908215610a995791602002820160005b83821115610a6a57835183826101000a81548160ff021916908360ff1602179055509260200192600101602081600001049283019260010302610a2c565b8015610a975782816101000a81549060ff02191690
55600101602081600001049283019260010302610a6a565b505b50610aa5929150610b95565b5090565b828054828255906000526020600020908101928215610a99579160200282015b82811115610a9957825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190610ac9565b6040518061040001604052806020906020820280368337509192915050565b604051806040016040528060007dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19168152602001610a116040518060400160405280600060070b8152602001606081525090565b5b80821115610aa55760008155600101610b96565b600060208284031215610bbc57600080fd5b5035919050565b6000815180845260005b81811015610be957602081850181015186830182015201610bcd565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b8060005b6020808210610c3a5750610c51565b825160ff1685529384019390910190600101610c2b565b50505050565b600081518084526020808501945080840160005b83811015610c9d57815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101610c6b565b509495945050505050565b7fffff00000000000000000000000000000000000000000000000000000000000081511682526000602082015160406020850152805160070b60408501526020810151905060406060850152610d016080850182610bc3565b949350505050565b60208152610d1d60208201835160030b9052565b600060208301516104e0806040850152610d3b610500850183610bc3565b91506040850151610d51606086018260ff169052565b506060850151610d646080860182610c27565b50608085015173ffffffffffffffffffffffffffffffffffffffff1661048085015260a08501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe085840381016104a0870152610dc18483610c57565b935060c08701519150610dda6104c087018360170b9052565b60e0870151915080868503018387015250610df58382610ca8565b9695505050505050565b60008083601f840112610e1157600080fd5b50813567ffffffffffffffff811115610e2957600080fd5b602083019150836020828501011115610e4157600080fd5b9250929050565b60008060208385031215610e5b57600080fd5b823567ffffffffffffffff81
1115610e7257600080fd5b610e7e85828601610dff565b90969095509350505050565b8035600381900b8114610e9c57600080fd5b919050565b803560ff81168114610e9c57600080fd5b806104008101831015610ec457600080fd5b92915050565b803573ffffffffffffffffffffffffffffffffffffffff81168114610e9c57600080fd5b60008083601f840112610f0057600080fd5b50813567ffffffffffffffff811115610f1857600080fd5b6020830191508360208260051b8501011115610e4157600080fd5b8035601781900b8114610e9c57600080fd5b600060408284031215610f5757600080fd5b50919050565b6000806000806000806000806000806104e08b8d031215610f7d57600080fd5b610f868b610e8a565b995060208b013567ffffffffffffffff80821115610fa357600080fd5b610faf8e838f01610dff565b909b509950899150610fc360408e01610ea1565b9850610fd28e60608f01610eb2565b9750610fe16104608e01610eca565b96506104808d0135915080821115610ff857600080fd5b6110048e838f01610eee565b90965094508491506110196104a08e01610f33565b93506104c08d013591508082111561103057600080fd5b5061103d8d828e01610f45565b9150509295989b9194979a5092959850565b6020808252825182820181905260009190848201906040850190845b8181101561109157835167ffffffffffffffff168352928401929184019160010161106b565b50909695505050505050565b6000806000606084860312156110b257600080fd5b6110bb84610e8a565b92506110c960208501610e8a565b91506110d760408501610e8a565b90509250925092565b81810381811115610ec4577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600181811c9082168061115d57607f821691505b602082108103610f57577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b8183823760009101908152919050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b602081526000610d016020830184866111a6565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040805190810167ffffffffffffffff81118282101715611255576112
55611203565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156112a2576112a2611203565b604052919050565b80357fffff00000000000000000000000000000000000000000000000000000000000081168114610e9c57600080fd5b8035600781900b8114610e9c57600080fd5b6000604082360312156112fe57600080fd5b611306611232565b61130f836112aa565b815260208084013567ffffffffffffffff8082111561132d57600080fd5b81860191506040823603121561134257600080fd5b61134a611232565b611353836112da565b8152838301358281111561136657600080fd5b929092019136601f84011261137a57600080fd5b82358281111561138c5761138c611203565b6113bc857fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8401160161125b565b925080835236858286010111156113d257600080fd5b8085850186850137600090830185015280840191909152918301919091525092915050565b601f82111561144157600081815260208120601f850160051c8101602086101561141e5750805b601f850160051c820191505b8181101561143d5782815560010161142a565b5050505b505050565b815167ffffffffffffffff81111561146057611460611203565b6114748161146e8454611149565b846113f7565b602080601f8311600181146114c757600084156114915750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b17855561143d565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b82811015611514578886015182559484019460019091019084016114f5565b508582101561155057878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b8183526000602080850194508260005b85811015610c9d5773ffffffffffffffffffffffffffffffffffffffff61159683610eca565b1687529582019590820190600101611570565b7fffff0000000000000000000000000000000000000000000000000000000000006115d3826112aa565b168252600060208201357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc183360301811261160d57600080fd5b60406020850152820161161f816112da565b60070b604085015260208101357fffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffe182360301811261165c57600080fd5b0160208101903567ffffffffffffffff81111561167857600080fd5b80360382131561168757600080fd5b6040606086015261169c6080860182846111a6565b95945050505050565b60006104c08083526116ba8184018c8e6111a6565b9050602060ff808c1682860152604085018b60005b848110156116f457836116e183610ea1565b16835291840191908401906001016116cf565b505050505073ffffffffffffffffffffffffffffffffffffffff881661044084015282810361046084015261172a818789611560565b905061173c61048084018660170b9052565b8281036104a084015261174f81856115a9565b9c9b50505050505050505050505056fea164736f6c6343000813000a", +} + +var LatestValueHolderABI = LatestValueHolderMetaData.ABI + +var LatestValueHolderBin = LatestValueHolderMetaData.Bin + +func DeployLatestValueHolder(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *LatestValueHolder, error) { + parsed, err := LatestValueHolderMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(LatestValueHolderBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LatestValueHolder{address: address, abi: *parsed, LatestValueHolderCaller: LatestValueHolderCaller{contract: contract}, LatestValueHolderTransactor: LatestValueHolderTransactor{contract: contract}, LatestValueHolderFilterer: LatestValueHolderFilterer{contract: contract}}, nil +} + +type LatestValueHolder struct { + address common.Address + abi abi.ABI + LatestValueHolderCaller + LatestValueHolderTransactor + LatestValueHolderFilterer +} + +type LatestValueHolderCaller struct { + contract *bind.BoundContract +} + +type LatestValueHolderTransactor struct { + contract *bind.BoundContract +} + +type LatestValueHolderFilterer struct { + contract *bind.BoundContract +} + +type 
LatestValueHolderSession struct { + Contract *LatestValueHolder + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type LatestValueHolderCallerSession struct { + Contract *LatestValueHolderCaller + CallOpts bind.CallOpts +} + +type LatestValueHolderTransactorSession struct { + Contract *LatestValueHolderTransactor + TransactOpts bind.TransactOpts +} + +type LatestValueHolderRaw struct { + Contract *LatestValueHolder +} + +type LatestValueHolderCallerRaw struct { + Contract *LatestValueHolderCaller +} + +type LatestValueHolderTransactorRaw struct { + Contract *LatestValueHolderTransactor +} + +func NewLatestValueHolder(address common.Address, backend bind.ContractBackend) (*LatestValueHolder, error) { + abi, err := abi.JSON(strings.NewReader(LatestValueHolderABI)) + if err != nil { + return nil, err + } + contract, err := bindLatestValueHolder(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LatestValueHolder{address: address, abi: abi, LatestValueHolderCaller: LatestValueHolderCaller{contract: contract}, LatestValueHolderTransactor: LatestValueHolderTransactor{contract: contract}, LatestValueHolderFilterer: LatestValueHolderFilterer{contract: contract}}, nil +} + +func NewLatestValueHolderCaller(address common.Address, caller bind.ContractCaller) (*LatestValueHolderCaller, error) { + contract, err := bindLatestValueHolder(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LatestValueHolderCaller{contract: contract}, nil +} + +func NewLatestValueHolderTransactor(address common.Address, transactor bind.ContractTransactor) (*LatestValueHolderTransactor, error) { + contract, err := bindLatestValueHolder(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LatestValueHolderTransactor{contract: contract}, nil +} + +func NewLatestValueHolderFilterer(address common.Address, filterer bind.ContractFilterer) (*LatestValueHolderFilterer, error) { + contract, err := 
bindLatestValueHolder(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LatestValueHolderFilterer{contract: contract}, nil +} + +func bindLatestValueHolder(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LatestValueHolderMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_LatestValueHolder *LatestValueHolderRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LatestValueHolder.Contract.LatestValueHolderCaller.contract.Call(opts, result, method, params...) +} + +func (_LatestValueHolder *LatestValueHolderRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LatestValueHolder.Contract.LatestValueHolderTransactor.contract.Transfer(opts) +} + +func (_LatestValueHolder *LatestValueHolderRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LatestValueHolder.Contract.LatestValueHolderTransactor.contract.Transact(opts, method, params...) +} + +func (_LatestValueHolder *LatestValueHolderCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LatestValueHolder.Contract.contract.Call(opts, result, method, params...) +} + +func (_LatestValueHolder *LatestValueHolderTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LatestValueHolder.Contract.contract.Transfer(opts) +} + +func (_LatestValueHolder *LatestValueHolderTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LatestValueHolder.Contract.contract.Transact(opts, method, params...) 
+} + +func (_LatestValueHolder *LatestValueHolderCaller) GetDifferentPrimitiveValue(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _LatestValueHolder.contract.Call(opts, &out, "getDifferentPrimitiveValue") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_LatestValueHolder *LatestValueHolderSession) GetDifferentPrimitiveValue() (uint64, error) { + return _LatestValueHolder.Contract.GetDifferentPrimitiveValue(&_LatestValueHolder.CallOpts) +} + +func (_LatestValueHolder *LatestValueHolderCallerSession) GetDifferentPrimitiveValue() (uint64, error) { + return _LatestValueHolder.Contract.GetDifferentPrimitiveValue(&_LatestValueHolder.CallOpts) +} + +func (_LatestValueHolder *LatestValueHolderCaller) GetElementAtIndex(opts *bind.CallOpts, i *big.Int) (TestStruct, error) { + var out []interface{} + err := _LatestValueHolder.contract.Call(opts, &out, "getElementAtIndex", i) + + if err != nil { + return *new(TestStruct), err + } + + out0 := *abi.ConvertType(out[0], new(TestStruct)).(*TestStruct) + + return out0, err + +} + +func (_LatestValueHolder *LatestValueHolderSession) GetElementAtIndex(i *big.Int) (TestStruct, error) { + return _LatestValueHolder.Contract.GetElementAtIndex(&_LatestValueHolder.CallOpts, i) +} + +func (_LatestValueHolder *LatestValueHolderCallerSession) GetElementAtIndex(i *big.Int) (TestStruct, error) { + return _LatestValueHolder.Contract.GetElementAtIndex(&_LatestValueHolder.CallOpts, i) +} + +func (_LatestValueHolder *LatestValueHolderCaller) GetPrimitiveValue(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _LatestValueHolder.contract.Call(opts, &out, "getPrimitiveValue") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_LatestValueHolder *LatestValueHolderSession) GetPrimitiveValue() (uint64, error) { + 
return _LatestValueHolder.Contract.GetPrimitiveValue(&_LatestValueHolder.CallOpts) +} + +func (_LatestValueHolder *LatestValueHolderCallerSession) GetPrimitiveValue() (uint64, error) { + return _LatestValueHolder.Contract.GetPrimitiveValue(&_LatestValueHolder.CallOpts) +} + +func (_LatestValueHolder *LatestValueHolderCaller) GetSliceValue(opts *bind.CallOpts) ([]uint64, error) { + var out []interface{} + err := _LatestValueHolder.contract.Call(opts, &out, "getSliceValue") + + if err != nil { + return *new([]uint64), err + } + + out0 := *abi.ConvertType(out[0], new([]uint64)).(*[]uint64) + + return out0, err + +} + +func (_LatestValueHolder *LatestValueHolderSession) GetSliceValue() ([]uint64, error) { + return _LatestValueHolder.Contract.GetSliceValue(&_LatestValueHolder.CallOpts) +} + +func (_LatestValueHolder *LatestValueHolderCallerSession) GetSliceValue() ([]uint64, error) { + return _LatestValueHolder.Contract.GetSliceValue(&_LatestValueHolder.CallOpts) +} + +func (_LatestValueHolder *LatestValueHolderCaller) ReturnSeen(opts *bind.CallOpts, field int32, differentField string, oracleId uint8, oracleIds [32]uint8, account common.Address, accounts []common.Address, bigField *big.Int, nestedStruct MidLevelTestStruct) (TestStruct, error) { + var out []interface{} + err := _LatestValueHolder.contract.Call(opts, &out, "returnSeen", field, differentField, oracleId, oracleIds, account, accounts, bigField, nestedStruct) + + if err != nil { + return *new(TestStruct), err + } + + out0 := *abi.ConvertType(out[0], new(TestStruct)).(*TestStruct) + + return out0, err + +} + +func (_LatestValueHolder *LatestValueHolderSession) ReturnSeen(field int32, differentField string, oracleId uint8, oracleIds [32]uint8, account common.Address, accounts []common.Address, bigField *big.Int, nestedStruct MidLevelTestStruct) (TestStruct, error) { + return _LatestValueHolder.Contract.ReturnSeen(&_LatestValueHolder.CallOpts, field, differentField, oracleId, oracleIds, account, accounts, 
bigField, nestedStruct) +} + +func (_LatestValueHolder *LatestValueHolderCallerSession) ReturnSeen(field int32, differentField string, oracleId uint8, oracleIds [32]uint8, account common.Address, accounts []common.Address, bigField *big.Int, nestedStruct MidLevelTestStruct) (TestStruct, error) { + return _LatestValueHolder.Contract.ReturnSeen(&_LatestValueHolder.CallOpts, field, differentField, oracleId, oracleIds, account, accounts, bigField, nestedStruct) +} + +func (_LatestValueHolder *LatestValueHolderTransactor) AddTestStruct(opts *bind.TransactOpts, field int32, differentField string, oracleId uint8, oracleIds [32]uint8, account common.Address, accounts []common.Address, bigField *big.Int, nestedStruct MidLevelTestStruct) (*types.Transaction, error) { + return _LatestValueHolder.contract.Transact(opts, "addTestStruct", field, differentField, oracleId, oracleIds, account, accounts, bigField, nestedStruct) +} + +func (_LatestValueHolder *LatestValueHolderSession) AddTestStruct(field int32, differentField string, oracleId uint8, oracleIds [32]uint8, account common.Address, accounts []common.Address, bigField *big.Int, nestedStruct MidLevelTestStruct) (*types.Transaction, error) { + return _LatestValueHolder.Contract.AddTestStruct(&_LatestValueHolder.TransactOpts, field, differentField, oracleId, oracleIds, account, accounts, bigField, nestedStruct) +} + +func (_LatestValueHolder *LatestValueHolderTransactorSession) AddTestStruct(field int32, differentField string, oracleId uint8, oracleIds [32]uint8, account common.Address, accounts []common.Address, bigField *big.Int, nestedStruct MidLevelTestStruct) (*types.Transaction, error) { + return _LatestValueHolder.Contract.AddTestStruct(&_LatestValueHolder.TransactOpts, field, differentField, oracleId, oracleIds, account, accounts, bigField, nestedStruct) +} + +func (_LatestValueHolder *LatestValueHolderTransactor) TriggerEvent(opts *bind.TransactOpts, field int32, differentField string, oracleId uint8, oracleIds 
[32]uint8, account common.Address, accounts []common.Address, bigField *big.Int, nestedStruct MidLevelTestStruct) (*types.Transaction, error) { + return _LatestValueHolder.contract.Transact(opts, "triggerEvent", field, differentField, oracleId, oracleIds, account, accounts, bigField, nestedStruct) +} + +func (_LatestValueHolder *LatestValueHolderSession) TriggerEvent(field int32, differentField string, oracleId uint8, oracleIds [32]uint8, account common.Address, accounts []common.Address, bigField *big.Int, nestedStruct MidLevelTestStruct) (*types.Transaction, error) { + return _LatestValueHolder.Contract.TriggerEvent(&_LatestValueHolder.TransactOpts, field, differentField, oracleId, oracleIds, account, accounts, bigField, nestedStruct) +} + +func (_LatestValueHolder *LatestValueHolderTransactorSession) TriggerEvent(field int32, differentField string, oracleId uint8, oracleIds [32]uint8, account common.Address, accounts []common.Address, bigField *big.Int, nestedStruct MidLevelTestStruct) (*types.Transaction, error) { + return _LatestValueHolder.Contract.TriggerEvent(&_LatestValueHolder.TransactOpts, field, differentField, oracleId, oracleIds, account, accounts, bigField, nestedStruct) +} + +func (_LatestValueHolder *LatestValueHolderTransactor) TriggerEventWithDynamicTopic(opts *bind.TransactOpts, field string) (*types.Transaction, error) { + return _LatestValueHolder.contract.Transact(opts, "triggerEventWithDynamicTopic", field) +} + +func (_LatestValueHolder *LatestValueHolderSession) TriggerEventWithDynamicTopic(field string) (*types.Transaction, error) { + return _LatestValueHolder.Contract.TriggerEventWithDynamicTopic(&_LatestValueHolder.TransactOpts, field) +} + +func (_LatestValueHolder *LatestValueHolderTransactorSession) TriggerEventWithDynamicTopic(field string) (*types.Transaction, error) { + return _LatestValueHolder.Contract.TriggerEventWithDynamicTopic(&_LatestValueHolder.TransactOpts, field) +} + +func (_LatestValueHolder 
*LatestValueHolderTransactor) TriggerWithFourTopics(opts *bind.TransactOpts, field1 int32, field2 int32, field3 int32) (*types.Transaction, error) { + return _LatestValueHolder.contract.Transact(opts, "triggerWithFourTopics", field1, field2, field3) +} + +func (_LatestValueHolder *LatestValueHolderSession) TriggerWithFourTopics(field1 int32, field2 int32, field3 int32) (*types.Transaction, error) { + return _LatestValueHolder.Contract.TriggerWithFourTopics(&_LatestValueHolder.TransactOpts, field1, field2, field3) +} + +func (_LatestValueHolder *LatestValueHolderTransactorSession) TriggerWithFourTopics(field1 int32, field2 int32, field3 int32) (*types.Transaction, error) { + return _LatestValueHolder.Contract.TriggerWithFourTopics(&_LatestValueHolder.TransactOpts, field1, field2, field3) +} + +type LatestValueHolderTriggeredIterator struct { + Event *LatestValueHolderTriggered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LatestValueHolderTriggeredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LatestValueHolderTriggered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LatestValueHolderTriggered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LatestValueHolderTriggeredIterator) Error() error { + return it.fail +} + +func (it *LatestValueHolderTriggeredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LatestValueHolderTriggered struct { + Field int32 + DifferentField string + OracleId 
uint8 + OracleIds [32]uint8 + Account common.Address + Accounts []common.Address + BigField *big.Int + NestedStruct MidLevelTestStruct + Raw types.Log +} + +func (_LatestValueHolder *LatestValueHolderFilterer) FilterTriggered(opts *bind.FilterOpts, field []int32) (*LatestValueHolderTriggeredIterator, error) { + + var fieldRule []interface{} + for _, fieldItem := range field { + fieldRule = append(fieldRule, fieldItem) + } + + logs, sub, err := _LatestValueHolder.contract.FilterLogs(opts, "Triggered", fieldRule) + if err != nil { + return nil, err + } + return &LatestValueHolderTriggeredIterator{contract: _LatestValueHolder.contract, event: "Triggered", logs: logs, sub: sub}, nil +} + +func (_LatestValueHolder *LatestValueHolderFilterer) WatchTriggered(opts *bind.WatchOpts, sink chan<- *LatestValueHolderTriggered, field []int32) (event.Subscription, error) { + + var fieldRule []interface{} + for _, fieldItem := range field { + fieldRule = append(fieldRule, fieldItem) + } + + logs, sub, err := _LatestValueHolder.contract.WatchLogs(opts, "Triggered", fieldRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LatestValueHolderTriggered) + if err := _LatestValueHolder.contract.UnpackLog(event, "Triggered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LatestValueHolder *LatestValueHolderFilterer) ParseTriggered(log types.Log) (*LatestValueHolderTriggered, error) { + event := new(LatestValueHolderTriggered) + if err := _LatestValueHolder.contract.UnpackLog(event, "Triggered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LatestValueHolderTriggeredEventWithDynamicTopicIterator 
struct { + Event *LatestValueHolderTriggeredEventWithDynamicTopic + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LatestValueHolderTriggeredEventWithDynamicTopicIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LatestValueHolderTriggeredEventWithDynamicTopic) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LatestValueHolderTriggeredEventWithDynamicTopic) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LatestValueHolderTriggeredEventWithDynamicTopicIterator) Error() error { + return it.fail +} + +func (it *LatestValueHolderTriggeredEventWithDynamicTopicIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LatestValueHolderTriggeredEventWithDynamicTopic struct { + FieldHash common.Hash + Field string + Raw types.Log +} + +func (_LatestValueHolder *LatestValueHolderFilterer) FilterTriggeredEventWithDynamicTopic(opts *bind.FilterOpts, fieldHash []string) (*LatestValueHolderTriggeredEventWithDynamicTopicIterator, error) { + + var fieldHashRule []interface{} + for _, fieldHashItem := range fieldHash { + fieldHashRule = append(fieldHashRule, fieldHashItem) + } + + logs, sub, err := _LatestValueHolder.contract.FilterLogs(opts, "TriggeredEventWithDynamicTopic", fieldHashRule) + if err != nil { + return nil, err + } + return &LatestValueHolderTriggeredEventWithDynamicTopicIterator{contract: _LatestValueHolder.contract, event: "TriggeredEventWithDynamicTopic", logs: logs, sub: sub}, nil +} + +func 
(_LatestValueHolder *LatestValueHolderFilterer) WatchTriggeredEventWithDynamicTopic(opts *bind.WatchOpts, sink chan<- *LatestValueHolderTriggeredEventWithDynamicTopic, fieldHash []string) (event.Subscription, error) { + + var fieldHashRule []interface{} + for _, fieldHashItem := range fieldHash { + fieldHashRule = append(fieldHashRule, fieldHashItem) + } + + logs, sub, err := _LatestValueHolder.contract.WatchLogs(opts, "TriggeredEventWithDynamicTopic", fieldHashRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LatestValueHolderTriggeredEventWithDynamicTopic) + if err := _LatestValueHolder.contract.UnpackLog(event, "TriggeredEventWithDynamicTopic", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LatestValueHolder *LatestValueHolderFilterer) ParseTriggeredEventWithDynamicTopic(log types.Log) (*LatestValueHolderTriggeredEventWithDynamicTopic, error) { + event := new(LatestValueHolderTriggeredEventWithDynamicTopic) + if err := _LatestValueHolder.contract.UnpackLog(event, "TriggeredEventWithDynamicTopic", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LatestValueHolderTriggeredWithFourTopicsIterator struct { + Event *LatestValueHolderTriggeredWithFourTopics + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LatestValueHolderTriggeredWithFourTopicsIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LatestValueHolderTriggeredWithFourTopics) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != 
nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LatestValueHolderTriggeredWithFourTopics) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LatestValueHolderTriggeredWithFourTopicsIterator) Error() error { + return it.fail +} + +func (it *LatestValueHolderTriggeredWithFourTopicsIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LatestValueHolderTriggeredWithFourTopics struct { + Field1 int32 + Field2 int32 + Field3 int32 + Raw types.Log +} + +func (_LatestValueHolder *LatestValueHolderFilterer) FilterTriggeredWithFourTopics(opts *bind.FilterOpts, field1 []int32, field2 []int32, field3 []int32) (*LatestValueHolderTriggeredWithFourTopicsIterator, error) { + + var field1Rule []interface{} + for _, field1Item := range field1 { + field1Rule = append(field1Rule, field1Item) + } + var field2Rule []interface{} + for _, field2Item := range field2 { + field2Rule = append(field2Rule, field2Item) + } + var field3Rule []interface{} + for _, field3Item := range field3 { + field3Rule = append(field3Rule, field3Item) + } + + logs, sub, err := _LatestValueHolder.contract.FilterLogs(opts, "TriggeredWithFourTopics", field1Rule, field2Rule, field3Rule) + if err != nil { + return nil, err + } + return &LatestValueHolderTriggeredWithFourTopicsIterator{contract: _LatestValueHolder.contract, event: "TriggeredWithFourTopics", logs: logs, sub: sub}, nil +} + +func (_LatestValueHolder *LatestValueHolderFilterer) WatchTriggeredWithFourTopics(opts *bind.WatchOpts, sink chan<- *LatestValueHolderTriggeredWithFourTopics, field1 []int32, field2 []int32, field3 []int32) (event.Subscription, error) { + + var field1Rule []interface{} + for _, field1Item := range field1 
{ + field1Rule = append(field1Rule, field1Item) + } + var field2Rule []interface{} + for _, field2Item := range field2 { + field2Rule = append(field2Rule, field2Item) + } + var field3Rule []interface{} + for _, field3Item := range field3 { + field3Rule = append(field3Rule, field3Item) + } + + logs, sub, err := _LatestValueHolder.contract.WatchLogs(opts, "TriggeredWithFourTopics", field1Rule, field2Rule, field3Rule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LatestValueHolderTriggeredWithFourTopics) + if err := _LatestValueHolder.contract.UnpackLog(event, "TriggeredWithFourTopics", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LatestValueHolder *LatestValueHolderFilterer) ParseTriggeredWithFourTopics(log types.Log) (*LatestValueHolderTriggeredWithFourTopics, error) { + event := new(LatestValueHolderTriggeredWithFourTopics) + if err := _LatestValueHolder.contract.UnpackLog(event, "TriggeredWithFourTopics", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_LatestValueHolder *LatestValueHolder) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _LatestValueHolder.abi.Events["Triggered"].ID: + return _LatestValueHolder.ParseTriggered(log) + case _LatestValueHolder.abi.Events["TriggeredEventWithDynamicTopic"].ID: + return _LatestValueHolder.ParseTriggeredEventWithDynamicTopic(log) + case _LatestValueHolder.abi.Events["TriggeredWithFourTopics"].ID: + return _LatestValueHolder.ParseTriggeredWithFourTopics(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func 
(LatestValueHolderTriggered) Topic() common.Hash { + return common.HexToHash("0x7188419dcd8b51877b71766f075f3626586c0ff190e7d056aa65ce9acb649a3d") +} + +func (LatestValueHolderTriggeredEventWithDynamicTopic) Topic() common.Hash { + return common.HexToHash("0x3d969732b1bbbb9f1d7eb9f3f14e4cb50a74d950b3ef916a397b85dfbab93c67") +} + +func (LatestValueHolderTriggeredWithFourTopics) Topic() common.Hash { + return common.HexToHash("0x91c80dc390f3d041b3a04b0099b19634499541ea26972250986ee4b24a12fac5") +} + +func (_LatestValueHolder *LatestValueHolder) Address() common.Address { + return _LatestValueHolder.address +} + +type LatestValueHolderInterface interface { + GetDifferentPrimitiveValue(opts *bind.CallOpts) (uint64, error) + + GetElementAtIndex(opts *bind.CallOpts, i *big.Int) (TestStruct, error) + + GetPrimitiveValue(opts *bind.CallOpts) (uint64, error) + + GetSliceValue(opts *bind.CallOpts) ([]uint64, error) + + ReturnSeen(opts *bind.CallOpts, field int32, differentField string, oracleId uint8, oracleIds [32]uint8, account common.Address, accounts []common.Address, bigField *big.Int, nestedStruct MidLevelTestStruct) (TestStruct, error) + + AddTestStruct(opts *bind.TransactOpts, field int32, differentField string, oracleId uint8, oracleIds [32]uint8, account common.Address, accounts []common.Address, bigField *big.Int, nestedStruct MidLevelTestStruct) (*types.Transaction, error) + + TriggerEvent(opts *bind.TransactOpts, field int32, differentField string, oracleId uint8, oracleIds [32]uint8, account common.Address, accounts []common.Address, bigField *big.Int, nestedStruct MidLevelTestStruct) (*types.Transaction, error) + + TriggerEventWithDynamicTopic(opts *bind.TransactOpts, field string) (*types.Transaction, error) + + TriggerWithFourTopics(opts *bind.TransactOpts, field1 int32, field2 int32, field3 int32) (*types.Transaction, error) + + FilterTriggered(opts *bind.FilterOpts, field []int32) (*LatestValueHolderTriggeredIterator, error) + + WatchTriggered(opts 
*bind.WatchOpts, sink chan<- *LatestValueHolderTriggered, field []int32) (event.Subscription, error) + + ParseTriggered(log types.Log) (*LatestValueHolderTriggered, error) + + FilterTriggeredEventWithDynamicTopic(opts *bind.FilterOpts, fieldHash []string) (*LatestValueHolderTriggeredEventWithDynamicTopicIterator, error) + + WatchTriggeredEventWithDynamicTopic(opts *bind.WatchOpts, sink chan<- *LatestValueHolderTriggeredEventWithDynamicTopic, fieldHash []string) (event.Subscription, error) + + ParseTriggeredEventWithDynamicTopic(log types.Log) (*LatestValueHolderTriggeredEventWithDynamicTopic, error) + + FilterTriggeredWithFourTopics(opts *bind.FilterOpts, field1 []int32, field2 []int32, field3 []int32) (*LatestValueHolderTriggeredWithFourTopicsIterator, error) + + WatchTriggeredWithFourTopics(opts *bind.WatchOpts, sink chan<- *LatestValueHolderTriggeredWithFourTopics, field1 []int32, field2 []int32, field3 []int32) (event.Subscription, error) + + ParseTriggeredWithFourTopics(log types.Log) (*LatestValueHolderTriggeredWithFourTopics, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/chain_specific_util_helper/chain_specific_util_helper.go b/core/gethwrappers/generated/chain_specific_util_helper/chain_specific_util_helper.go new file mode 100644 index 00000000..b32c3d18 --- /dev/null +++ b/core/gethwrappers/generated/chain_specific_util_helper/chain_specific_util_helper.go @@ -0,0 +1,274 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package chain_specific_util_helper + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var ChainSpecificUtilHelperMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"getBlockNumber\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"blockNumber\",\"type\":\"uint64\"}],\"name\":\"getBlockhash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"txCallData\",\"type\":\"string\"}],\"name\":\"getCurrentTxL1GasFees\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"calldataSize\",\"type\":\"uint256\"}],\"name\":\"getL1CalldataGasCost\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610c1a806100206000396000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c806342cbb15c1461005157806397e329a91461006b578063b778b1121461007e578063da9027ef14610091575b600080fd5b6100596100a4565b60405190815260200160405180910390f35b6100596100793660046108bf565b6100b3565b61005961008c36600461085c565b6100c4565b61005961009f36600461078d565b6100cf565b60006100ae6100da565b905090565b60006100be82610177565b92915050565b60006100be8261027d565b60006100be82610355565b6000466100e681610441565b1561017057606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b15801561013257600080fd5b505afa158015610146573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061016a9190610774565b91505090565b4391505090565b60004661018381610441565b1561026d576101008367ffffffffffffffff1661019e6100da565b6101a89190610b20565b11806101c557506101b76100da565b8367ffffffffffffffff1610155b156101d35750600092915050565b6040517f2b407a8200000000000000000000000000000000000000000000000000000000815267ffffffffffffffff84166004820152606490632b407a82906024015b60206040518083038186803b15801561022e57600080fd5b505afa158015610242573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906102669190610774565b9392505050565b505067ffffffffffffffff164090565b60004661028981610441565b15610335576000606c73ffffffffffffffffffffffffffffffffffffffff166341b247a86040518163ffffffff1660e01b815260040160c06040518083038186803b1580156102d757600080fd5b505afa1580156102eb573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061030f9190610875565b5050505091505083608c6103239190610969565b61032d9082610ae3565b949350505050565b61033e81610464565b1561034c576102668361049e565b50600092915050565b60004661036181610441565b156103ad57606c73ffffffffffffffffffffffffffffffffffffffff1663c6f7de0e6040518163ffffffff1660e01b815260040160206040518083038186803b15801561022e57600080fd5b6103b681610464565b1561034c57734200000000000000000
00000000000000000000f73ffffffffffffffffffffffffffffffffffffffff166349948e0e84604051806080016040528060488152602001610bc6604891396040516020016104169291906108e9565b6040516020818303038152906040526040518263ffffffff1660e01b81526004016102169190610918565b600061a4b1821480610455575062066eed82145b806100be57505062066eee1490565b6000600a82148061047657506101a482145b80610483575062aa37dc82145b8061048f575061210582145b806100be57505062014a331490565b60008073420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff1663519b4bd36040518163ffffffff1660e01b815260040160206040518083038186803b1580156104fb57600080fd5b505afa15801561050f573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906105339190610774565b90506000806105428186610b20565b90506000610551826010610ae3565b61055c846004610ae3565b6105669190610969565b9050600073420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff16630c18c1626040518163ffffffff1660e01b815260040160206040518083038186803b1580156105c457600080fd5b505afa1580156105d8573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906105fc9190610774565b9050600073420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff1663f45e65d86040518163ffffffff1660e01b815260040160206040518083038186803b15801561065a57600080fd5b505afa15801561066e573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906106929190610774565b9050600073420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff1663313ce5676040518163ffffffff1660e01b815260040160206040518083038186803b1580156106f057600080fd5b505afa158015610704573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906107289190610774565b9050600061073782600a610a1d565b9050600081846107478789610969565b610751908c610ae3565b61075b9190610ae3565b6107659190610981565b9b9a5050505050505050505050565b60006020828403121561078657600080fd5b5051919050565b60006020828403121561079f57600080fd5b813567ffffffffffffffff8082111
56107b757600080fd5b818401915084601f8301126107cb57600080fd5b8135818111156107dd576107dd610b96565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190838211818310171561082357610823610b96565b8160405282815287602084870101111561083c57600080fd5b826020860160208301376000928101602001929092525095945050505050565b60006020828403121561086e57600080fd5b5035919050565b60008060008060008060c0878903121561088e57600080fd5b865195506020870151945060408701519350606087015192506080870151915060a087015190509295509295509295565b6000602082840312156108d157600080fd5b813567ffffffffffffffff8116811461026657600080fd5b600083516108fb818460208801610b37565b83519083019061090f818360208801610b37565b01949350505050565b6020815260008251806020840152610937816040850160208701610b37565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b6000821982111561097c5761097c610b67565b500190565b6000826109b7577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b600181815b80851115610a1557817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff048211156109fb576109fb610b67565b80851615610a0857918102915b93841c93908002906109c1565b509250929050565b60006102668383600082610a33575060016100be565b81610a40575060006100be565b8160018114610a565760028114610a6057610a7c565b60019150506100be565b60ff841115610a7157610a71610b67565b50506001821b6100be565b5060208310610133831016604e8410600b8410161715610a9f575081810a6100be565b610aa983836109bc565b807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04821115610adb57610adb610b67565b029392505050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615610b1b57610b1b610b67565b500290565b600082821015610b3257610b32610b67565b500390565b60005b83811015610b52578181015183820152602001610b3a565b83811115610b61576000848401525b50505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5
b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfe307866666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000806000a", +} + +var ChainSpecificUtilHelperABI = ChainSpecificUtilHelperMetaData.ABI + +var ChainSpecificUtilHelperBin = ChainSpecificUtilHelperMetaData.Bin + +func DeployChainSpecificUtilHelper(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ChainSpecificUtilHelper, error) { + parsed, err := ChainSpecificUtilHelperMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(ChainSpecificUtilHelperBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &ChainSpecificUtilHelper{address: address, abi: *parsed, ChainSpecificUtilHelperCaller: ChainSpecificUtilHelperCaller{contract: contract}, ChainSpecificUtilHelperTransactor: ChainSpecificUtilHelperTransactor{contract: contract}, ChainSpecificUtilHelperFilterer: ChainSpecificUtilHelperFilterer{contract: contract}}, nil +} + +type ChainSpecificUtilHelper struct { + address common.Address + abi abi.ABI + ChainSpecificUtilHelperCaller + ChainSpecificUtilHelperTransactor + ChainSpecificUtilHelperFilterer +} + +type ChainSpecificUtilHelperCaller struct { + contract *bind.BoundContract +} + +type ChainSpecificUtilHelperTransactor struct { + contract *bind.BoundContract +} + +type ChainSpecificUtilHelperFilterer struct { + contract *bind.BoundContract +} + +type ChainSpecificUtilHelperSession struct { + Contract *ChainSpecificUtilHelper + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type ChainSpecificUtilHelperCallerSession struct { + Contract *ChainSpecificUtilHelperCaller 
+ CallOpts bind.CallOpts +} + +type ChainSpecificUtilHelperTransactorSession struct { + Contract *ChainSpecificUtilHelperTransactor + TransactOpts bind.TransactOpts +} + +type ChainSpecificUtilHelperRaw struct { + Contract *ChainSpecificUtilHelper +} + +type ChainSpecificUtilHelperCallerRaw struct { + Contract *ChainSpecificUtilHelperCaller +} + +type ChainSpecificUtilHelperTransactorRaw struct { + Contract *ChainSpecificUtilHelperTransactor +} + +func NewChainSpecificUtilHelper(address common.Address, backend bind.ContractBackend) (*ChainSpecificUtilHelper, error) { + abi, err := abi.JSON(strings.NewReader(ChainSpecificUtilHelperABI)) + if err != nil { + return nil, err + } + contract, err := bindChainSpecificUtilHelper(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &ChainSpecificUtilHelper{address: address, abi: abi, ChainSpecificUtilHelperCaller: ChainSpecificUtilHelperCaller{contract: contract}, ChainSpecificUtilHelperTransactor: ChainSpecificUtilHelperTransactor{contract: contract}, ChainSpecificUtilHelperFilterer: ChainSpecificUtilHelperFilterer{contract: contract}}, nil +} + +func NewChainSpecificUtilHelperCaller(address common.Address, caller bind.ContractCaller) (*ChainSpecificUtilHelperCaller, error) { + contract, err := bindChainSpecificUtilHelper(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ChainSpecificUtilHelperCaller{contract: contract}, nil +} + +func NewChainSpecificUtilHelperTransactor(address common.Address, transactor bind.ContractTransactor) (*ChainSpecificUtilHelperTransactor, error) { + contract, err := bindChainSpecificUtilHelper(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ChainSpecificUtilHelperTransactor{contract: contract}, nil +} + +func NewChainSpecificUtilHelperFilterer(address common.Address, filterer bind.ContractFilterer) (*ChainSpecificUtilHelperFilterer, error) { + contract, err := bindChainSpecificUtilHelper(address, nil, 
nil, filterer) + if err != nil { + return nil, err + } + return &ChainSpecificUtilHelperFilterer{contract: contract}, nil +} + +func bindChainSpecificUtilHelper(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ChainSpecificUtilHelperMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ChainSpecificUtilHelper.Contract.ChainSpecificUtilHelperCaller.contract.Call(opts, result, method, params...) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ChainSpecificUtilHelper.Contract.ChainSpecificUtilHelperTransactor.contract.Transfer(opts) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ChainSpecificUtilHelper.Contract.ChainSpecificUtilHelperTransactor.contract.Transact(opts, method, params...) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ChainSpecificUtilHelper.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ChainSpecificUtilHelper.Contract.contract.Transfer(opts) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ChainSpecificUtilHelper.Contract.contract.Transact(opts, method, params...) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperCaller) GetBlockNumber(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _ChainSpecificUtilHelper.contract.Call(opts, &out, "getBlockNumber") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperSession) GetBlockNumber() (*big.Int, error) { + return _ChainSpecificUtilHelper.Contract.GetBlockNumber(&_ChainSpecificUtilHelper.CallOpts) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperCallerSession) GetBlockNumber() (*big.Int, error) { + return _ChainSpecificUtilHelper.Contract.GetBlockNumber(&_ChainSpecificUtilHelper.CallOpts) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperCaller) GetBlockhash(opts *bind.CallOpts, blockNumber uint64) ([32]byte, error) { + var out []interface{} + err := _ChainSpecificUtilHelper.contract.Call(opts, &out, "getBlockhash", blockNumber) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperSession) GetBlockhash(blockNumber uint64) ([32]byte, error) { + return _ChainSpecificUtilHelper.Contract.GetBlockhash(&_ChainSpecificUtilHelper.CallOpts, blockNumber) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperCallerSession) GetBlockhash(blockNumber uint64) ([32]byte, error) { 
+ return _ChainSpecificUtilHelper.Contract.GetBlockhash(&_ChainSpecificUtilHelper.CallOpts, blockNumber) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperCaller) GetCurrentTxL1GasFees(opts *bind.CallOpts, txCallData string) (*big.Int, error) { + var out []interface{} + err := _ChainSpecificUtilHelper.contract.Call(opts, &out, "getCurrentTxL1GasFees", txCallData) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperSession) GetCurrentTxL1GasFees(txCallData string) (*big.Int, error) { + return _ChainSpecificUtilHelper.Contract.GetCurrentTxL1GasFees(&_ChainSpecificUtilHelper.CallOpts, txCallData) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperCallerSession) GetCurrentTxL1GasFees(txCallData string) (*big.Int, error) { + return _ChainSpecificUtilHelper.Contract.GetCurrentTxL1GasFees(&_ChainSpecificUtilHelper.CallOpts, txCallData) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperCaller) GetL1CalldataGasCost(opts *bind.CallOpts, calldataSize *big.Int) (*big.Int, error) { + var out []interface{} + err := _ChainSpecificUtilHelper.contract.Call(opts, &out, "getL1CalldataGasCost", calldataSize) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperSession) GetL1CalldataGasCost(calldataSize *big.Int) (*big.Int, error) { + return _ChainSpecificUtilHelper.Contract.GetL1CalldataGasCost(&_ChainSpecificUtilHelper.CallOpts, calldataSize) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelperCallerSession) GetL1CalldataGasCost(calldataSize *big.Int) (*big.Int, error) { + return _ChainSpecificUtilHelper.Contract.GetL1CalldataGasCost(&_ChainSpecificUtilHelper.CallOpts, calldataSize) +} + +func (_ChainSpecificUtilHelper *ChainSpecificUtilHelper) 
Address() common.Address { + return _ChainSpecificUtilHelper.address +} + +type ChainSpecificUtilHelperInterface interface { + GetBlockNumber(opts *bind.CallOpts) (*big.Int, error) + + GetBlockhash(opts *bind.CallOpts, blockNumber uint64) ([32]byte, error) + + GetCurrentTxL1GasFees(opts *bind.CallOpts, txCallData string) (*big.Int, error) + + GetL1CalldataGasCost(opts *bind.CallOpts, calldataSize *big.Int) (*big.Int, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/consumer_wrapper/consumer_wrapper.go b/core/gethwrappers/generated/consumer_wrapper/consumer_wrapper.go new file mode 100644 index 00000000..ec850d69 --- /dev/null +++ b/core/gethwrappers/generated/consumer_wrapper/consumer_wrapper.go @@ -0,0 +1,915 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package consumer_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var ConsumerMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"_specId\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"PluginCancelled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"PluginFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"PluginRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"price\",\"type\":\"bytes32\"}],\"name\":\"RequestFulfilled\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"}],\"name\":\"addExternalRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_payment\",\"type\":\"uint256\"},{\"internalType\":\"bytes4\",\"name\":\"_callbackFunctionId\",\"type\":\"bytes4\"},{\"internalType\":\"uint256\",\"name\":\"_expiration\",\"type\":\"uint256\"}],\"name\":\"cancelRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"currentPrice\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},
{\"inputs\":[],\"name\":\"currentPriceInt\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_price\",\"type\":\"bytes32\"}],\"name\":\"fulfill\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_price\",\"type\":\"uint256\"}],\"name\":\"fulfillParametersWithCustomURLs\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"_currency\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"_payment\",\"type\":\"uint256\"}],\"name\":\"requestEthereumPrice\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"_urlUSD\",\"type\":\"string\"},{\"internalType\":\"string\",\"name\":\"_pathUSD\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"_payment\",\"type\":\"uint256\"}],\"name\":\"requestMultipleParametersWithCustomURLs\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_specId\",\"type\":\"bytes32\"}],\"name\":\"setSpecID\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawLink\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x6080604052600160045534801561001557600080fd5b5060405161158b38038061158b8339818101604052606081101561003857600080fd5b508051602082015160409092015190919061005283610066565b61005b82610088565b600655506100aa9050565b600280546001600160a01b0319166001600160a01b0392909216919091179055565b600380546001600160a01b0319166001600160a01b0392909216919091179055565b6114d2806100b96000396000f3fe608060405234801561001057600080fd5b50600436106100be5760003560e01c806383db5cbc116100765780639438a6011161005b5780639438a601146103745780639d1b464a1461038e578063e8d5359d14610396576100be565b806383db5cbc146102c45780638dc654a21461036c576100be565b80635591a608116100a75780635591a608146101055780635b8260051461017257806371c2002a14610195576100be565b8063042f2b65146100c3578063501fdd5d146100e8575b600080fd5b6100e6600480360360408110156100d957600080fd5b50803590602001356103cf565b005b6100e6600480360360208110156100fe57600080fd5b50356104dc565b6100e6600480360360a081101561011b57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813516906020810135906040810135907fffffffff0000000000000000000000000000000000000000000000000000000060608201351690608001356104e1565b6100e66004803603604081101561018857600080fd5b50803590602001356105a8565b6100e6600480360360608110156101ab57600080fd5b8101906020810181356401000000008111156101c657600080fd5b8201836020820111156101d857600080fd5b803590602001918460018302840111640100000000831117156101fa57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929594936020810193503591505064010000000081111561024d57600080fd5b82018360208201111561025f57600080fd5b8035906020019184600183028401116401000000008311171561028157600080fd5b91908080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525092955050913592506106b5915050565b6100e6600480360360408110156102da57600080fd5b8101906020810181356401000000008111156102f557600080fd5b82018360208201111561030757600080fd5b8035906020019184600183028401116401000000008311171561032957600080fd5b9190808
0601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550509135925061075e915050565b6100e661086a565b61037c610a34565b60408051918252519081900360200190f35b61037c610a3a565b6100e6600480360360408110156103ac57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135169060200135610a40565b600082815260056020526040902054829073ffffffffffffffffffffffffffffffffffffffff16331461044d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260288152602001806114576028913960400191505060405180910390fd5b60008181526005602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001690555182917f7cc135e0cebb02c3480ae5d74d377283180a2601f8f644edf7987b009316c63a91a2604051829084907f0c2366233f634048c0f0458060d1228fab36d00f7c0ecf6bdf2d9c458503631190600090a35060075550565b600655565b604080517f6ee4d55300000000000000000000000000000000000000000000000000000000815260048101869052602481018590527fffffffff0000000000000000000000000000000000000000000000000000000084166044820152606481018390529051869173ffffffffffffffffffffffffffffffffffffffff831691636ee4d5539160848082019260009290919082900301818387803b15801561058857600080fd5b505af115801561059c573d6000803e3d6000fd5b50505050505050505050565b600082815260056020526040902054829073ffffffffffffffffffffffffffffffffffffffff163314610626576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260288152602001806114576028913960400191505060405180910390fd5b60008181526005602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001690555182917f7cc135e0cebb02c3480ae5d74d377283180a2601f8f644edf7987b009316c63a91a2604051829084907f0c2366233f634048c0f0458060d1228fab36d00f7c0ecf6bdf2d9c458503631190600090a35060085550565b60006106ca600654635b82600560e01b610a4e565b60408051808201909152600681527f75726c5553440000000000000000000000000000000000000000000000000000602082015290915061070d90829086610a74565b604080518082019
09152600781527f7061746855534400000000000000000000000000000000000000000000000000602082015261074d90829085610a74565b6107578183610a97565b5050505050565b600061077360065463042f2b6560e01b610a4e565b90506107cf6040518060400160405280600381526020017f676574000000000000000000000000000000000000000000000000000000000081525060405180608001604052806047815260200161147f60479139839190610a74565b604080516001808252818301909252600091816020015b60608152602001906001900390816107e6579050509050838160008151811061080b57fe5b60200260200101819052506108606040518060400160405280600481526020017f70617468000000000000000000000000000000000000000000000000000000008152508284610ac59092919063ffffffff16565b6107578284610a97565b6000610874610b2d565b90508073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb338373ffffffffffffffffffffffffffffffffffffffff166370a08231306040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156108fa57600080fd5b505afa15801561090e573d6000803e3d6000fd5b505050506040513d602081101561092457600080fd5b5051604080517fffffffff0000000000000000000000000000000000000000000000000000000060e086901b16815273ffffffffffffffffffffffffffffffffffffffff909316600484015260248301919091525160448083019260209291908290030181600087803b15801561099a57600080fd5b505af11580156109ae573d6000803e3d6000fd5b505050506040513d60208110156109c457600080fd5b5051610a3157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f556e61626c6520746f207472616e736665720000000000000000000000000000604482015290519081900360640190fd5b50565b60085481565b60075481565b610a4a8282610b49565b5050565b610a566113e4565b610a5e6113e4565b610a6a81853086610c30565b9150505b92915050565b6080830151610a839083610c9b565b6080830151610a929082610c9b565b505050565b600354600090610abe9073ffffffffffffffffffffffffffffffffffffffff168484610cb2565b9392505050565b6080830151610ad49083610c9b565b610ae18360800151610e4b565b60005b8151811015610b1f57610b17828281518110610afc57fe5b6
0200260200101518560800151610c9b90919063ffffffff16565b600101610ae4565b50610a928360800151610e56565b60025473ffffffffffffffffffffffffffffffffffffffff1690565b600081815260056020526040902054819073ffffffffffffffffffffffffffffffffffffffff1615610bdc57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f5265717565737420697320616c72656164792070656e64696e67000000000000604482015290519081900360640190fd5b50600090815260056020526040902080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b610c386113e4565b610c488560800151610100610e61565b505082845273ffffffffffffffffffffffffffffffffffffffff821660208501527fffffffff0000000000000000000000000000000000000000000000000000000081166040850152835b949350505050565b610ca88260038351610e9b565b610a928282610fb6565b6000806004549050806001016004819055506000634042994660e01b60008087600001513089604001518760018c6080015160000151604051602401808973ffffffffffffffffffffffffffffffffffffffff1681526020018881526020018781526020018673ffffffffffffffffffffffffffffffffffffffff168152602001857bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916815260200184815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b83811015610d9d578181015183820152602001610d85565b50505050905090810190601f168015610dca5780820380516001836020036101000a031916815260200191505b509950505050505050505050604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050509050610e4186838684610fd0565b9695505050505050565b610a318160046111e3565b610a318160076111e3565b610e69611419565b6020820615610e7e5760208206602003820191505b506020828101829052604080518085526000815290920101905290565b60178167ffffffffffffffff1611610ec657610ec08360e0600585901b1683176111f4565b50610a92565b60ff8167ffffffffffffffff1611610f0457610eed836018611fe06005869
01b16176111f4565b50610ec08367ffffffffffffffff8316600161120c565b61ffff8167ffffffffffffffff1611610f4357610f2c836019611fe0600586901b16176111f4565b50610ec08367ffffffffffffffff8316600261120c565b63ffffffff8167ffffffffffffffff1611610f8457610f6d83601a611fe0600586901b16176111f4565b50610ec08367ffffffffffffffff8316600461120c565b610f9983601b611fe0600586901b16176111f4565b50610fb08367ffffffffffffffff8316600861120c565b50505050565b610fbe611419565b610abe83846000015151848551611225565b604080513060601b60208083019190915260348083018790528351808403909101815260549092018084528251928201929092206000818152600590925292812080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff891617905582917fb5e6e01e79f91267dc17b4e6314d5d4d03593d2ceee0fbb452b750bd70ea5af99190a26002546040517f4000aea000000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff87811660048301908152602483018790526060604484019081528651606485015286519290941693634000aea0938a938993899390929091608490910190602085019080838360005b838110156111145781810151838201526020016110fc565b50505050905090810190601f1680156111415780820380516001836020036101000a031916815260200191505b50945050505050602060405180830381600087803b15801561116257600080fd5b505af1158015611176573d6000803e3d6000fd5b505050506040513d602081101561118c57600080fd5b5051610c93576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260238152602001806114346023913960400191505060405180910390fd5b610a9282601f611fe0600585901b16175b6111fc611419565b610abe838460000151518461130d565b611214611419565b610c93848560000151518585611358565b61122d611419565b825182111561123b57600080fd5b84602001518285011115611265576112658561125d87602001518786016113b6565b6002026113cd565b6000808651805187602083010193508088870111156112845787860182525b505050602084015b602084106112c957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909301926020918201910161128c565b5181517
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60208690036101000a019081169019919091161790525083949350505050565b611315611419565b83602001518310611331576113318485602001516002026113cd565b83518051602085830101848153508085141561134e576001810182525b5093949350505050565b611360611419565b8460200151848301111561137d5761137d858584016002026113cd565b60006001836101000a0390508551838682010185831982511617815250805184870111156113ab5783860181525b509495945050505050565b6000818311156113c7575081610a6e565b50919050565b81516113d98383610e61565b50610fb08382610fb6565b6040805160a081018252600080825260208201819052918101829052606081019190915260808101611414611419565b905290565b60405180604001604052806060815260200160008152509056fe756e61626c6520746f207472616e73666572416e6443616c6c20746f206f7261636c65536f75726365206d75737420626520746865206f7261636c65206f6620746865207265717565737468747470733a2f2f6d696e2d6170692e63727970746f636f6d706172652e636f6d2f646174612f70726963653f6673796d3d455448267473796d733d5553442c4555522c4a5059a164736f6c6343000706000a", +} + +var ConsumerABI = ConsumerMetaData.ABI + +var ConsumerBin = ConsumerMetaData.Bin + +func DeployConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address, _oracle common.Address, _specId [32]byte) (common.Address, *types.Transaction, *Consumer, error) { + parsed, err := ConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(ConsumerBin), backend, _link, _oracle, _specId) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Consumer{address: address, abi: *parsed, ConsumerCaller: ConsumerCaller{contract: contract}, ConsumerTransactor: ConsumerTransactor{contract: contract}, ConsumerFilterer: ConsumerFilterer{contract: contract}}, nil +} + +type Consumer struct { + address 
common.Address + abi abi.ABI + ConsumerCaller + ConsumerTransactor + ConsumerFilterer +} + +type ConsumerCaller struct { + contract *bind.BoundContract +} + +type ConsumerTransactor struct { + contract *bind.BoundContract +} + +type ConsumerFilterer struct { + contract *bind.BoundContract +} + +type ConsumerSession struct { + Contract *Consumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type ConsumerCallerSession struct { + Contract *ConsumerCaller + CallOpts bind.CallOpts +} + +type ConsumerTransactorSession struct { + Contract *ConsumerTransactor + TransactOpts bind.TransactOpts +} + +type ConsumerRaw struct { + Contract *Consumer +} + +type ConsumerCallerRaw struct { + Contract *ConsumerCaller +} + +type ConsumerTransactorRaw struct { + Contract *ConsumerTransactor +} + +func NewConsumer(address common.Address, backend bind.ContractBackend) (*Consumer, error) { + abi, err := abi.JSON(strings.NewReader(ConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Consumer{address: address, abi: abi, ConsumerCaller: ConsumerCaller{contract: contract}, ConsumerTransactor: ConsumerTransactor{contract: contract}, ConsumerFilterer: ConsumerFilterer{contract: contract}}, nil +} + +func NewConsumerCaller(address common.Address, caller bind.ContractCaller) (*ConsumerCaller, error) { + contract, err := bindConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ConsumerCaller{contract: contract}, nil +} + +func NewConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*ConsumerTransactor, error) { + contract, err := bindConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ConsumerTransactor{contract: contract}, nil +} + +func NewConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*ConsumerFilterer, error) { + contract, err := 
bindConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ConsumerFilterer{contract: contract}, nil +} + +func bindConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_Consumer *ConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Consumer.Contract.ConsumerCaller.contract.Call(opts, result, method, params...) +} + +func (_Consumer *ConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Consumer.Contract.ConsumerTransactor.contract.Transfer(opts) +} + +func (_Consumer *ConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Consumer.Contract.ConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_Consumer *ConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Consumer.Contract.contract.Call(opts, result, method, params...) +} + +func (_Consumer *ConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Consumer.Contract.contract.Transfer(opts) +} + +func (_Consumer *ConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Consumer.Contract.contract.Transact(opts, method, params...) 
+} + +func (_Consumer *ConsumerCaller) CurrentPrice(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _Consumer.contract.Call(opts, &out, "currentPrice") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_Consumer *ConsumerSession) CurrentPrice() ([32]byte, error) { + return _Consumer.Contract.CurrentPrice(&_Consumer.CallOpts) +} + +func (_Consumer *ConsumerCallerSession) CurrentPrice() ([32]byte, error) { + return _Consumer.Contract.CurrentPrice(&_Consumer.CallOpts) +} + +func (_Consumer *ConsumerCaller) CurrentPriceInt(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Consumer.contract.Call(opts, &out, "currentPriceInt") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_Consumer *ConsumerSession) CurrentPriceInt() (*big.Int, error) { + return _Consumer.Contract.CurrentPriceInt(&_Consumer.CallOpts) +} + +func (_Consumer *ConsumerCallerSession) CurrentPriceInt() (*big.Int, error) { + return _Consumer.Contract.CurrentPriceInt(&_Consumer.CallOpts) +} + +func (_Consumer *ConsumerTransactor) AddExternalRequest(opts *bind.TransactOpts, _oracle common.Address, _requestId [32]byte) (*types.Transaction, error) { + return _Consumer.contract.Transact(opts, "addExternalRequest", _oracle, _requestId) +} + +func (_Consumer *ConsumerSession) AddExternalRequest(_oracle common.Address, _requestId [32]byte) (*types.Transaction, error) { + return _Consumer.Contract.AddExternalRequest(&_Consumer.TransactOpts, _oracle, _requestId) +} + +func (_Consumer *ConsumerTransactorSession) AddExternalRequest(_oracle common.Address, _requestId [32]byte) (*types.Transaction, error) { + return _Consumer.Contract.AddExternalRequest(&_Consumer.TransactOpts, _oracle, _requestId) +} + +func (_Consumer *ConsumerTransactor) CancelRequest(opts 
*bind.TransactOpts, _oracle common.Address, _requestId [32]byte, _payment *big.Int, _callbackFunctionId [4]byte, _expiration *big.Int) (*types.Transaction, error) { + return _Consumer.contract.Transact(opts, "cancelRequest", _oracle, _requestId, _payment, _callbackFunctionId, _expiration) +} + +func (_Consumer *ConsumerSession) CancelRequest(_oracle common.Address, _requestId [32]byte, _payment *big.Int, _callbackFunctionId [4]byte, _expiration *big.Int) (*types.Transaction, error) { + return _Consumer.Contract.CancelRequest(&_Consumer.TransactOpts, _oracle, _requestId, _payment, _callbackFunctionId, _expiration) +} + +func (_Consumer *ConsumerTransactorSession) CancelRequest(_oracle common.Address, _requestId [32]byte, _payment *big.Int, _callbackFunctionId [4]byte, _expiration *big.Int) (*types.Transaction, error) { + return _Consumer.Contract.CancelRequest(&_Consumer.TransactOpts, _oracle, _requestId, _payment, _callbackFunctionId, _expiration) +} + +func (_Consumer *ConsumerTransactor) Fulfill(opts *bind.TransactOpts, _requestId [32]byte, _price [32]byte) (*types.Transaction, error) { + return _Consumer.contract.Transact(opts, "fulfill", _requestId, _price) +} + +func (_Consumer *ConsumerSession) Fulfill(_requestId [32]byte, _price [32]byte) (*types.Transaction, error) { + return _Consumer.Contract.Fulfill(&_Consumer.TransactOpts, _requestId, _price) +} + +func (_Consumer *ConsumerTransactorSession) Fulfill(_requestId [32]byte, _price [32]byte) (*types.Transaction, error) { + return _Consumer.Contract.Fulfill(&_Consumer.TransactOpts, _requestId, _price) +} + +func (_Consumer *ConsumerTransactor) FulfillParametersWithCustomURLs(opts *bind.TransactOpts, _requestId [32]byte, _price *big.Int) (*types.Transaction, error) { + return _Consumer.contract.Transact(opts, "fulfillParametersWithCustomURLs", _requestId, _price) +} + +func (_Consumer *ConsumerSession) FulfillParametersWithCustomURLs(_requestId [32]byte, _price *big.Int) (*types.Transaction, error) { + return 
_Consumer.Contract.FulfillParametersWithCustomURLs(&_Consumer.TransactOpts, _requestId, _price) +} + +func (_Consumer *ConsumerTransactorSession) FulfillParametersWithCustomURLs(_requestId [32]byte, _price *big.Int) (*types.Transaction, error) { + return _Consumer.Contract.FulfillParametersWithCustomURLs(&_Consumer.TransactOpts, _requestId, _price) +} + +func (_Consumer *ConsumerTransactor) RequestEthereumPrice(opts *bind.TransactOpts, _currency string, _payment *big.Int) (*types.Transaction, error) { + return _Consumer.contract.Transact(opts, "requestEthereumPrice", _currency, _payment) +} + +func (_Consumer *ConsumerSession) RequestEthereumPrice(_currency string, _payment *big.Int) (*types.Transaction, error) { + return _Consumer.Contract.RequestEthereumPrice(&_Consumer.TransactOpts, _currency, _payment) +} + +func (_Consumer *ConsumerTransactorSession) RequestEthereumPrice(_currency string, _payment *big.Int) (*types.Transaction, error) { + return _Consumer.Contract.RequestEthereumPrice(&_Consumer.TransactOpts, _currency, _payment) +} + +func (_Consumer *ConsumerTransactor) RequestMultipleParametersWithCustomURLs(opts *bind.TransactOpts, _urlUSD string, _pathUSD string, _payment *big.Int) (*types.Transaction, error) { + return _Consumer.contract.Transact(opts, "requestMultipleParametersWithCustomURLs", _urlUSD, _pathUSD, _payment) +} + +func (_Consumer *ConsumerSession) RequestMultipleParametersWithCustomURLs(_urlUSD string, _pathUSD string, _payment *big.Int) (*types.Transaction, error) { + return _Consumer.Contract.RequestMultipleParametersWithCustomURLs(&_Consumer.TransactOpts, _urlUSD, _pathUSD, _payment) +} + +func (_Consumer *ConsumerTransactorSession) RequestMultipleParametersWithCustomURLs(_urlUSD string, _pathUSD string, _payment *big.Int) (*types.Transaction, error) { + return _Consumer.Contract.RequestMultipleParametersWithCustomURLs(&_Consumer.TransactOpts, _urlUSD, _pathUSD, _payment) +} + +func (_Consumer *ConsumerTransactor) SetSpecID(opts 
*bind.TransactOpts, _specId [32]byte) (*types.Transaction, error) { + return _Consumer.contract.Transact(opts, "setSpecID", _specId) +} + +func (_Consumer *ConsumerSession) SetSpecID(_specId [32]byte) (*types.Transaction, error) { + return _Consumer.Contract.SetSpecID(&_Consumer.TransactOpts, _specId) +} + +func (_Consumer *ConsumerTransactorSession) SetSpecID(_specId [32]byte) (*types.Transaction, error) { + return _Consumer.Contract.SetSpecID(&_Consumer.TransactOpts, _specId) +} + +func (_Consumer *ConsumerTransactor) WithdrawLink(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Consumer.contract.Transact(opts, "withdrawLink") +} + +func (_Consumer *ConsumerSession) WithdrawLink() (*types.Transaction, error) { + return _Consumer.Contract.WithdrawLink(&_Consumer.TransactOpts) +} + +func (_Consumer *ConsumerTransactorSession) WithdrawLink() (*types.Transaction, error) { + return _Consumer.Contract.WithdrawLink(&_Consumer.TransactOpts) +} + +type ConsumerPluginCancelledIterator struct { + Event *ConsumerPluginCancelled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ConsumerPluginCancelledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ConsumerPluginCancelled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ConsumerPluginCancelled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ConsumerPluginCancelledIterator) Error() error { + return it.fail +} + +func (it *ConsumerPluginCancelledIterator) 
Close() error { + it.sub.Unsubscribe() + return nil +} + +type ConsumerPluginCancelled struct { + Id [32]byte + Raw types.Log +} + +func (_Consumer *ConsumerFilterer) FilterPluginCancelled(opts *bind.FilterOpts, id [][32]byte) (*ConsumerPluginCancelledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _Consumer.contract.FilterLogs(opts, "PluginCancelled", idRule) + if err != nil { + return nil, err + } + return &ConsumerPluginCancelledIterator{contract: _Consumer.contract, event: "PluginCancelled", logs: logs, sub: sub}, nil +} + +func (_Consumer *ConsumerFilterer) WatchPluginCancelled(opts *bind.WatchOpts, sink chan<- *ConsumerPluginCancelled, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _Consumer.contract.WatchLogs(opts, "PluginCancelled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ConsumerPluginCancelled) + if err := _Consumer.contract.UnpackLog(event, "PluginCancelled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Consumer *ConsumerFilterer) ParsePluginCancelled(log types.Log) (*ConsumerPluginCancelled, error) { + event := new(ConsumerPluginCancelled) + if err := _Consumer.contract.UnpackLog(event, "PluginCancelled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ConsumerPluginFulfilledIterator struct { + Event *ConsumerPluginFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail 
error +} + +func (it *ConsumerPluginFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ConsumerPluginFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ConsumerPluginFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ConsumerPluginFulfilledIterator) Error() error { + return it.fail +} + +func (it *ConsumerPluginFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ConsumerPluginFulfilled struct { + Id [32]byte + Raw types.Log +} + +func (_Consumer *ConsumerFilterer) FilterPluginFulfilled(opts *bind.FilterOpts, id [][32]byte) (*ConsumerPluginFulfilledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _Consumer.contract.FilterLogs(opts, "PluginFulfilled", idRule) + if err != nil { + return nil, err + } + return &ConsumerPluginFulfilledIterator{contract: _Consumer.contract, event: "PluginFulfilled", logs: logs, sub: sub}, nil +} + +func (_Consumer *ConsumerFilterer) WatchPluginFulfilled(opts *bind.WatchOpts, sink chan<- *ConsumerPluginFulfilled, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _Consumer.contract.WatchLogs(opts, "PluginFulfilled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(ConsumerPluginFulfilled) + if err := _Consumer.contract.UnpackLog(event, "PluginFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Consumer *ConsumerFilterer) ParsePluginFulfilled(log types.Log) (*ConsumerPluginFulfilled, error) { + event := new(ConsumerPluginFulfilled) + if err := _Consumer.contract.UnpackLog(event, "PluginFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ConsumerPluginRequestedIterator struct { + Event *ConsumerPluginRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ConsumerPluginRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ConsumerPluginRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ConsumerPluginRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ConsumerPluginRequestedIterator) Error() error { + return it.fail +} + +func (it *ConsumerPluginRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ConsumerPluginRequested struct { + Id [32]byte + Raw types.Log +} + +func (_Consumer *ConsumerFilterer) FilterPluginRequested(opts *bind.FilterOpts, id [][32]byte) (*ConsumerPluginRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range 
id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _Consumer.contract.FilterLogs(opts, "PluginRequested", idRule) + if err != nil { + return nil, err + } + return &ConsumerPluginRequestedIterator{contract: _Consumer.contract, event: "PluginRequested", logs: logs, sub: sub}, nil +} + +func (_Consumer *ConsumerFilterer) WatchPluginRequested(opts *bind.WatchOpts, sink chan<- *ConsumerPluginRequested, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _Consumer.contract.WatchLogs(opts, "PluginRequested", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ConsumerPluginRequested) + if err := _Consumer.contract.UnpackLog(event, "PluginRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Consumer *ConsumerFilterer) ParsePluginRequested(log types.Log) (*ConsumerPluginRequested, error) { + event := new(ConsumerPluginRequested) + if err := _Consumer.contract.UnpackLog(event, "PluginRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ConsumerRequestFulfilledIterator struct { + Event *ConsumerRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ConsumerRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ConsumerRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ConsumerRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ConsumerRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *ConsumerRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ConsumerRequestFulfilled struct { + RequestId [32]byte + Price [32]byte + Raw types.Log +} + +func (_Consumer *ConsumerFilterer) FilterRequestFulfilled(opts *bind.FilterOpts, requestId [][32]byte, price [][32]byte) (*ConsumerRequestFulfilledIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var priceRule []interface{} + for _, priceItem := range price { + priceRule = append(priceRule, priceItem) + } + + logs, sub, err := _Consumer.contract.FilterLogs(opts, "RequestFulfilled", requestIdRule, priceRule) + if err != nil { + return nil, err + } + return &ConsumerRequestFulfilledIterator{contract: _Consumer.contract, event: "RequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_Consumer *ConsumerFilterer) WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *ConsumerRequestFulfilled, requestId [][32]byte, price [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var priceRule []interface{} + for _, priceItem := range price { + priceRule = append(priceRule, priceItem) + } + + logs, sub, err := _Consumer.contract.WatchLogs(opts, "RequestFulfilled", requestIdRule, priceRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan 
struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ConsumerRequestFulfilled) + if err := _Consumer.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Consumer *ConsumerFilterer) ParseRequestFulfilled(log types.Log) (*ConsumerRequestFulfilled, error) { + event := new(ConsumerRequestFulfilled) + if err := _Consumer.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_Consumer *Consumer) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _Consumer.abi.Events["PluginCancelled"].ID: + return _Consumer.ParsePluginCancelled(log) + case _Consumer.abi.Events["PluginFulfilled"].ID: + return _Consumer.ParsePluginFulfilled(log) + case _Consumer.abi.Events["PluginRequested"].ID: + return _Consumer.ParsePluginRequested(log) + case _Consumer.abi.Events["RequestFulfilled"].ID: + return _Consumer.ParseRequestFulfilled(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (ConsumerPluginCancelled) Topic() common.Hash { + return common.HexToHash("0xe1fe3afa0f7f761ff0a8b89086790efd5140d2907ebd5b7ff6bfcb5e075fd4c5") +} + +func (ConsumerPluginFulfilled) Topic() common.Hash { + return common.HexToHash("0x7cc135e0cebb02c3480ae5d74d377283180a2601f8f644edf7987b009316c63a") +} + +func (ConsumerPluginRequested) Topic() common.Hash { + return common.HexToHash("0xb5e6e01e79f91267dc17b4e6314d5d4d03593d2ceee0fbb452b750bd70ea5af9") +} + +func (ConsumerRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0x0c2366233f634048c0f0458060d1228fab36d00f7c0ecf6bdf2d9c4585036311") +} + +func (_Consumer 
*Consumer) Address() common.Address { + return _Consumer.address +} + +type ConsumerInterface interface { + CurrentPrice(opts *bind.CallOpts) ([32]byte, error) + + CurrentPriceInt(opts *bind.CallOpts) (*big.Int, error) + + AddExternalRequest(opts *bind.TransactOpts, _oracle common.Address, _requestId [32]byte) (*types.Transaction, error) + + CancelRequest(opts *bind.TransactOpts, _oracle common.Address, _requestId [32]byte, _payment *big.Int, _callbackFunctionId [4]byte, _expiration *big.Int) (*types.Transaction, error) + + Fulfill(opts *bind.TransactOpts, _requestId [32]byte, _price [32]byte) (*types.Transaction, error) + + FulfillParametersWithCustomURLs(opts *bind.TransactOpts, _requestId [32]byte, _price *big.Int) (*types.Transaction, error) + + RequestEthereumPrice(opts *bind.TransactOpts, _currency string, _payment *big.Int) (*types.Transaction, error) + + RequestMultipleParametersWithCustomURLs(opts *bind.TransactOpts, _urlUSD string, _pathUSD string, _payment *big.Int) (*types.Transaction, error) + + SetSpecID(opts *bind.TransactOpts, _specId [32]byte) (*types.Transaction, error) + + WithdrawLink(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterPluginCancelled(opts *bind.FilterOpts, id [][32]byte) (*ConsumerPluginCancelledIterator, error) + + WatchPluginCancelled(opts *bind.WatchOpts, sink chan<- *ConsumerPluginCancelled, id [][32]byte) (event.Subscription, error) + + ParsePluginCancelled(log types.Log) (*ConsumerPluginCancelled, error) + + FilterPluginFulfilled(opts *bind.FilterOpts, id [][32]byte) (*ConsumerPluginFulfilledIterator, error) + + WatchPluginFulfilled(opts *bind.WatchOpts, sink chan<- *ConsumerPluginFulfilled, id [][32]byte) (event.Subscription, error) + + ParsePluginFulfilled(log types.Log) (*ConsumerPluginFulfilled, error) + + FilterPluginRequested(opts *bind.FilterOpts, id [][32]byte) (*ConsumerPluginRequestedIterator, error) + + WatchPluginRequested(opts *bind.WatchOpts, sink chan<- *ConsumerPluginRequested, id [][32]byte) 
(event.Subscription, error) + + ParsePluginRequested(log types.Log) (*ConsumerPluginRequested, error) + + FilterRequestFulfilled(opts *bind.FilterOpts, requestId [][32]byte, price [][32]byte) (*ConsumerRequestFulfilledIterator, error) + + WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *ConsumerRequestFulfilled, requestId [][32]byte, price [][32]byte) (event.Subscription, error) + + ParseRequestFulfilled(log types.Log) (*ConsumerRequestFulfilled, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/cron_upkeep_factory_wrapper/cron_upkeep_factory_wrapper.go b/core/gethwrappers/generated/cron_upkeep_factory_wrapper/cron_upkeep_factory_wrapper.go new file mode 100644 index 00000000..80cb2e73 --- /dev/null +++ b/core/gethwrappers/generated/cron_upkeep_factory_wrapper/cron_upkeep_factory_wrapper.go @@ -0,0 +1,787 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package cron_upkeep_factory_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var CronUpkeepFactoryMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"upkeep\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"NewCronUpkeepCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"cronDelegateAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"ha
ndler\",\"type\":\"bytes\"},{\"internalType\":\"string\",\"name\":\"cronString\",\"type\":\"string\"}],\"name\":\"encodeCronJob\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"cronString\",\"type\":\"string\"}],\"name\":\"encodeCronString\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"newCronUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"encodedJob\",\"type\":\"bytes\"}],\"name\":\"newCronUpkeepWithJob\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_maxJobs\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"maxJobs\",\"type\":\"uint256\"}],\"name\":\"setMaxJobs\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +var CronUpkeepFactoryABI = CronUpkeepFactoryMetaData.ABI + +type CronUpkeepFactory struct { + address common.Address + abi abi.ABI + CronUpkeepFactoryCaller + CronUpkeepFactoryTransactor + CronUpkeepFactoryFilterer +} + +type CronUpkeepFactoryCaller struct { + contract *bind.BoundContract +} + +type CronUpkeepFactoryTransactor struct { + contract *bind.BoundContract +} + +type CronUpkeepFactoryFilterer struct { + contract *bind.BoundContract +} + +type 
CronUpkeepFactorySession struct { + Contract *CronUpkeepFactory + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type CronUpkeepFactoryCallerSession struct { + Contract *CronUpkeepFactoryCaller + CallOpts bind.CallOpts +} + +type CronUpkeepFactoryTransactorSession struct { + Contract *CronUpkeepFactoryTransactor + TransactOpts bind.TransactOpts +} + +type CronUpkeepFactoryRaw struct { + Contract *CronUpkeepFactory +} + +type CronUpkeepFactoryCallerRaw struct { + Contract *CronUpkeepFactoryCaller +} + +type CronUpkeepFactoryTransactorRaw struct { + Contract *CronUpkeepFactoryTransactor +} + +func NewCronUpkeepFactory(address common.Address, backend bind.ContractBackend) (*CronUpkeepFactory, error) { + abi, err := abi.JSON(strings.NewReader(CronUpkeepFactoryABI)) + if err != nil { + return nil, err + } + contract, err := bindCronUpkeepFactory(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &CronUpkeepFactory{address: address, abi: abi, CronUpkeepFactoryCaller: CronUpkeepFactoryCaller{contract: contract}, CronUpkeepFactoryTransactor: CronUpkeepFactoryTransactor{contract: contract}, CronUpkeepFactoryFilterer: CronUpkeepFactoryFilterer{contract: contract}}, nil +} + +func NewCronUpkeepFactoryCaller(address common.Address, caller bind.ContractCaller) (*CronUpkeepFactoryCaller, error) { + contract, err := bindCronUpkeepFactory(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &CronUpkeepFactoryCaller{contract: contract}, nil +} + +func NewCronUpkeepFactoryTransactor(address common.Address, transactor bind.ContractTransactor) (*CronUpkeepFactoryTransactor, error) { + contract, err := bindCronUpkeepFactory(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &CronUpkeepFactoryTransactor{contract: contract}, nil +} + +func NewCronUpkeepFactoryFilterer(address common.Address, filterer bind.ContractFilterer) (*CronUpkeepFactoryFilterer, error) { + contract, err := 
bindCronUpkeepFactory(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &CronUpkeepFactoryFilterer{contract: contract}, nil +} + +func bindCronUpkeepFactory(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := CronUpkeepFactoryMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _CronUpkeepFactory.Contract.CronUpkeepFactoryCaller.contract.Call(opts, result, method, params...) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.CronUpkeepFactoryTransactor.contract.Transfer(opts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.CronUpkeepFactoryTransactor.contract.Transact(opts, method, params...) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _CronUpkeepFactory.Contract.contract.Call(opts, result, method, params...) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.contract.Transfer(opts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.contract.Transact(opts, method, params...) 
+} + +func (_CronUpkeepFactory *CronUpkeepFactoryCaller) CronDelegateAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _CronUpkeepFactory.contract.Call(opts, &out, "cronDelegateAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) CronDelegateAddress() (common.Address, error) { + return _CronUpkeepFactory.Contract.CronDelegateAddress(&_CronUpkeepFactory.CallOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCallerSession) CronDelegateAddress() (common.Address, error) { + return _CronUpkeepFactory.Contract.CronDelegateAddress(&_CronUpkeepFactory.CallOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCaller) EncodeCronJob(opts *bind.CallOpts, target common.Address, handler []byte, cronString string) ([]byte, error) { + var out []interface{} + err := _CronUpkeepFactory.contract.Call(opts, &out, "encodeCronJob", target, handler, cronString) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) EncodeCronJob(target common.Address, handler []byte, cronString string) ([]byte, error) { + return _CronUpkeepFactory.Contract.EncodeCronJob(&_CronUpkeepFactory.CallOpts, target, handler, cronString) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCallerSession) EncodeCronJob(target common.Address, handler []byte, cronString string) ([]byte, error) { + return _CronUpkeepFactory.Contract.EncodeCronJob(&_CronUpkeepFactory.CallOpts, target, handler, cronString) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCaller) EncodeCronString(opts *bind.CallOpts, cronString string) ([]byte, error) { + var out []interface{} + err := _CronUpkeepFactory.contract.Call(opts, &out, "encodeCronString", cronString) + + if err != nil { + return 
*new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) EncodeCronString(cronString string) ([]byte, error) { + return _CronUpkeepFactory.Contract.EncodeCronString(&_CronUpkeepFactory.CallOpts, cronString) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCallerSession) EncodeCronString(cronString string) ([]byte, error) { + return _CronUpkeepFactory.Contract.EncodeCronString(&_CronUpkeepFactory.CallOpts, cronString) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _CronUpkeepFactory.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) Owner() (common.Address, error) { + return _CronUpkeepFactory.Contract.Owner(&_CronUpkeepFactory.CallOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCallerSession) Owner() (common.Address, error) { + return _CronUpkeepFactory.Contract.Owner(&_CronUpkeepFactory.CallOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCaller) SMaxJobs(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _CronUpkeepFactory.contract.Call(opts, &out, "s_maxJobs") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) SMaxJobs() (*big.Int, error) { + return _CronUpkeepFactory.Contract.SMaxJobs(&_CronUpkeepFactory.CallOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryCallerSession) SMaxJobs() (*big.Int, error) { + return _CronUpkeepFactory.Contract.SMaxJobs(&_CronUpkeepFactory.CallOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactor) AcceptOwnership(opts *bind.TransactOpts) 
(*types.Transaction, error) { + return _CronUpkeepFactory.contract.Transact(opts, "acceptOwnership") +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) AcceptOwnership() (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.AcceptOwnership(&_CronUpkeepFactory.TransactOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.AcceptOwnership(&_CronUpkeepFactory.TransactOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactor) NewCronUpkeep(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeepFactory.contract.Transact(opts, "newCronUpkeep") +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) NewCronUpkeep() (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.NewCronUpkeep(&_CronUpkeepFactory.TransactOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorSession) NewCronUpkeep() (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.NewCronUpkeep(&_CronUpkeepFactory.TransactOpts) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactor) NewCronUpkeepWithJob(opts *bind.TransactOpts, encodedJob []byte) (*types.Transaction, error) { + return _CronUpkeepFactory.contract.Transact(opts, "newCronUpkeepWithJob", encodedJob) +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) NewCronUpkeepWithJob(encodedJob []byte) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.NewCronUpkeepWithJob(&_CronUpkeepFactory.TransactOpts, encodedJob) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorSession) NewCronUpkeepWithJob(encodedJob []byte) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.NewCronUpkeepWithJob(&_CronUpkeepFactory.TransactOpts, encodedJob) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactor) SetMaxJobs(opts *bind.TransactOpts, maxJobs *big.Int) (*types.Transaction, error) { + return _CronUpkeepFactory.contract.Transact(opts, 
"setMaxJobs", maxJobs) +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) SetMaxJobs(maxJobs *big.Int) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.SetMaxJobs(&_CronUpkeepFactory.TransactOpts, maxJobs) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorSession) SetMaxJobs(maxJobs *big.Int) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.SetMaxJobs(&_CronUpkeepFactory.TransactOpts, maxJobs) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _CronUpkeepFactory.contract.Transact(opts, "transferOwnership", to) +} + +func (_CronUpkeepFactory *CronUpkeepFactorySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.TransferOwnership(&_CronUpkeepFactory.TransactOpts, to) +} + +func (_CronUpkeepFactory *CronUpkeepFactoryTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _CronUpkeepFactory.Contract.TransferOwnership(&_CronUpkeepFactory.TransactOpts, to) +} + +type CronUpkeepFactoryNewCronUpkeepCreatedIterator struct { + Event *CronUpkeepFactoryNewCronUpkeepCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepFactoryNewCronUpkeepCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepFactoryNewCronUpkeepCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepFactoryNewCronUpkeepCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + 
return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepFactoryNewCronUpkeepCreatedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepFactoryNewCronUpkeepCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepFactoryNewCronUpkeepCreated struct { + Upkeep common.Address + Owner common.Address + Raw types.Log +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) FilterNewCronUpkeepCreated(opts *bind.FilterOpts) (*CronUpkeepFactoryNewCronUpkeepCreatedIterator, error) { + + logs, sub, err := _CronUpkeepFactory.contract.FilterLogs(opts, "NewCronUpkeepCreated") + if err != nil { + return nil, err + } + return &CronUpkeepFactoryNewCronUpkeepCreatedIterator{contract: _CronUpkeepFactory.contract, event: "NewCronUpkeepCreated", logs: logs, sub: sub}, nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) WatchNewCronUpkeepCreated(opts *bind.WatchOpts, sink chan<- *CronUpkeepFactoryNewCronUpkeepCreated) (event.Subscription, error) { + + logs, sub, err := _CronUpkeepFactory.contract.WatchLogs(opts, "NewCronUpkeepCreated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepFactoryNewCronUpkeepCreated) + if err := _CronUpkeepFactory.contract.UnpackLog(event, "NewCronUpkeepCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) ParseNewCronUpkeepCreated(log types.Log) (*CronUpkeepFactoryNewCronUpkeepCreated, error) { + event := new(CronUpkeepFactoryNewCronUpkeepCreated) + if err := _CronUpkeepFactory.contract.UnpackLog(event, 
"NewCronUpkeepCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepFactoryOwnershipTransferRequestedIterator struct { + Event *CronUpkeepFactoryOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepFactoryOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepFactoryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepFactoryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepFactoryOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepFactoryOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepFactoryOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepFactoryOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeepFactory.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err 
!= nil { + return nil, err + } + return &CronUpkeepFactoryOwnershipTransferRequestedIterator{contract: _CronUpkeepFactory.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *CronUpkeepFactoryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeepFactory.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepFactoryOwnershipTransferRequested) + if err := _CronUpkeepFactory.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) ParseOwnershipTransferRequested(log types.Log) (*CronUpkeepFactoryOwnershipTransferRequested, error) { + event := new(CronUpkeepFactoryOwnershipTransferRequested) + if err := _CronUpkeepFactory.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepFactoryOwnershipTransferredIterator struct { + Event *CronUpkeepFactoryOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*CronUpkeepFactoryOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepFactoryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepFactoryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepFactoryOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepFactoryOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepFactoryOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepFactoryOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeepFactory.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &CronUpkeepFactoryOwnershipTransferredIterator{contract: _CronUpkeepFactory.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *CronUpkeepFactoryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + 
var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeepFactory.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepFactoryOwnershipTransferred) + if err := _CronUpkeepFactory.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeepFactory *CronUpkeepFactoryFilterer) ParseOwnershipTransferred(log types.Log) (*CronUpkeepFactoryOwnershipTransferred, error) { + event := new(CronUpkeepFactoryOwnershipTransferred) + if err := _CronUpkeepFactory.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_CronUpkeepFactory *CronUpkeepFactory) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _CronUpkeepFactory.abi.Events["NewCronUpkeepCreated"].ID: + return _CronUpkeepFactory.ParseNewCronUpkeepCreated(log) + case _CronUpkeepFactory.abi.Events["OwnershipTransferRequested"].ID: + return _CronUpkeepFactory.ParseOwnershipTransferRequested(log) + case _CronUpkeepFactory.abi.Events["OwnershipTransferred"].ID: + return _CronUpkeepFactory.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (CronUpkeepFactoryNewCronUpkeepCreated) Topic() common.Hash { + return 
common.HexToHash("0x959d571686b1c9343b61bdc3c0459760cb9695fcd4c4c64845e3b2cdd6865ced") +} + +func (CronUpkeepFactoryOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (CronUpkeepFactoryOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_CronUpkeepFactory *CronUpkeepFactory) Address() common.Address { + return _CronUpkeepFactory.address +} + +type CronUpkeepFactoryInterface interface { + CronDelegateAddress(opts *bind.CallOpts) (common.Address, error) + + EncodeCronJob(opts *bind.CallOpts, target common.Address, handler []byte, cronString string) ([]byte, error) + + EncodeCronString(opts *bind.CallOpts, cronString string) ([]byte, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SMaxJobs(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + NewCronUpkeep(opts *bind.TransactOpts) (*types.Transaction, error) + + NewCronUpkeepWithJob(opts *bind.TransactOpts, encodedJob []byte) (*types.Transaction, error) + + SetMaxJobs(opts *bind.TransactOpts, maxJobs *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterNewCronUpkeepCreated(opts *bind.FilterOpts) (*CronUpkeepFactoryNewCronUpkeepCreatedIterator, error) + + WatchNewCronUpkeepCreated(opts *bind.WatchOpts, sink chan<- *CronUpkeepFactoryNewCronUpkeepCreated) (event.Subscription, error) + + ParseNewCronUpkeepCreated(log types.Log) (*CronUpkeepFactoryNewCronUpkeepCreated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepFactoryOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *CronUpkeepFactoryOwnershipTransferRequested, 
from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*CronUpkeepFactoryOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepFactoryOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *CronUpkeepFactoryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*CronUpkeepFactoryOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/cron_upkeep_wrapper/cron_upkeep_wrapper.go b/core/gethwrappers/generated/cron_upkeep_wrapper/cron_upkeep_wrapper.go new file mode 100644 index 00000000..ad91800a --- /dev/null +++ b/core/gethwrappers/generated/cron_upkeep_wrapper/cron_upkeep_wrapper.go @@ -0,0 +1,1579 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package cron_upkeep_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var CronUpkeepMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"delegate\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"maxJobs\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"firstJob\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"CronJobIDNotFound\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ExceedsMaxJobs\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidHandler\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TickDoesntMatchSpec\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TickInFuture\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TickTooOld\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnknownFieldType\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"handler\",\"type\":\"bytes\"}],\"name\":\"CronJobCreated\",\"type\":\"event\"},{\"anonymous\":fals
e,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"CronJobDeleted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"name\":\"CronJobExecuted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"handler\",\"type\":\"bytes\"}],\"name\":\"CronJobUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\"
:\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"handler\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"encodedCronSpec\",\"type\":\"bytes\"}],\"name\":\"createCronJobFromEncodedSpec\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"deleteCronJob\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getActiveCronJobIDs\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getCronJob\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"handler\",\"type\":\"bytes\"},{\"internalType\":\"string\",\"name\":\"cronString\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"nextTick\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_maxJobs\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutabilit
y\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"newTarget\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"newHandler\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"newEncodedCronSpec\",\"type\":\"bytes\"}],\"name\":\"updateCronJob\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", +} + +var CronUpkeepABI = CronUpkeepMetaData.ABI + +type CronUpkeep struct { + address common.Address + abi abi.ABI + CronUpkeepCaller + CronUpkeepTransactor + CronUpkeepFilterer +} + +type CronUpkeepCaller struct { + contract *bind.BoundContract +} + +type CronUpkeepTransactor struct { + contract *bind.BoundContract +} + +type CronUpkeepFilterer struct { + contract *bind.BoundContract +} + +type CronUpkeepSession struct { + Contract *CronUpkeep + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type CronUpkeepCallerSession struct { + Contract *CronUpkeepCaller + CallOpts bind.CallOpts +} + +type CronUpkeepTransactorSession struct { + Contract *CronUpkeepTransactor + TransactOpts bind.TransactOpts +} + +type CronUpkeepRaw struct { + Contract *CronUpkeep +} + +type CronUpkeepCallerRaw struct { + Contract *CronUpkeepCaller +} + +type CronUpkeepTransactorRaw struct { + Contract *CronUpkeepTransactor +} + +func NewCronUpkeep(address common.Address, backend bind.ContractBackend) (*CronUpkeep, error) { + abi, err := abi.JSON(strings.NewReader(CronUpkeepABI)) + if err != nil { + return nil, err + } + contract, err := bindCronUpkeep(address, backend, backend, backend) + if err 
!= nil { + return nil, err + } + return &CronUpkeep{address: address, abi: abi, CronUpkeepCaller: CronUpkeepCaller{contract: contract}, CronUpkeepTransactor: CronUpkeepTransactor{contract: contract}, CronUpkeepFilterer: CronUpkeepFilterer{contract: contract}}, nil +} + +func NewCronUpkeepCaller(address common.Address, caller bind.ContractCaller) (*CronUpkeepCaller, error) { + contract, err := bindCronUpkeep(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &CronUpkeepCaller{contract: contract}, nil +} + +func NewCronUpkeepTransactor(address common.Address, transactor bind.ContractTransactor) (*CronUpkeepTransactor, error) { + contract, err := bindCronUpkeep(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &CronUpkeepTransactor{contract: contract}, nil +} + +func NewCronUpkeepFilterer(address common.Address, filterer bind.ContractFilterer) (*CronUpkeepFilterer, error) { + contract, err := bindCronUpkeep(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &CronUpkeepFilterer{contract: contract}, nil +} + +func bindCronUpkeep(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := CronUpkeepMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_CronUpkeep *CronUpkeepRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _CronUpkeep.Contract.CronUpkeepCaller.contract.Call(opts, result, method, params...) 
+} + +func (_CronUpkeep *CronUpkeepRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeep.Contract.CronUpkeepTransactor.contract.Transfer(opts) +} + +func (_CronUpkeep *CronUpkeepRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _CronUpkeep.Contract.CronUpkeepTransactor.contract.Transact(opts, method, params...) +} + +func (_CronUpkeep *CronUpkeepCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _CronUpkeep.Contract.contract.Call(opts, result, method, params...) +} + +func (_CronUpkeep *CronUpkeepTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeep.Contract.contract.Transfer(opts) +} + +func (_CronUpkeep *CronUpkeepTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _CronUpkeep.Contract.contract.Transact(opts, method, params...) 
+} + +func (_CronUpkeep *CronUpkeepCaller) GetActiveCronJobIDs(opts *bind.CallOpts) ([]*big.Int, error) { + var out []interface{} + err := _CronUpkeep.contract.Call(opts, &out, "getActiveCronJobIDs") + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_CronUpkeep *CronUpkeepSession) GetActiveCronJobIDs() ([]*big.Int, error) { + return _CronUpkeep.Contract.GetActiveCronJobIDs(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCallerSession) GetActiveCronJobIDs() ([]*big.Int, error) { + return _CronUpkeep.Contract.GetActiveCronJobIDs(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCaller) GetCronJob(opts *bind.CallOpts, id *big.Int) (GetCronJob, + + error) { + var out []interface{} + err := _CronUpkeep.contract.Call(opts, &out, "getCronJob", id) + + outstruct := new(GetCronJob) + if err != nil { + return *outstruct, err + } + + outstruct.Target = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.Handler = *abi.ConvertType(out[1], new([]byte)).(*[]byte) + outstruct.CronString = *abi.ConvertType(out[2], new(string)).(*string) + outstruct.NextTick = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_CronUpkeep *CronUpkeepSession) GetCronJob(id *big.Int) (GetCronJob, + + error) { + return _CronUpkeep.Contract.GetCronJob(&_CronUpkeep.CallOpts, id) +} + +func (_CronUpkeep *CronUpkeepCallerSession) GetCronJob(id *big.Int) (GetCronJob, + + error) { + return _CronUpkeep.Contract.GetCronJob(&_CronUpkeep.CallOpts, id) +} + +func (_CronUpkeep *CronUpkeepCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _CronUpkeep.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_CronUpkeep *CronUpkeepSession) 
Owner() (common.Address, error) { + return _CronUpkeep.Contract.Owner(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCallerSession) Owner() (common.Address, error) { + return _CronUpkeep.Contract.Owner(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCaller) Paused(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _CronUpkeep.contract.Call(opts, &out, "paused") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_CronUpkeep *CronUpkeepSession) Paused() (bool, error) { + return _CronUpkeep.Contract.Paused(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCallerSession) Paused() (bool, error) { + return _CronUpkeep.Contract.Paused(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCaller) SMaxJobs(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _CronUpkeep.contract.Call(opts, &out, "s_maxJobs") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_CronUpkeep *CronUpkeepSession) SMaxJobs() (*big.Int, error) { + return _CronUpkeep.Contract.SMaxJobs(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepCallerSession) SMaxJobs() (*big.Int, error) { + return _CronUpkeep.Contract.SMaxJobs(&_CronUpkeep.CallOpts) +} + +func (_CronUpkeep *CronUpkeepTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "acceptOwnership") +} + +func (_CronUpkeep *CronUpkeepSession) AcceptOwnership() (*types.Transaction, error) { + return _CronUpkeep.Contract.AcceptOwnership(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _CronUpkeep.Contract.AcceptOwnership(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactor) CheckUpkeep(opts *bind.TransactOpts, 
arg0 []byte) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "checkUpkeep", arg0) +} + +func (_CronUpkeep *CronUpkeepSession) CheckUpkeep(arg0 []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.CheckUpkeep(&_CronUpkeep.TransactOpts, arg0) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) CheckUpkeep(arg0 []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.CheckUpkeep(&_CronUpkeep.TransactOpts, arg0) +} + +func (_CronUpkeep *CronUpkeepTransactor) CreateCronJobFromEncodedSpec(opts *bind.TransactOpts, target common.Address, handler []byte, encodedCronSpec []byte) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "createCronJobFromEncodedSpec", target, handler, encodedCronSpec) +} + +func (_CronUpkeep *CronUpkeepSession) CreateCronJobFromEncodedSpec(target common.Address, handler []byte, encodedCronSpec []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.CreateCronJobFromEncodedSpec(&_CronUpkeep.TransactOpts, target, handler, encodedCronSpec) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) CreateCronJobFromEncodedSpec(target common.Address, handler []byte, encodedCronSpec []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.CreateCronJobFromEncodedSpec(&_CronUpkeep.TransactOpts, target, handler, encodedCronSpec) +} + +func (_CronUpkeep *CronUpkeepTransactor) DeleteCronJob(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "deleteCronJob", id) +} + +func (_CronUpkeep *CronUpkeepSession) DeleteCronJob(id *big.Int) (*types.Transaction, error) { + return _CronUpkeep.Contract.DeleteCronJob(&_CronUpkeep.TransactOpts, id) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) DeleteCronJob(id *big.Int) (*types.Transaction, error) { + return _CronUpkeep.Contract.DeleteCronJob(&_CronUpkeep.TransactOpts, id) +} + +func (_CronUpkeep *CronUpkeepTransactor) Pause(opts *bind.TransactOpts) 
(*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "pause") +} + +func (_CronUpkeep *CronUpkeepSession) Pause() (*types.Transaction, error) { + return _CronUpkeep.Contract.Pause(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) Pause() (*types.Transaction, error) { + return _CronUpkeep.Contract.Pause(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "performUpkeep", performData) +} + +func (_CronUpkeep *CronUpkeepSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.PerformUpkeep(&_CronUpkeep.TransactOpts, performData) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.PerformUpkeep(&_CronUpkeep.TransactOpts, performData) +} + +func (_CronUpkeep *CronUpkeepTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "transferOwnership", to) +} + +func (_CronUpkeep *CronUpkeepSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _CronUpkeep.Contract.TransferOwnership(&_CronUpkeep.TransactOpts, to) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _CronUpkeep.Contract.TransferOwnership(&_CronUpkeep.TransactOpts, to) +} + +func (_CronUpkeep *CronUpkeepTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "unpause") +} + +func (_CronUpkeep *CronUpkeepSession) Unpause() (*types.Transaction, error) { + return _CronUpkeep.Contract.Unpause(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) Unpause() (*types.Transaction, error) { + 
return _CronUpkeep.Contract.Unpause(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactor) UpdateCronJob(opts *bind.TransactOpts, id *big.Int, newTarget common.Address, newHandler []byte, newEncodedCronSpec []byte) (*types.Transaction, error) { + return _CronUpkeep.contract.Transact(opts, "updateCronJob", id, newTarget, newHandler, newEncodedCronSpec) +} + +func (_CronUpkeep *CronUpkeepSession) UpdateCronJob(id *big.Int, newTarget common.Address, newHandler []byte, newEncodedCronSpec []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.UpdateCronJob(&_CronUpkeep.TransactOpts, id, newTarget, newHandler, newEncodedCronSpec) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) UpdateCronJob(id *big.Int, newTarget common.Address, newHandler []byte, newEncodedCronSpec []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.UpdateCronJob(&_CronUpkeep.TransactOpts, id, newTarget, newHandler, newEncodedCronSpec) +} + +func (_CronUpkeep *CronUpkeepTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _CronUpkeep.contract.RawTransact(opts, calldata) +} + +func (_CronUpkeep *CronUpkeepSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.Fallback(&_CronUpkeep.TransactOpts, calldata) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _CronUpkeep.Contract.Fallback(&_CronUpkeep.TransactOpts, calldata) +} + +func (_CronUpkeep *CronUpkeepTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CronUpkeep.contract.RawTransact(opts, nil) +} + +func (_CronUpkeep *CronUpkeepSession) Receive() (*types.Transaction, error) { + return _CronUpkeep.Contract.Receive(&_CronUpkeep.TransactOpts) +} + +func (_CronUpkeep *CronUpkeepTransactorSession) Receive() (*types.Transaction, error) { + return _CronUpkeep.Contract.Receive(&_CronUpkeep.TransactOpts) +} + +type 
CronUpkeepCronJobCreatedIterator struct { + Event *CronUpkeepCronJobCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepCronJobCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepCronJobCreatedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepCronJobCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepCronJobCreated struct { + Id *big.Int + Target common.Address + Handler []byte + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterCronJobCreated(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobCreatedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "CronJobCreated", idRule) + if err != nil { + return nil, err + } + return &CronUpkeepCronJobCreatedIterator{contract: _CronUpkeep.contract, event: "CronJobCreated", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchCronJobCreated(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobCreated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_CronUpkeep.contract.WatchLogs(opts, "CronJobCreated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepCronJobCreated) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseCronJobCreated(log types.Log) (*CronUpkeepCronJobCreated, error) { + event := new(CronUpkeepCronJobCreated) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepCronJobDeletedIterator struct { + Event *CronUpkeepCronJobDeleted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepCronJobDeletedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobDeleted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobDeleted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepCronJobDeletedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepCronJobDeletedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + 
+type CronUpkeepCronJobDeleted struct { + Id *big.Int + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterCronJobDeleted(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobDeletedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "CronJobDeleted", idRule) + if err != nil { + return nil, err + } + return &CronUpkeepCronJobDeletedIterator{contract: _CronUpkeep.contract, event: "CronJobDeleted", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchCronJobDeleted(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobDeleted, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.WatchLogs(opts, "CronJobDeleted", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepCronJobDeleted) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobDeleted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseCronJobDeleted(log types.Log) (*CronUpkeepCronJobDeleted, error) { + event := new(CronUpkeepCronJobDeleted) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobDeleted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepCronJobExecutedIterator struct { + Event *CronUpkeepCronJobExecuted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*CronUpkeepCronJobExecutedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobExecuted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobExecuted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepCronJobExecutedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepCronJobExecutedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepCronJobExecuted struct { + Id *big.Int + Success bool + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterCronJobExecuted(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobExecutedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "CronJobExecuted", idRule) + if err != nil { + return nil, err + } + return &CronUpkeepCronJobExecutedIterator{contract: _CronUpkeep.contract, event: "CronJobExecuted", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchCronJobExecuted(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobExecuted, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.WatchLogs(opts, "CronJobExecuted", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + 
event := new(CronUpkeepCronJobExecuted) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobExecuted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseCronJobExecuted(log types.Log) (*CronUpkeepCronJobExecuted, error) { + event := new(CronUpkeepCronJobExecuted) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobExecuted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepCronJobUpdatedIterator struct { + Event *CronUpkeepCronJobUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepCronJobUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepCronJobUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepCronJobUpdatedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepCronJobUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepCronJobUpdated struct { + Id *big.Int + Target common.Address + Handler []byte + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterCronJobUpdated(opts *bind.FilterOpts, id []*big.Int) 
(*CronUpkeepCronJobUpdatedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "CronJobUpdated", idRule) + if err != nil { + return nil, err + } + return &CronUpkeepCronJobUpdatedIterator{contract: _CronUpkeep.contract, event: "CronJobUpdated", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchCronJobUpdated(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobUpdated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _CronUpkeep.contract.WatchLogs(opts, "CronJobUpdated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepCronJobUpdated) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseCronJobUpdated(log types.Log) (*CronUpkeepCronJobUpdated, error) { + event := new(CronUpkeepCronJobUpdated) + if err := _CronUpkeep.contract.UnpackLog(event, "CronJobUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepOwnershipTransferRequestedIterator struct { + Event *CronUpkeepOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + 
it.Event = new(CronUpkeepOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &CronUpkeepOwnershipTransferRequestedIterator{contract: _CronUpkeep.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *CronUpkeepOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = 
append(toRule, toItem) + } + + logs, sub, err := _CronUpkeep.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepOwnershipTransferRequested) + if err := _CronUpkeep.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseOwnershipTransferRequested(log types.Log) (*CronUpkeepOwnershipTransferRequested, error) { + event := new(CronUpkeepOwnershipTransferRequested) + if err := _CronUpkeep.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepOwnershipTransferredIterator struct { + Event *CronUpkeepOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*CronUpkeepOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &CronUpkeepOwnershipTransferredIterator{contract: _CronUpkeep.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *CronUpkeepOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _CronUpkeep.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepOwnershipTransferred) + if err := _CronUpkeep.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + 
return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseOwnershipTransferred(log types.Log) (*CronUpkeepOwnershipTransferred, error) { + event := new(CronUpkeepOwnershipTransferred) + if err := _CronUpkeep.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepPausedIterator struct { + Event *CronUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepPaused struct { + Account common.Address + Raw types.Log +} + +func (_CronUpkeep *CronUpkeepFilterer) FilterPaused(opts *bind.FilterOpts) (*CronUpkeepPausedIterator, error) { + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &CronUpkeepPausedIterator{contract: _CronUpkeep.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *CronUpkeepPaused) (event.Subscription, error) { + + logs, sub, err := 
_CronUpkeep.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepPaused) + if err := _CronUpkeep.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParsePaused(log types.Log) (*CronUpkeepPaused, error) { + event := new(CronUpkeepPaused) + if err := _CronUpkeep.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CronUpkeepUnpausedIterator struct { + Event *CronUpkeepUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *CronUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(CronUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(CronUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *CronUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *CronUpkeepUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type CronUpkeepUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_CronUpkeep 
*CronUpkeepFilterer) FilterUnpaused(opts *bind.FilterOpts) (*CronUpkeepUnpausedIterator, error) { + + logs, sub, err := _CronUpkeep.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &CronUpkeepUnpausedIterator{contract: _CronUpkeep.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_CronUpkeep *CronUpkeepFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *CronUpkeepUnpaused) (event.Subscription, error) { + + logs, sub, err := _CronUpkeep.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(CronUpkeepUnpaused) + if err := _CronUpkeep.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_CronUpkeep *CronUpkeepFilterer) ParseUnpaused(log types.Log) (*CronUpkeepUnpaused, error) { + event := new(CronUpkeepUnpaused) + if err := _CronUpkeep.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetCronJob struct { + Target common.Address + Handler []byte + CronString string + NextTick *big.Int +} + +func (_CronUpkeep *CronUpkeep) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _CronUpkeep.abi.Events["CronJobCreated"].ID: + return _CronUpkeep.ParseCronJobCreated(log) + case _CronUpkeep.abi.Events["CronJobDeleted"].ID: + return _CronUpkeep.ParseCronJobDeleted(log) + case _CronUpkeep.abi.Events["CronJobExecuted"].ID: + return _CronUpkeep.ParseCronJobExecuted(log) + case _CronUpkeep.abi.Events["CronJobUpdated"].ID: + return _CronUpkeep.ParseCronJobUpdated(log) + case 
_CronUpkeep.abi.Events["OwnershipTransferRequested"].ID: + return _CronUpkeep.ParseOwnershipTransferRequested(log) + case _CronUpkeep.abi.Events["OwnershipTransferred"].ID: + return _CronUpkeep.ParseOwnershipTransferred(log) + case _CronUpkeep.abi.Events["Paused"].ID: + return _CronUpkeep.ParsePaused(log) + case _CronUpkeep.abi.Events["Unpaused"].ID: + return _CronUpkeep.ParseUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (CronUpkeepCronJobCreated) Topic() common.Hash { + return common.HexToHash("0xe66fb0bca0f9d6a395d3eaf5f39c6ac87dd34aff4e3f2f9a9b33a46f15589627") +} + +func (CronUpkeepCronJobDeleted) Topic() common.Hash { + return common.HexToHash("0x7aaa5a7c35e162386d922bd67e91ea476d38d9bb931bc369d8b15ab113250974") +} + +func (CronUpkeepCronJobExecuted) Topic() common.Hash { + return common.HexToHash("0x25d1b235668fd0219da15f5fa6054013a53e59c4f3ea31459dc1d4e0b9f23d26") +} + +func (CronUpkeepCronJobUpdated) Topic() common.Hash { + return common.HexToHash("0xeeaf6ad42034ba5357ffd961b8c80bf6cbf53c224020541e46573a3f19ef09a5") +} + +func (CronUpkeepOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (CronUpkeepOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (CronUpkeepPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (CronUpkeepUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (_CronUpkeep *CronUpkeep) Address() common.Address { + return _CronUpkeep.address +} + +type CronUpkeepInterface interface { + GetActiveCronJobIDs(opts *bind.CallOpts) ([]*big.Int, error) + + GetCronJob(opts *bind.CallOpts, id *big.Int) 
(GetCronJob, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Paused(opts *bind.CallOpts) (bool, error) + + SMaxJobs(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + CheckUpkeep(opts *bind.TransactOpts, arg0 []byte) (*types.Transaction, error) + + CreateCronJobFromEncodedSpec(opts *bind.TransactOpts, target common.Address, handler []byte, encodedCronSpec []byte) (*types.Transaction, error) + + DeleteCronJob(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) (*types.Transaction, error) + + PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + UpdateCronJob(opts *bind.TransactOpts, id *big.Int, newTarget common.Address, newHandler []byte, newEncodedCronSpec []byte) (*types.Transaction, error) + + Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) + + Receive(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterCronJobCreated(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobCreatedIterator, error) + + WatchCronJobCreated(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobCreated, id []*big.Int) (event.Subscription, error) + + ParseCronJobCreated(log types.Log) (*CronUpkeepCronJobCreated, error) + + FilterCronJobDeleted(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobDeletedIterator, error) + + WatchCronJobDeleted(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobDeleted, id []*big.Int) (event.Subscription, error) + + ParseCronJobDeleted(log types.Log) (*CronUpkeepCronJobDeleted, error) + + FilterCronJobExecuted(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobExecutedIterator, error) + + WatchCronJobExecuted(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobExecuted, id 
[]*big.Int) (event.Subscription, error) + + ParseCronJobExecuted(log types.Log) (*CronUpkeepCronJobExecuted, error) + + FilterCronJobUpdated(opts *bind.FilterOpts, id []*big.Int) (*CronUpkeepCronJobUpdatedIterator, error) + + WatchCronJobUpdated(opts *bind.WatchOpts, sink chan<- *CronUpkeepCronJobUpdated, id []*big.Int) (event.Subscription, error) + + ParseCronJobUpdated(log types.Log) (*CronUpkeepCronJobUpdated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *CronUpkeepOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*CronUpkeepOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*CronUpkeepOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *CronUpkeepOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*CronUpkeepOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*CronUpkeepPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *CronUpkeepPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*CronUpkeepPaused, error) + + FilterUnpaused(opts *bind.FilterOpts) (*CronUpkeepUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *CronUpkeepUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*CronUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/dummy_protocol_wrapper/dummy_protocol_wrapper.go b/core/gethwrappers/generated/dummy_protocol_wrapper/dummy_protocol_wrapper.go new 
file mode 100644 index 00000000..bc2668bc --- /dev/null +++ b/core/gethwrappers/generated/dummy_protocol_wrapper/dummy_protocol_wrapper.go @@ -0,0 +1,751 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package dummy_protocol_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var DummyProtocolMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"orderId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"exchange\",\"type\":\"address\"}],\"name\":\"LimitOrderExecuted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"price\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"LimitOrderSent\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"price\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"}],\"name\":\"LimitOrderWithdrawn\",\"typ
e\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"orderId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"exchange\",\"type\":\"address\"}],\"name\":\"executeLimitOrder\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"targetContract\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"t0\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"t1\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"t2\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"t3\",\"type\":\"bytes32\"}],\"name\":\"getAdvancedLogTriggerConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"logTrigger\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"targetContract\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"t0\",\"type\":\"bytes32\"}],\"name\":\"getBasicLogTriggerConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"logTrigger\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"price\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"sendLimitedOrder\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"price\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"}],\"name\":\"withdrawLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b506103e2806100206000396000f3fe608060405234801561001057600080fd5b50600436106100675760003560e01c80636bb17e4c116100505780636bb17e4c1461013d5780639ab74b0e1461013d578063af38c9c21461015057600080fd5b80632065ff7e1461006c5780635f35f80b14610081575b600080fd5b61007f61007a3660046102ad565b6101f0565b005b61012761008f3660046102e2565b6040805160c0808201835273ffffffffffffffffffffffffffffffffffffffff9890981680825260ff97881660208084019182528385019889526060808501988952608080860198895260a095860197885286519283019490945291519099168985015296519688019690965293519486019490945290519184019190915251828401528051808303909301835260e0909101905290565b604051610134919061033f565b60405180910390f35b61007f61014b3660046102ad565b61023a565b61012761015e3660046103ab565b6040805160c0808201835273ffffffffffffffffffffffffffffffffffffffff94909416808252600060208084018281528486019687526060808601848152608080880186815260a0988901968752895195860197909752925160ff168489015297519083015295519581019590955290519184019190915251828401528051808303909301835260e0909101905290565b8073ffffffffffffffffffffffffffffffffffffffff1682847fd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd60405160405180910390a4505050565b8073ffffffffffffffffffffffffffffffffffffffff1682847f3e9c37b3143f2eb7e9a2a0f8091b6de097b62efcfe48e1f68847a832e521750a60405160405180910390a4505050565b803573ffffffffffffffffffffffffffffffffffffffff811681146102a857600080fd5b919050565b6000806000606084860312156102c257600080fd5b83359250602084013591506102d960408501610284565b90509250925092565b60008060008060008060c087890312156102fb57600080fd5b61030487610284565b9550602087013560ff8116811461031a57600080fd5b95989597505050506040840135936060810135936080820135935060a0909101359150565b600060208083528351808285015260005b8181101561036c57858101830151858201604001528201610350565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b600080604083850312156103be57600080fd5b6103c7836102845
65b94602093909301359350505056fea164736f6c6343000810000a", +} + +var DummyProtocolABI = DummyProtocolMetaData.ABI + +var DummyProtocolBin = DummyProtocolMetaData.Bin + +func DeployDummyProtocol(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *DummyProtocol, error) { + parsed, err := DummyProtocolMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(DummyProtocolBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &DummyProtocol{address: address, abi: *parsed, DummyProtocolCaller: DummyProtocolCaller{contract: contract}, DummyProtocolTransactor: DummyProtocolTransactor{contract: contract}, DummyProtocolFilterer: DummyProtocolFilterer{contract: contract}}, nil +} + +type DummyProtocol struct { + address common.Address + abi abi.ABI + DummyProtocolCaller + DummyProtocolTransactor + DummyProtocolFilterer +} + +type DummyProtocolCaller struct { + contract *bind.BoundContract +} + +type DummyProtocolTransactor struct { + contract *bind.BoundContract +} + +type DummyProtocolFilterer struct { + contract *bind.BoundContract +} + +type DummyProtocolSession struct { + Contract *DummyProtocol + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type DummyProtocolCallerSession struct { + Contract *DummyProtocolCaller + CallOpts bind.CallOpts +} + +type DummyProtocolTransactorSession struct { + Contract *DummyProtocolTransactor + TransactOpts bind.TransactOpts +} + +type DummyProtocolRaw struct { + Contract *DummyProtocol +} + +type DummyProtocolCallerRaw struct { + Contract *DummyProtocolCaller +} + +type DummyProtocolTransactorRaw struct { + Contract *DummyProtocolTransactor +} + +func NewDummyProtocol(address common.Address, backend bind.ContractBackend) (*DummyProtocol, 
error) { + abi, err := abi.JSON(strings.NewReader(DummyProtocolABI)) + if err != nil { + return nil, err + } + contract, err := bindDummyProtocol(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &DummyProtocol{address: address, abi: abi, DummyProtocolCaller: DummyProtocolCaller{contract: contract}, DummyProtocolTransactor: DummyProtocolTransactor{contract: contract}, DummyProtocolFilterer: DummyProtocolFilterer{contract: contract}}, nil +} + +func NewDummyProtocolCaller(address common.Address, caller bind.ContractCaller) (*DummyProtocolCaller, error) { + contract, err := bindDummyProtocol(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &DummyProtocolCaller{contract: contract}, nil +} + +func NewDummyProtocolTransactor(address common.Address, transactor bind.ContractTransactor) (*DummyProtocolTransactor, error) { + contract, err := bindDummyProtocol(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &DummyProtocolTransactor{contract: contract}, nil +} + +func NewDummyProtocolFilterer(address common.Address, filterer bind.ContractFilterer) (*DummyProtocolFilterer, error) { + contract, err := bindDummyProtocol(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &DummyProtocolFilterer{contract: contract}, nil +} + +func bindDummyProtocol(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := DummyProtocolMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_DummyProtocol *DummyProtocolRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _DummyProtocol.Contract.DummyProtocolCaller.contract.Call(opts, result, method, params...) 
+} + +func (_DummyProtocol *DummyProtocolRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _DummyProtocol.Contract.DummyProtocolTransactor.contract.Transfer(opts) +} + +func (_DummyProtocol *DummyProtocolRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _DummyProtocol.Contract.DummyProtocolTransactor.contract.Transact(opts, method, params...) +} + +func (_DummyProtocol *DummyProtocolCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _DummyProtocol.Contract.contract.Call(opts, result, method, params...) +} + +func (_DummyProtocol *DummyProtocolTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _DummyProtocol.Contract.contract.Transfer(opts) +} + +func (_DummyProtocol *DummyProtocolTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _DummyProtocol.Contract.contract.Transact(opts, method, params...) 
+} + +func (_DummyProtocol *DummyProtocolCaller) GetAdvancedLogTriggerConfig(opts *bind.CallOpts, targetContract common.Address, selector uint8, t0 [32]byte, t1 [32]byte, t2 [32]byte, t3 [32]byte) ([]byte, error) { + var out []interface{} + err := _DummyProtocol.contract.Call(opts, &out, "getAdvancedLogTriggerConfig", targetContract, selector, t0, t1, t2, t3) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_DummyProtocol *DummyProtocolSession) GetAdvancedLogTriggerConfig(targetContract common.Address, selector uint8, t0 [32]byte, t1 [32]byte, t2 [32]byte, t3 [32]byte) ([]byte, error) { + return _DummyProtocol.Contract.GetAdvancedLogTriggerConfig(&_DummyProtocol.CallOpts, targetContract, selector, t0, t1, t2, t3) +} + +func (_DummyProtocol *DummyProtocolCallerSession) GetAdvancedLogTriggerConfig(targetContract common.Address, selector uint8, t0 [32]byte, t1 [32]byte, t2 [32]byte, t3 [32]byte) ([]byte, error) { + return _DummyProtocol.Contract.GetAdvancedLogTriggerConfig(&_DummyProtocol.CallOpts, targetContract, selector, t0, t1, t2, t3) +} + +func (_DummyProtocol *DummyProtocolCaller) GetBasicLogTriggerConfig(opts *bind.CallOpts, targetContract common.Address, t0 [32]byte) ([]byte, error) { + var out []interface{} + err := _DummyProtocol.contract.Call(opts, &out, "getBasicLogTriggerConfig", targetContract, t0) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_DummyProtocol *DummyProtocolSession) GetBasicLogTriggerConfig(targetContract common.Address, t0 [32]byte) ([]byte, error) { + return _DummyProtocol.Contract.GetBasicLogTriggerConfig(&_DummyProtocol.CallOpts, targetContract, t0) +} + +func (_DummyProtocol *DummyProtocolCallerSession) GetBasicLogTriggerConfig(targetContract common.Address, t0 [32]byte) ([]byte, error) { + return 
_DummyProtocol.Contract.GetBasicLogTriggerConfig(&_DummyProtocol.CallOpts, targetContract, t0) +} + +func (_DummyProtocol *DummyProtocolTransactor) ExecuteLimitOrder(opts *bind.TransactOpts, orderId *big.Int, amount *big.Int, exchange common.Address) (*types.Transaction, error) { + return _DummyProtocol.contract.Transact(opts, "executeLimitOrder", orderId, amount, exchange) +} + +func (_DummyProtocol *DummyProtocolSession) ExecuteLimitOrder(orderId *big.Int, amount *big.Int, exchange common.Address) (*types.Transaction, error) { + return _DummyProtocol.Contract.ExecuteLimitOrder(&_DummyProtocol.TransactOpts, orderId, amount, exchange) +} + +func (_DummyProtocol *DummyProtocolTransactorSession) ExecuteLimitOrder(orderId *big.Int, amount *big.Int, exchange common.Address) (*types.Transaction, error) { + return _DummyProtocol.Contract.ExecuteLimitOrder(&_DummyProtocol.TransactOpts, orderId, amount, exchange) +} + +func (_DummyProtocol *DummyProtocolTransactor) SendLimitedOrder(opts *bind.TransactOpts, amount *big.Int, price *big.Int, to common.Address) (*types.Transaction, error) { + return _DummyProtocol.contract.Transact(opts, "sendLimitedOrder", amount, price, to) +} + +func (_DummyProtocol *DummyProtocolSession) SendLimitedOrder(amount *big.Int, price *big.Int, to common.Address) (*types.Transaction, error) { + return _DummyProtocol.Contract.SendLimitedOrder(&_DummyProtocol.TransactOpts, amount, price, to) +} + +func (_DummyProtocol *DummyProtocolTransactorSession) SendLimitedOrder(amount *big.Int, price *big.Int, to common.Address) (*types.Transaction, error) { + return _DummyProtocol.Contract.SendLimitedOrder(&_DummyProtocol.TransactOpts, amount, price, to) +} + +func (_DummyProtocol *DummyProtocolTransactor) WithdrawLimit(opts *bind.TransactOpts, amount *big.Int, price *big.Int, from common.Address) (*types.Transaction, error) { + return _DummyProtocol.contract.Transact(opts, "withdrawLimit", amount, price, from) +} + +func (_DummyProtocol 
*DummyProtocolSession) WithdrawLimit(amount *big.Int, price *big.Int, from common.Address) (*types.Transaction, error) { + return _DummyProtocol.Contract.WithdrawLimit(&_DummyProtocol.TransactOpts, amount, price, from) +} + +func (_DummyProtocol *DummyProtocolTransactorSession) WithdrawLimit(amount *big.Int, price *big.Int, from common.Address) (*types.Transaction, error) { + return _DummyProtocol.Contract.WithdrawLimit(&_DummyProtocol.TransactOpts, amount, price, from) +} + +type DummyProtocolLimitOrderExecutedIterator struct { + Event *DummyProtocolLimitOrderExecuted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *DummyProtocolLimitOrderExecutedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(DummyProtocolLimitOrderExecuted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(DummyProtocolLimitOrderExecuted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *DummyProtocolLimitOrderExecutedIterator) Error() error { + return it.fail +} + +func (it *DummyProtocolLimitOrderExecutedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type DummyProtocolLimitOrderExecuted struct { + OrderId *big.Int + Amount *big.Int + Exchange common.Address + Raw types.Log +} + +func (_DummyProtocol *DummyProtocolFilterer) FilterLimitOrderExecuted(opts *bind.FilterOpts, orderId []*big.Int, amount []*big.Int, exchange []common.Address) (*DummyProtocolLimitOrderExecutedIterator, error) { + + var orderIdRule []interface{} + 
for _, orderIdItem := range orderId { + orderIdRule = append(orderIdRule, orderIdItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var exchangeRule []interface{} + for _, exchangeItem := range exchange { + exchangeRule = append(exchangeRule, exchangeItem) + } + + logs, sub, err := _DummyProtocol.contract.FilterLogs(opts, "LimitOrderExecuted", orderIdRule, amountRule, exchangeRule) + if err != nil { + return nil, err + } + return &DummyProtocolLimitOrderExecutedIterator{contract: _DummyProtocol.contract, event: "LimitOrderExecuted", logs: logs, sub: sub}, nil +} + +func (_DummyProtocol *DummyProtocolFilterer) WatchLimitOrderExecuted(opts *bind.WatchOpts, sink chan<- *DummyProtocolLimitOrderExecuted, orderId []*big.Int, amount []*big.Int, exchange []common.Address) (event.Subscription, error) { + + var orderIdRule []interface{} + for _, orderIdItem := range orderId { + orderIdRule = append(orderIdRule, orderIdItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var exchangeRule []interface{} + for _, exchangeItem := range exchange { + exchangeRule = append(exchangeRule, exchangeItem) + } + + logs, sub, err := _DummyProtocol.contract.WatchLogs(opts, "LimitOrderExecuted", orderIdRule, amountRule, exchangeRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(DummyProtocolLimitOrderExecuted) + if err := _DummyProtocol.contract.UnpackLog(event, "LimitOrderExecuted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_DummyProtocol *DummyProtocolFilterer) 
ParseLimitOrderExecuted(log types.Log) (*DummyProtocolLimitOrderExecuted, error) { + event := new(DummyProtocolLimitOrderExecuted) + if err := _DummyProtocol.contract.UnpackLog(event, "LimitOrderExecuted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type DummyProtocolLimitOrderSentIterator struct { + Event *DummyProtocolLimitOrderSent + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *DummyProtocolLimitOrderSentIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(DummyProtocolLimitOrderSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(DummyProtocolLimitOrderSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *DummyProtocolLimitOrderSentIterator) Error() error { + return it.fail +} + +func (it *DummyProtocolLimitOrderSentIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type DummyProtocolLimitOrderSent struct { + Amount *big.Int + Price *big.Int + To common.Address + Raw types.Log +} + +func (_DummyProtocol *DummyProtocolFilterer) FilterLimitOrderSent(opts *bind.FilterOpts, amount []*big.Int, price []*big.Int, to []common.Address) (*DummyProtocolLimitOrderSentIterator, error) { + + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var priceRule []interface{} + for _, priceItem := range price { + priceRule = append(priceRule, priceItem) + } + var toRule []interface{} + for _, toItem := 
range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _DummyProtocol.contract.FilterLogs(opts, "LimitOrderSent", amountRule, priceRule, toRule) + if err != nil { + return nil, err + } + return &DummyProtocolLimitOrderSentIterator{contract: _DummyProtocol.contract, event: "LimitOrderSent", logs: logs, sub: sub}, nil +} + +func (_DummyProtocol *DummyProtocolFilterer) WatchLimitOrderSent(opts *bind.WatchOpts, sink chan<- *DummyProtocolLimitOrderSent, amount []*big.Int, price []*big.Int, to []common.Address) (event.Subscription, error) { + + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var priceRule []interface{} + for _, priceItem := range price { + priceRule = append(priceRule, priceItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _DummyProtocol.contract.WatchLogs(opts, "LimitOrderSent", amountRule, priceRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(DummyProtocolLimitOrderSent) + if err := _DummyProtocol.contract.UnpackLog(event, "LimitOrderSent", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_DummyProtocol *DummyProtocolFilterer) ParseLimitOrderSent(log types.Log) (*DummyProtocolLimitOrderSent, error) { + event := new(DummyProtocolLimitOrderSent) + if err := _DummyProtocol.contract.UnpackLog(event, "LimitOrderSent", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type DummyProtocolLimitOrderWithdrawnIterator struct { + Event *DummyProtocolLimitOrderWithdrawn + + contract *bind.BoundContract + event 
string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *DummyProtocolLimitOrderWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(DummyProtocolLimitOrderWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(DummyProtocolLimitOrderWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *DummyProtocolLimitOrderWithdrawnIterator) Error() error { + return it.fail +} + +func (it *DummyProtocolLimitOrderWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type DummyProtocolLimitOrderWithdrawn struct { + Amount *big.Int + Price *big.Int + From common.Address + Raw types.Log +} + +func (_DummyProtocol *DummyProtocolFilterer) FilterLimitOrderWithdrawn(opts *bind.FilterOpts, amount []*big.Int, price []*big.Int, from []common.Address) (*DummyProtocolLimitOrderWithdrawnIterator, error) { + + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var priceRule []interface{} + for _, priceItem := range price { + priceRule = append(priceRule, priceItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _DummyProtocol.contract.FilterLogs(opts, "LimitOrderWithdrawn", amountRule, priceRule, fromRule) + if err != nil { + return nil, err + } + return &DummyProtocolLimitOrderWithdrawnIterator{contract: _DummyProtocol.contract, event: "LimitOrderWithdrawn", logs: logs, sub: sub}, nil +} + +func 
(_DummyProtocol *DummyProtocolFilterer) WatchLimitOrderWithdrawn(opts *bind.WatchOpts, sink chan<- *DummyProtocolLimitOrderWithdrawn, amount []*big.Int, price []*big.Int, from []common.Address) (event.Subscription, error) { + + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var priceRule []interface{} + for _, priceItem := range price { + priceRule = append(priceRule, priceItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _DummyProtocol.contract.WatchLogs(opts, "LimitOrderWithdrawn", amountRule, priceRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(DummyProtocolLimitOrderWithdrawn) + if err := _DummyProtocol.contract.UnpackLog(event, "LimitOrderWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_DummyProtocol *DummyProtocolFilterer) ParseLimitOrderWithdrawn(log types.Log) (*DummyProtocolLimitOrderWithdrawn, error) { + event := new(DummyProtocolLimitOrderWithdrawn) + if err := _DummyProtocol.contract.UnpackLog(event, "LimitOrderWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_DummyProtocol *DummyProtocol) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _DummyProtocol.abi.Events["LimitOrderExecuted"].ID: + return _DummyProtocol.ParseLimitOrderExecuted(log) + case _DummyProtocol.abi.Events["LimitOrderSent"].ID: + return _DummyProtocol.ParseLimitOrderSent(log) + case _DummyProtocol.abi.Events["LimitOrderWithdrawn"].ID: + return 
_DummyProtocol.ParseLimitOrderWithdrawn(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (DummyProtocolLimitOrderExecuted) Topic() common.Hash { + return common.HexToHash("0xd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd") +} + +func (DummyProtocolLimitOrderSent) Topic() common.Hash { + return common.HexToHash("0x3e9c37b3143f2eb7e9a2a0f8091b6de097b62efcfe48e1f68847a832e521750a") +} + +func (DummyProtocolLimitOrderWithdrawn) Topic() common.Hash { + return common.HexToHash("0x0a71b8ed921ff64d49e4d39449f8a21094f38a0aeae489c3051aedd63f2c229f") +} + +func (_DummyProtocol *DummyProtocol) Address() common.Address { + return _DummyProtocol.address +} + +type DummyProtocolInterface interface { + GetAdvancedLogTriggerConfig(opts *bind.CallOpts, targetContract common.Address, selector uint8, t0 [32]byte, t1 [32]byte, t2 [32]byte, t3 [32]byte) ([]byte, error) + + GetBasicLogTriggerConfig(opts *bind.CallOpts, targetContract common.Address, t0 [32]byte) ([]byte, error) + + ExecuteLimitOrder(opts *bind.TransactOpts, orderId *big.Int, amount *big.Int, exchange common.Address) (*types.Transaction, error) + + SendLimitedOrder(opts *bind.TransactOpts, amount *big.Int, price *big.Int, to common.Address) (*types.Transaction, error) + + WithdrawLimit(opts *bind.TransactOpts, amount *big.Int, price *big.Int, from common.Address) (*types.Transaction, error) + + FilterLimitOrderExecuted(opts *bind.FilterOpts, orderId []*big.Int, amount []*big.Int, exchange []common.Address) (*DummyProtocolLimitOrderExecutedIterator, error) + + WatchLimitOrderExecuted(opts *bind.WatchOpts, sink chan<- *DummyProtocolLimitOrderExecuted, orderId []*big.Int, amount []*big.Int, exchange []common.Address) (event.Subscription, error) + + ParseLimitOrderExecuted(log types.Log) (*DummyProtocolLimitOrderExecuted, error) + + FilterLimitOrderSent(opts *bind.FilterOpts, amount []*big.Int, price []*big.Int, to []common.Address) 
(*DummyProtocolLimitOrderSentIterator, error) + + WatchLimitOrderSent(opts *bind.WatchOpts, sink chan<- *DummyProtocolLimitOrderSent, amount []*big.Int, price []*big.Int, to []common.Address) (event.Subscription, error) + + ParseLimitOrderSent(log types.Log) (*DummyProtocolLimitOrderSent, error) + + FilterLimitOrderWithdrawn(opts *bind.FilterOpts, amount []*big.Int, price []*big.Int, from []common.Address) (*DummyProtocolLimitOrderWithdrawnIterator, error) + + WatchLimitOrderWithdrawn(opts *bind.WatchOpts, sink chan<- *DummyProtocolLimitOrderWithdrawn, amount []*big.Int, price []*big.Int, from []common.Address) (event.Subscription, error) + + ParseLimitOrderWithdrawn(log types.Log) (*DummyProtocolLimitOrderWithdrawn, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/flags_wrapper/flags_wrapper.go b/core/gethwrappers/generated/flags_wrapper/flags_wrapper.go new file mode 100644 index 00000000..58c730a3 --- /dev/null +++ b/core/gethwrappers/generated/flags_wrapper/flags_wrapper.go @@ -0,0 +1,1710 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package flags_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var FlagsMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"racAddress\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"user\",\"type\":\"address\"}],\"name\":\"AddedAccess\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"CheckAccessDisabled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"CheckAccessEnabled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"subject\",\"type\":\"address\"}],\"name\":\"FlagLowered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"subject\",\"type\":\"address\"}],\"name\":\"FlagRaised\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"Owner
shipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previous\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"RaisingAccessControllerUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"user\",\"type\":\"address\"}],\"name\":\"RemovedAccess\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_user\",\"type\":\"address\"}],\"name\":\"addAccess\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"checkEnabled\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"disableAccessCheck\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"enableAccessCheck\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"subject\",\"type\":\"address\"}],\"name\":\"getFlag\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"subjects\",\"type\":\"address[]\"}],\"name\":\"getFlags\",\"outputs\":[{\"internalType\":\"bool[]\",\"name\":\"\",\"type\":\"bool[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_user\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_calldata\",\"type\":\"bytes\"}],\"name\":\"hasAccess\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalTyp
e\":\"address[]\",\"name\":\"subjects\",\"type\":\"address[]\"}],\"name\":\"lowerFlags\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"subject\",\"type\":\"address\"}],\"name\":\"raiseFlag\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"subjects\",\"type\":\"address[]\"}],\"name\":\"raiseFlags\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"raisingAccessController\",\"outputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_user\",\"type\":\"address\"}],\"name\":\"removeAccess\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"racAddress\",\"type\":\"address\"}],\"name\":\"setRaisingAccessController\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b506040516115a43803806115a48339818101604052602081101561003357600080fd5b5051600080546001600160a01b031916331790556001805460ff60a01b1916600160a01b17905561006c816001600160e01b0361007216565b5061013a565b6000546001600160a01b031633146100d1576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b6003546001600160a01b03908116908216811461013657600380546001600160a01b0319166001600160a01b0384811691821790925560405190918316907fbaf9ea078655a4fffefd08f9435677bbc91e457a6d015fe7de1d0e68b8802cac90600090a35b5050565b61145b806101496000396000f3fe608060405234801561001057600080fd5b50600436106101005760003560e01c80637d723cac11610097578063a118f24911610066578063a118f24914610468578063d74af2631461049b578063dc7f0124146104ce578063f2fde38b146104d657610100565b80637d723cac146103655780638038e4a1146104255780638823da6c1461042d5780638da5cb5b1461046057610100565b8063517e89fe116100d3578063517e89fe146101f75780636b14daf81461022a578063760bc82d146102ed57806379ba50971461035d57610100565b80630a75698314610105578063282865961461010f5780632e1d859c1461017f578063357e47fe146101b0575b600080fd5b61010d610509565b005b61010d6004803603602081101561012557600080fd5b81019060208101813564010000000081111561014057600080fd5b82018360208201111561015257600080fd5b8035906020019184602083028401116401000000008311171561017457600080fd5b509092509050610606565b610187610761565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190f35b6101e3600480360360208110156101c657600080fd5b503573ffffffffffffffffffffffffffffffffffffffff1661077d565b604080519115158252519081900360200190f35b61010d6004803603602081101561020d57600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610857565b6101e36004803603604081101561024057600080fd5b73ffffffffffffffffffffffffffffffffffffffff823516919081019060408101602082013564010000000081111561027857600080fd5b82018360208201111561028a57600080fd5b8035906020019184600183028401116401000
00000831117156102ac57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610978945050505050565b61010d6004803603602081101561030357600080fd5b81019060208101813564010000000081111561031e57600080fd5b82018360208201111561033057600080fd5b8035906020019184602083028401116401000000008311171561035257600080fd5b5090925090506109ab565b61010d610a62565b6103d56004803603602081101561037b57600080fd5b81019060208101813564010000000081111561039657600080fd5b8201836020820111156103a857600080fd5b803590602001918460208302840111640100000000831117156103ca57600080fd5b509092509050610b64565b60408051602080825283518183015283519192839290830191858101910280838360005b838110156104115781810151838201526020016103f9565b505050509050019250505060405180910390f35b61010d610d04565b61010d6004803603602081101561044357600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610e16565b610187610f4e565b61010d6004803603602081101561047e57600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610f6a565b61010d600480360360208110156104b157600080fd5b503573ffffffffffffffffffffffffffffffffffffffff166110a3565b6101e361111f565b61010d600480360360208110156104ec57600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16611140565b60005473ffffffffffffffffffffffffffffffffffffffff16331461058f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b60015474010000000000000000000000000000000000000000900460ff161561060457600180547fffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff1690556040517f3be8a977a014527b50ae38adda80b56911c267328965c98ddc385d248f53963890600090a15b565b60005473ffffffffffffffffffffffffffffffffffffffff16331461068c57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152905190819
00360640190fd5b60005b8181101561075c5760008383838181106106a557fe5b6020908102929092013573ffffffffffffffffffffffffffffffffffffffff16600081815260049093526040909220549192505060ff16156107535773ffffffffffffffffffffffffffffffffffffffff811660008181526004602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055517fd86728e2e5cbaa28c1d357b5fbccc9c1ab0add09950eb7cac42df9acb24c4bc89190a25b5060010161068f565b505050565b60035473ffffffffffffffffffffffffffffffffffffffff1681565b60006107c0336000368080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061097892505050565b61082b57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600960248201527f4e6f206163636573730000000000000000000000000000000000000000000000604482015290519081900360640190fd5b5073ffffffffffffffffffffffffffffffffffffffff1660009081526004602052604090205460ff1690565b60005473ffffffffffffffffffffffffffffffffffffffff1633146108dd57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b60035473ffffffffffffffffffffffffffffffffffffffff908116908216811461097457600380547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff84811691821790925560405190918316907fbaf9ea078655a4fffefd08f9435677bbc91e457a6d015fe7de1d0e68b8802cac90600090a35b5050565b6000610984838361123c565b806109a4575073ffffffffffffffffffffffffffffffffffffffff831632145b9392505050565b6109b3611291565b610a1e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f4e6f7420616c6c6f77656420746f20726169736520666c616773000000000000604482015290519081900360640190fd5b60005b8181101561075c57610a5a838383818110610a3857fe5b9050602002013573ffffffffffffffffffffffffffffffffffffffff166113aa565b600101610a21565b60015473fffffffffffff
fffffffffffffffffffffffffff163314610ae857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015290519081900360640190fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6060610ba7336000368080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061097892505050565b610c1257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600960248201527f4e6f206163636573730000000000000000000000000000000000000000000000604482015290519081900360640190fd5b60608267ffffffffffffffff81118015610c2b57600080fd5b50604051908082528060200260200182016040528015610c55578160200160208202803683370190505b50905060005b83811015610cfc5760046000868684818110610c7357fe5b9050602002013573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff16828281518110610ce457fe5b91151560209283029190910190910152600101610c5b565b509392505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610d8a57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b60015474010000000000000000000000000000000000000000900460ff1661060457600180547fffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff16740100000000000000000000000000000000000000001790556040517faebf329500988c6488a0074e5a0a9ff304561fc5c6fc877aeb1d59c8282c348090600090a1565b60005473ffffffffffffffffffffffffffffffffffffffff163314610e9c57604080517f08c379a0000000000000000000000000000000000000000000000
00000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff811660009081526002602052604090205460ff1615610f4b5773ffffffffffffffffffffffffffffffffffffffff811660008181526002602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055815192835290517f3d68a6fce901d20453d1a7aa06bf3950302a735948037deb182a8db66df2a0d19281900390910190a15b50565b60005473ffffffffffffffffffffffffffffffffffffffff1681565b60005473ffffffffffffffffffffffffffffffffffffffff163314610ff057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff811660009081526002602052604090205460ff16610f4b5773ffffffffffffffffffffffffffffffffffffffff811660008181526002602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055815192835290517f87286ad1f399c8e82bf0c4ef4fcdc570ea2e1e92176e5c848b6413545b885db49281900390910190a150565b6110ab611291565b61111657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f4e6f7420616c6c6f77656420746f20726169736520666c616773000000000000604482015290519081900360640190fd5b610f4b816113aa565b60015474010000000000000000000000000000000000000000900460ff1681565b60005473ffffffffffffffffffffffffffffffffffffffff1633146111c657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b7
3ffffffffffffffffffffffffffffffffffffffff821660009081526002602052604081205460ff16806109a457505060015474010000000000000000000000000000000000000000900460ff161592915050565b6000805473ffffffffffffffffffffffffffffffffffffffff163314806113a55750600354604080517f6b14daf8000000000000000000000000000000000000000000000000000000008152336004820181815260248301938452366044840181905273ffffffffffffffffffffffffffffffffffffffff90951694636b14daf894929360009391929190606401848480828437600083820152604051601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016909201965060209550909350505081840390508186803b15801561137857600080fd5b505afa15801561138c573d6000803e3d6000fd5b505050506040513d60208110156113a257600080fd5b50515b905090565b73ffffffffffffffffffffffffffffffffffffffff811660009081526004602052604090205460ff16610f4b5773ffffffffffffffffffffffffffffffffffffffff811660008181526004602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055517f881febd4cd194dd4ace637642862aef1fb59a65c7e5551a5d9208f268d11c0069190a25056fea164736f6c6343000606000a", +} + +var FlagsABI = FlagsMetaData.ABI + +var FlagsBin = FlagsMetaData.Bin + +func DeployFlags(auth *bind.TransactOpts, backend bind.ContractBackend, racAddress common.Address) (common.Address, *types.Transaction, *Flags, error) { + parsed, err := FlagsMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(FlagsBin), backend, racAddress) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Flags{address: address, abi: *parsed, FlagsCaller: FlagsCaller{contract: contract}, FlagsTransactor: FlagsTransactor{contract: contract}, FlagsFilterer: FlagsFilterer{contract: contract}}, nil +} + +type Flags struct { + address common.Address + abi abi.ABI + FlagsCaller + 
FlagsTransactor + FlagsFilterer +} + +type FlagsCaller struct { + contract *bind.BoundContract +} + +type FlagsTransactor struct { + contract *bind.BoundContract +} + +type FlagsFilterer struct { + contract *bind.BoundContract +} + +type FlagsSession struct { + Contract *Flags + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type FlagsCallerSession struct { + Contract *FlagsCaller + CallOpts bind.CallOpts +} + +type FlagsTransactorSession struct { + Contract *FlagsTransactor + TransactOpts bind.TransactOpts +} + +type FlagsRaw struct { + Contract *Flags +} + +type FlagsCallerRaw struct { + Contract *FlagsCaller +} + +type FlagsTransactorRaw struct { + Contract *FlagsTransactor +} + +func NewFlags(address common.Address, backend bind.ContractBackend) (*Flags, error) { + abi, err := abi.JSON(strings.NewReader(FlagsABI)) + if err != nil { + return nil, err + } + contract, err := bindFlags(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Flags{address: address, abi: abi, FlagsCaller: FlagsCaller{contract: contract}, FlagsTransactor: FlagsTransactor{contract: contract}, FlagsFilterer: FlagsFilterer{contract: contract}}, nil +} + +func NewFlagsCaller(address common.Address, caller bind.ContractCaller) (*FlagsCaller, error) { + contract, err := bindFlags(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FlagsCaller{contract: contract}, nil +} + +func NewFlagsTransactor(address common.Address, transactor bind.ContractTransactor) (*FlagsTransactor, error) { + contract, err := bindFlags(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &FlagsTransactor{contract: contract}, nil +} + +func NewFlagsFilterer(address common.Address, filterer bind.ContractFilterer) (*FlagsFilterer, error) { + contract, err := bindFlags(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FlagsFilterer{contract: contract}, nil +} + +func bindFlags(address 
common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FlagsMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_Flags *FlagsRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Flags.Contract.FlagsCaller.contract.Call(opts, result, method, params...) +} + +func (_Flags *FlagsRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Flags.Contract.FlagsTransactor.contract.Transfer(opts) +} + +func (_Flags *FlagsRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Flags.Contract.FlagsTransactor.contract.Transact(opts, method, params...) +} + +func (_Flags *FlagsCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Flags.Contract.contract.Call(opts, result, method, params...) +} + +func (_Flags *FlagsTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Flags.Contract.contract.Transfer(opts) +} + +func (_Flags *FlagsTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Flags.Contract.contract.Transact(opts, method, params...) 
+} + +func (_Flags *FlagsCaller) CheckEnabled(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _Flags.contract.Call(opts, &out, "checkEnabled") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_Flags *FlagsSession) CheckEnabled() (bool, error) { + return _Flags.Contract.CheckEnabled(&_Flags.CallOpts) +} + +func (_Flags *FlagsCallerSession) CheckEnabled() (bool, error) { + return _Flags.Contract.CheckEnabled(&_Flags.CallOpts) +} + +func (_Flags *FlagsCaller) GetFlag(opts *bind.CallOpts, subject common.Address) (bool, error) { + var out []interface{} + err := _Flags.contract.Call(opts, &out, "getFlag", subject) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_Flags *FlagsSession) GetFlag(subject common.Address) (bool, error) { + return _Flags.Contract.GetFlag(&_Flags.CallOpts, subject) +} + +func (_Flags *FlagsCallerSession) GetFlag(subject common.Address) (bool, error) { + return _Flags.Contract.GetFlag(&_Flags.CallOpts, subject) +} + +func (_Flags *FlagsCaller) GetFlags(opts *bind.CallOpts, subjects []common.Address) ([]bool, error) { + var out []interface{} + err := _Flags.contract.Call(opts, &out, "getFlags", subjects) + + if err != nil { + return *new([]bool), err + } + + out0 := *abi.ConvertType(out[0], new([]bool)).(*[]bool) + + return out0, err + +} + +func (_Flags *FlagsSession) GetFlags(subjects []common.Address) ([]bool, error) { + return _Flags.Contract.GetFlags(&_Flags.CallOpts, subjects) +} + +func (_Flags *FlagsCallerSession) GetFlags(subjects []common.Address) ([]bool, error) { + return _Flags.Contract.GetFlags(&_Flags.CallOpts, subjects) +} + +func (_Flags *FlagsCaller) HasAccess(opts *bind.CallOpts, _user common.Address, _calldata []byte) (bool, error) { + var out []interface{} + err := _Flags.contract.Call(opts, &out, "hasAccess", _user, _calldata) 
+ + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_Flags *FlagsSession) HasAccess(_user common.Address, _calldata []byte) (bool, error) { + return _Flags.Contract.HasAccess(&_Flags.CallOpts, _user, _calldata) +} + +func (_Flags *FlagsCallerSession) HasAccess(_user common.Address, _calldata []byte) (bool, error) { + return _Flags.Contract.HasAccess(&_Flags.CallOpts, _user, _calldata) +} + +func (_Flags *FlagsCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Flags.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_Flags *FlagsSession) Owner() (common.Address, error) { + return _Flags.Contract.Owner(&_Flags.CallOpts) +} + +func (_Flags *FlagsCallerSession) Owner() (common.Address, error) { + return _Flags.Contract.Owner(&_Flags.CallOpts) +} + +func (_Flags *FlagsCaller) RaisingAccessController(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Flags.contract.Call(opts, &out, "raisingAccessController") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_Flags *FlagsSession) RaisingAccessController() (common.Address, error) { + return _Flags.Contract.RaisingAccessController(&_Flags.CallOpts) +} + +func (_Flags *FlagsCallerSession) RaisingAccessController() (common.Address, error) { + return _Flags.Contract.RaisingAccessController(&_Flags.CallOpts) +} + +func (_Flags *FlagsTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Flags.contract.Transact(opts, "acceptOwnership") +} + +func (_Flags *FlagsSession) AcceptOwnership() (*types.Transaction, error) { + return 
_Flags.Contract.AcceptOwnership(&_Flags.TransactOpts) +} + +func (_Flags *FlagsTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _Flags.Contract.AcceptOwnership(&_Flags.TransactOpts) +} + +func (_Flags *FlagsTransactor) AddAccess(opts *bind.TransactOpts, _user common.Address) (*types.Transaction, error) { + return _Flags.contract.Transact(opts, "addAccess", _user) +} + +func (_Flags *FlagsSession) AddAccess(_user common.Address) (*types.Transaction, error) { + return _Flags.Contract.AddAccess(&_Flags.TransactOpts, _user) +} + +func (_Flags *FlagsTransactorSession) AddAccess(_user common.Address) (*types.Transaction, error) { + return _Flags.Contract.AddAccess(&_Flags.TransactOpts, _user) +} + +func (_Flags *FlagsTransactor) DisableAccessCheck(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Flags.contract.Transact(opts, "disableAccessCheck") +} + +func (_Flags *FlagsSession) DisableAccessCheck() (*types.Transaction, error) { + return _Flags.Contract.DisableAccessCheck(&_Flags.TransactOpts) +} + +func (_Flags *FlagsTransactorSession) DisableAccessCheck() (*types.Transaction, error) { + return _Flags.Contract.DisableAccessCheck(&_Flags.TransactOpts) +} + +func (_Flags *FlagsTransactor) EnableAccessCheck(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Flags.contract.Transact(opts, "enableAccessCheck") +} + +func (_Flags *FlagsSession) EnableAccessCheck() (*types.Transaction, error) { + return _Flags.Contract.EnableAccessCheck(&_Flags.TransactOpts) +} + +func (_Flags *FlagsTransactorSession) EnableAccessCheck() (*types.Transaction, error) { + return _Flags.Contract.EnableAccessCheck(&_Flags.TransactOpts) +} + +func (_Flags *FlagsTransactor) LowerFlags(opts *bind.TransactOpts, subjects []common.Address) (*types.Transaction, error) { + return _Flags.contract.Transact(opts, "lowerFlags", subjects) +} + +func (_Flags *FlagsSession) LowerFlags(subjects []common.Address) (*types.Transaction, error) { + return 
_Flags.Contract.LowerFlags(&_Flags.TransactOpts, subjects) +} + +func (_Flags *FlagsTransactorSession) LowerFlags(subjects []common.Address) (*types.Transaction, error) { + return _Flags.Contract.LowerFlags(&_Flags.TransactOpts, subjects) +} + +func (_Flags *FlagsTransactor) RaiseFlag(opts *bind.TransactOpts, subject common.Address) (*types.Transaction, error) { + return _Flags.contract.Transact(opts, "raiseFlag", subject) +} + +func (_Flags *FlagsSession) RaiseFlag(subject common.Address) (*types.Transaction, error) { + return _Flags.Contract.RaiseFlag(&_Flags.TransactOpts, subject) +} + +func (_Flags *FlagsTransactorSession) RaiseFlag(subject common.Address) (*types.Transaction, error) { + return _Flags.Contract.RaiseFlag(&_Flags.TransactOpts, subject) +} + +func (_Flags *FlagsTransactor) RaiseFlags(opts *bind.TransactOpts, subjects []common.Address) (*types.Transaction, error) { + return _Flags.contract.Transact(opts, "raiseFlags", subjects) +} + +func (_Flags *FlagsSession) RaiseFlags(subjects []common.Address) (*types.Transaction, error) { + return _Flags.Contract.RaiseFlags(&_Flags.TransactOpts, subjects) +} + +func (_Flags *FlagsTransactorSession) RaiseFlags(subjects []common.Address) (*types.Transaction, error) { + return _Flags.Contract.RaiseFlags(&_Flags.TransactOpts, subjects) +} + +func (_Flags *FlagsTransactor) RemoveAccess(opts *bind.TransactOpts, _user common.Address) (*types.Transaction, error) { + return _Flags.contract.Transact(opts, "removeAccess", _user) +} + +func (_Flags *FlagsSession) RemoveAccess(_user common.Address) (*types.Transaction, error) { + return _Flags.Contract.RemoveAccess(&_Flags.TransactOpts, _user) +} + +func (_Flags *FlagsTransactorSession) RemoveAccess(_user common.Address) (*types.Transaction, error) { + return _Flags.Contract.RemoveAccess(&_Flags.TransactOpts, _user) +} + +func (_Flags *FlagsTransactor) SetRaisingAccessController(opts *bind.TransactOpts, racAddress common.Address) (*types.Transaction, error) { + return 
_Flags.contract.Transact(opts, "setRaisingAccessController", racAddress) +} + +func (_Flags *FlagsSession) SetRaisingAccessController(racAddress common.Address) (*types.Transaction, error) { + return _Flags.Contract.SetRaisingAccessController(&_Flags.TransactOpts, racAddress) +} + +func (_Flags *FlagsTransactorSession) SetRaisingAccessController(racAddress common.Address) (*types.Transaction, error) { + return _Flags.Contract.SetRaisingAccessController(&_Flags.TransactOpts, racAddress) +} + +func (_Flags *FlagsTransactor) TransferOwnership(opts *bind.TransactOpts, _to common.Address) (*types.Transaction, error) { + return _Flags.contract.Transact(opts, "transferOwnership", _to) +} + +func (_Flags *FlagsSession) TransferOwnership(_to common.Address) (*types.Transaction, error) { + return _Flags.Contract.TransferOwnership(&_Flags.TransactOpts, _to) +} + +func (_Flags *FlagsTransactorSession) TransferOwnership(_to common.Address) (*types.Transaction, error) { + return _Flags.Contract.TransferOwnership(&_Flags.TransactOpts, _to) +} + +type FlagsAddedAccessIterator struct { + Event *FlagsAddedAccess + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FlagsAddedAccessIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FlagsAddedAccess) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FlagsAddedAccess) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FlagsAddedAccessIterator) Error() error { + return it.fail +} + +func (it 
*FlagsAddedAccessIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FlagsAddedAccess struct { + User common.Address + Raw types.Log +} + +func (_Flags *FlagsFilterer) FilterAddedAccess(opts *bind.FilterOpts) (*FlagsAddedAccessIterator, error) { + + logs, sub, err := _Flags.contract.FilterLogs(opts, "AddedAccess") + if err != nil { + return nil, err + } + return &FlagsAddedAccessIterator{contract: _Flags.contract, event: "AddedAccess", logs: logs, sub: sub}, nil +} + +func (_Flags *FlagsFilterer) WatchAddedAccess(opts *bind.WatchOpts, sink chan<- *FlagsAddedAccess) (event.Subscription, error) { + + logs, sub, err := _Flags.contract.WatchLogs(opts, "AddedAccess") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FlagsAddedAccess) + if err := _Flags.contract.UnpackLog(event, "AddedAccess", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Flags *FlagsFilterer) ParseAddedAccess(log types.Log) (*FlagsAddedAccess, error) { + event := new(FlagsAddedAccess) + if err := _Flags.contract.UnpackLog(event, "AddedAccess", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FlagsCheckAccessDisabledIterator struct { + Event *FlagsCheckAccessDisabled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FlagsCheckAccessDisabledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FlagsCheckAccessDisabled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FlagsCheckAccessDisabled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FlagsCheckAccessDisabledIterator) Error() error { + return it.fail +} + +func (it *FlagsCheckAccessDisabledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FlagsCheckAccessDisabled struct { + Raw types.Log +} + +func (_Flags *FlagsFilterer) FilterCheckAccessDisabled(opts *bind.FilterOpts) (*FlagsCheckAccessDisabledIterator, error) { + + logs, sub, err := _Flags.contract.FilterLogs(opts, "CheckAccessDisabled") + if err != nil { + return nil, err + } + return &FlagsCheckAccessDisabledIterator{contract: _Flags.contract, event: "CheckAccessDisabled", logs: logs, sub: sub}, nil +} + +func (_Flags *FlagsFilterer) WatchCheckAccessDisabled(opts *bind.WatchOpts, sink chan<- *FlagsCheckAccessDisabled) (event.Subscription, error) { + + logs, sub, err := _Flags.contract.WatchLogs(opts, "CheckAccessDisabled") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FlagsCheckAccessDisabled) + if err := _Flags.contract.UnpackLog(event, "CheckAccessDisabled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Flags *FlagsFilterer) ParseCheckAccessDisabled(log types.Log) (*FlagsCheckAccessDisabled, error) { + event := new(FlagsCheckAccessDisabled) + if err := _Flags.contract.UnpackLog(event, "CheckAccessDisabled", log); err != nil { 
+ return nil, err + } + event.Raw = log + return event, nil +} + +type FlagsCheckAccessEnabledIterator struct { + Event *FlagsCheckAccessEnabled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FlagsCheckAccessEnabledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FlagsCheckAccessEnabled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FlagsCheckAccessEnabled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FlagsCheckAccessEnabledIterator) Error() error { + return it.fail +} + +func (it *FlagsCheckAccessEnabledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FlagsCheckAccessEnabled struct { + Raw types.Log +} + +func (_Flags *FlagsFilterer) FilterCheckAccessEnabled(opts *bind.FilterOpts) (*FlagsCheckAccessEnabledIterator, error) { + + logs, sub, err := _Flags.contract.FilterLogs(opts, "CheckAccessEnabled") + if err != nil { + return nil, err + } + return &FlagsCheckAccessEnabledIterator{contract: _Flags.contract, event: "CheckAccessEnabled", logs: logs, sub: sub}, nil +} + +func (_Flags *FlagsFilterer) WatchCheckAccessEnabled(opts *bind.WatchOpts, sink chan<- *FlagsCheckAccessEnabled) (event.Subscription, error) { + + logs, sub, err := _Flags.contract.WatchLogs(opts, "CheckAccessEnabled") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(FlagsCheckAccessEnabled) + if err := _Flags.contract.UnpackLog(event, "CheckAccessEnabled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Flags *FlagsFilterer) ParseCheckAccessEnabled(log types.Log) (*FlagsCheckAccessEnabled, error) { + event := new(FlagsCheckAccessEnabled) + if err := _Flags.contract.UnpackLog(event, "CheckAccessEnabled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FlagsFlagLoweredIterator struct { + Event *FlagsFlagLowered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FlagsFlagLoweredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FlagsFlagLowered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FlagsFlagLowered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FlagsFlagLoweredIterator) Error() error { + return it.fail +} + +func (it *FlagsFlagLoweredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FlagsFlagLowered struct { + Subject common.Address + Raw types.Log +} + +func (_Flags *FlagsFilterer) FilterFlagLowered(opts *bind.FilterOpts, subject []common.Address) (*FlagsFlagLoweredIterator, error) { + + var subjectRule []interface{} + for _, subjectItem := range subject { + subjectRule = 
append(subjectRule, subjectItem) + } + + logs, sub, err := _Flags.contract.FilterLogs(opts, "FlagLowered", subjectRule) + if err != nil { + return nil, err + } + return &FlagsFlagLoweredIterator{contract: _Flags.contract, event: "FlagLowered", logs: logs, sub: sub}, nil +} + +func (_Flags *FlagsFilterer) WatchFlagLowered(opts *bind.WatchOpts, sink chan<- *FlagsFlagLowered, subject []common.Address) (event.Subscription, error) { + + var subjectRule []interface{} + for _, subjectItem := range subject { + subjectRule = append(subjectRule, subjectItem) + } + + logs, sub, err := _Flags.contract.WatchLogs(opts, "FlagLowered", subjectRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FlagsFlagLowered) + if err := _Flags.contract.UnpackLog(event, "FlagLowered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Flags *FlagsFilterer) ParseFlagLowered(log types.Log) (*FlagsFlagLowered, error) { + event := new(FlagsFlagLowered) + if err := _Flags.contract.UnpackLog(event, "FlagLowered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FlagsFlagRaisedIterator struct { + Event *FlagsFlagRaised + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FlagsFlagRaisedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FlagsFlagRaised) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + 
case log := <-it.logs: + it.Event = new(FlagsFlagRaised) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FlagsFlagRaisedIterator) Error() error { + return it.fail +} + +func (it *FlagsFlagRaisedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FlagsFlagRaised struct { + Subject common.Address + Raw types.Log +} + +func (_Flags *FlagsFilterer) FilterFlagRaised(opts *bind.FilterOpts, subject []common.Address) (*FlagsFlagRaisedIterator, error) { + + var subjectRule []interface{} + for _, subjectItem := range subject { + subjectRule = append(subjectRule, subjectItem) + } + + logs, sub, err := _Flags.contract.FilterLogs(opts, "FlagRaised", subjectRule) + if err != nil { + return nil, err + } + return &FlagsFlagRaisedIterator{contract: _Flags.contract, event: "FlagRaised", logs: logs, sub: sub}, nil +} + +func (_Flags *FlagsFilterer) WatchFlagRaised(opts *bind.WatchOpts, sink chan<- *FlagsFlagRaised, subject []common.Address) (event.Subscription, error) { + + var subjectRule []interface{} + for _, subjectItem := range subject { + subjectRule = append(subjectRule, subjectItem) + } + + logs, sub, err := _Flags.contract.WatchLogs(opts, "FlagRaised", subjectRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FlagsFlagRaised) + if err := _Flags.contract.UnpackLog(event, "FlagRaised", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Flags *FlagsFilterer) ParseFlagRaised(log types.Log) (*FlagsFlagRaised, error) 
{ + event := new(FlagsFlagRaised) + if err := _Flags.contract.UnpackLog(event, "FlagRaised", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FlagsOwnershipTransferRequestedIterator struct { + Event *FlagsOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FlagsOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FlagsOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FlagsOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FlagsOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *FlagsOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FlagsOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_Flags *FlagsFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FlagsOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Flags.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return 
&FlagsOwnershipTransferRequestedIterator{contract: _Flags.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_Flags *FlagsFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FlagsOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Flags.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FlagsOwnershipTransferRequested) + if err := _Flags.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Flags *FlagsFilterer) ParseOwnershipTransferRequested(log types.Log) (*FlagsOwnershipTransferRequested, error) { + event := new(FlagsOwnershipTransferRequested) + if err := _Flags.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FlagsOwnershipTransferredIterator struct { + Event *FlagsOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FlagsOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FlagsOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); 
err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FlagsOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FlagsOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *FlagsOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FlagsOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_Flags *FlagsFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FlagsOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Flags.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &FlagsOwnershipTransferredIterator{contract: _Flags.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_Flags *FlagsFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FlagsOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Flags.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + 
for { + select { + case log := <-logs: + + event := new(FlagsOwnershipTransferred) + if err := _Flags.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Flags *FlagsFilterer) ParseOwnershipTransferred(log types.Log) (*FlagsOwnershipTransferred, error) { + event := new(FlagsOwnershipTransferred) + if err := _Flags.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FlagsRaisingAccessControllerUpdatedIterator struct { + Event *FlagsRaisingAccessControllerUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FlagsRaisingAccessControllerUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FlagsRaisingAccessControllerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FlagsRaisingAccessControllerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FlagsRaisingAccessControllerUpdatedIterator) Error() error { + return it.fail +} + +func (it *FlagsRaisingAccessControllerUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FlagsRaisingAccessControllerUpdated struct { + Previous common.Address + Current common.Address + Raw types.Log +} + 
+func (_Flags *FlagsFilterer) FilterRaisingAccessControllerUpdated(opts *bind.FilterOpts, previous []common.Address, current []common.Address) (*FlagsRaisingAccessControllerUpdatedIterator, error) { + + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _Flags.contract.FilterLogs(opts, "RaisingAccessControllerUpdated", previousRule, currentRule) + if err != nil { + return nil, err + } + return &FlagsRaisingAccessControllerUpdatedIterator{contract: _Flags.contract, event: "RaisingAccessControllerUpdated", logs: logs, sub: sub}, nil +} + +func (_Flags *FlagsFilterer) WatchRaisingAccessControllerUpdated(opts *bind.WatchOpts, sink chan<- *FlagsRaisingAccessControllerUpdated, previous []common.Address, current []common.Address) (event.Subscription, error) { + + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _Flags.contract.WatchLogs(opts, "RaisingAccessControllerUpdated", previousRule, currentRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FlagsRaisingAccessControllerUpdated) + if err := _Flags.contract.UnpackLog(event, "RaisingAccessControllerUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Flags *FlagsFilterer) ParseRaisingAccessControllerUpdated(log types.Log) 
(*FlagsRaisingAccessControllerUpdated, error) { + event := new(FlagsRaisingAccessControllerUpdated) + if err := _Flags.contract.UnpackLog(event, "RaisingAccessControllerUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FlagsRemovedAccessIterator struct { + Event *FlagsRemovedAccess + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FlagsRemovedAccessIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FlagsRemovedAccess) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FlagsRemovedAccess) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FlagsRemovedAccessIterator) Error() error { + return it.fail +} + +func (it *FlagsRemovedAccessIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FlagsRemovedAccess struct { + User common.Address + Raw types.Log +} + +func (_Flags *FlagsFilterer) FilterRemovedAccess(opts *bind.FilterOpts) (*FlagsRemovedAccessIterator, error) { + + logs, sub, err := _Flags.contract.FilterLogs(opts, "RemovedAccess") + if err != nil { + return nil, err + } + return &FlagsRemovedAccessIterator{contract: _Flags.contract, event: "RemovedAccess", logs: logs, sub: sub}, nil +} + +func (_Flags *FlagsFilterer) WatchRemovedAccess(opts *bind.WatchOpts, sink chan<- *FlagsRemovedAccess) (event.Subscription, error) { + + logs, sub, err := _Flags.contract.WatchLogs(opts, "RemovedAccess") + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FlagsRemovedAccess) + if err := _Flags.contract.UnpackLog(event, "RemovedAccess", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Flags *FlagsFilterer) ParseRemovedAccess(log types.Log) (*FlagsRemovedAccess, error) { + event := new(FlagsRemovedAccess) + if err := _Flags.contract.UnpackLog(event, "RemovedAccess", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_Flags *Flags) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _Flags.abi.Events["AddedAccess"].ID: + return _Flags.ParseAddedAccess(log) + case _Flags.abi.Events["CheckAccessDisabled"].ID: + return _Flags.ParseCheckAccessDisabled(log) + case _Flags.abi.Events["CheckAccessEnabled"].ID: + return _Flags.ParseCheckAccessEnabled(log) + case _Flags.abi.Events["FlagLowered"].ID: + return _Flags.ParseFlagLowered(log) + case _Flags.abi.Events["FlagRaised"].ID: + return _Flags.ParseFlagRaised(log) + case _Flags.abi.Events["OwnershipTransferRequested"].ID: + return _Flags.ParseOwnershipTransferRequested(log) + case _Flags.abi.Events["OwnershipTransferred"].ID: + return _Flags.ParseOwnershipTransferred(log) + case _Flags.abi.Events["RaisingAccessControllerUpdated"].ID: + return _Flags.ParseRaisingAccessControllerUpdated(log) + case _Flags.abi.Events["RemovedAccess"].ID: + return _Flags.ParseRemovedAccess(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (FlagsAddedAccess) Topic() common.Hash { + return common.HexToHash("0x87286ad1f399c8e82bf0c4ef4fcdc570ea2e1e92176e5c848b6413545b885db4") +} + +func 
(FlagsCheckAccessDisabled) Topic() common.Hash { + return common.HexToHash("0x3be8a977a014527b50ae38adda80b56911c267328965c98ddc385d248f539638") +} + +func (FlagsCheckAccessEnabled) Topic() common.Hash { + return common.HexToHash("0xaebf329500988c6488a0074e5a0a9ff304561fc5c6fc877aeb1d59c8282c3480") +} + +func (FlagsFlagLowered) Topic() common.Hash { + return common.HexToHash("0xd86728e2e5cbaa28c1d357b5fbccc9c1ab0add09950eb7cac42df9acb24c4bc8") +} + +func (FlagsFlagRaised) Topic() common.Hash { + return common.HexToHash("0x881febd4cd194dd4ace637642862aef1fb59a65c7e5551a5d9208f268d11c006") +} + +func (FlagsOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (FlagsOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (FlagsRaisingAccessControllerUpdated) Topic() common.Hash { + return common.HexToHash("0xbaf9ea078655a4fffefd08f9435677bbc91e457a6d015fe7de1d0e68b8802cac") +} + +func (FlagsRemovedAccess) Topic() common.Hash { + return common.HexToHash("0x3d68a6fce901d20453d1a7aa06bf3950302a735948037deb182a8db66df2a0d1") +} + +func (_Flags *Flags) Address() common.Address { + return _Flags.address +} + +type FlagsInterface interface { + CheckEnabled(opts *bind.CallOpts) (bool, error) + + GetFlag(opts *bind.CallOpts, subject common.Address) (bool, error) + + GetFlags(opts *bind.CallOpts, subjects []common.Address) ([]bool, error) + + HasAccess(opts *bind.CallOpts, _user common.Address, _calldata []byte) (bool, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + RaisingAccessController(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AddAccess(opts *bind.TransactOpts, _user common.Address) (*types.Transaction, error) + + DisableAccessCheck(opts *bind.TransactOpts) (*types.Transaction, 
error) + + EnableAccessCheck(opts *bind.TransactOpts) (*types.Transaction, error) + + LowerFlags(opts *bind.TransactOpts, subjects []common.Address) (*types.Transaction, error) + + RaiseFlag(opts *bind.TransactOpts, subject common.Address) (*types.Transaction, error) + + RaiseFlags(opts *bind.TransactOpts, subjects []common.Address) (*types.Transaction, error) + + RemoveAccess(opts *bind.TransactOpts, _user common.Address) (*types.Transaction, error) + + SetRaisingAccessController(opts *bind.TransactOpts, racAddress common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, _to common.Address) (*types.Transaction, error) + + FilterAddedAccess(opts *bind.FilterOpts) (*FlagsAddedAccessIterator, error) + + WatchAddedAccess(opts *bind.WatchOpts, sink chan<- *FlagsAddedAccess) (event.Subscription, error) + + ParseAddedAccess(log types.Log) (*FlagsAddedAccess, error) + + FilterCheckAccessDisabled(opts *bind.FilterOpts) (*FlagsCheckAccessDisabledIterator, error) + + WatchCheckAccessDisabled(opts *bind.WatchOpts, sink chan<- *FlagsCheckAccessDisabled) (event.Subscription, error) + + ParseCheckAccessDisabled(log types.Log) (*FlagsCheckAccessDisabled, error) + + FilterCheckAccessEnabled(opts *bind.FilterOpts) (*FlagsCheckAccessEnabledIterator, error) + + WatchCheckAccessEnabled(opts *bind.WatchOpts, sink chan<- *FlagsCheckAccessEnabled) (event.Subscription, error) + + ParseCheckAccessEnabled(log types.Log) (*FlagsCheckAccessEnabled, error) + + FilterFlagLowered(opts *bind.FilterOpts, subject []common.Address) (*FlagsFlagLoweredIterator, error) + + WatchFlagLowered(opts *bind.WatchOpts, sink chan<- *FlagsFlagLowered, subject []common.Address) (event.Subscription, error) + + ParseFlagLowered(log types.Log) (*FlagsFlagLowered, error) + + FilterFlagRaised(opts *bind.FilterOpts, subject []common.Address) (*FlagsFlagRaisedIterator, error) + + WatchFlagRaised(opts *bind.WatchOpts, sink chan<- *FlagsFlagRaised, subject []common.Address) 
(event.Subscription, error) + + ParseFlagRaised(log types.Log) (*FlagsFlagRaised, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FlagsOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FlagsOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*FlagsOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FlagsOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FlagsOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*FlagsOwnershipTransferred, error) + + FilterRaisingAccessControllerUpdated(opts *bind.FilterOpts, previous []common.Address, current []common.Address) (*FlagsRaisingAccessControllerUpdatedIterator, error) + + WatchRaisingAccessControllerUpdated(opts *bind.WatchOpts, sink chan<- *FlagsRaisingAccessControllerUpdated, previous []common.Address, current []common.Address) (event.Subscription, error) + + ParseRaisingAccessControllerUpdated(log types.Log) (*FlagsRaisingAccessControllerUpdated, error) + + FilterRemovedAccess(opts *bind.FilterOpts) (*FlagsRemovedAccessIterator, error) + + WatchRemovedAccess(opts *bind.WatchOpts, sink chan<- *FlagsRemovedAccess) (event.Subscription, error) + + ParseRemovedAccess(log types.Log) (*FlagsRemovedAccess, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/flux_aggregator_wrapper/flux_aggregator_wrapper.go b/core/gethwrappers/generated/flux_aggregator_wrapper/flux_aggregator_wrapper.go new file mode 100644 index 00000000..51841886 --- /dev/null +++ 
b/core/gethwrappers/generated/flux_aggregator_wrapper/flux_aggregator_wrapper.go @@ -0,0 +1,2876 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package flux_aggregator_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var FluxAggregatorMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"uint128\",\"name\":\"_paymentAmount\",\"type\":\"uint128\"},{\"internalType\":\"uint32\",\"name\":\"_timeout\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"_validator\",\"type\":\"address\"},{\"internalType\":\"int256\",\"name\":\"_minSubmissionValue\",\"type\":\"int256\"},{\"internalType\":\"int256\",\"name\":\"_maxSubmissionValue\",\"type\":\"int256\"},{\"internalType\":\"uint8\",\"name\":\"_decimals\",\"type\":\"uint8\"},{\"internalType\":\"string\",\"name\":\"_description\",\"type\":\"string\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int256\",\"name\":\"current\",\"type\":\"int256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"}],\"name\":\"AnswerUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\
"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"AvailableFundsUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"startedBy\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"}],\"name\":\"NewRound\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"}],\"name\":\"OracleAdminUpdateRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"}],\"name\":\"OracleAdminUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"whitelisted\",\"type\":\"bool\"}],\"name\":\"OraclePermissionsUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"request
er\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"authorized\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"delay\",\"type\":\"uint32\"}],\"name\":\"RequesterPermissionsSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint128\",\"name\":\"paymentAmount\",\"type\":\"uint128\"},{\"indexed\":true,\"internalType\":\"uint32\",\"name\":\"minSubmissionCount\",\"type\":\"uint32\"},{\"indexed\":true,\"internalType\":\"uint32\",\"name\":\"maxSubmissionCount\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"restartDelay\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"timeout\",\"type\":\"uint32\"}],\"name\":\"RoundDetailsUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int256\",\"name\":\"submission\",\"type\":\"int256\"},{\"indexed\":true,\"internalType\":\"uint32\",\"name\":\"round\",\"type\":\"uint32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"}],\"name\":\"SubmissionReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previous\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"ValidatorUpdated\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"}],\"name\":\"acceptAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"allocatedFunds\",\"outputs\":[{\"internalType\":\"uint128\",\"name\":\"\",\"type\":\"uint128\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"availableFunds\",\"outputs\":[{\"internalType\":\"uint12
8\",\"name\":\"\",\"type\":\"uint128\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_removed\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_added\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_addedAdmins\",\"type\":\"address[]\"},{\"internalType\":\"uint32\",\"name\":\"_minSubmissions\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_maxSubmissions\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_restartDelay\",\"type\":\"uint32\"}],\"name\":\"changeOracles\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"description\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"}],\"name\":\"getAdmin\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_roundId\",\"type\":\"uint256\"}],\"name\":\"getAnswer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getOracles\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint80\",\"name\":\"_roundId\",\"type\":\"uint80\"}],\"name\":\"getRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"
uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_roundId\",\"type\":\"uint256\"}],\"name\":\"getTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestAnswer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRound\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"linkToken\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maxSubmissionCount\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maxSubmissionValue\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"functi
on\"},{\"inputs\":[],\"name\":\"minSubmissionCount\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"minSubmissionValue\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"oracleCount\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"_queriedRoundId\",\"type\":\"uint32\"}],\"name\":\"oracleRoundState\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"_eligibleToSubmit\",\"type\":\"bool\"},{\"internalType\":\"uint32\",\"name\":\"_roundId\",\"type\":\"uint32\"},{\"internalType\":\"int256\",\"name\":\"_latestSubmission\",\"type\":\"int256\"},{\"internalType\":\"uint64\",\"name\":\"_startedAt\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"_timeout\",\"type\":\"uint64\"},{\"internalType\":\"uint128\",\"name\":\"_availableFunds\",\"type\":\"uint128\"},{\"internalType\":\"uint8\",\"name\":\"_oracleCount\",\"type\":\"uint8\"},{\"internalType\":\"uint128\",\"name\":\"_paymentAmount\",\"type\":\"uint128\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paymentAmount\",\"outputs\":[{\"internalType\":\"uint128\",\"name\":\"\",\"type\":\"uint128\
"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"requestNewRound\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"\",\"type\":\"uint80\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"restartDelay\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_requester\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"_authorized\",\"type\":\"bool\"},{\"internalType\":\"uint32\",\"name\":\"_delay\",\"type\":\"uint32\"}],\"name\":\"setRequesterPermissions\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newValidator\",\"type\":\"address\"}],\"name\":\"setValidator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_roundId\",\"type\":\"uint256\"},{\"internalType\":\"int256\",\"name\":\"_submission\",\"type\":\"int256\"}],\"name\":\"submit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"timeout\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_newAdmin\",\"type\":\"address\"}],\"name\":\"transferAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"updateAvailableFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint128\",\"name\":\"_paymentAmoun
t\",\"type\":\"uint128\"},{\"internalType\":\"uint32\",\"name\":\"_minSubmissions\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_maxSubmissions\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_restartDelay\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_timeout\",\"type\":\"uint32\"}],\"name\":\"updateFutureRounds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"validator\",\"outputs\":[{\"internalType\":\"contractAggregatorValidatorInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"}],\"name\":\"withdrawablePayment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x60c06040523480156200001157600080fd5b5060405162005cfc38038062005cfc83398181016040526101008110156200003857600080fd5b815160208301516040808501516060860151608087015160a088015160c089015160e08a0180519651989a9799959894979396929591949391820192846401000000008211156200008857600080fd5b9083019060208201858111156200009e57600080fd5b8251640100000000811182820188101715620000b957600080fd5b82525081516020918201929091019080838360005b83811015620000e8578181015183820152602001620000ce565b50505050905090810190601f168015620001165780820380516001836020036101000a031916815260200191505b50604052505060008054336001600160a01b0319918216178255600280549091166001600160a01b038c16179055620001559150889080808a6200021f565b62000169856001600160e01b03620005fd16565b608084905260a08390526005805460ff191660ff8416179055805162000197906006906020840190620007e1565b50620001b88663ffffffff1642620006c760201b62003d4a1790919060201c565b6000805260096020527fec8156718a8372b1db44bb411437d0870f3e3790d4a08526d024ce1b0b668f6c80546001600160401b03929092166801000000000000000002600160401b600160801b031990921691909117905550620008839650505050505050565b6000546001600160a01b031633146200027f576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b6000620002946001600160e01b036200072a16565b60ff1690508463ffffffff168463ffffffff161015620002fb576040805162461bcd60e51b815260206004820152601960248201527f6d6178206d75737420657175616c2f657863656564206d696e00000000000000604482015290519081900360640190fd5b8363ffffffff168163ffffffff1610156200035d576040805162461bcd60e51b815260206004820152601760248201527f6d61782063616e6e6f742065786365656420746f74616c000000000000000000604482015290519081900360640190fd5b63ffffffff811615806200037c57508263ffffffff168163ffffffff16115b620003ce576040805162461bcd60e51b815260206004820152601960248201527f64656c61792063616e6e6f742065786365656420746f74616c00000000000000604482015290519081900360640190fd5b620003eb6001600160801b0387166001600160e01b03620
0073116565b600d546001600160801b031610156200044b576040805162461bcd60e51b815260206004820152601e60248201527f696e73756666696369656e742066756e647320666f72207061796d656e740000604482015290519081900360640190fd5b6000620004606001600160e01b036200072a16565b60ff161115620004c65760008563ffffffff1611620004c6576040805162461bcd60e51b815260206004820152601a60248201527f6d696e206d7573742062652067726561746572207468616e2030000000000000604482015290519081900360640190fd5b85600460006101000a8154816001600160801b0302191690836001600160801b0316021790555084600460146101000a81548163ffffffff021916908363ffffffff16021790555083600460106101000a81548163ffffffff021916908363ffffffff16021790555082600460186101000a81548163ffffffff021916908363ffffffff160217905550816004601c6101000a81548163ffffffff021916908363ffffffff1602179055508363ffffffff168563ffffffff16600460009054906101000a90046001600160801b03166001600160801b03167f56800c9d1ed723511246614d15e58cfcde15b6a33c245b5c961b689c1890fd8f8686604051808363ffffffff1663ffffffff1681526020018263ffffffff1663ffffffff1681526020019250505060405180910390a4505050505050565b6000546001600160a01b031633146200065d576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b6003546001600160a01b039081169082168114620006c357600380546001600160a01b0319166001600160a01b0384811691821790925560405190918316907fcfac5dc75b8d9a7e074162f59d9adcd33da59f0fe8dfb21580db298fc0fdad0d90600090a35b5050565b6000828211156200071f576040805162461bcd60e51b815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b508082035b92915050565b600c545b90565b600062000724600262000768620007506001600160e01b036200072a16565b60ff16856200077c60201b620047a01790919060201c565b6200077c60201b620047a01790919060201c565b6000826200078d5750600062000724565b828202828482816200079b57fe5b0414620007da5760405162461bcd60e51b815260040180806020018281038252602181526020018062005cdb6021913960400
191505060405180910390fd5b9392505050565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106200082457805160ff191683800117855562000854565b8280016001018555821562000854579182015b828111156200085457825182559160200191906001019062000837565b506200086292915062000866565b5090565b6200072e91905b808211156200086257600081556001016200086d565b60805160a05161542a620008b160003980610c495280610fbc525080610bba5280611ff6525061542a6000f3fe608060405234801561001057600080fd5b50600436106102d35760003560e01c8063668a0f0211610186578063a4c0ed36116100e3578063c937450011610097578063e9ee6eeb11610071578063e9ee6eeb14610a13578063f2fde38b14610a4e578063feaf968c14610a81576102d3565b8063c9374500146109d0578063d4cc54e4146109d8578063e2e40317146109e0576102d3565b8063b633620c116100c8578063b633620c14610972578063c10753291461098f578063c35905c6146109c8576102d3565b8063a4c0ed36146108c3578063b5ab58dc14610955576102d3565b80638205bf6a1161013a5780638da5cb5b1161011f5780638da5cb5b1461082c57806398e5b12a146108345780639a6fc8f51461085b576102d3565b80638205bf6a1461077b57806388aa80e714610783576102d3565b80637284e4161161016b5780637284e416146106ee57806379ba50971461076b5780637c2b0b2114610773576102d3565b8063668a0f02146106de57806370dea79a146106e6576102d3565b806340884c521161023457806357970e93116101e8578063613d8fcc116101cd578063613d8fcc14610670578063628806ef1461067857806364efb22b146106ab576102d3565b806357970e931461066057806358609e4414610668576102d3565b80634f8fc3b5116102195780634f8fc3b51461064857806350d25bcd1461065057806354fd4d5014610658576102d3565b806340884c52146105c357806346fcff4c1461061b576102d3565b8063357ebb021161028b5780633969c20f116102705780633969c20f146104235780633a5381b51461054f5780633d3d771414610580576102d3565b8063357ebb02146103af57806338aa4c72146103d0576102d3565b806320ed0275116102bc57806320ed02751461033057806323ca290314610377578063313ce56714610391576102d3565b80631327d3d8146102d8578063202ee0ed1461030d575b600080fd5b61030b600480360360208110156102ee57600080fd5b503573fffffffffffffffffffffffffffffffffffffff
f16610a89565b005b61030b6004803603604081101561032357600080fd5b5080359060200135610baa565b61030b6004803603606081101561034657600080fd5b50803573ffffffffffffffffffffffffffffffffffffffff169060208101351515906040013563ffffffff16610dca565b61037f610fba565b60408051918252519081900360200190f35b610399610fde565b6040805160ff9092168252519081900360200190f35b6103b7610fe7565b6040805163ffffffff9092168252519081900360200190f35b61030b600480360360a08110156103e657600080fd5b506fffffffffffffffffffffffffffffffff8135169063ffffffff602082013581169160408101358216916060820135811691608001351661100f565b61030b600480360360c081101561043957600080fd5b81019060208101813564010000000081111561045457600080fd5b82018360208201111561046657600080fd5b8035906020019184602083028401116401000000008311171561048857600080fd5b9193909290916020810190356401000000008111156104a657600080fd5b8201836020820111156104b857600080fd5b803590602001918460208302840111640100000000831117156104da57600080fd5b9193909290916020810190356401000000008111156104f857600080fd5b82018360208201111561050a57600080fd5b8035906020019184602083028401116401000000008311171561052c57600080fd5b919350915063ffffffff81358116916020810135821691604090910135166114a3565b61055761172b565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190f35b61030b6004803603606081101561059657600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813581169160208101359091169060400135611747565b6105cb611a18565b60408051602080825283518183015283519192839290830191858101910280838360005b838110156106075781810151838201526020016105ef565b505050509050019250505060405180910390f35b610623611a88565b604080516fffffffffffffffffffffffffffffffff9092168252519081900360200190f35b61030b611aa0565b61037f611c1d565b61037f611c40565b610557611c45565b6103b7611c61565b610399611c81565b61030b6004803603602081101561068e57600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16611c87565b610557600480360360208110156106c157600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16611dcf565b61037f611e06565b6103b7611e1a5
65b6106f6611e46565b6040805160208082528351818301528351919283929083019185019080838360005b83811015610730578181015183820152602001610718565b50505050905090810190601f16801561075d5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b61030b611ef2565b61037f611ff4565b61037f612018565b6107c26004803603604081101561079957600080fd5b50803573ffffffffffffffffffffffffffffffffffffffff16906020013563ffffffff16612054565b60408051981515895263ffffffff90971660208901528787019590955267ffffffffffffffff93841660608801529190921660808601526fffffffffffffffffffffffffffffffff91821660a086015260ff1660c08501521660e083015251908190036101000190f35b61055761220d565b61083c612229565b6040805169ffffffffffffffffffff9092168252519081900360200190f35b6108846004803603602081101561087157600080fd5b503569ffffffffffffffffffff16612382565b6040805169ffffffffffffffffffff96871681526020810195909552848101939093526060840191909152909216608082015290519081900360a00190f35b61030b600480360360608110156108d957600080fd5b73ffffffffffffffffffffffffffffffffffffffff8235169160208101359181019060608101604082013564010000000081111561091657600080fd5b82018360208201111561092857600080fd5b8035906020019184600183028401116401000000008311171561094a57600080fd5b5090925090506124f8565b61037f6004803603602081101561096b57600080fd5b5035612573565b61037f6004803603602081101561098857600080fd5b50356125a7565b61030b600480360360408110156109a557600080fd5b5073ffffffffffffffffffffffffffffffffffffffff81351690602001356125ec565b610623612832565b6103b761284a565b61062361286e565b61037f600480360360208110156109f657600080fd5b503573ffffffffffffffffffffffffffffffffffffffff1661289a565b61030b60048036036040811015610a2957600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813581169160200135166128d4565b61030b60048036036020811015610a6457600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16612a0a565b610884612b06565b60005473ffffffffffffffffffffffffffffffffffffffff163314610b0f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820
152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b60035473ffffffffffffffffffffffffffffffffffffffff9081169082168114610ba657600380547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff84811691821790925560405190918316907fcfac5dc75b8d9a7e074162f59d9adcd33da59f0fe8dfb21580db298fc0fdad0d90600090a35b5050565b6060610bb63384612b40565b90507f0000000000000000000000000000000000000000000000000000000000000000821215610c4757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f76616c75652062656c6f77206d696e5375626d697373696f6e56616c75650000604482015290519081900360640190fd5b7f0000000000000000000000000000000000000000000000000000000000000000821315610cd657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f76616c75652061626f7665206d61785375626d697373696f6e56616c75650000604482015290519081900360640190fd5b8051819015610d7d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825283818151815260200191508051906020019080838360005b83811015610d42578181015183820152602001610d2a565b50505050905090810190601f168015610d6f5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b50610d8783612e4f565b610d918284612f3f565b600080610d9d8561305f565b91509150610daa8561320f565b610db3856133d2565b8115610dc357610dc38582613449565b5050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610e5057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff83166000908152600b602052604090205460ff1615158215151415610e8957610fb5565b8115610f115773ffffffffffffffffffffffffffffffffffffffff83166000908152600b6020526040902080547ffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffff0016831515177fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ff1661010063ffffffff841602179055610f5d565b73ffffffffffffffffffffffffffffffffffffffff83166000908152600b6020526040902080547fffffffffffffffffffffffffffffffffffffffffffffff0000000000000000001690555b60408051831515815263ffffffff83166020820152815173ffffffffffffffffffffffffffffffffffffffff8616927fc3df5a754e002718f2e10804b99e6605e7c701d95cec9552c7680ca2b6f2820a928290030190a25b505050565b7f000000000000000000000000000000000000000000000000000000000000000081565b60055460ff1681565b6004547801000000000000000000000000000000000000000000000000900463ffffffff1681565b60005473ffffffffffffffffffffffffffffffffffffffff16331461109557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b600061109f611c81565b60ff1690508463ffffffff168463ffffffff16101561111f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f6d6178206d75737420657175616c2f657863656564206d696e00000000000000604482015290519081900360640190fd5b8363ffffffff168163ffffffff16101561119a57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f6d61782063616e6e6f742065786365656420746f74616c000000000000000000604482015290519081900360640190fd5b63ffffffff811615806111b857508263ffffffff168163ffffffff16115b61122357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f64656c61792063616e6e6f742065786365656420746f74616c00000000000000604482015290519081900360640190fd5b61123e866fffffffffffffffffffffffffffffffff16613582565b600d546fffffffffffffffffffffffffffffffff1610156112c057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f696e73756666696369656e742066756e647320666f7
2207061796d656e740000604482015290519081900360640190fd5b60006112ca611c81565b60ff1611156113485760008563ffffffff161161134857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f6d696e206d7573742062652067726561746572207468616e2030000000000000604482015290519081900360640190fd5b85600460006101000a8154816fffffffffffffffffffffffffffffffff02191690836fffffffffffffffffffffffffffffffff16021790555084600460146101000a81548163ffffffff021916908363ffffffff16021790555083600460106101000a81548163ffffffff021916908363ffffffff16021790555082600460186101000a81548163ffffffff021916908363ffffffff160217905550816004601c6101000a81548163ffffffff021916908363ffffffff1602179055508363ffffffff168563ffffffff16600460009054906101000a90046fffffffffffffffffffffffffffffffff166fffffffffffffffffffffffffffffffff167f56800c9d1ed723511246614d15e58cfcde15b6a33c245b5c961b689c1890fd8f8686604051808363ffffffff1663ffffffff1681526020018263ffffffff1663ffffffff1681526020019250505060405180910390a4505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff16331461152957604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b60005b8881101561156d576115658a8a8381811061154357fe5b9050602002013573ffffffffffffffffffffffffffffffffffffffff166135b0565b60010161152c565b508584146115dc57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f6e6565642073616d65206f7261636c6520616e642061646d696e20636f756e74604482015290519081900360640190fd5b604d6115f9876115ea611c81565b60ff169063ffffffff61386016565b111561166657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f6d6178206f7261636c657320616c6c6f77656400000000000000000000000000604482015290519081900360640190fd5b60005b868110156116d3576116cb88888381811061168057fe5b9050602002013573fffffffff
fffffffffffffffffffffffffffffff168787848181106116a957fe5b9050602002013573ffffffffffffffffffffffffffffffffffffffff166138db565b600101611669565b50600454611720906fffffffffffffffffffffffffffffffff8116908590859085907c0100000000000000000000000000000000000000000000000000000000900463ffffffff1661100f565b505050505050505050565b60035473ffffffffffffffffffffffffffffffffffffffff1681565b73ffffffffffffffffffffffffffffffffffffffff8381166000908152600860205260409020600201546201000090041633146117e557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c652062792061646d696e00000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff831660009081526008602052604090205481906fffffffffffffffffffffffffffffffff90811690821681101561189257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f696e73756666696369656e7420776974686472617761626c652066756e647300604482015290519081900360640190fd5b6118b46fffffffffffffffffffffffffffffffff82168363ffffffff613caf16565b73ffffffffffffffffffffffffffffffffffffffff8616600090815260086020526040902080547fffffffffffffffffffffffffffffffff00000000000000000000000000000000166fffffffffffffffffffffffffffffffff928316179055600d54611937917001000000000000000000000000000000009091041683613caf565b600d80546fffffffffffffffffffffffffffffffff92831670010000000000000000000000000000000002908316179055600254604080517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff888116600483015293861660248201529051929091169163a9059cbb916044808201926020929091908290030181600087803b1580156119e657600080fd5b505af11580156119fa573d6000803e3d6000fd5b505050506040513d6020811015611a1057600080fd5b5051610dc357fe5b6060600c805480602002602001604051908101604052809291908181526020018280548015611a7d57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff16815260019091019060200
1808311611a52575b505050505090505b90565b600d546fffffffffffffffffffffffffffffffff1690565b611aa86152ec565b50604080518082018252600d546fffffffffffffffffffffffffffffffff808216835270010000000000000000000000000000000090910416602080830182905260025484517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015294519394600094611b95949373ffffffffffffffffffffffffffffffffffffffff909316926370a082319260248082019391829003018186803b158015611b5d57600080fd5b505afa158015611b71573d6000803e3d6000fd5b505050506040513d6020811015611b8757600080fd5b50519063ffffffff613d4a16565b82519091506fffffffffffffffffffffffffffffffff168114610ba657600d80547fffffffffffffffffffffffffffffffff00000000000000000000000000000000166fffffffffffffffffffffffffffffffff831617905560405181907ffe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f90600090a25050565b600754640100000000900463ffffffff1660009081526009602052604090205490565b600381565b60025473ffffffffffffffffffffffffffffffffffffffff1681565b600454700100000000000000000000000000000000900463ffffffff1681565b600c5490565b73ffffffffffffffffffffffffffffffffffffffff818116600090815260086020526040902060030154163314611d1f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f6f6e6c792063616c6c61626c652062792070656e64696e672061646d696e0000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff81166000818152600860205260408082206003810180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905560020180547fffffffffffffffffffff0000000000000000000000000000000000000000ffff16336201000081029190911790915590519092917f0c5055390645c15a4be9a21b3f8d019153dcb4a0c125685da6eb84048e2fe90491a350565b73ffffffffffffffffffffffffffffffffffffffff808216600090815260086020526040902060020154620100009004165b919050565b600754640100000000900463ffffffff1690565b6004547c0100000000000000000000000000000000000000000000000000000000900463ffffffff1681565b6006805460408051602060026001851615610100027ff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f81018490048402820184019092528181529291830182828015611eea5780601f10611ebf57610100808354040283529160200191611eea565b820191906000526020600020905b815481529060010190602001808311611ecd57829003601f168201915b505050505081565b60015473ffffffffffffffffffffffffffffffffffffffff163314611f7857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015290519081900360640190fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b7f000000000000000000000000000000000000000000000000000000000000000081565b600754640100000000900463ffffffff1660009081526009602052604090206001015468010000000000000000900467ffffffffffffffff1690565b6000808080808080803332146120cb57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f66662d636861696e2072656164696e67206f6e6c7900000000000000000000604482015290519081900360640190fd5b63ffffffff8916156121e65763ffffffff89166000908152600960209081526040808320600a9092529091206121018c8c613dbb565b73ffffffffffffffffffffffffffffffffffffffff8d1660009081526008602052604090206001908101548482015491840154600d548f9367ffffffffffffffff169168010000000000000000900463ffffffff16906fffffffffffffffffffffffffffffffff16612171611c81565b600189015467ffffffffffffffff1661219e576004546fffffffffffffffffffffffffffffffff166121c6565b60018801546c0100000000000000000000000090046fffffffffffffffffffffffffffffffff165b8363ffffffff169350995099509950995099509950995099505050612200565b6121ef8a613e11565b975097509750975097509750975097505b9295985092959890939650565b60005473ffffffffffffffffffffffffffffffffffffffff1681565b336000908152600b602052604081205460ff166122a757604080517
f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f6e6f7420617574686f72697a6564207265717565737465720000000000000000604482015290519081900360640190fd5b60075463ffffffff1660008181526009602052604090206001015468010000000000000000900467ffffffffffffffff161515806122e957506122e981614028565b61235457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f7072657620726f756e64206d75737420626520737570657273656461626c6500604482015290519081900360640190fd5b600061236b63ffffffff808416906001906140bb16565b905061237681614138565b63ffffffff1691505090565b6000806000806000612392615303565b5063ffffffff80871660009081526009602090815260409182902082516080810184528154815260019091015467ffffffffffffffff80821693830193909352680100000000000000008104909216928101929092527001000000000000000000000000000000009004909116606082018190521580159061242457506124248769ffffffffffffffffffff16614240565b6040518060400160405280600f81526020017f4e6f20646174612070726573656e740000000000000000000000000000000000815250906124c0576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201818152835160248401528351909283926044909101919085019080838360008315610d42578181015183820152602001610d2a565b508051602082015160408301516060909301519899919867ffffffffffffffff91821698509216955063ffffffff9091169350915050565b801561256557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f7472616e7366657220646f65736e2774206163636570742063616c6c64617461604482015290519081900360640190fd5b61256d611aa0565b50505050565b600061257e82614240565b1561259f575063ffffffff8116600090815260096020526040902054611e01565b506000919050565b60006125b282614240565b1561259f575063ffffffff811660009081526009602052604090206001015468010000000000000000900467ffffffffffffffff16611e01565b60005473ffffffffffffffffffffffffffffffffffffffff16331461267257604080517f08c379a00000000000000000000000000000000000000000000000000
0000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b600d546004546fffffffffffffffffffffffffffffffff9182169183916126aa9161269d9116613582565b839063ffffffff613d4a16565b101561271757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f696e73756666696369656e7420726573657276652066756e6473000000000000604482015290519081900360640190fd5b600254604080517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8681166004830152602482018690529151919092169163a9059cbb9160448083019260209291908290030181600087803b15801561279357600080fd5b505af11580156127a7573d6000803e3d6000fd5b505050506040513d60208110156127bd57600080fd5b505161282a57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f746f6b656e207472616e73666572206661696c65640000000000000000000000604482015290519081900360640190fd5b610fb5611aa0565b6004546fffffffffffffffffffffffffffffffff1681565b60045474010000000000000000000000000000000000000000900463ffffffff1681565b600d5470010000000000000000000000000000000090046fffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff166000908152600860205260409020546fffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff82811660009081526008602052604090206002015462010000900416331461297257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c652062792061646d696e00000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff82811660008181526008602090815260409182902060030180547fffffffffffffffffffffffff0000000000000000000000000000000000000000169486169485179055815133815290810193909352805191927fb79bf2e89c2d70dde91d2991fb1ea69b7e478061ad7c04ed5b02b96bc52b8104929081900390910190a25050565b60005473fffffffffffff
fffffffffffffffffffffffffff163314612a9057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000806000806000612b2f600760049054906101000a900463ffffffff1663ffffffff16612382565b945094509450945094509091929394565b73ffffffffffffffffffffffffffffffffffffffff821660009081526008602052604090205460075460609163ffffffff7001000000000000000000000000000000009091048116911681612bce576040518060400160405280601281526020017f6e6f7420656e61626c6564206f7261636c65000000000000000000000000000081525092505050612e49565b8363ffffffff168263ffffffff161115612c21576040518060400160405280601681526020017f6e6f742079657420656e61626c6564206f7261636c650000000000000000000081525092505050612e49565b73ffffffffffffffffffffffffffffffffffffffff851660009081526008602052604090205463ffffffff80861674010000000000000000000000000000000000000000909204161015612cae576040518060400160405280601881526020017f6e6f206c6f6e67657220616c6c6f776564206f7261636c65000000000000000081525092505050612e49565b73ffffffffffffffffffffffffffffffffffffffff851660009081526008602052604090205463ffffffff80861678010000000000000000000000000000000000000000000000009092041610612d3e576040518060400160405280602081526020017f63616e6e6f74207265706f7274206f6e2070726576696f757320726f756e647381525092505050612e49565b8063ffffffff168463ffffffff1614158015612d7a5750612d6a63ffffffff808316906001906140bb16565b63ffffffff168463ffffffff1614155b8015612d8d5750612d8b848261424a565b155b15612dd1576040518060400160405280601781526020017f696e76616c696420726f756e6420746f207265706f727400000000000000000081525092505050612e49565b8363ffffffff16600114158015612e025750612e00612dfb63ffffffff808716906001906142b016565b614
32d565b155b15612e46576040518060400160405280601f81526020017f70726576696f757320726f756e64206e6f7420737570657273656461626c650081525092505050612e49565b50505b92915050565b612e588161436d565b612e6157612f3c565b3360009081526008602052604090205460045463ffffffff7c010000000000000000000000000000000000000000000000000000000090920482169178010000000000000000000000000000000000000000000000009091048116820190831611801590612ece57508015155b15612ed95750612f3c565b612ee28261439e565b5033600090815260086020526040902080547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167c010000000000000000000000000000000000000000000000000000000063ffffffff8416021790555b50565b612f488161462d565b612fb357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f726f756e64206e6f7420616363657074696e67207375626d697373696f6e7300604482015290519081900360640190fd5b63ffffffff81166000818152600a602090815260408083208054600180820183559185528385200187905533808552600890935281842080547fffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffff1678010000000000000000000000000000000000000000000000008702178155018690555190929185917f92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c9190a45050565b63ffffffff8082166000908152600a60205260408120600181015490549192839264010000000090920416111561309b5750600090508061320a565b63ffffffff83166000908152600a602090815260408083208054825181850281018501909352808352613101938301828280156130f757602002820191906000526020600020905b8154815260200190600101908083116130e3575b505050505061464d565b63ffffffff851660008181526009602090815260409182902084815560010180547fffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff16680100000000000000004267ffffffffffffffff811691909102919091177fffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffffff16700100000000000000000000000000000000860217909155600780547fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff16640100000000860217905582519081529151939450919284927f0559884fd3a460db3
073b7fc896cc77986f16e378210ded43186175bf646fc5f928290030190a36001925090505b915091565b63ffffffff81166000908152600a60205260409020600101546c0100000000000000000000000090046fffffffffffffffffffffffffffffffff166132526152ec565b5060408051808201909152600d546fffffffffffffffffffffffffffffffff8082168084527001000000000000000000000000000000009092041660208301526132a2908363ffffffff613caf16565b6fffffffffffffffffffffffffffffffff908116825260208201516132ce91168363ffffffff61471716565b6fffffffffffffffffffffffffffffffff90811660208084018290528351600d80547001000000000000000000000000000000009094029185167fffffffffffffffffffffffffffffffff00000000000000000000000000000000909416939093178416179091553360009081526008909152604090205461335791168363ffffffff61471716565b3360009081526008602052604080822080547fffffffffffffffffffffffffffffffff00000000000000000000000000000000166fffffffffffffffffffffffffffffffff948516179055835190519216917ffe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f9190a2505050565b63ffffffff8082166000908152600a6020526040902060018101549054911611156133fc57612f3c565b63ffffffff81166000908152600a602052604081209061341c828261532a565b5060010180547fffffffff0000000000000000000000000000000000000000000000000000000016905550565b60035473ffffffffffffffffffffffffffffffffffffffff168061346d5750610ba6565b600061348463ffffffff808616906001906142b016565b63ffffffff80821660009081526009602090815260408083206001810154905482517fbeed9b5100000000000000000000000000000000000000000000000000000000815270010000000000000000000000000000000090920486166004830181905260248301829052958b166044830152606482018a905291519596509394909373ffffffffffffffffffffffffffffffffffffffff88169363beed9b5193620186a093608480850194929391928390030190829088803b15801561354957600080fd5b5087f19350505050801561356f57506040513d602081101561356a57600080fd5b505160015b6135785761357a565b505b505050505050565b6000612e4960026135a4613594611c81565b859060ff1663ffffffff6147a016565b9063ffffffff6147a016565b6135b981614813565b61362457604080517f08c379a00000000000000
0000000000000000000000000000000000000000000815260206004820152601260248201527f6f7261636c65206e6f7420656e61626c65640000000000000000000000000000604482015290519081900360640190fd5b60075461363d9063ffffffff908116906001906140bb16565b73ffffffffffffffffffffffffffffffffffffffff82166000908152600860205260408120805463ffffffff9390931674010000000000000000000000000000000000000000027fffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffff90931692909217909155600c6136cd60016136be611c81565b60ff169063ffffffff613d4a16565b815481106136d757fe5b60009182526020808320919091015473ffffffffffffffffffffffffffffffffffffffff85811680855260089093526040808520600290810180549390941680875291862001805461ffff9093167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00009384168117909155939094528154169055600c805492935090918391908390811061376d57fe5b9060005260206000200160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550600c8054806137c057fe5b60008281526020812082017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905590910190915560405173ffffffffffffffffffffffffffffffffffffffff8516907f18dd09695e4fbdae8d1a5edb11221eb04564269c29a089b9753a6535c54ba92e908390a3505050565b6000828201838110156138d457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b9392505050565b6138e482614813565b1561395057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f7261636c6520616c726561647920656e61626c656400000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff81166139d257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f63616e6e6f74207365742061646d696e20746f2
0300000000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff828116600090815260086020526040902060020154620100009004161580613a41575073ffffffffffffffffffffffffffffffffffffffff8281166000908152600860205260409020600201546201000090048116908216145b613aac57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f6f776e65722063616e6e6f74206f76657277726974652061646d696e00000000604482015290519081900360640190fd5b613ab58261485c565b73ffffffffffffffffffffffffffffffffffffffff80841660008181526008602052604080822080547fffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffff63ffffffff97909716700100000000000000000000000000000000027fffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffffff909116179590951677ffffffff0000000000000000000000000000000000000000178555600c80546002909601805461ffff9097167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000909716969096178655805460018181019092557fdf6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c70180547fffffffffffffffffffffffff00000000000000000000000000000000000000001685179055838352855494871662010000027fffffffffffffffffffff0000000000000000000000000000000000000000ffff909516949094179094559251919290917f18dd09695e4fbdae8d1a5edb11221eb04564269c29a089b9753a6535c54ba92e9190a38073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167f0c5055390645c15a4be9a21b3f8d019153dcb4a0c125685da6eb84048e2fe90460405160405180910390a35050565b6000826fffffffffffffffffffffffffffffffff16826fffffffffffffffffffffffffffffffff161115613d4457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b50900390565b600082821115613d4457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a2073756274726163746
96f6e206f766572666c6f770000604482015290519081900360640190fd5b63ffffffff811660009081526009602052604081206001015467ffffffffffffffff1615613e0757613dec8261462d565b8015613e005750613dfd8383612b40565b51155b9050612e49565b613dec83836148dd565b73ffffffffffffffffffffffffffffffffffffffff81166000908152600860205260408120600754815483928392839283928392839283927fec8156718a8372b1db44bb411437d0870f3e3790d4a08526d024ce1b0b668f6b929091849163ffffffff9081167801000000000000000000000000000000000000000000000000909204161480613eb05750600754613eae9063ffffffff1661462d565b155b600754909150613ec59063ffffffff1661432d565b8015613ece5750805b15613f3057600754613eec9063ffffffff908116906001906140bb16565b63ffffffff81166000908152600960205260409020600454919b506fffffffffffffffffffffffffffffffff90911694509250613f298c8b6148dd565b9a50613f8c565b60075463ffffffff166000818152600960209081526040808320600a90925290912060010154919b506c010000000000000000000000009091046fffffffffffffffffffffffffffffffff1694509250613f898a61462d565b9a505b613f968c8b612b40565b5115613fa15760009a505b6001808301548482015463ffffffff808e166000908152600a6020526040902090930154600d548f948f949367ffffffffffffffff169268010000000000000000900416906fffffffffffffffffffffffffffffffff16614000611c81565b8a8363ffffffff1693509a509a509a509a509a509a509a509a50505050919395975091939597565b63ffffffff8082166000908152600960209081526040808320600190810154600a9093529083200154919267ffffffffffffffff90911691680100000000000000009004168115801590614082575060008163ffffffff16115b80156140b35750426140a767ffffffffffffffff841663ffffffff8085169061496216565b67ffffffffffffffff16105b949350505050565b600082820163ffffffff80851690821610156138d457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b6141418161436d565b61414a57612f3c565b336000908152600b602052604090205463ffffffff6501000000000082048116916101009004811682019083161180614181575080155b6141ec5
7604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f6d7573742064656c617920726571756573747300000000000000000000000000604482015290519081900360640190fd5b6141f58261439e565b50336000908152600b60205260409020805463ffffffff831665010000000000027fffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffff90911617905550565b63ffffffff101590565b60008163ffffffff1661426d60018563ffffffff166140bb90919063ffffffff16565b63ffffffff161480156138d457505063ffffffff1660009081526009602052604090206001015468010000000000000000900467ffffffffffffffff1615919050565b60008263ffffffff168263ffffffff161115613d4457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b63ffffffff811660009081526009602052604081206001015468010000000000000000900467ffffffffffffffff16151580612e495750612e4982614028565b6007546000906143899063ffffffff908116906001906140bb16565b63ffffffff168263ffffffff16149050919050565b6143bb6143b663ffffffff808416906001906142b016565b6149e3565b600780547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff83161790556143f3615348565b5060408051600060a0820181815260c083018452825260045463ffffffff700100000000000000000000000000000000820481166020808601919091527401000000000000000000000000000000000000000083048216858701527c01000000000000000000000000000000000000000000000000000000008304821660608601526fffffffffffffffffffffffffffffffff909216608085015285168252600a815292902081518051929384936144ae9284920190615376565b506020828101516001928301805460408087015160608801516080909801517fffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000090931663ffffffff958616177fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff1664010000000091861691909102177fffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffff166801000000000000000097851697909702969096177fffffffff0000000000000
0000000000000000000ffffffffffffffffffffffff166c010000000000000000000000006fffffffffffffffffffffffffffffffff90921691909102179055851660008181526009835284902090920180547fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000164267ffffffffffffffff9081169190911791829055845191168152925133937f0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac6027192908290030190a35050565b63ffffffff9081166000908152600a602052604090206001015416151590565b600081516000106146bf57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6c697374206d757374206e6f7420626520656d70747900000000000000000000604482015290519081900360640190fd5b815160028104600182166146fe576000806146e4866000600187036001870387614afc565b90925090506146f38282614bda565b945050505050611e01565b61470e8460006001850384614c48565b92505050611e01565b60008282016fffffffffffffffffffffffffffffffff80851690821610156138d457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b6000826147af57506000612e49565b828202828482816147bc57fe5b04146138d4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806153fd6021913960400191505060405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff1660009081526008602052604090205463ffffffff7401000000000000000000000000000000000000000090910481161490565b60075460009063ffffffff1680158015906148bc575073ffffffffffffffffffffffffffffffffffffffff831660009081526008602052604090205463ffffffff8281167401000000000000000000000000000000000000000090920416145b156148c8579050611e01565b6138d463ffffffff808316906001906140bb16565b73ffffffffffffffffffffffffffffffffffffffff821660009081526008602052604081205460045463ffffffff7c0100000000000000000000000000000000000000000000000000000000909204821691780100000000000000000000000000000000000000000000000090910481168201908416118
06140b35750159392505050565b600082820167ffffffffffffffff80851690821610156138d457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b6149ec81614028565b6149f557612f3c565b6000614a0c63ffffffff808416906001906142b016565b63ffffffff818116600090815260096020908152604080832080548886168552828520908155600191820154910180547fffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffffff1670010000000000000000000000000000000092839004909616909102949094177fffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff16680100000000000000004267ffffffffffffffff160217909355600a9052908120919250614ace828261532a565b5060010180547fffffffff000000000000000000000000000000000000000000000000000000001690555050565b600080828410614b0b57600080fd5b838611158015614b1b5750848411155b614b2457600080fd5b828611158015614b345750848311155b614b3d57600080fd5b60078686031015614b5e57614b558787878787614cd9565b91509150614bd0565b6000614b6b888888615190565b9050808411614b7c57809550614bca565b84811015614b8f57806001019650614bca565b808511158015614b9e57508381105b614ba457fe5b614bb088888388614c48565b9250614bc188826001018887614c48565b9150614bd09050565b50614b3d565b9550959350505050565b60008083128015614beb5750600082135b80614c015750600083138015614c015750600082125b15614c21576002614c12848461526d565b81614c1957fe5b059050612e49565b60006002808507818507010590506140b3614c42600286056002860561526d565b8261526d565b600081841115614c5757600080fd5b82821115614c6457600080fd5b82841015614cbb5760078484031015614c90576000614c868686868687614cd9565b5091506140b39050565b6000614c9d868686615190565b9050808311614cae57809350614cb5565b8060010194505b50614c64565b848481518110614cc757fe5b60200260200101519050949350505050565b600080600086866001010390506000888860000181518110614cf757fe5b60200260200101519050600082600110614d31577f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff614d49565b898960010181518110614d4
057fe5b60200260200101515b9050600083600210614d7b577f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff614d93565b8a8a60020181518110614d8a57fe5b60200260200101515b9050600084600310614dc5577f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff614ddd565b8b8b60030181518110614dd457fe5b60200260200101515b9050600085600410614e0f577f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff614e27565b8c8c60040181518110614e1e57fe5b60200260200101515b9050600086600510614e59577f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff614e71565b8d8d60050181518110614e6857fe5b60200260200101515b9050600087600610614ea3577f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff614ebb565b8e8e60060181518110614eb257fe5b60200260200101515b905085871315614ec9579495945b83851315614ed5579293925b81831315614ee1579091905b84871315614eed579395935b83861315614ef9579294925b80831315614f0357915b84861315614f0f579394935b80821315614f1957905b82871315614f25579195915b81861315614f31579094905b80851315614f3b57935b82861315614f47579194915b80841315614f5157925b82851315614f5d579193915b81841315614f69579092905b82841315614f75579192915b8d8c0380614f8557879a50615052565b8060011415614f9657869a50615052565b8060021415614fa757859a50615052565b8060031415614fb857849a50615052565b8060041415614fc957839a50615052565b8060051415614fda57829a50615052565b8060061415614feb57819a50615052565b604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f6b31206f7574206f6620626f756e647300000000000000000000000000000000604482015290519081900360640190fd5b8e8c038d8d141561507057508a9950614bd098505050505050505050565b806150875750969850614bd0975050505050505050565b80600114156150a25750959850614bd0975050505050505050565b80600214156150bd5750949850614bd0975050505050505050565b80600314156150d85750939850614bd0975050505050505050565b80600414156150f35750929850614bd0975050505050505050565b806005141561510e5750919850614bd0975050505050505050565b80600614156151295750909850614bd09750505050505
05050565b604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f6b32206f7574206f6620626f756e647300000000000000000000000000000000604482015290519081900360640190fd5b60008084600285850104815181106151a457fe5b602002602001015190506001840393506001830192505b600184019350808585815181106151ce57fe5b6020026020010151126151bb575b600183039250808584815181106151ef57fe5b6020026020010151136151dc578284101561525f5784838151811061521057fe5b602002602001015185858151811061522457fe5b602002602001015186868151811061523857fe5b6020026020010187868151811061524b57fe5b602090810291909101019190915252615268565b829150506138d4565b6151bb565b60008282018183128015906152825750838112155b80615297575060008312801561529757508381125b6138d4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806153dc6021913960400191505060405180910390fd5b604080518082019091526000808252602082015290565b60408051608081018252600080825260208201819052918101829052606081019190915290565b5080546000825590600052602060002090810190612f3c91906153c1565b6040805160a08101825260608082526000602083018190529282018390528101829052608081019190915290565b8280548282559060005260206000209081019282156153b1579160200282015b828111156153b1578251825591602001919060010190615396565b506153bd9291506153c1565b5090565b611a8591905b808211156153bd57600081556001016153c756fe5369676e6564536166654d6174683a206164646974696f6e206f766572666c6f77536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f77a164736f6c6343000606000a536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f77", +} + +var FluxAggregatorABI = FluxAggregatorMetaData.ABI + +var FluxAggregatorBin = FluxAggregatorMetaData.Bin + +func DeployFluxAggregator(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address, _paymentAmount *big.Int, _timeout uint32, _validator common.Address, _minSubmissionValue *big.Int, _maxSubmissionValue *big.Int, _decimals uint8, _description string) 
(common.Address, *types.Transaction, *FluxAggregator, error) { + parsed, err := FluxAggregatorMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(FluxAggregatorBin), backend, _link, _paymentAmount, _timeout, _validator, _minSubmissionValue, _maxSubmissionValue, _decimals, _description) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &FluxAggregator{address: address, abi: *parsed, FluxAggregatorCaller: FluxAggregatorCaller{contract: contract}, FluxAggregatorTransactor: FluxAggregatorTransactor{contract: contract}, FluxAggregatorFilterer: FluxAggregatorFilterer{contract: contract}}, nil +} + +type FluxAggregator struct { + address common.Address + abi abi.ABI + FluxAggregatorCaller + FluxAggregatorTransactor + FluxAggregatorFilterer +} + +type FluxAggregatorCaller struct { + contract *bind.BoundContract +} + +type FluxAggregatorTransactor struct { + contract *bind.BoundContract +} + +type FluxAggregatorFilterer struct { + contract *bind.BoundContract +} + +type FluxAggregatorSession struct { + Contract *FluxAggregator + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type FluxAggregatorCallerSession struct { + Contract *FluxAggregatorCaller + CallOpts bind.CallOpts +} + +type FluxAggregatorTransactorSession struct { + Contract *FluxAggregatorTransactor + TransactOpts bind.TransactOpts +} + +type FluxAggregatorRaw struct { + Contract *FluxAggregator +} + +type FluxAggregatorCallerRaw struct { + Contract *FluxAggregatorCaller +} + +type FluxAggregatorTransactorRaw struct { + Contract *FluxAggregatorTransactor +} + +func NewFluxAggregator(address common.Address, backend bind.ContractBackend) (*FluxAggregator, error) { + abi, err := abi.JSON(strings.NewReader(FluxAggregatorABI)) + if err != nil { + return nil, err + 
} + contract, err := bindFluxAggregator(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &FluxAggregator{address: address, abi: abi, FluxAggregatorCaller: FluxAggregatorCaller{contract: contract}, FluxAggregatorTransactor: FluxAggregatorTransactor{contract: contract}, FluxAggregatorFilterer: FluxAggregatorFilterer{contract: contract}}, nil +} + +func NewFluxAggregatorCaller(address common.Address, caller bind.ContractCaller) (*FluxAggregatorCaller, error) { + contract, err := bindFluxAggregator(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FluxAggregatorCaller{contract: contract}, nil +} + +func NewFluxAggregatorTransactor(address common.Address, transactor bind.ContractTransactor) (*FluxAggregatorTransactor, error) { + contract, err := bindFluxAggregator(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &FluxAggregatorTransactor{contract: contract}, nil +} + +func NewFluxAggregatorFilterer(address common.Address, filterer bind.ContractFilterer) (*FluxAggregatorFilterer, error) { + contract, err := bindFluxAggregator(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FluxAggregatorFilterer{contract: contract}, nil +} + +func bindFluxAggregator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FluxAggregatorMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_FluxAggregator *FluxAggregatorRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FluxAggregator.Contract.FluxAggregatorCaller.contract.Call(opts, result, method, params...) 
+} + +func (_FluxAggregator *FluxAggregatorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FluxAggregator.Contract.FluxAggregatorTransactor.contract.Transfer(opts) +} + +func (_FluxAggregator *FluxAggregatorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FluxAggregator.Contract.FluxAggregatorTransactor.contract.Transact(opts, method, params...) +} + +func (_FluxAggregator *FluxAggregatorCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FluxAggregator.Contract.contract.Call(opts, result, method, params...) +} + +func (_FluxAggregator *FluxAggregatorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FluxAggregator.Contract.contract.Transfer(opts) +} + +func (_FluxAggregator *FluxAggregatorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FluxAggregator.Contract.contract.Transact(opts, method, params...) 
+} + +func (_FluxAggregator *FluxAggregatorCaller) AllocatedFunds(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "allocatedFunds") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) AllocatedFunds() (*big.Int, error) { + return _FluxAggregator.Contract.AllocatedFunds(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) AllocatedFunds() (*big.Int, error) { + return _FluxAggregator.Contract.AllocatedFunds(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) AvailableFunds(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "availableFunds") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) AvailableFunds() (*big.Int, error) { + return _FluxAggregator.Contract.AvailableFunds(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) AvailableFunds() (*big.Int, error) { + return _FluxAggregator.Contract.AvailableFunds(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) Decimals() (uint8, error) { + return _FluxAggregator.Contract.Decimals(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) Decimals() (uint8, error) { + return _FluxAggregator.Contract.Decimals(&_FluxAggregator.CallOpts) +} + +func 
(_FluxAggregator *FluxAggregatorCaller) Description(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "description") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) Description() (string, error) { + return _FluxAggregator.Contract.Description(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) Description() (string, error) { + return _FluxAggregator.Contract.Description(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) GetAdmin(opts *bind.CallOpts, _oracle common.Address) (common.Address, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "getAdmin", _oracle) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) GetAdmin(_oracle common.Address) (common.Address, error) { + return _FluxAggregator.Contract.GetAdmin(&_FluxAggregator.CallOpts, _oracle) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) GetAdmin(_oracle common.Address) (common.Address, error) { + return _FluxAggregator.Contract.GetAdmin(&_FluxAggregator.CallOpts, _oracle) +} + +func (_FluxAggregator *FluxAggregatorCaller) GetAnswer(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "getAnswer", _roundId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) GetAnswer(_roundId *big.Int) (*big.Int, error) { + return _FluxAggregator.Contract.GetAnswer(&_FluxAggregator.CallOpts, _roundId) +} + +func (_FluxAggregator 
*FluxAggregatorCallerSession) GetAnswer(_roundId *big.Int) (*big.Int, error) { + return _FluxAggregator.Contract.GetAnswer(&_FluxAggregator.CallOpts, _roundId) +} + +func (_FluxAggregator *FluxAggregatorCaller) GetOracles(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "getOracles") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) GetOracles() ([]common.Address, error) { + return _FluxAggregator.Contract.GetOracles(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) GetOracles() ([]common.Address, error) { + return _FluxAggregator.Contract.GetOracles(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "getRoundData", _roundId) + + outstruct := new(GetRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_FluxAggregator *FluxAggregatorSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _FluxAggregator.Contract.GetRoundData(&_FluxAggregator.CallOpts, _roundId) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _FluxAggregator.Contract.GetRoundData(&_FluxAggregator.CallOpts, _roundId) +} + +func 
(_FluxAggregator *FluxAggregatorCaller) GetTimestamp(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "getTimestamp", _roundId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) GetTimestamp(_roundId *big.Int) (*big.Int, error) { + return _FluxAggregator.Contract.GetTimestamp(&_FluxAggregator.CallOpts, _roundId) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) GetTimestamp(_roundId *big.Int) (*big.Int, error) { + return _FluxAggregator.Contract.GetTimestamp(&_FluxAggregator.CallOpts, _roundId) +} + +func (_FluxAggregator *FluxAggregatorCaller) LatestAnswer(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "latestAnswer") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) LatestAnswer() (*big.Int, error) { + return _FluxAggregator.Contract.LatestAnswer(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) LatestAnswer() (*big.Int, error) { + return _FluxAggregator.Contract.LatestAnswer(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) LatestRound(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "latestRound") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) LatestRound() (*big.Int, error) { + return _FluxAggregator.Contract.LatestRound(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) LatestRound() (*big.Int, error) { + return 
_FluxAggregator.Contract.LatestRound(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "latestRoundData") + + outstruct := new(LatestRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_FluxAggregator *FluxAggregatorSession) LatestRoundData() (LatestRoundData, + + error) { + return _FluxAggregator.Contract.LatestRoundData(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) LatestRoundData() (LatestRoundData, + + error) { + return _FluxAggregator.Contract.LatestRoundData(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) LatestTimestamp(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "latestTimestamp") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) LatestTimestamp() (*big.Int, error) { + return _FluxAggregator.Contract.LatestTimestamp(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) LatestTimestamp() (*big.Int, error) { + return _FluxAggregator.Contract.LatestTimestamp(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) LinkToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, 
&out, "linkToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) LinkToken() (common.Address, error) { + return _FluxAggregator.Contract.LinkToken(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) LinkToken() (common.Address, error) { + return _FluxAggregator.Contract.LinkToken(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) MaxSubmissionCount(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "maxSubmissionCount") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) MaxSubmissionCount() (uint32, error) { + return _FluxAggregator.Contract.MaxSubmissionCount(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) MaxSubmissionCount() (uint32, error) { + return _FluxAggregator.Contract.MaxSubmissionCount(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) MaxSubmissionValue(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "maxSubmissionValue") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) MaxSubmissionValue() (*big.Int, error) { + return _FluxAggregator.Contract.MaxSubmissionValue(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) MaxSubmissionValue() (*big.Int, error) { + return _FluxAggregator.Contract.MaxSubmissionValue(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) MinSubmissionCount(opts *bind.CallOpts) (uint32, error) 
{ + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "minSubmissionCount") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) MinSubmissionCount() (uint32, error) { + return _FluxAggregator.Contract.MinSubmissionCount(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) MinSubmissionCount() (uint32, error) { + return _FluxAggregator.Contract.MinSubmissionCount(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) MinSubmissionValue(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "minSubmissionValue") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) MinSubmissionValue() (*big.Int, error) { + return _FluxAggregator.Contract.MinSubmissionValue(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) MinSubmissionValue() (*big.Int, error) { + return _FluxAggregator.Contract.MinSubmissionValue(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) OracleCount(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "oracleCount") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) OracleCount() (uint8, error) { + return _FluxAggregator.Contract.OracleCount(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) OracleCount() (uint8, error) { + return _FluxAggregator.Contract.OracleCount(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) OracleRoundState(opts 
*bind.CallOpts, _oracle common.Address, _queriedRoundId uint32) (OracleRoundState, + + error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "oracleRoundState", _oracle, _queriedRoundId) + + outstruct := new(OracleRoundState) + if err != nil { + return *outstruct, err + } + + outstruct.EligibleToSubmit = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.RoundId = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.LatestSubmission = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[3], new(uint64)).(*uint64) + outstruct.Timeout = *abi.ConvertType(out[4], new(uint64)).(*uint64) + outstruct.AvailableFunds = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + outstruct.OracleCount = *abi.ConvertType(out[6], new(uint8)).(*uint8) + outstruct.PaymentAmount = *abi.ConvertType(out[7], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_FluxAggregator *FluxAggregatorSession) OracleRoundState(_oracle common.Address, _queriedRoundId uint32) (OracleRoundState, + + error) { + return _FluxAggregator.Contract.OracleRoundState(&_FluxAggregator.CallOpts, _oracle, _queriedRoundId) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) OracleRoundState(_oracle common.Address, _queriedRoundId uint32) (OracleRoundState, + + error) { + return _FluxAggregator.Contract.OracleRoundState(&_FluxAggregator.CallOpts, _oracle, _queriedRoundId) +} + +func (_FluxAggregator *FluxAggregatorCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) Owner() (common.Address, error) { + return _FluxAggregator.Contract.Owner(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator 
*FluxAggregatorCallerSession) Owner() (common.Address, error) { + return _FluxAggregator.Contract.Owner(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) PaymentAmount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "paymentAmount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) PaymentAmount() (*big.Int, error) { + return _FluxAggregator.Contract.PaymentAmount(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) PaymentAmount() (*big.Int, error) { + return _FluxAggregator.Contract.PaymentAmount(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) RestartDelay(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "restartDelay") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) RestartDelay() (uint32, error) { + return _FluxAggregator.Contract.RestartDelay(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) RestartDelay() (uint32, error) { + return _FluxAggregator.Contract.RestartDelay(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) Timeout(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "timeout") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) Timeout() (uint32, error) { + return _FluxAggregator.Contract.Timeout(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) Timeout() (uint32, 
error) { + return _FluxAggregator.Contract.Timeout(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) Validator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "validator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) Validator() (common.Address, error) { + return _FluxAggregator.Contract.Validator(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) Validator() (common.Address, error) { + return _FluxAggregator.Contract.Validator(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) Version(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "version") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) Version() (*big.Int, error) { + return _FluxAggregator.Contract.Version(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCallerSession) Version() (*big.Int, error) { + return _FluxAggregator.Contract.Version(&_FluxAggregator.CallOpts) +} + +func (_FluxAggregator *FluxAggregatorCaller) WithdrawablePayment(opts *bind.CallOpts, _oracle common.Address) (*big.Int, error) { + var out []interface{} + err := _FluxAggregator.contract.Call(opts, &out, "withdrawablePayment", _oracle) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FluxAggregator *FluxAggregatorSession) WithdrawablePayment(_oracle common.Address) (*big.Int, error) { + return _FluxAggregator.Contract.WithdrawablePayment(&_FluxAggregator.CallOpts, _oracle) +} + +func 
(_FluxAggregator *FluxAggregatorCallerSession) WithdrawablePayment(_oracle common.Address) (*big.Int, error) { + return _FluxAggregator.Contract.WithdrawablePayment(&_FluxAggregator.CallOpts, _oracle) +} + +func (_FluxAggregator *FluxAggregatorTransactor) AcceptAdmin(opts *bind.TransactOpts, _oracle common.Address) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "acceptAdmin", _oracle) +} + +func (_FluxAggregator *FluxAggregatorSession) AcceptAdmin(_oracle common.Address) (*types.Transaction, error) { + return _FluxAggregator.Contract.AcceptAdmin(&_FluxAggregator.TransactOpts, _oracle) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) AcceptAdmin(_oracle common.Address) (*types.Transaction, error) { + return _FluxAggregator.Contract.AcceptAdmin(&_FluxAggregator.TransactOpts, _oracle) +} + +func (_FluxAggregator *FluxAggregatorTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "acceptOwnership") +} + +func (_FluxAggregator *FluxAggregatorSession) AcceptOwnership() (*types.Transaction, error) { + return _FluxAggregator.Contract.AcceptOwnership(&_FluxAggregator.TransactOpts) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _FluxAggregator.Contract.AcceptOwnership(&_FluxAggregator.TransactOpts) +} + +func (_FluxAggregator *FluxAggregatorTransactor) ChangeOracles(opts *bind.TransactOpts, _removed []common.Address, _added []common.Address, _addedAdmins []common.Address, _minSubmissions uint32, _maxSubmissions uint32, _restartDelay uint32) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "changeOracles", _removed, _added, _addedAdmins, _minSubmissions, _maxSubmissions, _restartDelay) +} + +func (_FluxAggregator *FluxAggregatorSession) ChangeOracles(_removed []common.Address, _added []common.Address, _addedAdmins []common.Address, _minSubmissions 
uint32, _maxSubmissions uint32, _restartDelay uint32) (*types.Transaction, error) { + return _FluxAggregator.Contract.ChangeOracles(&_FluxAggregator.TransactOpts, _removed, _added, _addedAdmins, _minSubmissions, _maxSubmissions, _restartDelay) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) ChangeOracles(_removed []common.Address, _added []common.Address, _addedAdmins []common.Address, _minSubmissions uint32, _maxSubmissions uint32, _restartDelay uint32) (*types.Transaction, error) { + return _FluxAggregator.Contract.ChangeOracles(&_FluxAggregator.TransactOpts, _removed, _added, _addedAdmins, _minSubmissions, _maxSubmissions, _restartDelay) +} + +func (_FluxAggregator *FluxAggregatorTransactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, arg1 *big.Int, _data []byte) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "onTokenTransfer", arg0, arg1, _data) +} + +func (_FluxAggregator *FluxAggregatorSession) OnTokenTransfer(arg0 common.Address, arg1 *big.Int, _data []byte) (*types.Transaction, error) { + return _FluxAggregator.Contract.OnTokenTransfer(&_FluxAggregator.TransactOpts, arg0, arg1, _data) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) OnTokenTransfer(arg0 common.Address, arg1 *big.Int, _data []byte) (*types.Transaction, error) { + return _FluxAggregator.Contract.OnTokenTransfer(&_FluxAggregator.TransactOpts, arg0, arg1, _data) +} + +func (_FluxAggregator *FluxAggregatorTransactor) RequestNewRound(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "requestNewRound") +} + +func (_FluxAggregator *FluxAggregatorSession) RequestNewRound() (*types.Transaction, error) { + return _FluxAggregator.Contract.RequestNewRound(&_FluxAggregator.TransactOpts) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) RequestNewRound() (*types.Transaction, error) { + return _FluxAggregator.Contract.RequestNewRound(&_FluxAggregator.TransactOpts) 
+} + +func (_FluxAggregator *FluxAggregatorTransactor) SetRequesterPermissions(opts *bind.TransactOpts, _requester common.Address, _authorized bool, _delay uint32) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "setRequesterPermissions", _requester, _authorized, _delay) +} + +func (_FluxAggregator *FluxAggregatorSession) SetRequesterPermissions(_requester common.Address, _authorized bool, _delay uint32) (*types.Transaction, error) { + return _FluxAggregator.Contract.SetRequesterPermissions(&_FluxAggregator.TransactOpts, _requester, _authorized, _delay) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) SetRequesterPermissions(_requester common.Address, _authorized bool, _delay uint32) (*types.Transaction, error) { + return _FluxAggregator.Contract.SetRequesterPermissions(&_FluxAggregator.TransactOpts, _requester, _authorized, _delay) +} + +func (_FluxAggregator *FluxAggregatorTransactor) SetValidator(opts *bind.TransactOpts, _newValidator common.Address) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "setValidator", _newValidator) +} + +func (_FluxAggregator *FluxAggregatorSession) SetValidator(_newValidator common.Address) (*types.Transaction, error) { + return _FluxAggregator.Contract.SetValidator(&_FluxAggregator.TransactOpts, _newValidator) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) SetValidator(_newValidator common.Address) (*types.Transaction, error) { + return _FluxAggregator.Contract.SetValidator(&_FluxAggregator.TransactOpts, _newValidator) +} + +func (_FluxAggregator *FluxAggregatorTransactor) Submit(opts *bind.TransactOpts, _roundId *big.Int, _submission *big.Int) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "submit", _roundId, _submission) +} + +func (_FluxAggregator *FluxAggregatorSession) Submit(_roundId *big.Int, _submission *big.Int) (*types.Transaction, error) { + return 
_FluxAggregator.Contract.Submit(&_FluxAggregator.TransactOpts, _roundId, _submission) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) Submit(_roundId *big.Int, _submission *big.Int) (*types.Transaction, error) { + return _FluxAggregator.Contract.Submit(&_FluxAggregator.TransactOpts, _roundId, _submission) +} + +func (_FluxAggregator *FluxAggregatorTransactor) TransferAdmin(opts *bind.TransactOpts, _oracle common.Address, _newAdmin common.Address) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "transferAdmin", _oracle, _newAdmin) +} + +func (_FluxAggregator *FluxAggregatorSession) TransferAdmin(_oracle common.Address, _newAdmin common.Address) (*types.Transaction, error) { + return _FluxAggregator.Contract.TransferAdmin(&_FluxAggregator.TransactOpts, _oracle, _newAdmin) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) TransferAdmin(_oracle common.Address, _newAdmin common.Address) (*types.Transaction, error) { + return _FluxAggregator.Contract.TransferAdmin(&_FluxAggregator.TransactOpts, _oracle, _newAdmin) +} + +func (_FluxAggregator *FluxAggregatorTransactor) TransferOwnership(opts *bind.TransactOpts, _to common.Address) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "transferOwnership", _to) +} + +func (_FluxAggregator *FluxAggregatorSession) TransferOwnership(_to common.Address) (*types.Transaction, error) { + return _FluxAggregator.Contract.TransferOwnership(&_FluxAggregator.TransactOpts, _to) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) TransferOwnership(_to common.Address) (*types.Transaction, error) { + return _FluxAggregator.Contract.TransferOwnership(&_FluxAggregator.TransactOpts, _to) +} + +func (_FluxAggregator *FluxAggregatorTransactor) UpdateAvailableFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "updateAvailableFunds") +} + +func (_FluxAggregator *FluxAggregatorSession) 
UpdateAvailableFunds() (*types.Transaction, error) { + return _FluxAggregator.Contract.UpdateAvailableFunds(&_FluxAggregator.TransactOpts) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) UpdateAvailableFunds() (*types.Transaction, error) { + return _FluxAggregator.Contract.UpdateAvailableFunds(&_FluxAggregator.TransactOpts) +} + +func (_FluxAggregator *FluxAggregatorTransactor) UpdateFutureRounds(opts *bind.TransactOpts, _paymentAmount *big.Int, _minSubmissions uint32, _maxSubmissions uint32, _restartDelay uint32, _timeout uint32) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "updateFutureRounds", _paymentAmount, _minSubmissions, _maxSubmissions, _restartDelay, _timeout) +} + +func (_FluxAggregator *FluxAggregatorSession) UpdateFutureRounds(_paymentAmount *big.Int, _minSubmissions uint32, _maxSubmissions uint32, _restartDelay uint32, _timeout uint32) (*types.Transaction, error) { + return _FluxAggregator.Contract.UpdateFutureRounds(&_FluxAggregator.TransactOpts, _paymentAmount, _minSubmissions, _maxSubmissions, _restartDelay, _timeout) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) UpdateFutureRounds(_paymentAmount *big.Int, _minSubmissions uint32, _maxSubmissions uint32, _restartDelay uint32, _timeout uint32) (*types.Transaction, error) { + return _FluxAggregator.Contract.UpdateFutureRounds(&_FluxAggregator.TransactOpts, _paymentAmount, _minSubmissions, _maxSubmissions, _restartDelay, _timeout) +} + +func (_FluxAggregator *FluxAggregatorTransactor) WithdrawFunds(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "withdrawFunds", _recipient, _amount) +} + +func (_FluxAggregator *FluxAggregatorSession) WithdrawFunds(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _FluxAggregator.Contract.WithdrawFunds(&_FluxAggregator.TransactOpts, _recipient, _amount) +} + +func 
(_FluxAggregator *FluxAggregatorTransactorSession) WithdrawFunds(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _FluxAggregator.Contract.WithdrawFunds(&_FluxAggregator.TransactOpts, _recipient, _amount) +} + +func (_FluxAggregator *FluxAggregatorTransactor) WithdrawPayment(opts *bind.TransactOpts, _oracle common.Address, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _FluxAggregator.contract.Transact(opts, "withdrawPayment", _oracle, _recipient, _amount) +} + +func (_FluxAggregator *FluxAggregatorSession) WithdrawPayment(_oracle common.Address, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _FluxAggregator.Contract.WithdrawPayment(&_FluxAggregator.TransactOpts, _oracle, _recipient, _amount) +} + +func (_FluxAggregator *FluxAggregatorTransactorSession) WithdrawPayment(_oracle common.Address, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _FluxAggregator.Contract.WithdrawPayment(&_FluxAggregator.TransactOpts, _oracle, _recipient, _amount) +} + +type FluxAggregatorAnswerUpdatedIterator struct { + Event *FluxAggregatorAnswerUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FluxAggregatorAnswerUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorAnswerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorAnswerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + 
return it.Next() + } +} + +func (it *FluxAggregatorAnswerUpdatedIterator) Error() error { + return it.fail +} + +func (it *FluxAggregatorAnswerUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FluxAggregatorAnswerUpdated struct { + Current *big.Int + RoundId *big.Int + UpdatedAt *big.Int + Raw types.Log +} + +func (_FluxAggregator *FluxAggregatorFilterer) FilterAnswerUpdated(opts *bind.FilterOpts, current []*big.Int, roundId []*big.Int) (*FluxAggregatorAnswerUpdatedIterator, error) { + + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + + logs, sub, err := _FluxAggregator.contract.FilterLogs(opts, "AnswerUpdated", currentRule, roundIdRule) + if err != nil { + return nil, err + } + return &FluxAggregatorAnswerUpdatedIterator{contract: _FluxAggregator.contract, event: "AnswerUpdated", logs: logs, sub: sub}, nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) WatchAnswerUpdated(opts *bind.WatchOpts, sink chan<- *FluxAggregatorAnswerUpdated, current []*big.Int, roundId []*big.Int) (event.Subscription, error) { + + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + + logs, sub, err := _FluxAggregator.contract.WatchLogs(opts, "AnswerUpdated", currentRule, roundIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FluxAggregatorAnswerUpdated) + if err := _FluxAggregator.contract.UnpackLog(event, "AnswerUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + 
case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) ParseAnswerUpdated(log types.Log) (*FluxAggregatorAnswerUpdated, error) { + event := new(FluxAggregatorAnswerUpdated) + if err := _FluxAggregator.contract.UnpackLog(event, "AnswerUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FluxAggregatorAvailableFundsUpdatedIterator struct { + Event *FluxAggregatorAvailableFundsUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FluxAggregatorAvailableFundsUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorAvailableFundsUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorAvailableFundsUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FluxAggregatorAvailableFundsUpdatedIterator) Error() error { + return it.fail +} + +func (it *FluxAggregatorAvailableFundsUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FluxAggregatorAvailableFundsUpdated struct { + Amount *big.Int + Raw types.Log +} + +func (_FluxAggregator *FluxAggregatorFilterer) FilterAvailableFundsUpdated(opts *bind.FilterOpts, amount []*big.Int) (*FluxAggregatorAvailableFundsUpdatedIterator, error) { + + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = 
append(amountRule, amountItem) + } + + logs, sub, err := _FluxAggregator.contract.FilterLogs(opts, "AvailableFundsUpdated", amountRule) + if err != nil { + return nil, err + } + return &FluxAggregatorAvailableFundsUpdatedIterator{contract: _FluxAggregator.contract, event: "AvailableFundsUpdated", logs: logs, sub: sub}, nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) WatchAvailableFundsUpdated(opts *bind.WatchOpts, sink chan<- *FluxAggregatorAvailableFundsUpdated, amount []*big.Int) (event.Subscription, error) { + + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + + logs, sub, err := _FluxAggregator.contract.WatchLogs(opts, "AvailableFundsUpdated", amountRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FluxAggregatorAvailableFundsUpdated) + if err := _FluxAggregator.contract.UnpackLog(event, "AvailableFundsUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) ParseAvailableFundsUpdated(log types.Log) (*FluxAggregatorAvailableFundsUpdated, error) { + event := new(FluxAggregatorAvailableFundsUpdated) + if err := _FluxAggregator.contract.UnpackLog(event, "AvailableFundsUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FluxAggregatorNewRoundIterator struct { + Event *FluxAggregatorNewRound + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FluxAggregatorNewRoundIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + 
case log := <-it.logs: + it.Event = new(FluxAggregatorNewRound) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorNewRound) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FluxAggregatorNewRoundIterator) Error() error { + return it.fail +} + +func (it *FluxAggregatorNewRoundIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FluxAggregatorNewRound struct { + RoundId *big.Int + StartedBy common.Address + StartedAt *big.Int + Raw types.Log +} + +func (_FluxAggregator *FluxAggregatorFilterer) FilterNewRound(opts *bind.FilterOpts, roundId []*big.Int, startedBy []common.Address) (*FluxAggregatorNewRoundIterator, error) { + + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + var startedByRule []interface{} + for _, startedByItem := range startedBy { + startedByRule = append(startedByRule, startedByItem) + } + + logs, sub, err := _FluxAggregator.contract.FilterLogs(opts, "NewRound", roundIdRule, startedByRule) + if err != nil { + return nil, err + } + return &FluxAggregatorNewRoundIterator{contract: _FluxAggregator.contract, event: "NewRound", logs: logs, sub: sub}, nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) WatchNewRound(opts *bind.WatchOpts, sink chan<- *FluxAggregatorNewRound, roundId []*big.Int, startedBy []common.Address) (event.Subscription, error) { + + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + var startedByRule []interface{} + for _, startedByItem := range startedBy { + startedByRule = 
append(startedByRule, startedByItem) + } + + logs, sub, err := _FluxAggregator.contract.WatchLogs(opts, "NewRound", roundIdRule, startedByRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FluxAggregatorNewRound) + if err := _FluxAggregator.contract.UnpackLog(event, "NewRound", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) ParseNewRound(log types.Log) (*FluxAggregatorNewRound, error) { + event := new(FluxAggregatorNewRound) + if err := _FluxAggregator.contract.UnpackLog(event, "NewRound", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FluxAggregatorOracleAdminUpdateRequestedIterator struct { + Event *FluxAggregatorOracleAdminUpdateRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FluxAggregatorOracleAdminUpdateRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorOracleAdminUpdateRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorOracleAdminUpdateRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*FluxAggregatorOracleAdminUpdateRequestedIterator) Error() error { + return it.fail +} + +func (it *FluxAggregatorOracleAdminUpdateRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FluxAggregatorOracleAdminUpdateRequested struct { + Oracle common.Address + Admin common.Address + NewAdmin common.Address + Raw types.Log +} + +func (_FluxAggregator *FluxAggregatorFilterer) FilterOracleAdminUpdateRequested(opts *bind.FilterOpts, oracle []common.Address) (*FluxAggregatorOracleAdminUpdateRequestedIterator, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + + logs, sub, err := _FluxAggregator.contract.FilterLogs(opts, "OracleAdminUpdateRequested", oracleRule) + if err != nil { + return nil, err + } + return &FluxAggregatorOracleAdminUpdateRequestedIterator{contract: _FluxAggregator.contract, event: "OracleAdminUpdateRequested", logs: logs, sub: sub}, nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) WatchOracleAdminUpdateRequested(opts *bind.WatchOpts, sink chan<- *FluxAggregatorOracleAdminUpdateRequested, oracle []common.Address) (event.Subscription, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + + logs, sub, err := _FluxAggregator.contract.WatchLogs(opts, "OracleAdminUpdateRequested", oracleRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FluxAggregatorOracleAdminUpdateRequested) + if err := _FluxAggregator.contract.UnpackLog(event, "OracleAdminUpdateRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func 
(_FluxAggregator *FluxAggregatorFilterer) ParseOracleAdminUpdateRequested(log types.Log) (*FluxAggregatorOracleAdminUpdateRequested, error) { + event := new(FluxAggregatorOracleAdminUpdateRequested) + if err := _FluxAggregator.contract.UnpackLog(event, "OracleAdminUpdateRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FluxAggregatorOracleAdminUpdatedIterator struct { + Event *FluxAggregatorOracleAdminUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FluxAggregatorOracleAdminUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorOracleAdminUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorOracleAdminUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FluxAggregatorOracleAdminUpdatedIterator) Error() error { + return it.fail +} + +func (it *FluxAggregatorOracleAdminUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FluxAggregatorOracleAdminUpdated struct { + Oracle common.Address + NewAdmin common.Address + Raw types.Log +} + +func (_FluxAggregator *FluxAggregatorFilterer) FilterOracleAdminUpdated(opts *bind.FilterOpts, oracle []common.Address, newAdmin []common.Address) (*FluxAggregatorOracleAdminUpdatedIterator, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + var newAdminRule []interface{} + for _, 
newAdminItem := range newAdmin { + newAdminRule = append(newAdminRule, newAdminItem) + } + + logs, sub, err := _FluxAggregator.contract.FilterLogs(opts, "OracleAdminUpdated", oracleRule, newAdminRule) + if err != nil { + return nil, err + } + return &FluxAggregatorOracleAdminUpdatedIterator{contract: _FluxAggregator.contract, event: "OracleAdminUpdated", logs: logs, sub: sub}, nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) WatchOracleAdminUpdated(opts *bind.WatchOpts, sink chan<- *FluxAggregatorOracleAdminUpdated, oracle []common.Address, newAdmin []common.Address) (event.Subscription, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + var newAdminRule []interface{} + for _, newAdminItem := range newAdmin { + newAdminRule = append(newAdminRule, newAdminItem) + } + + logs, sub, err := _FluxAggregator.contract.WatchLogs(opts, "OracleAdminUpdated", oracleRule, newAdminRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FluxAggregatorOracleAdminUpdated) + if err := _FluxAggregator.contract.UnpackLog(event, "OracleAdminUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) ParseOracleAdminUpdated(log types.Log) (*FluxAggregatorOracleAdminUpdated, error) { + event := new(FluxAggregatorOracleAdminUpdated) + if err := _FluxAggregator.contract.UnpackLog(event, "OracleAdminUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FluxAggregatorOraclePermissionsUpdatedIterator struct { + Event *FluxAggregatorOraclePermissionsUpdated + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FluxAggregatorOraclePermissionsUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorOraclePermissionsUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorOraclePermissionsUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FluxAggregatorOraclePermissionsUpdatedIterator) Error() error { + return it.fail +} + +func (it *FluxAggregatorOraclePermissionsUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FluxAggregatorOraclePermissionsUpdated struct { + Oracle common.Address + Whitelisted bool + Raw types.Log +} + +func (_FluxAggregator *FluxAggregatorFilterer) FilterOraclePermissionsUpdated(opts *bind.FilterOpts, oracle []common.Address, whitelisted []bool) (*FluxAggregatorOraclePermissionsUpdatedIterator, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + var whitelistedRule []interface{} + for _, whitelistedItem := range whitelisted { + whitelistedRule = append(whitelistedRule, whitelistedItem) + } + + logs, sub, err := _FluxAggregator.contract.FilterLogs(opts, "OraclePermissionsUpdated", oracleRule, whitelistedRule) + if err != nil { + return nil, err + } + return &FluxAggregatorOraclePermissionsUpdatedIterator{contract: _FluxAggregator.contract, event: "OraclePermissionsUpdated", logs: logs, sub: sub}, nil +} + +func 
(_FluxAggregator *FluxAggregatorFilterer) WatchOraclePermissionsUpdated(opts *bind.WatchOpts, sink chan<- *FluxAggregatorOraclePermissionsUpdated, oracle []common.Address, whitelisted []bool) (event.Subscription, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + var whitelistedRule []interface{} + for _, whitelistedItem := range whitelisted { + whitelistedRule = append(whitelistedRule, whitelistedItem) + } + + logs, sub, err := _FluxAggregator.contract.WatchLogs(opts, "OraclePermissionsUpdated", oracleRule, whitelistedRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FluxAggregatorOraclePermissionsUpdated) + if err := _FluxAggregator.contract.UnpackLog(event, "OraclePermissionsUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) ParseOraclePermissionsUpdated(log types.Log) (*FluxAggregatorOraclePermissionsUpdated, error) { + event := new(FluxAggregatorOraclePermissionsUpdated) + if err := _FluxAggregator.contract.UnpackLog(event, "OraclePermissionsUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FluxAggregatorOwnershipTransferRequestedIterator struct { + Event *FluxAggregatorOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FluxAggregatorOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(FluxAggregatorOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FluxAggregatorOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *FluxAggregatorOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FluxAggregatorOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FluxAggregator *FluxAggregatorFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FluxAggregatorOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FluxAggregator.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &FluxAggregatorOwnershipTransferRequestedIterator{contract: _FluxAggregator.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FluxAggregatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, 
toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FluxAggregator.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FluxAggregatorOwnershipTransferRequested) + if err := _FluxAggregator.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) ParseOwnershipTransferRequested(log types.Log) (*FluxAggregatorOwnershipTransferRequested, error) { + event := new(FluxAggregatorOwnershipTransferRequested) + if err := _FluxAggregator.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FluxAggregatorOwnershipTransferredIterator struct { + Event *FluxAggregatorOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FluxAggregatorOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := 
<-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FluxAggregatorOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *FluxAggregatorOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FluxAggregatorOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FluxAggregator *FluxAggregatorFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FluxAggregatorOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FluxAggregator.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &FluxAggregatorOwnershipTransferredIterator{contract: _FluxAggregator.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FluxAggregatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FluxAggregator.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FluxAggregatorOwnershipTransferred) + if err := _FluxAggregator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case 
sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) ParseOwnershipTransferred(log types.Log) (*FluxAggregatorOwnershipTransferred, error) { + event := new(FluxAggregatorOwnershipTransferred) + if err := _FluxAggregator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FluxAggregatorRequesterPermissionsSetIterator struct { + Event *FluxAggregatorRequesterPermissionsSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FluxAggregatorRequesterPermissionsSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorRequesterPermissionsSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorRequesterPermissionsSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FluxAggregatorRequesterPermissionsSetIterator) Error() error { + return it.fail +} + +func (it *FluxAggregatorRequesterPermissionsSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FluxAggregatorRequesterPermissionsSet struct { + Requester common.Address + Authorized bool + Delay uint32 + Raw types.Log +} + +func (_FluxAggregator *FluxAggregatorFilterer) FilterRequesterPermissionsSet(opts *bind.FilterOpts, requester []common.Address) 
(*FluxAggregatorRequesterPermissionsSetIterator, error) { + + var requesterRule []interface{} + for _, requesterItem := range requester { + requesterRule = append(requesterRule, requesterItem) + } + + logs, sub, err := _FluxAggregator.contract.FilterLogs(opts, "RequesterPermissionsSet", requesterRule) + if err != nil { + return nil, err + } + return &FluxAggregatorRequesterPermissionsSetIterator{contract: _FluxAggregator.contract, event: "RequesterPermissionsSet", logs: logs, sub: sub}, nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) WatchRequesterPermissionsSet(opts *bind.WatchOpts, sink chan<- *FluxAggregatorRequesterPermissionsSet, requester []common.Address) (event.Subscription, error) { + + var requesterRule []interface{} + for _, requesterItem := range requester { + requesterRule = append(requesterRule, requesterItem) + } + + logs, sub, err := _FluxAggregator.contract.WatchLogs(opts, "RequesterPermissionsSet", requesterRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FluxAggregatorRequesterPermissionsSet) + if err := _FluxAggregator.contract.UnpackLog(event, "RequesterPermissionsSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) ParseRequesterPermissionsSet(log types.Log) (*FluxAggregatorRequesterPermissionsSet, error) { + event := new(FluxAggregatorRequesterPermissionsSet) + if err := _FluxAggregator.contract.UnpackLog(event, "RequesterPermissionsSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FluxAggregatorRoundDetailsUpdatedIterator struct { + Event *FluxAggregatorRoundDetailsUpdated + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FluxAggregatorRoundDetailsUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorRoundDetailsUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorRoundDetailsUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FluxAggregatorRoundDetailsUpdatedIterator) Error() error { + return it.fail +} + +func (it *FluxAggregatorRoundDetailsUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FluxAggregatorRoundDetailsUpdated struct { + PaymentAmount *big.Int + MinSubmissionCount uint32 + MaxSubmissionCount uint32 + RestartDelay uint32 + Timeout uint32 + Raw types.Log +} + +func (_FluxAggregator *FluxAggregatorFilterer) FilterRoundDetailsUpdated(opts *bind.FilterOpts, paymentAmount []*big.Int, minSubmissionCount []uint32, maxSubmissionCount []uint32) (*FluxAggregatorRoundDetailsUpdatedIterator, error) { + + var paymentAmountRule []interface{} + for _, paymentAmountItem := range paymentAmount { + paymentAmountRule = append(paymentAmountRule, paymentAmountItem) + } + var minSubmissionCountRule []interface{} + for _, minSubmissionCountItem := range minSubmissionCount { + minSubmissionCountRule = append(minSubmissionCountRule, minSubmissionCountItem) + } + var maxSubmissionCountRule []interface{} + for _, maxSubmissionCountItem := range maxSubmissionCount { + maxSubmissionCountRule = append(maxSubmissionCountRule, 
maxSubmissionCountItem) + } + + logs, sub, err := _FluxAggregator.contract.FilterLogs(opts, "RoundDetailsUpdated", paymentAmountRule, minSubmissionCountRule, maxSubmissionCountRule) + if err != nil { + return nil, err + } + return &FluxAggregatorRoundDetailsUpdatedIterator{contract: _FluxAggregator.contract, event: "RoundDetailsUpdated", logs: logs, sub: sub}, nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) WatchRoundDetailsUpdated(opts *bind.WatchOpts, sink chan<- *FluxAggregatorRoundDetailsUpdated, paymentAmount []*big.Int, minSubmissionCount []uint32, maxSubmissionCount []uint32) (event.Subscription, error) { + + var paymentAmountRule []interface{} + for _, paymentAmountItem := range paymentAmount { + paymentAmountRule = append(paymentAmountRule, paymentAmountItem) + } + var minSubmissionCountRule []interface{} + for _, minSubmissionCountItem := range minSubmissionCount { + minSubmissionCountRule = append(minSubmissionCountRule, minSubmissionCountItem) + } + var maxSubmissionCountRule []interface{} + for _, maxSubmissionCountItem := range maxSubmissionCount { + maxSubmissionCountRule = append(maxSubmissionCountRule, maxSubmissionCountItem) + } + + logs, sub, err := _FluxAggregator.contract.WatchLogs(opts, "RoundDetailsUpdated", paymentAmountRule, minSubmissionCountRule, maxSubmissionCountRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FluxAggregatorRoundDetailsUpdated) + if err := _FluxAggregator.contract.UnpackLog(event, "RoundDetailsUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) ParseRoundDetailsUpdated(log types.Log) 
(*FluxAggregatorRoundDetailsUpdated, error) { + event := new(FluxAggregatorRoundDetailsUpdated) + if err := _FluxAggregator.contract.UnpackLog(event, "RoundDetailsUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FluxAggregatorSubmissionReceivedIterator struct { + Event *FluxAggregatorSubmissionReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FluxAggregatorSubmissionReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorSubmissionReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorSubmissionReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FluxAggregatorSubmissionReceivedIterator) Error() error { + return it.fail +} + +func (it *FluxAggregatorSubmissionReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FluxAggregatorSubmissionReceived struct { + Submission *big.Int + Round uint32 + Oracle common.Address + Raw types.Log +} + +func (_FluxAggregator *FluxAggregatorFilterer) FilterSubmissionReceived(opts *bind.FilterOpts, submission []*big.Int, round []uint32, oracle []common.Address) (*FluxAggregatorSubmissionReceivedIterator, error) { + + var submissionRule []interface{} + for _, submissionItem := range submission { + submissionRule = append(submissionRule, submissionItem) + } + var roundRule []interface{} + for _, roundItem := range round { + roundRule = append(roundRule, 
roundItem) + } + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + + logs, sub, err := _FluxAggregator.contract.FilterLogs(opts, "SubmissionReceived", submissionRule, roundRule, oracleRule) + if err != nil { + return nil, err + } + return &FluxAggregatorSubmissionReceivedIterator{contract: _FluxAggregator.contract, event: "SubmissionReceived", logs: logs, sub: sub}, nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) WatchSubmissionReceived(opts *bind.WatchOpts, sink chan<- *FluxAggregatorSubmissionReceived, submission []*big.Int, round []uint32, oracle []common.Address) (event.Subscription, error) { + + var submissionRule []interface{} + for _, submissionItem := range submission { + submissionRule = append(submissionRule, submissionItem) + } + var roundRule []interface{} + for _, roundItem := range round { + roundRule = append(roundRule, roundItem) + } + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + + logs, sub, err := _FluxAggregator.contract.WatchLogs(opts, "SubmissionReceived", submissionRule, roundRule, oracleRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FluxAggregatorSubmissionReceived) + if err := _FluxAggregator.contract.UnpackLog(event, "SubmissionReceived", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) ParseSubmissionReceived(log types.Log) (*FluxAggregatorSubmissionReceived, error) { + event := new(FluxAggregatorSubmissionReceived) + if err := _FluxAggregator.contract.UnpackLog(event, "SubmissionReceived", log); 
err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FluxAggregatorValidatorUpdatedIterator struct { + Event *FluxAggregatorValidatorUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FluxAggregatorValidatorUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorValidatorUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FluxAggregatorValidatorUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FluxAggregatorValidatorUpdatedIterator) Error() error { + return it.fail +} + +func (it *FluxAggregatorValidatorUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FluxAggregatorValidatorUpdated struct { + Previous common.Address + Current common.Address + Raw types.Log +} + +func (_FluxAggregator *FluxAggregatorFilterer) FilterValidatorUpdated(opts *bind.FilterOpts, previous []common.Address, current []common.Address) (*FluxAggregatorValidatorUpdatedIterator, error) { + + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _FluxAggregator.contract.FilterLogs(opts, "ValidatorUpdated", previousRule, currentRule) + if err != nil { + return nil, err + } + return &FluxAggregatorValidatorUpdatedIterator{contract: 
_FluxAggregator.contract, event: "ValidatorUpdated", logs: logs, sub: sub}, nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) WatchValidatorUpdated(opts *bind.WatchOpts, sink chan<- *FluxAggregatorValidatorUpdated, previous []common.Address, current []common.Address) (event.Subscription, error) { + + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _FluxAggregator.contract.WatchLogs(opts, "ValidatorUpdated", previousRule, currentRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FluxAggregatorValidatorUpdated) + if err := _FluxAggregator.contract.UnpackLog(event, "ValidatorUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FluxAggregator *FluxAggregatorFilterer) ParseValidatorUpdated(log types.Log) (*FluxAggregatorValidatorUpdated, error) { + event := new(FluxAggregatorValidatorUpdated) + if err := _FluxAggregator.contract.UnpackLog(event, "ValidatorUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRoundData struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} +type LatestRoundData struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} +type OracleRoundState struct { + EligibleToSubmit bool + RoundId uint32 + LatestSubmission *big.Int + StartedAt uint64 + Timeout uint64 + AvailableFunds *big.Int + 
OracleCount uint8 + PaymentAmount *big.Int +} + +func (_FluxAggregator *FluxAggregator) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _FluxAggregator.abi.Events["AnswerUpdated"].ID: + return _FluxAggregator.ParseAnswerUpdated(log) + case _FluxAggregator.abi.Events["AvailableFundsUpdated"].ID: + return _FluxAggregator.ParseAvailableFundsUpdated(log) + case _FluxAggregator.abi.Events["NewRound"].ID: + return _FluxAggregator.ParseNewRound(log) + case _FluxAggregator.abi.Events["OracleAdminUpdateRequested"].ID: + return _FluxAggregator.ParseOracleAdminUpdateRequested(log) + case _FluxAggregator.abi.Events["OracleAdminUpdated"].ID: + return _FluxAggregator.ParseOracleAdminUpdated(log) + case _FluxAggregator.abi.Events["OraclePermissionsUpdated"].ID: + return _FluxAggregator.ParseOraclePermissionsUpdated(log) + case _FluxAggregator.abi.Events["OwnershipTransferRequested"].ID: + return _FluxAggregator.ParseOwnershipTransferRequested(log) + case _FluxAggregator.abi.Events["OwnershipTransferred"].ID: + return _FluxAggregator.ParseOwnershipTransferred(log) + case _FluxAggregator.abi.Events["RequesterPermissionsSet"].ID: + return _FluxAggregator.ParseRequesterPermissionsSet(log) + case _FluxAggregator.abi.Events["RoundDetailsUpdated"].ID: + return _FluxAggregator.ParseRoundDetailsUpdated(log) + case _FluxAggregator.abi.Events["SubmissionReceived"].ID: + return _FluxAggregator.ParseSubmissionReceived(log) + case _FluxAggregator.abi.Events["ValidatorUpdated"].ID: + return _FluxAggregator.ParseValidatorUpdated(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (FluxAggregatorAnswerUpdated) Topic() common.Hash { + return common.HexToHash("0x0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f") +} + +func (FluxAggregatorAvailableFundsUpdated) Topic() common.Hash { + return common.HexToHash("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f") 
+} + +func (FluxAggregatorNewRound) Topic() common.Hash { + return common.HexToHash("0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271") +} + +func (FluxAggregatorOracleAdminUpdateRequested) Topic() common.Hash { + return common.HexToHash("0xb79bf2e89c2d70dde91d2991fb1ea69b7e478061ad7c04ed5b02b96bc52b8104") +} + +func (FluxAggregatorOracleAdminUpdated) Topic() common.Hash { + return common.HexToHash("0x0c5055390645c15a4be9a21b3f8d019153dcb4a0c125685da6eb84048e2fe904") +} + +func (FluxAggregatorOraclePermissionsUpdated) Topic() common.Hash { + return common.HexToHash("0x18dd09695e4fbdae8d1a5edb11221eb04564269c29a089b9753a6535c54ba92e") +} + +func (FluxAggregatorOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (FluxAggregatorOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (FluxAggregatorRequesterPermissionsSet) Topic() common.Hash { + return common.HexToHash("0xc3df5a754e002718f2e10804b99e6605e7c701d95cec9552c7680ca2b6f2820a") +} + +func (FluxAggregatorRoundDetailsUpdated) Topic() common.Hash { + return common.HexToHash("0x56800c9d1ed723511246614d15e58cfcde15b6a33c245b5c961b689c1890fd8f") +} + +func (FluxAggregatorSubmissionReceived) Topic() common.Hash { + return common.HexToHash("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c") +} + +func (FluxAggregatorValidatorUpdated) Topic() common.Hash { + return common.HexToHash("0xcfac5dc75b8d9a7e074162f59d9adcd33da59f0fe8dfb21580db298fc0fdad0d") +} + +func (_FluxAggregator *FluxAggregator) Address() common.Address { + return _FluxAggregator.address +} + +type FluxAggregatorInterface interface { + AllocatedFunds(opts *bind.CallOpts) (*big.Int, error) + + AvailableFunds(opts *bind.CallOpts) (*big.Int, error) + + Decimals(opts *bind.CallOpts) (uint8, error) + + Description(opts 
*bind.CallOpts) (string, error) + + GetAdmin(opts *bind.CallOpts, _oracle common.Address) (common.Address, error) + + GetAnswer(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) + + GetOracles(opts *bind.CallOpts) ([]common.Address, error) + + GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) + + GetTimestamp(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) + + LatestAnswer(opts *bind.CallOpts) (*big.Int, error) + + LatestRound(opts *bind.CallOpts) (*big.Int, error) + + LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) + + LatestTimestamp(opts *bind.CallOpts) (*big.Int, error) + + LinkToken(opts *bind.CallOpts) (common.Address, error) + + MaxSubmissionCount(opts *bind.CallOpts) (uint32, error) + + MaxSubmissionValue(opts *bind.CallOpts) (*big.Int, error) + + MinSubmissionCount(opts *bind.CallOpts) (uint32, error) + + MinSubmissionValue(opts *bind.CallOpts) (*big.Int, error) + + OracleCount(opts *bind.CallOpts) (uint8, error) + + OracleRoundState(opts *bind.CallOpts, _oracle common.Address, _queriedRoundId uint32) (OracleRoundState, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + PaymentAmount(opts *bind.CallOpts) (*big.Int, error) + + RestartDelay(opts *bind.CallOpts) (uint32, error) + + Timeout(opts *bind.CallOpts) (uint32, error) + + Validator(opts *bind.CallOpts) (common.Address, error) + + Version(opts *bind.CallOpts) (*big.Int, error) + + WithdrawablePayment(opts *bind.CallOpts, _oracle common.Address) (*big.Int, error) + + AcceptAdmin(opts *bind.TransactOpts, _oracle common.Address) (*types.Transaction, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + ChangeOracles(opts *bind.TransactOpts, _removed []common.Address, _added []common.Address, _addedAdmins []common.Address, _minSubmissions uint32, _maxSubmissions uint32, _restartDelay uint32) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, arg1 
*big.Int, _data []byte) (*types.Transaction, error) + + RequestNewRound(opts *bind.TransactOpts) (*types.Transaction, error) + + SetRequesterPermissions(opts *bind.TransactOpts, _requester common.Address, _authorized bool, _delay uint32) (*types.Transaction, error) + + SetValidator(opts *bind.TransactOpts, _newValidator common.Address) (*types.Transaction, error) + + Submit(opts *bind.TransactOpts, _roundId *big.Int, _submission *big.Int) (*types.Transaction, error) + + TransferAdmin(opts *bind.TransactOpts, _oracle common.Address, _newAdmin common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, _to common.Address) (*types.Transaction, error) + + UpdateAvailableFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + UpdateFutureRounds(opts *bind.TransactOpts, _paymentAmount *big.Int, _minSubmissions uint32, _maxSubmissions uint32, _restartDelay uint32, _timeout uint32) (*types.Transaction, error) + + WithdrawFunds(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) + + WithdrawPayment(opts *bind.TransactOpts, _oracle common.Address, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) + + FilterAnswerUpdated(opts *bind.FilterOpts, current []*big.Int, roundId []*big.Int) (*FluxAggregatorAnswerUpdatedIterator, error) + + WatchAnswerUpdated(opts *bind.WatchOpts, sink chan<- *FluxAggregatorAnswerUpdated, current []*big.Int, roundId []*big.Int) (event.Subscription, error) + + ParseAnswerUpdated(log types.Log) (*FluxAggregatorAnswerUpdated, error) + + FilterAvailableFundsUpdated(opts *bind.FilterOpts, amount []*big.Int) (*FluxAggregatorAvailableFundsUpdatedIterator, error) + + WatchAvailableFundsUpdated(opts *bind.WatchOpts, sink chan<- *FluxAggregatorAvailableFundsUpdated, amount []*big.Int) (event.Subscription, error) + + ParseAvailableFundsUpdated(log types.Log) (*FluxAggregatorAvailableFundsUpdated, error) + + FilterNewRound(opts *bind.FilterOpts, roundId 
[]*big.Int, startedBy []common.Address) (*FluxAggregatorNewRoundIterator, error) + + WatchNewRound(opts *bind.WatchOpts, sink chan<- *FluxAggregatorNewRound, roundId []*big.Int, startedBy []common.Address) (event.Subscription, error) + + ParseNewRound(log types.Log) (*FluxAggregatorNewRound, error) + + FilterOracleAdminUpdateRequested(opts *bind.FilterOpts, oracle []common.Address) (*FluxAggregatorOracleAdminUpdateRequestedIterator, error) + + WatchOracleAdminUpdateRequested(opts *bind.WatchOpts, sink chan<- *FluxAggregatorOracleAdminUpdateRequested, oracle []common.Address) (event.Subscription, error) + + ParseOracleAdminUpdateRequested(log types.Log) (*FluxAggregatorOracleAdminUpdateRequested, error) + + FilterOracleAdminUpdated(opts *bind.FilterOpts, oracle []common.Address, newAdmin []common.Address) (*FluxAggregatorOracleAdminUpdatedIterator, error) + + WatchOracleAdminUpdated(opts *bind.WatchOpts, sink chan<- *FluxAggregatorOracleAdminUpdated, oracle []common.Address, newAdmin []common.Address) (event.Subscription, error) + + ParseOracleAdminUpdated(log types.Log) (*FluxAggregatorOracleAdminUpdated, error) + + FilterOraclePermissionsUpdated(opts *bind.FilterOpts, oracle []common.Address, whitelisted []bool) (*FluxAggregatorOraclePermissionsUpdatedIterator, error) + + WatchOraclePermissionsUpdated(opts *bind.WatchOpts, sink chan<- *FluxAggregatorOraclePermissionsUpdated, oracle []common.Address, whitelisted []bool) (event.Subscription, error) + + ParseOraclePermissionsUpdated(log types.Log) (*FluxAggregatorOraclePermissionsUpdated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FluxAggregatorOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FluxAggregatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) 
(*FluxAggregatorOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FluxAggregatorOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FluxAggregatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*FluxAggregatorOwnershipTransferred, error) + + FilterRequesterPermissionsSet(opts *bind.FilterOpts, requester []common.Address) (*FluxAggregatorRequesterPermissionsSetIterator, error) + + WatchRequesterPermissionsSet(opts *bind.WatchOpts, sink chan<- *FluxAggregatorRequesterPermissionsSet, requester []common.Address) (event.Subscription, error) + + ParseRequesterPermissionsSet(log types.Log) (*FluxAggregatorRequesterPermissionsSet, error) + + FilterRoundDetailsUpdated(opts *bind.FilterOpts, paymentAmount []*big.Int, minSubmissionCount []uint32, maxSubmissionCount []uint32) (*FluxAggregatorRoundDetailsUpdatedIterator, error) + + WatchRoundDetailsUpdated(opts *bind.WatchOpts, sink chan<- *FluxAggregatorRoundDetailsUpdated, paymentAmount []*big.Int, minSubmissionCount []uint32, maxSubmissionCount []uint32) (event.Subscription, error) + + ParseRoundDetailsUpdated(log types.Log) (*FluxAggregatorRoundDetailsUpdated, error) + + FilterSubmissionReceived(opts *bind.FilterOpts, submission []*big.Int, round []uint32, oracle []common.Address) (*FluxAggregatorSubmissionReceivedIterator, error) + + WatchSubmissionReceived(opts *bind.WatchOpts, sink chan<- *FluxAggregatorSubmissionReceived, submission []*big.Int, round []uint32, oracle []common.Address) (event.Subscription, error) + + ParseSubmissionReceived(log types.Log) (*FluxAggregatorSubmissionReceived, error) + + FilterValidatorUpdated(opts *bind.FilterOpts, previous []common.Address, current []common.Address) (*FluxAggregatorValidatorUpdatedIterator, error) + + WatchValidatorUpdated(opts *bind.WatchOpts, sink 
chan<- *FluxAggregatorValidatorUpdated, previous []common.Address, current []common.Address) (event.Subscription, error) + + ParseValidatorUpdated(log types.Log) (*FluxAggregatorValidatorUpdated, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/functions_billing_registry_events_mock/functions_billing_registry_events_mock.go b/core/gethwrappers/generated/functions_billing_registry_events_mock/functions_billing_registry_events_mock.go new file mode 100644 index 00000000..c51d9a35 --- /dev/null +++ b/core/gethwrappers/generated/functions_billing_registry_events_mock/functions_billing_registry_events_mock.go @@ -0,0 +1,2937 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package functions_billing_registry_events_mock + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type FunctionsBillingRegistryEventsMockCommitment struct { + SubscriptionId uint64 + Client common.Address + GasLimit uint32 + GasPrice *big.Int + Don common.Address + DonFee *big.Int + RegistryFee *big.Int + EstimatedCost *big.Int + Timestamp *big.Int +} + +var FunctionsBillingRegistryEventsMockMetaData = &bind.MetaData{ + ABI: 
"[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"signerPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"transmitterPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalCost\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"name\":\"BillingEnd\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"don\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"donFee\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"registryFee\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"estimatedCost\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structFunctionsBillingRegistryEventsMock.Commitment\",\"name\":\"commitment\",\"type\":\"tuple\"}],\"name\":\"BillingStart\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"max
GasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasOverhead\",\"type\":\"uint32\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"FundsRecovered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"RequestTimedOut\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":fa
lse,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"SubscriptionCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"SubscriptionCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"SubscriptionFunded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"addres
s\"}],\"name\":\"SubscriptionOwnerTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"emitAuthorizedSendersChanged\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint96\",\"name\":\"signerPayment\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"transmitterPayment\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"totalCost\",\"type\":\"uint96\"},{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"name\":\"emitBillingEnd\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"don\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"donFee\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"registryFee\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"estimatedCost\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"}],\"internalType\":\"structFunctionsBillingRegistryEventsMock.Commitment\",\"name\":\"commitment\",\"type\":\"tuple\"}],\"name\":\"emitBillingStart\",\
"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint256\"},{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"internalType\":\"uint32\",\"name\":\"gasOverhead\",\"type\":\"uint32\"}],\"name\":\"emitConfigSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"emitFundsRecovered\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"emitInitialized\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferred\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"emitPaused\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"emitRequestTimedOut\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":
\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"emitSubscriptionCanceled\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"emitSubscriptionConsumerAdded\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"emitSubscriptionConsumerRemoved\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"emitSubscriptionCreated\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"oldBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"emitSubscriptionFunded\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitSubscriptionOwnerTransferRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},
{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitSubscriptionOwnerTransferred\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"emitUnpaused\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b50610eae806100206000396000f3fe608060405234801561001057600080fd5b50600436106101365760003560e01c8063a5257226116100b2578063dde69b3f11610081578063e2cab57b11610066578063e2cab57b1461025a578063e9c6260f1461026d578063f7420bc21461028057600080fd5b8063dde69b3f14610234578063e0f6eff11461024757600080fd5b8063a5257226146101e8578063b019b4e8146101fb578063bef9e1831461020e578063c1d2ad191461022157600080fd5b8063689300ea116101095780637be5c756116100ee5780637be5c756146101af5780637e1b44c0146101c25780639ec3ce4b146101d557600080fd5b8063689300ea14610189578063735bb0821461019c57600080fd5b80632d6d80b31461013b5780633f70afb6146101505780634bf6a80d14610163578063675b924414610176575b600080fd5b61014e61014936600461090f565b610293565b005b61014e61015e366004610ba1565b6102d0565b61014e610171366004610bbd565b61032a565b61014e610184366004610ba1565b61038c565b61014e6101973660046108e5565b6103de565b61014e6101aa366004610b4a565b61042a565b61014e6101bd366004610890565b610487565b61014e6101d03660046109d5565b6104d4565b61014e6101e3366004610890565b610502565b61014e6101f6366004610ba1565b610548565b61014e6102093660046108b2565b61059a565b61014e61021c366004610c6f565b6105f8565b61014e61022f366004610ad4565b61062b565b61014e610242366004610c00565b61069e565b61014e610255366004610bbd565b6106f6565b61014e610268366004610c3c565b61074f565b61014e61027b3660046109ee565b610791565b61014e61028e3660046108b2565b6107c1565b7ff263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a082826040516102c4929190610c92565b60405180910390a15050565b60405173ffffffffffffffffffffffffffffffffffffffff8216815267ffffffffffffffff8316907f464722b4166576d3dcbba877b
999bc35cf911f4eaf434b7eba68fa113951d0bf906020015b60405180910390a25050565b6040805173ffffffffffffffffffffffffffffffffffffffff80851682528316602082015267ffffffffffffffff8516917f6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f091015b60405180910390a2505050565b60405173ffffffffffffffffffffffffffffffffffffffff8216815267ffffffffffffffff8316907f43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e09060200161031e565b6040805173ffffffffffffffffffffffffffffffffffffffff84168152602081018390527f59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b43660091016102c4565b6040805163ffffffff87811682528681166020830152818301869052606082018590528316608082015290517f24d3d934adfef9b9029d6ffa463c07d0139ed47d26ee23506f85ece2879d2bd49181900360a00190a15050505050565b60405173ffffffffffffffffffffffffffffffffffffffff821681527f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258906020015b60405180910390a150565b60405181907ff1ca1e9147be737b04a2b018a79405f687a97de8dd8a2559bbe62357343af41490600090a250565b60405173ffffffffffffffffffffffffffffffffffffffff821681527f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa906020016104c9565b60405173ffffffffffffffffffffffffffffffffffffffff8216815267ffffffffffffffff8316907f182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b9060200161031e565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b60405160ff821681527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498906020016104c9565b6040805167ffffffffffffffff871681526bffffffffffffffffffffffff868116602083015285811682840152841660608201528215156080820152905187917fc8dc973332de19a5f71b6026983110e9c2e04b0c98b87eb771ccb78607fd114f919081900360a00190a2505050505050565b6040805173ffffffffffffffffffffffffffffffffffffffff841681526020810183905267ffffffffffffffff8516917fe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd49
815910161037f565b6040805173ffffffffffffffffffffffffffffffffffffffff80851682528316602082015267ffffffffffffffff8516917f69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be910161037f565b604080518381526020810183905267ffffffffffffffff8516917fd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f8910161037f565b817f99f7f4e65b4b9fbabd4e357c47ed3099b36e57ecd3a43e84662f34c207d0ebe48260405161031e9190610d09565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127860405160405180910390a35050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461084357600080fd5b919050565b803563ffffffff8116811461084357600080fd5b803567ffffffffffffffff8116811461084357600080fd5b80356bffffffffffffffffffffffff8116811461084357600080fd5b6000602082840312156108a257600080fd5b6108ab8261081f565b9392505050565b600080604083850312156108c557600080fd5b6108ce8361081f565b91506108dc6020840161081f565b90509250929050565b600080604083850312156108f857600080fd5b6109018361081f565b946020939093013593505050565b6000806040838503121561092257600080fd5b823567ffffffffffffffff8082111561093a57600080fd5b818501915085601f83011261094e57600080fd5b813560208282111561096257610962610e72565b8160051b9250610973818401610e23565b8281528181019085830185870184018b101561098e57600080fd5b600096505b848710156109b8576109a48161081f565b835260019690960195918301918301610993565b5096506109c8905087820161081f565b9450505050509250929050565b6000602082840312156109e757600080fd5b5035919050565b600080828403610140811215610a0357600080fd5b83359250610120807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe083011215610a3957600080fd5b610a41610df9565b9150610a4f6020860161085c565b8252610a5d6040860161081f565b6020830152610a6e60608601610848565b604083015260808501356060830152610a8960a0860161081f565b6080830152610a9a60c08601610874565b60a0830152610aab60e08601610874565b60c0830152610100610abe818701610874565b60e0840152940135938101939093525092909150565b600
08060008060008060c08789031215610aed57600080fd5b86359550610afd6020880161085c565b9450610b0b60408801610874565b9350610b1960608801610874565b9250610b2760808801610874565b915060a08701358015158114610b3c57600080fd5b809150509295509295509295565b600080600080600060a08688031215610b6257600080fd5b610b6b86610848565b9450610b7960208701610848565b93506040860135925060608601359150610b9560808701610848565b90509295509295909350565b60008060408385031215610bb457600080fd5b6108ce8361085c565b600080600060608486031215610bd257600080fd5b610bdb8461085c565b9250610be96020850161081f565b9150610bf76040850161081f565b90509250925092565b600080600060608486031215610c1557600080fd5b610c1e8461085c565b9250610c2c6020850161081f565b9150604084013590509250925092565b600080600060608486031215610c5157600080fd5b610c5a8461085c565b95602085013595506040909401359392505050565b600060208284031215610c8157600080fd5b813560ff811681146108ab57600080fd5b604080825283519082018190526000906020906060840190828701845b82811015610ce157815173ffffffffffffffffffffffffffffffffffffffff1684529284019290840190600101610caf565b50505073ffffffffffffffffffffffffffffffffffffffff9490941692019190915250919050565b60006101208201905067ffffffffffffffff835116825273ffffffffffffffffffffffffffffffffffffffff60208401511660208301526040830151610d57604084018263ffffffff169052565b50606083015160608301526080830151610d89608084018273ffffffffffffffffffffffffffffffffffffffff169052565b5060a0830151610da960a08401826bffffffffffffffffffffffff169052565b5060c0830151610dc960c08401826bffffffffffffffffffffffff169052565b5060e0830151610de960e08401826bffffffffffffffffffffffff169052565b5061010092830151919092015290565b604051610120810167ffffffffffffffff81118282101715610e1d57610e1d610e72565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610e6a57610e6a610e72565b604052919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var 
FunctionsBillingRegistryEventsMockABI = FunctionsBillingRegistryEventsMockMetaData.ABI + +var FunctionsBillingRegistryEventsMockBin = FunctionsBillingRegistryEventsMockMetaData.Bin + +func DeployFunctionsBillingRegistryEventsMock(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *FunctionsBillingRegistryEventsMock, error) { + parsed, err := FunctionsBillingRegistryEventsMockMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(FunctionsBillingRegistryEventsMockBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &FunctionsBillingRegistryEventsMock{address: address, abi: *parsed, FunctionsBillingRegistryEventsMockCaller: FunctionsBillingRegistryEventsMockCaller{contract: contract}, FunctionsBillingRegistryEventsMockTransactor: FunctionsBillingRegistryEventsMockTransactor{contract: contract}, FunctionsBillingRegistryEventsMockFilterer: FunctionsBillingRegistryEventsMockFilterer{contract: contract}}, nil +} + +type FunctionsBillingRegistryEventsMock struct { + address common.Address + abi abi.ABI + FunctionsBillingRegistryEventsMockCaller + FunctionsBillingRegistryEventsMockTransactor + FunctionsBillingRegistryEventsMockFilterer +} + +type FunctionsBillingRegistryEventsMockCaller struct { + contract *bind.BoundContract +} + +type FunctionsBillingRegistryEventsMockTransactor struct { + contract *bind.BoundContract +} + +type FunctionsBillingRegistryEventsMockFilterer struct { + contract *bind.BoundContract +} + +type FunctionsBillingRegistryEventsMockSession struct { + Contract *FunctionsBillingRegistryEventsMock + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type FunctionsBillingRegistryEventsMockCallerSession struct { + Contract 
*FunctionsBillingRegistryEventsMockCaller + CallOpts bind.CallOpts +} + +type FunctionsBillingRegistryEventsMockTransactorSession struct { + Contract *FunctionsBillingRegistryEventsMockTransactor + TransactOpts bind.TransactOpts +} + +type FunctionsBillingRegistryEventsMockRaw struct { + Contract *FunctionsBillingRegistryEventsMock +} + +type FunctionsBillingRegistryEventsMockCallerRaw struct { + Contract *FunctionsBillingRegistryEventsMockCaller +} + +type FunctionsBillingRegistryEventsMockTransactorRaw struct { + Contract *FunctionsBillingRegistryEventsMockTransactor +} + +func NewFunctionsBillingRegistryEventsMock(address common.Address, backend bind.ContractBackend) (*FunctionsBillingRegistryEventsMock, error) { + abi, err := abi.JSON(strings.NewReader(FunctionsBillingRegistryEventsMockABI)) + if err != nil { + return nil, err + } + contract, err := bindFunctionsBillingRegistryEventsMock(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMock{address: address, abi: abi, FunctionsBillingRegistryEventsMockCaller: FunctionsBillingRegistryEventsMockCaller{contract: contract}, FunctionsBillingRegistryEventsMockTransactor: FunctionsBillingRegistryEventsMockTransactor{contract: contract}, FunctionsBillingRegistryEventsMockFilterer: FunctionsBillingRegistryEventsMockFilterer{contract: contract}}, nil +} + +func NewFunctionsBillingRegistryEventsMockCaller(address common.Address, caller bind.ContractCaller) (*FunctionsBillingRegistryEventsMockCaller, error) { + contract, err := bindFunctionsBillingRegistryEventsMock(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockCaller{contract: contract}, nil +} + +func NewFunctionsBillingRegistryEventsMockTransactor(address common.Address, transactor bind.ContractTransactor) (*FunctionsBillingRegistryEventsMockTransactor, error) { + contract, err := bindFunctionsBillingRegistryEventsMock(address, nil, 
transactor, nil) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockTransactor{contract: contract}, nil +} + +func NewFunctionsBillingRegistryEventsMockFilterer(address common.Address, filterer bind.ContractFilterer) (*FunctionsBillingRegistryEventsMockFilterer, error) { + contract, err := bindFunctionsBillingRegistryEventsMock(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockFilterer{contract: contract}, nil +} + +func bindFunctionsBillingRegistryEventsMock(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FunctionsBillingRegistryEventsMockMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsBillingRegistryEventsMock.Contract.FunctionsBillingRegistryEventsMockCaller.contract.Call(opts, result, method, params...) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.FunctionsBillingRegistryEventsMockTransactor.contract.Transfer(opts) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.FunctionsBillingRegistryEventsMockTransactor.contract.Transact(opts, method, params...) 
+} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsBillingRegistryEventsMock.Contract.contract.Call(opts, result, method, params...) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.contract.Transfer(opts) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.contract.Transact(opts, method, params...) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitAuthorizedSendersChanged(opts *bind.TransactOpts, senders []common.Address, changedBy common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitAuthorizedSendersChanged", senders, changedBy) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitAuthorizedSendersChanged(senders []common.Address, changedBy common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitAuthorizedSendersChanged(&_FunctionsBillingRegistryEventsMock.TransactOpts, senders, changedBy) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitAuthorizedSendersChanged(senders []common.Address, changedBy common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitAuthorizedSendersChanged(&_FunctionsBillingRegistryEventsMock.TransactOpts, senders, changedBy) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) 
EmitBillingEnd(opts *bind.TransactOpts, requestId [32]byte, subscriptionId uint64, signerPayment *big.Int, transmitterPayment *big.Int, totalCost *big.Int, success bool) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitBillingEnd", requestId, subscriptionId, signerPayment, transmitterPayment, totalCost, success) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitBillingEnd(requestId [32]byte, subscriptionId uint64, signerPayment *big.Int, transmitterPayment *big.Int, totalCost *big.Int, success bool) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitBillingEnd(&_FunctionsBillingRegistryEventsMock.TransactOpts, requestId, subscriptionId, signerPayment, transmitterPayment, totalCost, success) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitBillingEnd(requestId [32]byte, subscriptionId uint64, signerPayment *big.Int, transmitterPayment *big.Int, totalCost *big.Int, success bool) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitBillingEnd(&_FunctionsBillingRegistryEventsMock.TransactOpts, requestId, subscriptionId, signerPayment, transmitterPayment, totalCost, success) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitBillingStart(opts *bind.TransactOpts, requestId [32]byte, commitment FunctionsBillingRegistryEventsMockCommitment) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitBillingStart", requestId, commitment) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitBillingStart(requestId [32]byte, commitment FunctionsBillingRegistryEventsMockCommitment) (*types.Transaction, error) { + return 
_FunctionsBillingRegistryEventsMock.Contract.EmitBillingStart(&_FunctionsBillingRegistryEventsMock.TransactOpts, requestId, commitment) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitBillingStart(requestId [32]byte, commitment FunctionsBillingRegistryEventsMockCommitment) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitBillingStart(&_FunctionsBillingRegistryEventsMock.TransactOpts, requestId, commitment) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitConfigSet(opts *bind.TransactOpts, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation *big.Int, fallbackWeiPerUnitLink *big.Int, gasOverhead uint32) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitConfigSet", maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, gasOverhead) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitConfigSet(maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation *big.Int, fallbackWeiPerUnitLink *big.Int, gasOverhead uint32) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitConfigSet(&_FunctionsBillingRegistryEventsMock.TransactOpts, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, gasOverhead) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitConfigSet(maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation *big.Int, fallbackWeiPerUnitLink *big.Int, gasOverhead uint32) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitConfigSet(&_FunctionsBillingRegistryEventsMock.TransactOpts, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, gasOverhead) +} + +func 
(_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitFundsRecovered(opts *bind.TransactOpts, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitFundsRecovered", to, amount) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitFundsRecovered(to common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitFundsRecovered(&_FunctionsBillingRegistryEventsMock.TransactOpts, to, amount) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitFundsRecovered(to common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitFundsRecovered(&_FunctionsBillingRegistryEventsMock.TransactOpts, to, amount) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitInitialized(opts *bind.TransactOpts, version uint8) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitInitialized", version) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitInitialized(version uint8) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitInitialized(&_FunctionsBillingRegistryEventsMock.TransactOpts, version) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitInitialized(version uint8) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitInitialized(&_FunctionsBillingRegistryEventsMock.TransactOpts, version) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, 
error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitOwnershipTransferRequested", from, to) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitOwnershipTransferRequested(&_FunctionsBillingRegistryEventsMock.TransactOpts, from, to) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitOwnershipTransferRequested(&_FunctionsBillingRegistryEventsMock.TransactOpts, from, to) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitOwnershipTransferred", from, to) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitOwnershipTransferred(&_FunctionsBillingRegistryEventsMock.TransactOpts, from, to) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitOwnershipTransferred(&_FunctionsBillingRegistryEventsMock.TransactOpts, from, to) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitPaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) { + return 
_FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitPaused", account) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitPaused(account common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitPaused(&_FunctionsBillingRegistryEventsMock.TransactOpts, account) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitPaused(account common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitPaused(&_FunctionsBillingRegistryEventsMock.TransactOpts, account) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitRequestTimedOut(opts *bind.TransactOpts, requestId [32]byte) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitRequestTimedOut", requestId) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitRequestTimedOut(requestId [32]byte) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitRequestTimedOut(&_FunctionsBillingRegistryEventsMock.TransactOpts, requestId) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitRequestTimedOut(requestId [32]byte) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitRequestTimedOut(&_FunctionsBillingRegistryEventsMock.TransactOpts, requestId) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitSubscriptionCanceled(opts *bind.TransactOpts, subscriptionId uint64, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitSubscriptionCanceled", subscriptionId, to, amount) +} + +func (_FunctionsBillingRegistryEventsMock 
*FunctionsBillingRegistryEventsMockSession) EmitSubscriptionCanceled(subscriptionId uint64, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionCanceled(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, to, amount) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitSubscriptionCanceled(subscriptionId uint64, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionCanceled(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, to, amount) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitSubscriptionConsumerAdded(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitSubscriptionConsumerAdded", subscriptionId, consumer) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitSubscriptionConsumerAdded(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionConsumerAdded(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, consumer) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitSubscriptionConsumerAdded(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionConsumerAdded(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, consumer) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitSubscriptionConsumerRemoved(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, 
error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitSubscriptionConsumerRemoved", subscriptionId, consumer) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitSubscriptionConsumerRemoved(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionConsumerRemoved(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, consumer) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitSubscriptionConsumerRemoved(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionConsumerRemoved(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, consumer) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitSubscriptionCreated(opts *bind.TransactOpts, subscriptionId uint64, owner common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitSubscriptionCreated", subscriptionId, owner) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitSubscriptionCreated(subscriptionId uint64, owner common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionCreated(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, owner) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitSubscriptionCreated(subscriptionId uint64, owner common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionCreated(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, owner) +} + +func (_FunctionsBillingRegistryEventsMock 
*FunctionsBillingRegistryEventsMockTransactor) EmitSubscriptionFunded(opts *bind.TransactOpts, subscriptionId uint64, oldBalance *big.Int, newBalance *big.Int) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitSubscriptionFunded", subscriptionId, oldBalance, newBalance) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitSubscriptionFunded(subscriptionId uint64, oldBalance *big.Int, newBalance *big.Int) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionFunded(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, oldBalance, newBalance) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitSubscriptionFunded(subscriptionId uint64, oldBalance *big.Int, newBalance *big.Int) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionFunded(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, oldBalance, newBalance) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitSubscriptionOwnerTransferRequested(opts *bind.TransactOpts, subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitSubscriptionOwnerTransferRequested", subscriptionId, from, to) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitSubscriptionOwnerTransferRequested(subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionOwnerTransferRequested(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, from, to) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) 
EmitSubscriptionOwnerTransferRequested(subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionOwnerTransferRequested(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, from, to) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitSubscriptionOwnerTransferred(opts *bind.TransactOpts, subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitSubscriptionOwnerTransferred", subscriptionId, from, to) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitSubscriptionOwnerTransferred(subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionOwnerTransferred(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, from, to) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitSubscriptionOwnerTransferred(subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitSubscriptionOwnerTransferred(&_FunctionsBillingRegistryEventsMock.TransactOpts, subscriptionId, from, to) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactor) EmitUnpaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.contract.Transact(opts, "emitUnpaused", account) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockSession) EmitUnpaused(account common.Address) (*types.Transaction, error) { + return 
_FunctionsBillingRegistryEventsMock.Contract.EmitUnpaused(&_FunctionsBillingRegistryEventsMock.TransactOpts, account) +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockTransactorSession) EmitUnpaused(account common.Address) (*types.Transaction, error) { + return _FunctionsBillingRegistryEventsMock.Contract.EmitUnpaused(&_FunctionsBillingRegistryEventsMock.TransactOpts, account) +} + +type FunctionsBillingRegistryEventsMockAuthorizedSendersChangedIterator struct { + Event *FunctionsBillingRegistryEventsMockAuthorizedSendersChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockAuthorizedSendersChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockAuthorizedSendersChangedIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockAuthorizedSendersChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockAuthorizedSendersChanged struct { + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) 
FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*FunctionsBillingRegistryEventsMockAuthorizedSendersChangedIterator, error) { + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockAuthorizedSendersChangedIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "AuthorizedSendersChanged", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockAuthorizedSendersChanged) (event.Subscription, error) { + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockAuthorizedSendersChanged) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseAuthorizedSendersChanged(log types.Log) (*FunctionsBillingRegistryEventsMockAuthorizedSendersChanged, error) { + event := new(FunctionsBillingRegistryEventsMockAuthorizedSendersChanged) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockBillingEndIterator struct { + Event 
*FunctionsBillingRegistryEventsMockBillingEnd + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockBillingEndIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockBillingEnd) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockBillingEnd) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockBillingEndIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockBillingEndIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockBillingEnd struct { + RequestId [32]byte + SubscriptionId uint64 + SignerPayment *big.Int + TransmitterPayment *big.Int + TotalCost *big.Int + Success bool + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterBillingEnd(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsBillingRegistryEventsMockBillingEndIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "BillingEnd", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockBillingEndIterator{contract: _FunctionsBillingRegistryEventsMock.contract, 
event: "BillingEnd", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchBillingEnd(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockBillingEnd, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "BillingEnd", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockBillingEnd) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "BillingEnd", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseBillingEnd(log types.Log) (*FunctionsBillingRegistryEventsMockBillingEnd, error) { + event := new(FunctionsBillingRegistryEventsMockBillingEnd) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "BillingEnd", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockBillingStartIterator struct { + Event *FunctionsBillingRegistryEventsMockBillingStart + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockBillingStartIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(FunctionsBillingRegistryEventsMockBillingStart) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockBillingStart) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockBillingStartIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockBillingStartIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockBillingStart struct { + RequestId [32]byte + Commitment FunctionsBillingRegistryEventsMockCommitment + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterBillingStart(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsBillingRegistryEventsMockBillingStartIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "BillingStart", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockBillingStartIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "BillingStart", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchBillingStart(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockBillingStart, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = 
append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "BillingStart", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockBillingStart) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "BillingStart", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseBillingStart(log types.Log) (*FunctionsBillingRegistryEventsMockBillingStart, error) { + event := new(FunctionsBillingRegistryEventsMockBillingStart) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "BillingStart", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockConfigSetIterator struct { + Event *FunctionsBillingRegistryEventsMockConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail 
= err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockConfigSetIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockConfigSet struct { + MaxGasLimit uint32 + StalenessSeconds uint32 + GasAfterPaymentCalculation *big.Int + FallbackWeiPerUnitLink *big.Int + GasOverhead uint32 + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterConfigSet(opts *bind.FilterOpts) (*FunctionsBillingRegistryEventsMockConfigSetIterator, error) { + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockConfigSetIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockConfigSet) (event.Subscription, error) { + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockConfigSet) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func 
(_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseConfigSet(log types.Log) (*FunctionsBillingRegistryEventsMockConfigSet, error) { + event := new(FunctionsBillingRegistryEventsMockConfigSet) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockFundsRecoveredIterator struct { + Event *FunctionsBillingRegistryEventsMockFundsRecovered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockFundsRecoveredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockFundsRecoveredIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockFundsRecoveredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockFundsRecovered struct { + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterFundsRecovered(opts *bind.FilterOpts) (*FunctionsBillingRegistryEventsMockFundsRecoveredIterator, error) { + + logs, sub, 
err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockFundsRecoveredIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "FundsRecovered", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockFundsRecovered) (event.Subscription, error) { + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockFundsRecovered) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseFundsRecovered(log types.Log) (*FunctionsBillingRegistryEventsMockFundsRecovered, error) { + event := new(FunctionsBillingRegistryEventsMockFundsRecovered) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockInitializedIterator struct { + Event *FunctionsBillingRegistryEventsMockInitialized + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockInitializedIterator) Next() bool { + + if it.fail != nil { + 
return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockInitializedIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockInitializedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockInitialized struct { + Version uint8 + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterInitialized(opts *bind.FilterOpts) (*FunctionsBillingRegistryEventsMockInitializedIterator, error) { + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockInitializedIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "Initialized", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchInitialized(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockInitialized) (event.Subscription, error) { + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(FunctionsBillingRegistryEventsMockInitialized) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "Initialized", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseInitialized(log types.Log) (*FunctionsBillingRegistryEventsMockInitialized, error) { + event := new(FunctionsBillingRegistryEventsMockInitialized) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "Initialized", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockOwnershipTransferRequestedIterator struct { + Event *FunctionsBillingRegistryEventsMockOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockOwnershipTransferRequestedIterator) Error() error { + return 
it.fail +} + +func (it *FunctionsBillingRegistryEventsMockOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsBillingRegistryEventsMockOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockOwnershipTransferRequestedIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(FunctionsBillingRegistryEventsMockOwnershipTransferRequested) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseOwnershipTransferRequested(log types.Log) (*FunctionsBillingRegistryEventsMockOwnershipTransferRequested, error) { + event := new(FunctionsBillingRegistryEventsMockOwnershipTransferRequested) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockOwnershipTransferredIterator struct { + Event *FunctionsBillingRegistryEventsMockOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*FunctionsBillingRegistryEventsMockOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsBillingRegistryEventsMockOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockOwnershipTransferredIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(FunctionsBillingRegistryEventsMockOwnershipTransferred) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseOwnershipTransferred(log types.Log) (*FunctionsBillingRegistryEventsMockOwnershipTransferred, error) { + event := new(FunctionsBillingRegistryEventsMockOwnershipTransferred) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockPausedIterator struct { + Event *FunctionsBillingRegistryEventsMockPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockPausedIterator) Error() error { + return it.fail +} + +func (it 
*FunctionsBillingRegistryEventsMockPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockPaused struct { + Account common.Address + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterPaused(opts *bind.FilterOpts) (*FunctionsBillingRegistryEventsMockPausedIterator, error) { + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockPausedIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockPaused) (event.Subscription, error) { + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockPaused) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParsePaused(log types.Log) (*FunctionsBillingRegistryEventsMockPaused, error) { + event := new(FunctionsBillingRegistryEventsMockPaused) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
FunctionsBillingRegistryEventsMockRequestTimedOutIterator struct { + Event *FunctionsBillingRegistryEventsMockRequestTimedOut + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockRequestTimedOutIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockRequestTimedOut) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockRequestTimedOut) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockRequestTimedOutIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockRequestTimedOutIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockRequestTimedOut struct { + RequestId [32]byte + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterRequestTimedOut(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsBillingRegistryEventsMockRequestTimedOutIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "RequestTimedOut", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockRequestTimedOutIterator{contract: 
_FunctionsBillingRegistryEventsMock.contract, event: "RequestTimedOut", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchRequestTimedOut(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockRequestTimedOut, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "RequestTimedOut", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockRequestTimedOut) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "RequestTimedOut", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseRequestTimedOut(log types.Log) (*FunctionsBillingRegistryEventsMockRequestTimedOut, error) { + event := new(FunctionsBillingRegistryEventsMockRequestTimedOut) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "RequestTimedOut", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionCanceledIterator struct { + Event *FunctionsBillingRegistryEventsMockSubscriptionCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionCanceledIterator) Next() bool { + + if it.fail 
!= nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockSubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockSubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionCanceledIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionCanceled struct { + SubscriptionId uint64 + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterSubscriptionCanceled(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionCanceledIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "SubscriptionCanceled", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockSubscriptionCanceledIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "SubscriptionCanceled", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- 
*FunctionsBillingRegistryEventsMockSubscriptionCanceled, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "SubscriptionCanceled", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockSubscriptionCanceled) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseSubscriptionCanceled(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionCanceled, error) { + event := new(FunctionsBillingRegistryEventsMockSubscriptionCanceled) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionConsumerAddedIterator struct { + Event *FunctionsBillingRegistryEventsMockSubscriptionConsumerAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionConsumerAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(FunctionsBillingRegistryEventsMockSubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockSubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionConsumerAddedIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionConsumerAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionConsumerAdded struct { + SubscriptionId uint64 + Consumer common.Address + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionConsumerAddedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "SubscriptionConsumerAdded", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockSubscriptionConsumerAddedIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "SubscriptionConsumerAdded", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- 
*FunctionsBillingRegistryEventsMockSubscriptionConsumerAdded, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "SubscriptionConsumerAdded", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockSubscriptionConsumerAdded) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseSubscriptionConsumerAdded(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionConsumerAdded, error) { + event := new(FunctionsBillingRegistryEventsMockSubscriptionConsumerAdded) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionConsumerRemovedIterator struct { + Event *FunctionsBillingRegistryEventsMockSubscriptionConsumerRemoved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionConsumerRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(FunctionsBillingRegistryEventsMockSubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockSubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionConsumerRemovedIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionConsumerRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionConsumerRemoved struct { + SubscriptionId uint64 + Consumer common.Address + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionConsumerRemovedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "SubscriptionConsumerRemoved", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockSubscriptionConsumerRemovedIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "SubscriptionConsumerRemoved", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- 
*FunctionsBillingRegistryEventsMockSubscriptionConsumerRemoved, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "SubscriptionConsumerRemoved", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockSubscriptionConsumerRemoved) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseSubscriptionConsumerRemoved(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionConsumerRemoved, error) { + event := new(FunctionsBillingRegistryEventsMockSubscriptionConsumerRemoved) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionCreatedIterator struct { + Event *FunctionsBillingRegistryEventsMockSubscriptionCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(FunctionsBillingRegistryEventsMockSubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockSubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionCreatedIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionCreated struct { + SubscriptionId uint64 + Owner common.Address + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterSubscriptionCreated(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionCreatedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "SubscriptionCreated", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockSubscriptionCreatedIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "SubscriptionCreated", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockSubscriptionCreated, subscriptionId []uint64) (event.Subscription, error) { + + var 
subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "SubscriptionCreated", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockSubscriptionCreated) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseSubscriptionCreated(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionCreated, error) { + event := new(FunctionsBillingRegistryEventsMockSubscriptionCreated) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionFundedIterator struct { + Event *FunctionsBillingRegistryEventsMockSubscriptionFunded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionFundedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockSubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + 
default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockSubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionFundedIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionFundedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionFunded struct { + SubscriptionId uint64 + OldBalance *big.Int + NewBalance *big.Int + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterSubscriptionFunded(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionFundedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "SubscriptionFunded", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockSubscriptionFundedIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "SubscriptionFunded", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockSubscriptionFunded, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := 
_FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "SubscriptionFunded", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockSubscriptionFunded) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseSubscriptionFunded(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionFunded, error) { + event := new(FunctionsBillingRegistryEventsMockSubscriptionFunded) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequestedIterator struct { + Event *FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequested struct { + SubscriptionId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequestedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "SubscriptionOwnerTransferRequested", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequestedIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "SubscriptionOwnerTransferRequested", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequested, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = 
append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "SubscriptionOwnerTransferRequested", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequested) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseSubscriptionOwnerTransferRequested(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequested, error) { + event := new(FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequested) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferredIterator struct { + Event *FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferredIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferred struct { + SubscriptionId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferredIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "SubscriptionOwnerTransferred", subscriptionIdRule) + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferredIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "SubscriptionOwnerTransferred", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferred, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, 
subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "SubscriptionOwnerTransferred", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferred) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseSubscriptionOwnerTransferred(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferred, error) { + event := new(FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferred) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsBillingRegistryEventsMockUnpausedIterator struct { + Event *FunctionsBillingRegistryEventsMockUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsBillingRegistryEventsMockUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false 
+ } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsBillingRegistryEventsMockUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsBillingRegistryEventsMockUnpausedIterator) Error() error { + return it.fail +} + +func (it *FunctionsBillingRegistryEventsMockUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsBillingRegistryEventsMockUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) FilterUnpaused(opts *bind.FilterOpts) (*FunctionsBillingRegistryEventsMockUnpausedIterator, error) { + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &FunctionsBillingRegistryEventsMockUnpausedIterator{contract: _FunctionsBillingRegistryEventsMock.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockUnpaused) (event.Subscription, error) { + + logs, sub, err := _FunctionsBillingRegistryEventsMock.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsBillingRegistryEventsMockUnpaused) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case 
<-quit: + return nil + } + } + }), nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMockFilterer) ParseUnpaused(log types.Log) (*FunctionsBillingRegistryEventsMockUnpaused, error) { + event := new(FunctionsBillingRegistryEventsMockUnpaused) + if err := _FunctionsBillingRegistryEventsMock.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMock) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _FunctionsBillingRegistryEventsMock.abi.Events["AuthorizedSendersChanged"].ID: + return _FunctionsBillingRegistryEventsMock.ParseAuthorizedSendersChanged(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["BillingEnd"].ID: + return _FunctionsBillingRegistryEventsMock.ParseBillingEnd(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["BillingStart"].ID: + return _FunctionsBillingRegistryEventsMock.ParseBillingStart(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["ConfigSet"].ID: + return _FunctionsBillingRegistryEventsMock.ParseConfigSet(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["FundsRecovered"].ID: + return _FunctionsBillingRegistryEventsMock.ParseFundsRecovered(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["Initialized"].ID: + return _FunctionsBillingRegistryEventsMock.ParseInitialized(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["OwnershipTransferRequested"].ID: + return _FunctionsBillingRegistryEventsMock.ParseOwnershipTransferRequested(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["OwnershipTransferred"].ID: + return _FunctionsBillingRegistryEventsMock.ParseOwnershipTransferred(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["Paused"].ID: + return _FunctionsBillingRegistryEventsMock.ParsePaused(log) + case 
_FunctionsBillingRegistryEventsMock.abi.Events["RequestTimedOut"].ID: + return _FunctionsBillingRegistryEventsMock.ParseRequestTimedOut(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["SubscriptionCanceled"].ID: + return _FunctionsBillingRegistryEventsMock.ParseSubscriptionCanceled(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["SubscriptionConsumerAdded"].ID: + return _FunctionsBillingRegistryEventsMock.ParseSubscriptionConsumerAdded(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["SubscriptionConsumerRemoved"].ID: + return _FunctionsBillingRegistryEventsMock.ParseSubscriptionConsumerRemoved(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["SubscriptionCreated"].ID: + return _FunctionsBillingRegistryEventsMock.ParseSubscriptionCreated(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["SubscriptionFunded"].ID: + return _FunctionsBillingRegistryEventsMock.ParseSubscriptionFunded(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["SubscriptionOwnerTransferRequested"].ID: + return _FunctionsBillingRegistryEventsMock.ParseSubscriptionOwnerTransferRequested(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["SubscriptionOwnerTransferred"].ID: + return _FunctionsBillingRegistryEventsMock.ParseSubscriptionOwnerTransferred(log) + case _FunctionsBillingRegistryEventsMock.abi.Events["Unpaused"].ID: + return _FunctionsBillingRegistryEventsMock.ParseUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (FunctionsBillingRegistryEventsMockAuthorizedSendersChanged) Topic() common.Hash { + return common.HexToHash("0xf263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0") +} + +func (FunctionsBillingRegistryEventsMockBillingEnd) Topic() common.Hash { + return common.HexToHash("0xc8dc973332de19a5f71b6026983110e9c2e04b0c98b87eb771ccb78607fd114f") +} + +func (FunctionsBillingRegistryEventsMockBillingStart) Topic() common.Hash { + 
return common.HexToHash("0x99f7f4e65b4b9fbabd4e357c47ed3099b36e57ecd3a43e84662f34c207d0ebe4") +} + +func (FunctionsBillingRegistryEventsMockConfigSet) Topic() common.Hash { + return common.HexToHash("0x24d3d934adfef9b9029d6ffa463c07d0139ed47d26ee23506f85ece2879d2bd4") +} + +func (FunctionsBillingRegistryEventsMockFundsRecovered) Topic() common.Hash { + return common.HexToHash("0x59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600") +} + +func (FunctionsBillingRegistryEventsMockInitialized) Topic() common.Hash { + return common.HexToHash("0x7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498") +} + +func (FunctionsBillingRegistryEventsMockOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (FunctionsBillingRegistryEventsMockOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (FunctionsBillingRegistryEventsMockPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (FunctionsBillingRegistryEventsMockRequestTimedOut) Topic() common.Hash { + return common.HexToHash("0xf1ca1e9147be737b04a2b018a79405f687a97de8dd8a2559bbe62357343af414") +} + +func (FunctionsBillingRegistryEventsMockSubscriptionCanceled) Topic() common.Hash { + return common.HexToHash("0xe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd49815") +} + +func (FunctionsBillingRegistryEventsMockSubscriptionConsumerAdded) Topic() common.Hash { + return common.HexToHash("0x43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e0") +} + +func (FunctionsBillingRegistryEventsMockSubscriptionConsumerRemoved) Topic() common.Hash { + return common.HexToHash("0x182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b") +} + +func (FunctionsBillingRegistryEventsMockSubscriptionCreated) 
Topic() common.Hash { + return common.HexToHash("0x464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf") +} + +func (FunctionsBillingRegistryEventsMockSubscriptionFunded) Topic() common.Hash { + return common.HexToHash("0xd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f8") +} + +func (FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequested) Topic() common.Hash { + return common.HexToHash("0x69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be") +} + +func (FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferred) Topic() common.Hash { + return common.HexToHash("0x6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f0") +} + +func (FunctionsBillingRegistryEventsMockUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (_FunctionsBillingRegistryEventsMock *FunctionsBillingRegistryEventsMock) Address() common.Address { + return _FunctionsBillingRegistryEventsMock.address +} + +type FunctionsBillingRegistryEventsMockInterface interface { + EmitAuthorizedSendersChanged(opts *bind.TransactOpts, senders []common.Address, changedBy common.Address) (*types.Transaction, error) + + EmitBillingEnd(opts *bind.TransactOpts, requestId [32]byte, subscriptionId uint64, signerPayment *big.Int, transmitterPayment *big.Int, totalCost *big.Int, success bool) (*types.Transaction, error) + + EmitBillingStart(opts *bind.TransactOpts, requestId [32]byte, commitment FunctionsBillingRegistryEventsMockCommitment) (*types.Transaction, error) + + EmitConfigSet(opts *bind.TransactOpts, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation *big.Int, fallbackWeiPerUnitLink *big.Int, gasOverhead uint32) (*types.Transaction, error) + + EmitFundsRecovered(opts *bind.TransactOpts, to common.Address, amount *big.Int) (*types.Transaction, error) + + EmitInitialized(opts *bind.TransactOpts, version uint8) (*types.Transaction, error) + 
+ EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitPaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) + + EmitRequestTimedOut(opts *bind.TransactOpts, requestId [32]byte) (*types.Transaction, error) + + EmitSubscriptionCanceled(opts *bind.TransactOpts, subscriptionId uint64, to common.Address, amount *big.Int) (*types.Transaction, error) + + EmitSubscriptionConsumerAdded(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) + + EmitSubscriptionConsumerRemoved(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) + + EmitSubscriptionCreated(opts *bind.TransactOpts, subscriptionId uint64, owner common.Address) (*types.Transaction, error) + + EmitSubscriptionFunded(opts *bind.TransactOpts, subscriptionId uint64, oldBalance *big.Int, newBalance *big.Int) (*types.Transaction, error) + + EmitSubscriptionOwnerTransferRequested(opts *bind.TransactOpts, subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) + + EmitSubscriptionOwnerTransferred(opts *bind.TransactOpts, subscriptionId uint64, from common.Address, to common.Address) (*types.Transaction, error) + + EmitUnpaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) + + FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*FunctionsBillingRegistryEventsMockAuthorizedSendersChangedIterator, error) + + WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockAuthorizedSendersChanged) (event.Subscription, error) + + ParseAuthorizedSendersChanged(log types.Log) (*FunctionsBillingRegistryEventsMockAuthorizedSendersChanged, error) + + FilterBillingEnd(opts *bind.FilterOpts, requestId 
[][32]byte) (*FunctionsBillingRegistryEventsMockBillingEndIterator, error) + + WatchBillingEnd(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockBillingEnd, requestId [][32]byte) (event.Subscription, error) + + ParseBillingEnd(log types.Log) (*FunctionsBillingRegistryEventsMockBillingEnd, error) + + FilterBillingStart(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsBillingRegistryEventsMockBillingStartIterator, error) + + WatchBillingStart(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockBillingStart, requestId [][32]byte) (event.Subscription, error) + + ParseBillingStart(log types.Log) (*FunctionsBillingRegistryEventsMockBillingStart, error) + + FilterConfigSet(opts *bind.FilterOpts) (*FunctionsBillingRegistryEventsMockConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*FunctionsBillingRegistryEventsMockConfigSet, error) + + FilterFundsRecovered(opts *bind.FilterOpts) (*FunctionsBillingRegistryEventsMockFundsRecoveredIterator, error) + + WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockFundsRecovered) (event.Subscription, error) + + ParseFundsRecovered(log types.Log) (*FunctionsBillingRegistryEventsMockFundsRecovered, error) + + FilterInitialized(opts *bind.FilterOpts) (*FunctionsBillingRegistryEventsMockInitializedIterator, error) + + WatchInitialized(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockInitialized) (event.Subscription, error) + + ParseInitialized(log types.Log) (*FunctionsBillingRegistryEventsMockInitialized, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsBillingRegistryEventsMockOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- 
*FunctionsBillingRegistryEventsMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*FunctionsBillingRegistryEventsMockOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsBillingRegistryEventsMockOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*FunctionsBillingRegistryEventsMockOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*FunctionsBillingRegistryEventsMockPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*FunctionsBillingRegistryEventsMockPaused, error) + + FilterRequestTimedOut(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsBillingRegistryEventsMockRequestTimedOutIterator, error) + + WatchRequestTimedOut(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockRequestTimedOut, requestId [][32]byte) (event.Subscription, error) + + ParseRequestTimedOut(log types.Log) (*FunctionsBillingRegistryEventsMockRequestTimedOut, error) + + FilterSubscriptionCanceled(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionCanceledIterator, error) + + WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockSubscriptionCanceled, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionCanceled(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionCanceled, error) + + FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subscriptionId []uint64) 
(*FunctionsBillingRegistryEventsMockSubscriptionConsumerAddedIterator, error) + + WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockSubscriptionConsumerAdded, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerAdded(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionConsumerAdded, error) + + FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionConsumerRemovedIterator, error) + + WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockSubscriptionConsumerRemoved, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerRemoved(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionConsumerRemoved, error) + + FilterSubscriptionCreated(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionCreatedIterator, error) + + WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockSubscriptionCreated, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionCreated(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionCreated, error) + + FilterSubscriptionFunded(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionFundedIterator, error) + + WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockSubscriptionFunded, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionFunded(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionFunded, error) + + FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequestedIterator, error) + + WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- 
*FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequested, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferRequested(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferRequested, error) + + FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subscriptionId []uint64) (*FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferredIterator, error) + + WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferred, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferred(log types.Log) (*FunctionsBillingRegistryEventsMockSubscriptionOwnerTransferred, error) + + FilterUnpaused(opts *bind.FilterOpts) (*FunctionsBillingRegistryEventsMockUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *FunctionsBillingRegistryEventsMockUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*FunctionsBillingRegistryEventsMockUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/functions_oracle_events_mock/functions_oracle_events_mock.go b/core/gethwrappers/generated/functions_oracle_events_mock/functions_oracle_events_mock.go new file mode 100644 index 00000000..04ba7647 --- /dev/null +++ b/core/gethwrappers/generated/functions_oracle_events_mock/functions_oracle_events_mock.go @@ -0,0 +1,2308 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package functions_oracle_events_mock + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var FunctionsOracleEventsMockMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersActive\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersDeactive\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalTyp
e\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"InvalidRequestID\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestInitiator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"subscriptionOwner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"OracleRequest\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"OracleResponse\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},
{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"ResponseTransmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"reason\",\"type\":\"string\"}],\"name\":\"UserCallbackError\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"lowLevelData\",\"type\":\"bytes\"}],\"name\":\"UserCallbackRawError\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"emitAuthorizedSendersActive\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"emitAuthorizedSendersChanged\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"emitAuthorizedSendersDeactive\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\"
:\"uint64\"},{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"emitConfigSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"emitInitialized\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"emitInvalidRequestID\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"requestInitiator\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"subscriptionOwner\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"emitOracleRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"emitOracleResponse\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"
},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferred\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"emitResponseTransmitted\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"emitTransmitted\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"string\",\"name\":\"reason\",\"type\":\"string\"}],\"name\":\"emitUserCallbackError\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"lowLevelData\",\"type\":\"bytes\"}],\"name\":\"emitUserCallbackRawError\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610cd9806100206000396000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c8063b019b4e81161008c578063ddd5603c11610066578063ddd5603c146101af578063df4a9fe0146101c2578063e055cff0146101d5578063f7420bc2146101e857600080fd5b8063b019b4e814610176578063bef9e18314610189578063c9d3d0811461019c57600080fd5b80632d6d80b3116100c85780632d6d80b31461012a5780636446fe921461013d5780638eb62eb6146101505780639784f15a1461016357600080fd5b806317472dac146100ef57806327a88d59146101045780632a7f477b14610117575b600080fd5b6101026100fd36600461072f565b6101fb565b005b6101026101123660046107c2565b610248565b610102610125366004610886565b610276565b61010261013836600461077d565b6102b2565b61010261014b3660046107fe565b6102ef565b61010261015e3660046107c2565b610337565b61010261017136600461072f565b610365565b61010261018436600461074a565b6103ab565b610102610197366004610a3b565b610409565b6101026101aa3660046108cd565b61043c565b6101026101bd36600461091e565b61046c565b6101026101d03660046107db565b6104a7565b6101026101e3366004610941565b6104ef565b6101026101f636600461074a565b610541565b60405173ffffffffffffffffffffffffffffffffffffffff821681527fae51766a982895b0c444fc99fc1a560762b464d709e6c78376c85617f7eeb5ce906020015b60405180910390a150565b60405181907f9e9bc7616d42c2835d05ae617e508454e63b30b934be8aa932ebc125e0e58a6490600090a250565b817fe0b838ffe6ee22a0d3acf19a85db6a41b34a1ab739e2d6c759a2e42d95bdccb2826040516102a69190610ba6565b60405180910390a25050565b7ff263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a082826040516102e3929190610b6e565b60405180910390a15050565b857fa1ec73989d79578cd6f67d4f593ac3e0a4d1020e5c0164db52108d7ff785406c8686868686604051610327959493929190610b12565b60405180910390a2505050505050565b60405181907fa1c120e327c9ad8b075793878c88d59b8934b97ae37117faa3bb21616237f7be90600090a250565b60405173ffffffffffffffffffffffffffffffffffffffff821681527fea3828816a323b8d7ff49d755efd105e7719166d6c76fad97a28eee5eccc3d9a9060200161023d565b8073ffffffffffffffffffffffffffffffffffffffff168273fff
fffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b60405160ff821681527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200161023d565b817fb2931868c372fe17a25643458add467d60ec5c51125a99b7309f41f5bcd2da6c826040516102a69190610ba6565b6040805183815263ffffffff831660208201527fb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a6291016102e3565b60405173ffffffffffffffffffffffffffffffffffffffff8216815282907fdc941eddab34a6109ab77798299c6b1f035b125fd6f774d266ecbf9541d630a6906020016102a6565b7f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e0589898989898989898960405161052e99989796959493929190610bb9565b60405180910390a1505050505050505050565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127860405160405180910390a35050565b600067ffffffffffffffff8311156105b9576105b9610c9d565b6105ea60207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f86011601610c4e565b90508281528383830111156105fe57600080fd5b828260208301376000602084830101529392505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461063957600080fd5b919050565b600082601f83011261064f57600080fd5b8135602067ffffffffffffffff82111561066b5761066b610c9d565b8160051b61067a828201610c4e565b83815282810190868401838801850189101561069557600080fd5b600093505b858410156106bf576106ab81610615565b83526001939093019291840191840161069a565b50979650505050505050565b600082601f8301126106dc57600080fd5b6106eb8383356020850161059f565b9392505050565b803563ffffffff8116811461063957600080fd5b803567ffffffffffffffff8116811461063957600080fd5b803560ff8116811461063957600080fd5b60006020828403121561074157600080fd5b6106eb82610615565b6000806040838503121561075d57600080fd5b61076683610615565b915061077460208401610615565b90509250929050565b6000806040838503121561079057600080fd5b823567ffffffffffffffff8111156107a757600080fd5b6107b38582860
161063e565b92505061077460208401610615565b6000602082840312156107d457600080fd5b5035919050565b600080604083850312156107ee57600080fd5b8235915061077460208401610615565b60008060008060008060c0878903121561081757600080fd5b8635955061082760208801610615565b945061083560408801610615565b935061084360608801610706565b925061085160808801610615565b915060a087013567ffffffffffffffff81111561086d57600080fd5b61087989828a016106cb565b9150509295509295509295565b6000806040838503121561089957600080fd5b82359150602083013567ffffffffffffffff8111156108b757600080fd5b6108c3858286016106cb565b9150509250929050565b600080604083850312156108e057600080fd5b82359150602083013567ffffffffffffffff8111156108fe57600080fd5b8301601f8101851361090f57600080fd5b6108c38582356020840161059f565b6000806040838503121561093157600080fd5b82359150610774602084016106f2565b60008060008060008060008060006101208a8c03121561096057600080fd5b6109698a6106f2565b985060208a0135975061097e60408b01610706565b965060608a013567ffffffffffffffff8082111561099b57600080fd5b6109a78d838e0161063e565b975060808c01359150808211156109bd57600080fd5b6109c98d838e0161063e565b96506109d760a08d0161071e565b955060c08c01359150808211156109ed57600080fd5b6109f98d838e016106cb565b9450610a0760e08d01610706565b93506101008c0135915080821115610a1e57600080fd5b50610a2b8c828d016106cb565b9150509295985092959850929598565b600060208284031215610a4d57600080fd5b6106eb8261071e565b600081518084526020808501945080840160005b83811015610a9c57815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101610a6a565b509495945050505050565b6000815180845260005b81811015610acd57602081850181015186830182015201610ab1565b81811115610adf576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b600073ffffffffffffffffffffffffffffffffffffffff8088168352808716602084015267ffffffffffffffff8616604084015280851660608401525060a06080830152610b6360a0830184610aa7565b979650505050505050565b604081526000610b816040830185610a56565b905073fffffffffffffffffffffffffffffffffff
fffff831660208301529392505050565b6020815260006106eb6020830184610aa7565b600061012063ffffffff8c1683528a602084015267ffffffffffffffff808b166040850152816060850152610bf08285018b610a56565b91508382036080850152610c04828a610a56565b915060ff881660a085015283820360c0850152610c218288610aa7565b90861660e08501528381036101008501529050610c3e8185610aa7565b9c9b505050505050505050505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610c9557610c95610c9d565b604052919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var FunctionsOracleEventsMockABI = FunctionsOracleEventsMockMetaData.ABI + +var FunctionsOracleEventsMockBin = FunctionsOracleEventsMockMetaData.Bin + +func DeployFunctionsOracleEventsMock(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *FunctionsOracleEventsMock, error) { + parsed, err := FunctionsOracleEventsMockMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(FunctionsOracleEventsMockBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &FunctionsOracleEventsMock{address: address, abi: *parsed, FunctionsOracleEventsMockCaller: FunctionsOracleEventsMockCaller{contract: contract}, FunctionsOracleEventsMockTransactor: FunctionsOracleEventsMockTransactor{contract: contract}, FunctionsOracleEventsMockFilterer: FunctionsOracleEventsMockFilterer{contract: contract}}, nil +} + +type FunctionsOracleEventsMock struct { + address common.Address + abi abi.ABI + FunctionsOracleEventsMockCaller + FunctionsOracleEventsMockTransactor + FunctionsOracleEventsMockFilterer +} + +type FunctionsOracleEventsMockCaller struct { + contract 
*bind.BoundContract +} + +type FunctionsOracleEventsMockTransactor struct { + contract *bind.BoundContract +} + +type FunctionsOracleEventsMockFilterer struct { + contract *bind.BoundContract +} + +type FunctionsOracleEventsMockSession struct { + Contract *FunctionsOracleEventsMock + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type FunctionsOracleEventsMockCallerSession struct { + Contract *FunctionsOracleEventsMockCaller + CallOpts bind.CallOpts +} + +type FunctionsOracleEventsMockTransactorSession struct { + Contract *FunctionsOracleEventsMockTransactor + TransactOpts bind.TransactOpts +} + +type FunctionsOracleEventsMockRaw struct { + Contract *FunctionsOracleEventsMock +} + +type FunctionsOracleEventsMockCallerRaw struct { + Contract *FunctionsOracleEventsMockCaller +} + +type FunctionsOracleEventsMockTransactorRaw struct { + Contract *FunctionsOracleEventsMockTransactor +} + +func NewFunctionsOracleEventsMock(address common.Address, backend bind.ContractBackend) (*FunctionsOracleEventsMock, error) { + abi, err := abi.JSON(strings.NewReader(FunctionsOracleEventsMockABI)) + if err != nil { + return nil, err + } + contract, err := bindFunctionsOracleEventsMock(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMock{address: address, abi: abi, FunctionsOracleEventsMockCaller: FunctionsOracleEventsMockCaller{contract: contract}, FunctionsOracleEventsMockTransactor: FunctionsOracleEventsMockTransactor{contract: contract}, FunctionsOracleEventsMockFilterer: FunctionsOracleEventsMockFilterer{contract: contract}}, nil +} + +func NewFunctionsOracleEventsMockCaller(address common.Address, caller bind.ContractCaller) (*FunctionsOracleEventsMockCaller, error) { + contract, err := bindFunctionsOracleEventsMock(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockCaller{contract: contract}, nil +} + +func NewFunctionsOracleEventsMockTransactor(address 
common.Address, transactor bind.ContractTransactor) (*FunctionsOracleEventsMockTransactor, error) { + contract, err := bindFunctionsOracleEventsMock(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockTransactor{contract: contract}, nil +} + +func NewFunctionsOracleEventsMockFilterer(address common.Address, filterer bind.ContractFilterer) (*FunctionsOracleEventsMockFilterer, error) { + contract, err := bindFunctionsOracleEventsMock(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockFilterer{contract: contract}, nil +} + +func bindFunctionsOracleEventsMock(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FunctionsOracleEventsMockMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsOracleEventsMock.Contract.FunctionsOracleEventsMockCaller.contract.Call(opts, result, method, params...) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.FunctionsOracleEventsMockTransactor.contract.Transfer(opts) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.FunctionsOracleEventsMockTransactor.contract.Transact(opts, method, params...) 
+} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FunctionsOracleEventsMock.Contract.contract.Call(opts, result, method, params...) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.contract.Transfer(opts) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.contract.Transact(opts, method, params...) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactor) EmitAuthorizedSendersActive(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitAuthorizedSendersActive", account) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitAuthorizedSendersActive(account common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitAuthorizedSendersActive(&_FunctionsOracleEventsMock.TransactOpts, account) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitAuthorizedSendersActive(account common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitAuthorizedSendersActive(&_FunctionsOracleEventsMock.TransactOpts, account) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactor) EmitAuthorizedSendersChanged(opts *bind.TransactOpts, senders []common.Address, changedBy common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitAuthorizedSendersChanged", senders, changedBy) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) 
EmitAuthorizedSendersChanged(senders []common.Address, changedBy common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitAuthorizedSendersChanged(&_FunctionsOracleEventsMock.TransactOpts, senders, changedBy) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitAuthorizedSendersChanged(senders []common.Address, changedBy common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitAuthorizedSendersChanged(&_FunctionsOracleEventsMock.TransactOpts, senders, changedBy) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactor) EmitAuthorizedSendersDeactive(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitAuthorizedSendersDeactive", account) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitAuthorizedSendersDeactive(account common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitAuthorizedSendersDeactive(&_FunctionsOracleEventsMock.TransactOpts, account) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitAuthorizedSendersDeactive(account common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitAuthorizedSendersDeactive(&_FunctionsOracleEventsMock.TransactOpts, account) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactor) EmitConfigSet(opts *bind.TransactOpts, previousConfigBlockNumber uint32, configDigest [32]byte, configCount uint64, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitConfigSet", previousConfigBlockNumber, configDigest, configCount, signers, transmitters, f, onchainConfig, 
offchainConfigVersion, offchainConfig) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitConfigSet(previousConfigBlockNumber uint32, configDigest [32]byte, configCount uint64, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitConfigSet(&_FunctionsOracleEventsMock.TransactOpts, previousConfigBlockNumber, configDigest, configCount, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitConfigSet(previousConfigBlockNumber uint32, configDigest [32]byte, configCount uint64, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitConfigSet(&_FunctionsOracleEventsMock.TransactOpts, previousConfigBlockNumber, configDigest, configCount, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactor) EmitInitialized(opts *bind.TransactOpts, version uint8) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitInitialized", version) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitInitialized(version uint8) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitInitialized(&_FunctionsOracleEventsMock.TransactOpts, version) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitInitialized(version uint8) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitInitialized(&_FunctionsOracleEventsMock.TransactOpts, version) +} + +func (_FunctionsOracleEventsMock 
*FunctionsOracleEventsMockTransactor) EmitInvalidRequestID(opts *bind.TransactOpts, requestId [32]byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitInvalidRequestID", requestId) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitInvalidRequestID(requestId [32]byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitInvalidRequestID(&_FunctionsOracleEventsMock.TransactOpts, requestId) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitInvalidRequestID(requestId [32]byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitInvalidRequestID(&_FunctionsOracleEventsMock.TransactOpts, requestId) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactor) EmitOracleRequest(opts *bind.TransactOpts, requestId [32]byte, requestingContract common.Address, requestInitiator common.Address, subscriptionId uint64, subscriptionOwner common.Address, data []byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitOracleRequest", requestId, requestingContract, requestInitiator, subscriptionId, subscriptionOwner, data) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitOracleRequest(requestId [32]byte, requestingContract common.Address, requestInitiator common.Address, subscriptionId uint64, subscriptionOwner common.Address, data []byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitOracleRequest(&_FunctionsOracleEventsMock.TransactOpts, requestId, requestingContract, requestInitiator, subscriptionId, subscriptionOwner, data) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitOracleRequest(requestId [32]byte, requestingContract common.Address, requestInitiator common.Address, subscriptionId uint64, subscriptionOwner common.Address, data []byte) 
(*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitOracleRequest(&_FunctionsOracleEventsMock.TransactOpts, requestId, requestingContract, requestInitiator, subscriptionId, subscriptionOwner, data) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactor) EmitOracleResponse(opts *bind.TransactOpts, requestId [32]byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitOracleResponse", requestId) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitOracleResponse(requestId [32]byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitOracleResponse(&_FunctionsOracleEventsMock.TransactOpts, requestId) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitOracleResponse(requestId [32]byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitOracleResponse(&_FunctionsOracleEventsMock.TransactOpts, requestId) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactor) EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitOwnershipTransferRequested", from, to) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitOwnershipTransferRequested(&_FunctionsOracleEventsMock.TransactOpts, from, to) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitOwnershipTransferRequested(&_FunctionsOracleEventsMock.TransactOpts, from, to) +} + +func (_FunctionsOracleEventsMock 
*FunctionsOracleEventsMockTransactor) EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitOwnershipTransferred", from, to) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitOwnershipTransferred(&_FunctionsOracleEventsMock.TransactOpts, from, to) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitOwnershipTransferred(&_FunctionsOracleEventsMock.TransactOpts, from, to) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactor) EmitResponseTransmitted(opts *bind.TransactOpts, requestId [32]byte, transmitter common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitResponseTransmitted", requestId, transmitter) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitResponseTransmitted(requestId [32]byte, transmitter common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitResponseTransmitted(&_FunctionsOracleEventsMock.TransactOpts, requestId, transmitter) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitResponseTransmitted(requestId [32]byte, transmitter common.Address) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitResponseTransmitted(&_FunctionsOracleEventsMock.TransactOpts, requestId, transmitter) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactor) EmitTransmitted(opts *bind.TransactOpts, configDigest [32]byte, epoch uint32) (*types.Transaction, error) { + return 
_FunctionsOracleEventsMock.contract.Transact(opts, "emitTransmitted", configDigest, epoch) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitTransmitted(configDigest [32]byte, epoch uint32) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitTransmitted(&_FunctionsOracleEventsMock.TransactOpts, configDigest, epoch) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitTransmitted(configDigest [32]byte, epoch uint32) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitTransmitted(&_FunctionsOracleEventsMock.TransactOpts, configDigest, epoch) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactor) EmitUserCallbackError(opts *bind.TransactOpts, requestId [32]byte, reason string) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitUserCallbackError", requestId, reason) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitUserCallbackError(requestId [32]byte, reason string) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitUserCallbackError(&_FunctionsOracleEventsMock.TransactOpts, requestId, reason) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitUserCallbackError(requestId [32]byte, reason string) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitUserCallbackError(&_FunctionsOracleEventsMock.TransactOpts, requestId, reason) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactor) EmitUserCallbackRawError(opts *bind.TransactOpts, requestId [32]byte, lowLevelData []byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.contract.Transact(opts, "emitUserCallbackRawError", requestId, lowLevelData) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockSession) EmitUserCallbackRawError(requestId [32]byte, lowLevelData []byte) 
(*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitUserCallbackRawError(&_FunctionsOracleEventsMock.TransactOpts, requestId, lowLevelData) +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockTransactorSession) EmitUserCallbackRawError(requestId [32]byte, lowLevelData []byte) (*types.Transaction, error) { + return _FunctionsOracleEventsMock.Contract.EmitUserCallbackRawError(&_FunctionsOracleEventsMock.TransactOpts, requestId, lowLevelData) +} + +type FunctionsOracleEventsMockAuthorizedSendersActiveIterator struct { + Event *FunctionsOracleEventsMockAuthorizedSendersActive + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockAuthorizedSendersActiveIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockAuthorizedSendersActive) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockAuthorizedSendersActive) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockAuthorizedSendersActiveIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockAuthorizedSendersActiveIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockAuthorizedSendersActive struct { + Account common.Address + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterAuthorizedSendersActive(opts *bind.FilterOpts) 
(*FunctionsOracleEventsMockAuthorizedSendersActiveIterator, error) { + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "AuthorizedSendersActive") + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockAuthorizedSendersActiveIterator{contract: _FunctionsOracleEventsMock.contract, event: "AuthorizedSendersActive", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchAuthorizedSendersActive(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockAuthorizedSendersActive) (event.Subscription, error) { + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "AuthorizedSendersActive") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockAuthorizedSendersActive) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "AuthorizedSendersActive", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseAuthorizedSendersActive(log types.Log) (*FunctionsOracleEventsMockAuthorizedSendersActive, error) { + event := new(FunctionsOracleEventsMockAuthorizedSendersActive) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "AuthorizedSendersActive", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsOracleEventsMockAuthorizedSendersChangedIterator struct { + Event *FunctionsOracleEventsMockAuthorizedSendersChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*FunctionsOracleEventsMockAuthorizedSendersChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockAuthorizedSendersChangedIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockAuthorizedSendersChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockAuthorizedSendersChanged struct { + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*FunctionsOracleEventsMockAuthorizedSendersChangedIterator, error) { + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockAuthorizedSendersChangedIterator{contract: _FunctionsOracleEventsMock.contract, event: "AuthorizedSendersChanged", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockAuthorizedSendersChanged) (event.Subscription, error) { + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return 
nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockAuthorizedSendersChanged) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseAuthorizedSendersChanged(log types.Log) (*FunctionsOracleEventsMockAuthorizedSendersChanged, error) { + event := new(FunctionsOracleEventsMockAuthorizedSendersChanged) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsOracleEventsMockAuthorizedSendersDeactiveIterator struct { + Event *FunctionsOracleEventsMockAuthorizedSendersDeactive + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockAuthorizedSendersDeactiveIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockAuthorizedSendersDeactive) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockAuthorizedSendersDeactive) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return 
it.Next() + } +} + +func (it *FunctionsOracleEventsMockAuthorizedSendersDeactiveIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockAuthorizedSendersDeactiveIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockAuthorizedSendersDeactive struct { + Account common.Address + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterAuthorizedSendersDeactive(opts *bind.FilterOpts) (*FunctionsOracleEventsMockAuthorizedSendersDeactiveIterator, error) { + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "AuthorizedSendersDeactive") + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockAuthorizedSendersDeactiveIterator{contract: _FunctionsOracleEventsMock.contract, event: "AuthorizedSendersDeactive", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchAuthorizedSendersDeactive(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockAuthorizedSendersDeactive) (event.Subscription, error) { + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "AuthorizedSendersDeactive") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockAuthorizedSendersDeactive) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "AuthorizedSendersDeactive", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseAuthorizedSendersDeactive(log types.Log) (*FunctionsOracleEventsMockAuthorizedSendersDeactive, error) { + event := 
new(FunctionsOracleEventsMockAuthorizedSendersDeactive) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "AuthorizedSendersDeactive", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsOracleEventsMockConfigSetIterator struct { + Event *FunctionsOracleEventsMockConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockConfigSetIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterConfigSet(opts *bind.FilterOpts) (*FunctionsOracleEventsMockConfigSetIterator, error) { + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return 
&FunctionsOracleEventsMockConfigSetIterator{contract: _FunctionsOracleEventsMock.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockConfigSet) (event.Subscription, error) { + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockConfigSet) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseConfigSet(log types.Log) (*FunctionsOracleEventsMockConfigSet, error) { + event := new(FunctionsOracleEventsMockConfigSet) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsOracleEventsMockInitializedIterator struct { + Event *FunctionsOracleEventsMockInitialized + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockInitializedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := 
<-it.logs: + it.Event = new(FunctionsOracleEventsMockInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockInitializedIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockInitializedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockInitialized struct { + Version uint8 + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterInitialized(opts *bind.FilterOpts) (*FunctionsOracleEventsMockInitializedIterator, error) { + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockInitializedIterator{contract: _FunctionsOracleEventsMock.contract, event: "Initialized", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchInitialized(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockInitialized) (event.Subscription, error) { + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockInitialized) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "Initialized", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseInitialized(log types.Log) 
(*FunctionsOracleEventsMockInitialized, error) { + event := new(FunctionsOracleEventsMockInitialized) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "Initialized", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsOracleEventsMockInvalidRequestIDIterator struct { + Event *FunctionsOracleEventsMockInvalidRequestID + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockInvalidRequestIDIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockInvalidRequestID) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockInvalidRequestID) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockInvalidRequestIDIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockInvalidRequestIDIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockInvalidRequestID struct { + RequestId [32]byte + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterInvalidRequestID(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsOracleEventsMockInvalidRequestIDIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, 
"InvalidRequestID", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockInvalidRequestIDIterator{contract: _FunctionsOracleEventsMock.contract, event: "InvalidRequestID", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchInvalidRequestID(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockInvalidRequestID, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "InvalidRequestID", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockInvalidRequestID) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "InvalidRequestID", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseInvalidRequestID(log types.Log) (*FunctionsOracleEventsMockInvalidRequestID, error) { + event := new(FunctionsOracleEventsMockInvalidRequestID) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "InvalidRequestID", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsOracleEventsMockOracleRequestIterator struct { + Event *FunctionsOracleEventsMockOracleRequest + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockOracleRequestIterator) Next() bool { + + if it.fail != 
nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockOracleRequestIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockOracleRequestIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockOracleRequest struct { + RequestId [32]byte + RequestingContract common.Address + RequestInitiator common.Address + SubscriptionId uint64 + SubscriptionOwner common.Address + Data []byte + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterOracleRequest(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsOracleEventsMockOracleRequestIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "OracleRequest", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockOracleRequestIterator{contract: _FunctionsOracleEventsMock.contract, event: "OracleRequest", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchOracleRequest(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockOracleRequest, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, 
requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "OracleRequest", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockOracleRequest) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "OracleRequest", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseOracleRequest(log types.Log) (*FunctionsOracleEventsMockOracleRequest, error) { + event := new(FunctionsOracleEventsMockOracleRequest) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "OracleRequest", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsOracleEventsMockOracleResponseIterator struct { + Event *FunctionsOracleEventsMockOracleResponse + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockOracleResponseIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockOracleResponse) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockOracleResponse) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockOracleResponseIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockOracleResponseIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockOracleResponse struct { + RequestId [32]byte + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterOracleResponse(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsOracleEventsMockOracleResponseIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "OracleResponse", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockOracleResponseIterator{contract: _FunctionsOracleEventsMock.contract, event: "OracleResponse", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchOracleResponse(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockOracleResponse, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "OracleResponse", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockOracleResponse) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "OracleResponse", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case 
<-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseOracleResponse(log types.Log) (*FunctionsOracleEventsMockOracleResponse, error) { + event := new(FunctionsOracleEventsMockOracleResponse) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "OracleResponse", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsOracleEventsMockOwnershipTransferRequestedIterator struct { + Event *FunctionsOracleEventsMockOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) 
FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsOracleEventsMockOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockOwnershipTransferRequestedIterator{contract: _FunctionsOracleEventsMock.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockOwnershipTransferRequested) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) 
ParseOwnershipTransferRequested(log types.Log) (*FunctionsOracleEventsMockOwnershipTransferRequested, error) { + event := new(FunctionsOracleEventsMockOwnershipTransferRequested) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsOracleEventsMockOwnershipTransferredIterator struct { + Event *FunctionsOracleEventsMockOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsOracleEventsMockOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem 
:= range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockOwnershipTransferredIterator{contract: _FunctionsOracleEventsMock.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockOwnershipTransferred) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseOwnershipTransferred(log types.Log) (*FunctionsOracleEventsMockOwnershipTransferred, error) { + event := new(FunctionsOracleEventsMockOwnershipTransferred) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + 
event.Raw = log + return event, nil +} + +type FunctionsOracleEventsMockResponseTransmittedIterator struct { + Event *FunctionsOracleEventsMockResponseTransmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockResponseTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockResponseTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockResponseTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockResponseTransmittedIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockResponseTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockResponseTransmitted struct { + RequestId [32]byte + Transmitter common.Address + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterResponseTransmitted(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsOracleEventsMockResponseTransmittedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "ResponseTransmitted", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockResponseTransmittedIterator{contract: 
_FunctionsOracleEventsMock.contract, event: "ResponseTransmitted", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchResponseTransmitted(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockResponseTransmitted, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "ResponseTransmitted", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockResponseTransmitted) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "ResponseTransmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseResponseTransmitted(log types.Log) (*FunctionsOracleEventsMockResponseTransmitted, error) { + event := new(FunctionsOracleEventsMockResponseTransmitted) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "ResponseTransmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsOracleEventsMockTransmittedIterator struct { + Event *FunctionsOracleEventsMockTransmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(FunctionsOracleEventsMockTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockTransmittedIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockTransmitted struct { + ConfigDigest [32]byte + Epoch uint32 + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterTransmitted(opts *bind.FilterOpts) (*FunctionsOracleEventsMockTransmittedIterator, error) { + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockTransmittedIterator{contract: _FunctionsOracleEventsMock.contract, event: "Transmitted", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchTransmitted(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockTransmitted) (event.Subscription, error) { + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockTransmitted) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "Transmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- 
event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseTransmitted(log types.Log) (*FunctionsOracleEventsMockTransmitted, error) { + event := new(FunctionsOracleEventsMockTransmitted) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "Transmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FunctionsOracleEventsMockUserCallbackErrorIterator struct { + Event *FunctionsOracleEventsMockUserCallbackError + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockUserCallbackErrorIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockUserCallbackError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockUserCallbackError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockUserCallbackErrorIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockUserCallbackErrorIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockUserCallbackError struct { + RequestId [32]byte + Reason string + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterUserCallbackError(opts *bind.FilterOpts, requestId 
[][32]byte) (*FunctionsOracleEventsMockUserCallbackErrorIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "UserCallbackError", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockUserCallbackErrorIterator{contract: _FunctionsOracleEventsMock.contract, event: "UserCallbackError", logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchUserCallbackError(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockUserCallbackError, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "UserCallbackError", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockUserCallbackError) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "UserCallbackError", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseUserCallbackError(log types.Log) (*FunctionsOracleEventsMockUserCallbackError, error) { + event := new(FunctionsOracleEventsMockUserCallbackError) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "UserCallbackError", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
FunctionsOracleEventsMockUserCallbackRawErrorIterator struct { + Event *FunctionsOracleEventsMockUserCallbackRawError + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FunctionsOracleEventsMockUserCallbackRawErrorIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockUserCallbackRawError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FunctionsOracleEventsMockUserCallbackRawError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FunctionsOracleEventsMockUserCallbackRawErrorIterator) Error() error { + return it.fail +} + +func (it *FunctionsOracleEventsMockUserCallbackRawErrorIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FunctionsOracleEventsMockUserCallbackRawError struct { + RequestId [32]byte + LowLevelData []byte + Raw types.Log +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) FilterUserCallbackRawError(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsOracleEventsMockUserCallbackRawErrorIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.FilterLogs(opts, "UserCallbackRawError", requestIdRule) + if err != nil { + return nil, err + } + return &FunctionsOracleEventsMockUserCallbackRawErrorIterator{contract: _FunctionsOracleEventsMock.contract, event: "UserCallbackRawError", 
logs: logs, sub: sub}, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) WatchUserCallbackRawError(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockUserCallbackRawError, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _FunctionsOracleEventsMock.contract.WatchLogs(opts, "UserCallbackRawError", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FunctionsOracleEventsMockUserCallbackRawError) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "UserCallbackRawError", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMockFilterer) ParseUserCallbackRawError(log types.Log) (*FunctionsOracleEventsMockUserCallbackRawError, error) { + event := new(FunctionsOracleEventsMockUserCallbackRawError) + if err := _FunctionsOracleEventsMock.contract.UnpackLog(event, "UserCallbackRawError", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMock) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _FunctionsOracleEventsMock.abi.Events["AuthorizedSendersActive"].ID: + return _FunctionsOracleEventsMock.ParseAuthorizedSendersActive(log) + case _FunctionsOracleEventsMock.abi.Events["AuthorizedSendersChanged"].ID: + return _FunctionsOracleEventsMock.ParseAuthorizedSendersChanged(log) + case 
_FunctionsOracleEventsMock.abi.Events["AuthorizedSendersDeactive"].ID: + return _FunctionsOracleEventsMock.ParseAuthorizedSendersDeactive(log) + case _FunctionsOracleEventsMock.abi.Events["ConfigSet"].ID: + return _FunctionsOracleEventsMock.ParseConfigSet(log) + case _FunctionsOracleEventsMock.abi.Events["Initialized"].ID: + return _FunctionsOracleEventsMock.ParseInitialized(log) + case _FunctionsOracleEventsMock.abi.Events["InvalidRequestID"].ID: + return _FunctionsOracleEventsMock.ParseInvalidRequestID(log) + case _FunctionsOracleEventsMock.abi.Events["OracleRequest"].ID: + return _FunctionsOracleEventsMock.ParseOracleRequest(log) + case _FunctionsOracleEventsMock.abi.Events["OracleResponse"].ID: + return _FunctionsOracleEventsMock.ParseOracleResponse(log) + case _FunctionsOracleEventsMock.abi.Events["OwnershipTransferRequested"].ID: + return _FunctionsOracleEventsMock.ParseOwnershipTransferRequested(log) + case _FunctionsOracleEventsMock.abi.Events["OwnershipTransferred"].ID: + return _FunctionsOracleEventsMock.ParseOwnershipTransferred(log) + case _FunctionsOracleEventsMock.abi.Events["ResponseTransmitted"].ID: + return _FunctionsOracleEventsMock.ParseResponseTransmitted(log) + case _FunctionsOracleEventsMock.abi.Events["Transmitted"].ID: + return _FunctionsOracleEventsMock.ParseTransmitted(log) + case _FunctionsOracleEventsMock.abi.Events["UserCallbackError"].ID: + return _FunctionsOracleEventsMock.ParseUserCallbackError(log) + case _FunctionsOracleEventsMock.abi.Events["UserCallbackRawError"].ID: + return _FunctionsOracleEventsMock.ParseUserCallbackRawError(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (FunctionsOracleEventsMockAuthorizedSendersActive) Topic() common.Hash { + return common.HexToHash("0xae51766a982895b0c444fc99fc1a560762b464d709e6c78376c85617f7eeb5ce") +} + +func (FunctionsOracleEventsMockAuthorizedSendersChanged) Topic() common.Hash { + return 
common.HexToHash("0xf263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0") +} + +func (FunctionsOracleEventsMockAuthorizedSendersDeactive) Topic() common.Hash { + return common.HexToHash("0xea3828816a323b8d7ff49d755efd105e7719166d6c76fad97a28eee5eccc3d9a") +} + +func (FunctionsOracleEventsMockConfigSet) Topic() common.Hash { + return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05") +} + +func (FunctionsOracleEventsMockInitialized) Topic() common.Hash { + return common.HexToHash("0x7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498") +} + +func (FunctionsOracleEventsMockInvalidRequestID) Topic() common.Hash { + return common.HexToHash("0xa1c120e327c9ad8b075793878c88d59b8934b97ae37117faa3bb21616237f7be") +} + +func (FunctionsOracleEventsMockOracleRequest) Topic() common.Hash { + return common.HexToHash("0xa1ec73989d79578cd6f67d4f593ac3e0a4d1020e5c0164db52108d7ff785406c") +} + +func (FunctionsOracleEventsMockOracleResponse) Topic() common.Hash { + return common.HexToHash("0x9e9bc7616d42c2835d05ae617e508454e63b30b934be8aa932ebc125e0e58a64") +} + +func (FunctionsOracleEventsMockOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (FunctionsOracleEventsMockOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (FunctionsOracleEventsMockResponseTransmitted) Topic() common.Hash { + return common.HexToHash("0xdc941eddab34a6109ab77798299c6b1f035b125fd6f774d266ecbf9541d630a6") +} + +func (FunctionsOracleEventsMockTransmitted) Topic() common.Hash { + return common.HexToHash("0xb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62") +} + +func (FunctionsOracleEventsMockUserCallbackError) Topic() common.Hash { + return common.HexToHash("0xb2931868c372fe17a25643458add467d60ec5c51125a99b7309f41f5bcd2da6c") +} + 
+func (FunctionsOracleEventsMockUserCallbackRawError) Topic() common.Hash { + return common.HexToHash("0xe0b838ffe6ee22a0d3acf19a85db6a41b34a1ab739e2d6c759a2e42d95bdccb2") +} + +func (_FunctionsOracleEventsMock *FunctionsOracleEventsMock) Address() common.Address { + return _FunctionsOracleEventsMock.address +} + +type FunctionsOracleEventsMockInterface interface { + EmitAuthorizedSendersActive(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) + + EmitAuthorizedSendersChanged(opts *bind.TransactOpts, senders []common.Address, changedBy common.Address) (*types.Transaction, error) + + EmitAuthorizedSendersDeactive(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) + + EmitConfigSet(opts *bind.TransactOpts, previousConfigBlockNumber uint32, configDigest [32]byte, configCount uint64, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) + + EmitInitialized(opts *bind.TransactOpts, version uint8) (*types.Transaction, error) + + EmitInvalidRequestID(opts *bind.TransactOpts, requestId [32]byte) (*types.Transaction, error) + + EmitOracleRequest(opts *bind.TransactOpts, requestId [32]byte, requestingContract common.Address, requestInitiator common.Address, subscriptionId uint64, subscriptionOwner common.Address, data []byte) (*types.Transaction, error) + + EmitOracleResponse(opts *bind.TransactOpts, requestId [32]byte) (*types.Transaction, error) + + EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitResponseTransmitted(opts *bind.TransactOpts, requestId [32]byte, transmitter common.Address) (*types.Transaction, error) + + EmitTransmitted(opts *bind.TransactOpts, configDigest [32]byte, epoch uint32) 
(*types.Transaction, error) + + EmitUserCallbackError(opts *bind.TransactOpts, requestId [32]byte, reason string) (*types.Transaction, error) + + EmitUserCallbackRawError(opts *bind.TransactOpts, requestId [32]byte, lowLevelData []byte) (*types.Transaction, error) + + FilterAuthorizedSendersActive(opts *bind.FilterOpts) (*FunctionsOracleEventsMockAuthorizedSendersActiveIterator, error) + + WatchAuthorizedSendersActive(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockAuthorizedSendersActive) (event.Subscription, error) + + ParseAuthorizedSendersActive(log types.Log) (*FunctionsOracleEventsMockAuthorizedSendersActive, error) + + FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*FunctionsOracleEventsMockAuthorizedSendersChangedIterator, error) + + WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockAuthorizedSendersChanged) (event.Subscription, error) + + ParseAuthorizedSendersChanged(log types.Log) (*FunctionsOracleEventsMockAuthorizedSendersChanged, error) + + FilterAuthorizedSendersDeactive(opts *bind.FilterOpts) (*FunctionsOracleEventsMockAuthorizedSendersDeactiveIterator, error) + + WatchAuthorizedSendersDeactive(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockAuthorizedSendersDeactive) (event.Subscription, error) + + ParseAuthorizedSendersDeactive(log types.Log) (*FunctionsOracleEventsMockAuthorizedSendersDeactive, error) + + FilterConfigSet(opts *bind.FilterOpts) (*FunctionsOracleEventsMockConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*FunctionsOracleEventsMockConfigSet, error) + + FilterInitialized(opts *bind.FilterOpts) (*FunctionsOracleEventsMockInitializedIterator, error) + + WatchInitialized(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockInitialized) (event.Subscription, error) + + ParseInitialized(log types.Log) 
(*FunctionsOracleEventsMockInitialized, error) + + FilterInvalidRequestID(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsOracleEventsMockInvalidRequestIDIterator, error) + + WatchInvalidRequestID(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockInvalidRequestID, requestId [][32]byte) (event.Subscription, error) + + ParseInvalidRequestID(log types.Log) (*FunctionsOracleEventsMockInvalidRequestID, error) + + FilterOracleRequest(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsOracleEventsMockOracleRequestIterator, error) + + WatchOracleRequest(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockOracleRequest, requestId [][32]byte) (event.Subscription, error) + + ParseOracleRequest(log types.Log) (*FunctionsOracleEventsMockOracleRequest, error) + + FilterOracleResponse(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsOracleEventsMockOracleResponseIterator, error) + + WatchOracleResponse(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockOracleResponse, requestId [][32]byte) (event.Subscription, error) + + ParseOracleResponse(log types.Log) (*FunctionsOracleEventsMockOracleResponse, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsOracleEventsMockOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*FunctionsOracleEventsMockOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FunctionsOracleEventsMockOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + 
ParseOwnershipTransferred(log types.Log) (*FunctionsOracleEventsMockOwnershipTransferred, error) + + FilterResponseTransmitted(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsOracleEventsMockResponseTransmittedIterator, error) + + WatchResponseTransmitted(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockResponseTransmitted, requestId [][32]byte) (event.Subscription, error) + + ParseResponseTransmitted(log types.Log) (*FunctionsOracleEventsMockResponseTransmitted, error) + + FilterTransmitted(opts *bind.FilterOpts) (*FunctionsOracleEventsMockTransmittedIterator, error) + + WatchTransmitted(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockTransmitted) (event.Subscription, error) + + ParseTransmitted(log types.Log) (*FunctionsOracleEventsMockTransmitted, error) + + FilterUserCallbackError(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsOracleEventsMockUserCallbackErrorIterator, error) + + WatchUserCallbackError(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockUserCallbackError, requestId [][32]byte) (event.Subscription, error) + + ParseUserCallbackError(log types.Log) (*FunctionsOracleEventsMockUserCallbackError, error) + + FilterUserCallbackRawError(opts *bind.FilterOpts, requestId [][32]byte) (*FunctionsOracleEventsMockUserCallbackRawErrorIterator, error) + + WatchUserCallbackRawError(opts *bind.WatchOpts, sink chan<- *FunctionsOracleEventsMockUserCallbackRawError, requestId [][32]byte) (event.Subscription, error) + + ParseUserCallbackRawError(log types.Log) (*FunctionsOracleEventsMockUserCallbackRawError, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/gas_wrapper/gas_wrapper.go b/core/gethwrappers/generated/gas_wrapper/gas_wrapper.go new file mode 100644 index 00000000..297b2551 --- /dev/null +++ b/core/gethwrappers/generated/gas_wrapper/gas_wrapper.go @@ -0,0 +1,576 @@ +// Code generated - DO NOT EDIT. 
+// This file is a generated binding and any manual changes will be lost. + +package gas_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var KeeperRegistryCheckUpkeepGasUsageWrapperMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"contractAutomationRegistryExecutableInterface\",\"name\":\"keeperRegistry\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getKeeperRegistry\",\"outputs\":[{\"internalType\":\"contractAutomationRegistryExecutableInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"}],\"name\
":\"measureCheckGas\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60a060405234801561001057600080fd5b5060405161092f38038061092f83398101604081905261002f91610177565b33806000816100855760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156100b5576100b5816100cd565b50505060601b6001600160601b0319166080526101a7565b6001600160a01b0381163314156101265760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161007c565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60006020828403121561018957600080fd5b81516001600160a01b03811681146101a057600080fd5b9392505050565b60805160601c6107646101cb6000396000818160e2015261017001526107646000f3fe608060405234801561001057600080fd5b50600436106100675760003560e01c80638da5cb5b116100505780638da5cb5b146100a1578063a33c0660146100e0578063f2fde38b1461010657600080fd5b80636bf490301461006c57806379ba509714610097575b600080fd5b61007f61007a36600461062c565b610119565b60405161008e93929190610658565b60405180910390f35b61009f610262565b005b60005473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff90911681526
0200161008e565b7f00000000000000000000000000000000000000000000000000000000000000006100bb565b61009f61011436600461051a565b610364565b600060606000805a6040517fc41b813a0000000000000000000000000000000000000000000000000000000081526004810188905273ffffffffffffffffffffffffffffffffffffffff87811660248301529192507f00000000000000000000000000000000000000000000000000000000000000009091169063c41b813a90604401600060405180830381600087803b1580156101b657600080fd5b505af192505050801561020957506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052610206919081019061053c565b60015b6102385760005a61021a90836106ba565b6040805160208101909152600080825296509450925061025b915050565b60005a61024590886106ba565b60019a5095985094965061025b95505050505050565b9250925092565b60015473ffffffffffffffffffffffffffffffffffffffff1633146102e8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b61036c610378565b610375816103fb565b50565b60005473ffffffffffffffffffffffffffffffffffffffff1633146103f9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016102df565b565b73ffffffffffffffffffffffffffffffffffffffff811633141561047b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016102df565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff838116918217909255600
08054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b803573ffffffffffffffffffffffffffffffffffffffff8116811461051557600080fd5b919050565b60006020828403121561052c57600080fd5b610535826104f1565b9392505050565b600080600080600060a0868803121561055457600080fd5b855167ffffffffffffffff8082111561056c57600080fd5b818801915088601f83011261058057600080fd5b81518181111561059257610592610728565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156105d8576105d8610728565b816040528281528b60208487010111156105f157600080fd5b6106028360208301602088016106f8565b60208b015160408c015160608d01516080909d0151929e919d509b9a509098509650505050505050565b6000806040838503121561063f57600080fd5b8235915061064f602084016104f1565b90509250929050565b8315158152606060208201526000835180606084015261067f8160808501602088016106f8565b604083019390935250601f919091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160160800192915050565b6000828210156106f3577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b500390565b60005b838110156107135781810151838201526020016106fb565b83811115610722576000848401525b50505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var KeeperRegistryCheckUpkeepGasUsageWrapperABI = KeeperRegistryCheckUpkeepGasUsageWrapperMetaData.ABI + +var KeeperRegistryCheckUpkeepGasUsageWrapperBin = KeeperRegistryCheckUpkeepGasUsageWrapperMetaData.Bin + +func DeployKeeperRegistryCheckUpkeepGasUsageWrapper(auth *bind.TransactOpts, backend bind.ContractBackend, keeperRegistry common.Address) (common.Address, *types.Transaction, *KeeperRegistryCheckUpkeepGasUsageWrapper, error) { + parsed, err := KeeperRegistryCheckUpkeepGasUsageWrapperMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, 
errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistryCheckUpkeepGasUsageWrapperBin), backend, keeperRegistry) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistryCheckUpkeepGasUsageWrapper{address: address, abi: *parsed, KeeperRegistryCheckUpkeepGasUsageWrapperCaller: KeeperRegistryCheckUpkeepGasUsageWrapperCaller{contract: contract}, KeeperRegistryCheckUpkeepGasUsageWrapperTransactor: KeeperRegistryCheckUpkeepGasUsageWrapperTransactor{contract: contract}, KeeperRegistryCheckUpkeepGasUsageWrapperFilterer: KeeperRegistryCheckUpkeepGasUsageWrapperFilterer{contract: contract}}, nil +} + +type KeeperRegistryCheckUpkeepGasUsageWrapper struct { + address common.Address + abi abi.ABI + KeeperRegistryCheckUpkeepGasUsageWrapperCaller + KeeperRegistryCheckUpkeepGasUsageWrapperTransactor + KeeperRegistryCheckUpkeepGasUsageWrapperFilterer +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperSession struct { + Contract *KeeperRegistryCheckUpkeepGasUsageWrapper + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperCallerSession struct { + Contract *KeeperRegistryCheckUpkeepGasUsageWrapperCaller + CallOpts bind.CallOpts +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperTransactorSession struct { + Contract *KeeperRegistryCheckUpkeepGasUsageWrapperTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperRaw struct { + Contract *KeeperRegistryCheckUpkeepGasUsageWrapper +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperCallerRaw struct { + Contract 
*KeeperRegistryCheckUpkeepGasUsageWrapperCaller +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperTransactorRaw struct { + Contract *KeeperRegistryCheckUpkeepGasUsageWrapperTransactor +} + +func NewKeeperRegistryCheckUpkeepGasUsageWrapper(address common.Address, backend bind.ContractBackend) (*KeeperRegistryCheckUpkeepGasUsageWrapper, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryCheckUpkeepGasUsageWrapperABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistryCheckUpkeepGasUsageWrapper(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistryCheckUpkeepGasUsageWrapper{address: address, abi: abi, KeeperRegistryCheckUpkeepGasUsageWrapperCaller: KeeperRegistryCheckUpkeepGasUsageWrapperCaller{contract: contract}, KeeperRegistryCheckUpkeepGasUsageWrapperTransactor: KeeperRegistryCheckUpkeepGasUsageWrapperTransactor{contract: contract}, KeeperRegistryCheckUpkeepGasUsageWrapperFilterer: KeeperRegistryCheckUpkeepGasUsageWrapperFilterer{contract: contract}}, nil +} + +func NewKeeperRegistryCheckUpkeepGasUsageWrapperCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryCheckUpkeepGasUsageWrapperCaller, error) { + contract, err := bindKeeperRegistryCheckUpkeepGasUsageWrapper(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryCheckUpkeepGasUsageWrapperCaller{contract: contract}, nil +} + +func NewKeeperRegistryCheckUpkeepGasUsageWrapperTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistryCheckUpkeepGasUsageWrapperTransactor, error) { + contract, err := bindKeeperRegistryCheckUpkeepGasUsageWrapper(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryCheckUpkeepGasUsageWrapperTransactor{contract: contract}, nil +} + +func NewKeeperRegistryCheckUpkeepGasUsageWrapperFilterer(address common.Address, filterer bind.ContractFilterer) 
(*KeeperRegistryCheckUpkeepGasUsageWrapperFilterer, error) { + contract, err := bindKeeperRegistryCheckUpkeepGasUsageWrapper(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistryCheckUpkeepGasUsageWrapperFilterer{contract: contract}, nil +} + +func bindKeeperRegistryCheckUpkeepGasUsageWrapper(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistryCheckUpkeepGasUsageWrapperMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.KeeperRegistryCheckUpkeepGasUsageWrapperCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.KeeperRegistryCheckUpkeepGasUsageWrapperTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.KeeperRegistryCheckUpkeepGasUsageWrapperTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperCaller) GetKeeperRegistry(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.Call(opts, &out, "getKeeperRegistry") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperSession) GetKeeperRegistry() (common.Address, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.GetKeeperRegistry(&_KeeperRegistryCheckUpkeepGasUsageWrapper.CallOpts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperCallerSession) GetKeeperRegistry() (common.Address, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.GetKeeperRegistry(&_KeeperRegistryCheckUpkeepGasUsageWrapper.CallOpts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + 
return out0, err + +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperSession) Owner() (common.Address, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.Owner(&_KeeperRegistryCheckUpkeepGasUsageWrapper.CallOpts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperCallerSession) Owner() (common.Address, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.Owner(&_KeeperRegistryCheckUpkeepGasUsageWrapper.CallOpts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.Transact(opts, "acceptOwnership") +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.AcceptOwnership(&_KeeperRegistryCheckUpkeepGasUsageWrapper.TransactOpts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.AcceptOwnership(&_KeeperRegistryCheckUpkeepGasUsageWrapper.TransactOpts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperTransactor) MeasureCheckGas(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.Transact(opts, "measureCheckGas", id, from) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperSession) MeasureCheckGas(id *big.Int, from common.Address) (*types.Transaction, error) { + return 
_KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.MeasureCheckGas(&_KeeperRegistryCheckUpkeepGasUsageWrapper.TransactOpts, id, from) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperTransactorSession) MeasureCheckGas(id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.MeasureCheckGas(&_KeeperRegistryCheckUpkeepGasUsageWrapper.TransactOpts, id, from) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.Transact(opts, "transferOwnership", to) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.TransferOwnership(&_KeeperRegistryCheckUpkeepGasUsageWrapper.TransactOpts, to) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.Contract.TransferOwnership(&_KeeperRegistryCheckUpkeepGasUsageWrapper.TransactOpts, to) +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequestedIterator struct { + Event *KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequested) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequestedIterator{contract: _KeeperRegistryCheckUpkeepGasUsageWrapper.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- 
*KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequested) + if err := _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequested, error) { + event := new(KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequested) + if err := _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferredIterator struct { + Event *KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferredIterator) Next() bool { + + if 
it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferredIterator{contract: _KeeperRegistryCheckUpkeepGasUsageWrapper.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper 
*KeeperRegistryCheckUpkeepGasUsageWrapperFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferred) + if err := _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapperFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferred, error) { + event := new(KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferred) + if err := _KeeperRegistryCheckUpkeepGasUsageWrapper.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapper) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistryCheckUpkeepGasUsageWrapper.abi.Events["OwnershipTransferRequested"].ID: + return 
_KeeperRegistryCheckUpkeepGasUsageWrapper.ParseOwnershipTransferRequested(log) + case _KeeperRegistryCheckUpkeepGasUsageWrapper.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistryCheckUpkeepGasUsageWrapper.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapper *KeeperRegistryCheckUpkeepGasUsageWrapper) Address() common.Address { + return _KeeperRegistryCheckUpkeepGasUsageWrapper.address +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperInterface interface { + GetKeeperRegistry(opts *bind.CallOpts) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + MeasureCheckGas(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, 
to []common.Address) (*KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistryCheckUpkeepGasUsageWrapperOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/gas_wrapper_mock/gas_wrapper_mock.go b/core/gethwrappers/generated/gas_wrapper_mock/gas_wrapper_mock.go new file mode 100644 index 00000000..9189b2ca --- /dev/null +++ b/core/gethwrappers/generated/gas_wrapper_mock/gas_wrapper_mock.go @@ -0,0 +1,614 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package gas_wrapper_mock + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var KeeperRegistryCheckUpkeepGasUsageWrapperMockMetaData = &bind.MetaData{ + ABI: 
"[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferred\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"}],\"name\":\"measureCheckGas\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_mockGas\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_mockPayload\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_mockResult\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"func
tion\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"result\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"payload\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"gas\",\"type\":\"uint256\"}],\"name\":\"setMeasureCheckGasResult\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b506106b9806100206000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c8063846811931161005b57806384681193146100d5578063b019b4e8146100f2578063b023145014610105578063f7420bc21461011a57600080fd5b80632dae06f51461008257806356343496146100975780636bf49030146100b3575b600080fd5b610095610090366004610466565b61012d565b005b6100a060025481565b6040519081526020015b60405180910390f35b6100c66100c1366004610556565b610174565b6040516100aa939291906105e4565b6000546100e29060ff1681565b60405190151581526020016100aa565b610095610100366004610433565b610227565b61010d610285565b6040516100aa919061060f565b610095610128366004610433565b610313565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016841515179055815161016c906001906020850190610371565b506002555050565b6000606060008060009054906101000a900460ff16600160025481805461019a90610629565b80601f01602080910402602001604051908101604052809291908181526020018280546101c690610629565b80156102135780601f106101e857610100808354040283529160200191610213565b820191906000526020600020905b8154815290600101906020018083116101f657829003601f168201915b505050505091509250925092509250925092565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b6001805461029290610629565b80601f01602080910402602001604051908101604052809291908181526020018280546102be90610629565b801561030b5780601f106102e05761010080835404028352916020019161030b565b820191906000526020600020905b8154815290600101906020018083116102ee57829003601f168201915b505050505081565b8073ffffff
ffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127860405160405180910390a35050565b82805461037d90610629565b90600052602060002090601f01602090048101928261039f57600085556103e5565b82601f106103b857805160ff19168380011785556103e5565b828001600101855582156103e5579182015b828111156103e55782518255916020019190600101906103ca565b506103f19291506103f5565b5090565b5b808211156103f157600081556001016103f6565b803573ffffffffffffffffffffffffffffffffffffffff8116811461042e57600080fd5b919050565b6000806040838503121561044657600080fd5b61044f8361040a565b915061045d6020840161040a565b90509250929050565b60008060006060848603121561047b57600080fd5b8335801515811461048b57600080fd5b9250602084013567ffffffffffffffff808211156104a857600080fd5b818601915086601f8301126104bc57600080fd5b8135818111156104ce576104ce61067d565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156105145761051461067d565b8160405282815289602084870101111561052d57600080fd5b826020860160208301376000602084830101528096505050505050604084013590509250925092565b6000806040838503121561056957600080fd5b8235915061045d6020840161040a565b6000815180845260005b8181101561059f57602081850181015186830182015201610583565b818111156105b1576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b83151581526060602082015260006105ff6060830185610579565b9050826040830152949350505050565b6020815260006106226020830184610579565b9392505050565b600181811c9082168061063d57607f821691505b60208210811415610677577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var KeeperRegistryCheckUpkeepGasUsageWrapperMockABI = KeeperRegistryCheckUpkeepGasUsageWrapperMockMetaData.ABI + +var 
KeeperRegistryCheckUpkeepGasUsageWrapperMockBin = KeeperRegistryCheckUpkeepGasUsageWrapperMockMetaData.Bin + +func DeployKeeperRegistryCheckUpkeepGasUsageWrapperMock(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *KeeperRegistryCheckUpkeepGasUsageWrapperMock, error) { + parsed, err := KeeperRegistryCheckUpkeepGasUsageWrapperMockMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistryCheckUpkeepGasUsageWrapperMockBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistryCheckUpkeepGasUsageWrapperMock{address: address, abi: *parsed, KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller: KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller{contract: contract}, KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor: KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor{contract: contract}, KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer: KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer{contract: contract}}, nil +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMock struct { + address common.Address + abi abi.ABI + KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller + KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor + KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockSession struct { + Contract *KeeperRegistryCheckUpkeepGasUsageWrapperMock + CallOpts bind.CallOpts + TransactOpts 
bind.TransactOpts +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockCallerSession struct { + Contract *KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller + CallOpts bind.CallOpts +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactorSession struct { + Contract *KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockRaw struct { + Contract *KeeperRegistryCheckUpkeepGasUsageWrapperMock +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockCallerRaw struct { + Contract *KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactorRaw struct { + Contract *KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor +} + +func NewKeeperRegistryCheckUpkeepGasUsageWrapperMock(address common.Address, backend bind.ContractBackend) (*KeeperRegistryCheckUpkeepGasUsageWrapperMock, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryCheckUpkeepGasUsageWrapperMockABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistryCheckUpkeepGasUsageWrapperMock(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistryCheckUpkeepGasUsageWrapperMock{address: address, abi: abi, KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller: KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller{contract: contract}, KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor: KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor{contract: contract}, KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer: KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer{contract: contract}}, nil +} + +func NewKeeperRegistryCheckUpkeepGasUsageWrapperMockCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller, error) { + contract, err := bindKeeperRegistryCheckUpkeepGasUsageWrapperMock(address, caller, nil, nil) + if err != nil { + return nil, 
err + } + return &KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller{contract: contract}, nil +} + +func NewKeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor, error) { + contract, err := bindKeeperRegistryCheckUpkeepGasUsageWrapperMock(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor{contract: contract}, nil +} + +func NewKeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer, error) { + contract, err := bindKeeperRegistryCheckUpkeepGasUsageWrapperMock(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer{contract: contract}, nil +} + +func bindKeeperRegistryCheckUpkeepGasUsageWrapperMock(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistryCheckUpkeepGasUsageWrapperMockMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller.contract.Call(opts, result, method, params...) 
+} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.contract.Transact(opts, method, params...) 
+} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller) SMockGas(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.Call(opts, &out, "s_mockGas") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockSession) SMockGas() (*big.Int, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.SMockGas(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.CallOpts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockCallerSession) SMockGas() (*big.Int, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.SMockGas(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.CallOpts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller) SMockPayload(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.Call(opts, &out, "s_mockPayload") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockSession) SMockPayload() ([]byte, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.SMockPayload(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.CallOpts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockCallerSession) SMockPayload() ([]byte, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.SMockPayload(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.CallOpts) +} + +func 
(_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockCaller) SMockResult(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.Call(opts, &out, "s_mockResult") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockSession) SMockResult() (bool, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.SMockResult(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.CallOpts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockCallerSession) SMockResult() (bool, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.SMockResult(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.CallOpts) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor) EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.Transact(opts, "emitOwnershipTransferRequested", from, to) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.EmitOwnershipTransferRequested(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.TransactOpts, from, to) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactorSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return 
_KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.EmitOwnershipTransferRequested(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.TransactOpts, from, to) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor) EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.Transact(opts, "emitOwnershipTransferred", from, to) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.EmitOwnershipTransferred(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.TransactOpts, from, to) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactorSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.EmitOwnershipTransferred(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.TransactOpts, from, to) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor) MeasureCheckGas(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.Transact(opts, "measureCheckGas", id, from) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockSession) MeasureCheckGas(id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.MeasureCheckGas(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.TransactOpts, id, from) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock 
*KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactorSession) MeasureCheckGas(id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.MeasureCheckGas(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.TransactOpts, id, from) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactor) SetMeasureCheckGasResult(opts *bind.TransactOpts, result bool, payload []byte, gas *big.Int) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.Transact(opts, "setMeasureCheckGasResult", result, payload, gas) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockSession) SetMeasureCheckGasResult(result bool, payload []byte, gas *big.Int) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.SetMeasureCheckGasResult(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.TransactOpts, result, payload, gas) +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockTransactorSession) SetMeasureCheckGasResult(result bool, payload []byte, gas *big.Int) (*types.Transaction, error) { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.Contract.SetMeasureCheckGasResult(&_KeeperRegistryCheckUpkeepGasUsageWrapperMock.TransactOpts, result, payload, gas) +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequestedIterator struct { + Event *KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequestedIterator{contract: _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func 
(_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequested) + if err := _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequested, error) { + event := new(KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequested) + if err := _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferredIterator struct { + Event *KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferred + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, 
err + } + return &KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferredIterator{contract: _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferred) + if err := _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMockFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferred, error) { + event := new(KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferred) + if err := _KeeperRegistryCheckUpkeepGasUsageWrapperMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func 
(_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMock) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistryCheckUpkeepGasUsageWrapperMock.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.ParseOwnershipTransferRequested(log) + case _KeeperRegistryCheckUpkeepGasUsageWrapperMock.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_KeeperRegistryCheckUpkeepGasUsageWrapperMock *KeeperRegistryCheckUpkeepGasUsageWrapperMock) Address() common.Address { + return _KeeperRegistryCheckUpkeepGasUsageWrapperMock.address +} + +type KeeperRegistryCheckUpkeepGasUsageWrapperMockInterface interface { + SMockGas(opts *bind.CallOpts) (*big.Int, error) + + SMockPayload(opts *bind.CallOpts) ([]byte, error) + + SMockResult(opts *bind.CallOpts) (bool, error) + + EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + MeasureCheckGas(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) + + SetMeasureCheckGasResult(opts *bind.TransactOpts, result bool, payload []byte, gas *big.Int) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts 
*bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistryCheckUpkeepGasUsageWrapperMockOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/generated.go b/core/gethwrappers/generated/generated.go new file mode 100644 index 00000000..8478c9ef --- /dev/null +++ b/core/gethwrappers/generated/generated.go @@ -0,0 +1,10 @@ +package generated + +import ( + "github.com/ethereum/go-ethereum/common" +) + +// AbigenLog is an interface for abigen generated log topics +type AbigenLog interface { + Topic() common.Hash +} diff --git a/core/gethwrappers/generated/i_automation_registry_master_wrapper_2_2/i_automation_registry_master_wrapper_2_2.go b/core/gethwrappers/generated/i_automation_registry_master_wrapper_2_2/i_automation_registry_master_wrapper_2_2.go new file mode 100644 index 00000000..bc338db6 --- /dev/null +++ b/core/gethwrappers/generated/i_automation_registry_master_wrapper_2_2/i_automation_registry_master_wrapper_2_2.go @@ -0,0 +1,6687 @@ +// Code generated - DO NOT EDIT. 
+// This file is a generated binding and any manual changes will be lost. + +package i_automation_registry_master_wrapper_2_2 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type AutomationRegistryBase22OnchainConfig struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + MaxCheckDataSize uint32 + MaxPerformDataSize uint32 + MaxRevertDataSize uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrars []common.Address + UpkeepPrivilegeManager common.Address + ChainModule common.Address + ReorgProtectionEnabled bool +} + +type AutomationRegistryBase22OnchainConfigLegacy struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + MaxCheckDataSize uint32 + MaxPerformDataSize uint32 + MaxRevertDataSize uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrars []common.Address + UpkeepPrivilegeManager common.Address +} + +type AutomationRegistryBase22State struct { + Nonce uint32 + OwnerLinkBalance *big.Int + ExpectedLinkBalance *big.Int + TotalPremium *big.Int + NumUpkeeps *big.Int + ConfigCount uint32 + LatestConfigBlockNumber uint32 + LatestConfigDigest 
[32]byte + LatestEpoch uint32 + Paused bool +} + +type AutomationRegistryBase22UpkeepInfo struct { + Target common.Address + PerformGas uint32 + CheckData []byte + Balance *big.Int + Admin common.Address + MaxValidBlocknumber uint64 + LastPerformedBlockNumber uint32 + AmountSpent *big.Int + Paused bool + OffchainConfig []byte +} + +var IAutomationRegistryMasterMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CheckDataExceedsLimit\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ConfigDigestMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfFaultyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidReport\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTransmitter\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTrigger\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTriggerType\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxCheckDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxPerformDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"Not
AContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveTransmitters\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByUpkeepPrivilegeManager\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyPausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUnpausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReentrantCall\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistryPaused\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedTransmitter\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepAlreadyExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepCancelled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\"
:true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"AdminPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"CancelledUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newModule\",\"type\":\"address\"}],\"name\":\"ChainSpecificModuleUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"dedupKey\",\"type\":\"bytes32\"}],\"name\":\"DedupKeyAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"inde
xed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"InsufficientFundsUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"PayeesUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"addre
ss\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"ReorgedUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"StaleUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,
\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"UpkeepCheckDataSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"
offchainConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepOffchainConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepPaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasOverhead\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,
\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepTriggerConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepUnpaused\",\"type\":\"event\"},{\"stateMutability\":\"nonpayable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"acceptUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"cancelUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes[]\",\"name\":\"values\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"checkCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint8\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"b
ytes\",\"name\":\"triggerData\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint8\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fastGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkNative\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint8\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fastGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkNative\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"payload\",\"type\":\"bytes\"}],\"name\":\"executeCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint8\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"fallbackTo\",\"outputs\":[{\"internalType\":\"address\",
\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveUpkeepIDs\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"getAdminPrivilegeConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAllowedReadOnlyAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAutomationForwarderLogic\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCancellationDelay\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getChainModule\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"chainModule\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConditionalGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFastGasFeedAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",
\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepID\",\"type\":\"uint256\"}],\"name\":\"getForwarder\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkNativeFeedAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLogGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"getMaxPaymentForGas\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"maxPayment\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"minBalance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"}],\"name\":\"getPeerRegistryMigrationPermission\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPerPerformByteGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"na
me\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPerSignerGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getReorgProtectionEnabled\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"reorgProtectionEnabled\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getSignerInfo\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"index\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getState\",\"outputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"nonce\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"ownerLinkBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"expectedLinkBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"totalPremium\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"numUpkeeps\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"latestConfigBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"latestConfigDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"latestEpoch\",\"type\":\"uint32\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"}],\"internalType\":\"structAutomationRegistryBase2_2.State\",\"name\":\"state\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint
24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCheckDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxRevertDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"registrars\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"upkeepPrivilegeManager\",\"type\":\"address\"}],\"internalType\":\"structAutomationRegistryBase2_2.OnchainConfigLegacy\",\"name\":\"config\",\"type\":\"tuple\"},{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTransmitCalldataFixedBytesOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTransmitCalldataPerSignerBytesOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getTransmitterInfo\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"index\",\"type\":\"uint8\"},{
\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"lastCollected\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getTriggerType\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getUpkeep\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"lastPerformedBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"amountSpent\",\"type\":\"uint96\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"internalType\":\"structAutomationRegistryBase2_2.UpkeepInfo\",\"name\":\"upkeepInfo\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepPrivilegeConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepTriggerConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\"
:\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"dedupKey\",\"type\":\"bytes32\"}],\"name\":\"hasDedupKey\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"ids\",\"type\":\"uint256[]\"},{\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"migrateUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"pauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable
\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"encodedUpkeeps\",\"type\":\"bytes\"}],\"name\":\"receiveUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"newPrivilegeConfig\",\"type\":\"bytes\"}],\"name\":\"setAdminPrivilegeConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\
"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"onchainConfigBytes\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCheckDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxRevertDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"registrars\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"upkeepPrivilegeManager\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"chainModule\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"reorgProt
ectionEnabled\",\"type\":\"bool\"}],\"internalType\":\"structAutomationRegistryBase2_2.OnchainConfig\",\"name\":\"onchainConfig\",\"type\":\"tuple\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfigTypeSafe\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"setPayees\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"permission\",\"type\":\"uint8\"}],\"name\":\"setPeerRegistryMigrationPermission\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"setUpkeepCheckData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"setUpkeepGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"config\",\"type\":\"bytes\"}],\"name\":\"setUpkeepOffchainConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"newPrivilegeConfig\",\"type\":\"bytes\"}],\"name\":\"setUpkeepPrivilegeConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"ty
pe\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"}],\"name\":\"setUpkeepTriggerConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"simulatePerformUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"rawReport\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpau
se\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"unpauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepTranscoderVersion\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepVersion\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawOwnerFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +var IAutomationRegistryMasterABI = IAutomationRegistryMasterMetaData.ABI + +type IAutomationRegistryMaster struct { + address common.Address + abi abi.ABI + IAutomationRegistryMasterCaller + IAutomationRegistryMasterTransactor + IAutomationRegistryMasterFilterer +} + +type IAutomationRegistryMasterCaller struct { + contract *bind.BoundContract +} + +type IAutomationRegistryMasterTransactor struct { + contract *bind.BoundContract +} + +type IAutomationRegistryMasterFilterer struct { + contract *bind.BoundContract +} + +type IAutomationRegistryMasterSession struct { + Contract *IAutomationRegistryMaster + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type IAutomationRegistryMasterCallerSession struct { + Contract 
*IAutomationRegistryMasterCaller + CallOpts bind.CallOpts +} + +type IAutomationRegistryMasterTransactorSession struct { + Contract *IAutomationRegistryMasterTransactor + TransactOpts bind.TransactOpts +} + +type IAutomationRegistryMasterRaw struct { + Contract *IAutomationRegistryMaster +} + +type IAutomationRegistryMasterCallerRaw struct { + Contract *IAutomationRegistryMasterCaller +} + +type IAutomationRegistryMasterTransactorRaw struct { + Contract *IAutomationRegistryMasterTransactor +} + +func NewIAutomationRegistryMaster(address common.Address, backend bind.ContractBackend) (*IAutomationRegistryMaster, error) { + abi, err := abi.JSON(strings.NewReader(IAutomationRegistryMasterABI)) + if err != nil { + return nil, err + } + contract, err := bindIAutomationRegistryMaster(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &IAutomationRegistryMaster{address: address, abi: abi, IAutomationRegistryMasterCaller: IAutomationRegistryMasterCaller{contract: contract}, IAutomationRegistryMasterTransactor: IAutomationRegistryMasterTransactor{contract: contract}, IAutomationRegistryMasterFilterer: IAutomationRegistryMasterFilterer{contract: contract}}, nil +} + +func NewIAutomationRegistryMasterCaller(address common.Address, caller bind.ContractCaller) (*IAutomationRegistryMasterCaller, error) { + contract, err := bindIAutomationRegistryMaster(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterCaller{contract: contract}, nil +} + +func NewIAutomationRegistryMasterTransactor(address common.Address, transactor bind.ContractTransactor) (*IAutomationRegistryMasterTransactor, error) { + contract, err := bindIAutomationRegistryMaster(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterTransactor{contract: contract}, nil +} + +func NewIAutomationRegistryMasterFilterer(address common.Address, filterer bind.ContractFilterer) 
(*IAutomationRegistryMasterFilterer, error) { + contract, err := bindIAutomationRegistryMaster(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterFilterer{contract: contract}, nil +} + +func bindIAutomationRegistryMaster(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := IAutomationRegistryMasterMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _IAutomationRegistryMaster.Contract.IAutomationRegistryMasterCaller.contract.Call(opts, result, method, params...) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IAutomationRegistryMaster.Contract.IAutomationRegistryMasterTransactor.contract.Transfer(opts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _IAutomationRegistryMaster.Contract.IAutomationRegistryMasterTransactor.contract.Transact(opts, method, params...) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _IAutomationRegistryMaster.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IAutomationRegistryMaster.Contract.contract.Transfer(opts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _IAutomationRegistryMaster.Contract.contract.Transact(opts, method, params...) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) CheckCallback(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (CheckCallback, + + error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "checkCallback", id, values, extraData) + + outstruct := new(CheckCallback) + if err != nil { + return *outstruct, err + } + + outstruct.UpkeepNeeded = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.PerformData = *abi.ConvertType(out[1], new([]byte)).(*[]byte) + outstruct.UpkeepFailureReason = *abi.ConvertType(out[2], new(uint8)).(*uint8) + outstruct.GasUsed = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) CheckCallback(id *big.Int, values [][]byte, extraData []byte) (CheckCallback, + + error) { + return _IAutomationRegistryMaster.Contract.CheckCallback(&_IAutomationRegistryMaster.CallOpts, id, values, extraData) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) CheckCallback(id *big.Int, values [][]byte, extraData []byte) (CheckCallback, + + error) { + return _IAutomationRegistryMaster.Contract.CheckCallback(&_IAutomationRegistryMaster.CallOpts, id, values, extraData) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) CheckUpkeep(opts *bind.CallOpts, id *big.Int, triggerData []byte) (CheckUpkeep, + + error) { + var out []interface{} + err := 
_IAutomationRegistryMaster.contract.Call(opts, &out, "checkUpkeep", id, triggerData) + + outstruct := new(CheckUpkeep) + if err != nil { + return *outstruct, err + } + + outstruct.UpkeepNeeded = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.PerformData = *abi.ConvertType(out[1], new([]byte)).(*[]byte) + outstruct.UpkeepFailureReason = *abi.ConvertType(out[2], new(uint8)).(*uint8) + outstruct.GasUsed = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.GasLimit = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + outstruct.FastGasWei = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + outstruct.LinkNative = *abi.ConvertType(out[6], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) CheckUpkeep(id *big.Int, triggerData []byte) (CheckUpkeep, + + error) { + return _IAutomationRegistryMaster.Contract.CheckUpkeep(&_IAutomationRegistryMaster.CallOpts, id, triggerData) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) CheckUpkeep(id *big.Int, triggerData []byte) (CheckUpkeep, + + error) { + return _IAutomationRegistryMaster.Contract.CheckUpkeep(&_IAutomationRegistryMaster.CallOpts, id, triggerData) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) CheckUpkeep0(opts *bind.CallOpts, id *big.Int) (CheckUpkeep0, + + error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "checkUpkeep0", id) + + outstruct := new(CheckUpkeep0) + if err != nil { + return *outstruct, err + } + + outstruct.UpkeepNeeded = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.PerformData = *abi.ConvertType(out[1], new([]byte)).(*[]byte) + outstruct.UpkeepFailureReason = *abi.ConvertType(out[2], new(uint8)).(*uint8) + outstruct.GasUsed = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.GasLimit = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + outstruct.FastGasWei = 
*abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + outstruct.LinkNative = *abi.ConvertType(out[6], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) CheckUpkeep0(id *big.Int) (CheckUpkeep0, + + error) { + return _IAutomationRegistryMaster.Contract.CheckUpkeep0(&_IAutomationRegistryMaster.CallOpts, id) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) CheckUpkeep0(id *big.Int) (CheckUpkeep0, + + error) { + return _IAutomationRegistryMaster.Contract.CheckUpkeep0(&_IAutomationRegistryMaster.CallOpts, id) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) FallbackTo(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "fallbackTo") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) FallbackTo() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.FallbackTo(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) FallbackTo() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.FallbackTo(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getActiveUpkeepIDs", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetActiveUpkeepIDs(startIndex *big.Int, 
maxCount *big.Int) ([]*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetActiveUpkeepIDs(&_IAutomationRegistryMaster.CallOpts, startIndex, maxCount) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetActiveUpkeepIDs(&_IAutomationRegistryMaster.CallOpts, startIndex, maxCount) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetAdminPrivilegeConfig(opts *bind.CallOpts, admin common.Address) ([]byte, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getAdminPrivilegeConfig", admin) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetAdminPrivilegeConfig(admin common.Address) ([]byte, error) { + return _IAutomationRegistryMaster.Contract.GetAdminPrivilegeConfig(&_IAutomationRegistryMaster.CallOpts, admin) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetAdminPrivilegeConfig(admin common.Address) ([]byte, error) { + return _IAutomationRegistryMaster.Contract.GetAdminPrivilegeConfig(&_IAutomationRegistryMaster.CallOpts, admin) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetAllowedReadOnlyAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getAllowedReadOnlyAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetAllowedReadOnlyAddress() (common.Address, error) { + return 
_IAutomationRegistryMaster.Contract.GetAllowedReadOnlyAddress(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetAllowedReadOnlyAddress() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetAllowedReadOnlyAddress(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetAutomationForwarderLogic(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getAutomationForwarderLogic") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetAutomationForwarderLogic() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetAutomationForwarderLogic(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetAutomationForwarderLogic() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetAutomationForwarderLogic(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getBalance", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetBalance(id *big.Int) (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetBalance(&_IAutomationRegistryMaster.CallOpts, id) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetBalance(id *big.Int) (*big.Int, error) { + return 
_IAutomationRegistryMaster.Contract.GetBalance(&_IAutomationRegistryMaster.CallOpts, id) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetCancellationDelay(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getCancellationDelay") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetCancellationDelay() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetCancellationDelay(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetCancellationDelay() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetCancellationDelay(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetChainModule(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getChainModule") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetChainModule() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetChainModule(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetChainModule() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetChainModule(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetConditionalGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, 
"getConditionalGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetConditionalGasOverhead() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetConditionalGasOverhead(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetConditionalGasOverhead() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetConditionalGasOverhead(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetFastGasFeedAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getFastGasFeedAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetFastGasFeedAddress() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetFastGasFeedAddress(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetFastGasFeedAddress() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetFastGasFeedAddress(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getForwarder", upkeepID) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IAutomationRegistryMaster 
*IAutomationRegistryMasterSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetForwarder(&_IAutomationRegistryMaster.CallOpts, upkeepID) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetForwarder(&_IAutomationRegistryMaster.CallOpts, upkeepID) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetLinkAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getLinkAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetLinkAddress() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetLinkAddress(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetLinkAddress() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetLinkAddress(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetLinkNativeFeedAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getLinkNativeFeedAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetLinkNativeFeedAddress() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetLinkNativeFeedAddress(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster 
*IAutomationRegistryMasterCallerSession) GetLinkNativeFeedAddress() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.GetLinkNativeFeedAddress(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetLogGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getLogGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetLogGasOverhead() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetLogGasOverhead(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetLogGasOverhead() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetLogGasOverhead(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetMaxPaymentForGas(opts *bind.CallOpts, triggerType uint8, gasLimit uint32) (*big.Int, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getMaxPaymentForGas", triggerType, gasLimit) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetMaxPaymentForGas(triggerType uint8, gasLimit uint32) (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetMaxPaymentForGas(&_IAutomationRegistryMaster.CallOpts, triggerType, gasLimit) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetMaxPaymentForGas(triggerType uint8, gasLimit uint32) (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetMaxPaymentForGas(&_IAutomationRegistryMaster.CallOpts, 
triggerType, gasLimit) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetMinBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getMinBalance", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetMinBalance(id *big.Int) (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetMinBalance(&_IAutomationRegistryMaster.CallOpts, id) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetMinBalance(id *big.Int) (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetMinBalance(&_IAutomationRegistryMaster.CallOpts, id) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getMinBalanceForUpkeep", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetMinBalanceForUpkeep(&_IAutomationRegistryMaster.CallOpts, id) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetMinBalanceForUpkeep(&_IAutomationRegistryMaster.CallOpts, id) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, 
&out, "getPeerRegistryMigrationPermission", peer) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return _IAutomationRegistryMaster.Contract.GetPeerRegistryMigrationPermission(&_IAutomationRegistryMaster.CallOpts, peer) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return _IAutomationRegistryMaster.Contract.GetPeerRegistryMigrationPermission(&_IAutomationRegistryMaster.CallOpts, peer) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetPerPerformByteGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getPerPerformByteGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetPerPerformByteGasOverhead() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetPerPerformByteGasOverhead(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetPerPerformByteGasOverhead() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetPerPerformByteGasOverhead(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetPerSignerGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getPerSignerGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err 
+ +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetPerSignerGasOverhead() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetPerSignerGasOverhead(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetPerSignerGasOverhead() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetPerSignerGasOverhead(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetReorgProtectionEnabled(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getReorgProtectionEnabled") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetReorgProtectionEnabled() (bool, error) { + return _IAutomationRegistryMaster.Contract.GetReorgProtectionEnabled(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetReorgProtectionEnabled() (bool, error) { + return _IAutomationRegistryMaster.Contract.GetReorgProtectionEnabled(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetSignerInfo(opts *bind.CallOpts, query common.Address) (GetSignerInfo, + + error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getSignerInfo", query) + + outstruct := new(GetSignerInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.Index = *abi.ConvertType(out[1], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetSignerInfo(query common.Address) (GetSignerInfo, + + error) { + return 
_IAutomationRegistryMaster.Contract.GetSignerInfo(&_IAutomationRegistryMaster.CallOpts, query) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetSignerInfo(query common.Address) (GetSignerInfo, + + error) { + return _IAutomationRegistryMaster.Contract.GetSignerInfo(&_IAutomationRegistryMaster.CallOpts, query) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetState(opts *bind.CallOpts) (GetState, + + error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getState") + + outstruct := new(GetState) + if err != nil { + return *outstruct, err + } + + outstruct.State = *abi.ConvertType(out[0], new(AutomationRegistryBase22State)).(*AutomationRegistryBase22State) + outstruct.Config = *abi.ConvertType(out[1], new(AutomationRegistryBase22OnchainConfigLegacy)).(*AutomationRegistryBase22OnchainConfigLegacy) + outstruct.Signers = *abi.ConvertType(out[2], new([]common.Address)).(*[]common.Address) + outstruct.Transmitters = *abi.ConvertType(out[3], new([]common.Address)).(*[]common.Address) + outstruct.F = *abi.ConvertType(out[4], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetState() (GetState, + + error) { + return _IAutomationRegistryMaster.Contract.GetState(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetState() (GetState, + + error) { + return _IAutomationRegistryMaster.Contract.GetState(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetTransmitCalldataFixedBytesOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getTransmitCalldataFixedBytesOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return 
out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetTransmitCalldataFixedBytesOverhead() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetTransmitCalldataFixedBytesOverhead(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetTransmitCalldataFixedBytesOverhead() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetTransmitCalldataFixedBytesOverhead(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetTransmitCalldataPerSignerBytesOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getTransmitCalldataPerSignerBytesOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetTransmitCalldataPerSignerBytesOverhead() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetTransmitCalldataPerSignerBytesOverhead(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetTransmitCalldataPerSignerBytesOverhead() (*big.Int, error) { + return _IAutomationRegistryMaster.Contract.GetTransmitCalldataPerSignerBytesOverhead(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetTransmitterInfo(opts *bind.CallOpts, query common.Address) (GetTransmitterInfo, + + error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getTransmitterInfo", query) + + outstruct := new(GetTransmitterInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.Index = *abi.ConvertType(out[1], new(uint8)).(*uint8) + 
outstruct.Balance = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.LastCollected = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.Payee = *abi.ConvertType(out[4], new(common.Address)).(*common.Address) + + return *outstruct, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetTransmitterInfo(query common.Address) (GetTransmitterInfo, + + error) { + return _IAutomationRegistryMaster.Contract.GetTransmitterInfo(&_IAutomationRegistryMaster.CallOpts, query) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetTransmitterInfo(query common.Address) (GetTransmitterInfo, + + error) { + return _IAutomationRegistryMaster.Contract.GetTransmitterInfo(&_IAutomationRegistryMaster.CallOpts, query) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getTriggerType", upkeepId) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetTriggerType(upkeepId *big.Int) (uint8, error) { + return _IAutomationRegistryMaster.Contract.GetTriggerType(&_IAutomationRegistryMaster.CallOpts, upkeepId) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetTriggerType(upkeepId *big.Int) (uint8, error) { + return _IAutomationRegistryMaster.Contract.GetTriggerType(&_IAutomationRegistryMaster.CallOpts, upkeepId) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetUpkeep(opts *bind.CallOpts, id *big.Int) (AutomationRegistryBase22UpkeepInfo, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getUpkeep", id) + + if err != nil { + return *new(AutomationRegistryBase22UpkeepInfo), err + } + + 
out0 := *abi.ConvertType(out[0], new(AutomationRegistryBase22UpkeepInfo)).(*AutomationRegistryBase22UpkeepInfo) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetUpkeep(id *big.Int) (AutomationRegistryBase22UpkeepInfo, error) { + return _IAutomationRegistryMaster.Contract.GetUpkeep(&_IAutomationRegistryMaster.CallOpts, id) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetUpkeep(id *big.Int) (AutomationRegistryBase22UpkeepInfo, error) { + return _IAutomationRegistryMaster.Contract.GetUpkeep(&_IAutomationRegistryMaster.CallOpts, id) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getUpkeepPrivilegeConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return _IAutomationRegistryMaster.Contract.GetUpkeepPrivilegeConfig(&_IAutomationRegistryMaster.CallOpts, upkeepId) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return _IAutomationRegistryMaster.Contract.GetUpkeepPrivilegeConfig(&_IAutomationRegistryMaster.CallOpts, upkeepId) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "getUpkeepTriggerConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_IAutomationRegistryMaster 
*IAutomationRegistryMasterSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _IAutomationRegistryMaster.Contract.GetUpkeepTriggerConfig(&_IAutomationRegistryMaster.CallOpts, upkeepId) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _IAutomationRegistryMaster.Contract.GetUpkeepTriggerConfig(&_IAutomationRegistryMaster.CallOpts, upkeepId) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) HasDedupKey(opts *bind.CallOpts, dedupKey [32]byte) (bool, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "hasDedupKey", dedupKey) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) HasDedupKey(dedupKey [32]byte) (bool, error) { + return _IAutomationRegistryMaster.Contract.HasDedupKey(&_IAutomationRegistryMaster.CallOpts, dedupKey) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) HasDedupKey(dedupKey [32]byte) (bool, error) { + return _IAutomationRegistryMaster.Contract.HasDedupKey(&_IAutomationRegistryMaster.CallOpts, dedupKey) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) 
LatestConfigDetails() (LatestConfigDetails, + + error) { + return _IAutomationRegistryMaster.Contract.LatestConfigDetails(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _IAutomationRegistryMaster.Contract.LatestConfigDetails(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "latestConfigDigestAndEpoch") + + outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _IAutomationRegistryMaster.Contract.LatestConfigDigestAndEpoch(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _IAutomationRegistryMaster.Contract.LatestConfigDigestAndEpoch(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) Owner() 
(common.Address, error) { + return _IAutomationRegistryMaster.Contract.Owner(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) Owner() (common.Address, error) { + return _IAutomationRegistryMaster.Contract.Owner(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) SimulatePerformUpkeep(opts *bind.CallOpts, id *big.Int, performData []byte) (SimulatePerformUpkeep, + + error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "simulatePerformUpkeep", id, performData) + + outstruct := new(SimulatePerformUpkeep) + if err != nil { + return *outstruct, err + } + + outstruct.Success = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.GasUsed = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) SimulatePerformUpkeep(id *big.Int, performData []byte) (SimulatePerformUpkeep, + + error) { + return _IAutomationRegistryMaster.Contract.SimulatePerformUpkeep(&_IAutomationRegistryMaster.CallOpts, id, performData) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) SimulatePerformUpkeep(id *big.Int, performData []byte) (SimulatePerformUpkeep, + + error) { + return _IAutomationRegistryMaster.Contract.SimulatePerformUpkeep(&_IAutomationRegistryMaster.CallOpts, id, performData) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) TypeAndVersion() (string, error) { + return 
_IAutomationRegistryMaster.Contract.TypeAndVersion(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) TypeAndVersion() (string, error) { + return _IAutomationRegistryMaster.Contract.TypeAndVersion(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "upkeepTranscoderVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) UpkeepTranscoderVersion() (uint8, error) { + return _IAutomationRegistryMaster.Contract.UpkeepTranscoderVersion(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) UpkeepTranscoderVersion() (uint8, error) { + return _IAutomationRegistryMaster.Contract.UpkeepTranscoderVersion(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCaller) UpkeepVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _IAutomationRegistryMaster.contract.Call(opts, &out, "upkeepVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) UpkeepVersion() (uint8, error) { + return _IAutomationRegistryMaster.Contract.UpkeepVersion(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterCallerSession) UpkeepVersion() (uint8, error) { + return _IAutomationRegistryMaster.Contract.UpkeepVersion(&_IAutomationRegistryMaster.CallOpts) +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) 
AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "acceptOwnership")
}

// NOTE(review): the methods below are paid mutator transaction bindings in
// the usual abigen layout: each contract method is exposed as a Transactor
// method taking explicit *bind.TransactOpts, plus Session and
// TransactorSession wrappers that reuse the session's pre-set TransactOpts.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) AcceptOwnership() (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.AcceptOwnership(&_IAutomationRegistryMaster.TransactOpts)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) AcceptOwnership() (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.AcceptOwnership(&_IAutomationRegistryMaster.TransactOpts)
}

// AcceptPayeeship is a paid mutator transaction binding the contract method
// "acceptPayeeship".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "acceptPayeeship", transmitter)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.AcceptPayeeship(&_IAutomationRegistryMaster.TransactOpts, transmitter)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.AcceptPayeeship(&_IAutomationRegistryMaster.TransactOpts, transmitter)
}

// AcceptUpkeepAdmin is a paid mutator transaction binding the contract method
// "acceptUpkeepAdmin".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "acceptUpkeepAdmin", id)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.AcceptUpkeepAdmin(&_IAutomationRegistryMaster.TransactOpts, id)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.AcceptUpkeepAdmin(&_IAutomationRegistryMaster.TransactOpts, id)
}

// AddFunds is a paid mutator transaction binding the contract method
// "addFunds".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "addFunds", id, amount)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.AddFunds(&_IAutomationRegistryMaster.TransactOpts, id, amount)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.AddFunds(&_IAutomationRegistryMaster.TransactOpts, id, amount)
}

// CancelUpkeep is a paid mutator transaction binding the contract method
// "cancelUpkeep".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "cancelUpkeep", id)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.CancelUpkeep(&_IAutomationRegistryMaster.TransactOpts, id)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.CancelUpkeep(&_IAutomationRegistryMaster.TransactOpts, id)
}

// ExecuteCallback is a paid mutator transaction binding the contract method
// "executeCallback".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) ExecuteCallback(opts *bind.TransactOpts, id *big.Int, payload []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "executeCallback", id, payload)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) ExecuteCallback(id *big.Int, payload []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.ExecuteCallback(&_IAutomationRegistryMaster.TransactOpts, id, payload)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) ExecuteCallback(id *big.Int, payload []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.ExecuteCallback(&_IAutomationRegistryMaster.TransactOpts, id, payload)
}

// MigrateUpkeeps is a paid mutator transaction binding the contract method
// "migrateUpkeeps".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "migrateUpkeeps", ids, destination)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.MigrateUpkeeps(&_IAutomationRegistryMaster.TransactOpts, ids, destination)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.MigrateUpkeeps(&_IAutomationRegistryMaster.TransactOpts, ids, destination)
}

// OnTokenTransfer is a paid mutator transaction binding the contract method
// "onTokenTransfer".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "onTokenTransfer", sender, amount, data)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.OnTokenTransfer(&_IAutomationRegistryMaster.TransactOpts, sender, amount, data)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.OnTokenTransfer(&_IAutomationRegistryMaster.TransactOpts, sender, amount, data)
}

// Pause is a paid mutator transaction binding the contract method "pause".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "pause")
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) Pause() (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.Pause(&_IAutomationRegistryMaster.TransactOpts)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) Pause() (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.Pause(&_IAutomationRegistryMaster.TransactOpts)
}

// PauseUpkeep is a paid mutator transaction binding the contract method
// "pauseUpkeep".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "pauseUpkeep", id)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) PauseUpkeep(id *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.PauseUpkeep(&_IAutomationRegistryMaster.TransactOpts, id)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) PauseUpkeep(id *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.PauseUpkeep(&_IAutomationRegistryMaster.TransactOpts, id)
}

// ReceiveUpkeeps is a paid mutator transaction binding the contract method
// "receiveUpkeeps".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "receiveUpkeeps", encodedUpkeeps)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.ReceiveUpkeeps(&_IAutomationRegistryMaster.TransactOpts, encodedUpkeeps)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.ReceiveUpkeeps(&_IAutomationRegistryMaster.TransactOpts, encodedUpkeeps)
}

// RecoverFunds is a paid mutator transaction binding the contract method
// "recoverFunds".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "recoverFunds")
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) RecoverFunds() (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.RecoverFunds(&_IAutomationRegistryMaster.TransactOpts)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) RecoverFunds() (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.RecoverFunds(&_IAutomationRegistryMaster.TransactOpts)
}

// RegisterUpkeep is a paid mutator transaction binding the contract method
// "registerUpkeep" (the 7-argument overload taking a trigger type/config).
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "registerUpkeep", target, gasLimit, admin, triggerType, checkData, triggerConfig, offchainConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.RegisterUpkeep(&_IAutomationRegistryMaster.TransactOpts, target, gasLimit, admin, triggerType, checkData, triggerConfig, offchainConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.RegisterUpkeep(&_IAutomationRegistryMaster.TransactOpts, target, gasLimit, admin, triggerType, checkData, triggerConfig, offchainConfig)
}

// RegisterUpkeep0 is a paid mutator transaction binding the contract method
// "registerUpkeep0" (abigen's suffixed name for the overloaded variant).
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) RegisterUpkeep0(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "registerUpkeep0", target, gasLimit, admin, checkData, offchainConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) RegisterUpkeep0(target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.RegisterUpkeep0(&_IAutomationRegistryMaster.TransactOpts, target, gasLimit, admin, checkData, offchainConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) RegisterUpkeep0(target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.RegisterUpkeep0(&_IAutomationRegistryMaster.TransactOpts, target, gasLimit, admin, checkData, offchainConfig)
}

// SetAdminPrivilegeConfig is a paid mutator transaction binding the contract
// method "setAdminPrivilegeConfig".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) SetAdminPrivilegeConfig(opts *bind.TransactOpts, admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "setAdminPrivilegeConfig", admin, newPrivilegeConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) SetAdminPrivilegeConfig(admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetAdminPrivilegeConfig(&_IAutomationRegistryMaster.TransactOpts, admin, newPrivilegeConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) SetAdminPrivilegeConfig(admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetAdminPrivilegeConfig(&_IAutomationRegistryMaster.TransactOpts, admin, newPrivilegeConfig)
}

// SetConfig is a paid mutator transaction binding the contract method
// "setConfig" (onchain config passed as ABI-encoded bytes).
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "setConfig", signers, transmitters, f, onchainConfigBytes, offchainConfigVersion, offchainConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) SetConfig(signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetConfig(&_IAutomationRegistryMaster.TransactOpts, signers, transmitters, f, onchainConfigBytes, offchainConfigVersion, offchainConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) SetConfig(signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetConfig(&_IAutomationRegistryMaster.TransactOpts, signers, transmitters, f, onchainConfigBytes, offchainConfigVersion, offchainConfig)
}

// SetConfigTypeSafe is a paid mutator transaction binding the contract method
// "setConfigTypeSafe" (onchain config passed as a typed struct).
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) SetConfigTypeSafe(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig AutomationRegistryBase22OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "setConfigTypeSafe", signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) SetConfigTypeSafe(signers []common.Address, transmitters []common.Address, f uint8, onchainConfig AutomationRegistryBase22OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetConfigTypeSafe(&_IAutomationRegistryMaster.TransactOpts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) SetConfigTypeSafe(signers []common.Address, transmitters []common.Address, f uint8, onchainConfig AutomationRegistryBase22OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetConfigTypeSafe(&_IAutomationRegistryMaster.TransactOpts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig)
}

// SetPayees is a paid mutator transaction binding the contract method
// "setPayees".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) SetPayees(opts *bind.TransactOpts, payees []common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "setPayees", payees)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) SetPayees(payees []common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetPayees(&_IAutomationRegistryMaster.TransactOpts, payees)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) SetPayees(payees []common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetPayees(&_IAutomationRegistryMaster.TransactOpts, payees)
}

// SetPeerRegistryMigrationPermission is a paid mutator transaction binding the
// contract method "setPeerRegistryMigrationPermission".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "setPeerRegistryMigrationPermission", peer, permission)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetPeerRegistryMigrationPermission(&_IAutomationRegistryMaster.TransactOpts, peer, permission)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetPeerRegistryMigrationPermission(&_IAutomationRegistryMaster.TransactOpts, peer, permission)
}

// SetUpkeepCheckData is a paid mutator transaction binding the contract method
// "setUpkeepCheckData".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) SetUpkeepCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "setUpkeepCheckData", id, newCheckData)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) SetUpkeepCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetUpkeepCheckData(&_IAutomationRegistryMaster.TransactOpts, id, newCheckData)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) SetUpkeepCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetUpkeepCheckData(&_IAutomationRegistryMaster.TransactOpts, id, newCheckData)
}

// SetUpkeepGasLimit is a paid mutator transaction binding the contract method
// "setUpkeepGasLimit".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "setUpkeepGasLimit", id, gasLimit)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetUpkeepGasLimit(&_IAutomationRegistryMaster.TransactOpts, id, gasLimit)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetUpkeepGasLimit(&_IAutomationRegistryMaster.TransactOpts, id, gasLimit)
}

// SetUpkeepOffchainConfig is a paid mutator transaction binding the contract
// method "setUpkeepOffchainConfig".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) SetUpkeepOffchainConfig(opts *bind.TransactOpts, id *big.Int, config []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "setUpkeepOffchainConfig", id, config)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) SetUpkeepOffchainConfig(id *big.Int, config []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetUpkeepOffchainConfig(&_IAutomationRegistryMaster.TransactOpts, id, config)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) SetUpkeepOffchainConfig(id *big.Int, config []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetUpkeepOffchainConfig(&_IAutomationRegistryMaster.TransactOpts, id, config)
}

// SetUpkeepPrivilegeConfig is a paid mutator transaction binding the contract
// method "setUpkeepPrivilegeConfig".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "setUpkeepPrivilegeConfig", upkeepId, newPrivilegeConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetUpkeepPrivilegeConfig(&_IAutomationRegistryMaster.TransactOpts, upkeepId, newPrivilegeConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetUpkeepPrivilegeConfig(&_IAutomationRegistryMaster.TransactOpts, upkeepId, newPrivilegeConfig)
}

// SetUpkeepTriggerConfig is a paid mutator transaction binding the contract
// method "setUpkeepTriggerConfig".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) SetUpkeepTriggerConfig(opts *bind.TransactOpts, id *big.Int, triggerConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "setUpkeepTriggerConfig", id, triggerConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetUpkeepTriggerConfig(&_IAutomationRegistryMaster.TransactOpts, id, triggerConfig)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.SetUpkeepTriggerConfig(&_IAutomationRegistryMaster.TransactOpts, id, triggerConfig)
}

// TransferOwnership is a paid mutator transaction binding the contract method
// "transferOwnership".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "transferOwnership", to)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) TransferOwnership(to common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.TransferOwnership(&_IAutomationRegistryMaster.TransactOpts, to)
}
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.TransferOwnership(&_IAutomationRegistryMaster.TransactOpts, to)
}

// TransferPayeeship is a paid mutator transaction binding the contract method
// "transferPayeeship".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "transferPayeeship", transmitter, proposed)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.TransferPayeeship(&_IAutomationRegistryMaster.TransactOpts, transmitter, proposed)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.TransferPayeeship(&_IAutomationRegistryMaster.TransactOpts, transmitter, proposed)
}

// TransferUpkeepAdmin is a paid mutator transaction binding the contract
// method "transferUpkeepAdmin".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "transferUpkeepAdmin", id, proposed)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.TransferUpkeepAdmin(&_IAutomationRegistryMaster.TransactOpts, id, proposed)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.TransferUpkeepAdmin(&_IAutomationRegistryMaster.TransactOpts, id, proposed)
}

// Transmit is a paid mutator transaction binding the contract method
// "transmit" (OCR report delivery with its signature components).
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "transmit", reportContext, rawReport, rs, ss, rawVs)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) Transmit(reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.Transmit(&_IAutomationRegistryMaster.TransactOpts, reportContext, rawReport, rs, ss, rawVs)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) Transmit(reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.Transmit(&_IAutomationRegistryMaster.TransactOpts, reportContext, rawReport, rs, ss, rawVs)
}

// Unpause is a paid mutator transaction binding the contract method "unpause".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "unpause")
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) Unpause() (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.Unpause(&_IAutomationRegistryMaster.TransactOpts)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) Unpause() (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.Unpause(&_IAutomationRegistryMaster.TransactOpts)
}

// UnpauseUpkeep is a paid mutator transaction binding the contract method
// "unpauseUpkeep".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "unpauseUpkeep", id)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.UnpauseUpkeep(&_IAutomationRegistryMaster.TransactOpts, id)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.UnpauseUpkeep(&_IAutomationRegistryMaster.TransactOpts, id)
}

// WithdrawFunds is a paid mutator transaction binding the contract method
// "withdrawFunds".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "withdrawFunds", id, to)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.WithdrawFunds(&_IAutomationRegistryMaster.TransactOpts, id, to)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.WithdrawFunds(&_IAutomationRegistryMaster.TransactOpts, id, to)
}

// WithdrawOwnerFunds is a paid mutator transaction binding the contract method
// "withdrawOwnerFunds".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "withdrawOwnerFunds")
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) WithdrawOwnerFunds() (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.WithdrawOwnerFunds(&_IAutomationRegistryMaster.TransactOpts)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) WithdrawOwnerFunds() (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.WithdrawOwnerFunds(&_IAutomationRegistryMaster.TransactOpts)
}

// WithdrawPayment is a paid mutator transaction binding the contract method
// "withdrawPayment".
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.Transact(opts, "withdrawPayment", from, to)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.WithdrawPayment(&_IAutomationRegistryMaster.TransactOpts, from, to)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.WithdrawPayment(&_IAutomationRegistryMaster.TransactOpts, from, to)
}

// Fallback is a paid mutator transaction binding the contract fallback
// function: the raw calldata is forwarded unmodified via RawTransact.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.contract.RawTransact(opts, calldata)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterSession) Fallback(calldata []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.Fallback(&_IAutomationRegistryMaster.TransactOpts, calldata)
}

func (_IAutomationRegistryMaster *IAutomationRegistryMasterTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) {
	return _IAutomationRegistryMaster.Contract.Fallback(&_IAutomationRegistryMaster.TransactOpts, calldata)
}

// IAutomationRegistryMasterAdminPrivilegeConfigSetIterator is returned from
// FilterAdminPrivilegeConfigSet and is used to iterate over the raw logs and
// unpacked data for AdminPrivilegeConfigSet events.
type IAutomationRegistryMasterAdminPrivilegeConfigSetIterator struct {
	Event *IAutomationRegistryMasterAdminPrivilegeConfigSet // current unpacked event

	contract *bind.BoundContract // bound contract used to unpack event data
	event    string              // event name being filtered

	logs chan types.Log          // channel of raw logs from the backend
	sub  ethereum.Subscription   // subscription delivering the logs
	done bool                    // subscription has ended; drain remaining logs
	fail error                   // first error encountered; sticky
}

// Next advances the iterator to the subsequent event, returning whether there
// are any more events found.
func (it *IAutomationRegistryMasterAdminPrivilegeConfigSetIterator) Next() bool {

	if
it.fail != nil {
		// Once an error is recorded the iterator is permanently stopped.
		return false
	}

	if it.done {
		// Subscription already ended: non-blocking drain of buffered logs.
		select {
		case log := <-it.logs:
			it.Event = new(IAutomationRegistryMasterAdminPrivilegeConfigSet)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}

	// Subscription still live: block until a log or a subscription error.
	select {
	case log := <-it.logs:
		it.Event = new(IAutomationRegistryMasterAdminPrivilegeConfigSet)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error occurred during filtering.
func (it *IAutomationRegistryMasterAdminPrivilegeConfigSetIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing the log subscription.
func (it *IAutomationRegistryMasterAdminPrivilegeConfigSetIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// IAutomationRegistryMasterAdminPrivilegeConfigSet represents an
// AdminPrivilegeConfigSet event raised by the contract.
type IAutomationRegistryMasterAdminPrivilegeConfigSet struct {
	Admin           common.Address
	PrivilegeConfig []byte
	Raw             types.Log // blockchain-specific contextual infos
}

// FilterAdminPrivilegeConfigSet retrieves historical AdminPrivilegeConfigSet
// logs, optionally restricted to the given indexed admin addresses.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*IAutomationRegistryMasterAdminPrivilegeConfigSetIterator, error) {

	var adminRule []interface{}
	for _, adminItem := range admin {
		adminRule = append(adminRule, adminItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "AdminPrivilegeConfigSet", adminRule)
	if err != nil {
		return nil, err
	}
	return &IAutomationRegistryMasterAdminPrivilegeConfigSetIterator{contract: _IAutomationRegistryMaster.contract, event: "AdminPrivilegeConfigSet", logs: logs, sub: sub}, nil
}

// WatchAdminPrivilegeConfigSet subscribes to future AdminPrivilegeConfigSet
// logs, delivering unpacked events on sink until unsubscribed.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) {

	var adminRule []interface{}
	for _, adminItem := range admin {
		adminRule = append(adminRule, adminItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "AdminPrivilegeConfigSet", adminRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:

				event := new(IAutomationRegistryMasterAdminPrivilegeConfigSet)
				if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil {
					return err
				}
				event.Raw = log

				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseAdminPrivilegeConfigSet unpacks a single raw log into an
// AdminPrivilegeConfigSet event.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseAdminPrivilegeConfigSet(log types.Log) (*IAutomationRegistryMasterAdminPrivilegeConfigSet, error) {
	event := new(IAutomationRegistryMasterAdminPrivilegeConfigSet)
	if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// IAutomationRegistryMasterCancelledUpkeepReportIterator is returned from
// FilterCancelledUpkeepReport and is used to iterate over the raw logs and
// unpacked data for CancelledUpkeepReport events.
type IAutomationRegistryMasterCancelledUpkeepReportIterator struct {
	Event *IAutomationRegistryMasterCancelledUpkeepReport // current unpacked event

	contract *bind.BoundContract // bound contract used to unpack event data
	event    string              // event name being filtered

	logs chan types.Log          // channel of raw logs from the backend
	sub  ethereum.Subscription   // subscription delivering the logs
	done bool                    // subscription has ended; drain remaining logs
	fail error                   // first error encountered; sticky
}

// Next advances the iterator to the subsequent event, returning whether there
// are any more events found.
func (it *IAutomationRegistryMasterCancelledUpkeepReportIterator) Next() bool {

	if it.fail != nil {
		return false
	}

	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(IAutomationRegistryMasterCancelledUpkeepReport)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}

	select {
	case log := <-it.logs:
		it.Event = new(IAutomationRegistryMasterCancelledUpkeepReport)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error occurred during filtering.
func (it *IAutomationRegistryMasterCancelledUpkeepReportIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing the log subscription.
func (it *IAutomationRegistryMasterCancelledUpkeepReportIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// IAutomationRegistryMasterCancelledUpkeepReport represents a
// CancelledUpkeepReport event raised by the contract.
type IAutomationRegistryMasterCancelledUpkeepReport struct {
	Id      *big.Int
	Trigger []byte
	Raw     types.Log // blockchain-specific contextual infos
}

// FilterCancelledUpkeepReport retrieves historical CancelledUpkeepReport logs,
// optionally restricted to the given indexed upkeep ids.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterCancelledUpkeepReportIterator, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "CancelledUpkeepReport", idRule)
	if err != nil {
		return nil, err
	}
	return &IAutomationRegistryMasterCancelledUpkeepReportIterator{contract: _IAutomationRegistryMaster.contract, event: "CancelledUpkeepReport", logs: logs, sub: sub}, nil
}

// WatchCancelledUpkeepReport subscribes to future CancelledUpkeepReport logs,
// delivering unpacked events on sink until unsubscribed.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "CancelledUpkeepReport", idRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:

				event := new(IAutomationRegistryMasterCancelledUpkeepReport)
				if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil {
					return err
				}
				event.Raw = log

				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseCancelledUpkeepReport unpacks a single raw log into a
// CancelledUpkeepReport event.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseCancelledUpkeepReport(log types.Log) (*IAutomationRegistryMasterCancelledUpkeepReport, error) {
	event := new(IAutomationRegistryMasterCancelledUpkeepReport)
	if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// IAutomationRegistryMasterChainSpecificModuleUpdatedIterator is used to
// iterate over the raw logs and unpacked data for ChainSpecificModuleUpdated
// events.
type IAutomationRegistryMasterChainSpecificModuleUpdatedIterator struct {
	Event *IAutomationRegistryMasterChainSpecificModuleUpdated // current unpacked event

	contract *bind.BoundContract // bound contract used to unpack event data
	event    string              // event name being filtered

	logs chan types.Log          // channel of raw logs from the backend
	sub  ethereum.Subscription   // subscription delivering the logs
	done bool                    // subscription has ended; drain remaining logs
	fail error                   // first error encountered; sticky
}

// Next advances the iterator to the subsequent event, returning whether there
// are any more events found.
func (it *IAutomationRegistryMasterChainSpecificModuleUpdatedIterator) Next() bool {

	if it.fail != nil {
		return false
	}

	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(IAutomationRegistryMasterChainSpecificModuleUpdated)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}

	select {
	case log := <-it.logs:
		it.Event = new(IAutomationRegistryMasterChainSpecificModuleUpdated)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error occurred during filtering.
func (it *IAutomationRegistryMasterChainSpecificModuleUpdatedIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing the log subscription.
func (it *IAutomationRegistryMasterChainSpecificModuleUpdatedIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// IAutomationRegistryMasterChainSpecificModuleUpdated represents a
// ChainSpecificModuleUpdated event raised by the contract.
type IAutomationRegistryMasterChainSpecificModuleUpdated struct {
	NewModule common.Address
	Raw       types.Log // blockchain-specific contextual infos
}

func
(_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterChainSpecificModuleUpdated(opts *bind.FilterOpts) (*IAutomationRegistryMasterChainSpecificModuleUpdatedIterator, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "ChainSpecificModuleUpdated") + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterChainSpecificModuleUpdatedIterator{contract: _IAutomationRegistryMaster.contract, event: "ChainSpecificModuleUpdated", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchChainSpecificModuleUpdated(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterChainSpecificModuleUpdated) (event.Subscription, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "ChainSpecificModuleUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterChainSpecificModuleUpdated) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "ChainSpecificModuleUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseChainSpecificModuleUpdated(log types.Log) (*IAutomationRegistryMasterChainSpecificModuleUpdated, error) { + event := new(IAutomationRegistryMasterChainSpecificModuleUpdated) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "ChainSpecificModuleUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterConfigSetIterator struct { + Event *IAutomationRegistryMasterConfigSet + + contract *bind.BoundContract 
+ event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterConfigSetIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterConfigSet(opts *bind.FilterOpts) (*IAutomationRegistryMasterConfigSetIterator, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterConfigSetIterator{contract: _IAutomationRegistryMaster.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterConfigSet) (event.Subscription, error) { + + logs, sub, err := 
_IAutomationRegistryMaster.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterConfigSet) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseConfigSet(log types.Log) (*IAutomationRegistryMasterConfigSet, error) { + event := new(IAutomationRegistryMasterConfigSet) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterDedupKeyAddedIterator struct { + Event *IAutomationRegistryMasterDedupKeyAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterDedupKeyAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterDedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterDedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*IAutomationRegistryMasterDedupKeyAddedIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterDedupKeyAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterDedupKeyAdded struct { + DedupKey [32]byte + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*IAutomationRegistryMasterDedupKeyAddedIterator, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterDedupKeyAddedIterator{contract: _IAutomationRegistryMaster.contract, event: "DedupKeyAdded", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterDedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterDedupKeyAdded) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster 
*IAutomationRegistryMasterFilterer) ParseDedupKeyAdded(log types.Log) (*IAutomationRegistryMasterDedupKeyAdded, error) { + event := new(IAutomationRegistryMasterDedupKeyAdded) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterFundsAddedIterator struct { + Event *IAutomationRegistryMasterFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterFundsAddedIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*IAutomationRegistryMasterFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { 
+ fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterFundsAddedIterator{contract: _IAutomationRegistryMaster.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterFundsAdded) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseFundsAdded(log types.Log) (*IAutomationRegistryMasterFundsAdded, error) { + event := new(IAutomationRegistryMasterFundsAdded) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterFundsWithdrawnIterator struct { + Event *IAutomationRegistryMasterFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub 
ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterFundsWithdrawnIterator{contract: _IAutomationRegistryMaster.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + 
idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterFundsWithdrawn) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseFundsWithdrawn(log types.Log) (*IAutomationRegistryMasterFundsWithdrawn, error) { + event := new(IAutomationRegistryMasterFundsWithdrawn) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterInsufficientFundsUpkeepReportIterator struct { + Event *IAutomationRegistryMasterInsufficientFundsUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterInsufficientFundsUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return 
false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterInsufficientFundsUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterInsufficientFundsUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterInsufficientFundsUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterInsufficientFundsUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterInsufficientFundsUpkeepReportIterator{contract: _IAutomationRegistryMaster.contract, event: "InsufficientFundsUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterInsufficientFundsUpkeepReport) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return err + 
} + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseInsufficientFundsUpkeepReport(log types.Log) (*IAutomationRegistryMasterInsufficientFundsUpkeepReport, error) { + event := new(IAutomationRegistryMasterInsufficientFundsUpkeepReport) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterOwnerFundsWithdrawnIterator struct { + Event *IAutomationRegistryMasterOwnerFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterOwnerFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func 
(_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*IAutomationRegistryMasterOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterOwnerFundsWithdrawnIterator{contract: _IAutomationRegistryMaster.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterOwnerFundsWithdrawn) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*IAutomationRegistryMasterOwnerFundsWithdrawn, error) { + event := new(IAutomationRegistryMasterOwnerFundsWithdrawn) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterOwnershipTransferRequestedIterator struct { + Event *IAutomationRegistryMasterOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub 
ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*IAutomationRegistryMasterOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterOwnershipTransferRequestedIterator{contract: _IAutomationRegistryMaster.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func 
(_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterOwnershipTransferRequested) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseOwnershipTransferRequested(log types.Log) (*IAutomationRegistryMasterOwnershipTransferRequested, error) { + event := new(IAutomationRegistryMasterOwnershipTransferRequested) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterOwnershipTransferredIterator struct { + Event *IAutomationRegistryMasterOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + 
select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*IAutomationRegistryMasterOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterOwnershipTransferredIterator{contract: _IAutomationRegistryMaster.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule 
[]interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterOwnershipTransferred) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseOwnershipTransferred(log types.Log) (*IAutomationRegistryMasterOwnershipTransferred, error) { + event := new(IAutomationRegistryMasterOwnershipTransferred) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterPausedIterator struct { + Event *IAutomationRegistryMasterPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterPaused) + if err 
:= it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterPausedIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterPaused struct { + Account common.Address + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterPaused(opts *bind.FilterOpts) (*IAutomationRegistryMasterPausedIterator, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterPausedIterator{contract: _IAutomationRegistryMaster.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterPaused) (event.Subscription, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterPaused) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParsePaused(log types.Log) (*IAutomationRegistryMasterPaused, error) { + event := new(IAutomationRegistryMasterPaused) + if err := 
_IAutomationRegistryMaster.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterPayeesUpdatedIterator struct { + Event *IAutomationRegistryMasterPayeesUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterPayeesUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterPayeesUpdatedIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterPayeesUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterPayeesUpdated struct { + Transmitters []common.Address + Payees []common.Address + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterPayeesUpdated(opts *bind.FilterOpts) (*IAutomationRegistryMasterPayeesUpdatedIterator, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterPayeesUpdatedIterator{contract: _IAutomationRegistryMaster.contract, event: "PayeesUpdated", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster 
*IAutomationRegistryMasterFilterer) WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterPayeesUpdated) (event.Subscription, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterPayeesUpdated) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParsePayeesUpdated(log types.Log) (*IAutomationRegistryMasterPayeesUpdated, error) { + event := new(IAutomationRegistryMasterPayeesUpdated) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterPayeeshipTransferRequestedIterator struct { + Event *IAutomationRegistryMasterPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterPayeeshipTransferRequested) + 
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterPayeeshipTransferRequested struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*IAutomationRegistryMasterPayeeshipTransferRequestedIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterPayeeshipTransferRequestedIterator{contract: _IAutomationRegistryMaster.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range 
transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterPayeeshipTransferRequested) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParsePayeeshipTransferRequested(log types.Log) (*IAutomationRegistryMasterPayeeshipTransferRequested, error) { + event := new(IAutomationRegistryMasterPayeeshipTransferRequested) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterPayeeshipTransferredIterator struct { + Event *IAutomationRegistryMasterPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != 
nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterPayeeshipTransferred struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*IAutomationRegistryMasterPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterPayeeshipTransferredIterator{contract: _IAutomationRegistryMaster.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterPayeeshipTransferred, transmitter []common.Address, 
from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterPayeeshipTransferred) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParsePayeeshipTransferred(log types.Log) (*IAutomationRegistryMasterPayeeshipTransferred, error) { + event := new(IAutomationRegistryMasterPayeeshipTransferred) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterPaymentWithdrawnIterator struct { + Event *IAutomationRegistryMasterPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(IAutomationRegistryMasterPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterPaymentWithdrawn struct { + Transmitter common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*IAutomationRegistryMasterPaymentWithdrawnIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterPaymentWithdrawnIterator{contract: _IAutomationRegistryMaster.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchPaymentWithdrawn(opts 
*bind.WatchOpts, sink chan<- *IAutomationRegistryMasterPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterPaymentWithdrawn) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParsePaymentWithdrawn(log types.Log) (*IAutomationRegistryMasterPaymentWithdrawn, error) { + event := new(IAutomationRegistryMasterPaymentWithdrawn) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterReorgedUpkeepReportIterator struct { + Event *IAutomationRegistryMasterReorgedUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterReorgedUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + 
} + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterReorgedUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterReorgedUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterReorgedUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterReorgedUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterReorgedUpkeepReportIterator{contract: _IAutomationRegistryMaster.contract, event: "ReorgedUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "ReorgedUpkeepReport", 
idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterReorgedUpkeepReport) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseReorgedUpkeepReport(log types.Log) (*IAutomationRegistryMasterReorgedUpkeepReport, error) { + event := new(IAutomationRegistryMasterReorgedUpkeepReport) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterStaleUpkeepReportIterator struct { + Event *IAutomationRegistryMasterStaleUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterStaleUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*IAutomationRegistryMasterStaleUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterStaleUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterStaleUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterStaleUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterStaleUpkeepReportIterator{contract: _IAutomationRegistryMaster.contract, event: "StaleUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterStaleUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterStaleUpkeepReport) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) 
ParseStaleUpkeepReport(log types.Log) (*IAutomationRegistryMasterStaleUpkeepReport, error) { + event := new(IAutomationRegistryMasterStaleUpkeepReport) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterTransmittedIterator struct { + Event *IAutomationRegistryMasterTransmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterTransmittedIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterTransmitted struct { + ConfigDigest [32]byte + Epoch uint32 + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterTransmitted(opts *bind.FilterOpts) (*IAutomationRegistryMasterTransmittedIterator, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterTransmittedIterator{contract: 
_IAutomationRegistryMaster.contract, event: "Transmitted", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchTransmitted(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterTransmitted) (event.Subscription, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterTransmitted) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "Transmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseTransmitted(log types.Log) (*IAutomationRegistryMasterTransmitted, error) { + event := new(IAutomationRegistryMasterTransmitted) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "Transmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterUnpausedIterator struct { + Event *IAutomationRegistryMasterUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(IAutomationRegistryMasterUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterUnpausedIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUnpaused(opts *bind.FilterOpts) (*IAutomationRegistryMasterUnpausedIterator, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterUnpausedIterator{contract: _IAutomationRegistryMaster.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUnpaused) (event.Subscription, error) { + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterUnpaused) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUnpaused(log types.Log) (*IAutomationRegistryMasterUnpaused, error) { + event := 
new(IAutomationRegistryMasterUnpaused) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterUpkeepAdminTransferRequestedIterator struct { + Event *IAutomationRegistryMasterUpkeepAdminTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterUpkeepAdminTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterUpkeepAdminTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterUpkeepAdminTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterUpkeepAdminTransferRequested struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*IAutomationRegistryMasterUpkeepAdminTransferRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + 
var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterUpkeepAdminTransferRequestedIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepAdminTransferRequested", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterUpkeepAdminTransferRequested) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepAdminTransferRequested(log types.Log) 
(*IAutomationRegistryMasterUpkeepAdminTransferRequested, error) { + event := new(IAutomationRegistryMasterUpkeepAdminTransferRequested) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterUpkeepAdminTransferredIterator struct { + Event *IAutomationRegistryMasterUpkeepAdminTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterUpkeepAdminTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterUpkeepAdminTransferredIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterUpkeepAdminTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterUpkeepAdminTransferred struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*IAutomationRegistryMasterUpkeepAdminTransferredIterator, error) { + + var idRule []interface{} + for _, 
idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterUpkeepAdminTransferredIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepAdminTransferred", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterUpkeepAdminTransferred) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepAdminTransferred(log types.Log) 
(*IAutomationRegistryMasterUpkeepAdminTransferred, error) { + event := new(IAutomationRegistryMasterUpkeepAdminTransferred) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterUpkeepCanceledIterator struct { + Event *IAutomationRegistryMasterUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*IAutomationRegistryMasterUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range 
atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterUpkeepCanceledIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterUpkeepCanceled) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepCanceled(log types.Log) (*IAutomationRegistryMasterUpkeepCanceled, error) { + event := new(IAutomationRegistryMasterUpkeepCanceled) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
IAutomationRegistryMasterUpkeepCheckDataSetIterator struct { + Event *IAutomationRegistryMasterUpkeepCheckDataSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterUpkeepCheckDataSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterUpkeepCheckDataSetIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterUpkeepCheckDataSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterUpkeepCheckDataSet struct { + Id *big.Int + NewCheckData []byte + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepCheckDataSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterUpkeepCheckDataSetIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepCheckDataSet", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster 
*IAutomationRegistryMasterFilterer) WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterUpkeepCheckDataSet) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepCheckDataSet(log types.Log) (*IAutomationRegistryMasterUpkeepCheckDataSet, error) { + event := new(IAutomationRegistryMasterUpkeepCheckDataSet) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterUpkeepGasLimitSetIterator struct { + Event *IAutomationRegistryMasterUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + 
return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterUpkeepGasLimitSetIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterUpkeepGasLimitSetIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepGasLimitSet", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterUpkeepGasLimitSet) + if err := 
_IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepGasLimitSet(log types.Log) (*IAutomationRegistryMasterUpkeepGasLimitSet, error) { + event := new(IAutomationRegistryMasterUpkeepGasLimitSet) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterUpkeepMigratedIterator struct { + Event *IAutomationRegistryMasterUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterUpkeepMigrated struct { + Id *big.Int + 
RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterUpkeepMigratedIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepMigrated", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterUpkeepMigrated) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepMigrated(log types.Log) (*IAutomationRegistryMasterUpkeepMigrated, error) { + event := new(IAutomationRegistryMasterUpkeepMigrated) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + 
return event, nil +} + +type IAutomationRegistryMasterUpkeepOffchainConfigSetIterator struct { + Event *IAutomationRegistryMasterUpkeepOffchainConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterUpkeepOffchainConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterUpkeepOffchainConfigSetIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterUpkeepOffchainConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterUpkeepOffchainConfigSet struct { + Id *big.Int + OffchainConfig []byte + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepOffchainConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterUpkeepOffchainConfigSetIterator{contract: _IAutomationRegistryMaster.contract, event: 
"UpkeepOffchainConfigSet", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterUpkeepOffchainConfigSet) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepOffchainConfigSet(log types.Log) (*IAutomationRegistryMasterUpkeepOffchainConfigSet, error) { + event := new(IAutomationRegistryMasterUpkeepOffchainConfigSet) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterUpkeepPausedIterator struct { + Event *IAutomationRegistryMasterUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, 
it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterUpkeepPaused struct { + Id *big.Int + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepPausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterUpkeepPausedIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepPaused", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepPaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterUpkeepPaused) + if err := 
_IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepPaused(log types.Log) (*IAutomationRegistryMasterUpkeepPaused, error) { + event := new(IAutomationRegistryMasterUpkeepPaused) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IAutomationRegistryMasterUpkeepPerformedIterator struct { + Event *IAutomationRegistryMasterUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IAutomationRegistryMasterUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IAutomationRegistryMasterUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IAutomationRegistryMasterUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *IAutomationRegistryMasterUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IAutomationRegistryMasterUpkeepPerformed struct { + Id *big.Int + Success bool + TotalPayment 
*big.Int + GasUsed *big.Int + GasOverhead *big.Int + Trigger []byte + Raw types.Log +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*IAutomationRegistryMasterUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return &IAutomationRegistryMasterUpkeepPerformedIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IAutomationRegistryMasterUpkeepPerformed) + if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func 
(_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepPerformed(log types.Log) (*IAutomationRegistryMasterUpkeepPerformed, error) {
	event := new(IAutomationRegistryMasterUpkeepPerformed)
	if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// IAutomationRegistryMasterUpkeepPrivilegeConfigSetIterator is returned from
// FilterUpkeepPrivilegeConfigSet and is used to iterate over the raw logs and
// unpacked data for UpkeepPrivilegeConfigSet events raised by the
// IAutomationRegistryMaster contract.
type IAutomationRegistryMasterUpkeepPrivilegeConfigSetIterator struct {
	Event *IAutomationRegistryMasterUpkeepPrivilegeConfigSet // Event containing the contract specifics and raw log

	contract *bind.BoundContract // Generic contract to use for unpacking event data
	event    string              // Event name to use for unpacking event data

	logs chan types.Log        // Log channel receiving the found contract events
	sub  ethereum.Subscription // Subscription for errors, completion and termination
	done bool                  // Whether the subscription completed delivering logs
	fail error                 // Occurred error to stop iteration
}

// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *IAutomationRegistryMasterUpkeepPrivilegeConfigSetIterator) Next() bool {
	// If the iterator failed, stop iterating
	if it.fail != nil {
		return false
	}
	// If the iterator completed, deliver directly whatever's available
	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(IAutomationRegistryMasterUpkeepPrivilegeConfigSet)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}
	// Iterator still in progress, wait for either a data or an error event
	select {
	case log := <-it.logs:
		it.Event = new(IAutomationRegistryMasterUpkeepPrivilegeConfigSet)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		// Subscription ended; drain any buffered logs via the recursive call above.
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error occurred during filtering.
func (it *IAutomationRegistryMasterUpkeepPrivilegeConfigSetIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *IAutomationRegistryMasterUpkeepPrivilegeConfigSetIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// IAutomationRegistryMasterUpkeepPrivilegeConfigSet represents a
// UpkeepPrivilegeConfigSet event raised by the IAutomationRegistryMaster
// contract.
type IAutomationRegistryMasterUpkeepPrivilegeConfigSet struct {
	Id              *big.Int
	PrivilegeConfig []byte
	Raw             types.Log // Blockchain specific contextual infos
}

// FilterUpkeepPrivilegeConfigSet is a free log retrieval operation binding the
// contract event UpkeepPrivilegeConfigSet; id is the indexed topic filter.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepPrivilegeConfigSetIterator, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepPrivilegeConfigSet", idRule)
	if err != nil {
		return nil, err
	}
	return &IAutomationRegistryMasterUpkeepPrivilegeConfigSetIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepPrivilegeConfigSet", logs: logs, sub: sub}, nil
}

// WatchUpkeepPrivilegeConfigSet is a free log subscription operation binding
// the contract event UpkeepPrivilegeConfigSet; unpacked events are delivered
// on sink until the subscription errors or quit fires.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepPrivilegeConfigSet", idRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:
				// New log arrived, parse the event and forward to the user
				event := new(IAutomationRegistryMasterUpkeepPrivilegeConfigSet)
				if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil {
					return err
				}
				event.Raw = log

				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseUpkeepPrivilegeConfigSet is a log parse operation binding the contract
// event UpkeepPrivilegeConfigSet.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepPrivilegeConfigSet(log types.Log) (*IAutomationRegistryMasterUpkeepPrivilegeConfigSet, error) {
	event := new(IAutomationRegistryMasterUpkeepPrivilegeConfigSet)
	if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// IAutomationRegistryMasterUpkeepReceivedIterator is returned from
// FilterUpkeepReceived and is used to iterate over the raw logs and unpacked
// data for UpkeepReceived events raised by the IAutomationRegistryMaster
// contract.
type IAutomationRegistryMasterUpkeepReceivedIterator struct {
	Event *IAutomationRegistryMasterUpkeepReceived // Event containing the contract specifics and raw log

	contract *bind.BoundContract // Generic contract to use for unpacking event data
	event    string              // Event name to use for unpacking event data

	logs chan types.Log        // Log channel receiving the found contract events
	sub  ethereum.Subscription // Subscription for errors, completion and termination
	done bool                  // Whether the subscription completed delivering logs
	fail error                 // Occurred error to stop iteration
}

// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *IAutomationRegistryMasterUpkeepReceivedIterator) Next() bool {
	// If the iterator failed, stop iterating
	if it.fail != nil {
		return false
	}
	// If the iterator completed, deliver directly whatever's available
	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(IAutomationRegistryMasterUpkeepReceived)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}
	// Iterator still in progress, wait for either a data or an error event
	select {
	case log := <-it.logs:
		it.Event = new(IAutomationRegistryMasterUpkeepReceived)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		// Subscription ended; drain any buffered logs via the recursive call above.
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error occurred during filtering.
func (it *IAutomationRegistryMasterUpkeepReceivedIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *IAutomationRegistryMasterUpkeepReceivedIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// IAutomationRegistryMasterUpkeepReceived represents a UpkeepReceived event
// raised by the IAutomationRegistryMaster contract.
type IAutomationRegistryMasterUpkeepReceived struct {
	Id              *big.Int
	StartingBalance *big.Int
	ImportedFrom    common.Address
	Raw             types.Log // Blockchain specific contextual infos
}

// FilterUpkeepReceived is a free log retrieval operation binding the contract
// event UpkeepReceived; id is the indexed topic filter.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepReceivedIterator, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepReceived", idRule)
	if err != nil {
		return nil, err
	}
	return &IAutomationRegistryMasterUpkeepReceivedIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepReceived", logs: logs, sub: sub}, nil
}

// WatchUpkeepReceived is a free log subscription operation binding the
// contract event UpkeepReceived; unpacked events are delivered on sink until
// the subscription errors or quit fires.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepReceived, id []*big.Int) (event.Subscription, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepReceived", idRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:
				// New log arrived, parse the event and forward to the user
				event := new(IAutomationRegistryMasterUpkeepReceived)
				if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepReceived", log); err != nil {
					return err
				}
				event.Raw = log

				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseUpkeepReceived is a log parse operation binding the contract event
// UpkeepReceived.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepReceived(log types.Log) (*IAutomationRegistryMasterUpkeepReceived, error) {
	event := new(IAutomationRegistryMasterUpkeepReceived)
	if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepReceived", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// IAutomationRegistryMasterUpkeepRegisteredIterator is returned from
// FilterUpkeepRegistered and is used to iterate over the raw logs and unpacked
// data for UpkeepRegistered events raised by the IAutomationRegistryMaster
// contract.
type IAutomationRegistryMasterUpkeepRegisteredIterator struct {
	Event *IAutomationRegistryMasterUpkeepRegistered // Event containing the contract specifics and raw log

	contract *bind.BoundContract // Generic contract to use for unpacking event data
	event    string              // Event name to use for unpacking event data

	logs chan types.Log        // Log channel receiving the found contract events
	sub  ethereum.Subscription // Subscription for errors, completion and termination
	done bool                  // Whether the subscription completed delivering logs
	fail error                 // Occurred error to stop iteration
}

// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *IAutomationRegistryMasterUpkeepRegisteredIterator) Next() bool {
	// If the iterator failed, stop iterating
	if it.fail != nil {
		return false
	}
	// If the iterator completed, deliver directly whatever's available
	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(IAutomationRegistryMasterUpkeepRegistered)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}
	// Iterator still in progress, wait for either a data or an error event
	select {
	case log := <-it.logs:
		it.Event = new(IAutomationRegistryMasterUpkeepRegistered)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		// Subscription ended; drain any buffered logs via the recursive call above.
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error occurred during filtering.
func (it *IAutomationRegistryMasterUpkeepRegisteredIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *IAutomationRegistryMasterUpkeepRegisteredIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// IAutomationRegistryMasterUpkeepRegistered represents a UpkeepRegistered
// event raised by the IAutomationRegistryMaster contract.
type IAutomationRegistryMasterUpkeepRegistered struct {
	Id         *big.Int
	PerformGas uint32
	Admin      common.Address
	Raw        types.Log // Blockchain specific contextual infos
}

// FilterUpkeepRegistered is a free log retrieval operation binding the
// contract event UpkeepRegistered; id is the indexed topic filter.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepRegisteredIterator, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepRegistered", idRule)
	if err != nil {
		return nil, err
	}
	return &IAutomationRegistryMasterUpkeepRegisteredIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil
}

// WatchUpkeepRegistered is a free log subscription operation binding the
// contract event UpkeepRegistered; unpacked events are delivered on sink until
// the subscription errors or quit fires.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepRegistered, id []*big.Int) (event.Subscription, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepRegistered", idRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:
				// New log arrived, parse the event and forward to the user
				event := new(IAutomationRegistryMasterUpkeepRegistered)
				if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil {
					return err
				}
				event.Raw = log

				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseUpkeepRegistered is a log parse operation binding the contract event
// UpkeepRegistered.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepRegistered(log types.Log) (*IAutomationRegistryMasterUpkeepRegistered, error) {
	event := new(IAutomationRegistryMasterUpkeepRegistered)
	if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// IAutomationRegistryMasterUpkeepTriggerConfigSetIterator is returned from
// FilterUpkeepTriggerConfigSet and is used to iterate over the raw logs and
// unpacked data for UpkeepTriggerConfigSet events raised by the
// IAutomationRegistryMaster contract.
type IAutomationRegistryMasterUpkeepTriggerConfigSetIterator struct {
	Event *IAutomationRegistryMasterUpkeepTriggerConfigSet // Event containing the contract specifics and raw log

	contract *bind.BoundContract // Generic contract to use for unpacking event data
	event    string              // Event name to use for unpacking event data

	logs chan types.Log        // Log channel receiving the found contract events
	sub  ethereum.Subscription // Subscription for errors, completion and termination
	done bool                  // Whether the subscription completed delivering logs
	fail error                 // Occurred error to stop iteration
}

// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *IAutomationRegistryMasterUpkeepTriggerConfigSetIterator) Next() bool {
	// If the iterator failed, stop iterating
	if it.fail != nil {
		return false
	}
	// If the iterator completed, deliver directly whatever's available
	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(IAutomationRegistryMasterUpkeepTriggerConfigSet)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}
	// Iterator still in progress, wait for either a data or an error event
	select {
	case log := <-it.logs:
		it.Event = new(IAutomationRegistryMasterUpkeepTriggerConfigSet)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		// Subscription ended; drain any buffered logs via the recursive call above.
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error occurred during filtering.
func (it *IAutomationRegistryMasterUpkeepTriggerConfigSetIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *IAutomationRegistryMasterUpkeepTriggerConfigSetIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// IAutomationRegistryMasterUpkeepTriggerConfigSet represents a
// UpkeepTriggerConfigSet event raised by the IAutomationRegistryMaster
// contract.
type IAutomationRegistryMasterUpkeepTriggerConfigSet struct {
	Id            *big.Int
	TriggerConfig []byte
	Raw           types.Log // Blockchain specific contextual infos
}

// FilterUpkeepTriggerConfigSet is a free log retrieval operation binding the
// contract event UpkeepTriggerConfigSet; id is the indexed topic filter.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepTriggerConfigSetIterator, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepTriggerConfigSet", idRule)
	if err != nil {
		return nil, err
	}
	return &IAutomationRegistryMasterUpkeepTriggerConfigSetIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepTriggerConfigSet", logs: logs, sub: sub}, nil
}

// WatchUpkeepTriggerConfigSet is a free log subscription operation binding the
// contract event UpkeepTriggerConfigSet; unpacked events are delivered on sink
// until the subscription errors or quit fires.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepTriggerConfigSet", idRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:
				// New log arrived, parse the event and forward to the user
				event := new(IAutomationRegistryMasterUpkeepTriggerConfigSet)
				if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil {
					return err
				}
				event.Raw = log

				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseUpkeepTriggerConfigSet is a log parse operation binding the contract
// event UpkeepTriggerConfigSet.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepTriggerConfigSet(log types.Log) (*IAutomationRegistryMasterUpkeepTriggerConfigSet, error) {
	event := new(IAutomationRegistryMasterUpkeepTriggerConfigSet)
	if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// IAutomationRegistryMasterUpkeepUnpausedIterator is returned from
// FilterUpkeepUnpaused and is used to iterate over the raw logs and unpacked
// data for UpkeepUnpaused events raised by the IAutomationRegistryMaster
// contract.
type IAutomationRegistryMasterUpkeepUnpausedIterator struct {
	Event *IAutomationRegistryMasterUpkeepUnpaused // Event containing the contract specifics and raw log

	contract *bind.BoundContract // Generic contract to use for unpacking event data
	event    string              // Event name to use for unpacking event data

	logs chan types.Log        // Log channel receiving the found contract events
	sub  ethereum.Subscription // Subscription for errors, completion and termination
	done bool                  // Whether the subscription completed delivering logs
	fail error                 // Occurred error to stop iteration
}

// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *IAutomationRegistryMasterUpkeepUnpausedIterator) Next() bool {
	// If the iterator failed, stop iterating
	if it.fail != nil {
		return false
	}
	// If the iterator completed, deliver directly whatever's available
	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(IAutomationRegistryMasterUpkeepUnpaused)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}
	// Iterator still in progress, wait for either a data or an error event
	select {
	case log := <-it.logs:
		it.Event = new(IAutomationRegistryMasterUpkeepUnpaused)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		// Subscription ended; drain any buffered logs via the recursive call above.
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error occurred during filtering.
func (it *IAutomationRegistryMasterUpkeepUnpausedIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *IAutomationRegistryMasterUpkeepUnpausedIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// IAutomationRegistryMasterUpkeepUnpaused represents a UpkeepUnpaused event
// raised by the IAutomationRegistryMaster contract.
type IAutomationRegistryMasterUpkeepUnpaused struct {
	Id  *big.Int
	Raw types.Log // Blockchain specific contextual infos
}

// FilterUpkeepUnpaused is a free log retrieval operation binding the contract
// event UpkeepUnpaused; id is the indexed topic filter.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepUnpausedIterator, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.FilterLogs(opts, "UpkeepUnpaused", idRule)
	if err != nil {
		return nil, err
	}
	return &IAutomationRegistryMasterUpkeepUnpausedIterator{contract: _IAutomationRegistryMaster.contract, event: "UpkeepUnpaused", logs: logs, sub: sub}, nil
}

// WatchUpkeepUnpaused is a free log subscription operation binding the
// contract event UpkeepUnpaused; unpacked events are delivered on sink until
// the subscription errors or quit fires.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepUnpaused, id []*big.Int) (event.Subscription, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _IAutomationRegistryMaster.contract.WatchLogs(opts, "UpkeepUnpaused", idRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:
				// New log arrived, parse the event and forward to the user
				event := new(IAutomationRegistryMasterUpkeepUnpaused)
				if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil {
					return err
				}
				event.Raw = log

				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseUpkeepUnpaused is a log parse operation binding the contract event
// UpkeepUnpaused.
func (_IAutomationRegistryMaster *IAutomationRegistryMasterFilterer) ParseUpkeepUnpaused(log types.Log) (*IAutomationRegistryMasterUpkeepUnpaused, error) {
	event := new(IAutomationRegistryMasterUpkeepUnpaused)
	if err := _IAutomationRegistryMaster.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// The following structs are the generated multi-value return containers for
// the contract's view methods of the same (lower-cased) names.

// CheckCallback is the return container for the checkCallback view method.
type CheckCallback struct {
	UpkeepNeeded        bool
	PerformData         []byte
	UpkeepFailureReason uint8
	GasUsed             *big.Int
}

// CheckUpkeep is the return container for the trigger-data checkUpkeep
// overload.
type CheckUpkeep struct {
	UpkeepNeeded        bool
	PerformData         []byte
	UpkeepFailureReason uint8
	GasUsed             *big.Int
	GasLimit            *big.Int
	FastGasWei          *big.Int
	LinkNative          *big.Int
}

// CheckUpkeep0 is the return container for the id-only checkUpkeep overload.
type CheckUpkeep0 struct {
	UpkeepNeeded        bool
	PerformData         []byte
	UpkeepFailureReason uint8
	GasUsed             *big.Int
	GasLimit            *big.Int
	FastGasWei          *big.Int
	LinkNative          *big.Int
}

// GetSignerInfo is the return container for the getSignerInfo view method.
type GetSignerInfo struct {
	Active bool
	Index  uint8
}

// GetState is the return container for the getState view method.
type GetState struct {
	State        AutomationRegistryBase22State
	Config       AutomationRegistryBase22OnchainConfigLegacy
	Signers      []common.Address
	Transmitters []common.Address
	F            uint8
}

// GetTransmitterInfo is the return container for the getTransmitterInfo view
// method.
type GetTransmitterInfo struct {
	Active        bool
	Index         uint8
	Balance       *big.Int
	LastCollected *big.Int
	Payee         common.Address
}

// LatestConfigDetails is the return container for the latestConfigDetails
// view method.
type LatestConfigDetails struct {
	ConfigCount  uint32
	BlockNumber  uint32
	ConfigDigest [32]byte
}

// LatestConfigDigestAndEpoch is the return container for the
// latestConfigDigestAndEpoch view method.
type LatestConfigDigestAndEpoch struct {
	ScanLogs     bool
	ConfigDigest [32]byte
	Epoch        uint32
}

// SimulatePerformUpkeep is the return container for the simulatePerformUpkeep
// view method.
type SimulatePerformUpkeep struct {
	Success bool
	GasUsed *big.Int
}

// ParseLog dispatches a raw log to the matching typed Parse* method by
// comparing the log's first topic against every known event ID in the ABI.
// Unknown topics yield an error.
func (_IAutomationRegistryMaster *IAutomationRegistryMaster) ParseLog(log types.Log) (generated.AbigenLog, error) {
	switch log.Topics[0] {
	case _IAutomationRegistryMaster.abi.Events["AdminPrivilegeConfigSet"].ID:
		return _IAutomationRegistryMaster.ParseAdminPrivilegeConfigSet(log)
	case _IAutomationRegistryMaster.abi.Events["CancelledUpkeepReport"].ID:
		return _IAutomationRegistryMaster.ParseCancelledUpkeepReport(log)
	case _IAutomationRegistryMaster.abi.Events["ChainSpecificModuleUpdated"].ID:
		return _IAutomationRegistryMaster.ParseChainSpecificModuleUpdated(log)
	case _IAutomationRegistryMaster.abi.Events["ConfigSet"].ID:
		return _IAutomationRegistryMaster.ParseConfigSet(log)
	case _IAutomationRegistryMaster.abi.Events["DedupKeyAdded"].ID:
		return _IAutomationRegistryMaster.ParseDedupKeyAdded(log)
	case _IAutomationRegistryMaster.abi.Events["FundsAdded"].ID:
		return _IAutomationRegistryMaster.ParseFundsAdded(log)
	case _IAutomationRegistryMaster.abi.Events["FundsWithdrawn"].ID:
		return _IAutomationRegistryMaster.ParseFundsWithdrawn(log)
	case _IAutomationRegistryMaster.abi.Events["InsufficientFundsUpkeepReport"].ID:
		return _IAutomationRegistryMaster.ParseInsufficientFundsUpkeepReport(log)
	case _IAutomationRegistryMaster.abi.Events["OwnerFundsWithdrawn"].ID:
		return _IAutomationRegistryMaster.ParseOwnerFundsWithdrawn(log)
	case _IAutomationRegistryMaster.abi.Events["OwnershipTransferRequested"].ID:
		return _IAutomationRegistryMaster.ParseOwnershipTransferRequested(log)
	case _IAutomationRegistryMaster.abi.Events["OwnershipTransferred"].ID:
		return _IAutomationRegistryMaster.ParseOwnershipTransferred(log)
	case _IAutomationRegistryMaster.abi.Events["Paused"].ID:
		return _IAutomationRegistryMaster.ParsePaused(log)
	case _IAutomationRegistryMaster.abi.Events["PayeesUpdated"].ID:
		return _IAutomationRegistryMaster.ParsePayeesUpdated(log)
	case _IAutomationRegistryMaster.abi.Events["PayeeshipTransferRequested"].ID:
		return _IAutomationRegistryMaster.ParsePayeeshipTransferRequested(log)
	case _IAutomationRegistryMaster.abi.Events["PayeeshipTransferred"].ID:
		return _IAutomationRegistryMaster.ParsePayeeshipTransferred(log)
	case _IAutomationRegistryMaster.abi.Events["PaymentWithdrawn"].ID:
		return _IAutomationRegistryMaster.ParsePaymentWithdrawn(log)
	case _IAutomationRegistryMaster.abi.Events["ReorgedUpkeepReport"].ID:
		return _IAutomationRegistryMaster.ParseReorgedUpkeepReport(log)
	case _IAutomationRegistryMaster.abi.Events["StaleUpkeepReport"].ID:
		return _IAutomationRegistryMaster.ParseStaleUpkeepReport(log)
	case _IAutomationRegistryMaster.abi.Events["Transmitted"].ID:
		return _IAutomationRegistryMaster.ParseTransmitted(log)
	case _IAutomationRegistryMaster.abi.Events["Unpaused"].ID:
		return _IAutomationRegistryMaster.ParseUnpaused(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepAdminTransferRequested"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepAdminTransferRequested(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepAdminTransferred"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepAdminTransferred(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepCanceled"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepCanceled(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepCheckDataSet"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepCheckDataSet(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepGasLimitSet"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepGasLimitSet(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepMigrated"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepMigrated(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepOffchainConfigSet"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepOffchainConfigSet(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepPaused"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepPaused(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepPerformed"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepPerformed(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepPrivilegeConfigSet"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepPrivilegeConfigSet(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepReceived"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepReceived(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepRegistered"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepRegistered(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepTriggerConfigSet"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepTriggerConfigSet(log)
	case _IAutomationRegistryMaster.abi.Events["UpkeepUnpaused"].ID:
		return _IAutomationRegistryMaster.ParseUpkeepUnpaused(log)

	default:
		return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0])
	}
}

// The Topic methods below return each event's hard-coded topic-0 hash
// (keccak256 of the event signature), used for matching raw logs.

func (IAutomationRegistryMasterAdminPrivilegeConfigSet) Topic() common.Hash {
	return common.HexToHash("0x7c44b4eb59ee7873514e7e43e7718c269d872965938b288aa143befca62f99d2")
}

func (IAutomationRegistryMasterCancelledUpkeepReport) Topic() common.Hash {
	return common.HexToHash("0xc3237c8807c467c1b39b8d0395eff077313e691bf0a7388106792564ebfd5636")
}

func (IAutomationRegistryMasterChainSpecificModuleUpdated) Topic() common.Hash {
	return common.HexToHash("0xdefc28b11a7980dbe0c49dbbd7055a1584bc8075097d1e8b3b57fb7283df2ad7")
}

func (IAutomationRegistryMasterConfigSet) Topic() common.Hash {
	return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05")
}

func (IAutomationRegistryMasterDedupKeyAdded) Topic() common.Hash {
	return common.HexToHash("0xa4a4e334c0e330143f9437484fe516c13bc560b86b5b0daf58e7084aaac228f2")
}

func (IAutomationRegistryMasterFundsAdded) Topic() common.Hash {
	return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203")
}

func (IAutomationRegistryMasterFundsWithdrawn) Topic() common.Hash {
	return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318")
}

func (IAutomationRegistryMasterInsufficientFundsUpkeepReport) Topic() common.Hash {
	return common.HexToHash("0x377c8b0c126ae5248d27aca1c76fac4608aff85673ee3caf09747e1044549e02")
}

func (IAutomationRegistryMasterOwnerFundsWithdrawn) Topic() common.Hash {
	return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1")
}

func (IAutomationRegistryMasterOwnershipTransferRequested) Topic() common.Hash {
	return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278")
}

func (IAutomationRegistryMasterOwnershipTransferred) Topic() common.Hash {
	return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0")
}

func (IAutomationRegistryMasterPaused) Topic() common.Hash {
	return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258")
}

func (IAutomationRegistryMasterPayeesUpdated) Topic() common.Hash {
	return common.HexToHash("0xa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725")
}

func (IAutomationRegistryMasterPayeeshipTransferRequested) Topic() common.Hash {
	return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367")
}

func (IAutomationRegistryMasterPayeeshipTransferred) Topic() common.Hash {
	return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3")
}

func (IAutomationRegistryMasterPaymentWithdrawn) Topic() common.Hash {
	return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698")
}

func (IAutomationRegistryMasterReorgedUpkeepReport) Topic() common.Hash {
	return common.HexToHash("0x6aa7f60c176da7af894b384daea2249497448137f5943c1237ada8bc92bdc301")
}

func (IAutomationRegistryMasterStaleUpkeepReport) Topic() common.Hash {
	return common.HexToHash("0x405288ea7be309e16cfdf481367f90a413e1d4634fcdaf8966546db9b93012e8")
}

func (IAutomationRegistryMasterTransmitted) Topic() common.Hash {
	return common.HexToHash("0xb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62")
}

func (IAutomationRegistryMasterUnpaused) Topic() common.Hash {
	return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa")
}

func (IAutomationRegistryMasterUpkeepAdminTransferRequested) Topic() common.Hash {
	return common.HexToHash("0xb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b35")
}

func (IAutomationRegistryMasterUpkeepAdminTransferred) Topic() common.Hash {
	return common.HexToHash("0x5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c")
}

func (IAutomationRegistryMasterUpkeepCanceled) Topic() common.Hash {
	return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181")
}

func (IAutomationRegistryMasterUpkeepCheckDataSet) Topic() common.Hash {
	return common.HexToHash("0xcba2d5723b2ee59e53a8e8a82a4a7caf4fdfe70e9f7c582950bf7e7a5c24e83d")
}

func (IAutomationRegistryMasterUpkeepGasLimitSet) Topic() common.Hash {
	return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c")
}

func (IAutomationRegistryMasterUpkeepMigrated) Topic() common.Hash {
	return common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff")
}

func (IAutomationRegistryMasterUpkeepOffchainConfigSet) Topic() common.Hash {
	return common.HexToHash("0x3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf4850")
}

func (IAutomationRegistryMasterUpkeepPaused) Topic() common.Hash {
	return common.HexToHash("0x8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f")
}

func (IAutomationRegistryMasterUpkeepPerformed) Topic() common.Hash {
	return common.HexToHash("0xad8cc9579b21dfe2c2f6ea35ba15b656e46b4f5b0cb424f52739b8ce5cac9c5b")
}

func (IAutomationRegistryMasterUpkeepPrivilegeConfigSet) Topic() common.Hash {
	return common.HexToHash("0x2fd8d70753a007014349d4591843cc031c2dd7a260d7dd82eca8253686ae7769")
}

func (IAutomationRegistryMasterUpkeepReceived) Topic() common.Hash {
	return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71")
}

func (IAutomationRegistryMasterUpkeepRegistered) Topic() common.Hash {
	return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012")
}

func (IAutomationRegistryMasterUpkeepTriggerConfigSet) Topic() common.Hash {
	return common.HexToHash("0x2b72ac786c97e68dbab71023ed6f2bdbfc80ad9bb7808941929229d71b7d5664")
}

func (IAutomationRegistryMasterUpkeepUnpaused) Topic() common.Hash {
	return common.HexToHash("0x7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a47456")
}

// Address returns the on-chain address this wrapper instance is bound to.
func (_IAutomationRegistryMaster *IAutomationRegistryMaster) Address() common.Address {
	return _IAutomationRegistryMaster.address
}

// IAutomationRegistryMasterInterface lists every call, transact and
// filter/watch/parse method exposed by the generated wrapper.
type IAutomationRegistryMasterInterface interface {
	CheckCallback(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (CheckCallback,

		error)

	CheckUpkeep(opts *bind.CallOpts, id *big.Int, triggerData []byte) (CheckUpkeep,

		error)

	CheckUpkeep0(opts *bind.CallOpts, id *big.Int) (CheckUpkeep0,

		error)

	FallbackTo(opts *bind.CallOpts) (common.Address, error)

	GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error)

	GetAdminPrivilegeConfig(opts *bind.CallOpts, admin common.Address) ([]byte, error)

	GetAllowedReadOnlyAddress(opts *bind.CallOpts) (common.Address, error)

	GetAutomationForwarderLogic(opts *bind.CallOpts) (common.Address, error)

	GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error)

	GetCancellationDelay(opts *bind.CallOpts) (*big.Int, error)

	GetChainModule(opts *bind.CallOpts) (common.Address, error)

	GetConditionalGasOverhead(opts *bind.CallOpts) (*big.Int, error)

	GetFastGasFeedAddress(opts *bind.CallOpts) (common.Address, error)

	GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error)

	GetLinkAddress(opts *bind.CallOpts) (common.Address, error)

	GetLinkNativeFeedAddress(opts *bind.CallOpts) (common.Address, error)

	GetLogGasOverhead(opts *bind.CallOpts) (*big.Int, error)

	GetMaxPaymentForGas(opts *bind.CallOpts, triggerType uint8, gasLimit uint32) (*big.Int, error)

	GetMinBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error)

	GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error)

	GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error)

	GetPerPerformByteGasOverhead(opts *bind.CallOpts) (*big.Int, error)

	GetPerSignerGasOverhead(opts *bind.CallOpts) (*big.Int, error)

	GetReorgProtectionEnabled(opts *bind.CallOpts) (bool, error)

	GetSignerInfo(opts *bind.CallOpts, query common.Address) (GetSignerInfo,

		error)

	GetState(opts *bind.CallOpts) (GetState,

		error)

	GetTransmitCalldataFixedBytesOverhead(opts *bind.CallOpts) (*big.Int, error)

	GetTransmitCalldataPerSignerBytesOverhead(opts *bind.CallOpts) (*big.Int, error)

	GetTransmitterInfo(opts *bind.CallOpts, query common.Address) (GetTransmitterInfo,

		error)

	GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error)

	GetUpkeep(opts *bind.CallOpts, id *big.Int) (AutomationRegistryBase22UpkeepInfo, error)

	GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error)

	GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error)

	HasDedupKey(opts *bind.CallOpts, dedupKey [32]byte) (bool, error)

	LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails,

		error)

	LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch,

		error)

	Owner(opts *bind.CallOpts) (common.Address, error)

	SimulatePerformUpkeep(opts *bind.CallOpts, id *big.Int, performData []byte) (SimulatePerformUpkeep,

		error)

	TypeAndVersion(opts *bind.CallOpts) (string, error)

	UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error)

	UpkeepVersion(opts *bind.CallOpts) (uint8, error)

	AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error)

	AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error)

	AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error)

	AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error)

	CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error)

	ExecuteCallback(opts *bind.TransactOpts, id *big.Int, payload []byte) (*types.Transaction, error)

	MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error)

	OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error)

	Pause(opts *bind.TransactOpts) (*types.Transaction, error)

	PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error)

	ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error)

	RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error)

	RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error)

	RegisterUpkeep0(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error)

	SetAdminPrivilegeConfig(opts *bind.TransactOpts, admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error)

	SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error)

	SetConfigTypeSafe(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig AutomationRegistryBase22OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error)

	SetPayees(opts *bind.TransactOpts, payees []common.Address) (*types.Transaction, error)

	SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error)

	SetUpkeepCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error)

	SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error)

	SetUpkeepOffchainConfig(opts *bind.TransactOpts, id *big.Int, config []byte) (*types.Transaction, error)

	SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error)

	SetUpkeepTriggerConfig(opts *bind.TransactOpts, id *big.Int, triggerConfig []byte) (*types.Transaction, error)

	TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error)

	TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error)

	TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error)

	Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error)

	Unpause(opts *bind.TransactOpts) (*types.Transaction, error)

	UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error)

	WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error)

	WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error)

	WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error)

	Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error)

	FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*IAutomationRegistryMasterAdminPrivilegeConfigSetIterator, error)

	WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error)

	ParseAdminPrivilegeConfigSet(log types.Log) (*IAutomationRegistryMasterAdminPrivilegeConfigSet, error)

	FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterCancelledUpkeepReportIterator, error)

	WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterCancelledUpkeepReport, id []*big.Int) (event.Subscription, error)

	ParseCancelledUpkeepReport(log types.Log) (*IAutomationRegistryMasterCancelledUpkeepReport, error)

	FilterChainSpecificModuleUpdated(opts *bind.FilterOpts) (*IAutomationRegistryMasterChainSpecificModuleUpdatedIterator, error)

	WatchChainSpecificModuleUpdated(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterChainSpecificModuleUpdated) (event.Subscription, error)

	ParseChainSpecificModuleUpdated(log types.Log) (*IAutomationRegistryMasterChainSpecificModuleUpdated, error)

	FilterConfigSet(opts *bind.FilterOpts) (*IAutomationRegistryMasterConfigSetIterator, error)

	WatchConfigSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterConfigSet) (event.Subscription, error)

	ParseConfigSet(log types.Log) (*IAutomationRegistryMasterConfigSet, error)

	FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*IAutomationRegistryMasterDedupKeyAddedIterator, error)

	WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterDedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error)

	ParseDedupKeyAdded(log types.Log) (*IAutomationRegistryMasterDedupKeyAdded, error)

	FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*IAutomationRegistryMasterFundsAddedIterator, error)

	WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error)

	ParseFundsAdded(log types.Log) (*IAutomationRegistryMasterFundsAdded, error)

	FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterFundsWithdrawnIterator, error)

	WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterFundsWithdrawn, id []*big.Int) (event.Subscription, error)

	ParseFundsWithdrawn(log types.Log) (*IAutomationRegistryMasterFundsWithdrawn, error)

	FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterInsufficientFundsUpkeepReportIterator, error)

	WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error)

	ParseInsufficientFundsUpkeepReport(log types.Log) (*IAutomationRegistryMasterInsufficientFundsUpkeepReport, error)

	FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*IAutomationRegistryMasterOwnerFundsWithdrawnIterator, error)

	WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterOwnerFundsWithdrawn) (event.Subscription, error)

	ParseOwnerFundsWithdrawn(log types.Log) (*IAutomationRegistryMasterOwnerFundsWithdrawn, error)

FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*IAutomationRegistryMasterOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*IAutomationRegistryMasterOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*IAutomationRegistryMasterOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*IAutomationRegistryMasterOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*IAutomationRegistryMasterPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*IAutomationRegistryMasterPaused, error) + + FilterPayeesUpdated(opts *bind.FilterOpts) (*IAutomationRegistryMasterPayeesUpdatedIterator, error) + + WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterPayeesUpdated) (event.Subscription, error) + + ParsePayeesUpdated(log types.Log) (*IAutomationRegistryMasterPayeesUpdated, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*IAutomationRegistryMasterPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) 
(*IAutomationRegistryMasterPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*IAutomationRegistryMasterPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*IAutomationRegistryMasterPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*IAutomationRegistryMasterPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*IAutomationRegistryMasterPaymentWithdrawn, error) + + FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterReorgedUpkeepReportIterator, error) + + WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseReorgedUpkeepReport(log types.Log) (*IAutomationRegistryMasterReorgedUpkeepReport, error) + + FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterStaleUpkeepReportIterator, error) + + WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterStaleUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseStaleUpkeepReport(log types.Log) (*IAutomationRegistryMasterStaleUpkeepReport, error) + + FilterTransmitted(opts *bind.FilterOpts) (*IAutomationRegistryMasterTransmittedIterator, error) + + WatchTransmitted(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterTransmitted) 
(event.Subscription, error) + + ParseTransmitted(log types.Log) (*IAutomationRegistryMasterTransmitted, error) + + FilterUnpaused(opts *bind.FilterOpts) (*IAutomationRegistryMasterUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*IAutomationRegistryMasterUnpaused, error) + + FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*IAutomationRegistryMasterUpkeepAdminTransferRequestedIterator, error) + + WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferRequested(log types.Log) (*IAutomationRegistryMasterUpkeepAdminTransferRequested, error) + + FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*IAutomationRegistryMasterUpkeepAdminTransferredIterator, error) + + WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferred(log types.Log) (*IAutomationRegistryMasterUpkeepAdminTransferred, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*IAutomationRegistryMasterUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*IAutomationRegistryMasterUpkeepCanceled, error) + + FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepCheckDataSetIterator, error) + + WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink 
chan<- *IAutomationRegistryMasterUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepCheckDataSet(log types.Log) (*IAutomationRegistryMasterUpkeepCheckDataSet, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepGasLimitSetIterator, error) + + WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*IAutomationRegistryMasterUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepMigrated(log types.Log) (*IAutomationRegistryMasterUpkeepMigrated, error) + + FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepOffchainConfigSetIterator, error) + + WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepOffchainConfigSet(log types.Log) (*IAutomationRegistryMasterUpkeepOffchainConfigSet, error) + + FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepPausedIterator, error) + + WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepPaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPaused(log types.Log) (*IAutomationRegistryMasterUpkeepPaused, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*IAutomationRegistryMasterUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) + + ParseUpkeepPerformed(log 
types.Log) (*IAutomationRegistryMasterUpkeepPerformed, error) + + FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepPrivilegeConfigSetIterator, error) + + WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPrivilegeConfigSet(log types.Log) (*IAutomationRegistryMasterUpkeepPrivilegeConfigSet, error) + + FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*IAutomationRegistryMasterUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*IAutomationRegistryMasterUpkeepRegistered, error) + + FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepTriggerConfigSetIterator, error) + + WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepTriggerConfigSet(log types.Log) (*IAutomationRegistryMasterUpkeepTriggerConfigSet, error) + + FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*IAutomationRegistryMasterUpkeepUnpausedIterator, error) + + WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *IAutomationRegistryMasterUpkeepUnpaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepUnpaused(log types.Log) (*IAutomationRegistryMasterUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, 
error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1/i_keeper_registry_master_wrapper_2_1.go b/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1/i_keeper_registry_master_wrapper_2_1.go new file mode 100644 index 00000000..770a44eb --- /dev/null +++ b/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1/i_keeper_registry_master_wrapper_2_1.go @@ -0,0 +1,6442 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package i_keeper_registry_master_wrapper_2_1 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type KeeperRegistryBase21OnchainConfig struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + MaxCheckDataSize uint32 + MaxPerformDataSize uint32 + MaxRevertDataSize uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrars []common.Address + UpkeepPrivilegeManager common.Address +} + +type KeeperRegistryBase21State struct { + Nonce uint32 + OwnerLinkBalance *big.Int + ExpectedLinkBalance *big.Int + TotalPremium *big.Int + NumUpkeeps *big.Int + ConfigCount uint32 + LatestConfigBlockNumber uint32 + LatestConfigDigest [32]byte + LatestEpoch uint32 + Paused bool +} + +type 
KeeperRegistryBase21UpkeepInfo struct { + Target common.Address + PerformGas uint32 + CheckData []byte + Balance *big.Int + Admin common.Address + MaxValidBlocknumber uint64 + LastPerformedBlockNumber uint32 + AmountSpent *big.Int + Paused bool + OffchainConfig []byte +} + +var IKeeperRegistryMasterMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CheckDataExceedsLimit\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ConfigDigestMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfFaultyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientFunds\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidReport\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTransmitter\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTrigger\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTriggerType\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxCheckDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxPerformDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"
NotAContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveTransmitters\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByUpkeepPrivilegeManager\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyPausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUnpausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReentrantCall\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistryPaused\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedTransmitter\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepAlreadyExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepCancelled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexe
d\":true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"AdminPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"CancelledUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"dedupKey\",\"type\":\"bytes32\"}],\"name\":\"DedupKeyAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"inter
nalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"InsufficientFundsUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"PayeesUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"nam
e\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"ReorgedUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"StaleUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},
{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"UpkeepCheckDataSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepOffchainConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":
\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepPaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasOverhead\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepTriggerConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"
internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepUnpaused\",\"type\":\"event\"},{\"stateMutability\":\"nonpayable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"acceptUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"cancelUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes[]\",\"name\":\"values\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"checkCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint8\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"triggerData\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"b
ytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint8\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fastGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkNative\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint8\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fastGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkNative\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"payload\",\"type\":\"bytes\"}],\"name\":\"executeCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint8\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"fallbackTo\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"int
ernalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveUpkeepIDs\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"getAdminPrivilegeConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAutomationForwarderLogic\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCancellationDelay\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConditionalGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFastGasFeedAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepID\",\"type\":\"uint256\"}],\"name\":\"getForwarder\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkNativeFeedAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"
type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLogGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"getMaxPaymentForGas\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"maxPayment\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"minBalance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMode\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"}],\"name\":\"getPeerRegistryMigrationPermission\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPerPerformByteGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPerSignerGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getSignerInfo\",\"outputs\"
:[{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"index\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getState\",\"outputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"nonce\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"ownerLinkBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"expectedLinkBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"totalPremium\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"numUpkeeps\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"latestConfigBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"latestConfigDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"latestEpoch\",\"type\":\"uint32\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"}],\"internalType\":\"structKeeperRegistryBase2_1.State\",\"name\":\"state\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCheckDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxRevertDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",
\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"registrars\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"upkeepPrivilegeManager\",\"type\":\"address\"}],\"internalType\":\"structKeeperRegistryBase2_1.OnchainConfig\",\"name\":\"config\",\"type\":\"tuple\"},{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getTransmitterInfo\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"index\",\"type\":\"uint8\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"lastCollected\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getTriggerType\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getUpkeep\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}
,{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"lastPerformedBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"amountSpent\",\"type\":\"uint96\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"internalType\":\"structKeeperRegistryBase2_1.UpkeepInfo\",\"name\":\"upkeepInfo\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepPrivilegeConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepTriggerConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"dedupKey\",\"type\":\"bytes32\"}],\"name\":\"hasDedupKey\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\"
:\"uint256[]\",\"name\":\"ids\",\"type\":\"uint256[]\"},{\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"migrateUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"pauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"encodedUpkeeps\",\"type\":\"bytes\"}],\"name\":\"receiveUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"f
unction\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"newPrivilegeConfig\",\"type\":\"bytes\"}],\"name\":\"setAdminPrivilegeConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"onchainConfigBytes\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\
",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCheckDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxRevertDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"registrars\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"upkeepPrivilegeManager\",\"type\":\"address\"}],\"internalType\":\"structKeeperRegistryBase2_1.OnchainConfig\",\"name\":\"onchainConfig\",\"type\":\"tuple\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfigTypeSafe\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"setPayees\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"permission\",\"type\":\"uint8\"}],\"name\":\"setPeerRegistryMigrationPermission\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"setUpkeepCheckData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"ui
nt256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"setUpkeepGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"config\",\"type\":\"bytes\"}],\"name\":\"setUpkeepOffchainConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"newPrivilegeConfig\",\"type\":\"bytes\"}],\"name\":\"setUpkeepPrivilegeConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"}],\"name\":\"setUpkeepTriggerConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"simulatePerformUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\
"proposed\",\"type\":\"address\"}],\"name\":\"transferUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"rawReport\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"unpauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepTranscoderVersion\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepVersion\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawOwnerFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[]
,\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +var IKeeperRegistryMasterABI = IKeeperRegistryMasterMetaData.ABI + +type IKeeperRegistryMaster struct { + address common.Address + abi abi.ABI + IKeeperRegistryMasterCaller + IKeeperRegistryMasterTransactor + IKeeperRegistryMasterFilterer +} + +type IKeeperRegistryMasterCaller struct { + contract *bind.BoundContract +} + +type IKeeperRegistryMasterTransactor struct { + contract *bind.BoundContract +} + +type IKeeperRegistryMasterFilterer struct { + contract *bind.BoundContract +} + +type IKeeperRegistryMasterSession struct { + Contract *IKeeperRegistryMaster + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type IKeeperRegistryMasterCallerSession struct { + Contract *IKeeperRegistryMasterCaller + CallOpts bind.CallOpts +} + +type IKeeperRegistryMasterTransactorSession struct { + Contract *IKeeperRegistryMasterTransactor + TransactOpts bind.TransactOpts +} + +type IKeeperRegistryMasterRaw struct { + Contract *IKeeperRegistryMaster +} + +type IKeeperRegistryMasterCallerRaw struct { + Contract *IKeeperRegistryMasterCaller +} + +type IKeeperRegistryMasterTransactorRaw struct { + Contract *IKeeperRegistryMasterTransactor +} + +func NewIKeeperRegistryMaster(address common.Address, backend bind.ContractBackend) (*IKeeperRegistryMaster, error) { + abi, err := abi.JSON(strings.NewReader(IKeeperRegistryMasterABI)) + if err != nil { + return nil, err + } + contract, err := bindIKeeperRegistryMaster(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &IKeeperRegistryMaster{address: address, abi: abi, IKeeperRegistryMasterCaller: IKeeperRegistryMasterCaller{contract: contract}, IKeeperRegistryMasterTransactor: IKeeperRegistryMasterTransactor{contract: contract}, IKeeperRegistryMasterFilterer: IKeeperRegistryMasterFilterer{contract: contract}}, nil +} + +func NewIKeeperRegistryMasterCaller(address common.Address, caller bind.ContractCaller) 
(*IKeeperRegistryMasterCaller, error) { + contract, err := bindIKeeperRegistryMaster(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterCaller{contract: contract}, nil +} + +func NewIKeeperRegistryMasterTransactor(address common.Address, transactor bind.ContractTransactor) (*IKeeperRegistryMasterTransactor, error) { + contract, err := bindIKeeperRegistryMaster(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterTransactor{contract: contract}, nil +} + +func NewIKeeperRegistryMasterFilterer(address common.Address, filterer bind.ContractFilterer) (*IKeeperRegistryMasterFilterer, error) { + contract, err := bindIKeeperRegistryMaster(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterFilterer{contract: contract}, nil +} + +func bindIKeeperRegistryMaster(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := IKeeperRegistryMasterMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _IKeeperRegistryMaster.Contract.IKeeperRegistryMasterCaller.contract.Call(opts, result, method, params...) 
+} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.IKeeperRegistryMasterTransactor.contract.Transfer(opts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.IKeeperRegistryMasterTransactor.contract.Transact(opts, method, params...) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _IKeeperRegistryMaster.Contract.contract.Call(opts, result, method, params...) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.contract.Transfer(opts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.contract.Transact(opts, method, params...) 
+} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) CheckCallback(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (CheckCallback, + + error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "checkCallback", id, values, extraData) + + outstruct := new(CheckCallback) + if err != nil { + return *outstruct, err + } + + outstruct.UpkeepNeeded = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.PerformData = *abi.ConvertType(out[1], new([]byte)).(*[]byte) + outstruct.UpkeepFailureReason = *abi.ConvertType(out[2], new(uint8)).(*uint8) + outstruct.GasUsed = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) CheckCallback(id *big.Int, values [][]byte, extraData []byte) (CheckCallback, + + error) { + return _IKeeperRegistryMaster.Contract.CheckCallback(&_IKeeperRegistryMaster.CallOpts, id, values, extraData) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) CheckCallback(id *big.Int, values [][]byte, extraData []byte) (CheckCallback, + + error) { + return _IKeeperRegistryMaster.Contract.CheckCallback(&_IKeeperRegistryMaster.CallOpts, id, values, extraData) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) CheckUpkeep(opts *bind.CallOpts, id *big.Int, triggerData []byte) (CheckUpkeep, + + error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "checkUpkeep", id, triggerData) + + outstruct := new(CheckUpkeep) + if err != nil { + return *outstruct, err + } + + outstruct.UpkeepNeeded = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.PerformData = *abi.ConvertType(out[1], new([]byte)).(*[]byte) + outstruct.UpkeepFailureReason = *abi.ConvertType(out[2], new(uint8)).(*uint8) + outstruct.GasUsed = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.GasLimit = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + 
outstruct.FastGasWei = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + outstruct.LinkNative = *abi.ConvertType(out[6], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) CheckUpkeep(id *big.Int, triggerData []byte) (CheckUpkeep, + + error) { + return _IKeeperRegistryMaster.Contract.CheckUpkeep(&_IKeeperRegistryMaster.CallOpts, id, triggerData) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) CheckUpkeep(id *big.Int, triggerData []byte) (CheckUpkeep, + + error) { + return _IKeeperRegistryMaster.Contract.CheckUpkeep(&_IKeeperRegistryMaster.CallOpts, id, triggerData) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) CheckUpkeep0(opts *bind.CallOpts, id *big.Int) (CheckUpkeep0, + + error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "checkUpkeep0", id) + + outstruct := new(CheckUpkeep0) + if err != nil { + return *outstruct, err + } + + outstruct.UpkeepNeeded = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.PerformData = *abi.ConvertType(out[1], new([]byte)).(*[]byte) + outstruct.UpkeepFailureReason = *abi.ConvertType(out[2], new(uint8)).(*uint8) + outstruct.GasUsed = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.GasLimit = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + outstruct.FastGasWei = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + outstruct.LinkNative = *abi.ConvertType(out[6], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) CheckUpkeep0(id *big.Int) (CheckUpkeep0, + + error) { + return _IKeeperRegistryMaster.Contract.CheckUpkeep0(&_IKeeperRegistryMaster.CallOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) CheckUpkeep0(id *big.Int) (CheckUpkeep0, + + error) { + return _IKeeperRegistryMaster.Contract.CheckUpkeep0(&_IKeeperRegistryMaster.CallOpts, id) +} + +func 
(_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) FallbackTo(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "fallbackTo") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) FallbackTo() (common.Address, error) { + return _IKeeperRegistryMaster.Contract.FallbackTo(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) FallbackTo() (common.Address, error) { + return _IKeeperRegistryMaster.Contract.FallbackTo(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getActiveUpkeepIDs", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetActiveUpkeepIDs(&_IKeeperRegistryMaster.CallOpts, startIndex, maxCount) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetActiveUpkeepIDs(&_IKeeperRegistryMaster.CallOpts, startIndex, maxCount) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetAdminPrivilegeConfig(opts *bind.CallOpts, admin common.Address) ([]byte, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getAdminPrivilegeConfig", admin) + + if 
err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetAdminPrivilegeConfig(admin common.Address) ([]byte, error) { + return _IKeeperRegistryMaster.Contract.GetAdminPrivilegeConfig(&_IKeeperRegistryMaster.CallOpts, admin) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetAdminPrivilegeConfig(admin common.Address) ([]byte, error) { + return _IKeeperRegistryMaster.Contract.GetAdminPrivilegeConfig(&_IKeeperRegistryMaster.CallOpts, admin) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetAutomationForwarderLogic(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getAutomationForwarderLogic") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetAutomationForwarderLogic() (common.Address, error) { + return _IKeeperRegistryMaster.Contract.GetAutomationForwarderLogic(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetAutomationForwarderLogic() (common.Address, error) { + return _IKeeperRegistryMaster.Contract.GetAutomationForwarderLogic(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getBalance", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetBalance(id *big.Int) (*big.Int, error) { + return 
_IKeeperRegistryMaster.Contract.GetBalance(&_IKeeperRegistryMaster.CallOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetBalance(id *big.Int) (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetBalance(&_IKeeperRegistryMaster.CallOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetCancellationDelay(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getCancellationDelay") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetCancellationDelay() (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetCancellationDelay(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetCancellationDelay() (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetCancellationDelay(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetConditionalGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getConditionalGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetConditionalGasOverhead() (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetConditionalGasOverhead(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetConditionalGasOverhead() (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetConditionalGasOverhead(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetFastGasFeedAddress(opts 
*bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getFastGasFeedAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetFastGasFeedAddress() (common.Address, error) { + return _IKeeperRegistryMaster.Contract.GetFastGasFeedAddress(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetFastGasFeedAddress() (common.Address, error) { + return _IKeeperRegistryMaster.Contract.GetFastGasFeedAddress(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getForwarder", upkeepID) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _IKeeperRegistryMaster.Contract.GetForwarder(&_IKeeperRegistryMaster.CallOpts, upkeepID) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _IKeeperRegistryMaster.Contract.GetForwarder(&_IKeeperRegistryMaster.CallOpts, upkeepID) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetLinkAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getLinkAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func 
(_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetLinkAddress() (common.Address, error) { + return _IKeeperRegistryMaster.Contract.GetLinkAddress(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetLinkAddress() (common.Address, error) { + return _IKeeperRegistryMaster.Contract.GetLinkAddress(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetLinkNativeFeedAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getLinkNativeFeedAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetLinkNativeFeedAddress() (common.Address, error) { + return _IKeeperRegistryMaster.Contract.GetLinkNativeFeedAddress(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetLinkNativeFeedAddress() (common.Address, error) { + return _IKeeperRegistryMaster.Contract.GetLinkNativeFeedAddress(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetLogGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getLogGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetLogGasOverhead() (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetLogGasOverhead(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetLogGasOverhead() (*big.Int, error) { + return 
_IKeeperRegistryMaster.Contract.GetLogGasOverhead(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetMaxPaymentForGas(opts *bind.CallOpts, triggerType uint8, gasLimit uint32) (*big.Int, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getMaxPaymentForGas", triggerType, gasLimit) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetMaxPaymentForGas(triggerType uint8, gasLimit uint32) (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetMaxPaymentForGas(&_IKeeperRegistryMaster.CallOpts, triggerType, gasLimit) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetMaxPaymentForGas(triggerType uint8, gasLimit uint32) (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetMaxPaymentForGas(&_IKeeperRegistryMaster.CallOpts, triggerType, gasLimit) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetMinBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getMinBalance", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetMinBalance(id *big.Int) (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetMinBalance(&_IKeeperRegistryMaster.CallOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetMinBalance(id *big.Int) (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetMinBalance(&_IKeeperRegistryMaster.CallOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} 
+ err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getMinBalanceForUpkeep", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetMinBalanceForUpkeep(&_IKeeperRegistryMaster.CallOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetMinBalanceForUpkeep(&_IKeeperRegistryMaster.CallOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetMode(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getMode") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetMode() (uint8, error) { + return _IKeeperRegistryMaster.Contract.GetMode(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetMode() (uint8, error) { + return _IKeeperRegistryMaster.Contract.GetMode(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getPeerRegistryMigrationPermission", peer) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return 
_IKeeperRegistryMaster.Contract.GetPeerRegistryMigrationPermission(&_IKeeperRegistryMaster.CallOpts, peer) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return _IKeeperRegistryMaster.Contract.GetPeerRegistryMigrationPermission(&_IKeeperRegistryMaster.CallOpts, peer) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetPerPerformByteGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getPerPerformByteGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetPerPerformByteGasOverhead() (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetPerPerformByteGasOverhead(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetPerPerformByteGasOverhead() (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetPerPerformByteGasOverhead(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetPerSignerGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getPerSignerGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetPerSignerGasOverhead() (*big.Int, error) { + return _IKeeperRegistryMaster.Contract.GetPerSignerGasOverhead(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetPerSignerGasOverhead() (*big.Int, error) { + return 
_IKeeperRegistryMaster.Contract.GetPerSignerGasOverhead(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetSignerInfo(opts *bind.CallOpts, query common.Address) (GetSignerInfo, + + error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getSignerInfo", query) + + outstruct := new(GetSignerInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.Index = *abi.ConvertType(out[1], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetSignerInfo(query common.Address) (GetSignerInfo, + + error) { + return _IKeeperRegistryMaster.Contract.GetSignerInfo(&_IKeeperRegistryMaster.CallOpts, query) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetSignerInfo(query common.Address) (GetSignerInfo, + + error) { + return _IKeeperRegistryMaster.Contract.GetSignerInfo(&_IKeeperRegistryMaster.CallOpts, query) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetState(opts *bind.CallOpts) (GetState, + + error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getState") + + outstruct := new(GetState) + if err != nil { + return *outstruct, err + } + + outstruct.State = *abi.ConvertType(out[0], new(KeeperRegistryBase21State)).(*KeeperRegistryBase21State) + outstruct.Config = *abi.ConvertType(out[1], new(KeeperRegistryBase21OnchainConfig)).(*KeeperRegistryBase21OnchainConfig) + outstruct.Signers = *abi.ConvertType(out[2], new([]common.Address)).(*[]common.Address) + outstruct.Transmitters = *abi.ConvertType(out[3], new([]common.Address)).(*[]common.Address) + outstruct.F = *abi.ConvertType(out[4], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetState() (GetState, + + error) { + return 
_IKeeperRegistryMaster.Contract.GetState(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetState() (GetState, + + error) { + return _IKeeperRegistryMaster.Contract.GetState(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetTransmitterInfo(opts *bind.CallOpts, query common.Address) (GetTransmitterInfo, + + error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getTransmitterInfo", query) + + outstruct := new(GetTransmitterInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.Index = *abi.ConvertType(out[1], new(uint8)).(*uint8) + outstruct.Balance = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.LastCollected = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.Payee = *abi.ConvertType(out[4], new(common.Address)).(*common.Address) + + return *outstruct, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetTransmitterInfo(query common.Address) (GetTransmitterInfo, + + error) { + return _IKeeperRegistryMaster.Contract.GetTransmitterInfo(&_IKeeperRegistryMaster.CallOpts, query) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetTransmitterInfo(query common.Address) (GetTransmitterInfo, + + error) { + return _IKeeperRegistryMaster.Contract.GetTransmitterInfo(&_IKeeperRegistryMaster.CallOpts, query) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getTriggerType", upkeepId) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetTriggerType(upkeepId *big.Int) 
(uint8, error) { + return _IKeeperRegistryMaster.Contract.GetTriggerType(&_IKeeperRegistryMaster.CallOpts, upkeepId) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetTriggerType(upkeepId *big.Int) (uint8, error) { + return _IKeeperRegistryMaster.Contract.GetTriggerType(&_IKeeperRegistryMaster.CallOpts, upkeepId) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetUpkeep(opts *bind.CallOpts, id *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getUpkeep", id) + + if err != nil { + return *new(KeeperRegistryBase21UpkeepInfo), err + } + + out0 := *abi.ConvertType(out[0], new(KeeperRegistryBase21UpkeepInfo)).(*KeeperRegistryBase21UpkeepInfo) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetUpkeep(id *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + return _IKeeperRegistryMaster.Contract.GetUpkeep(&_IKeeperRegistryMaster.CallOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetUpkeep(id *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + return _IKeeperRegistryMaster.Contract.GetUpkeep(&_IKeeperRegistryMaster.CallOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getUpkeepPrivilegeConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return _IKeeperRegistryMaster.Contract.GetUpkeepPrivilegeConfig(&_IKeeperRegistryMaster.CallOpts, upkeepId) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) 
([]byte, error) { + return _IKeeperRegistryMaster.Contract.GetUpkeepPrivilegeConfig(&_IKeeperRegistryMaster.CallOpts, upkeepId) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "getUpkeepTriggerConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _IKeeperRegistryMaster.Contract.GetUpkeepTriggerConfig(&_IKeeperRegistryMaster.CallOpts, upkeepId) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _IKeeperRegistryMaster.Contract.GetUpkeepTriggerConfig(&_IKeeperRegistryMaster.CallOpts, upkeepId) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) HasDedupKey(opts *bind.CallOpts, dedupKey [32]byte) (bool, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "hasDedupKey", dedupKey) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) HasDedupKey(dedupKey [32]byte) (bool, error) { + return _IKeeperRegistryMaster.Contract.HasDedupKey(&_IKeeperRegistryMaster.CallOpts, dedupKey) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) HasDedupKey(dedupKey [32]byte) (bool, error) { + return _IKeeperRegistryMaster.Contract.HasDedupKey(&_IKeeperRegistryMaster.CallOpts, dedupKey) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := 
_IKeeperRegistryMaster.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _IKeeperRegistryMaster.Contract.LatestConfigDetails(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _IKeeperRegistryMaster.Contract.LatestConfigDetails(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "latestConfigDigestAndEpoch") + + outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _IKeeperRegistryMaster.Contract.LatestConfigDigestAndEpoch(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _IKeeperRegistryMaster.Contract.LatestConfigDigestAndEpoch(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster 
*IKeeperRegistryMasterCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) Owner() (common.Address, error) { + return _IKeeperRegistryMaster.Contract.Owner(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) Owner() (common.Address, error) { + return _IKeeperRegistryMaster.Contract.Owner(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) SimulatePerformUpkeep(opts *bind.CallOpts, id *big.Int, performData []byte) (SimulatePerformUpkeep, + + error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "simulatePerformUpkeep", id, performData) + + outstruct := new(SimulatePerformUpkeep) + if err != nil { + return *outstruct, err + } + + outstruct.Success = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.GasUsed = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) SimulatePerformUpkeep(id *big.Int, performData []byte) (SimulatePerformUpkeep, + + error) { + return _IKeeperRegistryMaster.Contract.SimulatePerformUpkeep(&_IKeeperRegistryMaster.CallOpts, id, performData) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) SimulatePerformUpkeep(id *big.Int, performData []byte) (SimulatePerformUpkeep, + + error) { + return _IKeeperRegistryMaster.Contract.SimulatePerformUpkeep(&_IKeeperRegistryMaster.CallOpts, id, performData) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := 
_IKeeperRegistryMaster.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) TypeAndVersion() (string, error) { + return _IKeeperRegistryMaster.Contract.TypeAndVersion(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) TypeAndVersion() (string, error) { + return _IKeeperRegistryMaster.Contract.TypeAndVersion(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "upkeepTranscoderVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) UpkeepTranscoderVersion() (uint8, error) { + return _IKeeperRegistryMaster.Contract.UpkeepTranscoderVersion(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) UpkeepTranscoderVersion() (uint8, error) { + return _IKeeperRegistryMaster.Contract.UpkeepTranscoderVersion(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCaller) UpkeepVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _IKeeperRegistryMaster.contract.Call(opts, &out, "upkeepVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) UpkeepVersion() (uint8, error) { + return _IKeeperRegistryMaster.Contract.UpkeepVersion(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterCallerSession) 
UpkeepVersion() (uint8, error) { + return _IKeeperRegistryMaster.Contract.UpkeepVersion(&_IKeeperRegistryMaster.CallOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "acceptOwnership") +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) AcceptOwnership() (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.AcceptOwnership(&_IKeeperRegistryMaster.TransactOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.AcceptOwnership(&_IKeeperRegistryMaster.TransactOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "acceptPayeeship", transmitter) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.AcceptPayeeship(&_IKeeperRegistryMaster.TransactOpts, transmitter) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.AcceptPayeeship(&_IKeeperRegistryMaster.TransactOpts, transmitter) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "acceptUpkeepAdmin", id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.AcceptUpkeepAdmin(&_IKeeperRegistryMaster.TransactOpts, id) +} + 
+func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.AcceptUpkeepAdmin(&_IKeeperRegistryMaster.TransactOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "addFunds", id, amount) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.AddFunds(&_IKeeperRegistryMaster.TransactOpts, id, amount) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.AddFunds(&_IKeeperRegistryMaster.TransactOpts, id, amount) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "cancelUpkeep", id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.CancelUpkeep(&_IKeeperRegistryMaster.TransactOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.CancelUpkeep(&_IKeeperRegistryMaster.TransactOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) ExecuteCallback(opts *bind.TransactOpts, id *big.Int, payload []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "executeCallback", id, payload) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) ExecuteCallback(id *big.Int, payload []byte) 
(*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.ExecuteCallback(&_IKeeperRegistryMaster.TransactOpts, id, payload) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) ExecuteCallback(id *big.Int, payload []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.ExecuteCallback(&_IKeeperRegistryMaster.TransactOpts, id, payload) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "migrateUpkeeps", ids, destination) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.MigrateUpkeeps(&_IKeeperRegistryMaster.TransactOpts, ids, destination) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.MigrateUpkeeps(&_IKeeperRegistryMaster.TransactOpts, ids, destination) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.OnTokenTransfer(&_IKeeperRegistryMaster.TransactOpts, sender, amount, data) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return 
_IKeeperRegistryMaster.Contract.OnTokenTransfer(&_IKeeperRegistryMaster.TransactOpts, sender, amount, data) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "pause") +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) Pause() (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.Pause(&_IKeeperRegistryMaster.TransactOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) Pause() (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.Pause(&_IKeeperRegistryMaster.TransactOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "pauseUpkeep", id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) PauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.PauseUpkeep(&_IKeeperRegistryMaster.TransactOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) PauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.PauseUpkeep(&_IKeeperRegistryMaster.TransactOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "receiveUpkeeps", encodedUpkeeps) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.ReceiveUpkeeps(&_IKeeperRegistryMaster.TransactOpts, encodedUpkeeps) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + 
return _IKeeperRegistryMaster.Contract.ReceiveUpkeeps(&_IKeeperRegistryMaster.TransactOpts, encodedUpkeeps) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "recoverFunds") +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) RecoverFunds() (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.RecoverFunds(&_IKeeperRegistryMaster.TransactOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) RecoverFunds() (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.RecoverFunds(&_IKeeperRegistryMaster.TransactOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "registerUpkeep", target, gasLimit, admin, triggerType, checkData, triggerConfig, offchainConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.RegisterUpkeep(&_IKeeperRegistryMaster.TransactOpts, target, gasLimit, admin, triggerType, checkData, triggerConfig, offchainConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.RegisterUpkeep(&_IKeeperRegistryMaster.TransactOpts, target, gasLimit, admin, triggerType, 
checkData, triggerConfig, offchainConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) RegisterUpkeep0(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "registerUpkeep0", target, gasLimit, admin, checkData, offchainConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) RegisterUpkeep0(target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.RegisterUpkeep0(&_IKeeperRegistryMaster.TransactOpts, target, gasLimit, admin, checkData, offchainConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) RegisterUpkeep0(target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.RegisterUpkeep0(&_IKeeperRegistryMaster.TransactOpts, target, gasLimit, admin, checkData, offchainConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) SetAdminPrivilegeConfig(opts *bind.TransactOpts, admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "setAdminPrivilegeConfig", admin, newPrivilegeConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) SetAdminPrivilegeConfig(admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetAdminPrivilegeConfig(&_IKeeperRegistryMaster.TransactOpts, admin, newPrivilegeConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) SetAdminPrivilegeConfig(admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) { + return 
_IKeeperRegistryMaster.Contract.SetAdminPrivilegeConfig(&_IKeeperRegistryMaster.TransactOpts, admin, newPrivilegeConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "setConfig", signers, transmitters, f, onchainConfigBytes, offchainConfigVersion, offchainConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) SetConfig(signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetConfig(&_IKeeperRegistryMaster.TransactOpts, signers, transmitters, f, onchainConfigBytes, offchainConfigVersion, offchainConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) SetConfig(signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetConfig(&_IKeeperRegistryMaster.TransactOpts, signers, transmitters, f, onchainConfigBytes, offchainConfigVersion, offchainConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) SetConfigTypeSafe(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig KeeperRegistryBase21OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "setConfigTypeSafe", signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) SetConfigTypeSafe(signers []common.Address, 
transmitters []common.Address, f uint8, onchainConfig KeeperRegistryBase21OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetConfigTypeSafe(&_IKeeperRegistryMaster.TransactOpts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) SetConfigTypeSafe(signers []common.Address, transmitters []common.Address, f uint8, onchainConfig KeeperRegistryBase21OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetConfigTypeSafe(&_IKeeperRegistryMaster.TransactOpts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) SetPayees(opts *bind.TransactOpts, payees []common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "setPayees", payees) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) SetPayees(payees []common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetPayees(&_IKeeperRegistryMaster.TransactOpts, payees) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) SetPayees(payees []common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetPayees(&_IKeeperRegistryMaster.TransactOpts, payees) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "setPeerRegistryMigrationPermission", peer, permission) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { 
+ return _IKeeperRegistryMaster.Contract.SetPeerRegistryMigrationPermission(&_IKeeperRegistryMaster.TransactOpts, peer, permission) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetPeerRegistryMigrationPermission(&_IKeeperRegistryMaster.TransactOpts, peer, permission) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) SetUpkeepCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "setUpkeepCheckData", id, newCheckData) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) SetUpkeepCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetUpkeepCheckData(&_IKeeperRegistryMaster.TransactOpts, id, newCheckData) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) SetUpkeepCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetUpkeepCheckData(&_IKeeperRegistryMaster.TransactOpts, id, newCheckData) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "setUpkeepGasLimit", id, gasLimit) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetUpkeepGasLimit(&_IKeeperRegistryMaster.TransactOpts, id, gasLimit) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return 
_IKeeperRegistryMaster.Contract.SetUpkeepGasLimit(&_IKeeperRegistryMaster.TransactOpts, id, gasLimit) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) SetUpkeepOffchainConfig(opts *bind.TransactOpts, id *big.Int, config []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "setUpkeepOffchainConfig", id, config) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) SetUpkeepOffchainConfig(id *big.Int, config []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetUpkeepOffchainConfig(&_IKeeperRegistryMaster.TransactOpts, id, config) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) SetUpkeepOffchainConfig(id *big.Int, config []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetUpkeepOffchainConfig(&_IKeeperRegistryMaster.TransactOpts, id, config) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "setUpkeepPrivilegeConfig", upkeepId, newPrivilegeConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetUpkeepPrivilegeConfig(&_IKeeperRegistryMaster.TransactOpts, upkeepId, newPrivilegeConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetUpkeepPrivilegeConfig(&_IKeeperRegistryMaster.TransactOpts, upkeepId, newPrivilegeConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) SetUpkeepTriggerConfig(opts *bind.TransactOpts, id *big.Int, triggerConfig []byte) 
(*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "setUpkeepTriggerConfig", id, triggerConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetUpkeepTriggerConfig(&_IKeeperRegistryMaster.TransactOpts, id, triggerConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.SetUpkeepTriggerConfig(&_IKeeperRegistryMaster.TransactOpts, id, triggerConfig) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "transferOwnership", to) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.TransferOwnership(&_IKeeperRegistryMaster.TransactOpts, to) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.TransferOwnership(&_IKeeperRegistryMaster.TransactOpts, to) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "transferPayeeship", transmitter, proposed) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.TransferPayeeship(&_IKeeperRegistryMaster.TransactOpts, transmitter, proposed) +} + 
+func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.TransferPayeeship(&_IKeeperRegistryMaster.TransactOpts, transmitter, proposed) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "transferUpkeepAdmin", id, proposed) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.TransferUpkeepAdmin(&_IKeeperRegistryMaster.TransactOpts, id, proposed) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.TransferUpkeepAdmin(&_IKeeperRegistryMaster.TransactOpts, id, proposed) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "transmit", reportContext, rawReport, rs, ss, rawVs) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) Transmit(reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.Transmit(&_IKeeperRegistryMaster.TransactOpts, reportContext, rawReport, rs, ss, rawVs) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) Transmit(reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return 
_IKeeperRegistryMaster.Contract.Transmit(&_IKeeperRegistryMaster.TransactOpts, reportContext, rawReport, rs, ss, rawVs) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "unpause") +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) Unpause() (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.Unpause(&_IKeeperRegistryMaster.TransactOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) Unpause() (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.Unpause(&_IKeeperRegistryMaster.TransactOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "unpauseUpkeep", id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.UnpauseUpkeep(&_IKeeperRegistryMaster.TransactOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.UnpauseUpkeep(&_IKeeperRegistryMaster.TransactOpts, id) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "withdrawFunds", id, to) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.WithdrawFunds(&_IKeeperRegistryMaster.TransactOpts, id, to) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) WithdrawFunds(id *big.Int, to 
common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.WithdrawFunds(&_IKeeperRegistryMaster.TransactOpts, id, to) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "withdrawOwnerFunds") +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.WithdrawOwnerFunds(&_IKeeperRegistryMaster.TransactOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.WithdrawOwnerFunds(&_IKeeperRegistryMaster.TransactOpts) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.Transact(opts, "withdrawPayment", from, to) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.WithdrawPayment(&_IKeeperRegistryMaster.TransactOpts, from, to) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.WithdrawPayment(&_IKeeperRegistryMaster.TransactOpts, from, to) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.contract.RawTransact(opts, calldata) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterSession) Fallback(calldata []byte) (*types.Transaction, error) { + return 
_IKeeperRegistryMaster.Contract.Fallback(&_IKeeperRegistryMaster.TransactOpts, calldata) +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _IKeeperRegistryMaster.Contract.Fallback(&_IKeeperRegistryMaster.TransactOpts, calldata) +} + +type IKeeperRegistryMasterAdminPrivilegeConfigSetIterator struct { + Event *IKeeperRegistryMasterAdminPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterAdminPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterAdminPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterAdminPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterAdminPrivilegeConfigSet struct { + Admin common.Address + PrivilegeConfig []byte + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*IKeeperRegistryMasterAdminPrivilegeConfigSetIterator, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) 
+ } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterAdminPrivilegeConfigSetIterator{contract: _IKeeperRegistryMaster.contract, event: "AdminPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterAdminPrivilegeConfigSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseAdminPrivilegeConfigSet(log types.Log) (*IKeeperRegistryMasterAdminPrivilegeConfigSet, error) { + event := new(IKeeperRegistryMasterAdminPrivilegeConfigSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterCancelledUpkeepReportIterator struct { + Event *IKeeperRegistryMasterCancelledUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func 
(it *IKeeperRegistryMasterCancelledUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterCancelledUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterCancelledUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterCancelledUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterCancelledUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterCancelledUpkeepReportIterator{contract: _IKeeperRegistryMaster.contract, event: "CancelledUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, 
err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterCancelledUpkeepReport) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseCancelledUpkeepReport(log types.Log) (*IKeeperRegistryMasterCancelledUpkeepReport, error) { + event := new(IKeeperRegistryMasterCancelledUpkeepReport) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterConfigSetIterator struct { + Event *IKeeperRegistryMasterConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*IKeeperRegistryMasterConfigSetIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterConfigSet(opts *bind.FilterOpts) (*IKeeperRegistryMasterConfigSetIterator, error) { + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterConfigSetIterator{contract: _IKeeperRegistryMaster.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterConfigSet) (event.Subscription, error) { + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterConfigSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseConfigSet(log types.Log) (*IKeeperRegistryMasterConfigSet, error) { + event := new(IKeeperRegistryMasterConfigSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err 
+ } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterDedupKeyAddedIterator struct { + Event *IKeeperRegistryMasterDedupKeyAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterDedupKeyAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterDedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterDedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterDedupKeyAddedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterDedupKeyAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterDedupKeyAdded struct { + DedupKey [32]byte + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*IKeeperRegistryMasterDedupKeyAddedIterator, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterDedupKeyAddedIterator{contract: _IKeeperRegistryMaster.contract, event: "DedupKeyAdded", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchDedupKeyAdded(opts 
*bind.WatchOpts, sink chan<- *IKeeperRegistryMasterDedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterDedupKeyAdded) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseDedupKeyAdded(log types.Log) (*IKeeperRegistryMasterDedupKeyAdded, error) { + event := new(IKeeperRegistryMasterDedupKeyAdded) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterFundsAddedIterator struct { + Event *IKeeperRegistryMasterFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterFundsAdded) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterFundsAddedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*IKeeperRegistryMasterFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterFundsAddedIterator{contract: _IKeeperRegistryMaster.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterFundsAdded) + if err := 
_IKeeperRegistryMaster.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseFundsAdded(log types.Log) (*IKeeperRegistryMasterFundsAdded, error) { + event := new(IKeeperRegistryMasterFundsAdded) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterFundsWithdrawnIterator struct { + Event *IKeeperRegistryMasterFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster 
*IKeeperRegistryMasterFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterFundsWithdrawnIterator{contract: _IKeeperRegistryMaster.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterFundsWithdrawn) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseFundsWithdrawn(log types.Log) (*IKeeperRegistryMasterFundsWithdrawn, error) { + event := new(IKeeperRegistryMasterFundsWithdrawn) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterInsufficientFundsUpkeepReportIterator struct { + Event *IKeeperRegistryMasterInsufficientFundsUpkeepReport + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterInsufficientFundsUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterInsufficientFundsUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterInsufficientFundsUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterInsufficientFundsUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterInsufficientFundsUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterInsufficientFundsUpkeepReportIterator{contract: _IKeeperRegistryMaster.contract, event: "InsufficientFundsUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchInsufficientFundsUpkeepReport(opts 
*bind.WatchOpts, sink chan<- *IKeeperRegistryMasterInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterInsufficientFundsUpkeepReport) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseInsufficientFundsUpkeepReport(log types.Log) (*IKeeperRegistryMasterInsufficientFundsUpkeepReport, error) { + event := new(IKeeperRegistryMasterInsufficientFundsUpkeepReport) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterOwnerFundsWithdrawnIterator struct { + Event *IKeeperRegistryMasterOwnerFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterOwnerFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + 
select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*IKeeperRegistryMasterOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterOwnerFundsWithdrawnIterator{contract: _IKeeperRegistryMaster.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterOwnerFundsWithdrawn) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster 
*IKeeperRegistryMasterFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*IKeeperRegistryMasterOwnerFundsWithdrawn, error) { + event := new(IKeeperRegistryMasterOwnerFundsWithdrawn) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterOwnershipTransferRequestedIterator struct { + Event *IKeeperRegistryMasterOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*IKeeperRegistryMasterOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, 
fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterOwnershipTransferRequestedIterator{contract: _IKeeperRegistryMaster.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterOwnershipTransferRequested) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseOwnershipTransferRequested(log types.Log) (*IKeeperRegistryMasterOwnershipTransferRequested, error) { + event := new(IKeeperRegistryMasterOwnershipTransferRequested) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { 
+ return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterOwnershipTransferredIterator struct { + Event *IKeeperRegistryMasterOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*IKeeperRegistryMasterOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return 
&IKeeperRegistryMasterOwnershipTransferredIterator{contract: _IKeeperRegistryMaster.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterOwnershipTransferred) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseOwnershipTransferred(log types.Log) (*IKeeperRegistryMasterOwnershipTransferred, error) { + event := new(IKeeperRegistryMasterOwnershipTransferred) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterPausedIterator struct { + Event *IKeeperRegistryMasterPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if 
it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterPausedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterPaused struct { + Account common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterPaused(opts *bind.FilterOpts) (*IKeeperRegistryMasterPausedIterator, error) { + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterPausedIterator{contract: _IKeeperRegistryMaster.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterPaused) (event.Subscription, error) { + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterPaused) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParsePaused(log types.Log) (*IKeeperRegistryMasterPaused, error) { + event := new(IKeeperRegistryMasterPaused) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterPayeesUpdatedIterator struct { + Event *IKeeperRegistryMasterPayeesUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterPayeesUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterPayeesUpdatedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterPayeesUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterPayeesUpdated struct { + Transmitters []common.Address + Payees []common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterPayeesUpdated(opts *bind.FilterOpts) (*IKeeperRegistryMasterPayeesUpdatedIterator, error) { + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return 
&IKeeperRegistryMasterPayeesUpdatedIterator{contract: _IKeeperRegistryMaster.contract, event: "PayeesUpdated", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterPayeesUpdated) (event.Subscription, error) { + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterPayeesUpdated) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParsePayeesUpdated(log types.Log) (*IKeeperRegistryMasterPayeesUpdated, error) { + event := new(IKeeperRegistryMasterPayeesUpdated) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterPayeeshipTransferRequestedIterator struct { + Event *IKeeperRegistryMasterPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + 
} + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterPayeeshipTransferRequested struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*IKeeperRegistryMasterPayeeshipTransferRequestedIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterPayeeshipTransferRequestedIterator{contract: _IKeeperRegistryMaster.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var 
transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterPayeeshipTransferRequested) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParsePayeeshipTransferRequested(log types.Log) (*IKeeperRegistryMasterPayeeshipTransferRequested, error) { + event := new(IKeeperRegistryMasterPayeeshipTransferRequested) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterPayeeshipTransferredIterator struct { + Event *IKeeperRegistryMasterPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, 
it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterPayeeshipTransferred struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*IKeeperRegistryMasterPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterPayeeshipTransferredIterator{contract: _IKeeperRegistryMaster.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterPayeeshipTransferred, transmitter []common.Address, from []common.Address, to 
[]common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterPayeeshipTransferred) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParsePayeeshipTransferred(log types.Log) (*IKeeperRegistryMasterPayeeshipTransferred, error) { + event := new(IKeeperRegistryMasterPayeeshipTransferred) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterPaymentWithdrawnIterator struct { + Event *IKeeperRegistryMasterPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, 
log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterPaymentWithdrawn struct { + Transmitter common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*IKeeperRegistryMasterPaymentWithdrawnIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterPaymentWithdrawnIterator{contract: _IKeeperRegistryMaster.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) 
(event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterPaymentWithdrawn) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParsePaymentWithdrawn(log types.Log) (*IKeeperRegistryMasterPaymentWithdrawn, error) { + event := new(IKeeperRegistryMasterPaymentWithdrawn) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterReorgedUpkeepReportIterator struct { + Event *IKeeperRegistryMasterReorgedUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterReorgedUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { 
+ it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterReorgedUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterReorgedUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterReorgedUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterReorgedUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterReorgedUpkeepReportIterator{contract: _IKeeperRegistryMaster.contract, event: "ReorgedUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterReorgedUpkeepReport) + if 
err := _IKeeperRegistryMaster.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseReorgedUpkeepReport(log types.Log) (*IKeeperRegistryMasterReorgedUpkeepReport, error) { + event := new(IKeeperRegistryMasterReorgedUpkeepReport) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterStaleUpkeepReportIterator struct { + Event *IKeeperRegistryMasterStaleUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterStaleUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterStaleUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterStaleUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterStaleUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw 
types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterStaleUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterStaleUpkeepReportIterator{contract: _IKeeperRegistryMaster.contract, event: "StaleUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterStaleUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterStaleUpkeepReport) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseStaleUpkeepReport(log types.Log) (*IKeeperRegistryMasterStaleUpkeepReport, error) { + event := new(IKeeperRegistryMasterStaleUpkeepReport) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterTransmittedIterator struct { + Event 
*IKeeperRegistryMasterTransmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterTransmittedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterTransmitted struct { + ConfigDigest [32]byte + Epoch uint32 + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterTransmitted(opts *bind.FilterOpts) (*IKeeperRegistryMasterTransmittedIterator, error) { + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterTransmittedIterator{contract: _IKeeperRegistryMaster.contract, event: "Transmitted", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchTransmitted(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterTransmitted) (event.Subscription, error) { + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer 
sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterTransmitted) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "Transmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseTransmitted(log types.Log) (*IKeeperRegistryMasterTransmitted, error) { + event := new(IKeeperRegistryMasterTransmitted) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "Transmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUnpausedIterator struct { + Event *IKeeperRegistryMasterUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUnpausedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUnpaused struct { + Account common.Address + Raw types.Log 
+} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUnpaused(opts *bind.FilterOpts) (*IKeeperRegistryMasterUnpausedIterator, error) { + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUnpausedIterator{contract: _IKeeperRegistryMaster.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUnpaused) (event.Subscription, error) { + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUnpaused) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUnpaused(log types.Log) (*IKeeperRegistryMasterUnpaused, error) { + event := new(IKeeperRegistryMasterUnpaused) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepAdminTransferRequestedIterator struct { + Event *IKeeperRegistryMasterUpkeepAdminTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepAdminTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + 
it.Event = new(IKeeperRegistryMasterUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepAdminTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepAdminTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepAdminTransferRequested struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*IKeeperRegistryMasterUpkeepAdminTransferRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepAdminTransferRequestedIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepAdminTransferRequested", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- 
*IKeeperRegistryMasterUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepAdminTransferRequested) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepAdminTransferRequested(log types.Log) (*IKeeperRegistryMasterUpkeepAdminTransferRequested, error) { + event := new(IKeeperRegistryMasterUpkeepAdminTransferRequested) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepAdminTransferredIterator struct { + Event *IKeeperRegistryMasterUpkeepAdminTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepAdminTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + 
it.Event = new(IKeeperRegistryMasterUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepAdminTransferredIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepAdminTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepAdminTransferred struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*IKeeperRegistryMasterUpkeepAdminTransferredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepAdminTransferredIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepAdminTransferred", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepAdminTransferred, id []*big.Int, from 
[]common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepAdminTransferred) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepAdminTransferred(log types.Log) (*IKeeperRegistryMasterUpkeepAdminTransferred, error) { + event := new(IKeeperRegistryMasterUpkeepAdminTransferred) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepCanceledIterator struct { + Event *IKeeperRegistryMasterUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err 
+ return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*IKeeperRegistryMasterUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepCanceledIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, 
err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepCanceled) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepCanceled(log types.Log) (*IKeeperRegistryMasterUpkeepCanceled, error) { + event := new(IKeeperRegistryMasterUpkeepCanceled) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepCheckDataSetIterator struct { + Event *IKeeperRegistryMasterUpkeepCheckDataSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepCheckDataSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + 
+func (it *IKeeperRegistryMasterUpkeepCheckDataSetIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepCheckDataSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepCheckDataSet struct { + Id *big.Int + NewCheckData []byte + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepCheckDataSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepCheckDataSetIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepCheckDataSet", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepCheckDataSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepCheckDataSet(log types.Log) 
(*IKeeperRegistryMasterUpkeepCheckDataSet, error) { + event := new(IKeeperRegistryMasterUpkeepCheckDataSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepGasLimitSetIterator struct { + Event *IKeeperRegistryMasterUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepGasLimitSetIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + 
return &IKeeperRegistryMasterUpkeepGasLimitSetIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepGasLimitSet", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepGasLimitSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepGasLimitSet(log types.Log) (*IKeeperRegistryMasterUpkeepGasLimitSet, error) { + event := new(IKeeperRegistryMasterUpkeepGasLimitSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepMigratedIterator struct { + Event *IKeeperRegistryMasterUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, 
it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepMigrated struct { + Id *big.Int + RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepMigratedIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepMigrated", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepMigrated) + 
if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepMigrated(log types.Log) (*IKeeperRegistryMasterUpkeepMigrated, error) { + event := new(IKeeperRegistryMasterUpkeepMigrated) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepOffchainConfigSetIterator struct { + Event *IKeeperRegistryMasterUpkeepOffchainConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepOffchainConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepOffchainConfigSetIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepOffchainConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepOffchainConfigSet struct { + Id *big.Int + 
OffchainConfig []byte + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepOffchainConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepOffchainConfigSetIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepOffchainConfigSet", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepOffchainConfigSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepOffchainConfigSet(log types.Log) (*IKeeperRegistryMasterUpkeepOffchainConfigSet, error) { + event := new(IKeeperRegistryMasterUpkeepOffchainConfigSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return nil, err + 
} + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepPausedIterator struct { + Event *IKeeperRegistryMasterUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepPaused struct { + Id *big.Int + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepPausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepPausedIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepPaused", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepPaused, id 
[]*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepPaused) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepPaused(log types.Log) (*IKeeperRegistryMasterUpkeepPaused, error) { + event := new(IKeeperRegistryMasterUpkeepPaused) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepPerformedIterator struct { + Event *IKeeperRegistryMasterUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + 
return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepPerformed struct { + Id *big.Int + Success bool + TotalPayment *big.Int + GasUsed *big.Int + GasOverhead *big.Int + Trigger []byte + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*IKeeperRegistryMasterUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepPerformedIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepPerformed) 
+ if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepPerformed(log types.Log) (*IKeeperRegistryMasterUpkeepPerformed, error) { + event := new(IKeeperRegistryMasterUpkeepPerformed) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepPrivilegeConfigSetIterator struct { + Event *IKeeperRegistryMasterUpkeepPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepPrivilegeConfigSet struct { + Id 
*big.Int + PrivilegeConfig []byte + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepPrivilegeConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepPrivilegeConfigSetIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepPrivilegeConfigSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepPrivilegeConfigSet(log types.Log) (*IKeeperRegistryMasterUpkeepPrivilegeConfigSet, error) { + event := new(IKeeperRegistryMasterUpkeepPrivilegeConfigSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != 
nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepReceivedIterator struct { + Event *IKeeperRegistryMasterUpkeepReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepReceivedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepReceived struct { + Id *big.Int + StartingBalance *big.Int + ImportedFrom common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepReceivedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepReceivedIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepReceived", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster 
*IKeeperRegistryMasterFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepReceived, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepReceived) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepReceived(log types.Log) (*IKeeperRegistryMasterUpkeepReceived, error) { + event := new(IKeeperRegistryMasterUpkeepReceived) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepRegisteredIterator struct { + Event *IKeeperRegistryMasterUpkeepRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(IKeeperRegistryMasterUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepRegistered struct { + Id *big.Int + PerformGas uint32 + Admin common.Address + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepRegisteredIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepRegistered) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case 
err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepRegistered(log types.Log) (*IKeeperRegistryMasterUpkeepRegistered, error) { + event := new(IKeeperRegistryMasterUpkeepRegistered) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepTriggerConfigSetIterator struct { + Event *IKeeperRegistryMasterUpkeepTriggerConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepTriggerConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepTriggerConfigSetIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepTriggerConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepTriggerConfigSet struct { + Id *big.Int + TriggerConfig []byte + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) 
(*IKeeperRegistryMasterUpkeepTriggerConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepTriggerConfigSetIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepTriggerConfigSet", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepTriggerConfigSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepTriggerConfigSet(log types.Log) (*IKeeperRegistryMasterUpkeepTriggerConfigSet, error) { + event := new(IKeeperRegistryMasterUpkeepTriggerConfigSet) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type IKeeperRegistryMasterUpkeepUnpausedIterator struct { + Event *IKeeperRegistryMasterUpkeepUnpaused + + contract *bind.BoundContract + 
event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IKeeperRegistryMasterUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IKeeperRegistryMasterUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IKeeperRegistryMasterUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *IKeeperRegistryMasterUpkeepUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IKeeperRegistryMasterUpkeepUnpaused struct { + Id *big.Int + Raw types.Log +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepUnpausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _IKeeperRegistryMaster.contract.FilterLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return &IKeeperRegistryMasterUpkeepUnpausedIterator{contract: _IKeeperRegistryMaster.contract, event: "UpkeepUnpaused", logs: logs, sub: sub}, nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepUnpaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_IKeeperRegistryMaster.contract.WatchLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IKeeperRegistryMasterUpkeepUnpaused) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMasterFilterer) ParseUpkeepUnpaused(log types.Log) (*IKeeperRegistryMasterUpkeepUnpaused, error) { + event := new(IKeeperRegistryMasterUpkeepUnpaused) + if err := _IKeeperRegistryMaster.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CheckCallback struct { + UpkeepNeeded bool + PerformData []byte + UpkeepFailureReason uint8 + GasUsed *big.Int +} +type CheckUpkeep struct { + UpkeepNeeded bool + PerformData []byte + UpkeepFailureReason uint8 + GasUsed *big.Int + GasLimit *big.Int + FastGasWei *big.Int + LinkNative *big.Int +} +type CheckUpkeep0 struct { + UpkeepNeeded bool + PerformData []byte + UpkeepFailureReason uint8 + GasUsed *big.Int + GasLimit *big.Int + FastGasWei *big.Int + LinkNative *big.Int +} +type GetSignerInfo struct { + Active bool + Index uint8 +} +type GetState struct { + State KeeperRegistryBase21State + Config KeeperRegistryBase21OnchainConfig + Signers []common.Address + Transmitters []common.Address + F uint8 +} +type GetTransmitterInfo struct { + Active bool + Index uint8 + Balance *big.Int + LastCollected *big.Int + Payee common.Address +} +type LatestConfigDetails struct { + ConfigCount uint32 + BlockNumber uint32 + ConfigDigest [32]byte +} +type LatestConfigDigestAndEpoch struct { + ScanLogs 
bool + ConfigDigest [32]byte + Epoch uint32 +} +type SimulatePerformUpkeep struct { + Success bool + GasUsed *big.Int +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMaster) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _IKeeperRegistryMaster.abi.Events["AdminPrivilegeConfigSet"].ID: + return _IKeeperRegistryMaster.ParseAdminPrivilegeConfigSet(log) + case _IKeeperRegistryMaster.abi.Events["CancelledUpkeepReport"].ID: + return _IKeeperRegistryMaster.ParseCancelledUpkeepReport(log) + case _IKeeperRegistryMaster.abi.Events["ConfigSet"].ID: + return _IKeeperRegistryMaster.ParseConfigSet(log) + case _IKeeperRegistryMaster.abi.Events["DedupKeyAdded"].ID: + return _IKeeperRegistryMaster.ParseDedupKeyAdded(log) + case _IKeeperRegistryMaster.abi.Events["FundsAdded"].ID: + return _IKeeperRegistryMaster.ParseFundsAdded(log) + case _IKeeperRegistryMaster.abi.Events["FundsWithdrawn"].ID: + return _IKeeperRegistryMaster.ParseFundsWithdrawn(log) + case _IKeeperRegistryMaster.abi.Events["InsufficientFundsUpkeepReport"].ID: + return _IKeeperRegistryMaster.ParseInsufficientFundsUpkeepReport(log) + case _IKeeperRegistryMaster.abi.Events["OwnerFundsWithdrawn"].ID: + return _IKeeperRegistryMaster.ParseOwnerFundsWithdrawn(log) + case _IKeeperRegistryMaster.abi.Events["OwnershipTransferRequested"].ID: + return _IKeeperRegistryMaster.ParseOwnershipTransferRequested(log) + case _IKeeperRegistryMaster.abi.Events["OwnershipTransferred"].ID: + return _IKeeperRegistryMaster.ParseOwnershipTransferred(log) + case _IKeeperRegistryMaster.abi.Events["Paused"].ID: + return _IKeeperRegistryMaster.ParsePaused(log) + case _IKeeperRegistryMaster.abi.Events["PayeesUpdated"].ID: + return _IKeeperRegistryMaster.ParsePayeesUpdated(log) + case _IKeeperRegistryMaster.abi.Events["PayeeshipTransferRequested"].ID: + return _IKeeperRegistryMaster.ParsePayeeshipTransferRequested(log) + case _IKeeperRegistryMaster.abi.Events["PayeeshipTransferred"].ID: + return 
_IKeeperRegistryMaster.ParsePayeeshipTransferred(log) + case _IKeeperRegistryMaster.abi.Events["PaymentWithdrawn"].ID: + return _IKeeperRegistryMaster.ParsePaymentWithdrawn(log) + case _IKeeperRegistryMaster.abi.Events["ReorgedUpkeepReport"].ID: + return _IKeeperRegistryMaster.ParseReorgedUpkeepReport(log) + case _IKeeperRegistryMaster.abi.Events["StaleUpkeepReport"].ID: + return _IKeeperRegistryMaster.ParseStaleUpkeepReport(log) + case _IKeeperRegistryMaster.abi.Events["Transmitted"].ID: + return _IKeeperRegistryMaster.ParseTransmitted(log) + case _IKeeperRegistryMaster.abi.Events["Unpaused"].ID: + return _IKeeperRegistryMaster.ParseUnpaused(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepAdminTransferRequested"].ID: + return _IKeeperRegistryMaster.ParseUpkeepAdminTransferRequested(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepAdminTransferred"].ID: + return _IKeeperRegistryMaster.ParseUpkeepAdminTransferred(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepCanceled"].ID: + return _IKeeperRegistryMaster.ParseUpkeepCanceled(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepCheckDataSet"].ID: + return _IKeeperRegistryMaster.ParseUpkeepCheckDataSet(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepGasLimitSet"].ID: + return _IKeeperRegistryMaster.ParseUpkeepGasLimitSet(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepMigrated"].ID: + return _IKeeperRegistryMaster.ParseUpkeepMigrated(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepOffchainConfigSet"].ID: + return _IKeeperRegistryMaster.ParseUpkeepOffchainConfigSet(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepPaused"].ID: + return _IKeeperRegistryMaster.ParseUpkeepPaused(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepPerformed"].ID: + return _IKeeperRegistryMaster.ParseUpkeepPerformed(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepPrivilegeConfigSet"].ID: + return _IKeeperRegistryMaster.ParseUpkeepPrivilegeConfigSet(log) + case 
_IKeeperRegistryMaster.abi.Events["UpkeepReceived"].ID: + return _IKeeperRegistryMaster.ParseUpkeepReceived(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepRegistered"].ID: + return _IKeeperRegistryMaster.ParseUpkeepRegistered(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepTriggerConfigSet"].ID: + return _IKeeperRegistryMaster.ParseUpkeepTriggerConfigSet(log) + case _IKeeperRegistryMaster.abi.Events["UpkeepUnpaused"].ID: + return _IKeeperRegistryMaster.ParseUpkeepUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (IKeeperRegistryMasterAdminPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x7c44b4eb59ee7873514e7e43e7718c269d872965938b288aa143befca62f99d2") +} + +func (IKeeperRegistryMasterCancelledUpkeepReport) Topic() common.Hash { + return common.HexToHash("0xc3237c8807c467c1b39b8d0395eff077313e691bf0a7388106792564ebfd5636") +} + +func (IKeeperRegistryMasterConfigSet) Topic() common.Hash { + return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05") +} + +func (IKeeperRegistryMasterDedupKeyAdded) Topic() common.Hash { + return common.HexToHash("0xa4a4e334c0e330143f9437484fe516c13bc560b86b5b0daf58e7084aaac228f2") +} + +func (IKeeperRegistryMasterFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (IKeeperRegistryMasterFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (IKeeperRegistryMasterInsufficientFundsUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x377c8b0c126ae5248d27aca1c76fac4608aff85673ee3caf09747e1044549e02") +} + +func (IKeeperRegistryMasterOwnerFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1") +} + +func 
(IKeeperRegistryMasterOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (IKeeperRegistryMasterOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (IKeeperRegistryMasterPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (IKeeperRegistryMasterPayeesUpdated) Topic() common.Hash { + return common.HexToHash("0xa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725") +} + +func (IKeeperRegistryMasterPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (IKeeperRegistryMasterPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (IKeeperRegistryMasterPaymentWithdrawn) Topic() common.Hash { + return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (IKeeperRegistryMasterReorgedUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x6aa7f60c176da7af894b384daea2249497448137f5943c1237ada8bc92bdc301") +} + +func (IKeeperRegistryMasterStaleUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x405288ea7be309e16cfdf481367f90a413e1d4634fcdaf8966546db9b93012e8") +} + +func (IKeeperRegistryMasterTransmitted) Topic() common.Hash { + return common.HexToHash("0xb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62") +} + +func (IKeeperRegistryMasterUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (IKeeperRegistryMasterUpkeepAdminTransferRequested) Topic() common.Hash { + return 
common.HexToHash("0xb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b35") +} + +func (IKeeperRegistryMasterUpkeepAdminTransferred) Topic() common.Hash { + return common.HexToHash("0x5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c") +} + +func (IKeeperRegistryMasterUpkeepCanceled) Topic() common.Hash { + return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (IKeeperRegistryMasterUpkeepCheckDataSet) Topic() common.Hash { + return common.HexToHash("0xcba2d5723b2ee59e53a8e8a82a4a7caf4fdfe70e9f7c582950bf7e7a5c24e83d") +} + +func (IKeeperRegistryMasterUpkeepGasLimitSet) Topic() common.Hash { + return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c") +} + +func (IKeeperRegistryMasterUpkeepMigrated) Topic() common.Hash { + return common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff") +} + +func (IKeeperRegistryMasterUpkeepOffchainConfigSet) Topic() common.Hash { + return common.HexToHash("0x3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf4850") +} + +func (IKeeperRegistryMasterUpkeepPaused) Topic() common.Hash { + return common.HexToHash("0x8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f") +} + +func (IKeeperRegistryMasterUpkeepPerformed) Topic() common.Hash { + return common.HexToHash("0xad8cc9579b21dfe2c2f6ea35ba15b656e46b4f5b0cb424f52739b8ce5cac9c5b") +} + +func (IKeeperRegistryMasterUpkeepPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x2fd8d70753a007014349d4591843cc031c2dd7a260d7dd82eca8253686ae7769") +} + +func (IKeeperRegistryMasterUpkeepReceived) Topic() common.Hash { + return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71") +} + +func (IKeeperRegistryMasterUpkeepRegistered) Topic() common.Hash { + return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + +func 
(IKeeperRegistryMasterUpkeepTriggerConfigSet) Topic() common.Hash { + return common.HexToHash("0x2b72ac786c97e68dbab71023ed6f2bdbfc80ad9bb7808941929229d71b7d5664") +} + +func (IKeeperRegistryMasterUpkeepUnpaused) Topic() common.Hash { + return common.HexToHash("0x7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a47456") +} + +func (_IKeeperRegistryMaster *IKeeperRegistryMaster) Address() common.Address { + return _IKeeperRegistryMaster.address +} + +type IKeeperRegistryMasterInterface interface { + CheckCallback(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (CheckCallback, + + error) + + CheckUpkeep(opts *bind.CallOpts, id *big.Int, triggerData []byte) (CheckUpkeep, + + error) + + CheckUpkeep0(opts *bind.CallOpts, id *big.Int) (CheckUpkeep0, + + error) + + FallbackTo(opts *bind.CallOpts) (common.Address, error) + + GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetAdminPrivilegeConfig(opts *bind.CallOpts, admin common.Address) ([]byte, error) + + GetAutomationForwarderLogic(opts *bind.CallOpts) (common.Address, error) + + GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetCancellationDelay(opts *bind.CallOpts) (*big.Int, error) + + GetConditionalGasOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetFastGasFeedAddress(opts *bind.CallOpts) (common.Address, error) + + GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) + + GetLinkAddress(opts *bind.CallOpts) (common.Address, error) + + GetLinkNativeFeedAddress(opts *bind.CallOpts) (common.Address, error) + + GetLogGasOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetMaxPaymentForGas(opts *bind.CallOpts, triggerType uint8, gasLimit uint32) (*big.Int, error) + + GetMinBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetMode(opts *bind.CallOpts) (uint8, error) + + 
GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) + + GetPerPerformByteGasOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetPerSignerGasOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetSignerInfo(opts *bind.CallOpts, query common.Address) (GetSignerInfo, + + error) + + GetState(opts *bind.CallOpts) (GetState, + + error) + + GetTransmitterInfo(opts *bind.CallOpts, query common.Address) (GetTransmitterInfo, + + error) + + GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) + + GetUpkeep(opts *bind.CallOpts, id *big.Int) (KeeperRegistryBase21UpkeepInfo, error) + + GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + + GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + + HasDedupKey(opts *bind.CallOpts, dedupKey [32]byte) (bool, error) + + LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) + + LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SimulatePerformUpkeep(opts *bind.CallOpts, id *big.Int, performData []byte) (SimulatePerformUpkeep, + + error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error) + + UpkeepVersion(opts *bind.CallOpts) (uint8, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) + + AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) + + CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + ExecuteCallback(opts *bind.TransactOpts, id *big.Int, payload []byte) (*types.Transaction, error) + + MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, 
destination common.Address) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) (*types.Transaction, error) + + PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) + + RegisterUpkeep0(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) + + SetAdminPrivilegeConfig(opts *bind.TransactOpts, admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) + + SetConfigTypeSafe(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig KeeperRegistryBase21OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) + + SetPayees(opts *bind.TransactOpts, payees []common.Address) (*types.Transaction, error) + + SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) + + SetUpkeepCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) + + SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) + + SetUpkeepOffchainConfig(opts *bind.TransactOpts, id *big.Int, config []byte) 
(*types.Transaction, error) + + SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) + + SetUpkeepTriggerConfig(opts *bind.TransactOpts, id *big.Int, triggerConfig []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) + + TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) + + Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) + + WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) + + FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*IKeeperRegistryMasterAdminPrivilegeConfigSetIterator, error) + + WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) + + ParseAdminPrivilegeConfigSet(log types.Log) (*IKeeperRegistryMasterAdminPrivilegeConfigSet, error) + + FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterCancelledUpkeepReportIterator, error) + + WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) + + 
ParseCancelledUpkeepReport(log types.Log) (*IKeeperRegistryMasterCancelledUpkeepReport, error) + + FilterConfigSet(opts *bind.FilterOpts) (*IKeeperRegistryMasterConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*IKeeperRegistryMasterConfigSet, error) + + FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*IKeeperRegistryMasterDedupKeyAddedIterator, error) + + WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterDedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) + + ParseDedupKeyAdded(log types.Log) (*IKeeperRegistryMasterDedupKeyAdded, error) + + FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*IKeeperRegistryMasterFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*IKeeperRegistryMasterFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterFundsWithdrawn, id []*big.Int) (event.Subscription, error) + + ParseFundsWithdrawn(log types.Log) (*IKeeperRegistryMasterFundsWithdrawn, error) + + FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterInsufficientFundsUpkeepReportIterator, error) + + WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseInsufficientFundsUpkeepReport(log types.Log) (*IKeeperRegistryMasterInsufficientFundsUpkeepReport, error) + + FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*IKeeperRegistryMasterOwnerFundsWithdrawnIterator, error) + + 
WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterOwnerFundsWithdrawn) (event.Subscription, error) + + ParseOwnerFundsWithdrawn(log types.Log) (*IKeeperRegistryMasterOwnerFundsWithdrawn, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*IKeeperRegistryMasterOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*IKeeperRegistryMasterOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*IKeeperRegistryMasterOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*IKeeperRegistryMasterOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*IKeeperRegistryMasterPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*IKeeperRegistryMasterPaused, error) + + FilterPayeesUpdated(opts *bind.FilterOpts) (*IKeeperRegistryMasterPayeesUpdatedIterator, error) + + WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterPayeesUpdated) (event.Subscription, error) + + ParsePayeesUpdated(log types.Log) (*IKeeperRegistryMasterPayeesUpdated, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*IKeeperRegistryMasterPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- 
*IKeeperRegistryMasterPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*IKeeperRegistryMasterPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*IKeeperRegistryMasterPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*IKeeperRegistryMasterPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*IKeeperRegistryMasterPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*IKeeperRegistryMasterPaymentWithdrawn, error) + + FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterReorgedUpkeepReportIterator, error) + + WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseReorgedUpkeepReport(log types.Log) (*IKeeperRegistryMasterReorgedUpkeepReport, error) + + FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterStaleUpkeepReportIterator, error) + + WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterStaleUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseStaleUpkeepReport(log types.Log) (*IKeeperRegistryMasterStaleUpkeepReport, error) + + FilterTransmitted(opts *bind.FilterOpts) 
(*IKeeperRegistryMasterTransmittedIterator, error) + + WatchTransmitted(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterTransmitted) (event.Subscription, error) + + ParseTransmitted(log types.Log) (*IKeeperRegistryMasterTransmitted, error) + + FilterUnpaused(opts *bind.FilterOpts) (*IKeeperRegistryMasterUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*IKeeperRegistryMasterUnpaused, error) + + FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*IKeeperRegistryMasterUpkeepAdminTransferRequestedIterator, error) + + WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferRequested(log types.Log) (*IKeeperRegistryMasterUpkeepAdminTransferRequested, error) + + FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*IKeeperRegistryMasterUpkeepAdminTransferredIterator, error) + + WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferred(log types.Log) (*IKeeperRegistryMasterUpkeepAdminTransferred, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*IKeeperRegistryMasterUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*IKeeperRegistryMasterUpkeepCanceled, error) + + FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) 
(*IKeeperRegistryMasterUpkeepCheckDataSetIterator, error) + + WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepCheckDataSet(log types.Log) (*IKeeperRegistryMasterUpkeepCheckDataSet, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepGasLimitSetIterator, error) + + WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*IKeeperRegistryMasterUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepMigrated(log types.Log) (*IKeeperRegistryMasterUpkeepMigrated, error) + + FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepOffchainConfigSetIterator, error) + + WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepOffchainConfigSet(log types.Log) (*IKeeperRegistryMasterUpkeepOffchainConfigSet, error) + + FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepPausedIterator, error) + + WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepPaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPaused(log types.Log) (*IKeeperRegistryMasterUpkeepPaused, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*IKeeperRegistryMasterUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepPerformed, id []*big.Int, success []bool) 
(event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*IKeeperRegistryMasterUpkeepPerformed, error) + + FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepPrivilegeConfigSetIterator, error) + + WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPrivilegeConfigSet(log types.Log) (*IKeeperRegistryMasterUpkeepPrivilegeConfigSet, error) + + FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*IKeeperRegistryMasterUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*IKeeperRegistryMasterUpkeepRegistered, error) + + FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepTriggerConfigSetIterator, error) + + WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepTriggerConfigSet(log types.Log) (*IKeeperRegistryMasterUpkeepTriggerConfigSet, error) + + FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*IKeeperRegistryMasterUpkeepUnpausedIterator, error) + + WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *IKeeperRegistryMasterUpkeepUnpaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepUnpaused(log types.Log) (*IKeeperRegistryMasterUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + 
+ Address() common.Address +} diff --git a/core/gethwrappers/generated/i_log_automation/i_log_automation.go b/core/gethwrappers/generated/i_log_automation/i_log_automation.go new file mode 100644 index 00000000..e6345b74 --- /dev/null +++ b/core/gethwrappers/generated/i_log_automation/i_log_automation.go @@ -0,0 +1,198 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package i_log_automation + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type Log struct { + Index *big.Int + Timestamp *big.Int + TxHash [32]byte + BlockNumber *big.Int + BlockHash [32]byte + Source common.Address + Topics [][32]byte + Data []byte +} + +var ILogAutomationMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"txHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"source\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"topics\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"internalType\":\"structLog\",\"name\":\"log\",\"type\":\"tuple\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"checkLog\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +var ILogAutomationABI = ILogAutomationMetaData.ABI + +type ILogAutomation struct { + address common.Address + abi abi.ABI + ILogAutomationCaller + ILogAutomationTransactor + ILogAutomationFilterer +} + +type ILogAutomationCaller struct { + contract *bind.BoundContract +} + +type ILogAutomationTransactor struct { + contract *bind.BoundContract +} + +type ILogAutomationFilterer struct { + contract *bind.BoundContract +} + +type ILogAutomationSession struct { + Contract *ILogAutomation + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type ILogAutomationCallerSession struct { + Contract *ILogAutomationCaller + CallOpts bind.CallOpts +} + +type ILogAutomationTransactorSession struct { + Contract *ILogAutomationTransactor + TransactOpts bind.TransactOpts +} + +type ILogAutomationRaw 
struct { + Contract *ILogAutomation +} + +type ILogAutomationCallerRaw struct { + Contract *ILogAutomationCaller +} + +type ILogAutomationTransactorRaw struct { + Contract *ILogAutomationTransactor +} + +func NewILogAutomation(address common.Address, backend bind.ContractBackend) (*ILogAutomation, error) { + abi, err := abi.JSON(strings.NewReader(ILogAutomationABI)) + if err != nil { + return nil, err + } + contract, err := bindILogAutomation(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &ILogAutomation{address: address, abi: abi, ILogAutomationCaller: ILogAutomationCaller{contract: contract}, ILogAutomationTransactor: ILogAutomationTransactor{contract: contract}, ILogAutomationFilterer: ILogAutomationFilterer{contract: contract}}, nil +} + +func NewILogAutomationCaller(address common.Address, caller bind.ContractCaller) (*ILogAutomationCaller, error) { + contract, err := bindILogAutomation(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ILogAutomationCaller{contract: contract}, nil +} + +func NewILogAutomationTransactor(address common.Address, transactor bind.ContractTransactor) (*ILogAutomationTransactor, error) { + contract, err := bindILogAutomation(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ILogAutomationTransactor{contract: contract}, nil +} + +func NewILogAutomationFilterer(address common.Address, filterer bind.ContractFilterer) (*ILogAutomationFilterer, error) { + contract, err := bindILogAutomation(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ILogAutomationFilterer{contract: contract}, nil +} + +func bindILogAutomation(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ILogAutomationMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, 
transactor, filterer), nil +} + +func (_ILogAutomation *ILogAutomationRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ILogAutomation.Contract.ILogAutomationCaller.contract.Call(opts, result, method, params...) +} + +func (_ILogAutomation *ILogAutomationRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ILogAutomation.Contract.ILogAutomationTransactor.contract.Transfer(opts) +} + +func (_ILogAutomation *ILogAutomationRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ILogAutomation.Contract.ILogAutomationTransactor.contract.Transact(opts, method, params...) +} + +func (_ILogAutomation *ILogAutomationCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ILogAutomation.Contract.contract.Call(opts, result, method, params...) +} + +func (_ILogAutomation *ILogAutomationTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ILogAutomation.Contract.contract.Transfer(opts) +} + +func (_ILogAutomation *ILogAutomationTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ILogAutomation.Contract.contract.Transact(opts, method, params...) 
+} + +func (_ILogAutomation *ILogAutomationTransactor) CheckLog(opts *bind.TransactOpts, log Log, checkData []byte) (*types.Transaction, error) { + return _ILogAutomation.contract.Transact(opts, "checkLog", log, checkData) +} + +func (_ILogAutomation *ILogAutomationSession) CheckLog(log Log, checkData []byte) (*types.Transaction, error) { + return _ILogAutomation.Contract.CheckLog(&_ILogAutomation.TransactOpts, log, checkData) +} + +func (_ILogAutomation *ILogAutomationTransactorSession) CheckLog(log Log, checkData []byte) (*types.Transaction, error) { + return _ILogAutomation.Contract.CheckLog(&_ILogAutomation.TransactOpts, log, checkData) +} + +func (_ILogAutomation *ILogAutomationTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _ILogAutomation.contract.Transact(opts, "performUpkeep", performData) +} + +func (_ILogAutomation *ILogAutomationSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _ILogAutomation.Contract.PerformUpkeep(&_ILogAutomation.TransactOpts, performData) +} + +func (_ILogAutomation *ILogAutomationTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _ILogAutomation.Contract.PerformUpkeep(&_ILogAutomation.TransactOpts, performData) +} + +func (_ILogAutomation *ILogAutomation) Address() common.Address { + return _ILogAutomation.address +} + +type ILogAutomationInterface interface { + CheckLog(opts *bind.TransactOpts, log Log, checkData []byte) (*types.Transaction, error) + + PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_consumer_performance_wrapper/keeper_consumer_performance_wrapper.go b/core/gethwrappers/generated/keeper_consumer_performance_wrapper/keeper_consumer_performance_wrapper.go new file mode 100644 index 00000000..1f74dcbe --- /dev/null +++ 
b/core/gethwrappers/generated/keeper_consumer_performance_wrapper/keeper_consumer_performance_wrapper.go @@ -0,0 +1,658 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package keeper_consumer_performance_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var KeeperConsumerPerformanceMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_testRange\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_averageEligibilityCadence\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_checkGasToBurn\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_performGasToBurn\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"eligible\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"initialCall\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nextEligible\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"}],\"name\":\"PerformingUpkeep\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"averageEligibilityCadence\",\"outputs\":[{\"internalType\":\"uint256
\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"checkEligible\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"checkGasToBurn\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"count\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"dummyMap\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCountPerforms\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialCall\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextEligible\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"performGasToBurn\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}
,{\"inputs\":[],\"name\":\"reset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"setCheckGasToBurn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"setPerformGasToBurn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_newTestRange\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_newAverageEligibilityCadence\",\"type\":\"uint256\"}],\"name\":\"setSpread\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"testRange\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: "0x6080604052600080556000600155600060075534801561001e57600080fd5b5060405161070438038061070483398101604081905261003d91610054565b60029390935560039190915560045560055561008a565b6000806000806080858703121561006a57600080fd5b505082516020840151604085015160609095015191969095509092509050565b61066b806100996000396000f3fe608060405234801561001057600080fd5b50600436106101005760003560e01c80637145f11b11610097578063b30566b411610066578063b30566b4146101f6578063c228a98e146101ff578063d826f88f14610207578063e303666f1461021457600080fd5b80637145f11b146101985780637f407edf146101cb578063926f086e146101e4578063a9a4c57c146101ed57600080fd5b80634585e33b116100d35780634585e33b14610152578063523d9b8a146101655780636250a13a1461016e5780636e04ff0d1461017757600080fd5b806306661abd1461010557806313bda75b146101215780632555d2cf146101365780632ff3617d14610149575b600080fd5b61010e60075481565b6040519081526020015b60405180910390f35b61013461012f366004610430565b600455565b005b610134610144366004610430565b600555565b61010e60045481565b610134610160366004610449565b61021c565b61010e60015481565b61010
e60025481565b61018a610185366004610449565b610342565b6040516101189291906104bb565b6101bb6101a6366004610430565b60066020526000908152604090205460ff1681565b6040519015158152602001610118565b6101346101d9366004610531565b600291909155600355565b61010e60005481565b61010e60035481565b61010e60055481565b6101bb6103b7565b6101346000808055600755565b60075461010e565b60005a9050600061022b6103c6565b60005460015460408051841515815232602082015290810192909252606082015243608082018190529192507fbd6b6608a51477954e8b498c633bda87e5cd555e06ead50486398d9e3b9cebc09060a00160405180910390a18161028e57600080fd5b60005460000361029e5760008190555b6003546102ac906002610582565b6102b46103f2565b6102be91906105bf565b6102c890826105fa565b6102d39060016105fa565b600155600780549060006102e683610613565b91905055505b6005545a6102fa908561064b565b101561033b574340600090815260066020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690556102ec565b5050505050565b6000606060005a905060005b6004545a61035c908461064b565b10156103865780801561037f5750434060009081526006602052604090205460ff165b905061034e565b61038e6103c6565b604080518315156020820152016040516020818303038152906040529350935050509250929050565b60006103c16103c6565b905090565b6000805415806103c157506002546000546103e1904361064b565b1080156103c1575050600154431190565b60006103ff60014361064b565b604080519140602083015230908201526060016040516020818303038152906040528051906020012060001c905090565b60006020828403121561044257600080fd5b5035919050565b6000806020838503121561045c57600080fd5b823567ffffffffffffffff8082111561047457600080fd5b818501915085601f83011261048857600080fd5b81358181111561049757600080fd5b8660208285010111156104a957600080fd5b60209290920196919550909350505050565b821515815260006020604081840152835180604085015260005b818110156104f1578581018301518582016060015282016104d5565b5060006060828601015260607fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f830116850101925050509392505050565b6000806040838503121561054457600080fd5b50508035926020909101359150565b7f4e487b710
0000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156105ba576105ba610553565b500290565b6000826105f5577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500690565b8082018082111561060d5761060d610553565b92915050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361064457610644610553565b5060010190565b8181038181111561060d5761060d61055356fea164736f6c6343000810000a", +} + +var KeeperConsumerPerformanceABI = KeeperConsumerPerformanceMetaData.ABI + +var KeeperConsumerPerformanceBin = KeeperConsumerPerformanceMetaData.Bin + +func DeployKeeperConsumerPerformance(auth *bind.TransactOpts, backend bind.ContractBackend, _testRange *big.Int, _averageEligibilityCadence *big.Int, _checkGasToBurn *big.Int, _performGasToBurn *big.Int) (common.Address, *types.Transaction, *KeeperConsumerPerformance, error) { + parsed, err := KeeperConsumerPerformanceMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperConsumerPerformanceBin), backend, _testRange, _averageEligibilityCadence, _checkGasToBurn, _performGasToBurn) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperConsumerPerformance{address: address, abi: *parsed, KeeperConsumerPerformanceCaller: KeeperConsumerPerformanceCaller{contract: contract}, KeeperConsumerPerformanceTransactor: KeeperConsumerPerformanceTransactor{contract: contract}, KeeperConsumerPerformanceFilterer: KeeperConsumerPerformanceFilterer{contract: contract}}, nil +} + +type KeeperConsumerPerformance struct { + address common.Address + abi abi.ABI + KeeperConsumerPerformanceCaller + KeeperConsumerPerformanceTransactor + 
KeeperConsumerPerformanceFilterer +} + +type KeeperConsumerPerformanceCaller struct { + contract *bind.BoundContract +} + +type KeeperConsumerPerformanceTransactor struct { + contract *bind.BoundContract +} + +type KeeperConsumerPerformanceFilterer struct { + contract *bind.BoundContract +} + +type KeeperConsumerPerformanceSession struct { + Contract *KeeperConsumerPerformance + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperConsumerPerformanceCallerSession struct { + Contract *KeeperConsumerPerformanceCaller + CallOpts bind.CallOpts +} + +type KeeperConsumerPerformanceTransactorSession struct { + Contract *KeeperConsumerPerformanceTransactor + TransactOpts bind.TransactOpts +} + +type KeeperConsumerPerformanceRaw struct { + Contract *KeeperConsumerPerformance +} + +type KeeperConsumerPerformanceCallerRaw struct { + Contract *KeeperConsumerPerformanceCaller +} + +type KeeperConsumerPerformanceTransactorRaw struct { + Contract *KeeperConsumerPerformanceTransactor +} + +func NewKeeperConsumerPerformance(address common.Address, backend bind.ContractBackend) (*KeeperConsumerPerformance, error) { + abi, err := abi.JSON(strings.NewReader(KeeperConsumerPerformanceABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperConsumerPerformance(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperConsumerPerformance{address: address, abi: abi, KeeperConsumerPerformanceCaller: KeeperConsumerPerformanceCaller{contract: contract}, KeeperConsumerPerformanceTransactor: KeeperConsumerPerformanceTransactor{contract: contract}, KeeperConsumerPerformanceFilterer: KeeperConsumerPerformanceFilterer{contract: contract}}, nil +} + +func NewKeeperConsumerPerformanceCaller(address common.Address, caller bind.ContractCaller) (*KeeperConsumerPerformanceCaller, error) { + contract, err := bindKeeperConsumerPerformance(address, caller, nil, nil) + if err != nil { + return nil, err + } + return 
&KeeperConsumerPerformanceCaller{contract: contract}, nil +} + +func NewKeeperConsumerPerformanceTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperConsumerPerformanceTransactor, error) { + contract, err := bindKeeperConsumerPerformance(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperConsumerPerformanceTransactor{contract: contract}, nil +} + +func NewKeeperConsumerPerformanceFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperConsumerPerformanceFilterer, error) { + contract, err := bindKeeperConsumerPerformance(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperConsumerPerformanceFilterer{contract: contract}, nil +} + +func bindKeeperConsumerPerformance(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperConsumerPerformanceMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperConsumerPerformance.Contract.KeeperConsumerPerformanceCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.KeeperConsumerPerformanceTransactor.contract.Transfer(opts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.KeeperConsumerPerformanceTransactor.contract.Transact(opts, method, params...) 
+} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperConsumerPerformance.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.contract.Transfer(opts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.contract.Transact(opts, method, params...) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCaller) AverageEligibilityCadence(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperConsumerPerformance.contract.Call(opts, &out, "averageEligibilityCadence") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) AverageEligibilityCadence() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.AverageEligibilityCadence(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCallerSession) AverageEligibilityCadence() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.AverageEligibilityCadence(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCaller) CheckEligible(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _KeeperConsumerPerformance.contract.Call(opts, &out, "checkEligible") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func 
(_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) CheckEligible() (bool, error) { + return _KeeperConsumerPerformance.Contract.CheckEligible(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCallerSession) CheckEligible() (bool, error) { + return _KeeperConsumerPerformance.Contract.CheckEligible(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCaller) CheckGasToBurn(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperConsumerPerformance.contract.Call(opts, &out, "checkGasToBurn") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) CheckGasToBurn() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.CheckGasToBurn(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCallerSession) CheckGasToBurn() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.CheckGasToBurn(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCaller) CheckUpkeep(opts *bind.CallOpts, data []byte) (bool, []byte, error) { + var out []interface{} + err := _KeeperConsumerPerformance.contract.Call(opts, &out, "checkUpkeep", data) + + if err != nil { + return *new(bool), *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out1 := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return out0, out1, err + +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) CheckUpkeep(data []byte) (bool, []byte, error) { + return _KeeperConsumerPerformance.Contract.CheckUpkeep(&_KeeperConsumerPerformance.CallOpts, data) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCallerSession) CheckUpkeep(data []byte) (bool, []byte, 
error) { + return _KeeperConsumerPerformance.Contract.CheckUpkeep(&_KeeperConsumerPerformance.CallOpts, data) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCaller) Count(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperConsumerPerformance.contract.Call(opts, &out, "count") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) Count() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.Count(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCallerSession) Count() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.Count(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCaller) DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) { + var out []interface{} + err := _KeeperConsumerPerformance.contract.Call(opts, &out, "dummyMap", arg0) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) DummyMap(arg0 [32]byte) (bool, error) { + return _KeeperConsumerPerformance.Contract.DummyMap(&_KeeperConsumerPerformance.CallOpts, arg0) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCallerSession) DummyMap(arg0 [32]byte) (bool, error) { + return _KeeperConsumerPerformance.Contract.DummyMap(&_KeeperConsumerPerformance.CallOpts, arg0) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCaller) GetCountPerforms(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperConsumerPerformance.contract.Call(opts, &out, "getCountPerforms") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], 
new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) GetCountPerforms() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.GetCountPerforms(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCallerSession) GetCountPerforms() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.GetCountPerforms(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCaller) InitialCall(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperConsumerPerformance.contract.Call(opts, &out, "initialCall") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) InitialCall() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.InitialCall(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCallerSession) InitialCall() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.InitialCall(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCaller) NextEligible(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperConsumerPerformance.contract.Call(opts, &out, "nextEligible") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) NextEligible() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.NextEligible(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCallerSession) NextEligible() (*big.Int, error) { + return 
_KeeperConsumerPerformance.Contract.NextEligible(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCaller) PerformGasToBurn(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperConsumerPerformance.contract.Call(opts, &out, "performGasToBurn") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) PerformGasToBurn() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.PerformGasToBurn(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCallerSession) PerformGasToBurn() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.PerformGasToBurn(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCaller) TestRange(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperConsumerPerformance.contract.Call(opts, &out, "testRange") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) TestRange() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.TestRange(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceCallerSession) TestRange() (*big.Int, error) { + return _KeeperConsumerPerformance.Contract.TestRange(&_KeeperConsumerPerformance.CallOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceTransactor) PerformUpkeep(opts *bind.TransactOpts, data []byte) (*types.Transaction, error) { + return _KeeperConsumerPerformance.contract.Transact(opts, "performUpkeep", data) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) PerformUpkeep(data 
[]byte) (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.PerformUpkeep(&_KeeperConsumerPerformance.TransactOpts, data) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceTransactorSession) PerformUpkeep(data []byte) (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.PerformUpkeep(&_KeeperConsumerPerformance.TransactOpts, data) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceTransactor) Reset(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperConsumerPerformance.contract.Transact(opts, "reset") +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) Reset() (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.Reset(&_KeeperConsumerPerformance.TransactOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceTransactorSession) Reset() (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.Reset(&_KeeperConsumerPerformance.TransactOpts) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceTransactor) SetCheckGasToBurn(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) { + return _KeeperConsumerPerformance.contract.Transact(opts, "setCheckGasToBurn", value) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) SetCheckGasToBurn(value *big.Int) (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.SetCheckGasToBurn(&_KeeperConsumerPerformance.TransactOpts, value) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceTransactorSession) SetCheckGasToBurn(value *big.Int) (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.SetCheckGasToBurn(&_KeeperConsumerPerformance.TransactOpts, value) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceTransactor) SetPerformGasToBurn(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) { + return 
_KeeperConsumerPerformance.contract.Transact(opts, "setPerformGasToBurn", value) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) SetPerformGasToBurn(value *big.Int) (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.SetPerformGasToBurn(&_KeeperConsumerPerformance.TransactOpts, value) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceTransactorSession) SetPerformGasToBurn(value *big.Int) (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.SetPerformGasToBurn(&_KeeperConsumerPerformance.TransactOpts, value) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceTransactor) SetSpread(opts *bind.TransactOpts, _newTestRange *big.Int, _newAverageEligibilityCadence *big.Int) (*types.Transaction, error) { + return _KeeperConsumerPerformance.contract.Transact(opts, "setSpread", _newTestRange, _newAverageEligibilityCadence) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceSession) SetSpread(_newTestRange *big.Int, _newAverageEligibilityCadence *big.Int) (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.SetSpread(&_KeeperConsumerPerformance.TransactOpts, _newTestRange, _newAverageEligibilityCadence) +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceTransactorSession) SetSpread(_newTestRange *big.Int, _newAverageEligibilityCadence *big.Int) (*types.Transaction, error) { + return _KeeperConsumerPerformance.Contract.SetSpread(&_KeeperConsumerPerformance.TransactOpts, _newTestRange, _newAverageEligibilityCadence) +} + +type KeeperConsumerPerformancePerformingUpkeepIterator struct { + Event *KeeperConsumerPerformancePerformingUpkeep + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperConsumerPerformancePerformingUpkeepIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: 
+ it.Event = new(KeeperConsumerPerformancePerformingUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperConsumerPerformancePerformingUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperConsumerPerformancePerformingUpkeepIterator) Error() error { + return it.fail +} + +func (it *KeeperConsumerPerformancePerformingUpkeepIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperConsumerPerformancePerformingUpkeep struct { + Eligible bool + From common.Address + InitialCall *big.Int + NextEligible *big.Int + BlockNumber *big.Int + Raw types.Log +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceFilterer) FilterPerformingUpkeep(opts *bind.FilterOpts) (*KeeperConsumerPerformancePerformingUpkeepIterator, error) { + + logs, sub, err := _KeeperConsumerPerformance.contract.FilterLogs(opts, "PerformingUpkeep") + if err != nil { + return nil, err + } + return &KeeperConsumerPerformancePerformingUpkeepIterator{contract: _KeeperConsumerPerformance.contract, event: "PerformingUpkeep", logs: logs, sub: sub}, nil +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceFilterer) WatchPerformingUpkeep(opts *bind.WatchOpts, sink chan<- *KeeperConsumerPerformancePerformingUpkeep) (event.Subscription, error) { + + logs, sub, err := _KeeperConsumerPerformance.contract.WatchLogs(opts, "PerformingUpkeep") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperConsumerPerformancePerformingUpkeep) + if err := 
_KeeperConsumerPerformance.contract.UnpackLog(event, "PerformingUpkeep", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformanceFilterer) ParsePerformingUpkeep(log types.Log) (*KeeperConsumerPerformancePerformingUpkeep, error) { + event := new(KeeperConsumerPerformancePerformingUpkeep) + if err := _KeeperConsumerPerformance.contract.UnpackLog(event, "PerformingUpkeep", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformance) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperConsumerPerformance.abi.Events["PerformingUpkeep"].ID: + return _KeeperConsumerPerformance.ParsePerformingUpkeep(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperConsumerPerformancePerformingUpkeep) Topic() common.Hash { + return common.HexToHash("0xbd6b6608a51477954e8b498c633bda87e5cd555e06ead50486398d9e3b9cebc0") +} + +func (_KeeperConsumerPerformance *KeeperConsumerPerformance) Address() common.Address { + return _KeeperConsumerPerformance.address +} + +type KeeperConsumerPerformanceInterface interface { + AverageEligibilityCadence(opts *bind.CallOpts) (*big.Int, error) + + CheckEligible(opts *bind.CallOpts) (bool, error) + + CheckGasToBurn(opts *bind.CallOpts) (*big.Int, error) + + CheckUpkeep(opts *bind.CallOpts, data []byte) (bool, []byte, error) + + Count(opts *bind.CallOpts) (*big.Int, error) + + DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) + + GetCountPerforms(opts *bind.CallOpts) (*big.Int, error) + + InitialCall(opts *bind.CallOpts) (*big.Int, error) + + NextEligible(opts *bind.CallOpts) (*big.Int, error) + + 
PerformGasToBurn(opts *bind.CallOpts) (*big.Int, error) + + TestRange(opts *bind.CallOpts) (*big.Int, error) + + PerformUpkeep(opts *bind.TransactOpts, data []byte) (*types.Transaction, error) + + Reset(opts *bind.TransactOpts) (*types.Transaction, error) + + SetCheckGasToBurn(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) + + SetPerformGasToBurn(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) + + SetSpread(opts *bind.TransactOpts, _newTestRange *big.Int, _newAverageEligibilityCadence *big.Int) (*types.Transaction, error) + + FilterPerformingUpkeep(opts *bind.FilterOpts) (*KeeperConsumerPerformancePerformingUpkeepIterator, error) + + WatchPerformingUpkeep(opts *bind.WatchOpts, sink chan<- *KeeperConsumerPerformancePerformingUpkeep) (event.Subscription, error) + + ParsePerformingUpkeep(log types.Log) (*KeeperConsumerPerformancePerformingUpkeep, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_consumer_wrapper/keeper_consumer_wrapper.go b/core/gethwrappers/generated/keeper_consumer_wrapper/keeper_consumer_wrapper.go new file mode 100644 index 00000000..feb614aa --- /dev/null +++ b/core/gethwrappers/generated/keeper_consumer_wrapper/keeper_consumer_wrapper.go @@ -0,0 +1,303 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package keeper_consumer_wrapper + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var KeeperConsumerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"updateInterval\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"counter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"interval\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastTimeStamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60a060405234801561001057600080fd5b5060405161033838038061033883398101604081905261002f9161003f565b6080524260015560008055610058565b60006020828403121561005157600080fd5b5051919050565b6080516102c6610072600039600060cc01526102c66000f3fe608060405234801561001057600080fd5b50600436106100675760003560e01c806361bc221a1161005057806361bc221a1461009d5780636e04ff0d146100a6578063947a36fb146100c757600080fd5b80633f3b3b271461006c5780634585e33b14610088575b600080fd5b61007560015481565b6040519081526020015b60405180910390f35b61009b610096366004610191565b6100ee565b005b61007560005481565b6100b96100b4366004610191565b610103565b60405161007f929190610203565b6100757f000000000000000000000000000000000000000000000000000000000000000081565b6000546100fc906001610279565b6000555050565b6000606061010f610157565b6001848481818080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250959a92995091975050505050505050565b321561018f576040517fb60ac5db00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b600080602083850312156101a457600080fd5b823567ffffffffffffffff808211156101bc57600080fd5b818501915085601f8301126101d057600080fd5b8135818111156101df57600080fd5b8660208285010111156101f157600080fd5b60209290920196919550909350505050565b821515815260006020604081840152835180604085015260005b818110156102395785810183015185820160600152820161021d565b5060006060828601015260607fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f830116850101925050509392505050565b808201808211156102b3577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b9291505056fea164736f6c6343000810000a", +} + +var KeeperConsumerABI = KeeperConsumerMetaData.ABI + +var KeeperConsumerBin = KeeperConsumerMetaData.Bin + +func DeployKeeperConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, updateInterval *big.Int) (common.Address, *types.Transaction, *KeeperConsumer, error) { + parsed, err := KeeperConsumerMetaData.GetAbi() + if err != 
nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperConsumerBin), backend, updateInterval) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperConsumer{address: address, abi: *parsed, KeeperConsumerCaller: KeeperConsumerCaller{contract: contract}, KeeperConsumerTransactor: KeeperConsumerTransactor{contract: contract}, KeeperConsumerFilterer: KeeperConsumerFilterer{contract: contract}}, nil +} + +type KeeperConsumer struct { + address common.Address + abi abi.ABI + KeeperConsumerCaller + KeeperConsumerTransactor + KeeperConsumerFilterer +} + +type KeeperConsumerCaller struct { + contract *bind.BoundContract +} + +type KeeperConsumerTransactor struct { + contract *bind.BoundContract +} + +type KeeperConsumerFilterer struct { + contract *bind.BoundContract +} + +type KeeperConsumerSession struct { + Contract *KeeperConsumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperConsumerCallerSession struct { + Contract *KeeperConsumerCaller + CallOpts bind.CallOpts +} + +type KeeperConsumerTransactorSession struct { + Contract *KeeperConsumerTransactor + TransactOpts bind.TransactOpts +} + +type KeeperConsumerRaw struct { + Contract *KeeperConsumer +} + +type KeeperConsumerCallerRaw struct { + Contract *KeeperConsumerCaller +} + +type KeeperConsumerTransactorRaw struct { + Contract *KeeperConsumerTransactor +} + +func NewKeeperConsumer(address common.Address, backend bind.ContractBackend) (*KeeperConsumer, error) { + abi, err := abi.JSON(strings.NewReader(KeeperConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperConsumer{address: address, abi: abi, KeeperConsumerCaller: 
KeeperConsumerCaller{contract: contract}, KeeperConsumerTransactor: KeeperConsumerTransactor{contract: contract}, KeeperConsumerFilterer: KeeperConsumerFilterer{contract: contract}}, nil +} + +func NewKeeperConsumerCaller(address common.Address, caller bind.ContractCaller) (*KeeperConsumerCaller, error) { + contract, err := bindKeeperConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperConsumerCaller{contract: contract}, nil +} + +func NewKeeperConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperConsumerTransactor, error) { + contract, err := bindKeeperConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperConsumerTransactor{contract: contract}, nil +} + +func NewKeeperConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperConsumerFilterer, error) { + contract, err := bindKeeperConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperConsumerFilterer{contract: contract}, nil +} + +func bindKeeperConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperConsumer *KeeperConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperConsumer.Contract.KeeperConsumerCaller.contract.Call(opts, result, method, params...) 
+} + +func (_KeeperConsumer *KeeperConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperConsumer.Contract.KeeperConsumerTransactor.contract.Transfer(opts) +} + +func (_KeeperConsumer *KeeperConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperConsumer.Contract.KeeperConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperConsumer *KeeperConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperConsumer.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeeperConsumer *KeeperConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperConsumer.Contract.contract.Transfer(opts) +} + +func (_KeeperConsumer *KeeperConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperConsumer.Contract.contract.Transact(opts, method, params...) 
+} + +func (_KeeperConsumer *KeeperConsumerCaller) CheckUpkeep(opts *bind.CallOpts, checkData []byte) (CheckUpkeep, + + error) { + var out []interface{} + err := _KeeperConsumer.contract.Call(opts, &out, "checkUpkeep", checkData) + + outstruct := new(CheckUpkeep) + if err != nil { + return *outstruct, err + } + + outstruct.UpkeepNeeded = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.PerformData = *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return *outstruct, err + +} + +func (_KeeperConsumer *KeeperConsumerSession) CheckUpkeep(checkData []byte) (CheckUpkeep, + + error) { + return _KeeperConsumer.Contract.CheckUpkeep(&_KeeperConsumer.CallOpts, checkData) +} + +func (_KeeperConsumer *KeeperConsumerCallerSession) CheckUpkeep(checkData []byte) (CheckUpkeep, + + error) { + return _KeeperConsumer.Contract.CheckUpkeep(&_KeeperConsumer.CallOpts, checkData) +} + +func (_KeeperConsumer *KeeperConsumerCaller) Counter(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperConsumer.contract.Call(opts, &out, "counter") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperConsumer *KeeperConsumerSession) Counter() (*big.Int, error) { + return _KeeperConsumer.Contract.Counter(&_KeeperConsumer.CallOpts) +} + +func (_KeeperConsumer *KeeperConsumerCallerSession) Counter() (*big.Int, error) { + return _KeeperConsumer.Contract.Counter(&_KeeperConsumer.CallOpts) +} + +func (_KeeperConsumer *KeeperConsumerCaller) Interval(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperConsumer.contract.Call(opts, &out, "interval") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperConsumer *KeeperConsumerSession) Interval() (*big.Int, error) { + return _KeeperConsumer.Contract.Interval(&_KeeperConsumer.CallOpts) +} + 
+func (_KeeperConsumer *KeeperConsumerCallerSession) Interval() (*big.Int, error) { + return _KeeperConsumer.Contract.Interval(&_KeeperConsumer.CallOpts) +} + +func (_KeeperConsumer *KeeperConsumerCaller) LastTimeStamp(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperConsumer.contract.Call(opts, &out, "lastTimeStamp") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperConsumer *KeeperConsumerSession) LastTimeStamp() (*big.Int, error) { + return _KeeperConsumer.Contract.LastTimeStamp(&_KeeperConsumer.CallOpts) +} + +func (_KeeperConsumer *KeeperConsumerCallerSession) LastTimeStamp() (*big.Int, error) { + return _KeeperConsumer.Contract.LastTimeStamp(&_KeeperConsumer.CallOpts) +} + +func (_KeeperConsumer *KeeperConsumerTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _KeeperConsumer.contract.Transact(opts, "performUpkeep", performData) +} + +func (_KeeperConsumer *KeeperConsumerSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _KeeperConsumer.Contract.PerformUpkeep(&_KeeperConsumer.TransactOpts, performData) +} + +func (_KeeperConsumer *KeeperConsumerTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _KeeperConsumer.Contract.PerformUpkeep(&_KeeperConsumer.TransactOpts, performData) +} + +type CheckUpkeep struct { + UpkeepNeeded bool + PerformData []byte +} + +func (_KeeperConsumer *KeeperConsumer) Address() common.Address { + return _KeeperConsumer.address +} + +type KeeperConsumerInterface interface { + CheckUpkeep(opts *bind.CallOpts, checkData []byte) (CheckUpkeep, + + error) + + Counter(opts *bind.CallOpts) (*big.Int, error) + + Interval(opts *bind.CallOpts) (*big.Int, error) + + LastTimeStamp(opts *bind.CallOpts) (*big.Int, error) + + PerformUpkeep(opts *bind.TransactOpts, performData []byte) 
(*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_registrar_wrapper1_2/keeper_registrar_wrapper1_2.go b/core/gethwrappers/generated/keeper_registrar_wrapper1_2/keeper_registrar_wrapper1_2.go new file mode 100644 index 00000000..f9f3c53c --- /dev/null +++ b/core/gethwrappers/generated/keeper_registrar_wrapper1_2/keeper_registrar_wrapper1_2.go @@ -0,0 +1,1493 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package keeper_registrar_wrapper1_2 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var KeeperRegistrarMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"PLIAddress\",\"type\":\"address\"},{\"internalType\":\"enumKeeperRegistrar.AutoApproveType\",\"name\":\"autoApproveConfigType\",\"type\":\"uint8\"},{\"internalType\":\"uint16\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint16\"},{\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AmountMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FunctionNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"HashMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientPayment\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidAdminAddress\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"LinkTransferFailed\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyAdminOrOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyLink\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistrationRequestFailed\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RequestNotFound\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"SenderMismatch\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"senderAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"allowed\",\"type\":\"bool\"}],\"name\":\"AutoApproveAllowedSenderSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"enumKeeperRegistrar.AutoApproveType\",\"name\":\"autoApproveConfigType\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{
\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"}],\"name\":\"ConfigChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"displayName\",\"type\":\"string\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"RegistrationApproved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"RegistrationRejected\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"encryptedEmail\",\"type\":\"bytes\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"indexed\":true,\"internalType\":\"uin
t8\",\"name\":\"source\",\"type\":\"uint8\"}],\"name\":\"RegistrationRequested\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"approve\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"cancel\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"senderAddress\",\"type\":\"address\"}],\"name\":\"getAutoApproveAllowedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"getPendingRequest\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRegistrationConfig\",\"outputs\":[{\"internalType\":\"enumKeeperRegistrar.AutoApproveType\",\"name\":\"autoApproveConfigType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"app
rovedCount\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"minPLIJuels\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"encryptedEmail\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"internalType\":\"uint8\",\"name\":\"source\",\"type\":\"uint8\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"register\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"senderAddress\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"allowed\",\"type\":\"bool\"}],\"name\":\"setAutoApproveAllowedSender\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"enumKeeperRegistrar.AutoApproveType\",\"name\":\"autoApproveConfigType\",\"type\":\"uint8\"},{\"internalType\":\"uint16\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint16\"},{\"internalType
\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"}],\"name\":\"setRegistrationConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: "0x60a06040523480156200001157600080fd5b506040516200234638038062002346833981016040819052620000349162000394565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000ec565b5050506001600160601b0319606086901b16608052620000e18484848462000198565b50505050506200048d565b6001600160a01b038116331415620001475760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b620001a262000319565b6003546040805160a081019091526501000000000090910463ffffffff169080866002811115620001d757620001d762000477565b815261ffff8616602082015263ffffffff831660408201526001600160a01b03851660608201526001600160601b038416608090910152805160038054909190829060ff1916600183600281111562000234576200023462000477565b0217905550602082015181546040808501516060860151610100600160481b031990931661010063ffffffff9586160263ffffffff60281b19161765010000000000949091169390930292909217600160481b600160e81b03191669010000000000000000006001600160a01b0390921691909102178255608090920
151600190910180546001600160601b0319166001600160601b03909216919091179055517f6293a703ec7145dfa23c5cde2e627d6a02e153fc2e9c03b14d1e22cbb4a7e9cd906200030a90879087908790879062000422565b60405180910390a15050505050565b6000546001600160a01b03163314620003755760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000082565b565b80516001600160a01b03811681146200038f57600080fd5b919050565b600080600080600060a08688031215620003ad57600080fd5b620003b88662000377565b9450602086015160038110620003cd57600080fd5b604087015190945061ffff81168114620003e657600080fd5b9250620003f66060870162000377565b60808701519092506001600160601b03811681146200041457600080fd5b809150509295509295909350565b60808101600386106200044557634e487b7160e01b600052602160045260246000fd5b94815261ffff9390931660208401526001600160a01b039190911660408301526001600160601b031660609091015290565b634e487b7160e01b600052602160045260246000fd5b60805160601c611e7e620004c86000396000818161015b015281816104a601528181610a410152818161110b01526113500152611e7e6000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c8063850af0cb1161008c578063a4c0ed3611610066578063a4c0ed36146102fd578063a793ab8b14610310578063c4d252f514610323578063f2fde38b1461033657600080fd5b8063850af0cb1461021957806388b12d55146102325780638da5cb5b146102df57600080fd5b80633659d666116100c85780633659d666146101a2578063367b9b4f146101b557806379ba5097146101c85780637e776f7f146101d057600080fd5b8063181f5a77146100ef578063183310b3146101415780631b6b6d2314610156575b600080fd5b61012b6040518060400160405280601581526020017f4b656570657252656769737472617220312e312e30000000000000000000000081525081565b6040516101389190611cf1565b60405180910390f35b61015461014f3660046118fc565b610349565b005b61017d7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610138565b6101546101b03660046119a1565b61048e565b6101546101c33660046117d2565b6107b4565b610154610846565b6
102096101de3660046117b0565b73ffffffffffffffffffffffffffffffffffffffff1660009081526005602052604090205460ff1690565b6040519015158152602001610138565b610221610948565b604051610138959493929190611ca1565b6102a6610240366004611880565b60009081526002602090815260409182902082518084019093525473ffffffffffffffffffffffffffffffffffffffff8116808452740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff169290910182905291565b6040805173ffffffffffffffffffffffffffffffffffffffff90931683526bffffffffffffffffffffffff909116602083015201610138565b60005473ffffffffffffffffffffffffffffffffffffffff1661017d565b61015461030b366004611809565b610a29565b61015461031e366004611899565b610d7c565b610154610331366004611880565b610f91565b6101546103443660046117b0565b6111f2565b610351611206565b60008181526002602090815260409182902082518084019093525473ffffffffffffffffffffffffffffffffffffffff8116808452740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff16918301919091526103ea576040517f4b13b31e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008787878787604051602001610405959493929190611bb3565b604051602081830303815290604052805190602001209050808314610456576040517f3f4d605300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000838152600260209081526040822091909155820151610483908a908a908a908a908a908a908a611289565b505050505050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146104fd576040517f018d10be00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff861661054a576040517f05bb467c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008888888888604051602001610565959493929190611bb3565b6040516020818303038152906040528051906020012090508260ff168973ffffffffffffffffffffffffffffffffffffffff16827fc3f5df4aefec026f610a3fcb08f19476492d69d2cb78b1c2eba259a8820e6
a788f8f8f8e8e8e8e8e6040516105d6989796959493929190611d04565b60405180910390a46040805160a08101909152600380546000929190829060ff16600281111561060857610608611e05565b600281111561061957610619611e05565b8152815463ffffffff61010082048116602084015265010000000000820416604083015273ffffffffffffffffffffffffffffffffffffffff69010000000000000000009091041660608201526001909101546bffffffffffffffffffffffff16608090910152905061068c81846114b5565b156106f55760408101516106a1906001611d8b565b6003805463ffffffff9290921665010000000000027fffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffff9092169190911790556106f08d8b8b8b8b8b8b89611289565b6107a5565b6000828152600260205260408120546107359087907401000000000000000000000000000000000000000090046bffffffffffffffffffffffff16611db3565b60408051808201825273ffffffffffffffffffffffffffffffffffffffff808d1682526bffffffffffffffffffffffff9384166020808401918252600089815260029091529390932091519251909316740100000000000000000000000000000000000000000291909216179055505b50505050505050505050505050565b6107bc611206565b73ffffffffffffffffffffffffffffffffffffffff821660008181526005602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001685151590811790915591519182527f20c6237dac83526a849285a9f79d08a483291bdd3a056a0ef9ae94ecee1ad356910160405180910390a25050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146108cc576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6040805160a081019091526003805460009283928392839283928392829060ff16600281111561097a5761097a611e05565b600281111561098b5761098b611e05565b81528154610100810463ffffffff908116602080850
191909152650100000000008304909116604080850191909152690100000000000000000090920473ffffffffffffffffffffffffffffffffffffffff166060808501919091526001909401546bffffffffffffffffffffffff90811660809485015285519186015192860151948601519590930151909b919a50929850929650169350915050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610a98576040517f018d10be00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b81818080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050505060208101517fffffffff0000000000000000000000000000000000000000000000000000000081167f3659d6660000000000000000000000000000000000000000000000000000000014610b4e576040517fe3d6792100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8484848080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050505060e4810151828114610bc3576040517f55e97b0d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8887878080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050505061012481015173ffffffffffffffffffffffffffffffffffffffff83811690821614610c52576040517ff8c5638e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610124891015610c8e576040517fdfe9309000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6004546bffffffffffffffffffffffff168b1015610cd8576040517fcd1c886700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60003073ffffffffffffffffffffffffffffffffffffffff168b8b604051610d01929190611ba3565b600060405180830381855af49150503d8060008114610d3c576040519150601f19603f3d011682016040523d82523d6000602084013e610d41565b606091505b50509050806107a5576040517f649bf81000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610d84611206565b600354604
0805160a081019091526501000000000090910463ffffffff169080866002811115610db657610db6611e05565b815261ffff8616602082015263ffffffff8316604082015273ffffffffffffffffffffffffffffffffffffffff851660608201526bffffffffffffffffffffffff841660809091015280516003805490919082907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001836002811115610e4057610e40611e05565b02179055506020820151815460408085015160608601517fffffffffffffffffffffffffffffffffffffffffffffff0000000000000000ff90931661010063ffffffff958616027fffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffff1617650100000000009490911693909302929092177fffffff0000000000000000000000000000000000000000ffffffffffffffffff16690100000000000000000073ffffffffffffffffffffffffffffffffffffffff90921691909102178255608090920151600190910180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff909216919091179055517f6293a703ec7145dfa23c5cde2e627d6a02e153fc2e9c03b14d1e22cbb4a7e9cd90610f82908790879087908790611c50565b60405180910390a15050505050565b60008181526002602090815260409182902082518084019093525473ffffffffffffffffffffffffffffffffffffffff8116808452740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff1691830191909152331480611018575060005473ffffffffffffffffffffffffffffffffffffffff1633145b61104e576040517f61685c2b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805173ffffffffffffffffffffffffffffffffffffffff1661109c576040517f4b13b31e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526002602090815260408083208390559083015190517fa9059cbb0000000000000000000000000000000000000000000000000000000081523360048201526bffffffffffffffffffffffff909116602482015273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb90604401602060405180830381600087803b15801561114f57600080fd5b505af1158015611163573d6000803e3d6000fd5b505050506040513d601f19601f8
20116820180604052508101906111879190611863565b9050806111c2576040517fc2e4dce80000000000000000000000000000000000000000000000000000000081523360048201526024016108c3565b60405183907f3663fb28ebc87645eb972c9dad8521bf665c623f287e79f1c56f1eb374b82a2290600090a2505050565b6111fa611206565b6112038161155c565b50565b60005473ffffffffffffffffffffffffffffffffffffffff163314611287576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016108c3565b565b6003546040517fda5c6741000000000000000000000000000000000000000000000000000000008152690100000000000000000090910473ffffffffffffffffffffffffffffffffffffffff1690600090829063da5c6741906112f8908c908c908c908c908c90600401611bb3565b602060405180830381600087803b15801561131257600080fd5b505af1158015611326573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061134a9190611a9b565b905060007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16634000aea084878560405160200161139f91815260200190565b6040516020818303038152906040526040518463ffffffff1660e01b81526004016113cc93929190611c04565b602060405180830381600087803b1580156113e657600080fd5b505af11580156113fa573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061141e9190611863565b90508061146f576040517fc2e4dce800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff841660048201526024016108c3565b81847fb9a292fb7e3edd920cd2d2829a3615a640c43fd7de0a0820aa0668feb4c37d4b8d6040516114a09190611cf1565b60405180910390a35050505050505050505050565b600080835160028111156114cb576114cb611e05565b14156114d957506000611556565b6001835160028111156114ee576114ee611e05565b148015611521575073ffffffffffffffffffffffffffffffffffffffff821660009081526005602052604090205460ff16155b1561152e57506000611556565b826020015163ffffffff16836040015163ffffffff16101561155257506001611556565b5060005b92915
050565b73ffffffffffffffffffffffffffffffffffffffff81163314156115dc576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016108c3565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b803573ffffffffffffffffffffffffffffffffffffffff8116811461167657600080fd5b919050565b60008083601f84011261168d57600080fd5b50813567ffffffffffffffff8111156116a557600080fd5b6020830191508360208285010111156116bd57600080fd5b9250929050565b600082601f8301126116d557600080fd5b813567ffffffffffffffff808211156116f0576116f0611e34565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190828211818310171561173657611736611e34565b8160405283815286602085880101111561174f57600080fd5b836020870160208301376000602085830101528094505050505092915050565b803563ffffffff8116811461167657600080fd5b803560ff8116811461167657600080fd5b80356bffffffffffffffffffffffff8116811461167657600080fd5b6000602082840312156117c257600080fd5b6117cb82611652565b9392505050565b600080604083850312156117e557600080fd5b6117ee83611652565b915060208301356117fe81611e63565b809150509250929050565b6000806000806060858703121561181f57600080fd5b61182885611652565b935060208501359250604085013567ffffffffffffffff81111561184b57600080fd5b6118578782880161167b565b95989497509550505050565b60006020828403121561187557600080fd5b81516117cb81611e63565b60006020828403121561189257600080fd5b5035919050565b600080600080608085870312156118af57600080fd5b8435600381106118be57600080fd5b9350602085013561ffff811681146118d557600080fd5b92506118e360408601611652565b91506118f160608601611794565b905092959194509250565b600080600080600080600060c0888a03121561191757600080fd5b873567ffffffffffffffff8082111561192f57600080fd5b61193b8b838c016116c4565b98506119496
0208b01611652565b975061195760408b0161176f565b965061196560608b01611652565b955060808a013591508082111561197b57600080fd5b506119888a828b0161167b565b989b979a5095989497959660a090950135949350505050565b60008060008060008060008060008060006101208c8e0312156119c357600080fd5b67ffffffffffffffff808d3511156119da57600080fd5b6119e78e8e358f016116c4565b9b508060208e013511156119fa57600080fd5b611a0a8e60208f01358f0161167b565b909b509950611a1b60408e01611652565b9850611a2960608e0161176f565b9750611a3760808e01611652565b96508060a08e01351115611a4a57600080fd5b50611a5b8d60a08e01358e0161167b565b9095509350611a6c60c08d01611794565b9250611a7a60e08d01611783565b9150611a896101008d01611652565b90509295989b509295989b9093969950565b600060208284031215611aad57600080fd5b5051919050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b6000815180845260005b81811015611b2357602081850181015186830182015201611b07565b81811115611b35576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60038110611b9f577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b9052565b8183823760009101908152919050565b600073ffffffffffffffffffffffffffffffffffffffff808816835263ffffffff8716602084015280861660408401525060806060830152611bf9608083018486611ab4565b979650505050505050565b73ffffffffffffffffffffffffffffffffffffffff841681526bffffffffffffffffffffffff83166020820152606060408201526000611c476060830184611afd565b95945050505050565b60808101611c5e8287611b68565b61ffff8516602083015273ffffffffffffffffffffffffffffffffffffffff841660408301526bffffffffffffffffffffffff8316606083015295945050505050565b60a08101611caf8288611b68565b63ffffffff808716602084015280861660408401525073ffffffffffffffffffffffffffffffffffffffff841660608301528260808301529695505050505050565b6020815260006117cb6020830184611afd565b60c081526000611d1760c083018b611afd565b8281036020840152611d2a818a8c611ab45
65b905063ffffffff8816604084015273ffffffffffffffffffffffffffffffffffffffff871660608401528281036080840152611d67818688611ab4565b9150506bffffffffffffffffffffffff831660a08301529998505050505050505050565b600063ffffffff808316818516808303821115611daa57611daa611dd6565b01949350505050565b60006bffffffffffffffffffffffff808316818516808303821115611daa57611daa5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b801515811461120357600080fdfea164736f6c6343000806000a", +} + +var KeeperRegistrarABI = KeeperRegistrarMetaData.ABI + +var KeeperRegistrarBin = KeeperRegistrarMetaData.Bin + +func DeployKeeperRegistrar(auth *bind.TransactOpts, backend bind.ContractBackend, PLIAddress common.Address, autoApproveConfigType uint8, autoApproveMaxAllowed uint16, keeperRegistry common.Address, minPLIJuels *big.Int) (common.Address, *types.Transaction, *KeeperRegistrar, error) { + parsed, err := KeeperRegistrarMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistrarBin), backend, PLIAddress, autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistrar{address: address, abi: *parsed, KeeperRegistrarCaller: KeeperRegistrarCaller{contract: contract}, KeeperRegistrarTransactor: KeeperRegistrarTransactor{contract: contract}, KeeperRegistrarFilterer: KeeperRegistrarFilterer{contract: contract}}, nil +} + +type KeeperRegistrar struct { + address common.Address + abi abi.ABI + KeeperRegistrarCaller + KeeperRegistrarTransactor + KeeperRegistrarFilterer 
+} + +type KeeperRegistrarCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistrarTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistrarFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistrarSession struct { + Contract *KeeperRegistrar + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistrarCallerSession struct { + Contract *KeeperRegistrarCaller + CallOpts bind.CallOpts +} + +type KeeperRegistrarTransactorSession struct { + Contract *KeeperRegistrarTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistrarRaw struct { + Contract *KeeperRegistrar +} + +type KeeperRegistrarCallerRaw struct { + Contract *KeeperRegistrarCaller +} + +type KeeperRegistrarTransactorRaw struct { + Contract *KeeperRegistrarTransactor +} + +func NewKeeperRegistrar(address common.Address, backend bind.ContractBackend) (*KeeperRegistrar, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistrarABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistrar(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistrar{address: address, abi: abi, KeeperRegistrarCaller: KeeperRegistrarCaller{contract: contract}, KeeperRegistrarTransactor: KeeperRegistrarTransactor{contract: contract}, KeeperRegistrarFilterer: KeeperRegistrarFilterer{contract: contract}}, nil +} + +func NewKeeperRegistrarCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistrarCaller, error) { + contract, err := bindKeeperRegistrar(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistrarCaller{contract: contract}, nil +} + +func NewKeeperRegistrarTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistrarTransactor, error) { + contract, err := bindKeeperRegistrar(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistrarTransactor{contract: 
contract}, nil +} + +func NewKeeperRegistrarFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistrarFilterer, error) { + contract, err := bindKeeperRegistrar(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistrarFilterer{contract: contract}, nil +} + +func bindKeeperRegistrar(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistrarMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperRegistrar *KeeperRegistrarRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistrar.Contract.KeeperRegistrarCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistrar *KeeperRegistrarRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.KeeperRegistrarTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistrar *KeeperRegistrarRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.KeeperRegistrarTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistrar.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_KeeperRegistrar *KeeperRegistrarTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistrar *KeeperRegistrarCaller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistrar.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistrar *KeeperRegistrarSession) PLI() (common.Address, error) { + return _KeeperRegistrar.Contract.PLI(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerSession) PLI() (common.Address, error) { + return _KeeperRegistrar.Contract.PLI(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCaller) GetAutoApproveAllowedSender(opts *bind.CallOpts, senderAddress common.Address) (bool, error) { + var out []interface{} + err := _KeeperRegistrar.contract.Call(opts, &out, "getAutoApproveAllowedSender", senderAddress) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_KeeperRegistrar *KeeperRegistrarSession) GetAutoApproveAllowedSender(senderAddress common.Address) (bool, error) { + return _KeeperRegistrar.Contract.GetAutoApproveAllowedSender(&_KeeperRegistrar.CallOpts, senderAddress) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerSession) GetAutoApproveAllowedSender(senderAddress common.Address) (bool, error) { + return _KeeperRegistrar.Contract.GetAutoApproveAllowedSender(&_KeeperRegistrar.CallOpts, senderAddress) +} + +func (_KeeperRegistrar 
*KeeperRegistrarCaller) GetPendingRequest(opts *bind.CallOpts, hash [32]byte) (common.Address, *big.Int, error) { + var out []interface{} + err := _KeeperRegistrar.contract.Call(opts, &out, "getPendingRequest", hash) + + if err != nil { + return *new(common.Address), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +func (_KeeperRegistrar *KeeperRegistrarSession) GetPendingRequest(hash [32]byte) (common.Address, *big.Int, error) { + return _KeeperRegistrar.Contract.GetPendingRequest(&_KeeperRegistrar.CallOpts, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerSession) GetPendingRequest(hash [32]byte) (common.Address, *big.Int, error) { + return _KeeperRegistrar.Contract.GetPendingRequest(&_KeeperRegistrar.CallOpts, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarCaller) GetRegistrationConfig(opts *bind.CallOpts) (GetRegistrationConfig, + + error) { + var out []interface{} + err := _KeeperRegistrar.contract.Call(opts, &out, "getRegistrationConfig") + + outstruct := new(GetRegistrationConfig) + if err != nil { + return *outstruct, err + } + + outstruct.AutoApproveConfigType = *abi.ConvertType(out[0], new(uint8)).(*uint8) + outstruct.AutoApproveMaxAllowed = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ApprovedCount = *abi.ConvertType(out[2], new(uint32)).(*uint32) + outstruct.KeeperRegistry = *abi.ConvertType(out[3], new(common.Address)).(*common.Address) + outstruct.MinPLIJuels = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_KeeperRegistrar *KeeperRegistrarSession) GetRegistrationConfig() (GetRegistrationConfig, + + error) { + return _KeeperRegistrar.Contract.GetRegistrationConfig(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerSession) GetRegistrationConfig() (GetRegistrationConfig, + + error) { + return 
_KeeperRegistrar.Contract.GetRegistrationConfig(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistrar.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistrar *KeeperRegistrarSession) Owner() (common.Address, error) { + return _KeeperRegistrar.Contract.Owner(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerSession) Owner() (common.Address, error) { + return _KeeperRegistrar.Contract.Owner(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _KeeperRegistrar.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_KeeperRegistrar *KeeperRegistrarSession) TypeAndVersion() (string, error) { + return _KeeperRegistrar.Contract.TypeAndVersion(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerSession) TypeAndVersion() (string, error) { + return _KeeperRegistrar.Contract.TypeAndVersion(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "acceptOwnership") +} + +func (_KeeperRegistrar *KeeperRegistrarSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistrar.Contract.AcceptOwnership(&_KeeperRegistrar.TransactOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistrar.Contract.AcceptOwnership(&_KeeperRegistrar.TransactOpts) +} 
+ +func (_KeeperRegistrar *KeeperRegistrarTransactor) Approve(opts *bind.TransactOpts, name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "approve", name, upkeepContract, gasLimit, adminAddress, checkData, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) Approve(name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.Approve(&_KeeperRegistrar.TransactOpts, name, upkeepContract, gasLimit, adminAddress, checkData, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) Approve(name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.Approve(&_KeeperRegistrar.TransactOpts, name, upkeepContract, gasLimit, adminAddress, checkData, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) Cancel(opts *bind.TransactOpts, hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "cancel", hash) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) Cancel(hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.Cancel(&_KeeperRegistrar.TransactOpts, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) Cancel(hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.Cancel(&_KeeperRegistrar.TransactOpts, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) 
OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.OnTokenTransfer(&_KeeperRegistrar.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.OnTokenTransfer(&_KeeperRegistrar.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) Register(opts *bind.TransactOpts, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8, sender common.Address) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "register", name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source, sender) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) Register(name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8, sender common.Address) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.Register(&_KeeperRegistrar.TransactOpts, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source, sender) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) Register(name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8, sender common.Address) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.Register(&_KeeperRegistrar.TransactOpts, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source, sender) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) SetAutoApproveAllowedSender(opts *bind.TransactOpts, senderAddress common.Address, allowed 
bool) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "setAutoApproveAllowedSender", senderAddress, allowed) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) SetAutoApproveAllowedSender(senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.SetAutoApproveAllowedSender(&_KeeperRegistrar.TransactOpts, senderAddress, allowed) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) SetAutoApproveAllowedSender(senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.SetAutoApproveAllowedSender(&_KeeperRegistrar.TransactOpts, senderAddress, allowed) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) SetRegistrationConfig(opts *bind.TransactOpts, autoApproveConfigType uint8, autoApproveMaxAllowed uint16, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "setRegistrationConfig", autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) SetRegistrationConfig(autoApproveConfigType uint8, autoApproveMaxAllowed uint16, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.SetRegistrationConfig(&_KeeperRegistrar.TransactOpts, autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) SetRegistrationConfig(autoApproveConfigType uint8, autoApproveMaxAllowed uint16, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.SetRegistrationConfig(&_KeeperRegistrar.TransactOpts, autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) 
(*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "transferOwnership", to) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.TransferOwnership(&_KeeperRegistrar.TransactOpts, to) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.TransferOwnership(&_KeeperRegistrar.TransactOpts, to) +} + +type KeeperRegistrarAutoApproveAllowedSenderSetIterator struct { + Event *KeeperRegistrarAutoApproveAllowedSenderSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarAutoApproveAllowedSenderSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarAutoApproveAllowedSenderSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarAutoApproveAllowedSenderSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarAutoApproveAllowedSenderSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarAutoApproveAllowedSenderSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarAutoApproveAllowedSenderSet struct { + SenderAddress common.Address + Allowed bool + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterAutoApproveAllowedSenderSet(opts *bind.FilterOpts, 
senderAddress []common.Address) (*KeeperRegistrarAutoApproveAllowedSenderSetIterator, error) { + + var senderAddressRule []interface{} + for _, senderAddressItem := range senderAddress { + senderAddressRule = append(senderAddressRule, senderAddressItem) + } + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "AutoApproveAllowedSenderSet", senderAddressRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarAutoApproveAllowedSenderSetIterator{contract: _KeeperRegistrar.contract, event: "AutoApproveAllowedSenderSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchAutoApproveAllowedSenderSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarAutoApproveAllowedSenderSet, senderAddress []common.Address) (event.Subscription, error) { + + var senderAddressRule []interface{} + for _, senderAddressItem := range senderAddress { + senderAddressRule = append(senderAddressRule, senderAddressItem) + } + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "AutoApproveAllowedSenderSet", senderAddressRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarAutoApproveAllowedSenderSet) + if err := _KeeperRegistrar.contract.UnpackLog(event, "AutoApproveAllowedSenderSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseAutoApproveAllowedSenderSet(log types.Log) (*KeeperRegistrarAutoApproveAllowedSenderSet, error) { + event := new(KeeperRegistrarAutoApproveAllowedSenderSet) + if err := _KeeperRegistrar.contract.UnpackLog(event, "AutoApproveAllowedSenderSet", log); err != nil { + return nil, err + } + 
event.Raw = log + return event, nil +} + +type KeeperRegistrarConfigChangedIterator struct { + Event *KeeperRegistrarConfigChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarConfigChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarConfigChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarConfigChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarConfigChangedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarConfigChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarConfigChanged struct { + AutoApproveConfigType uint8 + AutoApproveMaxAllowed uint32 + KeeperRegistry common.Address + MinPLIJuels *big.Int + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterConfigChanged(opts *bind.FilterOpts) (*KeeperRegistrarConfigChangedIterator, error) { + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "ConfigChanged") + if err != nil { + return nil, err + } + return &KeeperRegistrarConfigChangedIterator{contract: _KeeperRegistrar.contract, event: "ConfigChanged", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchConfigChanged(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarConfigChanged) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "ConfigChanged") + if err 
!= nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarConfigChanged) + if err := _KeeperRegistrar.contract.UnpackLog(event, "ConfigChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseConfigChanged(log types.Log) (*KeeperRegistrarConfigChanged, error) { + event := new(KeeperRegistrarConfigChanged) + if err := _KeeperRegistrar.contract.UnpackLog(event, "ConfigChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarOwnershipTransferRequestedIterator struct { + Event *KeeperRegistrarOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it 
*KeeperRegistrarOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistrarOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarOwnershipTransferRequestedIterator{contract: _KeeperRegistrar.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarOwnershipTransferRequested) + if err := _KeeperRegistrar.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case 
err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistrarOwnershipTransferRequested, error) { + event := new(KeeperRegistrarOwnershipTransferRequested) + if err := _KeeperRegistrar.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarOwnershipTransferredIterator struct { + Event *KeeperRegistrarOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistrarOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := 
range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarOwnershipTransferredIterator{contract: _KeeperRegistrar.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarOwnershipTransferred) + if err := _KeeperRegistrar.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistrarOwnershipTransferred, error) { + event := new(KeeperRegistrarOwnershipTransferred) + if err := _KeeperRegistrar.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarRegistrationApprovedIterator struct { + Event 
*KeeperRegistrarRegistrationApproved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarRegistrationApprovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarRegistrationApproved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarRegistrationApproved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarRegistrationApprovedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarRegistrationApprovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarRegistrationApproved struct { + Hash [32]byte + DisplayName string + UpkeepId *big.Int + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterRegistrationApproved(opts *bind.FilterOpts, hash [][32]byte, upkeepId []*big.Int) (*KeeperRegistrarRegistrationApprovedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "RegistrationApproved", hashRule, upkeepIdRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarRegistrationApprovedIterator{contract: _KeeperRegistrar.contract, event: "RegistrationApproved", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar 
*KeeperRegistrarFilterer) WatchRegistrationApproved(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarRegistrationApproved, hash [][32]byte, upkeepId []*big.Int) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "RegistrationApproved", hashRule, upkeepIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarRegistrationApproved) + if err := _KeeperRegistrar.contract.UnpackLog(event, "RegistrationApproved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseRegistrationApproved(log types.Log) (*KeeperRegistrarRegistrationApproved, error) { + event := new(KeeperRegistrarRegistrationApproved) + if err := _KeeperRegistrar.contract.UnpackLog(event, "RegistrationApproved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarRegistrationRejectedIterator struct { + Event *KeeperRegistrarRegistrationRejected + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarRegistrationRejectedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarRegistrationRejected) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = 
err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarRegistrationRejected) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarRegistrationRejectedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarRegistrationRejectedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarRegistrationRejected struct { + Hash [32]byte + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterRegistrationRejected(opts *bind.FilterOpts, hash [][32]byte) (*KeeperRegistrarRegistrationRejectedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "RegistrationRejected", hashRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarRegistrationRejectedIterator{contract: _KeeperRegistrar.contract, event: "RegistrationRejected", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchRegistrationRejected(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarRegistrationRejected, hash [][32]byte) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "RegistrationRejected", hashRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarRegistrationRejected) + if err := _KeeperRegistrar.contract.UnpackLog(event, "RegistrationRejected", 
log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseRegistrationRejected(log types.Log) (*KeeperRegistrarRegistrationRejected, error) { + event := new(KeeperRegistrarRegistrationRejected) + if err := _KeeperRegistrar.contract.UnpackLog(event, "RegistrationRejected", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarRegistrationRequestedIterator struct { + Event *KeeperRegistrarRegistrationRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarRegistrationRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarRegistrationRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarRegistrationRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarRegistrationRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarRegistrationRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarRegistrationRequested struct { + Hash [32]byte + Name string + EncryptedEmail []byte + UpkeepContract common.Address + GasLimit uint32 + AdminAddress common.Address + CheckData []byte + Amount 
*big.Int + Source uint8 + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterRegistrationRequested(opts *bind.FilterOpts, hash [][32]byte, upkeepContract []common.Address, source []uint8) (*KeeperRegistrarRegistrationRequestedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepContractRule []interface{} + for _, upkeepContractItem := range upkeepContract { + upkeepContractRule = append(upkeepContractRule, upkeepContractItem) + } + + var sourceRule []interface{} + for _, sourceItem := range source { + sourceRule = append(sourceRule, sourceItem) + } + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "RegistrationRequested", hashRule, upkeepContractRule, sourceRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarRegistrationRequestedIterator{contract: _KeeperRegistrar.contract, event: "RegistrationRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchRegistrationRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarRegistrationRequested, hash [][32]byte, upkeepContract []common.Address, source []uint8) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepContractRule []interface{} + for _, upkeepContractItem := range upkeepContract { + upkeepContractRule = append(upkeepContractRule, upkeepContractItem) + } + + var sourceRule []interface{} + for _, sourceItem := range source { + sourceRule = append(sourceRule, sourceItem) + } + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "RegistrationRequested", hashRule, upkeepContractRule, sourceRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarRegistrationRequested) 
+ if err := _KeeperRegistrar.contract.UnpackLog(event, "RegistrationRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseRegistrationRequested(log types.Log) (*KeeperRegistrarRegistrationRequested, error) { + event := new(KeeperRegistrarRegistrationRequested) + if err := _KeeperRegistrar.contract.UnpackLog(event, "RegistrationRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRegistrationConfig struct { + AutoApproveConfigType uint8 + AutoApproveMaxAllowed uint32 + ApprovedCount uint32 + KeeperRegistry common.Address + MinPLIJuels *big.Int +} + +func (_KeeperRegistrar *KeeperRegistrar) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistrar.abi.Events["AutoApproveAllowedSenderSet"].ID: + return _KeeperRegistrar.ParseAutoApproveAllowedSenderSet(log) + case _KeeperRegistrar.abi.Events["ConfigChanged"].ID: + return _KeeperRegistrar.ParseConfigChanged(log) + case _KeeperRegistrar.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistrar.ParseOwnershipTransferRequested(log) + case _KeeperRegistrar.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistrar.ParseOwnershipTransferred(log) + case _KeeperRegistrar.abi.Events["RegistrationApproved"].ID: + return _KeeperRegistrar.ParseRegistrationApproved(log) + case _KeeperRegistrar.abi.Events["RegistrationRejected"].ID: + return _KeeperRegistrar.ParseRegistrationRejected(log) + case _KeeperRegistrar.abi.Events["RegistrationRequested"].ID: + return _KeeperRegistrar.ParseRegistrationRequested(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func 
(KeeperRegistrarAutoApproveAllowedSenderSet) Topic() common.Hash { + return common.HexToHash("0x20c6237dac83526a849285a9f79d08a483291bdd3a056a0ef9ae94ecee1ad356") +} + +func (KeeperRegistrarConfigChanged) Topic() common.Hash { + return common.HexToHash("0x6293a703ec7145dfa23c5cde2e627d6a02e153fc2e9c03b14d1e22cbb4a7e9cd") +} + +func (KeeperRegistrarOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistrarOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistrarRegistrationApproved) Topic() common.Hash { + return common.HexToHash("0xb9a292fb7e3edd920cd2d2829a3615a640c43fd7de0a0820aa0668feb4c37d4b") +} + +func (KeeperRegistrarRegistrationRejected) Topic() common.Hash { + return common.HexToHash("0x3663fb28ebc87645eb972c9dad8521bf665c623f287e79f1c56f1eb374b82a22") +} + +func (KeeperRegistrarRegistrationRequested) Topic() common.Hash { + return common.HexToHash("0xc3f5df4aefec026f610a3fcb08f19476492d69d2cb78b1c2eba259a8820e6a78") +} + +func (_KeeperRegistrar *KeeperRegistrar) Address() common.Address { + return _KeeperRegistrar.address +} + +type KeeperRegistrarInterface interface { + PLI(opts *bind.CallOpts) (common.Address, error) + + GetAutoApproveAllowedSender(opts *bind.CallOpts, senderAddress common.Address) (bool, error) + + GetPendingRequest(opts *bind.CallOpts, hash [32]byte) (common.Address, *big.Int, error) + + GetRegistrationConfig(opts *bind.CallOpts) (GetRegistrationConfig, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + Approve(opts *bind.TransactOpts, name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, hash [32]byte) 
(*types.Transaction, error) + + Cancel(opts *bind.TransactOpts, hash [32]byte) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + Register(opts *bind.TransactOpts, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8, sender common.Address) (*types.Transaction, error) + + SetAutoApproveAllowedSender(opts *bind.TransactOpts, senderAddress common.Address, allowed bool) (*types.Transaction, error) + + SetRegistrationConfig(opts *bind.TransactOpts, autoApproveConfigType uint8, autoApproveMaxAllowed uint16, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterAutoApproveAllowedSenderSet(opts *bind.FilterOpts, senderAddress []common.Address) (*KeeperRegistrarAutoApproveAllowedSenderSetIterator, error) + + WatchAutoApproveAllowedSenderSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarAutoApproveAllowedSenderSet, senderAddress []common.Address) (event.Subscription, error) + + ParseAutoApproveAllowedSenderSet(log types.Log) (*KeeperRegistrarAutoApproveAllowedSenderSet, error) + + FilterConfigChanged(opts *bind.FilterOpts) (*KeeperRegistrarConfigChangedIterator, error) + + WatchConfigChanged(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarConfigChanged) (event.Subscription, error) + + ParseConfigChanged(log types.Log) (*KeeperRegistrarConfigChanged, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistrarOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + 
ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistrarOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistrarOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistrarOwnershipTransferred, error) + + FilterRegistrationApproved(opts *bind.FilterOpts, hash [][32]byte, upkeepId []*big.Int) (*KeeperRegistrarRegistrationApprovedIterator, error) + + WatchRegistrationApproved(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarRegistrationApproved, hash [][32]byte, upkeepId []*big.Int) (event.Subscription, error) + + ParseRegistrationApproved(log types.Log) (*KeeperRegistrarRegistrationApproved, error) + + FilterRegistrationRejected(opts *bind.FilterOpts, hash [][32]byte) (*KeeperRegistrarRegistrationRejectedIterator, error) + + WatchRegistrationRejected(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarRegistrationRejected, hash [][32]byte) (event.Subscription, error) + + ParseRegistrationRejected(log types.Log) (*KeeperRegistrarRegistrationRejected, error) + + FilterRegistrationRequested(opts *bind.FilterOpts, hash [][32]byte, upkeepContract []common.Address, source []uint8) (*KeeperRegistrarRegistrationRequestedIterator, error) + + WatchRegistrationRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarRegistrationRequested, hash [][32]byte, upkeepContract []common.Address, source []uint8) (event.Subscription, error) + + ParseRegistrationRequested(log types.Log) (*KeeperRegistrarRegistrationRequested, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_registrar_wrapper1_2_mock/keeper_registrar_wrapper1_2_mock.go 
b/core/gethwrappers/generated/keeper_registrar_wrapper1_2_mock/keeper_registrar_wrapper1_2_mock.go new file mode 100644 index 00000000..122e55ee --- /dev/null +++ b/core/gethwrappers/generated/keeper_registrar_wrapper1_2_mock/keeper_registrar_wrapper1_2_mock.go @@ -0,0 +1,1492 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package keeper_registrar_wrapper1_2_mock + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var KeeperRegistrarMockMetaData = &bind.MetaData{ + ABI: 
"[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"senderAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"allowed\",\"type\":\"bool\"}],\"name\":\"AutoApproveAllowedSenderSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"autoApproveConfigType\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"}],\"name\":\"ConfigChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"displayName\",\"type\":\"string\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"RegistrationApproved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"RegistrationRejected\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"name\",\
"type\":\"string\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"encryptedEmail\",\"type\":\"bytes\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"indexed\":true,\"internalType\":\"uint8\",\"name\":\"source\",\"type\":\"uint8\"}],\"name\":\"RegistrationRequested\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"senderAddress\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"allowed\",\"type\":\"bool\"}],\"name\":\"emitAutoApproveAllowedSenderSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"autoApproveConfigType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"}],\"name\":\"emitConfigChanged\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferred\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"b
ytes32\",\"name\":\"hash\",\"type\":\"bytes32\"},{\"internalType\":\"string\",\"name\":\"displayName\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"emitRegistrationApproved\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"emitRegistrationRejected\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"},{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"encryptedEmail\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"internalType\":\"uint8\",\"name\":\"source\",\"type\":\"uint8\"}],\"name\":\"emitRegistrationRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRegistrationConfig\",\"outputs\":[{\"internalType\":\"enumKeeperRegistrar1_2Mock.AutoApproveType\",\"name\":\"autoApproveConfigType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"approvedCount\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"minPLIJuels\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_approvedCount\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\
"function\"},{\"inputs\":[],\"name\":\"s_autoApproveConfigType\",\"outputs\":[{\"internalType\":\"enumKeeperRegistrar1_2Mock.AutoApproveType\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_autoApproveMaxAllowed\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_keeperRegistry\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_minPLIJuels\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"enumKeeperRegistrar1_2Mock.AutoApproveType\",\"name\":\"_autoApproveConfigType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"_autoApproveMaxAllowed\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_approvedCount\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"_keeperRegistry\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_minPLIJuels\",\"type\":\"uint256\"}],\"name\":\"setRegistrationConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610ba0806100206000396000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c8063aee052f31161008c578063b59d75eb11610066578063b59d75eb14610259578063bb98fe561461026c578063ca40bcd314610285578063f7420bc21461029857600080fd5b8063aee052f314610220578063b019b4e814610233578063b49fd35b1461024657600080fd5b806384638bb6116100c857806384638bb61461014a578063850af0cb146101645780639e105f95146101bb578063adeab0b71461020d57600080fd5b80631701f938146100ef5780634882b5bd1461010457806355e8b24814610120575b600080fd5b6101026100fd36600461093a565b6102ab565b005b61010d60015481565b6040519081526020015b60405180910390f35b60005461013590610100900463ffffffff1681565b60405163ffffffff9091168152602001610117565b6000546101579060ff1681565b6040516101179190610a34565b6000546001546040516101179260ff811692610100820463ffffffff90811693650100000000008404909116926901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff169190610a48565b6000546101e8906901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610117565b61010261021b366004610789565b610322565b61010261022e366004610888565b610350565b61010261024136600461071a565b61038e565b6101026102543660046108d8565b6103ec565b6101026102673660046107a2565b6104e9565b6000546101359065010000000000900463ffffffff1681565b61010261029336600461074d565b610551565b6101026102a636600461071a565b6105a7565b6040805160ff8616815263ffffffff8516602082015273ffffffffffffffffffffffffffffffffffffffff8416818301526bffffffffffffffffffffffff8316606082015290517f6293a703ec7145dfa23c5cde2e627d6a02e153fc2e9c03b14d1e22cbb4a7e9cd9181900360800190a150505050565b60405181907f3663fb28ebc87645eb972c9dad8521bf665c623f287e79f1c56f1eb374b82a2290600090a250565b80837fb9a292fb7e3edd920cd2d2829a3615a640c43fd7de0a0820aa0668feb4c37d4b846040516103819190610a98565b60405180910390a3505050565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141
344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b600080548691907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600183600281111561042957610429610b35565b0217905550600080547fffffffffffffffffffffffffffffffffffffffffffffff0000000000000000ff1661010063ffffffff968716027fffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffff1617650100000000009490951693909302939093177fffffff0000000000000000000000000000000000000000ffffffffffffffffff16690100000000000000000073ffffffffffffffffffffffffffffffffffffffff929092169190910217905560015550565b8060ff168673ffffffffffffffffffffffffffffffffffffffff168a7fc3f5df4aefec026f610a3fcb08f19476492d69d2cb78b1c2eba259a8820e6a788b8b8a8a8a8a60405161053e96959493929190610ab2565b60405180910390a4505050505050505050565b8173ffffffffffffffffffffffffffffffffffffffff167f20c6237dac83526a849285a9f79d08a483291bdd3a056a0ef9ae94ecee1ad3568260405161059b911515815260200190565b60405180910390a25050565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127860405160405180910390a35050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461062957600080fd5b919050565b600082601f83011261063f57600080fd5b813567ffffffffffffffff8082111561065a5761065a610b64565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019082821181831017156106a0576106a0610b64565b816040528381528660208588010111156106b957600080fd5b836020870160208301376000602085830101528094505050505092915050565b803563ffffffff8116811461062957600080fd5b803560ff8116811461062957600080fd5b80356bffffffffffffffffffffffff8116811461062957600080fd5b6000806040838503121561072d57600080fd5b61073683610605565b915061074460208401610605565b90509250929050565b6000806040838503121561076057600080fd5b61076983610605565b91506020830135801515811461077e57600080fd5b809150509250929050565b60006020828403121561079b57600080fd5b5035919050565b60008060008060008060008
060006101208a8c0312156107c157600080fd5b8935985060208a013567ffffffffffffffff808211156107e057600080fd5b6107ec8d838e0161062e565b995060408c013591508082111561080257600080fd5b61080e8d838e0161062e565b985061081c60608d01610605565b975061082a60808d016106d9565b965061083860a08d01610605565b955060c08c013591508082111561084e57600080fd5b5061085b8c828d0161062e565b93505061086a60e08b016106fe565b91506108796101008b016106ed565b90509295985092959850929598565b60008060006060848603121561089d57600080fd5b83359250602084013567ffffffffffffffff8111156108bb57600080fd5b6108c78682870161062e565b925050604084013590509250925092565b600080600080600060a086880312156108f057600080fd5b8535600381106108ff57600080fd5b945061090d602087016106d9565b935061091b604087016106d9565b925061092960608701610605565b949793965091946080013592915050565b6000806000806080858703121561095057600080fd5b610959856106ed565b9350610967602086016106d9565b925061097560408601610605565b9150610983606086016106fe565b905092959194509250565b6000815180845260005b818110156109b457602081850181015186830182015201610998565b818111156109c6576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60038110610a30577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b9052565b60208101610a4282846109f9565b92915050565b60a08101610a5682886109f9565b63ffffffff808716602084015280861660408401525073ffffffffffffffffffffffffffffffffffffffff841660608301528260808301529695505050505050565b602081526000610aab602083018461098e565b9392505050565b60c081526000610ac560c083018961098e565b8281036020840152610ad7818961098e565b905063ffffffff8716604084015273ffffffffffffffffffffffffffffffffffffffff861660608401528281036080840152610b13818661098e565b9150506bffffffffffffffffffffffff831660a0830152979650505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea1647
36f6c6343000806000a", +} + +var KeeperRegistrarMockABI = KeeperRegistrarMockMetaData.ABI + +var KeeperRegistrarMockBin = KeeperRegistrarMockMetaData.Bin + +func DeployKeeperRegistrarMock(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *KeeperRegistrarMock, error) { + parsed, err := KeeperRegistrarMockMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistrarMockBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistrarMock{address: address, abi: *parsed, KeeperRegistrarMockCaller: KeeperRegistrarMockCaller{contract: contract}, KeeperRegistrarMockTransactor: KeeperRegistrarMockTransactor{contract: contract}, KeeperRegistrarMockFilterer: KeeperRegistrarMockFilterer{contract: contract}}, nil +} + +type KeeperRegistrarMock struct { + address common.Address + abi abi.ABI + KeeperRegistrarMockCaller + KeeperRegistrarMockTransactor + KeeperRegistrarMockFilterer +} + +type KeeperRegistrarMockCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistrarMockTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistrarMockFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistrarMockSession struct { + Contract *KeeperRegistrarMock + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistrarMockCallerSession struct { + Contract *KeeperRegistrarMockCaller + CallOpts bind.CallOpts +} + +type KeeperRegistrarMockTransactorSession struct { + Contract *KeeperRegistrarMockTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistrarMockRaw struct { + Contract *KeeperRegistrarMock +} + +type KeeperRegistrarMockCallerRaw struct { + Contract *KeeperRegistrarMockCaller +} + +type 
KeeperRegistrarMockTransactorRaw struct { + Contract *KeeperRegistrarMockTransactor +} + +func NewKeeperRegistrarMock(address common.Address, backend bind.ContractBackend) (*KeeperRegistrarMock, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistrarMockABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistrarMock(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistrarMock{address: address, abi: abi, KeeperRegistrarMockCaller: KeeperRegistrarMockCaller{contract: contract}, KeeperRegistrarMockTransactor: KeeperRegistrarMockTransactor{contract: contract}, KeeperRegistrarMockFilterer: KeeperRegistrarMockFilterer{contract: contract}}, nil +} + +func NewKeeperRegistrarMockCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistrarMockCaller, error) { + contract, err := bindKeeperRegistrarMock(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistrarMockCaller{contract: contract}, nil +} + +func NewKeeperRegistrarMockTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistrarMockTransactor, error) { + contract, err := bindKeeperRegistrarMock(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistrarMockTransactor{contract: contract}, nil +} + +func NewKeeperRegistrarMockFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistrarMockFilterer, error) { + contract, err := bindKeeperRegistrarMock(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistrarMockFilterer{contract: contract}, nil +} + +func bindKeeperRegistrarMock(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistrarMockMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, 
caller, transactor, filterer), nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistrarMock.Contract.KeeperRegistrarMockCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.KeeperRegistrarMockTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.KeeperRegistrarMockTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistrarMock.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.contract.Transact(opts, method, params...) 
+} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCaller) GetRegistrationConfig(opts *bind.CallOpts) (GetRegistrationConfig, + + error) { + var out []interface{} + err := _KeeperRegistrarMock.contract.Call(opts, &out, "getRegistrationConfig") + + outstruct := new(GetRegistrationConfig) + if err != nil { + return *outstruct, err + } + + outstruct.AutoApproveConfigType = *abi.ConvertType(out[0], new(uint8)).(*uint8) + outstruct.AutoApproveMaxAllowed = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ApprovedCount = *abi.ConvertType(out[2], new(uint32)).(*uint32) + outstruct.KeeperRegistry = *abi.ConvertType(out[3], new(common.Address)).(*common.Address) + outstruct.MinPLIJuels = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) GetRegistrationConfig() (GetRegistrationConfig, + + error) { + return _KeeperRegistrarMock.Contract.GetRegistrationConfig(&_KeeperRegistrarMock.CallOpts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCallerSession) GetRegistrationConfig() (GetRegistrationConfig, + + error) { + return _KeeperRegistrarMock.Contract.GetRegistrationConfig(&_KeeperRegistrarMock.CallOpts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCaller) SApprovedCount(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _KeeperRegistrarMock.contract.Call(opts, &out, "s_approvedCount") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) SApprovedCount() (uint32, error) { + return _KeeperRegistrarMock.Contract.SApprovedCount(&_KeeperRegistrarMock.CallOpts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCallerSession) SApprovedCount() (uint32, error) { + return _KeeperRegistrarMock.Contract.SApprovedCount(&_KeeperRegistrarMock.CallOpts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCaller) 
SAutoApproveConfigType(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _KeeperRegistrarMock.contract.Call(opts, &out, "s_autoApproveConfigType") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) SAutoApproveConfigType() (uint8, error) { + return _KeeperRegistrarMock.Contract.SAutoApproveConfigType(&_KeeperRegistrarMock.CallOpts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCallerSession) SAutoApproveConfigType() (uint8, error) { + return _KeeperRegistrarMock.Contract.SAutoApproveConfigType(&_KeeperRegistrarMock.CallOpts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCaller) SAutoApproveMaxAllowed(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _KeeperRegistrarMock.contract.Call(opts, &out, "s_autoApproveMaxAllowed") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) SAutoApproveMaxAllowed() (uint32, error) { + return _KeeperRegistrarMock.Contract.SAutoApproveMaxAllowed(&_KeeperRegistrarMock.CallOpts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCallerSession) SAutoApproveMaxAllowed() (uint32, error) { + return _KeeperRegistrarMock.Contract.SAutoApproveMaxAllowed(&_KeeperRegistrarMock.CallOpts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCaller) SKeeperRegistry(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistrarMock.contract.Call(opts, &out, "s_keeperRegistry") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) SKeeperRegistry() (common.Address, error) { + return 
_KeeperRegistrarMock.Contract.SKeeperRegistry(&_KeeperRegistrarMock.CallOpts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCallerSession) SKeeperRegistry() (common.Address, error) { + return _KeeperRegistrarMock.Contract.SKeeperRegistry(&_KeeperRegistrarMock.CallOpts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCaller) SMinPLIJuels(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistrarMock.contract.Call(opts, &out, "s_minPLIJuels") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) SMinPLIJuels() (*big.Int, error) { + return _KeeperRegistrarMock.Contract.SMinPLIJuels(&_KeeperRegistrarMock.CallOpts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockCallerSession) SMinPLIJuels() (*big.Int, error) { + return _KeeperRegistrarMock.Contract.SMinPLIJuels(&_KeeperRegistrarMock.CallOpts) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactor) EmitAutoApproveAllowedSenderSet(opts *bind.TransactOpts, senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _KeeperRegistrarMock.contract.Transact(opts, "emitAutoApproveAllowedSenderSet", senderAddress, allowed) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) EmitAutoApproveAllowedSenderSet(senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitAutoApproveAllowedSenderSet(&_KeeperRegistrarMock.TransactOpts, senderAddress, allowed) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactorSession) EmitAutoApproveAllowedSenderSet(senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitAutoApproveAllowedSenderSet(&_KeeperRegistrarMock.TransactOpts, senderAddress, allowed) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactor) 
EmitConfigChanged(opts *bind.TransactOpts, autoApproveConfigType uint8, autoApproveMaxAllowed uint32, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _KeeperRegistrarMock.contract.Transact(opts, "emitConfigChanged", autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) EmitConfigChanged(autoApproveConfigType uint8, autoApproveMaxAllowed uint32, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitConfigChanged(&_KeeperRegistrarMock.TransactOpts, autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactorSession) EmitConfigChanged(autoApproveConfigType uint8, autoApproveMaxAllowed uint32, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitConfigChanged(&_KeeperRegistrarMock.TransactOpts, autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactor) EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistrarMock.contract.Transact(opts, "emitOwnershipTransferRequested", from, to) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitOwnershipTransferRequested(&_KeeperRegistrarMock.TransactOpts, from, to) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactorSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitOwnershipTransferRequested(&_KeeperRegistrarMock.TransactOpts, from, to) +} + +func 
(_KeeperRegistrarMock *KeeperRegistrarMockTransactor) EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistrarMock.contract.Transact(opts, "emitOwnershipTransferred", from, to) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitOwnershipTransferred(&_KeeperRegistrarMock.TransactOpts, from, to) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactorSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitOwnershipTransferred(&_KeeperRegistrarMock.TransactOpts, from, to) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactor) EmitRegistrationApproved(opts *bind.TransactOpts, hash [32]byte, displayName string, upkeepId *big.Int) (*types.Transaction, error) { + return _KeeperRegistrarMock.contract.Transact(opts, "emitRegistrationApproved", hash, displayName, upkeepId) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) EmitRegistrationApproved(hash [32]byte, displayName string, upkeepId *big.Int) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitRegistrationApproved(&_KeeperRegistrarMock.TransactOpts, hash, displayName, upkeepId) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactorSession) EmitRegistrationApproved(hash [32]byte, displayName string, upkeepId *big.Int) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitRegistrationApproved(&_KeeperRegistrarMock.TransactOpts, hash, displayName, upkeepId) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactor) EmitRegistrationRejected(opts *bind.TransactOpts, hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrarMock.contract.Transact(opts, "emitRegistrationRejected", hash) +} + +func 
(_KeeperRegistrarMock *KeeperRegistrarMockSession) EmitRegistrationRejected(hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitRegistrationRejected(&_KeeperRegistrarMock.TransactOpts, hash) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactorSession) EmitRegistrationRejected(hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitRegistrationRejected(&_KeeperRegistrarMock.TransactOpts, hash) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactor) EmitRegistrationRequested(opts *bind.TransactOpts, hash [32]byte, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8) (*types.Transaction, error) { + return _KeeperRegistrarMock.contract.Transact(opts, "emitRegistrationRequested", hash, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) EmitRegistrationRequested(hash [32]byte, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitRegistrationRequested(&_KeeperRegistrarMock.TransactOpts, hash, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactorSession) EmitRegistrationRequested(hash [32]byte, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.EmitRegistrationRequested(&_KeeperRegistrarMock.TransactOpts, hash, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source) +} + +func 
(_KeeperRegistrarMock *KeeperRegistrarMockTransactor) SetRegistrationConfig(opts *bind.TransactOpts, _autoApproveConfigType uint8, _autoApproveMaxAllowed uint32, _approvedCount uint32, _keeperRegistry common.Address, _minPLIJuels *big.Int) (*types.Transaction, error) { + return _KeeperRegistrarMock.contract.Transact(opts, "setRegistrationConfig", _autoApproveConfigType, _autoApproveMaxAllowed, _approvedCount, _keeperRegistry, _minPLIJuels) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockSession) SetRegistrationConfig(_autoApproveConfigType uint8, _autoApproveMaxAllowed uint32, _approvedCount uint32, _keeperRegistry common.Address, _minPLIJuels *big.Int) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.SetRegistrationConfig(&_KeeperRegistrarMock.TransactOpts, _autoApproveConfigType, _autoApproveMaxAllowed, _approvedCount, _keeperRegistry, _minPLIJuels) +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockTransactorSession) SetRegistrationConfig(_autoApproveConfigType uint8, _autoApproveMaxAllowed uint32, _approvedCount uint32, _keeperRegistry common.Address, _minPLIJuels *big.Int) (*types.Transaction, error) { + return _KeeperRegistrarMock.Contract.SetRegistrationConfig(&_KeeperRegistrarMock.TransactOpts, _autoApproveConfigType, _autoApproveMaxAllowed, _approvedCount, _keeperRegistry, _minPLIJuels) +} + +type KeeperRegistrarMockAutoApproveAllowedSenderSetIterator struct { + Event *KeeperRegistrarMockAutoApproveAllowedSenderSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarMockAutoApproveAllowedSenderSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockAutoApproveAllowedSenderSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + 
default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockAutoApproveAllowedSenderSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarMockAutoApproveAllowedSenderSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarMockAutoApproveAllowedSenderSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarMockAutoApproveAllowedSenderSet struct { + SenderAddress common.Address + Allowed bool + Raw types.Log +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) FilterAutoApproveAllowedSenderSet(opts *bind.FilterOpts, senderAddress []common.Address) (*KeeperRegistrarMockAutoApproveAllowedSenderSetIterator, error) { + + var senderAddressRule []interface{} + for _, senderAddressItem := range senderAddress { + senderAddressRule = append(senderAddressRule, senderAddressItem) + } + + logs, sub, err := _KeeperRegistrarMock.contract.FilterLogs(opts, "AutoApproveAllowedSenderSet", senderAddressRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarMockAutoApproveAllowedSenderSetIterator{contract: _KeeperRegistrarMock.contract, event: "AutoApproveAllowedSenderSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) WatchAutoApproveAllowedSenderSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockAutoApproveAllowedSenderSet, senderAddress []common.Address) (event.Subscription, error) { + + var senderAddressRule []interface{} + for _, senderAddressItem := range senderAddress { + senderAddressRule = append(senderAddressRule, senderAddressItem) + } + + logs, sub, err := _KeeperRegistrarMock.contract.WatchLogs(opts, "AutoApproveAllowedSenderSet", senderAddressRule) + if err != nil { + return nil, err + } + 
return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarMockAutoApproveAllowedSenderSet) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "AutoApproveAllowedSenderSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) ParseAutoApproveAllowedSenderSet(log types.Log) (*KeeperRegistrarMockAutoApproveAllowedSenderSet, error) { + event := new(KeeperRegistrarMockAutoApproveAllowedSenderSet) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "AutoApproveAllowedSenderSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarMockConfigChangedIterator struct { + Event *KeeperRegistrarMockConfigChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarMockConfigChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockConfigChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockConfigChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarMockConfigChangedIterator) Error() error { + return it.fail +} + +func (it 
*KeeperRegistrarMockConfigChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarMockConfigChanged struct { + AutoApproveConfigType uint8 + AutoApproveMaxAllowed uint32 + KeeperRegistry common.Address + MinPLIJuels *big.Int + Raw types.Log +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) FilterConfigChanged(opts *bind.FilterOpts) (*KeeperRegistrarMockConfigChangedIterator, error) { + + logs, sub, err := _KeeperRegistrarMock.contract.FilterLogs(opts, "ConfigChanged") + if err != nil { + return nil, err + } + return &KeeperRegistrarMockConfigChangedIterator{contract: _KeeperRegistrarMock.contract, event: "ConfigChanged", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) WatchConfigChanged(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockConfigChanged) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistrarMock.contract.WatchLogs(opts, "ConfigChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarMockConfigChanged) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "ConfigChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) ParseConfigChanged(log types.Log) (*KeeperRegistrarMockConfigChanged, error) { + event := new(KeeperRegistrarMockConfigChanged) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "ConfigChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarMockOwnershipTransferRequestedIterator struct { + Event *KeeperRegistrarMockOwnershipTransferRequested + + 
contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarMockOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarMockOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarMockOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarMockOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistrarMockOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistrarMock.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarMockOwnershipTransferRequestedIterator{contract: _KeeperRegistrarMock.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func 
(_KeeperRegistrarMock *KeeperRegistrarMockFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistrarMock.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarMockOwnershipTransferRequested) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistrarMockOwnershipTransferRequested, error) { + event := new(KeeperRegistrarMockOwnershipTransferRequested) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarMockOwnershipTransferredIterator struct { + Event *KeeperRegistrarMockOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarMockOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(KeeperRegistrarMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarMockOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarMockOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarMockOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistrarMockOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistrarMock.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarMockOwnershipTransferredIterator{contract: _KeeperRegistrarMock.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, 
toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistrarMock.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarMockOwnershipTransferred) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistrarMockOwnershipTransferred, error) { + event := new(KeeperRegistrarMockOwnershipTransferred) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarMockRegistrationApprovedIterator struct { + Event *KeeperRegistrarMockRegistrationApproved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarMockRegistrationApprovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockRegistrationApproved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockRegistrationApproved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + 
+ case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarMockRegistrationApprovedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarMockRegistrationApprovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarMockRegistrationApproved struct { + Hash [32]byte + DisplayName string + UpkeepId *big.Int + Raw types.Log +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) FilterRegistrationApproved(opts *bind.FilterOpts, hash [][32]byte, upkeepId []*big.Int) (*KeeperRegistrarMockRegistrationApprovedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + + logs, sub, err := _KeeperRegistrarMock.contract.FilterLogs(opts, "RegistrationApproved", hashRule, upkeepIdRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarMockRegistrationApprovedIterator{contract: _KeeperRegistrarMock.contract, event: "RegistrationApproved", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) WatchRegistrationApproved(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockRegistrationApproved, hash [][32]byte, upkeepId []*big.Int) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + + logs, sub, err := _KeeperRegistrarMock.contract.WatchLogs(opts, "RegistrationApproved", hashRule, upkeepIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(KeeperRegistrarMockRegistrationApproved) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "RegistrationApproved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) ParseRegistrationApproved(log types.Log) (*KeeperRegistrarMockRegistrationApproved, error) { + event := new(KeeperRegistrarMockRegistrationApproved) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "RegistrationApproved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarMockRegistrationRejectedIterator struct { + Event *KeeperRegistrarMockRegistrationRejected + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarMockRegistrationRejectedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockRegistrationRejected) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockRegistrationRejected) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarMockRegistrationRejectedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarMockRegistrationRejectedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarMockRegistrationRejected 
struct { + Hash [32]byte + Raw types.Log +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) FilterRegistrationRejected(opts *bind.FilterOpts, hash [][32]byte) (*KeeperRegistrarMockRegistrationRejectedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + logs, sub, err := _KeeperRegistrarMock.contract.FilterLogs(opts, "RegistrationRejected", hashRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarMockRegistrationRejectedIterator{contract: _KeeperRegistrarMock.contract, event: "RegistrationRejected", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) WatchRegistrationRejected(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockRegistrationRejected, hash [][32]byte) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + logs, sub, err := _KeeperRegistrarMock.contract.WatchLogs(opts, "RegistrationRejected", hashRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarMockRegistrationRejected) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "RegistrationRejected", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) ParseRegistrationRejected(log types.Log) (*KeeperRegistrarMockRegistrationRejected, error) { + event := new(KeeperRegistrarMockRegistrationRejected) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "RegistrationRejected", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil 
+} + +type KeeperRegistrarMockRegistrationRequestedIterator struct { + Event *KeeperRegistrarMockRegistrationRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarMockRegistrationRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockRegistrationRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarMockRegistrationRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarMockRegistrationRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarMockRegistrationRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarMockRegistrationRequested struct { + Hash [32]byte + Name string + EncryptedEmail []byte + UpkeepContract common.Address + GasLimit uint32 + AdminAddress common.Address + CheckData []byte + Amount *big.Int + Source uint8 + Raw types.Log +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) FilterRegistrationRequested(opts *bind.FilterOpts, hash [][32]byte, upkeepContract []common.Address, source []uint8) (*KeeperRegistrarMockRegistrationRequestedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepContractRule []interface{} + for _, upkeepContractItem := range upkeepContract { + upkeepContractRule = append(upkeepContractRule, upkeepContractItem) + } + + var 
sourceRule []interface{} + for _, sourceItem := range source { + sourceRule = append(sourceRule, sourceItem) + } + + logs, sub, err := _KeeperRegistrarMock.contract.FilterLogs(opts, "RegistrationRequested", hashRule, upkeepContractRule, sourceRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarMockRegistrationRequestedIterator{contract: _KeeperRegistrarMock.contract, event: "RegistrationRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) WatchRegistrationRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockRegistrationRequested, hash [][32]byte, upkeepContract []common.Address, source []uint8) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepContractRule []interface{} + for _, upkeepContractItem := range upkeepContract { + upkeepContractRule = append(upkeepContractRule, upkeepContractItem) + } + + var sourceRule []interface{} + for _, sourceItem := range source { + sourceRule = append(sourceRule, sourceItem) + } + + logs, sub, err := _KeeperRegistrarMock.contract.WatchLogs(opts, "RegistrationRequested", hashRule, upkeepContractRule, sourceRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarMockRegistrationRequested) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "RegistrationRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrarMock *KeeperRegistrarMockFilterer) ParseRegistrationRequested(log types.Log) (*KeeperRegistrarMockRegistrationRequested, error) { + event := 
new(KeeperRegistrarMockRegistrationRequested) + if err := _KeeperRegistrarMock.contract.UnpackLog(event, "RegistrationRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRegistrationConfig struct { + AutoApproveConfigType uint8 + AutoApproveMaxAllowed uint32 + ApprovedCount uint32 + KeeperRegistry common.Address + MinPLIJuels *big.Int +} + +func (_KeeperRegistrarMock *KeeperRegistrarMock) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistrarMock.abi.Events["AutoApproveAllowedSenderSet"].ID: + return _KeeperRegistrarMock.ParseAutoApproveAllowedSenderSet(log) + case _KeeperRegistrarMock.abi.Events["ConfigChanged"].ID: + return _KeeperRegistrarMock.ParseConfigChanged(log) + case _KeeperRegistrarMock.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistrarMock.ParseOwnershipTransferRequested(log) + case _KeeperRegistrarMock.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistrarMock.ParseOwnershipTransferred(log) + case _KeeperRegistrarMock.abi.Events["RegistrationApproved"].ID: + return _KeeperRegistrarMock.ParseRegistrationApproved(log) + case _KeeperRegistrarMock.abi.Events["RegistrationRejected"].ID: + return _KeeperRegistrarMock.ParseRegistrationRejected(log) + case _KeeperRegistrarMock.abi.Events["RegistrationRequested"].ID: + return _KeeperRegistrarMock.ParseRegistrationRequested(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistrarMockAutoApproveAllowedSenderSet) Topic() common.Hash { + return common.HexToHash("0x20c6237dac83526a849285a9f79d08a483291bdd3a056a0ef9ae94ecee1ad356") +} + +func (KeeperRegistrarMockConfigChanged) Topic() common.Hash { + return common.HexToHash("0x6293a703ec7145dfa23c5cde2e627d6a02e153fc2e9c03b14d1e22cbb4a7e9cd") +} + +func (KeeperRegistrarMockOwnershipTransferRequested) Topic() common.Hash { + return 
common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistrarMockOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistrarMockRegistrationApproved) Topic() common.Hash { + return common.HexToHash("0xb9a292fb7e3edd920cd2d2829a3615a640c43fd7de0a0820aa0668feb4c37d4b") +} + +func (KeeperRegistrarMockRegistrationRejected) Topic() common.Hash { + return common.HexToHash("0x3663fb28ebc87645eb972c9dad8521bf665c623f287e79f1c56f1eb374b82a22") +} + +func (KeeperRegistrarMockRegistrationRequested) Topic() common.Hash { + return common.HexToHash("0xc3f5df4aefec026f610a3fcb08f19476492d69d2cb78b1c2eba259a8820e6a78") +} + +func (_KeeperRegistrarMock *KeeperRegistrarMock) Address() common.Address { + return _KeeperRegistrarMock.address +} + +type KeeperRegistrarMockInterface interface { + GetRegistrationConfig(opts *bind.CallOpts) (GetRegistrationConfig, + + error) + + SApprovedCount(opts *bind.CallOpts) (uint32, error) + + SAutoApproveConfigType(opts *bind.CallOpts) (uint8, error) + + SAutoApproveMaxAllowed(opts *bind.CallOpts) (uint32, error) + + SKeeperRegistry(opts *bind.CallOpts) (common.Address, error) + + SMinPLIJuels(opts *bind.CallOpts) (*big.Int, error) + + EmitAutoApproveAllowedSenderSet(opts *bind.TransactOpts, senderAddress common.Address, allowed bool) (*types.Transaction, error) + + EmitConfigChanged(opts *bind.TransactOpts, autoApproveConfigType uint8, autoApproveMaxAllowed uint32, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) + + EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitRegistrationApproved(opts *bind.TransactOpts, hash [32]byte, displayName string, upkeepId 
*big.Int) (*types.Transaction, error) + + EmitRegistrationRejected(opts *bind.TransactOpts, hash [32]byte) (*types.Transaction, error) + + EmitRegistrationRequested(opts *bind.TransactOpts, hash [32]byte, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8) (*types.Transaction, error) + + SetRegistrationConfig(opts *bind.TransactOpts, _autoApproveConfigType uint8, _autoApproveMaxAllowed uint32, _approvedCount uint32, _keeperRegistry common.Address, _minPLIJuels *big.Int) (*types.Transaction, error) + + FilterAutoApproveAllowedSenderSet(opts *bind.FilterOpts, senderAddress []common.Address) (*KeeperRegistrarMockAutoApproveAllowedSenderSetIterator, error) + + WatchAutoApproveAllowedSenderSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockAutoApproveAllowedSenderSet, senderAddress []common.Address) (event.Subscription, error) + + ParseAutoApproveAllowedSenderSet(log types.Log) (*KeeperRegistrarMockAutoApproveAllowedSenderSet, error) + + FilterConfigChanged(opts *bind.FilterOpts) (*KeeperRegistrarMockConfigChangedIterator, error) + + WatchConfigChanged(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockConfigChanged) (event.Subscription, error) + + ParseConfigChanged(log types.Log) (*KeeperRegistrarMockConfigChanged, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistrarMockOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistrarMockOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistrarMockOwnershipTransferredIterator, error) + + 
WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistrarMockOwnershipTransferred, error) + + FilterRegistrationApproved(opts *bind.FilterOpts, hash [][32]byte, upkeepId []*big.Int) (*KeeperRegistrarMockRegistrationApprovedIterator, error) + + WatchRegistrationApproved(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockRegistrationApproved, hash [][32]byte, upkeepId []*big.Int) (event.Subscription, error) + + ParseRegistrationApproved(log types.Log) (*KeeperRegistrarMockRegistrationApproved, error) + + FilterRegistrationRejected(opts *bind.FilterOpts, hash [][32]byte) (*KeeperRegistrarMockRegistrationRejectedIterator, error) + + WatchRegistrationRejected(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockRegistrationRejected, hash [][32]byte) (event.Subscription, error) + + ParseRegistrationRejected(log types.Log) (*KeeperRegistrarMockRegistrationRejected, error) + + FilterRegistrationRequested(opts *bind.FilterOpts, hash [][32]byte, upkeepContract []common.Address, source []uint8) (*KeeperRegistrarMockRegistrationRequestedIterator, error) + + WatchRegistrationRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarMockRegistrationRequested, hash [][32]byte, upkeepContract []common.Address, source []uint8) (event.Subscription, error) + + ParseRegistrationRequested(log types.Log) (*KeeperRegistrarMockRegistrationRequested, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_registrar_wrapper2_0/keeper_registrar_wrapper2_0.go b/core/gethwrappers/generated/keeper_registrar_wrapper2_0/keeper_registrar_wrapper2_0.go new file mode 100644 index 00000000..6d5ca761 --- /dev/null +++ b/core/gethwrappers/generated/keeper_registrar_wrapper2_0/keeper_registrar_wrapper2_0.go @@ -0,0 +1,1507 @@ +// Code 
generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package keeper_registrar_wrapper2_0 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type KeeperRegistrar20RegistrationParams struct { + Name string + EncryptedEmail []byte + UpkeepContract common.Address + GasLimit uint32 + AdminAddress common.Address + CheckData []byte + OffchainConfig []byte + Amount *big.Int +} + +var KeeperRegistrarMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"PLIAddress\",\"type\":\"address\"},{\"internalType\":\"enumKeeperRegistrar2_0.AutoApproveType\",\"name\":\"autoApproveConfigType\",\"type\":\"uint8\"},{\"internalType\":\"uint16\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint16\"},{\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AmountMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FunctionNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"HashMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientPayment\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidAdminAddress\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"LinkTransferFailed\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyAdminOrOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyLink\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistrationRequestFailed\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RequestNotFound\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"SenderMismatch\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"senderAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"allowed\",\"type\":\"bool\"}],\"name\":\"AutoApproveAllowedSenderSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"enumKeeperRegistrar2_0.AutoApproveType\",\"name\":\"autoApproveConfigType\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"addres
s\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"}],\"name\":\"ConfigChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"displayName\",\"type\":\"string\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"RegistrationApproved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"RegistrationRejected\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"encryptedEmail\",\"type\":\"bytes\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"RegistrationRequested\
",\"type\":\"event\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"approve\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"cancel\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"senderAddress\",\"type\":\"address\"}],\"name\":\"getAutoApproveAllowedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"hash\",\"type\":\"bytes32\"}],\"name\":\"getPendingRequest\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRegistrationConfig\",\"outputs\":[{\"internalType\":\"enumKeeperRegistrar2_0.AutoApproveType\",\"name\":\"autoApproveConfigType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"app
rovedCount\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"minPLIJuels\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"encryptedEmail\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"register\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"encryptedEmail\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"upkeepContract\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\"
:\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"internalType\":\"structKeeperRegistrar2_0.RegistrationParams\",\"name\":\"requestParams\",\"type\":\"tuple\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"senderAddress\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"allowed\",\"type\":\"bool\"}],\"name\":\"setAutoApproveAllowedSender\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"enumKeeperRegistrar2_0.AutoApproveType\",\"name\":\"autoApproveConfigType\",\"type\":\"uint8\"},{\"internalType\":\"uint16\",\"name\":\"autoApproveMaxAllowed\",\"type\":\"uint16\"},{\"internalType\":\"address\",\"name\":\"keeperRegistry\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"minPLIJuels\",\"type\":\"uint96\"}],\"name\":\"setRegistrationConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x60a06040523480156200001157600080fd5b5060405162002b0538038062002b05833981016040819052620000349162000394565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000ec565b5050506001600160601b0319606086901b16608052620000e18484848462000198565b50505050506200048d565b6001600160a01b038116331415620001475760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b620001a262000319565b6003546040805160a081019091526501000000000090910463ffffffff169080866002811115620001d757620001d762000477565b815261ffff8616602082015263ffffffff831660408201526001600160a01b03851660608201526001600160601b038416608090910152805160038054909190829060ff1916600183600281111562000234576200023462000477565b0217905550602082015181546040808501516060860151610100600160481b031990931661010063ffffffff9586160263ffffffff60281b19161765010000000000949091169390930292909217600160481b600160e81b03191669010000000000000000006001600160a01b0390921691909102178255608090920151600190910180546001600160601b0319166001600160601b03909216919091179055517f6293a703ec7145dfa23c5cde2e627d6a02e153fc2e9c03b14d1e22cbb4a7e9cd906200030a90879087908790879062000422565b60405180910390a15050505050565b6000546001600160a01b03163314620003755760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000082565b565b80516001600160a01b03811681146200038f57600080fd5b919050565b600080600080600060a08688031215620003ad57600080fd5b620003b88662000377565b9450602086015160038110620003cd57600080fd5b604087015190945061ffff81168114620003e657600080fd5
b9250620003f66060870162000377565b60808701519092506001600160601b03811681146200041457600080fd5b809150509295509295909350565b60808101600386106200044557634e487b7160e01b600052602160045260246000fd5b94815261ffff9390931660208401526001600160a01b039190911660408301526001600160601b031660609091015290565b634e487b7160e01b600052602160045260246000fd5b60805160601c612636620004cf6000396000818161016e015281816103f30152818161099f01528181610cfd015281816111d9015261178301526126366000f3fe608060405234801561001057600080fd5b50600436106100f55760003560e01c8063850af0cb11610097578063a611ea5611610066578063a611ea5614610325578063a793ab8b14610338578063c4d252f51461034b578063f2fde38b1461035e57600080fd5b8063850af0cb1461022e57806388b12d55146102475780638da5cb5b146102f4578063a4c0ed361461031257600080fd5b8063367b9b4f116100d3578063367b9b4f146101b557806362105854146101ca57806379ba5097146101dd5780637e776f7f146101e557600080fd5b806308b79da4146100fa578063181f5a77146101205780631b6b6d2314610169575b600080fd5b61010d610108366004612011565b610371565b6040519081526020015b60405180910390f35b61015c6040518060400160405280601581526020017f4b656570657252656769737472617220322e302e30000000000000000000000081525081565b6040516101179190612337565b6101907f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610117565b6101c86101c3366004611be0565b6104fe565b005b6101c86101d8366004611d10565b610590565b6101c86107a4565b61021e6101f3366004611bbc565b73ffffffffffffffffffffffffffffffffffffffff1660009081526005602052604090205460ff1690565b6040519015158152602001610117565b6102366108a6565b6040516101179594939291906122e7565b6102bb610255366004611c92565b60009081526002602090815260409182902082518084019093525473ffffffffffffffffffffffffffffffffffffffff8116808452740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff169290910182905291565b6040805173ffffffffffffffffffffffffffffffffffffffff90931683526bffffffffffffffffffffffff909116602083015201610117565b60005473fffffffffffff
fffffffffffffffffffffffffff16610190565b6101c8610320366004611c19565b610987565b6101c8610333366004611de7565b610ce5565b6101c8610346366004611cab565b610e78565b6101c8610359366004611c92565b61108d565b6101c861036c366004611bbc565b611326565b6004546000906bffffffffffffffffffffffff16610396610100840160e08501612066565b6bffffffffffffffffffffffff1610156103dc576040517fcd1c886700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000166323b872dd333061042b610100870160e08801612066565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e086901b16815273ffffffffffffffffffffffffffffffffffffffff93841660048201529290911660248301526bffffffffffffffffffffffff166044820152606401602060405180830381600087803b1580156104ad57600080fd5b505af11580156104c1573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104e59190611c75565b506104f86104f283612470565b3361133a565b92915050565b610506611622565b73ffffffffffffffffffffffffffffffffffffffff821660008181526005602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001685151590811790915591519182527f20c6237dac83526a849285a9f79d08a483291bdd3a056a0ef9ae94ecee1ad356910160405180910390a25050565b610598611622565b60008181526002602090815260409182902082518084019093525473ffffffffffffffffffffffffffffffffffffffff8116808452740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff1691830191909152610631576040517f4b13b31e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000898989898989896040516020016106509796959493929190612180565b6040516020818303038152906040528051906020012090508083146106a1576040517f3f4d605300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008381526002602090815260408083208390558051610100810182528e8152815180840183529384528083019390935273fffffffffffffffffffffffffffffffffffffff
f8d81168483015263ffffffff8d1660608501528b1660808401528051601f8a01839004830281018301909152888152610796929160a0830191908b908b9081908401838280828437600092019190915250505090825250604080516020601f8a01819004810282018101909252888152918101919089908990819084018382808284376000920191909152505050908252506020858101516bffffffffffffffffffffffff16910152826116a5565b505050505050505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff16331461082a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6040805160a081019091526003805460009283928392839283928392829060ff1660028111156108d8576108d861259b565b60028111156108e9576108e961259b565b81528154610100810463ffffffff908116602080850191909152650100000000008304909116604080850191909152690100000000000000000090920473ffffffffffffffffffffffffffffffffffffffff166060808501919091526001909401546bffffffffffffffffffffffff90811660809485015285519186015192860151948601519590930151909b919a50929850929650169350915050565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146109f6576040517f018d10be00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b81818080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050505060208101517fffffffff0000000000000000000000000000000000000000000000000000000081167fa611ea560000000000000000000000000000000000000000000000000000000014610aac576040517fe3d6792100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8484846000610abe82600481866123f7565b810190610acb9190611f10565b5097505
0505050505050806bffffffffffffffffffffffff168414610b1c576040517f55e97b0d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8988886000610b2e82600481866123f7565b810190610b3b9190611f10565b985050505050505050508073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff1614610baa576040517ff8c5638e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6101248b1015610be6576040517fdfe9309000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6004546bffffffffffffffffffffffff168d1015610c30576040517fcd1c886700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60003073ffffffffffffffffffffffffffffffffffffffff168d8d604051610c59929190612170565b600060405180830381855af49150503d8060008114610c94576040519150601f19603f3d011682016040523d82523d6000602084013e610c99565b606091505b5050905080610cd4576040517f649bf81000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b505050505050505050505050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610d54576040517f018d10be00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610e696040518061010001604052808e81526020018d8d8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525073ffffffffffffffffffffffffffffffffffffffff808d1660208084019190915263ffffffff8d16604080850191909152918c1660608401528151601f8b018290048202810182019092528982526080909201918a908a9081908401838280828437600092019190915250505090825250604080516020601f8901819004810282018101909252878152918101919088908890819084018382808284376000920191909152505050908252506bffffffffffffffffffffffff85166020909101528261133a565b50505050505050505050505050565b610e80611622565b6003546040805160a081019091526501000000000090910463ffffffff169080866002811115610eb257610eb261259b565b815261fff
f8616602082015263ffffffff8316604082015273ffffffffffffffffffffffffffffffffffffffff851660608201526bffffffffffffffffffffffff841660809091015280516003805490919082907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001836002811115610f3c57610f3c61259b565b02179055506020820151815460408085015160608601517fffffffffffffffffffffffffffffffffffffffffffffff0000000000000000ff90931661010063ffffffff958616027fffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffff1617650100000000009490911693909302929092177fffffff0000000000000000000000000000000000000000ffffffffffffffffff16690100000000000000000073ffffffffffffffffffffffffffffffffffffffff90921691909102178255608090920151600190910180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff909216919091179055517f6293a703ec7145dfa23c5cde2e627d6a02e153fc2e9c03b14d1e22cbb4a7e9cd9061107e908790879087908790612296565b60405180910390a15050505050565b60008181526002602090815260409182902082518084019093525473ffffffffffffffffffffffffffffffffffffffff8116808452740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff1691830191909152331480611114575060005473ffffffffffffffffffffffffffffffffffffffff1633145b61114a576040517f61685c2b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805173ffffffffffffffffffffffffffffffffffffffff16611198576040517f4b13b31e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260026020908152604080832083905583519184015190517fa9059cbb0000000000000000000000000000000000000000000000000000000081527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169263a9059cbb926112509260040173ffffffffffffffffffffffffffffffffffffffff9290921682526bffffffffffffffffffffffff16602082015260400190565b602060405180830381600087803b15801561126a57600080fd5b505af115801561127e573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906112a29190611
c75565b9050806112f65781516040517fc2e4dce800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091166004820152602401610821565b60405183907f3663fb28ebc87645eb972c9dad8521bf665c623f287e79f1c56f1eb374b82a2290600090a2505050565b61132e611622565b611337816118ec565b50565b608082015160009073ffffffffffffffffffffffffffffffffffffffff1661138e576040517f05bb467c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008360400151846060015185608001518660a001518760c001516040516020016113bd9594939291906121e7565b604051602081830303815290604052805190602001209050836040015173ffffffffffffffffffffffffffffffffffffffff16817f9b8456f925542af2c5fb15ff4be32cc8f209dda96c544766e301367df40f499886600001518760200151886060015189608001518a60a001518b60e001516040516114429695949392919061234a565b60405180910390a36040805160a081019091526003805460009283929091829060ff1660028111156114765761147661259b565b60028111156114875761148761259b565b8152815463ffffffff61010082048116602084015265010000000000820416604083015273ffffffffffffffffffffffffffffffffffffffff69010000000000000000009091041660608201526001909101546bffffffffffffffffffffffff1660809091015290506114fa81866119e2565b1561155f57604081015161150f906001612421565b6003805463ffffffff9290921665010000000000027fffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffff90921691909117905561155886846116a5565b9150611619565b60e086015160008481526002602052604081205490916115a4917401000000000000000000000000000000000000000090046bffffffffffffffffffffffff16612449565b60408051808201825260808a015173ffffffffffffffffffffffffffffffffffffffff90811682526bffffffffffffffffffffffff938416602080840191825260008a815260029091529390932091519251909316740100000000000000000000000000000000000000000291909216179055505b50949350505050565b60005473ffffffffffffffffffffffffffffffffffffffff1633146116a3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c652062792
06f776e6572000000000000000000006044820152606401610821565b565b6003546040838101516060850151608086015160a087015160c088015194517f6ded9eae0000000000000000000000000000000000000000000000000000000081526000966901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff169587958795636ded9eae9561172b95929491939092916004016121e7565b602060405180830381600087803b15801561174557600080fd5b505af1158015611759573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061177d919061204d565b905060007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16634000aea0848860e00151856040516020016117d691815260200190565b6040516020818303038152906040526040518463ffffffff1660e01b81526004016118039392919061224a565b602060405180830381600087803b15801561181d57600080fd5b505af1158015611831573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906118559190611c75565b9050806118a6576040517fc2e4dce800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff84166004820152602401610821565b81857fb9a292fb7e3edd920cd2d2829a3615a640c43fd7de0a0820aa0668feb4c37d4b88600001516040516118db9190612337565b60405180910390a350949350505050565b73ffffffffffffffffffffffffffffffffffffffff811633141561196c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610821565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600080835160028111156119f8576119f861259b565b1415611a06575060006104f8565b600183516002811115611a1b57611a1b61259b565b148015611a4e575073ffffffffffffffffffffffffffffffffffffffff821660009081526005602052604090205460ff16155b15611a5b575060006104f8565b826020015163ffffffff16836040015163fffff
fff161015611a7f575060016104f8565b50600092915050565b8035611a93816125f9565b919050565b60008083601f840112611aaa57600080fd5b50813567ffffffffffffffff811115611ac257600080fd5b602083019150836020828501011115611ada57600080fd5b9250929050565b600082601f830112611af257600080fd5b813567ffffffffffffffff80821115611b0d57611b0d6125ca565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908282118183101715611b5357611b536125ca565b81604052838152866020858801011115611b6c57600080fd5b836020870160208301376000602085830101528094505050505092915050565b803563ffffffff81168114611a9357600080fd5b80356bffffffffffffffffffffffff81168114611a9357600080fd5b600060208284031215611bce57600080fd5b8135611bd9816125f9565b9392505050565b60008060408385031215611bf357600080fd5b8235611bfe816125f9565b91506020830135611c0e8161261b565b809150509250929050565b60008060008060608587031215611c2f57600080fd5b8435611c3a816125f9565b935060208501359250604085013567ffffffffffffffff811115611c5d57600080fd5b611c6987828801611a98565b95989497509550505050565b600060208284031215611c8757600080fd5b8151611bd98161261b565b600060208284031215611ca457600080fd5b5035919050565b60008060008060808587031215611cc157600080fd5b843560038110611cd057600080fd5b9350602085013561ffff81168114611ce757600080fd5b92506040850135611cf7816125f9565b9150611d0560608601611ba0565b905092959194509250565b600080600080600080600080600060e08a8c031215611d2e57600080fd5b893567ffffffffffffffff80821115611d4657600080fd5b611d528d838e01611ae1565b9a5060208c01359150611d64826125f9565b819950611d7360408d01611b8c565b985060608c01359150611d85826125f9565b90965060808b01359080821115611d9b57600080fd5b611da78d838e01611a98565b909750955060a08c0135915080821115611dc057600080fd5b50611dcd8c828d01611a98565b9a9d999c50979a9699959894979660c00135949350505050565b6000806000806000806000806000806000806101208d8f031215611e0a57600080fd5b67ffffffffffffffff8d351115611e2057600080fd5b611e2d8e8e358f01611ae1565b9b5067ffffffffffffffff60208e01351115611e4857600080fd5b611e588e60208f01358f01611a985
65b909b509950611e6960408e01611a88565b9850611e7760608e01611b8c565b9750611e8560808e01611a88565b965067ffffffffffffffff60a08e01351115611ea057600080fd5b611eb08e60a08f01358f01611a98565b909650945067ffffffffffffffff60c08e01351115611ece57600080fd5b611ede8e60c08f01358f01611a98565b9094509250611eef60e08e01611ba0565b9150611efe6101008e01611a88565b90509295989b509295989b509295989b565b60008060008060008060008060006101208a8c031215611f2f57600080fd5b893567ffffffffffffffff80821115611f4757600080fd5b611f538d838e01611ae1565b9a5060208c0135915080821115611f6957600080fd5b611f758d838e01611ae1565b9950611f8360408d01611a88565b9850611f9160608d01611b8c565b9750611f9f60808d01611a88565b965060a08c0135915080821115611fb557600080fd5b611fc18d838e01611ae1565b955060c08c0135915080821115611fd757600080fd5b50611fe48c828d01611ae1565b935050611ff360e08b01611ba0565b91506120026101008b01611a88565b90509295985092959850929598565b60006020828403121561202357600080fd5b813567ffffffffffffffff81111561203a57600080fd5b82016101008185031215611bd957600080fd5b60006020828403121561205f57600080fd5b5051919050565b60006020828403121561207857600080fd5b611bd982611ba0565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b6000815180845260005b818110156120f0576020818501810151868301820152016120d4565b81811115612102576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6003811061216c577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b9052565b8183823760009101908152919050565b600073ffffffffffffffffffffffffffffffffffffffff808a16835263ffffffff8916602084015280881660408401525060a060608301526121c660a083018688612081565b82810360808401526121d9818587612081565b9a9950505050505050505050565b600073ffffffffffffffffffffffffffffffffffffffff808816835263ffffffff8716602084015280861660408401525060a0606083015261222c60a08301856120ca565b828103608084015261223e81856120ca565b9
8975050505050505050565b73ffffffffffffffffffffffffffffffffffffffff841681526bffffffffffffffffffffffff8316602082015260606040820152600061228d60608301846120ca565b95945050505050565b608081016122a48287612135565b61ffff8516602083015273ffffffffffffffffffffffffffffffffffffffff841660408301526bffffffffffffffffffffffff8316606083015295945050505050565b60a081016122f58288612135565b63ffffffff808716602084015280861660408401525073ffffffffffffffffffffffffffffffffffffffff841660608301528260808301529695505050505050565b602081526000611bd960208301846120ca565b60c08152600061235d60c08301896120ca565b828103602084015261236f81896120ca565b905063ffffffff8716604084015273ffffffffffffffffffffffffffffffffffffffff8616606084015282810360808401526123ab81866120ca565b9150506bffffffffffffffffffffffff831660a0830152979650505050505050565b604051610100810167ffffffffffffffff811182821017156123f1576123f16125ca565b60405290565b6000808585111561240757600080fd5b8386111561241457600080fd5b5050820193919092039150565b600063ffffffff8083168185168083038211156124405761244061256c565b01949350505050565b60006bffffffffffffffffffffffff8083168185168083038211156124405761244061256c565b6000610100823603121561248357600080fd5b61248b6123cd565b823567ffffffffffffffff808211156124a357600080fd5b6124af36838701611ae1565b835260208501359150808211156124c557600080fd5b6124d136838701611ae1565b60208401526124e260408601611a88565b60408401526124f360608601611b8c565b606084015261250460808601611a88565b608084015260a085013591508082111561251d57600080fd5b61252936838701611ae1565b60a084015260c085013591508082111561254257600080fd5b5061254f36828601611ae1565b60c08301525061256160e08401611ba0565b60e082015292915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b73ffffffffffffffffffffffffffffffffffffffff8116811461133757600080fd5b80151581146113375760008
0fdfea164736f6c6343000806000a", +} + +var KeeperRegistrarABI = KeeperRegistrarMetaData.ABI + +var KeeperRegistrarBin = KeeperRegistrarMetaData.Bin + +func DeployKeeperRegistrar(auth *bind.TransactOpts, backend bind.ContractBackend, PLIAddress common.Address, autoApproveConfigType uint8, autoApproveMaxAllowed uint16, keeperRegistry common.Address, minPLIJuels *big.Int) (common.Address, *types.Transaction, *KeeperRegistrar, error) { + parsed, err := KeeperRegistrarMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistrarBin), backend, PLIAddress, autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistrar{address: address, abi: *parsed, KeeperRegistrarCaller: KeeperRegistrarCaller{contract: contract}, KeeperRegistrarTransactor: KeeperRegistrarTransactor{contract: contract}, KeeperRegistrarFilterer: KeeperRegistrarFilterer{contract: contract}}, nil +} + +type KeeperRegistrar struct { + address common.Address + abi abi.ABI + KeeperRegistrarCaller + KeeperRegistrarTransactor + KeeperRegistrarFilterer +} + +type KeeperRegistrarCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistrarTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistrarFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistrarSession struct { + Contract *KeeperRegistrar + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistrarCallerSession struct { + Contract *KeeperRegistrarCaller + CallOpts bind.CallOpts +} + +type KeeperRegistrarTransactorSession struct { + Contract *KeeperRegistrarTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistrarRaw struct { + Contract *KeeperRegistrar 
+} + +type KeeperRegistrarCallerRaw struct { + Contract *KeeperRegistrarCaller +} + +type KeeperRegistrarTransactorRaw struct { + Contract *KeeperRegistrarTransactor +} + +func NewKeeperRegistrar(address common.Address, backend bind.ContractBackend) (*KeeperRegistrar, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistrarABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistrar(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistrar{address: address, abi: abi, KeeperRegistrarCaller: KeeperRegistrarCaller{contract: contract}, KeeperRegistrarTransactor: KeeperRegistrarTransactor{contract: contract}, KeeperRegistrarFilterer: KeeperRegistrarFilterer{contract: contract}}, nil +} + +func NewKeeperRegistrarCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistrarCaller, error) { + contract, err := bindKeeperRegistrar(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistrarCaller{contract: contract}, nil +} + +func NewKeeperRegistrarTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistrarTransactor, error) { + contract, err := bindKeeperRegistrar(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistrarTransactor{contract: contract}, nil +} + +func NewKeeperRegistrarFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistrarFilterer, error) { + contract, err := bindKeeperRegistrar(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistrarFilterer{contract: contract}, nil +} + +func bindKeeperRegistrar(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistrarMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, 
filterer), nil +} + +func (_KeeperRegistrar *KeeperRegistrarRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistrar.Contract.KeeperRegistrarCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistrar *KeeperRegistrarRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.KeeperRegistrarTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistrar *KeeperRegistrarRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.KeeperRegistrarTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistrar.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.contract.Transact(opts, method, params...) 
+} + +func (_KeeperRegistrar *KeeperRegistrarCaller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistrar.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistrar *KeeperRegistrarSession) PLI() (common.Address, error) { + return _KeeperRegistrar.Contract.PLI(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerSession) PLI() (common.Address, error) { + return _KeeperRegistrar.Contract.PLI(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCaller) GetAutoApproveAllowedSender(opts *bind.CallOpts, senderAddress common.Address) (bool, error) { + var out []interface{} + err := _KeeperRegistrar.contract.Call(opts, &out, "getAutoApproveAllowedSender", senderAddress) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_KeeperRegistrar *KeeperRegistrarSession) GetAutoApproveAllowedSender(senderAddress common.Address) (bool, error) { + return _KeeperRegistrar.Contract.GetAutoApproveAllowedSender(&_KeeperRegistrar.CallOpts, senderAddress) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerSession) GetAutoApproveAllowedSender(senderAddress common.Address) (bool, error) { + return _KeeperRegistrar.Contract.GetAutoApproveAllowedSender(&_KeeperRegistrar.CallOpts, senderAddress) +} + +func (_KeeperRegistrar *KeeperRegistrarCaller) GetPendingRequest(opts *bind.CallOpts, hash [32]byte) (common.Address, *big.Int, error) { + var out []interface{} + err := _KeeperRegistrar.contract.Call(opts, &out, "getPendingRequest", hash) + + if err != nil { + return *new(common.Address), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return 
out0, out1, err + +} + +func (_KeeperRegistrar *KeeperRegistrarSession) GetPendingRequest(hash [32]byte) (common.Address, *big.Int, error) { + return _KeeperRegistrar.Contract.GetPendingRequest(&_KeeperRegistrar.CallOpts, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerSession) GetPendingRequest(hash [32]byte) (common.Address, *big.Int, error) { + return _KeeperRegistrar.Contract.GetPendingRequest(&_KeeperRegistrar.CallOpts, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarCaller) GetRegistrationConfig(opts *bind.CallOpts) (GetRegistrationConfig, + + error) { + var out []interface{} + err := _KeeperRegistrar.contract.Call(opts, &out, "getRegistrationConfig") + + outstruct := new(GetRegistrationConfig) + if err != nil { + return *outstruct, err + } + + outstruct.AutoApproveConfigType = *abi.ConvertType(out[0], new(uint8)).(*uint8) + outstruct.AutoApproveMaxAllowed = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ApprovedCount = *abi.ConvertType(out[2], new(uint32)).(*uint32) + outstruct.KeeperRegistry = *abi.ConvertType(out[3], new(common.Address)).(*common.Address) + outstruct.MinPLIJuels = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_KeeperRegistrar *KeeperRegistrarSession) GetRegistrationConfig() (GetRegistrationConfig, + + error) { + return _KeeperRegistrar.Contract.GetRegistrationConfig(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerSession) GetRegistrationConfig() (GetRegistrationConfig, + + error) { + return _KeeperRegistrar.Contract.GetRegistrationConfig(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistrar.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistrar 
*KeeperRegistrarSession) Owner() (common.Address, error) { + return _KeeperRegistrar.Contract.Owner(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerSession) Owner() (common.Address, error) { + return _KeeperRegistrar.Contract.Owner(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _KeeperRegistrar.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_KeeperRegistrar *KeeperRegistrarSession) TypeAndVersion() (string, error) { + return _KeeperRegistrar.Contract.TypeAndVersion(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarCallerSession) TypeAndVersion() (string, error) { + return _KeeperRegistrar.Contract.TypeAndVersion(&_KeeperRegistrar.CallOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "acceptOwnership") +} + +func (_KeeperRegistrar *KeeperRegistrarSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistrar.Contract.AcceptOwnership(&_KeeperRegistrar.TransactOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistrar.Contract.AcceptOwnership(&_KeeperRegistrar.TransactOpts) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) Approve(opts *bind.TransactOpts, name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, offchainConfig []byte, hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "approve", name, upkeepContract, gasLimit, adminAddress, checkData, offchainConfig, hash) +} + +func (_KeeperRegistrar 
*KeeperRegistrarSession) Approve(name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, offchainConfig []byte, hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.Approve(&_KeeperRegistrar.TransactOpts, name, upkeepContract, gasLimit, adminAddress, checkData, offchainConfig, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) Approve(name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, offchainConfig []byte, hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.Approve(&_KeeperRegistrar.TransactOpts, name, upkeepContract, gasLimit, adminAddress, checkData, offchainConfig, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) Cancel(opts *bind.TransactOpts, hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "cancel", hash) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) Cancel(hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.Cancel(&_KeeperRegistrar.TransactOpts, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) Cancel(hash [32]byte) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.Cancel(&_KeeperRegistrar.TransactOpts, hash) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.OnTokenTransfer(&_KeeperRegistrar.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, 
data []byte) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.OnTokenTransfer(&_KeeperRegistrar.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) Register(opts *bind.TransactOpts, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, offchainConfig []byte, amount *big.Int, sender common.Address) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "register", name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, offchainConfig, amount, sender) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) Register(name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, offchainConfig []byte, amount *big.Int, sender common.Address) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.Register(&_KeeperRegistrar.TransactOpts, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, offchainConfig, amount, sender) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) Register(name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, offchainConfig []byte, amount *big.Int, sender common.Address) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.Register(&_KeeperRegistrar.TransactOpts, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, offchainConfig, amount, sender) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) RegisterUpkeep(opts *bind.TransactOpts, requestParams KeeperRegistrar20RegistrationParams) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "registerUpkeep", requestParams) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) RegisterUpkeep(requestParams KeeperRegistrar20RegistrationParams) (*types.Transaction, error) { + return 
_KeeperRegistrar.Contract.RegisterUpkeep(&_KeeperRegistrar.TransactOpts, requestParams) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) RegisterUpkeep(requestParams KeeperRegistrar20RegistrationParams) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.RegisterUpkeep(&_KeeperRegistrar.TransactOpts, requestParams) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) SetAutoApproveAllowedSender(opts *bind.TransactOpts, senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "setAutoApproveAllowedSender", senderAddress, allowed) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) SetAutoApproveAllowedSender(senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.SetAutoApproveAllowedSender(&_KeeperRegistrar.TransactOpts, senderAddress, allowed) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) SetAutoApproveAllowedSender(senderAddress common.Address, allowed bool) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.SetAutoApproveAllowedSender(&_KeeperRegistrar.TransactOpts, senderAddress, allowed) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) SetRegistrationConfig(opts *bind.TransactOpts, autoApproveConfigType uint8, autoApproveMaxAllowed uint16, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "setRegistrationConfig", autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) SetRegistrationConfig(autoApproveConfigType uint8, autoApproveMaxAllowed uint16, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.SetRegistrationConfig(&_KeeperRegistrar.TransactOpts, autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels) +} + +func 
(_KeeperRegistrar *KeeperRegistrarTransactorSession) SetRegistrationConfig(autoApproveConfigType uint8, autoApproveMaxAllowed uint16, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.SetRegistrationConfig(&_KeeperRegistrar.TransactOpts, autoApproveConfigType, autoApproveMaxAllowed, keeperRegistry, minPLIJuels) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _KeeperRegistrar.contract.Transact(opts, "transferOwnership", to) +} + +func (_KeeperRegistrar *KeeperRegistrarSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.TransferOwnership(&_KeeperRegistrar.TransactOpts, to) +} + +func (_KeeperRegistrar *KeeperRegistrarTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistrar.Contract.TransferOwnership(&_KeeperRegistrar.TransactOpts, to) +} + +type KeeperRegistrarAutoApproveAllowedSenderSetIterator struct { + Event *KeeperRegistrarAutoApproveAllowedSenderSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarAutoApproveAllowedSenderSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarAutoApproveAllowedSenderSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarAutoApproveAllowedSenderSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + 
it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarAutoApproveAllowedSenderSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarAutoApproveAllowedSenderSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarAutoApproveAllowedSenderSet struct { + SenderAddress common.Address + Allowed bool + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterAutoApproveAllowedSenderSet(opts *bind.FilterOpts, senderAddress []common.Address) (*KeeperRegistrarAutoApproveAllowedSenderSetIterator, error) { + + var senderAddressRule []interface{} + for _, senderAddressItem := range senderAddress { + senderAddressRule = append(senderAddressRule, senderAddressItem) + } + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "AutoApproveAllowedSenderSet", senderAddressRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarAutoApproveAllowedSenderSetIterator{contract: _KeeperRegistrar.contract, event: "AutoApproveAllowedSenderSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchAutoApproveAllowedSenderSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarAutoApproveAllowedSenderSet, senderAddress []common.Address) (event.Subscription, error) { + + var senderAddressRule []interface{} + for _, senderAddressItem := range senderAddress { + senderAddressRule = append(senderAddressRule, senderAddressItem) + } + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "AutoApproveAllowedSenderSet", senderAddressRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarAutoApproveAllowedSenderSet) + if err := _KeeperRegistrar.contract.UnpackLog(event, "AutoApproveAllowedSenderSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseAutoApproveAllowedSenderSet(log types.Log) (*KeeperRegistrarAutoApproveAllowedSenderSet, error) { + event := new(KeeperRegistrarAutoApproveAllowedSenderSet) + if err := _KeeperRegistrar.contract.UnpackLog(event, "AutoApproveAllowedSenderSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarConfigChangedIterator struct { + Event *KeeperRegistrarConfigChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarConfigChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarConfigChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarConfigChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarConfigChangedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarConfigChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarConfigChanged struct { + AutoApproveConfigType uint8 + AutoApproveMaxAllowed uint32 + KeeperRegistry common.Address + MinPLIJuels *big.Int + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterConfigChanged(opts *bind.FilterOpts) (*KeeperRegistrarConfigChangedIterator, error) { + + logs, sub, err := 
_KeeperRegistrar.contract.FilterLogs(opts, "ConfigChanged") + if err != nil { + return nil, err + } + return &KeeperRegistrarConfigChangedIterator{contract: _KeeperRegistrar.contract, event: "ConfigChanged", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchConfigChanged(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarConfigChanged) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "ConfigChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarConfigChanged) + if err := _KeeperRegistrar.contract.UnpackLog(event, "ConfigChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseConfigChanged(log types.Log) (*KeeperRegistrarConfigChanged, error) { + event := new(KeeperRegistrarConfigChanged) + if err := _KeeperRegistrar.contract.UnpackLog(event, "ConfigChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarOwnershipTransferRequestedIterator struct { + Event *KeeperRegistrarOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return 
false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistrarOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarOwnershipTransferRequestedIterator{contract: _KeeperRegistrar.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return 
nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarOwnershipTransferRequested) + if err := _KeeperRegistrar.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistrarOwnershipTransferRequested, error) { + event := new(KeeperRegistrarOwnershipTransferRequested) + if err := _KeeperRegistrar.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarOwnershipTransferredIterator struct { + Event *KeeperRegistrarOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it 
*KeeperRegistrarOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistrarOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarOwnershipTransferredIterator{contract: _KeeperRegistrar.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarOwnershipTransferred) + if err := _KeeperRegistrar.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + 
}), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistrarOwnershipTransferred, error) { + event := new(KeeperRegistrarOwnershipTransferred) + if err := _KeeperRegistrar.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarRegistrationApprovedIterator struct { + Event *KeeperRegistrarRegistrationApproved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarRegistrationApprovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarRegistrationApproved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarRegistrationApproved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarRegistrationApprovedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarRegistrationApprovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarRegistrationApproved struct { + Hash [32]byte + DisplayName string + UpkeepId *big.Int + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterRegistrationApproved(opts *bind.FilterOpts, hash [][32]byte, upkeepId []*big.Int) (*KeeperRegistrarRegistrationApprovedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepIdRule 
[]interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "RegistrationApproved", hashRule, upkeepIdRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarRegistrationApprovedIterator{contract: _KeeperRegistrar.contract, event: "RegistrationApproved", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchRegistrationApproved(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarRegistrationApproved, hash [][32]byte, upkeepId []*big.Int) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "RegistrationApproved", hashRule, upkeepIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarRegistrationApproved) + if err := _KeeperRegistrar.contract.UnpackLog(event, "RegistrationApproved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseRegistrationApproved(log types.Log) (*KeeperRegistrarRegistrationApproved, error) { + event := new(KeeperRegistrarRegistrationApproved) + if err := _KeeperRegistrar.contract.UnpackLog(event, "RegistrationApproved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarRegistrationRejectedIterator struct { + Event 
*KeeperRegistrarRegistrationRejected + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarRegistrationRejectedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarRegistrationRejected) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarRegistrationRejected) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarRegistrationRejectedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarRegistrationRejectedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarRegistrationRejected struct { + Hash [32]byte + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterRegistrationRejected(opts *bind.FilterOpts, hash [][32]byte) (*KeeperRegistrarRegistrationRejectedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "RegistrationRejected", hashRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarRegistrationRejectedIterator{contract: _KeeperRegistrar.contract, event: "RegistrationRejected", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchRegistrationRejected(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarRegistrationRejected, hash [][32]byte) (event.Subscription, error) { + + var hashRule []interface{} + for _, 
hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "RegistrationRejected", hashRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarRegistrationRejected) + if err := _KeeperRegistrar.contract.UnpackLog(event, "RegistrationRejected", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseRegistrationRejected(log types.Log) (*KeeperRegistrarRegistrationRejected, error) { + event := new(KeeperRegistrarRegistrationRejected) + if err := _KeeperRegistrar.contract.UnpackLog(event, "RegistrationRejected", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistrarRegistrationRequestedIterator struct { + Event *KeeperRegistrarRegistrationRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistrarRegistrationRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarRegistrationRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistrarRegistrationRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = 
true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistrarRegistrationRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistrarRegistrationRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistrarRegistrationRequested struct { + Hash [32]byte + Name string + EncryptedEmail []byte + UpkeepContract common.Address + GasLimit uint32 + AdminAddress common.Address + CheckData []byte + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) FilterRegistrationRequested(opts *bind.FilterOpts, hash [][32]byte, upkeepContract []common.Address) (*KeeperRegistrarRegistrationRequestedIterator, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepContractRule []interface{} + for _, upkeepContractItem := range upkeepContract { + upkeepContractRule = append(upkeepContractRule, upkeepContractItem) + } + + logs, sub, err := _KeeperRegistrar.contract.FilterLogs(opts, "RegistrationRequested", hashRule, upkeepContractRule) + if err != nil { + return nil, err + } + return &KeeperRegistrarRegistrationRequestedIterator{contract: _KeeperRegistrar.contract, event: "RegistrationRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) WatchRegistrationRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarRegistrationRequested, hash [][32]byte, upkeepContract []common.Address) (event.Subscription, error) { + + var hashRule []interface{} + for _, hashItem := range hash { + hashRule = append(hashRule, hashItem) + } + + var upkeepContractRule []interface{} + for _, upkeepContractItem := range upkeepContract { + upkeepContractRule = append(upkeepContractRule, upkeepContractItem) + } + + logs, sub, err := _KeeperRegistrar.contract.WatchLogs(opts, "RegistrationRequested", hashRule, upkeepContractRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistrarRegistrationRequested) + if err := _KeeperRegistrar.contract.UnpackLog(event, "RegistrationRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistrar *KeeperRegistrarFilterer) ParseRegistrationRequested(log types.Log) (*KeeperRegistrarRegistrationRequested, error) { + event := new(KeeperRegistrarRegistrationRequested) + if err := _KeeperRegistrar.contract.UnpackLog(event, "RegistrationRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRegistrationConfig struct { + AutoApproveConfigType uint8 + AutoApproveMaxAllowed uint32 + ApprovedCount uint32 + KeeperRegistry common.Address + MinPLIJuels *big.Int +} + +func (_KeeperRegistrar *KeeperRegistrar) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistrar.abi.Events["AutoApproveAllowedSenderSet"].ID: + return _KeeperRegistrar.ParseAutoApproveAllowedSenderSet(log) + case _KeeperRegistrar.abi.Events["ConfigChanged"].ID: + return _KeeperRegistrar.ParseConfigChanged(log) + case _KeeperRegistrar.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistrar.ParseOwnershipTransferRequested(log) + case _KeeperRegistrar.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistrar.ParseOwnershipTransferred(log) + case _KeeperRegistrar.abi.Events["RegistrationApproved"].ID: + return _KeeperRegistrar.ParseRegistrationApproved(log) + case _KeeperRegistrar.abi.Events["RegistrationRejected"].ID: + return _KeeperRegistrar.ParseRegistrationRejected(log) + case _KeeperRegistrar.abi.Events["RegistrationRequested"].ID: + return 
_KeeperRegistrar.ParseRegistrationRequested(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistrarAutoApproveAllowedSenderSet) Topic() common.Hash { + return common.HexToHash("0x20c6237dac83526a849285a9f79d08a483291bdd3a056a0ef9ae94ecee1ad356") +} + +func (KeeperRegistrarConfigChanged) Topic() common.Hash { + return common.HexToHash("0x6293a703ec7145dfa23c5cde2e627d6a02e153fc2e9c03b14d1e22cbb4a7e9cd") +} + +func (KeeperRegistrarOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistrarOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistrarRegistrationApproved) Topic() common.Hash { + return common.HexToHash("0xb9a292fb7e3edd920cd2d2829a3615a640c43fd7de0a0820aa0668feb4c37d4b") +} + +func (KeeperRegistrarRegistrationRejected) Topic() common.Hash { + return common.HexToHash("0x3663fb28ebc87645eb972c9dad8521bf665c623f287e79f1c56f1eb374b82a22") +} + +func (KeeperRegistrarRegistrationRequested) Topic() common.Hash { + return common.HexToHash("0x9b8456f925542af2c5fb15ff4be32cc8f209dda96c544766e301367df40f4998") +} + +func (_KeeperRegistrar *KeeperRegistrar) Address() common.Address { + return _KeeperRegistrar.address +} + +type KeeperRegistrarInterface interface { + PLI(opts *bind.CallOpts) (common.Address, error) + + GetAutoApproveAllowedSender(opts *bind.CallOpts, senderAddress common.Address) (bool, error) + + GetPendingRequest(opts *bind.CallOpts, hash [32]byte) (common.Address, *big.Int, error) + + GetRegistrationConfig(opts *bind.CallOpts) (GetRegistrationConfig, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + 
Approve(opts *bind.TransactOpts, name string, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, offchainConfig []byte, hash [32]byte) (*types.Transaction, error) + + Cancel(opts *bind.TransactOpts, hash [32]byte) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + Register(opts *bind.TransactOpts, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, offchainConfig []byte, amount *big.Int, sender common.Address) (*types.Transaction, error) + + RegisterUpkeep(opts *bind.TransactOpts, requestParams KeeperRegistrar20RegistrationParams) (*types.Transaction, error) + + SetAutoApproveAllowedSender(opts *bind.TransactOpts, senderAddress common.Address, allowed bool) (*types.Transaction, error) + + SetRegistrationConfig(opts *bind.TransactOpts, autoApproveConfigType uint8, autoApproveMaxAllowed uint16, keeperRegistry common.Address, minPLIJuels *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterAutoApproveAllowedSenderSet(opts *bind.FilterOpts, senderAddress []common.Address) (*KeeperRegistrarAutoApproveAllowedSenderSetIterator, error) + + WatchAutoApproveAllowedSenderSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarAutoApproveAllowedSenderSet, senderAddress []common.Address) (event.Subscription, error) + + ParseAutoApproveAllowedSenderSet(log types.Log) (*KeeperRegistrarAutoApproveAllowedSenderSet, error) + + FilterConfigChanged(opts *bind.FilterOpts) (*KeeperRegistrarConfigChangedIterator, error) + + WatchConfigChanged(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarConfigChanged) (event.Subscription, error) + + ParseConfigChanged(log types.Log) (*KeeperRegistrarConfigChanged, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from 
[]common.Address, to []common.Address) (*KeeperRegistrarOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistrarOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistrarOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistrarOwnershipTransferred, error) + + FilterRegistrationApproved(opts *bind.FilterOpts, hash [][32]byte, upkeepId []*big.Int) (*KeeperRegistrarRegistrationApprovedIterator, error) + + WatchRegistrationApproved(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarRegistrationApproved, hash [][32]byte, upkeepId []*big.Int) (event.Subscription, error) + + ParseRegistrationApproved(log types.Log) (*KeeperRegistrarRegistrationApproved, error) + + FilterRegistrationRejected(opts *bind.FilterOpts, hash [][32]byte) (*KeeperRegistrarRegistrationRejectedIterator, error) + + WatchRegistrationRejected(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarRegistrationRejected, hash [][32]byte) (event.Subscription, error) + + ParseRegistrationRejected(log types.Log) (*KeeperRegistrarRegistrationRejected, error) + + FilterRegistrationRequested(opts *bind.FilterOpts, hash [][32]byte, upkeepContract []common.Address) (*KeeperRegistrarRegistrationRequestedIterator, error) + + WatchRegistrationRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistrarRegistrationRequested, hash [][32]byte, upkeepContract []common.Address) (event.Subscription, error) + + ParseRegistrationRequested(log types.Log) 
(*KeeperRegistrarRegistrationRequested, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_registry_logic1_3/keeper_registry_logic1_3.go b/core/gethwrappers/generated/keeper_registry_logic1_3/keeper_registry_logic1_3.go new file mode 100644 index 00000000..718120de --- /dev/null +++ b/core/gethwrappers/generated/keeper_registry_logic1_3/keeper_registry_logic1_3.go @@ -0,0 +1,4021 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package keeper_registry_logic1_3 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type Config struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + BlockCountPerTurn *big.Int + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrar common.Address +} + +var KeeperRegistryLogicMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"enumKeeperRegistryBase1_3.PaymentModel\",\"name\":\"paymentModel\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"registryGasOverhead\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkEthFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"fastGasFeed\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyAddress\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientFunds\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"KeepersMustTakeTurns\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveKeepers\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[]
,\"name\":\"OnlyPausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUnpausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepCancelled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"indexed\":false,\"internalType\":\"structConfig\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"ui
nt256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"KeepersUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\
",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type
\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"UpkeepCheckDataUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepPaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,
\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepUnpaused\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"ARB_NITRO_ORACLE\",\"outputs\":[{\"internalType\":\"contractArbGasInfo\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"FAST_GAS_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI_ETH_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OPTIMISM_ORACLE\",\"outputs\":[{\"internalType\":\"contractOVM_GasPriceOracle\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PAYMENT_MODEL\",\"outputs\":[{\"internalType\":\"enumKeeperRegistryBase1_3.PaymentModel\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"REGISTRY_GAS_OVERHEAD\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"a
ddress\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"acceptUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"cancelUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"maxLinkPayment\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"adjustedGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkEth\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"ids\",\"type\":\"uint256[]\"},{\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"migrateUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"
}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"encodedUpkeeps\",\"type\":\"bytes\"}],\"name\":\"receiveUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"setKeepers\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"},{\"internalType\":\"enumKeeperRegistryBase1_3.MigrationPermission\",\"name\":\"permission\",\"type\":\"uint8\"}],\"name\":\"setPeerRegistryMigrationPermission\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"setUpkeepGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\
"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawOwnerFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x6101606040527f420000000000000000000000000000000000000f00000000000000000000000060e0526c6c000000000000000000000000610100523480156200004857600080fd5b50604051620058fa380380620058fa8339810160408190526200006b916200028f565b84848484843380600081620000c75760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000fa57620000fa81620001c6565b5050600160029081556003805460ff19169055869150811115620001225762000122620002fc565b6101208160028111156200013a576200013a620002fc565b60f81b9052506101408490526001600160a01b03831615806200016457506001600160a01b038216155b806200017757506001600160a01b038116155b156200019657604051637138356f60e01b815260040160405180910390fd5b6001600160601b0319606093841b811660805291831b821660a05290911b1660c052506200031295505050505050565b6001600160a01b038116331415620002215760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620000be565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b03811681146200028a57600080fd5b919050565b600080600080600060a08688031215620002a857600080fd5b855160038110620002b857600080fd5b60208701519095509350620002d06040870162000272565b9250620002e06060870162000272565b9150620002f06080870162000272565b90509295509295909350565b634e487b7160e01b600052602160045260246000fd5b60805160601c60a05160601c60c05160601c60e05160601c6101005160601c6101205160f81c61014051615519620003e16000396000818161028c0152613ec801526000818161036301528181613f1b01526140970152600081816102fa01526140cf01526000818161032901526140060152600081816102650152613bfc0152600081816104010152613cdd01526000818161020c01528181610a4601528181610ca30152818161150e0152818161197301528181611c6301528181612273015261230601526155196000f3fe60806040523
4801561001057600080fd5b50600436106101da5760003560e01c80638da5cb5b11610104578063b148ab6b116100a2578063c804802211610071578063c804802214610488578063da5c67411461049b578063eb5dcd6c146104ae578063f2fde38b146104c157600080fd5b8063b148ab6b14610436578063b79550be14610449578063b7fdb43614610451578063c41b813a1461046457600080fd5b8063a710b221116100de578063a710b221146103d6578063a72aa27e146103e9578063ad178361146103fc578063b121e1471461042357600080fd5b80638da5cb5b146103925780638e86139b146103b0578063948108f7146103c357600080fd5b8063744bfe611161017c5780638456cb591161014b5780638456cb591461031c578063850cce341461032457806385c1b0ba1461034b5780638811cbe81461035e57600080fd5b8063744bfe61146102d257806379ba5097146102e55780637d9b97e0146102ed5780637f37618e146102f557600080fd5b80633f4ba83a116101b85780633f4ba83a146102585780634584a419146102605780635077b210146102875780635c975abb146102bc57600080fd5b8063187256e8146101df5780631a2af011146101f45780631b6b6d2314610207575b600080fd5b6101f26101ed366004614747565b6104d4565b005b6101f2610202366004614adb565b610545565b61022e7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6101f2610770565b61022e7f000000000000000000000000000000000000000000000000000000000000000081565b6102ae7f000000000000000000000000000000000000000000000000000000000000000081565b60405190815260200161024f565b60035460ff16604051901515815260200161024f565b6101f26102e0366004614adb565b610782565b6101f2610ac9565b6101f2610bcb565b61022e7f000000000000000000000000000000000000000000000000000000000000000081565b6101f2610d39565b61022e7f000000000000000000000000000000000000000000000000000000000000000081565b6101f2610359366004614864565b610d49565b6103857f000000000000000000000000000000000000000000000000000000000000000081565b60405161024f9190614f73565b60005473ffffffffffffffffffffffffffffffffffffffff1661022e565b6101f26103be366004614a11565b611598565b6101f26103d1366004614b21565b6117b6565b6101f26103e4366004614714565b611a4f565b6101f26
103f7366004614afe565b611ce7565b61022e7f000000000000000000000000000000000000000000000000000000000000000081565b6101f26104313660046146f9565b611f20565b6101f2610444366004614aa9565b612018565b6101f261223a565b6101f261045f3660046147f8565b6123a5565b610477610472366004614adb565b612706565b60405161024f959493929190614e5d565b6101f2610496366004614aa9565b612a18565b6102ae6104a9366004614782565b612d92565b6101f26104bc366004614714565b612f89565b6101f26104cf3660046146f9565b6130e8565b6104dc6130fc565b73ffffffffffffffffffffffffffffffffffffffff82166000908152600c6020526040902080548291907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600183600381111561053c5761053c6152b3565b02179055505050565b60008281526007602090815260409182902082516101008101845281546bffffffffffffffffffffffff808216835273ffffffffffffffffffffffffffffffffffffffff6c0100000000000000000000000092839004811695840195909552600184015490811695830195909552909304821660608401526002015463ffffffff808216608085015264010000000082041660a084015268010000000000000000810490911660c083015260ff7c010000000000000000000000000000000000000000000000000000000090910416151560e08201526106248161317d565b73ffffffffffffffffffffffffffffffffffffffff8216331415610674576040517f8c8728c700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff82166106c1576040517f9c8d2cd200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000838152600a602052604090205473ffffffffffffffffffffffffffffffffffffffff83811691161461076b576000838152600a602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff861690811790915590519091339186917fb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b3591a45b505050565b6107786130fc565b61078061322a565b565b73ffffffffffffffffffffffffffffffffffffffff81166107cf576040517f9c8d2cd200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526
007602090815260409182902082516101008101845281546bffffffffffffffffffffffff80821683526c010000000000000000000000009182900473ffffffffffffffffffffffffffffffffffffffff9081169584019590955260018401549081169583019590955290930482166060840181905260029091015463ffffffff808216608086015264010000000082041660a085015268010000000000000000810490921660c08401527c010000000000000000000000000000000000000000000000000000000090910460ff16151560e083015233146108e2576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b438160a0015163ffffffff161115610926576040517fff84e5dd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000838152600760205260409020546012546bffffffffffffffffffffffff90911690610954908290615163565b60125560008481526007602090815260409182902080547fffffffffffffffffffffffffffffffffffffffff00000000000000000000000016905581516bffffffffffffffffffffffff8416815273ffffffffffffffffffffffffffffffffffffffff86169181019190915285917ff3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318910160405180910390a26040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff84811660048301526bffffffffffffffffffffffff831660248301527f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb90604401602060405180830381600087803b158015610a8a57600080fd5b505af1158015610a9e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610ac2919061499c565b5050505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610b4f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3d
aafe3b4186f6b6457e091a350565b610bd36130fc565b6011546012546bffffffffffffffffffffffff90911690610bf5908290615163565b601255601180547fffffffffffffffffffffffffffffffffffffffff0000000000000000000000001690556040516bffffffffffffffffffffffff821681527f1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f19060200160405180910390a16040517fa9059cbb0000000000000000000000000000000000000000000000000000000081523360048201526bffffffffffffffffffffffff821660248201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169063a9059cbb906044015b602060405180830381600087803b158015610cfd57600080fd5b505af1158015610d11573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d35919061499c565b5050565b610d416130fc565b6107806132a7565b600173ffffffffffffffffffffffffffffffffffffffff82166000908152600c602052604090205460ff166003811115610d8557610d856152b3565b14158015610dcd5750600373ffffffffffffffffffffffffffffffffffffffff82166000908152600c602052604090205460ff166003811115610dca57610dca6152b3565b14155b15610e04576040517f0ebeec3c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60135473ffffffffffffffffffffffffffffffffffffffff16610e53576040517fd12d7d8d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b81610e8a576040517f2c2fc94100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805161010081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e081018290526000808567ffffffffffffffff811115610ee657610ee6615340565b604051908082528060200260200182016040528015610f1957816020015b6060815260200190600190039081610f045790505b50905060008667ffffffffffffffff811115610f3757610f37615340565b604051908082528060200260200182016040528015610fc457816020015b604080516101008101825260008082526020808301829052928201819052606082018190526080820181905260a0820181905260c0820181905260e082015282527ffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffff909201910181610f555790505b50905060005b878110156112ce57888882818110610fe457610fe4615311565b6020908102929092013560008181526007845260409081902081516101008101835281546bffffffffffffffffffffffff80821683526c010000000000000000000000009182900473ffffffffffffffffffffffffffffffffffffffff90811698840198909852600184015490811694830194909452909204851660608301526002015463ffffffff808216608084015264010000000082041660a083015268010000000000000000810490941660c08201527c010000000000000000000000000000000000000000000000000000000090930460ff16151560e084015297509095506110d290508561317d565b848282815181106110e5576110e5615311565b6020026020010181905250600b6000878152602001908152602001600020805461110e906151d3565b80601f016020809104026020016040519081016040528092919081815260200182805461113a906151d3565b80156111875780601f1061115c57610100808354040283529160200191611187565b820191906000526020600020905b81548152906001019060200180831161116a57829003601f168201915b505050505083828151811061119e5761119e615311565b602090810291909101015284516111c3906bffffffffffffffffffffffff16856150a3565b60008781526007602090815260408083208381556001810184905560020180547fffffff0000000000000000000000000000000000000000000000000000000000169055600b909152812091955061121b9190614284565b6000868152600a6020526040902080547fffffffffffffffffffffffff000000000000000000000000000000000000000016905561125a600587613302565b508451604080516bffffffffffffffffffffffff909216825273ffffffffffffffffffffffffffffffffffffffff8916602083015287917fb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff910160405180910390a2806112c681615227565b915050610fca565b50826012546112dd9190615163565b6012556040516000906112fa908a908a9085908790602001614cec565b60405160208183030381529060405290508673ffffffffffffffffffffffffffffffffffffffff16638e86139b601360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663c71249ab60018b73ffffffffffffffffffffffffffffffffffffffff166348013d7b6040518163ffffffff1
660e01b815260040160206040518083038186803b1580156113ad57600080fd5b505afa1580156113c1573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906113e59190614a88565b866040518463ffffffff1660e01b815260040161140493929190614f86565b60006040518083038186803b15801561141c57600080fd5b505afa158015611430573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526114769190810190614a53565b6040518263ffffffff1660e01b81526004016114929190614e4a565b600060405180830381600087803b1580156114ac57600080fd5b505af11580156114c0573d6000803e3d6000fd5b50506040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8a81166004830152602482018890527f000000000000000000000000000000000000000000000000000000000000000016925063a9059cbb9150604401602060405180830381600087803b15801561155457600080fd5b505af1158015611568573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061158c919061499c565b50505050505050505050565b6002336000908152600c602052604090205460ff1660038111156115be576115be6152b3565b141580156115f057506003336000908152600c602052604090205460ff1660038111156115ed576115ed6152b3565b14155b15611627576040517f0ebeec3c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008080611637848601866148b8565b92509250925060005b83518110156117ae5761171b84828151811061165e5761165e615311565b602002602001015184838151811061167857611678615311565b602002602001015160c0015185848151811061169657611696615311565b6020026020010151608001518685815181106116b4576116b4615311565b6020026020010151606001518786815181106116d2576116d2615311565b6020026020010151600001518787815181106116f0576116f0615311565b602002602001015189888151811061170a5761170a615311565b602002602001015160e00151613317565b83818151811061172d5761172d615311565b60200260200101517f74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a7184838151811061176857611768615311565b6020908102919091018101515
1604080516bffffffffffffffffffffffff909216825233928201929092520160405180910390a2806117a681615227565b915050611640565b505050505050565b60008281526007602090815260409182902082516101008101845281546bffffffffffffffffffffffff80821683526c010000000000000000000000009182900473ffffffffffffffffffffffffffffffffffffffff90811695840195909552600184015490811695830195909552909304821660608401526002015463ffffffff80821660808501526401000000008204811660a0850181905268010000000000000000830490931660c08501527c010000000000000000000000000000000000000000000000000000000090910460ff16151560e0840152146118c7576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80516118d49083906150bb565b600084815260076020526040902080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff928316179055601254611928918416906150a3565b6012556040517f23b872dd0000000000000000000000000000000000000000000000000000000081523360048201523060248201526bffffffffffffffffffffffff831660448201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906323b872dd90606401602060405180830381600087803b1580156119cc57600080fd5b505af11580156119e0573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611a04919061499c565b506040516bffffffffffffffffffffffff83168152339084907fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039060200160405180910390a3505050565b73ffffffffffffffffffffffffffffffffffffffff8116611a9c576040517f9c8d2cd200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff82811660009081526008602090815260409182902082516060810184528154948516808252740100000000000000000000000000000000000000009095046bffffffffffffffffffffffff16928101929092526001015460ff16151591810191909152903314611b4d576040517fcebf515b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73fffffffffffffffffffffffffffff
fffffffffff80841660009081526008602090815260409091208054909216909155810151601254611b9c916bffffffffffffffffffffffff1690615163565b60125560208082015160405133815273ffffffffffffffffffffffffffffffffffffffff808616936bffffffffffffffffffffffff90931692908716917f9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698910160405180910390a460208101516040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff84811660048301526bffffffffffffffffffffffff90921660248201527f00000000000000000000000000000000000000000000000000000000000000009091169063a9059cbb90604401602060405180830381600087803b158015611ca957600080fd5b505af1158015611cbd573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611ce1919061499c565b50505050565b6108fc8163ffffffff161080611d085750600e5463ffffffff908116908216115b15611d3f576040517f14c237fb00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526007602090815260409182902082516101008101845281546bffffffffffffffffffffffff80821683526c010000000000000000000000009182900473ffffffffffffffffffffffffffffffffffffffff90811695840195909552600184015490811695830195909552909304821660608401526002015463ffffffff80821660808501526401000000008204811660a0850181905268010000000000000000830490931660c08501527c010000000000000000000000000000000000000000000000000000000090910460ff16151560e084015214611e50576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606081015173ffffffffffffffffffffffffffffffffffffffff163314611ea3576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008381526007602090815260409182902060020180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff8616908117909155915191825284917fc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c910160405180910390a2505050565b73ffffffffffffffffffffffffffffffffffffffff8181166000908152600960205260409
02054163314611f80576040517f6752e7aa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff81811660008181526008602090815260408083208054337fffffffffffffffffffffffff000000000000000000000000000000000000000080831682179093556009909452828520805490921690915590519416939092849290917f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b39190a45050565b60008181526007602090815260409182902082516101008101845281546bffffffffffffffffffffffff80821683526c010000000000000000000000009182900473ffffffffffffffffffffffffffffffffffffffff90811695840195909552600184015490811695830195909552909304821660608401526002015463ffffffff80821660808501526401000000008204811660a0850181905268010000000000000000830490931660c08501527c010000000000000000000000000000000000000000000000000000000090910460ff16151560e084015214612129576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000828152600a602052604090205473ffffffffffffffffffffffffffffffffffffffff163314612186576040517f6352a85300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6060810151600083815260076020908152604080832060010180546bffffffffffffffffffffffff16336c01000000000000000000000000810291909117909155600a90925280832080547fffffffffffffffffffffffff000000000000000000000000000000000000000016905551909173ffffffffffffffffffffffffffffffffffffffff84169186917f5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c91a4505050565b6122426130fc565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201526000907f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906370a082319060240160206040518083038186803b1580156122ca57600080fd5b505afa1580156122de573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906123029190614ac2565b90507f000000000000000000000000000000000000000000000000000000000000000073fffffffffffff
fffffffffffffffffffffffffff1663a9059cbb336012548461234f9190615163565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e085901b16815273ffffffffffffffffffffffffffffffffffffffff90921660048301526024820152604401610ce3565b6123ad6130fc565b82811415806123bc5750600283105b156123f3576040517fcf54c06a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b60045481101561247f5760006004828154811061241557612415615311565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff168252600890526040902060010180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055508061247781615227565b9150506123f6565b5060005b838110156126b557600085858381811061249f5761249f615311565b90506020020160208101906124b491906146f9565b73ffffffffffffffffffffffffffffffffffffffff8082166000908152600860205260408120805493945092909116908686868181106124f6576124f6615311565b905060200201602081019061250b91906146f9565b905073ffffffffffffffffffffffffffffffffffffffff8116158061259e575073ffffffffffffffffffffffffffffffffffffffff82161580159061257c57508073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b801561259e575073ffffffffffffffffffffffffffffffffffffffff81811614155b156125d5576040517fb387a23800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600183015460ff1615612614576040517f357d0cc400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600183810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016909117905573ffffffffffffffffffffffffffffffffffffffff8181161461269e5782547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff82161783555b5050505080806126ad90615227565b915050612483565b506126c2600485856142be565b507f056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f848484846040516126f89493929190614cba565b60405180910390a150505050565b60606000806000806127166136885
65b600087815260076020908152604080832081516101008101835281546bffffffffffffffffffffffff80821683526c010000000000000000000000009182900473ffffffffffffffffffffffffffffffffffffffff908116848801526001850154918216848701529190048116606083015260029092015463ffffffff808216608084015264010000000082041660a083015268010000000000000000810490921660c08201527c010000000000000000000000000000000000000000000000000000000090910460ff16151560e08201528a8452600b90925280832090519192917f6e04ff0d000000000000000000000000000000000000000000000000000000009161282691602401614e94565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090506000808360c0015173ffffffffffffffffffffffffffffffffffffffff16600d600001600b9054906101000a900463ffffffff1663ffffffff16846040516128cd9190614c9e565b60006040518083038160008787f1925050503d806000811461290b576040519150601f19603f3d011682016040523d82523d6000602084013e612910565b606091505b50915091508161294e57806040517f96c36235000000000000000000000000000000000000000000000000000000008152600401610b469190614e4a565b8080602001905181019061296291906149c0565b995091508161299d576040517f865676e300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006129ac8b8d8c60006136c0565b90506129c185826000015183606001516137aa565b60608101516080820151600d5460a08401518d9392916129fc91720100000000000000000000000000000000000090910461ffff1690615126565b60c090940151929f919e509c50919a5098509650505050505050565b600081815260076020908152604080832081516101008101835281546bffffffffffffffffffffffff80821683526c010000000000000000000000009182900473ffffffffffffffffffffffffffffffffffffffff90811696840196909652600184015490811694830194909452909204831660608301526002015463ffffffff80821660808401526401000000008204811660a08401819052680100000000000000008304851660c08501527c010000000000000000000000000000000000000000000000000000000090920460ff16151560e08401529354919314801592919091163
314908290612b1e5750808015612b1c5750438360a0015163ffffffff16115b155b15612b55576040517ffbc0357800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80158015612b935750826060015173ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614155b15612bca576040517ffbdb8e5600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b4381612bde57612bdb6032826150a3565b90505b6000858152600760205260409020600201805463ffffffff808416640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff90921691909117909155612c3a90600590879061330216565b50600d5460408501516bffffffffffffffffffffffff7401000000000000000000000000000000000000000090920482169160009116821115612cb4576040860151612c86908361517a565b905085600001516bffffffffffffffffffffffff16816bffffffffffffffffffffffff161115612cb4575084515b8551612cc190829061517a565b600088815260076020526040902080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff928316179055601154612d15918391166150bb565b601180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff9290921691909117905560405167ffffffffffffffff84169088907f91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f79118190600090a350505050505050565b6000805473ffffffffffffffffffffffffffffffffffffffff163314801590612dd3575060145473ffffffffffffffffffffffffffffffffffffffff163314155b15612e0a576040517fd48b678b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b612e15600143615163565b600e5460408051924060208401523060601b7fffffffffffffffffffffffffffffffffffffffff0000000000000000000000001690830152640100000000900460e01b7fffffffff000000000000000000000000000000000000000000000000000000001660548201526058016040516020818303038152906040528051906020012060001c9050612ee281878787600088888080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201829052509250613317915050565b600e8
054640100000000900463ffffffff16906004612f0083615260565b91906101000a81548163ffffffff021916908363ffffffff16021790555050807fbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d0128686604051612f7892919063ffffffff92909216825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b60405180910390a295945050505050565b73ffffffffffffffffffffffffffffffffffffffff828116600090815260086020526040902054163314612fe9576040517fcebf515b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8116331415613039576040517f8c8728c700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff828116600090815260096020526040902054811690821614610d355773ffffffffffffffffffffffffffffffffffffffff82811660008181526009602052604080822080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169486169485179055513392917f84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e3836791a45050565b6130f06130fc565b6130f9816138fb565b50565b60005473ffffffffffffffffffffffffffffffffffffffff163314610780576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610b46565b806060015173ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146131e6576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60a081015163ffffffff908116146130f9576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6132326139f1565b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa335b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390a1565b6132af613a5d565b600380547ffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffff001660011790557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a25861327d3390565b600061330e8383613aca565b90505b92915050565b61331f613a5d565b73ffffffffffffffffffffffffffffffffffffffff86163b61336d576040517f09ee12d500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6108fc8563ffffffff16108061338e5750600e5463ffffffff908116908616115b156133c5576040517f14c237fb00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b604051806101000160405280846bffffffffffffffffffffffff168152602001600073ffffffffffffffffffffffffffffffffffffffff16815260200160006bffffffffffffffffffffffff1681526020018573ffffffffffffffffffffffffffffffffffffffff1681526020018663ffffffff16815260200163ffffffff801681526020018773ffffffffffffffffffffffffffffffffffffffff1681526020018215158152506007600089815260200190815260200160002060008201518160000160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550602082015181600001600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060408201518160010160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550606082015181600101600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060808201518160020160006101000a81548163ffffffff021916908363ffffffff16021790555060a08201518160020160046101000a81548163ffffffff021916908363ffffffff16021790555060c08201518160020160086101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060e082015181600201601c6101000a81548160ff021916908315150217905550905050826bffffffffffffffffffffffff1660125461365091906150a3565b6012556000878152600b60209081526040909120835161367292850190614346565b5061367e600588613bbd565b5050505050505050565b3215610780576040517fb60ac5db0000000000000000000000000000000000000000000000000000000081526
0040160405180910390fd5b6137166040518060e00160405280600073ffffffffffffffffffffffffffffffffffffffff1681526020016000815260200160608152602001600081526020016000815260200160008152602001600081525090565b60008481526007602052604081206002015463ffffffff169080613738613bc9565b91509150600061374a84848489613dc4565b6040805160e08101825273ffffffffffffffffffffffffffffffffffffffff909b168b5260208b0199909952978901969096526bffffffffffffffffffffffff9096166060880152608087019190915260a086015250505060c082015290565b8260e00151156137e6576040517f514b6c2400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff821660009081526008602052604090206001015460ff16613848576040517fcfbacfd800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b82516bffffffffffffffffffffffff16811115613891576040517f356680b700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16836020015173ffffffffffffffffffffffffffffffffffffffff16141561076b576040517f06bc104000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff811633141561397b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610b46565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60035460ff16610780576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f5061757361626c653a206e6f74207061757365640000000000000000000000006044820152606401610b46565b60035460ff1615610780576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527
f5061757361626c653a20706175736564000000000000000000000000000000006044820152606401610b46565b60008181526001830160205260408120548015613bb3576000613aee600183615163565b8554909150600090613b0290600190615163565b9050818114613b67576000866000018281548110613b2257613b22615311565b9060005260206000200154905080876000018481548110613b4557613b45615311565b6000918252602080832090910192909255918252600188019052604090208390555b8554869080613b7857613b786152e2565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505050613311565b6000915050613311565b600061330e8383614235565b6000806000600d600001600f9054906101000a900462ffffff1662ffffff1690506000808263ffffffff161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b158015613c6057600080fd5b505afa158015613c74573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613c989190614b44565b509450909250849150508015613cbc5750613cb38242615163565b8463ffffffff16105b80613cc8575060008113155b15613cd757600f549550613cdb565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b158015613d4157600080fd5b505afa158015613d55573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613d799190614b44565b509450909250849150508015613d9d5750613d948242615163565b8463ffffffff16105b80613da9575060008113155b15613db8576010549450613dbc565b8094505b505050509091565b6040805161012081018252600d5463ffffffff80821683526401000000008083048216602085015268010000000000000000830462ffffff908116958501959095526b0100000000000000000000008304821660608501526f01000000000000000000000000000000830490941660808401527201000000000000000000000000000000000000820461ffff1660a08401819052740100000000000000000000000000000000000000009092046bffffffffffffffffffffffff1660c08401526
00e5480821660e0850152939093049092166101008201526000918290613eab9087615126565b9050838015613eb95750803a105b15613ec157503a5b6000613eed7f0000000000000000000000000000000000000000000000000000000000000000896150a3565b613ef79083615126565b8351909150600090613f139063ffffffff16633b9aca006150a3565b9050600060027f00000000000000000000000000000000000000000000000000000000000000006002811115613f4b57613f4b6152b3565b1415614093576040805160008152602081019091528715613faa576000366040518060800160405280604881526020016153b560489139604051602001613f9493929190614c77565b6040516020818303038152906040529050613fc9565b60405180610140016040528061011081526020016153fd610110913990505b6040517f49948e0e00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016906349948e0e9061403b908490600401614e4a565b60206040518083038186803b15801561405357600080fd5b505afa158015614067573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061408b9190614ac2565b91505061416e565b60017f000000000000000000000000000000000000000000000000000000000000000060028111156140c7576140c76152b3565b141561416e577f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663c6f7de0e6040518163ffffffff1660e01b815260040160206040518083038186803b15801561413357600080fd5b505afa158015614147573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061416b9190614ac2565b90505b8661418a57808560a0015161ffff166141879190615126565b90505b6000856020015163ffffffff1664e8d4a510006141a79190615126565b89846141b385886150a3565b6141c190633b9aca00615126565b6141cb9190615126565b6141d591906150eb565b6141df91906150a3565b90506b033b2e3c9fd0803ce8000000811115614227576040517f2ad7547a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b9a9950505050505050505050565b600081815260018301602052604081205461427c57508154600181810184556000848152602080822090930184905584548482528286019
093526040902091909155613311565b506000613311565b508054614290906151d3565b6000825580601f106142a0575050565b601f0160209004906000526020600020908101906130f991906143ba565b828054828255906000526020600020908101928215614336579160200282015b828111156143365781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8435161782556020909201916001909101906142de565b506143429291506143ba565b5090565b828054614352906151d3565b90600052602060002090601f0160209004810192826143745760008555614336565b82601f1061438d57805160ff1916838001178555614336565b82800160010185558215614336579182015b8281111561433657825182559160200191906001019061439f565b5b8082111561434257600081556001016143bb565b803573ffffffffffffffffffffffffffffffffffffffff811681146143f357600080fd5b919050565b60008083601f84011261440a57600080fd5b50813567ffffffffffffffff81111561442257600080fd5b6020830191508360208260051b850101111561443d57600080fd5b9250929050565b600082601f83011261445557600080fd5b8135602061446a61446583615039565b614fea565b80838252828201915082860187848660051b890101111561448a57600080fd5b60005b8581101561450a57813567ffffffffffffffff8111156144ac57600080fd5b8801603f81018a136144bd57600080fd5b8581013560406144cf6144658361505d565b8281528c828486010111156144e357600080fd5b828285018a830137600092810189019290925250855250928401929084019060010161448d565b5090979650505050505050565b600082601f83011261452857600080fd5b8135602061453861446583615039565b80838252828201915082860187848660081b890101111561455857600080fd5b6000805b868110156146125761010080848c031215614575578283fd5b61457d614fc0565b614586856146dd565b81526145938886016143cf565b8882015260406145a48187016146dd565b9082015260606145b58682016143cf565b9082015260806145c68682016146af565b9082015260a06145d78682016146af565b9082015260c06145e88682016143cf565b9082015260e0858101356145fb816153a6565b90820152865294860194929092019160010161455c565b509198975050505050505050565b60008083601f84011261463257600080fd5b50813567ffffffffffffffff81111561464a57600080fd5b602083019150836020828
50101111561443d57600080fd5b600082601f83011261467357600080fd5b81516146816144658261505d565b81815284602083860101111561469657600080fd5b6146a78260208301602087016151a7565b949350505050565b803563ffffffff811681146143f357600080fd5b805169ffffffffffffffffffff811681146143f357600080fd5b80356bffffffffffffffffffffffff811681146143f357600080fd5b60006020828403121561470b57600080fd5b61330e826143cf565b6000806040838503121561472757600080fd5b614730836143cf565b915061473e602084016143cf565b90509250929050565b6000806040838503121561475a57600080fd5b614763836143cf565b915060208301356004811061477757600080fd5b809150509250929050565b60008060008060006080868803121561479a57600080fd5b6147a3866143cf565b94506147b1602087016146af565b93506147bf604087016143cf565b9250606086013567ffffffffffffffff8111156147db57600080fd5b6147e788828901614620565b969995985093965092949392505050565b6000806000806040858703121561480e57600080fd5b843567ffffffffffffffff8082111561482657600080fd5b614832888389016143f8565b9096509450602087013591508082111561484b57600080fd5b50614858878288016143f8565b95989497509550505050565b60008060006040848603121561487957600080fd5b833567ffffffffffffffff81111561489057600080fd5b61489c868287016143f8565b90945092506148af9050602085016143cf565b90509250925092565b6000806000606084860312156148cd57600080fd5b833567ffffffffffffffff808211156148e557600080fd5b818601915086601f8301126148f957600080fd5b8135602061490961446583615039565b8083825282820191508286018b848660051b890101111561492957600080fd5b600096505b8487101561494c57803583526001969096019591830191830161492e565b509750508701359250508082111561496357600080fd5b61496f87838801614517565b9350604086013591508082111561498557600080fd5b5061499286828701614444565b9150509250925092565b6000602082840312156149ae57600080fd5b81516149b9816153a6565b9392505050565b600080604083850312156149d357600080fd5b82516149de816153a6565b602084015190925067ffffffffffffffff8111156149fb57600080fd5b614a0785828601614662565b9150509250929050565b60008060208385031215614a2457600080fd5b823567ffffffffffffffff811115614a3b57600080fd5b614
a4785828601614620565b90969095509350505050565b600060208284031215614a6557600080fd5b815167ffffffffffffffff811115614a7c57600080fd5b6146a784828501614662565b600060208284031215614a9a57600080fd5b8151600381106149b957600080fd5b600060208284031215614abb57600080fd5b5035919050565b600060208284031215614ad457600080fd5b5051919050565b60008060408385031215614aee57600080fd5b8235915061473e602084016143cf565b60008060408385031215614b1157600080fd5b8235915061473e602084016146af565b60008060408385031215614b3457600080fd5b8235915061473e602084016146dd565b600080600080600060a08688031215614b5c57600080fd5b614b65866146c3565b9450602086015193506040860151925060608601519150614b88608087016146c3565b90509295509295909350565b8183526000602080850194508260005b85811015614bdd5773ffffffffffffffffffffffffffffffffffffffff614bca836143cf565b1687529582019590820190600101614ba4565b509495945050505050565b6000815180845260208085019450848260051b860182860160005b8581101561450a578383038952614c1b838351614c2d565b98850198925090840190600101614c03565b60008151808452614c458160208601602086016151a7565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b828482376000838201600081528351614c948183602088016151a7565b0195945050505050565b60008251614cb08184602087016151a7565b9190910192915050565b604081526000614cce604083018688614b94565b8281036020840152614ce1818587614b94565b979650505050505050565b60006060808352858184015260807f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff871115614d2757600080fd5b8660051b808983870137808501905081810160008152602083878403018188015281895180845260a093508385019150828b01945060005b81811015614e265785516bffffffffffffffffffffffff80825116855273ffffffffffffffffffffffffffffffffffffffff868301511686860152604081818401511681870152505088810151614dcd8a86018273ffffffffffffffffffffffffffffffffffffffff169052565b508781015163ffffffff908116858a015286820151168685015260c08082015173ffffffffffffffffffffffffffffffffffffffff169085015260e0908101511515908401529483019461010090920191600101614d5f5
65b50508781036040890152614e3a818a614be8565b9c9b505050505050505050505050565b60208152600061330e6020830184614c2d565b60a081526000614e7060a0830188614c2d565b90508560208301528460408301528360608301528260808301529695505050505050565b600060208083526000845481600182811c915080831680614eb657607f831692505b858310811415614eed577f4e487b710000000000000000000000000000000000000000000000000000000085526022600452602485fd5b878601838152602001818015614f0a5760018114614f3957614f64565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00861682528782019650614f64565b60008b81526020902060005b86811015614f5e57815484820152908501908901614f45565b83019750505b50949998505050505050505050565b60208101614f808361536f565b91905290565b614f8f8461536f565b838152614f9b8361536f565b826020820152606060408201526000614fb76060830184614c2d565b95945050505050565b604051610100810167ffffffffffffffff81118282101715614fe457614fe4615340565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff8111828210171561503157615031615340565b604052919050565b600067ffffffffffffffff82111561505357615053615340565b5060051b60200190565b600067ffffffffffffffff82111561507757615077615340565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600082198211156150b6576150b6615284565b500190565b60006bffffffffffffffffffffffff8083168185168083038211156150e2576150e2615284565b01949350505050565b600082615121577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff048311821515161561515e5761515e615284565b500290565b60008282101561517557615175615284565b500390565b60006bffffffffffffffffffffffff8381169083168181101561519f5761519f615284565b039392505050565b60005b838110156151c25781810151838201526020016151aa565b83811115611ce15750506000910152565b600181811c908216806151e757607f821691505b60208210811415615221577f4e487b710000000000000000000000000000000000000000000
0000000000000600052602260045260246000fd5b50919050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82141561525957615259615284565b5060010190565b600063ffffffff8083168181141561527a5761527a615284565b6001019392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600381106130f9577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b80151581146130f957600080fdfe3078666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666663078666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000806000a", +} + +var KeeperRegistryLogicABI = KeeperRegistryLogicMetaData.ABI + +var KeeperRegistryLogicBin = KeeperRegistryLogicMetaData.Bin + +func DeployKeeperRegistryLogic(auth *bind.TransactOpts, backend bind.ContractBackend, paymentModel uint8, registryGasOverhead *big.Int, link common.Address, linkEthFeed common.Address, fastGasFeed common.Address) (common.Address, *types.Transaction, *KeeperRegistryLogic, error) { + parsed, err := 
KeeperRegistryLogicMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistryLogicBin), backend, paymentModel, registryGasOverhead, link, linkEthFeed, fastGasFeed) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistryLogic{address: address, abi: *parsed, KeeperRegistryLogicCaller: KeeperRegistryLogicCaller{contract: contract}, KeeperRegistryLogicTransactor: KeeperRegistryLogicTransactor{contract: contract}, KeeperRegistryLogicFilterer: KeeperRegistryLogicFilterer{contract: contract}}, nil +} + +type KeeperRegistryLogic struct { + address common.Address + abi abi.ABI + KeeperRegistryLogicCaller + KeeperRegistryLogicTransactor + KeeperRegistryLogicFilterer +} + +type KeeperRegistryLogicCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistryLogicTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistryLogicFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistryLogicSession struct { + Contract *KeeperRegistryLogic + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistryLogicCallerSession struct { + Contract *KeeperRegistryLogicCaller + CallOpts bind.CallOpts +} + +type KeeperRegistryLogicTransactorSession struct { + Contract *KeeperRegistryLogicTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistryLogicRaw struct { + Contract *KeeperRegistryLogic +} + +type KeeperRegistryLogicCallerRaw struct { + Contract *KeeperRegistryLogicCaller +} + +type KeeperRegistryLogicTransactorRaw struct { + Contract *KeeperRegistryLogicTransactor +} + +func NewKeeperRegistryLogic(address common.Address, backend bind.ContractBackend) (*KeeperRegistryLogic, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryLogicABI)) + if 
err != nil { + return nil, err + } + contract, err := bindKeeperRegistryLogic(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistryLogic{address: address, abi: abi, KeeperRegistryLogicCaller: KeeperRegistryLogicCaller{contract: contract}, KeeperRegistryLogicTransactor: KeeperRegistryLogicTransactor{contract: contract}, KeeperRegistryLogicFilterer: KeeperRegistryLogicFilterer{contract: contract}}, nil +} + +func NewKeeperRegistryLogicCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryLogicCaller, error) { + contract, err := bindKeeperRegistryLogic(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicCaller{contract: contract}, nil +} + +func NewKeeperRegistryLogicTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistryLogicTransactor, error) { + contract, err := bindKeeperRegistryLogic(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicTransactor{contract: contract}, nil +} + +func NewKeeperRegistryLogicFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistryLogicFilterer, error) { + contract, err := bindKeeperRegistryLogic(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicFilterer{contract: contract}, nil +} + +func bindKeeperRegistryLogic(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistryLogicMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryLogic.Contract.KeeperRegistryLogicCaller.contract.Call(opts, result, 
method, params...) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.KeeperRegistryLogicTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.KeeperRegistryLogicTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryLogic.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.contract.Transact(opts, method, params...) 
+} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) ARBNITROORACLE(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "ARB_NITRO_ORACLE") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) ARBNITROORACLE() (common.Address, error) { + return _KeeperRegistryLogic.Contract.ARBNITROORACLE(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) ARBNITROORACLE() (common.Address, error) { + return _KeeperRegistryLogic.Contract.ARBNITROORACLE(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) FASTGASFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "FAST_GAS_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) FASTGASFEED() (common.Address, error) { + return _KeeperRegistryLogic.Contract.FASTGASFEED(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) FASTGASFEED() (common.Address, error) { + return _KeeperRegistryLogic.Contract.FASTGASFEED(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) PLI() (common.Address, error) { + return 
_KeeperRegistryLogic.Contract.PLI(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) PLI() (common.Address, error) { + return _KeeperRegistryLogic.Contract.PLI(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) PLIETHFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "PLI_ETH_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) PLIETHFEED() (common.Address, error) { + return _KeeperRegistryLogic.Contract.PLIETHFEED(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) PLIETHFEED() (common.Address, error) { + return _KeeperRegistryLogic.Contract.PLIETHFEED(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) OPTIMISMORACLE(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "OPTIMISM_ORACLE") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) OPTIMISMORACLE() (common.Address, error) { + return _KeeperRegistryLogic.Contract.OPTIMISMORACLE(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) OPTIMISMORACLE() (common.Address, error) { + return _KeeperRegistryLogic.Contract.OPTIMISMORACLE(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) PAYMENTMODEL(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "PAYMENT_MODEL") + + if err != nil { + return 
*new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) PAYMENTMODEL() (uint8, error) { + return _KeeperRegistryLogic.Contract.PAYMENTMODEL(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) PAYMENTMODEL() (uint8, error) { + return _KeeperRegistryLogic.Contract.PAYMENTMODEL(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) REGISTRYGASOVERHEAD(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "REGISTRY_GAS_OVERHEAD") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) REGISTRYGASOVERHEAD() (*big.Int, error) { + return _KeeperRegistryLogic.Contract.REGISTRYGASOVERHEAD(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) REGISTRYGASOVERHEAD() (*big.Int, error) { + return _KeeperRegistryLogic.Contract.REGISTRYGASOVERHEAD(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) Owner() (common.Address, error) { + return _KeeperRegistryLogic.Contract.Owner(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) Owner() (common.Address, error) { + return _KeeperRegistryLogic.Contract.Owner(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic 
*KeeperRegistryLogicCaller) Paused(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "paused") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) Paused() (bool, error) { + return _KeeperRegistryLogic.Contract.Paused(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) Paused() (bool, error) { + return _KeeperRegistryLogic.Contract.Paused(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "acceptOwnership") +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AcceptOwnership(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AcceptOwnership(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) AcceptPayeeship(opts *bind.TransactOpts, keeper common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "acceptPayeeship", keeper) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) AcceptPayeeship(keeper common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AcceptPayeeship(&_KeeperRegistryLogic.TransactOpts, keeper) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) AcceptPayeeship(keeper common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AcceptPayeeship(&_KeeperRegistryLogic.TransactOpts, keeper) +} + +func (_KeeperRegistryLogic 
*KeeperRegistryLogicTransactor) AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "acceptUpkeepAdmin", id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AcceptUpkeepAdmin(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AcceptUpkeepAdmin(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "addFunds", id, amount) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AddFunds(&_KeeperRegistryLogic.TransactOpts, id, amount) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AddFunds(&_KeeperRegistryLogic.TransactOpts, id, amount) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "cancelUpkeep", id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.CancelUpkeep(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.CancelUpkeep(&_KeeperRegistryLogic.TransactOpts, id) 
+} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) CheckUpkeep(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "checkUpkeep", id, from) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) CheckUpkeep(id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.CheckUpkeep(&_KeeperRegistryLogic.TransactOpts, id, from) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) CheckUpkeep(id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.CheckUpkeep(&_KeeperRegistryLogic.TransactOpts, id, from) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "migrateUpkeeps", ids, destination) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.MigrateUpkeeps(&_KeeperRegistryLogic.TransactOpts, ids, destination) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.MigrateUpkeeps(&_KeeperRegistryLogic.TransactOpts, ids, destination) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "pause") +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) Pause() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.Pause(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) Pause() (*types.Transaction, error) 
{ + return _KeeperRegistryLogic.Contract.Pause(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "receiveUpkeeps", encodedUpkeeps) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.ReceiveUpkeeps(&_KeeperRegistryLogic.TransactOpts, encodedUpkeeps) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.ReceiveUpkeeps(&_KeeperRegistryLogic.TransactOpts, encodedUpkeeps) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "recoverFunds") +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.RecoverFunds(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.RecoverFunds(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "registerUpkeep", target, gasLimit, admin, checkData) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + return 
_KeeperRegistryLogic.Contract.RegisterUpkeep(&_KeeperRegistryLogic.TransactOpts, target, gasLimit, admin, checkData) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.RegisterUpkeep(&_KeeperRegistryLogic.TransactOpts, target, gasLimit, admin, checkData) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) SetKeepers(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "setKeepers", keepers, payees) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) SetKeepers(keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.SetKeepers(&_KeeperRegistryLogic.TransactOpts, keepers, payees) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) SetKeepers(keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.SetKeepers(&_KeeperRegistryLogic.TransactOpts, keepers, payees) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "setPeerRegistryMigrationPermission", peer, permission) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.SetPeerRegistryMigrationPermission(&_KeeperRegistryLogic.TransactOpts, peer, permission) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, 
error) { + return _KeeperRegistryLogic.Contract.SetPeerRegistryMigrationPermission(&_KeeperRegistryLogic.TransactOpts, peer, permission) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "setUpkeepGasLimit", id, gasLimit) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.SetUpkeepGasLimit(&_KeeperRegistryLogic.TransactOpts, id, gasLimit) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.SetUpkeepGasLimit(&_KeeperRegistryLogic.TransactOpts, id, gasLimit) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "transferOwnership", to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.TransferOwnership(&_KeeperRegistryLogic.TransactOpts, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.TransferOwnership(&_KeeperRegistryLogic.TransactOpts, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) TransferPayeeship(opts *bind.TransactOpts, keeper common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "transferPayeeship", keeper, proposed) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) TransferPayeeship(keeper common.Address, proposed 
common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.TransferPayeeship(&_KeeperRegistryLogic.TransactOpts, keeper, proposed) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) TransferPayeeship(keeper common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.TransferPayeeship(&_KeeperRegistryLogic.TransactOpts, keeper, proposed) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "transferUpkeepAdmin", id, proposed) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.TransferUpkeepAdmin(&_KeeperRegistryLogic.TransactOpts, id, proposed) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.TransferUpkeepAdmin(&_KeeperRegistryLogic.TransactOpts, id, proposed) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "unpause") +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) Unpause() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.Unpause(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) Unpause() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.Unpause(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) { + return 
_KeeperRegistryLogic.contract.Transact(opts, "withdrawFunds", id, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.WithdrawFunds(&_KeeperRegistryLogic.TransactOpts, id, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.WithdrawFunds(&_KeeperRegistryLogic.TransactOpts, id, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "withdrawOwnerFunds") +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.WithdrawOwnerFunds(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.WithdrawOwnerFunds(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "withdrawPayment", from, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.WithdrawPayment(&_KeeperRegistryLogic.TransactOpts, from, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.WithdrawPayment(&_KeeperRegistryLogic.TransactOpts, from, to) +} + +type 
KeeperRegistryLogicConfigSetIterator struct { + Event *KeeperRegistryLogicConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicConfigSet struct { + Config Config + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryLogicConfigSetIterator, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicConfigSetIterator{contract: _KeeperRegistryLogic.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicConfigSet) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select 
{ + case log := <-logs: + + event := new(KeeperRegistryLogicConfigSet) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseConfigSet(log types.Log) (*KeeperRegistryLogicConfigSet, error) { + event := new(KeeperRegistryLogicConfigSet) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicFundsAddedIterator struct { + Event *KeeperRegistryLogicFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicFundsAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic 
*KeeperRegistryLogicFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryLogicFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicFundsAddedIterator{contract: _KeeperRegistryLogic.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicFundsAdded) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseFundsAdded(log types.Log) (*KeeperRegistryLogicFundsAdded, error) { + event := new(KeeperRegistryLogicFundsAdded) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + 
event.Raw = log + return event, nil +} + +type KeeperRegistryLogicFundsWithdrawnIterator struct { + Event *KeeperRegistryLogicFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicFundsWithdrawnIterator{contract: _KeeperRegistryLogic.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- 
*KeeperRegistryLogicFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicFundsWithdrawn) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseFundsWithdrawn(log types.Log) (*KeeperRegistryLogicFundsWithdrawn, error) { + event := new(KeeperRegistryLogicFundsWithdrawn) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicKeepersUpdatedIterator struct { + Event *KeeperRegistryLogicKeepersUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicKeepersUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicKeepersUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicKeepersUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } 
+ it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicKeepersUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicKeepersUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicKeepersUpdated struct { + Keepers []common.Address + Payees []common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterKeepersUpdated(opts *bind.FilterOpts) (*KeeperRegistryLogicKeepersUpdatedIterator, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "KeepersUpdated") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicKeepersUpdatedIterator{contract: _KeeperRegistryLogic.contract, event: "KeepersUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchKeepersUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicKeepersUpdated) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "KeepersUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicKeepersUpdated) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "KeepersUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseKeepersUpdated(log types.Log) (*KeeperRegistryLogicKeepersUpdated, error) { + event := new(KeeperRegistryLogicKeepersUpdated) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "KeepersUpdated", log); err != nil { + return 
nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicOwnerFundsWithdrawnIterator struct { + Event *KeeperRegistryLogicOwnerFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicOwnerFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryLogicOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicOwnerFundsWithdrawnIterator{contract: _KeeperRegistryLogic.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, 
err := _KeeperRegistryLogic.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicOwnerFundsWithdrawn) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryLogicOwnerFundsWithdrawn, error) { + event := new(KeeperRegistryLogicOwnerFundsWithdrawn) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicOwnershipTransferRequestedIterator struct { + Event *KeeperRegistryLogicOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + 
return it.Next() + } +} + +func (it *KeeperRegistryLogicOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicOwnershipTransferRequestedIterator{contract: _KeeperRegistryLogic.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicOwnershipTransferRequested) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, 
"OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryLogicOwnershipTransferRequested, error) { + event := new(KeeperRegistryLogicOwnershipTransferRequested) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicOwnershipTransferredIterator struct { + Event *KeeperRegistryLogicOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func 
(_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicOwnershipTransferredIterator{contract: _KeeperRegistryLogic.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicOwnershipTransferred) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryLogicOwnershipTransferred, error) { + event := 
new(KeeperRegistryLogicOwnershipTransferred) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicPausedIterator struct { + Event *KeeperRegistryLogicPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicPaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryLogicPausedIterator, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicPausedIterator{contract: _KeeperRegistryLogic.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPaused) (event.Subscription, error) { + + logs, sub, err := 
_KeeperRegistryLogic.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicPaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParsePaused(log types.Log) (*KeeperRegistryLogicPaused, error) { + event := new(KeeperRegistryLogicPaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicPayeeshipTransferRequestedIterator struct { + Event *KeeperRegistryLogicPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicPayeeshipTransferRequestedIterator) Error() 
error { + return it.fail +} + +func (it *KeeperRegistryLogicPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicPayeeshipTransferRequested struct { + Keeper common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicPayeeshipTransferRequestedIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "PayeeshipTransferRequested", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicPayeeshipTransferRequestedIterator{contract: _KeeperRegistryLogic.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPayeeshipTransferRequested, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "PayeeshipTransferRequested", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicPayeeshipTransferRequested) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryLogicPayeeshipTransferRequested, error) { + event := new(KeeperRegistryLogicPayeeshipTransferRequested) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicPayeeshipTransferredIterator struct { + Event *KeeperRegistryLogicPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicPayeeshipTransferredIterator) Error() error { + return 
it.fail +} + +func (it *KeeperRegistryLogicPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicPayeeshipTransferred struct { + Keeper common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicPayeeshipTransferredIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "PayeeshipTransferred", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicPayeeshipTransferredIterator{contract: _KeeperRegistryLogic.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPayeeshipTransferred, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "PayeeshipTransferred", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for 
{ + select { + case log := <-logs: + + event := new(KeeperRegistryLogicPayeeshipTransferred) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryLogicPayeeshipTransferred, error) { + event := new(KeeperRegistryLogicPayeeshipTransferred) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicPaymentWithdrawnIterator struct { + Event *KeeperRegistryLogicPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
KeeperRegistryLogicPaymentWithdrawn struct { + Keeper common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, keeper []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryLogicPaymentWithdrawnIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "PaymentWithdrawn", keeperRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicPaymentWithdrawnIterator{contract: _KeeperRegistryLogic.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPaymentWithdrawn, keeper []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "PaymentWithdrawn", keeperRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicPaymentWithdrawn) + if err := 
_KeeperRegistryLogic.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryLogicPaymentWithdrawn, error) { + event := new(KeeperRegistryLogicPaymentWithdrawn) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUnpausedIterator struct { + Event *KeeperRegistryLogicUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUnpaused(opts *bind.FilterOpts) 
(*KeeperRegistryLogicUnpausedIterator, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUnpausedIterator{contract: _KeeperRegistryLogic.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUnpaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUnpaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUnpaused(log types.Log) (*KeeperRegistryLogicUnpaused, error) { + event := new(KeeperRegistryLogicUnpaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepAdminTransferRequestedIterator struct { + Event *KeeperRegistryLogicUpkeepAdminTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepAdminTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + 
it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepAdminTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepAdminTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepAdminTransferRequested struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicUpkeepAdminTransferRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepAdminTransferRequestedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepAdminTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + 
for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepAdminTransferRequested) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryLogicUpkeepAdminTransferRequested, error) { + event := new(KeeperRegistryLogicUpkeepAdminTransferRequested) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepAdminTransferredIterator struct { + Event *KeeperRegistryLogicUpkeepAdminTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepAdminTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return 
true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepAdminTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepAdminTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepAdminTransferred struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicUpkeepAdminTransferredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepAdminTransferredIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepAdminTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + 
fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepAdminTransferred) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryLogicUpkeepAdminTransferred, error) { + event := new(KeeperRegistryLogicUpkeepAdminTransferred) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepCanceledIterator struct { + Event *KeeperRegistryLogicUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + 
it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryLogicUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepCanceledIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event 
:= new(KeeperRegistryLogicUpkeepCanceled) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepCanceled(log types.Log) (*KeeperRegistryLogicUpkeepCanceled, error) { + event := new(KeeperRegistryLogicUpkeepCanceled) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepCheckDataUpdatedIterator struct { + Event *KeeperRegistryLogicUpkeepCheckDataUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepCheckDataUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepCheckDataUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepCheckDataUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepCheckDataUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepCheckDataUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepCheckDataUpdated struct { + Id 
*big.Int + NewCheckData []byte + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepCheckDataUpdated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepCheckDataUpdatedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepCheckDataUpdated", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepCheckDataUpdatedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepCheckDataUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepCheckDataUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepCheckDataUpdated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepCheckDataUpdated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepCheckDataUpdated) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepCheckDataUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepCheckDataUpdated(log types.Log) (*KeeperRegistryLogicUpkeepCheckDataUpdated, error) { + event := new(KeeperRegistryLogicUpkeepCheckDataUpdated) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepCheckDataUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil 
+} + +type KeeperRegistryLogicUpkeepGasLimitSetIterator struct { + Event *KeeperRegistryLogicUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepGasLimitSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepGasLimitSetIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepGasLimitSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- 
*KeeperRegistryLogicUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepGasLimitSet) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryLogicUpkeepGasLimitSet, error) { + event := new(KeeperRegistryLogicUpkeepGasLimitSet) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepMigratedIterator struct { + Event *KeeperRegistryLogicUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = 
err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepMigrated struct { + Id *big.Int + RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepMigratedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepMigrated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepMigrated) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func 
(_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepMigrated(log types.Log) (*KeeperRegistryLogicUpkeepMigrated, error) { + event := new(KeeperRegistryLogicUpkeepMigrated) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepPausedIterator struct { + Event *KeeperRegistryLogicUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepPaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepPausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return 
&KeeperRegistryLogicUpkeepPausedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepPaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepPaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepPaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepPaused(log types.Log) (*KeeperRegistryLogicUpkeepPaused, error) { + event := new(KeeperRegistryLogicUpkeepPaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepPerformedIterator struct { + Event *KeeperRegistryLogicUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true 
+ + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepPerformed struct { + Id *big.Int + Success bool + From common.Address + Payment *big.Int + PerformData []byte + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool, from []common.Address) (*KeeperRegistryLogicUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepPerformedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepPerformed, id []*big.Int, success []bool, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule 
= append(successRule, successItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepPerformed) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepPerformed(log types.Log) (*KeeperRegistryLogicUpkeepPerformed, error) { + event := new(KeeperRegistryLogicUpkeepPerformed) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepReceivedIterator struct { + Event *KeeperRegistryLogicUpkeepReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepReceivedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepReceived struct { + Id *big.Int + StartingBalance *big.Int + ImportedFrom common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepReceivedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepReceivedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepReceived", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepReceived, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepReceived) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic 
*KeeperRegistryLogicFilterer) ParseUpkeepReceived(log types.Log) (*KeeperRegistryLogicUpkeepReceived, error) { + event := new(KeeperRegistryLogicUpkeepReceived) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepRegisteredIterator struct { + Event *KeeperRegistryLogicUpkeepRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepRegistered struct { + Id *big.Int + ExecuteGas uint32 + Admin common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != 
nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepRegisteredIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepRegistered) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepRegistered(log types.Log) (*KeeperRegistryLogicUpkeepRegistered, error) { + event := new(KeeperRegistryLogicUpkeepRegistered) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepUnpausedIterator struct { + Event *KeeperRegistryLogicUpkeepUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err 
!= nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepUnpaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepUnpausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepUnpausedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepUnpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepUnpaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepUnpaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return err 
+ } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryLogicUpkeepUnpaused, error) { + event := new(KeeperRegistryLogicUpkeepUnpaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogic) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistryLogic.abi.Events["ConfigSet"].ID: + return _KeeperRegistryLogic.ParseConfigSet(log) + case _KeeperRegistryLogic.abi.Events["FundsAdded"].ID: + return _KeeperRegistryLogic.ParseFundsAdded(log) + case _KeeperRegistryLogic.abi.Events["FundsWithdrawn"].ID: + return _KeeperRegistryLogic.ParseFundsWithdrawn(log) + case _KeeperRegistryLogic.abi.Events["KeepersUpdated"].ID: + return _KeeperRegistryLogic.ParseKeepersUpdated(log) + case _KeeperRegistryLogic.abi.Events["OwnerFundsWithdrawn"].ID: + return _KeeperRegistryLogic.ParseOwnerFundsWithdrawn(log) + case _KeeperRegistryLogic.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistryLogic.ParseOwnershipTransferRequested(log) + case _KeeperRegistryLogic.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistryLogic.ParseOwnershipTransferred(log) + case _KeeperRegistryLogic.abi.Events["Paused"].ID: + return _KeeperRegistryLogic.ParsePaused(log) + case _KeeperRegistryLogic.abi.Events["PayeeshipTransferRequested"].ID: + return _KeeperRegistryLogic.ParsePayeeshipTransferRequested(log) + case _KeeperRegistryLogic.abi.Events["PayeeshipTransferred"].ID: + return _KeeperRegistryLogic.ParsePayeeshipTransferred(log) + case _KeeperRegistryLogic.abi.Events["PaymentWithdrawn"].ID: + return 
_KeeperRegistryLogic.ParsePaymentWithdrawn(log) + case _KeeperRegistryLogic.abi.Events["Unpaused"].ID: + return _KeeperRegistryLogic.ParseUnpaused(log) + case _KeeperRegistryLogic.abi.Events["UpkeepAdminTransferRequested"].ID: + return _KeeperRegistryLogic.ParseUpkeepAdminTransferRequested(log) + case _KeeperRegistryLogic.abi.Events["UpkeepAdminTransferred"].ID: + return _KeeperRegistryLogic.ParseUpkeepAdminTransferred(log) + case _KeeperRegistryLogic.abi.Events["UpkeepCanceled"].ID: + return _KeeperRegistryLogic.ParseUpkeepCanceled(log) + case _KeeperRegistryLogic.abi.Events["UpkeepCheckDataUpdated"].ID: + return _KeeperRegistryLogic.ParseUpkeepCheckDataUpdated(log) + case _KeeperRegistryLogic.abi.Events["UpkeepGasLimitSet"].ID: + return _KeeperRegistryLogic.ParseUpkeepGasLimitSet(log) + case _KeeperRegistryLogic.abi.Events["UpkeepMigrated"].ID: + return _KeeperRegistryLogic.ParseUpkeepMigrated(log) + case _KeeperRegistryLogic.abi.Events["UpkeepPaused"].ID: + return _KeeperRegistryLogic.ParseUpkeepPaused(log) + case _KeeperRegistryLogic.abi.Events["UpkeepPerformed"].ID: + return _KeeperRegistryLogic.ParseUpkeepPerformed(log) + case _KeeperRegistryLogic.abi.Events["UpkeepReceived"].ID: + return _KeeperRegistryLogic.ParseUpkeepReceived(log) + case _KeeperRegistryLogic.abi.Events["UpkeepRegistered"].ID: + return _KeeperRegistryLogic.ParseUpkeepRegistered(log) + case _KeeperRegistryLogic.abi.Events["UpkeepUnpaused"].ID: + return _KeeperRegistryLogic.ParseUpkeepUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistryLogicConfigSet) Topic() common.Hash { + return common.HexToHash("0xfe125a41957477226ba20f85ef30a4024ea3bb8d066521ddc16df3f2944de325") +} + +func (KeeperRegistryLogicFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (KeeperRegistryLogicFundsWithdrawn) Topic() common.Hash { + 
return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (KeeperRegistryLogicKeepersUpdated) Topic() common.Hash { + return common.HexToHash("0x056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f") +} + +func (KeeperRegistryLogicOwnerFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1") +} + +func (KeeperRegistryLogicOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistryLogicOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistryLogicPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (KeeperRegistryLogicPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (KeeperRegistryLogicPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (KeeperRegistryLogicPaymentWithdrawn) Topic() common.Hash { + return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (KeeperRegistryLogicUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (KeeperRegistryLogicUpkeepAdminTransferRequested) Topic() common.Hash { + return common.HexToHash("0xb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b35") +} + +func (KeeperRegistryLogicUpkeepAdminTransferred) Topic() common.Hash { + return common.HexToHash("0x5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c") +} + +func 
(KeeperRegistryLogicUpkeepCanceled) Topic() common.Hash { + return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (KeeperRegistryLogicUpkeepCheckDataUpdated) Topic() common.Hash { + return common.HexToHash("0x7b778136e5211932b51a145badd01959415e79e051a933604b3d323f862dcabf") +} + +func (KeeperRegistryLogicUpkeepGasLimitSet) Topic() common.Hash { + return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c") +} + +func (KeeperRegistryLogicUpkeepMigrated) Topic() common.Hash { + return common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff") +} + +func (KeeperRegistryLogicUpkeepPaused) Topic() common.Hash { + return common.HexToHash("0x8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f") +} + +func (KeeperRegistryLogicUpkeepPerformed) Topic() common.Hash { + return common.HexToHash("0xcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6") +} + +func (KeeperRegistryLogicUpkeepReceived) Topic() common.Hash { + return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71") +} + +func (KeeperRegistryLogicUpkeepRegistered) Topic() common.Hash { + return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + +func (KeeperRegistryLogicUpkeepUnpaused) Topic() common.Hash { + return common.HexToHash("0x7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a47456") +} + +func (_KeeperRegistryLogic *KeeperRegistryLogic) Address() common.Address { + return _KeeperRegistryLogic.address +} + +type KeeperRegistryLogicInterface interface { + ARBNITROORACLE(opts *bind.CallOpts) (common.Address, error) + + FASTGASFEED(opts *bind.CallOpts) (common.Address, error) + + PLI(opts *bind.CallOpts) (common.Address, error) + + PLIETHFEED(opts *bind.CallOpts) (common.Address, error) + + OPTIMISMORACLE(opts *bind.CallOpts) (common.Address, error) + + PAYMENTMODEL(opts *bind.CallOpts) 
(uint8, error) + + REGISTRYGASOVERHEAD(opts *bind.CallOpts) (*big.Int, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Paused(opts *bind.CallOpts) (bool, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptPayeeship(opts *bind.TransactOpts, keeper common.Address) (*types.Transaction, error) + + AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) + + CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + CheckUpkeep(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) + + MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) (*types.Transaction, error) + + ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) + + SetKeepers(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error) + + SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) + + SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + TransferPayeeship(opts *bind.TransactOpts, keeper common.Address, proposed common.Address) (*types.Transaction, error) + + TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawFunds(opts 
*bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) + + WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryLogicConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*KeeperRegistryLogicConfigSet, error) + + FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryLogicFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*KeeperRegistryLogicFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicFundsWithdrawn, id []*big.Int) (event.Subscription, error) + + ParseFundsWithdrawn(log types.Log) (*KeeperRegistryLogicFundsWithdrawn, error) + + FilterKeepersUpdated(opts *bind.FilterOpts) (*KeeperRegistryLogicKeepersUpdatedIterator, error) + + WatchKeepersUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicKeepersUpdated) (event.Subscription, error) + + ParseKeepersUpdated(log types.Log) (*KeeperRegistryLogicKeepersUpdated, error) + + FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryLogicOwnerFundsWithdrawnIterator, error) + + WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicOwnerFundsWithdrawn) (event.Subscription, error) + + ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryLogicOwnerFundsWithdrawn, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) 
(*KeeperRegistryLogicOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryLogicOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistryLogicOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryLogicPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*KeeperRegistryLogicPaused, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPayeeshipTransferRequested, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryLogicPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPayeeshipTransferred, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) 
(*KeeperRegistryLogicPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, keeper []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryLogicPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPaymentWithdrawn, keeper []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryLogicPaymentWithdrawn, error) + + FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryLogicUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*KeeperRegistryLogicUnpaused, error) + + FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicUpkeepAdminTransferRequestedIterator, error) + + WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryLogicUpkeepAdminTransferRequested, error) + + FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicUpkeepAdminTransferredIterator, error) + + WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryLogicUpkeepAdminTransferred, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryLogicUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepCanceled, id []*big.Int, 
atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*KeeperRegistryLogicUpkeepCanceled, error) + + FilterUpkeepCheckDataUpdated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepCheckDataUpdatedIterator, error) + + WatchUpkeepCheckDataUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepCheckDataUpdated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepCheckDataUpdated(log types.Log) (*KeeperRegistryLogicUpkeepCheckDataUpdated, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepGasLimitSetIterator, error) + + WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryLogicUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepMigrated(log types.Log) (*KeeperRegistryLogicUpkeepMigrated, error) + + FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepPausedIterator, error) + + WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepPaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPaused(log types.Log) (*KeeperRegistryLogicUpkeepPaused, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool, from []common.Address) (*KeeperRegistryLogicUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepPerformed, id []*big.Int, success []bool, from []common.Address) (event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*KeeperRegistryLogicUpkeepPerformed, error) + + FilterUpkeepReceived(opts *bind.FilterOpts, id 
[]*big.Int) (*KeeperRegistryLogicUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*KeeperRegistryLogicUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*KeeperRegistryLogicUpkeepRegistered, error) + + FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepUnpausedIterator, error) + + WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepUnpaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryLogicUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_registry_logic2_0/keeper_registry_logic2_0.go b/core/gethwrappers/generated/keeper_registry_logic2_0/keeper_registry_logic2_0.go new file mode 100644 index 00000000..a89b5a42 --- /dev/null +++ b/core/gethwrappers/generated/keeper_registry_logic2_0/keeper_registry_logic2_0.go @@ -0,0 +1,4526 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package keeper_registry_logic2_0 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var KeeperRegistryLogicMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"enumKeeperRegistryBase2_0.Mode\",\"name\":\"mode\",\"type\":\"uint8\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkNativeFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"fastGasFeed\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CheckDataExceedsLimit\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ConfigDigestMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfFaultyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientFunds\",\"type\":\"error\"},{\"inpu
ts\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidReport\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxCheckDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxPerformDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnchainConfigNonEmpty\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveTransmitters\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyPausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUnpausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReentrantCall\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistryPaused\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedTransmitter\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error\"},{\"inp
uts\":[],\"name\":\"TooManyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepAlreadyExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepCancelled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"CancelledUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"InsufficientFundsUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"addr
ess\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"PayeesUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"ReorgedUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":t
rue,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"StaleUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"UpkeepCheckDataUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"i
ndexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepOffchainConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepPaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"checkBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasOverhead\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalPayment\",\"type\":\"uint96\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"n
ame\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepUnpaused\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"acceptUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"cancelUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"enumUpkeepFailureReason\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fastGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkNative\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFastGasFeedAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\
"function\"},{\"inputs\":[],\"name\":\"getLinkNativeFeedAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMode\",\"outputs\":[{\"internalType\":\"enumKeeperRegistryBase2_0.Mode\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"ids\",\"type\":\"uint256[]\"},{\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"migrateUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"pauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"encodedUpkeeps\",\"type\":\"bytes\"}],\"name\":\"receiveUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\
"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"setPayees\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"},{\"internalType\":\"enumKeeperRegistryBase2_0.MigrationPermission\",\"name\":\"permission\",\"type\":\"uint8\"}],\"name\":\"setPeerRegistryMigrationPermission\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"setUpkeepGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"config\",\"type\":\"bytes\"}],\"name\":\"setUpkeepOffchainConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"unpauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"i
nternalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"updateCheckData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawOwnerFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x6101006040523480156200001257600080fd5b50604051620060e0380380620060e08339810160408190526200003591620001ef565b838383833380600081620000905760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000c357620000c38162000126565b505050836002811115620000db57620000db62000251565b60e0816002811115620000f257620000f262000251565b60f81b9052506001600160601b0319606093841b811660805291831b821660a05290911b1660c05250620002679350505050565b6001600160a01b038116331415620001815760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000087565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001ea57600080fd5b919050565b600080600080608085870312156200020657600080fd5b8451600381106200021657600080fd5b93506200022660208601620001d2565b92506200023660408601
620001d2565b91506200024660608601620001d2565b905092959194509250565b634e487b7160e01b600052602160045260246000fd5b60805160601c60a05160601c60c05160601c60e05160f81c615dd86200030860003960008181610224015281816138850152818161394a01528181614668015261481f01526000818161026e015261428b0152600081816103b7015261437401526000818161041e01528181610f7e0152818161126301528181611bf4015281816122670152818161259c01528181612a830152612b160152615dd86000f3fe608060405234801561001057600080fd5b50600436106101da5760003560e01c80638dcf0fe711610104578063b121e147116100a2578063ca30e60311610071578063ca30e6031461041c578063eb5dcd6c14610442578063f2fde38b14610455578063f7d334ba1461046857600080fd5b8063b121e147146103db578063b148ab6b146103ee578063b79550be14610401578063c80480221461040957600080fd5b80639fab4386116100de5780639fab43861461037c578063a710b2211461038f578063a72aa27e146103a2578063b10b673c146103b557600080fd5b80638dcf0fe7146103435780638e86139b14610356578063948108f71461036957600080fd5b80636ded9eae1161017c5780638456cb591161014b5780638456cb59146102f757806385c1b0ba146102ff5780638765ecbe146103125780638da5cb5b1461032557600080fd5b80636ded9eae146102b3578063744bfe61146102d457806379ba5097146102e75780637d9b97e0146102ef57600080fd5b80633f4ba83a116101b85780633f4ba83a1461021a5780634b4fd03b146102225780635165f2f5146102595780636709d0e51461026c57600080fd5b8063187256e8146101df5780631a2af011146101f45780633b9cce5914610207575b600080fd5b6101f26101ed366004614f3c565b61048d565b005b6101f26102023660046152c3565b6104fe565b6101f2610215366004615019565b610652565b6101f26108a8565b7f00000000000000000000000000000000000000000000000000000000000000006040516102509190615897565b60405180910390f35b6101f26102673660046152aa565b61090e565b7f00000000000000000000000000000000000000000000000000000000000000005b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610250565b6102c66102c1366004614f77565b610a88565b604051908152602001610250565b6101f26102e23660046152c3565b610c77565b6101f2611089565b6101f261118b565b6101f26112f5565b6101f261030d36600461505b565b611366
565b6101f26103203660046152aa565b611c7f565b60005473ffffffffffffffffffffffffffffffffffffffff1661028e565b6101f26103513660046152e6565b611e06565b6101f261036436600461523f565b611e68565b6101f2610377366004615355565b6120a4565b6101f261038a3660046152e6565b612343565b6101f261039d366004614f09565b6123f2565b6101f26103b0366004615332565b61267d565b7f000000000000000000000000000000000000000000000000000000000000000061028e565b6101f26103e9366004614eee565b61275f565b6101f26103fc3660046152aa565b612857565b6101f2612a4a565b6101f26104173660046152aa565b612bb5565b7f000000000000000000000000000000000000000000000000000000000000000061028e565b6101f2610450366004614f09565b612f6a565b6101f2610463366004614eee565b6130c9565b61047b6104763660046152aa565b6130dd565b60405161025096959493929190615708565b610495613734565b73ffffffffffffffffffffffffffffffffffffffff8216600090815260166020526040902080548291907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660018360038111156104f5576104f5615cb9565b02179055505050565b610507826137b7565b73ffffffffffffffffffffffffffffffffffffffff8116331415610557576040517f8c8728c700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff81166105a4576040517f9c8d2cd200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526006602052604090205473ffffffffffffffffffffffffffffffffffffffff82811691161461064e5760008281526006602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff851690811790915590519091339185917fb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b3591a45b5050565b61065a613734565b600b548114610695576040517fcf54c06a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b600b54811015610867576000600b82815481106106b7576106b7615d17565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff908116808452600c90925260408320549193501690858585818110610701
57610701615d17565b90506020020160208101906107169190614eee565b905073ffffffffffffffffffffffffffffffffffffffff811615806107a9575073ffffffffffffffffffffffffffffffffffffffff82161580159061078757508073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b80156107a9575073ffffffffffffffffffffffffffffffffffffffff81811614155b156107e0576040517fb387a23800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff818116146108515773ffffffffffffffffffffffffffffffffffffffff8381166000908152600c6020526040902080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169183169190911790555b505050808061085f90615bfe565b915050610698565b507fa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725600b838360405161089c9392919061550e565b60405180910390a15050565b6108b0613734565b600f80547fffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffff1690556040513381527f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa906020015b60405180910390a1565b610917816137b7565b600081815260046020908152604091829020825160e081018452815463ffffffff8082168352640100000000820481169483019490945268010000000000000000810460ff1615159482018590526901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a08301527801000000000000000000000000000000000000000000000000900490911660c082015290610a19576040517f1b88a78400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260046020526040902080547fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff169055610a5860028361386a565b5060405182907f7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a4745690600090a25050565b6000805473ffffffffffffffffffffffffffffffffffffffff163314801590610ad957506011546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff163314155b15610b10576040
517fd48b678b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610b2c6001610b1d61387f565b610b279190615b36565b613944565b601254604080516020810193909352309083015268010000000000000000900463ffffffff1660608201526080016040516020818303038152906040528051906020012060001c9050610bba8189898960008a8a8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201829052509250613ac4915050565b6012805468010000000000000000900463ffffffff16906008610bdc83615c37565b825463ffffffff9182166101009390930a9283029190920219909116179055506000818152601760205260409020610c15908484614a12565b506040805163ffffffff8916815273ffffffffffffffffffffffffffffffffffffffff8816602082015282917fbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012910160405180910390a2979650505050505050565b600f546f01000000000000000000000000000000900460ff1615610cc7576040517f37ed32e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600f80547fffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff166f0100000000000000000000000000000017905573ffffffffffffffffffffffffffffffffffffffff8116610d4e576040517f9c8d2cd200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000828152600460209081526040808320815160e081018352815463ffffffff8082168352640100000000820481168387015260ff6801000000000000000083041615158386015273ffffffffffffffffffffffffffffffffffffffff6901000000000000000000909204821660608401526001909301546bffffffffffffffffffffffff80821660808501526c0100000000000000000000000082041660a08401527801000000000000000000000000000000000000000000000000900490921660c082015286855260059093529220549091163314610e5b576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610e6361387f565b816020015163ffffffff161115610ea6576040517fff84e5dd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000838152600460205260409020600101546015546c010000000000000000000000
009091046bffffffffffffffffffffffff1690610ee6908290615b36565b60155560008481526004602081905260409182902060010180547fffffffffffffffff000000000000000000000000ffffffffffffffffffffffff16905590517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff858116928201929092526bffffffffffffffffffffffff831660248201527f00000000000000000000000000000000000000000000000000000000000000009091169063a9059cbb90604401602060405180830381600087803b158015610fc457600080fd5b505af1158015610fd8573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610ffc91906151b8565b50604080516bffffffffffffffffffffffff8316815273ffffffffffffffffffffffffffffffffffffffff8516602082015285917ff3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318910160405180910390a25050600f80547fffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff1690555050565b60015473ffffffffffffffffffffffffffffffffffffffff16331461110f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b611193613734565b6011546015546bffffffffffffffffffffffff909116906111b5908290615b36565b601555601180547fffffffffffffffffffffffffffffffffffffffff0000000000000000000000001690556040516bffffffffffffffffffffffff821681527f1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f19060200160405180910390a16040517fa9059cbb0000000000000000000000000000000000000000000000000000000081523360048201526bffffffffffffffffffffffff821660248201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169063a9059cbb906044015b60206040518083038160008780
3b1580156112bd57600080fd5b505af11580156112d1573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061064e91906151b8565b6112fd613734565b600f80547fffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffff166e0100000000000000000000000000001790556040513381527f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a25890602001610904565b600173ffffffffffffffffffffffffffffffffffffffff821660009081526016602052604090205460ff1660038111156113a2576113a2615cb9565b141580156113ea5750600373ffffffffffffffffffffffffffffffffffffffff821660009081526016602052604090205460ff1660038111156113e7576113e7615cb9565b14155b15611421576040517f0ebeec3c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6010546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff16611480576040517fd12d7d8d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b816114b7576040517f2c2fc94100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805160e081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c081018290526000808567ffffffffffffffff81111561150b5761150b615d46565b60405190808252806020026020018201604052801561153e57816020015b60608152602001906001900390816115295790505b50905060008667ffffffffffffffff81111561155c5761155c615d46565b604051908082528060200260200182016040528015611585578160200160208202803683370190505b50905060008767ffffffffffffffff8111156115a3576115a3615d46565b60405190808252806020026020018201604052801561162857816020015b6040805160e08101825260008082526020808301829052928201819052606082018190526080820181905260a0820181905260c082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9092019101816115c15790505b50905060005b888110156119b35789898281811061164857611648615d17565b60209081029290920135600081815260048452604090819020815160e081018352815463ffffffff8082168352640100000000820481169783019790975268010000000000000000810460ff16151593
820193909352690100000000000000000090920473ffffffffffffffffffffffffffffffffffffffff166060830152600101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a08301527801000000000000000000000000000000000000000000000000900490931660c0840152985090965061172a9050876137b7565b8582828151811061173d5761173d615d17565b602002602001018190525060076000888152602001908152602001600020805461176690615baa565b80601f016020809104026020016040519081016040528092919081815260200182805461179290615baa565b80156117df5780601f106117b4576101008083540402835291602001916117df565b820191906000526020600020905b8154815290600101906020018083116117c257829003601f168201915b50505050508482815181106117f6576117f6615d17565b60200260200101819052506005600088815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1683828151811061184757611847615d17565b73ffffffffffffffffffffffffffffffffffffffff9092166020928302919091019091015260a0860151611889906bffffffffffffffffffffffff16866159f6565b600088815260046020908152604080832080547fffffff000000000000000000000000000000000000000000000000000000000016815560010180547fffffffff00000000000000000000000000000000000000000000000000000000169055600790915281209196506118fd9190614ab4565b600087815260066020526040902080547fffffffffffffffffffffffff000000000000000000000000000000000000000016905561193c600288613f07565b5060a0860151604080516bffffffffffffffffffffffff909216825273ffffffffffffffffffffffffffffffffffffffff8a16602083015288917fb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff910160405180910390a2806119ab81615bfe565b91505061162e565b50836015546119c29190615b36565b6015556040516000906119e1908b908b908590889088906020016155be565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905260105490915073ffffffffffffffffffffffffffffffffffffffff808a1691638e86139b916c010000000000000000000000009091041663c71249ab60028c73ffffffffffffffffffffffffffffffffffffffff1663aab9edd66040518163ffffffff1660e01b81526004016020604051
8083038186803b158015611a9357600080fd5b505afa158015611aa7573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611acb91906153c8565b866040518463ffffffff1660e01b8152600401611aea939291906158e6565b60006040518083038186803b158015611b0257600080fd5b505afa158015611b16573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052611b5c9190810190615275565b6040518263ffffffff1660e01b8152600401611b7891906157a5565b600060405180830381600087803b158015611b9257600080fd5b505af1158015611ba6573d6000803e3d6000fd5b50506040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8b81166004830152602482018990527f000000000000000000000000000000000000000000000000000000000000000016925063a9059cbb9150604401602060405180830381600087803b158015611c3a57600080fd5b505af1158015611c4e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611c7291906151b8565b5050505050505050505050565b611c88816137b7565b600081815260046020908152604091829020825160e081018452815463ffffffff8082168352640100000000820481169483019490945268010000000000000000810460ff16158015958301959095526901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a08301527801000000000000000000000000000000000000000000000000900490911660c082015290611d8c576040517f514b6c2400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260046020526040902080547fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff1668010000000000000000179055611dd6600283613f07565b5060405182907f8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f90600090a25050565b611e0f836137b7565b6000838152601760205260409020611e28908383614a12565b50827f3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf48508383604051611e5b929190615758565b60405180910390a2505050565b
60023360009081526016602052604090205460ff166003811115611e8e57611e8e615cb9565b14158015611ec0575060033360009081526016602052604090205460ff166003811115611ebd57611ebd615cb9565b14155b15611ef7576040517f0ebeec3c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000808080611f08858701876150af565b935093509350935060005b845181101561209b57611fea858281518110611f3157611f31615d17565b6020026020010151858381518110611f4b57611f4b615d17565b602002602001015160600151868481518110611f6957611f69615d17565b602002602001015160000151858581518110611f8757611f87615d17565b6020026020010151888681518110611fa157611fa1615d17565b602002602001015160a00151888781518110611fbf57611fbf615d17565b60200260200101518a8881518110611fd957611fd9615d17565b602002602001015160400151613ac4565b848181518110611ffc57611ffc615d17565b60200260200101517f74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a7185838151811061203757612037615d17565b602002602001015160a00151336040516120819291906bffffffffffffffffffffffff92909216825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b60405180910390a28061209381615bfe565b915050611f13565b50505050505050565b600082815260046020908152604091829020825160e081018452815463ffffffff80821683526401000000008204811694830185905268010000000000000000820460ff161515958301959095526901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a083015278010000000000000000000000000000000000000000000000009004831660c082015291146121a6576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b818160a001516121b69190615a33565b600084815260046020526040902060010180547fffffffffffffffff000000000000000000000000ffffffffffffffffffffffff166c010000000000000000000000006bffffffffffffffffffffffff9384160217905560155461221c918416906159f6565b6015556040517f23b872dd0000000000000000000000000000000000000000000000000000000081523360048201523060248201
526bffffffffffffffffffffffff831660448201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906323b872dd90606401602060405180830381600087803b1580156122c057600080fd5b505af11580156122d4573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906122f891906151b8565b506040516bffffffffffffffffffffffff83168152339084907fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039060200160405180910390a3505050565b61234c836137b7565b60125474010000000000000000000000000000000000000000900463ffffffff168111156123a6576040517fae7235df00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008381526007602052604090206123bf908383614a12565b50827f7b778136e5211932b51a145badd01959415e79e051a933604b3d323f862dcabf8383604051611e5b929190615758565b73ffffffffffffffffffffffffffffffffffffffff811661243f576040517f9c8d2cd200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8281166000908152600c602052604090205416331461249f576040517fcebf515b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600f54600b546000916124d691859170010000000000000000000000000000000090046bffffffffffffffffffffffff1690613f13565b73ffffffffffffffffffffffffffffffffffffffff8416600090815260086020526040902080547fffffffffffffffffffffffffffffffffffff000000000000000000000000ffff169055601554909150612540906bffffffffffffffffffffffff831690615b36565b6015556040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff83811660048301526bffffffffffffffffffffffff831660248301527f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb90604401602060405180830381600087803b1580156125e057600080fd5b505af11580156125f4573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061261891906151b8565b5060405133815273ffffffffffffffffffffffffffffffffffffffff80841691
6bffffffffffffffffffffffff8416918616907f9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f406989060200160405180910390a4505050565b6108fc8163ffffffff1610806126a6575060125463ffffffff6401000000009091048116908216115b156126dd576040517f14c237fb00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6126e6826137b7565b60008281526004602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff8516908117909155915191825283917fc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c910160405180910390a25050565b73ffffffffffffffffffffffffffffffffffffffff8181166000908152600d60205260409020541633146127bf576040517f6752e7aa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8181166000818152600c602090815260408083208054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217909355600d909452828520805490921690915590519416939092849290917f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b39190a45050565b600081815260046020908152604091829020825160e081018452815463ffffffff80821683526401000000008204811694830185905268010000000000000000820460ff161515958301959095526901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a083015278010000000000000000000000000000000000000000000000009004831660c08201529114612959576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526006602052604090205473ffffffffffffffffffffffffffffffffffffffff1633146129b6576040517f6352a85300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526005602090815260408083208054337fffffffffffffffffffffffff0000000000000000000000000000000000000000808316821790935560069094528285208054909216909155905173ffffffffffffffffffffffffffffffffffffffff909116928391
86917f5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c91a4505050565b612a52613734565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201526000907f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906370a082319060240160206040518083038186803b158015612ada57600080fd5b505afa158015612aee573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612b129190615226565b90507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb3360155484612b5f9190615b36565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e085901b16815273ffffffffffffffffffffffffffffffffffffffff909216600483015260248201526044016112a3565b6000818152600460209081526040808320815160e081018352815463ffffffff80821683526401000000008204811695830186905260ff6801000000000000000083041615159483019490945273ffffffffffffffffffffffffffffffffffffffff69010000000000000000009091041660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a083015278010000000000000000000000000000000000000000000000009004821660c08201529291141590612ca360005473ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16149050818015612cfa5750808015612cf85750612ceb61387f565b836020015163ffffffff16115b155b15612d31576040517ffbc0357800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80158015612d63575060008481526005602052604090205473ffffffffffffffffffffffffffffffffffffffff163314155b15612d9a576040517ffbdb8e5600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000612da461387f565b905081612db957612db66032826159f6565b90505b6000858152600460205260409020805463ffffffff808416640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff90921691909117909155612e129060
02908790613f0716565b5060105460808501516bffffffffffffffffffffffff9182169160009116821115612e77576080860151612e469083615b4d565b90508560a001516bffffffffffffffffffffffff16816bffffffffffffffffffffffff161115612e77575060a08501515b808660a00151612e879190615b4d565b600088815260046020526040902060010180547fffffffffffffffff000000000000000000000000ffffffffffffffffffffffff166c010000000000000000000000006bffffffffffffffffffffffff93841602179055601154612eed91839116615a33565b601180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff9290921691909117905560405167ffffffffffffffff84169088907f91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f79118190600090a350505050505050565b73ffffffffffffffffffffffffffffffffffffffff8281166000908152600c6020526040902054163314612fca576040517fcebf515b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff811633141561301a576040517f8c8728c700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8281166000908152600d602052604090205481169082161461064e5773ffffffffffffffffffffffffffffffffffffffff8281166000818152600d602052604080822080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169486169485179055513392917f84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e3836791a45050565b6130d1613734565b6130da8161413a565b50565b600060606000806000806130ef614230565b6000600f604051806101200160405290816000820160009054906101000a900460ff1660ff1660ff1681526020016000820160019054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160059054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160099054906101000a900462ffffff1662ffffff1662ffffff16815260200160008201600c9054906101000a900461ffff1661ffff1661ffff16815260200160008201600e9054906101000a900460ff1615151515815260200160008201600f9054906101000a900460ff161515151581526020016000820160109054906101000a90
046bffffffffffffffffffffffff166bffffffffffffffffffffffff166bffffffffffffffffffffffff16815260200160008201601c9054906101000a900463ffffffff1663ffffffff1663ffffffff168152505090506000600460008a81526020019081526020016000206040518060e00160405290816000820160009054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160049054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160089054906101000a900460ff161515151581526020016000820160099054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020016001820160009054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff166bffffffffffffffffffffffff16815260200160018201600c9054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff166bffffffffffffffffffffffff1681526020016001820160189054906101000a900463ffffffff1663ffffffff1663ffffffff1681525050905063ffffffff8016816020015163ffffffff161461340457505060408051602081019091526000808252965094506001935085915081905061372b565b80604001511561343357505060408051602081019091526000808252965094506002935085915081905061372b565b61343c82614268565b825160125492965090945060009161347a9185917801000000000000000000000000000000000000000000000000900463ffffffff16888886614464565b9050806bffffffffffffffffffffffff168260a001516bffffffffffffffffffffffff1610156134c657600060405180602001604052806000815250600698509850985050505061372b565b5a60008b815260076020526040808220905192985090917f6e04ff0d000000000000000000000000000000000000000000000000000000009161350b916024016157b8565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009094169390931790925260608501516012549251919350600092839273ffffffffffffffffffffffffffffffffffffffff9092169163ffffffff909116906135c79086906154f2565b60006040518083038160008787f1925050
503d8060008114613605576040519150601f19603f3d011682016040523d82523d6000602084013e61360a565b606091505b50915091505a61361a908a615b36565b98508161362a57600399506136c0565b8080602001905181019061363e91906151d5565b909c5090508b61366d5760006040518060200160405280600081525060049b509b509b5050505050505061372b565b6012548151780100000000000000000000000000000000000000000000000090910463ffffffff1610156136c05760006040518060200160405280600081525060059b509b509b5050505050505061372b565b604051806060016040528060016136d561387f565b6136df9190615b36565b63ffffffff1681526020016136f76001610b1d61387f565b81526020018281525060405160200161371091906158b1565b6040516020818303038152906040529a50819b505050505050505b91939550919395565b60005473ffffffffffffffffffffffffffffffffffffffff1633146137b5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401611106565b565b60008181526005602052604090205473ffffffffffffffffffffffffffffffffffffffff163314613814576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600081815260046020526040902054640100000000900463ffffffff908116146130da576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061387683836144ad565b90505b92915050565b600060017f000000000000000000000000000000000000000000000000000000000000000060028111156138b5576138b5615cb9565b141561393f57606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b15801561390257600080fd5b505afa158015613916573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061393a9190615226565b905090565b504390565b600060017f0000000000000000000000000000000000000000000000000000000000000000600281111561397a5761397a615cb9565b1415613aba576000606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b158015
6139c957600080fd5b505afa1580156139dd573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613a019190615226565b90508083101580613a1c5750610100613a1a8483615b36565b115b15613a2a5750600092915050565b6040517f2b407a8200000000000000000000000000000000000000000000000000000000815260048101849052606490632b407a829060240160206040518083038186803b158015613a7b57600080fd5b505afa158015613a8f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613ab39190615226565b9392505050565b504090565b919050565b600f546e010000000000000000000000000000900460ff1615613b13576040517f24522f3400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff86163b613b61576040517f09ee12d500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60125482517401000000000000000000000000000000000000000090910463ffffffff161015613bbd576040517fae7235df00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6108fc8563ffffffff161080613be6575060125463ffffffff6401000000009091048116908616115b15613c1d576040517f14c237fb00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000878152600460205260409020546901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1615613c86576040517f6e3b930b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040518060e001604052808663ffffffff16815260200163ffffffff8016815260200182151581526020018773ffffffffffffffffffffffffffffffffffffffff16815260200160006bffffffffffffffffffffffff168152602001846bffffffffffffffffffffffff168152602001600063ffffffff168152506004600089815260200190815260200160002060008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160086101000a81548160ff02191690831515021790555060608201518160000160096101000a81548173ffffffffffffffffffffffffffffffffffffff
ff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060808201518160010160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555060a082015181600101600c6101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555060c08201518160010160186101000a81548163ffffffff021916908363ffffffff160217905550905050836005600089815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550826bffffffffffffffffffffffff16601554613ecf91906159f6565b60155560008781526007602090815260409091208351613ef192850190614aee565b50613efd60028861386a565b5050505050505050565b600061387683836144fc565b73ffffffffffffffffffffffffffffffffffffffff831660009081526008602090815260408083208151608081018352905460ff80821615158352610100820416938201939093526bffffffffffffffffffffffff6201000084048116928201929092526e01000000000000000000000000000090920416606082018190528290613f9e9086615b4d565b90506000613fac8583615a77565b90508083604001818151613fc09190615a33565b6bffffffffffffffffffffffff9081169091528716606085015250613fe58582615b0b565b613fef9083615b4d565b6011805460009061400f9084906bffffffffffffffffffffffff16615a33565b825461010092830a6bffffffffffffffffffffffff81810219909216928216029190911790925573ffffffffffffffffffffffffffffffffffffffff999099166000908152600860209081526040918290208751815492890151938901516060909901517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00009093169015157fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff161760ff909316909b02919091177fffffffffffff000000000000000000000000000000000000000000000000ffff1662010000878416027fffffffffffff000000000000000000000000ffffffffffffffffffffffffffff16176e010000000000000000000000000000919092160217909755509095945050505050565b73ffffffffffffffffffffffffffffffffffffffff81163314156141ba576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e
6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401611106565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b32156137b5576040517fb60ac5db00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000806000836060015162ffffff1690506000808263ffffffff161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b1580156142ef57600080fd5b505afa158015614303573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906143279190615378565b509450909250505060008113158061433e57508142105b8061435f575082801561435f57506143568242615b36565b8463ffffffff16105b1561436e576013549550614372565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b1580156143d857600080fd5b505afa1580156143ec573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906144109190615378565b509450909250505060008113158061442757508142105b806144485750828015614448575061443f8242615b36565b8463ffffffff16105b1561445757601454945061445b565b8094505b50505050915091565b6000806144758689600001516145ef565b90506000806144908a8a63ffffffff16858a8a60018b614633565b909250905061449f8183615a33565b9a9950505050505050505050565b60008181526001830160205260408120546144f457508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155613879565b506000613879565b600081815260018301602052604081205480156145e5576000614520600183615b36565b855490915060009061453490600190615b36565b905081811461459957600086600001828154811061455457614554615d17565b906000526020600020015490508087600001848154811061457757614577615d17565b60
00918252602080832090910192909255918252600188019052604090208390555b85548690806145aa576145aa615ce8565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505050613879565b6000915050613879565b600061460263ffffffff84166014615aa2565b61460d836001615a0e565b61461c9060ff16611d4c615aa2565b61462990620111706159f6565b61387691906159f6565b6000806000896080015161ffff168761464c9190615aa2565b905083801561465a5750803a105b1561466257503a5b600060027f0000000000000000000000000000000000000000000000000000000000000000600281111561469857614698615cb9565b141561481b5760408051600081526020810190915285156146f757600036604051806080016040528060488152602001615d84604891396040516020016146e1939291906154cb565b6040516020818303038152906040529050614773565b601254614727907801000000000000000000000000000000000000000000000000900463ffffffff166004615adf565b63ffffffff1667ffffffffffffffff81111561474557614745615d46565b6040519080825280601f01601f19166020018201604052801561476f576020820181803683370190505b5090505b6040517f49948e0e00000000000000000000000000000000000000000000000000000000815273420000000000000000000000000000000000000f906349948e0e906147c39084906004016157a5565b60206040518083038186803b1580156147db57600080fd5b505afa1580156147ef573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906148139190615226565b9150506148d7565b60017f0000000000000000000000000000000000000000000000000000000000000000600281111561484f5761484f615cb9565b14156148d757606c73ffffffffffffffffffffffffffffffffffffffff1663c6f7de0e6040518163ffffffff1660e01b815260040160206040518083038186803b15801561489c57600080fd5b505afa1580156148b0573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906148d49190615226565b90505b846148f357808b6080015161ffff166148f09190615aa2565b90505b61490161ffff871682615a63565b9050600087826149118c8e6159f6565b61491b9086615aa2565b61492591906159f6565b61493790670de0b6b3a7640000615aa2565b6149419190615a63565b905060008c6040015163ffffffff1664e8d4a510006149609190615aa2565b
898e6020015163ffffffff16858f886149799190615aa2565b61498391906159f6565b61499190633b9aca00615aa2565b61499b9190615aa2565b6149a59190615a63565b6149af91906159f6565b90506b033b2e3c9fd0803ce80000006149c882846159f6565b1115614a00576040517f2ad7547a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b909c909b509950505050505050505050565b828054614a1e90615baa565b90600052602060002090601f016020900481019282614a405760008555614aa4565b82601f10614a77578280017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00823516178555614aa4565b82800160010185558215614aa4579182015b82811115614aa4578235825591602001919060010190614a89565b50614ab0929150614b62565b5090565b508054614ac090615baa565b6000825580601f10614ad0575050565b601f0160209004906000526020600020908101906130da9190614b62565b828054614afa90615baa565b90600052602060002090601f016020900481019282614b1c5760008555614aa4565b82601f10614b3557805160ff1916838001178555614aa4565b82800160010185558215614aa4579182015b82811115614aa4578251825591602001919060010190614b47565b5b80821115614ab05760008155600101614b63565b803573ffffffffffffffffffffffffffffffffffffffff81168114613abf57600080fd5b60008083601f840112614bad57600080fd5b50813567ffffffffffffffff811115614bc557600080fd5b6020830191508360208260051b8501011115614be057600080fd5b9250929050565b600082601f830112614bf857600080fd5b81356020614c0d614c088361598c565b61593d565b80838252828201915082860187848660051b8901011115614c2d57600080fd5b60005b85811015614c5357614c4182614b77565b84529284019290840190600101614c30565b5090979650505050505050565b600082601f830112614c7157600080fd5b81356020614c81614c088361598c565b80838252828201915082860187848660051b8901011115614ca157600080fd5b60005b85811015614c5357813567ffffffffffffffff811115614cc357600080fd5b8801603f81018a13614cd457600080fd5b858101356040614ce6614c08836159b0565b8281528c82848601011115614cfa57600080fd5b828285018a8301376000928101890192909252508552509284019290840190600101614ca4565b600082601f830112614d3257600080fd5b81356020614d42614c088361598c565b828152818101
9085830160e080860288018501891015614d6157600080fd5b6000805b87811015614e065782848c031215614d7b578182fd5b614d83615914565b614d8c85614ea4565b8152614d99888601614ea4565b88820152604080860135614dac81615d75565b908201526060614dbd868201614b77565b908201526080614dce868201614ed2565b9082015260a0614ddf868201614ed2565b9082015260c0614df0868201614ea4565b9082015286529486019492820192600101614d65565b50929998505050505050505050565b60008083601f840112614e2757600080fd5b50813567ffffffffffffffff811115614e3f57600080fd5b602083019150836020828501011115614be057600080fd5b600082601f830112614e6857600080fd5b8151614e76614c08826159b0565b818152846020838601011115614e8b57600080fd5b614e9c826020830160208701615b7a565b949350505050565b803563ffffffff81168114613abf57600080fd5b805169ffffffffffffffffffff81168114613abf57600080fd5b80356bffffffffffffffffffffffff81168114613abf57600080fd5b600060208284031215614f0057600080fd5b61387682614b77565b60008060408385031215614f1c57600080fd5b614f2583614b77565b9150614f3360208401614b77565b90509250929050565b60008060408385031215614f4f57600080fd5b614f5883614b77565b9150602083013560048110614f6c57600080fd5b809150509250929050565b600080600080600080600060a0888a031215614f9257600080fd5b614f9b88614b77565b9650614fa960208901614ea4565b9550614fb760408901614b77565b9450606088013567ffffffffffffffff80821115614fd457600080fd5b614fe08b838c01614e15565b909650945060808a0135915080821115614ff957600080fd5b506150068a828b01614e15565b989b979a50959850939692959293505050565b6000806020838503121561502c57600080fd5b823567ffffffffffffffff81111561504357600080fd5b61504f85828601614b9b565b90969095509350505050565b60008060006040848603121561507057600080fd5b833567ffffffffffffffff81111561508757600080fd5b61509386828701614b9b565b90945092506150a6905060208501614b77565b90509250925092565b600080600080608085870312156150c557600080fd5b843567ffffffffffffffff808211156150dd57600080fd5b818701915087601f8301126150f157600080fd5b81356020615101614c088361598c565b8083825282820191508286018c848660051b890101111561512157600080fd5b600096505b84871015615144578035
835260019690960195918301918301615126565b509850508801359250508082111561515b57600080fd5b61516788838901614d21565b9450604087013591508082111561517d57600080fd5b61518988838901614c60565b9350606087013591508082111561519f57600080fd5b506151ac87828801614be7565b91505092959194509250565b6000602082840312156151ca57600080fd5b8151613ab381615d75565b600080604083850312156151e857600080fd5b82516151f381615d75565b602084015190925067ffffffffffffffff81111561521057600080fd5b61521c85828601614e57565b9150509250929050565b60006020828403121561523857600080fd5b5051919050565b6000806020838503121561525257600080fd5b823567ffffffffffffffff81111561526957600080fd5b61504f85828601614e15565b60006020828403121561528757600080fd5b815167ffffffffffffffff81111561529e57600080fd5b614e9c84828501614e57565b6000602082840312156152bc57600080fd5b5035919050565b600080604083850312156152d657600080fd5b82359150614f3360208401614b77565b6000806000604084860312156152fb57600080fd5b83359250602084013567ffffffffffffffff81111561531957600080fd5b61532586828701614e15565b9497909650939450505050565b6000806040838503121561534557600080fd5b82359150614f3360208401614ea4565b6000806040838503121561536857600080fd5b82359150614f3360208401614ed2565b600080600080600060a0868803121561539057600080fd5b61539986614eb8565b94506020860151935060408601519250606086015191506153bc60808701614eb8565b90509295509295909350565b6000602082840312156153da57600080fd5b815160ff81168114613ab357600080fd5b600081518084526020808501945080840160005b8381101561543157815173ffffffffffffffffffffffffffffffffffffffff16875295820195908201906001016153ff565b509495945050505050565b6000815180845260208085019450848260051b860182860160005b85811015614c5357838303895261546f838351615481565b98850198925090840190600101615457565b60008151808452615499816020860160208601615b7a565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b8284823760008382016000815283516154e8818360208801615b7a565b0195945050505050565b60008251615504818460208701615b7a565b9190910192915050565b60006040820160408352
80865480835260608501915087600052602092508260002060005b8281101561556557815473ffffffffffffffffffffffffffffffffffffffff1684529284019260019182019101615533565b505050838103828501528481528590820160005b868110156155b25773ffffffffffffffffffffffffffffffffffffffff61559f84614b77565b1682529183019190830190600101615579565b50979650505050505050565b60006080808352868184015260a07f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8811156155f957600080fd5b8760051b808a838701378085019050818101600081526020838784030181880152818a5180845260c093508385019150828c01945060005b818110156156cf578551805163ffffffff908116855285820151168585015260408082015115159085015260608082015173ffffffffffffffffffffffffffffffffffffffff1690850152888101516bffffffffffffffffffffffff1689850152878101516156af898601826bffffffffffffffffffffffff169052565b5085015163ffffffff16838601529483019460e090920191600101615631565b505087810360408901526156e3818b61543c565b9550505050505082810360608401526156fc81856153eb565b98975050505050505050565b861515815260c06020820152600061572360c0830188615481565b90506007861061573557615735615cb9565b8560408301528460608301528360808301528260a0830152979650505050505050565b60208152816020820152818360408301376000818301604090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160101919050565b6020815260006138766020830184615481565b600060208083526000845481600182811c9150808316806157da57607f831692505b858310811415615811577f4e487b710000000000000000000000000000000000000000000000000000000085526022600452602485fd5b87860183815260200181801561582e576001811461585d57615888565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00861682528782019650615888565b60008b81526020902060005b8681101561588257815484820152908501908901615869565b83019750505b50949998505050505050505050565b60208101600383106158ab576158ab615cb9565b91905290565b6020815263ffffffff82511660208201526020820151604082015260006040830151606080840152614e9c6080840182615481565b60ff8416815260ff831660208201526060604082015260006159
0b6060830184615481565b95945050505050565b60405160e0810167ffffffffffffffff8111828210171561593757615937615d46565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff8111828210171561598457615984615d46565b604052919050565b600067ffffffffffffffff8211156159a6576159a6615d46565b5060051b60200190565b600067ffffffffffffffff8211156159ca576159ca615d46565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b60008219821115615a0957615a09615c5b565b500190565b600060ff821660ff84168060ff03821115615a2b57615a2b615c5b565b019392505050565b60006bffffffffffffffffffffffff808316818516808303821115615a5a57615a5a615c5b565b01949350505050565b600082615a7257615a72615c8a565b500490565b60006bffffffffffffffffffffffff80841680615a9657615a96615c8a565b92169190910492915050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615615ada57615ada615c5b565b500290565b600063ffffffff80831681851681830481118215151615615b0257615b02615c5b565b02949350505050565b60006bffffffffffffffffffffffff80831681851681830481118215151615615b0257615b02615c5b565b600082821015615b4857615b48615c5b565b500390565b60006bffffffffffffffffffffffff83811690831681811015615b7257615b72615c5b565b039392505050565b60005b83811015615b95578181015183820152602001615b7d565b83811115615ba4576000848401525b50505050565b600181811c90821680615bbe57607f821691505b60208210811415615bf8577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415615c3057615c30615c5b565b5060010190565b600063ffffffff80831681811415615c5157615c51615c5b565b6001019392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b
7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b80151581146130da57600080fdfe307866666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000806000a", +} + +var KeeperRegistryLogicABI = KeeperRegistryLogicMetaData.ABI + +var KeeperRegistryLogicBin = KeeperRegistryLogicMetaData.Bin + +func DeployKeeperRegistryLogic(auth *bind.TransactOpts, backend bind.ContractBackend, mode uint8, link common.Address, linkNativeFeed common.Address, fastGasFeed common.Address) (common.Address, *types.Transaction, *KeeperRegistryLogic, error) { + parsed, err := KeeperRegistryLogicMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistryLogicBin), backend, mode, link, linkNativeFeed, fastGasFeed) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistryLogic{address: address, abi: *parsed, KeeperRegistryLogicCaller: KeeperRegistryLogicCaller{contract: contract}, KeeperRegistryLogicTransactor: KeeperRegistryLogicTransactor{contract: contract}, KeeperRegistryLogicFilterer: KeeperRegistryLogicFilterer{contract: contract}}, nil +} + +type KeeperRegistryLogic struct { + address common.Address + abi abi.ABI + KeeperRegistryLogicCaller + KeeperRegistryLogicTransactor + KeeperRegistryLogicFilterer +} + +type KeeperRegistryLogicCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistryLogicTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistryLogicFilterer struct { + contract 
*bind.BoundContract +} + +type KeeperRegistryLogicSession struct { + Contract *KeeperRegistryLogic + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistryLogicCallerSession struct { + Contract *KeeperRegistryLogicCaller + CallOpts bind.CallOpts +} + +type KeeperRegistryLogicTransactorSession struct { + Contract *KeeperRegistryLogicTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistryLogicRaw struct { + Contract *KeeperRegistryLogic +} + +type KeeperRegistryLogicCallerRaw struct { + Contract *KeeperRegistryLogicCaller +} + +type KeeperRegistryLogicTransactorRaw struct { + Contract *KeeperRegistryLogicTransactor +} + +func NewKeeperRegistryLogic(address common.Address, backend bind.ContractBackend) (*KeeperRegistryLogic, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryLogicABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistryLogic(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistryLogic{address: address, abi: abi, KeeperRegistryLogicCaller: KeeperRegistryLogicCaller{contract: contract}, KeeperRegistryLogicTransactor: KeeperRegistryLogicTransactor{contract: contract}, KeeperRegistryLogicFilterer: KeeperRegistryLogicFilterer{contract: contract}}, nil +} + +func NewKeeperRegistryLogicCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryLogicCaller, error) { + contract, err := bindKeeperRegistryLogic(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicCaller{contract: contract}, nil +} + +func NewKeeperRegistryLogicTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistryLogicTransactor, error) { + contract, err := bindKeeperRegistryLogic(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicTransactor{contract: contract}, nil +} + +func NewKeeperRegistryLogicFilterer(address common.Address, 
filterer bind.ContractFilterer) (*KeeperRegistryLogicFilterer, error) { + contract, err := bindKeeperRegistryLogic(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicFilterer{contract: contract}, nil +} + +func bindKeeperRegistryLogic(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistryLogicMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryLogic.Contract.KeeperRegistryLogicCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.KeeperRegistryLogicTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.KeeperRegistryLogicTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryLogic.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) GetFastGasFeedAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "getFastGasFeedAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) GetFastGasFeedAddress() (common.Address, error) { + return _KeeperRegistryLogic.Contract.GetFastGasFeedAddress(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) GetFastGasFeedAddress() (common.Address, error) { + return _KeeperRegistryLogic.Contract.GetFastGasFeedAddress(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) GetLinkAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "getLinkAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) GetLinkAddress() (common.Address, error) { + return _KeeperRegistryLogic.Contract.GetLinkAddress(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) GetLinkAddress() (common.Address, error) { + return 
_KeeperRegistryLogic.Contract.GetLinkAddress(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) GetLinkNativeFeedAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "getLinkNativeFeedAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) GetLinkNativeFeedAddress() (common.Address, error) { + return _KeeperRegistryLogic.Contract.GetLinkNativeFeedAddress(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) GetLinkNativeFeedAddress() (common.Address, error) { + return _KeeperRegistryLogic.Contract.GetLinkNativeFeedAddress(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) GetMode(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "getMode") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) GetMode() (uint8, error) { + return _KeeperRegistryLogic.Contract.GetMode(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) GetMode() (uint8, error) { + return _KeeperRegistryLogic.Contract.GetMode(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogic.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) 
Owner() (common.Address, error) { + return _KeeperRegistryLogic.Contract.Owner(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicCallerSession) Owner() (common.Address, error) { + return _KeeperRegistryLogic.Contract.Owner(&_KeeperRegistryLogic.CallOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "acceptOwnership") +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AcceptOwnership(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AcceptOwnership(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "acceptPayeeship", transmitter) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AcceptPayeeship(&_KeeperRegistryLogic.TransactOpts, transmitter) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AcceptPayeeship(&_KeeperRegistryLogic.TransactOpts, transmitter) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "acceptUpkeepAdmin", id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, 
error) { + return _KeeperRegistryLogic.Contract.AcceptUpkeepAdmin(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AcceptUpkeepAdmin(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "addFunds", id, amount) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AddFunds(&_KeeperRegistryLogic.TransactOpts, id, amount) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.AddFunds(&_KeeperRegistryLogic.TransactOpts, id, amount) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "cancelUpkeep", id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.CancelUpkeep(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.CancelUpkeep(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) CheckUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "checkUpkeep", id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) CheckUpkeep(id *big.Int) 
(*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.CheckUpkeep(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) CheckUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.CheckUpkeep(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "migrateUpkeeps", ids, destination) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.MigrateUpkeeps(&_KeeperRegistryLogic.TransactOpts, ids, destination) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.MigrateUpkeeps(&_KeeperRegistryLogic.TransactOpts, ids, destination) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "pause") +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) Pause() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.Pause(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) Pause() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.Pause(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "pauseUpkeep", id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) PauseUpkeep(id 
*big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.PauseUpkeep(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) PauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.PauseUpkeep(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "receiveUpkeeps", encodedUpkeeps) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.ReceiveUpkeeps(&_KeeperRegistryLogic.TransactOpts, encodedUpkeeps) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.ReceiveUpkeeps(&_KeeperRegistryLogic.TransactOpts, encodedUpkeeps) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "recoverFunds") +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.RecoverFunds(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.RecoverFunds(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "registerUpkeep", target, 
gasLimit, admin, checkData, offchainConfig) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.RegisterUpkeep(&_KeeperRegistryLogic.TransactOpts, target, gasLimit, admin, checkData, offchainConfig) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.RegisterUpkeep(&_KeeperRegistryLogic.TransactOpts, target, gasLimit, admin, checkData, offchainConfig) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) SetPayees(opts *bind.TransactOpts, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "setPayees", payees) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) SetPayees(payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.SetPayees(&_KeeperRegistryLogic.TransactOpts, payees) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) SetPayees(payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.SetPayees(&_KeeperRegistryLogic.TransactOpts, payees) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "setPeerRegistryMigrationPermission", peer, permission) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return 
_KeeperRegistryLogic.Contract.SetPeerRegistryMigrationPermission(&_KeeperRegistryLogic.TransactOpts, peer, permission) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.SetPeerRegistryMigrationPermission(&_KeeperRegistryLogic.TransactOpts, peer, permission) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "setUpkeepGasLimit", id, gasLimit) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.SetUpkeepGasLimit(&_KeeperRegistryLogic.TransactOpts, id, gasLimit) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.SetUpkeepGasLimit(&_KeeperRegistryLogic.TransactOpts, id, gasLimit) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) SetUpkeepOffchainConfig(opts *bind.TransactOpts, id *big.Int, config []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "setUpkeepOffchainConfig", id, config) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) SetUpkeepOffchainConfig(id *big.Int, config []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.SetUpkeepOffchainConfig(&_KeeperRegistryLogic.TransactOpts, id, config) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) SetUpkeepOffchainConfig(id *big.Int, config []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.SetUpkeepOffchainConfig(&_KeeperRegistryLogic.TransactOpts, id, config) +} + +func 
(_KeeperRegistryLogic *KeeperRegistryLogicTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "transferOwnership", to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.TransferOwnership(&_KeeperRegistryLogic.TransactOpts, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.TransferOwnership(&_KeeperRegistryLogic.TransactOpts, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "transferPayeeship", transmitter, proposed) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.TransferPayeeship(&_KeeperRegistryLogic.TransactOpts, transmitter, proposed) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.TransferPayeeship(&_KeeperRegistryLogic.TransactOpts, transmitter, proposed) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "transferUpkeepAdmin", id, proposed) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return 
_KeeperRegistryLogic.Contract.TransferUpkeepAdmin(&_KeeperRegistryLogic.TransactOpts, id, proposed) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.TransferUpkeepAdmin(&_KeeperRegistryLogic.TransactOpts, id, proposed) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "unpause") +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) Unpause() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.Unpause(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) Unpause() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.Unpause(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "unpauseUpkeep", id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.UnpauseUpkeep(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.UnpauseUpkeep(&_KeeperRegistryLogic.TransactOpts, id) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) UpdateCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "updateCheckData", id, newCheckData) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) UpdateCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) 
{ + return _KeeperRegistryLogic.Contract.UpdateCheckData(&_KeeperRegistryLogic.TransactOpts, id, newCheckData) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) UpdateCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.UpdateCheckData(&_KeeperRegistryLogic.TransactOpts, id, newCheckData) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "withdrawFunds", id, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.WithdrawFunds(&_KeeperRegistryLogic.TransactOpts, id, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.WithdrawFunds(&_KeeperRegistryLogic.TransactOpts, id, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "withdrawOwnerFunds") +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.WithdrawOwnerFunds(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.WithdrawOwnerFunds(&_KeeperRegistryLogic.TransactOpts) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactor) WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.contract.Transact(opts, "withdrawPayment", from, to) +} + 
+func (_KeeperRegistryLogic *KeeperRegistryLogicSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.WithdrawPayment(&_KeeperRegistryLogic.TransactOpts, from, to) +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicTransactorSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogic.Contract.WithdrawPayment(&_KeeperRegistryLogic.TransactOpts, from, to) +} + +type KeeperRegistryLogicCancelledUpkeepReportIterator struct { + Event *KeeperRegistryLogicCancelledUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicCancelledUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicCancelledUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicCancelledUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicCancelledUpkeepReport struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicCancelledUpkeepReportIterator, error) { + + var 
idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicCancelledUpkeepReportIterator{contract: _KeeperRegistryLogic.contract, event: "CancelledUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicCancelledUpkeepReport) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseCancelledUpkeepReport(log types.Log) (*KeeperRegistryLogicCancelledUpkeepReport, error) { + event := new(KeeperRegistryLogicCancelledUpkeepReport) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicFundsAddedIterator struct { + Event *KeeperRegistryLogicFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*KeeperRegistryLogicFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicFundsAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryLogicFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicFundsAddedIterator{contract: _KeeperRegistryLogic.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var 
fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicFundsAdded) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseFundsAdded(log types.Log) (*KeeperRegistryLogicFundsAdded, error) { + event := new(KeeperRegistryLogicFundsAdded) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicFundsWithdrawnIterator struct { + Event *KeeperRegistryLogicFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = 
err + return it.Next() + } +} + +func (it *KeeperRegistryLogicFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicFundsWithdrawnIterator{contract: _KeeperRegistryLogic.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicFundsWithdrawn) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseFundsWithdrawn(log types.Log) (*KeeperRegistryLogicFundsWithdrawn, error) { + event := 
new(KeeperRegistryLogicFundsWithdrawn) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicInsufficientFundsUpkeepReportIterator struct { + Event *KeeperRegistryLogicInsufficientFundsUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicInsufficientFundsUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicInsufficientFundsUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicInsufficientFundsUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicInsufficientFundsUpkeepReport struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicInsufficientFundsUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, 
err + } + return &KeeperRegistryLogicInsufficientFundsUpkeepReportIterator{contract: _KeeperRegistryLogic.contract, event: "InsufficientFundsUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicInsufficientFundsUpkeepReport) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseInsufficientFundsUpkeepReport(log types.Log) (*KeeperRegistryLogicInsufficientFundsUpkeepReport, error) { + event := new(KeeperRegistryLogicInsufficientFundsUpkeepReport) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicOwnerFundsWithdrawnIterator struct { + Event *KeeperRegistryLogicOwnerFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicOwnerFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + 
case log := <-it.logs: + it.Event = new(KeeperRegistryLogicOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryLogicOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicOwnerFundsWithdrawnIterator{contract: _KeeperRegistryLogic.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicOwnerFundsWithdrawn) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log 
+ + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryLogicOwnerFundsWithdrawn, error) { + event := new(KeeperRegistryLogicOwnerFundsWithdrawn) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicOwnershipTransferRequestedIterator struct { + Event *KeeperRegistryLogicOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) 
FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicOwnershipTransferRequestedIterator{contract: _KeeperRegistryLogic.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicOwnershipTransferRequested) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryLogicOwnershipTransferRequested, 
error) { + event := new(KeeperRegistryLogicOwnershipTransferRequested) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicOwnershipTransferredIterator struct { + Event *KeeperRegistryLogicOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := 
_KeeperRegistryLogic.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicOwnershipTransferredIterator{contract: _KeeperRegistryLogic.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicOwnershipTransferred) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryLogicOwnershipTransferred, error) { + event := new(KeeperRegistryLogicOwnershipTransferred) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicPausedIterator struct { + Event *KeeperRegistryLogicPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func 
(it *KeeperRegistryLogicPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicPaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryLogicPausedIterator, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicPausedIterator{contract: _KeeperRegistryLogic.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicPaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): 
+ return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParsePaused(log types.Log) (*KeeperRegistryLogicPaused, error) { + event := new(KeeperRegistryLogicPaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicPayeesUpdatedIterator struct { + Event *KeeperRegistryLogicPayeesUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicPayeesUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicPayeesUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicPayeesUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicPayeesUpdated struct { + Transmitters []common.Address + Payees []common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterPayeesUpdated(opts *bind.FilterOpts) (*KeeperRegistryLogicPayeesUpdatedIterator, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "PayeesUpdated") + if err != nil { + return nil, 
err + } + return &KeeperRegistryLogicPayeesUpdatedIterator{contract: _KeeperRegistryLogic.contract, event: "PayeesUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPayeesUpdated) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicPayeesUpdated) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParsePayeesUpdated(log types.Log) (*KeeperRegistryLogicPayeesUpdated, error) { + event := new(KeeperRegistryLogicPayeesUpdated) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicPayeeshipTransferRequestedIterator struct { + Event *KeeperRegistryLogicPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select 
{ + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicPayeeshipTransferRequested struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicPayeeshipTransferRequestedIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicPayeeshipTransferRequestedIterator{contract: _KeeperRegistryLogic.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, 
transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicPayeeshipTransferRequested) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryLogicPayeeshipTransferRequested, error) { + event := new(KeeperRegistryLogicPayeeshipTransferRequested) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicPayeeshipTransferredIterator struct { + Event *KeeperRegistryLogicPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicPayeeshipTransferred struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicPayeeshipTransferredIterator{contract: _KeeperRegistryLogic.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, 
transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicPayeeshipTransferred) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryLogicPayeeshipTransferred, error) { + event := new(KeeperRegistryLogicPayeeshipTransferred) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicPaymentWithdrawnIterator struct { + Event *KeeperRegistryLogicPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + 
} + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicPaymentWithdrawn struct { + Transmitter common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryLogicPaymentWithdrawnIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicPaymentWithdrawnIterator{contract: _KeeperRegistryLogic.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, 
transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicPaymentWithdrawn) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryLogicPaymentWithdrawn, error) { + event := new(KeeperRegistryLogicPaymentWithdrawn) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicReorgedUpkeepReportIterator struct { + Event *KeeperRegistryLogicReorgedUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicReorgedUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(KeeperRegistryLogicReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicReorgedUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicReorgedUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicReorgedUpkeepReport struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicReorgedUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicReorgedUpkeepReportIterator{contract: _KeeperRegistryLogic.contract, event: "ReorgedUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicReorgedUpkeepReport) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err 
+ case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseReorgedUpkeepReport(log types.Log) (*KeeperRegistryLogicReorgedUpkeepReport, error) { + event := new(KeeperRegistryLogicReorgedUpkeepReport) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicStaleUpkeepReportIterator struct { + Event *KeeperRegistryLogicStaleUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicStaleUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicStaleUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicStaleUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicStaleUpkeepReport struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicStaleUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule 
= append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicStaleUpkeepReportIterator{contract: _KeeperRegistryLogic.contract, event: "StaleUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicStaleUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicStaleUpkeepReport) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseStaleUpkeepReport(log types.Log) (*KeeperRegistryLogicStaleUpkeepReport, error) { + event := new(KeeperRegistryLogicStaleUpkeepReport) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUnpausedIterator struct { + Event *KeeperRegistryLogicUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log 
:= <-it.logs: + it.Event = new(KeeperRegistryLogicUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryLogicUnpausedIterator, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUnpausedIterator{contract: _KeeperRegistryLogic.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUnpaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUnpaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + 
return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUnpaused(log types.Log) (*KeeperRegistryLogicUnpaused, error) { + event := new(KeeperRegistryLogicUnpaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepAdminTransferRequestedIterator struct { + Event *KeeperRegistryLogicUpkeepAdminTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepAdminTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepAdminTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepAdminTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepAdminTransferRequested struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicUpkeepAdminTransferRequestedIterator, error) { + + 
var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepAdminTransferRequestedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepAdminTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepAdminTransferRequested) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) 
ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryLogicUpkeepAdminTransferRequested, error) { + event := new(KeeperRegistryLogicUpkeepAdminTransferRequested) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepAdminTransferredIterator struct { + Event *KeeperRegistryLogicUpkeepAdminTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepAdminTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepAdminTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepAdminTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepAdminTransferred struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicUpkeepAdminTransferredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = 
append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepAdminTransferredIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepAdminTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepAdminTransferred) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryLogicUpkeepAdminTransferred, error) { + event := 
new(KeeperRegistryLogicUpkeepAdminTransferred) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepCanceledIterator struct { + Event *KeeperRegistryLogicUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryLogicUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, 
"UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepCanceledIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepCanceled) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepCanceled(log types.Log) (*KeeperRegistryLogicUpkeepCanceled, error) { + event := new(KeeperRegistryLogicUpkeepCanceled) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepCheckDataUpdatedIterator struct { + Event *KeeperRegistryLogicUpkeepCheckDataUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*KeeperRegistryLogicUpkeepCheckDataUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepCheckDataUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepCheckDataUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepCheckDataUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepCheckDataUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepCheckDataUpdated struct { + Id *big.Int + NewCheckData []byte + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepCheckDataUpdated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepCheckDataUpdatedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepCheckDataUpdated", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepCheckDataUpdatedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepCheckDataUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepCheckDataUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepCheckDataUpdated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepCheckDataUpdated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepCheckDataUpdated) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepCheckDataUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepCheckDataUpdated(log types.Log) (*KeeperRegistryLogicUpkeepCheckDataUpdated, error) { + event := new(KeeperRegistryLogicUpkeepCheckDataUpdated) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepCheckDataUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepGasLimitSetIterator struct { + Event *KeeperRegistryLogicUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + 
+func (it *KeeperRegistryLogicUpkeepGasLimitSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepGasLimitSetIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepGasLimitSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepGasLimitSet) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryLogicUpkeepGasLimitSet, error) { + event := 
new(KeeperRegistryLogicUpkeepGasLimitSet) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepMigratedIterator struct { + Event *KeeperRegistryLogicUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepMigrated struct { + Id *big.Int + RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepMigratedIterator{contract: _KeeperRegistryLogic.contract, 
event: "UpkeepMigrated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepMigrated) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepMigrated(log types.Log) (*KeeperRegistryLogicUpkeepMigrated, error) { + event := new(KeeperRegistryLogicUpkeepMigrated) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepOffchainConfigSetIterator struct { + Event *KeeperRegistryLogicUpkeepOffchainConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepOffchainConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + 
} + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepOffchainConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepOffchainConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepOffchainConfigSet struct { + Id *big.Int + OffchainConfig []byte + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepOffchainConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepOffchainConfigSetIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepOffchainConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepOffchainConfigSet) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, 
"UpkeepOffchainConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepOffchainConfigSet(log types.Log) (*KeeperRegistryLogicUpkeepOffchainConfigSet, error) { + event := new(KeeperRegistryLogicUpkeepOffchainConfigSet) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepPausedIterator struct { + Event *KeeperRegistryLogicUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepPaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepPaused(opts *bind.FilterOpts, id 
[]*big.Int) (*KeeperRegistryLogicUpkeepPausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepPausedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepPaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepPaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepPaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepPaused(log types.Log) (*KeeperRegistryLogicUpkeepPaused, error) { + event := new(KeeperRegistryLogicUpkeepPaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepPerformedIterator struct { + Event *KeeperRegistryLogicUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepPerformedIterator) 
Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepPerformed struct { + Id *big.Int + Success bool + CheckBlockNumber uint32 + GasUsed *big.Int + GasOverhead *big.Int + TotalPayment *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*KeeperRegistryLogicUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepPerformedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) { + + var 
idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepPerformed) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepPerformed(log types.Log) (*KeeperRegistryLogicUpkeepPerformed, error) { + event := new(KeeperRegistryLogicUpkeepPerformed) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepReceivedIterator struct { + Event *KeeperRegistryLogicUpkeepReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, 
it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepReceivedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepReceived struct { + Id *big.Int + StartingBalance *big.Int + ImportedFrom common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepReceivedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepReceivedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepReceived", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepReceived, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepReceived) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return 
nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepReceived(log types.Log) (*KeeperRegistryLogicUpkeepReceived, error) { + event := new(KeeperRegistryLogicUpkeepReceived) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepRegisteredIterator struct { + Event *KeeperRegistryLogicUpkeepRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepRegistered struct { + Id *big.Int + ExecuteGas uint32 + Admin common.Address + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepRegisteredIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepRegistered) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepRegistered(log types.Log) (*KeeperRegistryLogicUpkeepRegistered, error) { + event := new(KeeperRegistryLogicUpkeepRegistered) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicUpkeepUnpausedIterator struct { + Event *KeeperRegistryLogicUpkeepUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(KeeperRegistryLogicUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicUpkeepUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicUpkeepUnpaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepUnpausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.FilterLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicUpkeepUnpausedIterator{contract: _KeeperRegistryLogic.contract, event: "UpkeepUnpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepUnpaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogic.contract.WatchLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicUpkeepUnpaused) + if 
err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogicFilterer) ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryLogicUpkeepUnpaused, error) { + event := new(KeeperRegistryLogicUpkeepUnpaused) + if err := _KeeperRegistryLogic.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_KeeperRegistryLogic *KeeperRegistryLogic) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistryLogic.abi.Events["CancelledUpkeepReport"].ID: + return _KeeperRegistryLogic.ParseCancelledUpkeepReport(log) + case _KeeperRegistryLogic.abi.Events["FundsAdded"].ID: + return _KeeperRegistryLogic.ParseFundsAdded(log) + case _KeeperRegistryLogic.abi.Events["FundsWithdrawn"].ID: + return _KeeperRegistryLogic.ParseFundsWithdrawn(log) + case _KeeperRegistryLogic.abi.Events["InsufficientFundsUpkeepReport"].ID: + return _KeeperRegistryLogic.ParseInsufficientFundsUpkeepReport(log) + case _KeeperRegistryLogic.abi.Events["OwnerFundsWithdrawn"].ID: + return _KeeperRegistryLogic.ParseOwnerFundsWithdrawn(log) + case _KeeperRegistryLogic.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistryLogic.ParseOwnershipTransferRequested(log) + case _KeeperRegistryLogic.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistryLogic.ParseOwnershipTransferred(log) + case _KeeperRegistryLogic.abi.Events["Paused"].ID: + return _KeeperRegistryLogic.ParsePaused(log) + case _KeeperRegistryLogic.abi.Events["PayeesUpdated"].ID: + return _KeeperRegistryLogic.ParsePayeesUpdated(log) + case _KeeperRegistryLogic.abi.Events["PayeeshipTransferRequested"].ID: + return 
_KeeperRegistryLogic.ParsePayeeshipTransferRequested(log) + case _KeeperRegistryLogic.abi.Events["PayeeshipTransferred"].ID: + return _KeeperRegistryLogic.ParsePayeeshipTransferred(log) + case _KeeperRegistryLogic.abi.Events["PaymentWithdrawn"].ID: + return _KeeperRegistryLogic.ParsePaymentWithdrawn(log) + case _KeeperRegistryLogic.abi.Events["ReorgedUpkeepReport"].ID: + return _KeeperRegistryLogic.ParseReorgedUpkeepReport(log) + case _KeeperRegistryLogic.abi.Events["StaleUpkeepReport"].ID: + return _KeeperRegistryLogic.ParseStaleUpkeepReport(log) + case _KeeperRegistryLogic.abi.Events["Unpaused"].ID: + return _KeeperRegistryLogic.ParseUnpaused(log) + case _KeeperRegistryLogic.abi.Events["UpkeepAdminTransferRequested"].ID: + return _KeeperRegistryLogic.ParseUpkeepAdminTransferRequested(log) + case _KeeperRegistryLogic.abi.Events["UpkeepAdminTransferred"].ID: + return _KeeperRegistryLogic.ParseUpkeepAdminTransferred(log) + case _KeeperRegistryLogic.abi.Events["UpkeepCanceled"].ID: + return _KeeperRegistryLogic.ParseUpkeepCanceled(log) + case _KeeperRegistryLogic.abi.Events["UpkeepCheckDataUpdated"].ID: + return _KeeperRegistryLogic.ParseUpkeepCheckDataUpdated(log) + case _KeeperRegistryLogic.abi.Events["UpkeepGasLimitSet"].ID: + return _KeeperRegistryLogic.ParseUpkeepGasLimitSet(log) + case _KeeperRegistryLogic.abi.Events["UpkeepMigrated"].ID: + return _KeeperRegistryLogic.ParseUpkeepMigrated(log) + case _KeeperRegistryLogic.abi.Events["UpkeepOffchainConfigSet"].ID: + return _KeeperRegistryLogic.ParseUpkeepOffchainConfigSet(log) + case _KeeperRegistryLogic.abi.Events["UpkeepPaused"].ID: + return _KeeperRegistryLogic.ParseUpkeepPaused(log) + case _KeeperRegistryLogic.abi.Events["UpkeepPerformed"].ID: + return _KeeperRegistryLogic.ParseUpkeepPerformed(log) + case _KeeperRegistryLogic.abi.Events["UpkeepReceived"].ID: + return _KeeperRegistryLogic.ParseUpkeepReceived(log) + case _KeeperRegistryLogic.abi.Events["UpkeepRegistered"].ID: + return 
_KeeperRegistryLogic.ParseUpkeepRegistered(log) + case _KeeperRegistryLogic.abi.Events["UpkeepUnpaused"].ID: + return _KeeperRegistryLogic.ParseUpkeepUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistryLogicCancelledUpkeepReport) Topic() common.Hash { + return common.HexToHash("0xd84831b6a3a7fbd333f42fe7f9104a139da6cca4cc1507aef4ddad79b31d017f") +} + +func (KeeperRegistryLogicFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (KeeperRegistryLogicFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (KeeperRegistryLogicInsufficientFundsUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x7895fdfe292beab0842d5beccd078e85296b9e17a30eaee4c261a2696b84eb96") +} + +func (KeeperRegistryLogicOwnerFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1") +} + +func (KeeperRegistryLogicOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistryLogicOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistryLogicPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (KeeperRegistryLogicPayeesUpdated) Topic() common.Hash { + return common.HexToHash("0xa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725") +} + +func (KeeperRegistryLogicPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func 
(KeeperRegistryLogicPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (KeeperRegistryLogicPaymentWithdrawn) Topic() common.Hash { + return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (KeeperRegistryLogicReorgedUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x561ff77e59394941a01a456497a9418dea82e2a39abb3ecebfb1cef7e0bfdc13") +} + +func (KeeperRegistryLogicStaleUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x5aa44821f7938098502bff537fbbdc9aaaa2fa655c10740646fce27e54987a89") +} + +func (KeeperRegistryLogicUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (KeeperRegistryLogicUpkeepAdminTransferRequested) Topic() common.Hash { + return common.HexToHash("0xb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b35") +} + +func (KeeperRegistryLogicUpkeepAdminTransferred) Topic() common.Hash { + return common.HexToHash("0x5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c") +} + +func (KeeperRegistryLogicUpkeepCanceled) Topic() common.Hash { + return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (KeeperRegistryLogicUpkeepCheckDataUpdated) Topic() common.Hash { + return common.HexToHash("0x7b778136e5211932b51a145badd01959415e79e051a933604b3d323f862dcabf") +} + +func (KeeperRegistryLogicUpkeepGasLimitSet) Topic() common.Hash { + return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c") +} + +func (KeeperRegistryLogicUpkeepMigrated) Topic() common.Hash { + return common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff") +} + +func (KeeperRegistryLogicUpkeepOffchainConfigSet) Topic() common.Hash { + return 
common.HexToHash("0x3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf4850") +} + +func (KeeperRegistryLogicUpkeepPaused) Topic() common.Hash { + return common.HexToHash("0x8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f") +} + +func (KeeperRegistryLogicUpkeepPerformed) Topic() common.Hash { + return common.HexToHash("0x29233ba1d7b302b8fe230ad0b81423aba5371b2a6f6b821228212385ee6a4420") +} + +func (KeeperRegistryLogicUpkeepReceived) Topic() common.Hash { + return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71") +} + +func (KeeperRegistryLogicUpkeepRegistered) Topic() common.Hash { + return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + +func (KeeperRegistryLogicUpkeepUnpaused) Topic() common.Hash { + return common.HexToHash("0x7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a47456") +} + +func (_KeeperRegistryLogic *KeeperRegistryLogic) Address() common.Address { + return _KeeperRegistryLogic.address +} + +type KeeperRegistryLogicInterface interface { + GetFastGasFeedAddress(opts *bind.CallOpts) (common.Address, error) + + GetLinkAddress(opts *bind.CallOpts) (common.Address, error) + + GetLinkNativeFeedAddress(opts *bind.CallOpts) (common.Address, error) + + GetMode(opts *bind.CallOpts) (uint8, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) + + AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) + + CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + CheckUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination 
common.Address) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) (*types.Transaction, error) + + PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) + + SetPayees(opts *bind.TransactOpts, payees []common.Address) (*types.Transaction, error) + + SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) + + SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) + + SetUpkeepOffchainConfig(opts *bind.TransactOpts, id *big.Int, config []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) + + TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + UpdateCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) + + WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) + + WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicCancelledUpkeepReportIterator, error) + + 
WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseCancelledUpkeepReport(log types.Log) (*KeeperRegistryLogicCancelledUpkeepReport, error) + + FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryLogicFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*KeeperRegistryLogicFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicFundsWithdrawn, id []*big.Int) (event.Subscription, error) + + ParseFundsWithdrawn(log types.Log) (*KeeperRegistryLogicFundsWithdrawn, error) + + FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicInsufficientFundsUpkeepReportIterator, error) + + WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseInsufficientFundsUpkeepReport(log types.Log) (*KeeperRegistryLogicInsufficientFundsUpkeepReport, error) + + FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryLogicOwnerFundsWithdrawnIterator, error) + + WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicOwnerFundsWithdrawn) (event.Subscription, error) + + ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryLogicOwnerFundsWithdrawn, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicOwnershipTransferRequested, from 
[]common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryLogicOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistryLogicOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryLogicPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*KeeperRegistryLogicPaused, error) + + FilterPayeesUpdated(opts *bind.FilterOpts) (*KeeperRegistryLogicPayeesUpdatedIterator, error) + + WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPayeesUpdated) (event.Subscription, error) + + ParsePayeesUpdated(log types.Log) (*KeeperRegistryLogicPayeesUpdated, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryLogicPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPayeeshipTransferred, transmitter 
[]common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryLogicPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryLogicPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryLogicPaymentWithdrawn, error) + + FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicReorgedUpkeepReportIterator, error) + + WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseReorgedUpkeepReport(log types.Log) (*KeeperRegistryLogicReorgedUpkeepReport, error) + + FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicStaleUpkeepReportIterator, error) + + WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicStaleUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseStaleUpkeepReport(log types.Log) (*KeeperRegistryLogicStaleUpkeepReport, error) + + FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryLogicUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*KeeperRegistryLogicUnpaused, error) + + FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicUpkeepAdminTransferRequestedIterator, error) + + WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) 
(event.Subscription, error) + + ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryLogicUpkeepAdminTransferRequested, error) + + FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicUpkeepAdminTransferredIterator, error) + + WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryLogicUpkeepAdminTransferred, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryLogicUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*KeeperRegistryLogicUpkeepCanceled, error) + + FilterUpkeepCheckDataUpdated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepCheckDataUpdatedIterator, error) + + WatchUpkeepCheckDataUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepCheckDataUpdated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepCheckDataUpdated(log types.Log) (*KeeperRegistryLogicUpkeepCheckDataUpdated, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepGasLimitSetIterator, error) + + WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryLogicUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + 
ParseUpkeepMigrated(log types.Log) (*KeeperRegistryLogicUpkeepMigrated, error) + + FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepOffchainConfigSetIterator, error) + + WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepOffchainConfigSet(log types.Log) (*KeeperRegistryLogicUpkeepOffchainConfigSet, error) + + FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepPausedIterator, error) + + WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepPaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPaused(log types.Log) (*KeeperRegistryLogicUpkeepPaused, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*KeeperRegistryLogicUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*KeeperRegistryLogicUpkeepPerformed, error) + + FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*KeeperRegistryLogicUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*KeeperRegistryLogicUpkeepRegistered, error) + + FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicUpkeepUnpausedIterator, error) + + WatchUpkeepUnpaused(opts 
*bind.WatchOpts, sink chan<- *KeeperRegistryLogicUpkeepUnpaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryLogicUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_registry_logic_a_wrapper_2_1/keeper_registry_logic_a_wrapper_2_1.go b/core/gethwrappers/generated/keeper_registry_logic_a_wrapper_2_1/keeper_registry_logic_a_wrapper_2_1.go new file mode 100644 index 00000000..c3a9e7b6 --- /dev/null +++ b/core/gethwrappers/generated/keeper_registry_logic_a_wrapper_2_1/keeper_registry_logic_a_wrapper_2_1.go @@ -0,0 +1,4863 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package keeper_registry_logic_a_wrapper_2_1 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var KeeperRegistryLogicAMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"contractKeeperRegistryLogicB2_1\",\"name\":\"logicB\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CheckDataExceedsLimit\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ConfigDigestMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfFaultyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientFunds\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidReport\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTransmitter\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTrigger\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTriggerType\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxCheckDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxPerformDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveTransmitters\",\"type\":\"
error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByUpkeepPrivilegeManager\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyPausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUnpausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReentrantCall\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistryPaused\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedTransmitter\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepAlreadyExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepCancelled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":
\"bytes\"}],\"name\":\"AdminPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"CancelledUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"dedupKey\",\"type\":\"bytes32\"}],\"name\":\"DedupKeyAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"InsufficientFundsUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":
\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"PayeesUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"ReorgedUpkeepReport\",\"type\":\"ev
ent\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"StaleUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"UpkeepCheckDataSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"nam
e\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepOffchainConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepPaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasOverhead\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\
"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepTriggerConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepUnpaused\",\"type\":\"event\"},{\"stateMutability\":\"nonpayable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"cancelUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes[]\",\"name\":\"values\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"checkCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"enumKeeperRegistryBase2_1.UpkeepFailureReason\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type
\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"triggerData\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"enumKeeperRegistryBase2_1.UpkeepFailureReason\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fastGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkNative\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"enumKeeperRegistryBase2_1.UpkeepFailureReason\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fastGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkNative\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"payload\",\"type\":\"bytes\"}],\"name\":\"executeCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"enumKeeperRegistryBase2_1.UpkeepFailureReason\",\"name\":\"upkee
pFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"fallbackTo\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"ids\",\"type\":\"uint256[]\"},{\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"migrateUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"encodedUpkeeps\",\"type\":\"bytes\"}],\"name\":\"receiveUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"enumKeeperRegistryBase2_1.Trigger\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\
"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"}],\"name\":\"setUpkeepTriggerConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x6101406040523480156200001257600080fd5b50604051620061d1380380620061d18339810160408190526200003591620003df565b80816001600160a01b0316634b4fd03b6040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000075573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200009b919062000406565b826001600160a01b031663ca30e6036040518163ffffffff1660e01b8152600401602060405180830381865afa158015620000da573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001009190620003df565b836001600160a01b031663b10b673c6040518163ffffffff1660e01b8152600401602060405180830381865afa1580156200013f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001659190620003df565b846001600160a01b0316636709d0e56040518163ffffffff1660e01b8152600401602060405180830381865afa158015620001a4573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001ca9190620003df565b856001600160a01b0316635425d8ac6040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000209573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200022f9190620003df565b3380600081620002865760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600
160a01b0319166001600160a01b0384811691909117909155811615620002b957620002b9816200031b565b505050846002811115620002d157620002d162000429565b60e0816002811115620002e857620002e862000429565b9052506001600160a01b0393841660805291831660a052821660c0528116610100529190911661012052506200043f9050565b336001600160a01b03821603620003755760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016200027d565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6001600160a01b0381168114620003dc57600080fd5b50565b600060208284031215620003f257600080fd5b8151620003ff81620003c6565b9392505050565b6000602082840312156200041957600080fd5b815160038110620003ff57600080fd5b634e487b7160e01b600052602160045260246000fd5b60805160a05160c05160e0516101005161012051615d18620004b96000396000818161010e01526101a90152600081816103e10152611fa10152600081816135370152818161376d015281816139b50152613b5d015260006130e1015260006131c5015260008181611de301526123af0152615d186000f3fe60806040523480156200001157600080fd5b50600436106200010c5760003560e01c806385c1b0ba11620000a5578063c8048022116200006f578063c804802214620002b7578063ce7dc5b414620002ce578063f2fde38b14620002e5578063f7d334ba14620002fc576200010c565b806385c1b0ba14620002535780638da5cb5b146200026a5780638e86139b1462000289578063948108f714620002a0576200010c565b80634ee88d3511620000e75780634ee88d3514620001ef5780636ded9eae146200020657806371791aa0146200021d57806379ba50971462000249576200010c565b806328f32f38146200015457806329c5efad146200017e578063349e8cca14620001a7575b7f00000000000000000000000000000000000000000000000000000000000000003660008037600080366000845af43d6000803e8080156200014d573d6000f35b3d6000fd5b005b6200016b62000165366004620041ea565b62000313565b6040519081526020015b60405180910390f35b620001956200018f366004620042d0565b6200068c565b604051620001759493929190620043f8565b7f00000000000000000000000000000000000000000
000000000000000000000005b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200162000175565b620001526200020036600462004435565b62000930565b6200016b6200021736600462004485565b62000998565b620002346200022e366004620042d0565b620009fe565b60405162000175979695949392919062004538565b62000152620010f0565b62000152620002643660046200458a565b620011f3565b60005473ffffffffffffffffffffffffffffffffffffffff16620001c9565b620001526200029a36600462004617565b62001e64565b62000152620002b13660046200467a565b620021ec565b62000152620002c8366004620046a9565b6200247f565b62000195620002df3660046200477f565b62002846565b62000152620002f6366004620047f6565b62002916565b620002346200030d366004620046a9565b6200292e565b6000805473ffffffffffffffffffffffffffffffffffffffff163314801590620003475750620003456009336200296c565b155b156200037f576040517fd48b678b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff89163b620003ce576040517f09ee12d500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b620003d986620029a0565b9050600089307f00000000000000000000000000000000000000000000000000000000000000006040516200040e9062003f7b565b73ffffffffffffffffffffffffffffffffffffffff938416815291831660208301529091166040820152606001604051809103906000f08015801562000458573d6000803e3d6000fd5b5090506200051f826040518060e001604052806000151581526020018c63ffffffff16815260200163ffffffff801681526020018473ffffffffffffffffffffffffffffffffffffffff16815260200160006bffffffffffffffffffffffff16815260200160006bffffffffffffffffffffffff168152602001600063ffffffff168152508a89898080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508b92508a915062002b449050565b6014805474010000000000000000000000000000000000000000900463ffffffff1690806200054e8362004845565b91906101000a81548163ffffffff021916908363ffffffff16021790555050817fbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d0128a8a604051620005c792919063ffffffff929092168
25273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b60405180910390a2817fcba2d5723b2ee59e53a8e8a82a4a7caf4fdfe70e9f7c582950bf7e7a5c24e83d878760405162000603929190620048b4565b60405180910390a2817f2b72ac786c97e68dbab71023ed6f2bdbfc80ad9bb7808941929229d71b7d5664856040516200063d9190620048ca565b60405180910390a2817f3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf485084604051620006779190620048ca565b60405180910390a25098975050505050505050565b600060606000806200069d62002f1f565b600086815260046020908152604091829020825160e081018452815460ff811615158252610100810463ffffffff90811694830194909452650100000000008104841694820194909452690100000000000000000090930473ffffffffffffffffffffffffffffffffffffffff166060840152600101546bffffffffffffffffffffffff80821660808501526c0100000000000000000000000082041660a0840152780100000000000000000000000000000000000000000000000090041660c08201525a9150600080826060015173ffffffffffffffffffffffffffffffffffffffff1663f00e6a2a6040518163ffffffff1660e01b8152600401602060405180830381865afa158015620007b7573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620007dd9190620048ec565b73ffffffffffffffffffffffffffffffffffffffff166013600101600c9054906101000a900463ffffffff1663ffffffff16896040516200081f91906200490c565b60006040518083038160008787f1925050503d80600081146200085f576040519150601f19603f3d011682016040523d82523d6000602084013e62000864565b606091505b50915091505a6200087690856200492a565b935081620008a157600060405180602001604052806000815250600796509650965050505062000927565b80806020019051810190620008b791906200499b565b909750955086620008e557600060405180602001604052806000815250600496509650965050505062000927565b601554865164010000000090910463ffffffff1610156200092357600060405180602001604052806000815250600596509650965050505062000927565b5050505b92959194509250565b6200093b8362002f5a565b6000838152601a602052604090206200095682848362004a90565b50827f2b72ac786c97e68dbab71023ed6f2bdbfc80ad9bb7808941929229d71b7d566483836040516200098b929190620048b4565b604
05180910390a2505050565b6000620009f288888860008989604051806020016040528060008152508a8a8080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506200031392505050565b98975050505050505050565b60006060600080600080600062000a1462002f1f565b600062000a218a62003010565b905060006012604051806101200160405290816000820160009054906101000a900460ff1660ff1660ff1681526020016000820160019054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160059054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160099054906101000a900462ffffff1662ffffff1662ffffff16815260200160008201600c9054906101000a900461ffff1661ffff1661ffff16815260200160008201600e9054906101000a900460ff1615151515815260200160008201600f9054906101000a900460ff161515151581526020016000820160109054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff166bffffffffffffffffffffffff16815260200160008201601c9054906101000a900463ffffffff1663ffffffff1663ffffffff168152505090506000600460008d81526020019081526020016000206040518060e00160405290816000820160009054906101000a900460ff161515151581526020016000820160019054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160059054906101000a900463ffffffff1663ffffffff1663ffffffff1681526020016000820160099054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020016001820160009054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff166bffffffffffffffffffffffff16815260200160018201600c9054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff166bffffffffffffffffffffffff1681526020016001820160189054906101000a900463ffffffff1663ffffffff1663ffffffff168152505090508160a001511562000d45576000604051806020016040528060008152506009600084602001516000808263ffffffff1692509950995099509950995099509950505050620010e4565b604081015163ffffffff9081161462000d9657600060405180602001604052806000815250600160008460200151600
0808263ffffffff1692509950995099509950995099509950505050620010e4565b80511562000ddc576000604051806020016040528060008152506002600084602001516000808263ffffffff1692509950995099509950995099509950505050620010e4565b62000de782620030be565b602083015160155492975090955060009162000e19918591879190640100000000900463ffffffff168a8a87620032b0565b9050806bffffffffffffffffffffffff168260a001516bffffffffffffffffffffffff16101562000e83576000604051806020016040528060008152506006600085602001516000808263ffffffff1692509a509a509a509a509a509a509a5050505050620010e4565b600062000e928e868f62003301565b90505a9850600080846060015173ffffffffffffffffffffffffffffffffffffffff1663f00e6a2a6040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000eea573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062000f109190620048ec565b73ffffffffffffffffffffffffffffffffffffffff166013600101600c9054906101000a900463ffffffff1663ffffffff168460405162000f5291906200490c565b60006040518083038160008787f1925050503d806000811462000f92576040519150601f19603f3d011682016040523d82523d6000602084013e62000f97565b606091505b50915091505a62000fa9908c6200492a565b9a5081620010295760155481516801000000000000000090910463ffffffff1610156200100657505060408051602080820190925260008082529490910151939c509a50600899505063ffffffff9091169550620010e492505050565b602090940151939b5060039a505063ffffffff9092169650620010e49350505050565b808060200190518101906200103f91906200499b565b909e509c508d6200108057505060408051602080820190925260008082529490910151939c509a50600499505063ffffffff9091169550620010e492505050565b6015548d5164010000000090910463ffffffff161015620010d157505060408051602080820190925260008082529490910151939c509a50600599505063ffffffff9091169550620010e492505050565b505050506020015163ffffffff16945050505b92959891949750929550565b60015473ffffffffffffffffffffffffffffffffffffffff16331462001177576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000
0000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b600173ffffffffffffffffffffffffffffffffffffffff821660009081526019602052604090205460ff1660038111156200123257620012326200438d565b141580156200127e5750600373ffffffffffffffffffffffffffffffffffffffff821660009081526019602052604090205460ff1660038111156200127b576200127b6200438d565b14155b15620012b6576040517f0ebeec3c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6013546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1662001316576040517fd12d7d8d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082900362001352576040517f2c2fc94100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805160e081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c081018290526000808567ffffffffffffffff811115620013a957620013a962004071565b604051908082528060200260200182016040528015620013d3578160200160208202803683370190505b50905060008667ffffffffffffffff811115620013f457620013f462004071565b6040519080825280602002602001820160405280156200147b57816020015b6040805160e08101825260008082526020808301829052928201819052606082018190526080820181905260a0820181905260c082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff909201910181620014135790505b50905060008767ffffffffffffffff8111156200149c576200149c62004071565b604051908082528060200260200182016040528015620014d157816020015b6060815260200190600190039081620014bb5790505b50905060008867ffffffffffffffff811115620014f257620014f262004071565b6040519080825280602002602001820160405280156200152757816020015b6060815260200190600190039081620015115790505b50905060008967ffffffffffffffff81111562001548576200154862004
071565b6040519080825280602002602001820160405280156200157d57816020015b6060815260200190600190039081620015675790505b50905060005b8a81101562001b61578b8b82818110620015a157620015a162004bb8565b60209081029290920135600081815260048452604090819020815160e081018352815460ff811615158252610100810463ffffffff90811697830197909752650100000000008104871693820193909352690100000000000000000090920473ffffffffffffffffffffffffffffffffffffffff166060830152600101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a08301527801000000000000000000000000000000000000000000000000900490931660c08401529a509098506200168090508962002f5a565b60608801516040517f1a5da6c800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8c8116600483015290911690631a5da6c890602401600060405180830381600087803b158015620016f057600080fd5b505af115801562001705573d6000803e3d6000fd5b50505050878582815181106200171f576200171f62004bb8565b6020026020010181905250600560008a815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1686828151811062001773576200177362004bb8565b73ffffffffffffffffffffffffffffffffffffffff90921660209283029190910182015260008a81526007909152604090208054620017b290620049e8565b80601f0160208091040260200160405190810160405280929190818152602001828054620017e090620049e8565b8015620018315780601f10620018055761010080835404028352916020019162001831565b820191906000526020600020905b8154815290600101906020018083116200181357829003601f168201915b50505050508482815181106200184b576200184b62004bb8565b6020026020010181905250601a60008a815260200190815260200160002080546200187690620049e8565b80601f0160208091040260200160405190810160405280929190818152602001828054620018a490620049e8565b8015620018f55780601f10620018c957610100808354040283529160200191620018f5565b820191906000526020600020905b815481529060010190602001808311620018d757829003601f168201915b50505050508382815181106200190f576200190f62004bb8565b6020026020010181905250601b60008a815260200190815260200160002
080546200193a90620049e8565b80601f01602080910402602001604051908101604052809291908181526020018280546200196890620049e8565b8015620019b95780601f106200198d57610100808354040283529160200191620019b9565b820191906000526020600020905b8154815290600101906020018083116200199b57829003601f168201915b5050505050828281518110620019d357620019d362004bb8565b60200260200101819052508760a001516bffffffffffffffffffffffff1687620019fe919062004be7565b60008a815260046020908152604080832080547fffffff000000000000000000000000000000000000000000000000000000000016815560010180547fffffffff000000000000000000000000000000000000000000000000000000001690556007909152812091985062001a74919062003f89565b6000898152601a6020526040812062001a8d9162003f89565b6000898152601b6020526040812062001aa69162003f89565b600089815260066020526040902080547fffffffffffffffffffffffff000000000000000000000000000000000000000016905562001ae760028a62003523565b5060a0880151604080516bffffffffffffffffffffffff909216825273ffffffffffffffffffffffffffffffffffffffff8c1660208301528a917fb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff910160405180910390a28062001b588162004bfd565b91505062001583565b508560185462001b7291906200492a565b60185560008b8b868167ffffffffffffffff81111562001b965762001b9662004071565b60405190808252806020026020018201604052801562001bc0578160200160208202803683370190505b508988888860405160200162001bde98979695949392919062004da3565b60405160208183030381529060405290508973ffffffffffffffffffffffffffffffffffffffff16638e86139b6013600001600c9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663c71249ab60038e73ffffffffffffffffffffffffffffffffffffffff1663aab9edd66040518163ffffffff1660e01b8152600401602060405180830381865afa15801562001c9a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062001cc0919062004e73565b866040518463ffffffff1660e01b815260040162001ce19392919062004e98565b600060405180830381865afa15801562001cff573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017ffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405262001d47919081019062004ebf565b6040518263ffffffff1660e01b815260040162001d659190620048ca565b600060405180830381600087803b15801562001d8057600080fd5b505af115801562001d95573d6000803e3d6000fd5b50506040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8d81166004830152602482018b90527f000000000000000000000000000000000000000000000000000000000000000016925063a9059cbb91506044016020604051808303816000875af115801562001e2f573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062001e55919062004ef8565b50505050505050505050505050565b60023360009081526019602052604090205460ff16600381111562001e8d5762001e8d6200438d565b1415801562001ec3575060033360009081526019602052604090205460ff16600381111562001ec05762001ec06200438d565b14155b1562001efb576040517f0ebeec3c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600080808080808062001f11888a018a620050f0565b965096509650965096509650965060005b8751811015620021e057600073ffffffffffffffffffffffffffffffffffffffff1687828151811062001f595762001f5962004bb8565b60200260200101516060015173ffffffffffffffffffffffffffffffffffffffff16036200206d5785818151811062001f965762001f9662004bb8565b6020026020010151307f000000000000000000000000000000000000000000000000000000000000000060405162001fce9062003f7b565b73ffffffffffffffffffffffffffffffffffffffff938416815291831660208301529091166040820152606001604051809103906000f08015801562002018573d6000803e3d6000fd5b508782815181106200202e576200202e62004bb8565b60200260200101516060019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250505b6200212588828151811062002086576200208662004bb8565b6020026020010151888381518110620020a357620020a362004bb8565b6020026020010151878481518110620020c057620020c062004bb8565b6020026020010151878581518110620020dd57620020dd62004bb8565b6020026020010151878681518110620020fa57620020fa62004bb8565b60200
2602001015187878151811062002117576200211762004bb8565b602002602001015162002b44565b8781815181106200213a576200213a62004bb8565b60200260200101517f74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a7188838151811062002178576200217862004bb8565b602002602001015160a0015133604051620021c39291906bffffffffffffffffffffffff92909216825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b60405180910390a280620021d78162004bfd565b91505062001f22565b50505050505050505050565b600082815260046020908152604091829020825160e081018452815460ff81161515825263ffffffff6101008204811694830194909452650100000000008104841694820185905273ffffffffffffffffffffffffffffffffffffffff69010000000000000000009091041660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a083015278010000000000000000000000000000000000000000000000009004821660c08201529114620022ea576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b818160a00151620022fc919062005221565b600084815260046020526040902060010180547fffffffffffffffff000000000000000000000000ffffffffffffffffffffffff166c010000000000000000000000006bffffffffffffffffffffffff93841602179055601854620023649184169062004be7565b6018556040517f23b872dd0000000000000000000000000000000000000000000000000000000081523360048201523060248201526bffffffffffffffffffffffff831660448201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906323b872dd906064016020604051808303816000875af11580156200240e573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062002434919062004ef8565b506040516bffffffffffffffffffffffff83168152339084907fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039060200160405180910390a3505050565b6000818152600460209081526040808320815160e081018352815460ff81161515825263ffffffff6101008204811695830195909552650100000000008104851693820184905273ffffffffffffffffffffffffffffffffffffffff6901000000000000000
0009091041660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a083015278010000000000000000000000000000000000000000000000009004831660c082015292911415906200256860005473ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16149050818015620025c35750808015620025c15750620025b462003531565b836040015163ffffffff16115b155b15620025fb576040517ffbc0357800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b801580156200262e575060008481526005602052604090205473ffffffffffffffffffffffffffffffffffffffff163314155b1562002666576040517ffbdb8e5600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006200267262003531565b9050816200268a576200268760328262004be7565b90505b6000858152600460205260409020805463ffffffff80841665010000000000027fffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffff90921691909117909155620026e69060029087906200352316565b5060135460808501516bffffffffffffffffffffffff91821691600091168211156200274f5760808601516200271d908362005249565b90508560a001516bffffffffffffffffffffffff16816bffffffffffffffffffffffff1611156200274f575060a08501515b808660a0015162002761919062005249565b600088815260046020526040902060010180547fffffffffffffffff000000000000000000000000ffffffffffffffffffffffff166c010000000000000000000000006bffffffffffffffffffffffff93841602179055601454620027c99183911662005221565b601480547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff9290921691909117905560405167ffffffffffffffff84169088907f91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f79118190600090a350505050505050565b600060606000806200285762002f1f565b6000634b56a42e60e01b888888604051602401620028789392919062005271565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167ffff
fffff000000000000000000000000000000000000000000000000000000009093169290921790915290506200290389826200068c565b929c919b50995090975095505050505050565b62002920620035ed565b6200292b8162003670565b50565b600060606000806000806000620029558860405180602001604052806000815250620009fe565b959e949d50929b5090995097509550909350915050565b73ffffffffffffffffffffffffffffffffffffffff8116600090815260018301602052604081205415155b90505b92915050565b6000806000620029c76001620029b562003531565b620029c191906200492a565b62003767565b601454604080516020810193909352309083015274010000000000000000000000000000000000000000900463ffffffff166060820152608001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815282825280516020918201209083015201604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052905060045b600f81101562002ad3578282828151811062002a8f5762002a8f62004bb8565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508062002aca8162004bfd565b91505062002a6f565b5083600181111562002ae95762002ae96200438d565b60f81b81600f8151811062002b025762002b0262004bb8565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535062002b3c81620052a5565b949350505050565b6012546e010000000000000000000000000000900460ff161562002b94576040517f24522f3400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601554835163ffffffff909116101562002bda576040517fae7235df00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6108fc856020015163ffffffff16108062002c185750601454602086015163ffffffff70010000000000000000000000000000000090920482169116115b1562002c50576040517f14c237fb00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000868152600460205260409020546901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff161562002cba576040517f6e3b930b0000000000000000000000000000000000000000000000000000000081526004016040518
0910390fd5b6000868152600460209081526040808320885181548a8501518b85015160608d01517fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000009093169315157fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ff169390931761010063ffffffff92831602177fffffff000000000000000000000000000000000000000000000000ffffffffff1665010000000000938216939093027fffffff0000000000000000000000000000000000000000ffffffffffffffffff1692909217690100000000000000000073ffffffffffffffffffffffffffffffffffffffff9283160217835560808b01516001909301805460a08d015160c08e01516bffffffffffffffffffffffff9687167fffffffffffffffff000000000000000000000000000000000000000000000000909316929092176c010000000000000000000000009690911695909502949094177fffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffff1678010000000000000000000000000000000000000000000000009490931693909302919091179091556005835281842080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169189169190911790556007909152902062002ead8482620052e8565b508460a001516bffffffffffffffffffffffff1660185462002ed0919062004be7565b6018556000868152601a6020526040902062002eed8382620052e8565b506000868152601b6020526040902062002f088282620052e8565b5062002f16600287620038cf565b50505050505050565b321562002f58576040517fb60ac5db00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b60008181526005602052604090205473ffffffffffffffffffffffffffffffffffffffff16331462002fb8576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008181526004602052604090205465010000000000900463ffffffff908116146200292b576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000818160045b600f811015620030a5577fff00000000000000000000000000000000000000000000000000000000000000821683826020811062003059576200305962004bb8565b1a60f81b7effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916146200309057506000949350505050565b806200309c8162004bfd565b9
1505062003017565b5081600f1a600181111562002b3c5762002b3c6200438d565b6000806000836060015162ffffff1690506000808263ffffffff161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a060405180830381865afa1580156200314b573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200317191906200542a565b50945090925050506000811315806200318957508142105b80620031ae5750828015620031ae5750620031a582426200492a565b8463ffffffff16105b15620031bf576016549550620031c3565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a060405180830381865afa1580156200322f573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200325591906200542a565b50945090925050506000811315806200326d57508142105b806200329257508280156200329257506200328982426200492a565b8463ffffffff16105b15620032a3576017549450620032a7565b8094505b50505050915091565b600080620032c488878b60000151620038dd565b9050600080620032e18b8a63ffffffff16858a8a60018b6200397c565b9092509050620032f2818362005221565b9b9a5050505050505050505050565b606060008360018111156200331a576200331a6200438d565b03620033e7576000848152600760205260409081902090517f6e04ff0d0000000000000000000000000000000000000000000000000000000091620033629160240162005522565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009093169290921790915290506200351c565b6001836001811115620033fe57620033fe6200438d565b03620034ea576000828060200190518101906200341c919062005599565b6000868152600760205260409081902090519192507f40691db4000000000000000000000000000000000000000000000000000000009162003463918491602401620056ad565b604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009093169290921790915291506200351c9050565b6040517ff2b2d41200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b9392505050565b600062002997838362003e1e565b600060017f000000000000000000000000000000000000000000000000000000000000000060028111156200356a576200356a6200438d565b03620035e857606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa158015620035bd573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620035e3919062005775565b905090565b504390565b60005473ffffffffffffffffffffffffffffffffffffffff16331462002f58576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016200116e565b3373ffffffffffffffffffffffffffffffffffffffff821603620036f1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016200116e565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600060017f00000000000000000000000000000000000000000000000000000000000000006002811115620037a057620037a06200438d565b03620038c5576000606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa158015620037f5573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200381b919062005775565b905080831015806200383957506101006200383784836200492a565b115b15620038485750600092915050565b6040517f2b407a82000000000000000000000000000000000000000000000000000000008
15260048101849052606490632b407a8290602401602060405180830381865afa1580156200389f573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200351c919062005775565b504090565b919050565b600062002997838362003f29565b60008080856001811115620038f657620038f66200438d565b0362003907575062015f906200392a565b60018560018111156200391e576200391e6200438d565b03620034ea57506201adb05b6200393d63ffffffff851660146200578f565b6200394a846001620057cf565b6200395b9060ff16611d4c6200578f565b62003967908362004be7565b62003973919062004be7565b95945050505050565b6000806000896080015161ffff16876200399791906200578f565b9050838015620039a65750803a105b15620039af57503a5b600060027f00000000000000000000000000000000000000000000000000000000000000006002811115620039e857620039e86200438d565b0362003b5957604080516000815260208101909152851562003a4c5760003660405180608001604052806048815260200162005cc46048913960405160200162003a3593929190620057eb565b604051602081830303815290604052905062003aba565b60155462003a6a90640100000000900463ffffffff16600462005814565b63ffffffff1667ffffffffffffffff81111562003a8b5762003a8b62004071565b6040519080825280601f01601f19166020018201604052801562003ab6576020820181803683370190505b5090505b6040517f49948e0e00000000000000000000000000000000000000000000000000000000815273420000000000000000000000000000000000000f906349948e0e9062003b0c908490600401620048ca565b602060405180830381865afa15801562003b2a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062003b50919062005775565b91505062003cc3565b60017f0000000000000000000000000000000000000000000000000000000000000000600281111562003b905762003b906200438d565b0362003cc357841562003c1857606c73ffffffffffffffffffffffffffffffffffffffff1663c6f7de0e6040518163ffffffff1660e01b8152600401602060405180830381865afa15801562003bea573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062003c10919062005775565b905062003cc3565b6000606c73ffffffffffffffffffffffffffffffffffffffff166341b247a86040518163ffffffff1660e01b815260040160c060405180830381865af
a15801562003c67573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062003c8d919062005843565b505060155492945062003cb293505050640100000000900463ffffffff16826200578f565b62003cbf9060106200578f565b9150505b8462003ce257808b6080015161ffff1662003cdf91906200578f565b90505b62003cf261ffff8716826200588e565b90506000878262003d048c8e62004be7565b62003d1090866200578f565b62003d1c919062004be7565b62003d3090670de0b6b3a76400006200578f565b62003d3c91906200588e565b905060008c6040015163ffffffff1664e8d4a5100062003d5d91906200578f565b898e6020015163ffffffff16858f8862003d7891906200578f565b62003d84919062004be7565b62003d9490633b9aca006200578f565b62003da091906200578f565b62003dac91906200588e565b62003db8919062004be7565b90506b033b2e3c9fd0803ce800000062003dd3828462004be7565b111562003e0c576040517f2ad7547a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b909c909b509950505050505050505050565b6000818152600183016020526040812054801562003f1757600062003e456001836200492a565b855490915060009062003e5b906001906200492a565b905081811462003ec757600086600001828154811062003e7f5762003e7f62004bb8565b906000526020600020015490508087600001848154811062003ea55762003ea562004bb8565b6000918252602080832090910192909255918252600188019052604090208390555b855486908062003edb5762003edb620058ca565b6001900381819060005260206000200160009055905585600101600086815260200190815260200160002060009055600193505050506200299a565b60009150506200299a565b5092915050565b600081815260018301602052604081205462003f72575081546001818101845560008481526020808220909301849055845484825282860190935260409020919091556200299a565b5060006200299a565b6103ca80620058fa83390190565b50805462003f9790620049e8565b6000825580601f1062003fa8575050565b601f0160209004906000526020600020908101906200292b91905b8082111562003fd9576000815560010162003fc3565b5090565b73ffffffffffffffffffffffffffffffffffffffff811681146200292b57600080fd5b803563ffffffff81168114620038ca57600080fd5b803560028110620038ca57600080fd5b60008083601f8401126200403857600080fd5b50813567f
fffffffffffffff8111156200405157600080fd5b6020830191508360208285010111156200406a57600080fd5b9250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60405160e0810167ffffffffffffffff81118282101715620040c657620040c662004071565b60405290565b604051610100810167ffffffffffffffff81118282101715620040c657620040c662004071565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156200413d576200413d62004071565b604052919050565b600067ffffffffffffffff82111562004162576200416262004071565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600082601f830112620041a057600080fd5b8135620041b7620041b18262004145565b620040f3565b818152846020838601011115620041cd57600080fd5b816020850160208301376000918101602001919091529392505050565b60008060008060008060008060e0898b0312156200420757600080fd5b8835620042148162003fdd565b97506200422460208a0162004000565b96506040890135620042368162003fdd565b95506200424660608a0162004015565b9450608089013567ffffffffffffffff808211156200426457600080fd5b620042728c838d0162004025565b909650945060a08b01359150808211156200428c57600080fd5b6200429a8c838d016200418e565b935060c08b0135915080821115620042b157600080fd5b50620042c08b828c016200418e565b9150509295985092959890939650565b60008060408385031215620042e457600080fd5b82359150602083013567ffffffffffffffff8111156200430357600080fd5b62004311858286016200418e565b9150509250929050565b60005b83811015620043385781810151838201526020016200431e565b50506000910152565b600081518084526200435b8160208601602086016200431b565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b600a8110620043f4577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b9052565b841515815260806020820152600062004415608083018662004341565b9050620044266040830185620043bc5
65b82606083015295945050505050565b6000806000604084860312156200444b57600080fd5b83359250602084013567ffffffffffffffff8111156200446a57600080fd5b620044788682870162004025565b9497909650939450505050565b600080600080600080600060a0888a031215620044a157600080fd5b8735620044ae8162003fdd565b9650620044be6020890162004000565b95506040880135620044d08162003fdd565b9450606088013567ffffffffffffffff80821115620044ee57600080fd5b620044fc8b838c0162004025565b909650945060808a01359150808211156200451657600080fd5b50620045258a828b0162004025565b989b979a50959850939692959293505050565b871515815260e0602082015260006200455560e083018962004341565b9050620045666040830188620043bc565b8560608301528460808301528360a08301528260c083015298975050505050505050565b600080600060408486031215620045a057600080fd5b833567ffffffffffffffff80821115620045b957600080fd5b818601915086601f830112620045ce57600080fd5b813581811115620045de57600080fd5b8760208260051b8501011115620045f457600080fd5b602092830195509350508401356200460c8162003fdd565b809150509250925092565b600080602083850312156200462b57600080fd5b823567ffffffffffffffff8111156200464357600080fd5b620046518582860162004025565b90969095509350505050565b80356bffffffffffffffffffffffff81168114620038ca57600080fd5b600080604083850312156200468e57600080fd5b82359150620046a0602084016200465d565b90509250929050565b600060208284031215620046bc57600080fd5b5035919050565b600067ffffffffffffffff821115620046e057620046e062004071565b5060051b60200190565b600082601f830112620046fc57600080fd5b813560206200470f620041b183620046c3565b82815260059290921b840181019181810190868411156200472f57600080fd5b8286015b848110156200477457803567ffffffffffffffff811115620047555760008081fd5b620047658986838b01016200418e565b84525091830191830162004733565b509695505050505050565b600080600080606085870312156200479657600080fd5b84359350602085013567ffffffffffffffff80821115620047b657600080fd5b620047c488838901620046ea565b94506040870135915080821115620047db57600080fd5b50620047ea8782880162004025565b95989497509550505050565b6000602082840312156200480957600080fd5b8135620
0351c8162003fdd565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600063ffffffff80831681810362004861576200486162004816565b6001019392505050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b60208152600062002b3c6020830184866200486b565b60208152600062002997602083018462004341565b8051620038ca8162003fdd565b600060208284031215620048ff57600080fd5b81516200351c8162003fdd565b60008251620049208184602087016200431b565b9190910192915050565b818103818111156200299a576200299a62004816565b80151581146200292b57600080fd5b600082601f8301126200496157600080fd5b815162004972620041b18262004145565b8181528460208386010111156200498857600080fd5b62002b3c8260208301602087016200431b565b60008060408385031215620049af57600080fd5b8251620049bc8162004940565b602084015190925067ffffffffffffffff811115620049da57600080fd5b62004311858286016200494f565b600181811c90821680620049fd57607f821691505b60208210810362004a37577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f82111562004a8b57600081815260208120601f850160051c8101602086101562004a665750805b601f850160051c820191505b8181101562004a875782815560010162004a72565b5050505b505050565b67ffffffffffffffff83111562004aab5762004aab62004071565b62004ac38362004abc8354620049e8565b8362004a3d565b6000601f84116001811462004b18576000851562004ae15750838201355b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600387901b1c1916600186901b17835562004bb1565b6000838152602090207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0861690835b8281101562004b69578685013582556020948501946001909201910162004b47565b508682101562004ba5577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60f88860031b161c19848701351681555b505060018560011b0183555b5050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b808201808211156200299a57620
0299a62004816565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820362004c315762004c3162004816565b5060010190565b600081518084526020808501945080840160005b8381101562004cf75781518051151588528381015163ffffffff908116858a01526040808301519091169089015260608082015173ffffffffffffffffffffffffffffffffffffffff16908901526080808201516bffffffffffffffffffffffff169089015260a08082015162004cd2828b01826bffffffffffffffffffffffff169052565b505060c09081015163ffffffff169088015260e0909601959082019060010162004c4c565b509495945050505050565b600081518084526020808501945080840160005b8381101562004cf757815173ffffffffffffffffffffffffffffffffffffffff168752958201959082019060010162004d16565b600081518084526020808501808196508360051b8101915082860160005b8581101562004d9657828403895262004d8384835162004341565b9885019893509084019060010162004d68565b5091979650505050505050565b60e081528760e082015260006101007f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8a111562004de057600080fd5b8960051b808c8386013783018381038201602085015262004e048282018b62004c38565b915050828103604084015262004e1b818962004d02565b9050828103606084015262004e31818862004d02565b9050828103608084015262004e47818762004d4a565b905082810360a084015262004e5d818662004d4a565b905082810360c0840152620032f2818562004d4a565b60006020828403121562004e8657600080fd5b815160ff811681146200351c57600080fd5b60ff8416815260ff8316602082015260606040820152600062003973606083018462004341565b60006020828403121562004ed257600080fd5b815167ffffffffffffffff81111562004eea57600080fd5b62002b3c848285016200494f565b60006020828403121562004f0b57600080fd5b81516200351c8162004940565b600082601f83011262004f2a57600080fd5b8135602062004f3d620041b183620046c3565b82815260059290921b8401810191818101908684111562004f5d57600080fd5b8286015b8481101562004774578035835291830191830162004f61565b600082601f83011262004f8c57600080fd5b8135602062004f9f620041b183620046c3565b82815260e0928302850182019282820191908785111562004fbf57600080fd5b8387015b85811015620050765781818a03121562004fdd5760008081fd5
b62004fe7620040a0565b813562004ff48162004940565b81526200500382870162004000565b8682015260406200501681840162004000565b908201526060828101356200502b8162003fdd565b9082015260806200503e8382016200465d565b9082015260a0620050518382016200465d565b9082015260c06200506483820162004000565b90820152845292840192810162004fc3565b5090979650505050505050565b600082601f8301126200509557600080fd5b81356020620050a8620041b183620046c3565b82815260059290921b84018101918181019086841115620050c857600080fd5b8286015b8481101562004774578035620050e28162003fdd565b8352918301918301620050cc565b600080600080600080600060e0888a0312156200510c57600080fd5b873567ffffffffffffffff808211156200512557600080fd5b620051338b838c0162004f18565b985060208a01359150808211156200514a57600080fd5b620051588b838c0162004f7a565b975060408a01359150808211156200516f57600080fd5b6200517d8b838c0162005083565b965060608a01359150808211156200519457600080fd5b620051a28b838c0162005083565b955060808a0135915080821115620051b957600080fd5b620051c78b838c01620046ea565b945060a08a0135915080821115620051de57600080fd5b620051ec8b838c01620046ea565b935060c08a01359150808211156200520357600080fd5b50620052128a828b01620046ea565b91505092959891949750929550565b6bffffffffffffffffffffffff81811683821601908082111562003f225762003f2262004816565b6bffffffffffffffffffffffff82811682821603908082111562003f225762003f2262004816565b60408152600062005286604083018662004d4a565b82810360208401526200529b8185876200486b565b9695505050505050565b8051602080830151919081101562004a37577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60209190910360031b1b16919050565b815167ffffffffffffffff81111562005305576200530562004071565b6200531d81620053168454620049e8565b8462004a3d565b602080601f8311600181146200537357600084156200533c5750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b17855562004a87565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b82811015620053c257888601518255948401946001909101908401620053a1565b50858
21015620053ff57878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b805169ffffffffffffffffffff81168114620038ca57600080fd5b600080600080600060a086880312156200544357600080fd5b6200544e866200540f565b945060208601519350604086015192506060860151915062005473608087016200540f565b90509295509295909350565b600081546200548e81620049e8565b808552602060018381168015620054ae5760018114620054e75762005517565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff008516838901528284151560051b890101955062005517565b866000528260002060005b858110156200550f5781548a8201860152908301908401620054f2565b890184019650505b505050505092915050565b6020815260006200299760208301846200547f565b600082601f8301126200554957600080fd5b815160206200555c620041b183620046c3565b82815260059290921b840181019181810190868411156200557c57600080fd5b8286015b8481101562004774578051835291830191830162005580565b600060208284031215620055ac57600080fd5b815167ffffffffffffffff80821115620055c557600080fd5b908301906101008286031215620055db57600080fd5b620055e5620040cc565b82518152602083015160208201526040830151604082015260608301516060820152608083015160808201526200561f60a08401620048df565b60a082015260c0830151828111156200563757600080fd5b620056458782860162005537565b60c08301525060e0830151828111156200565e57600080fd5b6200566c878286016200494f565b60e08301525095945050505050565b600081518084526020808501945080840160005b8381101562004cf7578151875295820195908201906001016200568f565b60408152825160408201526020830151606082015260408301516080820152606083015160a0820152608083015160c082015273ffffffffffffffffffffffffffffffffffffffff60a08401511660e0820152600060c0840151610100808185015250620057206101408401826200567b565b905060e08501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0848303016101208501526200575e828262004341565b91505082810360208401526200397381856200547f565b6000602082840312156200578857600080fd5b5051919050565b6000817ffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffff0483118215151615620057ca57620057ca62004816565b500290565b60ff81811683821601908111156200299a576200299a62004816565b8284823760008382016000815283516200580a8183602088016200431b565b0195945050505050565b600063ffffffff808316818516818304811182151516156200583a576200583a62004816565b02949350505050565b60008060008060008060c087890312156200585d57600080fd5b865195506020870151945060408701519350606087015192506080870151915060a087015190509295509295509295565b600082620058c5577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fdfe60c060405234801561001057600080fd5b506040516103ca3803806103ca83398101604081905261002f91610076565b600080546001600160a01b0319166001600160a01b039384161790559181166080521660a0526100b9565b80516001600160a01b038116811461007157600080fd5b919050565b60008060006060848603121561008b57600080fd5b6100948461005a565b92506100a26020850161005a565b91506100b06040850161005a565b90509250925092565b60805160a0516102e76100e36000396000603801526000818160c4015261011701526102e76000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806379188d161461007b578063f00e6a2a146100aa575b7f00000000000000000000000000000000000000000000000000000000000000003660008037600080366000845af43d6000803e808015610076573d6000f35b3d6000fd5b61008e6100893660046101c1565b6100ee565b6040805192151583526020830191909152015b60405180910390f35b60405173ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001681526020016100a1565b60008054819073ffffffffffffffffffffffffffffffffffffffff16331461011557600080fd5b7f00000000000000000000000000000000000000000000000000000000000000005a91505a61138881101561014957600080fd5b61138881039050856040820482031161016157600080fd5b50803b61016d57600080fd5b6000808551602087016000858af192505a610188908361029a565b9150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604
160045260246000fd5b600080604083850312156101d457600080fd5b82359150602083013567ffffffffffffffff808211156101f357600080fd5b818501915085601f83011261020757600080fd5b81358181111561021957610219610192565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190838211818310171561025f5761025f610192565b8160405282815288602084870101111561027857600080fd5b8260208601602083013760006020848301015280955050505050509250929050565b818103818111156102d4577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b9291505056fea164736f6c6343000810000a307866666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000810000a", +} + +var KeeperRegistryLogicAABI = KeeperRegistryLogicAMetaData.ABI + +var KeeperRegistryLogicABin = KeeperRegistryLogicAMetaData.Bin + +func DeployKeeperRegistryLogicA(auth *bind.TransactOpts, backend bind.ContractBackend, logicB common.Address) (common.Address, *types.Transaction, *KeeperRegistryLogicA, error) { + parsed, err := KeeperRegistryLogicAMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistryLogicABin), backend, logicB) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistryLogicA{address: address, abi: *parsed, KeeperRegistryLogicACaller: KeeperRegistryLogicACaller{contract: contract}, KeeperRegistryLogicATransactor: KeeperRegistryLogicATransactor{contract: contract}, KeeperRegistryLogicAFilterer: KeeperRegistryLogicAFilterer{contract: contract}}, nil +} + +type KeeperRegistryLogicA struct { + address common.Address + abi abi.ABI + KeeperRegistryLogicACaller + KeeperRegistryLogicATransactor + KeeperRegistryLogicAFilterer +} + 
+type KeeperRegistryLogicACaller struct { + contract *bind.BoundContract +} + +type KeeperRegistryLogicATransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistryLogicAFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistryLogicASession struct { + Contract *KeeperRegistryLogicA + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistryLogicACallerSession struct { + Contract *KeeperRegistryLogicACaller + CallOpts bind.CallOpts +} + +type KeeperRegistryLogicATransactorSession struct { + Contract *KeeperRegistryLogicATransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistryLogicARaw struct { + Contract *KeeperRegistryLogicA +} + +type KeeperRegistryLogicACallerRaw struct { + Contract *KeeperRegistryLogicACaller +} + +type KeeperRegistryLogicATransactorRaw struct { + Contract *KeeperRegistryLogicATransactor +} + +func NewKeeperRegistryLogicA(address common.Address, backend bind.ContractBackend) (*KeeperRegistryLogicA, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryLogicAABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistryLogicA(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicA{address: address, abi: abi, KeeperRegistryLogicACaller: KeeperRegistryLogicACaller{contract: contract}, KeeperRegistryLogicATransactor: KeeperRegistryLogicATransactor{contract: contract}, KeeperRegistryLogicAFilterer: KeeperRegistryLogicAFilterer{contract: contract}}, nil +} + +func NewKeeperRegistryLogicACaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryLogicACaller, error) { + contract, err := bindKeeperRegistryLogicA(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicACaller{contract: contract}, nil +} + +func NewKeeperRegistryLogicATransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistryLogicATransactor, error) { + 
contract, err := bindKeeperRegistryLogicA(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicATransactor{contract: contract}, nil +} + +func NewKeeperRegistryLogicAFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistryLogicAFilterer, error) { + contract, err := bindKeeperRegistryLogicA(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAFilterer{contract: contract}, nil +} + +func bindKeeperRegistryLogicA(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistryLogicAMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicARaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryLogicA.Contract.KeeperRegistryLogicACaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicARaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.KeeperRegistryLogicATransactor.contract.Transfer(opts) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicARaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.KeeperRegistryLogicATransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicACallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryLogicA.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicACaller) FallbackTo(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogicA.contract.Call(opts, &out, "fallbackTo") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) FallbackTo() (common.Address, error) { + return _KeeperRegistryLogicA.Contract.FallbackTo(&_KeeperRegistryLogicA.CallOpts) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicACallerSession) FallbackTo() (common.Address, error) { + return _KeeperRegistryLogicA.Contract.FallbackTo(&_KeeperRegistryLogicA.CallOpts) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicACaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogicA.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) Owner() (common.Address, error) { + return _KeeperRegistryLogicA.Contract.Owner(&_KeeperRegistryLogicA.CallOpts) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicACallerSession) Owner() (common.Address, error) { + return _KeeperRegistryLogicA.Contract.Owner(&_KeeperRegistryLogicA.CallOpts) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) 
AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, "acceptOwnership") +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.AcceptOwnership(&_KeeperRegistryLogicA.TransactOpts) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.AcceptOwnership(&_KeeperRegistryLogicA.TransactOpts) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, "addFunds", id, amount) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.AddFunds(&_KeeperRegistryLogicA.TransactOpts, id, amount) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.AddFunds(&_KeeperRegistryLogicA.TransactOpts, id, amount) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, "cancelUpkeep", id) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.CancelUpkeep(&_KeeperRegistryLogicA.TransactOpts, id) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.CancelUpkeep(&_KeeperRegistryLogicA.TransactOpts, id) +} + +func (_KeeperRegistryLogicA 
*KeeperRegistryLogicATransactor) CheckCallback(opts *bind.TransactOpts, id *big.Int, values [][]byte, extraData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, "checkCallback", id, values, extraData) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) CheckCallback(id *big.Int, values [][]byte, extraData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.CheckCallback(&_KeeperRegistryLogicA.TransactOpts, id, values, extraData) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) CheckCallback(id *big.Int, values [][]byte, extraData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.CheckCallback(&_KeeperRegistryLogicA.TransactOpts, id, values, extraData) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) CheckUpkeep(opts *bind.TransactOpts, id *big.Int, triggerData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, "checkUpkeep", id, triggerData) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) CheckUpkeep(id *big.Int, triggerData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.CheckUpkeep(&_KeeperRegistryLogicA.TransactOpts, id, triggerData) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) CheckUpkeep(id *big.Int, triggerData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.CheckUpkeep(&_KeeperRegistryLogicA.TransactOpts, id, triggerData) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) CheckUpkeep0(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, "checkUpkeep0", id) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) CheckUpkeep0(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.CheckUpkeep0(&_KeeperRegistryLogicA.TransactOpts, id) +} + +func 
(_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) CheckUpkeep0(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.CheckUpkeep0(&_KeeperRegistryLogicA.TransactOpts, id) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) ExecuteCallback(opts *bind.TransactOpts, id *big.Int, payload []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, "executeCallback", id, payload) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) ExecuteCallback(id *big.Int, payload []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.ExecuteCallback(&_KeeperRegistryLogicA.TransactOpts, id, payload) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) ExecuteCallback(id *big.Int, payload []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.ExecuteCallback(&_KeeperRegistryLogicA.TransactOpts, id, payload) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, "migrateUpkeeps", ids, destination) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.MigrateUpkeeps(&_KeeperRegistryLogicA.TransactOpts, ids, destination) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.MigrateUpkeeps(&_KeeperRegistryLogicA.TransactOpts, ids, destination) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, 
"receiveUpkeeps", encodedUpkeeps) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.ReceiveUpkeeps(&_KeeperRegistryLogicA.TransactOpts, encodedUpkeeps) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.ReceiveUpkeeps(&_KeeperRegistryLogicA.TransactOpts, encodedUpkeeps) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, "registerUpkeep", target, gasLimit, admin, triggerType, checkData, triggerConfig, offchainConfig) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.RegisterUpkeep(&_KeeperRegistryLogicA.TransactOpts, target, gasLimit, admin, triggerType, checkData, triggerConfig, offchainConfig) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.RegisterUpkeep(&_KeeperRegistryLogicA.TransactOpts, target, gasLimit, admin, triggerType, checkData, triggerConfig, offchainConfig) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) RegisterUpkeep0(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData 
[]byte, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, "registerUpkeep0", target, gasLimit, admin, checkData, offchainConfig) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) RegisterUpkeep0(target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.RegisterUpkeep0(&_KeeperRegistryLogicA.TransactOpts, target, gasLimit, admin, checkData, offchainConfig) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) RegisterUpkeep0(target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.RegisterUpkeep0(&_KeeperRegistryLogicA.TransactOpts, target, gasLimit, admin, checkData, offchainConfig) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) SetUpkeepTriggerConfig(opts *bind.TransactOpts, id *big.Int, triggerConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, "setUpkeepTriggerConfig", id, triggerConfig) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.SetUpkeepTriggerConfig(&_KeeperRegistryLogicA.TransactOpts, id, triggerConfig) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.SetUpkeepTriggerConfig(&_KeeperRegistryLogicA.TransactOpts, id, triggerConfig) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.Transact(opts, "transferOwnership", to) +} + +func 
(_KeeperRegistryLogicA *KeeperRegistryLogicASession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.TransferOwnership(&_KeeperRegistryLogicA.TransactOpts, to) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.TransferOwnership(&_KeeperRegistryLogicA.TransactOpts, to) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.contract.RawTransact(opts, calldata) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicASession) Fallback(calldata []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.Fallback(&_KeeperRegistryLogicA.TransactOpts, calldata) +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicATransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicA.Contract.Fallback(&_KeeperRegistryLogicA.TransactOpts, calldata) +} + +type KeeperRegistryLogicAAdminPrivilegeConfigSetIterator struct { + Event *KeeperRegistryLogicAAdminPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAAdminPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAAdminPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAAdminPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAAdminPrivilegeConfigSet struct { + Admin common.Address + PrivilegeConfig []byte + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*KeeperRegistryLogicAAdminPrivilegeConfigSetIterator, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAAdminPrivilegeConfigSetIterator{contract: _KeeperRegistryLogicA.contract, event: "AdminPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAAdminPrivilegeConfigSet) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + 
return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseAdminPrivilegeConfigSet(log types.Log) (*KeeperRegistryLogicAAdminPrivilegeConfigSet, error) { + event := new(KeeperRegistryLogicAAdminPrivilegeConfigSet) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicACancelledUpkeepReportIterator struct { + Event *KeeperRegistryLogicACancelledUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicACancelledUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicACancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicACancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicACancelledUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicACancelledUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicACancelledUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) 
(*KeeperRegistryLogicACancelledUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicACancelledUpkeepReportIterator{contract: _KeeperRegistryLogicA.contract, event: "CancelledUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicACancelledUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicACancelledUpkeepReport) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseCancelledUpkeepReport(log types.Log) (*KeeperRegistryLogicACancelledUpkeepReport, error) { + event := new(KeeperRegistryLogicACancelledUpkeepReport) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicADedupKeyAddedIterator struct { + Event *KeeperRegistryLogicADedupKeyAdded + + contract *bind.BoundContract + event string + + logs chan 
types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicADedupKeyAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicADedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicADedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicADedupKeyAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicADedupKeyAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicADedupKeyAdded struct { + DedupKey [32]byte + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*KeeperRegistryLogicADedupKeyAddedIterator, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicADedupKeyAddedIterator{contract: _KeeperRegistryLogicA.contract, event: "DedupKeyAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicADedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, 
dedupKeyItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicADedupKeyAdded) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseDedupKeyAdded(log types.Log) (*KeeperRegistryLogicADedupKeyAdded, error) { + event := new(KeeperRegistryLogicADedupKeyAdded) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAFundsAddedIterator struct { + Event *KeeperRegistryLogicAFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*KeeperRegistryLogicAFundsAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryLogicAFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAFundsAddedIterator{contract: _KeeperRegistryLogicA.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAFundsAdded) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err 
+ case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseFundsAdded(log types.Log) (*KeeperRegistryLogicAFundsAdded, error) { + event := new(KeeperRegistryLogicAFundsAdded) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAFundsWithdrawnIterator struct { + Event *KeeperRegistryLogicAFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_KeeperRegistryLogicA.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAFundsWithdrawnIterator{contract: _KeeperRegistryLogicA.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAFundsWithdrawn) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseFundsWithdrawn(log types.Log) (*KeeperRegistryLogicAFundsWithdrawn, error) { + event := new(KeeperRegistryLogicAFundsWithdrawn) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAInsufficientFundsUpkeepReportIterator struct { + Event *KeeperRegistryLogicAInsufficientFundsUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAInsufficientFundsUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := 
<-it.logs: + it.Event = new(KeeperRegistryLogicAInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAInsufficientFundsUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAInsufficientFundsUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAInsufficientFundsUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAInsufficientFundsUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAInsufficientFundsUpkeepReportIterator{contract: _KeeperRegistryLogicA.contract, event: "InsufficientFundsUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, 
"InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAInsufficientFundsUpkeepReport) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseInsufficientFundsUpkeepReport(log types.Log) (*KeeperRegistryLogicAInsufficientFundsUpkeepReport, error) { + event := new(KeeperRegistryLogicAInsufficientFundsUpkeepReport) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAOwnerFundsWithdrawnIterator struct { + Event *KeeperRegistryLogicAOwnerFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAOwnerFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + 
return it.Next() + } +} + +func (it *KeeperRegistryLogicAOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryLogicAOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAOwnerFundsWithdrawnIterator{contract: _KeeperRegistryLogicA.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAOwnerFundsWithdrawn) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryLogicAOwnerFundsWithdrawn, error) { + event := new(KeeperRegistryLogicAOwnerFundsWithdrawn) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + 
return event, nil +} + +type KeeperRegistryLogicAOwnershipTransferRequestedIterator struct { + Event *KeeperRegistryLogicAOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicAOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return 
&KeeperRegistryLogicAOwnershipTransferRequestedIterator{contract: _KeeperRegistryLogicA.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAOwnershipTransferRequested) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryLogicAOwnershipTransferRequested, error) { + event := new(KeeperRegistryLogicAOwnershipTransferRequested) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAOwnershipTransferredIterator struct { + Event *KeeperRegistryLogicAOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*KeeperRegistryLogicAOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicAOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAOwnershipTransferredIterator{contract: _KeeperRegistryLogicA.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAOwnershipTransferred, from []common.Address, to 
[]common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAOwnershipTransferred) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryLogicAOwnershipTransferred, error) { + event := new(KeeperRegistryLogicAOwnershipTransferred) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAPausedIterator struct { + Event *KeeperRegistryLogicAPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAPaused) + if err 
:= it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAPaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryLogicAPausedIterator, error) { + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAPausedIterator{contract: _KeeperRegistryLogicA.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAPaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAPaused) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParsePaused(log types.Log) (*KeeperRegistryLogicAPaused, error) { + event := new(KeeperRegistryLogicAPaused) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return 
event, nil +} + +type KeeperRegistryLogicAPayeesUpdatedIterator struct { + Event *KeeperRegistryLogicAPayeesUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAPayeesUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAPayeesUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAPayeesUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAPayeesUpdated struct { + Transmitters []common.Address + Payees []common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterPayeesUpdated(opts *bind.FilterOpts) (*KeeperRegistryLogicAPayeesUpdatedIterator, error) { + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAPayeesUpdatedIterator{contract: _KeeperRegistryLogicA.contract, event: "PayeesUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAPayeesUpdated) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "PayeesUpdated") + 
if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAPayeesUpdated) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParsePayeesUpdated(log types.Log) (*KeeperRegistryLogicAPayeesUpdated, error) { + event := new(KeeperRegistryLogicAPayeesUpdated) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAPayeeshipTransferRequestedIterator struct { + Event *KeeperRegistryLogicAPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAPayeeshipTransferRequestedIterator) Error() error 
{ + return it.fail +} + +func (it *KeeperRegistryLogicAPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAPayeeshipTransferRequested struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicAPayeeshipTransferRequestedIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAPayeeshipTransferRequestedIterator{contract: _KeeperRegistryLogicA.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "PayeeshipTransferRequested", 
transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAPayeeshipTransferRequested) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryLogicAPayeeshipTransferRequested, error) { + event := new(KeeperRegistryLogicAPayeeshipTransferRequested) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAPayeeshipTransferredIterator struct { + Event *KeeperRegistryLogicAPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } 
+} + +func (it *KeeperRegistryLogicAPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAPayeeshipTransferred struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicAPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAPayeeshipTransferredIterator{contract: _KeeperRegistryLogicA.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, 
"PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAPayeeshipTransferred) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryLogicAPayeeshipTransferred, error) { + event := new(KeeperRegistryLogicAPayeeshipTransferred) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAPaymentWithdrawnIterator struct { + Event *KeeperRegistryLogicAPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*KeeperRegistryLogicAPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAPaymentWithdrawn struct { + Transmitter common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryLogicAPaymentWithdrawnIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAPaymentWithdrawnIterator{contract: _KeeperRegistryLogicA.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "PaymentWithdrawn", 
transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAPaymentWithdrawn) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryLogicAPaymentWithdrawn, error) { + event := new(KeeperRegistryLogicAPaymentWithdrawn) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAReorgedUpkeepReportIterator struct { + Event *KeeperRegistryLogicAReorgedUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAReorgedUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*KeeperRegistryLogicAReorgedUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAReorgedUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAReorgedUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAReorgedUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAReorgedUpkeepReportIterator{contract: _KeeperRegistryLogicA.contract, event: "ReorgedUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAReorgedUpkeepReport) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseReorgedUpkeepReport(log types.Log) 
(*KeeperRegistryLogicAReorgedUpkeepReport, error) { + event := new(KeeperRegistryLogicAReorgedUpkeepReport) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAStaleUpkeepReportIterator struct { + Event *KeeperRegistryLogicAStaleUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAStaleUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAStaleUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAStaleUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAStaleUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAStaleUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return 
&KeeperRegistryLogicAStaleUpkeepReportIterator{contract: _KeeperRegistryLogicA.contract, event: "StaleUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAStaleUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAStaleUpkeepReport) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseStaleUpkeepReport(log types.Log) (*KeeperRegistryLogicAStaleUpkeepReport, error) { + event := new(KeeperRegistryLogicAStaleUpkeepReport) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUnpausedIterator struct { + Event *KeeperRegistryLogicAUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return 
false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryLogicAUnpausedIterator, error) { + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUnpausedIterator{contract: _KeeperRegistryLogicA.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUnpaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUnpaused) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUnpaused(log types.Log) 
(*KeeperRegistryLogicAUnpaused, error) { + event := new(KeeperRegistryLogicAUnpaused) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepAdminTransferRequestedIterator struct { + Event *KeeperRegistryLogicAUpkeepAdminTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepAdminTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepAdminTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepAdminTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepAdminTransferRequested struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicAUpkeepAdminTransferRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule 
[]interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUpkeepAdminTransferRequestedIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepAdminTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepAdminTransferRequested) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryLogicAUpkeepAdminTransferRequested, error) { + event := 
new(KeeperRegistryLogicAUpkeepAdminTransferRequested) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepAdminTransferredIterator struct { + Event *KeeperRegistryLogicAUpkeepAdminTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepAdminTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepAdminTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepAdminTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepAdminTransferred struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicAUpkeepAdminTransferredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = 
append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUpkeepAdminTransferredIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepAdminTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepAdminTransferred) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryLogicAUpkeepAdminTransferred, error) { + event := new(KeeperRegistryLogicAUpkeepAdminTransferred) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, 
"UpkeepAdminTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepCanceledIterator struct { + Event *KeeperRegistryLogicAUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryLogicAUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return 
&KeeperRegistryLogicAUpkeepCanceledIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepCanceled) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepCanceled(log types.Log) (*KeeperRegistryLogicAUpkeepCanceled, error) { + event := new(KeeperRegistryLogicAUpkeepCanceled) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepCheckDataSetIterator struct { + Event *KeeperRegistryLogicAUpkeepCheckDataSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepCheckDataSetIterator) Next() bool { + + if it.fail != nil { + return false + 
} + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepCheckDataSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepCheckDataSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepCheckDataSet struct { + Id *big.Int + NewCheckData []byte + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepCheckDataSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUpkeepCheckDataSetIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepCheckDataSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepCheckDataSet) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepCheckDataSet(log types.Log) (*KeeperRegistryLogicAUpkeepCheckDataSet, error) { + event := new(KeeperRegistryLogicAUpkeepCheckDataSet) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepGasLimitSetIterator struct { + Event *KeeperRegistryLogicAUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepGasLimitSetIterator) Error() error { + return it.fail +} + +func (it 
*KeeperRegistryLogicAUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUpkeepGasLimitSetIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepGasLimitSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepGasLimitSet) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryLogicAUpkeepGasLimitSet, error) { + event := new(KeeperRegistryLogicAUpkeepGasLimitSet) + if err := 
_KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepMigratedIterator struct { + Event *KeeperRegistryLogicAUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepMigrated struct { + Id *big.Int + RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUpkeepMigratedIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepMigrated", logs: logs, sub: 
sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepMigrated) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepMigrated(log types.Log) (*KeeperRegistryLogicAUpkeepMigrated, error) { + event := new(KeeperRegistryLogicAUpkeepMigrated) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepOffchainConfigSetIterator struct { + Event *KeeperRegistryLogicAUpkeepOffchainConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepOffchainConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case 
log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepOffchainConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepOffchainConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepOffchainConfigSet struct { + Id *big.Int + OffchainConfig []byte + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepOffchainConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUpkeepOffchainConfigSetIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepOffchainConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepOffchainConfigSet) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepOffchainConfigSet", 
log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepOffchainConfigSet(log types.Log) (*KeeperRegistryLogicAUpkeepOffchainConfigSet, error) { + event := new(KeeperRegistryLogicAUpkeepOffchainConfigSet) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepPausedIterator struct { + Event *KeeperRegistryLogicAUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepPaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) 
(*KeeperRegistryLogicAUpkeepPausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUpkeepPausedIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepPaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepPaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepPaused) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepPaused(log types.Log) (*KeeperRegistryLogicAUpkeepPaused, error) { + event := new(KeeperRegistryLogicAUpkeepPaused) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepPerformedIterator struct { + Event *KeeperRegistryLogicAUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*KeeperRegistryLogicAUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepPerformed struct { + Id *big.Int + Success bool + TotalPayment *big.Int + GasUsed *big.Int + GasOverhead *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*KeeperRegistryLogicAUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUpkeepPerformedIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepPerformed, id []*big.Int, 
success []bool) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepPerformed) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepPerformed(log types.Log) (*KeeperRegistryLogicAUpkeepPerformed, error) { + event := new(KeeperRegistryLogicAUpkeepPerformed) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepPrivilegeConfigSetIterator struct { + Event *KeeperRegistryLogicAUpkeepPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := 
<-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepPrivilegeConfigSet struct { + Id *big.Int + PrivilegeConfig []byte + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepPrivilegeConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUpkeepPrivilegeConfigSetIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepPrivilegeConfigSet) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, 
"UpkeepPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepPrivilegeConfigSet(log types.Log) (*KeeperRegistryLogicAUpkeepPrivilegeConfigSet, error) { + event := new(KeeperRegistryLogicAUpkeepPrivilegeConfigSet) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepReceivedIterator struct { + Event *KeeperRegistryLogicAUpkeepReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepReceivedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepReceived struct { + Id *big.Int + StartingBalance *big.Int + ImportedFrom common.Address + Raw types.Log +} + +func 
(_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepReceivedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUpkeepReceivedIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepReceived", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepReceived, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepReceived) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepReceived(log types.Log) (*KeeperRegistryLogicAUpkeepReceived, error) { + event := new(KeeperRegistryLogicAUpkeepReceived) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepRegisteredIterator struct { + Event *KeeperRegistryLogicAUpkeepRegistered + + contract *bind.BoundContract + event 
string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepRegistered struct { + Id *big.Int + PerformGas uint32 + Admin common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUpkeepRegisteredIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = 
append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepRegistered) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepRegistered(log types.Log) (*KeeperRegistryLogicAUpkeepRegistered, error) { + event := new(KeeperRegistryLogicAUpkeepRegistered) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepTriggerConfigSetIterator struct { + Event *KeeperRegistryLogicAUpkeepTriggerConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepTriggerConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true 
+ it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepTriggerConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepTriggerConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepTriggerConfigSet struct { + Id *big.Int + TriggerConfig []byte + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepTriggerConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicAUpkeepTriggerConfigSetIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepTriggerConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepTriggerConfigSet) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA 
*KeeperRegistryLogicAFilterer) ParseUpkeepTriggerConfigSet(log types.Log) (*KeeperRegistryLogicAUpkeepTriggerConfigSet, error) { + event := new(KeeperRegistryLogicAUpkeepTriggerConfigSet) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicAUpkeepUnpausedIterator struct { + Event *KeeperRegistryLogicAUpkeepUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicAUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicAUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicAUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicAUpkeepUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicAUpkeepUnpaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepUnpausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.FilterLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return 
nil, err + } + return &KeeperRegistryLogicAUpkeepUnpausedIterator{contract: _KeeperRegistryLogicA.contract, event: "UpkeepUnpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepUnpaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicA.contract.WatchLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicAUpkeepUnpaused) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicAFilterer) ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryLogicAUpkeepUnpaused, error) { + event := new(KeeperRegistryLogicAUpkeepUnpaused) + if err := _KeeperRegistryLogicA.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicA) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistryLogicA.abi.Events["AdminPrivilegeConfigSet"].ID: + return _KeeperRegistryLogicA.ParseAdminPrivilegeConfigSet(log) + case _KeeperRegistryLogicA.abi.Events["CancelledUpkeepReport"].ID: + return _KeeperRegistryLogicA.ParseCancelledUpkeepReport(log) + case _KeeperRegistryLogicA.abi.Events["DedupKeyAdded"].ID: + return _KeeperRegistryLogicA.ParseDedupKeyAdded(log) + case 
_KeeperRegistryLogicA.abi.Events["FundsAdded"].ID: + return _KeeperRegistryLogicA.ParseFundsAdded(log) + case _KeeperRegistryLogicA.abi.Events["FundsWithdrawn"].ID: + return _KeeperRegistryLogicA.ParseFundsWithdrawn(log) + case _KeeperRegistryLogicA.abi.Events["InsufficientFundsUpkeepReport"].ID: + return _KeeperRegistryLogicA.ParseInsufficientFundsUpkeepReport(log) + case _KeeperRegistryLogicA.abi.Events["OwnerFundsWithdrawn"].ID: + return _KeeperRegistryLogicA.ParseOwnerFundsWithdrawn(log) + case _KeeperRegistryLogicA.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistryLogicA.ParseOwnershipTransferRequested(log) + case _KeeperRegistryLogicA.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistryLogicA.ParseOwnershipTransferred(log) + case _KeeperRegistryLogicA.abi.Events["Paused"].ID: + return _KeeperRegistryLogicA.ParsePaused(log) + case _KeeperRegistryLogicA.abi.Events["PayeesUpdated"].ID: + return _KeeperRegistryLogicA.ParsePayeesUpdated(log) + case _KeeperRegistryLogicA.abi.Events["PayeeshipTransferRequested"].ID: + return _KeeperRegistryLogicA.ParsePayeeshipTransferRequested(log) + case _KeeperRegistryLogicA.abi.Events["PayeeshipTransferred"].ID: + return _KeeperRegistryLogicA.ParsePayeeshipTransferred(log) + case _KeeperRegistryLogicA.abi.Events["PaymentWithdrawn"].ID: + return _KeeperRegistryLogicA.ParsePaymentWithdrawn(log) + case _KeeperRegistryLogicA.abi.Events["ReorgedUpkeepReport"].ID: + return _KeeperRegistryLogicA.ParseReorgedUpkeepReport(log) + case _KeeperRegistryLogicA.abi.Events["StaleUpkeepReport"].ID: + return _KeeperRegistryLogicA.ParseStaleUpkeepReport(log) + case _KeeperRegistryLogicA.abi.Events["Unpaused"].ID: + return _KeeperRegistryLogicA.ParseUnpaused(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepAdminTransferRequested"].ID: + return _KeeperRegistryLogicA.ParseUpkeepAdminTransferRequested(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepAdminTransferred"].ID: + return 
_KeeperRegistryLogicA.ParseUpkeepAdminTransferred(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepCanceled"].ID: + return _KeeperRegistryLogicA.ParseUpkeepCanceled(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepCheckDataSet"].ID: + return _KeeperRegistryLogicA.ParseUpkeepCheckDataSet(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepGasLimitSet"].ID: + return _KeeperRegistryLogicA.ParseUpkeepGasLimitSet(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepMigrated"].ID: + return _KeeperRegistryLogicA.ParseUpkeepMigrated(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepOffchainConfigSet"].ID: + return _KeeperRegistryLogicA.ParseUpkeepOffchainConfigSet(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepPaused"].ID: + return _KeeperRegistryLogicA.ParseUpkeepPaused(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepPerformed"].ID: + return _KeeperRegistryLogicA.ParseUpkeepPerformed(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepPrivilegeConfigSet"].ID: + return _KeeperRegistryLogicA.ParseUpkeepPrivilegeConfigSet(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepReceived"].ID: + return _KeeperRegistryLogicA.ParseUpkeepReceived(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepRegistered"].ID: + return _KeeperRegistryLogicA.ParseUpkeepRegistered(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepTriggerConfigSet"].ID: + return _KeeperRegistryLogicA.ParseUpkeepTriggerConfigSet(log) + case _KeeperRegistryLogicA.abi.Events["UpkeepUnpaused"].ID: + return _KeeperRegistryLogicA.ParseUpkeepUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistryLogicAAdminPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x7c44b4eb59ee7873514e7e43e7718c269d872965938b288aa143befca62f99d2") +} + +func (KeeperRegistryLogicACancelledUpkeepReport) Topic() common.Hash { + return common.HexToHash("0xc3237c8807c467c1b39b8d0395eff077313e691bf0a7388106792564ebfd5636") +} + 
+func (KeeperRegistryLogicADedupKeyAdded) Topic() common.Hash { + return common.HexToHash("0xa4a4e334c0e330143f9437484fe516c13bc560b86b5b0daf58e7084aaac228f2") +} + +func (KeeperRegistryLogicAFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (KeeperRegistryLogicAFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (KeeperRegistryLogicAInsufficientFundsUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x377c8b0c126ae5248d27aca1c76fac4608aff85673ee3caf09747e1044549e02") +} + +func (KeeperRegistryLogicAOwnerFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1") +} + +func (KeeperRegistryLogicAOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistryLogicAOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistryLogicAPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (KeeperRegistryLogicAPayeesUpdated) Topic() common.Hash { + return common.HexToHash("0xa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725") +} + +func (KeeperRegistryLogicAPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (KeeperRegistryLogicAPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (KeeperRegistryLogicAPaymentWithdrawn) Topic() common.Hash { + return 
common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (KeeperRegistryLogicAReorgedUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x6aa7f60c176da7af894b384daea2249497448137f5943c1237ada8bc92bdc301") +} + +func (KeeperRegistryLogicAStaleUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x405288ea7be309e16cfdf481367f90a413e1d4634fcdaf8966546db9b93012e8") +} + +func (KeeperRegistryLogicAUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (KeeperRegistryLogicAUpkeepAdminTransferRequested) Topic() common.Hash { + return common.HexToHash("0xb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b35") +} + +func (KeeperRegistryLogicAUpkeepAdminTransferred) Topic() common.Hash { + return common.HexToHash("0x5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c") +} + +func (KeeperRegistryLogicAUpkeepCanceled) Topic() common.Hash { + return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (KeeperRegistryLogicAUpkeepCheckDataSet) Topic() common.Hash { + return common.HexToHash("0xcba2d5723b2ee59e53a8e8a82a4a7caf4fdfe70e9f7c582950bf7e7a5c24e83d") +} + +func (KeeperRegistryLogicAUpkeepGasLimitSet) Topic() common.Hash { + return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c") +} + +func (KeeperRegistryLogicAUpkeepMigrated) Topic() common.Hash { + return common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff") +} + +func (KeeperRegistryLogicAUpkeepOffchainConfigSet) Topic() common.Hash { + return common.HexToHash("0x3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf4850") +} + +func (KeeperRegistryLogicAUpkeepPaused) Topic() common.Hash { + return common.HexToHash("0x8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f") +} + +func (KeeperRegistryLogicAUpkeepPerformed) 
Topic() common.Hash { + return common.HexToHash("0xad8cc9579b21dfe2c2f6ea35ba15b656e46b4f5b0cb424f52739b8ce5cac9c5b") +} + +func (KeeperRegistryLogicAUpkeepPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x2fd8d70753a007014349d4591843cc031c2dd7a260d7dd82eca8253686ae7769") +} + +func (KeeperRegistryLogicAUpkeepReceived) Topic() common.Hash { + return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71") +} + +func (KeeperRegistryLogicAUpkeepRegistered) Topic() common.Hash { + return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + +func (KeeperRegistryLogicAUpkeepTriggerConfigSet) Topic() common.Hash { + return common.HexToHash("0x2b72ac786c97e68dbab71023ed6f2bdbfc80ad9bb7808941929229d71b7d5664") +} + +func (KeeperRegistryLogicAUpkeepUnpaused) Topic() common.Hash { + return common.HexToHash("0x7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a47456") +} + +func (_KeeperRegistryLogicA *KeeperRegistryLogicA) Address() common.Address { + return _KeeperRegistryLogicA.address +} + +type KeeperRegistryLogicAInterface interface { + FallbackTo(opts *bind.CallOpts) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) + + CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + CheckCallback(opts *bind.TransactOpts, id *big.Int, values [][]byte, extraData []byte) (*types.Transaction, error) + + CheckUpkeep(opts *bind.TransactOpts, id *big.Int, triggerData []byte) (*types.Transaction, error) + + CheckUpkeep0(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + ExecuteCallback(opts *bind.TransactOpts, id *big.Int, payload []byte) (*types.Transaction, error) + + MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination 
common.Address) (*types.Transaction, error) + + ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) + + RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, checkData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) + + RegisterUpkeep0(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) + + SetUpkeepTriggerConfig(opts *bind.TransactOpts, id *big.Int, triggerConfig []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) + + FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*KeeperRegistryLogicAAdminPrivilegeConfigSetIterator, error) + + WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) + + ParseAdminPrivilegeConfigSet(log types.Log) (*KeeperRegistryLogicAAdminPrivilegeConfigSet, error) + + FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicACancelledUpkeepReportIterator, error) + + WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicACancelledUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseCancelledUpkeepReport(log types.Log) (*KeeperRegistryLogicACancelledUpkeepReport, error) + + FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*KeeperRegistryLogicADedupKeyAddedIterator, error) + + WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicADedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) + + ParseDedupKeyAdded(log types.Log) (*KeeperRegistryLogicADedupKeyAdded, error) + + FilterFundsAdded(opts 
*bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryLogicAFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*KeeperRegistryLogicAFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAFundsWithdrawn, id []*big.Int) (event.Subscription, error) + + ParseFundsWithdrawn(log types.Log) (*KeeperRegistryLogicAFundsWithdrawn, error) + + FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAInsufficientFundsUpkeepReportIterator, error) + + WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseInsufficientFundsUpkeepReport(log types.Log) (*KeeperRegistryLogicAInsufficientFundsUpkeepReport, error) + + FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryLogicAOwnerFundsWithdrawnIterator, error) + + WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAOwnerFundsWithdrawn) (event.Subscription, error) + + ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryLogicAOwnerFundsWithdrawn, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicAOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryLogicAOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to 
[]common.Address) (*KeeperRegistryLogicAOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistryLogicAOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryLogicAPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*KeeperRegistryLogicAPaused, error) + + FilterPayeesUpdated(opts *bind.FilterOpts) (*KeeperRegistryLogicAPayeesUpdatedIterator, error) + + WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAPayeesUpdated) (event.Subscription, error) + + ParsePayeesUpdated(log types.Log) (*KeeperRegistryLogicAPayeesUpdated, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicAPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryLogicAPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicAPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryLogicAPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter 
[]common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryLogicAPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryLogicAPaymentWithdrawn, error) + + FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAReorgedUpkeepReportIterator, error) + + WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseReorgedUpkeepReport(log types.Log) (*KeeperRegistryLogicAReorgedUpkeepReport, error) + + FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAStaleUpkeepReportIterator, error) + + WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAStaleUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseStaleUpkeepReport(log types.Log) (*KeeperRegistryLogicAStaleUpkeepReport, error) + + FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryLogicAUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*KeeperRegistryLogicAUnpaused, error) + + FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicAUpkeepAdminTransferRequestedIterator, error) + + WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryLogicAUpkeepAdminTransferRequested, error) + + FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, 
to []common.Address) (*KeeperRegistryLogicAUpkeepAdminTransferredIterator, error) + + WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryLogicAUpkeepAdminTransferred, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryLogicAUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*KeeperRegistryLogicAUpkeepCanceled, error) + + FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepCheckDataSetIterator, error) + + WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepCheckDataSet(log types.Log) (*KeeperRegistryLogicAUpkeepCheckDataSet, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepGasLimitSetIterator, error) + + WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryLogicAUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepMigrated(log types.Log) (*KeeperRegistryLogicAUpkeepMigrated, error) + + FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepOffchainConfigSetIterator, error) + + WatchUpkeepOffchainConfigSet(opts 
*bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepOffchainConfigSet(log types.Log) (*KeeperRegistryLogicAUpkeepOffchainConfigSet, error) + + FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepPausedIterator, error) + + WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepPaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPaused(log types.Log) (*KeeperRegistryLogicAUpkeepPaused, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*KeeperRegistryLogicAUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*KeeperRegistryLogicAUpkeepPerformed, error) + + FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepPrivilegeConfigSetIterator, error) + + WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPrivilegeConfigSet(log types.Log) (*KeeperRegistryLogicAUpkeepPrivilegeConfigSet, error) + + FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*KeeperRegistryLogicAUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) 
(*KeeperRegistryLogicAUpkeepRegistered, error) + + FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepTriggerConfigSetIterator, error) + + WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepTriggerConfigSet(log types.Log) (*KeeperRegistryLogicAUpkeepTriggerConfigSet, error) + + FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicAUpkeepUnpausedIterator, error) + + WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicAUpkeepUnpaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryLogicAUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_registry_logic_b_wrapper_2_1/keeper_registry_logic_b_wrapper_2_1.go b/core/gethwrappers/generated/keeper_registry_logic_b_wrapper_2_1/keeper_registry_logic_b_wrapper_2_1.go new file mode 100644 index 00000000..e6aabcda --- /dev/null +++ b/core/gethwrappers/generated/keeper_registry_logic_b_wrapper_2_1/keeper_registry_logic_b_wrapper_2_1.go @@ -0,0 +1,5708 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package keeper_registry_logic_b_wrapper_2_1 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type KeeperRegistryBase21OnchainConfig struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + MaxCheckDataSize uint32 + MaxPerformDataSize uint32 + MaxRevertDataSize uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrars []common.Address + UpkeepPrivilegeManager common.Address +} + +type KeeperRegistryBase21State struct { + Nonce uint32 + OwnerLinkBalance *big.Int + ExpectedLinkBalance *big.Int + TotalPremium *big.Int + NumUpkeeps *big.Int + ConfigCount uint32 + LatestConfigBlockNumber uint32 + LatestConfigDigest [32]byte + LatestEpoch uint32 + Paused bool +} + +type KeeperRegistryBase21UpkeepInfo struct { + Target common.Address + PerformGas uint32 + CheckData []byte + Balance *big.Int + Admin common.Address + MaxValidBlocknumber uint64 + LastPerformedBlockNumber uint32 + AmountSpent *big.Int + Paused bool + OffchainConfig []byte +} + +var KeeperRegistryLogicBMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"enumKeeperRegistryBase2_1.Mode\",\"name\":\"mode\",\"type\":\"uint8\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkNativeFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"fastGasFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"automationForwarderLogic\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CheckDataExceedsLimit\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ConfigDigestMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfFaultyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientFunds\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidReport\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTransmitter\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTrigger\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTriggerType\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxCheckDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"Max
PerformDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveTransmitters\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByUpkeepPrivilegeManager\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyPausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUnpausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReentrantCall\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistryPaused\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedTransmitter\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepAlreadyExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepCancelled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name
\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"AdminPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"CancelledUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"dedupKey\",\"type\":\"bytes32\"}],\"name\":\"DedupKeyAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"InsufficientFundsUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internal
Type\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"PayeesUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"ad
dress\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"ReorgedUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"StaleUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"UpkeepCheckDataSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[
{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepOffchainConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepPaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasOverhead\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"nam
e\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepTriggerConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepUnpaused\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"acceptUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveUpkeepIDs\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"getAdminPrivil
egeConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAutomationForwarderLogic\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCancellationDelay\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConditionalGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFastGasFeedAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepID\",\"type\":\"uint256\"}],\"name\":\"getForwarder\",\"outputs\":[{\"internalType\":\"contractIAutomationForwarder\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkNativeFeedAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLogGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"enumKeeperRegistryBase2_1.Trigger\",\"name
\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"getMaxPaymentForGas\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"maxPayment\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"minBalance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMode\",\"outputs\":[{\"internalType\":\"enumKeeperRegistryBase2_1.Mode\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"}],\"name\":\"getPeerRegistryMigrationPermission\",\"outputs\":[{\"internalType\":\"enumKeeperRegistryBase2_1.MigrationPermission\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPerPerformByteGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPerSignerGasOverhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getSignerInfo\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"index\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getState\",\"output
s\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"nonce\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"ownerLinkBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"expectedLinkBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"totalPremium\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"numUpkeeps\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"latestConfigBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"latestConfigDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"latestEpoch\",\"type\":\"uint32\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"}],\"internalType\":\"structKeeperRegistryBase2_1.State\",\"name\":\"state\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCheckDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxRevertDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"registrar
s\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"upkeepPrivilegeManager\",\"type\":\"address\"}],\"internalType\":\"structKeeperRegistryBase2_1.OnchainConfig\",\"name\":\"config\",\"type\":\"tuple\"},{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getTransmitterInfo\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"index\",\"type\":\"uint8\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"lastCollected\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getTriggerType\",\"outputs\":[{\"internalType\":\"enumKeeperRegistryBase2_1.Trigger\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getUpkeep\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"lastPerformedBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"na
me\":\"amountSpent\",\"type\":\"uint96\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"internalType\":\"structKeeperRegistryBase2_1.UpkeepInfo\",\"name\":\"upkeepInfo\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepPrivilegeConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepTriggerConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"dedupKey\",\"type\":\"bytes32\"}],\"name\":\"hasDedupKey\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"pauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"newPrivilegeConfig\",\"type\":\"bytes\"}],\"name\":\"setAdminPrivilegeConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"payees\",\"type
\":\"address[]\"}],\"name\":\"setPayees\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"},{\"internalType\":\"enumKeeperRegistryBase2_1.MigrationPermission\",\"name\":\"permission\",\"type\":\"uint8\"}],\"name\":\"setPeerRegistryMigrationPermission\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"setUpkeepCheckData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"setUpkeepGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"config\",\"type\":\"bytes\"}],\"name\":\"setUpkeepOffchainConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"newPrivilegeConfig\",\"type\":\"bytes\"}],\"name\":\"setUpkeepPrivilegeConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"i
d\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"unpauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepTranscoderVersion\",\"outputs\":[{\"internalType\":\"enumUpkeepFormat\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepVersion\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawOwnerFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x6101206040523480156200001257600080fd5b5060405162004fbc38038062004fbc8339810160408190526200003591620001e9565b84848484843380600081620000915760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000c457620000c48162000121565b505050846002811115620000dc57620000dc6200025e565b60e0816002811115620000f357620000f36200025e565b9052506001600160a01b0393841660805291831660a052821660c05216610100525062000274945050505050565b336001600160a01b038216036200017b5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000088565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001e457600080fd5b919050565b600080600080600060a086880312156200020257600080fd5b8551600381106200021257600080fd5b94506200022260208701620001cc565b93506200023260408701620001cc565b92506200024260608701620001cc565b91506200025260808701620001cc565b90509295509295909350565b634e487b7160e01b600052602160045260246000fd5b60805160a05160c05160e05161010051614cbd620002ff600039600061058701526000818161052501528181613352015281816138d50152613a680152600081816105f4015261314601526000818161071c01526132200152600081816107aa01528181611bab01528181611e81015281816122ee0152818161280101526128850152614cbd6000f3fe608060405234801561001057600080fd5b50600436106103365760003560e01c806379ba5097116101b2578063b121e147116100f9578063ca30e603116100a2578063eb5dcd6c1161007c578063eb5dcd6c146107f4578063ed56b3e114610807578063f2fde38b1461087a578063faa3e9961461088d57600080fd5b8063ca30e603146107a8578063cd7f71b5146107ce578063d7632648146107e157600080fd5b8063b657bc9c116100d3578063b657bc9c1461076d578063b79550be14610780578063c7c3a19a1461078857600080fd5b8063b121e14714610740578063b148ab6b14610
753578063b6511a2a1461076657600080fd5b80638dcf0fe71161015b578063aab9edd611610135578063aab9edd614610703578063abc76ae014610712578063b10b673c1461071a57600080fd5b80638dcf0fe7146106ca578063a710b221146106dd578063a72aa27e146106f057600080fd5b80638456cb591161018c5780638456cb59146106915780638765ecbe146106995780638da5cb5b146106ac57600080fd5b806379ba50971461063e57806379ea9943146106465780637d9b97e01461068957600080fd5b8063421d183b116102815780635165f2f51161022a5780636209e1e9116102045780636209e1e9146105df5780636709d0e5146105f2578063671d36ed14610618578063744bfe611461062b57600080fd5b80635165f2f5146105725780635425d8ac146105855780635b6aa71c146105cc57600080fd5b80634b4fd03b1161025b5780634b4fd03b146105235780634ca16c52146105495780635147cd591461055257600080fd5b8063421d183b1461047a57806344cb70b8146104e057806348013d7b1461051357600080fd5b80631a2af011116102e3578063232c1cc5116102bd578063232c1cc5146104585780633b9cce591461045f5780633f4ba83a1461047257600080fd5b80631a2af011146103d45780631e010439146103e7578063207b65161461044557600080fd5b80631865c57d116103145780631865c57d14610388578063187256e8146103a157806319d97a94146103b457600080fd5b8063050ee65d1461033b57806306e3b632146103535780630b7d33e614610373575b600080fd5b6201adb05b6040519081526020015b60405180910390f35b610366610361366004613df3565b6108d3565b60405161034a9190613e15565b610386610381366004613ea2565b6109f0565b005b610390610aaa565b60405161034a9594939291906140a5565b6103866103af3660046141dc565b610ec3565b6103c76103c2366004614219565b610f34565b60405161034a91906142a0565b6103866103e23660046142b3565b610fd6565b6104286103f5366004614219565b6000908152600460205260409020600101546c0100000000000000000000000090046bffffffffffffffffffffffff1690565b6040516bffffffffffffffffffffffff909116815260200161034a565b6103c7610453366004614219565b6110dc565b6014610340565b61038661046d3660046142d8565b6110f9565b61038661134f565b61048d61048836600461434d565b6113b5565b60408051951515865260ff90941660208601526bffffffffffffffffffffffff9283169385019390935216606083015273fffffffffffffffffffffffffffffffff
fffffff16608082015260a00161034a565b6105036104ee366004614219565b60009081526008602052604090205460ff1690565b604051901515815260200161034a565b60005b60405161034a91906143a9565b7f0000000000000000000000000000000000000000000000000000000000000000610516565b62015f90610340565b610565610560366004614219565b6114e8565b60405161034a91906143bc565b610386610580366004614219565b6114f3565b7f00000000000000000000000000000000000000000000000000000000000000005b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161034a565b6104286105da3660046143e9565b61166a565b6103c76105ed36600461434d565b61179c565b7f00000000000000000000000000000000000000000000000000000000000000006105a7565b610386610626366004614422565b6117d0565b6103866106393660046142b3565b6118aa565b610386611ca7565b6105a7610654366004614219565b6000908152600460205260409020546901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1690565b610386611da9565b610386611f04565b6103866106a7366004614219565b611f75565b60005473ffffffffffffffffffffffffffffffffffffffff166105a7565b6103866106d8366004613ea2565b6120ef565b6103866106eb36600461445e565b612144565b6103866106fe36600461448c565b6123c0565b6040516003815260200161034a565b611d4c610340565b7f00000000000000000000000000000000000000000000000000000000000000006105a7565b61038661074e36600461434d565b6124b5565b610386610761366004614219565b6125ad565b6032610340565b61042861077b366004614219565b61279b565b6103866127c8565b61079b610796366004614219565b612924565b60405161034a91906144af565b7f00000000000000000000000000000000000000000000000000000000000000006105a7565b6103866107dc366004613ea2565b612cf7565b6104286107ef366004614219565b612d8e565b61038661080236600461445e565b612d99565b61086161081536600461434d565b73ffffffffffffffffffffffffffffffffffffffff166000908152600c602090815260409182902082518084019093525460ff8082161515808552610100909204169290910182905291565b60408051921515835260ff90911660208301520161034a565b61038661088836600461434d565b612ef7565b6108c661089b36600461434d565b73ffffffffffffffffffffffffffffffffffffffff1660009081526
019602052604090205460ff1690565b60405161034a91906145e6565b606060006108e16002612f0b565b905080841061091c576040517f1390f2a100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006109288486614629565b905081811180610936575083155b6109405780610942565b815b90506000610950868361463c565b67ffffffffffffffff8111156109685761096861464f565b604051908082528060200260200182016040528015610991578160200160208202803683370190505b50905060005b81518110156109e4576109b56109ad8883614629565b600290612f15565b8282815181106109c7576109c761467e565b6020908102919091010152806109dc816146ad565b915050610997565b50925050505b92915050565b6015546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff163314610a51576040517f77c3599200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000838152601c60205260409020610a6a828483614787565b50827f2fd8d70753a007014349d4591843cc031c2dd7a260d7dd82eca8253686ae77698383604051610a9d9291906148a2565b60405180910390a2505050565b6040805161014081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e081018290526101008101829052610120810191909152604080516101e08101825260008082526020820181905291810182905260608082018390526080820183905260a0820183905260c0820183905260e08201839052610100820183905261012082018390526101408201839052610160820183905261018082018390526101a08201526101c0810191909152604080516101408101825260145463ffffffff7401000000000000000000000000000000000000000082041682526bffffffffffffffffffffffff908116602083015260185492820192909252601254700100000000000000000000000000000000900490911660608281019190915290819060009060808101610bf76002612f0b565b81526014547801000000000000000000000000000000000000000000000000810463ffffffff9081166020808501919091527c0100000000000000000000000000000000000000000000000000000000808404831660408087019190915260115460608088019190915260125492830485166080808901919091526e010000000000000000000000000000840460ff16151560a09889015282516101e0810184526101008086048
81682526501000000000086048816968201969096526c010000000000000000000000008089048816948201949094526901000000000000000000850462ffffff16928101929092529282900461ffff16928101929092526013546bffffffffffffffffffffffff811696830196909652700100000000000000000000000000000000909404831660c082015260155480841660e0830152640100000000810484169282019290925268010000000000000000909104909116610120820152601654610140820152601754610160820152910473ffffffffffffffffffffffffffffffffffffffff166101808201529095506101a08101610d9f6009612f28565b81526015546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff16602091820152601254600d80546040805182860281018601909152818152949850899489949293600e9360ff909116928591830182828015610e4257602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610e17575b5050505050925081805480602002602001604051908101604052809291908181526020018280548015610eab57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610e80575b50505050509150945094509450945094509091929394565b610ecb612f35565b73ffffffffffffffffffffffffffffffffffffffff8216600090815260196020526040902080548291907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001836003811115610f2b57610f2b61436a565b02179055505050565b6000818152601c60205260409020805460609190610f51906146e5565b80601f0160208091040260200160405190810160405280929190818152602001828054610f7d906146e5565b8015610fca5780601f10610f9f57610100808354040283529160200191610fca565b820191906000526020600020905b815481529060010190602001808311610fad57829003601f168201915b50505050509050919050565b610fdf82612fb8565b3373ffffffffffffffffffffffffffffffffffffffff82160361102e576040517f8c8728c700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526006602052604090205473ffffffffffffffffffffffffffffffffffffffff8281169116146110d85760008281526006602052604080822080547fffffffffffffffffffffffff0000000000000000000000000
0000000000000001673ffffffffffffffffffffffffffffffffffffffff851690811790915590519091339185917fb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b3591a45b5050565b6000818152601a60205260409020805460609190610f51906146e5565b611101612f35565b600e54811461113c576040517fcf54c06a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b600e5481101561130e576000600e828154811061115e5761115e61467e565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff908116808452600f909252604083205491935016908585858181106111a8576111a861467e565b90506020020160208101906111bd919061434d565b905073ffffffffffffffffffffffffffffffffffffffff81161580611250575073ffffffffffffffffffffffffffffffffffffffff82161580159061122e57508073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b8015611250575073ffffffffffffffffffffffffffffffffffffffff81811614155b15611287576040517fb387a23800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff818116146112f85773ffffffffffffffffffffffffffffffffffffffff8381166000908152600f6020526040902080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169183169190911790555b5050508080611306906146ad565b91505061113f565b507fa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725600e8383604051611343939291906148ef565b60405180910390a15050565b611357612f35565b601280547fffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffff1690556040513381527f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa906020015b60405180910390a1565b73ffffffffffffffffffffffffffffffffffffffff81166000908152600b602090815260408083208151608081018352905460ff80821615801584526101008304909116948301949094526bffffffffffffffffffffffff6201000082048116938301939093526e01000000000000000000000000000090049091166060820152829182918291829190829061148f57606082015160125460009161147b9170010000000000000000000000000000000090046bfffffffffff
fffffffffffff166149a1565b600e5490915061148b90826149f5565b9150505b8151602083015160408401516114a6908490614a20565b6060949094015173ffffffffffffffffffffffffffffffffffffffff9a8b166000908152600f6020526040902054929b919a9499509750921694509092505050565b60006109ea8261306c565b6114fc81612fb8565b600081815260046020908152604091829020825160e081018452815460ff8116151580835263ffffffff610100830481169584019590955265010000000000820485169583019590955273ffffffffffffffffffffffffffffffffffffffff69010000000000000000009091041660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a08301527801000000000000000000000000000000000000000000000000900490911660c0820152906115fb576040517f1b88a78400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260046020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016905561163a600283613117565b5060405182907f7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a4745690600090a25050565b604080516101208101825260125460ff808216835263ffffffff6101008084048216602086015265010000000000840482169585019590955262ffffff6901000000000000000000840416606085015261ffff6c0100000000000000000000000084041660808501526e01000000000000000000000000000083048216151560a08501526f010000000000000000000000000000008304909116151560c08401526bffffffffffffffffffffffff70010000000000000000000000000000000083041660e08401527c01000000000000000000000000000000000000000000000000000000009091041691810191909152600090818061176983613123565b91509150611792838787601360020160049054906101000a900463ffffffff1686866000613301565b9695505050505050565b73ffffffffffffffffffffffffffffffffffffffff81166000908152601d60205260409020805460609190610f51906146e5565b6015546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff163314611831576040517f77c3599200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff83166000908152601d602052604090206
11861828483614787565b508273ffffffffffffffffffffffffffffffffffffffff167f7c44b4eb59ee7873514e7e43e7718c269d872965938b288aa143befca62f99d28383604051610a9d9291906148a2565b6012546f01000000000000000000000000000000900460ff16156118fa576040517f37ed32e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601280547fffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff166f0100000000000000000000000000000017905573ffffffffffffffffffffffffffffffffffffffff8116611981576040517f9c8d2cd200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000828152600460209081526040808320815160e081018352815460ff81161515825263ffffffff610100820481168387015265010000000000820481168386015273ffffffffffffffffffffffffffffffffffffffff6901000000000000000000909204821660608401526001909301546bffffffffffffffffffffffff80821660808501526c0100000000000000000000000082041660a08401527801000000000000000000000000000000000000000000000000900490921660c082015286855260059093529220549091163314611a88576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b611a9061334c565b816040015163ffffffff161115611ad3576040517fff84e5dd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000838152600460205260409020600101546018546c010000000000000000000000009091046bffffffffffffffffffffffff1690611b1390829061463c565b60185560008481526004602081905260409182902060010180547fffffffffffffffff000000000000000000000000ffffffffffffffffffffffff16905590517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff858116928201929092526bffffffffffffffffffffffff831660248201527f00000000000000000000000000000000000000000000000000000000000000009091169063a9059cbb906044016020604051808303816000875af1158015611bf6573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611c1a9190614a45565b50604080516bffffffffffffffffffffffff8316815273fffffffffffffffffffffffffff
fffffffffffff8516602082015285917ff3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318910160405180910390a25050601280547fffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff1690555050565b60015473ffffffffffffffffffffffffffffffffffffffff163314611d2d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b611db1612f35565b6014546018546bffffffffffffffffffffffff90911690611dd390829061463c565b601855601480547fffffffffffffffffffffffffffffffffffffffff0000000000000000000000001690556040516bffffffffffffffffffffffff821681527f1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f19060200160405180910390a16040517fa9059cbb0000000000000000000000000000000000000000000000000000000081523360048201526bffffffffffffffffffffffff821660248201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169063a9059cbb906044015b6020604051808303816000875af1158015611ee0573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906110d89190614a45565b611f0c612f35565b601280547fffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffff166e0100000000000000000000000000001790556040513381527f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258906020016113ab565b611f7e81612fb8565b600081815260046020908152604091829020825160e081018452815460ff8116158015835263ffffffff610100830481169584019590955265010000000000820485169583019590955273ffffffffffffffffffffffffffffffffffffffff69010000000000000000009091041660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a0830152780100000
0000000000000000000000000000000000000000000900490911660c08201529061207d576040517f514b6c2400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260046020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790556120bf600283613401565b5060405182907f8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f90600090a25050565b6120f883612fb8565b6000838152601b60205260409020612111828483614787565b50827f3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf48508383604051610a9d9291906148a2565b73ffffffffffffffffffffffffffffffffffffffff8116612191576040517f9c8d2cd200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8281166000908152600f60205260409020541633146121f1576040517fcebf515b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601254600e5460009161222891859170010000000000000000000000000000000090046bffffffffffffffffffffffff169061340d565b73ffffffffffffffffffffffffffffffffffffffff84166000908152600b6020526040902080547fffffffffffffffffffffffffffffffffffff000000000000000000000000ffff169055601854909150612292906bffffffffffffffffffffffff83169061463c565b6018556040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff83811660048301526bffffffffffffffffffffffff831660248301527f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb906044016020604051808303816000875af1158015612337573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061235b9190614a45565b5060405133815273ffffffffffffffffffffffffffffffffffffffff808416916bffffffffffffffffffffffff8416918616907f9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f406989060200160405180910390a4505050565b6108fc8163ffffffff1610806123f5575060145463ffffffff7001000000000000000000000000000000009091048116908216115b1561242c576040517f14c237fb0000000000000000000000000
0000000000000000000000000000000815260040160405180910390fd5b61243582612fb8565b60008281526004602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ff1661010063ffffffff861690810291909117909155915191825283917fc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c910160405180910390a25050565b73ffffffffffffffffffffffffffffffffffffffff818116600090815260106020526040902054163314612515576040517f6752e7aa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8181166000818152600f602090815260408083208054337fffffffffffffffffffffffff000000000000000000000000000000000000000080831682179093556010909452828520805490921690915590519416939092849290917f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b39190a45050565b600081815260046020908152604091829020825160e081018452815460ff81161515825263ffffffff6101008204811694830194909452650100000000008104841694820185905273ffffffffffffffffffffffffffffffffffffffff69010000000000000000009091041660608201526001909101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a083015278010000000000000000000000000000000000000000000000009004821660c082015291146126aa576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526006602052604090205473ffffffffffffffffffffffffffffffffffffffff163314612707576040517f6352a85300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526005602090815260408083208054337fffffffffffffffffffffffff0000000000000000000000000000000000000000808316821790935560069094528285208054909216909155905173ffffffffffffffffffffffffffffffffffffffff90911692839186917f5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c91a4505050565b60006109ea6127a98361306c565b600084815260046020526040902054610100900463ffffffff1661166a565b6127d0612f35565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523
060048201526000907f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906370a0823190602401602060405180830381865afa15801561285d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906128819190614a67565b90507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb33601854846128ce919061463c565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e085901b16815273ffffffffffffffffffffffffffffffffffffffff90921660048301526024820152604401611ec1565b604080516101408101825260008082526020820181905260609282018390528282018190526080820181905260a0820181905260c0820181905260e082018190526101008201526101208101919091526000828152600460209081526040808320815160e081018352815460ff811615158252610100810463ffffffff90811695830195909552650100000000008104851693820193909352690100000000000000000090920473ffffffffffffffffffffffffffffffffffffffff16606083018190526001909101546bffffffffffffffffffffffff80821660808501526c0100000000000000000000000082041660a08401527801000000000000000000000000000000000000000000000000900490921660c0820152919015612abc57816060015173ffffffffffffffffffffffffffffffffffffffff1663f00e6a2a6040518163ffffffff1660e01b8152600401602060405180830381865afa158015612a93573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612ab79190614a80565b612abf565b60005b90506040518061014001604052808273ffffffffffffffffffffffffffffffffffffffff168152602001836020015163ffffffff168152602001600760008781526020019081526020016000208054612b17906146e5565b80601f0160208091040260200160405190810160405280929190818152602001828054612b43906146e5565b8015612b905780601f10612b6557610100808354040283529160200191612b90565b820191906000526020600020905b815481529060010190602001808311612b7357829003601f168201915b505050505081526020018360a001516bffffffffffffffffffffffff1681526020016005600087815260200190815260200160002060009054906101000a900473fffffffffffffffffffffffffff
fffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001836040015163ffffffff1667ffffffffffffffff1681526020018360c0015163ffffffff16815260200183608001516bffffffffffffffffffffffff168152602001836000015115158152602001601b60008781526020019081526020016000208054612c6d906146e5565b80601f0160208091040260200160405190810160405280929190818152602001828054612c99906146e5565b8015612ce65780601f10612cbb57610100808354040283529160200191612ce6565b820191906000526020600020905b815481529060010190602001808311612cc957829003601f168201915b505050505081525092505050919050565b612d0083612fb8565b60155463ffffffff16811115612d42576040517fae7235df00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000838152600760205260409020612d5b828483614787565b50827fcba2d5723b2ee59e53a8e8a82a4a7caf4fdfe70e9f7c582950bf7e7a5c24e83d8383604051610a9d9291906148a2565b60006109ea8261279b565b73ffffffffffffffffffffffffffffffffffffffff8281166000908152600f6020526040902054163314612df9576040517fcebf515b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff821603612e48576040517f8c8728c700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8281166000908152601060205260409020548116908216146110d85773ffffffffffffffffffffffffffffffffffffffff82811660008181526010602052604080822080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169486169485179055513392917f84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e3836791a45050565b612eff612f35565b612f0881613615565b50565b60006109ea825490565b6000612f21838361370a565b9392505050565b60606000612f2183613734565b60005473ffffffffffffffffffffffffffffffffffffffff163314612fb6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401611d24565b565b60008181526005602052604090205
473ffffffffffffffffffffffffffffffffffffffff163314613015576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008181526004602052604090205465010000000000900463ffffffff90811614612f08576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000818160045b600f8110156130f9577fff0000000000000000000000000000000000000000000000000000000000000082168382602081106130b1576130b161467e565b1a60f81b7effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916146130e757506000949350505050565b806130f1816146ad565b915050613073565b5081600f1a600181111561310f5761310f61436a565b949350505050565b6000612f21838361378f565b6000806000836060015162ffffff1690506000808263ffffffff161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a060405180830381865afa1580156131af573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906131d39190614ab7565b50945090925050506000811315806131ea57508142105b8061320b575082801561320b5750613202824261463c565b8463ffffffff16105b1561321a57601654955061321e565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a060405180830381865afa158015613289573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906132ad9190614ab7565b50945090925050506000811315806132c457508142105b806132e557508280156132e557506132dc824261463c565b8463ffffffff16105b156132f45760175494506132f8565b8094505b50505050915091565b60008061331388878b600001516137de565b905060008061332e8b8a63ffffffff16858a8a60018b6138a0565b909250905061333d8183614a20565b9b9a5050505050505050505050565b600060017f000000000000000000000000000000000000000000000000000000000000000060028111156133825761338261436a565b036133fc57606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff166
0e01b8152600401602060405180830381865afa1580156133d3573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906133f79190614a67565b905090565b504390565b6000612f218383613cf9565b73ffffffffffffffffffffffffffffffffffffffff83166000908152600b602090815260408083208151608081018352905460ff80821615801584526101008304909116948301949094526bffffffffffffffffffffffff6201000082048116938301939093526e01000000000000000000000000000090049091166060820152906136095760008160600151856134a591906149a1565b905060006134b385836149f5565b905080836040018181516134c79190614a20565b6bffffffffffffffffffffffff169052506134e28582614b07565b836060018181516134f39190614a20565b6bffffffffffffffffffffffff90811690915273ffffffffffffffffffffffffffffffffffffffff89166000908152600b602090815260409182902087518154928901519389015160608a015186166e010000000000000000000000000000027fffffffffffff000000000000000000000000ffffffffffffffffffffffffffff919096166201000002167fffffffffffff000000000000000000000000000000000000000000000000ffff60ff95909516610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff921515929092167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000909416939093171792909216179190911790555050505b60400151949350505050565b3373ffffffffffffffffffffffffffffffffffffffff821603613694576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401611d24565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60008260000182815481106137215761372161467e565b9060005260206000200154905092915050565b606081600001805480602002602001604051908101604052809291908181526020018280548015610fca57602002820191906000526020600020905b8154815260200190600101908083116137705750505050509050919050565b600081815260018
30160205260408120546137d6575081546001818101845560008481526020808220909301849055845484825282860190935260409020919091556109ea565b5060006109ea565b600080808560018111156137f4576137f461436a565b03613803575062015f90613858565b60018560018111156138175761381761436a565b0361382657506201adb0613858565b6040517ff2b2d41200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61386963ffffffff85166014614b3b565b613874846001614b78565b6138839060ff16611d4c614b3b565b61388d9083614629565b6138979190614629565b95945050505050565b6000806000896080015161ffff16876138b99190614b3b565b90508380156138c75750803a105b156138cf57503a5b600060027f000000000000000000000000000000000000000000000000000000000000000060028111156139055761390561436a565b03613a6457604080516000815260208101909152851561396357600036604051806080016040528060488152602001614c696048913960405160200161394d93929190614b91565b60405160208183030381529060405290506139cb565b60155461397f90640100000000900463ffffffff166004614bb8565b63ffffffff1667ffffffffffffffff81111561399d5761399d61464f565b6040519080825280601f01601f1916602001820160405280156139c7576020820181803683370190505b5090505b6040517f49948e0e00000000000000000000000000000000000000000000000000000000815273420000000000000000000000000000000000000f906349948e0e90613a1b9084906004016142a0565b602060405180830381865afa158015613a38573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613a5c9190614a67565b915050613bbe565b60017f00000000000000000000000000000000000000000000000000000000000000006002811115613a9857613a9861436a565b03613bbe578415613b1a57606c73ffffffffffffffffffffffffffffffffffffffff1663c6f7de0e6040518163ffffffff1660e01b8152600401602060405180830381865afa158015613aef573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613b139190614a67565b9050613bbe565b6000606c73ffffffffffffffffffffffffffffffffffffffff166341b247a86040518163ffffffff1660e01b815260040160c060405180830381865afa158015613b68573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525
0810190613b8c9190614bdb565b5050601554929450613baf93505050640100000000900463ffffffff1682614b3b565b613bba906010614b3b565b9150505b84613bda57808b6080015161ffff16613bd79190614b3b565b90505b613be861ffff871682614c25565b905060008782613bf88c8e614629565b613c029086614b3b565b613c0c9190614629565b613c1e90670de0b6b3a7640000614b3b565b613c289190614c25565b905060008c6040015163ffffffff1664e8d4a51000613c479190614b3b565b898e6020015163ffffffff16858f88613c609190614b3b565b613c6a9190614629565b613c7890633b9aca00614b3b565b613c829190614b3b565b613c8c9190614c25565b613c969190614629565b90506b033b2e3c9fd0803ce8000000613caf8284614629565b1115613ce7576040517f2ad7547a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b909c909b509950505050505050505050565b60008181526001830160205260408120548015613de2576000613d1d60018361463c565b8554909150600090613d319060019061463c565b9050818114613d96576000866000018281548110613d5157613d5161467e565b9060005260206000200154905080876000018481548110613d7457613d7461467e565b6000918252602080832090910192909255918252600188019052604090208390555b8554869080613da757613da7614c39565b6001900381819060005260206000200160009055905585600101600086815260200190815260200160002060009055600193505050506109ea565b60009150506109ea565b5092915050565b60008060408385031215613e0657600080fd5b50508035926020909101359150565b6020808252825182820181905260009190848201906040850190845b81811015613e4d57835183529284019291840191600101613e31565b50909695505050505050565b60008083601f840112613e6b57600080fd5b50813567ffffffffffffffff811115613e8357600080fd5b602083019150836020828501011115613e9b57600080fd5b9250929050565b600080600060408486031215613eb757600080fd5b83359250602084013567ffffffffffffffff811115613ed557600080fd5b613ee186828701613e59565b9497909650939450505050565b600081518084526020808501945080840160005b83811015613f3457815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101613f02565b509495945050505050565b805163ffffffff16825260006101e06020830151613f65602086018263ffffffff169052565b506
040830151613f7d604086018263ffffffff169052565b506060830151613f94606086018262ffffff169052565b506080830151613faa608086018261ffff169052565b5060a0830151613fca60a08601826bffffffffffffffffffffffff169052565b5060c0830151613fe260c086018263ffffffff169052565b5060e0830151613ffa60e086018263ffffffff169052565b506101008381015163ffffffff908116918601919091526101208085015190911690850152610140808401519085015261016080840151908501526101808084015173ffffffffffffffffffffffffffffffffffffffff16908501526101a08084015181860183905261406f83870182613eee565b925050506101c08084015161409b8287018273ffffffffffffffffffffffffffffffffffffffff169052565b5090949350505050565b855163ffffffff16815260006101c060208801516140d360208501826bffffffffffffffffffffffff169052565b506040880151604084015260608801516140fd60608501826bffffffffffffffffffffffff169052565b506080880151608084015260a088015161411f60a085018263ffffffff169052565b5060c088015161413760c085018263ffffffff169052565b5060e088015160e08401526101008089015161415a8286018263ffffffff169052565b505061012088810151151590840152610140830181905261417d81840188613f3f565b90508281036101608401526141928187613eee565b90508281036101808401526141a78186613eee565b9150506117926101a083018460ff169052565b73ffffffffffffffffffffffffffffffffffffffff81168114612f0857600080fd5b600080604083850312156141ef57600080fd5b82356141fa816141ba565b915060208301356004811061420e57600080fd5b809150509250929050565b60006020828403121561422b57600080fd5b5035919050565b60005b8381101561424d578181015183820152602001614235565b50506000910152565b6000815180845261426e816020860160208601614232565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000612f216020830184614256565b600080604083850312156142c657600080fd5b82359150602083013561420e816141ba565b600080602083850312156142eb57600080fd5b823567ffffffffffffffff8082111561430357600080fd5b818501915085601f83011261431757600080fd5b81358181111561432657600080fd5b8660208260051b850101111561433b57600080fd5b60209290920196919550909350505050565b600
06020828403121561435f57600080fd5b8135612f21816141ba565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b60038110612f0857612f0861436a565b602081016143b683614399565b91905290565b60208101600283106143b6576143b661436a565b803563ffffffff811681146143e457600080fd5b919050565b600080604083850312156143fc57600080fd5b82356002811061440b57600080fd5b9150614419602084016143d0565b90509250929050565b60008060006040848603121561443757600080fd5b8335614442816141ba565b9250602084013567ffffffffffffffff811115613ed557600080fd5b6000806040838503121561447157600080fd5b823561447c816141ba565b9150602083013561420e816141ba565b6000806040838503121561449f57600080fd5b82359150614419602084016143d0565b602081526144d660208201835173ffffffffffffffffffffffffffffffffffffffff169052565b600060208301516144ef604084018263ffffffff169052565b50604083015161014080606085015261450c610160850183614256565b9150606085015161452d60808601826bffffffffffffffffffffffff169052565b50608085015173ffffffffffffffffffffffffffffffffffffffff811660a08601525060a085015167ffffffffffffffff811660c08601525060c085015163ffffffff811660e08601525060e0850151610100614599818701836bffffffffffffffffffffffff169052565b86015190506101206145ae8682018315159052565b8601518584037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0018387015290506117928382614256565b60208101600483106143b6576143b661436a565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b808201808211156109ea576109ea6145fa565b818103818111156109ea576109ea6145fa565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036146de576146de6145fa565b5060010190565b600181811c908216806146f957607f821691505b602082108103614732577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b509190505
65b601f82111561478257600081815260208120601f850160051c8101602086101561475f5750805b601f850160051c820191505b8181101561477e5782815560010161476b565b5050505b505050565b67ffffffffffffffff83111561479f5761479f61464f565b6147b3836147ad83546146e5565b83614738565b6000601f84116001811461480557600085156147cf5750838201355b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600387901b1c1916600186901b17835561489b565b6000838152602090207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0861690835b828110156148545786850135825560209485019460019092019101614834565b508682101561488f577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60f88860031b161c19848701351681555b505060018560011b0183555b5050505050565b60208152816020820152818360408301376000818301604090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160101919050565b6000604082016040835280865480835260608501915087600052602092508260002060005b8281101561494657815473ffffffffffffffffffffffffffffffffffffffff1684529284019260019182019101614914565b505050838103828501528481528590820160005b8681101561499557823561496d816141ba565b73ffffffffffffffffffffffffffffffffffffffff168252918301919083019060010161495a565b50979650505050505050565b6bffffffffffffffffffffffff828116828216039080821115613dec57613dec6145fa565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b60006bffffffffffffffffffffffff80841680614a1457614a146149c6565b92169190910492915050565b6bffffffffffffffffffffffff818116838216019080821115613dec57613dec6145fa565b600060208284031215614a5757600080fd5b81518015158114612f2157600080fd5b600060208284031215614a7957600080fd5b5051919050565b600060208284031215614a9257600080fd5b8151612f21816141ba565b805169ffffffffffffffffffff811681146143e457600080fd5b600080600080600060a08688031215614acf57600080fd5b614ad886614a9d565b9450602086015193506040860151925060608601519150614afb60808701614a9d565b90509295509295909350565b60006bffffffffffffffffffffffff80831681851681830
481118215151615614b3257614b326145fa565b02949350505050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615614b7357614b736145fa565b500290565b60ff81811683821601908111156109ea576109ea6145fa565b828482376000838201600081528351614bae818360208801614232565b0195945050505050565b600063ffffffff80831681851681830481118215151615614b3257614b326145fa565b60008060008060008060c08789031215614bf457600080fd5b865195506020870151945060408701519350606087015192506080870151915060a087015190509295509295509295565b600082614c3457614c346149c6565b500490565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fdfe307866666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000810000a", +} + +var KeeperRegistryLogicBABI = KeeperRegistryLogicBMetaData.ABI + +var KeeperRegistryLogicBBin = KeeperRegistryLogicBMetaData.Bin + +func DeployKeeperRegistryLogicB(auth *bind.TransactOpts, backend bind.ContractBackend, mode uint8, link common.Address, linkNativeFeed common.Address, fastGasFeed common.Address, automationForwarderLogic common.Address) (common.Address, *types.Transaction, *KeeperRegistryLogicB, error) { + parsed, err := KeeperRegistryLogicBMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistryLogicBBin), backend, mode, link, linkNativeFeed, fastGasFeed, automationForwarderLogic) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistryLogicB{address: address, abi: *parsed, KeeperRegistryLogicBCaller: KeeperRegistryLogicBCaller{contract: contract}, KeeperRegistryLogicBTransactor: KeeperRegistryLogicBTransactor{contract: contract}, KeeperRegistryLogicBFilterer: 
KeeperRegistryLogicBFilterer{contract: contract}}, nil +} + +type KeeperRegistryLogicB struct { + address common.Address + abi abi.ABI + KeeperRegistryLogicBCaller + KeeperRegistryLogicBTransactor + KeeperRegistryLogicBFilterer +} + +type KeeperRegistryLogicBCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistryLogicBTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistryLogicBFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistryLogicBSession struct { + Contract *KeeperRegistryLogicB + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistryLogicBCallerSession struct { + Contract *KeeperRegistryLogicBCaller + CallOpts bind.CallOpts +} + +type KeeperRegistryLogicBTransactorSession struct { + Contract *KeeperRegistryLogicBTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistryLogicBRaw struct { + Contract *KeeperRegistryLogicB +} + +type KeeperRegistryLogicBCallerRaw struct { + Contract *KeeperRegistryLogicBCaller +} + +type KeeperRegistryLogicBTransactorRaw struct { + Contract *KeeperRegistryLogicBTransactor +} + +func NewKeeperRegistryLogicB(address common.Address, backend bind.ContractBackend) (*KeeperRegistryLogicB, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryLogicBABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistryLogicB(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicB{address: address, abi: abi, KeeperRegistryLogicBCaller: KeeperRegistryLogicBCaller{contract: contract}, KeeperRegistryLogicBTransactor: KeeperRegistryLogicBTransactor{contract: contract}, KeeperRegistryLogicBFilterer: KeeperRegistryLogicBFilterer{contract: contract}}, nil +} + +func NewKeeperRegistryLogicBCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryLogicBCaller, error) { + contract, err := bindKeeperRegistryLogicB(address, caller, nil, nil) + if err != nil { + 
return nil, err + } + return &KeeperRegistryLogicBCaller{contract: contract}, nil +} + +func NewKeeperRegistryLogicBTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistryLogicBTransactor, error) { + contract, err := bindKeeperRegistryLogicB(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBTransactor{contract: contract}, nil +} + +func NewKeeperRegistryLogicBFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistryLogicBFilterer, error) { + contract, err := bindKeeperRegistryLogicB(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBFilterer{contract: contract}, nil +} + +func bindKeeperRegistryLogicB(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistryLogicBMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryLogicB.Contract.KeeperRegistryLogicBCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.KeeperRegistryLogicBTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.KeeperRegistryLogicBTransactor.contract.Transact(opts, method, params...) 
+} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryLogicB.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getActiveUpkeepIDs", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetActiveUpkeepIDs(&_KeeperRegistryLogicB.CallOpts, startIndex, maxCount) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetActiveUpkeepIDs(&_KeeperRegistryLogicB.CallOpts, startIndex, maxCount) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetAdminPrivilegeConfig(opts *bind.CallOpts, admin common.Address) ([]byte, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getAdminPrivilegeConfig", admin) + + if err != nil { + return *new([]byte), err + } + + out0 := 
*abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetAdminPrivilegeConfig(admin common.Address) ([]byte, error) { + return _KeeperRegistryLogicB.Contract.GetAdminPrivilegeConfig(&_KeeperRegistryLogicB.CallOpts, admin) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetAdminPrivilegeConfig(admin common.Address) ([]byte, error) { + return _KeeperRegistryLogicB.Contract.GetAdminPrivilegeConfig(&_KeeperRegistryLogicB.CallOpts, admin) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetAutomationForwarderLogic(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getAutomationForwarderLogic") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetAutomationForwarderLogic() (common.Address, error) { + return _KeeperRegistryLogicB.Contract.GetAutomationForwarderLogic(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetAutomationForwarderLogic() (common.Address, error) { + return _KeeperRegistryLogicB.Contract.GetAutomationForwarderLogic(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getBalance", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetBalance(id *big.Int) (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetBalance(&_KeeperRegistryLogicB.CallOpts, id) +} + +func (_KeeperRegistryLogicB 
*KeeperRegistryLogicBCallerSession) GetBalance(id *big.Int) (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetBalance(&_KeeperRegistryLogicB.CallOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetCancellationDelay(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getCancellationDelay") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetCancellationDelay() (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetCancellationDelay(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetCancellationDelay() (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetCancellationDelay(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetConditionalGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getConditionalGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetConditionalGasOverhead() (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetConditionalGasOverhead(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetConditionalGasOverhead() (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetConditionalGasOverhead(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetFastGasFeedAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getFastGasFeedAddress") + + if 
err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetFastGasFeedAddress() (common.Address, error) { + return _KeeperRegistryLogicB.Contract.GetFastGasFeedAddress(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetFastGasFeedAddress() (common.Address, error) { + return _KeeperRegistryLogicB.Contract.GetFastGasFeedAddress(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getForwarder", upkeepID) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _KeeperRegistryLogicB.Contract.GetForwarder(&_KeeperRegistryLogicB.CallOpts, upkeepID) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _KeeperRegistryLogicB.Contract.GetForwarder(&_KeeperRegistryLogicB.CallOpts, upkeepID) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetLinkAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getLinkAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetLinkAddress() (common.Address, error) { + return 
_KeeperRegistryLogicB.Contract.GetLinkAddress(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetLinkAddress() (common.Address, error) { + return _KeeperRegistryLogicB.Contract.GetLinkAddress(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetLinkNativeFeedAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getLinkNativeFeedAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetLinkNativeFeedAddress() (common.Address, error) { + return _KeeperRegistryLogicB.Contract.GetLinkNativeFeedAddress(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetLinkNativeFeedAddress() (common.Address, error) { + return _KeeperRegistryLogicB.Contract.GetLinkNativeFeedAddress(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetLogGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getLogGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetLogGasOverhead() (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetLogGasOverhead(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetLogGasOverhead() (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetLogGasOverhead(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetMaxPaymentForGas(opts *bind.CallOpts, triggerType 
uint8, gasLimit uint32) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getMaxPaymentForGas", triggerType, gasLimit) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetMaxPaymentForGas(triggerType uint8, gasLimit uint32) (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetMaxPaymentForGas(&_KeeperRegistryLogicB.CallOpts, triggerType, gasLimit) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetMaxPaymentForGas(triggerType uint8, gasLimit uint32) (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetMaxPaymentForGas(&_KeeperRegistryLogicB.CallOpts, triggerType, gasLimit) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetMinBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getMinBalance", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetMinBalance(id *big.Int) (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetMinBalance(&_KeeperRegistryLogicB.CallOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetMinBalance(id *big.Int) (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetMinBalance(&_KeeperRegistryLogicB.CallOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getMinBalanceForUpkeep", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, 
err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetMinBalanceForUpkeep(&_KeeperRegistryLogicB.CallOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetMinBalanceForUpkeep(&_KeeperRegistryLogicB.CallOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetMode(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getMode") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetMode() (uint8, error) { + return _KeeperRegistryLogicB.Contract.GetMode(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetMode() (uint8, error) { + return _KeeperRegistryLogicB.Contract.GetMode(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getPeerRegistryMigrationPermission", peer) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return _KeeperRegistryLogicB.Contract.GetPeerRegistryMigrationPermission(&_KeeperRegistryLogicB.CallOpts, peer) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return 
_KeeperRegistryLogicB.Contract.GetPeerRegistryMigrationPermission(&_KeeperRegistryLogicB.CallOpts, peer) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetPerPerformByteGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getPerPerformByteGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetPerPerformByteGasOverhead() (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetPerPerformByteGasOverhead(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetPerPerformByteGasOverhead() (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetPerPerformByteGasOverhead(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetPerSignerGasOverhead(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getPerSignerGasOverhead") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetPerSignerGasOverhead() (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetPerSignerGasOverhead(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetPerSignerGasOverhead() (*big.Int, error) { + return _KeeperRegistryLogicB.Contract.GetPerSignerGasOverhead(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetSignerInfo(opts *bind.CallOpts, query common.Address) (GetSignerInfo, + + error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getSignerInfo", query) + + outstruct := 
new(GetSignerInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.Index = *abi.ConvertType(out[1], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetSignerInfo(query common.Address) (GetSignerInfo, + + error) { + return _KeeperRegistryLogicB.Contract.GetSignerInfo(&_KeeperRegistryLogicB.CallOpts, query) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetSignerInfo(query common.Address) (GetSignerInfo, + + error) { + return _KeeperRegistryLogicB.Contract.GetSignerInfo(&_KeeperRegistryLogicB.CallOpts, query) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetState(opts *bind.CallOpts) (GetState, + + error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getState") + + outstruct := new(GetState) + if err != nil { + return *outstruct, err + } + + outstruct.State = *abi.ConvertType(out[0], new(KeeperRegistryBase21State)).(*KeeperRegistryBase21State) + outstruct.Config = *abi.ConvertType(out[1], new(KeeperRegistryBase21OnchainConfig)).(*KeeperRegistryBase21OnchainConfig) + outstruct.Signers = *abi.ConvertType(out[2], new([]common.Address)).(*[]common.Address) + outstruct.Transmitters = *abi.ConvertType(out[3], new([]common.Address)).(*[]common.Address) + outstruct.F = *abi.ConvertType(out[4], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetState() (GetState, + + error) { + return _KeeperRegistryLogicB.Contract.GetState(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetState() (GetState, + + error) { + return _KeeperRegistryLogicB.Contract.GetState(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetTransmitterInfo(opts *bind.CallOpts, query common.Address) (GetTransmitterInfo, + + error) 
{ + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getTransmitterInfo", query) + + outstruct := new(GetTransmitterInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.Index = *abi.ConvertType(out[1], new(uint8)).(*uint8) + outstruct.Balance = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.LastCollected = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.Payee = *abi.ConvertType(out[4], new(common.Address)).(*common.Address) + + return *outstruct, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetTransmitterInfo(query common.Address) (GetTransmitterInfo, + + error) { + return _KeeperRegistryLogicB.Contract.GetTransmitterInfo(&_KeeperRegistryLogicB.CallOpts, query) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetTransmitterInfo(query common.Address) (GetTransmitterInfo, + + error) { + return _KeeperRegistryLogicB.Contract.GetTransmitterInfo(&_KeeperRegistryLogicB.CallOpts, query) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getTriggerType", upkeepId) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetTriggerType(upkeepId *big.Int) (uint8, error) { + return _KeeperRegistryLogicB.Contract.GetTriggerType(&_KeeperRegistryLogicB.CallOpts, upkeepId) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetTriggerType(upkeepId *big.Int) (uint8, error) { + return _KeeperRegistryLogicB.Contract.GetTriggerType(&_KeeperRegistryLogicB.CallOpts, upkeepId) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetUpkeep(opts *bind.CallOpts, id *big.Int) 
(KeeperRegistryBase21UpkeepInfo, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getUpkeep", id) + + if err != nil { + return *new(KeeperRegistryBase21UpkeepInfo), err + } + + out0 := *abi.ConvertType(out[0], new(KeeperRegistryBase21UpkeepInfo)).(*KeeperRegistryBase21UpkeepInfo) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetUpkeep(id *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + return _KeeperRegistryLogicB.Contract.GetUpkeep(&_KeeperRegistryLogicB.CallOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetUpkeep(id *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + return _KeeperRegistryLogicB.Contract.GetUpkeep(&_KeeperRegistryLogicB.CallOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getUpkeepPrivilegeConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return _KeeperRegistryLogicB.Contract.GetUpkeepPrivilegeConfig(&_KeeperRegistryLogicB.CallOpts, upkeepId) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return _KeeperRegistryLogicB.Contract.GetUpkeepPrivilegeConfig(&_KeeperRegistryLogicB.CallOpts, upkeepId) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "getUpkeepTriggerConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := 
*abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _KeeperRegistryLogicB.Contract.GetUpkeepTriggerConfig(&_KeeperRegistryLogicB.CallOpts, upkeepId) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _KeeperRegistryLogicB.Contract.GetUpkeepTriggerConfig(&_KeeperRegistryLogicB.CallOpts, upkeepId) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) HasDedupKey(opts *bind.CallOpts, dedupKey [32]byte) (bool, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "hasDedupKey", dedupKey) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) HasDedupKey(dedupKey [32]byte) (bool, error) { + return _KeeperRegistryLogicB.Contract.HasDedupKey(&_KeeperRegistryLogicB.CallOpts, dedupKey) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) HasDedupKey(dedupKey [32]byte) (bool, error) { + return _KeeperRegistryLogicB.Contract.HasDedupKey(&_KeeperRegistryLogicB.CallOpts, dedupKey) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) Owner() (common.Address, error) { + return _KeeperRegistryLogicB.Contract.Owner(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) Owner() (common.Address, error) { + return 
_KeeperRegistryLogicB.Contract.Owner(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "upkeepTranscoderVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) UpkeepTranscoderVersion() (uint8, error) { + return _KeeperRegistryLogicB.Contract.UpkeepTranscoderVersion(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) UpkeepTranscoderVersion() (uint8, error) { + return _KeeperRegistryLogicB.Contract.UpkeepTranscoderVersion(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCaller) UpkeepVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _KeeperRegistryLogicB.contract.Call(opts, &out, "upkeepVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) UpkeepVersion() (uint8, error) { + return _KeeperRegistryLogicB.Contract.UpkeepVersion(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBCallerSession) UpkeepVersion() (uint8, error) { + return _KeeperRegistryLogicB.Contract.UpkeepVersion(&_KeeperRegistryLogicB.CallOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "acceptOwnership") +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.AcceptOwnership(&_KeeperRegistryLogicB.TransactOpts) +} + +func 
(_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.AcceptOwnership(&_KeeperRegistryLogicB.TransactOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "acceptPayeeship", transmitter) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.AcceptPayeeship(&_KeeperRegistryLogicB.TransactOpts, transmitter) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.AcceptPayeeship(&_KeeperRegistryLogicB.TransactOpts, transmitter) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "acceptUpkeepAdmin", id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.AcceptUpkeepAdmin(&_KeeperRegistryLogicB.TransactOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.AcceptUpkeepAdmin(&_KeeperRegistryLogicB.TransactOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "pause") +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) Pause() (*types.Transaction, error) { + return 
_KeeperRegistryLogicB.Contract.Pause(&_KeeperRegistryLogicB.TransactOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) Pause() (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.Pause(&_KeeperRegistryLogicB.TransactOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "pauseUpkeep", id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) PauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.PauseUpkeep(&_KeeperRegistryLogicB.TransactOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) PauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.PauseUpkeep(&_KeeperRegistryLogicB.TransactOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "recoverFunds") +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.RecoverFunds(&_KeeperRegistryLogicB.TransactOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.RecoverFunds(&_KeeperRegistryLogicB.TransactOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) SetAdminPrivilegeConfig(opts *bind.TransactOpts, admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "setAdminPrivilegeConfig", admin, newPrivilegeConfig) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) SetAdminPrivilegeConfig(admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, 
error) { + return _KeeperRegistryLogicB.Contract.SetAdminPrivilegeConfig(&_KeeperRegistryLogicB.TransactOpts, admin, newPrivilegeConfig) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) SetAdminPrivilegeConfig(admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.SetAdminPrivilegeConfig(&_KeeperRegistryLogicB.TransactOpts, admin, newPrivilegeConfig) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) SetPayees(opts *bind.TransactOpts, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "setPayees", payees) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) SetPayees(payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.SetPayees(&_KeeperRegistryLogicB.TransactOpts, payees) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) SetPayees(payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.SetPayees(&_KeeperRegistryLogicB.TransactOpts, payees) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "setPeerRegistryMigrationPermission", peer, permission) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.SetPeerRegistryMigrationPermission(&_KeeperRegistryLogicB.TransactOpts, peer, permission) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return 
_KeeperRegistryLogicB.Contract.SetPeerRegistryMigrationPermission(&_KeeperRegistryLogicB.TransactOpts, peer, permission) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) SetUpkeepCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "setUpkeepCheckData", id, newCheckData) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) SetUpkeepCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.SetUpkeepCheckData(&_KeeperRegistryLogicB.TransactOpts, id, newCheckData) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) SetUpkeepCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.SetUpkeepCheckData(&_KeeperRegistryLogicB.TransactOpts, id, newCheckData) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "setUpkeepGasLimit", id, gasLimit) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.SetUpkeepGasLimit(&_KeeperRegistryLogicB.TransactOpts, id, gasLimit) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.SetUpkeepGasLimit(&_KeeperRegistryLogicB.TransactOpts, id, gasLimit) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) SetUpkeepOffchainConfig(opts *bind.TransactOpts, id *big.Int, config []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "setUpkeepOffchainConfig", id, config) +} + +func (_KeeperRegistryLogicB 
*KeeperRegistryLogicBSession) SetUpkeepOffchainConfig(id *big.Int, config []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.SetUpkeepOffchainConfig(&_KeeperRegistryLogicB.TransactOpts, id, config) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) SetUpkeepOffchainConfig(id *big.Int, config []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.SetUpkeepOffchainConfig(&_KeeperRegistryLogicB.TransactOpts, id, config) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "setUpkeepPrivilegeConfig", upkeepId, newPrivilegeConfig) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.SetUpkeepPrivilegeConfig(&_KeeperRegistryLogicB.TransactOpts, upkeepId, newPrivilegeConfig) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.SetUpkeepPrivilegeConfig(&_KeeperRegistryLogicB.TransactOpts, upkeepId, newPrivilegeConfig) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "transferOwnership", to) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.TransferOwnership(&_KeeperRegistryLogicB.TransactOpts, to) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) TransferOwnership(to common.Address) 
(*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.TransferOwnership(&_KeeperRegistryLogicB.TransactOpts, to) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "transferPayeeship", transmitter, proposed) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.TransferPayeeship(&_KeeperRegistryLogicB.TransactOpts, transmitter, proposed) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.TransferPayeeship(&_KeeperRegistryLogicB.TransactOpts, transmitter, proposed) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "transferUpkeepAdmin", id, proposed) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.TransferUpkeepAdmin(&_KeeperRegistryLogicB.TransactOpts, id, proposed) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.TransferUpkeepAdmin(&_KeeperRegistryLogicB.TransactOpts, id, proposed) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, 
"unpause") +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) Unpause() (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.Unpause(&_KeeperRegistryLogicB.TransactOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) Unpause() (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.Unpause(&_KeeperRegistryLogicB.TransactOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "unpauseUpkeep", id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.UnpauseUpkeep(&_KeeperRegistryLogicB.TransactOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.UnpauseUpkeep(&_KeeperRegistryLogicB.TransactOpts, id) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "withdrawFunds", id, to) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.WithdrawFunds(&_KeeperRegistryLogicB.TransactOpts, id, to) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.WithdrawFunds(&_KeeperRegistryLogicB.TransactOpts, id, to) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, 
"withdrawOwnerFunds") +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.WithdrawOwnerFunds(&_KeeperRegistryLogicB.TransactOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.WithdrawOwnerFunds(&_KeeperRegistryLogicB.TransactOpts) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactor) WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.contract.Transact(opts, "withdrawPayment", from, to) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.WithdrawPayment(&_KeeperRegistryLogicB.TransactOpts, from, to) +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBTransactorSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryLogicB.Contract.WithdrawPayment(&_KeeperRegistryLogicB.TransactOpts, from, to) +} + +type KeeperRegistryLogicBAdminPrivilegeConfigSetIterator struct { + Event *KeeperRegistryLogicBAdminPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBAdminPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(KeeperRegistryLogicBAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBAdminPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBAdminPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBAdminPrivilegeConfigSet struct { + Admin common.Address + PrivilegeConfig []byte + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*KeeperRegistryLogicBAdminPrivilegeConfigSetIterator, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBAdminPrivilegeConfigSetIterator{contract: _KeeperRegistryLogicB.contract, event: "AdminPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBAdminPrivilegeConfigSet) + if err := 
_KeeperRegistryLogicB.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseAdminPrivilegeConfigSet(log types.Log) (*KeeperRegistryLogicBAdminPrivilegeConfigSet, error) { + event := new(KeeperRegistryLogicBAdminPrivilegeConfigSet) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBCancelledUpkeepReportIterator struct { + Event *KeeperRegistryLogicBCancelledUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBCancelledUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBCancelledUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBCancelledUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBCancelledUpkeepReport struct { + Id *big.Int 
+ Trigger []byte + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBCancelledUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBCancelledUpkeepReportIterator{contract: _KeeperRegistryLogicB.contract, event: "CancelledUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBCancelledUpkeepReport) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseCancelledUpkeepReport(log types.Log) (*KeeperRegistryLogicBCancelledUpkeepReport, error) { + event := new(KeeperRegistryLogicBCancelledUpkeepReport) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
KeeperRegistryLogicBDedupKeyAddedIterator struct { + Event *KeeperRegistryLogicBDedupKeyAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBDedupKeyAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBDedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBDedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBDedupKeyAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBDedupKeyAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBDedupKeyAdded struct { + DedupKey [32]byte + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*KeeperRegistryLogicBDedupKeyAddedIterator, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBDedupKeyAddedIterator{contract: _KeeperRegistryLogicB.contract, event: "DedupKeyAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBDedupKeyAdded, dedupKey 
[][32]byte) (event.Subscription, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBDedupKeyAdded) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseDedupKeyAdded(log types.Log) (*KeeperRegistryLogicBDedupKeyAdded, error) { + event := new(KeeperRegistryLogicBDedupKeyAdded) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBFundsAddedIterator struct { + Event *KeeperRegistryLogicBFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBFundsAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryLogicBFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBFundsAddedIterator{contract: _KeeperRegistryLogicB.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBFundsAdded) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select 
{ + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseFundsAdded(log types.Log) (*KeeperRegistryLogicBFundsAdded, error) { + event := new(KeeperRegistryLogicBFundsAdded) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBFundsWithdrawnIterator struct { + Event *KeeperRegistryLogicBFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBFundsWithdrawnIterator, error) { + + var idRule 
[]interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBFundsWithdrawnIterator{contract: _KeeperRegistryLogicB.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBFundsWithdrawn) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseFundsWithdrawn(log types.Log) (*KeeperRegistryLogicBFundsWithdrawn, error) { + event := new(KeeperRegistryLogicBFundsWithdrawn) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBInsufficientFundsUpkeepReportIterator struct { + Event *KeeperRegistryLogicBInsufficientFundsUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*KeeperRegistryLogicBInsufficientFundsUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBInsufficientFundsUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBInsufficientFundsUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBInsufficientFundsUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBInsufficientFundsUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBInsufficientFundsUpkeepReportIterator{contract: _KeeperRegistryLogicB.contract, event: "InsufficientFundsUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} 
+ for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBInsufficientFundsUpkeepReport) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseInsufficientFundsUpkeepReport(log types.Log) (*KeeperRegistryLogicBInsufficientFundsUpkeepReport, error) { + event := new(KeeperRegistryLogicBInsufficientFundsUpkeepReport) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBOwnerFundsWithdrawnIterator struct { + Event *KeeperRegistryLogicBOwnerFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBOwnerFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + 
it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryLogicBOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBOwnerFundsWithdrawnIterator{contract: _KeeperRegistryLogicB.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBOwnerFundsWithdrawn) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryLogicBOwnerFundsWithdrawn, error) { + event := new(KeeperRegistryLogicBOwnerFundsWithdrawn) + if err 
:= _KeeperRegistryLogicB.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBOwnershipTransferRequestedIterator struct { + Event *KeeperRegistryLogicBOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicBOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := 
_KeeperRegistryLogicB.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBOwnershipTransferRequestedIterator{contract: _KeeperRegistryLogicB.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBOwnershipTransferRequested) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryLogicBOwnershipTransferRequested, error) { + event := new(KeeperRegistryLogicBOwnershipTransferRequested) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBOwnershipTransferredIterator struct { + Event *KeeperRegistryLogicBOwnershipTransferred + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicBOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBOwnershipTransferredIterator{contract: _KeeperRegistryLogicB.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) 
WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBOwnershipTransferred) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryLogicBOwnershipTransferred, error) { + event := new(KeeperRegistryLogicBOwnershipTransferred) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBPausedIterator struct { + Event *KeeperRegistryLogicBPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true 
+ + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBPaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryLogicBPausedIterator, error) { + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBPausedIterator{contract: _KeeperRegistryLogicB.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBPaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBPaused) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParsePaused(log types.Log) (*KeeperRegistryLogicBPaused, error) { + event := new(KeeperRegistryLogicBPaused) + if err := 
_KeeperRegistryLogicB.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBPayeesUpdatedIterator struct { + Event *KeeperRegistryLogicBPayeesUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBPayeesUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBPayeesUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBPayeesUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBPayeesUpdated struct { + Transmitters []common.Address + Payees []common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterPayeesUpdated(opts *bind.FilterOpts) (*KeeperRegistryLogicBPayeesUpdatedIterator, error) { + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBPayeesUpdatedIterator{contract: _KeeperRegistryLogicB.contract, event: "PayeesUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- 
*KeeperRegistryLogicBPayeesUpdated) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBPayeesUpdated) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParsePayeesUpdated(log types.Log) (*KeeperRegistryLogicBPayeesUpdated, error) { + event := new(KeeperRegistryLogicBPayeesUpdated) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBPayeeshipTransferRequestedIterator struct { + Event *KeeperRegistryLogicBPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := 
<-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBPayeeshipTransferRequested struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicBPayeeshipTransferRequestedIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBPayeeshipTransferRequestedIterator{contract: _KeeperRegistryLogicB.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, 
toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBPayeeshipTransferRequested) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryLogicBPayeeshipTransferRequested, error) { + event := new(KeeperRegistryLogicBPayeeshipTransferRequested) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBPayeeshipTransferredIterator struct { + Event *KeeperRegistryLogicBPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { 
+ it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBPayeeshipTransferred struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicBPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBPayeeshipTransferredIterator{contract: _KeeperRegistryLogicB.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule 
[]interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBPayeeshipTransferred) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryLogicBPayeeshipTransferred, error) { + event := new(KeeperRegistryLogicBPayeeshipTransferred) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBPaymentWithdrawnIterator struct { + Event *KeeperRegistryLogicBPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBPaymentWithdrawn struct { + Transmitter common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryLogicBPaymentWithdrawnIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBPaymentWithdrawnIterator{contract: _KeeperRegistryLogicB.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to 
{ + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBPaymentWithdrawn) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryLogicBPaymentWithdrawn, error) { + event := new(KeeperRegistryLogicBPaymentWithdrawn) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBReorgedUpkeepReportIterator struct { + Event *KeeperRegistryLogicBReorgedUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBReorgedUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := 
<-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBReorgedUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBReorgedUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBReorgedUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBReorgedUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBReorgedUpkeepReportIterator{contract: _KeeperRegistryLogicB.contract, event: "ReorgedUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBReorgedUpkeepReport) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB 
*KeeperRegistryLogicBFilterer) ParseReorgedUpkeepReport(log types.Log) (*KeeperRegistryLogicBReorgedUpkeepReport, error) { + event := new(KeeperRegistryLogicBReorgedUpkeepReport) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBStaleUpkeepReportIterator struct { + Event *KeeperRegistryLogicBStaleUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBStaleUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBStaleUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBStaleUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBStaleUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBStaleUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, 
"StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBStaleUpkeepReportIterator{contract: _KeeperRegistryLogicB.contract, event: "StaleUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBStaleUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBStaleUpkeepReport) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseStaleUpkeepReport(log types.Log) (*KeeperRegistryLogicBStaleUpkeepReport, error) { + event := new(KeeperRegistryLogicBStaleUpkeepReport) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUnpausedIterator struct { + Event *KeeperRegistryLogicBUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUnpaused) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryLogicBUnpausedIterator, error) { + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUnpausedIterator{contract: _KeeperRegistryLogicB.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUnpaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUnpaused) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func 
(_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUnpaused(log types.Log) (*KeeperRegistryLogicBUnpaused, error) { + event := new(KeeperRegistryLogicBUnpaused) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepAdminTransferRequestedIterator struct { + Event *KeeperRegistryLogicBUpkeepAdminTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUpkeepAdminTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepAdminTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepAdminTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepAdminTransferRequested struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicBUpkeepAdminTransferRequestedIterator, error) { + + var idRule []interface{} 
+ for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepAdminTransferRequestedIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepAdminTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepAdminTransferRequested) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepAdminTransferRequested(log 
types.Log) (*KeeperRegistryLogicBUpkeepAdminTransferRequested, error) { + event := new(KeeperRegistryLogicBUpkeepAdminTransferRequested) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepAdminTransferredIterator struct { + Event *KeeperRegistryLogicBUpkeepAdminTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUpkeepAdminTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepAdminTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepAdminTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepAdminTransferred struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicBUpkeepAdminTransferredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + 
var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepAdminTransferredIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepAdminTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepAdminTransferred) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryLogicBUpkeepAdminTransferred, error) { + event := new(KeeperRegistryLogicBUpkeepAdminTransferred) + if 
err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepCanceledIterator struct { + Event *KeeperRegistryLogicBUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryLogicBUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepCanceled", idRule, 
atBlockHeightRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepCanceledIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepCanceled) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepCanceled(log types.Log) (*KeeperRegistryLogicBUpkeepCanceled, error) { + event := new(KeeperRegistryLogicBUpkeepCanceled) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepCheckDataSetIterator struct { + Event *KeeperRegistryLogicBUpkeepCheckDataSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*KeeperRegistryLogicBUpkeepCheckDataSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepCheckDataSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepCheckDataSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepCheckDataSet struct { + Id *big.Int + NewCheckData []byte + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepCheckDataSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepCheckDataSetIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepCheckDataSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, 
"UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepCheckDataSet) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepCheckDataSet(log types.Log) (*KeeperRegistryLogicBUpkeepCheckDataSet, error) { + event := new(KeeperRegistryLogicBUpkeepCheckDataSet) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepGasLimitSetIterator struct { + Event *KeeperRegistryLogicBUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepGasLimitSetIterator) 
Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepGasLimitSetIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepGasLimitSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepGasLimitSet) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryLogicBUpkeepGasLimitSet, error) { + event := new(KeeperRegistryLogicBUpkeepGasLimitSet) + 
if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepMigratedIterator struct { + Event *KeeperRegistryLogicBUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepMigrated struct { + Id *big.Int + RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepMigratedIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepMigrated", logs: 
logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepMigrated) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepMigrated(log types.Log) (*KeeperRegistryLogicBUpkeepMigrated, error) { + event := new(KeeperRegistryLogicBUpkeepMigrated) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepOffchainConfigSetIterator struct { + Event *KeeperRegistryLogicBUpkeepOffchainConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUpkeepOffchainConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + 
select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepOffchainConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepOffchainConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepOffchainConfigSet struct { + Id *big.Int + OffchainConfig []byte + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepOffchainConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepOffchainConfigSetIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepOffchainConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepOffchainConfigSet) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, 
"UpkeepOffchainConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepOffchainConfigSet(log types.Log) (*KeeperRegistryLogicBUpkeepOffchainConfigSet, error) { + event := new(KeeperRegistryLogicBUpkeepOffchainConfigSet) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepPausedIterator struct { + Event *KeeperRegistryLogicBUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepPaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepPaused(opts 
*bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepPausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepPausedIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepPaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepPaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepPaused) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepPaused(log types.Log) (*KeeperRegistryLogicBUpkeepPaused, error) { + event := new(KeeperRegistryLogicBUpkeepPaused) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepPerformedIterator struct { + Event *KeeperRegistryLogicBUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*KeeperRegistryLogicBUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepPerformed struct { + Id *big.Int + Success bool + TotalPayment *big.Int + GasUsed *big.Int + GasOverhead *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*KeeperRegistryLogicBUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepPerformedIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepPerformed, id []*big.Int, 
success []bool) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepPerformed) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepPerformed(log types.Log) (*KeeperRegistryLogicBUpkeepPerformed, error) { + event := new(KeeperRegistryLogicBUpkeepPerformed) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepPrivilegeConfigSetIterator struct { + Event *KeeperRegistryLogicBUpkeepPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUpkeepPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := 
<-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepPrivilegeConfigSet struct { + Id *big.Int + PrivilegeConfig []byte + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepPrivilegeConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepPrivilegeConfigSetIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepPrivilegeConfigSet) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, 
"UpkeepPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepPrivilegeConfigSet(log types.Log) (*KeeperRegistryLogicBUpkeepPrivilegeConfigSet, error) { + event := new(KeeperRegistryLogicBUpkeepPrivilegeConfigSet) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepReceivedIterator struct { + Event *KeeperRegistryLogicBUpkeepReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUpkeepReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepReceivedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepReceived struct { + Id *big.Int + StartingBalance *big.Int + ImportedFrom common.Address + Raw types.Log +} + +func 
(_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepReceivedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepReceivedIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepReceived", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepReceived, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepReceived) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepReceived(log types.Log) (*KeeperRegistryLogicBUpkeepReceived, error) { + event := new(KeeperRegistryLogicBUpkeepReceived) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepRegisteredIterator struct { + Event *KeeperRegistryLogicBUpkeepRegistered + + contract *bind.BoundContract + event 
string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUpkeepRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepRegistered struct { + Id *big.Int + PerformGas uint32 + Admin common.Address + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepRegisteredIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = 
append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepRegistered) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepRegistered(log types.Log) (*KeeperRegistryLogicBUpkeepRegistered, error) { + event := new(KeeperRegistryLogicBUpkeepRegistered) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepTriggerConfigSetIterator struct { + Event *KeeperRegistryLogicBUpkeepTriggerConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUpkeepTriggerConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true 
+ it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepTriggerConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepTriggerConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepTriggerConfigSet struct { + Id *big.Int + TriggerConfig []byte + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepTriggerConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryLogicBUpkeepTriggerConfigSetIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepTriggerConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepTriggerConfigSet) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB 
*KeeperRegistryLogicBFilterer) ParseUpkeepTriggerConfigSet(log types.Log) (*KeeperRegistryLogicBUpkeepTriggerConfigSet, error) { + event := new(KeeperRegistryLogicBUpkeepTriggerConfigSet) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryLogicBUpkeepUnpausedIterator struct { + Event *KeeperRegistryLogicBUpkeepUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryLogicBUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryLogicBUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryLogicBUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryLogicBUpkeepUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryLogicBUpkeepUnpaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepUnpausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.FilterLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return 
nil, err + } + return &KeeperRegistryLogicBUpkeepUnpausedIterator{contract: _KeeperRegistryLogicB.contract, event: "UpkeepUnpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepUnpaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryLogicB.contract.WatchLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryLogicBUpkeepUnpaused) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicBFilterer) ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryLogicBUpkeepUnpaused, error) { + event := new(KeeperRegistryLogicBUpkeepUnpaused) + if err := _KeeperRegistryLogicB.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetSignerInfo struct { + Active bool + Index uint8 +} +type GetState struct { + State KeeperRegistryBase21State + Config KeeperRegistryBase21OnchainConfig + Signers []common.Address + Transmitters []common.Address + F uint8 +} +type GetTransmitterInfo struct { + Active bool + Index uint8 + Balance *big.Int + LastCollected *big.Int + Payee common.Address +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicB) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case 
_KeeperRegistryLogicB.abi.Events["AdminPrivilegeConfigSet"].ID: + return _KeeperRegistryLogicB.ParseAdminPrivilegeConfigSet(log) + case _KeeperRegistryLogicB.abi.Events["CancelledUpkeepReport"].ID: + return _KeeperRegistryLogicB.ParseCancelledUpkeepReport(log) + case _KeeperRegistryLogicB.abi.Events["DedupKeyAdded"].ID: + return _KeeperRegistryLogicB.ParseDedupKeyAdded(log) + case _KeeperRegistryLogicB.abi.Events["FundsAdded"].ID: + return _KeeperRegistryLogicB.ParseFundsAdded(log) + case _KeeperRegistryLogicB.abi.Events["FundsWithdrawn"].ID: + return _KeeperRegistryLogicB.ParseFundsWithdrawn(log) + case _KeeperRegistryLogicB.abi.Events["InsufficientFundsUpkeepReport"].ID: + return _KeeperRegistryLogicB.ParseInsufficientFundsUpkeepReport(log) + case _KeeperRegistryLogicB.abi.Events["OwnerFundsWithdrawn"].ID: + return _KeeperRegistryLogicB.ParseOwnerFundsWithdrawn(log) + case _KeeperRegistryLogicB.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistryLogicB.ParseOwnershipTransferRequested(log) + case _KeeperRegistryLogicB.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistryLogicB.ParseOwnershipTransferred(log) + case _KeeperRegistryLogicB.abi.Events["Paused"].ID: + return _KeeperRegistryLogicB.ParsePaused(log) + case _KeeperRegistryLogicB.abi.Events["PayeesUpdated"].ID: + return _KeeperRegistryLogicB.ParsePayeesUpdated(log) + case _KeeperRegistryLogicB.abi.Events["PayeeshipTransferRequested"].ID: + return _KeeperRegistryLogicB.ParsePayeeshipTransferRequested(log) + case _KeeperRegistryLogicB.abi.Events["PayeeshipTransferred"].ID: + return _KeeperRegistryLogicB.ParsePayeeshipTransferred(log) + case _KeeperRegistryLogicB.abi.Events["PaymentWithdrawn"].ID: + return _KeeperRegistryLogicB.ParsePaymentWithdrawn(log) + case _KeeperRegistryLogicB.abi.Events["ReorgedUpkeepReport"].ID: + return _KeeperRegistryLogicB.ParseReorgedUpkeepReport(log) + case _KeeperRegistryLogicB.abi.Events["StaleUpkeepReport"].ID: + return 
_KeeperRegistryLogicB.ParseStaleUpkeepReport(log) + case _KeeperRegistryLogicB.abi.Events["Unpaused"].ID: + return _KeeperRegistryLogicB.ParseUnpaused(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepAdminTransferRequested"].ID: + return _KeeperRegistryLogicB.ParseUpkeepAdminTransferRequested(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepAdminTransferred"].ID: + return _KeeperRegistryLogicB.ParseUpkeepAdminTransferred(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepCanceled"].ID: + return _KeeperRegistryLogicB.ParseUpkeepCanceled(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepCheckDataSet"].ID: + return _KeeperRegistryLogicB.ParseUpkeepCheckDataSet(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepGasLimitSet"].ID: + return _KeeperRegistryLogicB.ParseUpkeepGasLimitSet(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepMigrated"].ID: + return _KeeperRegistryLogicB.ParseUpkeepMigrated(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepOffchainConfigSet"].ID: + return _KeeperRegistryLogicB.ParseUpkeepOffchainConfigSet(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepPaused"].ID: + return _KeeperRegistryLogicB.ParseUpkeepPaused(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepPerformed"].ID: + return _KeeperRegistryLogicB.ParseUpkeepPerformed(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepPrivilegeConfigSet"].ID: + return _KeeperRegistryLogicB.ParseUpkeepPrivilegeConfigSet(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepReceived"].ID: + return _KeeperRegistryLogicB.ParseUpkeepReceived(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepRegistered"].ID: + return _KeeperRegistryLogicB.ParseUpkeepRegistered(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepTriggerConfigSet"].ID: + return _KeeperRegistryLogicB.ParseUpkeepTriggerConfigSet(log) + case _KeeperRegistryLogicB.abi.Events["UpkeepUnpaused"].ID: + return _KeeperRegistryLogicB.ParseUpkeepUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log 
topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistryLogicBAdminPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x7c44b4eb59ee7873514e7e43e7718c269d872965938b288aa143befca62f99d2") +} + +func (KeeperRegistryLogicBCancelledUpkeepReport) Topic() common.Hash { + return common.HexToHash("0xc3237c8807c467c1b39b8d0395eff077313e691bf0a7388106792564ebfd5636") +} + +func (KeeperRegistryLogicBDedupKeyAdded) Topic() common.Hash { + return common.HexToHash("0xa4a4e334c0e330143f9437484fe516c13bc560b86b5b0daf58e7084aaac228f2") +} + +func (KeeperRegistryLogicBFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (KeeperRegistryLogicBFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (KeeperRegistryLogicBInsufficientFundsUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x377c8b0c126ae5248d27aca1c76fac4608aff85673ee3caf09747e1044549e02") +} + +func (KeeperRegistryLogicBOwnerFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1") +} + +func (KeeperRegistryLogicBOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistryLogicBOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistryLogicBPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (KeeperRegistryLogicBPayeesUpdated) Topic() common.Hash { + return common.HexToHash("0xa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725") +} + +func (KeeperRegistryLogicBPayeeshipTransferRequested) Topic() common.Hash { + return 
common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (KeeperRegistryLogicBPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (KeeperRegistryLogicBPaymentWithdrawn) Topic() common.Hash { + return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (KeeperRegistryLogicBReorgedUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x6aa7f60c176da7af894b384daea2249497448137f5943c1237ada8bc92bdc301") +} + +func (KeeperRegistryLogicBStaleUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x405288ea7be309e16cfdf481367f90a413e1d4634fcdaf8966546db9b93012e8") +} + +func (KeeperRegistryLogicBUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (KeeperRegistryLogicBUpkeepAdminTransferRequested) Topic() common.Hash { + return common.HexToHash("0xb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b35") +} + +func (KeeperRegistryLogicBUpkeepAdminTransferred) Topic() common.Hash { + return common.HexToHash("0x5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c") +} + +func (KeeperRegistryLogicBUpkeepCanceled) Topic() common.Hash { + return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (KeeperRegistryLogicBUpkeepCheckDataSet) Topic() common.Hash { + return common.HexToHash("0xcba2d5723b2ee59e53a8e8a82a4a7caf4fdfe70e9f7c582950bf7e7a5c24e83d") +} + +func (KeeperRegistryLogicBUpkeepGasLimitSet) Topic() common.Hash { + return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c") +} + +func (KeeperRegistryLogicBUpkeepMigrated) Topic() common.Hash { + return common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff") +} + +func 
(KeeperRegistryLogicBUpkeepOffchainConfigSet) Topic() common.Hash { + return common.HexToHash("0x3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf4850") +} + +func (KeeperRegistryLogicBUpkeepPaused) Topic() common.Hash { + return common.HexToHash("0x8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f") +} + +func (KeeperRegistryLogicBUpkeepPerformed) Topic() common.Hash { + return common.HexToHash("0xad8cc9579b21dfe2c2f6ea35ba15b656e46b4f5b0cb424f52739b8ce5cac9c5b") +} + +func (KeeperRegistryLogicBUpkeepPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x2fd8d70753a007014349d4591843cc031c2dd7a260d7dd82eca8253686ae7769") +} + +func (KeeperRegistryLogicBUpkeepReceived) Topic() common.Hash { + return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71") +} + +func (KeeperRegistryLogicBUpkeepRegistered) Topic() common.Hash { + return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + +func (KeeperRegistryLogicBUpkeepTriggerConfigSet) Topic() common.Hash { + return common.HexToHash("0x2b72ac786c97e68dbab71023ed6f2bdbfc80ad9bb7808941929229d71b7d5664") +} + +func (KeeperRegistryLogicBUpkeepUnpaused) Topic() common.Hash { + return common.HexToHash("0x7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a47456") +} + +func (_KeeperRegistryLogicB *KeeperRegistryLogicB) Address() common.Address { + return _KeeperRegistryLogicB.address +} + +type KeeperRegistryLogicBInterface interface { + GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetAdminPrivilegeConfig(opts *bind.CallOpts, admin common.Address) ([]byte, error) + + GetAutomationForwarderLogic(opts *bind.CallOpts) (common.Address, error) + + GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetCancellationDelay(opts *bind.CallOpts) (*big.Int, error) + + GetConditionalGasOverhead(opts *bind.CallOpts) (*big.Int, error) + + 
GetFastGasFeedAddress(opts *bind.CallOpts) (common.Address, error) + + GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) + + GetLinkAddress(opts *bind.CallOpts) (common.Address, error) + + GetLinkNativeFeedAddress(opts *bind.CallOpts) (common.Address, error) + + GetLogGasOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetMaxPaymentForGas(opts *bind.CallOpts, triggerType uint8, gasLimit uint32) (*big.Int, error) + + GetMinBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetMode(opts *bind.CallOpts) (uint8, error) + + GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) + + GetPerPerformByteGasOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetPerSignerGasOverhead(opts *bind.CallOpts) (*big.Int, error) + + GetSignerInfo(opts *bind.CallOpts, query common.Address) (GetSignerInfo, + + error) + + GetState(opts *bind.CallOpts) (GetState, + + error) + + GetTransmitterInfo(opts *bind.CallOpts, query common.Address) (GetTransmitterInfo, + + error) + + GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) + + GetUpkeep(opts *bind.CallOpts, id *big.Int) (KeeperRegistryBase21UpkeepInfo, error) + + GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + + GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + + HasDedupKey(opts *bind.CallOpts, dedupKey [32]byte) (bool, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error) + + UpkeepVersion(opts *bind.CallOpts) (uint8, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) + + AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) 
(*types.Transaction, error) + + PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + SetAdminPrivilegeConfig(opts *bind.TransactOpts, admin common.Address, newPrivilegeConfig []byte) (*types.Transaction, error) + + SetPayees(opts *bind.TransactOpts, payees []common.Address) (*types.Transaction, error) + + SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) + + SetUpkeepCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) + + SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) + + SetUpkeepOffchainConfig(opts *bind.TransactOpts, id *big.Int, config []byte) (*types.Transaction, error) + + SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, newPrivilegeConfig []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) + + TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) + + WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*KeeperRegistryLogicBAdminPrivilegeConfigSetIterator, error) + + WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- 
*KeeperRegistryLogicBAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) + + ParseAdminPrivilegeConfigSet(log types.Log) (*KeeperRegistryLogicBAdminPrivilegeConfigSet, error) + + FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBCancelledUpkeepReportIterator, error) + + WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseCancelledUpkeepReport(log types.Log) (*KeeperRegistryLogicBCancelledUpkeepReport, error) + + FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*KeeperRegistryLogicBDedupKeyAddedIterator, error) + + WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBDedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) + + ParseDedupKeyAdded(log types.Log) (*KeeperRegistryLogicBDedupKeyAdded, error) + + FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryLogicBFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*KeeperRegistryLogicBFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBFundsWithdrawn, id []*big.Int) (event.Subscription, error) + + ParseFundsWithdrawn(log types.Log) (*KeeperRegistryLogicBFundsWithdrawn, error) + + FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBInsufficientFundsUpkeepReportIterator, error) + + WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseInsufficientFundsUpkeepReport(log types.Log) 
(*KeeperRegistryLogicBInsufficientFundsUpkeepReport, error) + + FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryLogicBOwnerFundsWithdrawnIterator, error) + + WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBOwnerFundsWithdrawn) (event.Subscription, error) + + ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryLogicBOwnerFundsWithdrawn, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicBOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryLogicBOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryLogicBOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistryLogicBOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryLogicBPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*KeeperRegistryLogicBPaused, error) + + FilterPayeesUpdated(opts *bind.FilterOpts) (*KeeperRegistryLogicBPayeesUpdatedIterator, error) + + WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBPayeesUpdated) (event.Subscription, error) + + ParsePayeesUpdated(log types.Log) (*KeeperRegistryLogicBPayeesUpdated, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) 
(*KeeperRegistryLogicBPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryLogicBPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryLogicBPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryLogicBPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryLogicBPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryLogicBPaymentWithdrawn, error) + + FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBReorgedUpkeepReportIterator, error) + + WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseReorgedUpkeepReport(log types.Log) (*KeeperRegistryLogicBReorgedUpkeepReport, error) + + FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBStaleUpkeepReportIterator, error) + + WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBStaleUpkeepReport, id []*big.Int) (event.Subscription, error) + + 
ParseStaleUpkeepReport(log types.Log) (*KeeperRegistryLogicBStaleUpkeepReport, error) + + FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryLogicBUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*KeeperRegistryLogicBUnpaused, error) + + FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicBUpkeepAdminTransferRequestedIterator, error) + + WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryLogicBUpkeepAdminTransferRequested, error) + + FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryLogicBUpkeepAdminTransferredIterator, error) + + WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryLogicBUpkeepAdminTransferred, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryLogicBUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*KeeperRegistryLogicBUpkeepCanceled, error) + + FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepCheckDataSetIterator, error) + + WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) + 
+ ParseUpkeepCheckDataSet(log types.Log) (*KeeperRegistryLogicBUpkeepCheckDataSet, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepGasLimitSetIterator, error) + + WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryLogicBUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepMigrated(log types.Log) (*KeeperRegistryLogicBUpkeepMigrated, error) + + FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepOffchainConfigSetIterator, error) + + WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepOffchainConfigSet(log types.Log) (*KeeperRegistryLogicBUpkeepOffchainConfigSet, error) + + FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepPausedIterator, error) + + WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepPaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPaused(log types.Log) (*KeeperRegistryLogicBUpkeepPaused, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*KeeperRegistryLogicBUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*KeeperRegistryLogicBUpkeepPerformed, error) + + FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) 
(*KeeperRegistryLogicBUpkeepPrivilegeConfigSetIterator, error) + + WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPrivilegeConfigSet(log types.Log) (*KeeperRegistryLogicBUpkeepPrivilegeConfigSet, error) + + FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*KeeperRegistryLogicBUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*KeeperRegistryLogicBUpkeepRegistered, error) + + FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepTriggerConfigSetIterator, error) + + WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepTriggerConfigSet(log types.Log) (*KeeperRegistryLogicBUpkeepTriggerConfigSet, error) + + FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryLogicBUpkeepUnpausedIterator, error) + + WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryLogicBUpkeepUnpaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryLogicBUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_registry_wrapper1_1/keeper_registry_wrapper1_1.go 
b/core/gethwrappers/generated/keeper_registry_wrapper1_1/keeper_registry_wrapper1_1.go new file mode 100644 index 00000000..50cef3e6 --- /dev/null +++ b/core/gethwrappers/generated/keeper_registry_wrapper1_1/keeper_registry_wrapper1_1.go @@ -0,0 +1,3197 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package keeper_registry_wrapper1_1 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var KeeperRegistryMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkEthFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"fastGasFeed\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"}],\"name\":\"FlatFeeSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\"
:\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"KeepersUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type
\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"RegistrarChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},
{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"FAST_GAS_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI_ETH_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"cancelUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"maxLinkPayment\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"adj
ustedGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkEth\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCanceledUpkeepList\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFlatFee\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getKeeperInfo\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getKeeperList\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"getMaxPaymentForGas\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"maxPayment\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"t
ype\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"minBalance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRegistrar\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getUpkeep\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"lastKeeper\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getUpkeepCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"
\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"setKeepers\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},
{\"inputs\":[{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"name\":\"setRegistrar\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60e06040523480156200001157600080fd5b50604051620052493803806200524983398181016040526101608110156200003857600080fd5b508051602082015160408301516060840151608085015160a086015160c087015160e08801516101008901516101208a0151610140909a0151989997989697959694959394929391929091903380600081620000db576040805162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f0000000000000000604482015290519081900360640190fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156200010e576200010e8162000165565b50506001600255506003805460ff191690556001600160601b031960608c811b82166080528b811b821660a0528a901b1660c05262000154888888888888888862000215565b505050505050505050505062000487565b6001600160a01b038116331415620001c4576040805162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6200021f62000425565b6040518060c001604052808963ffffffff1681526020018863ffffffff1681526020018762ffffff1681526020018663ffffffff1681526020018562ffffff1681526020018461ffff16815250600b60008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160086101000a81548162ffffff021916908362ffffff160217905550606082015181600001600b6101000a81548163ffffffff021916908363ffffffff160217905550608082015181600001600f6101000a81548162ffffff021916908362ffffff16021790555060a08201518160000160126101000a81548161ffff021916908361ffff16021790555090505081600c8190555080600d819055507feb3c06937e6595fd80ec1add18a195026d5cf65f122cc3ffedbfb18a9ed80b3988878787878787604051808863ffffffff1681526020018762ffffff1681526020018663ffffffff1681526020018562ffffff1681526020018461ffff16815260200183815260200182815260200197505050505050505060405180910390a16040805
163ffffffff8916815290517f17b46a44a823646eef686b7824df2962de896bc9a012a60b67694c5cbf184d8b9181900360200190a15050505050505050565b6000546001600160a01b0316331462000485576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b565b60805160601c60a05160601c60c05160601c614d64620004e560003980610d0052806140085250806119eb52806140db525080610bf55280610fa652806113a2528061147752806119485280611b825280611c505250614d646000f3fe608060405234801561001057600080fd5b50600436106102265760003560e01c8063a4c0ed361161012a578063c41b813a116100bd578063db30a3861161008c578063f2fde38b11610071578063f2fde38b14610add578063faab9d3914610b10578063fecf27c914610b4357610226565b8063db30a38614610a41578063eb5dcd6c14610aa257610226565b8063c41b813a14610784578063c7c3a19a14610851578063c804802214610970578063da5c67411461098d57610226565b8063b657bc9c116100f9578063b657bc9c14610645578063b79550be14610662578063b7fdb4361461066a578063c3f909d41461072c57610226565b8063a4c0ed361461053d578063a710b221146105cf578063ad1783611461060a578063b121e1471461061257610226565b80635c975abb116101bd5780638456cb591161018c5780638da5cb5b116101715780638da5cb5b146104c657806393f0c1fc146104ce578063948108f71461050c57610226565b80638456cb591461049d5780638a601fc8146104a557610226565b80635c975abb146103c9578063744bfe61146103e557806379ba50971461041e5780637bbaf1ea1461042657610226565b80632cb6864d116101f95780632cb6864d146103a75780633f4ba83a146103af5780634584a419146103b95780634d3f7334146103c157610226565b806315a126ea1461022b578063181f5a77146102835780631b6b6d23146103005780631e12b8a514610331575b600080fd5b610233610b4b565b60408051602080825283518183015283519192839290830191858101910280838360005b8381101561026f578181015183820152602001610257565b505050509050019250505060405180910390f35b61028b610bba565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102c55781810151838201526020016102ad565b50505050905090810190601f1680156102f25780820380516001836020036101000a031
916815260200191505b509250505060405180910390f35b610308610bf3565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190f35b6103646004803603602081101561034757600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610c17565b6040805173ffffffffffffffffffffffffffffffffffffffff909416845291151560208401526bffffffffffffffffffffffff1682820152519081900360600190f35b610233610c95565b6103b7610cec565b005b610308610cfe565b610308610d22565b6103d1610d3e565b604080519115158252519081900360200190f35b6103b7600480360360408110156103fb57600080fd5b508035906020013573ffffffffffffffffffffffffffffffffffffffff16610d47565b6103b7611067565b6103d16004803603604081101561043c57600080fd5b8135919081019060408101602082013564010000000081111561045e57600080fd5b82018360208201111561047057600080fd5b8035906020019184600183028401116401000000008311171561049257600080fd5b509092509050611169565b6103b76111bf565b6104ad6111cf565b6040805163ffffffff9092168252519081900360200190f35b6103086111e3565b6104eb600480360360208110156104e457600080fd5b50356111ff565b604080516bffffffffffffffffffffffff9092168252519081900360200190f35b6103b76004803603604081101561052257600080fd5b50803590602001356bffffffffffffffffffffffff16611235565b6103b76004803603606081101561055357600080fd5b73ffffffffffffffffffffffffffffffffffffffff8235169160208101359181019060608101604082013564010000000081111561059057600080fd5b8201836020820111156105a257600080fd5b803590602001918460018302840111640100000000831117156105c457600080fd5b50909250905061145f565b6103b7600480360360408110156105e557600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813581169160200135166116fb565b6103086119e9565b6103b76004803603602081101561062857600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16611a0d565b6104eb6004803603602081101561065b57600080fd5b5035611b3a565b6103b7611b76565b6103b76004803603604081101561068057600080fd5b81019060208101813564010000000081111561069b57600080fd5b8201836020820111156106ad57600080fd5b803590602001918460208302840111640100000000831117156106cf57600080fd5b919
3909290916020810190356401000000008111156106ed57600080fd5b8201836020820111156106ff57600080fd5b8035906020019184602083028401116401000000008311171561072157600080fd5b509092509050611d06565b610734612223565b6040805163ffffffff988916815262ffffff9788166020820152959097168588015292909416606084015261ffff16608083015260a082019290925260c081019190915290519081900360e00190f35b6107bd6004803603604081101561079a57600080fd5b508035906020013573ffffffffffffffffffffffffffffffffffffffff166122cc565b6040518080602001868152602001858152602001848152602001838152602001828103825287818151815260200191508051906020019080838360005b838110156108125781810151838201526020016107fa565b50505050905090810190601f16801561083f5780820380516001836020036101000a031916815260200191505b50965050505050505060405180910390f35b61086e6004803603602081101561086757600080fd5b503561296b565b604051808873ffffffffffffffffffffffffffffffffffffffff1681526020018763ffffffff16815260200180602001866bffffffffffffffffffffffff1681526020018573ffffffffffffffffffffffffffffffffffffffff1681526020018473ffffffffffffffffffffffffffffffffffffffff1681526020018367ffffffffffffffff168152602001828103825287818151815260200191508051906020019080838360005b8381101561092f578181015183820152602001610917565b50505050905090810190601f16801561095c5780820380516001836020036101000a031916815260200191505b509850505050505050505060405180910390f35b6103b76004803603602081101561098657600080fd5b5035612b14565b610a2f600480360360808110156109a357600080fd5b73ffffffffffffffffffffffffffffffffffffffff823581169263ffffffff602082013516926040820135909216918101906080810160608201356401000000008111156109f057600080fd5b820183602082011115610a0257600080fd5b80359060200191846001830284011164010000000083111715610a2457600080fd5b509092509050612d75565b60408051918252519081900360200190f35b6103b76004803603610100811015610a5857600080fd5b5063ffffffff8135811691602081013582169162ffffff604083013581169260608101359092169160808101359091169061ffff60a0820135169060c08101359060e001356131c1565b6103b760048036036040811015610ab857600080fd5b5
073ffffffffffffffffffffffffffffffffffffffff813581169160200135166133cf565b6103b760048036036020811015610af357600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16613599565b6103b760048036036020811015610b2657600080fd5b503573ffffffffffffffffffffffffffffffffffffffff166135ad565b610a2f61375c565b60606006805480602002602001604051908101604052809291908181526020018280548015610bb057602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610b85575b5050505050905090565b6040518060400160405280601481526020017f4b6565706572526567697374727920312e312e3000000000000000000000000081525081565b7f000000000000000000000000000000000000000000000000000000000000000081565b73ffffffffffffffffffffffffffffffffffffffff90811660009081526008602090815260409182902082516060810184528154948516808252740100000000000000000000000000000000000000009095046bffffffffffffffffffffffff1692810183905260019091015460ff16151592018290529192909190565b60606005805480602002602001604051908101604052809291908181526020018280548015610bb057602002820191906000526020600020905b815481526020019060010190808311610ccf575050505050905090565b610cf4613762565b610cfc6137e8565b565b7f000000000000000000000000000000000000000000000000000000000000000081565b600f5473ffffffffffffffffffffffffffffffffffffffff1690565b60035460ff1690565b8073ffffffffffffffffffffffffffffffffffffffff8116610dca57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f63616e6e6f742073656e6420746f207a65726f20616464726573730000000000604482015290519081900360640190fd5b6000838152600760205260409020600101546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff163314610e6f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c652062792061646d696e00000000000000000000604482015290519081900360640190fd5b6000838152600760205260409020600201544367ffffffffffffffff9091161115610efb57604080517f08c379a00000000
0000000000000000000000000000000000000000000000000815260206004820152601760248201527f75706b656570206d7573742062652063616e63656c6564000000000000000000604482015290519081900360640190fd5b600083815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff0000000000000000000000008116909155600e546bffffffffffffffffffffffff90911690610f5290826138d6565b600e556040805182815273ffffffffffffffffffffffffffffffffffffffff85166020820152815186927ff3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318928290030190a27f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb84836040518363ffffffff1660e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b15801561103557600080fd5b505af1158015611049573d6000803e3d6000fd5b505050506040513d602081101561105f57600080fd5b505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146110ed57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015290519081900360640190fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60006111b76111b2338686868080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506001925061394d915050565b613a0e565b949350505050565b6111c7613762565b610cfc613f0d565b600b54640100000000900463ffffffff1690565b60005473ffffffffffffffffffffffffffffffffffffffff1690565b600080600061120c613fd5565b91509150600061121d8360006141b4565b905061122a8582846141fa565b93505050505b919050565b60008281526007602052604090206002015467ffffffffffffffff908116146112bf57604080517f08c379a00000000000000000000000000000000000000000000000000000000081526020600482015260156
0248201527f75706b656570206d757374206265206163746976650000000000000000000000604482015290519081900360640190fd5b6000828152600760205260409020600101546112e9906bffffffffffffffffffffffff16826143a9565b600083815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff928316179055600e5461133f918316614435565b600e55604080517f23b872dd0000000000000000000000000000000000000000000000000000000081523360048201523060248201526bffffffffffffffffffffffff83166044820152905173ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016916323b872dd9160648083019260209291908290030181600087803b1580156113ea57600080fd5b505af11580156113fe573d6000803e3d6000fd5b505050506040513d602081101561141457600080fd5b5050604080516bffffffffffffffffffffffff831681529051339184917fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039181900360200190a35050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461150357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f6f6e6c792063616c6c61626c65207468726f756768204c494e4b000000000000604482015290519081900360640190fd5b6020811461157257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f64617461206d7573742062652033322062797465730000000000000000000000604482015290519081900360640190fd5b60008282602081101561158457600080fd5b503560008181526007602052604090206002015490915067ffffffffffffffff9081161461161357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f75706b656570206d757374206265206163746976650000000000000000000000604482015290519081900360640190fd5b60008181526007602052604090206001015461163d906bffffffffffffffffffffffff16856143a9565b600082815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff00000000000000000000000
0166bffffffffffffffffffffffff92909216919091179055600e546116969085614435565b600e55604080516bffffffffffffffffffffffff86168152905173ffffffffffffffffffffffffffffffffffffffff87169183917fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039181900360200190a35050505050565b8073ffffffffffffffffffffffffffffffffffffffff811661177e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f63616e6e6f742073656e6420746f207a65726f20616464726573730000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff83811660009081526008602090815260409182902082516060810184528154948516808252740100000000000000000000000000000000000000009095046bffffffffffffffffffffffff16928101929092526001015460ff1615159181019190915290331461186457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c6520627920706179656500000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff80851660009081526008602090815260409091208054909216909155810151600e546118b2916bffffffffffffffffffffffff166138d6565b600e819055508273ffffffffffffffffffffffffffffffffffffffff1681602001516bffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167f9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f4069833604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a47f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb8483602001516040518363ffffffff1660e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff168152602001826bffffffffffffffffffffffff16815260200192505050602060405180830381600087803b15801561103557600080fd5b7f000000000000000000000000000000000000000000000000000000000000000081565b73ffffffffffffffffffffffffffffffffffffffff818116600090815260096020526040902054163314611aa257604080517f08c379a000000000000000000000000000000000000000000
000000000000000815260206004820152601f60248201527f6f6e6c792063616c6c61626c652062792070726f706f73656420706179656500604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff81811660008181526008602090815260408083208054337fffffffffffffffffffffffff000000000000000000000000000000000000000080831682179093556009909452828520805490921690915590519416939092849290917f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b39190a45050565b600081815260076020526040812054611b709074010000000000000000000000000000000000000000900463ffffffff166111ff565b92915050565b611b7e613762565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff166370a08231306040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611c0757600080fd5b505afa158015611c1b573d6000803e3d6000fd5b505050506040513d6020811015611c3157600080fd5b5051600e5490915073ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb903390611c849085906138d6565b6040518363ffffffff1660e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b158015611cd757600080fd5b505af1158015611ceb573d6000803e3d6000fd5b505050506040513d6020811015611d0157600080fd5b505050565b611d0e613762565b828114611d66576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526021815260200180614d376021913960400191505060405180910390fd5b6002831015611dd657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6e6f7420656e6f756768206b6565706572730000000000000000000000000000604482015290519081900360640190fd5b60005b600654811015611e5657600060068281548110611df257fe5b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff1682526008905260409020600190810180547ffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffff00169055919091019050611dd9565b5060005b83811015612140576000858583818110611e7057fe5b73ffffffffffffffffffffffffffffffffffffffff6020918202939093013583166000818152600890925260408220805491955093169150868686818110611eb457fe5b9050602002013573ffffffffffffffffffffffffffffffffffffffff169050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415611f59576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526024815260200180614ccf6024913960400191505060405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff82161580611fa757508073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16145b80611fc7575073ffffffffffffffffffffffffffffffffffffffff818116145b61203257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f63616e6e6f74206368616e676520706179656500000000000000000000000000604482015290519081900360640190fd5b600183015460ff16156120a657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f63616e6e6f7420616464206b6565706572207477696365000000000000000000604482015290519081900360640190fd5b600183810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016909117905573ffffffffffffffffffffffffffffffffffffffff818116146121305782547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff82161783555b505060019092019150611e5a9050565b5061214d60068585614b44565b507f056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f848484846040518080602001806020018381038352878782818152602001925060200280828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169091018481038352858152602090810191508690860280828437600083820152604051601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018290039850909650505050505050a150505050565b6040805160c
081018252600b5463ffffffff80821680845264010000000083048216602085015268010000000000000000830462ffffff9081169585018690526b0100000000000000000000008404909216606085018190526f01000000000000000000000000000000840490921660808501819052720100000000000000000000000000000000000090930461ffff1660a0909401849052600c54600d549196929492909190565b60606000806000806122dc610d3e565b1561234857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f5061757361626c653a2070617573656400000000000000000000000000000000604482015290519081900360640190fd5b6123506144a9565b6000878152600760209081526040808320815160c081018352815473ffffffffffffffffffffffffffffffffffffffff80821683527401000000000000000000000000000000000000000090910463ffffffff16828601526001808401546bffffffffffffffffffffffff8116848701526c0100000000000000000000000090048216606084015260029384015467ffffffffffffffff8116608085015268010000000000000000900490911660a08301528c8652600a8552838620935160248101958652845461010092811615929092027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190911692909204604483018190529094937f6e04ff0d0000000000000000000000000000000000000000000000000000000093929091829160640190849080156124cf5780601f106124a4576101008083540402835291602001916124cf565b820191906000526020600020905b8154815290600101906020018083116124b257829003601f168201915b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009097169690961786528751600b549151835193985060009788975073ffffffffffffffffffffffffffffffffffffffff909216955063ffffffff6b01000000000000000000000090930492909216935087928291908083835b602083106125da57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161259d565b6001836020036101000a03801982511681845116808217855250505050505090500191505060006040518083038160008787f19250505
03d806000811461263d576040519150601f19603f3d011682016040523d82523d6000602084013e612642565b606091505b5091509150816127dd57600061265782614516565b905060008160405160200180807f63616c6c20746f20636865636b20746172676574206661696c65643a20000000815250601d0182805190602001908083835b602083106126d457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101612697565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff018019909216911617905260408051929094018281037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0018352938490527f08c379a00000000000000000000000000000000000000000000000000000000084526004840181815282516024860152825192975087965094508493604401925085019080838360005b838110156127a257818101518382015260200161278a565b50505050905090810190601f1680156127cf5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b8080602001905160408110156127f257600080fd5b81516020830180516040519294929383019291908464010000000082111561281957600080fd5b90830190602082018581111561282e57600080fd5b825164010000000081118282018810171561284857600080fd5b82525081516020918201929091019080838360005b8381101561287557818101518382015260200161285d565b50505050905090810190601f1680156128a25780820380516001836020036101000a031916815260200191505b50604052505050809a5081935050508161291d57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f75706b656570206e6f74206e6565646564000000000000000000000000000000604482015290519081900360640190fd5b600061292c8b8d8c600061394d565b9050612941858260000151836060015161462d565b6060810151608082015160a083015160c0909301519b9e919d509b50909998509650505050505050565b6000818152600760209081526040808320815160c081018352815473ffffffffffffffffffffffffffffffffffffffff8082168084527401000000000000000000000000000000000000000090920463ffffffff168387018190526001808601546bffffffffffffffffffffffff81168689019081526c010000000000000000000000009091048416606080880
191825260029889015467ffffffffffffffff811660808a019081526801000000000000000090910490961660a089019081528d8d52600a8c528a8d20935190519251965184548c5161010097821615979097027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01169a909a04601f81018d90048d0286018d01909b528a85528c9b919a8c9a8b9a8b9a8b9a91999098909796949591939091879190830182828015612af35780601f10612ac857610100808354040283529160200191612af3565b820191906000526020600020905b815481529060010190602001808311612ad657829003601f168201915b50505050509450975097509750975097509750975050919395979092949650565b60008181526007602052604081206002015467ffffffffffffffff9081169190821490612b3f6111e3565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161490508180612b8c5750808015612b8c5750438367ffffffffffffffff16115b612bf757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f746f6f206c61746520746f2063616e63656c2075706b65657000000000000000604482015290519081900360640190fd5b8080612c3957506000848152600760205260409020600101546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1633145b612ca457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f6f6e6c79206f776e6572206f722061646d696e00000000000000000000000000604482015290519081900360640190fd5b4381612cb857612cb5816032614435565b90505b600085815260076020526040902060020180547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff83161790558215612d3757600580546001810182556000919091527f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0018590555b60405167ffffffffffffffff82169086907f91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f79118190600090a35050505050565b6000612d7f6111e3565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161480612dcf5750600f5473ffffffffffffffffffffffffffffffffffffffff1633145b612e24576040517f08c379a000000000000
0000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180614cf36023913960400191505060405180910390fd5b612e438673ffffffffffffffffffffffffffffffffffffffff166147e4565b612eae57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f746172676574206973206e6f74206120636f6e74726163740000000000000000604482015290519081900360640190fd5b6108fc8563ffffffff161015612f2557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600f60248201527f6d696e2067617320697320323330300000000000000000000000000000000000604482015290519081900360640190fd5b624c4b408563ffffffff161115612f9d57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6d61782067617320697320353030303030300000000000000000000000000000604482015290519081900360640190fd5b506004546040805160c08101825273ffffffffffffffffffffffffffffffffffffffff808916825263ffffffff808916602080850191825260008587018181528b86166060880190815267ffffffffffffffff6080890181815260a08a018581528c8652600787528b86209a518b54985190991674010000000000000000000000000000000000000000027fffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffff998b167fffffffffffffffffffffffff000000000000000000000000000000000000000090991698909817989098169690961789559151600189018054925189166c01000000000000000000000000026bffffffffffffffffffffffff9283167fffffffffffffffffffffffffffffffffffffffff00000000000000000000000090941693909317909116919091179055925160029096018054945190951668010000000000000000027fffffffff0000000000000000000000000000000000000000ffffffffffffffff969093167fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000909416939093179490941617909155600a90915220613159908484614bcc565b506004805460010190556040805163ffffffff8716815273ffffffffffffffffffffffffffffffffffffffff86166020820152815183927fbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012928290030190a295945050505050565b6131c9613762565
b6040518060c001604052808963ffffffff1681526020018863ffffffff1681526020018762ffffff1681526020018663ffffffff1681526020018562ffffff1681526020018461ffff16815250600b60008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160086101000a81548162ffffff021916908362ffffff160217905550606082015181600001600b6101000a81548163ffffffff021916908363ffffffff160217905550608082015181600001600f6101000a81548162ffffff021916908362ffffff16021790555060a08201518160000160126101000a81548161ffff021916908361ffff16021790555090505081600c8190555080600d819055507feb3c06937e6595fd80ec1add18a195026d5cf65f122cc3ffedbfb18a9ed80b3988878787878787604051808863ffffffff1681526020018762ffffff1681526020018663ffffffff1681526020018562ffffff1681526020018461ffff16815260200183815260200182815260200197505050505050505060405180910390a16040805163ffffffff8916815290517f17b46a44a823646eef686b7824df2962de896bc9a012a60b67694c5cbf184d8b9181900360200190a15050505050505050565b73ffffffffffffffffffffffffffffffffffffffff82811660009081526008602052604090205416331461346457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f6f6e6c792063616c6c61626c6520627920706179656500000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff81163314156134e957604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f63616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff8281166000908152600960205260409020548116908216146135955773ffffffffffffffffffffffffffffffffffffffff82811660008181526009602052604080822080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169486169485179055513392917f84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e3836791a45b5050565b6135a1613762565b6135aa816147ea565b50565b6135b56
111e3565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806136055750600f5473ffffffffffffffffffffffffffffffffffffffff1633145b61365a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180614cf36023913960400191505060405180910390fd5b600f5473ffffffffffffffffffffffffffffffffffffffff9081169082168114156136e657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600e60248201527f53616d6520726567697374726172000000000000000000000000000000000000604482015290519081900360640190fd5b600f80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff84811691821790925560405190918316907f9bf4a5b30267728df68663e14adb47e559863967c419dc6030638883408bed2e90600090a35050565b60045490565b60005473ffffffffffffffffffffffffffffffffffffffff163314610cfc57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b6137f0610d3e565b61385b57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f5061757361626c653a206e6f7420706175736564000000000000000000000000604482015290519081900360640190fd5b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa6138ac6148e5565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190a1565b60008282111561394757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b50900390565b613955614c66565b60008481526007602052604081205474010000000000000000000000000000000000000000900463ffffffff16908061398c613fd5565b91509150600061399c8387614
1b4565b905060006139ab8583856141fa565b6040805160e08101825273ffffffffffffffffffffffffffffffffffffffff8d168152602081018c90529081018a90526bffffffffffffffffffffffff909116606082015260808101959095525060a084015260c0830152509050949350505050565b6000600280541415613a8157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f5265656e7472616e637947756172643a207265656e7472616e742063616c6c00604482015290519081900360640190fd5b600280556020820151613a93816148e9565b602083810151600090815260078252604090819020815160c081018352815473ffffffffffffffffffffffffffffffffffffffff80821683527401000000000000000000000000000000000000000090910463ffffffff169482019490945260018201546bffffffffffffffffffffffff8116938201939093526c01000000000000000000000000909204831660608084019190915260029091015467ffffffffffffffff8116608084015268010000000000000000900490921660a08201528451918501519091613b679183919061462d565b60005a90506000634585e33b60e01b86604001516040516024018080602001828103825283818151815260200191508051906020019080838360005b83811015613bbb578181015183820152602001613ba3565b50505050905090810190601f168015613be85780820380516001836020036101000a031916815260200191505b50604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009096169590951790945250505060808701518451919250613c7d9183614974565b94505a820391506000613c99838860a001518960c001516141fa565b60208089015160009081526007909152604081206001015491925090613ccd906bffffffffffffffffffffffff16836149c0565b60208981018051600090815260078352604080822060010180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff878116919091179091558d519351835281832060020180547fffffffff0000000000000000000000000000000000000000ffffffffffffffff166801000000000000000073ffffffffffffffffffffffffffffffffffffffff968716021790558d5190931682526008909
352918220549293509091613daf917401000000000000000000000000000000000000000090910416846143a9565b905080600860008b6000015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060000160146101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550886000015173ffffffffffffffffffffffffffffffffffffffff168815158a602001517fcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6868d6040015160405180836bffffffffffffffffffffffff16815260200180602001828103825283818151815260200191508051906020019080838360005b83811015613ec1578181015183820152602001613ea9565b50505050905090810190601f168015613eee5780820380516001836020036101000a031916815260200191505b50935050505060405180910390a4505050505050506001600255919050565b613f15610d3e565b15613f8157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f5061757361626c653a2070617573656400000000000000000000000000000000604482015290519081900360640190fd5b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a2586138ac6148e5565b6000806000600b600001600f9054906101000a900462ffffff1662ffffff1690506000808263ffffffff161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b15801561406c57600080fd5b505afa158015614080573d6000803e3d6000fd5b505050506040513d60a081101561409657600080fd5b506020810151606090910151925090508280156140ba57508142038463ffffffff16105b806140c6575060008113155b156140d557600c5495506140d9565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b15801561413f57600080fd5b505afa158015614153573d6000803e3d6000fd5b505050506040513d60a081101561416957600080fd5
b5060208101516060909101519250905082801561418d57508142038463ffffffff16105b80614199575060008113155b156141a857600d5494506141ac565b8094505b505050509091565b600b546000906141df9084907201000000000000000000000000000000000000900461ffff16614a4d565b90508180156141ed5750803a105b15611b7057503a92915050565b6040805160c081018252600b5463ffffffff808216835264010000000082048116602084015262ffffff6801000000000000000083048116948401949094526b0100000000000000000000008204811660608401526f010000000000000000000000000000008204909316608083015261ffff72010000000000000000000000000000000000009091041660a082015260009182906142ab906142a4908890620138809061443516565b8690614a4d565b905060006142d0836000015163ffffffff16633b9aca0061443590919063ffffffff16565b905060006143216142f964e8d4a51000866020015163ffffffff16614a4d90919063ffffffff16565b61431b886143158661430f89633b9aca00614a4d565b90614a4d565b90614ac0565b90614435565b90506b033b2e3c9fd0803ce800000081111561439e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f7061796d656e742067726561746572207468616e20616c6c204c494e4b000000604482015290519081900360640190fd5b979650505050505050565b60008282016bffffffffffffffffffffffff808516908216101561442e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b9392505050565b60008282018381101561442e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b3215610cfc57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f6f6e6c7920666f722073696d756c61746564206261636b656e64000000000000604482015290519081900360640190fd5b606060448251101561455c575060408051808201909152601d81527f7472616e73616374696f6e2072657665727465642073696c656e746c79000
0006020820152611230565b600482018051909260240190602081101561457657600080fd5b810190808051604051939291908464010000000082111561459657600080fd5b9083019060208201858111156145ab57600080fd5b82516401000000008111828201881017156145c557600080fd5b82525081516020918201929091019080838360005b838110156145f25781810151838201526020016145da565b50505050905090810190601f16801561461f5780820380516001836020036101000a031916815260200191505b506040525050509050919050565b73ffffffffffffffffffffffffffffffffffffffff821660009081526008602052604090206001015460ff166146c457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f6f6e6c7920616374697665206b65657065727300000000000000000000000000604482015290519081900360640190fd5b8083604001516bffffffffffffffffffffffff16101561474557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f696e73756666696369656e742066756e64730000000000000000000000000000604482015290519081900360640190fd5b8173ffffffffffffffffffffffffffffffffffffffff168360a0015173ffffffffffffffffffffffffffffffffffffffff161415611d0157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f6b656570657273206d7573742074616b65207475726e73000000000000000000604482015290519081900360640190fd5b3b151590565b73ffffffffffffffffffffffffffffffffffffffff811633141561486f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b3390565b6000818152600760205260409020600201544367ffffffffffffffff909116116135aa57604080517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048
20152601160248201527f696e76616c69642075706b656570206964000000000000000000000000000000604482015290519081900360640190fd5b60005a61138881101561498657600080fd5b61138881039050846040820482031161499e57600080fd5b50823b6149aa57600080fd5b60008083516020850160008789f1949350505050565b6000826bffffffffffffffffffffffff16826bffffffffffffffffffffffff16111561394757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b600082614a5c57506000611b70565b82820282848281614a6957fe5b041461442e576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526021815260200180614d166021913960400191505060405180910390fd5b6000808211614b3057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f536166654d6174683a206469766973696f6e206279207a65726f000000000000604482015290519081900360640190fd5b6000828481614b3b57fe5b04949350505050565b828054828255906000526020600020908101928215614bbc579160200282015b82811115614bbc5781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff843516178255602090920191600190910190614b64565b50614bc8929150614cb9565b5090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282614c025760008555614bbc565b82601f10614c39578280017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00823516178555614bbc565b82800160010185558215614bbc579182015b82811115614bbc578235825591602001919060010190614c4b565b6040518060e00160405280600073ffffffffffffffffffffffffffffffffffffffff1681526020016000815260200160608152602001600081526020016000815260200160008152602001600081525090565b5b80821115614bc85760008155600101614cba56fe63616e6e6f742073657420706179656520746f20746865207a65726f20616464726573734f6e6c792063616c6c61626c65206279206f776e6572206f7220726567697374726172536166654d6174683a206d7
56c7469706c69636174696f6e206f766572666c6f7761646472657373206c69737473206e6f74207468652073616d65206c656e677468a164736f6c6343000706000a", +} + +var KeeperRegistryABI = KeeperRegistryMetaData.ABI + +var KeeperRegistryBin = KeeperRegistryMetaData.Bin + +func DeployKeeperRegistry(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, linkEthFeed common.Address, fastGasFeed common.Address, paymentPremiumPPB uint32, flatFeeMicroLink uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int) (common.Address, *types.Transaction, *KeeperRegistry, error) { + parsed, err := KeeperRegistryMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistryBin), backend, link, linkEthFeed, fastGasFeed, paymentPremiumPPB, flatFeeMicroLink, blockCountPerTurn, checkGasLimit, stalenessSeconds, gasCeilingMultiplier, fallbackGasPrice, fallbackLinkPrice) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistry{address: address, abi: *parsed, KeeperRegistryCaller: KeeperRegistryCaller{contract: contract}, KeeperRegistryTransactor: KeeperRegistryTransactor{contract: contract}, KeeperRegistryFilterer: KeeperRegistryFilterer{contract: contract}}, nil +} + +type KeeperRegistry struct { + address common.Address + abi abi.ABI + KeeperRegistryCaller + KeeperRegistryTransactor + KeeperRegistryFilterer +} + +type KeeperRegistryCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistryTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistryFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistrySession struct { + Contract *KeeperRegistry + CallOpts bind.CallOpts + 
TransactOpts bind.TransactOpts +} + +type KeeperRegistryCallerSession struct { + Contract *KeeperRegistryCaller + CallOpts bind.CallOpts +} + +type KeeperRegistryTransactorSession struct { + Contract *KeeperRegistryTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistryRaw struct { + Contract *KeeperRegistry +} + +type KeeperRegistryCallerRaw struct { + Contract *KeeperRegistryCaller +} + +type KeeperRegistryTransactorRaw struct { + Contract *KeeperRegistryTransactor +} + +func NewKeeperRegistry(address common.Address, backend bind.ContractBackend) (*KeeperRegistry, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistry(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistry{address: address, abi: abi, KeeperRegistryCaller: KeeperRegistryCaller{contract: contract}, KeeperRegistryTransactor: KeeperRegistryTransactor{contract: contract}, KeeperRegistryFilterer: KeeperRegistryFilterer{contract: contract}}, nil +} + +func NewKeeperRegistryCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryCaller, error) { + contract, err := bindKeeperRegistry(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryCaller{contract: contract}, nil +} + +func NewKeeperRegistryTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistryTransactor, error) { + contract, err := bindKeeperRegistry(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryTransactor{contract: contract}, nil +} + +func NewKeeperRegistryFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistryFilterer, error) { + contract, err := bindKeeperRegistry(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistryFilterer{contract: contract}, nil +} + +func bindKeeperRegistry(address 
common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistryMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperRegistry *KeeperRegistryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistry.Contract.KeeperRegistryCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.Contract.KeeperRegistryTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistry *KeeperRegistryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistry.Contract.KeeperRegistryTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistry.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistry.Contract.contract.Transact(opts, method, params...) 
+} + +func (_KeeperRegistry *KeeperRegistryCaller) FASTGASFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "FAST_GAS_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) FASTGASFEED() (common.Address, error) { + return _KeeperRegistry.Contract.FASTGASFEED(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) FASTGASFEED() (common.Address, error) { + return _KeeperRegistry.Contract.FASTGASFEED(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) PLI() (common.Address, error) { + return _KeeperRegistry.Contract.PLI(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) PLI() (common.Address, error) { + return _KeeperRegistry.Contract.PLI(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) PLIETHFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "PLI_ETH_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) PLIETHFEED() (common.Address, error) { + return _KeeperRegistry.Contract.PLIETHFEED(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) PLIETHFEED() (common.Address, error) { + return 
_KeeperRegistry.Contract.PLIETHFEED(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetCanceledUpkeepList(opts *bind.CallOpts) ([]*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getCanceledUpkeepList") + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetCanceledUpkeepList() ([]*big.Int, error) { + return _KeeperRegistry.Contract.GetCanceledUpkeepList(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetCanceledUpkeepList() ([]*big.Int, error) { + return _KeeperRegistry.Contract.GetCanceledUpkeepList(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetConfig(opts *bind.CallOpts) (GetConfig, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getConfig") + + outstruct := new(GetConfig) + if err != nil { + return *outstruct, err + } + + outstruct.PaymentPremiumPPB = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockCountPerTurn = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.CheckGasLimit = *abi.ConvertType(out[2], new(uint32)).(*uint32) + outstruct.StalenessSeconds = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.GasCeilingMultiplier = *abi.ConvertType(out[4], new(uint16)).(*uint16) + outstruct.FallbackGasPrice = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + outstruct.FallbackLinkPrice = *abi.ConvertType(out[6], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetConfig() (GetConfig, + + error) { + return _KeeperRegistry.Contract.GetConfig(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetConfig() (GetConfig, + + error) { + return _KeeperRegistry.Contract.GetConfig(&_KeeperRegistry.CallOpts) +} 
+ +func (_KeeperRegistry *KeeperRegistryCaller) GetFlatFee(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getFlatFee") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetFlatFee() (uint32, error) { + return _KeeperRegistry.Contract.GetFlatFee(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetFlatFee() (uint32, error) { + return _KeeperRegistry.Contract.GetFlatFee(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetKeeperInfo(opts *bind.CallOpts, query common.Address) (GetKeeperInfo, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getKeeperInfo", query) + + outstruct := new(GetKeeperInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Payee = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.Active = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.Balance = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetKeeperInfo(query common.Address) (GetKeeperInfo, + + error) { + return _KeeperRegistry.Contract.GetKeeperInfo(&_KeeperRegistry.CallOpts, query) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetKeeperInfo(query common.Address) (GetKeeperInfo, + + error) { + return _KeeperRegistry.Contract.GetKeeperInfo(&_KeeperRegistry.CallOpts, query) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetKeeperList(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getKeeperList") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func 
(_KeeperRegistry *KeeperRegistrySession) GetKeeperList() ([]common.Address, error) { + return _KeeperRegistry.Contract.GetKeeperList(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetKeeperList() ([]common.Address, error) { + return _KeeperRegistry.Contract.GetKeeperList(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetMaxPaymentForGas(opts *bind.CallOpts, gasLimit *big.Int) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getMaxPaymentForGas", gasLimit) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetMaxPaymentForGas(gasLimit *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMaxPaymentForGas(&_KeeperRegistry.CallOpts, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetMaxPaymentForGas(gasLimit *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMaxPaymentForGas(&_KeeperRegistry.CallOpts, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getMinBalanceForUpkeep", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMinBalanceForUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMinBalanceForUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetRegistrar(opts *bind.CallOpts) (common.Address, 
error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getRegistrar") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetRegistrar() (common.Address, error) { + return _KeeperRegistry.Contract.GetRegistrar(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetRegistrar() (common.Address, error) { + return _KeeperRegistry.Contract.GetRegistrar(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetUpkeep(opts *bind.CallOpts, id *big.Int) (GetUpkeep, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getUpkeep", id) + + outstruct := new(GetUpkeep) + if err != nil { + return *outstruct, err + } + + outstruct.Target = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.ExecuteGas = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.CheckData = *abi.ConvertType(out[2], new([]byte)).(*[]byte) + outstruct.Balance = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.LastKeeper = *abi.ConvertType(out[4], new(common.Address)).(*common.Address) + outstruct.Admin = *abi.ConvertType(out[5], new(common.Address)).(*common.Address) + outstruct.MaxValidBlocknumber = *abi.ConvertType(out[6], new(uint64)).(*uint64) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetUpkeep(id *big.Int) (GetUpkeep, + + error) { + return _KeeperRegistry.Contract.GetUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetUpkeep(id *big.Int) (GetUpkeep, + + error) { + return _KeeperRegistry.Contract.GetUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetUpkeepCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := 
_KeeperRegistry.contract.Call(opts, &out, "getUpkeepCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetUpkeepCount() (*big.Int, error) { + return _KeeperRegistry.Contract.GetUpkeepCount(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetUpkeepCount() (*big.Int, error) { + return _KeeperRegistry.Contract.GetUpkeepCount(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) Owner() (common.Address, error) { + return _KeeperRegistry.Contract.Owner(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) Owner() (common.Address, error) { + return _KeeperRegistry.Contract.Owner(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) Paused(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "paused") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) Paused() (bool, error) { + return _KeeperRegistry.Contract.Paused(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) Paused() (bool, error) { + return _KeeperRegistry.Contract.Paused(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "typeAndVersion") + + 
if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) TypeAndVersion() (string, error) { + return _KeeperRegistry.Contract.TypeAndVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) TypeAndVersion() (string, error) { + return _KeeperRegistry.Contract.TypeAndVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "acceptOwnership") +} + +func (_KeeperRegistry *KeeperRegistrySession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptOwnership(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptOwnership(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AcceptPayeeship(opts *bind.TransactOpts, keeper common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "acceptPayeeship", keeper) +} + +func (_KeeperRegistry *KeeperRegistrySession) AcceptPayeeship(keeper common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptPayeeship(&_KeeperRegistry.TransactOpts, keeper) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AcceptPayeeship(keeper common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptPayeeship(&_KeeperRegistry.TransactOpts, keeper) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "addFunds", id, amount) +} + +func (_KeeperRegistry *KeeperRegistrySession) AddFunds(id *big.Int, amount *big.Int) 
(*types.Transaction, error) { + return _KeeperRegistry.Contract.AddFunds(&_KeeperRegistry.TransactOpts, id, amount) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AddFunds(&_KeeperRegistry.TransactOpts, id, amount) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "cancelUpkeep", id) +} + +func (_KeeperRegistry *KeeperRegistrySession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CancelUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CancelUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) CheckUpkeep(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "checkUpkeep", id, from) +} + +func (_KeeperRegistry *KeeperRegistrySession) CheckUpkeep(id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CheckUpkeep(&_KeeperRegistry.TransactOpts, id, from) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) CheckUpkeep(id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CheckUpkeep(&_KeeperRegistry.TransactOpts, id, from) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistrySession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) 
(*types.Transaction, error) { + return _KeeperRegistry.Contract.OnTokenTransfer(&_KeeperRegistry.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.OnTokenTransfer(&_KeeperRegistry.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "pause") +} + +func (_KeeperRegistry *KeeperRegistrySession) Pause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Pause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Pause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Pause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) PerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "performUpkeep", id, performData) +} + +func (_KeeperRegistry *KeeperRegistrySession) PerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.PerformUpkeep(&_KeeperRegistry.TransactOpts, id, performData) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) PerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.PerformUpkeep(&_KeeperRegistry.TransactOpts, id, performData) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "recoverFunds") +} + +func (_KeeperRegistry *KeeperRegistrySession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.RecoverFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry 
*KeeperRegistryTransactorSession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.RecoverFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "registerUpkeep", target, gasLimit, admin, checkData) +} + +func (_KeeperRegistry *KeeperRegistrySession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.RegisterUpkeep(&_KeeperRegistry.TransactOpts, target, gasLimit, admin, checkData) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.RegisterUpkeep(&_KeeperRegistry.TransactOpts, target, gasLimit, admin, checkData) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetConfig(opts *bind.TransactOpts, paymentPremiumPPB uint32, flatFeeMicroLink uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setConfig", paymentPremiumPPB, flatFeeMicroLink, blockCountPerTurn, checkGasLimit, stalenessSeconds, gasCeilingMultiplier, fallbackGasPrice, fallbackLinkPrice) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetConfig(paymentPremiumPPB uint32, flatFeeMicroLink uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetConfig(&_KeeperRegistry.TransactOpts, 
paymentPremiumPPB, flatFeeMicroLink, blockCountPerTurn, checkGasLimit, stalenessSeconds, gasCeilingMultiplier, fallbackGasPrice, fallbackLinkPrice) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetConfig(paymentPremiumPPB uint32, flatFeeMicroLink uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetConfig(&_KeeperRegistry.TransactOpts, paymentPremiumPPB, flatFeeMicroLink, blockCountPerTurn, checkGasLimit, stalenessSeconds, gasCeilingMultiplier, fallbackGasPrice, fallbackLinkPrice) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetKeepers(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setKeepers", keepers, payees) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetKeepers(keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetKeepers(&_KeeperRegistry.TransactOpts, keepers, payees) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetKeepers(keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetKeepers(&_KeeperRegistry.TransactOpts, keepers, payees) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetRegistrar(opts *bind.TransactOpts, registrar common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setRegistrar", registrar) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetRegistrar(registrar common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetRegistrar(&_KeeperRegistry.TransactOpts, registrar) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetRegistrar(registrar common.Address) (*types.Transaction, error) { + return 
_KeeperRegistry.Contract.SetRegistrar(&_KeeperRegistry.TransactOpts, registrar) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transferOwnership", to) +} + +func (_KeeperRegistry *KeeperRegistrySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferOwnership(&_KeeperRegistry.TransactOpts, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferOwnership(&_KeeperRegistry.TransactOpts, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) TransferPayeeship(opts *bind.TransactOpts, keeper common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transferPayeeship", keeper, proposed) +} + +func (_KeeperRegistry *KeeperRegistrySession) TransferPayeeship(keeper common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferPayeeship(&_KeeperRegistry.TransactOpts, keeper, proposed) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) TransferPayeeship(keeper common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferPayeeship(&_KeeperRegistry.TransactOpts, keeper, proposed) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "unpause") +} + +func (_KeeperRegistry *KeeperRegistrySession) Unpause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Unpause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Unpause() (*types.Transaction, error) { + return 
_KeeperRegistry.Contract.Unpause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "withdrawFunds", id, to) +} + +func (_KeeperRegistry *KeeperRegistrySession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawFunds(&_KeeperRegistry.TransactOpts, id, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawFunds(&_KeeperRegistry.TransactOpts, id, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "withdrawPayment", from, to) +} + +func (_KeeperRegistry *KeeperRegistrySession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawPayment(&_KeeperRegistry.TransactOpts, from, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawPayment(&_KeeperRegistry.TransactOpts, from, to) +} + +type KeeperRegistryConfigSetIterator struct { + Event *KeeperRegistryConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true 
+ + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryConfigSet struct { + PaymentPremiumPPB uint32 + BlockCountPerTurn *big.Int + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryConfigSetIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &KeeperRegistryConfigSetIterator{contract: _KeeperRegistry.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryConfigSet) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseConfigSet(log 
types.Log) (*KeeperRegistryConfigSet, error) { + event := new(KeeperRegistryConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryFlatFeeSetIterator struct { + Event *KeeperRegistryFlatFeeSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryFlatFeeSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFlatFeeSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFlatFeeSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryFlatFeeSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryFlatFeeSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryFlatFeeSet struct { + FlatFeeMicroLink uint32 + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterFlatFeeSet(opts *bind.FilterOpts) (*KeeperRegistryFlatFeeSetIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "FlatFeeSet") + if err != nil { + return nil, err + } + return &KeeperRegistryFlatFeeSetIterator{contract: _KeeperRegistry.contract, event: "FlatFeeSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchFlatFeeSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFlatFeeSet) (event.Subscription, error) { + + logs, sub, err := 
_KeeperRegistry.contract.WatchLogs(opts, "FlatFeeSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryFlatFeeSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "FlatFeeSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseFlatFeeSet(log types.Log) (*KeeperRegistryFlatFeeSet, error) { + event := new(KeeperRegistryFlatFeeSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "FlatFeeSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryFundsAddedIterator struct { + Event *KeeperRegistryFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryFundsAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
KeeperRegistryFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryFundsAddedIterator{contract: _KeeperRegistry.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryFundsAdded) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseFundsAdded(log types.Log) (*KeeperRegistryFundsAdded, error) { + event := new(KeeperRegistryFundsAdded) + if err := _KeeperRegistry.contract.UnpackLog(event, 
"FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryFundsWithdrawnIterator struct { + Event *KeeperRegistryFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryFundsWithdrawnIterator{contract: _KeeperRegistry.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsWithdrawn, id 
[]*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseFundsWithdrawn(log types.Log) (*KeeperRegistryFundsWithdrawn, error) { + event := new(KeeperRegistryFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryKeepersUpdatedIterator struct { + Event *KeeperRegistryKeepersUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryKeepersUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryKeepersUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryKeepersUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + 
return it.Next() + } +} + +func (it *KeeperRegistryKeepersUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryKeepersUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryKeepersUpdated struct { + Keepers []common.Address + Payees []common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterKeepersUpdated(opts *bind.FilterOpts) (*KeeperRegistryKeepersUpdatedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "KeepersUpdated") + if err != nil { + return nil, err + } + return &KeeperRegistryKeepersUpdatedIterator{contract: _KeeperRegistry.contract, event: "KeepersUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchKeepersUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryKeepersUpdated) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "KeepersUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryKeepersUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "KeepersUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseKeepersUpdated(log types.Log) (*KeeperRegistryKeepersUpdated, error) { + event := new(KeeperRegistryKeepersUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "KeepersUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnershipTransferRequestedIterator struct { + Event *KeeperRegistryOwnershipTransferRequested + + contract *bind.BoundContract 
+ event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryOwnershipTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnershipTransferRequested(opts 
*bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnershipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryOwnershipTransferRequested, error) { + event := new(KeeperRegistryOwnershipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnershipTransferredIterator struct { + Event *KeeperRegistryOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + 
default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryOwnershipTransferredIterator{contract: _KeeperRegistry.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan 
struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnershipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryOwnershipTransferred, error) { + event := new(KeeperRegistryOwnershipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPausedIterator struct { + Event *KeeperRegistryPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistry 
*KeeperRegistryFilterer) FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryPausedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &KeeperRegistryPausedIterator{contract: _KeeperRegistry.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePaused(log types.Log) (*KeeperRegistryPaused, error) { + event := new(KeeperRegistryPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPayeeshipTransferRequestedIterator struct { + Event *KeeperRegistryPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + 
+ default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPayeeshipTransferRequested struct { + Keeper common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferRequestedIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PayeeshipTransferRequested", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPayeeshipTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferRequested, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var 
fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PayeeshipTransferRequested", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPayeeshipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryPayeeshipTransferRequested, error) { + event := new(KeeperRegistryPayeeshipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPayeeshipTransferredIterator struct { + Event *KeeperRegistryPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferred) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPayeeshipTransferred struct { + Keeper common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferredIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PayeeshipTransferred", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPayeeshipTransferredIterator{contract: _KeeperRegistry.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferred, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, 
sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PayeeshipTransferred", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPayeeshipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryPayeeshipTransferred, error) { + event := new(KeeperRegistryPayeeshipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPaymentWithdrawnIterator struct { + Event *KeeperRegistryPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) 
Error() error { + return it.fail +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPaymentWithdrawn struct { + Keeper common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, keeper []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryPaymentWithdrawnIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PaymentWithdrawn", keeperRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPaymentWithdrawnIterator{contract: _KeeperRegistry.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaymentWithdrawn, keeper []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PaymentWithdrawn", keeperRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + 
+ event := new(KeeperRegistryPaymentWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryPaymentWithdrawn, error) { + event := new(KeeperRegistryPaymentWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryRegistrarChangedIterator struct { + Event *KeeperRegistryRegistrarChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryRegistrarChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryRegistrarChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryRegistrarChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryRegistrarChangedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryRegistrarChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryRegistrarChanged struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) 
FilterRegistrarChanged(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryRegistrarChangedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "RegistrarChanged", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryRegistrarChangedIterator{contract: _KeeperRegistry.contract, event: "RegistrarChanged", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchRegistrarChanged(opts *bind.WatchOpts, sink chan<- *KeeperRegistryRegistrarChanged, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "RegistrarChanged", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryRegistrarChanged) + if err := _KeeperRegistry.contract.UnpackLog(event, "RegistrarChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseRegistrarChanged(log types.Log) (*KeeperRegistryRegistrarChanged, error) { + event := new(KeeperRegistryRegistrarChanged) + if err := _KeeperRegistry.contract.UnpackLog(event, "RegistrarChanged", log); err != nil { + return nil, err + } + event.Raw = 
log + return event, nil +} + +type KeeperRegistryUnpausedIterator struct { + Event *KeeperRegistryUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryUnpausedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &KeeperRegistryUnpausedIterator{contract: _KeeperRegistry.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUnpaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(KeeperRegistryUnpaused)
				if err := _KeeperRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil {
					return err
				}
				event.Raw = log

				// Forward the decoded event, staying responsive to
				// subscription failure and caller cancellation.
				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseUnpaused decodes a raw log into a KeeperRegistryUnpaused event.
func (_KeeperRegistry *KeeperRegistryFilterer) ParseUnpaused(log types.Log) (*KeeperRegistryUnpaused, error) {
	event := new(KeeperRegistryUnpaused)
	if err := _KeeperRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// KeeperRegistryUpkeepCanceledIterator is returned from FilterUpkeepCanceled
// and is used to iterate over the raw logs and unpacked data for
// UpkeepCanceled events.
type KeeperRegistryUpkeepCanceledIterator struct {
	Event *KeeperRegistryUpkeepCanceled // current event, valid after a successful Next

	contract *bind.BoundContract // generic contract used to unpack event data
	event    string              // event name this iterator is bound to

	logs chan types.Log        // channel the backend streams raw logs through
	sub  ethereum.Subscription // underlying log subscription
	done bool                  // whether the subscription has finished delivering logs
	fail error                 // first error encountered; sticky
}

// Next advances the iterator to the next event, returning false when the
// iteration is exhausted or an error occurred (see Error).
func (it *KeeperRegistryUpkeepCanceledIterator) Next() bool {
	// Sticky failure: once an error occurs, the iterator stays failed.
	if it.fail != nil {
		return false
	}
	// Subscription already ended: drain any remaining buffered logs
	// without blocking.
	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(KeeperRegistryUpkeepCanceled)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}
	// Subscription still live: block until a new log arrives or the
	// subscription terminates (then retry once in drain mode).
	select {
	case log := <-it.logs:
		it.Event = new(KeeperRegistryUpkeepCanceled)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error that occurred during filtering.
func (it *KeeperRegistryUpkeepCanceledIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing the underlying subscription.
func (it *KeeperRegistryUpkeepCanceledIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// KeeperRegistryUpkeepCanceled represents an UpkeepCanceled event raised by
// the KeeperRegistry contract.
type KeeperRegistryUpkeepCanceled struct {
	Id            *big.Int
	AtBlockHeight uint64
	Raw           types.Log // blockchain-specific contextual info
}

// FilterUpkeepCanceled retrieves historical UpkeepCanceled logs, optionally
// filtered on the indexed id and atBlockHeight topics (empty slice = no filter).
func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryUpkeepCanceledIterator, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}
	var atBlockHeightRule []interface{}
	for _, atBlockHeightItem := range atBlockHeight {
		atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem)
	}

	logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule)
	if err != nil {
		return nil, err
	}
	return &KeeperRegistryUpkeepCanceledIterator{contract: _KeeperRegistry.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil
}

// WatchUpkeepCanceled subscribes to future UpkeepCanceled logs, delivering
// decoded events on sink until the subscription is cancelled or fails.
func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}
	var atBlockHeightRule []interface{}
	for _, atBlockHeightItem := range atBlockHeight {
		atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem)
	}

	logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:
				// Decode and forward; abort on subscription error or quit.
				event := new(KeeperRegistryUpkeepCanceled)
				if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil {
					return err
				}
				event.Raw = log

				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseUpkeepCanceled decodes a raw log into a KeeperRegistryUpkeepCanceled event.
func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepCanceled(log types.Log) (*KeeperRegistryUpkeepCanceled, error) {
	event := new(KeeperRegistryUpkeepCanceled)
	if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// KeeperRegistryUpkeepPerformedIterator is returned from FilterUpkeepPerformed
// and is used to iterate over the raw logs and unpacked data for
// UpkeepPerformed events.
type KeeperRegistryUpkeepPerformedIterator struct {
	Event *KeeperRegistryUpkeepPerformed // current event, valid after a successful Next

	contract *bind.BoundContract // generic contract used to unpack event data
	event    string              // event name this iterator is bound to

	logs chan types.Log        // channel the backend streams raw logs through
	sub  ethereum.Subscription // underlying log subscription
	done bool                  // whether the subscription has finished delivering logs
	fail error                 // first error encountered; sticky
}

// Next advances the iterator to the next event, returning false when the
// iteration is exhausted or an error occurred (see Error).
func (it *KeeperRegistryUpkeepPerformedIterator) Next() bool {
	// Sticky failure: once an error occurs, the iterator stays failed.
	if it.fail != nil {
		return false
	}
	// Subscription already ended: drain remaining buffered logs without blocking.
	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(KeeperRegistryUpkeepPerformed)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}
	// Subscription still live: block for a new log or termination.
	select {
	case log := <-it.logs:
		it.Event = new(KeeperRegistryUpkeepPerformed)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error that occurred during filtering.
func (it *KeeperRegistryUpkeepPerformedIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing the underlying subscription.
func (it *KeeperRegistryUpkeepPerformedIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// KeeperRegistryUpkeepPerformed represents an UpkeepPerformed event raised by
// the KeeperRegistry contract.
type KeeperRegistryUpkeepPerformed struct {
	Id          *big.Int
	Success     bool
	From        common.Address
	Payment     *big.Int
	PerformData []byte
	Raw         types.Log // blockchain-specific contextual info
}

// FilterUpkeepPerformed retrieves historical UpkeepPerformed logs, optionally
// filtered on the indexed id, success and from topics.
func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool, from []common.Address) (*KeeperRegistryUpkeepPerformedIterator, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}
	var successRule []interface{}
	for _, successItem := range success {
		successRule = append(successRule, successItem)
	}
	var fromRule []interface{}
	for _, fromItem := range from {
		fromRule = append(fromRule, fromItem)
	}

	logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule, fromRule)
	if err != nil {
		return nil, err
}
	return &KeeperRegistryUpkeepPerformedIterator{contract: _KeeperRegistry.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil
}

// WatchUpkeepPerformed subscribes to future UpkeepPerformed logs, delivering
// decoded events on sink until the subscription is cancelled or fails.
func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPerformed, id []*big.Int, success []bool, from []common.Address) (event.Subscription, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}
	var successRule []interface{}
	for _, successItem := range success {
		successRule = append(successRule, successItem)
	}
	var fromRule []interface{}
	for _, fromItem := range from {
		fromRule = append(fromRule, fromItem)
	}

	logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule, fromRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:
				// Decode and forward; abort on subscription error or quit.
				event := new(KeeperRegistryUpkeepPerformed)
				if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil {
					return err
				}
				event.Raw = log

				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseUpkeepPerformed decodes a raw log into a KeeperRegistryUpkeepPerformed event.
func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepPerformed(log types.Log) (*KeeperRegistryUpkeepPerformed, error) {
	event := new(KeeperRegistryUpkeepPerformed)
	if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// KeeperRegistryUpkeepRegisteredIterator is returned from FilterUpkeepRegistered
// and is used to iterate over the raw logs and unpacked data for
// UpkeepRegistered events.
type KeeperRegistryUpkeepRegisteredIterator struct {
	Event *KeeperRegistryUpkeepRegistered // current event, valid after a successful Next

	contract *bind.BoundContract // generic contract used to unpack event data
	event    string              // event name this iterator is bound to

	logs chan types.Log        // channel the backend streams raw logs through
	sub  ethereum.Subscription // underlying log subscription
	done bool                  // whether the subscription has finished delivering logs
	fail error                 // first error encountered; sticky
}

// Next advances the iterator to the next event, returning false when the
// iteration is exhausted or an error occurred (see Error).
func (it *KeeperRegistryUpkeepRegisteredIterator) Next() bool {
	// Sticky failure: once an error occurs, the iterator stays failed.
	if it.fail != nil {
		return false
	}
	// Subscription already ended: drain remaining buffered logs without blocking.
	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(KeeperRegistryUpkeepRegistered)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}
	// Subscription still live: block for a new log or termination.
	select {
	case log := <-it.logs:
		it.Event = new(KeeperRegistryUpkeepRegistered)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error that occurred during filtering.
func (it *KeeperRegistryUpkeepRegisteredIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing the underlying subscription.
func (it *KeeperRegistryUpkeepRegisteredIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// KeeperRegistryUpkeepRegistered represents an UpkeepRegistered event raised
// by the KeeperRegistry contract.
type KeeperRegistryUpkeepRegistered struct {
	Id         *big.Int
	ExecuteGas uint32
	Admin      common.Address
	Raw        types.Log // blockchain-specific contextual info
}

// FilterUpkeepRegistered retrieves historical UpkeepRegistered logs,
// optionally filtered on the indexed id topic.
func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepRegisteredIterator, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepRegistered", idRule)
	if err != nil {
		return nil, err
	}
	return &KeeperRegistryUpkeepRegisteredIterator{contract: _KeeperRegistry.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil
}

// WatchUpkeepRegistered subscribes to future UpkeepRegistered logs, delivering
// decoded events on sink until the subscription is cancelled or fails.
func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepRegistered, id []*big.Int) (event.Subscription, error) {

	var idRule []interface{}
	for _, idItem := range id {
		idRule = append(idRule, idItem)
	}

	logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepRegistered", idRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:
				// Decode and forward; abort on subscription error or quit.
				event := new(KeeperRegistryUpkeepRegistered)
				if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil {
					return err
				}
				event.Raw = log

				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseUpkeepRegistered decodes a raw log into a KeeperRegistryUpkeepRegistered event.
func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepRegistered(log types.Log) (*KeeperRegistryUpkeepRegistered, error) {
	event := new(KeeperRegistryUpkeepRegistered)
	if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// GetConfig is the multi-value return struct of the getConfig contract call.
type GetConfig struct {
	PaymentPremiumPPB    uint32
	BlockCountPerTurn    *big.Int
	CheckGasLimit        uint32
	StalenessSeconds     *big.Int
	GasCeilingMultiplier uint16
	FallbackGasPrice     *big.Int
	FallbackLinkPrice    *big.Int
}

// GetKeeperInfo is the multi-value return struct of the getKeeperInfo contract call.
type GetKeeperInfo struct {
	Payee   common.Address
	Active  bool
	Balance *big.Int
}

// GetUpkeep is the multi-value return struct of the getUpkeep contract call.
type GetUpkeep struct {
	Target              common.Address
	ExecuteGas          uint32
	CheckData           []byte
	Balance             *big.Int
	LastKeeper          common.Address
	Admin               common.Address
	MaxValidBlocknumber uint64
}

// ParseLog routes a raw log to the matching Parse method by comparing its
// first topic against the ABI's event-signature hashes.
func (_KeeperRegistry *KeeperRegistry) ParseLog(log types.Log) (generated.AbigenLog, error) {
	switch log.Topics[0] {
	case _KeeperRegistry.abi.Events["ConfigSet"].ID:
		return _KeeperRegistry.ParseConfigSet(log)
	case _KeeperRegistry.abi.Events["FlatFeeSet"].ID:
		return _KeeperRegistry.ParseFlatFeeSet(log)
	case _KeeperRegistry.abi.Events["FundsAdded"].ID:
		return _KeeperRegistry.ParseFundsAdded(log)
	case _KeeperRegistry.abi.Events["FundsWithdrawn"].ID:
		return _KeeperRegistry.ParseFundsWithdrawn(log)
	case _KeeperRegistry.abi.Events["KeepersUpdated"].ID:
		return _KeeperRegistry.ParseKeepersUpdated(log)
	case _KeeperRegistry.abi.Events["OwnershipTransferRequested"].ID:
		return _KeeperRegistry.ParseOwnershipTransferRequested(log)
	case
_KeeperRegistry.abi.Events["OwnershipTransferred"].ID:
		return _KeeperRegistry.ParseOwnershipTransferred(log)
	case _KeeperRegistry.abi.Events["Paused"].ID:
		return _KeeperRegistry.ParsePaused(log)
	case _KeeperRegistry.abi.Events["PayeeshipTransferRequested"].ID:
		return _KeeperRegistry.ParsePayeeshipTransferRequested(log)
	case _KeeperRegistry.abi.Events["PayeeshipTransferred"].ID:
		return _KeeperRegistry.ParsePayeeshipTransferred(log)
	case _KeeperRegistry.abi.Events["PaymentWithdrawn"].ID:
		return _KeeperRegistry.ParsePaymentWithdrawn(log)
	case _KeeperRegistry.abi.Events["RegistrarChanged"].ID:
		return _KeeperRegistry.ParseRegistrarChanged(log)
	case _KeeperRegistry.abi.Events["Unpaused"].ID:
		return _KeeperRegistry.ParseUnpaused(log)
	case _KeeperRegistry.abi.Events["UpkeepCanceled"].ID:
		return _KeeperRegistry.ParseUpkeepCanceled(log)
	case _KeeperRegistry.abi.Events["UpkeepPerformed"].ID:
		return _KeeperRegistry.ParseUpkeepPerformed(log)
	case _KeeperRegistry.abi.Events["UpkeepRegistered"].ID:
		return _KeeperRegistry.ParseUpkeepRegistered(log)

	default:
		// Unknown topic: the log was not emitted by an event in this ABI.
		return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0])
	}
}

// Topic returns the event-signature topic hash identifying ConfigSet logs.
func (KeeperRegistryConfigSet) Topic() common.Hash {
	return common.HexToHash("0xeb3c06937e6595fd80ec1add18a195026d5cf65f122cc3ffedbfb18a9ed80b39")
}

// Topic returns the event-signature topic hash identifying FlatFeeSet logs.
func (KeeperRegistryFlatFeeSet) Topic() common.Hash {
	return common.HexToHash("0x17b46a44a823646eef686b7824df2962de896bc9a012a60b67694c5cbf184d8b")
}

// Topic returns the event-signature topic hash identifying FundsAdded logs.
func (KeeperRegistryFundsAdded) Topic() common.Hash {
	return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203")
}

// Topic returns the event-signature topic hash identifying FundsWithdrawn logs.
func (KeeperRegistryFundsWithdrawn) Topic() common.Hash {
	return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318")
}

// Topic returns the event-signature topic hash identifying KeepersUpdated logs.
func (KeeperRegistryKeepersUpdated) Topic() common.Hash {
	return common.HexToHash("0x056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f")
}

// Topic returns the event-signature topic hash identifying
// OwnershipTransferRequested logs.
func (KeeperRegistryOwnershipTransferRequested) Topic() common.Hash {
	return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278")
}

// Topic returns the event-signature topic hash identifying
// OwnershipTransferred logs.
func (KeeperRegistryOwnershipTransferred) Topic() common.Hash {
	return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0")
}

// Topic returns the event-signature topic hash identifying Paused logs.
func (KeeperRegistryPaused) Topic() common.Hash {
	return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258")
}

// Topic returns the event-signature topic hash identifying
// PayeeshipTransferRequested logs.
func (KeeperRegistryPayeeshipTransferRequested) Topic() common.Hash {
	return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367")
}

// Topic returns the event-signature topic hash identifying
// PayeeshipTransferred logs.
func (KeeperRegistryPayeeshipTransferred) Topic() common.Hash {
	return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3")
}

// Topic returns the event-signature topic hash identifying PaymentWithdrawn logs.
func (KeeperRegistryPaymentWithdrawn) Topic() common.Hash {
	return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698")
}

// Topic returns the event-signature topic hash identifying RegistrarChanged logs.
func (KeeperRegistryRegistrarChanged) Topic() common.Hash {
	return common.HexToHash("0x9bf4a5b30267728df68663e14adb47e559863967c419dc6030638883408bed2e")
}

// Topic returns the event-signature topic hash identifying Unpaused logs.
func (KeeperRegistryUnpaused) Topic() common.Hash {
	return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa")
}

// Topic returns the event-signature topic hash identifying UpkeepCanceled logs.
func (KeeperRegistryUpkeepCanceled) Topic() common.Hash {
	return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181")
}

// Topic returns the event-signature topic hash identifying UpkeepPerformed logs.
func (KeeperRegistryUpkeepPerformed) Topic() common.Hash {
	return common.HexToHash("0xcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6")
}

// Topic returns the event-signature topic hash identifying UpkeepRegistered logs.
func (KeeperRegistryUpkeepRegistered) Topic() common.Hash {
	return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012")
}

// Address returns the on-chain address this wrapper is bound to.
func (_KeeperRegistry *KeeperRegistry) Address() common.Address {
	return _KeeperRegistry.address
}

// KeeperRegistryInterface is the full generated API surface of the
// KeeperRegistry wrapper: read-only calls, transactions, and per-event
// Filter/Watch/Parse accessors.
type KeeperRegistryInterface interface {
	FASTGASFEED(opts *bind.CallOpts) (common.Address, error)

PLI(opts *bind.CallOpts) (common.Address, error)

	PLIETHFEED(opts *bind.CallOpts) (common.Address, error)

	GetCanceledUpkeepList(opts *bind.CallOpts) ([]*big.Int, error)

	GetConfig(opts *bind.CallOpts) (GetConfig, error)

	GetFlatFee(opts *bind.CallOpts) (uint32, error)

	GetKeeperInfo(opts *bind.CallOpts, query common.Address) (GetKeeperInfo, error)

	GetKeeperList(opts *bind.CallOpts) ([]common.Address, error)

	GetMaxPaymentForGas(opts *bind.CallOpts, gasLimit *big.Int) (*big.Int, error)

	GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error)

	GetRegistrar(opts *bind.CallOpts) (common.Address, error)

	GetUpkeep(opts *bind.CallOpts, id *big.Int) (GetUpkeep, error)

	GetUpkeepCount(opts *bind.CallOpts) (*big.Int, error)

	Owner(opts *bind.CallOpts) (common.Address, error)

	Paused(opts *bind.CallOpts) (bool, error)

	TypeAndVersion(opts *bind.CallOpts) (string, error)

	// State-changing transactions.

	AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error)

	AcceptPayeeship(opts *bind.TransactOpts, keeper common.Address) (*types.Transaction, error)

	AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error)

	CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error)

	CheckUpkeep(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error)

	OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error)

	Pause(opts *bind.TransactOpts) (*types.Transaction, error)

	PerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error)

	RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error)

	RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error)

	SetConfig(opts *bind.TransactOpts, paymentPremiumPPB uint32, flatFeeMicroLink uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int) (*types.Transaction, error)

	SetKeepers(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error)

	SetRegistrar(opts *bind.TransactOpts, registrar common.Address) (*types.Transaction, error)

	TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error)

	TransferPayeeship(opts *bind.TransactOpts, keeper common.Address, proposed common.Address) (*types.Transaction, error)

	Unpause(opts *bind.TransactOpts) (*types.Transaction, error)

	WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error)

	WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error)

	// Per-event log accessors: Filter (historical), Watch (live), Parse (decode).

	FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryConfigSetIterator, error)

	WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryConfigSet) (event.Subscription, error)

	ParseConfigSet(log types.Log) (*KeeperRegistryConfigSet, error)

	FilterFlatFeeSet(opts *bind.FilterOpts) (*KeeperRegistryFlatFeeSetIterator, error)

	WatchFlatFeeSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFlatFeeSet) (event.Subscription, error)

	ParseFlatFeeSet(log types.Log) (*KeeperRegistryFlatFeeSet, error)

	FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryFundsAddedIterator, error)

	WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error)

	ParseFundsAdded(log types.Log) (*KeeperRegistryFundsAdded, error)

	FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryFundsWithdrawnIterator, error)

	WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsWithdrawn, id []*big.Int) (event.Subscription, error)

	ParseFundsWithdrawn(log types.Log) (*KeeperRegistryFundsWithdrawn, error)

	FilterKeepersUpdated(opts *bind.FilterOpts) (*KeeperRegistryKeepersUpdatedIterator, error)

	WatchKeepersUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryKeepersUpdated) (event.Subscription, error)

	ParseKeepersUpdated(log types.Log) (*KeeperRegistryKeepersUpdated, error)

	FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferRequestedIterator, error)

	WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error)

	ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryOwnershipTransferRequested, error)

	FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferredIterator, error)

	WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error)

	ParseOwnershipTransferred(log types.Log) (*KeeperRegistryOwnershipTransferred, error)

	FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryPausedIterator, error)

	WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaused) (event.Subscription, error)

	ParsePaused(log types.Log) (*KeeperRegistryPaused, error)

	FilterPayeeshipTransferRequested(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferRequestedIterator, error)

	WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferRequested, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error)

	ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryPayeeshipTransferRequested, error)

	FilterPayeeshipTransferred(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferredIterator, error)

	WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferred, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error)

	ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryPayeeshipTransferred, error)

	FilterPaymentWithdrawn(opts *bind.FilterOpts, keeper []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryPaymentWithdrawnIterator, error)

	WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaymentWithdrawn, keeper []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error)

	ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryPaymentWithdrawn, error)

	FilterRegistrarChanged(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryRegistrarChangedIterator, error)

	WatchRegistrarChanged(opts *bind.WatchOpts, sink chan<- *KeeperRegistryRegistrarChanged, from []common.Address, to []common.Address) (event.Subscription, error)

	ParseRegistrarChanged(log types.Log) (*KeeperRegistryRegistrarChanged, error)

	FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryUnpausedIterator, error)

	WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUnpaused) (event.Subscription, error)

	ParseUnpaused(log types.Log) (*KeeperRegistryUnpaused, error)

	FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryUpkeepCanceledIterator, error)

	WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error)

	ParseUpkeepCanceled(log types.Log) (*KeeperRegistryUpkeepCanceled, error)

	FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool, from []common.Address) (*KeeperRegistryUpkeepPerformedIterator, error)

	WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPerformed, id []*big.Int, success []bool, from []common.Address) (event.Subscription, error)

	ParseUpkeepPerformed(log types.Log) (*KeeperRegistryUpkeepPerformed, error)

	FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepRegisteredIterator, error)

	WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepRegistered, id []*big.Int) (event.Subscription, error)

	ParseUpkeepRegistered(log types.Log) (*KeeperRegistryUpkeepRegistered, error)

	ParseLog(log types.Log) (generated.AbigenLog, error)

	Address() common.Address
}
diff --git a/core/gethwrappers/generated/keeper_registry_wrapper1_1_mock/keeper_registry_wrapper1_1_mock.go b/core/gethwrappers/generated/keeper_registry_wrapper1_1_mock/keeper_registry_wrapper1_1_mock.go
new file mode 100644
index 00000000..7e0b8a02
--- /dev/null
+++ b/core/gethwrappers/generated/keeper_registry_wrapper1_1_mock/keeper_registry_wrapper1_1_mock.go
@@ -0,0 +1,3083 @@
+// Code generated - DO NOT EDIT.
+// This file is a generated binding and any manual changes will be lost.
+ +package keeper_registry_wrapper1_1_mock + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var KeeperRegistryMockMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"}],\"name\":\"FlatFeeSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\
"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"KeepersUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"ev
ent\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"RegistrarChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"i
nputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"maxLinkPayment\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"adjustedGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkEth\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"}],\"name\":\"emitConfigSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"}],\"name\":\"emitFlatFeeSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"emitFundsAdded\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"address\
",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitFundsWithdrawn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"emitKeepersUpdated\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferred\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"emitPaused\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitPayeeshipTransferRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitPayeeshipTransferred\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"n
ame\":\"to\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"emitPaymentWithdrawn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitRegistrarChanged\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"emitUnpaused\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"emitUpkeepCanceled\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"emitUpkeepPerformed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"emitUpkeepRegistered\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCanceledUpkeepList\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"internalType\":\"uint32\",\"name\"
:\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getKeeperList\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getUpkeep\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"lastKeeper\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getUpkeepCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"perfor
mData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"_canceledUpkeepList\",\"type\":\"uint256[]\"}],\"name\":\"setCanceledUpkeepList\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"maxLinkPayment\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"adjustedGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkEth\",\"type\":\"uint256\"}],\"name\":\"setCheckUpkeepData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"_blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"_checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"_stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"_gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"_fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_fallbackLinkPrice\",\"type\":\"uint256\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_keepers\",\"type\":\"address[]\"}],\"name\":\"setKeeperList\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\
"uint256\"},{\"internalType\":\"uint96\",\"name\":\"minBalance\",\"type\":\"uint96\"}],\"name\":\"setMinBalance\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"name\":\"setPerformUpkeepSuccess\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"_executeGas\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"_balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"_admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"_maxValidBlocknumber\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"_lastKeeper\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_checkData\",\"type\":\"bytes\"}],\"name\":\"setUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_upkeepCount\",\"type\":\"uint256\"}],\"name\":\"setUpkeepCount\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50611fb6806100206000396000f3fe608060405234801561001057600080fd5b50600436106101f05760003560e01c8063999a73bb1161010f578063c3f909d4116100a2578063db30a38611610071578063db30a38614610d76578063f7420bc214610dd7578063fecf27c914610e12578063ffc1d91c14610e2c576101f0565b8063c3f909d414610aed578063c41b813a14610b45578063c7c3a19a14610c12578063d5b16ded14610d31576101f0565b8063b019b4e8116100de578063b019b4e814610996578063b34362d7146109d1578063b657bc9c14610a0c578063c2030c8b14610a4a576101f0565b8063999a73bb146107b857806399e1a39b146108df5780639ec3ce4b14610924578063a6e95ed014610957576101f0565b806358e1e734116101875780637be5c756116101565780637be5c756146106b557806381d2c40c146106e8578063825bea391461073e5780638a8aa1651461076f576101f0565b806358e1e7341461056b57806367923e95146105b0578063749e9cc9146105fd5780637bbaf1ea1461062a576101f0565b80633e2d7056116101c35780633e2d7056146103815780634a16a9ad146103a65780634e6575e01461048a5780635181feaa146104ad576101f0565b806315a126ea146101f55780631ffe6c971461024d578063284403761461035c5780632cb6864d14610379575b600080fd5b6101fd610ecf565b60408051602080825283518183015283519192839290830191858101910280838360005b83811015610239578181015183820152602001610221565b505050509050019250505060405180910390f35b61035a600480360361010081101561026457600080fd5b81359173ffffffffffffffffffffffffffffffffffffffff602082013581169263ffffffff604084013516926bffffffffffffffffffffffff60608201351692608082013581169267ffffffffffffffff60a0840135169260c08101359092169190810190610100810160e08201356401000000008111156102e557600080fd5b8201836020820111156102f757600080fd5b8035906020019184600183028401116401000000008311171561031957600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610f3e945050505050565b005b61035a6004803603602081101561037257600080fd5b503561117b565b6101fd611180565b61035a6004803603604081101561039757600080fd5b508035906020013515156111d7565b61035a600480360360a08110156103bc57600080fd5b8135916020810135151
59173ffffffffffffffffffffffffffffffffffffffff604083013516916bffffffffffffffffffffffff6060820135169181019060a08101608082013564010000000081111561041557600080fd5b82018360208201111561042757600080fd5b8035906020019184600183028401116401000000008311171561044957600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550611215945050505050565b61035a600480360360208110156104a057600080fd5b503563ffffffff166112e6565b61035a600480360360c08110156104c357600080fd5b813591908101906040810160208201356401000000008111156104e557600080fd5b8201836020820111156104f757600080fd5b8035906020019184600183028401116401000000008311171561051957600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295505082359350505060208101359060408101359060600135611322565b61035a6004803603606081101561058157600080fd5b5073ffffffffffffffffffffffffffffffffffffffff81358116916020810135821691604090910135166113a0565b61035a600480360360608110156105c657600080fd5b50803590602081013573ffffffffffffffffffffffffffffffffffffffff1690604001356bffffffffffffffffffffffff16611416565b61035a6004803603604081101561061357600080fd5b508035906020013567ffffffffffffffff16611476565b6106a16004803603604081101561064057600080fd5b8135919081019060408101602082013564010000000081111561066257600080fd5b82018360208201111561067457600080fd5b8035906020019184600183028401116401000000008311171561069657600080fd5b5090925090506114b1565b604080519115158252519081900360200190f35b61035a600480360360208110156106cb57600080fd5b503573ffffffffffffffffffffffffffffffffffffffff166114c8565b61035a600480360360e08110156106fe57600080fd5b5063ffffffff813581169162ffffff60208201358116926040830135169160608101359091169061ffff6080820135169060a08101359060c00135611514565b61035a6004803603604081101561075457600080fd5b50803590602001356bffffffffffffffffffffffff16611589565b61035a6004803603608081101561078557600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135811691602081013591604082013581169
160600135166115d4565b61035a600480360360408110156107ce57600080fd5b8101906020810181356401000000008111156107e957600080fd5b8201836020820111156107fb57600080fd5b8035906020019184602083028401116401000000008311171561081d57600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929594936020810193503591505064010000000081111561086d57600080fd5b82018360208201111561087f57600080fd5b803590602001918460208302840111640100000000831117156108a157600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929550611656945050505050565b61035a600480360360608110156108f557600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135811691602081013582169160409091013516611715565b61035a6004803603602081101561093a57600080fd5b503573ffffffffffffffffffffffffffffffffffffffff1661178b565b61035a6004803603606081101561096d57600080fd5b508035906020810135906040013573ffffffffffffffffffffffffffffffffffffffff166117d7565b61035a600480360360408110156109ac57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135811691602001351661182b565b61035a600480360360408110156109e757600080fd5b5073ffffffffffffffffffffffffffffffffffffffff81358116916020013516611889565b610a2960048036036020811015610a2257600080fd5b50356118e7565b604080516bffffffffffffffffffffffff9092168252519081900360200190f35b61035a60048036036020811015610a6057600080fd5b810190602081018135640100000000811115610a7b57600080fd5b820183602082011115610a8d57600080fd5b80359060200191846020830284011164010000000083111715610aaf57600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929550611907945050505050565b610af561191e565b6040805163ffffffff988916815262ffffff9788166020820152959097168588015292909416606084015261ffff16608083015260a082019290925260c081019190915290519081900360e00190f35b610b7e60048036036040811015610b5b57600080fd5b508035906020013573ffffffffffffffffffffffffffffffffffffffff1661198e565b60405180806020018681526020018581526020018481526
02001838152602001828103825287818151815260200191508051906020019080838360005b83811015610bd3578181015183820152602001610bbb565b50505050905090810190601f168015610c005780820380516001836020036101000a031916815260200191505b50965050505050505060405180910390f35b610c2f60048036036020811015610c2857600080fd5b5035611a76565b604051808873ffffffffffffffffffffffffffffffffffffffff1681526020018763ffffffff16815260200180602001866bffffffffffffffffffffffff1681526020018573ffffffffffffffffffffffffffffffffffffffff1681526020018473ffffffffffffffffffffffffffffffffffffffff1681526020018367ffffffffffffffff168152602001828103825287818151815260200191508051906020019080838360005b83811015610cf0578181015183820152602001610cd8565b50505050905090810190601f168015610d1d5780820380516001836020036101000a031916815260200191505b509850505050505050505060405180910390f35b61035a60048036036060811015610d4757600080fd5b50803590602081013563ffffffff16906040013573ffffffffffffffffffffffffffffffffffffffff16611c1f565b61035a6004803603610100811015610d8d57600080fd5b5063ffffffff8135811691602081013582169162ffffff604083013581169260608101359092169160808101359091169061ffff60a0820135169060c08101359060e00135611c79565b61035a60048036036040811015610ded57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff81358116916020013516611ddd565b610e1a611e3b565b60408051918252519081900360200190f35b61035a60048036036020811015610e4257600080fd5b810190602081018135640100000000811115610e5d57600080fd5b820183602082011115610e6f57600080fd5b80359060200191846020830284011164010000000083111715610e9157600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929550611e41945050505050565b60606002805480602002602001604051908101604052809291908181526020018280548015610f3457602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610f09575b5050505050905090565b60006040518060c001604052808973ffffffffffffffffffffffffffffffffffffffff1681526020018863ffffffff168152602001876bfffffffffffffffff
fffffff1681526020018673ffffffffffffffffffffffffffffffffffffffff1681526020018567ffffffffffffffff1681526020018473ffffffffffffffffffffffffffffffffffffffff16815250905080600660008b815260200190815260200160002060008201518160000160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060208201518160000160146101000a81548163ffffffff021916908363ffffffff16021790555060408201518160010160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550606082015181600101600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060808201518160020160006101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555060a08201518160020160086101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555090505081600760008b8152602001908152602001600020908051906020019061116f929190611e54565b50505050505050505050565b600055565b60606001805480602002602001604051908101604052809291908181526020018280548015610f3457602002820191906000526020600020905b8154815260200190600101908083116111ba575050505050905090565b6000918252600a602052604090912080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016911515919091179055565b8273ffffffffffffffffffffffffffffffffffffffff16841515867fcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6858560405180836bffffffffffffffffffffffff16815260200180602001828103825283818151815260200191508051906020019080838360005b838110156112a457818101518382015260200161128c565b50505050905090810190601f1680156112d15780820380516001836020036101000a031916815260200191505b50935050505060405180910390a45050505050565b6040805163ffffffff8316815290517f17b46a44a823646eef686b7824df2962de896bc9a012a60b67694c5cbf184d8b9181900360200190a150565b6040805160a0810182528681526020808201879052818301869052606082018590526080820184905260008981526009825292909220815180519293919261136d92849
20190611e54565b50602082015160018201556040820151600282015560608201516003820155608090910151600490910155505050505050565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b360405160405180910390a4505050565b604080516bffffffffffffffffffffffff83168152905173ffffffffffffffffffffffffffffffffffffffff84169185917fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039181900360200190a3505050565b60405167ffffffffffffffff82169083907f91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f79118190600090a35050565b50506000908152600a602052604090205460ff1690565b6040805173ffffffffffffffffffffffffffffffffffffffff8316815290517f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a2589181900360200190a150565b6040805163ffffffff808a16825262ffffff808a166020840152908816828401528616606082015261ffff8516608082015260a0810184905260c0810183905290517feb3c06937e6595fd80ec1add18a195026d5cf65f122cc3ffedbfb18a9ed80b399181900360e00190a150505050505050565b60009182526008602052604090912080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff909216919091179055565b8173ffffffffffffffffffffffffffffffffffffffff16838573ffffffffffffffffffffffffffffffffffffffff167f9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f4069884604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a450505050565b7f056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f8282604051808060200180602001838103835285818151815260200191508051906020019060200280838360005b838110156116bd5781810151838201526020016116a5565b50505050905001838103825284818151815260200191508051906020019060200280838360005b838110156116fc5781810151838201526020016116e4565b5050505090500194505050505060405180910390a15050565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff168473fffffffffffffffff
fffffffffffffffffffffff167f84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e3836760405160405180910390a4505050565b6040805173ffffffffffffffffffffffffffffffffffffffff8316815290517f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa9181900360200190a150565b6040805183815273ffffffffffffffffffffffffffffffffffffffff83166020820152815185927ff3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318928290030190a2505050565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167f9bf4a5b30267728df68663e14adb47e559863967c419dc6030638883408bed2e60405160405180910390a35050565b6000908152600860205260409020546bffffffffffffffffffffffff1690565b805161191a906001906020840190611ee0565b5050565b60035460045460055463ffffffff8084169468010000000000000000850462ffffff908116956b0100000000000000000000008104909316946f01000000000000000000000000000000840490911693720100000000000000000000000000000000000090930461ffff16929091565b60606000806000806000600960008981526020019081526020016000209050806000018160010154826002015483600301548460040154848054600181600116156101000203166002900480601f016020809104026020016040519081016040528092919081815260200182805460018160011615610100020316600290048015611a5a5780601f10611a2f57610100808354040283529160200191611a5a565b820191906000526020600020905b815481529060010190602001808311611a3d57829003601f168201915b5050505050945095509550955095509550509295509295909350565b6000818152600660209081526040808320815160c081018352815473ffffffffffffffffffffffffffffffffffffffff8082168084527401000000000000000000000000000000000000000090920463ffffffff168387018190526001808601546bffffffffffffffffffffffff81168689019081526c010000000000000000000000009091048416606080880191825260029889015467ffffffffffffffff811660808a019081526801000000000000000090910490961660a0890190815
28d8d5260078c528a8d20935190519251965184548c5161010097821615979097027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01169a909a04601f81018d90048d0286018d01909b528a85528c9b919a8c9a8b9a8b9a8b9a91999098909796949591939091879190830182828015611bfe5780601f10611bd357610100808354040283529160200191611bfe565b820191906000526020600020905b815481529060010190602001808311611be157829003601f168201915b50505050509450975097509750975097509750975050919395979092949650565b6040805163ffffffff8416815273ffffffffffffffffffffffffffffffffffffffff83166020820152815185927fbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012928290030190a2505050565b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff998a16177fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff16640100000000988a1698909802979097177fffffffffffffffffffffffffffffffffffffffffff000000ffffffffffffffff166801000000000000000062ffffff97881602177fffffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffff166b0100000000000000000000009590981694909402969096177fffffffffffffffffffffffffffff000000ffffffffffffffffffffffffffffff166f010000000000000000000000000000009290941691909102929092177fffffffffffffffffffffffff0000ffffffffffffffffffffffffffffffffffff16720100000000000000000000000000000000000061ffff939093169290920291909117909155600491909155600555565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127860405160405180910390a35050565b60005490565b805161191a906002906020840190611f1a565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282611e8a5760008555611ed0565b82601f10611ea357805160ff1916838001178555611ed0565b82800160010185558215611ed0579182015b82811115611ed0578251825591602001919060010190611eb5565b50611edc929150611f94565b5090565b828054828255906000526020600020908101928215611ed05791602002820182811115611ed0578251825591602001919060010190611eb5565b8280548
28255906000526020600020908101928215611ed0579160200282015b82811115611ed057825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190611f3a565b5b80821115611edc5760008155600101611f9556fea164736f6c6343000706000a", +} + +var KeeperRegistryMockABI = KeeperRegistryMockMetaData.ABI + +var KeeperRegistryMockBin = KeeperRegistryMockMetaData.Bin + +func DeployKeeperRegistryMock(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *KeeperRegistryMock, error) { + parsed, err := KeeperRegistryMockMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistryMockBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistryMock{address: address, abi: *parsed, KeeperRegistryMockCaller: KeeperRegistryMockCaller{contract: contract}, KeeperRegistryMockTransactor: KeeperRegistryMockTransactor{contract: contract}, KeeperRegistryMockFilterer: KeeperRegistryMockFilterer{contract: contract}}, nil +} + +type KeeperRegistryMock struct { + address common.Address + abi abi.ABI + KeeperRegistryMockCaller + KeeperRegistryMockTransactor + KeeperRegistryMockFilterer +} + +type KeeperRegistryMockCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistryMockTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistryMockFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistryMockSession struct { + Contract *KeeperRegistryMock + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistryMockCallerSession struct { + Contract *KeeperRegistryMockCaller + CallOpts bind.CallOpts +} + +type KeeperRegistryMockTransactorSession struct { + 
Contract *KeeperRegistryMockTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistryMockRaw struct { + Contract *KeeperRegistryMock +} + +type KeeperRegistryMockCallerRaw struct { + Contract *KeeperRegistryMockCaller +} + +type KeeperRegistryMockTransactorRaw struct { + Contract *KeeperRegistryMockTransactor +} + +func NewKeeperRegistryMock(address common.Address, backend bind.ContractBackend) (*KeeperRegistryMock, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryMockABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistryMock(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistryMock{address: address, abi: abi, KeeperRegistryMockCaller: KeeperRegistryMockCaller{contract: contract}, KeeperRegistryMockTransactor: KeeperRegistryMockTransactor{contract: contract}, KeeperRegistryMockFilterer: KeeperRegistryMockFilterer{contract: contract}}, nil +} + +func NewKeeperRegistryMockCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryMockCaller, error) { + contract, err := bindKeeperRegistryMock(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryMockCaller{contract: contract}, nil +} + +func NewKeeperRegistryMockTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistryMockTransactor, error) { + contract, err := bindKeeperRegistryMock(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryMockTransactor{contract: contract}, nil +} + +func NewKeeperRegistryMockFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistryMockFilterer, error) { + contract, err := bindKeeperRegistryMock(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistryMockFilterer{contract: contract}, nil +} + +func bindKeeperRegistryMock(address common.Address, caller bind.ContractCaller, transactor 
bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistryMockMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryMock.Contract.KeeperRegistryMockCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistryMock *KeeperRegistryMockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.KeeperRegistryMockTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistryMock *KeeperRegistryMockRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.KeeperRegistryMockTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistryMock *KeeperRegistryMockCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistryMock.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.contract.Transact(opts, method, params...) 
+} + +func (_KeeperRegistryMock *KeeperRegistryMockCaller) CheckUpkeep(opts *bind.CallOpts, id *big.Int, from common.Address) (CheckUpkeep, + + error) { + var out []interface{} + err := _KeeperRegistryMock.contract.Call(opts, &out, "checkUpkeep", id, from) + + outstruct := new(CheckUpkeep) + if err != nil { + return *outstruct, err + } + + outstruct.PerformData = *abi.ConvertType(out[0], new([]byte)).(*[]byte) + outstruct.MaxLinkPayment = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.GasLimit = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.AdjustedGasWei = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.LinkEth = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) CheckUpkeep(id *big.Int, from common.Address) (CheckUpkeep, + + error) { + return _KeeperRegistryMock.Contract.CheckUpkeep(&_KeeperRegistryMock.CallOpts, id, from) +} + +func (_KeeperRegistryMock *KeeperRegistryMockCallerSession) CheckUpkeep(id *big.Int, from common.Address) (CheckUpkeep, + + error) { + return _KeeperRegistryMock.Contract.CheckUpkeep(&_KeeperRegistryMock.CallOpts, id, from) +} + +func (_KeeperRegistryMock *KeeperRegistryMockCaller) GetCanceledUpkeepList(opts *bind.CallOpts) ([]*big.Int, error) { + var out []interface{} + err := _KeeperRegistryMock.contract.Call(opts, &out, "getCanceledUpkeepList") + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) GetCanceledUpkeepList() ([]*big.Int, error) { + return _KeeperRegistryMock.Contract.GetCanceledUpkeepList(&_KeeperRegistryMock.CallOpts) +} + +func (_KeeperRegistryMock *KeeperRegistryMockCallerSession) GetCanceledUpkeepList() ([]*big.Int, error) { + return _KeeperRegistryMock.Contract.GetCanceledUpkeepList(&_KeeperRegistryMock.CallOpts) +} + +func 
(_KeeperRegistryMock *KeeperRegistryMockCaller) GetConfig(opts *bind.CallOpts) (GetConfig, + + error) { + var out []interface{} + err := _KeeperRegistryMock.contract.Call(opts, &out, "getConfig") + + outstruct := new(GetConfig) + if err != nil { + return *outstruct, err + } + + outstruct.PaymentPremiumPPB = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockCountPerTurn = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.CheckGasLimit = *abi.ConvertType(out[2], new(uint32)).(*uint32) + outstruct.StalenessSeconds = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.GasCeilingMultiplier = *abi.ConvertType(out[4], new(uint16)).(*uint16) + outstruct.FallbackGasPrice = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + outstruct.FallbackLinkPrice = *abi.ConvertType(out[6], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) GetConfig() (GetConfig, + + error) { + return _KeeperRegistryMock.Contract.GetConfig(&_KeeperRegistryMock.CallOpts) +} + +func (_KeeperRegistryMock *KeeperRegistryMockCallerSession) GetConfig() (GetConfig, + + error) { + return _KeeperRegistryMock.Contract.GetConfig(&_KeeperRegistryMock.CallOpts) +} + +func (_KeeperRegistryMock *KeeperRegistryMockCaller) GetKeeperList(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _KeeperRegistryMock.contract.Call(opts, &out, "getKeeperList") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) GetKeeperList() ([]common.Address, error) { + return _KeeperRegistryMock.Contract.GetKeeperList(&_KeeperRegistryMock.CallOpts) +} + +func (_KeeperRegistryMock *KeeperRegistryMockCallerSession) GetKeeperList() ([]common.Address, error) { + return _KeeperRegistryMock.Contract.GetKeeperList(&_KeeperRegistryMock.CallOpts) 
+} + +func (_KeeperRegistryMock *KeeperRegistryMockCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryMock.contract.Call(opts, &out, "getMinBalanceForUpkeep", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _KeeperRegistryMock.Contract.GetMinBalanceForUpkeep(&_KeeperRegistryMock.CallOpts, id) +} + +func (_KeeperRegistryMock *KeeperRegistryMockCallerSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _KeeperRegistryMock.Contract.GetMinBalanceForUpkeep(&_KeeperRegistryMock.CallOpts, id) +} + +func (_KeeperRegistryMock *KeeperRegistryMockCaller) GetUpkeep(opts *bind.CallOpts, id *big.Int) (GetUpkeep, + + error) { + var out []interface{} + err := _KeeperRegistryMock.contract.Call(opts, &out, "getUpkeep", id) + + outstruct := new(GetUpkeep) + if err != nil { + return *outstruct, err + } + + outstruct.Target = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.ExecuteGas = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.CheckData = *abi.ConvertType(out[2], new([]byte)).(*[]byte) + outstruct.Balance = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.LastKeeper = *abi.ConvertType(out[4], new(common.Address)).(*common.Address) + outstruct.Admin = *abi.ConvertType(out[5], new(common.Address)).(*common.Address) + outstruct.MaxValidBlocknumber = *abi.ConvertType(out[6], new(uint64)).(*uint64) + + return *outstruct, err + +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) GetUpkeep(id *big.Int) (GetUpkeep, + + error) { + return _KeeperRegistryMock.Contract.GetUpkeep(&_KeeperRegistryMock.CallOpts, id) +} + +func (_KeeperRegistryMock *KeeperRegistryMockCallerSession) GetUpkeep(id *big.Int) (GetUpkeep, 
+ + error) { + return _KeeperRegistryMock.Contract.GetUpkeep(&_KeeperRegistryMock.CallOpts, id) +} + +func (_KeeperRegistryMock *KeeperRegistryMockCaller) GetUpkeepCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistryMock.contract.Call(opts, &out, "getUpkeepCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) GetUpkeepCount() (*big.Int, error) { + return _KeeperRegistryMock.Contract.GetUpkeepCount(&_KeeperRegistryMock.CallOpts) +} + +func (_KeeperRegistryMock *KeeperRegistryMockCallerSession) GetUpkeepCount() (*big.Int, error) { + return _KeeperRegistryMock.Contract.GetUpkeepCount(&_KeeperRegistryMock.CallOpts) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitConfigSet(opts *bind.TransactOpts, paymentPremiumPPB uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitConfigSet", paymentPremiumPPB, blockCountPerTurn, checkGasLimit, stalenessSeconds, gasCeilingMultiplier, fallbackGasPrice, fallbackLinkPrice) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitConfigSet(paymentPremiumPPB uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitConfigSet(&_KeeperRegistryMock.TransactOpts, paymentPremiumPPB, blockCountPerTurn, checkGasLimit, stalenessSeconds, gasCeilingMultiplier, fallbackGasPrice, fallbackLinkPrice) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitConfigSet(paymentPremiumPPB uint32, blockCountPerTurn *big.Int, checkGasLimit 
uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitConfigSet(&_KeeperRegistryMock.TransactOpts, paymentPremiumPPB, blockCountPerTurn, checkGasLimit, stalenessSeconds, gasCeilingMultiplier, fallbackGasPrice, fallbackLinkPrice) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitFlatFeeSet(opts *bind.TransactOpts, flatFeeMicroLink uint32) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitFlatFeeSet", flatFeeMicroLink) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitFlatFeeSet(flatFeeMicroLink uint32) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitFlatFeeSet(&_KeeperRegistryMock.TransactOpts, flatFeeMicroLink) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitFlatFeeSet(flatFeeMicroLink uint32) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitFlatFeeSet(&_KeeperRegistryMock.TransactOpts, flatFeeMicroLink) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitFundsAdded(opts *bind.TransactOpts, id *big.Int, from common.Address, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitFundsAdded", id, from, amount) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitFundsAdded(id *big.Int, from common.Address, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitFundsAdded(&_KeeperRegistryMock.TransactOpts, id, from, amount) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitFundsAdded(id *big.Int, from common.Address, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitFundsAdded(&_KeeperRegistryMock.TransactOpts, id, from, amount) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitFundsWithdrawn(opts 
*bind.TransactOpts, id *big.Int, amount *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitFundsWithdrawn", id, amount, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitFundsWithdrawn(id *big.Int, amount *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitFundsWithdrawn(&_KeeperRegistryMock.TransactOpts, id, amount, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitFundsWithdrawn(id *big.Int, amount *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitFundsWithdrawn(&_KeeperRegistryMock.TransactOpts, id, amount, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitKeepersUpdated(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitKeepersUpdated", keepers, payees) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitKeepersUpdated(keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitKeepersUpdated(&_KeeperRegistryMock.TransactOpts, keepers, payees) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitKeepersUpdated(keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitKeepersUpdated(&_KeeperRegistryMock.TransactOpts, keepers, payees) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitOwnershipTransferRequested", from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, 
error) { + return _KeeperRegistryMock.Contract.EmitOwnershipTransferRequested(&_KeeperRegistryMock.TransactOpts, from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitOwnershipTransferRequested(&_KeeperRegistryMock.TransactOpts, from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitOwnershipTransferred", from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitOwnershipTransferred(&_KeeperRegistryMock.TransactOpts, from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitOwnershipTransferred(&_KeeperRegistryMock.TransactOpts, from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitPaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitPaused", account) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitPaused(account common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitPaused(&_KeeperRegistryMock.TransactOpts, account) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitPaused(account common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitPaused(&_KeeperRegistryMock.TransactOpts, account) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitPayeeshipTransferRequested(opts 
*bind.TransactOpts, keeper common.Address, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitPayeeshipTransferRequested", keeper, from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitPayeeshipTransferRequested(keeper common.Address, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitPayeeshipTransferRequested(&_KeeperRegistryMock.TransactOpts, keeper, from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitPayeeshipTransferRequested(keeper common.Address, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitPayeeshipTransferRequested(&_KeeperRegistryMock.TransactOpts, keeper, from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitPayeeshipTransferred(opts *bind.TransactOpts, keeper common.Address, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitPayeeshipTransferred", keeper, from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitPayeeshipTransferred(keeper common.Address, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitPayeeshipTransferred(&_KeeperRegistryMock.TransactOpts, keeper, from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitPayeeshipTransferred(keeper common.Address, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitPayeeshipTransferred(&_KeeperRegistryMock.TransactOpts, keeper, from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitPaymentWithdrawn(opts *bind.TransactOpts, keeper common.Address, amount *big.Int, to common.Address, payee common.Address) (*types.Transaction, error) { + return 
_KeeperRegistryMock.contract.Transact(opts, "emitPaymentWithdrawn", keeper, amount, to, payee) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitPaymentWithdrawn(keeper common.Address, amount *big.Int, to common.Address, payee common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitPaymentWithdrawn(&_KeeperRegistryMock.TransactOpts, keeper, amount, to, payee) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitPaymentWithdrawn(keeper common.Address, amount *big.Int, to common.Address, payee common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitPaymentWithdrawn(&_KeeperRegistryMock.TransactOpts, keeper, amount, to, payee) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitRegistrarChanged(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitRegistrarChanged", from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitRegistrarChanged(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitRegistrarChanged(&_KeeperRegistryMock.TransactOpts, from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitRegistrarChanged(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitRegistrarChanged(&_KeeperRegistryMock.TransactOpts, from, to) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitUnpaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitUnpaused", account) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitUnpaused(account common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitUnpaused(&_KeeperRegistryMock.TransactOpts, account) +} + +func 
(_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitUnpaused(account common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitUnpaused(&_KeeperRegistryMock.TransactOpts, account) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitUpkeepCanceled(opts *bind.TransactOpts, id *big.Int, atBlockHeight uint64) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitUpkeepCanceled", id, atBlockHeight) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitUpkeepCanceled(id *big.Int, atBlockHeight uint64) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitUpkeepCanceled(&_KeeperRegistryMock.TransactOpts, id, atBlockHeight) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitUpkeepCanceled(id *big.Int, atBlockHeight uint64) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitUpkeepCanceled(&_KeeperRegistryMock.TransactOpts, id, atBlockHeight) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitUpkeepPerformed(opts *bind.TransactOpts, id *big.Int, success bool, from common.Address, payment *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitUpkeepPerformed", id, success, from, payment, performData) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitUpkeepPerformed(id *big.Int, success bool, from common.Address, payment *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitUpkeepPerformed(&_KeeperRegistryMock.TransactOpts, id, success, from, payment, performData) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitUpkeepPerformed(id *big.Int, success bool, from common.Address, payment *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitUpkeepPerformed(&_KeeperRegistryMock.TransactOpts, id, success, from, 
payment, performData) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) EmitUpkeepRegistered(opts *bind.TransactOpts, id *big.Int, executeGas uint32, admin common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "emitUpkeepRegistered", id, executeGas, admin) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) EmitUpkeepRegistered(id *big.Int, executeGas uint32, admin common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitUpkeepRegistered(&_KeeperRegistryMock.TransactOpts, id, executeGas, admin) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) EmitUpkeepRegistered(id *big.Int, executeGas uint32, admin common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.EmitUpkeepRegistered(&_KeeperRegistryMock.TransactOpts, id, executeGas, admin) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) PerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "performUpkeep", id, performData) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) PerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.PerformUpkeep(&_KeeperRegistryMock.TransactOpts, id, performData) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) PerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.PerformUpkeep(&_KeeperRegistryMock.TransactOpts, id, performData) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) SetCanceledUpkeepList(opts *bind.TransactOpts, _canceledUpkeepList []*big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "setCanceledUpkeepList", _canceledUpkeepList) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) 
SetCanceledUpkeepList(_canceledUpkeepList []*big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetCanceledUpkeepList(&_KeeperRegistryMock.TransactOpts, _canceledUpkeepList) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) SetCanceledUpkeepList(_canceledUpkeepList []*big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetCanceledUpkeepList(&_KeeperRegistryMock.TransactOpts, _canceledUpkeepList) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) SetCheckUpkeepData(opts *bind.TransactOpts, id *big.Int, performData []byte, maxLinkPayment *big.Int, gasLimit *big.Int, adjustedGasWei *big.Int, linkEth *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "setCheckUpkeepData", id, performData, maxLinkPayment, gasLimit, adjustedGasWei, linkEth) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) SetCheckUpkeepData(id *big.Int, performData []byte, maxLinkPayment *big.Int, gasLimit *big.Int, adjustedGasWei *big.Int, linkEth *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetCheckUpkeepData(&_KeeperRegistryMock.TransactOpts, id, performData, maxLinkPayment, gasLimit, adjustedGasWei, linkEth) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) SetCheckUpkeepData(id *big.Int, performData []byte, maxLinkPayment *big.Int, gasLimit *big.Int, adjustedGasWei *big.Int, linkEth *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetCheckUpkeepData(&_KeeperRegistryMock.TransactOpts, id, performData, maxLinkPayment, gasLimit, adjustedGasWei, linkEth) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) SetConfig(opts *bind.TransactOpts, _paymentPremiumPPB uint32, _flatFeeMicroLink uint32, _blockCountPerTurn *big.Int, _checkGasLimit uint32, _stalenessSeconds *big.Int, _gasCeilingMultiplier uint16, _fallbackGasPrice *big.Int, _fallbackLinkPrice *big.Int) (*types.Transaction, 
error) { + return _KeeperRegistryMock.contract.Transact(opts, "setConfig", _paymentPremiumPPB, _flatFeeMicroLink, _blockCountPerTurn, _checkGasLimit, _stalenessSeconds, _gasCeilingMultiplier, _fallbackGasPrice, _fallbackLinkPrice) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) SetConfig(_paymentPremiumPPB uint32, _flatFeeMicroLink uint32, _blockCountPerTurn *big.Int, _checkGasLimit uint32, _stalenessSeconds *big.Int, _gasCeilingMultiplier uint16, _fallbackGasPrice *big.Int, _fallbackLinkPrice *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetConfig(&_KeeperRegistryMock.TransactOpts, _paymentPremiumPPB, _flatFeeMicroLink, _blockCountPerTurn, _checkGasLimit, _stalenessSeconds, _gasCeilingMultiplier, _fallbackGasPrice, _fallbackLinkPrice) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) SetConfig(_paymentPremiumPPB uint32, _flatFeeMicroLink uint32, _blockCountPerTurn *big.Int, _checkGasLimit uint32, _stalenessSeconds *big.Int, _gasCeilingMultiplier uint16, _fallbackGasPrice *big.Int, _fallbackLinkPrice *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetConfig(&_KeeperRegistryMock.TransactOpts, _paymentPremiumPPB, _flatFeeMicroLink, _blockCountPerTurn, _checkGasLimit, _stalenessSeconds, _gasCeilingMultiplier, _fallbackGasPrice, _fallbackLinkPrice) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) SetKeeperList(opts *bind.TransactOpts, _keepers []common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "setKeeperList", _keepers) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) SetKeeperList(_keepers []common.Address) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetKeeperList(&_KeeperRegistryMock.TransactOpts, _keepers) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) SetKeeperList(_keepers []common.Address) (*types.Transaction, error) { + return 
_KeeperRegistryMock.Contract.SetKeeperList(&_KeeperRegistryMock.TransactOpts, _keepers) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) SetMinBalance(opts *bind.TransactOpts, id *big.Int, minBalance *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "setMinBalance", id, minBalance) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) SetMinBalance(id *big.Int, minBalance *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetMinBalance(&_KeeperRegistryMock.TransactOpts, id, minBalance) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) SetMinBalance(id *big.Int, minBalance *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetMinBalance(&_KeeperRegistryMock.TransactOpts, id, minBalance) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) SetPerformUpkeepSuccess(opts *bind.TransactOpts, id *big.Int, success bool) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "setPerformUpkeepSuccess", id, success) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) SetPerformUpkeepSuccess(id *big.Int, success bool) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetPerformUpkeepSuccess(&_KeeperRegistryMock.TransactOpts, id, success) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) SetPerformUpkeepSuccess(id *big.Int, success bool) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetPerformUpkeepSuccess(&_KeeperRegistryMock.TransactOpts, id, success) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) SetUpkeep(opts *bind.TransactOpts, id *big.Int, _target common.Address, _executeGas uint32, _balance *big.Int, _admin common.Address, _maxValidBlocknumber uint64, _lastKeeper common.Address, _checkData []byte) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "setUpkeep", id, _target, 
_executeGas, _balance, _admin, _maxValidBlocknumber, _lastKeeper, _checkData) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) SetUpkeep(id *big.Int, _target common.Address, _executeGas uint32, _balance *big.Int, _admin common.Address, _maxValidBlocknumber uint64, _lastKeeper common.Address, _checkData []byte) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetUpkeep(&_KeeperRegistryMock.TransactOpts, id, _target, _executeGas, _balance, _admin, _maxValidBlocknumber, _lastKeeper, _checkData) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) SetUpkeep(id *big.Int, _target common.Address, _executeGas uint32, _balance *big.Int, _admin common.Address, _maxValidBlocknumber uint64, _lastKeeper common.Address, _checkData []byte) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetUpkeep(&_KeeperRegistryMock.TransactOpts, id, _target, _executeGas, _balance, _admin, _maxValidBlocknumber, _lastKeeper, _checkData) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactor) SetUpkeepCount(opts *bind.TransactOpts, _upkeepCount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.contract.Transact(opts, "setUpkeepCount", _upkeepCount) +} + +func (_KeeperRegistryMock *KeeperRegistryMockSession) SetUpkeepCount(_upkeepCount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetUpkeepCount(&_KeeperRegistryMock.TransactOpts, _upkeepCount) +} + +func (_KeeperRegistryMock *KeeperRegistryMockTransactorSession) SetUpkeepCount(_upkeepCount *big.Int) (*types.Transaction, error) { + return _KeeperRegistryMock.Contract.SetUpkeepCount(&_KeeperRegistryMock.TransactOpts, _upkeepCount) +} + +type KeeperRegistryMockConfigSetIterator struct { + Event *KeeperRegistryMockConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockConfigSetIterator) Next() bool { + + if it.fail != 
nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockConfigSet struct { + PaymentPremiumPPB uint32 + BlockCountPerTurn *big.Int + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryMockConfigSetIterator, error) { + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &KeeperRegistryMockConfigSetIterator{contract: _KeeperRegistryMock.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockConfigSet) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockConfigSet) + if err := _KeeperRegistryMock.contract.UnpackLog(event, 
"ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParseConfigSet(log types.Log) (*KeeperRegistryMockConfigSet, error) { + event := new(KeeperRegistryMockConfigSet) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockFlatFeeSetIterator struct { + Event *KeeperRegistryMockFlatFeeSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockFlatFeeSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockFlatFeeSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockFlatFeeSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockFlatFeeSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockFlatFeeSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockFlatFeeSet struct { + FlatFeeMicroLink uint32 + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterFlatFeeSet(opts *bind.FilterOpts) (*KeeperRegistryMockFlatFeeSetIterator, error) { + + logs, sub, err := 
_KeeperRegistryMock.contract.FilterLogs(opts, "FlatFeeSet") + if err != nil { + return nil, err + } + return &KeeperRegistryMockFlatFeeSetIterator{contract: _KeeperRegistryMock.contract, event: "FlatFeeSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchFlatFeeSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockFlatFeeSet) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "FlatFeeSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockFlatFeeSet) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "FlatFeeSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParseFlatFeeSet(log types.Log) (*KeeperRegistryMockFlatFeeSet, error) { + event := new(KeeperRegistryMockFlatFeeSet) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "FlatFeeSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockFundsAddedIterator struct { + Event *KeeperRegistryMockFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := 
<-it.logs: + it.Event = new(KeeperRegistryMockFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockFundsAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryMockFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryMockFundsAddedIterator{contract: _KeeperRegistryMock.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(KeeperRegistryMockFundsAdded) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParseFundsAdded(log types.Log) (*KeeperRegistryMockFundsAdded, error) { + event := new(KeeperRegistryMockFundsAdded) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockFundsWithdrawnIterator struct { + Event *KeeperRegistryMockFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_KeeperRegistryMock 
*KeeperRegistryMockFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryMockFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryMockFundsWithdrawnIterator{contract: _KeeperRegistryMock.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockFundsWithdrawn) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParseFundsWithdrawn(log types.Log) (*KeeperRegistryMockFundsWithdrawn, error) { + event := new(KeeperRegistryMockFundsWithdrawn) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockKeepersUpdatedIterator struct { + Event *KeeperRegistryMockKeepersUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + 
done bool + fail error +} + +func (it *KeeperRegistryMockKeepersUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockKeepersUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockKeepersUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockKeepersUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockKeepersUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockKeepersUpdated struct { + Keepers []common.Address + Payees []common.Address + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterKeepersUpdated(opts *bind.FilterOpts) (*KeeperRegistryMockKeepersUpdatedIterator, error) { + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "KeepersUpdated") + if err != nil { + return nil, err + } + return &KeeperRegistryMockKeepersUpdatedIterator{contract: _KeeperRegistryMock.contract, event: "KeepersUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchKeepersUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockKeepersUpdated) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "KeepersUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockKeepersUpdated) + if err := 
_KeeperRegistryMock.contract.UnpackLog(event, "KeepersUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParseKeepersUpdated(log types.Log) (*KeeperRegistryMockKeepersUpdated, error) { + event := new(KeeperRegistryMockKeepersUpdated) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "KeepersUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockOwnershipTransferRequestedIterator struct { + Event *KeeperRegistryMockOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw 
types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryMockOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryMockOwnershipTransferRequestedIterator{contract: _KeeperRegistryMock.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockOwnershipTransferRequested) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParseOwnershipTransferRequested(log 
types.Log) (*KeeperRegistryMockOwnershipTransferRequested, error) { + event := new(KeeperRegistryMockOwnershipTransferRequested) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockOwnershipTransferredIterator struct { + Event *KeeperRegistryMockOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryMockOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = 
append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryMockOwnershipTransferredIterator{contract: _KeeperRegistryMock.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockOwnershipTransferred) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryMockOwnershipTransferred, error) { + event := new(KeeperRegistryMockOwnershipTransferred) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockPausedIterator struct { + Event *KeeperRegistryMockPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + 
done bool + fail error +} + +func (it *KeeperRegistryMockPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockPaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryMockPausedIterator, error) { + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &KeeperRegistryMockPausedIterator{contract: _KeeperRegistryMock.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockPaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockPaused) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err 
:= <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParsePaused(log types.Log) (*KeeperRegistryMockPaused, error) { + event := new(KeeperRegistryMockPaused) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockPayeeshipTransferRequestedIterator struct { + Event *KeeperRegistryMockPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockPayeeshipTransferRequested struct { + Keeper common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, keeper []common.Address, from 
[]common.Address, to []common.Address) (*KeeperRegistryMockPayeeshipTransferRequestedIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "PayeeshipTransferRequested", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryMockPayeeshipTransferRequestedIterator{contract: _KeeperRegistryMock.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockPayeeshipTransferRequested, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "PayeeshipTransferRequested", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockPayeeshipTransferRequested) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + 
return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryMockPayeeshipTransferRequested, error) { + event := new(KeeperRegistryMockPayeeshipTransferRequested) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockPayeeshipTransferredIterator struct { + Event *KeeperRegistryMockPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockPayeeshipTransferred struct { + Keeper common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) 
(*KeeperRegistryMockPayeeshipTransferredIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "PayeeshipTransferred", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryMockPayeeshipTransferredIterator{contract: _KeeperRegistryMock.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockPayeeshipTransferred, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "PayeeshipTransferred", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockPayeeshipTransferred) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock 
*KeeperRegistryMockFilterer) ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryMockPayeeshipTransferred, error) { + event := new(KeeperRegistryMockPayeeshipTransferred) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockPaymentWithdrawnIterator struct { + Event *KeeperRegistryMockPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockPaymentWithdrawn struct { + Keeper common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, keeper []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryMockPaymentWithdrawnIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + 
} + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "PaymentWithdrawn", keeperRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryMockPaymentWithdrawnIterator{contract: _KeeperRegistryMock.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockPaymentWithdrawn, keeper []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "PaymentWithdrawn", keeperRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockPaymentWithdrawn) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryMockPaymentWithdrawn, error) { + event := new(KeeperRegistryMockPaymentWithdrawn) + if err := 
_KeeperRegistryMock.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockRegistrarChangedIterator struct { + Event *KeeperRegistryMockRegistrarChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockRegistrarChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockRegistrarChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockRegistrarChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockRegistrarChangedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockRegistrarChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockRegistrarChanged struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterRegistrarChanged(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryMockRegistrarChangedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "RegistrarChanged", fromRule, toRule) + if err != nil { + return nil, err + } + return 
&KeeperRegistryMockRegistrarChangedIterator{contract: _KeeperRegistryMock.contract, event: "RegistrarChanged", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchRegistrarChanged(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockRegistrarChanged, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "RegistrarChanged", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockRegistrarChanged) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "RegistrarChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParseRegistrarChanged(log types.Log) (*KeeperRegistryMockRegistrarChanged, error) { + event := new(KeeperRegistryMockRegistrarChanged) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "RegistrarChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockUnpausedIterator struct { + Event *KeeperRegistryMockUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(KeeperRegistryMockUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryMockUnpausedIterator, error) { + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &KeeperRegistryMockUnpausedIterator{contract: _KeeperRegistryMock.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockUnpaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockUnpaused) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func 
(_KeeperRegistryMock *KeeperRegistryMockFilterer) ParseUnpaused(log types.Log) (*KeeperRegistryMockUnpaused, error) { + event := new(KeeperRegistryMockUnpaused) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockUpkeepCanceledIterator struct { + Event *KeeperRegistryMockUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryMockUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, 
atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return &KeeperRegistryMockUpkeepCanceledIterator{contract: _KeeperRegistryMock.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockUpkeepCanceled) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParseUpkeepCanceled(log types.Log) (*KeeperRegistryMockUpkeepCanceled, error) { + event := new(KeeperRegistryMockUpkeepCanceled) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockUpkeepPerformedIterator struct { + Event *KeeperRegistryMockUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub 
ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockUpkeepPerformed struct { + Id *big.Int + Success bool + From common.Address + Payment *big.Int + PerformData []byte + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool, from []common.Address) (*KeeperRegistryMockUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryMockUpkeepPerformedIterator{contract: _KeeperRegistryMock.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func 
(_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockUpkeepPerformed, id []*big.Int, success []bool, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryMockUpkeepPerformed) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParseUpkeepPerformed(log types.Log) (*KeeperRegistryMockUpkeepPerformed, error) { + event := new(KeeperRegistryMockUpkeepPerformed) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryMockUpkeepRegisteredIterator struct { + Event *KeeperRegistryMockUpkeepRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryMockUpkeepRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(KeeperRegistryMockUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryMockUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryMockUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryMockUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryMockUpkeepRegistered struct { + Id *big.Int + ExecuteGas uint32 + Admin common.Address + Raw types.Log +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryMockUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryMockUpkeepRegisteredIterator{contract: _KeeperRegistryMock.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistryMock.contract.WatchLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + 
event := new(KeeperRegistryMockUpkeepRegistered) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistryMock *KeeperRegistryMockFilterer) ParseUpkeepRegistered(log types.Log) (*KeeperRegistryMockUpkeepRegistered, error) { + event := new(KeeperRegistryMockUpkeepRegistered) + if err := _KeeperRegistryMock.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type CheckUpkeep struct { + PerformData []byte + MaxLinkPayment *big.Int + GasLimit *big.Int + AdjustedGasWei *big.Int + LinkEth *big.Int +} +type GetConfig struct { + PaymentPremiumPPB uint32 + BlockCountPerTurn *big.Int + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int +} +type GetUpkeep struct { + Target common.Address + ExecuteGas uint32 + CheckData []byte + Balance *big.Int + LastKeeper common.Address + Admin common.Address + MaxValidBlocknumber uint64 +} + +func (_KeeperRegistryMock *KeeperRegistryMock) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistryMock.abi.Events["ConfigSet"].ID: + return _KeeperRegistryMock.ParseConfigSet(log) + case _KeeperRegistryMock.abi.Events["FlatFeeSet"].ID: + return _KeeperRegistryMock.ParseFlatFeeSet(log) + case _KeeperRegistryMock.abi.Events["FundsAdded"].ID: + return _KeeperRegistryMock.ParseFundsAdded(log) + case _KeeperRegistryMock.abi.Events["FundsWithdrawn"].ID: + return _KeeperRegistryMock.ParseFundsWithdrawn(log) + case _KeeperRegistryMock.abi.Events["KeepersUpdated"].ID: + return _KeeperRegistryMock.ParseKeepersUpdated(log) + case 
_KeeperRegistryMock.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistryMock.ParseOwnershipTransferRequested(log) + case _KeeperRegistryMock.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistryMock.ParseOwnershipTransferred(log) + case _KeeperRegistryMock.abi.Events["Paused"].ID: + return _KeeperRegistryMock.ParsePaused(log) + case _KeeperRegistryMock.abi.Events["PayeeshipTransferRequested"].ID: + return _KeeperRegistryMock.ParsePayeeshipTransferRequested(log) + case _KeeperRegistryMock.abi.Events["PayeeshipTransferred"].ID: + return _KeeperRegistryMock.ParsePayeeshipTransferred(log) + case _KeeperRegistryMock.abi.Events["PaymentWithdrawn"].ID: + return _KeeperRegistryMock.ParsePaymentWithdrawn(log) + case _KeeperRegistryMock.abi.Events["RegistrarChanged"].ID: + return _KeeperRegistryMock.ParseRegistrarChanged(log) + case _KeeperRegistryMock.abi.Events["Unpaused"].ID: + return _KeeperRegistryMock.ParseUnpaused(log) + case _KeeperRegistryMock.abi.Events["UpkeepCanceled"].ID: + return _KeeperRegistryMock.ParseUpkeepCanceled(log) + case _KeeperRegistryMock.abi.Events["UpkeepPerformed"].ID: + return _KeeperRegistryMock.ParseUpkeepPerformed(log) + case _KeeperRegistryMock.abi.Events["UpkeepRegistered"].ID: + return _KeeperRegistryMock.ParseUpkeepRegistered(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistryMockConfigSet) Topic() common.Hash { + return common.HexToHash("0xeb3c06937e6595fd80ec1add18a195026d5cf65f122cc3ffedbfb18a9ed80b39") +} + +func (KeeperRegistryMockFlatFeeSet) Topic() common.Hash { + return common.HexToHash("0x17b46a44a823646eef686b7824df2962de896bc9a012a60b67694c5cbf184d8b") +} + +func (KeeperRegistryMockFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (KeeperRegistryMockFundsWithdrawn) Topic() common.Hash { + return 
common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (KeeperRegistryMockKeepersUpdated) Topic() common.Hash { + return common.HexToHash("0x056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f") +} + +func (KeeperRegistryMockOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistryMockOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistryMockPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (KeeperRegistryMockPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (KeeperRegistryMockPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (KeeperRegistryMockPaymentWithdrawn) Topic() common.Hash { + return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (KeeperRegistryMockRegistrarChanged) Topic() common.Hash { + return common.HexToHash("0x9bf4a5b30267728df68663e14adb47e559863967c419dc6030638883408bed2e") +} + +func (KeeperRegistryMockUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (KeeperRegistryMockUpkeepCanceled) Topic() common.Hash { + return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (KeeperRegistryMockUpkeepPerformed) Topic() common.Hash { + return common.HexToHash("0xcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6") +} + +func (KeeperRegistryMockUpkeepRegistered) Topic() common.Hash { + return 
common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + +func (_KeeperRegistryMock *KeeperRegistryMock) Address() common.Address { + return _KeeperRegistryMock.address +} + +type KeeperRegistryMockInterface interface { + CheckUpkeep(opts *bind.CallOpts, id *big.Int, from common.Address) (CheckUpkeep, + + error) + + GetCanceledUpkeepList(opts *bind.CallOpts) ([]*big.Int, error) + + GetConfig(opts *bind.CallOpts) (GetConfig, + + error) + + GetKeeperList(opts *bind.CallOpts) ([]common.Address, error) + + GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetUpkeep(opts *bind.CallOpts, id *big.Int) (GetUpkeep, + + error) + + GetUpkeepCount(opts *bind.CallOpts) (*big.Int, error) + + EmitConfigSet(opts *bind.TransactOpts, paymentPremiumPPB uint32, blockCountPerTurn *big.Int, checkGasLimit uint32, stalenessSeconds *big.Int, gasCeilingMultiplier uint16, fallbackGasPrice *big.Int, fallbackLinkPrice *big.Int) (*types.Transaction, error) + + EmitFlatFeeSet(opts *bind.TransactOpts, flatFeeMicroLink uint32) (*types.Transaction, error) + + EmitFundsAdded(opts *bind.TransactOpts, id *big.Int, from common.Address, amount *big.Int) (*types.Transaction, error) + + EmitFundsWithdrawn(opts *bind.TransactOpts, id *big.Int, amount *big.Int, to common.Address) (*types.Transaction, error) + + EmitKeepersUpdated(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error) + + EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitPaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) + + EmitPayeeshipTransferRequested(opts *bind.TransactOpts, keeper common.Address, from common.Address, to common.Address) (*types.Transaction, error) + + 
EmitPayeeshipTransferred(opts *bind.TransactOpts, keeper common.Address, from common.Address, to common.Address) (*types.Transaction, error) + + EmitPaymentWithdrawn(opts *bind.TransactOpts, keeper common.Address, amount *big.Int, to common.Address, payee common.Address) (*types.Transaction, error) + + EmitRegistrarChanged(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitUnpaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) + + EmitUpkeepCanceled(opts *bind.TransactOpts, id *big.Int, atBlockHeight uint64) (*types.Transaction, error) + + EmitUpkeepPerformed(opts *bind.TransactOpts, id *big.Int, success bool, from common.Address, payment *big.Int, performData []byte) (*types.Transaction, error) + + EmitUpkeepRegistered(opts *bind.TransactOpts, id *big.Int, executeGas uint32, admin common.Address) (*types.Transaction, error) + + PerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) + + SetCanceledUpkeepList(opts *bind.TransactOpts, _canceledUpkeepList []*big.Int) (*types.Transaction, error) + + SetCheckUpkeepData(opts *bind.TransactOpts, id *big.Int, performData []byte, maxLinkPayment *big.Int, gasLimit *big.Int, adjustedGasWei *big.Int, linkEth *big.Int) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, _paymentPremiumPPB uint32, _flatFeeMicroLink uint32, _blockCountPerTurn *big.Int, _checkGasLimit uint32, _stalenessSeconds *big.Int, _gasCeilingMultiplier uint16, _fallbackGasPrice *big.Int, _fallbackLinkPrice *big.Int) (*types.Transaction, error) + + SetKeeperList(opts *bind.TransactOpts, _keepers []common.Address) (*types.Transaction, error) + + SetMinBalance(opts *bind.TransactOpts, id *big.Int, minBalance *big.Int) (*types.Transaction, error) + + SetPerformUpkeepSuccess(opts *bind.TransactOpts, id *big.Int, success bool) (*types.Transaction, error) + + SetUpkeep(opts *bind.TransactOpts, id *big.Int, _target 
common.Address, _executeGas uint32, _balance *big.Int, _admin common.Address, _maxValidBlocknumber uint64, _lastKeeper common.Address, _checkData []byte) (*types.Transaction, error) + + SetUpkeepCount(opts *bind.TransactOpts, _upkeepCount *big.Int) (*types.Transaction, error) + + FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryMockConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*KeeperRegistryMockConfigSet, error) + + FilterFlatFeeSet(opts *bind.FilterOpts) (*KeeperRegistryMockFlatFeeSetIterator, error) + + WatchFlatFeeSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockFlatFeeSet) (event.Subscription, error) + + ParseFlatFeeSet(log types.Log) (*KeeperRegistryMockFlatFeeSet, error) + + FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryMockFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*KeeperRegistryMockFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryMockFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockFundsWithdrawn, id []*big.Int) (event.Subscription, error) + + ParseFundsWithdrawn(log types.Log) (*KeeperRegistryMockFundsWithdrawn, error) + + FilterKeepersUpdated(opts *bind.FilterOpts) (*KeeperRegistryMockKeepersUpdatedIterator, error) + + WatchKeepersUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockKeepersUpdated) (event.Subscription, error) + + ParseKeepersUpdated(log types.Log) (*KeeperRegistryMockKeepersUpdated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryMockOwnershipTransferRequestedIterator, error) + + 
WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryMockOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryMockOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistryMockOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryMockPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*KeeperRegistryMockPaused, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryMockPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockPayeeshipTransferRequested, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryMockPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryMockPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockPayeeshipTransferred, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryMockPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts 
*bind.FilterOpts, keeper []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryMockPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockPaymentWithdrawn, keeper []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryMockPaymentWithdrawn, error) + + FilterRegistrarChanged(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryMockRegistrarChangedIterator, error) + + WatchRegistrarChanged(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockRegistrarChanged, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseRegistrarChanged(log types.Log) (*KeeperRegistryMockRegistrarChanged, error) + + FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryMockUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*KeeperRegistryMockUnpaused, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryMockUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*KeeperRegistryMockUpkeepCanceled, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool, from []common.Address) (*KeeperRegistryMockUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockUpkeepPerformed, id []*big.Int, success []bool, from []common.Address) (event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*KeeperRegistryMockUpkeepPerformed, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryMockUpkeepRegisteredIterator, error) + + 
WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryMockUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*KeeperRegistryMockUpkeepRegistered, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_registry_wrapper1_2/keeper_registry_wrapper1_2.go b/core/gethwrappers/generated/keeper_registry_wrapper1_2/keeper_registry_wrapper1_2.go new file mode 100644 index 00000000..0eb73050 --- /dev/null +++ b/core/gethwrappers/generated/keeper_registry_wrapper1_2/keeper_registry_wrapper1_2.go @@ -0,0 +1,3489 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package keeper_registry_wrapper1_2 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type Config struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + BlockCountPerTurn *big.Int + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrar common.Address +} + +type State struct { + Nonce uint32 + OwnerLinkBalance *big.Int + ExpectedLinkBalance *big.Int + NumUpkeeps *big.Int +} + +var KeeperRegistryMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkEthFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"fastGasFeed\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"internalType\":\"structConfig\",\"name\":\"config\",\"type\":\"tuple\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientFunds\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},
{\"inputs\":[],\"name\":\"KeepersMustTakeTurns\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveKeepers\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotActive\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\"
:\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"indexed\":false,\"internalType\":\"structConfig\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"KeepersUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"fro
m\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"typ
e\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"FAST_GAS_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"inte
rnalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI_ETH_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"cancelUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"maxLinkPayment\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"adjustedGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkEth\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveUpkeepIDs\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"t
ype\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getKeeperInfo\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"getMaxPaymentForGas\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"maxPayment\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"minBalance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"}],\"name\":\"getPeerRegistryMigrationPermission\",\"outputs\":[{\"internalType\":\"enumKeeperRegistry1_2.MigrationPermission\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getState\",\"outputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"nonce\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"ownerLinkBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"expectedLinkBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"numUpkeeps\",\"type\":\"uint256\"}],\"internalType\":\"structState\",\"name\":\"state\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uin
t32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"internalType\":\"structConfig\",\"name\":\"config\",\"type\":\"tuple\"},{\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getUpkeep\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"lastKeeper\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"},{\"internalType\":\"uint96\",\"name\":\"amountSpent\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"ids\",\"type\":\"uint256[]\"},{\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"migrateUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":
\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"encodedUpkeeps\",\"type\":\"bytes\"}],\"name\":\"receiveUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"
},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"internalType\":\"structConfig\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"setKeepers\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"},{\"internalType\":\"enumKeeperRegistry1_2.MigrationPermission\",\"name\":\"permission\",\"type\":\"uint8\"}],\"name\":\"setPeerRegistryMigrationPermission\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"setUpkeepGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"inter
nalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepTranscoderVersion\",\"outputs\":[{\"internalType\":\"enumUpkeepFormat\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawOwnerFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60e06040523480156200001157600080fd5b50604051620066f2380380620066f2833981016040819052620000349162000577565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be8162000107565b50506001600255506003805460ff191690556001600160601b0319606085811b821660805284811b821660a05283901b1660c052620000fd81620001b3565b50505050620007fa565b6001600160a01b038116331415620001625760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b620001bd620004a8565b600d5460e082015163ffffffff91821691161015620001ef57604051630e6af04160e21b815260040160405180910390fd5b604051806101200160405280826000015163ffffffff168152602001826020015163ffffffff168152602001826040015162ffffff168152602001826060015163ffffffff168152602001826080015162ffffff1681526020018260a0015161ffff1681526020018260c001516001600160601b031681526020018260e0015163ffffffff168152602001600c60010160049054906101000a900463ffffffff1663ffffffff16815250600c60008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160086101000a81548162ffffff021916908362ffffff160217905550606082015181600001600b6101000a81548163ffffffff021916908363ffffffff160217905550608082015181600001600f6101000a81548162ffffff021916908362ffffff16021790555060a08201518160000160126101000a81548161ffff021916908361ffff16021790555060c08201518160000160146101000a8154816001600160601b0302191690836001600160601b0316021790555060e08201518160010160006101000a81548163ffffffff021916908363ffffffff1602179055506101008201518160010160046101000a81548163f
fffffff021916908363ffffffff160217905550905050806101000151600e81905550806101200151600f81905550806101400151601260006101000a8154816001600160a01b0302191690836001600160a01b03160217905550806101600151601360006101000a8154816001600160a01b0302191690836001600160a01b031602179055507ffe125a41957477226ba20f85ef30a4024ea3bb8d066521ddc16df3f2944de325816040516200049d9190620006c3565b60405180910390a150565b6000546001600160a01b03163314620005045760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000082565b565b80516001600160a01b03811681146200051e57600080fd5b919050565b805161ffff811681146200051e57600080fd5b805162ffffff811681146200051e57600080fd5b805163ffffffff811681146200051e57600080fd5b80516001600160601b03811681146200051e57600080fd5b6000806000808486036101e08112156200059057600080fd5b6200059b8662000506565b9450620005ab6020870162000506565b9350620005bb6040870162000506565b925061018080605f1983011215620005d257600080fd5b620005dc620007c2565b9150620005ec606088016200054a565b8252620005fc608088016200054a565b60208301526200060f60a0880162000536565b60408301526200062260c088016200054a565b60608301526200063560e0880162000536565b60808301526101006200064a81890162000523565b60a08401526101206200065f818a016200055f565b60c085015261014062000674818b016200054a565b60e0860152610160808b015184870152848b0151838701526200069b6101a08c0162000506565b82870152620006ae6101c08c0162000506565b90860152509699959850939650909450505050565b815163ffffffff16815261018081016020830151620006ea602084018263ffffffff169052565b50604083015162000702604084018262ffffff169052565b5060608301516200071b606084018263ffffffff169052565b50608083015162000733608084018262ffffff169052565b5060a08301516200074a60a084018261ffff169052565b5060c08301516200076660c08401826001600160601b03169052565b5060e08301516200077f60e084018263ffffffff169052565b5061010083810151908301526101208084015190830152610140808401516001600160a01b03908116918401919091526101609384015116929091019190915290565b60405161018081016001600
160401b0381118282101715620007f457634e487b7160e01b600052604160045260246000fd5b60405290565b60805160601c60a05160601c60c05160601c615e7962000879600039600081816104240152614126015260008181610575015261420701526000818161030401528181610e10015281816110d10152818161192201528181611cad01528181611da1015281816121990152818161251701526125aa0152615e796000f3fe608060405234801561001057600080fd5b506004361061025c5760003560e01c806393f0c1fc11610145578063b7fdb436116100bd578063da5c67411161008c578063ef47a0ce11610071578063ef47a0ce1461066a578063f2fde38b1461067d578063faa3e9961461069057600080fd5b8063da5c674114610636578063eb5dcd6c1461065757600080fd5b8063b7fdb436146105c5578063c41b813a146105d8578063c7c3a19a146105fc578063c80480221461062357600080fd5b8063a72aa27e11610114578063b121e147116100f9578063b121e14714610597578063b657bc9c146105aa578063b79550be146105bd57600080fd5b8063a72aa27e1461055d578063ad1783611461057057600080fd5b806393f0c1fc146104f4578063948108f714610524578063a4c0ed3614610537578063a710b2211461054a57600080fd5b80635c975abb116101d85780637d9b97e0116101a757806385c1b0ba1161018c57806385c1b0ba146104b05780638da5cb5b146104c35780638e86139b146104e157600080fd5b80637d9b97e0146104a05780638456cb59146104a857600080fd5b80635c975abb1461045b578063744bfe611461047257806379ba5097146104855780637bbaf1ea1461048d57600080fd5b80631b6b6d231161022f5780633f4ba83a116102145780633f4ba83a146104175780634584a4191461041f57806348013d7b1461044657600080fd5b80631b6b6d23146102ff5780631e12b8a51461034b57600080fd5b806306e3b63214610261578063181f5a771461028a5780631865c57d146102d3578063187256e8146102ea575b600080fd5b61027461026f3660046152fd565b6106d6565b60405161028191906157fb565b60405180910390f35b6102c66040518060400160405280601481526020017f4b6565706572526567697374727920312e322e3000000000000000000000000081525081565b604051610281919061583f565b6102db6107d2565b604051610281939291906159cc565b6102fd6102f8366004614dda565b610a8a565b005b6103267f000000000000000000000000000000000000000000000000000000000000000081565b60405173fffffffffffffffffffffffffffffffffffff
fff9091168152602001610281565b6103d7610359366004614d8c565b73ffffffffffffffffffffffffffffffffffffffff90811660009081526008602090815260409182902082516060810184528154948516808252740100000000000000000000000000000000000000009095046bffffffffffffffffffffffff1692810183905260019091015460ff16151592018290529192909190565b6040805173ffffffffffffffffffffffffffffffffffffffff909416845291151560208401526bffffffffffffffffffffffff1690820152606001610281565b6102fd610afb565b6103267f000000000000000000000000000000000000000000000000000000000000000081565b61044e600081565b6040516102819190615982565b60035460ff165b6040519015158152602001610281565b6102fd61048036600461528e565b610b0d565b6102fd610e99565b61046261049b3660046152b1565b610f9b565b6102fd610ff9565b6102fd611167565b6102fd6104be366004614f45565b611177565b60005473ffffffffffffffffffffffffffffffffffffffff16610326565b6102fd6104ef3660046150e6565b611953565b61050761050236600461525c565b611b53565b6040516bffffffffffffffffffffffff9091168152602001610281565b6102fd610532366004615342565b611b87565b6102fd610545366004614e15565b611d89565b6102fd610558366004614da7565b611f84565b6102fd61056b36600461531f565b61221e565b6103267f000000000000000000000000000000000000000000000000000000000000000081565b6102fd6105a5366004614d8c565b6123c5565b6105076105b836600461525c565b6124bd565b6102fd6124de565b6102fd6105d3366004614ee5565b612649565b6105eb6105e636600461528e565b6129aa565b604051610281959493929190615852565b61060f61060a36600461525c565b612c5f565b6040516102819897969594939291906155f1565b6102fd61063136600461525c565b612dea565b610649610644366004614e6f565b612fe0565b604051908152602001610281565b6102fd610665366004614da7565b6131d7565b6102fd61067836600461517e565b613336565b6102fd61068b366004614d8c565b613682565b6106c961069e366004614d8c565b73ffffffffffffffffffffffffffffffffffffffff166000908152600b602052604090205460ff1690565b6040516102819190615968565b606060006106e46005613696565b905080841061071f576040517f1390f2a100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b82610731576
1072e8482615c60565b92505b60008367ffffffffffffffff81111561074c5761074c615e3d565b604051908082528060200260200182016040528015610775578160200160208202803683370190505b50905060005b848110156107c7576107986107908288615ba0565b6005906136a0565b8282815181106107aa576107aa615e0e565b6020908102919091010152806107bf81615d24565b91505061077b565b509150505b92915050565b6040805160808101825260008082526020820181905291810182905260608101919091526040805161018081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e081018290526101008101829052610120810182905261014081018290526101608101919091526040805161012081018252600c5463ffffffff8082168352640100000000808304821660208086019190915262ffffff6801000000000000000085048116868801526b010000000000000000000000850484166060878101919091526f010000000000000000000000000000008604909116608087015261ffff720100000000000000000000000000000000000086041660a08701526bffffffffffffffffffffffff74010000000000000000000000000000000000000000909504851660c0870152600d5480851660e0880152929092049092166101008501819052875260105490921690860152601154928501929092526109546005613696565b606080860191909152815163ffffffff908116855260208084015182168187015260408085015162ffffff90811682890152858501518416948801949094526080808601519094169387019390935260a08085015161ffff169087015260c0808501516bffffffffffffffffffffffff169087015260e08085015190921691860191909152600e54610100860152600f5461012086015260125473ffffffffffffffffffffffffffffffffffffffff90811661014087015260135416610160860152600480548351818402810184019094528084528793879390918391830182828015610a7757602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610a4c575b5050505050905093509350935050909192565b610a926136b3565b73ffffffffffffffffffffffffffffffffffffffff82166000908152600b6020526040902080548291907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001836003811115610af257610af2615db0565b02179055505050565b610b036136b3565b610b0b613734565b565
b8073ffffffffffffffffffffffffffffffffffffffff8116610b5b576040517f9c8d2cd200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008381526007602052604090206002015483906c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff163314610bcd576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000848152600760205260409020600101544364010000000090910467ffffffffffffffff161115610c2b576040517fff84e5dd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600c54600085815260076020526040812080546002909101546bffffffffffffffffffffffff740100000000000000000000000000000000000000009094048416939182169291169083821015610caf57610c868285615c77565b9050826bffffffffffffffffffffffff16816bffffffffffffffffffffffff161115610caf5750815b6000610cbb8285615c77565b60008a815260076020526040902080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000169055601054909150610d0e9083906bffffffffffffffffffffffff16615bb8565b601080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff928316179055601154610d5691831690615c60565b601155604080516bffffffffffffffffffffffff8316815273ffffffffffffffffffffffffffffffffffffffff8a1660208201528a917ff3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318910160405180910390a26040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff89811660048301526bffffffffffffffffffffffff831660248301527f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb906044015b602060405180830381600087803b158015610e5557600080fd5b505af1158015610e69573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610e8d919061507d565b50505050505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610f1f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652
070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6000610fa56137b1565b610ff1610fec338686868080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506001925061381e915050565b613918565b949350505050565b6110016136b3565b6010546011546bffffffffffffffffffffffff90911690611023908290615c60565b601155601080547fffffffffffffffffffffffffffffffffffffffff0000000000000000000000001690556040516bffffffffffffffffffffffff821681527f1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f19060200160405180910390a16040517fa9059cbb0000000000000000000000000000000000000000000000000000000081523360048201526bffffffffffffffffffffffff821660248201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169063a9059cbb906044015b602060405180830381600087803b15801561112b57600080fd5b505af115801561113f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611163919061507d565b5050565b61116f6136b3565b610b0b613d39565b600173ffffffffffffffffffffffffffffffffffffffff82166000908152600b602052604090205460ff1660038111156111b3576111b3615db0565b141580156111fb5750600373ffffffffffffffffffffffffffffffffffffffff82166000908152600b602052604090205460ff1660038111156111f8576111f8615db0565b14155b15611232576040517f0ebeec3c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60125473ffffffffffffffffffffffffffffffffffffffff16611281576040517fd12d7d8d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b816112b8576040517f2c2fc94100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805160e08101825260008082526020820181905291810182905260608101829052608
0810182905260a0810182905260c081018290526000808567ffffffffffffffff81111561130c5761130c615e3d565b60405190808252806020026020018201604052801561133f57816020015b606081526020019060019003908161132a5790505b50905060008667ffffffffffffffff81111561135d5761135d615e3d565b6040519080825280602002602001820160405280156113e257816020015b6040805160e08101825260008082526020808301829052928201819052606082018190526080820181905260a0820181905260c082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90920191018161137b5790505b50905060005b878110156116e25788888281811061140257611402615e0e565b60209081029290920135600081815260078452604090819020815160e08101835281546bffffffffffffffffffffffff808216835273ffffffffffffffffffffffffffffffffffffffff6c0100000000000000000000000092839004811698840198909852600184015463ffffffff81169584019590955267ffffffffffffffff6401000000008604166060840152938190048716608083015260029092015492831660a0820152910490931660c084018190529098509196505033146114f5576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606085015167ffffffffffffffff9081161461153d576040517fd096219c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8482828151811061155057611550615e0e565b6020026020010181905250600a6000878152602001908152602001600020805461157990615cd0565b80601f01602080910402602001604051908101604052809291908181526020018280546115a590615cd0565b80156115f25780601f106115c7576101008083540402835291602001916115f2565b820191906000526020600020905b8154815290600101906020018083116115d557829003601f168201915b505050505083828151811061160957611609615e0e565b6020908102919091010152845161162e906bffffffffffffffffffffffff1685615ba0565b600087815260076020908152604080832083815560018101849055600201839055600a909152812091955061166391906148f8565b61166e600587613d94565b508451604080516bffffffffffffffffffffffff909216825273ffffffffffffffffffffffffffffffffffffffff8916602083015287917fb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd0
6ff910160405180910390a2806116da81615d24565b9150506113e8565b50826011546116f19190615c60565b60115560405160009061170e908a908a90859087906020016156ad565b60405160208183030381529060405290508673ffffffffffffffffffffffffffffffffffffffff16638e86139b601260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663c71249ab60008b73ffffffffffffffffffffffffffffffffffffffff166348013d7b6040518163ffffffff1660e01b815260040160206040518083038186803b1580156117c157600080fd5b505afa1580156117d5573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906117f9919061515d565b866040518463ffffffff1660e01b815260040161181893929190615990565b60006040518083038186803b15801561183057600080fd5b505afa158015611844573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261188a9190810190615128565b6040518263ffffffff1660e01b81526004016118a6919061583f565b600060405180830381600087803b1580156118c057600080fd5b505af11580156118d4573d6000803e3d6000fd5b50506040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8a81166004830152602482018890527f000000000000000000000000000000000000000000000000000000000000000016925063a9059cbb9150604401610e3b565b6002336000908152600b602052604090205460ff16600381111561197957611979615db0565b141580156119ab57506003336000908152600b602052604090205460ff1660038111156119a8576119a8615db0565b14155b156119e2576040517f0ebeec3c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600080806119f284860186614f99565b92509250925060005b8351811015611b4b57611ab8848281518110611a1957611a19615e0e565b6020026020010151848381518110611a3357611a33615e0e565b602002602001015160800151858481518110611a5157611a51615e0e565b602002602001015160400151868581518110611a6f57611a6f615e0e565b602002602001015160c00151878681518110611a8d57611a8d615e0e565b602002602001015160000151878781518110611aab57611aab615e0e565b602
0026020010151613da0565b838181518110611aca57611aca615e0e565b60200260200101517f74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71848381518110611b0557611b05615e0e565b60209081029190910181015151604080516bffffffffffffffffffffffff909216825233928201929092520160405180910390a280611b4381615d24565b9150506119fb565b505050505050565b6000806000611b606140f3565b915091506000611b718360006142ee565b9050611b7e858284614333565b95945050505050565b6000828152600760205260409020600101548290640100000000900467ffffffffffffffff90811614611be6576040517fd096219c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600083815260076020526040902054611c0e9083906bffffffffffffffffffffffff16615bb8565b600084815260076020526040902080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff928316179055601154611c6291841690615ba0565b6011556040517f23b872dd0000000000000000000000000000000000000000000000000000000081523360048201523060248201526bffffffffffffffffffffffff831660448201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906323b872dd90606401602060405180830381600087803b158015611d0657600080fd5b505af1158015611d1a573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611d3e919061507d565b506040516bffffffffffffffffffffffff83168152339084907fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039060200160405180910390a3505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614611df8576040517fc8bad78d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60208114611e32576040517fdfe9309000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000611e408284018461525c565b600081815260076020526040902060010154909150640100000000900467ffffffffffffffff90811614611ea0576040517fd096219c0000000000000000000000000000000000000000000000000000000081526
0040160405180910390fd5b600081815260076020526040902054611ec89085906bffffffffffffffffffffffff16615bb8565b600082815260076020526040902080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff92909216919091179055601154611f1f908590615ba0565b6011556040516bffffffffffffffffffffffff8516815273ffffffffffffffffffffffffffffffffffffffff86169082907fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039060200160405180910390a35050505050565b8073ffffffffffffffffffffffffffffffffffffffff8116611fd2576040517f9c8d2cd200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff83811660009081526008602090815260409182902082516060810184528154948516808252740100000000000000000000000000000000000000009095046bffffffffffffffffffffffff16928101929092526001015460ff16151591810191909152903314612083576040517fcebf515b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff808516600090815260086020908152604090912080549092169091558101516011546120d2916bffffffffffffffffffffffff1690615c60565b60115560208082015160405133815273ffffffffffffffffffffffffffffffffffffffff808716936bffffffffffffffffffffffff90931692908816917f9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698910160405180910390a460208101516040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff85811660048301526bffffffffffffffffffffffff90921660248201527f00000000000000000000000000000000000000000000000000000000000000009091169063a9059cbb90604401602060405180830381600087803b1580156121df57600080fd5b505af11580156121f3573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612217919061507d565b5050505050565b6000828152600760205260409020600101548290640100000000900467ffffffffffffffff9081161461227d576040517fd096219c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5
b60008381526007602052604090206002015483906c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1633146122ef576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6108fc8363ffffffff1610806123105750600d5463ffffffff908116908416115b15612347576040517f14c237fb00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008481526007602090815260409182902060010180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff8716908117909155915191825285917fc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c910160405180910390a250505050565b73ffffffffffffffffffffffffffffffffffffffff818116600090815260096020526040902054163314612425576040517f6752e7aa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff81811660008181526008602090815260408083208054337fffffffffffffffffffffffff000000000000000000000000000000000000000080831682179093556009909452828520805490921690915590519416939092849290917f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b39190a45050565b6000818152600760205260408120600101546107cc9063ffffffff16611b53565b6124e66136b3565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201526000907f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906370a082319060240160206040518083038186803b15801561256e57600080fd5b505afa158015612582573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906125a69190615275565b90507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb33601154846125f39190615c60565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e085901b16815273ffffffffffffffffffffffffffffffffffffffff90921660048301526024820152604401611111565b6126516136b3565b82811415806126605750600283105b156126975
76040517fcf54c06a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b600454811015612723576000600482815481106126b9576126b9615e0e565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff168252600890526040902060010180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055508061271b81615d24565b91505061269a565b5060005b8381101561295957600085858381811061274357612743615e0e565b90506020020160208101906127589190614d8c565b73ffffffffffffffffffffffffffffffffffffffff80821660009081526008602052604081208054939450929091169086868681811061279a5761279a615e0e565b90506020020160208101906127af9190614d8c565b905073ffffffffffffffffffffffffffffffffffffffff81161580612842575073ffffffffffffffffffffffffffffffffffffffff82161580159061282057508073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b8015612842575073ffffffffffffffffffffffffffffffffffffffff81811614155b15612879576040517fb387a23800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600183015460ff16156128b8576040517f357d0cc400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600183810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016909117905573ffffffffffffffffffffffffffffffffffffffff818116146129425782547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff82161783555b50505050808061295190615d24565b915050612727565b5061296660048585614932565b507f056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f8484848460405161299c949392919061567b565b60405180910390a150505050565b60606000806000806129ba614410565b6000878152600760209081526040808320815160e08101835281546bffffffffffffffffffffffff808216835273ffffffffffffffffffffffffffffffffffffffff6c0100000000000000000000000092839004811684880152600185015463ffffffff81168588015267ffffffffffffffff640100000000820416606086015283900481166080850152600290940154908
11660a08401520490911660c08201528a8452600a90925280832090519192917f6e04ff0d0000000000000000000000000000000000000000000000000000000091612a9a91602401615889565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050509050600080836080015173ffffffffffffffffffffffffffffffffffffffff16600c600001600b9054906101000a900463ffffffff1663ffffffff1684604051612b4191906155d5565b60006040518083038160008787f1925050503d8060008114612b7f576040519150601f19603f3d011682016040523d82523d6000602084013e612b84565b606091505b509150915081612bc257806040517f96c36235000000000000000000000000000000000000000000000000000000008152600401610f16919061583f565b80806020019051810190612bd69190615098565b9950915081612c11576040517f865676e300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000612c208b8d8c600061381e565b9050612c358582600001518360600151614448565b6060810151608082015160a083015160c0909301519b9e919d509b50909998509650505050505050565b6000818152600760209081526040808320815160e08101835281546bffffffffffffffffffffffff808216835273ffffffffffffffffffffffffffffffffffffffff6c01000000000000000000000000928390048116848801908152600186015463ffffffff811686890181905267ffffffffffffffff64010000000083041660608881019182529287900485166080890181905260029099015495861660a089019081529690950490931660c087019081528b8b52600a9099529689208551915198519351945181548b9a8b998a998a998a998a9992989397929692959394939092908690612d4e90615cd0565b80601f0160208091040260200160405190810160405280929190818152602001828054612d7a90615cd0565b8015612dc75780601f10612d9c57610100808354040283529160200191612dc7565b820191906000526020600020905b815481529060010190602001808311612daa57829003601f168201915b505050505095509850985098509850985098509850985050919395975091939597565b60008181526007602052604081206001015467ffffffffffffffff6401000000009091048116919082141590612e3560005473ffffffffffffffffffffffffffffffffffffffff1690565
b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16149050818015612e855750808015612e835750438367ffffffffffffffff16115b155b15612ebc576040517ffbc0357800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80158015612f0157506000848152600760205260409020600201546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff163314155b15612f38576040517ffbdb8e5600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b4381612f4c57612f49603282615ba0565b90505b600085815260076020526040902060010180547fffffffffffffffffffffffffffffffffffffffff0000000000000000ffffffff1664010000000067ffffffffffffffff841602179055612fa1600586613d94565b5060405167ffffffffffffffff82169086907f91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f79118190600090a35050505050565b6000805473ffffffffffffffffffffffffffffffffffffffff163314801590613021575060135473ffffffffffffffffffffffffffffffffffffffff163314155b15613058576040517fd48b678b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b613063600143615c60565b600d5460408051924060208401523060601b7fffffffffffffffffffffffffffffffffffffffff0000000000000000000000001690830152640100000000900460e01b7fffffffff000000000000000000000000000000000000000000000000000000001660548201526058016040516020818303038152906040528051906020012060001c905061313081878787600088888080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250613da092505050565b600d8054640100000000900463ffffffff1690600461314e83615d5d565b91906101000a81548163ffffffff021916908363ffffffff16021790555050807fbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d01286866040516131c692919063ffffffff92909216825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b60405180910390a295945050505050565b73ffffffffffffffffffffffffffffffffffffffff828116600090815260086020526040902054163314613237576040517fcebf515b0000000000000000000000000000000
0000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8116331415613287576040517f8c8728c700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8281166000908152600960205260409020548116908216146111635773ffffffffffffffffffffffffffffffffffffffff82811660008181526009602052604080822080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169486169485179055513392917f84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e3836791a45050565b61333e6136b3565b600d5460e082015163ffffffff91821691161015613388576040517f39abc10400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b604051806101200160405280826000015163ffffffff168152602001826020015163ffffffff168152602001826040015162ffffff168152602001826060015163ffffffff168152602001826080015162ffffff1681526020018260a0015161ffff1681526020018260c001516bffffffffffffffffffffffff1681526020018260e0015163ffffffff168152602001600c60010160049054906101000a900463ffffffff1663ffffffff16815250600c60008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160086101000a81548162ffffff021916908362ffffff160217905550606082015181600001600b6101000a81548163ffffffff021916908363ffffffff160217905550608082015181600001600f6101000a81548162ffffff021916908362ffffff16021790555060a08201518160000160126101000a81548161ffff021916908361ffff16021790555060c08201518160000160146101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555060e08201518160010160006101000a81548163ffffffff021916908363ffffffff1602179055506101008201518160010160046101000a81548163ffffffff021916908363ffffffff160217905550905050806101000151600e81905550806101200151600f81905550806101400151601260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080610
1600151601360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055507ffe125a41957477226ba20f85ef30a4024ea3bb8d066521ddc16df3f2944de3258160405161367791906159bd565b60405180910390a150565b61368a6136b3565b61369381614562565b50565b60006107cc825490565b60006136ac8383614658565b9392505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610b0b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610f16565b61373c614682565b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa335b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390a1565b60035460ff1615610b0b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f5061757361626c653a20706175736564000000000000000000000000000000006044820152606401610f16565b6138746040518060e00160405280600073ffffffffffffffffffffffffffffffffffffffff1681526020016000815260200160608152602001600081526020016000815260200160008152602001600081525090565b60008481526007602052604081206001015463ffffffff1690806138966140f3565b9150915060006138a683876142ee565b905060006138b5858385614333565b6040805160e08101825273ffffffffffffffffffffffffffffffffffffffff8d168152602081018c90529081018a90526bffffffffffffffffffffffff909116606082015260808101959095525060a084015260c0830152509050949350505050565b60006139226146ee565b602082810151600081815260079092526040909120600101544364010000000090910467ffffffffffffffff1611613986576040517fd096219c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b602080840151600090815260078252604090819020815160e08101835281546bffffffffffffffffffffffff808216835273ffffffffffffffffffffffffffffffffffffffff6c010000000000000000000000009283900481169684019690965260018401546
3ffffffff81169584019590955267ffffffffffffffff640100000000860416606080850191909152948290048616608084015260029093015492831660a083015290910490921660c0830152845190850151613a4a918391614448565b60005a90506000634585e33b60e01b8660400151604051602401613a6e919061583f565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050509050613ae08660800151846080015183614760565b94505a613aed9083615c60565b91506000613b04838860a001518960c00151614333565b602080890151600090815260079091526040902054909150613b359082906bffffffffffffffffffffffff16615c77565b6020888101805160009081526007909252604080832080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff95861617905590518252902060020154613b9891839116615bb8565b60208881018051600090815260078352604080822060020180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff9687161790558b5192518252808220805486166c0100000000000000000000000073ffffffffffffffffffffffffffffffffffffffff958616021790558b5190921681526008909252902054613c5191839174010000000000000000000000000000000000000000900416615bb8565b60086000896000015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060000160146101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550866000015173ffffffffffffffffffffffffffffffffffffffff1686151588602001517fcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6848b60400151604051613d1d929190615a73565b60405180910390a45050505050613d346001600255565b919050565b613d416137b1565b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a2586137873390565b60006136ac83836147ac565b613da86137b1565b73ffffffffffffffffffffffffffffffffffffffff85163b613df6576040517f09ee12d500000000000000000
000000000000000000000000000000000000000815260040160405180910390fd5b6108fc8463ffffffff161080613e175750600d5463ffffffff908116908516115b15613e4e576040517f14c237fb00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040518060e00160405280836bffffffffffffffffffffffff168152602001600073ffffffffffffffffffffffffffffffffffffffff1681526020018563ffffffff16815260200167ffffffffffffffff801681526020018673ffffffffffffffffffffffffffffffffffffffff16815260200160006bffffffffffffffffffffffff1681526020018473ffffffffffffffffffffffffffffffffffffffff168152506007600088815260200190815260200160002060008201518160000160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550602082015181600001600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060408201518160010160006101000a81548163ffffffff021916908363ffffffff16021790555060608201518160010160046101000a81548167ffffffffffffffff021916908367ffffffffffffffff160217905550608082015181600101600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060a08201518160020160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555060c082015181600201600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550905050816bffffffffffffffffffffffff166011546140bc9190615ba0565b6011556000868152600a6020908152604090912082516140de928401906149ba565b506140ea60058761489f565b50505050505050565b6000806000600c600001600f9054906101000a900462ffffff1662ffffff1690506000808263ffffffff161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b15801561418a57600080fd5b505afa15801561419e573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906141c2919
0615365565b5094509092508491505080156141e657506141dd8242615c60565b8463ffffffff16105b806141f2575060008113155b1561420157600e549550614205565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b15801561426b57600080fd5b505afa15801561427f573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906142a39190615365565b5094509092508491505080156142c757506142be8242615c60565b8463ffffffff16105b806142d3575060008113155b156142e257600f5494506142e6565b8094505b505050509091565b600c54600090614318907201000000000000000000000000000000000000900461ffff1684615c23565b90508180156143265750803a105b156107cc57503a92915050565b6000806143436201388086615ba0565b61434d9085615c23565b600c5490915060009061436a9063ffffffff16633b9aca00615ba0565b600c5490915060009061439090640100000000900463ffffffff1664e8d4a51000615c23565b85836143a086633b9aca00615c23565b6143aa9190615c23565b6143b49190615be8565b6143be9190615ba0565b90506b033b2e3c9fd0803ce8000000811115614406576040517f2ad7547a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b9695505050505050565b3215610b0b576040517fb60ac5db00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff821660009081526008602052604090206001015460ff166144aa576040517fcfbacfd800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b82516bffffffffffffffffffffffff168111156144f3576040517f356680b700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16836020015173ffffffffffffffffffffffffffffffffffffffff16141561455d576040517f06bc104000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b505050565b73ffffffffffffffffffffffffffffffffffffffff81163314156145e2576040517f08c379a0000000000000000000000000000000000000000000000000000000008
15260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610f16565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600082600001828154811061466f5761466f615e0e565b9060005260206000200154905092915050565b60035460ff16610b0b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f5061757361626c653a206e6f74207061757365640000000000000000000000006044820152606401610f16565b60028054141561475a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f5265656e7472616e637947756172643a207265656e7472616e742063616c6c006044820152606401610f16565b60028055565b60005a61138881101561477257600080fd5b61138881039050846040820482031161478a57600080fd5b50823b61479657600080fd5b60008083516020850160008789f1949350505050565b600081815260018301602052604081205480156148955760006147d0600183615c60565b85549091506000906147e490600190615c60565b905081811461484957600086600001828154811061480457614804615e0e565b906000526020600020015490508087600001848154811061482757614827615e0e565b6000918252602080832090910192909255918252600188019052604090208390555b855486908061485a5761485a615ddf565b6001900381819060005260206000200160009055905585600101600086815260200190815260200160002060009055600193505050506107cc565b60009150506107cc565b60008181526001830160205260408120546136ac908490849084906148f0575081546001818101845560008481526020808220909301849055845484825282860190935260409020919091556107cc565b5060006107cc565b50805461490490615cd0565b6000825580601f10614914575050565b601f0160209004906000526020600020908101906136939190614a2e565b8280548282559060005260206000209081019282156149aa579160200282015b828111156149aa5781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673fffffffffffffffffffffffffff
fffffffffffff843516178255602090920191600190910190614952565b506149b6929150614a2e565b5090565b8280546149c690615cd0565b90600052602060002090601f0160209004810192826149e857600085556149aa565b82601f10614a0157805160ff19168380011785556149aa565b828001600101855582156149aa579182015b828111156149aa578251825591602001919060010190614a13565b5b808211156149b65760008155600101614a2f565b803573ffffffffffffffffffffffffffffffffffffffff81168114613d3457600080fd5b60008083601f840112614a7957600080fd5b50813567ffffffffffffffff811115614a9157600080fd5b6020830191508360208260051b8501011115614aac57600080fd5b9250929050565b600082601f830112614ac457600080fd5b81356020614ad9614ad483615b36565b615ae7565b80838252828201915082860187848660051b8901011115614af957600080fd5b60005b85811015614b7957813567ffffffffffffffff811115614b1b57600080fd5b8801603f81018a13614b2c57600080fd5b858101356040614b3e614ad483615b5a565b8281528c82848601011115614b5257600080fd5b828285018a8301376000928101890192909252508552509284019290840190600101614afc565b5090979650505050505050565b600082601f830112614b9757600080fd5b81356020614ba7614ad483615b36565b8281528181019085830160e080860288018501891015614bc657600080fd5b60005b86811015614c785781838b031215614be057600080fd5b614be8615a9a565b614bf184614d70565b8152614bfe878501614a43565b878201526040614c0f818601614d42565b9082015260608481013567ffffffffffffffff81168114614c2f57600080fd5b908201526080614c40858201614a43565b9082015260a0614c51858201614d70565b9082015260c0614c62858201614a43565b9082015285529385019391810191600101614bc9565b509198975050505050505050565b80518015158114613d3457600080fd5b60008083601f840112614ca857600080fd5b50813567ffffffffffffffff811115614cc057600080fd5b602083019150836020828501011115614aac57600080fd5b600082601f830112614ce957600080fd5b8151614cf7614ad482615b5a565b818152846020838601011115614d0c57600080fd5b610ff1826020830160208701615ca4565b803561ffff81168114613d3457600080fd5b803562ffffff81168114613d3457600080fd5b803563ffffffff81168114613d3457600080fd5b805169ffffffffffffffffffff81168114613d3457600080fd5b80356bfff
fffffffffffffffffffff81168114613d3457600080fd5b600060208284031215614d9e57600080fd5b6136ac82614a43565b60008060408385031215614dba57600080fd5b614dc383614a43565b9150614dd160208401614a43565b90509250929050565b60008060408385031215614ded57600080fd5b614df683614a43565b9150602083013560048110614e0a57600080fd5b809150509250929050565b60008060008060608587031215614e2b57600080fd5b614e3485614a43565b935060208501359250604085013567ffffffffffffffff811115614e5757600080fd5b614e6387828801614c96565b95989497509550505050565b600080600080600060808688031215614e8757600080fd5b614e9086614a43565b9450614e9e60208701614d42565b9350614eac60408701614a43565b9250606086013567ffffffffffffffff811115614ec857600080fd5b614ed488828901614c96565b969995985093965092949392505050565b60008060008060408587031215614efb57600080fd5b843567ffffffffffffffff80821115614f1357600080fd5b614f1f88838901614a67565b90965094506020870135915080821115614f3857600080fd5b50614e6387828801614a67565b600080600060408486031215614f5a57600080fd5b833567ffffffffffffffff811115614f7157600080fd5b614f7d86828701614a67565b9094509250614f90905060208501614a43565b90509250925092565b600080600060608486031215614fae57600080fd5b833567ffffffffffffffff80821115614fc657600080fd5b818601915086601f830112614fda57600080fd5b81356020614fea614ad483615b36565b8083825282820191508286018b848660051b890101111561500a57600080fd5b600096505b8487101561502d57803583526001969096019591830191830161500f565b509750508701359250508082111561504457600080fd5b61505087838801614b86565b9350604086013591508082111561506657600080fd5b5061507386828701614ab3565b9150509250925092565b60006020828403121561508f57600080fd5b6136ac82614c86565b600080604083850312156150ab57600080fd5b6150b483614c86565b9150602083015167ffffffffffffffff8111156150d057600080fd5b6150dc85828601614cd8565b9150509250929050565b600080602083850312156150f957600080fd5b823567ffffffffffffffff81111561511057600080fd5b61511c85828601614c96565b90969095509350505050565b60006020828403121561513a57600080fd5b815167ffffffffffffffff81111561515157600080fd5b610ff184828501614cd8565
b60006020828403121561516f57600080fd5b8151600381106136ac57600080fd5b6000610180828403121561519157600080fd5b615199615ac3565b6151a283614d42565b81526151b060208401614d42565b60208201526151c160408401614d2f565b60408201526151d260608401614d42565b60608201526151e360808401614d2f565b60808201526151f460a08401614d1d565b60a082015261520560c08401614d70565b60c082015261521660e08401614d42565b60e08201526101008381013590820152610120808401359082015261014061523f818501614a43565b90820152610160615251848201614a43565b908201529392505050565b60006020828403121561526e57600080fd5b5035919050565b60006020828403121561528757600080fd5b5051919050565b600080604083850312156152a157600080fd5b82359150614dd160208401614a43565b6000806000604084860312156152c657600080fd5b83359250602084013567ffffffffffffffff8111156152e457600080fd5b6152f086828701614c96565b9497909650939450505050565b6000806040838503121561531057600080fd5b50508035926020909101359150565b6000806040838503121561533257600080fd5b82359150614dd160208401614d42565b6000806040838503121561535557600080fd5b82359150614dd160208401614d70565b600080600080600060a0868803121561537d57600080fd5b61538686614d56565b94506020860151935060408601519250606086015191506153a960808701614d56565b90509295509295909350565b8183526000602080850194508260005b858110156153fe5773ffffffffffffffffffffffffffffffffffffffff6153eb83614a43565b16875295820195908201906001016153c5565b509495945050505050565b600081518084526020808501808196508360051b8101915082860160005b8581101561545157828403895261543f84835161545e565b98850198935090840190600101615427565b5091979650505050505050565b60008151808452615476816020860160208601615ca4565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b600381106154b8576154b8615db0565b9052565b805163ffffffff16825260208101516154dd602084018263ffffffff169052565b5060408101516154f4604084018262ffffff169052565b50606081015161550c606084018263ffffffff169052565b506080810151615523608084018262ffffff169052565b5060a081015161553960a084018261ffff169052565b5060c081015161555960c08
401826bffffffffffffffffffffffff169052565b5060e081015161557160e084018263ffffffff169052565b50610100818101519083015261012080820151908301526101408082015173ffffffffffffffffffffffffffffffffffffffff81168285015250506101608181015173ffffffffffffffffffffffffffffffffffffffff8116848301525b50505050565b600082516155e7818460208701615ca4565b9190910192915050565b600061010073ffffffffffffffffffffffffffffffffffffffff808c16845263ffffffff8b16602085015281604085015261562e8285018b61545e565b6bffffffffffffffffffffffff998a16606086015297811660808501529590951660a08301525067ffffffffffffffff9290921660c083015290931660e090930192909252949350505050565b60408152600061568f6040830186886153b5565b82810360208401526156a28185876153b5565b979650505050505050565b60006060808352858184015260807f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8711156156e857600080fd5b8660051b808983870137808501905081810160008152602083878403018188015281895180845260a093508385019150828b01945060005b818110156157d757855180516bffffffffffffffffffffffff1684528481015173ffffffffffffffffffffffffffffffffffffffff9081168686015260408083015163ffffffff16908601528982015167ffffffffffffffff168a860152888201511688850152858101516157a4878601826bffffffffffffffffffffffff169052565b5060c09081015173ffffffffffffffffffffffffffffffffffffffff16908401529483019460e090920191600101615720565b505087810360408901526157eb818a615409565b9c9b505050505050505050505050565b6020808252825182820181905260009190848201906040850190845b8181101561583357835183529284019291840191600101615817565b50909695505050505050565b6020815260006136ac602083018461545e565b60a08152600061586560a083018861545e565b90508560208301528460408301528360608301528260808301529695505050505050565b600060208083526000845481600182811c9150808316806158ab57607f831692505b8583108114156158e2577f4e487b710000000000000000000000000000000000000000000000000000000085526022600452602485fd5b8786018381526020018180156158ff576001811461592e57615959565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff008616825287820196506159595
65b60008b81526020902060005b868110156159535781548482015290850190890161593a565b83019750505b50949998505050505050505050565b602081016004831061597c5761597c615db0565b91905290565b602081016107cc82846154a8565b61599a81856154a8565b6159a760208201846154a8565b606060408201526000611b7e606083018461545e565b61018081016107cc82846154bc565b600061022080830163ffffffff875116845260206bffffffffffffffffffffffff8189015116818601526040880151604086015260608801516060860152615a1760808601886154bc565b6102008501929092528451908190526102408401918086019160005b81811015615a6557835173ffffffffffffffffffffffffffffffffffffffff1685529382019392820192600101615a33565b509298975050505050505050565b6bffffffffffffffffffffffff83168152604060208201526000610ff1604083018461545e565b60405160e0810167ffffffffffffffff81118282101715615abd57615abd615e3d565b60405290565b604051610180810167ffffffffffffffff81118282101715615abd57615abd615e3d565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715615b2e57615b2e615e3d565b604052919050565b600067ffffffffffffffff821115615b5057615b50615e3d565b5060051b60200190565b600067ffffffffffffffff821115615b7457615b74615e3d565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b60008219821115615bb357615bb3615d81565b500190565b60006bffffffffffffffffffffffff808316818516808303821115615bdf57615bdf615d81565b01949350505050565b600082615c1e577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615615c5b57615c5b615d81565b500290565b600082821015615c7257615c72615d81565b500390565b60006bffffffffffffffffffffffff83811690831681811015615c9c57615c9c615d81565b039392505050565b60005b83811015615cbf578181015183820152602001615ca7565b838111156155cf5750506000910152565b600181811c90821680615ce457607f821691505b60208210811415615d1e577f4e487b7100000000000000000000000000000000000000000000000000000000600052602
260045260246000fd5b50919050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415615d5657615d56615d81565b5060010190565b600063ffffffff80831681811415615d7757615d77615d81565b6001019392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var KeeperRegistryABI = KeeperRegistryMetaData.ABI + +var KeeperRegistryBin = KeeperRegistryMetaData.Bin + +func DeployKeeperRegistry(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, linkEthFeed common.Address, fastGasFeed common.Address, config Config) (common.Address, *types.Transaction, *KeeperRegistry, error) { + parsed, err := KeeperRegistryMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistryBin), backend, link, linkEthFeed, fastGasFeed, config) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistry{address: address, abi: *parsed, KeeperRegistryCaller: KeeperRegistryCaller{contract: contract}, KeeperRegistryTransactor: KeeperRegistryTransactor{contract: contract}, KeeperRegistryFilterer: KeeperRegistryFilterer{contract: contract}}, nil +} + +type KeeperRegistry struct { + address common.Address + abi abi.ABI + KeeperRegistryCaller + KeeperRegistryTransactor + KeeperRegistryFilterer +} + +type KeeperRegistryCaller struct { + contract *bind.BoundContract +} + +type 
KeeperRegistryTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistryFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistrySession struct { + Contract *KeeperRegistry + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistryCallerSession struct { + Contract *KeeperRegistryCaller + CallOpts bind.CallOpts +} + +type KeeperRegistryTransactorSession struct { + Contract *KeeperRegistryTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistryRaw struct { + Contract *KeeperRegistry +} + +type KeeperRegistryCallerRaw struct { + Contract *KeeperRegistryCaller +} + +type KeeperRegistryTransactorRaw struct { + Contract *KeeperRegistryTransactor +} + +func NewKeeperRegistry(address common.Address, backend bind.ContractBackend) (*KeeperRegistry, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistry(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistry{address: address, abi: abi, KeeperRegistryCaller: KeeperRegistryCaller{contract: contract}, KeeperRegistryTransactor: KeeperRegistryTransactor{contract: contract}, KeeperRegistryFilterer: KeeperRegistryFilterer{contract: contract}}, nil +} + +func NewKeeperRegistryCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryCaller, error) { + contract, err := bindKeeperRegistry(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryCaller{contract: contract}, nil +} + +func NewKeeperRegistryTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistryTransactor, error) { + contract, err := bindKeeperRegistry(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryTransactor{contract: contract}, nil +} + +func NewKeeperRegistryFilterer(address common.Address, filterer bind.ContractFilterer) 
(*KeeperRegistryFilterer, error) { + contract, err := bindKeeperRegistry(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistryFilterer{contract: contract}, nil +} + +func bindKeeperRegistry(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistryMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperRegistry *KeeperRegistryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistry.Contract.KeeperRegistryCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.Contract.KeeperRegistryTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistry *KeeperRegistryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistry.Contract.KeeperRegistryTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistry.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistry.Contract.contract.Transact(opts, method, params...) 
+} + +func (_KeeperRegistry *KeeperRegistryCaller) FASTGASFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "FAST_GAS_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) FASTGASFEED() (common.Address, error) { + return _KeeperRegistry.Contract.FASTGASFEED(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) FASTGASFEED() (common.Address, error) { + return _KeeperRegistry.Contract.FASTGASFEED(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) PLI() (common.Address, error) { + return _KeeperRegistry.Contract.PLI(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) PLI() (common.Address, error) { + return _KeeperRegistry.Contract.PLI(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) PLIETHFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "PLI_ETH_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) PLIETHFEED() (common.Address, error) { + return _KeeperRegistry.Contract.PLIETHFEED(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) PLIETHFEED() (common.Address, error) { + return 
_KeeperRegistry.Contract.PLIETHFEED(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getActiveUpkeepIDs", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _KeeperRegistry.Contract.GetActiveUpkeepIDs(&_KeeperRegistry.CallOpts, startIndex, maxCount) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _KeeperRegistry.Contract.GetActiveUpkeepIDs(&_KeeperRegistry.CallOpts, startIndex, maxCount) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetKeeperInfo(opts *bind.CallOpts, query common.Address) (GetKeeperInfo, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getKeeperInfo", query) + + outstruct := new(GetKeeperInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Payee = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.Active = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.Balance = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetKeeperInfo(query common.Address) (GetKeeperInfo, + + error) { + return _KeeperRegistry.Contract.GetKeeperInfo(&_KeeperRegistry.CallOpts, query) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetKeeperInfo(query common.Address) (GetKeeperInfo, + + error) { + return _KeeperRegistry.Contract.GetKeeperInfo(&_KeeperRegistry.CallOpts, query) +} + +func (_KeeperRegistry *KeeperRegistryCaller) 
GetMaxPaymentForGas(opts *bind.CallOpts, gasLimit *big.Int) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getMaxPaymentForGas", gasLimit) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetMaxPaymentForGas(gasLimit *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMaxPaymentForGas(&_KeeperRegistry.CallOpts, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetMaxPaymentForGas(gasLimit *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMaxPaymentForGas(&_KeeperRegistry.CallOpts, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getMinBalanceForUpkeep", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMinBalanceForUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMinBalanceForUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getPeerRegistryMigrationPermission", peer) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetPeerRegistryMigrationPermission(peer 
common.Address) (uint8, error) { + return _KeeperRegistry.Contract.GetPeerRegistryMigrationPermission(&_KeeperRegistry.CallOpts, peer) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return _KeeperRegistry.Contract.GetPeerRegistryMigrationPermission(&_KeeperRegistry.CallOpts, peer) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetState(opts *bind.CallOpts) (GetState, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getState") + + outstruct := new(GetState) + if err != nil { + return *outstruct, err + } + + outstruct.State = *abi.ConvertType(out[0], new(State)).(*State) + outstruct.Config = *abi.ConvertType(out[1], new(Config)).(*Config) + outstruct.Keepers = *abi.ConvertType(out[2], new([]common.Address)).(*[]common.Address) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetState() (GetState, + + error) { + return _KeeperRegistry.Contract.GetState(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetState() (GetState, + + error) { + return _KeeperRegistry.Contract.GetState(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetUpkeep(opts *bind.CallOpts, id *big.Int) (GetUpkeep, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getUpkeep", id) + + outstruct := new(GetUpkeep) + if err != nil { + return *outstruct, err + } + + outstruct.Target = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.ExecuteGas = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.CheckData = *abi.ConvertType(out[2], new([]byte)).(*[]byte) + outstruct.Balance = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.LastKeeper = *abi.ConvertType(out[4], new(common.Address)).(*common.Address) + outstruct.Admin = *abi.ConvertType(out[5], new(common.Address)).(*common.Address) + 
outstruct.MaxValidBlocknumber = *abi.ConvertType(out[6], new(uint64)).(*uint64) + outstruct.AmountSpent = *abi.ConvertType(out[7], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetUpkeep(id *big.Int) (GetUpkeep, + + error) { + return _KeeperRegistry.Contract.GetUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetUpkeep(id *big.Int) (GetUpkeep, + + error) { + return _KeeperRegistry.Contract.GetUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) Owner() (common.Address, error) { + return _KeeperRegistry.Contract.Owner(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) Owner() (common.Address, error) { + return _KeeperRegistry.Contract.Owner(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) Paused(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "paused") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) Paused() (bool, error) { + return _KeeperRegistry.Contract.Paused(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) Paused() (bool, error) { + return _KeeperRegistry.Contract.Paused(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, 
"typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) TypeAndVersion() (string, error) { + return _KeeperRegistry.Contract.TypeAndVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) TypeAndVersion() (string, error) { + return _KeeperRegistry.Contract.TypeAndVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "upkeepTranscoderVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) UpkeepTranscoderVersion() (uint8, error) { + return _KeeperRegistry.Contract.UpkeepTranscoderVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) UpkeepTranscoderVersion() (uint8, error) { + return _KeeperRegistry.Contract.UpkeepTranscoderVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "acceptOwnership") +} + +func (_KeeperRegistry *KeeperRegistrySession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptOwnership(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptOwnership(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AcceptPayeeship(opts *bind.TransactOpts, keeper common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "acceptPayeeship", keeper) +} + +func 
(_KeeperRegistry *KeeperRegistrySession) AcceptPayeeship(keeper common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptPayeeship(&_KeeperRegistry.TransactOpts, keeper) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AcceptPayeeship(keeper common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptPayeeship(&_KeeperRegistry.TransactOpts, keeper) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "addFunds", id, amount) +} + +func (_KeeperRegistry *KeeperRegistrySession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AddFunds(&_KeeperRegistry.TransactOpts, id, amount) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AddFunds(&_KeeperRegistry.TransactOpts, id, amount) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "cancelUpkeep", id) +} + +func (_KeeperRegistry *KeeperRegistrySession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CancelUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CancelUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) CheckUpkeep(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "checkUpkeep", id, from) +} + +func (_KeeperRegistry *KeeperRegistrySession) CheckUpkeep(id *big.Int, from common.Address) 
(*types.Transaction, error) { + return _KeeperRegistry.Contract.CheckUpkeep(&_KeeperRegistry.TransactOpts, id, from) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) CheckUpkeep(id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CheckUpkeep(&_KeeperRegistry.TransactOpts, id, from) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "migrateUpkeeps", ids, destination) +} + +func (_KeeperRegistry *KeeperRegistrySession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.MigrateUpkeeps(&_KeeperRegistry.TransactOpts, ids, destination) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.MigrateUpkeeps(&_KeeperRegistry.TransactOpts, ids, destination) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistrySession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.OnTokenTransfer(&_KeeperRegistry.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.OnTokenTransfer(&_KeeperRegistry.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { + return 
_KeeperRegistry.contract.Transact(opts, "pause") +} + +func (_KeeperRegistry *KeeperRegistrySession) Pause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Pause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Pause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Pause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) PerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "performUpkeep", id, performData) +} + +func (_KeeperRegistry *KeeperRegistrySession) PerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.PerformUpkeep(&_KeeperRegistry.TransactOpts, id, performData) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) PerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.PerformUpkeep(&_KeeperRegistry.TransactOpts, id, performData) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "receiveUpkeeps", encodedUpkeeps) +} + +func (_KeeperRegistry *KeeperRegistrySession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.ReceiveUpkeeps(&_KeeperRegistry.TransactOpts, encodedUpkeeps) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.ReceiveUpkeeps(&_KeeperRegistry.TransactOpts, encodedUpkeeps) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "recoverFunds") +} + +func (_KeeperRegistry *KeeperRegistrySession) 
RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.RecoverFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.RecoverFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "registerUpkeep", target, gasLimit, admin, checkData) +} + +func (_KeeperRegistry *KeeperRegistrySession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.RegisterUpkeep(&_KeeperRegistry.TransactOpts, target, gasLimit, admin, checkData) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.RegisterUpkeep(&_KeeperRegistry.TransactOpts, target, gasLimit, admin, checkData) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetConfig(opts *bind.TransactOpts, config Config) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setConfig", config) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetConfig(config Config) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetConfig(&_KeeperRegistry.TransactOpts, config) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetConfig(config Config) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetConfig(&_KeeperRegistry.TransactOpts, config) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetKeepers(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return 
_KeeperRegistry.contract.Transact(opts, "setKeepers", keepers, payees) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetKeepers(keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetKeepers(&_KeeperRegistry.TransactOpts, keepers, payees) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetKeepers(keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetKeepers(&_KeeperRegistry.TransactOpts, keepers, payees) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setPeerRegistryMigrationPermission", peer, permission) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetPeerRegistryMigrationPermission(&_KeeperRegistry.TransactOpts, peer, permission) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetPeerRegistryMigrationPermission(&_KeeperRegistry.TransactOpts, peer, permission) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setUpkeepGasLimit", id, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetUpkeepGasLimit(&_KeeperRegistry.TransactOpts, id, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) 
(*types.Transaction, error) { + return _KeeperRegistry.Contract.SetUpkeepGasLimit(&_KeeperRegistry.TransactOpts, id, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transferOwnership", to) +} + +func (_KeeperRegistry *KeeperRegistrySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferOwnership(&_KeeperRegistry.TransactOpts, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferOwnership(&_KeeperRegistry.TransactOpts, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) TransferPayeeship(opts *bind.TransactOpts, keeper common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transferPayeeship", keeper, proposed) +} + +func (_KeeperRegistry *KeeperRegistrySession) TransferPayeeship(keeper common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferPayeeship(&_KeeperRegistry.TransactOpts, keeper, proposed) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) TransferPayeeship(keeper common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferPayeeship(&_KeeperRegistry.TransactOpts, keeper, proposed) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "unpause") +} + +func (_KeeperRegistry *KeeperRegistrySession) Unpause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Unpause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Unpause() (*types.Transaction, error) { + return 
_KeeperRegistry.Contract.Unpause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "withdrawFunds", id, to) +} + +func (_KeeperRegistry *KeeperRegistrySession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawFunds(&_KeeperRegistry.TransactOpts, id, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawFunds(&_KeeperRegistry.TransactOpts, id, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "withdrawOwnerFunds") +} + +func (_KeeperRegistry *KeeperRegistrySession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawOwnerFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawOwnerFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "withdrawPayment", from, to) +} + +func (_KeeperRegistry *KeeperRegistrySession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawPayment(&_KeeperRegistry.TransactOpts, from, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return 
_KeeperRegistry.Contract.WithdrawPayment(&_KeeperRegistry.TransactOpts, from, to) +} + +type KeeperRegistryConfigSetIterator struct { + Event *KeeperRegistryConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryConfigSet struct { + Config Config + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryConfigSetIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &KeeperRegistryConfigSetIterator{contract: _KeeperRegistry.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryConfigSet) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + 
select { + case log := <-logs: + + event := new(KeeperRegistryConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseConfigSet(log types.Log) (*KeeperRegistryConfigSet, error) { + event := new(KeeperRegistryConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryFundsAddedIterator struct { + Event *KeeperRegistryFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryFundsAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterFundsAdded(opts *bind.FilterOpts, id 
[]*big.Int, from []common.Address) (*KeeperRegistryFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryFundsAddedIterator{contract: _KeeperRegistry.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryFundsAdded) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseFundsAdded(log types.Log) (*KeeperRegistryFundsAdded, error) { + event := new(KeeperRegistryFundsAdded) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryFundsWithdrawnIterator struct { + Event *KeeperRegistryFundsWithdrawn + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryFundsWithdrawnIterator{contract: _KeeperRegistry.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, 
"FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseFundsWithdrawn(log types.Log) (*KeeperRegistryFundsWithdrawn, error) { + event := new(KeeperRegistryFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryKeepersUpdatedIterator struct { + Event *KeeperRegistryKeepersUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryKeepersUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryKeepersUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryKeepersUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryKeepersUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryKeepersUpdatedIterator) Close() error { + it.sub.Unsubscribe() + 
return nil +} + +type KeeperRegistryKeepersUpdated struct { + Keepers []common.Address + Payees []common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterKeepersUpdated(opts *bind.FilterOpts) (*KeeperRegistryKeepersUpdatedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "KeepersUpdated") + if err != nil { + return nil, err + } + return &KeeperRegistryKeepersUpdatedIterator{contract: _KeeperRegistry.contract, event: "KeepersUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchKeepersUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryKeepersUpdated) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "KeepersUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryKeepersUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "KeepersUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseKeepersUpdated(log types.Log) (*KeeperRegistryKeepersUpdated, error) { + event := new(KeeperRegistryKeepersUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "KeepersUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnerFundsWithdrawnIterator struct { + Event *KeeperRegistryOwnerFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnerFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if 
it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &KeeperRegistryOwnerFundsWithdrawnIterator{contract: _KeeperRegistry.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnerFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): 
+ return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryOwnerFundsWithdrawn, error) { + event := new(KeeperRegistryOwnerFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnershipTransferRequestedIterator struct { + Event *KeeperRegistryOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) 
(*KeeperRegistryOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryOwnershipTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnershipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryOwnershipTransferRequested, error) { + event := new(KeeperRegistryOwnershipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != 
nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnershipTransferredIterator struct { + Event *KeeperRegistryOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryOwnershipTransferredIterator{contract: _KeeperRegistry.contract, event: 
"OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnershipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryOwnershipTransferred, error) { + event := new(KeeperRegistryOwnershipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPausedIterator struct { + Event *KeeperRegistryPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryPausedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &KeeperRegistryPausedIterator{contract: _KeeperRegistry.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePaused(log types.Log) (*KeeperRegistryPaused, error) { + event := new(KeeperRegistryPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + 
} + event.Raw = log + return event, nil +} + +type KeeperRegistryPayeeshipTransferRequestedIterator struct { + Event *KeeperRegistryPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPayeeshipTransferRequested struct { + Keeper common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferRequestedIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := 
_KeeperRegistry.contract.FilterLogs(opts, "PayeeshipTransferRequested", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPayeeshipTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferRequested, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PayeeshipTransferRequested", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPayeeshipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryPayeeshipTransferRequested, error) { + event := new(KeeperRegistryPayeeshipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPayeeshipTransferredIterator 
struct { + Event *KeeperRegistryPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPayeeshipTransferred struct { + Keeper common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferredIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PayeeshipTransferred", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return 
&KeeperRegistryPayeeshipTransferredIterator{contract: _KeeperRegistry.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferred, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PayeeshipTransferred", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPayeeshipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryPayeeshipTransferred, error) { + event := new(KeeperRegistryPayeeshipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPaymentWithdrawnIterator struct { + Event *KeeperRegistryPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*KeeperRegistryPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPaymentWithdrawn struct { + Keeper common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, keeper []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryPaymentWithdrawnIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PaymentWithdrawn", keeperRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPaymentWithdrawnIterator{contract: _KeeperRegistry.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- 
*KeeperRegistryPaymentWithdrawn, keeper []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PaymentWithdrawn", keeperRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPaymentWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryPaymentWithdrawn, error) { + event := new(KeeperRegistryPaymentWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUnpausedIterator struct { + Event *KeeperRegistryUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return 
true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryUnpausedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &KeeperRegistryUnpausedIterator{contract: _KeeperRegistry.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUnpaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUnpaused(log types.Log) (*KeeperRegistryUnpaused, error) { + event := new(KeeperRegistryUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return 
nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepCanceledIterator struct { + Event *KeeperRegistryUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepCanceledIterator{contract: _KeeperRegistry.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil 
+} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepCanceled) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepCanceled(log types.Log) (*KeeperRegistryUpkeepCanceled, error) { + event := new(KeeperRegistryUpkeepCanceled) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepGasLimitSetIterator struct { + Event *KeeperRegistryUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepGasLimitSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepGasLimitSetIterator{contract: _KeeperRegistry.contract, event: "UpkeepGasLimitSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepGasLimitSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case 
err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryUpkeepGasLimitSet, error) { + event := new(KeeperRegistryUpkeepGasLimitSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepMigratedIterator struct { + Event *KeeperRegistryUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepMigrated struct { + Id *big.Int + RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + 
} + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepMigratedIterator{contract: _KeeperRegistry.contract, event: "UpkeepMigrated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepMigrated) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepMigrated(log types.Log) (*KeeperRegistryUpkeepMigrated, error) { + event := new(KeeperRegistryUpkeepMigrated) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepPerformedIterator struct { + Event *KeeperRegistryUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err 
!= nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepPerformed struct { + Id *big.Int + Success bool + From common.Address + Payment *big.Int + PerformData []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool, from []common.Address) (*KeeperRegistryUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepPerformedIterator{contract: _KeeperRegistry.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPerformed, id []*big.Int, success []bool, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { 
+ successRule = append(successRule, successItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepPerformed) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepPerformed(log types.Log) (*KeeperRegistryUpkeepPerformed, error) { + event := new(KeeperRegistryUpkeepPerformed) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepReceivedIterator struct { + Event *KeeperRegistryUpkeepReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := 
<-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepReceivedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepReceived struct { + Id *big.Int + StartingBalance *big.Int + ImportedFrom common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepReceivedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepReceivedIterator{contract: _KeeperRegistry.contract, event: "UpkeepReceived", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepReceived, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepReceived) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepReceived(log types.Log) (*KeeperRegistryUpkeepReceived, error) { + event := 
new(KeeperRegistryUpkeepReceived) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepRegisteredIterator struct { + Event *KeeperRegistryUpkeepRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepRegistered struct { + Id *big.Int + ExecuteGas uint32 + Admin common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepRegisteredIterator{contract: _KeeperRegistry.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func 
(_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepRegistered) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepRegistered(log types.Log) (*KeeperRegistryUpkeepRegistered, error) { + event := new(KeeperRegistryUpkeepRegistered) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetKeeperInfo struct { + Payee common.Address + Active bool + Balance *big.Int +} +type GetState struct { + State State + Config Config + Keepers []common.Address +} +type GetUpkeep struct { + Target common.Address + ExecuteGas uint32 + CheckData []byte + Balance *big.Int + LastKeeper common.Address + Admin common.Address + MaxValidBlocknumber uint64 + AmountSpent *big.Int +} + +func (_KeeperRegistry *KeeperRegistry) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistry.abi.Events["ConfigSet"].ID: + return _KeeperRegistry.ParseConfigSet(log) + case _KeeperRegistry.abi.Events["FundsAdded"].ID: + return _KeeperRegistry.ParseFundsAdded(log) + case 
_KeeperRegistry.abi.Events["FundsWithdrawn"].ID: + return _KeeperRegistry.ParseFundsWithdrawn(log) + case _KeeperRegistry.abi.Events["KeepersUpdated"].ID: + return _KeeperRegistry.ParseKeepersUpdated(log) + case _KeeperRegistry.abi.Events["OwnerFundsWithdrawn"].ID: + return _KeeperRegistry.ParseOwnerFundsWithdrawn(log) + case _KeeperRegistry.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistry.ParseOwnershipTransferRequested(log) + case _KeeperRegistry.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistry.ParseOwnershipTransferred(log) + case _KeeperRegistry.abi.Events["Paused"].ID: + return _KeeperRegistry.ParsePaused(log) + case _KeeperRegistry.abi.Events["PayeeshipTransferRequested"].ID: + return _KeeperRegistry.ParsePayeeshipTransferRequested(log) + case _KeeperRegistry.abi.Events["PayeeshipTransferred"].ID: + return _KeeperRegistry.ParsePayeeshipTransferred(log) + case _KeeperRegistry.abi.Events["PaymentWithdrawn"].ID: + return _KeeperRegistry.ParsePaymentWithdrawn(log) + case _KeeperRegistry.abi.Events["Unpaused"].ID: + return _KeeperRegistry.ParseUnpaused(log) + case _KeeperRegistry.abi.Events["UpkeepCanceled"].ID: + return _KeeperRegistry.ParseUpkeepCanceled(log) + case _KeeperRegistry.abi.Events["UpkeepGasLimitSet"].ID: + return _KeeperRegistry.ParseUpkeepGasLimitSet(log) + case _KeeperRegistry.abi.Events["UpkeepMigrated"].ID: + return _KeeperRegistry.ParseUpkeepMigrated(log) + case _KeeperRegistry.abi.Events["UpkeepPerformed"].ID: + return _KeeperRegistry.ParseUpkeepPerformed(log) + case _KeeperRegistry.abi.Events["UpkeepReceived"].ID: + return _KeeperRegistry.ParseUpkeepReceived(log) + case _KeeperRegistry.abi.Events["UpkeepRegistered"].ID: + return _KeeperRegistry.ParseUpkeepRegistered(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistryConfigSet) Topic() common.Hash { + return 
common.HexToHash("0xfe125a41957477226ba20f85ef30a4024ea3bb8d066521ddc16df3f2944de325") +} + +func (KeeperRegistryFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (KeeperRegistryFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (KeeperRegistryKeepersUpdated) Topic() common.Hash { + return common.HexToHash("0x056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f") +} + +func (KeeperRegistryOwnerFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1") +} + +func (KeeperRegistryOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistryOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistryPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (KeeperRegistryPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (KeeperRegistryPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (KeeperRegistryPaymentWithdrawn) Topic() common.Hash { + return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (KeeperRegistryUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (KeeperRegistryUpkeepCanceled) Topic() common.Hash { + return 
common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (KeeperRegistryUpkeepGasLimitSet) Topic() common.Hash { + return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c") +} + +func (KeeperRegistryUpkeepMigrated) Topic() common.Hash { + return common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff") +} + +func (KeeperRegistryUpkeepPerformed) Topic() common.Hash { + return common.HexToHash("0xcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6") +} + +func (KeeperRegistryUpkeepReceived) Topic() common.Hash { + return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71") +} + +func (KeeperRegistryUpkeepRegistered) Topic() common.Hash { + return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + +func (_KeeperRegistry *KeeperRegistry) Address() common.Address { + return _KeeperRegistry.address +} + +type KeeperRegistryInterface interface { + FASTGASFEED(opts *bind.CallOpts) (common.Address, error) + + PLI(opts *bind.CallOpts) (common.Address, error) + + PLIETHFEED(opts *bind.CallOpts) (common.Address, error) + + GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetKeeperInfo(opts *bind.CallOpts, query common.Address) (GetKeeperInfo, + + error) + + GetMaxPaymentForGas(opts *bind.CallOpts, gasLimit *big.Int) (*big.Int, error) + + GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) + + GetState(opts *bind.CallOpts) (GetState, + + error) + + GetUpkeep(opts *bind.CallOpts, id *big.Int) (GetUpkeep, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Paused(opts *bind.CallOpts) (bool, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + UpkeepTranscoderVersion(opts *bind.CallOpts) 
(uint8, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptPayeeship(opts *bind.TransactOpts, keeper common.Address) (*types.Transaction, error) + + AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) + + CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + CheckUpkeep(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) + + MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) (*types.Transaction, error) + + PerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) + + ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, config Config) (*types.Transaction, error) + + SetKeepers(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error) + + SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) + + SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + TransferPayeeship(opts *bind.TransactOpts, keeper common.Address, proposed common.Address) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) 
(*types.Transaction, error) + + WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*KeeperRegistryConfigSet, error) + + FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*KeeperRegistryFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsWithdrawn, id []*big.Int) (event.Subscription, error) + + ParseFundsWithdrawn(log types.Log) (*KeeperRegistryFundsWithdrawn, error) + + FilterKeepersUpdated(opts *bind.FilterOpts) (*KeeperRegistryKeepersUpdatedIterator, error) + + WatchKeepersUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryKeepersUpdated) (event.Subscription, error) + + ParseKeepersUpdated(log types.Log) (*KeeperRegistryKeepersUpdated, error) + + FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryOwnerFundsWithdrawnIterator, error) + + WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnerFundsWithdrawn) (event.Subscription, error) + + ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryOwnerFundsWithdrawn, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- 
*KeeperRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistryOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*KeeperRegistryPaused, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferRequested, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferred, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, keeper []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryPaymentWithdrawnIterator, 
error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaymentWithdrawn, keeper []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryPaymentWithdrawn, error) + + FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*KeeperRegistryUnpaused, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*KeeperRegistryUpkeepCanceled, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepGasLimitSetIterator, error) + + WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepMigrated(log types.Log) (*KeeperRegistryUpkeepMigrated, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool, from []common.Address) (*KeeperRegistryUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPerformed, id []*big.Int, success []bool, from []common.Address) (event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*KeeperRegistryUpkeepPerformed, error) + + FilterUpkeepReceived(opts 
*bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*KeeperRegistryUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*KeeperRegistryUpkeepRegistered, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_registry_wrapper1_3/keeper_registry_wrapper1_3.go b/core/gethwrappers/generated/keeper_registry_wrapper1_3/keeper_registry_wrapper1_3.go new file mode 100644 index 00000000..c694ef6e --- /dev/null +++ b/core/gethwrappers/generated/keeper_registry_wrapper1_3/keeper_registry_wrapper1_3.go @@ -0,0 +1,4441 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package keeper_registry_wrapper1_3 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type Config struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + BlockCountPerTurn *big.Int + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrar common.Address +} + +type State struct { + Nonce uint32 + OwnerLinkBalance *big.Int + ExpectedLinkBalance *big.Int + NumUpkeeps *big.Int +} + +var KeeperRegistryMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"contractKeeperRegistryLogic1_3\",\"name\":\"keeperRegistryLogic\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"internalType\":\"structConfig\",\"name\":\"config\",\"type\":\"tuple\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyAddress\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientFunds\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"KeepersMustTakeTurns\",\"type
\":\"error\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveKeepers\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyPausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUnpausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepCancelled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16
\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"indexed\":false,\"internalType\":\"structConfig\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"KeepersUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\
"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"i
ndexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"UpkeepCheckDataUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepPaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"
bool\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepUnpaused\",\"type\":\"event\"},{\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"ARB_NITRO_ORACLE\",\"outputs\":[{\"internalType\":\"contractArbGasInfo\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"FAST_GAS_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"KEEPER_REGISTRY_LOGIC\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"fun
ction\"},{\"inputs\":[],\"name\":\"PLI_ETH_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OPTIMISM_ORACLE\",\"outputs\":[{\"internalType\":\"contractOVM_GasPriceOracle\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PAYMENT_MODEL\",\"outputs\":[{\"internalType\":\"enumKeeperRegistryBase1_3.PaymentModel\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"REGISTRY_GAS_OVERHEAD\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"acceptUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"cancelUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"na
me\":\"maxLinkPayment\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"adjustedGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkEth\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveUpkeepIDs\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getKeeperInfo\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"getMaxPaymentForGas\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"maxPayment\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"minBalance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"}],\"name\":\"getPeerRegistryMigrationPermission\",\"outputs\":[{\"internalType\":\"enumKeeperRegistryBase1_3.MigrationPermission\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getState\",\"outputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"nonce\",\"type\":\"uint3
2\"},{\"internalType\":\"uint96\",\"name\":\"ownerLinkBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"expectedLinkBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"numUpkeeps\",\"type\":\"uint256\"}],\"internalType\":\"structState\",\"name\":\"state\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"internalType\":\"structConfig\",\"name\":\"config\",\"type\":\"tuple\"},{\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getUpkeep\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"lastKeeper\",\"type\":\"address\"},{\"internalType\":\"add
ress\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"},{\"internalType\":\"uint96\",\"name\":\"amountSpent\",\"type\":\"uint96\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"ids\",\"type\":\"uint256[]\"},{\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"migrateUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"pauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"encodedUpkeeps\",\"type\":\"bytes\"}],\"name\":\"receiveUpkeeps\",\"outputs\":[],\"stateMutabilit
y\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"blockCountPerTurn\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"internalType\":\"structConfig\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"keepers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"setKeepers\",\"outputs\":[],\"stateMutabil
ity\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"},{\"internalType\":\"enumKeeperRegistryBase1_3.MigrationPermission\",\"name\":\"permission\",\"type\":\"uint8\"}],\"name\":\"setPeerRegistryMigrationPermission\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"setUpkeepGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"keeper\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"unpauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"updateCheckData\",\"outputs\":[],\"sta
teMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepTranscoderVersion\",\"outputs\":[{\"internalType\":\"enumUpkeepFormat\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawOwnerFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", + Bin: "0x6101806040527f420000000000000000000000000000000000000f00000000000000000000000060e0526c6c000000000000000000000000610100523480156200004857600080fd5b50604051620048cd380380620048cd8339810160408190526200006b91620008a7565b816001600160a01b0316638811cbe86040518163ffffffff1660e01b815260040160206040518083038186803b158015620000a557600080fd5b505afa158015620000ba573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620000e09190620009ce565b826001600160a01b0316635077b2106040518163ffffffff1660e01b815260040160206040518083038186803b1580156200011a57600080fd5b505afa1580156200012f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001559190620009f1565b836001600160a01b0316631b6b6d236040518163ffffffff1660e01b815260040160206040518083038186803b1580156200018f57600080fd5b505afa158015620001a4573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001ca919062000880565b846001600160a01b031663ad1783616040518163ffffffff1660e01b815260040160206040518083038186803b1580156200020457600080fd5b505afa15801562000219573d600080
3e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200023f919062000880565b856001600160a01b0316634584a4196040518163ffffffff1660e01b815260040160206040518083038186803b1580156200027957600080fd5b505afa1580156200028e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620002b4919062000880565b33806000816200030b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156200033e576200033e816200041b565b5050600160029081556003805460ff1916905586915081111562000366576200036662000b42565b6101208160028111156200037e576200037e62000b42565b60f81b9052506101408490526001600160a01b0383161580620003a857506001600160a01b038216155b80620003bb57506001600160a01b038116155b15620003da57604051637138356f60e01b815260040160405180910390fd5b6001600160601b0319606093841b811660805291831b821660a052821b811660c0529085901b16610160525062000413905081620004c7565b505062000b71565b6001600160a01b038116331415620004765760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000302565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b620004d1620007bc565b600e5460e082015163ffffffff918216911610156200050357604051630e6af04160e21b815260040160405180910390fd5b604051806101200160405280826000015163ffffffff168152602001826020015163ffffffff168152602001826040015162ffffff168152602001826060015163ffffffff168152602001826080015162ffffff1681526020018260a0015161ffff1681526020018260c001516001600160601b031681526020018260e0015163ffffffff168152602001600d60010160049054906101000a900463ffffffff1663ffffffff16815250600d60008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790
555060408201518160000160086101000a81548162ffffff021916908362ffffff160217905550606082015181600001600b6101000a81548163ffffffff021916908363ffffffff160217905550608082015181600001600f6101000a81548162ffffff021916908362ffffff16021790555060a08201518160000160126101000a81548161ffff021916908361ffff16021790555060c08201518160000160146101000a8154816001600160601b0302191690836001600160601b0316021790555060e08201518160010160006101000a81548163ffffffff021916908363ffffffff1602179055506101008201518160010160046101000a81548163ffffffff021916908363ffffffff160217905550905050806101000151600f81905550806101200151601081905550806101400151601360006101000a8154816001600160a01b0302191690836001600160a01b03160217905550806101600151601460006101000a8154816001600160a01b0302191690836001600160a01b031602179055507ffe125a41957477226ba20f85ef30a4024ea3bb8d066521ddc16df3f2944de32581604051620007b1919062000a0b565b60405180910390a150565b6000546001600160a01b03163314620008185760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000302565b565b8051620008278162000b58565b919050565b805161ffff811681146200082757600080fd5b805162ffffff811681146200082757600080fd5b805163ffffffff811681146200082757600080fd5b80516001600160601b03811681146200082757600080fd5b6000602082840312156200089357600080fd5b8151620008a08162000b58565b9392505050565b6000808284036101a0811215620008bd57600080fd5b8351620008ca8162000b58565b9250610180601f198201811315620008e157600080fd5b620008eb62000b0a565b9150620008fb6020860162000853565b82526200090b6040860162000853565b60208301526200091e606086016200083f565b6040830152620009316080860162000853565b60608301526200094460a086016200083f565b60808301526200095760c086016200082c565b60a08301526200096a60e0860162000868565b60c08301526101006200097f81870162000853565b60e084015261012080870151828501526101409150818701518185015250610160620009ad8188016200081a565b82850152620009be8388016200081a565b9084015250929590945092505050565b600060208284031215620009e157600080fd5b8151
60038110620008a057600080fd5b60006020828403121562000a0457600080fd5b5051919050565b815163ffffffff1681526101808101602083015162000a32602084018263ffffffff169052565b50604083015162000a4a604084018262ffffff169052565b50606083015162000a63606084018263ffffffff169052565b50608083015162000a7b608084018262ffffff169052565b5060a083015162000a9260a084018261ffff169052565b5060c083015162000aae60c08401826001600160601b03169052565b5060e083015162000ac760e084018263ffffffff169052565b5061010083810151908301526101208084015190830152610140808401516001600160a01b03908116918401919091526101609384015116929091019190915290565b60405161018081016001600160401b038111828210171562000b3c57634e487b7160e01b600052604160045260246000fd5b60405290565b634e487b7160e01b600052602160045260246000fd5b6001600160a01b038116811462000b6e57600080fd5b50565b60805160601c60a05160601c60c05160601c60e05160601c6101005160601c6101205160f81c610140516101605160601c613ca262000c2b600039600081816103600152610a4c0152600081816105e601526125460152600081816107490152818161259901526127150152600081816106a1015261274d0152600081816106d50152612684015260008181610590015261227a015260008181610891015261235b01526000818161046e015261144e0152613ca26000f3fe6080604052600436106103015760003560e01c80638811cbe81161018f578063b148ab6b116100e1578063c80480221161008a578063ef47a0ce11610064578063ef47a0ce146109b4578063f2fde38b146109d4578063faa3e996146109f457610310565b8063c8048022146108d3578063da5c674114610994578063eb5dcd6c1461084957610310565b8063b7fdb436116100bb578063b7fdb4361461090e578063c41b813a1461092e578063c7c3a19a1461095f57610310565b8063b148ab6b146108d3578063b657bc9c146108ee578063b79550be1461056957610310565b80639fab438611610143578063a72aa27e1161011d578063a72aa27e14610864578063ad1783611461087f578063b121e147146108b357610310565b80639fab438614610809578063a4c0ed3614610829578063a710b2211461084957610310565b80638e86139b116101745780638e86139b1461079657806393f0c1fc146107b1578063948108f7146107ee57610310565b80638811cbe8146107375780638da5cb5b1461076b57610310565b80635077b210116102535780637d9b97e0
116101fc578063850cce34116101d6578063850cce34146106c357806385c1b0ba146106f75780638765ecbe1461071757610310565b80637d9b97e0146105695780637f37618e1461068f5780638456cb591461056957610310565b8063744bfe611161022d578063744bfe611461044157806379ba50971461065a5780637bbaf1ea1461066f57610310565b80635077b210146105d45780635165f2f5146106165780635c975abb1461063657610310565b80631a2af011116102b55780633f4ba83a1161028f5780633f4ba83a146105695780634584a4191461057e57806348013d7b146105b257610310565b80631a2af011146104415780631b6b6d231461045c5780631e12b8a51461049057610310565b8063181f5a77116102e6578063181f5a77146103a75780631865c57d146103fd578063187256e81461042157610310565b806306e3b632146103185780630852c7c91461034e57610310565b366103105761030e610a47565b005b61030e610a47565b34801561032457600080fd5b50610338610333366004613383565b610a72565b6040516103459190613655565b60405180910390f35b34801561035a57600080fd5b506103827f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610345565b3480156103b357600080fd5b506103f06040518060400160405280601481526020017f4b6565706572526567697374727920312e332e3000000000000000000000000081525081565b60405161034591906136e6565b34801561040957600080fd5b50610412610b6e565b60405161034593929190613766565b34801561042d57600080fd5b5061030e61043c366004613003565b610e26565b34801561044d57600080fd5b5061030e61043c366004613314565b34801561046857600080fd5b506103827f000000000000000000000000000000000000000000000000000000000000000081565b34801561049c57600080fd5b506105296104ab366004612fb5565b73ffffffffffffffffffffffffffffffffffffffff90811660009081526008602090815260409182902082516060810184528154948516808252740100000000000000000000000000000000000000009095046bffffffffffffffffffffffff1692810183905260019091015460ff16151592018290529192909190565b6040805173ffffffffffffffffffffffffffffffffffffffff909416845291151560208401526bffffffffffffffffffffffff1690820152606001610345565b34801561057557600080fd5b5061030e610e32565b34801561058a57600080
fd5b506103827f000000000000000000000000000000000000000000000000000000000000000081565b3480156105be57600080fd5b506105c7600181565b604051610345919061374a565b3480156105e057600080fd5b506106087f000000000000000000000000000000000000000000000000000000000000000081565b604051908152602001610345565b34801561062257600080fd5b5061030e6106313660046132e2565b610e3a565b34801561064257600080fd5b5060035460ff165b6040519015158152602001610345565b34801561066657600080fd5b5061030e610fc6565b34801561067b57600080fd5b5061064a61068a366004613337565b6110c8565b34801561069b57600080fd5b506103827f000000000000000000000000000000000000000000000000000000000000000081565b3480156106cf57600080fd5b506103827f000000000000000000000000000000000000000000000000000000000000000081565b34801561070357600080fd5b5061030e61071236600461316e565b611126565b34801561072357600080fd5b5061030e6107323660046132e2565b611133565b34801561074357600080fd5b506105c77f000000000000000000000000000000000000000000000000000000000000000081565b34801561077757600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff16610382565b3480156107a257600080fd5b5061030e61043c3660046131c2565b3480156107bd57600080fd5b506107d16107cc3660046132e2565b6112df565b6040516bffffffffffffffffffffffff9091168152602001610345565b3480156107fa57600080fd5b5061030e61043c3660046133c8565b34801561081557600080fd5b5061030e610824366004613337565b6112fd565b34801561083557600080fd5b5061030e61084436600461303e565b611436565b34801561085557600080fd5b5061030e61043c366004612fd0565b34801561087057600080fd5b5061030e61043c3660046133a5565b34801561088b57600080fd5b506103827f000000000000000000000000000000000000000000000000000000000000000081565b3480156108bf57600080fd5b5061030e6108ce366004612fb5565b61162d565b3480156108df57600080fd5b5061030e6108ce3660046132e2565b3480156108fa57600080fd5b506107d16109093660046132e2565b611638565b34801561091a57600080fd5b5061030e61092936600461310e565b611659565b34801561093a57600080fd5b5061094e610949366004613314565b611667565b6040516103459594939291906136f9565b34801561096b57600080fd5b5061097f61
097a3660046132e2565b611689565b604051610345999897969594939291906135c3565b3480156109a057600080fd5b506106086109af366004613098565b611859565b3480156109c057600080fd5b5061030e6109cf366004613204565b61186c565b3480156109e057600080fd5b5061030e6109ef366004612fb5565b611bb8565b348015610a0057600080fd5b50610a3a610a0f366004612fb5565b73ffffffffffffffffffffffffffffffffffffffff166000908152600c602052604090205460ff1690565b6040516103459190613730565b610a707f0000000000000000000000000000000000000000000000000000000000000000611bc9565b565b60606000610a806005611bed565b9050808410610abb576040517f1390f2a100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b82610acd57610aca8482613945565b92505b60008367ffffffffffffffff811115610ae857610ae8613afe565b604051908082528060200260200182016040528015610b11578160200160208202803683370190505b50905060005b84811015610b6357610b34610b2c8288613885565b600590611bf7565b828281518110610b4657610b46613acf565b602090810291909101015280610b5b81613a09565b915050610b17565b509150505b92915050565b6040805160808101825260008082526020820181905291810182905260608101919091526040805161018081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e081018290526101008101829052610120810182905261014081018290526101608101919091526040805161012081018252600d5463ffffffff8082168352640100000000808304821660208086019190915262ffffff6801000000000000000085048116868801526b010000000000000000000000850484166060878101919091526f010000000000000000000000000000008604909116608087015261ffff720100000000000000000000000000000000000086041660a08701526bffffffffffffffffffffffff74010000000000000000000000000000000000000000909504851660c0870152600e5480851660e088015292909204909216610100850181905287526011549092169086015260125492850192909252610cf06005611bed565b606080860191909152815163ffffffff908116855260208084015182168187015260408085015162ffffff90811682890152858501518416948801949094526080808601519094169387019390935260a08085015161ffff169087015260c0808501516bffffffff
ffffffffffffffff169087015260e08085015190921691860191909152600f5461010086015260105461012086015260135473ffffffffffffffffffffffffffffffffffffffff90811661014087015260145416610160860152600480548351818402810184019094528084528793879390918391830182828015610e1357602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610de8575b5050505050905093509350935050909192565b610e2e610a47565b5050565b610a70610a47565b60008181526007602090815260409182902082516101008101845281546bffffffffffffffffffffffff808216835273ffffffffffffffffffffffffffffffffffffffff6c0100000000000000000000000092839004811695840195909552600184015490811695830195909552909304821660608401526002015463ffffffff808216608085015264010000000082041660a084015268010000000000000000810490911660c083015260ff7c010000000000000000000000000000000000000000000000000000000090910416151560e0820152610f1981611c0a565b8060e00151610f54576040517f1b88a78400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260076020526040902060020180547fffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff169055610f96600583611cb7565b5060405182907f7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a4745690600090a25050565b60015473ffffffffffffffffffffffffffffffffffffffff16331461104c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60006110d2611cc3565b61111e611119338686868080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525060019250611d30915050565b611e1a565b949350505050565b61112e610a47565b505050565b60008181526007602090815260
409182902082516101008101845281546bffffffffffffffffffffffff808216835273ffffffffffffffffffffffffffffffffffffffff6c0100000000000000000000000092839004811695840195909552600184015490811695830195909552909304821660608401526002015463ffffffff808216608085015264010000000082041660a084015268010000000000000000810490911660c083015260ff7c010000000000000000000000000000000000000000000000000000000090910416151560e082015261121281611c0a565b8060e001511561124e576040517f514b6c2400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260076020526040902060020180547fffffff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffff167c01000000000000000000000000000000000000000000000000000000001790556112af60058361223b565b5060405182907f8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f90600090a25050565b60008060006112ec612247565b9150915061111e8483836000612442565b60008381526007602090815260409182902082516101008101845281546bffffffffffffffffffffffff808216835273ffffffffffffffffffffffffffffffffffffffff6c0100000000000000000000000092839004811695840195909552600184015490811695830195909552909304821660608401526002015463ffffffff808216608085015264010000000082041660a084015268010000000000000000810490911660c083015260ff7c010000000000000000000000000000000000000000000000000000000090910416151560e08201526113dc81611c0a565b6000848152600b602052604090206113f5908484612ddd565b50837f7b778136e5211932b51a145badd01959415e79e051a933604b3d323f862dcabf8484604051611428929190613699565b60405180910390a250505050565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146114a5576040517fc8bad78d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b602081146114df576040517fdfe9309000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006114ed828401846132e2565b600081815260076020526040902060020154909150640100000000900463ffffffff90811614611549576040517f9c0083a2000000000000000000
00000000000000000000000000000000000000815260040160405180910390fd5b6000818152600760205260409020546115719085906bffffffffffffffffffffffff1661389d565b600082815260076020526040902080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff929092169190911790556012546115c8908590613885565b6012556040516bffffffffffffffffffffffff8516815273ffffffffffffffffffffffffffffffffffffffff86169082907fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039060200160405180910390a35050505050565b611635610a47565b50565b600081815260076020526040812060020154610b689063ffffffff166112df565b611661610a47565b50505050565b60606000806000806116776128b3565b61167f610a47565b9295509295909350565b600081815260076020908152604080832081516101008101835281546bffffffffffffffffffffffff80821683526c010000000000000000000000009182900473ffffffffffffffffffffffffffffffffffffffff9081168488019081526001860154928316858801908152939092048116606080860191825260029096015463ffffffff80821660808801819052640100000000830490911660a0880190815268010000000000000000830490941660c088018190527c010000000000000000000000000000000000000000000000000000000090920460ff16151560e088019081528c8c52600b909a52978a2086519451925193519551995181548c9b999a8c9a8b9a8b9a8b9a8b9a8b9a93999895979596919593949093909187906117b0906139b5565b80601f01602080910402602001604051908101604052809291908181526020018280546117dc906139b5565b80156118295780601f106117fe57610100808354040283529160200191611829565b820191906000526020600020905b81548152906001019060200180831161180c57829003601f168201915b505050505096508263ffffffff169250995099509950995099509950995099509950509193959799909294969850565b6000611863610a47565b95945050505050565b6118746128eb565b600e5460e082015163ffffffff918216911610156118be576040517f39abc10400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b604051806101200160405280826000015163ffffffff168152602001826020015163ffffffff168152602001826040015162ffffff168152602001826060015163ffffffff168152602001826080015162ff
ffff1681526020018260a0015161ffff1681526020018260c001516bffffffffffffffffffffffff1681526020018260e0015163ffffffff168152602001600d60010160049054906101000a900463ffffffff1663ffffffff16815250600d60008201518160000160006101000a81548163ffffffff021916908363ffffffff16021790555060208201518160000160046101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160086101000a81548162ffffff021916908362ffffff160217905550606082015181600001600b6101000a81548163ffffffff021916908363ffffffff160217905550608082015181600001600f6101000a81548162ffffff021916908362ffffff16021790555060a08201518160000160126101000a81548161ffff021916908361ffff16021790555060c08201518160000160146101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555060e08201518160010160006101000a81548163ffffffff021916908363ffffffff1602179055506101008201518160010160046101000a81548163ffffffff021916908363ffffffff160217905550905050806101000151600f81905550806101200151601081905550806101400151601360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550806101600151601460006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055507ffe125a41957477226ba20f85ef30a4024ea3bb8d066521ddc16df3f2944de32581604051611bad9190613757565b60405180910390a150565b611bc06128eb565b6116358161296c565b3660008037600080366000845af43d6000803e808015611be8573d6000f35b3d6000fd5b6000610b68825490565b6000611c038383612a62565b9392505050565b806060015173ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614611c73576040517fa47c170600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60a081015163ffffffff90811614611635576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000611c038383612a8c565b60035460ff1615610a70576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004
820152601060248201527f5061757361626c653a20706175736564000000000000000000000000000000006044820152606401611043565b611d866040518060e00160405280600073ffffffffffffffffffffffffffffffffffffffff1681526020016000815260200160608152602001600081526020016000815260200160008152602001600081525090565b60008481526007602052604081206002015463ffffffff169080611da8612247565b915091506000611dba84848489612442565b6040805160e08101825273ffffffffffffffffffffffffffffffffffffffff909b168b5260208b0199909952978901969096526bffffffffffffffffffffffff9096166060880152608087019190915260a086015250505060c082015290565b6000611e24612adb565b60208083015160009081526007825260409081902081516101008101835281546bffffffffffffffffffffffff808216835273ffffffffffffffffffffffffffffffffffffffff6c0100000000000000000000000092839004811696840196909652600184015490811694830194909452909204831660608301526002015463ffffffff808216608084015264010000000082041660a0830181905268010000000000000000820490931660c083015260ff7c010000000000000000000000000000000000000000000000000000000090910416151560e0820152904310611f38576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b611f4b8184600001518560600151612b4d565b60005a90506000634585e33b60e01b8560400151604051602401611f6f91906136e6565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050509050611fe185608001518460c0015183612c9e565b93505a611fee9083613945565b91506000612007838760a001518860c001516001612442565b6020808801516000908152600790915260409020549091506120389082906bffffffffffffffffffffffff1661395c565b6020878101805160009081526007909252604080832080547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffffffffffff9586161790559051825290206001015461209b9183911661389d565b60208781018051600090815260078352604080822060010180547fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166bffffffffffffffff
ffffffff9687161790558a5192518252808220805486166c0100000000000000000000000073ffffffffffffffffffffffffffffffffffffffff958616021790558a51909216815260089092529020546121549183917401000000000000000000000000000000000000000090041661389d565b60086000886000015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060000160146101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550856000015173ffffffffffffffffffffffffffffffffffffffff1685151587602001517fcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6848a6040015160405161222092919061380d565b60405180910390a4505050506122366001600255565b919050565b6000611c038383612cea565b6000806000600d600001600f9054906101000a900462ffffff1662ffffff1690506000808263ffffffff161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b1580156122de57600080fd5b505afa1580156122f2573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061231691906133eb565b50945090925084915050801561233a57506123318242613945565b8463ffffffff16105b80612346575060008113155b1561235557600f549550612359565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b1580156123bf57600080fd5b505afa1580156123d3573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906123f791906133eb565b50945090925084915050801561241b57506124128242613945565b8463ffffffff16105b80612427575060008113155b1561243657601054945061243a565b8094505b505050509091565b6040805161012081018252600d5463ffffffff80821683526401000000008083048216602085015268010000000000000000830462ffffff908116958501959095526b0100000000000000000000008304821660608501526f010000000000000000000000000000008304909416608084015272010000000000000000000000000000000000
00820461ffff1660a08401819052740100000000000000000000000000000000000000009092046bffffffffffffffffffffffff1660c0840152600e5480821660e08501529390930490921661010082015260009182906125299087613908565b90508380156125375750803a105b1561253f57503a5b600061256b7f000000000000000000000000000000000000000000000000000000000000000089613885565b6125759083613908565b83519091506000906125919063ffffffff16633b9aca00613885565b9050600060027f000000000000000000000000000000000000000000000000000000000000000060028111156125c9576125c9613a71565b141561271157604080516000815260208101909152871561262857600036604051806080016040528060488152602001613b3e604891396040516020016126129392919061359c565b6040516020818303038152906040529050612647565b6040518061014001604052806101108152602001613b86610110913990505b6040517f49948e0e00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016906349948e0e906126b99084906004016136e6565b60206040518083038186803b1580156126d157600080fd5b505afa1580156126e5573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061270991906132fb565b9150506127ec565b60017f0000000000000000000000000000000000000000000000000000000000000000600281111561274557612745613a71565b14156127ec577f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663c6f7de0e6040518163ffffffff1660e01b815260040160206040518083038186803b1580156127b157600080fd5b505afa1580156127c5573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906127e991906132fb565b90505b8661280857808560a0015161ffff166128059190613908565b90505b6000856020015163ffffffff1664e8d4a510006128259190613908565b89846128318588613885565b61283f90633b9aca00613908565b6128499190613908565b61285391906138cd565b61285d9190613885565b90506b033b2e3c9fd0803ce80000008111156128a5576040517f2ad7547a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b9a99505050505050505050
50565b3215610a70576040517fb60ac5db00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005473ffffffffffffffffffffffffffffffffffffffff163314610a70576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401611043565b73ffffffffffffffffffffffffffffffffffffffff81163314156129ec576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401611043565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000826000018281548110612a7957612a79613acf565b9060005260206000200154905092915050565b6000818152600183016020526040812054612ad357508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155610b68565b506000610b68565b600280541415612b47576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f5265656e7472616e637947756172643a207265656e7472616e742063616c6c006044820152606401611043565b60028055565b8260e0015115612b89576040517f514b6c2400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff821660009081526008602052604090206001015460ff16612beb576040517fcfbacfd800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b82516bffffffffffffffffffffffff16811115612c34576040517f356680b700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16836020015173ffffffffffffffffffffffffffffffffffffffff16141561112e576040517f06bc1040000000000000000000000000000000000000000000000000000000008152600401604051
80910390fd5b60005a611388811015612cb057600080fd5b611388810390508460408204820311612cc857600080fd5b50823b612cd457600080fd5b60008083516020850160008789f1949350505050565b60008181526001830160205260408120548015612dd3576000612d0e600183613945565b8554909150600090612d2290600190613945565b9050818114612d87576000866000018281548110612d4257612d42613acf565b9060005260206000200154905080876000018481548110612d6557612d65613acf565b6000918252602080832090910192909255918252600188019052604090208390555b8554869080612d9857612d98613aa0565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505050610b68565b6000915050610b68565b828054612de9906139b5565b90600052602060002090601f016020900481019282612e0b5760008555612e6f565b82601f10612e42578280017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00823516178555612e6f565b82800160010185558215612e6f579182015b82811115612e6f578235825591602001919060010190612e54565b50612e7b929150612e7f565b5090565b5b80821115612e7b5760008155600101612e80565b803573ffffffffffffffffffffffffffffffffffffffff8116811461223657600080fd5b60008083601f840112612eca57600080fd5b50813567ffffffffffffffff811115612ee257600080fd5b6020830191508360208260051b8501011115612efd57600080fd5b9250929050565b60008083601f840112612f1657600080fd5b50813567ffffffffffffffff811115612f2e57600080fd5b602083019150836020828501011115612efd57600080fd5b803561ffff8116811461223657600080fd5b803562ffffff8116811461223657600080fd5b803563ffffffff8116811461223657600080fd5b805169ffffffffffffffffffff8116811461223657600080fd5b80356bffffffffffffffffffffffff8116811461223657600080fd5b600060208284031215612fc757600080fd5b611c0382612e94565b60008060408385031215612fe357600080fd5b612fec83612e94565b9150612ffa60208401612e94565b90509250929050565b6000806040838503121561301657600080fd5b61301f83612e94565b915060208301356004811061303357600080fd5b809150509250929050565b6000806000806060858703121561305457600080fd5b61305d85612e94565b935060208501359250604085013567ffffffffffffffff81111561308057600080fd5b
61308c87828801612f04565b95989497509550505050565b6000806000806000608086880312156130b057600080fd5b6130b986612e94565b94506130c760208701612f6b565b93506130d560408701612e94565b9250606086013567ffffffffffffffff8111156130f157600080fd5b6130fd88828901612f04565b969995985093965092949392505050565b6000806000806040858703121561312457600080fd5b843567ffffffffffffffff8082111561313c57600080fd5b61314888838901612eb8565b9096509450602087013591508082111561316157600080fd5b5061308c87828801612eb8565b60008060006040848603121561318357600080fd5b833567ffffffffffffffff81111561319a57600080fd5b6131a686828701612eb8565b90945092506131b9905060208501612e94565b90509250925092565b600080602083850312156131d557600080fd5b823567ffffffffffffffff8111156131ec57600080fd5b6131f885828601612f04565b90969095509350505050565b6000610180828403121561321757600080fd5b61321f613834565b61322883612f6b565b815261323660208401612f6b565b602082015261324760408401612f58565b604082015261325860608401612f6b565b606082015261326960808401612f58565b608082015261327a60a08401612f46565b60a082015261328b60c08401612f99565b60c082015261329c60e08401612f6b565b60e0820152610100838101359082015261012080840135908201526101406132c5818501612e94565b908201526101606132d7848201612e94565b908201529392505050565b6000602082840312156132f457600080fd5b5035919050565b60006020828403121561330d57600080fd5b5051919050565b6000806040838503121561332757600080fd5b82359150612ffa60208401612e94565b60008060006040848603121561334c57600080fd5b83359250602084013567ffffffffffffffff81111561336a57600080fd5b61337686828701612f04565b9497909650939450505050565b6000806040838503121561339657600080fd5b50508035926020909101359150565b600080604083850312156133b857600080fd5b82359150612ffa60208401612f6b565b600080604083850312156133db57600080fd5b82359150612ffa60208401612f99565b600080600080600060a0868803121561340357600080fd5b61340c86612f7f565b945060208601519350604086015192506060860151915061342f60808701612f7f565b90509295509295909350565b60008151808452613453816020860160208601613989565b601f017fffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffe0169290920160200192915050565b805163ffffffff16825260208101516134a6602084018263ffffffff169052565b5060408101516134bd604084018262ffffff169052565b5060608101516134d5606084018263ffffffff169052565b5060808101516134ec608084018262ffffff169052565b5060a081015161350260a084018261ffff169052565b5060c081015161352260c08401826bffffffffffffffffffffffff169052565b5060e081015161353a60e084018263ffffffff169052565b50610100818101519083015261012080820151908301526101408082015173ffffffffffffffffffffffffffffffffffffffff81168285015250506101608181015173ffffffffffffffffffffffffffffffffffffffff811684830152611661565b8284823760008382016000815283516135b9818360208801613989565b0195945050505050565b600061012073ffffffffffffffffffffffffffffffffffffffff808d16845263ffffffff8c1660208501528160408501526136008285018c61343b565b6bffffffffffffffffffffffff9a8b16606086015298811660808501529690961660a08301525067ffffffffffffffff9390931660c0840152941660e082015292151561010090930192909252949350505050565b6020808252825182820181905260009190848201906040850190845b8181101561368d57835183529284019291840191600101613671565b50909695505050505050565b60208152816020820152818360408301376000818301604090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160101919050565b602081526000611c03602083018461343b565b60a08152600061370c60a083018861343b565b90508560208301528460408301528360608301528260808301529695505050505050565b602081016004831061374457613744613a71565b91905290565b6020810161374483613b2d565b6101808101610b688284613485565b600061022080830163ffffffff875116845260206bffffffffffffffffffffffff81890151168186015260408801516040860152606088015160608601526137b16080860188613485565b6102008501929092528451908190526102408401918086019160005b818110156137ff57835173ffffffffffffffffffffffffffffffffffffffff16855293820193928201926001016137cd565b509298975050505050505050565b6bffffffffffffffffffffffff8316815260406020820152600061111e604083018461343b565b604051610180810167ffffffffffffffff8111828210171561387f57
7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60405290565b6000821982111561389857613898613a42565b500190565b60006bffffffffffffffffffffffff8083168185168083038211156138c4576138c4613a42565b01949350505050565b600082613903577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff048311821515161561394057613940613a42565b500290565b60008282101561395757613957613a42565b500390565b60006bffffffffffffffffffffffff8381169083168181101561398157613981613a42565b039392505050565b60005b838110156139a457818101518382015260200161398c565b838111156116615750506000910152565b600181811c908216806139c957607f821691505b60208210811415613a03577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415613a3b57613a3b613a42565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6003811061163557611635613a7156fe3078666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666663078666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666
666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000806000a", +} + +var KeeperRegistryABI = KeeperRegistryMetaData.ABI + +var KeeperRegistryBin = KeeperRegistryMetaData.Bin + +func DeployKeeperRegistry(auth *bind.TransactOpts, backend bind.ContractBackend, keeperRegistryLogic common.Address, config Config) (common.Address, *types.Transaction, *KeeperRegistry, error) { + parsed, err := KeeperRegistryMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistryBin), backend, keeperRegistryLogic, config) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistry{address: address, abi: *parsed, KeeperRegistryCaller: KeeperRegistryCaller{contract: contract}, KeeperRegistryTransactor: KeeperRegistryTransactor{contract: contract}, KeeperRegistryFilterer: KeeperRegistryFilterer{contract: contract}}, nil +} + +type KeeperRegistry struct { + address common.Address + abi abi.ABI + KeeperRegistryCaller + KeeperRegistryTransactor + KeeperRegistryFilterer +} + +type KeeperRegistryCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistryTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistryFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistrySession struct { + Contract *KeeperRegistry + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistryCallerSession struct { + Contract *KeeperRegistryCaller + CallOpts bind.CallOpts +} + +type KeeperRegistryTransactorSession struct { + Contract *KeeperRegistryTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistryRaw struct { + Contract 
*KeeperRegistry +} + +type KeeperRegistryCallerRaw struct { + Contract *KeeperRegistryCaller +} + +type KeeperRegistryTransactorRaw struct { + Contract *KeeperRegistryTransactor +} + +func NewKeeperRegistry(address common.Address, backend bind.ContractBackend) (*KeeperRegistry, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistry(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistry{address: address, abi: abi, KeeperRegistryCaller: KeeperRegistryCaller{contract: contract}, KeeperRegistryTransactor: KeeperRegistryTransactor{contract: contract}, KeeperRegistryFilterer: KeeperRegistryFilterer{contract: contract}}, nil +} + +func NewKeeperRegistryCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryCaller, error) { + contract, err := bindKeeperRegistry(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryCaller{contract: contract}, nil +} + +func NewKeeperRegistryTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistryTransactor, error) { + contract, err := bindKeeperRegistry(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryTransactor{contract: contract}, nil +} + +func NewKeeperRegistryFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistryFilterer, error) { + contract, err := bindKeeperRegistry(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistryFilterer{contract: contract}, nil +} + +func bindKeeperRegistry(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistryMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil 
+} + +func (_KeeperRegistry *KeeperRegistryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistry.Contract.KeeperRegistryCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.Contract.KeeperRegistryTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistry *KeeperRegistryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistry.Contract.KeeperRegistryTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistry.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistry.Contract.contract.Transact(opts, method, params...) 
+} + +func (_KeeperRegistry *KeeperRegistryCaller) ARBNITROORACLE(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "ARB_NITRO_ORACLE") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) ARBNITROORACLE() (common.Address, error) { + return _KeeperRegistry.Contract.ARBNITROORACLE(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) ARBNITROORACLE() (common.Address, error) { + return _KeeperRegistry.Contract.ARBNITROORACLE(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) FASTGASFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "FAST_GAS_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) FASTGASFEED() (common.Address, error) { + return _KeeperRegistry.Contract.FASTGASFEED(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) FASTGASFEED() (common.Address, error) { + return _KeeperRegistry.Contract.FASTGASFEED(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) KEEPERREGISTRYLOGIC(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "KEEPER_REGISTRY_LOGIC") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) KEEPERREGISTRYLOGIC() (common.Address, error) { + return _KeeperRegistry.Contract.KEEPERREGISTRYLOGIC(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry 
*KeeperRegistryCallerSession) KEEPERREGISTRYLOGIC() (common.Address, error) { + return _KeeperRegistry.Contract.KEEPERREGISTRYLOGIC(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) PLI() (common.Address, error) { + return _KeeperRegistry.Contract.PLI(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) PLI() (common.Address, error) { + return _KeeperRegistry.Contract.PLI(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) PLIETHFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "PLI_ETH_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) PLIETHFEED() (common.Address, error) { + return _KeeperRegistry.Contract.PLIETHFEED(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) PLIETHFEED() (common.Address, error) { + return _KeeperRegistry.Contract.PLIETHFEED(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) OPTIMISMORACLE(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "OPTIMISM_ORACLE") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) OPTIMISMORACLE() (common.Address, error) { + return 
_KeeperRegistry.Contract.OPTIMISMORACLE(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) OPTIMISMORACLE() (common.Address, error) { + return _KeeperRegistry.Contract.OPTIMISMORACLE(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) PAYMENTMODEL(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "PAYMENT_MODEL") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) PAYMENTMODEL() (uint8, error) { + return _KeeperRegistry.Contract.PAYMENTMODEL(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) PAYMENTMODEL() (uint8, error) { + return _KeeperRegistry.Contract.PAYMENTMODEL(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) REGISTRYGASOVERHEAD(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "REGISTRY_GAS_OVERHEAD") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) REGISTRYGASOVERHEAD() (*big.Int, error) { + return _KeeperRegistry.Contract.REGISTRYGASOVERHEAD(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) REGISTRYGASOVERHEAD() (*big.Int, error) { + return _KeeperRegistry.Contract.REGISTRYGASOVERHEAD(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getActiveUpkeepIDs", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + 
return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _KeeperRegistry.Contract.GetActiveUpkeepIDs(&_KeeperRegistry.CallOpts, startIndex, maxCount) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _KeeperRegistry.Contract.GetActiveUpkeepIDs(&_KeeperRegistry.CallOpts, startIndex, maxCount) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetKeeperInfo(opts *bind.CallOpts, query common.Address) (GetKeeperInfo, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getKeeperInfo", query) + + outstruct := new(GetKeeperInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Payee = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.Active = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.Balance = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetKeeperInfo(query common.Address) (GetKeeperInfo, + + error) { + return _KeeperRegistry.Contract.GetKeeperInfo(&_KeeperRegistry.CallOpts, query) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetKeeperInfo(query common.Address) (GetKeeperInfo, + + error) { + return _KeeperRegistry.Contract.GetKeeperInfo(&_KeeperRegistry.CallOpts, query) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetMaxPaymentForGas(opts *bind.CallOpts, gasLimit *big.Int) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getMaxPaymentForGas", gasLimit) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetMaxPaymentForGas(gasLimit *big.Int) (*big.Int, error) { + return 
_KeeperRegistry.Contract.GetMaxPaymentForGas(&_KeeperRegistry.CallOpts, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetMaxPaymentForGas(gasLimit *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMaxPaymentForGas(&_KeeperRegistry.CallOpts, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getMinBalanceForUpkeep", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMinBalanceForUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMinBalanceForUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getPeerRegistryMigrationPermission", peer) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return _KeeperRegistry.Contract.GetPeerRegistryMigrationPermission(&_KeeperRegistry.CallOpts, peer) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return _KeeperRegistry.Contract.GetPeerRegistryMigrationPermission(&_KeeperRegistry.CallOpts, peer) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetState(opts *bind.CallOpts) 
(GetState, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getState") + + outstruct := new(GetState) + if err != nil { + return *outstruct, err + } + + outstruct.State = *abi.ConvertType(out[0], new(State)).(*State) + outstruct.Config = *abi.ConvertType(out[1], new(Config)).(*Config) + outstruct.Keepers = *abi.ConvertType(out[2], new([]common.Address)).(*[]common.Address) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetState() (GetState, + + error) { + return _KeeperRegistry.Contract.GetState(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetState() (GetState, + + error) { + return _KeeperRegistry.Contract.GetState(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetUpkeep(opts *bind.CallOpts, id *big.Int) (GetUpkeep, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getUpkeep", id) + + outstruct := new(GetUpkeep) + if err != nil { + return *outstruct, err + } + + outstruct.Target = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.ExecuteGas = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.CheckData = *abi.ConvertType(out[2], new([]byte)).(*[]byte) + outstruct.Balance = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.LastKeeper = *abi.ConvertType(out[4], new(common.Address)).(*common.Address) + outstruct.Admin = *abi.ConvertType(out[5], new(common.Address)).(*common.Address) + outstruct.MaxValidBlocknumber = *abi.ConvertType(out[6], new(uint64)).(*uint64) + outstruct.AmountSpent = *abi.ConvertType(out[7], new(*big.Int)).(**big.Int) + outstruct.Paused = *abi.ConvertType(out[8], new(bool)).(*bool) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetUpkeep(id *big.Int) (GetUpkeep, + + error) { + return _KeeperRegistry.Contract.GetUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry 
*KeeperRegistryCallerSession) GetUpkeep(id *big.Int) (GetUpkeep, + + error) { + return _KeeperRegistry.Contract.GetUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) Owner() (common.Address, error) { + return _KeeperRegistry.Contract.Owner(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) Owner() (common.Address, error) { + return _KeeperRegistry.Contract.Owner(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) Paused(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "paused") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) Paused() (bool, error) { + return _KeeperRegistry.Contract.Paused(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) Paused() (bool, error) { + return _KeeperRegistry.Contract.Paused(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) TypeAndVersion() (string, error) { + return _KeeperRegistry.Contract.TypeAndVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) TypeAndVersion() (string, 
error) { + return _KeeperRegistry.Contract.TypeAndVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "upkeepTranscoderVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) UpkeepTranscoderVersion() (uint8, error) { + return _KeeperRegistry.Contract.UpkeepTranscoderVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) UpkeepTranscoderVersion() (uint8, error) { + return _KeeperRegistry.Contract.UpkeepTranscoderVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "acceptOwnership") +} + +func (_KeeperRegistry *KeeperRegistrySession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptOwnership(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptOwnership(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AcceptPayeeship(opts *bind.TransactOpts, keeper common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "acceptPayeeship", keeper) +} + +func (_KeeperRegistry *KeeperRegistrySession) AcceptPayeeship(keeper common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptPayeeship(&_KeeperRegistry.TransactOpts, keeper) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AcceptPayeeship(keeper common.Address) (*types.Transaction, error) { + return 
_KeeperRegistry.Contract.AcceptPayeeship(&_KeeperRegistry.TransactOpts, keeper) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "acceptUpkeepAdmin", id) +} + +func (_KeeperRegistry *KeeperRegistrySession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptUpkeepAdmin(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptUpkeepAdmin(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "addFunds", id, amount) +} + +func (_KeeperRegistry *KeeperRegistrySession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AddFunds(&_KeeperRegistry.TransactOpts, id, amount) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AddFunds(&_KeeperRegistry.TransactOpts, id, amount) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "cancelUpkeep", id) +} + +func (_KeeperRegistry *KeeperRegistrySession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CancelUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CancelUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry 
*KeeperRegistryTransactor) CheckUpkeep(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "checkUpkeep", id, from) +} + +func (_KeeperRegistry *KeeperRegistrySession) CheckUpkeep(id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CheckUpkeep(&_KeeperRegistry.TransactOpts, id, from) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) CheckUpkeep(id *big.Int, from common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CheckUpkeep(&_KeeperRegistry.TransactOpts, id, from) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "migrateUpkeeps", ids, destination) +} + +func (_KeeperRegistry *KeeperRegistrySession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.MigrateUpkeeps(&_KeeperRegistry.TransactOpts, ids, destination) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.MigrateUpkeeps(&_KeeperRegistry.TransactOpts, ids, destination) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistrySession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.OnTokenTransfer(&_KeeperRegistry.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) OnTokenTransfer(sender 
common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.OnTokenTransfer(&_KeeperRegistry.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "pause") +} + +func (_KeeperRegistry *KeeperRegistrySession) Pause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Pause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Pause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Pause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "pauseUpkeep", id) +} + +func (_KeeperRegistry *KeeperRegistrySession) PauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.PauseUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) PauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.PauseUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) PerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "performUpkeep", id, performData) +} + +func (_KeeperRegistry *KeeperRegistrySession) PerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.PerformUpkeep(&_KeeperRegistry.TransactOpts, id, performData) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) PerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.PerformUpkeep(&_KeeperRegistry.TransactOpts, id, performData) +} + +func 
(_KeeperRegistry *KeeperRegistryTransactor) ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "receiveUpkeeps", encodedUpkeeps) +} + +func (_KeeperRegistry *KeeperRegistrySession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.ReceiveUpkeeps(&_KeeperRegistry.TransactOpts, encodedUpkeeps) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.ReceiveUpkeeps(&_KeeperRegistry.TransactOpts, encodedUpkeeps) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "recoverFunds") +} + +func (_KeeperRegistry *KeeperRegistrySession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.RecoverFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.RecoverFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "registerUpkeep", target, gasLimit, admin, checkData) +} + +func (_KeeperRegistry *KeeperRegistrySession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.RegisterUpkeep(&_KeeperRegistry.TransactOpts, target, gasLimit, admin, checkData) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + 
return _KeeperRegistry.Contract.RegisterUpkeep(&_KeeperRegistry.TransactOpts, target, gasLimit, admin, checkData) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetConfig(opts *bind.TransactOpts, config Config) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setConfig", config) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetConfig(config Config) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetConfig(&_KeeperRegistry.TransactOpts, config) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetConfig(config Config) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetConfig(&_KeeperRegistry.TransactOpts, config) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetKeepers(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setKeepers", keepers, payees) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetKeepers(keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetKeepers(&_KeeperRegistry.TransactOpts, keepers, payees) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetKeepers(keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetKeepers(&_KeeperRegistry.TransactOpts, keepers, payees) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setPeerRegistryMigrationPermission", peer, permission) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetPeerRegistryMigrationPermission(&_KeeperRegistry.TransactOpts, peer, 
permission) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetPeerRegistryMigrationPermission(&_KeeperRegistry.TransactOpts, peer, permission) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setUpkeepGasLimit", id, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetUpkeepGasLimit(&_KeeperRegistry.TransactOpts, id, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetUpkeepGasLimit(&_KeeperRegistry.TransactOpts, id, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transferOwnership", to) +} + +func (_KeeperRegistry *KeeperRegistrySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferOwnership(&_KeeperRegistry.TransactOpts, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferOwnership(&_KeeperRegistry.TransactOpts, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) TransferPayeeship(opts *bind.TransactOpts, keeper common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transferPayeeship", keeper, proposed) +} + +func (_KeeperRegistry *KeeperRegistrySession) TransferPayeeship(keeper 
common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferPayeeship(&_KeeperRegistry.TransactOpts, keeper, proposed) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) TransferPayeeship(keeper common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferPayeeship(&_KeeperRegistry.TransactOpts, keeper, proposed) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transferUpkeepAdmin", id, proposed) +} + +func (_KeeperRegistry *KeeperRegistrySession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferUpkeepAdmin(&_KeeperRegistry.TransactOpts, id, proposed) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferUpkeepAdmin(&_KeeperRegistry.TransactOpts, id, proposed) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "unpause") +} + +func (_KeeperRegistry *KeeperRegistrySession) Unpause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Unpause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Unpause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Unpause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "unpauseUpkeep", id) +} + +func (_KeeperRegistry *KeeperRegistrySession) UnpauseUpkeep(id *big.Int) (*types.Transaction, 
error) { + return _KeeperRegistry.Contract.UnpauseUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.UnpauseUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) UpdateCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "updateCheckData", id, newCheckData) +} + +func (_KeeperRegistry *KeeperRegistrySession) UpdateCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.UpdateCheckData(&_KeeperRegistry.TransactOpts, id, newCheckData) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) UpdateCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.UpdateCheckData(&_KeeperRegistry.TransactOpts, id, newCheckData) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "withdrawFunds", id, to) +} + +func (_KeeperRegistry *KeeperRegistrySession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawFunds(&_KeeperRegistry.TransactOpts, id, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawFunds(&_KeeperRegistry.TransactOpts, id, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "withdrawOwnerFunds") +} + +func (_KeeperRegistry *KeeperRegistrySession) WithdrawOwnerFunds() (*types.Transaction, error) { + return 
_KeeperRegistry.Contract.WithdrawOwnerFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawOwnerFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "withdrawPayment", from, to) +} + +func (_KeeperRegistry *KeeperRegistrySession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawPayment(&_KeeperRegistry.TransactOpts, from, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawPayment(&_KeeperRegistry.TransactOpts, from, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.RawTransact(opts, calldata) +} + +func (_KeeperRegistry *KeeperRegistrySession) Fallback(calldata []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.Fallback(&_KeeperRegistry.TransactOpts, calldata) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.Fallback(&_KeeperRegistry.TransactOpts, calldata) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.RawTransact(opts, nil) +} + +func (_KeeperRegistry *KeeperRegistrySession) Receive() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Receive(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Receive() 
(*types.Transaction, error) { + return _KeeperRegistry.Contract.Receive(&_KeeperRegistry.TransactOpts) +} + +type KeeperRegistryConfigSetIterator struct { + Event *KeeperRegistryConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryConfigSet struct { + Config Config + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryConfigSetIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &KeeperRegistryConfigSetIterator{contract: _KeeperRegistry.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryConfigSet) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer 
sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseConfigSet(log types.Log) (*KeeperRegistryConfigSet, error) { + event := new(KeeperRegistryConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryFundsAddedIterator struct { + Event *KeeperRegistryFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryFundsAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) 
FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryFundsAddedIterator{contract: _KeeperRegistry.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryFundsAdded) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseFundsAdded(log types.Log) (*KeeperRegistryFundsAdded, error) { + event := new(KeeperRegistryFundsAdded) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryFundsWithdrawnIterator struct { + Event 
*KeeperRegistryFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryFundsWithdrawnIterator{contract: _KeeperRegistry.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_KeeperRegistry.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseFundsWithdrawn(log types.Log) (*KeeperRegistryFundsWithdrawn, error) { + event := new(KeeperRegistryFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryKeepersUpdatedIterator struct { + Event *KeeperRegistryKeepersUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryKeepersUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryKeepersUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryKeepersUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryKeepersUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryKeepersUpdatedIterator) 
Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryKeepersUpdated struct { + Keepers []common.Address + Payees []common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterKeepersUpdated(opts *bind.FilterOpts) (*KeeperRegistryKeepersUpdatedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "KeepersUpdated") + if err != nil { + return nil, err + } + return &KeeperRegistryKeepersUpdatedIterator{contract: _KeeperRegistry.contract, event: "KeepersUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchKeepersUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryKeepersUpdated) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "KeepersUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryKeepersUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "KeepersUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseKeepersUpdated(log types.Log) (*KeeperRegistryKeepersUpdated, error) { + event := new(KeeperRegistryKeepersUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "KeepersUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnerFundsWithdrawnIterator struct { + Event *KeeperRegistryOwnerFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnerFundsWithdrawnIterator) Next() bool { + + if 
it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &KeeperRegistryOwnerFundsWithdrawnIterator{contract: _KeeperRegistry.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnerFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case 
sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryOwnerFundsWithdrawn, error) { + event := new(KeeperRegistryOwnerFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnershipTransferRequestedIterator struct { + Event *KeeperRegistryOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) 
(*KeeperRegistryOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryOwnershipTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnershipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryOwnershipTransferRequested, error) { + event := new(KeeperRegistryOwnershipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != 
nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnershipTransferredIterator struct { + Event *KeeperRegistryOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryOwnershipTransferredIterator{contract: _KeeperRegistry.contract, event: 
"OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnershipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryOwnershipTransferred, error) { + event := new(KeeperRegistryOwnershipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPausedIterator struct { + Event *KeeperRegistryPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryPausedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &KeeperRegistryPausedIterator{contract: _KeeperRegistry.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePaused(log types.Log) (*KeeperRegistryPaused, error) { + event := new(KeeperRegistryPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + 
} + event.Raw = log + return event, nil +} + +type KeeperRegistryPayeeshipTransferRequestedIterator struct { + Event *KeeperRegistryPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPayeeshipTransferRequested struct { + Keeper common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferRequestedIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := 
_KeeperRegistry.contract.FilterLogs(opts, "PayeeshipTransferRequested", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPayeeshipTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferRequested, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PayeeshipTransferRequested", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPayeeshipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryPayeeshipTransferRequested, error) { + event := new(KeeperRegistryPayeeshipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPayeeshipTransferredIterator 
struct { + Event *KeeperRegistryPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPayeeshipTransferred struct { + Keeper common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferredIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PayeeshipTransferred", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return 
&KeeperRegistryPayeeshipTransferredIterator{contract: _KeeperRegistry.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferred, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PayeeshipTransferred", keeperRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPayeeshipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryPayeeshipTransferred, error) { + event := new(KeeperRegistryPayeeshipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPaymentWithdrawnIterator struct { + Event *KeeperRegistryPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*KeeperRegistryPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPaymentWithdrawn struct { + Keeper common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, keeper []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryPaymentWithdrawnIterator, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PaymentWithdrawn", keeperRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPaymentWithdrawnIterator{contract: _KeeperRegistry.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- 
*KeeperRegistryPaymentWithdrawn, keeper []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var keeperRule []interface{} + for _, keeperItem := range keeper { + keeperRule = append(keeperRule, keeperItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PaymentWithdrawn", keeperRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPaymentWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryPaymentWithdrawn, error) { + event := new(KeeperRegistryPaymentWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUnpausedIterator struct { + Event *KeeperRegistryUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return 
true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryUnpausedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &KeeperRegistryUnpausedIterator{contract: _KeeperRegistry.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUnpaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUnpaused(log types.Log) (*KeeperRegistryUnpaused, error) { + event := new(KeeperRegistryUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return 
nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepAdminTransferRequestedIterator struct { + Event *KeeperRegistryUpkeepAdminTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepAdminTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepAdminTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepAdminTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepAdminTransferRequested struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryUpkeepAdminTransferRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, 
"UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepAdminTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "UpkeepAdminTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepAdminTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryUpkeepAdminTransferRequested, error) { + event := new(KeeperRegistryUpkeepAdminTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepAdminTransferredIterator struct { + Event *KeeperRegistryUpkeepAdminTransferred + + 
contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepAdminTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepAdminTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepAdminTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepAdminTransferred struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryUpkeepAdminTransferredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepAdminTransferredIterator{contract: _KeeperRegistry.contract, event: "UpkeepAdminTransferred", logs: 
logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepAdminTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryUpkeepAdminTransferred, error) { + event := new(KeeperRegistryUpkeepAdminTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepCanceledIterator struct { + Event *KeeperRegistryUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(KeeperRegistryUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepCanceledIterator{contract: _KeeperRegistry.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, 
atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepCanceled) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepCanceled(log types.Log) (*KeeperRegistryUpkeepCanceled, error) { + event := new(KeeperRegistryUpkeepCanceled) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepCheckDataUpdatedIterator struct { + Event *KeeperRegistryUpkeepCheckDataUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepCheckDataUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCheckDataUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCheckDataUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*KeeperRegistryUpkeepCheckDataUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepCheckDataUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepCheckDataUpdated struct { + Id *big.Int + NewCheckData []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepCheckDataUpdated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepCheckDataUpdatedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepCheckDataUpdated", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepCheckDataUpdatedIterator{contract: _KeeperRegistry.contract, event: "UpkeepCheckDataUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepCheckDataUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCheckDataUpdated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepCheckDataUpdated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepCheckDataUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCheckDataUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepCheckDataUpdated(log types.Log) (*KeeperRegistryUpkeepCheckDataUpdated, error) { + event := 
new(KeeperRegistryUpkeepCheckDataUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCheckDataUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepGasLimitSetIterator struct { + Event *KeeperRegistryUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepGasLimitSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepGasLimitSetIterator{contract: _KeeperRegistry.contract, event: "UpkeepGasLimitSet", logs: logs, sub: sub}, nil +} + +func 
(_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepGasLimitSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryUpkeepGasLimitSet, error) { + event := new(KeeperRegistryUpkeepGasLimitSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepMigratedIterator struct { + Event *KeeperRegistryUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, 
log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepMigrated struct { + Id *big.Int + RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepMigratedIterator{contract: _KeeperRegistry.contract, event: "UpkeepMigrated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepMigrated) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) 
ParseUpkeepMigrated(log types.Log) (*KeeperRegistryUpkeepMigrated, error) { + event := new(KeeperRegistryUpkeepMigrated) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepPausedIterator struct { + Event *KeeperRegistryUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepPaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepPausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepPausedIterator{contract: _KeeperRegistry.contract, event: "UpkeepPaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry 
*KeeperRegistryFilterer) WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepPaused(log types.Log) (*KeeperRegistryUpkeepPaused, error) { + event := new(KeeperRegistryUpkeepPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepPerformedIterator struct { + Event *KeeperRegistryUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepPerformed struct { + Id *big.Int + Success bool + From common.Address + Payment *big.Int + PerformData []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool, from []common.Address) (*KeeperRegistryUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepPerformedIterator{contract: _KeeperRegistry.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPerformed, id []*big.Int, success []bool, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule, fromRule) + if err != nil { + return nil, err + 
} + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepPerformed) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepPerformed(log types.Log) (*KeeperRegistryUpkeepPerformed, error) { + event := new(KeeperRegistryUpkeepPerformed) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepReceivedIterator struct { + Event *KeeperRegistryUpkeepReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepReceivedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepReceived struct { 
+ Id *big.Int + StartingBalance *big.Int + ImportedFrom common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepReceivedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepReceivedIterator{contract: _KeeperRegistry.contract, event: "UpkeepReceived", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepReceived, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepReceived) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepReceived(log types.Log) (*KeeperRegistryUpkeepReceived, error) { + event := new(KeeperRegistryUpkeepReceived) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepRegisteredIterator struct { + Event *KeeperRegistryUpkeepRegistered + + contract *bind.BoundContract + event string + + logs 
chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepRegistered struct { + Id *big.Int + ExecuteGas uint32 + Admin common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepRegisteredIterator{contract: _KeeperRegistry.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, 
"UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepRegistered) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepRegistered(log types.Log) (*KeeperRegistryUpkeepRegistered, error) { + event := new(KeeperRegistryUpkeepRegistered) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepUnpausedIterator struct { + Event *KeeperRegistryUpkeepUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepUnpausedIterator) Close() error { + 
it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepUnpaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepUnpausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepUnpausedIterator{contract: _KeeperRegistry.contract, event: "UpkeepUnpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepUnpaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryUpkeepUnpaused, error) { + event := new(KeeperRegistryUpkeepUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetKeeperInfo struct { + Payee common.Address + Active bool + Balance *big.Int +} +type GetState struct { + State State 
+ Config Config + Keepers []common.Address +} +type GetUpkeep struct { + Target common.Address + ExecuteGas uint32 + CheckData []byte + Balance *big.Int + LastKeeper common.Address + Admin common.Address + MaxValidBlocknumber uint64 + AmountSpent *big.Int + Paused bool +} + +func (_KeeperRegistry *KeeperRegistry) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistry.abi.Events["ConfigSet"].ID: + return _KeeperRegistry.ParseConfigSet(log) + case _KeeperRegistry.abi.Events["FundsAdded"].ID: + return _KeeperRegistry.ParseFundsAdded(log) + case _KeeperRegistry.abi.Events["FundsWithdrawn"].ID: + return _KeeperRegistry.ParseFundsWithdrawn(log) + case _KeeperRegistry.abi.Events["KeepersUpdated"].ID: + return _KeeperRegistry.ParseKeepersUpdated(log) + case _KeeperRegistry.abi.Events["OwnerFundsWithdrawn"].ID: + return _KeeperRegistry.ParseOwnerFundsWithdrawn(log) + case _KeeperRegistry.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistry.ParseOwnershipTransferRequested(log) + case _KeeperRegistry.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistry.ParseOwnershipTransferred(log) + case _KeeperRegistry.abi.Events["Paused"].ID: + return _KeeperRegistry.ParsePaused(log) + case _KeeperRegistry.abi.Events["PayeeshipTransferRequested"].ID: + return _KeeperRegistry.ParsePayeeshipTransferRequested(log) + case _KeeperRegistry.abi.Events["PayeeshipTransferred"].ID: + return _KeeperRegistry.ParsePayeeshipTransferred(log) + case _KeeperRegistry.abi.Events["PaymentWithdrawn"].ID: + return _KeeperRegistry.ParsePaymentWithdrawn(log) + case _KeeperRegistry.abi.Events["Unpaused"].ID: + return _KeeperRegistry.ParseUnpaused(log) + case _KeeperRegistry.abi.Events["UpkeepAdminTransferRequested"].ID: + return _KeeperRegistry.ParseUpkeepAdminTransferRequested(log) + case _KeeperRegistry.abi.Events["UpkeepAdminTransferred"].ID: + return _KeeperRegistry.ParseUpkeepAdminTransferred(log) + case 
_KeeperRegistry.abi.Events["UpkeepCanceled"].ID: + return _KeeperRegistry.ParseUpkeepCanceled(log) + case _KeeperRegistry.abi.Events["UpkeepCheckDataUpdated"].ID: + return _KeeperRegistry.ParseUpkeepCheckDataUpdated(log) + case _KeeperRegistry.abi.Events["UpkeepGasLimitSet"].ID: + return _KeeperRegistry.ParseUpkeepGasLimitSet(log) + case _KeeperRegistry.abi.Events["UpkeepMigrated"].ID: + return _KeeperRegistry.ParseUpkeepMigrated(log) + case _KeeperRegistry.abi.Events["UpkeepPaused"].ID: + return _KeeperRegistry.ParseUpkeepPaused(log) + case _KeeperRegistry.abi.Events["UpkeepPerformed"].ID: + return _KeeperRegistry.ParseUpkeepPerformed(log) + case _KeeperRegistry.abi.Events["UpkeepReceived"].ID: + return _KeeperRegistry.ParseUpkeepReceived(log) + case _KeeperRegistry.abi.Events["UpkeepRegistered"].ID: + return _KeeperRegistry.ParseUpkeepRegistered(log) + case _KeeperRegistry.abi.Events["UpkeepUnpaused"].ID: + return _KeeperRegistry.ParseUpkeepUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistryConfigSet) Topic() common.Hash { + return common.HexToHash("0xfe125a41957477226ba20f85ef30a4024ea3bb8d066521ddc16df3f2944de325") +} + +func (KeeperRegistryFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (KeeperRegistryFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (KeeperRegistryKeepersUpdated) Topic() common.Hash { + return common.HexToHash("0x056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f") +} + +func (KeeperRegistryOwnerFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1") +} + +func (KeeperRegistryOwnershipTransferRequested) Topic() common.Hash { + return 
common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistryOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistryPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (KeeperRegistryPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (KeeperRegistryPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (KeeperRegistryPaymentWithdrawn) Topic() common.Hash { + return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (KeeperRegistryUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (KeeperRegistryUpkeepAdminTransferRequested) Topic() common.Hash { + return common.HexToHash("0xb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b35") +} + +func (KeeperRegistryUpkeepAdminTransferred) Topic() common.Hash { + return common.HexToHash("0x5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c") +} + +func (KeeperRegistryUpkeepCanceled) Topic() common.Hash { + return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (KeeperRegistryUpkeepCheckDataUpdated) Topic() common.Hash { + return common.HexToHash("0x7b778136e5211932b51a145badd01959415e79e051a933604b3d323f862dcabf") +} + +func (KeeperRegistryUpkeepGasLimitSet) Topic() common.Hash { + return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c") +} + +func (KeeperRegistryUpkeepMigrated) Topic() common.Hash { + return 
common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff") +} + +func (KeeperRegistryUpkeepPaused) Topic() common.Hash { + return common.HexToHash("0x8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f") +} + +func (KeeperRegistryUpkeepPerformed) Topic() common.Hash { + return common.HexToHash("0xcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6") +} + +func (KeeperRegistryUpkeepReceived) Topic() common.Hash { + return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71") +} + +func (KeeperRegistryUpkeepRegistered) Topic() common.Hash { + return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + +func (KeeperRegistryUpkeepUnpaused) Topic() common.Hash { + return common.HexToHash("0x7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a47456") +} + +func (_KeeperRegistry *KeeperRegistry) Address() common.Address { + return _KeeperRegistry.address +} + +type KeeperRegistryInterface interface { + ARBNITROORACLE(opts *bind.CallOpts) (common.Address, error) + + FASTGASFEED(opts *bind.CallOpts) (common.Address, error) + + KEEPERREGISTRYLOGIC(opts *bind.CallOpts) (common.Address, error) + + PLI(opts *bind.CallOpts) (common.Address, error) + + PLIETHFEED(opts *bind.CallOpts) (common.Address, error) + + OPTIMISMORACLE(opts *bind.CallOpts) (common.Address, error) + + PAYMENTMODEL(opts *bind.CallOpts) (uint8, error) + + REGISTRYGASOVERHEAD(opts *bind.CallOpts) (*big.Int, error) + + GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetKeeperInfo(opts *bind.CallOpts, query common.Address) (GetKeeperInfo, + + error) + + GetMaxPaymentForGas(opts *bind.CallOpts, gasLimit *big.Int) (*big.Int, error) + + GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) + + GetState(opts 
*bind.CallOpts) (GetState, + + error) + + GetUpkeep(opts *bind.CallOpts, id *big.Int) (GetUpkeep, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Paused(opts *bind.CallOpts) (bool, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptPayeeship(opts *bind.TransactOpts, keeper common.Address) (*types.Transaction, error) + + AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) + + CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + CheckUpkeep(opts *bind.TransactOpts, id *big.Int, from common.Address) (*types.Transaction, error) + + MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) (*types.Transaction, error) + + PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + PerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) + + ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, config Config) (*types.Transaction, error) + + SetKeepers(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error) + + SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) 
(*types.Transaction, error) + + SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + TransferPayeeship(opts *bind.TransactOpts, keeper common.Address, proposed common.Address) (*types.Transaction, error) + + TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + UpdateCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) + + WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) + + WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) + + Receive(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*KeeperRegistryConfigSet, error) + + FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*KeeperRegistryFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsWithdrawn, id []*big.Int) (event.Subscription, 
error) + + ParseFundsWithdrawn(log types.Log) (*KeeperRegistryFundsWithdrawn, error) + + FilterKeepersUpdated(opts *bind.FilterOpts) (*KeeperRegistryKeepersUpdatedIterator, error) + + WatchKeepersUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryKeepersUpdated) (event.Subscription, error) + + ParseKeepersUpdated(log types.Log) (*KeeperRegistryKeepersUpdated, error) + + FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryOwnerFundsWithdrawnIterator, error) + + WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnerFundsWithdrawn) (event.Subscription, error) + + ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryOwnerFundsWithdrawn, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistryOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*KeeperRegistryPaused, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferRequestedIterator, error) + + 
WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferRequested, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, keeper []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferred, keeper []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, keeper []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaymentWithdrawn, keeper []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryPaymentWithdrawn, error) + + FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*KeeperRegistryUnpaused, error) + + FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryUpkeepAdminTransferRequestedIterator, error) + + WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryUpkeepAdminTransferRequested, error) + + FilterUpkeepAdminTransferred(opts 
*bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryUpkeepAdminTransferredIterator, error) + + WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryUpkeepAdminTransferred, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*KeeperRegistryUpkeepCanceled, error) + + FilterUpkeepCheckDataUpdated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepCheckDataUpdatedIterator, error) + + WatchUpkeepCheckDataUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCheckDataUpdated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepCheckDataUpdated(log types.Log) (*KeeperRegistryUpkeepCheckDataUpdated, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepGasLimitSetIterator, error) + + WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepMigrated(log types.Log) (*KeeperRegistryUpkeepMigrated, error) + + FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepPausedIterator, error) + + WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- 
*KeeperRegistryUpkeepPaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPaused(log types.Log) (*KeeperRegistryUpkeepPaused, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool, from []common.Address) (*KeeperRegistryUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPerformed, id []*big.Int, success []bool, from []common.Address) (event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*KeeperRegistryUpkeepPerformed, error) + + FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*KeeperRegistryUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*KeeperRegistryUpkeepRegistered, error) + + FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepUnpausedIterator, error) + + WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepUnpaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_registry_wrapper2_0/keeper_registry_wrapper2_0.go b/core/gethwrappers/generated/keeper_registry_wrapper2_0/keeper_registry_wrapper2_0.go new file mode 100644 index 00000000..329d73b2 --- /dev/null +++ b/core/gethwrappers/generated/keeper_registry_wrapper2_0/keeper_registry_wrapper2_0.go @@ -0,0 +1,5342 @@ +// Code generated - DO NOT EDIT. 
+// This file is a generated binding and any manual changes will be lost. + +package keeper_registry_wrapper2_0 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type OnchainConfig struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + MaxCheckDataSize uint32 + MaxPerformDataSize uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrar common.Address +} + +type State struct { + Nonce uint32 + OwnerLinkBalance *big.Int + ExpectedLinkBalance *big.Int + TotalPremium *big.Int + NumUpkeeps *big.Int + ConfigCount uint32 + LatestConfigBlockNumber uint32 + LatestConfigDigest [32]byte + LatestEpoch uint32 + Paused bool +} + +type UpkeepInfo struct { + Target common.Address + ExecuteGas uint32 + CheckData []byte + Balance *big.Int + Admin common.Address + MaxValidBlocknumber uint64 + LastPerformBlockNumber uint32 + AmountSpent *big.Int + Paused bool + OffchainConfig []byte +} + +var KeeperRegistryMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"contractKeeperRegistryBase2_0\",\"name\":\"keeperRegistryLogic\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CheckDataExceedsLimit\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ConfigDigestMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfFaultyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientFunds\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidReport\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxCheckDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxPerformDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnchainConfigNonEmpty\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveTransmitters\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"Only
CallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyPausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUnpausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReentrantCall\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistryPaused\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedTransmitter\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepAlreadyExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepCancelled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"CancelledUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"
configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"InsufficientFundsUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,
\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"PayeesUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"ReorgedUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"i
nputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"StaleUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"UpkeepCheckDataUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\
"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepOffchainConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepPaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"checkBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasOverhead\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalPayment\",\"type\":\"uint96\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalT
ype\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepUnpaused\",\"type\":\"event\"},{\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"acceptUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"cancelUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"},{\"internalType\":\"enumUpkeepFailureReason\",\"name\":\"upkeepFailureReason\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fastGasWei\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"linkNative\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"
inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveUpkeepIDs\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFastGasFeedAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getKeeperRegistryLogicAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkNativeFeedAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"getMaxPaymentForGas\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"maxPayment\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"minBalance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMode\",\"outputs\":[{\"internalType\":\"enumKeeperRegistryBase2_0.Mode\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"}],\"name\":\"getPeerRegistryMigrationPermission\",\"outputs\":[{\"internalType\":\"enumKeeperRegistryBase2_0.MigrationPermission\",\"name\":\"\",\"type
\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getSignerInfo\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"index\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getState\",\"outputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"nonce\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"ownerLinkBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"expectedLinkBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"totalPremium\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"numUpkeeps\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"latestConfigBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"latestConfigDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"latestEpoch\",\"type\":\"uint32\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"}],\"internalType\":\"structState\",\"name\":\"state\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCheckDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformDataS
ize\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"registrar\",\"type\":\"address\"}],\"internalType\":\"structOnchainConfig\",\"name\":\"config\",\"type\":\"tuple\"},{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"getTransmitterInfo\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"index\",\"type\":\"uint8\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"lastCollected\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getUpkeep\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"executeGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"lastPerformBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"amountSpent\",\"type\":\"uint96\"},{\"internalType\":\"bool\",
\"name\":\"paused\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"internalType\":\"structUpkeepInfo\",\"name\":\"upkeepInfo\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"ids\",\"type\":\"uint256[]\"},{\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"migrateUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"pauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalTyp
e\":\"bytes\",\"name\":\"encodedUpkeeps\",\"type\":\"bytes\"}],\"name\":\"receiveUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"registerUpkeep\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"setPayees\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"peer\",\"type\":\"address\"},{\"internalType\":\"enumKeeperRegistryBase2_0.MigrationPermission\",\"name\":\"permission\",\"type\":\"uint8\"}],\"name\":\"setPeerRegistryMigrationPermission\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"uint32
\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"setUpkeepGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"config\",\"type\":\"bytes\"}],\"name\":\"setUpkeepOffchainConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"simulatePerformUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferUpkeepAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"rawReport\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"fu
nction\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"unpauseUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"updateCheckData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepTranscoderVersion\",\"outputs\":[{\"internalType\":\"enumUpkeepFormat\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepVersion\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawOwnerFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", + Bin: 
"0x6101206040523480156200001257600080fd5b50604051620063c0380380620063c08339810160408190526200003591620003a7565b806001600160a01b0316634b4fd03b6040518163ffffffff1660e01b815260040160206040518083038186803b1580156200006f57600080fd5b505afa15801562000084573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620000aa9190620003ce565b816001600160a01b031663ca30e6036040518163ffffffff1660e01b815260040160206040518083038186803b158015620000e457600080fd5b505afa158015620000f9573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200011f9190620003a7565b826001600160a01b031663b10b673c6040518163ffffffff1660e01b815260040160206040518083038186803b1580156200015957600080fd5b505afa1580156200016e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001949190620003a7565b836001600160a01b0316636709d0e56040518163ffffffff1660e01b815260040160206040518083038186803b158015620001ce57600080fd5b505afa158015620001e3573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620002099190620003a7565b3380600081620002605760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b038481169190911790915581161562000293576200029381620002fb565b505050836002811115620002ab57620002ab620003f1565b60e0816002811115620002c257620002c2620003f1565b60f81b9052506001600160601b0319606093841b811660805291831b821660a052821b811660c05292901b909116610100525062000420565b6001600160a01b038116331415620003565760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000257565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600060208284031215620003ba57600080fd5b8151620003c78162000407565b9392505050565b600060208284031215620003e157600080fd5b815160038
110620003c757600080fd5b634e487b7160e01b600052602160045260246000fd5b6001600160a01b03811681146200041d57600080fd5b50565b60805160601c60a05160601c60c05160601c60e05160f81c6101005160601c615f14620004ac6000396000818161057f0152610a8201526000818161052c01528181613c5801528181614313015281816144ca015261470f0152600081816105d3015261342a015260008181610859015261351301526000818161091401526113210152615f146000f3fe6080604052600436106103175760003560e01c80638e86139b1161019a578063b1dc65a4116100e1578063e3d0e7121161008a578063f2fde38b11610064578063f2fde38b146109d8578063f7d334ba146109f8578063faa3e99614610a2a57610326565b8063e3d0e71214610938578063eb5dcd6c14610760578063ed56b3e11461095857610326565b8063c7c3a19a116100bb578063c7c3a19a146108d8578063c804802214610550578063ca30e6031461090557610326565b8063b1dc65a414610898578063b657bc9c146108b8578063b79550be1461047357610326565b8063aab9edd611610143578063b10b673c1161011d578063b10b673c1461084a578063b121e1471461087d578063b148ab6b1461055057610326565b8063aab9edd614610796578063aed2e929146107bd578063afcb95d7146107f457610326565b8063a4c0ed3611610174578063a4c0ed3614610740578063a710b22114610760578063a72aa27e1461077b57610326565b80638e86139b1461070a578063948108f7146107255780639fab4386146106ef57610326565b8063572e05e11161025e57806381ff7048116102075780638765ecbe116101e15780638765ecbe146105505780638da5cb5b146106c45780638dcf0fe7146106ef57610326565b806381ff70481461063a5780638456cb591461047357806385c1b0ba146106a457610326565b8063744bfe6111610238578063744bfe611461043d57806379ba5097146106255780637d9b97e01461047357610326565b8063572e05e1146105705780636709d0e5146105c45780636ded9eae146105f757610326565b80633b9cce59116102c057806348013d7b1161029a57806348013d7b146104fb5780634b4fd03b1461051d5780635165f2f51461055057610326565b80633b9cce59146104585780633f4ba83a14610473578063421d183b1461048857610326565b80631865c57d116102f15780631865c57d146103f7578063187256e81461041d5780631a2af0111461043d57610326565b806306e3b6321461032e5780630e08ae8414610364578063181f5a77146103a157610326565b3661032657610324610a7
d565b005b610324610a7d565b34801561033a57600080fd5b5061034e6103493660046151c8565b610aa8565b60405161035b9190615577565b60405180910390f35b34801561037057600080fd5b5061038461037f36600461530a565b610ba2565b6040516bffffffffffffffffffffffff909116815260200161035b565b3480156103ad57600080fd5b506103ea6040518060400160405280601481526020017f4b6565706572526567697374727920322e302e3200000000000000000000000081525081565b60405161035b9190615625565b34801561040357600080fd5b5061040c610ce5565b60405161035b95949392919061565f565b34801561042957600080fd5b50610324610438366004614cac565b6110a8565b34801561044957600080fd5b50610324610438366004615157565b34801561046457600080fd5b50610324610438366004614de2565b34801561047f57600080fd5b506103246110b4565b34801561049457600080fd5b506104a86104a3366004614c56565b6110bc565b60408051951515865260ff90941660208601526bffffffffffffffffffffffff9283169385019390935216606083015273ffffffffffffffffffffffffffffffffffffffff16608082015260a00161035b565b34801561050757600080fd5b50610510600081565b60405161035b9190615652565b34801561052957600080fd5b507f0000000000000000000000000000000000000000000000000000000000000000610510565b34801561055c57600080fd5b5061032461056b36600461513e565b6111da565b34801561057c57600080fd5b507f00000000000000000000000000000000000000000000000000000000000000005b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161035b565b3480156105d057600080fd5b507f000000000000000000000000000000000000000000000000000000000000000061059f565b34801561060357600080fd5b50610617610612366004614d3a565b6111e5565b60405190815260200161035b565b34801561063157600080fd5b506103246111fa565b34801561064657600080fd5b50610681601254600e5463ffffffff6c0100000000000000000000000083048116937001000000000000000000000000000000009093041691565b6040805163ffffffff94851681529390921660208401529082015260600161035b565b3480156106b057600080fd5b506103246106bf366004614fa8565b6112fc565b3480156106d057600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff1661059f565b3480156106fb57600080fd5b506103246106bf36600461517c565
b34801561071657600080fd5b50610324610438366004615018565b34801561073157600080fd5b506103246104383660046152e5565b34801561074c57600080fd5b5061032461075b366004614cde565b611309565b34801561076c57600080fd5b50610324610438366004614c73565b34801561078757600080fd5b506103246104383660046152c0565b3480156107a257600080fd5b506107ab600281565b60405160ff909116815260200161035b565b3480156107c957600080fd5b506107dd6107d836600461517c565b611524565b60408051921515835260208301919091520161035b565b34801561080057600080fd5b50600e54600f54604080516000815260208101939093527c010000000000000000000000000000000000000000000000000000000090910463ffffffff169082015260600161035b565b34801561085657600080fd5b507f000000000000000000000000000000000000000000000000000000000000000061059f565b34801561088957600080fd5b5061032461056b366004614c56565b3480156108a457600080fd5b506103246108b3366004614ef1565b61168f565b3480156108c457600080fd5b506103846108d336600461513e565b61224c565b3480156108e457600080fd5b506108f86108f336600461513e565b612270565b60405161035b919061576c565b34801561091157600080fd5b507f000000000000000000000000000000000000000000000000000000000000000061059f565b34801561094457600080fd5b50610324610953366004614e24565b61259b565b34801561096457600080fd5b506109bf610973366004614c56565b73ffffffffffffffffffffffffffffffffffffffff1660009081526009602090815260409182902082518084019093525460ff8082161515808552610100909204169290910182905291565b60408051921515835260ff90911660208301520161035b565b3480156109e457600080fd5b506103246109f3366004614c56565b613392565b348015610a0457600080fd5b50610a18610a1336600461513e565b6133a3565b60405161035b969594939291906155bb565b348015610a3657600080fd5b50610a70610a45366004614c56565b73ffffffffffffffffffffffffffffffffffffffff1660009081526016602052604090205460ff1690565b60405161035b9190615638565b610aa67f00000000000000000000000000000000000000000000000000000000000000006133c6565b565b60606000610ab660026133ea565b9050808410610af1576040517f1390f2a100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8
2610b0357610b008482615c79565b92505b60008367ffffffffffffffff811115610b1e57610b1e615e32565b604051908082528060200260200182016040528015610b47578160200160208202803683370190505b50905060005b84811015610b9957610b6a610b628288615afa565b6002906133f4565b828281518110610b7c57610b7c615e03565b602090810291909101015280610b9181615d3d565b915050610b4d565b50949350505050565b6040805161012081018252600f5460ff808216835263ffffffff6101008084048216602086015265010000000000840482169585019590955262ffffff6901000000000000000000840416606085015261ffff6c0100000000000000000000000084041660808501526e01000000000000000000000000000083048216151560a08501526f010000000000000000000000000000008304909116151560c08401526bffffffffffffffffffffffff70010000000000000000000000000000000083041660e08401527c010000000000000000000000000000000000000000000000000000000090910416918101919091526000908180610ca183613407565b6012549193509150610cdc90849087907801000000000000000000000000000000000000000000000000900463ffffffff1685856000613603565b95945050505050565b6040805161014081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e081018290526101008101829052610120810191909152604080516101a081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e081018290526101008101829052610120810182905261014081018290526101608101829052610180810191909152604080516101408101825260125468010000000000000000900463ffffffff1681526011546bffffffffffffffffffffffff908116602083015260155492820192909252600f54700100000000000000000000000000000000900490911660608083019190915290819060009060808101610e1a60026133ea565b815260125463ffffffff6c01000000000000000000000000808304821660208086019190915270010000000000000000000000000000000084048316604080870191909152600e54606080880191909152600f547c0100000000000000000000000000000000000000000000000000000000810486166080808a019190915260ff6e01000000000000000000000000000083048116151560a09a8b015284516101a0810186526101008085048a1682526501000000000085048a16828901528
98b168288015262ffffff69010000000000000000008604169582019590955261ffff88850416928101929092526010546bffffffffffffffffffffffff81169a83019a909a526401000000008904881660c0830152740100000000000000000000000000000000000000008904881660e083015278010000000000000000000000000000000000000000000000009098049096169186019190915260135461012086015260145461014086015273ffffffffffffffffffffffffffffffffffffffff96849004871661016086015260115493909304909516610180840152600a8054865181840281018401909752808752969b509299508a958a959394600b949316929185919083018282801561102757602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610ffc575b505050505092508180548060200260200160405190810160405280929190818152602001828054801561109057602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311611065575b50505050509150945094509450945094509091929394565b6110b0610a7d565b5050565b610aa6610a7d565b73ffffffffffffffffffffffffffffffffffffffff811660009081526008602090815260408083208151608081018352905460ff80821615158352610100820416938201939093526bffffffffffffffffffffffff6201000084048116928201929092526e010000000000000000000000000000909204811660608301819052600f54849384938493849384926111689291700100000000000000000000000000000000900416615c90565b600b5490915060009061117b9083615b91565b9050826000015183602001518285604001516111979190615b56565b6060959095015173ffffffffffffffffffffffffffffffffffffffff9b8c166000908152600c6020526040902054929c919b959a50985093169550919350505050565b6111e2610a7d565b50565b60006111ef610a7d565b979650505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314611280576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173fffffffffffffff
fffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b611304610a7d565b505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614611378576040517fc8bad78d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b602081146113b2576040517fdfe9309000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006113c08284018461513e565b600081815260046020526040902054909150640100000000900463ffffffff90811614611419576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000818152600460205260409020600101546114549085906c0100000000000000000000000090046bffffffffffffffffffffffff16615b56565b600082815260046020526040902060010180546bffffffffffffffffffffffff929092166c01000000000000000000000000027fffffffffffffffff000000000000000000000000ffffffffffffffffffffffff9092169190911790556015546114bf908590615afa565b6015556040516bffffffffffffffffffffffff8516815273ffffffffffffffffffffffffffffffffffffffff86169082907fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039060200160405180910390a35050505050565b60008061152f61364e565b600f546e010000000000000000000000000000900460ff161561157e576040517f24522f3400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600085815260046020908152604091829020825160e081018452815463ffffffff8082168352640100000000820481168386015268010000000000000000820460ff16151583870152690100000000000000000090910473ffffffffffffffffffffffffffffffffffffffff1660608301526001909201546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a08301527801000000000000000000000000000000000000000000000000900490911660c08201528251601f87018390048302810183019093528583529161168291839190889088908190840183828082843760009201919091525061368692505050565b9250925050935093915050565b60005a6040805161012081018252600f5460ff808216835261010
080830463ffffffff90811660208601526501000000000084048116958501959095526901000000000000000000830462ffffff1660608501526c01000000000000000000000000830461ffff1660808501526e0100000000000000000000000000008304821615801560a08601526f010000000000000000000000000000008404909216151560c085015270010000000000000000000000000000000083046bffffffffffffffffffffffff1660e08501527c0100000000000000000000000000000000000000000000000000000000909204909316908201529192506117bd576040517f24522f3400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3360009081526008602052604090205460ff16611806576040517f1099ed7500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006118478a8a8080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506137ec92505050565b9050600081604001515167ffffffffffffffff81111561186957611869615e32565b60405190808252806020026020018201604052801561191d57816020015b604080516101a081018252600060c0820181815260e083018290526101008301829052610120830182905261014083018290526101608301829052610180830182905282526020808301829052928201819052606082018190526080820181905260a082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9092019101816118875790505b5090506000805b836040015151811015611bc957600460008560400151838151811061194b5761194b615e03565b6020908102919091018101518252818101929092526040908101600020815160e081018352815463ffffffff8082168352640100000000820481169583019590955268010000000000000000810460ff16151593820193909352690100000000000000000090920473ffffffffffffffffffffffffffffffffffffffff166060830152600101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a08301527801000000000000000000000000000000000000000000000000900490911660c08201528351849083908110611a3557611a35615e03565b602002602001015160000181905250611a9e85848381518110611a5a57611a5a615e03565b6020026020010151600001516000015186606001518481518110611a8057611a80615e03565b60200260200101516040015151876000015
188602001516001613603565b838281518110611ab057611ab0615e03565b6020026020010151604001906bffffffffffffffffffffffff1690816bffffffffffffffffffffffff1681525050611b5e84604001518281518110611af757611af7615e03565b602002602001015185606001518381518110611b1557611b15615e03565b6020026020010151858481518110611b2f57611b2f615e03565b602002602001015160000151868581518110611b4d57611b4d615e03565b602002602001015160400151613898565b838281518110611b7057611b70615e03565b60200260200101516020019015159081151581525050828181518110611b9857611b98615e03565b60200260200101516020015115611bb757611bb4600183615ad4565b91505b80611bc181615d3d565b915050611924565b5061ffff8116611bdd575050505050612242565b600e548d3514611c19576040517fdfdcf8e700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8351611c26906001615b31565b60ff1689141580611c375750888714155b15611c6e576040517f0244f71a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b611c7e8d8d8d8d8d8d8d8d6139e9565b60005b836040015151811015611e6857828181518110611ca057611ca0615e03565b60200260200101516020015115611e5657611cb9613c52565b63ffffffff166004600086604001518481518110611cd957611cd9615e03565b6020026020010151815260200190815260200160002060010160189054906101000a900463ffffffff1663ffffffff161415611d41576040517fb55ac75400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b611d89838281518110611d5657611d56615e03565b60200260200101516000015185606001518381518110611d7857611d78615e03565b602002602001015160400151613686565b848381518110611d9b57611d9b615e03565b6020026020010151606001858481518110611db857611db8615e03565b60200260200101516080018281525082151515158152505050828181518110611de357611de3615e03565b60200260200101516080015186611dfa9190615c79565b9550611e04613c52565b6004600086604001518481518110611e1e57611e1e615e03565b6020026020010151815260200190815260200160002060010160186101000a81548163ffffffff021916908363ffffffff1602179055505b80611e6081615d3d565b915050611c81565b508351611e76906001615b31565b611e859
060ff1661044c615bbc565b616914611e938d6010615bbc565b5a611e9e9089615c79565b611ea89190615afa565b611eb29190615afa565b611ebc9190615afa565b94506116a8611ecf61ffff831687615b7d565b611ed99190615afa565b945060008060008060005b8760400151518110156120e457868181518110611f0357611f03615e03565b602002602001015160200151156120d257611f458a89606001518381518110611f2e57611f2e615e03565b602002602001015160400151518b60000151613d17565b878281518110611f5757611f57615e03565b602002602001015160a0018181525050611fb38989604001518381518110611f8157611f81615e03565b6020026020010151898481518110611f9b57611f9b615e03565b60200260200101518b600001518c602001518b613d35565b9093509150611fc28285615b56565b9350611fce8386615b56565b9450868181518110611fe257611fe2615e03565b60200260200101516060015115158860400151828151811061200657612006615e03565b60200260200101517f29233ba1d7b302b8fe230ad0b81423aba5371b2a6f6b821228212385ee6a44208a60600151848151811061204557612045615e03565b6020026020010151600001518a858151811061206357612063615e03565b6020026020010151608001518b868151811061208157612081615e03565b602002602001015160a0015187896120999190615b56565b6040805163ffffffff90951685526020850193909352918301526bffffffffffffffffffffffff16606082015260800160405180910390a35b806120dc81615d3d565b915050611ee4565b5050336000908152600860205260409020805484925060029061211c9084906201000090046bffffffffffffffffffffffff16615b56565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555080600f60000160108282829054906101000a90046bffffffffffffffffffffffff166121769190615b56565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555060008f6001600381106121b9576121b9615e03565b602002013560001c9050600060088264ffffffffff16901c905087610100015163ffffffff168163ffffffff16111561223857600f80547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167c010000000000000000000000000000000000000000000000000000000063ffffffff8416021790555b5050505050505050505b5050505050505050565b60008181526004602052604081205461226a9063fff
fffff16610ba2565b92915050565b604080516101408101825260008082526020820181905260609282018390528282018190526080820181905260a0820181905260c0820181905260e082018190526101008201526101208101919091526000828152600460209081526040808320815160e081018352815463ffffffff8082168352640100000000820481168387015268010000000000000000820460ff16151583860152690100000000000000000090910473ffffffffffffffffffffffffffffffffffffffff908116606084019081526001909401546bffffffffffffffffffffffff80821660808601526c0100000000000000000000000082041660a085015278010000000000000000000000000000000000000000000000009004821660c08401528451610140810186529351168352815116828501528685526007909352928190208054929392918301916123bc90615ce9565b80601f01602080910402602001604051908101604052809291908181526020018280546123e890615ce9565b80156124355780601f1061240a57610100808354040283529160200191612435565b820191906000526020600020905b81548152906001019060200180831161241857829003601f168201915b505050505081526020018260a001516bffffffffffffffffffffffff1681526020016005600086815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001826020015163ffffffff1667ffffffffffffffff1681526020018260c0015163ffffffff16815260200182608001516bffffffffffffffffffffffff16815260200182604001511515815260200160176000868152602001908152602001600020805461251290615ce9565b80601f016020809104026020016040519081016040528092919081815260200182805461253e90615ce9565b801561258b5780601f106125605761010080835404028352916020019161258b565b820191906000526020600020905b81548152906001019060200180831161256e57829003601f168201915b5050505050815250915050919050565b6125a3613e28565b601f865111156125df576040517f25d0209c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60ff8416612619576040517fe77dba5600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b845186511415806126385750612630846003615c25565b60ff16865111155b1561266f576040517f1d2d1c58000000000
00000000000000000000000000000000000000000000000815260040160405180910390fd5b600f54600b547001000000000000000000000000000000009091046bffffffffffffffffffffffff169060005b816bffffffffffffffffffffffff16811015612704576126f1600b82815481106126c8576126c8615e03565b60009182526020909120015473ffffffffffffffffffffffffffffffffffffffff168484613ea9565b50806126fc81615d3d565b91505061269c565b5060008060005b836bffffffffffffffffffffffff1681101561280d57600a818154811061273457612734615e03565b600091825260209091200154600b805473ffffffffffffffffffffffffffffffffffffffff9092169450908290811061276f5761276f615e03565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff868116845260098352604080852080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001690559116808452600890925290912080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016905591508061280581615d3d565b91505061270b565b5061281a600a6000614882565b612826600b6000614882565b604080516080810182526000808252602082018190529181018290526060810182905290805b8c51811015612baa57600960008e838151811061286b5761286b615e03565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff1682528101919091526040016000205460ff16156128d6576040517f77cea0fa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60405180604001604052806001151581526020018260ff16815250600960008f848151811061290757612907615e03565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff1682528181019290925260400160002082518154939092015160ff16610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff921515929092167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000909316929092171790558b518c90829081106129af576129af615e03565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff81166000908152600883526040908190208151608081018352905460ff80821615801584526101008304909116958301959095526bffffffffffffffffffffffff6201000082048116938301939093526e010000000000000000000000000000900
4909116606082015294509250612a74576040517f6a7281ad00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001835260ff80821660208086019182526bffffffffffffffffffffffff808b166060880190815273ffffffffffffffffffffffffffffffffffffffff871660009081526008909352604092839020885181549551948a0151925184166e010000000000000000000000000000027fffffffffffff000000000000000000000000ffffffffffffffffffffffffffff939094166201000002929092167fffffffffffff000000000000000000000000000000000000000000000000ffff94909616610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff921515929092167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00009095169490941717919091169290921791909117905580612ba281615d3d565b91505061284c565b50508a51612bc09150600a9060208d01906148a0565b508851612bd490600b9060208c01906148a0565b50600087806020019051810190612beb919061504e565b60125460c082015191925063ffffffff640100000000909104811691161015612c40576040517f39abc10400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60125460e082015163ffffffff74010000000000000000000000000000000000000000909204821691161015612ca2576040517f1fa9bdcb00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60125461010082015163ffffffff7801000000000000000000000000000000000000000000000000909204821691161015612d09576040517fd1d5faa800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040518061012001604052808a60ff168152602001826000015163ffffffff168152602001826020015163ffffffff168152602001826060015162ffffff168152602001826080015161ffff168152602001600015158152602001600015158152602001866bffffffffffffffffffffffff168152602001600063ffffffff16815250600f60008201518160000160006101000a81548160ff021916908360ff16021790555060208201518160000160016101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160056101000a81548163ffffffff021916908363ffffffff16021790555060608201518160000160096101000a81548162ffffff021916908362f
fffff160217905550608082015181600001600c6101000a81548161ffff021916908361ffff16021790555060a082015181600001600e6101000a81548160ff02191690831515021790555060c082015181600001600f6101000a81548160ff02191690831515021790555060e08201518160000160106101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555061010082015181600001601c6101000a81548163ffffffff021916908363ffffffff1602179055509050506040518061016001604052808260a001516bffffffffffffffffffffffff16815260200182610160015173ffffffffffffffffffffffffffffffffffffffff168152602001601060010160009054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff16815260200182610180015173ffffffffffffffffffffffffffffffffffffffff168152602001826040015163ffffffff1681526020018260c0015163ffffffff168152602001601060020160089054906101000a900463ffffffff1663ffffffff1681526020016010600201600c9054906101000a900463ffffffff1663ffffffff168152602001601060020160109054906101000a900463ffffffff1663ffffffff1681526020018260e0015163ffffffff16815260200182610100015163ffffffff16815250601060008201518160000160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550602082015181600001600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060408201518160010160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550606082015181600101600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060808201518160020160006101000a81548163ffffffff021916908363ffffffff16021790555060a08201518160020160046101000a81548163ffffffff021916908363ffffffff16021790555060c08201518160020160086101000a81548163ffffffff021916908363ffffffff16021790555060e082015181600201600c6101000a81548163ffffffff021916908363ffffffff1602179055506101008201518160020160106101000a81548163ffffffff021916908363ffffffff1602179055506101208201518160020160146101000a81548163ffffffff021916908363fff
fffff1602179055506101408201518160020160186101000a81548163ffffffff021916908363ffffffff1602179055509050508061012001516013819055508061014001516014819055506000601060020160109054906101000a900463ffffffff16905061326f613c52565b601280547fffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffffff1670010000000000000000000000000000000063ffffffff9384160217808255600192600c916132d69185916c01000000000000000000000000900416615b12565b92506101000a81548163ffffffff021916908363ffffffff16021790555061332046306010600201600c9054906101000a900463ffffffff1663ffffffff168f8f8f8f8f8f6140d0565b600e819055507f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e0581600e546010600201600c9054906101000a900463ffffffff168f8f8f8f8f8f60405161337c99989796959493929190615948565b60405180910390a1505050505050505050505050565b61339a613e28565b6111e28161417a565b600060606000806000806133b561364e565b6133bd610a7d565b91939550919395565b3660008037600080366000845af43d6000803e8080156133e5573d6000f35b3d6000fd5b600061226a825490565b60006134008383614270565b9392505050565b6000806000836060015162ffffff1690506000808263ffffffff161190506000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b15801561348e57600080fd5b505afa1580156134a2573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906134c69190615327565b50945090925050506000811315806134dd57508142105b806134fe57508280156134fe57506134f58242615c79565b8463ffffffff16105b1561350d576013549550613511565b8095505b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b15801561357757600080fd5b505afa15801561358b573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906135af9190615327565b50945090925050506000811315806135c657508142105b806135e757508280156135e757506135de8242615c79565b8463ffffffff16105b15613
5f65760145494506135fa565b8094505b50505050915091565b60008061361486896000015161429a565b905060008061362f8a8a63ffffffff16858a8a60018b6142de565b909250905061363e8183615b56565b93505050505b9695505050505050565b3215610aa6576040517fb60ac5db00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600f5460009081906f01000000000000000000000000000000900460ff16156136db576040517f37ed32e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600f80547fffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff166f010000000000000000000000000000001790555a90506000634585e33b60e01b846040516024016137339190615625565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090506137ab856000015163ffffffff168660600151836146bd565b92505a6137b89083615c79565b915050600f80547fffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff16905590939092509050565b6138176040518060800160405280600081526020016000815260200160608152602001606081525090565b6000806000808580602001905181019061383191906151ea565b93509350935093508051825114613874576040517fb55ac75400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60408051608081018252948552602085019390935291830152606082015292915050565b60008260c0015163ffffffff16846000015163ffffffff1610156138e95760405185907f5aa44821f7938098502bff537fbbdc9aaaa2fa655c10740646fce27e54987a8990600090a25060006139e1565b602084015184516138ff9063ffffffff16614709565b146139375760405185907f561ff77e59394941a01a456497a9418dea82e2a39abb3ecebfb1cef7e0bfdc1390600090a25060006139e1565b61393f613c52565b836020015163ffffffff16116139825760405185907fd84831b6a3a7fbd333f42fe7f9104a139da6cca4cc1507aef4ddad79b31d017f90600090a25060006139e1565b816bffffffffffffffffffffffff168360a001516bffffffffffffffffffffffff1610156139dd5760405185907f7895fdfe292beab0842d5beccd078e85296b9e17a30eaee4c261a2696b84eb96906
00090a25060006139e1565b5060015b949350505050565b600087876040516139fb929190615540565b604051908190038120613a12918b9060200161560b565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815282825280516020918201208383019092526000808452908301819052909250906000805b88811015613be957600185878360208110613a7e57613a7e615e03565b613a8b91901a601b615b31565b8c8c85818110613a9d57613a9d615e03565b905060200201358b8b86818110613ab657613ab6615e03565b9050602002013560405160008152602001604052604051613af3949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa158015613b15573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015173ffffffffffffffffffffffffffffffffffffffff811660009081526009602090815290849020838501909452925460ff8082161515808552610100909204169383019390935290955093509050613bc3576040517f0f4c073700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b826020015160080260ff166001901b840193508080613be190615d3d565b915050613a61565b50827e01010101010101010101010101010101010101010101010101010101010101841614613c44576040517fc103be2e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b505050505050505050505050565b600060017f00000000000000000000000000000000000000000000000000000000000000006002811115613c8857613c88615dd4565b1415613d1257606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b158015613cd557600080fd5b505afa158015613ce9573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613d0d9190614fff565b905090565b504390565b6000613d23838361429a565b90508084101561340057509192915050565b600080613d508887608001518860a0015188888860016142de565b90925090506000613d618284615b56565b600089815260046020526040902060010180549192508291600c90613da59084906c0100000000000000000000000090046bffffffffffffffffffffffff16615c90565b82546101009290920a6bfffffffffffffffffffff
fff81810219909316918316021790915560008a815260046020526040812060010180548594509092613dee91859116615b56565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555050965096945050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610aa6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401611277565b73ffffffffffffffffffffffffffffffffffffffff831660009081526008602090815260408083208151608081018352905460ff80821615158352610100820416938201939093526bffffffffffffffffffffffff6201000084048116928201929092526e01000000000000000000000000000090920416606082018190528290613f349086615c90565b90506000613f428583615b91565b90508083604001818151613f569190615b56565b6bffffffffffffffffffffffff9081169091528716606085015250613f7b8582615c4e565b613f859083615c90565b60118054600090613fa59084906bffffffffffffffffffffffff16615b56565b825461010092830a6bffffffffffffffffffffffff81810219909216928216029190911790925573ffffffffffffffffffffffffffffffffffffffff999099166000908152600860209081526040918290208751815492890151938901516060909901517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00009093169015157fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff161760ff909316909b02919091177fffffffffffff000000000000000000000000000000000000000000000000ffff1662010000878416027fffffffffffff000000000000000000000000ffffffffffffffffffffffffffff16176e010000000000000000000000000000919092160217909755509095945050505050565b6000808a8a8a8a8a8a8a8a8a6040516020016140f4999897969594939291906158a3565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e01000000000000000000000000000000000000000000000000000000000000179b9a5050505050505050505050565b73ffffffffffffffffffffffffffffffffffffffff81163314156141fa576040517f08c379a0000
00000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401611277565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600082600001828154811061428757614287615e03565b9060005260206000200154905092915050565b60006142ad63ffffffff84166014615bbc565b6142b8836001615b31565b6142c79060ff16611d4c615bbc565b6142d49062011170615afa565b6134009190615afa565b6000806000896080015161ffff16876142f79190615bbc565b90508380156143055750803a105b1561430d57503a5b600060027f0000000000000000000000000000000000000000000000000000000000000000600281111561434357614343615dd4565b14156144c65760408051600081526020810190915285156143a257600036604051806080016040528060488152602001615ec06048913960405160200161438c93929190615550565b604051602081830303815290604052905061441e565b6012546143d2907801000000000000000000000000000000000000000000000000900463ffffffff166004615bf9565b63ffffffff1667ffffffffffffffff8111156143f0576143f0615e32565b6040519080825280601f01601f19166020018201604052801561441a576020820181803683370190505b5090505b6040517f49948e0e00000000000000000000000000000000000000000000000000000000815273420000000000000000000000000000000000000f906349948e0e9061446e908490600401615625565b60206040518083038186803b15801561448657600080fd5b505afa15801561449a573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906144be9190614fff565b915050614582565b60017f000000000000000000000000000000000000000000000000000000000000000060028111156144fa576144fa615dd4565b141561458257606c73ffffffffffffffffffffffffffffffffffffffff1663c6f7de0e6040518163ffffffff1660e01b815260040160206040518083038186803b15801561454757600080fd5b505afa15801561455b573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061457f9190614fff565b90505b8461459e57808b6
080015161ffff1661459b9190615bbc565b90505b6145ac61ffff871682615b7d565b9050600087826145bc8c8e615afa565b6145c69086615bbc565b6145d09190615afa565b6145e290670de0b6b3a7640000615bbc565b6145ec9190615b7d565b905060008c6040015163ffffffff1664e8d4a5100061460b9190615bbc565b898e6020015163ffffffff16858f886146249190615bbc565b61462e9190615afa565b61463c90633b9aca00615bbc565b6146469190615bbc565b6146509190615b7d565b61465a9190615afa565b90506b033b2e3c9fd0803ce80000006146738284615afa565b11156146ab576040517f2ad7547a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b909c909b509950505050505050505050565b60005a6113888110156146cf57600080fd5b6113888103905084604082048203116146e757600080fd5b50823b6146f357600080fd5b60008083516020850160008789f1949350505050565b600060017f0000000000000000000000000000000000000000000000000000000000000000600281111561473f5761473f615dd4565b1415614878576000606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b15801561478e57600080fd5b505afa1580156147a2573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906147c69190614fff565b905080831015806147e157506101006147df8483615c79565b115b156147ef5750600092915050565b6040517f2b407a8200000000000000000000000000000000000000000000000000000000815260048101849052606490632b407a829060240160206040518083038186803b15801561484057600080fd5b505afa158015614854573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906134009190614fff565b504090565b919050565b50805460008255906000526020600020908101906111e2919061492a565b82805482825590600052602060002090810192821561491a579160200282015b8281111561491a57825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9091161782556020909201916001909101906148c0565b5061492692915061492a565b5090565b5b80821115614926576000815560010161492b565b805161487d81615e71565b60008083601f84011261495c57600080fd5b50813567ffffffffffffffff8111156149745760008
0fd5b6020830191508360208260051b850101111561498f57600080fd5b9250929050565b600082601f8301126149a757600080fd5b813560206149bc6149b783615a6a565b615a1b565b80838252828201915082860187848660051b89010111156149dc57600080fd5b60005b85811015614a045781356149f281615e71565b845292840192908401906001016149df565b5090979650505050505050565b600082601f830112614a2257600080fd5b81516020614a326149b783615a6a565b80838252828201915082860187848660051b8901011115614a5257600080fd5b60005b85811015614a0457815167ffffffffffffffff80821115614a7557600080fd5b818a0191506060807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0848e03011215614aad57600080fd5b614ab56159ce565b88840151614ac281615e93565b81526040848101518a830152918401519183831115614ae057600080fd5b82850194508d603f860112614af457600080fd5b898501519350614b066149b785615a8e565b92508383528d81858701011115614b1c57600080fd5b614b2b848b8501838801615cbd565b810191909152865250509284019290840190600101614a55565b60008083601f840112614b5757600080fd5b50813567ffffffffffffffff811115614b6f57600080fd5b60208301915083602082850101111561498f57600080fd5b600082601f830112614b9857600080fd5b8135614ba66149b782615a8e565b818152846020838601011115614bbb57600080fd5b816020850160208301376000918101602001919091529392505050565b805161ffff8116811461487d57600080fd5b805162ffffff8116811461487d57600080fd5b805161487d81615e93565b803567ffffffffffffffff8116811461487d57600080fd5b803560ff8116811461487d57600080fd5b805169ffffffffffffffffffff8116811461487d57600080fd5b805161487d81615ea5565b600060208284031215614c6857600080fd5b813561340081615e71565b60008060408385031215614c8657600080fd5b8235614c9181615e71565b91506020830135614ca181615e71565b809150509250929050565b60008060408385031215614cbf57600080fd5b8235614cca81615e71565b9150602083013560048110614ca157600080fd5b60008060008060608587031215614cf457600080fd5b8435614cff81615e71565b935060208501359250604085013567ffffffffffffffff811115614d2257600080fd5b614d2e87828801614b45565b95989497509550505050565b600080600080600080600060a0888a031215614d5557600080fd5b8735614d6
081615e71565b96506020880135614d7081615e93565b95506040880135614d8081615e71565b9450606088013567ffffffffffffffff80821115614d9d57600080fd5b614da98b838c01614b45565b909650945060808a0135915080821115614dc257600080fd5b50614dcf8a828b01614b45565b989b979a50959850939692959293505050565b60008060208385031215614df557600080fd5b823567ffffffffffffffff811115614e0c57600080fd5b614e188582860161494a565b90969095509350505050565b60008060008060008060c08789031215614e3d57600080fd5b863567ffffffffffffffff80821115614e5557600080fd5b614e618a838b01614996565b97506020890135915080821115614e7757600080fd5b614e838a838b01614996565b9650614e9160408a01614c20565b95506060890135915080821115614ea757600080fd5b614eb38a838b01614b87565b9450614ec160808a01614c08565b935060a0890135915080821115614ed757600080fd5b50614ee489828a01614b87565b9150509295509295509295565b60008060008060008060008060e0898b031215614f0d57600080fd5b606089018a811115614f1e57600080fd5b8998503567ffffffffffffffff80821115614f3857600080fd5b614f448c838d01614b45565b909950975060808b0135915080821115614f5d57600080fd5b614f698c838d0161494a565b909750955060a08b0135915080821115614f8257600080fd5b50614f8f8b828c0161494a565b999c989b50969995989497949560c00135949350505050565b600080600060408486031215614fbd57600080fd5b833567ffffffffffffffff811115614fd457600080fd5b614fe08682870161494a565b9094509250506020840135614ff481615e71565b809150509250925092565b60006020828403121561501157600080fd5b5051919050565b6000806020838503121561502b57600080fd5b823567ffffffffffffffff81111561504257600080fd5b614e1885828601614b45565b60006101a0828403121561506157600080fd5b6150696159f7565b61507283614bfd565b815261508060208401614bfd565b602082015261509160408401614bfd565b60408201526150a260608401614bea565b60608201526150b360808401614bd8565b60808201526150c460a08401614c4b565b60a08201526150d560c08401614bfd565b60c08201526150e660e08401614bfd565b60e08201526101006150f9818501614bfd565b908201526101208381015190820152610140808401519082015261016061512181850161493f565b9082015261018061513384820161493f565b908201529392505050565b6000602
0828403121561515057600080fd5b5035919050565b6000806040838503121561516a57600080fd5b823591506020830135614ca181615e71565b60008060006040848603121561519157600080fd5b83359250602084013567ffffffffffffffff8111156151af57600080fd5b6151bb86828701614b45565b9497909650939450505050565b600080604083850312156151db57600080fd5b50508035926020909101359150565b6000806000806080858703121561520057600080fd5b845193506020808601519350604086015167ffffffffffffffff8082111561522757600080fd5b818801915088601f83011261523b57600080fd5b81516152496149b782615a6a565b8082825285820191508585018c878560051b880101111561526957600080fd5b600095505b8386101561528c57805183526001959095019491860191860161526e565b5060608b015190975094505050808311156152a657600080fd5b50506152b487828801614a11565b91505092959194509250565b600080604083850312156152d357600080fd5b823591506020830135614ca181615e93565b600080604083850312156152f857600080fd5b823591506020830135614ca181615ea5565b60006020828403121561531c57600080fd5b813561340081615e93565b600080600080600060a0868803121561533f57600080fd5b61534886614c31565b945060208601519350604086015192506060860151915061536b60808701614c31565b90509295509295909350565b600081518084526020808501945080840160005b838110156153bd57815173ffffffffffffffffffffffffffffffffffffffff168752958201959082019060010161538b565b509495945050505050565b600081518084526153e0816020860160208601615cbd565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b805163ffffffff1682526020810151615433602084018263ffffffff169052565b50604081015161544b604084018263ffffffff169052565b506060810151615462606084018262ffffff169052565b506080810151615478608084018261ffff169052565b5060a081015161549860a08401826bffffffffffffffffffffffff169052565b5060c08101516154b060c084018263ffffffff169052565b5060e08101516154c860e084018263ffffffff169052565b506101008181015163ffffffff8116848301525050610120818101519083015261014080820151908301526101608082015173ffffffffffffffffffffffffffffffffffffffff81168285015250506101808181015173fffffffffffffffffffff
fffffffffffffffffff8116848301525b50505050565b8183823760009101908152919050565b82848237600083820160008152835161556d818360208801615cbd565b0195945050505050565b6020808252825182820181905260009190848201906040850190845b818110156155af57835183529284019291840191600101615593565b50909695505050505050565b861515815260c0602082015260006155d660c08301886153c8565b9050600786106155e8576155e8615dd4565b8560408301528460608301528360808301528260a0830152979650505050505050565b828152608081016060836020840137600081529392505050565b60208152600061340060208301846153c8565b602081016004831061564c5761564c615dd4565b91905290565b6020810161564c83615e61565b855163ffffffff1681526000610340602088015161568d60208501826bffffffffffffffffffffffff169052565b506040880151604084015260608801516156b760608501826bffffffffffffffffffffffff169052565b506080880151608084015260a08801516156d960a085018263ffffffff169052565b5060c08801516156f160c085018263ffffffff169052565b5060e088015160e0840152610100808901516157148286018263ffffffff169052565b505061012088810151151590840152615731610140840188615412565b806102e084015261574481840187615377565b90508281036103008401526157598186615377565b91505061364461032083018460ff169052565b6020815261579360208201835173ffffffffffffffffffffffffffffffffffffffff169052565b600060208301516157ac604084018263ffffffff169052565b5060408301516101408060608501526157c96101608501836153c8565b915060608501516157ea60808601826bffffffffffffffffffffffff169052565b50608085015173ffffffffffffffffffffffffffffffffffffffff811660a08601525060a085015167ffffffffffffffff811660c08601525060c085015163ffffffff811660e08601525060e0850151610100615856818701836bffffffffffffffffffffffff169052565b860151905061012061586b8682018315159052565b8601518584037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00183870152905061364483826153c8565b60006101208b835273ffffffffffffffffffffffffffffffffffffffff8b16602084015267ffffffffffffffff808b1660408501528160608501526158ea8285018b615377565b915083820360808501526158fe828a615377565b915060ff881660a085015283820360c08501526
1591b82886153c8565b90861660e0850152838103610100850152905061593881856153c8565b9c9b505050505050505050505050565b600061012063ffffffff808d1684528b6020850152808b166040850152508060608401526159788184018a615377565b9050828103608084015261598c8189615377565b905060ff871660a084015282810360c08401526159a981876153c8565b905067ffffffffffffffff851660e084015282810361010084015261593881856153c8565b6040516060810167ffffffffffffffff811182821017156159f1576159f1615e32565b60405290565b6040516101a0810167ffffffffffffffff811182821017156159f1576159f1615e32565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715615a6257615a62615e32565b604052919050565b600067ffffffffffffffff821115615a8457615a84615e32565b5060051b60200190565b600067ffffffffffffffff821115615aa857615aa8615e32565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600061ffff808316818516808303821115615af157615af1615d76565b01949350505050565b60008219821115615b0d57615b0d615d76565b500190565b600063ffffffff808316818516808303821115615af157615af1615d76565b600060ff821660ff84168060ff03821115615b4e57615b4e615d76565b019392505050565b60006bffffffffffffffffffffffff808316818516808303821115615af157615af1615d76565b600082615b8c57615b8c615da5565b500490565b60006bffffffffffffffffffffffff80841680615bb057615bb0615da5565b92169190910492915050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615615bf457615bf4615d76565b500290565b600063ffffffff80831681851681830481118215151615615c1c57615c1c615d76565b02949350505050565b600060ff821660ff84168160ff0481118215151615615c4657615c46615d76565b029392505050565b60006bffffffffffffffffffffffff80831681851681830481118215151615615c1c57615c1c615d76565b600082821015615c8b57615c8b615d76565b500390565b60006bffffffffffffffffffffffff83811690831681811015615cb557615cb5615d76565b039392505050565b60005b83811015615cd8578181015183820152602001615cc0565b8381111561553a5750506000910152565b600181811c90821680615cfd57607f821
691505b60208210811415615d37577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415615d6f57615d6f615d76565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600381106111e2576111e2615dd4565b73ffffffffffffffffffffffffffffffffffffffff811681146111e257600080fd5b63ffffffff811681146111e257600080fd5b6bffffffffffffffffffffffff811681146111e257600080fdfe307866666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000806000a", +} + +var KeeperRegistryABI = KeeperRegistryMetaData.ABI + +var KeeperRegistryBin = KeeperRegistryMetaData.Bin + +func DeployKeeperRegistry(auth *bind.TransactOpts, backend bind.ContractBackend, keeperRegistryLogic common.Address) (common.Address, *types.Transaction, *KeeperRegistry, error) { + parsed, err := KeeperRegistryMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeeperRegistryBin), backend, keeperRegistryLogic) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistry{address: address, abi: *parsed, KeeperRegistryCaller: KeeperRegistryCaller{contract: contract}, KeeperRegistryTransactor: KeeperRegistryTransactor{contract: contract}, 
KeeperRegistryFilterer: KeeperRegistryFilterer{contract: contract}}, nil +} + +type KeeperRegistry struct { + address common.Address + abi abi.ABI + KeeperRegistryCaller + KeeperRegistryTransactor + KeeperRegistryFilterer +} + +type KeeperRegistryCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistryTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistryFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistrySession struct { + Contract *KeeperRegistry + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistryCallerSession struct { + Contract *KeeperRegistryCaller + CallOpts bind.CallOpts +} + +type KeeperRegistryTransactorSession struct { + Contract *KeeperRegistryTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistryRaw struct { + Contract *KeeperRegistry +} + +type KeeperRegistryCallerRaw struct { + Contract *KeeperRegistryCaller +} + +type KeeperRegistryTransactorRaw struct { + Contract *KeeperRegistryTransactor +} + +func NewKeeperRegistry(address common.Address, backend bind.ContractBackend) (*KeeperRegistry, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistry(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistry{address: address, abi: abi, KeeperRegistryCaller: KeeperRegistryCaller{contract: contract}, KeeperRegistryTransactor: KeeperRegistryTransactor{contract: contract}, KeeperRegistryFilterer: KeeperRegistryFilterer{contract: contract}}, nil +} + +func NewKeeperRegistryCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryCaller, error) { + contract, err := bindKeeperRegistry(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryCaller{contract: contract}, nil +} + +func NewKeeperRegistryTransactor(address common.Address, transactor bind.ContractTransactor) 
(*KeeperRegistryTransactor, error) { + contract, err := bindKeeperRegistry(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryTransactor{contract: contract}, nil +} + +func NewKeeperRegistryFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistryFilterer, error) { + contract, err := bindKeeperRegistry(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistryFilterer{contract: contract}, nil +} + +func bindKeeperRegistry(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistryMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperRegistry *KeeperRegistryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistry.Contract.KeeperRegistryCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.Contract.KeeperRegistryTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistry *KeeperRegistryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistry.Contract.KeeperRegistryTransactor.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistry.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_KeeperRegistry *KeeperRegistryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistry.Contract.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getActiveUpkeepIDs", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _KeeperRegistry.Contract.GetActiveUpkeepIDs(&_KeeperRegistry.CallOpts, startIndex, maxCount) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetActiveUpkeepIDs(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _KeeperRegistry.Contract.GetActiveUpkeepIDs(&_KeeperRegistry.CallOpts, startIndex, maxCount) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetFastGasFeedAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getFastGasFeedAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetFastGasFeedAddress() (common.Address, error) { + return _KeeperRegistry.Contract.GetFastGasFeedAddress(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetFastGasFeedAddress() (common.Address, error) { + return 
_KeeperRegistry.Contract.GetFastGasFeedAddress(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetKeeperRegistryLogicAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getKeeperRegistryLogicAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetKeeperRegistryLogicAddress() (common.Address, error) { + return _KeeperRegistry.Contract.GetKeeperRegistryLogicAddress(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetKeeperRegistryLogicAddress() (common.Address, error) { + return _KeeperRegistry.Contract.GetKeeperRegistryLogicAddress(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetLinkAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getLinkAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetLinkAddress() (common.Address, error) { + return _KeeperRegistry.Contract.GetLinkAddress(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetLinkAddress() (common.Address, error) { + return _KeeperRegistry.Contract.GetLinkAddress(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetLinkNativeFeedAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getLinkNativeFeedAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry 
*KeeperRegistrySession) GetLinkNativeFeedAddress() (common.Address, error) { + return _KeeperRegistry.Contract.GetLinkNativeFeedAddress(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetLinkNativeFeedAddress() (common.Address, error) { + return _KeeperRegistry.Contract.GetLinkNativeFeedAddress(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetMaxPaymentForGas(opts *bind.CallOpts, gasLimit uint32) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getMaxPaymentForGas", gasLimit) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetMaxPaymentForGas(gasLimit uint32) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMaxPaymentForGas(&_KeeperRegistry.CallOpts, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetMaxPaymentForGas(gasLimit uint32) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMaxPaymentForGas(&_KeeperRegistry.CallOpts, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getMinBalanceForUpkeep", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMinBalanceForUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetMinBalanceForUpkeep(id *big.Int) (*big.Int, error) { + return _KeeperRegistry.Contract.GetMinBalanceForUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetMode(opts *bind.CallOpts) 
(uint8, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getMode") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetMode() (uint8, error) { + return _KeeperRegistry.Contract.GetMode(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetMode() (uint8, error) { + return _KeeperRegistry.Contract.GetMode(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getPeerRegistryMigrationPermission", peer) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return _KeeperRegistry.Contract.GetPeerRegistryMigrationPermission(&_KeeperRegistry.CallOpts, peer) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetPeerRegistryMigrationPermission(peer common.Address) (uint8, error) { + return _KeeperRegistry.Contract.GetPeerRegistryMigrationPermission(&_KeeperRegistry.CallOpts, peer) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetSignerInfo(opts *bind.CallOpts, query common.Address) (GetSignerInfo, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getSignerInfo", query) + + outstruct := new(GetSignerInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.Index = *abi.ConvertType(out[1], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetSignerInfo(query common.Address) (GetSignerInfo, + + error) { + return 
_KeeperRegistry.Contract.GetSignerInfo(&_KeeperRegistry.CallOpts, query) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetSignerInfo(query common.Address) (GetSignerInfo, + + error) { + return _KeeperRegistry.Contract.GetSignerInfo(&_KeeperRegistry.CallOpts, query) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetState(opts *bind.CallOpts) (GetState, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getState") + + outstruct := new(GetState) + if err != nil { + return *outstruct, err + } + + outstruct.State = *abi.ConvertType(out[0], new(State)).(*State) + outstruct.Config = *abi.ConvertType(out[1], new(OnchainConfig)).(*OnchainConfig) + outstruct.Signers = *abi.ConvertType(out[2], new([]common.Address)).(*[]common.Address) + outstruct.Transmitters = *abi.ConvertType(out[3], new([]common.Address)).(*[]common.Address) + outstruct.F = *abi.ConvertType(out[4], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetState() (GetState, + + error) { + return _KeeperRegistry.Contract.GetState(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetState() (GetState, + + error) { + return _KeeperRegistry.Contract.GetState(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetTransmitterInfo(opts *bind.CallOpts, query common.Address) (GetTransmitterInfo, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getTransmitterInfo", query) + + outstruct := new(GetTransmitterInfo) + if err != nil { + return *outstruct, err + } + + outstruct.Active = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.Index = *abi.ConvertType(out[1], new(uint8)).(*uint8) + outstruct.Balance = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.LastCollected = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.Payee = *abi.ConvertType(out[4], 
new(common.Address)).(*common.Address) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetTransmitterInfo(query common.Address) (GetTransmitterInfo, + + error) { + return _KeeperRegistry.Contract.GetTransmitterInfo(&_KeeperRegistry.CallOpts, query) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetTransmitterInfo(query common.Address) (GetTransmitterInfo, + + error) { + return _KeeperRegistry.Contract.GetTransmitterInfo(&_KeeperRegistry.CallOpts, query) +} + +func (_KeeperRegistry *KeeperRegistryCaller) GetUpkeep(opts *bind.CallOpts, id *big.Int) (UpkeepInfo, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "getUpkeep", id) + + if err != nil { + return *new(UpkeepInfo), err + } + + out0 := *abi.ConvertType(out[0], new(UpkeepInfo)).(*UpkeepInfo) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) GetUpkeep(id *big.Int) (UpkeepInfo, error) { + return _KeeperRegistry.Contract.GetUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) GetUpkeep(id *big.Int) (UpkeepInfo, error) { + return _KeeperRegistry.Contract.GetUpkeep(&_KeeperRegistry.CallOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _KeeperRegistry.Contract.LatestConfigDetails(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry 
*KeeperRegistryCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _KeeperRegistry.Contract.LatestConfigDetails(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "latestConfigDigestAndEpoch") + + outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _KeeperRegistry.Contract.LatestConfigDigestAndEpoch(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _KeeperRegistry.Contract.LatestConfigDigestAndEpoch(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) Owner() (common.Address, error) { + return _KeeperRegistry.Contract.Owner(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) Owner() (common.Address, error) { + return _KeeperRegistry.Contract.Owner(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := 
_KeeperRegistry.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) TypeAndVersion() (string, error) { + return _KeeperRegistry.Contract.TypeAndVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) TypeAndVersion() (string, error) { + return _KeeperRegistry.Contract.TypeAndVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "upkeepTranscoderVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) UpkeepTranscoderVersion() (uint8, error) { + return _KeeperRegistry.Contract.UpkeepTranscoderVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) UpkeepTranscoderVersion() (uint8, error) { + return _KeeperRegistry.Contract.UpkeepTranscoderVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) UpkeepVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "upkeepVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) UpkeepVersion() (uint8, error) { + return _KeeperRegistry.Contract.UpkeepVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) UpkeepVersion() (uint8, error) { + return _KeeperRegistry.Contract.UpkeepVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AcceptOwnership(opts *bind.TransactOpts) 
(*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "acceptOwnership") +} + +func (_KeeperRegistry *KeeperRegistrySession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptOwnership(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptOwnership(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "acceptPayeeship", transmitter) +} + +func (_KeeperRegistry *KeeperRegistrySession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptPayeeship(&_KeeperRegistry.TransactOpts, transmitter) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptPayeeship(&_KeeperRegistry.TransactOpts, transmitter) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "acceptUpkeepAdmin", id) +} + +func (_KeeperRegistry *KeeperRegistrySession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptUpkeepAdmin(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AcceptUpkeepAdmin(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptUpkeepAdmin(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "addFunds", id, 
amount) +} + +func (_KeeperRegistry *KeeperRegistrySession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AddFunds(&_KeeperRegistry.TransactOpts, id, amount) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AddFunds(id *big.Int, amount *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.AddFunds(&_KeeperRegistry.TransactOpts, id, amount) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "cancelUpkeep", id) +} + +func (_KeeperRegistry *KeeperRegistrySession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CancelUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) CancelUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CancelUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) CheckUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "checkUpkeep", id) +} + +func (_KeeperRegistry *KeeperRegistrySession) CheckUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CheckUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) CheckUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.CheckUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "migrateUpkeeps", ids, destination) +} + +func (_KeeperRegistry *KeeperRegistrySession) MigrateUpkeeps(ids []*big.Int, destination common.Address) 
(*types.Transaction, error) { + return _KeeperRegistry.Contract.MigrateUpkeeps(&_KeeperRegistry.TransactOpts, ids, destination) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) MigrateUpkeeps(ids []*big.Int, destination common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.MigrateUpkeeps(&_KeeperRegistry.TransactOpts, ids, destination) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistrySession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.OnTokenTransfer(&_KeeperRegistry.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.OnTokenTransfer(&_KeeperRegistry.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "pause") +} + +func (_KeeperRegistry *KeeperRegistrySession) Pause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Pause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Pause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Pause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "pauseUpkeep", id) +} + +func (_KeeperRegistry *KeeperRegistrySession) PauseUpkeep(id *big.Int) (*types.Transaction, error) { + return 
_KeeperRegistry.Contract.PauseUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) PauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.PauseUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "receiveUpkeeps", encodedUpkeeps) +} + +func (_KeeperRegistry *KeeperRegistrySession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.ReceiveUpkeeps(&_KeeperRegistry.TransactOpts, encodedUpkeeps) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) ReceiveUpkeeps(encodedUpkeeps []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.ReceiveUpkeeps(&_KeeperRegistry.TransactOpts, encodedUpkeeps) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "recoverFunds") +} + +func (_KeeperRegistry *KeeperRegistrySession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.RecoverFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) RecoverFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.RecoverFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "registerUpkeep", target, gasLimit, admin, checkData, offchainConfig) +} + +func (_KeeperRegistry *KeeperRegistrySession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig 
[]byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.RegisterUpkeep(&_KeeperRegistry.TransactOpts, target, gasLimit, admin, checkData, offchainConfig) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) RegisterUpkeep(target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.RegisterUpkeep(&_KeeperRegistry.TransactOpts, target, gasLimit, admin, checkData, offchainConfig) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setConfig", signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetConfig(signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetConfig(&_KeeperRegistry.TransactOpts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetConfig(signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetConfig(&_KeeperRegistry.TransactOpts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetPayees(opts *bind.TransactOpts, payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setPayees", payees) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetPayees(payees 
[]common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetPayees(&_KeeperRegistry.TransactOpts, payees) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetPayees(payees []common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetPayees(&_KeeperRegistry.TransactOpts, payees) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setPeerRegistryMigrationPermission", peer, permission) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetPeerRegistryMigrationPermission(&_KeeperRegistry.TransactOpts, peer, permission) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetPeerRegistryMigrationPermission(peer common.Address, permission uint8) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetPeerRegistryMigrationPermission(&_KeeperRegistry.TransactOpts, peer, permission) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setUpkeepGasLimit", id, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetUpkeepGasLimit(&_KeeperRegistry.TransactOpts, id, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetUpkeepGasLimit(id *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetUpkeepGasLimit(&_KeeperRegistry.TransactOpts, id, gasLimit) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetUpkeepOffchainConfig(opts *bind.TransactOpts, 
id *big.Int, config []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setUpkeepOffchainConfig", id, config) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetUpkeepOffchainConfig(id *big.Int, config []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetUpkeepOffchainConfig(&_KeeperRegistry.TransactOpts, id, config) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetUpkeepOffchainConfig(id *big.Int, config []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetUpkeepOffchainConfig(&_KeeperRegistry.TransactOpts, id, config) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SimulatePerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "simulatePerformUpkeep", id, performData) +} + +func (_KeeperRegistry *KeeperRegistrySession) SimulatePerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SimulatePerformUpkeep(&_KeeperRegistry.TransactOpts, id, performData) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SimulatePerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SimulatePerformUpkeep(&_KeeperRegistry.TransactOpts, id, performData) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transferOwnership", to) +} + +func (_KeeperRegistry *KeeperRegistrySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferOwnership(&_KeeperRegistry.TransactOpts, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return 
_KeeperRegistry.Contract.TransferOwnership(&_KeeperRegistry.TransactOpts, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transferPayeeship", transmitter, proposed) +} + +func (_KeeperRegistry *KeeperRegistrySession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferPayeeship(&_KeeperRegistry.TransactOpts, transmitter, proposed) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferPayeeship(&_KeeperRegistry.TransactOpts, transmitter, proposed) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transferUpkeepAdmin", id, proposed) +} + +func (_KeeperRegistry *KeeperRegistrySession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferUpkeepAdmin(&_KeeperRegistry.TransactOpts, id, proposed) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) TransferUpkeepAdmin(id *big.Int, proposed common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferUpkeepAdmin(&_KeeperRegistry.TransactOpts, id, proposed) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transmit", reportContext, rawReport, rs, ss, rawVs) +} + +func (_KeeperRegistry *KeeperRegistrySession) 
Transmit(reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.Transmit(&_KeeperRegistry.TransactOpts, reportContext, rawReport, rs, ss, rawVs) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Transmit(reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.Transmit(&_KeeperRegistry.TransactOpts, reportContext, rawReport, rs, ss, rawVs) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "unpause") +} + +func (_KeeperRegistry *KeeperRegistrySession) Unpause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Unpause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Unpause() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Unpause(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "unpauseUpkeep", id) +} + +func (_KeeperRegistry *KeeperRegistrySession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.UnpauseUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) UnpauseUpkeep(id *big.Int) (*types.Transaction, error) { + return _KeeperRegistry.Contract.UnpauseUpkeep(&_KeeperRegistry.TransactOpts, id) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) UpdateCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "updateCheckData", id, newCheckData) +} + +func (_KeeperRegistry *KeeperRegistrySession) UpdateCheckData(id *big.Int, 
newCheckData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.UpdateCheckData(&_KeeperRegistry.TransactOpts, id, newCheckData) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) UpdateCheckData(id *big.Int, newCheckData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.UpdateCheckData(&_KeeperRegistry.TransactOpts, id, newCheckData) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "withdrawFunds", id, to) +} + +func (_KeeperRegistry *KeeperRegistrySession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawFunds(&_KeeperRegistry.TransactOpts, id, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) WithdrawFunds(id *big.Int, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawFunds(&_KeeperRegistry.TransactOpts, id, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "withdrawOwnerFunds") +} + +func (_KeeperRegistry *KeeperRegistrySession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawOwnerFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) WithdrawOwnerFunds() (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawOwnerFunds(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "withdrawPayment", from, to) +} + +func (_KeeperRegistry *KeeperRegistrySession) WithdrawPayment(from common.Address, to common.Address) 
(*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawPayment(&_KeeperRegistry.TransactOpts, from, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) WithdrawPayment(from common.Address, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.WithdrawPayment(&_KeeperRegistry.TransactOpts, from, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.RawTransact(opts, calldata) +} + +func (_KeeperRegistry *KeeperRegistrySession) Fallback(calldata []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.Fallback(&_KeeperRegistry.TransactOpts, calldata) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.Fallback(&_KeeperRegistry.TransactOpts, calldata) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.RawTransact(opts, nil) +} + +func (_KeeperRegistry *KeeperRegistrySession) Receive() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Receive(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Receive() (*types.Transaction, error) { + return _KeeperRegistry.Contract.Receive(&_KeeperRegistry.TransactOpts) +} + +type KeeperRegistryCancelledUpkeepReportIterator struct { + Event *KeeperRegistryCancelledUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryCancelledUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { 
+ it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryCancelledUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryCancelledUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryCancelledUpkeepReport struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryCancelledUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryCancelledUpkeepReportIterator{contract: _KeeperRegistry.contract, event: "CancelledUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryCancelledUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + 
return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseCancelledUpkeepReport(log types.Log) (*KeeperRegistryCancelledUpkeepReport, error) { + event := new(KeeperRegistryCancelledUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryConfigSetIterator struct { + Event *KeeperRegistryConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_KeeperRegistry 
*KeeperRegistryFilterer) FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryConfigSetIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &KeeperRegistryConfigSetIterator{contract: _KeeperRegistry.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryConfigSet) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseConfigSet(log types.Log) (*KeeperRegistryConfigSet, error) { + event := new(KeeperRegistryConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryFundsAddedIterator struct { + Event *KeeperRegistryFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return 
false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryFundsAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryFundsAddedIterator{contract: _KeeperRegistry.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryFundsAdded) + if err := 
_KeeperRegistry.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseFundsAdded(log types.Log) (*KeeperRegistryFundsAdded, error) { + event := new(KeeperRegistryFundsAdded) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryFundsWithdrawnIterator struct { + Event *KeeperRegistryFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryFundsWithdrawnIterator, 
error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryFundsWithdrawnIterator{contract: _KeeperRegistry.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseFundsWithdrawn(log types.Log) (*KeeperRegistryFundsWithdrawn, error) { + event := new(KeeperRegistryFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryInsufficientFundsUpkeepReportIterator struct { + Event *KeeperRegistryInsufficientFundsUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryInsufficientFundsUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + 
select { + case log := <-it.logs: + it.Event = new(KeeperRegistryInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryInsufficientFundsUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryInsufficientFundsUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryInsufficientFundsUpkeepReport struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryInsufficientFundsUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryInsufficientFundsUpkeepReportIterator{contract: _KeeperRegistry.contract, event: "InsufficientFundsUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryInsufficientFundsUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseInsufficientFundsUpkeepReport(log types.Log) (*KeeperRegistryInsufficientFundsUpkeepReport, error) { + event := new(KeeperRegistryInsufficientFundsUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnerFundsWithdrawnIterator struct { + Event *KeeperRegistryOwnerFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnerFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it 
*KeeperRegistryOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &KeeperRegistryOwnerFundsWithdrawnIterator{contract: _KeeperRegistry.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnerFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryOwnerFundsWithdrawn, error) { + event := new(KeeperRegistryOwnerFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnershipTransferRequestedIterator struct { + Event *KeeperRegistryOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done 
bool + fail error +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryOwnershipTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferRequested, from 
[]common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnershipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryOwnershipTransferRequested, error) { + event := new(KeeperRegistryOwnershipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnershipTransferredIterator struct { + Event *KeeperRegistryOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + 
it.Event = new(KeeperRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryOwnershipTransferredIterator{contract: _KeeperRegistry.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := 
<-logs: + + event := new(KeeperRegistryOwnershipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryOwnershipTransferred, error) { + event := new(KeeperRegistryOwnershipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPausedIterator struct { + Event *KeeperRegistryPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPaused(opts *bind.FilterOpts) 
(*KeeperRegistryPausedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &KeeperRegistryPausedIterator{contract: _KeeperRegistry.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePaused(log types.Log) (*KeeperRegistryPaused, error) { + event := new(KeeperRegistryPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPayeesUpdatedIterator struct { + Event *KeeperRegistryPayeesUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPayeesUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(KeeperRegistryPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPayeesUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPayeesUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPayeesUpdated struct { + Transmitters []common.Address + Payees []common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPayeesUpdated(opts *bind.FilterOpts) (*KeeperRegistryPayeesUpdatedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return &KeeperRegistryPayeesUpdatedIterator{contract: _KeeperRegistry.contract, event: "PayeesUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeesUpdated) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPayeesUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePayeesUpdated(log types.Log) (*KeeperRegistryPayeesUpdated, error) { + event := new(KeeperRegistryPayeesUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeesUpdated", 
log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPayeeshipTransferRequestedIterator struct { + Event *KeeperRegistryPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPayeeshipTransferRequested struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferRequestedIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = 
append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPayeeshipTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPayeeshipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryPayeeshipTransferRequested, error) { + event := new(KeeperRegistryPayeeshipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } 
+ event.Raw = log + return event, nil +} + +type KeeperRegistryPayeeshipTransferredIterator struct { + Event *KeeperRegistryPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPayeeshipTransferred struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, 
"PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPayeeshipTransferredIterator{contract: _KeeperRegistry.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPayeeshipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryPayeeshipTransferred, error) { + event := new(KeeperRegistryPayeeshipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPaymentWithdrawnIterator struct { + Event *KeeperRegistryPaymentWithdrawn + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPaymentWithdrawn struct { + Transmitter common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryPaymentWithdrawnIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPaymentWithdrawnIterator{contract: 
_KeeperRegistry.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPaymentWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryPaymentWithdrawn, error) { + event := new(KeeperRegistryPaymentWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryReorgedUpkeepReportIterator struct { + Event *KeeperRegistryReorgedUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryReorgedUpkeepReportIterator) Next() bool { + + if 
it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryReorgedUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryReorgedUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryReorgedUpkeepReport struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryReorgedUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryReorgedUpkeepReportIterator{contract: _KeeperRegistry.contract, event: "ReorgedUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { 
+ select { + case log := <-logs: + + event := new(KeeperRegistryReorgedUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseReorgedUpkeepReport(log types.Log) (*KeeperRegistryReorgedUpkeepReport, error) { + event := new(KeeperRegistryReorgedUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryStaleUpkeepReportIterator struct { + Event *KeeperRegistryStaleUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryStaleUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryStaleUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryStaleUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryStaleUpkeepReport struct { + Id *big.Int + Raw types.Log +} + +func 
(_KeeperRegistry *KeeperRegistryFilterer) FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryStaleUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryStaleUpkeepReportIterator{contract: _KeeperRegistry.contract, event: "StaleUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryStaleUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryStaleUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseStaleUpkeepReport(log types.Log) (*KeeperRegistryStaleUpkeepReport, error) { + event := new(KeeperRegistryStaleUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryTransmittedIterator struct { + Event *KeeperRegistryTransmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail 
error +} + +func (it *KeeperRegistryTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryTransmittedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryTransmitted struct { + ConfigDigest [32]byte + Epoch uint32 + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterTransmitted(opts *bind.FilterOpts) (*KeeperRegistryTransmittedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return &KeeperRegistryTransmittedIterator{contract: _KeeperRegistry.contract, event: "Transmitted", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchTransmitted(opts *bind.WatchOpts, sink chan<- *KeeperRegistryTransmitted) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryTransmitted) + if err := _KeeperRegistry.contract.UnpackLog(event, "Transmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + 
case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseTransmitted(log types.Log) (*KeeperRegistryTransmitted, error) { + event := new(KeeperRegistryTransmitted) + if err := _KeeperRegistry.contract.UnpackLog(event, "Transmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUnpausedIterator struct { + Event *KeeperRegistryUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryUnpausedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &KeeperRegistryUnpausedIterator{contract: _KeeperRegistry.contract, event: "Unpaused", logs: logs, sub: sub}, 
nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUnpaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUnpaused(log types.Log) (*KeeperRegistryUnpaused, error) { + event := new(KeeperRegistryUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepAdminTransferRequestedIterator struct { + Event *KeeperRegistryUpkeepAdminTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepAdminTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := 
<-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepAdminTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepAdminTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepAdminTransferRequested struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryUpkeepAdminTransferRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepAdminTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "UpkeepAdminTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } 
+ return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepAdminTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryUpkeepAdminTransferRequested, error) { + event := new(KeeperRegistryUpkeepAdminTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepAdminTransferredIterator struct { + Event *KeeperRegistryUpkeepAdminTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepAdminTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepAdminTransferredIterator) Error() error { + return it.fail +} + +func (it 
*KeeperRegistryUpkeepAdminTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepAdminTransferred struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryUpkeepAdminTransferredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepAdminTransferredIterator{contract: _KeeperRegistry.contract, event: "UpkeepAdminTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepAdminTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, 
"UpkeepAdminTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryUpkeepAdminTransferred, error) { + event := new(KeeperRegistryUpkeepAdminTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepCanceledIterator struct { + Event *KeeperRegistryUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) 
(*KeeperRegistryUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepCanceledIterator{contract: _KeeperRegistry.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepCanceled) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepCanceled(log types.Log) (*KeeperRegistryUpkeepCanceled, error) { + event := new(KeeperRegistryUpkeepCanceled) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + 
event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepCheckDataUpdatedIterator struct { + Event *KeeperRegistryUpkeepCheckDataUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepCheckDataUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCheckDataUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCheckDataUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepCheckDataUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepCheckDataUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepCheckDataUpdated struct { + Id *big.Int + NewCheckData []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepCheckDataUpdated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepCheckDataUpdatedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepCheckDataUpdated", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepCheckDataUpdatedIterator{contract: _KeeperRegistry.contract, event: "UpkeepCheckDataUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepCheckDataUpdated(opts *bind.WatchOpts, sink chan<- 
*KeeperRegistryUpkeepCheckDataUpdated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepCheckDataUpdated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepCheckDataUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCheckDataUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepCheckDataUpdated(log types.Log) (*KeeperRegistryUpkeepCheckDataUpdated, error) { + event := new(KeeperRegistryUpkeepCheckDataUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCheckDataUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepGasLimitSetIterator struct { + Event *KeeperRegistryUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return 
false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepGasLimitSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepGasLimitSetIterator{contract: _KeeperRegistry.contract, event: "UpkeepGasLimitSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepGasLimitSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepGasLimitSet(log types.Log) 
(*KeeperRegistryUpkeepGasLimitSet, error) { + event := new(KeeperRegistryUpkeepGasLimitSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepMigratedIterator struct { + Event *KeeperRegistryUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepMigrated struct { + Id *big.Int + RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepMigratedIterator{contract: _KeeperRegistry.contract, event: "UpkeepMigrated", 
logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepMigrated) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepMigrated(log types.Log) (*KeeperRegistryUpkeepMigrated, error) { + event := new(KeeperRegistryUpkeepMigrated) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepOffchainConfigSetIterator struct { + Event *KeeperRegistryUpkeepOffchainConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepOffchainConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepOffchainConfigSet) 
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepOffchainConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepOffchainConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepOffchainConfigSet struct { + Id *big.Int + OffchainConfig []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepOffchainConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepOffchainConfigSetIterator{contract: _KeeperRegistry.contract, event: "UpkeepOffchainConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepOffchainConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err 
:= <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepOffchainConfigSet(log types.Log) (*KeeperRegistryUpkeepOffchainConfigSet, error) { + event := new(KeeperRegistryUpkeepOffchainConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepPausedIterator struct { + Event *KeeperRegistryUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepPaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepPausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } 
+ return &KeeperRegistryUpkeepPausedIterator{contract: _KeeperRegistry.contract, event: "UpkeepPaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepPaused(log types.Log) (*KeeperRegistryUpkeepPaused, error) { + event := new(KeeperRegistryUpkeepPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepPerformedIterator struct { + Event *KeeperRegistryUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + 
it.Event = new(KeeperRegistryUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepPerformed struct { + Id *big.Int + Success bool + CheckBlockNumber uint32 + GasUsed *big.Int + GasOverhead *big.Int + TotalPayment *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*KeeperRegistryUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepPerformedIterator{contract: _KeeperRegistry.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() 
+ for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepPerformed) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepPerformed(log types.Log) (*KeeperRegistryUpkeepPerformed, error) { + event := new(KeeperRegistryUpkeepPerformed) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepReceivedIterator struct { + Event *KeeperRegistryUpkeepReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepReceivedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepReceived struct { + Id *big.Int + StartingBalance *big.Int + ImportedFrom common.Address + Raw types.Log +} + 
+func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepReceivedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepReceivedIterator{contract: _KeeperRegistry.contract, event: "UpkeepReceived", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepReceived, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepReceived) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepReceived(log types.Log) (*KeeperRegistryUpkeepReceived, error) { + event := new(KeeperRegistryUpkeepReceived) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepRegisteredIterator struct { + Event *KeeperRegistryUpkeepRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*KeeperRegistryUpkeepRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepRegistered struct { + Id *big.Int + ExecuteGas uint32 + Admin common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepRegisteredIterator{contract: _KeeperRegistry.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepRegistered) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepRegistered(log types.Log) (*KeeperRegistryUpkeepRegistered, error) { + event := new(KeeperRegistryUpkeepRegistered) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepUnpausedIterator struct { + Event *KeeperRegistryUpkeepUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepUnpaused struct { + Id 
*big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepUnpausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepUnpausedIterator{contract: _KeeperRegistry.contract, event: "UpkeepUnpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepUnpaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryUpkeepUnpaused, error) { + event := new(KeeperRegistryUpkeepUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetSignerInfo struct { + Active bool + Index uint8 +} +type GetState struct { + State State + Config OnchainConfig + Signers []common.Address + Transmitters []common.Address + F uint8 +} +type 
GetTransmitterInfo struct { + Active bool + Index uint8 + Balance *big.Int + LastCollected *big.Int + Payee common.Address +} +type LatestConfigDetails struct { + ConfigCount uint32 + BlockNumber uint32 + ConfigDigest [32]byte +} +type LatestConfigDigestAndEpoch struct { + ScanLogs bool + ConfigDigest [32]byte + Epoch uint32 +} + +func (_KeeperRegistry *KeeperRegistry) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistry.abi.Events["CancelledUpkeepReport"].ID: + return _KeeperRegistry.ParseCancelledUpkeepReport(log) + case _KeeperRegistry.abi.Events["ConfigSet"].ID: + return _KeeperRegistry.ParseConfigSet(log) + case _KeeperRegistry.abi.Events["FundsAdded"].ID: + return _KeeperRegistry.ParseFundsAdded(log) + case _KeeperRegistry.abi.Events["FundsWithdrawn"].ID: + return _KeeperRegistry.ParseFundsWithdrawn(log) + case _KeeperRegistry.abi.Events["InsufficientFundsUpkeepReport"].ID: + return _KeeperRegistry.ParseInsufficientFundsUpkeepReport(log) + case _KeeperRegistry.abi.Events["OwnerFundsWithdrawn"].ID: + return _KeeperRegistry.ParseOwnerFundsWithdrawn(log) + case _KeeperRegistry.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistry.ParseOwnershipTransferRequested(log) + case _KeeperRegistry.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistry.ParseOwnershipTransferred(log) + case _KeeperRegistry.abi.Events["Paused"].ID: + return _KeeperRegistry.ParsePaused(log) + case _KeeperRegistry.abi.Events["PayeesUpdated"].ID: + return _KeeperRegistry.ParsePayeesUpdated(log) + case _KeeperRegistry.abi.Events["PayeeshipTransferRequested"].ID: + return _KeeperRegistry.ParsePayeeshipTransferRequested(log) + case _KeeperRegistry.abi.Events["PayeeshipTransferred"].ID: + return _KeeperRegistry.ParsePayeeshipTransferred(log) + case _KeeperRegistry.abi.Events["PaymentWithdrawn"].ID: + return _KeeperRegistry.ParsePaymentWithdrawn(log) + case _KeeperRegistry.abi.Events["ReorgedUpkeepReport"].ID: + return 
_KeeperRegistry.ParseReorgedUpkeepReport(log) + case _KeeperRegistry.abi.Events["StaleUpkeepReport"].ID: + return _KeeperRegistry.ParseStaleUpkeepReport(log) + case _KeeperRegistry.abi.Events["Transmitted"].ID: + return _KeeperRegistry.ParseTransmitted(log) + case _KeeperRegistry.abi.Events["Unpaused"].ID: + return _KeeperRegistry.ParseUnpaused(log) + case _KeeperRegistry.abi.Events["UpkeepAdminTransferRequested"].ID: + return _KeeperRegistry.ParseUpkeepAdminTransferRequested(log) + case _KeeperRegistry.abi.Events["UpkeepAdminTransferred"].ID: + return _KeeperRegistry.ParseUpkeepAdminTransferred(log) + case _KeeperRegistry.abi.Events["UpkeepCanceled"].ID: + return _KeeperRegistry.ParseUpkeepCanceled(log) + case _KeeperRegistry.abi.Events["UpkeepCheckDataUpdated"].ID: + return _KeeperRegistry.ParseUpkeepCheckDataUpdated(log) + case _KeeperRegistry.abi.Events["UpkeepGasLimitSet"].ID: + return _KeeperRegistry.ParseUpkeepGasLimitSet(log) + case _KeeperRegistry.abi.Events["UpkeepMigrated"].ID: + return _KeeperRegistry.ParseUpkeepMigrated(log) + case _KeeperRegistry.abi.Events["UpkeepOffchainConfigSet"].ID: + return _KeeperRegistry.ParseUpkeepOffchainConfigSet(log) + case _KeeperRegistry.abi.Events["UpkeepPaused"].ID: + return _KeeperRegistry.ParseUpkeepPaused(log) + case _KeeperRegistry.abi.Events["UpkeepPerformed"].ID: + return _KeeperRegistry.ParseUpkeepPerformed(log) + case _KeeperRegistry.abi.Events["UpkeepReceived"].ID: + return _KeeperRegistry.ParseUpkeepReceived(log) + case _KeeperRegistry.abi.Events["UpkeepRegistered"].ID: + return _KeeperRegistry.ParseUpkeepRegistered(log) + case _KeeperRegistry.abi.Events["UpkeepUnpaused"].ID: + return _KeeperRegistry.ParseUpkeepUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistryCancelledUpkeepReport) Topic() common.Hash { + return common.HexToHash("0xd84831b6a3a7fbd333f42fe7f9104a139da6cca4cc1507aef4ddad79b31d017f") +} + +func 
(KeeperRegistryConfigSet) Topic() common.Hash { + return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05") +} + +func (KeeperRegistryFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (KeeperRegistryFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (KeeperRegistryInsufficientFundsUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x7895fdfe292beab0842d5beccd078e85296b9e17a30eaee4c261a2696b84eb96") +} + +func (KeeperRegistryOwnerFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1") +} + +func (KeeperRegistryOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistryOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistryPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (KeeperRegistryPayeesUpdated) Topic() common.Hash { + return common.HexToHash("0xa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725") +} + +func (KeeperRegistryPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (KeeperRegistryPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (KeeperRegistryPaymentWithdrawn) Topic() common.Hash { + return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (KeeperRegistryReorgedUpkeepReport) 
Topic() common.Hash { + return common.HexToHash("0x561ff77e59394941a01a456497a9418dea82e2a39abb3ecebfb1cef7e0bfdc13") +} + +func (KeeperRegistryStaleUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x5aa44821f7938098502bff537fbbdc9aaaa2fa655c10740646fce27e54987a89") +} + +func (KeeperRegistryTransmitted) Topic() common.Hash { + return common.HexToHash("0xb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62") +} + +func (KeeperRegistryUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (KeeperRegistryUpkeepAdminTransferRequested) Topic() common.Hash { + return common.HexToHash("0xb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b35") +} + +func (KeeperRegistryUpkeepAdminTransferred) Topic() common.Hash { + return common.HexToHash("0x5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c") +} + +func (KeeperRegistryUpkeepCanceled) Topic() common.Hash { + return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (KeeperRegistryUpkeepCheckDataUpdated) Topic() common.Hash { + return common.HexToHash("0x7b778136e5211932b51a145badd01959415e79e051a933604b3d323f862dcabf") +} + +func (KeeperRegistryUpkeepGasLimitSet) Topic() common.Hash { + return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c") +} + +func (KeeperRegistryUpkeepMigrated) Topic() common.Hash { + return common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff") +} + +func (KeeperRegistryUpkeepOffchainConfigSet) Topic() common.Hash { + return common.HexToHash("0x3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf4850") +} + +func (KeeperRegistryUpkeepPaused) Topic() common.Hash { + return common.HexToHash("0x8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f") +} + +func (KeeperRegistryUpkeepPerformed) Topic() common.Hash { + return 
common.HexToHash("0x29233ba1d7b302b8fe230ad0b81423aba5371b2a6f6b821228212385ee6a4420") +} + +func (KeeperRegistryUpkeepReceived) Topic() common.Hash { + return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71") +} + +func (KeeperRegistryUpkeepRegistered) Topic() common.Hash { + return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + +func (KeeperRegistryUpkeepUnpaused) Topic() common.Hash { + return common.HexToHash("0x7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a47456") +} + +func (_KeeperRegistry *KeeperRegistry) Address() common.Address { + return _KeeperRegistry.address +} + +type KeeperRegistryInterface interface { + GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetFastGasFeedAddress(opts *bind.CallOpts) (common.Address, error) + + GetKeeperRegistryLogicAddress(opts *bind.CallOpts) (common.Address, error) + + GetLinkAddress(opts *bind.CallOpts) (common.Address, error) + + GetLinkNativeFeedAddress(opts *bind.CallOpts) (common.Address, error) + + GetMaxPaymentForGas(opts *bind.CallOpts, gasLimit uint32) (*big.Int, error) + + GetMinBalanceForUpkeep(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetMode(opts *bind.CallOpts) (uint8, error) + + GetPeerRegistryMigrationPermission(opts *bind.CallOpts, peer common.Address) (uint8, error) + + GetSignerInfo(opts *bind.CallOpts, query common.Address) (GetSignerInfo, + + error) + + GetState(opts *bind.CallOpts) (GetState, + + error) + + GetTransmitterInfo(opts *bind.CallOpts, query common.Address) (GetTransmitterInfo, + + error) + + GetUpkeep(opts *bind.CallOpts, id *big.Int) (UpkeepInfo, error) + + LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) + + LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, 
error) + + UpkeepTranscoderVersion(opts *bind.CallOpts) (uint8, error) + + UpkeepVersion(opts *bind.CallOpts) (uint8, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) + + AcceptUpkeepAdmin(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) + + CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + CheckUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + MigrateUpkeeps(opts *bind.TransactOpts, ids []*big.Int, destination common.Address) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) (*types.Transaction, error) + + PauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + ReceiveUpkeeps(opts *bind.TransactOpts, encodedUpkeeps []byte) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) + + SetPayees(opts *bind.TransactOpts, payees []common.Address) (*types.Transaction, error) + + SetPeerRegistryMigrationPermission(opts *bind.TransactOpts, peer common.Address, permission uint8) (*types.Transaction, error) + + SetUpkeepGasLimit(opts *bind.TransactOpts, id *big.Int, gasLimit uint32) (*types.Transaction, error) + + SetUpkeepOffchainConfig(opts *bind.TransactOpts, id *big.Int, config []byte) 
(*types.Transaction, error) + + SimulatePerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) + + TransferUpkeepAdmin(opts *bind.TransactOpts, id *big.Int, proposed common.Address) (*types.Transaction, error) + + Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + UnpauseUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + + UpdateCheckData(opts *bind.TransactOpts, id *big.Int, newCheckData []byte) (*types.Transaction, error) + + WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) + + WithdrawOwnerFunds(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawPayment(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) + + Receive(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryCancelledUpkeepReportIterator, error) + + WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseCancelledUpkeepReport(log types.Log) (*KeeperRegistryCancelledUpkeepReport, error) + + FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*KeeperRegistryConfigSet, error) + + FilterFundsAdded(opts *bind.FilterOpts, id 
[]*big.Int, from []common.Address) (*KeeperRegistryFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*KeeperRegistryFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsWithdrawn, id []*big.Int) (event.Subscription, error) + + ParseFundsWithdrawn(log types.Log) (*KeeperRegistryFundsWithdrawn, error) + + FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryInsufficientFundsUpkeepReportIterator, error) + + WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseInsufficientFundsUpkeepReport(log types.Log) (*KeeperRegistryInsufficientFundsUpkeepReport, error) + + FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryOwnerFundsWithdrawnIterator, error) + + WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnerFundsWithdrawn) (event.Subscription, error) + + ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryOwnerFundsWithdrawn, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, 
sink chan<- *KeeperRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistryOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*KeeperRegistryPaused, error) + + FilterPayeesUpdated(opts *bind.FilterOpts) (*KeeperRegistryPayeesUpdatedIterator, error) + + WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeesUpdated) (event.Subscription, error) + + ParsePayeesUpdated(log types.Log) (*KeeperRegistryPayeesUpdated, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaymentWithdrawn, transmitter 
[]common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryPaymentWithdrawn, error) + + FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryReorgedUpkeepReportIterator, error) + + WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseReorgedUpkeepReport(log types.Log) (*KeeperRegistryReorgedUpkeepReport, error) + + FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryStaleUpkeepReportIterator, error) + + WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryStaleUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseStaleUpkeepReport(log types.Log) (*KeeperRegistryStaleUpkeepReport, error) + + FilterTransmitted(opts *bind.FilterOpts) (*KeeperRegistryTransmittedIterator, error) + + WatchTransmitted(opts *bind.WatchOpts, sink chan<- *KeeperRegistryTransmitted) (event.Subscription, error) + + ParseTransmitted(log types.Log) (*KeeperRegistryTransmitted, error) + + FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*KeeperRegistryUnpaused, error) + + FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryUpkeepAdminTransferRequestedIterator, error) + + WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryUpkeepAdminTransferRequested, error) + + FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) 
(*KeeperRegistryUpkeepAdminTransferredIterator, error) + + WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryUpkeepAdminTransferred, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*KeeperRegistryUpkeepCanceled, error) + + FilterUpkeepCheckDataUpdated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepCheckDataUpdatedIterator, error) + + WatchUpkeepCheckDataUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCheckDataUpdated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepCheckDataUpdated(log types.Log) (*KeeperRegistryUpkeepCheckDataUpdated, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepGasLimitSetIterator, error) + + WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepMigrated(log types.Log) (*KeeperRegistryUpkeepMigrated, error) + + FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepOffchainConfigSetIterator, error) + + WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepOffchainConfigSet, id []*big.Int) 
(event.Subscription, error) + + ParseUpkeepOffchainConfigSet(log types.Log) (*KeeperRegistryUpkeepOffchainConfigSet, error) + + FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepPausedIterator, error) + + WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPaused(log types.Log) (*KeeperRegistryUpkeepPaused, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*KeeperRegistryUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*KeeperRegistryUpkeepPerformed, error) + + FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*KeeperRegistryUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*KeeperRegistryUpkeepRegistered, error) + + FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepUnpausedIterator, error) + + WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepUnpaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keeper_registry_wrapper_2_1/keeper_registry_wrapper_2_1.go 
b/core/gethwrappers/generated/keeper_registry_wrapper_2_1/keeper_registry_wrapper_2_1.go new file mode 100644 index 00000000..5830da8e --- /dev/null +++ b/core/gethwrappers/generated/keeper_registry_wrapper_2_1/keeper_registry_wrapper_2_1.go @@ -0,0 +1,5169 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package keeper_registry_wrapper_2_1 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type KeeperRegistryBase21OnchainConfig struct { + PaymentPremiumPPB uint32 + FlatFeeMicroLink uint32 + CheckGasLimit uint32 + StalenessSeconds *big.Int + GasCeilingMultiplier uint16 + MinUpkeepSpend *big.Int + MaxPerformGas uint32 + MaxCheckDataSize uint32 + MaxPerformDataSize uint32 + MaxRevertDataSize uint32 + FallbackGasPrice *big.Int + FallbackLinkPrice *big.Int + Transcoder common.Address + Registrars []common.Address + UpkeepPrivilegeManager common.Address +} + +var KeeperRegistryMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"contractKeeperRegistryLogicB2_1\",\"name\":\"logicA\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ArrayHasNoEntries\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotCancel\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CheckDataExceedsLimit\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ConfigDigestMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateEntry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"GasLimitOutsideRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfFaultyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientFunds\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDataLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRecipient\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidReport\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTransmitter\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTrigger\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTriggerType\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxCheckDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MaxPerformDataSizeCanOnlyIncrease\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MigrationNotPermitted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyActiveTransmitters\",\"type\":\"
error\"},{\"inputs\":[],\"name\":\"OnlyCallableByAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPLIToken\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwnerOrRegistrar\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedAdmin\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByProposedPayee\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByUpkeepPrivilegeManager\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyPausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlySimulatedBackend\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUnpausedUpkeep\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ParameterLengthError\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentGreaterThanAllPLI\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReentrantCall\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RegistryPaused\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedSigner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RepeatedTransmitter\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"reason\",\"type\":\"bytes\"}],\"name\":\"TargetCheckReverted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyOracles\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TranscoderNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepAlreadyExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepCancelled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotCanceled\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UpkeepNotNeeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ValueNotChanged\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":
\"bytes\"}],\"name\":\"AdminPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"CancelledUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"dedupKey\",\"type\":\"bytes32\"}],\"name\":\"DedupKeyAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"FundsAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":
false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"FundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"InsufficientFundsUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"OwnerFundsWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"PayeesUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"t
ype\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"}],\"name\":\"PaymentWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"ReorgedUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"StaleUpkeepReport\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferRequested\",\"type\":\"event\"},{\"anonymous\":
false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"UpkeepAdminTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"atBlockHeight\",\"type\":\"uint64\"}],\"name\":\"UpkeepCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"newCheckData\",\"type\":\"bytes\"}],\"name\":\"UpkeepCheckDataSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"gasLimit\",\"type\":\"uint96\"}],\"name\":\"UpkeepGasLimitSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"remainingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"}],\"name\":\"UpkeepMigrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepOffchainConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepPaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\
":\"id\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasOverhead\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"trigger\",\"type\":\"bytes\"}],\"name\":\"UpkeepPerformed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"privilegeConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepPrivilegeConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startingBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"importedFrom\",\"type\":\"address\"}],\"name\":\"UpkeepReceived\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"UpkeepRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"}],\"name\":\"UpkeepTriggerConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"UpkeepUnpaused\",\"type\":\"event\"},{\"stateMutability\":\"nonpayable\",\"type\":\"fa
llback\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"fallbackTo\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"onchainConfigBytes\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability
\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"paymentPremiumPPB\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeMicroLink\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"checkGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"stalenessSeconds\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"gasCeilingMultiplier\",\"type\":\"uint16\"},{\"internalType\":\"uint96\",\"name\":\"minUpkeepSpend\",\"type\":\"uint96\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformGas\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCheckDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxPerformDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxRevertDataSize\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"fallbackGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fallbackLinkPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"transcoder\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"registrars\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"upkeepPrivilegeManager\",\"type\":\"address\"}],\"internalType\":\"structKeeperRegistryBase2_1.OnchainConfig\",\"name\":\"onchainConfig\",\"type\":\"tuple\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfigTypeSafe\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"p
erformData\",\"type\":\"bytes\"}],\"name\":\"simulatePerformUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"gasUsed\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"rawReport\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x6101406040523480156200001257600080fd5b50604051620054d0380380620054d08339810160408190526200003591620003df565b80816001600160a01b0316634b4fd03b6040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000075573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200009b919062000406565b826001600160a01b031663ca30e6036040518163ffffffff1660e01b8152600401602060405180830381865afa158015620000da573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001009190620003df565b836001600160a01b031663b10b673c6040518163ffffffff1660e01b8152600401602060405180830381865afa1580156200013f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001659190620003df565b846001600160a01b0316636709d0e56040518163ffffffff1660e01b8152600401602060405180830381865afa158015620001a4573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001ca9190620003df565b856001600160a01b0316635425d8ac6040518163ffffffff1660e01b8152600401602060405180830381865afa15801562000209573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200022f9190620003df565b3380600081620002865760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620002b957620002b9816200031b565b505050846002811115620002d157620002d162000429565b60e0816002811115620002e857620002e862000429565b9052506001600160a01b0393841660805291831660a052821660c0528116610100529190911661012052506200043f9050565b336001600160a01b03821603620003755760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016200027d565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6001600160a01b0381168114620003dc57600080fd5b50565b6000602082840
31215620003f257600080fd5b8151620003ff81620003c6565b9392505050565b6000602082840312156200041957600080fd5b815160038110620003ff57600080fd5b634e487b7160e01b600052602160045260246000fd5b60805160a05160c05160e051610100516101205161502f620004a16000396000818160d6015261016f01526000505060008181612eb701528181613220015281816133b30152613a4901526000505060005050600061043b015261502f6000f3fe608060405234801561001057600080fd5b50600436106100d45760003560e01c8063aed2e92911610081578063e29b753c1161005b578063e29b753c146102e8578063e3d0e712146102fb578063f2fde38b1461030e576100d4565b8063aed2e92914610262578063afcb95d71461028c578063b1dc65a4146102d5576100d4565b806381ff7048116100b257806381ff7048146101bc5780638da5cb5b14610231578063a4c0ed361461024f576100d4565b8063181f5a771461011b578063349e8cca1461016d57806379ba5097146101b4575b7f00000000000000000000000000000000000000000000000000000000000000003660008037600080366000845af43d6000803e808015610114573d6000f35b3d6000fd5b005b6101576040518060400160405280601481526020017f4b6565706572526567697374727920322e312e3000000000000000000000000081525081565b6040516101649190613cc8565b60405180910390f35b7f00000000000000000000000000000000000000000000000000000000000000005b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610164565b610119610321565b61020e60145460115463ffffffff780100000000000000000000000000000000000000000000000083048116937c01000000000000000000000000000000000000000000000000000000009093041691565b6040805163ffffffff948516815293909216602084015290820152606001610164565b60005473ffffffffffffffffffffffffffffffffffffffff1661018f565b61011961025d366004613d51565b610423565b610275610270366004613dad565b61063f565b604080519215158352602083019190915201610164565b601154601254604080516000815260208101939093527c010000000000000000000000000000000000000000000000000000000090910463ffffffff1690820152606001610164565b6101196102e3366004613e3e565b6107a7565b6101196102f63660046142b9565b6112ea565b610119610309366004614386565b6121e6565b61011961031c366004614415565b61220f565b60015473fffffffffffffff
fffffffffffffffffffffffff1633146103a7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610492576040517fc8bad78d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b602081146104cc576040517fdfe9309000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006104da82840184614432565b60008181526004602052604090205490915065010000000000900463ffffffff90811614610534576040517f9c0083a200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008181526004602052604090206001015461056f9085906c0100000000000000000000000090046bffffffffffffffffffffffff1661447a565b600082815260046020526040902060010180546bffffffffffffffffffffffff929092166c01000000000000000000000000027fffffffffffffffff000000000000000000000000ffffffffffffffffffffffff9092169190911790556018546105da90859061449f565b6018556040516bffffffffffffffffffffffff8516815273ffffffffffffffffffffffffffffffffffffffff86169082907fafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa7348915062039060200160405180910390a35050505050565b60008061064a612223565b6012546e010000000000000000000000000000900460ff1615610699576040517f24522f3400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600085815260046020908152604091829020825160e081018452815460ff811615158252610100810463ffffffff908116838601819052650100000000008304821684880152690100000000000000000090920473ffffffffffffffffffffffffffffffffffffffff16606084018190526001909401546bfffffff
fffffffffffffffff80821660808601526c0100000000000000000000000082041660a0850152780100000000000000000000000000000000000000000000000090041660c08301528451601f8901859004850281018501909552878552909361079893899089908190840183828082843760009201919091525061225d92505050565b9093509150505b935093915050565b60005a604080516101208101825260125460ff808216835261010080830463ffffffff90811660208601526501000000000084048116958501959095526901000000000000000000830462ffffff1660608501526c01000000000000000000000000830461ffff1660808501526e0100000000000000000000000000008304821615801560a08601526f010000000000000000000000000000008404909216151560c085015270010000000000000000000000000000000083046bffffffffffffffffffffffff1660e08501527c0100000000000000000000000000000000000000000000000000000000909204909316908201529192506108d5576040517f24522f3400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336000908152600b602052604090205460ff1661091e576040517f1099ed7500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6011548a351461095a576040517fdfdcf8e700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80516109679060016144e1565b60ff16861415806109785750858414155b156109af576040517f0244f71a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6109bf8a8a8a8a8a8a8a8a612468565b60006109cb8a8a6126d1565b9050600081604001515167ffffffffffffffff8111156109ed576109ed613ef5565b604051908082528060200260200182016040528015610ab157816020015b604080516101e0810182526000610100820181815261012083018290526101408301829052610160830182905261018083018290526101a083018290526101c0830182905282526020808301829052928201819052606082018190526080820181905260a0820181905260c0820181905260e082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff909201910181610a0b5790505b5090506000805b836040015151811015610efa576004600085604001518381518110610adf57610adf6144b2565b60209081029190910181015182528181019290925260409081016000208
15160e081018352815460ff811615158252610100810463ffffffff90811695830195909552650100000000008104851693820193909352690100000000000000000090920473ffffffffffffffffffffffffffffffffffffffff166060830152600101546bffffffffffffffffffffffff80821660808401526c0100000000000000000000000082041660a08301527801000000000000000000000000000000000000000000000000900490911660c08201528351849083908110610bc457610bc46144b2565b602002602001015160000181905250610bf984604001518281518110610bec57610bec6144b2565b602002602001015161278c565b838281518110610c0b57610c0b6144b2565b6020026020010151608001906001811115610c2857610c286144fa565b90816001811115610c3b57610c3b6144fa565b81525050610caf85848381518110610c5557610c556144b2565b60200260200101516080015186606001518481518110610c7757610c776144b2565b60200260200101518760a001518581518110610c9557610c956144b2565b602002602001015151886000015189602001516001612837565b838281518110610cc157610cc16144b2565b6020026020010151604001906bffffffffffffffffffffffff1690816bffffffffffffffffffffffff1681525050610d4d84604001518281518110610d0857610d086144b2565b602002602001015185608001518381518110610d2657610d266144b2565b6020026020010151858481518110610d4057610d406144b2565b6020026020010151612882565b848381518110610d5f57610d5f6144b2565b6020026020010151602001858481518110610d7c57610d7c6144b2565b602002602001015160e0018281525082151515158152505050828181518110610da757610da76144b2565b60200260200101516020015115610dca57610dc3600183614529565b9150610dcf565b610ee8565b610e35838281518110610de457610de46144b2565b6020026020010151600001516060015185606001518381518110610e0a57610e0a6144b2565b60200260200101518660a001518481518110610e2857610e286144b2565b602002602001015161225d565b848381518110610e4757610e476144b2565b6020026020010151606001858481518110610e6457610e646144b2565b602002602001015160a0018281525082151515158152505050828181518110610e8f57610e8f6144b2565b602002602001015160a0015186610ea69190614544565b9550610ee884604001518281518110610ec157610ec16144b2565b6020026020010151848381518110610edb57610edb6144b2565b6020026020010151612
a01565b80610ef281614557565b915050610ab8565b508061ffff16600003610f115750505050506112e0565b8351610f1e9060016144e1565b610f2d9060ff1661044c61458f565b616b6c610f3b8d601061458f565b5a610f469089614544565b610f50919061449f565b610f5a919061449f565b610f64919061449f565b9450611b58610f7761ffff8316876145fb565b610f81919061449f565b945060008060008060005b87604001515181101561118257868181518110610fab57610fab6144b2565b60200260200101516020015115611170576110078a888381518110610fd257610fd26144b2565b6020026020010151608001518a60a001518481518110610ff457610ff46144b2565b6020026020010151518c60000151612b13565b878281518110611019576110196144b2565b602002602001015160c00181815250506110758989604001518381518110611043576110436144b2565b602002602001015189848151811061105d5761105d6144b2565b60200260200101518b600001518c602001518b612b33565b9093509150611084828561447a565b9350611090838661447a565b94508681815181106110a4576110a46144b2565b6020026020010151606001511515886040015182815181106110c8576110c86144b2565b60200260200101517fad8cc9579b21dfe2c2f6ea35ba15b656e46b4f5b0cb424f52739b8ce5cac9c5b84866110fd919061447a565b8a858151811061110f5761110f6144b2565b602002602001015160a001518b868151811061112d5761112d6144b2565b602002602001015160c001518d60800151878151811061114f5761114f6144b2565b6020026020010151604051611167949392919061460f565b60405180910390a35b8061117a81614557565b915050610f8c565b5050336000908152600b6020526040902080548492506002906111ba9084906201000090046bffffffffffffffffffffffff1661447a565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555080601260000160108282829054906101000a90046bffffffffffffffffffffffff16611214919061447a565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555060008f600160038110611257576112576144b2565b602002013560001c9050600060088264ffffffffff16901c905087610100015163ffffffff168163ffffffff1611156112d657601280547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167c010000000000000000000000000000000000000000000000000000000063fff
fffff8416021790555b5050505050505050505b5050505050505050565b6112f2612c26565b601f8651111561132e576040517f25d0209c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8360ff1660000361136b576040517fe77dba5600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8451865114158061138a575061138284600361464c565b60ff16865111155b156113c1576040517f1d2d1c5800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601254600e547001000000000000000000000000000000009091046bffffffffffffffffffffffff169060005b816bffffffffffffffffffffffff1681101561145657611443600e828154811061141a5761141a6144b2565b60009182526020909120015473ffffffffffffffffffffffffffffffffffffffff168484612ca7565b508061144e81614557565b9150506113ee565b5060008060005b836bffffffffffffffffffffffff1681101561155f57600d8181548110611486576114866144b2565b600091825260209091200154600e805473ffffffffffffffffffffffffffffffffffffffff909216945090829081106114c1576114c16144b2565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff8681168452600c8352604080852080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001690559116808452600b90925290912080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016905591508061155781614557565b91505061145d565b5061156c600d6000613b9d565b611578600e6000613b9d565b604080516080810182526000808252602082018190529181018290526060810182905290805b8c518110156119e157600c60008e83815181106115bd576115bd6144b2565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff1682528101919091526040016000205460ff1615611628576040517f77cea0fa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168d8281518110611652576116526144b2565b602002602001015173ffffffffffffffffffffffffffffffffffffffff16036116a7576040517f815e1d6400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b604051806040016040528060011
51581526020018260ff16815250600c60008f84815181106116d8576116d86144b2565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff1682528181019290925260400160002082518154939092015160ff16610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff921515929092167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000909316929092171790558b518c9082908110611780576117806144b2565b60200260200101519150600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16036117f0576040517f58a70a0a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff82166000908152600b60209081526040918290208251608081018452905460ff80821615801584526101008304909116938301939093526bffffffffffffffffffffffff6201000082048116948301949094526e010000000000000000000000000000900490921660608301529093506118ab576040517f6a7281ad00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001835260ff80821660208086019182526bffffffffffffffffffffffff808b166060880190815273ffffffffffffffffffffffffffffffffffffffff87166000908152600b909352604092839020885181549551948a0151925184166e010000000000000000000000000000027fffffffffffff000000000000000000000000ffffffffffffffffffffffffffff939094166201000002929092167fffffffffffff000000000000000000000000000000000000000000000000ffff94909616610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff921515929092167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000090951694909417179190911692909217919091179055806119d981614557565b91505061159e565b50508a516119f79150600d9060208d0190613bbb565b508851611a0b90600e9060208c0190613bbb565b506040518061012001604052808960ff168152602001886000015163ffffffff168152602001886020015163ffffffff168152602001886060015162ffffff168152602001886080015161ffff1681526020016012600001600e9054906101000a900460ff16151581526020016012600001600f9054906101000a900460ff1615158152602001856bfffffffffffffffff
fffffff168152602001600063ffffffff16815250601260008201518160000160006101000a81548160ff021916908360ff16021790555060208201518160000160016101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160056101000a81548163ffffffff021916908363ffffffff16021790555060608201518160000160096101000a81548162ffffff021916908362ffffff160217905550608082015181600001600c6101000a81548161ffff021916908361ffff16021790555060a082015181600001600e6101000a81548160ff02191690831515021790555060c082015181600001600f6101000a81548160ff02191690831515021790555060e08201518160000160106101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555061010082015181600001601c6101000a81548163ffffffff021916908363ffffffff1602179055509050506040518061018001604052808860a001516bffffffffffffffffffffffff16815260200188610180015173ffffffffffffffffffffffffffffffffffffffff168152602001601360010160009054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff168152602001886040015163ffffffff1681526020018860c0015163ffffffff168152602001601360010160149054906101000a900463ffffffff1663ffffffff168152602001601360010160189054906101000a900463ffffffff1663ffffffff1681526020016013600101601c9054906101000a900463ffffffff1663ffffffff1681526020018860e0015163ffffffff16815260200188610100015163ffffffff16815260200188610120015163ffffffff168152602001886101c0015173ffffffffffffffffffffffffffffffffffffffff16815250601360008201518160000160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550602082015181600001600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060408201518160010160006101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550606082015181600101600c6101000a81548163ffffffff021916908363ffffffff16021790555060808201518160010160106101000a81548163ffffffff021916908363ffffffff16021790555060a08201518160010160146101000a81548163ffffffff021916908363ffffffff16021790555060c08
201518160010160186101000a81548163ffffffff021916908363ffffffff16021790555060e082015181600101601c6101000a81548163ffffffff021916908363ffffffff1602179055506101008201518160020160006101000a81548163ffffffff021916908363ffffffff1602179055506101208201518160020160046101000a81548163ffffffff021916908363ffffffff1602179055506101408201518160020160086101000a81548163ffffffff021916908363ffffffff16021790555061016082015181600201600c6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555090505086610140015160168190555086610160015160178190555060006013600101601c9054906101000a900463ffffffff169050611fcd612eb1565b601480547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167c010000000000000000000000000000000000000000000000000000000063ffffffff93841602178082556001926018916120489185917801000000000000000000000000000000000000000000000000900416614675565b92506101000a81548163ffffffff021916908363ffffffff16021790555060008860405160200161207991906146e3565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00181529190526014549091506120e290469030907801000000000000000000000000000000000000000000000000900463ffffffff168f8f8f878f8f612f66565b60115560005b6120f26009613010565b8110156121225761210f61210760098361301a565b600990613026565b508061211a81614557565b9150506120e8565b5060005b896101a0015151811015612179576121668a6101a00151828151811061214e5761214e6144b2565b6020026020010151600961304890919063ffffffff16565b508061217181614557565b915050612126565b507f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e0582601154601360010160189054906101000a900463ffffffff168f8f8f878f8f6040516121d099989796959493929190614847565b60405180910390a1505050505050505050505050565b612207868686868060200190518101906122009190614978565b86866112ea565b505050505050565b612217612c26565b6122208161306a565b50565b321561225b576040517fb60ac5db00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b60125460009081906f0100000000000
0000000000000000000900460ff16156122b2576040517f37ed32e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601280547fffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff166f010000000000000000000000000000001790556040517f4585e33b000000000000000000000000000000000000000000000000000000009061231f908590602401613cc8565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009094169390931790925290517f79188d1600000000000000000000000000000000000000000000000000000000815290935073ffffffffffffffffffffffffffffffffffffffff8616906379188d16906123f29087908790600401614ad2565b60408051808303816000875af1158015612410573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906124349190614aeb565b601280547fffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffff16905590969095509350505050565b6000878760405161247a929190614b1e565b604051908190038120612491918b90602001614b2e565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815282825280516020918201208383019092526000808452908301819052909250906000805b88811015612668576001858783602081106124fd576124fd6144b2565b61250a91901a601b6144e1565b8c8c8581811061251c5761251c6144b2565b905060200201358b8b86818110612535576125356144b2565b9050602002013560405160008152602001604052604051612572949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa158015612594573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015173ffffffffffffffffffffffffffffffffffffffff81166000908152600c602090815290849020838501909452925460ff8082161515808552610100909204169383019390935290955093509050612642576040517f0f4c073700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b826020015160080260ff166001901b8401935
0808061266090614557565b9150506124e0565b50827e010101010101010101010101010101010101010101010101010101010101018416146126c3576040517fc103be2e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b505050505050505050505050565b61270a6040518060c001604052806000815260200160008152602001606081526020016060815260200160608152602001606081525090565b600061271883850185614c1f565b604081015151606082015151919250908114158061273b57508082608001515114155b8061274b5750808260a001515114155b15612782576040517fb55ac75400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5090505b92915050565b6000818160045b600f811015612819577fff0000000000000000000000000000000000000000000000000000000000000082168382602081106127d1576127d16144b2565b1a60f81b7effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19161461280757506000949350505050565b8061281181614557565b915050612793565b5081600f1a600181111561282f5761282f6144fa565b949350505050565b60008061284988878b6000015161315f565b90506000806128648b8a63ffffffff16858a8a60018b6131eb565b9092509050612873818361447a565b9b9a5050505050505050505050565b60008080808460800151600181111561289d5761289d6144fa565b036128c1576128ad868686613644565b6128bc5760009250905061079f565b612938565b6001846080015160018111156128d9576128d96144fa565b036129065760006128eb878787613738565b9250905080612900575060009250905061079f565b50612938565b6040517ff2b2d41200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b612940612eb1565b84516040015163ffffffff161161299457857fc3237c8807c467c1b39b8d0395eff077313e691bf0a7388106792564ebfd5636866040516129819190613cc8565b60405180910390a260009250905061079f565b83604001516bffffffffffffffffffffffff16846000015160a001516bffffffffffffffffffffffff1610156129f457857f377c8b0c126ae5248d27aca1c76fac4608aff85673ee3caf09747e1044549e02866040516129819190613cc8565b6001969095509350505050565b600081608001516001811115612a1957612a196144fa565b03612a8b57612a26612eb1565b6000838152600460205260409020600101805463fffff
fff929092167801000000000000000000000000000000000000000000000000027fffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffff9092169190911790555050565b600181608001516001811115612aa357612aa36144fa565b03612b0f5760e08101805160009081526008602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055915191517fa4a4e334c0e330143f9437484fe516c13bc560b86b5b0daf58e7084aaac228f29190a25b5050565b6000612b2084848461315f565b90508085101561282f5750929392505050565b600080612b4e888760a001518860c0015188888860016131eb565b90925090506000612b5f828461447a565b600089815260046020526040902060010180549192508291600c90612ba39084906c0100000000000000000000000090046bffffffffffffffffffffffff16614d0c565b82546101009290920a6bffffffffffffffffffffffff81810219909316918316021790915560008a815260046020526040812060010180548594509092612bec9185911661447a565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555050965096945050505050565b60005473ffffffffffffffffffffffffffffffffffffffff16331461225b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161039e565b73ffffffffffffffffffffffffffffffffffffffff83166000908152600b602090815260408083208151608081018352905460ff80821615801584526101008304909116948301949094526bffffffffffffffffffffffff6201000082048116938301939093526e0100000000000000000000000000009004909116606082015290612ea3576000816060015185612d3f9190614d0c565b90506000612d4d8583614d31565b90508083604001818151612d61919061447a565b6bffffffffffffffffffffffff16905250612d7c8582614d5c565b83606001818151612d8d919061447a565b6bffffffffffffffffffffffff90811690915273ffffffffffffffffffffffffffffffffffffffff89166000908152600b602090815260409182902087518154928901519389015160608a015186166e010000000000000000000000000000027fffffffffffff000000000000000000000000ffffffffffffffffffffffffffff919096166201000002167fffffffffffff00000000000
0000000000000000000000000000000000000ffff60ff95909516610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff921515929092167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000909416939093171792909216179190911790555050505b6040015190505b9392505050565b600060017f00000000000000000000000000000000000000000000000000000000000000006002811115612ee757612ee76144fa565b03612f6157606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa158015612f38573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612f5c9190614d90565b905090565b504390565b6000808a8a8a8a8a8a8a8a8a604051602001612f8a99989796959493929190614da9565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e01000000000000000000000000000000000000000000000000000000000000179b9a5050505050505050505050565b6000612786825490565b6000612eaa83836138d0565b6000612eaa8373ffffffffffffffffffffffffffffffffffffffff84166138fa565b6000612eaa8373ffffffffffffffffffffffffffffffffffffffff84166139f4565b3373ffffffffffffffffffffffffffffffffffffffff8216036130e9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161039e565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60008080856001811115613175576131756144fa565b03613184575062015f906131a3565b6001856001811115613198576131986144fa565b0361290657506201adb05b6131b463ffffffff8516601461458f565b6131bf8460016144e1565b6131ce9060ff16611d4c61458f565b6131d8908361449f565b6131e2919061449f565b95945050505050565b6000806000896080015161ffff1687613204919061458f565b90508380156132125
750803a105b1561321a57503a5b600060027f00000000000000000000000000000000000000000000000000000000000000006002811115613250576132506144fa565b036133af5760408051600081526020810190915285156132ae57600036604051806080016040528060488152602001614fdb6048913960405160200161329893929190614e3e565b6040516020818303038152906040529050613316565b6015546132ca90640100000000900463ffffffff166004614e65565b63ffffffff1667ffffffffffffffff8111156132e8576132e8613ef5565b6040519080825280601f01601f191660200182016040528015613312576020820181803683370190505b5090505b6040517f49948e0e00000000000000000000000000000000000000000000000000000000815273420000000000000000000000000000000000000f906349948e0e90613366908490600401613cc8565b602060405180830381865afa158015613383573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906133a79190614d90565b915050613509565b60017f000000000000000000000000000000000000000000000000000000000000000060028111156133e3576133e36144fa565b0361350957841561346557606c73ffffffffffffffffffffffffffffffffffffffff1663c6f7de0e6040518163ffffffff1660e01b8152600401602060405180830381865afa15801561343a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061345e9190614d90565b9050613509565b6000606c73ffffffffffffffffffffffffffffffffffffffff166341b247a86040518163ffffffff1660e01b815260040160c060405180830381865afa1580156134b3573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906134d79190614e88565b50506015549294506134fa93505050640100000000900463ffffffff168261458f565b61350590601061458f565b9150505b8461352557808b6080015161ffff16613522919061458f565b90505b61353361ffff8716826145fb565b9050600087826135438c8e61449f565b61354d908661458f565b613557919061449f565b61356990670de0b6b3a764000061458f565b61357391906145fb565b905060008c6040015163ffffffff1664e8d4a51000613592919061458f565b898e6020015163ffffffff16858f886135ab919061458f565b6135b5919061449f565b6135c390633b9aca0061458f565b6135cd919061458f565b6135d791906145fb565b6135e1919061449f565b90506b033b2e3c9fd0803ce80000006135fa828
461449f565b1115613632576040517f2ad7547a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b909c909b509950505050505050505050565b6000808380602001905181019061365b9190614ed2565b835160c00151815191925063ffffffff908116911610156136b857847f405288ea7be309e16cfdf481367f90a413e1d4634fcdaf8966546db9b93012e8856040516136a69190613cc8565b60405180910390a26000915050612eaa565b6020810151158015906136df5750602081015181516136dc9063ffffffff16613a43565b14155b806136f857506136ed612eb1565b815163ffffffff1610155b1561372d57847f6aa7f60c176da7af894b384daea2249497448137f5943c1237ada8bc92bdc301856040516136a69190613cc8565b506001949350505050565b6000806000848060200190518101906137519190614f2a565b90506000868260000151836020015184604001516040516020016137b394939291909384526020840192909252604083015260e01b7fffffffff0000000000000000000000000000000000000000000000000000000016606082015260640190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052805160209091012060808301519091501580159061381557508160800151613812836060015163ffffffff16613a43565b14155b806138315750613823612eb1565b826060015163ffffffff1610155b1561387b57867f6aa7f60c176da7af894b384daea2249497448137f5943c1237ada8bc92bdc301876040516138669190613cc8565b60405180910390a260009350915061079f9050565b60008181526008602052604090205460ff16156138c257867f405288ea7be309e16cfdf481367f90a413e1d4634fcdaf8966546db9b93012e8876040516138669190613cc8565b600197909650945050505050565b60008260000182815481106138e7576138e76144b2565b9060005260206000200154905092915050565b600081815260018301602052604081205480156139e357600061391e600183614544565b855490915060009061393290600190614544565b9050818114613997576000866000018281548110613952576139526144b2565b9060005260206000200154905080876000018481548110613975576139756144b2565b6000918252602080832090910192909255918252600188019052604090208390555b85548690806139a8576139a8614fab565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505
050612786565b6000915050612786565b5092915050565b6000818152600183016020526040812054613a3b57508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155612786565b506000612786565b600060017f00000000000000000000000000000000000000000000000000000000000000006002811115613a7957613a796144fa565b03613b93576000606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa158015613acc573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613af09190614d90565b90508083101580613b0b5750610100613b098483614544565b115b15613b195750600092915050565b6040517f2b407a8200000000000000000000000000000000000000000000000000000000815260048101849052606490632b407a8290602401602060405180830381865afa158015613b6f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612eaa9190614d90565b504090565b919050565b50805460008255906000526020600020908101906122209190613c45565b828054828255906000526020600020908101928215613c35579160200282015b82811115613c3557825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190613bdb565b50613c41929150613c45565b5090565b5b80821115613c415760008155600101613c46565b60005b83811015613c75578181015183820152602001613c5d565b50506000910152565b60008151808452613c96816020860160208601613c5a565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000612eaa6020830184613c7e565b73ffffffffffffffffffffffffffffffffffffffff8116811461222057600080fd5b8035613b9881613cdb565b60008083601f840112613d1a57600080fd5b50813567ffffffffffffffff811115613d3257600080fd5b602083019150836020828501011115613d4a57600080fd5b9250929050565b60008060008060608587031215613d6757600080fd5b8435613d7281613cdb565b935060208501359250604085013567ffffffffffffffff811115613d9557600080fd5b613da187828801613d08565b95989497509550505050565b600080600060408486031215613dc257600080fd5b83359
250602084013567ffffffffffffffff811115613de057600080fd5b613dec86828701613d08565b9497909650939450505050565b60008083601f840112613e0b57600080fd5b50813567ffffffffffffffff811115613e2357600080fd5b6020830191508360208260051b8501011115613d4a57600080fd5b60008060008060008060008060e0898b031215613e5a57600080fd5b606089018a811115613e6b57600080fd5b8998503567ffffffffffffffff80821115613e8557600080fd5b613e918c838d01613d08565b909950975060808b0135915080821115613eaa57600080fd5b613eb68c838d01613df9565b909750955060a08b0135915080821115613ecf57600080fd5b50613edc8b828c01613df9565b999c989b50969995989497949560c00135949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040516101e0810167ffffffffffffffff81118282101715613f4857613f48613ef5565b60405290565b60405160c0810167ffffffffffffffff81118282101715613f4857613f48613ef5565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715613fb857613fb8613ef5565b604052919050565b600067ffffffffffffffff821115613fda57613fda613ef5565b5060051b60200190565b600082601f830112613ff557600080fd5b8135602061400a61400583613fc0565b613f71565b82815260059290921b8401810191818101908684111561402957600080fd5b8286015b8481101561404d57803561404081613cdb565b835291830191830161402d565b509695505050505050565b803560ff81168114613b9857600080fd5b63ffffffff8116811461222057600080fd5b8035613b9881614069565b62ffffff8116811461222057600080fd5b8035613b9881614086565b61ffff8116811461222057600080fd5b8035613b98816140a2565b6bffffffffffffffffffffffff8116811461222057600080fd5b8035613b98816140bd565b60006101e082840312156140f557600080fd5b6140fd613f24565b90506141088261407b565b81526141166020830161407b565b60208201526141276040830161407b565b604082015261413860608301614097565b6060820152614149608083016140b2565b608082015261415a60a083016140d7565b60a082015261416b60c0830161407b565b60c082015261417c60e0830161407b565b60e082015261010061418f81840161407b565b908201526101206141a183820161407b565b90820152610140828101359
082015261016080830135908201526101806141c9818401613cfd565b908201526101a08281013567ffffffffffffffff8111156141e957600080fd5b6141f585828601613fe4565b8284015250506101c0614209818401613cfd565b9082015292915050565b803567ffffffffffffffff81168114613b9857600080fd5b600082601f83011261423c57600080fd5b813567ffffffffffffffff81111561425657614256613ef5565b61428760207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601613f71565b81815284602083860101111561429c57600080fd5b816020850160208301376000918101602001919091529392505050565b60008060008060008060c087890312156142d257600080fd5b863567ffffffffffffffff808211156142ea57600080fd5b6142f68a838b01613fe4565b9750602089013591508082111561430c57600080fd5b6143188a838b01613fe4565b965061432660408a01614058565b9550606089013591508082111561433c57600080fd5b6143488a838b016140e2565b945061435660808a01614213565b935060a089013591508082111561436c57600080fd5b5061437989828a0161422b565b9150509295509295509295565b60008060008060008060c0878903121561439f57600080fd5b863567ffffffffffffffff808211156143b757600080fd5b6143c38a838b01613fe4565b975060208901359150808211156143d957600080fd5b6143e58a838b01613fe4565b96506143f360408a01614058565b9550606089013591508082111561440957600080fd5b6143488a838b0161422b565b60006020828403121561442757600080fd5b8135612eaa81613cdb565b60006020828403121561444457600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6bffffffffffffffffffffffff8181168382160190808211156139ed576139ed61444b565b808201808211156127865761278661444b565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60ff81811683821601908111156127865761278661444b565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b61ffff8181168382160190808211156139ed576139ed61444b565b818103818111156127865761278661444b565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036145885761458861444b565b5060010190565b6000817ffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156145c7576145c761444b565b500290565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b60008261460a5761460a6145cc565b500490565b6bffffffffffffffffffffffff851681528360208201528260408201526080606082015260006146426080830184613c7e565b9695505050505050565b600060ff821660ff84168160ff048111821515161561466d5761466d61444b565b029392505050565b63ffffffff8181168382160190808211156139ed576139ed61444b565b600081518084526020808501945080840160005b838110156146d857815173ffffffffffffffffffffffffffffffffffffffff16875295820195908201906001016146a6565b509495945050505050565b602081526146fa60208201835163ffffffff169052565b60006020830151614713604084018263ffffffff169052565b50604083015163ffffffff8116606084015250606083015162ffffff8116608084015250608083015161ffff811660a08401525060a08301516bffffffffffffffffffffffff811660c08401525060c083015163ffffffff811660e08401525060e083015161010061478c8185018363ffffffff169052565b84015190506101206147a58482018363ffffffff169052565b84015190506101406147be8482018363ffffffff169052565b840151610160848101919091528401516101808085019190915284015190506101a06148018185018373ffffffffffffffffffffffffffffffffffffffff169052565b808501519150506101e06101c08181860152614821610200860184614692565b95015173ffffffffffffffffffffffffffffffffffffffff169301929092525090919050565b600061012063ffffffff808d1684528b6020850152808b166040850152508060608401526148778184018a614692565b9050828103608084015261488b8189614692565b905060ff871660a084015282810360c08401526148a88187613c7e565b905067ffffffffffffffff851660e08401528281036101008401526148cd8185613c7e565b9c9b505050505050505050505050565b8051613b9881614069565b8051613b9881614086565b8051613b98816140a2565b8051613b98816140bd565b8051613b9881613cdb565b600082601f83011261492557600080fd5b8151602061493561400583613fc0565b82815260059290921b8401810191818101908684111561495457600080fd5b8286015b8481101561404d57805161496b81613cdb565b8352918301918301614958565b600060208
28403121561498a57600080fd5b815167ffffffffffffffff808211156149a257600080fd5b908301906101e082860312156149b757600080fd5b6149bf613f24565b6149c8836148dd565b81526149d6602084016148dd565b60208201526149e7604084016148dd565b60408201526149f8606084016148e8565b6060820152614a09608084016148f3565b6080820152614a1a60a084016148fe565b60a0820152614a2b60c084016148dd565b60c0820152614a3c60e084016148dd565b60e0820152610100614a4f8185016148dd565b90820152610120614a618482016148dd565b9082015261014083810151908201526101608084015190820152610180614a89818501614909565b908201526101a08381015183811115614aa157600080fd5b614aad88828701614914565b8284015250506101c09150614ac3828401614909565b91810191909152949350505050565b82815260406020820152600061282f6040830184613c7e565b60008060408385031215614afe57600080fd5b82518015158114614b0e57600080fd5b6020939093015192949293505050565b8183823760009101908152919050565b8281526080810160608360208401379392505050565b600082601f830112614b5557600080fd5b81356020614b6561400583613fc0565b82815260059290921b84018101918181019086841115614b8457600080fd5b8286015b8481101561404d5780358352918301918301614b88565b600082601f830112614bb057600080fd5b81356020614bc061400583613fc0565b82815260059290921b84018101918181019086841115614bdf57600080fd5b8286015b8481101561404d57803567ffffffffffffffff811115614c035760008081fd5b614c118986838b010161422b565b845250918301918301614be3565b600060208284031215614c3157600080fd5b813567ffffffffffffffff80821115614c4957600080fd5b9083019060c08286031215614c5d57600080fd5b614c65613f4e565b8235815260208301356020820152604083013582811115614c8557600080fd5b614c9187828601614b44565b604083015250606083013582811115614ca957600080fd5b614cb587828601614b44565b606083015250608083013582811115614ccd57600080fd5b614cd987828601614b9f565b60808301525060a083013582811115614cf157600080fd5b614cfd87828601614b9f565b60a08301525095945050505050565b6bffffffffffffffffffffffff8281168282160390808211156139ed576139ed61444b565b60006bffffffffffffffffffffffff80841680614d5057614d506145cc565b92169190910492915050565b60006bfffffffffff
fffffffffffff80831681851681830481118215151615614d8757614d8761444b565b02949350505050565b600060208284031215614da257600080fd5b5051919050565b60006101208b835273ffffffffffffffffffffffffffffffffffffffff8b16602084015267ffffffffffffffff808b166040850152816060850152614df08285018b614692565b91508382036080850152614e04828a614692565b915060ff881660a085015283820360c0850152614e218288613c7e565b90861660e085015283810361010085015290506148cd8185613c7e565b828482376000838201600081528351614e5b818360208801613c5a565b0195945050505050565b600063ffffffff80831681851681830481118215151615614d8757614d8761444b565b60008060008060008060c08789031215614ea157600080fd5b865195506020870151945060408701519350606087015192506080870151915060a087015190509295509295509295565b600060408284031215614ee457600080fd5b6040516040810181811067ffffffffffffffff82111715614f0757614f07613ef5565b6040528251614f1581614069565b81526020928301519281019290925250919050565b600060a08284031215614f3c57600080fd5b60405160a0810181811067ffffffffffffffff82111715614f5f57614f5f613ef5565b806040525082518152602083015160208201526040830151614f8081614069565b60408201526060830151614f9381614069565b60608201526080928301519281019290925250919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fdfe307866666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000810000a", +} + +var KeeperRegistryABI = KeeperRegistryMetaData.ABI + +var KeeperRegistryBin = KeeperRegistryMetaData.Bin + +func DeployKeeperRegistry(auth *bind.TransactOpts, backend bind.ContractBackend, logicA common.Address) (common.Address, *types.Transaction, *KeeperRegistry, error) { + parsed, err := KeeperRegistryMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, 
common.FromHex(KeeperRegistryBin), backend, logicA) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeeperRegistry{address: address, abi: *parsed, KeeperRegistryCaller: KeeperRegistryCaller{contract: contract}, KeeperRegistryTransactor: KeeperRegistryTransactor{contract: contract}, KeeperRegistryFilterer: KeeperRegistryFilterer{contract: contract}}, nil +} + +type KeeperRegistry struct { + address common.Address + abi abi.ABI + KeeperRegistryCaller + KeeperRegistryTransactor + KeeperRegistryFilterer +} + +type KeeperRegistryCaller struct { + contract *bind.BoundContract +} + +type KeeperRegistryTransactor struct { + contract *bind.BoundContract +} + +type KeeperRegistryFilterer struct { + contract *bind.BoundContract +} + +type KeeperRegistrySession struct { + Contract *KeeperRegistry + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeeperRegistryCallerSession struct { + Contract *KeeperRegistryCaller + CallOpts bind.CallOpts +} + +type KeeperRegistryTransactorSession struct { + Contract *KeeperRegistryTransactor + TransactOpts bind.TransactOpts +} + +type KeeperRegistryRaw struct { + Contract *KeeperRegistry +} + +type KeeperRegistryCallerRaw struct { + Contract *KeeperRegistryCaller +} + +type KeeperRegistryTransactorRaw struct { + Contract *KeeperRegistryTransactor +} + +func NewKeeperRegistry(address common.Address, backend bind.ContractBackend) (*KeeperRegistry, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryABI)) + if err != nil { + return nil, err + } + contract, err := bindKeeperRegistry(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeeperRegistry{address: address, abi: abi, KeeperRegistryCaller: KeeperRegistryCaller{contract: contract}, KeeperRegistryTransactor: KeeperRegistryTransactor{contract: contract}, KeeperRegistryFilterer: KeeperRegistryFilterer{contract: contract}}, nil +} + +func NewKeeperRegistryCaller(address common.Address, caller 
bind.ContractCaller) (*KeeperRegistryCaller, error) { + contract, err := bindKeeperRegistry(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryCaller{contract: contract}, nil +} + +func NewKeeperRegistryTransactor(address common.Address, transactor bind.ContractTransactor) (*KeeperRegistryTransactor, error) { + contract, err := bindKeeperRegistry(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeeperRegistryTransactor{contract: contract}, nil +} + +func NewKeeperRegistryFilterer(address common.Address, filterer bind.ContractFilterer) (*KeeperRegistryFilterer, error) { + contract, err := bindKeeperRegistry(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeeperRegistryFilterer{contract: contract}, nil +} + +func bindKeeperRegistry(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeeperRegistryMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeeperRegistry *KeeperRegistryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistry.Contract.KeeperRegistryCaller.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.Contract.KeeperRegistryTransactor.contract.Transfer(opts) +} + +func (_KeeperRegistry *KeeperRegistryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistry.Contract.KeeperRegistryTransactor.contract.Transact(opts, method, params...) 
+} + +func (_KeeperRegistry *KeeperRegistryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeeperRegistry.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.Contract.contract.Transfer(opts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeeperRegistry.Contract.contract.Transact(opts, method, params...) +} + +func (_KeeperRegistry *KeeperRegistryCaller) FallbackTo(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "fallbackTo") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) FallbackTo() (common.Address, error) { + return _KeeperRegistry.Contract.FallbackTo(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) FallbackTo() (common.Address, error) { + return _KeeperRegistry.Contract.FallbackTo(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) LatestConfigDetails() 
(LatestConfigDetails, + + error) { + return _KeeperRegistry.Contract.LatestConfigDetails(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _KeeperRegistry.Contract.LatestConfigDetails(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "latestConfigDigestAndEpoch") + + outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _KeeperRegistry.Contract.LatestConfigDigestAndEpoch(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _KeeperRegistry.Contract.LatestConfigDigestAndEpoch(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) Owner() (common.Address, error) { + return _KeeperRegistry.Contract.Owner(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) Owner() (common.Address, error) { + return _KeeperRegistry.Contract.Owner(&_KeeperRegistry.CallOpts) +} + +func 
(_KeeperRegistry *KeeperRegistryCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _KeeperRegistry.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_KeeperRegistry *KeeperRegistrySession) TypeAndVersion() (string, error) { + return _KeeperRegistry.Contract.TypeAndVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryCallerSession) TypeAndVersion() (string, error) { + return _KeeperRegistry.Contract.TypeAndVersion(&_KeeperRegistry.CallOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "acceptOwnership") +} + +func (_KeeperRegistry *KeeperRegistrySession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptOwnership(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeeperRegistry.Contract.AcceptOwnership(&_KeeperRegistry.TransactOpts) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistrySession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.OnTokenTransfer(&_KeeperRegistry.TransactOpts, sender, amount, data) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.OnTokenTransfer(&_KeeperRegistry.TransactOpts, sender, amount, 
data) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setConfig", signers, transmitters, f, onchainConfigBytes, offchainConfigVersion, offchainConfig) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetConfig(signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetConfig(&_KeeperRegistry.TransactOpts, signers, transmitters, f, onchainConfigBytes, offchainConfigVersion, offchainConfig) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetConfig(signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetConfig(&_KeeperRegistry.TransactOpts, signers, transmitters, f, onchainConfigBytes, offchainConfigVersion, offchainConfig) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SetConfigTypeSafe(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig KeeperRegistryBase21OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "setConfigTypeSafe", signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_KeeperRegistry *KeeperRegistrySession) SetConfigTypeSafe(signers []common.Address, transmitters []common.Address, f uint8, onchainConfig KeeperRegistryBase21OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return 
_KeeperRegistry.Contract.SetConfigTypeSafe(&_KeeperRegistry.TransactOpts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SetConfigTypeSafe(signers []common.Address, transmitters []common.Address, f uint8, onchainConfig KeeperRegistryBase21OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SetConfigTypeSafe(&_KeeperRegistry.TransactOpts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) SimulatePerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "simulatePerformUpkeep", id, performData) +} + +func (_KeeperRegistry *KeeperRegistrySession) SimulatePerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SimulatePerformUpkeep(&_KeeperRegistry.TransactOpts, id, performData) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) SimulatePerformUpkeep(id *big.Int, performData []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.SimulatePerformUpkeep(&_KeeperRegistry.TransactOpts, id, performData) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transferOwnership", to) +} + +func (_KeeperRegistry *KeeperRegistrySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferOwnership(&_KeeperRegistry.TransactOpts, to) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeeperRegistry.Contract.TransferOwnership(&_KeeperRegistry.TransactOpts, to) +} + +func 
(_KeeperRegistry *KeeperRegistryTransactor) Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.Transact(opts, "transmit", reportContext, rawReport, rs, ss, rawVs) +} + +func (_KeeperRegistry *KeeperRegistrySession) Transmit(reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.Transmit(&_KeeperRegistry.TransactOpts, reportContext, rawReport, rs, ss, rawVs) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Transmit(reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.Transmit(&_KeeperRegistry.TransactOpts, reportContext, rawReport, rs, ss, rawVs) +} + +func (_KeeperRegistry *KeeperRegistryTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _KeeperRegistry.contract.RawTransact(opts, calldata) +} + +func (_KeeperRegistry *KeeperRegistrySession) Fallback(calldata []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.Fallback(&_KeeperRegistry.TransactOpts, calldata) +} + +func (_KeeperRegistry *KeeperRegistryTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _KeeperRegistry.Contract.Fallback(&_KeeperRegistry.TransactOpts, calldata) +} + +type KeeperRegistryAdminPrivilegeConfigSetIterator struct { + Event *KeeperRegistryAdminPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryAdminPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryAdminPrivilegeConfigSet) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryAdminPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryAdminPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryAdminPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryAdminPrivilegeConfigSet struct { + Admin common.Address + PrivilegeConfig []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*KeeperRegistryAdminPrivilegeConfigSetIterator, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return &KeeperRegistryAdminPrivilegeConfigSetIterator{contract: _KeeperRegistry.contract, event: "AdminPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, error) { + + var adminRule []interface{} + for _, adminItem := range admin { + adminRule = append(adminRule, adminItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "AdminPrivilegeConfigSet", adminRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { 
+ case log := <-logs: + + event := new(KeeperRegistryAdminPrivilegeConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseAdminPrivilegeConfigSet(log types.Log) (*KeeperRegistryAdminPrivilegeConfigSet, error) { + event := new(KeeperRegistryAdminPrivilegeConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "AdminPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryCancelledUpkeepReportIterator struct { + Event *KeeperRegistryCancelledUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryCancelledUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryCancelledUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryCancelledUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryCancelledUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryCancelledUpkeepReport struct { + Id 
*big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryCancelledUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryCancelledUpkeepReportIterator{contract: _KeeperRegistry.contract, event: "CancelledUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "CancelledUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryCancelledUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseCancelledUpkeepReport(log types.Log) (*KeeperRegistryCancelledUpkeepReport, error) { + event := new(KeeperRegistryCancelledUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "CancelledUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryConfigSetIterator struct { + Event *KeeperRegistryConfigSet + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryConfigSetIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &KeeperRegistryConfigSetIterator{contract: _KeeperRegistry.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryConfigSet) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer 
sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseConfigSet(log types.Log) (*KeeperRegistryConfigSet, error) { + event := new(KeeperRegistryConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryDedupKeyAddedIterator struct { + Event *KeeperRegistryDedupKeyAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryDedupKeyAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryDedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryDedupKeyAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryDedupKeyAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryDedupKeyAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryDedupKeyAdded struct { + DedupKey [32]byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterDedupKeyAdded(opts 
*bind.FilterOpts, dedupKey [][32]byte) (*KeeperRegistryDedupKeyAddedIterator, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return &KeeperRegistryDedupKeyAddedIterator{contract: _KeeperRegistry.contract, event: "DedupKeyAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryDedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) { + + var dedupKeyRule []interface{} + for _, dedupKeyItem := range dedupKey { + dedupKeyRule = append(dedupKeyRule, dedupKeyItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "DedupKeyAdded", dedupKeyRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryDedupKeyAdded) + if err := _KeeperRegistry.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseDedupKeyAdded(log types.Log) (*KeeperRegistryDedupKeyAdded, error) { + event := new(KeeperRegistryDedupKeyAdded) + if err := _KeeperRegistry.contract.UnpackLog(event, "DedupKeyAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryFundsAddedIterator struct { + Event *KeeperRegistryFundsAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*KeeperRegistryFundsAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryFundsAddedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryFundsAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryFundsAdded struct { + Id *big.Int + From common.Address + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryFundsAddedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return &KeeperRegistryFundsAddedIterator{contract: _KeeperRegistry.contract, event: "FundsAdded", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = 
append(fromRule, fromItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "FundsAdded", idRule, fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryFundsAdded) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseFundsAdded(log types.Log) (*KeeperRegistryFundsAdded, error) { + event := new(KeeperRegistryFundsAdded) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryFundsWithdrawnIterator struct { + Event *KeeperRegistryFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it 
*KeeperRegistryFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryFundsWithdrawn struct { + Id *big.Int + Amount *big.Int + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryFundsWithdrawnIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryFundsWithdrawnIterator{contract: _KeeperRegistry.contract, event: "FundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsWithdrawn, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "FundsWithdrawn", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseFundsWithdrawn(log types.Log) (*KeeperRegistryFundsWithdrawn, error) { + event := new(KeeperRegistryFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "FundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
KeeperRegistryInsufficientFundsUpkeepReportIterator struct { + Event *KeeperRegistryInsufficientFundsUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryInsufficientFundsUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryInsufficientFundsUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryInsufficientFundsUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryInsufficientFundsUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryInsufficientFundsUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryInsufficientFundsUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryInsufficientFundsUpkeepReportIterator{contract: _KeeperRegistry.contract, event: "InsufficientFundsUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) 
WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryInsufficientFundsUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "InsufficientFundsUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryInsufficientFundsUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseInsufficientFundsUpkeepReport(log types.Log) (*KeeperRegistryInsufficientFundsUpkeepReport, error) { + event := new(KeeperRegistryInsufficientFundsUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "InsufficientFundsUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnerFundsWithdrawnIterator struct { + Event *KeeperRegistryOwnerFundsWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnerFundsWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(KeeperRegistryOwnerFundsWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnerFundsWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnerFundsWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnerFundsWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryOwnerFundsWithdrawnIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return &KeeperRegistryOwnerFundsWithdrawnIterator{contract: _KeeperRegistry.contract, event: "OwnerFundsWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnerFundsWithdrawn) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnerFundsWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnerFundsWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryOwnerFundsWithdrawn, error) { + event := new(KeeperRegistryOwnerFundsWithdrawn) + if err 
:= _KeeperRegistry.contract.UnpackLog(event, "OwnerFundsWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnershipTransferRequestedIterator struct { + Event *KeeperRegistryOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) 
+ if err != nil { + return nil, err + } + return &KeeperRegistryOwnershipTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnershipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryOwnershipTransferRequested, error) { + event := new(KeeperRegistryOwnershipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryOwnershipTransferredIterator struct { + Event *KeeperRegistryOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Next() bool { + + if 
it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryOwnershipTransferredIterator{contract: _KeeperRegistry.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var 
toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryOwnershipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseOwnershipTransferred(log types.Log) (*KeeperRegistryOwnershipTransferred, error) { + event := new(KeeperRegistryOwnershipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPausedIterator struct { + Event *KeeperRegistryPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*KeeperRegistryPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPaused struct { + Account common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryPausedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &KeeperRegistryPausedIterator{contract: _KeeperRegistry.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePaused(log types.Log) (*KeeperRegistryPaused, error) { + event := new(KeeperRegistryPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPayeesUpdatedIterator struct { + Event *KeeperRegistryPayeesUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPayeesUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + 
select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeesUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPayeesUpdatedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPayeesUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPayeesUpdated struct { + Transmitters []common.Address + Payees []common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPayeesUpdated(opts *bind.FilterOpts) (*KeeperRegistryPayeesUpdatedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return &KeeperRegistryPayeesUpdatedIterator{contract: _KeeperRegistry.contract, event: "PayeesUpdated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeesUpdated) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PayeesUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPayeesUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePayeesUpdated(log types.Log) (*KeeperRegistryPayeesUpdated, error) { + event := new(KeeperRegistryPayeesUpdated) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeesUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPayeeshipTransferRequestedIterator struct { + Event *KeeperRegistryPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPayeeshipTransferRequested struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferRequestedIterator, error) { + + var 
transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPayeeshipTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPayeeshipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func 
(_KeeperRegistry *KeeperRegistryFilterer) ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryPayeeshipTransferRequested, error) { + event := new(KeeperRegistryPayeeshipTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPayeeshipTransferredIterator struct { + Event *KeeperRegistryPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPayeeshipTransferred struct { + Transmitter common.Address + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + 
transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPayeeshipTransferredIterator{contract: _KeeperRegistry.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPayeeshipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryPayeeshipTransferred, error) { + 
event := new(KeeperRegistryPayeeshipTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryPaymentWithdrawnIterator struct { + Event *KeeperRegistryPaymentWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryPaymentWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryPaymentWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryPaymentWithdrawn struct { + Transmitter common.Address + Amount *big.Int + To common.Address + Payee common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryPaymentWithdrawnIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule 
[]interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryPaymentWithdrawnIterator{contract: _KeeperRegistry.contract, event: "PaymentWithdrawn", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "PaymentWithdrawn", transmitterRule, amountRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryPaymentWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryPaymentWithdrawn, error) { + event := new(KeeperRegistryPaymentWithdrawn) + if err := _KeeperRegistry.contract.UnpackLog(event, "PaymentWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
KeeperRegistryReorgedUpkeepReportIterator struct { + Event *KeeperRegistryReorgedUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryReorgedUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryReorgedUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryReorgedUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryReorgedUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryReorgedUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryReorgedUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryReorgedUpkeepReportIterator{contract: _KeeperRegistry.contract, event: "ReorgedUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule 
[]interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "ReorgedUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryReorgedUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseReorgedUpkeepReport(log types.Log) (*KeeperRegistryReorgedUpkeepReport, error) { + event := new(KeeperRegistryReorgedUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "ReorgedUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryStaleUpkeepReportIterator struct { + Event *KeeperRegistryStaleUpkeepReport + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryStaleUpkeepReportIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryStaleUpkeepReport) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return 
it.Next() + } +} + +func (it *KeeperRegistryStaleUpkeepReportIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryStaleUpkeepReportIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryStaleUpkeepReport struct { + Id *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryStaleUpkeepReportIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryStaleUpkeepReportIterator{contract: _KeeperRegistry.contract, event: "StaleUpkeepReport", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryStaleUpkeepReport, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "StaleUpkeepReport", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryStaleUpkeepReport) + if err := _KeeperRegistry.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseStaleUpkeepReport(log types.Log) (*KeeperRegistryStaleUpkeepReport, error) { + event := new(KeeperRegistryStaleUpkeepReport) + if err := 
_KeeperRegistry.contract.UnpackLog(event, "StaleUpkeepReport", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryTransmittedIterator struct { + Event *KeeperRegistryTransmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryTransmittedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryTransmitted struct { + ConfigDigest [32]byte + Epoch uint32 + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterTransmitted(opts *bind.FilterOpts) (*KeeperRegistryTransmittedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return &KeeperRegistryTransmittedIterator{contract: _KeeperRegistry.contract, event: "Transmitted", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchTransmitted(opts *bind.WatchOpts, sink chan<- *KeeperRegistryTransmitted) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "Transmitted") + if err != nil { + return 
nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryTransmitted) + if err := _KeeperRegistry.contract.UnpackLog(event, "Transmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseTransmitted(log types.Log) (*KeeperRegistryTransmitted, error) { + event := new(KeeperRegistryTransmitted) + if err := _KeeperRegistry.contract.UnpackLog(event, "Transmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUnpausedIterator struct { + Event *KeeperRegistryUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUnpaused struct { + Account common.Address + Raw types.Log +} + +func 
(_KeeperRegistry *KeeperRegistryFilterer) FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryUnpausedIterator, error) { + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &KeeperRegistryUnpausedIterator{contract: _KeeperRegistry.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUnpaused) (event.Subscription, error) { + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUnpaused(log types.Log) (*KeeperRegistryUnpaused, error) { + event := new(KeeperRegistryUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepAdminTransferRequestedIterator struct { + Event *KeeperRegistryUpkeepAdminTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepAdminTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + 
return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepAdminTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepAdminTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepAdminTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepAdminTransferRequested struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryUpkeepAdminTransferRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepAdminTransferRequestedIterator{contract: _KeeperRegistry.contract, event: "UpkeepAdminTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule 
[]interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepAdminTransferRequested", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepAdminTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryUpkeepAdminTransferRequested, error) { + event := new(KeeperRegistryUpkeepAdminTransferRequested) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepAdminTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepAdminTransferredIterator struct { + Event *KeeperRegistryUpkeepAdminTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepAdminTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepAdminTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepAdminTransferred) + if err 
:= it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepAdminTransferredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepAdminTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepAdminTransferred struct { + Id *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryUpkeepAdminTransferredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepAdminTransferredIterator{contract: _KeeperRegistry.contract, event: "UpkeepAdminTransferred", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, 
"UpkeepAdminTransferred", idRule, fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepAdminTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryUpkeepAdminTransferred, error) { + event := new(KeeperRegistryUpkeepAdminTransferred) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepAdminTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepCanceledIterator struct { + Event *KeeperRegistryUpkeepCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepCanceledIterator) Error() error { + return it.fail +} + +func (it 
*KeeperRegistryUpkeepCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepCanceled struct { + Id *big.Int + AtBlockHeight uint64 + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryUpkeepCanceledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepCanceledIterator{contract: _KeeperRegistry.contract, event: "UpkeepCanceled", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var atBlockHeightRule []interface{} + for _, atBlockHeightItem := range atBlockHeight { + atBlockHeightRule = append(atBlockHeightRule, atBlockHeightItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepCanceled", idRule, atBlockHeightRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepCanceled) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case 
<-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepCanceled(log types.Log) (*KeeperRegistryUpkeepCanceled, error) { + event := new(KeeperRegistryUpkeepCanceled) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepCheckDataSetIterator struct { + Event *KeeperRegistryUpkeepCheckDataSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepCheckDataSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepCheckDataSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepCheckDataSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepCheckDataSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepCheckDataSet struct { + Id *big.Int + NewCheckData []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepCheckDataSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + 
return nil, err + } + return &KeeperRegistryUpkeepCheckDataSetIterator{contract: _KeeperRegistry.contract, event: "UpkeepCheckDataSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepCheckDataSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepCheckDataSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepCheckDataSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepCheckDataSet(log types.Log) (*KeeperRegistryUpkeepCheckDataSet, error) { + event := new(KeeperRegistryUpkeepCheckDataSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepCheckDataSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepGasLimitSetIterator struct { + Event *KeeperRegistryUpkeepGasLimitSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepGasLimitSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = 
log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepGasLimitSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepGasLimitSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepGasLimitSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepGasLimitSet struct { + Id *big.Int + GasLimit *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepGasLimitSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepGasLimitSetIterator{contract: _KeeperRegistry.contract, event: "UpkeepGasLimitSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepGasLimitSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepGasLimitSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryUpkeepGasLimitSet, error) { + event := new(KeeperRegistryUpkeepGasLimitSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepGasLimitSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepMigratedIterator struct { + Event *KeeperRegistryUpkeepMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepMigratedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepMigrated struct { + Id *big.Int + RemainingBalance *big.Int + Destination common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepMigratedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + 
logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepMigratedIterator{contract: _KeeperRegistry.contract, event: "UpkeepMigrated", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepMigrated, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepMigrated", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepMigrated) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepMigrated(log types.Log) (*KeeperRegistryUpkeepMigrated, error) { + event := new(KeeperRegistryUpkeepMigrated) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepMigrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepOffchainConfigSetIterator struct { + Event *KeeperRegistryUpkeepOffchainConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepOffchainConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepOffchainConfigSet) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepOffchainConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepOffchainConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepOffchainConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepOffchainConfigSet struct { + Id *big.Int + OffchainConfig []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepOffchainConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepOffchainConfigSetIterator{contract: _KeeperRegistry.contract, event: "UpkeepOffchainConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepOffchainConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(KeeperRegistryUpkeepOffchainConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepOffchainConfigSet(log types.Log) (*KeeperRegistryUpkeepOffchainConfigSet, error) { + event := new(KeeperRegistryUpkeepOffchainConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepOffchainConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepPausedIterator struct { + Event *KeeperRegistryUpkeepPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepPausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepPaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepPaused(opts 
*bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepPausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepPausedIterator{contract: _KeeperRegistry.contract, event: "UpkeepPaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepPaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepPaused(log types.Log) (*KeeperRegistryUpkeepPaused, error) { + event := new(KeeperRegistryUpkeepPaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepPerformedIterator struct { + Event *KeeperRegistryUpkeepPerformed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepPerformedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done 
{ + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPerformed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepPerformedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepPerformedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepPerformed struct { + Id *big.Int + Success bool + TotalPayment *big.Int + GasUsed *big.Int + GasOverhead *big.Int + Trigger []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*KeeperRegistryUpkeepPerformedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepPerformedIterator{contract: _KeeperRegistry.contract, event: "UpkeepPerformed", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + var successRule []interface{} + for _, successItem := range 
success { + successRule = append(successRule, successItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepPerformed", idRule, successRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepPerformed) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepPerformed(log types.Log) (*KeeperRegistryUpkeepPerformed, error) { + event := new(KeeperRegistryUpkeepPerformed) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPerformed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepPrivilegeConfigSetIterator struct { + Event *KeeperRegistryUpkeepPrivilegeConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepPrivilegeConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepPrivilegeConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return 
it.Next() + } +} + +func (it *KeeperRegistryUpkeepPrivilegeConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepPrivilegeConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepPrivilegeConfigSet struct { + Id *big.Int + PrivilegeConfig []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepPrivilegeConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepPrivilegeConfigSetIterator{contract: _KeeperRegistry.contract, event: "UpkeepPrivilegeConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepPrivilegeConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepPrivilegeConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepPrivilegeConfigSet(log types.Log) 
(*KeeperRegistryUpkeepPrivilegeConfigSet, error) { + event := new(KeeperRegistryUpkeepPrivilegeConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepPrivilegeConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepReceivedIterator struct { + Event *KeeperRegistryUpkeepReceived + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepReceivedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepReceived) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepReceivedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepReceivedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepReceived struct { + Id *big.Int + StartingBalance *big.Int + ImportedFrom common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepReceivedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepReceivedIterator{contract: _KeeperRegistry.contract, event: 
"UpkeepReceived", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepReceived, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepReceived", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepReceived) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepReceived(log types.Log) (*KeeperRegistryUpkeepReceived, error) { + event := new(KeeperRegistryUpkeepReceived) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepReceived", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepRegisteredIterator struct { + Event *KeeperRegistryUpkeepRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepRegistered) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepRegisteredIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepRegistered struct { + Id *big.Int + PerformGas uint32 + Admin common.Address + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepRegisteredIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepRegisteredIterator{contract: _KeeperRegistry.contract, event: "UpkeepRegistered", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepRegistered, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepRegistered", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepRegistered) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + 
+func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepRegistered(log types.Log) (*KeeperRegistryUpkeepRegistered, error) { + event := new(KeeperRegistryUpkeepRegistered) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepTriggerConfigSetIterator struct { + Event *KeeperRegistryUpkeepTriggerConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepTriggerConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepTriggerConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepTriggerConfigSetIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepTriggerConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepTriggerConfigSet struct { + Id *big.Int + TriggerConfig []byte + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepTriggerConfigSetIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != 
nil { + return nil, err + } + return &KeeperRegistryUpkeepTriggerConfigSetIterator{contract: _KeeperRegistry.contract, event: "UpkeepTriggerConfigSet", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepTriggerConfigSet", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepTriggerConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepTriggerConfigSet(log types.Log) (*KeeperRegistryUpkeepTriggerConfigSet, error) { + event := new(KeeperRegistryUpkeepTriggerConfigSet) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepTriggerConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type KeeperRegistryUpkeepUnpausedIterator struct { + Event *KeeperRegistryUpkeepUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeeperRegistryUpkeepUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = 
err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeeperRegistryUpkeepUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeeperRegistryUpkeepUnpausedIterator) Error() error { + return it.fail +} + +func (it *KeeperRegistryUpkeepUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeeperRegistryUpkeepUnpaused struct { + Id *big.Int + Raw types.Log +} + +func (_KeeperRegistry *KeeperRegistryFilterer) FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepUnpausedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.FilterLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return &KeeperRegistryUpkeepUnpausedIterator{contract: _KeeperRegistry.contract, event: "UpkeepUnpaused", logs: logs, sub: sub}, nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepUnpaused, id []*big.Int) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _KeeperRegistry.contract.WatchLogs(opts, "UpkeepUnpaused", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeeperRegistryUpkeepUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + 
case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeeperRegistry *KeeperRegistryFilterer) ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryUpkeepUnpaused, error) { + event := new(KeeperRegistryUpkeepUnpaused) + if err := _KeeperRegistry.contract.UnpackLog(event, "UpkeepUnpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LatestConfigDetails struct { + ConfigCount uint32 + BlockNumber uint32 + ConfigDigest [32]byte +} +type LatestConfigDigestAndEpoch struct { + ScanLogs bool + ConfigDigest [32]byte + Epoch uint32 +} + +func (_KeeperRegistry *KeeperRegistry) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _KeeperRegistry.abi.Events["AdminPrivilegeConfigSet"].ID: + return _KeeperRegistry.ParseAdminPrivilegeConfigSet(log) + case _KeeperRegistry.abi.Events["CancelledUpkeepReport"].ID: + return _KeeperRegistry.ParseCancelledUpkeepReport(log) + case _KeeperRegistry.abi.Events["ConfigSet"].ID: + return _KeeperRegistry.ParseConfigSet(log) + case _KeeperRegistry.abi.Events["DedupKeyAdded"].ID: + return _KeeperRegistry.ParseDedupKeyAdded(log) + case _KeeperRegistry.abi.Events["FundsAdded"].ID: + return _KeeperRegistry.ParseFundsAdded(log) + case _KeeperRegistry.abi.Events["FundsWithdrawn"].ID: + return _KeeperRegistry.ParseFundsWithdrawn(log) + case _KeeperRegistry.abi.Events["InsufficientFundsUpkeepReport"].ID: + return _KeeperRegistry.ParseInsufficientFundsUpkeepReport(log) + case _KeeperRegistry.abi.Events["OwnerFundsWithdrawn"].ID: + return _KeeperRegistry.ParseOwnerFundsWithdrawn(log) + case _KeeperRegistry.abi.Events["OwnershipTransferRequested"].ID: + return _KeeperRegistry.ParseOwnershipTransferRequested(log) + case _KeeperRegistry.abi.Events["OwnershipTransferred"].ID: + return _KeeperRegistry.ParseOwnershipTransferred(log) + case _KeeperRegistry.abi.Events["Paused"].ID: + return 
_KeeperRegistry.ParsePaused(log) + case _KeeperRegistry.abi.Events["PayeesUpdated"].ID: + return _KeeperRegistry.ParsePayeesUpdated(log) + case _KeeperRegistry.abi.Events["PayeeshipTransferRequested"].ID: + return _KeeperRegistry.ParsePayeeshipTransferRequested(log) + case _KeeperRegistry.abi.Events["PayeeshipTransferred"].ID: + return _KeeperRegistry.ParsePayeeshipTransferred(log) + case _KeeperRegistry.abi.Events["PaymentWithdrawn"].ID: + return _KeeperRegistry.ParsePaymentWithdrawn(log) + case _KeeperRegistry.abi.Events["ReorgedUpkeepReport"].ID: + return _KeeperRegistry.ParseReorgedUpkeepReport(log) + case _KeeperRegistry.abi.Events["StaleUpkeepReport"].ID: + return _KeeperRegistry.ParseStaleUpkeepReport(log) + case _KeeperRegistry.abi.Events["Transmitted"].ID: + return _KeeperRegistry.ParseTransmitted(log) + case _KeeperRegistry.abi.Events["Unpaused"].ID: + return _KeeperRegistry.ParseUnpaused(log) + case _KeeperRegistry.abi.Events["UpkeepAdminTransferRequested"].ID: + return _KeeperRegistry.ParseUpkeepAdminTransferRequested(log) + case _KeeperRegistry.abi.Events["UpkeepAdminTransferred"].ID: + return _KeeperRegistry.ParseUpkeepAdminTransferred(log) + case _KeeperRegistry.abi.Events["UpkeepCanceled"].ID: + return _KeeperRegistry.ParseUpkeepCanceled(log) + case _KeeperRegistry.abi.Events["UpkeepCheckDataSet"].ID: + return _KeeperRegistry.ParseUpkeepCheckDataSet(log) + case _KeeperRegistry.abi.Events["UpkeepGasLimitSet"].ID: + return _KeeperRegistry.ParseUpkeepGasLimitSet(log) + case _KeeperRegistry.abi.Events["UpkeepMigrated"].ID: + return _KeeperRegistry.ParseUpkeepMigrated(log) + case _KeeperRegistry.abi.Events["UpkeepOffchainConfigSet"].ID: + return _KeeperRegistry.ParseUpkeepOffchainConfigSet(log) + case _KeeperRegistry.abi.Events["UpkeepPaused"].ID: + return _KeeperRegistry.ParseUpkeepPaused(log) + case _KeeperRegistry.abi.Events["UpkeepPerformed"].ID: + return _KeeperRegistry.ParseUpkeepPerformed(log) + case 
_KeeperRegistry.abi.Events["UpkeepPrivilegeConfigSet"].ID: + return _KeeperRegistry.ParseUpkeepPrivilegeConfigSet(log) + case _KeeperRegistry.abi.Events["UpkeepReceived"].ID: + return _KeeperRegistry.ParseUpkeepReceived(log) + case _KeeperRegistry.abi.Events["UpkeepRegistered"].ID: + return _KeeperRegistry.ParseUpkeepRegistered(log) + case _KeeperRegistry.abi.Events["UpkeepTriggerConfigSet"].ID: + return _KeeperRegistry.ParseUpkeepTriggerConfigSet(log) + case _KeeperRegistry.abi.Events["UpkeepUnpaused"].ID: + return _KeeperRegistry.ParseUpkeepUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (KeeperRegistryAdminPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x7c44b4eb59ee7873514e7e43e7718c269d872965938b288aa143befca62f99d2") +} + +func (KeeperRegistryCancelledUpkeepReport) Topic() common.Hash { + return common.HexToHash("0xc3237c8807c467c1b39b8d0395eff077313e691bf0a7388106792564ebfd5636") +} + +func (KeeperRegistryConfigSet) Topic() common.Hash { + return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05") +} + +func (KeeperRegistryDedupKeyAdded) Topic() common.Hash { + return common.HexToHash("0xa4a4e334c0e330143f9437484fe516c13bc560b86b5b0daf58e7084aaac228f2") +} + +func (KeeperRegistryFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (KeeperRegistryFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (KeeperRegistryInsufficientFundsUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x377c8b0c126ae5248d27aca1c76fac4608aff85673ee3caf09747e1044549e02") +} + +func (KeeperRegistryOwnerFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0x1d07d0b0be43d3e5fee41a80b579af370affee03fa595bf56d5d4c19328162f1") +} + +func 
(KeeperRegistryOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistryOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistryPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (KeeperRegistryPayeesUpdated) Topic() common.Hash { + return common.HexToHash("0xa46de38886467c59be07a0675f14781206a5477d871628af46c2443822fcb725") +} + +func (KeeperRegistryPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (KeeperRegistryPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (KeeperRegistryPaymentWithdrawn) Topic() common.Hash { + return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (KeeperRegistryReorgedUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x6aa7f60c176da7af894b384daea2249497448137f5943c1237ada8bc92bdc301") +} + +func (KeeperRegistryStaleUpkeepReport) Topic() common.Hash { + return common.HexToHash("0x405288ea7be309e16cfdf481367f90a413e1d4634fcdaf8966546db9b93012e8") +} + +func (KeeperRegistryTransmitted) Topic() common.Hash { + return common.HexToHash("0xb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62") +} + +func (KeeperRegistryUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (KeeperRegistryUpkeepAdminTransferRequested) Topic() common.Hash { + return common.HexToHash("0xb1cbb2c4b8480034c27e06da5f096b8233a8fd4497028593a41ff6df79726b35") +} + +func 
(KeeperRegistryUpkeepAdminTransferred) Topic() common.Hash { + return common.HexToHash("0x5cff4db96bef051785e999f44bfcd21c18823e034fb92dd376e3db4ce0feeb2c") +} + +func (KeeperRegistryUpkeepCanceled) Topic() common.Hash { + return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (KeeperRegistryUpkeepCheckDataSet) Topic() common.Hash { + return common.HexToHash("0xcba2d5723b2ee59e53a8e8a82a4a7caf4fdfe70e9f7c582950bf7e7a5c24e83d") +} + +func (KeeperRegistryUpkeepGasLimitSet) Topic() common.Hash { + return common.HexToHash("0xc24c07e655ce79fba8a589778987d3c015bc6af1632bb20cf9182e02a65d972c") +} + +func (KeeperRegistryUpkeepMigrated) Topic() common.Hash { + return common.HexToHash("0xb38647142fbb1ea4c000fc4569b37a4e9a9f6313317b84ee3e5326c1a6cd06ff") +} + +func (KeeperRegistryUpkeepOffchainConfigSet) Topic() common.Hash { + return common.HexToHash("0x3e8740446213c8a77d40e08f79136ce3f347d13ed270a6ebdf57159e0faf4850") +} + +func (KeeperRegistryUpkeepPaused) Topic() common.Hash { + return common.HexToHash("0x8ab10247ce168c27748e656ecf852b951fcaac790c18106b19aa0ae57a8b741f") +} + +func (KeeperRegistryUpkeepPerformed) Topic() common.Hash { + return common.HexToHash("0xad8cc9579b21dfe2c2f6ea35ba15b656e46b4f5b0cb424f52739b8ce5cac9c5b") +} + +func (KeeperRegistryUpkeepPrivilegeConfigSet) Topic() common.Hash { + return common.HexToHash("0x2fd8d70753a007014349d4591843cc031c2dd7a260d7dd82eca8253686ae7769") +} + +func (KeeperRegistryUpkeepReceived) Topic() common.Hash { + return common.HexToHash("0x74931a144e43a50694897f241d973aecb5024c0e910f9bb80a163ea3c1cf5a71") +} + +func (KeeperRegistryUpkeepRegistered) Topic() common.Hash { + return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + +func (KeeperRegistryUpkeepTriggerConfigSet) Topic() common.Hash { + return common.HexToHash("0x2b72ac786c97e68dbab71023ed6f2bdbfc80ad9bb7808941929229d71b7d5664") +} + +func (KeeperRegistryUpkeepUnpaused) 
Topic() common.Hash { + return common.HexToHash("0x7bada562044eb163f6b4003c4553e4e62825344c0418eea087bed5ee05a47456") +} + +func (_KeeperRegistry *KeeperRegistry) Address() common.Address { + return _KeeperRegistry.address +} + +type KeeperRegistryInterface interface { + FallbackTo(opts *bind.CallOpts) (common.Address, error) + + LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) + + LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfigBytes []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) + + SetConfigTypeSafe(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig KeeperRegistryBase21OnchainConfig, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) + + SimulatePerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, rawReport []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) + + Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) + + FilterAdminPrivilegeConfigSet(opts *bind.FilterOpts, admin []common.Address) (*KeeperRegistryAdminPrivilegeConfigSetIterator, error) + + WatchAdminPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryAdminPrivilegeConfigSet, admin []common.Address) (event.Subscription, 
error) + + ParseAdminPrivilegeConfigSet(log types.Log) (*KeeperRegistryAdminPrivilegeConfigSet, error) + + FilterCancelledUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryCancelledUpkeepReportIterator, error) + + WatchCancelledUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryCancelledUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseCancelledUpkeepReport(log types.Log) (*KeeperRegistryCancelledUpkeepReport, error) + + FilterConfigSet(opts *bind.FilterOpts) (*KeeperRegistryConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*KeeperRegistryConfigSet, error) + + FilterDedupKeyAdded(opts *bind.FilterOpts, dedupKey [][32]byte) (*KeeperRegistryDedupKeyAddedIterator, error) + + WatchDedupKeyAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryDedupKeyAdded, dedupKey [][32]byte) (event.Subscription, error) + + ParseDedupKeyAdded(log types.Log) (*KeeperRegistryDedupKeyAdded, error) + + FilterFundsAdded(opts *bind.FilterOpts, id []*big.Int, from []common.Address) (*KeeperRegistryFundsAddedIterator, error) + + WatchFundsAdded(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsAdded, id []*big.Int, from []common.Address) (event.Subscription, error) + + ParseFundsAdded(log types.Log) (*KeeperRegistryFundsAdded, error) + + FilterFundsWithdrawn(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryFundsWithdrawnIterator, error) + + WatchFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryFundsWithdrawn, id []*big.Int) (event.Subscription, error) + + ParseFundsWithdrawn(log types.Log) (*KeeperRegistryFundsWithdrawn, error) + + FilterInsufficientFundsUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryInsufficientFundsUpkeepReportIterator, error) + + WatchInsufficientFundsUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryInsufficientFundsUpkeepReport, id []*big.Int) 
(event.Subscription, error) + + ParseInsufficientFundsUpkeepReport(log types.Log) (*KeeperRegistryInsufficientFundsUpkeepReport, error) + + FilterOwnerFundsWithdrawn(opts *bind.FilterOpts) (*KeeperRegistryOwnerFundsWithdrawnIterator, error) + + WatchOwnerFundsWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnerFundsWithdrawn) (event.Subscription, error) + + ParseOwnerFundsWithdrawn(log types.Log) (*KeeperRegistryOwnerFundsWithdrawn, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*KeeperRegistryOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeeperRegistryOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*KeeperRegistryOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*KeeperRegistryPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*KeeperRegistryPaused, error) + + FilterPayeesUpdated(opts *bind.FilterOpts) (*KeeperRegistryPayeesUpdatedIterator, error) + + WatchPayeesUpdated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeesUpdated) (event.Subscription, error) + + ParsePayeesUpdated(log types.Log) (*KeeperRegistryPayeesUpdated, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) 
(*KeeperRegistryPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferRequested, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*KeeperRegistryPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, from []common.Address, to []common.Address) (*KeeperRegistryPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPayeeshipTransferred, transmitter []common.Address, from []common.Address, to []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*KeeperRegistryPayeeshipTransferred, error) + + FilterPaymentWithdrawn(opts *bind.FilterOpts, transmitter []common.Address, amount []*big.Int, to []common.Address) (*KeeperRegistryPaymentWithdrawnIterator, error) + + WatchPaymentWithdrawn(opts *bind.WatchOpts, sink chan<- *KeeperRegistryPaymentWithdrawn, transmitter []common.Address, amount []*big.Int, to []common.Address) (event.Subscription, error) + + ParsePaymentWithdrawn(log types.Log) (*KeeperRegistryPaymentWithdrawn, error) + + FilterReorgedUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryReorgedUpkeepReportIterator, error) + + WatchReorgedUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryReorgedUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseReorgedUpkeepReport(log types.Log) (*KeeperRegistryReorgedUpkeepReport, error) + + FilterStaleUpkeepReport(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryStaleUpkeepReportIterator, error) + + WatchStaleUpkeepReport(opts *bind.WatchOpts, sink chan<- *KeeperRegistryStaleUpkeepReport, id []*big.Int) (event.Subscription, error) + + ParseStaleUpkeepReport(log types.Log) (*KeeperRegistryStaleUpkeepReport, error) + + 
FilterTransmitted(opts *bind.FilterOpts) (*KeeperRegistryTransmittedIterator, error) + + WatchTransmitted(opts *bind.WatchOpts, sink chan<- *KeeperRegistryTransmitted) (event.Subscription, error) + + ParseTransmitted(log types.Log) (*KeeperRegistryTransmitted, error) + + FilterUnpaused(opts *bind.FilterOpts) (*KeeperRegistryUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*KeeperRegistryUnpaused, error) + + FilterUpkeepAdminTransferRequested(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryUpkeepAdminTransferRequestedIterator, error) + + WatchUpkeepAdminTransferRequested(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepAdminTransferRequested, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferRequested(log types.Log) (*KeeperRegistryUpkeepAdminTransferRequested, error) + + FilterUpkeepAdminTransferred(opts *bind.FilterOpts, id []*big.Int, from []common.Address, to []common.Address) (*KeeperRegistryUpkeepAdminTransferredIterator, error) + + WatchUpkeepAdminTransferred(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepAdminTransferred, id []*big.Int, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseUpkeepAdminTransferred(log types.Log) (*KeeperRegistryUpkeepAdminTransferred, error) + + FilterUpkeepCanceled(opts *bind.FilterOpts, id []*big.Int, atBlockHeight []uint64) (*KeeperRegistryUpkeepCanceledIterator, error) + + WatchUpkeepCanceled(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCanceled, id []*big.Int, atBlockHeight []uint64) (event.Subscription, error) + + ParseUpkeepCanceled(log types.Log) (*KeeperRegistryUpkeepCanceled, error) + + FilterUpkeepCheckDataSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepCheckDataSetIterator, error) + + WatchUpkeepCheckDataSet(opts 
*bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepCheckDataSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepCheckDataSet(log types.Log) (*KeeperRegistryUpkeepCheckDataSet, error) + + FilterUpkeepGasLimitSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepGasLimitSetIterator, error) + + WatchUpkeepGasLimitSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepGasLimitSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepGasLimitSet(log types.Log) (*KeeperRegistryUpkeepGasLimitSet, error) + + FilterUpkeepMigrated(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepMigratedIterator, error) + + WatchUpkeepMigrated(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepMigrated, id []*big.Int) (event.Subscription, error) + + ParseUpkeepMigrated(log types.Log) (*KeeperRegistryUpkeepMigrated, error) + + FilterUpkeepOffchainConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepOffchainConfigSetIterator, error) + + WatchUpkeepOffchainConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepOffchainConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepOffchainConfigSet(log types.Log) (*KeeperRegistryUpkeepOffchainConfigSet, error) + + FilterUpkeepPaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepPausedIterator, error) + + WatchUpkeepPaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPaused(log types.Log) (*KeeperRegistryUpkeepPaused, error) + + FilterUpkeepPerformed(opts *bind.FilterOpts, id []*big.Int, success []bool) (*KeeperRegistryUpkeepPerformedIterator, error) + + WatchUpkeepPerformed(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPerformed, id []*big.Int, success []bool) (event.Subscription, error) + + ParseUpkeepPerformed(log types.Log) (*KeeperRegistryUpkeepPerformed, error) + + FilterUpkeepPrivilegeConfigSet(opts *bind.FilterOpts, id []*big.Int) 
(*KeeperRegistryUpkeepPrivilegeConfigSetIterator, error) + + WatchUpkeepPrivilegeConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepPrivilegeConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepPrivilegeConfigSet(log types.Log) (*KeeperRegistryUpkeepPrivilegeConfigSet, error) + + FilterUpkeepReceived(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepReceivedIterator, error) + + WatchUpkeepReceived(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepReceived, id []*big.Int) (event.Subscription, error) + + ParseUpkeepReceived(log types.Log) (*KeeperRegistryUpkeepReceived, error) + + FilterUpkeepRegistered(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepRegisteredIterator, error) + + WatchUpkeepRegistered(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepRegistered, id []*big.Int) (event.Subscription, error) + + ParseUpkeepRegistered(log types.Log) (*KeeperRegistryUpkeepRegistered, error) + + FilterUpkeepTriggerConfigSet(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepTriggerConfigSetIterator, error) + + WatchUpkeepTriggerConfigSet(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepTriggerConfigSet, id []*big.Int) (event.Subscription, error) + + ParseUpkeepTriggerConfigSet(log types.Log) (*KeeperRegistryUpkeepTriggerConfigSet, error) + + FilterUpkeepUnpaused(opts *bind.FilterOpts, id []*big.Int) (*KeeperRegistryUpkeepUnpausedIterator, error) + + WatchUpkeepUnpaused(opts *bind.WatchOpts, sink chan<- *KeeperRegistryUpkeepUnpaused, id []*big.Int) (event.Subscription, error) + + ParseUpkeepUnpaused(log types.Log) (*KeeperRegistryUpkeepUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/keepers_vrf_consumer/keepers_vrf_consumer.go b/core/gethwrappers/generated/keepers_vrf_consumer/keepers_vrf_consumer.go new file mode 100644 index 00000000..57e0aced --- /dev/null +++ 
b/core/gethwrappers/generated/keepers_vrf_consumer/keepers_vrf_consumer.go @@ -0,0 +1,466 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package keepers_vrf_consumer + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var KeepersVRFConsumerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"upkeepInterval\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"COORDINATOR\",\"outputs\":[{\"internalType\":\"contractVRFCoordinatorV2Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"KEY_HASH\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"REQUEST_CONFIRMATIONS\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint
16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"SUBSCRIPTION_ID\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"UPKEEP_INTERVAL\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_lastTimeStamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requests\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"randomness\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_vrfRequestCounter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\
":[],\"name\":\"s_vrfResponseCounter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: "0x61014060405234801561001157600080fd5b50604051610bab380380610bab83398101604081905261003091610089565b60609490941b6001600160601b031916608081905260c09081529290921b6001600160c01b03191660e05260f09190911b6001600160f01b031916610100526101205260a0524260009081556001819055600255610109565b600080600080600060a086880312156100a157600080fd5b85516001600160a01b03811681146100b857600080fd5b60208701519095506001600160401b03811681146100d557600080fd5b60408701516060880151919550935061ffff811681146100f457600080fd5b80925050608086015190509295509295909350565b60805160601c60a05160c05160601c60e05160c01c6101005160f01c61012051610a17610194600039600081816101d501526105370152600081816101fc015261059001526000818160de015261056601526000818161017601526105ca0152600081816101230152818161039201526103d70152600081816102e801526103500152610a176000f3fe608060405234801561001057600080fd5b50600436106100d45760003560e01c806351dc86a5116100815780639579291e1161005b5780639579291e14610252578063a168fa891461025b578063f7818645146102c757600080fd5b806351dc86a5146101d057806367f082b0146101f75780636e04ff0d1461023157600080fd5b806334854043116100b257806334854043146101685780633b2bcbf1146101715780634585e33b146101bd57600080fd5b8063030932bb146100d9578063035262101461011e5780631fe543e314610153575b600080fd5b6101007f000000000000000000000000000000000000000000000000000000000000000081565b60405167ffffffffffffffff90911681526020015b60405180910390f35b6101457f000000000000000000000000000000000000000000000000000000000000000081565b604051908152602001610115565b6101666101613660046107c1565b6102d0565b005b61014560005481565b6101987f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610115565b6101666101cb36600461071d565b610390565b6101457f000000000000000000000000000000000000000000000000000000000000000081565b
61021e7f000000000000000000000000000000000000000000000000000000000000000081565b60405161ffff9091168152602001610115565b61024461023f36600461071d565b6103d1565b6040516101159291906108b0565b61014560025481565b61029b61026936600461078f565b600360205260009081526040902080546001820154600290920154909160ff81169161010090910463ffffffff169084565b6040516101159493929190938452911515602084015263ffffffff166040830152606082015260800190565b61014560015481565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610382576040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660248201526044015b60405180910390fd5b61038c828261040e565b5050565b7f0000000000000000000000000000000000000000000000000000000000000000600054426103bf919061092d565b111561038c574260005561038c61050f565b600060607f000000000000000000000000000000000000000000000000000000000000000060005442610404919061092d565b1191509250929050565b60008281526003602090815260409182902082516080810184528154808252600183015460ff811615159483019490945261010090930463ffffffff169381019390935260020154606083015283146104c3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f72657175657374204944206e6f7420666f756e6420696e206d617000000000006044820152606401610379565b816000815181106104d6576104d66109ac565b602090810291909101810151600085815260039092526040822060029081019190915580549161050583610944565b9190505550505050565b6040517f5d3b1d300000000000000000000000000000000000000000000000000000000081527f0000000000000000000000000000000000000000000000000000000000000000600482015267ffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016602482015261ffff7f0000000000000000000000000000000000000000000000000000000000000000166044820152620249f06064820152600160848201526000907f00000000000000000000000000000000000000000000
0000000000000000000073ffffffffffffffffffffffffffffffffffffffff1690635d3b1d309060a401602060405180830381600087803b15801561062357600080fd5b505af1158015610637573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061065b91906107a8565b6040805160808101825282815260006020808301828152620249f0848601908152606085018481528785526003909352948320935184555160018481018054965163ffffffff16610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ff931515939093167fffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000909716969096179190911790945551600290920191909155815492935061071583610944565b919050555050565b6000806020838503121561073057600080fd5b823567ffffffffffffffff8082111561074857600080fd5b818501915085601f83011261075c57600080fd5b81358181111561076b57600080fd5b86602082850101111561077d57600080fd5b60209290920196919550909350505050565b6000602082840312156107a157600080fd5b5035919050565b6000602082840312156107ba57600080fd5b5051919050565b600080604083850312156107d457600080fd5b8235915060208084013567ffffffffffffffff808211156107f457600080fd5b818601915086601f83011261080857600080fd5b81358181111561081a5761081a6109db565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f8301168101818110858211171561085d5761085d6109db565b604052828152858101935084860182860187018b101561087c57600080fd5b600095505b8386101561089f578035855260019590950194938601938601610881565b508096505050505050509250929050565b821515815260006020604081840152835180604085015260005b818110156108e6578581018301518582016060015282016108ca565b818111156108f8576000606083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01692909201606001949350505050565b60008282101561093f5761093f61097d565b500390565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8214156109765761097661097d565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000
000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var KeepersVRFConsumerABI = KeepersVRFConsumerMetaData.ABI + +var KeepersVRFConsumerBin = KeepersVRFConsumerMetaData.Bin + +func DeployKeepersVRFConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator common.Address, subscriptionId uint64, keyHash [32]byte, requestConfirmations uint16, upkeepInterval *big.Int) (common.Address, *types.Transaction, *KeepersVRFConsumer, error) { + parsed, err := KeepersVRFConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeepersVRFConsumerBin), backend, vrfCoordinator, subscriptionId, keyHash, requestConfirmations, upkeepInterval) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeepersVRFConsumer{address: address, abi: *parsed, KeepersVRFConsumerCaller: KeepersVRFConsumerCaller{contract: contract}, KeepersVRFConsumerTransactor: KeepersVRFConsumerTransactor{contract: contract}, KeepersVRFConsumerFilterer: KeepersVRFConsumerFilterer{contract: contract}}, nil +} + +type KeepersVRFConsumer struct { + address common.Address + abi abi.ABI + KeepersVRFConsumerCaller + KeepersVRFConsumerTransactor + KeepersVRFConsumerFilterer +} + +type KeepersVRFConsumerCaller struct { + contract *bind.BoundContract +} + +type KeepersVRFConsumerTransactor struct { + contract *bind.BoundContract +} + +type KeepersVRFConsumerFilterer struct { + contract *bind.BoundContract +} + +type KeepersVRFConsumerSession struct { + Contract *KeepersVRFConsumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeepersVRFConsumerCallerSession struct { + Contract *KeepersVRFConsumerCaller + CallOpts bind.CallOpts +} + 
+type KeepersVRFConsumerTransactorSession struct { + Contract *KeepersVRFConsumerTransactor + TransactOpts bind.TransactOpts +} + +type KeepersVRFConsumerRaw struct { + Contract *KeepersVRFConsumer +} + +type KeepersVRFConsumerCallerRaw struct { + Contract *KeepersVRFConsumerCaller +} + +type KeepersVRFConsumerTransactorRaw struct { + Contract *KeepersVRFConsumerTransactor +} + +func NewKeepersVRFConsumer(address common.Address, backend bind.ContractBackend) (*KeepersVRFConsumer, error) { + abi, err := abi.JSON(strings.NewReader(KeepersVRFConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindKeepersVRFConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeepersVRFConsumer{address: address, abi: abi, KeepersVRFConsumerCaller: KeepersVRFConsumerCaller{contract: contract}, KeepersVRFConsumerTransactor: KeepersVRFConsumerTransactor{contract: contract}, KeepersVRFConsumerFilterer: KeepersVRFConsumerFilterer{contract: contract}}, nil +} + +func NewKeepersVRFConsumerCaller(address common.Address, caller bind.ContractCaller) (*KeepersVRFConsumerCaller, error) { + contract, err := bindKeepersVRFConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeepersVRFConsumerCaller{contract: contract}, nil +} + +func NewKeepersVRFConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*KeepersVRFConsumerTransactor, error) { + contract, err := bindKeepersVRFConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeepersVRFConsumerTransactor{contract: contract}, nil +} + +func NewKeepersVRFConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*KeepersVRFConsumerFilterer, error) { + contract, err := bindKeepersVRFConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeepersVRFConsumerFilterer{contract: contract}, nil +} + +func bindKeepersVRFConsumer(address common.Address, caller 
bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeepersVRFConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeepersVRFConsumer.Contract.KeepersVRFConsumerCaller.contract.Call(opts, result, method, params...) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeepersVRFConsumer.Contract.KeepersVRFConsumerTransactor.contract.Transfer(opts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeepersVRFConsumer.Contract.KeepersVRFConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeepersVRFConsumer.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeepersVRFConsumer.Contract.contract.Transfer(opts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeepersVRFConsumer.Contract.contract.Transact(opts, method, params...) 
+} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCaller) COORDINATOR(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeepersVRFConsumer.contract.Call(opts, &out, "COORDINATOR") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerSession) COORDINATOR() (common.Address, error) { + return _KeepersVRFConsumer.Contract.COORDINATOR(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCallerSession) COORDINATOR() (common.Address, error) { + return _KeepersVRFConsumer.Contract.COORDINATOR(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCaller) KEYHASH(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _KeepersVRFConsumer.contract.Call(opts, &out, "KEY_HASH") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerSession) KEYHASH() ([32]byte, error) { + return _KeepersVRFConsumer.Contract.KEYHASH(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCallerSession) KEYHASH() ([32]byte, error) { + return _KeepersVRFConsumer.Contract.KEYHASH(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCaller) REQUESTCONFIRMATIONS(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _KeepersVRFConsumer.contract.Call(opts, &out, "REQUEST_CONFIRMATIONS") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerSession) REQUESTCONFIRMATIONS() (uint16, error) { + return _KeepersVRFConsumer.Contract.REQUESTCONFIRMATIONS(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer 
*KeepersVRFConsumerCallerSession) REQUESTCONFIRMATIONS() (uint16, error) { + return _KeepersVRFConsumer.Contract.REQUESTCONFIRMATIONS(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCaller) SUBSCRIPTIONID(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _KeepersVRFConsumer.contract.Call(opts, &out, "SUBSCRIPTION_ID") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerSession) SUBSCRIPTIONID() (uint64, error) { + return _KeepersVRFConsumer.Contract.SUBSCRIPTIONID(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCallerSession) SUBSCRIPTIONID() (uint64, error) { + return _KeepersVRFConsumer.Contract.SUBSCRIPTIONID(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCaller) UPKEEPINTERVAL(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeepersVRFConsumer.contract.Call(opts, &out, "UPKEEP_INTERVAL") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerSession) UPKEEPINTERVAL() (*big.Int, error) { + return _KeepersVRFConsumer.Contract.UPKEEPINTERVAL(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCallerSession) UPKEEPINTERVAL() (*big.Int, error) { + return _KeepersVRFConsumer.Contract.UPKEEPINTERVAL(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCaller) CheckUpkeep(opts *bind.CallOpts, arg0 []byte) (bool, []byte, error) { + var out []interface{} + err := _KeepersVRFConsumer.contract.Call(opts, &out, "checkUpkeep", arg0) + + if err != nil { + return *new(bool), *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out1 := *abi.ConvertType(out[1], 
new([]byte)).(*[]byte) + + return out0, out1, err + +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerSession) CheckUpkeep(arg0 []byte) (bool, []byte, error) { + return _KeepersVRFConsumer.Contract.CheckUpkeep(&_KeepersVRFConsumer.CallOpts, arg0) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCallerSession) CheckUpkeep(arg0 []byte) (bool, []byte, error) { + return _KeepersVRFConsumer.Contract.CheckUpkeep(&_KeepersVRFConsumer.CallOpts, arg0) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCaller) SLastTimeStamp(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeepersVRFConsumer.contract.Call(opts, &out, "s_lastTimeStamp") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerSession) SLastTimeStamp() (*big.Int, error) { + return _KeepersVRFConsumer.Contract.SLastTimeStamp(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCallerSession) SLastTimeStamp() (*big.Int, error) { + return _KeepersVRFConsumer.Contract.SLastTimeStamp(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCaller) SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) { + var out []interface{} + err := _KeepersVRFConsumer.contract.Call(opts, &out, "s_requests", arg0) + + outstruct := new(SRequests) + if err != nil { + return *outstruct, err + } + + outstruct.RequestId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Fulfilled = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.CallbackGasLimit = *abi.ConvertType(out[2], new(uint32)).(*uint32) + outstruct.Randomness = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _KeepersVRFConsumer.Contract.SRequests(&_KeepersVRFConsumer.CallOpts, 
arg0) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCallerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _KeepersVRFConsumer.Contract.SRequests(&_KeepersVRFConsumer.CallOpts, arg0) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCaller) SVrfRequestCounter(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeepersVRFConsumer.contract.Call(opts, &out, "s_vrfRequestCounter") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerSession) SVrfRequestCounter() (*big.Int, error) { + return _KeepersVRFConsumer.Contract.SVrfRequestCounter(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCallerSession) SVrfRequestCounter() (*big.Int, error) { + return _KeepersVRFConsumer.Contract.SVrfRequestCounter(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCaller) SVrfResponseCounter(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _KeepersVRFConsumer.contract.Call(opts, &out, "s_vrfResponseCounter") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerSession) SVrfResponseCounter() (*big.Int, error) { + return _KeepersVRFConsumer.Contract.SVrfResponseCounter(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerCallerSession) SVrfResponseCounter() (*big.Int, error) { + return _KeepersVRFConsumer.Contract.SVrfResponseCounter(&_KeepersVRFConsumer.CallOpts) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerTransactor) PerformUpkeep(opts *bind.TransactOpts, arg0 []byte) (*types.Transaction, error) { + return _KeepersVRFConsumer.contract.Transact(opts, "performUpkeep", arg0) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerSession) 
PerformUpkeep(arg0 []byte) (*types.Transaction, error) { + return _KeepersVRFConsumer.Contract.PerformUpkeep(&_KeepersVRFConsumer.TransactOpts, arg0) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerTransactorSession) PerformUpkeep(arg0 []byte) (*types.Transaction, error) { + return _KeepersVRFConsumer.Contract.PerformUpkeep(&_KeepersVRFConsumer.TransactOpts, arg0) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _KeepersVRFConsumer.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _KeepersVRFConsumer.Contract.RawFulfillRandomWords(&_KeepersVRFConsumer.TransactOpts, requestId, randomWords) +} + +func (_KeepersVRFConsumer *KeepersVRFConsumerTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _KeepersVRFConsumer.Contract.RawFulfillRandomWords(&_KeepersVRFConsumer.TransactOpts, requestId, randomWords) +} + +type SRequests struct { + RequestId *big.Int + Fulfilled bool + CallbackGasLimit uint32 + Randomness *big.Int +} + +func (_KeepersVRFConsumer *KeepersVRFConsumer) Address() common.Address { + return _KeepersVRFConsumer.address +} + +type KeepersVRFConsumerInterface interface { + COORDINATOR(opts *bind.CallOpts) (common.Address, error) + + KEYHASH(opts *bind.CallOpts) ([32]byte, error) + + REQUESTCONFIRMATIONS(opts *bind.CallOpts) (uint16, error) + + SUBSCRIPTIONID(opts *bind.CallOpts) (uint64, error) + + UPKEEPINTERVAL(opts *bind.CallOpts) (*big.Int, error) + + CheckUpkeep(opts *bind.CallOpts, arg0 []byte) (bool, []byte, error) + + SLastTimeStamp(opts *bind.CallOpts) (*big.Int, error) + + SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) + + 
SVrfRequestCounter(opts *bind.CallOpts) (*big.Int, error) + + SVrfResponseCounter(opts *bind.CallOpts) (*big.Int, error) + + PerformUpkeep(opts *bind.TransactOpts, arg0 []byte) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/link_token_interface/link_token_interface.go b/core/gethwrappers/generated/link_token_interface/link_token_interface.go new file mode 100644 index 00000000..888325ea --- /dev/null +++ b/core/gethwrappers/generated/link_token_interface/link_token_interface.go @@ -0,0 +1,717 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package link_token_interface + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var LinkTokenMetaData = &bind.MetaData{ + ABI: 
"[{\"constant\":true,\"inputs\":[],\"name\":\"name\",\"outputs\":[{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_spender\",\"type\":\"address\"},{\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"approve\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"totalSupply\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_from\",\"type\":\"address\"},{\"name\":\"_to\",\"type\":\"address\"},{\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"transferFrom\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"name\":\"\",\"type\":\"uint8\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_to\",\"type\":\"address\"},{\"name\":\"_value\",\"type\":\"uint256\"},{\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"transferAndCall\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_spender\",\"type\":\"address\"},{\"name\":\"_subtractedValue\",\"type\":\"uint256\"}],\"name\":\"decreaseApproval\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_owner\",\"type\":\"address\"}],\"name\":\"balanceOf\",\"outputs\":[{\"name\":\"balance\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"symbol\",\"outputs\":[
{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_to\",\"type\":\"address\"},{\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"transfer\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_spender\",\"type\":\"address\"},{\"name\":\"_addedValue\",\"type\":\"uint256\"}],\"name\":\"increaseApproval\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_owner\",\"type\":\"address\"},{\"name\":\"_spender\",\"type\":\"address\"}],\"name\":\"allowance\",\"outputs\":[{\"name\":\"remaining\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"Transfer\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":true,\"name\":\"spender\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"Approval\",\"type\":\"event\"}]", + Bin: 
"0x6060604052341561000f57600080fd5b5b600160a060020a03331660009081526001602052604090206b033b2e3c9fd0803ce800000090555b5b610c51806100486000396000f300606060405236156100b75763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166306fdde0381146100bc578063095ea7b31461014757806318160ddd1461017d57806323b872dd146101a2578063313ce567146101de5780634000aea014610207578063661884631461028057806370a08231146102b657806395d89b41146102e7578063a9059cbb14610372578063d73dd623146103a8578063dd62ed3e146103de575b600080fd5b34156100c757600080fd5b6100cf610415565b60405160208082528190810183818151815260200191508051906020019080838360005b8381101561010c5780820151818401525b6020016100f3565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015257600080fd5b610169600160a060020a036004351660243561044c565b604051901515815260200160405180910390f35b341561018857600080fd5b610190610499565b60405190815260200160405180910390f35b34156101ad57600080fd5b610169600160a060020a03600435811690602435166044356104a9565b604051901515815260200160405180910390f35b34156101e957600080fd5b6101f16104f8565b60405160ff909116815260200160405180910390f35b341561021257600080fd5b61016960048035600160a060020a03169060248035919060649060443590810190830135806020601f820181900481020160405190810160405281815292919060208401838380828437509496506104fd95505050505050565b604051901515815260200160405180910390f35b341561028b57600080fd5b610169600160a060020a036004351660243561054c565b604051901515815260200160405180910390f35b34156102c157600080fd5b610190600160a060020a0360043516610648565b60405190815260200160405180910390f35b34156102f257600080fd5b6100cf610667565b60405160208082528190810183818151815260200191508051906020019080838360005b8381101561010c5780820151818401525b6020016100f3565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561037d57600080fd5b610169600160a060020a036004351660243561069e565b6040519015158152602
00160405180910390f35b34156103b357600080fd5b610169600160a060020a03600435166024356106eb565b604051901515815260200160405180910390f35b34156103e957600080fd5b610190600160a060020a0360043581169060243516610790565b60405190815260200160405180910390f35b60408051908101604052600f81527f436861696e4c696e6b20546f6b656e0000000000000000000000000000000000602082015281565b600082600160a060020a03811615801590610479575030600160a060020a031681600160a060020a031614155b151561048457600080fd5b61048e84846107bd565b91505b5b5092915050565b6b033b2e3c9fd0803ce800000081565b600082600160a060020a038116158015906104d6575030600160a060020a031681600160a060020a031614155b15156104e157600080fd5b6104ec85858561082a565b91505b5b509392505050565b601281565b600083600160a060020a0381161580159061052a575030600160a060020a031681600160a060020a031614155b151561053557600080fd5b6104ec85858561093c565b91505b5b509392505050565b600160a060020a033381166000908152600260209081526040808320938616835292905290812054808311156105a957600160a060020a0333811660009081526002602090815260408083209388168352929052908120556105e0565b6105b9818463ffffffff610a2316565b600160a060020a033381166000908152600260209081526040808320938916835292905220555b600160a060020a0333811660008181526002602090815260408083209489168084529490915290819020547f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925915190815260200160405180910390a3600191505b5092915050565b600160a060020a0381166000908152600160205260409020545b919050565b60408051908101604052600481527f4c494e4b00000000000000000000000000000000000000000000000000000000602082015281565b600082600160a060020a038116158015906106cb575030600160a060020a031681600160a060020a031614155b15156106d657600080fd5b61048e8484610a3a565b91505b5b5092915050565b600160a060020a033381166000908152600260209081526040808320938616835292905290812054610723908363ffffffff610afa16565b600160a060020a0333811660008181526002602090815260408083209489168084529490915290819020849055919290917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92591905190815260200160405180910390a
35060015b92915050565b600160a060020a038083166000908152600260209081526040808320938516835292905220545b92915050565b600160a060020a03338116600081815260026020908152604080832094871680845294909152808220859055909291907f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b9259085905190815260200160405180910390a35060015b92915050565b600160a060020a03808416600081815260026020908152604080832033909516835293815283822054928252600190529182205461086e908463ffffffff610a2316565b600160a060020a0380871660009081526001602052604080822093909355908616815220546108a3908463ffffffff610afa16565b600160a060020a0385166000908152600160205260409020556108cc818463ffffffff610a2316565b600160a060020a03808716600081815260026020908152604080832033861684529091529081902093909355908616917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9086905190815260200160405180910390a3600191505b509392505050565b60006109488484610a3a565b5083600160a060020a031633600160a060020a03167fe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16858560405182815260406020820181815290820183818151815260200191508051906020019080838360005b838110156109c35780820151818401525b6020016109aa565b50505050905090810190601f1680156109f05780820380516001836020036101000a031916815260200191505b50935050505060405180910390a3610a0784610b14565b15610a1757610a17848484610b23565b5b5060015b9392505050565b600082821115610a2f57fe5b508082035b92915050565b600160a060020a033316600090815260016020526040812054610a63908363ffffffff610a2316565b600160a060020a033381166000908152600160205260408082209390935590851681522054610a98908363ffffffff610afa16565b600160a060020a0380851660008181526001602052604090819020939093559133909116907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9085905190815260200160405180910390a35060015b92915050565b600082820183811015610b0957fe5b8091505b5092915050565b6000813b908111905b50919050565b82600160a060020a03811663a4c0ed363385856040518463ffffffff167c01000000000000000000000000000000000000000000000000000000000281526004018084600160a060020a0
316600160a060020a0316815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b83811015610bbd5780820151818401525b602001610ba4565b50505050905090810190601f168015610bea5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b1515610c0a57600080fd5b6102c65a03f11515610c1b57600080fd5b5050505b505050505600a165627a7a72305820c5f438ff94e5ddaf2058efa0019e246c636c37a622e04bb67827c7374acad8d60029", +} + +var LinkTokenABI = LinkTokenMetaData.ABI + +var LinkTokenBin = LinkTokenMetaData.Bin + +func DeployLinkToken(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *LinkToken, error) { + parsed, err := LinkTokenMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(LinkTokenBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LinkToken{address: address, abi: *parsed, LinkTokenCaller: LinkTokenCaller{contract: contract}, LinkTokenTransactor: LinkTokenTransactor{contract: contract}, LinkTokenFilterer: LinkTokenFilterer{contract: contract}}, nil +} + +type LinkToken struct { + address common.Address + abi abi.ABI + LinkTokenCaller + LinkTokenTransactor + LinkTokenFilterer +} + +type LinkTokenCaller struct { + contract *bind.BoundContract +} + +type LinkTokenTransactor struct { + contract *bind.BoundContract +} + +type LinkTokenFilterer struct { + contract *bind.BoundContract +} + +type LinkTokenSession struct { + Contract *LinkToken + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type LinkTokenCallerSession struct { + Contract *LinkTokenCaller + CallOpts bind.CallOpts +} + +type LinkTokenTransactorSession struct { + Contract *LinkTokenTransactor + TransactOpts bind.TransactOpts +} + +type LinkTokenRaw struct { + 
Contract *LinkToken +} + +type LinkTokenCallerRaw struct { + Contract *LinkTokenCaller +} + +type LinkTokenTransactorRaw struct { + Contract *LinkTokenTransactor +} + +func NewLinkToken(address common.Address, backend bind.ContractBackend) (*LinkToken, error) { + abi, err := abi.JSON(strings.NewReader(LinkTokenABI)) + if err != nil { + return nil, err + } + contract, err := bindLinkToken(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LinkToken{address: address, abi: abi, LinkTokenCaller: LinkTokenCaller{contract: contract}, LinkTokenTransactor: LinkTokenTransactor{contract: contract}, LinkTokenFilterer: LinkTokenFilterer{contract: contract}}, nil +} + +func NewLinkTokenCaller(address common.Address, caller bind.ContractCaller) (*LinkTokenCaller, error) { + contract, err := bindLinkToken(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LinkTokenCaller{contract: contract}, nil +} + +func NewLinkTokenTransactor(address common.Address, transactor bind.ContractTransactor) (*LinkTokenTransactor, error) { + contract, err := bindLinkToken(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LinkTokenTransactor{contract: contract}, nil +} + +func NewLinkTokenFilterer(address common.Address, filterer bind.ContractFilterer) (*LinkTokenFilterer, error) { + contract, err := bindLinkToken(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LinkTokenFilterer{contract: contract}, nil +} + +func bindLinkToken(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LinkTokenMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_LinkToken *LinkTokenRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return 
_LinkToken.Contract.LinkTokenCaller.contract.Call(opts, result, method, params...) +} + +func (_LinkToken *LinkTokenRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LinkToken.Contract.LinkTokenTransactor.contract.Transfer(opts) +} + +func (_LinkToken *LinkTokenRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LinkToken.Contract.LinkTokenTransactor.contract.Transact(opts, method, params...) +} + +func (_LinkToken *LinkTokenCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LinkToken.Contract.contract.Call(opts, result, method, params...) +} + +func (_LinkToken *LinkTokenTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LinkToken.Contract.contract.Transfer(opts) +} + +func (_LinkToken *LinkTokenTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LinkToken.Contract.contract.Transact(opts, method, params...) 
+} + +func (_LinkToken *LinkTokenCaller) Allowance(opts *bind.CallOpts, _owner common.Address, _spender common.Address) (*big.Int, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "allowance", _owner, _spender) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) Allowance(_owner common.Address, _spender common.Address) (*big.Int, error) { + return _LinkToken.Contract.Allowance(&_LinkToken.CallOpts, _owner, _spender) +} + +func (_LinkToken *LinkTokenCallerSession) Allowance(_owner common.Address, _spender common.Address) (*big.Int, error) { + return _LinkToken.Contract.Allowance(&_LinkToken.CallOpts, _owner, _spender) +} + +func (_LinkToken *LinkTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "balanceOf", _owner) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) BalanceOf(_owner common.Address) (*big.Int, error) { + return _LinkToken.Contract.BalanceOf(&_LinkToken.CallOpts, _owner) +} + +func (_LinkToken *LinkTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) { + return _LinkToken.Contract.BalanceOf(&_LinkToken.CallOpts, _owner) +} + +func (_LinkToken *LinkTokenCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) Decimals() (uint8, error) { + return _LinkToken.Contract.Decimals(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCallerSession) Decimals() (uint8, error) { + return 
_LinkToken.Contract.Decimals(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCaller) Name(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "name") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) Name() (string, error) { + return _LinkToken.Contract.Name(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCallerSession) Name() (string, error) { + return _LinkToken.Contract.Name(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCaller) Symbol(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "symbol") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) Symbol() (string, error) { + return _LinkToken.Contract.Symbol(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCallerSession) Symbol() (string, error) { + return _LinkToken.Contract.Symbol(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCaller) TotalSupply(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "totalSupply") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) TotalSupply() (*big.Int, error) { + return _LinkToken.Contract.TotalSupply(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCallerSession) TotalSupply() (*big.Int, error) { + return _LinkToken.Contract.TotalSupply(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenTransactor) Approve(opts *bind.TransactOpts, _spender common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "approve", _spender, _value) +} + +func 
(_LinkToken *LinkTokenSession) Approve(_spender common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Approve(&_LinkToken.TransactOpts, _spender, _value) +} + +func (_LinkToken *LinkTokenTransactorSession) Approve(_spender common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Approve(&_LinkToken.TransactOpts, _spender, _value) +} + +func (_LinkToken *LinkTokenTransactor) DecreaseApproval(opts *bind.TransactOpts, _spender common.Address, _subtractedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "decreaseApproval", _spender, _subtractedValue) +} + +func (_LinkToken *LinkTokenSession) DecreaseApproval(_spender common.Address, _subtractedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.DecreaseApproval(&_LinkToken.TransactOpts, _spender, _subtractedValue) +} + +func (_LinkToken *LinkTokenTransactorSession) DecreaseApproval(_spender common.Address, _subtractedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.DecreaseApproval(&_LinkToken.TransactOpts, _spender, _subtractedValue) +} + +func (_LinkToken *LinkTokenTransactor) IncreaseApproval(opts *bind.TransactOpts, _spender common.Address, _addedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "increaseApproval", _spender, _addedValue) +} + +func (_LinkToken *LinkTokenSession) IncreaseApproval(_spender common.Address, _addedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.IncreaseApproval(&_LinkToken.TransactOpts, _spender, _addedValue) +} + +func (_LinkToken *LinkTokenTransactorSession) IncreaseApproval(_spender common.Address, _addedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.IncreaseApproval(&_LinkToken.TransactOpts, _spender, _addedValue) +} + +func (_LinkToken *LinkTokenTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _value *big.Int) 
(*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "transfer", _to, _value) +} + +func (_LinkToken *LinkTokenSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Transfer(&_LinkToken.TransactOpts, _to, _value) +} + +func (_LinkToken *LinkTokenTransactorSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Transfer(&_LinkToken.TransactOpts, _to, _value) +} + +func (_LinkToken *LinkTokenTransactor) TransferAndCall(opts *bind.TransactOpts, _to common.Address, _value *big.Int, _data []byte) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "transferAndCall", _to, _value, _data) +} + +func (_LinkToken *LinkTokenSession) TransferAndCall(_to common.Address, _value *big.Int, _data []byte) (*types.Transaction, error) { + return _LinkToken.Contract.TransferAndCall(&_LinkToken.TransactOpts, _to, _value, _data) +} + +func (_LinkToken *LinkTokenTransactorSession) TransferAndCall(_to common.Address, _value *big.Int, _data []byte) (*types.Transaction, error) { + return _LinkToken.Contract.TransferAndCall(&_LinkToken.TransactOpts, _to, _value, _data) +} + +func (_LinkToken *LinkTokenTransactor) TransferFrom(opts *bind.TransactOpts, _from common.Address, _to common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "transferFrom", _from, _to, _value) +} + +func (_LinkToken *LinkTokenSession) TransferFrom(_from common.Address, _to common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.TransferFrom(&_LinkToken.TransactOpts, _from, _to, _value) +} + +func (_LinkToken *LinkTokenTransactorSession) TransferFrom(_from common.Address, _to common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.TransferFrom(&_LinkToken.TransactOpts, _from, _to, _value) +} + +type LinkTokenApprovalIterator struct { + Event 
*LinkTokenApproval + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LinkTokenApprovalIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LinkTokenApproval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LinkTokenApproval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LinkTokenApprovalIterator) Error() error { + return it.fail +} + +func (it *LinkTokenApprovalIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LinkTokenApproval struct { + Owner common.Address + Spender common.Address + Value *big.Int + Raw types.Log +} + +func (_LinkToken *LinkTokenFilterer) FilterApproval(opts *bind.FilterOpts, owner []common.Address, spender []common.Address) (*LinkTokenApprovalIterator, error) { + + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var spenderRule []interface{} + for _, spenderItem := range spender { + spenderRule = append(spenderRule, spenderItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "Approval", ownerRule, spenderRule) + if err != nil { + return nil, err + } + return &LinkTokenApprovalIterator{contract: _LinkToken.contract, event: "Approval", logs: logs, sub: sub}, nil +} + +func (_LinkToken *LinkTokenFilterer) WatchApproval(opts *bind.WatchOpts, sink chan<- *LinkTokenApproval, owner []common.Address, spender []common.Address) (event.Subscription, error) { + + var ownerRule []interface{} + for _, 
ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var spenderRule []interface{} + for _, spenderItem := range spender { + spenderRule = append(spenderRule, spenderItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "Approval", ownerRule, spenderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LinkTokenApproval) + if err := _LinkToken.contract.UnpackLog(event, "Approval", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LinkToken *LinkTokenFilterer) ParseApproval(log types.Log) (*LinkTokenApproval, error) { + event := new(LinkTokenApproval) + if err := _LinkToken.contract.UnpackLog(event, "Approval", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LinkTokenTransferIterator struct { + Event *LinkTokenTransfer + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LinkTokenTransferIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LinkTokenTransfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LinkTokenTransfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LinkTokenTransferIterator) 
Error() error { + return it.fail +} + +func (it *LinkTokenTransferIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LinkTokenTransfer struct { + From common.Address + To common.Address + Value *big.Int + Data []byte + Raw types.Log +} + +func (_LinkToken *LinkTokenFilterer) FilterTransfer(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LinkTokenTransferIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "Transfer", fromRule, toRule) + if err != nil { + return nil, err + } + return &LinkTokenTransferIterator{contract: _LinkToken.contract, event: "Transfer", logs: logs, sub: sub}, nil +} + +func (_LinkToken *LinkTokenFilterer) WatchTransfer(opts *bind.WatchOpts, sink chan<- *LinkTokenTransfer, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "Transfer", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LinkTokenTransfer) + if err := _LinkToken.contract.UnpackLog(event, "Transfer", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LinkToken *LinkTokenFilterer) ParseTransfer(log types.Log) (*LinkTokenTransfer, error) { + event := new(LinkTokenTransfer) + if 
err := _LinkToken.contract.UnpackLog(event, "Transfer", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_LinkToken *LinkToken) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _LinkToken.abi.Events["Approval"].ID: + return _LinkToken.ParseApproval(log) + case _LinkToken.abi.Events["Transfer"].ID: + return _LinkToken.ParseTransfer(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (LinkTokenApproval) Topic() common.Hash { + return common.HexToHash("0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925") +} + +func (LinkTokenTransfer) Topic() common.Hash { + return common.HexToHash("0xe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16") +} + +func (_LinkToken *LinkToken) Address() common.Address { + return _LinkToken.address +} + +type LinkTokenInterface interface { + Allowance(opts *bind.CallOpts, _owner common.Address, _spender common.Address) (*big.Int, error) + + BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) + + Decimals(opts *bind.CallOpts) (uint8, error) + + Name(opts *bind.CallOpts) (string, error) + + Symbol(opts *bind.CallOpts) (string, error) + + TotalSupply(opts *bind.CallOpts) (*big.Int, error) + + Approve(opts *bind.TransactOpts, _spender common.Address, _value *big.Int) (*types.Transaction, error) + + DecreaseApproval(opts *bind.TransactOpts, _spender common.Address, _subtractedValue *big.Int) (*types.Transaction, error) + + IncreaseApproval(opts *bind.TransactOpts, _spender common.Address, _addedValue *big.Int) (*types.Transaction, error) + + Transfer(opts *bind.TransactOpts, _to common.Address, _value *big.Int) (*types.Transaction, error) + + TransferAndCall(opts *bind.TransactOpts, _to common.Address, _value *big.Int, _data []byte) (*types.Transaction, error) + + TransferFrom(opts *bind.TransactOpts, _from common.Address, _to common.Address, _value 
*big.Int) (*types.Transaction, error) + + FilterApproval(opts *bind.FilterOpts, owner []common.Address, spender []common.Address) (*LinkTokenApprovalIterator, error) + + WatchApproval(opts *bind.WatchOpts, sink chan<- *LinkTokenApproval, owner []common.Address, spender []common.Address) (event.Subscription, error) + + ParseApproval(log types.Log) (*LinkTokenApproval, error) + + FilterTransfer(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LinkTokenTransferIterator, error) + + WatchTransfer(opts *bind.WatchOpts, sink chan<- *LinkTokenTransfer, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseTransfer(log types.Log) (*LinkTokenTransfer, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/llo_feeds/llo_feeds.go b/core/gethwrappers/generated/llo_feeds/llo_feeds.go new file mode 100644 index 00000000..cf23e558 --- /dev/null +++ b/core/gethwrappers/generated/llo_feeds/llo_feeds.go @@ -0,0 +1,1200 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package llo_feeds + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var LLOVerifierProxyMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"accessController\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AccessForbidden\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"verifier\",\"type\":\"address\"}],\"name\":\"ConfigDigestAlreadySet\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"verifier\",\"type\":\"address\"}],\"name\":\"VerifierAlreadyInitialized\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"VerifierInvalid\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"VerifierNotFound\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroAddress\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"oldAccessController\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newAccessController\",\"type\":\"address\"}],\"name\":\"AccessControllerSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":tr
ue,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"verifierAddress\",\"type\":\"address\"}],\"name\":\"VerifierInitialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"oldConfigDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"newConfigDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"verifierAddress\",\"type\":\"address\"}],\"name\":\"VerifierSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"verifierAddress\",\"type\":\"address\"}],\"name\":\"VerifierUnset\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAccessController\",\"outputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"accessController\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"getVerifier\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"verifierAddress\",\"type\":\"address\"}],\"name\":\"initializeVerifier\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type
\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"accessController\",\"type\":\"address\"}],\"name\":\"setAccessController\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"currentConfigDigest\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newConfigDigest\",\"type\":\"bytes32\"}],\"name\":\"setVerifier\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"unsetVerifier\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"signedReport\",\"type\":\"bytes\"}],\"name\":\"verify\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"verifierResponse\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5060405161112e38038061112e83398101604081905261002f91610187565b33806000816100855760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156100b5576100b5816100de565b5050600480546001600160a01b0319166001600160a01b039390931692909217909155506101b7565b336001600160a01b038216036101365760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161007c565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60006020828403121561019957600080fd5b81516001600160a01b03811681146101b057600080fd5b9392505050565b610f68806101c66000396000f3fe608060405234801561001057600080fd5b50600436106100c95760003560e01c80638c2a4d5311610081578063eeb7b2481161005b578063eeb7b248146101c8578063f08391d8146101fe578063f2fde38b1461021157600080fd5b80638c2a4d53146101845780638da5cb5b146101975780638e760afe146101b557600080fd5b80632cc99477116100b25780632cc99477146101545780636e9140941461016957806379ba50971461017c57600080fd5b806316d6b5f6146100ce578063181f5a7714610112575b600080fd5b60045473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b60408051808201909152601381527f566572696669657250726f787920312e302e300000000000000000000000000060208201525b6040516101099190610c40565b610167610162366004610c5a565b610224565b005b610167610177366004610c7c565b61036f565b610167610467565b610167610192366004610cb7565b610564565b60005473ffffffffffffffffffffffffffffffffffffffff166100e8565b6101476101c3366004610cd4565b610795565b6100e86101d6366004610c7c565b60009081526003602052604090205473ffffffffffffffffffffffffffffffffffffffff1690565b61016761020c366004610cb7565b6109bf565b61016761021f366004610cb7565b6
10a46565b600081815260036020526040902054819073ffffffffffffffffffffffffffffffffffffffff1680156102a7576040517f375d1fe60000000000000000000000000000000000000000000000000000000081526004810183905273ffffffffffffffffffffffffffffffffffffffff821660248201526044015b60405180910390fd5b3360009081526002602052604090205460ff166102f0576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000838152600360209081526040918290208054337fffffffffffffffffffffffff0000000000000000000000000000000000000000909116811790915582518781529182018690528183015290517fbeb513e532542a562ac35699e7cd9ae7d198dcd3eee15bada6c857d28ceaddcf9181900360600190a150505050565b610377610a5a565b60008181526003602052604090205473ffffffffffffffffffffffffffffffffffffffff16806103d6576040517fb151802b0000000000000000000000000000000000000000000000000000000081526004810183905260240161029e565b6000828152600360205260409081902080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055517f11dc15c4b8ac2b183166cc8427e5385a5ece8308217a4217338c6a7614845c4c9061045b908490849091825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b60405180910390a15050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146104e8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015260640161029e565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b61056c610a5a565b8073ffffffffffffffffffffffffffffffffffffffff81166105ba576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040517f01ffc9a70000000000000000000000000000000000000000000000000000000081527f3d3ac1b500000000000000000000000000000000000000000000000000000000600482015273f
fffffffffffffffffffffffffffffffffffffff8216906301ffc9a790602401602060405180830381865afa158015610644573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906106689190610d46565b61069e576040517f75b0527a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff821660009081526002602052604090205460ff1615610716576040517f4e01ccfd00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8316600482015260240161029e565b73ffffffffffffffffffffffffffffffffffffffff821660008181526002602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905590519182527f1f2cd7c97f4d801b5efe26cc409617c1fd6c5ef786e79aacb90af40923e4e8e9910161045b565b60045460609073ffffffffffffffffffffffffffffffffffffffff16801580159061085557506040517f6b14daf800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821690636b14daf8906108129033906000903690600401610db1565b602060405180830381865afa15801561082f573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108539190610d46565b155b1561088c576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006108988486610dea565b60008181526003602052604090205490915073ffffffffffffffffffffffffffffffffffffffff16806108fa576040517fb151802b0000000000000000000000000000000000000000000000000000000081526004810183905260240161029e565b6040517f3d3ac1b500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821690633d3ac1b59061095090899089903390600401610e27565b6000604051808303816000875af115801561096f573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526109b59190810190610e90565b9695505050505050565b6109c7610a5a565b6004805473ffffffffffffffffffffffffffffffffffffffff8381167ffffffffffffffffffffffff
f000000000000000000000000000000000000000083168117909355604080519190921680825260208201939093527f953e92b1a6442e9c3242531154a3f6f6eb00b4e9c719ba8118fa6235e4ce89b6910161045b565b610a4e610a5a565b610a5781610add565b50565b60005473ffffffffffffffffffffffffffffffffffffffff163314610adb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161029e565b565b3373ffffffffffffffffffffffffffffffffffffffff821603610b5c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161029e565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60005b83811015610bed578181015183820152602001610bd5565b50506000910152565b60008151808452610c0e816020860160208601610bd2565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000610c536020830184610bf6565b9392505050565b60008060408385031215610c6d57600080fd5b50508035926020909101359150565b600060208284031215610c8e57600080fd5b5035919050565b73ffffffffffffffffffffffffffffffffffffffff81168114610a5757600080fd5b600060208284031215610cc957600080fd5b8135610c5381610c95565b60008060208385031215610ce757600080fd5b823567ffffffffffffffff80821115610cff57600080fd5b818501915085601f830112610d1357600080fd5b813581811115610d2257600080fd5b866020828501011115610d3457600080fd5b60209290920196919550909350505050565b600060208284031215610d5857600080fd5b81518015158114610c5357600080fd5b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b73ffffffffffffffffffffffffffffffffffffffff84168152604060208201526000610de1604
083018486610d68565b95945050505050565b80356020831015610e21577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff602084900360031b1b165b92915050565b604081526000610e3b604083018587610d68565b905073ffffffffffffffffffffffffffffffffffffffff83166020830152949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600060208284031215610ea257600080fd5b815167ffffffffffffffff80821115610eba57600080fd5b818401915084601f830112610ece57600080fd5b815181811115610ee057610ee0610e61565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908382118183101715610f2657610f26610e61565b81604052828152876020848701011115610f3f57600080fd5b610f50836020830160208801610bd2565b97965050505050505056fea164736f6c6343000810000a", +} + +var LLOVerifierProxyABI = LLOVerifierProxyMetaData.ABI + +var LLOVerifierProxyBin = LLOVerifierProxyMetaData.Bin + +func DeployLLOVerifierProxy(auth *bind.TransactOpts, backend bind.ContractBackend, accessController common.Address) (common.Address, *types.Transaction, *LLOVerifierProxy, error) { + parsed, err := LLOVerifierProxyMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(LLOVerifierProxyBin), backend, accessController) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LLOVerifierProxy{LLOVerifierProxyCaller: LLOVerifierProxyCaller{contract: contract}, LLOVerifierProxyTransactor: LLOVerifierProxyTransactor{contract: contract}, LLOVerifierProxyFilterer: LLOVerifierProxyFilterer{contract: contract}}, nil +} + +type LLOVerifierProxy struct { + address common.Address + abi abi.ABI + LLOVerifierProxyCaller + LLOVerifierProxyTransactor + LLOVerifierProxyFilterer +} + +type LLOVerifierProxyCaller struct { + contract 
*bind.BoundContract +} + +type LLOVerifierProxyTransactor struct { + contract *bind.BoundContract +} + +type LLOVerifierProxyFilterer struct { + contract *bind.BoundContract +} + +type LLOVerifierProxySession struct { + Contract *LLOVerifierProxy + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type LLOVerifierProxyCallerSession struct { + Contract *LLOVerifierProxyCaller + CallOpts bind.CallOpts +} + +type LLOVerifierProxyTransactorSession struct { + Contract *LLOVerifierProxyTransactor + TransactOpts bind.TransactOpts +} + +type LLOVerifierProxyRaw struct { + Contract *LLOVerifierProxy +} + +type LLOVerifierProxyCallerRaw struct { + Contract *LLOVerifierProxyCaller +} + +type LLOVerifierProxyTransactorRaw struct { + Contract *LLOVerifierProxyTransactor +} + +func NewLLOVerifierProxy(address common.Address, backend bind.ContractBackend) (*LLOVerifierProxy, error) { + abi, err := abi.JSON(strings.NewReader(LLOVerifierProxyABI)) + if err != nil { + return nil, err + } + contract, err := bindLLOVerifierProxy(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LLOVerifierProxy{address: address, abi: abi, LLOVerifierProxyCaller: LLOVerifierProxyCaller{contract: contract}, LLOVerifierProxyTransactor: LLOVerifierProxyTransactor{contract: contract}, LLOVerifierProxyFilterer: LLOVerifierProxyFilterer{contract: contract}}, nil +} + +func NewLLOVerifierProxyCaller(address common.Address, caller bind.ContractCaller) (*LLOVerifierProxyCaller, error) { + contract, err := bindLLOVerifierProxy(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LLOVerifierProxyCaller{contract: contract}, nil +} + +func NewLLOVerifierProxyTransactor(address common.Address, transactor bind.ContractTransactor) (*LLOVerifierProxyTransactor, error) { + contract, err := bindLLOVerifierProxy(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LLOVerifierProxyTransactor{contract: contract}, nil +} + 
+func NewLLOVerifierProxyFilterer(address common.Address, filterer bind.ContractFilterer) (*LLOVerifierProxyFilterer, error) { + contract, err := bindLLOVerifierProxy(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LLOVerifierProxyFilterer{contract: contract}, nil +} + +func bindLLOVerifierProxy(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LLOVerifierProxyMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LLOVerifierProxy.Contract.LLOVerifierProxyCaller.contract.Call(opts, result, method, params...) +} + +func (_LLOVerifierProxy *LLOVerifierProxyRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.LLOVerifierProxyTransactor.contract.Transfer(opts) +} + +func (_LLOVerifierProxy *LLOVerifierProxyRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.LLOVerifierProxyTransactor.contract.Transact(opts, method, params...) +} + +func (_LLOVerifierProxy *LLOVerifierProxyCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LLOVerifierProxy.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.contract.Transfer(opts) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.contract.Transact(opts, method, params...) +} + +func (_LLOVerifierProxy *LLOVerifierProxyCaller) GetAccessController(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _LLOVerifierProxy.contract.Call(opts, &out, "getAccessController") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_LLOVerifierProxy *LLOVerifierProxySession) GetAccessController() (common.Address, error) { + return _LLOVerifierProxy.Contract.GetAccessController(&_LLOVerifierProxy.CallOpts) +} + +func (_LLOVerifierProxy *LLOVerifierProxyCallerSession) GetAccessController() (common.Address, error) { + return _LLOVerifierProxy.Contract.GetAccessController(&_LLOVerifierProxy.CallOpts) +} + +func (_LLOVerifierProxy *LLOVerifierProxyCaller) GetVerifier(opts *bind.CallOpts, configDigest [32]byte) (common.Address, error) { + var out []interface{} + err := _LLOVerifierProxy.contract.Call(opts, &out, "getVerifier", configDigest) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_LLOVerifierProxy *LLOVerifierProxySession) GetVerifier(configDigest [32]byte) (common.Address, error) { + return _LLOVerifierProxy.Contract.GetVerifier(&_LLOVerifierProxy.CallOpts, configDigest) +} + +func (_LLOVerifierProxy *LLOVerifierProxyCallerSession) GetVerifier(configDigest [32]byte) (common.Address, error) { + return 
_LLOVerifierProxy.Contract.GetVerifier(&_LLOVerifierProxy.CallOpts, configDigest) +} + +func (_LLOVerifierProxy *LLOVerifierProxyCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _LLOVerifierProxy.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_LLOVerifierProxy *LLOVerifierProxySession) Owner() (common.Address, error) { + return _LLOVerifierProxy.Contract.Owner(&_LLOVerifierProxy.CallOpts) +} + +func (_LLOVerifierProxy *LLOVerifierProxyCallerSession) Owner() (common.Address, error) { + return _LLOVerifierProxy.Contract.Owner(&_LLOVerifierProxy.CallOpts) +} + +func (_LLOVerifierProxy *LLOVerifierProxyCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _LLOVerifierProxy.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_LLOVerifierProxy *LLOVerifierProxySession) TypeAndVersion() (string, error) { + return _LLOVerifierProxy.Contract.TypeAndVersion(&_LLOVerifierProxy.CallOpts) +} + +func (_LLOVerifierProxy *LLOVerifierProxyCallerSession) TypeAndVersion() (string, error) { + return _LLOVerifierProxy.Contract.TypeAndVersion(&_LLOVerifierProxy.CallOpts) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LLOVerifierProxy.contract.Transact(opts, "acceptOwnership") +} + +func (_LLOVerifierProxy *LLOVerifierProxySession) AcceptOwnership() (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.AcceptOwnership(&_LLOVerifierProxy.TransactOpts) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return 
_LLOVerifierProxy.Contract.AcceptOwnership(&_LLOVerifierProxy.TransactOpts) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactor) InitializeVerifier(opts *bind.TransactOpts, verifierAddress common.Address) (*types.Transaction, error) { + return _LLOVerifierProxy.contract.Transact(opts, "initializeVerifier", verifierAddress) +} + +func (_LLOVerifierProxy *LLOVerifierProxySession) InitializeVerifier(verifierAddress common.Address) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.InitializeVerifier(&_LLOVerifierProxy.TransactOpts, verifierAddress) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactorSession) InitializeVerifier(verifierAddress common.Address) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.InitializeVerifier(&_LLOVerifierProxy.TransactOpts, verifierAddress) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactor) SetAccessController(opts *bind.TransactOpts, accessController common.Address) (*types.Transaction, error) { + return _LLOVerifierProxy.contract.Transact(opts, "setAccessController", accessController) +} + +func (_LLOVerifierProxy *LLOVerifierProxySession) SetAccessController(accessController common.Address) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.SetAccessController(&_LLOVerifierProxy.TransactOpts, accessController) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactorSession) SetAccessController(accessController common.Address) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.SetAccessController(&_LLOVerifierProxy.TransactOpts, accessController) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactor) SetVerifier(opts *bind.TransactOpts, currentConfigDigest [32]byte, newConfigDigest [32]byte) (*types.Transaction, error) { + return _LLOVerifierProxy.contract.Transact(opts, "setVerifier", currentConfigDigest, newConfigDigest) +} + +func (_LLOVerifierProxy *LLOVerifierProxySession) SetVerifier(currentConfigDigest [32]byte, newConfigDigest [32]byte) 
(*types.Transaction, error) { + return _LLOVerifierProxy.Contract.SetVerifier(&_LLOVerifierProxy.TransactOpts, currentConfigDigest, newConfigDigest) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactorSession) SetVerifier(currentConfigDigest [32]byte, newConfigDigest [32]byte) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.SetVerifier(&_LLOVerifierProxy.TransactOpts, currentConfigDigest, newConfigDigest) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _LLOVerifierProxy.contract.Transact(opts, "transferOwnership", to) +} + +func (_LLOVerifierProxy *LLOVerifierProxySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.TransferOwnership(&_LLOVerifierProxy.TransactOpts, to) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.TransferOwnership(&_LLOVerifierProxy.TransactOpts, to) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactor) UnsetVerifier(opts *bind.TransactOpts, configDigest [32]byte) (*types.Transaction, error) { + return _LLOVerifierProxy.contract.Transact(opts, "unsetVerifier", configDigest) +} + +func (_LLOVerifierProxy *LLOVerifierProxySession) UnsetVerifier(configDigest [32]byte) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.UnsetVerifier(&_LLOVerifierProxy.TransactOpts, configDigest) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactorSession) UnsetVerifier(configDigest [32]byte) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.UnsetVerifier(&_LLOVerifierProxy.TransactOpts, configDigest) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactor) Verify(opts *bind.TransactOpts, signedReport []byte) (*types.Transaction, error) { + return _LLOVerifierProxy.contract.Transact(opts, "verify", signedReport) +} 
+ +func (_LLOVerifierProxy *LLOVerifierProxySession) Verify(signedReport []byte) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.Verify(&_LLOVerifierProxy.TransactOpts, signedReport) +} + +func (_LLOVerifierProxy *LLOVerifierProxyTransactorSession) Verify(signedReport []byte) (*types.Transaction, error) { + return _LLOVerifierProxy.Contract.Verify(&_LLOVerifierProxy.TransactOpts, signedReport) +} + +type LLOVerifierProxyAccessControllerSetIterator struct { + Event *LLOVerifierProxyAccessControllerSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LLOVerifierProxyAccessControllerSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LLOVerifierProxyAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LLOVerifierProxyAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LLOVerifierProxyAccessControllerSetIterator) Error() error { + return it.fail +} + +func (it *LLOVerifierProxyAccessControllerSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LLOVerifierProxyAccessControllerSet struct { + OldAccessController common.Address + NewAccessController common.Address + Raw types.Log +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) FilterAccessControllerSet(opts *bind.FilterOpts) (*LLOVerifierProxyAccessControllerSetIterator, error) { + + logs, sub, err := _LLOVerifierProxy.contract.FilterLogs(opts, "AccessControllerSet") + if err != nil { + 
return nil, err + } + return &LLOVerifierProxyAccessControllerSetIterator{contract: _LLOVerifierProxy.contract, event: "AccessControllerSet", logs: logs, sub: sub}, nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) WatchAccessControllerSet(opts *bind.WatchOpts, sink chan<- *LLOVerifierProxyAccessControllerSet) (event.Subscription, error) { + + logs, sub, err := _LLOVerifierProxy.contract.WatchLogs(opts, "AccessControllerSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LLOVerifierProxyAccessControllerSet) + if err := _LLOVerifierProxy.contract.UnpackLog(event, "AccessControllerSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) ParseAccessControllerSet(log types.Log) (*LLOVerifierProxyAccessControllerSet, error) { + event := new(LLOVerifierProxyAccessControllerSet) + if err := _LLOVerifierProxy.contract.UnpackLog(event, "AccessControllerSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LLOVerifierProxyOwnershipTransferRequestedIterator struct { + Event *LLOVerifierProxyOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LLOVerifierProxyOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LLOVerifierProxyOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + 
return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LLOVerifierProxyOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LLOVerifierProxyOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *LLOVerifierProxyOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LLOVerifierProxyOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LLOVerifierProxyOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LLOVerifierProxy.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &LLOVerifierProxyOwnershipTransferRequestedIterator{contract: _LLOVerifierProxy.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *LLOVerifierProxyOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LLOVerifierProxy.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if 
err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LLOVerifierProxyOwnershipTransferRequested) + if err := _LLOVerifierProxy.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) ParseOwnershipTransferRequested(log types.Log) (*LLOVerifierProxyOwnershipTransferRequested, error) { + event := new(LLOVerifierProxyOwnershipTransferRequested) + if err := _LLOVerifierProxy.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LLOVerifierProxyOwnershipTransferredIterator struct { + Event *LLOVerifierProxyOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LLOVerifierProxyOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LLOVerifierProxyOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LLOVerifierProxyOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LLOVerifierProxyOwnershipTransferredIterator) Error() error { + return 
it.fail +} + +func (it *LLOVerifierProxyOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LLOVerifierProxyOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LLOVerifierProxyOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LLOVerifierProxy.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &LLOVerifierProxyOwnershipTransferredIterator{contract: _LLOVerifierProxy.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *LLOVerifierProxyOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LLOVerifierProxy.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LLOVerifierProxyOwnershipTransferred) + if err := _LLOVerifierProxy.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + 
case <-quit: + return nil + } + } + }), nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) ParseOwnershipTransferred(log types.Log) (*LLOVerifierProxyOwnershipTransferred, error) { + event := new(LLOVerifierProxyOwnershipTransferred) + if err := _LLOVerifierProxy.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LLOVerifierProxyVerifierInitializedIterator struct { + Event *LLOVerifierProxyVerifierInitialized + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LLOVerifierProxyVerifierInitializedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LLOVerifierProxyVerifierInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LLOVerifierProxyVerifierInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LLOVerifierProxyVerifierInitializedIterator) Error() error { + return it.fail +} + +func (it *LLOVerifierProxyVerifierInitializedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LLOVerifierProxyVerifierInitialized struct { + VerifierAddress common.Address + Raw types.Log +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) FilterVerifierInitialized(opts *bind.FilterOpts) (*LLOVerifierProxyVerifierInitializedIterator, error) { + + logs, sub, err := _LLOVerifierProxy.contract.FilterLogs(opts, "VerifierInitialized") + if err != nil { + return nil, err + } + return 
&LLOVerifierProxyVerifierInitializedIterator{contract: _LLOVerifierProxy.contract, event: "VerifierInitialized", logs: logs, sub: sub}, nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) WatchVerifierInitialized(opts *bind.WatchOpts, sink chan<- *LLOVerifierProxyVerifierInitialized) (event.Subscription, error) { + + logs, sub, err := _LLOVerifierProxy.contract.WatchLogs(opts, "VerifierInitialized") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LLOVerifierProxyVerifierInitialized) + if err := _LLOVerifierProxy.contract.UnpackLog(event, "VerifierInitialized", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) ParseVerifierInitialized(log types.Log) (*LLOVerifierProxyVerifierInitialized, error) { + event := new(LLOVerifierProxyVerifierInitialized) + if err := _LLOVerifierProxy.contract.UnpackLog(event, "VerifierInitialized", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LLOVerifierProxyVerifierSetIterator struct { + Event *LLOVerifierProxyVerifierSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LLOVerifierProxyVerifierSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LLOVerifierProxyVerifierSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(LLOVerifierProxyVerifierSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LLOVerifierProxyVerifierSetIterator) Error() error { + return it.fail +} + +func (it *LLOVerifierProxyVerifierSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LLOVerifierProxyVerifierSet struct { + OldConfigDigest [32]byte + NewConfigDigest [32]byte + VerifierAddress common.Address + Raw types.Log +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) FilterVerifierSet(opts *bind.FilterOpts) (*LLOVerifierProxyVerifierSetIterator, error) { + + logs, sub, err := _LLOVerifierProxy.contract.FilterLogs(opts, "VerifierSet") + if err != nil { + return nil, err + } + return &LLOVerifierProxyVerifierSetIterator{contract: _LLOVerifierProxy.contract, event: "VerifierSet", logs: logs, sub: sub}, nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) WatchVerifierSet(opts *bind.WatchOpts, sink chan<- *LLOVerifierProxyVerifierSet) (event.Subscription, error) { + + logs, sub, err := _LLOVerifierProxy.contract.WatchLogs(opts, "VerifierSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LLOVerifierProxyVerifierSet) + if err := _LLOVerifierProxy.contract.UnpackLog(event, "VerifierSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) ParseVerifierSet(log types.Log) (*LLOVerifierProxyVerifierSet, error) { + event := new(LLOVerifierProxyVerifierSet) + if err := 
_LLOVerifierProxy.contract.UnpackLog(event, "VerifierSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LLOVerifierProxyVerifierUnsetIterator struct { + Event *LLOVerifierProxyVerifierUnset + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LLOVerifierProxyVerifierUnsetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LLOVerifierProxyVerifierUnset) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LLOVerifierProxyVerifierUnset) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LLOVerifierProxyVerifierUnsetIterator) Error() error { + return it.fail +} + +func (it *LLOVerifierProxyVerifierUnsetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LLOVerifierProxyVerifierUnset struct { + ConfigDigest [32]byte + VerifierAddress common.Address + Raw types.Log +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) FilterVerifierUnset(opts *bind.FilterOpts) (*LLOVerifierProxyVerifierUnsetIterator, error) { + + logs, sub, err := _LLOVerifierProxy.contract.FilterLogs(opts, "VerifierUnset") + if err != nil { + return nil, err + } + return &LLOVerifierProxyVerifierUnsetIterator{contract: _LLOVerifierProxy.contract, event: "VerifierUnset", logs: logs, sub: sub}, nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) WatchVerifierUnset(opts *bind.WatchOpts, sink chan<- *LLOVerifierProxyVerifierUnset) (event.Subscription, error) { + + logs, sub, err := 
_LLOVerifierProxy.contract.WatchLogs(opts, "VerifierUnset") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LLOVerifierProxyVerifierUnset) + if err := _LLOVerifierProxy.contract.UnpackLog(event, "VerifierUnset", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LLOVerifierProxy *LLOVerifierProxyFilterer) ParseVerifierUnset(log types.Log) (*LLOVerifierProxyVerifierUnset, error) { + event := new(LLOVerifierProxyVerifierUnset) + if err := _LLOVerifierProxy.contract.UnpackLog(event, "VerifierUnset", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_LLOVerifierProxy *LLOVerifierProxy) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _LLOVerifierProxy.abi.Events["AccessControllerSet"].ID: + return _LLOVerifierProxy.ParseAccessControllerSet(log) + case _LLOVerifierProxy.abi.Events["OwnershipTransferRequested"].ID: + return _LLOVerifierProxy.ParseOwnershipTransferRequested(log) + case _LLOVerifierProxy.abi.Events["OwnershipTransferred"].ID: + return _LLOVerifierProxy.ParseOwnershipTransferred(log) + case _LLOVerifierProxy.abi.Events["VerifierInitialized"].ID: + return _LLOVerifierProxy.ParseVerifierInitialized(log) + case _LLOVerifierProxy.abi.Events["VerifierSet"].ID: + return _LLOVerifierProxy.ParseVerifierSet(log) + case _LLOVerifierProxy.abi.Events["VerifierUnset"].ID: + return _LLOVerifierProxy.ParseVerifierUnset(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (LLOVerifierProxyAccessControllerSet) Topic() common.Hash { + return 
common.HexToHash("0x953e92b1a6442e9c3242531154a3f6f6eb00b4e9c719ba8118fa6235e4ce89b6") +} + +func (LLOVerifierProxyOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (LLOVerifierProxyOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (LLOVerifierProxyVerifierInitialized) Topic() common.Hash { + return common.HexToHash("0x1f2cd7c97f4d801b5efe26cc409617c1fd6c5ef786e79aacb90af40923e4e8e9") +} + +func (LLOVerifierProxyVerifierSet) Topic() common.Hash { + return common.HexToHash("0xbeb513e532542a562ac35699e7cd9ae7d198dcd3eee15bada6c857d28ceaddcf") +} + +func (LLOVerifierProxyVerifierUnset) Topic() common.Hash { + return common.HexToHash("0x11dc15c4b8ac2b183166cc8427e5385a5ece8308217a4217338c6a7614845c4c") +} + +func (_LLOVerifierProxy *LLOVerifierProxy) Address() common.Address { + return _LLOVerifierProxy.address +} + +type LLOVerifierProxyInterface interface { + GetAccessController(opts *bind.CallOpts) (common.Address, error) + + GetVerifier(opts *bind.CallOpts, configDigest [32]byte) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + InitializeVerifier(opts *bind.TransactOpts, verifierAddress common.Address) (*types.Transaction, error) + + SetAccessController(opts *bind.TransactOpts, accessController common.Address) (*types.Transaction, error) + + SetVerifier(opts *bind.TransactOpts, currentConfigDigest [32]byte, newConfigDigest [32]byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + UnsetVerifier(opts *bind.TransactOpts, configDigest [32]byte) (*types.Transaction, error) + + Verify(opts *bind.TransactOpts, signedReport []byte) 
(*types.Transaction, error) + + FilterAccessControllerSet(opts *bind.FilterOpts) (*LLOVerifierProxyAccessControllerSetIterator, error) + + WatchAccessControllerSet(opts *bind.WatchOpts, sink chan<- *LLOVerifierProxyAccessControllerSet) (event.Subscription, error) + + ParseAccessControllerSet(log types.Log) (*LLOVerifierProxyAccessControllerSet, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LLOVerifierProxyOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *LLOVerifierProxyOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*LLOVerifierProxyOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LLOVerifierProxyOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *LLOVerifierProxyOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*LLOVerifierProxyOwnershipTransferred, error) + + FilterVerifierInitialized(opts *bind.FilterOpts) (*LLOVerifierProxyVerifierInitializedIterator, error) + + WatchVerifierInitialized(opts *bind.WatchOpts, sink chan<- *LLOVerifierProxyVerifierInitialized) (event.Subscription, error) + + ParseVerifierInitialized(log types.Log) (*LLOVerifierProxyVerifierInitialized, error) + + FilterVerifierSet(opts *bind.FilterOpts) (*LLOVerifierProxyVerifierSetIterator, error) + + WatchVerifierSet(opts *bind.WatchOpts, sink chan<- *LLOVerifierProxyVerifierSet) (event.Subscription, error) + + ParseVerifierSet(log types.Log) (*LLOVerifierProxyVerifierSet, error) + + FilterVerifierUnset(opts *bind.FilterOpts) (*LLOVerifierProxyVerifierUnsetIterator, error) + + WatchVerifierUnset(opts *bind.WatchOpts, sink chan<- 
*LLOVerifierProxyVerifierUnset) (event.Subscription, error) + + ParseVerifierUnset(log types.Log) (*LLOVerifierProxyVerifierUnset, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/llo_feeds_test/llo_feeds_test.go b/core/gethwrappers/generated/llo_feeds_test/llo_feeds_test.go new file mode 100644 index 00000000..51f0f6e4 --- /dev/null +++ b/core/gethwrappers/generated/llo_feeds_test/llo_feeds_test.go @@ -0,0 +1,202 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package llo_feeds_test + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var LLOExposedVerifierMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_feedId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_chainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_contractAddress\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"_configCount\",\"type\":\"uint64\"},{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"_offchainTransmitters\",\"type\":\"bytes32[]\"},{\"internalType\":\"uint8\",\"name\":\"_f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"_encodedConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_encodedConfig\",\"type\":\"bytes\"}],\"name\":\"exposedConfigDigestFromConfigData\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610696806100206000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c80630ebd702314610030575b600080fd5b61004361003e3660046103f7565b610055565b60405190815260200160405180910390f35b60006100a18c8c8c8c8c8c8c8c8c8080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508e92508d91506100b19050565b9c9b505050505050505050505050565b6000808b8b8b8b8b8b8b8b8b8b6040516020016100d79a999897969594939291906105a7565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e06000000000000000000000000000000000000000000000000000000000000179150509a9950505050505050505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461018357600080fd5b919050565b803567ffffffffffffffff8116811461018357600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610216576102166101a0565b604052919050565b600067ffffffffffffffff821115610238576102386101a0565b5060051b60200190565b600082601f83011261025357600080fd5b813560206102686102638361021e565b6101cf565b82815260059290921b8401810191818101908684111561028757600080fd5b8286015b848110156102a95761029c8161015f565b835291830191830161028b565b509695505050505050565b600082601f8301126102c557600080fd5b813560206102d56102638361021e565b82815260059290921b840181019181810190868411156102f457600080fd5b8286015b848110156102a957803583529183019183016102f8565b803560ff8116811461018357600080fd5b60008083601f84011261033257600080fd5b50813567ffffffffffffffff81111561034a57600080fd5b60208301915083602082850101111561036257600080fd5b9250929050565b600082601f83011261037a57600080fd5b813567ffffffffffffffff811115610394576103946101a0565b6103c560207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116016101cf565b8
181528460208386010111156103da57600080fd5b816020850160208301376000918101602001919091529392505050565b60008060008060008060008060008060006101408c8e03121561041957600080fd5b8b359a5060208c0135995061043060408d0161015f565b985061043e60608d01610188565b975067ffffffffffffffff8060808e0135111561045a57600080fd5b61046a8e60808f01358f01610242565b97508060a08e0135111561047d57600080fd5b61048d8e60a08f01358f016102b4565b965061049b60c08e0161030f565b95508060e08e013511156104ae57600080fd5b6104be8e60e08f01358f01610320565b90955093506104d06101008e01610188565b9250806101208e013511156104e457600080fd5b506104f68d6101208e01358e01610369565b90509295989b509295989b9093969950565b600081518084526020808501945080840160005b838110156105385781518752958201959082019060010161051c565b509495945050505050565b6000815180845260005b818110156105695760208185018101518683018201520161054d565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b8a815260208082018b905273ffffffffffffffffffffffffffffffffffffffff8a8116604084015267ffffffffffffffff8a1660608401526101406080840181905289519084018190526000926101608501928b820192855b8181101561061e578451831686529483019493830193600101610600565b505050505082810360a08401526106358189610508565b60ff881660c0850152905082810360e08401526106528187610543565b67ffffffffffffffff861661010085015290508281036101208401526106788185610543565b9d9c5050505050505050505050505056fea164736f6c6343000810000a", +} + +var LLOExposedVerifierABI = LLOExposedVerifierMetaData.ABI + +var LLOExposedVerifierBin = LLOExposedVerifierMetaData.Bin + +func DeployLLOExposedVerifier(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *LLOExposedVerifier, error) { + parsed, err := LLOExposedVerifierMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, 
*parsed, common.FromHex(LLOExposedVerifierBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LLOExposedVerifier{LLOExposedVerifierCaller: LLOExposedVerifierCaller{contract: contract}, LLOExposedVerifierTransactor: LLOExposedVerifierTransactor{contract: contract}, LLOExposedVerifierFilterer: LLOExposedVerifierFilterer{contract: contract}}, nil +} + +type LLOExposedVerifier struct { + address common.Address + abi abi.ABI + LLOExposedVerifierCaller + LLOExposedVerifierTransactor + LLOExposedVerifierFilterer +} + +type LLOExposedVerifierCaller struct { + contract *bind.BoundContract +} + +type LLOExposedVerifierTransactor struct { + contract *bind.BoundContract +} + +type LLOExposedVerifierFilterer struct { + contract *bind.BoundContract +} + +type LLOExposedVerifierSession struct { + Contract *LLOExposedVerifier + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type LLOExposedVerifierCallerSession struct { + Contract *LLOExposedVerifierCaller + CallOpts bind.CallOpts +} + +type LLOExposedVerifierTransactorSession struct { + Contract *LLOExposedVerifierTransactor + TransactOpts bind.TransactOpts +} + +type LLOExposedVerifierRaw struct { + Contract *LLOExposedVerifier +} + +type LLOExposedVerifierCallerRaw struct { + Contract *LLOExposedVerifierCaller +} + +type LLOExposedVerifierTransactorRaw struct { + Contract *LLOExposedVerifierTransactor +} + +func NewLLOExposedVerifier(address common.Address, backend bind.ContractBackend) (*LLOExposedVerifier, error) { + abi, err := abi.JSON(strings.NewReader(LLOExposedVerifierABI)) + if err != nil { + return nil, err + } + contract, err := bindLLOExposedVerifier(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LLOExposedVerifier{address: address, abi: abi, LLOExposedVerifierCaller: LLOExposedVerifierCaller{contract: contract}, LLOExposedVerifierTransactor: LLOExposedVerifierTransactor{contract: contract}, 
LLOExposedVerifierFilterer: LLOExposedVerifierFilterer{contract: contract}}, nil +} + +func NewLLOExposedVerifierCaller(address common.Address, caller bind.ContractCaller) (*LLOExposedVerifierCaller, error) { + contract, err := bindLLOExposedVerifier(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LLOExposedVerifierCaller{contract: contract}, nil +} + +func NewLLOExposedVerifierTransactor(address common.Address, transactor bind.ContractTransactor) (*LLOExposedVerifierTransactor, error) { + contract, err := bindLLOExposedVerifier(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LLOExposedVerifierTransactor{contract: contract}, nil +} + +func NewLLOExposedVerifierFilterer(address common.Address, filterer bind.ContractFilterer) (*LLOExposedVerifierFilterer, error) { + contract, err := bindLLOExposedVerifier(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LLOExposedVerifierFilterer{contract: contract}, nil +} + +func bindLLOExposedVerifier(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LLOExposedVerifierMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_LLOExposedVerifier *LLOExposedVerifierRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LLOExposedVerifier.Contract.LLOExposedVerifierCaller.contract.Call(opts, result, method, params...) 
+} + +func (_LLOExposedVerifier *LLOExposedVerifierRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LLOExposedVerifier.Contract.LLOExposedVerifierTransactor.contract.Transfer(opts) +} + +func (_LLOExposedVerifier *LLOExposedVerifierRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LLOExposedVerifier.Contract.LLOExposedVerifierTransactor.contract.Transact(opts, method, params...) +} + +func (_LLOExposedVerifier *LLOExposedVerifierCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LLOExposedVerifier.Contract.contract.Call(opts, result, method, params...) +} + +func (_LLOExposedVerifier *LLOExposedVerifierTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LLOExposedVerifier.Contract.contract.Transfer(opts) +} + +func (_LLOExposedVerifier *LLOExposedVerifierTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LLOExposedVerifier.Contract.contract.Transact(opts, method, params...) 
+} + +func (_LLOExposedVerifier *LLOExposedVerifierCaller) ExposedConfigDigestFromConfigData(opts *bind.CallOpts, _feedId [32]byte, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) { + var out []interface{} + err := _LLOExposedVerifier.contract.Call(opts, &out, "exposedConfigDigestFromConfigData", _feedId, _chainId, _contractAddress, _configCount, _signers, _offchainTransmitters, _f, _onchainConfig, _encodedConfigVersion, _encodedConfig) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_LLOExposedVerifier *LLOExposedVerifierSession) ExposedConfigDigestFromConfigData(_feedId [32]byte, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) { + return _LLOExposedVerifier.Contract.ExposedConfigDigestFromConfigData(&_LLOExposedVerifier.CallOpts, _feedId, _chainId, _contractAddress, _configCount, _signers, _offchainTransmitters, _f, _onchainConfig, _encodedConfigVersion, _encodedConfig) +} + +func (_LLOExposedVerifier *LLOExposedVerifierCallerSession) ExposedConfigDigestFromConfigData(_feedId [32]byte, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) { + return _LLOExposedVerifier.Contract.ExposedConfigDigestFromConfigData(&_LLOExposedVerifier.CallOpts, _feedId, _chainId, _contractAddress, _configCount, _signers, _offchainTransmitters, _f, _onchainConfig, _encodedConfigVersion, _encodedConfig) +} + +func 
(_LLOExposedVerifier *LLOExposedVerifier) Address() common.Address { + return _LLOExposedVerifier.address +} + +type LLOExposedVerifierInterface interface { + ExposedConfigDigestFromConfigData(opts *bind.CallOpts, _feedId [32]byte, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/log_emitter/log_emitter.go b/core/gethwrappers/generated/log_emitter/log_emitter.go new file mode 100644 index 00000000..2ab8b0e4 --- /dev/null +++ b/core/gethwrappers/generated/log_emitter/log_emitter.go @@ -0,0 +1,791 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package log_emitter + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var LogEmitterMetaData = &bind.MetaData{ + ABI: 
"[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"Log1\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"Log2\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"name\":\"Log3\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"Log4\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"v\",\"type\":\"uint256[]\"}],\"name\":\"EmitLog1\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"v\",\"type\":\"uint256[]\"}],\"name\":\"EmitLog2\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string[]\",\"name\":\"v\",\"type\":\"string[]\"}],\"name\":\"EmitLog3\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"v\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"w\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"}],\"name\":\"EmitLog4\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b506105c5806100206000396000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c8063696933c914610051578063b4b12d9814610066578063bc253bc014610079578063d9c21f461461008c575b600080fd5b61006461005f3660046102d7565b61009f565b005b61006461007436600461036d565b610113565b6100646100873660046102d7565b610163565b61006461009a366004610399565b6101c7565b60005b815181101561010f577f46692c0e59ca9cd1ad8f984a9d11715ec83424398b7eed4e05c8ce84662415a88282815181106100de576100de6104be565b60200260200101516040516100f591815260200190565b60405180910390a180610107816104ed565b9150506100a2565b5050565b60005b8181101561015d57604051839085907fba21d5b63d64546cb4ab29e370a8972bf26f78cb0c395391b4f451699fdfdc5d90600090a380610155816104ed565b915050610116565b50505050565b60005b815181101561010f57818181518110610181576101816104be565b60200260200101517f624fb00c2ce79f34cb543884c3af64816dce0f4cec3d32661959e49d488a7a9360405160405180910390a2806101bf816104ed565b915050610166565b60005b815181101561010f577fb94ec34dfe32a8a7170992a093976368d1e63decf8f0bc0b38a8eb89cc9f95cf828281518110610206576102066104be565b602002602001015160405161021b919061054c565b60405180910390a18061022d816104ed565b9150506101ca565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156102ab576102ab610235565b604052919050565b600067ffffffffffffffff8211156102cd576102cd610235565b5060051b60200190565b600060208083850312156102ea57600080fd5b823567ffffffffffffffff81111561030157600080fd5b8301601f8101851361031257600080fd5b8035610325610320826102b3565b610264565b81815260059190911b8201830190838101908783111561034457600080fd5b928401925b8284101561036257833582529284019290840190610349565b979650505050505050565b60008060006060848603121561038257600080fd5b505081359360208301359350604090920135919050565b600060208083850312156103ac57600080fd5b823567ffffffffffffffff808211156103c457600080fd5b818
5019150601f86818401126103d957600080fd5b82356103e7610320826102b3565b81815260059190911b8401850190858101908983111561040657600080fd5b8686015b838110156104b0578035868111156104225760008081fd5b8701603f81018c136104345760008081fd5b8881013560408882111561044a5761044a610235565b6104798b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08a85011601610264565b8281528e8284860101111561048e5760008081fd5b828285018d83013760009281018c01929092525084525091870191870161040a565b509998505050505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610545577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b600060208083528351808285015260005b818110156105795785810183015185820160400152820161055d565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f830116850101925050509291505056fea164736f6c6343000813000a", +} + +var LogEmitterABI = LogEmitterMetaData.ABI + +var LogEmitterBin = LogEmitterMetaData.Bin + +func DeployLogEmitter(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *LogEmitter, error) { + parsed, err := LogEmitterMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(LogEmitterBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LogEmitter{address: address, abi: *parsed, LogEmitterCaller: LogEmitterCaller{contract: contract}, LogEmitterTransactor: LogEmitterTransactor{contract: contract}, LogEmitterFilterer: LogEmitterFilterer{contract: contract}}, nil +} + +type LogEmitter struct { + address common.Address + abi abi.ABI + LogEmitterCaller + LogEmitterTransactor + 
LogEmitterFilterer +} + +type LogEmitterCaller struct { + contract *bind.BoundContract +} + +type LogEmitterTransactor struct { + contract *bind.BoundContract +} + +type LogEmitterFilterer struct { + contract *bind.BoundContract +} + +type LogEmitterSession struct { + Contract *LogEmitter + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type LogEmitterCallerSession struct { + Contract *LogEmitterCaller + CallOpts bind.CallOpts +} + +type LogEmitterTransactorSession struct { + Contract *LogEmitterTransactor + TransactOpts bind.TransactOpts +} + +type LogEmitterRaw struct { + Contract *LogEmitter +} + +type LogEmitterCallerRaw struct { + Contract *LogEmitterCaller +} + +type LogEmitterTransactorRaw struct { + Contract *LogEmitterTransactor +} + +func NewLogEmitter(address common.Address, backend bind.ContractBackend) (*LogEmitter, error) { + abi, err := abi.JSON(strings.NewReader(LogEmitterABI)) + if err != nil { + return nil, err + } + contract, err := bindLogEmitter(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LogEmitter{address: address, abi: abi, LogEmitterCaller: LogEmitterCaller{contract: contract}, LogEmitterTransactor: LogEmitterTransactor{contract: contract}, LogEmitterFilterer: LogEmitterFilterer{contract: contract}}, nil +} + +func NewLogEmitterCaller(address common.Address, caller bind.ContractCaller) (*LogEmitterCaller, error) { + contract, err := bindLogEmitter(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LogEmitterCaller{contract: contract}, nil +} + +func NewLogEmitterTransactor(address common.Address, transactor bind.ContractTransactor) (*LogEmitterTransactor, error) { + contract, err := bindLogEmitter(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LogEmitterTransactor{contract: contract}, nil +} + +func NewLogEmitterFilterer(address common.Address, filterer bind.ContractFilterer) (*LogEmitterFilterer, error) { + contract, err := 
bindLogEmitter(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LogEmitterFilterer{contract: contract}, nil +} + +func bindLogEmitter(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LogEmitterMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_LogEmitter *LogEmitterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LogEmitter.Contract.LogEmitterCaller.contract.Call(opts, result, method, params...) +} + +func (_LogEmitter *LogEmitterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LogEmitter.Contract.LogEmitterTransactor.contract.Transfer(opts) +} + +func (_LogEmitter *LogEmitterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LogEmitter.Contract.LogEmitterTransactor.contract.Transact(opts, method, params...) +} + +func (_LogEmitter *LogEmitterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LogEmitter.Contract.contract.Call(opts, result, method, params...) +} + +func (_LogEmitter *LogEmitterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LogEmitter.Contract.contract.Transfer(opts) +} + +func (_LogEmitter *LogEmitterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LogEmitter.Contract.contract.Transact(opts, method, params...) 
+} + +func (_LogEmitter *LogEmitterTransactor) EmitLog1(opts *bind.TransactOpts, v []*big.Int) (*types.Transaction, error) { + return _LogEmitter.contract.Transact(opts, "EmitLog1", v) +} + +func (_LogEmitter *LogEmitterSession) EmitLog1(v []*big.Int) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog1(&_LogEmitter.TransactOpts, v) +} + +func (_LogEmitter *LogEmitterTransactorSession) EmitLog1(v []*big.Int) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog1(&_LogEmitter.TransactOpts, v) +} + +func (_LogEmitter *LogEmitterTransactor) EmitLog2(opts *bind.TransactOpts, v []*big.Int) (*types.Transaction, error) { + return _LogEmitter.contract.Transact(opts, "EmitLog2", v) +} + +func (_LogEmitter *LogEmitterSession) EmitLog2(v []*big.Int) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog2(&_LogEmitter.TransactOpts, v) +} + +func (_LogEmitter *LogEmitterTransactorSession) EmitLog2(v []*big.Int) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog2(&_LogEmitter.TransactOpts, v) +} + +func (_LogEmitter *LogEmitterTransactor) EmitLog3(opts *bind.TransactOpts, v []string) (*types.Transaction, error) { + return _LogEmitter.contract.Transact(opts, "EmitLog3", v) +} + +func (_LogEmitter *LogEmitterSession) EmitLog3(v []string) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog3(&_LogEmitter.TransactOpts, v) +} + +func (_LogEmitter *LogEmitterTransactorSession) EmitLog3(v []string) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog3(&_LogEmitter.TransactOpts, v) +} + +func (_LogEmitter *LogEmitterTransactor) EmitLog4(opts *bind.TransactOpts, v *big.Int, w *big.Int, c *big.Int) (*types.Transaction, error) { + return _LogEmitter.contract.Transact(opts, "EmitLog4", v, w, c) +} + +func (_LogEmitter *LogEmitterSession) EmitLog4(v *big.Int, w *big.Int, c *big.Int) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog4(&_LogEmitter.TransactOpts, v, w, c) +} + +func 
(_LogEmitter *LogEmitterTransactorSession) EmitLog4(v *big.Int, w *big.Int, c *big.Int) (*types.Transaction, error) { + return _LogEmitter.Contract.EmitLog4(&_LogEmitter.TransactOpts, v, w, c) +} + +type LogEmitterLog1Iterator struct { + Event *LogEmitterLog1 + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogEmitterLog1Iterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog1) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog1) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogEmitterLog1Iterator) Error() error { + return it.fail +} + +func (it *LogEmitterLog1Iterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LogEmitterLog1 struct { + Arg0 *big.Int + Raw types.Log +} + +func (_LogEmitter *LogEmitterFilterer) FilterLog1(opts *bind.FilterOpts) (*LogEmitterLog1Iterator, error) { + + logs, sub, err := _LogEmitter.contract.FilterLogs(opts, "Log1") + if err != nil { + return nil, err + } + return &LogEmitterLog1Iterator{contract: _LogEmitter.contract, event: "Log1", logs: logs, sub: sub}, nil +} + +func (_LogEmitter *LogEmitterFilterer) WatchLog1(opts *bind.WatchOpts, sink chan<- *LogEmitterLog1) (event.Subscription, error) { + + logs, sub, err := _LogEmitter.contract.WatchLogs(opts, "Log1") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(LogEmitterLog1) + if err := _LogEmitter.contract.UnpackLog(event, "Log1", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogEmitter *LogEmitterFilterer) ParseLog1(log types.Log) (*LogEmitterLog1, error) { + event := new(LogEmitterLog1) + if err := _LogEmitter.contract.UnpackLog(event, "Log1", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LogEmitterLog2Iterator struct { + Event *LogEmitterLog2 + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogEmitterLog2Iterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog2) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog2) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogEmitterLog2Iterator) Error() error { + return it.fail +} + +func (it *LogEmitterLog2Iterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LogEmitterLog2 struct { + Arg0 *big.Int + Raw types.Log +} + +func (_LogEmitter *LogEmitterFilterer) FilterLog2(opts *bind.FilterOpts, arg0 []*big.Int) (*LogEmitterLog2Iterator, error) { + + var arg0Rule []interface{} + for _, arg0Item := range arg0 { + arg0Rule = append(arg0Rule, arg0Item) + } + + logs, sub, err := _LogEmitter.contract.FilterLogs(opts, "Log2", arg0Rule) + 
if err != nil { + return nil, err + } + return &LogEmitterLog2Iterator{contract: _LogEmitter.contract, event: "Log2", logs: logs, sub: sub}, nil +} + +func (_LogEmitter *LogEmitterFilterer) WatchLog2(opts *bind.WatchOpts, sink chan<- *LogEmitterLog2, arg0 []*big.Int) (event.Subscription, error) { + + var arg0Rule []interface{} + for _, arg0Item := range arg0 { + arg0Rule = append(arg0Rule, arg0Item) + } + + logs, sub, err := _LogEmitter.contract.WatchLogs(opts, "Log2", arg0Rule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LogEmitterLog2) + if err := _LogEmitter.contract.UnpackLog(event, "Log2", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogEmitter *LogEmitterFilterer) ParseLog2(log types.Log) (*LogEmitterLog2, error) { + event := new(LogEmitterLog2) + if err := _LogEmitter.contract.UnpackLog(event, "Log2", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LogEmitterLog3Iterator struct { + Event *LogEmitterLog3 + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogEmitterLog3Iterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog3) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog3) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogEmitterLog3Iterator) Error() error { + return it.fail +} + +func (it *LogEmitterLog3Iterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LogEmitterLog3 struct { + Arg0 string + Raw types.Log +} + +func (_LogEmitter *LogEmitterFilterer) FilterLog3(opts *bind.FilterOpts) (*LogEmitterLog3Iterator, error) { + + logs, sub, err := _LogEmitter.contract.FilterLogs(opts, "Log3") + if err != nil { + return nil, err + } + return &LogEmitterLog3Iterator{contract: _LogEmitter.contract, event: "Log3", logs: logs, sub: sub}, nil +} + +func (_LogEmitter *LogEmitterFilterer) WatchLog3(opts *bind.WatchOpts, sink chan<- *LogEmitterLog3) (event.Subscription, error) { + + logs, sub, err := _LogEmitter.contract.WatchLogs(opts, "Log3") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LogEmitterLog3) + if err := _LogEmitter.contract.UnpackLog(event, "Log3", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogEmitter *LogEmitterFilterer) ParseLog3(log types.Log) (*LogEmitterLog3, error) { + event := new(LogEmitterLog3) + if err := _LogEmitter.contract.UnpackLog(event, "Log3", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LogEmitterLog4Iterator struct { + Event *LogEmitterLog4 + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogEmitterLog4Iterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := 
<-it.logs: + it.Event = new(LogEmitterLog4) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogEmitterLog4) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogEmitterLog4Iterator) Error() error { + return it.fail +} + +func (it *LogEmitterLog4Iterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LogEmitterLog4 struct { + Arg0 *big.Int + Arg1 *big.Int + Raw types.Log +} + +func (_LogEmitter *LogEmitterFilterer) FilterLog4(opts *bind.FilterOpts, arg0 []*big.Int, arg1 []*big.Int) (*LogEmitterLog4Iterator, error) { + + var arg0Rule []interface{} + for _, arg0Item := range arg0 { + arg0Rule = append(arg0Rule, arg0Item) + } + var arg1Rule []interface{} + for _, arg1Item := range arg1 { + arg1Rule = append(arg1Rule, arg1Item) + } + + logs, sub, err := _LogEmitter.contract.FilterLogs(opts, "Log4", arg0Rule, arg1Rule) + if err != nil { + return nil, err + } + return &LogEmitterLog4Iterator{contract: _LogEmitter.contract, event: "Log4", logs: logs, sub: sub}, nil +} + +func (_LogEmitter *LogEmitterFilterer) WatchLog4(opts *bind.WatchOpts, sink chan<- *LogEmitterLog4, arg0 []*big.Int, arg1 []*big.Int) (event.Subscription, error) { + + var arg0Rule []interface{} + for _, arg0Item := range arg0 { + arg0Rule = append(arg0Rule, arg0Item) + } + var arg1Rule []interface{} + for _, arg1Item := range arg1 { + arg1Rule = append(arg1Rule, arg1Item) + } + + logs, sub, err := _LogEmitter.contract.WatchLogs(opts, "Log4", arg0Rule, arg1Rule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + 
case log := <-logs: + + event := new(LogEmitterLog4) + if err := _LogEmitter.contract.UnpackLog(event, "Log4", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogEmitter *LogEmitterFilterer) ParseLog4(log types.Log) (*LogEmitterLog4, error) { + event := new(LogEmitterLog4) + if err := _LogEmitter.contract.UnpackLog(event, "Log4", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_LogEmitter *LogEmitter) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _LogEmitter.abi.Events["Log1"].ID: + return _LogEmitter.ParseLog1(log) + case _LogEmitter.abi.Events["Log2"].ID: + return _LogEmitter.ParseLog2(log) + case _LogEmitter.abi.Events["Log3"].ID: + return _LogEmitter.ParseLog3(log) + case _LogEmitter.abi.Events["Log4"].ID: + return _LogEmitter.ParseLog4(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (LogEmitterLog1) Topic() common.Hash { + return common.HexToHash("0x46692c0e59ca9cd1ad8f984a9d11715ec83424398b7eed4e05c8ce84662415a8") +} + +func (LogEmitterLog2) Topic() common.Hash { + return common.HexToHash("0x624fb00c2ce79f34cb543884c3af64816dce0f4cec3d32661959e49d488a7a93") +} + +func (LogEmitterLog3) Topic() common.Hash { + return common.HexToHash("0xb94ec34dfe32a8a7170992a093976368d1e63decf8f0bc0b38a8eb89cc9f95cf") +} + +func (LogEmitterLog4) Topic() common.Hash { + return common.HexToHash("0xba21d5b63d64546cb4ab29e370a8972bf26f78cb0c395391b4f451699fdfdc5d") +} + +func (_LogEmitter *LogEmitter) Address() common.Address { + return _LogEmitter.address +} + +type LogEmitterInterface interface { + EmitLog1(opts *bind.TransactOpts, v []*big.Int) (*types.Transaction, error) + + EmitLog2(opts *bind.TransactOpts, v 
[]*big.Int) (*types.Transaction, error) + + EmitLog3(opts *bind.TransactOpts, v []string) (*types.Transaction, error) + + EmitLog4(opts *bind.TransactOpts, v *big.Int, w *big.Int, c *big.Int) (*types.Transaction, error) + + FilterLog1(opts *bind.FilterOpts) (*LogEmitterLog1Iterator, error) + + WatchLog1(opts *bind.WatchOpts, sink chan<- *LogEmitterLog1) (event.Subscription, error) + + ParseLog1(log types.Log) (*LogEmitterLog1, error) + + FilterLog2(opts *bind.FilterOpts, arg0 []*big.Int) (*LogEmitterLog2Iterator, error) + + WatchLog2(opts *bind.WatchOpts, sink chan<- *LogEmitterLog2, arg0 []*big.Int) (event.Subscription, error) + + ParseLog2(log types.Log) (*LogEmitterLog2, error) + + FilterLog3(opts *bind.FilterOpts) (*LogEmitterLog3Iterator, error) + + WatchLog3(opts *bind.WatchOpts, sink chan<- *LogEmitterLog3) (event.Subscription, error) + + ParseLog3(log types.Log) (*LogEmitterLog3, error) + + FilterLog4(opts *bind.FilterOpts, arg0 []*big.Int, arg1 []*big.Int) (*LogEmitterLog4Iterator, error) + + WatchLog4(opts *bind.WatchOpts, sink chan<- *LogEmitterLog4, arg0 []*big.Int, arg1 []*big.Int) (event.Subscription, error) + + ParseLog4(log types.Log) (*LogEmitterLog4, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/log_triggered_streams_lookup_wrapper/log_triggered_streams_lookup_wrapper.go b/core/gethwrappers/generated/log_triggered_streams_lookup_wrapper/log_triggered_streams_lookup_wrapper.go new file mode 100644 index 00000000..5fc8b09d --- /dev/null +++ b/core/gethwrappers/generated/log_triggered_streams_lookup_wrapper/log_triggered_streams_lookup_wrapper.go @@ -0,0 +1,756 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package log_triggered_streams_lookup_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type Log struct { + Index *big.Int + Timestamp *big.Int + TxHash [32]byte + BlockNumber *big.Int + BlockHash [32]byte + Source common.Address + Topics [][32]byte + Data []byte +} + +var LogTriggeredStreamsLookupMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"_useArbitrumBlockNum\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"_verify\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"feedParamKey\",\"type\":\"string\"},{\"internalType\":\"string[]\",\"name\":\"feeds\",\"type\":\"string[]\"},{\"internalType\":\"string\",\"name\":\"timeParamKey\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"time\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"StreamsLookup\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"orderId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"exchange\",\"type\":\"address\"}],\"name\":\"LimitOrderExecuted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\"
:\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"orderId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"exchange\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"blob\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"verified\",\"type\":\"bytes\"}],\"name\":\"PerformingLogTriggerUpkeep\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes[]\",\"name\":\"values\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"checkCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"txHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"source\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"topics\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"internalType\":\"structLog\",\"name\":\"log\",\"type\":\"tuple\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"checkLog\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\
":[],\"name\":\"counter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"feedParamKey\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"feedsHex\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"feedParam\",\"type\":\"string\"}],\"name\":\"setFeedParamKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string[]\",\"name\":\"newFeeds\",\"type\":\"string[]\"}],\"name\":\"setFeedsHex\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"timeParam\",\"type\":\"string\"}],\"name\":\"setTimeParamKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"start\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"timeParamKey\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"useArbitrumBlockNum\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"verify\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x610120604052604260a08181526080918291906200179660c03990526200002a9060019081620000e8565b506040805180820190915260098152680cccacac892c890caf60bb1b60208201526002906200005a908262000264565b5060408051808201909152600b81526a313637b1b5a73ab6b132b960a91b60208201526003906200008c908262000264565b503480156200009a57600080fd5b50604051620017d8380380620017d8833981016040819052620000bd9162000346565b6000805461ffff191692151561ff00191692909217610100911515919091021781556004556200037e565b82805482825590600052602060002090810192821562000133579160200282015b8281111562000133578251829062000122908262000264565b509160200191906001019062000109565b506200014192915062000145565b5090565b80821115620001415760006200015c828262000166565b5060010162000145565b5080546200017490620001d5565b6000825580601f1062000185575050565b601f016020900490600052602060002090810190620001a59190620001a8565b50565b5b80821115620001415760008155600101620001a9565b634e487b7160e01b600052604160045260246000fd5b600181811c90821680620001ea57607f821691505b6020821081036200020b57634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200025f57600081815260208120601f850160051c810160208610156200023a5750805b601f850160051c820191505b818110156200025b5782815560010162000246565b5050505b505050565b81516001600160401b03811115620002805762000280620001bf565b6200029881620002918454620001d5565b8462000211565b602080601f831160018114620002d05760008415620002b75750858301515b600019600386901b1c1916600185901b1785556200025b565b600085815260208120601f198616915b828110156200030157888601518255948401946001909101908401620002e0565b5085821015620003205787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b805180151581146200034157600080fd5b919050565b600080604083850312156200035a57600080fd5b620003658362000330565b9150620003756020840162000330565b90509250929050565b611408806200038e6000396000f3fe608060405234801561001057600080fd5b50600436106100df5760003560e01c8063642f6cef1161008c578063afb28d1f11610066578063afb28d1f146101c3578063be9a6555146101cb578063c98f10b0146101d357806
3fc735e99146101db57600080fd5b8063642f6cef146101735780639525d574146101905780639d6f1cc7146101a357600080fd5b80634b56a42e116100bd5780634b56a42e14610136578063601d5a711461014957806361bc221a1461015c57600080fd5b806305e25131146100e457806340691db4146100f95780634585e33b14610123575b600080fd5b6100f76100f2366004610ac8565b6101ed565b005b61010c610107366004610b79565b610204565b60405161011a929190610c54565b60405180910390f35b6100f7610131366004610c77565b6104da565b61010c610144366004610ce9565b6106d8565b6100f7610157366004610da6565b61072e565b61016560045481565b60405190815260200161011a565b6000546101809060ff1681565b604051901515815260200161011a565b6100f761019e366004610da6565b61073a565b6101b66101b1366004610ddb565b610746565b60405161011a9190610df4565b6101b66107f2565b6100f76107ff565b6101b6610832565b60005461018090610100900460ff1681565b80516102009060019060208401906108c5565b5050565b60006060600061021261083f565b90507fd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd61024260c0870187610e0e565b600081811061025357610253610e76565b905060200201350361045257600061026e60c0870187610e0e565b600181811061027f5761027f610e76565b9050602002013560405160200161029891815260200190565b60405160208183030381529060405290506000818060200190518101906102bf9190610ea5565b905060006102d060c0890189610e0e565b60028181106102e1576102e1610e76565b905060200201356040516020016102fa91815260200190565b60405160208183030381529060405290506000818060200190518101906103219190610ea5565b9050600061033260c08b018b610e0e565b600381811061034357610343610e76565b9050602002013560405160200161035c91815260200190565b60405160208183030381529060405290506000818060200190518101906103839190610ee7565b604080516020810188905290810185905273ffffffffffffffffffffffffffffffffffffffff821660608201527fd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd60808201529091506002906001906003908a9060a001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152908290527ff055e4a20000000000000000000000000000000000000000000000000000000082526104499594939
291600401610ff0565b60405180910390fd5b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602160248201527f636f756c64206e6f742066696e64206d61746368696e67206576656e7420736960448201527f67000000000000000000000000000000000000000000000000000000000000006064820152608401610449565b6000806104e983850185610ce9565b915091506000806000808480602001905181019061050791906110b3565b6040805160208101909152600080825254949850929650909450925090610100900460ff1615610600577309dff56a4ff44e0f4436260a04f5cfa65636a48173ffffffffffffffffffffffffffffffffffffffff16638e760afe8860008151811061057457610574610e76565b60200260200101516040518263ffffffff1660e01b81526004016105989190610df4565b6000604051808303816000875af11580156105b7573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526105fd91908101906110f0565b90505b60045461060e906001611167565b6004557f2e00161baa7e3ee28260d12a08ade832b4160748111950f092fc0b921ac6a933820161066a576040516000906064906001907fd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd908490a45b327f299a03817e683a32b21e29e3ae3c31f1c9c773f7d532836d116b62a9281fbc9d86868661069761083f565b8c6000815181106106aa576106aa610e76565b6020026020010151876040516106c5969594939291906111a7565b60405180910390a2505050505050505050565b60006060600084846040516020016106f1929190611207565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052600193509150505b9250929050565b600361020082826112e1565b600261020082826112e1565b6001818154811061075657600080fd5b90600052602060002001600091509050805461077190610f02565b80601f016020809104026020016040519081016040528092919081815260200182805461079d90610f02565b80156107ea5780601f106107bf576101008083540402835291602001916107ea565b820191906000526020600020905b8154815290600101906020018083116107cd57829003601f168201915b505050505081565b6002805461077190610f02565b6040516000906064906001907fd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46
de53956cd908490a4565b6003805461077190610f02565b6000805460ff16156108c057606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610897573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108bb9190610ea5565b905090565b504390565b82805482825590600052602060002090810192821561090b579160200282015b8281111561090b57825182906108fb90826112e1565b50916020019190600101906108e5565b5061091792915061091b565b5090565b8082111561091757600061092f8282610938565b5060010161091b565b50805461094490610f02565b6000825580601f10610954575050565b601f0160209004906000526020600020908101906109729190610975565b50565b5b808211156109175760008155600101610976565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610a0057610a0061098a565b604052919050565b600067ffffffffffffffff821115610a2257610a2261098a565b5060051b60200190565b600067ffffffffffffffff821115610a4657610a4661098a565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600082601f830112610a8357600080fd5b8135610a96610a9182610a2c565b6109b9565b818152846020838601011115610aab57600080fd5b816020850160208301376000918101602001919091529392505050565b60006020808385031215610adb57600080fd5b823567ffffffffffffffff80821115610af357600080fd5b818501915085601f830112610b0757600080fd5b8135610b15610a9182610a08565b81815260059190911b83018401908481019088831115610b3457600080fd5b8585015b83811015610b6c57803585811115610b505760008081fd5b610b5e8b89838a0101610a72565b845250918601918601610b38565b5098975050505050505050565b60008060408385031215610b8c57600080fd5b823567ffffffffffffffff80821115610ba457600080fd5b908401906101008287031215610bb957600080fd5b90925060208401359080821115610bcf57600080fd5b50610bdc85828601610a72565b9150509250929050565b60005b83811015610c01578181015183820152602001610be9565b50506000910152565b60008151808
452610c22816020860160208601610be6565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b8215158152604060208201526000610c6f6040830184610c0a565b949350505050565b60008060208385031215610c8a57600080fd5b823567ffffffffffffffff80821115610ca257600080fd5b818501915085601f830112610cb657600080fd5b813581811115610cc557600080fd5b866020828501011115610cd757600080fd5b60209290920196919550909350505050565b60008060408385031215610cfc57600080fd5b823567ffffffffffffffff80821115610d1457600080fd5b818501915085601f830112610d2857600080fd5b81356020610d38610a9183610a08565b82815260059290921b84018101918181019089841115610d5757600080fd5b8286015b84811015610d8f57803586811115610d735760008081fd5b610d818c86838b0101610a72565b845250918301918301610d5b565b5096505086013592505080821115610bcf57600080fd5b600060208284031215610db857600080fd5b813567ffffffffffffffff811115610dcf57600080fd5b610c6f84828501610a72565b600060208284031215610ded57600080fd5b5035919050565b602081526000610e076020830184610c0a565b9392505050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610e4357600080fd5b83018035915067ffffffffffffffff821115610e5e57600080fd5b6020019150600581901b360382131561072757600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600060208284031215610eb757600080fd5b5051919050565b805173ffffffffffffffffffffffffffffffffffffffff81168114610ee257600080fd5b919050565b600060208284031215610ef957600080fd5b610e0782610ebe565b600181811c90821680610f1657607f821691505b602082108103610f4f577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b60008154610f6281610f02565b808552602060018381168015610f7f5760018114610fb757610fe5565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff008516838901528284151560051b8901019550610fe5565b866000528260002060005b85811015610fdd5781548a8201860152908301908401610fc2565b890184019650505b505050505092915050565b60a081526000611
00360a0830188610f55565b6020838203818501528188548084528284019150828160051b8501018a6000528360002060005b83811015611075577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08784030185526110638383610f55565b9486019492506001918201910161102a565b50508681036040880152611089818b610f55565b94505050505084606084015282810360808401526110a78185610c0a565b98975050505050505050565b600080600080608085870312156110c957600080fd5b84519350602085015192506110e060408601610ebe565b6060959095015193969295505050565b60006020828403121561110257600080fd5b815167ffffffffffffffff81111561111957600080fd5b8201601f8101841361112a57600080fd5b8051611138610a9182610a2c565b81815285602083850101111561114d57600080fd5b61115e826020830160208601610be6565b95945050505050565b808201808211156111a1577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b92915050565b86815285602082015273ffffffffffffffffffffffffffffffffffffffff8516604082015283606082015260c0608082015260006111e860c0830185610c0a565b82810360a08401526111fa8185610c0a565b9998505050505050505050565b6000604082016040835280855180835260608501915060608160051b8601019250602080880160005b8381101561127c577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa088870301855261126a868351610c0a565b95509382019390820190600101611230565b50508584038187015250505061115e8185610c0a565b601f8211156112dc57600081815260208120601f850160051c810160208610156112b95750805b601f850160051c820191505b818110156112d8578281556001016112c5565b5050505b505050565b815167ffffffffffffffff8111156112fb576112fb61098a565b61130f816113098454610f02565b84611292565b602080601f831160018114611362576000841561132c5750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b1785556112d8565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b828110156113af57888601518255948401946001909101908401611390565b50858210156113eb57878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60038
8901b60f8161c191681555b5050505050600190811b0190555056fea164736f6c6343000810000a307834353534343832643535353334343264343135323432343935343532353534643264353434353533353434653435353430303030303030303030303030303030", +} + +var LogTriggeredStreamsLookupABI = LogTriggeredStreamsLookupMetaData.ABI + +var LogTriggeredStreamsLookupBin = LogTriggeredStreamsLookupMetaData.Bin + +func DeployLogTriggeredStreamsLookup(auth *bind.TransactOpts, backend bind.ContractBackend, _useArbitrumBlockNum bool, _verify bool) (common.Address, *types.Transaction, *LogTriggeredStreamsLookup, error) { + parsed, err := LogTriggeredStreamsLookupMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(LogTriggeredStreamsLookupBin), backend, _useArbitrumBlockNum, _verify) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LogTriggeredStreamsLookup{address: address, abi: *parsed, LogTriggeredStreamsLookupCaller: LogTriggeredStreamsLookupCaller{contract: contract}, LogTriggeredStreamsLookupTransactor: LogTriggeredStreamsLookupTransactor{contract: contract}, LogTriggeredStreamsLookupFilterer: LogTriggeredStreamsLookupFilterer{contract: contract}}, nil +} + +type LogTriggeredStreamsLookup struct { + address common.Address + abi abi.ABI + LogTriggeredStreamsLookupCaller + LogTriggeredStreamsLookupTransactor + LogTriggeredStreamsLookupFilterer +} + +type LogTriggeredStreamsLookupCaller struct { + contract *bind.BoundContract +} + +type LogTriggeredStreamsLookupTransactor struct { + contract *bind.BoundContract +} + +type LogTriggeredStreamsLookupFilterer struct { + contract *bind.BoundContract +} + +type LogTriggeredStreamsLookupSession struct { + Contract *LogTriggeredStreamsLookup + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type 
LogTriggeredStreamsLookupCallerSession struct { + Contract *LogTriggeredStreamsLookupCaller + CallOpts bind.CallOpts +} + +type LogTriggeredStreamsLookupTransactorSession struct { + Contract *LogTriggeredStreamsLookupTransactor + TransactOpts bind.TransactOpts +} + +type LogTriggeredStreamsLookupRaw struct { + Contract *LogTriggeredStreamsLookup +} + +type LogTriggeredStreamsLookupCallerRaw struct { + Contract *LogTriggeredStreamsLookupCaller +} + +type LogTriggeredStreamsLookupTransactorRaw struct { + Contract *LogTriggeredStreamsLookupTransactor +} + +func NewLogTriggeredStreamsLookup(address common.Address, backend bind.ContractBackend) (*LogTriggeredStreamsLookup, error) { + abi, err := abi.JSON(strings.NewReader(LogTriggeredStreamsLookupABI)) + if err != nil { + return nil, err + } + contract, err := bindLogTriggeredStreamsLookup(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LogTriggeredStreamsLookup{address: address, abi: abi, LogTriggeredStreamsLookupCaller: LogTriggeredStreamsLookupCaller{contract: contract}, LogTriggeredStreamsLookupTransactor: LogTriggeredStreamsLookupTransactor{contract: contract}, LogTriggeredStreamsLookupFilterer: LogTriggeredStreamsLookupFilterer{contract: contract}}, nil +} + +func NewLogTriggeredStreamsLookupCaller(address common.Address, caller bind.ContractCaller) (*LogTriggeredStreamsLookupCaller, error) { + contract, err := bindLogTriggeredStreamsLookup(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LogTriggeredStreamsLookupCaller{contract: contract}, nil +} + +func NewLogTriggeredStreamsLookupTransactor(address common.Address, transactor bind.ContractTransactor) (*LogTriggeredStreamsLookupTransactor, error) { + contract, err := bindLogTriggeredStreamsLookup(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LogTriggeredStreamsLookupTransactor{contract: contract}, nil +} + +func NewLogTriggeredStreamsLookupFilterer(address 
common.Address, filterer bind.ContractFilterer) (*LogTriggeredStreamsLookupFilterer, error) { + contract, err := bindLogTriggeredStreamsLookup(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LogTriggeredStreamsLookupFilterer{contract: contract}, nil +} + +func bindLogTriggeredStreamsLookup(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LogTriggeredStreamsLookupMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LogTriggeredStreamsLookup.Contract.LogTriggeredStreamsLookupCaller.contract.Call(opts, result, method, params...) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.LogTriggeredStreamsLookupTransactor.contract.Transfer(opts) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.LogTriggeredStreamsLookupTransactor.contract.Transact(opts, method, params...) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LogTriggeredStreamsLookup.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.contract.Transfer(opts) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.contract.Transact(opts, method, params...) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCaller) CheckCallback(opts *bind.CallOpts, values [][]byte, extraData []byte) (bool, []byte, error) { + var out []interface{} + err := _LogTriggeredStreamsLookup.contract.Call(opts, &out, "checkCallback", values, extraData) + + if err != nil { + return *new(bool), *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out1 := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return out0, out1, err + +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupSession) CheckCallback(values [][]byte, extraData []byte) (bool, []byte, error) { + return _LogTriggeredStreamsLookup.Contract.CheckCallback(&_LogTriggeredStreamsLookup.CallOpts, values, extraData) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCallerSession) CheckCallback(values [][]byte, extraData []byte) (bool, []byte, error) { + return _LogTriggeredStreamsLookup.Contract.CheckCallback(&_LogTriggeredStreamsLookup.CallOpts, values, extraData) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCaller) Counter(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LogTriggeredStreamsLookup.contract.Call(opts, &out, "counter") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupSession) Counter() (*big.Int, error) { + return 
_LogTriggeredStreamsLookup.Contract.Counter(&_LogTriggeredStreamsLookup.CallOpts) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCallerSession) Counter() (*big.Int, error) { + return _LogTriggeredStreamsLookup.Contract.Counter(&_LogTriggeredStreamsLookup.CallOpts) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCaller) FeedParamKey(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _LogTriggeredStreamsLookup.contract.Call(opts, &out, "feedParamKey") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupSession) FeedParamKey() (string, error) { + return _LogTriggeredStreamsLookup.Contract.FeedParamKey(&_LogTriggeredStreamsLookup.CallOpts) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCallerSession) FeedParamKey() (string, error) { + return _LogTriggeredStreamsLookup.Contract.FeedParamKey(&_LogTriggeredStreamsLookup.CallOpts) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCaller) FeedsHex(opts *bind.CallOpts, arg0 *big.Int) (string, error) { + var out []interface{} + err := _LogTriggeredStreamsLookup.contract.Call(opts, &out, "feedsHex", arg0) + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupSession) FeedsHex(arg0 *big.Int) (string, error) { + return _LogTriggeredStreamsLookup.Contract.FeedsHex(&_LogTriggeredStreamsLookup.CallOpts, arg0) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCallerSession) FeedsHex(arg0 *big.Int) (string, error) { + return _LogTriggeredStreamsLookup.Contract.FeedsHex(&_LogTriggeredStreamsLookup.CallOpts, arg0) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCaller) TimeParamKey(opts *bind.CallOpts) (string, error) { + var out 
[]interface{} + err := _LogTriggeredStreamsLookup.contract.Call(opts, &out, "timeParamKey") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupSession) TimeParamKey() (string, error) { + return _LogTriggeredStreamsLookup.Contract.TimeParamKey(&_LogTriggeredStreamsLookup.CallOpts) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCallerSession) TimeParamKey() (string, error) { + return _LogTriggeredStreamsLookup.Contract.TimeParamKey(&_LogTriggeredStreamsLookup.CallOpts) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCaller) UseArbitrumBlockNum(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _LogTriggeredStreamsLookup.contract.Call(opts, &out, "useArbitrumBlockNum") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupSession) UseArbitrumBlockNum() (bool, error) { + return _LogTriggeredStreamsLookup.Contract.UseArbitrumBlockNum(&_LogTriggeredStreamsLookup.CallOpts) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCallerSession) UseArbitrumBlockNum() (bool, error) { + return _LogTriggeredStreamsLookup.Contract.UseArbitrumBlockNum(&_LogTriggeredStreamsLookup.CallOpts) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCaller) Verify(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _LogTriggeredStreamsLookup.contract.Call(opts, &out, "verify") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupSession) Verify() (bool, error) { + return _LogTriggeredStreamsLookup.Contract.Verify(&_LogTriggeredStreamsLookup.CallOpts) +} + +func 
(_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupCallerSession) Verify() (bool, error) { + return _LogTriggeredStreamsLookup.Contract.Verify(&_LogTriggeredStreamsLookup.CallOpts) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactor) CheckLog(opts *bind.TransactOpts, log Log, arg1 []byte) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.contract.Transact(opts, "checkLog", log, arg1) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupSession) CheckLog(log Log, arg1 []byte) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.CheckLog(&_LogTriggeredStreamsLookup.TransactOpts, log, arg1) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactorSession) CheckLog(log Log, arg1 []byte) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.CheckLog(&_LogTriggeredStreamsLookup.TransactOpts, log, arg1) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.contract.Transact(opts, "performUpkeep", performData) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.PerformUpkeep(&_LogTriggeredStreamsLookup.TransactOpts, performData) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.PerformUpkeep(&_LogTriggeredStreamsLookup.TransactOpts, performData) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactor) SetFeedParamKey(opts *bind.TransactOpts, feedParam string) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.contract.Transact(opts, "setFeedParamKey", feedParam) +} + +func (_LogTriggeredStreamsLookup 
*LogTriggeredStreamsLookupSession) SetFeedParamKey(feedParam string) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.SetFeedParamKey(&_LogTriggeredStreamsLookup.TransactOpts, feedParam) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactorSession) SetFeedParamKey(feedParam string) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.SetFeedParamKey(&_LogTriggeredStreamsLookup.TransactOpts, feedParam) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactor) SetFeedsHex(opts *bind.TransactOpts, newFeeds []string) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.contract.Transact(opts, "setFeedsHex", newFeeds) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupSession) SetFeedsHex(newFeeds []string) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.SetFeedsHex(&_LogTriggeredStreamsLookup.TransactOpts, newFeeds) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactorSession) SetFeedsHex(newFeeds []string) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.SetFeedsHex(&_LogTriggeredStreamsLookup.TransactOpts, newFeeds) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactor) SetTimeParamKey(opts *bind.TransactOpts, timeParam string) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.contract.Transact(opts, "setTimeParamKey", timeParam) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupSession) SetTimeParamKey(timeParam string) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.SetTimeParamKey(&_LogTriggeredStreamsLookup.TransactOpts, timeParam) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactorSession) SetTimeParamKey(timeParam string) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.SetTimeParamKey(&_LogTriggeredStreamsLookup.TransactOpts, timeParam) +} + 
+func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactor) Start(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.contract.Transact(opts, "start") +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupSession) Start() (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.Start(&_LogTriggeredStreamsLookup.TransactOpts) +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupTransactorSession) Start() (*types.Transaction, error) { + return _LogTriggeredStreamsLookup.Contract.Start(&_LogTriggeredStreamsLookup.TransactOpts) +} + +type LogTriggeredStreamsLookupLimitOrderExecutedIterator struct { + Event *LogTriggeredStreamsLookupLimitOrderExecuted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogTriggeredStreamsLookupLimitOrderExecutedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogTriggeredStreamsLookupLimitOrderExecuted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogTriggeredStreamsLookupLimitOrderExecuted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogTriggeredStreamsLookupLimitOrderExecutedIterator) Error() error { + return it.fail +} + +func (it *LogTriggeredStreamsLookupLimitOrderExecutedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LogTriggeredStreamsLookupLimitOrderExecuted struct { + OrderId *big.Int + Amount *big.Int + Exchange common.Address + Raw types.Log +} + +func 
(_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupFilterer) FilterLimitOrderExecuted(opts *bind.FilterOpts, orderId []*big.Int, amount []*big.Int, exchange []common.Address) (*LogTriggeredStreamsLookupLimitOrderExecutedIterator, error) { + + var orderIdRule []interface{} + for _, orderIdItem := range orderId { + orderIdRule = append(orderIdRule, orderIdItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var exchangeRule []interface{} + for _, exchangeItem := range exchange { + exchangeRule = append(exchangeRule, exchangeItem) + } + + logs, sub, err := _LogTriggeredStreamsLookup.contract.FilterLogs(opts, "LimitOrderExecuted", orderIdRule, amountRule, exchangeRule) + if err != nil { + return nil, err + } + return &LogTriggeredStreamsLookupLimitOrderExecutedIterator{contract: _LogTriggeredStreamsLookup.contract, event: "LimitOrderExecuted", logs: logs, sub: sub}, nil +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupFilterer) WatchLimitOrderExecuted(opts *bind.WatchOpts, sink chan<- *LogTriggeredStreamsLookupLimitOrderExecuted, orderId []*big.Int, amount []*big.Int, exchange []common.Address) (event.Subscription, error) { + + var orderIdRule []interface{} + for _, orderIdItem := range orderId { + orderIdRule = append(orderIdRule, orderIdItem) + } + var amountRule []interface{} + for _, amountItem := range amount { + amountRule = append(amountRule, amountItem) + } + var exchangeRule []interface{} + for _, exchangeItem := range exchange { + exchangeRule = append(exchangeRule, exchangeItem) + } + + logs, sub, err := _LogTriggeredStreamsLookup.contract.WatchLogs(opts, "LimitOrderExecuted", orderIdRule, amountRule, exchangeRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LogTriggeredStreamsLookupLimitOrderExecuted) + if err := 
_LogTriggeredStreamsLookup.contract.UnpackLog(event, "LimitOrderExecuted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupFilterer) ParseLimitOrderExecuted(log types.Log) (*LogTriggeredStreamsLookupLimitOrderExecuted, error) { + event := new(LogTriggeredStreamsLookupLimitOrderExecuted) + if err := _LogTriggeredStreamsLookup.contract.UnpackLog(event, "LimitOrderExecuted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LogTriggeredStreamsLookupPerformingLogTriggerUpkeepIterator struct { + Event *LogTriggeredStreamsLookupPerformingLogTriggerUpkeep + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogTriggeredStreamsLookupPerformingLogTriggerUpkeepIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogTriggeredStreamsLookupPerformingLogTriggerUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogTriggeredStreamsLookupPerformingLogTriggerUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogTriggeredStreamsLookupPerformingLogTriggerUpkeepIterator) Error() error { + return it.fail +} + +func (it *LogTriggeredStreamsLookupPerformingLogTriggerUpkeepIterator) Close() error { + it.sub.Unsubscribe() + return nil 
+} + +type LogTriggeredStreamsLookupPerformingLogTriggerUpkeep struct { + From common.Address + OrderId *big.Int + Amount *big.Int + Exchange common.Address + BlockNumber *big.Int + Blob []byte + Verified []byte + Raw types.Log +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupFilterer) FilterPerformingLogTriggerUpkeep(opts *bind.FilterOpts, from []common.Address) (*LogTriggeredStreamsLookupPerformingLogTriggerUpkeepIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _LogTriggeredStreamsLookup.contract.FilterLogs(opts, "PerformingLogTriggerUpkeep", fromRule) + if err != nil { + return nil, err + } + return &LogTriggeredStreamsLookupPerformingLogTriggerUpkeepIterator{contract: _LogTriggeredStreamsLookup.contract, event: "PerformingLogTriggerUpkeep", logs: logs, sub: sub}, nil +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookupFilterer) WatchPerformingLogTriggerUpkeep(opts *bind.WatchOpts, sink chan<- *LogTriggeredStreamsLookupPerformingLogTriggerUpkeep, from []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _LogTriggeredStreamsLookup.contract.WatchLogs(opts, "PerformingLogTriggerUpkeep", fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LogTriggeredStreamsLookupPerformingLogTriggerUpkeep) + if err := _LogTriggeredStreamsLookup.contract.UnpackLog(event, "PerformingLogTriggerUpkeep", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogTriggeredStreamsLookup 
*LogTriggeredStreamsLookupFilterer) ParsePerformingLogTriggerUpkeep(log types.Log) (*LogTriggeredStreamsLookupPerformingLogTriggerUpkeep, error) { + event := new(LogTriggeredStreamsLookupPerformingLogTriggerUpkeep) + if err := _LogTriggeredStreamsLookup.contract.UnpackLog(event, "PerformingLogTriggerUpkeep", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookup) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _LogTriggeredStreamsLookup.abi.Events["LimitOrderExecuted"].ID: + return _LogTriggeredStreamsLookup.ParseLimitOrderExecuted(log) + case _LogTriggeredStreamsLookup.abi.Events["PerformingLogTriggerUpkeep"].ID: + return _LogTriggeredStreamsLookup.ParsePerformingLogTriggerUpkeep(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (LogTriggeredStreamsLookupLimitOrderExecuted) Topic() common.Hash { + return common.HexToHash("0xd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd") +} + +func (LogTriggeredStreamsLookupPerformingLogTriggerUpkeep) Topic() common.Hash { + return common.HexToHash("0x299a03817e683a32b21e29e3ae3c31f1c9c773f7d532836d116b62a9281fbc9d") +} + +func (_LogTriggeredStreamsLookup *LogTriggeredStreamsLookup) Address() common.Address { + return _LogTriggeredStreamsLookup.address +} + +type LogTriggeredStreamsLookupInterface interface { + CheckCallback(opts *bind.CallOpts, values [][]byte, extraData []byte) (bool, []byte, error) + + Counter(opts *bind.CallOpts) (*big.Int, error) + + FeedParamKey(opts *bind.CallOpts) (string, error) + + FeedsHex(opts *bind.CallOpts, arg0 *big.Int) (string, error) + + TimeParamKey(opts *bind.CallOpts) (string, error) + + UseArbitrumBlockNum(opts *bind.CallOpts) (bool, error) + + Verify(opts *bind.CallOpts) (bool, error) + + CheckLog(opts *bind.TransactOpts, log Log, arg1 []byte) (*types.Transaction, error) + + 
PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) + + SetFeedParamKey(opts *bind.TransactOpts, feedParam string) (*types.Transaction, error) + + SetFeedsHex(opts *bind.TransactOpts, newFeeds []string) (*types.Transaction, error) + + SetTimeParamKey(opts *bind.TransactOpts, timeParam string) (*types.Transaction, error) + + Start(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterLimitOrderExecuted(opts *bind.FilterOpts, orderId []*big.Int, amount []*big.Int, exchange []common.Address) (*LogTriggeredStreamsLookupLimitOrderExecutedIterator, error) + + WatchLimitOrderExecuted(opts *bind.WatchOpts, sink chan<- *LogTriggeredStreamsLookupLimitOrderExecuted, orderId []*big.Int, amount []*big.Int, exchange []common.Address) (event.Subscription, error) + + ParseLimitOrderExecuted(log types.Log) (*LogTriggeredStreamsLookupLimitOrderExecuted, error) + + FilterPerformingLogTriggerUpkeep(opts *bind.FilterOpts, from []common.Address) (*LogTriggeredStreamsLookupPerformingLogTriggerUpkeepIterator, error) + + WatchPerformingLogTriggerUpkeep(opts *bind.WatchOpts, sink chan<- *LogTriggeredStreamsLookupPerformingLogTriggerUpkeep, from []common.Address) (event.Subscription, error) + + ParsePerformingLogTriggerUpkeep(log types.Log) (*LogTriggeredStreamsLookupPerformingLogTriggerUpkeep, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/log_upkeep_counter_wrapper/log_upkeep_counter_wrapper.go b/core/gethwrappers/generated/log_upkeep_counter_wrapper/log_upkeep_counter_wrapper.go new file mode 100644 index 00000000..a9344be1 --- /dev/null +++ b/core/gethwrappers/generated/log_upkeep_counter_wrapper/log_upkeep_counter_wrapper.go @@ -0,0 +1,1073 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package log_upkeep_counter_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type Log struct { + Index *big.Int + Timestamp *big.Int + TxHash [32]byte + BlockNumber *big.Int + BlockHash [32]byte + Source common.Address + Topics [][32]byte + Data []byte +} + +var LogUpkeepCounterMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_testRange\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"initialBlock\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"lastBlock\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"previousBlock\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"counter\",\"type\":\"uint256\"}],\"name\":\"PerformingUpkeep\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"Trigger\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"a\",\"type\":\"uint256\"}],\"name\":\"Trigger\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"a\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"
b\",\"type\":\"uint256\"}],\"name\":\"Trigger\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"a\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"b\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"}],\"name\":\"Trigger\",\"type\":\"event\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"txHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"source\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"topics\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"internalType\":\"structLog\",\"name\":\"log\",\"type\":\"tuple\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"checkLog\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"counter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"eligible\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"vi
ew\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"previousPerformBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_testRange\",\"type\":\"uint256\"}],\"name\":\"setSpread\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"start\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"testRange\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: "0x60806040527f3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d6000557f57b1de35764b0939dde00771c7069cdf8d6a65d6a175623f19aa18784fd4c6da6001557f1da9f70fe932e73fba9374396c5c0b02dbd170f951874b7b4afabe4dd029a9c86002557f5121119bad45ca7e58e0bdadf39045f5111e93ba4304a0f6457a3e7bc9791e716003553480156100a057600080fd5b50604051610f41380380610f418339810160408190526100bf916100da565b600455600060068190554360055560078190556008556100f3565b6000602082840312156100ec57600080fd5b5051919050565b610e3f806101026000396000f3fe608060405234801561001057600080fd5b50600436106100be5760003560e01c8063806b984f11610076578063b66a261c1161005b578063b66a261c14610139578063be9a655514610156578063d832d92f1461015e57600080fd5b8063806b984f14610127578063917d895f1461013057600080fd5b80634585e33b116100a75780634585e33b1461010057806361bc221a146101155780636250a13a1461011e57600080fd5b80632cb15864146100c357806340691db4146100df575b600080fd5b6100cc60075481565b6040519081526020015b60405180910390f35b6100f26100ed366004610889565b610176565b6040516100d6929190610a7c565b61011361010e366004610817565b610365565b005b6100cc60085481565b6100cc60045481565b6100cc60055481565b61
00cc60065481565b6101136101473660046109cb565b60045560006007819055600855565b6101136105d7565b6101666106b1565b60405190151581526020016100d6565b600060606101826106b1565b6101ed576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600c60248201527f6e6f7420656c696769626c65000000000000000000000000000000000000000060448201526064015b60405180910390fd5b6000546101fd60c0860186610bca565b600081811061020e5761020e610dd4565b905060200201351480610246575060015461022c60c0860186610bca565b600081811061023d5761023d610dd4565b90506020020135145b80610276575060025461025c60c0860186610bca565b600081811061026d5761026d610dd4565b90506020020135145b806102a6575060035461028c60c0860186610bca565b600081811061029d5761029d610dd4565b90506020020135145b156102d6576001846040516020016102be9190610af9565b6040516020818303038152906040529150915061035e565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602160248201527f636f756c64206e6f742066696e64206d61746368696e67206576656e7420736960448201527f670000000000000000000000000000000000000000000000000000000000000060648201526084016101e4565b9250929050565b60075461037157436007555b43600555600854610383906001610d76565b600855600554600655600061039a828401846108f6565b90506000548160c001516000815181106103b6576103b6610dd4565b602002602001015114156103f2576040517f3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d90600090a161057f565b6001548160c0015160008151811061040c5761040c610dd4565b6020026020010151141561045457604051600181527f57b1de35764b0939dde00771c7069cdf8d6a65d6a175623f19aa18784fd4c6da906020015b60405180910390a161057f565b6002548160c0015160008151811061046e5761046e610dd4565b602002602001015114156104b3576040805160018152600260208201527f1da9f70fe932e73fba9374396c5c0b02dbd170f951874b7b4afabe4dd029a9c89101610447565b6003548160c001516000815181106104cd576104cd610dd4565b6020026020010151141561051d576040805160018152600260208201526003918101919091527f5121119bad45ca7e58e0bdadf39045f5111e93ba4304a0f6457a3e7bc9791e719060600161
0447565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f636f756c64206e6f742066696e64206d61746368696e6720736967000000000060448201526064016101e4565b60075460055460065460085460408051948552602085019390935291830152606082015232907f8e8112f20a2134e18e591d2cdd68cd86a95d06e6328ede501fc6314f4a5075fa9060800160405180910390a2505050565b6040517f3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d90600090a1604051600181527f57b1de35764b0939dde00771c7069cdf8d6a65d6a175623f19aa18784fd4c6da9060200160405180910390a16040805160018152600260208201527f1da9f70fe932e73fba9374396c5c0b02dbd170f951874b7b4afabe4dd029a9c8910160405180910390a160408051600181526002602082015260038183015290517f5121119bad45ca7e58e0bdadf39045f5111e93ba4304a0f6457a3e7bc9791e719181900360600190a1565b6000600754600014156106c45750600190565b6004546007546106d49043610d8e565b10905090565b803573ffffffffffffffffffffffffffffffffffffffff811681146106fe57600080fd5b919050565b600082601f83011261071457600080fd5b8135602067ffffffffffffffff82111561073057610730610e03565b8160051b61073f828201610c5c565b83815282810190868401838801850189101561075a57600080fd5b600093505b8584101561077d57803583526001939093019291840191840161075f565b50979650505050505050565b600082601f83011261079a57600080fd5b813567ffffffffffffffff8111156107b4576107b4610e03565b6107e560207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601610c5c565b8181528460208386010111156107fa57600080fd5b816020850160208301376000918101602001919091529392505050565b6000806020838503121561082a57600080fd5b823567ffffffffffffffff8082111561084257600080fd5b818501915085601f83011261085657600080fd5b81358181111561086557600080fd5b86602082850101111561087757600080fd5b60209290920196919550909350505050565b6000806040838503121561089c57600080fd5b823567ffffffffffffffff808211156108b457600080fd5b9084019061010082870312156108c957600080fd5b909250602084013590808211156108df57600080fd5b506108ec85828601610789565b9150509250929050565b6000602082840312156109
0857600080fd5b813567ffffffffffffffff8082111561092057600080fd5b90830190610100828603121561093557600080fd5b61093d610c32565b823581526020830135602082015260408301356040820152606083013560608201526080830135608082015261097560a084016106da565b60a082015260c08301358281111561098c57600080fd5b61099887828601610703565b60c08301525060e0830135828111156109b057600080fd5b6109bc87828601610789565b60e08301525095945050505050565b6000602082840312156109dd57600080fd5b5035919050565b81835260007f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff831115610a1657600080fd5b8260051b8083602087013760009401602001938452509192915050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b821515815260006020604081840152835180604085015260005b81811015610ab257858101830151858201606001528201610a96565b81811115610ac4576000606083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01692909201606001949350505050565b6020815281356020820152602082013560408201526040820135606082015260608201356080820152608082013560a082015273ffffffffffffffffffffffffffffffffffffffff610b4d60a084016106da565b1660c08201526000610b6260c0840184610cab565b6101008060e0860152610b7a610120860183856109e4565b9250610b8960e0870187610d12565b92507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08685030182870152610bbf848483610a33565b979650505050505050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610bff57600080fd5b83018035915067ffffffffffffffff821115610c1a57600080fd5b6020019150600581901b360382131561035e57600080fd5b604051610100810167ffffffffffffffff81118282101715610c5657610c56610e03565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610ca357610ca3610e03565b604052919050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610ce057600080fd5b830160208101925035
905067ffffffffffffffff811115610d0057600080fd5b8060051b360383131561035e57600080fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610d4757600080fd5b830160208101925035905067ffffffffffffffff811115610d6757600080fd5b80360383131561035e57600080fd5b60008219821115610d8957610d89610da5565b500190565b600082821015610da057610da0610da5565b500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var LogUpkeepCounterABI = LogUpkeepCounterMetaData.ABI + +var LogUpkeepCounterBin = LogUpkeepCounterMetaData.Bin + +func DeployLogUpkeepCounter(auth *bind.TransactOpts, backend bind.ContractBackend, _testRange *big.Int) (common.Address, *types.Transaction, *LogUpkeepCounter, error) { + parsed, err := LogUpkeepCounterMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(LogUpkeepCounterBin), backend, _testRange) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LogUpkeepCounter{address: address, abi: *parsed, LogUpkeepCounterCaller: LogUpkeepCounterCaller{contract: contract}, LogUpkeepCounterTransactor: LogUpkeepCounterTransactor{contract: contract}, LogUpkeepCounterFilterer: LogUpkeepCounterFilterer{contract: contract}}, nil +} + +type LogUpkeepCounter struct { + address common.Address + abi abi.ABI + LogUpkeepCounterCaller + LogUpkeepCounterTransactor + LogUpkeepCounterFilterer +} + +type LogUpkeepCounterCaller struct { + contract *bind.BoundContract +} + +type LogUpkeepCounterTransactor struct { + contract *bind.BoundContract +} + +type 
LogUpkeepCounterFilterer struct { + contract *bind.BoundContract +} + +type LogUpkeepCounterSession struct { + Contract *LogUpkeepCounter + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type LogUpkeepCounterCallerSession struct { + Contract *LogUpkeepCounterCaller + CallOpts bind.CallOpts +} + +type LogUpkeepCounterTransactorSession struct { + Contract *LogUpkeepCounterTransactor + TransactOpts bind.TransactOpts +} + +type LogUpkeepCounterRaw struct { + Contract *LogUpkeepCounter +} + +type LogUpkeepCounterCallerRaw struct { + Contract *LogUpkeepCounterCaller +} + +type LogUpkeepCounterTransactorRaw struct { + Contract *LogUpkeepCounterTransactor +} + +func NewLogUpkeepCounter(address common.Address, backend bind.ContractBackend) (*LogUpkeepCounter, error) { + abi, err := abi.JSON(strings.NewReader(LogUpkeepCounterABI)) + if err != nil { + return nil, err + } + contract, err := bindLogUpkeepCounter(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LogUpkeepCounter{address: address, abi: abi, LogUpkeepCounterCaller: LogUpkeepCounterCaller{contract: contract}, LogUpkeepCounterTransactor: LogUpkeepCounterTransactor{contract: contract}, LogUpkeepCounterFilterer: LogUpkeepCounterFilterer{contract: contract}}, nil +} + +func NewLogUpkeepCounterCaller(address common.Address, caller bind.ContractCaller) (*LogUpkeepCounterCaller, error) { + contract, err := bindLogUpkeepCounter(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LogUpkeepCounterCaller{contract: contract}, nil +} + +func NewLogUpkeepCounterTransactor(address common.Address, transactor bind.ContractTransactor) (*LogUpkeepCounterTransactor, error) { + contract, err := bindLogUpkeepCounter(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LogUpkeepCounterTransactor{contract: contract}, nil +} + +func NewLogUpkeepCounterFilterer(address common.Address, filterer bind.ContractFilterer) 
(*LogUpkeepCounterFilterer, error) { + contract, err := bindLogUpkeepCounter(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LogUpkeepCounterFilterer{contract: contract}, nil +} + +func bindLogUpkeepCounter(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LogUpkeepCounterMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_LogUpkeepCounter *LogUpkeepCounterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LogUpkeepCounter.Contract.LogUpkeepCounterCaller.contract.Call(opts, result, method, params...) +} + +func (_LogUpkeepCounter *LogUpkeepCounterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LogUpkeepCounter.Contract.LogUpkeepCounterTransactor.contract.Transfer(opts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LogUpkeepCounter.Contract.LogUpkeepCounterTransactor.contract.Transact(opts, method, params...) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LogUpkeepCounter.Contract.contract.Call(opts, result, method, params...) +} + +func (_LogUpkeepCounter *LogUpkeepCounterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LogUpkeepCounter.Contract.contract.Transfer(opts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LogUpkeepCounter.Contract.contract.Transact(opts, method, params...) 
+} + +func (_LogUpkeepCounter *LogUpkeepCounterCaller) CheckLog(opts *bind.CallOpts, log Log, arg1 []byte) (bool, []byte, error) { + var out []interface{} + err := _LogUpkeepCounter.contract.Call(opts, &out, "checkLog", log, arg1) + + if err != nil { + return *new(bool), *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out1 := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return out0, out1, err + +} + +func (_LogUpkeepCounter *LogUpkeepCounterSession) CheckLog(log Log, arg1 []byte) (bool, []byte, error) { + return _LogUpkeepCounter.Contract.CheckLog(&_LogUpkeepCounter.CallOpts, log, arg1) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCallerSession) CheckLog(log Log, arg1 []byte) (bool, []byte, error) { + return _LogUpkeepCounter.Contract.CheckLog(&_LogUpkeepCounter.CallOpts, log, arg1) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCaller) Counter(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LogUpkeepCounter.contract.Call(opts, &out, "counter") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LogUpkeepCounter *LogUpkeepCounterSession) Counter() (*big.Int, error) { + return _LogUpkeepCounter.Contract.Counter(&_LogUpkeepCounter.CallOpts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCallerSession) Counter() (*big.Int, error) { + return _LogUpkeepCounter.Contract.Counter(&_LogUpkeepCounter.CallOpts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCaller) Eligible(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _LogUpkeepCounter.contract.Call(opts, &out, "eligible") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_LogUpkeepCounter *LogUpkeepCounterSession) Eligible() (bool, error) { + return _LogUpkeepCounter.Contract.Eligible(&_LogUpkeepCounter.CallOpts) +} + +func (_LogUpkeepCounter 
*LogUpkeepCounterCallerSession) Eligible() (bool, error) { + return _LogUpkeepCounter.Contract.Eligible(&_LogUpkeepCounter.CallOpts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCaller) InitialBlock(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LogUpkeepCounter.contract.Call(opts, &out, "initialBlock") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LogUpkeepCounter *LogUpkeepCounterSession) InitialBlock() (*big.Int, error) { + return _LogUpkeepCounter.Contract.InitialBlock(&_LogUpkeepCounter.CallOpts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCallerSession) InitialBlock() (*big.Int, error) { + return _LogUpkeepCounter.Contract.InitialBlock(&_LogUpkeepCounter.CallOpts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCaller) LastBlock(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LogUpkeepCounter.contract.Call(opts, &out, "lastBlock") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LogUpkeepCounter *LogUpkeepCounterSession) LastBlock() (*big.Int, error) { + return _LogUpkeepCounter.Contract.LastBlock(&_LogUpkeepCounter.CallOpts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCallerSession) LastBlock() (*big.Int, error) { + return _LogUpkeepCounter.Contract.LastBlock(&_LogUpkeepCounter.CallOpts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCaller) PreviousPerformBlock(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LogUpkeepCounter.contract.Call(opts, &out, "previousPerformBlock") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LogUpkeepCounter *LogUpkeepCounterSession) PreviousPerformBlock() (*big.Int, error) { + return 
_LogUpkeepCounter.Contract.PreviousPerformBlock(&_LogUpkeepCounter.CallOpts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCallerSession) PreviousPerformBlock() (*big.Int, error) { + return _LogUpkeepCounter.Contract.PreviousPerformBlock(&_LogUpkeepCounter.CallOpts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCaller) TestRange(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LogUpkeepCounter.contract.Call(opts, &out, "testRange") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LogUpkeepCounter *LogUpkeepCounterSession) TestRange() (*big.Int, error) { + return _LogUpkeepCounter.Contract.TestRange(&_LogUpkeepCounter.CallOpts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterCallerSession) TestRange() (*big.Int, error) { + return _LogUpkeepCounter.Contract.TestRange(&_LogUpkeepCounter.CallOpts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _LogUpkeepCounter.contract.Transact(opts, "performUpkeep", performData) +} + +func (_LogUpkeepCounter *LogUpkeepCounterSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _LogUpkeepCounter.Contract.PerformUpkeep(&_LogUpkeepCounter.TransactOpts, performData) +} + +func (_LogUpkeepCounter *LogUpkeepCounterTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _LogUpkeepCounter.Contract.PerformUpkeep(&_LogUpkeepCounter.TransactOpts, performData) +} + +func (_LogUpkeepCounter *LogUpkeepCounterTransactor) SetSpread(opts *bind.TransactOpts, _testRange *big.Int) (*types.Transaction, error) { + return _LogUpkeepCounter.contract.Transact(opts, "setSpread", _testRange) +} + +func (_LogUpkeepCounter *LogUpkeepCounterSession) SetSpread(_testRange *big.Int) (*types.Transaction, error) { + return 
_LogUpkeepCounter.Contract.SetSpread(&_LogUpkeepCounter.TransactOpts, _testRange) +} + +func (_LogUpkeepCounter *LogUpkeepCounterTransactorSession) SetSpread(_testRange *big.Int) (*types.Transaction, error) { + return _LogUpkeepCounter.Contract.SetSpread(&_LogUpkeepCounter.TransactOpts, _testRange) +} + +func (_LogUpkeepCounter *LogUpkeepCounterTransactor) Start(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LogUpkeepCounter.contract.Transact(opts, "start") +} + +func (_LogUpkeepCounter *LogUpkeepCounterSession) Start() (*types.Transaction, error) { + return _LogUpkeepCounter.Contract.Start(&_LogUpkeepCounter.TransactOpts) +} + +func (_LogUpkeepCounter *LogUpkeepCounterTransactorSession) Start() (*types.Transaction, error) { + return _LogUpkeepCounter.Contract.Start(&_LogUpkeepCounter.TransactOpts) +} + +type LogUpkeepCounterPerformingUpkeepIterator struct { + Event *LogUpkeepCounterPerformingUpkeep + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogUpkeepCounterPerformingUpkeepIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogUpkeepCounterPerformingUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogUpkeepCounterPerformingUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogUpkeepCounterPerformingUpkeepIterator) Error() error { + return it.fail +} + +func (it *LogUpkeepCounterPerformingUpkeepIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
LogUpkeepCounterPerformingUpkeep struct { + From common.Address + InitialBlock *big.Int + LastBlock *big.Int + PreviousBlock *big.Int + Counter *big.Int + Raw types.Log +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) FilterPerformingUpkeep(opts *bind.FilterOpts, from []common.Address) (*LogUpkeepCounterPerformingUpkeepIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _LogUpkeepCounter.contract.FilterLogs(opts, "PerformingUpkeep", fromRule) + if err != nil { + return nil, err + } + return &LogUpkeepCounterPerformingUpkeepIterator{contract: _LogUpkeepCounter.contract, event: "PerformingUpkeep", logs: logs, sub: sub}, nil +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) WatchPerformingUpkeep(opts *bind.WatchOpts, sink chan<- *LogUpkeepCounterPerformingUpkeep, from []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _LogUpkeepCounter.contract.WatchLogs(opts, "PerformingUpkeep", fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LogUpkeepCounterPerformingUpkeep) + if err := _LogUpkeepCounter.contract.UnpackLog(event, "PerformingUpkeep", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) ParsePerformingUpkeep(log types.Log) (*LogUpkeepCounterPerformingUpkeep, error) { + event := new(LogUpkeepCounterPerformingUpkeep) + if err := _LogUpkeepCounter.contract.UnpackLog(event, "PerformingUpkeep", log); err != nil { + return nil, err + } + 
event.Raw = log + return event, nil +} + +type LogUpkeepCounterTriggerIterator struct { + Event *LogUpkeepCounterTrigger + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogUpkeepCounterTriggerIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogUpkeepCounterTrigger) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogUpkeepCounterTrigger) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogUpkeepCounterTriggerIterator) Error() error { + return it.fail +} + +func (it *LogUpkeepCounterTriggerIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LogUpkeepCounterTrigger struct { + Raw types.Log +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) FilterTrigger(opts *bind.FilterOpts) (*LogUpkeepCounterTriggerIterator, error) { + + logs, sub, err := _LogUpkeepCounter.contract.FilterLogs(opts, "Trigger") + if err != nil { + return nil, err + } + return &LogUpkeepCounterTriggerIterator{contract: _LogUpkeepCounter.contract, event: "Trigger", logs: logs, sub: sub}, nil +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) WatchTrigger(opts *bind.WatchOpts, sink chan<- *LogUpkeepCounterTrigger) (event.Subscription, error) { + + logs, sub, err := _LogUpkeepCounter.contract.WatchLogs(opts, "Trigger") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(LogUpkeepCounterTrigger) + if err := _LogUpkeepCounter.contract.UnpackLog(event, "Trigger", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) ParseTrigger(log types.Log) (*LogUpkeepCounterTrigger, error) { + event := new(LogUpkeepCounterTrigger) + if err := _LogUpkeepCounter.contract.UnpackLog(event, "Trigger", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LogUpkeepCounterTrigger0Iterator struct { + Event *LogUpkeepCounterTrigger0 + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogUpkeepCounterTrigger0Iterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogUpkeepCounterTrigger0) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogUpkeepCounterTrigger0) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogUpkeepCounterTrigger0Iterator) Error() error { + return it.fail +} + +func (it *LogUpkeepCounterTrigger0Iterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LogUpkeepCounterTrigger0 struct { + A *big.Int + Raw types.Log +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) FilterTrigger0(opts *bind.FilterOpts) (*LogUpkeepCounterTrigger0Iterator, error) { + + logs, sub, err := 
_LogUpkeepCounter.contract.FilterLogs(opts, "Trigger0") + if err != nil { + return nil, err + } + return &LogUpkeepCounterTrigger0Iterator{contract: _LogUpkeepCounter.contract, event: "Trigger0", logs: logs, sub: sub}, nil +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) WatchTrigger0(opts *bind.WatchOpts, sink chan<- *LogUpkeepCounterTrigger0) (event.Subscription, error) { + + logs, sub, err := _LogUpkeepCounter.contract.WatchLogs(opts, "Trigger0") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LogUpkeepCounterTrigger0) + if err := _LogUpkeepCounter.contract.UnpackLog(event, "Trigger0", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) ParseTrigger0(log types.Log) (*LogUpkeepCounterTrigger0, error) { + event := new(LogUpkeepCounterTrigger0) + if err := _LogUpkeepCounter.contract.UnpackLog(event, "Trigger0", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LogUpkeepCounterTrigger1Iterator struct { + Event *LogUpkeepCounterTrigger1 + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogUpkeepCounterTrigger1Iterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogUpkeepCounterTrigger1) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogUpkeepCounterTrigger1) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogUpkeepCounterTrigger1Iterator) Error() error { + return it.fail +} + +func (it *LogUpkeepCounterTrigger1Iterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LogUpkeepCounterTrigger1 struct { + A *big.Int + B *big.Int + Raw types.Log +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) FilterTrigger1(opts *bind.FilterOpts) (*LogUpkeepCounterTrigger1Iterator, error) { + + logs, sub, err := _LogUpkeepCounter.contract.FilterLogs(opts, "Trigger1") + if err != nil { + return nil, err + } + return &LogUpkeepCounterTrigger1Iterator{contract: _LogUpkeepCounter.contract, event: "Trigger1", logs: logs, sub: sub}, nil +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) WatchTrigger1(opts *bind.WatchOpts, sink chan<- *LogUpkeepCounterTrigger1) (event.Subscription, error) { + + logs, sub, err := _LogUpkeepCounter.contract.WatchLogs(opts, "Trigger1") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LogUpkeepCounterTrigger1) + if err := _LogUpkeepCounter.contract.UnpackLog(event, "Trigger1", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) ParseTrigger1(log types.Log) (*LogUpkeepCounterTrigger1, error) { + event := new(LogUpkeepCounterTrigger1) + if err := _LogUpkeepCounter.contract.UnpackLog(event, "Trigger1", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
LogUpkeepCounterTrigger2Iterator struct { + Event *LogUpkeepCounterTrigger2 + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LogUpkeepCounterTrigger2Iterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LogUpkeepCounterTrigger2) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LogUpkeepCounterTrigger2) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LogUpkeepCounterTrigger2Iterator) Error() error { + return it.fail +} + +func (it *LogUpkeepCounterTrigger2Iterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LogUpkeepCounterTrigger2 struct { + A *big.Int + B *big.Int + C *big.Int + Raw types.Log +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) FilterTrigger2(opts *bind.FilterOpts) (*LogUpkeepCounterTrigger2Iterator, error) { + + logs, sub, err := _LogUpkeepCounter.contract.FilterLogs(opts, "Trigger2") + if err != nil { + return nil, err + } + return &LogUpkeepCounterTrigger2Iterator{contract: _LogUpkeepCounter.contract, event: "Trigger2", logs: logs, sub: sub}, nil +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) WatchTrigger2(opts *bind.WatchOpts, sink chan<- *LogUpkeepCounterTrigger2) (event.Subscription, error) { + + logs, sub, err := _LogUpkeepCounter.contract.WatchLogs(opts, "Trigger2") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(LogUpkeepCounterTrigger2) + if err := _LogUpkeepCounter.contract.UnpackLog(event, "Trigger2", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LogUpkeepCounter *LogUpkeepCounterFilterer) ParseTrigger2(log types.Log) (*LogUpkeepCounterTrigger2, error) { + event := new(LogUpkeepCounterTrigger2) + if err := _LogUpkeepCounter.contract.UnpackLog(event, "Trigger2", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_LogUpkeepCounter *LogUpkeepCounter) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _LogUpkeepCounter.abi.Events["PerformingUpkeep"].ID: + return _LogUpkeepCounter.ParsePerformingUpkeep(log) + case _LogUpkeepCounter.abi.Events["Trigger"].ID: + return _LogUpkeepCounter.ParseTrigger(log) + case _LogUpkeepCounter.abi.Events["Trigger0"].ID: + return _LogUpkeepCounter.ParseTrigger0(log) + case _LogUpkeepCounter.abi.Events["Trigger1"].ID: + return _LogUpkeepCounter.ParseTrigger1(log) + case _LogUpkeepCounter.abi.Events["Trigger2"].ID: + return _LogUpkeepCounter.ParseTrigger2(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (LogUpkeepCounterPerformingUpkeep) Topic() common.Hash { + return common.HexToHash("0x8e8112f20a2134e18e591d2cdd68cd86a95d06e6328ede501fc6314f4a5075fa") +} + +func (LogUpkeepCounterTrigger) Topic() common.Hash { + return common.HexToHash("0x3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d") +} + +func (LogUpkeepCounterTrigger0) Topic() common.Hash { + return common.HexToHash("0x57b1de35764b0939dde00771c7069cdf8d6a65d6a175623f19aa18784fd4c6da") +} + +func (LogUpkeepCounterTrigger1) Topic() common.Hash { + return 
common.HexToHash("0x1da9f70fe932e73fba9374396c5c0b02dbd170f951874b7b4afabe4dd029a9c8") +} + +func (LogUpkeepCounterTrigger2) Topic() common.Hash { + return common.HexToHash("0x5121119bad45ca7e58e0bdadf39045f5111e93ba4304a0f6457a3e7bc9791e71") +} + +func (_LogUpkeepCounter *LogUpkeepCounter) Address() common.Address { + return _LogUpkeepCounter.address +} + +type LogUpkeepCounterInterface interface { + CheckLog(opts *bind.CallOpts, log Log, arg1 []byte) (bool, []byte, error) + + Counter(opts *bind.CallOpts) (*big.Int, error) + + Eligible(opts *bind.CallOpts) (bool, error) + + InitialBlock(opts *bind.CallOpts) (*big.Int, error) + + LastBlock(opts *bind.CallOpts) (*big.Int, error) + + PreviousPerformBlock(opts *bind.CallOpts) (*big.Int, error) + + TestRange(opts *bind.CallOpts) (*big.Int, error) + + PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) + + SetSpread(opts *bind.TransactOpts, _testRange *big.Int) (*types.Transaction, error) + + Start(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterPerformingUpkeep(opts *bind.FilterOpts, from []common.Address) (*LogUpkeepCounterPerformingUpkeepIterator, error) + + WatchPerformingUpkeep(opts *bind.WatchOpts, sink chan<- *LogUpkeepCounterPerformingUpkeep, from []common.Address) (event.Subscription, error) + + ParsePerformingUpkeep(log types.Log) (*LogUpkeepCounterPerformingUpkeep, error) + + FilterTrigger(opts *bind.FilterOpts) (*LogUpkeepCounterTriggerIterator, error) + + WatchTrigger(opts *bind.WatchOpts, sink chan<- *LogUpkeepCounterTrigger) (event.Subscription, error) + + ParseTrigger(log types.Log) (*LogUpkeepCounterTrigger, error) + + FilterTrigger0(opts *bind.FilterOpts) (*LogUpkeepCounterTrigger0Iterator, error) + + WatchTrigger0(opts *bind.WatchOpts, sink chan<- *LogUpkeepCounterTrigger0) (event.Subscription, error) + + ParseTrigger0(log types.Log) (*LogUpkeepCounterTrigger0, error) + + FilterTrigger1(opts *bind.FilterOpts) (*LogUpkeepCounterTrigger1Iterator, error) 
+ + WatchTrigger1(opts *bind.WatchOpts, sink chan<- *LogUpkeepCounterTrigger1) (event.Subscription, error) + + ParseTrigger1(log types.Log) (*LogUpkeepCounterTrigger1, error) + + FilterTrigger2(opts *bind.FilterOpts) (*LogUpkeepCounterTrigger2Iterator, error) + + WatchTrigger2(opts *bind.WatchOpts, sink chan<- *LogUpkeepCounterTrigger2) (event.Subscription, error) + + ParseTrigger2(log types.Log) (*LogUpkeepCounterTrigger2, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/mock_aggregator_proxy/mock_aggregator_proxy.go b/core/gethwrappers/generated/mock_aggregator_proxy/mock_aggregator_proxy.go new file mode 100644 index 00000000..a9c972e8 --- /dev/null +++ b/core/gethwrappers/generated/mock_aggregator_proxy/mock_aggregator_proxy.go @@ -0,0 +1,216 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package mock_aggregator_proxy + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var MockAggregatorProxyMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_aggregator\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"aggregator\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_aggregator\",\"type\":\"address\"}],\"name\":\"updateAggregator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b5060405161019138038061019183398101604081905261002f91610054565b600080546001600160a01b0319166001600160a01b0392909216919091179055610084565b60006020828403121561006657600080fd5b81516001600160a01b038116811461007d57600080fd5b9392505050565b60ff806100926000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c8063245a7bfc1460375780639fe4ee47146063575b600080fd5b6000546040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190f35b60b5606e36600460b7565b600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b005b60006020828403121560c857600080fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811460eb57600080fd5b939250505056fea164736f6c6343000806000a", +} + +var MockAggregatorProxyABI = MockAggregatorProxyMetaData.ABI + +var MockAggregatorProxyBin = MockAggregatorProxyMetaData.Bin + +func DeployMockAggregatorProxy(auth *bind.TransactOpts, backend bind.ContractBackend, _aggregator common.Address) (common.Address, *types.Transaction, *MockAggregatorProxy, error) { + parsed, err := MockAggregatorProxyMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(MockAggregatorProxyBin), backend, _aggregator) + 
if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &MockAggregatorProxy{address: address, abi: *parsed, MockAggregatorProxyCaller: MockAggregatorProxyCaller{contract: contract}, MockAggregatorProxyTransactor: MockAggregatorProxyTransactor{contract: contract}, MockAggregatorProxyFilterer: MockAggregatorProxyFilterer{contract: contract}}, nil +} + +type MockAggregatorProxy struct { + address common.Address + abi abi.ABI + MockAggregatorProxyCaller + MockAggregatorProxyTransactor + MockAggregatorProxyFilterer +} + +type MockAggregatorProxyCaller struct { + contract *bind.BoundContract +} + +type MockAggregatorProxyTransactor struct { + contract *bind.BoundContract +} + +type MockAggregatorProxyFilterer struct { + contract *bind.BoundContract +} + +type MockAggregatorProxySession struct { + Contract *MockAggregatorProxy + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type MockAggregatorProxyCallerSession struct { + Contract *MockAggregatorProxyCaller + CallOpts bind.CallOpts +} + +type MockAggregatorProxyTransactorSession struct { + Contract *MockAggregatorProxyTransactor + TransactOpts bind.TransactOpts +} + +type MockAggregatorProxyRaw struct { + Contract *MockAggregatorProxy +} + +type MockAggregatorProxyCallerRaw struct { + Contract *MockAggregatorProxyCaller +} + +type MockAggregatorProxyTransactorRaw struct { + Contract *MockAggregatorProxyTransactor +} + +func NewMockAggregatorProxy(address common.Address, backend bind.ContractBackend) (*MockAggregatorProxy, error) { + abi, err := abi.JSON(strings.NewReader(MockAggregatorProxyABI)) + if err != nil { + return nil, err + } + contract, err := bindMockAggregatorProxy(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &MockAggregatorProxy{address: address, abi: abi, MockAggregatorProxyCaller: MockAggregatorProxyCaller{contract: contract}, MockAggregatorProxyTransactor: MockAggregatorProxyTransactor{contract: contract}, 
MockAggregatorProxyFilterer: MockAggregatorProxyFilterer{contract: contract}}, nil +} + +func NewMockAggregatorProxyCaller(address common.Address, caller bind.ContractCaller) (*MockAggregatorProxyCaller, error) { + contract, err := bindMockAggregatorProxy(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &MockAggregatorProxyCaller{contract: contract}, nil +} + +func NewMockAggregatorProxyTransactor(address common.Address, transactor bind.ContractTransactor) (*MockAggregatorProxyTransactor, error) { + contract, err := bindMockAggregatorProxy(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &MockAggregatorProxyTransactor{contract: contract}, nil +} + +func NewMockAggregatorProxyFilterer(address common.Address, filterer bind.ContractFilterer) (*MockAggregatorProxyFilterer, error) { + contract, err := bindMockAggregatorProxy(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &MockAggregatorProxyFilterer{contract: contract}, nil +} + +func bindMockAggregatorProxy(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := MockAggregatorProxyMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_MockAggregatorProxy *MockAggregatorProxyRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _MockAggregatorProxy.Contract.MockAggregatorProxyCaller.contract.Call(opts, result, method, params...) 
+} + +func (_MockAggregatorProxy *MockAggregatorProxyRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MockAggregatorProxy.Contract.MockAggregatorProxyTransactor.contract.Transfer(opts) +} + +func (_MockAggregatorProxy *MockAggregatorProxyRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _MockAggregatorProxy.Contract.MockAggregatorProxyTransactor.contract.Transact(opts, method, params...) +} + +func (_MockAggregatorProxy *MockAggregatorProxyCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _MockAggregatorProxy.Contract.contract.Call(opts, result, method, params...) +} + +func (_MockAggregatorProxy *MockAggregatorProxyTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MockAggregatorProxy.Contract.contract.Transfer(opts) +} + +func (_MockAggregatorProxy *MockAggregatorProxyTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _MockAggregatorProxy.Contract.contract.Transact(opts, method, params...) 
+} + +func (_MockAggregatorProxy *MockAggregatorProxyCaller) Aggregator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _MockAggregatorProxy.contract.Call(opts, &out, "aggregator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_MockAggregatorProxy *MockAggregatorProxySession) Aggregator() (common.Address, error) { + return _MockAggregatorProxy.Contract.Aggregator(&_MockAggregatorProxy.CallOpts) +} + +func (_MockAggregatorProxy *MockAggregatorProxyCallerSession) Aggregator() (common.Address, error) { + return _MockAggregatorProxy.Contract.Aggregator(&_MockAggregatorProxy.CallOpts) +} + +func (_MockAggregatorProxy *MockAggregatorProxyTransactor) UpdateAggregator(opts *bind.TransactOpts, _aggregator common.Address) (*types.Transaction, error) { + return _MockAggregatorProxy.contract.Transact(opts, "updateAggregator", _aggregator) +} + +func (_MockAggregatorProxy *MockAggregatorProxySession) UpdateAggregator(_aggregator common.Address) (*types.Transaction, error) { + return _MockAggregatorProxy.Contract.UpdateAggregator(&_MockAggregatorProxy.TransactOpts, _aggregator) +} + +func (_MockAggregatorProxy *MockAggregatorProxyTransactorSession) UpdateAggregator(_aggregator common.Address) (*types.Transaction, error) { + return _MockAggregatorProxy.Contract.UpdateAggregator(&_MockAggregatorProxy.TransactOpts, _aggregator) +} + +func (_MockAggregatorProxy *MockAggregatorProxy) Address() common.Address { + return _MockAggregatorProxy.address +} + +type MockAggregatorProxyInterface interface { + Aggregator(opts *bind.CallOpts) (common.Address, error) + + UpdateAggregator(opts *bind.TransactOpts, _aggregator common.Address) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/mock_ethlink_aggregator_wrapper/mock_ethlink_aggregator_wrapper.go 
b/core/gethwrappers/generated/mock_ethlink_aggregator_wrapper/mock_ethlink_aggregator_wrapper.go new file mode 100644 index 00000000..382786d0 --- /dev/null +++ b/core/gethwrappers/generated/mock_ethlink_aggregator_wrapper/mock_ethlink_aggregator_wrapper.go @@ -0,0 +1,363 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package mock_ethlink_aggregator_wrapper + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var MockETHPLIAggregatorMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"_answer\",\"type\":\"int256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"answer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"description\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint80\",\"name\":\"_roundId\",\"type\":\"uint80\"}],\"name\":\"getRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"ans\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"ans\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5060405161025c38038061025c8339818101604052602081101561003357600080fd5b5051600055610215806100476000396000f3fe608060405234801561001057600080fd5b50600436106100725760003560e01c806385bb7d691161005057806385bb7d691461012c5780639a6fc8f514610134578063feaf968c1461019c57610072565b8063313ce5671461007757806354fd4d50146100955780637284e416146100af575b600080fd5b61007f6101a4565b6040805160ff9092168252519081900360200190f35b61009d6101a9565b60408051918252519081900360200190f35b6100b76101ae565b6040805160208082528351818301528351919283929083019185019080838360005b838110156100f15781810151838201526020016100d9565b50505050905090810190601f16801561011e5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b61009d6101e5565b61015d6004803603602081101561014a57600080fd5b503569ffffffffffffffffffff166101eb565b6040805169ffffffffffffffffffff96871681526020810195909552848101939093526060840191909152909216608082015290519081900360a00190f35b61015d6101fa565b601290565b600190565b60408051808201909152601581527f4d6f636b4554484c494e4b41676772656761746f720000000000000000000000602082015290565b60005481565b50600054600191429081908490565b60005460019142908190849056fea164736f6c6343000606000a", +} + +var MockETHPLIAggregatorABI = MockETHPLIAggregatorMetaData.ABI + +var MockETHPLIAggregatorBin = MockETHPLIAggregatorMetaData.Bin + +func DeployMockETHPLIAggregator(auth *bind.TransactOpts, backend bind.ContractBackend, _answer *big.Int) (common.Address, *types.Transaction, *MockETHPLIAggregator, error) { + parsed, err := MockETHPLIAggregatorMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(MockETHPLIAggregatorBin), backend, _answer) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &MockETHPLIAggregator{address: address, abi: 
*parsed, MockETHPLIAggregatorCaller: MockETHPLIAggregatorCaller{contract: contract}, MockETHPLIAggregatorTransactor: MockETHPLIAggregatorTransactor{contract: contract}, MockETHPLIAggregatorFilterer: MockETHPLIAggregatorFilterer{contract: contract}}, nil +} + +type MockETHPLIAggregator struct { + address common.Address + abi abi.ABI + MockETHPLIAggregatorCaller + MockETHPLIAggregatorTransactor + MockETHPLIAggregatorFilterer +} + +type MockETHPLIAggregatorCaller struct { + contract *bind.BoundContract +} + +type MockETHPLIAggregatorTransactor struct { + contract *bind.BoundContract +} + +type MockETHPLIAggregatorFilterer struct { + contract *bind.BoundContract +} + +type MockETHPLIAggregatorSession struct { + Contract *MockETHPLIAggregator + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type MockETHPLIAggregatorCallerSession struct { + Contract *MockETHPLIAggregatorCaller + CallOpts bind.CallOpts +} + +type MockETHPLIAggregatorTransactorSession struct { + Contract *MockETHPLIAggregatorTransactor + TransactOpts bind.TransactOpts +} + +type MockETHPLIAggregatorRaw struct { + Contract *MockETHPLIAggregator +} + +type MockETHPLIAggregatorCallerRaw struct { + Contract *MockETHPLIAggregatorCaller +} + +type MockETHPLIAggregatorTransactorRaw struct { + Contract *MockETHPLIAggregatorTransactor +} + +func NewMockETHPLIAggregator(address common.Address, backend bind.ContractBackend) (*MockETHPLIAggregator, error) { + abi, err := abi.JSON(strings.NewReader(MockETHPLIAggregatorABI)) + if err != nil { + return nil, err + } + contract, err := bindMockETHPLIAggregator(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &MockETHPLIAggregator{address: address, abi: abi, MockETHPLIAggregatorCaller: MockETHPLIAggregatorCaller{contract: contract}, MockETHPLIAggregatorTransactor: MockETHPLIAggregatorTransactor{contract: contract}, MockETHPLIAggregatorFilterer: MockETHPLIAggregatorFilterer{contract: contract}}, nil +} + +func 
NewMockETHPLIAggregatorCaller(address common.Address, caller bind.ContractCaller) (*MockETHPLIAggregatorCaller, error) { + contract, err := bindMockETHPLIAggregator(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &MockETHPLIAggregatorCaller{contract: contract}, nil +} + +func NewMockETHPLIAggregatorTransactor(address common.Address, transactor bind.ContractTransactor) (*MockETHPLIAggregatorTransactor, error) { + contract, err := bindMockETHPLIAggregator(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &MockETHPLIAggregatorTransactor{contract: contract}, nil +} + +func NewMockETHPLIAggregatorFilterer(address common.Address, filterer bind.ContractFilterer) (*MockETHPLIAggregatorFilterer, error) { + contract, err := bindMockETHPLIAggregator(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &MockETHPLIAggregatorFilterer{contract: contract}, nil +} + +func bindMockETHPLIAggregator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := MockETHPLIAggregatorMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _MockETHPLIAggregator.Contract.MockETHPLIAggregatorCaller.contract.Call(opts, result, method, params...) 
+} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MockETHPLIAggregator.Contract.MockETHPLIAggregatorTransactor.contract.Transfer(opts) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _MockETHPLIAggregator.Contract.MockETHPLIAggregatorTransactor.contract.Transact(opts, method, params...) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _MockETHPLIAggregator.Contract.contract.Call(opts, result, method, params...) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MockETHPLIAggregator.Contract.contract.Transfer(opts) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _MockETHPLIAggregator.Contract.contract.Transact(opts, method, params...) 
+} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCaller) Answer(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MockETHPLIAggregator.contract.Call(opts, &out, "answer") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorSession) Answer() (*big.Int, error) { + return _MockETHPLIAggregator.Contract.Answer(&_MockETHPLIAggregator.CallOpts) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCallerSession) Answer() (*big.Int, error) { + return _MockETHPLIAggregator.Contract.Answer(&_MockETHPLIAggregator.CallOpts) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _MockETHPLIAggregator.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorSession) Decimals() (uint8, error) { + return _MockETHPLIAggregator.Contract.Decimals(&_MockETHPLIAggregator.CallOpts) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCallerSession) Decimals() (uint8, error) { + return _MockETHPLIAggregator.Contract.Decimals(&_MockETHPLIAggregator.CallOpts) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCaller) Description(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _MockETHPLIAggregator.contract.Call(opts, &out, "description") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorSession) Description() (string, error) { + return _MockETHPLIAggregator.Contract.Description(&_MockETHPLIAggregator.CallOpts) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCallerSession) Description() (string, 
error) { + return _MockETHPLIAggregator.Contract.Description(&_MockETHPLIAggregator.CallOpts) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCaller) GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) { + var out []interface{} + err := _MockETHPLIAggregator.contract.Call(opts, &out, "getRoundData", _roundId) + + outstruct := new(GetRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Ans = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _MockETHPLIAggregator.Contract.GetRoundData(&_MockETHPLIAggregator.CallOpts, _roundId) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCallerSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _MockETHPLIAggregator.Contract.GetRoundData(&_MockETHPLIAggregator.CallOpts, _roundId) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCaller) LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) { + var out []interface{} + err := _MockETHPLIAggregator.contract.Call(opts, &out, "latestRoundData") + + outstruct := new(LatestRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Ans = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return 
*outstruct, err + +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorSession) LatestRoundData() (LatestRoundData, + + error) { + return _MockETHPLIAggregator.Contract.LatestRoundData(&_MockETHPLIAggregator.CallOpts) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCallerSession) LatestRoundData() (LatestRoundData, + + error) { + return _MockETHPLIAggregator.Contract.LatestRoundData(&_MockETHPLIAggregator.CallOpts) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCaller) Version(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MockETHPLIAggregator.contract.Call(opts, &out, "version") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorSession) Version() (*big.Int, error) { + return _MockETHPLIAggregator.Contract.Version(&_MockETHPLIAggregator.CallOpts) +} + +func (_MockETHPLIAggregator *MockETHPLIAggregatorCallerSession) Version() (*big.Int, error) { + return _MockETHPLIAggregator.Contract.Version(&_MockETHPLIAggregator.CallOpts) +} + +type GetRoundData struct { + RoundId *big.Int + Ans *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} +type LatestRoundData struct { + RoundId *big.Int + Ans *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} + +func (_MockETHPLIAggregator *MockETHPLIAggregator) Address() common.Address { + return _MockETHPLIAggregator.address +} + +type MockETHPLIAggregatorInterface interface { + Answer(opts *bind.CallOpts) (*big.Int, error) + + Decimals(opts *bind.CallOpts) (uint8, error) + + Description(opts *bind.CallOpts) (string, error) + + GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) + + LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) + + Version(opts *bind.CallOpts) (*big.Int, error) + + Address() common.Address +} diff --git 
a/core/gethwrappers/generated/mock_gas_aggregator_wrapper/mock_gas_aggregator_wrapper.go b/core/gethwrappers/generated/mock_gas_aggregator_wrapper/mock_gas_aggregator_wrapper.go new file mode 100644 index 00000000..148417f3 --- /dev/null +++ b/core/gethwrappers/generated/mock_gas_aggregator_wrapper/mock_gas_aggregator_wrapper.go @@ -0,0 +1,363 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package mock_gas_aggregator_wrapper + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var MockGASAggregatorMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"_answer\",\"type\":\"int256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"answer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"description\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint80\",\"name\":\"_roundId\",\"type\":\"uint80\"}],\"name\":\"getRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5060405161025c38038061025c8339818101604052602081101561003357600080fd5b5051600055610215806100476000396000f3fe608060405234801561001057600080fd5b50600436106100725760003560e01c806385bb7d691161005057806385bb7d691461012c5780639a6fc8f514610134578063feaf968c1461019c57610072565b8063313ce5671461007757806354fd4d50146100955780637284e416146100af575b600080fd5b61007f6101a4565b6040805160ff9092168252519081900360200190f35b61009d6101a9565b60408051918252519081900360200190f35b6100b76101ae565b6040805160208082528351818301528351919283929083019185019080838360005b838110156100f15781810151838201526020016100d9565b50505050905090810190601f16801561011e5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b61009d6101e5565b61015d6004803603602081101561014a57600080fd5b503569ffffffffffffffffffff166101eb565b6040805169ffffffffffffffffffff96871681526020810195909552848101939093526060840191909152909216608082015290519081900360a00190f35b61015d6101fa565b601290565b600190565b60408051808201909152601181527f4d6f636b47415341676772656761746f72000000000000000000000000000000602082015290565b60005481565b50600190600090429081908490565b60016000428083909192939456fea164736f6c6343000606000a", +} + +var MockGASAggregatorABI = MockGASAggregatorMetaData.ABI + +var MockGASAggregatorBin = MockGASAggregatorMetaData.Bin + +func DeployMockGASAggregator(auth *bind.TransactOpts, backend bind.ContractBackend, _answer *big.Int) (common.Address, *types.Transaction, *MockGASAggregator, error) { + parsed, err := MockGASAggregatorMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(MockGASAggregatorBin), backend, _answer) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &MockGASAggregator{address: address, abi: *parsed, 
MockGASAggregatorCaller: MockGASAggregatorCaller{contract: contract}, MockGASAggregatorTransactor: MockGASAggregatorTransactor{contract: contract}, MockGASAggregatorFilterer: MockGASAggregatorFilterer{contract: contract}}, nil +} + +type MockGASAggregator struct { + address common.Address + abi abi.ABI + MockGASAggregatorCaller + MockGASAggregatorTransactor + MockGASAggregatorFilterer +} + +type MockGASAggregatorCaller struct { + contract *bind.BoundContract +} + +type MockGASAggregatorTransactor struct { + contract *bind.BoundContract +} + +type MockGASAggregatorFilterer struct { + contract *bind.BoundContract +} + +type MockGASAggregatorSession struct { + Contract *MockGASAggregator + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type MockGASAggregatorCallerSession struct { + Contract *MockGASAggregatorCaller + CallOpts bind.CallOpts +} + +type MockGASAggregatorTransactorSession struct { + Contract *MockGASAggregatorTransactor + TransactOpts bind.TransactOpts +} + +type MockGASAggregatorRaw struct { + Contract *MockGASAggregator +} + +type MockGASAggregatorCallerRaw struct { + Contract *MockGASAggregatorCaller +} + +type MockGASAggregatorTransactorRaw struct { + Contract *MockGASAggregatorTransactor +} + +func NewMockGASAggregator(address common.Address, backend bind.ContractBackend) (*MockGASAggregator, error) { + abi, err := abi.JSON(strings.NewReader(MockGASAggregatorABI)) + if err != nil { + return nil, err + } + contract, err := bindMockGASAggregator(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &MockGASAggregator{address: address, abi: abi, MockGASAggregatorCaller: MockGASAggregatorCaller{contract: contract}, MockGASAggregatorTransactor: MockGASAggregatorTransactor{contract: contract}, MockGASAggregatorFilterer: MockGASAggregatorFilterer{contract: contract}}, nil +} + +func NewMockGASAggregatorCaller(address common.Address, caller bind.ContractCaller) (*MockGASAggregatorCaller, error) { + contract, err 
:= bindMockGASAggregator(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &MockGASAggregatorCaller{contract: contract}, nil +} + +func NewMockGASAggregatorTransactor(address common.Address, transactor bind.ContractTransactor) (*MockGASAggregatorTransactor, error) { + contract, err := bindMockGASAggregator(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &MockGASAggregatorTransactor{contract: contract}, nil +} + +func NewMockGASAggregatorFilterer(address common.Address, filterer bind.ContractFilterer) (*MockGASAggregatorFilterer, error) { + contract, err := bindMockGASAggregator(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &MockGASAggregatorFilterer{contract: contract}, nil +} + +func bindMockGASAggregator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := MockGASAggregatorMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_MockGASAggregator *MockGASAggregatorRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _MockGASAggregator.Contract.MockGASAggregatorCaller.contract.Call(opts, result, method, params...) +} + +func (_MockGASAggregator *MockGASAggregatorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MockGASAggregator.Contract.MockGASAggregatorTransactor.contract.Transfer(opts) +} + +func (_MockGASAggregator *MockGASAggregatorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _MockGASAggregator.Contract.MockGASAggregatorTransactor.contract.Transact(opts, method, params...) 
+} + +func (_MockGASAggregator *MockGASAggregatorCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _MockGASAggregator.Contract.contract.Call(opts, result, method, params...) +} + +func (_MockGASAggregator *MockGASAggregatorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MockGASAggregator.Contract.contract.Transfer(opts) +} + +func (_MockGASAggregator *MockGASAggregatorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _MockGASAggregator.Contract.contract.Transact(opts, method, params...) +} + +func (_MockGASAggregator *MockGASAggregatorCaller) Answer(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MockGASAggregator.contract.Call(opts, &out, "answer") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_MockGASAggregator *MockGASAggregatorSession) Answer() (*big.Int, error) { + return _MockGASAggregator.Contract.Answer(&_MockGASAggregator.CallOpts) +} + +func (_MockGASAggregator *MockGASAggregatorCallerSession) Answer() (*big.Int, error) { + return _MockGASAggregator.Contract.Answer(&_MockGASAggregator.CallOpts) +} + +func (_MockGASAggregator *MockGASAggregatorCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _MockGASAggregator.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_MockGASAggregator *MockGASAggregatorSession) Decimals() (uint8, error) { + return _MockGASAggregator.Contract.Decimals(&_MockGASAggregator.CallOpts) +} + +func (_MockGASAggregator *MockGASAggregatorCallerSession) Decimals() (uint8, error) { + return _MockGASAggregator.Contract.Decimals(&_MockGASAggregator.CallOpts) +} + 
+func (_MockGASAggregator *MockGASAggregatorCaller) Description(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _MockGASAggregator.contract.Call(opts, &out, "description") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_MockGASAggregator *MockGASAggregatorSession) Description() (string, error) { + return _MockGASAggregator.Contract.Description(&_MockGASAggregator.CallOpts) +} + +func (_MockGASAggregator *MockGASAggregatorCallerSession) Description() (string, error) { + return _MockGASAggregator.Contract.Description(&_MockGASAggregator.CallOpts) +} + +func (_MockGASAggregator *MockGASAggregatorCaller) GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) { + var out []interface{} + err := _MockGASAggregator.contract.Call(opts, &out, "getRoundData", _roundId) + + outstruct := new(GetRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_MockGASAggregator *MockGASAggregatorSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _MockGASAggregator.Contract.GetRoundData(&_MockGASAggregator.CallOpts, _roundId) +} + +func (_MockGASAggregator *MockGASAggregatorCallerSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _MockGASAggregator.Contract.GetRoundData(&_MockGASAggregator.CallOpts, _roundId) +} + +func (_MockGASAggregator *MockGASAggregatorCaller) LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) { + var out []interface{} + err := 
_MockGASAggregator.contract.Call(opts, &out, "latestRoundData") + + outstruct := new(LatestRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_MockGASAggregator *MockGASAggregatorSession) LatestRoundData() (LatestRoundData, + + error) { + return _MockGASAggregator.Contract.LatestRoundData(&_MockGASAggregator.CallOpts) +} + +func (_MockGASAggregator *MockGASAggregatorCallerSession) LatestRoundData() (LatestRoundData, + + error) { + return _MockGASAggregator.Contract.LatestRoundData(&_MockGASAggregator.CallOpts) +} + +func (_MockGASAggregator *MockGASAggregatorCaller) Version(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MockGASAggregator.contract.Call(opts, &out, "version") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_MockGASAggregator *MockGASAggregatorSession) Version() (*big.Int, error) { + return _MockGASAggregator.Contract.Version(&_MockGASAggregator.CallOpts) +} + +func (_MockGASAggregator *MockGASAggregatorCallerSession) Version() (*big.Int, error) { + return _MockGASAggregator.Contract.Version(&_MockGASAggregator.CallOpts) +} + +type GetRoundData struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} +type LatestRoundData struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} + +func (_MockGASAggregator *MockGASAggregator) Address() common.Address { + return 
_MockGASAggregator.address +} + +type MockGASAggregatorInterface interface { + Answer(opts *bind.CallOpts) (*big.Int, error) + + Decimals(opts *bind.CallOpts) (uint8, error) + + Description(opts *bind.CallOpts) (string, error) + + GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) + + LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) + + Version(opts *bind.CallOpts) (*big.Int, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/mock_v3_aggregator_contract/mock_v3_aggregator_contract.go b/core/gethwrappers/generated/mock_v3_aggregator_contract/mock_v3_aggregator_contract.go new file mode 100644 index 00000000..81ebbdd6 --- /dev/null +++ b/core/gethwrappers/generated/mock_v3_aggregator_contract/mock_v3_aggregator_contract.go @@ -0,0 +1,907 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package mock_v3_aggregator_contract + +import ( + "math/big" + "strings" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// MockV3AggregatorContractABI is the input ABI used to generate the binding from. 
+const MockV3AggregatorContractABI = "[{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"_decimals\",\"type\":\"uint8\"},{\"internalType\":\"int256\",\"name\":\"_initialAnswer\",\"type\":\"int256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int256\",\"name\":\"current\",\"type\":\"int256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"}],\"name\":\"AnswerUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"startedBy\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"}],\"name\":\"NewRound\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"description\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"getAnswer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint80\",\"name\":\"_roundId\",\"type\":\"uint80\"}],\"name\":\"getRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredIn
Round\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"getTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestAnswer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRound\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"_answer\",\"type\":\"int256\"}],\"name\":\"updateAnswer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint80\",\"name\":\"_roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"_answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"_timestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_startedAt\",\"type\":\"uint256\"}],\"name\":\"updateRoundData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"uint256\",\"name\
":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]" + +// MockV3AggregatorContractBin is the compiled bytecode used for deploying new contracts. +var MockV3AggregatorContractBin = "0x608060405234801561001057600080fd5b506040516105113803806105118339818101604052604081101561003357600080fd5b5080516020909101516000805460ff191660ff84161790556100548161005b565b50506100a2565b600181815542600281905560038054909201808355600090815260046020908152604080832095909555835482526005815284822083905592548152600690925291902055565b610460806100b16000396000f3fe608060405234801561001057600080fd5b50600436106100d45760003560e01c80638205bf6a11610081578063b5ab58dc1161005b578063b5ab58dc14610273578063b633620c14610290578063feaf968c146102ad576100d4565b80638205bf6a146101db5780639a6fc8f5146101e3578063a87a20ce14610256576100d4565b806354fd4d50116100b257806354fd4d501461014e578063668a0f02146101565780637284e4161461015e576100d4565b8063313ce567146100d95780634aa2011f146100f757806350d25bcd14610134575b600080fd5b6100e16102b5565b6040805160ff9092168252519081900360200190f35b6101326004803603608081101561010d57600080fd5b5069ffffffffffffffffffff81351690602081013590604081013590606001356102be565b005b61013c61030b565b60408051918252519081900360200190f35b61013c610311565b61013c610316565b61016661031c565b6040805160208082528351818301528351919283929083019185019080838360005b838110156101a0578181015183820152602001610188565b50505050905090810190601f1680156101cd5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b61013c610353565b61020c600480360360208110156101f957600080fd5b503569ffffffffffffffffffff16610359565b604051808669ffffffffffffffffffff1681526020018581526020018481526020018381526020018269ffffffffffffffffffff1681526020019550505050505060405180910390f35b6101326004803603602081101561026c57600080fd5b5035610392565b61013c6004803603602081101561028957600080fd5b50356103d9565b61013c600480360360208110156102a657600080fd5b50356103eb565b61020c6103fd565b60005460ff1681565b69ffffffffffffffffffff909
31660038181556001849055600283905560009182526004602090815260408084209590955581548352600581528483209390935554815260069091522055565b60015481565b600081565b60035481565b60408051808201909152601f81527f76302e362f74657374732f4d6f636b563341676772656761746f722e736f6c00602082015290565b60025481565b69ffffffffffffffffffff8116600090815260046020908152604080832054600683528184205460059093529220549293919290918490565b600181815542600281905560038054909201808355600090815260046020908152604080832095909555835482526005815284822083905592548152600690925291902055565b60046020526000908152604090205481565b60056020526000908152604090205481565b6003546000818152600460209081526040808320546006835281842054600590935292205483909192939456fea2646970667358221220ecf1c50e0f78cd131fb708022b7a4f2d2de0408537205a8d45c5a41fdbc0ad4d64736f6c63430007060033" + +// DeployMockV3AggregatorContract deploys a new Ethereum contract, binding an instance of MockV3AggregatorContract to it. +func DeployMockV3AggregatorContract(auth *bind.TransactOpts, backend bind.ContractBackend, _decimals uint8, _initialAnswer *big.Int) (common.Address, *types.Transaction, *MockV3AggregatorContract, error) { + parsed, err := abi.JSON(strings.NewReader(MockV3AggregatorContractABI)) + if err != nil { + return common.Address{}, nil, nil, err + } + + address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(MockV3AggregatorContractBin), backend, _decimals, _initialAnswer) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &MockV3AggregatorContract{MockV3AggregatorContractCaller: MockV3AggregatorContractCaller{contract: contract}, MockV3AggregatorContractTransactor: MockV3AggregatorContractTransactor{contract: contract}, MockV3AggregatorContractFilterer: MockV3AggregatorContractFilterer{contract: contract}}, nil +} + +// MockV3AggregatorContract is an auto generated Go binding around an Ethereum contract. 
+type MockV3AggregatorContract struct { + MockV3AggregatorContractCaller // Read-only binding to the contract + MockV3AggregatorContractTransactor // Write-only binding to the contract + MockV3AggregatorContractFilterer // Log filterer for contract events +} + +// MockV3AggregatorContractCaller is an auto generated read-only Go binding around an Ethereum contract. +type MockV3AggregatorContractCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// MockV3AggregatorContractTransactor is an auto generated write-only Go binding around an Ethereum contract. +type MockV3AggregatorContractTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// MockV3AggregatorContractFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type MockV3AggregatorContractFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// MockV3AggregatorContractSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type MockV3AggregatorContractSession struct { + Contract *MockV3AggregatorContract // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// MockV3AggregatorContractCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type MockV3AggregatorContractCallerSession struct { + Contract *MockV3AggregatorContractCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// MockV3AggregatorContractTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. 
+type MockV3AggregatorContractTransactorSession struct { + Contract *MockV3AggregatorContractTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// MockV3AggregatorContractRaw is an auto generated low-level Go binding around an Ethereum contract. +type MockV3AggregatorContractRaw struct { + Contract *MockV3AggregatorContract // Generic contract binding to access the raw methods on +} + +// MockV3AggregatorContractCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type MockV3AggregatorContractCallerRaw struct { + Contract *MockV3AggregatorContractCaller // Generic read-only contract binding to access the raw methods on +} + +// MockV3AggregatorContractTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type MockV3AggregatorContractTransactorRaw struct { + Contract *MockV3AggregatorContractTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewMockV3AggregatorContract creates a new instance of MockV3AggregatorContract, bound to a specific deployed contract. +func NewMockV3AggregatorContract(address common.Address, backend bind.ContractBackend) (*MockV3AggregatorContract, error) { + contract, err := bindMockV3AggregatorContract(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &MockV3AggregatorContract{MockV3AggregatorContractCaller: MockV3AggregatorContractCaller{contract: contract}, MockV3AggregatorContractTransactor: MockV3AggregatorContractTransactor{contract: contract}, MockV3AggregatorContractFilterer: MockV3AggregatorContractFilterer{contract: contract}}, nil +} + +// NewMockV3AggregatorContractCaller creates a new read-only instance of MockV3AggregatorContract, bound to a specific deployed contract. 
+func NewMockV3AggregatorContractCaller(address common.Address, caller bind.ContractCaller) (*MockV3AggregatorContractCaller, error) { + contract, err := bindMockV3AggregatorContract(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &MockV3AggregatorContractCaller{contract: contract}, nil +} + +// NewMockV3AggregatorContractTransactor creates a new write-only instance of MockV3AggregatorContract, bound to a specific deployed contract. +func NewMockV3AggregatorContractTransactor(address common.Address, transactor bind.ContractTransactor) (*MockV3AggregatorContractTransactor, error) { + contract, err := bindMockV3AggregatorContract(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &MockV3AggregatorContractTransactor{contract: contract}, nil +} + +// NewMockV3AggregatorContractFilterer creates a new log filterer instance of MockV3AggregatorContract, bound to a specific deployed contract. +func NewMockV3AggregatorContractFilterer(address common.Address, filterer bind.ContractFilterer) (*MockV3AggregatorContractFilterer, error) { + contract, err := bindMockV3AggregatorContract(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &MockV3AggregatorContractFilterer{contract: contract}, nil +} + +// bindMockV3AggregatorContract binds a generic wrapper to an already deployed contract. +func bindMockV3AggregatorContract(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(MockV3AggregatorContractABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. 
The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_MockV3AggregatorContract *MockV3AggregatorContractRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _MockV3AggregatorContract.Contract.MockV3AggregatorContractCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_MockV3AggregatorContract *MockV3AggregatorContractRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MockV3AggregatorContract.Contract.MockV3AggregatorContractTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_MockV3AggregatorContract *MockV3AggregatorContractRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _MockV3AggregatorContract.Contract.MockV3AggregatorContractTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_MockV3AggregatorContract *MockV3AggregatorContractCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _MockV3AggregatorContract.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. 
+func (_MockV3AggregatorContract *MockV3AggregatorContractTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MockV3AggregatorContract.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_MockV3AggregatorContract *MockV3AggregatorContractTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _MockV3AggregatorContract.Contract.contract.Transact(opts, method, params...) +} + +// Decimals is a free data retrieval call binding the contract method 0x313ce567. +// +// Solidity: function decimals() view returns(uint8) +func (_MockV3AggregatorContract *MockV3AggregatorContractCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _MockV3AggregatorContract.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +// Decimals is a free data retrieval call binding the contract method 0x313ce567. +// +// Solidity: function decimals() view returns(uint8) +func (_MockV3AggregatorContract *MockV3AggregatorContractSession) Decimals() (uint8, error) { + return _MockV3AggregatorContract.Contract.Decimals(&_MockV3AggregatorContract.CallOpts) +} + +// Decimals is a free data retrieval call binding the contract method 0x313ce567. +// +// Solidity: function decimals() view returns(uint8) +func (_MockV3AggregatorContract *MockV3AggregatorContractCallerSession) Decimals() (uint8, error) { + return _MockV3AggregatorContract.Contract.Decimals(&_MockV3AggregatorContract.CallOpts) +} + +// Description is a free data retrieval call binding the contract method 0x7284e416. 
+// +// Solidity: function description() view returns(string) +func (_MockV3AggregatorContract *MockV3AggregatorContractCaller) Description(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _MockV3AggregatorContract.contract.Call(opts, &out, "description") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +// Description is a free data retrieval call binding the contract method 0x7284e416. +// +// Solidity: function description() view returns(string) +func (_MockV3AggregatorContract *MockV3AggregatorContractSession) Description() (string, error) { + return _MockV3AggregatorContract.Contract.Description(&_MockV3AggregatorContract.CallOpts) +} + +// Description is a free data retrieval call binding the contract method 0x7284e416. +// +// Solidity: function description() view returns(string) +func (_MockV3AggregatorContract *MockV3AggregatorContractCallerSession) Description() (string, error) { + return _MockV3AggregatorContract.Contract.Description(&_MockV3AggregatorContract.CallOpts) +} + +// GetAnswer is a free data retrieval call binding the contract method 0xb5ab58dc. +// +// Solidity: function getAnswer(uint256 ) view returns(int256) +func (_MockV3AggregatorContract *MockV3AggregatorContractCaller) GetAnswer(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _MockV3AggregatorContract.contract.Call(opts, &out, "getAnswer", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// GetAnswer is a free data retrieval call binding the contract method 0xb5ab58dc. 
+// +// Solidity: function getAnswer(uint256 ) view returns(int256) +func (_MockV3AggregatorContract *MockV3AggregatorContractSession) GetAnswer(arg0 *big.Int) (*big.Int, error) { + return _MockV3AggregatorContract.Contract.GetAnswer(&_MockV3AggregatorContract.CallOpts, arg0) +} + +// GetAnswer is a free data retrieval call binding the contract method 0xb5ab58dc. +// +// Solidity: function getAnswer(uint256 ) view returns(int256) +func (_MockV3AggregatorContract *MockV3AggregatorContractCallerSession) GetAnswer(arg0 *big.Int) (*big.Int, error) { + return _MockV3AggregatorContract.Contract.GetAnswer(&_MockV3AggregatorContract.CallOpts, arg0) +} + +// GetRoundData is a free data retrieval call binding the contract method 0x9a6fc8f5. +// +// Solidity: function getRoundData(uint80 _roundId) view returns(uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) +func (_MockV3AggregatorContract *MockV3AggregatorContractCaller) GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +}, error) { + var out []interface{} + err := _MockV3AggregatorContract.contract.Call(opts, &out, "getRoundData", _roundId) + + outstruct := new(struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int + }) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = out[0].(*big.Int) + outstruct.Answer = out[1].(*big.Int) + outstruct.StartedAt = out[2].(*big.Int) + outstruct.UpdatedAt = out[3].(*big.Int) + outstruct.AnsweredInRound = out[4].(*big.Int) + + return *outstruct, err + +} + +// GetRoundData is a free data retrieval call binding the contract method 0x9a6fc8f5. 
+// +// Solidity: function getRoundData(uint80 _roundId) view returns(uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) +func (_MockV3AggregatorContract *MockV3AggregatorContractSession) GetRoundData(_roundId *big.Int) (struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +}, error) { + return _MockV3AggregatorContract.Contract.GetRoundData(&_MockV3AggregatorContract.CallOpts, _roundId) +} + +// GetRoundData is a free data retrieval call binding the contract method 0x9a6fc8f5. +// +// Solidity: function getRoundData(uint80 _roundId) view returns(uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) +func (_MockV3AggregatorContract *MockV3AggregatorContractCallerSession) GetRoundData(_roundId *big.Int) (struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +}, error) { + return _MockV3AggregatorContract.Contract.GetRoundData(&_MockV3AggregatorContract.CallOpts, _roundId) +} + +// GetTimestamp is a free data retrieval call binding the contract method 0xb633620c. +// +// Solidity: function getTimestamp(uint256 ) view returns(uint256) +func (_MockV3AggregatorContract *MockV3AggregatorContractCaller) GetTimestamp(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _MockV3AggregatorContract.contract.Call(opts, &out, "getTimestamp", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// GetTimestamp is a free data retrieval call binding the contract method 0xb633620c. 
+// +// Solidity: function getTimestamp(uint256 ) view returns(uint256) +func (_MockV3AggregatorContract *MockV3AggregatorContractSession) GetTimestamp(arg0 *big.Int) (*big.Int, error) { + return _MockV3AggregatorContract.Contract.GetTimestamp(&_MockV3AggregatorContract.CallOpts, arg0) +} + +// GetTimestamp is a free data retrieval call binding the contract method 0xb633620c. +// +// Solidity: function getTimestamp(uint256 ) view returns(uint256) +func (_MockV3AggregatorContract *MockV3AggregatorContractCallerSession) GetTimestamp(arg0 *big.Int) (*big.Int, error) { + return _MockV3AggregatorContract.Contract.GetTimestamp(&_MockV3AggregatorContract.CallOpts, arg0) +} + +// LatestAnswer is a free data retrieval call binding the contract method 0x50d25bcd. +// +// Solidity: function latestAnswer() view returns(int256) +func (_MockV3AggregatorContract *MockV3AggregatorContractCaller) LatestAnswer(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MockV3AggregatorContract.contract.Call(opts, &out, "latestAnswer") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// LatestAnswer is a free data retrieval call binding the contract method 0x50d25bcd. +// +// Solidity: function latestAnswer() view returns(int256) +func (_MockV3AggregatorContract *MockV3AggregatorContractSession) LatestAnswer() (*big.Int, error) { + return _MockV3AggregatorContract.Contract.LatestAnswer(&_MockV3AggregatorContract.CallOpts) +} + +// LatestAnswer is a free data retrieval call binding the contract method 0x50d25bcd. +// +// Solidity: function latestAnswer() view returns(int256) +func (_MockV3AggregatorContract *MockV3AggregatorContractCallerSession) LatestAnswer() (*big.Int, error) { + return _MockV3AggregatorContract.Contract.LatestAnswer(&_MockV3AggregatorContract.CallOpts) +} + +// LatestRound is a free data retrieval call binding the contract method 0x668a0f02. 
+// +// Solidity: function latestRound() view returns(uint256) +func (_MockV3AggregatorContract *MockV3AggregatorContractCaller) LatestRound(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MockV3AggregatorContract.contract.Call(opts, &out, "latestRound") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// LatestRound is a free data retrieval call binding the contract method 0x668a0f02. +// +// Solidity: function latestRound() view returns(uint256) +func (_MockV3AggregatorContract *MockV3AggregatorContractSession) LatestRound() (*big.Int, error) { + return _MockV3AggregatorContract.Contract.LatestRound(&_MockV3AggregatorContract.CallOpts) +} + +// LatestRound is a free data retrieval call binding the contract method 0x668a0f02. +// +// Solidity: function latestRound() view returns(uint256) +func (_MockV3AggregatorContract *MockV3AggregatorContractCallerSession) LatestRound() (*big.Int, error) { + return _MockV3AggregatorContract.Contract.LatestRound(&_MockV3AggregatorContract.CallOpts) +} + +// LatestRoundData is a free data retrieval call binding the contract method 0xfeaf968c. 
+// +// Solidity: function latestRoundData() view returns(uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) +func (_MockV3AggregatorContract *MockV3AggregatorContractCaller) LatestRoundData(opts *bind.CallOpts) (struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +}, error) { + var out []interface{} + err := _MockV3AggregatorContract.contract.Call(opts, &out, "latestRoundData") + + outstruct := new(struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int + }) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = out[0].(*big.Int) + outstruct.Answer = out[1].(*big.Int) + outstruct.StartedAt = out[2].(*big.Int) + outstruct.UpdatedAt = out[3].(*big.Int) + outstruct.AnsweredInRound = out[4].(*big.Int) + + return *outstruct, err + +} + +// LatestRoundData is a free data retrieval call binding the contract method 0xfeaf968c. +// +// Solidity: function latestRoundData() view returns(uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) +func (_MockV3AggregatorContract *MockV3AggregatorContractSession) LatestRoundData() (struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +}, error) { + return _MockV3AggregatorContract.Contract.LatestRoundData(&_MockV3AggregatorContract.CallOpts) +} + +// LatestRoundData is a free data retrieval call binding the contract method 0xfeaf968c. 
+// +// Solidity: function latestRoundData() view returns(uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) +func (_MockV3AggregatorContract *MockV3AggregatorContractCallerSession) LatestRoundData() (struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +}, error) { + return _MockV3AggregatorContract.Contract.LatestRoundData(&_MockV3AggregatorContract.CallOpts) +} + +// LatestTimestamp is a free data retrieval call binding the contract method 0x8205bf6a. +// +// Solidity: function latestTimestamp() view returns(uint256) +func (_MockV3AggregatorContract *MockV3AggregatorContractCaller) LatestTimestamp(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MockV3AggregatorContract.contract.Call(opts, &out, "latestTimestamp") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// LatestTimestamp is a free data retrieval call binding the contract method 0x8205bf6a. +// +// Solidity: function latestTimestamp() view returns(uint256) +func (_MockV3AggregatorContract *MockV3AggregatorContractSession) LatestTimestamp() (*big.Int, error) { + return _MockV3AggregatorContract.Contract.LatestTimestamp(&_MockV3AggregatorContract.CallOpts) +} + +// LatestTimestamp is a free data retrieval call binding the contract method 0x8205bf6a. +// +// Solidity: function latestTimestamp() view returns(uint256) +func (_MockV3AggregatorContract *MockV3AggregatorContractCallerSession) LatestTimestamp() (*big.Int, error) { + return _MockV3AggregatorContract.Contract.LatestTimestamp(&_MockV3AggregatorContract.CallOpts) +} + +// Version is a free data retrieval call binding the contract method 0x54fd4d50. 
+// +// Solidity: function version() view returns(uint256) +func (_MockV3AggregatorContract *MockV3AggregatorContractCaller) Version(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MockV3AggregatorContract.contract.Call(opts, &out, "version") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// Version is a free data retrieval call binding the contract method 0x54fd4d50. +// +// Solidity: function version() view returns(uint256) +func (_MockV3AggregatorContract *MockV3AggregatorContractSession) Version() (*big.Int, error) { + return _MockV3AggregatorContract.Contract.Version(&_MockV3AggregatorContract.CallOpts) +} + +// Version is a free data retrieval call binding the contract method 0x54fd4d50. +// +// Solidity: function version() view returns(uint256) +func (_MockV3AggregatorContract *MockV3AggregatorContractCallerSession) Version() (*big.Int, error) { + return _MockV3AggregatorContract.Contract.Version(&_MockV3AggregatorContract.CallOpts) +} + +// UpdateAnswer is a paid mutator transaction binding the contract method 0xa87a20ce. +// +// Solidity: function updateAnswer(int256 _answer) returns() +func (_MockV3AggregatorContract *MockV3AggregatorContractTransactor) UpdateAnswer(opts *bind.TransactOpts, _answer *big.Int) (*types.Transaction, error) { + return _MockV3AggregatorContract.contract.Transact(opts, "updateAnswer", _answer) +} + +// UpdateAnswer is a paid mutator transaction binding the contract method 0xa87a20ce. +// +// Solidity: function updateAnswer(int256 _answer) returns() +func (_MockV3AggregatorContract *MockV3AggregatorContractSession) UpdateAnswer(_answer *big.Int) (*types.Transaction, error) { + return _MockV3AggregatorContract.Contract.UpdateAnswer(&_MockV3AggregatorContract.TransactOpts, _answer) +} + +// UpdateAnswer is a paid mutator transaction binding the contract method 0xa87a20ce. 
+// +// Solidity: function updateAnswer(int256 _answer) returns() +func (_MockV3AggregatorContract *MockV3AggregatorContractTransactorSession) UpdateAnswer(_answer *big.Int) (*types.Transaction, error) { + return _MockV3AggregatorContract.Contract.UpdateAnswer(&_MockV3AggregatorContract.TransactOpts, _answer) +} + +// UpdateRoundData is a paid mutator transaction binding the contract method 0x4aa2011f. +// +// Solidity: function updateRoundData(uint80 _roundId, int256 _answer, uint256 _timestamp, uint256 _startedAt) returns() +func (_MockV3AggregatorContract *MockV3AggregatorContractTransactor) UpdateRoundData(opts *bind.TransactOpts, _roundId *big.Int, _answer *big.Int, _timestamp *big.Int, _startedAt *big.Int) (*types.Transaction, error) { + return _MockV3AggregatorContract.contract.Transact(opts, "updateRoundData", _roundId, _answer, _timestamp, _startedAt) +} + +// UpdateRoundData is a paid mutator transaction binding the contract method 0x4aa2011f. +// +// Solidity: function updateRoundData(uint80 _roundId, int256 _answer, uint256 _timestamp, uint256 _startedAt) returns() +func (_MockV3AggregatorContract *MockV3AggregatorContractSession) UpdateRoundData(_roundId *big.Int, _answer *big.Int, _timestamp *big.Int, _startedAt *big.Int) (*types.Transaction, error) { + return _MockV3AggregatorContract.Contract.UpdateRoundData(&_MockV3AggregatorContract.TransactOpts, _roundId, _answer, _timestamp, _startedAt) +} + +// UpdateRoundData is a paid mutator transaction binding the contract method 0x4aa2011f. 
+// +// Solidity: function updateRoundData(uint80 _roundId, int256 _answer, uint256 _timestamp, uint256 _startedAt) returns() +func (_MockV3AggregatorContract *MockV3AggregatorContractTransactorSession) UpdateRoundData(_roundId *big.Int, _answer *big.Int, _timestamp *big.Int, _startedAt *big.Int) (*types.Transaction, error) { + return _MockV3AggregatorContract.Contract.UpdateRoundData(&_MockV3AggregatorContract.TransactOpts, _roundId, _answer, _timestamp, _startedAt) +} + +// MockV3AggregatorContractAnswerUpdatedIterator is returned from FilterAnswerUpdated and is used to iterate over the raw logs and unpacked data for AnswerUpdated events raised by the MockV3AggregatorContract contract. +type MockV3AggregatorContractAnswerUpdatedIterator struct { + Event *MockV3AggregatorContractAnswerUpdated // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *MockV3AggregatorContractAnswerUpdatedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(MockV3AggregatorContractAnswerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(MockV3AggregatorContractAnswerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *MockV3AggregatorContractAnswerUpdatedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *MockV3AggregatorContractAnswerUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// MockV3AggregatorContractAnswerUpdated represents a AnswerUpdated event raised by the MockV3AggregatorContract contract. +type MockV3AggregatorContractAnswerUpdated struct { + Current *big.Int + RoundId *big.Int + UpdatedAt *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterAnswerUpdated is a free log retrieval operation binding the contract event 0x0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f. 
+// +// Solidity: event AnswerUpdated(int256 indexed current, uint256 indexed roundId, uint256 updatedAt) +func (_MockV3AggregatorContract *MockV3AggregatorContractFilterer) FilterAnswerUpdated(opts *bind.FilterOpts, current []*big.Int, roundId []*big.Int) (*MockV3AggregatorContractAnswerUpdatedIterator, error) { + + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + + logs, sub, err := _MockV3AggregatorContract.contract.FilterLogs(opts, "AnswerUpdated", currentRule, roundIdRule) + if err != nil { + return nil, err + } + return &MockV3AggregatorContractAnswerUpdatedIterator{contract: _MockV3AggregatorContract.contract, event: "AnswerUpdated", logs: logs, sub: sub}, nil +} + +// WatchAnswerUpdated is a free log subscription operation binding the contract event 0x0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f. 
+// +// Solidity: event AnswerUpdated(int256 indexed current, uint256 indexed roundId, uint256 updatedAt) +func (_MockV3AggregatorContract *MockV3AggregatorContractFilterer) WatchAnswerUpdated(opts *bind.WatchOpts, sink chan<- *MockV3AggregatorContractAnswerUpdated, current []*big.Int, roundId []*big.Int) (event.Subscription, error) { + + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + + logs, sub, err := _MockV3AggregatorContract.contract.WatchLogs(opts, "AnswerUpdated", currentRule, roundIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(MockV3AggregatorContractAnswerUpdated) + if err := _MockV3AggregatorContract.contract.UnpackLog(event, "AnswerUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseAnswerUpdated is a log parse operation binding the contract event 0x0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f. 
+// +// Solidity: event AnswerUpdated(int256 indexed current, uint256 indexed roundId, uint256 updatedAt) +func (_MockV3AggregatorContract *MockV3AggregatorContractFilterer) ParseAnswerUpdated(log types.Log) (*MockV3AggregatorContractAnswerUpdated, error) { + event := new(MockV3AggregatorContractAnswerUpdated) + if err := _MockV3AggregatorContract.contract.UnpackLog(event, "AnswerUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// MockV3AggregatorContractNewRoundIterator is returned from FilterNewRound and is used to iterate over the raw logs and unpacked data for NewRound events raised by the MockV3AggregatorContract contract. +type MockV3AggregatorContractNewRoundIterator struct { + Event *MockV3AggregatorContractNewRound // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *MockV3AggregatorContractNewRoundIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(MockV3AggregatorContractNewRound) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(MockV3AggregatorContractNewRound) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *MockV3AggregatorContractNewRoundIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *MockV3AggregatorContractNewRoundIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// MockV3AggregatorContractNewRound represents a NewRound event raised by the MockV3AggregatorContract contract. +type MockV3AggregatorContractNewRound struct { + RoundId *big.Int + StartedBy common.Address + StartedAt *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterNewRound is a free log retrieval operation binding the contract event 0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271. 
+// +// Solidity: event NewRound(uint256 indexed roundId, address indexed startedBy, uint256 startedAt) +func (_MockV3AggregatorContract *MockV3AggregatorContractFilterer) FilterNewRound(opts *bind.FilterOpts, roundId []*big.Int, startedBy []common.Address) (*MockV3AggregatorContractNewRoundIterator, error) { + + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + var startedByRule []interface{} + for _, startedByItem := range startedBy { + startedByRule = append(startedByRule, startedByItem) + } + + logs, sub, err := _MockV3AggregatorContract.contract.FilterLogs(opts, "NewRound", roundIdRule, startedByRule) + if err != nil { + return nil, err + } + return &MockV3AggregatorContractNewRoundIterator{contract: _MockV3AggregatorContract.contract, event: "NewRound", logs: logs, sub: sub}, nil +} + +// WatchNewRound is a free log subscription operation binding the contract event 0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271. 
+// +// Solidity: event NewRound(uint256 indexed roundId, address indexed startedBy, uint256 startedAt) +func (_MockV3AggregatorContract *MockV3AggregatorContractFilterer) WatchNewRound(opts *bind.WatchOpts, sink chan<- *MockV3AggregatorContractNewRound, roundId []*big.Int, startedBy []common.Address) (event.Subscription, error) { + + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + var startedByRule []interface{} + for _, startedByItem := range startedBy { + startedByRule = append(startedByRule, startedByItem) + } + + logs, sub, err := _MockV3AggregatorContract.contract.WatchLogs(opts, "NewRound", roundIdRule, startedByRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(MockV3AggregatorContractNewRound) + if err := _MockV3AggregatorContract.contract.UnpackLog(event, "NewRound", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseNewRound is a log parse operation binding the contract event 0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271. 
+// +// Solidity: event NewRound(uint256 indexed roundId, address indexed startedBy, uint256 startedAt) +func (_MockV3AggregatorContract *MockV3AggregatorContractFilterer) ParseNewRound(log types.Log) (*MockV3AggregatorContractNewRound, error) { + event := new(MockV3AggregatorContractNewRound) + if err := _MockV3AggregatorContract.contract.UnpackLog(event, "NewRound", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} diff --git a/core/gethwrappers/generated/multiwordconsumer_wrapper/multiwordconsumer_wrapper.go b/core/gethwrappers/generated/multiwordconsumer_wrapper/multiwordconsumer_wrapper.go new file mode 100644 index 00000000..4b1ab1f5 --- /dev/null +++ b/core/gethwrappers/generated/multiwordconsumer_wrapper/multiwordconsumer_wrapper.go @@ -0,0 +1,1403 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package multiwordconsumer_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var MultiWordConsumerMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"_specId\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"PluginCancelled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"PluginFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"PluginRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes\",\"name\":\"price\",\"type\":\"bytes\"}],\"name\":\"RequestFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"usd\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"eur\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"jpy\",\"type\":\"bytes32\"}],\"name\":\"RequestMultipleFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"usd\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"eur\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"jpy\",\"type\":\"uint256\"}],\"name\":\"RequestMultipleFulfilledWithCustomURLs\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\"
,\"name\":\"_oracle\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"}],\"name\":\"addExternalRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_payment\",\"type\":\"uint256\"},{\"internalType\":\"bytes4\",\"name\":\"_callbackFunctionId\",\"type\":\"bytes4\"},{\"internalType\":\"uint256\",\"name\":\"_expiration\",\"type\":\"uint256\"}],\"name\":\"cancelRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"currentPrice\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"eur\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"eurInt\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"_price\",\"type\":\"bytes\"}],\"name\":\"fulfillBytes\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_usd\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_eur\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_jpy\",\"type\":\"bytes32\"}],\"name\":\"fulfillMultipleParameters\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"},{\"internalType\":\
"uint256\",\"name\":\"_usd\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_eur\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_jpy\",\"type\":\"uint256\"}],\"name\":\"fulfillMultipleParametersWithCustomURLs\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"jpy\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"jpyInt\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"publicGetNextRequestCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"_currency\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"_payment\",\"type\":\"uint256\"}],\"name\":\"requestEthereumPrice\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"_currency\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"_payment\",\"type\":\"uint256\"}],\"name\":\"requestMultipleParameters\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"_urlUSD\",\"type\":\"string\"},{\"internalType\":\"string\",\"name\":\"_pathUSD\",\"type\":\"string\"},{\"internalType\":\"string\",\"name\":\"_urlEUR\",\"type\":\"string\"},{\"internalType\":\"string\",\"name\":\"_pathEUR\",\"type\":\"string\"},{\"internalType\":\"string\",\"name\":\"_urlJPY\",\"type\":\"string\"},{\"internalType\":\"string\",\"name\":\"_pathJPY\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"_payment\",\"type\":\"uint256\"}],\"name\":\"requestMultipleParametersWithCustomURLs\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"ty
pe\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_specId\",\"type\":\"bytes32\"}],\"name\":\"setSpecID\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"usd\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"usdInt\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawLink\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x6080604052600160045534801561001557600080fd5b50604051611cbc380380611cbc8339818101604052606081101561003857600080fd5b508051602082015160409092015190919061005283610066565b61005b82610088565b600655506100aa9050565b600280546001600160a01b0319166001600160a01b0392909216919091179055565b600380546001600160a01b0319166001600160a01b0392909216919091179055565b611c03806100b96000396000f3fe608060405234801561001057600080fd5b50600436106101355760003560e01c80639d1b464a116100b2578063d63a6ccd11610081578063e8d5359d11610066578063e8d5359d14610804578063ef5934731461083d578063faa367611461086c57610135565b8063d63a6ccd14610754578063e89855ba1461075c57610135565b80639d1b464a146105f3578063a856ff6b14610670578063b44cb4691461069f578063c2fb8523146106a757610135565b8063673cd6aa1161010957806383db5cbc116100ee57806383db5cbc146101f85780638dc654a2146102a0578063938649e5146102a857610135565b8063673cd6aa146101e85780637439ae59146101f057610135565b80629879571461013a5780632f0dc45814610154578063501fdd5d1461015c5780635591a6081461017b575b600080fd5b610142610874565b60408051918252519081900360200190f35b61014261087a565b6101796004803603602081101561017257600080fd5b5035610880565b005b610179600480360360a081101561019157600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813516906020810135906040810135907fffffffff00000000000000000000000000000000000000000000000000000000606082013516906080013561
0885565b61014261094c565b61014261095b565b6101796004803603604081101561020e57600080fd5b81019060208101813564010000000081111561022957600080fd5b82018360208201111561023b57600080fd5b8035906020019184600183028401116401000000008311171561025d57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295505091359250610961915050565b610179610988565b610179600480360360e08110156102be57600080fd5b8101906020810181356401000000008111156102d957600080fd5b8201836020820111156102eb57600080fd5b8035906020019184600183028401116401000000008311171561030d57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929594936020810193503591505064010000000081111561036057600080fd5b82018360208201111561037257600080fd5b8035906020019184600183028401116401000000008311171561039457600080fd5b91908080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525092959493602081019350359150506401000000008111156103e757600080fd5b8201836020820111156103f957600080fd5b8035906020019184600183028401116401000000008311171561041b57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929594936020810193503591505064010000000081111561046e57600080fd5b82018360208201111561048057600080fd5b803590602001918460018302840111640100000000831117156104a257600080fd5b91908080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525092959493602081019350359150506401000000008111156104f557600080fd5b82018360208201111561050757600080fd5b8035906020019184600183028401116401000000008311171561052957600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929594936020810193503591505064010000000081111561057c57600080fd5b82018360208201111561058e57600080fd5b803590602001918460018302840111640100000000831117156105b057600080fd5b91908080601f01602080910402602001604051908101604052809392
91908181526020018383808284376000920191909152509295505091359250610b52915050565b6105fb610cff565b6040805160208082528351818301528351919283929083019185019080838360005b8381101561063557818101518382015260200161061d565b50505050905090810190601f1680156106625780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b6101796004803603608081101561068657600080fd5b5080359060208101359060408101359060600135610dab565b610142610ece565b610179600480360360408110156106bd57600080fd5b813591908101906040810160208201356401000000008111156106df57600080fd5b8201836020820111156106f157600080fd5b8035906020019184600183028401116401000000008311171561071357600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610ed4945050505050565b610142611080565b6101796004803603604081101561077257600080fd5b81019060208101813564010000000081111561078d57600080fd5b82018360208201111561079f57600080fd5b803590602001918460018302840111640100000000831117156107c157600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295505091359250611086915050565b6101796004803603604081101561081a57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff813516906020013561109b565b6101796004803603608081101561085357600080fd5b50803590602081013590604081013590606001356110a9565b6101426111cc565b600c5481565b600d5481565b600655565b604080517f6ee4d55300000000000000000000000000000000000000000000000000000000815260048101869052602481018590527fffffffff0000000000000000000000000000000000000000000000000000000084166044820152606481018390529051869173ffffffffffffffffffffffffffffffffffffffff831691636ee4d5539160848082019260009290919082900301818387803b15801561092c57600080fd5b505af1158015610940573d6000803e3d6000fd5b50505050505050505050565b60006109566111d2565b905090565b60095481565b600061097660065463c2fb852360e01b6111d8565b905061098281836111fe565b50505050565b600061099261122c565b90508073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb338373ff
ffffffffffffffffffffffffffffffffffffff166370a08231306040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015610a1857600080fd5b505afa158015610a2c573d6000803e3d6000fd5b505050506040513d6020811015610a4257600080fd5b5051604080517fffffffff0000000000000000000000000000000000000000000000000000000060e086901b16815273ffffffffffffffffffffffffffffffffffffffff909316600484015260248301919091525160448083019260209291908290030181600087803b158015610ab857600080fd5b505af1158015610acc573d6000803e3d6000fd5b505050506040513d6020811015610ae257600080fd5b5051610b4f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f556e61626c6520746f207472616e736665720000000000000000000000000000604482015290519081900360640190fd5b50565b6000610b6760065463ef59347360e01b6111d8565b60408051808201909152600681527f75726c55534400000000000000000000000000000000000000000000000000006020820152909150610baa9082908a611248565b60408051808201909152600781527f70617468555344000000000000000000000000000000000000000000000000006020820152610bea90829089611248565b60408051808201909152600681527f75726c45555200000000000000000000000000000000000000000000000000006020820152610c2a90829088611248565b60408051808201909152600781527f70617468455552000000000000000000000000000000000000000000000000006020820152610c6a90829087611248565b60408051808201909152600681527f75726c4a505900000000000000000000000000000000000000000000000000006020820152610caa90829086611248565b60408051808201909152600781527f706174684a5059000000000000000000000000000000000000000000000000006020820152610cea90829085611248565b610cf481836111fe565b505050505050505050565b6007805460408051602060026001851615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f81018490048402820184019092528181529291830182828015610da35780601f10610d7857610100808354040283529160200191610da3565b820191906000526020600020905b815481529060010190602001808311610d8657829003
601f168201915b505050505081565b600084815260056020526040902054849073ffffffffffffffffffffffffffffffffffffffff163314610e29576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526028815260200180611bcf6028913960400191505060405180910390fd5b60008181526005602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001690555182917f7cc135e0cebb02c3480ae5d74d377283180a2601f8f644edf7987b009316c63a91a28284867f0ec0c13e44aa04198947078cb990660252870dd3363f4c4bb3cc780f808dabbe856040518082815260200191505060405180910390a450600892909255600955600a5550565b600b5481565b600082815260056020526040902054829073ffffffffffffffffffffffffffffffffffffffff163314610f52576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526028815260200180611bcf6028913960400191505060405180910390fd5b60008181526005602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001690555182917f7cc135e0cebb02c3480ae5d74d377283180a2601f8f644edf7987b009316c63a91a2816040518082805190602001908083835b60208310610ffb57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fbe565b5181516020939093036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990911692169190911790526040519201829003822093508692507f1a111c5dcf9a71088bd5e1797fdfaf399fec2afbb24aca247e4e3e9f4b61df919160009150a38151610982906007906020850190611abb565b60085481565b600061097660065463a856ff6b60e01b6111d8565b6110a5828261126b565b5050565b600084815260056020526040902054849073ffffffffffffffffffffffffffffffffffffffff163314611127576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526028815260200180611bcf6028913960400191505060405180910390fd5b60008181526005602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001690555182917f7cc135e0cebb02c3480ae5d74d377283180a2601f8f644edf7987b009316c63a91a28284
867f077e16d6f19163c0c96d84a7bff48b4ba41f3956f95d6fb0e584ec77297fe245856040518082815260200191505060405180910390a450600b92909255600c55600d5550565b600a5481565b60045490565b6111e0611b47565b6111e8611b47565b6111f481853086611352565b9150505b92915050565b6003546000906112259073ffffffffffffffffffffffffffffffffffffffff1684846113bd565b9392505050565b60025473ffffffffffffffffffffffffffffffffffffffff1690565b60808301516112579083611538565b60808301516112669082611538565b505050565b600081815260056020526040902054819073ffffffffffffffffffffffffffffffffffffffff16156112fe57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f5265717565737420697320616c72656164792070656e64696e67000000000000604482015290519081900360640190fd5b50600090815260056020526040902080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b61135a611b47565b61136a856080015161010061154f565b505082845273ffffffffffffffffffffffffffffffffffffffff821660208501527fffffffff0000000000000000000000000000000000000000000000000000000081166040850152835b949350505050565b6000806004549050806001016004819055506000633c6d41b960e01b600080876000015188604001518660028b6080015160000151604051602401808873ffffffffffffffffffffffffffffffffffffffff168152602001878152602001868152602001857bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916815260200184815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b8381101561148b578181015183820152602001611473565b50505050905090810190601f1680156114b85780820380516001836020036101000a031916815260200191505b5098505050505050505050604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050905061152e86838684611589565b9695505050505050565b611545826003835161179c565b61126682826118b1565b611557611b7c565b602082061561156c5760208206602003820191505b
506020828101829052604080518085526000815290920101905290565b604080513060601b60208083019190915260348083018790528351808403909101815260549092018084528251928201929092206000818152600590925292812080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff891617905582917fb5e6e01e79f91267dc17b4e6314d5d4d03593d2ceee0fbb452b750bd70ea5af99190a26002546040517f4000aea000000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff87811660048301908152602483018790526060604484019081528651606485015286519290941693634000aea0938a938993899390929091608490910190602085019080838360005b838110156116cd5781810151838201526020016116b5565b50505050905090810190601f1680156116fa5780820380516001836020036101000a031916815260200191505b50945050505050602060405180830381600087803b15801561171b57600080fd5b505af115801561172f573d6000803e3d6000fd5b505050506040513d602081101561174557600080fd5b50516113b5576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611bac6023913960400191505060405180910390fd5b60178167ffffffffffffffff16116117c7576117c18360e0600585901b1683176118cb565b50611266565b60ff8167ffffffffffffffff1611611805576117ee836018611fe0600586901b16176118cb565b506117c18367ffffffffffffffff831660016118e3565b61ffff8167ffffffffffffffff16116118445761182d836019611fe0600586901b16176118cb565b506117c18367ffffffffffffffff831660026118e3565b63ffffffff8167ffffffffffffffff16116118855761186e83601a611fe0600586901b16176118cb565b506117c18367ffffffffffffffff831660046118e3565b61189a83601b611fe0600586901b16176118cb565b506109828367ffffffffffffffff831660086118e3565b6118b9611b7c565b611225838460000151518485516118fc565b6118d3611b7c565b61122583846000015151846119e4565b6118eb611b7c565b6113b5848560000151518585611a2f565b611904611b7c565b825182111561191257600080fd5b8460200151828501111561193c5761193c856119348760200151878601611a8d565b600202611aa4565b60008086518051876020830101935080888701111561195b
5787860182525b505050602084015b602084106119a057805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09093019260209182019101611963565b5181517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60208690036101000a019081169019919091161790525083949350505050565b6119ec611b7c565b83602001518310611a0857611a08848560200151600202611aa4565b835180516020858301018481535080851415611a25576001810182525b5093949350505050565b611a37611b7c565b84602001518483011115611a5457611a5485858401600202611aa4565b60006001836101000a039050855183868201018583198251161781525080518487011115611a825783860181525b509495945050505050565b600081831115611a9e5750816111f8565b50919050565b8151611ab0838361154f565b5061098283826118b1565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282611af15760008555611b37565b82601f10611b0a57805160ff1916838001178555611b37565b82800160010185558215611b37579182015b82811115611b37578251825591602001919060010190611b1c565b50611b43929150611b96565b5090565b6040805160a081018252600080825260208201819052918101829052606081019190915260808101611b77611b7c565b905290565b604051806040016040528060608152602001600081525090565b5b80821115611b435760008155600101611b9756fe756e61626c6520746f207472616e73666572416e6443616c6c20746f206f7261636c65536f75726365206d75737420626520746865206f7261636c65206f66207468652072657175657374a164736f6c6343000706000a", +} + +var MultiWordConsumerABI = MultiWordConsumerMetaData.ABI + +var MultiWordConsumerBin = MultiWordConsumerMetaData.Bin + +func DeployMultiWordConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address, _oracle common.Address, _specId [32]byte) (common.Address, *types.Transaction, *MultiWordConsumer, error) { + parsed, err := MultiWordConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, 
common.FromHex(MultiWordConsumerBin), backend, _link, _oracle, _specId) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &MultiWordConsumer{address: address, abi: *parsed, MultiWordConsumerCaller: MultiWordConsumerCaller{contract: contract}, MultiWordConsumerTransactor: MultiWordConsumerTransactor{contract: contract}, MultiWordConsumerFilterer: MultiWordConsumerFilterer{contract: contract}}, nil +} + +type MultiWordConsumer struct { + address common.Address + abi abi.ABI + MultiWordConsumerCaller + MultiWordConsumerTransactor + MultiWordConsumerFilterer +} + +type MultiWordConsumerCaller struct { + contract *bind.BoundContract +} + +type MultiWordConsumerTransactor struct { + contract *bind.BoundContract +} + +type MultiWordConsumerFilterer struct { + contract *bind.BoundContract +} + +type MultiWordConsumerSession struct { + Contract *MultiWordConsumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type MultiWordConsumerCallerSession struct { + Contract *MultiWordConsumerCaller + CallOpts bind.CallOpts +} + +type MultiWordConsumerTransactorSession struct { + Contract *MultiWordConsumerTransactor + TransactOpts bind.TransactOpts +} + +type MultiWordConsumerRaw struct { + Contract *MultiWordConsumer +} + +type MultiWordConsumerCallerRaw struct { + Contract *MultiWordConsumerCaller +} + +type MultiWordConsumerTransactorRaw struct { + Contract *MultiWordConsumerTransactor +} + +func NewMultiWordConsumer(address common.Address, backend bind.ContractBackend) (*MultiWordConsumer, error) { + abi, err := abi.JSON(strings.NewReader(MultiWordConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindMultiWordConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &MultiWordConsumer{address: address, abi: abi, MultiWordConsumerCaller: MultiWordConsumerCaller{contract: contract}, MultiWordConsumerTransactor: MultiWordConsumerTransactor{contract: contract}, 
MultiWordConsumerFilterer: MultiWordConsumerFilterer{contract: contract}}, nil +} + +func NewMultiWordConsumerCaller(address common.Address, caller bind.ContractCaller) (*MultiWordConsumerCaller, error) { + contract, err := bindMultiWordConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &MultiWordConsumerCaller{contract: contract}, nil +} + +func NewMultiWordConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*MultiWordConsumerTransactor, error) { + contract, err := bindMultiWordConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &MultiWordConsumerTransactor{contract: contract}, nil +} + +func NewMultiWordConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*MultiWordConsumerFilterer, error) { + contract, err := bindMultiWordConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &MultiWordConsumerFilterer{contract: contract}, nil +} + +func bindMultiWordConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := MultiWordConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_MultiWordConsumer *MultiWordConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _MultiWordConsumer.Contract.MultiWordConsumerCaller.contract.Call(opts, result, method, params...) 
+} + +func (_MultiWordConsumer *MultiWordConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.MultiWordConsumerTransactor.contract.Transfer(opts) +} + +func (_MultiWordConsumer *MultiWordConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.MultiWordConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_MultiWordConsumer *MultiWordConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _MultiWordConsumer.Contract.contract.Call(opts, result, method, params...) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.contract.Transfer(opts) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.contract.Transact(opts, method, params...) 
+} + +func (_MultiWordConsumer *MultiWordConsumerCaller) CurrentPrice(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _MultiWordConsumer.contract.Call(opts, &out, "currentPrice") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_MultiWordConsumer *MultiWordConsumerSession) CurrentPrice() ([]byte, error) { + return _MultiWordConsumer.Contract.CurrentPrice(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCallerSession) CurrentPrice() ([]byte, error) { + return _MultiWordConsumer.Contract.CurrentPrice(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCaller) Eur(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _MultiWordConsumer.contract.Call(opts, &out, "eur") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_MultiWordConsumer *MultiWordConsumerSession) Eur() ([32]byte, error) { + return _MultiWordConsumer.Contract.Eur(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCallerSession) Eur() ([32]byte, error) { + return _MultiWordConsumer.Contract.Eur(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCaller) EurInt(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MultiWordConsumer.contract.Call(opts, &out, "eurInt") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_MultiWordConsumer *MultiWordConsumerSession) EurInt() (*big.Int, error) { + return _MultiWordConsumer.Contract.EurInt(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCallerSession) EurInt() (*big.Int, error) { + return _MultiWordConsumer.Contract.EurInt(&_MultiWordConsumer.CallOpts) +} + +func 
(_MultiWordConsumer *MultiWordConsumerCaller) Jpy(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _MultiWordConsumer.contract.Call(opts, &out, "jpy") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_MultiWordConsumer *MultiWordConsumerSession) Jpy() ([32]byte, error) { + return _MultiWordConsumer.Contract.Jpy(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCallerSession) Jpy() ([32]byte, error) { + return _MultiWordConsumer.Contract.Jpy(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCaller) JpyInt(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MultiWordConsumer.contract.Call(opts, &out, "jpyInt") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_MultiWordConsumer *MultiWordConsumerSession) JpyInt() (*big.Int, error) { + return _MultiWordConsumer.Contract.JpyInt(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCallerSession) JpyInt() (*big.Int, error) { + return _MultiWordConsumer.Contract.JpyInt(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCaller) PublicGetNextRequestCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MultiWordConsumer.contract.Call(opts, &out, "publicGetNextRequestCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_MultiWordConsumer *MultiWordConsumerSession) PublicGetNextRequestCount() (*big.Int, error) { + return _MultiWordConsumer.Contract.PublicGetNextRequestCount(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCallerSession) PublicGetNextRequestCount() (*big.Int, error) { + return 
_MultiWordConsumer.Contract.PublicGetNextRequestCount(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCaller) Usd(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _MultiWordConsumer.contract.Call(opts, &out, "usd") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_MultiWordConsumer *MultiWordConsumerSession) Usd() ([32]byte, error) { + return _MultiWordConsumer.Contract.Usd(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCallerSession) Usd() ([32]byte, error) { + return _MultiWordConsumer.Contract.Usd(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCaller) UsdInt(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MultiWordConsumer.contract.Call(opts, &out, "usdInt") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_MultiWordConsumer *MultiWordConsumerSession) UsdInt() (*big.Int, error) { + return _MultiWordConsumer.Contract.UsdInt(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerCallerSession) UsdInt() (*big.Int, error) { + return _MultiWordConsumer.Contract.UsdInt(&_MultiWordConsumer.CallOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactor) AddExternalRequest(opts *bind.TransactOpts, _oracle common.Address, _requestId [32]byte) (*types.Transaction, error) { + return _MultiWordConsumer.contract.Transact(opts, "addExternalRequest", _oracle, _requestId) +} + +func (_MultiWordConsumer *MultiWordConsumerSession) AddExternalRequest(_oracle common.Address, _requestId [32]byte) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.AddExternalRequest(&_MultiWordConsumer.TransactOpts, _oracle, _requestId) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactorSession) 
AddExternalRequest(_oracle common.Address, _requestId [32]byte) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.AddExternalRequest(&_MultiWordConsumer.TransactOpts, _oracle, _requestId) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactor) CancelRequest(opts *bind.TransactOpts, _oracle common.Address, _requestId [32]byte, _payment *big.Int, _callbackFunctionId [4]byte, _expiration *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.contract.Transact(opts, "cancelRequest", _oracle, _requestId, _payment, _callbackFunctionId, _expiration) +} + +func (_MultiWordConsumer *MultiWordConsumerSession) CancelRequest(_oracle common.Address, _requestId [32]byte, _payment *big.Int, _callbackFunctionId [4]byte, _expiration *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.CancelRequest(&_MultiWordConsumer.TransactOpts, _oracle, _requestId, _payment, _callbackFunctionId, _expiration) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactorSession) CancelRequest(_oracle common.Address, _requestId [32]byte, _payment *big.Int, _callbackFunctionId [4]byte, _expiration *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.CancelRequest(&_MultiWordConsumer.TransactOpts, _oracle, _requestId, _payment, _callbackFunctionId, _expiration) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactor) FulfillBytes(opts *bind.TransactOpts, _requestId [32]byte, _price []byte) (*types.Transaction, error) { + return _MultiWordConsumer.contract.Transact(opts, "fulfillBytes", _requestId, _price) +} + +func (_MultiWordConsumer *MultiWordConsumerSession) FulfillBytes(_requestId [32]byte, _price []byte) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.FulfillBytes(&_MultiWordConsumer.TransactOpts, _requestId, _price) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactorSession) FulfillBytes(_requestId [32]byte, _price []byte) (*types.Transaction, error) { + return 
_MultiWordConsumer.Contract.FulfillBytes(&_MultiWordConsumer.TransactOpts, _requestId, _price) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactor) FulfillMultipleParameters(opts *bind.TransactOpts, _requestId [32]byte, _usd [32]byte, _eur [32]byte, _jpy [32]byte) (*types.Transaction, error) { + return _MultiWordConsumer.contract.Transact(opts, "fulfillMultipleParameters", _requestId, _usd, _eur, _jpy) +} + +func (_MultiWordConsumer *MultiWordConsumerSession) FulfillMultipleParameters(_requestId [32]byte, _usd [32]byte, _eur [32]byte, _jpy [32]byte) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.FulfillMultipleParameters(&_MultiWordConsumer.TransactOpts, _requestId, _usd, _eur, _jpy) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactorSession) FulfillMultipleParameters(_requestId [32]byte, _usd [32]byte, _eur [32]byte, _jpy [32]byte) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.FulfillMultipleParameters(&_MultiWordConsumer.TransactOpts, _requestId, _usd, _eur, _jpy) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactor) FulfillMultipleParametersWithCustomURLs(opts *bind.TransactOpts, _requestId [32]byte, _usd *big.Int, _eur *big.Int, _jpy *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.contract.Transact(opts, "fulfillMultipleParametersWithCustomURLs", _requestId, _usd, _eur, _jpy) +} + +func (_MultiWordConsumer *MultiWordConsumerSession) FulfillMultipleParametersWithCustomURLs(_requestId [32]byte, _usd *big.Int, _eur *big.Int, _jpy *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.FulfillMultipleParametersWithCustomURLs(&_MultiWordConsumer.TransactOpts, _requestId, _usd, _eur, _jpy) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactorSession) FulfillMultipleParametersWithCustomURLs(_requestId [32]byte, _usd *big.Int, _eur *big.Int, _jpy *big.Int) (*types.Transaction, error) { + return 
_MultiWordConsumer.Contract.FulfillMultipleParametersWithCustomURLs(&_MultiWordConsumer.TransactOpts, _requestId, _usd, _eur, _jpy) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactor) RequestEthereumPrice(opts *bind.TransactOpts, _currency string, _payment *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.contract.Transact(opts, "requestEthereumPrice", _currency, _payment) +} + +func (_MultiWordConsumer *MultiWordConsumerSession) RequestEthereumPrice(_currency string, _payment *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.RequestEthereumPrice(&_MultiWordConsumer.TransactOpts, _currency, _payment) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactorSession) RequestEthereumPrice(_currency string, _payment *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.RequestEthereumPrice(&_MultiWordConsumer.TransactOpts, _currency, _payment) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactor) RequestMultipleParameters(opts *bind.TransactOpts, _currency string, _payment *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.contract.Transact(opts, "requestMultipleParameters", _currency, _payment) +} + +func (_MultiWordConsumer *MultiWordConsumerSession) RequestMultipleParameters(_currency string, _payment *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.RequestMultipleParameters(&_MultiWordConsumer.TransactOpts, _currency, _payment) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactorSession) RequestMultipleParameters(_currency string, _payment *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.RequestMultipleParameters(&_MultiWordConsumer.TransactOpts, _currency, _payment) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactor) RequestMultipleParametersWithCustomURLs(opts *bind.TransactOpts, _urlUSD string, _pathUSD string, _urlEUR string, _pathEUR string, _urlJPY string, _pathJPY string, _payment 
*big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.contract.Transact(opts, "requestMultipleParametersWithCustomURLs", _urlUSD, _pathUSD, _urlEUR, _pathEUR, _urlJPY, _pathJPY, _payment) +} + +func (_MultiWordConsumer *MultiWordConsumerSession) RequestMultipleParametersWithCustomURLs(_urlUSD string, _pathUSD string, _urlEUR string, _pathEUR string, _urlJPY string, _pathJPY string, _payment *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.RequestMultipleParametersWithCustomURLs(&_MultiWordConsumer.TransactOpts, _urlUSD, _pathUSD, _urlEUR, _pathEUR, _urlJPY, _pathJPY, _payment) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactorSession) RequestMultipleParametersWithCustomURLs(_urlUSD string, _pathUSD string, _urlEUR string, _pathEUR string, _urlJPY string, _pathJPY string, _payment *big.Int) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.RequestMultipleParametersWithCustomURLs(&_MultiWordConsumer.TransactOpts, _urlUSD, _pathUSD, _urlEUR, _pathEUR, _urlJPY, _pathJPY, _payment) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactor) SetSpecID(opts *bind.TransactOpts, _specId [32]byte) (*types.Transaction, error) { + return _MultiWordConsumer.contract.Transact(opts, "setSpecID", _specId) +} + +func (_MultiWordConsumer *MultiWordConsumerSession) SetSpecID(_specId [32]byte) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.SetSpecID(&_MultiWordConsumer.TransactOpts, _specId) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactorSession) SetSpecID(_specId [32]byte) (*types.Transaction, error) { + return _MultiWordConsumer.Contract.SetSpecID(&_MultiWordConsumer.TransactOpts, _specId) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactor) WithdrawLink(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MultiWordConsumer.contract.Transact(opts, "withdrawLink") +} + +func (_MultiWordConsumer *MultiWordConsumerSession) WithdrawLink() (*types.Transaction, error) { 
+ return _MultiWordConsumer.Contract.WithdrawLink(&_MultiWordConsumer.TransactOpts) +} + +func (_MultiWordConsumer *MultiWordConsumerTransactorSession) WithdrawLink() (*types.Transaction, error) { + return _MultiWordConsumer.Contract.WithdrawLink(&_MultiWordConsumer.TransactOpts) +} + +type MultiWordConsumerPluginCancelledIterator struct { + Event *MultiWordConsumerPluginCancelled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *MultiWordConsumerPluginCancelledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(MultiWordConsumerPluginCancelled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(MultiWordConsumerPluginCancelled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *MultiWordConsumerPluginCancelledIterator) Error() error { + return it.fail +} + +func (it *MultiWordConsumerPluginCancelledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type MultiWordConsumerPluginCancelled struct { + Id [32]byte + Raw types.Log +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) FilterPluginCancelled(opts *bind.FilterOpts, id [][32]byte) (*MultiWordConsumerPluginCancelledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _MultiWordConsumer.contract.FilterLogs(opts, "PluginCancelled", idRule) + if err != nil { + return nil, err + } + return &MultiWordConsumerPluginCancelledIterator{contract: 
_MultiWordConsumer.contract, event: "PluginCancelled", logs: logs, sub: sub}, nil +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) WatchPluginCancelled(opts *bind.WatchOpts, sink chan<- *MultiWordConsumerPluginCancelled, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _MultiWordConsumer.contract.WatchLogs(opts, "PluginCancelled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(MultiWordConsumerPluginCancelled) + if err := _MultiWordConsumer.contract.UnpackLog(event, "PluginCancelled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) ParsePluginCancelled(log types.Log) (*MultiWordConsumerPluginCancelled, error) { + event := new(MultiWordConsumerPluginCancelled) + if err := _MultiWordConsumer.contract.UnpackLog(event, "PluginCancelled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type MultiWordConsumerPluginFulfilledIterator struct { + Event *MultiWordConsumerPluginFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *MultiWordConsumerPluginFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(MultiWordConsumerPluginFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + 
case log := <-it.logs: + it.Event = new(MultiWordConsumerPluginFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *MultiWordConsumerPluginFulfilledIterator) Error() error { + return it.fail +} + +func (it *MultiWordConsumerPluginFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type MultiWordConsumerPluginFulfilled struct { + Id [32]byte + Raw types.Log +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) FilterPluginFulfilled(opts *bind.FilterOpts, id [][32]byte) (*MultiWordConsumerPluginFulfilledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _MultiWordConsumer.contract.FilterLogs(opts, "PluginFulfilled", idRule) + if err != nil { + return nil, err + } + return &MultiWordConsumerPluginFulfilledIterator{contract: _MultiWordConsumer.contract, event: "PluginFulfilled", logs: logs, sub: sub}, nil +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) WatchPluginFulfilled(opts *bind.WatchOpts, sink chan<- *MultiWordConsumerPluginFulfilled, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _MultiWordConsumer.contract.WatchLogs(opts, "PluginFulfilled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(MultiWordConsumerPluginFulfilled) + if err := _MultiWordConsumer.contract.UnpackLog(event, "PluginFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) ParsePluginFulfilled(log types.Log) (*MultiWordConsumerPluginFulfilled, error) { + event := new(MultiWordConsumerPluginFulfilled) + if err := _MultiWordConsumer.contract.UnpackLog(event, "PluginFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type MultiWordConsumerPluginRequestedIterator struct { + Event *MultiWordConsumerPluginRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *MultiWordConsumerPluginRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(MultiWordConsumerPluginRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(MultiWordConsumerPluginRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *MultiWordConsumerPluginRequestedIterator) Error() error { + return it.fail +} + +func (it *MultiWordConsumerPluginRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type MultiWordConsumerPluginRequested struct { + Id [32]byte + Raw types.Log +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) FilterPluginRequested(opts *bind.FilterOpts, id [][32]byte) (*MultiWordConsumerPluginRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _MultiWordConsumer.contract.FilterLogs(opts, 
"PluginRequested", idRule) + if err != nil { + return nil, err + } + return &MultiWordConsumerPluginRequestedIterator{contract: _MultiWordConsumer.contract, event: "PluginRequested", logs: logs, sub: sub}, nil +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) WatchPluginRequested(opts *bind.WatchOpts, sink chan<- *MultiWordConsumerPluginRequested, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _MultiWordConsumer.contract.WatchLogs(opts, "PluginRequested", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(MultiWordConsumerPluginRequested) + if err := _MultiWordConsumer.contract.UnpackLog(event, "PluginRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) ParsePluginRequested(log types.Log) (*MultiWordConsumerPluginRequested, error) { + event := new(MultiWordConsumerPluginRequested) + if err := _MultiWordConsumer.contract.UnpackLog(event, "PluginRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type MultiWordConsumerRequestFulfilledIterator struct { + Event *MultiWordConsumerRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *MultiWordConsumerRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(MultiWordConsumerRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err 
!= nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(MultiWordConsumerRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *MultiWordConsumerRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *MultiWordConsumerRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type MultiWordConsumerRequestFulfilled struct { + RequestId [32]byte + Price common.Hash + Raw types.Log +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) FilterRequestFulfilled(opts *bind.FilterOpts, requestId [][32]byte, price [][]byte) (*MultiWordConsumerRequestFulfilledIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var priceRule []interface{} + for _, priceItem := range price { + priceRule = append(priceRule, priceItem) + } + + logs, sub, err := _MultiWordConsumer.contract.FilterLogs(opts, "RequestFulfilled", requestIdRule, priceRule) + if err != nil { + return nil, err + } + return &MultiWordConsumerRequestFulfilledIterator{contract: _MultiWordConsumer.contract, event: "RequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *MultiWordConsumerRequestFulfilled, requestId [][32]byte, price [][]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var priceRule []interface{} + for _, priceItem := range price { + priceRule = append(priceRule, priceItem) + } + + logs, sub, err := 
_MultiWordConsumer.contract.WatchLogs(opts, "RequestFulfilled", requestIdRule, priceRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(MultiWordConsumerRequestFulfilled) + if err := _MultiWordConsumer.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) ParseRequestFulfilled(log types.Log) (*MultiWordConsumerRequestFulfilled, error) { + event := new(MultiWordConsumerRequestFulfilled) + if err := _MultiWordConsumer.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type MultiWordConsumerRequestMultipleFulfilledIterator struct { + Event *MultiWordConsumerRequestMultipleFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *MultiWordConsumerRequestMultipleFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(MultiWordConsumerRequestMultipleFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(MultiWordConsumerRequestMultipleFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*MultiWordConsumerRequestMultipleFulfilledIterator) Error() error { + return it.fail +} + +func (it *MultiWordConsumerRequestMultipleFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type MultiWordConsumerRequestMultipleFulfilled struct { + RequestId [32]byte + Usd [32]byte + Eur [32]byte + Jpy [32]byte + Raw types.Log +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) FilterRequestMultipleFulfilled(opts *bind.FilterOpts, requestId [][32]byte, usd [][32]byte, eur [][32]byte) (*MultiWordConsumerRequestMultipleFulfilledIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var usdRule []interface{} + for _, usdItem := range usd { + usdRule = append(usdRule, usdItem) + } + var eurRule []interface{} + for _, eurItem := range eur { + eurRule = append(eurRule, eurItem) + } + + logs, sub, err := _MultiWordConsumer.contract.FilterLogs(opts, "RequestMultipleFulfilled", requestIdRule, usdRule, eurRule) + if err != nil { + return nil, err + } + return &MultiWordConsumerRequestMultipleFulfilledIterator{contract: _MultiWordConsumer.contract, event: "RequestMultipleFulfilled", logs: logs, sub: sub}, nil +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) WatchRequestMultipleFulfilled(opts *bind.WatchOpts, sink chan<- *MultiWordConsumerRequestMultipleFulfilled, requestId [][32]byte, usd [][32]byte, eur [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var usdRule []interface{} + for _, usdItem := range usd { + usdRule = append(usdRule, usdItem) + } + var eurRule []interface{} + for _, eurItem := range eur { + eurRule = append(eurRule, eurItem) + } + + logs, sub, err := _MultiWordConsumer.contract.WatchLogs(opts, "RequestMultipleFulfilled", requestIdRule, usdRule, eurRule) + if err != nil { + return nil, 
err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(MultiWordConsumerRequestMultipleFulfilled) + if err := _MultiWordConsumer.contract.UnpackLog(event, "RequestMultipleFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) ParseRequestMultipleFulfilled(log types.Log) (*MultiWordConsumerRequestMultipleFulfilled, error) { + event := new(MultiWordConsumerRequestMultipleFulfilled) + if err := _MultiWordConsumer.contract.UnpackLog(event, "RequestMultipleFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type MultiWordConsumerRequestMultipleFulfilledWithCustomURLsIterator struct { + Event *MultiWordConsumerRequestMultipleFulfilledWithCustomURLs + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *MultiWordConsumerRequestMultipleFulfilledWithCustomURLsIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(MultiWordConsumerRequestMultipleFulfilledWithCustomURLs) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(MultiWordConsumerRequestMultipleFulfilledWithCustomURLs) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*MultiWordConsumerRequestMultipleFulfilledWithCustomURLsIterator) Error() error { + return it.fail +} + +func (it *MultiWordConsumerRequestMultipleFulfilledWithCustomURLsIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type MultiWordConsumerRequestMultipleFulfilledWithCustomURLs struct { + RequestId [32]byte + Usd *big.Int + Eur *big.Int + Jpy *big.Int + Raw types.Log +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) FilterRequestMultipleFulfilledWithCustomURLs(opts *bind.FilterOpts, requestId [][32]byte, usd []*big.Int, eur []*big.Int) (*MultiWordConsumerRequestMultipleFulfilledWithCustomURLsIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var usdRule []interface{} + for _, usdItem := range usd { + usdRule = append(usdRule, usdItem) + } + var eurRule []interface{} + for _, eurItem := range eur { + eurRule = append(eurRule, eurItem) + } + + logs, sub, err := _MultiWordConsumer.contract.FilterLogs(opts, "RequestMultipleFulfilledWithCustomURLs", requestIdRule, usdRule, eurRule) + if err != nil { + return nil, err + } + return &MultiWordConsumerRequestMultipleFulfilledWithCustomURLsIterator{contract: _MultiWordConsumer.contract, event: "RequestMultipleFulfilledWithCustomURLs", logs: logs, sub: sub}, nil +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) WatchRequestMultipleFulfilledWithCustomURLs(opts *bind.WatchOpts, sink chan<- *MultiWordConsumerRequestMultipleFulfilledWithCustomURLs, requestId [][32]byte, usd []*big.Int, eur []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var usdRule []interface{} + for _, usdItem := range usd { + usdRule = append(usdRule, usdItem) + } + var eurRule []interface{} + for _, eurItem := range eur { + eurRule = append(eurRule, eurItem) + } + + logs, sub, err 
:= _MultiWordConsumer.contract.WatchLogs(opts, "RequestMultipleFulfilledWithCustomURLs", requestIdRule, usdRule, eurRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(MultiWordConsumerRequestMultipleFulfilledWithCustomURLs) + if err := _MultiWordConsumer.contract.UnpackLog(event, "RequestMultipleFulfilledWithCustomURLs", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_MultiWordConsumer *MultiWordConsumerFilterer) ParseRequestMultipleFulfilledWithCustomURLs(log types.Log) (*MultiWordConsumerRequestMultipleFulfilledWithCustomURLs, error) { + event := new(MultiWordConsumerRequestMultipleFulfilledWithCustomURLs) + if err := _MultiWordConsumer.contract.UnpackLog(event, "RequestMultipleFulfilledWithCustomURLs", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_MultiWordConsumer *MultiWordConsumer) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _MultiWordConsumer.abi.Events["PluginCancelled"].ID: + return _MultiWordConsumer.ParsePluginCancelled(log) + case _MultiWordConsumer.abi.Events["PluginFulfilled"].ID: + return _MultiWordConsumer.ParsePluginFulfilled(log) + case _MultiWordConsumer.abi.Events["PluginRequested"].ID: + return _MultiWordConsumer.ParsePluginRequested(log) + case _MultiWordConsumer.abi.Events["RequestFulfilled"].ID: + return _MultiWordConsumer.ParseRequestFulfilled(log) + case _MultiWordConsumer.abi.Events["RequestMultipleFulfilled"].ID: + return _MultiWordConsumer.ParseRequestMultipleFulfilled(log) + case _MultiWordConsumer.abi.Events["RequestMultipleFulfilledWithCustomURLs"].ID: + return 
_MultiWordConsumer.ParseRequestMultipleFulfilledWithCustomURLs(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (MultiWordConsumerPluginCancelled) Topic() common.Hash { + return common.HexToHash("0xe1fe3afa0f7f761ff0a8b89086790efd5140d2907ebd5b7ff6bfcb5e075fd4c5") +} + +func (MultiWordConsumerPluginFulfilled) Topic() common.Hash { + return common.HexToHash("0x7cc135e0cebb02c3480ae5d74d377283180a2601f8f644edf7987b009316c63a") +} + +func (MultiWordConsumerPluginRequested) Topic() common.Hash { + return common.HexToHash("0xb5e6e01e79f91267dc17b4e6314d5d4d03593d2ceee0fbb452b750bd70ea5af9") +} + +func (MultiWordConsumerRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0x1a111c5dcf9a71088bd5e1797fdfaf399fec2afbb24aca247e4e3e9f4b61df91") +} + +func (MultiWordConsumerRequestMultipleFulfilled) Topic() common.Hash { + return common.HexToHash("0x0ec0c13e44aa04198947078cb990660252870dd3363f4c4bb3cc780f808dabbe") +} + +func (MultiWordConsumerRequestMultipleFulfilledWithCustomURLs) Topic() common.Hash { + return common.HexToHash("0x077e16d6f19163c0c96d84a7bff48b4ba41f3956f95d6fb0e584ec77297fe245") +} + +func (_MultiWordConsumer *MultiWordConsumer) Address() common.Address { + return _MultiWordConsumer.address +} + +type MultiWordConsumerInterface interface { + CurrentPrice(opts *bind.CallOpts) ([]byte, error) + + Eur(opts *bind.CallOpts) ([32]byte, error) + + EurInt(opts *bind.CallOpts) (*big.Int, error) + + Jpy(opts *bind.CallOpts) ([32]byte, error) + + JpyInt(opts *bind.CallOpts) (*big.Int, error) + + PublicGetNextRequestCount(opts *bind.CallOpts) (*big.Int, error) + + Usd(opts *bind.CallOpts) ([32]byte, error) + + UsdInt(opts *bind.CallOpts) (*big.Int, error) + + AddExternalRequest(opts *bind.TransactOpts, _oracle common.Address, _requestId [32]byte) (*types.Transaction, error) + + CancelRequest(opts *bind.TransactOpts, _oracle common.Address, _requestId [32]byte, _payment *big.Int, 
_callbackFunctionId [4]byte, _expiration *big.Int) (*types.Transaction, error) + + FulfillBytes(opts *bind.TransactOpts, _requestId [32]byte, _price []byte) (*types.Transaction, error) + + FulfillMultipleParameters(opts *bind.TransactOpts, _requestId [32]byte, _usd [32]byte, _eur [32]byte, _jpy [32]byte) (*types.Transaction, error) + + FulfillMultipleParametersWithCustomURLs(opts *bind.TransactOpts, _requestId [32]byte, _usd *big.Int, _eur *big.Int, _jpy *big.Int) (*types.Transaction, error) + + RequestEthereumPrice(opts *bind.TransactOpts, _currency string, _payment *big.Int) (*types.Transaction, error) + + RequestMultipleParameters(opts *bind.TransactOpts, _currency string, _payment *big.Int) (*types.Transaction, error) + + RequestMultipleParametersWithCustomURLs(opts *bind.TransactOpts, _urlUSD string, _pathUSD string, _urlEUR string, _pathEUR string, _urlJPY string, _pathJPY string, _payment *big.Int) (*types.Transaction, error) + + SetSpecID(opts *bind.TransactOpts, _specId [32]byte) (*types.Transaction, error) + + WithdrawLink(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterPluginCancelled(opts *bind.FilterOpts, id [][32]byte) (*MultiWordConsumerPluginCancelledIterator, error) + + WatchPluginCancelled(opts *bind.WatchOpts, sink chan<- *MultiWordConsumerPluginCancelled, id [][32]byte) (event.Subscription, error) + + ParsePluginCancelled(log types.Log) (*MultiWordConsumerPluginCancelled, error) + + FilterPluginFulfilled(opts *bind.FilterOpts, id [][32]byte) (*MultiWordConsumerPluginFulfilledIterator, error) + + WatchPluginFulfilled(opts *bind.WatchOpts, sink chan<- *MultiWordConsumerPluginFulfilled, id [][32]byte) (event.Subscription, error) + + ParsePluginFulfilled(log types.Log) (*MultiWordConsumerPluginFulfilled, error) + + FilterPluginRequested(opts *bind.FilterOpts, id [][32]byte) (*MultiWordConsumerPluginRequestedIterator, error) + + WatchPluginRequested(opts *bind.WatchOpts, sink chan<- *MultiWordConsumerPluginRequested, id [][32]byte) 
(event.Subscription, error) + + ParsePluginRequested(log types.Log) (*MultiWordConsumerPluginRequested, error) + + FilterRequestFulfilled(opts *bind.FilterOpts, requestId [][32]byte, price [][]byte) (*MultiWordConsumerRequestFulfilledIterator, error) + + WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *MultiWordConsumerRequestFulfilled, requestId [][32]byte, price [][]byte) (event.Subscription, error) + + ParseRequestFulfilled(log types.Log) (*MultiWordConsumerRequestFulfilled, error) + + FilterRequestMultipleFulfilled(opts *bind.FilterOpts, requestId [][32]byte, usd [][32]byte, eur [][32]byte) (*MultiWordConsumerRequestMultipleFulfilledIterator, error) + + WatchRequestMultipleFulfilled(opts *bind.WatchOpts, sink chan<- *MultiWordConsumerRequestMultipleFulfilled, requestId [][32]byte, usd [][32]byte, eur [][32]byte) (event.Subscription, error) + + ParseRequestMultipleFulfilled(log types.Log) (*MultiWordConsumerRequestMultipleFulfilled, error) + + FilterRequestMultipleFulfilledWithCustomURLs(opts *bind.FilterOpts, requestId [][32]byte, usd []*big.Int, eur []*big.Int) (*MultiWordConsumerRequestMultipleFulfilledWithCustomURLsIterator, error) + + WatchRequestMultipleFulfilledWithCustomURLs(opts *bind.WatchOpts, sink chan<- *MultiWordConsumerRequestMultipleFulfilledWithCustomURLs, requestId [][32]byte, usd []*big.Int, eur []*big.Int) (event.Subscription, error) + + ParseRequestMultipleFulfilledWithCustomURLs(log types.Log) (*MultiWordConsumerRequestMultipleFulfilledWithCustomURLs, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/ocr2dr/ocr2dr.go b/core/gethwrappers/generated/ocr2dr/ocr2dr.go new file mode 100644 index 00000000..8cc65942 --- /dev/null +++ b/core/gethwrappers/generated/ocr2dr/ocr2dr.go @@ -0,0 +1,178 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package ocr2dr + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var OCR2DRMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"EmptyArgs\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySource\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyUrl\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoInlineSecrets\",\"type\":\"error\"}]", + Bin: "0x602d6037600b82828239805160001a607314602a57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe73000000000000000000000000000000000000000030146080604052600080fdfea164736f6c6343000806000a", +} + +var OCR2DRABI = OCR2DRMetaData.ABI + +var OCR2DRBin = OCR2DRMetaData.Bin + +func DeployOCR2DR(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *OCR2DR, error) { + parsed, err := OCR2DRMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OCR2DRBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OCR2DR{OCR2DRCaller: OCR2DRCaller{contract: contract}, OCR2DRTransactor: OCR2DRTransactor{contract: contract}, OCR2DRFilterer: OCR2DRFilterer{contract: contract}}, nil +} + +type OCR2DR struct { + address common.Address + abi abi.ABI + OCR2DRCaller + OCR2DRTransactor + 
OCR2DRFilterer +} + +type OCR2DRCaller struct { + contract *bind.BoundContract +} + +type OCR2DRTransactor struct { + contract *bind.BoundContract +} + +type OCR2DRFilterer struct { + contract *bind.BoundContract +} + +type OCR2DRSession struct { + Contract *OCR2DR + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OCR2DRCallerSession struct { + Contract *OCR2DRCaller + CallOpts bind.CallOpts +} + +type OCR2DRTransactorSession struct { + Contract *OCR2DRTransactor + TransactOpts bind.TransactOpts +} + +type OCR2DRRaw struct { + Contract *OCR2DR +} + +type OCR2DRCallerRaw struct { + Contract *OCR2DRCaller +} + +type OCR2DRTransactorRaw struct { + Contract *OCR2DRTransactor +} + +func NewOCR2DR(address common.Address, backend bind.ContractBackend) (*OCR2DR, error) { + abi, err := abi.JSON(strings.NewReader(OCR2DRABI)) + if err != nil { + return nil, err + } + contract, err := bindOCR2DR(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OCR2DR{address: address, abi: abi, OCR2DRCaller: OCR2DRCaller{contract: contract}, OCR2DRTransactor: OCR2DRTransactor{contract: contract}, OCR2DRFilterer: OCR2DRFilterer{contract: contract}}, nil +} + +func NewOCR2DRCaller(address common.Address, caller bind.ContractCaller) (*OCR2DRCaller, error) { + contract, err := bindOCR2DR(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OCR2DRCaller{contract: contract}, nil +} + +func NewOCR2DRTransactor(address common.Address, transactor bind.ContractTransactor) (*OCR2DRTransactor, error) { + contract, err := bindOCR2DR(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OCR2DRTransactor{contract: contract}, nil +} + +func NewOCR2DRFilterer(address common.Address, filterer bind.ContractFilterer) (*OCR2DRFilterer, error) { + contract, err := bindOCR2DR(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OCR2DRFilterer{contract: contract}, nil +} + +func 
bindOCR2DR(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OCR2DRMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OCR2DR *OCR2DRRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DR.Contract.OCR2DRCaller.contract.Call(opts, result, method, params...) +} + +func (_OCR2DR *OCR2DRRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DR.Contract.OCR2DRTransactor.contract.Transfer(opts) +} + +func (_OCR2DR *OCR2DRRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DR.Contract.OCR2DRTransactor.contract.Transact(opts, method, params...) +} + +func (_OCR2DR *OCR2DRCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DR.Contract.contract.Call(opts, result, method, params...) +} + +func (_OCR2DR *OCR2DRTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DR.Contract.contract.Transfer(opts) +} + +func (_OCR2DR *OCR2DRTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DR.Contract.contract.Transact(opts, method, params...) +} + +func (_OCR2DR *OCR2DR) Address() common.Address { + return _OCR2DR.address +} + +type OCR2DRInterface interface { + Address() common.Address +} diff --git a/core/gethwrappers/generated/ocr2dr_client/ocr2dr_client.go b/core/gethwrappers/generated/ocr2dr_client/ocr2dr_client.go new file mode 100644 index 00000000..9b6065f0 --- /dev/null +++ b/core/gethwrappers/generated/ocr2dr_client/ocr2dr_client.go @@ -0,0 +1,520 @@ +// Code generated - DO NOT EDIT. 
+// This file is a generated binding and any manual changes will be lost. + +package ocr2dr_client + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type FunctionsRequest struct { + CodeLocation uint8 + SecretsLocation uint8 + Language uint8 + Source string + Secrets []byte + Args []string +} + +var OCR2DRClientMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"NoInlineSecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RequestIsAlreadyPending\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RequestIsNotPending\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"SenderIsNotRegistry\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestSent\",\"type\":\"event\"},{\"inputs\":[{\"components\":[{\"internalType\":\"enumFunctions.Location\",\"name\":\"codeLocation\",\"type\":\"uint8\"},{\"internalType\":\"enumFunctions.Location\",\"name\":\"secretsLocation\",\"type\":\"uint8\"},{\"internalType\":\"enumFunctions.CodeLanguage\",\"name\":\"language\",\"type\":\"uint8\"},{\"internalType\":\"string\",\"name\":\"source\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"secrets\",\"type\":\"bytes\"},{\"internalType\":\"string[]\",\"name\"
:\"args\",\"type\":\"string[]\"}],\"internalType\":\"structFunctions.Request\",\"name\":\"req\",\"type\":\"tuple\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"}],\"name\":\"estimateCost\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDONPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"}],\"name\":\"handleOracleFulfillment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +var OCR2DRClientABI = OCR2DRClientMetaData.ABI + +type OCR2DRClient struct { + address common.Address + abi abi.ABI + OCR2DRClientCaller + OCR2DRClientTransactor + OCR2DRClientFilterer +} + +type OCR2DRClientCaller struct { + contract *bind.BoundContract +} + +type OCR2DRClientTransactor struct { + contract *bind.BoundContract +} + +type OCR2DRClientFilterer struct { + contract *bind.BoundContract +} + +type OCR2DRClientSession struct { + Contract *OCR2DRClient + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OCR2DRClientCallerSession struct { + Contract *OCR2DRClientCaller + CallOpts bind.CallOpts +} + +type OCR2DRClientTransactorSession struct { + Contract *OCR2DRClientTransactor + TransactOpts bind.TransactOpts +} + +type OCR2DRClientRaw struct { + Contract *OCR2DRClient +} + +type OCR2DRClientCallerRaw struct { + Contract *OCR2DRClientCaller +} + +type OCR2DRClientTransactorRaw struct { + Contract *OCR2DRClientTransactor +} + +func 
NewOCR2DRClient(address common.Address, backend bind.ContractBackend) (*OCR2DRClient, error) { + abi, err := abi.JSON(strings.NewReader(OCR2DRClientABI)) + if err != nil { + return nil, err + } + contract, err := bindOCR2DRClient(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OCR2DRClient{address: address, abi: abi, OCR2DRClientCaller: OCR2DRClientCaller{contract: contract}, OCR2DRClientTransactor: OCR2DRClientTransactor{contract: contract}, OCR2DRClientFilterer: OCR2DRClientFilterer{contract: contract}}, nil +} + +func NewOCR2DRClientCaller(address common.Address, caller bind.ContractCaller) (*OCR2DRClientCaller, error) { + contract, err := bindOCR2DRClient(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OCR2DRClientCaller{contract: contract}, nil +} + +func NewOCR2DRClientTransactor(address common.Address, transactor bind.ContractTransactor) (*OCR2DRClientTransactor, error) { + contract, err := bindOCR2DRClient(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OCR2DRClientTransactor{contract: contract}, nil +} + +func NewOCR2DRClientFilterer(address common.Address, filterer bind.ContractFilterer) (*OCR2DRClientFilterer, error) { + contract, err := bindOCR2DRClient(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OCR2DRClientFilterer{contract: contract}, nil +} + +func bindOCR2DRClient(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OCR2DRClientMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OCR2DRClient *OCR2DRClientRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DRClient.Contract.OCR2DRClientCaller.contract.Call(opts, result, method, params...) 
+} + +func (_OCR2DRClient *OCR2DRClientRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRClient.Contract.OCR2DRClientTransactor.contract.Transfer(opts) +} + +func (_OCR2DRClient *OCR2DRClientRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DRClient.Contract.OCR2DRClientTransactor.contract.Transact(opts, method, params...) +} + +func (_OCR2DRClient *OCR2DRClientCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DRClient.Contract.contract.Call(opts, result, method, params...) +} + +func (_OCR2DRClient *OCR2DRClientTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRClient.Contract.contract.Transfer(opts) +} + +func (_OCR2DRClient *OCR2DRClientTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DRClient.Contract.contract.Transact(opts, method, params...) 
+} + +func (_OCR2DRClient *OCR2DRClientCaller) EstimateCost(opts *bind.CallOpts, req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + var out []interface{} + err := _OCR2DRClient.contract.Call(opts, &out, "estimateCost", req, subscriptionId, gasLimit, gasPrice) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DRClient *OCR2DRClientSession) EstimateCost(req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + return _OCR2DRClient.Contract.EstimateCost(&_OCR2DRClient.CallOpts, req, subscriptionId, gasLimit, gasPrice) +} + +func (_OCR2DRClient *OCR2DRClientCallerSession) EstimateCost(req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + return _OCR2DRClient.Contract.EstimateCost(&_OCR2DRClient.CallOpts, req, subscriptionId, gasLimit, gasPrice) +} + +func (_OCR2DRClient *OCR2DRClientCaller) GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _OCR2DRClient.contract.Call(opts, &out, "getDONPublicKey") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_OCR2DRClient *OCR2DRClientSession) GetDONPublicKey() ([]byte, error) { + return _OCR2DRClient.Contract.GetDONPublicKey(&_OCR2DRClient.CallOpts) +} + +func (_OCR2DRClient *OCR2DRClientCallerSession) GetDONPublicKey() ([]byte, error) { + return _OCR2DRClient.Contract.GetDONPublicKey(&_OCR2DRClient.CallOpts) +} + +func (_OCR2DRClient *OCR2DRClientTransactor) HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _OCR2DRClient.contract.Transact(opts, "handleOracleFulfillment", requestId, response, err) +} + +func (_OCR2DRClient *OCR2DRClientSession) 
HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _OCR2DRClient.Contract.HandleOracleFulfillment(&_OCR2DRClient.TransactOpts, requestId, response, err) +} + +func (_OCR2DRClient *OCR2DRClientTransactorSession) HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _OCR2DRClient.Contract.HandleOracleFulfillment(&_OCR2DRClient.TransactOpts, requestId, response, err) +} + +type OCR2DRClientRequestFulfilledIterator struct { + Event *OCR2DRClientRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRClientRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRClientRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *OCR2DRClientRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRClientRequestFulfilled struct { + Id [32]byte + Raw types.Log +} + +func (_OCR2DRClient *OCR2DRClientFilterer) FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientRequestFulfilledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := 
_OCR2DRClient.contract.FilterLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return &OCR2DRClientRequestFulfilledIterator{contract: _OCR2DRClient.contract, event: "RequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_OCR2DRClient *OCR2DRClientFilterer) WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *OCR2DRClientRequestFulfilled, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClient.contract.WatchLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRClientRequestFulfilled) + if err := _OCR2DRClient.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRClient *OCR2DRClientFilterer) ParseRequestFulfilled(log types.Log) (*OCR2DRClientRequestFulfilled, error) { + event := new(OCR2DRClientRequestFulfilled) + if err := _OCR2DRClient.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRClientRequestSentIterator struct { + Event *OCR2DRClientRequestSent + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRClientRequestSentIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRClientRequestSentIterator) Error() error { + return it.fail +} + +func (it *OCR2DRClientRequestSentIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRClientRequestSent struct { + Id [32]byte + Raw types.Log +} + +func (_OCR2DRClient *OCR2DRClientFilterer) FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientRequestSentIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClient.contract.FilterLogs(opts, "RequestSent", idRule) + if err != nil { + return nil, err + } + return &OCR2DRClientRequestSentIterator{contract: _OCR2DRClient.contract, event: "RequestSent", logs: logs, sub: sub}, nil +} + +func (_OCR2DRClient *OCR2DRClientFilterer) WatchRequestSent(opts *bind.WatchOpts, sink chan<- *OCR2DRClientRequestSent, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClient.contract.WatchLogs(opts, "RequestSent", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRClientRequestSent) + if err := _OCR2DRClient.contract.UnpackLog(event, "RequestSent", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } 
+ } + }), nil +} + +func (_OCR2DRClient *OCR2DRClientFilterer) ParseRequestSent(log types.Log) (*OCR2DRClientRequestSent, error) { + event := new(OCR2DRClientRequestSent) + if err := _OCR2DRClient.contract.UnpackLog(event, "RequestSent", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_OCR2DRClient *OCR2DRClient) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _OCR2DRClient.abi.Events["RequestFulfilled"].ID: + return _OCR2DRClient.ParseRequestFulfilled(log) + case _OCR2DRClient.abi.Events["RequestSent"].ID: + return _OCR2DRClient.ParseRequestSent(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OCR2DRClientRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0x85e1543bf2f84fe80c6badbce3648c8539ad1df4d2b3d822938ca0538be727e6") +} + +func (OCR2DRClientRequestSent) Topic() common.Hash { + return common.HexToHash("0x1131472297a800fee664d1d89cfa8f7676ff07189ecc53f80bbb5f4969099db8") +} + +func (_OCR2DRClient *OCR2DRClient) Address() common.Address { + return _OCR2DRClient.address +} + +type OCR2DRClientInterface interface { + EstimateCost(opts *bind.CallOpts, req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) + + GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) + + HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) + + FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientRequestFulfilledIterator, error) + + WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *OCR2DRClientRequestFulfilled, id [][32]byte) (event.Subscription, error) + + ParseRequestFulfilled(log types.Log) (*OCR2DRClientRequestFulfilled, error) + + FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientRequestSentIterator, error) + + WatchRequestSent(opts *bind.WatchOpts, 
sink chan<- *OCR2DRClientRequestSent, id [][32]byte) (event.Subscription, error) + + ParseRequestSent(log types.Log) (*OCR2DRClientRequestSent, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/ocr2dr_client_example/ocr2dr_client_example.go b/core/gethwrappers/generated/ocr2dr_client_example/ocr2dr_client_example.go new file mode 100644 index 00000000..de519b3f --- /dev/null +++ b/core/gethwrappers/generated/ocr2dr_client_example/ocr2dr_client_example.go @@ -0,0 +1,1045 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package ocr2dr_client_example + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type FunctionsRequest struct { + CodeLocation uint8 + SecretsLocation uint8 + Language uint8 + Source string + Secrets []byte + Args []string +} + +var OCR2DRClientExampleMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"EmptyArgs\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySource\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoInlineSecrets\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RequestIsAlreadyPending\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RequestIsNotPending\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"SenderIsNotRegistry\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"UnexpectedRequestID\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"RequestSent\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"MAX_CALLBACK_GAS\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"source\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"secrets\",\"type\":\"bytes\"},{\"internalType\":\"string[]\",\"name\":\"args\",\"type\":\"string[]\"},
{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"SendRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"enumFunctions.Location\",\"name\":\"codeLocation\",\"type\":\"uint8\"},{\"internalType\":\"enumFunctions.Location\",\"name\":\"secretsLocation\",\"type\":\"uint8\"},{\"internalType\":\"enumFunctions.CodeLanguage\",\"name\":\"language\",\"type\":\"uint8\"},{\"internalType\":\"string\",\"name\":\"source\",\"type\":\"string\"},{\"internalType\":\"bytes\",\"name\":\"secrets\",\"type\":\"bytes\"},{\"internalType\":\"string[]\",\"name\":\"args\",\"type\":\"string[]\"}],\"internalType\":\"structFunctions.Request\",\"name\":\"req\",\"type\":\"tuple\"},{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"}],\"name\":\"estimateCost\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDONPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"}],\"name\":\"handleOracleFulfillment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastError\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastErrorLength\",\"out
puts\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastRequestId\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastResponse\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastResponseLength\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60806040523480156200001157600080fd5b5060405162001f0338038062001f03833981016040819052620000349162000199565b600080546001600160a01b0319166001600160a01b038316178155339081906001600160a01b038216620000af5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600280546001600160a01b0319166001600160a01b0384811691909117909155811615620000e257620000e281620000ec565b50505050620001cb565b6001600160a01b038116331415620001475760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620000a6565b600380546001600160a01b0319166001600160a01b03838116918217909255600254604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b600060208284031215620001ac57600080fd5b81516001600160a01b0381168114620001c457600080fd5b9392505050565b611d2880620001db6000396000f3fe608060405234801561001057600080fd5b50600436106100df5760003560e01c80638da5cb5b1161008c578063d4b3917511610066578063d4b39175146101aa578063d769717e146101da578063f2fde38b146101ed578063fc2a88c31461020057600080fd5b80638da5cb5b1461015d578063b48cffea14610185578063d328a91e1461019557600080fd5b806362747e42116100bd57806362747e42146101425780636d9809a01461014b57806379ba50971461015557600080fd5b80630ca76175146100e457806329f0de3f146100f95780632c29166b14610115575b600080fd5b6100f76100f2366004611577565b610209565b005b61010260065481565b6040519081526020015b60405180910390f35b60075461012d90640100000000900463ffffffff1681565b60405163ffffffff909116815260200161010c565b61010260055481565b61012d6201117081565b6100f76102d4565b60025460405173ffffffffffffffffffffffffffffffffffffffff909116815260200161010c565b60075461012d9063ffffffff1681565b61019d6103da565b60405161010c91906118b7565b6101bd6101b836600461172d565b6104a3565b6040516bffffffffffffffffffffffff909116815260200161010c565b6100f76101e8366004611652565b610546565b6100f76101fb366004611524565b610647565b61010260045481565b600
083815260016020526040902054839073ffffffffffffffffffffffffffffffffffffffff163314610268576040517fa0c5ec6300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008181526001602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001690555182917f85e1543bf2f84fe80c6badbce3648c8539ad1df4d2b3d822938ca0538be727e691a26102ce84848461065b565b50505050565b60035473ffffffffffffffffffffffffffffffffffffffff16331461035a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000008082163390811790935560038054909116905560405173ffffffffffffffffffffffffffffffffffffffff909116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a350565b60008054604080517fd328a91e000000000000000000000000000000000000000000000000000000008152905160609373ffffffffffffffffffffffffffffffffffffffff9093169263d328a91e9260048082019391829003018186803b15801561044457600080fd5b505afa158015610458573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261049e91908101906115e4565b905090565b6000805473ffffffffffffffffffffffffffffffffffffffff1663d227d245856104cc88610729565b86866040518563ffffffff1660e01b81526004016104ed9493929190611903565b60206040518083038186803b15801561050557600080fd5b505afa158015610519573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061053d919061183f565b95945050505050565b61054e6109f5565b6105886040805160c08101909152806000815260200160008152602001600081526020016060815260200160608152602001606081525090565b6105ca88888080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508593925050610a789050565b84156106125761061286868080601f016020809104026020016040519081016040528093929190818
1526020018383808284376000920191909152508593925050610a899050565b821561062c5761062c6106258486611b90565b8290610ad0565b61063a818362011170610b10565b6004555050505050505050565b61064f6109f5565b61065881610cce565b50565b8260045414610699576040517fd068bf5b00000000000000000000000000000000000000000000000000000000815260048101849052602401610351565b6106a282610dc5565b6005558151600780547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff9092169190911790556106e481610dc5565b600655516007805463ffffffff909216640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff9092169190911790555050565b6060610733611355565b805161074190610100610e4d565b50610781816040518060400160405280600c81526020017f636f64654c6f636174696f6e0000000000000000000000000000000000000000815250610ec7565b6107a0818460000151600281111561079b5761079b611c6c565b610ee5565b6107df816040518060400160405280600881526020017f6c616e6775616765000000000000000000000000000000000000000000000000815250610ec7565b6107f9818460400151600081111561079b5761079b611c6c565b610838816040518060400160405280600681526020017f736f757263650000000000000000000000000000000000000000000000000000815250610ec7565b610846818460600151610ec7565b60a083015151156108ec57610890816040518060400160405280600481526020017f6172677300000000000000000000000000000000000000000000000000000000815250610ec7565b61089981610f1e565b60005b8360a00151518110156108e2576108d0828560a0015183815181106108c3576108c3611c9b565b6020026020010151610ec7565b806108da81611bc9565b91505061089c565b506108ec81610f42565b608083015151156109ed5760008360200151600281111561090f5761090f611c6c565b1415610947576040517fa80d31f700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610986816040518060400160405280600f81526020017f736563726574734c6f636174696f6e0000000000000000000000000000000000815250610ec7565b6109a0818460200151600281111561079b5761079b611c6c565b6109df816040518060400160405280600781526020017f73656372657473000000000000000000000000000000000000000000000000008152506
10ec7565b6109ed818460800151610f60565b515192915050565b60025473ffffffffffffffffffffffffffffffffffffffff163314610a76576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610351565b565b610a858260008084610f6d565b5050565b8051610ac1576040517fe889636f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60016020830152608090910152565b8051610b08576040517ffe936cb700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60a090910152565b60008054819073ffffffffffffffffffffffffffffffffffffffff166328242b0485610b3b88610729565b866040518463ffffffff1660e01b8152600401610b5a939291906118ca565b602060405180830381600087803b158015610b7457600080fd5b505af1158015610b88573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610bac919061155e565b905060008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16635ab1bd536040518163ffffffff1660e01b815260040160206040518083038186803b158015610c1457600080fd5b505afa158015610c28573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610c4c9190611541565b60008281526001602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9490941693909317909255905182917f1131472297a800fee664d1d89cfa8f7676ff07189ecc53f80bbb5f4969099db891a2949350505050565b73ffffffffffffffffffffffffffffffffffffffff8116331415610d4e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610351565b600380547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff838116918217909255600254604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a35
0565b600080600060209050602084511015610ddc575082515b60005b81811015610e4457610df2816008611b3c565b858281518110610e0457610e04611c9b565b01602001517fff0000000000000000000000000000000000000000000000000000000000000016901c929092179180610e3c81611bc9565b915050610ddf565b50909392505050565b604080518082019091526060815260006020820152610e6d602083611c02565b15610e9557610e7d602083611c02565b610e88906020611b79565b610e9290836119fd565b91505b602080840183905260405180855260008152908184010181811015610eb957600080fd5b604052508290505b92915050565b610ed48260038351611001565b8151610ee09082611122565b505050565b8151610ef29060c261114a565b50610a858282604051602001610f0a91815260200190565b604051602081830303815290604052610f60565b610f298160046111b3565b600181602001818151610f3c91906119fd565b90525050565b610f4d8160076111b3565b600181602001818151610f3c9190611b79565b610ed48260028351611001565b8051610fa5576040517f22ce3edd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b83836002811115610fb857610fb8611c6c565b90816002811115610fcb57610fcb611c6c565b90525060408401828015610fe157610fe1611c6c565b90818015610ff157610ff1611c6c565b9052506060909301929092525050565b60178167ffffffffffffffff16116110285782516102ce9060e0600585901b16831761114a565b60ff8167ffffffffffffffff161161106a578251611051906018611fe0600586901b161761114a565b5082516102ce9067ffffffffffffffff831660016111ca565b61ffff8167ffffffffffffffff16116110ad578251611094906019611fe0600586901b161761114a565b5082516102ce9067ffffffffffffffff831660026111ca565b63ffffffff8167ffffffffffffffff16116110f25782516110d990601a611fe0600586901b161761114a565b5082516102ce9067ffffffffffffffff831660046111ca565b825161110990601b611fe0600586901b161761114a565b5082516102ce9067ffffffffffffffff831660086111ca565b6040805180820190915260608152600060208201526111438383845161124f565b9392505050565b604080518082019091526060815260006020820152825151600061116f8260016119fd565b905084602001518210611190576111908561118b836002611b3c565b61133e565b84516020838201018581535080518211156111a9578181525b509
3949350505050565b8151610ee090601f611fe0600585901b161761114a565b60408051808201909152606081526000602082015283515160006111ee82856119fd565b9050856020015181111561120b5761120b8661118b836002611b3c565b6000600161121b86610100611a76565b6112259190611b79565b90508651828101878319825116178152508051831115611243578281525b50959695505050505050565b604080518082019091526060815260006020820152825182111561127257600080fd5b835151600061128184836119fd565b9050856020015181111561129e5761129e8661118b836002611b3c565b8551805183820160200191600091808511156112b8578482525b505050602086015b602086106112f857805182526112d76020836119fd565b91506112e46020826119fd565b90506112f1602087611b79565b95506112c0565b5181517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60208890036101000a0190811690199190911617905250849150509392505050565b815161134a8383610e4d565b506102ce8382611122565b604051806040016040528061137d604051806040016040528060608152602001600081525090565b8152602001600081525090565b600067ffffffffffffffff808411156113a5576113a5611cca565b8360051b60206113b6818301611968565b8681529350808401858381018910156113ce57600080fd5b60009350835b88811015611409578135868111156113ea578586fd5b6113f68b828b0161147f565b84525091830191908301906001016113d4565b5050505050509392505050565b600082601f83011261142757600080fd5b6111438383356020850161138a565b60008083601f84011261144857600080fd5b50813567ffffffffffffffff81111561146057600080fd5b60208301915083602082850101111561147857600080fd5b9250929050565b600082601f83011261149057600080fd5b81356114a361149e826119b7565b611968565b8181528460208386010111156114b857600080fd5b816020850160208301376000918101602001919091529392505050565b8035600181106114e457600080fd5b919050565b8035600381106114e457600080fd5b803563ffffffff811681146114e457600080fd5b803567ffffffffffffffff811681146114e457600080fd5b60006020828403121561153657600080fd5b813561114381611cf9565b60006020828403121561155357600080fd5b815161114381611cf9565b60006020828403121561157057600080fd5b5051919050565b60008060006060848603121561158c57600080fd5b8335925060208
4013567ffffffffffffffff808211156115ab57600080fd5b6115b78783880161147f565b935060408601359150808211156115cd57600080fd5b506115da8682870161147f565b9150509250925092565b6000602082840312156115f657600080fd5b815167ffffffffffffffff81111561160d57600080fd5b8201601f8101841361161e57600080fd5b805161162c61149e826119b7565b81815285602083850101111561164157600080fd5b61053d826020830160208601611b9d565b60008060008060008060006080888a03121561166d57600080fd5b873567ffffffffffffffff8082111561168557600080fd5b6116918b838c01611436565b909950975060208a01359150808211156116aa57600080fd5b6116b68b838c01611436565b909750955060408a01359150808211156116cf57600080fd5b818a0191508a601f8301126116e357600080fd5b8135818111156116f257600080fd5b8b60208260051b850101111561170757600080fd5b60208301955080945050505061171f6060890161150c565b905092959891949750929550565b6000806000806080858703121561174357600080fd5b843567ffffffffffffffff8082111561175b57600080fd5b9086019060c0828903121561176f57600080fd5b61177761193f565b611780836114e9565b815261178e602084016114e9565b602082015261179f604084016114d5565b60408201526060830135828111156117b657600080fd5b6117c28a82860161147f565b6060830152506080830135828111156117da57600080fd5b6117e68a82860161147f565b60808301525060a0830135828111156117fe57600080fd5b61180a8a828601611416565b60a08301525095506118219150506020860161150c565b925061182f604086016114f8565b9396929550929360600135925050565b60006020828403121561185157600080fd5b81516bffffffffffffffffffffffff8116811461114357600080fd5b60008151808452611885816020860160208601611b9d565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000611143602083018461186d565b67ffffffffffffffff841681526060602082015260006118ed606083018561186d565b905063ffffffff83166040830152949350505050565b67ffffffffffffffff85168152608060208201526000611926608083018661186d565b63ffffffff949094166040830152506060015292915050565b60405160c0810167ffffffffffffffff8111828210171561196257611962611cca565b60405290565b604051601f82017ffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156119af576119af611cca565b604052919050565b600067ffffffffffffffff8211156119d1576119d1611cca565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b60008219821115611a1057611a10611c3d565b500190565b600181815b80851115611a6e57817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04821115611a5457611a54611c3d565b80851615611a6157918102915b93841c9390800290611a1a565b509250929050565b60006111438383600082611a8c57506001610ec1565b81611a9957506000610ec1565b8160018114611aaf5760028114611ab957611ad5565b6001915050610ec1565b60ff841115611aca57611aca611c3d565b50506001821b610ec1565b5060208310610133831016604e8410600b8410161715611af8575081810a610ec1565b611b028383611a15565b807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04821115611b3457611b34611c3d565b029392505050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615611b7457611b74611c3d565b500290565b600082821015611b8b57611b8b611c3d565b500390565b600061114336848461138a565b60005b83811015611bb8578181015183820152602001611ba0565b838111156102ce5750506000910152565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415611bfb57611bfb611c3d565b5060010190565b600082611c38577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500690565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b73ffffffffffffffffffffffffffffffffffffffff8116811461065857600080fdfea164736f6c6343000806000a", +} + +var OCR2DRClientExampleABI = OCR2DRClientExampleMetaData.ABI + +var OCR2DRClientExampleBin = 
OCR2DRClientExampleMetaData.Bin + +func DeployOCR2DRClientExample(auth *bind.TransactOpts, backend bind.ContractBackend, oracle common.Address) (common.Address, *types.Transaction, *OCR2DRClientExample, error) { + parsed, err := OCR2DRClientExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OCR2DRClientExampleBin), backend, oracle) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OCR2DRClientExample{OCR2DRClientExampleCaller: OCR2DRClientExampleCaller{contract: contract}, OCR2DRClientExampleTransactor: OCR2DRClientExampleTransactor{contract: contract}, OCR2DRClientExampleFilterer: OCR2DRClientExampleFilterer{contract: contract}}, nil +} + +type OCR2DRClientExample struct { + address common.Address + abi abi.ABI + OCR2DRClientExampleCaller + OCR2DRClientExampleTransactor + OCR2DRClientExampleFilterer +} + +type OCR2DRClientExampleCaller struct { + contract *bind.BoundContract +} + +type OCR2DRClientExampleTransactor struct { + contract *bind.BoundContract +} + +type OCR2DRClientExampleFilterer struct { + contract *bind.BoundContract +} + +type OCR2DRClientExampleSession struct { + Contract *OCR2DRClientExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OCR2DRClientExampleCallerSession struct { + Contract *OCR2DRClientExampleCaller + CallOpts bind.CallOpts +} + +type OCR2DRClientExampleTransactorSession struct { + Contract *OCR2DRClientExampleTransactor + TransactOpts bind.TransactOpts +} + +type OCR2DRClientExampleRaw struct { + Contract *OCR2DRClientExample +} + +type OCR2DRClientExampleCallerRaw struct { + Contract *OCR2DRClientExampleCaller +} + +type OCR2DRClientExampleTransactorRaw struct { + Contract *OCR2DRClientExampleTransactor +} + +func NewOCR2DRClientExample(address 
common.Address, backend bind.ContractBackend) (*OCR2DRClientExample, error) { + abi, err := abi.JSON(strings.NewReader(OCR2DRClientExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindOCR2DRClientExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OCR2DRClientExample{address: address, abi: abi, OCR2DRClientExampleCaller: OCR2DRClientExampleCaller{contract: contract}, OCR2DRClientExampleTransactor: OCR2DRClientExampleTransactor{contract: contract}, OCR2DRClientExampleFilterer: OCR2DRClientExampleFilterer{contract: contract}}, nil +} + +func NewOCR2DRClientExampleCaller(address common.Address, caller bind.ContractCaller) (*OCR2DRClientExampleCaller, error) { + contract, err := bindOCR2DRClientExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OCR2DRClientExampleCaller{contract: contract}, nil +} + +func NewOCR2DRClientExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*OCR2DRClientExampleTransactor, error) { + contract, err := bindOCR2DRClientExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OCR2DRClientExampleTransactor{contract: contract}, nil +} + +func NewOCR2DRClientExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*OCR2DRClientExampleFilterer, error) { + contract, err := bindOCR2DRClientExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OCR2DRClientExampleFilterer{contract: contract}, nil +} + +func bindOCR2DRClientExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OCR2DRClientExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleRaw) Call(opts *bind.CallOpts, result 
*[]interface{}, method string, params ...interface{}) error { + return _OCR2DRClientExample.Contract.OCR2DRClientExampleCaller.contract.Call(opts, result, method, params...) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.OCR2DRClientExampleTransactor.contract.Transfer(opts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.OCR2DRClientExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DRClientExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.contract.Transfer(opts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) MAXCALLBACKGAS(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "MAX_CALLBACK_GAS") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) MAXCALLBACKGAS() (uint32, error) { + return _OCR2DRClientExample.Contract.MAXCALLBACKGAS(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) MAXCALLBACKGAS() (uint32, error) { + return _OCR2DRClientExample.Contract.MAXCALLBACKGAS(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) EstimateCost(opts *bind.CallOpts, req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "estimateCost", req, subscriptionId, gasLimit, gasPrice) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) EstimateCost(req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + return _OCR2DRClientExample.Contract.EstimateCost(&_OCR2DRClientExample.CallOpts, req, subscriptionId, gasLimit, gasPrice) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) EstimateCost(req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + return _OCR2DRClientExample.Contract.EstimateCost(&_OCR2DRClientExample.CallOpts, req, subscriptionId, gasLimit, gasPrice) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, 
"getDONPublicKey") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) GetDONPublicKey() ([]byte, error) { + return _OCR2DRClientExample.Contract.GetDONPublicKey(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) GetDONPublicKey() ([]byte, error) { + return _OCR2DRClientExample.Contract.GetDONPublicKey(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) LastError(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "lastError") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) LastError() ([32]byte, error) { + return _OCR2DRClientExample.Contract.LastError(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) LastError() ([32]byte, error) { + return _OCR2DRClientExample.Contract.LastError(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) LastErrorLength(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "lastErrorLength") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) LastErrorLength() (uint32, error) { + return _OCR2DRClientExample.Contract.LastErrorLength(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) LastErrorLength() (uint32, error) { + return _OCR2DRClientExample.Contract.LastErrorLength(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample 
*OCR2DRClientExampleCaller) LastRequestId(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "lastRequestId") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) LastRequestId() ([32]byte, error) { + return _OCR2DRClientExample.Contract.LastRequestId(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) LastRequestId() ([32]byte, error) { + return _OCR2DRClientExample.Contract.LastRequestId(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) LastResponse(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "lastResponse") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) LastResponse() ([32]byte, error) { + return _OCR2DRClientExample.Contract.LastResponse(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) LastResponse() ([32]byte, error) { + return _OCR2DRClientExample.Contract.LastResponse(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) LastResponseLength(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "lastResponseLength") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) LastResponseLength() (uint32, error) { + return _OCR2DRClientExample.Contract.LastResponseLength(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample 
*OCR2DRClientExampleCallerSession) LastResponseLength() (uint32, error) { + return _OCR2DRClientExample.Contract.LastResponseLength(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OCR2DRClientExample.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) Owner() (common.Address, error) { + return _OCR2DRClientExample.Contract.Owner(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleCallerSession) Owner() (common.Address, error) { + return _OCR2DRClientExample.Contract.Owner(&_OCR2DRClientExample.CallOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactor) SendRequest(opts *bind.TransactOpts, source string, secrets []byte, args []string, subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRClientExample.contract.Transact(opts, "SendRequest", source, secrets, args, subscriptionId) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) SendRequest(source string, secrets []byte, args []string, subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.SendRequest(&_OCR2DRClientExample.TransactOpts, source, secrets, args, subscriptionId) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactorSession) SendRequest(source string, secrets []byte, args []string, subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.SendRequest(&_OCR2DRClientExample.TransactOpts, source, secrets, args, subscriptionId) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRClientExample.contract.Transact(opts, 
"acceptOwnership") +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) AcceptOwnership() (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.AcceptOwnership(&_OCR2DRClientExample.TransactOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.AcceptOwnership(&_OCR2DRClientExample.TransactOpts) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactor) HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _OCR2DRClientExample.contract.Transact(opts, "handleOracleFulfillment", requestId, response, err) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.HandleOracleFulfillment(&_OCR2DRClientExample.TransactOpts, requestId, response, err) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactorSession) HandleOracleFulfillment(requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.HandleOracleFulfillment(&_OCR2DRClientExample.TransactOpts, requestId, response, err) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _OCR2DRClientExample.contract.Transact(opts, "transferOwnership", to) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _OCR2DRClientExample.Contract.TransferOwnership(&_OCR2DRClientExample.TransactOpts, to) +} + +func (_OCR2DRClientExample *OCR2DRClientExampleTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return 
_OCR2DRClientExample.Contract.TransferOwnership(&_OCR2DRClientExample.TransactOpts, to) +} + +type OCR2DRClientExampleOwnershipTransferRequestedIterator struct { + Event *OCR2DRClientExampleOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRClientExampleOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRClientExampleOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRClientExampleOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRClientExampleOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRClientExampleOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) 
+ if err != nil { + return nil, err + } + return &OCR2DRClientExampleOwnershipTransferRequestedIterator{contract: _OCR2DRClientExample.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRClientExampleOwnershipTransferRequested) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) ParseOwnershipTransferRequested(log types.Log) (*OCR2DRClientExampleOwnershipTransferRequested, error) { + event := new(OCR2DRClientExampleOwnershipTransferRequested) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRClientExampleOwnershipTransferredIterator struct { + Event *OCR2DRClientExampleOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + 
+func (it *OCR2DRClientExampleOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRClientExampleOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *OCR2DRClientExampleOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRClientExampleOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRClientExampleOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &OCR2DRClientExampleOwnershipTransferredIterator{contract: _OCR2DRClientExample.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleOwnershipTransferred, from []common.Address, to 
[]common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRClientExampleOwnershipTransferred) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) ParseOwnershipTransferred(log types.Log) (*OCR2DRClientExampleOwnershipTransferred, error) { + event := new(OCR2DRClientExampleOwnershipTransferred) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRClientExampleRequestFulfilledIterator struct { + Event *OCR2DRClientExampleRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRClientExampleRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(OCR2DRClientExampleRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRClientExampleRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *OCR2DRClientExampleRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRClientExampleRequestFulfilled struct { + Id [32]byte + Raw types.Log +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientExampleRequestFulfilledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.FilterLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return &OCR2DRClientExampleRequestFulfilledIterator{contract: _OCR2DRClientExample.contract, event: "RequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleRequestFulfilled, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.WatchLogs(opts, "RequestFulfilled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRClientExampleRequestFulfilled) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case 
err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) ParseRequestFulfilled(log types.Log) (*OCR2DRClientExampleRequestFulfilled, error) { + event := new(OCR2DRClientExampleRequestFulfilled) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRClientExampleRequestSentIterator struct { + Event *OCR2DRClientExampleRequestSent + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRClientExampleRequestSentIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRClientExampleRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRClientExampleRequestSentIterator) Error() error { + return it.fail +} + +func (it *OCR2DRClientExampleRequestSentIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRClientExampleRequestSent struct { + Id [32]byte + Raw types.Log +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientExampleRequestSentIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.FilterLogs(opts, 
"RequestSent", idRule) + if err != nil { + return nil, err + } + return &OCR2DRClientExampleRequestSentIterator{contract: _OCR2DRClientExample.contract, event: "RequestSent", logs: logs, sub: sub}, nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) WatchRequestSent(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleRequestSent, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _OCR2DRClientExample.contract.WatchLogs(opts, "RequestSent", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRClientExampleRequestSent) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "RequestSent", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRClientExample *OCR2DRClientExampleFilterer) ParseRequestSent(log types.Log) (*OCR2DRClientExampleRequestSent, error) { + event := new(OCR2DRClientExampleRequestSent) + if err := _OCR2DRClientExample.contract.UnpackLog(event, "RequestSent", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_OCR2DRClientExample *OCR2DRClientExample) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _OCR2DRClientExample.abi.Events["OwnershipTransferRequested"].ID: + return _OCR2DRClientExample.ParseOwnershipTransferRequested(log) + case _OCR2DRClientExample.abi.Events["OwnershipTransferred"].ID: + return _OCR2DRClientExample.ParseOwnershipTransferred(log) + case _OCR2DRClientExample.abi.Events["RequestFulfilled"].ID: + return _OCR2DRClientExample.ParseRequestFulfilled(log) + case 
_OCR2DRClientExample.abi.Events["RequestSent"].ID: + return _OCR2DRClientExample.ParseRequestSent(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OCR2DRClientExampleOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (OCR2DRClientExampleOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (OCR2DRClientExampleRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0x85e1543bf2f84fe80c6badbce3648c8539ad1df4d2b3d822938ca0538be727e6") +} + +func (OCR2DRClientExampleRequestSent) Topic() common.Hash { + return common.HexToHash("0x1131472297a800fee664d1d89cfa8f7676ff07189ecc53f80bbb5f4969099db8") +} + +func (_OCR2DRClientExample *OCR2DRClientExample) Address() common.Address { + return _OCR2DRClientExample.address +} + +type OCR2DRClientExampleInterface interface { + MAXCALLBACKGAS(opts *bind.CallOpts) (uint32, error) + + EstimateCost(opts *bind.CallOpts, req FunctionsRequest, subscriptionId uint64, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) + + GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) + + LastError(opts *bind.CallOpts) ([32]byte, error) + + LastErrorLength(opts *bind.CallOpts) (uint32, error) + + LastRequestId(opts *bind.CallOpts) ([32]byte, error) + + LastResponse(opts *bind.CallOpts) ([32]byte, error) + + LastResponseLength(opts *bind.CallOpts) (uint32, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SendRequest(opts *bind.TransactOpts, source string, secrets []byte, args []string, subscriptionId uint64) (*types.Transaction, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + HandleOracleFulfillment(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte) (*types.Transaction, error) + + 
TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRClientExampleOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*OCR2DRClientExampleOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRClientExampleOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*OCR2DRClientExampleOwnershipTransferred, error) + + FilterRequestFulfilled(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientExampleRequestFulfilledIterator, error) + + WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleRequestFulfilled, id [][32]byte) (event.Subscription, error) + + ParseRequestFulfilled(log types.Log) (*OCR2DRClientExampleRequestFulfilled, error) + + FilterRequestSent(opts *bind.FilterOpts, id [][32]byte) (*OCR2DRClientExampleRequestSentIterator, error) + + WatchRequestSent(opts *bind.WatchOpts, sink chan<- *OCR2DRClientExampleRequestSent, id [][32]byte) (event.Subscription, error) + + ParseRequestSent(log types.Log) (*OCR2DRClientExampleRequestSent, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/ocr2dr_oracle/ocr2dr_oracle.go b/core/gethwrappers/generated/ocr2dr_oracle/ocr2dr_oracle.go new file mode 100644 index 00000000..30dee840 --- /dev/null +++ b/core/gethwrappers/generated/ocr2dr_oracle/ocr2dr_oracle.go @@ 
-0,0 +1,2699 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package ocr2dr_oracle + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type IFunctionsBillingRegistryRequestBilling struct { + SubscriptionId uint64 + Client common.Address + GasLimit uint32 + GasPrice *big.Int +} + +var OCR2DROracleMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AlreadySet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotSelfTransfer\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyBillingRegistry\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyPublicKey\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyRequestData\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySendersList\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InconsistentReportData\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAllowedToSetSenders\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotProposedOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OwnerMustBeSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReportInvalid\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedPublicKeyChange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedSender\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{
\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersActive\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersDeactive\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"InvalidRequestID\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"ty
pe\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestingContract\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requestInitiator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"subscriptionOwner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"OracleRequest\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"OracleResponse\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"ResponseTransmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"reason\",\"type\":\"string\"}],\
"name\":\"UserCallbackError\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"lowLevelData\",\"type\":\"bytes\"}],\"name\":\"UserCallbackRawError\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"activateAuthorizedReceiver\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"addAuthorizedSenders\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"authorizedReceiverActive\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"deactivateAuthorizedReceiver\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"node\",\"type\":\"address\"}],\"name\":\"deleteNodePublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"}],\"name\":\"estimateCost\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAllNodePublicKeys\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"},{\"internalType\":\"bytes[]\",\"name\":\"\",\"type\":\"bytes[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\
"getAuthorizedSenders\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDONPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRegistry\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"}],\"internalType\":\"structIFunctionsBillingRegistry.RequestBilling\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"getRequiredFee\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getThresholdPublicKey\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"isAuthorizedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"
view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"removeAuthorizedSenders\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"sendRequest\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"_f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"_offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"donPublicKey\",\"type\":\"bytes\"}],\"name\":\"setDONPublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"node\",\"type\":\"address\"},{\"internalType\":\"bytes\"
,\"name\":\"publicKey\",\"type\":\"bytes\"}],\"name\":\"setNodePublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"registryAddress\",\"type\":\"address\"}],\"name\":\"setRegistry\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"thresholdPublicKey\",\"type\":\"bytes\"}],\"name\":\"setThresholdPublicKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"report\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"transmitters\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x60806040523480156200001157600080fd5b506200001c62000022565b620003ae565b600054610100900460ff1615808015620000435750600054600160ff909116105b8062000073575062000060306200016260201b620026d71760201c565b15801562000073575060005460ff166001145b620000dc5760405162461bcd60e51b815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201526d191e481a5b9a5d1a585b1a5e995960921b60648201526084015b60405180910390fd5b6000805460ff19166001179055801562000100576000805461ff0019166101001790555b6200010c600162000171565b620001186001620001f8565b80156200015f576000805461ff0019169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b50565b6001600160a01b03163b151590565b600054610100900460ff16620001cd5760405162461bcd60e51b815260206004820152602b602482015260008051602062004a6183398151915260448201526a6e697469616c697a696e6760a81b6064820152608401620000d3565b620001da33600062000267565b60018054911515600160a01b0260ff60a01b19909216919091179055565b600054610100900460ff16620002545760405162461bcd60e51b815260206004820152602b602482015260008051602062004a6183398151915260448201526a6e697469616c697a696e6760a81b6064820152608401620000d3565b6008805460ff1916911515919091179055565b600054610100900460ff16620002c35760405162461bcd60e51b815260206004820152602b602482015260008051602062004a6183398151915260448201526a6e697469616c697a696e6760a81b6064820152608401620000d3565b6001600160a01b038216620002eb57604051635b5a8afd60e11b815260040160405180910390fd5b600080546001600160a01b03808516620100000262010000600160b01b031990921691909117909155811615620003275762000327816200032b565b5050565b6001600160a01b038116331415620003565760405163282010c360e01b815260040160405180910390fd5b600180546001600160a01b0319166001600160a01b038381169182179092556000805460405192936201000090910416917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6146a380620003be6000396000f3fe608060405234801561001057600080fd5b50600436106101cf5760003560e01c80638129fc1c11610
104578063afcb95d7116100a2578063e3d0e71211610071578063e3d0e71214610437578063f1e14a211461044a578063f2fde38b14610461578063fa00763a1461047457600080fd5b8063afcb95d7146103cc578063b1dc65a4146103ec578063d227d245146103ff578063d328a91e1461042f57600080fd5b806381ff7048116100de57806381ff70481461035d5780638da5cb5b1461038d57806391bb64eb146103b1578063a91ee0dc146103b957600080fd5b80638129fc1c14610345578063814118341461034d57806381f1b9381461035557600080fd5b80634b4fa0c1116101715780635ab1bd531161014b5780635ab1bd53146102d857806379ba5097146103175780637f15e1661461031f578063807560311461033257600080fd5b80634b4fa0c1146102985780634dcef404146102af57806353398987146102c257600080fd5b8063181f5a77116101ad578063181f5a77146102045780632408afaa1461024f57806326ceabac1461026457806328242b041461027757600080fd5b806303e1bf23146101d4578063083a5466146101e9578063110254c8146101fc575b600080fd5b6101e76101e23660046138de565b610487565b005b6101e76101f7366004613c42565b6105a5565b6101e76105f5565b60408051808201909152601581527f46756e6374696f6e734f7261636c6520302e302e30000000000000000000000060208201525b604051610246919061422d565b60405180910390f35b6102576106ea565b604051610246919061407d565b6101e761027236600461384f565b6106fb565b61028a610285366004613d74565b6107cf565b604051908152602001610246565b60085460ff165b6040519015158152602001610246565b6101e76102bd3660046138de565b610a35565b6102ca610b13565b604051610246929190614090565b600d5473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610246565b6101e7610d33565b6101e761032d366004613c42565b610e26565b6101e7610340366004613889565b610e71565b6101e7610f5a565b6102576110fc565b61023961116b565b6004546002546040805163ffffffff80851682526401000000009094049093166020840152820152606001610246565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff166102f2565b6101e76111f4565b6101e76103c736600461384f565b6112df565b604080516001815260006020820181905291810191909152606001610246565b6101e76103fa366004613a8e565b61137b565b61041261040d366004613dd9565b611aa
a565b6040516bffffffffffffffffffffffff9091168152602001610246565b610239611cb1565b6101e76104453660046139c1565b611cc0565b610412610458366004613c78565b60009392505050565b6101e761046f36600461384f565b61269b565b61029f61048236600461384f565b6126ac565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff1633146104de576040517fad77f06100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80610515576040517f75158c3b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b818110156105655761055283838381811061053557610535614616565b905060200201602081019061054a919061384f565b6009906126f3565b508061055d81614521565b915050610518565b507ff263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a082823360405161059993929190614003565b60405180910390a15050565b6105ad61271c565b806105e4576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6105f0600f83836134d4565b505050565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff16331461064c576040517fad77f06100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60085460ff1615610689576040517fa741a04500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600880547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790556040513381527fae51766a982895b0c444fc99fc1a560762b464d709e6c78376c85617f7eeb5ce906020015b60405180910390a1565b60606106f66009612775565b905090565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16148061076857503373ffffffffffffffffffffffffffffffffffffffff8216145b61079e576040517fed6dd19b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff81166000908152600e602052604081206107cc91613576565b50565b600d5460009073ffffffffffffffffffffffffffffffffffffffff16610821576040517f9c5fe3240
0000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610829612782565b8261085f576040517ec1cfc000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600d546040805160808101825267ffffffffffffffff8816815233602082015263ffffffff8516818301523a606082015290517fa9d03c0500000000000000000000000000000000000000000000000000000000815260009273ffffffffffffffffffffffffffffffffffffffff169163a9d03c05916108e69189918991906004016141c4565b602060405180830381600087803b15801561090057600080fd5b505af1158015610914573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906109389190613c29565b600d546040517fb2a489ff00000000000000000000000000000000000000000000000000000000815267ffffffffffffffff8916600482015291925082917fa1ec73989d79578cd6f67d4f593ac3e0a4d1020e5c0164db52108d7ff785406c91339132918b9173ffffffffffffffffffffffffffffffffffffffff9091169063b2a489ff9060240160206040518083038186803b1580156109d857600080fd5b505afa1580156109ec573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610a10919061386c565b8a8a604051610a2496959493929190613fa5565b60405180910390a295945050505050565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff163314610a8c576040517fad77f06100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80610ac3576040517f75158c3b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b8181101561056557610b00838383818110610ae357610ae3614616565b9050602002016020810190610af8919061384f565b6009906127c1565b5080610b0b81614521565b915050610ac6565b60608060003073ffffffffffffffffffffffffffffffffffffffff1663814118346040518163ffffffff1660e01b815260040160006040518083038186803b158015610b5e57600080fd5b505afa158015610b72573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052610bb89190810190613920565b90506000815167ffffffffffffffff811115610bd657610bd6614645565b6040519080825280602
00260200182016040528015610c0957816020015b6060815260200190600190039081610bf45790505b50905060005b8251811015610d2957600e6000848381518110610c2e57610c2e614616565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000208054610c7b906144cd565b80601f0160208091040260200160405190810160405280929190818152602001828054610ca7906144cd565b8015610cf45780601f10610cc957610100808354040283529160200191610cf4565b820191906000526020600020905b815481529060010190602001808311610cd757829003601f168201915b5050505050828281518110610d0b57610d0b614616565b60200260200101819052508080610d2190614521565b915050610c0f565b5090939092509050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610d84576040517f0f22ca5f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000805433620100008181027fffffffffffffffffffff0000000000000000000000000000000000000000ffff8416178455600180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905560405173ffffffffffffffffffffffffffffffffffffffff919093041692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610e2e61271c565b80610e65576040517f4f42be3d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6105f0600c83836134d4565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161480610eee5750610ecd336127e3565b8015610eee57503373ffffffffffffffffffffffffffffffffffffffff8416145b610f24576040517fed6dd19b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff83166000908152600e60205260409020610f549083836134d4565b50505050565b600054610100900460ff1615808015610f7a5750600054600160ff909116105b80610f945750303b158015610f94575060005460ff166001145b611025576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e6
0248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a656400000000000000000000000000000000000060648201526084015b60405180910390fd5b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055801561108357600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b61108d6001612907565b61109760016129f3565b80156107cc57600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a150565b6060600780548060200260200160405190810160405280929190818152602001828054801561116157602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311611136575b5050505050905090565b6060600f805461117a906144cd565b80601f01602080910402602001604051908101604052809291908181526020018280546111a6906144cd565b80156111615780601f106111c857610100808354040283529160200191611161565b820191906000526020600020905b8154815290600101906020018083116111d657509395945050505050565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff16331461124b576040517fad77f06100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60085460ff16611287576040517fa741a04500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600880547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690556040513381527fea3828816a323b8d7ff49d755efd105e7719166d6c76fad97a28eee5eccc3d9a906020016106e0565b6112e761271c565b73ffffffffffffffffffffffffffffffffffffffff8116611334576040517f9c5fe32400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600d80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60005a604080516020601f8b018190048102820181019092528981529192508a3591818c0135916113d191849163ffffffff85169
1908e908e9081908401838280828437600092019190915250612abb92505050565b611407576040517f0be3632800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805183815262ffffff600884901c1660208201527fb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62910160405180910390a16040805160608101825260025480825260035460ff808216602085015261010090910416928201929092529083146114dc576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f636f6e666967446967657374206d69736d617463680000000000000000000000604482015260640161101c565b6114ea8b8b8b8b8b8b612ac4565b60015460009074010000000000000000000000000000000000000000900460ff161561154557600282602001518360400151611526919061441e565b6115309190614457565b61153b90600161441e565b60ff16905061155b565b602082015161155590600161441e565b60ff1690505b8881146115c4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f77726f6e67206e756d626572206f66207369676e617475726573000000000000604482015260640161101c565b88871461162d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f7369676e617475726573206f7574206f6620726567697374726174696f6e0000604482015260640161101c565b3360009081526005602090815260408083208151808301909252805460ff80821684529293919291840191610100909104166002811115611670576116706145b8565b6002811115611681576116816145b8565b905250905060028160200151600281111561169e5761169e6145b8565b1480156116e557506007816000015160ff16815481106116c0576116c0614616565b60009182526020909120015473ffffffffffffffffffffffffffffffffffffffff1633145b61174b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f756e617574686f72697a6564207472616e736d69747465720000000000000000604482015260640161101c565b50505050506117586135b0565b6000808a8a60405161176b929190613f95565b604051908190038120611782918e90602001613f79565b604080517ffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffe08184030181528282528051602091820120838301909252600080845290830152915060005b89811015611a8c5760006001848984602081106117eb576117eb614616565b6117f891901a601b61441e565b8e8e8681811061180a5761180a614616565b905060200201358d8d8781811061182357611823614616565b9050602002013560405160008152602001604052604051611860949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa158015611882573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015173ffffffffffffffffffffffffffffffffffffffff811660009081526005602090815290849020838501909452835460ff80821685529296509294508401916101009004166002811115611902576119026145b8565b6002811115611913576119136145b8565b9052509250600183602001516002811115611930576119306145b8565b14611997576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f61646472657373206e6f7420617574686f72697a656420746f207369676e0000604482015260640161101c565b8251600090879060ff16601f81106119b1576119b1614616565b602002015173ffffffffffffffffffffffffffffffffffffffff1614611a33576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f6e6f6e2d756e69717565207369676e6174757265000000000000000000000000604482015260640161101c565b8086846000015160ff16601f8110611a4d57611a4d614616565b73ffffffffffffffffffffffffffffffffffffffff9092166020929092020152611a7860018661441e565b94505080611a8590614521565b90506117cc565b505050611a9d833383858e8e612b7b565b5050505050505050505050565b600d5460009073ffffffffffffffffffffffffffffffffffffffff16611afc576040517f9c5fe32400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805160808101825267ffffffffffffffff8816815233602082015263ffffffff85168183015260608101849052600d5491517ff1e14a210000000000000000000000000000000000000000000000000000000081529091600091829173ffffffffffffffffffffffffffffffffffffffff169063f1e14a2190611b89908b908b908
8906004016141c4565b60206040518083038186803b158015611ba157600080fd5b505afa158015611bb5573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611bd99190613e46565b600d546040517fa1a6d04100000000000000000000000000000000000000000000000000000000815263ffffffff89166004820152602481018890526bffffffffffffffffffffffff80861660448301528316606482015291925073ffffffffffffffffffffffffffffffffffffffff169063a1a6d0419060840160206040518083038186803b158015611c6c57600080fd5b505afa158015611c80573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611ca49190613e46565b9998505050505050505050565b6060600c805461117a906144cd565b855185518560ff16601f831115611d33576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f746f6f206d616e79207369676e65727300000000000000000000000000000000604482015260640161101c565b60008111611d9d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f66206d75737420626520706f7369746976650000000000000000000000000000604482015260640161101c565b818314611e2b576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f6f7261636c6520616464726573736573206f7574206f6620726567697374726160448201527f74696f6e00000000000000000000000000000000000000000000000000000000606482015260840161101c565b611e36816003614479565b8311611e9e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f6661756c74792d6f7261636c65206620746f6f20686967680000000000000000604482015260640161101c565b611ea661271c565b6040805160c0810182528a8152602081018a905260ff8916918101919091526060810187905267ffffffffffffffff8616608082015260a081018590525b6006541561209957600654600090611efe906001906144b6565b9050600060068281548110611f1557611f15614616565b60009182526020822001546007805473ffffffffffffffffffffffffffffffffffffffff90921693509084908110611f4f57611f4f614616565b600091825260208083209091015473fffffffffffffffffff
fffffffffffffffffffff85811684526005909252604080842080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000090811690915592909116808452922080549091169055600680549192509080611fcf57611fcf6145e7565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff00000000000000000000000000000000000000001690550190556007805480612038576120386145e7565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905501905550611ee4915050565b60005b8151518110156124fe57600060056000846000015184815181106120c2576120c2614616565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff16600281111561210c5761210c6145b8565b14612173576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265706561746564207369676e65722061646472657373000000000000000000604482015260640161101c565b6040805180820190915260ff821681526001602082015282518051600591600091859081106121a4576121a4614616565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001617610100836002811115612245576122456145b8565b0217905550600091506122559050565b600560008460200151848151811061226f5761226f614616565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff1660028111156122b9576122b96145b8565b14612320576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f7265706561746564207472616e736d6974746572206164647265737300000000604482015260640161101c565b6040805180820190915260ff82168152602081016002815250600560008460200151848151811061235357612353614
616565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000016176101008360028111156123f4576123f46145b8565b02179055505082518051600692508390811061241257612412614616565b602090810291909101810151825460018101845560009384529282902090920180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909316929092179091558201518051600791908390811061248e5761248e614616565b60209081029190910181015182546001810184556000938452919092200180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9092169190911790556124f781614521565b905061209c565b506040810151600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff909216919091179055600480547fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff811664010000000063ffffffff4381168202928317855590830481169360019390926000926125909286929082169116176143f6565b92506101000a81548163ffffffff021916908363ffffffff1602179055506125ef4630600460009054906101000a900463ffffffff1663ffffffff16856000015186602001518760400151886060015189608001518a60a0015161304f565b6002819055825180516003805460ff909216610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff90921691909117905560045460208501516040808701516060880151608089015160a08a015193517f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e059861268e988b98919763ffffffff9092169690959194919391926142e5565b60405180910390a1611a9d565b6126a361271c565b6107cc816130fa565b60006126ba60085460ff1690565b6126c657506001919050565b6126d16009836131c7565b92915050565b73ffffffffffffffffffffffffffffffffffffffff163b151590565b60006127158373ffffffffffffffffffffffffffffffffffffffff84166131f6565b9392505050565b60005462010000900473fffffffffffffffffffffffffffffffffff
fffff163314612773576040517f2b5c74de00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b565b60606000612715836132e9565b61278b326126ac565b612773576040517f0809490800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006127158373ffffffffffffffffffffffffffffffffffffffff8416613345565b6000803073ffffffffffffffffffffffffffffffffffffffff1663814118346040518163ffffffff1660e01b815260040160006040518083038186803b15801561282c57600080fd5b505afa158015612840573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526128869190810190613920565b905060005b81518110156128fd578373ffffffffffffffffffffffffffffffffffffffff168282815181106128bd576128bd614616565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1614156128eb575060019392505050565b806128f581614521565b91505061288b565b5060009392505050565b600054610100900460ff1661299e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e67000000000000000000000000000000000000000000606482015260840161101c565b6129a9336000613394565b6001805491151574010000000000000000000000000000000000000000027fffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff909216919091179055565b600054610100900460ff16612a8a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e67000000000000000000000000000000000000000000606482015260840161101c565b600880547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016911515919091179055565b60019392505050565b6000612ad1826020614479565b612adc856020614479565b612ae8886101446143de565b612af291906143de565b612afc91906143de565b612b079060006143de565b9050368114612b72576040517f08c379a0000000000000000
00000000000000000000000000000000000000000815260206004820152601860248201527f63616c6c64617461206c656e677468206d69736d617463680000000000000000604482015260640161101c565b50505050505050565b600d5473ffffffffffffffffffffffffffffffffffffffff16612bca576040517f9c5fe32400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60608080612bda84860186613b45565b825192955090935091501580612bf257508151835114155b80612bff57508051835114155b15612c36576040517f0be3632800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600083515a612c45908c6144b6565b612c4f9190614443565b905060005b8451811015611a9d57600d54855173ffffffffffffffffffffffffffffffffffffffff90911690630739e4f190879084908110612c9357612c93614616565b6020026020010151868481518110612cad57612cad614616565b6020026020010151868581518110612cc757612cc7614616565b60200260200101518e8d8f895a6040518963ffffffff1660e01b8152600401612cf7989796959493929190614120565b602060405180830381600087803b158015612d1157600080fd5b505af1925050508015612d5f575060408051601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201909252612d5c91810190613d53565b60015b612e57573d808015612d8d576040519150601f19603f3d011682016040523d82523d6000602084013e612d92565b606091505b50858281518110612da557612da5614616565b60200260200101517fe0b838ffe6ee22a0d3acf19a85db6a41b34a1ab739e2d6c759a2e42d95bdccb282604051612ddc919061422d565b60405180910390a2858281518110612df657612df6614616565b60200260200101517fdc941eddab34a6109ab77798299c6b1f035b125fd6f774d266ecbf9541d630a68c604051612e49919073ffffffffffffffffffffffffffffffffffffffff91909116815260200190565b60405180910390a250613039565b6000816002811115612e6b57612e6b6145b8565b1415612f2957858281518110612e8357612e83614616565b60200260200101517f9e9bc7616d42c2835d05ae617e508454e63b30b934be8aa932ebc125e0e58a6460405160405180910390a2858281518110612ec957612ec9614616565b60200260200101517fdc941eddab34a6109ab77798299c6b1f035b125fd6f774d266ecbf9541d630a68c604051612f1c919073fffffffffffffffffffffff
fffffffffffffffff91909116815260200190565b60405180910390a2613037565b6001816002811115612f3d57612f3d6145b8565b1415612fd657858281518110612f5557612f55614616565b60200260200101517fb2931868c372fe17a25643458add467d60ec5c51125a99b7309f41f5bcd2da6c604051612fbc9060208082526011908201527f6572726f7220696e2063616c6c6261636b000000000000000000000000000000604082015260600190565b60405180910390a2858281518110612ec957612ec9614616565b6002816002811115612fea57612fea6145b8565b14156130375785828151811061300257613002614616565b60200260200101517fa1c120e327c9ad8b075793878c88d59b8934b97ae37117faa3bb21616237f7be60405160405180910390a25b505b8061304381614521565b915050612c54565b5050565b6000808a8a8a8a8a8a8a8a8a60405160200161307399989796959493929190614240565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e01000000000000000000000000000000000000000000000000000000000000179150509998505050505050505050565b73ffffffffffffffffffffffffffffffffffffffff811633141561314a576040517f282010c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8381169182179092556000805460405192936201000090910416917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b73ffffffffffffffffffffffffffffffffffffffff811660009081526001830160205260408120541515612715565b600081815260018301602052604081205480156132df57600061321a6001836144b6565b855490915060009061322e906001906144b6565b905081811461329357600086600001828154811061324e5761324e614616565b906000526020600020015490508087600001848154811061327157613271614616565b6000918252602080832090910192909255918252600188019052604090208390555b85548690806132a4576132a46145e7565b6001900381819060005260206000200160009055905585600101600086815260200190815260200160002060009055600193505050506126d1565b60009150506126d1565b6060816000018
0548060200260200160405190810160405280929190818152602001828054801561333957602002820191906000526020600020905b815481526020019060010190808311613325575b50505050509050919050565b600081815260018301602052604081205461338c575081546001818101845560008481526020808220909301849055845484825282860190935260409020919091556126d1565b5060006126d1565b600054610100900460ff1661342b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e67000000000000000000000000000000000000000000606482015260840161101c565b73ffffffffffffffffffffffffffffffffffffffff8216613478576040517fb6b515fa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000805473ffffffffffffffffffffffffffffffffffffffff80851662010000027fffffffffffffffffffff0000000000000000000000000000000000000000ffff9092169190911790915581161561304b5761304b816130fa565b8280546134e0906144cd565b90600052602060002090601f0160209004810192826135025760008555613566565b82601f10613539578280017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00823516178555613566565b82800160010185558215613566579182015b8281111561356657823582559160200191906001019061354b565b506135729291506135cf565b5090565b508054613582906144cd565b6000825580601f10613592575050565b601f0160209004906000526020600020908101906107cc91906135cf565b604051806103e00160405280601f906020820280368337509192915050565b5b8082111561357257600081556001016135d0565b60008083601f8401126135f657600080fd5b50813567ffffffffffffffff81111561360e57600080fd5b6020830191508360208260051b850101111561362957600080fd5b9250929050565b600082601f83011261364157600080fd5b81356020613656613651836143ba565b61436b565b80838252828201915082860187848660051b890101111561367657600080fd5b60005b8581101561369e57813561368c81614674565b84529284019290840190600101613679565b5090979650505050505050565b600082601f8301126136bc57600080fd5b813560206136cc613651836143ba565b80838252828201915082860
187848660051b89010111156136ec57600080fd5b6000805b8681101561372f57823567ffffffffffffffff81111561370e578283fd5b61371c8b88838d010161377f565b86525093850193918501916001016136f0565b509198975050505050505050565b60008083601f84011261374f57600080fd5b50813567ffffffffffffffff81111561376757600080fd5b60208301915083602082850101111561362957600080fd5b600082601f83011261379057600080fd5b813567ffffffffffffffff8111156137aa576137aa614645565b6137db60207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8401160161436b565b8181528460208386010111156137f057600080fd5b816020850160208301376000918101602001919091529392505050565b803563ffffffff8116811461382157600080fd5b919050565b803567ffffffffffffffff8116811461382157600080fd5b803560ff8116811461382157600080fd5b60006020828403121561386157600080fd5b813561271581614674565b60006020828403121561387e57600080fd5b815161271581614674565b60008060006040848603121561389e57600080fd5b83356138a981614674565b9250602084013567ffffffffffffffff8111156138c557600080fd5b6138d18682870161373d565b9497909650939450505050565b600080602083850312156138f157600080fd5b823567ffffffffffffffff81111561390857600080fd5b613914858286016135e4565b90969095509350505050565b6000602080838503121561393357600080fd5b825167ffffffffffffffff81111561394a57600080fd5b8301601f8101851361395b57600080fd5b8051613969613651826143ba565b80828252848201915084840188868560051b870101111561398957600080fd5b600094505b838510156139b55780516139a181614674565b83526001949094019391850191850161398e565b50979650505050505050565b60008060008060008060c087890312156139da57600080fd5b863567ffffffffffffffff808211156139f257600080fd5b6139fe8a838b01613630565b97506020890135915080821115613a1457600080fd5b613a208a838b01613630565b9650613a2e60408a0161383e565b95506060890135915080821115613a4457600080fd5b613a508a838b0161377f565b9450613a5e60808a01613826565b935060a0890135915080821115613a7457600080fd5b50613a8189828a0161377f565b9150509295509295509295565b60008060008060008060008060e0898b031215613aaa57600080fd5b606089018a811115613abb57600080fd5b899850356
7ffffffffffffffff80821115613ad557600080fd5b613ae18c838d0161373d565b909950975060808b0135915080821115613afa57600080fd5b613b068c838d016135e4565b909750955060a08b0135915080821115613b1f57600080fd5b50613b2c8b828c016135e4565b999c989b50969995989497949560c00135949350505050565b600080600060608486031215613b5a57600080fd5b833567ffffffffffffffff80821115613b7257600080fd5b818601915086601f830112613b8657600080fd5b81356020613b96613651836143ba565b8083825282820191508286018b848660051b8901011115613bb657600080fd5b600096505b84871015613bd9578035835260019690960195918301918301613bbb565b5097505087013592505080821115613bf057600080fd5b613bfc878388016136ab565b93506040860135915080821115613c1257600080fd5b50613c1f868287016136ab565b9150509250925092565b600060208284031215613c3b57600080fd5b5051919050565b60008060208385031215613c5557600080fd5b823567ffffffffffffffff811115613c6c57600080fd5b6139148582860161373d565b600080600083850360a0811215613c8e57600080fd5b843567ffffffffffffffff80821115613ca657600080fd5b613cb28883890161373d565b909650945084915060807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe084011215613cea57600080fd5b60405192506080830191508282108183111715613d0957613d09614645565b50604052613d1960208601613826565b81526040850135613d2981614674565b6020820152613d3a6060860161380d565b6040820152608094909401356060850152509093909250565b600060208284031215613d6557600080fd5b81516003811061271557600080fd5b60008060008060608587031215613d8a57600080fd5b613d9385613826565b9350602085013567ffffffffffffffff811115613daf57600080fd5b613dbb8782880161373d565b9094509250613dce90506040860161380d565b905092959194509250565b600080600080600060808688031215613df157600080fd5b613dfa86613826565b9450602086013567ffffffffffffffff811115613e1657600080fd5b613e228882890161373d565b9095509350613e3590506040870161380d565b949793965091946060013592915050565b600060208284031215613e5857600080fd5b81516bffffffffffffffffffffffff8116811461271557600080fd5b600081518084526020808501945080840160005b83811015613eba57815173ffffffffffffffffffffffffffffffffffffffff1
687529582019590820190600101613e88565b509495945050505050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b6000815180845260005b81811015613f3457602081850181015186830182015201613f18565b81811115613f46576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b8281526060826020830137600060809190910190815292915050565b8183823760009101908152919050565b600073ffffffffffffffffffffffffffffffffffffffff8089168352808816602084015267ffffffffffffffff8716604084015280861660608401525060a06080830152613ff760a083018486613ec5565b98975050505050505050565b6040808252810183905260008460608301825b8681101561405357823561402981614674565b73ffffffffffffffffffffffffffffffffffffffff16825260209283019290910190600101614016565b50809250505073ffffffffffffffffffffffffffffffffffffffff83166020830152949350505050565b6020815260006127156020830184613e74565b6040815260006140a36040830185613e74565b6020838203818501528185518084528284019150828160051b85010183880160005b83811015614111577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08784030185526140ff838351613f0e565b948601949250908501906001016140c5565b50909998505050505050505050565b60006104c08a83526020818185015261413b8285018c613f0e565b9150838203604085015261414f828b613f0e565b925073ffffffffffffffffffffffffffffffffffffffff91508189166060850152608084018860005b601f811015614197578151851683529183019190830190600101614178565b50505050506141ac61046083018660ff169052565b6104808201939093526104a001529695505050505050565b60a0815260006141d860a083018587613ec5565b905067ffffffffffffffff835116602083015273ffffffffffffffffffffffffffffffffffffffff602084015116604083015263ffffffff604084015116606083015260608301516080830152949350505050565b6020815260006127156020830184613f0e565b60006101208b835273ffffffffffffffffffffffffffffffffffffffff8b16602084015267ffffffffffffffff808b1660408501528160608501526142878285018b613e74565b91508382036080850
15261429b828a613e74565b915060ff881660a085015283820360c08501526142b88288613f0e565b90861660e085015283810361010085015290506142d58185613f0e565b9c9b505050505050505050505050565b600061012063ffffffff808d1684528b6020850152808b166040850152508060608401526143158184018a613e74565b905082810360808401526143298189613e74565b905060ff871660a084015282810360c08401526143468187613f0e565b905067ffffffffffffffff851660e08401528281036101008401526142d58185613f0e565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156143b2576143b2614645565b604052919050565b600067ffffffffffffffff8211156143d4576143d4614645565b5060051b60200190565b600082198211156143f1576143f161455a565b500190565b600063ffffffff8083168185168083038211156144155761441561455a565b01949350505050565b600060ff821660ff84168060ff0382111561443b5761443b61455a565b019392505050565b60008261445257614452614589565b500490565b600060ff83168061446a5761446a614589565b8060ff84160491505092915050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156144b1576144b161455a565b500290565b6000828210156144c8576144c861455a565b500390565b600181811c908216806144e157607f821691505b6020821081141561451b577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8214156145535761455361455a565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b73fffff
fffffffffffffffffffffffffffffffffff811681146107cc57600080fdfea164736f6c6343000806000a496e697469616c697a61626c653a20636f6e7472616374206973206e6f742069", +} + +var OCR2DROracleABI = OCR2DROracleMetaData.ABI + +var OCR2DROracleBin = OCR2DROracleMetaData.Bin + +func DeployOCR2DROracle(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *OCR2DROracle, error) { + parsed, err := OCR2DROracleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OCR2DROracleBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OCR2DROracle{OCR2DROracleCaller: OCR2DROracleCaller{contract: contract}, OCR2DROracleTransactor: OCR2DROracleTransactor{contract: contract}, OCR2DROracleFilterer: OCR2DROracleFilterer{contract: contract}}, nil +} + +type OCR2DROracle struct { + address common.Address + abi abi.ABI + OCR2DROracleCaller + OCR2DROracleTransactor + OCR2DROracleFilterer +} + +type OCR2DROracleCaller struct { + contract *bind.BoundContract +} + +type OCR2DROracleTransactor struct { + contract *bind.BoundContract +} + +type OCR2DROracleFilterer struct { + contract *bind.BoundContract +} + +type OCR2DROracleSession struct { + Contract *OCR2DROracle + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OCR2DROracleCallerSession struct { + Contract *OCR2DROracleCaller + CallOpts bind.CallOpts +} + +type OCR2DROracleTransactorSession struct { + Contract *OCR2DROracleTransactor + TransactOpts bind.TransactOpts +} + +type OCR2DROracleRaw struct { + Contract *OCR2DROracle +} + +type OCR2DROracleCallerRaw struct { + Contract *OCR2DROracleCaller +} + +type OCR2DROracleTransactorRaw struct { + Contract *OCR2DROracleTransactor +} + +func NewOCR2DROracle(address common.Address, backend 
bind.ContractBackend) (*OCR2DROracle, error) { + abi, err := abi.JSON(strings.NewReader(OCR2DROracleABI)) + if err != nil { + return nil, err + } + contract, err := bindOCR2DROracle(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OCR2DROracle{address: address, abi: abi, OCR2DROracleCaller: OCR2DROracleCaller{contract: contract}, OCR2DROracleTransactor: OCR2DROracleTransactor{contract: contract}, OCR2DROracleFilterer: OCR2DROracleFilterer{contract: contract}}, nil +} + +func NewOCR2DROracleCaller(address common.Address, caller bind.ContractCaller) (*OCR2DROracleCaller, error) { + contract, err := bindOCR2DROracle(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OCR2DROracleCaller{contract: contract}, nil +} + +func NewOCR2DROracleTransactor(address common.Address, transactor bind.ContractTransactor) (*OCR2DROracleTransactor, error) { + contract, err := bindOCR2DROracle(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OCR2DROracleTransactor{contract: contract}, nil +} + +func NewOCR2DROracleFilterer(address common.Address, filterer bind.ContractFilterer) (*OCR2DROracleFilterer, error) { + contract, err := bindOCR2DROracle(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OCR2DROracleFilterer{contract: contract}, nil +} + +func bindOCR2DROracle(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OCR2DROracleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OCR2DROracle *OCR2DROracleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DROracle.Contract.OCR2DROracleCaller.contract.Call(opts, result, method, params...) 
+} + +func (_OCR2DROracle *OCR2DROracleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DROracle.Contract.OCR2DROracleTransactor.contract.Transfer(opts) +} + +func (_OCR2DROracle *OCR2DROracleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DROracle.Contract.OCR2DROracleTransactor.contract.Transact(opts, method, params...) +} + +func (_OCR2DROracle *OCR2DROracleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DROracle.Contract.contract.Call(opts, result, method, params...) +} + +func (_OCR2DROracle *OCR2DROracleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DROracle.Contract.contract.Transfer(opts) +} + +func (_OCR2DROracle *OCR2DROracleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DROracle.Contract.contract.Transact(opts, method, params...) 
+} + +func (_OCR2DROracle *OCR2DROracleCaller) AuthorizedReceiverActive(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "authorizedReceiverActive") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) AuthorizedReceiverActive() (bool, error) { + return _OCR2DROracle.Contract.AuthorizedReceiverActive(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) AuthorizedReceiverActive() (bool, error) { + return _OCR2DROracle.Contract.AuthorizedReceiverActive(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) EstimateCost(opts *bind.CallOpts, subscriptionId uint64, data []byte, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "estimateCost", subscriptionId, data, gasLimit, gasPrice) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) EstimateCost(subscriptionId uint64, data []byte, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + return _OCR2DROracle.Contract.EstimateCost(&_OCR2DROracle.CallOpts, subscriptionId, data, gasLimit, gasPrice) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) EstimateCost(subscriptionId uint64, data []byte, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) { + return _OCR2DROracle.Contract.EstimateCost(&_OCR2DROracle.CallOpts, subscriptionId, data, gasLimit, gasPrice) +} + +func (_OCR2DROracle *OCR2DROracleCaller) GetAllNodePublicKeys(opts *bind.CallOpts) ([]common.Address, [][]byte, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "getAllNodePublicKeys") + + if err != nil { + return *new([]common.Address), *new([][]byte), err + } + + out0 := 
*abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + out1 := *abi.ConvertType(out[1], new([][]byte)).(*[][]byte) + + return out0, out1, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) GetAllNodePublicKeys() ([]common.Address, [][]byte, error) { + return _OCR2DROracle.Contract.GetAllNodePublicKeys(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) GetAllNodePublicKeys() ([]common.Address, [][]byte, error) { + return _OCR2DROracle.Contract.GetAllNodePublicKeys(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "getAuthorizedSenders") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) GetAuthorizedSenders() ([]common.Address, error) { + return _OCR2DROracle.Contract.GetAuthorizedSenders(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) GetAuthorizedSenders() ([]common.Address, error) { + return _OCR2DROracle.Contract.GetAuthorizedSenders(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "getDONPublicKey") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) GetDONPublicKey() ([]byte, error) { + return _OCR2DROracle.Contract.GetDONPublicKey(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) GetDONPublicKey() ([]byte, error) { + return _OCR2DROracle.Contract.GetDONPublicKey(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) GetRegistry(opts 
*bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "getRegistry") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) GetRegistry() (common.Address, error) { + return _OCR2DROracle.Contract.GetRegistry(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) GetRegistry() (common.Address, error) { + return _OCR2DROracle.Contract.GetRegistry(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) GetRequiredFee(opts *bind.CallOpts, arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "getRequiredFee", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) GetRequiredFee(arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) { + return _OCR2DROracle.Contract.GetRequiredFee(&_OCR2DROracle.CallOpts, arg0, arg1) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) GetRequiredFee(arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) { + return _OCR2DROracle.Contract.GetRequiredFee(&_OCR2DROracle.CallOpts, arg0, arg1) +} + +func (_OCR2DROracle *OCR2DROracleCaller) GetThresholdPublicKey(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "getThresholdPublicKey") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) GetThresholdPublicKey() ([]byte, error) { + return _OCR2DROracle.Contract.GetThresholdPublicKey(&_OCR2DROracle.CallOpts) +} + +func 
(_OCR2DROracle *OCR2DROracleCallerSession) GetThresholdPublicKey() ([]byte, error) { + return _OCR2DROracle.Contract.GetThresholdPublicKey(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "isAuthorizedSender", sender) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _OCR2DROracle.Contract.IsAuthorizedSender(&_OCR2DROracle.CallOpts, sender) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _OCR2DROracle.Contract.IsAuthorizedSender(&_OCR2DROracle.CallOpts, sender) +} + +func (_OCR2DROracle *OCR2DROracleCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _OCR2DROracle.Contract.LatestConfigDetails(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _OCR2DROracle.Contract.LatestConfigDetails(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) { + var out 
[]interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "latestConfigDigestAndEpoch") + + outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _OCR2DROracle.Contract.LatestConfigDigestAndEpoch(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _OCR2DROracle.Contract.LatestConfigDigestAndEpoch(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) Owner() (common.Address, error) { + return _OCR2DROracle.Contract.Owner(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) Owner() (common.Address, error) { + return _OCR2DROracle.Contract.Owner(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) Transmitters(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "transmitters") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) Transmitters() ([]common.Address, error) { + return 
_OCR2DROracle.Contract.Transmitters(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) Transmitters() ([]common.Address, error) { + return _OCR2DROracle.Contract.Transmitters(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _OCR2DROracle.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_OCR2DROracle *OCR2DROracleSession) TypeAndVersion() (string, error) { + return _OCR2DROracle.Contract.TypeAndVersion(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleCallerSession) TypeAndVersion() (string, error) { + return _OCR2DROracle.Contract.TypeAndVersion(&_OCR2DROracle.CallOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "acceptOwnership") +} + +func (_OCR2DROracle *OCR2DROracleSession) AcceptOwnership() (*types.Transaction, error) { + return _OCR2DROracle.Contract.AcceptOwnership(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _OCR2DROracle.Contract.AcceptOwnership(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) ActivateAuthorizedReceiver(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "activateAuthorizedReceiver") +} + +func (_OCR2DROracle *OCR2DROracleSession) ActivateAuthorizedReceiver() (*types.Transaction, error) { + return _OCR2DROracle.Contract.ActivateAuthorizedReceiver(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) ActivateAuthorizedReceiver() (*types.Transaction, error) { + return 
_OCR2DROracle.Contract.ActivateAuthorizedReceiver(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) AddAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "addAuthorizedSenders", senders) +} + +func (_OCR2DROracle *OCR2DROracleSession) AddAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.AddAuthorizedSenders(&_OCR2DROracle.TransactOpts, senders) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) AddAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.AddAuthorizedSenders(&_OCR2DROracle.TransactOpts, senders) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) DeactivateAuthorizedReceiver(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "deactivateAuthorizedReceiver") +} + +func (_OCR2DROracle *OCR2DROracleSession) DeactivateAuthorizedReceiver() (*types.Transaction, error) { + return _OCR2DROracle.Contract.DeactivateAuthorizedReceiver(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) DeactivateAuthorizedReceiver() (*types.Transaction, error) { + return _OCR2DROracle.Contract.DeactivateAuthorizedReceiver(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) DeleteNodePublicKey(opts *bind.TransactOpts, node common.Address) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "deleteNodePublicKey", node) +} + +func (_OCR2DROracle *OCR2DROracleSession) DeleteNodePublicKey(node common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.DeleteNodePublicKey(&_OCR2DROracle.TransactOpts, node) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) DeleteNodePublicKey(node common.Address) (*types.Transaction, error) { + return 
_OCR2DROracle.Contract.DeleteNodePublicKey(&_OCR2DROracle.TransactOpts, node) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) Initialize(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "initialize") +} + +func (_OCR2DROracle *OCR2DROracleSession) Initialize() (*types.Transaction, error) { + return _OCR2DROracle.Contract.Initialize(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) Initialize() (*types.Transaction, error) { + return _OCR2DROracle.Contract.Initialize(&_OCR2DROracle.TransactOpts) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) RemoveAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "removeAuthorizedSenders", senders) +} + +func (_OCR2DROracle *OCR2DROracleSession) RemoveAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.RemoveAuthorizedSenders(&_OCR2DROracle.TransactOpts, senders) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) RemoveAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.RemoveAuthorizedSenders(&_OCR2DROracle.TransactOpts, senders) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) SendRequest(opts *bind.TransactOpts, subscriptionId uint64, data []byte, gasLimit uint32) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "sendRequest", subscriptionId, data, gasLimit) +} + +func (_OCR2DROracle *OCR2DROracleSession) SendRequest(subscriptionId uint64, data []byte, gasLimit uint32) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SendRequest(&_OCR2DROracle.TransactOpts, subscriptionId, data, gasLimit) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) SendRequest(subscriptionId uint64, data []byte, gasLimit uint32) (*types.Transaction, error) { + return 
_OCR2DROracle.Contract.SendRequest(&_OCR2DROracle.TransactOpts, subscriptionId, data, gasLimit) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "setConfig", _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_OCR2DROracle *OCR2DROracleSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetConfig(&_OCR2DROracle.TransactOpts, _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetConfig(&_OCR2DROracle.TransactOpts, _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) SetDONPublicKey(opts *bind.TransactOpts, donPublicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "setDONPublicKey", donPublicKey) +} + +func (_OCR2DROracle *OCR2DROracleSession) SetDONPublicKey(donPublicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetDONPublicKey(&_OCR2DROracle.TransactOpts, donPublicKey) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) SetDONPublicKey(donPublicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetDONPublicKey(&_OCR2DROracle.TransactOpts, donPublicKey) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) 
SetNodePublicKey(opts *bind.TransactOpts, node common.Address, publicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "setNodePublicKey", node, publicKey) +} + +func (_OCR2DROracle *OCR2DROracleSession) SetNodePublicKey(node common.Address, publicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetNodePublicKey(&_OCR2DROracle.TransactOpts, node, publicKey) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) SetNodePublicKey(node common.Address, publicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetNodePublicKey(&_OCR2DROracle.TransactOpts, node, publicKey) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) SetRegistry(opts *bind.TransactOpts, registryAddress common.Address) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "setRegistry", registryAddress) +} + +func (_OCR2DROracle *OCR2DROracleSession) SetRegistry(registryAddress common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetRegistry(&_OCR2DROracle.TransactOpts, registryAddress) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) SetRegistry(registryAddress common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetRegistry(&_OCR2DROracle.TransactOpts, registryAddress) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) SetThresholdPublicKey(opts *bind.TransactOpts, thresholdPublicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "setThresholdPublicKey", thresholdPublicKey) +} + +func (_OCR2DROracle *OCR2DROracleSession) SetThresholdPublicKey(thresholdPublicKey []byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.SetThresholdPublicKey(&_OCR2DROracle.TransactOpts, thresholdPublicKey) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) SetThresholdPublicKey(thresholdPublicKey []byte) (*types.Transaction, error) { + return 
_OCR2DROracle.Contract.SetThresholdPublicKey(&_OCR2DROracle.TransactOpts, thresholdPublicKey) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "transferOwnership", to) +} + +func (_OCR2DROracle *OCR2DROracleSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.TransferOwnership(&_OCR2DROracle.TransactOpts, to) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _OCR2DROracle.Contract.TransferOwnership(&_OCR2DROracle.TransactOpts, to) +} + +func (_OCR2DROracle *OCR2DROracleTransactor) Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _OCR2DROracle.contract.Transact(opts, "transmit", reportContext, report, rs, ss, rawVs) +} + +func (_OCR2DROracle *OCR2DROracleSession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.Transmit(&_OCR2DROracle.TransactOpts, reportContext, report, rs, ss, rawVs) +} + +func (_OCR2DROracle *OCR2DROracleTransactorSession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _OCR2DROracle.Contract.Transmit(&_OCR2DROracle.TransactOpts, reportContext, report, rs, ss, rawVs) +} + +type OCR2DROracleAuthorizedSendersActiveIterator struct { + Event *OCR2DROracleAuthorizedSendersActive + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleAuthorizedSendersActiveIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + 
it.Event = new(OCR2DROracleAuthorizedSendersActive) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleAuthorizedSendersActive) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleAuthorizedSendersActiveIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleAuthorizedSendersActiveIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleAuthorizedSendersActive struct { + Account common.Address + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterAuthorizedSendersActive(opts *bind.FilterOpts) (*OCR2DROracleAuthorizedSendersActiveIterator, error) { + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "AuthorizedSendersActive") + if err != nil { + return nil, err + } + return &OCR2DROracleAuthorizedSendersActiveIterator{contract: _OCR2DROracle.contract, event: "AuthorizedSendersActive", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchAuthorizedSendersActive(opts *bind.WatchOpts, sink chan<- *OCR2DROracleAuthorizedSendersActive) (event.Subscription, error) { + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "AuthorizedSendersActive") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleAuthorizedSendersActive) + if err := _OCR2DROracle.contract.UnpackLog(event, "AuthorizedSendersActive", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + 
case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseAuthorizedSendersActive(log types.Log) (*OCR2DROracleAuthorizedSendersActive, error) { + event := new(OCR2DROracleAuthorizedSendersActive) + if err := _OCR2DROracle.contract.UnpackLog(event, "AuthorizedSendersActive", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleAuthorizedSendersChangedIterator struct { + Event *OCR2DROracleAuthorizedSendersChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleAuthorizedSendersChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleAuthorizedSendersChangedIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleAuthorizedSendersChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleAuthorizedSendersChanged struct { + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*OCR2DROracleAuthorizedSendersChangedIterator, error) { + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, 
"AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return &OCR2DROracleAuthorizedSendersChangedIterator{contract: _OCR2DROracle.contract, event: "AuthorizedSendersChanged", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *OCR2DROracleAuthorizedSendersChanged) (event.Subscription, error) { + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleAuthorizedSendersChanged) + if err := _OCR2DROracle.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseAuthorizedSendersChanged(log types.Log) (*OCR2DROracleAuthorizedSendersChanged, error) { + event := new(OCR2DROracleAuthorizedSendersChanged) + if err := _OCR2DROracle.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleAuthorizedSendersDeactiveIterator struct { + Event *OCR2DROracleAuthorizedSendersDeactive + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleAuthorizedSendersDeactiveIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleAuthorizedSendersDeactive) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + 
return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleAuthorizedSendersDeactive) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleAuthorizedSendersDeactiveIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleAuthorizedSendersDeactiveIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleAuthorizedSendersDeactive struct { + Account common.Address + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterAuthorizedSendersDeactive(opts *bind.FilterOpts) (*OCR2DROracleAuthorizedSendersDeactiveIterator, error) { + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "AuthorizedSendersDeactive") + if err != nil { + return nil, err + } + return &OCR2DROracleAuthorizedSendersDeactiveIterator{contract: _OCR2DROracle.contract, event: "AuthorizedSendersDeactive", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchAuthorizedSendersDeactive(opts *bind.WatchOpts, sink chan<- *OCR2DROracleAuthorizedSendersDeactive) (event.Subscription, error) { + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "AuthorizedSendersDeactive") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleAuthorizedSendersDeactive) + if err := _OCR2DROracle.contract.UnpackLog(event, "AuthorizedSendersDeactive", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle 
*OCR2DROracleFilterer) ParseAuthorizedSendersDeactive(log types.Log) (*OCR2DROracleAuthorizedSendersDeactive, error) { + event := new(OCR2DROracleAuthorizedSendersDeactive) + if err := _OCR2DROracle.contract.UnpackLog(event, "AuthorizedSendersDeactive", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleConfigSetIterator struct { + Event *OCR2DROracleConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleConfigSetIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterConfigSet(opts *bind.FilterOpts) (*OCR2DROracleConfigSetIterator, error) { + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &OCR2DROracleConfigSetIterator{contract: 
_OCR2DROracle.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OCR2DROracleConfigSet) (event.Subscription, error) { + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleConfigSet) + if err := _OCR2DROracle.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseConfigSet(log types.Log) (*OCR2DROracleConfigSet, error) { + event := new(OCR2DROracleConfigSet) + if err := _OCR2DROracle.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleInitializedIterator struct { + Event *OCR2DROracleInitialized + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleInitializedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + 
return it.Next() + } +} + +func (it *OCR2DROracleInitializedIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleInitializedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleInitialized struct { + Version uint8 + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterInitialized(opts *bind.FilterOpts) (*OCR2DROracleInitializedIterator, error) { + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return &OCR2DROracleInitializedIterator{contract: _OCR2DROracle.contract, event: "Initialized", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchInitialized(opts *bind.WatchOpts, sink chan<- *OCR2DROracleInitialized) (event.Subscription, error) { + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleInitialized) + if err := _OCR2DROracle.contract.UnpackLog(event, "Initialized", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseInitialized(log types.Log) (*OCR2DROracleInitialized, error) { + event := new(OCR2DROracleInitialized) + if err := _OCR2DROracle.contract.UnpackLog(event, "Initialized", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleInvalidRequestIDIterator struct { + Event *OCR2DROracleInvalidRequestID + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleInvalidRequestIDIterator) 
Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleInvalidRequestID) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleInvalidRequestID) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleInvalidRequestIDIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleInvalidRequestIDIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleInvalidRequestID struct { + RequestId [32]byte + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterInvalidRequestID(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleInvalidRequestIDIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "InvalidRequestID", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DROracleInvalidRequestIDIterator{contract: _OCR2DROracle.contract, event: "InvalidRequestID", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchInvalidRequestID(opts *bind.WatchOpts, sink chan<- *OCR2DROracleInvalidRequestID, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "InvalidRequestID", requestIdRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleInvalidRequestID) + if err := _OCR2DROracle.contract.UnpackLog(event, "InvalidRequestID", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseInvalidRequestID(log types.Log) (*OCR2DROracleInvalidRequestID, error) { + event := new(OCR2DROracleInvalidRequestID) + if err := _OCR2DROracle.contract.UnpackLog(event, "InvalidRequestID", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleOracleRequestIterator struct { + Event *OCR2DROracleOracleRequest + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleOracleRequestIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleOracleRequestIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleOracleRequestIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleOracleRequest struct { + RequestId [32]byte + RequestingContract 
common.Address + RequestInitiator common.Address + SubscriptionId uint64 + SubscriptionOwner common.Address + Data []byte + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterOracleRequest(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleOracleRequestIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "OracleRequest", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DROracleOracleRequestIterator{contract: _OCR2DROracle.contract, event: "OracleRequest", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchOracleRequest(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOracleRequest, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "OracleRequest", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleOracleRequest) + if err := _OCR2DROracle.contract.UnpackLog(event, "OracleRequest", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseOracleRequest(log types.Log) (*OCR2DROracleOracleRequest, error) { + event := new(OCR2DROracleOracleRequest) + if err := _OCR2DROracle.contract.UnpackLog(event, "OracleRequest", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
OCR2DROracleOracleResponseIterator struct { + Event *OCR2DROracleOracleResponse + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleOracleResponseIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOracleResponse) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOracleResponse) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleOracleResponseIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleOracleResponseIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleOracleResponse struct { + RequestId [32]byte + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterOracleResponse(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleOracleResponseIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "OracleResponse", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DROracleOracleResponseIterator{contract: _OCR2DROracle.contract, event: "OracleResponse", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchOracleResponse(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOracleResponse, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range 
requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "OracleResponse", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleOracleResponse) + if err := _OCR2DROracle.contract.UnpackLog(event, "OracleResponse", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseOracleResponse(log types.Log) (*OCR2DROracleOracleResponse, error) { + event := new(OCR2DROracleOracleResponse) + if err := _OCR2DROracle.contract.UnpackLog(event, "OracleResponse", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleOwnershipTransferRequestedIterator struct { + Event *OCR2DROracleOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + 
+func (it *OCR2DROracleOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DROracleOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &OCR2DROracleOwnershipTransferRequestedIterator{contract: _OCR2DROracle.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleOwnershipTransferRequested) + if err := _OCR2DROracle.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseOwnershipTransferRequested(log types.Log) (*OCR2DROracleOwnershipTransferRequested, error) { + event := new(OCR2DROracleOwnershipTransferRequested) + if err := _OCR2DROracle.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleOwnershipTransferredIterator struct { + Event *OCR2DROracleOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DROracleOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for 
_, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &OCR2DROracleOwnershipTransferredIterator{contract: _OCR2DROracle.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleOwnershipTransferred) + if err := _OCR2DROracle.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseOwnershipTransferred(log types.Log) (*OCR2DROracleOwnershipTransferred, error) { + event := new(OCR2DROracleOwnershipTransferred) + if err := _OCR2DROracle.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleResponseTransmittedIterator struct { + Event *OCR2DROracleResponseTransmitted + + contract *bind.BoundContract 
+ event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleResponseTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleResponseTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleResponseTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleResponseTransmittedIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleResponseTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleResponseTransmitted struct { + RequestId [32]byte + Transmitter common.Address + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterResponseTransmitted(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleResponseTransmittedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "ResponseTransmitted", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DROracleResponseTransmittedIterator{contract: _OCR2DROracle.contract, event: "ResponseTransmitted", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchResponseTransmitted(opts *bind.WatchOpts, sink chan<- *OCR2DROracleResponseTransmitted, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + 
requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "ResponseTransmitted", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleResponseTransmitted) + if err := _OCR2DROracle.contract.UnpackLog(event, "ResponseTransmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseResponseTransmitted(log types.Log) (*OCR2DROracleResponseTransmitted, error) { + event := new(OCR2DROracleResponseTransmitted) + if err := _OCR2DROracle.contract.UnpackLog(event, "ResponseTransmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleTransmittedIterator struct { + Event *OCR2DROracleTransmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleTransmittedIterator) Error() error 
{ + return it.fail +} + +func (it *OCR2DROracleTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleTransmitted struct { + ConfigDigest [32]byte + Epoch uint32 + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterTransmitted(opts *bind.FilterOpts) (*OCR2DROracleTransmittedIterator, error) { + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return &OCR2DROracleTransmittedIterator{contract: _OCR2DROracle.contract, event: "Transmitted", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchTransmitted(opts *bind.WatchOpts, sink chan<- *OCR2DROracleTransmitted) (event.Subscription, error) { + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleTransmitted) + if err := _OCR2DROracle.contract.UnpackLog(event, "Transmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseTransmitted(log types.Log) (*OCR2DROracleTransmitted, error) { + event := new(OCR2DROracleTransmitted) + if err := _OCR2DROracle.contract.UnpackLog(event, "Transmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleUserCallbackErrorIterator struct { + Event *OCR2DROracleUserCallbackError + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleUserCallbackErrorIterator) Next() bool { + + if it.fail != nil { + return false + } + + if 
it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleUserCallbackError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleUserCallbackError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleUserCallbackErrorIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleUserCallbackErrorIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleUserCallbackError struct { + RequestId [32]byte + Reason string + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterUserCallbackError(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleUserCallbackErrorIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "UserCallbackError", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DROracleUserCallbackErrorIterator{contract: _OCR2DROracle.contract, event: "UserCallbackError", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchUserCallbackError(opts *bind.WatchOpts, sink chan<- *OCR2DROracleUserCallbackError, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "UserCallbackError", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error 
{ + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleUserCallbackError) + if err := _OCR2DROracle.contract.UnpackLog(event, "UserCallbackError", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseUserCallbackError(log types.Log) (*OCR2DROracleUserCallbackError, error) { + event := new(OCR2DROracleUserCallbackError) + if err := _OCR2DROracle.contract.UnpackLog(event, "UserCallbackError", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DROracleUserCallbackRawErrorIterator struct { + Event *OCR2DROracleUserCallbackRawError + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DROracleUserCallbackRawErrorIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleUserCallbackRawError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DROracleUserCallbackRawError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DROracleUserCallbackRawErrorIterator) Error() error { + return it.fail +} + +func (it *OCR2DROracleUserCallbackRawErrorIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DROracleUserCallbackRawError struct { + RequestId [32]byte + LowLevelData 
[]byte + Raw types.Log +} + +func (_OCR2DROracle *OCR2DROracleFilterer) FilterUserCallbackRawError(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleUserCallbackRawErrorIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.FilterLogs(opts, "UserCallbackRawError", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DROracleUserCallbackRawErrorIterator{contract: _OCR2DROracle.contract, event: "UserCallbackRawError", logs: logs, sub: sub}, nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) WatchUserCallbackRawError(opts *bind.WatchOpts, sink chan<- *OCR2DROracleUserCallbackRawError, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DROracle.contract.WatchLogs(opts, "UserCallbackRawError", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DROracleUserCallbackRawError) + if err := _OCR2DROracle.contract.UnpackLog(event, "UserCallbackRawError", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DROracle *OCR2DROracleFilterer) ParseUserCallbackRawError(log types.Log) (*OCR2DROracleUserCallbackRawError, error) { + event := new(OCR2DROracleUserCallbackRawError) + if err := _OCR2DROracle.contract.UnpackLog(event, "UserCallbackRawError", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LatestConfigDetails struct { + ConfigCount 
uint32 + BlockNumber uint32 + ConfigDigest [32]byte +} +type LatestConfigDigestAndEpoch struct { + ScanLogs bool + ConfigDigest [32]byte + Epoch uint32 +} + +func (_OCR2DROracle *OCR2DROracle) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _OCR2DROracle.abi.Events["AuthorizedSendersActive"].ID: + return _OCR2DROracle.ParseAuthorizedSendersActive(log) + case _OCR2DROracle.abi.Events["AuthorizedSendersChanged"].ID: + return _OCR2DROracle.ParseAuthorizedSendersChanged(log) + case _OCR2DROracle.abi.Events["AuthorizedSendersDeactive"].ID: + return _OCR2DROracle.ParseAuthorizedSendersDeactive(log) + case _OCR2DROracle.abi.Events["ConfigSet"].ID: + return _OCR2DROracle.ParseConfigSet(log) + case _OCR2DROracle.abi.Events["Initialized"].ID: + return _OCR2DROracle.ParseInitialized(log) + case _OCR2DROracle.abi.Events["InvalidRequestID"].ID: + return _OCR2DROracle.ParseInvalidRequestID(log) + case _OCR2DROracle.abi.Events["OracleRequest"].ID: + return _OCR2DROracle.ParseOracleRequest(log) + case _OCR2DROracle.abi.Events["OracleResponse"].ID: + return _OCR2DROracle.ParseOracleResponse(log) + case _OCR2DROracle.abi.Events["OwnershipTransferRequested"].ID: + return _OCR2DROracle.ParseOwnershipTransferRequested(log) + case _OCR2DROracle.abi.Events["OwnershipTransferred"].ID: + return _OCR2DROracle.ParseOwnershipTransferred(log) + case _OCR2DROracle.abi.Events["ResponseTransmitted"].ID: + return _OCR2DROracle.ParseResponseTransmitted(log) + case _OCR2DROracle.abi.Events["Transmitted"].ID: + return _OCR2DROracle.ParseTransmitted(log) + case _OCR2DROracle.abi.Events["UserCallbackError"].ID: + return _OCR2DROracle.ParseUserCallbackError(log) + case _OCR2DROracle.abi.Events["UserCallbackRawError"].ID: + return _OCR2DROracle.ParseUserCallbackRawError(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OCR2DROracleAuthorizedSendersActive) Topic() common.Hash { + return 
common.HexToHash("0xae51766a982895b0c444fc99fc1a560762b464d709e6c78376c85617f7eeb5ce") +} + +func (OCR2DROracleAuthorizedSendersChanged) Topic() common.Hash { + return common.HexToHash("0xf263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0") +} + +func (OCR2DROracleAuthorizedSendersDeactive) Topic() common.Hash { + return common.HexToHash("0xea3828816a323b8d7ff49d755efd105e7719166d6c76fad97a28eee5eccc3d9a") +} + +func (OCR2DROracleConfigSet) Topic() common.Hash { + return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05") +} + +func (OCR2DROracleInitialized) Topic() common.Hash { + return common.HexToHash("0x7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498") +} + +func (OCR2DROracleInvalidRequestID) Topic() common.Hash { + return common.HexToHash("0xa1c120e327c9ad8b075793878c88d59b8934b97ae37117faa3bb21616237f7be") +} + +func (OCR2DROracleOracleRequest) Topic() common.Hash { + return common.HexToHash("0xa1ec73989d79578cd6f67d4f593ac3e0a4d1020e5c0164db52108d7ff785406c") +} + +func (OCR2DROracleOracleResponse) Topic() common.Hash { + return common.HexToHash("0x9e9bc7616d42c2835d05ae617e508454e63b30b934be8aa932ebc125e0e58a64") +} + +func (OCR2DROracleOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (OCR2DROracleOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (OCR2DROracleResponseTransmitted) Topic() common.Hash { + return common.HexToHash("0xdc941eddab34a6109ab77798299c6b1f035b125fd6f774d266ecbf9541d630a6") +} + +func (OCR2DROracleTransmitted) Topic() common.Hash { + return common.HexToHash("0xb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62") +} + +func (OCR2DROracleUserCallbackError) Topic() common.Hash { + return 
common.HexToHash("0xb2931868c372fe17a25643458add467d60ec5c51125a99b7309f41f5bcd2da6c") +} + +func (OCR2DROracleUserCallbackRawError) Topic() common.Hash { + return common.HexToHash("0xe0b838ffe6ee22a0d3acf19a85db6a41b34a1ab739e2d6c759a2e42d95bdccb2") +} + +func (_OCR2DROracle *OCR2DROracle) Address() common.Address { + return _OCR2DROracle.address +} + +type OCR2DROracleInterface interface { + AuthorizedReceiverActive(opts *bind.CallOpts) (bool, error) + + EstimateCost(opts *bind.CallOpts, subscriptionId uint64, data []byte, gasLimit uint32, gasPrice *big.Int) (*big.Int, error) + + GetAllNodePublicKeys(opts *bind.CallOpts) ([]common.Address, [][]byte, error) + + GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) + + GetDONPublicKey(opts *bind.CallOpts) ([]byte, error) + + GetRegistry(opts *bind.CallOpts) (common.Address, error) + + GetRequiredFee(opts *bind.CallOpts, arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) + + GetThresholdPublicKey(opts *bind.CallOpts) ([]byte, error) + + IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) + + LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) + + LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Transmitters(opts *bind.CallOpts) ([]common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + ActivateAuthorizedReceiver(opts *bind.TransactOpts) (*types.Transaction, error) + + AddAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) + + DeactivateAuthorizedReceiver(opts *bind.TransactOpts) (*types.Transaction, error) + + DeleteNodePublicKey(opts *bind.TransactOpts, node common.Address) (*types.Transaction, error) + + Initialize(opts *bind.TransactOpts) (*types.Transaction, error) + + 
RemoveAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) + + SendRequest(opts *bind.TransactOpts, subscriptionId uint64, data []byte, gasLimit uint32) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) + + SetDONPublicKey(opts *bind.TransactOpts, donPublicKey []byte) (*types.Transaction, error) + + SetNodePublicKey(opts *bind.TransactOpts, node common.Address, publicKey []byte) (*types.Transaction, error) + + SetRegistry(opts *bind.TransactOpts, registryAddress common.Address) (*types.Transaction, error) + + SetThresholdPublicKey(opts *bind.TransactOpts, thresholdPublicKey []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) + + FilterAuthorizedSendersActive(opts *bind.FilterOpts) (*OCR2DROracleAuthorizedSendersActiveIterator, error) + + WatchAuthorizedSendersActive(opts *bind.WatchOpts, sink chan<- *OCR2DROracleAuthorizedSendersActive) (event.Subscription, error) + + ParseAuthorizedSendersActive(log types.Log) (*OCR2DROracleAuthorizedSendersActive, error) + + FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*OCR2DROracleAuthorizedSendersChangedIterator, error) + + WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *OCR2DROracleAuthorizedSendersChanged) (event.Subscription, error) + + ParseAuthorizedSendersChanged(log types.Log) (*OCR2DROracleAuthorizedSendersChanged, error) + + FilterAuthorizedSendersDeactive(opts *bind.FilterOpts) (*OCR2DROracleAuthorizedSendersDeactiveIterator, error) + + WatchAuthorizedSendersDeactive(opts *bind.WatchOpts, sink chan<- 
*OCR2DROracleAuthorizedSendersDeactive) (event.Subscription, error) + + ParseAuthorizedSendersDeactive(log types.Log) (*OCR2DROracleAuthorizedSendersDeactive, error) + + FilterConfigSet(opts *bind.FilterOpts) (*OCR2DROracleConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OCR2DROracleConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*OCR2DROracleConfigSet, error) + + FilterInitialized(opts *bind.FilterOpts) (*OCR2DROracleInitializedIterator, error) + + WatchInitialized(opts *bind.WatchOpts, sink chan<- *OCR2DROracleInitialized) (event.Subscription, error) + + ParseInitialized(log types.Log) (*OCR2DROracleInitialized, error) + + FilterInvalidRequestID(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleInvalidRequestIDIterator, error) + + WatchInvalidRequestID(opts *bind.WatchOpts, sink chan<- *OCR2DROracleInvalidRequestID, requestId [][32]byte) (event.Subscription, error) + + ParseInvalidRequestID(log types.Log) (*OCR2DROracleInvalidRequestID, error) + + FilterOracleRequest(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleOracleRequestIterator, error) + + WatchOracleRequest(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOracleRequest, requestId [][32]byte) (event.Subscription, error) + + ParseOracleRequest(log types.Log) (*OCR2DROracleOracleRequest, error) + + FilterOracleResponse(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleOracleResponseIterator, error) + + WatchOracleResponse(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOracleResponse, requestId [][32]byte) (event.Subscription, error) + + ParseOracleResponse(log types.Log) (*OCR2DROracleOracleResponse, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DROracleOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOwnershipTransferRequested, from []common.Address, to []common.Address) 
(event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*OCR2DROracleOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DROracleOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DROracleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*OCR2DROracleOwnershipTransferred, error) + + FilterResponseTransmitted(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleResponseTransmittedIterator, error) + + WatchResponseTransmitted(opts *bind.WatchOpts, sink chan<- *OCR2DROracleResponseTransmitted, requestId [][32]byte) (event.Subscription, error) + + ParseResponseTransmitted(log types.Log) (*OCR2DROracleResponseTransmitted, error) + + FilterTransmitted(opts *bind.FilterOpts) (*OCR2DROracleTransmittedIterator, error) + + WatchTransmitted(opts *bind.WatchOpts, sink chan<- *OCR2DROracleTransmitted) (event.Subscription, error) + + ParseTransmitted(log types.Log) (*OCR2DROracleTransmitted, error) + + FilterUserCallbackError(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleUserCallbackErrorIterator, error) + + WatchUserCallbackError(opts *bind.WatchOpts, sink chan<- *OCR2DROracleUserCallbackError, requestId [][32]byte) (event.Subscription, error) + + ParseUserCallbackError(log types.Log) (*OCR2DROracleUserCallbackError, error) + + FilterUserCallbackRawError(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DROracleUserCallbackRawErrorIterator, error) + + WatchUserCallbackRawError(opts *bind.WatchOpts, sink chan<- *OCR2DROracleUserCallbackRawError, requestId [][32]byte) (event.Subscription, error) + + ParseUserCallbackRawError(log types.Log) (*OCR2DROracleUserCallbackRawError, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git 
a/core/gethwrappers/generated/ocr2dr_registry/ocr2dr_registry.go b/core/gethwrappers/generated/ocr2dr_registry/ocr2dr_registry.go new file mode 100644 index 00000000..7889936f --- /dev/null +++ b/core/gethwrappers/generated/ocr2dr_registry/ocr2dr_registry.go @@ -0,0 +1,3350 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package ocr2dr_registry + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type FunctionsBillingRegistryCommitment struct { + SubscriptionId uint64 + Client common.Address + GasLimit uint32 + GasPrice *big.Int + Don common.Address + DonFee *big.Int + RegistryFee *big.Int + EstimatedCost *big.Int + Timestamp *big.Int +} + +type IFunctionsBillingRegistryRequestBilling struct { + SubscriptionId uint64 + Client common.Address + GasLimit uint32 + GasPrice *big.Int +} + +var OCR2DRRegistryMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkEthFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"internalBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"externalBalance\",\"type\":\"uint256\"}],\"name\":\"BalanceInvariantViolated\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotSelfTransfer\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptySendersList\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"have\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"want\",\"type\":\"uint32\"}],\"name\":\"GasLimitTooBig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientBalance\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"InvalidConsumer\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"linkWei\",\"type\":\"int256\"}],\"name\":\"InvalidLinkWeiPrice\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSubscription\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"proposedOwner\",\"type\":\"address\"}],\"name\":\"MustBeRequestedOwner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"MustBeSubOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAllowedToSetSenders\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotProposedOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableFromLink\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OwnerMus
tBeSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentTooLarge\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PendingRequestExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"Reentrant\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyConsumers\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedSender\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"signerPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"transmitterPayment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"totalCost\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"name\":\"BillingEnd\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"don\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"donFee\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"registryFee\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"estimatedCost\",\"type\":\"uint96\"},{\
"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structFunctionsBillingRegistry.Commitment\",\"name\":\"commitment\",\"type\":\"tuple\"}],\"name\":\"BillingStart\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasOverhead\",\"type\":\"uint32\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"FundsRecovered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalTyp
e\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"RequestTimedOut\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"SubscriptionCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"SubscriptionCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"SubscriptionFunded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOw
nerTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"MAX_CONSUMERS\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"acceptSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"addConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"cancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"createSubscription\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"donFee\",\"type
\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"registryFee\",\"type\":\"uint96\"}],\"name\":\"estimateCost\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"response\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"err\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address[31]\",\"name\":\"signers\",\"type\":\"address[31]\"},{\"internalType\":\"uint8\",\"name\":\"signerCount\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"reportValidationGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"initialGas\",\"type\":\"uint256\"}],\"name\":\"fulfillAndBill\",\"outputs\":[{\"internalType\":\"enumIFunctionsBillingRegistry.FulfillResult\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAuthorizedSenders\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint256\"},{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"internalType\":\"uint32\",\"name\":\"gasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"linkAddress\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkPriceFeed\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCurrentsubscriptionId\",\"outputs\":[{\"internalType\":
\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRequestConfig\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"},{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"}],\"internalType\":\"structIFunctionsBillingRegistry.RequestBilling\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"getRequiredFee\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"getSubscription\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"getSubscriptionOwner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTotalBalance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkEthFeed\",\"type\":\"address\"}
,{\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"isAuthorizedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"oracleWithdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"ownerCancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"}],\"name\":\"pendingRequestExists\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"recoverFunds\",\"outputs
\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"removeConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"requestSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"setAuthorizedSenders\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint256\"},{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"internalType\":\"uint32\",\"name\":\"gasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"requestTimeoutSeconds\",\"type\":\"uint32\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"subscriptionId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"client\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"}],\"internalType\":\"structIFunctionsBillingRegistry.RequestBilling\",\"name\":\"billing\",\"type\":\"tuple\"}],\"name\":\"startBilling\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"
\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[]\",\"name\":\"requestIdsToTimeout\",\"type\":\"bytes32[]\"}],\"name\":\"timeoutRequests\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60806040523480156200001157600080fd5b5060405162005d3838038062005d3883398101604081905262000034916200040c565b620000418383836200004a565b50505062000456565b600054610100900460ff16158080156200006b5750600054600160ff909116105b806200009b57506200008830620001c960201b62003bd41760201c565b1580156200009b575060005460ff166001145b620001045760405162461bcd60e51b815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201526d191e481a5b9a5d1a585b1a5e995960921b60648201526084015b60405180910390fd5b6000805460ff19166001179055801562000128576000805461ff0019166101001790555b62000132620001d8565b6200013f33600062000240565b606980546001600160a01b038087166001600160a01b031992831617909255606a8054868416908316179055606b8054928516929091169190911790558015620001c3576000805461ff0019169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b50505050565b6001600160a01b03163b151590565b600054610100900460ff16620002345760405162461bcd60e51b815260206004820152602b602482015260008051602062005d1883398151915260448201526a6e697469616c697a696e6760a81b6064820152608401620000fb565b6200023e62000304565b565b600054610100900460ff166200029c5760405162461bcd60e51b815260206004820152602b602482015260008051602062005d1883398151915260448201526a6e697469616c697a696e6760a81b6064820152608401620000fb565b6001600160a01b038216620002c457604051635b5a8afd60e11b8152600401604
05180910390fd5b600080546001600160a01b03808516620100000262010000600160b01b031990921691909117909155811615620003005762000300816200036c565b5050565b600054610100900460ff16620003605760405162461bcd60e51b815260206004820152602b602482015260008051602062005d1883398151915260448201526a6e697469616c697a696e6760a81b6064820152608401620000fb565b6034805460ff19169055565b6001600160a01b038116331415620003975760405163282010c360e01b815260040160405180910390fd5b600180546001600160a01b0319166001600160a01b038381169182179092556000805460405192936201000090910416917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b03811681146200040757600080fd5b919050565b6000806000606084860312156200042257600080fd5b6200042d84620003ef565b92506200043d60208501620003ef565b91506200044d60408501620003ef565b90509250925092565b6158b280620004666000396000f3fe608060405234801561001057600080fd5b50600436106102255760003560e01c80638da5cb5b1161012a578063c0c53b8b116100bd578063e82ad7d41161008c578063f1e14a2111610071578063f1e14a2114610561578063f2fde38b14610578578063fa00763a1461058b57600080fd5b8063e82ad7d41461053b578063ee56997b1461054e57600080fd5b8063c0c53b8b1461048d578063c3f909d4146104a0578063d7ae1d3014610515578063e72f6e301461052857600080fd5b8063a47c7696116100f9578063a47c769614610432578063a4c0ed3614610454578063a9d03c0514610467578063b2a489ff1461047a57600080fd5b80638da5cb5b146103a25780639f87fad7146103e7578063a1a6d041146103fa578063a21a23e41461042a57600080fd5b80633f4ba83a116101bd578063665871ec1161018c57806379ba50971161017157806379ba50971461037f57806382359740146103875780638456cb591461039a57600080fd5b8063665871ec146103595780637341c10c1461036c57600080fd5b80633f4ba83a1461030c5780635c975abb1461031457806364d51a2a1461032b57806366316d8d1461034657600080fd5b806312b58349116101f957806312b58349146102915780632408afaa146102bd57806327923e41146102d257806333652e3e146102e557600080fd5b80620122911461022a57806302bcc5b61461024957806304c357cb1461025e5780630739e4f114610271575b600080fd5b61023261059e565b6040516102409291906155a15
65b60405180910390f35b61025c6102573660046151d9565b6105bd565b005b61025c61026c3660046151f4565b61063a565b61028461027f366004614edf565b610835565b6040516102409190615470565b606f546801000000000000000090046bffffffffffffffffffffffff165b604051908152602001610240565b6102c5610ef7565b60405161024091906153a6565b61025c6102e0366004615173565b610f66565b606f5467ffffffffffffffff165b60405167ffffffffffffffff9091168152602001610240565b61025c6110b9565b60345460ff165b6040519015158152602001610240565b610333606481565b60405161ffff9091168152602001610240565b61025c610354366004614e44565b6110cb565b61025c610367366004614e7b565b611331565b61025c61037a3660046151f4565b6115fd565b61025c611890565b61025c6103953660046151d9565b611983565b61025c611d04565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610240565b61025c6103f53660046151f4565b611d14565b61040d610408366004615122565b61219b565b6040516bffffffffffffffffffffffff9091168152602001610240565b6102f36122bf565b6104456104403660046151d9565b612657565b604051610240939291906155c8565b61025c610462366004614dea565b612788565b6102af610475366004615009565b6129e1565b6103c26104883660046151d9565b6131dc565b61025c61049b366004614da7565b613275565b607354607454607254607554606954606a546040805163ffffffff808916825265010000000000909804881660208201529081019590955260608501939093529316608083015273ffffffffffffffffffffffffffffffffffffffff92831660a08301529190911660c082015260e001610240565b61025c6105233660046151f4565b613477565b61025c610536366004614d8c565b6135de565b61031b6105493660046151d9565b6137fb565b61025c61055c366004614e7b565b613a3a565b61040d61056f366004615086565b60009392505050565b61025c610586366004614d8c565b613bad565b61031b610599366004614d8c565b613bc1565b60735460009060609063ffffffff166105b5610ef7565b915091509091565b6105c5613bf0565b67ffffffffffffffff81166000908152606d602052604090205473ffffffffffffffffffffffffffffffffffffffff168061062c576040517f1f6a65b60000000000000000000000000000000000000000000000000000000081526004016040518
0910390fd5b6106368282613c47565b5050565b67ffffffffffffffff82166000908152606d6020526040902054829073ffffffffffffffffffffffffffffffffffffffff16806106a3576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff82161461070f576040517fd8a3fb5200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821660048201526024015b60405180910390fd5b607354640100000000900460ff1615610754576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61075c61404e565b67ffffffffffffffff84166000908152606d602052604090206001015473ffffffffffffffffffffffffffffffffffffffff84811691161461082f5767ffffffffffffffff84166000818152606d602090815260409182902060010180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff88169081179091558251338152918201527f69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be91015b60405180910390a25b50505050565b600061083f6140bb565b607354640100000000900460ff1615610884576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61088c61404e565b60008b815260716020908152604091829020825161012081018452815467ffffffffffffffff8116825268010000000000000000810473ffffffffffffffffffffffffffffffffffffffff908116948301949094527c0100000000000000000000000000000000000000000000000000000000900463ffffffff169381019390935260018101546060840152600281015491821660808401819052740100000000000000000000000000000000000000009092046bffffffffffffffffffffffff90811660a0850152600382015480821660c08601526c0100000000000000000000000090041660e084015260040154610100830152610990576002915050610ee9565b60008c81526071602052604080822082815560018101839055600281018390556003810180547fffffffffffffffff000000000000000000000000000000000000000000000000169055600401829055517f0ca761750000000000000000000000000000000000000000000000000000000090610a19908f908f908f908f9
08f906024016153b9565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152918152602080830180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff0000000000000000000000000000000000000000000000000000000090951694909417909352607380547fffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffff1664010000000017905584015191840151909250600091610ae69163ffffffff90911690846140fa565b607380547fffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffff16905560745460a085015160c0860151929350600092610b3292899290918c908c3a614146565b604080820151865167ffffffffffffffff166000908152606e60205291909120549192506bffffffffffffffffffffffff90811691161015610ba0576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b604080820151855167ffffffffffffffff166000908152606e602052918220805491929091610bde9084906bffffffffffffffffffffffff16615712565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555060005b8860ff16811015610cd8578151607060008c84601f8110610c3257610c3261582d565b602002015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282829054906101000a90046bffffffffffffffffffffffff16610c979190615658565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508080610cd09061573f565b915050610c0f565b508360c0015160706000610d0860005473ffffffffffffffffffffffffffffffffffffffff620100009091041690565b73ffffffffffffffffffffffffffffffffffffffff168152602081019190915260400160009081208054909190610d4e9084906bffffffffffffffffffffffff16615658565b82546101009290920a6bffffffffffffffffffffffff81810219909316918316021790915560208381015173ffffffffffffffffffffffffffffffffffffffff8e166000908152607090925260408220805491945092610db091859116615658565b82546101009290920a6bffffffffffffffffffffffff81810219909316918316021790915560e0860151865167ffffffffffffffff166000908152606e6020526040902080549
1935091600c91610e199185916c01000000000000000000000000900416615712565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508e7fc8dc973332de19a5f71b6026983110e9c2e04b0c98b87eb771ccb78607fd114f856000015183600001518460200151856040015187604051610ecb95949392919067ffffffffffffffff9590951685526bffffffffffffffffffffffff9384166020860152918316604085015290911660608301521515608082015260a00190565b60405180910390a281610edf576001610ee2565b60005b9450505050505b9a9950505050505050505050565b60606068805480602002602001604051908101604052809291908181526020018280548015610f5c57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610f31575b5050505050905090565b610f6e613bf0565b60008313610fab576040517f43d4cf6600000000000000000000000000000000000000000000000000000000815260048101849052602401610706565b6040805160c08101825263ffffffff888116808352600060208085019190915289831684860181905260608086018b9052888516608080880182905295891660a0978801819052607380547fffffffffffffffffffffffffffffffffffffffffffffff00000000000000000016871765010000000000860217905560748d9055607580547fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000016831764010000000090920291909117905560728b9055875194855292840191909152948201899052938101879052908101929092527f24d3d934adfef9b9029d6ffa463c07d0139ed47d26ee23506f85ece2879d2bd4910160405180910390a1505050505050565b6110c1613bf0565b6110c96142c6565b565b607354640100000000900460ff1615611110576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61111861404e565b6bffffffffffffffffffffffff811661114b5750336000908152607060205260409020546bffffffffffffffffffffffff165b336000908152607060205260409020546bffffffffffffffffffffffff808316911610156111a5576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b33600090815260706020526040812080548392906111d29084906bffffffffffffffffffffffff16615712565b92506101000a8154816bfffff
fffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555080606f60088282829054906101000a90046bffffffffffffffffffffffff166112299190615712565b82546101009290920a6bffffffffffffffffffffffff8181021990931691831602179091556069546040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff868116600483015292851660248201529116915063a9059cbb90604401602060405180830381600087803b1580156112c357600080fd5b505af11580156112d7573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906112fb9190614ebd565b610636576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61133961404e565b60005b818110156115f85760008383838181106113585761135861582d565b602090810292909201356000818152607184526040808220815161012081018352815467ffffffffffffffff811680835268010000000000000000820473ffffffffffffffffffffffffffffffffffffffff908116848b01527c010000000000000000000000000000000000000000000000000000000090920463ffffffff168386015260018401546060840152600284015480831660808501527401000000000000000000000000000000000000000090046bffffffffffffffffffffffff90811660a0850152600385015480821660c08601526c0100000000000000000000000090041660e0840152600490930154610100830152918452606d90965291205491945016331490506114cd57805167ffffffffffffffff166000908152606d6020526040908190205490517fd8a3fb5200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091166004820152602401610706565b60755461010082015142916114f19164010000000090910463ffffffff1690615614565b11156115e35760e0810151815167ffffffffffffffff166000908152606e602052604090208054600c906115449084906c0100000000000000000000000090046bffffffffffffffffffffffff16615712565b82546bffffffffffffffffffffffff9182166101009390930a92830291909202199091161790555060008281526071602052604080822082815560018101839055600281018390556003810180547fffffffffffffffff0000000000000000000000000000000000000000000000001690556004018290555183917ff1ca1e9147be7
37b04a2b018a79405f687a97de8dd8a2559bbe62357343af41491a25b505080806115f09061573f565b91505061133c565b505050565b67ffffffffffffffff82166000908152606d6020526040902054829073ffffffffffffffffffffffffffffffffffffffff1680611666576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff8216146116cd576040517fd8a3fb5200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610706565b607354640100000000900460ff1615611712576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61171a61404e565b67ffffffffffffffff84166000908152606d602052604090206002015460641415611771576040517f05a48e0f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff83166000908152606c6020908152604080832067ffffffffffffffff808916855292529091205416156117b85761082f565b73ffffffffffffffffffffffffffffffffffffffff83166000818152606c6020908152604080832067ffffffffffffffff891680855290835281842080547fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000166001908117909155606d84528285206002018054918201815585529383902090930180547fffffffffffffffffffffffff000000000000000000000000000000000000000016851790555192835290917f43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e09101610826565b60015473ffffffffffffffffffffffffffffffffffffffff1633146118e1576040517f0f22ca5f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000805433620100008181027fffffffffffffffffffff0000000000000000000000000000000000000000ffff8416178455600180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905560405173ffffffffffffffffffffffffffffffffffffffff919093041692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b607354640100000000900460ff16156119c8576040517fed3ba6a6000000000000000000000000000000000
00000000000000000000000815260040160405180910390fd5b6119d061404e565b606b60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16634b4fa0c16040518163ffffffff1660e01b815260040160206040518083038186803b158015611a3857600080fd5b505afa158015611a4c573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611a709190614ebd565b8015611b1a5750606b546040517ffa00763a00000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff9091169063fa00763a9060240160206040518083038186803b158015611ae057600080fd5b505afa158015611af4573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611b189190614ebd565b155b15611b51576040517f0809490800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff81166000908152606d602052604090205473ffffffffffffffffffffffffffffffffffffffff16611bb7576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff81166000908152606d602052604090206001015473ffffffffffffffffffffffffffffffffffffffff163314611c595767ffffffffffffffff81166000908152606d6020526040908190206001015490517fd084e97500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091166004820152602401610706565b67ffffffffffffffff81166000818152606d60209081526040918290208054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560019093018054909316909255835173ffffffffffffffffffffffffffffffffffffffff909116808252928101919091529092917f6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f0910160405180910390a25050565b611d0c613bf0565b6110c9614343565b67ffffffffffffffff82166000908152606d6020526040902054829073ffffffffffffffffffffffffffffffffffffffff1680611d7d576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff8216146
11de4576040517fd8a3fb5200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610706565b607354640100000000900460ff1615611e29576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b611e3161404e565b73ffffffffffffffffffffffffffffffffffffffff83166000908152606c6020908152604080832067ffffffffffffffff808916855292529091205416611ecc576040517ff0019fe600000000000000000000000000000000000000000000000000000000815267ffffffffffffffff8516600482015273ffffffffffffffffffffffffffffffffffffffff84166024820152604401610706565b67ffffffffffffffff84166000908152606d6020908152604080832060020180548251818502810185019093528083529192909190830182828015611f4757602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311611f1c575b50505050509050600060018251611f5e91906156fb565b905060005b82518110156120fd578573ffffffffffffffffffffffffffffffffffffffff16838281518110611f9557611f9561582d565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1614156120eb576000838381518110611fcd57611fcd61582d565b6020026020010151905080606d60008a67ffffffffffffffff1667ffffffffffffffff16815260200190815260200160002060020183815481106120135761201361582d565b600091825260208083209190910180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff949094169390931790925567ffffffffffffffff8a168152606d9091526040902060020180548061208d5761208d6157fe565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055019055506120fd565b806120f58161573f565b915050611f63565b5073ffffffffffffffffffffffffffffffffffffffff85166000818152606c6020908152604080832067ffffffffffffffff8b168085529083529281902080547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001690555192835290917f182bff9831466789164ca77075fffd84916d35a8180ba
73c27e45634549b445b91015b60405180910390a2505050505050565b6000806121a661439e565b9050600081136121e5576040517f43d4cf6600000000000000000000000000000000000000000000000000000000815260048101829052602401610706565b60745460755460009163ffffffff808a1692612202929116615614565b61220c9190615614565b90506000828261222489670de0b6b3a76400006156be565b61222e91906156be565b612238919061567f565b905060006122576bffffffffffffffffffffffff808816908916615614565b905061226f816b033b2e3c9fd0803ce80000006156fb565b8211156122a8576040517fe80fa38100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6122b28183615614565b9998505050505050505050565b607354600090640100000000900460ff1615612307576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61230f61404e565b606b60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16634b4fa0c16040518163ffffffff1660e01b815260040160206040518083038186803b15801561237757600080fd5b505afa15801561238b573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906123af9190614ebd565b80156124595750606b546040517ffa00763a00000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff9091169063fa00763a9060240160206040518083038186803b15801561241f57600080fd5b505afa158015612433573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906124579190614ebd565b155b15612490576040517f0809490800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606f805467ffffffffffffffff169060006124aa83615778565b82546101009290920a67ffffffffffffffff818102199093169183160217909155606f541690506000806040519080825280602002602001820160405280156124fd578160200160208202803683370190505b506040805180820182526000808252602080830182815267ffffffffffffffff8816808452606e83528584209451855492516bffffffffffffffffffffffff9081166c01000000000000000000000000027fffffffffffffffff0000000000000000000000000000000
00000000000000000909416911617919091179093558351606081018552338152808201838152818601878152948452606d8352949092208251815473ffffffffffffffffffffffffffffffffffffffff9182167fffffffffffffffffffffffff000000000000000000000000000000000000000091821617835595516001830180549190921696169590951790945591518051949550909361260f9260028501920190614ad8565b505060405133815267ffffffffffffffff841691507f464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf9060200160405180910390a250905090565b67ffffffffffffffff81166000908152606d6020526040812054819060609073ffffffffffffffffffffffffffffffffffffffff166126c2576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff84166000908152606e6020908152604080832054606d8352928190208054600290910180548351818602810186019094528084526bffffffffffffffffffffffff9095169473ffffffffffffffffffffffffffffffffffffffff90921693909291839183018282801561277457602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311612749575b505050505090509250925092509193909250565b607354640100000000900460ff16156127cd576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6127d561404e565b60695473ffffffffffffffffffffffffffffffffffffffff163314612826576040517f44b0e3c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60208114612860576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061286e828401846151d9565b67ffffffffffffffff81166000908152606d602052604090205490915073ffffffffffffffffffffffffffffffffffffffff166128d7576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff81166000908152606e6020526040812080546bffffffffffffffffffffffff169186919061290e8385615658565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555084606f60088282829054906101000a90046bf
fffffffffffffffffffffff166129659190615658565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508167ffffffffffffffff167fd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f88287846129cc9190615614565b6040805192835260208301919091520161218b565b60006129eb6140bb565b607354640100000000900460ff1615612a30576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b612a3861404e565b6000606d81612a4a60208601866151d9565b67ffffffffffffffff16815260208101919091526040016000205473ffffffffffffffffffffffffffffffffffffffff161415612ab3576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000606c81612ac86040860160208701614d8c565b73ffffffffffffffffffffffffffffffffffffffff168152602080820192909252604001600090812091612afe908601866151d9565b67ffffffffffffffff908116825260208201929092526040016000205416905080612b9a57612b3060208401846151d9565b612b406040850160208601614d8c565b6040517ff0019fe600000000000000000000000000000000000000000000000000000000815267ffffffffffffffff909216600483015273ffffffffffffffffffffffffffffffffffffffff166024820152604401610706565b60735463ffffffff16612bb36060850160408601615107565b63ffffffff161115612c1457612bcf6060840160408501615107565b6073546040517ff5d7e01e00000000000000000000000000000000000000000000000000000000815263ffffffff928316600482015291166024820152604401610706565b6040517ff1e14a21000000000000000000000000000000000000000000000000000000008152600090339063f1e14a2190612c57908990899089906004016153f2565b60206040518083038186803b158015612c6f57600080fd5b505afa158015612c83573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612ca79190615277565b90506000612cbf878761056f368990038901896150d2565b90506000612ce2612cd66060880160408901615107565b8760600135858561219b565b90506000606e81612cf660208a018a6151d9565b67ffffffffffffffff1681526020808201929092526040016000908120546c0100000000000000000000000090046bffffffffffffffffffffffff1691606e919
0612d43908b018b6151d9565b67ffffffffffffffff168152602081019190915260400160002054612d7691906bffffffffffffffffffffffff16615712565b9050816bffffffffffffffffffffffff16816bffffffffffffffffffffffff161015612dce576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000612ddb86600161562c565b90506000612e6333612df360408c0160208d01614d8c565b612e0060208d018d6151d9565b856040805173ffffffffffffffffffffffffffffffffffffffff958616602080830191909152949095168582015267ffffffffffffffff928316606086015291166080808501919091528151808503909101815260a09093019052815191012090565b60408051610120810190915290915060009080612e8360208d018d6151d9565b67ffffffffffffffff1681526020018b6020016020810190612ea59190614d8c565b73ffffffffffffffffffffffffffffffffffffffff168152602001612ed060608d0160408e01615107565b63ffffffff90811682526060808e0135602080850191909152336040808601919091526bffffffffffffffffffffffff808e16848701528c81166080808801919091528c821660a0808901919091524260c09889015260008b8152607186528481208a5181548c890151978d0151909a167c0100000000000000000000000000000000000000000000000000000000027bffffffffffffffffffffffffffffffffffffffffffffffffffffffff73ffffffffffffffffffffffffffffffffffffffff98891668010000000000000000027fffffffff00000000000000000000000000000000000000000000000000000000909c1667ffffffffffffffff909316929092179a909a171698909817885595890151600188015590880151908801518216740100000000000000000000000000000000000000000292169190911760028501559385015160038401805460e088015187166c01000000000000000000000000027fffffffffffffffff0000000000000000000000000000000000000000000000009091169290961691909117949094179093556101008401516004909201919091559192508691606e9161308a908e018e6151d9565b67ffffffffffffffff16815260208101919091526040016000208054600c906130d29084906c0100000000000000000000000090046bffffffffffffffffffffffff16615658565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550817f99f7f4e65b4b9fbabd4e357c47ed3099b36e57ecd3a43e84662f34c20
7d0ebe48260405161313091906154b1565b60405180910390a282606c600061314d60408e0160208f01614d8c565b73ffffffffffffffffffffffffffffffffffffffff168152602080820192909252604001600090812091613183908e018e6151d9565b67ffffffffffffffff9081168252602082019290925260400160002080547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001692909116919091179055509a9950505050505050505050565b67ffffffffffffffff81166000908152606d602052604081205473ffffffffffffffffffffffffffffffffffffffff16613242576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5067ffffffffffffffff166000908152606d602052604090205473ffffffffffffffffffffffffffffffffffffffff1690565b600054610100900460ff16158080156132955750600054600160ff909116105b806132af5750303b1580156132af575060005460ff166001145b61333b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a65640000000000000000000000000000000000006064820152608401610706565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055801561339957600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b6133a1614491565b6133ac336000614530565b6069805473ffffffffffffffffffffffffffffffffffffffff8087167fffffffffffffffffffffffff000000000000000000000000000000000000000092831617909255606a8054868416908316179055606b805492851692909116919091179055801561082f57600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498906020015b60405180910390a150505050565b67ffffffffffffffff82166000908152606d6020526040902054829073ffffffffffffffffffffffffffffffffffffffff16806134e0576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff821614613547576040517fd8a3fb52000
00000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610706565b607354640100000000900460ff161561358c576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61359461404e565b61359d846137fb565b156135d4576040517fb42f66e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61082f8484613c47565b6135e6613bf0565b6069546040517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015260009173ffffffffffffffffffffffffffffffffffffffff16906370a082319060240160206040518083038186803b15801561365057600080fd5b505afa158015613664573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061368891906150ee565b606f549091506801000000000000000090046bffffffffffffffffffffffff16818111156136ec576040517fa99da3020000000000000000000000000000000000000000000000000000000081526004810182905260248101839052604401610706565b818110156115f857600061370082846156fb565b6069546040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff87811660048301526024820184905292935091169063a9059cbb90604401602060405180830381600087803b15801561377657600080fd5b505af115801561378a573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906137ae9190614ebd565b506040805173ffffffffffffffffffffffffffffffffffffffff86168152602081018390527f59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b4366009101613469565b67ffffffffffffffff81166000908152606d602090815260408083206002018054825181850281018501909352808352849383018282801561387357602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311613848575b505050505090506000613884610ef7565b905060005b8251811015613a2f5760005b8251811015613a1c5760006139cc8483815181106138b5576138b561582d565b60200260200101518685815181106138cf576138cf61582d565b602002602001015189606c60008a89815181106138ee576138ee61582d565b6
02002602001015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008c67ffffffffffffffff1667ffffffffffffffff16815260200190815260200160002060009054906101000a900467ffffffffffffffff166040805173ffffffffffffffffffffffffffffffffffffffff958616602080830191909152949095168582015267ffffffffffffffff928316606086015291166080808501919091528151808503909101815260a09093019052815191012090565b60008181526071602052604090206002015490915073ffffffffffffffffffffffffffffffffffffffff1615613a09575060019695505050505050565b5080613a148161573f565b915050613895565b5080613a278161573f565b915050613889565b506000949350505050565b613a42614670565b613a78576040517fad77f06100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80613aaf576040517f75158c3b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b606854811015613b0f57613afc60688281548110613ad257613ad261582d565b60009182526020909120015460669073ffffffffffffffffffffffffffffffffffffffff16614680565b5080613b078161573f565b915050613ab2565b5060005b81811015613b6057613b4d838383818110613b3057613b3061582d565b9050602002016020810190613b459190614d8c565b6066906146a9565b5080613b588161573f565b915050613b13565b50613b6d60688383614b5e565b507ff263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0828233604051613ba19392919061532e565b60405180910390a15050565b613bb5613bf0565b613bbe816146cb565b50565b6000613bce606683614798565b92915050565b73ffffffffffffffffffffffffffffffffffffffff163b151590565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff1633146110c9576040517f2b5c74de00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b607354640100000000900460ff1615613c8c576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff82166000908152606d602090815260408083208151606081018352815473ffffffffffffffffffffffffffffffffffffffff9081168252600183015416818501526
00282018054845181870281018701865281815292959394860193830182828015613d3757602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311613d0c575b5050509190925250505067ffffffffffffffff84166000908152606e60205260408120549192506bffffffffffffffffffffffff909116905b826040015151811015613e1657606c600084604001518381518110613d9757613d9761582d565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040908101600090812067ffffffffffffffff89168252909252902080547fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000016905580613e0e8161573f565b915050613d70565b5067ffffffffffffffff84166000908152606d6020526040812080547fffffffffffffffffffffffff00000000000000000000000000000000000000009081168255600182018054909116905590613e716002830182614bd6565b505067ffffffffffffffff84166000908152606e6020526040902080547fffffffffffffffff000000000000000000000000000000000000000000000000169055606f8054829190600890613ee19084906801000000000000000090046bffffffffffffffffffffffff16615712565b82546101009290920a6bffffffffffffffffffffffff8181021990931691831602179091556069546040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff878116600483015292851660248201529116915063a9059cbb90604401602060405180830381600087803b158015613f7b57600080fd5b505af1158015613f8f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613fb39190614ebd565b613fe9576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805173ffffffffffffffffffffffffffffffffffffffff851681526bffffffffffffffffffffffff8316602082015267ffffffffffffffff8616917fe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd498159101610826565b60345460ff16156110c9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f5061757361626c653a20706175736564000000000000000000000000000000006044820152606401610706565b6140c43
3613bc1565b6110c9576040517f0809490800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005a61138881101561410c57600080fd5b61138881039050846040820482031161412457600080fd5b50823b61413057600080fd5b60008083516020850160008789f1949350505050565b604080516060810182526000808252602082018190529181018290529061416b61439e565b9050600081136141aa576040517f43d4cf6600000000000000000000000000000000000000000000000000000000815260048101829052602401610706565b6000815a8b6141b98c89615614565b6141c39190615614565b6141cd91906156fb565b6141df86670de0b6b3a76400006156be565b6141e991906156be565b6141f3919061567f565b905060006142126bffffffffffffffffffffffff808916908b16615614565b905061422a816b033b2e3c9fd0803ce80000006156fb565b821115614263576040517fe80fa38100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061427260ff8a168b615693565b90508260006142896142848584615614565b6147c7565b604080516060810182526bffffffffffffffffffffffff958616815293851660208501529316928201929092529c9b505050505050505050505050565b6142ce614869565b603480547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa335b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390a1565b61434b61404e565b603480547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a2586143193390565b607354606a54604080517ffeaf968c000000000000000000000000000000000000000000000000000000008152905160009365010000000000900463ffffffff1692831515928592839273ffffffffffffffffffffffffffffffffffffffff169163feaf968c9160048083019260a0929190829003018186803b15801561442457600080fd5b505afa158015614438573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061445c9190615227565b5093505092505082801561447e575061447581426156fb565b8463ffffffff16105b156144895760725491505b509392505050565b600054610100900460ff16614528576040517f08c37
9a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e670000000000000000000000000000000000000000006064820152608401610706565b6110c96148d5565b600054610100900460ff166145c7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e670000000000000000000000000000000000000000006064820152608401610706565b73ffffffffffffffffffffffffffffffffffffffff8216614614576040517fb6b515fa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000805473ffffffffffffffffffffffffffffffffffffffff80851662010000027fffffffffffffffffffff0000000000000000000000000000000000000000ffff9092169190911790915581161561063657610636816146cb565b600061467a613bf0565b50600190565b60006146a28373ffffffffffffffffffffffffffffffffffffffff8416614996565b9392505050565b60006146a28373ffffffffffffffffffffffffffffffffffffffff8416614a89565b73ffffffffffffffffffffffffffffffffffffffff811633141561471b576040517f282010c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8381169182179092556000805460405192936201000090910416917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b73ffffffffffffffffffffffffffffffffffffffff8116600090815260018301602052604081205415156146a2565b60006bffffffffffffffffffffffff821115614865576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f53616665436173743a2076616c756520646f65736e27742066697420696e203960448201527f36206269747300000000000000000000000000000000000000000000000000006064820152608401610706565b5090565b60345460ff166110c9576040517f08c379a0000000000000000000000000000000000000000
00000000000000000815260206004820152601460248201527f5061757361626c653a206e6f74207061757365640000000000000000000000006044820152606401610706565b600054610100900460ff1661496c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e670000000000000000000000000000000000000000006064820152608401610706565b603480547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055565b60008181526001830160205260408120548015614a7f5760006149ba6001836156fb565b85549091506000906149ce906001906156fb565b9050818114614a335760008660000182815481106149ee576149ee61582d565b9060005260206000200154905080876000018481548110614a1157614a1161582d565b6000918252602080832090910192909255918252600188019052604090208390555b8554869080614a4457614a446157fe565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505050613bce565b6000915050613bce565b6000818152600183016020526040812054614ad057508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155613bce565b506000613bce565b828054828255906000526020600020908101928215614b52579160200282015b82811115614b5257825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190614af8565b50614865929150614bf0565b828054828255906000526020600020908101928215614b52579160200282015b82811115614b525781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff843516178255602090920191600190910190614b7e565b5080546000825590600052602060002090810190613bbe91905b5b808211156148655760008155600101614bf1565b803573ffffffffffffffffffffffffffffffffffffffff81168114614c2957600080fd5b919050565b60008083601f840112614c4057600080fd5b50813567ffffffffffffffff811115614c5857600080fd5b6020830191508360208260051b8501011115614c7357600080fd5b9250929
050565b60008083601f840112614c8c57600080fd5b50813567ffffffffffffffff811115614ca457600080fd5b602083019150836020828501011115614c7357600080fd5b600060808284031215614cce57600080fd5b6040516080810181811067ffffffffffffffff82111715614cf157614cf161585c565b604052905080614d0083614d49565b8152614d0e60208401614c05565b6020820152614d1f60408401614d35565b6040820152606083013560608201525092915050565b803563ffffffff81168114614c2957600080fd5b803567ffffffffffffffff81168114614c2957600080fd5b803560ff81168114614c2957600080fd5b805169ffffffffffffffffffff81168114614c2957600080fd5b600060208284031215614d9e57600080fd5b6146a282614c05565b600080600060608486031215614dbc57600080fd5b614dc584614c05565b9250614dd360208501614c05565b9150614de160408501614c05565b90509250925092565b60008060008060608587031215614e0057600080fd5b614e0985614c05565b935060208501359250604085013567ffffffffffffffff811115614e2c57600080fd5b614e3887828801614c7a565b95989497509550505050565b60008060408385031215614e5757600080fd5b614e6083614c05565b91506020830135614e708161588b565b809150509250929050565b60008060208385031215614e8e57600080fd5b823567ffffffffffffffff811115614ea557600080fd5b614eb185828601614c2e565b90969095509350505050565b600060208284031215614ecf57600080fd5b815180151581146146a257600080fd5b6000806000806000806000806000806104c08b8d031215614eff57600080fd5b8a35995060208b013567ffffffffffffffff80821115614f1e57600080fd5b614f2a8e838f01614c7a565b909b50995060408d0135915080821115614f4357600080fd5b614f4f8e838f01614c7a565b9099509750879150614f6360608e01614c05565b96508d609f8e0112614f7457600080fd5b60405191506103e082018281108282111715614f9257614f9261585c565b604052508060808d016104608e018f811115614fad57600080fd5b60005b601f811015614fd757614fc283614c05565b84526020938401939290920191600101614fb0565b50839750614fe481614d61565b9650505050506104808b013591506104a08b013590509295989b9194979a5092959850565b600080600083850360a081121561501f57600080fd5b843567ffffffffffffffff81111561503657600080fd5b61504287828801614c7a565b90955093505060807ffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffe08201121561507857600080fd5b506020840190509250925092565b600080600060a0848603121561509b57600080fd5b833567ffffffffffffffff8111156150b257600080fd5b6150be86828701614c7a565b9094509250614de190508560208601614cbc565b6000608082840312156150e457600080fd5b6146a28383614cbc565b60006020828403121561510057600080fd5b5051919050565b60006020828403121561511957600080fd5b6146a282614d35565b6000806000806080858703121561513857600080fd5b61514185614d35565b93506020850135925060408501356151588161588b565b915060608501356151688161588b565b939692955090935050565b60008060008060008060c0878903121561518c57600080fd5b61519587614d35565b95506151a360208801614d35565b945060408701359350606087013592506151bf60808801614d35565b91506151cd60a08801614d35565b90509295509295509295565b6000602082840312156151eb57600080fd5b6146a282614d49565b6000806040838503121561520757600080fd5b61521083614d49565b915061521e60208401614c05565b90509250929050565b600080600080600060a0868803121561523f57600080fd5b61524886614d72565b945060208601519350604086015192506060860151915061526b60808701614d72565b90509295509295909350565b60006020828403121561528957600080fd5b81516146a28161588b565b600081518084526020808501945080840160005b838110156152da57815173ffffffffffffffffffffffffffffffffffffffff16875295820195908201906001016152a8565b509495945050505050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b6040808252810183905260008460608301825b8681101561537c5773ffffffffffffffffffffffffffffffffffffffff61536784614c05565b16825260209283019290910190600101615341565b50809250505073ffffffffffffffffffffffffffffffffffffffff83166020830152949350505050565b6020815260006146a26020830184615294565b8581526060602082015260006153d36060830186886152e5565b82810360408401526153e68185876152e5565b98975050505050505050565b60a08152600061540660a0830185876152e5565b905067ffffffffffffffff61541a84614d49565b16602083015273ffffffffffffffffffffffffffffffffffffffff61544160208501614c05565b16604083015
263ffffffff61545860408501614d35565b16606083015260608301356080830152949350505050565b60208101600383106154ab577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b91905290565b60006101208201905067ffffffffffffffff835116825273ffffffffffffffffffffffffffffffffffffffff602084015116602083015260408301516154ff604084018263ffffffff169052565b50606083015160608301526080830151615531608084018273ffffffffffffffffffffffffffffffffffffffff169052565b5060a083015161555160a08401826bffffffffffffffffffffffff169052565b5060c083015161557160c08401826bffffffffffffffffffffffff169052565b5060e083015161559160e08401826bffffffffffffffffffffffff169052565b5061010092830151919092015290565b63ffffffff831681526040602082015260006155c06040830184615294565b949350505050565b6bffffffffffffffffffffffff8416815273ffffffffffffffffffffffffffffffffffffffff8316602082015260606040820152600061560b6060830184615294565b95945050505050565b60008219821115615627576156276157a0565b500190565b600067ffffffffffffffff80831681851680830382111561564f5761564f6157a0565b01949350505050565b60006bffffffffffffffffffffffff80831681851680830382111561564f5761564f6157a0565b60008261568e5761568e6157cf565b500490565b60006bffffffffffffffffffffffff808416806156b2576156b26157cf565b92169190910492915050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156156f6576156f66157a0565b500290565b60008282101561570d5761570d6157a0565b500390565b60006bffffffffffffffffffffffff83811690831681811015615737576157376157a0565b039392505050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415615771576157716157a0565b5060010190565b600067ffffffffffffffff80831681811415615796576157966157a0565b6001019392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4
e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6bffffffffffffffffffffffff81168114613bbe57600080fdfea164736f6c6343000806000a496e697469616c697a61626c653a20636f6e7472616374206973206e6f742069", +} + +var OCR2DRRegistryABI = OCR2DRRegistryMetaData.ABI + +var OCR2DRRegistryBin = OCR2DRRegistryMetaData.Bin + +func DeployOCR2DRRegistry(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, linkEthFeed common.Address, oracle common.Address) (common.Address, *types.Transaction, *OCR2DRRegistry, error) { + parsed, err := OCR2DRRegistryMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OCR2DRRegistryBin), backend, link, linkEthFeed, oracle) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OCR2DRRegistry{OCR2DRRegistryCaller: OCR2DRRegistryCaller{contract: contract}, OCR2DRRegistryTransactor: OCR2DRRegistryTransactor{contract: contract}, OCR2DRRegistryFilterer: OCR2DRRegistryFilterer{contract: contract}}, nil +} + +type OCR2DRRegistry struct { + address common.Address + abi abi.ABI + OCR2DRRegistryCaller + OCR2DRRegistryTransactor + OCR2DRRegistryFilterer +} + +type OCR2DRRegistryCaller struct { + contract *bind.BoundContract +} + +type OCR2DRRegistryTransactor struct { + contract *bind.BoundContract +} + +type OCR2DRRegistryFilterer struct { + contract *bind.BoundContract +} + +type OCR2DRRegistrySession struct { + Contract *OCR2DRRegistry + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OCR2DRRegistryCallerSession struct { + Contract *OCR2DRRegistryCaller + CallOpts bind.CallOpts +} + +type OCR2DRRegistryTransactorSession struct { + Contract 
*OCR2DRRegistryTransactor + TransactOpts bind.TransactOpts +} + +type OCR2DRRegistryRaw struct { + Contract *OCR2DRRegistry +} + +type OCR2DRRegistryCallerRaw struct { + Contract *OCR2DRRegistryCaller +} + +type OCR2DRRegistryTransactorRaw struct { + Contract *OCR2DRRegistryTransactor +} + +func NewOCR2DRRegistry(address common.Address, backend bind.ContractBackend) (*OCR2DRRegistry, error) { + abi, err := abi.JSON(strings.NewReader(OCR2DRRegistryABI)) + if err != nil { + return nil, err + } + contract, err := bindOCR2DRRegistry(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OCR2DRRegistry{address: address, abi: abi, OCR2DRRegistryCaller: OCR2DRRegistryCaller{contract: contract}, OCR2DRRegistryTransactor: OCR2DRRegistryTransactor{contract: contract}, OCR2DRRegistryFilterer: OCR2DRRegistryFilterer{contract: contract}}, nil +} + +func NewOCR2DRRegistryCaller(address common.Address, caller bind.ContractCaller) (*OCR2DRRegistryCaller, error) { + contract, err := bindOCR2DRRegistry(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OCR2DRRegistryCaller{contract: contract}, nil +} + +func NewOCR2DRRegistryTransactor(address common.Address, transactor bind.ContractTransactor) (*OCR2DRRegistryTransactor, error) { + contract, err := bindOCR2DRRegistry(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OCR2DRRegistryTransactor{contract: contract}, nil +} + +func NewOCR2DRRegistryFilterer(address common.Address, filterer bind.ContractFilterer) (*OCR2DRRegistryFilterer, error) { + contract, err := bindOCR2DRRegistry(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OCR2DRRegistryFilterer{contract: contract}, nil +} + +func bindOCR2DRRegistry(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OCR2DRRegistryMetaData.GetAbi() + if err != nil { 
+ return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DRRegistry.Contract.OCR2DRRegistryCaller.contract.Call(opts, result, method, params...) +} + +func (_OCR2DRRegistry *OCR2DRRegistryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OCR2DRRegistryTransactor.contract.Transfer(opts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OCR2DRRegistryTransactor.contract.Transact(opts, method, params...) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OCR2DRRegistry.Contract.contract.Call(opts, result, method, params...) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.contract.Transfer(opts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.contract.Transact(opts, method, params...) 
+} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "MAX_CONSUMERS") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) MAXCONSUMERS() (uint16, error) { + return _OCR2DRRegistry.Contract.MAXCONSUMERS(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) MAXCONSUMERS() (uint16, error) { + return _OCR2DRRegistry.Contract.MAXCONSUMERS(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) EstimateCost(opts *bind.CallOpts, gasLimit uint32, gasPrice *big.Int, donFee *big.Int, registryFee *big.Int) (*big.Int, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "estimateCost", gasLimit, gasPrice, donFee, registryFee) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) EstimateCost(gasLimit uint32, gasPrice *big.Int, donFee *big.Int, registryFee *big.Int) (*big.Int, error) { + return _OCR2DRRegistry.Contract.EstimateCost(&_OCR2DRRegistry.CallOpts, gasLimit, gasPrice, donFee, registryFee) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) EstimateCost(gasLimit uint32, gasPrice *big.Int, donFee *big.Int, registryFee *big.Int) (*big.Int, error) { + return _OCR2DRRegistry.Contract.EstimateCost(&_OCR2DRRegistry.CallOpts, gasLimit, gasPrice, donFee, registryFee) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getAuthorizedSenders") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], 
new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetAuthorizedSenders() ([]common.Address, error) { + return _OCR2DRRegistry.Contract.GetAuthorizedSenders(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetAuthorizedSenders() ([]common.Address, error) { + return _OCR2DRRegistry.Contract.GetAuthorizedSenders(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetConfig(opts *bind.CallOpts) (GetConfig, + + error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getConfig") + + outstruct := new(GetConfig) + if err != nil { + return *outstruct, err + } + + outstruct.MaxGasLimit = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.StalenessSeconds = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.GasAfterPaymentCalculation = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.FallbackWeiPerUnitLink = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.GasOverhead = *abi.ConvertType(out[4], new(uint32)).(*uint32) + outstruct.LinkAddress = *abi.ConvertType(out[5], new(common.Address)).(*common.Address) + outstruct.LinkPriceFeed = *abi.ConvertType(out[6], new(common.Address)).(*common.Address) + + return *outstruct, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetConfig() (GetConfig, + + error) { + return _OCR2DRRegistry.Contract.GetConfig(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetConfig() (GetConfig, + + error) { + return _OCR2DRRegistry.Contract.GetConfig(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetCurrentsubscriptionId(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getCurrentsubscriptionId") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + 
return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetCurrentsubscriptionId() (uint64, error) { + return _OCR2DRRegistry.Contract.GetCurrentsubscriptionId(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetCurrentsubscriptionId() (uint64, error) { + return _OCR2DRRegistry.Contract.GetCurrentsubscriptionId(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetRequestConfig(opts *bind.CallOpts) (uint32, []common.Address, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getRequestConfig") + + if err != nil { + return *new(uint32), *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + out1 := *abi.ConvertType(out[1], new([]common.Address)).(*[]common.Address) + + return out0, out1, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetRequestConfig() (uint32, []common.Address, error) { + return _OCR2DRRegistry.Contract.GetRequestConfig(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetRequestConfig() (uint32, []common.Address, error) { + return _OCR2DRRegistry.Contract.GetRequestConfig(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetRequiredFee(opts *bind.CallOpts, arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getRequiredFee", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetRequiredFee(arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) { + return _OCR2DRRegistry.Contract.GetRequiredFee(&_OCR2DRRegistry.CallOpts, arg0, arg1) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetRequiredFee(arg0 []byte, arg1 
IFunctionsBillingRegistryRequestBilling) (*big.Int, error) { + return _OCR2DRRegistry.Contract.GetRequiredFee(&_OCR2DRRegistry.CallOpts, arg0, arg1) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetSubscription(opts *bind.CallOpts, subscriptionId uint64) (GetSubscription, + + error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getSubscription", subscriptionId) + + outstruct := new(GetSubscription) + if err != nil { + return *outstruct, err + } + + outstruct.Balance = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Owner = *abi.ConvertType(out[1], new(common.Address)).(*common.Address) + outstruct.Consumers = *abi.ConvertType(out[2], new([]common.Address)).(*[]common.Address) + + return *outstruct, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetSubscription(subscriptionId uint64) (GetSubscription, + + error) { + return _OCR2DRRegistry.Contract.GetSubscription(&_OCR2DRRegistry.CallOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetSubscription(subscriptionId uint64) (GetSubscription, + + error) { + return _OCR2DRRegistry.Contract.GetSubscription(&_OCR2DRRegistry.CallOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetSubscriptionOwner(opts *bind.CallOpts, subscriptionId uint64) (common.Address, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getSubscriptionOwner", subscriptionId) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetSubscriptionOwner(subscriptionId uint64) (common.Address, error) { + return _OCR2DRRegistry.Contract.GetSubscriptionOwner(&_OCR2DRRegistry.CallOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetSubscriptionOwner(subscriptionId uint64) (common.Address, error) { + return 
_OCR2DRRegistry.Contract.GetSubscriptionOwner(&_OCR2DRRegistry.CallOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) GetTotalBalance(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "getTotalBalance") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) GetTotalBalance() (*big.Int, error) { + return _OCR2DRRegistry.Contract.GetTotalBalance(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) GetTotalBalance() (*big.Int, error) { + return _OCR2DRRegistry.Contract.GetTotalBalance(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "isAuthorizedSender", sender) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _OCR2DRRegistry.Contract.IsAuthorizedSender(&_OCR2DRRegistry.CallOpts, sender) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _OCR2DRRegistry.Contract.IsAuthorizedSender(&_OCR2DRRegistry.CallOpts, sender) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) Owner() (common.Address, error) { + return 
_OCR2DRRegistry.Contract.Owner(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) Owner() (common.Address, error) { + return _OCR2DRRegistry.Contract.Owner(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) Paused(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "paused") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) Paused() (bool, error) { + return _OCR2DRRegistry.Contract.Paused(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) Paused() (bool, error) { + return _OCR2DRRegistry.Contract.Paused(&_OCR2DRRegistry.CallOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCaller) PendingRequestExists(opts *bind.CallOpts, subscriptionId uint64) (bool, error) { + var out []interface{} + err := _OCR2DRRegistry.contract.Call(opts, &out, "pendingRequestExists", subscriptionId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) PendingRequestExists(subscriptionId uint64) (bool, error) { + return _OCR2DRRegistry.Contract.PendingRequestExists(&_OCR2DRRegistry.CallOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryCallerSession) PendingRequestExists(subscriptionId uint64) (bool, error) { + return _OCR2DRRegistry.Contract.PendingRequestExists(&_OCR2DRRegistry.CallOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "acceptOwnership") +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) AcceptOwnership() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.AcceptOwnership(&_OCR2DRRegistry.TransactOpts) +} 
+ +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.AcceptOwnership(&_OCR2DRRegistry.TransactOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "acceptSubscriptionOwnerTransfer", subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) AcceptSubscriptionOwnerTransfer(subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.AcceptSubscriptionOwnerTransfer(&_OCR2DRRegistry.TransactOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) AcceptSubscriptionOwnerTransfer(subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.AcceptSubscriptionOwnerTransfer(&_OCR2DRRegistry.TransactOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) AddConsumer(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "addConsumer", subscriptionId, consumer) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) AddConsumer(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.AddConsumer(&_OCR2DRRegistry.TransactOpts, subscriptionId, consumer) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) AddConsumer(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.AddConsumer(&_OCR2DRRegistry.TransactOpts, subscriptionId, consumer) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) CancelSubscription(opts *bind.TransactOpts, subscriptionId uint64, to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "cancelSubscription", subscriptionId, to) +} + +func 
(_OCR2DRRegistry *OCR2DRRegistrySession) CancelSubscription(subscriptionId uint64, to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.CancelSubscription(&_OCR2DRRegistry.TransactOpts, subscriptionId, to) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) CancelSubscription(subscriptionId uint64, to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.CancelSubscription(&_OCR2DRRegistry.TransactOpts, subscriptionId, to) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "createSubscription") +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) CreateSubscription() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.CreateSubscription(&_OCR2DRRegistry.TransactOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) CreateSubscription() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.CreateSubscription(&_OCR2DRRegistry.TransactOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) FulfillAndBill(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte, transmitter common.Address, signers [31]common.Address, signerCount uint8, reportValidationGas *big.Int, initialGas *big.Int) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "fulfillAndBill", requestId, response, err, transmitter, signers, signerCount, reportValidationGas, initialGas) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) FulfillAndBill(requestId [32]byte, response []byte, err []byte, transmitter common.Address, signers [31]common.Address, signerCount uint8, reportValidationGas *big.Int, initialGas *big.Int) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.FulfillAndBill(&_OCR2DRRegistry.TransactOpts, requestId, response, err, transmitter, signers, signerCount, reportValidationGas, initialGas) +} + +func 
(_OCR2DRRegistry *OCR2DRRegistryTransactorSession) FulfillAndBill(requestId [32]byte, response []byte, err []byte, transmitter common.Address, signers [31]common.Address, signerCount uint8, reportValidationGas *big.Int, initialGas *big.Int) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.FulfillAndBill(&_OCR2DRRegistry.TransactOpts, requestId, response, err, transmitter, signers, signerCount, reportValidationGas, initialGas) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) Initialize(opts *bind.TransactOpts, link common.Address, linkEthFeed common.Address, oracle common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "initialize", link, linkEthFeed, oracle) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) Initialize(link common.Address, linkEthFeed common.Address, oracle common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.Initialize(&_OCR2DRRegistry.TransactOpts, link, linkEthFeed, oracle) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) Initialize(link common.Address, linkEthFeed common.Address, oracle common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.Initialize(&_OCR2DRRegistry.TransactOpts, link, linkEthFeed, oracle) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "onTokenTransfer", arg0, amount, data) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OnTokenTransfer(&_OCR2DRRegistry.TransactOpts, arg0, amount, data) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return 
_OCR2DRRegistry.Contract.OnTokenTransfer(&_OCR2DRRegistry.TransactOpts, arg0, amount, data) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "oracleWithdraw", recipient, amount) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) OracleWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OracleWithdraw(&_OCR2DRRegistry.TransactOpts, recipient, amount) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) OracleWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OracleWithdraw(&_OCR2DRRegistry.TransactOpts, recipient, amount) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) OwnerCancelSubscription(opts *bind.TransactOpts, subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "ownerCancelSubscription", subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) OwnerCancelSubscription(subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OwnerCancelSubscription(&_OCR2DRRegistry.TransactOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) OwnerCancelSubscription(subscriptionId uint64) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.OwnerCancelSubscription(&_OCR2DRRegistry.TransactOpts, subscriptionId) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) Pause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "pause") +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) Pause() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.Pause(&_OCR2DRRegistry.TransactOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) Pause() (*types.Transaction, error) { + 
return _OCR2DRRegistry.Contract.Pause(&_OCR2DRRegistry.TransactOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "recoverFunds", to) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.RecoverFunds(&_OCR2DRRegistry.TransactOpts, to) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.RecoverFunds(&_OCR2DRRegistry.TransactOpts, to) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) RemoveConsumer(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "removeConsumer", subscriptionId, consumer) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) RemoveConsumer(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.RemoveConsumer(&_OCR2DRRegistry.TransactOpts, subscriptionId, consumer) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) RemoveConsumer(subscriptionId uint64, consumer common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.RemoveConsumer(&_OCR2DRRegistry.TransactOpts, subscriptionId, consumer) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subscriptionId uint64, newOwner common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "requestSubscriptionOwnerTransfer", subscriptionId, newOwner) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) RequestSubscriptionOwnerTransfer(subscriptionId uint64, newOwner common.Address) (*types.Transaction, error) { + return 
_OCR2DRRegistry.Contract.RequestSubscriptionOwnerTransfer(&_OCR2DRRegistry.TransactOpts, subscriptionId, newOwner) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) RequestSubscriptionOwnerTransfer(subscriptionId uint64, newOwner common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.RequestSubscriptionOwnerTransfer(&_OCR2DRRegistry.TransactOpts, subscriptionId, newOwner) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "setAuthorizedSenders", senders) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.SetAuthorizedSenders(&_OCR2DRRegistry.TransactOpts, senders) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.SetAuthorizedSenders(&_OCR2DRRegistry.TransactOpts, senders) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) SetConfig(opts *bind.TransactOpts, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation *big.Int, fallbackWeiPerUnitLink *big.Int, gasOverhead uint32, requestTimeoutSeconds uint32) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "setConfig", maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, gasOverhead, requestTimeoutSeconds) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) SetConfig(maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation *big.Int, fallbackWeiPerUnitLink *big.Int, gasOverhead uint32, requestTimeoutSeconds uint32) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.SetConfig(&_OCR2DRRegistry.TransactOpts, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, 
gasOverhead, requestTimeoutSeconds) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) SetConfig(maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation *big.Int, fallbackWeiPerUnitLink *big.Int, gasOverhead uint32, requestTimeoutSeconds uint32) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.SetConfig(&_OCR2DRRegistry.TransactOpts, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, gasOverhead, requestTimeoutSeconds) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) StartBilling(opts *bind.TransactOpts, data []byte, billing IFunctionsBillingRegistryRequestBilling) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "startBilling", data, billing) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) StartBilling(data []byte, billing IFunctionsBillingRegistryRequestBilling) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.StartBilling(&_OCR2DRRegistry.TransactOpts, data, billing) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) StartBilling(data []byte, billing IFunctionsBillingRegistryRequestBilling) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.StartBilling(&_OCR2DRRegistry.TransactOpts, data, billing) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) TimeoutRequests(opts *bind.TransactOpts, requestIdsToTimeout [][32]byte) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "timeoutRequests", requestIdsToTimeout) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) TimeoutRequests(requestIdsToTimeout [][32]byte) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.TimeoutRequests(&_OCR2DRRegistry.TransactOpts, requestIdsToTimeout) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) TimeoutRequests(requestIdsToTimeout [][32]byte) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.TimeoutRequests(&_OCR2DRRegistry.TransactOpts, requestIdsToTimeout) +} + 
+func (_OCR2DRRegistry *OCR2DRRegistryTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "transferOwnership", to) +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.TransferOwnership(&_OCR2DRRegistry.TransactOpts, to) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.TransferOwnership(&_OCR2DRRegistry.TransactOpts, to) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactor) Unpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OCR2DRRegistry.contract.Transact(opts, "unpause") +} + +func (_OCR2DRRegistry *OCR2DRRegistrySession) Unpause() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.Unpause(&_OCR2DRRegistry.TransactOpts) +} + +func (_OCR2DRRegistry *OCR2DRRegistryTransactorSession) Unpause() (*types.Transaction, error) { + return _OCR2DRRegistry.Contract.Unpause(&_OCR2DRRegistry.TransactOpts) +} + +type OCR2DRRegistryAuthorizedSendersChangedIterator struct { + Event *OCR2DRRegistryAuthorizedSendersChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryAuthorizedSendersChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return 
false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryAuthorizedSendersChangedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryAuthorizedSendersChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryAuthorizedSendersChanged struct { + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*OCR2DRRegistryAuthorizedSendersChangedIterator, error) { + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return &OCR2DRRegistryAuthorizedSendersChangedIterator{contract: _OCR2DRRegistry.contract, event: "AuthorizedSendersChanged", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryAuthorizedSendersChanged) (event.Subscription, error) { + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryAuthorizedSendersChanged) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseAuthorizedSendersChanged(log types.Log) (*OCR2DRRegistryAuthorizedSendersChanged, error) { + event := new(OCR2DRRegistryAuthorizedSendersChanged) + if err := 
_OCR2DRRegistry.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryBillingEndIterator struct { + Event *OCR2DRRegistryBillingEnd + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryBillingEndIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryBillingEnd) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryBillingEnd) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryBillingEndIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryBillingEndIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryBillingEnd struct { + RequestId [32]byte + SubscriptionId uint64 + SignerPayment *big.Int + TransmitterPayment *big.Int + TotalCost *big.Int + Success bool + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterBillingEnd(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DRRegistryBillingEndIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "BillingEnd", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistryBillingEndIterator{contract: _OCR2DRRegistry.contract, event: "BillingEnd", logs: logs, sub: sub}, nil +} 
+ +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchBillingEnd(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryBillingEnd, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "BillingEnd", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryBillingEnd) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "BillingEnd", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseBillingEnd(log types.Log) (*OCR2DRRegistryBillingEnd, error) { + event := new(OCR2DRRegistryBillingEnd) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "BillingEnd", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryBillingStartIterator struct { + Event *OCR2DRRegistryBillingStart + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryBillingStartIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryBillingStart) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryBillingStart) + if err := it.contract.UnpackLog(it.Event, it.event, log); err 
!= nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryBillingStartIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryBillingStartIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryBillingStart struct { + RequestId [32]byte + Commitment FunctionsBillingRegistryCommitment + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterBillingStart(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DRRegistryBillingStartIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "BillingStart", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistryBillingStartIterator{contract: _OCR2DRRegistry.contract, event: "BillingStart", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchBillingStart(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryBillingStart, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "BillingStart", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryBillingStart) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "BillingStart", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + 
} + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseBillingStart(log types.Log) (*OCR2DRRegistryBillingStart, error) { + event := new(OCR2DRRegistryBillingStart) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "BillingStart", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryConfigSetIterator struct { + Event *OCR2DRRegistryConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryConfigSetIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryConfigSet struct { + MaxGasLimit uint32 + StalenessSeconds uint32 + GasAfterPaymentCalculation *big.Int + FallbackWeiPerUnitLink *big.Int + GasOverhead uint32 + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterConfigSet(opts *bind.FilterOpts) (*OCR2DRRegistryConfigSetIterator, error) { + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &OCR2DRRegistryConfigSetIterator{contract: _OCR2DRRegistry.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + 
+func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryConfigSet) (event.Subscription, error) { + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryConfigSet) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseConfigSet(log types.Log) (*OCR2DRRegistryConfigSet, error) { + event := new(OCR2DRRegistryConfigSet) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryFundsRecoveredIterator struct { + Event *OCR2DRRegistryFundsRecovered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryFundsRecoveredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func 
(it *OCR2DRRegistryFundsRecoveredIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryFundsRecoveredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryFundsRecovered struct { + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterFundsRecovered(opts *bind.FilterOpts) (*OCR2DRRegistryFundsRecoveredIterator, error) { + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return &OCR2DRRegistryFundsRecoveredIterator{contract: _OCR2DRRegistry.contract, event: "FundsRecovered", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryFundsRecovered) (event.Subscription, error) { + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryFundsRecovered) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseFundsRecovered(log types.Log) (*OCR2DRRegistryFundsRecovered, error) { + event := new(OCR2DRRegistryFundsRecovered) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryInitializedIterator struct { + Event *OCR2DRRegistryInitialized + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done 
bool + fail error +} + +func (it *OCR2DRRegistryInitializedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryInitializedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryInitializedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryInitialized struct { + Version uint8 + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterInitialized(opts *bind.FilterOpts) (*OCR2DRRegistryInitializedIterator, error) { + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return &OCR2DRRegistryInitializedIterator{contract: _OCR2DRRegistry.contract, event: "Initialized", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchInitialized(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryInitialized) (event.Subscription, error) { + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryInitialized) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "Initialized", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseInitialized(log types.Log) (*OCR2DRRegistryInitialized, error) { + event := new(OCR2DRRegistryInitialized) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "Initialized", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryOwnershipTransferRequestedIterator struct { + Event *OCR2DRRegistryOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRRegistryOwnershipTransferRequestedIterator, error) { + + 
var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistryOwnershipTransferRequestedIterator{contract: _OCR2DRRegistry.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryOwnershipTransferRequested) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseOwnershipTransferRequested(log types.Log) (*OCR2DRRegistryOwnershipTransferRequested, error) { + event := new(OCR2DRRegistryOwnershipTransferRequested) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil 
+} + +type OCR2DRRegistryOwnershipTransferredIterator struct { + Event *OCR2DRRegistryOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRRegistryOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistryOwnershipTransferredIterator{contract: _OCR2DRRegistry.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func 
(_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryOwnershipTransferred) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseOwnershipTransferred(log types.Log) (*OCR2DRRegistryOwnershipTransferred, error) { + event := new(OCR2DRRegistryOwnershipTransferred) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryPausedIterator struct { + Event *OCR2DRRegistryPaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } 
+ + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryPausedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryPaused struct { + Account common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterPaused(opts *bind.FilterOpts) (*OCR2DRRegistryPausedIterator, error) { + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &OCR2DRRegistryPausedIterator{contract: _OCR2DRRegistry.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryPaused) (event.Subscription, error) { + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryPaused) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParsePaused(log types.Log) (*OCR2DRRegistryPaused, error) { + event := new(OCR2DRRegistryPaused) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
OCR2DRRegistryRequestTimedOutIterator struct { + Event *OCR2DRRegistryRequestTimedOut + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryRequestTimedOutIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryRequestTimedOut) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryRequestTimedOut) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryRequestTimedOutIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryRequestTimedOutIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryRequestTimedOut struct { + RequestId [32]byte + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterRequestTimedOut(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DRRegistryRequestTimedOutIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "RequestTimedOut", requestIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistryRequestTimedOutIterator{contract: _OCR2DRRegistry.contract, event: "RequestTimedOut", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchRequestTimedOut(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryRequestTimedOut, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule 
[]interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "RequestTimedOut", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryRequestTimedOut) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "RequestTimedOut", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseRequestTimedOut(log types.Log) (*OCR2DRRegistryRequestTimedOut, error) { + event := new(OCR2DRRegistryRequestTimedOut) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "RequestTimedOut", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistrySubscriptionCanceledIterator struct { + Event *OCR2DRRegistrySubscriptionCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = 
true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionCanceledIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionCanceled struct { + SubscriptionId uint64 + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterSubscriptionCanceled(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionCanceledIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionCanceled", subscriptionIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistrySubscriptionCanceledIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionCanceled", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionCanceled, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionCanceled", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionCanceled) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionCanceled(log types.Log) (*OCR2DRRegistrySubscriptionCanceled, error) { + event := new(OCR2DRRegistrySubscriptionCanceled) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistrySubscriptionConsumerAddedIterator struct { + Event *OCR2DRRegistrySubscriptionConsumerAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionConsumerAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionConsumerAddedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionConsumerAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionConsumerAdded struct { + SubscriptionId uint64 + Consumer common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionConsumerAddedIterator, error) { + + var subscriptionIdRule []interface{} + for _, 
subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionConsumerAdded", subscriptionIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistrySubscriptionConsumerAddedIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionConsumerAdded", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionConsumerAdded, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionConsumerAdded", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionConsumerAdded) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionConsumerAdded(log types.Log) (*OCR2DRRegistrySubscriptionConsumerAdded, error) { + event := new(OCR2DRRegistrySubscriptionConsumerAdded) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistrySubscriptionConsumerRemovedIterator struct { + Event *OCR2DRRegistrySubscriptionConsumerRemoved + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionConsumerRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionConsumerRemovedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionConsumerRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionConsumerRemoved struct { + SubscriptionId uint64 + Consumer common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionConsumerRemovedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionConsumerRemoved", subscriptionIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistrySubscriptionConsumerRemovedIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionConsumerRemoved", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink 
chan<- *OCR2DRRegistrySubscriptionConsumerRemoved, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionConsumerRemoved", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionConsumerRemoved) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionConsumerRemoved(log types.Log) (*OCR2DRRegistrySubscriptionConsumerRemoved, error) { + event := new(OCR2DRRegistrySubscriptionConsumerRemoved) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistrySubscriptionCreatedIterator struct { + Event *OCR2DRRegistrySubscriptionCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: 
+ it.Event = new(OCR2DRRegistrySubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionCreatedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionCreated struct { + SubscriptionId uint64 + Owner common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterSubscriptionCreated(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionCreatedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionCreated", subscriptionIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistrySubscriptionCreatedIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionCreated", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionCreated, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionCreated", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionCreated) + if err := 
_OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionCreated(log types.Log) (*OCR2DRRegistrySubscriptionCreated, error) { + event := new(OCR2DRRegistrySubscriptionCreated) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistrySubscriptionFundedIterator struct { + Event *OCR2DRRegistrySubscriptionFunded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionFundedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionFundedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionFundedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionFunded struct { + SubscriptionId uint64 + OldBalance *big.Int + NewBalance *big.Int + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) 
FilterSubscriptionFunded(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionFundedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionFunded", subscriptionIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistrySubscriptionFundedIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionFunded", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionFunded, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionFunded", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionFunded) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionFunded(log types.Log) (*OCR2DRRegistrySubscriptionFunded, error) { + event := new(OCR2DRRegistrySubscriptionFunded) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator struct { + Event *OCR2DRRegistrySubscriptionOwnerTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionOwnerTransferRequested struct { + SubscriptionId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionOwnerTransferRequested", subscriptionIdRule) + if err != nil { + return nil, err + } + return 
&OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionOwnerTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionOwnerTransferRequested, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionOwnerTransferRequested", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionOwnerTransferRequested) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionOwnerTransferRequested(log types.Log) (*OCR2DRRegistrySubscriptionOwnerTransferRequested, error) { + event := new(OCR2DRRegistrySubscriptionOwnerTransferRequested) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistrySubscriptionOwnerTransferredIterator struct { + Event *OCR2DRRegistrySubscriptionOwnerTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistrySubscriptionOwnerTransferredIterator) Next() 
bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistrySubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistrySubscriptionOwnerTransferredIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistrySubscriptionOwnerTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistrySubscriptionOwnerTransferred struct { + SubscriptionId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionOwnerTransferredIterator, error) { + + var subscriptionIdRule []interface{} + for _, subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "SubscriptionOwnerTransferred", subscriptionIdRule) + if err != nil { + return nil, err + } + return &OCR2DRRegistrySubscriptionOwnerTransferredIterator{contract: _OCR2DRRegistry.contract, event: "SubscriptionOwnerTransferred", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionOwnerTransferred, subscriptionId []uint64) (event.Subscription, error) { + + var subscriptionIdRule []interface{} + for _, 
subscriptionIdItem := range subscriptionId { + subscriptionIdRule = append(subscriptionIdRule, subscriptionIdItem) + } + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "SubscriptionOwnerTransferred", subscriptionIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistrySubscriptionOwnerTransferred) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseSubscriptionOwnerTransferred(log types.Log) (*OCR2DRRegistrySubscriptionOwnerTransferred, error) { + event := new(OCR2DRRegistrySubscriptionOwnerTransferred) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OCR2DRRegistryUnpausedIterator struct { + Event *OCR2DRRegistryUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OCR2DRRegistryUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OCR2DRRegistryUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case 
err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OCR2DRRegistryUnpausedIterator) Error() error { + return it.fail +} + +func (it *OCR2DRRegistryUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OCR2DRRegistryUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) FilterUnpaused(opts *bind.FilterOpts) (*OCR2DRRegistryUnpausedIterator, error) { + + logs, sub, err := _OCR2DRRegistry.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &OCR2DRRegistryUnpausedIterator{contract: _OCR2DRRegistry.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryUnpaused) (event.Subscription, error) { + + logs, sub, err := _OCR2DRRegistry.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OCR2DRRegistryUnpaused) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OCR2DRRegistry *OCR2DRRegistryFilterer) ParseUnpaused(log types.Log) (*OCR2DRRegistryUnpaused, error) { + event := new(OCR2DRRegistryUnpaused) + if err := _OCR2DRRegistry.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetConfig struct { + MaxGasLimit uint32 + StalenessSeconds uint32 + GasAfterPaymentCalculation *big.Int + FallbackWeiPerUnitLink *big.Int + GasOverhead uint32 + LinkAddress common.Address + LinkPriceFeed 
common.Address +} +type GetSubscription struct { + Balance *big.Int + Owner common.Address + Consumers []common.Address +} + +func (_OCR2DRRegistry *OCR2DRRegistry) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _OCR2DRRegistry.abi.Events["AuthorizedSendersChanged"].ID: + return _OCR2DRRegistry.ParseAuthorizedSendersChanged(log) + case _OCR2DRRegistry.abi.Events["BillingEnd"].ID: + return _OCR2DRRegistry.ParseBillingEnd(log) + case _OCR2DRRegistry.abi.Events["BillingStart"].ID: + return _OCR2DRRegistry.ParseBillingStart(log) + case _OCR2DRRegistry.abi.Events["ConfigSet"].ID: + return _OCR2DRRegistry.ParseConfigSet(log) + case _OCR2DRRegistry.abi.Events["FundsRecovered"].ID: + return _OCR2DRRegistry.ParseFundsRecovered(log) + case _OCR2DRRegistry.abi.Events["Initialized"].ID: + return _OCR2DRRegistry.ParseInitialized(log) + case _OCR2DRRegistry.abi.Events["OwnershipTransferRequested"].ID: + return _OCR2DRRegistry.ParseOwnershipTransferRequested(log) + case _OCR2DRRegistry.abi.Events["OwnershipTransferred"].ID: + return _OCR2DRRegistry.ParseOwnershipTransferred(log) + case _OCR2DRRegistry.abi.Events["Paused"].ID: + return _OCR2DRRegistry.ParsePaused(log) + case _OCR2DRRegistry.abi.Events["RequestTimedOut"].ID: + return _OCR2DRRegistry.ParseRequestTimedOut(log) + case _OCR2DRRegistry.abi.Events["SubscriptionCanceled"].ID: + return _OCR2DRRegistry.ParseSubscriptionCanceled(log) + case _OCR2DRRegistry.abi.Events["SubscriptionConsumerAdded"].ID: + return _OCR2DRRegistry.ParseSubscriptionConsumerAdded(log) + case _OCR2DRRegistry.abi.Events["SubscriptionConsumerRemoved"].ID: + return _OCR2DRRegistry.ParseSubscriptionConsumerRemoved(log) + case _OCR2DRRegistry.abi.Events["SubscriptionCreated"].ID: + return _OCR2DRRegistry.ParseSubscriptionCreated(log) + case _OCR2DRRegistry.abi.Events["SubscriptionFunded"].ID: + return _OCR2DRRegistry.ParseSubscriptionFunded(log) + case 
_OCR2DRRegistry.abi.Events["SubscriptionOwnerTransferRequested"].ID: + return _OCR2DRRegistry.ParseSubscriptionOwnerTransferRequested(log) + case _OCR2DRRegistry.abi.Events["SubscriptionOwnerTransferred"].ID: + return _OCR2DRRegistry.ParseSubscriptionOwnerTransferred(log) + case _OCR2DRRegistry.abi.Events["Unpaused"].ID: + return _OCR2DRRegistry.ParseUnpaused(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OCR2DRRegistryAuthorizedSendersChanged) Topic() common.Hash { + return common.HexToHash("0xf263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0") +} + +func (OCR2DRRegistryBillingEnd) Topic() common.Hash { + return common.HexToHash("0xc8dc973332de19a5f71b6026983110e9c2e04b0c98b87eb771ccb78607fd114f") +} + +func (OCR2DRRegistryBillingStart) Topic() common.Hash { + return common.HexToHash("0x99f7f4e65b4b9fbabd4e357c47ed3099b36e57ecd3a43e84662f34c207d0ebe4") +} + +func (OCR2DRRegistryConfigSet) Topic() common.Hash { + return common.HexToHash("0x24d3d934adfef9b9029d6ffa463c07d0139ed47d26ee23506f85ece2879d2bd4") +} + +func (OCR2DRRegistryFundsRecovered) Topic() common.Hash { + return common.HexToHash("0x59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600") +} + +func (OCR2DRRegistryInitialized) Topic() common.Hash { + return common.HexToHash("0x7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498") +} + +func (OCR2DRRegistryOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (OCR2DRRegistryOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (OCR2DRRegistryPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (OCR2DRRegistryRequestTimedOut) Topic() common.Hash { + return 
common.HexToHash("0xf1ca1e9147be737b04a2b018a79405f687a97de8dd8a2559bbe62357343af414") +} + +func (OCR2DRRegistrySubscriptionCanceled) Topic() common.Hash { + return common.HexToHash("0xe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd49815") +} + +func (OCR2DRRegistrySubscriptionConsumerAdded) Topic() common.Hash { + return common.HexToHash("0x43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e0") +} + +func (OCR2DRRegistrySubscriptionConsumerRemoved) Topic() common.Hash { + return common.HexToHash("0x182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b") +} + +func (OCR2DRRegistrySubscriptionCreated) Topic() common.Hash { + return common.HexToHash("0x464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf") +} + +func (OCR2DRRegistrySubscriptionFunded) Topic() common.Hash { + return common.HexToHash("0xd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f8") +} + +func (OCR2DRRegistrySubscriptionOwnerTransferRequested) Topic() common.Hash { + return common.HexToHash("0x69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be") +} + +func (OCR2DRRegistrySubscriptionOwnerTransferred) Topic() common.Hash { + return common.HexToHash("0x6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f0") +} + +func (OCR2DRRegistryUnpaused) Topic() common.Hash { + return common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (_OCR2DRRegistry *OCR2DRRegistry) Address() common.Address { + return _OCR2DRRegistry.address +} + +type OCR2DRRegistryInterface interface { + MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) + + EstimateCost(opts *bind.CallOpts, gasLimit uint32, gasPrice *big.Int, donFee *big.Int, registryFee *big.Int) (*big.Int, error) + + GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) + + GetConfig(opts *bind.CallOpts) (GetConfig, + + error) + + GetCurrentsubscriptionId(opts *bind.CallOpts) (uint64, error) + + GetRequestConfig(opts 
*bind.CallOpts) (uint32, []common.Address, error) + + GetRequiredFee(opts *bind.CallOpts, arg0 []byte, arg1 IFunctionsBillingRegistryRequestBilling) (*big.Int, error) + + GetSubscription(opts *bind.CallOpts, subscriptionId uint64) (GetSubscription, + + error) + + GetSubscriptionOwner(opts *bind.CallOpts, subscriptionId uint64) (common.Address, error) + + GetTotalBalance(opts *bind.CallOpts) (*big.Int, error) + + IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Paused(opts *bind.CallOpts) (bool, error) + + PendingRequestExists(opts *bind.CallOpts, subscriptionId uint64) (bool, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subscriptionId uint64) (*types.Transaction, error) + + AddConsumer(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) + + CancelSubscription(opts *bind.TransactOpts, subscriptionId uint64, to common.Address) (*types.Transaction, error) + + CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) + + FulfillAndBill(opts *bind.TransactOpts, requestId [32]byte, response []byte, err []byte, transmitter common.Address, signers [31]common.Address, signerCount uint8, reportValidationGas *big.Int, initialGas *big.Int) (*types.Transaction, error) + + Initialize(opts *bind.TransactOpts, link common.Address, linkEthFeed common.Address, oracle common.Address) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) + + OwnerCancelSubscription(opts *bind.TransactOpts, subscriptionId uint64) (*types.Transaction, error) + + Pause(opts *bind.TransactOpts) (*types.Transaction, error) + + RecoverFunds(opts 
*bind.TransactOpts, to common.Address) (*types.Transaction, error) + + RemoveConsumer(opts *bind.TransactOpts, subscriptionId uint64, consumer common.Address) (*types.Transaction, error) + + RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subscriptionId uint64, newOwner common.Address) (*types.Transaction, error) + + SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation *big.Int, fallbackWeiPerUnitLink *big.Int, gasOverhead uint32, requestTimeoutSeconds uint32) (*types.Transaction, error) + + StartBilling(opts *bind.TransactOpts, data []byte, billing IFunctionsBillingRegistryRequestBilling) (*types.Transaction, error) + + TimeoutRequests(opts *bind.TransactOpts, requestIdsToTimeout [][32]byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Unpause(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*OCR2DRRegistryAuthorizedSendersChangedIterator, error) + + WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryAuthorizedSendersChanged) (event.Subscription, error) + + ParseAuthorizedSendersChanged(log types.Log) (*OCR2DRRegistryAuthorizedSendersChanged, error) + + FilterBillingEnd(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DRRegistryBillingEndIterator, error) + + WatchBillingEnd(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryBillingEnd, requestId [][32]byte) (event.Subscription, error) + + ParseBillingEnd(log types.Log) (*OCR2DRRegistryBillingEnd, error) + + FilterBillingStart(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DRRegistryBillingStartIterator, error) + + WatchBillingStart(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryBillingStart, requestId [][32]byte) (event.Subscription, error) + + ParseBillingStart(log 
types.Log) (*OCR2DRRegistryBillingStart, error) + + FilterConfigSet(opts *bind.FilterOpts) (*OCR2DRRegistryConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*OCR2DRRegistryConfigSet, error) + + FilterFundsRecovered(opts *bind.FilterOpts) (*OCR2DRRegistryFundsRecoveredIterator, error) + + WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryFundsRecovered) (event.Subscription, error) + + ParseFundsRecovered(log types.Log) (*OCR2DRRegistryFundsRecovered, error) + + FilterInitialized(opts *bind.FilterOpts) (*OCR2DRRegistryInitializedIterator, error) + + WatchInitialized(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryInitialized) (event.Subscription, error) + + ParseInitialized(log types.Log) (*OCR2DRRegistryInitialized, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRRegistryOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*OCR2DRRegistryOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OCR2DRRegistryOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*OCR2DRRegistryOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*OCR2DRRegistryPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*OCR2DRRegistryPaused, error) + + 
FilterRequestTimedOut(opts *bind.FilterOpts, requestId [][32]byte) (*OCR2DRRegistryRequestTimedOutIterator, error) + + WatchRequestTimedOut(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryRequestTimedOut, requestId [][32]byte) (event.Subscription, error) + + ParseRequestTimedOut(log types.Log) (*OCR2DRRegistryRequestTimedOut, error) + + FilterSubscriptionCanceled(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionCanceledIterator, error) + + WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionCanceled, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionCanceled(log types.Log) (*OCR2DRRegistrySubscriptionCanceled, error) + + FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionConsumerAddedIterator, error) + + WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionConsumerAdded, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerAdded(log types.Log) (*OCR2DRRegistrySubscriptionConsumerAdded, error) + + FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionConsumerRemovedIterator, error) + + WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionConsumerRemoved, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerRemoved(log types.Log) (*OCR2DRRegistrySubscriptionConsumerRemoved, error) + + FilterSubscriptionCreated(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionCreatedIterator, error) + + WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionCreated, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionCreated(log types.Log) (*OCR2DRRegistrySubscriptionCreated, error) + + FilterSubscriptionFunded(opts *bind.FilterOpts, subscriptionId []uint64) 
(*OCR2DRRegistrySubscriptionFundedIterator, error) + + WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionFunded, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionFunded(log types.Log) (*OCR2DRRegistrySubscriptionFunded, error) + + FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionOwnerTransferRequestedIterator, error) + + WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionOwnerTransferRequested, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferRequested(log types.Log) (*OCR2DRRegistrySubscriptionOwnerTransferRequested, error) + + FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subscriptionId []uint64) (*OCR2DRRegistrySubscriptionOwnerTransferredIterator, error) + + WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistrySubscriptionOwnerTransferred, subscriptionId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferred(log types.Log) (*OCR2DRRegistrySubscriptionOwnerTransferred, error) + + FilterUnpaused(opts *bind.FilterOpts) (*OCR2DRRegistryUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *OCR2DRRegistryUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*OCR2DRRegistryUnpaused, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/offchain_aggregator_wrapper/offchain_aggregator_wrapper.go b/core/gethwrappers/generated/offchain_aggregator_wrapper/offchain_aggregator_wrapper.go new file mode 100644 index 00000000..2d7b6489 --- /dev/null +++ b/core/gethwrappers/generated/offchain_aggregator_wrapper/offchain_aggregator_wrapper.go @@ -0,0 +1,3041 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package offchain_aggregator_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var OffchainAggregatorMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_maximumGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_reasonableGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_microLinkPerEth\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_linkGweiPerObservation\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_linkGweiPerTransmission\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_validator\",\"type\":\"address\"},{\"internalType\":\"int192\",\"name\":\"_minAnswer\",\"type\":\"int192\"},{\"internalType\":\"int192\",\"name\":\"_maxAnswer\",\"type\":\"int192\"},{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"_billingAccessController\",\"type\":\"address\"},{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"_requesterAccessController\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"_decimals\",\"type\":\"uint8\"},{\"internalType\":\"string\",\"name\":\"_description\",\"type\":\"string\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int256\",\"name\":\"current\",\"type\":\"int256\"},{
\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"}],\"name\":\"AnswerUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"contractAccessControllerInterface\",\"name\":\"old\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"contractAccessControllerInterface\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"BillingAccessControllerSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"maximumGasPrice\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"reasonableGasPrice\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"microLinkPerEth\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"linkGweiPerObservation\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"linkGweiPerTransmission\",\"type\":\"uint32\"}],\"name\":\"BillingSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"threshold\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"encodedConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"encoded\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uin
t256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"startedBy\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"}],\"name\":\"NewRound\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint32\",\"name\":\"aggregatorRoundId\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"int192\",\"name\":\"answer\",\"type\":\"int192\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"int192[]\",\"name\":\"observations\",\"type\":\"int192[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"observers\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"rawReportContext\",\"type\":\"bytes32\"}],\"name\":\"NewTransmission\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"OraclePaid\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"},{\"indexed\":true,
\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previous\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"contractAccessControllerInterface\",\"name\":\"old\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"contractAccessControllerInterface\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"RequesterAccessControllerSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes16\",\"name\":\"configDigest\",\"type\":\"bytes16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"round\",\"type\":\"uint8\"}],\"name\":\"RoundRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previous\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"ValidatorUpdated\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_transmitter\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\"
,\"type\":\"function\"},{\"inputs\":[],\"name\":\"billingAccessController\",\"outputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"description\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_roundId\",\"type\":\"uint256\"}],\"name\":\"getAnswer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getBilling\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"maximumGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"reasonableGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"microLinkPerEth\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"linkGweiPerObservation\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"linkGweiPerTransmission\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint80\",\"name\":\"_roundId\",\"type\":\"uint80\"}],\"name\":\"getRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_roundId\",\"type\":\"uint256\"}],\"name\":\"getTimestamp\",\"outputs\
":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestAnswer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes16\",\"name\":\"configDigest\",\"type\":\"bytes16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRound\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestTransmissionDetails\",\"outputs\":[{\"internalType\":\"bytes16\",\"name\":\"configDigest\",\"type\":\"bytes16\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"round\",\"type\":\"uint8\"},{\"internalType\":\"int192\",\"name\":\"latestAnswer\",\"type\":\"int192\"},{\"internalType\":\"uint64\",\"name\":\"latestTimestamp\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"linkAvailableForPaymen
t\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"availableBalance\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maxAnswer\",\"outputs\":[{\"internalType\":\"int192\",\"name\":\"\",\"type\":\"int192\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"minAnswer\",\"outputs\":[{\"internalType\":\"int192\",\"name\":\"\",\"type\":\"int192\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_signerOrTransmitter\",\"type\":\"address\"}],\"name\":\"oracleObservationCount\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_transmitter\",\"type\":\"address\"}],\"name\":\"owedPayment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"addresspayable\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"requestNewRound\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"\",\"type\":\"uint80\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"requesterAccessController\",\"outputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_maximumGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_reasonableGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_microLinkPerEth\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_linkGweiPerObservation\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_linkGweiPerTransmission\",\"type\":\"uint32\"}],\"
name\":\"setBilling\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"_billingAccessController\",\"type\":\"address\"}],\"name\":\"setBillingAccessController\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"_threshold\",\"type\":\"uint8\"},{\"internalType\":\"uint64\",\"name\":\"_encodedConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_encoded\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_transmitters\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_payees\",\"type\":\"address[]\"}],\"name\":\"setPayees\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"_requesterAccessController\",\"type\":\"address\"}],\"name\":\"setRequesterAccessController\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newValidator\",\"type\":\"address\"}],\"name\":\"setValidator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\
"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_report\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"_rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"_ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"_rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"transmitters\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"validator\",\"outputs\":[{\"internalType\":\"contractAggregatorValidatorInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_transmitter\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +var OffchainAggregatorABI = OffchainAggregatorMetaData.ABI + +type OffchainAggregator struct { + address common.Address + abi abi.ABI + OffchainAggregatorCaller + OffchainAggregatorTransactor + OffchainAggregatorFilterer +} + +type OffchainAggregatorCaller struct { + contract *bind.BoundContract +} + +type OffchainAggregatorTransactor struct { + contract *bind.BoundContract +} + +type OffchainAggregatorFilterer struct { + contract *bind.BoundContract +} + +type OffchainAggregatorSession struct { + Contract *OffchainAggregator + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts 
+} + +type OffchainAggregatorCallerSession struct { + Contract *OffchainAggregatorCaller + CallOpts bind.CallOpts +} + +type OffchainAggregatorTransactorSession struct { + Contract *OffchainAggregatorTransactor + TransactOpts bind.TransactOpts +} + +type OffchainAggregatorRaw struct { + Contract *OffchainAggregator +} + +type OffchainAggregatorCallerRaw struct { + Contract *OffchainAggregatorCaller +} + +type OffchainAggregatorTransactorRaw struct { + Contract *OffchainAggregatorTransactor +} + +func NewOffchainAggregator(address common.Address, backend bind.ContractBackend) (*OffchainAggregator, error) { + abi, err := abi.JSON(strings.NewReader(OffchainAggregatorABI)) + if err != nil { + return nil, err + } + contract, err := bindOffchainAggregator(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OffchainAggregator{address: address, abi: abi, OffchainAggregatorCaller: OffchainAggregatorCaller{contract: contract}, OffchainAggregatorTransactor: OffchainAggregatorTransactor{contract: contract}, OffchainAggregatorFilterer: OffchainAggregatorFilterer{contract: contract}}, nil +} + +func NewOffchainAggregatorCaller(address common.Address, caller bind.ContractCaller) (*OffchainAggregatorCaller, error) { + contract, err := bindOffchainAggregator(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OffchainAggregatorCaller{contract: contract}, nil +} + +func NewOffchainAggregatorTransactor(address common.Address, transactor bind.ContractTransactor) (*OffchainAggregatorTransactor, error) { + contract, err := bindOffchainAggregator(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OffchainAggregatorTransactor{contract: contract}, nil +} + +func NewOffchainAggregatorFilterer(address common.Address, filterer bind.ContractFilterer) (*OffchainAggregatorFilterer, error) { + contract, err := bindOffchainAggregator(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return 
&OffchainAggregatorFilterer{contract: contract}, nil +} + +func bindOffchainAggregator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OffchainAggregatorMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OffchainAggregator *OffchainAggregatorRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OffchainAggregator.Contract.OffchainAggregatorCaller.contract.Call(opts, result, method, params...) +} + +func (_OffchainAggregator *OffchainAggregatorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OffchainAggregator.Contract.OffchainAggregatorTransactor.contract.Transfer(opts) +} + +func (_OffchainAggregator *OffchainAggregatorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OffchainAggregator.Contract.OffchainAggregatorTransactor.contract.Transact(opts, method, params...) +} + +func (_OffchainAggregator *OffchainAggregatorCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OffchainAggregator.Contract.contract.Call(opts, result, method, params...) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OffchainAggregator.Contract.contract.Transfer(opts) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OffchainAggregator.Contract.contract.Transact(opts, method, params...) 
+} + +func (_OffchainAggregator *OffchainAggregatorCaller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) PLI() (common.Address, error) { + return _OffchainAggregator.Contract.PLI(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) PLI() (common.Address, error) { + return _OffchainAggregator.Contract.PLI(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) BillingAccessController(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "billingAccessController") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) BillingAccessController() (common.Address, error) { + return _OffchainAggregator.Contract.BillingAccessController(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) BillingAccessController() (common.Address, error) { + return _OffchainAggregator.Contract.BillingAccessController(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) Decimals() (uint8, error) { + return _OffchainAggregator.Contract.Decimals(&_OffchainAggregator.CallOpts) +} + 
+func (_OffchainAggregator *OffchainAggregatorCallerSession) Decimals() (uint8, error) { + return _OffchainAggregator.Contract.Decimals(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) Description(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "description") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) Description() (string, error) { + return _OffchainAggregator.Contract.Description(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) Description() (string, error) { + return _OffchainAggregator.Contract.Description(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) GetAnswer(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "getAnswer", _roundId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) GetAnswer(_roundId *big.Int) (*big.Int, error) { + return _OffchainAggregator.Contract.GetAnswer(&_OffchainAggregator.CallOpts, _roundId) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) GetAnswer(_roundId *big.Int) (*big.Int, error) { + return _OffchainAggregator.Contract.GetAnswer(&_OffchainAggregator.CallOpts, _roundId) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) GetBilling(opts *bind.CallOpts) (GetBilling, + + error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "getBilling") + + outstruct := new(GetBilling) + if err != nil { + return *outstruct, err + } + + outstruct.MaximumGasPrice = *abi.ConvertType(out[0], new(uint32)).(*uint32) 
+ outstruct.ReasonableGasPrice = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.MicroLinkPerEth = *abi.ConvertType(out[2], new(uint32)).(*uint32) + outstruct.LinkGweiPerObservation = *abi.ConvertType(out[3], new(uint32)).(*uint32) + outstruct.LinkGweiPerTransmission = *abi.ConvertType(out[4], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) GetBilling() (GetBilling, + + error) { + return _OffchainAggregator.Contract.GetBilling(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) GetBilling() (GetBilling, + + error) { + return _OffchainAggregator.Contract.GetBilling(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "getRoundData", _roundId) + + outstruct := new(GetRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _OffchainAggregator.Contract.GetRoundData(&_OffchainAggregator.CallOpts, _roundId) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _OffchainAggregator.Contract.GetRoundData(&_OffchainAggregator.CallOpts, _roundId) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) GetTimestamp(opts *bind.CallOpts, _roundId 
*big.Int) (*big.Int, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "getTimestamp", _roundId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) GetTimestamp(_roundId *big.Int) (*big.Int, error) { + return _OffchainAggregator.Contract.GetTimestamp(&_OffchainAggregator.CallOpts, _roundId) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) GetTimestamp(_roundId *big.Int) (*big.Int, error) { + return _OffchainAggregator.Contract.GetTimestamp(&_OffchainAggregator.CallOpts, _roundId) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) LatestAnswer(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "latestAnswer") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) LatestAnswer() (*big.Int, error) { + return _OffchainAggregator.Contract.LatestAnswer(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) LatestAnswer() (*big.Int, error) { + return _OffchainAggregator.Contract.LatestAnswer(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([16]byte)).(*[16]byte) + + return *outstruct, err + +} + +func 
(_OffchainAggregator *OffchainAggregatorSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _OffchainAggregator.Contract.LatestConfigDetails(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _OffchainAggregator.Contract.LatestConfigDetails(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) LatestRound(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "latestRound") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) LatestRound() (*big.Int, error) { + return _OffchainAggregator.Contract.LatestRound(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) LatestRound() (*big.Int, error) { + return _OffchainAggregator.Contract.LatestRound(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "latestRoundData") + + outstruct := new(LatestRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) LatestRoundData() (LatestRoundData, + + error) { + return 
_OffchainAggregator.Contract.LatestRoundData(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) LatestRoundData() (LatestRoundData, + + error) { + return _OffchainAggregator.Contract.LatestRoundData(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) LatestTimestamp(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "latestTimestamp") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) LatestTimestamp() (*big.Int, error) { + return _OffchainAggregator.Contract.LatestTimestamp(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) LatestTimestamp() (*big.Int, error) { + return _OffchainAggregator.Contract.LatestTimestamp(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) LatestTransmissionDetails(opts *bind.CallOpts) (LatestTransmissionDetails, + + error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "latestTransmissionDetails") + + outstruct := new(LatestTransmissionDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigDigest = *abi.ConvertType(out[0], new([16]byte)).(*[16]byte) + outstruct.Epoch = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.Round = *abi.ConvertType(out[2], new(uint8)).(*uint8) + outstruct.LatestAnswer = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.LatestTimestamp = *abi.ConvertType(out[4], new(uint64)).(*uint64) + + return *outstruct, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) LatestTransmissionDetails() (LatestTransmissionDetails, + + error) { + return _OffchainAggregator.Contract.LatestTransmissionDetails(&_OffchainAggregator.CallOpts) +} + +func 
(_OffchainAggregator *OffchainAggregatorCallerSession) LatestTransmissionDetails() (LatestTransmissionDetails, + + error) { + return _OffchainAggregator.Contract.LatestTransmissionDetails(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) LinkAvailableForPayment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "linkAvailableForPayment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) LinkAvailableForPayment() (*big.Int, error) { + return _OffchainAggregator.Contract.LinkAvailableForPayment(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) LinkAvailableForPayment() (*big.Int, error) { + return _OffchainAggregator.Contract.LinkAvailableForPayment(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) MaxAnswer(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "maxAnswer") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) MaxAnswer() (*big.Int, error) { + return _OffchainAggregator.Contract.MaxAnswer(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) MaxAnswer() (*big.Int, error) { + return _OffchainAggregator.Contract.MaxAnswer(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) MinAnswer(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "minAnswer") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], 
new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) MinAnswer() (*big.Int, error) { + return _OffchainAggregator.Contract.MinAnswer(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) MinAnswer() (*big.Int, error) { + return _OffchainAggregator.Contract.MinAnswer(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) OracleObservationCount(opts *bind.CallOpts, _signerOrTransmitter common.Address) (uint16, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "oracleObservationCount", _signerOrTransmitter) + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) OracleObservationCount(_signerOrTransmitter common.Address) (uint16, error) { + return _OffchainAggregator.Contract.OracleObservationCount(&_OffchainAggregator.CallOpts, _signerOrTransmitter) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) OracleObservationCount(_signerOrTransmitter common.Address) (uint16, error) { + return _OffchainAggregator.Contract.OracleObservationCount(&_OffchainAggregator.CallOpts, _signerOrTransmitter) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) OwedPayment(opts *bind.CallOpts, _transmitter common.Address) (*big.Int, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "owedPayment", _transmitter) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) OwedPayment(_transmitter common.Address) (*big.Int, error) { + return _OffchainAggregator.Contract.OwedPayment(&_OffchainAggregator.CallOpts, _transmitter) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) 
OwedPayment(_transmitter common.Address) (*big.Int, error) { + return _OffchainAggregator.Contract.OwedPayment(&_OffchainAggregator.CallOpts, _transmitter) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) Owner() (common.Address, error) { + return _OffchainAggregator.Contract.Owner(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) Owner() (common.Address, error) { + return _OffchainAggregator.Contract.Owner(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) RequesterAccessController(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "requesterAccessController") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) RequesterAccessController() (common.Address, error) { + return _OffchainAggregator.Contract.RequesterAccessController(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) RequesterAccessController() (common.Address, error) { + return _OffchainAggregator.Contract.RequesterAccessController(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) Transmitters(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "transmitters") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], 
new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) Transmitters() ([]common.Address, error) { + return _OffchainAggregator.Contract.Transmitters(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) Transmitters() ([]common.Address, error) { + return _OffchainAggregator.Contract.Transmitters(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) Validator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "validator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) Validator() (common.Address, error) { + return _OffchainAggregator.Contract.Validator(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) Validator() (common.Address, error) { + return _OffchainAggregator.Contract.Validator(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCaller) Version(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _OffchainAggregator.contract.Call(opts, &out, "version") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_OffchainAggregator *OffchainAggregatorSession) Version() (*big.Int, error) { + return _OffchainAggregator.Contract.Version(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorCallerSession) Version() (*big.Int, error) { + return _OffchainAggregator.Contract.Version(&_OffchainAggregator.CallOpts) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return 
_OffchainAggregator.contract.Transact(opts, "acceptOwnership") +} + +func (_OffchainAggregator *OffchainAggregatorSession) AcceptOwnership() (*types.Transaction, error) { + return _OffchainAggregator.Contract.AcceptOwnership(&_OffchainAggregator.TransactOpts) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _OffchainAggregator.Contract.AcceptOwnership(&_OffchainAggregator.TransactOpts) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) AcceptPayeeship(opts *bind.TransactOpts, _transmitter common.Address) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "acceptPayeeship", _transmitter) +} + +func (_OffchainAggregator *OffchainAggregatorSession) AcceptPayeeship(_transmitter common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.AcceptPayeeship(&_OffchainAggregator.TransactOpts, _transmitter) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) AcceptPayeeship(_transmitter common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.AcceptPayeeship(&_OffchainAggregator.TransactOpts, _transmitter) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) RequestNewRound(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "requestNewRound") +} + +func (_OffchainAggregator *OffchainAggregatorSession) RequestNewRound() (*types.Transaction, error) { + return _OffchainAggregator.Contract.RequestNewRound(&_OffchainAggregator.TransactOpts) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) RequestNewRound() (*types.Transaction, error) { + return _OffchainAggregator.Contract.RequestNewRound(&_OffchainAggregator.TransactOpts) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) SetBilling(opts *bind.TransactOpts, _maximumGasPrice uint32, _reasonableGasPrice uint32, _microLinkPerEth uint32, 
_linkGweiPerObservation uint32, _linkGweiPerTransmission uint32) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "setBilling", _maximumGasPrice, _reasonableGasPrice, _microLinkPerEth, _linkGweiPerObservation, _linkGweiPerTransmission) +} + +func (_OffchainAggregator *OffchainAggregatorSession) SetBilling(_maximumGasPrice uint32, _reasonableGasPrice uint32, _microLinkPerEth uint32, _linkGweiPerObservation uint32, _linkGweiPerTransmission uint32) (*types.Transaction, error) { + return _OffchainAggregator.Contract.SetBilling(&_OffchainAggregator.TransactOpts, _maximumGasPrice, _reasonableGasPrice, _microLinkPerEth, _linkGweiPerObservation, _linkGweiPerTransmission) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetBilling(_maximumGasPrice uint32, _reasonableGasPrice uint32, _microLinkPerEth uint32, _linkGweiPerObservation uint32, _linkGweiPerTransmission uint32) (*types.Transaction, error) { + return _OffchainAggregator.Contract.SetBilling(&_OffchainAggregator.TransactOpts, _maximumGasPrice, _reasonableGasPrice, _microLinkPerEth, _linkGweiPerObservation, _linkGweiPerTransmission) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) SetBillingAccessController(opts *bind.TransactOpts, _billingAccessController common.Address) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "setBillingAccessController", _billingAccessController) +} + +func (_OffchainAggregator *OffchainAggregatorSession) SetBillingAccessController(_billingAccessController common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.SetBillingAccessController(&_OffchainAggregator.TransactOpts, _billingAccessController) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetBillingAccessController(_billingAccessController common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.SetBillingAccessController(&_OffchainAggregator.TransactOpts, 
_billingAccessController) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _threshold uint8, _encodedConfigVersion uint64, _encoded []byte) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "setConfig", _signers, _transmitters, _threshold, _encodedConfigVersion, _encoded) +} + +func (_OffchainAggregator *OffchainAggregatorSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _threshold uint8, _encodedConfigVersion uint64, _encoded []byte) (*types.Transaction, error) { + return _OffchainAggregator.Contract.SetConfig(&_OffchainAggregator.TransactOpts, _signers, _transmitters, _threshold, _encodedConfigVersion, _encoded) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _threshold uint8, _encodedConfigVersion uint64, _encoded []byte) (*types.Transaction, error) { + return _OffchainAggregator.Contract.SetConfig(&_OffchainAggregator.TransactOpts, _signers, _transmitters, _threshold, _encodedConfigVersion, _encoded) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) SetPayees(opts *bind.TransactOpts, _transmitters []common.Address, _payees []common.Address) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "setPayees", _transmitters, _payees) +} + +func (_OffchainAggregator *OffchainAggregatorSession) SetPayees(_transmitters []common.Address, _payees []common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.SetPayees(&_OffchainAggregator.TransactOpts, _transmitters, _payees) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetPayees(_transmitters []common.Address, _payees []common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.SetPayees(&_OffchainAggregator.TransactOpts, _transmitters, _payees) +} + 
+func (_OffchainAggregator *OffchainAggregatorTransactor) SetRequesterAccessController(opts *bind.TransactOpts, _requesterAccessController common.Address) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "setRequesterAccessController", _requesterAccessController) +} + +func (_OffchainAggregator *OffchainAggregatorSession) SetRequesterAccessController(_requesterAccessController common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.SetRequesterAccessController(&_OffchainAggregator.TransactOpts, _requesterAccessController) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetRequesterAccessController(_requesterAccessController common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.SetRequesterAccessController(&_OffchainAggregator.TransactOpts, _requesterAccessController) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) SetValidator(opts *bind.TransactOpts, _newValidator common.Address) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "setValidator", _newValidator) +} + +func (_OffchainAggregator *OffchainAggregatorSession) SetValidator(_newValidator common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.SetValidator(&_OffchainAggregator.TransactOpts, _newValidator) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetValidator(_newValidator common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.SetValidator(&_OffchainAggregator.TransactOpts, _newValidator) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) TransferOwnership(opts *bind.TransactOpts, _to common.Address) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "transferOwnership", _to) +} + +func (_OffchainAggregator *OffchainAggregatorSession) TransferOwnership(_to common.Address) (*types.Transaction, error) { + return 
_OffchainAggregator.Contract.TransferOwnership(&_OffchainAggregator.TransactOpts, _to) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) TransferOwnership(_to common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.TransferOwnership(&_OffchainAggregator.TransactOpts, _to) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) TransferPayeeship(opts *bind.TransactOpts, _transmitter common.Address, _proposed common.Address) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "transferPayeeship", _transmitter, _proposed) +} + +func (_OffchainAggregator *OffchainAggregatorSession) TransferPayeeship(_transmitter common.Address, _proposed common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.TransferPayeeship(&_OffchainAggregator.TransactOpts, _transmitter, _proposed) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) TransferPayeeship(_transmitter common.Address, _proposed common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.TransferPayeeship(&_OffchainAggregator.TransactOpts, _transmitter, _proposed) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) Transmit(opts *bind.TransactOpts, _report []byte, _rs [][32]byte, _ss [][32]byte, _rawVs [32]byte) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "transmit", _report, _rs, _ss, _rawVs) +} + +func (_OffchainAggregator *OffchainAggregatorSession) Transmit(_report []byte, _rs [][32]byte, _ss [][32]byte, _rawVs [32]byte) (*types.Transaction, error) { + return _OffchainAggregator.Contract.Transmit(&_OffchainAggregator.TransactOpts, _report, _rs, _ss, _rawVs) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) Transmit(_report []byte, _rs [][32]byte, _ss [][32]byte, _rawVs [32]byte) (*types.Transaction, error) { + return _OffchainAggregator.Contract.Transmit(&_OffchainAggregator.TransactOpts, 
_report, _rs, _ss, _rawVs) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) WithdrawFunds(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "withdrawFunds", _recipient, _amount) +} + +func (_OffchainAggregator *OffchainAggregatorSession) WithdrawFunds(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _OffchainAggregator.Contract.WithdrawFunds(&_OffchainAggregator.TransactOpts, _recipient, _amount) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) WithdrawFunds(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _OffchainAggregator.Contract.WithdrawFunds(&_OffchainAggregator.TransactOpts, _recipient, _amount) +} + +func (_OffchainAggregator *OffchainAggregatorTransactor) WithdrawPayment(opts *bind.TransactOpts, _transmitter common.Address) (*types.Transaction, error) { + return _OffchainAggregator.contract.Transact(opts, "withdrawPayment", _transmitter) +} + +func (_OffchainAggregator *OffchainAggregatorSession) WithdrawPayment(_transmitter common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.WithdrawPayment(&_OffchainAggregator.TransactOpts, _transmitter) +} + +func (_OffchainAggregator *OffchainAggregatorTransactorSession) WithdrawPayment(_transmitter common.Address) (*types.Transaction, error) { + return _OffchainAggregator.Contract.WithdrawPayment(&_OffchainAggregator.TransactOpts, _transmitter) +} + +type OffchainAggregatorAnswerUpdatedIterator struct { + Event *OffchainAggregatorAnswerUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorAnswerUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorAnswerUpdated) + 
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorAnswerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorAnswerUpdatedIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorAnswerUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorAnswerUpdated struct { + Current *big.Int + RoundId *big.Int + UpdatedAt *big.Int + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterAnswerUpdated(opts *bind.FilterOpts, current []*big.Int, roundId []*big.Int) (*OffchainAggregatorAnswerUpdatedIterator, error) { + + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "AnswerUpdated", currentRule, roundIdRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorAnswerUpdatedIterator{contract: _OffchainAggregator.contract, event: "AnswerUpdated", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchAnswerUpdated(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorAnswerUpdated, current []*big.Int, roundId []*big.Int) (event.Subscription, error) { + + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = 
append(roundIdRule, roundIdItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "AnswerUpdated", currentRule, roundIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorAnswerUpdated) + if err := _OffchainAggregator.contract.UnpackLog(event, "AnswerUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseAnswerUpdated(log types.Log) (*OffchainAggregatorAnswerUpdated, error) { + event := new(OffchainAggregatorAnswerUpdated) + if err := _OffchainAggregator.contract.UnpackLog(event, "AnswerUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorBillingAccessControllerSetIterator struct { + Event *OffchainAggregatorBillingAccessControllerSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorBillingAccessControllerSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorBillingAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorBillingAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true 
+ it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorBillingAccessControllerSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorBillingAccessControllerSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorBillingAccessControllerSet struct { + Old common.Address + Current common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterBillingAccessControllerSet(opts *bind.FilterOpts) (*OffchainAggregatorBillingAccessControllerSetIterator, error) { + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "BillingAccessControllerSet") + if err != nil { + return nil, err + } + return &OffchainAggregatorBillingAccessControllerSetIterator{contract: _OffchainAggregator.contract, event: "BillingAccessControllerSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchBillingAccessControllerSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorBillingAccessControllerSet) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "BillingAccessControllerSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorBillingAccessControllerSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "BillingAccessControllerSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseBillingAccessControllerSet(log types.Log) (*OffchainAggregatorBillingAccessControllerSet, error) { + event := new(OffchainAggregatorBillingAccessControllerSet) + if err := 
_OffchainAggregator.contract.UnpackLog(event, "BillingAccessControllerSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorBillingSetIterator struct { + Event *OffchainAggregatorBillingSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorBillingSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorBillingSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorBillingSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorBillingSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorBillingSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorBillingSet struct { + MaximumGasPrice uint32 + ReasonableGasPrice uint32 + MicroLinkPerEth uint32 + LinkGweiPerObservation uint32 + LinkGweiPerTransmission uint32 + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterBillingSet(opts *bind.FilterOpts) (*OffchainAggregatorBillingSetIterator, error) { + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "BillingSet") + if err != nil { + return nil, err + } + return &OffchainAggregatorBillingSetIterator{contract: _OffchainAggregator.contract, event: "BillingSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchBillingSet(opts *bind.WatchOpts, sink chan<- 
*OffchainAggregatorBillingSet) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "BillingSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorBillingSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "BillingSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseBillingSet(log types.Log) (*OffchainAggregatorBillingSet, error) { + event := new(OffchainAggregatorBillingSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "BillingSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorConfigSetIterator struct { + Event *OffchainAggregatorConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorConfigSetIterator) Error() error { 
+ return it.fail +} + +func (it *OffchainAggregatorConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + Threshold uint8 + EncodedConfigVersion uint64 + Encoded []byte + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterConfigSet(opts *bind.FilterOpts) (*OffchainAggregatorConfigSetIterator, error) { + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &OffchainAggregatorConfigSetIterator{contract: _OffchainAggregator.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorConfigSet) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorConfigSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseConfigSet(log types.Log) (*OffchainAggregatorConfigSet, error) { + event := new(OffchainAggregatorConfigSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorNewRoundIterator struct { + Event *OffchainAggregatorNewRound + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorNewRoundIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorNewRound) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorNewRound) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorNewRoundIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorNewRoundIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorNewRound struct { + RoundId *big.Int + StartedBy common.Address + StartedAt *big.Int + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterNewRound(opts *bind.FilterOpts, roundId []*big.Int, startedBy []common.Address) (*OffchainAggregatorNewRoundIterator, error) { + + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + var startedByRule []interface{} + for _, startedByItem := range startedBy { + startedByRule = append(startedByRule, startedByItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "NewRound", roundIdRule, startedByRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorNewRoundIterator{contract: _OffchainAggregator.contract, event: "NewRound", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchNewRound(opts *bind.WatchOpts, sink chan<- 
*OffchainAggregatorNewRound, roundId []*big.Int, startedBy []common.Address) (event.Subscription, error) { + + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + var startedByRule []interface{} + for _, startedByItem := range startedBy { + startedByRule = append(startedByRule, startedByItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "NewRound", roundIdRule, startedByRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorNewRound) + if err := _OffchainAggregator.contract.UnpackLog(event, "NewRound", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseNewRound(log types.Log) (*OffchainAggregatorNewRound, error) { + event := new(OffchainAggregatorNewRound) + if err := _OffchainAggregator.contract.UnpackLog(event, "NewRound", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorNewTransmissionIterator struct { + Event *OffchainAggregatorNewTransmission + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorNewTransmissionIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorNewTransmission) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := 
<-it.logs: + it.Event = new(OffchainAggregatorNewTransmission) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorNewTransmissionIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorNewTransmissionIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorNewTransmission struct { + AggregatorRoundId uint32 + Answer *big.Int + Transmitter common.Address + Observations []*big.Int + Observers []byte + RawReportContext [32]byte + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterNewTransmission(opts *bind.FilterOpts, aggregatorRoundId []uint32) (*OffchainAggregatorNewTransmissionIterator, error) { + + var aggregatorRoundIdRule []interface{} + for _, aggregatorRoundIdItem := range aggregatorRoundId { + aggregatorRoundIdRule = append(aggregatorRoundIdRule, aggregatorRoundIdItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "NewTransmission", aggregatorRoundIdRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorNewTransmissionIterator{contract: _OffchainAggregator.contract, event: "NewTransmission", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchNewTransmission(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorNewTransmission, aggregatorRoundId []uint32) (event.Subscription, error) { + + var aggregatorRoundIdRule []interface{} + for _, aggregatorRoundIdItem := range aggregatorRoundId { + aggregatorRoundIdRule = append(aggregatorRoundIdRule, aggregatorRoundIdItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "NewTransmission", aggregatorRoundIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + 
defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorNewTransmission) + if err := _OffchainAggregator.contract.UnpackLog(event, "NewTransmission", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseNewTransmission(log types.Log) (*OffchainAggregatorNewTransmission, error) { + event := new(OffchainAggregatorNewTransmission) + if err := _OffchainAggregator.contract.UnpackLog(event, "NewTransmission", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorOraclePaidIterator struct { + Event *OffchainAggregatorOraclePaid + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorOraclePaidIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorOraclePaid) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorOraclePaid) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorOraclePaidIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorOraclePaidIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorOraclePaid struct { + Transmitter common.Address + Payee 
common.Address + Amount *big.Int + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterOraclePaid(opts *bind.FilterOpts) (*OffchainAggregatorOraclePaidIterator, error) { + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "OraclePaid") + if err != nil { + return nil, err + } + return &OffchainAggregatorOraclePaidIterator{contract: _OffchainAggregator.contract, event: "OraclePaid", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchOraclePaid(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorOraclePaid) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "OraclePaid") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorOraclePaid) + if err := _OffchainAggregator.contract.UnpackLog(event, "OraclePaid", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseOraclePaid(log types.Log) (*OffchainAggregatorOraclePaid, error) { + event := new(OffchainAggregatorOraclePaid) + if err := _OffchainAggregator.contract.UnpackLog(event, "OraclePaid", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorOwnershipTransferRequestedIterator struct { + Event *OffchainAggregatorOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := 
<-it.logs: + it.Event = new(OffchainAggregatorOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OffchainAggregatorOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorOwnershipTransferRequestedIterator{contract: _OffchainAggregator.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { 
+ fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorOwnershipTransferRequested) + if err := _OffchainAggregator.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseOwnershipTransferRequested(log types.Log) (*OffchainAggregatorOwnershipTransferRequested, error) { + event := new(OffchainAggregatorOwnershipTransferRequested) + if err := _OffchainAggregator.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorOwnershipTransferredIterator struct { + Event *OffchainAggregatorOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorOwnershipTransferred) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OffchainAggregatorOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorOwnershipTransferredIterator{contract: _OffchainAggregator.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + 
event := new(OffchainAggregatorOwnershipTransferred) + if err := _OffchainAggregator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseOwnershipTransferred(log types.Log) (*OffchainAggregatorOwnershipTransferred, error) { + event := new(OffchainAggregatorOwnershipTransferred) + if err := _OffchainAggregator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorPayeeshipTransferRequestedIterator struct { + Event *OffchainAggregatorPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
OffchainAggregatorPayeeshipTransferRequested struct { + Transmitter common.Address + Current common.Address + Proposed common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, current []common.Address, proposed []common.Address) (*OffchainAggregatorPayeeshipTransferRequestedIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var proposedRule []interface{} + for _, proposedItem := range proposed { + proposedRule = append(proposedRule, proposedItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, currentRule, proposedRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorPayeeshipTransferRequestedIterator{contract: _OffchainAggregator.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorPayeeshipTransferRequested, transmitter []common.Address, current []common.Address, proposed []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var proposedRule []interface{} + for _, proposedItem := range proposed { + proposedRule = append(proposedRule, proposedItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, currentRule, 
proposedRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorPayeeshipTransferRequested) + if err := _OffchainAggregator.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParsePayeeshipTransferRequested(log types.Log) (*OffchainAggregatorPayeeshipTransferRequested, error) { + event := new(OffchainAggregatorPayeeshipTransferRequested) + if err := _OffchainAggregator.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorPayeeshipTransferredIterator struct { + Event *OffchainAggregatorPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*OffchainAggregatorPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorPayeeshipTransferred struct { + Transmitter common.Address + Previous common.Address + Current common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, previous []common.Address, current []common.Address) (*OffchainAggregatorPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, previousRule, currentRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorPayeeshipTransferredIterator{contract: _OffchainAggregator.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorPayeeshipTransferred, transmitter []common.Address, previous []common.Address, current []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = 
append(currentRule, currentItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, previousRule, currentRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorPayeeshipTransferred) + if err := _OffchainAggregator.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParsePayeeshipTransferred(log types.Log) (*OffchainAggregatorPayeeshipTransferred, error) { + event := new(OffchainAggregatorPayeeshipTransferred) + if err := _OffchainAggregator.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorRequesterAccessControllerSetIterator struct { + Event *OffchainAggregatorRequesterAccessControllerSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorRequesterAccessControllerSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorRequesterAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorRequesterAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorRequesterAccessControllerSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorRequesterAccessControllerSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorRequesterAccessControllerSet struct { + Old common.Address + Current common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterRequesterAccessControllerSet(opts *bind.FilterOpts) (*OffchainAggregatorRequesterAccessControllerSetIterator, error) { + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "RequesterAccessControllerSet") + if err != nil { + return nil, err + } + return &OffchainAggregatorRequesterAccessControllerSetIterator{contract: _OffchainAggregator.contract, event: "RequesterAccessControllerSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchRequesterAccessControllerSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorRequesterAccessControllerSet) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "RequesterAccessControllerSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorRequesterAccessControllerSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "RequesterAccessControllerSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseRequesterAccessControllerSet(log types.Log) 
(*OffchainAggregatorRequesterAccessControllerSet, error) { + event := new(OffchainAggregatorRequesterAccessControllerSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "RequesterAccessControllerSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorRoundRequestedIterator struct { + Event *OffchainAggregatorRoundRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorRoundRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorRoundRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorRoundRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorRoundRequestedIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorRoundRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorRoundRequested struct { + Requester common.Address + ConfigDigest [16]byte + Epoch uint32 + Round uint8 + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterRoundRequested(opts *bind.FilterOpts, requester []common.Address) (*OffchainAggregatorRoundRequestedIterator, error) { + + var requesterRule []interface{} + for _, requesterItem := range requester { + requesterRule = append(requesterRule, requesterItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "RoundRequested", 
requesterRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorRoundRequestedIterator{contract: _OffchainAggregator.contract, event: "RoundRequested", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchRoundRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorRoundRequested, requester []common.Address) (event.Subscription, error) { + + var requesterRule []interface{} + for _, requesterItem := range requester { + requesterRule = append(requesterRule, requesterItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "RoundRequested", requesterRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorRoundRequested) + if err := _OffchainAggregator.contract.UnpackLog(event, "RoundRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseRoundRequested(log types.Log) (*OffchainAggregatorRoundRequested, error) { + event := new(OffchainAggregatorRoundRequested) + if err := _OffchainAggregator.contract.UnpackLog(event, "RoundRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorValidatorUpdatedIterator struct { + Event *OffchainAggregatorValidatorUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorValidatorUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorValidatorUpdated) + if 
err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorValidatorUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorValidatorUpdatedIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorValidatorUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorValidatorUpdated struct { + Previous common.Address + Current common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterValidatorUpdated(opts *bind.FilterOpts, previous []common.Address, current []common.Address) (*OffchainAggregatorValidatorUpdatedIterator, error) { + + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "ValidatorUpdated", previousRule, currentRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorValidatorUpdatedIterator{contract: _OffchainAggregator.contract, event: "ValidatorUpdated", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchValidatorUpdated(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorValidatorUpdated, previous []common.Address, current []common.Address) (event.Subscription, error) { + + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} 
+ for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "ValidatorUpdated", previousRule, currentRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorValidatorUpdated) + if err := _OffchainAggregator.contract.UnpackLog(event, "ValidatorUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseValidatorUpdated(log types.Log) (*OffchainAggregatorValidatorUpdated, error) { + event := new(OffchainAggregatorValidatorUpdated) + if err := _OffchainAggregator.contract.UnpackLog(event, "ValidatorUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetBilling struct { + MaximumGasPrice uint32 + ReasonableGasPrice uint32 + MicroLinkPerEth uint32 + LinkGweiPerObservation uint32 + LinkGweiPerTransmission uint32 +} +type GetRoundData struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} +type LatestConfigDetails struct { + ConfigCount uint32 + BlockNumber uint32 + ConfigDigest [16]byte +} +type LatestRoundData struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} +type LatestTransmissionDetails struct { + ConfigDigest [16]byte + Epoch uint32 + Round uint8 + LatestAnswer *big.Int + LatestTimestamp uint64 +} + +func (_OffchainAggregator *OffchainAggregator) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case 
_OffchainAggregator.abi.Events["AnswerUpdated"].ID: + return _OffchainAggregator.ParseAnswerUpdated(log) + case _OffchainAggregator.abi.Events["BillingAccessControllerSet"].ID: + return _OffchainAggregator.ParseBillingAccessControllerSet(log) + case _OffchainAggregator.abi.Events["BillingSet"].ID: + return _OffchainAggregator.ParseBillingSet(log) + case _OffchainAggregator.abi.Events["ConfigSet"].ID: + return _OffchainAggregator.ParseConfigSet(log) + case _OffchainAggregator.abi.Events["NewRound"].ID: + return _OffchainAggregator.ParseNewRound(log) + case _OffchainAggregator.abi.Events["NewTransmission"].ID: + return _OffchainAggregator.ParseNewTransmission(log) + case _OffchainAggregator.abi.Events["OraclePaid"].ID: + return _OffchainAggregator.ParseOraclePaid(log) + case _OffchainAggregator.abi.Events["OwnershipTransferRequested"].ID: + return _OffchainAggregator.ParseOwnershipTransferRequested(log) + case _OffchainAggregator.abi.Events["OwnershipTransferred"].ID: + return _OffchainAggregator.ParseOwnershipTransferred(log) + case _OffchainAggregator.abi.Events["PayeeshipTransferRequested"].ID: + return _OffchainAggregator.ParsePayeeshipTransferRequested(log) + case _OffchainAggregator.abi.Events["PayeeshipTransferred"].ID: + return _OffchainAggregator.ParsePayeeshipTransferred(log) + case _OffchainAggregator.abi.Events["RequesterAccessControllerSet"].ID: + return _OffchainAggregator.ParseRequesterAccessControllerSet(log) + case _OffchainAggregator.abi.Events["RoundRequested"].ID: + return _OffchainAggregator.ParseRoundRequested(log) + case _OffchainAggregator.abi.Events["ValidatorUpdated"].ID: + return _OffchainAggregator.ParseValidatorUpdated(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OffchainAggregatorAnswerUpdated) Topic() common.Hash { + return common.HexToHash("0x0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f") +} + +func 
(OffchainAggregatorBillingAccessControllerSet) Topic() common.Hash { + return common.HexToHash("0x793cb73064f3c8cde7e187ae515511e6e56d1ee89bf08b82fa60fb70f8d48912") +} + +func (OffchainAggregatorBillingSet) Topic() common.Hash { + return common.HexToHash("0xd0d9486a2c673e2a4b57fc82e4c8a556b3e2b82dd5db07e2c04a920ca0f469b6") +} + +func (OffchainAggregatorConfigSet) Topic() common.Hash { + return common.HexToHash("0x25d719d88a4512dd76c7442b910a83360845505894eb444ef299409e180f8fb9") +} + +func (OffchainAggregatorNewRound) Topic() common.Hash { + return common.HexToHash("0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271") +} + +func (OffchainAggregatorNewTransmission) Topic() common.Hash { + return common.HexToHash("0xf6a97944f31ea060dfde0566e4167c1a1082551e64b60ecb14d599a9d023d451") +} + +func (OffchainAggregatorOraclePaid) Topic() common.Hash { + return common.HexToHash("0xe8ec50e5150ae28ae37e493ff389ffab7ffaec2dc4dccfca03f12a3de29d12b2") +} + +func (OffchainAggregatorOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (OffchainAggregatorOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (OffchainAggregatorPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (OffchainAggregatorPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (OffchainAggregatorRequesterAccessControllerSet) Topic() common.Hash { + return common.HexToHash("0x27b89aede8b560578baaa25ee5ce3852c5eecad1e114b941bbd89e1eb4bae634") +} + +func (OffchainAggregatorRoundRequested) Topic() common.Hash { + return 
common.HexToHash("0x3ea16a923ff4b1df6526e854c9e3a995c43385d70e73359e10623c74f0b52037") +} + +func (OffchainAggregatorValidatorUpdated) Topic() common.Hash { + return common.HexToHash("0xcfac5dc75b8d9a7e074162f59d9adcd33da59f0fe8dfb21580db298fc0fdad0d") +} + +func (_OffchainAggregator *OffchainAggregator) Address() common.Address { + return _OffchainAggregator.address +} + +type OffchainAggregatorInterface interface { + PLI(opts *bind.CallOpts) (common.Address, error) + + BillingAccessController(opts *bind.CallOpts) (common.Address, error) + + Decimals(opts *bind.CallOpts) (uint8, error) + + Description(opts *bind.CallOpts) (string, error) + + GetAnswer(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) + + GetBilling(opts *bind.CallOpts) (GetBilling, + + error) + + GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) + + GetTimestamp(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) + + LatestAnswer(opts *bind.CallOpts) (*big.Int, error) + + LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) + + LatestRound(opts *bind.CallOpts) (*big.Int, error) + + LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) + + LatestTimestamp(opts *bind.CallOpts) (*big.Int, error) + + LatestTransmissionDetails(opts *bind.CallOpts) (LatestTransmissionDetails, + + error) + + LinkAvailableForPayment(opts *bind.CallOpts) (*big.Int, error) + + MaxAnswer(opts *bind.CallOpts) (*big.Int, error) + + MinAnswer(opts *bind.CallOpts) (*big.Int, error) + + OracleObservationCount(opts *bind.CallOpts, _signerOrTransmitter common.Address) (uint16, error) + + OwedPayment(opts *bind.CallOpts, _transmitter common.Address) (*big.Int, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + RequesterAccessController(opts *bind.CallOpts) (common.Address, error) + + Transmitters(opts *bind.CallOpts) ([]common.Address, error) + + Validator(opts *bind.CallOpts) (common.Address, error) + + Version(opts *bind.CallOpts) (*big.Int, 
error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptPayeeship(opts *bind.TransactOpts, _transmitter common.Address) (*types.Transaction, error) + + RequestNewRound(opts *bind.TransactOpts) (*types.Transaction, error) + + SetBilling(opts *bind.TransactOpts, _maximumGasPrice uint32, _reasonableGasPrice uint32, _microLinkPerEth uint32, _linkGweiPerObservation uint32, _linkGweiPerTransmission uint32) (*types.Transaction, error) + + SetBillingAccessController(opts *bind.TransactOpts, _billingAccessController common.Address) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _threshold uint8, _encodedConfigVersion uint64, _encoded []byte) (*types.Transaction, error) + + SetPayees(opts *bind.TransactOpts, _transmitters []common.Address, _payees []common.Address) (*types.Transaction, error) + + SetRequesterAccessController(opts *bind.TransactOpts, _requesterAccessController common.Address) (*types.Transaction, error) + + SetValidator(opts *bind.TransactOpts, _newValidator common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, _to common.Address) (*types.Transaction, error) + + TransferPayeeship(opts *bind.TransactOpts, _transmitter common.Address, _proposed common.Address) (*types.Transaction, error) + + Transmit(opts *bind.TransactOpts, _report []byte, _rs [][32]byte, _ss [][32]byte, _rawVs [32]byte) (*types.Transaction, error) + + WithdrawFunds(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) + + WithdrawPayment(opts *bind.TransactOpts, _transmitter common.Address) (*types.Transaction, error) + + FilterAnswerUpdated(opts *bind.FilterOpts, current []*big.Int, roundId []*big.Int) (*OffchainAggregatorAnswerUpdatedIterator, error) + + WatchAnswerUpdated(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorAnswerUpdated, current []*big.Int, roundId []*big.Int) (event.Subscription, 
error) + + ParseAnswerUpdated(log types.Log) (*OffchainAggregatorAnswerUpdated, error) + + FilterBillingAccessControllerSet(opts *bind.FilterOpts) (*OffchainAggregatorBillingAccessControllerSetIterator, error) + + WatchBillingAccessControllerSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorBillingAccessControllerSet) (event.Subscription, error) + + ParseBillingAccessControllerSet(log types.Log) (*OffchainAggregatorBillingAccessControllerSet, error) + + FilterBillingSet(opts *bind.FilterOpts) (*OffchainAggregatorBillingSetIterator, error) + + WatchBillingSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorBillingSet) (event.Subscription, error) + + ParseBillingSet(log types.Log) (*OffchainAggregatorBillingSet, error) + + FilterConfigSet(opts *bind.FilterOpts) (*OffchainAggregatorConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*OffchainAggregatorConfigSet, error) + + FilterNewRound(opts *bind.FilterOpts, roundId []*big.Int, startedBy []common.Address) (*OffchainAggregatorNewRoundIterator, error) + + WatchNewRound(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorNewRound, roundId []*big.Int, startedBy []common.Address) (event.Subscription, error) + + ParseNewRound(log types.Log) (*OffchainAggregatorNewRound, error) + + FilterNewTransmission(opts *bind.FilterOpts, aggregatorRoundId []uint32) (*OffchainAggregatorNewTransmissionIterator, error) + + WatchNewTransmission(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorNewTransmission, aggregatorRoundId []uint32) (event.Subscription, error) + + ParseNewTransmission(log types.Log) (*OffchainAggregatorNewTransmission, error) + + FilterOraclePaid(opts *bind.FilterOpts) (*OffchainAggregatorOraclePaidIterator, error) + + WatchOraclePaid(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorOraclePaid) (event.Subscription, error) + + ParseOraclePaid(log types.Log) 
(*OffchainAggregatorOraclePaid, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OffchainAggregatorOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*OffchainAggregatorOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OffchainAggregatorOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*OffchainAggregatorOwnershipTransferred, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, current []common.Address, proposed []common.Address) (*OffchainAggregatorPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorPayeeshipTransferRequested, transmitter []common.Address, current []common.Address, proposed []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*OffchainAggregatorPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, previous []common.Address, current []common.Address) (*OffchainAggregatorPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorPayeeshipTransferred, transmitter []common.Address, previous []common.Address, current []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*OffchainAggregatorPayeeshipTransferred, error) + + 
FilterRequesterAccessControllerSet(opts *bind.FilterOpts) (*OffchainAggregatorRequesterAccessControllerSetIterator, error) + + WatchRequesterAccessControllerSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorRequesterAccessControllerSet) (event.Subscription, error) + + ParseRequesterAccessControllerSet(log types.Log) (*OffchainAggregatorRequesterAccessControllerSet, error) + + FilterRoundRequested(opts *bind.FilterOpts, requester []common.Address) (*OffchainAggregatorRoundRequestedIterator, error) + + WatchRoundRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorRoundRequested, requester []common.Address) (event.Subscription, error) + + ParseRoundRequested(log types.Log) (*OffchainAggregatorRoundRequested, error) + + FilterValidatorUpdated(opts *bind.FilterOpts, previous []common.Address, current []common.Address) (*OffchainAggregatorValidatorUpdatedIterator, error) + + WatchValidatorUpdated(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorValidatorUpdated, previous []common.Address, current []common.Address) (event.Subscription, error) + + ParseValidatorUpdated(log types.Log) (*OffchainAggregatorValidatorUpdated, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/operator_factory/operator_factory.go b/core/gethwrappers/generated/operator_factory/operator_factory.go new file mode 100644 index 00000000..47111b1f --- /dev/null +++ b/core/gethwrappers/generated/operator_factory/operator_factory.go @@ -0,0 +1,632 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package operator_factory + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var OperatorFactoryMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"linkAddress\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"forwarder\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"AuthorizedForwarderCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"operator\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"OperatorCreated\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"query\",\"type\":\"address\"}],\"name\":\"created\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"deployNewForwarder\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"intern
alType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"deployNewForwarderAndTransferOwnership\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"deployNewOperator\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"deployNewOperatorAndForwarder\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"linkToken\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x60a060405234801561001057600080fd5b50604051615d59380380615d5983398101604081905261002f91610040565b6001600160a01b0316608052610070565b60006020828403121561005257600080fd5b81516001600160a01b038116811461006957600080fd5b9392505050565b608051615cab6100ae6000396000818161014f015281816101e6015281816102e3015281816103da015281816104be01526105a50152615cab6000f3fe60806040523480156200001157600080fd5b5060043610620000875760003560e01c806357970e93116200006257806357970e931462000149578063d42efd831462000171578063d689d09514620001be578063f4adb6e114620001d557600080fd5b8063181f5a77146200008c57806332f01eae14620000e15780633babafdb1462000119575b600080fd5b620000c96040518060400160405280601581526020017f4f70657261746f72466163746f727920312e302e30000000000000000000000081525081565b604051620000d8919062000717565b60405180910390f35b620000eb620001df565b6040805173ffffffffffffffffffffffffffffffffffffffff938416815292909116602083015201620000d8565b62000123620003c6565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001620000d8565b620001237f000000000000000000000000000000000000000000000000000000000000000081565b620001ad620001823660046200075d565b73ffffffffffffffffffffffffffffffffffffffff1660009081526020819052604090205460ff1690565b6040519015158152602001620000d8565b62000123620001cf3660046200077b565b620004b9565b62000123620005a0565b60008060007f000000000000000000000000000000000000000000000000000000000000000033604051620002149062000695565b73ffffffffffffffffffffffffffffffffffffffff928316815291166020820152604001604051809103906000f08015801562000255573d6000803e3d6000fd5b5073ffffffffffffffffffffffffffffffffffffffff811660008181526020819052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790555192935033928392917fd3bb727b2e716a1f142bc9c63c66fe0ae4c5fbc89234f8aa77d0c864a7b63bab91a4604080516000808252602082019092527f000000000000000000000000000000000000000000000000000000000000000090309084906040516200031590620006a3565b62000324949392919062000805565b604051809103906000f08015801562000341573
d6000803e3d6000fd5b5073ffffffffffffffffffffffffffffffffffffffff811660008181526020819052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790555192935033923092917f1c9576ab03e40fdf23673f82d904a0f029c8a6629272a4edad4be877e83af64b91a490939092509050565b6040805160008082526020820190925281907f000000000000000000000000000000000000000000000000000000000000000090339083906040516200040c90620006a3565b6200041b949392919062000805565b604051809103906000f08015801562000438573d6000803e3d6000fd5b5073ffffffffffffffffffffffffffffffffffffffff811660008181526020819052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790555192935033928392917f1c9576ab03e40fdf23673f82d904a0f029c8a6629272a4edad4be877e83af64b91a4919050565b6000807f000000000000000000000000000000000000000000000000000000000000000033868686604051620004ef90620006a3565b620004ff95949392919062000852565b604051809103906000f0801580156200051c573d6000803e3d6000fd5b5073ffffffffffffffffffffffffffffffffffffffff811660008181526020819052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790555192935033928392917f1c9576ab03e40fdf23673f82d904a0f029c8a6629272a4edad4be877e83af64b91a4949350505050565b6000807f000000000000000000000000000000000000000000000000000000000000000033604051620005d39062000695565b73ffffffffffffffffffffffffffffffffffffffff928316815291166020820152604001604051809103906000f08015801562000614573d6000803e3d6000fd5b5073ffffffffffffffffffffffffffffffffffffffff811660008181526020819052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790555192935033928392917fd3bb727b2e716a1f142bc9c63c66fe0ae4c5fbc89234f8aa77d0c864a7b63bab91a4919050565b613d3280620008d483390190565b611699806200460683390190565b6000815180845260005b81811015620006d957602081850181015186830182015201620006bb565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b60208152600062000
72c6020830184620006b1565b9392505050565b803573ffffffffffffffffffffffffffffffffffffffff811681146200075857600080fd5b919050565b6000602082840312156200077057600080fd5b6200072c8262000733565b6000806000604084860312156200079157600080fd5b6200079c8462000733565b9250602084013567ffffffffffffffff80821115620007ba57600080fd5b818601915086601f830112620007cf57600080fd5b813581811115620007df57600080fd5b876020828501011115620007f257600080fd5b6020830194508093505050509250925092565b600073ffffffffffffffffffffffffffffffffffffffff8087168352808616602084015280851660408401525060806060830152620008486080830184620006b1565b9695505050505050565b600073ffffffffffffffffffffffffffffffffffffffff8088168352808716602084015280861660408401525060806060830152826080830152828460a0840137600060a0848401015260a07fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8501168301019050969550505050505056fe60a060405260016006553480156200001657600080fd5b5060405162003d3238038062003d328339810160408190526200003991620001ab565b808060006001600160a01b038216620000995760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600280546001600160a01b0319166001600160a01b0384811691909117909155811615620000cc57620000cc81620000e2565b505050506001600160a01b0316608052620001e3565b336001600160a01b038216036200013c5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000090565b600380546001600160a01b0319166001600160a01b03838116918217909255600254604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b80516001600160a01b0381168114620001a657600080fd5b919050565b60008060408385031215620001bf57600080fd5b620001ca836200018e565b9150620001da602084016200018e565b90509250929050565b608051613aed62000245600039600081816101ec0152818161075e015281816109f301528181610c4f0152818161187c01528181611ae601528181611b8601528181611f21015281816123ba015281816
1266d0152612bf50152613aed6000f3fe6080604052600436106101965760003560e01c80636ae0bc76116100e1578063a4c0ed361161008a578063f2fde38b11610064578063f2fde38b146104aa578063f3fef3a3146104ca578063fa00763a146104ea578063fc4a03ed1461053057600080fd5b8063a4c0ed361461044a578063eb007d991461046a578063ee56997b1461048a57600080fd5b806379ba5097116100bb57806379ba5097146103ea5780638da5cb5b146103ff578063902fc3701461042a57600080fd5b80636ae0bc76146103975780636bd59ec0146103b75780636ee4d553146103ca57600080fd5b80633ec5bc1411610143578063501883011161011d578063501883011461033e57806352043783146103615780635ffa62881461037757600080fd5b80633ec5bc14146102ce57806340429946146102ee5780634ab0d1901461030e57600080fd5b8063181f5a7711610174578063181f5a77146102365780632408afaa1461028c5780633c6d41b9146102ae57600080fd5b806301994b991461019b578063033f49f7146101bd578063165d35e1146101dd575b600080fd5b3480156101a757600080fd5b506101bb6101b6366004613068565b610550565b005b3480156101c957600080fd5b506101bb6101d836600461310e565b610753565b3480156101e957600080fd5b507f00000000000000000000000000000000000000000000000000000000000000005b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b34801561024257600080fd5b5061027f6040518060400160405280600e81526020017f4f70657261746f7220312e302e3000000000000000000000000000000000000081525081565b60405161022d9190613187565b34801561029857600080fd5b506102a161096c565b60405161022d91906131d8565b3480156102ba57600080fd5b506101bb6102c9366004613267565b6109db565b3480156102da57600080fd5b506101bb6102e93660046132f4565b610ae3565b3480156102fa57600080fd5b506101bb61030936600461334b565b610c37565b34801561031a57600080fd5b5061032e6103293660046133ee565b610d40565b604051901515815260200161022d565b34801561034a57600080fd5b50610353611036565b60405190815260200161022d565b34801561036d57600080fd5b5061035361012c81565b34801561038357600080fd5b506101bb610392366004613448565b611045565b3480156103a357600080fd5b5061032e6103b23660046134b4565b6110c9565b6101bb6103c5366004613448565b611445565b3480156103d657600080fd5b5
06101bb6103e5366004613538565b611682565b3480156103f657600080fd5b506101bb611906565b34801561040b57600080fd5b5060025473ffffffffffffffffffffffffffffffffffffffff1661020c565b34801561043657600080fd5b5061032e610445366004613575565b611a07565b34801561045657600080fd5b506101bb6104653660046135f4565b611b6e565b34801561047657600080fd5b506101bb610485366004613538565b611cfc565b34801561049657600080fd5b506101bb6104a5366004613068565b611fac565b3480156104b657600080fd5b506101bb6104c53660046136df565b6122ba565b3480156104d657600080fd5b506101bb6104e5366004613703565b6122ce565b3480156104f657600080fd5b5061032e6105053660046136df565b73ffffffffffffffffffffffffffffffffffffffff1660009081526020819052604090205460ff1690565b34801561053c57600080fd5b506101bb61054b366004613448565b612433565b61055861258f565b6105c3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f43616e6e6f742073657420617574686f72697a65642073656e6465727300000060448201526064015b60405180910390fd5b60005b8181101561074e576001600560008585858181106105e6576105e661372f565b90506020020160208101906105fb91906136df565b73ffffffffffffffffffffffffffffffffffffffff168152602081019190915260400160002080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169115159190911790558282828181106106605761066061372f565b905060200201602081019061067591906136df565b73ffffffffffffffffffffffffffffffffffffffff167f615a0c1cb00a60d4acd77ec67acf2f17f223ef0932d591052fabc33643fe7e8260405160405180910390a28282828181106106c9576106c961372f565b90506020020160208101906106de91906136df565b73ffffffffffffffffffffffffffffffffffffffff166379ba50976040518163ffffffff1660e01b8152600401600060405180830381600087803b15801561072557600080fd5b505af1158015610739573d6000803e3d6000fd5b50505050806107479061378d565b90506105c6565b505050565b61075b6125e4565b827f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603610811576040517f08c379a0000000000000000
00000000000000000000000000000000000000000815260206004820152601360248201527f43616e6e6f742063616c6c20746f204c494e4b0000000000000000000000000060448201526064016105ba565b73ffffffffffffffffffffffffffffffffffffffff84163b61088f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f4d75737420666f727761726420746f206120636f6e747261637400000000000060448201526064016105ba565b60008473ffffffffffffffffffffffffffffffffffffffff1684846040516108b89291906137c5565b6000604051808303816000865af19150503d80600081146108f5576040519150601f19603f3d011682016040523d82523d6000602084013e6108fa565b606091505b5050905080610965576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f466f727761726465642063616c6c206661696c6564000000000000000000000060448201526064016105ba565b5050505050565b606060018054806020026020016040519081016040528092919081815260200182805480156109d157602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116109a6575b5050505050905090565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610a7a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f4d75737420757365204c494e4b20746f6b656e0000000000000000000000000060448201526064016105ba565b600080610a8b8a8a8c8a8a8a612667565b91509150877fd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c658b848c8e8c878c8c8c604051610acf9998979695949392919061381e565b60405180910390a250505050505050505050565b610aeb6125e4565b60005b82811015610c3157600060056000868685818110610b0e57610b0e61372f565b9050602002016020810190610b2391906136df565b73ffffffffffffffffffffffffffffffffffffffff168152602081019190915260400160002080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016911515919091179055838382818110610b8857610b8861372f565b9050602002016020810190610b9d91906136df565b6040517ff2fde38b000
00000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8481166004830152919091169063f2fde38b90602401600060405180830381600087803b158015610c0857600080fd5b505af1158015610c1c573d6000803e3d6000fd5b5050505080610c2a9061378d565b9050610aee565b50505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610cd6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f4d75737420757365204c494e4b20746f6b656e0000000000000000000000000060448201526064016105ba565b600080610ce78b8b8a8a8a8a612667565b91509150887fd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c658c848d8f8c878c8c8c604051610d2b9998979695949392919061381e565b60405180910390a25050505050505050505050565b6000610d4a612945565b600087815260046020526040812054889160089190911b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169003610deb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f4d757374206861766520612076616c696420726571756573744964000000000060448201526064016105ba565b73ffffffffffffffffffffffffffffffffffffffff8616600090815260056020526040902054869060ff1615610e7d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f43616e6e6f742063616c6c206f776e656420636f6e747261637400000000000060448201526064016105ba565b610e8c898989898960016129be565b60405189907f9e9bc7616d42c2835d05ae617e508454e63b30b934be8aa932ebc125e0e58a6490600090a262061a805a1015610f24576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4d7573742070726f7669646520636f6e73756d657220656e6f7567682067617360448201526064016105ba565b60008773ffffffffffffffffffffffffffffffffffffffff16878b87604051602401610f5a929190918252602082015260400190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517
bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff00000000000000000000000000000000000000000000000000000000909416939093179092529051610fe391906138a9565b6000604051808303816000865af19150503d8060008114611020576040519150601f19603f3d011682016040523d82523d6000602084013e611025565b606091505b50909b9a5050505050505050505050565b6000611040612bb6565b905090565b61104d61258f565b6110b3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f43616e6e6f742073657420617574686f72697a65642073656e6465727300000060448201526064016105ba565b6110bd8484610550565b610c3184848484612433565b60006110d3612945565b600088815260046020526040812054899160089190911b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169003611174576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f4d757374206861766520612076616c696420726571756573744964000000000060448201526064016105ba565b73ffffffffffffffffffffffffffffffffffffffff8716600090815260056020526040902054879060ff1615611206576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f43616e6e6f742063616c6c206f776e656420636f6e747261637400000000000060448201526064016105ba565b8985856020811015611274576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f526573706f6e7365206d757374206265203e203332206279746573000000000060448201526064016105ba565b81358381146112df576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f466972737420776f7264206d757374206265207265717565737449640000000060448201526064016105ba565b6112ee8e8e8e8e8e60026129be565b6040518e907f9e9bc7616d42c2835d05ae617e508454e63b30b934be8aa932ebc125e0e58a6490600090a262061a805a1015611386576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4d7573742070726f7669646520636f6e73756d657220656e6f756768206
7617360448201526064016105ba565b60008c73ffffffffffffffffffffffffffffffffffffffff168c8b8b6040516020016113b4939291906138c5565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152908290526113ec916138a9565b6000604051808303816000865af19150503d8060008114611429576040519150601f19603f3d011682016040523d82523d6000602084013e61142e565b606091505b509098505050505050505050979650505050505050565b821580159061145357508281145b6114b9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f496e76616c6964206172726179206c656e67746828732900000000000000000060448201526064016105ba565b3460005b848110156116195760008484838181106114d9576114d961372f565b90506020020135905080836114ee9190613901565b925060008787848181106115045761150461372f565b905060200201602081019061151991906136df565b73ffffffffffffffffffffffffffffffffffffffff168260405160006040518083038185875af1925050503d8060008114611570576040519150601f19603f3d011682016040523d82523d6000602084013e611575565b606091505b5050905080611606576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603a60248201527f416464726573733a20756e61626c6520746f2073656e642076616c75652c207260448201527f6563697069656e74206d6179206861766520726576657274656400000000000060648201526084016105ba565b5050806116129061378d565b90506114bd565b508015610965576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f546f6f206d756368204554482073656e7400000000000000000000000000000060448201526064016105ba565b6040805160208082018690527fffffffffffffffffffffffffffffffffffffffff0000000000000000000000003360601b16828401527fffffffff00000000000000000000000000000000000000000000000000000000851660548301526058808301859052835180840390910181526078909201909252805191012060009060008681526004602052604090205490915060081b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00908116908216146117a5576040517f08c379a0000000000000000000000000000000000000000
00000000000000000815260206004820152601e60248201527f506172616d7320646f206e6f74206d617463682072657175657374204944000060448201526064016105ba565b4282111561180f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f52657175657374206973206e6f7420657870697265640000000000000000000060448201526064016105ba565b6000858152600460205260408082208290555186917fa7842b9ec549398102c0d91b1b9919b2f20558aefdadf57528a95c6cd3292e9391a26040517fa9059cbb000000000000000000000000000000000000000000000000000000008152336004820152602481018590527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169063a9059cbb906044016020604051808303816000875af11580156118da573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906118fe919061391a565b505050505050565b60035473ffffffffffffffffffffffffffffffffffffffff163314611987576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016105ba565b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000008082163390811790935560038054909116905560405173ffffffffffffffffffffffffffffffffffffffff909116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a350565b6000611a116125e4565b8380611a1b612bb6565b1015611aa9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603560248201527f416d6f756e74207265717565737465642069732067726561746572207468616e60448201527f20776974686472617761626c652062616c616e6365000000000000000000000060648201526084016105ba565b6040517f4000aea000000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001690634000aea090611b2190899089908990899060040161393c565b6020604051808303816000875af1158015611b40573d6000803e3d6000fd5b5050505060405
13d601f19601f82011682018060405250810190611b64919061391a565b9695505050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614611c0d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f4d75737420757365204c494e4b20746f6b656e0000000000000000000000000060448201526064016105ba565b60208101518190611c1e8183612c7f565b84602484015283604484015260003073ffffffffffffffffffffffffffffffffffffffff1684604051611c5191906138a9565b600060405180830381855af49150503d8060008114611c8c576040519150601f19603f3d011682016040523d82523d6000602084013e611c91565b606091505b50509050806118fe576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f556e61626c6520746f206372656174652072657175657374000000000000000060448201526064016105ba565b604080513360601b7fffffffffffffffffffffffffffffffffffffffff0000000000000000000000001660208083018290526034808401899052845180850390910181526054840185528051908201206074840188905260948401929092527fffffffff00000000000000000000000000000000000000000000000000000000861660a884015260ac8084018690528451808503909101815260cc9093019093528151919092012060009060008381526004602052604090205490915060081b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0090811690821614611e4a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f506172616d7320646f206e6f74206d617463682072657175657374204944000060448201526064016105ba565b42831115611eb4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f52657175657374206973206e6f7420657870697265640000000000000000000060448201526064016105ba565b6000828152600460205260408082208290555183917fa7842b9ec549398102c0d91b1b9919b2f20558aefdadf57528a95c6cd3292e9391a26040517fa9059cbb000000000000000000000000000000000000000000000000000000008152336004820152602481018690527f00000000000000000000000000000
0000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169063a9059cbb906044016020604051808303816000875af1158015611f7f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611fa3919061391a565b50505050505050565b611fb461258f565b61201a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f43616e6e6f742073657420617574686f72697a65642073656e6465727300000060448201526064016105ba565b80612081576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f4d7573742068617665206174206c6561737420312073656e646572000000000060448201526064016105ba565b60015460005b81811015612116576000806000600184815481106120a7576120a761372f565b60009182526020808320919091015473ffffffffffffffffffffffffffffffffffffffff168352820192909252604001902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001691151591909117905561210f8161378d565b9050612087565b5060005b8281101561226c576000808585848181106121375761213761372f565b905060200201602081019061214c91906136df565b73ffffffffffffffffffffffffffffffffffffffff16815260208101919091526040016000205460ff16156121dd576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f4d757374206e6f742068617665206475706c69636174652073656e646572730060448201526064016105ba565b60016000808686858181106121f4576121f461372f565b905060200201602081019061220991906136df565b73ffffffffffffffffffffffffffffffffffffffff168152602081019190915260400160002080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169115159190911790556122658161378d565b905061211a565b5061227960018484612f88565b507ff263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a08383336040516122ad939291906139c8565b60405180910390a1505050565b6122c26125e4565b6122cb81612dfb565b50565b6122d66125e4565b80806122e0612bb6565b101561236e576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526035602
48201527f416d6f756e74207265717565737465642069732067726561746572207468616e60448201527f20776974686472617761626c652062616c616e6365000000000000000000000060648201526084016105ba565b6040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8481166004830152602482018490527f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb906044016020604051808303816000875af1158015612403573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612427919061391a565b61074e5761074e613a02565b61243b61258f565b6124a1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f43616e6e6f742073657420617574686f72697a65642073656e6465727300000060448201526064016105ba565b7f1bb185903e2cb2f1b303523128b60e314dea81df4f8d9b7351cadd344f6e772784848484336040516124d8959493929190613a31565b60405180910390a160005b83811015610965578484828181106124fd576124fd61372f565b905060200201602081019061251291906136df565b73ffffffffffffffffffffffffffffffffffffffff1663ee56997b84846040518363ffffffff1660e01b815260040161254c929190613a81565b600060405180830381600087803b15801561256657600080fd5b505af115801561257a573d6000803e3d6000fd5b50505050806125889061378d565b90506124e3565b3360009081526020819052604081205460ff16806110405750336125c860025473ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff1614905090565b60025473ffffffffffffffffffffffffffffffffffffffff163314612665576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016105ba565b565b600080857f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603612720576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f43616e6e6f742063616c6c2
0746f204c494e4b0000000000000000000000000060448201526064016105ba565b6040517fffffffffffffffffffffffffffffffffffffffff00000000000000000000000060608b901b16602082015260348101869052605401604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001815291815281516020928301206000818152600490935291205490935060081b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00161561282b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f4d75737420757365206120756e6971756520494400000000000000000000000060448201526064016105ba565b61283761012c42613a9d565b6040805160208082018c90527fffffffffffffffffffffffffffffffffffffffff00000000000000000000000060608c901b16828401527fffffffff000000000000000000000000000000000000000000000000000000008a1660548301526058808301859052835180840390910181526078909201909252805191012090925060405180604001604052808260ff191681526020016128d687612ef1565b60ff9081169091526000868152600460209081526040909120835193909101519091167f01000000000000000000000000000000000000000000000000000000000000000260089290921c919091179055600654612935908a90613a9d565b6006555050965096945050505050565b3360009081526020819052604090205460ff16612665576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f4e6f7420617574686f72697a65642073656e646572000000000000000000000060448201526064016105ba565b6040805160208082018890527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606088901b16828401527fffffffff00000000000000000000000000000000000000000000000000000000861660548301526058808301869052835180840390910181526078909201909252805191012060009060008881526004602052604090205490915060081b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0090811690821614612ae2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f506172616d7320646f206e6f74206d617463682072657175657374204944000060448201526064016105ba565b612aeb826
12ef1565b60008881526004602052604090205460ff9182167f01000000000000000000000000000000000000000000000000000000000000009091049091161115612b8e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f446174612076657273696f6e73206d757374206d61746368000000000000000060448201526064016105ba565b85600654612b9c9190613901565b600655505050600093845250506004602052506040812055565b60006001600654612bc79190613901565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906370a0823190602401602060405180830381865afa158015612c51573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612c759190613ab0565b6110409190613901565b612c8b60026020613ac9565b612c96906004613a9d565b81511015612d00576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f496e76616c69642072657175657374206c656e6774680000000000000000000060448201526064016105ba565b7fffffffff0000000000000000000000000000000000000000000000000000000082167f3c6d41b9000000000000000000000000000000000000000000000000000000001480612d9157507fffffffff0000000000000000000000000000000000000000000000000000000082167f4042994600000000000000000000000000000000000000000000000000000000145b612df7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f4d757374207573652077686974656c69737465642066756e6374696f6e73000060448201526064016105ba565b5050565b3373ffffffffffffffffffffffffffffffffffffffff821603612e7a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016105ba565b600380547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff838116918217909255600254604051919216907fed8889f5603
26eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b600060ff821115612f84576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f53616665436173743a2076616c756520646f65736e27742066697420696e203860448201527f206269747300000000000000000000000000000000000000000000000000000060648201526084016105ba565b5090565b828054828255906000526020600020908101928215613000579160200282015b828111156130005781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff843516178255602090920191600190910190612fa8565b50612f849291505b80821115612f845760008155600101613008565b60008083601f84011261302e57600080fd5b50813567ffffffffffffffff81111561304657600080fd5b6020830191508360208260051b850101111561306157600080fd5b9250929050565b6000806020838503121561307b57600080fd5b823567ffffffffffffffff81111561309257600080fd5b61309e8582860161301c565b90969095509350505050565b73ffffffffffffffffffffffffffffffffffffffff811681146122cb57600080fd5b60008083601f8401126130de57600080fd5b50813567ffffffffffffffff8111156130f657600080fd5b60208301915083602082850101111561306157600080fd5b60008060006040848603121561312357600080fd5b833561312e816130aa565b9250602084013567ffffffffffffffff81111561314a57600080fd5b613156868287016130cc565b9497909650939450505050565b60005b8381101561317e578181015183820152602001613166565b50506000910152565b60208152600082518060208401526131a6816040850160208701613163565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b6020808252825182820181905260009190848201906040850190845b8181101561322657835173ffffffffffffffffffffffffffffffffffffffff16835292840192918401916001016131f4565b50909695505050505050565b80357fffffffff000000000000000000000000000000000000000000000000000000008116811461326257600080fd5b919050565b60008060008060008060008060e0898b03121561328357600080fd5b883561328e816130aa565b975060208901359650604089013595506132aa60608a01613232565b94506080890135935060a0890
135925060c089013567ffffffffffffffff8111156132d457600080fd5b6132e08b828c016130cc565b999c989b5096995094979396929594505050565b60008060006040848603121561330957600080fd5b833567ffffffffffffffff81111561332057600080fd5b61332c8682870161301c565b9094509250506020840135613340816130aa565b809150509250925092565b60008060008060008060008060006101008a8c03121561336a57600080fd5b8935613375816130aa565b985060208a0135975060408a0135965060608a0135613393816130aa565b95506133a160808b01613232565b945060a08a0135935060c08a0135925060e08a013567ffffffffffffffff8111156133cb57600080fd5b6133d78c828d016130cc565b915080935050809150509295985092959850929598565b60008060008060008060c0878903121561340757600080fd5b86359550602087013594506040870135613420816130aa565b935061342e60608801613232565b92506080870135915060a087013590509295509295509295565b6000806000806040858703121561345e57600080fd5b843567ffffffffffffffff8082111561347657600080fd5b6134828883890161301c565b9096509450602087013591508082111561349b57600080fd5b506134a88782880161301c565b95989497509550505050565b600080600080600080600060c0888a0312156134cf57600080fd5b873596506020880135955060408801356134e8816130aa565b94506134f660608901613232565b93506080880135925060a088013567ffffffffffffffff81111561351957600080fd5b6135258a828b016130cc565b989b979a50959850939692959293505050565b6000806000806080858703121561354e57600080fd5b843593506020850135925061356560408601613232565b9396929550929360600135925050565b6000806000806060858703121561358b57600080fd5b8435613596816130aa565b935060208501359250604085013567ffffffffffffffff8111156135b957600080fd5b6134a8878288016130cc565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60008060006060848603121561360957600080fd5b8335613614816130aa565b925060208401359150604084013567ffffffffffffffff8082111561363857600080fd5b818601915086601f83011261364c57600080fd5b81358181111561365e5761365e6135c5565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156136a4576136a
46135c5565b816040528281528960208487010111156136bd57600080fd5b8260208601602083013760006020848301015280955050505050509250925092565b6000602082840312156136f157600080fd5b81356136fc816130aa565b9392505050565b6000806040838503121561371657600080fd5b8235613721816130aa565b946020939093013593505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036137be576137be61375e565b5060010190565b8183823760009101908152919050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b600061010073ffffffffffffffffffffffffffffffffffffffff808d1684528b60208501528a6040850152808a166060850152507fffffffff00000000000000000000000000000000000000000000000000000000881660808401528660a08401528560c08401528060e084015261389981840185876137d5565b9c9b505050505050505050505050565b600082516138bb818460208701613163565b9190910192915050565b7fffffffff0000000000000000000000000000000000000000000000000000000084168152818360048301376000910160040190815292915050565b818103818111156139145761391461375e565b92915050565b60006020828403121561392c57600080fd5b815180151581146136fc57600080fd5b73ffffffffffffffffffffffffffffffffffffffff85168152836020820152606060408201526000611b646060830184866137d5565b8183526000602080850194508260005b858110156139bd578135613995816130aa565b73ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101613982565b509495945050505050565b6040815260006139dc604083018587613972565b905073ffffffffffffffffffffffffffffffffffffffff83166020830152949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052600160045260246000fd5b606081526000613a45606083018789613972565b8281036020840152613a58818688613972565b91505073ffffffffffffffffffffffffffffffffffffffff831660408301529695505050505050565b60208152600
0613a95602083018486613972565b949350505050565b808201808211156139145761391461375e565b600060208284031215613ac257600080fd5b5051919050565b80820281158282048414176139145761391461375e56fea164736f6c6343000813000a60a06040523480156200001157600080fd5b50604051620016993803806200169983398101604081905262000034916200029d565b82826001600160a01b038216620000925760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000c557620000c58162000199565b50506001600160a01b0384166200012b5760405162461bcd60e51b815260206004820152602360248201527f4c696e6b20746f6b656e2063616e6e6f742062652061207a65726f206164647260448201526265737360e81b606482015260840162000089565b6001600160a01b038085166080528216156200018f57816001600160a01b0316836001600160a01b03167f4e1e878dc28d5f040db5969163ff1acd75c44c3f655da2dde9c70bbd8e56dc7e836040516200018691906200038e565b60405180910390a35b50505050620003c3565b336001600160a01b03821603620001f35760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000089565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b03811681146200025c57600080fd5b919050565b634e487b7160e01b600052604160045260246000fd5b60005b83811015620002945781810151838201526020016200027a565b50506000910152565b60008060008060808587031215620002b457600080fd5b620002bf8562000244565b9350620002cf6020860162000244565b9250620002df6040860162000244565b60608601519092506001600160401b0380821115620002fd57600080fd5b818701915087601f8301126200031257600080fd5b81518181111562000327576200032762000261565b604051601f8201601f19908116603f0116810190838211818310171562000352576200035262000261565b816040528281528a60208487010111156200036c57600080fd5b6200037f83602083016020880162000277565b979a9
699509497505050505050565b6020815260008251806020840152620003af81604085016020870162000277565b601f01601f19169190910160400192915050565b6080516112ac620003ed6000396000818161016d0152818161037501526105d301526112ac6000f3fe608060405234801561001057600080fd5b50600436106100d45760003560e01c806379ba509711610081578063ee56997b1161005b578063ee56997b14610200578063f2fde38b14610213578063fa00763a1461022657600080fd5b806379ba5097146101c75780638da5cb5b146101cf578063b64fa9e6146101ed57600080fd5b80634d3e2323116100b25780634d3e23231461015557806357970e93146101685780636fadcf72146101b457600080fd5b8063033f49f7146100d9578063181f5a77146100ee5780632408afaa14610140575b600080fd5b6100ec6100e7366004610e72565b61026f565b005b61012a6040518060400160405280601981526020017f417574686f72697a6564466f7277617264657220312e312e300000000000000081525081565b6040516101379190610ef5565b60405180910390f35b610148610287565b6040516101379190610f61565b6100ec610163366004610e72565b6102f6565b61018f7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610137565b6100ec6101c2366004610e72565b61036b565b6100ec61042d565b60005473ffffffffffffffffffffffffffffffffffffffff1661018f565b6100ec6101fb366004611007565b61052a565b6100ec61020e366004611073565b6106cb565b6100ec6102213660046110b5565b6109dc565b61025f6102343660046110b5565b73ffffffffffffffffffffffffffffffffffffffff1660009081526002602052604090205460ff1690565b6040519015158152602001610137565b6102776109f0565b610282838383610a73565b505050565b606060038054806020026020016040519081016040528092919081815260200182805480156102ec57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116102c1575b5050505050905090565b6102ff836109dc565b8273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f4e1e878dc28d5f040db5969163ff1acd75c44c3f655da2dde9c70bbd8e56dc7e848460405161035e9291906110d7565b60405180910390a3505050565b610373610c00565b7f00000000000000000
0000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff1603610277576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f43616e6e6f7420666f727761726420746f204c696e6b20746f6b656e0000000060448201526064015b60405180910390fd5b60015473ffffffffffffffffffffffffffffffffffffffff1633146104ae576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610424565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610532610c00565b82811461059b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f417272617973206d7573742068617665207468652073616d65206c656e6774686044820152606401610424565b60005b838110156106c45760008585838181106105ba576105ba611124565b90506020020160208101906105cf91906110b5565b90507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603610686576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f43616e6e6f7420666f727761726420746f204c696e6b20746f6b656e000000006044820152606401610424565b6106b38185858581811061069c5761069c611124565b90506020028101906106ae9190611153565b610a73565b506106bd816111b8565b905061059e565b5050505050565b6106d3610c79565b610739576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f43616e6e6f742073657420617574686f72697a65642073656e646572730000006044820152606401610424565b806107a0576040517f08c379a00000000000000000000000000000000000000000000000000000000081526
0206004820152601b60248201527f4d7573742068617665206174206c6561737420312073656e64657200000000006044820152606401610424565b60035460005b8181101561083657600060026000600384815481106107c7576107c7611124565b60009182526020808320919091015473ffffffffffffffffffffffffffffffffffffffff168352820192909252604001902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001691151591909117905561082f816111b8565b90506107a6565b5060005b8281101561098e576002600085858481811061085857610858611124565b905060200201602081019061086d91906110b5565b73ffffffffffffffffffffffffffffffffffffffff16815260208101919091526040016000205460ff16156108fe576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f4d757374206e6f742068617665206475706c69636174652073656e64657273006044820152606401610424565b60016002600086868581811061091657610916611124565b905060200201602081019061092b91906110b5565b73ffffffffffffffffffffffffffffffffffffffff168152602081019190915260400160002080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016911515919091179055610987816111b8565b905061083a565b5061099b60038484610dac565b507ff263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a08383336040516109cf93929190611217565b60405180910390a1505050565b6109e46109f0565b6109ed81610cb7565b50565b60005473ffffffffffffffffffffffffffffffffffffffff163314610a71576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610424565b565b73ffffffffffffffffffffffffffffffffffffffff83163b610af1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f4d75737420666f727761726420746f206120636f6e74726163740000000000006044820152606401610424565b6000808473ffffffffffffffffffffffffffffffffffffffff168484604051610b1b92919061128f565b6000604051808303816000865af19150503d8060008114610b58576040519150601f19603f3d011682016040523d82523d6
000602084013e610b5d565b606091505b5091509150816106c4578051600003610bf8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f466f727761726465642063616c6c20726576657274656420776974686f75742060448201527f726561736f6e00000000000000000000000000000000000000000000000000006064820152608401610424565b805181602001fd5b3360009081526002602052604090205460ff16610a71576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f4e6f7420617574686f72697a65642073656e64657200000000000000000000006044820152606401610424565b600033610c9b60005473ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff1614905090565b3373ffffffffffffffffffffffffffffffffffffffff821603610d36576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610424565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b828054828255906000526020600020908101928215610e24579160200282015b82811115610e245781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff843516178255602090920191600190910190610dcc565b50610e30929150610e34565b5090565b5b80821115610e305760008155600101610e35565b803573ffffffffffffffffffffffffffffffffffffffff81168114610e6d57600080fd5b919050565b600080600060408486031215610e8757600080fd5b610e9084610e49565b9250602084013567ffffffffffffffff80821115610ead57600080fd5b818601915086601f830112610ec157600080fd5b813581811115610ed057600080fd5b876020828501011115610ee257600080fd5b6020830194508093505050509250925092565b600060208083528351808285015260005b81811015610f2257858101830151858201604001528201610f06565b5060006040828601015260407ffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b6020808252825182820181905260009190848201906040850190845b81811015610faf57835173ffffffffffffffffffffffffffffffffffffffff1683529284019291840191600101610f7d565b50909695505050505050565b60008083601f840112610fcd57600080fd5b50813567ffffffffffffffff811115610fe557600080fd5b6020830191508360208260051b850101111561100057600080fd5b9250929050565b6000806000806040858703121561101d57600080fd5b843567ffffffffffffffff8082111561103557600080fd5b61104188838901610fbb565b9096509450602087013591508082111561105a57600080fd5b5061106787828801610fbb565b95989497509550505050565b6000806020838503121561108657600080fd5b823567ffffffffffffffff81111561109d57600080fd5b6110a985828601610fbb565b90969095509350505050565b6000602082840312156110c757600080fd5b6110d082610e49565b9392505050565b60208152816020820152818360408301376000818301604090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160101919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe184360301811261118857600080fd5b83018035915067ffffffffffffffff8211156111a357600080fd5b60200191503681900382131561100057600080fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611210577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b6040808252810183905260008460608301825b868110156112655773ffffffffffffffffffffffffffffffffffffffff61125084610e49565b1682526020928301929091019060010161122a565b50809250505073ffffffffffffffffffffffffffffffffffffffff83166020830152949350505050565b818382376000910190815291905056fea164736f6c6343000813000aa164736f6c6343000813000a", +} + +var OperatorFactoryABI = OperatorFactoryMetaData.ABI + +var OperatorFactoryBin = OperatorFactoryMetaData.Bin + +func DeployOperatorFactory(auth *bind.TransactOpts, backend bind.ContractBackend, 
linkAddress common.Address) (common.Address, *types.Transaction, *OperatorFactory, error) { + parsed, err := OperatorFactoryMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OperatorFactoryBin), backend, linkAddress) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OperatorFactory{address: address, abi: *parsed, OperatorFactoryCaller: OperatorFactoryCaller{contract: contract}, OperatorFactoryTransactor: OperatorFactoryTransactor{contract: contract}, OperatorFactoryFilterer: OperatorFactoryFilterer{contract: contract}}, nil +} + +type OperatorFactory struct { + address common.Address + abi abi.ABI + OperatorFactoryCaller + OperatorFactoryTransactor + OperatorFactoryFilterer +} + +type OperatorFactoryCaller struct { + contract *bind.BoundContract +} + +type OperatorFactoryTransactor struct { + contract *bind.BoundContract +} + +type OperatorFactoryFilterer struct { + contract *bind.BoundContract +} + +type OperatorFactorySession struct { + Contract *OperatorFactory + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OperatorFactoryCallerSession struct { + Contract *OperatorFactoryCaller + CallOpts bind.CallOpts +} + +type OperatorFactoryTransactorSession struct { + Contract *OperatorFactoryTransactor + TransactOpts bind.TransactOpts +} + +type OperatorFactoryRaw struct { + Contract *OperatorFactory +} + +type OperatorFactoryCallerRaw struct { + Contract *OperatorFactoryCaller +} + +type OperatorFactoryTransactorRaw struct { + Contract *OperatorFactoryTransactor +} + +func NewOperatorFactory(address common.Address, backend bind.ContractBackend) (*OperatorFactory, error) { + abi, err := abi.JSON(strings.NewReader(OperatorFactoryABI)) + if err != nil { + return nil, err + } + contract, err := 
bindOperatorFactory(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OperatorFactory{address: address, abi: abi, OperatorFactoryCaller: OperatorFactoryCaller{contract: contract}, OperatorFactoryTransactor: OperatorFactoryTransactor{contract: contract}, OperatorFactoryFilterer: OperatorFactoryFilterer{contract: contract}}, nil +} + +func NewOperatorFactoryCaller(address common.Address, caller bind.ContractCaller) (*OperatorFactoryCaller, error) { + contract, err := bindOperatorFactory(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OperatorFactoryCaller{contract: contract}, nil +} + +func NewOperatorFactoryTransactor(address common.Address, transactor bind.ContractTransactor) (*OperatorFactoryTransactor, error) { + contract, err := bindOperatorFactory(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OperatorFactoryTransactor{contract: contract}, nil +} + +func NewOperatorFactoryFilterer(address common.Address, filterer bind.ContractFilterer) (*OperatorFactoryFilterer, error) { + contract, err := bindOperatorFactory(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OperatorFactoryFilterer{contract: contract}, nil +} + +func bindOperatorFactory(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OperatorFactoryMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OperatorFactory *OperatorFactoryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OperatorFactory.Contract.OperatorFactoryCaller.contract.Call(opts, result, method, params...) 
+} + +func (_OperatorFactory *OperatorFactoryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OperatorFactory.Contract.OperatorFactoryTransactor.contract.Transfer(opts) +} + +func (_OperatorFactory *OperatorFactoryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OperatorFactory.Contract.OperatorFactoryTransactor.contract.Transact(opts, method, params...) +} + +func (_OperatorFactory *OperatorFactoryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OperatorFactory.Contract.contract.Call(opts, result, method, params...) +} + +func (_OperatorFactory *OperatorFactoryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OperatorFactory.Contract.contract.Transfer(opts) +} + +func (_OperatorFactory *OperatorFactoryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OperatorFactory.Contract.contract.Transact(opts, method, params...) 
+} + +func (_OperatorFactory *OperatorFactoryCaller) Created(opts *bind.CallOpts, query common.Address) (bool, error) { + var out []interface{} + err := _OperatorFactory.contract.Call(opts, &out, "created", query) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_OperatorFactory *OperatorFactorySession) Created(query common.Address) (bool, error) { + return _OperatorFactory.Contract.Created(&_OperatorFactory.CallOpts, query) +} + +func (_OperatorFactory *OperatorFactoryCallerSession) Created(query common.Address) (bool, error) { + return _OperatorFactory.Contract.Created(&_OperatorFactory.CallOpts, query) +} + +func (_OperatorFactory *OperatorFactoryCaller) LinkToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OperatorFactory.contract.Call(opts, &out, "linkToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_OperatorFactory *OperatorFactorySession) LinkToken() (common.Address, error) { + return _OperatorFactory.Contract.LinkToken(&_OperatorFactory.CallOpts) +} + +func (_OperatorFactory *OperatorFactoryCallerSession) LinkToken() (common.Address, error) { + return _OperatorFactory.Contract.LinkToken(&_OperatorFactory.CallOpts) +} + +func (_OperatorFactory *OperatorFactoryCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _OperatorFactory.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_OperatorFactory *OperatorFactorySession) TypeAndVersion() (string, error) { + return _OperatorFactory.Contract.TypeAndVersion(&_OperatorFactory.CallOpts) +} + +func (_OperatorFactory *OperatorFactoryCallerSession) TypeAndVersion() (string, error) { + 
return _OperatorFactory.Contract.TypeAndVersion(&_OperatorFactory.CallOpts) +} + +func (_OperatorFactory *OperatorFactoryTransactor) DeployNewForwarder(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OperatorFactory.contract.Transact(opts, "deployNewForwarder") +} + +func (_OperatorFactory *OperatorFactorySession) DeployNewForwarder() (*types.Transaction, error) { + return _OperatorFactory.Contract.DeployNewForwarder(&_OperatorFactory.TransactOpts) +} + +func (_OperatorFactory *OperatorFactoryTransactorSession) DeployNewForwarder() (*types.Transaction, error) { + return _OperatorFactory.Contract.DeployNewForwarder(&_OperatorFactory.TransactOpts) +} + +func (_OperatorFactory *OperatorFactoryTransactor) DeployNewForwarderAndTransferOwnership(opts *bind.TransactOpts, to common.Address, message []byte) (*types.Transaction, error) { + return _OperatorFactory.contract.Transact(opts, "deployNewForwarderAndTransferOwnership", to, message) +} + +func (_OperatorFactory *OperatorFactorySession) DeployNewForwarderAndTransferOwnership(to common.Address, message []byte) (*types.Transaction, error) { + return _OperatorFactory.Contract.DeployNewForwarderAndTransferOwnership(&_OperatorFactory.TransactOpts, to, message) +} + +func (_OperatorFactory *OperatorFactoryTransactorSession) DeployNewForwarderAndTransferOwnership(to common.Address, message []byte) (*types.Transaction, error) { + return _OperatorFactory.Contract.DeployNewForwarderAndTransferOwnership(&_OperatorFactory.TransactOpts, to, message) +} + +func (_OperatorFactory *OperatorFactoryTransactor) DeployNewOperator(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OperatorFactory.contract.Transact(opts, "deployNewOperator") +} + +func (_OperatorFactory *OperatorFactorySession) DeployNewOperator() (*types.Transaction, error) { + return _OperatorFactory.Contract.DeployNewOperator(&_OperatorFactory.TransactOpts) +} + +func (_OperatorFactory *OperatorFactoryTransactorSession) DeployNewOperator() 
(*types.Transaction, error) { + return _OperatorFactory.Contract.DeployNewOperator(&_OperatorFactory.TransactOpts) +} + +func (_OperatorFactory *OperatorFactoryTransactor) DeployNewOperatorAndForwarder(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OperatorFactory.contract.Transact(opts, "deployNewOperatorAndForwarder") +} + +func (_OperatorFactory *OperatorFactorySession) DeployNewOperatorAndForwarder() (*types.Transaction, error) { + return _OperatorFactory.Contract.DeployNewOperatorAndForwarder(&_OperatorFactory.TransactOpts) +} + +func (_OperatorFactory *OperatorFactoryTransactorSession) DeployNewOperatorAndForwarder() (*types.Transaction, error) { + return _OperatorFactory.Contract.DeployNewOperatorAndForwarder(&_OperatorFactory.TransactOpts) +} + +type OperatorFactoryAuthorizedForwarderCreatedIterator struct { + Event *OperatorFactoryAuthorizedForwarderCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OperatorFactoryAuthorizedForwarderCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OperatorFactoryAuthorizedForwarderCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OperatorFactoryAuthorizedForwarderCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OperatorFactoryAuthorizedForwarderCreatedIterator) Error() error { + return it.fail +} + +func (it *OperatorFactoryAuthorizedForwarderCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
OperatorFactoryAuthorizedForwarderCreated struct { + Forwarder common.Address + Owner common.Address + Sender common.Address + Raw types.Log +} + +func (_OperatorFactory *OperatorFactoryFilterer) FilterAuthorizedForwarderCreated(opts *bind.FilterOpts, forwarder []common.Address, owner []common.Address, sender []common.Address) (*OperatorFactoryAuthorizedForwarderCreatedIterator, error) { + + var forwarderRule []interface{} + for _, forwarderItem := range forwarder { + forwarderRule = append(forwarderRule, forwarderItem) + } + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _OperatorFactory.contract.FilterLogs(opts, "AuthorizedForwarderCreated", forwarderRule, ownerRule, senderRule) + if err != nil { + return nil, err + } + return &OperatorFactoryAuthorizedForwarderCreatedIterator{contract: _OperatorFactory.contract, event: "AuthorizedForwarderCreated", logs: logs, sub: sub}, nil +} + +func (_OperatorFactory *OperatorFactoryFilterer) WatchAuthorizedForwarderCreated(opts *bind.WatchOpts, sink chan<- *OperatorFactoryAuthorizedForwarderCreated, forwarder []common.Address, owner []common.Address, sender []common.Address) (event.Subscription, error) { + + var forwarderRule []interface{} + for _, forwarderItem := range forwarder { + forwarderRule = append(forwarderRule, forwarderItem) + } + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _OperatorFactory.contract.WatchLogs(opts, "AuthorizedForwarderCreated", forwarderRule, ownerRule, senderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer 
sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OperatorFactoryAuthorizedForwarderCreated) + if err := _OperatorFactory.contract.UnpackLog(event, "AuthorizedForwarderCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OperatorFactory *OperatorFactoryFilterer) ParseAuthorizedForwarderCreated(log types.Log) (*OperatorFactoryAuthorizedForwarderCreated, error) { + event := new(OperatorFactoryAuthorizedForwarderCreated) + if err := _OperatorFactory.contract.UnpackLog(event, "AuthorizedForwarderCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OperatorFactoryOperatorCreatedIterator struct { + Event *OperatorFactoryOperatorCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OperatorFactoryOperatorCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OperatorFactoryOperatorCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OperatorFactoryOperatorCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OperatorFactoryOperatorCreatedIterator) Error() error { + return it.fail +} + +func (it *OperatorFactoryOperatorCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
OperatorFactoryOperatorCreated struct { + Operator common.Address + Owner common.Address + Sender common.Address + Raw types.Log +} + +func (_OperatorFactory *OperatorFactoryFilterer) FilterOperatorCreated(opts *bind.FilterOpts, operator []common.Address, owner []common.Address, sender []common.Address) (*OperatorFactoryOperatorCreatedIterator, error) { + + var operatorRule []interface{} + for _, operatorItem := range operator { + operatorRule = append(operatorRule, operatorItem) + } + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _OperatorFactory.contract.FilterLogs(opts, "OperatorCreated", operatorRule, ownerRule, senderRule) + if err != nil { + return nil, err + } + return &OperatorFactoryOperatorCreatedIterator{contract: _OperatorFactory.contract, event: "OperatorCreated", logs: logs, sub: sub}, nil +} + +func (_OperatorFactory *OperatorFactoryFilterer) WatchOperatorCreated(opts *bind.WatchOpts, sink chan<- *OperatorFactoryOperatorCreated, operator []common.Address, owner []common.Address, sender []common.Address) (event.Subscription, error) { + + var operatorRule []interface{} + for _, operatorItem := range operator { + operatorRule = append(operatorRule, operatorItem) + } + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _OperatorFactory.contract.WatchLogs(opts, "OperatorCreated", operatorRule, ownerRule, senderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OperatorFactoryOperatorCreated) + if err := 
_OperatorFactory.contract.UnpackLog(event, "OperatorCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OperatorFactory *OperatorFactoryFilterer) ParseOperatorCreated(log types.Log) (*OperatorFactoryOperatorCreated, error) { + event := new(OperatorFactoryOperatorCreated) + if err := _OperatorFactory.contract.UnpackLog(event, "OperatorCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_OperatorFactory *OperatorFactory) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _OperatorFactory.abi.Events["AuthorizedForwarderCreated"].ID: + return _OperatorFactory.ParseAuthorizedForwarderCreated(log) + case _OperatorFactory.abi.Events["OperatorCreated"].ID: + return _OperatorFactory.ParseOperatorCreated(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OperatorFactoryAuthorizedForwarderCreated) Topic() common.Hash { + return common.HexToHash("0x1c9576ab03e40fdf23673f82d904a0f029c8a6629272a4edad4be877e83af64b") +} + +func (OperatorFactoryOperatorCreated) Topic() common.Hash { + return common.HexToHash("0xd3bb727b2e716a1f142bc9c63c66fe0ae4c5fbc89234f8aa77d0c864a7b63bab") +} + +func (_OperatorFactory *OperatorFactory) Address() common.Address { + return _OperatorFactory.address +} + +type OperatorFactoryInterface interface { + Created(opts *bind.CallOpts, query common.Address) (bool, error) + + LinkToken(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + DeployNewForwarder(opts *bind.TransactOpts) (*types.Transaction, error) + + DeployNewForwarderAndTransferOwnership(opts *bind.TransactOpts, to common.Address, message []byte) (*types.Transaction, error) + 
+ DeployNewOperator(opts *bind.TransactOpts) (*types.Transaction, error) + + DeployNewOperatorAndForwarder(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterAuthorizedForwarderCreated(opts *bind.FilterOpts, forwarder []common.Address, owner []common.Address, sender []common.Address) (*OperatorFactoryAuthorizedForwarderCreatedIterator, error) + + WatchAuthorizedForwarderCreated(opts *bind.WatchOpts, sink chan<- *OperatorFactoryAuthorizedForwarderCreated, forwarder []common.Address, owner []common.Address, sender []common.Address) (event.Subscription, error) + + ParseAuthorizedForwarderCreated(log types.Log) (*OperatorFactoryAuthorizedForwarderCreated, error) + + FilterOperatorCreated(opts *bind.FilterOpts, operator []common.Address, owner []common.Address, sender []common.Address) (*OperatorFactoryOperatorCreatedIterator, error) + + WatchOperatorCreated(opts *bind.WatchOpts, sink chan<- *OperatorFactoryOperatorCreated, operator []common.Address, owner []common.Address, sender []common.Address) (event.Subscription, error) + + ParseOperatorCreated(log types.Log) (*OperatorFactoryOperatorCreated, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/operator_wrapper/operator_wrapper.go b/core/gethwrappers/generated/operator_wrapper/operator_wrapper.go new file mode 100644 index 00000000..7b9b3875 --- /dev/null +++ b/core/gethwrappers/generated/operator_wrapper/operator_wrapper.go @@ -0,0 +1,1731 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package operator_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var OperatorMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"CancelOracleRequest\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"specId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"payment\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"callbackAddr\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes4\",\"name\":\"callbackFunctionId\",\"type\":\"bytes4\"},{\"indexed\":false,\"internalType\":\"uint
256\",\"name\":\"cancelExpiration\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"dataVersion\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"OracleRequest\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"OracleResponse\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"acceptedContract\",\"type\":\"address\"}],\"name\":\"OwnableContractAccepted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"targets\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"TargetsUpdatedAuthorizedSenders\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"EXPIRYTIME\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"targets\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"acceptAuthorizedReceivers\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"func
tion\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"ownable\",\"type\":\"address[]\"}],\"name\":\"acceptOwnableContracts\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"payment\",\"type\":\"uint256\"},{\"internalType\":\"bytes4\",\"name\":\"callbackFunc\",\"type\":\"bytes4\"},{\"internalType\":\"uint256\",\"name\":\"expiration\",\"type\":\"uint256\"}],\"name\":\"cancelOracleRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"payment\",\"type\":\"uint256\"},{\"internalType\":\"bytes4\",\"name\":\"callbackFunc\",\"type\":\"bytes4\"},{\"internalType\":\"uint256\",\"name\":\"expiration\",\"type\":\"uint256\"}],\"name\":\"cancelOracleRequestByRequester\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"addresspayable[]\",\"name\":\"receivers\",\"type\":\"address[]\"},{\"internalType\":\"uint256[]\",\"name\":\"amounts\",\"type\":\"uint256[]\"}],\"name\":\"distributeFunds\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"payment\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"callbackAddress\",\"type\":\"address\"},{\"internalType\":\"bytes4\",\"name\":\"callbackFunctionId\",\"type\":\"bytes4\"},{\"internalType\":\"uint256\",\"name\":\"expiration\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"data\",\"type\":\"bytes32\"}],\"name\":\"fulfillOracleRequest\",\"outputs\":[{\"internalType\":\"bool\",\"name\
":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"payment\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"callbackAddress\",\"type\":\"address\"},{\"internalType\":\"bytes4\",\"name\":\"callbackFunctionId\",\"type\":\"bytes4\"},{\"internalType\":\"uint256\",\"name\":\"expiration\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"fulfillOracleRequest2\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAuthorizedSenders\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPluginToken\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"isAuthorizedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"payment\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"specId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes4\",\"name\":\"callbackFunctionId\",\"type\":\"bytes4\"},{\"internalType\":\"uint256\",\"name\":
\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"dataVersion\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"operatorRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"payment\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"specId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"callbackAddress\",\"type\":\"address\"},{\"internalType\":\"bytes4\",\"name\":\"callbackFunctionId\",\"type\":\"bytes4\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"dataVersion\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"oracleRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"ownerForward\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"ownerTransferAndCall\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"setAuthorizedSenders\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\"
:[{\"internalType\":\"address[]\",\"name\":\"targets\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"setAuthorizedSendersOn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"ownable\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnableContracts\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawable\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x60a060405260016006553480156200001657600080fd5b5060405162003d3238038062003d328339810160408190526200003991620001ab565b808060006001600160a01b038216620000995760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600280546001600160a01b0319166001600160a01b0384811691909117909155811615620000cc57620000cc81620000e2565b505050506001600160a01b0316608052620001e3565b336001600160a01b038216036200013c5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000090565b600380546001600160a01b0319166001600160a01b03838116918217909255600254604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b80516001600160a01b0381168114620001a657600080fd5b919050565b60008060408385031215620001bf57600080fd5b620001ca836200018e565b9150620001da602084016200018e565b90509250929050565b608051613aed62000245600039600081816101ec0152818161075e015281816109f301528181610c4f0152818161187c01528181611ae601528181611b8601528181611f21015281816123ba0152818161266d0152612bf50152613aed6000f3fe6080604052600436106101965760003560e01c80636ae0bc76116100e1578063a4c0ed361161008a578063f2fde38b11610064578063f2fde38b146104aa578063f3fef3a3146104ca578063fa00763a146104ea578063fc4a03ed1461053057600080fd5b8063a4c0ed361461044a578063eb007d991461046a578063ee56997b1461048a57600080fd5b806379ba5097116100bb57806379ba5097146103ea5780638da5cb5b146103ff578063902fc3701461042a57600080fd5b80636ae0bc76146103975780636bd59ec0146103b75780636ee4d553146103ca57600080fd5b80633ec5bc1411610143578063501883011161011d578063501883011461033e57806352043783146103615780635ffa62881461037757600080fd5b80633ec5bc14146102ce57806340429946146102ee5780634ab0d1901461030e57600080fd5b8063181f5a7711610174578063181f5a77146102365780632408afaa1461028c5780633c6d41b9146102ae57600080fd5b806301994b991461019b578063033f49f7146101bd578063165d35e1146101dd575b600080fd5b3480156101a
757600080fd5b506101bb6101b6366004613068565b610550565b005b3480156101c957600080fd5b506101bb6101d836600461310e565b610753565b3480156101e957600080fd5b507f00000000000000000000000000000000000000000000000000000000000000005b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b34801561024257600080fd5b5061027f6040518060400160405280600e81526020017f4f70657261746f7220312e302e3000000000000000000000000000000000000081525081565b60405161022d9190613187565b34801561029857600080fd5b506102a161096c565b60405161022d91906131d8565b3480156102ba57600080fd5b506101bb6102c9366004613267565b6109db565b3480156102da57600080fd5b506101bb6102e93660046132f4565b610ae3565b3480156102fa57600080fd5b506101bb61030936600461334b565b610c37565b34801561031a57600080fd5b5061032e6103293660046133ee565b610d40565b604051901515815260200161022d565b34801561034a57600080fd5b50610353611036565b60405190815260200161022d565b34801561036d57600080fd5b5061035361012c81565b34801561038357600080fd5b506101bb610392366004613448565b611045565b3480156103a357600080fd5b5061032e6103b23660046134b4565b6110c9565b6101bb6103c5366004613448565b611445565b3480156103d657600080fd5b506101bb6103e5366004613538565b611682565b3480156103f657600080fd5b506101bb611906565b34801561040b57600080fd5b5060025473ffffffffffffffffffffffffffffffffffffffff1661020c565b34801561043657600080fd5b5061032e610445366004613575565b611a07565b34801561045657600080fd5b506101bb6104653660046135f4565b611b6e565b34801561047657600080fd5b506101bb610485366004613538565b611cfc565b34801561049657600080fd5b506101bb6104a5366004613068565b611fac565b3480156104b657600080fd5b506101bb6104c53660046136df565b6122ba565b3480156104d657600080fd5b506101bb6104e5366004613703565b6122ce565b3480156104f657600080fd5b5061032e6105053660046136df565b73ffffffffffffffffffffffffffffffffffffffff1660009081526020819052604090205460ff1690565b34801561053c57600080fd5b506101bb61054b366004613448565b612433565b61055861258f565b6105c3576040517f08c379a00000000000000000000000000000000000000000000000000000000081526020600482015
2601d60248201527f43616e6e6f742073657420617574686f72697a65642073656e6465727300000060448201526064015b60405180910390fd5b60005b8181101561074e576001600560008585858181106105e6576105e661372f565b90506020020160208101906105fb91906136df565b73ffffffffffffffffffffffffffffffffffffffff168152602081019190915260400160002080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169115159190911790558282828181106106605761066061372f565b905060200201602081019061067591906136df565b73ffffffffffffffffffffffffffffffffffffffff167f615a0c1cb00a60d4acd77ec67acf2f17f223ef0932d591052fabc33643fe7e8260405160405180910390a28282828181106106c9576106c961372f565b90506020020160208101906106de91906136df565b73ffffffffffffffffffffffffffffffffffffffff166379ba50976040518163ffffffff1660e01b8152600401600060405180830381600087803b15801561072557600080fd5b505af1158015610739573d6000803e3d6000fd5b50505050806107479061378d565b90506105c6565b505050565b61075b6125e4565b827f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603610811576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f43616e6e6f742063616c6c20746f204c494e4b0000000000000000000000000060448201526064016105ba565b73ffffffffffffffffffffffffffffffffffffffff84163b61088f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f4d75737420666f727761726420746f206120636f6e747261637400000000000060448201526064016105ba565b60008473ffffffffffffffffffffffffffffffffffffffff1684846040516108b89291906137c5565b6000604051808303816000865af19150503d80600081146108f5576040519150601f19603f3d011682016040523d82523d6000602084013e6108fa565b606091505b5050905080610965576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f466f727761726465642063616c6c206661696c6564000000000000000000000060448201526064016105ba565b5050505050565b606060018
054806020026020016040519081016040528092919081815260200182805480156109d157602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116109a6575b5050505050905090565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610a7a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f4d75737420757365204c494e4b20746f6b656e0000000000000000000000000060448201526064016105ba565b600080610a8b8a8a8c8a8a8a612667565b91509150877fd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c658b848c8e8c878c8c8c604051610acf9998979695949392919061381e565b60405180910390a250505050505050505050565b610aeb6125e4565b60005b82811015610c3157600060056000868685818110610b0e57610b0e61372f565b9050602002016020810190610b2391906136df565b73ffffffffffffffffffffffffffffffffffffffff168152602081019190915260400160002080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016911515919091179055838382818110610b8857610b8861372f565b9050602002016020810190610b9d91906136df565b6040517ff2fde38b00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8481166004830152919091169063f2fde38b90602401600060405180830381600087803b158015610c0857600080fd5b505af1158015610c1c573d6000803e3d6000fd5b5050505080610c2a9061378d565b9050610aee565b50505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610cd6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f4d75737420757365204c494e4b20746f6b656e0000000000000000000000000060448201526064016105ba565b600080610ce78b8b8a8a8a8a612667565b91509150887fd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c658c848d8f8c878c8c8c604051610d2b9998979695949392919061381e565b60405180910390a25050505050505050505050565b6000610d4a612945565b60008781526004602052604081205
4889160089190911b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169003610deb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f4d757374206861766520612076616c696420726571756573744964000000000060448201526064016105ba565b73ffffffffffffffffffffffffffffffffffffffff8616600090815260056020526040902054869060ff1615610e7d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f43616e6e6f742063616c6c206f776e656420636f6e747261637400000000000060448201526064016105ba565b610e8c898989898960016129be565b60405189907f9e9bc7616d42c2835d05ae617e508454e63b30b934be8aa932ebc125e0e58a6490600090a262061a805a1015610f24576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4d7573742070726f7669646520636f6e73756d657220656e6f7567682067617360448201526064016105ba565b60008773ffffffffffffffffffffffffffffffffffffffff16878b87604051602401610f5a929190918252602082015260400190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff00000000000000000000000000000000000000000000000000000000909416939093179092529051610fe391906138a9565b6000604051808303816000865af19150503d8060008114611020576040519150601f19603f3d011682016040523d82523d6000602084013e611025565b606091505b50909b9a5050505050505050505050565b6000611040612bb6565b905090565b61104d61258f565b6110b3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f43616e6e6f742073657420617574686f72697a65642073656e6465727300000060448201526064016105ba565b6110bd8484610550565b610c3184848484612433565b60006110d3612945565b600088815260046020526040812054899160089190911b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169003611174576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601
b60248201527f4d757374206861766520612076616c696420726571756573744964000000000060448201526064016105ba565b73ffffffffffffffffffffffffffffffffffffffff8716600090815260056020526040902054879060ff1615611206576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f43616e6e6f742063616c6c206f776e656420636f6e747261637400000000000060448201526064016105ba565b8985856020811015611274576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f526573706f6e7365206d757374206265203e203332206279746573000000000060448201526064016105ba565b81358381146112df576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f466972737420776f7264206d757374206265207265717565737449640000000060448201526064016105ba565b6112ee8e8e8e8e8e60026129be565b6040518e907f9e9bc7616d42c2835d05ae617e508454e63b30b934be8aa932ebc125e0e58a6490600090a262061a805a1015611386576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4d7573742070726f7669646520636f6e73756d657220656e6f7567682067617360448201526064016105ba565b60008c73ffffffffffffffffffffffffffffffffffffffff168c8b8b6040516020016113b4939291906138c5565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152908290526113ec916138a9565b6000604051808303816000865af19150503d8060008114611429576040519150601f19603f3d011682016040523d82523d6000602084013e61142e565b606091505b509098505050505050505050979650505050505050565b821580159061145357508281145b6114b9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f496e76616c6964206172726179206c656e67746828732900000000000000000060448201526064016105ba565b3460005b848110156116195760008484838181106114d9576114d961372f565b90506020020135905080836114ee9190613901565b925060008787848181106115045761150461372f565b905060200201602081019061151991906136df565b73fffffffffffffffffffffffffffff
fffffffffff168260405160006040518083038185875af1925050503d8060008114611570576040519150601f19603f3d011682016040523d82523d6000602084013e611575565b606091505b5050905080611606576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603a60248201527f416464726573733a20756e61626c6520746f2073656e642076616c75652c207260448201527f6563697069656e74206d6179206861766520726576657274656400000000000060648201526084016105ba565b5050806116129061378d565b90506114bd565b508015610965576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f546f6f206d756368204554482073656e7400000000000000000000000000000060448201526064016105ba565b6040805160208082018690527fffffffffffffffffffffffffffffffffffffffff0000000000000000000000003360601b16828401527fffffffff00000000000000000000000000000000000000000000000000000000851660548301526058808301859052835180840390910181526078909201909252805191012060009060008681526004602052604090205490915060081b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00908116908216146117a5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f506172616d7320646f206e6f74206d617463682072657175657374204944000060448201526064016105ba565b4282111561180f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f52657175657374206973206e6f7420657870697265640000000000000000000060448201526064016105ba565b6000858152600460205260408082208290555186917fa7842b9ec549398102c0d91b1b9919b2f20558aefdadf57528a95c6cd3292e9391a26040517fa9059cbb000000000000000000000000000000000000000000000000000000008152336004820152602481018590527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169063a9059cbb906044016020604051808303816000875af11580156118da573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906118fe919061391a565b505050505050565b60035473fffffffffffff
fffffffffffffffffffffffffff163314611987576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016105ba565b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000008082163390811790935560038054909116905560405173ffffffffffffffffffffffffffffffffffffffff909116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a350565b6000611a116125e4565b8380611a1b612bb6565b1015611aa9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603560248201527f416d6f756e74207265717565737465642069732067726561746572207468616e60448201527f20776974686472617761626c652062616c616e6365000000000000000000000060648201526084016105ba565b6040517f4000aea000000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001690634000aea090611b2190899089908990899060040161393c565b6020604051808303816000875af1158015611b40573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611b64919061391a565b9695505050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614611c0d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f4d75737420757365204c494e4b20746f6b656e0000000000000000000000000060448201526064016105ba565b60208101518190611c1e8183612c7f565b84602484015283604484015260003073ffffffffffffffffffffffffffffffffffffffff1684604051611c5191906138a9565b600060405180830381855af49150503d8060008114611c8c576040519150601f19603f3d011682016040523d82523d6000602084013e611c91565b606091505b50509050806118fe576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f556e61626c6520746f20637265617465207265717565737400000000000000006044820152606401610
5ba565b604080513360601b7fffffffffffffffffffffffffffffffffffffffff0000000000000000000000001660208083018290526034808401899052845180850390910181526054840185528051908201206074840188905260948401929092527fffffffff00000000000000000000000000000000000000000000000000000000861660a884015260ac8084018690528451808503909101815260cc9093019093528151919092012060009060008381526004602052604090205490915060081b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0090811690821614611e4a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f506172616d7320646f206e6f74206d617463682072657175657374204944000060448201526064016105ba565b42831115611eb4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f52657175657374206973206e6f7420657870697265640000000000000000000060448201526064016105ba565b6000828152600460205260408082208290555183917fa7842b9ec549398102c0d91b1b9919b2f20558aefdadf57528a95c6cd3292e9391a26040517fa9059cbb000000000000000000000000000000000000000000000000000000008152336004820152602481018690527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169063a9059cbb906044016020604051808303816000875af1158015611f7f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611fa3919061391a565b50505050505050565b611fb461258f565b61201a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f43616e6e6f742073657420617574686f72697a65642073656e6465727300000060448201526064016105ba565b80612081576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f4d7573742068617665206174206c6561737420312073656e646572000000000060448201526064016105ba565b60015460005b81811015612116576000806000600184815481106120a7576120a761372f565b60009182526020808320919091015473ffffffffffffffffffffffffffffffffffffffff168352820192909252604001902080547ffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffff001691151591909117905561210f8161378d565b9050612087565b5060005b8281101561226c576000808585848181106121375761213761372f565b905060200201602081019061214c91906136df565b73ffffffffffffffffffffffffffffffffffffffff16815260208101919091526040016000205460ff16156121dd576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f4d757374206e6f742068617665206475706c69636174652073656e646572730060448201526064016105ba565b60016000808686858181106121f4576121f461372f565b905060200201602081019061220991906136df565b73ffffffffffffffffffffffffffffffffffffffff168152602081019190915260400160002080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169115159190911790556122658161378d565b905061211a565b5061227960018484612f88565b507ff263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a08383336040516122ad939291906139c8565b60405180910390a1505050565b6122c26125e4565b6122cb81612dfb565b50565b6122d66125e4565b80806122e0612bb6565b101561236e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603560248201527f416d6f756e74207265717565737465642069732067726561746572207468616e60448201527f20776974686472617761626c652062616c616e6365000000000000000000000060648201526084016105ba565b6040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8481166004830152602482018490527f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb906044016020604051808303816000875af1158015612403573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612427919061391a565b61074e5761074e613a02565b61243b61258f565b6124a1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f43616e6e6f742073657420617574686f72697a65642073656e6465727300000060448201526064016105ba565b7f1bb185903e2cb2f1b303523128b60e314dea81df4f8d9b7351cadd344f6e77278484848433604
0516124d8959493929190613a31565b60405180910390a160005b83811015610965578484828181106124fd576124fd61372f565b905060200201602081019061251291906136df565b73ffffffffffffffffffffffffffffffffffffffff1663ee56997b84846040518363ffffffff1660e01b815260040161254c929190613a81565b600060405180830381600087803b15801561256657600080fd5b505af115801561257a573d6000803e3d6000fd5b50505050806125889061378d565b90506124e3565b3360009081526020819052604081205460ff16806110405750336125c860025473ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff1614905090565b60025473ffffffffffffffffffffffffffffffffffffffff163314612665576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016105ba565b565b600080857f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603612720576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f43616e6e6f742063616c6c20746f204c494e4b0000000000000000000000000060448201526064016105ba565b6040517fffffffffffffffffffffffffffffffffffffffff00000000000000000000000060608b901b16602082015260348101869052605401604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001815291815281516020928301206000818152600490935291205490935060081b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00161561282b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f4d75737420757365206120756e6971756520494400000000000000000000000060448201526064016105ba565b61283761012c42613a9d565b6040805160208082018c90527fffffffffffffffffffffffffffffffffffffffff00000000000000000000000060608c901b16828401527fffffffff000000000000000000000000000000000000000000000000000000008a16605483015260588083018590528351808403909101815260789092019092528
05191012090925060405180604001604052808260ff191681526020016128d687612ef1565b60ff9081169091526000868152600460209081526040909120835193909101519091167f01000000000000000000000000000000000000000000000000000000000000000260089290921c919091179055600654612935908a90613a9d565b6006555050965096945050505050565b3360009081526020819052604090205460ff16612665576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f4e6f7420617574686f72697a65642073656e646572000000000000000000000060448201526064016105ba565b6040805160208082018890527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606088901b16828401527fffffffff00000000000000000000000000000000000000000000000000000000861660548301526058808301869052835180840390910181526078909201909252805191012060009060008881526004602052604090205490915060081b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0090811690821614612ae2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f506172616d7320646f206e6f74206d617463682072657175657374204944000060448201526064016105ba565b612aeb82612ef1565b60008881526004602052604090205460ff9182167f01000000000000000000000000000000000000000000000000000000000000009091049091161115612b8e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f446174612076657273696f6e73206d757374206d61746368000000000000000060448201526064016105ba565b85600654612b9c9190613901565b600655505050600093845250506004602052506040812055565b60006001600654612bc79190613901565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906370a0823190602401602060405180830381865afa158015612c51573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612c759190613ab0565b6110409190613901565b612c8b60026020613ac9565b612c96906004613a9d565b81511
015612d00576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f496e76616c69642072657175657374206c656e6774680000000000000000000060448201526064016105ba565b7fffffffff0000000000000000000000000000000000000000000000000000000082167f3c6d41b9000000000000000000000000000000000000000000000000000000001480612d9157507fffffffff0000000000000000000000000000000000000000000000000000000082167f4042994600000000000000000000000000000000000000000000000000000000145b612df7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f4d757374207573652077686974656c69737465642066756e6374696f6e73000060448201526064016105ba565b5050565b3373ffffffffffffffffffffffffffffffffffffffff821603612e7a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016105ba565b600380547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff838116918217909255600254604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b600060ff821115612f84576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f53616665436173743a2076616c756520646f65736e27742066697420696e203860448201527f206269747300000000000000000000000000000000000000000000000000000060648201526084016105ba565b5090565b828054828255906000526020600020908101928215613000579160200282015b828111156130005781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff843516178255602090920191600190910190612fa8565b50612f849291505b80821115612f845760008155600101613008565b60008083601f84011261302e57600080fd5b50813567ffffffffffffffff81111561304657600080fd5b6020830191508360208260051b850101111561306157600080fd5b9250929050565b6000806020838503121561307b57600080fd5b823567fffff
fffffffffff81111561309257600080fd5b61309e8582860161301c565b90969095509350505050565b73ffffffffffffffffffffffffffffffffffffffff811681146122cb57600080fd5b60008083601f8401126130de57600080fd5b50813567ffffffffffffffff8111156130f657600080fd5b60208301915083602082850101111561306157600080fd5b60008060006040848603121561312357600080fd5b833561312e816130aa565b9250602084013567ffffffffffffffff81111561314a57600080fd5b613156868287016130cc565b9497909650939450505050565b60005b8381101561317e578181015183820152602001613166565b50506000910152565b60208152600082518060208401526131a6816040850160208701613163565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b6020808252825182820181905260009190848201906040850190845b8181101561322657835173ffffffffffffffffffffffffffffffffffffffff16835292840192918401916001016131f4565b50909695505050505050565b80357fffffffff000000000000000000000000000000000000000000000000000000008116811461326257600080fd5b919050565b60008060008060008060008060e0898b03121561328357600080fd5b883561328e816130aa565b975060208901359650604089013595506132aa60608a01613232565b94506080890135935060a0890135925060c089013567ffffffffffffffff8111156132d457600080fd5b6132e08b828c016130cc565b999c989b5096995094979396929594505050565b60008060006040848603121561330957600080fd5b833567ffffffffffffffff81111561332057600080fd5b61332c8682870161301c565b9094509250506020840135613340816130aa565b809150509250925092565b60008060008060008060008060006101008a8c03121561336a57600080fd5b8935613375816130aa565b985060208a0135975060408a0135965060608a0135613393816130aa565b95506133a160808b01613232565b945060a08a0135935060c08a0135925060e08a013567ffffffffffffffff8111156133cb57600080fd5b6133d78c828d016130cc565b915080935050809150509295985092959850929598565b60008060008060008060c0878903121561340757600080fd5b86359550602087013594506040870135613420816130aa565b935061342e60608801613232565b92506080870135915060a087013590509295509295509295565b6000806000806040858703121561345e57600080fd5b843567fffffffffffffff
f8082111561347657600080fd5b6134828883890161301c565b9096509450602087013591508082111561349b57600080fd5b506134a88782880161301c565b95989497509550505050565b600080600080600080600060c0888a0312156134cf57600080fd5b873596506020880135955060408801356134e8816130aa565b94506134f660608901613232565b93506080880135925060a088013567ffffffffffffffff81111561351957600080fd5b6135258a828b016130cc565b989b979a50959850939692959293505050565b6000806000806080858703121561354e57600080fd5b843593506020850135925061356560408601613232565b9396929550929360600135925050565b6000806000806060858703121561358b57600080fd5b8435613596816130aa565b935060208501359250604085013567ffffffffffffffff8111156135b957600080fd5b6134a8878288016130cc565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60008060006060848603121561360957600080fd5b8335613614816130aa565b925060208401359150604084013567ffffffffffffffff8082111561363857600080fd5b818601915086601f83011261364c57600080fd5b81358181111561365e5761365e6135c5565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156136a4576136a46135c5565b816040528281528960208487010111156136bd57600080fd5b8260208601602083013760006020848301015280955050505050509250925092565b6000602082840312156136f157600080fd5b81356136fc816130aa565b9392505050565b6000806040838503121561371657600080fd5b8235613721816130aa565b946020939093013593505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036137be576137be61375e565b5060010190565b8183823760009101908152919050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b600061010073ffffffffffffffffffffffffffffffffffffffff808d1684528b60208501528a6040850152808a16606085015
2507fffffffff00000000000000000000000000000000000000000000000000000000881660808401528660a08401528560c08401528060e084015261389981840185876137d5565b9c9b505050505050505050505050565b600082516138bb818460208701613163565b9190910192915050565b7fffffffff0000000000000000000000000000000000000000000000000000000084168152818360048301376000910160040190815292915050565b818103818111156139145761391461375e565b92915050565b60006020828403121561392c57600080fd5b815180151581146136fc57600080fd5b73ffffffffffffffffffffffffffffffffffffffff85168152836020820152606060408201526000611b646060830184866137d5565b8183526000602080850194508260005b858110156139bd578135613995816130aa565b73ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101613982565b509495945050505050565b6040815260006139dc604083018587613972565b905073ffffffffffffffffffffffffffffffffffffffff83166020830152949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052600160045260246000fd5b606081526000613a45606083018789613972565b8281036020840152613a58818688613972565b91505073ffffffffffffffffffffffffffffffffffffffff831660408301529695505050505050565b602081526000613a95602083018486613972565b949350505050565b808201808211156139145761391461375e565b600060208284031215613ac257600080fd5b5051919050565b80820281158282048414176139145761391461375e56fea164736f6c6343000813000a", +} + +var OperatorABI = OperatorMetaData.ABI + +var OperatorBin = OperatorMetaData.Bin + +func DeployOperator(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, owner common.Address) (common.Address, *types.Transaction, *Operator, error) { + parsed, err := OperatorMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OperatorBin), backend, link, owner) + if err != nil { + return common.Address{}, nil, nil, err + } + return 
address, tx, &Operator{address: address, abi: *parsed, OperatorCaller: OperatorCaller{contract: contract}, OperatorTransactor: OperatorTransactor{contract: contract}, OperatorFilterer: OperatorFilterer{contract: contract}}, nil +} + +type Operator struct { + address common.Address + abi abi.ABI + OperatorCaller + OperatorTransactor + OperatorFilterer +} + +type OperatorCaller struct { + contract *bind.BoundContract +} + +type OperatorTransactor struct { + contract *bind.BoundContract +} + +type OperatorFilterer struct { + contract *bind.BoundContract +} + +type OperatorSession struct { + Contract *Operator + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OperatorCallerSession struct { + Contract *OperatorCaller + CallOpts bind.CallOpts +} + +type OperatorTransactorSession struct { + Contract *OperatorTransactor + TransactOpts bind.TransactOpts +} + +type OperatorRaw struct { + Contract *Operator +} + +type OperatorCallerRaw struct { + Contract *OperatorCaller +} + +type OperatorTransactorRaw struct { + Contract *OperatorTransactor +} + +func NewOperator(address common.Address, backend bind.ContractBackend) (*Operator, error) { + abi, err := abi.JSON(strings.NewReader(OperatorABI)) + if err != nil { + return nil, err + } + contract, err := bindOperator(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Operator{address: address, abi: abi, OperatorCaller: OperatorCaller{contract: contract}, OperatorTransactor: OperatorTransactor{contract: contract}, OperatorFilterer: OperatorFilterer{contract: contract}}, nil +} + +func NewOperatorCaller(address common.Address, caller bind.ContractCaller) (*OperatorCaller, error) { + contract, err := bindOperator(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OperatorCaller{contract: contract}, nil +} + +func NewOperatorTransactor(address common.Address, transactor bind.ContractTransactor) (*OperatorTransactor, error) { + contract, err := 
bindOperator(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OperatorTransactor{contract: contract}, nil +} + +func NewOperatorFilterer(address common.Address, filterer bind.ContractFilterer) (*OperatorFilterer, error) { + contract, err := bindOperator(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OperatorFilterer{contract: contract}, nil +} + +func bindOperator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OperatorMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_Operator *OperatorRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Operator.Contract.OperatorCaller.contract.Call(opts, result, method, params...) +} + +func (_Operator *OperatorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Operator.Contract.OperatorTransactor.contract.Transfer(opts) +} + +func (_Operator *OperatorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Operator.Contract.OperatorTransactor.contract.Transact(opts, method, params...) +} + +func (_Operator *OperatorCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Operator.Contract.contract.Call(opts, result, method, params...) +} + +func (_Operator *OperatorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Operator.Contract.contract.Transfer(opts) +} + +func (_Operator *OperatorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Operator.Contract.contract.Transact(opts, method, params...) 
+} + +func (_Operator *OperatorCaller) EXPIRYTIME(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Operator.contract.Call(opts, &out, "EXPIRYTIME") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_Operator *OperatorSession) EXPIRYTIME() (*big.Int, error) { + return _Operator.Contract.EXPIRYTIME(&_Operator.CallOpts) +} + +func (_Operator *OperatorCallerSession) EXPIRYTIME() (*big.Int, error) { + return _Operator.Contract.EXPIRYTIME(&_Operator.CallOpts) +} + +func (_Operator *OperatorCaller) GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _Operator.contract.Call(opts, &out, "getAuthorizedSenders") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_Operator *OperatorSession) GetAuthorizedSenders() ([]common.Address, error) { + return _Operator.Contract.GetAuthorizedSenders(&_Operator.CallOpts) +} + +func (_Operator *OperatorCallerSession) GetAuthorizedSenders() ([]common.Address, error) { + return _Operator.Contract.GetAuthorizedSenders(&_Operator.CallOpts) +} + +func (_Operator *OperatorCaller) GetPluginToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Operator.contract.Call(opts, &out, "getPluginToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_Operator *OperatorSession) GetPluginToken() (common.Address, error) { + return _Operator.Contract.GetPluginToken(&_Operator.CallOpts) +} + +func (_Operator *OperatorCallerSession) GetPluginToken() (common.Address, error) { + return _Operator.Contract.GetPluginToken(&_Operator.CallOpts) +} + +func (_Operator *OperatorCaller) IsAuthorizedSender(opts 
*bind.CallOpts, sender common.Address) (bool, error) { + var out []interface{} + err := _Operator.contract.Call(opts, &out, "isAuthorizedSender", sender) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_Operator *OperatorSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _Operator.Contract.IsAuthorizedSender(&_Operator.CallOpts, sender) +} + +func (_Operator *OperatorCallerSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _Operator.Contract.IsAuthorizedSender(&_Operator.CallOpts, sender) +} + +func (_Operator *OperatorCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Operator.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_Operator *OperatorSession) Owner() (common.Address, error) { + return _Operator.Contract.Owner(&_Operator.CallOpts) +} + +func (_Operator *OperatorCallerSession) Owner() (common.Address, error) { + return _Operator.Contract.Owner(&_Operator.CallOpts) +} + +func (_Operator *OperatorCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _Operator.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_Operator *OperatorSession) TypeAndVersion() (string, error) { + return _Operator.Contract.TypeAndVersion(&_Operator.CallOpts) +} + +func (_Operator *OperatorCallerSession) TypeAndVersion() (string, error) { + return _Operator.Contract.TypeAndVersion(&_Operator.CallOpts) +} + +func (_Operator *OperatorCaller) Withdrawable(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Operator.contract.Call(opts, &out, 
"withdrawable") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_Operator *OperatorSession) Withdrawable() (*big.Int, error) { + return _Operator.Contract.Withdrawable(&_Operator.CallOpts) +} + +func (_Operator *OperatorCallerSession) Withdrawable() (*big.Int, error) { + return _Operator.Contract.Withdrawable(&_Operator.CallOpts) +} + +func (_Operator *OperatorTransactor) AcceptAuthorizedReceivers(opts *bind.TransactOpts, targets []common.Address, senders []common.Address) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "acceptAuthorizedReceivers", targets, senders) +} + +func (_Operator *OperatorSession) AcceptAuthorizedReceivers(targets []common.Address, senders []common.Address) (*types.Transaction, error) { + return _Operator.Contract.AcceptAuthorizedReceivers(&_Operator.TransactOpts, targets, senders) +} + +func (_Operator *OperatorTransactorSession) AcceptAuthorizedReceivers(targets []common.Address, senders []common.Address) (*types.Transaction, error) { + return _Operator.Contract.AcceptAuthorizedReceivers(&_Operator.TransactOpts, targets, senders) +} + +func (_Operator *OperatorTransactor) AcceptOwnableContracts(opts *bind.TransactOpts, ownable []common.Address) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "acceptOwnableContracts", ownable) +} + +func (_Operator *OperatorSession) AcceptOwnableContracts(ownable []common.Address) (*types.Transaction, error) { + return _Operator.Contract.AcceptOwnableContracts(&_Operator.TransactOpts, ownable) +} + +func (_Operator *OperatorTransactorSession) AcceptOwnableContracts(ownable []common.Address) (*types.Transaction, error) { + return _Operator.Contract.AcceptOwnableContracts(&_Operator.TransactOpts, ownable) +} + +func (_Operator *OperatorTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return 
_Operator.contract.Transact(opts, "acceptOwnership") +} + +func (_Operator *OperatorSession) AcceptOwnership() (*types.Transaction, error) { + return _Operator.Contract.AcceptOwnership(&_Operator.TransactOpts) +} + +func (_Operator *OperatorTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _Operator.Contract.AcceptOwnership(&_Operator.TransactOpts) +} + +func (_Operator *OperatorTransactor) CancelOracleRequest(opts *bind.TransactOpts, requestId [32]byte, payment *big.Int, callbackFunc [4]byte, expiration *big.Int) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "cancelOracleRequest", requestId, payment, callbackFunc, expiration) +} + +func (_Operator *OperatorSession) CancelOracleRequest(requestId [32]byte, payment *big.Int, callbackFunc [4]byte, expiration *big.Int) (*types.Transaction, error) { + return _Operator.Contract.CancelOracleRequest(&_Operator.TransactOpts, requestId, payment, callbackFunc, expiration) +} + +func (_Operator *OperatorTransactorSession) CancelOracleRequest(requestId [32]byte, payment *big.Int, callbackFunc [4]byte, expiration *big.Int) (*types.Transaction, error) { + return _Operator.Contract.CancelOracleRequest(&_Operator.TransactOpts, requestId, payment, callbackFunc, expiration) +} + +func (_Operator *OperatorTransactor) CancelOracleRequestByRequester(opts *bind.TransactOpts, nonce *big.Int, payment *big.Int, callbackFunc [4]byte, expiration *big.Int) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "cancelOracleRequestByRequester", nonce, payment, callbackFunc, expiration) +} + +func (_Operator *OperatorSession) CancelOracleRequestByRequester(nonce *big.Int, payment *big.Int, callbackFunc [4]byte, expiration *big.Int) (*types.Transaction, error) { + return _Operator.Contract.CancelOracleRequestByRequester(&_Operator.TransactOpts, nonce, payment, callbackFunc, expiration) +} + +func (_Operator *OperatorTransactorSession) CancelOracleRequestByRequester(nonce 
*big.Int, payment *big.Int, callbackFunc [4]byte, expiration *big.Int) (*types.Transaction, error) { + return _Operator.Contract.CancelOracleRequestByRequester(&_Operator.TransactOpts, nonce, payment, callbackFunc, expiration) +} + +func (_Operator *OperatorTransactor) DistributeFunds(opts *bind.TransactOpts, receivers []common.Address, amounts []*big.Int) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "distributeFunds", receivers, amounts) +} + +func (_Operator *OperatorSession) DistributeFunds(receivers []common.Address, amounts []*big.Int) (*types.Transaction, error) { + return _Operator.Contract.DistributeFunds(&_Operator.TransactOpts, receivers, amounts) +} + +func (_Operator *OperatorTransactorSession) DistributeFunds(receivers []common.Address, amounts []*big.Int) (*types.Transaction, error) { + return _Operator.Contract.DistributeFunds(&_Operator.TransactOpts, receivers, amounts) +} + +func (_Operator *OperatorTransactor) FulfillOracleRequest(opts *bind.TransactOpts, requestId [32]byte, payment *big.Int, callbackAddress common.Address, callbackFunctionId [4]byte, expiration *big.Int, data [32]byte) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "fulfillOracleRequest", requestId, payment, callbackAddress, callbackFunctionId, expiration, data) +} + +func (_Operator *OperatorSession) FulfillOracleRequest(requestId [32]byte, payment *big.Int, callbackAddress common.Address, callbackFunctionId [4]byte, expiration *big.Int, data [32]byte) (*types.Transaction, error) { + return _Operator.Contract.FulfillOracleRequest(&_Operator.TransactOpts, requestId, payment, callbackAddress, callbackFunctionId, expiration, data) +} + +func (_Operator *OperatorTransactorSession) FulfillOracleRequest(requestId [32]byte, payment *big.Int, callbackAddress common.Address, callbackFunctionId [4]byte, expiration *big.Int, data [32]byte) (*types.Transaction, error) { + return 
_Operator.Contract.FulfillOracleRequest(&_Operator.TransactOpts, requestId, payment, callbackAddress, callbackFunctionId, expiration, data) +} + +func (_Operator *OperatorTransactor) FulfillOracleRequest2(opts *bind.TransactOpts, requestId [32]byte, payment *big.Int, callbackAddress common.Address, callbackFunctionId [4]byte, expiration *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "fulfillOracleRequest2", requestId, payment, callbackAddress, callbackFunctionId, expiration, data) +} + +func (_Operator *OperatorSession) FulfillOracleRequest2(requestId [32]byte, payment *big.Int, callbackAddress common.Address, callbackFunctionId [4]byte, expiration *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.Contract.FulfillOracleRequest2(&_Operator.TransactOpts, requestId, payment, callbackAddress, callbackFunctionId, expiration, data) +} + +func (_Operator *OperatorTransactorSession) FulfillOracleRequest2(requestId [32]byte, payment *big.Int, callbackAddress common.Address, callbackFunctionId [4]byte, expiration *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.Contract.FulfillOracleRequest2(&_Operator.TransactOpts, requestId, payment, callbackAddress, callbackFunctionId, expiration, data) +} + +func (_Operator *OperatorTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +func (_Operator *OperatorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.Contract.OnTokenTransfer(&_Operator.TransactOpts, sender, amount, data) +} + +func (_Operator *OperatorTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.Contract.OnTokenTransfer(&_Operator.TransactOpts, sender, 
amount, data) +} + +func (_Operator *OperatorTransactor) OperatorRequest(opts *bind.TransactOpts, sender common.Address, payment *big.Int, specId [32]byte, callbackFunctionId [4]byte, nonce *big.Int, dataVersion *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "operatorRequest", sender, payment, specId, callbackFunctionId, nonce, dataVersion, data) +} + +func (_Operator *OperatorSession) OperatorRequest(sender common.Address, payment *big.Int, specId [32]byte, callbackFunctionId [4]byte, nonce *big.Int, dataVersion *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.Contract.OperatorRequest(&_Operator.TransactOpts, sender, payment, specId, callbackFunctionId, nonce, dataVersion, data) +} + +func (_Operator *OperatorTransactorSession) OperatorRequest(sender common.Address, payment *big.Int, specId [32]byte, callbackFunctionId [4]byte, nonce *big.Int, dataVersion *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.Contract.OperatorRequest(&_Operator.TransactOpts, sender, payment, specId, callbackFunctionId, nonce, dataVersion, data) +} + +func (_Operator *OperatorTransactor) OracleRequest(opts *bind.TransactOpts, sender common.Address, payment *big.Int, specId [32]byte, callbackAddress common.Address, callbackFunctionId [4]byte, nonce *big.Int, dataVersion *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "oracleRequest", sender, payment, specId, callbackAddress, callbackFunctionId, nonce, dataVersion, data) +} + +func (_Operator *OperatorSession) OracleRequest(sender common.Address, payment *big.Int, specId [32]byte, callbackAddress common.Address, callbackFunctionId [4]byte, nonce *big.Int, dataVersion *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.Contract.OracleRequest(&_Operator.TransactOpts, sender, payment, specId, callbackAddress, callbackFunctionId, nonce, dataVersion, data) +} + +func (_Operator 
*OperatorTransactorSession) OracleRequest(sender common.Address, payment *big.Int, specId [32]byte, callbackAddress common.Address, callbackFunctionId [4]byte, nonce *big.Int, dataVersion *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.Contract.OracleRequest(&_Operator.TransactOpts, sender, payment, specId, callbackAddress, callbackFunctionId, nonce, dataVersion, data) +} + +func (_Operator *OperatorTransactor) OwnerForward(opts *bind.TransactOpts, to common.Address, data []byte) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "ownerForward", to, data) +} + +func (_Operator *OperatorSession) OwnerForward(to common.Address, data []byte) (*types.Transaction, error) { + return _Operator.Contract.OwnerForward(&_Operator.TransactOpts, to, data) +} + +func (_Operator *OperatorTransactorSession) OwnerForward(to common.Address, data []byte) (*types.Transaction, error) { + return _Operator.Contract.OwnerForward(&_Operator.TransactOpts, to, data) +} + +func (_Operator *OperatorTransactor) OwnerTransferAndCall(opts *bind.TransactOpts, to common.Address, value *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "ownerTransferAndCall", to, value, data) +} + +func (_Operator *OperatorSession) OwnerTransferAndCall(to common.Address, value *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.Contract.OwnerTransferAndCall(&_Operator.TransactOpts, to, value, data) +} + +func (_Operator *OperatorTransactorSession) OwnerTransferAndCall(to common.Address, value *big.Int, data []byte) (*types.Transaction, error) { + return _Operator.Contract.OwnerTransferAndCall(&_Operator.TransactOpts, to, value, data) +} + +func (_Operator *OperatorTransactor) SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "setAuthorizedSenders", senders) +} + +func (_Operator *OperatorSession) 
SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _Operator.Contract.SetAuthorizedSenders(&_Operator.TransactOpts, senders) +} + +func (_Operator *OperatorTransactorSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _Operator.Contract.SetAuthorizedSenders(&_Operator.TransactOpts, senders) +} + +func (_Operator *OperatorTransactor) SetAuthorizedSendersOn(opts *bind.TransactOpts, targets []common.Address, senders []common.Address) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "setAuthorizedSendersOn", targets, senders) +} + +func (_Operator *OperatorSession) SetAuthorizedSendersOn(targets []common.Address, senders []common.Address) (*types.Transaction, error) { + return _Operator.Contract.SetAuthorizedSendersOn(&_Operator.TransactOpts, targets, senders) +} + +func (_Operator *OperatorTransactorSession) SetAuthorizedSendersOn(targets []common.Address, senders []common.Address) (*types.Transaction, error) { + return _Operator.Contract.SetAuthorizedSendersOn(&_Operator.TransactOpts, targets, senders) +} + +func (_Operator *OperatorTransactor) TransferOwnableContracts(opts *bind.TransactOpts, ownable []common.Address, newOwner common.Address) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "transferOwnableContracts", ownable, newOwner) +} + +func (_Operator *OperatorSession) TransferOwnableContracts(ownable []common.Address, newOwner common.Address) (*types.Transaction, error) { + return _Operator.Contract.TransferOwnableContracts(&_Operator.TransactOpts, ownable, newOwner) +} + +func (_Operator *OperatorTransactorSession) TransferOwnableContracts(ownable []common.Address, newOwner common.Address) (*types.Transaction, error) { + return _Operator.Contract.TransferOwnableContracts(&_Operator.TransactOpts, ownable, newOwner) +} + +func (_Operator *OperatorTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) 
(*types.Transaction, error) { + return _Operator.contract.Transact(opts, "transferOwnership", to) +} + +func (_Operator *OperatorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _Operator.Contract.TransferOwnership(&_Operator.TransactOpts, to) +} + +func (_Operator *OperatorTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _Operator.Contract.TransferOwnership(&_Operator.TransactOpts, to) +} + +func (_Operator *OperatorTransactor) Withdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _Operator.contract.Transact(opts, "withdraw", recipient, amount) +} + +func (_Operator *OperatorSession) Withdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _Operator.Contract.Withdraw(&_Operator.TransactOpts, recipient, amount) +} + +func (_Operator *OperatorTransactorSession) Withdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _Operator.Contract.Withdraw(&_Operator.TransactOpts, recipient, amount) +} + +type OperatorAuthorizedSendersChangedIterator struct { + Event *OperatorAuthorizedSendersChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OperatorAuthorizedSendersChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OperatorAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OperatorAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := 
<-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OperatorAuthorizedSendersChangedIterator) Error() error { + return it.fail +} + +func (it *OperatorAuthorizedSendersChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OperatorAuthorizedSendersChanged struct { + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_Operator *OperatorFilterer) FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*OperatorAuthorizedSendersChangedIterator, error) { + + logs, sub, err := _Operator.contract.FilterLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return &OperatorAuthorizedSendersChangedIterator{contract: _Operator.contract, event: "AuthorizedSendersChanged", logs: logs, sub: sub}, nil +} + +func (_Operator *OperatorFilterer) WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *OperatorAuthorizedSendersChanged) (event.Subscription, error) { + + logs, sub, err := _Operator.contract.WatchLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OperatorAuthorizedSendersChanged) + if err := _Operator.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Operator *OperatorFilterer) ParseAuthorizedSendersChanged(log types.Log) (*OperatorAuthorizedSendersChanged, error) { + event := new(OperatorAuthorizedSendersChanged) + if err := _Operator.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OperatorCancelOracleRequestIterator 
struct { + Event *OperatorCancelOracleRequest + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OperatorCancelOracleRequestIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OperatorCancelOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OperatorCancelOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OperatorCancelOracleRequestIterator) Error() error { + return it.fail +} + +func (it *OperatorCancelOracleRequestIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OperatorCancelOracleRequest struct { + RequestId [32]byte + Raw types.Log +} + +func (_Operator *OperatorFilterer) FilterCancelOracleRequest(opts *bind.FilterOpts, requestId [][32]byte) (*OperatorCancelOracleRequestIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _Operator.contract.FilterLogs(opts, "CancelOracleRequest", requestIdRule) + if err != nil { + return nil, err + } + return &OperatorCancelOracleRequestIterator{contract: _Operator.contract, event: "CancelOracleRequest", logs: logs, sub: sub}, nil +} + +func (_Operator *OperatorFilterer) WatchCancelOracleRequest(opts *bind.WatchOpts, sink chan<- *OperatorCancelOracleRequest, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = 
append(requestIdRule, requestIdItem) + } + + logs, sub, err := _Operator.contract.WatchLogs(opts, "CancelOracleRequest", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OperatorCancelOracleRequest) + if err := _Operator.contract.UnpackLog(event, "CancelOracleRequest", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Operator *OperatorFilterer) ParseCancelOracleRequest(log types.Log) (*OperatorCancelOracleRequest, error) { + event := new(OperatorCancelOracleRequest) + if err := _Operator.contract.UnpackLog(event, "CancelOracleRequest", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OperatorOracleRequestIterator struct { + Event *OperatorOracleRequest + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OperatorOracleRequestIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OperatorOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OperatorOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OperatorOracleRequestIterator) Error() error { + return it.fail +} + +func (it 
*OperatorOracleRequestIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OperatorOracleRequest struct { + SpecId [32]byte + Requester common.Address + RequestId [32]byte + Payment *big.Int + CallbackAddr common.Address + CallbackFunctionId [4]byte + CancelExpiration *big.Int + DataVersion *big.Int + Data []byte + Raw types.Log +} + +func (_Operator *OperatorFilterer) FilterOracleRequest(opts *bind.FilterOpts, specId [][32]byte) (*OperatorOracleRequestIterator, error) { + + var specIdRule []interface{} + for _, specIdItem := range specId { + specIdRule = append(specIdRule, specIdItem) + } + + logs, sub, err := _Operator.contract.FilterLogs(opts, "OracleRequest", specIdRule) + if err != nil { + return nil, err + } + return &OperatorOracleRequestIterator{contract: _Operator.contract, event: "OracleRequest", logs: logs, sub: sub}, nil +} + +func (_Operator *OperatorFilterer) WatchOracleRequest(opts *bind.WatchOpts, sink chan<- *OperatorOracleRequest, specId [][32]byte) (event.Subscription, error) { + + var specIdRule []interface{} + for _, specIdItem := range specId { + specIdRule = append(specIdRule, specIdItem) + } + + logs, sub, err := _Operator.contract.WatchLogs(opts, "OracleRequest", specIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OperatorOracleRequest) + if err := _Operator.contract.UnpackLog(event, "OracleRequest", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Operator *OperatorFilterer) ParseOracleRequest(log types.Log) (*OperatorOracleRequest, error) { + event := new(OperatorOracleRequest) + if err := _Operator.contract.UnpackLog(event, "OracleRequest", log); err != nil { + 
return nil, err + } + event.Raw = log + return event, nil +} + +type OperatorOracleResponseIterator struct { + Event *OperatorOracleResponse + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OperatorOracleResponseIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OperatorOracleResponse) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OperatorOracleResponse) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OperatorOracleResponseIterator) Error() error { + return it.fail +} + +func (it *OperatorOracleResponseIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OperatorOracleResponse struct { + RequestId [32]byte + Raw types.Log +} + +func (_Operator *OperatorFilterer) FilterOracleResponse(opts *bind.FilterOpts, requestId [][32]byte) (*OperatorOracleResponseIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _Operator.contract.FilterLogs(opts, "OracleResponse", requestIdRule) + if err != nil { + return nil, err + } + return &OperatorOracleResponseIterator{contract: _Operator.contract, event: "OracleResponse", logs: logs, sub: sub}, nil +} + +func (_Operator *OperatorFilterer) WatchOracleResponse(opts *bind.WatchOpts, sink chan<- *OperatorOracleResponse, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range 
requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _Operator.contract.WatchLogs(opts, "OracleResponse", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OperatorOracleResponse) + if err := _Operator.contract.UnpackLog(event, "OracleResponse", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Operator *OperatorFilterer) ParseOracleResponse(log types.Log) (*OperatorOracleResponse, error) { + event := new(OperatorOracleResponse) + if err := _Operator.contract.UnpackLog(event, "OracleResponse", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OperatorOwnableContractAcceptedIterator struct { + Event *OperatorOwnableContractAccepted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OperatorOwnableContractAcceptedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OperatorOwnableContractAccepted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OperatorOwnableContractAccepted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OperatorOwnableContractAcceptedIterator) Error() error { + 
return it.fail +} + +func (it *OperatorOwnableContractAcceptedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OperatorOwnableContractAccepted struct { + AcceptedContract common.Address + Raw types.Log +} + +func (_Operator *OperatorFilterer) FilterOwnableContractAccepted(opts *bind.FilterOpts, acceptedContract []common.Address) (*OperatorOwnableContractAcceptedIterator, error) { + + var acceptedContractRule []interface{} + for _, acceptedContractItem := range acceptedContract { + acceptedContractRule = append(acceptedContractRule, acceptedContractItem) + } + + logs, sub, err := _Operator.contract.FilterLogs(opts, "OwnableContractAccepted", acceptedContractRule) + if err != nil { + return nil, err + } + return &OperatorOwnableContractAcceptedIterator{contract: _Operator.contract, event: "OwnableContractAccepted", logs: logs, sub: sub}, nil +} + +func (_Operator *OperatorFilterer) WatchOwnableContractAccepted(opts *bind.WatchOpts, sink chan<- *OperatorOwnableContractAccepted, acceptedContract []common.Address) (event.Subscription, error) { + + var acceptedContractRule []interface{} + for _, acceptedContractItem := range acceptedContract { + acceptedContractRule = append(acceptedContractRule, acceptedContractItem) + } + + logs, sub, err := _Operator.contract.WatchLogs(opts, "OwnableContractAccepted", acceptedContractRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OperatorOwnableContractAccepted) + if err := _Operator.contract.UnpackLog(event, "OwnableContractAccepted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Operator *OperatorFilterer) ParseOwnableContractAccepted(log types.Log) 
(*OperatorOwnableContractAccepted, error) { + event := new(OperatorOwnableContractAccepted) + if err := _Operator.contract.UnpackLog(event, "OwnableContractAccepted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OperatorOwnershipTransferRequestedIterator struct { + Event *OperatorOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OperatorOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OperatorOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OperatorOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OperatorOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OperatorOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OperatorOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_Operator *OperatorFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OperatorOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Operator.contract.FilterLogs(opts, 
"OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &OperatorOwnershipTransferRequestedIterator{contract: _Operator.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_Operator *OperatorFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OperatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Operator.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OperatorOwnershipTransferRequested) + if err := _Operator.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Operator *OperatorFilterer) ParseOwnershipTransferRequested(log types.Log) (*OperatorOwnershipTransferRequested, error) { + event := new(OperatorOwnershipTransferRequested) + if err := _Operator.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OperatorOwnershipTransferredIterator struct { + Event *OperatorOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OperatorOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if 
it.done { + select { + case log := <-it.logs: + it.Event = new(OperatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OperatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OperatorOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *OperatorOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OperatorOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_Operator *OperatorFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OperatorOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Operator.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &OperatorOwnershipTransferredIterator{contract: _Operator.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_Operator *OperatorFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OperatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := 
_Operator.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OperatorOwnershipTransferred) + if err := _Operator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Operator *OperatorFilterer) ParseOwnershipTransferred(log types.Log) (*OperatorOwnershipTransferred, error) { + event := new(OperatorOwnershipTransferred) + if err := _Operator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OperatorTargetsUpdatedAuthorizedSendersIterator struct { + Event *OperatorTargetsUpdatedAuthorizedSenders + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OperatorTargetsUpdatedAuthorizedSendersIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OperatorTargetsUpdatedAuthorizedSenders) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OperatorTargetsUpdatedAuthorizedSenders) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OperatorTargetsUpdatedAuthorizedSendersIterator) Error() error { + 
return it.fail +} + +func (it *OperatorTargetsUpdatedAuthorizedSendersIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OperatorTargetsUpdatedAuthorizedSenders struct { + Targets []common.Address + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_Operator *OperatorFilterer) FilterTargetsUpdatedAuthorizedSenders(opts *bind.FilterOpts) (*OperatorTargetsUpdatedAuthorizedSendersIterator, error) { + + logs, sub, err := _Operator.contract.FilterLogs(opts, "TargetsUpdatedAuthorizedSenders") + if err != nil { + return nil, err + } + return &OperatorTargetsUpdatedAuthorizedSendersIterator{contract: _Operator.contract, event: "TargetsUpdatedAuthorizedSenders", logs: logs, sub: sub}, nil +} + +func (_Operator *OperatorFilterer) WatchTargetsUpdatedAuthorizedSenders(opts *bind.WatchOpts, sink chan<- *OperatorTargetsUpdatedAuthorizedSenders) (event.Subscription, error) { + + logs, sub, err := _Operator.contract.WatchLogs(opts, "TargetsUpdatedAuthorizedSenders") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OperatorTargetsUpdatedAuthorizedSenders) + if err := _Operator.contract.UnpackLog(event, "TargetsUpdatedAuthorizedSenders", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Operator *OperatorFilterer) ParseTargetsUpdatedAuthorizedSenders(log types.Log) (*OperatorTargetsUpdatedAuthorizedSenders, error) { + event := new(OperatorTargetsUpdatedAuthorizedSenders) + if err := _Operator.contract.UnpackLog(event, "TargetsUpdatedAuthorizedSenders", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_Operator *Operator) ParseLog(log types.Log) 
(generated.AbigenLog, error) { + switch log.Topics[0] { + case _Operator.abi.Events["AuthorizedSendersChanged"].ID: + return _Operator.ParseAuthorizedSendersChanged(log) + case _Operator.abi.Events["CancelOracleRequest"].ID: + return _Operator.ParseCancelOracleRequest(log) + case _Operator.abi.Events["OracleRequest"].ID: + return _Operator.ParseOracleRequest(log) + case _Operator.abi.Events["OracleResponse"].ID: + return _Operator.ParseOracleResponse(log) + case _Operator.abi.Events["OwnableContractAccepted"].ID: + return _Operator.ParseOwnableContractAccepted(log) + case _Operator.abi.Events["OwnershipTransferRequested"].ID: + return _Operator.ParseOwnershipTransferRequested(log) + case _Operator.abi.Events["OwnershipTransferred"].ID: + return _Operator.ParseOwnershipTransferred(log) + case _Operator.abi.Events["TargetsUpdatedAuthorizedSenders"].ID: + return _Operator.ParseTargetsUpdatedAuthorizedSenders(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OperatorAuthorizedSendersChanged) Topic() common.Hash { + return common.HexToHash("0xf263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0") +} + +func (OperatorCancelOracleRequest) Topic() common.Hash { + return common.HexToHash("0xa7842b9ec549398102c0d91b1b9919b2f20558aefdadf57528a95c6cd3292e93") +} + +func (OperatorOracleRequest) Topic() common.Hash { + return common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65") +} + +func (OperatorOracleResponse) Topic() common.Hash { + return common.HexToHash("0x9e9bc7616d42c2835d05ae617e508454e63b30b934be8aa932ebc125e0e58a64") +} + +func (OperatorOwnableContractAccepted) Topic() common.Hash { + return common.HexToHash("0x615a0c1cb00a60d4acd77ec67acf2f17f223ef0932d591052fabc33643fe7e82") +} + +func (OperatorOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func 
(OperatorOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (OperatorTargetsUpdatedAuthorizedSenders) Topic() common.Hash { + return common.HexToHash("0x1bb185903e2cb2f1b303523128b60e314dea81df4f8d9b7351cadd344f6e7727") +} + +func (_Operator *Operator) Address() common.Address { + return _Operator.address +} + +type OperatorInterface interface { + EXPIRYTIME(opts *bind.CallOpts) (*big.Int, error) + + GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) + + GetPluginToken(opts *bind.CallOpts) (common.Address, error) + + IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + Withdrawable(opts *bind.CallOpts) (*big.Int, error) + + AcceptAuthorizedReceivers(opts *bind.TransactOpts, targets []common.Address, senders []common.Address) (*types.Transaction, error) + + AcceptOwnableContracts(opts *bind.TransactOpts, ownable []common.Address) (*types.Transaction, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + CancelOracleRequest(opts *bind.TransactOpts, requestId [32]byte, payment *big.Int, callbackFunc [4]byte, expiration *big.Int) (*types.Transaction, error) + + CancelOracleRequestByRequester(opts *bind.TransactOpts, nonce *big.Int, payment *big.Int, callbackFunc [4]byte, expiration *big.Int) (*types.Transaction, error) + + DistributeFunds(opts *bind.TransactOpts, receivers []common.Address, amounts []*big.Int) (*types.Transaction, error) + + FulfillOracleRequest(opts *bind.TransactOpts, requestId [32]byte, payment *big.Int, callbackAddress common.Address, callbackFunctionId [4]byte, expiration *big.Int, data [32]byte) (*types.Transaction, error) + + FulfillOracleRequest2(opts *bind.TransactOpts, requestId [32]byte, payment *big.Int, callbackAddress common.Address, callbackFunctionId 
[4]byte, expiration *big.Int, data []byte) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + OperatorRequest(opts *bind.TransactOpts, sender common.Address, payment *big.Int, specId [32]byte, callbackFunctionId [4]byte, nonce *big.Int, dataVersion *big.Int, data []byte) (*types.Transaction, error) + + OracleRequest(opts *bind.TransactOpts, sender common.Address, payment *big.Int, specId [32]byte, callbackAddress common.Address, callbackFunctionId [4]byte, nonce *big.Int, dataVersion *big.Int, data []byte) (*types.Transaction, error) + + OwnerForward(opts *bind.TransactOpts, to common.Address, data []byte) (*types.Transaction, error) + + OwnerTransferAndCall(opts *bind.TransactOpts, to common.Address, value *big.Int, data []byte) (*types.Transaction, error) + + SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) + + SetAuthorizedSendersOn(opts *bind.TransactOpts, targets []common.Address, senders []common.Address) (*types.Transaction, error) + + TransferOwnableContracts(opts *bind.TransactOpts, ownable []common.Address, newOwner common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Withdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) + + FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*OperatorAuthorizedSendersChangedIterator, error) + + WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *OperatorAuthorizedSendersChanged) (event.Subscription, error) + + ParseAuthorizedSendersChanged(log types.Log) (*OperatorAuthorizedSendersChanged, error) + + FilterCancelOracleRequest(opts *bind.FilterOpts, requestId [][32]byte) (*OperatorCancelOracleRequestIterator, error) + + WatchCancelOracleRequest(opts *bind.WatchOpts, sink chan<- *OperatorCancelOracleRequest, 
requestId [][32]byte) (event.Subscription, error) + + ParseCancelOracleRequest(log types.Log) (*OperatorCancelOracleRequest, error) + + FilterOracleRequest(opts *bind.FilterOpts, specId [][32]byte) (*OperatorOracleRequestIterator, error) + + WatchOracleRequest(opts *bind.WatchOpts, sink chan<- *OperatorOracleRequest, specId [][32]byte) (event.Subscription, error) + + ParseOracleRequest(log types.Log) (*OperatorOracleRequest, error) + + FilterOracleResponse(opts *bind.FilterOpts, requestId [][32]byte) (*OperatorOracleResponseIterator, error) + + WatchOracleResponse(opts *bind.WatchOpts, sink chan<- *OperatorOracleResponse, requestId [][32]byte) (event.Subscription, error) + + ParseOracleResponse(log types.Log) (*OperatorOracleResponse, error) + + FilterOwnableContractAccepted(opts *bind.FilterOpts, acceptedContract []common.Address) (*OperatorOwnableContractAcceptedIterator, error) + + WatchOwnableContractAccepted(opts *bind.WatchOpts, sink chan<- *OperatorOwnableContractAccepted, acceptedContract []common.Address) (event.Subscription, error) + + ParseOwnableContractAccepted(log types.Log) (*OperatorOwnableContractAccepted, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OperatorOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OperatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*OperatorOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OperatorOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OperatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*OperatorOwnershipTransferred, error) + + 
FilterTargetsUpdatedAuthorizedSenders(opts *bind.FilterOpts) (*OperatorTargetsUpdatedAuthorizedSendersIterator, error) + + WatchTargetsUpdatedAuthorizedSenders(opts *bind.WatchOpts, sink chan<- *OperatorTargetsUpdatedAuthorizedSenders) (event.Subscription, error) + + ParseTargetsUpdatedAuthorizedSenders(log types.Log) (*OperatorTargetsUpdatedAuthorizedSenders, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/oracle_wrapper/oracle_wrapper.go b/core/gethwrappers/generated/oracle_wrapper/oracle_wrapper.go new file mode 100644 index 00000000..7ef1bd60 --- /dev/null +++ b/core/gethwrappers/generated/oracle_wrapper/oracle_wrapper.go @@ -0,0 +1,866 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package oracle_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var OracleMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"name\":\"CancelOracleRequest\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"specId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"payment\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"callbackAddr\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes4\",\"name\":\"callbackFunctionId\",\"type\":\"bytes4\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"cancelExpiration\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"dataVersion\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"OracleRequest\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"EXPIRY_TIME\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_payment\",\"type\":\"uint256\"},{\"internalType\":\"bytes4\",\"name\":\"_callbackFunc\",\"type\":\"bytes4\"},{\"internalType\":\"uint256\",\"name\":\"_expiration\",\"type\":
\"uint256\"}],\"name\":\"cancelOracleRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_payment\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_callbackAddress\",\"type\":\"address\"},{\"internalType\":\"bytes4\",\"name\":\"_callbackFunctionId\",\"type\":\"bytes4\"},{\"internalType\":\"uint256\",\"name\":\"_expiration\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"_data\",\"type\":\"bytes32\"}],\"name\":\"fulfillOracleRequest\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_node\",\"type\":\"address\"}],\"name\":\"getAuthorizationStatus\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPluginToken\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"isOwner\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_payment\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"_specId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"_callbackAddress\",\"type\":\"address\"},{\"intern
alType\":\"bytes4\",\"name\":\"_callbackFunctionId\",\"type\":\"bytes4\"},{\"internalType\":\"uint256\",\"name\":\"_nonce\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_dataVersion\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"oracleRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_node\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"_allowed\",\"type\":\"bool\"}],\"name\":\"setFulfillmentPermission\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawable\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x6080604052600160045534801561001557600080fd5b506040516119513803806119518339818101604052602081101561003857600080fd5b5051600080546001600160a01b03191633178082556040516001600160a01b039190911691907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0908290a3600180546001600160a01b0319166001600160a01b039290921691909117905561189f806100b26000396000f3fe608060405234801561001057600080fd5b50600436106100df5760003560e01c80637fcd56db1161008c578063a4c0ed3611610066578063a4c0ed3614610332578063d3e9c314146103fa578063f2fde38b1461042d578063f3fef3a314610460576100df565b80637fcd56db146102e75780638da5cb5b146103225780638f32d59b1461032a576100df565b80634b602282116100bd5780634b60228214610274578063501883011461028e5780636ee4d55314610296576100df565b8063165d35e1146100e457806340429946146101155780634ab0d190146101ed575b600080fd5b6100ec610499565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190f35b6101eb600480360361010081101561012c57600080fd5b73ffffffffffffffffffffffffffffffffffffffff8235811692602081013592604082013592606083013516917fffffffff000000000000000000000000000000000000000000000000000000006080820135169160a08201359160c081013591810190610100810160e08201356401000000008111156101ac57600080fd5b8201836020820111156101be57600080fd5b803590602001918460018302840111640100000000831117156101e057600080fd5b5090925090506104b5565b005b610260600480360360c081101561020357600080fd5b5080359060208101359073ffffffffffffffffffffffffffffffffffffffff604082013516907fffffffff000000000000000000000000000000000000000000000000000000006060820135169060808101359060a001356108e6565b604080519115158252519081900360200190f35b61027c610ce5565b60408051918252519081900360200190f35b61027c610ceb565b6101eb600480360360808110156102ac57600080fd5b508035906020810135907fffffffff000000000000000000000000000000000000000000000000000000006040820135169060600135610d79565b6101eb600480360360408110156102fd57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff81351690602001351515610fac565b6100ec611075565b610260611091565b610
1eb6004803603606081101561034857600080fd5b73ffffffffffffffffffffffffffffffffffffffff8235169160208101359181019060608101604082013564010000000081111561038557600080fd5b82018360208201111561039757600080fd5b803590602001918460018302840111640100000000831117156103b957600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295506110af945050505050565b6102606004803603602081101561041057600080fd5b503573ffffffffffffffffffffffffffffffffffffffff166113cb565b6101eb6004803603602081101561044357600080fd5b503573ffffffffffffffffffffffffffffffffffffffff166113f6565b6101eb6004803603604081101561047657600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135169060200135611475565b60015473ffffffffffffffffffffffffffffffffffffffff1690565b6104bd610499565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461055657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f4d75737420757365204c494e4b20746f6b656e00000000000000000000000000604482015290519081900360640190fd5b600154869073ffffffffffffffffffffffffffffffffffffffff808316911614156105e257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f742063616c6c6261636b20746f204c494e4b000000000000000000604482015290519081900360640190fd5b604080517fffffffffffffffffffffffffffffffffffffffff00000000000000000000000060608d901b16602080830191909152603480830189905283518084039091018152605490920183528151918101919091206000818152600290925291902054156106b257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f4d75737420757365206120756e69717565204944000000000000000000000000604482015290519081900360640190fd5b60006106c64261012c63ffffffff61162216565b90508a898983604051602001808581526020018473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660601b8152601401837bfffffffffffffffffffffff
fffffffffffffffffffffffffffffffff19167bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19168152600401828152602001945050505050604051602081830303815290604052805190602001206002600084815260200190815260200160002081905550897fd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c658d848e8d8d878d8d8d604051808a73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018981526020018881526020018773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001867bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19167bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19168152602001858152602001848152602001806020018281038252848482818152602001925080828437600083820152604051601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018290039c50909a5050505050505050505050a2505050505050505050505050565b3360009081526003602052604081205460ff16806109365750610907611075565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b61098b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602a815260200180611869602a913960400191505060405180910390fd5b6000878152600260205260409020548790610a0757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f4d757374206861766520612076616c6964207265717565737449640000000000604482015290519081900360640190fd5b6040805160208082018a90527fffffffffffffffffffffffffffffffffffffffff00000000000000000000000060608a901b16828401527fffffffff00000000000000000000000000000000000000000000000000000000881660548301526058808301889052835180840390910181526078909201835281519181019190912060008b81526002909252919020548114610b0357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f506172616d7320646f206e6f74206d6174636820726571756573742049440000604482015290519081900360640190fd5b6
00454610b16908963ffffffff61162216565b60045560008981526002602052604081205562061a805a1015610b9a57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4d7573742070726f7669646520636f6e73756d657220656e6f75676820676173604482015290519081900360640190fd5b60408051602481018b9052604480820187905282518083039091018152606490910182526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000008a161781529151815160009373ffffffffffffffffffffffffffffffffffffffff8c169392918291908083835b60208310610c6d57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c30565b6001836020036101000a0380198251168184511680821785525050505050509050019150506000604051808303816000865af19150503d8060008114610ccf576040519150601f19603f3d011682016040523d82523d6000602084013e610cd4565b606091505b50909b9a5050505050505050505050565b61012c81565b6000610cf5611091565b610d6057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604482015290519081900360640190fd5b600454610d7490600163ffffffff61169d16565b905090565b6040805160208082018690523360601b828401527fffffffff00000000000000000000000000000000000000000000000000000000851660548301526058808301859052835180840390910181526078909201835281519181019190912060008781526002909252919020548114610e5257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f506172616d7320646f206e6f74206d6174636820726571756573742049440000604482015290519081900360640190fd5b42821115610ec157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f52657175657374206973206e6f74206578706972656400000000000000000000604482015290519081900360640190fd5b6000858152600260205260408082208290555186917fa7842b9ec549398102c0d91b1b9919b2f20558aef
dadf57528a95c6cd3292e9391a2600154604080517fa9059cbb00000000000000000000000000000000000000000000000000000000815233600482015260248101879052905173ffffffffffffffffffffffffffffffffffffffff9092169163a9059cbb916044808201926020929091908290030181600087803b158015610f7357600080fd5b505af1158015610f87573d6000803e3d6000fd5b505050506040513d6020811015610f9d57600080fd5b5051610fa557fe5b5050505050565b610fb4611091565b61101f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff91909116600090815260036020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016911515919091179055565b60005473ffffffffffffffffffffffffffffffffffffffff1690565b60005473ffffffffffffffffffffffffffffffffffffffff16331490565b6110b7610499565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461115057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f4d75737420757365204c494e4b20746f6b656e00000000000000000000000000604482015290519081900360640190fd5b80518190604411156111c357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f496e76616c69642072657175657374206c656e67746800000000000000000000604482015290519081900360640190fd5b602082015182907fffffffff0000000000000000000000000000000000000000000000000000000081167f40429946000000000000000000000000000000000000000000000000000000001461127a57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f4d757374207573652077686974656c69737465642066756e6374696f6e730000604482015290519081900360640190fd5b85602485015284604485015260003073ffffffffffffffffffffffffffffffffffffffff16856040518082805190602001908083835b602083106112ed57805182527ffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffe090920191602091820191016112b0565b6001836020036101000a038019825116818451168082178552505050505050905001915050600060405180830381855af49150503d806000811461134d576040519150601f19603f3d011682016040523d82523d6000602084013e611352565b606091505b50509050806113c257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f556e61626c6520746f2063726561746520726571756573740000000000000000604482015290519081900360640190fd5b50505050505050565b73ffffffffffffffffffffffffffffffffffffffff1660009081526003602052604090205460ff1690565b6113fe611091565b61146957604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604482015290519081900360640190fd5b61147281611714565b50565b61147d611091565b6114e857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604482015290519081900360640190fd5b806114fa81600163ffffffff61162216565b6004541015611554576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260358152602001806118346035913960400191505060405180910390fd5b600454611567908363ffffffff61169d16565b6004908155600154604080517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff87811694820194909452602481018690529051929091169163a9059cbb916044808201926020929091908290030181600087803b1580156115eb57600080fd5b505af11580156115ff573d6000803e3d6000fd5b505050506040513d602081101561161557600080fd5b505161161d57fe5b505050565b60008282018381101561169657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b9392505050565b60008282111561170e57604080517f08c379a000000000000000000000000
000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b50900390565b73ffffffffffffffffffffffffffffffffffffffff8116611780576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602681526020018061180e6026913960400191505060405180910390fd5b6000805460405173ffffffffffffffffffffffffffffffffffffffff808516939216917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a3600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9290921691909117905556fe4f776e61626c653a206e6577206f776e657220697320746865207a65726f2061646472657373416d6f756e74207265717565737465642069732067726561746572207468616e20776974686472617761626c652062616c616e63654e6f7420616e20617574686f72697a6564206e6f646520746f2066756c66696c6c207265717565737473a164736f6c6343000606000a", +} + +var OracleABI = OracleMetaData.ABI + +var OracleBin = OracleMetaData.Bin + +func DeployOracle(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address) (common.Address, *types.Transaction, *Oracle, error) { + parsed, err := OracleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OracleBin), backend, _link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Oracle{address: address, abi: *parsed, OracleCaller: OracleCaller{contract: contract}, OracleTransactor: OracleTransactor{contract: contract}, OracleFilterer: OracleFilterer{contract: contract}}, nil +} + +type Oracle struct { + address common.Address + abi abi.ABI + OracleCaller + OracleTransactor + OracleFilterer +} + +type OracleCaller struct { + contract *bind.BoundContract +} + +type 
OracleTransactor struct { + contract *bind.BoundContract +} + +type OracleFilterer struct { + contract *bind.BoundContract +} + +type OracleSession struct { + Contract *Oracle + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OracleCallerSession struct { + Contract *OracleCaller + CallOpts bind.CallOpts +} + +type OracleTransactorSession struct { + Contract *OracleTransactor + TransactOpts bind.TransactOpts +} + +type OracleRaw struct { + Contract *Oracle +} + +type OracleCallerRaw struct { + Contract *OracleCaller +} + +type OracleTransactorRaw struct { + Contract *OracleTransactor +} + +func NewOracle(address common.Address, backend bind.ContractBackend) (*Oracle, error) { + abi, err := abi.JSON(strings.NewReader(OracleABI)) + if err != nil { + return nil, err + } + contract, err := bindOracle(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Oracle{address: address, abi: abi, OracleCaller: OracleCaller{contract: contract}, OracleTransactor: OracleTransactor{contract: contract}, OracleFilterer: OracleFilterer{contract: contract}}, nil +} + +func NewOracleCaller(address common.Address, caller bind.ContractCaller) (*OracleCaller, error) { + contract, err := bindOracle(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OracleCaller{contract: contract}, nil +} + +func NewOracleTransactor(address common.Address, transactor bind.ContractTransactor) (*OracleTransactor, error) { + contract, err := bindOracle(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OracleTransactor{contract: contract}, nil +} + +func NewOracleFilterer(address common.Address, filterer bind.ContractFilterer) (*OracleFilterer, error) { + contract, err := bindOracle(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OracleFilterer{contract: contract}, nil +} + +func bindOracle(address common.Address, caller bind.ContractCaller, transactor 
bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OracleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_Oracle *OracleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Oracle.Contract.OracleCaller.contract.Call(opts, result, method, params...) +} + +func (_Oracle *OracleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Oracle.Contract.OracleTransactor.contract.Transfer(opts) +} + +func (_Oracle *OracleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Oracle.Contract.OracleTransactor.contract.Transact(opts, method, params...) +} + +func (_Oracle *OracleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Oracle.Contract.contract.Call(opts, result, method, params...) +} + +func (_Oracle *OracleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Oracle.Contract.contract.Transfer(opts) +} + +func (_Oracle *OracleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Oracle.Contract.contract.Transact(opts, method, params...) 
+} + +func (_Oracle *OracleCaller) EXPIRYTIME(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Oracle.contract.Call(opts, &out, "EXPIRY_TIME") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_Oracle *OracleSession) EXPIRYTIME() (*big.Int, error) { + return _Oracle.Contract.EXPIRYTIME(&_Oracle.CallOpts) +} + +func (_Oracle *OracleCallerSession) EXPIRYTIME() (*big.Int, error) { + return _Oracle.Contract.EXPIRYTIME(&_Oracle.CallOpts) +} + +func (_Oracle *OracleCaller) GetAuthorizationStatus(opts *bind.CallOpts, _node common.Address) (bool, error) { + var out []interface{} + err := _Oracle.contract.Call(opts, &out, "getAuthorizationStatus", _node) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_Oracle *OracleSession) GetAuthorizationStatus(_node common.Address) (bool, error) { + return _Oracle.Contract.GetAuthorizationStatus(&_Oracle.CallOpts, _node) +} + +func (_Oracle *OracleCallerSession) GetAuthorizationStatus(_node common.Address) (bool, error) { + return _Oracle.Contract.GetAuthorizationStatus(&_Oracle.CallOpts, _node) +} + +func (_Oracle *OracleCaller) GetPluginToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Oracle.contract.Call(opts, &out, "getPluginToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_Oracle *OracleSession) GetPluginToken() (common.Address, error) { + return _Oracle.Contract.GetPluginToken(&_Oracle.CallOpts) +} + +func (_Oracle *OracleCallerSession) GetPluginToken() (common.Address, error) { + return _Oracle.Contract.GetPluginToken(&_Oracle.CallOpts) +} + +func (_Oracle *OracleCaller) IsOwner(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := 
_Oracle.contract.Call(opts, &out, "isOwner") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_Oracle *OracleSession) IsOwner() (bool, error) { + return _Oracle.Contract.IsOwner(&_Oracle.CallOpts) +} + +func (_Oracle *OracleCallerSession) IsOwner() (bool, error) { + return _Oracle.Contract.IsOwner(&_Oracle.CallOpts) +} + +func (_Oracle *OracleCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Oracle.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_Oracle *OracleSession) Owner() (common.Address, error) { + return _Oracle.Contract.Owner(&_Oracle.CallOpts) +} + +func (_Oracle *OracleCallerSession) Owner() (common.Address, error) { + return _Oracle.Contract.Owner(&_Oracle.CallOpts) +} + +func (_Oracle *OracleCaller) Withdrawable(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Oracle.contract.Call(opts, &out, "withdrawable") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_Oracle *OracleSession) Withdrawable() (*big.Int, error) { + return _Oracle.Contract.Withdrawable(&_Oracle.CallOpts) +} + +func (_Oracle *OracleCallerSession) Withdrawable() (*big.Int, error) { + return _Oracle.Contract.Withdrawable(&_Oracle.CallOpts) +} + +func (_Oracle *OracleTransactor) CancelOracleRequest(opts *bind.TransactOpts, _requestId [32]byte, _payment *big.Int, _callbackFunc [4]byte, _expiration *big.Int) (*types.Transaction, error) { + return _Oracle.contract.Transact(opts, "cancelOracleRequest", _requestId, _payment, _callbackFunc, _expiration) +} + +func (_Oracle *OracleSession) CancelOracleRequest(_requestId [32]byte, _payment *big.Int, _callbackFunc [4]byte, _expiration 
*big.Int) (*types.Transaction, error) { + return _Oracle.Contract.CancelOracleRequest(&_Oracle.TransactOpts, _requestId, _payment, _callbackFunc, _expiration) +} + +func (_Oracle *OracleTransactorSession) CancelOracleRequest(_requestId [32]byte, _payment *big.Int, _callbackFunc [4]byte, _expiration *big.Int) (*types.Transaction, error) { + return _Oracle.Contract.CancelOracleRequest(&_Oracle.TransactOpts, _requestId, _payment, _callbackFunc, _expiration) +} + +func (_Oracle *OracleTransactor) FulfillOracleRequest(opts *bind.TransactOpts, _requestId [32]byte, _payment *big.Int, _callbackAddress common.Address, _callbackFunctionId [4]byte, _expiration *big.Int, _data [32]byte) (*types.Transaction, error) { + return _Oracle.contract.Transact(opts, "fulfillOracleRequest", _requestId, _payment, _callbackAddress, _callbackFunctionId, _expiration, _data) +} + +func (_Oracle *OracleSession) FulfillOracleRequest(_requestId [32]byte, _payment *big.Int, _callbackAddress common.Address, _callbackFunctionId [4]byte, _expiration *big.Int, _data [32]byte) (*types.Transaction, error) { + return _Oracle.Contract.FulfillOracleRequest(&_Oracle.TransactOpts, _requestId, _payment, _callbackAddress, _callbackFunctionId, _expiration, _data) +} + +func (_Oracle *OracleTransactorSession) FulfillOracleRequest(_requestId [32]byte, _payment *big.Int, _callbackAddress common.Address, _callbackFunctionId [4]byte, _expiration *big.Int, _data [32]byte) (*types.Transaction, error) { + return _Oracle.Contract.FulfillOracleRequest(&_Oracle.TransactOpts, _requestId, _payment, _callbackAddress, _callbackFunctionId, _expiration, _data) +} + +func (_Oracle *OracleTransactor) OnTokenTransfer(opts *bind.TransactOpts, _sender common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _Oracle.contract.Transact(opts, "onTokenTransfer", _sender, _amount, _data) +} + +func (_Oracle *OracleSession) OnTokenTransfer(_sender common.Address, _amount *big.Int, _data []byte) 
(*types.Transaction, error) { + return _Oracle.Contract.OnTokenTransfer(&_Oracle.TransactOpts, _sender, _amount, _data) +} + +func (_Oracle *OracleTransactorSession) OnTokenTransfer(_sender common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _Oracle.Contract.OnTokenTransfer(&_Oracle.TransactOpts, _sender, _amount, _data) +} + +func (_Oracle *OracleTransactor) OracleRequest(opts *bind.TransactOpts, _sender common.Address, _payment *big.Int, _specId [32]byte, _callbackAddress common.Address, _callbackFunctionId [4]byte, _nonce *big.Int, _dataVersion *big.Int, _data []byte) (*types.Transaction, error) { + return _Oracle.contract.Transact(opts, "oracleRequest", _sender, _payment, _specId, _callbackAddress, _callbackFunctionId, _nonce, _dataVersion, _data) +} + +func (_Oracle *OracleSession) OracleRequest(_sender common.Address, _payment *big.Int, _specId [32]byte, _callbackAddress common.Address, _callbackFunctionId [4]byte, _nonce *big.Int, _dataVersion *big.Int, _data []byte) (*types.Transaction, error) { + return _Oracle.Contract.OracleRequest(&_Oracle.TransactOpts, _sender, _payment, _specId, _callbackAddress, _callbackFunctionId, _nonce, _dataVersion, _data) +} + +func (_Oracle *OracleTransactorSession) OracleRequest(_sender common.Address, _payment *big.Int, _specId [32]byte, _callbackAddress common.Address, _callbackFunctionId [4]byte, _nonce *big.Int, _dataVersion *big.Int, _data []byte) (*types.Transaction, error) { + return _Oracle.Contract.OracleRequest(&_Oracle.TransactOpts, _sender, _payment, _specId, _callbackAddress, _callbackFunctionId, _nonce, _dataVersion, _data) +} + +func (_Oracle *OracleTransactor) SetFulfillmentPermission(opts *bind.TransactOpts, _node common.Address, _allowed bool) (*types.Transaction, error) { + return _Oracle.contract.Transact(opts, "setFulfillmentPermission", _node, _allowed) +} + +func (_Oracle *OracleSession) SetFulfillmentPermission(_node common.Address, _allowed bool) (*types.Transaction, 
error) { + return _Oracle.Contract.SetFulfillmentPermission(&_Oracle.TransactOpts, _node, _allowed) +} + +func (_Oracle *OracleTransactorSession) SetFulfillmentPermission(_node common.Address, _allowed bool) (*types.Transaction, error) { + return _Oracle.Contract.SetFulfillmentPermission(&_Oracle.TransactOpts, _node, _allowed) +} + +func (_Oracle *OracleTransactor) TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) { + return _Oracle.contract.Transact(opts, "transferOwnership", newOwner) +} + +func (_Oracle *OracleSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _Oracle.Contract.TransferOwnership(&_Oracle.TransactOpts, newOwner) +} + +func (_Oracle *OracleTransactorSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _Oracle.Contract.TransferOwnership(&_Oracle.TransactOpts, newOwner) +} + +func (_Oracle *OracleTransactor) Withdraw(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _Oracle.contract.Transact(opts, "withdraw", _recipient, _amount) +} + +func (_Oracle *OracleSession) Withdraw(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _Oracle.Contract.Withdraw(&_Oracle.TransactOpts, _recipient, _amount) +} + +func (_Oracle *OracleTransactorSession) Withdraw(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _Oracle.Contract.Withdraw(&_Oracle.TransactOpts, _recipient, _amount) +} + +type OracleCancelOracleRequestIterator struct { + Event *OracleCancelOracleRequest + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OracleCancelOracleRequestIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OracleCancelOracleRequest) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OracleCancelOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OracleCancelOracleRequestIterator) Error() error { + return it.fail +} + +func (it *OracleCancelOracleRequestIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OracleCancelOracleRequest struct { + RequestId [32]byte + Raw types.Log +} + +func (_Oracle *OracleFilterer) FilterCancelOracleRequest(opts *bind.FilterOpts, requestId [][32]byte) (*OracleCancelOracleRequestIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _Oracle.contract.FilterLogs(opts, "CancelOracleRequest", requestIdRule) + if err != nil { + return nil, err + } + return &OracleCancelOracleRequestIterator{contract: _Oracle.contract, event: "CancelOracleRequest", logs: logs, sub: sub}, nil +} + +func (_Oracle *OracleFilterer) WatchCancelOracleRequest(opts *bind.WatchOpts, sink chan<- *OracleCancelOracleRequest, requestId [][32]byte) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _Oracle.contract.WatchLogs(opts, "CancelOracleRequest", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OracleCancelOracleRequest) + if err := _Oracle.contract.UnpackLog(event, "CancelOracleRequest", 
log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Oracle *OracleFilterer) ParseCancelOracleRequest(log types.Log) (*OracleCancelOracleRequest, error) { + event := new(OracleCancelOracleRequest) + if err := _Oracle.contract.UnpackLog(event, "CancelOracleRequest", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OracleOracleRequestIterator struct { + Event *OracleOracleRequest + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OracleOracleRequestIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OracleOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OracleOracleRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OracleOracleRequestIterator) Error() error { + return it.fail +} + +func (it *OracleOracleRequestIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OracleOracleRequest struct { + SpecId [32]byte + Requester common.Address + RequestId [32]byte + Payment *big.Int + CallbackAddr common.Address + CallbackFunctionId [4]byte + CancelExpiration *big.Int + DataVersion *big.Int + Data []byte + Raw types.Log +} + +func (_Oracle *OracleFilterer) FilterOracleRequest(opts *bind.FilterOpts, specId [][32]byte) 
(*OracleOracleRequestIterator, error) { + + var specIdRule []interface{} + for _, specIdItem := range specId { + specIdRule = append(specIdRule, specIdItem) + } + + logs, sub, err := _Oracle.contract.FilterLogs(opts, "OracleRequest", specIdRule) + if err != nil { + return nil, err + } + return &OracleOracleRequestIterator{contract: _Oracle.contract, event: "OracleRequest", logs: logs, sub: sub}, nil +} + +func (_Oracle *OracleFilterer) WatchOracleRequest(opts *bind.WatchOpts, sink chan<- *OracleOracleRequest, specId [][32]byte) (event.Subscription, error) { + + var specIdRule []interface{} + for _, specIdItem := range specId { + specIdRule = append(specIdRule, specIdItem) + } + + logs, sub, err := _Oracle.contract.WatchLogs(opts, "OracleRequest", specIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OracleOracleRequest) + if err := _Oracle.contract.UnpackLog(event, "OracleRequest", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Oracle *OracleFilterer) ParseOracleRequest(log types.Log) (*OracleOracleRequest, error) { + event := new(OracleOracleRequest) + if err := _Oracle.contract.UnpackLog(event, "OracleRequest", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OracleOwnershipTransferredIterator struct { + Event *OracleOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OracleOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OracleOwnershipTransferred) + if 
err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OracleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OracleOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *OracleOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OracleOwnershipTransferred struct { + PreviousOwner common.Address + NewOwner common.Address + Raw types.Log +} + +func (_Oracle *OracleFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*OracleOwnershipTransferredIterator, error) { + + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) + } + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) + } + + logs, sub, err := _Oracle.contract.FilterLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + if err != nil { + return nil, err + } + return &OracleOwnershipTransferredIterator{contract: _Oracle.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_Oracle *OracleFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OracleOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { + + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) + } + var newOwnerRule []interface{} + for _, 
newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) + } + + logs, sub, err := _Oracle.contract.WatchLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OracleOwnershipTransferred) + if err := _Oracle.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Oracle *OracleFilterer) ParseOwnershipTransferred(log types.Log) (*OracleOwnershipTransferred, error) { + event := new(OracleOwnershipTransferred) + if err := _Oracle.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_Oracle *Oracle) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _Oracle.abi.Events["CancelOracleRequest"].ID: + return _Oracle.ParseCancelOracleRequest(log) + case _Oracle.abi.Events["OracleRequest"].ID: + return _Oracle.ParseOracleRequest(log) + case _Oracle.abi.Events["OwnershipTransferred"].ID: + return _Oracle.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OracleCancelOracleRequest) Topic() common.Hash { + return common.HexToHash("0xa7842b9ec549398102c0d91b1b9919b2f20558aefdadf57528a95c6cd3292e93") +} + +func (OracleOracleRequest) Topic() common.Hash { + return common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65") +} + +func (OracleOwnershipTransferred) Topic() common.Hash { + return 
common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_Oracle *Oracle) Address() common.Address { + return _Oracle.address +} + +type OracleInterface interface { + EXPIRYTIME(opts *bind.CallOpts) (*big.Int, error) + + GetAuthorizationStatus(opts *bind.CallOpts, _node common.Address) (bool, error) + + GetPluginToken(opts *bind.CallOpts) (common.Address, error) + + IsOwner(opts *bind.CallOpts) (bool, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Withdrawable(opts *bind.CallOpts) (*big.Int, error) + + CancelOracleRequest(opts *bind.TransactOpts, _requestId [32]byte, _payment *big.Int, _callbackFunc [4]byte, _expiration *big.Int) (*types.Transaction, error) + + FulfillOracleRequest(opts *bind.TransactOpts, _requestId [32]byte, _payment *big.Int, _callbackAddress common.Address, _callbackFunctionId [4]byte, _expiration *big.Int, _data [32]byte) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, _sender common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) + + OracleRequest(opts *bind.TransactOpts, _sender common.Address, _payment *big.Int, _specId [32]byte, _callbackAddress common.Address, _callbackFunctionId [4]byte, _nonce *big.Int, _dataVersion *big.Int, _data []byte) (*types.Transaction, error) + + SetFulfillmentPermission(opts *bind.TransactOpts, _node common.Address, _allowed bool) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) + + Withdraw(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) + + FilterCancelOracleRequest(opts *bind.FilterOpts, requestId [][32]byte) (*OracleCancelOracleRequestIterator, error) + + WatchCancelOracleRequest(opts *bind.WatchOpts, sink chan<- *OracleCancelOracleRequest, requestId [][32]byte) (event.Subscription, error) + + ParseCancelOracleRequest(log types.Log) (*OracleCancelOracleRequest, error) + + 
FilterOracleRequest(opts *bind.FilterOpts, specId [][32]byte) (*OracleOracleRequestIterator, error) + + WatchOracleRequest(opts *bind.WatchOpts, sink chan<- *OracleOracleRequest, specId [][32]byte) (event.Subscription, error) + + ParseOracleRequest(log types.Log) (*OracleOracleRequest, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*OracleOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OracleOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*OracleOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/perform_data_checker_wrapper/perform_data_checker_wrapper.go b/core/gethwrappers/generated/perform_data_checker_wrapper/perform_data_checker_wrapper.go new file mode 100644 index 00000000..a678ad0a --- /dev/null +++ b/core/gethwrappers/generated/perform_data_checker_wrapper/perform_data_checker_wrapper.go @@ -0,0 +1,293 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package perform_data_checker_wrapper + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var PerformDataCheckerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"expectedData\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"counter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_expectedData\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"expectedData\",\"type\":\"bytes\"}],\"name\":\"setExpectedData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b506040516108d33803806108d383398101604081905261002f91610058565b600161003b82826101aa565b5050610269565b634e487b7160e01b600052604160045260246000fd5b6000602080838503121561006b57600080fd5b82516001600160401b038082111561008257600080fd5b818501915085601f83011261009657600080fd5b8151818111156100a8576100a8610042565b604051601f8201601f19908116603f011681019083821181831017156100d0576100d0610042565b8160405282815288868487010111156100e857600080fd5b600093505b8284101561010a57848401860151818501870152928501926100ed565b600086848301015280965050505050505092915050565b600181811c9082168061013557607f821691505b60208210810361015557634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156101a557600081815260208120601f850160051c810160208610156101825750805b601f850160051c820191505b818110156101a15782815560010161018e565b5050505b505050565b81516001600160401b038111156101c3576101c3610042565b6101d7816101d18454610121565b8461015b565b602080601f83116001811461020c57600084156101f45750858301515b600019600386901b1c1916600185901b1785556101a1565b600085815260208120601f198616915b8281101561023b5788860151825594840194600190910190840161021c565b50858210156102595787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b61065b806102786000396000f3fe608060405234801561001057600080fd5b50600436106100675760003560e01c806361bc221a1161005057806361bc221a146100945780636e04ff0d146100b05780638d1a93c2146100d157600080fd5b80632aa0f7951461006c5780634585e33b14610081575b600080fd5b61007f61007a36600461024d565b6100e6565b005b61007f61008f36600461024d565b6100f8565b61009d60005481565b6040519081526020015b60405180910390f35b6100c36100be36600461024d565b610145565b6040516100a7929190610323565b6100d96101bf565b6040516100a79190610346565b60016100f3828483610430565b505050565b6001604051610107919061054b565b6040518091039020828260405161011f9291906105df565b6040518091039020036101415760008054908061013b836105ef565b91905055505b5050565b600060606001604051610158919061054b565b604051809103902084846040516101709291906105d
f565b604051809103902014848481818080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250959a92995091975050505050505050565b600180546101cc9061038f565b80601f01602080910402602001604051908101604052809291908181526020018280546101f89061038f565b80156102455780601f1061021a57610100808354040283529160200191610245565b820191906000526020600020905b81548152906001019060200180831161022857829003601f168201915b505050505081565b6000806020838503121561026057600080fd5b823567ffffffffffffffff8082111561027857600080fd5b818501915085601f83011261028c57600080fd5b81358181111561029b57600080fd5b8660208285010111156102ad57600080fd5b60209290920196919550909350505050565b6000815180845260005b818110156102e5576020818501810151868301820152016102c9565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b821515815260406020820152600061033e60408301846102bf565b949350505050565b60208152600061035960208301846102bf565b9392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600181811c908216806103a357607f821691505b6020821081036103dc577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f8211156100f357600081815260208120601f850160051c810160208610156104095750805b601f850160051c820191505b8181101561042857828155600101610415565b505050505050565b67ffffffffffffffff83111561044857610448610360565b61045c83610456835461038f565b836103e2565b6000601f8411600181146104ae57600085156104785750838201355b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600387901b1c1916600186901b178355610544565b6000838152602090207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0861690835b828110156104fd57868501358255602094850194600190920191016104dd565b5086821015610538577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60f88860031b161c19848701351681555b505060018560011b0183555b5050505050565b600080835461055981610
38f565b6001828116801561057157600181146105a4576105d3565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00841687528215158302870194506105d3565b8760005260208060002060005b858110156105ca5781548a8201529084019082016105b1565b50505082870194505b50929695505050505050565b8183823760009101908152919050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610647577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b506001019056fea164736f6c6343000810000a", +} + +var PerformDataCheckerABI = PerformDataCheckerMetaData.ABI + +var PerformDataCheckerBin = PerformDataCheckerMetaData.Bin + +func DeployPerformDataChecker(auth *bind.TransactOpts, backend bind.ContractBackend, expectedData []byte) (common.Address, *types.Transaction, *PerformDataChecker, error) { + parsed, err := PerformDataCheckerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(PerformDataCheckerBin), backend, expectedData) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &PerformDataChecker{address: address, abi: *parsed, PerformDataCheckerCaller: PerformDataCheckerCaller{contract: contract}, PerformDataCheckerTransactor: PerformDataCheckerTransactor{contract: contract}, PerformDataCheckerFilterer: PerformDataCheckerFilterer{contract: contract}}, nil +} + +type PerformDataChecker struct { + address common.Address + abi abi.ABI + PerformDataCheckerCaller + PerformDataCheckerTransactor + PerformDataCheckerFilterer +} + +type PerformDataCheckerCaller struct { + contract *bind.BoundContract +} + +type PerformDataCheckerTransactor struct { + contract *bind.BoundContract +} + +type PerformDataCheckerFilterer struct { + contract *bind.BoundContract +} + +type PerformDataCheckerSession struct { + Contract 
*PerformDataChecker + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type PerformDataCheckerCallerSession struct { + Contract *PerformDataCheckerCaller + CallOpts bind.CallOpts +} + +type PerformDataCheckerTransactorSession struct { + Contract *PerformDataCheckerTransactor + TransactOpts bind.TransactOpts +} + +type PerformDataCheckerRaw struct { + Contract *PerformDataChecker +} + +type PerformDataCheckerCallerRaw struct { + Contract *PerformDataCheckerCaller +} + +type PerformDataCheckerTransactorRaw struct { + Contract *PerformDataCheckerTransactor +} + +func NewPerformDataChecker(address common.Address, backend bind.ContractBackend) (*PerformDataChecker, error) { + abi, err := abi.JSON(strings.NewReader(PerformDataCheckerABI)) + if err != nil { + return nil, err + } + contract, err := bindPerformDataChecker(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &PerformDataChecker{address: address, abi: abi, PerformDataCheckerCaller: PerformDataCheckerCaller{contract: contract}, PerformDataCheckerTransactor: PerformDataCheckerTransactor{contract: contract}, PerformDataCheckerFilterer: PerformDataCheckerFilterer{contract: contract}}, nil +} + +func NewPerformDataCheckerCaller(address common.Address, caller bind.ContractCaller) (*PerformDataCheckerCaller, error) { + contract, err := bindPerformDataChecker(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &PerformDataCheckerCaller{contract: contract}, nil +} + +func NewPerformDataCheckerTransactor(address common.Address, transactor bind.ContractTransactor) (*PerformDataCheckerTransactor, error) { + contract, err := bindPerformDataChecker(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &PerformDataCheckerTransactor{contract: contract}, nil +} + +func NewPerformDataCheckerFilterer(address common.Address, filterer bind.ContractFilterer) (*PerformDataCheckerFilterer, error) { + contract, err := 
bindPerformDataChecker(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &PerformDataCheckerFilterer{contract: contract}, nil +} + +func bindPerformDataChecker(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := PerformDataCheckerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_PerformDataChecker *PerformDataCheckerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _PerformDataChecker.Contract.PerformDataCheckerCaller.contract.Call(opts, result, method, params...) +} + +func (_PerformDataChecker *PerformDataCheckerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _PerformDataChecker.Contract.PerformDataCheckerTransactor.contract.Transfer(opts) +} + +func (_PerformDataChecker *PerformDataCheckerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _PerformDataChecker.Contract.PerformDataCheckerTransactor.contract.Transact(opts, method, params...) +} + +func (_PerformDataChecker *PerformDataCheckerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _PerformDataChecker.Contract.contract.Call(opts, result, method, params...) +} + +func (_PerformDataChecker *PerformDataCheckerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _PerformDataChecker.Contract.contract.Transfer(opts) +} + +func (_PerformDataChecker *PerformDataCheckerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _PerformDataChecker.Contract.contract.Transact(opts, method, params...) 
+} + +func (_PerformDataChecker *PerformDataCheckerCaller) CheckUpkeep(opts *bind.CallOpts, checkData []byte) (CheckUpkeep, + + error) { + var out []interface{} + err := _PerformDataChecker.contract.Call(opts, &out, "checkUpkeep", checkData) + + outstruct := new(CheckUpkeep) + if err != nil { + return *outstruct, err + } + + outstruct.UpkeepNeeded = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.PerformData = *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return *outstruct, err + +} + +func (_PerformDataChecker *PerformDataCheckerSession) CheckUpkeep(checkData []byte) (CheckUpkeep, + + error) { + return _PerformDataChecker.Contract.CheckUpkeep(&_PerformDataChecker.CallOpts, checkData) +} + +func (_PerformDataChecker *PerformDataCheckerCallerSession) CheckUpkeep(checkData []byte) (CheckUpkeep, + + error) { + return _PerformDataChecker.Contract.CheckUpkeep(&_PerformDataChecker.CallOpts, checkData) +} + +func (_PerformDataChecker *PerformDataCheckerCaller) Counter(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _PerformDataChecker.contract.Call(opts, &out, "counter") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_PerformDataChecker *PerformDataCheckerSession) Counter() (*big.Int, error) { + return _PerformDataChecker.Contract.Counter(&_PerformDataChecker.CallOpts) +} + +func (_PerformDataChecker *PerformDataCheckerCallerSession) Counter() (*big.Int, error) { + return _PerformDataChecker.Contract.Counter(&_PerformDataChecker.CallOpts) +} + +func (_PerformDataChecker *PerformDataCheckerCaller) SExpectedData(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _PerformDataChecker.contract.Call(opts, &out, "s_expectedData") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_PerformDataChecker 
*PerformDataCheckerSession) SExpectedData() ([]byte, error) { + return _PerformDataChecker.Contract.SExpectedData(&_PerformDataChecker.CallOpts) +} + +func (_PerformDataChecker *PerformDataCheckerCallerSession) SExpectedData() ([]byte, error) { + return _PerformDataChecker.Contract.SExpectedData(&_PerformDataChecker.CallOpts) +} + +func (_PerformDataChecker *PerformDataCheckerTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _PerformDataChecker.contract.Transact(opts, "performUpkeep", performData) +} + +func (_PerformDataChecker *PerformDataCheckerSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _PerformDataChecker.Contract.PerformUpkeep(&_PerformDataChecker.TransactOpts, performData) +} + +func (_PerformDataChecker *PerformDataCheckerTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _PerformDataChecker.Contract.PerformUpkeep(&_PerformDataChecker.TransactOpts, performData) +} + +func (_PerformDataChecker *PerformDataCheckerTransactor) SetExpectedData(opts *bind.TransactOpts, expectedData []byte) (*types.Transaction, error) { + return _PerformDataChecker.contract.Transact(opts, "setExpectedData", expectedData) +} + +func (_PerformDataChecker *PerformDataCheckerSession) SetExpectedData(expectedData []byte) (*types.Transaction, error) { + return _PerformDataChecker.Contract.SetExpectedData(&_PerformDataChecker.TransactOpts, expectedData) +} + +func (_PerformDataChecker *PerformDataCheckerTransactorSession) SetExpectedData(expectedData []byte) (*types.Transaction, error) { + return _PerformDataChecker.Contract.SetExpectedData(&_PerformDataChecker.TransactOpts, expectedData) +} + +type CheckUpkeep struct { + UpkeepNeeded bool + PerformData []byte +} + +func (_PerformDataChecker *PerformDataChecker) Address() common.Address { + return _PerformDataChecker.address +} + +type PerformDataCheckerInterface interface { + CheckUpkeep(opts 
*bind.CallOpts, checkData []byte) (CheckUpkeep, + + error) + + Counter(opts *bind.CallOpts) (*big.Int, error) + + SExpectedData(opts *bind.CallOpts) ([]byte, error) + + PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) + + SetExpectedData(opts *bind.TransactOpts, expectedData []byte) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/simple_log_upkeep_counter_wrapper/simple_log_upkeep_counter_wrapper.go b/core/gethwrappers/generated/simple_log_upkeep_counter_wrapper/simple_log_upkeep_counter_wrapper.go new file mode 100644 index 00000000..739bed9f --- /dev/null +++ b/core/gethwrappers/generated/simple_log_upkeep_counter_wrapper/simple_log_upkeep_counter_wrapper.go @@ -0,0 +1,573 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package simple_log_upkeep_counter_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type CheckData struct { + CheckBurnAmount *big.Int + PerformBurnAmount *big.Int + EventSig [32]byte +} + +type Log struct { + Index *big.Int + Timestamp *big.Int + TxHash [32]byte + BlockNumber *big.Int + BlockHash [32]byte + Source common.Address + Topics [][32]byte + Data []byte +} + +var SimpleLogUpkeepCounterMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"initialBlock\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"lastBlock\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"previousBlock\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"counter\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"timeToPerform\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"isRecovered\",\"type\":\"bool\"}],\"name\":\"PerformingUpkeep\",\"type\":\"event\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"checkBurnAmount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"performBurnAmount\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"eventSig\",\"type\":\"bytes32\"}],\"internalType\":\"structCheckData\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"_checkDataConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"txHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"source\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"topics\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"internalType\":\"structLog\",\"name\":\"log\",\"type\":\"tuple\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\
"bytes\"}],\"name\":\"checkLog\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"counter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"dummyMap\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"isRecovered\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"previousPerformBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"timeToPerform\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5060006002819055436001556003819055600455610d12806100336000396000f3fe608060405234801561001057600080fd5b50600436106100be5760003560e01c80637145f11b11610076578063917d895f1161005b578063917d895f1461016b578063c6066f0d14610174578063eb950ce71461017d57600080fd5b80637145f11b1461012f578063806b984f1461016257600080fd5b80634585e33b116100a75780634585e33b1461010057806361bc221a14610115578063697794731461011e57600080fd5b80632cb15864146100c357806340691db4146100df575b600080fd5b6100cc60035481565b6040519081526020015b60405180910390f35b6100f26100ed3660046106c6565b61018a565b6040516100d692919061092d565b61011361010e366004610628565b6102c2565b005b6100cc60045481565b61011361012c36600461066a565b50565b61015261013d36600461060f565b60006020819052908152604090205460ff1681565b60405190151581526020016100d6565b6100cc60015481565b6100cc60025481565b6100cc60055481565b6006546101529060ff1681565b6000606081808061019d8688018861083b565b92509250925060005a905060006101b5600143610c61565b40905060008515610224575b855a6101cd9085610c61565b1015610224578080156101ee575060008281526020819052604090205460ff165b604080516020810185905230918101919091529091506060016040516020818303038152906040528051906020012091506101c1565b8361023260c08d018d610a9d565b600281811061024357610243610ca7565b9050602002013514156102875760018b438c8c60405160200161026994939291906109aa565b604051602081830303815290604052975097505050505050506102ba565b60008b438c8c6040516020016102a094939291906109aa565b604051602081830303815290604052975097505050505050505b935093915050565b6003546102ce57436003555b4360019081556004546102e091610c49565b600455600154600255600080806102f984860186610738565b92509250925082602001514261030f9190610c61565b600555600680547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690556060830151821461037157600680547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790555b60008060008380602001905181019061038a9190610867565b92509250925060005a905060006103a2600143610c61565b40905060008415610411575b845a6103b
a9085610c61565b1015610411578080156103db575060008281526020819052604090205460ff165b604080516020810185905230918101919091529091506060016040516020818303038152906040528051906020012091506103ae565b600354600154600254600454600554600654604080519687526020870195909552938501929092526060840152608083015260ff16151560a082015232907f29eff4cb37911c3ea85db4630638cc5474fdd0631ec42215aef1d7ec96c8e63d9060c00160405180910390a25050505050505050505050565b803573ffffffffffffffffffffffffffffffffffffffff811681146104ad57600080fd5b919050565b600082601f8301126104c357600080fd5b8135602067ffffffffffffffff8211156104df576104df610cd6565b8160051b6104ee828201610b2f565b83815282810190868401838801850189101561050957600080fd5b600093505b8584101561052c57803583526001939093019291840191840161050e565b50979650505050505050565b60008083601f84011261054a57600080fd5b50813567ffffffffffffffff81111561056257600080fd5b60208301915083602082850101111561057a57600080fd5b9250929050565b600082601f83011261059257600080fd5b813567ffffffffffffffff8111156105ac576105ac610cd6565b6105dd60207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601610b2f565b8181528460208386010111156105f257600080fd5b816020850160208301376000918101602001919091529392505050565b60006020828403121561062157600080fd5b5035919050565b6000806020838503121561063b57600080fd5b823567ffffffffffffffff81111561065257600080fd5b61065e85828601610538565b90969095509350505050565b60006060828403121561067c57600080fd5b6040516060810181811067ffffffffffffffff8211171561069f5761069f610cd6565b80604052508235815260208301356020820152604083013560408201528091505092915050565b6000806000604084860312156106db57600080fd5b833567ffffffffffffffff808211156106f357600080fd5b90850190610100828803121561070857600080fd5b9093506020850135908082111561071e57600080fd5b5061072b86828701610538565b9497909650939450505050565b60008060006060848603121561074d57600080fd5b833567ffffffffffffffff8082111561076557600080fd5b90850190610100828803121561077a57600080fd5b610782610b05565b8235815260208301356020820152604083013560408201526
0608301356060820152608083013560808201526107ba60a08401610489565b60a082015260c0830135828111156107d157600080fd5b6107dd898286016104b2565b60c08301525060e0830135828111156107f557600080fd5b61080189828601610581565b60e083015250945060208601359350604086013591508082111561082457600080fd5b5061083186828701610581565b9150509250925092565b60008060006060848603121561085057600080fd5b505081359360208301359350604090920135919050565b60008060006060848603121561087c57600080fd5b8351925060208401519150604084015190509250925092565b81835260007f07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8311156108c757600080fd5b8260051b8083602087013760009401602001938452509192915050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b821515815260006020604081840152835180604085015260005b8181101561096357858101830151858201606001528201610947565b81811115610975576000606083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01692909201606001949350505050565b606081528435606082015260208501356080820152604085013560a0820152606085013560c0820152608085013560e082015260006109eb60a08701610489565b61010073ffffffffffffffffffffffffffffffffffffffff821681850152610a1660c0890189610b7e565b925081610120860152610a2e61016086018483610895565b92505050610a3f60e0880188610be5565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa085840301610140860152610a758382846108e4565b925050508560208401528281036040840152610a928185876108e4565b979650505050505050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610ad257600080fd5b83018035915067ffffffffffffffff821115610aed57600080fd5b6020019150600581901b360382131561057a57600080fd5b604051610100810167ffffffffffffffff81118282101715610b2957610b29610cd6565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610b7657610b76610cd6565b604052919050565b600080833
57fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610bb357600080fd5b830160208101925035905067ffffffffffffffff811115610bd357600080fd5b8060051b360383131561057a57600080fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610c1a57600080fd5b830160208101925035905067ffffffffffffffff811115610c3a57600080fd5b80360383131561057a57600080fd5b60008219821115610c5c57610c5c610c78565b500190565b600082821015610c7357610c73610c78565b500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var SimpleLogUpkeepCounterABI = SimpleLogUpkeepCounterMetaData.ABI + +var SimpleLogUpkeepCounterBin = SimpleLogUpkeepCounterMetaData.Bin + +func DeploySimpleLogUpkeepCounter(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *SimpleLogUpkeepCounter, error) { + parsed, err := SimpleLogUpkeepCounterMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(SimpleLogUpkeepCounterBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &SimpleLogUpkeepCounter{address: address, abi: *parsed, SimpleLogUpkeepCounterCaller: SimpleLogUpkeepCounterCaller{contract: contract}, SimpleLogUpkeepCounterTransactor: SimpleLogUpkeepCounterTransactor{contract: contract}, SimpleLogUpkeepCounterFilterer: SimpleLogUpkeepCounterFilterer{contract: contract}}, nil +} + +type SimpleLogUpkeepCounter struct { + address common.Address + abi abi.ABI + SimpleLogUpkeepCounterCaller + SimpleLogUpkeepCounterTransactor + 
SimpleLogUpkeepCounterFilterer +} + +type SimpleLogUpkeepCounterCaller struct { + contract *bind.BoundContract +} + +type SimpleLogUpkeepCounterTransactor struct { + contract *bind.BoundContract +} + +type SimpleLogUpkeepCounterFilterer struct { + contract *bind.BoundContract +} + +type SimpleLogUpkeepCounterSession struct { + Contract *SimpleLogUpkeepCounter + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type SimpleLogUpkeepCounterCallerSession struct { + Contract *SimpleLogUpkeepCounterCaller + CallOpts bind.CallOpts +} + +type SimpleLogUpkeepCounterTransactorSession struct { + Contract *SimpleLogUpkeepCounterTransactor + TransactOpts bind.TransactOpts +} + +type SimpleLogUpkeepCounterRaw struct { + Contract *SimpleLogUpkeepCounter +} + +type SimpleLogUpkeepCounterCallerRaw struct { + Contract *SimpleLogUpkeepCounterCaller +} + +type SimpleLogUpkeepCounterTransactorRaw struct { + Contract *SimpleLogUpkeepCounterTransactor +} + +func NewSimpleLogUpkeepCounter(address common.Address, backend bind.ContractBackend) (*SimpleLogUpkeepCounter, error) { + abi, err := abi.JSON(strings.NewReader(SimpleLogUpkeepCounterABI)) + if err != nil { + return nil, err + } + contract, err := bindSimpleLogUpkeepCounter(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &SimpleLogUpkeepCounter{address: address, abi: abi, SimpleLogUpkeepCounterCaller: SimpleLogUpkeepCounterCaller{contract: contract}, SimpleLogUpkeepCounterTransactor: SimpleLogUpkeepCounterTransactor{contract: contract}, SimpleLogUpkeepCounterFilterer: SimpleLogUpkeepCounterFilterer{contract: contract}}, nil +} + +func NewSimpleLogUpkeepCounterCaller(address common.Address, caller bind.ContractCaller) (*SimpleLogUpkeepCounterCaller, error) { + contract, err := bindSimpleLogUpkeepCounter(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &SimpleLogUpkeepCounterCaller{contract: contract}, nil +} + +func NewSimpleLogUpkeepCounterTransactor(address 
common.Address, transactor bind.ContractTransactor) (*SimpleLogUpkeepCounterTransactor, error) { + contract, err := bindSimpleLogUpkeepCounter(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &SimpleLogUpkeepCounterTransactor{contract: contract}, nil +} + +func NewSimpleLogUpkeepCounterFilterer(address common.Address, filterer bind.ContractFilterer) (*SimpleLogUpkeepCounterFilterer, error) { + contract, err := bindSimpleLogUpkeepCounter(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &SimpleLogUpkeepCounterFilterer{contract: contract}, nil +} + +func bindSimpleLogUpkeepCounter(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := SimpleLogUpkeepCounterMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _SimpleLogUpkeepCounter.Contract.SimpleLogUpkeepCounterCaller.contract.Call(opts, result, method, params...) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _SimpleLogUpkeepCounter.Contract.SimpleLogUpkeepCounterTransactor.contract.Transfer(opts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _SimpleLogUpkeepCounter.Contract.SimpleLogUpkeepCounterTransactor.contract.Transact(opts, method, params...) 
+} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _SimpleLogUpkeepCounter.Contract.contract.Call(opts, result, method, params...) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _SimpleLogUpkeepCounter.Contract.contract.Transfer(opts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _SimpleLogUpkeepCounter.Contract.contract.Transact(opts, method, params...) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCaller) CheckLog(opts *bind.CallOpts, log Log, checkData []byte) (bool, []byte, error) { + var out []interface{} + err := _SimpleLogUpkeepCounter.contract.Call(opts, &out, "checkLog", log, checkData) + + if err != nil { + return *new(bool), *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out1 := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return out0, out1, err + +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterSession) CheckLog(log Log, checkData []byte) (bool, []byte, error) { + return _SimpleLogUpkeepCounter.Contract.CheckLog(&_SimpleLogUpkeepCounter.CallOpts, log, checkData) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCallerSession) CheckLog(log Log, checkData []byte) (bool, []byte, error) { + return _SimpleLogUpkeepCounter.Contract.CheckLog(&_SimpleLogUpkeepCounter.CallOpts, log, checkData) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCaller) Counter(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _SimpleLogUpkeepCounter.contract.Call(opts, &out, "counter") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + 
+} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterSession) Counter() (*big.Int, error) { + return _SimpleLogUpkeepCounter.Contract.Counter(&_SimpleLogUpkeepCounter.CallOpts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCallerSession) Counter() (*big.Int, error) { + return _SimpleLogUpkeepCounter.Contract.Counter(&_SimpleLogUpkeepCounter.CallOpts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCaller) DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) { + var out []interface{} + err := _SimpleLogUpkeepCounter.contract.Call(opts, &out, "dummyMap", arg0) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterSession) DummyMap(arg0 [32]byte) (bool, error) { + return _SimpleLogUpkeepCounter.Contract.DummyMap(&_SimpleLogUpkeepCounter.CallOpts, arg0) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCallerSession) DummyMap(arg0 [32]byte) (bool, error) { + return _SimpleLogUpkeepCounter.Contract.DummyMap(&_SimpleLogUpkeepCounter.CallOpts, arg0) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCaller) InitialBlock(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _SimpleLogUpkeepCounter.contract.Call(opts, &out, "initialBlock") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterSession) InitialBlock() (*big.Int, error) { + return _SimpleLogUpkeepCounter.Contract.InitialBlock(&_SimpleLogUpkeepCounter.CallOpts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCallerSession) InitialBlock() (*big.Int, error) { + return _SimpleLogUpkeepCounter.Contract.InitialBlock(&_SimpleLogUpkeepCounter.CallOpts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCaller) IsRecovered(opts *bind.CallOpts) (bool, 
error) { + var out []interface{} + err := _SimpleLogUpkeepCounter.contract.Call(opts, &out, "isRecovered") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterSession) IsRecovered() (bool, error) { + return _SimpleLogUpkeepCounter.Contract.IsRecovered(&_SimpleLogUpkeepCounter.CallOpts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCallerSession) IsRecovered() (bool, error) { + return _SimpleLogUpkeepCounter.Contract.IsRecovered(&_SimpleLogUpkeepCounter.CallOpts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCaller) LastBlock(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _SimpleLogUpkeepCounter.contract.Call(opts, &out, "lastBlock") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterSession) LastBlock() (*big.Int, error) { + return _SimpleLogUpkeepCounter.Contract.LastBlock(&_SimpleLogUpkeepCounter.CallOpts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCallerSession) LastBlock() (*big.Int, error) { + return _SimpleLogUpkeepCounter.Contract.LastBlock(&_SimpleLogUpkeepCounter.CallOpts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCaller) PreviousPerformBlock(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _SimpleLogUpkeepCounter.contract.Call(opts, &out, "previousPerformBlock") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterSession) PreviousPerformBlock() (*big.Int, error) { + return _SimpleLogUpkeepCounter.Contract.PreviousPerformBlock(&_SimpleLogUpkeepCounter.CallOpts) +} + +func (_SimpleLogUpkeepCounter 
*SimpleLogUpkeepCounterCallerSession) PreviousPerformBlock() (*big.Int, error) { + return _SimpleLogUpkeepCounter.Contract.PreviousPerformBlock(&_SimpleLogUpkeepCounter.CallOpts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCaller) TimeToPerform(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _SimpleLogUpkeepCounter.contract.Call(opts, &out, "timeToPerform") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterSession) TimeToPerform() (*big.Int, error) { + return _SimpleLogUpkeepCounter.Contract.TimeToPerform(&_SimpleLogUpkeepCounter.CallOpts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterCallerSession) TimeToPerform() (*big.Int, error) { + return _SimpleLogUpkeepCounter.Contract.TimeToPerform(&_SimpleLogUpkeepCounter.CallOpts) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterTransactor) CheckDataConfig(opts *bind.TransactOpts, arg0 CheckData) (*types.Transaction, error) { + return _SimpleLogUpkeepCounter.contract.Transact(opts, "_checkDataConfig", arg0) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterSession) CheckDataConfig(arg0 CheckData) (*types.Transaction, error) { + return _SimpleLogUpkeepCounter.Contract.CheckDataConfig(&_SimpleLogUpkeepCounter.TransactOpts, arg0) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterTransactorSession) CheckDataConfig(arg0 CheckData) (*types.Transaction, error) { + return _SimpleLogUpkeepCounter.Contract.CheckDataConfig(&_SimpleLogUpkeepCounter.TransactOpts, arg0) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _SimpleLogUpkeepCounter.contract.Transact(opts, "performUpkeep", performData) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterSession) PerformUpkeep(performData 
[]byte) (*types.Transaction, error) { + return _SimpleLogUpkeepCounter.Contract.PerformUpkeep(&_SimpleLogUpkeepCounter.TransactOpts, performData) +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _SimpleLogUpkeepCounter.Contract.PerformUpkeep(&_SimpleLogUpkeepCounter.TransactOpts, performData) +} + +type SimpleLogUpkeepCounterPerformingUpkeepIterator struct { + Event *SimpleLogUpkeepCounterPerformingUpkeep + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *SimpleLogUpkeepCounterPerformingUpkeepIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(SimpleLogUpkeepCounterPerformingUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(SimpleLogUpkeepCounterPerformingUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *SimpleLogUpkeepCounterPerformingUpkeepIterator) Error() error { + return it.fail +} + +func (it *SimpleLogUpkeepCounterPerformingUpkeepIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type SimpleLogUpkeepCounterPerformingUpkeep struct { + From common.Address + InitialBlock *big.Int + LastBlock *big.Int + PreviousBlock *big.Int + Counter *big.Int + TimeToPerform *big.Int + IsRecovered bool + Raw types.Log +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterFilterer) FilterPerformingUpkeep(opts *bind.FilterOpts, from []common.Address) 
(*SimpleLogUpkeepCounterPerformingUpkeepIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _SimpleLogUpkeepCounter.contract.FilterLogs(opts, "PerformingUpkeep", fromRule) + if err != nil { + return nil, err + } + return &SimpleLogUpkeepCounterPerformingUpkeepIterator{contract: _SimpleLogUpkeepCounter.contract, event: "PerformingUpkeep", logs: logs, sub: sub}, nil +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterFilterer) WatchPerformingUpkeep(opts *bind.WatchOpts, sink chan<- *SimpleLogUpkeepCounterPerformingUpkeep, from []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _SimpleLogUpkeepCounter.contract.WatchLogs(opts, "PerformingUpkeep", fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(SimpleLogUpkeepCounterPerformingUpkeep) + if err := _SimpleLogUpkeepCounter.contract.UnpackLog(event, "PerformingUpkeep", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounterFilterer) ParsePerformingUpkeep(log types.Log) (*SimpleLogUpkeepCounterPerformingUpkeep, error) { + event := new(SimpleLogUpkeepCounterPerformingUpkeep) + if err := _SimpleLogUpkeepCounter.contract.UnpackLog(event, "PerformingUpkeep", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounter) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case 
_SimpleLogUpkeepCounter.abi.Events["PerformingUpkeep"].ID: + return _SimpleLogUpkeepCounter.ParsePerformingUpkeep(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (SimpleLogUpkeepCounterPerformingUpkeep) Topic() common.Hash { + return common.HexToHash("0x29eff4cb37911c3ea85db4630638cc5474fdd0631ec42215aef1d7ec96c8e63d") +} + +func (_SimpleLogUpkeepCounter *SimpleLogUpkeepCounter) Address() common.Address { + return _SimpleLogUpkeepCounter.address +} + +type SimpleLogUpkeepCounterInterface interface { + CheckLog(opts *bind.CallOpts, log Log, checkData []byte) (bool, []byte, error) + + Counter(opts *bind.CallOpts) (*big.Int, error) + + DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) + + InitialBlock(opts *bind.CallOpts) (*big.Int, error) + + IsRecovered(opts *bind.CallOpts) (bool, error) + + LastBlock(opts *bind.CallOpts) (*big.Int, error) + + PreviousPerformBlock(opts *bind.CallOpts) (*big.Int, error) + + TimeToPerform(opts *bind.CallOpts) (*big.Int, error) + + CheckDataConfig(opts *bind.TransactOpts, arg0 CheckData) (*types.Transaction, error) + + PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) + + FilterPerformingUpkeep(opts *bind.FilterOpts, from []common.Address) (*SimpleLogUpkeepCounterPerformingUpkeepIterator, error) + + WatchPerformingUpkeep(opts *bind.WatchOpts, sink chan<- *SimpleLogUpkeepCounterPerformingUpkeep, from []common.Address) (event.Subscription, error) + + ParsePerformingUpkeep(log types.Log) (*SimpleLogUpkeepCounterPerformingUpkeep, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/solidity_vrf_consumer_interface/solidity_vrf_consumer_interface.go b/core/gethwrappers/generated/solidity_vrf_consumer_interface/solidity_vrf_consumer_interface.go new file mode 100644 index 00000000..8c815be3 --- /dev/null +++ 
b/core/gethwrappers/generated/solidity_vrf_consumer_interface/solidity_vrf_consumer_interface.go @@ -0,0 +1,278 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package solidity_vrf_consumer_interface + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFConsumerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"currentRoundID\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"randomnessOutput\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"randomness\",\"type\":\"uint256\"}],\"name\":\"rawFulfillRandomness\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"requestId\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\
":\"_fee\",\"type\":\"uint256\"}],\"name\":\"testRequestRandomness\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60c0604052600060015534801561001557600080fd5b506040516105363803806105368339818101604052604081101561003857600080fd5b5080516020909101516001600160601b0319606092831b811660a052911b1660805260805160601c60a05160601c6104ae6100886000398061011452806101f45250806101b852506104ae6000f3fe608060405234801561001057600080fd5b50600436106100665760003560e01c8063866ee74811610050578063866ee7481461008d57806394985ddd146100b0578063a312c4f2146100d557610066565b80626d6cae1461006b5780632f47fd8614610085575b600080fd5b6100736100dd565b60408051918252519081900360200190f35b6100736100e3565b610073600480360360408110156100a357600080fd5b50803590602001356100e9565b6100d3600480360360408110156100c657600080fd5b50803590602001356100fc565b005b6100736101ae565b60035481565b60025481565b60006100f583836101b4565b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146101a057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f4f6e6c7920565246436f6f7264696e61746f722063616e2066756c66696c6c00604482015290519081900360640190fd5b6101aa828261039d565b5050565b60015481565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16634000aea07f00000000000000000000000000000000000000000000000000000000000000008486600060405160200180838152602001828152602001925050506040516020818303038152906040526040518463ffffffff1660e01b8152600401808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b838110156102c15781810151838201526020016102a9565b50505050905090810190601f1680156102ee5780820380516001836020036101000a0319168152602001915
05b50945050505050602060405180830381600087803b15801561030f57600080fd5b505af1158015610323573d6000803e3d6000fd5b505050506040513d602081101561033957600080fd5b5050600083815260208190526040812054610359908590839030906103ad565b60008581526020819052604090205490915061037c90600163ffffffff61040116565b6000858152602081905260409020556103958482610475565b949350505050565b6002556003556001805481019055565b604080516020808201969096528082019490945273ffffffffffffffffffffffffffffffffffffffff9290921660608401526080808401919091528151808403909101815260a09092019052805191012090565b6000828201838110156100f557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b60408051602080820194909452808201929092528051808303820181526060909201905280519101209056fea164736f6c6343000606000a", +} + +var VRFConsumerABI = VRFConsumerMetaData.ABI + +var VRFConsumerBin = VRFConsumerMetaData.Bin + +func DeployVRFConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, _vrfCoordinator common.Address, _link common.Address) (common.Address, *types.Transaction, *VRFConsumer, error) { + parsed, err := VRFConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFConsumerBin), backend, _vrfCoordinator, _link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFConsumer{address: address, abi: *parsed, VRFConsumerCaller: VRFConsumerCaller{contract: contract}, VRFConsumerTransactor: VRFConsumerTransactor{contract: contract}, VRFConsumerFilterer: VRFConsumerFilterer{contract: contract}}, nil +} + +type VRFConsumer struct { + address common.Address + abi abi.ABI + VRFConsumerCaller + VRFConsumerTransactor + VRFConsumerFilterer +} + 
+type VRFConsumerCaller struct { + contract *bind.BoundContract +} + +type VRFConsumerTransactor struct { + contract *bind.BoundContract +} + +type VRFConsumerFilterer struct { + contract *bind.BoundContract +} + +type VRFConsumerSession struct { + Contract *VRFConsumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFConsumerCallerSession struct { + Contract *VRFConsumerCaller + CallOpts bind.CallOpts +} + +type VRFConsumerTransactorSession struct { + Contract *VRFConsumerTransactor + TransactOpts bind.TransactOpts +} + +type VRFConsumerRaw struct { + Contract *VRFConsumer +} + +type VRFConsumerCallerRaw struct { + Contract *VRFConsumerCaller +} + +type VRFConsumerTransactorRaw struct { + Contract *VRFConsumerTransactor +} + +func NewVRFConsumer(address common.Address, backend bind.ContractBackend) (*VRFConsumer, error) { + abi, err := abi.JSON(strings.NewReader(VRFConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFConsumer{address: address, abi: abi, VRFConsumerCaller: VRFConsumerCaller{contract: contract}, VRFConsumerTransactor: VRFConsumerTransactor{contract: contract}, VRFConsumerFilterer: VRFConsumerFilterer{contract: contract}}, nil +} + +func NewVRFConsumerCaller(address common.Address, caller bind.ContractCaller) (*VRFConsumerCaller, error) { + contract, err := bindVRFConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFConsumerCaller{contract: contract}, nil +} + +func NewVRFConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFConsumerTransactor, error) { + contract, err := bindVRFConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFConsumerTransactor{contract: contract}, nil +} + +func NewVRFConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFConsumerFilterer, error) { + 
contract, err := bindVRFConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFConsumerFilterer{contract: contract}, nil +} + +func bindVRFConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFConsumer *VRFConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFConsumer.Contract.VRFConsumerCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFConsumer *VRFConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFConsumer.Contract.VRFConsumerTransactor.contract.Transfer(opts) +} + +func (_VRFConsumer *VRFConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFConsumer.Contract.VRFConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFConsumer *VRFConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFConsumer.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFConsumer *VRFConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFConsumer.Contract.contract.Transfer(opts) +} + +func (_VRFConsumer *VRFConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFConsumer.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFConsumer *VRFConsumerCaller) CurrentRoundID(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFConsumer.contract.Call(opts, &out, "currentRoundID") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumer *VRFConsumerSession) CurrentRoundID() (*big.Int, error) { + return _VRFConsumer.Contract.CurrentRoundID(&_VRFConsumer.CallOpts) +} + +func (_VRFConsumer *VRFConsumerCallerSession) CurrentRoundID() (*big.Int, error) { + return _VRFConsumer.Contract.CurrentRoundID(&_VRFConsumer.CallOpts) +} + +func (_VRFConsumer *VRFConsumerCaller) RandomnessOutput(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFConsumer.contract.Call(opts, &out, "randomnessOutput") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumer *VRFConsumerSession) RandomnessOutput() (*big.Int, error) { + return _VRFConsumer.Contract.RandomnessOutput(&_VRFConsumer.CallOpts) +} + +func (_VRFConsumer *VRFConsumerCallerSession) RandomnessOutput() (*big.Int, error) { + return _VRFConsumer.Contract.RandomnessOutput(&_VRFConsumer.CallOpts) +} + +func (_VRFConsumer *VRFConsumerCaller) RequestId(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _VRFConsumer.contract.Call(opts, &out, "requestId") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFConsumer *VRFConsumerSession) RequestId() ([32]byte, error) { + return _VRFConsumer.Contract.RequestId(&_VRFConsumer.CallOpts) +} + +func (_VRFConsumer *VRFConsumerCallerSession) RequestId() ([32]byte, error) { + return _VRFConsumer.Contract.RequestId(&_VRFConsumer.CallOpts) +} + +func (_VRFConsumer *VRFConsumerTransactor) RawFulfillRandomness(opts 
*bind.TransactOpts, requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFConsumer.contract.Transact(opts, "rawFulfillRandomness", requestId, randomness) +} + +func (_VRFConsumer *VRFConsumerSession) RawFulfillRandomness(requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFConsumer.Contract.RawFulfillRandomness(&_VRFConsumer.TransactOpts, requestId, randomness) +} + +func (_VRFConsumer *VRFConsumerTransactorSession) RawFulfillRandomness(requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFConsumer.Contract.RawFulfillRandomness(&_VRFConsumer.TransactOpts, requestId, randomness) +} + +func (_VRFConsumer *VRFConsumerTransactor) TestRequestRandomness(opts *bind.TransactOpts, _keyHash [32]byte, _fee *big.Int) (*types.Transaction, error) { + return _VRFConsumer.contract.Transact(opts, "testRequestRandomness", _keyHash, _fee) +} + +func (_VRFConsumer *VRFConsumerSession) TestRequestRandomness(_keyHash [32]byte, _fee *big.Int) (*types.Transaction, error) { + return _VRFConsumer.Contract.TestRequestRandomness(&_VRFConsumer.TransactOpts, _keyHash, _fee) +} + +func (_VRFConsumer *VRFConsumerTransactorSession) TestRequestRandomness(_keyHash [32]byte, _fee *big.Int) (*types.Transaction, error) { + return _VRFConsumer.Contract.TestRequestRandomness(&_VRFConsumer.TransactOpts, _keyHash, _fee) +} + +func (_VRFConsumer *VRFConsumer) Address() common.Address { + return _VRFConsumer.address +} + +type VRFConsumerInterface interface { + CurrentRoundID(opts *bind.CallOpts) (*big.Int, error) + + RandomnessOutput(opts *bind.CallOpts) (*big.Int, error) + + RequestId(opts *bind.CallOpts) ([32]byte, error) + + RawFulfillRandomness(opts *bind.TransactOpts, requestId [32]byte, randomness *big.Int) (*types.Transaction, error) + + TestRequestRandomness(opts *bind.TransactOpts, _keyHash [32]byte, _fee *big.Int) (*types.Transaction, error) + + Address() common.Address +} diff --git 
a/core/gethwrappers/generated/solidity_vrf_consumer_interface_v08/solidity_vrf_consumer_interface_v08.go b/core/gethwrappers/generated/solidity_vrf_consumer_interface_v08/solidity_vrf_consumer_interface_v08.go new file mode 100644 index 00000000..b3267a17 --- /dev/null +++ b/core/gethwrappers/generated/solidity_vrf_consumer_interface_v08/solidity_vrf_consumer_interface_v08.go @@ -0,0 +1,254 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package solidity_vrf_consumer_interface_v08 + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFConsumerMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"doRequestRandomness\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"randomnessOutput\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"randomness\",\"type\":\"uint256\"}],\"name\":\"rawFulfillRandomness\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"requestId\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x60c060405234801561001057600080fd5b506040516104e33803806104e383398101604081905261002f91610069565b6001600160601b0319606092831b811660a052911b1660805261009c565b80516001600160a01b038116811461006457600080fd5b919050565b6000806040838503121561007c57600080fd5b6100858361004d565b91506100936020840161004d565b90509250929050565b60805160601c60a05160601c6104166100cd6000396000818160c701526101980152600061015c01526104166000f3fe608060405234801561001057600080fd5b506004361061004b5760003560e01c80626d6cae146100505780631a8da9761461006b5780632f47fd861461007e57806394985ddd14610087575b600080fd5b61005960025481565b60405190815260200160405180910390f35b610059610079366004610310565b61009c565b61005960015481565b61009a610095366004610310565b6100af565b005b60006100a88383610158565b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610152576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f4f6e6c7920565246436f6f7264696e61746f722063616e2066756c66696c6c00604482015260640160405180910390fd5b60015550565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16634000aea07f0000000000000000000000000000000000000000000000000000000000000000848660006040516020016101d5929190918252602082015260400190565b6040516020818303038152906040526040518463ffffffff1660e01b815260040161020293929190610332565b602060405180830381600087803b15801561021c57600080fd5b505af1158015610230573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061025491906102ee565b5060008381526020818152604080832054815180840188905280830185905230606082015260808082018390528351808303909101815260a0909101909252815191830191909120868452929091526102ae9060016103ca565b6000858152602081815260409182902092909255805180830187905280820184905281518082038301815260609091019091528051910120949350505050565b60006020828403121561030057600080fd5b815180151581146100a857600080fd5b60008060408385031
21561032357600080fd5b50508035926020909101359150565b73ffffffffffffffffffffffffffffffffffffffff8416815260006020848184015260606040840152835180606085015260005b8181101561038257858101830151858201608001528201610366565b81811115610394576000608083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160800195945050505050565b60008219821115610404577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b50019056fea164736f6c6343000806000a", +} + +var VRFConsumerABI = VRFConsumerMetaData.ABI + +var VRFConsumerBin = VRFConsumerMetaData.Bin + +func DeployVRFConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator common.Address, link common.Address) (common.Address, *types.Transaction, *VRFConsumer, error) { + parsed, err := VRFConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFConsumerBin), backend, vrfCoordinator, link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFConsumer{address: address, abi: *parsed, VRFConsumerCaller: VRFConsumerCaller{contract: contract}, VRFConsumerTransactor: VRFConsumerTransactor{contract: contract}, VRFConsumerFilterer: VRFConsumerFilterer{contract: contract}}, nil +} + +type VRFConsumer struct { + address common.Address + abi abi.ABI + VRFConsumerCaller + VRFConsumerTransactor + VRFConsumerFilterer +} + +type VRFConsumerCaller struct { + contract *bind.BoundContract +} + +type VRFConsumerTransactor struct { + contract *bind.BoundContract +} + +type VRFConsumerFilterer struct { + contract *bind.BoundContract +} + +type VRFConsumerSession struct { + Contract *VRFConsumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFConsumerCallerSession struct { + Contract 
*VRFConsumerCaller + CallOpts bind.CallOpts +} + +type VRFConsumerTransactorSession struct { + Contract *VRFConsumerTransactor + TransactOpts bind.TransactOpts +} + +type VRFConsumerRaw struct { + Contract *VRFConsumer +} + +type VRFConsumerCallerRaw struct { + Contract *VRFConsumerCaller +} + +type VRFConsumerTransactorRaw struct { + Contract *VRFConsumerTransactor +} + +func NewVRFConsumer(address common.Address, backend bind.ContractBackend) (*VRFConsumer, error) { + abi, err := abi.JSON(strings.NewReader(VRFConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFConsumer{address: address, abi: abi, VRFConsumerCaller: VRFConsumerCaller{contract: contract}, VRFConsumerTransactor: VRFConsumerTransactor{contract: contract}, VRFConsumerFilterer: VRFConsumerFilterer{contract: contract}}, nil +} + +func NewVRFConsumerCaller(address common.Address, caller bind.ContractCaller) (*VRFConsumerCaller, error) { + contract, err := bindVRFConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFConsumerCaller{contract: contract}, nil +} + +func NewVRFConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFConsumerTransactor, error) { + contract, err := bindVRFConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFConsumerTransactor{contract: contract}, nil +} + +func NewVRFConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFConsumerFilterer, error) { + contract, err := bindVRFConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFConsumerFilterer{contract: contract}, nil +} + +func bindVRFConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFConsumerMetaData.GetAbi() + if err 
!= nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFConsumer *VRFConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFConsumer.Contract.VRFConsumerCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFConsumer *VRFConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFConsumer.Contract.VRFConsumerTransactor.contract.Transfer(opts) +} + +func (_VRFConsumer *VRFConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFConsumer.Contract.VRFConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFConsumer *VRFConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFConsumer.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFConsumer *VRFConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFConsumer.Contract.contract.Transfer(opts) +} + +func (_VRFConsumer *VRFConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFConsumer.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFConsumer *VRFConsumerCaller) RandomnessOutput(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFConsumer.contract.Call(opts, &out, "randomnessOutput") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumer *VRFConsumerSession) RandomnessOutput() (*big.Int, error) { + return _VRFConsumer.Contract.RandomnessOutput(&_VRFConsumer.CallOpts) +} + +func (_VRFConsumer *VRFConsumerCallerSession) RandomnessOutput() (*big.Int, error) { + return _VRFConsumer.Contract.RandomnessOutput(&_VRFConsumer.CallOpts) +} + +func (_VRFConsumer *VRFConsumerCaller) RequestId(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _VRFConsumer.contract.Call(opts, &out, "requestId") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFConsumer *VRFConsumerSession) RequestId() ([32]byte, error) { + return _VRFConsumer.Contract.RequestId(&_VRFConsumer.CallOpts) +} + +func (_VRFConsumer *VRFConsumerCallerSession) RequestId() ([32]byte, error) { + return _VRFConsumer.Contract.RequestId(&_VRFConsumer.CallOpts) +} + +func (_VRFConsumer *VRFConsumerTransactor) DoRequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, fee *big.Int) (*types.Transaction, error) { + return _VRFConsumer.contract.Transact(opts, "doRequestRandomness", keyHash, fee) +} + +func (_VRFConsumer *VRFConsumerSession) DoRequestRandomness(keyHash [32]byte, fee *big.Int) (*types.Transaction, error) { + return _VRFConsumer.Contract.DoRequestRandomness(&_VRFConsumer.TransactOpts, keyHash, fee) +} + +func (_VRFConsumer *VRFConsumerTransactorSession) DoRequestRandomness(keyHash [32]byte, fee *big.Int) (*types.Transaction, error) { + return _VRFConsumer.Contract.DoRequestRandomness(&_VRFConsumer.TransactOpts, keyHash, fee) +} + +func (_VRFConsumer 
*VRFConsumerTransactor) RawFulfillRandomness(opts *bind.TransactOpts, requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFConsumer.contract.Transact(opts, "rawFulfillRandomness", requestId, randomness) +} + +func (_VRFConsumer *VRFConsumerSession) RawFulfillRandomness(requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFConsumer.Contract.RawFulfillRandomness(&_VRFConsumer.TransactOpts, requestId, randomness) +} + +func (_VRFConsumer *VRFConsumerTransactorSession) RawFulfillRandomness(requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFConsumer.Contract.RawFulfillRandomness(&_VRFConsumer.TransactOpts, requestId, randomness) +} + +func (_VRFConsumer *VRFConsumer) Address() common.Address { + return _VRFConsumer.address +} + +type VRFConsumerInterface interface { + RandomnessOutput(opts *bind.CallOpts) (*big.Int, error) + + RequestId(opts *bind.CallOpts) ([32]byte, error) + + DoRequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, fee *big.Int) (*types.Transaction, error) + + RawFulfillRandomness(opts *bind.TransactOpts, requestId [32]byte, randomness *big.Int) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/solidity_vrf_coordinator_interface/solidity_vrf_coordinator_interface.go b/core/gethwrappers/generated/solidity_vrf_coordinator_interface/solidity_vrf_coordinator_interface.go new file mode 100644 index 00000000..bd8e9975 --- /dev/null +++ b/core/gethwrappers/generated/solidity_vrf_coordinator_interface/solidity_vrf_coordinator_interface.go @@ -0,0 +1,1061 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package solidity_vrf_coordinator_interface + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFCoordinatorMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_blockHashStore\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"NewServiceAgreement\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"jobID\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes3
2\",\"name\":\"requestID\",\"type\":\"bytes32\"}],\"name\":\"RandomnessRequest\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"output\",\"type\":\"uint256\"}],\"name\":\"RandomnessRequestFulfilled\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"PRESEED_OFFSET\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PROOF_LENGTH\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PUBLIC_KEY_OFFSET\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"callbacks\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"callbackContract\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"randomnessFee\",\"type\":\"uint96\"},{\"internalType\":\"bytes32\",\"name\":\"seedAndBlockNum\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_proof\",\"type\":\"bytes\"}],\"name\":\"fulfillRandomnessRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"_publicKey\",\"type\":\"uint256[2]\"}],\"name\":\"hashOfKey\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"isOwner\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"addres
s\"},{\"internalType\":\"uint256\",\"name\":\"_fee\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_fee\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"_publicProvingKey\",\"type\":\"uint256[2]\"},{\"internalType\":\"bytes32\",\"name\":\"_jobID\",\"type\":\"bytes32\"}],\"name\":\"registerProvingKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"serviceAgreements\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"vRFOracle\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"fee\",\"type\":\"uint96\"},{\"internalType\":\"bytes32\",\"name\":\"jobID\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"withdrawableTokens\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50604051612a68380380612a688339818101604052604081101561003357600080fd5b508051602090910151600080546001600160a01b03191633178082556040516001600160a01b039190911691907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0908290a3600180546001600160a01b039384166001600160a01b031991821617909155600280549290931691161790556129a8806100c06000396000f3fe608060405234801561001057600080fd5b50600436106100e95760003560e01c8063a4c0ed361161008c578063d834020911610066578063d8340209146103c7578063e911439c1461040b578063f2fde38b14610413578063f3fef3a314610446576100e9565b8063a4c0ed36146102ac578063b415f4f514610374578063caf70c4a1461037c576100e9565b806375d35070116100c857806375d350701461023a5780638aa7927b146102575780638da5cb5b1461025f5780638f32d59b14610290576100e9565b80626f6ad0146100ee57806321f36509146101335780635e1c105914610192575b600080fd5b6101216004803603602081101561010457600080fd5b503573ffffffffffffffffffffffffffffffffffffffff1661047f565b60408051918252519081900360200190f35b6101506004803603602081101561014957600080fd5b5035610491565b6040805173ffffffffffffffffffffffffffffffffffffffff90941684526bffffffffffffffffffffffff909216602084015282820152519081900360600190f35b610238600480360360208110156101a857600080fd5b8101906020810181356401000000008111156101c357600080fd5b8201836020820111156101d557600080fd5b803590602001918460018302840111640100000000831117156101f757600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295506104e9945050505050565b005b6101506004803603602081101561025057600080fd5b50356105f1565b610121610649565b61026761064e565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190f35b61029861066a565b604080519115158252519081900360200190f35b610238600480360360608110156102c257600080fd5b73ffffffffffffffffffffffffffffffffffffffff823516916020810135918101906060810160408201356401000000008111156102ff57600080fd5b82018360208201111561031157600080fd5b80359060200191846001830284011164010000000
08311171561033357600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610688945050505050565b610121610747565b6101216004803603604081101561039257600080fd5b6040805180820182529183019291818301918390600290839083908082843760009201919091525091945061074c9350505050565b610238600480360360a08110156103dd57600080fd5b5080359073ffffffffffffffffffffffffffffffffffffffff602082013516906040810190608001356107a2565b610121610aa5565b6102386004803603602081101561042957600080fd5b503573ffffffffffffffffffffffffffffffffffffffff16610aab565b6102386004803603604081101561045c57600080fd5b5073ffffffffffffffffffffffffffffffffffffffff8135169060200135610b2a565b60056020526000908152604090205481565b6003602052600090815260409020805460019091015473ffffffffffffffffffffffffffffffffffffffff8216917401000000000000000000000000000000000000000090046bffffffffffffffffffffffff169083565b60006104f361285e565b6000806104ff85610c8d565b6000848152600460209081526040808320548287015173ffffffffffffffffffffffffffffffffffffffff909116808552600590935292205495995093975091955093509091610562916bffffffffffffffffffffffff1663ffffffff61101416565b73ffffffffffffffffffffffffffffffffffffffff8216600090815260056020908152604080832093909355858252600390529081208181556001015583516105ae9084908490611091565b604080518481526020810184905281517fa2e7a402243ebda4a69ceeb3dfb682943b7a9b3ac66d6eefa8db65894009611c929181900390910190a1505050505050565b6004602052600090815260409020805460019091015473ffffffffffffffffffffffffffffffffffffffff8216917401000000000000000000000000000000000000000090046bffffffffffffffffffffffff169083565b602081565b60005473ffffffffffffffffffffffffffffffffffffffff1690565b60005473ffffffffffffffffffffffffffffffffffffffff16331490565b60015473ffffffffffffffffffffffffffffffffffffffff16331461070e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f4d75737420757365204c494e4b20746f6b656e00000000000000000000000000604482015290519081900360640190fd5
b60008082806020019051604081101561072657600080fd5b50805160209091015190925090506107408282868861124e565b5050505050565b60e081565b6000816040516020018082600260200280838360005b8381101561077a578181015183820152602001610762565b505050509050019150506040516020818303038152906040528051906020012090505b919050565b6107aa61066a565b61081557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604482015290519081900360640190fd5b60408051808201825260009161084491908590600290839083908082843760009201919091525061074c915050565b60008181526004602052604090205490915073ffffffffffffffffffffffffffffffffffffffff1680156108d957604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f706c656173652072656769737465722061206e6577206b657900000000000000604482015290519081900360640190fd5b73ffffffffffffffffffffffffffffffffffffffff851661095b57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f5f6f7261636c65206d757374206e6f7420626520307830000000000000000000604482015290519081900360640190fd5b600082815260046020526040902080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff87161781556001018390556b033b2e3c9fd0803ce8000000861115610a12576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603c81526020018061291d603c913960400191505060405180910390fd5b600082815260046020908152604091829020805473ffffffffffffffffffffffffffffffffffffffff16740100000000000000000000000000000000000000006bffffffffffffffffffffffff8b1602179055815184815290810188905281517fae189157e0628c1e62315e9179156e1ea10e90e9c15060002f7021e907dc2cfe929181900390910190a1505050505050565b6101a081565b610ab361066a565b610b1e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2
063616c6c6572206973206e6f7420746865206f776e6572604482015290519081900360640190fd5b610b278161152f565b50565b336000908152600560205260409020548190811115610baa57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f63616e2774207769746864726177206d6f7265207468616e2062616c616e6365604482015290519081900360640190fd5b33600090815260056020526040902054610bca908363ffffffff61162816565b3360009081526005602090815260408083209390935560015483517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8881166004830152602482018890529451949091169363a9059cbb93604480840194938390030190829087803b158015610c5657600080fd5b505af1158015610c6a573d6000803e3d6000fd5b505050506040513d6020811015610c8057600080fd5b5051610c8857fe5b505050565b6000610c9761285e565b825160009081906101c0908114610d0f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f77726f6e672070726f6f66206c656e6774680000000000000000000000000000604482015290519081900360640190fd5b610d1761287e565b5060e086015181870151602088019190610d308361074c565b9750610d3c888361169f565b6000818152600360209081526040918290208251606081018452815473ffffffffffffffffffffffffffffffffffffffff8116808352740100000000000000000000000000000000000000009091046bffffffffffffffffffffffff169382019390935260019091015492810192909252909850909650610e1e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f6e6f20636f72726573706f6e64696e6720726571756573740000000000000000604482015290519081900360640190fd5b6040805160208082018590528183018490528251808303840181526060909201835281519101209088015114610eb557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f77726f6e672070726553656564206f7220626c6f636b206e756d000000000000604482015290519081900360640190fd5b804080610fc857600254604080517fe9413d380000000000000000000000000000000000000000000
0000000000000815260048101859052905173ffffffffffffffffffffffffffffffffffffffff9092169163e9413d3891602480820192602092909190829003018186803b158015610f2e57600080fd5b505afa158015610f42573d6000803e3d6000fd5b505050506040513d6020811015610f5857600080fd5b5051905080610fc857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f706c656173652070726f766520626c6f636b6861736800000000000000000000604482015290519081900360640190fd5b6040805160208082018690528183018490528251808303840181526060909201909252805191012060e08b018190526101a08b526110058b6116cb565b96505050505050509193509193565b60008282018381101561108857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b90505b92915050565b604080516024810185905260448082018590528251808303909101815260649091019091526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f94985ddd00000000000000000000000000000000000000000000000000000000179052600090620324b0805a101561117457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f6e6f7420656e6f7567682067617320666f7220636f6e73756d65720000000000604482015290519081900360640190fd5b60008473ffffffffffffffffffffffffffffffffffffffff16836040518082805190602001908083835b602083106111db57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161119e565b6001836020036101000a0380198251168184511680821785525050505050509050019150506000604051808303816000865af19150503d806000811461123d576040519150601f19603f3d011682016040523d82523d6000602084013e611242565b606091505b50505050505050505050565b600084815260046020526040902054829085907401000000000000000000000000000000000000000090046bffffffffffffffffffffffff168210156112f557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f426
56c6f7720616772656564207061796d656e74000000000000000000000000604482015290519081900360640190fd5b600086815260066020908152604080832073ffffffffffffffffffffffffffffffffffffffff871684529091528120549061133288888785611839565b90506000611340898361169f565b60008181526003602052604090205490915073ffffffffffffffffffffffffffffffffffffffff161561136f57fe5b600081815260036020526040902080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff88161790556b033b2e3c9fd0803ce800000087106113d057fe5b600081815260036020908152604080832080546bffffffffffffffffffffffff8c16740100000000000000000000000000000000000000000273ffffffffffffffffffffffffffffffffffffffff91821617825582518085018890524381850152835180820385018152606082018086528151918701919091206001948501558f875260049095529483902090910154928d905260808401869052891660a084015260c083018a905260e083018490525190917f56bd374744a66d531874338def36c906e3a6cf31176eb1e9afd9f1de69725d5191908190036101000190a2600089815260066020908152604080832073ffffffffffffffffffffffffffffffffffffffff8a1684529091529020546114f090600163ffffffff61101416565b6000998a52600660209081526040808c2073ffffffffffffffffffffffffffffffffffffffff9099168c52979052959098209490945550505050505050565b73ffffffffffffffffffffffffffffffffffffffff811661159b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806128f76026913960400191505060405180910390fd5b6000805460405173ffffffffffffffffffffffffffffffffffffffff808516939216917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a3600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60008282111561169957604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b50900390565b60408051602080820194909452808
2019290925280518083038201815260609092019052805191012090565b60006101a082511461173e57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f77726f6e672070726f6f66206c656e6774680000000000000000000000000000604482015290519081900360640190fd5b61174661287e565b61174e61287e565b61175661289c565b600061176061287e565b61176861287e565b6000888060200190516101a081101561178057600080fd5b5060e08101516101808201519198506040890197506080890196509450610100880193506101408801925090506117d387878760006020020151886001602002015189600260200201518989898961188d565b6003866040516020018083815260200182600260200280838360005b838110156118075781810151838201526020016117ef565b50505050905001925050506040516020818303038152906040528051906020012060001c975050505050505050919050565b604080516020808201969096528082019490945273ffffffffffffffffffffffffffffffffffffffff9290921660608401526080808401919091528151808403909101815260a09092019052805191012090565b61189689611b8e565b61190157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f7075626c6963206b6579206973206e6f74206f6e206375727665000000000000604482015290519081900360640190fd5b61190a88611b8e565b61197557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f67616d6d61206973206e6f74206f6e2063757276650000000000000000000000604482015290519081900360640190fd5b61197e83611b8e565b6119e957604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f6347616d6d615769746e657373206973206e6f74206f6e206375727665000000604482015290519081900360640190fd5b6119f282611b8e565b611a5d57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f73486173685769746e657373206973206e6f74206f6e20637572766500000000604482015290519081900360640190fd5b611a69878a8887611bd2565b611ad457604080517f08c379a0000000000000000000000000000000000000000000000000000000008152602
06004820152601a60248201527f6164647228632a706b2b732a6729e289a05f755769746e657373000000000000604482015290519081900360640190fd5b611adc61287e565b611ae68a87611d9e565b9050611af061287e565b611aff898b878b868989611e41565b90506000611b10838d8d8a86611fb4565b9050808a14611b8057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f696e76616c69642070726f6f6600000000000000000000000000000000000000604482015290519081900360640190fd5b505050505050505050505050565b60208101516000907ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f908009611bcb8360005b60200201516120d7565b1492915050565b600073ffffffffffffffffffffffffffffffffffffffff8216611c5657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f626164207769746e657373000000000000000000000000000000000000000000604482015290519081900360640190fd5b602084015160009060011615611c6d57601c611c70565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641418587600060200201510986517ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd036414191820392506000919089098751604080516000808252602082810180855288905260ff8916838501526060830194909452608082018590529151939450909260019260a08084019391927fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081019281900390910190855afa158015611d4b573d6000803e3d6000fd5b50506040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0015173ffffffffffffffffffffffffffffffffffffffff9081169088161495505050505050949350505050565b611da661287e565b611e04600184846040516020018084815260200183600260200280838360005b83811015611dde578181015183820152602001611dc6565b50505050905001828152602001935050505060405160208183030381529060405261212f565b90505b611e1081611b8e565b61108b578051604080516020818101939093528151808203909301835281019052611e3a9061212f565b9050611e07565b611e4961287e565b825186517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f91900306611edd57604080517f08c37
9a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f706f696e747320696e2073756d206d7573742062652064697374696e63740000604482015290519081900360640190fd5b611ee8878988612197565b611f3d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806129596021913960400191505060405180910390fd5b611f48848685612197565b611f9d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602281526020018061297a6022913960400191505060405180910390fd5b611fa88684846122fd565b98975050505050505050565b6000600286868685876040516020018087815260200186600260200280838360005b83811015611fee578181015183820152602001611fd6565b5050505090500185600260200280838360005b83811015612019578181015183820152602001612001565b5050505090500184600260200280838360005b8381101561204457818101518382015260200161202c565b5050505090500183600260200280838360005b8381101561206f578181015183820152602001612057565b505050509050018273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660601b815260140196505050505050506040516020818303038152906040528051906020012060001c905095945050505050565b6000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80848509840990507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f600782089392505050565b61213761287e565b6121408261242b565b8152612155612150826000611bc1565b612480565b60208201819052600290066001141561079d576020810180517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f039052919050565b6000826121a357600080fd5b83516020850151600090600116156121bc57601c6121bf565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141838709604080516000808252602080830180855282905260ff871683850152606083018890526080830185905292519394509260019260a08084019391927fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081019281900390910190855afa158015612261573d6000803e3d6000fd5b505050602
0604051035190506000866040516020018082600260200280838360005b8381101561229b578181015183820152602001612283565b505050509050019150506040516020818303038152906040528051906020012060001c90508073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614955050505050509392505050565b61230561287e565b835160208086015185519186015160009384938493612326939091906124ac565b919450925090507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8582096001146123bf57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f696e765a206d75737420626520696e7665727365206f66207a00000000000000604482015290519081900360640190fd5b60405180604001604052807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f806123f257fe5b87860981526020017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8785099052979650505050505050565b805160208201205b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f811061079d57604080516020808201939093528151808203840181529082019091528051910120612433565b600061108b827f3fffffffffffffffffffffffffffffffffffffffffffffffffffffffbfffff0c612642565b60008080600180827ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f897ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f038808905060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f038a08905060006125548383858561273b565b909850905061256588828e88612793565b909850905061257688828c87612793565b909850905060006125898d878b85612793565b909850905061259a8882868661273b565b90985090506125ab88828e89612793565b909850905081811461262e577ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f818a0998507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f82890997507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8183099650612632565b8196505b5050505050509450945094915050565b60008061264d6128ba565b6
020808252818101819052604082015260608101859052608081018490527ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f60a08201526126996128d8565b60208160c08460057ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa92508261273157604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6269674d6f64457870206661696c757265210000000000000000000000000000604482015290519081900360640190fd5b5195945050505050565b6000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8487097ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8487099097909650945050505050565b600080807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f878509905060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f87877ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f030990507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8183087ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f86890990999098509650505050505050565b604080516060810182526000808252602082018190529181019190915290565b60405180604001604052806002906020820280368337509192915050565b60405180606001604052806003906020820280368337509192915050565b6040518060c001604052806006906020820280368337509192915050565b6040518060200160405280600190602082028036833750919291505056fe4f776e61626c653a206e6577206f776e657220697320746865207a65726f2061646472657373796f752063616e277420636861726765206d6f7265207468616e20616c6c20746865204c494e4b20696e2074686520776f726c642c206772656564794669727374206d756c7469706c69636174696f6e20636865636b206661696c65645365636f6e64206d756c7469706c69636174696f6e20636865636b206661696c6564a164736f6c6343000606000a", +} + +var VRFCoordinatorABI = VRFCoordinatorMetaData.ABI + +var VRFCoordinatorBin = VRFCoordinatorMetaData.Bin + +func DeployVRFCoordinator(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address, _blockHashStore common.Address) (common.Address, 
*types.Transaction, *VRFCoordinator, error) { + parsed, err := VRFCoordinatorMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFCoordinatorBin), backend, _link, _blockHashStore) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFCoordinator{address: address, abi: *parsed, VRFCoordinatorCaller: VRFCoordinatorCaller{contract: contract}, VRFCoordinatorTransactor: VRFCoordinatorTransactor{contract: contract}, VRFCoordinatorFilterer: VRFCoordinatorFilterer{contract: contract}}, nil +} + +type VRFCoordinator struct { + address common.Address + abi abi.ABI + VRFCoordinatorCaller + VRFCoordinatorTransactor + VRFCoordinatorFilterer +} + +type VRFCoordinatorCaller struct { + contract *bind.BoundContract +} + +type VRFCoordinatorTransactor struct { + contract *bind.BoundContract +} + +type VRFCoordinatorFilterer struct { + contract *bind.BoundContract +} + +type VRFCoordinatorSession struct { + Contract *VRFCoordinator + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorCallerSession struct { + Contract *VRFCoordinatorCaller + CallOpts bind.CallOpts +} + +type VRFCoordinatorTransactorSession struct { + Contract *VRFCoordinatorTransactor + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorRaw struct { + Contract *VRFCoordinator +} + +type VRFCoordinatorCallerRaw struct { + Contract *VRFCoordinatorCaller +} + +type VRFCoordinatorTransactorRaw struct { + Contract *VRFCoordinatorTransactor +} + +func NewVRFCoordinator(address common.Address, backend bind.ContractBackend) (*VRFCoordinator, error) { + abi, err := abi.JSON(strings.NewReader(VRFCoordinatorABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFCoordinator(address, backend, backend, backend) + if err != nil { + return 
nil, err + } + return &VRFCoordinator{address: address, abi: abi, VRFCoordinatorCaller: VRFCoordinatorCaller{contract: contract}, VRFCoordinatorTransactor: VRFCoordinatorTransactor{contract: contract}, VRFCoordinatorFilterer: VRFCoordinatorFilterer{contract: contract}}, nil +} + +func NewVRFCoordinatorCaller(address common.Address, caller bind.ContractCaller) (*VRFCoordinatorCaller, error) { + contract, err := bindVRFCoordinator(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorCaller{contract: contract}, nil +} + +func NewVRFCoordinatorTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFCoordinatorTransactor, error) { + contract, err := bindVRFCoordinator(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorTransactor{contract: contract}, nil +} + +func NewVRFCoordinatorFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFCoordinatorFilterer, error) { + contract, err := bindVRFCoordinator(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFCoordinatorFilterer{contract: contract}, nil +} + +func bindVRFCoordinator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFCoordinatorMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFCoordinator *VRFCoordinatorRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinator.Contract.VRFCoordinatorCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFCoordinator *VRFCoordinatorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinator.Contract.VRFCoordinatorTransactor.contract.Transfer(opts) +} + +func (_VRFCoordinator *VRFCoordinatorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinator.Contract.VRFCoordinatorTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFCoordinator *VRFCoordinatorCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinator.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinator.Contract.contract.Transfer(opts) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinator.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFCoordinator *VRFCoordinatorCaller) PRESEEDOFFSET(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "PRESEED_OFFSET") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) PRESEEDOFFSET() (*big.Int, error) { + return _VRFCoordinator.Contract.PRESEEDOFFSET(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) PRESEEDOFFSET() (*big.Int, error) { + return _VRFCoordinator.Contract.PRESEEDOFFSET(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) PROOFLENGTH(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "PROOF_LENGTH") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) PROOFLENGTH() (*big.Int, error) { + return _VRFCoordinator.Contract.PROOFLENGTH(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) PROOFLENGTH() (*big.Int, error) { + return _VRFCoordinator.Contract.PROOFLENGTH(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) PUBLICKEYOFFSET(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "PUBLIC_KEY_OFFSET") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) PUBLICKEYOFFSET() (*big.Int, error) { + return _VRFCoordinator.Contract.PUBLICKEYOFFSET(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) PUBLICKEYOFFSET() (*big.Int, error) { + return 
_VRFCoordinator.Contract.PUBLICKEYOFFSET(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) Callbacks(opts *bind.CallOpts, arg0 [32]byte) (Callbacks, + + error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "callbacks", arg0) + + outstruct := new(Callbacks) + if err != nil { + return *outstruct, err + } + + outstruct.CallbackContract = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.RandomnessFee = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.SeedAndBlockNum = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) Callbacks(arg0 [32]byte) (Callbacks, + + error) { + return _VRFCoordinator.Contract.Callbacks(&_VRFCoordinator.CallOpts, arg0) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) Callbacks(arg0 [32]byte) (Callbacks, + + error) { + return _VRFCoordinator.Contract.Callbacks(&_VRFCoordinator.CallOpts, arg0) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) HashOfKey(opts *bind.CallOpts, _publicKey [2]*big.Int) ([32]byte, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "hashOfKey", _publicKey) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) HashOfKey(_publicKey [2]*big.Int) ([32]byte, error) { + return _VRFCoordinator.Contract.HashOfKey(&_VRFCoordinator.CallOpts, _publicKey) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) HashOfKey(_publicKey [2]*big.Int) ([32]byte, error) { + return _VRFCoordinator.Contract.HashOfKey(&_VRFCoordinator.CallOpts, _publicKey) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) IsOwner(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "isOwner") + + if err != nil { + return *new(bool), err + } + + 
out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) IsOwner() (bool, error) { + return _VRFCoordinator.Contract.IsOwner(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) IsOwner() (bool, error) { + return _VRFCoordinator.Contract.IsOwner(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) Owner() (common.Address, error) { + return _VRFCoordinator.Contract.Owner(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) Owner() (common.Address, error) { + return _VRFCoordinator.Contract.Owner(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) ServiceAgreements(opts *bind.CallOpts, arg0 [32]byte) (ServiceAgreements, + + error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "serviceAgreements", arg0) + + outstruct := new(ServiceAgreements) + if err != nil { + return *outstruct, err + } + + outstruct.VRFOracle = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.Fee = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.JobID = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) ServiceAgreements(arg0 [32]byte) (ServiceAgreements, + + error) { + return _VRFCoordinator.Contract.ServiceAgreements(&_VRFCoordinator.CallOpts, arg0) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) ServiceAgreements(arg0 [32]byte) (ServiceAgreements, + + error) { + return 
_VRFCoordinator.Contract.ServiceAgreements(&_VRFCoordinator.CallOpts, arg0) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) WithdrawableTokens(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "withdrawableTokens", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) WithdrawableTokens(arg0 common.Address) (*big.Int, error) { + return _VRFCoordinator.Contract.WithdrawableTokens(&_VRFCoordinator.CallOpts, arg0) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) WithdrawableTokens(arg0 common.Address) (*big.Int, error) { + return _VRFCoordinator.Contract.WithdrawableTokens(&_VRFCoordinator.CallOpts, arg0) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) FulfillRandomnessRequest(opts *bind.TransactOpts, _proof []byte) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "fulfillRandomnessRequest", _proof) +} + +func (_VRFCoordinator *VRFCoordinatorSession) FulfillRandomnessRequest(_proof []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.FulfillRandomnessRequest(&_VRFCoordinator.TransactOpts, _proof) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) FulfillRandomnessRequest(_proof []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.FulfillRandomnessRequest(&_VRFCoordinator.TransactOpts, _proof) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) OnTokenTransfer(opts *bind.TransactOpts, _sender common.Address, _fee *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "onTokenTransfer", _sender, _fee, _data) +} + +func (_VRFCoordinator *VRFCoordinatorSession) OnTokenTransfer(_sender common.Address, _fee *big.Int, _data []byte) (*types.Transaction, error) { + return 
_VRFCoordinator.Contract.OnTokenTransfer(&_VRFCoordinator.TransactOpts, _sender, _fee, _data) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) OnTokenTransfer(_sender common.Address, _fee *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.OnTokenTransfer(&_VRFCoordinator.TransactOpts, _sender, _fee, _data) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) RegisterProvingKey(opts *bind.TransactOpts, _fee *big.Int, _oracle common.Address, _publicProvingKey [2]*big.Int, _jobID [32]byte) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "registerProvingKey", _fee, _oracle, _publicProvingKey, _jobID) +} + +func (_VRFCoordinator *VRFCoordinatorSession) RegisterProvingKey(_fee *big.Int, _oracle common.Address, _publicProvingKey [2]*big.Int, _jobID [32]byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RegisterProvingKey(&_VRFCoordinator.TransactOpts, _fee, _oracle, _publicProvingKey, _jobID) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) RegisterProvingKey(_fee *big.Int, _oracle common.Address, _publicProvingKey [2]*big.Int, _jobID [32]byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RegisterProvingKey(&_VRFCoordinator.TransactOpts, _fee, _oracle, _publicProvingKey, _jobID) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "transferOwnership", newOwner) +} + +func (_VRFCoordinator *VRFCoordinatorSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.TransferOwnership(&_VRFCoordinator.TransactOpts, newOwner) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.TransferOwnership(&_VRFCoordinator.TransactOpts, 
newOwner) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) Withdraw(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "withdraw", _recipient, _amount) +} + +func (_VRFCoordinator *VRFCoordinatorSession) Withdraw(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFCoordinator.Contract.Withdraw(&_VRFCoordinator.TransactOpts, _recipient, _amount) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) Withdraw(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFCoordinator.Contract.Withdraw(&_VRFCoordinator.TransactOpts, _recipient, _amount) +} + +type VRFCoordinatorNewServiceAgreementIterator struct { + Event *VRFCoordinatorNewServiceAgreement + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorNewServiceAgreementIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorNewServiceAgreement) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorNewServiceAgreement) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorNewServiceAgreementIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorNewServiceAgreementIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorNewServiceAgreement struct { + KeyHash [32]byte + Fee *big.Int + Raw types.Log +} + +func 
(_VRFCoordinator *VRFCoordinatorFilterer) FilterNewServiceAgreement(opts *bind.FilterOpts) (*VRFCoordinatorNewServiceAgreementIterator, error) { + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "NewServiceAgreement") + if err != nil { + return nil, err + } + return &VRFCoordinatorNewServiceAgreementIterator{contract: _VRFCoordinator.contract, event: "NewServiceAgreement", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchNewServiceAgreement(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorNewServiceAgreement) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "NewServiceAgreement") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorNewServiceAgreement) + if err := _VRFCoordinator.contract.UnpackLog(event, "NewServiceAgreement", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseNewServiceAgreement(log types.Log) (*VRFCoordinatorNewServiceAgreement, error) { + event := new(VRFCoordinatorNewServiceAgreement) + if err := _VRFCoordinator.contract.UnpackLog(event, "NewServiceAgreement", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorOwnershipTransferredIterator struct { + Event *VRFCoordinatorOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(VRFCoordinatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorOwnershipTransferred struct { + PreviousOwner common.Address + NewOwner common.Address + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*VRFCoordinatorOwnershipTransferredIterator, error) { + + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) + } + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorOwnershipTransferredIterator{contract: _VRFCoordinator.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { + + var previousOwnerRule []interface{} + for _, previousOwnerItem 
:= range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) + } + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorOwnershipTransferred) + if err := _VRFCoordinator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseOwnershipTransferred(log types.Log) (*VRFCoordinatorOwnershipTransferred, error) { + event := new(VRFCoordinatorOwnershipTransferred) + if err := _VRFCoordinator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorRandomnessRequestIterator struct { + Event *VRFCoordinatorRandomnessRequest + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorRandomnessRequestIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomnessRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomnessRequest) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorRandomnessRequestIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorRandomnessRequestIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorRandomnessRequest struct { + KeyHash [32]byte + Seed *big.Int + JobID [32]byte + Sender common.Address + Fee *big.Int + RequestID [32]byte + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterRandomnessRequest(opts *bind.FilterOpts, jobID [][32]byte) (*VRFCoordinatorRandomnessRequestIterator, error) { + + var jobIDRule []interface{} + for _, jobIDItem := range jobID { + jobIDRule = append(jobIDRule, jobIDItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "RandomnessRequest", jobIDRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorRandomnessRequestIterator{contract: _VRFCoordinator.contract, event: "RandomnessRequest", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchRandomnessRequest(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomnessRequest, jobID [][32]byte) (event.Subscription, error) { + + var jobIDRule []interface{} + for _, jobIDItem := range jobID { + jobIDRule = append(jobIDRule, jobIDItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "RandomnessRequest", jobIDRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorRandomnessRequest) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomnessRequest", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case 
<-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseRandomnessRequest(log types.Log) (*VRFCoordinatorRandomnessRequest, error) { + event := new(VRFCoordinatorRandomnessRequest) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomnessRequest", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorRandomnessRequestFulfilledIterator struct { + Event *VRFCoordinatorRandomnessRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorRandomnessRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomnessRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomnessRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorRandomnessRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorRandomnessRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorRandomnessRequestFulfilled struct { + RequestId [32]byte + Output *big.Int + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterRandomnessRequestFulfilled(opts *bind.FilterOpts) (*VRFCoordinatorRandomnessRequestFulfilledIterator, error) { + + logs, sub, err := 
_VRFCoordinator.contract.FilterLogs(opts, "RandomnessRequestFulfilled") + if err != nil { + return nil, err + } + return &VRFCoordinatorRandomnessRequestFulfilledIterator{contract: _VRFCoordinator.contract, event: "RandomnessRequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchRandomnessRequestFulfilled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomnessRequestFulfilled) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "RandomnessRequestFulfilled") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorRandomnessRequestFulfilled) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomnessRequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseRandomnessRequestFulfilled(log types.Log) (*VRFCoordinatorRandomnessRequestFulfilled, error) { + event := new(VRFCoordinatorRandomnessRequestFulfilled) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomnessRequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type Callbacks struct { + CallbackContract common.Address + RandomnessFee *big.Int + SeedAndBlockNum [32]byte +} +type ServiceAgreements struct { + VRFOracle common.Address + Fee *big.Int + JobID [32]byte +} + +func (_VRFCoordinator *VRFCoordinator) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFCoordinator.abi.Events["NewServiceAgreement"].ID: + return _VRFCoordinator.ParseNewServiceAgreement(log) + case 
_VRFCoordinator.abi.Events["OwnershipTransferred"].ID: + return _VRFCoordinator.ParseOwnershipTransferred(log) + case _VRFCoordinator.abi.Events["RandomnessRequest"].ID: + return _VRFCoordinator.ParseRandomnessRequest(log) + case _VRFCoordinator.abi.Events["RandomnessRequestFulfilled"].ID: + return _VRFCoordinator.ParseRandomnessRequestFulfilled(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFCoordinatorNewServiceAgreement) Topic() common.Hash { + return common.HexToHash("0xae189157e0628c1e62315e9179156e1ea10e90e9c15060002f7021e907dc2cfe") +} + +func (VRFCoordinatorOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFCoordinatorRandomnessRequest) Topic() common.Hash { + return common.HexToHash("0x56bd374744a66d531874338def36c906e3a6cf31176eb1e9afd9f1de69725d51") +} + +func (VRFCoordinatorRandomnessRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0xa2e7a402243ebda4a69ceeb3dfb682943b7a9b3ac66d6eefa8db65894009611c") +} + +func (_VRFCoordinator *VRFCoordinator) Address() common.Address { + return _VRFCoordinator.address +} + +type VRFCoordinatorInterface interface { + PRESEEDOFFSET(opts *bind.CallOpts) (*big.Int, error) + + PROOFLENGTH(opts *bind.CallOpts) (*big.Int, error) + + PUBLICKEYOFFSET(opts *bind.CallOpts) (*big.Int, error) + + Callbacks(opts *bind.CallOpts, arg0 [32]byte) (Callbacks, + + error) + + HashOfKey(opts *bind.CallOpts, _publicKey [2]*big.Int) ([32]byte, error) + + IsOwner(opts *bind.CallOpts) (bool, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + ServiceAgreements(opts *bind.CallOpts, arg0 [32]byte) (ServiceAgreements, + + error) + + WithdrawableTokens(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) + + FulfillRandomnessRequest(opts *bind.TransactOpts, _proof []byte) (*types.Transaction, error) + + OnTokenTransfer(opts 
*bind.TransactOpts, _sender common.Address, _fee *big.Int, _data []byte) (*types.Transaction, error) + + RegisterProvingKey(opts *bind.TransactOpts, _fee *big.Int, _oracle common.Address, _publicProvingKey [2]*big.Int, _jobID [32]byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) + + Withdraw(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) + + FilterNewServiceAgreement(opts *bind.FilterOpts) (*VRFCoordinatorNewServiceAgreementIterator, error) + + WatchNewServiceAgreement(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorNewServiceAgreement) (event.Subscription, error) + + ParseNewServiceAgreement(log types.Log) (*VRFCoordinatorNewServiceAgreement, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*VRFCoordinatorOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFCoordinatorOwnershipTransferred, error) + + FilterRandomnessRequest(opts *bind.FilterOpts, jobID [][32]byte) (*VRFCoordinatorRandomnessRequestIterator, error) + + WatchRandomnessRequest(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomnessRequest, jobID [][32]byte) (event.Subscription, error) + + ParseRandomnessRequest(log types.Log) (*VRFCoordinatorRandomnessRequest, error) + + FilterRandomnessRequestFulfilled(opts *bind.FilterOpts) (*VRFCoordinatorRandomnessRequestFulfilledIterator, error) + + WatchRandomnessRequestFulfilled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomnessRequestFulfilled) (event.Subscription, error) + + ParseRandomnessRequestFulfilled(log types.Log) (*VRFCoordinatorRandomnessRequestFulfilled, error) + + ParseLog(log types.Log) (generated.AbigenLog, 
error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/solidity_vrf_request_id/solidity_vrf_request_id.go b/core/gethwrappers/generated/solidity_vrf_request_id/solidity_vrf_request_id.go new file mode 100644 index 00000000..b0d101a9 --- /dev/null +++ b/core/gethwrappers/generated/solidity_vrf_request_id/solidity_vrf_request_id.go @@ -0,0 +1,226 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package solidity_vrf_request_id + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFRequestIDBaseTestHelperMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_vRFInputSeed\",\"type\":\"uint256\"}],\"name\":\"makeRequestId_\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_userSeed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_requester\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_nonce\",\"type\":\"uint256\"}],\"name\":\"makeVRFInputSeed_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5061016c806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806337ab429a1461003b578063bda087ae14610092575b600080fd5b6100806004803603608081101561005157600080fd5b5080359060208101359073ffffffffffffffffffffffffffffffffffffffff60408201351690606001356100b5565b60408051918252519081900360200190f35b610080600480360360408110156100a857600080fd5b50803590602001356100cc565b60006100c3858585856100df565b95945050505050565b60006100d88383610133565b9392505050565b604080516020808201969096528082019490945273ffffffffffffffffffffffffffffffffffffffff9290921660608401526080808401919091528151808403909101815260a09092019052805191012090565b60408051602080820194909452808201929092528051808303820181526060909201905280519101209056fea164736f6c6343000606000a", +} + +var VRFRequestIDBaseTestHelperABI = VRFRequestIDBaseTestHelperMetaData.ABI + +var VRFRequestIDBaseTestHelperBin = VRFRequestIDBaseTestHelperMetaData.Bin + +func DeployVRFRequestIDBaseTestHelper(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRFRequestIDBaseTestHelper, error) { + parsed, err := VRFRequestIDBaseTestHelperMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFRequestIDBaseTestHelperBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFRequestIDBaseTestHelper{address: address, abi: *parsed, VRFRequestIDBaseTestHelperCaller: VRFRequestIDBaseTestHelperCaller{contract: contract}, VRFRequestIDBaseTestHelperTransactor: VRFRequestIDBaseTestHelperTransactor{contract: contract}, VRFRequestIDBaseTestHelperFilterer: VRFRequestIDBaseTestHelperFilterer{contract: contract}}, nil +} + +type VRFRequestIDBaseTestHelper struct { + address common.Address + abi abi.ABI + 
VRFRequestIDBaseTestHelperCaller + VRFRequestIDBaseTestHelperTransactor + VRFRequestIDBaseTestHelperFilterer +} + +type VRFRequestIDBaseTestHelperCaller struct { + contract *bind.BoundContract +} + +type VRFRequestIDBaseTestHelperTransactor struct { + contract *bind.BoundContract +} + +type VRFRequestIDBaseTestHelperFilterer struct { + contract *bind.BoundContract +} + +type VRFRequestIDBaseTestHelperSession struct { + Contract *VRFRequestIDBaseTestHelper + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFRequestIDBaseTestHelperCallerSession struct { + Contract *VRFRequestIDBaseTestHelperCaller + CallOpts bind.CallOpts +} + +type VRFRequestIDBaseTestHelperTransactorSession struct { + Contract *VRFRequestIDBaseTestHelperTransactor + TransactOpts bind.TransactOpts +} + +type VRFRequestIDBaseTestHelperRaw struct { + Contract *VRFRequestIDBaseTestHelper +} + +type VRFRequestIDBaseTestHelperCallerRaw struct { + Contract *VRFRequestIDBaseTestHelperCaller +} + +type VRFRequestIDBaseTestHelperTransactorRaw struct { + Contract *VRFRequestIDBaseTestHelperTransactor +} + +func NewVRFRequestIDBaseTestHelper(address common.Address, backend bind.ContractBackend) (*VRFRequestIDBaseTestHelper, error) { + abi, err := abi.JSON(strings.NewReader(VRFRequestIDBaseTestHelperABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFRequestIDBaseTestHelper(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFRequestIDBaseTestHelper{address: address, abi: abi, VRFRequestIDBaseTestHelperCaller: VRFRequestIDBaseTestHelperCaller{contract: contract}, VRFRequestIDBaseTestHelperTransactor: VRFRequestIDBaseTestHelperTransactor{contract: contract}, VRFRequestIDBaseTestHelperFilterer: VRFRequestIDBaseTestHelperFilterer{contract: contract}}, nil +} + +func NewVRFRequestIDBaseTestHelperCaller(address common.Address, caller bind.ContractCaller) (*VRFRequestIDBaseTestHelperCaller, error) { + contract, err := 
bindVRFRequestIDBaseTestHelper(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFRequestIDBaseTestHelperCaller{contract: contract}, nil +} + +func NewVRFRequestIDBaseTestHelperTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFRequestIDBaseTestHelperTransactor, error) { + contract, err := bindVRFRequestIDBaseTestHelper(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFRequestIDBaseTestHelperTransactor{contract: contract}, nil +} + +func NewVRFRequestIDBaseTestHelperFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFRequestIDBaseTestHelperFilterer, error) { + contract, err := bindVRFRequestIDBaseTestHelper(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFRequestIDBaseTestHelperFilterer{contract: contract}, nil +} + +func bindVRFRequestIDBaseTestHelper(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFRequestIDBaseTestHelperMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFRequestIDBaseTestHelper.Contract.VRFRequestIDBaseTestHelperCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFRequestIDBaseTestHelper.Contract.VRFRequestIDBaseTestHelperTransactor.contract.Transfer(opts) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFRequestIDBaseTestHelper.Contract.VRFRequestIDBaseTestHelperTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFRequestIDBaseTestHelper.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFRequestIDBaseTestHelper.Contract.contract.Transfer(opts) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFRequestIDBaseTestHelper.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCaller) MakeRequestId(opts *bind.CallOpts, _keyHash [32]byte, _vRFInputSeed *big.Int) ([32]byte, error) { + var out []interface{} + err := _VRFRequestIDBaseTestHelper.contract.Call(opts, &out, "makeRequestId_", _keyHash, _vRFInputSeed) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperSession) MakeRequestId(_keyHash [32]byte, _vRFInputSeed *big.Int) ([32]byte, error) { + return _VRFRequestIDBaseTestHelper.Contract.MakeRequestId(&_VRFRequestIDBaseTestHelper.CallOpts, _keyHash, _vRFInputSeed) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCallerSession) MakeRequestId(_keyHash [32]byte, _vRFInputSeed *big.Int) ([32]byte, error) { + return _VRFRequestIDBaseTestHelper.Contract.MakeRequestId(&_VRFRequestIDBaseTestHelper.CallOpts, _keyHash, _vRFInputSeed) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCaller) MakeVRFInputSeed(opts *bind.CallOpts, _keyHash [32]byte, _userSeed *big.Int, _requester common.Address, _nonce *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFRequestIDBaseTestHelper.contract.Call(opts, &out, "makeVRFInputSeed_", _keyHash, _userSeed, _requester, _nonce) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperSession) MakeVRFInputSeed(_keyHash [32]byte, _userSeed *big.Int, _requester common.Address, _nonce *big.Int) (*big.Int, error) { + return _VRFRequestIDBaseTestHelper.Contract.MakeVRFInputSeed(&_VRFRequestIDBaseTestHelper.CallOpts, _keyHash, _userSeed, _requester, _nonce) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCallerSession) MakeVRFInputSeed(_keyHash [32]byte, _userSeed *big.Int, _requester 
common.Address, _nonce *big.Int) (*big.Int, error) { + return _VRFRequestIDBaseTestHelper.Contract.MakeVRFInputSeed(&_VRFRequestIDBaseTestHelper.CallOpts, _keyHash, _userSeed, _requester, _nonce) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelper) Address() common.Address { + return _VRFRequestIDBaseTestHelper.address +} + +type VRFRequestIDBaseTestHelperInterface interface { + MakeRequestId(opts *bind.CallOpts, _keyHash [32]byte, _vRFInputSeed *big.Int) ([32]byte, error) + + MakeVRFInputSeed(opts *bind.CallOpts, _keyHash [32]byte, _userSeed *big.Int, _requester common.Address, _nonce *big.Int) (*big.Int, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/solidity_vrf_request_id_v08/solidity_vrf_request_id_v08.go b/core/gethwrappers/generated/solidity_vrf_request_id_v08/solidity_vrf_request_id_v08.go new file mode 100644 index 00000000..5d931e3d --- /dev/null +++ b/core/gethwrappers/generated/solidity_vrf_request_id_v08/solidity_vrf_request_id_v08.go @@ -0,0 +1,226 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package solidity_vrf_request_id_v08 + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFRequestIDBaseTestHelperMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_vRFInputSeed\",\"type\":\"uint256\"}],\"name\":\"makeRequestId_\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_userSeed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_requester\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_nonce\",\"type\":\"uint256\"}],\"name\":\"makeVRFInputSeed_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610170806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806337ab429a1461003b578063bda087ae146100af575b600080fd5b61009d61004936600461010b565b604080516020808201969096528082019490945273ffffffffffffffffffffffffffffffffffffffff9290921660608401526080808401919091528151808403909101815260a09092019052805191012090565b60405190815260200160405180910390f35b61009d6100bd3660046100e9565b604080516020808201949094528082019290925280518083038201815260609092019052805191012090565b600080604083850312156100fc57600080fd5b50508035926020909101359150565b6000806000806080858703121561012157600080fd5b8435935060208501359250604085013573ffffffffffffffffffffffffffffffffffffffff8116811461015357600080fd5b939692955092936060013592505056fea164736f6c6343000806000a", +} + +var VRFRequestIDBaseTestHelperABI = VRFRequestIDBaseTestHelperMetaData.ABI + +var VRFRequestIDBaseTestHelperBin = VRFRequestIDBaseTestHelperMetaData.Bin + +func DeployVRFRequestIDBaseTestHelper(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRFRequestIDBaseTestHelper, error) { + parsed, err := VRFRequestIDBaseTestHelperMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFRequestIDBaseTestHelperBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFRequestIDBaseTestHelper{address: address, abi: *parsed, VRFRequestIDBaseTestHelperCaller: VRFRequestIDBaseTestHelperCaller{contract: contract}, VRFRequestIDBaseTestHelperTransactor: VRFRequestIDBaseTestHelperTransactor{contract: contract}, VRFRequestIDBaseTestHelperFilterer: VRFRequestIDBaseTestHelperFilterer{contract: contract}}, nil +} + +type VRFRequestIDBaseTestHelper struct { + address common.Address + abi abi.ABI + 
VRFRequestIDBaseTestHelperCaller + VRFRequestIDBaseTestHelperTransactor + VRFRequestIDBaseTestHelperFilterer +} + +type VRFRequestIDBaseTestHelperCaller struct { + contract *bind.BoundContract +} + +type VRFRequestIDBaseTestHelperTransactor struct { + contract *bind.BoundContract +} + +type VRFRequestIDBaseTestHelperFilterer struct { + contract *bind.BoundContract +} + +type VRFRequestIDBaseTestHelperSession struct { + Contract *VRFRequestIDBaseTestHelper + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFRequestIDBaseTestHelperCallerSession struct { + Contract *VRFRequestIDBaseTestHelperCaller + CallOpts bind.CallOpts +} + +type VRFRequestIDBaseTestHelperTransactorSession struct { + Contract *VRFRequestIDBaseTestHelperTransactor + TransactOpts bind.TransactOpts +} + +type VRFRequestIDBaseTestHelperRaw struct { + Contract *VRFRequestIDBaseTestHelper +} + +type VRFRequestIDBaseTestHelperCallerRaw struct { + Contract *VRFRequestIDBaseTestHelperCaller +} + +type VRFRequestIDBaseTestHelperTransactorRaw struct { + Contract *VRFRequestIDBaseTestHelperTransactor +} + +func NewVRFRequestIDBaseTestHelper(address common.Address, backend bind.ContractBackend) (*VRFRequestIDBaseTestHelper, error) { + abi, err := abi.JSON(strings.NewReader(VRFRequestIDBaseTestHelperABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFRequestIDBaseTestHelper(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFRequestIDBaseTestHelper{address: address, abi: abi, VRFRequestIDBaseTestHelperCaller: VRFRequestIDBaseTestHelperCaller{contract: contract}, VRFRequestIDBaseTestHelperTransactor: VRFRequestIDBaseTestHelperTransactor{contract: contract}, VRFRequestIDBaseTestHelperFilterer: VRFRequestIDBaseTestHelperFilterer{contract: contract}}, nil +} + +func NewVRFRequestIDBaseTestHelperCaller(address common.Address, caller bind.ContractCaller) (*VRFRequestIDBaseTestHelperCaller, error) { + contract, err := 
bindVRFRequestIDBaseTestHelper(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFRequestIDBaseTestHelperCaller{contract: contract}, nil +} + +func NewVRFRequestIDBaseTestHelperTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFRequestIDBaseTestHelperTransactor, error) { + contract, err := bindVRFRequestIDBaseTestHelper(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFRequestIDBaseTestHelperTransactor{contract: contract}, nil +} + +func NewVRFRequestIDBaseTestHelperFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFRequestIDBaseTestHelperFilterer, error) { + contract, err := bindVRFRequestIDBaseTestHelper(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFRequestIDBaseTestHelperFilterer{contract: contract}, nil +} + +func bindVRFRequestIDBaseTestHelper(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFRequestIDBaseTestHelperMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFRequestIDBaseTestHelper.Contract.VRFRequestIDBaseTestHelperCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFRequestIDBaseTestHelper.Contract.VRFRequestIDBaseTestHelperTransactor.contract.Transfer(opts) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFRequestIDBaseTestHelper.Contract.VRFRequestIDBaseTestHelperTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFRequestIDBaseTestHelper.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFRequestIDBaseTestHelper.Contract.contract.Transfer(opts) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFRequestIDBaseTestHelper.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCaller) MakeRequestId(opts *bind.CallOpts, _keyHash [32]byte, _vRFInputSeed *big.Int) ([32]byte, error) { + var out []interface{} + err := _VRFRequestIDBaseTestHelper.contract.Call(opts, &out, "makeRequestId_", _keyHash, _vRFInputSeed) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperSession) MakeRequestId(_keyHash [32]byte, _vRFInputSeed *big.Int) ([32]byte, error) { + return _VRFRequestIDBaseTestHelper.Contract.MakeRequestId(&_VRFRequestIDBaseTestHelper.CallOpts, _keyHash, _vRFInputSeed) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCallerSession) MakeRequestId(_keyHash [32]byte, _vRFInputSeed *big.Int) ([32]byte, error) { + return _VRFRequestIDBaseTestHelper.Contract.MakeRequestId(&_VRFRequestIDBaseTestHelper.CallOpts, _keyHash, _vRFInputSeed) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCaller) MakeVRFInputSeed(opts *bind.CallOpts, _keyHash [32]byte, _userSeed *big.Int, _requester common.Address, _nonce *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFRequestIDBaseTestHelper.contract.Call(opts, &out, "makeVRFInputSeed_", _keyHash, _userSeed, _requester, _nonce) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperSession) MakeVRFInputSeed(_keyHash [32]byte, _userSeed *big.Int, _requester common.Address, _nonce *big.Int) (*big.Int, error) { + return _VRFRequestIDBaseTestHelper.Contract.MakeVRFInputSeed(&_VRFRequestIDBaseTestHelper.CallOpts, _keyHash, _userSeed, _requester, _nonce) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCallerSession) MakeVRFInputSeed(_keyHash [32]byte, _userSeed *big.Int, _requester 
common.Address, _nonce *big.Int) (*big.Int, error) { + return _VRFRequestIDBaseTestHelper.Contract.MakeVRFInputSeed(&_VRFRequestIDBaseTestHelper.CallOpts, _keyHash, _userSeed, _requester, _nonce) +} + +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelper) Address() common.Address { + return _VRFRequestIDBaseTestHelper.address +} + +type VRFRequestIDBaseTestHelperInterface interface { + MakeRequestId(opts *bind.CallOpts, _keyHash [32]byte, _vRFInputSeed *big.Int) ([32]byte, error) + + MakeVRFInputSeed(opts *bind.CallOpts, _keyHash [32]byte, _userSeed *big.Int, _requester common.Address, _nonce *big.Int) (*big.Int, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/solidity_vrf_v08_verifier_wrapper/solidity_vrf_v08_verifier_wrapper.go b/core/gethwrappers/generated/solidity_vrf_v08_verifier_wrapper/solidity_vrf_v08_verifier_wrapper.go new file mode 100644 index 00000000..6eaf2b99 --- /dev/null +++ b/core/gethwrappers/generated/solidity_vrf_v08_verifier_wrapper/solidity_vrf_v08_verifier_wrapper.go @@ -0,0 +1,526 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package solidity_vrf_v08_verifier_wrapper + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type VRFProof struct { + Pk [2]*big.Int + Gamma [2]*big.Int + C *big.Int + S *big.Int + Seed *big.Int + UWitness common.Address + CGammaWitness [2]*big.Int + SHashWitness [2]*big.Int + ZInv *big.Int +} + +var VRFV08TestHelperMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"p1\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"p2\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"invZ\",\"type\":\"uint256\"}],\"name\":\"affineECAdd_\",\"outputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"\",\"type\":\"uint256[2]\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"base\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"exponent\",\"type\":\"uint256\"}],\"name\":\"bigModExp_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"x\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"},{\"internalType\":\"uint256[2]\",\"name\":\"q\",\"type\":\"uint256[2]\"}],\"name\":\"ecmulVerify_\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"b\",\"type
\":\"bytes\"}],\"name\":\"fieldHash_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"x\",\"type\":\"uint256\"}],\"name\":\"hashToCurve_\",\"outputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"\",\"type\":\"uint256[2]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"p\",\"type\":\"uint256[2]\"}],\"name\":\"isOnCurve_\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256[2]\",\"name\":\"p1\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"cp1Witness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256[2]\",\"name\":\"p2\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sp2Witness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"name\":\"linearCombination_\",\"outputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"\",\"type\":\"uint256[2]\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"px\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"py\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"qx\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"qy\",\"type\":\"uint256\"}],\"name\":\"projectiveECAdd_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"funct
ion\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"internalType\":\"structVRF.Proof\",\"name\":\"proof\",\"type\":\"tuple\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"}],\"name\":\"randomValueFromVRFProof_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"output\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"hash\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"v\",\"type\":\"uint256[2]\"}],\"name\":\"scalarFromCurvePoints_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"x\",\"type\":\"uint256\"}],\"name\":\"squareRoot_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256[2]\",\"name\":\"p\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint
256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"lcWitness\",\"type\":\"address\"}],\"name\":\"verifyLinearCombinationWithGenerator_\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"name\":\"verifyVRFProof_\",\"outputs\":[],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"x\",\"type\":\"uint256\"}],\"name\":\"ySquared_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50611b34806100206000396000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c80639d6f03371161008c578063b481e26011610066578063b481e260146101fc578063ef3b10ec1461020f578063fd7e4af914610224578063fe54f2a21461023757600080fd5b80639d6f0337146101c3578063a5e9508f146101d6578063aa7b2fbb146101e957600080fd5b80637f8f50a8116100c85780637f8f50a81461014c5780638af046ea1461015f57806391d5f6911461017257806395e6ee921461019557600080fd5b8063244f896d146100ef57806335452450146101185780635de600421461012b575b600080fd5b6101026100fd366004611549565b61024a565b60405161010f9190611a03565b60405180910390f35b610102610126366004611619565b610267565b61013e6101393660046118e1565b610282565b60405190815260200161010f565b61013e61015a3660046114df565b61028e565b61013e61016d366004611809565b6102a7565b61018561018036600461189a565b6102b2565b604051901515815260200161010f565b6101a86101a3366004611903565b6102c9565b6040805193845260208401929092529082015260600161010f565b61013e6101d1366004611809565b6102ea565b61013e6101e4366004611751565b6102f5565b6101856101f7366004611644565b610301565b61013e61020a366004611682565b61030e565b61022261021d366004611587565b610319565b005b6101856102323660046114c3565b610335565b610102610245366004611822565b610340565b6102526113cd565b61025d848484610363565b90505b9392505050565b61026f6113cd565b6102798383610497565b90505b92915050565b600061027983836104fb565b600061029d86868686866105ef565b9695505050505050565b600061027c8261064d565b60006102c085858585610687565b95945050505050565b60008060006102da8787878761082a565b9250925092509450945094915050565b600061027c826109c0565b60006102798383610a18565b600061025d848484610aa1565b600061027c82610c2e565b61032a898989898989898989610c88565b505050505050505050565b600061027c82610f5f565b6103486113cd565b610357888888888888886110ba565b98975050505050505050565b61036b6113cd565b83516020808601518551918601516000938493849361038c9390919061082a565b919450925090507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f858209600114610425576040517f08c379a
000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f696e765a206d75737420626520696e7665727365206f66207a0000000000000060448201526064015b60405180910390fd5b60405180604001604052807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061045e5761045e611ac9565b87860981526020017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8785099052979650505050505050565b61049f6113cd565b6104cc600184846040516020016104b8939291906119e2565b604051602081830303815290604052611242565b90505b6104d881610f5f565b61027c5780516040805160208101929092526104f491016104b8565b90506104cf565b6000806105066113eb565b6020808252818101819052604082015260608101859052608081018490527ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f60a0820152610552611409565b60208160c08460057ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa9250826105e5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6269674d6f64457870206661696c757265210000000000000000000000000000604482015260640161041c565b5195945050505050565b60006002868686858760405160200161060d96959493929190611970565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101209695505050505050565b600061027c8260026106807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f6001611a4f565b901c6104fb565b600073ffffffffffffffffffffffffffffffffffffffff8216610706576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f626164207769746e657373000000000000000000000000000000000000000000604482015260640161041c565b60208401516000906001161561071d57601c610720565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641418587600060200201510986517ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141918203925060009190890987516040805160008082526020820180845287905260ff881692820192909252606081019290925260808201839052919
25060019060a0016020604051602081039080840390855afa1580156107d7573d6000803e3d6000fd5b50506040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0015173ffffffffffffffffffffffffffffffffffffffff9081169088161495505050505050949350505050565b60008080600180827ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f897ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f038808905060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f038a08905060006108d2838385856112aa565b90985090506108e388828e88611302565b90985090506108f488828c87611302565b909850905060006109078d878b85611302565b9098509050610918888286866112aa565b909850905061092988828e89611302565b90985090508181146109ac577ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f818a0998507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f82890997507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f81830996506109b0565b8196505b5050505050509450945094915050565b6000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80848509840990507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f600782089392505050565b6000610a4c8360000151846020015185604001518660600151868860a001518960c001518a60e001518b6101000151610c88565b60038360200151604051602001610a64929190611a11565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101209392505050565b600082610b0a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f7a65726f207363616c6172000000000000000000000000000000000000000000604482015260640161041c565b83516020850151600090610b2090600290611a8e565b15610b2c57601c610b2f565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641418387096040805160008082526020820180845281905260ff86169282019290925260608101869052608081018390529192509060019060a00160206040516020810
39080840390855afa158015610baf573d6000803e3d6000fd5b505050602060405103519050600086604051602001610bce919061195e565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052805160209091012073ffffffffffffffffffffffffffffffffffffffff92831692169190911498975050505050505050565b805160208201205b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8110610c8357604080516020808201939093528151808203840181529082019091528051910120610c36565b919050565b610c9189610f5f565b610cf7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f7075626c6963206b6579206973206e6f74206f6e206375727665000000000000604482015260640161041c565b610d0088610f5f565b610d66576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f67616d6d61206973206e6f74206f6e2063757276650000000000000000000000604482015260640161041c565b610d6f83610f5f565b610dd5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f6347616d6d615769746e657373206973206e6f74206f6e206375727665000000604482015260640161041c565b610dde82610f5f565b610e44576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f73486173685769746e657373206973206e6f74206f6e20637572766500000000604482015260640161041c565b610e50878a8887610687565b610eb6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f6164647228632a706b2b732a6729213d5f755769746e65737300000000000000604482015260640161041c565b6000610ec28a87610497565b90506000610ed5898b878b8689896110ba565b90506000610ee6838d8d8a866105ef565b9050808a14610f51576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f696e76616c69642070726f6f6600000000000000000000000000000000000000604482015260640161041c565b505050505050505050505050565b80516000907ffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffefffffc2f11610fec576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f696e76616c696420782d6f7264696e6174650000000000000000000000000000604482015260640161041c565b60208201517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f11611079576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f696e76616c696420792d6f7264696e6174650000000000000000000000000000604482015260640161041c565b60208201517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f9080096110b38360005b60200201516109c0565b1492915050565b6110c26113cd565b825186517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f9081900691061415611155576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f706f696e747320696e2073756d206d7573742062652064697374696e63740000604482015260640161041c565b611160878988610aa1565b6111c6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4669727374206d756c20636865636b206661696c656400000000000000000000604482015260640161041c565b6111d1848685610aa1565b611237576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f5365636f6e64206d756c20636865636b206661696c6564000000000000000000604482015260640161041c565b610357868484610363565b61124a6113cd565b61125382610c2e565b81526112686112638260006110a9565b61064d565b602082018190526002900660011415610c83576020810180517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f039052919050565b6000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8487097ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8487099097909650945050505050565b600080807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f878509905060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f87877ffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffefffffc2f030990507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8183087ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f86890990999098509650505050505050565b60405180604001604052806002906020820280368337509192915050565b6040518060c001604052806006906020820280368337509192915050565b60405180602001604052806001906020820280368337509192915050565b803573ffffffffffffffffffffffffffffffffffffffff81168114610c8357600080fd5b600082601f83011261145c57600080fd5b6040516040810181811067ffffffffffffffff8211171561147f5761147f611af8565b806040525080838560408601111561149657600080fd5b60005b60028110156114b8578135835260209283019290910190600101611499565b509195945050505050565b6000604082840312156114d557600080fd5b610279838361144b565b600080600080600061012086880312156114f857600080fd5b611502878761144b565b9450611511876040880161144b565b9350611520876080880161144b565b925061152e60c08701611427565b915061153d8760e0880161144b565b90509295509295909350565b600080600060a0848603121561155e57600080fd5b611568858561144b565b9250611577856040860161144b565b9150608084013590509250925092565b60008060008060008060008060006101a08a8c0312156115a657600080fd5b6115b08b8b61144b565b98506115bf8b60408c0161144b565b975060808a0135965060a08a0135955060c08a013594506115e260e08b01611427565b93506115f28b6101008c0161144b565b92506116028b6101408c0161144b565b91506101808a013590509295985092959850929598565b6000806060838503121561162c57600080fd5b611636848461144b565b946040939093013593505050565b600080600060a0848603121561165957600080fd5b611663858561144b565b925060408401359150611679856060860161144b565b90509250925092565b60006020828403121561169457600080fd5b813567ffffffffffffffff808211156116ac57600080fd5b818401915084601f8301126116c057600080fd5b8135818111156116d2576116d2611af8565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190838211818310171561171857611718611af8565b8160405282815287602084870101111561173157600080fd5b826020860160208301376000928101602001929092525095945050505
050565b6000808284036101c081121561176657600080fd5b6101a08082121561177657600080fd5b61177e611a25565b915061178a868661144b565b8252611799866040870161144b565b60208301526080850135604083015260a0850135606083015260c085013560808301526117c860e08601611427565b60a08301526101006117dc8782880161144b565b60c08401526117ef87610140880161144b565b60e084015261018086013590830152909593013593505050565b60006020828403121561181b57600080fd5b5035919050565b6000806000806000806000610160888a03121561183e57600080fd5b8735965061184f8960208a0161144b565b955061185e8960608a0161144b565b945060a088013593506118748960c08a0161144b565b9250611884896101008a0161144b565b9150610140880135905092959891949750929550565b60008060008060a085870312156118b057600080fd5b843593506118c1866020870161144b565b9250606085013591506118d660808601611427565b905092959194509250565b600080604083850312156118f457600080fd5b50508035926020909101359150565b6000806000806080858703121561191957600080fd5b5050823594602084013594506040840135936060013592509050565b8060005b6002811015611958578151845260209384019390910190600101611939565b50505050565b6119688183611935565b604001919050565b8681526119806020820187611935565b61198d6060820186611935565b61199a60a0820185611935565b6119a760e0820184611935565b60609190911b7fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166101208201526101340195945050505050565b8381526119f26020820184611935565b606081019190915260800192915050565b6040810161027c8284611935565b828152606081016102606020830184611935565b604051610120810167ffffffffffffffff81118282101715611a4957611a49611af8565b60405290565b60008219821115611a89577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b500190565b600082611ac4577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500690565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", 
+} + +var VRFV08TestHelperABI = VRFV08TestHelperMetaData.ABI + +var VRFV08TestHelperBin = VRFV08TestHelperMetaData.Bin + +func DeployVRFV08TestHelper(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRFV08TestHelper, error) { + parsed, err := VRFV08TestHelperMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV08TestHelperBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV08TestHelper{address: address, abi: *parsed, VRFV08TestHelperCaller: VRFV08TestHelperCaller{contract: contract}, VRFV08TestHelperTransactor: VRFV08TestHelperTransactor{contract: contract}, VRFV08TestHelperFilterer: VRFV08TestHelperFilterer{contract: contract}}, nil +} + +type VRFV08TestHelper struct { + address common.Address + abi abi.ABI + VRFV08TestHelperCaller + VRFV08TestHelperTransactor + VRFV08TestHelperFilterer +} + +type VRFV08TestHelperCaller struct { + contract *bind.BoundContract +} + +type VRFV08TestHelperTransactor struct { + contract *bind.BoundContract +} + +type VRFV08TestHelperFilterer struct { + contract *bind.BoundContract +} + +type VRFV08TestHelperSession struct { + Contract *VRFV08TestHelper + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV08TestHelperCallerSession struct { + Contract *VRFV08TestHelperCaller + CallOpts bind.CallOpts +} + +type VRFV08TestHelperTransactorSession struct { + Contract *VRFV08TestHelperTransactor + TransactOpts bind.TransactOpts +} + +type VRFV08TestHelperRaw struct { + Contract *VRFV08TestHelper +} + +type VRFV08TestHelperCallerRaw struct { + Contract *VRFV08TestHelperCaller +} + +type VRFV08TestHelperTransactorRaw struct { + Contract *VRFV08TestHelperTransactor +} + +func NewVRFV08TestHelper(address common.Address, 
backend bind.ContractBackend) (*VRFV08TestHelper, error) { + abi, err := abi.JSON(strings.NewReader(VRFV08TestHelperABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV08TestHelper(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV08TestHelper{address: address, abi: abi, VRFV08TestHelperCaller: VRFV08TestHelperCaller{contract: contract}, VRFV08TestHelperTransactor: VRFV08TestHelperTransactor{contract: contract}, VRFV08TestHelperFilterer: VRFV08TestHelperFilterer{contract: contract}}, nil +} + +func NewVRFV08TestHelperCaller(address common.Address, caller bind.ContractCaller) (*VRFV08TestHelperCaller, error) { + contract, err := bindVRFV08TestHelper(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV08TestHelperCaller{contract: contract}, nil +} + +func NewVRFV08TestHelperTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV08TestHelperTransactor, error) { + contract, err := bindVRFV08TestHelper(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV08TestHelperTransactor{contract: contract}, nil +} + +func NewVRFV08TestHelperFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV08TestHelperFilterer, error) { + contract, err := bindVRFV08TestHelper(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV08TestHelperFilterer{contract: contract}, nil +} + +func bindVRFV08TestHelper(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV08TestHelperMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV08TestHelper *VRFV08TestHelperRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return 
_VRFV08TestHelper.Contract.VRFV08TestHelperCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV08TestHelper *VRFV08TestHelperRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV08TestHelper.Contract.VRFV08TestHelperTransactor.contract.Transfer(opts) +} + +func (_VRFV08TestHelper *VRFV08TestHelperRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV08TestHelper.Contract.VRFV08TestHelperTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV08TestHelper.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV08TestHelper *VRFV08TestHelperTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV08TestHelper.Contract.contract.Transfer(opts) +} + +func (_VRFV08TestHelper *VRFV08TestHelperTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV08TestHelper.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) AffineECAdd(opts *bind.CallOpts, p1 [2]*big.Int, p2 [2]*big.Int, invZ *big.Int) ([2]*big.Int, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "affineECAdd_", p1, p2, invZ) + + if err != nil { + return *new([2]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([2]*big.Int)).(*[2]*big.Int) + + return out0, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) AffineECAdd(p1 [2]*big.Int, p2 [2]*big.Int, invZ *big.Int) ([2]*big.Int, error) { + return _VRFV08TestHelper.Contract.AffineECAdd(&_VRFV08TestHelper.CallOpts, p1, p2, invZ) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) AffineECAdd(p1 [2]*big.Int, p2 [2]*big.Int, invZ *big.Int) ([2]*big.Int, error) { + return _VRFV08TestHelper.Contract.AffineECAdd(&_VRFV08TestHelper.CallOpts, p1, p2, invZ) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) BigModExp(opts *bind.CallOpts, base *big.Int, exponent *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "bigModExp_", base, exponent) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) BigModExp(base *big.Int, exponent *big.Int) (*big.Int, error) { + return _VRFV08TestHelper.Contract.BigModExp(&_VRFV08TestHelper.CallOpts, base, exponent) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) BigModExp(base *big.Int, exponent *big.Int) (*big.Int, error) { + return _VRFV08TestHelper.Contract.BigModExp(&_VRFV08TestHelper.CallOpts, base, exponent) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) EcmulVerify(opts *bind.CallOpts, x [2]*big.Int, scalar *big.Int, q [2]*big.Int) (bool, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "ecmulVerify_", x, scalar, q) + + if err != nil { + return *new(bool), err + } 
+ + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) EcmulVerify(x [2]*big.Int, scalar *big.Int, q [2]*big.Int) (bool, error) { + return _VRFV08TestHelper.Contract.EcmulVerify(&_VRFV08TestHelper.CallOpts, x, scalar, q) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) EcmulVerify(x [2]*big.Int, scalar *big.Int, q [2]*big.Int) (bool, error) { + return _VRFV08TestHelper.Contract.EcmulVerify(&_VRFV08TestHelper.CallOpts, x, scalar, q) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) FieldHash(opts *bind.CallOpts, b []byte) (*big.Int, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "fieldHash_", b) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) FieldHash(b []byte) (*big.Int, error) { + return _VRFV08TestHelper.Contract.FieldHash(&_VRFV08TestHelper.CallOpts, b) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) FieldHash(b []byte) (*big.Int, error) { + return _VRFV08TestHelper.Contract.FieldHash(&_VRFV08TestHelper.CallOpts, b) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) HashToCurve(opts *bind.CallOpts, pk [2]*big.Int, x *big.Int) ([2]*big.Int, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "hashToCurve_", pk, x) + + if err != nil { + return *new([2]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([2]*big.Int)).(*[2]*big.Int) + + return out0, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) HashToCurve(pk [2]*big.Int, x *big.Int) ([2]*big.Int, error) { + return _VRFV08TestHelper.Contract.HashToCurve(&_VRFV08TestHelper.CallOpts, pk, x) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) HashToCurve(pk [2]*big.Int, x *big.Int) ([2]*big.Int, error) { + return 
_VRFV08TestHelper.Contract.HashToCurve(&_VRFV08TestHelper.CallOpts, pk, x) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) IsOnCurve(opts *bind.CallOpts, p [2]*big.Int) (bool, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "isOnCurve_", p) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) IsOnCurve(p [2]*big.Int) (bool, error) { + return _VRFV08TestHelper.Contract.IsOnCurve(&_VRFV08TestHelper.CallOpts, p) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) IsOnCurve(p [2]*big.Int) (bool, error) { + return _VRFV08TestHelper.Contract.IsOnCurve(&_VRFV08TestHelper.CallOpts, p) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) LinearCombination(opts *bind.CallOpts, c *big.Int, p1 [2]*big.Int, cp1Witness [2]*big.Int, s *big.Int, p2 [2]*big.Int, sp2Witness [2]*big.Int, zInv *big.Int) ([2]*big.Int, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "linearCombination_", c, p1, cp1Witness, s, p2, sp2Witness, zInv) + + if err != nil { + return *new([2]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([2]*big.Int)).(*[2]*big.Int) + + return out0, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) LinearCombination(c *big.Int, p1 [2]*big.Int, cp1Witness [2]*big.Int, s *big.Int, p2 [2]*big.Int, sp2Witness [2]*big.Int, zInv *big.Int) ([2]*big.Int, error) { + return _VRFV08TestHelper.Contract.LinearCombination(&_VRFV08TestHelper.CallOpts, c, p1, cp1Witness, s, p2, sp2Witness, zInv) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) LinearCombination(c *big.Int, p1 [2]*big.Int, cp1Witness [2]*big.Int, s *big.Int, p2 [2]*big.Int, sp2Witness [2]*big.Int, zInv *big.Int) ([2]*big.Int, error) { + return _VRFV08TestHelper.Contract.LinearCombination(&_VRFV08TestHelper.CallOpts, c, p1, cp1Witness, s, p2, sp2Witness, zInv) +} + 
+func (_VRFV08TestHelper *VRFV08TestHelperCaller) ProjectiveECAdd(opts *bind.CallOpts, px *big.Int, py *big.Int, qx *big.Int, qy *big.Int) (*big.Int, *big.Int, *big.Int, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "projectiveECAdd_", px, py, qx, qy) + + if err != nil { + return *new(*big.Int), *new(*big.Int), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + out2 := *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + + return out0, out1, out2, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) ProjectiveECAdd(px *big.Int, py *big.Int, qx *big.Int, qy *big.Int) (*big.Int, *big.Int, *big.Int, error) { + return _VRFV08TestHelper.Contract.ProjectiveECAdd(&_VRFV08TestHelper.CallOpts, px, py, qx, qy) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) ProjectiveECAdd(px *big.Int, py *big.Int, qx *big.Int, qy *big.Int) (*big.Int, *big.Int, *big.Int, error) { + return _VRFV08TestHelper.Contract.ProjectiveECAdd(&_VRFV08TestHelper.CallOpts, px, py, qx, qy) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) RandomValueFromVRFProof(opts *bind.CallOpts, proof VRFProof, seed *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "randomValueFromVRFProof_", proof, seed) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) RandomValueFromVRFProof(proof VRFProof, seed *big.Int) (*big.Int, error) { + return _VRFV08TestHelper.Contract.RandomValueFromVRFProof(&_VRFV08TestHelper.CallOpts, proof, seed) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) RandomValueFromVRFProof(proof VRFProof, seed *big.Int) (*big.Int, error) { + return _VRFV08TestHelper.Contract.RandomValueFromVRFProof(&_VRFV08TestHelper.CallOpts, 
proof, seed) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) ScalarFromCurvePoints(opts *bind.CallOpts, hash [2]*big.Int, pk [2]*big.Int, gamma [2]*big.Int, uWitness common.Address, v [2]*big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "scalarFromCurvePoints_", hash, pk, gamma, uWitness, v) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) ScalarFromCurvePoints(hash [2]*big.Int, pk [2]*big.Int, gamma [2]*big.Int, uWitness common.Address, v [2]*big.Int) (*big.Int, error) { + return _VRFV08TestHelper.Contract.ScalarFromCurvePoints(&_VRFV08TestHelper.CallOpts, hash, pk, gamma, uWitness, v) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) ScalarFromCurvePoints(hash [2]*big.Int, pk [2]*big.Int, gamma [2]*big.Int, uWitness common.Address, v [2]*big.Int) (*big.Int, error) { + return _VRFV08TestHelper.Contract.ScalarFromCurvePoints(&_VRFV08TestHelper.CallOpts, hash, pk, gamma, uWitness, v) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) SquareRoot(opts *bind.CallOpts, x *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "squareRoot_", x) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) SquareRoot(x *big.Int) (*big.Int, error) { + return _VRFV08TestHelper.Contract.SquareRoot(&_VRFV08TestHelper.CallOpts, x) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) SquareRoot(x *big.Int) (*big.Int, error) { + return _VRFV08TestHelper.Contract.SquareRoot(&_VRFV08TestHelper.CallOpts, x) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) VerifyLinearCombinationWithGenerator(opts *bind.CallOpts, c *big.Int, p [2]*big.Int, s *big.Int, lcWitness 
common.Address) (bool, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "verifyLinearCombinationWithGenerator_", c, p, s, lcWitness) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) VerifyLinearCombinationWithGenerator(c *big.Int, p [2]*big.Int, s *big.Int, lcWitness common.Address) (bool, error) { + return _VRFV08TestHelper.Contract.VerifyLinearCombinationWithGenerator(&_VRFV08TestHelper.CallOpts, c, p, s, lcWitness) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) VerifyLinearCombinationWithGenerator(c *big.Int, p [2]*big.Int, s *big.Int, lcWitness common.Address) (bool, error) { + return _VRFV08TestHelper.Contract.VerifyLinearCombinationWithGenerator(&_VRFV08TestHelper.CallOpts, c, p, s, lcWitness) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) VerifyVRFProof(opts *bind.CallOpts, pk [2]*big.Int, gamma [2]*big.Int, c *big.Int, s *big.Int, seed *big.Int, uWitness common.Address, cGammaWitness [2]*big.Int, sHashWitness [2]*big.Int, zInv *big.Int) error { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "verifyVRFProof_", pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv) + + if err != nil { + return err + } + + return err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) VerifyVRFProof(pk [2]*big.Int, gamma [2]*big.Int, c *big.Int, s *big.Int, seed *big.Int, uWitness common.Address, cGammaWitness [2]*big.Int, sHashWitness [2]*big.Int, zInv *big.Int) error { + return _VRFV08TestHelper.Contract.VerifyVRFProof(&_VRFV08TestHelper.CallOpts, pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) VerifyVRFProof(pk [2]*big.Int, gamma [2]*big.Int, c *big.Int, s *big.Int, seed *big.Int, uWitness common.Address, cGammaWitness [2]*big.Int, sHashWitness 
[2]*big.Int, zInv *big.Int) error { + return _VRFV08TestHelper.Contract.VerifyVRFProof(&_VRFV08TestHelper.CallOpts, pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCaller) YSquared(opts *bind.CallOpts, x *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV08TestHelper.contract.Call(opts, &out, "ySquared_", x) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV08TestHelper *VRFV08TestHelperSession) YSquared(x *big.Int) (*big.Int, error) { + return _VRFV08TestHelper.Contract.YSquared(&_VRFV08TestHelper.CallOpts, x) +} + +func (_VRFV08TestHelper *VRFV08TestHelperCallerSession) YSquared(x *big.Int) (*big.Int, error) { + return _VRFV08TestHelper.Contract.YSquared(&_VRFV08TestHelper.CallOpts, x) +} + +func (_VRFV08TestHelper *VRFV08TestHelper) Address() common.Address { + return _VRFV08TestHelper.address +} + +type VRFV08TestHelperInterface interface { + AffineECAdd(opts *bind.CallOpts, p1 [2]*big.Int, p2 [2]*big.Int, invZ *big.Int) ([2]*big.Int, error) + + BigModExp(opts *bind.CallOpts, base *big.Int, exponent *big.Int) (*big.Int, error) + + EcmulVerify(opts *bind.CallOpts, x [2]*big.Int, scalar *big.Int, q [2]*big.Int) (bool, error) + + FieldHash(opts *bind.CallOpts, b []byte) (*big.Int, error) + + HashToCurve(opts *bind.CallOpts, pk [2]*big.Int, x *big.Int) ([2]*big.Int, error) + + IsOnCurve(opts *bind.CallOpts, p [2]*big.Int) (bool, error) + + LinearCombination(opts *bind.CallOpts, c *big.Int, p1 [2]*big.Int, cp1Witness [2]*big.Int, s *big.Int, p2 [2]*big.Int, sp2Witness [2]*big.Int, zInv *big.Int) ([2]*big.Int, error) + + ProjectiveECAdd(opts *bind.CallOpts, px *big.Int, py *big.Int, qx *big.Int, qy *big.Int) (*big.Int, *big.Int, *big.Int, error) + + RandomValueFromVRFProof(opts *bind.CallOpts, proof VRFProof, seed *big.Int) (*big.Int, error) + + ScalarFromCurvePoints(opts 
*bind.CallOpts, hash [2]*big.Int, pk [2]*big.Int, gamma [2]*big.Int, uWitness common.Address, v [2]*big.Int) (*big.Int, error) + + SquareRoot(opts *bind.CallOpts, x *big.Int) (*big.Int, error) + + VerifyLinearCombinationWithGenerator(opts *bind.CallOpts, c *big.Int, p [2]*big.Int, s *big.Int, lcWitness common.Address) (bool, error) + + VerifyVRFProof(opts *bind.CallOpts, pk [2]*big.Int, gamma [2]*big.Int, c *big.Int, s *big.Int, seed *big.Int, uWitness common.Address, cGammaWitness [2]*big.Int, sHashWitness [2]*big.Int, zInv *big.Int) error + + YSquared(opts *bind.CallOpts, x *big.Int) (*big.Int, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/solidity_vrf_verifier_wrapper/solidity_vrf_verifier_wrapper.go b/core/gethwrappers/generated/solidity_vrf_verifier_wrapper/solidity_vrf_verifier_wrapper.go new file mode 100644 index 00000000..b46cc3de --- /dev/null +++ b/core/gethwrappers/generated/solidity_vrf_verifier_wrapper/solidity_vrf_verifier_wrapper.go @@ -0,0 +1,514 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package solidity_vrf_verifier_wrapper + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFTestHelperMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"PROOF_LENGTH\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"p1\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"p2\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"invZ\",\"type\":\"uint256\"}],\"name\":\"affineECAdd_\",\"outputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"\",\"type\":\"uint256[2]\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"base\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"exponent\",\"type\":\"uint256\"}],\"name\":\"bigModExp_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"x\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"},{\"internalType\":\"uint256[2]\",\"name\":\"q\",\"type\":\"uint256[2]\"}],\"name\":\"ecmulVerify_\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"b\",\"type\":\"bytes\"}],\"name\":\"fieldHash_\",
\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"x\",\"type\":\"uint256\"}],\"name\":\"hashToCurve_\",\"outputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"\",\"type\":\"uint256[2]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256[2]\",\"name\":\"p1\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"cp1Witness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256[2]\",\"name\":\"p2\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sp2Witness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"name\":\"linearCombination_\",\"outputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"\",\"type\":\"uint256[2]\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"px\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"py\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"qx\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"qy\",\"type\":\"uint256\"}],\"name\":\"projectiveECAdd_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"proof\",\"type\":\"bytes\"}],\"name\":\"randomValueFromVRFProof_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"output\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":
[{\"internalType\":\"uint256[2]\",\"name\":\"hash\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"v\",\"type\":\"uint256[2]\"}],\"name\":\"scalarFromCurvePoints_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"x\",\"type\":\"uint256\"}],\"name\":\"squareRoot_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256[2]\",\"name\":\"p\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"lcWitness\",\"type\":\"address\"}],\"name\":\"verifyLinearCombinationWithGenerator_\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"name\":\"verifyVRFProof_\",\"outputs\":[],\"stateMutability\"
:\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"x\",\"type\":\"uint256\"}],\"name\":\"ySquared_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b50611ad2806100206000396000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c80639d6f03371161008c578063cefda0c511610066578063cefda0c514610538578063e911439c146105de578063ef3b10ec146105e6578063fe54f2a2146106e2576100ea565b80639d6f0337146103f6578063aa7b2fbb14610413578063b481e26014610492576100ea565b80637f8f50a8116100c85780637f8f50a8146102225780638af046ea1461030a57806391d5f6911461032757806395e6ee92146103a9576100ea565b8063244f896d146100ef57806335452450146101a05780635de60042146101ed575b600080fd5b610165600480360360a081101561010557600080fd5b60408051808201825291830192918183019183906002908390839080828437600092019190915250506040805180820182529295949381810193925090600290839083908082843760009201919091525091945050903591506107be9050565b6040518082600260200280838360005b8381101561018d578181015183820152602001610175565b5050505090500191505060405180910390f35b610165600480360360608110156101b657600080fd5b6040805180820182529183019291818301918390600290839083908082843760009201919091525091945050903591506107d99050565b6102106004803603604081101561020357600080fd5b50803590602001356107f4565b60408051918252519081900360200190f35b610210600480360361012081101561023957600080fd5b604080518082018252918301929181830191839060029083908390808284376000920191909152505060408051808201825292959493818101939250906002908390839080828437600092019190915250506040805180820182529295949381810193925090600290839083908082843760009201919091525050604080518082018252929573ffffffffffffffffffffffffffffffffffffffff85351695909490936060820193509160209091019060029083908390808284376000920191909152509194506108009350505050565b6102106004803603602081101561032057600080fd5b5035610819565b610395600480360360a081101561033d576000
80fd5b6040805180820182528335939283019291606083019190602084019060029083908390808284376000920191909152509194505081359250506020013573ffffffffffffffffffffffffffffffffffffffff1661082c565b604080519115158252519081900360200190f35b6103d8600480360360808110156103bf57600080fd5b5080359060208101359060408101359060600135610843565b60408051938452602084019290925282820152519081900360600190f35b6102106004803603602081101561040c57600080fd5b5035610864565b610395600480360360a081101561042957600080fd5b604080518082018252918301929181830191839060029083908390808284376000920191909152505060408051808201825292958435959094909360608201935091602090910190600290839083908082843760009201919091525091945061086f9350505050565b610210600480360360208110156104a857600080fd5b8101906020810181356401000000008111156104c357600080fd5b8201836020820111156104d557600080fd5b803590602001918460018302840111640100000000831117156104f757600080fd5b91908080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525092955061087c945050505050565b6102106004803603602081101561054e57600080fd5b81019060208101813564010000000081111561056957600080fd5b82018360208201111561057b57600080fd5b8035906020019184600183028401116401000000008311171561059d57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610887945050505050565b610210610892565b6106e060048036036101a08110156105fd57600080fd5b604080518082018252918301929181830191839060029083908390808284376000920191909152505060408051808201825292959493818101939250906002908390839080828437600092019190915250506040805180820182529295843595602086013595838101359573ffffffffffffffffffffffffffffffffffffffff60608301351695509293919260c08201929091608001906002908390839080828437600092019190915250506040805180820182529295949381810193925090600290839083908082843760009201919091525091945050903591506108989050565b005b61016560048036036101608110156106f957600080fd5b6040805180820182528335939283019291606083019190602084019060029083908390808284376000
92019190915250506040805180820182529295949381810193925090600290839083908082843760009201919091525050604080518082018252929584359590949093606082019350916020909101906002908390839080828437600092019190915250506040805180820182529295949381810193925090600290839083908082843760009201919091525091945050903591506108b49050565b6107c6611a0a565b6107d18484846108d7565b949350505050565b6107e1611a0a565b6107eb8383610a05565b90505b92915050565b60006107eb8383610aa8565b600061080f8686868686610ba1565b9695505050505050565b600061082482610cc4565b90505b919050565b600061083a85858585610cf0565b95945050505050565b600080600061085487878787610ebc565b9250925092509450945094915050565b600061082482611052565b60006107d18484846110aa565b600061082482611210565b600061082482611265565b6101a081565b6108a98989898989898989896113d3565b505050505050505050565b6108bc611a0a565b6108cb888888888888886116d4565b98975050505050505050565b6108df611a0a565b83516020808601518551918601516000938493849361090093909190610ebc565b919450925090507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f85820960011461099957604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f696e765a206d75737420626520696e7665727365206f66207a00000000000000604482015290519081900360640190fd5b60405180604001604052807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f806109cc57fe5b87860981526020017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8785099052979650505050505050565b610a0d611a0a565b610a6b600184846040516020018084815260200183600260200280838360005b83811015610a45578181015183820152602001610a2d565b50505050905001828152602001935050505060405160208183030381529060405261183b565b90505b610a77816118a9565b6107ee578051604080516020818101939093528151808203909301835281019052610aa19061183b565b9050610a6e565b600080610ab3611a28565b6020808252818101819052604082015260608101859052608081018490527ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f60a0820152610aff611a46565b60208160c08460057fffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa925082610b9757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6269674d6f64457870206661696c757265210000000000000000000000000000604482015290519081900360640190fd5b5195945050505050565b6000600286868685876040516020018087815260200186600260200280838360005b83811015610bdb578181015183820152602001610bc3565b5050505090500185600260200280838360005b83811015610c06578181015183820152602001610bee565b5050505090500184600260200280838360005b83811015610c31578181015183820152602001610c19565b5050505090500183600260200280838360005b83811015610c5c578181015183820152602001610c44565b505050509050018273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660601b815260140196505050505050506040516020818303038152906040528051906020012060001c905095945050505050565b6000610824827f3fffffffffffffffffffffffffffffffffffffffffffffffffffffffbfffff0c610aa8565b600073ffffffffffffffffffffffffffffffffffffffff8216610d7457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f626164207769746e657373000000000000000000000000000000000000000000604482015290519081900360640190fd5b602084015160009060011615610d8b57601c610d8e565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641418587600060200201510986517ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd036414191820392506000919089098751604080516000808252602082810180855288905260ff8916838501526060830194909452608082018590529151939450909260019260a08084019391927fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081019281900390910190855afa158015610e69573d6000803e3d6000fd5b50506040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0015173ffffffffffffffffffffffffffffffffffffffff9081169088161495505050505050949350505050565b60008080600180827ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f897fffffffffffffffffffffff
fffffffffffffffffffffffffffffffffefffffc2f038808905060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f038a0890506000610f64838385856118e7565b9098509050610f7588828e8861193f565b9098509050610f8688828c8761193f565b90985090506000610f998d878b8561193f565b9098509050610faa888286866118e7565b9098509050610fbb88828e8961193f565b909850905081811461103e577ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f818a0998507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f82890997507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8183099650611042565b8196505b5050505050509450945094915050565b6000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80848509840990507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f600782089392505050565b6000826110b657600080fd5b83516020850151600090600116156110cf57601c6110d2565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141838709604080516000808252602080830180855282905260ff871683850152606083018890526080830185905292519394509260019260a08084019391927fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081019281900390910190855afa158015611174573d6000803e3d6000fd5b5050506020604051035190506000866040516020018082600260200280838360005b838110156111ae578181015183820152602001611196565b505050509050019150506040516020818303038152906040528051906020012060001c90508073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614955050505050509392505050565b805160208201205b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f811061082757604080516020808201939093528151808203840181529082019091528051910120611218565b60006101a08251146112d857604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f77726f6e672070726f6f66206c656e6774680000000000000000000000000000604482015290519081900360640190fd5b6112
e0611a0a565b6112e8611a0a565b6112f0611a64565b60006112fa611a0a565b611302611a0a565b6000888060200190516101a081101561131a57600080fd5b5060e081015161018082015191985060408901975060808901965094506101008801935061014088019250905061136d8787876000602002015188600160200201518960026020020151898989896113d3565b6003866040516020018083815260200182600260200280838360005b838110156113a1578181015183820152602001611389565b50505050905001925050506040516020818303038152906040528051906020012060001c975050505050505050919050565b6113dc896118a9565b61144757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f7075626c6963206b6579206973206e6f74206f6e206375727665000000000000604482015290519081900360640190fd5b611450886118a9565b6114bb57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f67616d6d61206973206e6f74206f6e2063757276650000000000000000000000604482015290519081900360640190fd5b6114c4836118a9565b61152f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f6347616d6d615769746e657373206973206e6f74206f6e206375727665000000604482015290519081900360640190fd5b611538826118a9565b6115a357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f73486173685769746e657373206973206e6f74206f6e20637572766500000000604482015290519081900360640190fd5b6115af878a8887610cf0565b61161a57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f6164647228632a706b2b732a6729e289a05f755769746e657373000000000000604482015290519081900360640190fd5b611622611a0a565b61162c8a87610a05565b9050611636611a0a565b611645898b878b8689896116d4565b90506000611656838d8d8a86610ba1565b9050808a146116c657604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f696e76616c69642070726f6f660000000000000000000000000000000000000060448201529051908190036064
0190fd5b505050505050505050505050565b6116dc611a0a565b825186517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f9190030661177057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f706f696e747320696e2073756d206d7573742062652064697374696e63740000604482015290519081900360640190fd5b61177b8789886110aa565b6117d0576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526021815260200180611a836021913960400191505060405180910390fd5b6117db8486856110aa565b611830576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526022815260200180611aa46022913960400191505060405180910390fd5b6108cb8684846108d7565b611843611a0a565b61184c82611210565b81526118676118628260005b6020020151611052565b610cc4565b602082018190526002900660011415610827576020810180517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f039052919050565b60208101516000907ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f9080096118e0836000611858565b1492915050565b6000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8487097ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8487099097909650945050505050565b600080807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f878509905060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f87877ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f030990507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8183087ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f86890990999098509650505050505050565b60405180604001604052806002906020820280368337509192915050565b6040518060c001604052806006906020820280368337509192915050565b60405180602001604052806001906020820280368337509192915050565b6040518060600160405280600390602082028036833750919291505056fe4669727374206d756c7469706c69636174696f6e20636865636b206661696c65645365636f6e6420
6d756c7469706c69636174696f6e20636865636b206661696c6564a164736f6c6343000606000a", +} + +var VRFTestHelperABI = VRFTestHelperMetaData.ABI + +var VRFTestHelperBin = VRFTestHelperMetaData.Bin + +func DeployVRFTestHelper(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRFTestHelper, error) { + parsed, err := VRFTestHelperMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFTestHelperBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFTestHelper{address: address, abi: *parsed, VRFTestHelperCaller: VRFTestHelperCaller{contract: contract}, VRFTestHelperTransactor: VRFTestHelperTransactor{contract: contract}, VRFTestHelperFilterer: VRFTestHelperFilterer{contract: contract}}, nil +} + +type VRFTestHelper struct { + address common.Address + abi abi.ABI + VRFTestHelperCaller + VRFTestHelperTransactor + VRFTestHelperFilterer +} + +type VRFTestHelperCaller struct { + contract *bind.BoundContract +} + +type VRFTestHelperTransactor struct { + contract *bind.BoundContract +} + +type VRFTestHelperFilterer struct { + contract *bind.BoundContract +} + +type VRFTestHelperSession struct { + Contract *VRFTestHelper + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFTestHelperCallerSession struct { + Contract *VRFTestHelperCaller + CallOpts bind.CallOpts +} + +type VRFTestHelperTransactorSession struct { + Contract *VRFTestHelperTransactor + TransactOpts bind.TransactOpts +} + +type VRFTestHelperRaw struct { + Contract *VRFTestHelper +} + +type VRFTestHelperCallerRaw struct { + Contract *VRFTestHelperCaller +} + +type VRFTestHelperTransactorRaw struct { + Contract *VRFTestHelperTransactor +} + +func NewVRFTestHelper(address common.Address, backend 
bind.ContractBackend) (*VRFTestHelper, error) { + abi, err := abi.JSON(strings.NewReader(VRFTestHelperABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFTestHelper(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFTestHelper{address: address, abi: abi, VRFTestHelperCaller: VRFTestHelperCaller{contract: contract}, VRFTestHelperTransactor: VRFTestHelperTransactor{contract: contract}, VRFTestHelperFilterer: VRFTestHelperFilterer{contract: contract}}, nil +} + +func NewVRFTestHelperCaller(address common.Address, caller bind.ContractCaller) (*VRFTestHelperCaller, error) { + contract, err := bindVRFTestHelper(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFTestHelperCaller{contract: contract}, nil +} + +func NewVRFTestHelperTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFTestHelperTransactor, error) { + contract, err := bindVRFTestHelper(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFTestHelperTransactor{contract: contract}, nil +} + +func NewVRFTestHelperFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFTestHelperFilterer, error) { + contract, err := bindVRFTestHelper(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFTestHelperFilterer{contract: contract}, nil +} + +func bindVRFTestHelper(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFTestHelperMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFTestHelper *VRFTestHelperRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFTestHelper.Contract.VRFTestHelperCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFTestHelper *VRFTestHelperRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFTestHelper.Contract.VRFTestHelperTransactor.contract.Transfer(opts) +} + +func (_VRFTestHelper *VRFTestHelperRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFTestHelper.Contract.VRFTestHelperTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFTestHelper *VRFTestHelperCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFTestHelper.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFTestHelper *VRFTestHelperTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFTestHelper.Contract.contract.Transfer(opts) +} + +func (_VRFTestHelper *VRFTestHelperTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFTestHelper.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFTestHelper *VRFTestHelperCaller) PROOFLENGTH(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "PROOF_LENGTH") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFTestHelper *VRFTestHelperSession) PROOFLENGTH() (*big.Int, error) { + return _VRFTestHelper.Contract.PROOFLENGTH(&_VRFTestHelper.CallOpts) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) PROOFLENGTH() (*big.Int, error) { + return _VRFTestHelper.Contract.PROOFLENGTH(&_VRFTestHelper.CallOpts) +} + +func (_VRFTestHelper *VRFTestHelperCaller) AffineECAdd(opts *bind.CallOpts, p1 [2]*big.Int, p2 [2]*big.Int, invZ *big.Int) ([2]*big.Int, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "affineECAdd_", p1, p2, invZ) + + if err != nil { + return *new([2]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([2]*big.Int)).(*[2]*big.Int) + + return out0, err + +} + +func (_VRFTestHelper *VRFTestHelperSession) AffineECAdd(p1 [2]*big.Int, p2 [2]*big.Int, invZ *big.Int) ([2]*big.Int, error) { + return _VRFTestHelper.Contract.AffineECAdd(&_VRFTestHelper.CallOpts, p1, p2, invZ) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) AffineECAdd(p1 [2]*big.Int, p2 [2]*big.Int, invZ *big.Int) ([2]*big.Int, error) { + return _VRFTestHelper.Contract.AffineECAdd(&_VRFTestHelper.CallOpts, p1, p2, invZ) +} + +func (_VRFTestHelper *VRFTestHelperCaller) BigModExp(opts *bind.CallOpts, base *big.Int, exponent *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "bigModExp_", base, exponent) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFTestHelper *VRFTestHelperSession) BigModExp(base *big.Int, exponent *big.Int) (*big.Int, error) { + return 
_VRFTestHelper.Contract.BigModExp(&_VRFTestHelper.CallOpts, base, exponent) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) BigModExp(base *big.Int, exponent *big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.BigModExp(&_VRFTestHelper.CallOpts, base, exponent) +} + +func (_VRFTestHelper *VRFTestHelperCaller) EcmulVerify(opts *bind.CallOpts, x [2]*big.Int, scalar *big.Int, q [2]*big.Int) (bool, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "ecmulVerify_", x, scalar, q) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFTestHelper *VRFTestHelperSession) EcmulVerify(x [2]*big.Int, scalar *big.Int, q [2]*big.Int) (bool, error) { + return _VRFTestHelper.Contract.EcmulVerify(&_VRFTestHelper.CallOpts, x, scalar, q) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) EcmulVerify(x [2]*big.Int, scalar *big.Int, q [2]*big.Int) (bool, error) { + return _VRFTestHelper.Contract.EcmulVerify(&_VRFTestHelper.CallOpts, x, scalar, q) +} + +func (_VRFTestHelper *VRFTestHelperCaller) FieldHash(opts *bind.CallOpts, b []byte) (*big.Int, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "fieldHash_", b) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFTestHelper *VRFTestHelperSession) FieldHash(b []byte) (*big.Int, error) { + return _VRFTestHelper.Contract.FieldHash(&_VRFTestHelper.CallOpts, b) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) FieldHash(b []byte) (*big.Int, error) { + return _VRFTestHelper.Contract.FieldHash(&_VRFTestHelper.CallOpts, b) +} + +func (_VRFTestHelper *VRFTestHelperCaller) HashToCurve(opts *bind.CallOpts, pk [2]*big.Int, x *big.Int) ([2]*big.Int, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "hashToCurve_", pk, x) + + if err != 
nil { + return *new([2]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([2]*big.Int)).(*[2]*big.Int) + + return out0, err + +} + +func (_VRFTestHelper *VRFTestHelperSession) HashToCurve(pk [2]*big.Int, x *big.Int) ([2]*big.Int, error) { + return _VRFTestHelper.Contract.HashToCurve(&_VRFTestHelper.CallOpts, pk, x) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) HashToCurve(pk [2]*big.Int, x *big.Int) ([2]*big.Int, error) { + return _VRFTestHelper.Contract.HashToCurve(&_VRFTestHelper.CallOpts, pk, x) +} + +func (_VRFTestHelper *VRFTestHelperCaller) LinearCombination(opts *bind.CallOpts, c *big.Int, p1 [2]*big.Int, cp1Witness [2]*big.Int, s *big.Int, p2 [2]*big.Int, sp2Witness [2]*big.Int, zInv *big.Int) ([2]*big.Int, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "linearCombination_", c, p1, cp1Witness, s, p2, sp2Witness, zInv) + + if err != nil { + return *new([2]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([2]*big.Int)).(*[2]*big.Int) + + return out0, err + +} + +func (_VRFTestHelper *VRFTestHelperSession) LinearCombination(c *big.Int, p1 [2]*big.Int, cp1Witness [2]*big.Int, s *big.Int, p2 [2]*big.Int, sp2Witness [2]*big.Int, zInv *big.Int) ([2]*big.Int, error) { + return _VRFTestHelper.Contract.LinearCombination(&_VRFTestHelper.CallOpts, c, p1, cp1Witness, s, p2, sp2Witness, zInv) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) LinearCombination(c *big.Int, p1 [2]*big.Int, cp1Witness [2]*big.Int, s *big.Int, p2 [2]*big.Int, sp2Witness [2]*big.Int, zInv *big.Int) ([2]*big.Int, error) { + return _VRFTestHelper.Contract.LinearCombination(&_VRFTestHelper.CallOpts, c, p1, cp1Witness, s, p2, sp2Witness, zInv) +} + +func (_VRFTestHelper *VRFTestHelperCaller) ProjectiveECAdd(opts *bind.CallOpts, px *big.Int, py *big.Int, qx *big.Int, qy *big.Int) (*big.Int, *big.Int, *big.Int, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "projectiveECAdd_", px, py, qx, qy) + 
+ if err != nil { + return *new(*big.Int), *new(*big.Int), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + out2 := *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + + return out0, out1, out2, err + +} + +func (_VRFTestHelper *VRFTestHelperSession) ProjectiveECAdd(px *big.Int, py *big.Int, qx *big.Int, qy *big.Int) (*big.Int, *big.Int, *big.Int, error) { + return _VRFTestHelper.Contract.ProjectiveECAdd(&_VRFTestHelper.CallOpts, px, py, qx, qy) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) ProjectiveECAdd(px *big.Int, py *big.Int, qx *big.Int, qy *big.Int) (*big.Int, *big.Int, *big.Int, error) { + return _VRFTestHelper.Contract.ProjectiveECAdd(&_VRFTestHelper.CallOpts, px, py, qx, qy) +} + +func (_VRFTestHelper *VRFTestHelperCaller) RandomValueFromVRFProof(opts *bind.CallOpts, proof []byte) (*big.Int, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "randomValueFromVRFProof_", proof) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFTestHelper *VRFTestHelperSession) RandomValueFromVRFProof(proof []byte) (*big.Int, error) { + return _VRFTestHelper.Contract.RandomValueFromVRFProof(&_VRFTestHelper.CallOpts, proof) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) RandomValueFromVRFProof(proof []byte) (*big.Int, error) { + return _VRFTestHelper.Contract.RandomValueFromVRFProof(&_VRFTestHelper.CallOpts, proof) +} + +func (_VRFTestHelper *VRFTestHelperCaller) ScalarFromCurvePoints(opts *bind.CallOpts, hash [2]*big.Int, pk [2]*big.Int, gamma [2]*big.Int, uWitness common.Address, v [2]*big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "scalarFromCurvePoints_", hash, pk, gamma, uWitness, v) + + if err != nil { + return *new(*big.Int), err + } + + out0 := 
*abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFTestHelper *VRFTestHelperSession) ScalarFromCurvePoints(hash [2]*big.Int, pk [2]*big.Int, gamma [2]*big.Int, uWitness common.Address, v [2]*big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.ScalarFromCurvePoints(&_VRFTestHelper.CallOpts, hash, pk, gamma, uWitness, v) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) ScalarFromCurvePoints(hash [2]*big.Int, pk [2]*big.Int, gamma [2]*big.Int, uWitness common.Address, v [2]*big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.ScalarFromCurvePoints(&_VRFTestHelper.CallOpts, hash, pk, gamma, uWitness, v) +} + +func (_VRFTestHelper *VRFTestHelperCaller) SquareRoot(opts *bind.CallOpts, x *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "squareRoot_", x) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFTestHelper *VRFTestHelperSession) SquareRoot(x *big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.SquareRoot(&_VRFTestHelper.CallOpts, x) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) SquareRoot(x *big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.SquareRoot(&_VRFTestHelper.CallOpts, x) +} + +func (_VRFTestHelper *VRFTestHelperCaller) VerifyLinearCombinationWithGenerator(opts *bind.CallOpts, c *big.Int, p [2]*big.Int, s *big.Int, lcWitness common.Address) (bool, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "verifyLinearCombinationWithGenerator_", c, p, s, lcWitness) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFTestHelper *VRFTestHelperSession) VerifyLinearCombinationWithGenerator(c *big.Int, p [2]*big.Int, s *big.Int, lcWitness common.Address) (bool, error) { + return 
_VRFTestHelper.Contract.VerifyLinearCombinationWithGenerator(&_VRFTestHelper.CallOpts, c, p, s, lcWitness) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) VerifyLinearCombinationWithGenerator(c *big.Int, p [2]*big.Int, s *big.Int, lcWitness common.Address) (bool, error) { + return _VRFTestHelper.Contract.VerifyLinearCombinationWithGenerator(&_VRFTestHelper.CallOpts, c, p, s, lcWitness) +} + +func (_VRFTestHelper *VRFTestHelperCaller) VerifyVRFProof(opts *bind.CallOpts, pk [2]*big.Int, gamma [2]*big.Int, c *big.Int, s *big.Int, seed *big.Int, uWitness common.Address, cGammaWitness [2]*big.Int, sHashWitness [2]*big.Int, zInv *big.Int) error { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "verifyVRFProof_", pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv) + + if err != nil { + return err + } + + return err + +} + +func (_VRFTestHelper *VRFTestHelperSession) VerifyVRFProof(pk [2]*big.Int, gamma [2]*big.Int, c *big.Int, s *big.Int, seed *big.Int, uWitness common.Address, cGammaWitness [2]*big.Int, sHashWitness [2]*big.Int, zInv *big.Int) error { + return _VRFTestHelper.Contract.VerifyVRFProof(&_VRFTestHelper.CallOpts, pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) VerifyVRFProof(pk [2]*big.Int, gamma [2]*big.Int, c *big.Int, s *big.Int, seed *big.Int, uWitness common.Address, cGammaWitness [2]*big.Int, sHashWitness [2]*big.Int, zInv *big.Int) error { + return _VRFTestHelper.Contract.VerifyVRFProof(&_VRFTestHelper.CallOpts, pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv) +} + +func (_VRFTestHelper *VRFTestHelperCaller) YSquared(opts *bind.CallOpts, x *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFTestHelper.contract.Call(opts, &out, "ySquared_", x) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func 
(_VRFTestHelper *VRFTestHelperSession) YSquared(x *big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.YSquared(&_VRFTestHelper.CallOpts, x) +} + +func (_VRFTestHelper *VRFTestHelperCallerSession) YSquared(x *big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.YSquared(&_VRFTestHelper.CallOpts, x) +} + +func (_VRFTestHelper *VRFTestHelper) Address() common.Address { + return _VRFTestHelper.address +} + +type VRFTestHelperInterface interface { + PROOFLENGTH(opts *bind.CallOpts) (*big.Int, error) + + AffineECAdd(opts *bind.CallOpts, p1 [2]*big.Int, p2 [2]*big.Int, invZ *big.Int) ([2]*big.Int, error) + + BigModExp(opts *bind.CallOpts, base *big.Int, exponent *big.Int) (*big.Int, error) + + EcmulVerify(opts *bind.CallOpts, x [2]*big.Int, scalar *big.Int, q [2]*big.Int) (bool, error) + + FieldHash(opts *bind.CallOpts, b []byte) (*big.Int, error) + + HashToCurve(opts *bind.CallOpts, pk [2]*big.Int, x *big.Int) ([2]*big.Int, error) + + LinearCombination(opts *bind.CallOpts, c *big.Int, p1 [2]*big.Int, cp1Witness [2]*big.Int, s *big.Int, p2 [2]*big.Int, sp2Witness [2]*big.Int, zInv *big.Int) ([2]*big.Int, error) + + ProjectiveECAdd(opts *bind.CallOpts, px *big.Int, py *big.Int, qx *big.Int, qy *big.Int) (*big.Int, *big.Int, *big.Int, error) + + RandomValueFromVRFProof(opts *bind.CallOpts, proof []byte) (*big.Int, error) + + ScalarFromCurvePoints(opts *bind.CallOpts, hash [2]*big.Int, pk [2]*big.Int, gamma [2]*big.Int, uWitness common.Address, v [2]*big.Int) (*big.Int, error) + + SquareRoot(opts *bind.CallOpts, x *big.Int) (*big.Int, error) + + VerifyLinearCombinationWithGenerator(opts *bind.CallOpts, c *big.Int, p [2]*big.Int, s *big.Int, lcWitness common.Address) (bool, error) + + VerifyVRFProof(opts *bind.CallOpts, pk [2]*big.Int, gamma [2]*big.Int, c *big.Int, s *big.Int, seed *big.Int, uWitness common.Address, cGammaWitness [2]*big.Int, sHashWitness [2]*big.Int, zInv *big.Int) error + + YSquared(opts *bind.CallOpts, x *big.Int) (*big.Int, error) + + 
Address() common.Address +} diff --git a/core/gethwrappers/generated/solidity_vrf_wrapper/solidity_vrf_wrapper.go b/core/gethwrappers/generated/solidity_vrf_wrapper/solidity_vrf_wrapper.go new file mode 100644 index 00000000..82199d45 --- /dev/null +++ b/core/gethwrappers/generated/solidity_vrf_wrapper/solidity_vrf_wrapper.go @@ -0,0 +1,202 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package solidity_vrf_wrapper + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"PROOF_LENGTH\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: "0x6080604052348015600f57600080fd5b5060588061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063e911439c14602d575b600080fd5b60336045565b60408051918252519081900360200190f35b6101a08156fea164736f6c6343000606000a", +} + +var VRFABI = VRFMetaData.ABI + +var VRFBin = VRFMetaData.Bin + +func DeployVRF(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRF, error) { + parsed, err := VRFMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFBin), backend) + if err != nil 
{ + return common.Address{}, nil, nil, err + } + return address, tx, &VRF{address: address, abi: *parsed, VRFCaller: VRFCaller{contract: contract}, VRFTransactor: VRFTransactor{contract: contract}, VRFFilterer: VRFFilterer{contract: contract}}, nil +} + +type VRF struct { + address common.Address + abi abi.ABI + VRFCaller + VRFTransactor + VRFFilterer +} + +type VRFCaller struct { + contract *bind.BoundContract +} + +type VRFTransactor struct { + contract *bind.BoundContract +} + +type VRFFilterer struct { + contract *bind.BoundContract +} + +type VRFSession struct { + Contract *VRF + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFCallerSession struct { + Contract *VRFCaller + CallOpts bind.CallOpts +} + +type VRFTransactorSession struct { + Contract *VRFTransactor + TransactOpts bind.TransactOpts +} + +type VRFRaw struct { + Contract *VRF +} + +type VRFCallerRaw struct { + Contract *VRFCaller +} + +type VRFTransactorRaw struct { + Contract *VRFTransactor +} + +func NewVRF(address common.Address, backend bind.ContractBackend) (*VRF, error) { + abi, err := abi.JSON(strings.NewReader(VRFABI)) + if err != nil { + return nil, err + } + contract, err := bindVRF(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRF{address: address, abi: abi, VRFCaller: VRFCaller{contract: contract}, VRFTransactor: VRFTransactor{contract: contract}, VRFFilterer: VRFFilterer{contract: contract}}, nil +} + +func NewVRFCaller(address common.Address, caller bind.ContractCaller) (*VRFCaller, error) { + contract, err := bindVRF(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFCaller{contract: contract}, nil +} + +func NewVRFTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFTransactor, error) { + contract, err := bindVRF(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFTransactor{contract: contract}, nil +} + +func NewVRFFilterer(address 
common.Address, filterer bind.ContractFilterer) (*VRFFilterer, error) { + contract, err := bindVRF(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFFilterer{contract: contract}, nil +} + +func bindVRF(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRF *VRFRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRF.Contract.VRFCaller.contract.Call(opts, result, method, params...) +} + +func (_VRF *VRFRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRF.Contract.VRFTransactor.contract.Transfer(opts) +} + +func (_VRF *VRFRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRF.Contract.VRFTransactor.contract.Transact(opts, method, params...) +} + +func (_VRF *VRFCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRF.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRF *VRFTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRF.Contract.contract.Transfer(opts) +} + +func (_VRF *VRFTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRF.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRF *VRFCaller) PROOFLENGTH(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRF.contract.Call(opts, &out, "PROOF_LENGTH") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRF *VRFSession) PROOFLENGTH() (*big.Int, error) { + return _VRF.Contract.PROOFLENGTH(&_VRF.CallOpts) +} + +func (_VRF *VRFCallerSession) PROOFLENGTH() (*big.Int, error) { + return _VRF.Contract.PROOFLENGTH(&_VRF.CallOpts) +} + +func (_VRF *VRF) Address() common.Address { + return _VRF.address +} + +type VRFInterface interface { + PROOFLENGTH(opts *bind.CallOpts) (*big.Int, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/streams_lookup_compatible_interface/streams_lookup_compatible_interface.go b/core/gethwrappers/generated/streams_lookup_compatible_interface/streams_lookup_compatible_interface.go new file mode 100644 index 00000000..41155618 --- /dev/null +++ b/core/gethwrappers/generated/streams_lookup_compatible_interface/streams_lookup_compatible_interface.go @@ -0,0 +1,198 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package streams_lookup_compatible_interface + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var StreamsLookupCompatibleInterfaceMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"string\",\"name\":\"feedParamKey\",\"type\":\"string\"},{\"internalType\":\"string[]\",\"name\":\"feeds\",\"type\":\"string[]\"},{\"internalType\":\"string\",\"name\":\"timeParamKey\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"time\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"StreamsLookup\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes[]\",\"name\":\"values\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"checkCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"upkeepNeeded\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", +} + +var StreamsLookupCompatibleInterfaceABI = StreamsLookupCompatibleInterfaceMetaData.ABI + +type StreamsLookupCompatibleInterface struct { + address common.Address + abi abi.ABI + StreamsLookupCompatibleInterfaceCaller + StreamsLookupCompatibleInterfaceTransactor + StreamsLookupCompatibleInterfaceFilterer +} + +type StreamsLookupCompatibleInterfaceCaller struct { + contract *bind.BoundContract +} + +type StreamsLookupCompatibleInterfaceTransactor struct { + contract *bind.BoundContract +} + +type 
StreamsLookupCompatibleInterfaceFilterer struct { + contract *bind.BoundContract +} + +type StreamsLookupCompatibleInterfaceSession struct { + Contract *StreamsLookupCompatibleInterface + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type StreamsLookupCompatibleInterfaceCallerSession struct { + Contract *StreamsLookupCompatibleInterfaceCaller + CallOpts bind.CallOpts +} + +type StreamsLookupCompatibleInterfaceTransactorSession struct { + Contract *StreamsLookupCompatibleInterfaceTransactor + TransactOpts bind.TransactOpts +} + +type StreamsLookupCompatibleInterfaceRaw struct { + Contract *StreamsLookupCompatibleInterface +} + +type StreamsLookupCompatibleInterfaceCallerRaw struct { + Contract *StreamsLookupCompatibleInterfaceCaller +} + +type StreamsLookupCompatibleInterfaceTransactorRaw struct { + Contract *StreamsLookupCompatibleInterfaceTransactor +} + +func NewStreamsLookupCompatibleInterface(address common.Address, backend bind.ContractBackend) (*StreamsLookupCompatibleInterface, error) { + abi, err := abi.JSON(strings.NewReader(StreamsLookupCompatibleInterfaceABI)) + if err != nil { + return nil, err + } + contract, err := bindStreamsLookupCompatibleInterface(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &StreamsLookupCompatibleInterface{address: address, abi: abi, StreamsLookupCompatibleInterfaceCaller: StreamsLookupCompatibleInterfaceCaller{contract: contract}, StreamsLookupCompatibleInterfaceTransactor: StreamsLookupCompatibleInterfaceTransactor{contract: contract}, StreamsLookupCompatibleInterfaceFilterer: StreamsLookupCompatibleInterfaceFilterer{contract: contract}}, nil +} + +func NewStreamsLookupCompatibleInterfaceCaller(address common.Address, caller bind.ContractCaller) (*StreamsLookupCompatibleInterfaceCaller, error) { + contract, err := bindStreamsLookupCompatibleInterface(address, caller, nil, nil) + if err != nil { + return nil, err + } + return 
&StreamsLookupCompatibleInterfaceCaller{contract: contract}, nil +} + +func NewStreamsLookupCompatibleInterfaceTransactor(address common.Address, transactor bind.ContractTransactor) (*StreamsLookupCompatibleInterfaceTransactor, error) { + contract, err := bindStreamsLookupCompatibleInterface(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &StreamsLookupCompatibleInterfaceTransactor{contract: contract}, nil +} + +func NewStreamsLookupCompatibleInterfaceFilterer(address common.Address, filterer bind.ContractFilterer) (*StreamsLookupCompatibleInterfaceFilterer, error) { + contract, err := bindStreamsLookupCompatibleInterface(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &StreamsLookupCompatibleInterfaceFilterer{contract: contract}, nil +} + +func bindStreamsLookupCompatibleInterface(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := StreamsLookupCompatibleInterfaceMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_StreamsLookupCompatibleInterface *StreamsLookupCompatibleInterfaceRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _StreamsLookupCompatibleInterface.Contract.StreamsLookupCompatibleInterfaceCaller.contract.Call(opts, result, method, params...) 
+} + +func (_StreamsLookupCompatibleInterface *StreamsLookupCompatibleInterfaceRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _StreamsLookupCompatibleInterface.Contract.StreamsLookupCompatibleInterfaceTransactor.contract.Transfer(opts) +} + +func (_StreamsLookupCompatibleInterface *StreamsLookupCompatibleInterfaceRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _StreamsLookupCompatibleInterface.Contract.StreamsLookupCompatibleInterfaceTransactor.contract.Transact(opts, method, params...) +} + +func (_StreamsLookupCompatibleInterface *StreamsLookupCompatibleInterfaceCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _StreamsLookupCompatibleInterface.Contract.contract.Call(opts, result, method, params...) +} + +func (_StreamsLookupCompatibleInterface *StreamsLookupCompatibleInterfaceTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _StreamsLookupCompatibleInterface.Contract.contract.Transfer(opts) +} + +func (_StreamsLookupCompatibleInterface *StreamsLookupCompatibleInterfaceTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _StreamsLookupCompatibleInterface.Contract.contract.Transact(opts, method, params...) 
+} + +func (_StreamsLookupCompatibleInterface *StreamsLookupCompatibleInterfaceCaller) CheckCallback(opts *bind.CallOpts, values [][]byte, extraData []byte) (CheckCallback, + + error) { + var out []interface{} + err := _StreamsLookupCompatibleInterface.contract.Call(opts, &out, "checkCallback", values, extraData) + + outstruct := new(CheckCallback) + if err != nil { + return *outstruct, err + } + + outstruct.UpkeepNeeded = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.PerformData = *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return *outstruct, err + +} + +func (_StreamsLookupCompatibleInterface *StreamsLookupCompatibleInterfaceSession) CheckCallback(values [][]byte, extraData []byte) (CheckCallback, + + error) { + return _StreamsLookupCompatibleInterface.Contract.CheckCallback(&_StreamsLookupCompatibleInterface.CallOpts, values, extraData) +} + +func (_StreamsLookupCompatibleInterface *StreamsLookupCompatibleInterfaceCallerSession) CheckCallback(values [][]byte, extraData []byte) (CheckCallback, + + error) { + return _StreamsLookupCompatibleInterface.Contract.CheckCallback(&_StreamsLookupCompatibleInterface.CallOpts, values, extraData) +} + +type CheckCallback struct { + UpkeepNeeded bool + PerformData []byte +} + +func (_StreamsLookupCompatibleInterface *StreamsLookupCompatibleInterface) Address() common.Address { + return _StreamsLookupCompatibleInterface.address +} + +type StreamsLookupCompatibleInterfaceInterface interface { + CheckCallback(opts *bind.CallOpts, values [][]byte, extraData []byte) (CheckCallback, + + error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/streams_lookup_upkeep_wrapper/streams_lookup_upkeep_wrapper.go b/core/gethwrappers/generated/streams_lookup_upkeep_wrapper/streams_lookup_upkeep_wrapper.go new file mode 100644 index 00000000..22ea4d8a --- /dev/null +++ b/core/gethwrappers/generated/streams_lookup_upkeep_wrapper/streams_lookup_upkeep_wrapper.go @@ -0,0 +1,811 @@ +// Code generated - DO 
NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package streams_lookup_upkeep_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var StreamsLookupUpkeepMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_testRange\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_interval\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"_useArbBlock\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"_staging\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"_verify\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"feedParamKey\",\"type\":\"string\"},{\"internalType\":\"string[]\",\"name\":\"feeds\",\"type\":\"string[]\"},{\"internalType\":\"string\",\"name\":\"timeParamKey\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"time\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"StreamsLookup\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"v0\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",
\"name\":\"verifiedV0\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"ed\",\"type\":\"bytes\"}],\"name\":\"MercuryPerformEvent\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"callbackReturnBool\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes[]\",\"name\":\"values\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"checkCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"counter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"eligible\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"feedParamKey\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"feeds\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"interval\",\"outputs\":[{\"internalType\":\"uint256\
",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"previousPerformBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"reset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"value\",\"type\":\"bool\"}],\"name\":\"setCallbackReturnBool\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string[]\",\"name\":\"_feeds\",\"type\":\"string[]\"}],\"name\":\"setFeeds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"_feedParamKey\",\"type\":\"string\"},{\"internalType\":\"string\",\"name\":\"_timeParamKey\",\"type\":\"string\"}],\"name\":\"setParamKeys\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"value\",\"type\":\"bool\"}],\"name\":\"setShouldRevertCallback\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"shouldRevertCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"staging\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"testRange\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"timeParamKey\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"ty
pe\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"useArbBlock\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"verify\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: "0x60a06040523480156200001157600080fd5b5060405162001a6a38038062001a6a83398101604081905262000034916200020f565b60008581556001859055600281905560038190556004558215156080526040805180820190915260078152666665656449447360c81b60208201526006906200007e908262000312565b50604080518082019091526009815268074696d657374616d760bc1b6020820152600790620000ae908262000312565b50604051806020016040528060405180608001604052806042815260200162001a28604291399052620000e690600590600162000122565b506008805463ff000000199215156101000261ff00199415159490941661ffff19909116179290921716630100000017905550620003de915050565b8280548282559060005260206000209081019282156200016d579160200282015b828111156200016d57825182906200015c908262000312565b509160200191906001019062000143565b506200017b9291506200017f565b5090565b808211156200017b576000620001968282620001a0565b506001016200017f565b508054620001ae9062000283565b6000825580601f10620001bf575050565b601f016020900490600052602060002090810190620001df9190620001e2565b50565b5b808211156200017b5760008155600101620001e3565b805180151581146200020a57600080fd5b919050565b600080600080600060a086880312156200022857600080fd5b85519450602086015193506200024160408701620001f9565b92506200025160608701620001f9565b91506200026160808701620001f9565b90509295509295909350565b634e487b7160e01b600052604160045260246000fd5b600181811c908216806200029857607f821691505b602082108103620002b957634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200030d57600081815260208120601f850160051c81016020861015620002e85750805b601f850160051c820191505b818110156200030957828155600101620002f4565b5050505b505050565b8151600160016040
1b038111156200032e576200032e6200026d565b62000346816200033f845462000283565b84620002bf565b602080601f8311600181146200037e5760008415620003655750858301515b600019600386901b1c1916600185901b17855562000309565b600085815260208120601f198616915b82811015620003af578886015182559484019460019091019084016200038e565b5085821015620003ce5787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b6080516116196200040f60003960008181610307015281816103900152818161090c0152610a7b01526116196000f3fe608060405234801561001057600080fd5b50600436106101825760003560e01c80636e04ff0d116100d8578063947a36fb1161008c578063d826f88f11610066578063d826f88f1461035e578063d832d92f14610372578063fc735e991461037a57600080fd5b8063947a36fb14610345578063afb28d1f1461034e578063c98f10b01461035657600080fd5b806386b728e2116100bd57806386b728e21461030257806386e330af14610329578063917d895f1461033c57600080fd5b80636e04ff0d146102dc5780638340507c146102ef57600080fd5b80634a5479f31161013a5780635b48391a116101145780635b48391a1461028357806361bc221a146102ca5780636250a13a146102d357600080fd5b80634a5479f3146101fc5780634b56a42e1461021c5780634bdb38621461023d57600080fd5b80631d1970b71161016b5780631d1970b7146101c35780632cb15864146101d05780634585e33b146101e757600080fd5b806302be021f14610187578063102d538b146101af575b600080fd5b60085461019a9062010000900460ff1681565b60405190151581526020015b60405180910390f35b60085461019a906301000000900460ff1681565b60085461019a9060ff1681565b6101d960035481565b6040519081526020016101a6565b6101fa6101f5366004610c0f565b61038c565b005b61020f61020a366004610c81565b6106b9565b6040516101a69190610d08565b61022f61022a366004610e60565b610765565b6040516101a6929190610f34565b6101fa61024b366004610f57565b6008805491151562010000027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffff909216919091179055565b6101fa610291366004610f57565b600880549115156301000000027fffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffff909216919091179055565b6101d960045481565b6101d960005481565b61022f6102ea366004610c0f565b610840565b6101fa61
02fd366004610f79565b610a16565b61019a7f000000000000000000000000000000000000000000000000000000000000000081565b6101fa610337366004610fc6565b610a34565b6101d960025481565b6101d960015481565b61020f610a4b565b61020f610a58565b6101fa600060028190556003819055600455565b61019a610a65565b60085461019a90610100900460ff1681565b60007f00000000000000000000000000000000000000000000000000000000000000001561042b57606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610400573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104249190611077565b905061042e565b50435b60035460000361043e5760038190555b60008061044d84860186610e60565b600285905560045491935091506104659060016110bf565b600455604080516020808201835260008083528351918201909352918252600854909190610100900460ff16156106435760085460ff1615610574577360448b880c9f3b501af3f343da9284148bd7d77c73ffffffffffffffffffffffffffffffffffffffff16638e760afe856000815181106104e4576104e46110d8565b60200260200101516040518263ffffffff1660e01b81526004016105089190610d08565b6000604051808303816000875af1158015610527573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261056d9190810190611107565b9150610643565b7309dff56a4ff44e0f4436260a04f5cfa65636a48173ffffffffffffffffffffffffffffffffffffffff16638e760afe856000815181106105b7576105b76110d8565b60200260200101516040518263ffffffff1660e01b81526004016105db9190610d08565b6000604051808303816000875af11580156105fa573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526106409190810190611107565b91505b843373ffffffffffffffffffffffffffffffffffffffff167ff0f72c0b235fc8687d6a67c02ca543473a3cef8a18b48490f10e475a8dda13908660008151811061068f5761068f6110d8565b602002602001015185876040516106a89392919061117e565b60405180910390a350505050505050565b600581815481106106c957600080fd5b906000526020600020016000915090508054
6106e4906111c1565b80601f0160208091040260200160405190810160405280929190818152602001828054610710906111c1565b801561075d5780601f106107325761010080835404028352916020019161075d565b820191906000526020600020905b81548152906001019060200180831161074057829003601f168201915b505050505081565b60085460009060609062010000900460ff16156107e3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f73686f756c6452657665727443616c6c6261636b20697320747275650000000060448201526064015b60405180910390fd5b600084846040516020016107f8929190611214565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00181529190526008546301000000900460ff1693509150505b9250929050565b6000606061084c610a65565b610898576000848481818080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250959750919550610839945050505050565b6040517f666565644964486578000000000000000000000000000000000000000000000060208201526000906029016040516020818303038152906040528051906020012060066040516020016108ef919061129f565b60405160208183030381529060405280519060200120036109ae577f0000000000000000000000000000000000000000000000000000000000000000156109a757606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa15801561097c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906109a09190611077565b90506109b1565b50436109b1565b50425b604080516c6400000000000000000000000060208201528151601481830301815260348201928390527ff055e4a2000000000000000000000000000000000000000000000000000000009092526107da916006916005916007918691906038016113ce565b6006610a2283826114df565b506007610a2f82826114df565b505050565b8051610a47906005906020840190610b4a565b5050565b600680546106e4906111c1565b600780546106e4906111c1565b6000600354600003610a775750600190565b60007f000000000000000000000000000000000000000000000000000000000000000015610b1657606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffff
ffff1660e01b8152600401602060405180830381865afa158015610aeb573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610b0f9190611077565b9050610b19565b50435b600054600354610b2990836115f9565b108015610b445750600154600254610b4190836115f9565b10155b91505090565b828054828255906000526020600020908101928215610b90579160200282015b82811115610b905782518290610b8090826114df565b5091602001919060010190610b6a565b50610b9c929150610ba0565b5090565b80821115610b9c576000610bb48282610bbd565b50600101610ba0565b508054610bc9906111c1565b6000825580601f10610bd9575050565b601f016020900490600052602060002090810190610bf79190610bfa565b50565b5b80821115610b9c5760008155600101610bfb565b60008060208385031215610c2257600080fd5b823567ffffffffffffffff80821115610c3a57600080fd5b818501915085601f830112610c4e57600080fd5b813581811115610c5d57600080fd5b866020828501011115610c6f57600080fd5b60209290920196919550909350505050565b600060208284031215610c9357600080fd5b5035919050565b60005b83811015610cb5578181015183820152602001610c9d565b50506000910152565b60008151808452610cd6816020860160208601610c9a565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000610d1b6020830184610cbe565b9392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610d9857610d98610d22565b604052919050565b600067ffffffffffffffff821115610dba57610dba610d22565b5060051b60200190565b600067ffffffffffffffff821115610dde57610dde610d22565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600082601f830112610e1b57600080fd5b8135610e2e610e2982610dc4565b610d51565b818152846020838601011115610e4357600080fd5b816020850160208301376000918101602001919091529392505050565b60008060408385031215610e7357600080fd5b823567ffffffffffffffff80821115610e8b57600080fd5b818501915085601f830112610e9f57600080fd5b81356020610eaf610e298361
0da0565b82815260059290921b84018101918181019089841115610ece57600080fd5b8286015b84811015610f0657803586811115610eea5760008081fd5b610ef88c86838b0101610e0a565b845250918301918301610ed2565b5096505086013592505080821115610f1d57600080fd5b50610f2a85828601610e0a565b9150509250929050565b8215158152604060208201526000610f4f6040830184610cbe565b949350505050565b600060208284031215610f6957600080fd5b81358015158114610d1b57600080fd5b60008060408385031215610f8c57600080fd5b823567ffffffffffffffff80821115610fa457600080fd5b610fb086838701610e0a565b93506020850135915080821115610f1d57600080fd5b60006020808385031215610fd957600080fd5b823567ffffffffffffffff80821115610ff157600080fd5b818501915085601f83011261100557600080fd5b8135611013610e2982610da0565b81815260059190911b8301840190848101908883111561103257600080fd5b8585015b8381101561106a5780358581111561104e5760008081fd5b61105c8b89838a0101610e0a565b845250918601918601611036565b5098975050505050505050565b60006020828403121561108957600080fd5b5051919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b808201808211156110d2576110d2611090565b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60006020828403121561111957600080fd5b815167ffffffffffffffff81111561113057600080fd5b8201601f8101841361114157600080fd5b805161114f610e2982610dc4565b81815285602083850101111561116457600080fd5b611175826020830160208601610c9a565b95945050505050565b6060815260006111916060830186610cbe565b82810360208401526111a38186610cbe565b905082810360408401526111b78185610cbe565b9695505050505050565b600181811c908216806111d557607f821691505b60208210810361120e577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b6000604082016040835280855180835260608501915060608160051b8601019250602080880160005b83811015611289577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0888703018552611277868351610cbe565b9550938201939082019060010161123d565b50508584038187015250
50506111758185610cbe565b60008083546112ad816111c1565b600182811680156112c557600181146112f857611327565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0084168752821515830287019450611327565b8760005260208060002060005b8581101561131e5781548a820152908401908201611305565b50505082870194505b50929695505050505050565b60008154611340816111c1565b80855260206001838116801561135d5760018114611395576113c3565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff008516838901528284151560051b89010195506113c3565b866000528260002060005b858110156113bb5781548a82018601529083019084016113a0565b890184019650505b505050505092915050565b60a0815260006113e160a0830188611333565b6020838203818501528188548084528284019150828160051b8501018a6000528360002060005b83811015611453577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08784030185526114418383611333565b94860194925060019182019101611408565b50508681036040880152611467818b611333565b94505050505084606084015282810360808401526114858185610cbe565b98975050505050505050565b601f821115610a2f57600081815260208120601f850160051c810160208610156114b85750805b601f850160051c820191505b818110156114d7578281556001016114c4565b505050505050565b815167ffffffffffffffff8111156114f9576114f9610d22565b61150d8161150784546111c1565b84611491565b602080601f831160018114611560576000841561152a5750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b1785556114d7565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b828110156115ad5788860151825594840194600190910190840161158e565b50858210156115e957878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b818103818111156110d2576110d261109056fea164736f6c6343000810000a307830303032386339313564366166306664363662626132643066633934303532323662636138643638303633333331323161376439383332313033643135363363", +} + +var StreamsLookupUpkeepABI = 
StreamsLookupUpkeepMetaData.ABI + +var StreamsLookupUpkeepBin = StreamsLookupUpkeepMetaData.Bin + +func DeployStreamsLookupUpkeep(auth *bind.TransactOpts, backend bind.ContractBackend, _testRange *big.Int, _interval *big.Int, _useArbBlock bool, _staging bool, _verify bool) (common.Address, *types.Transaction, *StreamsLookupUpkeep, error) { + parsed, err := StreamsLookupUpkeepMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(StreamsLookupUpkeepBin), backend, _testRange, _interval, _useArbBlock, _staging, _verify) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &StreamsLookupUpkeep{address: address, abi: *parsed, StreamsLookupUpkeepCaller: StreamsLookupUpkeepCaller{contract: contract}, StreamsLookupUpkeepTransactor: StreamsLookupUpkeepTransactor{contract: contract}, StreamsLookupUpkeepFilterer: StreamsLookupUpkeepFilterer{contract: contract}}, nil +} + +type StreamsLookupUpkeep struct { + address common.Address + abi abi.ABI + StreamsLookupUpkeepCaller + StreamsLookupUpkeepTransactor + StreamsLookupUpkeepFilterer +} + +type StreamsLookupUpkeepCaller struct { + contract *bind.BoundContract +} + +type StreamsLookupUpkeepTransactor struct { + contract *bind.BoundContract +} + +type StreamsLookupUpkeepFilterer struct { + contract *bind.BoundContract +} + +type StreamsLookupUpkeepSession struct { + Contract *StreamsLookupUpkeep + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type StreamsLookupUpkeepCallerSession struct { + Contract *StreamsLookupUpkeepCaller + CallOpts bind.CallOpts +} + +type StreamsLookupUpkeepTransactorSession struct { + Contract *StreamsLookupUpkeepTransactor + TransactOpts bind.TransactOpts +} + +type StreamsLookupUpkeepRaw struct { + Contract *StreamsLookupUpkeep +} + +type 
StreamsLookupUpkeepCallerRaw struct { + Contract *StreamsLookupUpkeepCaller +} + +type StreamsLookupUpkeepTransactorRaw struct { + Contract *StreamsLookupUpkeepTransactor +} + +func NewStreamsLookupUpkeep(address common.Address, backend bind.ContractBackend) (*StreamsLookupUpkeep, error) { + abi, err := abi.JSON(strings.NewReader(StreamsLookupUpkeepABI)) + if err != nil { + return nil, err + } + contract, err := bindStreamsLookupUpkeep(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &StreamsLookupUpkeep{address: address, abi: abi, StreamsLookupUpkeepCaller: StreamsLookupUpkeepCaller{contract: contract}, StreamsLookupUpkeepTransactor: StreamsLookupUpkeepTransactor{contract: contract}, StreamsLookupUpkeepFilterer: StreamsLookupUpkeepFilterer{contract: contract}}, nil +} + +func NewStreamsLookupUpkeepCaller(address common.Address, caller bind.ContractCaller) (*StreamsLookupUpkeepCaller, error) { + contract, err := bindStreamsLookupUpkeep(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &StreamsLookupUpkeepCaller{contract: contract}, nil +} + +func NewStreamsLookupUpkeepTransactor(address common.Address, transactor bind.ContractTransactor) (*StreamsLookupUpkeepTransactor, error) { + contract, err := bindStreamsLookupUpkeep(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &StreamsLookupUpkeepTransactor{contract: contract}, nil +} + +func NewStreamsLookupUpkeepFilterer(address common.Address, filterer bind.ContractFilterer) (*StreamsLookupUpkeepFilterer, error) { + contract, err := bindStreamsLookupUpkeep(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &StreamsLookupUpkeepFilterer{contract: contract}, nil +} + +func bindStreamsLookupUpkeep(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := StreamsLookupUpkeepMetaData.GetAbi() + if 
err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _StreamsLookupUpkeep.Contract.StreamsLookupUpkeepCaller.contract.Call(opts, result, method, params...) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.StreamsLookupUpkeepTransactor.contract.Transfer(opts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.StreamsLookupUpkeepTransactor.contract.Transact(opts, method, params...) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _StreamsLookupUpkeep.Contract.contract.Call(opts, result, method, params...) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.contract.Transfer(opts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.contract.Transact(opts, method, params...) 
+} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) CallbackReturnBool(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "callbackReturnBool") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) CallbackReturnBool() (bool, error) { + return _StreamsLookupUpkeep.Contract.CallbackReturnBool(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) CallbackReturnBool() (bool, error) { + return _StreamsLookupUpkeep.Contract.CallbackReturnBool(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) CheckCallback(opts *bind.CallOpts, values [][]byte, extraData []byte) (bool, []byte, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "checkCallback", values, extraData) + + if err != nil { + return *new(bool), *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out1 := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return out0, out1, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) CheckCallback(values [][]byte, extraData []byte) (bool, []byte, error) { + return _StreamsLookupUpkeep.Contract.CheckCallback(&_StreamsLookupUpkeep.CallOpts, values, extraData) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) CheckCallback(values [][]byte, extraData []byte) (bool, []byte, error) { + return _StreamsLookupUpkeep.Contract.CheckCallback(&_StreamsLookupUpkeep.CallOpts, values, extraData) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) CheckUpkeep(opts *bind.CallOpts, data []byte) (bool, []byte, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "checkUpkeep", data) + + if err != nil { + return *new(bool), *new([]byte), err + } + + out0 := 
*abi.ConvertType(out[0], new(bool)).(*bool) + out1 := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return out0, out1, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) CheckUpkeep(data []byte) (bool, []byte, error) { + return _StreamsLookupUpkeep.Contract.CheckUpkeep(&_StreamsLookupUpkeep.CallOpts, data) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) CheckUpkeep(data []byte) (bool, []byte, error) { + return _StreamsLookupUpkeep.Contract.CheckUpkeep(&_StreamsLookupUpkeep.CallOpts, data) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) Counter(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "counter") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) Counter() (*big.Int, error) { + return _StreamsLookupUpkeep.Contract.Counter(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) Counter() (*big.Int, error) { + return _StreamsLookupUpkeep.Contract.Counter(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) Eligible(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "eligible") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) Eligible() (bool, error) { + return _StreamsLookupUpkeep.Contract.Eligible(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) Eligible() (bool, error) { + return _StreamsLookupUpkeep.Contract.Eligible(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) FeedParamKey(opts *bind.CallOpts) 
(string, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "feedParamKey") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) FeedParamKey() (string, error) { + return _StreamsLookupUpkeep.Contract.FeedParamKey(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) FeedParamKey() (string, error) { + return _StreamsLookupUpkeep.Contract.FeedParamKey(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) Feeds(opts *bind.CallOpts, arg0 *big.Int) (string, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "feeds", arg0) + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) Feeds(arg0 *big.Int) (string, error) { + return _StreamsLookupUpkeep.Contract.Feeds(&_StreamsLookupUpkeep.CallOpts, arg0) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) Feeds(arg0 *big.Int) (string, error) { + return _StreamsLookupUpkeep.Contract.Feeds(&_StreamsLookupUpkeep.CallOpts, arg0) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) InitialBlock(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "initialBlock") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) InitialBlock() (*big.Int, error) { + return _StreamsLookupUpkeep.Contract.InitialBlock(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) InitialBlock() (*big.Int, error) { + return 
_StreamsLookupUpkeep.Contract.InitialBlock(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) Interval(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "interval") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) Interval() (*big.Int, error) { + return _StreamsLookupUpkeep.Contract.Interval(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) Interval() (*big.Int, error) { + return _StreamsLookupUpkeep.Contract.Interval(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) PreviousPerformBlock(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "previousPerformBlock") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) PreviousPerformBlock() (*big.Int, error) { + return _StreamsLookupUpkeep.Contract.PreviousPerformBlock(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) PreviousPerformBlock() (*big.Int, error) { + return _StreamsLookupUpkeep.Contract.PreviousPerformBlock(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) ShouldRevertCallback(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "shouldRevertCallback") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) ShouldRevertCallback() (bool, error) { + return 
_StreamsLookupUpkeep.Contract.ShouldRevertCallback(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) ShouldRevertCallback() (bool, error) { + return _StreamsLookupUpkeep.Contract.ShouldRevertCallback(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) Staging(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "staging") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) Staging() (bool, error) { + return _StreamsLookupUpkeep.Contract.Staging(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) Staging() (bool, error) { + return _StreamsLookupUpkeep.Contract.Staging(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) TestRange(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "testRange") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) TestRange() (*big.Int, error) { + return _StreamsLookupUpkeep.Contract.TestRange(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) TestRange() (*big.Int, error) { + return _StreamsLookupUpkeep.Contract.TestRange(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) TimeParamKey(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "timeParamKey") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + 
+func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) TimeParamKey() (string, error) { + return _StreamsLookupUpkeep.Contract.TimeParamKey(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) TimeParamKey() (string, error) { + return _StreamsLookupUpkeep.Contract.TimeParamKey(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) UseArbBlock(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "useArbBlock") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) UseArbBlock() (bool, error) { + return _StreamsLookupUpkeep.Contract.UseArbBlock(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) UseArbBlock() (bool, error) { + return _StreamsLookupUpkeep.Contract.UseArbBlock(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCaller) Verify(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _StreamsLookupUpkeep.contract.Call(opts, &out, "verify") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) Verify() (bool, error) { + return _StreamsLookupUpkeep.Contract.Verify(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepCallerSession) Verify() (bool, error) { + return _StreamsLookupUpkeep.Contract.Verify(&_StreamsLookupUpkeep.CallOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _StreamsLookupUpkeep.contract.Transact(opts, "performUpkeep", performData) +} + +func (_StreamsLookupUpkeep 
*StreamsLookupUpkeepSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.PerformUpkeep(&_StreamsLookupUpkeep.TransactOpts, performData) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.PerformUpkeep(&_StreamsLookupUpkeep.TransactOpts, performData) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactor) Reset(opts *bind.TransactOpts) (*types.Transaction, error) { + return _StreamsLookupUpkeep.contract.Transact(opts, "reset") +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) Reset() (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.Reset(&_StreamsLookupUpkeep.TransactOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactorSession) Reset() (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.Reset(&_StreamsLookupUpkeep.TransactOpts) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactor) SetCallbackReturnBool(opts *bind.TransactOpts, value bool) (*types.Transaction, error) { + return _StreamsLookupUpkeep.contract.Transact(opts, "setCallbackReturnBool", value) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) SetCallbackReturnBool(value bool) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.SetCallbackReturnBool(&_StreamsLookupUpkeep.TransactOpts, value) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactorSession) SetCallbackReturnBool(value bool) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.SetCallbackReturnBool(&_StreamsLookupUpkeep.TransactOpts, value) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactor) SetFeeds(opts *bind.TransactOpts, _feeds []string) (*types.Transaction, error) { + return _StreamsLookupUpkeep.contract.Transact(opts, "setFeeds", _feeds) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) 
SetFeeds(_feeds []string) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.SetFeeds(&_StreamsLookupUpkeep.TransactOpts, _feeds) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactorSession) SetFeeds(_feeds []string) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.SetFeeds(&_StreamsLookupUpkeep.TransactOpts, _feeds) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactor) SetParamKeys(opts *bind.TransactOpts, _feedParamKey string, _timeParamKey string) (*types.Transaction, error) { + return _StreamsLookupUpkeep.contract.Transact(opts, "setParamKeys", _feedParamKey, _timeParamKey) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) SetParamKeys(_feedParamKey string, _timeParamKey string) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.SetParamKeys(&_StreamsLookupUpkeep.TransactOpts, _feedParamKey, _timeParamKey) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactorSession) SetParamKeys(_feedParamKey string, _timeParamKey string) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.SetParamKeys(&_StreamsLookupUpkeep.TransactOpts, _feedParamKey, _timeParamKey) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactor) SetShouldRevertCallback(opts *bind.TransactOpts, value bool) (*types.Transaction, error) { + return _StreamsLookupUpkeep.contract.Transact(opts, "setShouldRevertCallback", value) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepSession) SetShouldRevertCallback(value bool) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.SetShouldRevertCallback(&_StreamsLookupUpkeep.TransactOpts, value) +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepTransactorSession) SetShouldRevertCallback(value bool) (*types.Transaction, error) { + return _StreamsLookupUpkeep.Contract.SetShouldRevertCallback(&_StreamsLookupUpkeep.TransactOpts, value) +} + +type StreamsLookupUpkeepMercuryPerformEventIterator struct { + Event 
*StreamsLookupUpkeepMercuryPerformEvent + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StreamsLookupUpkeepMercuryPerformEventIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StreamsLookupUpkeepMercuryPerformEvent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StreamsLookupUpkeepMercuryPerformEvent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StreamsLookupUpkeepMercuryPerformEventIterator) Error() error { + return it.fail +} + +func (it *StreamsLookupUpkeepMercuryPerformEventIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StreamsLookupUpkeepMercuryPerformEvent struct { + Sender common.Address + BlockNumber *big.Int + V0 []byte + VerifiedV0 []byte + Ed []byte + Raw types.Log +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepFilterer) FilterMercuryPerformEvent(opts *bind.FilterOpts, sender []common.Address, blockNumber []*big.Int) (*StreamsLookupUpkeepMercuryPerformEventIterator, error) { + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + var blockNumberRule []interface{} + for _, blockNumberItem := range blockNumber { + blockNumberRule = append(blockNumberRule, blockNumberItem) + } + + logs, sub, err := _StreamsLookupUpkeep.contract.FilterLogs(opts, "MercuryPerformEvent", senderRule, blockNumberRule) + if err != nil { + return nil, err + } + return &StreamsLookupUpkeepMercuryPerformEventIterator{contract: 
_StreamsLookupUpkeep.contract, event: "MercuryPerformEvent", logs: logs, sub: sub}, nil +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepFilterer) WatchMercuryPerformEvent(opts *bind.WatchOpts, sink chan<- *StreamsLookupUpkeepMercuryPerformEvent, sender []common.Address, blockNumber []*big.Int) (event.Subscription, error) { + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + var blockNumberRule []interface{} + for _, blockNumberItem := range blockNumber { + blockNumberRule = append(blockNumberRule, blockNumberItem) + } + + logs, sub, err := _StreamsLookupUpkeep.contract.WatchLogs(opts, "MercuryPerformEvent", senderRule, blockNumberRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StreamsLookupUpkeepMercuryPerformEvent) + if err := _StreamsLookupUpkeep.contract.UnpackLog(event, "MercuryPerformEvent", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeepFilterer) ParseMercuryPerformEvent(log types.Log) (*StreamsLookupUpkeepMercuryPerformEvent, error) { + event := new(StreamsLookupUpkeepMercuryPerformEvent) + if err := _StreamsLookupUpkeep.contract.UnpackLog(event, "MercuryPerformEvent", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeep) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _StreamsLookupUpkeep.abi.Events["MercuryPerformEvent"].ID: + return _StreamsLookupUpkeep.ParseMercuryPerformEvent(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", 
log.Topics[0]) + } +} + +func (StreamsLookupUpkeepMercuryPerformEvent) Topic() common.Hash { + return common.HexToHash("0xf0f72c0b235fc8687d6a67c02ca543473a3cef8a18b48490f10e475a8dda1390") +} + +func (_StreamsLookupUpkeep *StreamsLookupUpkeep) Address() common.Address { + return _StreamsLookupUpkeep.address +} + +type StreamsLookupUpkeepInterface interface { + CallbackReturnBool(opts *bind.CallOpts) (bool, error) + + CheckCallback(opts *bind.CallOpts, values [][]byte, extraData []byte) (bool, []byte, error) + + CheckUpkeep(opts *bind.CallOpts, data []byte) (bool, []byte, error) + + Counter(opts *bind.CallOpts) (*big.Int, error) + + Eligible(opts *bind.CallOpts) (bool, error) + + FeedParamKey(opts *bind.CallOpts) (string, error) + + Feeds(opts *bind.CallOpts, arg0 *big.Int) (string, error) + + InitialBlock(opts *bind.CallOpts) (*big.Int, error) + + Interval(opts *bind.CallOpts) (*big.Int, error) + + PreviousPerformBlock(opts *bind.CallOpts) (*big.Int, error) + + ShouldRevertCallback(opts *bind.CallOpts) (bool, error) + + Staging(opts *bind.CallOpts) (bool, error) + + TestRange(opts *bind.CallOpts) (*big.Int, error) + + TimeParamKey(opts *bind.CallOpts) (string, error) + + UseArbBlock(opts *bind.CallOpts) (bool, error) + + Verify(opts *bind.CallOpts) (bool, error) + + PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) + + Reset(opts *bind.TransactOpts) (*types.Transaction, error) + + SetCallbackReturnBool(opts *bind.TransactOpts, value bool) (*types.Transaction, error) + + SetFeeds(opts *bind.TransactOpts, _feeds []string) (*types.Transaction, error) + + SetParamKeys(opts *bind.TransactOpts, _feedParamKey string, _timeParamKey string) (*types.Transaction, error) + + SetShouldRevertCallback(opts *bind.TransactOpts, value bool) (*types.Transaction, error) + + FilterMercuryPerformEvent(opts *bind.FilterOpts, sender []common.Address, blockNumber []*big.Int) (*StreamsLookupUpkeepMercuryPerformEventIterator, error) + + 
WatchMercuryPerformEvent(opts *bind.WatchOpts, sink chan<- *StreamsLookupUpkeepMercuryPerformEvent, sender []common.Address, blockNumber []*big.Int) (event.Subscription, error) + + ParseMercuryPerformEvent(log types.Log) (*StreamsLookupUpkeepMercuryPerformEvent, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/test_api_consumer_wrapper/test_api_consumer_wrapper.go b/core/gethwrappers/generated/test_api_consumer_wrapper/test_api_consumer_wrapper.go new file mode 100644 index 00000000..ba6e01e3 --- /dev/null +++ b/core/gethwrappers/generated/test_api_consumer_wrapper/test_api_consumer_wrapper.go @@ -0,0 +1,1100 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package test_api_consumer_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var TestAPIConsumerMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"PluginCancelled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"PluginFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"PluginRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"roundID\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"}],\"name\":\"PerfMetricsEvent\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_payment\",\"type\":\"uint256\"},{\"internalType\":\"bytes4\",\"name\":\"_callbackFunctionId\",\"type\":\"bytes4\"},{\"internalType\":\"uint256\",\"name\":\"_expiration\",\"type\":\"uint256\"}],\"name\":\"cancelRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_oracle\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"_jobId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_payment\",\"type\":\"uint256\"},{\"internalType\":\"string\",\"name\":\"_url\",\"type\":
\"string\"},{\"internalType\":\"string\",\"name\":\"_path\",\"type\":\"string\"},{\"internalType\":\"int256\",\"name\":\"_times\",\"type\":\"int256\"}],\"name\":\"createRequestTo\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"currentRoundID\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"data\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_data\",\"type\":\"uint256\"}],\"name\":\"fulfill\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPluginToken\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"isOwner\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"selector\",\"outputs\":[{\"internalType\":\"bytes4\",\"name\":\"\",\"type\":\"bytes4\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawLink\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x6080604052600160045560006007553480156200001b57600080fd5b50604051620017a1380380620017a1833981810160405260208110156200004157600080fd5b5051600680546001600160a01b0319163317908190556040516001600160a01b0391909116906000907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0908290a36001600160a01b038116620000b757620000b16001600160e01b03620000d216565b620000cb565b620000cb816001600160e01b036200016316565b5062000185565b6200016173c89bd4e1632d3a43cb03aaad5262cbe4038bc5716001600160a01b03166338cc48316040518163ffffffff1660e01b815260040160206040518083038186803b1580156200012457600080fd5b505afa15801562000139573d6000803e3d6000fd5b505050506040513d60208110156200015057600080fd5b50516001600160e01b036200016316565b565b600280546001600160a01b0319166001600160a01b0392909216919091179055565b61160c80620001956000396000f3fe608060405234801561001057600080fd5b50600436106100c95760003560e01c80638dc654a211610081578063ea3d508a1161005b578063ea3d508a146102ca578063ec65d0f814610307578063f2fde38b14610358576100c9565b80638dc654a21461029e5780638f32d59b146102a6578063a312c4f2146102c2576100c9565b80634357855e116100b25780634357855e1461026957806373d4a13a1461028e5780638da5cb5b14610296576100c9565b8063165d35e1146100ce57806316ef7f1a146100ff575b600080fd5b6100d661038b565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190f35b610257600480360360c081101561011557600080fd5b73ffffffffffffffffffffffffffffffffffffffff823516916020810135916040820135919081019060808101606082013564010000000081111561015957600080fd5b82018360208201111561016b57600080fd5b8035906020019184600183028401116401000000008311171561018d57600080fd5b91908080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525092959493602081019350359150506401000000008111156101e057600080fd5b8201836020820111156101f257600080fd5b8035906020019184600183028401116401000000008311171561021457600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295505091359250610
39a915050565b60408051918252519081900360200190f35b61028c6004803603604081101561027f57600080fd5b508035906020013561055c565b005b6102576105ae565b6100d66105b4565b61028c6105d0565b6102ae610800565b604080519115158252519081900360200190f35b61025761081e565b6102d2610824565b604080517fffffffff000000000000000000000000000000000000000000000000000000009092168252519081900360200190f35b61028c6004803603608081101561031d57600080fd5b508035906020810135907fffffffff00000000000000000000000000000000000000000000000000000000604082013516906060013561082d565b61028c6004803603602081101561036e57600080fd5b503573ffffffffffffffffffffffffffffffffffffffff166108b2565b600061039561092e565b905090565b60006103a4610800565b61040f57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604482015290519081900360640190fd5b600980547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000016634357855e179055610445611567565b61047087307f4357855e0000000000000000000000000000000000000000000000000000000061094a565b60408051808201909152600381527f676574000000000000000000000000000000000000000000000000000000000060208201529091506104b99082908763ffffffff61097516565b60408051808201909152600481527f706174680000000000000000000000000000000000000000000000000000000060208201526104ff9082908663ffffffff61097516565b60408051808201909152600581527f74696d657300000000000000000000000000000000000000000000000000000060208201526105459082908563ffffffff6109a416565b6105508882886109ce565b98975050505050505050565b6008819055600780546001019081905560408051918252602082018490524282820152517ffbaf68ee7b9032982942607eaea1859969ed8674797b5c2fc6fecaa7538519469181900360600190a15050565b60085481565b60065473ffffffffffffffffffffffffffffffffffffffff1690565b6105d8610800565b61064357604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e65726044820152905
19081900360640190fd5b600061064d61092e565b604080517f70a08231000000000000000000000000000000000000000000000000000000008152306004820152905191925073ffffffffffffffffffffffffffffffffffffffff83169163a9059cbb91339184916370a08231916024808301926020929190829003018186803b1580156106c657600080fd5b505afa1580156106da573d6000803e3d6000fd5b505050506040513d60208110156106f057600080fd5b5051604080517fffffffff0000000000000000000000000000000000000000000000000000000060e086901b16815273ffffffffffffffffffffffffffffffffffffffff909316600484015260248301919091525160448083019260209291908290030181600087803b15801561076657600080fd5b505af115801561077a573d6000803e3d6000fd5b505050506040513d602081101561079057600080fd5b50516107fd57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f556e61626c6520746f207472616e736665720000000000000000000000000000604482015290519081900360640190fd5b50565b60065473ffffffffffffffffffffffffffffffffffffffff16331490565b60075481565b60095460e01b81565b610835610800565b6108a057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604482015290519081900360640190fd5b6108ac84848484610c0b565b50505050565b6108ba610800565b61092557604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604482015290519081900360640190fd5b6107fd81610d46565b60025473ffffffffffffffffffffffffffffffffffffffff1690565b610952611567565b61095a611567565b61096c8186868663ffffffff610e4016565b95945050505050565b608083015161098a908363ffffffff610ea216565b608083015161099f908263ffffffff610ea216565b505050565b60808301516109b9908363ffffffff610ea216565b608083015161099f908263ffffffff610ebf16565b6004546040805130606090811b60208084019190915260348084018690528451808503909101815260549093018452825192810192909220908601939093526000838152600590915281812080547ffffffff
fffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8816179055905182917fb5e6e01e79f91267dc17b4e6314d5d4d03593d2ceee0fbb452b750bd70ea5af991a260025473ffffffffffffffffffffffffffffffffffffffff16634000aea08584610aa887610f35565b6040518463ffffffff1660e01b8152600401808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b83811015610b2c578181015183820152602001610b14565b50505050905090810190601f168015610b595780820380516001836020036101000a031916815260200191505b50945050505050602060405180830381600087803b158015610b7a57600080fd5b505af1158015610b8e573d6000803e3d6000fd5b505050506040513d6020811015610ba457600080fd5b5051610bfb576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260238152602001806115dd6023913960400191505060405180910390fd5b6004805460010190559392505050565b60008481526005602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000008116909155905173ffffffffffffffffffffffffffffffffffffffff9091169186917fe1fe3afa0f7f761ff0a8b89086790efd5140d2907ebd5b7ff6bfcb5e075fd4c59190a2604080517f6ee4d55300000000000000000000000000000000000000000000000000000000815260048101879052602481018690527fffffffff000000000000000000000000000000000000000000000000000000008516604482015260648101849052905173ffffffffffffffffffffffffffffffffffffffff831691636ee4d55391608480830192600092919082900301818387803b158015610d2757600080fd5b505af1158015610d3b573d6000803e3d6000fd5b505050505050505050565b73ffffffffffffffffffffffffffffffffffffffff8116610db2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806115b76026913960400191505060405180910390fd5b60065460405173ffffffffffffffffffffffffffffffffffffffff8084169216907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a3600680547ffffffffffffffffffffff
fff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b610e48611567565b610e58856080015161010061111e565b505091835273ffffffffffffffffffffffffffffffffffffffff1660208301527fffffffff0000000000000000000000000000000000000000000000000000000016604082015290565b610eaf826003835161115e565b61099f828263ffffffff6112a916565b7fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000811215610ef657610ef182826112ca565b610f31565b67ffffffffffffffff811315610f1057610ef18282611327565b60008112610f2457610ef18260008361115e565b610f31826001831961115e565b5050565b6060634042994660e01b60008084600001518560200151866040015187606001516001896080015160000151604051602401808973ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018881526020018781526020018673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001857bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19167bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916815260200184815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b83811015611061578181015183820152602001611049565b50505050905090810190601f16801561108e5780820380516001836020036101000a031916815260200191505b50604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff00000000000000000000000000000000000000000000000000000000909d169c909c17909b5250989950505050505050505050919050565b61112661159c565b602082061561113b5760208206602003820191505b506020808301829052604080518085526000815283019091019052815b92915050565b60178167ffffffffffffffff161161118f576111898360e0600585901b16831763ffffffff61136216565b5061099f565b60ff8167ffffffffffffffff16116111d9576111bc836018611fe0600586901b161763ffffffff61136216565b506111898367ffffffffffffffff8316600163ffffffff61137a16565b61ffff8167ffffffffffffffff161161122457611207836
019611fe0600586901b161763ffffffff61136216565b506111898367ffffffffffffffff8316600263ffffffff61137a16565b63ffffffff8167ffffffffffffffff16116112715761125483601a611fe0600586901b161763ffffffff61136216565b506111898367ffffffffffffffff8316600463ffffffff61137a16565b61128c83601b611fe0600586901b161763ffffffff61136216565b506108ac8367ffffffffffffffff8316600863ffffffff61137a16565b6112b161159c565b6112c38384600001515184855161139b565b9392505050565b6112db8260c363ffffffff61136216565b50610f3182827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0360405160200180828152602001915050604051602081830303815290604052611483565b6113388260c263ffffffff61136216565b50610f31828260405160200180828152602001915050604051602081830303815290604052611483565b61136a61159c565b6112c38384600001515184611490565b61138261159c565b6113938485600001515185856114db565b949350505050565b6113a361159c565b82518211156113b157600080fd5b846020015182850111156113db576113db856113d38760200151878601611539565b600202611550565b6000808651805187602083010193508088870111156113fa5787860182525b505050602084015b6020841061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09093019260209182019101611402565b5181517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60208690036101000a019081169019919091161790525083949350505050565b610eaf826002835161115e565b61149861159c565b836020015183106114b4576114b4848560200151600202611550565b8351805160208583010184815350808514156114d1576001810182525b5093949350505050565b6114e361159c565b846020015184830111156115005761150085858401600202611550565b60006001836101000a03905085518386820101858319825116178152508051848701111561152e5783860181525b509495945050505050565b60008183111561154a575081611158565b50919050565b815161155c838361111e565b506108ac83826112a9565b6040805160a08101825260008082526020820181905291810182905260608101919091526080810161159761159c565b905290565b60405180604001604052806060815260200160008152509056fe4f776e61626c653a206e6577206f776e657220697320746865207a65726f206164647
2657373756e61626c6520746f207472616e73666572416e6443616c6c20746f206f7261636c65a164736f6c6343000606000a", +} + +var TestAPIConsumerABI = TestAPIConsumerMetaData.ABI + +var TestAPIConsumerBin = TestAPIConsumerMetaData.Bin + +func DeployTestAPIConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address) (common.Address, *types.Transaction, *TestAPIConsumer, error) { + parsed, err := TestAPIConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(TestAPIConsumerBin), backend, _link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &TestAPIConsumer{address: address, abi: *parsed, TestAPIConsumerCaller: TestAPIConsumerCaller{contract: contract}, TestAPIConsumerTransactor: TestAPIConsumerTransactor{contract: contract}, TestAPIConsumerFilterer: TestAPIConsumerFilterer{contract: contract}}, nil +} + +type TestAPIConsumer struct { + address common.Address + abi abi.ABI + TestAPIConsumerCaller + TestAPIConsumerTransactor + TestAPIConsumerFilterer +} + +type TestAPIConsumerCaller struct { + contract *bind.BoundContract +} + +type TestAPIConsumerTransactor struct { + contract *bind.BoundContract +} + +type TestAPIConsumerFilterer struct { + contract *bind.BoundContract +} + +type TestAPIConsumerSession struct { + Contract *TestAPIConsumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type TestAPIConsumerCallerSession struct { + Contract *TestAPIConsumerCaller + CallOpts bind.CallOpts +} + +type TestAPIConsumerTransactorSession struct { + Contract *TestAPIConsumerTransactor + TransactOpts bind.TransactOpts +} + +type TestAPIConsumerRaw struct { + Contract *TestAPIConsumer +} + +type TestAPIConsumerCallerRaw struct { + Contract *TestAPIConsumerCaller +} + +type TestAPIConsumerTransactorRaw 
struct { + Contract *TestAPIConsumerTransactor +} + +func NewTestAPIConsumer(address common.Address, backend bind.ContractBackend) (*TestAPIConsumer, error) { + abi, err := abi.JSON(strings.NewReader(TestAPIConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindTestAPIConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &TestAPIConsumer{address: address, abi: abi, TestAPIConsumerCaller: TestAPIConsumerCaller{contract: contract}, TestAPIConsumerTransactor: TestAPIConsumerTransactor{contract: contract}, TestAPIConsumerFilterer: TestAPIConsumerFilterer{contract: contract}}, nil +} + +func NewTestAPIConsumerCaller(address common.Address, caller bind.ContractCaller) (*TestAPIConsumerCaller, error) { + contract, err := bindTestAPIConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &TestAPIConsumerCaller{contract: contract}, nil +} + +func NewTestAPIConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*TestAPIConsumerTransactor, error) { + contract, err := bindTestAPIConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &TestAPIConsumerTransactor{contract: contract}, nil +} + +func NewTestAPIConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*TestAPIConsumerFilterer, error) { + contract, err := bindTestAPIConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &TestAPIConsumerFilterer{contract: contract}, nil +} + +func bindTestAPIConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := TestAPIConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_TestAPIConsumer *TestAPIConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method 
string, params ...interface{}) error { + return _TestAPIConsumer.Contract.TestAPIConsumerCaller.contract.Call(opts, result, method, params...) +} + +func (_TestAPIConsumer *TestAPIConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _TestAPIConsumer.Contract.TestAPIConsumerTransactor.contract.Transfer(opts) +} + +func (_TestAPIConsumer *TestAPIConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _TestAPIConsumer.Contract.TestAPIConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_TestAPIConsumer *TestAPIConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _TestAPIConsumer.Contract.contract.Call(opts, result, method, params...) +} + +func (_TestAPIConsumer *TestAPIConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _TestAPIConsumer.Contract.contract.Transfer(opts) +} + +func (_TestAPIConsumer *TestAPIConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _TestAPIConsumer.Contract.contract.Transact(opts, method, params...) 
+} + +func (_TestAPIConsumer *TestAPIConsumerCaller) CurrentRoundID(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _TestAPIConsumer.contract.Call(opts, &out, "currentRoundID") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_TestAPIConsumer *TestAPIConsumerSession) CurrentRoundID() (*big.Int, error) { + return _TestAPIConsumer.Contract.CurrentRoundID(&_TestAPIConsumer.CallOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerCallerSession) CurrentRoundID() (*big.Int, error) { + return _TestAPIConsumer.Contract.CurrentRoundID(&_TestAPIConsumer.CallOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerCaller) Data(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _TestAPIConsumer.contract.Call(opts, &out, "data") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_TestAPIConsumer *TestAPIConsumerSession) Data() (*big.Int, error) { + return _TestAPIConsumer.Contract.Data(&_TestAPIConsumer.CallOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerCallerSession) Data() (*big.Int, error) { + return _TestAPIConsumer.Contract.Data(&_TestAPIConsumer.CallOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerCaller) GetPluginToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _TestAPIConsumer.contract.Call(opts, &out, "getPluginToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_TestAPIConsumer *TestAPIConsumerSession) GetPluginToken() (common.Address, error) { + return _TestAPIConsumer.Contract.GetPluginToken(&_TestAPIConsumer.CallOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerCallerSession) GetPluginToken() (common.Address, error) { + return 
_TestAPIConsumer.Contract.GetPluginToken(&_TestAPIConsumer.CallOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerCaller) IsOwner(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _TestAPIConsumer.contract.Call(opts, &out, "isOwner") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_TestAPIConsumer *TestAPIConsumerSession) IsOwner() (bool, error) { + return _TestAPIConsumer.Contract.IsOwner(&_TestAPIConsumer.CallOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerCallerSession) IsOwner() (bool, error) { + return _TestAPIConsumer.Contract.IsOwner(&_TestAPIConsumer.CallOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _TestAPIConsumer.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_TestAPIConsumer *TestAPIConsumerSession) Owner() (common.Address, error) { + return _TestAPIConsumer.Contract.Owner(&_TestAPIConsumer.CallOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerCallerSession) Owner() (common.Address, error) { + return _TestAPIConsumer.Contract.Owner(&_TestAPIConsumer.CallOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerCaller) Selector(opts *bind.CallOpts) ([4]byte, error) { + var out []interface{} + err := _TestAPIConsumer.contract.Call(opts, &out, "selector") + + if err != nil { + return *new([4]byte), err + } + + out0 := *abi.ConvertType(out[0], new([4]byte)).(*[4]byte) + + return out0, err + +} + +func (_TestAPIConsumer *TestAPIConsumerSession) Selector() ([4]byte, error) { + return _TestAPIConsumer.Contract.Selector(&_TestAPIConsumer.CallOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerCallerSession) Selector() ([4]byte, error) { + return 
_TestAPIConsumer.Contract.Selector(&_TestAPIConsumer.CallOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerTransactor) CancelRequest(opts *bind.TransactOpts, _requestId [32]byte, _payment *big.Int, _callbackFunctionId [4]byte, _expiration *big.Int) (*types.Transaction, error) { + return _TestAPIConsumer.contract.Transact(opts, "cancelRequest", _requestId, _payment, _callbackFunctionId, _expiration) +} + +func (_TestAPIConsumer *TestAPIConsumerSession) CancelRequest(_requestId [32]byte, _payment *big.Int, _callbackFunctionId [4]byte, _expiration *big.Int) (*types.Transaction, error) { + return _TestAPIConsumer.Contract.CancelRequest(&_TestAPIConsumer.TransactOpts, _requestId, _payment, _callbackFunctionId, _expiration) +} + +func (_TestAPIConsumer *TestAPIConsumerTransactorSession) CancelRequest(_requestId [32]byte, _payment *big.Int, _callbackFunctionId [4]byte, _expiration *big.Int) (*types.Transaction, error) { + return _TestAPIConsumer.Contract.CancelRequest(&_TestAPIConsumer.TransactOpts, _requestId, _payment, _callbackFunctionId, _expiration) +} + +func (_TestAPIConsumer *TestAPIConsumerTransactor) CreateRequestTo(opts *bind.TransactOpts, _oracle common.Address, _jobId [32]byte, _payment *big.Int, _url string, _path string, _times *big.Int) (*types.Transaction, error) { + return _TestAPIConsumer.contract.Transact(opts, "createRequestTo", _oracle, _jobId, _payment, _url, _path, _times) +} + +func (_TestAPIConsumer *TestAPIConsumerSession) CreateRequestTo(_oracle common.Address, _jobId [32]byte, _payment *big.Int, _url string, _path string, _times *big.Int) (*types.Transaction, error) { + return _TestAPIConsumer.Contract.CreateRequestTo(&_TestAPIConsumer.TransactOpts, _oracle, _jobId, _payment, _url, _path, _times) +} + +func (_TestAPIConsumer *TestAPIConsumerTransactorSession) CreateRequestTo(_oracle common.Address, _jobId [32]byte, _payment *big.Int, _url string, _path string, _times *big.Int) (*types.Transaction, error) { + return 
_TestAPIConsumer.Contract.CreateRequestTo(&_TestAPIConsumer.TransactOpts, _oracle, _jobId, _payment, _url, _path, _times) +} + +func (_TestAPIConsumer *TestAPIConsumerTransactor) Fulfill(opts *bind.TransactOpts, _requestId [32]byte, _data *big.Int) (*types.Transaction, error) { + return _TestAPIConsumer.contract.Transact(opts, "fulfill", _requestId, _data) +} + +func (_TestAPIConsumer *TestAPIConsumerSession) Fulfill(_requestId [32]byte, _data *big.Int) (*types.Transaction, error) { + return _TestAPIConsumer.Contract.Fulfill(&_TestAPIConsumer.TransactOpts, _requestId, _data) +} + +func (_TestAPIConsumer *TestAPIConsumerTransactorSession) Fulfill(_requestId [32]byte, _data *big.Int) (*types.Transaction, error) { + return _TestAPIConsumer.Contract.Fulfill(&_TestAPIConsumer.TransactOpts, _requestId, _data) +} + +func (_TestAPIConsumer *TestAPIConsumerTransactor) TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) { + return _TestAPIConsumer.contract.Transact(opts, "transferOwnership", newOwner) +} + +func (_TestAPIConsumer *TestAPIConsumerSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _TestAPIConsumer.Contract.TransferOwnership(&_TestAPIConsumer.TransactOpts, newOwner) +} + +func (_TestAPIConsumer *TestAPIConsumerTransactorSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _TestAPIConsumer.Contract.TransferOwnership(&_TestAPIConsumer.TransactOpts, newOwner) +} + +func (_TestAPIConsumer *TestAPIConsumerTransactor) WithdrawLink(opts *bind.TransactOpts) (*types.Transaction, error) { + return _TestAPIConsumer.contract.Transact(opts, "withdrawLink") +} + +func (_TestAPIConsumer *TestAPIConsumerSession) WithdrawLink() (*types.Transaction, error) { + return _TestAPIConsumer.Contract.WithdrawLink(&_TestAPIConsumer.TransactOpts) +} + +func (_TestAPIConsumer *TestAPIConsumerTransactorSession) WithdrawLink() (*types.Transaction, error) { + return 
_TestAPIConsumer.Contract.WithdrawLink(&_TestAPIConsumer.TransactOpts) +} + +type TestAPIConsumerPluginCancelledIterator struct { + Event *TestAPIConsumerPluginCancelled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TestAPIConsumerPluginCancelledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(TestAPIConsumerPluginCancelled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(TestAPIConsumerPluginCancelled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *TestAPIConsumerPluginCancelledIterator) Error() error { + return it.fail +} + +func (it *TestAPIConsumerPluginCancelledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TestAPIConsumerPluginCancelled struct { + Id [32]byte + Raw types.Log +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) FilterPluginCancelled(opts *bind.FilterOpts, id [][32]byte) (*TestAPIConsumerPluginCancelledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _TestAPIConsumer.contract.FilterLogs(opts, "PluginCancelled", idRule) + if err != nil { + return nil, err + } + return &TestAPIConsumerPluginCancelledIterator{contract: _TestAPIConsumer.contract, event: "PluginCancelled", logs: logs, sub: sub}, nil +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) WatchPluginCancelled(opts *bind.WatchOpts, sink chan<- *TestAPIConsumerPluginCancelled, id [][32]byte) (event.Subscription, 
error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _TestAPIConsumer.contract.WatchLogs(opts, "PluginCancelled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TestAPIConsumerPluginCancelled) + if err := _TestAPIConsumer.contract.UnpackLog(event, "PluginCancelled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) ParsePluginCancelled(log types.Log) (*TestAPIConsumerPluginCancelled, error) { + event := new(TestAPIConsumerPluginCancelled) + if err := _TestAPIConsumer.contract.UnpackLog(event, "PluginCancelled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type TestAPIConsumerPluginFulfilledIterator struct { + Event *TestAPIConsumerPluginFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TestAPIConsumerPluginFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(TestAPIConsumerPluginFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(TestAPIConsumerPluginFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return 
it.Next() + } +} + +func (it *TestAPIConsumerPluginFulfilledIterator) Error() error { + return it.fail +} + +func (it *TestAPIConsumerPluginFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TestAPIConsumerPluginFulfilled struct { + Id [32]byte + Raw types.Log +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) FilterPluginFulfilled(opts *bind.FilterOpts, id [][32]byte) (*TestAPIConsumerPluginFulfilledIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _TestAPIConsumer.contract.FilterLogs(opts, "PluginFulfilled", idRule) + if err != nil { + return nil, err + } + return &TestAPIConsumerPluginFulfilledIterator{contract: _TestAPIConsumer.contract, event: "PluginFulfilled", logs: logs, sub: sub}, nil +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) WatchPluginFulfilled(opts *bind.WatchOpts, sink chan<- *TestAPIConsumerPluginFulfilled, id [][32]byte) (event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _TestAPIConsumer.contract.WatchLogs(opts, "PluginFulfilled", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TestAPIConsumerPluginFulfilled) + if err := _TestAPIConsumer.contract.UnpackLog(event, "PluginFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) ParsePluginFulfilled(log types.Log) (*TestAPIConsumerPluginFulfilled, error) { + event := new(TestAPIConsumerPluginFulfilled) + if err := _TestAPIConsumer.contract.UnpackLog(event, "PluginFulfilled", 
log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type TestAPIConsumerPluginRequestedIterator struct { + Event *TestAPIConsumerPluginRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TestAPIConsumerPluginRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(TestAPIConsumerPluginRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(TestAPIConsumerPluginRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *TestAPIConsumerPluginRequestedIterator) Error() error { + return it.fail +} + +func (it *TestAPIConsumerPluginRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TestAPIConsumerPluginRequested struct { + Id [32]byte + Raw types.Log +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) FilterPluginRequested(opts *bind.FilterOpts, id [][32]byte) (*TestAPIConsumerPluginRequestedIterator, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _TestAPIConsumer.contract.FilterLogs(opts, "PluginRequested", idRule) + if err != nil { + return nil, err + } + return &TestAPIConsumerPluginRequestedIterator{contract: _TestAPIConsumer.contract, event: "PluginRequested", logs: logs, sub: sub}, nil +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) WatchPluginRequested(opts *bind.WatchOpts, sink chan<- *TestAPIConsumerPluginRequested, id [][32]byte) 
(event.Subscription, error) { + + var idRule []interface{} + for _, idItem := range id { + idRule = append(idRule, idItem) + } + + logs, sub, err := _TestAPIConsumer.contract.WatchLogs(opts, "PluginRequested", idRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TestAPIConsumerPluginRequested) + if err := _TestAPIConsumer.contract.UnpackLog(event, "PluginRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) ParsePluginRequested(log types.Log) (*TestAPIConsumerPluginRequested, error) { + event := new(TestAPIConsumerPluginRequested) + if err := _TestAPIConsumer.contract.UnpackLog(event, "PluginRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type TestAPIConsumerOwnershipTransferredIterator struct { + Event *TestAPIConsumerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TestAPIConsumerOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(TestAPIConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(TestAPIConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): 
+ it.done = true + it.fail = err + return it.Next() + } +} + +func (it *TestAPIConsumerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *TestAPIConsumerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TestAPIConsumerOwnershipTransferred struct { + PreviousOwner common.Address + NewOwner common.Address + Raw types.Log +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*TestAPIConsumerOwnershipTransferredIterator, error) { + + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) + } + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) + } + + logs, sub, err := _TestAPIConsumer.contract.FilterLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + if err != nil { + return nil, err + } + return &TestAPIConsumerOwnershipTransferredIterator{contract: _TestAPIConsumer.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *TestAPIConsumerOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { + + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) + } + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) + } + + logs, sub, err := _TestAPIConsumer.contract.WatchLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer 
sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TestAPIConsumerOwnershipTransferred) + if err := _TestAPIConsumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) ParseOwnershipTransferred(log types.Log) (*TestAPIConsumerOwnershipTransferred, error) { + event := new(TestAPIConsumerOwnershipTransferred) + if err := _TestAPIConsumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type TestAPIConsumerPerfMetricsEventIterator struct { + Event *TestAPIConsumerPerfMetricsEvent + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TestAPIConsumerPerfMetricsEventIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(TestAPIConsumerPerfMetricsEvent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(TestAPIConsumerPerfMetricsEvent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *TestAPIConsumerPerfMetricsEventIterator) Error() error { + return it.fail +} + +func (it *TestAPIConsumerPerfMetricsEventIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TestAPIConsumerPerfMetricsEvent struct { + RoundID 
*big.Int + RequestId [32]byte + Timestamp *big.Int + Raw types.Log +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) FilterPerfMetricsEvent(opts *bind.FilterOpts) (*TestAPIConsumerPerfMetricsEventIterator, error) { + + logs, sub, err := _TestAPIConsumer.contract.FilterLogs(opts, "PerfMetricsEvent") + if err != nil { + return nil, err + } + return &TestAPIConsumerPerfMetricsEventIterator{contract: _TestAPIConsumer.contract, event: "PerfMetricsEvent", logs: logs, sub: sub}, nil +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) WatchPerfMetricsEvent(opts *bind.WatchOpts, sink chan<- *TestAPIConsumerPerfMetricsEvent) (event.Subscription, error) { + + logs, sub, err := _TestAPIConsumer.contract.WatchLogs(opts, "PerfMetricsEvent") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TestAPIConsumerPerfMetricsEvent) + if err := _TestAPIConsumer.contract.UnpackLog(event, "PerfMetricsEvent", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TestAPIConsumer *TestAPIConsumerFilterer) ParsePerfMetricsEvent(log types.Log) (*TestAPIConsumerPerfMetricsEvent, error) { + event := new(TestAPIConsumerPerfMetricsEvent) + if err := _TestAPIConsumer.contract.UnpackLog(event, "PerfMetricsEvent", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_TestAPIConsumer *TestAPIConsumer) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _TestAPIConsumer.abi.Events["PluginCancelled"].ID: + return _TestAPIConsumer.ParsePluginCancelled(log) + case _TestAPIConsumer.abi.Events["PluginFulfilled"].ID: + return _TestAPIConsumer.ParsePluginFulfilled(log) + case 
_TestAPIConsumer.abi.Events["PluginRequested"].ID: + return _TestAPIConsumer.ParsePluginRequested(log) + case _TestAPIConsumer.abi.Events["OwnershipTransferred"].ID: + return _TestAPIConsumer.ParseOwnershipTransferred(log) + case _TestAPIConsumer.abi.Events["PerfMetricsEvent"].ID: + return _TestAPIConsumer.ParsePerfMetricsEvent(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (TestAPIConsumerPluginCancelled) Topic() common.Hash { + return common.HexToHash("0xe1fe3afa0f7f761ff0a8b89086790efd5140d2907ebd5b7ff6bfcb5e075fd4c5") +} + +func (TestAPIConsumerPluginFulfilled) Topic() common.Hash { + return common.HexToHash("0x7cc135e0cebb02c3480ae5d74d377283180a2601f8f644edf7987b009316c63a") +} + +func (TestAPIConsumerPluginRequested) Topic() common.Hash { + return common.HexToHash("0xb5e6e01e79f91267dc17b4e6314d5d4d03593d2ceee0fbb452b750bd70ea5af9") +} + +func (TestAPIConsumerOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (TestAPIConsumerPerfMetricsEvent) Topic() common.Hash { + return common.HexToHash("0xfbaf68ee7b9032982942607eaea1859969ed8674797b5c2fc6fecaa753851946") +} + +func (_TestAPIConsumer *TestAPIConsumer) Address() common.Address { + return _TestAPIConsumer.address +} + +type TestAPIConsumerInterface interface { + CurrentRoundID(opts *bind.CallOpts) (*big.Int, error) + + Data(opts *bind.CallOpts) (*big.Int, error) + + GetPluginToken(opts *bind.CallOpts) (common.Address, error) + + IsOwner(opts *bind.CallOpts) (bool, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + Selector(opts *bind.CallOpts) ([4]byte, error) + + CancelRequest(opts *bind.TransactOpts, _requestId [32]byte, _payment *big.Int, _callbackFunctionId [4]byte, _expiration *big.Int) (*types.Transaction, error) + + CreateRequestTo(opts *bind.TransactOpts, _oracle common.Address, _jobId [32]byte, _payment 
*big.Int, _url string, _path string, _times *big.Int) (*types.Transaction, error) + + Fulfill(opts *bind.TransactOpts, _requestId [32]byte, _data *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) + + WithdrawLink(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterPluginCancelled(opts *bind.FilterOpts, id [][32]byte) (*TestAPIConsumerPluginCancelledIterator, error) + + WatchPluginCancelled(opts *bind.WatchOpts, sink chan<- *TestAPIConsumerPluginCancelled, id [][32]byte) (event.Subscription, error) + + ParsePluginCancelled(log types.Log) (*TestAPIConsumerPluginCancelled, error) + + FilterPluginFulfilled(opts *bind.FilterOpts, id [][32]byte) (*TestAPIConsumerPluginFulfilledIterator, error) + + WatchPluginFulfilled(opts *bind.WatchOpts, sink chan<- *TestAPIConsumerPluginFulfilled, id [][32]byte) (event.Subscription, error) + + ParsePluginFulfilled(log types.Log) (*TestAPIConsumerPluginFulfilled, error) + + FilterPluginRequested(opts *bind.FilterOpts, id [][32]byte) (*TestAPIConsumerPluginRequestedIterator, error) + + WatchPluginRequested(opts *bind.WatchOpts, sink chan<- *TestAPIConsumerPluginRequested, id [][32]byte) (event.Subscription, error) + + ParsePluginRequested(log types.Log) (*TestAPIConsumerPluginRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*TestAPIConsumerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *TestAPIConsumerOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*TestAPIConsumerOwnershipTransferred, error) + + FilterPerfMetricsEvent(opts *bind.FilterOpts) (*TestAPIConsumerPerfMetricsEventIterator, error) + + WatchPerfMetricsEvent(opts *bind.WatchOpts, sink chan<- *TestAPIConsumerPerfMetricsEvent) 
(event.Subscription, error) + + ParsePerfMetricsEvent(log types.Log) (*TestAPIConsumerPerfMetricsEvent, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/trusted_blockhash_store/trusted_blockhash_store.go b/core/gethwrappers/generated/trusted_blockhash_store/trusted_blockhash_store.go new file mode 100644 index 00000000..00794a9b --- /dev/null +++ b/core/gethwrappers/generated/trusted_blockhash_store/trusted_blockhash_store.go @@ -0,0 +1,680 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package trusted_blockhash_store + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var TrustedBlockhashStoreMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"whitelist\",\"type\":\"address[]\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"InvalidRecentBlockhash\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidTrustedBlockhashes\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotInWhitelist\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"}],\"name\":\"getBlockhash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_whitelist\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"s_whitelistStatus\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"whitelist\",\"type\":\"address[]\"}],\"name\":\"setWhiteli
st\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"}],\"name\":\"store\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"storeEarliest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"blockNums\",\"type\":\"uint256[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"blockhashes\",\"type\":\"bytes32[]\"},{\"internalType\":\"uint256\",\"name\":\"recentBlockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"recentBlockhash\",\"type\":\"bytes32\"}],\"name\":\"storeTrusted\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"header\",\"type\":\"bytes\"}],\"name\":\"storeVerifyHeader\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60806040523480156200001157600080fd5b50604051620014ec380380620014ec8339810160408190526200003491620003e8565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000d9565b505050620000d2816200018560201b60201c565b5062000517565b6001600160a01b038116331415620001345760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6200018f620002ec565b60006004805480602002602001604051908101604052809291908181526020018280548015620001e957602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311620001ca575b5050855193945062000207936004935060208701925090506200034a565b5060005b81518110156200027757600060036000848481518110620002305762000230620004eb565b6020908102919091018101516001600160a01b03168252810191909152604001600020805460ff1916911515919091179055806200026e81620004c1565b9150506200020b565b5060005b8251811015620002e757600160036000858481518110620002a057620002a0620004eb565b6020908102919091018101516001600160a01b03168252810191909152604001600020805460ff191691151591909117905580620002de81620004c1565b9150506200027b565b505050565b6000546001600160a01b03163314620003485760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000082565b565b828054828255906000526020600020908101928215620003a2579160200282015b82811115620003a257825182546001600160a01b0319166001600160a01b039091161782556020909201916001909101906200036b565b50620003b0929150620003b4565b5090565b5b80821115620003b05760008155600101620003b5565b80516001600160a01b0381168114620003e357600080fd5b91905
0565b60006020808385031215620003fc57600080fd5b82516001600160401b03808211156200041457600080fd5b818501915085601f8301126200042957600080fd5b8151818111156200043e576200043e62000501565b8060051b604051601f19603f8301168101818110858211171562000466576200046662000501565b604052828152858101935084860182860187018a10156200048657600080fd5b600095505b83861015620004b4576200049f81620003cb565b8552600195909501949386019386016200048b565b5098975050505050505050565b6000600019821415620004e457634e487b7160e01b600052601160045260246000fd5b5060010190565b634e487b7160e01b600052603260045260246000fd5b634e487b7160e01b600052604160045260246000fd5b610fc580620005276000396000f3fe608060405234801561001057600080fd5b50600436106100c95760003560e01c80638da5cb5b11610081578063f2fde38b1161005b578063f2fde38b146101b5578063f4217648146101c8578063fadff0e1146101db57600080fd5b80638da5cb5b14610143578063e9413d3814610161578063e9ecc1541461018257600080fd5b80636057361d116100b25780636057361d1461012057806379ba50971461013357806383b6d6b71461013b57600080fd5b80633b69ad60146100ce5780635c7de309146100e3575b600080fd5b6100e16100dc366004610d07565b6101ee565b005b6100f66100f1366004610d9e565b610326565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100e161012e366004610d9e565b61035d565b6100e16103e8565b6100e16104e5565b60005473ffffffffffffffffffffffffffffffffffffffff166100f6565b61017461016f366004610d9e565b6104ff565b604051908152602001610117565b6101a5610190366004610c38565b60036020526000908152604090205460ff1681565b6040519015158152602001610117565b6100e16101c3366004610c38565b61057b565b6100e16101d6366004610c53565b61058f565b6100e16101e9366004610db7565b610745565b60006101f9836107e8565b9050818114610234576040517fd2f69c9500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3360009081526003602052604090205460ff1661027d576040517f5b0aa2ba00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8584146102b6576040517fbd75093300000000000000000000000000000000000000000000000000000
000815260040160405180910390fd5b60005b8681101561031c578585828181106102d3576102d3610f5a565b90506020020135600260008a8a858181106102f0576102f0610f5a565b90506020020135815260200190815260200160002081905550808061031490610ef2565b9150506102b9565b5050505050505050565b6004818154811061033657600080fd5b60009182526020909120015473ffffffffffffffffffffffffffffffffffffffff16905081565b6000610368826107e8565b9050806103d6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f626c6f636b68617368286e29206661696c65640000000000000000000000000060448201526064015b60405180910390fd5b60009182526002602052604090912055565b60015473ffffffffffffffffffffffffffffffffffffffff163314610469576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016103cd565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6104fd6101006104f36108ed565b61012e9190610edb565b565b60008181526002602052604081205480610575576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f626c6f636b68617368206e6f7420666f756e6420696e2073746f72650000000060448201526064016103cd565b92915050565b61058361098a565b61058c81610a0b565b50565b61059761098a565b600060048054806020026020016040519081016040528092919081815260200182805480156105fc57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116105d1575b5050855193945061061893600493506020870192509050610b24565b5060005b81518110156106ac5760006003600084848151811061063d5761063d610f5a565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002080547ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
f0016911515919091179055806106a481610ef2565b91505061061c565b5060005b8251811015610740576001600360008584815181106106d1576106d1610f5a565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169115159190911790558061073881610ef2565b9150506106b0565b505050565b60026000610754846001610ec3565b8152602001908152602001600020548180519060200120146107d2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f6865616465722068617320756e6b6e6f776e20626c6f636b686173680000000060448201526064016103cd565b6024015160009182526002602052604090912055565b6000466107f481610b01565b156108dd576101008367ffffffffffffffff1661080f6108ed565b6108199190610edb565b118061083657506108286108ed565b8367ffffffffffffffff1610155b156108445750600092915050565b6040517f2b407a8200000000000000000000000000000000000000000000000000000000815267ffffffffffffffff84166004820152606490632b407a829060240160206040518083038186803b15801561089e57600080fd5b505afa1580156108b2573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108d69190610d85565b9392505050565b505067ffffffffffffffff164090565b6000466108f981610b01565b1561098357606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b15801561094557600080fd5b505afa158015610959573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061097d9190610d85565b91505090565b4391505090565b60005473ffffffffffffffffffffffffffffffffffffffff1633146104fd576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016103cd565b73ffffffffffffffffffffffffffffffffffffffff8116331415610a8b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000
00000000000000060448201526064016103cd565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600061a4b1821480610b15575062066eed82145b8061057557505062066eee1490565b828054828255906000526020600020908101928215610b9e579160200282015b82811115610b9e57825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190610b44565b50610baa929150610bae565b5090565b5b80821115610baa5760008155600101610baf565b803573ffffffffffffffffffffffffffffffffffffffff81168114610be757600080fd5b919050565b60008083601f840112610bfe57600080fd5b50813567ffffffffffffffff811115610c1657600080fd5b6020830191508360208260051b8501011115610c3157600080fd5b9250929050565b600060208284031215610c4a57600080fd5b6108d682610bc3565b60006020808385031215610c6657600080fd5b823567ffffffffffffffff80821115610c7e57600080fd5b818501915085601f830112610c9257600080fd5b813581811115610ca457610ca4610f89565b8060051b9150610cb5848301610e74565b8181528481019084860184860187018a1015610cd057600080fd5b600095505b83861015610cfa57610ce681610bc3565b835260019590950194918601918601610cd5565b5098975050505050505050565b60008060008060008060808789031215610d2057600080fd5b863567ffffffffffffffff80821115610d3857600080fd5b610d448a838b01610bec565b90985096506020890135915080821115610d5d57600080fd5b50610d6a89828a01610bec565b979a9699509760408101359660609091013595509350505050565b600060208284031215610d9757600080fd5b5051919050565b600060208284031215610db057600080fd5b5035919050565b60008060408385031215610dca57600080fd5b8235915060208084013567ffffffffffffffff80821115610dea57600080fd5b818601915086601f830112610dfe57600080fd5b813581811115610e1057610e10610f89565b610e40847fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601610e74565b91508082528784828501011115610e5657600080fd5b80848401858401376
000848284010152508093505050509250929050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610ebb57610ebb610f89565b604052919050565b60008219821115610ed657610ed6610f2b565b500190565b600082821015610eed57610eed610f2b565b500390565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415610f2457610f24610f2b565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var TrustedBlockhashStoreABI = TrustedBlockhashStoreMetaData.ABI + +var TrustedBlockhashStoreBin = TrustedBlockhashStoreMetaData.Bin + +func DeployTrustedBlockhashStore(auth *bind.TransactOpts, backend bind.ContractBackend, whitelist []common.Address) (common.Address, *types.Transaction, *TrustedBlockhashStore, error) { + parsed, err := TrustedBlockhashStoreMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(TrustedBlockhashStoreBin), backend, whitelist) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &TrustedBlockhashStore{address: address, abi: *parsed, TrustedBlockhashStoreCaller: TrustedBlockhashStoreCaller{contract: contract}, TrustedBlockhashStoreTransactor: TrustedBlockhashStoreTransactor{contract: contract}, TrustedBlockhashStoreFilterer: TrustedBlockhashStoreFilterer{contract: contract}}, nil +} + +type TrustedBlockhashStore struct { + address common.Address + abi abi.ABI + TrustedBlockhashStoreCaller + TrustedBlockhashStoreTransactor + TrustedBlockhashStoreFilterer +} + +type TrustedBlockhashStoreCaller 
struct { + contract *bind.BoundContract +} + +type TrustedBlockhashStoreTransactor struct { + contract *bind.BoundContract +} + +type TrustedBlockhashStoreFilterer struct { + contract *bind.BoundContract +} + +type TrustedBlockhashStoreSession struct { + Contract *TrustedBlockhashStore + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type TrustedBlockhashStoreCallerSession struct { + Contract *TrustedBlockhashStoreCaller + CallOpts bind.CallOpts +} + +type TrustedBlockhashStoreTransactorSession struct { + Contract *TrustedBlockhashStoreTransactor + TransactOpts bind.TransactOpts +} + +type TrustedBlockhashStoreRaw struct { + Contract *TrustedBlockhashStore +} + +type TrustedBlockhashStoreCallerRaw struct { + Contract *TrustedBlockhashStoreCaller +} + +type TrustedBlockhashStoreTransactorRaw struct { + Contract *TrustedBlockhashStoreTransactor +} + +func NewTrustedBlockhashStore(address common.Address, backend bind.ContractBackend) (*TrustedBlockhashStore, error) { + abi, err := abi.JSON(strings.NewReader(TrustedBlockhashStoreABI)) + if err != nil { + return nil, err + } + contract, err := bindTrustedBlockhashStore(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &TrustedBlockhashStore{address: address, abi: abi, TrustedBlockhashStoreCaller: TrustedBlockhashStoreCaller{contract: contract}, TrustedBlockhashStoreTransactor: TrustedBlockhashStoreTransactor{contract: contract}, TrustedBlockhashStoreFilterer: TrustedBlockhashStoreFilterer{contract: contract}}, nil +} + +func NewTrustedBlockhashStoreCaller(address common.Address, caller bind.ContractCaller) (*TrustedBlockhashStoreCaller, error) { + contract, err := bindTrustedBlockhashStore(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &TrustedBlockhashStoreCaller{contract: contract}, nil +} + +func NewTrustedBlockhashStoreTransactor(address common.Address, transactor bind.ContractTransactor) (*TrustedBlockhashStoreTransactor, error) { + 
contract, err := bindTrustedBlockhashStore(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &TrustedBlockhashStoreTransactor{contract: contract}, nil +} + +func NewTrustedBlockhashStoreFilterer(address common.Address, filterer bind.ContractFilterer) (*TrustedBlockhashStoreFilterer, error) { + contract, err := bindTrustedBlockhashStore(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &TrustedBlockhashStoreFilterer{contract: contract}, nil +} + +func bindTrustedBlockhashStore(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := TrustedBlockhashStoreMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _TrustedBlockhashStore.Contract.TrustedBlockhashStoreCaller.contract.Call(opts, result, method, params...) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.TrustedBlockhashStoreTransactor.contract.Transfer(opts) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.TrustedBlockhashStoreTransactor.contract.Transact(opts, method, params...) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _TrustedBlockhashStore.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.contract.Transfer(opts) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.contract.Transact(opts, method, params...) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreCaller) GetBlockhash(opts *bind.CallOpts, n *big.Int) ([32]byte, error) { + var out []interface{} + err := _TrustedBlockhashStore.contract.Call(opts, &out, "getBlockhash", n) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreSession) GetBlockhash(n *big.Int) ([32]byte, error) { + return _TrustedBlockhashStore.Contract.GetBlockhash(&_TrustedBlockhashStore.CallOpts, n) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreCallerSession) GetBlockhash(n *big.Int) ([32]byte, error) { + return _TrustedBlockhashStore.Contract.GetBlockhash(&_TrustedBlockhashStore.CallOpts, n) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _TrustedBlockhashStore.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreSession) Owner() (common.Address, error) { + return _TrustedBlockhashStore.Contract.Owner(&_TrustedBlockhashStore.CallOpts) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreCallerSession) Owner() (common.Address, error) { + return _TrustedBlockhashStore.Contract.Owner(&_TrustedBlockhashStore.CallOpts) +} + +func 
(_TrustedBlockhashStore *TrustedBlockhashStoreCaller) SWhitelist(opts *bind.CallOpts, arg0 *big.Int) (common.Address, error) { + var out []interface{} + err := _TrustedBlockhashStore.contract.Call(opts, &out, "s_whitelist", arg0) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreSession) SWhitelist(arg0 *big.Int) (common.Address, error) { + return _TrustedBlockhashStore.Contract.SWhitelist(&_TrustedBlockhashStore.CallOpts, arg0) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreCallerSession) SWhitelist(arg0 *big.Int) (common.Address, error) { + return _TrustedBlockhashStore.Contract.SWhitelist(&_TrustedBlockhashStore.CallOpts, arg0) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreCaller) SWhitelistStatus(opts *bind.CallOpts, arg0 common.Address) (bool, error) { + var out []interface{} + err := _TrustedBlockhashStore.contract.Call(opts, &out, "s_whitelistStatus", arg0) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreSession) SWhitelistStatus(arg0 common.Address) (bool, error) { + return _TrustedBlockhashStore.Contract.SWhitelistStatus(&_TrustedBlockhashStore.CallOpts, arg0) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreCallerSession) SWhitelistStatus(arg0 common.Address) (bool, error) { + return _TrustedBlockhashStore.Contract.SWhitelistStatus(&_TrustedBlockhashStore.CallOpts, arg0) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _TrustedBlockhashStore.contract.Transact(opts, "acceptOwnership") +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreSession) AcceptOwnership() (*types.Transaction, error) { + return 
_TrustedBlockhashStore.Contract.AcceptOwnership(&_TrustedBlockhashStore.TransactOpts) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.AcceptOwnership(&_TrustedBlockhashStore.TransactOpts) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactor) SetWhitelist(opts *bind.TransactOpts, whitelist []common.Address) (*types.Transaction, error) { + return _TrustedBlockhashStore.contract.Transact(opts, "setWhitelist", whitelist) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreSession) SetWhitelist(whitelist []common.Address) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.SetWhitelist(&_TrustedBlockhashStore.TransactOpts, whitelist) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactorSession) SetWhitelist(whitelist []common.Address) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.SetWhitelist(&_TrustedBlockhashStore.TransactOpts, whitelist) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactor) Store(opts *bind.TransactOpts, n *big.Int) (*types.Transaction, error) { + return _TrustedBlockhashStore.contract.Transact(opts, "store", n) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreSession) Store(n *big.Int) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.Store(&_TrustedBlockhashStore.TransactOpts, n) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactorSession) Store(n *big.Int) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.Store(&_TrustedBlockhashStore.TransactOpts, n) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactor) StoreEarliest(opts *bind.TransactOpts) (*types.Transaction, error) { + return _TrustedBlockhashStore.contract.Transact(opts, "storeEarliest") +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreSession) StoreEarliest() (*types.Transaction, error) { + return 
_TrustedBlockhashStore.Contract.StoreEarliest(&_TrustedBlockhashStore.TransactOpts) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactorSession) StoreEarliest() (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.StoreEarliest(&_TrustedBlockhashStore.TransactOpts) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactor) StoreTrusted(opts *bind.TransactOpts, blockNums []*big.Int, blockhashes [][32]byte, recentBlockNumber *big.Int, recentBlockhash [32]byte) (*types.Transaction, error) { + return _TrustedBlockhashStore.contract.Transact(opts, "storeTrusted", blockNums, blockhashes, recentBlockNumber, recentBlockhash) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreSession) StoreTrusted(blockNums []*big.Int, blockhashes [][32]byte, recentBlockNumber *big.Int, recentBlockhash [32]byte) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.StoreTrusted(&_TrustedBlockhashStore.TransactOpts, blockNums, blockhashes, recentBlockNumber, recentBlockhash) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactorSession) StoreTrusted(blockNums []*big.Int, blockhashes [][32]byte, recentBlockNumber *big.Int, recentBlockhash [32]byte) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.StoreTrusted(&_TrustedBlockhashStore.TransactOpts, blockNums, blockhashes, recentBlockNumber, recentBlockhash) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactor) StoreVerifyHeader(opts *bind.TransactOpts, n *big.Int, header []byte) (*types.Transaction, error) { + return _TrustedBlockhashStore.contract.Transact(opts, "storeVerifyHeader", n, header) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreSession) StoreVerifyHeader(n *big.Int, header []byte) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.StoreVerifyHeader(&_TrustedBlockhashStore.TransactOpts, n, header) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactorSession) 
StoreVerifyHeader(n *big.Int, header []byte) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.StoreVerifyHeader(&_TrustedBlockhashStore.TransactOpts, n, header) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _TrustedBlockhashStore.contract.Transact(opts, "transferOwnership", to) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.TransferOwnership(&_TrustedBlockhashStore.TransactOpts, to) +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _TrustedBlockhashStore.Contract.TransferOwnership(&_TrustedBlockhashStore.TransactOpts, to) +} + +type TrustedBlockhashStoreOwnershipTransferRequestedIterator struct { + Event *TrustedBlockhashStoreOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TrustedBlockhashStoreOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(TrustedBlockhashStoreOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(TrustedBlockhashStoreOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *TrustedBlockhashStoreOwnershipTransferRequestedIterator) 
Error() error { + return it.fail +} + +func (it *TrustedBlockhashStoreOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TrustedBlockhashStoreOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*TrustedBlockhashStoreOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _TrustedBlockhashStore.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &TrustedBlockhashStoreOwnershipTransferRequestedIterator{contract: _TrustedBlockhashStore.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *TrustedBlockhashStoreOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _TrustedBlockhashStore.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TrustedBlockhashStoreOwnershipTransferRequested) + if err := _TrustedBlockhashStore.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } 
+ event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreFilterer) ParseOwnershipTransferRequested(log types.Log) (*TrustedBlockhashStoreOwnershipTransferRequested, error) { + event := new(TrustedBlockhashStoreOwnershipTransferRequested) + if err := _TrustedBlockhashStore.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type TrustedBlockhashStoreOwnershipTransferredIterator struct { + Event *TrustedBlockhashStoreOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *TrustedBlockhashStoreOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(TrustedBlockhashStoreOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(TrustedBlockhashStoreOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *TrustedBlockhashStoreOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *TrustedBlockhashStoreOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type TrustedBlockhashStoreOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_TrustedBlockhashStore 
*TrustedBlockhashStoreFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*TrustedBlockhashStoreOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _TrustedBlockhashStore.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &TrustedBlockhashStoreOwnershipTransferredIterator{contract: _TrustedBlockhashStore.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *TrustedBlockhashStoreOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _TrustedBlockhashStore.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(TrustedBlockhashStoreOwnershipTransferred) + if err := _TrustedBlockhashStore.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_TrustedBlockhashStore *TrustedBlockhashStoreFilterer) ParseOwnershipTransferred(log types.Log) (*TrustedBlockhashStoreOwnershipTransferred, error) { + 
event := new(TrustedBlockhashStoreOwnershipTransferred) + if err := _TrustedBlockhashStore.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_TrustedBlockhashStore *TrustedBlockhashStore) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _TrustedBlockhashStore.abi.Events["OwnershipTransferRequested"].ID: + return _TrustedBlockhashStore.ParseOwnershipTransferRequested(log) + case _TrustedBlockhashStore.abi.Events["OwnershipTransferred"].ID: + return _TrustedBlockhashStore.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (TrustedBlockhashStoreOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (TrustedBlockhashStoreOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_TrustedBlockhashStore *TrustedBlockhashStore) Address() common.Address { + return _TrustedBlockhashStore.address +} + +type TrustedBlockhashStoreInterface interface { + GetBlockhash(opts *bind.CallOpts, n *big.Int) ([32]byte, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SWhitelist(opts *bind.CallOpts, arg0 *big.Int) (common.Address, error) + + SWhitelistStatus(opts *bind.CallOpts, arg0 common.Address) (bool, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + SetWhitelist(opts *bind.TransactOpts, whitelist []common.Address) (*types.Transaction, error) + + Store(opts *bind.TransactOpts, n *big.Int) (*types.Transaction, error) + + StoreEarliest(opts *bind.TransactOpts) (*types.Transaction, error) + + StoreTrusted(opts *bind.TransactOpts, blockNums []*big.Int, blockhashes [][32]byte, recentBlockNumber *big.Int, recentBlockhash 
[32]byte) (*types.Transaction, error) + + StoreVerifyHeader(opts *bind.TransactOpts, n *big.Int, header []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*TrustedBlockhashStoreOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *TrustedBlockhashStoreOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*TrustedBlockhashStoreOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*TrustedBlockhashStoreOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *TrustedBlockhashStoreOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*TrustedBlockhashStoreOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/type_and_version_interface_wrapper/type_and_version_interface_wrapper.go b/core/gethwrappers/generated/type_and_version_interface_wrapper/type_and_version_interface_wrapper.go new file mode 100644 index 00000000..bf907b03 --- /dev/null +++ b/core/gethwrappers/generated/type_and_version_interface_wrapper/type_and_version_interface_wrapper.go @@ -0,0 +1,183 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package type_and_version_interface_wrapper + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var TypeAndVersionInterfaceMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", +} + +var TypeAndVersionInterfaceABI = TypeAndVersionInterfaceMetaData.ABI + +type TypeAndVersionInterface struct { + address common.Address + abi abi.ABI + TypeAndVersionInterfaceCaller + TypeAndVersionInterfaceTransactor + TypeAndVersionInterfaceFilterer +} + +type TypeAndVersionInterfaceCaller struct { + contract *bind.BoundContract +} + +type TypeAndVersionInterfaceTransactor struct { + contract *bind.BoundContract +} + +type TypeAndVersionInterfaceFilterer struct { + contract *bind.BoundContract +} + +type TypeAndVersionInterfaceSession struct { + Contract *TypeAndVersionInterface + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type TypeAndVersionInterfaceCallerSession struct { + Contract *TypeAndVersionInterfaceCaller + CallOpts bind.CallOpts +} + +type TypeAndVersionInterfaceTransactorSession struct { + Contract *TypeAndVersionInterfaceTransactor + TransactOpts bind.TransactOpts +} + +type TypeAndVersionInterfaceRaw struct { + Contract *TypeAndVersionInterface +} + +type TypeAndVersionInterfaceCallerRaw struct { + Contract *TypeAndVersionInterfaceCaller +} + +type TypeAndVersionInterfaceTransactorRaw struct { + Contract 
*TypeAndVersionInterfaceTransactor +} + +func NewTypeAndVersionInterface(address common.Address, backend bind.ContractBackend) (*TypeAndVersionInterface, error) { + abi, err := abi.JSON(strings.NewReader(TypeAndVersionInterfaceABI)) + if err != nil { + return nil, err + } + contract, err := bindTypeAndVersionInterface(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &TypeAndVersionInterface{address: address, abi: abi, TypeAndVersionInterfaceCaller: TypeAndVersionInterfaceCaller{contract: contract}, TypeAndVersionInterfaceTransactor: TypeAndVersionInterfaceTransactor{contract: contract}, TypeAndVersionInterfaceFilterer: TypeAndVersionInterfaceFilterer{contract: contract}}, nil +} + +func NewTypeAndVersionInterfaceCaller(address common.Address, caller bind.ContractCaller) (*TypeAndVersionInterfaceCaller, error) { + contract, err := bindTypeAndVersionInterface(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &TypeAndVersionInterfaceCaller{contract: contract}, nil +} + +func NewTypeAndVersionInterfaceTransactor(address common.Address, transactor bind.ContractTransactor) (*TypeAndVersionInterfaceTransactor, error) { + contract, err := bindTypeAndVersionInterface(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &TypeAndVersionInterfaceTransactor{contract: contract}, nil +} + +func NewTypeAndVersionInterfaceFilterer(address common.Address, filterer bind.ContractFilterer) (*TypeAndVersionInterfaceFilterer, error) { + contract, err := bindTypeAndVersionInterface(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &TypeAndVersionInterfaceFilterer{contract: contract}, nil +} + +func bindTypeAndVersionInterface(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := TypeAndVersionInterfaceMetaData.GetAbi() + if err != nil { + return nil, err + } + 
return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_TypeAndVersionInterface *TypeAndVersionInterfaceRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _TypeAndVersionInterface.Contract.TypeAndVersionInterfaceCaller.contract.Call(opts, result, method, params...) +} + +func (_TypeAndVersionInterface *TypeAndVersionInterfaceRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _TypeAndVersionInterface.Contract.TypeAndVersionInterfaceTransactor.contract.Transfer(opts) +} + +func (_TypeAndVersionInterface *TypeAndVersionInterfaceRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _TypeAndVersionInterface.Contract.TypeAndVersionInterfaceTransactor.contract.Transact(opts, method, params...) +} + +func (_TypeAndVersionInterface *TypeAndVersionInterfaceCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _TypeAndVersionInterface.Contract.contract.Call(opts, result, method, params...) +} + +func (_TypeAndVersionInterface *TypeAndVersionInterfaceTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _TypeAndVersionInterface.Contract.contract.Transfer(opts) +} + +func (_TypeAndVersionInterface *TypeAndVersionInterfaceTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _TypeAndVersionInterface.Contract.contract.Transact(opts, method, params...) 
+} + +func (_TypeAndVersionInterface *TypeAndVersionInterfaceCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _TypeAndVersionInterface.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_TypeAndVersionInterface *TypeAndVersionInterfaceSession) TypeAndVersion() (string, error) { + return _TypeAndVersionInterface.Contract.TypeAndVersion(&_TypeAndVersionInterface.CallOpts) +} + +func (_TypeAndVersionInterface *TypeAndVersionInterfaceCallerSession) TypeAndVersion() (string, error) { + return _TypeAndVersionInterface.Contract.TypeAndVersion(&_TypeAndVersionInterface.CallOpts) +} + +func (_TypeAndVersionInterface *TypeAndVersionInterface) Address() common.Address { + return _TypeAndVersionInterface.address +} + +type TypeAndVersionInterfaceInterface interface { + TypeAndVersion(opts *bind.CallOpts) (string, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/upkeep_counter_wrapper/upkeep_counter_wrapper.go b/core/gethwrappers/generated/upkeep_counter_wrapper/upkeep_counter_wrapper.go new file mode 100644 index 00000000..ed7d025f --- /dev/null +++ b/core/gethwrappers/generated/upkeep_counter_wrapper/upkeep_counter_wrapper.go @@ -0,0 +1,554 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package upkeep_counter_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var UpkeepCounterMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_testRange\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_interval\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"initialBlock\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"lastBlock\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"previousBlock\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"counter\",\"type\":\"uint256\"}],\"name\":\"PerformingUpkeep\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"counter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"eligible\",\"outputs\":[{\"internalType\
":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"interval\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"previousPerformBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_testRange\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_interval\",\"type\":\"uint256\"}],\"name\":\"setSpread\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"testRange\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5060405161048338038061048383398101604081905261002f9161004d565b60009182556001556003819055436002556004819055600555610071565b6000806040838503121561006057600080fd5b505080516020909101519092909150565b610403806100806000396000f3fe608060405234801561001057600080fd5b50600436106100be5760003560e01c80637f407edf11610076578063917d895f1161005b578063917d895f14610150578063947a36fb14610159578063d832d92f1461016257600080fd5b80637f407edf14610127578063806b984f1461014757600080fd5b806361bc221a116100a757806361bc221a146100f45780636250a13a146100fd5780636e04ff0d1461010657600080fd5b80632cb15864146100c35780634585e33b146100df575b600080fd5b6100cc60045481565b6040519081526020015b60405180910390f35b6100f26100ed366004610291565b61017a565b005b6100cc60055481565b6100cc60005481565b610119610114366004610291565b6101fd565b6040516100d6929190610303565b6100f2610135366004610379565b60009182556001556004819055600555565b6100cc60025481565b6100cc60035481565b6100cc60015481565b61016a61024f565b60405190151581526020016100d6565b60045460000361018957436004555b4360025560055461019b9060016103ca565b600581905560045460025460035460408051938452602084019290925290820152606081019190915232907f8e8112f20a2134e18e591d2cdd68cd86a95d06e6328ede501fc6314f4a5075fa9060800160405180910390a25050600254600355565b6000606061020961024f565b848481818080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250959a92995091975050505050505050565b60006004546000036102615750600190565b60005460045461027190436103e3565b10801561028c575060015460025461028990436103e3565b10155b905090565b600080602083850312156102a457600080fd5b823567ffffffffffffffff808211156102bc57600080fd5b818501915085601f8301126102d057600080fd5b8135818111156102df57600080fd5b8660208285010111156102f157600080fd5b60209290920196919550909350505050565b821515815260006020604081840152835180604085015260005b818110156103395785810183015185820160600152820161031d565b5060006060828601015260607fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe
0601f830116850101925050509392505050565b6000806040838503121561038c57600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b808201808211156103dd576103dd61039b565b92915050565b818103818111156103dd576103dd61039b56fea164736f6c6343000810000a", +} + +var UpkeepCounterABI = UpkeepCounterMetaData.ABI + +var UpkeepCounterBin = UpkeepCounterMetaData.Bin + +func DeployUpkeepCounter(auth *bind.TransactOpts, backend bind.ContractBackend, _testRange *big.Int, _interval *big.Int) (common.Address, *types.Transaction, *UpkeepCounter, error) { + parsed, err := UpkeepCounterMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(UpkeepCounterBin), backend, _testRange, _interval) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &UpkeepCounter{address: address, abi: *parsed, UpkeepCounterCaller: UpkeepCounterCaller{contract: contract}, UpkeepCounterTransactor: UpkeepCounterTransactor{contract: contract}, UpkeepCounterFilterer: UpkeepCounterFilterer{contract: contract}}, nil +} + +type UpkeepCounter struct { + address common.Address + abi abi.ABI + UpkeepCounterCaller + UpkeepCounterTransactor + UpkeepCounterFilterer +} + +type UpkeepCounterCaller struct { + contract *bind.BoundContract +} + +type UpkeepCounterTransactor struct { + contract *bind.BoundContract +} + +type UpkeepCounterFilterer struct { + contract *bind.BoundContract +} + +type UpkeepCounterSession struct { + Contract *UpkeepCounter + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type UpkeepCounterCallerSession struct { + Contract *UpkeepCounterCaller + CallOpts bind.CallOpts +} + +type UpkeepCounterTransactorSession struct { + Contract *UpkeepCounterTransactor + TransactOpts bind.TransactOpts 
+} + +type UpkeepCounterRaw struct { + Contract *UpkeepCounter +} + +type UpkeepCounterCallerRaw struct { + Contract *UpkeepCounterCaller +} + +type UpkeepCounterTransactorRaw struct { + Contract *UpkeepCounterTransactor +} + +func NewUpkeepCounter(address common.Address, backend bind.ContractBackend) (*UpkeepCounter, error) { + abi, err := abi.JSON(strings.NewReader(UpkeepCounterABI)) + if err != nil { + return nil, err + } + contract, err := bindUpkeepCounter(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &UpkeepCounter{address: address, abi: abi, UpkeepCounterCaller: UpkeepCounterCaller{contract: contract}, UpkeepCounterTransactor: UpkeepCounterTransactor{contract: contract}, UpkeepCounterFilterer: UpkeepCounterFilterer{contract: contract}}, nil +} + +func NewUpkeepCounterCaller(address common.Address, caller bind.ContractCaller) (*UpkeepCounterCaller, error) { + contract, err := bindUpkeepCounter(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &UpkeepCounterCaller{contract: contract}, nil +} + +func NewUpkeepCounterTransactor(address common.Address, transactor bind.ContractTransactor) (*UpkeepCounterTransactor, error) { + contract, err := bindUpkeepCounter(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &UpkeepCounterTransactor{contract: contract}, nil +} + +func NewUpkeepCounterFilterer(address common.Address, filterer bind.ContractFilterer) (*UpkeepCounterFilterer, error) { + contract, err := bindUpkeepCounter(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &UpkeepCounterFilterer{contract: contract}, nil +} + +func bindUpkeepCounter(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := UpkeepCounterMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, 
transactor, filterer), nil +} + +func (_UpkeepCounter *UpkeepCounterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _UpkeepCounter.Contract.UpkeepCounterCaller.contract.Call(opts, result, method, params...) +} + +func (_UpkeepCounter *UpkeepCounterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _UpkeepCounter.Contract.UpkeepCounterTransactor.contract.Transfer(opts) +} + +func (_UpkeepCounter *UpkeepCounterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _UpkeepCounter.Contract.UpkeepCounterTransactor.contract.Transact(opts, method, params...) +} + +func (_UpkeepCounter *UpkeepCounterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _UpkeepCounter.Contract.contract.Call(opts, result, method, params...) +} + +func (_UpkeepCounter *UpkeepCounterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _UpkeepCounter.Contract.contract.Transfer(opts) +} + +func (_UpkeepCounter *UpkeepCounterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _UpkeepCounter.Contract.contract.Transact(opts, method, params...) 
+} + +func (_UpkeepCounter *UpkeepCounterCaller) CheckUpkeep(opts *bind.CallOpts, data []byte) (bool, []byte, error) { + var out []interface{} + err := _UpkeepCounter.contract.Call(opts, &out, "checkUpkeep", data) + + if err != nil { + return *new(bool), *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out1 := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return out0, out1, err + +} + +func (_UpkeepCounter *UpkeepCounterSession) CheckUpkeep(data []byte) (bool, []byte, error) { + return _UpkeepCounter.Contract.CheckUpkeep(&_UpkeepCounter.CallOpts, data) +} + +func (_UpkeepCounter *UpkeepCounterCallerSession) CheckUpkeep(data []byte) (bool, []byte, error) { + return _UpkeepCounter.Contract.CheckUpkeep(&_UpkeepCounter.CallOpts, data) +} + +func (_UpkeepCounter *UpkeepCounterCaller) Counter(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepCounter.contract.Call(opts, &out, "counter") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepCounter *UpkeepCounterSession) Counter() (*big.Int, error) { + return _UpkeepCounter.Contract.Counter(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterCallerSession) Counter() (*big.Int, error) { + return _UpkeepCounter.Contract.Counter(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterCaller) Eligible(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _UpkeepCounter.contract.Call(opts, &out, "eligible") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_UpkeepCounter *UpkeepCounterSession) Eligible() (bool, error) { + return _UpkeepCounter.Contract.Eligible(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterCallerSession) Eligible() (bool, error) { + return 
_UpkeepCounter.Contract.Eligible(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterCaller) InitialBlock(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepCounter.contract.Call(opts, &out, "initialBlock") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepCounter *UpkeepCounterSession) InitialBlock() (*big.Int, error) { + return _UpkeepCounter.Contract.InitialBlock(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterCallerSession) InitialBlock() (*big.Int, error) { + return _UpkeepCounter.Contract.InitialBlock(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterCaller) Interval(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepCounter.contract.Call(opts, &out, "interval") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepCounter *UpkeepCounterSession) Interval() (*big.Int, error) { + return _UpkeepCounter.Contract.Interval(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterCallerSession) Interval() (*big.Int, error) { + return _UpkeepCounter.Contract.Interval(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterCaller) LastBlock(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepCounter.contract.Call(opts, &out, "lastBlock") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepCounter *UpkeepCounterSession) LastBlock() (*big.Int, error) { + return _UpkeepCounter.Contract.LastBlock(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterCallerSession) LastBlock() (*big.Int, error) { + return _UpkeepCounter.Contract.LastBlock(&_UpkeepCounter.CallOpts) +} + +func 
(_UpkeepCounter *UpkeepCounterCaller) PreviousPerformBlock(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepCounter.contract.Call(opts, &out, "previousPerformBlock") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepCounter *UpkeepCounterSession) PreviousPerformBlock() (*big.Int, error) { + return _UpkeepCounter.Contract.PreviousPerformBlock(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterCallerSession) PreviousPerformBlock() (*big.Int, error) { + return _UpkeepCounter.Contract.PreviousPerformBlock(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterCaller) TestRange(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepCounter.contract.Call(opts, &out, "testRange") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepCounter *UpkeepCounterSession) TestRange() (*big.Int, error) { + return _UpkeepCounter.Contract.TestRange(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterCallerSession) TestRange() (*big.Int, error) { + return _UpkeepCounter.Contract.TestRange(&_UpkeepCounter.CallOpts) +} + +func (_UpkeepCounter *UpkeepCounterTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _UpkeepCounter.contract.Transact(opts, "performUpkeep", performData) +} + +func (_UpkeepCounter *UpkeepCounterSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _UpkeepCounter.Contract.PerformUpkeep(&_UpkeepCounter.TransactOpts, performData) +} + +func (_UpkeepCounter *UpkeepCounterTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _UpkeepCounter.Contract.PerformUpkeep(&_UpkeepCounter.TransactOpts, performData) +} + +func (_UpkeepCounter 
*UpkeepCounterTransactor) SetSpread(opts *bind.TransactOpts, _testRange *big.Int, _interval *big.Int) (*types.Transaction, error) { + return _UpkeepCounter.contract.Transact(opts, "setSpread", _testRange, _interval) +} + +func (_UpkeepCounter *UpkeepCounterSession) SetSpread(_testRange *big.Int, _interval *big.Int) (*types.Transaction, error) { + return _UpkeepCounter.Contract.SetSpread(&_UpkeepCounter.TransactOpts, _testRange, _interval) +} + +func (_UpkeepCounter *UpkeepCounterTransactorSession) SetSpread(_testRange *big.Int, _interval *big.Int) (*types.Transaction, error) { + return _UpkeepCounter.Contract.SetSpread(&_UpkeepCounter.TransactOpts, _testRange, _interval) +} + +type UpkeepCounterPerformingUpkeepIterator struct { + Event *UpkeepCounterPerformingUpkeep + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *UpkeepCounterPerformingUpkeepIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(UpkeepCounterPerformingUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(UpkeepCounterPerformingUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *UpkeepCounterPerformingUpkeepIterator) Error() error { + return it.fail +} + +func (it *UpkeepCounterPerformingUpkeepIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type UpkeepCounterPerformingUpkeep struct { + From common.Address + InitialBlock *big.Int + LastBlock *big.Int + PreviousBlock *big.Int + Counter *big.Int + Raw types.Log +} + +func 
(_UpkeepCounter *UpkeepCounterFilterer) FilterPerformingUpkeep(opts *bind.FilterOpts, from []common.Address) (*UpkeepCounterPerformingUpkeepIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _UpkeepCounter.contract.FilterLogs(opts, "PerformingUpkeep", fromRule) + if err != nil { + return nil, err + } + return &UpkeepCounterPerformingUpkeepIterator{contract: _UpkeepCounter.contract, event: "PerformingUpkeep", logs: logs, sub: sub}, nil +} + +func (_UpkeepCounter *UpkeepCounterFilterer) WatchPerformingUpkeep(opts *bind.WatchOpts, sink chan<- *UpkeepCounterPerformingUpkeep, from []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + logs, sub, err := _UpkeepCounter.contract.WatchLogs(opts, "PerformingUpkeep", fromRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(UpkeepCounterPerformingUpkeep) + if err := _UpkeepCounter.contract.UnpackLog(event, "PerformingUpkeep", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_UpkeepCounter *UpkeepCounterFilterer) ParsePerformingUpkeep(log types.Log) (*UpkeepCounterPerformingUpkeep, error) { + event := new(UpkeepCounterPerformingUpkeep) + if err := _UpkeepCounter.contract.UnpackLog(event, "PerformingUpkeep", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_UpkeepCounter *UpkeepCounter) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _UpkeepCounter.abi.Events["PerformingUpkeep"].ID: + return 
_UpkeepCounter.ParsePerformingUpkeep(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (UpkeepCounterPerformingUpkeep) Topic() common.Hash { + return common.HexToHash("0x8e8112f20a2134e18e591d2cdd68cd86a95d06e6328ede501fc6314f4a5075fa") +} + +func (_UpkeepCounter *UpkeepCounter) Address() common.Address { + return _UpkeepCounter.address +} + +type UpkeepCounterInterface interface { + CheckUpkeep(opts *bind.CallOpts, data []byte) (bool, []byte, error) + + Counter(opts *bind.CallOpts) (*big.Int, error) + + Eligible(opts *bind.CallOpts) (bool, error) + + InitialBlock(opts *bind.CallOpts) (*big.Int, error) + + Interval(opts *bind.CallOpts) (*big.Int, error) + + LastBlock(opts *bind.CallOpts) (*big.Int, error) + + PreviousPerformBlock(opts *bind.CallOpts) (*big.Int, error) + + TestRange(opts *bind.CallOpts) (*big.Int, error) + + PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) + + SetSpread(opts *bind.TransactOpts, _testRange *big.Int, _interval *big.Int) (*types.Transaction, error) + + FilterPerformingUpkeep(opts *bind.FilterOpts, from []common.Address) (*UpkeepCounterPerformingUpkeepIterator, error) + + WatchPerformingUpkeep(opts *bind.WatchOpts, sink chan<- *UpkeepCounterPerformingUpkeep, from []common.Address) (event.Subscription, error) + + ParsePerformingUpkeep(log types.Log) (*UpkeepCounterPerformingUpkeep, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/upkeep_perform_counter_restrictive_wrapper/upkeep_perform_counter_restrictive_wrapper.go b/core/gethwrappers/generated/upkeep_perform_counter_restrictive_wrapper/upkeep_perform_counter_restrictive_wrapper.go new file mode 100644 index 00000000..fbd5fc62 --- /dev/null +++ b/core/gethwrappers/generated/upkeep_perform_counter_restrictive_wrapper/upkeep_perform_counter_restrictive_wrapper.go @@ -0,0 +1,634 @@ +// Code 
generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package upkeep_perform_counter_restrictive_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var UpkeepPerformCounterRestrictiveMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_testRange\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_averageEligibilityCadence\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"eligible\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"initialCall\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nextEligible\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"}],\"name\":\"PerformingUpkeep\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"averageEligibilityCadence\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"checkEligible\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\"
:\"checkGasToBurn\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"dummyMap\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCountPerforms\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"initialCall\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextEligible\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"performGasToBurn\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"reset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"setCheckGasToBurn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"setPerformGasToBurn\",\"outputs\":[],\"stateMutability\":\"n
onpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_newTestRange\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_newAverageEligibilityCadence\",\"type\":\"uint256\"}],\"name\":\"setSpread\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"testRange\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: "0x6080604052600080556000600155600060075534801561001e57600080fd5b5060405161074238038061074283398101604081905261003d9161004b565b60029190915560035561006f565b6000806040838503121561005e57600080fd5b505080516020909101519092909150565b6106c48061007e6000396000f3fe608060405234801561001057600080fd5b50600436106100f55760003560e01c80637145f11b11610097578063b30566b411610066578063b30566b4146101e2578063c228a98e146101eb578063d826f88f146101f3578063e303666f1461020057600080fd5b80637145f11b146101845780637f407edf146101b7578063926f086e146101d0578063a9a4c57c146101d957600080fd5b80634585e33b116100d35780634585e33b1461013e578063523d9b8a146101515780636250a13a1461015a5780636e04ff0d1461016357600080fd5b806313bda75b146100fa5780632555d2cf1461010f5780632ff3617d14610122575b600080fd5b61010d610108366004610454565b600455565b005b61010d61011d366004610454565b600555565b61012b60045481565b6040519081526020015b60405180910390f35b61010d61014c36600461046d565b610208565b61012b60015481565b61012b60025481565b61017661017136600461046d565b610349565b6040516101359291906104df565b6101a7610192366004610454565b60066020526000908152604090205460ff1681565b6040519015158152602001610135565b61010d6101c5366004610555565b600291909155600355565b61012b60005481565b61012b60035481565b61012b60055481565b6101a76103db565b61010d6000808055600755565b60075461012b565b60005a905060006102176103ea565b60005460015460408051841515815232602082015290810192909252606082015243608082018190529192507fbd6b6608a51477954e8b498c633bda87e5cd555e06ead50486398d9e3b9cebc09060a001604051809103
90a18161027a57600080fd5b60005460000361028a5760008190555b6003546102989060026105a6565b6102a0610416565b6102aa91906105e3565b6102b4908261061e565b6102bf90600161061e565b600155600780549060006102d283610637565b919050555080806102e29061066f565b9150505b6005545a6102f490856106a4565b1015610342578040600090815260066020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690558061033a8161066f565b9150506102e6565b5050505050565b6000606060005a9050600061035f6001436106a4565b905060005b6004545a61037290856106a4565b10156103a9578080156103955750814060009081526006602052604090205460ff165b9050816103a18161066f565b925050610364565b6103b16103ea565b60408051831515602082015201604051602081830303815290604052945094505050509250929050565b60006103e56103ea565b905090565b6000805415806103e5575060025460005461040590436106a4565b1080156103e5575050600154431190565b60006104236001436106a4565b604080519140602083015230908201526060016040516020818303038152906040528051906020012060001c905090565b60006020828403121561046657600080fd5b5035919050565b6000806020838503121561048057600080fd5b823567ffffffffffffffff8082111561049857600080fd5b818501915085601f8301126104ac57600080fd5b8135818111156104bb57600080fd5b8660208285010111156104cd57600080fd5b60209290920196919550909350505050565b821515815260006020604081840152835180604085015260005b81811015610515578581018301518582016060015282016104f9565b5060006060828601015260607fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f830116850101925050509392505050565b6000806040838503121561056857600080fd5b50508035926020909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156105de576105de610577565b500290565b600082610619577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500690565b8082018082111561063157610631610577565b92915050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffff820361066857610668610577565b5060010190565b60008161067e5761067e610577565b507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190565b818103818111156106315761063161057756fea164736f6c6343000810000a", +} + +var UpkeepPerformCounterRestrictiveABI = UpkeepPerformCounterRestrictiveMetaData.ABI + +var UpkeepPerformCounterRestrictiveBin = UpkeepPerformCounterRestrictiveMetaData.Bin + +func DeployUpkeepPerformCounterRestrictive(auth *bind.TransactOpts, backend bind.ContractBackend, _testRange *big.Int, _averageEligibilityCadence *big.Int) (common.Address, *types.Transaction, *UpkeepPerformCounterRestrictive, error) { + parsed, err := UpkeepPerformCounterRestrictiveMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(UpkeepPerformCounterRestrictiveBin), backend, _testRange, _averageEligibilityCadence) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &UpkeepPerformCounterRestrictive{address: address, abi: *parsed, UpkeepPerformCounterRestrictiveCaller: UpkeepPerformCounterRestrictiveCaller{contract: contract}, UpkeepPerformCounterRestrictiveTransactor: UpkeepPerformCounterRestrictiveTransactor{contract: contract}, UpkeepPerformCounterRestrictiveFilterer: UpkeepPerformCounterRestrictiveFilterer{contract: contract}}, nil +} + +type UpkeepPerformCounterRestrictive struct { + address common.Address + abi abi.ABI + UpkeepPerformCounterRestrictiveCaller + UpkeepPerformCounterRestrictiveTransactor + UpkeepPerformCounterRestrictiveFilterer +} + +type UpkeepPerformCounterRestrictiveCaller struct { + contract *bind.BoundContract +} + +type UpkeepPerformCounterRestrictiveTransactor struct { + contract *bind.BoundContract +} + +type UpkeepPerformCounterRestrictiveFilterer struct { + contract *bind.BoundContract +} + +type 
UpkeepPerformCounterRestrictiveSession struct { + Contract *UpkeepPerformCounterRestrictive + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type UpkeepPerformCounterRestrictiveCallerSession struct { + Contract *UpkeepPerformCounterRestrictiveCaller + CallOpts bind.CallOpts +} + +type UpkeepPerformCounterRestrictiveTransactorSession struct { + Contract *UpkeepPerformCounterRestrictiveTransactor + TransactOpts bind.TransactOpts +} + +type UpkeepPerformCounterRestrictiveRaw struct { + Contract *UpkeepPerformCounterRestrictive +} + +type UpkeepPerformCounterRestrictiveCallerRaw struct { + Contract *UpkeepPerformCounterRestrictiveCaller +} + +type UpkeepPerformCounterRestrictiveTransactorRaw struct { + Contract *UpkeepPerformCounterRestrictiveTransactor +} + +func NewUpkeepPerformCounterRestrictive(address common.Address, backend bind.ContractBackend) (*UpkeepPerformCounterRestrictive, error) { + abi, err := abi.JSON(strings.NewReader(UpkeepPerformCounterRestrictiveABI)) + if err != nil { + return nil, err + } + contract, err := bindUpkeepPerformCounterRestrictive(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &UpkeepPerformCounterRestrictive{address: address, abi: abi, UpkeepPerformCounterRestrictiveCaller: UpkeepPerformCounterRestrictiveCaller{contract: contract}, UpkeepPerformCounterRestrictiveTransactor: UpkeepPerformCounterRestrictiveTransactor{contract: contract}, UpkeepPerformCounterRestrictiveFilterer: UpkeepPerformCounterRestrictiveFilterer{contract: contract}}, nil +} + +func NewUpkeepPerformCounterRestrictiveCaller(address common.Address, caller bind.ContractCaller) (*UpkeepPerformCounterRestrictiveCaller, error) { + contract, err := bindUpkeepPerformCounterRestrictive(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &UpkeepPerformCounterRestrictiveCaller{contract: contract}, nil +} + +func NewUpkeepPerformCounterRestrictiveTransactor(address common.Address, transactor 
bind.ContractTransactor) (*UpkeepPerformCounterRestrictiveTransactor, error) { + contract, err := bindUpkeepPerformCounterRestrictive(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &UpkeepPerformCounterRestrictiveTransactor{contract: contract}, nil +} + +func NewUpkeepPerformCounterRestrictiveFilterer(address common.Address, filterer bind.ContractFilterer) (*UpkeepPerformCounterRestrictiveFilterer, error) { + contract, err := bindUpkeepPerformCounterRestrictive(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &UpkeepPerformCounterRestrictiveFilterer{contract: contract}, nil +} + +func bindUpkeepPerformCounterRestrictive(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := UpkeepPerformCounterRestrictiveMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _UpkeepPerformCounterRestrictive.Contract.UpkeepPerformCounterRestrictiveCaller.contract.Call(opts, result, method, params...) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.UpkeepPerformCounterRestrictiveTransactor.contract.Transfer(opts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.UpkeepPerformCounterRestrictiveTransactor.contract.Transact(opts, method, params...) 
+} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _UpkeepPerformCounterRestrictive.Contract.contract.Call(opts, result, method, params...) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.contract.Transfer(opts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.contract.Transact(opts, method, params...) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) AverageEligibilityCadence(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "averageEligibilityCadence") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) AverageEligibilityCadence() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.AverageEligibilityCadence(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSession) AverageEligibilityCadence() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.AverageEligibilityCadence(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) CheckEligible(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "checkEligible") + + if err != nil { + 
return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) CheckEligible() (bool, error) { + return _UpkeepPerformCounterRestrictive.Contract.CheckEligible(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSession) CheckEligible() (bool, error) { + return _UpkeepPerformCounterRestrictive.Contract.CheckEligible(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) CheckGasToBurn(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "checkGasToBurn") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) CheckGasToBurn() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.CheckGasToBurn(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSession) CheckGasToBurn() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.CheckGasToBurn(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) CheckUpkeep(opts *bind.CallOpts, data []byte) (bool, []byte, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "checkUpkeep", data) + + if err != nil { + return *new(bool), *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out1 := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return out0, out1, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) 
CheckUpkeep(data []byte) (bool, []byte, error) { + return _UpkeepPerformCounterRestrictive.Contract.CheckUpkeep(&_UpkeepPerformCounterRestrictive.CallOpts, data) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSession) CheckUpkeep(data []byte) (bool, []byte, error) { + return _UpkeepPerformCounterRestrictive.Contract.CheckUpkeep(&_UpkeepPerformCounterRestrictive.CallOpts, data) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "dummyMap", arg0) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) DummyMap(arg0 [32]byte) (bool, error) { + return _UpkeepPerformCounterRestrictive.Contract.DummyMap(&_UpkeepPerformCounterRestrictive.CallOpts, arg0) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSession) DummyMap(arg0 [32]byte) (bool, error) { + return _UpkeepPerformCounterRestrictive.Contract.DummyMap(&_UpkeepPerformCounterRestrictive.CallOpts, arg0) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) GetCountPerforms(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "getCountPerforms") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) GetCountPerforms() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.GetCountPerforms(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive 
*UpkeepPerformCounterRestrictiveCallerSession) GetCountPerforms() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.GetCountPerforms(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) InitialCall(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "initialCall") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) InitialCall() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.InitialCall(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSession) InitialCall() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.InitialCall(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) NextEligible(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "nextEligible") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) NextEligible() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.NextEligible(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSession) NextEligible() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.NextEligible(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) 
PerformGasToBurn(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "performGasToBurn") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) PerformGasToBurn() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.PerformGasToBurn(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSession) PerformGasToBurn() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.PerformGasToBurn(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCaller) TestRange(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _UpkeepPerformCounterRestrictive.contract.Call(opts, &out, "testRange") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) TestRange() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.TestRange(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveCallerSession) TestRange() (*big.Int, error) { + return _UpkeepPerformCounterRestrictive.Contract.TestRange(&_UpkeepPerformCounterRestrictive.CallOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactor) PerformUpkeep(opts *bind.TransactOpts, arg0 []byte) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.contract.Transact(opts, "performUpkeep", arg0) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) PerformUpkeep(arg0 []byte) 
(*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.PerformUpkeep(&_UpkeepPerformCounterRestrictive.TransactOpts, arg0) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactorSession) PerformUpkeep(arg0 []byte) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.PerformUpkeep(&_UpkeepPerformCounterRestrictive.TransactOpts, arg0) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactor) Reset(opts *bind.TransactOpts) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.contract.Transact(opts, "reset") +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) Reset() (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.Reset(&_UpkeepPerformCounterRestrictive.TransactOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactorSession) Reset() (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.Reset(&_UpkeepPerformCounterRestrictive.TransactOpts) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactor) SetCheckGasToBurn(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.contract.Transact(opts, "setCheckGasToBurn", value) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) SetCheckGasToBurn(value *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.SetCheckGasToBurn(&_UpkeepPerformCounterRestrictive.TransactOpts, value) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactorSession) SetCheckGasToBurn(value *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.SetCheckGasToBurn(&_UpkeepPerformCounterRestrictive.TransactOpts, value) +} + +func (_UpkeepPerformCounterRestrictive 
*UpkeepPerformCounterRestrictiveTransactor) SetPerformGasToBurn(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.contract.Transact(opts, "setPerformGasToBurn", value) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) SetPerformGasToBurn(value *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.SetPerformGasToBurn(&_UpkeepPerformCounterRestrictive.TransactOpts, value) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactorSession) SetPerformGasToBurn(value *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.SetPerformGasToBurn(&_UpkeepPerformCounterRestrictive.TransactOpts, value) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactor) SetSpread(opts *bind.TransactOpts, _newTestRange *big.Int, _newAverageEligibilityCadence *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.contract.Transact(opts, "setSpread", _newTestRange, _newAverageEligibilityCadence) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveSession) SetSpread(_newTestRange *big.Int, _newAverageEligibilityCadence *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.SetSpread(&_UpkeepPerformCounterRestrictive.TransactOpts, _newTestRange, _newAverageEligibilityCadence) +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveTransactorSession) SetSpread(_newTestRange *big.Int, _newAverageEligibilityCadence *big.Int) (*types.Transaction, error) { + return _UpkeepPerformCounterRestrictive.Contract.SetSpread(&_UpkeepPerformCounterRestrictive.TransactOpts, _newTestRange, _newAverageEligibilityCadence) +} + +type UpkeepPerformCounterRestrictivePerformingUpkeepIterator struct { + Event *UpkeepPerformCounterRestrictivePerformingUpkeep + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *UpkeepPerformCounterRestrictivePerformingUpkeepIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(UpkeepPerformCounterRestrictivePerformingUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(UpkeepPerformCounterRestrictivePerformingUpkeep) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *UpkeepPerformCounterRestrictivePerformingUpkeepIterator) Error() error { + return it.fail +} + +func (it *UpkeepPerformCounterRestrictivePerformingUpkeepIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type UpkeepPerformCounterRestrictivePerformingUpkeep struct { + Eligible bool + From common.Address + InitialCall *big.Int + NextEligible *big.Int + BlockNumber *big.Int + Raw types.Log +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveFilterer) FilterPerformingUpkeep(opts *bind.FilterOpts) (*UpkeepPerformCounterRestrictivePerformingUpkeepIterator, error) { + + logs, sub, err := _UpkeepPerformCounterRestrictive.contract.FilterLogs(opts, "PerformingUpkeep") + if err != nil { + return nil, err + } + return &UpkeepPerformCounterRestrictivePerformingUpkeepIterator{contract: _UpkeepPerformCounterRestrictive.contract, event: "PerformingUpkeep", logs: logs, sub: sub}, nil +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveFilterer) WatchPerformingUpkeep(opts *bind.WatchOpts, sink chan<- 
*UpkeepPerformCounterRestrictivePerformingUpkeep) (event.Subscription, error) { + + logs, sub, err := _UpkeepPerformCounterRestrictive.contract.WatchLogs(opts, "PerformingUpkeep") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(UpkeepPerformCounterRestrictivePerformingUpkeep) + if err := _UpkeepPerformCounterRestrictive.contract.UnpackLog(event, "PerformingUpkeep", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictiveFilterer) ParsePerformingUpkeep(log types.Log) (*UpkeepPerformCounterRestrictivePerformingUpkeep, error) { + event := new(UpkeepPerformCounterRestrictivePerformingUpkeep) + if err := _UpkeepPerformCounterRestrictive.contract.UnpackLog(event, "PerformingUpkeep", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictive) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _UpkeepPerformCounterRestrictive.abi.Events["PerformingUpkeep"].ID: + return _UpkeepPerformCounterRestrictive.ParsePerformingUpkeep(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (UpkeepPerformCounterRestrictivePerformingUpkeep) Topic() common.Hash { + return common.HexToHash("0xbd6b6608a51477954e8b498c633bda87e5cd555e06ead50486398d9e3b9cebc0") +} + +func (_UpkeepPerformCounterRestrictive *UpkeepPerformCounterRestrictive) Address() common.Address { + return _UpkeepPerformCounterRestrictive.address +} + +type UpkeepPerformCounterRestrictiveInterface interface { + 
AverageEligibilityCadence(opts *bind.CallOpts) (*big.Int, error) + + CheckEligible(opts *bind.CallOpts) (bool, error) + + CheckGasToBurn(opts *bind.CallOpts) (*big.Int, error) + + CheckUpkeep(opts *bind.CallOpts, data []byte) (bool, []byte, error) + + DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) + + GetCountPerforms(opts *bind.CallOpts) (*big.Int, error) + + InitialCall(opts *bind.CallOpts) (*big.Int, error) + + NextEligible(opts *bind.CallOpts) (*big.Int, error) + + PerformGasToBurn(opts *bind.CallOpts) (*big.Int, error) + + TestRange(opts *bind.CallOpts) (*big.Int, error) + + PerformUpkeep(opts *bind.TransactOpts, arg0 []byte) (*types.Transaction, error) + + Reset(opts *bind.TransactOpts) (*types.Transaction, error) + + SetCheckGasToBurn(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) + + SetPerformGasToBurn(opts *bind.TransactOpts, value *big.Int) (*types.Transaction, error) + + SetSpread(opts *bind.TransactOpts, _newTestRange *big.Int, _newAverageEligibilityCadence *big.Int) (*types.Transaction, error) + + FilterPerformingUpkeep(opts *bind.FilterOpts) (*UpkeepPerformCounterRestrictivePerformingUpkeepIterator, error) + + WatchPerformingUpkeep(opts *bind.WatchOpts, sink chan<- *UpkeepPerformCounterRestrictivePerformingUpkeep) (event.Subscription, error) + + ParsePerformingUpkeep(log types.Log) (*UpkeepPerformCounterRestrictivePerformingUpkeep, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/upkeep_transcoder/upkeep_transcoder.go b/core/gethwrappers/generated/upkeep_transcoder/upkeep_transcoder.go new file mode 100644 index 00000000..53d557d7 --- /dev/null +++ b/core/gethwrappers/generated/upkeep_transcoder/upkeep_transcoder.go @@ -0,0 +1,226 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package upkeep_transcoder + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var UpkeepTranscoderMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"InvalidTranscoding\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"enumUpkeepFormat\",\"name\":\"fromVersion\",\"type\":\"uint8\"},{\"internalType\":\"enumUpkeepFormat\",\"name\":\"toVersion\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"encodedUpkeeps\",\"type\":\"bytes\"}],\"name\":\"transcodeUpkeeps\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5061029b806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063181f5a771461003b578063c71249ab1461008d575b600080fd5b6100776040518060400160405280601681526020017f55706b6565705472616e73636f64657220312e302e300000000000000000000081525081565b6040516100849190610245565b60405180910390f35b61007761009b36600461014c565b60608360028111156100af576100af61025f565b8560028111156100c1576100c161025f565b146100f8576040517f90aaccc300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b82828080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509298975050505050505050565b80356003811061014757600080fd5b919050565b6000806000806060858703121561016257600080fd5b61016b85610138565b935061017960208601610138565b9250604085013567ffffffffffffffff8082111561019657600080fd5b818701915087601f8301126101aa57600080fd5b8135818111156101b957600080fd5b8860208285010111156101cb57600080fd5b95989497505060200194505050565b6000815180845260005b81811015610200576020818501810151868301820152016101e4565b81811115610212576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061025860208301846101da565b9392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fdfea164736f6c6343000806000a", +} + +var UpkeepTranscoderABI = UpkeepTranscoderMetaData.ABI + +var UpkeepTranscoderBin = UpkeepTranscoderMetaData.Bin + +func DeployUpkeepTranscoder(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *UpkeepTranscoder, error) { + parsed, err := UpkeepTranscoderMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(UpkeepTranscoderBin), backend) 
+ if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &UpkeepTranscoder{address: address, abi: *parsed, UpkeepTranscoderCaller: UpkeepTranscoderCaller{contract: contract}, UpkeepTranscoderTransactor: UpkeepTranscoderTransactor{contract: contract}, UpkeepTranscoderFilterer: UpkeepTranscoderFilterer{contract: contract}}, nil +} + +type UpkeepTranscoder struct { + address common.Address + abi abi.ABI + UpkeepTranscoderCaller + UpkeepTranscoderTransactor + UpkeepTranscoderFilterer +} + +type UpkeepTranscoderCaller struct { + contract *bind.BoundContract +} + +type UpkeepTranscoderTransactor struct { + contract *bind.BoundContract +} + +type UpkeepTranscoderFilterer struct { + contract *bind.BoundContract +} + +type UpkeepTranscoderSession struct { + Contract *UpkeepTranscoder + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type UpkeepTranscoderCallerSession struct { + Contract *UpkeepTranscoderCaller + CallOpts bind.CallOpts +} + +type UpkeepTranscoderTransactorSession struct { + Contract *UpkeepTranscoderTransactor + TransactOpts bind.TransactOpts +} + +type UpkeepTranscoderRaw struct { + Contract *UpkeepTranscoder +} + +type UpkeepTranscoderCallerRaw struct { + Contract *UpkeepTranscoderCaller +} + +type UpkeepTranscoderTransactorRaw struct { + Contract *UpkeepTranscoderTransactor +} + +func NewUpkeepTranscoder(address common.Address, backend bind.ContractBackend) (*UpkeepTranscoder, error) { + abi, err := abi.JSON(strings.NewReader(UpkeepTranscoderABI)) + if err != nil { + return nil, err + } + contract, err := bindUpkeepTranscoder(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &UpkeepTranscoder{address: address, abi: abi, UpkeepTranscoderCaller: UpkeepTranscoderCaller{contract: contract}, UpkeepTranscoderTransactor: UpkeepTranscoderTransactor{contract: contract}, UpkeepTranscoderFilterer: UpkeepTranscoderFilterer{contract: contract}}, nil +} + +func 
NewUpkeepTranscoderCaller(address common.Address, caller bind.ContractCaller) (*UpkeepTranscoderCaller, error) { + contract, err := bindUpkeepTranscoder(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &UpkeepTranscoderCaller{contract: contract}, nil +} + +func NewUpkeepTranscoderTransactor(address common.Address, transactor bind.ContractTransactor) (*UpkeepTranscoderTransactor, error) { + contract, err := bindUpkeepTranscoder(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &UpkeepTranscoderTransactor{contract: contract}, nil +} + +func NewUpkeepTranscoderFilterer(address common.Address, filterer bind.ContractFilterer) (*UpkeepTranscoderFilterer, error) { + contract, err := bindUpkeepTranscoder(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &UpkeepTranscoderFilterer{contract: contract}, nil +} + +func bindUpkeepTranscoder(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := UpkeepTranscoderMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_UpkeepTranscoder *UpkeepTranscoderRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _UpkeepTranscoder.Contract.UpkeepTranscoderCaller.contract.Call(opts, result, method, params...) +} + +func (_UpkeepTranscoder *UpkeepTranscoderRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _UpkeepTranscoder.Contract.UpkeepTranscoderTransactor.contract.Transfer(opts) +} + +func (_UpkeepTranscoder *UpkeepTranscoderRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _UpkeepTranscoder.Contract.UpkeepTranscoderTransactor.contract.Transact(opts, method, params...) 
+} + +func (_UpkeepTranscoder *UpkeepTranscoderCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _UpkeepTranscoder.Contract.contract.Call(opts, result, method, params...) +} + +func (_UpkeepTranscoder *UpkeepTranscoderTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _UpkeepTranscoder.Contract.contract.Transfer(opts) +} + +func (_UpkeepTranscoder *UpkeepTranscoderTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _UpkeepTranscoder.Contract.contract.Transact(opts, method, params...) +} + +func (_UpkeepTranscoder *UpkeepTranscoderCaller) TranscodeUpkeeps(opts *bind.CallOpts, fromVersion uint8, toVersion uint8, encodedUpkeeps []byte) ([]byte, error) { + var out []interface{} + err := _UpkeepTranscoder.contract.Call(opts, &out, "transcodeUpkeeps", fromVersion, toVersion, encodedUpkeeps) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_UpkeepTranscoder *UpkeepTranscoderSession) TranscodeUpkeeps(fromVersion uint8, toVersion uint8, encodedUpkeeps []byte) ([]byte, error) { + return _UpkeepTranscoder.Contract.TranscodeUpkeeps(&_UpkeepTranscoder.CallOpts, fromVersion, toVersion, encodedUpkeeps) +} + +func (_UpkeepTranscoder *UpkeepTranscoderCallerSession) TranscodeUpkeeps(fromVersion uint8, toVersion uint8, encodedUpkeeps []byte) ([]byte, error) { + return _UpkeepTranscoder.Contract.TranscodeUpkeeps(&_UpkeepTranscoder.CallOpts, fromVersion, toVersion, encodedUpkeeps) +} + +func (_UpkeepTranscoder *UpkeepTranscoderCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _UpkeepTranscoder.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + 
+func (_UpkeepTranscoder *UpkeepTranscoderSession) TypeAndVersion() (string, error) { + return _UpkeepTranscoder.Contract.TypeAndVersion(&_UpkeepTranscoder.CallOpts) +} + +func (_UpkeepTranscoder *UpkeepTranscoderCallerSession) TypeAndVersion() (string, error) { + return _UpkeepTranscoder.Contract.TypeAndVersion(&_UpkeepTranscoder.CallOpts) +} + +func (_UpkeepTranscoder *UpkeepTranscoder) Address() common.Address { + return _UpkeepTranscoder.address +} + +type UpkeepTranscoderInterface interface { + TranscodeUpkeeps(opts *bind.CallOpts, fromVersion uint8, toVersion uint8, encodedUpkeeps []byte) ([]byte, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/verifiable_load_log_trigger_upkeep_wrapper/verifiable_load_log_trigger_upkeep_wrapper.go b/core/gethwrappers/generated/verifiable_load_log_trigger_upkeep_wrapper/verifiable_load_log_trigger_upkeep_wrapper.go new file mode 100644 index 00000000..31266676 --- /dev/null +++ b/core/gethwrappers/generated/verifiable_load_log_trigger_upkeep_wrapper/verifiable_load_log_trigger_upkeep_wrapper.go @@ -0,0 +1,2530 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package verifiable_load_log_trigger_upkeep_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type KeeperRegistryBase21UpkeepInfo struct { + Target common.Address + PerformGas uint32 + CheckData []byte + Balance *big.Int + Admin common.Address + MaxValidBlocknumber uint64 + LastPerformedBlockNumber uint32 + AmountSpent *big.Int + Paused bool + OffchainConfig []byte +} + +type Log struct { + Index *big.Int + Timestamp *big.Int + TxHash [32]byte + BlockNumber *big.Int + BlockHash [32]byte + Source common.Address + Topics [][32]byte + Data []byte +} + +var VerifiableLoadLogTriggerUpkeepMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"contractAutomationRegistrar2_1\",\"name\":\"_registrar\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"_useArb\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"_useMercury\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"feedParamKey\",\"type\":\"string\"},{\"internalType\":\"string[]\",\"name\":\"feeds\",\"type\":\"string[]\"},{\"internalType\":\"string\",\"name\":\"timeParamKey\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"time\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"StreamsLookup\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"}],\"name\":\"LogEmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"}],\"name\":\"LogEmittedAgain\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTran
sferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"}],\"name\":\"UpkeepTopUp\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"BUCKET_SIZE\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"addLinkAmount\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"}],\"name\":\"batchCancelUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"topic0\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic1\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic2\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic3\",\"type\":\"bytes32\"}],\"name\":\"batchPreparingUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"},{\"internalType\":\"uint8\",\"name\":\"log\",\"type\":\"ui
nt8\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uint8\"}],\"name\":\"batchPreparingUpkeepsSimple\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"number\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"checkGasToBurn\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"performGasToBurn\",\"type\":\"uint256\"}],\"name\":\"batchRegisterUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"log\",\"type\":\"uint8\"}],\"name\":\"batchSendLogs\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"},{\"internalType\":\"uint32\",\"name\":\"interval\",\"type\":\"uint32\"}],\"name\":\"batchSetIntervals\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"}],\"name\":\"batchUpdatePipelineData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"}],\"name\":\"batchWithdrawLinks\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"bucketedDelays\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],
\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"buckets\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"startGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"}],\"name\":\"burnPerformGas\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes[]\",\"name\":\"values\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"checkCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"checkGasToBurns\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"txHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"source\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"topics\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"internalType\":\"structLog\",\"name\":\"log\",\"type\":\"tuple\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"c
heckLog\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"counters\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"delays\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"dummyMap\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"eligible\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"emittedAgainSig\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"emittedSig\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"feedParamKey\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"feedsHex\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\
":\"\",\"type\":\"uint256\"}],\"name\":\"firstPerformBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"gasLimits\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveUpkeepIDsDeployedByThisContract\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getAllActiveUpkeepIDsOnRegistry\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"bucket\",\"type\":\"uint16\"}],\"name\":\"getBucketedDelays\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getBucketedDelaysLength\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"na
me\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getDelays\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getDelaysLength\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepID\",\"type\":\"uint256\"}],\"name\":\"getForwarder\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"topic0\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic1\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic2\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic3\",\"type\":\"bytes32\"}],\"name\":\"getLogTriggerConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"logTrigger\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"p\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"}],\"name\":\"getPxDelayLastNPerforms\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"na
me\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"bucket\",\"type\":\"uint16\"}],\"name\":\"getSumDelayInBucket\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"}],\"name\":\"getSumDelayLastNPerforms\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getTriggerType\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepInfo\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"lastPerformedBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"amountSpent\",\"type\":\"uint96\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"internalType\":\"structKeeperRegistryBase2_1.UpkeepInfo\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type
\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepPrivilegeConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepTriggerConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"intervals\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"lastTopUpBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"linkToken\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"logNum\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"minBalanceThresholdMultiplier\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"performDataSizes\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\
",\"type\":\"uint256\"}],\"name\":\"performGasToBurns\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"previousPerformBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"registrar\",\"outputs\":[{\"internalType\":\"contractAutomationRegistrar2_1\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"registry\",\"outputs\":[{\"internalType\":\"contractIKeeperRegistryMaster\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint8\",\"name\":\"log\",\"type\":\"uint8\"}],\"name\":\"sendLog\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractAutomationRegistrar2_1\",\"name\":\"newRegistrar\",\"type\":\"address\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string[]\",\"name\":\"_feeds\",\"type\":\"string[]\"}],\"name\":\"setFeeds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_interval\",\"type\":\"uint256\"}],\"name\":\"setInterval\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"_log\",\"type\":\"uint8\"}],\"name\":\"s
etLog\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"_feedParamKey\",\"type\":\"string\"},{\"internalType\":\"string\",\"name\":\"_timeParamKey\",\"type\":\"string\"}],\"name\":\"setParamKeys\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"setPerformDataSize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"setUpkeepGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"cfg\",\"type\":\"bytes\"}],\"name\":\"setUpkeepPrivilegeConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"timeParamKey\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"}],\"name\":\"topUpFund\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uint8\"},{\"internalType\":
\"bytes32\",\"name\":\"topic0\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic1\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic2\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic3\",\"type\":\"bytes32\"}],\"name\":\"updateLogTriggerConfig1\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"cfg\",\"type\":\"bytes\"}],\"name\":\"updateLogTriggerConfig2\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"pipelineData\",\"type\":\"bytes\"}],\"name\":\"updateUpkeepPipelineData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepTopUpCheckInterval\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"useArbitrumBlockNum\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"useMercury\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawLinks\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"withdrawLinks\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", + Bin: 
"0x7f97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf086080527fc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d60a0526005601455601580546001600160681b0319166c140000000002c68af0bb140000179055606460e0526101c0604052604261014081815261010091829190620066ec6101603981526020016040518060800160405280604281526020016200672e604291399052620000be906016906002620003de565b506040805180820190915260098152680cccacac892c890caf60bb1b6020820152601790620000ee90826200055a565b5060408051808201909152600b81526a313637b1b5a73ab6b132b960a91b60208201526018906200012090826200055a565b503480156200012e57600080fd5b506040516200677038038062006770833981016040819052620001519162000652565b82823380600081620001aa5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620001dd57620001dd8162000333565b5050601180546001600160a01b0319166001600160a01b038516908117909155604080516330fe427560e21b815281516000945063c3f909d4926004808401939192918290030181865afa1580156200023a573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906200026091906200069e565b50601380546001600160a01b0319166001600160a01b038381169190911790915560115460408051631b6b6d2360e01b81529051939450911691631b6b6d23916004808201926020929091908290030181865afa158015620002c6573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620002ec9190620006cf565b601280546001600160a01b0319166001600160a01b039290921691909117905550151560c052506019805461ffff191691151561ff00191691909117905550620006f69050565b336001600160a01b038216036200038d5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620001a1565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b828054828255906
00052602060002090810192821562000429579160200282015b828111156200042957825182906200041890826200055a565b5091602001919060010190620003ff565b50620004379291506200043b565b5090565b80821115620004375760006200045282826200045c565b506001016200043b565b5080546200046a90620004cb565b6000825580601f106200047b575050565b601f0160209004906000526020600020908101906200049b91906200049e565b50565b5b808211156200043757600081556001016200049f565b634e487b7160e01b600052604160045260246000fd5b600181811c90821680620004e057607f821691505b6020821081036200050157634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200055557600081815260208120601f850160051c81016020861015620005305750805b601f850160051c820191505b8181101562000551578281556001016200053c565b5050505b505050565b81516001600160401b03811115620005765762000576620004b5565b6200058e81620005878454620004cb565b8462000507565b602080601f831160018114620005c65760008415620005ad5750858301515b600019600386901b1c1916600185901b17855562000551565b600085815260208120601f198616915b82811015620005f757888601518255948401946001909101908401620005d6565b5085821015620006165787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b6001600160a01b03811681146200049b57600080fd5b805180151581146200064d57600080fd5b919050565b6000806000606084860312156200066857600080fd5b8351620006758162000626565b925062000685602085016200063c565b915062000695604085016200063c565b90509250925092565b60008060408385031215620006b257600080fd5b8251620006bf8162000626565b6020939093015192949293505050565b600060208284031215620006e257600080fd5b8151620006ef8162000626565b9392505050565b60805160a05160c05160e051615f926200075a600039600081816105b90152612551015260008181610a2d01526140920152600081816108a601528181611fa80152613ae0015260008181610dca01528181611f780152613ab50152615f926000f3fe6080604052600436106105265760003560e01c80637b103999116102af578063af953a4a11610179578063daee1aeb116100d6578063e83ce5581161008a578063fa333dfb1161006f578063fa333dfb14611066578063fba7ffa314611119578063fcdc1f631461114657600080fd5b8063e83ce
55814611027578063f2fde38b1461104657600080fd5b8063de818253116100bb578063de81825314610f90578063e0114adb14610fe4578063e45530831461101157600080fd5b8063daee1aeb14610f50578063dbef701e14610f7057600080fd5b8063c41c815b1161012d578063d4c2490011610112578063d4c2490014610ef0578063d6051a7214610f10578063da6cba4714610f3057600080fd5b8063c41c815b14610ec1578063c98f10b014610edb57600080fd5b8063b657bc9c1161015e578063b657bc9c14610e61578063becde0e114610e81578063c041982214610ea157600080fd5b8063af953a4a14610e2c578063afb28d1f14610e4c57600080fd5b8063948108f7116102275780639d385eaa116101db578063a6548248116101c0578063a654824814610db8578063a6b5947514610dec578063a72aa27e14610e0c57600080fd5b80639d385eaa14610d785780639d6f1cc714610d9857600080fd5b80639ac542eb1161020c5780639ac542eb14610cf05780639b42935414610d1a5780639b51fb0d14610d4757600080fd5b8063948108f714610cb057806396cebc7c14610cd057600080fd5b806386e330af1161027e5780638da5cb5b116102635780638da5cb5b14610c385780638fcb3fba14610c63578063924ca57814610c9057600080fd5b806386e330af14610bf8578063873c758614610c1857600080fd5b80637b10399914610b6b5780637e7a46dc14610b985780638243444a14610bb85780638340507c14610bd857600080fd5b806345d2ec17116103f057806360457ff51161036857806373644cce1161031c578063776898c811610301578063776898c814610b1657806379ba509714610b3657806379ea994314610b4b57600080fd5b806373644cce14610abc5780637672130314610ae957600080fd5b8063642f6cef1161034d578063642f6cef14610a1b57806369cdbadb14610a5f5780637145f11b14610a8c57600080fd5b806360457ff5146109c9578063636092e8146109f657600080fd5b80635147cd59116103bf57806357970e93116103a457806357970e93146109675780635d4ee7f3146109945780635f17e616146109a957600080fd5b80635147cd591461091557806351c98be31461094757600080fd5b806345d2ec1714610867578063469820931461089457806346e7a63e146108c85780634b56a42e146108f557600080fd5b806320e3dbd41161049e5780632b20e397116104525780633ebe8d6c116104375780633ebe8d6c146107f957806340691db4146108195780634585e33b1461084757600080fd5b80632b20e3971461077a578063328ffd11146107cc57600080fd5b806328c4b57b1161048
357806328c4b57b1461070d57806329e0a8411461072d5780632a9032d31461075a57600080fd5b806320e3dbd4146106cd5780632636aecf146106ed57600080fd5b806319d97a94116104f55780631e010439116104da5780631e0104391461063b578063206c32e814610678578063207b6516146106ad57600080fd5b806319d97a94146105ee5780631cdde2511461061b57600080fd5b806306c1cc0014610532578063077ac621146105545780630b7d33e61461058757806312c55027146105a757600080fd5b3661052d57005b600080fd5b34801561053e57600080fd5b5061055261054d366004614814565b611173565b005b34801561056057600080fd5b5061057461056f3660046148c7565b6113c2565b6040519081526020015b60405180910390f35b34801561059357600080fd5b506105526105a23660046148fc565b611400565b3480156105b357600080fd5b506105db7f000000000000000000000000000000000000000000000000000000000000000081565b60405161ffff909116815260200161057e565b3480156105fa57600080fd5b5061060e610609366004614943565b61148e565b60405161057e91906149ca565b34801561062757600080fd5b506105526106363660046149ff565b61154b565b34801561064757600080fd5b5061065b610656366004614943565b611688565b6040516bffffffffffffffffffffffff909116815260200161057e565b34801561068457600080fd5b50610698610693366004614a64565b61171d565b6040805192835260208301919091520161057e565b3480156106b957600080fd5b5061060e6106c8366004614943565b6117a0565b3480156106d957600080fd5b506105526106e8366004614a90565b6117f8565b3480156106f957600080fd5b50610552610708366004614af2565b6119c2565b34801561071957600080fd5b50610574610728366004614b6c565b611c8b565b34801561073957600080fd5b5061074d610748366004614943565b611cf6565b60405161057e9190614b98565b34801561076657600080fd5b50610552610775366004614cd9565b611dfb565b34801561078657600080fd5b506011546107a79073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161057e565b3480156107d857600080fd5b506105746107e7366004614943565b60036020526000908152604090205481565b34801561080557600080fd5b50610574610814366004614943565b611edc565b34801561082557600080fd5b50610839610834366004614d1b565b611f45565b60405161057e929190614
d7e565b34801561085357600080fd5b50610552610862366004614ddb565b61244b565b34801561087357600080fd5b50610887610882366004614a64565b61269a565b60405161057e9190614e11565b3480156108a057600080fd5b506105747f000000000000000000000000000000000000000000000000000000000000000081565b3480156108d457600080fd5b506105746108e3366004614943565b600a6020526000908152604090205481565b34801561090157600080fd5b50610839610910366004614e79565b612709565b34801561092157600080fd5b50610935610930366004614943565b61275d565b60405160ff909116815260200161057e565b34801561095357600080fd5b50610552610962366004614f36565b6127f1565b34801561097357600080fd5b506012546107a79073ffffffffffffffffffffffffffffffffffffffff1681565b3480156109a057600080fd5b50610552612895565b3480156109b557600080fd5b506105526109c4366004614f8d565b6129d0565b3480156109d557600080fd5b506105746109e4366004614943565b60076020526000908152604090205481565b348015610a0257600080fd5b5060155461065b906bffffffffffffffffffffffff1681565b348015610a2757600080fd5b50610a4f7f000000000000000000000000000000000000000000000000000000000000000081565b604051901515815260200161057e565b348015610a6b57600080fd5b50610574610a7a366004614943565b60086020526000908152604090205481565b348015610a9857600080fd5b50610a4f610aa7366004614943565b600b6020526000908152604090205460ff1681565b348015610ac857600080fd5b50610574610ad7366004614943565b6000908152600c602052604090205490565b348015610af557600080fd5b50610574610b04366004614943565b60046020526000908152604090205481565b348015610b2257600080fd5b50610a4f610b31366004614943565b612a9d565b348015610b4257600080fd5b50610552612aef565b348015610b5757600080fd5b506107a7610b66366004614943565b612bec565b348015610b7757600080fd5b506013546107a79073ffffffffffffffffffffffffffffffffffffffff1681565b348015610ba457600080fd5b50610552610bb3366004614faf565b612c80565b348015610bc457600080fd5b50610552610bd3366004614faf565b612d11565b348015610be457600080fd5b50610552610bf3366004614ffb565b612d6b565b348015610c0457600080fd5b50610552610c13366004615048565b612d89565b348015610c2457600080fd5b50610887610c333
66004614f8d565b612d9c565b348015610c4457600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff166107a7565b348015610c6f57600080fd5b50610574610c7e366004614943565b60056020526000908152604090205481565b348015610c9c57600080fd5b50610552610cab366004614f8d565b612e59565b348015610cbc57600080fd5b50610552610ccb3660046150f9565b61309e565b348015610cdc57600080fd5b50610552610ceb366004615129565b6131b6565b348015610cfc57600080fd5b50601554610935906c01000000000000000000000000900460ff1681565b348015610d2657600080fd5b50610552610d35366004614f8d565b60009182526009602052604090912055565b348015610d5357600080fd5b506105db610d62366004614943565b600e6020526000908152604090205461ffff1681565b348015610d8457600080fd5b50610887610d93366004614943565b6133c0565b348015610da457600080fd5b5061060e610db3366004614943565b613422565b348015610dc457600080fd5b506105747f000000000000000000000000000000000000000000000000000000000000000081565b348015610df857600080fd5b50610552610e07366004614b6c565b6134ce565b348015610e1857600080fd5b50610552610e27366004615146565b613537565b348015610e3857600080fd5b50610552610e47366004614943565b6135e2565b348015610e5857600080fd5b5061060e613668565b348015610e6d57600080fd5b5061065b610e7c366004614943565b613675565b348015610e8d57600080fd5b50610552610e9c366004614cd9565b6136cd565b348015610ead57600080fd5b50610887610ebc366004614f8d565b613767565b348015610ecd57600080fd5b50601954610a4f9060ff1681565b348015610ee757600080fd5b5061060e613864565b348015610efc57600080fd5b50610552610f0b36600461516b565b613871565b348015610f1c57600080fd5b50610698610f2b366004614f8d565b6138f0565b348015610f3c57600080fd5b50610552610f4b366004615190565b613959565b348015610f5c57600080fd5b50610552610f6b366004614cd9565b613cc0565b348015610f7c57600080fd5b50610574610f8b366004614f8d565b613d8b565b348015610f9c57600080fd5b50610552610fab366004615129565b6019805460ff909216610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff909216919091179055565b348015610ff057600080fd5b50610574610fff366004614943565b60096020526000908152604090205481565b34801
561101d57600080fd5b5061057460145481565b34801561103357600080fd5b5060195461093590610100900460ff1681565b34801561105257600080fd5b50610552611061366004614a90565b613dbc565b34801561107257600080fd5b5061060e6110813660046151f8565b6040805160c0808201835273ffffffffffffffffffffffffffffffffffffffff9890981680825260ff97881660208084019182528385019889526060808501988952608080860198895260a095860197885286519283019490945291519099168985015296519688019690965293519486019490945290519184019190915251828401528051808303909301835260e0909101905290565b34801561112557600080fd5b50610574611134366004614943565b60066020526000908152604090205481565b34801561115257600080fd5b50610574611161366004614943565b60026020526000908152604090205481565b6040805161018081018252600461014082019081527f746573740000000000000000000000000000000000000000000000000000000061016083015281528151602081810184526000808352818401929092523083850181905263ffffffff8b166060850152608084015260ff808a1660a08501528451808301865283815260c085015260e0840189905284519182019094529081526101008201526bffffffffffffffffffffffff8516610120820152601254601154919273ffffffffffffffffffffffffffffffffffffffff9182169263095ea7b3921690611259908c1688615280565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e085901b16815273ffffffffffffffffffffffffffffffffffffffff90921660048301526bffffffffffffffffffffffff1660248201526044016020604051808303816000875af11580156112d7573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906112fb91906152c4565b5060008860ff1667ffffffffffffffff81111561131a5761131a6146b6565b604051908082528060200260200182016040528015611343578160200160208202803683370190505b50905060005b8960ff168160ff1610156113b657600061136284613dd0565b905080838360ff168151811061137a5761137a6152df565b602090810291909101810191909152600091825260088152604080832088905560079091529020849055806113ae8161530e565b915050611349565b50505050505050505050565b600d60205282600052604060002060205281600052604060002081815481106113ea57600080fd5b90600052602060002001600092509250505
05481565b6013546040517f0b7d33e600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690630b7d33e690611458908590859060040161532d565b600060405180830381600087803b15801561147257600080fd5b505af1158015611486573d6000803e3d6000fd5b505050505050565b6013546040517f19d97a940000000000000000000000000000000000000000000000000000000081526004810183905260609173ffffffffffffffffffffffffffffffffffffffff16906319d97a94906024015b600060405180830381865afa1580156114ff573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526115459190810190615393565b92915050565b6013546040517ffa333dfb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff888116600483015260ff8816602483015260448201879052606482018690526084820185905260a4820184905290911690634ee88d35908990309063fa333dfb9060c401600060405180830381865afa1580156115ea573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526116309190810190615393565b6040518363ffffffff1660e01b815260040161164d92919061532d565b600060405180830381600087803b15801561166757600080fd5b505af115801561167b573d6000803e3d6000fd5b5050505050505050505050565b6013546040517f1e0104390000000000000000000000000000000000000000000000000000000081526004810183905260009173ffffffffffffffffffffffffffffffffffffffff1690631e010439906024015b602060405180830381865afa1580156116f9573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061154591906153d3565b6000828152600d6020908152604080832061ffff85168452825280832080548251818502810185019093528083528493849392919083018282801561178157602002820191906000526020600020905b81548152602001906001019080831161176d575b50505050509050611793818251613e9e565b92509250505b9250929050565b6013546040517f207b65160000000000000000000000000000000000000000000000000000000081526004810183905260609173fffffffffffffffffffffffffffffff
fffffffff169063207b6516906024016114e2565b601180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8316908117909155604080517fc3f909d400000000000000000000000000000000000000000000000000000000815281516000939263c3f909d492600480820193918290030181865afa15801561188e573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906118b291906153fb565b50601380547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691909117909155601154604080517f1b6b6d230000000000000000000000000000000000000000000000000000000081529051939450911691631b6b6d23916004808201926020929091908290030181865afa158015611955573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906119799190615429565b601280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff929092169190911790555050565b8560005b81811015611c805760008989838181106119e2576119e26152df565b9050602002013590503073ffffffffffffffffffffffffffffffffffffffff16637e7a46dc8283604051602001611a1b91815260200190565b6040516020818303038152906040526040518363ffffffff1660e01b8152600401611a4792919061532d565b600060405180830381600087803b158015611a6157600080fd5b505af1158015611a75573d6000803e3d6000fd5b50506013546040517f5147cd59000000000000000000000000000000000000000000000000000000008152600481018590526000935073ffffffffffffffffffffffffffffffffffffffff9091169150635147cd5990602401602060405180830381865afa158015611aeb573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611b0f9190615446565b90508060ff16600103611c6b576040517ffa333dfb000000000000000000000000000000000000000000000000000000008152306004820181905260ff8b166024830152604482018a9052606482018890526084820188905260a4820187905260009163fa333dfb9060c401600060405180830381865afa158015611b98573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01
68201604052611bde9190810190615393565b6013546040517f4ee88d3500000000000000000000000000000000000000000000000000000000815291925073ffffffffffffffffffffffffffffffffffffffff1690634ee88d3590611c37908690859060040161532d565b600060405180830381600087803b158015611c5157600080fd5b505af1158015611c65573d6000803e3d6000fd5b50505050505b50508080611c7890615463565b9150506119c6565b505050505050505050565b6000838152600c602090815260408083208054825181850281018501909352808352611cec93830182828015611ce057602002820191906000526020600020905b815481526020019060010190808311611ccc575b50505050508484613f23565b90505b9392505050565b604080516101408101825260008082526020820181905260609282018390528282018190526080820181905260a0820181905260c0820181905260e082018190526101008201526101208101919091526013546040517fc7c3a19a0000000000000000000000000000000000000000000000000000000081526004810184905273ffffffffffffffffffffffffffffffffffffffff9091169063c7c3a19a90602401600060405180830381865afa158015611db5573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261154591908101906154be565b8060005b818160ff161015611ed65760135473ffffffffffffffffffffffffffffffffffffffff1663c8048022858560ff8516818110611e3d57611e3d6152df565b905060200201356040518263ffffffff1660e01b8152600401611e6291815260200190565b600060405180830381600087803b158015611e7c57600080fd5b505af1158015611e90573d6000803e3d6000fd5b50505050611ec384848360ff16818110611eac57611eac6152df565b90506020020135600f61408290919063ffffffff16565b5080611ece8161530e565b915050611dff565b50505050565b6000818152600e602052604081205461ffff1681805b8261ffff168161ffff1611611f3d576000858152600d6020908152604080832061ffff85168452909152902054611f2990836155dd565b915080611f35816155f0565b915050611ef2565b509392505050565b6000606060005a90506000611f5861408e565b9050600085806020019051810190611f709190615611565b6019549091507f000000000000000000000000000000000000000000000000000000000000000090610100900460ff1615611fc857507f0000000000000000000000000
0000000000000000000000000000000000000005b80611fd660c08a018a61562a565b6000818110611fe757611fe76152df565b90506020020135036123e957600061200260c08a018a61562a565b6001818110612013576120136152df565b9050602002013560405160200161202c91815260200190565b60405160208183030381529060405290506000818060200190518101906120539190615611565b90508381146120c3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f75706b6565702069647320646f6e2774206d617463680000000000000000000060448201526064015b60405180910390fd5b60006120d260c08c018c61562a565b60028181106120e3576120e36152df565b905060200201356040516020016120fc91815260200190565b60405160208183030381529060405290506000818060200190518101906121239190615611565b9050600061213460c08e018e61562a565b6003818110612145576121456152df565b9050602002013560405160200161215e91815260200190565b60405160208183030381529060405290506000818060200190518101906121859190615429565b6000868152600860205260409020549091505b805a6121a4908d615692565b6121b090613a986155dd565b10156121f15783406000908152600b6020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055612198565b6040517f6665656449644865780000000000000000000000000000000000000000000000602082015260009060290160405160208183030381529060405280519060200120601760405160200161224891906156f8565b604051602081830303815290604052805190602001200361226a57508361226d565b50425b60195460ff161561231557604080516020810189905290810186905273ffffffffffffffffffffffffffffffffffffffff841660608201526017906016906018908490608001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152908290527ff055e4a20000000000000000000000000000000000000000000000000000000082526120ba9594939291600401615827565b60165460009067ffffffffffffffff811115612333576123336146b6565b60405190808252806020026020018201604052801561236657816020015b60608152602001906001900390816123515790505b5060408051602081018b905290810188905273ffffffffffffffffffffffffffffffffffffffff86166060820152909150600090608
0016040516020818303038152906040529050600182826040516020016123c39291906158ea565b6040516020818303038152906040529f509f505050505050505050505050505050611799565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f756e6578706563746564206576656e742073696700000000000000000000000060448201526064016120ba565b60005a905060008061245f84860186614e79565b9150915060008060008380602001905181019061247c919061597e565b60008381526005602090815260408083205460049092528220549497509295509093509091906124aa61408e565b9050826000036124ca57600086815260056020526040902081905561260e565b60006124d68683615692565b6000888152600e6020908152604080832054600d835281842061ffff90911680855290835281842080548351818602810186019094528084529596509094919290919083018282801561254857602002820191906000526020600020905b815481526020019060010190808311612534575b505050505090507f000000000000000000000000000000000000000000000000000000000000000061ffff168151036125c35781612585816155f0565b60008b8152600e6020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001661ffff83161790559250505b506000888152600d6020908152604080832061ffff9094168352928152828220805460018181018355918452828420018590558a8352600c8252928220805493840181558252902001555b6000868152600660205260408120546126289060016155dd565b60008881526006602090815260408083208490556004909152902083905590506126528783612e59565b6040513090839089907f97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf0890600090a461268c878b846134ce565b505050505050505050505050565b6000828152600d6020908152604080832061ffff851684528252918290208054835181840281018401909452808452606093928301828280156126fc57602002820191906000526020600020905b8154815260200190600101908083116126e8575b5050505050905092915050565b60006060600084846040516020016127229291906158ea565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00181529190526001969095509350505050565b6013546040517f5147cd590000000000000000000000000000000000000000000000000000000
081526004810183905260009173ffffffffffffffffffffffffffffffffffffffff1690635147cd5990602401602060405180830381865afa1580156127cd573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906115459190615446565b8160005b8181101561288e5730635f17e616868684818110612815576128156152df565b90506020020135856040518363ffffffff1660e01b815260040161284992919091825263ffffffff16602082015260400190565b600060405180830381600087803b15801561286357600080fd5b505af1158015612877573d6000803e3d6000fd5b50505050808061288690615463565b9150506127f5565b5050505050565b61289d614130565b6012546040517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015260009173ffffffffffffffffffffffffffffffffffffffff16906370a0823190602401602060405180830381865afa15801561290c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906129309190615611565b6012546040517fa9059cbb0000000000000000000000000000000000000000000000000000000081523360048201526024810183905291925073ffffffffffffffffffffffffffffffffffffffff169063a9059cbb906044016020604051808303816000875af11580156129a8573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906129cc91906152c4565b5050565b60008281526003602090815260408083208490556005825280832083905560068252808320839055600c9091528120612a08916145b5565b6000828152600e602052604081205461ffff16905b8161ffff168161ffff1611612a64576000848152600d6020908152604080832061ffff851684529091528120612a52916145b5565b80612a5c816155f0565b915050612a1d565b5050506000908152600e6020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000169055565b6000818152600560205260408120548103612aba57506001919050565b600082815260036020908152604080832054600490925290912054612add61408e565b612ae79190615692565b101592915050565b60015473ffffffffffffffffffffffffffffffffffffffff163314612b70576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016
120ba565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6013546040517f79ea99430000000000000000000000000000000000000000000000000000000081526004810183905260009173ffffffffffffffffffffffffffffffffffffffff16906379ea994390602401602060405180830381865afa158015612c5c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906115459190615429565b6013546040517fcd7f71b500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091169063cd7f71b590612cda908690869086906004016159ac565b600060405180830381600087803b158015612cf457600080fd5b505af1158015612d08573d6000803e3d6000fd5b50505050505050565b6013546040517f4ee88d3500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690634ee88d3590612cda908690869086906004016159ac565b6017612d778382615a46565b506018612d848282615a46565b505050565b80516129cc9060169060208401906145d3565b6013546040517f06e3b632000000000000000000000000000000000000000000000000000000008152600481018490526024810183905260609173ffffffffffffffffffffffffffffffffffffffff16906306e3b63290604401600060405180830381865afa158015612e13573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052611cef9190810190615b60565b601454600083815260026020526040902054612e759083615692565b11156129cc576013546040517fc7c3a19a0000000000000000000000000000000000000000000000000000000081526004810184905260009173ffffffffffffffffffffffffffffffffffffffff169063c7c3a19a90602401600060405180830381865afa158015612eeb573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052612f3191908101906154be565b6013546040517fb657bc9c00000000000000000000000000000000000000000000000
00000000081526004810186905291925060009173ffffffffffffffffffffffffffffffffffffffff9091169063b657bc9c90602401602060405180830381865afa158015612fa6573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612fca91906153d3565b601554909150612fee9082906c01000000000000000000000000900460ff16615280565b6bffffffffffffffffffffffff1682606001516bffffffffffffffffffffffff161015611ed6576015546130319085906bffffffffffffffffffffffff1661309e565b60008481526002602090815260409182902085905560155482518781526bffffffffffffffffffffffff909116918101919091529081018490527f49d4100ab0124eb4a9a65dc4ea08d6412a43f6f05c49194983f5b322bcc0a5c09060600160405180910390a150505050565b6012546013546040517f095ea7b300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff91821660048201526bffffffffffffffffffffffff8416602482015291169063095ea7b3906044016020604051808303816000875af1158015613126573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061314a91906152c4565b506013546040517f948108f7000000000000000000000000000000000000000000000000000000008152600481018490526bffffffffffffffffffffffff8316602482015273ffffffffffffffffffffffffffffffffffffffff9091169063948108f790604401611458565b6040517fc04198220000000000000000000000000000000000000000000000000000000081526000600482018190526024820181905290309063c041982290604401600060405180830381865afa158015613215573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261325b9190810190615b60565b8051909150600061326a61408e565b905060005b8281101561288e57600084828151811061328b5761328b6152df565b60209081029190910101516013546040517f5147cd590000000000000000000000000000000000000000000000000000000081526004810183905291925060009173ffffffffffffffffffffffffffffffffffffffff90911690635147cd5990602401602060405180830381865afa15801561330b573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061332f9190615446565b90508060ff166001036133ab5
78660ff1660000361337b576040513090859084907f97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf0890600090a46133ab565b6040513090859084907fc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d90600090a45b505080806133b890615463565b91505061326f565b6000818152600c602090815260409182902080548351818402810184019094528084526060939283018282801561341657602002820191906000526020600020905b815481526020019060010190808311613402575b50505050509050919050565b6016818154811061343257600080fd5b90600052602060002001600091509050805461344d906156a5565b80601f0160208091040260200160405190810160405280929190818152602001828054613479906156a5565b80156134c65780601f1061349b576101008083540402835291602001916134c6565b820191906000526020600020905b8154815290600101906020018083116134a957829003601f168201915b505050505081565b6000838152600760205260409020545b805a6134ea9085615692565b6134f6906127106155dd565b1015611ed65781406000908152600b6020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690556134de565b6013546040517fa72aa27e0000000000000000000000000000000000000000000000000000000081526004810184905263ffffffff8316602482015273ffffffffffffffffffffffffffffffffffffffff9091169063a72aa27e90604401600060405180830381600087803b1580156135af57600080fd5b505af11580156135c3573d6000803e3d6000fd5b505050600092835250600a602052604090912063ffffffff9091169055565b6013546040517f744bfe610000000000000000000000000000000000000000000000000000000081526004810183905230602482015273ffffffffffffffffffffffffffffffffffffffff9091169063744bfe6190604401600060405180830381600087803b15801561365457600080fd5b505af115801561288e573d6000803e3d6000fd5b6017805461344d906156a5565b6013546040517fb657bc9c0000000000000000000000000000000000000000000000000000000081526004810183905260009173ffffffffffffffffffffffffffffffffffffffff169063b657bc9c906024016116dc565b8060005b818163ffffffff161015611ed6573063af953a4a858563ffffffff85168181106136fd576136fd6152df565b905060200201356040518263ffffffff1660e01b815260040161372291815260200190565b600
060405180830381600087803b15801561373c57600080fd5b505af1158015613750573d6000803e3d6000fd5b50505050808061375f90615bf1565b9150506136d1565b60606000613775600f6141b3565b90508084106137b0576040517f1390f2a100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b826000036137c5576137c28482615692565b92505b60008367ffffffffffffffff8111156137e0576137e06146b6565b604051908082528060200260200182016040528015613809578160200160208202803683370190505b50905060005b8481101561385b5761382c61382482886155dd565b600f906141bd565b82828151811061383e5761383e6152df565b60209081029190910101528061385381615463565b91505061380f565b50949350505050565b6018805461344d906156a5565b600061387b61408e565b90508160ff166000036138bc576040513090829085907f97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf0890600090a4505050565b6040513090829085907fc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d90600090a4505050565b6000828152600c6020908152604080832080548251818502810185019093528083528493849392919083018282801561394857602002820191906000526020600020905b815481526020019060010190808311613934575b505050505090506117938185613e9e565b8260005b81811015611486576000868683818110613979576139796152df565b9050602002013590503073ffffffffffffffffffffffffffffffffffffffff16637e7a46dc82836040516020016139b291815260200190565b6040516020818303038152906040526040518363ffffffff1660e01b81526004016139de92919061532d565b600060405180830381600087803b1580156139f857600080fd5b505af1158015613a0c573d6000803e3d6000fd5b50506013546040517f5147cd59000000000000000000000000000000000000000000000000000000008152600481018590526000935073ffffffffffffffffffffffffffffffffffffffff9091169150635147cd5990602401602060405180830381865afa158015613a82573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613aa69190615446565b90508060ff16600103613cab577f000000000000000000000000000000000000000000000000000000000000000060ff871615613b0057507f00000000000000000000000000000000000000000000000000000000000000005b60003073fffffffffffffff
fffffffffffffffffffffffff1663fa333dfb30898588604051602001613b3491815260200190565b604051602081830303815290604052613b4c90615c0a565b60405160e086901b7fffffffff0000000000000000000000000000000000000000000000000000000016815273ffffffffffffffffffffffffffffffffffffffff909416600485015260ff90921660248401526044830152606482015260006084820181905260a482015260c401600060405180830381865afa158015613bd7573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052613c1d9190810190615393565b6013546040517f4ee88d3500000000000000000000000000000000000000000000000000000000815291925073ffffffffffffffffffffffffffffffffffffffff1690634ee88d3590613c76908790859060040161532d565b600060405180830381600087803b158015613c9057600080fd5b505af1158015613ca4573d6000803e3d6000fd5b5050505050505b50508080613cb890615463565b91505061395d565b8060005b81811015611ed6576000848483818110613ce057613ce06152df565b9050602002013590503073ffffffffffffffffffffffffffffffffffffffff16637e7a46dc8283604051602001613d1991815260200190565b6040516020818303038152906040526040518363ffffffff1660e01b8152600401613d4592919061532d565b600060405180830381600087803b158015613d5f57600080fd5b505af1158015613d73573d6000803e3d6000fd5b50505050508080613d8390615463565b915050613cc4565b600c6020528160005260406000208181548110613da757600080fd5b90600052602060002001600091509150505481565b613dc4614130565b613dcd816141c9565b50565b6011546040517f3f678e11000000000000000000000000000000000000000000000000000000008152600091829173ffffffffffffffffffffffffffffffffffffffff90911690633f678e1190613e2b908690600401615c4c565b6020604051808303816000875af1158015613e4a573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613e6e9190615611565b9050613e7b600f826142be565b506060909201516000838152600a6020526040902063ffffffff90911690555090565b815160009081908190841580613eb45750808510155b15613ebd578094505b60008092505b85831015613f1957866001613ed88585615692565b613ee29190615692565b81518110613ef257613ef26152df565b602
002602001015181613f0591906155dd565b905082613f1181615463565b935050613ec3565b9694955050505050565b82516000908190831580613f375750808410155b15613f40578093505b60008467ffffffffffffffff811115613f5b57613f5b6146b6565b604051908082528060200260200182016040528015613f84578160200160208202803683370190505b509050600092505b84831015613ff257866001613fa18585615692565b613fab9190615692565b81518110613fbb57613fbb6152df565b6020026020010151818481518110613fd557613fd56152df565b602090810291909101015282613fea81615463565b935050613f8c565b61400b816000600184516140069190615692565b6142ca565b856064036140445780600182516140229190615692565b81518110614032576140326152df565b60200260200101519350505050611cef565b8060648251886140549190615d9e565b61405e9190615e0a565b8151811061406e5761406e6152df565b602002602001015193505050509392505050565b6000611cef8383614442565b60007f00000000000000000000000000000000000000000000000000000000000000001561412b57606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa158015614102573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906141269190615611565b905090565b504390565b60005473ffffffffffffffffffffffffffffffffffffffff1633146141b1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016120ba565b565b6000611545825490565b6000611cef838361453c565b3373ffffffffffffffffffffffffffffffffffffffff821603614248576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016120ba565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000611cef8383614566565b81818082036142da575050505050565b600
08560026142e98787615e1e565b6142f39190615e3e565b6142fd9087615ea6565b8151811061430d5761430d6152df565b602002602001015190505b81831361441c575b80868481518110614333576143336152df565b60200260200101511015614353578261434b81615ece565b935050614320565b858281518110614365576143656152df565b6020026020010151811015614386578161437e81615eff565b925050614353565b8183136144175785828151811061439f5761439f6152df565b60200260200101518684815181106143b9576143b96152df565b60200260200101518785815181106143d3576143d36152df565b602002602001018885815181106143ec576143ec6152df565b6020908102919091010191909152528261440581615ece565b935050818061441390615eff565b9250505b614318565b8185121561442f5761442f8686846142ca565b83831215611486576114868684866142ca565b6000818152600183016020526040812054801561452b576000614466600183615692565b855490915060009061447a90600190615692565b90508181146144df57600086600001828154811061449a5761449a6152df565b90600052602060002001549050808760000184815481106144bd576144bd6152df565b6000918252602080832090910192909255918252600188019052604090208390555b85548690806144f0576144f0615f56565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505050611545565b6000915050611545565b5092915050565b6000826000018281548110614553576145536152df565b9060005260206000200154905092915050565b60008181526001830160205260408120546145ad57508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155611545565b506000611545565b5080546000825590600052602060002090810190613dcd9190614629565b828054828255906000526020600020908101928215614619579160200282015b8281111561461957825182906146099082615a46565b50916020019190600101906145f3565b5061462592915061463e565b5090565b5b80821115614625576000815560010161462a565b80821115614625576000614652828261465b565b5060010161463e565b508054614667906156a5565b6000825580601f10614677575050565b601f016020900490600052602060002090810190613dcd9190614629565b60ff81168114613dcd57600080fd5b63ffffffff81168114613dcd57600080fd5b7f4e487b7100000000000000000
000000000000000000000000000000000000000600052604160045260246000fd5b604051610140810167ffffffffffffffff81118282101715614709576147096146b6565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715614756576147566146b6565b604052919050565b600067ffffffffffffffff821115614778576147786146b6565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600082601f8301126147b557600080fd5b81356147c86147c38261475e565b61470f565b8181528460208386010111156147dd57600080fd5b816020850160208301376000918101602001919091529392505050565b6bffffffffffffffffffffffff81168114613dcd57600080fd5b600080600080600080600060e0888a03121561482f57600080fd5b873561483a81614695565b9650602088013561484a816146a4565b9550604088013561485a81614695565b9450606088013567ffffffffffffffff81111561487657600080fd5b6148828a828b016147a4565b9450506080880135614893816147fa565b9699959850939692959460a0840135945060c09093013592915050565b803561ffff811681146148c257600080fd5b919050565b6000806000606084860312156148dc57600080fd5b833592506148ec602085016148b0565b9150604084013590509250925092565b6000806040838503121561490f57600080fd5b82359150602083013567ffffffffffffffff81111561492d57600080fd5b614939858286016147a4565b9150509250929050565b60006020828403121561495557600080fd5b5035919050565b60005b8381101561497757818101518382015260200161495f565b50506000910152565b6000815180845261499881602086016020860161495c565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000611cef6020830184614980565b73ffffffffffffffffffffffffffffffffffffffff81168114613dcd57600080fd5b600080600080600080600060e0888a031215614a1a57600080fd5b873596506020880135614a2c816149dd565b95506040880135614a3c81614695565b969995985095966060810135965060808101359560a0820135955060c0909101359350915050565b60008060408385031215614a7757600080fd5b82359150614a87602084016148b0565b90509250929050565b600060208284031215614aa257600080fd5b8135611cef816149dd5
65b60008083601f840112614abf57600080fd5b50813567ffffffffffffffff811115614ad757600080fd5b6020830191508360208260051b850101111561179957600080fd5b600080600080600080600060c0888a031215614b0d57600080fd5b873567ffffffffffffffff811115614b2457600080fd5b614b308a828b01614aad565b9098509650506020880135614b4481614695565b96999598509596604081013596506060810135956080820135955060a0909101359350915050565b600080600060608486031215614b8157600080fd5b505081359360208301359350604090920135919050565b60208152614bbf60208201835173ffffffffffffffffffffffffffffffffffffffff169052565b60006020830151614bd8604084018263ffffffff169052565b506040830151610140806060850152614bf5610160850183614980565b91506060850151614c1660808601826bffffffffffffffffffffffff169052565b50608085015173ffffffffffffffffffffffffffffffffffffffff811660a08601525060a085015167ffffffffffffffff811660c08601525060c085015163ffffffff811660e08601525060e0850151610100614c82818701836bffffffffffffffffffffffff169052565b8601519050610120614c978682018315159052565b8601518584037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001838701529050614ccf8382614980565b9695505050505050565b60008060208385031215614cec57600080fd5b823567ffffffffffffffff811115614d0357600080fd5b614d0f85828601614aad565b90969095509350505050565b60008060408385031215614d2e57600080fd5b823567ffffffffffffffff80821115614d4657600080fd5b908401906101008287031215614d5b57600080fd5b90925060208401359080821115614d7157600080fd5b50614939858286016147a4565b8215158152604060208201526000611cec6040830184614980565b60008083601f840112614dab57600080fd5b50813567ffffffffffffffff811115614dc357600080fd5b60208301915083602082850101111561179957600080fd5b60008060208385031215614dee57600080fd5b823567ffffffffffffffff811115614e0557600080fd5b614d0f85828601614d99565b6020808252825182820181905260009190848201906040850190845b81811015614e4957835183529284019291840191600101614e2d565b50909695505050505050565b600067ffffffffffffffff821115614e6f57614e6f6146b6565b5060051b60200190565b60008060408385031215614e8c57600080fd5b823567fffffffff
fffffff80821115614ea457600080fd5b818501915085601f830112614eb857600080fd5b81356020614ec86147c383614e55565b82815260059290921b84018101918181019089841115614ee757600080fd5b8286015b84811015614f1f57803586811115614f035760008081fd5b614f118c86838b01016147a4565b845250918301918301614eeb565b5096505086013592505080821115614d7157600080fd5b600080600060408486031215614f4b57600080fd5b833567ffffffffffffffff811115614f6257600080fd5b614f6e86828701614aad565b9094509250506020840135614f82816146a4565b809150509250925092565b60008060408385031215614fa057600080fd5b50508035926020909101359150565b600080600060408486031215614fc457600080fd5b83359250602084013567ffffffffffffffff811115614fe257600080fd5b614fee86828701614d99565b9497909650939450505050565b6000806040838503121561500e57600080fd5b823567ffffffffffffffff8082111561502657600080fd5b615032868387016147a4565b93506020850135915080821115614d7157600080fd5b6000602080838503121561505b57600080fd5b823567ffffffffffffffff8082111561507357600080fd5b818501915085601f83011261508757600080fd5b81356150956147c382614e55565b81815260059190911b830184019084810190888311156150b457600080fd5b8585015b838110156150ec578035858111156150d05760008081fd5b6150de8b89838a01016147a4565b8452509186019186016150b8565b5098975050505050505050565b6000806040838503121561510c57600080fd5b82359150602083013561511e816147fa565b809150509250929050565b60006020828403121561513b57600080fd5b8135611cef81614695565b6000806040838503121561515957600080fd5b82359150602083013561511e816146a4565b6000806040838503121561517e57600080fd5b82359150602083013561511e81614695565b600080600080606085870312156151a657600080fd5b843567ffffffffffffffff8111156151bd57600080fd5b6151c987828801614aad565b90955093505060208501356151dd81614695565b915060408501356151ed81614695565b939692955090935050565b60008060008060008060c0878903121561521157600080fd5b863561521c816149dd565b9550602087013561522c81614695565b95989597505050506040840135936060810135936080820135935060a0909101359150565b7f4e487b71000000000000000000000000000000000000000000000000000000006000526011600452602
46000fd5b60006bffffffffffffffffffffffff808316818516818304811182151516156152ab576152ab615251565b02949350505050565b805180151581146148c257600080fd5b6000602082840312156152d657600080fd5b611cef826152b4565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600060ff821660ff810361532457615324615251565b60010192915050565b828152604060208201526000611cec6040830184614980565b600082601f83011261535757600080fd5b81516153656147c38261475e565b81815284602083860101111561537a57600080fd5b61538b82602083016020870161495c565b949350505050565b6000602082840312156153a557600080fd5b815167ffffffffffffffff8111156153bc57600080fd5b61538b84828501615346565b80516148c2816147fa565b6000602082840312156153e557600080fd5b8151611cef816147fa565b80516148c2816149dd565b6000806040838503121561540e57600080fd5b8251615419816149dd565b6020939093015192949293505050565b60006020828403121561543b57600080fd5b8151611cef816149dd565b60006020828403121561545857600080fd5b8151611cef81614695565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361549457615494615251565b5060010190565b80516148c2816146a4565b805167ffffffffffffffff811681146148c257600080fd5b6000602082840312156154d057600080fd5b815167ffffffffffffffff808211156154e857600080fd5b9083019061014082860312156154fd57600080fd5b6155056146e5565b61550e836153f0565b815261551c6020840161549b565b602082015260408301518281111561553357600080fd5b61553f87828601615346565b604083015250615551606084016153c8565b6060820152615562608084016153f0565b608082015261557360a084016154a6565b60a082015261558460c0840161549b565b60c082015261559560e084016153c8565b60e08201526101006155a88185016152b4565b9082015261012083810151838111156155c057600080fd5b6155cc88828701615346565b918301919091525095945050505050565b8082018082111561154557611545615251565b600061ffff80831681810361560757615607615251565b6001019392505050565b60006020828403121561562357600080fd5b5051919050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe184360301811261565f57600080fd5b830180359
15067ffffffffffffffff82111561567a57600080fd5b6020019150600581901b360382131561179957600080fd5b8181038181111561154557611545615251565b600181811c908216806156b957607f821691505b6020821081036156f2577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b6000808354615706816156a5565b6001828116801561571e576001811461575157615780565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0084168752821515830287019450615780565b8760005260208060002060005b858110156157775781548a82015290840190820161575e565b50505082870194505b50929695505050505050565b60008154615799816156a5565b8085526020600183811680156157b657600181146157ee5761581c565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff008516838901528284151560051b890101955061581c565b866000528260002060005b858110156158145781548a82018601529083019084016157f9565b890184019650505b505050505092915050565b60a08152600061583a60a083018861578c565b6020838203818501528188548084528284019150828160051b8501018a6000528360002060005b838110156158ac577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe087840301855261589a838361578c565b94860194925060019182019101615861565b505086810360408801526158c0818b61578c565b94505050505084606084015282810360808401526158de8185614980565b98975050505050505050565b6000604082016040835280855180835260608501915060608160051b8601019250602080880160005b8381101561595f577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa088870301855261594d868351614980565b95509382019390820190600101615913565b5050858403818701525050506159758185614980565b95945050505050565b60008060006060848603121561599357600080fd5b83519250602084015191506040840151614f82816149dd565b83815260406020820152816040820152818360608301376000818301606090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016010192915050565b601f821115612d8457600081815260208120601f850160051c81016020861015615a275750805b601f850160051c820191505b8181101561148657828155600101615a33565b815167fffff
fffffffffff811115615a6057615a606146b6565b615a7481615a6e84546156a5565b84615a00565b602080601f831160018114615ac75760008415615a915750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555611486565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b82811015615b1457888601518255948401946001909101908401615af5565b5085821015615b5057878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b60006020808385031215615b7357600080fd5b825167ffffffffffffffff811115615b8a57600080fd5b8301601f81018513615b9b57600080fd5b8051615ba96147c382614e55565b81815260059190911b82018301908381019087831115615bc857600080fd5b928401925b82841015615be657835182529284019290840190615bcd565b979650505050505050565b600063ffffffff80831681810361560757615607615251565b805160208083015191908110156156f2577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60209190910360031b1b16919050565b6020815260008251610140806020850152615c6b610160850183614980565b915060208501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe080868503016040870152615ca78483614980565b935060408701519150615cd2606087018373ffffffffffffffffffffffffffffffffffffffff169052565b606087015163ffffffff811660808801529150608087015173ffffffffffffffffffffffffffffffffffffffff811660a0880152915060a087015160ff811660c0880152915060c08701519150808685030160e0870152615d338483614980565b935060e08701519150610100818786030181880152615d528584614980565b945080880151925050610120818786030181880152615d718584614980565b94508088015192505050615d94828601826bffffffffffffffffffffffff169052565b5090949350505050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615615dd657615dd6615251565b500290565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b600082615e1957615e19615ddb565b500490565b8181036000831280158383131683831282161715614
53557614535615251565b600082615e4d57615e4d615ddb565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff83147f800000000000000000000000000000000000000000000000000000000000000083141615615ea157615ea1615251565b500590565b8082018281126000831280158216821582161715615ec657615ec6615251565b505092915050565b60007f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361549457615494615251565b60007f80000000000000000000000000000000000000000000000000000000000000008203615f3057615f30615251565b507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fdfea164736f6c6343000810000a307834353534343832643535353334343264343135323432343935343532353534643264353434353533353434653435353430303030303030303030303030303030307834323534343332643535353334343264343135323432343935343532353534643264353434353533353434653435353430303030303030303030303030303030", +} + +var VerifiableLoadLogTriggerUpkeepABI = VerifiableLoadLogTriggerUpkeepMetaData.ABI + +var VerifiableLoadLogTriggerUpkeepBin = VerifiableLoadLogTriggerUpkeepMetaData.Bin + +func DeployVerifiableLoadLogTriggerUpkeep(auth *bind.TransactOpts, backend bind.ContractBackend, _registrar common.Address, _useArb bool, _useMercury bool) (common.Address, *types.Transaction, *VerifiableLoadLogTriggerUpkeep, error) { + parsed, err := VerifiableLoadLogTriggerUpkeepMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VerifiableLoadLogTriggerUpkeepBin), backend, _registrar, _useArb, _useMercury) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VerifiableLoadLogTriggerUpkeep{address: address, abi: *parsed, VerifiableLoadLogTriggerUpkeepCaller: VerifiableLoadLogTriggerUpkeepCaller{contract: 
contract}, VerifiableLoadLogTriggerUpkeepTransactor: VerifiableLoadLogTriggerUpkeepTransactor{contract: contract}, VerifiableLoadLogTriggerUpkeepFilterer: VerifiableLoadLogTriggerUpkeepFilterer{contract: contract}}, nil +} + +type VerifiableLoadLogTriggerUpkeep struct { + address common.Address + abi abi.ABI + VerifiableLoadLogTriggerUpkeepCaller + VerifiableLoadLogTriggerUpkeepTransactor + VerifiableLoadLogTriggerUpkeepFilterer +} + +type VerifiableLoadLogTriggerUpkeepCaller struct { + contract *bind.BoundContract +} + +type VerifiableLoadLogTriggerUpkeepTransactor struct { + contract *bind.BoundContract +} + +type VerifiableLoadLogTriggerUpkeepFilterer struct { + contract *bind.BoundContract +} + +type VerifiableLoadLogTriggerUpkeepSession struct { + Contract *VerifiableLoadLogTriggerUpkeep + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VerifiableLoadLogTriggerUpkeepCallerSession struct { + Contract *VerifiableLoadLogTriggerUpkeepCaller + CallOpts bind.CallOpts +} + +type VerifiableLoadLogTriggerUpkeepTransactorSession struct { + Contract *VerifiableLoadLogTriggerUpkeepTransactor + TransactOpts bind.TransactOpts +} + +type VerifiableLoadLogTriggerUpkeepRaw struct { + Contract *VerifiableLoadLogTriggerUpkeep +} + +type VerifiableLoadLogTriggerUpkeepCallerRaw struct { + Contract *VerifiableLoadLogTriggerUpkeepCaller +} + +type VerifiableLoadLogTriggerUpkeepTransactorRaw struct { + Contract *VerifiableLoadLogTriggerUpkeepTransactor +} + +func NewVerifiableLoadLogTriggerUpkeep(address common.Address, backend bind.ContractBackend) (*VerifiableLoadLogTriggerUpkeep, error) { + abi, err := abi.JSON(strings.NewReader(VerifiableLoadLogTriggerUpkeepABI)) + if err != nil { + return nil, err + } + contract, err := bindVerifiableLoadLogTriggerUpkeep(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VerifiableLoadLogTriggerUpkeep{address: address, abi: abi, VerifiableLoadLogTriggerUpkeepCaller: 
VerifiableLoadLogTriggerUpkeepCaller{contract: contract}, VerifiableLoadLogTriggerUpkeepTransactor: VerifiableLoadLogTriggerUpkeepTransactor{contract: contract}, VerifiableLoadLogTriggerUpkeepFilterer: VerifiableLoadLogTriggerUpkeepFilterer{contract: contract}}, nil +} + +func NewVerifiableLoadLogTriggerUpkeepCaller(address common.Address, caller bind.ContractCaller) (*VerifiableLoadLogTriggerUpkeepCaller, error) { + contract, err := bindVerifiableLoadLogTriggerUpkeep(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VerifiableLoadLogTriggerUpkeepCaller{contract: contract}, nil +} + +func NewVerifiableLoadLogTriggerUpkeepTransactor(address common.Address, transactor bind.ContractTransactor) (*VerifiableLoadLogTriggerUpkeepTransactor, error) { + contract, err := bindVerifiableLoadLogTriggerUpkeep(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VerifiableLoadLogTriggerUpkeepTransactor{contract: contract}, nil +} + +func NewVerifiableLoadLogTriggerUpkeepFilterer(address common.Address, filterer bind.ContractFilterer) (*VerifiableLoadLogTriggerUpkeepFilterer, error) { + contract, err := bindVerifiableLoadLogTriggerUpkeep(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VerifiableLoadLogTriggerUpkeepFilterer{contract: contract}, nil +} + +func bindVerifiableLoadLogTriggerUpkeep(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VerifiableLoadLogTriggerUpkeepMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return 
_VerifiableLoadLogTriggerUpkeep.Contract.VerifiableLoadLogTriggerUpkeepCaller.contract.Call(opts, result, method, params...) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.VerifiableLoadLogTriggerUpkeepTransactor.contract.Transfer(opts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.VerifiableLoadLogTriggerUpkeepTransactor.contract.Transact(opts, method, params...) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VerifiableLoadLogTriggerUpkeep.Contract.contract.Call(opts, result, method, params...) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.contract.Transfer(opts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) BUCKETSIZE(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "BUCKET_SIZE") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) BUCKETSIZE() (uint16, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BUCKETSIZE(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) BUCKETSIZE() (uint16, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BUCKETSIZE(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) AddLinkAmount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "addLinkAmount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) AddLinkAmount() (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.AddLinkAmount(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) AddLinkAmount() (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.AddLinkAmount(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) BucketedDelays(opts *bind.CallOpts, arg0 *big.Int, arg1 uint16, arg2 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "bucketedDelays", arg0, arg1, arg2) + + if err != nil { + 
return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) BucketedDelays(arg0 *big.Int, arg1 uint16, arg2 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BucketedDelays(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0, arg1, arg2) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) BucketedDelays(arg0 *big.Int, arg1 uint16, arg2 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BucketedDelays(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0, arg1, arg2) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) Buckets(opts *bind.CallOpts, arg0 *big.Int) (uint16, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "buckets", arg0) + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) Buckets(arg0 *big.Int) (uint16, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Buckets(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) Buckets(arg0 *big.Int) (uint16, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Buckets(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) CheckCallback(opts *bind.CallOpts, values [][]byte, extraData []byte) (bool, []byte, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "checkCallback", values, extraData) + + if err != nil { + return *new(bool), *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out1 := 
*abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return out0, out1, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) CheckCallback(values [][]byte, extraData []byte) (bool, []byte, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.CheckCallback(&_VerifiableLoadLogTriggerUpkeep.CallOpts, values, extraData) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) CheckCallback(values [][]byte, extraData []byte) (bool, []byte, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.CheckCallback(&_VerifiableLoadLogTriggerUpkeep.CallOpts, values, extraData) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) CheckGasToBurns(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "checkGasToBurns", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) CheckGasToBurns(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.CheckGasToBurns(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) CheckGasToBurns(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.CheckGasToBurns(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) Counters(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "counters", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func 
(_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) Counters(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Counters(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) Counters(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Counters(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) Delays(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "delays", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) Delays(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Delays(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0, arg1) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) Delays(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Delays(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0, arg1) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "dummyMap", arg0) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) DummyMap(arg0 [32]byte) (bool, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.DummyMap(&_VerifiableLoadLogTriggerUpkeep.CallOpts, 
arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) DummyMap(arg0 [32]byte) (bool, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.DummyMap(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) Eligible(opts *bind.CallOpts, upkeepId *big.Int) (bool, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "eligible", upkeepId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) Eligible(upkeepId *big.Int) (bool, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Eligible(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) Eligible(upkeepId *big.Int) (bool, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Eligible(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) EmittedAgainSig(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "emittedAgainSig") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) EmittedAgainSig() ([32]byte, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.EmittedAgainSig(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) EmittedAgainSig() ([32]byte, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.EmittedAgainSig(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func 
(_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) EmittedSig(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "emittedSig") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) EmittedSig() ([32]byte, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.EmittedSig(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) EmittedSig() ([32]byte, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.EmittedSig(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) FeedParamKey(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "feedParamKey") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) FeedParamKey() (string, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.FeedParamKey(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) FeedParamKey() (string, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.FeedParamKey(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) FeedsHex(opts *bind.CallOpts, arg0 *big.Int) (string, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "feedsHex", arg0) + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], 
new(string)).(*string) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) FeedsHex(arg0 *big.Int) (string, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.FeedsHex(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) FeedsHex(arg0 *big.Int) (string, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.FeedsHex(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) FirstPerformBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "firstPerformBlocks", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) FirstPerformBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.FirstPerformBlocks(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) FirstPerformBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.FirstPerformBlocks(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GasLimits(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "gasLimits", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GasLimits(arg0 *big.Int) (*big.Int, error) { + return 
_VerifiableLoadLogTriggerUpkeep.Contract.GasLimits(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GasLimits(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GasLimits(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetActiveUpkeepIDsDeployedByThisContract(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getActiveUpkeepIDsDeployedByThisContract", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetActiveUpkeepIDsDeployedByThisContract(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetActiveUpkeepIDsDeployedByThisContract(&_VerifiableLoadLogTriggerUpkeep.CallOpts, startIndex, maxCount) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetActiveUpkeepIDsDeployedByThisContract(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetActiveUpkeepIDsDeployedByThisContract(&_VerifiableLoadLogTriggerUpkeep.CallOpts, startIndex, maxCount) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetAllActiveUpkeepIDsOnRegistry(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getAllActiveUpkeepIDsOnRegistry", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], 
new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetAllActiveUpkeepIDsOnRegistry(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetAllActiveUpkeepIDsOnRegistry(&_VerifiableLoadLogTriggerUpkeep.CallOpts, startIndex, maxCount) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetAllActiveUpkeepIDsOnRegistry(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetAllActiveUpkeepIDsOnRegistry(&_VerifiableLoadLogTriggerUpkeep.CallOpts, startIndex, maxCount) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getBalance", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetBalance(id *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetBalance(&_VerifiableLoadLogTriggerUpkeep.CallOpts, id) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetBalance(id *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetBalance(&_VerifiableLoadLogTriggerUpkeep.CallOpts, id) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetBucketedDelays(opts *bind.CallOpts, upkeepId *big.Int, bucket uint16) ([]*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getBucketedDelays", upkeepId, bucket) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], 
new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetBucketedDelays(upkeepId *big.Int, bucket uint16) ([]*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetBucketedDelays(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId, bucket) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetBucketedDelays(upkeepId *big.Int, bucket uint16) ([]*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetBucketedDelays(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId, bucket) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetBucketedDelaysLength(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getBucketedDelaysLength", upkeepId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetBucketedDelaysLength(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetBucketedDelaysLength(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetBucketedDelaysLength(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetBucketedDelaysLength(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetDelays(opts *bind.CallOpts, upkeepId *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getDelays", upkeepId) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], 
new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetDelays(upkeepId *big.Int) ([]*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetDelays(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetDelays(upkeepId *big.Int) ([]*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetDelays(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetDelaysLength(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getDelaysLength", upkeepId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetDelaysLength(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetDelaysLength(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetDelaysLength(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetDelaysLength(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getForwarder", upkeepID) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep 
*VerifiableLoadLogTriggerUpkeepSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetForwarder(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepID) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetForwarder(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepID) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetLogTriggerConfig(opts *bind.CallOpts, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) ([]byte, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getLogTriggerConfig", addr, selector, topic0, topic1, topic2, topic3) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetLogTriggerConfig(addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) ([]byte, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetLogTriggerConfig(&_VerifiableLoadLogTriggerUpkeep.CallOpts, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetLogTriggerConfig(addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) ([]byte, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetLogTriggerConfig(&_VerifiableLoadLogTriggerUpkeep.CallOpts, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) { + var out []interface{} + err 
:= _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getMinBalanceForUpkeep", upkeepId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetMinBalanceForUpkeep(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetMinBalanceForUpkeep(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetMinBalanceForUpkeep(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetMinBalanceForUpkeep(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetPxDelayLastNPerforms(opts *bind.CallOpts, upkeepId *big.Int, p *big.Int, n *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getPxDelayLastNPerforms", upkeepId, p, n) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetPxDelayLastNPerforms(upkeepId *big.Int, p *big.Int, n *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetPxDelayLastNPerforms(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId, p, n) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetPxDelayLastNPerforms(upkeepId *big.Int, p *big.Int, n *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetPxDelayLastNPerforms(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId, p, n) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetSumDelayInBucket(opts *bind.CallOpts, upkeepId 
*big.Int, bucket uint16) (*big.Int, *big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getSumDelayInBucket", upkeepId, bucket) + + if err != nil { + return *new(*big.Int), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetSumDelayInBucket(upkeepId *big.Int, bucket uint16) (*big.Int, *big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetSumDelayInBucket(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId, bucket) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetSumDelayInBucket(upkeepId *big.Int, bucket uint16) (*big.Int, *big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetSumDelayInBucket(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId, bucket) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetSumDelayLastNPerforms(opts *bind.CallOpts, upkeepId *big.Int, n *big.Int) (*big.Int, *big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getSumDelayLastNPerforms", upkeepId, n) + + if err != nil { + return *new(*big.Int), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetSumDelayLastNPerforms(upkeepId *big.Int, n *big.Int) (*big.Int, *big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetSumDelayLastNPerforms(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId, n) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetSumDelayLastNPerforms(upkeepId *big.Int, n 
*big.Int) (*big.Int, *big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetSumDelayLastNPerforms(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId, n) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getTriggerType", upkeepId) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetTriggerType(upkeepId *big.Int) (uint8, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetTriggerType(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetTriggerType(upkeepId *big.Int) (uint8, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetTriggerType(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetUpkeepInfo(opts *bind.CallOpts, upkeepId *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getUpkeepInfo", upkeepId) + + if err != nil { + return *new(KeeperRegistryBase21UpkeepInfo), err + } + + out0 := *abi.ConvertType(out[0], new(KeeperRegistryBase21UpkeepInfo)).(*KeeperRegistryBase21UpkeepInfo) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetUpkeepInfo(upkeepId *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetUpkeepInfo(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetUpkeepInfo(upkeepId *big.Int) 
(KeeperRegistryBase21UpkeepInfo, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetUpkeepInfo(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getUpkeepPrivilegeConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetUpkeepPrivilegeConfig(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetUpkeepPrivilegeConfig(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "getUpkeepTriggerConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.GetUpkeepTriggerConfig(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return 
_VerifiableLoadLogTriggerUpkeep.Contract.GetUpkeepTriggerConfig(&_VerifiableLoadLogTriggerUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) Intervals(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "intervals", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) Intervals(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Intervals(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) Intervals(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Intervals(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) LastTopUpBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "lastTopUpBlocks", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) LastTopUpBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.LastTopUpBlocks(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) LastTopUpBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.LastTopUpBlocks(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) 
LinkToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "linkToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) LinkToken() (common.Address, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.LinkToken(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) LinkToken() (common.Address, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.LinkToken(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) LogNum(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "logNum") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) LogNum() (uint8, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.LogNum(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) LogNum() (uint8, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.LogNum(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) MinBalanceThresholdMultiplier(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "minBalanceThresholdMultiplier") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func 
(_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) MinBalanceThresholdMultiplier() (uint8, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.MinBalanceThresholdMultiplier(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) MinBalanceThresholdMultiplier() (uint8, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.MinBalanceThresholdMultiplier(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) Owner() (common.Address, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Owner(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) Owner() (common.Address, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Owner(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) PerformDataSizes(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "performDataSizes", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) PerformDataSizes(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.PerformDataSizes(&_VerifiableLoadLogTriggerUpkeep.CallOpts, 
arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) PerformDataSizes(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.PerformDataSizes(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) PerformGasToBurns(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "performGasToBurns", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) PerformGasToBurns(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.PerformGasToBurns(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) PerformGasToBurns(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.PerformGasToBurns(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) PreviousPerformBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "previousPerformBlocks", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) PreviousPerformBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.PreviousPerformBlocks(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) PreviousPerformBlocks(arg0 
*big.Int) (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.PreviousPerformBlocks(&_VerifiableLoadLogTriggerUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) Registrar(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "registrar") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) Registrar() (common.Address, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Registrar(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) Registrar() (common.Address, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Registrar(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) Registry(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "registry") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) Registry() (common.Address, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Registry(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) Registry() (common.Address, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Registry(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) TimeParamKey(opts *bind.CallOpts) (string, error) { + 
var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "timeParamKey") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) TimeParamKey() (string, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.TimeParamKey(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) TimeParamKey() (string, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.TimeParamKey(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) UpkeepTopUpCheckInterval(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "upkeepTopUpCheckInterval") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) UpkeepTopUpCheckInterval() (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.UpkeepTopUpCheckInterval(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) UpkeepTopUpCheckInterval() (*big.Int, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.UpkeepTopUpCheckInterval(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) UseArbitrumBlockNum(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "useArbitrumBlockNum") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func 
(_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) UseArbitrumBlockNum() (bool, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.UseArbitrumBlockNum(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) UseArbitrumBlockNum() (bool, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.UseArbitrumBlockNum(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCaller) UseMercury(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _VerifiableLoadLogTriggerUpkeep.contract.Call(opts, &out, "useMercury") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) UseMercury() (bool, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.UseMercury(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepCallerSession) UseMercury() (bool, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.UseMercury(&_VerifiableLoadLogTriggerUpkeep.CallOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "acceptOwnership") +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) AcceptOwnership() (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.AcceptOwnership(&_VerifiableLoadLogTriggerUpkeep.TransactOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return 
_VerifiableLoadLogTriggerUpkeep.Contract.AcceptOwnership(&_VerifiableLoadLogTriggerUpkeep.TransactOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) AddFunds(opts *bind.TransactOpts, upkeepId *big.Int, amount *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "addFunds", upkeepId, amount) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) AddFunds(upkeepId *big.Int, amount *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.AddFunds(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, amount) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) AddFunds(upkeepId *big.Int, amount *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.AddFunds(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, amount) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) BatchCancelUpkeeps(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "batchCancelUpkeeps", upkeepIds) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) BatchCancelUpkeeps(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchCancelUpkeeps(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) BatchCancelUpkeeps(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchCancelUpkeeps(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) BatchPreparingUpkeeps(opts *bind.TransactOpts, upkeepIds []*big.Int, selector uint8, topic0 
[32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "batchPreparingUpkeeps", upkeepIds, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) BatchPreparingUpkeeps(upkeepIds []*big.Int, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchPreparingUpkeeps(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepIds, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) BatchPreparingUpkeeps(upkeepIds []*big.Int, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchPreparingUpkeeps(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepIds, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) BatchPreparingUpkeepsSimple(opts *bind.TransactOpts, upkeepIds []*big.Int, log uint8, selector uint8) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "batchPreparingUpkeepsSimple", upkeepIds, log, selector) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) BatchPreparingUpkeepsSimple(upkeepIds []*big.Int, log uint8, selector uint8) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchPreparingUpkeepsSimple(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepIds, log, selector) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) BatchPreparingUpkeepsSimple(upkeepIds []*big.Int, log uint8, selector uint8) (*types.Transaction, error) { + return 
_VerifiableLoadLogTriggerUpkeep.Contract.BatchPreparingUpkeepsSimple(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepIds, log, selector) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) BatchRegisterUpkeeps(opts *bind.TransactOpts, number uint8, gasLimit uint32, triggerType uint8, triggerConfig []byte, amount *big.Int, checkGasToBurn *big.Int, performGasToBurn *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "batchRegisterUpkeeps", number, gasLimit, triggerType, triggerConfig, amount, checkGasToBurn, performGasToBurn) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) BatchRegisterUpkeeps(number uint8, gasLimit uint32, triggerType uint8, triggerConfig []byte, amount *big.Int, checkGasToBurn *big.Int, performGasToBurn *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchRegisterUpkeeps(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, number, gasLimit, triggerType, triggerConfig, amount, checkGasToBurn, performGasToBurn) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) BatchRegisterUpkeeps(number uint8, gasLimit uint32, triggerType uint8, triggerConfig []byte, amount *big.Int, checkGasToBurn *big.Int, performGasToBurn *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchRegisterUpkeeps(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, number, gasLimit, triggerType, triggerConfig, amount, checkGasToBurn, performGasToBurn) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) BatchSendLogs(opts *bind.TransactOpts, log uint8) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "batchSendLogs", log) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) BatchSendLogs(log uint8) (*types.Transaction, error) { + return 
_VerifiableLoadLogTriggerUpkeep.Contract.BatchSendLogs(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, log) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) BatchSendLogs(log uint8) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchSendLogs(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, log) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) BatchSetIntervals(opts *bind.TransactOpts, upkeepIds []*big.Int, interval uint32) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "batchSetIntervals", upkeepIds, interval) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) BatchSetIntervals(upkeepIds []*big.Int, interval uint32) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchSetIntervals(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepIds, interval) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) BatchSetIntervals(upkeepIds []*big.Int, interval uint32) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchSetIntervals(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepIds, interval) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) BatchUpdatePipelineData(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "batchUpdatePipelineData", upkeepIds) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) BatchUpdatePipelineData(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchUpdatePipelineData(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) 
BatchUpdatePipelineData(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchUpdatePipelineData(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) BatchWithdrawLinks(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "batchWithdrawLinks", upkeepIds) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) BatchWithdrawLinks(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchWithdrawLinks(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) BatchWithdrawLinks(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BatchWithdrawLinks(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) BurnPerformGas(opts *bind.TransactOpts, upkeepId *big.Int, startGas *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "burnPerformGas", upkeepId, startGas, blockNum) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) BurnPerformGas(upkeepId *big.Int, startGas *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.BurnPerformGas(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, startGas, blockNum) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) BurnPerformGas(upkeepId *big.Int, startGas *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return 
_VerifiableLoadLogTriggerUpkeep.Contract.BurnPerformGas(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, startGas, blockNum) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) CheckLog(opts *bind.TransactOpts, log Log, checkData []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "checkLog", log, checkData) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) CheckLog(log Log, checkData []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.CheckLog(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, log, checkData) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) CheckLog(log Log, checkData []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.CheckLog(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, log, checkData) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "performUpkeep", performData) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.PerformUpkeep(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, performData) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.PerformUpkeep(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, performData) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) SendLog(opts *bind.TransactOpts, upkeepId *big.Int, log uint8) (*types.Transaction, error) { + return 
_VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "sendLog", upkeepId, log) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) SendLog(upkeepId *big.Int, log uint8) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SendLog(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, log) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) SendLog(upkeepId *big.Int, log uint8) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SendLog(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, log) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) SetConfig(opts *bind.TransactOpts, newRegistrar common.Address) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "setConfig", newRegistrar) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) SetConfig(newRegistrar common.Address) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetConfig(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, newRegistrar) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) SetConfig(newRegistrar common.Address) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetConfig(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, newRegistrar) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) SetFeeds(opts *bind.TransactOpts, _feeds []string) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "setFeeds", _feeds) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) SetFeeds(_feeds []string) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetFeeds(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, _feeds) +} + +func 
(_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) SetFeeds(_feeds []string) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetFeeds(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, _feeds) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) SetInterval(opts *bind.TransactOpts, upkeepId *big.Int, _interval *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "setInterval", upkeepId, _interval) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) SetInterval(upkeepId *big.Int, _interval *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetInterval(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, _interval) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) SetInterval(upkeepId *big.Int, _interval *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetInterval(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, _interval) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) SetLog(opts *bind.TransactOpts, _log uint8) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "setLog", _log) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) SetLog(_log uint8) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetLog(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, _log) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) SetLog(_log uint8) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetLog(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, _log) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) SetParamKeys(opts 
*bind.TransactOpts, _feedParamKey string, _timeParamKey string) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "setParamKeys", _feedParamKey, _timeParamKey) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) SetParamKeys(_feedParamKey string, _timeParamKey string) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetParamKeys(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, _feedParamKey, _timeParamKey) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) SetParamKeys(_feedParamKey string, _timeParamKey string) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetParamKeys(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, _feedParamKey, _timeParamKey) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) SetPerformDataSize(opts *bind.TransactOpts, upkeepId *big.Int, value *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "setPerformDataSize", upkeepId, value) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) SetPerformDataSize(upkeepId *big.Int, value *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetPerformDataSize(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, value) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) SetPerformDataSize(upkeepId *big.Int, value *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetPerformDataSize(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, value) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) SetUpkeepGasLimit(opts *bind.TransactOpts, upkeepId *big.Int, gasLimit uint32) (*types.Transaction, error) { + return 
_VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "setUpkeepGasLimit", upkeepId, gasLimit) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) SetUpkeepGasLimit(upkeepId *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetUpkeepGasLimit(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, gasLimit) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) SetUpkeepGasLimit(upkeepId *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetUpkeepGasLimit(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, gasLimit) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "setUpkeepPrivilegeConfig", upkeepId, cfg) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetUpkeepPrivilegeConfig(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, cfg) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.SetUpkeepPrivilegeConfig(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, cfg) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) TopUpFund(opts *bind.TransactOpts, upkeepId *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "topUpFund", upkeepId, blockNum) +} + +func (_VerifiableLoadLogTriggerUpkeep 
*VerifiableLoadLogTriggerUpkeepSession) TopUpFund(upkeepId *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.TopUpFund(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, blockNum) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) TopUpFund(upkeepId *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.TopUpFund(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, blockNum) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "transferOwnership", to) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.TransferOwnership(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, to) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.TransferOwnership(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, to) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) UpdateLogTriggerConfig1(opts *bind.TransactOpts, upkeepId *big.Int, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "updateLogTriggerConfig1", upkeepId, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) UpdateLogTriggerConfig1(upkeepId *big.Int, addr common.Address, selector uint8, topic0 [32]byte, topic1 
[32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.UpdateLogTriggerConfig1(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) UpdateLogTriggerConfig1(upkeepId *big.Int, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.UpdateLogTriggerConfig1(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) UpdateLogTriggerConfig2(opts *bind.TransactOpts, upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "updateLogTriggerConfig2", upkeepId, cfg) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) UpdateLogTriggerConfig2(upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.UpdateLogTriggerConfig2(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, cfg) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) UpdateLogTriggerConfig2(upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.UpdateLogTriggerConfig2(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, cfg) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) UpdateUpkeepPipelineData(opts *bind.TransactOpts, upkeepId *big.Int, pipelineData []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "updateUpkeepPipelineData", upkeepId, pipelineData) +} + +func (_VerifiableLoadLogTriggerUpkeep 
*VerifiableLoadLogTriggerUpkeepSession) UpdateUpkeepPipelineData(upkeepId *big.Int, pipelineData []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.UpdateUpkeepPipelineData(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, pipelineData) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) UpdateUpkeepPipelineData(upkeepId *big.Int, pipelineData []byte) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.UpdateUpkeepPipelineData(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId, pipelineData) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) WithdrawLinks(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "withdrawLinks") +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) WithdrawLinks() (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.WithdrawLinks(&_VerifiableLoadLogTriggerUpkeep.TransactOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) WithdrawLinks() (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.WithdrawLinks(&_VerifiableLoadLogTriggerUpkeep.TransactOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) WithdrawLinks0(opts *bind.TransactOpts, upkeepId *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.Transact(opts, "withdrawLinks0", upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) WithdrawLinks0(upkeepId *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.WithdrawLinks0(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) 
WithdrawLinks0(upkeepId *big.Int) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.WithdrawLinks0(&_VerifiableLoadLogTriggerUpkeep.TransactOpts, upkeepId) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.contract.RawTransact(opts, nil) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepSession) Receive() (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Receive(&_VerifiableLoadLogTriggerUpkeep.TransactOpts) +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepTransactorSession) Receive() (*types.Transaction, error) { + return _VerifiableLoadLogTriggerUpkeep.Contract.Receive(&_VerifiableLoadLogTriggerUpkeep.TransactOpts) +} + +type VerifiableLoadLogTriggerUpkeepLogEmittedIterator struct { + Event *VerifiableLoadLogTriggerUpkeepLogEmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadLogTriggerUpkeepLogEmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadLogTriggerUpkeepLogEmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadLogTriggerUpkeepLogEmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifiableLoadLogTriggerUpkeepLogEmittedIterator) Error() error { + return it.fail +} + +func (it 
*VerifiableLoadLogTriggerUpkeepLogEmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadLogTriggerUpkeepLogEmitted struct { + UpkeepId *big.Int + BlockNum *big.Int + Addr common.Address + Raw types.Log +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) FilterLogEmitted(opts *bind.FilterOpts, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (*VerifiableLoadLogTriggerUpkeepLogEmittedIterator, error) { + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + var blockNumRule []interface{} + for _, blockNumItem := range blockNum { + blockNumRule = append(blockNumRule, blockNumItem) + } + var addrRule []interface{} + for _, addrItem := range addr { + addrRule = append(addrRule, addrItem) + } + + logs, sub, err := _VerifiableLoadLogTriggerUpkeep.contract.FilterLogs(opts, "LogEmitted", upkeepIdRule, blockNumRule, addrRule) + if err != nil { + return nil, err + } + return &VerifiableLoadLogTriggerUpkeepLogEmittedIterator{contract: _VerifiableLoadLogTriggerUpkeep.contract, event: "LogEmitted", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) WatchLogEmitted(opts *bind.WatchOpts, sink chan<- *VerifiableLoadLogTriggerUpkeepLogEmitted, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (event.Subscription, error) { + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + var blockNumRule []interface{} + for _, blockNumItem := range blockNum { + blockNumRule = append(blockNumRule, blockNumItem) + } + var addrRule []interface{} + for _, addrItem := range addr { + addrRule = append(addrRule, addrItem) + } + + logs, sub, err := _VerifiableLoadLogTriggerUpkeep.contract.WatchLogs(opts, "LogEmitted", upkeepIdRule, blockNumRule, addrRule) + if err != nil { + return 
nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadLogTriggerUpkeepLogEmitted) + if err := _VerifiableLoadLogTriggerUpkeep.contract.UnpackLog(event, "LogEmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) ParseLogEmitted(log types.Log) (*VerifiableLoadLogTriggerUpkeepLogEmitted, error) { + event := new(VerifiableLoadLogTriggerUpkeepLogEmitted) + if err := _VerifiableLoadLogTriggerUpkeep.contract.UnpackLog(event, "LogEmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifiableLoadLogTriggerUpkeepLogEmittedAgainIterator struct { + Event *VerifiableLoadLogTriggerUpkeepLogEmittedAgain + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadLogTriggerUpkeepLogEmittedAgainIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadLogTriggerUpkeepLogEmittedAgain) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadLogTriggerUpkeepLogEmittedAgain) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*VerifiableLoadLogTriggerUpkeepLogEmittedAgainIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadLogTriggerUpkeepLogEmittedAgainIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadLogTriggerUpkeepLogEmittedAgain struct { + UpkeepId *big.Int + BlockNum *big.Int + Addr common.Address + Raw types.Log +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) FilterLogEmittedAgain(opts *bind.FilterOpts, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (*VerifiableLoadLogTriggerUpkeepLogEmittedAgainIterator, error) { + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + var blockNumRule []interface{} + for _, blockNumItem := range blockNum { + blockNumRule = append(blockNumRule, blockNumItem) + } + var addrRule []interface{} + for _, addrItem := range addr { + addrRule = append(addrRule, addrItem) + } + + logs, sub, err := _VerifiableLoadLogTriggerUpkeep.contract.FilterLogs(opts, "LogEmittedAgain", upkeepIdRule, blockNumRule, addrRule) + if err != nil { + return nil, err + } + return &VerifiableLoadLogTriggerUpkeepLogEmittedAgainIterator{contract: _VerifiableLoadLogTriggerUpkeep.contract, event: "LogEmittedAgain", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) WatchLogEmittedAgain(opts *bind.WatchOpts, sink chan<- *VerifiableLoadLogTriggerUpkeepLogEmittedAgain, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (event.Subscription, error) { + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + var blockNumRule []interface{} + for _, blockNumItem := range blockNum { + blockNumRule = append(blockNumRule, blockNumItem) + } + var addrRule []interface{} + for _, addrItem := range addr { + addrRule = append(addrRule, addrItem) + } + + logs, 
sub, err := _VerifiableLoadLogTriggerUpkeep.contract.WatchLogs(opts, "LogEmittedAgain", upkeepIdRule, blockNumRule, addrRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadLogTriggerUpkeepLogEmittedAgain) + if err := _VerifiableLoadLogTriggerUpkeep.contract.UnpackLog(event, "LogEmittedAgain", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) ParseLogEmittedAgain(log types.Log) (*VerifiableLoadLogTriggerUpkeepLogEmittedAgain, error) { + event := new(VerifiableLoadLogTriggerUpkeepLogEmittedAgain) + if err := _VerifiableLoadLogTriggerUpkeep.contract.UnpackLog(event, "LogEmittedAgain", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifiableLoadLogTriggerUpkeepOwnershipTransferRequestedIterator struct { + Event *VerifiableLoadLogTriggerUpkeepOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadLogTriggerUpkeepOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadLogTriggerUpkeepOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadLogTriggerUpkeepOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, 
log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifiableLoadLogTriggerUpkeepOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadLogTriggerUpkeepOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadLogTriggerUpkeepOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifiableLoadLogTriggerUpkeepOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifiableLoadLogTriggerUpkeep.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VerifiableLoadLogTriggerUpkeepOwnershipTransferRequestedIterator{contract: _VerifiableLoadLogTriggerUpkeep.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VerifiableLoadLogTriggerUpkeepOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifiableLoadLogTriggerUpkeep.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, 
toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadLogTriggerUpkeepOwnershipTransferRequested) + if err := _VerifiableLoadLogTriggerUpkeep.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) ParseOwnershipTransferRequested(log types.Log) (*VerifiableLoadLogTriggerUpkeepOwnershipTransferRequested, error) { + event := new(VerifiableLoadLogTriggerUpkeepOwnershipTransferRequested) + if err := _VerifiableLoadLogTriggerUpkeep.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifiableLoadLogTriggerUpkeepOwnershipTransferredIterator struct { + Event *VerifiableLoadLogTriggerUpkeepOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadLogTriggerUpkeepOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadLogTriggerUpkeepOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadLogTriggerUpkeepOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return 
true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifiableLoadLogTriggerUpkeepOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadLogTriggerUpkeepOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadLogTriggerUpkeepOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifiableLoadLogTriggerUpkeepOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifiableLoadLogTriggerUpkeep.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VerifiableLoadLogTriggerUpkeepOwnershipTransferredIterator{contract: _VerifiableLoadLogTriggerUpkeep.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VerifiableLoadLogTriggerUpkeepOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifiableLoadLogTriggerUpkeep.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select 
{ + case log := <-logs: + + event := new(VerifiableLoadLogTriggerUpkeepOwnershipTransferred) + if err := _VerifiableLoadLogTriggerUpkeep.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) ParseOwnershipTransferred(log types.Log) (*VerifiableLoadLogTriggerUpkeepOwnershipTransferred, error) { + event := new(VerifiableLoadLogTriggerUpkeepOwnershipTransferred) + if err := _VerifiableLoadLogTriggerUpkeep.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifiableLoadLogTriggerUpkeepUpkeepTopUpIterator struct { + Event *VerifiableLoadLogTriggerUpkeepUpkeepTopUp + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadLogTriggerUpkeepUpkeepTopUpIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadLogTriggerUpkeepUpkeepTopUp) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadLogTriggerUpkeepUpkeepTopUp) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifiableLoadLogTriggerUpkeepUpkeepTopUpIterator) Error() error { + return it.fail +} + +func (it 
*VerifiableLoadLogTriggerUpkeepUpkeepTopUpIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadLogTriggerUpkeepUpkeepTopUp struct { + UpkeepId *big.Int + Amount *big.Int + BlockNum *big.Int + Raw types.Log +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) FilterUpkeepTopUp(opts *bind.FilterOpts) (*VerifiableLoadLogTriggerUpkeepUpkeepTopUpIterator, error) { + + logs, sub, err := _VerifiableLoadLogTriggerUpkeep.contract.FilterLogs(opts, "UpkeepTopUp") + if err != nil { + return nil, err + } + return &VerifiableLoadLogTriggerUpkeepUpkeepTopUpIterator{contract: _VerifiableLoadLogTriggerUpkeep.contract, event: "UpkeepTopUp", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) WatchUpkeepTopUp(opts *bind.WatchOpts, sink chan<- *VerifiableLoadLogTriggerUpkeepUpkeepTopUp) (event.Subscription, error) { + + logs, sub, err := _VerifiableLoadLogTriggerUpkeep.contract.WatchLogs(opts, "UpkeepTopUp") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadLogTriggerUpkeepUpkeepTopUp) + if err := _VerifiableLoadLogTriggerUpkeep.contract.UnpackLog(event, "UpkeepTopUp", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeepFilterer) ParseUpkeepTopUp(log types.Log) (*VerifiableLoadLogTriggerUpkeepUpkeepTopUp, error) { + event := new(VerifiableLoadLogTriggerUpkeepUpkeepTopUp) + if err := _VerifiableLoadLogTriggerUpkeep.contract.UnpackLog(event, "UpkeepTopUp", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func 
(_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeep) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VerifiableLoadLogTriggerUpkeep.abi.Events["LogEmitted"].ID: + return _VerifiableLoadLogTriggerUpkeep.ParseLogEmitted(log) + case _VerifiableLoadLogTriggerUpkeep.abi.Events["LogEmittedAgain"].ID: + return _VerifiableLoadLogTriggerUpkeep.ParseLogEmittedAgain(log) + case _VerifiableLoadLogTriggerUpkeep.abi.Events["OwnershipTransferRequested"].ID: + return _VerifiableLoadLogTriggerUpkeep.ParseOwnershipTransferRequested(log) + case _VerifiableLoadLogTriggerUpkeep.abi.Events["OwnershipTransferred"].ID: + return _VerifiableLoadLogTriggerUpkeep.ParseOwnershipTransferred(log) + case _VerifiableLoadLogTriggerUpkeep.abi.Events["UpkeepTopUp"].ID: + return _VerifiableLoadLogTriggerUpkeep.ParseUpkeepTopUp(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VerifiableLoadLogTriggerUpkeepLogEmitted) Topic() common.Hash { + return common.HexToHash("0x97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf08") +} + +func (VerifiableLoadLogTriggerUpkeepLogEmittedAgain) Topic() common.Hash { + return common.HexToHash("0xc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d") +} + +func (VerifiableLoadLogTriggerUpkeepOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VerifiableLoadLogTriggerUpkeepOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VerifiableLoadLogTriggerUpkeepUpkeepTopUp) Topic() common.Hash { + return common.HexToHash("0x49d4100ab0124eb4a9a65dc4ea08d6412a43f6f05c49194983f5b322bcc0a5c0") +} + +func (_VerifiableLoadLogTriggerUpkeep *VerifiableLoadLogTriggerUpkeep) Address() common.Address { + return 
_VerifiableLoadLogTriggerUpkeep.address +} + +type VerifiableLoadLogTriggerUpkeepInterface interface { + BUCKETSIZE(opts *bind.CallOpts) (uint16, error) + + AddLinkAmount(opts *bind.CallOpts) (*big.Int, error) + + BucketedDelays(opts *bind.CallOpts, arg0 *big.Int, arg1 uint16, arg2 *big.Int) (*big.Int, error) + + Buckets(opts *bind.CallOpts, arg0 *big.Int) (uint16, error) + + CheckCallback(opts *bind.CallOpts, values [][]byte, extraData []byte) (bool, []byte, error) + + CheckGasToBurns(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + Counters(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + Delays(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) + + DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) + + Eligible(opts *bind.CallOpts, upkeepId *big.Int) (bool, error) + + EmittedAgainSig(opts *bind.CallOpts) ([32]byte, error) + + EmittedSig(opts *bind.CallOpts) ([32]byte, error) + + FeedParamKey(opts *bind.CallOpts) (string, error) + + FeedsHex(opts *bind.CallOpts, arg0 *big.Int) (string, error) + + FirstPerformBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + GasLimits(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + GetActiveUpkeepIDsDeployedByThisContract(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetAllActiveUpkeepIDsOnRegistry(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetBucketedDelays(opts *bind.CallOpts, upkeepId *big.Int, bucket uint16) ([]*big.Int, error) + + GetBucketedDelaysLength(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) + + GetDelays(opts *bind.CallOpts, upkeepId *big.Int) ([]*big.Int, error) + + GetDelaysLength(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) + + GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) + + GetLogTriggerConfig(opts *bind.CallOpts, addr common.Address, selector 
uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) ([]byte, error) + + GetMinBalanceForUpkeep(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) + + GetPxDelayLastNPerforms(opts *bind.CallOpts, upkeepId *big.Int, p *big.Int, n *big.Int) (*big.Int, error) + + GetSumDelayInBucket(opts *bind.CallOpts, upkeepId *big.Int, bucket uint16) (*big.Int, *big.Int, error) + + GetSumDelayLastNPerforms(opts *bind.CallOpts, upkeepId *big.Int, n *big.Int) (*big.Int, *big.Int, error) + + GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) + + GetUpkeepInfo(opts *bind.CallOpts, upkeepId *big.Int) (KeeperRegistryBase21UpkeepInfo, error) + + GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + + GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + + Intervals(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + LastTopUpBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + LinkToken(opts *bind.CallOpts) (common.Address, error) + + LogNum(opts *bind.CallOpts) (uint8, error) + + MinBalanceThresholdMultiplier(opts *bind.CallOpts) (uint8, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + PerformDataSizes(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + PerformGasToBurns(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + PreviousPerformBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + Registrar(opts *bind.CallOpts) (common.Address, error) + + Registry(opts *bind.CallOpts) (common.Address, error) + + TimeParamKey(opts *bind.CallOpts) (string, error) + + UpkeepTopUpCheckInterval(opts *bind.CallOpts) (*big.Int, error) + + UseArbitrumBlockNum(opts *bind.CallOpts) (bool, error) + + UseMercury(opts *bind.CallOpts) (bool, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AddFunds(opts *bind.TransactOpts, upkeepId *big.Int, amount *big.Int) (*types.Transaction, error) + + BatchCancelUpkeeps(opts 
*bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) + + BatchPreparingUpkeeps(opts *bind.TransactOpts, upkeepIds []*big.Int, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) + + BatchPreparingUpkeepsSimple(opts *bind.TransactOpts, upkeepIds []*big.Int, log uint8, selector uint8) (*types.Transaction, error) + + BatchRegisterUpkeeps(opts *bind.TransactOpts, number uint8, gasLimit uint32, triggerType uint8, triggerConfig []byte, amount *big.Int, checkGasToBurn *big.Int, performGasToBurn *big.Int) (*types.Transaction, error) + + BatchSendLogs(opts *bind.TransactOpts, log uint8) (*types.Transaction, error) + + BatchSetIntervals(opts *bind.TransactOpts, upkeepIds []*big.Int, interval uint32) (*types.Transaction, error) + + BatchUpdatePipelineData(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) + + BatchWithdrawLinks(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) + + BurnPerformGas(opts *bind.TransactOpts, upkeepId *big.Int, startGas *big.Int, blockNum *big.Int) (*types.Transaction, error) + + CheckLog(opts *bind.TransactOpts, log Log, checkData []byte) (*types.Transaction, error) + + PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) + + SendLog(opts *bind.TransactOpts, upkeepId *big.Int, log uint8) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, newRegistrar common.Address) (*types.Transaction, error) + + SetFeeds(opts *bind.TransactOpts, _feeds []string) (*types.Transaction, error) + + SetInterval(opts *bind.TransactOpts, upkeepId *big.Int, _interval *big.Int) (*types.Transaction, error) + + SetLog(opts *bind.TransactOpts, _log uint8) (*types.Transaction, error) + + SetParamKeys(opts *bind.TransactOpts, _feedParamKey string, _timeParamKey string) (*types.Transaction, error) + + SetPerformDataSize(opts *bind.TransactOpts, upkeepId *big.Int, value *big.Int) (*types.Transaction, 
error) + + SetUpkeepGasLimit(opts *bind.TransactOpts, upkeepId *big.Int, gasLimit uint32) (*types.Transaction, error) + + SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, cfg []byte) (*types.Transaction, error) + + TopUpFund(opts *bind.TransactOpts, upkeepId *big.Int, blockNum *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + UpdateLogTriggerConfig1(opts *bind.TransactOpts, upkeepId *big.Int, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) + + UpdateLogTriggerConfig2(opts *bind.TransactOpts, upkeepId *big.Int, cfg []byte) (*types.Transaction, error) + + UpdateUpkeepPipelineData(opts *bind.TransactOpts, upkeepId *big.Int, pipelineData []byte) (*types.Transaction, error) + + WithdrawLinks(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawLinks0(opts *bind.TransactOpts, upkeepId *big.Int) (*types.Transaction, error) + + Receive(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterLogEmitted(opts *bind.FilterOpts, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (*VerifiableLoadLogTriggerUpkeepLogEmittedIterator, error) + + WatchLogEmitted(opts *bind.WatchOpts, sink chan<- *VerifiableLoadLogTriggerUpkeepLogEmitted, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (event.Subscription, error) + + ParseLogEmitted(log types.Log) (*VerifiableLoadLogTriggerUpkeepLogEmitted, error) + + FilterLogEmittedAgain(opts *bind.FilterOpts, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (*VerifiableLoadLogTriggerUpkeepLogEmittedAgainIterator, error) + + WatchLogEmittedAgain(opts *bind.WatchOpts, sink chan<- *VerifiableLoadLogTriggerUpkeepLogEmittedAgain, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (event.Subscription, error) + + ParseLogEmittedAgain(log types.Log) 
(*VerifiableLoadLogTriggerUpkeepLogEmittedAgain, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifiableLoadLogTriggerUpkeepOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VerifiableLoadLogTriggerUpkeepOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VerifiableLoadLogTriggerUpkeepOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifiableLoadLogTriggerUpkeepOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VerifiableLoadLogTriggerUpkeepOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VerifiableLoadLogTriggerUpkeepOwnershipTransferred, error) + + FilterUpkeepTopUp(opts *bind.FilterOpts) (*VerifiableLoadLogTriggerUpkeepUpkeepTopUpIterator, error) + + WatchUpkeepTopUp(opts *bind.WatchOpts, sink chan<- *VerifiableLoadLogTriggerUpkeepUpkeepTopUp) (event.Subscription, error) + + ParseUpkeepTopUp(log types.Log) (*VerifiableLoadLogTriggerUpkeepUpkeepTopUp, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/verifiable_load_streams_lookup_upkeep_wrapper/verifiable_load_streams_lookup_upkeep_wrapper.go b/core/gethwrappers/generated/verifiable_load_streams_lookup_upkeep_wrapper/verifiable_load_streams_lookup_upkeep_wrapper.go new file mode 100644 index 00000000..fed828b3 --- /dev/null +++ b/core/gethwrappers/generated/verifiable_load_streams_lookup_upkeep_wrapper/verifiable_load_streams_lookup_upkeep_wrapper.go @@ -0,0 +1,2457 @@ +// Code generated - DO NOT EDIT. 
+// This file is a generated binding and any manual changes will be lost. + +package verifiable_load_streams_lookup_upkeep_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type KeeperRegistryBase21UpkeepInfo struct { + Target common.Address + PerformGas uint32 + CheckData []byte + Balance *big.Int + Admin common.Address + MaxValidBlocknumber uint64 + LastPerformedBlockNumber uint32 + AmountSpent *big.Int + Paused bool + OffchainConfig []byte +} + +var VerifiableLoadStreamsLookupUpkeepMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"contractAutomationRegistrar2_1\",\"name\":\"_registrar\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"_useArb\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"feedParamKey\",\"type\":\"string\"},{\"internalType\":\"string[]\",\"name\":\"feeds\",\"type\":\"string[]\"},{\"internalType\":\"string\",\"name\":\"timeParamKey\",\"type\":\"string\"},{\"internalType\":\"uint256\",\"name\":\"time\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"StreamsLookup\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"}],\"name\":\"LogEmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"}],\"name\":\"LogEmittedAgain\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"index
ed\":false,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"}],\"name\":\"UpkeepTopUp\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"BUCKET_SIZE\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"addLinkAmount\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"}],\"name\":\"batchCancelUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"topic0\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic1\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic2\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic3\",\"type\":\"bytes32\"}],\"name\":\"batchPreparingUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"},{\"internalType\":\"uint8\",\"name\":\"log\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uin
t8\"}],\"name\":\"batchPreparingUpkeepsSimple\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"number\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"checkGasToBurn\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"performGasToBurn\",\"type\":\"uint256\"}],\"name\":\"batchRegisterUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"log\",\"type\":\"uint8\"}],\"name\":\"batchSendLogs\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"},{\"internalType\":\"uint32\",\"name\":\"interval\",\"type\":\"uint32\"}],\"name\":\"batchSetIntervals\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"}],\"name\":\"batchUpdatePipelineData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"}],\"name\":\"batchWithdrawLinks\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"bucketedDelays\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"int
ernalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"buckets\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"startGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"}],\"name\":\"burnPerformGas\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes[]\",\"name\":\"values\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"extraData\",\"type\":\"bytes\"}],\"name\":\"checkCallback\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"checkGasToBurns\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"counters\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"delays\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\
"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"dummyMap\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"eligible\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"emittedAgainSig\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"emittedSig\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"feedParamKey\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"feedsHex\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"firstPerformBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"gasLimits\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveUpkeepIDsDeployedByThisContract\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"state
Mutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getAllActiveUpkeepIDsOnRegistry\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"bucket\",\"type\":\"uint16\"}],\"name\":\"getBucketedDelays\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getBucketedDelaysLength\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getDelays\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getDelaysLength\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepID\",\"type\":\"uint256\"}],\"name\":\"getForwarder\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",
\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"topic0\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic1\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic2\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic3\",\"type\":\"bytes32\"}],\"name\":\"getLogTriggerConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"logTrigger\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"p\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"}],\"name\":\"getPxDelayLastNPerforms\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"bucket\",\"type\":\"uint16\"}],\"name\":\"getSumDelayInBucket\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"}],\"name\":\"getSumDelayLastNPerforms\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"in
puts\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getTriggerType\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepInfo\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknumber\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"lastPerformedBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"amountSpent\",\"type\":\"uint96\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"internalType\":\"structKeeperRegistryBase2_1.UpkeepInfo\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepPrivilegeConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepTriggerConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"intervals\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"t
ype\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"lastTopUpBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"linkToken\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"minBalanceThresholdMultiplier\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"performDataSizes\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"performGasToBurns\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"previousPerformBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"registrar\",\"outputs\":[{\"internalType\":\"contractAutomationRegistrar2_1\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"registry\",\"outputs\":[{\"internalType\":\"contractIKeeperR
egistryMaster\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint8\",\"name\":\"log\",\"type\":\"uint8\"}],\"name\":\"sendLog\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractAutomationRegistrar2_1\",\"name\":\"newRegistrar\",\"type\":\"address\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string[]\",\"name\":\"_feeds\",\"type\":\"string[]\"}],\"name\":\"setFeeds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_interval\",\"type\":\"uint256\"}],\"name\":\"setInterval\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"_feedParamKey\",\"type\":\"string\"},{\"internalType\":\"string\",\"name\":\"_timeParamKey\",\"type\":\"string\"}],\"name\":\"setParamKeys\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"setPerformDataSize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"setUpkeepGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"cfg\",\"type\":\"bytes\"}],\"name\":\"setUpkeepPrivilegeConfig\",\"outpu
ts\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"timeParamKey\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"}],\"name\":\"topUpFund\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"topic0\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic1\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic2\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic3\",\"type\":\"bytes32\"}],\"name\":\"updateLogTriggerConfig1\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"cfg\",\"type\":\"bytes\"}],\"name\":\"updateLogTriggerConfig2\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"pipelineData\",\"type\":\"bytes\"}],\"name\":\"updateUpkeepPipelineData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepTopUpCheckInterval\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"vi
ew\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"useArbitrumBlockNum\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawLinks\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"withdrawLinks\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", + Bin: "0x7f97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf086080527fc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d60a0526005601455601580546001600160681b0319166c140000000002c68af0bb140000179055606460e0526101c0604052604261014081815261010091829190620061d161016039815260200160405180608001604052806042815260200162006213604291399052620000be906016906002620003c7565b506040805180820190915260098152680cccacac892c890caf60bb1b6020820152601790620000ee908262000543565b5060408051808201909152600b81526a313637b1b5a73ab6b132b960a91b602082015260189062000120908262000543565b503480156200012e57600080fd5b506040516200625538038062006255833981016040819052620001519162000625565b81813380600081620001aa5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620001dd57620001dd816200031c565b5050601180546001600160a01b0319166001600160a01b038516908117909155604080516330fe427560e21b815281516000945063c3f909d4926004808401939192918290030181865afa1580156200023a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062000260919062000668565b50601380546001600160a01b0319166001600160a01b038381169190911790915560115460408051631b6b6d2360e01b81529051939450911691631b6b6d23916004808201926020929091908290030181865afa158015620002c6573d6000803e3d6000fd5b50505
0506040513d601f19601f82011682018060405250810190620002ec919062000699565b601280546001600160a01b0319166001600160a01b039290921691909117905550151560c05250620006c0915050565b336001600160a01b03821603620003765760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620001a1565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b82805482825590600052602060002090810192821562000412579160200282015b8281111562000412578251829062000401908262000543565b5091602001919060010190620003e8565b506200042092915062000424565b5090565b80821115620004205760006200043b828262000445565b5060010162000424565b5080546200045390620004b4565b6000825580601f1062000464575050565b601f01602090049060005260206000209081019062000484919062000487565b50565b5b8082111562000420576000815560010162000488565b634e487b7160e01b600052604160045260246000fd5b600181811c90821680620004c957607f821691505b602082108103620004ea57634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200053e57600081815260208120601f850160051c81016020861015620005195750805b601f850160051c820191505b818110156200053a5782815560010162000525565b5050505b505050565b81516001600160401b038111156200055f576200055f6200049e565b6200057781620005708454620004b4565b84620004f0565b602080601f831160018114620005af5760008415620005965750858301515b600019600386901b1c1916600185901b1785556200053a565b600085815260208120601f198616915b82811015620005e057888601518255948401946001909101908401620005bf565b5085821015620005ff5787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b6001600160a01b03811681146200048457600080fd5b600080604083850312156200063957600080fd5b825162000646816200060f565b602084015190925080151581146200065d57600080fd5b809150509250929050565b600080604083850312156200067c57600080fd5b825162000689816200060f565b6020939093015192949293505050565b600060208284031215620006ac57600080fd5b815162000
6b9816200060f565b9392505050565b60805160a05160c05160e051615abb62000716600039600081816105680152611f7a0152600081816109bc0152613ca701526000818161082701526136f5015260008181610d7901526136ca0152615abb6000f3fe6080604052600436106104d55760003560e01c806379ea994311610279578063a6b594751161015e578063d6051a72116100d6578063e45530831161008a578063fa333dfb1161006f578063fa333dfb14610f88578063fba7ffa31461103b578063fcdc1f631461106857600080fd5b8063e455308314610f52578063f2fde38b14610f6857600080fd5b8063daee1aeb116100bb578063daee1aeb14610ee5578063dbef701e14610f05578063e0114adb14610f2557600080fd5b8063d6051a7214610ea5578063da6cba4714610ec557600080fd5b8063b657bc9c1161012d578063c041982211610112578063c041982214610e50578063c98f10b014610e70578063d4c2490014610e8557600080fd5b8063b657bc9c14610e10578063becde0e114610e3057600080fd5b8063a6b5947514610d9b578063a72aa27e14610dbb578063af953a4a14610ddb578063afb28d1f14610dfb57600080fd5b8063924ca578116101f15780639b429354116101c05780639d385eaa116101a55780639d385eaa14610d275780639d6f1cc714610d47578063a654824814610d6757600080fd5b80639b42935414610cc95780639b51fb0d14610cf657600080fd5b8063924ca57814610c3f578063948108f714610c5f57806396cebc7c14610c7f5780639ac542eb14610c9f57600080fd5b80638340507c11610248578063873c75861161022d578063873c758614610bc75780638da5cb5b14610be75780638fcb3fba14610c1257600080fd5b80638340507c14610b8757806386e330af14610ba757600080fd5b806379ea994314610afa5780637b10399914610b1a5780637e7a46dc14610b475780638243444a14610b6757600080fd5b806345d2ec17116103ba57806360457ff5116103325780637145f11b116102e657806376721303116102cb5780637672130314610a98578063776898c814610ac557806379ba509714610ae557600080fd5b80637145f11b14610a3b57806373644cce14610a6b57600080fd5b8063642f6cef11610317578063642f6cef146109aa57806369cdbadb146109ee5780636e04ff0d14610a1b57600080fd5b806360457ff514610958578063636092e81461098557600080fd5b80635147cd591161038957806357970e931161036e57806357970e93146108f65780635d4ee7f3146109235780635f17e6161461093857600080fd5b80635147cd59146108a457806351c98be3146108d
657600080fd5b806345d2ec17146107e8578063469820931461081557806346e7a63e146108495780634b56a42e1461087657600080fd5b806320e3dbd41161044d5780632a9032d31161041c578063328ffd1111610401578063328ffd111461077b5780633ebe8d6c146107a85780634585e33b146107c857600080fd5b80632a9032d3146107095780632b20e3971461072957600080fd5b806320e3dbd41461067c5780632636aecf1461069c57806328c4b57b146106bc57806329e0a841146106dc57600080fd5b806319d97a94116104a45780631e010439116104895780631e010439146105ea578063206c32e814610627578063207b65161461065c57600080fd5b806319d97a941461059d5780631cdde251146105ca57600080fd5b806306c1cc00146104e1578063077ac621146105035780630b7d33e61461053657806312c550271461055657600080fd5b366104dc57005b600080fd5b3480156104ed57600080fd5b506105016104fc366004614429565b611095565b005b34801561050f57600080fd5b5061052361051e3660046144dc565b6112e4565b6040519081526020015b60405180910390f35b34801561054257600080fd5b50610501610551366004614511565b611322565b34801561056257600080fd5b5061058a7f000000000000000000000000000000000000000000000000000000000000000081565b60405161ffff909116815260200161052d565b3480156105a957600080fd5b506105bd6105b8366004614558565b6113b0565b60405161052d91906145df565b3480156105d657600080fd5b506105016105e5366004614614565b61146d565b3480156105f657600080fd5b5061060a610605366004614558565b6115aa565b6040516bffffffffffffffffffffffff909116815260200161052d565b34801561063357600080fd5b50610647610642366004614679565b61163f565b6040805192835260208301919091520161052d565b34801561066857600080fd5b506105bd610677366004614558565b6116c2565b34801561068857600080fd5b506105016106973660046146a5565b61171a565b3480156106a857600080fd5b506105016106b7366004614707565b6118e4565b3480156106c857600080fd5b506105236106d7366004614781565b611bad565b3480156106e857600080fd5b506106fc6106f7366004614558565b611c18565b60405161052d91906147ad565b34801561071557600080fd5b506105016107243660046148ee565b611d1d565b34801561073557600080fd5b506011546107569073ffffffffffffffffffffffffffffffffffffffff1681565b60405173fffffffffffffffffffffffffffffffff
fffffff909116815260200161052d565b34801561078757600080fd5b50610523610796366004614558565b60036020526000908152604090205481565b3480156107b457600080fd5b506105236107c3366004614558565b611dfe565b3480156107d457600080fd5b506105016107e3366004614972565b611e67565b3480156107f457600080fd5b50610808610803366004614679565b612086565b60405161052d91906149a8565b34801561082157600080fd5b506105237f000000000000000000000000000000000000000000000000000000000000000081565b34801561085557600080fd5b50610523610864366004614558565b600a6020526000908152604090205481565b34801561088257600080fd5b50610896610891366004614a10565b6120f5565b60405161052d929190614ada565b3480156108b057600080fd5b506108c46108bf366004614558565b612149565b60405160ff909116815260200161052d565b3480156108e257600080fd5b506105016108f1366004614af5565b6121dd565b34801561090257600080fd5b506012546107569073ffffffffffffffffffffffffffffffffffffffff1681565b34801561092f57600080fd5b50610501612281565b34801561094457600080fd5b50610501610953366004614b4c565b6123bc565b34801561096457600080fd5b50610523610973366004614558565b60076020526000908152604090205481565b34801561099157600080fd5b5060155461060a906bffffffffffffffffffffffff1681565b3480156109b657600080fd5b506109de7f000000000000000000000000000000000000000000000000000000000000000081565b604051901515815260200161052d565b3480156109fa57600080fd5b50610523610a09366004614558565b60086020526000908152604090205481565b348015610a2757600080fd5b50610896610a36366004614972565b612489565b348015610a4757600080fd5b506109de610a56366004614558565b600b6020526000908152604090205460ff1681565b348015610a7757600080fd5b50610523610a86366004614558565b6000908152600c602052604090205490565b348015610aa457600080fd5b50610523610ab3366004614558565b60046020526000908152604090205481565b348015610ad157600080fd5b506109de610ae0366004614558565b6126b2565b348015610af157600080fd5b50610501612704565b348015610b0657600080fd5b50610756610b15366004614558565b612801565b348015610b2657600080fd5b506013546107569073ffffffffffffffffffffffffffffffffffffffff1681565b348015610b5357600080fd5
b50610501610b62366004614b6e565b612895565b348015610b7357600080fd5b50610501610b82366004614b6e565b612926565b348015610b9357600080fd5b50610501610ba2366004614bba565b612980565b348015610bb357600080fd5b50610501610bc2366004614c07565b61299e565b348015610bd357600080fd5b50610808610be2366004614b4c565b6129b1565b348015610bf357600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff16610756565b348015610c1e57600080fd5b50610523610c2d366004614558565b60056020526000908152604090205481565b348015610c4b57600080fd5b50610501610c5a366004614b4c565b612a6e565b348015610c6b57600080fd5b50610501610c7a366004614cb8565b612cb3565b348015610c8b57600080fd5b50610501610c9a366004614ce8565b612dcb565b348015610cab57600080fd5b506015546108c4906c01000000000000000000000000900460ff1681565b348015610cd557600080fd5b50610501610ce4366004614b4c565b60009182526009602052604090912055565b348015610d0257600080fd5b5061058a610d11366004614558565b600e6020526000908152604090205461ffff1681565b348015610d3357600080fd5b50610808610d42366004614558565b612fd5565b348015610d5357600080fd5b506105bd610d62366004614558565b613037565b348015610d7357600080fd5b506105237f000000000000000000000000000000000000000000000000000000000000000081565b348015610da757600080fd5b50610501610db6366004614781565b6130e3565b348015610dc757600080fd5b50610501610dd6366004614d05565b61314c565b348015610de757600080fd5b50610501610df6366004614558565b6131f7565b348015610e0757600080fd5b506105bd61327d565b348015610e1c57600080fd5b5061060a610e2b366004614558565b61328a565b348015610e3c57600080fd5b50610501610e4b3660046148ee565b6132e2565b348015610e5c57600080fd5b50610808610e6b366004614b4c565b61337c565b348015610e7c57600080fd5b506105bd613479565b348015610e9157600080fd5b50610501610ea0366004614d2a565b613486565b348015610eb157600080fd5b50610647610ec0366004614b4c565b613505565b348015610ed157600080fd5b50610501610ee0366004614d4f565b61356e565b348015610ef157600080fd5b50610501610f003660046148ee565b6138d5565b348015610f1157600080fd5b50610523610f20366004614b4c565b6139a0565b348015610f3157600080fd5b50610523610f4036600
4614558565b60096020526000908152604090205481565b348015610f5e57600080fd5b5061052360145481565b348015610f7457600080fd5b50610501610f833660046146a5565b6139d1565b348015610f9457600080fd5b506105bd610fa3366004614db7565b6040805160c0808201835273ffffffffffffffffffffffffffffffffffffffff9890981680825260ff97881660208084019182528385019889526060808501988952608080860198895260a095860197885286519283019490945291519099168985015296519688019690965293519486019490945290519184019190915251828401528051808303909301835260e0909101905290565b34801561104757600080fd5b50610523611056366004614558565b60066020526000908152604090205481565b34801561107457600080fd5b50610523611083366004614558565b60026020526000908152604090205481565b6040805161018081018252600461014082019081527f746573740000000000000000000000000000000000000000000000000000000061016083015281528151602081810184526000808352818401929092523083850181905263ffffffff8b166060850152608084015260ff808a1660a08501528451808301865283815260c085015260e0840189905284519182019094529081526101008201526bffffffffffffffffffffffff8516610120820152601254601154919273ffffffffffffffffffffffffffffffffffffffff9182169263095ea7b392169061117b908c1688614e3f565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e085901b16815273ffffffffffffffffffffffffffffffffffffffff90921660048301526bffffffffffffffffffffffff1660248201526044016020604051808303816000875af11580156111f9573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061121d9190614e83565b5060008860ff1667ffffffffffffffff81111561123c5761123c6142cb565b604051908082528060200260200182016040528015611265578160200160208202803683370190505b50905060005b8960ff168160ff1610156112d8576000611284846139e5565b905080838360ff168151811061129c5761129c614e9e565b602090810291909101810191909152600091825260088152604080832088905560079091529020849055806112d081614ecd565b91505061126b565b50505050505050505050565b600d602052826000526040600020602052816000526040600020818154811061130c57600080fd5b9060005260206000200160009250925050505481565b6
013546040517f0b7d33e600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690630b7d33e69061137a9085908590600401614eec565b600060405180830381600087803b15801561139457600080fd5b505af11580156113a8573d6000803e3d6000fd5b505050505050565b6013546040517f19d97a940000000000000000000000000000000000000000000000000000000081526004810183905260609173ffffffffffffffffffffffffffffffffffffffff16906319d97a94906024015b600060405180830381865afa158015611421573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526114679190810190614f52565b92915050565b6013546040517ffa333dfb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff888116600483015260ff8816602483015260448201879052606482018690526084820185905260a4820184905290911690634ee88d35908990309063fa333dfb9060c401600060405180830381865afa15801561150c573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526115529190810190614f52565b6040518363ffffffff1660e01b815260040161156f929190614eec565b600060405180830381600087803b15801561158957600080fd5b505af115801561159d573d6000803e3d6000fd5b5050505050505050505050565b6013546040517f1e0104390000000000000000000000000000000000000000000000000000000081526004810183905260009173ffffffffffffffffffffffffffffffffffffffff1690631e010439906024015b602060405180830381865afa15801561161b573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906114679190614f92565b6000828152600d6020908152604080832061ffff8516845282528083208054825181850281018501909352808352849384939291908301828280156116a357602002820191906000526020600020905b81548152602001906001019080831161168f575b505050505090506116b5818251613ab3565b92509250505b9250929050565b6013546040517f207b65160000000000000000000000000000000000000000000000000000000081526004810183905260609173ffffffffffffffffffffffffffffffffffffffff1
69063207b651690602401611404565b601180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8316908117909155604080517fc3f909d400000000000000000000000000000000000000000000000000000000815281516000939263c3f909d492600480820193918290030181865afa1580156117b0573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906117d49190614fba565b50601380547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691909117909155601154604080517f1b6b6d230000000000000000000000000000000000000000000000000000000081529051939450911691631b6b6d23916004808201926020929091908290030181865afa158015611877573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061189b9190614fe8565b601280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff929092169190911790555050565b8560005b81811015611ba257600089898381811061190457611904614e9e565b9050602002013590503073ffffffffffffffffffffffffffffffffffffffff16637e7a46dc828360405160200161193d91815260200190565b6040516020818303038152906040526040518363ffffffff1660e01b8152600401611969929190614eec565b600060405180830381600087803b15801561198357600080fd5b505af1158015611997573d6000803e3d6000fd5b50506013546040517f5147cd59000000000000000000000000000000000000000000000000000000008152600481018590526000935073ffffffffffffffffffffffffffffffffffffffff9091169150635147cd5990602401602060405180830381865afa158015611a0d573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611a319190615005565b90508060ff16600103611b8d576040517ffa333dfb000000000000000000000000000000000000000000000000000000008152306004820181905260ff8b166024830152604482018a9052606482018890526084820188905260a4820187905260009163fa333dfb9060c401600060405180830381865afa158015611aba573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405
2611b009190810190614f52565b6013546040517f4ee88d3500000000000000000000000000000000000000000000000000000000815291925073ffffffffffffffffffffffffffffffffffffffff1690634ee88d3590611b599086908590600401614eec565b600060405180830381600087803b158015611b7357600080fd5b505af1158015611b87573d6000803e3d6000fd5b50505050505b50508080611b9a90615022565b9150506118e8565b505050505050505050565b6000838152600c602090815260408083208054825181850281018501909352808352611c0e93830182828015611c0257602002820191906000526020600020905b815481526020019060010190808311611bee575b50505050508484613b38565b90505b9392505050565b604080516101408101825260008082526020820181905260609282018390528282018190526080820181905260a0820181905260c0820181905260e082018190526101008201526101208101919091526013546040517fc7c3a19a0000000000000000000000000000000000000000000000000000000081526004810184905273ffffffffffffffffffffffffffffffffffffffff9091169063c7c3a19a90602401600060405180830381865afa158015611cd7573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052611467919081019061507d565b8060005b818160ff161015611df85760135473ffffffffffffffffffffffffffffffffffffffff1663c8048022858560ff8516818110611d5f57611d5f614e9e565b905060200201356040518263ffffffff1660e01b8152600401611d8491815260200190565b600060405180830381600087803b158015611d9e57600080fd5b505af1158015611db2573d6000803e3d6000fd5b50505050611de584848360ff16818110611dce57611dce614e9e565b90506020020135600f613c9790919063ffffffff16565b5080611df081614ecd565b915050611d21565b50505050565b6000818152600e602052604081205461ffff1681805b8261ffff168161ffff1611611e5f576000858152600d6020908152604080832061ffff85168452909152902054611e4b908361519c565b915080611e57816151af565b915050611e14565b509392505050565b60005a9050600080611e7b84860186614a10565b91509150600081806020019051810190611e9591906151d0565b60008181526005602090815260408083205460049092528220549293509190611ebc613ca3565b905082600003611edc5760008481526005602052604090208190556120375
65b600084815260036020526040812054611ef584846151e9565b611eff91906151e9565b6000868152600e6020908152604080832054600d835281842061ffff909116808552908352818420805483518186028101860190945280845295965090949192909190830182828015611f7157602002820191906000526020600020905b815481526020019060010190808311611f5d575b505050505090507f000000000000000000000000000000000000000000000000000000000000000061ffff16815103611fec5781611fae816151af565b6000898152600e6020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001661ffff83161790559250505b506000868152600d6020908152604080832061ffff909416835292815282822080546001818101835591845282842001859055888352600c8252928220805493840181558252902001555b60008481526006602052604081205461205190600161519c565b600086815260066020908152604080832084905560049091529020839055905061207b8583612a6e565b6112d88589846130e3565b6000828152600d6020908152604080832061ffff851684528252918290208054835181840281018401909452808452606093928301828280156120e857602002820191906000526020600020905b8154815260200190600101908083116120d4575b5050505050905092915050565b600060606000848460405160200161210e9291906151fc565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00181529190526001969095509350505050565b6013546040517f5147cd590000000000000000000000000000000000000000000000000000000081526004810183905260009173ffffffffffffffffffffffffffffffffffffffff1690635147cd5990602401602060405180830381865afa1580156121b9573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906114679190615005565b8160005b8181101561227a5730635f17e61686868481811061220157612201614e9e565b90506020020135856040518363ffffffff1660e01b815260040161223592919091825263ffffffff16602082015260400190565b600060405180830381600087803b15801561224f57600080fd5b505af1158015612263573d6000803e3d6000fd5b50505050808061227290615022565b9150506121e1565b5050505050565b612289613d45565b6012546040517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015260009173fffffffffffff
fffffffffffffffffffffffffff16906370a0823190602401602060405180830381865afa1580156122f8573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061231c91906151d0565b6012546040517fa9059cbb0000000000000000000000000000000000000000000000000000000081523360048201526024810183905291925073ffffffffffffffffffffffffffffffffffffffff169063a9059cbb906044016020604051808303816000875af1158015612394573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906123b89190614e83565b5050565b60008281526003602090815260408083208490556005825280832083905560068252808320839055600c90915281206123f4916141ca565b6000828152600e602052604081205461ffff16905b8161ffff168161ffff1611612450576000848152600d6020908152604080832061ffff85168452909152812061243e916141ca565b80612448816151af565b915050612409565b5050506000908152600e6020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000169055565b6000606060005a905060006124a085870187614558565b60008181526009602090815260408083205460089092528220549293509190838367ffffffffffffffff8111156124d9576124d96142cb565b6040519080825280601f01601f191660200182016040528015612503576020820181803683370190505b50604051602001612515929190614eec565b60405160208183030381529060405290506000612530613ca3565b9050600061253d866126b2565b90505b835a61254c90896151e9565b6125589061271061519c565b10156125995781406000908152600b6020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055612540565b806125b15760008398509850505050505050506116bb565b6040517f6665656449644865780000000000000000000000000000000000000000000000602082015260009060290160405160208183030381529060405280519060200120601760405160200161260891906152e3565b604051602081830303815290604052805190602001200361262a57508161262d565b50425b601760166018838a60405160200161264791815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152908290527ff055e4a20000000000000000000000000000000000000000000000000000000082526126a99594939291600401615412565b604
05180910390fd5b60008181526005602052604081205481036126cf57506001919050565b6000828152600360209081526040808320546004909252909120546126f2613ca3565b6126fc91906151e9565b101592915050565b60015473ffffffffffffffffffffffffffffffffffffffff163314612785576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016126a9565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6013546040517f79ea99430000000000000000000000000000000000000000000000000000000081526004810183905260009173ffffffffffffffffffffffffffffffffffffffff16906379ea994390602401602060405180830381865afa158015612871573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906114679190614fe8565b6013546040517fcd7f71b500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091169063cd7f71b5906128ef908690869086906004016154d5565b600060405180830381600087803b15801561290957600080fd5b505af115801561291d573d6000803e3d6000fd5b50505050505050565b6013546040517f4ee88d3500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690634ee88d35906128ef908690869086906004016154d5565b601761298c838261556f565b506018612999828261556f565b505050565b80516123b89060169060208401906141e8565b6013546040517f06e3b632000000000000000000000000000000000000000000000000000000008152600481018490526024810183905260609173ffffffffffffffffffffffffffffffffffffffff16906306e3b63290604401600060405180830381865afa158015612a28573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052611c119190810190615689565b601454600083815260026020526040902054612a8a90836151e9565b11156123b85760135
46040517fc7c3a19a0000000000000000000000000000000000000000000000000000000081526004810184905260009173ffffffffffffffffffffffffffffffffffffffff169063c7c3a19a90602401600060405180830381865afa158015612b00573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052612b46919081019061507d565b6013546040517fb657bc9c0000000000000000000000000000000000000000000000000000000081526004810186905291925060009173ffffffffffffffffffffffffffffffffffffffff9091169063b657bc9c90602401602060405180830381865afa158015612bbb573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612bdf9190614f92565b601554909150612c039082906c01000000000000000000000000900460ff16614e3f565b6bffffffffffffffffffffffff1682606001516bffffffffffffffffffffffff161015611df857601554612c469085906bffffffffffffffffffffffff16612cb3565b60008481526002602090815260409182902085905560155482518781526bffffffffffffffffffffffff909116918101919091529081018490527f49d4100ab0124eb4a9a65dc4ea08d6412a43f6f05c49194983f5b322bcc0a5c09060600160405180910390a150505050565b6012546013546040517f095ea7b300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff91821660048201526bffffffffffffffffffffffff8416602482015291169063095ea7b3906044016020604051808303816000875af1158015612d3b573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612d5f9190614e83565b506013546040517f948108f7000000000000000000000000000000000000000000000000000000008152600481018490526bffffffffffffffffffffffff8316602482015273ffffffffffffffffffffffffffffffffffffffff9091169063948108f79060440161137a565b6040517fc04198220000000000000000000000000000000000000000000000000000000081526000600482018190526024820181905290309063c041982290604401600060405180830381865afa158015612e2a573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052612e709190810190615689565b80519091506000612e7f6
13ca3565b905060005b8281101561227a576000848281518110612ea057612ea0614e9e565b60209081029190910101516013546040517f5147cd590000000000000000000000000000000000000000000000000000000081526004810183905291925060009173ffffffffffffffffffffffffffffffffffffffff90911690635147cd5990602401602060405180830381865afa158015612f20573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612f449190615005565b90508060ff16600103612fc0578660ff16600003612f90576040513090859084907f97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf0890600090a4612fc0565b6040513090859084907fc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d90600090a45b50508080612fcd90615022565b915050612e84565b6000818152600c602090815260409182902080548351818402810184019094528084526060939283018282801561302b57602002820191906000526020600020905b815481526020019060010190808311613017575b50505050509050919050565b6016818154811061304757600080fd5b90600052602060002001600091509050805461306290615290565b80601f016020809104026020016040519081016040528092919081815260200182805461308e90615290565b80156130db5780601f106130b0576101008083540402835291602001916130db565b820191906000526020600020905b8154815290600101906020018083116130be57829003601f168201915b505050505081565b6000838152600760205260409020545b805a6130ff90856151e9565b61310b9061271061519c565b1015611df85781406000908152600b6020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690556130f3565b6013546040517fa72aa27e0000000000000000000000000000000000000000000000000000000081526004810184905263ffffffff8316602482015273ffffffffffffffffffffffffffffffffffffffff9091169063a72aa27e90604401600060405180830381600087803b1580156131c457600080fd5b505af11580156131d8573d6000803e3d6000fd5b505050600092835250600a602052604090912063ffffffff9091169055565b6013546040517f744bfe610000000000000000000000000000000000000000000000000000000081526004810183905230602482015273ffffffffffffffffffffffffffffffffffffffff9091169063744bfe6190604401600060405180830381600087803b15801561326
957600080fd5b505af115801561227a573d6000803e3d6000fd5b6017805461306290615290565b6013546040517fb657bc9c0000000000000000000000000000000000000000000000000000000081526004810183905260009173ffffffffffffffffffffffffffffffffffffffff169063b657bc9c906024016115fe565b8060005b818163ffffffff161015611df8573063af953a4a858563ffffffff851681811061331257613312614e9e565b905060200201356040518263ffffffff1660e01b815260040161333791815260200190565b600060405180830381600087803b15801561335157600080fd5b505af1158015613365573d6000803e3d6000fd5b5050505080806133749061571a565b9150506132e6565b6060600061338a600f613dc8565b90508084106133c5576040517f1390f2a100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b826000036133da576133d784826151e9565b92505b60008367ffffffffffffffff8111156133f5576133f56142cb565b60405190808252806020026020018201604052801561341e578160200160208202803683370190505b50905060005b8481101561347057613441613439828861519c565b600f90613dd2565b82828151811061345357613453614e9e565b60209081029190910101528061346881615022565b915050613424565b50949350505050565b6018805461306290615290565b6000613490613ca3565b90508160ff166000036134d1576040513090829085907f97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf0890600090a4505050565b6040513090829085907fc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d90600090a4505050565b6000828152600c6020908152604080832080548251818502810185019093528083528493849392919083018282801561355d57602002820191906000526020600020905b815481526020019060010190808311613549575b505050505090506116b58185613ab3565b8260005b818110156113a857600086868381811061358e5761358e614e9e565b9050602002013590503073ffffffffffffffffffffffffffffffffffffffff16637e7a46dc82836040516020016135c791815260200190565b6040516020818303038152906040526040518363ffffffff1660e01b81526004016135f3929190614eec565b600060405180830381600087803b15801561360d57600080fd5b505af1158015613621573d6000803e3d6000fd5b50506013546040517f5147cd59000000000000000000000000000000000000000000000000000000008
152600481018590526000935073ffffffffffffffffffffffffffffffffffffffff9091169150635147cd5990602401602060405180830381865afa158015613697573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906136bb9190615005565b90508060ff166001036138c0577f000000000000000000000000000000000000000000000000000000000000000060ff87161561371557507f00000000000000000000000000000000000000000000000000000000000000005b60003073ffffffffffffffffffffffffffffffffffffffff1663fa333dfb3089858860405160200161374991815260200190565b60405160208183030381529060405261376190615733565b60405160e086901b7fffffffff0000000000000000000000000000000000000000000000000000000016815273ffffffffffffffffffffffffffffffffffffffff909416600485015260ff90921660248401526044830152606482015260006084820181905260a482015260c401600060405180830381865afa1580156137ec573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526138329190810190614f52565b6013546040517f4ee88d3500000000000000000000000000000000000000000000000000000000815291925073ffffffffffffffffffffffffffffffffffffffff1690634ee88d359061388b9087908590600401614eec565b600060405180830381600087803b1580156138a557600080fd5b505af11580156138b9573d6000803e3d6000fd5b5050505050505b505080806138cd90615022565b915050613572565b8060005b81811015611df85760008484838181106138f5576138f5614e9e565b9050602002013590503073ffffffffffffffffffffffffffffffffffffffff16637e7a46dc828360405160200161392e91815260200190565b6040516020818303038152906040526040518363ffffffff1660e01b815260040161395a929190614eec565b600060405180830381600087803b15801561397457600080fd5b505af1158015613988573d6000803e3d6000fd5b5050505050808061399890615022565b9150506138d9565b600c60205281600052604060002081815481106139bc57600080fd5b90600052602060002001600091509150505481565b6139d9613d45565b6139e281613dde565b50565b6011546040517f3f678e11000000000000000000000000000000000000000000000000000000008152600091829173ffffffffffffffffffffffffffffffffffffffff90911690633f678e11906
13a40908690600401615775565b6020604051808303816000875af1158015613a5f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613a8391906151d0565b9050613a90600f82613ed3565b506060909201516000838152600a6020526040902063ffffffff90911690555090565b815160009081908190841580613ac95750808510155b15613ad2578094505b60008092505b85831015613b2e57866001613aed85856151e9565b613af791906151e9565b81518110613b0757613b07614e9e565b602002602001015181613b1a919061519c565b905082613b2681615022565b935050613ad8565b9694955050505050565b82516000908190831580613b4c5750808410155b15613b55578093505b60008467ffffffffffffffff811115613b7057613b706142cb565b604051908082528060200260200182016040528015613b99578160200160208202803683370190505b509050600092505b84831015613c0757866001613bb685856151e9565b613bc091906151e9565b81518110613bd057613bd0614e9e565b6020026020010151818481518110613bea57613bea614e9e565b602090810291909101015282613bff81615022565b935050613ba1565b613c2081600060018451613c1b91906151e9565b613edf565b85606403613c59578060018251613c3791906151e9565b81518110613c4757613c47614e9e565b60200260200101519350505050611c11565b806064825188613c6991906158c7565b613c739190615933565b81518110613c8357613c83614e9e565b602002602001015193505050509392505050565b6000611c118383614057565b60007f000000000000000000000000000000000000000000000000000000000000000015613d4057606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa158015613d17573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613d3b91906151d0565b905090565b504390565b60005473ffffffffffffffffffffffffffffffffffffffff163314613dc6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016126a9565b565b6000611467825490565b6000611c118383614151565b3373ffffffffffffffffffffffffffffffffffffffff821603613e5d576040517f08c379a000000000000000000000000000000000000000000000000
000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016126a9565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000611c11838361417b565b8181808203613eef575050505050565b6000856002613efe8787615947565b613f089190615967565b613f1290876159cf565b81518110613f2257613f22614e9e565b602002602001015190505b818313614031575b80868481518110613f4857613f48614e9e565b60200260200101511015613f685782613f60816159f7565b935050613f35565b858281518110613f7a57613f7a614e9e565b6020026020010151811015613f9b5781613f9381615a28565b925050613f68565b81831361402c57858281518110613fb457613fb4614e9e565b6020026020010151868481518110613fce57613fce614e9e565b6020026020010151878581518110613fe857613fe8614e9e565b6020026020010188858151811061400157614001614e9e565b6020908102919091010191909152528261401a816159f7565b935050818061402890615a28565b9250505b613f2d565b8185121561404457614044868684613edf565b838312156113a8576113a8868486613edf565b6000818152600183016020526040812054801561414057600061407b6001836151e9565b855490915060009061408f906001906151e9565b90508181146140f45760008660000182815481106140af576140af614e9e565b90600052602060002001549050808760000184815481106140d2576140d2614e9e565b6000918252602080832090910192909255918252600188019052604090208390555b855486908061410557614105615a7f565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505050611467565b6000915050611467565b5092915050565b600082600001828154811061416857614168614e9e565b9060005260206000200154905092915050565b60008181526001830160205260408120546141c257508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155611467565b506000611467565b50805460008255906000526020600020908101906139e2919061423e565b82805482825590600052602060002090810192821561422e57916
0200282015b8281111561422e578251829061421e908261556f565b5091602001919060010190614208565b5061423a929150614253565b5090565b5b8082111561423a576000815560010161423f565b8082111561423a5760006142678282614270565b50600101614253565b50805461427c90615290565b6000825580601f1061428c575050565b601f0160209004906000526020600020908101906139e2919061423e565b60ff811681146139e257600080fd5b63ffffffff811681146139e257600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051610140810167ffffffffffffffff8111828210171561431e5761431e6142cb565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff8111828210171561436b5761436b6142cb565b604052919050565b600067ffffffffffffffff82111561438d5761438d6142cb565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600082601f8301126143ca57600080fd5b81356143dd6143d882614373565b614324565b8181528460208386010111156143f257600080fd5b816020850160208301376000918101602001919091529392505050565b6bffffffffffffffffffffffff811681146139e257600080fd5b600080600080600080600060e0888a03121561444457600080fd5b873561444f816142aa565b9650602088013561445f816142b9565b9550604088013561446f816142aa565b9450606088013567ffffffffffffffff81111561448b57600080fd5b6144978a828b016143b9565b94505060808801356144a88161440f565b9699959850939692959460a0840135945060c09093013592915050565b803561ffff811681146144d757600080fd5b919050565b6000806000606084860312156144f157600080fd5b83359250614501602085016144c5565b9150604084013590509250925092565b6000806040838503121561452457600080fd5b82359150602083013567ffffffffffffffff81111561454257600080fd5b61454e858286016143b9565b9150509250929050565b60006020828403121561456a57600080fd5b5035919050565b60005b8381101561458c578181015183820152602001614574565b50506000910152565b600081518084526145ad816020860160208601614571565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000611c1160208
30184614595565b73ffffffffffffffffffffffffffffffffffffffff811681146139e257600080fd5b600080600080600080600060e0888a03121561462f57600080fd5b873596506020880135614641816145f2565b95506040880135614651816142aa565b969995985095966060810135965060808101359560a0820135955060c0909101359350915050565b6000806040838503121561468c57600080fd5b8235915061469c602084016144c5565b90509250929050565b6000602082840312156146b757600080fd5b8135611c11816145f2565b60008083601f8401126146d457600080fd5b50813567ffffffffffffffff8111156146ec57600080fd5b6020830191508360208260051b85010111156116bb57600080fd5b600080600080600080600060c0888a03121561472257600080fd5b873567ffffffffffffffff81111561473957600080fd5b6147458a828b016146c2565b9098509650506020880135614759816142aa565b96999598509596604081013596506060810135956080820135955060a0909101359350915050565b60008060006060848603121561479657600080fd5b505081359360208301359350604090920135919050565b602081526147d460208201835173ffffffffffffffffffffffffffffffffffffffff169052565b600060208301516147ed604084018263ffffffff169052565b50604083015161014080606085015261480a610160850183614595565b9150606085015161482b60808601826bffffffffffffffffffffffff169052565b50608085015173ffffffffffffffffffffffffffffffffffffffff811660a08601525060a085015167ffffffffffffffff811660c08601525060c085015163ffffffff811660e08601525060e0850151610100614897818701836bffffffffffffffffffffffff169052565b86015190506101206148ac8682018315159052565b8601518584037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0018387015290506148e48382614595565b9695505050505050565b6000806020838503121561490157600080fd5b823567ffffffffffffffff81111561491857600080fd5b614924858286016146c2565b90969095509350505050565b60008083601f84011261494257600080fd5b50813567ffffffffffffffff81111561495a57600080fd5b6020830191508360208285010111156116bb57600080fd5b6000806020838503121561498557600080fd5b823567ffffffffffffffff81111561499c57600080fd5b61492485828601614930565b6020808252825182820181905260009190848201906040850190845b818110156149e0578351835292840
192918401916001016149c4565b50909695505050505050565b600067ffffffffffffffff821115614a0657614a066142cb565b5060051b60200190565b60008060408385031215614a2357600080fd5b823567ffffffffffffffff80821115614a3b57600080fd5b818501915085601f830112614a4f57600080fd5b81356020614a5f6143d8836149ec565b82815260059290921b84018101918181019089841115614a7e57600080fd5b8286015b84811015614ab657803586811115614a9a5760008081fd5b614aa88c86838b01016143b9565b845250918301918301614a82565b5096505086013592505080821115614acd57600080fd5b5061454e858286016143b9565b8215158152604060208201526000611c0e6040830184614595565b600080600060408486031215614b0a57600080fd5b833567ffffffffffffffff811115614b2157600080fd5b614b2d868287016146c2565b9094509250506020840135614b41816142b9565b809150509250925092565b60008060408385031215614b5f57600080fd5b50508035926020909101359150565b600080600060408486031215614b8357600080fd5b83359250602084013567ffffffffffffffff811115614ba157600080fd5b614bad86828701614930565b9497909650939450505050565b60008060408385031215614bcd57600080fd5b823567ffffffffffffffff80821115614be557600080fd5b614bf1868387016143b9565b93506020850135915080821115614acd57600080fd5b60006020808385031215614c1a57600080fd5b823567ffffffffffffffff80821115614c3257600080fd5b818501915085601f830112614c4657600080fd5b8135614c546143d8826149ec565b81815260059190911b83018401908481019088831115614c7357600080fd5b8585015b83811015614cab57803585811115614c8f5760008081fd5b614c9d8b89838a01016143b9565b845250918601918601614c77565b5098975050505050505050565b60008060408385031215614ccb57600080fd5b823591506020830135614cdd8161440f565b809150509250929050565b600060208284031215614cfa57600080fd5b8135611c11816142aa565b60008060408385031215614d1857600080fd5b823591506020830135614cdd816142b9565b60008060408385031215614d3d57600080fd5b823591506020830135614cdd816142aa565b60008060008060608587031215614d6557600080fd5b843567ffffffffffffffff811115614d7c57600080fd5b614d88878288016146c2565b9095509350506020850135614d9c816142aa565b91506040850135614dac816142aa565b939692955090935050565b6000806
0008060008060c08789031215614dd057600080fd5b8635614ddb816145f2565b95506020870135614deb816142aa565b95989597505050506040840135936060810135936080820135935060a0909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60006bffffffffffffffffffffffff80831681851681830481118215151615614e6a57614e6a614e10565b02949350505050565b805180151581146144d757600080fd5b600060208284031215614e9557600080fd5b611c1182614e73565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600060ff821660ff8103614ee357614ee3614e10565b60010192915050565b828152604060208201526000611c0e6040830184614595565b600082601f830112614f1657600080fd5b8151614f246143d882614373565b818152846020838601011115614f3957600080fd5b614f4a826020830160208701614571565b949350505050565b600060208284031215614f6457600080fd5b815167ffffffffffffffff811115614f7b57600080fd5b614f4a84828501614f05565b80516144d78161440f565b600060208284031215614fa457600080fd5b8151611c118161440f565b80516144d7816145f2565b60008060408385031215614fcd57600080fd5b8251614fd8816145f2565b6020939093015192949293505050565b600060208284031215614ffa57600080fd5b8151611c11816145f2565b60006020828403121561501757600080fd5b8151611c11816142aa565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361505357615053614e10565b5060010190565b80516144d7816142b9565b805167ffffffffffffffff811681146144d757600080fd5b60006020828403121561508f57600080fd5b815167ffffffffffffffff808211156150a757600080fd5b9083019061014082860312156150bc57600080fd5b6150c46142fa565b6150cd83614faf565b81526150db6020840161505a565b60208201526040830151828111156150f257600080fd5b6150fe87828601614f05565b60408301525061511060608401614f87565b606082015261512160808401614faf565b608082015261513260a08401615065565b60a082015261514360c0840161505a565b60c082015261515460e08401614f87565b60e0820152610100615167818501614e73565b90820152610120838101518381111561517f57600080fd5b61518b88828701614f05565b918301919091525095945050505050565b8082018082111
561146757611467614e10565b600061ffff8083168181036151c6576151c6614e10565b6001019392505050565b6000602082840312156151e257600080fd5b5051919050565b8181038181111561146757611467614e10565b6000604082016040835280855180835260608501915060608160051b8601019250602080880160005b83811015615271577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa088870301855261525f868351614595565b95509382019390820190600101615225565b5050858403818701525050506152878185614595565b95945050505050565b600181811c908216806152a457607f821691505b6020821081036152dd577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b60008083546152f181615290565b60018281168015615309576001811461533c5761536b565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff008416875282151583028701945061536b565b8760005260208060002060005b858110156153625781548a820152908401908201615349565b50505082870194505b50929695505050505050565b6000815461538481615290565b8085526020600183811680156153a157600181146153d957615407565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff008516838901528284151560051b8901019550615407565b866000528260002060005b858110156153ff5781548a82018601529083019084016153e4565b890184019650505b505050505092915050565b60a08152600061542560a0830188615377565b6020838203818501528188548084528284019150828160051b8501018a6000528360002060005b83811015615497577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08784030185526154858383615377565b9486019492506001918201910161544c565b505086810360408801526154ab818b615377565b94505050505084606084015282810360808401526154c98185614595565b98975050505050505050565b83815260406020820152816040820152818360608301376000818301606090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016010192915050565b601f82111561299957600081815260208120601f850160051c810160208610156155505750805b601f850160051c820191505b818110156113a85782815560010161555c565b815167ffffffffffffffff811115615589576155896142cb565b615
59d816155978454615290565b84615529565b602080601f8311600181146155f057600084156155ba5750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b1785556113a8565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b8281101561563d5788860151825594840194600190910190840161561e565b508582101561567957878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b6000602080838503121561569c57600080fd5b825167ffffffffffffffff8111156156b357600080fd5b8301601f810185136156c457600080fd5b80516156d26143d8826149ec565b81815260059190911b820183019083810190878311156156f157600080fd5b928401925b8284101561570f578351825292840192908401906156f6565b979650505050505050565b600063ffffffff8083168181036151c6576151c6614e10565b805160208083015191908110156152dd577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60209190910360031b1b16919050565b6020815260008251610140806020850152615794610160850183614595565b915060208501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0808685030160408701526157d08483614595565b9350604087015191506157fb606087018373ffffffffffffffffffffffffffffffffffffffff169052565b606087015163ffffffff811660808801529150608087015173ffffffffffffffffffffffffffffffffffffffff811660a0880152915060a087015160ff811660c0880152915060c08701519150808685030160e087015261585c8483614595565b935060e0870151915061010081878603018188015261587b8584614595565b94508088015192505061012081878603018188015261589a8584614595565b945080880151925050506158bd828601826bffffffffffffffffffffffff169052565b5090949350505050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156158ff576158ff614e10565b500290565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b60008261594257615942615904565b500490565b818103600083128015838313168383128216171561414a5761414a614e10565b60008261597657615976615
904565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff83147f8000000000000000000000000000000000000000000000000000000000000000831416156159ca576159ca614e10565b500590565b80820182811260008312801582168215821617156159ef576159ef614e10565b505092915050565b60007f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361505357615053614e10565b60007f80000000000000000000000000000000000000000000000000000000000000008203615a5957615a59614e10565b507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fdfea164736f6c6343000810000a307834353534343832643535353334343264343135323432343935343532353534643264353434353533353434653435353430303030303030303030303030303030307834323534343332643535353334343264343135323432343935343532353534643264353434353533353434653435353430303030303030303030303030303030", +} + +var VerifiableLoadStreamsLookupUpkeepABI = VerifiableLoadStreamsLookupUpkeepMetaData.ABI + +var VerifiableLoadStreamsLookupUpkeepBin = VerifiableLoadStreamsLookupUpkeepMetaData.Bin + +func DeployVerifiableLoadStreamsLookupUpkeep(auth *bind.TransactOpts, backend bind.ContractBackend, _registrar common.Address, _useArb bool) (common.Address, *types.Transaction, *VerifiableLoadStreamsLookupUpkeep, error) { + parsed, err := VerifiableLoadStreamsLookupUpkeepMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VerifiableLoadStreamsLookupUpkeepBin), backend, _registrar, _useArb) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VerifiableLoadStreamsLookupUpkeep{address: address, abi: *parsed, VerifiableLoadStreamsLookupUpkeepCaller: VerifiableLoadStreamsLookupUpkeepCaller{contract: contract}, 
VerifiableLoadStreamsLookupUpkeepTransactor: VerifiableLoadStreamsLookupUpkeepTransactor{contract: contract}, VerifiableLoadStreamsLookupUpkeepFilterer: VerifiableLoadStreamsLookupUpkeepFilterer{contract: contract}}, nil +} + +type VerifiableLoadStreamsLookupUpkeep struct { + address common.Address + abi abi.ABI + VerifiableLoadStreamsLookupUpkeepCaller + VerifiableLoadStreamsLookupUpkeepTransactor + VerifiableLoadStreamsLookupUpkeepFilterer +} + +type VerifiableLoadStreamsLookupUpkeepCaller struct { + contract *bind.BoundContract +} + +type VerifiableLoadStreamsLookupUpkeepTransactor struct { + contract *bind.BoundContract +} + +type VerifiableLoadStreamsLookupUpkeepFilterer struct { + contract *bind.BoundContract +} + +type VerifiableLoadStreamsLookupUpkeepSession struct { + Contract *VerifiableLoadStreamsLookupUpkeep + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VerifiableLoadStreamsLookupUpkeepCallerSession struct { + Contract *VerifiableLoadStreamsLookupUpkeepCaller + CallOpts bind.CallOpts +} + +type VerifiableLoadStreamsLookupUpkeepTransactorSession struct { + Contract *VerifiableLoadStreamsLookupUpkeepTransactor + TransactOpts bind.TransactOpts +} + +type VerifiableLoadStreamsLookupUpkeepRaw struct { + Contract *VerifiableLoadStreamsLookupUpkeep +} + +type VerifiableLoadStreamsLookupUpkeepCallerRaw struct { + Contract *VerifiableLoadStreamsLookupUpkeepCaller +} + +type VerifiableLoadStreamsLookupUpkeepTransactorRaw struct { + Contract *VerifiableLoadStreamsLookupUpkeepTransactor +} + +func NewVerifiableLoadStreamsLookupUpkeep(address common.Address, backend bind.ContractBackend) (*VerifiableLoadStreamsLookupUpkeep, error) { + abi, err := abi.JSON(strings.NewReader(VerifiableLoadStreamsLookupUpkeepABI)) + if err != nil { + return nil, err + } + contract, err := bindVerifiableLoadStreamsLookupUpkeep(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VerifiableLoadStreamsLookupUpkeep{address: address, 
abi: abi, VerifiableLoadStreamsLookupUpkeepCaller: VerifiableLoadStreamsLookupUpkeepCaller{contract: contract}, VerifiableLoadStreamsLookupUpkeepTransactor: VerifiableLoadStreamsLookupUpkeepTransactor{contract: contract}, VerifiableLoadStreamsLookupUpkeepFilterer: VerifiableLoadStreamsLookupUpkeepFilterer{contract: contract}}, nil +} + +func NewVerifiableLoadStreamsLookupUpkeepCaller(address common.Address, caller bind.ContractCaller) (*VerifiableLoadStreamsLookupUpkeepCaller, error) { + contract, err := bindVerifiableLoadStreamsLookupUpkeep(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VerifiableLoadStreamsLookupUpkeepCaller{contract: contract}, nil +} + +func NewVerifiableLoadStreamsLookupUpkeepTransactor(address common.Address, transactor bind.ContractTransactor) (*VerifiableLoadStreamsLookupUpkeepTransactor, error) { + contract, err := bindVerifiableLoadStreamsLookupUpkeep(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VerifiableLoadStreamsLookupUpkeepTransactor{contract: contract}, nil +} + +func NewVerifiableLoadStreamsLookupUpkeepFilterer(address common.Address, filterer bind.ContractFilterer) (*VerifiableLoadStreamsLookupUpkeepFilterer, error) { + contract, err := bindVerifiableLoadStreamsLookupUpkeep(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VerifiableLoadStreamsLookupUpkeepFilterer{contract: contract}, nil +} + +func bindVerifiableLoadStreamsLookupUpkeep(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VerifiableLoadStreamsLookupUpkeepMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params 
...interface{}) error { + return _VerifiableLoadStreamsLookupUpkeep.Contract.VerifiableLoadStreamsLookupUpkeepCaller.contract.Call(opts, result, method, params...) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.VerifiableLoadStreamsLookupUpkeepTransactor.contract.Transfer(opts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.VerifiableLoadStreamsLookupUpkeepTransactor.contract.Transact(opts, method, params...) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VerifiableLoadStreamsLookupUpkeep.Contract.contract.Call(opts, result, method, params...) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.contract.Transfer(opts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) BUCKETSIZE(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "BUCKET_SIZE") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) BUCKETSIZE() (uint16, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BUCKETSIZE(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) BUCKETSIZE() (uint16, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BUCKETSIZE(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) AddLinkAmount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "addLinkAmount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) AddLinkAmount() (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.AddLinkAmount(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) AddLinkAmount() (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.AddLinkAmount(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) BucketedDelays(opts *bind.CallOpts, arg0 *big.Int, arg1 uint16, arg2 *big.Int) (*big.Int, error) { + var out []interface{} + err := 
_VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "bucketedDelays", arg0, arg1, arg2) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) BucketedDelays(arg0 *big.Int, arg1 uint16, arg2 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BucketedDelays(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0, arg1, arg2) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) BucketedDelays(arg0 *big.Int, arg1 uint16, arg2 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BucketedDelays(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0, arg1, arg2) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) Buckets(opts *bind.CallOpts, arg0 *big.Int) (uint16, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "buckets", arg0) + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) Buckets(arg0 *big.Int) (uint16, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Buckets(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) Buckets(arg0 *big.Int) (uint16, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Buckets(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) CheckCallback(opts *bind.CallOpts, values [][]byte, extraData []byte) (bool, []byte, error) { + var out []interface{} + err := 
_VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "checkCallback", values, extraData) + + if err != nil { + return *new(bool), *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out1 := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + + return out0, out1, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) CheckCallback(values [][]byte, extraData []byte) (bool, []byte, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.CheckCallback(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, values, extraData) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) CheckCallback(values [][]byte, extraData []byte) (bool, []byte, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.CheckCallback(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, values, extraData) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) CheckGasToBurns(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "checkGasToBurns", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) CheckGasToBurns(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.CheckGasToBurns(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) CheckGasToBurns(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.CheckGasToBurns(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) Counters(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, 
error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "counters", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) Counters(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Counters(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) Counters(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Counters(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) Delays(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "delays", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) Delays(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Delays(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0, arg1) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) Delays(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Delays(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0, arg1) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "dummyMap", arg0) + + if err != 
nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) DummyMap(arg0 [32]byte) (bool, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.DummyMap(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) DummyMap(arg0 [32]byte) (bool, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.DummyMap(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) Eligible(opts *bind.CallOpts, upkeepId *big.Int) (bool, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "eligible", upkeepId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) Eligible(upkeepId *big.Int) (bool, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Eligible(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) Eligible(upkeepId *big.Int) (bool, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Eligible(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) EmittedAgainSig(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "emittedAgainSig") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep 
*VerifiableLoadStreamsLookupUpkeepSession) EmittedAgainSig() ([32]byte, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.EmittedAgainSig(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) EmittedAgainSig() ([32]byte, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.EmittedAgainSig(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) EmittedSig(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "emittedSig") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) EmittedSig() ([32]byte, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.EmittedSig(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) EmittedSig() ([32]byte, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.EmittedSig(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) FeedParamKey(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "feedParamKey") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) FeedParamKey() (string, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.FeedParamKey(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep 
*VerifiableLoadStreamsLookupUpkeepCallerSession) FeedParamKey() (string, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.FeedParamKey(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) FeedsHex(opts *bind.CallOpts, arg0 *big.Int) (string, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "feedsHex", arg0) + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) FeedsHex(arg0 *big.Int) (string, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.FeedsHex(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) FeedsHex(arg0 *big.Int) (string, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.FeedsHex(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) FirstPerformBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "firstPerformBlocks", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) FirstPerformBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.FirstPerformBlocks(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) FirstPerformBlocks(arg0 *big.Int) (*big.Int, error) { + return 
_VerifiableLoadStreamsLookupUpkeep.Contract.FirstPerformBlocks(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GasLimits(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "gasLimits", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GasLimits(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GasLimits(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GasLimits(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GasLimits(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetActiveUpkeepIDsDeployedByThisContract(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getActiveUpkeepIDsDeployedByThisContract", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetActiveUpkeepIDsDeployedByThisContract(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetActiveUpkeepIDsDeployedByThisContract(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, startIndex, maxCount) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) 
GetActiveUpkeepIDsDeployedByThisContract(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetActiveUpkeepIDsDeployedByThisContract(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, startIndex, maxCount) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetAllActiveUpkeepIDsOnRegistry(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getAllActiveUpkeepIDsOnRegistry", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetAllActiveUpkeepIDsOnRegistry(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetAllActiveUpkeepIDsOnRegistry(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, startIndex, maxCount) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetAllActiveUpkeepIDsOnRegistry(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetAllActiveUpkeepIDsOnRegistry(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, startIndex, maxCount) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getBalance", id) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetBalance(id *big.Int) (*big.Int, error) { + return 
_VerifiableLoadStreamsLookupUpkeep.Contract.GetBalance(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, id) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetBalance(id *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetBalance(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, id) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetBucketedDelays(opts *bind.CallOpts, upkeepId *big.Int, bucket uint16) ([]*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getBucketedDelays", upkeepId, bucket) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetBucketedDelays(upkeepId *big.Int, bucket uint16) ([]*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetBucketedDelays(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId, bucket) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetBucketedDelays(upkeepId *big.Int, bucket uint16) ([]*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetBucketedDelays(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId, bucket) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetBucketedDelaysLength(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getBucketedDelaysLength", upkeepId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetBucketedDelaysLength(upkeepId 
*big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetBucketedDelaysLength(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetBucketedDelaysLength(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetBucketedDelaysLength(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetDelays(opts *bind.CallOpts, upkeepId *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getDelays", upkeepId) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetDelays(upkeepId *big.Int) ([]*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetDelays(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetDelays(upkeepId *big.Int) ([]*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetDelays(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetDelaysLength(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getDelaysLength", upkeepId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetDelaysLength(upkeepId *big.Int) (*big.Int, error) { + return 
_VerifiableLoadStreamsLookupUpkeep.Contract.GetDelaysLength(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetDelaysLength(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetDelaysLength(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getForwarder", upkeepID) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetForwarder(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepID) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetForwarder(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepID) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetLogTriggerConfig(opts *bind.CallOpts, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) ([]byte, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getLogTriggerConfig", addr, selector, topic0, topic1, topic2, topic3) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep 
*VerifiableLoadStreamsLookupUpkeepSession) GetLogTriggerConfig(addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) ([]byte, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetLogTriggerConfig(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetLogTriggerConfig(addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) ([]byte, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetLogTriggerConfig(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getMinBalanceForUpkeep", upkeepId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetMinBalanceForUpkeep(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetMinBalanceForUpkeep(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetMinBalanceForUpkeep(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetMinBalanceForUpkeep(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetPxDelayLastNPerforms(opts *bind.CallOpts, upkeepId *big.Int, p *big.Int, n *big.Int) (*big.Int, error) { + var out []interface{} + 
err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getPxDelayLastNPerforms", upkeepId, p, n) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetPxDelayLastNPerforms(upkeepId *big.Int, p *big.Int, n *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetPxDelayLastNPerforms(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId, p, n) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetPxDelayLastNPerforms(upkeepId *big.Int, p *big.Int, n *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetPxDelayLastNPerforms(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId, p, n) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetSumDelayInBucket(opts *bind.CallOpts, upkeepId *big.Int, bucket uint16) (*big.Int, *big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getSumDelayInBucket", upkeepId, bucket) + + if err != nil { + return *new(*big.Int), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetSumDelayInBucket(upkeepId *big.Int, bucket uint16) (*big.Int, *big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetSumDelayInBucket(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId, bucket) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetSumDelayInBucket(upkeepId *big.Int, bucket uint16) (*big.Int, *big.Int, error) { + return 
_VerifiableLoadStreamsLookupUpkeep.Contract.GetSumDelayInBucket(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId, bucket) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetSumDelayLastNPerforms(opts *bind.CallOpts, upkeepId *big.Int, n *big.Int) (*big.Int, *big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getSumDelayLastNPerforms", upkeepId, n) + + if err != nil { + return *new(*big.Int), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetSumDelayLastNPerforms(upkeepId *big.Int, n *big.Int) (*big.Int, *big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetSumDelayLastNPerforms(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId, n) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetSumDelayLastNPerforms(upkeepId *big.Int, n *big.Int) (*big.Int, *big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetSumDelayLastNPerforms(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId, n) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getTriggerType", upkeepId) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetTriggerType(upkeepId *big.Int) (uint8, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetTriggerType(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + 
+func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetTriggerType(upkeepId *big.Int) (uint8, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetTriggerType(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetUpkeepInfo(opts *bind.CallOpts, upkeepId *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getUpkeepInfo", upkeepId) + + if err != nil { + return *new(KeeperRegistryBase21UpkeepInfo), err + } + + out0 := *abi.ConvertType(out[0], new(KeeperRegistryBase21UpkeepInfo)).(*KeeperRegistryBase21UpkeepInfo) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetUpkeepInfo(upkeepId *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetUpkeepInfo(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetUpkeepInfo(upkeepId *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetUpkeepInfo(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getUpkeepPrivilegeConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return 
_VerifiableLoadStreamsLookupUpkeep.Contract.GetUpkeepPrivilegeConfig(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetUpkeepPrivilegeConfig(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "getUpkeepTriggerConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetUpkeepTriggerConfig(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.GetUpkeepTriggerConfig(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) Intervals(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "intervals", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) Intervals(arg0 *big.Int) (*big.Int, error) { + return 
_VerifiableLoadStreamsLookupUpkeep.Contract.Intervals(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) Intervals(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Intervals(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) LastTopUpBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "lastTopUpBlocks", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) LastTopUpBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.LastTopUpBlocks(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) LastTopUpBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.LastTopUpBlocks(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) LinkToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "linkToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) LinkToken() (common.Address, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.LinkToken(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func 
(_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) LinkToken() (common.Address, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.LinkToken(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) MinBalanceThresholdMultiplier(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "minBalanceThresholdMultiplier") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) MinBalanceThresholdMultiplier() (uint8, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.MinBalanceThresholdMultiplier(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) MinBalanceThresholdMultiplier() (uint8, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.MinBalanceThresholdMultiplier(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) Owner() (common.Address, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Owner(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) Owner() (common.Address, error) { + return 
_VerifiableLoadStreamsLookupUpkeep.Contract.Owner(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) PerformDataSizes(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "performDataSizes", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) PerformDataSizes(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.PerformDataSizes(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) PerformDataSizes(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.PerformDataSizes(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) PerformGasToBurns(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "performGasToBurns", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) PerformGasToBurns(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.PerformGasToBurns(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) PerformGasToBurns(arg0 *big.Int) (*big.Int, error) { + return 
_VerifiableLoadStreamsLookupUpkeep.Contract.PerformGasToBurns(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) PreviousPerformBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "previousPerformBlocks", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) PreviousPerformBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.PreviousPerformBlocks(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) PreviousPerformBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.PreviousPerformBlocks(&_VerifiableLoadStreamsLookupUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) Registrar(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "registrar") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) Registrar() (common.Address, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Registrar(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) Registrar() (common.Address, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Registrar(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + 
+func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) Registry(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "registry") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) Registry() (common.Address, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Registry(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) Registry() (common.Address, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Registry(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) TimeParamKey(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "timeParamKey") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) TimeParamKey() (string, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.TimeParamKey(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) TimeParamKey() (string, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.TimeParamKey(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) UpkeepTopUpCheckInterval(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, 
"upkeepTopUpCheckInterval") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) UpkeepTopUpCheckInterval() (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.UpkeepTopUpCheckInterval(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) UpkeepTopUpCheckInterval() (*big.Int, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.UpkeepTopUpCheckInterval(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCaller) UseArbitrumBlockNum(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _VerifiableLoadStreamsLookupUpkeep.contract.Call(opts, &out, "useArbitrumBlockNum") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) UseArbitrumBlockNum() (bool, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.UseArbitrumBlockNum(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepCallerSession) UseArbitrumBlockNum() (bool, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.UseArbitrumBlockNum(&_VerifiableLoadStreamsLookupUpkeep.CallOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "acceptOwnership") +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) AcceptOwnership() (*types.Transaction, error) { + return 
_VerifiableLoadStreamsLookupUpkeep.Contract.AcceptOwnership(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.AcceptOwnership(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) AddFunds(opts *bind.TransactOpts, upkeepId *big.Int, amount *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "addFunds", upkeepId, amount) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) AddFunds(upkeepId *big.Int, amount *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.AddFunds(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, amount) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) AddFunds(upkeepId *big.Int, amount *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.AddFunds(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, amount) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) BatchCancelUpkeeps(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "batchCancelUpkeeps", upkeepIds) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) BatchCancelUpkeeps(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchCancelUpkeeps(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) BatchCancelUpkeeps(upkeepIds []*big.Int) 
(*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchCancelUpkeeps(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) BatchPreparingUpkeeps(opts *bind.TransactOpts, upkeepIds []*big.Int, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "batchPreparingUpkeeps", upkeepIds, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) BatchPreparingUpkeeps(upkeepIds []*big.Int, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchPreparingUpkeeps(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepIds, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) BatchPreparingUpkeeps(upkeepIds []*big.Int, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchPreparingUpkeeps(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepIds, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) BatchPreparingUpkeepsSimple(opts *bind.TransactOpts, upkeepIds []*big.Int, log uint8, selector uint8) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "batchPreparingUpkeepsSimple", upkeepIds, log, selector) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) BatchPreparingUpkeepsSimple(upkeepIds []*big.Int, log uint8, selector uint8) (*types.Transaction, error) { + 
return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchPreparingUpkeepsSimple(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepIds, log, selector) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) BatchPreparingUpkeepsSimple(upkeepIds []*big.Int, log uint8, selector uint8) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchPreparingUpkeepsSimple(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepIds, log, selector) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) BatchRegisterUpkeeps(opts *bind.TransactOpts, number uint8, gasLimit uint32, triggerType uint8, triggerConfig []byte, amount *big.Int, checkGasToBurn *big.Int, performGasToBurn *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "batchRegisterUpkeeps", number, gasLimit, triggerType, triggerConfig, amount, checkGasToBurn, performGasToBurn) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) BatchRegisterUpkeeps(number uint8, gasLimit uint32, triggerType uint8, triggerConfig []byte, amount *big.Int, checkGasToBurn *big.Int, performGasToBurn *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchRegisterUpkeeps(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, number, gasLimit, triggerType, triggerConfig, amount, checkGasToBurn, performGasToBurn) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) BatchRegisterUpkeeps(number uint8, gasLimit uint32, triggerType uint8, triggerConfig []byte, amount *big.Int, checkGasToBurn *big.Int, performGasToBurn *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchRegisterUpkeeps(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, number, gasLimit, triggerType, triggerConfig, amount, checkGasToBurn, 
performGasToBurn) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) BatchSendLogs(opts *bind.TransactOpts, log uint8) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "batchSendLogs", log) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) BatchSendLogs(log uint8) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchSendLogs(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, log) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) BatchSendLogs(log uint8) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchSendLogs(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, log) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) BatchSetIntervals(opts *bind.TransactOpts, upkeepIds []*big.Int, interval uint32) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "batchSetIntervals", upkeepIds, interval) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) BatchSetIntervals(upkeepIds []*big.Int, interval uint32) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchSetIntervals(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepIds, interval) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) BatchSetIntervals(upkeepIds []*big.Int, interval uint32) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchSetIntervals(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepIds, interval) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) BatchUpdatePipelineData(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) { + return 
_VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "batchUpdatePipelineData", upkeepIds) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) BatchUpdatePipelineData(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchUpdatePipelineData(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) BatchUpdatePipelineData(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchUpdatePipelineData(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) BatchWithdrawLinks(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "batchWithdrawLinks", upkeepIds) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) BatchWithdrawLinks(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchWithdrawLinks(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) BatchWithdrawLinks(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BatchWithdrawLinks(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) BurnPerformGas(opts *bind.TransactOpts, upkeepId *big.Int, startGas *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "burnPerformGas", upkeepId, startGas, blockNum) +} + +func (_VerifiableLoadStreamsLookupUpkeep 
*VerifiableLoadStreamsLookupUpkeepSession) BurnPerformGas(upkeepId *big.Int, startGas *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BurnPerformGas(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, startGas, blockNum) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) BurnPerformGas(upkeepId *big.Int, startGas *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.BurnPerformGas(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, startGas, blockNum) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) CheckUpkeep(opts *bind.TransactOpts, checkData []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "checkUpkeep", checkData) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) CheckUpkeep(checkData []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.CheckUpkeep(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, checkData) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) CheckUpkeep(checkData []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.CheckUpkeep(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, checkData) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "performUpkeep", performData) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return 
_VerifiableLoadStreamsLookupUpkeep.Contract.PerformUpkeep(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, performData) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.PerformUpkeep(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, performData) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) SendLog(opts *bind.TransactOpts, upkeepId *big.Int, log uint8) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "sendLog", upkeepId, log) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) SendLog(upkeepId *big.Int, log uint8) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SendLog(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, log) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) SendLog(upkeepId *big.Int, log uint8) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SendLog(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, log) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) SetConfig(opts *bind.TransactOpts, newRegistrar common.Address) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "setConfig", newRegistrar) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) SetConfig(newRegistrar common.Address) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetConfig(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, newRegistrar) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) SetConfig(newRegistrar common.Address) 
(*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetConfig(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, newRegistrar) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) SetFeeds(opts *bind.TransactOpts, _feeds []string) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "setFeeds", _feeds) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) SetFeeds(_feeds []string) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetFeeds(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, _feeds) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) SetFeeds(_feeds []string) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetFeeds(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, _feeds) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) SetInterval(opts *bind.TransactOpts, upkeepId *big.Int, _interval *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "setInterval", upkeepId, _interval) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) SetInterval(upkeepId *big.Int, _interval *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetInterval(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, _interval) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) SetInterval(upkeepId *big.Int, _interval *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetInterval(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, _interval) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) SetParamKeys(opts 
*bind.TransactOpts, _feedParamKey string, _timeParamKey string) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "setParamKeys", _feedParamKey, _timeParamKey) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) SetParamKeys(_feedParamKey string, _timeParamKey string) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetParamKeys(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, _feedParamKey, _timeParamKey) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) SetParamKeys(_feedParamKey string, _timeParamKey string) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetParamKeys(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, _feedParamKey, _timeParamKey) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) SetPerformDataSize(opts *bind.TransactOpts, upkeepId *big.Int, value *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "setPerformDataSize", upkeepId, value) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) SetPerformDataSize(upkeepId *big.Int, value *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetPerformDataSize(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, value) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) SetPerformDataSize(upkeepId *big.Int, value *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetPerformDataSize(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, value) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) SetUpkeepGasLimit(opts *bind.TransactOpts, upkeepId *big.Int, gasLimit uint32) 
(*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "setUpkeepGasLimit", upkeepId, gasLimit) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) SetUpkeepGasLimit(upkeepId *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetUpkeepGasLimit(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, gasLimit) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) SetUpkeepGasLimit(upkeepId *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetUpkeepGasLimit(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, gasLimit) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "setUpkeepPrivilegeConfig", upkeepId, cfg) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetUpkeepPrivilegeConfig(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, cfg) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.SetUpkeepPrivilegeConfig(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, cfg) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) TopUpFund(opts *bind.TransactOpts, upkeepId *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return 
_VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "topUpFund", upkeepId, blockNum) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) TopUpFund(upkeepId *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.TopUpFund(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, blockNum) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) TopUpFund(upkeepId *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.TopUpFund(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, blockNum) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "transferOwnership", to) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.TransferOwnership(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, to) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.TransferOwnership(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, to) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) UpdateLogTriggerConfig1(opts *bind.TransactOpts, upkeepId *big.Int, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "updateLogTriggerConfig1", upkeepId, addr, selector, topic0, topic1, topic2, 
topic3) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) UpdateLogTriggerConfig1(upkeepId *big.Int, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.UpdateLogTriggerConfig1(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) UpdateLogTriggerConfig1(upkeepId *big.Int, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.UpdateLogTriggerConfig1(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) UpdateLogTriggerConfig2(opts *bind.TransactOpts, upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "updateLogTriggerConfig2", upkeepId, cfg) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) UpdateLogTriggerConfig2(upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.UpdateLogTriggerConfig2(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, cfg) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) UpdateLogTriggerConfig2(upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.UpdateLogTriggerConfig2(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, cfg) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) 
UpdateUpkeepPipelineData(opts *bind.TransactOpts, upkeepId *big.Int, pipelineData []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "updateUpkeepPipelineData", upkeepId, pipelineData) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) UpdateUpkeepPipelineData(upkeepId *big.Int, pipelineData []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.UpdateUpkeepPipelineData(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, pipelineData) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) UpdateUpkeepPipelineData(upkeepId *big.Int, pipelineData []byte) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.UpdateUpkeepPipelineData(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId, pipelineData) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) WithdrawLinks(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "withdrawLinks") +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) WithdrawLinks() (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.WithdrawLinks(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) WithdrawLinks() (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.WithdrawLinks(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) WithdrawLinks0(opts *bind.TransactOpts, upkeepId *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.Transact(opts, "withdrawLinks0", upkeepId) +} + +func 
(_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) WithdrawLinks0(upkeepId *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.WithdrawLinks0(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) WithdrawLinks0(upkeepId *big.Int) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.WithdrawLinks0(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts, upkeepId) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.contract.RawTransact(opts, nil) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepSession) Receive() (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Receive(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts) +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepTransactorSession) Receive() (*types.Transaction, error) { + return _VerifiableLoadStreamsLookupUpkeep.Contract.Receive(&_VerifiableLoadStreamsLookupUpkeep.TransactOpts) +} + +type VerifiableLoadStreamsLookupUpkeepLogEmittedIterator struct { + Event *VerifiableLoadStreamsLookupUpkeepLogEmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadStreamsLookupUpkeepLogEmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadStreamsLookupUpkeepLogEmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + 
it.Event = new(VerifiableLoadStreamsLookupUpkeepLogEmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifiableLoadStreamsLookupUpkeepLogEmittedIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadStreamsLookupUpkeepLogEmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadStreamsLookupUpkeepLogEmitted struct { + UpkeepId *big.Int + BlockNum *big.Int + Addr common.Address + Raw types.Log +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) FilterLogEmitted(opts *bind.FilterOpts, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (*VerifiableLoadStreamsLookupUpkeepLogEmittedIterator, error) { + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + var blockNumRule []interface{} + for _, blockNumItem := range blockNum { + blockNumRule = append(blockNumRule, blockNumItem) + } + var addrRule []interface{} + for _, addrItem := range addr { + addrRule = append(addrRule, addrItem) + } + + logs, sub, err := _VerifiableLoadStreamsLookupUpkeep.contract.FilterLogs(opts, "LogEmitted", upkeepIdRule, blockNumRule, addrRule) + if err != nil { + return nil, err + } + return &VerifiableLoadStreamsLookupUpkeepLogEmittedIterator{contract: _VerifiableLoadStreamsLookupUpkeep.contract, event: "LogEmitted", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) WatchLogEmitted(opts *bind.WatchOpts, sink chan<- *VerifiableLoadStreamsLookupUpkeepLogEmitted, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (event.Subscription, error) { + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + 
upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + var blockNumRule []interface{} + for _, blockNumItem := range blockNum { + blockNumRule = append(blockNumRule, blockNumItem) + } + var addrRule []interface{} + for _, addrItem := range addr { + addrRule = append(addrRule, addrItem) + } + + logs, sub, err := _VerifiableLoadStreamsLookupUpkeep.contract.WatchLogs(opts, "LogEmitted", upkeepIdRule, blockNumRule, addrRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadStreamsLookupUpkeepLogEmitted) + if err := _VerifiableLoadStreamsLookupUpkeep.contract.UnpackLog(event, "LogEmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) ParseLogEmitted(log types.Log) (*VerifiableLoadStreamsLookupUpkeepLogEmitted, error) { + event := new(VerifiableLoadStreamsLookupUpkeepLogEmitted) + if err := _VerifiableLoadStreamsLookupUpkeep.contract.UnpackLog(event, "LogEmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifiableLoadStreamsLookupUpkeepLogEmittedAgainIterator struct { + Event *VerifiableLoadStreamsLookupUpkeepLogEmittedAgain + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadStreamsLookupUpkeepLogEmittedAgainIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadStreamsLookupUpkeepLogEmittedAgain) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err 
+ return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadStreamsLookupUpkeepLogEmittedAgain) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifiableLoadStreamsLookupUpkeepLogEmittedAgainIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadStreamsLookupUpkeepLogEmittedAgainIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadStreamsLookupUpkeepLogEmittedAgain struct { + UpkeepId *big.Int + BlockNum *big.Int + Addr common.Address + Raw types.Log +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) FilterLogEmittedAgain(opts *bind.FilterOpts, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (*VerifiableLoadStreamsLookupUpkeepLogEmittedAgainIterator, error) { + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + var blockNumRule []interface{} + for _, blockNumItem := range blockNum { + blockNumRule = append(blockNumRule, blockNumItem) + } + var addrRule []interface{} + for _, addrItem := range addr { + addrRule = append(addrRule, addrItem) + } + + logs, sub, err := _VerifiableLoadStreamsLookupUpkeep.contract.FilterLogs(opts, "LogEmittedAgain", upkeepIdRule, blockNumRule, addrRule) + if err != nil { + return nil, err + } + return &VerifiableLoadStreamsLookupUpkeepLogEmittedAgainIterator{contract: _VerifiableLoadStreamsLookupUpkeep.contract, event: "LogEmittedAgain", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) WatchLogEmittedAgain(opts *bind.WatchOpts, sink chan<- *VerifiableLoadStreamsLookupUpkeepLogEmittedAgain, 
upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (event.Subscription, error) { + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + var blockNumRule []interface{} + for _, blockNumItem := range blockNum { + blockNumRule = append(blockNumRule, blockNumItem) + } + var addrRule []interface{} + for _, addrItem := range addr { + addrRule = append(addrRule, addrItem) + } + + logs, sub, err := _VerifiableLoadStreamsLookupUpkeep.contract.WatchLogs(opts, "LogEmittedAgain", upkeepIdRule, blockNumRule, addrRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadStreamsLookupUpkeepLogEmittedAgain) + if err := _VerifiableLoadStreamsLookupUpkeep.contract.UnpackLog(event, "LogEmittedAgain", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) ParseLogEmittedAgain(log types.Log) (*VerifiableLoadStreamsLookupUpkeepLogEmittedAgain, error) { + event := new(VerifiableLoadStreamsLookupUpkeepLogEmittedAgain) + if err := _VerifiableLoadStreamsLookupUpkeep.contract.UnpackLog(event, "LogEmittedAgain", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequestedIterator struct { + Event *VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequestedIterator) Next() bool { + + if 
it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifiableLoadStreamsLookupUpkeep.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequestedIterator{contract: _VerifiableLoadStreamsLookupUpkeep.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadStreamsLookupUpkeep 
*VerifiableLoadStreamsLookupUpkeepFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifiableLoadStreamsLookupUpkeep.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequested) + if err := _VerifiableLoadStreamsLookupUpkeep.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) ParseOwnershipTransferRequested(log types.Log) (*VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequested, error) { + event := new(VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequested) + if err := _VerifiableLoadStreamsLookupUpkeep.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifiableLoadStreamsLookupUpkeepOwnershipTransferredIterator struct { + Event *VerifiableLoadStreamsLookupUpkeepOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadStreamsLookupUpkeepOwnershipTransferredIterator) 
Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadStreamsLookupUpkeepOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadStreamsLookupUpkeepOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifiableLoadStreamsLookupUpkeepOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadStreamsLookupUpkeepOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadStreamsLookupUpkeepOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifiableLoadStreamsLookupUpkeepOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifiableLoadStreamsLookupUpkeep.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VerifiableLoadStreamsLookupUpkeepOwnershipTransferredIterator{contract: _VerifiableLoadStreamsLookupUpkeep.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) 
WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VerifiableLoadStreamsLookupUpkeepOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifiableLoadStreamsLookupUpkeep.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadStreamsLookupUpkeepOwnershipTransferred) + if err := _VerifiableLoadStreamsLookupUpkeep.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) ParseOwnershipTransferred(log types.Log) (*VerifiableLoadStreamsLookupUpkeepOwnershipTransferred, error) { + event := new(VerifiableLoadStreamsLookupUpkeepOwnershipTransferred) + if err := _VerifiableLoadStreamsLookupUpkeep.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifiableLoadStreamsLookupUpkeepUpkeepTopUpIterator struct { + Event *VerifiableLoadStreamsLookupUpkeepUpkeepTopUp + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadStreamsLookupUpkeepUpkeepTopUpIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(VerifiableLoadStreamsLookupUpkeepUpkeepTopUp) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadStreamsLookupUpkeepUpkeepTopUp) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifiableLoadStreamsLookupUpkeepUpkeepTopUpIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadStreamsLookupUpkeepUpkeepTopUpIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadStreamsLookupUpkeepUpkeepTopUp struct { + UpkeepId *big.Int + Amount *big.Int + BlockNum *big.Int + Raw types.Log +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) FilterUpkeepTopUp(opts *bind.FilterOpts) (*VerifiableLoadStreamsLookupUpkeepUpkeepTopUpIterator, error) { + + logs, sub, err := _VerifiableLoadStreamsLookupUpkeep.contract.FilterLogs(opts, "UpkeepTopUp") + if err != nil { + return nil, err + } + return &VerifiableLoadStreamsLookupUpkeepUpkeepTopUpIterator{contract: _VerifiableLoadStreamsLookupUpkeep.contract, event: "UpkeepTopUp", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) WatchUpkeepTopUp(opts *bind.WatchOpts, sink chan<- *VerifiableLoadStreamsLookupUpkeepUpkeepTopUp) (event.Subscription, error) { + + logs, sub, err := _VerifiableLoadStreamsLookupUpkeep.contract.WatchLogs(opts, "UpkeepTopUp") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadStreamsLookupUpkeepUpkeepTopUp) + if err := 
_VerifiableLoadStreamsLookupUpkeep.contract.UnpackLog(event, "UpkeepTopUp", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeepFilterer) ParseUpkeepTopUp(log types.Log) (*VerifiableLoadStreamsLookupUpkeepUpkeepTopUp, error) { + event := new(VerifiableLoadStreamsLookupUpkeepUpkeepTopUp) + if err := _VerifiableLoadStreamsLookupUpkeep.contract.UnpackLog(event, "UpkeepTopUp", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeep) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VerifiableLoadStreamsLookupUpkeep.abi.Events["LogEmitted"].ID: + return _VerifiableLoadStreamsLookupUpkeep.ParseLogEmitted(log) + case _VerifiableLoadStreamsLookupUpkeep.abi.Events["LogEmittedAgain"].ID: + return _VerifiableLoadStreamsLookupUpkeep.ParseLogEmittedAgain(log) + case _VerifiableLoadStreamsLookupUpkeep.abi.Events["OwnershipTransferRequested"].ID: + return _VerifiableLoadStreamsLookupUpkeep.ParseOwnershipTransferRequested(log) + case _VerifiableLoadStreamsLookupUpkeep.abi.Events["OwnershipTransferred"].ID: + return _VerifiableLoadStreamsLookupUpkeep.ParseOwnershipTransferred(log) + case _VerifiableLoadStreamsLookupUpkeep.abi.Events["UpkeepTopUp"].ID: + return _VerifiableLoadStreamsLookupUpkeep.ParseUpkeepTopUp(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VerifiableLoadStreamsLookupUpkeepLogEmitted) Topic() common.Hash { + return common.HexToHash("0x97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf08") +} + +func (VerifiableLoadStreamsLookupUpkeepLogEmittedAgain) Topic() 
common.Hash { + return common.HexToHash("0xc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d") +} + +func (VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VerifiableLoadStreamsLookupUpkeepOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VerifiableLoadStreamsLookupUpkeepUpkeepTopUp) Topic() common.Hash { + return common.HexToHash("0x49d4100ab0124eb4a9a65dc4ea08d6412a43f6f05c49194983f5b322bcc0a5c0") +} + +func (_VerifiableLoadStreamsLookupUpkeep *VerifiableLoadStreamsLookupUpkeep) Address() common.Address { + return _VerifiableLoadStreamsLookupUpkeep.address +} + +type VerifiableLoadStreamsLookupUpkeepInterface interface { + BUCKETSIZE(opts *bind.CallOpts) (uint16, error) + + AddLinkAmount(opts *bind.CallOpts) (*big.Int, error) + + BucketedDelays(opts *bind.CallOpts, arg0 *big.Int, arg1 uint16, arg2 *big.Int) (*big.Int, error) + + Buckets(opts *bind.CallOpts, arg0 *big.Int) (uint16, error) + + CheckCallback(opts *bind.CallOpts, values [][]byte, extraData []byte) (bool, []byte, error) + + CheckGasToBurns(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + Counters(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + Delays(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) + + DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) + + Eligible(opts *bind.CallOpts, upkeepId *big.Int) (bool, error) + + EmittedAgainSig(opts *bind.CallOpts) ([32]byte, error) + + EmittedSig(opts *bind.CallOpts) ([32]byte, error) + + FeedParamKey(opts *bind.CallOpts) (string, error) + + FeedsHex(opts *bind.CallOpts, arg0 *big.Int) (string, error) + + FirstPerformBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + GasLimits(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + 
GetActiveUpkeepIDsDeployedByThisContract(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetAllActiveUpkeepIDsOnRegistry(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetBucketedDelays(opts *bind.CallOpts, upkeepId *big.Int, bucket uint16) ([]*big.Int, error) + + GetBucketedDelaysLength(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) + + GetDelays(opts *bind.CallOpts, upkeepId *big.Int) ([]*big.Int, error) + + GetDelaysLength(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) + + GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) + + GetLogTriggerConfig(opts *bind.CallOpts, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) ([]byte, error) + + GetMinBalanceForUpkeep(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) + + GetPxDelayLastNPerforms(opts *bind.CallOpts, upkeepId *big.Int, p *big.Int, n *big.Int) (*big.Int, error) + + GetSumDelayInBucket(opts *bind.CallOpts, upkeepId *big.Int, bucket uint16) (*big.Int, *big.Int, error) + + GetSumDelayLastNPerforms(opts *bind.CallOpts, upkeepId *big.Int, n *big.Int) (*big.Int, *big.Int, error) + + GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) + + GetUpkeepInfo(opts *bind.CallOpts, upkeepId *big.Int) (KeeperRegistryBase21UpkeepInfo, error) + + GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + + GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + + Intervals(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + LastTopUpBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + LinkToken(opts *bind.CallOpts) (common.Address, error) + + MinBalanceThresholdMultiplier(opts *bind.CallOpts) (uint8, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + PerformDataSizes(opts 
*bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + PerformGasToBurns(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + PreviousPerformBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + Registrar(opts *bind.CallOpts) (common.Address, error) + + Registry(opts *bind.CallOpts) (common.Address, error) + + TimeParamKey(opts *bind.CallOpts) (string, error) + + UpkeepTopUpCheckInterval(opts *bind.CallOpts) (*big.Int, error) + + UseArbitrumBlockNum(opts *bind.CallOpts) (bool, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AddFunds(opts *bind.TransactOpts, upkeepId *big.Int, amount *big.Int) (*types.Transaction, error) + + BatchCancelUpkeeps(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) + + BatchPreparingUpkeeps(opts *bind.TransactOpts, upkeepIds []*big.Int, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) + + BatchPreparingUpkeepsSimple(opts *bind.TransactOpts, upkeepIds []*big.Int, log uint8, selector uint8) (*types.Transaction, error) + + BatchRegisterUpkeeps(opts *bind.TransactOpts, number uint8, gasLimit uint32, triggerType uint8, triggerConfig []byte, amount *big.Int, checkGasToBurn *big.Int, performGasToBurn *big.Int) (*types.Transaction, error) + + BatchSendLogs(opts *bind.TransactOpts, log uint8) (*types.Transaction, error) + + BatchSetIntervals(opts *bind.TransactOpts, upkeepIds []*big.Int, interval uint32) (*types.Transaction, error) + + BatchUpdatePipelineData(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) + + BatchWithdrawLinks(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) + + BurnPerformGas(opts *bind.TransactOpts, upkeepId *big.Int, startGas *big.Int, blockNum *big.Int) (*types.Transaction, error) + + CheckUpkeep(opts *bind.TransactOpts, checkData []byte) (*types.Transaction, error) + + PerformUpkeep(opts *bind.TransactOpts, performData []byte) 
(*types.Transaction, error) + + SendLog(opts *bind.TransactOpts, upkeepId *big.Int, log uint8) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, newRegistrar common.Address) (*types.Transaction, error) + + SetFeeds(opts *bind.TransactOpts, _feeds []string) (*types.Transaction, error) + + SetInterval(opts *bind.TransactOpts, upkeepId *big.Int, _interval *big.Int) (*types.Transaction, error) + + SetParamKeys(opts *bind.TransactOpts, _feedParamKey string, _timeParamKey string) (*types.Transaction, error) + + SetPerformDataSize(opts *bind.TransactOpts, upkeepId *big.Int, value *big.Int) (*types.Transaction, error) + + SetUpkeepGasLimit(opts *bind.TransactOpts, upkeepId *big.Int, gasLimit uint32) (*types.Transaction, error) + + SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, cfg []byte) (*types.Transaction, error) + + TopUpFund(opts *bind.TransactOpts, upkeepId *big.Int, blockNum *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + UpdateLogTriggerConfig1(opts *bind.TransactOpts, upkeepId *big.Int, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) + + UpdateLogTriggerConfig2(opts *bind.TransactOpts, upkeepId *big.Int, cfg []byte) (*types.Transaction, error) + + UpdateUpkeepPipelineData(opts *bind.TransactOpts, upkeepId *big.Int, pipelineData []byte) (*types.Transaction, error) + + WithdrawLinks(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawLinks0(opts *bind.TransactOpts, upkeepId *big.Int) (*types.Transaction, error) + + Receive(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterLogEmitted(opts *bind.FilterOpts, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (*VerifiableLoadStreamsLookupUpkeepLogEmittedIterator, error) + + WatchLogEmitted(opts *bind.WatchOpts, sink chan<- *VerifiableLoadStreamsLookupUpkeepLogEmitted, 
upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (event.Subscription, error) + + ParseLogEmitted(log types.Log) (*VerifiableLoadStreamsLookupUpkeepLogEmitted, error) + + FilterLogEmittedAgain(opts *bind.FilterOpts, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (*VerifiableLoadStreamsLookupUpkeepLogEmittedAgainIterator, error) + + WatchLogEmittedAgain(opts *bind.WatchOpts, sink chan<- *VerifiableLoadStreamsLookupUpkeepLogEmittedAgain, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (event.Subscription, error) + + ParseLogEmittedAgain(log types.Log) (*VerifiableLoadStreamsLookupUpkeepLogEmittedAgain, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VerifiableLoadStreamsLookupUpkeepOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifiableLoadStreamsLookupUpkeepOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VerifiableLoadStreamsLookupUpkeepOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VerifiableLoadStreamsLookupUpkeepOwnershipTransferred, error) + + FilterUpkeepTopUp(opts *bind.FilterOpts) (*VerifiableLoadStreamsLookupUpkeepUpkeepTopUpIterator, error) + + WatchUpkeepTopUp(opts *bind.WatchOpts, sink chan<- *VerifiableLoadStreamsLookupUpkeepUpkeepTopUp) (event.Subscription, error) + + ParseUpkeepTopUp(log types.Log) (*VerifiableLoadStreamsLookupUpkeepUpkeepTopUp, error) + + ParseLog(log types.Log) 
(generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/verifiable_load_upkeep_wrapper/verifiable_load_upkeep_wrapper.go b/core/gethwrappers/generated/verifiable_load_upkeep_wrapper/verifiable_load_upkeep_wrapper.go new file mode 100644 index 00000000..fdbb65ba --- /dev/null +++ b/core/gethwrappers/generated/verifiable_load_upkeep_wrapper/verifiable_load_upkeep_wrapper.go @@ -0,0 +1,2432 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package verifiable_load_upkeep_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type KeeperRegistryBase21UpkeepInfo struct { + Target common.Address + PerformGas uint32 + CheckData []byte + Balance *big.Int + Admin common.Address + MaxValidBlocknumber uint64 + LastPerformedBlockNumber uint32 + AmountSpent *big.Int + Paused bool + OffchainConfig []byte +} + +var VerifiableLoadUpkeepMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"contractAutomationRegistrar2_1\",\"name\":\"_registrar\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"_useArb\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"}],\"name\":\"LogEmitted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"}],\"name\":\"LogEmittedAgain\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"}],\"name\":\"UpkeepTopUp\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"BUCKET_SIZE\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}]
,\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"addFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"addLinkAmount\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"}],\"name\":\"batchCancelUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"topic0\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic1\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic2\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic3\",\"type\":\"bytes32\"}],\"name\":\"batchPreparingUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"},{\"internalType\":\"uint8\",\"name\":\"log\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uint8\"}],\"name\":\"batchPreparingUpkeepsSimple\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"number\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"triggerType\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"triggerConfig\",\"type\":\"bytes\"},{\"in
ternalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"checkGasToBurn\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"performGasToBurn\",\"type\":\"uint256\"}],\"name\":\"batchRegisterUpkeeps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"log\",\"type\":\"uint8\"}],\"name\":\"batchSendLogs\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"},{\"internalType\":\"uint32\",\"name\":\"interval\",\"type\":\"uint32\"}],\"name\":\"batchSetIntervals\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"}],\"name\":\"batchUpdatePipelineData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"upkeepIds\",\"type\":\"uint256[]\"}],\"name\":\"batchWithdrawLinks\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"bucketedDelays\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"buckets\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"startGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"blockNum\",\"
type\":\"uint256\"}],\"name\":\"burnPerformGas\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"checkGasToBurns\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"}],\"name\":\"checkUpkeep\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"counters\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"delays\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"dummyMap\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"eligible\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"emittedAgainSig\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"emittedSig\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"fu
nction\"},{\"inputs\":[],\"name\":\"feedParamKey\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"feedsHex\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"firstPerformBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"gasLimits\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveUpkeepIDsDeployedByThisContract\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getAllActiveUpkeepIDsOnRegistry\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"getBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"bucket\",\"type\":\"uint16\"}],\"name\"
:\"getBucketedDelays\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getBucketedDelaysLength\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getDelays\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getDelaysLength\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepID\",\"type\":\"uint256\"}],\"name\":\"getForwarder\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"topic0\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic1\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic2\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic3\",\"type\":\"bytes32\"}],\"name\":\"getLogTriggerConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"logTrigger\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getMinBalanceForUpkeep\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\
",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"p\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"}],\"name\":\"getPxDelayLastNPerforms\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"bucket\",\"type\":\"uint16\"}],\"name\":\"getSumDelayInBucket\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"}],\"name\":\"getSumDelayLastNPerforms\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getTriggerType\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepInfo\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"performGas\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"checkData\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"maxValidBlocknum
ber\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"lastPerformedBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"amountSpent\",\"type\":\"uint96\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"internalType\":\"structKeeperRegistryBase2_1.UpkeepInfo\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepPrivilegeConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"getUpkeepTriggerConfig\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"intervals\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"lastTopUpBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"linkToken\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"minBalanceThresholdMultiplier\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\
"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"performDataSizes\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"performGasToBurns\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"performData\",\"type\":\"bytes\"}],\"name\":\"performUpkeep\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"previousPerformBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"registrar\",\"outputs\":[{\"internalType\":\"contractAutomationRegistrar2_1\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"registry\",\"outputs\":[{\"internalType\":\"contractIKeeperRegistryMaster\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint8\",\"name\":\"log\",\"type\":\"uint8\"}],\"name\":\"sendLog\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractAutomationRegistrar2_1\",\"name\":\"newRegistrar\",\"type\":\"address\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string[]\",\"name\":\"_feeds\",\"type\":\"string[]\"}],\"name\":\"setFeeds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\
"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_interval\",\"type\":\"uint256\"}],\"name\":\"setInterval\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"_feedParamKey\",\"type\":\"string\"},{\"internalType\":\"string\",\"name\":\"_timeParamKey\",\"type\":\"string\"}],\"name\":\"setParamKeys\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"setPerformDataSize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"name\":\"setUpkeepGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"cfg\",\"type\":\"bytes\"}],\"name\":\"setUpkeepPrivilegeConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"timeParamKey\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"}],\"name\":\"topUpFund\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\
"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"selector\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"topic0\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic1\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic2\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"topic3\",\"type\":\"bytes32\"}],\"name\":\"updateLogTriggerConfig1\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"cfg\",\"type\":\"bytes\"}],\"name\":\"updateLogTriggerConfig2\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"pipelineData\",\"type\":\"bytes\"}],\"name\":\"updateUpkeepPipelineData\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upkeepTopUpCheckInterval\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"useArbitrumBlockNum\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawLinks\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"upkeepId\",\"type\":\"uint256\"}],\"name\":\"withdrawLinks\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", + Bin: 
"0x7f97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf086080527fc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d60a0526005601455601580546001600160681b0319166c140000000002c68af0bb140000179055606460e0526101c060405260426101408181526101009182919062005d0761016039815260200160405180608001604052806042815260200162005d49604291399052620000be906016906002620003c7565b506040805180820190915260098152680cccacac892c890caf60bb1b6020820152601790620000ee908262000543565b5060408051808201909152600b81526a313637b1b5a73ab6b132b960a91b602082015260189062000120908262000543565b503480156200012e57600080fd5b5060405162005d8b38038062005d8b833981016040819052620001519162000625565b81813380600081620001aa5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620001dd57620001dd816200031c565b5050601180546001600160a01b0319166001600160a01b038516908117909155604080516330fe427560e21b815281516000945063c3f909d4926004808401939192918290030181865afa1580156200023a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062000260919062000668565b50601380546001600160a01b0319166001600160a01b038381169190911790915560115460408051631b6b6d2360e01b81529051939450911691631b6b6d23916004808201926020929091908290030181865afa158015620002c6573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620002ec919062000699565b601280546001600160a01b0319166001600160a01b039290921691909117905550151560c05250620006c0915050565b336001600160a01b03821603620003765760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620001a1565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b8280548282559060005260206000209081019282156200041257916020028
2015b8281111562000412578251829062000401908262000543565b5091602001919060010190620003e8565b506200042092915062000424565b5090565b80821115620004205760006200043b828262000445565b5060010162000424565b5080546200045390620004b4565b6000825580601f1062000464575050565b601f01602090049060005260206000209081019062000484919062000487565b50565b5b8082111562000420576000815560010162000488565b634e487b7160e01b600052604160045260246000fd5b600181811c90821680620004c957607f821691505b602082108103620004ea57634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200053e57600081815260208120601f850160051c81016020861015620005195750805b601f850160051c820191505b818110156200053a5782815560010162000525565b5050505b505050565b81516001600160401b038111156200055f576200055f6200049e565b6200057781620005708454620004b4565b84620004f0565b602080601f831160018114620005af5760008415620005965750858301515b600019600386901b1c1916600185901b1785556200053a565b600085815260208120601f198616915b82811015620005e057888601518255948401946001909101908401620005bf565b5085821015620005ff5787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b6001600160a01b03811681146200048457600080fd5b600080604083850312156200063957600080fd5b825162000646816200060f565b602084015190925080151581146200065d57600080fd5b809150509250929050565b600080604083850312156200067c57600080fd5b825162000689816200060f565b6020939093015192949293505050565b600060208284031215620006ac57600080fd5b8151620006b9816200060f565b9392505050565b60805160a05160c05160e0516155f1620007166000396000818161054d0152611f250152600081816109730152613b1101526000818161080c015261355f015260008181610d3e015261353401526155f16000f3fe6080604052600436106104ba5760003560e01c806379ea994311610279578063a6b594751161015e578063d6051a72116100d6578063e45530831161008a578063fa333dfb1161006f578063fa333dfb14610f4d578063fba7ffa314611000578063fcdc1f631461102d57600080fd5b8063e455308314610f17578063f2fde38b14610f2d57600080fd5b8063daee1aeb116100bb578063daee1aeb14610eaa578063dbef701e14610eca578063e0114adb14610eea57600
080fd5b8063d6051a7214610e6a578063da6cba4714610e8a57600080fd5b8063b657bc9c1161012d578063c041982211610112578063c041982214610e15578063c98f10b014610e35578063d4c2490014610e4a57600080fd5b8063b657bc9c14610dd5578063becde0e114610df557600080fd5b8063a6b5947514610d60578063a72aa27e14610d80578063af953a4a14610da0578063afb28d1f14610dc057600080fd5b8063924ca578116101f15780639b429354116101c05780639d385eaa116101a55780639d385eaa14610cec5780639d6f1cc714610d0c578063a654824814610d2c57600080fd5b80639b42935414610c8e5780639b51fb0d14610cbb57600080fd5b8063924ca57814610c04578063948108f714610c2457806396cebc7c14610c445780639ac542eb14610c6457600080fd5b80638340507c11610248578063873c75861161022d578063873c758614610b8c5780638da5cb5b14610bac5780638fcb3fba14610bd757600080fd5b80638340507c14610b4c57806386e330af14610b6c57600080fd5b806379ea994314610abf5780637b10399914610adf5780637e7a46dc14610b0c5780638243444a14610b2c57600080fd5b806345d2ec171161039f578063636092e8116103175780637145f11b116102e657806376721303116102cb5780637672130314610a5d578063776898c814610a8a57806379ba509714610aaa57600080fd5b80637145f11b14610a0057806373644cce14610a3057600080fd5b8063636092e81461093c578063642f6cef1461096157806369cdbadb146109a55780636e04ff0d146109d257600080fd5b806351c98be31161036e5780635d4ee7f3116103535780635d4ee7f3146108da5780635f17e616146108ef57806360457ff51461090f57600080fd5b806351c98be31461088d57806357970e93146108ad57600080fd5b806345d2ec17146107cd57806346982093146107fa57806346e7a63e1461082e5780635147cd591461085b57600080fd5b806320e3dbd4116104325780632a9032d311610401578063328ffd11116103e6578063328ffd11146107605780633ebe8d6c1461078d5780634585e33b146107ad57600080fd5b80632a9032d3146106ee5780632b20e3971461070e57600080fd5b806320e3dbd4146106615780632636aecf1461068157806328c4b57b146106a157806329e0a841146106c157600080fd5b806319d97a94116104895780631e0104391161046e5780631e010439146105cf578063206c32e81461060c578063207b65161461064157600080fd5b806319d97a94146105825780631cdde251146105af57600080fd5b806306c1cc00146104c6578063077ac621146104e8578
0630b7d33e61461051b57806312c550271461053b57600080fd5b366104c157005b600080fd5b3480156104d257600080fd5b506104e66104e1366004614293565b61105a565b005b3480156104f457600080fd5b50610508610503366004614346565b6112a9565b6040519081526020015b60405180910390f35b34801561052757600080fd5b506104e661053636600461437b565b6112e7565b34801561054757600080fd5b5061056f7f000000000000000000000000000000000000000000000000000000000000000081565b60405161ffff9091168152602001610512565b34801561058e57600080fd5b506105a261059d3660046143c2565b611375565b6040516105129190614449565b3480156105bb57600080fd5b506104e66105ca36600461447e565b611432565b3480156105db57600080fd5b506105ef6105ea3660046143c2565b61156f565b6040516bffffffffffffffffffffffff9091168152602001610512565b34801561061857600080fd5b5061062c6106273660046144e3565b611604565b60408051928352602083019190915201610512565b34801561064d57600080fd5b506105a261065c3660046143c2565b611687565b34801561066d57600080fd5b506104e661067c36600461450f565b6116df565b34801561068d57600080fd5b506104e661069c366004614571565b6118a9565b3480156106ad57600080fd5b506105086106bc3660046145eb565b611b72565b3480156106cd57600080fd5b506106e16106dc3660046143c2565b611bdd565b6040516105129190614617565b3480156106fa57600080fd5b506104e6610709366004614758565b611ce2565b34801561071a57600080fd5b5060115461073b9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610512565b34801561076c57600080fd5b5061050861077b3660046143c2565b60036020526000908152604090205481565b34801561079957600080fd5b506105086107a83660046143c2565b611dc3565b3480156107b957600080fd5b506104e66107c83660046147dc565b611e2c565b3480156107d957600080fd5b506107ed6107e83660046144e3565b61203b565b6040516105129190614812565b34801561080657600080fd5b506105087f000000000000000000000000000000000000000000000000000000000000000081565b34801561083a57600080fd5b506105086108493660046143c2565b600a6020526000908152604090205481565b34801561086757600080fd5b5061087b6108763660046143c2565b6120aa565b60405160ff909116815260200
1610512565b34801561089957600080fd5b506104e66108a8366004614856565b61213e565b3480156108b957600080fd5b5060125461073b9073ffffffffffffffffffffffffffffffffffffffff1681565b3480156108e657600080fd5b506104e66121e2565b3480156108fb57600080fd5b506104e661090a3660046148ad565b61231d565b34801561091b57600080fd5b5061050861092a3660046143c2565b60076020526000908152604090205481565b34801561094857600080fd5b506015546105ef906bffffffffffffffffffffffff1681565b34801561096d57600080fd5b506109957f000000000000000000000000000000000000000000000000000000000000000081565b6040519015158152602001610512565b3480156109b157600080fd5b506105086109c03660046143c2565b60086020526000908152604090205481565b3480156109de57600080fd5b506109f26109ed3660046147dc565b6123ea565b6040516105129291906148cf565b348015610a0c57600080fd5b50610995610a1b3660046143c2565b600b6020526000908152604090205460ff1681565b348015610a3c57600080fd5b50610508610a4b3660046143c2565b6000908152600c602052604090205490565b348015610a6957600080fd5b50610508610a783660046143c2565b60046020526000908152604090205481565b348015610a9657600080fd5b50610995610aa53660046143c2565b612517565b348015610ab657600080fd5b506104e6612569565b348015610acb57600080fd5b5061073b610ada3660046143c2565b61266b565b348015610aeb57600080fd5b5060135461073b9073ffffffffffffffffffffffffffffffffffffffff1681565b348015610b1857600080fd5b506104e6610b273660046148ea565b6126ff565b348015610b3857600080fd5b506104e6610b473660046148ea565b612790565b348015610b5857600080fd5b506104e6610b67366004614936565b6127ea565b348015610b7857600080fd5b506104e6610b873660046149b4565b612808565b348015610b9857600080fd5b506107ed610ba73660046148ad565b61281b565b348015610bb857600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff1661073b565b348015610be357600080fd5b50610508610bf23660046143c2565b60056020526000908152604090205481565b348015610c1057600080fd5b506104e6610c1f3660046148ad565b6128d8565b348015610c3057600080fd5b506104e6610c3f366004614a65565b612b1d565b348015610c5057600080fd5b506104e6610c5f366004614a95565b612c35565b348015610c7057600080fd5
b5060155461087b906c01000000000000000000000000900460ff1681565b348015610c9a57600080fd5b506104e6610ca93660046148ad565b60009182526009602052604090912055565b348015610cc757600080fd5b5061056f610cd63660046143c2565b600e6020526000908152604090205461ffff1681565b348015610cf857600080fd5b506107ed610d073660046143c2565b612e3f565b348015610d1857600080fd5b506105a2610d273660046143c2565b612ea1565b348015610d3857600080fd5b506105087f000000000000000000000000000000000000000000000000000000000000000081565b348015610d6c57600080fd5b506104e6610d7b3660046145eb565b612f4d565b348015610d8c57600080fd5b506104e6610d9b366004614ab2565b612fb6565b348015610dac57600080fd5b506104e6610dbb3660046143c2565b613061565b348015610dcc57600080fd5b506105a26130e7565b348015610de157600080fd5b506105ef610df03660046143c2565b6130f4565b348015610e0157600080fd5b506104e6610e10366004614758565b61314c565b348015610e2157600080fd5b506107ed610e303660046148ad565b6131e6565b348015610e4157600080fd5b506105a26132e3565b348015610e5657600080fd5b506104e6610e65366004614ad7565b6132f0565b348015610e7657600080fd5b5061062c610e853660046148ad565b61336f565b348015610e9657600080fd5b506104e6610ea5366004614afc565b6133d8565b348015610eb657600080fd5b506104e6610ec5366004614758565b61373f565b348015610ed657600080fd5b50610508610ee53660046148ad565b61380a565b348015610ef657600080fd5b50610508610f053660046143c2565b60096020526000908152604090205481565b348015610f2357600080fd5b5061050860145481565b348015610f3957600080fd5b506104e6610f4836600461450f565b61383b565b348015610f5957600080fd5b506105a2610f68366004614b64565b6040805160c0808201835273ffffffffffffffffffffffffffffffffffffffff9890981680825260ff97881660208084019182528385019889526060808501988952608080860198895260a095860197885286519283019490945291519099168985015296519688019690965293519486019490945290519184019190915251828401528051808303909301835260e0909101905290565b34801561100c57600080fd5b5061050861101b3660046143c2565b60066020526000908152604090205481565b34801561103957600080fd5b506105086110483660046143c2565b6002602052600090815260409020548
1565b6040805161018081018252600461014082019081527f746573740000000000000000000000000000000000000000000000000000000061016083015281528151602081810184526000808352818401929092523083850181905263ffffffff8b166060850152608084015260ff808a1660a08501528451808301865283815260c085015260e0840189905284519182019094529081526101008201526bffffffffffffffffffffffff8516610120820152601254601154919273ffffffffffffffffffffffffffffffffffffffff9182169263095ea7b3921690611140908c1688614bec565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e085901b16815273ffffffffffffffffffffffffffffffffffffffff90921660048301526bffffffffffffffffffffffff1660248201526044016020604051808303816000875af11580156111be573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906111e29190614c30565b5060008860ff1667ffffffffffffffff81111561120157611201614135565b60405190808252806020026020018201604052801561122a578160200160208202803683370190505b50905060005b8960ff168160ff16101561129d5760006112498461384f565b905080838360ff168151811061126157611261614c4b565b6020908102919091018101919091526000918252600881526040808320889055600790915290208490558061129581614c7a565b915050611230565b50505050505050505050565b600d60205282600052604060002060205281600052604060002081815481106112d157600080fd5b9060005260206000200160009250925050505481565b6013546040517f0b7d33e600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690630b7d33e69061133f9085908590600401614c99565b600060405180830381600087803b15801561135957600080fd5b505af115801561136d573d6000803e3d6000fd5b505050505050565b6013546040517f19d97a940000000000000000000000000000000000000000000000000000000081526004810183905260609173ffffffffffffffffffffffffffffffffffffffff16906319d97a94906024015b600060405180830381865afa1580156113e6573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261142c9190810190614cff565b92915050565b6013546040517ffa333dfb00000
000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff888116600483015260ff8816602483015260448201879052606482018690526084820185905260a4820184905290911690634ee88d35908990309063fa333dfb9060c401600060405180830381865afa1580156114d1573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526115179190810190614cff565b6040518363ffffffff1660e01b8152600401611534929190614c99565b600060405180830381600087803b15801561154e57600080fd5b505af1158015611562573d6000803e3d6000fd5b5050505050505050505050565b6013546040517f1e0104390000000000000000000000000000000000000000000000000000000081526004810183905260009173ffffffffffffffffffffffffffffffffffffffff1690631e010439906024015b602060405180830381865afa1580156115e0573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061142c9190614d3f565b6000828152600d6020908152604080832061ffff85168452825280832080548251818502810185019093528083528493849392919083018282801561166857602002820191906000526020600020905b815481526020019060010190808311611654575b5050505050905061167a81825161391d565b92509250505b9250929050565b6013546040517f207b65160000000000000000000000000000000000000000000000000000000081526004810183905260609173ffffffffffffffffffffffffffffffffffffffff169063207b6516906024016113c9565b601180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8316908117909155604080517fc3f909d400000000000000000000000000000000000000000000000000000000815281516000939263c3f909d492600480820193918290030181865afa158015611775573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906117999190614d67565b50601380547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691909117909155601154604080517f1b6b6d230000000000000000000000000000000000000000000000000000000081529051939450911691631b6b6d2391600480820192602092909190829003018
1865afa15801561183c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906118609190614d95565b601280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff929092169190911790555050565b8560005b81811015611b675760008989838181106118c9576118c9614c4b565b9050602002013590503073ffffffffffffffffffffffffffffffffffffffff16637e7a46dc828360405160200161190291815260200190565b6040516020818303038152906040526040518363ffffffff1660e01b815260040161192e929190614c99565b600060405180830381600087803b15801561194857600080fd5b505af115801561195c573d6000803e3d6000fd5b50506013546040517f5147cd59000000000000000000000000000000000000000000000000000000008152600481018590526000935073ffffffffffffffffffffffffffffffffffffffff9091169150635147cd5990602401602060405180830381865afa1580156119d2573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906119f69190614db2565b90508060ff16600103611b52576040517ffa333dfb000000000000000000000000000000000000000000000000000000008152306004820181905260ff8b166024830152604482018a9052606482018890526084820188905260a4820187905260009163fa333dfb9060c401600060405180830381865afa158015611a7f573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052611ac59190810190614cff565b6013546040517f4ee88d3500000000000000000000000000000000000000000000000000000000815291925073ffffffffffffffffffffffffffffffffffffffff1690634ee88d3590611b1e9086908590600401614c99565b600060405180830381600087803b158015611b3857600080fd5b505af1158015611b4c573d6000803e3d6000fd5b50505050505b50508080611b5f90614dcf565b9150506118ad565b505050505050505050565b6000838152600c602090815260408083208054825181850281018501909352808352611bd393830182828015611bc757602002820191906000526020600020905b815481526020019060010190808311611bb3575b505050505084846139a2565b90505b9392505050565b604080516101408101825260008082526020820181905260609282018390528282018190526080820181905260a0820181905
260c0820181905260e082018190526101008201526101208101919091526013546040517fc7c3a19a0000000000000000000000000000000000000000000000000000000081526004810184905273ffffffffffffffffffffffffffffffffffffffff9091169063c7c3a19a90602401600060405180830381865afa158015611c9c573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261142c9190810190614e2a565b8060005b818160ff161015611dbd5760135473ffffffffffffffffffffffffffffffffffffffff1663c8048022858560ff8516818110611d2457611d24614c4b565b905060200201356040518263ffffffff1660e01b8152600401611d4991815260200190565b600060405180830381600087803b158015611d6357600080fd5b505af1158015611d77573d6000803e3d6000fd5b50505050611daa84848360ff16818110611d9357611d93614c4b565b90506020020135600f613b0190919063ffffffff16565b5080611db581614c7a565b915050611ce6565b50505050565b6000818152600e602052604081205461ffff1681805b8261ffff168161ffff1611611e24576000858152600d6020908152604080832061ffff85168452909152902054611e109083614f49565b915080611e1c81614f5c565b915050611dd9565b509392505050565b60005a90506000611e3f8385018561437b565b5060008181526005602090815260408083205460049092528220549293509190611e67613b0d565b905082600003611e87576000848152600560205260409020819055611fe2565b600084815260036020526040812054611ea08484614f7d565b611eaa9190614f7d565b6000868152600e6020908152604080832054600d835281842061ffff909116808552908352818420805483518186028101860190945280845295965090949192909190830182828015611f1c57602002820191906000526020600020905b815481526020019060010190808311611f08575b505050505090507f000000000000000000000000000000000000000000000000000000000000000061ffff16815103611f975781611f5981614f5c565b6000898152600e6020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001661ffff83161790559250505b506000868152600d6020908152604080832061ffff909416835292815282822080546001818101835591845282842001859055888352600c8252928220805493840181558252902001555b600084815260066020526040812054611ffc9060016
14f49565b600086815260066020908152604080832084905560049091529020839055905061202685836128d8565b612031858784612f4d565b5050505050505050565b6000828152600d6020908152604080832061ffff8516845282529182902080548351818402810184019094528084526060939283018282801561209d57602002820191906000526020600020905b815481526020019060010190808311612089575b5050505050905092915050565b6013546040517f5147cd590000000000000000000000000000000000000000000000000000000081526004810183905260009173ffffffffffffffffffffffffffffffffffffffff1690635147cd5990602401602060405180830381865afa15801561211a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061142c9190614db2565b8160005b818110156121db5730635f17e61686868481811061216257612162614c4b565b90506020020135856040518363ffffffff1660e01b815260040161219692919091825263ffffffff16602082015260400190565b600060405180830381600087803b1580156121b057600080fd5b505af11580156121c4573d6000803e3d6000fd5b5050505080806121d390614dcf565b915050612142565b5050505050565b6121ea613baf565b6012546040517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015260009173ffffffffffffffffffffffffffffffffffffffff16906370a0823190602401602060405180830381865afa158015612259573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061227d9190614f90565b6012546040517fa9059cbb0000000000000000000000000000000000000000000000000000000081523360048201526024810183905291925073ffffffffffffffffffffffffffffffffffffffff169063a9059cbb906044016020604051808303816000875af11580156122f5573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906123199190614c30565b5050565b60008281526003602090815260408083208490556005825280832083905560068252808320839055600c909152812061235591614034565b6000828152600e602052604081205461ffff16905b8161ffff168161ffff16116123b1576000848152600d6020908152604080832061ffff85168452909152812061239f91614034565b806123a981614f5c565b91505061236a565b5050506000908152600e6020526040902080547ffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffff0000169055565b6000606060005a90506000612401858701876143c2565b60008181526009602090815260408083205460089092528220549293509190838367ffffffffffffffff81111561243a5761243a614135565b6040519080825280601f01601f191660200182016040528015612464576020820181803683370190505b50604051602001612476929190614c99565b60405160208183030381529060405290506000612491613b0d565b9050600061249e86612517565b90505b835a6124ad9089614f7d565b6124b990612710614f49565b10156125075781406000908152600b6020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055816124ff81614fa9565b9250506124a1565b9a91995090975050505050505050565b600081815260056020526040812054810361253457506001919050565b600082815260036020908152604080832054600490925290912054612557613b0d565b6125619190614f7d565b101592915050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146125ef576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6013546040517f79ea99430000000000000000000000000000000000000000000000000000000081526004810183905260009173ffffffffffffffffffffffffffffffffffffffff16906379ea994390602401602060405180830381865afa1580156126db573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061142c9190614d95565b6013546040517fcd7f71b500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091169063cd7f71b59061275990869086908690600401614fde565b600060405180830381600087803b15801561277357600080fd5b505af1158015612787573d6000803e3d6000fd5b50505050505050565b6013546040517f4ee88d3500000000000000000000000000000000000000000000000000000000815273fffffffffffffffffffffffffff
fffffffffffff90911690634ee88d359061275990869086908690600401614fde565b60176127f683826150cb565b50601861280382826150cb565b505050565b8051612319906016906020840190614052565b6013546040517f06e3b632000000000000000000000000000000000000000000000000000000008152600481018490526024810183905260609173ffffffffffffffffffffffffffffffffffffffff16906306e3b63290604401600060405180830381865afa158015612892573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052611bd691908101906151e5565b6014546000838152600260205260409020546128f49083614f7d565b1115612319576013546040517fc7c3a19a0000000000000000000000000000000000000000000000000000000081526004810184905260009173ffffffffffffffffffffffffffffffffffffffff169063c7c3a19a90602401600060405180830381865afa15801561296a573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526129b09190810190614e2a565b6013546040517fb657bc9c0000000000000000000000000000000000000000000000000000000081526004810186905291925060009173ffffffffffffffffffffffffffffffffffffffff9091169063b657bc9c90602401602060405180830381865afa158015612a25573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612a499190614d3f565b601554909150612a6d9082906c01000000000000000000000000900460ff16614bec565b6bffffffffffffffffffffffff1682606001516bffffffffffffffffffffffff161015611dbd57601554612ab09085906bffffffffffffffffffffffff16612b1d565b60008481526002602090815260409182902085905560155482518781526bffffffffffffffffffffffff909116918101919091529081018490527f49d4100ab0124eb4a9a65dc4ea08d6412a43f6f05c49194983f5b322bcc0a5c09060600160405180910390a150505050565b6012546013546040517f095ea7b300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff91821660048201526bffffffffffffffffffffffff8416602482015291169063095ea7b3906044016020604051808303816000875af1158015612ba5573d6000803e3d6000fd5b505050506040513d601f1
9601f82011682018060405250810190612bc99190614c30565b506013546040517f948108f7000000000000000000000000000000000000000000000000000000008152600481018490526bffffffffffffffffffffffff8316602482015273ffffffffffffffffffffffffffffffffffffffff9091169063948108f79060440161133f565b6040517fc04198220000000000000000000000000000000000000000000000000000000081526000600482018190526024820181905290309063c041982290604401600060405180830381865afa158015612c94573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052612cda91908101906151e5565b80519091506000612ce9613b0d565b905060005b828110156121db576000848281518110612d0a57612d0a614c4b565b60209081029190910101516013546040517f5147cd590000000000000000000000000000000000000000000000000000000081526004810183905291925060009173ffffffffffffffffffffffffffffffffffffffff90911690635147cd5990602401602060405180830381865afa158015612d8a573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612dae9190614db2565b90508060ff16600103612e2a578660ff16600003612dfa576040513090859084907f97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf0890600090a4612e2a565b6040513090859084907fc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d90600090a45b50508080612e3790614dcf565b915050612cee565b6000818152600c6020908152604091829020805483518184028101840190945280845260609392830182828015612e9557602002820191906000526020600020905b815481526020019060010190808311612e81575b50505050509050919050565b60168181548110612eb157600080fd5b906000526020600020016000915090508054612ecc90615032565b80601f0160208091040260200160405190810160405280929190818152602001828054612ef890615032565b8015612f455780601f10612f1a57610100808354040283529160200191612f45565b820191906000526020600020905b815481529060010190602001808311612f2857829003601f168201915b505050505081565b6000838152600760205260409020545b805a612f699085614f7d565b612f7590612710614f49565b1015611dbd5781406000908152600b6020526040902080547ffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffff00169055612f5d565b6013546040517fa72aa27e0000000000000000000000000000000000000000000000000000000081526004810184905263ffffffff8316602482015273ffffffffffffffffffffffffffffffffffffffff9091169063a72aa27e90604401600060405180830381600087803b15801561302e57600080fd5b505af1158015613042573d6000803e3d6000fd5b505050600092835250600a602052604090912063ffffffff9091169055565b6013546040517f744bfe610000000000000000000000000000000000000000000000000000000081526004810183905230602482015273ffffffffffffffffffffffffffffffffffffffff9091169063744bfe6190604401600060405180830381600087803b1580156130d357600080fd5b505af11580156121db573d6000803e3d6000fd5b60178054612ecc90615032565b6013546040517fb657bc9c0000000000000000000000000000000000000000000000000000000081526004810183905260009173ffffffffffffffffffffffffffffffffffffffff169063b657bc9c906024016115c3565b8060005b818163ffffffff161015611dbd573063af953a4a858563ffffffff851681811061317c5761317c614c4b565b905060200201356040518263ffffffff1660e01b81526004016131a191815260200190565b600060405180830381600087803b1580156131bb57600080fd5b505af11580156131cf573d6000803e3d6000fd5b5050505080806131de90615276565b915050613150565b606060006131f4600f613c32565b905080841061322f576040517f1390f2a100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b82600003613244576132418482614f7d565b92505b60008367ffffffffffffffff81111561325f5761325f614135565b604051908082528060200260200182016040528015613288578160200160208202803683370190505b50905060005b848110156132da576132ab6132a38288614f49565b600f90613c3c565b8282815181106132bd576132bd614c4b565b6020908102919091010152806132d281614dcf565b91505061328e565b50949350505050565b60188054612ecc90615032565b60006132fa613b0d565b90508160ff1660000361333b576040513090829085907f97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf0890600090a4505050565b6040513090829085907fc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d90600090a4505050565b6000828152600c6020908152604080832
08054825181850281018501909352808352849384939291908301828280156133c757602002820191906000526020600020905b8154815260200190600101908083116133b3575b5050505050905061167a818561391d565b8260005b8181101561136d5760008686838181106133f8576133f8614c4b565b9050602002013590503073ffffffffffffffffffffffffffffffffffffffff16637e7a46dc828360405160200161343191815260200190565b6040516020818303038152906040526040518363ffffffff1660e01b815260040161345d929190614c99565b600060405180830381600087803b15801561347757600080fd5b505af115801561348b573d6000803e3d6000fd5b50506013546040517f5147cd59000000000000000000000000000000000000000000000000000000008152600481018590526000935073ffffffffffffffffffffffffffffffffffffffff9091169150635147cd5990602401602060405180830381865afa158015613501573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906135259190614db2565b90508060ff1660010361372a577f000000000000000000000000000000000000000000000000000000000000000060ff87161561357f57507f00000000000000000000000000000000000000000000000000000000000000005b60003073ffffffffffffffffffffffffffffffffffffffff1663fa333dfb308985886040516020016135b391815260200190565b6040516020818303038152906040526135cb9061528f565b60405160e086901b7fffffffff0000000000000000000000000000000000000000000000000000000016815273ffffffffffffffffffffffffffffffffffffffff909416600485015260ff90921660248401526044830152606482015260006084820181905260a482015260c401600060405180830381865afa158015613656573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261369c9190810190614cff565b6013546040517f4ee88d3500000000000000000000000000000000000000000000000000000000815291925073ffffffffffffffffffffffffffffffffffffffff1690634ee88d35906136f59087908590600401614c99565b600060405180830381600087803b15801561370f57600080fd5b505af1158015613723573d6000803e3d6000fd5b5050505050505b5050808061373790614dcf565b9150506133dc565b8060005b81811015611dbd57600084848381811061375f5761375f614c4b565b90506020020135905
03073ffffffffffffffffffffffffffffffffffffffff16637e7a46dc828360405160200161379891815260200190565b6040516020818303038152906040526040518363ffffffff1660e01b81526004016137c4929190614c99565b600060405180830381600087803b1580156137de57600080fd5b505af11580156137f2573d6000803e3d6000fd5b5050505050808061380290614dcf565b915050613743565b600c602052816000526040600020818154811061382657600080fd5b90600052602060002001600091509150505481565b613843613baf565b61384c81613c48565b50565b6011546040517f3f678e11000000000000000000000000000000000000000000000000000000008152600091829173ffffffffffffffffffffffffffffffffffffffff90911690633f678e11906138aa9086906004016152d1565b6020604051808303816000875af11580156138c9573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906138ed9190614f90565b90506138fa600f82613d3d565b506060909201516000838152600a6020526040902063ffffffff90911690555090565b8151600090819081908415806139335750808510155b1561393c578094505b60008092505b85831015613998578660016139578585614f7d565b6139619190614f7d565b8151811061397157613971614c4b565b6020026020010151816139849190614f49565b90508261399081614dcf565b935050613942565b9694955050505050565b825160009081908315806139b65750808410155b156139bf578093505b60008467ffffffffffffffff8111156139da576139da614135565b604051908082528060200260200182016040528015613a03578160200160208202803683370190505b509050600092505b84831015613a7157866001613a208585614f7d565b613a2a9190614f7d565b81518110613a3a57613a3a614c4b565b6020026020010151818481518110613a5457613a54614c4b565b602090810291909101015282613a6981614dcf565b935050613a0b565b613a8a81600060018451613a859190614f7d565b613d49565b85606403613ac3578060018251613aa19190614f7d565b81518110613ab157613ab1614c4b565b60200260200101519350505050611bd6565b806064825188613ad39190615423565b613add919061548f565b81518110613aed57613aed614c4b565b602002602001015193505050509392505050565b6000611bd68383613ec1565b60007f000000000000000000000000000000000000000000000000000000000000000015613baa57606473ffffffffffffffffffffffffffffffffffffffff1663a3b
1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa158015613b81573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613ba59190614f90565b905090565b504390565b60005473ffffffffffffffffffffffffffffffffffffffff163314613c30576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016125e6565b565b600061142c825490565b6000611bd68383613fbb565b3373ffffffffffffffffffffffffffffffffffffffff821603613cc7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016125e6565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000611bd68383613fe5565b8181808203613d59575050505050565b6000856002613d6887876154a3565b613d7291906154c3565b613d7c908761552b565b81518110613d8c57613d8c614c4b565b602002602001015190505b818313613e9b575b80868481518110613db257613db2614c4b565b60200260200101511015613dd25782613dca81615553565b935050613d9f565b858281518110613de457613de4614c4b565b6020026020010151811015613e055781613dfd81615584565b925050613dd2565b818313613e9657858281518110613e1e57613e1e614c4b565b6020026020010151868481518110613e3857613e38614c4b565b6020026020010151878581518110613e5257613e52614c4b565b60200260200101888581518110613e6b57613e6b614c4b565b60209081029190910101919091525282613e8481615553565b9350508180613e9290615584565b9250505b613d97565b81851215613eae57613eae868684613d49565b8383121561136d5761136d868486613d49565b60008181526001830160205260408120548015613faa576000613ee5600183614f7d565b8554909150600090613ef990600190614f7d565b9050818114613f5e576000866000018281548110613f1957613f19614c4b565b9060005260206000200154905080876000018481548110613f3c57613f3c6
14c4b565b6000918252602080832090910192909255918252600188019052604090208390555b8554869080613f6f57613f6f6155b5565b60019003818190600052602060002001600090559055856001016000868152602001908152602001600020600090556001935050505061142c565b600091505061142c565b5092915050565b6000826000018281548110613fd257613fd2614c4b565b9060005260206000200154905092915050565b600081815260018301602052604081205461402c5750815460018181018455600084815260208082209093018490558454848252828601909352604090209190915561142c565b50600061142c565b508054600082559060005260206000209081019061384c91906140a8565b828054828255906000526020600020908101928215614098579160200282015b82811115614098578251829061408890826150cb565b5091602001919060010190614072565b506140a49291506140bd565b5090565b5b808211156140a457600081556001016140a9565b808211156140a45760006140d182826140da565b506001016140bd565b5080546140e690615032565b6000825580601f106140f6575050565b601f01602090049060005260206000209081019061384c91906140a8565b60ff8116811461384c57600080fd5b63ffffffff8116811461384c57600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051610140810167ffffffffffffffff8111828210171561418857614188614135565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156141d5576141d5614135565b604052919050565b600067ffffffffffffffff8211156141f7576141f7614135565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600082601f83011261423457600080fd5b8135614247614242826141dd565b61418e565b81815284602083860101111561425c57600080fd5b816020850160208301376000918101602001919091529392505050565b6bffffffffffffffffffffffff8116811461384c57600080fd5b600080600080600080600060e0888a0312156142ae57600080fd5b87356142b981614114565b965060208801356142c981614123565b955060408801356142d981614114565b9450606088013567ffffffffffffffff8111156142f557600080fd5b6143018a828b01614223565b945050608088013561431281614279565b9699959850939692959460a0840
135945060c09093013592915050565b803561ffff8116811461434157600080fd5b919050565b60008060006060848603121561435b57600080fd5b8335925061436b6020850161432f565b9150604084013590509250925092565b6000806040838503121561438e57600080fd5b82359150602083013567ffffffffffffffff8111156143ac57600080fd5b6143b885828601614223565b9150509250929050565b6000602082840312156143d457600080fd5b5035919050565b60005b838110156143f65781810151838201526020016143de565b50506000910152565b600081518084526144178160208601602086016143db565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000611bd660208301846143ff565b73ffffffffffffffffffffffffffffffffffffffff8116811461384c57600080fd5b600080600080600080600060e0888a03121561449957600080fd5b8735965060208801356144ab8161445c565b955060408801356144bb81614114565b969995985095966060810135965060808101359560a0820135955060c0909101359350915050565b600080604083850312156144f657600080fd5b823591506145066020840161432f565b90509250929050565b60006020828403121561452157600080fd5b8135611bd68161445c565b60008083601f84011261453e57600080fd5b50813567ffffffffffffffff81111561455657600080fd5b6020830191508360208260051b850101111561168057600080fd5b600080600080600080600060c0888a03121561458c57600080fd5b873567ffffffffffffffff8111156145a357600080fd5b6145af8a828b0161452c565b90985096505060208801356145c381614114565b96999598509596604081013596506060810135956080820135955060a0909101359350915050565b60008060006060848603121561460057600080fd5b505081359360208301359350604090920135919050565b6020815261463e60208201835173ffffffffffffffffffffffffffffffffffffffff169052565b60006020830151614657604084018263ffffffff169052565b5060408301516101408060608501526146746101608501836143ff565b9150606085015161469560808601826bffffffffffffffffffffffff169052565b50608085015173ffffffffffffffffffffffffffffffffffffffff811660a08601525060a085015167ffffffffffffffff811660c08601525060c085015163ffffffff811660e08601525060e0850151610100614701818701836bffffffffffffffffffffffff169052565b8601519050610
1206147168682018315159052565b8601518584037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00183870152905061474e83826143ff565b9695505050505050565b6000806020838503121561476b57600080fd5b823567ffffffffffffffff81111561478257600080fd5b61478e8582860161452c565b90969095509350505050565b60008083601f8401126147ac57600080fd5b50813567ffffffffffffffff8111156147c457600080fd5b60208301915083602082850101111561168057600080fd5b600080602083850312156147ef57600080fd5b823567ffffffffffffffff81111561480657600080fd5b61478e8582860161479a565b6020808252825182820181905260009190848201906040850190845b8181101561484a5783518352928401929184019160010161482e565b50909695505050505050565b60008060006040848603121561486b57600080fd5b833567ffffffffffffffff81111561488257600080fd5b61488e8682870161452c565b90945092505060208401356148a281614123565b809150509250925092565b600080604083850312156148c057600080fd5b50508035926020909101359150565b8215158152604060208201526000611bd360408301846143ff565b6000806000604084860312156148ff57600080fd5b83359250602084013567ffffffffffffffff81111561491d57600080fd5b6149298682870161479a565b9497909650939450505050565b6000806040838503121561494957600080fd5b823567ffffffffffffffff8082111561496157600080fd5b61496d86838701614223565b9350602085013591508082111561498357600080fd5b506143b885828601614223565b600067ffffffffffffffff8211156149aa576149aa614135565b5060051b60200190565b600060208083850312156149c757600080fd5b823567ffffffffffffffff808211156149df57600080fd5b818501915085601f8301126149f357600080fd5b8135614a0161424282614990565b81815260059190911b83018401908481019088831115614a2057600080fd5b8585015b83811015614a5857803585811115614a3c5760008081fd5b614a4a8b89838a0101614223565b845250918601918601614a24565b5098975050505050505050565b60008060408385031215614a7857600080fd5b823591506020830135614a8a81614279565b809150509250929050565b600060208284031215614aa757600080fd5b8135611bd681614114565b60008060408385031215614ac557600080fd5b823591506020830135614a8a81614123565b60008060408385031215614aea57600080fd5b82359150602
0830135614a8a81614114565b60008060008060608587031215614b1257600080fd5b843567ffffffffffffffff811115614b2957600080fd5b614b358782880161452c565b9095509350506020850135614b4981614114565b91506040850135614b5981614114565b939692955090935050565b60008060008060008060c08789031215614b7d57600080fd5b8635614b888161445c565b95506020870135614b9881614114565b95989597505050506040840135936060810135936080820135935060a0909101359150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60006bffffffffffffffffffffffff80831681851681830481118215151615614c1757614c17614bbd565b02949350505050565b8051801515811461434157600080fd5b600060208284031215614c4257600080fd5b611bd682614c20565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600060ff821660ff8103614c9057614c90614bbd565b60010192915050565b828152604060208201526000611bd360408301846143ff565b600082601f830112614cc357600080fd5b8151614cd1614242826141dd565b818152846020838601011115614ce657600080fd5b614cf78260208301602087016143db565b949350505050565b600060208284031215614d1157600080fd5b815167ffffffffffffffff811115614d2857600080fd5b614cf784828501614cb2565b805161434181614279565b600060208284031215614d5157600080fd5b8151611bd681614279565b80516143418161445c565b60008060408385031215614d7a57600080fd5b8251614d858161445c565b6020939093015192949293505050565b600060208284031215614da757600080fd5b8151611bd68161445c565b600060208284031215614dc457600080fd5b8151611bd681614114565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203614e0057614e00614bbd565b5060010190565b805161434181614123565b805167ffffffffffffffff8116811461434157600080fd5b600060208284031215614e3c57600080fd5b815167ffffffffffffffff80821115614e5457600080fd5b908301906101408286031215614e6957600080fd5b614e71614164565b614e7a83614d5c565b8152614e8860208401614e07565b6020820152604083015182811115614e9f57600080fd5b614eab87828601614cb2565b604083015250614ebd60608401614d34565b6060820152614ece60808401614d5c565b6080820152614edf60a
08401614e12565b60a0820152614ef060c08401614e07565b60c0820152614f0160e08401614d34565b60e0820152610100614f14818501614c20565b908201526101208381015183811115614f2c57600080fd5b614f3888828701614cb2565b918301919091525095945050505050565b8082018082111561142c5761142c614bbd565b600061ffff808316818103614f7357614f73614bbd565b6001019392505050565b8181038181111561142c5761142c614bbd565b600060208284031215614fa257600080fd5b5051919050565b600081614fb857614fb8614bbd565b507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190565b83815260406020820152816040820152818360608301376000818301606090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016010192915050565b600181811c9082168061504657607f821691505b60208210810361507f577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f82111561280357600081815260208120601f850160051c810160208610156150ac5750805b601f850160051c820191505b8181101561136d578281556001016150b8565b815167ffffffffffffffff8111156150e5576150e5614135565b6150f9816150f38454615032565b84615085565b602080601f83116001811461514c57600084156151165750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b17855561136d565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b828110156151995788860151825594840194600190910190840161517a565b50858210156151d557878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b600060208083850312156151f857600080fd5b825167ffffffffffffffff81111561520f57600080fd5b8301601f8101851361522057600080fd5b805161522e61424282614990565b81815260059190911b8201830190838101908783111561524d57600080fd5b928401925b8284101561526b57835182529284019290840190615252565b979650505050505050565b600063ffffffff808316818103614f7357614f73614bbd565b8051602080830151919081101561507f577ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fff60209190910360031b1b16919050565b60208152600082516101408060208501526152f06101608501836143ff565b915060208501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08086850301604087015261532c84836143ff565b935060408701519150615357606087018373ffffffffffffffffffffffffffffffffffffffff169052565b606087015163ffffffff811660808801529150608087015173ffffffffffffffffffffffffffffffffffffffff811660a0880152915060a087015160ff811660c0880152915060c08701519150808685030160e08701526153b884836143ff565b935060e087015191506101008187860301818801526153d785846143ff565b9450808801519250506101208187860301818801526153f685846143ff565b94508088015192505050615419828601826bffffffffffffffffffffffff169052565b5090949350505050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff048311821515161561545b5761545b614bbd565b500290565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b60008261549e5761549e615460565b500490565b8181036000831280158383131683831282161715613fb457613fb4614bbd565b6000826154d2576154d2615460565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff83147f80000000000000000000000000000000000000000000000000000000000000008314161561552657615526614bbd565b500590565b808201828112600083128015821682158216171561554b5761554b614bbd565b505092915050565b60007f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203614e0057614e00614bbd565b60007f80000000000000000000000000000000000000000000000000000000000000008203614fb857614fb8614bbd565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fdfea164736f6c6343000810000a307834353534343832643535353334343264343135323432343935343532353534643264353434353533353434653435353430303030303030303030303030303030307834323534343332643535353334343264343135323432343935343532353534643264353434353533353434653435353430303030303030303030303030303030", +} + +var VerifiableLoadUpkeepABI = VerifiableLoadUpkeepMetaData.ABI + +var VerifiableLoadUpkeepBin 
= VerifiableLoadUpkeepMetaData.Bin + +func DeployVerifiableLoadUpkeep(auth *bind.TransactOpts, backend bind.ContractBackend, _registrar common.Address, _useArb bool) (common.Address, *types.Transaction, *VerifiableLoadUpkeep, error) { + parsed, err := VerifiableLoadUpkeepMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VerifiableLoadUpkeepBin), backend, _registrar, _useArb) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VerifiableLoadUpkeep{address: address, abi: *parsed, VerifiableLoadUpkeepCaller: VerifiableLoadUpkeepCaller{contract: contract}, VerifiableLoadUpkeepTransactor: VerifiableLoadUpkeepTransactor{contract: contract}, VerifiableLoadUpkeepFilterer: VerifiableLoadUpkeepFilterer{contract: contract}}, nil +} + +type VerifiableLoadUpkeep struct { + address common.Address + abi abi.ABI + VerifiableLoadUpkeepCaller + VerifiableLoadUpkeepTransactor + VerifiableLoadUpkeepFilterer +} + +type VerifiableLoadUpkeepCaller struct { + contract *bind.BoundContract +} + +type VerifiableLoadUpkeepTransactor struct { + contract *bind.BoundContract +} + +type VerifiableLoadUpkeepFilterer struct { + contract *bind.BoundContract +} + +type VerifiableLoadUpkeepSession struct { + Contract *VerifiableLoadUpkeep + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VerifiableLoadUpkeepCallerSession struct { + Contract *VerifiableLoadUpkeepCaller + CallOpts bind.CallOpts +} + +type VerifiableLoadUpkeepTransactorSession struct { + Contract *VerifiableLoadUpkeepTransactor + TransactOpts bind.TransactOpts +} + +type VerifiableLoadUpkeepRaw struct { + Contract *VerifiableLoadUpkeep +} + +type VerifiableLoadUpkeepCallerRaw struct { + Contract *VerifiableLoadUpkeepCaller +} + +type VerifiableLoadUpkeepTransactorRaw struct { + 
Contract *VerifiableLoadUpkeepTransactor +} + +func NewVerifiableLoadUpkeep(address common.Address, backend bind.ContractBackend) (*VerifiableLoadUpkeep, error) { + abi, err := abi.JSON(strings.NewReader(VerifiableLoadUpkeepABI)) + if err != nil { + return nil, err + } + contract, err := bindVerifiableLoadUpkeep(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VerifiableLoadUpkeep{address: address, abi: abi, VerifiableLoadUpkeepCaller: VerifiableLoadUpkeepCaller{contract: contract}, VerifiableLoadUpkeepTransactor: VerifiableLoadUpkeepTransactor{contract: contract}, VerifiableLoadUpkeepFilterer: VerifiableLoadUpkeepFilterer{contract: contract}}, nil +} + +func NewVerifiableLoadUpkeepCaller(address common.Address, caller bind.ContractCaller) (*VerifiableLoadUpkeepCaller, error) { + contract, err := bindVerifiableLoadUpkeep(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VerifiableLoadUpkeepCaller{contract: contract}, nil +} + +func NewVerifiableLoadUpkeepTransactor(address common.Address, transactor bind.ContractTransactor) (*VerifiableLoadUpkeepTransactor, error) { + contract, err := bindVerifiableLoadUpkeep(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VerifiableLoadUpkeepTransactor{contract: contract}, nil +} + +func NewVerifiableLoadUpkeepFilterer(address common.Address, filterer bind.ContractFilterer) (*VerifiableLoadUpkeepFilterer, error) { + contract, err := bindVerifiableLoadUpkeep(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VerifiableLoadUpkeepFilterer{contract: contract}, nil +} + +func bindVerifiableLoadUpkeep(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VerifiableLoadUpkeepMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, 
filterer), nil +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VerifiableLoadUpkeep.Contract.VerifiableLoadUpkeepCaller.contract.Call(opts, result, method, params...) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.VerifiableLoadUpkeepTransactor.contract.Transfer(opts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.VerifiableLoadUpkeepTransactor.contract.Transact(opts, method, params...) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VerifiableLoadUpkeep.Contract.contract.Call(opts, result, method, params...) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.contract.Transfer(opts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) BUCKETSIZE(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "BUCKET_SIZE") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) BUCKETSIZE() (uint16, error) { + return _VerifiableLoadUpkeep.Contract.BUCKETSIZE(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) BUCKETSIZE() (uint16, error) { + return _VerifiableLoadUpkeep.Contract.BUCKETSIZE(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) AddLinkAmount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "addLinkAmount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) AddLinkAmount() (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.AddLinkAmount(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) AddLinkAmount() (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.AddLinkAmount(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) BucketedDelays(opts *bind.CallOpts, arg0 *big.Int, arg1 uint16, arg2 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "bucketedDelays", arg0, arg1, arg2) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) BucketedDelays(arg0 *big.Int, arg1 uint16, arg2 *big.Int) (*big.Int, 
error) { + return _VerifiableLoadUpkeep.Contract.BucketedDelays(&_VerifiableLoadUpkeep.CallOpts, arg0, arg1, arg2) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) BucketedDelays(arg0 *big.Int, arg1 uint16, arg2 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.BucketedDelays(&_VerifiableLoadUpkeep.CallOpts, arg0, arg1, arg2) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) Buckets(opts *bind.CallOpts, arg0 *big.Int) (uint16, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "buckets", arg0) + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) Buckets(arg0 *big.Int) (uint16, error) { + return _VerifiableLoadUpkeep.Contract.Buckets(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) Buckets(arg0 *big.Int) (uint16, error) { + return _VerifiableLoadUpkeep.Contract.Buckets(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) CheckGasToBurns(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "checkGasToBurns", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) CheckGasToBurns(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.CheckGasToBurns(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) CheckGasToBurns(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.CheckGasToBurns(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) Counters(opts 
*bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "counters", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) Counters(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.Counters(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) Counters(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.Counters(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) Delays(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "delays", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) Delays(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.Delays(&_VerifiableLoadUpkeep.CallOpts, arg0, arg1) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) Delays(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.Delays(&_VerifiableLoadUpkeep.CallOpts, arg0, arg1) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "dummyMap", arg0) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) DummyMap(arg0 [32]byte) (bool, error) { + return 
_VerifiableLoadUpkeep.Contract.DummyMap(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) DummyMap(arg0 [32]byte) (bool, error) { + return _VerifiableLoadUpkeep.Contract.DummyMap(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) Eligible(opts *bind.CallOpts, upkeepId *big.Int) (bool, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "eligible", upkeepId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) Eligible(upkeepId *big.Int) (bool, error) { + return _VerifiableLoadUpkeep.Contract.Eligible(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) Eligible(upkeepId *big.Int) (bool, error) { + return _VerifiableLoadUpkeep.Contract.Eligible(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) EmittedAgainSig(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "emittedAgainSig") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) EmittedAgainSig() ([32]byte, error) { + return _VerifiableLoadUpkeep.Contract.EmittedAgainSig(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) EmittedAgainSig() ([32]byte, error) { + return _VerifiableLoadUpkeep.Contract.EmittedAgainSig(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) EmittedSig(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "emittedSig") 
+ + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) EmittedSig() ([32]byte, error) { + return _VerifiableLoadUpkeep.Contract.EmittedSig(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) EmittedSig() ([32]byte, error) { + return _VerifiableLoadUpkeep.Contract.EmittedSig(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) FeedParamKey(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "feedParamKey") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) FeedParamKey() (string, error) { + return _VerifiableLoadUpkeep.Contract.FeedParamKey(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) FeedParamKey() (string, error) { + return _VerifiableLoadUpkeep.Contract.FeedParamKey(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) FeedsHex(opts *bind.CallOpts, arg0 *big.Int) (string, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "feedsHex", arg0) + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) FeedsHex(arg0 *big.Int) (string, error) { + return _VerifiableLoadUpkeep.Contract.FeedsHex(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) FeedsHex(arg0 *big.Int) (string, error) { + return _VerifiableLoadUpkeep.Contract.FeedsHex(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func 
(_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) FirstPerformBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "firstPerformBlocks", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) FirstPerformBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.FirstPerformBlocks(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) FirstPerformBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.FirstPerformBlocks(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GasLimits(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "gasLimits", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GasLimits(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GasLimits(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GasLimits(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GasLimits(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetActiveUpkeepIDsDeployedByThisContract(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getActiveUpkeepIDsDeployedByThisContract", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := 
*abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetActiveUpkeepIDsDeployedByThisContract(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetActiveUpkeepIDsDeployedByThisContract(&_VerifiableLoadUpkeep.CallOpts, startIndex, maxCount) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetActiveUpkeepIDsDeployedByThisContract(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetActiveUpkeepIDsDeployedByThisContract(&_VerifiableLoadUpkeep.CallOpts, startIndex, maxCount) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetAllActiveUpkeepIDsOnRegistry(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getAllActiveUpkeepIDsOnRegistry", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetAllActiveUpkeepIDsOnRegistry(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetAllActiveUpkeepIDsOnRegistry(&_VerifiableLoadUpkeep.CallOpts, startIndex, maxCount) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetAllActiveUpkeepIDsOnRegistry(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetAllActiveUpkeepIDsOnRegistry(&_VerifiableLoadUpkeep.CallOpts, startIndex, maxCount) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getBalance", id) + + if err != nil { + return 
*new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetBalance(id *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetBalance(&_VerifiableLoadUpkeep.CallOpts, id) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetBalance(id *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetBalance(&_VerifiableLoadUpkeep.CallOpts, id) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetBucketedDelays(opts *bind.CallOpts, upkeepId *big.Int, bucket uint16) ([]*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getBucketedDelays", upkeepId, bucket) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetBucketedDelays(upkeepId *big.Int, bucket uint16) ([]*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetBucketedDelays(&_VerifiableLoadUpkeep.CallOpts, upkeepId, bucket) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetBucketedDelays(upkeepId *big.Int, bucket uint16) ([]*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetBucketedDelays(&_VerifiableLoadUpkeep.CallOpts, upkeepId, bucket) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetBucketedDelaysLength(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getBucketedDelaysLength", upkeepId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetBucketedDelaysLength(upkeepId *big.Int) (*big.Int, error) { + return 
_VerifiableLoadUpkeep.Contract.GetBucketedDelaysLength(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetBucketedDelaysLength(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetBucketedDelaysLength(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetDelays(opts *bind.CallOpts, upkeepId *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getDelays", upkeepId) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetDelays(upkeepId *big.Int) ([]*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetDelays(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetDelays(upkeepId *big.Int) ([]*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetDelays(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetDelaysLength(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getDelaysLength", upkeepId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetDelaysLength(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetDelaysLength(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetDelaysLength(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetDelaysLength(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func 
(_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getForwarder", upkeepID) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _VerifiableLoadUpkeep.Contract.GetForwarder(&_VerifiableLoadUpkeep.CallOpts, upkeepID) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetForwarder(upkeepID *big.Int) (common.Address, error) { + return _VerifiableLoadUpkeep.Contract.GetForwarder(&_VerifiableLoadUpkeep.CallOpts, upkeepID) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetLogTriggerConfig(opts *bind.CallOpts, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) ([]byte, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getLogTriggerConfig", addr, selector, topic0, topic1, topic2, topic3) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetLogTriggerConfig(addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) ([]byte, error) { + return _VerifiableLoadUpkeep.Contract.GetLogTriggerConfig(&_VerifiableLoadUpkeep.CallOpts, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetLogTriggerConfig(addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) ([]byte, error) { + return 
_VerifiableLoadUpkeep.Contract.GetLogTriggerConfig(&_VerifiableLoadUpkeep.CallOpts, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetMinBalanceForUpkeep(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getMinBalanceForUpkeep", upkeepId) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetMinBalanceForUpkeep(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetMinBalanceForUpkeep(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetMinBalanceForUpkeep(upkeepId *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetMinBalanceForUpkeep(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetPxDelayLastNPerforms(opts *bind.CallOpts, upkeepId *big.Int, p *big.Int, n *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getPxDelayLastNPerforms", upkeepId, p, n) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetPxDelayLastNPerforms(upkeepId *big.Int, p *big.Int, n *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetPxDelayLastNPerforms(&_VerifiableLoadUpkeep.CallOpts, upkeepId, p, n) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetPxDelayLastNPerforms(upkeepId *big.Int, p *big.Int, n *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetPxDelayLastNPerforms(&_VerifiableLoadUpkeep.CallOpts, upkeepId, p, n) +} + +func 
(_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetSumDelayInBucket(opts *bind.CallOpts, upkeepId *big.Int, bucket uint16) (*big.Int, *big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getSumDelayInBucket", upkeepId, bucket) + + if err != nil { + return *new(*big.Int), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetSumDelayInBucket(upkeepId *big.Int, bucket uint16) (*big.Int, *big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetSumDelayInBucket(&_VerifiableLoadUpkeep.CallOpts, upkeepId, bucket) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetSumDelayInBucket(upkeepId *big.Int, bucket uint16) (*big.Int, *big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetSumDelayInBucket(&_VerifiableLoadUpkeep.CallOpts, upkeepId, bucket) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetSumDelayLastNPerforms(opts *bind.CallOpts, upkeepId *big.Int, n *big.Int) (*big.Int, *big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getSumDelayLastNPerforms", upkeepId, n) + + if err != nil { + return *new(*big.Int), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetSumDelayLastNPerforms(upkeepId *big.Int, n *big.Int) (*big.Int, *big.Int, error) { + return _VerifiableLoadUpkeep.Contract.GetSumDelayLastNPerforms(&_VerifiableLoadUpkeep.CallOpts, upkeepId, n) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetSumDelayLastNPerforms(upkeepId *big.Int, n *big.Int) (*big.Int, *big.Int, error) { + return 
_VerifiableLoadUpkeep.Contract.GetSumDelayLastNPerforms(&_VerifiableLoadUpkeep.CallOpts, upkeepId, n) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getTriggerType", upkeepId) + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetTriggerType(upkeepId *big.Int) (uint8, error) { + return _VerifiableLoadUpkeep.Contract.GetTriggerType(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetTriggerType(upkeepId *big.Int) (uint8, error) { + return _VerifiableLoadUpkeep.Contract.GetTriggerType(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetUpkeepInfo(opts *bind.CallOpts, upkeepId *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getUpkeepInfo", upkeepId) + + if err != nil { + return *new(KeeperRegistryBase21UpkeepInfo), err + } + + out0 := *abi.ConvertType(out[0], new(KeeperRegistryBase21UpkeepInfo)).(*KeeperRegistryBase21UpkeepInfo) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetUpkeepInfo(upkeepId *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + return _VerifiableLoadUpkeep.Contract.GetUpkeepInfo(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetUpkeepInfo(upkeepId *big.Int) (KeeperRegistryBase21UpkeepInfo, error) { + return _VerifiableLoadUpkeep.Contract.GetUpkeepInfo(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, 
error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getUpkeepPrivilegeConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return _VerifiableLoadUpkeep.Contract.GetUpkeepPrivilegeConfig(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return _VerifiableLoadUpkeep.Contract.GetUpkeepPrivilegeConfig(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "getUpkeepTriggerConfig", upkeepId) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _VerifiableLoadUpkeep.Contract.GetUpkeepTriggerConfig(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) GetUpkeepTriggerConfig(upkeepId *big.Int) ([]byte, error) { + return _VerifiableLoadUpkeep.Contract.GetUpkeepTriggerConfig(&_VerifiableLoadUpkeep.CallOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) Intervals(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "intervals", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep 
*VerifiableLoadUpkeepSession) Intervals(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.Intervals(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) Intervals(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.Intervals(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) LastTopUpBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "lastTopUpBlocks", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) LastTopUpBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.LastTopUpBlocks(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) LastTopUpBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.LastTopUpBlocks(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) LinkToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "linkToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) LinkToken() (common.Address, error) { + return _VerifiableLoadUpkeep.Contract.LinkToken(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) LinkToken() (common.Address, error) { + return _VerifiableLoadUpkeep.Contract.LinkToken(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) 
MinBalanceThresholdMultiplier(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "minBalanceThresholdMultiplier") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) MinBalanceThresholdMultiplier() (uint8, error) { + return _VerifiableLoadUpkeep.Contract.MinBalanceThresholdMultiplier(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) MinBalanceThresholdMultiplier() (uint8, error) { + return _VerifiableLoadUpkeep.Contract.MinBalanceThresholdMultiplier(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) Owner() (common.Address, error) { + return _VerifiableLoadUpkeep.Contract.Owner(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) Owner() (common.Address, error) { + return _VerifiableLoadUpkeep.Contract.Owner(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) PerformDataSizes(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "performDataSizes", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) PerformDataSizes(arg0 *big.Int) (*big.Int, error) { + return 
_VerifiableLoadUpkeep.Contract.PerformDataSizes(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) PerformDataSizes(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.PerformDataSizes(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) PerformGasToBurns(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "performGasToBurns", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) PerformGasToBurns(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.PerformGasToBurns(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) PerformGasToBurns(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.PerformGasToBurns(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) PreviousPerformBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "previousPerformBlocks", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) PreviousPerformBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.PreviousPerformBlocks(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) PreviousPerformBlocks(arg0 *big.Int) (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.PreviousPerformBlocks(&_VerifiableLoadUpkeep.CallOpts, arg0) +} + +func 
(_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) Registrar(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "registrar") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) Registrar() (common.Address, error) { + return _VerifiableLoadUpkeep.Contract.Registrar(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) Registrar() (common.Address, error) { + return _VerifiableLoadUpkeep.Contract.Registrar(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) Registry(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "registry") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) Registry() (common.Address, error) { + return _VerifiableLoadUpkeep.Contract.Registry(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) Registry() (common.Address, error) { + return _VerifiableLoadUpkeep.Contract.Registry(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) TimeParamKey(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "timeParamKey") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) TimeParamKey() (string, error) { + return 
_VerifiableLoadUpkeep.Contract.TimeParamKey(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) TimeParamKey() (string, error) { + return _VerifiableLoadUpkeep.Contract.TimeParamKey(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) UpkeepTopUpCheckInterval(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "upkeepTopUpCheckInterval") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) UpkeepTopUpCheckInterval() (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.UpkeepTopUpCheckInterval(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) UpkeepTopUpCheckInterval() (*big.Int, error) { + return _VerifiableLoadUpkeep.Contract.UpkeepTopUpCheckInterval(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCaller) UseArbitrumBlockNum(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _VerifiableLoadUpkeep.contract.Call(opts, &out, "useArbitrumBlockNum") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) UseArbitrumBlockNum() (bool, error) { + return _VerifiableLoadUpkeep.Contract.UseArbitrumBlockNum(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepCallerSession) UseArbitrumBlockNum() (bool, error) { + return _VerifiableLoadUpkeep.Contract.UseArbitrumBlockNum(&_VerifiableLoadUpkeep.CallOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return 
_VerifiableLoadUpkeep.contract.Transact(opts, "acceptOwnership") +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) AcceptOwnership() (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.AcceptOwnership(&_VerifiableLoadUpkeep.TransactOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.AcceptOwnership(&_VerifiableLoadUpkeep.TransactOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) AddFunds(opts *bind.TransactOpts, upkeepId *big.Int, amount *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "addFunds", upkeepId, amount) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) AddFunds(upkeepId *big.Int, amount *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.AddFunds(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, amount) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) AddFunds(upkeepId *big.Int, amount *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.AddFunds(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, amount) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) BatchCancelUpkeeps(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "batchCancelUpkeeps", upkeepIds) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) BatchCancelUpkeeps(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchCancelUpkeeps(&_VerifiableLoadUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) BatchCancelUpkeeps(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchCancelUpkeeps(&_VerifiableLoadUpkeep.TransactOpts, upkeepIds) +} + +func 
(_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) BatchPreparingUpkeeps(opts *bind.TransactOpts, upkeepIds []*big.Int, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "batchPreparingUpkeeps", upkeepIds, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) BatchPreparingUpkeeps(upkeepIds []*big.Int, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchPreparingUpkeeps(&_VerifiableLoadUpkeep.TransactOpts, upkeepIds, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) BatchPreparingUpkeeps(upkeepIds []*big.Int, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchPreparingUpkeeps(&_VerifiableLoadUpkeep.TransactOpts, upkeepIds, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) BatchPreparingUpkeepsSimple(opts *bind.TransactOpts, upkeepIds []*big.Int, log uint8, selector uint8) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "batchPreparingUpkeepsSimple", upkeepIds, log, selector) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) BatchPreparingUpkeepsSimple(upkeepIds []*big.Int, log uint8, selector uint8) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchPreparingUpkeepsSimple(&_VerifiableLoadUpkeep.TransactOpts, upkeepIds, log, selector) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) BatchPreparingUpkeepsSimple(upkeepIds []*big.Int, log uint8, selector uint8) (*types.Transaction, error) { + return 
_VerifiableLoadUpkeep.Contract.BatchPreparingUpkeepsSimple(&_VerifiableLoadUpkeep.TransactOpts, upkeepIds, log, selector) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) BatchRegisterUpkeeps(opts *bind.TransactOpts, number uint8, gasLimit uint32, triggerType uint8, triggerConfig []byte, amount *big.Int, checkGasToBurn *big.Int, performGasToBurn *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "batchRegisterUpkeeps", number, gasLimit, triggerType, triggerConfig, amount, checkGasToBurn, performGasToBurn) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) BatchRegisterUpkeeps(number uint8, gasLimit uint32, triggerType uint8, triggerConfig []byte, amount *big.Int, checkGasToBurn *big.Int, performGasToBurn *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchRegisterUpkeeps(&_VerifiableLoadUpkeep.TransactOpts, number, gasLimit, triggerType, triggerConfig, amount, checkGasToBurn, performGasToBurn) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) BatchRegisterUpkeeps(number uint8, gasLimit uint32, triggerType uint8, triggerConfig []byte, amount *big.Int, checkGasToBurn *big.Int, performGasToBurn *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchRegisterUpkeeps(&_VerifiableLoadUpkeep.TransactOpts, number, gasLimit, triggerType, triggerConfig, amount, checkGasToBurn, performGasToBurn) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) BatchSendLogs(opts *bind.TransactOpts, log uint8) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "batchSendLogs", log) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) BatchSendLogs(log uint8) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchSendLogs(&_VerifiableLoadUpkeep.TransactOpts, log) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) BatchSendLogs(log 
uint8) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchSendLogs(&_VerifiableLoadUpkeep.TransactOpts, log) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) BatchSetIntervals(opts *bind.TransactOpts, upkeepIds []*big.Int, interval uint32) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "batchSetIntervals", upkeepIds, interval) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) BatchSetIntervals(upkeepIds []*big.Int, interval uint32) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchSetIntervals(&_VerifiableLoadUpkeep.TransactOpts, upkeepIds, interval) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) BatchSetIntervals(upkeepIds []*big.Int, interval uint32) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchSetIntervals(&_VerifiableLoadUpkeep.TransactOpts, upkeepIds, interval) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) BatchUpdatePipelineData(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "batchUpdatePipelineData", upkeepIds) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) BatchUpdatePipelineData(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchUpdatePipelineData(&_VerifiableLoadUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) BatchUpdatePipelineData(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchUpdatePipelineData(&_VerifiableLoadUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) BatchWithdrawLinks(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "batchWithdrawLinks", upkeepIds) +} + +func 
(_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) BatchWithdrawLinks(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchWithdrawLinks(&_VerifiableLoadUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) BatchWithdrawLinks(upkeepIds []*big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BatchWithdrawLinks(&_VerifiableLoadUpkeep.TransactOpts, upkeepIds) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) BurnPerformGas(opts *bind.TransactOpts, upkeepId *big.Int, startGas *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "burnPerformGas", upkeepId, startGas, blockNum) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) BurnPerformGas(upkeepId *big.Int, startGas *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BurnPerformGas(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, startGas, blockNum) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) BurnPerformGas(upkeepId *big.Int, startGas *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.BurnPerformGas(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, startGas, blockNum) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) CheckUpkeep(opts *bind.TransactOpts, checkData []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "checkUpkeep", checkData) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) CheckUpkeep(checkData []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.CheckUpkeep(&_VerifiableLoadUpkeep.TransactOpts, checkData) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) CheckUpkeep(checkData []byte) (*types.Transaction, error) { + return 
_VerifiableLoadUpkeep.Contract.CheckUpkeep(&_VerifiableLoadUpkeep.TransactOpts, checkData) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) PerformUpkeep(opts *bind.TransactOpts, performData []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "performUpkeep", performData) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.PerformUpkeep(&_VerifiableLoadUpkeep.TransactOpts, performData) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) PerformUpkeep(performData []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.PerformUpkeep(&_VerifiableLoadUpkeep.TransactOpts, performData) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) SendLog(opts *bind.TransactOpts, upkeepId *big.Int, log uint8) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "sendLog", upkeepId, log) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) SendLog(upkeepId *big.Int, log uint8) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SendLog(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, log) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) SendLog(upkeepId *big.Int, log uint8) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SendLog(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, log) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) SetConfig(opts *bind.TransactOpts, newRegistrar common.Address) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "setConfig", newRegistrar) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) SetConfig(newRegistrar common.Address) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetConfig(&_VerifiableLoadUpkeep.TransactOpts, newRegistrar) 
+} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) SetConfig(newRegistrar common.Address) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetConfig(&_VerifiableLoadUpkeep.TransactOpts, newRegistrar) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) SetFeeds(opts *bind.TransactOpts, _feeds []string) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "setFeeds", _feeds) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) SetFeeds(_feeds []string) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetFeeds(&_VerifiableLoadUpkeep.TransactOpts, _feeds) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) SetFeeds(_feeds []string) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetFeeds(&_VerifiableLoadUpkeep.TransactOpts, _feeds) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) SetInterval(opts *bind.TransactOpts, upkeepId *big.Int, _interval *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "setInterval", upkeepId, _interval) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) SetInterval(upkeepId *big.Int, _interval *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetInterval(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, _interval) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) SetInterval(upkeepId *big.Int, _interval *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetInterval(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, _interval) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) SetParamKeys(opts *bind.TransactOpts, _feedParamKey string, _timeParamKey string) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "setParamKeys", _feedParamKey, _timeParamKey) +} + +func 
(_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) SetParamKeys(_feedParamKey string, _timeParamKey string) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetParamKeys(&_VerifiableLoadUpkeep.TransactOpts, _feedParamKey, _timeParamKey) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) SetParamKeys(_feedParamKey string, _timeParamKey string) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetParamKeys(&_VerifiableLoadUpkeep.TransactOpts, _feedParamKey, _timeParamKey) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) SetPerformDataSize(opts *bind.TransactOpts, upkeepId *big.Int, value *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "setPerformDataSize", upkeepId, value) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) SetPerformDataSize(upkeepId *big.Int, value *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetPerformDataSize(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, value) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) SetPerformDataSize(upkeepId *big.Int, value *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetPerformDataSize(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, value) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) SetUpkeepGasLimit(opts *bind.TransactOpts, upkeepId *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "setUpkeepGasLimit", upkeepId, gasLimit) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) SetUpkeepGasLimit(upkeepId *big.Int, gasLimit uint32) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetUpkeepGasLimit(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, gasLimit) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) SetUpkeepGasLimit(upkeepId *big.Int, gasLimit 
uint32) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetUpkeepGasLimit(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, gasLimit) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "setUpkeepPrivilegeConfig", upkeepId, cfg) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetUpkeepPrivilegeConfig(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, cfg) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) SetUpkeepPrivilegeConfig(upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.SetUpkeepPrivilegeConfig(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, cfg) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) TopUpFund(opts *bind.TransactOpts, upkeepId *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "topUpFund", upkeepId, blockNum) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) TopUpFund(upkeepId *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.TopUpFund(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, blockNum) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) TopUpFund(upkeepId *big.Int, blockNum *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.TopUpFund(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, blockNum) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "transferOwnership", to) +} + +func 
(_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.TransferOwnership(&_VerifiableLoadUpkeep.TransactOpts, to) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.TransferOwnership(&_VerifiableLoadUpkeep.TransactOpts, to) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) UpdateLogTriggerConfig1(opts *bind.TransactOpts, upkeepId *big.Int, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "updateLogTriggerConfig1", upkeepId, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) UpdateLogTriggerConfig1(upkeepId *big.Int, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.UpdateLogTriggerConfig1(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) UpdateLogTriggerConfig1(upkeepId *big.Int, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.UpdateLogTriggerConfig1(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, addr, selector, topic0, topic1, topic2, topic3) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) UpdateLogTriggerConfig2(opts *bind.TransactOpts, upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "updateLogTriggerConfig2", upkeepId, cfg) +} + +func (_VerifiableLoadUpkeep 
*VerifiableLoadUpkeepSession) UpdateLogTriggerConfig2(upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.UpdateLogTriggerConfig2(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, cfg) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) UpdateLogTriggerConfig2(upkeepId *big.Int, cfg []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.UpdateLogTriggerConfig2(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, cfg) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) UpdateUpkeepPipelineData(opts *bind.TransactOpts, upkeepId *big.Int, pipelineData []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "updateUpkeepPipelineData", upkeepId, pipelineData) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) UpdateUpkeepPipelineData(upkeepId *big.Int, pipelineData []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.UpdateUpkeepPipelineData(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, pipelineData) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) UpdateUpkeepPipelineData(upkeepId *big.Int, pipelineData []byte) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.UpdateUpkeepPipelineData(&_VerifiableLoadUpkeep.TransactOpts, upkeepId, pipelineData) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) WithdrawLinks(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "withdrawLinks") +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) WithdrawLinks() (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.WithdrawLinks(&_VerifiableLoadUpkeep.TransactOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) WithdrawLinks() (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.WithdrawLinks(&_VerifiableLoadUpkeep.TransactOpts) 
+} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) WithdrawLinks0(opts *bind.TransactOpts, upkeepId *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.Transact(opts, "withdrawLinks0", upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) WithdrawLinks0(upkeepId *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.WithdrawLinks0(&_VerifiableLoadUpkeep.TransactOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) WithdrawLinks0(upkeepId *big.Int) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.WithdrawLinks0(&_VerifiableLoadUpkeep.TransactOpts, upkeepId) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifiableLoadUpkeep.contract.RawTransact(opts, nil) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepSession) Receive() (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.Receive(&_VerifiableLoadUpkeep.TransactOpts) +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepTransactorSession) Receive() (*types.Transaction, error) { + return _VerifiableLoadUpkeep.Contract.Receive(&_VerifiableLoadUpkeep.TransactOpts) +} + +type VerifiableLoadUpkeepLogEmittedIterator struct { + Event *VerifiableLoadUpkeepLogEmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadUpkeepLogEmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadUpkeepLogEmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadUpkeepLogEmitted) + if 
err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifiableLoadUpkeepLogEmittedIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadUpkeepLogEmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadUpkeepLogEmitted struct { + UpkeepId *big.Int + BlockNum *big.Int + Addr common.Address + Raw types.Log +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) FilterLogEmitted(opts *bind.FilterOpts, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (*VerifiableLoadUpkeepLogEmittedIterator, error) { + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + var blockNumRule []interface{} + for _, blockNumItem := range blockNum { + blockNumRule = append(blockNumRule, blockNumItem) + } + var addrRule []interface{} + for _, addrItem := range addr { + addrRule = append(addrRule, addrItem) + } + + logs, sub, err := _VerifiableLoadUpkeep.contract.FilterLogs(opts, "LogEmitted", upkeepIdRule, blockNumRule, addrRule) + if err != nil { + return nil, err + } + return &VerifiableLoadUpkeepLogEmittedIterator{contract: _VerifiableLoadUpkeep.contract, event: "LogEmitted", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) WatchLogEmitted(opts *bind.WatchOpts, sink chan<- *VerifiableLoadUpkeepLogEmitted, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (event.Subscription, error) { + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + var blockNumRule []interface{} + for _, blockNumItem := range blockNum { + blockNumRule = append(blockNumRule, blockNumItem) + } + var addrRule []interface{} + for _, addrItem 
:= range addr { + addrRule = append(addrRule, addrItem) + } + + logs, sub, err := _VerifiableLoadUpkeep.contract.WatchLogs(opts, "LogEmitted", upkeepIdRule, blockNumRule, addrRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadUpkeepLogEmitted) + if err := _VerifiableLoadUpkeep.contract.UnpackLog(event, "LogEmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) ParseLogEmitted(log types.Log) (*VerifiableLoadUpkeepLogEmitted, error) { + event := new(VerifiableLoadUpkeepLogEmitted) + if err := _VerifiableLoadUpkeep.contract.UnpackLog(event, "LogEmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifiableLoadUpkeepLogEmittedAgainIterator struct { + Event *VerifiableLoadUpkeepLogEmittedAgain + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadUpkeepLogEmittedAgainIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadUpkeepLogEmittedAgain) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadUpkeepLogEmittedAgain) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = 
err + return it.Next() + } +} + +func (it *VerifiableLoadUpkeepLogEmittedAgainIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadUpkeepLogEmittedAgainIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadUpkeepLogEmittedAgain struct { + UpkeepId *big.Int + BlockNum *big.Int + Addr common.Address + Raw types.Log +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) FilterLogEmittedAgain(opts *bind.FilterOpts, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (*VerifiableLoadUpkeepLogEmittedAgainIterator, error) { + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + var blockNumRule []interface{} + for _, blockNumItem := range blockNum { + blockNumRule = append(blockNumRule, blockNumItem) + } + var addrRule []interface{} + for _, addrItem := range addr { + addrRule = append(addrRule, addrItem) + } + + logs, sub, err := _VerifiableLoadUpkeep.contract.FilterLogs(opts, "LogEmittedAgain", upkeepIdRule, blockNumRule, addrRule) + if err != nil { + return nil, err + } + return &VerifiableLoadUpkeepLogEmittedAgainIterator{contract: _VerifiableLoadUpkeep.contract, event: "LogEmittedAgain", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) WatchLogEmittedAgain(opts *bind.WatchOpts, sink chan<- *VerifiableLoadUpkeepLogEmittedAgain, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (event.Subscription, error) { + + var upkeepIdRule []interface{} + for _, upkeepIdItem := range upkeepId { + upkeepIdRule = append(upkeepIdRule, upkeepIdItem) + } + var blockNumRule []interface{} + for _, blockNumItem := range blockNum { + blockNumRule = append(blockNumRule, blockNumItem) + } + var addrRule []interface{} + for _, addrItem := range addr { + addrRule = append(addrRule, addrItem) + } + + logs, sub, err := _VerifiableLoadUpkeep.contract.WatchLogs(opts, "LogEmittedAgain", 
upkeepIdRule, blockNumRule, addrRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadUpkeepLogEmittedAgain) + if err := _VerifiableLoadUpkeep.contract.UnpackLog(event, "LogEmittedAgain", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) ParseLogEmittedAgain(log types.Log) (*VerifiableLoadUpkeepLogEmittedAgain, error) { + event := new(VerifiableLoadUpkeepLogEmittedAgain) + if err := _VerifiableLoadUpkeep.contract.UnpackLog(event, "LogEmittedAgain", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifiableLoadUpkeepOwnershipTransferRequestedIterator struct { + Event *VerifiableLoadUpkeepOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadUpkeepOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadUpkeepOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadUpkeepOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*VerifiableLoadUpkeepOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadUpkeepOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadUpkeepOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifiableLoadUpkeepOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifiableLoadUpkeep.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VerifiableLoadUpkeepOwnershipTransferRequestedIterator{contract: _VerifiableLoadUpkeep.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VerifiableLoadUpkeepOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifiableLoadUpkeep.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadUpkeepOwnershipTransferRequested) + if err := _VerifiableLoadUpkeep.contract.UnpackLog(event, 
"OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) ParseOwnershipTransferRequested(log types.Log) (*VerifiableLoadUpkeepOwnershipTransferRequested, error) { + event := new(VerifiableLoadUpkeepOwnershipTransferRequested) + if err := _VerifiableLoadUpkeep.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifiableLoadUpkeepOwnershipTransferredIterator struct { + Event *VerifiableLoadUpkeepOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadUpkeepOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadUpkeepOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadUpkeepOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifiableLoadUpkeepOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadUpkeepOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadUpkeepOwnershipTransferred struct { + From common.Address + To common.Address + Raw 
types.Log +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifiableLoadUpkeepOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifiableLoadUpkeep.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VerifiableLoadUpkeepOwnershipTransferredIterator{contract: _VerifiableLoadUpkeep.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VerifiableLoadUpkeepOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifiableLoadUpkeep.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadUpkeepOwnershipTransferred) + if err := _VerifiableLoadUpkeep.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) ParseOwnershipTransferred(log types.Log) 
(*VerifiableLoadUpkeepOwnershipTransferred, error) { + event := new(VerifiableLoadUpkeepOwnershipTransferred) + if err := _VerifiableLoadUpkeep.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifiableLoadUpkeepUpkeepTopUpIterator struct { + Event *VerifiableLoadUpkeepUpkeepTopUp + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifiableLoadUpkeepUpkeepTopUpIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadUpkeepUpkeepTopUp) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifiableLoadUpkeepUpkeepTopUp) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifiableLoadUpkeepUpkeepTopUpIterator) Error() error { + return it.fail +} + +func (it *VerifiableLoadUpkeepUpkeepTopUpIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifiableLoadUpkeepUpkeepTopUp struct { + UpkeepId *big.Int + Amount *big.Int + BlockNum *big.Int + Raw types.Log +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) FilterUpkeepTopUp(opts *bind.FilterOpts) (*VerifiableLoadUpkeepUpkeepTopUpIterator, error) { + + logs, sub, err := _VerifiableLoadUpkeep.contract.FilterLogs(opts, "UpkeepTopUp") + if err != nil { + return nil, err + } + return &VerifiableLoadUpkeepUpkeepTopUpIterator{contract: _VerifiableLoadUpkeep.contract, event: "UpkeepTopUp", logs: logs, sub: sub}, nil +} + +func (_VerifiableLoadUpkeep 
*VerifiableLoadUpkeepFilterer) WatchUpkeepTopUp(opts *bind.WatchOpts, sink chan<- *VerifiableLoadUpkeepUpkeepTopUp) (event.Subscription, error) { + + logs, sub, err := _VerifiableLoadUpkeep.contract.WatchLogs(opts, "UpkeepTopUp") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifiableLoadUpkeepUpkeepTopUp) + if err := _VerifiableLoadUpkeep.contract.UnpackLog(event, "UpkeepTopUp", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeepFilterer) ParseUpkeepTopUp(log types.Log) (*VerifiableLoadUpkeepUpkeepTopUp, error) { + event := new(VerifiableLoadUpkeepUpkeepTopUp) + if err := _VerifiableLoadUpkeep.contract.UnpackLog(event, "UpkeepTopUp", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeep) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VerifiableLoadUpkeep.abi.Events["LogEmitted"].ID: + return _VerifiableLoadUpkeep.ParseLogEmitted(log) + case _VerifiableLoadUpkeep.abi.Events["LogEmittedAgain"].ID: + return _VerifiableLoadUpkeep.ParseLogEmittedAgain(log) + case _VerifiableLoadUpkeep.abi.Events["OwnershipTransferRequested"].ID: + return _VerifiableLoadUpkeep.ParseOwnershipTransferRequested(log) + case _VerifiableLoadUpkeep.abi.Events["OwnershipTransferred"].ID: + return _VerifiableLoadUpkeep.ParseOwnershipTransferred(log) + case _VerifiableLoadUpkeep.abi.Events["UpkeepTopUp"].ID: + return _VerifiableLoadUpkeep.ParseUpkeepTopUp(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func 
(VerifiableLoadUpkeepLogEmitted) Topic() common.Hash { + return common.HexToHash("0x97009585a4d2440f981ab6f6eec514343e1e6b2aa9b991a26998e6806f41bf08") +} + +func (VerifiableLoadUpkeepLogEmittedAgain) Topic() common.Hash { + return common.HexToHash("0xc76416badc8398ce17c93eab7b4f60f263241694cf503e4df24f233a8cc1c50d") +} + +func (VerifiableLoadUpkeepOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VerifiableLoadUpkeepOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VerifiableLoadUpkeepUpkeepTopUp) Topic() common.Hash { + return common.HexToHash("0x49d4100ab0124eb4a9a65dc4ea08d6412a43f6f05c49194983f5b322bcc0a5c0") +} + +func (_VerifiableLoadUpkeep *VerifiableLoadUpkeep) Address() common.Address { + return _VerifiableLoadUpkeep.address +} + +type VerifiableLoadUpkeepInterface interface { + BUCKETSIZE(opts *bind.CallOpts) (uint16, error) + + AddLinkAmount(opts *bind.CallOpts) (*big.Int, error) + + BucketedDelays(opts *bind.CallOpts, arg0 *big.Int, arg1 uint16, arg2 *big.Int) (*big.Int, error) + + Buckets(opts *bind.CallOpts, arg0 *big.Int) (uint16, error) + + CheckGasToBurns(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + Counters(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + Delays(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) + + DummyMap(opts *bind.CallOpts, arg0 [32]byte) (bool, error) + + Eligible(opts *bind.CallOpts, upkeepId *big.Int) (bool, error) + + EmittedAgainSig(opts *bind.CallOpts) ([32]byte, error) + + EmittedSig(opts *bind.CallOpts) ([32]byte, error) + + FeedParamKey(opts *bind.CallOpts) (string, error) + + FeedsHex(opts *bind.CallOpts, arg0 *big.Int) (string, error) + + FirstPerformBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + GasLimits(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, 
error) + + GetActiveUpkeepIDsDeployedByThisContract(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetAllActiveUpkeepIDsOnRegistry(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetBalance(opts *bind.CallOpts, id *big.Int) (*big.Int, error) + + GetBucketedDelays(opts *bind.CallOpts, upkeepId *big.Int, bucket uint16) ([]*big.Int, error) + + GetBucketedDelaysLength(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) + + GetDelays(opts *bind.CallOpts, upkeepId *big.Int) ([]*big.Int, error) + + GetDelaysLength(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) + + GetForwarder(opts *bind.CallOpts, upkeepID *big.Int) (common.Address, error) + + GetLogTriggerConfig(opts *bind.CallOpts, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) ([]byte, error) + + GetMinBalanceForUpkeep(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) + + GetPxDelayLastNPerforms(opts *bind.CallOpts, upkeepId *big.Int, p *big.Int, n *big.Int) (*big.Int, error) + + GetSumDelayInBucket(opts *bind.CallOpts, upkeepId *big.Int, bucket uint16) (*big.Int, *big.Int, error) + + GetSumDelayLastNPerforms(opts *bind.CallOpts, upkeepId *big.Int, n *big.Int) (*big.Int, *big.Int, error) + + GetTriggerType(opts *bind.CallOpts, upkeepId *big.Int) (uint8, error) + + GetUpkeepInfo(opts *bind.CallOpts, upkeepId *big.Int) (KeeperRegistryBase21UpkeepInfo, error) + + GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + + GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + + Intervals(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + LastTopUpBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + LinkToken(opts *bind.CallOpts) (common.Address, error) + + MinBalanceThresholdMultiplier(opts *bind.CallOpts) (uint8, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + 
PerformDataSizes(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + PerformGasToBurns(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + PreviousPerformBlocks(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + Registrar(opts *bind.CallOpts) (common.Address, error) + + Registry(opts *bind.CallOpts) (common.Address, error) + + TimeParamKey(opts *bind.CallOpts) (string, error) + + UpkeepTopUpCheckInterval(opts *bind.CallOpts) (*big.Int, error) + + UseArbitrumBlockNum(opts *bind.CallOpts) (bool, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AddFunds(opts *bind.TransactOpts, upkeepId *big.Int, amount *big.Int) (*types.Transaction, error) + + BatchCancelUpkeeps(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) + + BatchPreparingUpkeeps(opts *bind.TransactOpts, upkeepIds []*big.Int, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) + + BatchPreparingUpkeepsSimple(opts *bind.TransactOpts, upkeepIds []*big.Int, log uint8, selector uint8) (*types.Transaction, error) + + BatchRegisterUpkeeps(opts *bind.TransactOpts, number uint8, gasLimit uint32, triggerType uint8, triggerConfig []byte, amount *big.Int, checkGasToBurn *big.Int, performGasToBurn *big.Int) (*types.Transaction, error) + + BatchSendLogs(opts *bind.TransactOpts, log uint8) (*types.Transaction, error) + + BatchSetIntervals(opts *bind.TransactOpts, upkeepIds []*big.Int, interval uint32) (*types.Transaction, error) + + BatchUpdatePipelineData(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) + + BatchWithdrawLinks(opts *bind.TransactOpts, upkeepIds []*big.Int) (*types.Transaction, error) + + BurnPerformGas(opts *bind.TransactOpts, upkeepId *big.Int, startGas *big.Int, blockNum *big.Int) (*types.Transaction, error) + + CheckUpkeep(opts *bind.TransactOpts, checkData []byte) (*types.Transaction, error) + + PerformUpkeep(opts *bind.TransactOpts, 
performData []byte) (*types.Transaction, error) + + SendLog(opts *bind.TransactOpts, upkeepId *big.Int, log uint8) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, newRegistrar common.Address) (*types.Transaction, error) + + SetFeeds(opts *bind.TransactOpts, _feeds []string) (*types.Transaction, error) + + SetInterval(opts *bind.TransactOpts, upkeepId *big.Int, _interval *big.Int) (*types.Transaction, error) + + SetParamKeys(opts *bind.TransactOpts, _feedParamKey string, _timeParamKey string) (*types.Transaction, error) + + SetPerformDataSize(opts *bind.TransactOpts, upkeepId *big.Int, value *big.Int) (*types.Transaction, error) + + SetUpkeepGasLimit(opts *bind.TransactOpts, upkeepId *big.Int, gasLimit uint32) (*types.Transaction, error) + + SetUpkeepPrivilegeConfig(opts *bind.TransactOpts, upkeepId *big.Int, cfg []byte) (*types.Transaction, error) + + TopUpFund(opts *bind.TransactOpts, upkeepId *big.Int, blockNum *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + UpdateLogTriggerConfig1(opts *bind.TransactOpts, upkeepId *big.Int, addr common.Address, selector uint8, topic0 [32]byte, topic1 [32]byte, topic2 [32]byte, topic3 [32]byte) (*types.Transaction, error) + + UpdateLogTriggerConfig2(opts *bind.TransactOpts, upkeepId *big.Int, cfg []byte) (*types.Transaction, error) + + UpdateUpkeepPipelineData(opts *bind.TransactOpts, upkeepId *big.Int, pipelineData []byte) (*types.Transaction, error) + + WithdrawLinks(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawLinks0(opts *bind.TransactOpts, upkeepId *big.Int) (*types.Transaction, error) + + Receive(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterLogEmitted(opts *bind.FilterOpts, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (*VerifiableLoadUpkeepLogEmittedIterator, error) + + WatchLogEmitted(opts *bind.WatchOpts, sink chan<- *VerifiableLoadUpkeepLogEmitted, upkeepId 
[]*big.Int, blockNum []*big.Int, addr []common.Address) (event.Subscription, error) + + ParseLogEmitted(log types.Log) (*VerifiableLoadUpkeepLogEmitted, error) + + FilterLogEmittedAgain(opts *bind.FilterOpts, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (*VerifiableLoadUpkeepLogEmittedAgainIterator, error) + + WatchLogEmittedAgain(opts *bind.WatchOpts, sink chan<- *VerifiableLoadUpkeepLogEmittedAgain, upkeepId []*big.Int, blockNum []*big.Int, addr []common.Address) (event.Subscription, error) + + ParseLogEmittedAgain(log types.Log) (*VerifiableLoadUpkeepLogEmittedAgain, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifiableLoadUpkeepOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VerifiableLoadUpkeepOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VerifiableLoadUpkeepOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifiableLoadUpkeepOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VerifiableLoadUpkeepOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VerifiableLoadUpkeepOwnershipTransferred, error) + + FilterUpkeepTopUp(opts *bind.FilterOpts) (*VerifiableLoadUpkeepUpkeepTopUpIterator, error) + + WatchUpkeepTopUp(opts *bind.WatchOpts, sink chan<- *VerifiableLoadUpkeepUpkeepTopUp) (event.Subscription, error) + + ParseUpkeepTopUp(log types.Log) (*VerifiableLoadUpkeepUpkeepTopUp, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_consumer_v2/vrf_consumer_v2.go 
b/core/gethwrappers/generated/vrf_consumer_v2/vrf_consumer_v2.go new file mode 100644 index 00000000..16090150 --- /dev/null +++ b/core/gethwrappers/generated/vrf_consumer_v2/vrf_consumer_v2.go @@ -0,0 +1,344 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrf_consumer_v2 + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFConsumerV2MetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"createSubscriptionAndFund\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint6
4\"},{\"internalType\":\"uint16\",\"name\":\"minReqConfs\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"}],\"name\":\"requestRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_gasAvailable\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_subId\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"topUpSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"name\":\"updateSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60a060405234801561001057600080fd5b50604051610e8d380380610e8d83398101604081905261002f9161008e565b6001600160601b0319606083901b16608052600280546001600160a01b03199081166001600160a01b0394851617909155600380549290931691161790556100c1565b80516001600160a01b038116811461008957600080fd5b919050565b600080604083850312156100a157600080fd5b6100aa83610072565b91506100b860208401610072565b90509250929050565b60805160601c610da76100e66000396000818161028001526102e80152610da76000f3fe608060405234801561001057600080fd5b50600436106100a35760003560e01c8063706da1ca11610076578063e89e106a1161005b578063e89e106a14610161578063f08c5daa1461016a578063f6eaffc81461017357600080fd5b8063706da1ca14610109578063cf62c8ab1461014e57600080fd5b8063177b9692146100a85780631fe543e3146100ce5780632fa4e442146100e357806336bfffed146100f6575b600080fd5b6100bb6100b6366004610a20565b610186565b6040519081526020015b60405180910390f35b6100e16100dc366004610abb565b610268565b005b6100e16100f1366004610b7c565b610328565b6100e1610104366004610938565b610488565b6003546101359074010000000000000000000000000000000000000000900467ffffffffffffffff1681565b60405167ffffffffffffffff90911681526020016100c5565b6100e161015c366004610b7c565b610610565b6100bb60015481565b6100bb60045481565b6100bb610181366004610a89565b610817565b6002546040517f5d3b1d300000000000000000000000000000000000000000000000000000000081526004810187905267ffffffffffffffff8616602482015261ffff8516604482015263ffffffff80851660648301528316608482015260009173ffffffffffffffffffffffffffffffffffffffff1690635d3b1d309060a401602060405180830381600087803b15801561022157600080fd5b505af1158015610235573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906102599190610aa2565b60018190559695505050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461031a576040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000
0000000000000001660248201526044015b60405180910390fd5b6103248282610838565b5050565b60035474010000000000000000000000000000000000000000900467ffffffffffffffff166103b3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f737562206e6f74207365740000000000000000000000000000000000000000006044820152606401610311565b6003546002546040805174010000000000000000000000000000000000000000840467ffffffffffffffff16602082015273ffffffffffffffffffffffffffffffffffffffff93841693634000aea09316918591015b6040516020818303038152906040526040518463ffffffff1660e01b815260040161043693929190610baa565b602060405180830381600087803b15801561045057600080fd5b505af1158015610464573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061032491906109f7565b60035474010000000000000000000000000000000000000000900467ffffffffffffffff16610513576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f7375624944206e6f7420736574000000000000000000000000000000000000006044820152606401610311565b60005b815181101561032457600254600354835173ffffffffffffffffffffffffffffffffffffffff90921691637341c10c9174010000000000000000000000000000000000000000900467ffffffffffffffff169085908590811061057b5761057b610d23565b60200260200101516040518363ffffffff1660e01b81526004016105cb92919067ffffffffffffffff92909216825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b600060405180830381600087803b1580156105e557600080fd5b505af11580156105f9573d6000803e3d6000fd5b50505050808061060890610cc3565b915050610516565b60035474010000000000000000000000000000000000000000900467ffffffffffffffff166103b357600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b1580156106a357600080fd5b505af11580156106b7573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906106db9190610b5f565b600380547fffffffff0000000
000000000ffffffffffffffffffffffffffffffffffffffff167401000000000000000000000000000000000000000067ffffffffffffffff938416810291909117918290556002546040517f7341c10c00000000000000000000000000000000000000000000000000000000815291909204909216600483015230602483015273ffffffffffffffffffffffffffffffffffffffff1690637341c10c90604401600060405180830381600087803b1580156107a257600080fd5b505af11580156107b6573d6000803e3d6000fd5b50506003546002546040805174010000000000000000000000000000000000000000840467ffffffffffffffff16602082015273ffffffffffffffffffffffffffffffffffffffff9384169550634000aea094509290911691859101610409565b6000818154811061082757600080fd5b600091825260209091200154905081565b60015482146108a3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265717565737420494420697320696e636f72726563740000000000000000006044820152606401610311565b5a60045580516108ba9060009060208401906108bf565b505050565b8280548282559060005260206000209081019282156108fa579160200282015b828111156108fa5782518255916020019190600101906108df565b5061090692915061090a565b5090565b5b80821115610906576000815560010161090b565b803563ffffffff8116811461093357600080fd5b919050565b6000602080838503121561094b57600080fd5b823567ffffffffffffffff81111561096257600080fd5b8301601f8101851361097357600080fd5b803561098661098182610c9f565b610c50565b80828252848201915084840188868560051b87010111156109a657600080fd5b60009450845b848110156109e957813573ffffffffffffffffffffffffffffffffffffffff811681146109d7578687fd5b845292860192908601906001016109ac565b509098975050505050505050565b600060208284031215610a0957600080fd5b81518015158114610a1957600080fd5b9392505050565b600080600080600060a08688031215610a3857600080fd5b853594506020860135610a4a81610d81565b9350604086013561ffff81168114610a6157600080fd5b9250610a6f6060870161091f565b9150610a7d6080870161091f565b90509295509295909350565b600060208284031215610a9b57600080fd5b5035919050565b600060208284031215610ab457600080fd5b5051919050565b60008060408385031215610ace57600080fd5b823
5915060208084013567ffffffffffffffff811115610aed57600080fd5b8401601f81018613610afe57600080fd5b8035610b0c61098182610c9f565b80828252848201915084840189868560051b8701011115610b2c57600080fd5b600094505b83851015610b4f578035835260019490940193918501918501610b31565b5080955050505050509250929050565b600060208284031215610b7157600080fd5b8151610a1981610d81565b600060208284031215610b8e57600080fd5b81356bffffffffffffffffffffffff81168114610a1957600080fd5b73ffffffffffffffffffffffffffffffffffffffff84168152600060206bffffffffffffffffffffffff85168184015260606040840152835180606085015260005b81811015610c0857858101830151858201608001528201610bec565b81811115610c1a576000608083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160800195945050505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610c9757610c97610d52565b604052919050565b600067ffffffffffffffff821115610cb957610cb9610d52565b5060051b60200190565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415610d1c577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b67ffffffffffffffff81168114610d9757600080fd5b5056fea164736f6c6343000806000a", +} + +var VRFConsumerV2ABI = VRFConsumerV2MetaData.ABI + +var VRFConsumerV2Bin = VRFConsumerV2MetaData.Bin + +func DeployVRFConsumerV2(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator common.Address, link common.Address) (common.Address, *types.Transaction, *VRFConsumerV2, error) { + parsed, err := VRFConsumerV2MetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err 
:= bind.DeployContract(auth, *parsed, common.FromHex(VRFConsumerV2Bin), backend, vrfCoordinator, link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFConsumerV2{address: address, abi: *parsed, VRFConsumerV2Caller: VRFConsumerV2Caller{contract: contract}, VRFConsumerV2Transactor: VRFConsumerV2Transactor{contract: contract}, VRFConsumerV2Filterer: VRFConsumerV2Filterer{contract: contract}}, nil +} + +type VRFConsumerV2 struct { + address common.Address + abi abi.ABI + VRFConsumerV2Caller + VRFConsumerV2Transactor + VRFConsumerV2Filterer +} + +type VRFConsumerV2Caller struct { + contract *bind.BoundContract +} + +type VRFConsumerV2Transactor struct { + contract *bind.BoundContract +} + +type VRFConsumerV2Filterer struct { + contract *bind.BoundContract +} + +type VRFConsumerV2Session struct { + Contract *VRFConsumerV2 + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFConsumerV2CallerSession struct { + Contract *VRFConsumerV2Caller + CallOpts bind.CallOpts +} + +type VRFConsumerV2TransactorSession struct { + Contract *VRFConsumerV2Transactor + TransactOpts bind.TransactOpts +} + +type VRFConsumerV2Raw struct { + Contract *VRFConsumerV2 +} + +type VRFConsumerV2CallerRaw struct { + Contract *VRFConsumerV2Caller +} + +type VRFConsumerV2TransactorRaw struct { + Contract *VRFConsumerV2Transactor +} + +func NewVRFConsumerV2(address common.Address, backend bind.ContractBackend) (*VRFConsumerV2, error) { + abi, err := abi.JSON(strings.NewReader(VRFConsumerV2ABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFConsumerV2(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFConsumerV2{address: address, abi: abi, VRFConsumerV2Caller: VRFConsumerV2Caller{contract: contract}, VRFConsumerV2Transactor: VRFConsumerV2Transactor{contract: contract}, VRFConsumerV2Filterer: VRFConsumerV2Filterer{contract: contract}}, nil +} + +func NewVRFConsumerV2Caller(address 
common.Address, caller bind.ContractCaller) (*VRFConsumerV2Caller, error) { + contract, err := bindVRFConsumerV2(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFConsumerV2Caller{contract: contract}, nil +} + +func NewVRFConsumerV2Transactor(address common.Address, transactor bind.ContractTransactor) (*VRFConsumerV2Transactor, error) { + contract, err := bindVRFConsumerV2(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFConsumerV2Transactor{contract: contract}, nil +} + +func NewVRFConsumerV2Filterer(address common.Address, filterer bind.ContractFilterer) (*VRFConsumerV2Filterer, error) { + contract, err := bindVRFConsumerV2(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFConsumerV2Filterer{contract: contract}, nil +} + +func bindVRFConsumerV2(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFConsumerV2MetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFConsumerV2 *VRFConsumerV2Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFConsumerV2.Contract.VRFConsumerV2Caller.contract.Call(opts, result, method, params...) +} + +func (_VRFConsumerV2 *VRFConsumerV2Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.VRFConsumerV2Transactor.contract.Transfer(opts) +} + +func (_VRFConsumerV2 *VRFConsumerV2Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.VRFConsumerV2Transactor.contract.Transact(opts, method, params...) 
+} + +func (_VRFConsumerV2 *VRFConsumerV2CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFConsumerV2.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFConsumerV2 *VRFConsumerV2TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.contract.Transfer(opts) +} + +func (_VRFConsumerV2 *VRFConsumerV2TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFConsumerV2 *VRFConsumerV2Caller) SGasAvailable(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFConsumerV2.contract.Call(opts, &out, "s_gasAvailable") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumerV2 *VRFConsumerV2Session) SGasAvailable() (*big.Int, error) { + return _VRFConsumerV2.Contract.SGasAvailable(&_VRFConsumerV2.CallOpts) +} + +func (_VRFConsumerV2 *VRFConsumerV2CallerSession) SGasAvailable() (*big.Int, error) { + return _VRFConsumerV2.Contract.SGasAvailable(&_VRFConsumerV2.CallOpts) +} + +func (_VRFConsumerV2 *VRFConsumerV2Caller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFConsumerV2.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumerV2 *VRFConsumerV2Session) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFConsumerV2.Contract.SRandomWords(&_VRFConsumerV2.CallOpts, arg0) +} + +func (_VRFConsumerV2 *VRFConsumerV2CallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return 
_VRFConsumerV2.Contract.SRandomWords(&_VRFConsumerV2.CallOpts, arg0) +} + +func (_VRFConsumerV2 *VRFConsumerV2Caller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFConsumerV2.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumerV2 *VRFConsumerV2Session) SRequestId() (*big.Int, error) { + return _VRFConsumerV2.Contract.SRequestId(&_VRFConsumerV2.CallOpts) +} + +func (_VRFConsumerV2 *VRFConsumerV2CallerSession) SRequestId() (*big.Int, error) { + return _VRFConsumerV2.Contract.SRequestId(&_VRFConsumerV2.CallOpts) +} + +func (_VRFConsumerV2 *VRFConsumerV2Caller) SSubId(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _VRFConsumerV2.contract.Call(opts, &out, "s_subId") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_VRFConsumerV2 *VRFConsumerV2Session) SSubId() (uint64, error) { + return _VRFConsumerV2.Contract.SSubId(&_VRFConsumerV2.CallOpts) +} + +func (_VRFConsumerV2 *VRFConsumerV2CallerSession) SSubId() (uint64, error) { + return _VRFConsumerV2.Contract.SSubId(&_VRFConsumerV2.CallOpts) +} + +func (_VRFConsumerV2 *VRFConsumerV2Transactor) CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2.contract.Transact(opts, "createSubscriptionAndFund", amount) +} + +func (_VRFConsumerV2 *VRFConsumerV2Session) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.CreateSubscriptionAndFund(&_VRFConsumerV2.TransactOpts, amount) +} + +func (_VRFConsumerV2 *VRFConsumerV2TransactorSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.CreateSubscriptionAndFund(&_VRFConsumerV2.TransactOpts, 
amount) +} + +func (_VRFConsumerV2 *VRFConsumerV2Transactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFConsumerV2.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFConsumerV2 *VRFConsumerV2Session) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.RawFulfillRandomWords(&_VRFConsumerV2.TransactOpts, requestId, randomWords) +} + +func (_VRFConsumerV2 *VRFConsumerV2TransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.RawFulfillRandomWords(&_VRFConsumerV2.TransactOpts, requestId, randomWords) +} + +func (_VRFConsumerV2 *VRFConsumerV2Transactor) RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, subId uint64, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFConsumerV2.contract.Transact(opts, "requestRandomness", keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFConsumerV2 *VRFConsumerV2Session) RequestRandomness(keyHash [32]byte, subId uint64, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.RequestRandomness(&_VRFConsumerV2.TransactOpts, keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFConsumerV2 *VRFConsumerV2TransactorSession) RequestRandomness(keyHash [32]byte, subId uint64, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.RequestRandomness(&_VRFConsumerV2.TransactOpts, keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFConsumerV2 *VRFConsumerV2Transactor) TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return 
_VRFConsumerV2.contract.Transact(opts, "topUpSubscription", amount) +} + +func (_VRFConsumerV2 *VRFConsumerV2Session) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.TopUpSubscription(&_VRFConsumerV2.TransactOpts, amount) +} + +func (_VRFConsumerV2 *VRFConsumerV2TransactorSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.TopUpSubscription(&_VRFConsumerV2.TransactOpts, amount) +} + +func (_VRFConsumerV2 *VRFConsumerV2Transactor) UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) { + return _VRFConsumerV2.contract.Transact(opts, "updateSubscription", consumers) +} + +func (_VRFConsumerV2 *VRFConsumerV2Session) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.UpdateSubscription(&_VRFConsumerV2.TransactOpts, consumers) +} + +func (_VRFConsumerV2 *VRFConsumerV2TransactorSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFConsumerV2.Contract.UpdateSubscription(&_VRFConsumerV2.TransactOpts, consumers) +} + +func (_VRFConsumerV2 *VRFConsumerV2) Address() common.Address { + return _VRFConsumerV2.address +} + +type VRFConsumerV2Interface interface { + SGasAvailable(opts *bind.CallOpts) (*big.Int, error) + + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + SSubId(opts *bind.CallOpts) (uint64, error) + + CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, subId uint64, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) + + TopUpSubscription(opts *bind.TransactOpts, amount 
*big.Int) (*types.Transaction, error) + + UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_consumer_v2_plus_upgradeable_example/vrf_consumer_v2_plus_upgradeable_example.go b/core/gethwrappers/generated/vrf_consumer_v2_plus_upgradeable_example/vrf_consumer_v2_plus_upgradeable_example.go new file mode 100644 index 00000000..c02423b1 --- /dev/null +++ b/core/gethwrappers/generated/vrf_consumer_v2_plus_upgradeable_example/vrf_consumer_v2_plus_upgradeable_example.go @@ -0,0 +1,547 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrf_consumer_v2_plus_upgradeable_example + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFConsumerV2PlusUpgradeableExampleMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"COORDINATOR\",\"outputs\":[{\"internalType\":\"contractIVRFCoordinatorV2Plus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLITOKEN\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"createSubscriptionAndFund\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"minReqConfs\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"}],\"name\":\"requestRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"st
ateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_gasAvailable\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_subId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"topUpSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"name\":\"updateSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b506110c1806100206000396000f3fe608060405234801561001057600080fd5b50600436106100d45760003560e01c806355380dfb11610081578063e89e106a1161005b578063e89e106a146101ce578063f08c5daa146101d7578063f6eaffc8146101e057600080fd5b806355380dfb14610192578063706da1ca146101b2578063cf62c8ab146101bb57600080fd5b806336bfffed116100b257806336bfffed146101275780633b2bcbf11461013a578063485cc9551461017f57600080fd5b80631fe543e3146100d95780632e75964e146100ee5780632fa4e44214610114575b600080fd5b6100ec6100e7366004610d95565b6101f3565b005b6101016100fc366004610d03565b610284565b6040519081526020015b60405180910390f35b6100ec610122366004610e39565b610381565b6100ec610135366004610c36565b6104a3565b60345461015a9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161010b565b6100ec61018d366004610c03565b6105db565b60355461015a9073ffffffffffffffffffffffffffffffffffffffff1681565b61010160365481565b6100ec6101c9366004610e39565b6107c5565b61010160335481565b61010160375481565b6101016101ee366004610d63565b61093c565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff163314610276576000546040517f1cf993f40000000000000000000000000000000000000000000000000000000081523360048201526201000090910473ffffffffffffffffffffffffffffffffffffffff1660248201526044015b60405180910390fd5b610280828261095d565b5050565b6040805160c081018252868152602080820187905261ffff86168284015263ffffffff80861660608401528416608083015282519081018352600080825260a083019190915260345492517f9b1c385e000000000000000000000000000000000000000000000000000000008152909273ffffffffffffffffffffffffffffffffffffffff1690639b1c385e9061031f908490600401610f1e565b602060405180830381600087803b15801561033957600080fd5b505af115801561034d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103719190610d7c565b6033819055979650505050505050565b6036546103ea576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f737562206e6f74207
36574000000000000000000000000000000000000000000604482015260640161026d565b60355460345460365460408051602081019290925273ffffffffffffffffffffffffffffffffffffffff93841693634000aea09316918591015b6040516020818303038152906040526040518463ffffffff1660e01b815260040161045193929190610ed2565b602060405180830381600087803b15801561046b57600080fd5b505af115801561047f573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906102809190610cda565b60365461050c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f7375624944206e6f742073657400000000000000000000000000000000000000604482015260640161026d565b60005b815181101561028057603454603654835173ffffffffffffffffffffffffffffffffffffffff9092169163bec4c08c919085908590811061055257610552611056565b60200260200101516040518363ffffffff1660e01b815260040161059692919091825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b600060405180830381600087803b1580156105b057600080fd5b505af11580156105c4573d6000803e3d6000fd5b5050505080806105d390610ff6565b91505061050f565b600054610100900460ff16158080156105fb5750600054600160ff909116105b806106155750303b158015610615575060005460ff166001145b6106a1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a6564000000000000000000000000000000000000606482015260840161026d565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905580156106ff57600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b610708836109df565b6034805473ffffffffffffffffffffffffffffffffffffffff8086167fffffffffffffffffffffffff000000000000000000000000000000000000000092831617909255603580549285169290911691909117905580156107c057600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cef
b38474024989060200160405180910390a15b505050565b6036546103ea57603460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b15801561083657600080fd5b505af115801561084a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061086e9190610d7c565b60368190556034546040517fbec4c08c000000000000000000000000000000000000000000000000000000008152600481019290925230602483015273ffffffffffffffffffffffffffffffffffffffff169063bec4c08c90604401600060405180830381600087803b1580156108e457600080fd5b505af11580156108f8573d6000803e3d6000fd5b5050505060355460345460365460405173ffffffffffffffffffffffffffffffffffffffff93841693634000aea09316918591610424919060200190815260200190565b6032818154811061094c57600080fd5b600091825260209091200154905081565b60335482146109c8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265717565737420494420697320696e636f7272656374000000000000000000604482015260640161026d565b5a60375580516107c0906032906020840190610b66565b600054610100900460ff16610a76576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e67000000000000000000000000000000000000000000606482015260840161026d565b73ffffffffffffffffffffffffffffffffffffffff8116610b19576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f6d75737420676976652076616c696420636f6f7264696e61746f72206164647260448201527f6573730000000000000000000000000000000000000000000000000000000000606482015260840161026d565b6000805473ffffffffffffffffffffffffffffffffffffffff90921662010000027fffffffffffffffffffff0000000000000000000000000000000000000000ffff909216919091179055565b828054828255906000526020600020908101928215610ba1579160200282015b82811115610ba157825182559160200
1919060010190610b86565b50610bad929150610bb1565b5090565b5b80821115610bad5760008155600101610bb2565b803573ffffffffffffffffffffffffffffffffffffffff81168114610bea57600080fd5b919050565b803563ffffffff81168114610bea57600080fd5b60008060408385031215610c1657600080fd5b610c1f83610bc6565b9150610c2d60208401610bc6565b90509250929050565b60006020808385031215610c4957600080fd5b823567ffffffffffffffff811115610c6057600080fd5b8301601f81018513610c7157600080fd5b8035610c84610c7f82610fd2565b610f83565b80828252848201915084840188868560051b8701011115610ca457600080fd5b600094505b83851015610cce57610cba81610bc6565b835260019490940193918501918501610ca9565b50979650505050505050565b600060208284031215610cec57600080fd5b81518015158114610cfc57600080fd5b9392505050565b600080600080600060a08688031215610d1b57600080fd5b8535945060208601359350604086013561ffff81168114610d3b57600080fd5b9250610d4960608701610bef565b9150610d5760808701610bef565b90509295509295909350565b600060208284031215610d7557600080fd5b5035919050565b600060208284031215610d8e57600080fd5b5051919050565b60008060408385031215610da857600080fd5b8235915060208084013567ffffffffffffffff811115610dc757600080fd5b8401601f81018613610dd857600080fd5b8035610de6610c7f82610fd2565b80828252848201915084840189868560051b8701011115610e0657600080fd5b600094505b83851015610e29578035835260019490940193918501918501610e0b565b5080955050505050509250929050565b600060208284031215610e4b57600080fd5b81356bffffffffffffffffffffffff81168114610cfc57600080fd5b6000815180845260005b81811015610e8d57602081850181015186830182015201610e71565b81811115610e9f576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b73ffffffffffffffffffffffffffffffffffffffff841681526bffffffffffffffffffffffff83166020820152606060408201526000610f156060830184610e67565b95945050505050565b60208152815160208201526020820151604082015261ffff60408301511660608201526000606083015163ffffffff80821660808501528060808601511660a0850152505060a083015160c080840152610f7b60e0840182610e67565b94935
0505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610fca57610fca611085565b604052919050565b600067ffffffffffffffff821115610fec57610fec611085565b5060051b60200190565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82141561104f577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFConsumerV2PlusUpgradeableExampleABI = VRFConsumerV2PlusUpgradeableExampleMetaData.ABI + +var VRFConsumerV2PlusUpgradeableExampleBin = VRFConsumerV2PlusUpgradeableExampleMetaData.Bin + +func DeployVRFConsumerV2PlusUpgradeableExample(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRFConsumerV2PlusUpgradeableExample, error) { + parsed, err := VRFConsumerV2PlusUpgradeableExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFConsumerV2PlusUpgradeableExampleBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFConsumerV2PlusUpgradeableExample{address: address, abi: *parsed, VRFConsumerV2PlusUpgradeableExampleCaller: VRFConsumerV2PlusUpgradeableExampleCaller{contract: contract}, VRFConsumerV2PlusUpgradeableExampleTransactor: VRFConsumerV2PlusUpgradeableExampleTransactor{contract: contract}, VRFConsumerV2PlusUpgradeableExampleFilterer: VRFConsumerV2PlusUpgradeableExampleFilterer{contract: contract}}, nil +} + +type VRFConsumerV2PlusUpgradeableExample struct { + address common.Address + abi abi.ABI + 
VRFConsumerV2PlusUpgradeableExampleCaller + VRFConsumerV2PlusUpgradeableExampleTransactor + VRFConsumerV2PlusUpgradeableExampleFilterer +} + +type VRFConsumerV2PlusUpgradeableExampleCaller struct { + contract *bind.BoundContract +} + +type VRFConsumerV2PlusUpgradeableExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFConsumerV2PlusUpgradeableExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFConsumerV2PlusUpgradeableExampleSession struct { + Contract *VRFConsumerV2PlusUpgradeableExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFConsumerV2PlusUpgradeableExampleCallerSession struct { + Contract *VRFConsumerV2PlusUpgradeableExampleCaller + CallOpts bind.CallOpts +} + +type VRFConsumerV2PlusUpgradeableExampleTransactorSession struct { + Contract *VRFConsumerV2PlusUpgradeableExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFConsumerV2PlusUpgradeableExampleRaw struct { + Contract *VRFConsumerV2PlusUpgradeableExample +} + +type VRFConsumerV2PlusUpgradeableExampleCallerRaw struct { + Contract *VRFConsumerV2PlusUpgradeableExampleCaller +} + +type VRFConsumerV2PlusUpgradeableExampleTransactorRaw struct { + Contract *VRFConsumerV2PlusUpgradeableExampleTransactor +} + +func NewVRFConsumerV2PlusUpgradeableExample(address common.Address, backend bind.ContractBackend) (*VRFConsumerV2PlusUpgradeableExample, error) { + abi, err := abi.JSON(strings.NewReader(VRFConsumerV2PlusUpgradeableExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFConsumerV2PlusUpgradeableExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFConsumerV2PlusUpgradeableExample{address: address, abi: abi, VRFConsumerV2PlusUpgradeableExampleCaller: VRFConsumerV2PlusUpgradeableExampleCaller{contract: contract}, VRFConsumerV2PlusUpgradeableExampleTransactor: VRFConsumerV2PlusUpgradeableExampleTransactor{contract: contract}, 
VRFConsumerV2PlusUpgradeableExampleFilterer: VRFConsumerV2PlusUpgradeableExampleFilterer{contract: contract}}, nil +} + +func NewVRFConsumerV2PlusUpgradeableExampleCaller(address common.Address, caller bind.ContractCaller) (*VRFConsumerV2PlusUpgradeableExampleCaller, error) { + contract, err := bindVRFConsumerV2PlusUpgradeableExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFConsumerV2PlusUpgradeableExampleCaller{contract: contract}, nil +} + +func NewVRFConsumerV2PlusUpgradeableExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFConsumerV2PlusUpgradeableExampleTransactor, error) { + contract, err := bindVRFConsumerV2PlusUpgradeableExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFConsumerV2PlusUpgradeableExampleTransactor{contract: contract}, nil +} + +func NewVRFConsumerV2PlusUpgradeableExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFConsumerV2PlusUpgradeableExampleFilterer, error) { + contract, err := bindVRFConsumerV2PlusUpgradeableExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFConsumerV2PlusUpgradeableExampleFilterer{contract: contract}, nil +} + +func bindVRFConsumerV2PlusUpgradeableExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFConsumerV2PlusUpgradeableExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFConsumerV2PlusUpgradeableExample.Contract.VRFConsumerV2PlusUpgradeableExampleCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.VRFConsumerV2PlusUpgradeableExampleTransactor.contract.Transfer(opts) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.VRFConsumerV2PlusUpgradeableExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFConsumerV2PlusUpgradeableExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.contract.Transfer(opts) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCaller) COORDINATOR(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFConsumerV2PlusUpgradeableExample.contract.Call(opts, &out, "COORDINATOR") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleSession) COORDINATOR() (common.Address, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.COORDINATOR(&_VRFConsumerV2PlusUpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCallerSession) COORDINATOR() (common.Address, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.COORDINATOR(&_VRFConsumerV2PlusUpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCaller) PLITOKEN(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFConsumerV2PlusUpgradeableExample.contract.Call(opts, &out, "PLITOKEN") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleSession) PLITOKEN() (common.Address, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.PLITOKEN(&_VRFConsumerV2PlusUpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCallerSession) PLITOKEN() (common.Address, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.PLITOKEN(&_VRFConsumerV2PlusUpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCaller) SGasAvailable(opts *bind.CallOpts) (*big.Int, error) { + var out 
[]interface{} + err := _VRFConsumerV2PlusUpgradeableExample.contract.Call(opts, &out, "s_gasAvailable") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleSession) SGasAvailable() (*big.Int, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.SGasAvailable(&_VRFConsumerV2PlusUpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCallerSession) SGasAvailable() (*big.Int, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.SGasAvailable(&_VRFConsumerV2PlusUpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCaller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFConsumerV2PlusUpgradeableExample.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.SRandomWords(&_VRFConsumerV2PlusUpgradeableExample.CallOpts, arg0) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.SRandomWords(&_VRFConsumerV2PlusUpgradeableExample.CallOpts, arg0) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCaller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFConsumerV2PlusUpgradeableExample.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), 
err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleSession) SRequestId() (*big.Int, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.SRequestId(&_VRFConsumerV2PlusUpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCallerSession) SRequestId() (*big.Int, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.SRequestId(&_VRFConsumerV2PlusUpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCaller) SSubId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFConsumerV2PlusUpgradeableExample.contract.Call(opts, &out, "s_subId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleSession) SSubId() (*big.Int, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.SSubId(&_VRFConsumerV2PlusUpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleCallerSession) SSubId() (*big.Int, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.SSubId(&_VRFConsumerV2PlusUpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactor) CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.contract.Transact(opts, "createSubscriptionAndFund", amount) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return 
_VRFConsumerV2PlusUpgradeableExample.Contract.CreateSubscriptionAndFund(&_VRFConsumerV2PlusUpgradeableExample.TransactOpts, amount) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactorSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.CreateSubscriptionAndFund(&_VRFConsumerV2PlusUpgradeableExample.TransactOpts, amount) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactor) Initialize(opts *bind.TransactOpts, _vrfCoordinator common.Address, _link common.Address) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.contract.Transact(opts, "initialize", _vrfCoordinator, _link) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleSession) Initialize(_vrfCoordinator common.Address, _link common.Address) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.Initialize(&_VRFConsumerV2PlusUpgradeableExample.TransactOpts, _vrfCoordinator, _link) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactorSession) Initialize(_vrfCoordinator common.Address, _link common.Address) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.Initialize(&_VRFConsumerV2PlusUpgradeableExample.TransactOpts, _vrfCoordinator, _link) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return 
_VRFConsumerV2PlusUpgradeableExample.Contract.RawFulfillRandomWords(&_VRFConsumerV2PlusUpgradeableExample.TransactOpts, requestId, randomWords) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.RawFulfillRandomWords(&_VRFConsumerV2PlusUpgradeableExample.TransactOpts, requestId, randomWords) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactor) RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, subId *big.Int, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.contract.Transact(opts, "requestRandomness", keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleSession) RequestRandomness(keyHash [32]byte, subId *big.Int, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.RequestRandomness(&_VRFConsumerV2PlusUpgradeableExample.TransactOpts, keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactorSession) RequestRandomness(keyHash [32]byte, subId *big.Int, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.RequestRandomness(&_VRFConsumerV2PlusUpgradeableExample.TransactOpts, keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactor) TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return 
_VRFConsumerV2PlusUpgradeableExample.contract.Transact(opts, "topUpSubscription", amount) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.TopUpSubscription(&_VRFConsumerV2PlusUpgradeableExample.TransactOpts, amount) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactorSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.TopUpSubscription(&_VRFConsumerV2PlusUpgradeableExample.TransactOpts, amount) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactor) UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.contract.Transact(opts, "updateSubscription", consumers) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.UpdateSubscription(&_VRFConsumerV2PlusUpgradeableExample.TransactOpts, consumers) +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleTransactorSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFConsumerV2PlusUpgradeableExample.Contract.UpdateSubscription(&_VRFConsumerV2PlusUpgradeableExample.TransactOpts, consumers) +} + +type VRFConsumerV2PlusUpgradeableExampleInitializedIterator struct { + Event *VRFConsumerV2PlusUpgradeableExampleInitialized + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFConsumerV2PlusUpgradeableExampleInitializedIterator) Next() bool { + + if it.fail != nil { + return false + } + 
+ if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFConsumerV2PlusUpgradeableExampleInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFConsumerV2PlusUpgradeableExampleInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFConsumerV2PlusUpgradeableExampleInitializedIterator) Error() error { + return it.fail +} + +func (it *VRFConsumerV2PlusUpgradeableExampleInitializedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFConsumerV2PlusUpgradeableExampleInitialized struct { + Version uint8 + Raw types.Log +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleFilterer) FilterInitialized(opts *bind.FilterOpts) (*VRFConsumerV2PlusUpgradeableExampleInitializedIterator, error) { + + logs, sub, err := _VRFConsumerV2PlusUpgradeableExample.contract.FilterLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return &VRFConsumerV2PlusUpgradeableExampleInitializedIterator{contract: _VRFConsumerV2PlusUpgradeableExample.contract, event: "Initialized", logs: logs, sub: sub}, nil +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleFilterer) WatchInitialized(opts *bind.WatchOpts, sink chan<- *VRFConsumerV2PlusUpgradeableExampleInitialized) (event.Subscription, error) { + + logs, sub, err := _VRFConsumerV2PlusUpgradeableExample.contract.WatchLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(VRFConsumerV2PlusUpgradeableExampleInitialized) + if err := _VRFConsumerV2PlusUpgradeableExample.contract.UnpackLog(event, "Initialized", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExampleFilterer) ParseInitialized(log types.Log) (*VRFConsumerV2PlusUpgradeableExampleInitialized, error) { + event := new(VRFConsumerV2PlusUpgradeableExampleInitialized) + if err := _VRFConsumerV2PlusUpgradeableExample.contract.UnpackLog(event, "Initialized", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExample) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFConsumerV2PlusUpgradeableExample.abi.Events["Initialized"].ID: + return _VRFConsumerV2PlusUpgradeableExample.ParseInitialized(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFConsumerV2PlusUpgradeableExampleInitialized) Topic() common.Hash { + return common.HexToHash("0x7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498") +} + +func (_VRFConsumerV2PlusUpgradeableExample *VRFConsumerV2PlusUpgradeableExample) Address() common.Address { + return _VRFConsumerV2PlusUpgradeableExample.address +} + +type VRFConsumerV2PlusUpgradeableExampleInterface interface { + COORDINATOR(opts *bind.CallOpts) (common.Address, error) + + PLITOKEN(opts *bind.CallOpts) (common.Address, error) + + SGasAvailable(opts *bind.CallOpts) (*big.Int, error) + + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + SSubId(opts *bind.CallOpts) (*big.Int, error) + + 
CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + Initialize(opts *bind.TransactOpts, _vrfCoordinator common.Address, _link common.Address) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, subId *big.Int, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) + + TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) + + FilterInitialized(opts *bind.FilterOpts) (*VRFConsumerV2PlusUpgradeableExampleInitializedIterator, error) + + WatchInitialized(opts *bind.WatchOpts, sink chan<- *VRFConsumerV2PlusUpgradeableExampleInitialized) (event.Subscription, error) + + ParseInitialized(log types.Log) (*VRFConsumerV2PlusUpgradeableExampleInitialized, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_consumer_v2_upgradeable_example/vrf_consumer_v2_upgradeable_example.go b/core/gethwrappers/generated/vrf_consumer_v2_upgradeable_example/vrf_consumer_v2_upgradeable_example.go new file mode 100644 index 00000000..51678fe0 --- /dev/null +++ b/core/gethwrappers/generated/vrf_consumer_v2_upgradeable_example/vrf_consumer_v2_upgradeable_example.go @@ -0,0 +1,547 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_consumer_v2_upgradeable_example + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFConsumerV2UpgradeableExampleMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"COORDINATOR\",\"outputs\":[{\"internalType\":\"contractVRFCoordinatorV2Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLITOKEN\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"createSubscriptionAndFund\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"n
ame\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"minReqConfs\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"}],\"name\":\"requestRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_gasAvailable\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_subId\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"topUpSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"name\":\"updateSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b506111c9806100206000396000f3fe608060405234801561001057600080fd5b50600436106100d45760003560e01c806355380dfb11610081578063e89e106a1161005b578063e89e106a1461020a578063f08c5daa14610213578063f6eaffc81461021c57600080fd5b806355380dfb14610192578063706da1ca146101b2578063cf62c8ab146101f757600080fd5b806336bfffed116100b257806336bfffed146101275780633b2bcbf11461013a578063485cc9551461017f57600080fd5b8063177b9692146100d95780631fe543e3146100ff5780632fa4e44214610114575b600080fd5b6100ec6100e7366004610e42565b61022f565b6040519081526020015b60405180910390f35b61011261010d366004610edd565b610311565b005b610112610122366004610f9e565b6103a2565b610112610135366004610d75565b610502565b60345461015a9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100f6565b61011261018d366004610d42565b61068a565b60355461015a9073ffffffffffffffffffffffffffffffffffffffff1681565b6035546101de9074010000000000000000000000000000000000000000900467ffffffffffffffff1681565b60405167ffffffffffffffff90911681526020016100f6565b610112610205366004610f9e565b610874565b6100ec60335481565b6100ec60365481565b6100ec61022a366004610eab565b610a7b565b6034546040517f5d3b1d300000000000000000000000000000000000000000000000000000000081526004810187905267ffffffffffffffff8616602482015261ffff8516604482015263ffffffff80851660648301528316608482015260009173ffffffffffffffffffffffffffffffffffffffff1690635d3b1d309060a401602060405180830381600087803b1580156102ca57600080fd5b505af11580156102de573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103029190610ec4565b60338190559695505050505050565b60005462010000900473ffffffffffffffffffffffffffffffffffffffff163314610394576000546040517f1cf993f40000000000000000000000000000000000000000000000000000000081523360048201526201000090910473ffffffffffffffffffffffffffffffffffffffff1660248201526044015b60405180910390fd5b61039e8282610a9c565b5050565b60355474010000000000000000000000000000000000000000900467fffffffffffffff
f1661042d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f737562206e6f7420736574000000000000000000000000000000000000000000604482015260640161038b565b6035546034546040805174010000000000000000000000000000000000000000840467ffffffffffffffff16602082015273ffffffffffffffffffffffffffffffffffffffff93841693634000aea09316918591015b6040516020818303038152906040526040518463ffffffff1660e01b81526004016104b093929190610fcc565b602060405180830381600087803b1580156104ca57600080fd5b505af11580156104de573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061039e9190610e19565b60355474010000000000000000000000000000000000000000900467ffffffffffffffff1661058d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f7375624944206e6f742073657400000000000000000000000000000000000000604482015260640161038b565b60005b815181101561039e57603454603554835173ffffffffffffffffffffffffffffffffffffffff90921691637341c10c9174010000000000000000000000000000000000000000900467ffffffffffffffff16908590859081106105f5576105f5611145565b60200260200101516040518363ffffffff1660e01b815260040161064592919067ffffffffffffffff92909216825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b600060405180830381600087803b15801561065f57600080fd5b505af1158015610673573d6000803e3d6000fd5b505050508080610682906110e5565b915050610590565b600054610100900460ff16158080156106aa5750600054600160ff909116105b806106c45750303b1580156106c4575060005460ff166001145b610750576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a6564000000000000000000000000000000000000606482015260840161038b565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905580156107ae57600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555
b6107b783610b1e565b6034805473ffffffffffffffffffffffffffffffffffffffff8086167fffffffffffffffffffffffff0000000000000000000000000000000000000000928316179092556035805492851692909116919091179055801561086f57600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b505050565b60355474010000000000000000000000000000000000000000900467ffffffffffffffff1661042d57603460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b15801561090757600080fd5b505af115801561091b573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061093f9190610f81565b603580547fffffffff0000000000000000ffffffffffffffffffffffffffffffffffffffff167401000000000000000000000000000000000000000067ffffffffffffffff938416810291909117918290556034546040517f7341c10c00000000000000000000000000000000000000000000000000000000815291909204909216600483015230602483015273ffffffffffffffffffffffffffffffffffffffff1690637341c10c90604401600060405180830381600087803b158015610a0657600080fd5b505af1158015610a1a573d6000803e3d6000fd5b50506035546034546040805174010000000000000000000000000000000000000000840467ffffffffffffffff16602082015273ffffffffffffffffffffffffffffffffffffffff9384169550634000aea094509290911691859101610483565b60328181548110610a8b57600080fd5b600091825260209091200154905081565b6033548214610b07576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265717565737420494420697320696e636f7272656374000000000000000000604482015260640161038b565b5a603655805161086f906032906020840190610ca5565b600054610100900460ff16610bb5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e6700000
0000000000000000000000000000000000000606482015260840161038b565b73ffffffffffffffffffffffffffffffffffffffff8116610c58576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f6d75737420676976652076616c696420636f6f7264696e61746f72206164647260448201527f6573730000000000000000000000000000000000000000000000000000000000606482015260840161038b565b6000805473ffffffffffffffffffffffffffffffffffffffff90921662010000027fffffffffffffffffffff0000000000000000000000000000000000000000ffff909216919091179055565b828054828255906000526020600020908101928215610ce0579160200282015b82811115610ce0578251825591602001919060010190610cc5565b50610cec929150610cf0565b5090565b5b80821115610cec5760008155600101610cf1565b803573ffffffffffffffffffffffffffffffffffffffff81168114610d2957600080fd5b919050565b803563ffffffff81168114610d2957600080fd5b60008060408385031215610d5557600080fd5b610d5e83610d05565b9150610d6c60208401610d05565b90509250929050565b60006020808385031215610d8857600080fd5b823567ffffffffffffffff811115610d9f57600080fd5b8301601f81018513610db057600080fd5b8035610dc3610dbe826110c1565b611072565b80828252848201915084840188868560051b8701011115610de357600080fd5b600094505b83851015610e0d57610df981610d05565b835260019490940193918501918501610de8565b50979650505050505050565b600060208284031215610e2b57600080fd5b81518015158114610e3b57600080fd5b9392505050565b600080600080600060a08688031215610e5a57600080fd5b853594506020860135610e6c816111a3565b9350604086013561ffff81168114610e8357600080fd5b9250610e9160608701610d2e565b9150610e9f60808701610d2e565b90509295509295909350565b600060208284031215610ebd57600080fd5b5035919050565b600060208284031215610ed657600080fd5b5051919050565b60008060408385031215610ef057600080fd5b8235915060208084013567ffffffffffffffff811115610f0f57600080fd5b8401601f81018613610f2057600080fd5b8035610f2e610dbe826110c1565b80828252848201915084840189868560051b8701011115610f4e57600080fd5b600094505b83851015610f71578035835260019490940193918501918501610f53565b5080955050505050509250929
050565b600060208284031215610f9357600080fd5b8151610e3b816111a3565b600060208284031215610fb057600080fd5b81356bffffffffffffffffffffffff81168114610e3b57600080fd5b73ffffffffffffffffffffffffffffffffffffffff84168152600060206bffffffffffffffffffffffff85168184015260606040840152835180606085015260005b8181101561102a5785810183015185820160800152820161100e565b8181111561103c576000608083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160800195945050505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156110b9576110b9611174565b604052919050565b600067ffffffffffffffff8211156110db576110db611174565b5060051b60200190565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82141561113e577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b67ffffffffffffffff811681146111b957600080fd5b5056fea164736f6c6343000806000a", +} + +var VRFConsumerV2UpgradeableExampleABI = VRFConsumerV2UpgradeableExampleMetaData.ABI + +var VRFConsumerV2UpgradeableExampleBin = VRFConsumerV2UpgradeableExampleMetaData.Bin + +func DeployVRFConsumerV2UpgradeableExample(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRFConsumerV2UpgradeableExample, error) { + parsed, err := VRFConsumerV2UpgradeableExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFConsumerV2UpgradeableExampleBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, 
&VRFConsumerV2UpgradeableExample{address: address, abi: *parsed, VRFConsumerV2UpgradeableExampleCaller: VRFConsumerV2UpgradeableExampleCaller{contract: contract}, VRFConsumerV2UpgradeableExampleTransactor: VRFConsumerV2UpgradeableExampleTransactor{contract: contract}, VRFConsumerV2UpgradeableExampleFilterer: VRFConsumerV2UpgradeableExampleFilterer{contract: contract}}, nil +} + +type VRFConsumerV2UpgradeableExample struct { + address common.Address + abi abi.ABI + VRFConsumerV2UpgradeableExampleCaller + VRFConsumerV2UpgradeableExampleTransactor + VRFConsumerV2UpgradeableExampleFilterer +} + +type VRFConsumerV2UpgradeableExampleCaller struct { + contract *bind.BoundContract +} + +type VRFConsumerV2UpgradeableExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFConsumerV2UpgradeableExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFConsumerV2UpgradeableExampleSession struct { + Contract *VRFConsumerV2UpgradeableExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFConsumerV2UpgradeableExampleCallerSession struct { + Contract *VRFConsumerV2UpgradeableExampleCaller + CallOpts bind.CallOpts +} + +type VRFConsumerV2UpgradeableExampleTransactorSession struct { + Contract *VRFConsumerV2UpgradeableExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFConsumerV2UpgradeableExampleRaw struct { + Contract *VRFConsumerV2UpgradeableExample +} + +type VRFConsumerV2UpgradeableExampleCallerRaw struct { + Contract *VRFConsumerV2UpgradeableExampleCaller +} + +type VRFConsumerV2UpgradeableExampleTransactorRaw struct { + Contract *VRFConsumerV2UpgradeableExampleTransactor +} + +func NewVRFConsumerV2UpgradeableExample(address common.Address, backend bind.ContractBackend) (*VRFConsumerV2UpgradeableExample, error) { + abi, err := abi.JSON(strings.NewReader(VRFConsumerV2UpgradeableExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFConsumerV2UpgradeableExample(address, backend, backend, 
backend) + if err != nil { + return nil, err + } + return &VRFConsumerV2UpgradeableExample{address: address, abi: abi, VRFConsumerV2UpgradeableExampleCaller: VRFConsumerV2UpgradeableExampleCaller{contract: contract}, VRFConsumerV2UpgradeableExampleTransactor: VRFConsumerV2UpgradeableExampleTransactor{contract: contract}, VRFConsumerV2UpgradeableExampleFilterer: VRFConsumerV2UpgradeableExampleFilterer{contract: contract}}, nil +} + +func NewVRFConsumerV2UpgradeableExampleCaller(address common.Address, caller bind.ContractCaller) (*VRFConsumerV2UpgradeableExampleCaller, error) { + contract, err := bindVRFConsumerV2UpgradeableExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFConsumerV2UpgradeableExampleCaller{contract: contract}, nil +} + +func NewVRFConsumerV2UpgradeableExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFConsumerV2UpgradeableExampleTransactor, error) { + contract, err := bindVRFConsumerV2UpgradeableExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFConsumerV2UpgradeableExampleTransactor{contract: contract}, nil +} + +func NewVRFConsumerV2UpgradeableExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFConsumerV2UpgradeableExampleFilterer, error) { + contract, err := bindVRFConsumerV2UpgradeableExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFConsumerV2UpgradeableExampleFilterer{contract: contract}, nil +} + +func bindVRFConsumerV2UpgradeableExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFConsumerV2UpgradeableExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleRaw) Call(opts 
*bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFConsumerV2UpgradeableExample.Contract.VRFConsumerV2UpgradeableExampleCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.VRFConsumerV2UpgradeableExampleTransactor.contract.Transfer(opts) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.VRFConsumerV2UpgradeableExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFConsumerV2UpgradeableExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.contract.Transfer(opts) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCaller) COORDINATOR(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFConsumerV2UpgradeableExample.contract.Call(opts, &out, "COORDINATOR") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleSession) COORDINATOR() (common.Address, error) { + return _VRFConsumerV2UpgradeableExample.Contract.COORDINATOR(&_VRFConsumerV2UpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCallerSession) COORDINATOR() (common.Address, error) { + return _VRFConsumerV2UpgradeableExample.Contract.COORDINATOR(&_VRFConsumerV2UpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCaller) PLITOKEN(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFConsumerV2UpgradeableExample.contract.Call(opts, &out, "PLITOKEN") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleSession) PLITOKEN() (common.Address, error) { + return _VRFConsumerV2UpgradeableExample.Contract.PLITOKEN(&_VRFConsumerV2UpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCallerSession) PLITOKEN() (common.Address, error) { + return _VRFConsumerV2UpgradeableExample.Contract.PLITOKEN(&_VRFConsumerV2UpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCaller) SGasAvailable(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFConsumerV2UpgradeableExample.contract.Call(opts, &out, "s_gasAvailable") + + 
if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleSession) SGasAvailable() (*big.Int, error) { + return _VRFConsumerV2UpgradeableExample.Contract.SGasAvailable(&_VRFConsumerV2UpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCallerSession) SGasAvailable() (*big.Int, error) { + return _VRFConsumerV2UpgradeableExample.Contract.SGasAvailable(&_VRFConsumerV2UpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCaller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFConsumerV2UpgradeableExample.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFConsumerV2UpgradeableExample.Contract.SRandomWords(&_VRFConsumerV2UpgradeableExample.CallOpts, arg0) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFConsumerV2UpgradeableExample.Contract.SRandomWords(&_VRFConsumerV2UpgradeableExample.CallOpts, arg0) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCaller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFConsumerV2UpgradeableExample.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleSession) SRequestId() 
(*big.Int, error) { + return _VRFConsumerV2UpgradeableExample.Contract.SRequestId(&_VRFConsumerV2UpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCallerSession) SRequestId() (*big.Int, error) { + return _VRFConsumerV2UpgradeableExample.Contract.SRequestId(&_VRFConsumerV2UpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCaller) SSubId(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _VRFConsumerV2UpgradeableExample.contract.Call(opts, &out, "s_subId") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleSession) SSubId() (uint64, error) { + return _VRFConsumerV2UpgradeableExample.Contract.SSubId(&_VRFConsumerV2UpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleCallerSession) SSubId() (uint64, error) { + return _VRFConsumerV2UpgradeableExample.Contract.SSubId(&_VRFConsumerV2UpgradeableExample.CallOpts) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactor) CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.contract.Transact(opts, "createSubscriptionAndFund", amount) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.CreateSubscriptionAndFund(&_VRFConsumerV2UpgradeableExample.TransactOpts, amount) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactorSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return 
_VRFConsumerV2UpgradeableExample.Contract.CreateSubscriptionAndFund(&_VRFConsumerV2UpgradeableExample.TransactOpts, amount) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactor) Initialize(opts *bind.TransactOpts, _vrfCoordinator common.Address, _link common.Address) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.contract.Transact(opts, "initialize", _vrfCoordinator, _link) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleSession) Initialize(_vrfCoordinator common.Address, _link common.Address) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.Initialize(&_VRFConsumerV2UpgradeableExample.TransactOpts, _vrfCoordinator, _link) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactorSession) Initialize(_vrfCoordinator common.Address, _link common.Address) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.Initialize(&_VRFConsumerV2UpgradeableExample.TransactOpts, _vrfCoordinator, _link) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.RawFulfillRandomWords(&_VRFConsumerV2UpgradeableExample.TransactOpts, requestId, randomWords) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return 
_VRFConsumerV2UpgradeableExample.Contract.RawFulfillRandomWords(&_VRFConsumerV2UpgradeableExample.TransactOpts, requestId, randomWords) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactor) RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, subId uint64, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.contract.Transact(opts, "requestRandomness", keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleSession) RequestRandomness(keyHash [32]byte, subId uint64, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.RequestRandomness(&_VRFConsumerV2UpgradeableExample.TransactOpts, keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactorSession) RequestRandomness(keyHash [32]byte, subId uint64, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.RequestRandomness(&_VRFConsumerV2UpgradeableExample.TransactOpts, keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactor) TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.contract.Transact(opts, "topUpSubscription", amount) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.TopUpSubscription(&_VRFConsumerV2UpgradeableExample.TransactOpts, amount) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactorSession) 
TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.TopUpSubscription(&_VRFConsumerV2UpgradeableExample.TransactOpts, amount) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactor) UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.contract.Transact(opts, "updateSubscription", consumers) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.UpdateSubscription(&_VRFConsumerV2UpgradeableExample.TransactOpts, consumers) +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleTransactorSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFConsumerV2UpgradeableExample.Contract.UpdateSubscription(&_VRFConsumerV2UpgradeableExample.TransactOpts, consumers) +} + +type VRFConsumerV2UpgradeableExampleInitializedIterator struct { + Event *VRFConsumerV2UpgradeableExampleInitialized + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFConsumerV2UpgradeableExampleInitializedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFConsumerV2UpgradeableExampleInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFConsumerV2UpgradeableExampleInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := 
<-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFConsumerV2UpgradeableExampleInitializedIterator) Error() error { + return it.fail +} + +func (it *VRFConsumerV2UpgradeableExampleInitializedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFConsumerV2UpgradeableExampleInitialized struct { + Version uint8 + Raw types.Log +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleFilterer) FilterInitialized(opts *bind.FilterOpts) (*VRFConsumerV2UpgradeableExampleInitializedIterator, error) { + + logs, sub, err := _VRFConsumerV2UpgradeableExample.contract.FilterLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return &VRFConsumerV2UpgradeableExampleInitializedIterator{contract: _VRFConsumerV2UpgradeableExample.contract, event: "Initialized", logs: logs, sub: sub}, nil +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleFilterer) WatchInitialized(opts *bind.WatchOpts, sink chan<- *VRFConsumerV2UpgradeableExampleInitialized) (event.Subscription, error) { + + logs, sub, err := _VRFConsumerV2UpgradeableExample.contract.WatchLogs(opts, "Initialized") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFConsumerV2UpgradeableExampleInitialized) + if err := _VRFConsumerV2UpgradeableExample.contract.UnpackLog(event, "Initialized", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExampleFilterer) ParseInitialized(log types.Log) (*VRFConsumerV2UpgradeableExampleInitialized, error) { + event := new(VRFConsumerV2UpgradeableExampleInitialized) + if err := 
_VRFConsumerV2UpgradeableExample.contract.UnpackLog(event, "Initialized", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExample) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFConsumerV2UpgradeableExample.abi.Events["Initialized"].ID: + return _VRFConsumerV2UpgradeableExample.ParseInitialized(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFConsumerV2UpgradeableExampleInitialized) Topic() common.Hash { + return common.HexToHash("0x7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498") +} + +func (_VRFConsumerV2UpgradeableExample *VRFConsumerV2UpgradeableExample) Address() common.Address { + return _VRFConsumerV2UpgradeableExample.address +} + +type VRFConsumerV2UpgradeableExampleInterface interface { + COORDINATOR(opts *bind.CallOpts) (common.Address, error) + + PLITOKEN(opts *bind.CallOpts) (common.Address, error) + + SGasAvailable(opts *bind.CallOpts) (*big.Int, error) + + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + SSubId(opts *bind.CallOpts) (uint64, error) + + CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + Initialize(opts *bind.TransactOpts, _vrfCoordinator common.Address, _link common.Address) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, subId uint64, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) + + TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, 
error) + + FilterInitialized(opts *bind.FilterOpts) (*VRFConsumerV2UpgradeableExampleInitializedIterator, error) + + WatchInitialized(opts *bind.WatchOpts, sink chan<- *VRFConsumerV2UpgradeableExampleInitialized) (event.Subscription, error) + + ParseInitialized(log types.Log) (*VRFConsumerV2UpgradeableExampleInitialized, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_coordinator_mock/vrf_coordinator_mock.go b/core/gethwrappers/generated/vrf_coordinator_mock/vrf_coordinator_mock.go new file mode 100644 index 00000000..11e561a1 --- /dev/null +++ b/core/gethwrappers/generated/vrf_coordinator_mock/vrf_coordinator_mock.go @@ -0,0 +1,400 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrf_coordinator_mock + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFCoordinatorMockMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"linkAddress\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"RandomnessRequest\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"randomness\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"consumerContract\",\"type\":\"address\"}],\"name\":\"callBackWithRandomness\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5060405161057938038061057983398101604081905261002f91610054565b600080546001600160a01b0319166001600160a01b0392909216919091179055610084565b60006020828403121561006657600080fd5b81516001600160a01b038116811461007d57600080fd5b9392505050565b6104e6806100936000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c80631b6b6d2314610046578063a4c0ed361461008f578063cf55fe97146100a4575b600080fd5b6000546100669073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b6100a261009d36600461032d565b6100b7565b005b6100a26100b236600461043a565b6101b1565b60005473ffffffffffffffffffffffffffffffffffffffff16331461013d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f4d75737420757365204c494e4b20746f6b656e0000000000000000000000000060448201526064015b60405180910390fd5b600080828060200190518101906101549190610416565b9150915080828673ffffffffffffffffffffffffffffffffffffffff167fb6a11357fce9fae0b59dcc6e5e4bf50803daf2b17d3b80739767e0c4fdacb444876040516101a291815260200190565b60405180910390a45050505050565b604080516024810185905260448082018590528251808303909101815260649091019091526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f94985ddd00000000000000000000000000000000000000000000000000000000179052600090620324b0805a101561028f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f6e6f7420656e6f7567682067617320666f7220636f6e73756d657200000000006044820152606401610134565b60008473ffffffffffffffffffffffffffffffffffffffff16836040516102b6919061046f565b6000604051808303816000865af19150503d80600081146102f3576040519150601f19603f3d011682016040523d82523d6000602084013e6102f8565b606091505b50505050505050505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461032857600080fd5b919050565b60008060006060848603121561034257600080fd5b61034b84610304565b925060208401359
150604084013567ffffffffffffffff8082111561036f57600080fd5b818601915086601f83011261038357600080fd5b813581811115610395576103956104aa565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156103db576103db6104aa565b816040528281528960208487010111156103f457600080fd5b8260208601602083013760006020848301015280955050505050509250925092565b6000806040838503121561042957600080fd5b505080516020909101519092909150565b60008060006060848603121561044f57600080fd5b833592506020840135915061046660408501610304565b90509250925092565b6000825160005b818110156104905760208186018101518583015201610476565b8181111561049f576000828501525b509190910192915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFCoordinatorMockABI = VRFCoordinatorMockMetaData.ABI + +var VRFCoordinatorMockBin = VRFCoordinatorMockMetaData.Bin + +func DeployVRFCoordinatorMock(auth *bind.TransactOpts, backend bind.ContractBackend, linkAddress common.Address) (common.Address, *types.Transaction, *VRFCoordinatorMock, error) { + parsed, err := VRFCoordinatorMockMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFCoordinatorMockBin), backend, linkAddress) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFCoordinatorMock{address: address, abi: *parsed, VRFCoordinatorMockCaller: VRFCoordinatorMockCaller{contract: contract}, VRFCoordinatorMockTransactor: VRFCoordinatorMockTransactor{contract: contract}, VRFCoordinatorMockFilterer: VRFCoordinatorMockFilterer{contract: contract}}, nil +} + +type VRFCoordinatorMock struct { + address common.Address + abi abi.ABI + VRFCoordinatorMockCaller + VRFCoordinatorMockTransactor + VRFCoordinatorMockFilterer +} + 
+type VRFCoordinatorMockCaller struct { + contract *bind.BoundContract +} + +type VRFCoordinatorMockTransactor struct { + contract *bind.BoundContract +} + +type VRFCoordinatorMockFilterer struct { + contract *bind.BoundContract +} + +type VRFCoordinatorMockSession struct { + Contract *VRFCoordinatorMock + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorMockCallerSession struct { + Contract *VRFCoordinatorMockCaller + CallOpts bind.CallOpts +} + +type VRFCoordinatorMockTransactorSession struct { + Contract *VRFCoordinatorMockTransactor + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorMockRaw struct { + Contract *VRFCoordinatorMock +} + +type VRFCoordinatorMockCallerRaw struct { + Contract *VRFCoordinatorMockCaller +} + +type VRFCoordinatorMockTransactorRaw struct { + Contract *VRFCoordinatorMockTransactor +} + +func NewVRFCoordinatorMock(address common.Address, backend bind.ContractBackend) (*VRFCoordinatorMock, error) { + abi, err := abi.JSON(strings.NewReader(VRFCoordinatorMockABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFCoordinatorMock(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFCoordinatorMock{address: address, abi: abi, VRFCoordinatorMockCaller: VRFCoordinatorMockCaller{contract: contract}, VRFCoordinatorMockTransactor: VRFCoordinatorMockTransactor{contract: contract}, VRFCoordinatorMockFilterer: VRFCoordinatorMockFilterer{contract: contract}}, nil +} + +func NewVRFCoordinatorMockCaller(address common.Address, caller bind.ContractCaller) (*VRFCoordinatorMockCaller, error) { + contract, err := bindVRFCoordinatorMock(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorMockCaller{contract: contract}, nil +} + +func NewVRFCoordinatorMockTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFCoordinatorMockTransactor, error) { + contract, err := bindVRFCoordinatorMock(address, nil, transactor, 
nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorMockTransactor{contract: contract}, nil +} + +func NewVRFCoordinatorMockFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFCoordinatorMockFilterer, error) { + contract, err := bindVRFCoordinatorMock(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFCoordinatorMockFilterer{contract: contract}, nil +} + +func bindVRFCoordinatorMock(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFCoordinatorMockMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinatorMock.Contract.VRFCoordinatorMockCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorMock.Contract.VRFCoordinatorMockTransactor.contract.Transfer(opts) +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinatorMock.Contract.VRFCoordinatorMockTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinatorMock.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_VRFCoordinatorMock *VRFCoordinatorMockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorMock.Contract.contract.Transfer(opts) +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinatorMock.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockCaller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorMock.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockSession) PLI() (common.Address, error) { + return _VRFCoordinatorMock.Contract.PLI(&_VRFCoordinatorMock.CallOpts) +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockCallerSession) PLI() (common.Address, error) { + return _VRFCoordinatorMock.Contract.PLI(&_VRFCoordinatorMock.CallOpts) +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockTransactor) CallBackWithRandomness(opts *bind.TransactOpts, requestId [32]byte, randomness *big.Int, consumerContract common.Address) (*types.Transaction, error) { + return _VRFCoordinatorMock.contract.Transact(opts, "callBackWithRandomness", requestId, randomness, consumerContract) +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockSession) CallBackWithRandomness(requestId [32]byte, randomness *big.Int, consumerContract common.Address) (*types.Transaction, error) { + return _VRFCoordinatorMock.Contract.CallBackWithRandomness(&_VRFCoordinatorMock.TransactOpts, requestId, randomness, consumerContract) +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockTransactorSession) CallBackWithRandomness(requestId [32]byte, randomness *big.Int, consumerContract common.Address) (*types.Transaction, error) { + return 
_VRFCoordinatorMock.Contract.CallBackWithRandomness(&_VRFCoordinatorMock.TransactOpts, requestId, randomness, consumerContract) +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, fee *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFCoordinatorMock.contract.Transact(opts, "onTokenTransfer", sender, fee, _data) +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockSession) OnTokenTransfer(sender common.Address, fee *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFCoordinatorMock.Contract.OnTokenTransfer(&_VRFCoordinatorMock.TransactOpts, sender, fee, _data) +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockTransactorSession) OnTokenTransfer(sender common.Address, fee *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFCoordinatorMock.Contract.OnTokenTransfer(&_VRFCoordinatorMock.TransactOpts, sender, fee, _data) +} + +type VRFCoordinatorMockRandomnessRequestIterator struct { + Event *VRFCoordinatorMockRandomnessRequest + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorMockRandomnessRequestIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorMockRandomnessRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorMockRandomnessRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorMockRandomnessRequestIterator) Error() error { + return 
it.fail +} + +func (it *VRFCoordinatorMockRandomnessRequestIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorMockRandomnessRequest struct { + Sender common.Address + KeyHash [32]byte + Seed *big.Int + Fee *big.Int + Raw types.Log +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockFilterer) FilterRandomnessRequest(opts *bind.FilterOpts, sender []common.Address, keyHash [][32]byte, seed []*big.Int) (*VRFCoordinatorMockRandomnessRequestIterator, error) { + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + var seedRule []interface{} + for _, seedItem := range seed { + seedRule = append(seedRule, seedItem) + } + + logs, sub, err := _VRFCoordinatorMock.contract.FilterLogs(opts, "RandomnessRequest", senderRule, keyHashRule, seedRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorMockRandomnessRequestIterator{contract: _VRFCoordinatorMock.contract, event: "RandomnessRequest", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockFilterer) WatchRandomnessRequest(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorMockRandomnessRequest, sender []common.Address, keyHash [][32]byte, seed []*big.Int) (event.Subscription, error) { + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + var seedRule []interface{} + for _, seedItem := range seed { + seedRule = append(seedRule, seedItem) + } + + logs, sub, err := _VRFCoordinatorMock.contract.WatchLogs(opts, "RandomnessRequest", senderRule, keyHashRule, seedRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer 
sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorMockRandomnessRequest) + if err := _VRFCoordinatorMock.contract.UnpackLog(event, "RandomnessRequest", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorMock *VRFCoordinatorMockFilterer) ParseRandomnessRequest(log types.Log) (*VRFCoordinatorMockRandomnessRequest, error) { + event := new(VRFCoordinatorMockRandomnessRequest) + if err := _VRFCoordinatorMock.contract.UnpackLog(event, "RandomnessRequest", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VRFCoordinatorMock *VRFCoordinatorMock) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFCoordinatorMock.abi.Events["RandomnessRequest"].ID: + return _VRFCoordinatorMock.ParseRandomnessRequest(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFCoordinatorMockRandomnessRequest) Topic() common.Hash { + return common.HexToHash("0xb6a11357fce9fae0b59dcc6e5e4bf50803daf2b17d3b80739767e0c4fdacb444") +} + +func (_VRFCoordinatorMock *VRFCoordinatorMock) Address() common.Address { + return _VRFCoordinatorMock.address +} + +type VRFCoordinatorMockInterface interface { + PLI(opts *bind.CallOpts) (common.Address, error) + + CallBackWithRandomness(opts *bind.TransactOpts, requestId [32]byte, randomness *big.Int, consumerContract common.Address) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, fee *big.Int, _data []byte) (*types.Transaction, error) + + FilterRandomnessRequest(opts *bind.FilterOpts, sender []common.Address, keyHash [][32]byte, seed []*big.Int) (*VRFCoordinatorMockRandomnessRequestIterator, error) + + 
WatchRandomnessRequest(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorMockRandomnessRequest, sender []common.Address, keyHash [][32]byte, seed []*big.Int) (event.Subscription, error) + + ParseRandomnessRequest(log types.Log) (*VRFCoordinatorMockRandomnessRequest, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_coordinator_test_v2/vrf_coordinator_test_v2.go b/core/gethwrappers/generated/vrf_coordinator_test_v2/vrf_coordinator_test_v2.go new file mode 100644 index 00000000..5e31139c --- /dev/null +++ b/core/gethwrappers/generated/vrf_coordinator_test_v2/vrf_coordinator_test_v2.go @@ -0,0 +1,3115 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrf_coordinator_test_v2 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type VRFCoordinatorTestV2FeeConfig struct { + FulfillmentFlatFeeLinkPPMTier1 uint32 + FulfillmentFlatFeeLinkPPMTier2 uint32 + FulfillmentFlatFeeLinkPPMTier3 uint32 + FulfillmentFlatFeeLinkPPMTier4 uint32 + FulfillmentFlatFeeLinkPPMTier5 uint32 + ReqsForTier2 *big.Int + ReqsForTier3 *big.Int + ReqsForTier4 *big.Int + ReqsForTier5 *big.Int +} + +type VRFCoordinatorTestV2RequestCommitment struct { + BlockNum uint64 + SubId uint64 + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address +} + +type VRFProof struct { + Pk [2]*big.Int 
+ Gamma [2]*big.Int + C *big.Int + S *big.Int + Seed *big.Int + UWitness common.Address + CGammaWitness [2]*big.Int + SHashWitness [2]*big.Int + ZInv *big.Int +} + +var VRFCoordinatorTestV2MetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"blockhashStore\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkEthFeed\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"internalBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"externalBalance\",\"type\":\"uint256\"}],\"name\":\"BalanceInvariantViolated\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"}],\"name\":\"BlockhashNotInStore\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"have\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"want\",\"type\":\"uint32\"}],\"name\":\"GasLimitTooBig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectCommitment\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientBalance\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"have\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"want\",\"type\":\"uint256\"}],\"name\":\"InsufficientGasForConsumer\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"InvalidConsumer\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"linkWei\",\"type\":\"int256\"}],\"name\":\"InvalidLinkWeiPrice\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"have\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"min\",\"type\":\"uint
16\"},{\"internalType\":\"uint16\",\"name\":\"max\",\"type\":\"uint16\"}],\"name\":\"InvalidRequestConfirmations\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSubscription\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"proposedOwner\",\"type\":\"address\"}],\"name\":\"MustBeRequestedOwner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"MustBeSubOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoCorrespondingRequest\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"NoSuchProvingKey\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"have\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"want\",\"type\":\"uint32\"}],\"name\":\"NumWordsTooBig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableFromLink\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentTooLarge\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PendingRequestExists\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"ProvingKeyAlreadyRegistered\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"Reentrant\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyConsumers\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier1\",\"t
ype\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier2\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier3\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier4\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier5\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier2\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier3\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier4\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier5\",\"type\":\"uint24\"}],\"indexed\":false,\"internalType\":\"structVRFCoordinatorTestV2.FeeConfig\",\"name\":\"feeConfig\",\"type\":\"tuple\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"FundsRecovered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"}],\"name\":\"ProvingKeyDeregistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"n
ame\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"}],\"name\":\"ProvingKeyRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"outputSeed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"name\":\"RandomWordsFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"preSeed\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RandomWordsRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"SubscriptionCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consum
er\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"SubscriptionCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"SubscriptionFunded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"BLOCKHASH_STORE\",\"outputs\":[{\"internalType\":\"contractBlockhashStoreInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name
\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI_ETH_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_CONSUMERS\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_NUM_WORDS\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_REQUEST_CONFIRMATIONS\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"}],\"name\":\"acceptSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"addConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"cancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"createSubscription\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"publicProvingKey\",\"type\":\"uint256[2]\"}],\"name\":\"deregisterProvingKey\",\"outputs\":[],\"stateMutability\":\"nonpayabl
e\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"internalType\":\"structVRF.Proof\",\"name\":\"proof\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"blockNum\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"internalType\":\"structVRFCoordinatorTestV2.RequestCommitment\",\"name\":\"rc\",\"type\":\"tuple\"}],\"name\":\"fulfillRandomWords\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"name\":\"getCommitment\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"}
,{\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCurrentSubId\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFallbackWeiPerUnitLink\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFeeConfig\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier1\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier2\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier3\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier4\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier5\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier2\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier3\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier4\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier5\",\"type\":\"uint24\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"reqCount\",\"type\":\"uint64\"}],\"name\":\"getFeeTier\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRequestConfig\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"},{\"internalType\":\"bytes32[]\",\"name\":\"\",\"type\":\"bytes32[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\
":\"uint64\"}],\"name\":\"getSubscription\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"uint64\",\"name\":\"reqCount\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTotalBalance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"publicKey\",\"type\":\"uint256[2]\"}],\"name\":\"hashOfKey\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"oracleWithdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"}],\"name\":\"ownerCancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"}],\"name\":\"pendingRequestExists\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"f
unction\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"publicProvingKey\",\"type\":\"uint256[2]\"}],\"name\":\"registerProvingKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"removeConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"}],\"name\":\"requestRandomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"requestSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"},{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"
},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier1\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier2\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier3\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier4\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier5\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier2\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier3\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier4\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier5\",\"type\":\"uint24\"}],\"internalType\":\"structVRFCoordinatorTestV2.FeeConfig\",\"name\":\"feeConfig\",\"type\":\"tuple\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x60e06040523480156200001157600080fd5b5060405162005755380380620057558339810160408190526200003491620001b1565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000e8565b5050506001600160601b0319606093841b811660805290831b811660a052911b1660c052620001fb565b6001600160a01b038116331415620001435760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001ac57600080fd5b919050565b600080600060608486031215620001c757600080fd5b620001d28462000194565b9250620001e26020850162000194565b9150620001f26040850162000194565b90509250925092565b60805160601c60a05160601c60c05160601c6154f0620002656000396000818161051901526138180152600081816106030152613c0401526000818161036d015281816114df0152818161233c01528181612d7301528181612eaf01526134d401526154f06000f3fe608060405234801561001057600080fd5b506004361061025b5760003560e01c80636f64f03f11610145578063ad178361116100bd578063d2f9f9a71161008c578063e72f6e3011610071578063e72f6e30146106e0578063e82ad7d4146106f3578063f2fde38b1461071657600080fd5b8063d2f9f9a7146106ba578063d7ae1d30146106cd57600080fd5b8063ad178361146105fe578063af198b9714610625578063c3f909d414610655578063caf70c4a146106a757600080fd5b80638da5cb5b11610114578063a21a23e4116100f9578063a21a23e4146105c0578063a47c7696146105c8578063a4c0ed36146105eb57600080fd5b80638da5cb5b1461059c5780639f87fad7146105ad57600080fd5b80636f64f03f1461055b5780637341c10c1461056e57806379ba509714610581578063823597401461058957600080fd5b8063356dac71116101d85780635fbbc0d2116101a757806366316d8d1161018c57806366316d8d14610501578063689c45171461051457806369bcd
b7d1461053b57600080fd5b80635fbbc0d2146103f357806364d51a2a146104f957600080fd5b8063356dac71146103a757806340d6bb82146103af5780634cb48a54146103cd5780635d3b1d30146103e057600080fd5b806308821d581161022f57806315c48b841161021457806315c48b841461030e578063181f5a77146103295780631b6b6d231461036857600080fd5b806308821d58146102cf57806312b58349146102e257600080fd5b80620122911461026057806302bcc5b61461028057806304c357cb1461029557806306bfa637146102a8575b600080fd5b610268610729565b6040516102779392919061503a565b60405180910390f35b61029361028e366004614e86565b6107a5565b005b6102936102a3366004614ea1565b610837565b60055467ffffffffffffffff165b60405167ffffffffffffffff9091168152602001610277565b6102936102dd366004614b97565b6109eb565b6005546801000000000000000090046bffffffffffffffffffffffff165b604051908152602001610277565b61031660c881565b60405161ffff9091168152602001610277565b604080518082018252601681527f565246436f6f7264696e61746f72563220312e302e3000000000000000000000602082015290516102779190614fe5565b61038f7f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b039091168152602001610277565b600a54610300565b6103b86101f481565b60405163ffffffff9091168152602001610277565b6102936103db366004614d30565b610bb0565b6103006103ee366004614c0a565b610fa7565b600c546040805163ffffffff80841682526401000000008404811660208301526801000000000000000084048116928201929092526c010000000000000000000000008304821660608201527001000000000000000000000000000000008304909116608082015262ffffff740100000000000000000000000000000000000000008304811660a0830152770100000000000000000000000000000000000000000000008304811660c08301527a0100000000000000000000000000000000000000000000000000008304811660e08301527d01000000000000000000000000000000000000000000000000000000000090920490911661010082015261012001610277565b610316606481565b61029361050f366004614b4f565b61138a565b61038f7f000000000000000000000000000000000000000000000000000000000000000081565b610300610549366004614e6d565b60009081526009602052604090205490565b610293610569366004614a94565
b6115d9565b61029361057c366004614ea1565b611709565b610293611956565b610293610597366004614e86565b611a1f565b6000546001600160a01b031661038f565b6102936105bb366004614ea1565b611be5565b6102b6611fe4565b6105db6105d6366004614e86565b6121c7565b60405161027794939291906151d8565b6102936105f9366004614ac8565b6122ea565b61038f7f000000000000000000000000000000000000000000000000000000000000000081565b610638610633366004614c68565b612541565b6040516bffffffffffffffffffffffff9091168152602001610277565b600b546040805161ffff8316815263ffffffff6201000084048116602083015267010000000000000084048116928201929092526b010000000000000000000000909204166060820152608001610277565b6103006106b5366004614bb3565b6129db565b6103b86106c8366004614e86565b612a0b565b6102936106db366004614ea1565b612c00565b6102936106ee366004614a79565b612d3a565b610706610701366004614e86565b612f77565b6040519015158152602001610277565b610293610724366004614a79565b61319a565b600b546007805460408051602080840282018101909252828152600094859460609461ffff8316946201000090930463ffffffff1693919283919083018282801561079357602002820191906000526020600020905b81548152602001906001019080831161077f575b50505050509050925092509250909192565b6107ad6131ab565b67ffffffffffffffff81166000908152600360205260409020546001600160a01b0316610806576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff81166000908152600360205260409020546108349082906001600160a01b0316613207565b50565b67ffffffffffffffff821660009081526003602052604090205482906001600160a01b031680610893576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336001600160a01b038216146108e5576040517fd8a3fb520000000000000000000000000000000000000000000000000000000081526001600160a01b03821660048201526024015b60405180910390fd5b600b546601000000000000900460ff161561092c576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff84166000908152600360205260409020600101546001600160a01b0
38481169116146109e55767ffffffffffffffff841660008181526003602090815260409182902060010180547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b0388169081179091558251338152918201527f69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be91015b60405180910390a25b50505050565b6109f36131ab565b604080518082018252600091610a229190849060029083908390808284376000920191909152506129db915050565b6000818152600660205260409020549091506001600160a01b031680610a77576040517f77f5b84c000000000000000000000000000000000000000000000000000000008152600481018390526024016108dc565b600082815260066020526040812080547fffffffffffffffffffffffff00000000000000000000000000000000000000001690555b600754811015610b67578260078281548110610aca57610aca615485565b90600052602060002001541415610b55576007805460009190610aef9060019061533f565b81548110610aff57610aff615485565b906000526020600020015490508060078381548110610b2057610b20615485565b6000918252602090912001556007805480610b3d57610b3d615456565b60019003818190600052602060002001600090559055505b80610b5f81615383565b915050610aac565b50806001600160a01b03167f72be339577868f868798bac2c93e52d6f034fef4689a9848996c14ebb7416c0d83604051610ba391815260200190565b60405180910390a2505050565b610bb86131ab565b60c861ffff87161115610c0b576040517fa738697600000000000000000000000000000000000000000000000000000000815261ffff871660048201819052602482015260c860448201526064016108dc565b60008213610c48576040517f43d4cf66000000000000000000000000000000000000000000000000000000008152600481018390526024016108dc565b6040805160a0808201835261ffff891680835263ffffffff89811660208086018290526000868801528a831660608088018290528b85166080988901819052600b80547fffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000001690971762010000909502949094177fffffffffffffffffffffffffffffffffff000000000000000000ffffffffffff166701000000000000009092027fffffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffff16919091176b010000000000000000000000909302929092179093558651600c80549489015189890151938a01519
78a0151968a015160c08b015160e08c01516101008d01519588167fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000009099169890981764010000000093881693909302929092177fffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff1668010000000000000000958716959095027fffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffff16949094176c0100000000000000000000000098861698909802979097177fffffffffffffffffff00000000000000ffffffffffffffffffffffffffffffff1670010000000000000000000000000000000096909416959095027fffffffffffffffffff000000ffffffffffffffffffffffffffffffffffffffff16929092177401000000000000000000000000000000000000000062ffffff92831602177fffffff000000000000ffffffffffffffffffffffffffffffffffffffffffffff1677010000000000000000000000000000000000000000000000958216959095027fffffff000000ffffffffffffffffffffffffffffffffffffffffffffffffffff16949094177a01000000000000000000000000000000000000000000000000000092851692909202919091177cffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167d0100000000000000000000000000000000000000000000000000000000009390911692909202919091178155600a84905590517fc21e3bd2e0b339d2848f0dd956947a88966c242c0c0c582a33137a5c1ceb5cb291610f97918991899189918991899190615099565b60405180910390a1505050505050565b600b546000906601000000000000900460ff1615610ff1576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff85166000908152600360205260409020546001600160a01b031661104a576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b33600090815260026020908152604080832067ffffffffffffffff808a16855292529091205416806110ba576040517ff0019fe600000000000000000000000000000000000000000000000000000000815267ffffffffffffffff871660048201523360248201526044016108dc565b600b5461ffff90811690861610806110d6575060c861ffff8616115b1561112657600b546040517fa738697600000000000000000000000000000000000000000000000000000000815261ffff8088166004830152909116602482015260c8604482015260640
16108dc565b600b5463ffffffff620100009091048116908516111561118d57600b546040517ff5d7e01e00000000000000000000000000000000000000000000000000000000815263ffffffff80871660048301526201000090920490911660248201526044016108dc565b6101f463ffffffff841611156111df576040517f47386bec00000000000000000000000000000000000000000000000000000000815263ffffffff841660048201526101f460248201526044016108dc565b60006111ec82600161529b565b6040805160208082018c9052338284015267ffffffffffffffff808c16606084015284166080808401919091528351808403909101815260a08301845280519082012060c083018d905260e080840182905284518085039091018152610100909301909352815191012091925060009182916040805160208101849052439181019190915267ffffffffffffffff8c16606082015263ffffffff808b166080830152891660a08201523360c0820152919350915060e00160408051808303601f19018152828252805160209182012060008681526009835283902055848352820183905261ffff8a169082015263ffffffff808916606083015287166080820152339067ffffffffffffffff8b16908c907f63373d1c4696214b898952999c9aaec57dac1ee2723cec59bea6888f489a97729060a00160405180910390a45033600090815260026020908152604080832067ffffffffffffffff808d16855292529091208054919093167fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000009091161790915591505095945050505050565b600b546601000000000000900460ff16156113d1576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336000908152600860205260409020546bffffffffffffffffffffffff8083169116101561142b576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b33600090815260086020526040812080548392906114589084906bffffffffffffffffffffffff16615356565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555080600560088282829054906101000a90046bffffffffffffffffffffffff166114af9190615356565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055507f00000000000000000000000000000000000000000000000000000000000000006001600160a01b031663a
9059cbb83836040518363ffffffff1660e01b815260040161154d9291906001600160a01b039290921682526bffffffffffffffffffffffff16602082015260400190565b602060405180830381600087803b15801561156757600080fd5b505af115801561157b573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061159f9190614bcf565b6115d5576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5050565b6115e16131ab565b6040805180820182526000916116109190849060029083908390808284376000920191909152506129db915050565b6000818152600660205260409020549091506001600160a01b031615611665576040517f4a0b8fa7000000000000000000000000000000000000000000000000000000008152600481018290526024016108dc565b600081815260066020908152604080832080547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b0388169081179091556007805460018101825594527fa66cc928b5edb82af9bd49922954155ab7b0942694bea4ce44661d9a8736c688909301849055518381527fe729ae16526293f74ade739043022254f1489f616295a25bf72dfb4511ed73b89101610ba3565b67ffffffffffffffff821660009081526003602052604090205482906001600160a01b031680611765576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336001600160a01b038216146117b2576040517fd8a3fb520000000000000000000000000000000000000000000000000000000081526001600160a01b03821660048201526024016108dc565b600b546601000000000000900460ff16156117f9576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff841660009081526003602052604090206002015460641415611850576040517f05a48e0f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001600160a01b038316600090815260026020908152604080832067ffffffffffffffff8089168552925290912054161561188a576109e5565b6001600160a01b038316600081815260026020818152604080842067ffffffffffffffff8a1680865290835281852080547fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000016600190811790915560038452828620909401805
4948501815585529382902090920180547fffffffffffffffffffffffff00000000000000000000000000000000000000001685179055905192835290917f43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e091016109dc565b6001546001600160a01b031633146119b05760405162461bcd60e51b815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016108dc565b60008054337fffffffffffffffffffffffff0000000000000000000000000000000000000000808316821784556001805490911690556040516001600160a01b0390921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b600b546601000000000000900460ff1615611a66576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff81166000908152600360205260409020546001600160a01b0316611abf576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff81166000908152600360205260409020600101546001600160a01b03163314611b475767ffffffffffffffff8116600090815260036020526040908190206001015490517fd084e9750000000000000000000000000000000000000000000000000000000081526001600160a01b0390911660048201526024016108dc565b67ffffffffffffffff81166000818152600360209081526040918290208054337fffffffffffffffffffffffff0000000000000000000000000000000000000000808316821784556001909301805490931690925583516001600160a01b03909116808252928101919091529092917f6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f0910160405180910390a25050565b67ffffffffffffffff821660009081526003602052604090205482906001600160a01b031680611c41576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336001600160a01b03821614611c8e576040517fd8a3fb520000000000000000000000000000000000000000000000000000000081526001600160a01b03821660048201526024016108dc565b600b546601000000000000900460ff1615611cd5576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001600
160a01b038316600090815260026020908152604080832067ffffffffffffffff808916855292529091205416611d56576040517ff0019fe600000000000000000000000000000000000000000000000000000000815267ffffffffffffffff851660048201526001600160a01b03841660248201526044016108dc565b67ffffffffffffffff8416600090815260036020908152604080832060020180548251818502810185019093528083529192909190830182828015611dc457602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311611da6575b50505050509050600060018251611ddb919061533f565b905060005b8251811015611f5357856001600160a01b0316838281518110611e0557611e05615485565b60200260200101516001600160a01b03161415611f41576000838381518110611e3057611e30615485565b6020026020010151905080600360008a67ffffffffffffffff1667ffffffffffffffff1681526020019081526020016000206002018381548110611e7657611e76615485565b600091825260208083209190910180547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b03949094169390931790925567ffffffffffffffff8a168152600390915260409020600201805480611ee357611ee3615456565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905501905550611f53565b80611f4b81615383565b915050611de0565b506001600160a01b038516600081815260026020908152604080832067ffffffffffffffff8b168085529083529281902080547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001690555192835290917f182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b91015b60405180910390a2505050505050565b600b546000906601000000000000900460ff161561202e576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6005805467ffffffffffffffff16906000612048836153bc565b82546101009290920a67ffffffffffffffff81810219909316918316021790915560055416905060008060405190808252806020026020018201604052801561209b578160200160208202803683370190505b506040805180820182526000808252602080830182815267ffffffffffffffff88811680855
2600484528685209551865493516bffffffffffffffffffffffff9091167fffffffffffffffffffffffff0000000000000000000000000000000000000000948516176c01000000000000000000000000919093160291909117909455845160608101865233815280830184815281870188815295855260038452959093208351815483166001600160a01b039182161782559551600182018054909316961695909517905591518051949550909361217f92600285019201906147d3565b505060405133815267ffffffffffffffff841691507f464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf9060200160405180910390a250905090565b67ffffffffffffffff8116600090815260036020526040812054819081906060906001600160a01b0316612227576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff80861660009081526004602090815260408083205460038352928190208054600290910180548351818602810186019094528084526bffffffffffffffffffffffff8616966c01000000000000000000000000909604909516946001600160a01b039092169390929183918301828280156122d457602002820191906000526020600020905b81546001600160a01b031681526001909101906020018083116122b6575b5050505050905093509350935093509193509193565b600b546601000000000000900460ff1615612331576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336001600160a01b037f00000000000000000000000000000000000000000000000000000000000000001614612393576040517f44b0e3c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b602081146123cd576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006123db82840184614e86565b67ffffffffffffffff81166000908152600360205260409020549091506001600160a01b0316612437576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff8116600090815260046020526040812080546bffffffffffffffffffffffff169186919061246e83856152c7565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508460056008828282905490610
1000a90046bffffffffffffffffffffffff166124c591906152c7565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508167ffffffffffffffff167fd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f882878461252c9190615283565b60408051928352602083019190915201611fd4565b600b546000906601000000000000900460ff161561258b576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005a9050600080600061259f878761362c565b9250925092506000866060015163ffffffff1667ffffffffffffffff8111156125ca576125ca6154b4565b6040519080825280602002602001820160405280156125f3578160200160208202803683370190505b50905060005b876060015163ffffffff168110156126675760408051602081018590529081018290526060016040516020818303038152906040528051906020012060001c82828151811061264a5761264a615485565b60209081029190910101528061265f81615383565b9150506125f9565b506000838152600960205260408082208290555181907f1fe543e300000000000000000000000000000000000000000000000000000000906126af908790869060240161518a565b60408051601f198184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff0000000000000000000000000000000000000000000000000000000090941693909317909252600b80547fffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff166601000000000000179055908a015160808b015191925060009161275f9163ffffffff169084613936565b600b80547fffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff1690556020808c01805167ffffffffffffffff9081166000908152600490935260408084205492518216845290922080549394506c01000000000000000000000000918290048316936001939192600c926127e392869290041661529b565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff160217905550600061283a8a600b600001600b9054906101000a900463ffffffff1663ffffffff1661283485612a0b565b3a613984565b6020808e015167ffffffffffffffff166000908152600490915260409020549091506bffffffffffffffffffffffff808316911610156128a6576040517ff4d678b800000000000000000000000000000000000000000
000000000000000815260040160405180910390fd5b6020808d015167ffffffffffffffff16600090815260049091526040812080548392906128e29084906bffffffffffffffffffffffff16615356565b82546101009290920a6bffffffffffffffffffffffff81810219909316918316021790915560008b8152600660209081526040808320546001600160a01b03168352600890915281208054859450909261293e918591166152c7565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550877f7dffc5ae5ee4e2e4df1651cf6ad329a73cebdb728f37ea0187b9b17e036756e48883866040516129c1939291909283526bffffffffffffffffffffffff9190911660208301521515604082015260600190565b60405180910390a299505050505050505050505b92915050565b6000816040516020016129ee9190614fd7565b604051602081830303815290604052805190602001209050919050565b6040805161012081018252600c5463ffffffff80821683526401000000008204811660208401526801000000000000000082048116938301939093526c010000000000000000000000008104831660608301527001000000000000000000000000000000008104909216608082015262ffffff740100000000000000000000000000000000000000008304811660a08301819052770100000000000000000000000000000000000000000000008404821660c08401527a0100000000000000000000000000000000000000000000000000008404821660e08401527d0100000000000000000000000000000000000000000000000000000000009093041661010082015260009167ffffffffffffffff841611612b29575192915050565b8267ffffffffffffffff168160a0015162ffffff16108015612b5e57508060c0015162ffffff168367ffffffffffffffff1611155b15612b6d576020015192915050565b8267ffffffffffffffff168160c0015162ffffff16108015612ba257508060e0015162ffffff168367ffffffffffffffff1611155b15612bb1576040015192915050565b8267ffffffffffffffff168160e0015162ffffff16108015612be7575080610100015162ffffff168367ffffffffffffffff1611155b15612bf6576060015192915050565b6080015192915050565b67ffffffffffffffff821660009081526003602052604090205482906001600160a01b031680612c5c576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336001600160a01b03821614612ca9576040517fd8a3fb52000000000
0000000000000000000000000000000000000000000000081526001600160a01b03821660048201526024016108dc565b600b546601000000000000900460ff1615612cf0576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b612cf984612f77565b15612d30576040517fb42f66e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6109e58484613207565b612d426131ab565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201526000907f00000000000000000000000000000000000000000000000000000000000000006001600160a01b0316906370a082319060240160206040518083038186803b158015612dbd57600080fd5b505afa158015612dd1573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612df59190614bf1565b6005549091506801000000000000000090046bffffffffffffffffffffffff1681811115612e59576040517fa99da30200000000000000000000000000000000000000000000000000000000815260048101829052602481018390526044016108dc565b81811015612f72576000612e6d828461533f565b6040517fa9059cbb0000000000000000000000000000000000000000000000000000000081526001600160a01b038681166004830152602482018390529192507f00000000000000000000000000000000000000000000000000000000000000009091169063a9059cbb90604401602060405180830381600087803b158015612ef557600080fd5b505af1158015612f09573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612f2d9190614bcf565b50604080516001600160a01b0386168152602081018390527f59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600910160405180910390a1505b505050565b67ffffffffffffffff81166000908152600360209081526040808320815160608101835281546001600160a01b039081168252600183015416818501526002820180548451818702810187018652818152879693958601939092919083018282801561300c57602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311612fee575b505050505081525050905060005b8160400151518110156131905760005b60075481101561317d5760006131466007838154811061304c5761304c615485565b90600052602060002001548560400151858151811
061306d5761306d615485565b602002602001015188600260008960400151898151811061309057613090615485565b6020908102919091018101516001600160a01b03168252818101929092526040908101600090812067ffffffffffffffff808f16835293522054166040805160208082018790526001600160a01b03959095168183015267ffffffffffffffff9384166060820152919092166080808301919091528251808303909101815260a08201835280519084012060c082019490945260e080820185905282518083039091018152610100909101909152805191012091565b506000818152600960205260409020549091501561316a5750600195945050505050565b508061317581615383565b91505061302a565b508061318881615383565b91505061301a565b5060009392505050565b6131a26131ab565b61083481613a8c565b6000546001600160a01b031633146132055760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016108dc565b565b600b546601000000000000900460ff161561324e576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff82166000908152600360209081526040808320815160608101835281546001600160a01b039081168252600183015416818501526002820180548451818702810187018652818152929593948601938301828280156132df57602002820191906000526020600020905b81546001600160a01b031681526001909101906020018083116132c1575b5050509190925250505067ffffffffffffffff80851660009081526004602090815260408083208151808301909252546bffffffffffffffffffffffff81168083526c01000000000000000000000000909104909416918101919091529293505b8360400151518110156133d957600260008560400151838151811061336757613367615485565b6020908102919091018101516001600160a01b03168252818101929092526040908101600090812067ffffffffffffffff8a168252909252902080547fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000169055806133d181615383565b915050613340565b5067ffffffffffffffff8516600090815260036020526040812080547fffffffffffffffffffffffff000000000000000000000000000000000000000090811682556001820180549091169055906134346002830182614850565b505067ffffffffffffffff8516600090815260046
020526040902080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055600580548291906008906134a49084906801000000000000000090046bffffffffffffffffffffffff16615356565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055507f00000000000000000000000000000000000000000000000000000000000000006001600160a01b031663a9059cbb85836bffffffffffffffffffffffff166040518363ffffffff1660e01b81526004016135429291906001600160a01b03929092168252602082015260400190565b602060405180830381600087803b15801561355c57600080fd5b505af1158015613570573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906135949190614bcf565b6135ca576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b604080516001600160a01b03861681526bffffffffffffffffffffffff8316602082015267ffffffffffffffff8716917fe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd49815910160405180910390a25050505050565b600080600061363e85600001516129db565b6000818152600660205260409020549093506001600160a01b031680613693576040517f77f5b84c000000000000000000000000000000000000000000000000000000008152600481018590526024016108dc565b60808601516040516136b2918691602001918252602082015260400190565b60408051601f1981840301815291815281516020928301206000818152600990935291205490935080613711576040517f3688124a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b85516020808801516040808a015160608b015160808c0151925161377d968b96909594910195865267ffffffffffffffff948516602087015292909316604085015263ffffffff90811660608501529190911660808301526001600160a01b031660a082015260c00190565b6040516020818303038152906040528051906020012081146137cb576040517fd529142c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b855167ffffffffffffffff1640806138e25786516040517fe9413d3800000000000000000000000000000000000000000000000000000000815267ffffffffffffffff90911660048201527f000000000000000000000000000000000000000000000000000
00000000000006001600160a01b03169063e9413d389060240160206040518083038186803b15801561386257600080fd5b505afa158015613876573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061389a9190614bf1565b9050806138e25786516040517f175dadad00000000000000000000000000000000000000000000000000000000815267ffffffffffffffff90911660048201526024016108dc565b6000886080015182604051602001613904929190918252602082015260400190565b6040516020818303038152906040528051906020012060001c90506139298982613b4e565b9450505050509250925092565b60005a61138881101561394857600080fd5b61138881039050846040820482031161396057600080fd5b50823b61396c57600080fd5b60008083516020850160008789f190505b9392505050565b60008061398f613bb9565b9050600081136139ce576040517f43d4cf66000000000000000000000000000000000000000000000000000000008152600481018290526024016108dc565b6000815a6139dc8989615283565b6139e6919061533f565b6139f886670de0b6b3a7640000615302565b613a029190615302565b613a0c91906152ee565b90506000613a2563ffffffff871664e8d4a51000615302565b9050613a3d816b033b2e3c9fd0803ce800000061533f565b821115613a76576040517fe80fa38100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b613a808183615283565b98975050505050505050565b6001600160a01b038116331415613ae55760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016108dc565b600180547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000613b828360000151846020015185604001518660600151868860a001518960c001518a60e001518b6101000151613cc0565b60038360200151604051602001613b9a929190615176565b60408051601f1981840301815291905280516020909101209392505050565b600b54604080517ffeaf968c0000000000000000000000000000000000000000000000000000000081529051600092670100000000000000900463ffffffff169182151591849182917f0000000000000000000000000000000000000000000
0000000000000000000006001600160a01b03169163feaf968c9160048083019260a0929190829003018186803b158015613c5257600080fd5b505afa158015613c66573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613c8a9190614ecb565b509450909250849150508015613cae5750613ca5824261533f565b8463ffffffff16105b15613cb85750600a545b949350505050565b613cc989613efb565b613d155760405162461bcd60e51b815260206004820152601a60248201527f7075626c6963206b6579206973206e6f74206f6e20637572766500000000000060448201526064016108dc565b613d1e88613efb565b613d6a5760405162461bcd60e51b815260206004820152601560248201527f67616d6d61206973206e6f74206f6e206375727665000000000000000000000060448201526064016108dc565b613d7383613efb565b613dbf5760405162461bcd60e51b815260206004820152601d60248201527f6347616d6d615769746e657373206973206e6f74206f6e20637572766500000060448201526064016108dc565b613dc882613efb565b613e145760405162461bcd60e51b815260206004820152601c60248201527f73486173685769746e657373206973206e6f74206f6e2063757276650000000060448201526064016108dc565b613e20878a8887613fd4565b613e6c5760405162461bcd60e51b815260206004820152601960248201527f6164647228632a706b2b732a6729213d5f755769746e6573730000000000000060448201526064016108dc565b6000613e788a87614125565b90506000613e8b898b878b868989614189565b90506000613e9c838d8d8a866142a9565b9050808a14613eed5760405162461bcd60e51b815260206004820152600d60248201527f696e76616c69642070726f6f660000000000000000000000000000000000000060448201526064016108dc565b505050505050505050505050565b80516000906401000003d01911613f545760405162461bcd60e51b815260206004820152601260248201527f696e76616c696420782d6f7264696e617465000000000000000000000000000060448201526064016108dc565b60208201516401000003d01911613fad5760405162461bcd60e51b815260206004820152601260248201527f696e76616c696420792d6f7264696e617465000000000000000000000000000060448201526064016108dc565b60208201516401000003d019908009613fcd8360005b60200201516142e9565b1492915050565b60006001600160a01b03821661402c5760405162461bcd60e51b815260206004820152600b60248201527f626
164207769746e65737300000000000000000000000000000000000000000060448201526064016108dc565b60208401516000906001161561404357601c614046565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641418587600060200201510986517ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141918203925060009190890987516040805160008082526020820180845287905260ff88169282019290925260608101929092526080820183905291925060019060a0016020604051602081039080840390855afa1580156140fd573d6000803e3d6000fd5b5050604051601f1901516001600160a01b039081169088161495505050505050949350505050565b61412d61486e565b61415a6001848460405160200161414693929190614fb6565b60405160208183030381529060405261430d565b90505b61416681613efb565b6129d55780516040805160208101929092526141829101614146565b905061415d565b61419161486e565b825186516401000003d01990819006910614156141f05760405162461bcd60e51b815260206004820152601e60248201527f706f696e747320696e2073756d206d7573742062652064697374696e6374000060448201526064016108dc565b6141fb87898861435c565b6142475760405162461bcd60e51b815260206004820152601660248201527f4669727374206d756c20636865636b206661696c65640000000000000000000060448201526064016108dc565b61425284868561435c565b61429e5760405162461bcd60e51b815260206004820152601760248201527f5365636f6e64206d756c20636865636b206661696c656400000000000000000060448201526064016108dc565b613a808684846144a4565b6000600286868685876040516020016142c796959493929190614f44565b60408051601f1981840301815291905280516020909101209695505050505050565b6000806401000003d01980848509840990506401000003d019600782089392505050565b61431561486e565b61431e8261456b565b815261433361432e826000613fc3565b6145a6565b602082018190526002900660011415614357576020810180516401000003d0190390525b919050565b6000826143ab5760405162461bcd60e51b815260206004820152600b60248201527f7a65726f207363616c617200000000000000000000000000000000000000000060448201526064016108dc565b835160208501516000906143c1906002906153e4565b156143cd57601c6143d0565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce
6af48a03bbfd25e8cd03641418387096040805160008082526020820180845281905260ff86169282019290925260608101869052608081018390529192509060019060a0016020604051602081039080840390855afa158015614450573d6000803e3d6000fd5b50505060206040510351905060008660405160200161446f9190614f32565b60408051601f1981840301815291905280516020909101206001600160a01b0392831692169190911498975050505050505050565b6144ac61486e565b8351602080860151855191860151600093849384936144cd939091906145c6565b919450925090506401000003d01985820960011461452d5760405162461bcd60e51b815260206004820152601960248201527f696e765a206d75737420626520696e7665727365206f66207a0000000000000060448201526064016108dc565b60405180604001604052806401000003d0198061454c5761454c615427565b87860981526020016401000003d0198785099052979650505050505050565b805160208201205b6401000003d019811061435757604080516020808201939093528151808203840181529082019091528051910120614573565b60006129d58260026145bf6401000003d0196001615283565b901c6146a6565b60008080600180826401000003d019896401000003d019038808905060006401000003d0198b6401000003d019038a089050600061460683838585614766565b909850905061461788828e8861478a565b909850905061462888828c8761478a565b9098509050600061463b8d878b8561478a565b909850905061464c88828686614766565b909850905061465d88828e8961478a565b9098509050818114614692576401000003d019818a0998506401000003d01982890997506401000003d0198183099650614696565b8196505b5050505050509450945094915050565b6000806146b161488c565b6020808252818101819052604082015260608101859052608081018490526401000003d01960a08201526146e36148aa565b60208160c08460057ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa92508261475c5760405162461bcd60e51b815260206004820152601260248201527f6269674d6f64457870206661696c75726521000000000000000000000000000060448201526064016108dc565b5195945050505050565b6000806401000003d0198487096401000003d0198487099097909650945050505050565b600080806401000003d019878509905060006401000003d01987876401000003d019030990506401000003d0198183086401000003d0198689099099909850965050505050505
0565b828054828255906000526020600020908101928215614840579160200282015b8281111561484057825182547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b039091161782556020909201916001909101906147f3565b5061484c9291506148c8565b5090565b508054600082559060005260206000209081019061083491906148c8565b60405180604001604052806002906020820280368337509192915050565b6040518060c001604052806006906020820280368337509192915050565b60405180602001604052806001906020820280368337509192915050565b5b8082111561484c57600081556001016148c9565b80356001600160a01b038116811461435757600080fd5b80604081018310156129d557600080fd5b600082601f83011261491657600080fd5b6040516040810181811067ffffffffffffffff82111715614939576149396154b4565b806040525080838560408601111561495057600080fd5b60005b6002811015614972578135835260209283019290910190600101614953565b509195945050505050565b600060a0828403121561498f57600080fd5b60405160a0810181811067ffffffffffffffff821117156149b2576149b26154b4565b6040529050806149c183614a47565b81526149cf60208401614a47565b60208201526149e060408401614a33565b60408201526149f160608401614a33565b6060820152614a02608084016148dd565b60808201525092915050565b803561ffff8116811461435757600080fd5b803562ffffff8116811461435757600080fd5b803563ffffffff8116811461435757600080fd5b803567ffffffffffffffff8116811461435757600080fd5b805169ffffffffffffffffffff8116811461435757600080fd5b600060208284031215614a8b57600080fd5b61397d826148dd565b60008060608385031215614aa757600080fd5b614ab0836148dd565b9150614abf84602085016148f4565b90509250929050565b60008060008060608587031215614ade57600080fd5b614ae7856148dd565b935060208501359250604085013567ffffffffffffffff80821115614b0b57600080fd5b818701915087601f830112614b1f57600080fd5b813581811115614b2e57600080fd5b886020828501011115614b4057600080fd5b95989497505060200194505050565b60008060408385031215614b6257600080fd5b614b6b836148dd565b915060208301356bffffffffffffffffffffffff81168114614b8c57600080fd5b809150509250929050565b600060408284031215614ba957600080fd5b61397d83836148f4565b60006040828
4031215614bc557600080fd5b61397d8383614905565b600060208284031215614be157600080fd5b8151801515811461397d57600080fd5b600060208284031215614c0357600080fd5b5051919050565b600080600080600060a08688031215614c2257600080fd5b85359450614c3260208701614a47565b9350614c4060408701614a0e565b9250614c4e60608701614a33565b9150614c5c60808701614a33565b90509295509295909350565b600080828403610240811215614c7d57600080fd5b6101a080821215614c8d57600080fd5b614c95615259565b9150614ca18686614905565b8252614cb08660408701614905565b60208301526080850135604083015260a0850135606083015260c08501356080830152614cdf60e086016148dd565b60a0830152610100614cf387828801614905565b60c0840152614d06876101408801614905565b60e08401526101808601358184015250819350614d258682870161497d565b925050509250929050565b6000806000806000808688036101c0811215614d4b57600080fd5b614d5488614a0e565b9650614d6260208901614a33565b9550614d7060408901614a33565b9450614d7e60608901614a33565b935060808801359250610120807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6083011215614db957600080fd5b614dc1615259565b9150614dcf60a08a01614a33565b8252614ddd60c08a01614a33565b6020830152614dee60e08a01614a33565b6040830152610100614e01818b01614a33565b6060840152614e11828b01614a33565b6080840152614e236101408b01614a20565b60a0840152614e356101608b01614a20565b60c0840152614e476101808b01614a20565b60e0840152614e596101a08b01614a20565b818401525050809150509295509295509295565b600060208284031215614e7f57600080fd5b5035919050565b600060208284031215614e9857600080fd5b61397d82614a47565b60008060408385031215614eb457600080fd5b614ebd83614a47565b9150614abf602084016148dd565b600080600080600060a08688031215614ee357600080fd5b614eec86614a5f565b9450602086015193506040860151925060608601519150614c5c60808701614a5f565b8060005b60028110156109e5578151845260209384019390910190600101614f13565b614f3c8183614f0f565b604001919050565b868152614f546020820187614f0f565b614f616060820186614f0f565b614f6e60a0820185614f0f565b614f7b60e0820184614f0f565b60609190911b7fffffffffffffffffffffffffffffffffffffffff000000000000000000000
000166101208201526101340195945050505050565b838152614fc66020820184614f0f565b606081019190915260800192915050565b604081016129d58284614f0f565b600060208083528351808285015260005b8181101561501257858101830151858201604001528201614ff6565b81811115615024576000604083870101525b50601f01601f1916929092016040019392505050565b60006060820161ffff86168352602063ffffffff86168185015260606040850152818551808452608086019150828701935060005b8181101561508b5784518352938301939183019160010161506f565b509098975050505050505050565b60006101c08201905061ffff8816825263ffffffff808816602084015280871660408401528086166060840152846080840152835481811660a08501526150ed60c08501838360201c1663ffffffff169052565b61510460e08501838360401c1663ffffffff169052565b61511c6101008501838360601c1663ffffffff169052565b6151346101208501838360801c1663ffffffff169052565b62ffffff60a082901c811661014086015260b882901c811661016086015260d082901c1661018085015260e81c6101a090930192909252979650505050505050565b8281526060810161397d6020830184614f0f565b6000604082018483526020604081850152818551808452606086019150828701935060005b818110156151cb578451835293830193918301916001016151af565b5090979650505050505050565b6000608082016bffffffffffffffffffffffff87168352602067ffffffffffffffff8716818501526001600160a01b0380871660408601526080606086015282865180855260a087019150838801945060005b8181101561524957855184168352948401949184019160010161522b565b50909a9950505050505050505050565b604051610120810167ffffffffffffffff8111828210171561527d5761527d6154b4565b60405290565b60008219821115615296576152966153f8565b500190565b600067ffffffffffffffff8083168185168083038211156152be576152be6153f8565b01949350505050565b60006bffffffffffffffffffffffff8083168185168083038211156152be576152be6153f8565b6000826152fd576152fd615427565b500490565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff048311821515161561533a5761533a6153f8565b500290565b600082821015615351576153516153f8565b500390565b60006bffffffffffffffffffffffff8381169083168181101561537b5761537b6153f8565b039392505050565b60007ffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8214156153b5576153b56153f8565b5060010190565b600067ffffffffffffffff808316818114156153da576153da6153f8565b6001019392505050565b6000826153f3576153f3615427565b500690565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFCoordinatorTestV2ABI = VRFCoordinatorTestV2MetaData.ABI + +var VRFCoordinatorTestV2Bin = VRFCoordinatorTestV2MetaData.Bin + +func DeployVRFCoordinatorTestV2(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, blockhashStore common.Address, linkEthFeed common.Address) (common.Address, *types.Transaction, *VRFCoordinatorTestV2, error) { + parsed, err := VRFCoordinatorTestV2MetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFCoordinatorTestV2Bin), backend, link, blockhashStore, linkEthFeed) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFCoordinatorTestV2{address: address, abi: *parsed, VRFCoordinatorTestV2Caller: VRFCoordinatorTestV2Caller{contract: contract}, VRFCoordinatorTestV2Transactor: VRFCoordinatorTestV2Transactor{contract: contract}, VRFCoordinatorTestV2Filterer: VRFCoordinatorTestV2Filterer{contract: contract}}, nil +} + +type VRFCoordinatorTestV2 struct { + address common.Address + abi abi.ABI + VRFCoordinatorTestV2Caller + VRFCoordinatorTestV2Transactor + 
VRFCoordinatorTestV2Filterer +} + +type VRFCoordinatorTestV2Caller struct { + contract *bind.BoundContract +} + +type VRFCoordinatorTestV2Transactor struct { + contract *bind.BoundContract +} + +type VRFCoordinatorTestV2Filterer struct { + contract *bind.BoundContract +} + +type VRFCoordinatorTestV2Session struct { + Contract *VRFCoordinatorTestV2 + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorTestV2CallerSession struct { + Contract *VRFCoordinatorTestV2Caller + CallOpts bind.CallOpts +} + +type VRFCoordinatorTestV2TransactorSession struct { + Contract *VRFCoordinatorTestV2Transactor + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorTestV2Raw struct { + Contract *VRFCoordinatorTestV2 +} + +type VRFCoordinatorTestV2CallerRaw struct { + Contract *VRFCoordinatorTestV2Caller +} + +type VRFCoordinatorTestV2TransactorRaw struct { + Contract *VRFCoordinatorTestV2Transactor +} + +func NewVRFCoordinatorTestV2(address common.Address, backend bind.ContractBackend) (*VRFCoordinatorTestV2, error) { + abi, err := abi.JSON(strings.NewReader(VRFCoordinatorTestV2ABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFCoordinatorTestV2(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2{address: address, abi: abi, VRFCoordinatorTestV2Caller: VRFCoordinatorTestV2Caller{contract: contract}, VRFCoordinatorTestV2Transactor: VRFCoordinatorTestV2Transactor{contract: contract}, VRFCoordinatorTestV2Filterer: VRFCoordinatorTestV2Filterer{contract: contract}}, nil +} + +func NewVRFCoordinatorTestV2Caller(address common.Address, caller bind.ContractCaller) (*VRFCoordinatorTestV2Caller, error) { + contract, err := bindVRFCoordinatorTestV2(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2Caller{contract: contract}, nil +} + +func NewVRFCoordinatorTestV2Transactor(address common.Address, transactor bind.ContractTransactor) 
(*VRFCoordinatorTestV2Transactor, error) { + contract, err := bindVRFCoordinatorTestV2(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2Transactor{contract: contract}, nil +} + +func NewVRFCoordinatorTestV2Filterer(address common.Address, filterer bind.ContractFilterer) (*VRFCoordinatorTestV2Filterer, error) { + contract, err := bindVRFCoordinatorTestV2(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2Filterer{contract: contract}, nil +} + +func bindVRFCoordinatorTestV2(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFCoordinatorTestV2MetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinatorTestV2.Contract.VRFCoordinatorTestV2Caller.contract.Call(opts, result, method, params...) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.VRFCoordinatorTestV2Transactor.contract.Transfer(opts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.VRFCoordinatorTestV2Transactor.contract.Transact(opts, method, params...) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinatorTestV2.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.contract.Transfer(opts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) BLOCKHASHSTORE(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "BLOCKHASH_STORE") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) BLOCKHASHSTORE() (common.Address, error) { + return _VRFCoordinatorTestV2.Contract.BLOCKHASHSTORE(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) BLOCKHASHSTORE() (common.Address, error) { + return _VRFCoordinatorTestV2.Contract.BLOCKHASHSTORE(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) PLI() (common.Address, error) { + return _VRFCoordinatorTestV2.Contract.PLI(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) PLI() (common.Address, error) { + return _VRFCoordinatorTestV2.Contract.PLI(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 
*VRFCoordinatorTestV2Caller) PLIETHFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "PLI_ETH_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) PLIETHFEED() (common.Address, error) { + return _VRFCoordinatorTestV2.Contract.PLIETHFEED(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) PLIETHFEED() (common.Address, error) { + return _VRFCoordinatorTestV2.Contract.PLIETHFEED(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "MAX_CONSUMERS") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) MAXCONSUMERS() (uint16, error) { + return _VRFCoordinatorTestV2.Contract.MAXCONSUMERS(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) MAXCONSUMERS() (uint16, error) { + return _VRFCoordinatorTestV2.Contract.MAXCONSUMERS(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) MAXNUMWORDS(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "MAX_NUM_WORDS") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) MAXNUMWORDS() (uint32, error) { + return _VRFCoordinatorTestV2.Contract.MAXNUMWORDS(&_VRFCoordinatorTestV2.CallOpts) +} + +func 
(_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) MAXNUMWORDS() (uint32, error) { + return _VRFCoordinatorTestV2.Contract.MAXNUMWORDS(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) MAXREQUESTCONFIRMATIONS(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "MAX_REQUEST_CONFIRMATIONS") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) MAXREQUESTCONFIRMATIONS() (uint16, error) { + return _VRFCoordinatorTestV2.Contract.MAXREQUESTCONFIRMATIONS(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) MAXREQUESTCONFIRMATIONS() (uint16, error) { + return _VRFCoordinatorTestV2.Contract.MAXREQUESTCONFIRMATIONS(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) GetCommitment(opts *bind.CallOpts, requestId *big.Int) ([32]byte, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "getCommitment", requestId) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) GetCommitment(requestId *big.Int) ([32]byte, error) { + return _VRFCoordinatorTestV2.Contract.GetCommitment(&_VRFCoordinatorTestV2.CallOpts, requestId) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) GetCommitment(requestId *big.Int) ([32]byte, error) { + return _VRFCoordinatorTestV2.Contract.GetCommitment(&_VRFCoordinatorTestV2.CallOpts, requestId) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) GetConfig(opts *bind.CallOpts) (GetConfig, + + error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "getConfig") 
+ + outstruct := new(GetConfig) + if err != nil { + return *outstruct, err + } + + outstruct.MinimumRequestConfirmations = *abi.ConvertType(out[0], new(uint16)).(*uint16) + outstruct.MaxGasLimit = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.StalenessSeconds = *abi.ConvertType(out[2], new(uint32)).(*uint32) + outstruct.GasAfterPaymentCalculation = *abi.ConvertType(out[3], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) GetConfig() (GetConfig, + + error) { + return _VRFCoordinatorTestV2.Contract.GetConfig(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) GetConfig() (GetConfig, + + error) { + return _VRFCoordinatorTestV2.Contract.GetConfig(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) GetCurrentSubId(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "getCurrentSubId") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) GetCurrentSubId() (uint64, error) { + return _VRFCoordinatorTestV2.Contract.GetCurrentSubId(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) GetCurrentSubId() (uint64, error) { + return _VRFCoordinatorTestV2.Contract.GetCurrentSubId(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) GetFallbackWeiPerUnitLink(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "getFallbackWeiPerUnitLink") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) 
GetFallbackWeiPerUnitLink() (*big.Int, error) { + return _VRFCoordinatorTestV2.Contract.GetFallbackWeiPerUnitLink(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) GetFallbackWeiPerUnitLink() (*big.Int, error) { + return _VRFCoordinatorTestV2.Contract.GetFallbackWeiPerUnitLink(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) GetFeeConfig(opts *bind.CallOpts) (GetFeeConfig, + + error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "getFeeConfig") + + outstruct := new(GetFeeConfig) + if err != nil { + return *outstruct, err + } + + outstruct.FulfillmentFlatFeeLinkPPMTier1 = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeLinkPPMTier2 = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeLinkPPMTier3 = *abi.ConvertType(out[2], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeLinkPPMTier4 = *abi.ConvertType(out[3], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeLinkPPMTier5 = *abi.ConvertType(out[4], new(uint32)).(*uint32) + outstruct.ReqsForTier2 = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + outstruct.ReqsForTier3 = *abi.ConvertType(out[6], new(*big.Int)).(**big.Int) + outstruct.ReqsForTier4 = *abi.ConvertType(out[7], new(*big.Int)).(**big.Int) + outstruct.ReqsForTier5 = *abi.ConvertType(out[8], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) GetFeeConfig() (GetFeeConfig, + + error) { + return _VRFCoordinatorTestV2.Contract.GetFeeConfig(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) GetFeeConfig() (GetFeeConfig, + + error) { + return _VRFCoordinatorTestV2.Contract.GetFeeConfig(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) GetFeeTier(opts *bind.CallOpts, reqCount uint64) (uint32, error) 
{ + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "getFeeTier", reqCount) + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) GetFeeTier(reqCount uint64) (uint32, error) { + return _VRFCoordinatorTestV2.Contract.GetFeeTier(&_VRFCoordinatorTestV2.CallOpts, reqCount) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) GetFeeTier(reqCount uint64) (uint32, error) { + return _VRFCoordinatorTestV2.Contract.GetFeeTier(&_VRFCoordinatorTestV2.CallOpts, reqCount) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) GetRequestConfig(opts *bind.CallOpts) (uint16, uint32, [][32]byte, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "getRequestConfig") + + if err != nil { + return *new(uint16), *new(uint32), *new([][32]byte), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + out1 := *abi.ConvertType(out[1], new(uint32)).(*uint32) + out2 := *abi.ConvertType(out[2], new([][32]byte)).(*[][32]byte) + + return out0, out1, out2, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) GetRequestConfig() (uint16, uint32, [][32]byte, error) { + return _VRFCoordinatorTestV2.Contract.GetRequestConfig(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) GetRequestConfig() (uint16, uint32, [][32]byte, error) { + return _VRFCoordinatorTestV2.Contract.GetRequestConfig(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) GetSubscription(opts *bind.CallOpts, subId uint64) (GetSubscription, + + error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "getSubscription", subId) + + outstruct := new(GetSubscription) + if err != nil { + return *outstruct, err + } + + outstruct.Balance = 
*abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.ReqCount = *abi.ConvertType(out[1], new(uint64)).(*uint64) + outstruct.Owner = *abi.ConvertType(out[2], new(common.Address)).(*common.Address) + outstruct.Consumers = *abi.ConvertType(out[3], new([]common.Address)).(*[]common.Address) + + return *outstruct, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) GetSubscription(subId uint64) (GetSubscription, + + error) { + return _VRFCoordinatorTestV2.Contract.GetSubscription(&_VRFCoordinatorTestV2.CallOpts, subId) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) GetSubscription(subId uint64) (GetSubscription, + + error) { + return _VRFCoordinatorTestV2.Contract.GetSubscription(&_VRFCoordinatorTestV2.CallOpts, subId) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) GetTotalBalance(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "getTotalBalance") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) GetTotalBalance() (*big.Int, error) { + return _VRFCoordinatorTestV2.Contract.GetTotalBalance(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) GetTotalBalance() (*big.Int, error) { + return _VRFCoordinatorTestV2.Contract.GetTotalBalance(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) HashOfKey(opts *bind.CallOpts, publicKey [2]*big.Int) ([32]byte, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "hashOfKey", publicKey) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) HashOfKey(publicKey 
[2]*big.Int) ([32]byte, error) { + return _VRFCoordinatorTestV2.Contract.HashOfKey(&_VRFCoordinatorTestV2.CallOpts, publicKey) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) HashOfKey(publicKey [2]*big.Int) ([32]byte, error) { + return _VRFCoordinatorTestV2.Contract.HashOfKey(&_VRFCoordinatorTestV2.CallOpts, publicKey) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) Owner() (common.Address, error) { + return _VRFCoordinatorTestV2.Contract.Owner(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) Owner() (common.Address, error) { + return _VRFCoordinatorTestV2.Contract.Owner(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) PendingRequestExists(opts *bind.CallOpts, subId uint64) (bool, error) { + var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "pendingRequestExists", subId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) PendingRequestExists(subId uint64) (bool, error) { + return _VRFCoordinatorTestV2.Contract.PendingRequestExists(&_VRFCoordinatorTestV2.CallOpts, subId) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) PendingRequestExists(subId uint64) (bool, error) { + return _VRFCoordinatorTestV2.Contract.PendingRequestExists(&_VRFCoordinatorTestV2.CallOpts, subId) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Caller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + 
var out []interface{} + err := _VRFCoordinatorTestV2.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) TypeAndVersion() (string, error) { + return _VRFCoordinatorTestV2.Contract.TypeAndVersion(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2CallerSession) TypeAndVersion() (string, error) { + return _VRFCoordinatorTestV2.Contract.TypeAndVersion(&_VRFCoordinatorTestV2.CallOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) AcceptOwnership() (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.AcceptOwnership(&_VRFCoordinatorTestV2.TransactOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.AcceptOwnership(&_VRFCoordinatorTestV2.TransactOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "acceptSubscriptionOwnerTransfer", subId) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) AcceptSubscriptionOwnerTransfer(subId uint64) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.AcceptSubscriptionOwnerTransfer(&_VRFCoordinatorTestV2.TransactOpts, subId) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) AcceptSubscriptionOwnerTransfer(subId uint64) (*types.Transaction, error) { + return 
_VRFCoordinatorTestV2.Contract.AcceptSubscriptionOwnerTransfer(&_VRFCoordinatorTestV2.TransactOpts, subId) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) AddConsumer(opts *bind.TransactOpts, subId uint64, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "addConsumer", subId, consumer) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) AddConsumer(subId uint64, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.AddConsumer(&_VRFCoordinatorTestV2.TransactOpts, subId, consumer) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) AddConsumer(subId uint64, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.AddConsumer(&_VRFCoordinatorTestV2.TransactOpts, subId, consumer) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) CancelSubscription(opts *bind.TransactOpts, subId uint64, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "cancelSubscription", subId, to) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) CancelSubscription(subId uint64, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.CancelSubscription(&_VRFCoordinatorTestV2.TransactOpts, subId, to) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) CancelSubscription(subId uint64, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.CancelSubscription(&_VRFCoordinatorTestV2.TransactOpts, subId, to) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "createSubscription") +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) CreateSubscription() (*types.Transaction, error) { + return 
_VRFCoordinatorTestV2.Contract.CreateSubscription(&_VRFCoordinatorTestV2.TransactOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) CreateSubscription() (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.CreateSubscription(&_VRFCoordinatorTestV2.TransactOpts) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) DeregisterProvingKey(opts *bind.TransactOpts, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "deregisterProvingKey", publicProvingKey) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) DeregisterProvingKey(publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.DeregisterProvingKey(&_VRFCoordinatorTestV2.TransactOpts, publicProvingKey) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) DeregisterProvingKey(publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.DeregisterProvingKey(&_VRFCoordinatorTestV2.TransactOpts, publicProvingKey) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) FulfillRandomWords(opts *bind.TransactOpts, proof VRFProof, rc VRFCoordinatorTestV2RequestCommitment) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "fulfillRandomWords", proof, rc) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) FulfillRandomWords(proof VRFProof, rc VRFCoordinatorTestV2RequestCommitment) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.FulfillRandomWords(&_VRFCoordinatorTestV2.TransactOpts, proof, rc) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) FulfillRandomWords(proof VRFProof, rc VRFCoordinatorTestV2RequestCommitment) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.FulfillRandomWords(&_VRFCoordinatorTestV2.TransactOpts, proof, rc) +} + +func (_VRFCoordinatorTestV2 
*VRFCoordinatorTestV2Transactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "onTokenTransfer", arg0, amount, data) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.OnTokenTransfer(&_VRFCoordinatorTestV2.TransactOpts, arg0, amount, data) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.OnTokenTransfer(&_VRFCoordinatorTestV2.TransactOpts, arg0, amount, data) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "oracleWithdraw", recipient, amount) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) OracleWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.OracleWithdraw(&_VRFCoordinatorTestV2.TransactOpts, recipient, amount) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) OracleWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.OracleWithdraw(&_VRFCoordinatorTestV2.TransactOpts, recipient, amount) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) OwnerCancelSubscription(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "ownerCancelSubscription", subId) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) OwnerCancelSubscription(subId uint64) (*types.Transaction, error) { + 
return _VRFCoordinatorTestV2.Contract.OwnerCancelSubscription(&_VRFCoordinatorTestV2.TransactOpts, subId) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) OwnerCancelSubscription(subId uint64) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.OwnerCancelSubscription(&_VRFCoordinatorTestV2.TransactOpts, subId) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "recoverFunds", to) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.RecoverFunds(&_VRFCoordinatorTestV2.TransactOpts, to) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.RecoverFunds(&_VRFCoordinatorTestV2.TransactOpts, to) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) RegisterProvingKey(opts *bind.TransactOpts, oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "registerProvingKey", oracle, publicProvingKey) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) RegisterProvingKey(oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.RegisterProvingKey(&_VRFCoordinatorTestV2.TransactOpts, oracle, publicProvingKey) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) RegisterProvingKey(oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.RegisterProvingKey(&_VRFCoordinatorTestV2.TransactOpts, oracle, publicProvingKey) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) 
RemoveConsumer(opts *bind.TransactOpts, subId uint64, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "removeConsumer", subId, consumer) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) RemoveConsumer(subId uint64, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.RemoveConsumer(&_VRFCoordinatorTestV2.TransactOpts, subId, consumer) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) RemoveConsumer(subId uint64, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.RemoveConsumer(&_VRFCoordinatorTestV2.TransactOpts, subId, consumer) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) RequestRandomWords(opts *bind.TransactOpts, keyHash [32]byte, subId uint64, requestConfirmations uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "requestRandomWords", keyHash, subId, requestConfirmations, callbackGasLimit, numWords) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) RequestRandomWords(keyHash [32]byte, subId uint64, requestConfirmations uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.RequestRandomWords(&_VRFCoordinatorTestV2.TransactOpts, keyHash, subId, requestConfirmations, callbackGasLimit, numWords) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) RequestRandomWords(keyHash [32]byte, subId uint64, requestConfirmations uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.RequestRandomWords(&_VRFCoordinatorTestV2.TransactOpts, keyHash, subId, requestConfirmations, callbackGasLimit, numWords) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) RequestSubscriptionOwnerTransfer(opts 
*bind.TransactOpts, subId uint64, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "requestSubscriptionOwnerTransfer", subId, newOwner) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) RequestSubscriptionOwnerTransfer(subId uint64, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.RequestSubscriptionOwnerTransfer(&_VRFCoordinatorTestV2.TransactOpts, subId, newOwner) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) RequestSubscriptionOwnerTransfer(subId uint64, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.RequestSubscriptionOwnerTransfer(&_VRFCoordinatorTestV2.TransactOpts, subId, newOwner) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) SetConfig(opts *bind.TransactOpts, minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig VRFCoordinatorTestV2FeeConfig) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "setConfig", minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig VRFCoordinatorTestV2FeeConfig) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.SetConfig(&_VRFCoordinatorTestV2.TransactOpts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, 
gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig VRFCoordinatorTestV2FeeConfig) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.SetConfig(&_VRFCoordinatorTestV2.TransactOpts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Transactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Session) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.TransferOwnership(&_VRFCoordinatorTestV2.TransactOpts, to) +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2TransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorTestV2.Contract.TransferOwnership(&_VRFCoordinatorTestV2.TransactOpts, to) +} + +type VRFCoordinatorTestV2ConfigSetIterator struct { + Event *VRFCoordinatorTestV2ConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2ConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2ConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2ConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func 
(it *VRFCoordinatorTestV2ConfigSetIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2ConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2ConfigSet struct { + MinimumRequestConfirmations uint16 + MaxGasLimit uint32 + StalenessSeconds uint32 + GasAfterPaymentCalculation uint32 + FallbackWeiPerUnitLink *big.Int + FeeConfig VRFCoordinatorTestV2FeeConfig + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterConfigSet(opts *bind.FilterOpts) (*VRFCoordinatorTestV2ConfigSetIterator, error) { + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2ConfigSetIterator{contract: _VRFCoordinatorTestV2.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2ConfigSet) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2ConfigSet) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseConfigSet(log types.Log) (*VRFCoordinatorTestV2ConfigSet, error) { + event := new(VRFCoordinatorTestV2ConfigSet) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + 
+type VRFCoordinatorTestV2FundsRecoveredIterator struct { + Event *VRFCoordinatorTestV2FundsRecovered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2FundsRecoveredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2FundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2FundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2FundsRecoveredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2FundsRecoveredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2FundsRecovered struct { + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterFundsRecovered(opts *bind.FilterOpts) (*VRFCoordinatorTestV2FundsRecoveredIterator, error) { + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2FundsRecoveredIterator{contract: _VRFCoordinatorTestV2.contract, event: "FundsRecovered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2FundsRecovered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "FundsRecovered") + if err != nil { + 
return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2FundsRecovered) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseFundsRecovered(log types.Log) (*VRFCoordinatorTestV2FundsRecovered, error) { + event := new(VRFCoordinatorTestV2FundsRecovered) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorTestV2OwnershipTransferRequestedIterator struct { + Event *VRFCoordinatorTestV2OwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2OwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2OwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2OwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2OwnershipTransferRequestedIterator) Error() error { + return 
it.fail +} + +func (it *VRFCoordinatorTestV2OwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2OwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorTestV2OwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2OwnershipTransferRequestedIterator{contract: _VRFCoordinatorTestV2.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2OwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2OwnershipTransferRequested) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case 
sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseOwnershipTransferRequested(log types.Log) (*VRFCoordinatorTestV2OwnershipTransferRequested, error) { + event := new(VRFCoordinatorTestV2OwnershipTransferRequested) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorTestV2OwnershipTransferredIterator struct { + Event *VRFCoordinatorTestV2OwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2OwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2OwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2OwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2OwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2OwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2OwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterOwnershipTransferred(opts 
*bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorTestV2OwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2OwnershipTransferredIterator{contract: _VRFCoordinatorTestV2.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2OwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2OwnershipTransferred) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseOwnershipTransferred(log types.Log) (*VRFCoordinatorTestV2OwnershipTransferred, error) { + event := new(VRFCoordinatorTestV2OwnershipTransferred) + if err := 
_VRFCoordinatorTestV2.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorTestV2ProvingKeyDeregisteredIterator struct { + Event *VRFCoordinatorTestV2ProvingKeyDeregistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2ProvingKeyDeregisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2ProvingKeyDeregistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2ProvingKeyDeregistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2ProvingKeyDeregisteredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2ProvingKeyDeregisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2ProvingKeyDeregistered struct { + KeyHash [32]byte + Oracle common.Address + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterProvingKeyDeregistered(opts *bind.FilterOpts, oracle []common.Address) (*VRFCoordinatorTestV2ProvingKeyDeregisteredIterator, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "ProvingKeyDeregistered", oracleRule) + if err != nil { + return nil, err + } + return 
&VRFCoordinatorTestV2ProvingKeyDeregisteredIterator{contract: _VRFCoordinatorTestV2.contract, event: "ProvingKeyDeregistered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchProvingKeyDeregistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2ProvingKeyDeregistered, oracle []common.Address) (event.Subscription, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "ProvingKeyDeregistered", oracleRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2ProvingKeyDeregistered) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "ProvingKeyDeregistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseProvingKeyDeregistered(log types.Log) (*VRFCoordinatorTestV2ProvingKeyDeregistered, error) { + event := new(VRFCoordinatorTestV2ProvingKeyDeregistered) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "ProvingKeyDeregistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorTestV2ProvingKeyRegisteredIterator struct { + Event *VRFCoordinatorTestV2ProvingKeyRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2ProvingKeyRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(VRFCoordinatorTestV2ProvingKeyRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2ProvingKeyRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2ProvingKeyRegisteredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2ProvingKeyRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2ProvingKeyRegistered struct { + KeyHash [32]byte + Oracle common.Address + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterProvingKeyRegistered(opts *bind.FilterOpts, oracle []common.Address) (*VRFCoordinatorTestV2ProvingKeyRegisteredIterator, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "ProvingKeyRegistered", oracleRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2ProvingKeyRegisteredIterator{contract: _VRFCoordinatorTestV2.contract, event: "ProvingKeyRegistered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchProvingKeyRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2ProvingKeyRegistered, oracle []common.Address) (event.Subscription, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "ProvingKeyRegistered", oracleRule) + if err != nil { + return nil, 
err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2ProvingKeyRegistered) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "ProvingKeyRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseProvingKeyRegistered(log types.Log) (*VRFCoordinatorTestV2ProvingKeyRegistered, error) { + event := new(VRFCoordinatorTestV2ProvingKeyRegistered) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "ProvingKeyRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorTestV2RandomWordsFulfilledIterator struct { + Event *VRFCoordinatorTestV2RandomWordsFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2RandomWordsFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2RandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2RandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2RandomWordsFulfilledIterator) Error() error { + return it.fail +} + 
+func (it *VRFCoordinatorTestV2RandomWordsFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2RandomWordsFulfilled struct { + RequestId *big.Int + OutputSeed *big.Int + Payment *big.Int + Success bool + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int) (*VRFCoordinatorTestV2RandomWordsFulfilledIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "RandomWordsFulfilled", requestIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2RandomWordsFulfilledIterator{contract: _VRFCoordinatorTestV2.contract, event: "RandomWordsFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2RandomWordsFulfilled, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "RandomWordsFulfilled", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2RandomWordsFulfilled) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 
*VRFCoordinatorTestV2Filterer) ParseRandomWordsFulfilled(log types.Log) (*VRFCoordinatorTestV2RandomWordsFulfilled, error) { + event := new(VRFCoordinatorTestV2RandomWordsFulfilled) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorTestV2RandomWordsRequestedIterator struct { + Event *VRFCoordinatorTestV2RandomWordsRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2RandomWordsRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2RandomWordsRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2RandomWordsRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2RandomWordsRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2RandomWordsRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2RandomWordsRequested struct { + KeyHash [32]byte + RequestId *big.Int + PreSeed *big.Int + SubId uint64 + MinimumRequestConfirmations uint16 + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []uint64, sender []common.Address) 
(*VRFCoordinatorTestV2RandomWordsRequestedIterator, error) { + + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "RandomWordsRequested", keyHashRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2RandomWordsRequestedIterator{contract: _VRFCoordinatorTestV2.contract, event: "RandomWordsRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2RandomWordsRequested, keyHash [][32]byte, subId []uint64, sender []common.Address) (event.Subscription, error) { + + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "RandomWordsRequested", keyHashRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2RandomWordsRequested) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "RandomWordsRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseRandomWordsRequested(log types.Log) (*VRFCoordinatorTestV2RandomWordsRequested, error) { + event := new(VRFCoordinatorTestV2RandomWordsRequested) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "RandomWordsRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorTestV2SubscriptionCanceledIterator struct { + Event *VRFCoordinatorTestV2SubscriptionCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2SubscriptionCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2SubscriptionCanceledIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2SubscriptionCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2SubscriptionCanceled struct { + SubId uint64 + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterSubscriptionCanceled(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionCanceledIterator, error) { + + var subIdRule 
[]interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "SubscriptionCanceled", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2SubscriptionCanceledIterator{contract: _VRFCoordinatorTestV2.contract, event: "SubscriptionCanceled", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionCanceled, subId []uint64) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "SubscriptionCanceled", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2SubscriptionCanceled) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseSubscriptionCanceled(log types.Log) (*VRFCoordinatorTestV2SubscriptionCanceled, error) { + event := new(VRFCoordinatorTestV2SubscriptionCanceled) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorTestV2SubscriptionConsumerAddedIterator struct { + Event *VRFCoordinatorTestV2SubscriptionConsumerAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub 
ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2SubscriptionConsumerAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2SubscriptionConsumerAddedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2SubscriptionConsumerAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2SubscriptionConsumerAdded struct { + SubId uint64 + Consumer common.Address + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionConsumerAddedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "SubscriptionConsumerAdded", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2SubscriptionConsumerAddedIterator{contract: _VRFCoordinatorTestV2.contract, event: "SubscriptionConsumerAdded", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionConsumerAdded, subId []uint64) 
(event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "SubscriptionConsumerAdded", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2SubscriptionConsumerAdded) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseSubscriptionConsumerAdded(log types.Log) (*VRFCoordinatorTestV2SubscriptionConsumerAdded, error) { + event := new(VRFCoordinatorTestV2SubscriptionConsumerAdded) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorTestV2SubscriptionConsumerRemovedIterator struct { + Event *VRFCoordinatorTestV2SubscriptionConsumerRemoved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2SubscriptionConsumerRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(VRFCoordinatorTestV2SubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2SubscriptionConsumerRemovedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2SubscriptionConsumerRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2SubscriptionConsumerRemoved struct { + SubId uint64 + Consumer common.Address + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionConsumerRemovedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "SubscriptionConsumerRemoved", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2SubscriptionConsumerRemovedIterator{contract: _VRFCoordinatorTestV2.contract, event: "SubscriptionConsumerRemoved", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionConsumerRemoved, subId []uint64) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "SubscriptionConsumerRemoved", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2SubscriptionConsumerRemoved) + if err := 
_VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseSubscriptionConsumerRemoved(log types.Log) (*VRFCoordinatorTestV2SubscriptionConsumerRemoved, error) { + event := new(VRFCoordinatorTestV2SubscriptionConsumerRemoved) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorTestV2SubscriptionCreatedIterator struct { + Event *VRFCoordinatorTestV2SubscriptionCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2SubscriptionCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2SubscriptionCreatedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2SubscriptionCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2SubscriptionCreated struct { + SubId 
uint64 + Owner common.Address + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterSubscriptionCreated(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionCreatedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "SubscriptionCreated", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2SubscriptionCreatedIterator{contract: _VRFCoordinatorTestV2.contract, event: "SubscriptionCreated", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionCreated, subId []uint64) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "SubscriptionCreated", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2SubscriptionCreated) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseSubscriptionCreated(log types.Log) (*VRFCoordinatorTestV2SubscriptionCreated, error) { + event := new(VRFCoordinatorTestV2SubscriptionCreated) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return nil, err + } + event.Raw = log + 
return event, nil +} + +type VRFCoordinatorTestV2SubscriptionFundedIterator struct { + Event *VRFCoordinatorTestV2SubscriptionFunded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2SubscriptionFundedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2SubscriptionFundedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2SubscriptionFundedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2SubscriptionFunded struct { + SubId uint64 + OldBalance *big.Int + NewBalance *big.Int + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterSubscriptionFunded(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionFundedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "SubscriptionFunded", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2SubscriptionFundedIterator{contract: _VRFCoordinatorTestV2.contract, event: "SubscriptionFunded", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 
*VRFCoordinatorTestV2Filterer) WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionFunded, subId []uint64) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "SubscriptionFunded", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2SubscriptionFunded) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseSubscriptionFunded(log types.Log) (*VRFCoordinatorTestV2SubscriptionFunded, error) { + event := new(VRFCoordinatorTestV2SubscriptionFunded) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorTestV2SubscriptionOwnerTransferRequestedIterator struct { + Event *VRFCoordinatorTestV2SubscriptionOwnerTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2SubscriptionOwnerTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return 
true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorTestV2SubscriptionOwnerTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2SubscriptionOwnerTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2SubscriptionOwnerTransferRequested struct { + SubId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionOwnerTransferRequestedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "SubscriptionOwnerTransferRequested", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2SubscriptionOwnerTransferRequestedIterator{contract: _VRFCoordinatorTestV2.contract, event: "SubscriptionOwnerTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionOwnerTransferRequested, subId []uint64) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "SubscriptionOwnerTransferRequested", subIdRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2SubscriptionOwnerTransferRequested) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseSubscriptionOwnerTransferRequested(log types.Log) (*VRFCoordinatorTestV2SubscriptionOwnerTransferRequested, error) { + event := new(VRFCoordinatorTestV2SubscriptionOwnerTransferRequested) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorTestV2SubscriptionOwnerTransferredIterator struct { + Event *VRFCoordinatorTestV2SubscriptionOwnerTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorTestV2SubscriptionOwnerTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorTestV2SubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() 
+ } +} + +func (it *VRFCoordinatorTestV2SubscriptionOwnerTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorTestV2SubscriptionOwnerTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorTestV2SubscriptionOwnerTransferred struct { + SubId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionOwnerTransferredIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.FilterLogs(opts, "SubscriptionOwnerTransferred", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorTestV2SubscriptionOwnerTransferredIterator{contract: _VRFCoordinatorTestV2.contract, event: "SubscriptionOwnerTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionOwnerTransferred, subId []uint64) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorTestV2.contract.WatchLogs(opts, "SubscriptionOwnerTransferred", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorTestV2SubscriptionOwnerTransferred) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2Filterer) ParseSubscriptionOwnerTransferred(log types.Log) (*VRFCoordinatorTestV2SubscriptionOwnerTransferred, error) { + event := new(VRFCoordinatorTestV2SubscriptionOwnerTransferred) + if err := _VRFCoordinatorTestV2.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetConfig struct { + MinimumRequestConfirmations uint16 + MaxGasLimit uint32 + StalenessSeconds uint32 + GasAfterPaymentCalculation uint32 +} +type GetFeeConfig struct { + FulfillmentFlatFeeLinkPPMTier1 uint32 + FulfillmentFlatFeeLinkPPMTier2 uint32 + FulfillmentFlatFeeLinkPPMTier3 uint32 + FulfillmentFlatFeeLinkPPMTier4 uint32 + FulfillmentFlatFeeLinkPPMTier5 uint32 + ReqsForTier2 *big.Int + ReqsForTier3 *big.Int + ReqsForTier4 *big.Int + ReqsForTier5 *big.Int +} +type GetSubscription struct { + Balance *big.Int + ReqCount uint64 + Owner common.Address + Consumers []common.Address +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFCoordinatorTestV2.abi.Events["ConfigSet"].ID: + return _VRFCoordinatorTestV2.ParseConfigSet(log) + case _VRFCoordinatorTestV2.abi.Events["FundsRecovered"].ID: + return _VRFCoordinatorTestV2.ParseFundsRecovered(log) + case _VRFCoordinatorTestV2.abi.Events["OwnershipTransferRequested"].ID: + return _VRFCoordinatorTestV2.ParseOwnershipTransferRequested(log) + case _VRFCoordinatorTestV2.abi.Events["OwnershipTransferred"].ID: + return _VRFCoordinatorTestV2.ParseOwnershipTransferred(log) + case _VRFCoordinatorTestV2.abi.Events["ProvingKeyDeregistered"].ID: + return _VRFCoordinatorTestV2.ParseProvingKeyDeregistered(log) + case _VRFCoordinatorTestV2.abi.Events["ProvingKeyRegistered"].ID: + return _VRFCoordinatorTestV2.ParseProvingKeyRegistered(log) + case 
_VRFCoordinatorTestV2.abi.Events["RandomWordsFulfilled"].ID: + return _VRFCoordinatorTestV2.ParseRandomWordsFulfilled(log) + case _VRFCoordinatorTestV2.abi.Events["RandomWordsRequested"].ID: + return _VRFCoordinatorTestV2.ParseRandomWordsRequested(log) + case _VRFCoordinatorTestV2.abi.Events["SubscriptionCanceled"].ID: + return _VRFCoordinatorTestV2.ParseSubscriptionCanceled(log) + case _VRFCoordinatorTestV2.abi.Events["SubscriptionConsumerAdded"].ID: + return _VRFCoordinatorTestV2.ParseSubscriptionConsumerAdded(log) + case _VRFCoordinatorTestV2.abi.Events["SubscriptionConsumerRemoved"].ID: + return _VRFCoordinatorTestV2.ParseSubscriptionConsumerRemoved(log) + case _VRFCoordinatorTestV2.abi.Events["SubscriptionCreated"].ID: + return _VRFCoordinatorTestV2.ParseSubscriptionCreated(log) + case _VRFCoordinatorTestV2.abi.Events["SubscriptionFunded"].ID: + return _VRFCoordinatorTestV2.ParseSubscriptionFunded(log) + case _VRFCoordinatorTestV2.abi.Events["SubscriptionOwnerTransferRequested"].ID: + return _VRFCoordinatorTestV2.ParseSubscriptionOwnerTransferRequested(log) + case _VRFCoordinatorTestV2.abi.Events["SubscriptionOwnerTransferred"].ID: + return _VRFCoordinatorTestV2.ParseSubscriptionOwnerTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFCoordinatorTestV2ConfigSet) Topic() common.Hash { + return common.HexToHash("0xc21e3bd2e0b339d2848f0dd956947a88966c242c0c0c582a33137a5c1ceb5cb2") +} + +func (VRFCoordinatorTestV2FundsRecovered) Topic() common.Hash { + return common.HexToHash("0x59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600") +} + +func (VRFCoordinatorTestV2OwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFCoordinatorTestV2OwnershipTransferred) Topic() common.Hash { + return 
common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFCoordinatorTestV2ProvingKeyDeregistered) Topic() common.Hash { + return common.HexToHash("0x72be339577868f868798bac2c93e52d6f034fef4689a9848996c14ebb7416c0d") +} + +func (VRFCoordinatorTestV2ProvingKeyRegistered) Topic() common.Hash { + return common.HexToHash("0xe729ae16526293f74ade739043022254f1489f616295a25bf72dfb4511ed73b8") +} + +func (VRFCoordinatorTestV2RandomWordsFulfilled) Topic() common.Hash { + return common.HexToHash("0x7dffc5ae5ee4e2e4df1651cf6ad329a73cebdb728f37ea0187b9b17e036756e4") +} + +func (VRFCoordinatorTestV2RandomWordsRequested) Topic() common.Hash { + return common.HexToHash("0x63373d1c4696214b898952999c9aaec57dac1ee2723cec59bea6888f489a9772") +} + +func (VRFCoordinatorTestV2SubscriptionCanceled) Topic() common.Hash { + return common.HexToHash("0xe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd49815") +} + +func (VRFCoordinatorTestV2SubscriptionConsumerAdded) Topic() common.Hash { + return common.HexToHash("0x43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e0") +} + +func (VRFCoordinatorTestV2SubscriptionConsumerRemoved) Topic() common.Hash { + return common.HexToHash("0x182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b") +} + +func (VRFCoordinatorTestV2SubscriptionCreated) Topic() common.Hash { + return common.HexToHash("0x464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf") +} + +func (VRFCoordinatorTestV2SubscriptionFunded) Topic() common.Hash { + return common.HexToHash("0xd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f8") +} + +func (VRFCoordinatorTestV2SubscriptionOwnerTransferRequested) Topic() common.Hash { + return common.HexToHash("0x69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be") +} + +func (VRFCoordinatorTestV2SubscriptionOwnerTransferred) Topic() common.Hash { + return 
common.HexToHash("0x6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f0") +} + +func (_VRFCoordinatorTestV2 *VRFCoordinatorTestV2) Address() common.Address { + return _VRFCoordinatorTestV2.address +} + +type VRFCoordinatorTestV2Interface interface { + BLOCKHASHSTORE(opts *bind.CallOpts) (common.Address, error) + + PLI(opts *bind.CallOpts) (common.Address, error) + + PLIETHFEED(opts *bind.CallOpts) (common.Address, error) + + MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) + + MAXNUMWORDS(opts *bind.CallOpts) (uint32, error) + + MAXREQUESTCONFIRMATIONS(opts *bind.CallOpts) (uint16, error) + + GetCommitment(opts *bind.CallOpts, requestId *big.Int) ([32]byte, error) + + GetConfig(opts *bind.CallOpts) (GetConfig, + + error) + + GetCurrentSubId(opts *bind.CallOpts) (uint64, error) + + GetFallbackWeiPerUnitLink(opts *bind.CallOpts) (*big.Int, error) + + GetFeeConfig(opts *bind.CallOpts) (GetFeeConfig, + + error) + + GetFeeTier(opts *bind.CallOpts, reqCount uint64) (uint32, error) + + GetRequestConfig(opts *bind.CallOpts) (uint16, uint32, [][32]byte, error) + + GetSubscription(opts *bind.CallOpts, subId uint64) (GetSubscription, + + error) + + GetTotalBalance(opts *bind.CallOpts) (*big.Int, error) + + HashOfKey(opts *bind.CallOpts, publicKey [2]*big.Int) ([32]byte, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + PendingRequestExists(opts *bind.CallOpts, subId uint64) (bool, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) + + AddConsumer(opts *bind.TransactOpts, subId uint64, consumer common.Address) (*types.Transaction, error) + + CancelSubscription(opts *bind.TransactOpts, subId uint64, to common.Address) (*types.Transaction, error) + + CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) + + DeregisterProvingKey(opts 
*bind.TransactOpts, publicProvingKey [2]*big.Int) (*types.Transaction, error) + + FulfillRandomWords(opts *bind.TransactOpts, proof VRFProof, rc VRFCoordinatorTestV2RequestCommitment) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) + + OwnerCancelSubscription(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + RegisterProvingKey(opts *bind.TransactOpts, oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) + + RemoveConsumer(opts *bind.TransactOpts, subId uint64, consumer common.Address) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, keyHash [32]byte, subId uint64, requestConfirmations uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) + + RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId uint64, newOwner common.Address) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig VRFCoordinatorTestV2FeeConfig) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterConfigSet(opts *bind.FilterOpts) (*VRFCoordinatorTestV2ConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2ConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*VRFCoordinatorTestV2ConfigSet, error) + + FilterFundsRecovered(opts *bind.FilterOpts) (*VRFCoordinatorTestV2FundsRecoveredIterator, error) + + WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- 
*VRFCoordinatorTestV2FundsRecovered) (event.Subscription, error) + + ParseFundsRecovered(log types.Log) (*VRFCoordinatorTestV2FundsRecovered, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorTestV2OwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2OwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFCoordinatorTestV2OwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorTestV2OwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2OwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFCoordinatorTestV2OwnershipTransferred, error) + + FilterProvingKeyDeregistered(opts *bind.FilterOpts, oracle []common.Address) (*VRFCoordinatorTestV2ProvingKeyDeregisteredIterator, error) + + WatchProvingKeyDeregistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2ProvingKeyDeregistered, oracle []common.Address) (event.Subscription, error) + + ParseProvingKeyDeregistered(log types.Log) (*VRFCoordinatorTestV2ProvingKeyDeregistered, error) + + FilterProvingKeyRegistered(opts *bind.FilterOpts, oracle []common.Address) (*VRFCoordinatorTestV2ProvingKeyRegisteredIterator, error) + + WatchProvingKeyRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2ProvingKeyRegistered, oracle []common.Address) (event.Subscription, error) + + ParseProvingKeyRegistered(log types.Log) (*VRFCoordinatorTestV2ProvingKeyRegistered, error) + + FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int) (*VRFCoordinatorTestV2RandomWordsFulfilledIterator, error) + + 
WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2RandomWordsFulfilled, requestId []*big.Int) (event.Subscription, error) + + ParseRandomWordsFulfilled(log types.Log) (*VRFCoordinatorTestV2RandomWordsFulfilled, error) + + FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []uint64, sender []common.Address) (*VRFCoordinatorTestV2RandomWordsRequestedIterator, error) + + WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2RandomWordsRequested, keyHash [][32]byte, subId []uint64, sender []common.Address) (event.Subscription, error) + + ParseRandomWordsRequested(log types.Log) (*VRFCoordinatorTestV2RandomWordsRequested, error) + + FilterSubscriptionCanceled(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionCanceledIterator, error) + + WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionCanceled, subId []uint64) (event.Subscription, error) + + ParseSubscriptionCanceled(log types.Log) (*VRFCoordinatorTestV2SubscriptionCanceled, error) + + FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionConsumerAddedIterator, error) + + WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionConsumerAdded, subId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerAdded(log types.Log) (*VRFCoordinatorTestV2SubscriptionConsumerAdded, error) + + FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionConsumerRemovedIterator, error) + + WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionConsumerRemoved, subId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerRemoved(log types.Log) (*VRFCoordinatorTestV2SubscriptionConsumerRemoved, error) + + FilterSubscriptionCreated(opts *bind.FilterOpts, subId []uint64) 
(*VRFCoordinatorTestV2SubscriptionCreatedIterator, error) + + WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionCreated, subId []uint64) (event.Subscription, error) + + ParseSubscriptionCreated(log types.Log) (*VRFCoordinatorTestV2SubscriptionCreated, error) + + FilterSubscriptionFunded(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionFundedIterator, error) + + WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionFunded, subId []uint64) (event.Subscription, error) + + ParseSubscriptionFunded(log types.Log) (*VRFCoordinatorTestV2SubscriptionFunded, error) + + FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionOwnerTransferRequestedIterator, error) + + WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionOwnerTransferRequested, subId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferRequested(log types.Log) (*VRFCoordinatorTestV2SubscriptionOwnerTransferRequested, error) + + FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorTestV2SubscriptionOwnerTransferredIterator, error) + + WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorTestV2SubscriptionOwnerTransferred, subId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferred(log types.Log) (*VRFCoordinatorTestV2SubscriptionOwnerTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_coordinator_v2/vrf_coordinator_v2.go b/core/gethwrappers/generated/vrf_coordinator_v2/vrf_coordinator_v2.go new file mode 100644 index 00000000..763d9fba --- /dev/null +++ b/core/gethwrappers/generated/vrf_coordinator_v2/vrf_coordinator_v2.go @@ -0,0 +1,3115 @@ +// Code generated - DO NOT EDIT. 
+// This file is a generated binding and any manual changes will be lost. + +package vrf_coordinator_v2 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type VRFCoordinatorV2FeeConfig struct { + FulfillmentFlatFeeLinkPPMTier1 uint32 + FulfillmentFlatFeeLinkPPMTier2 uint32 + FulfillmentFlatFeeLinkPPMTier3 uint32 + FulfillmentFlatFeeLinkPPMTier4 uint32 + FulfillmentFlatFeeLinkPPMTier5 uint32 + ReqsForTier2 *big.Int + ReqsForTier3 *big.Int + ReqsForTier4 *big.Int + ReqsForTier5 *big.Int +} + +type VRFCoordinatorV2RequestCommitment struct { + BlockNum uint64 + SubId uint64 + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address +} + +type VRFProof struct { + Pk [2]*big.Int + Gamma [2]*big.Int + C *big.Int + S *big.Int + Seed *big.Int + UWitness common.Address + CGammaWitness [2]*big.Int + SHashWitness [2]*big.Int + ZInv *big.Int +} + +var VRFCoordinatorV2MetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"blockhashStore\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkEthFeed\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"internalBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"externalBalance\",\"type\":\"uint256\"}],\"name\":\"BalanceInvariantViolated\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"}],\"name\":\"BlockhashNotInStore\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"have\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"want\",\"type\":\"uint32\"}],\"name\":\"GasLimitTooBig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectCommitment\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientBalance\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"have\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"want\",\"type\":\"uint256\"}],\"name\":\"InsufficientGasForConsumer\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"InvalidConsumer\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"linkWei\",\"type\":\"int256\"}],\"name\":\"InvalidLinkWeiPrice\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"have\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"min\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"max\",\"type\":\"uint16\"}],\"name\":\"InvalidRequestConfirmations\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSubscription\",\"type\":\"error\"},{\"inputs\":[{\"inte
rnalType\":\"address\",\"name\":\"proposedOwner\",\"type\":\"address\"}],\"name\":\"MustBeRequestedOwner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"MustBeSubOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoCorrespondingRequest\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"NoSuchProvingKey\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"have\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"want\",\"type\":\"uint32\"}],\"name\":\"NumWordsTooBig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableFromLink\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentTooLarge\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PendingRequestExists\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"ProvingKeyAlreadyRegistered\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"Reentrant\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyConsumers\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier1\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier2\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier3\",\"type\":\"uint32\"},{\"internalType\"
:\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier4\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier5\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier2\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier3\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier4\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier5\",\"type\":\"uint24\"}],\"indexed\":false,\"internalType\":\"structVRFCoordinatorV2.FeeConfig\",\"name\":\"feeConfig\",\"type\":\"tuple\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"FundsRecovered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"}],\"name\":\"ProvingKeyDeregistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"}],\"name\":\"ProvingKeyRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed
\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"outputSeed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"name\":\"RandomWordsFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"preSeed\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RandomWordsRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"SubscriptionCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"
internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"SubscriptionCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"SubscriptionFunded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"BLOCKHASH_STORE\",\"outputs\":[{\"internalType\":\"contractBlockhashStoreInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI_ETH_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\
"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_CONSUMERS\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_NUM_WORDS\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_REQUEST_CONFIRMATIONS\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"}],\"name\":\"acceptSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"addConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"cancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"createSubscription\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"publicProvingKey\",\"type\":\"uint256[2]\"}],\"name\":\"deregisterProvingKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\
"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"internalType\":\"structVRF.Proof\",\"name\":\"proof\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"blockNum\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"internalType\":\"structVRFCoordinatorV2.RequestCommitment\",\"name\":\"rc\",\"type\":\"tuple\"}],\"name\":\"fulfillRandomWords\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"name\":\"getCommitment\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCurrentSubId\",\"outputs\":[{\"internalType\":\"uint64\",\
"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFallbackWeiPerUnitLink\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFeeConfig\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier1\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier2\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier3\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier4\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier5\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier2\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier3\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier4\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier5\",\"type\":\"uint24\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"reqCount\",\"type\":\"uint64\"}],\"name\":\"getFeeTier\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRequestConfig\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"},{\"internalType\":\"bytes32[]\",\"name\":\"\",\"type\":\"bytes32[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"}],\"name\":\"getSubscription\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"uint64\",\"name\":\"reqCount\",\"type\":\"uint64\"},{\"internalType\":\"address\",\
"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTotalBalance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"publicKey\",\"type\":\"uint256[2]\"}],\"name\":\"hashOfKey\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"oracleWithdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"}],\"name\":\"ownerCancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"}],\"name\":\"pendingRequestExists\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"addres
s\",\"name\":\"oracle\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"publicProvingKey\",\"type\":\"uint256[2]\"}],\"name\":\"registerProvingKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"removeConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"}],\"name\":\"requestRandomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"requestSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"},{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier1\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier2\",\"type\":\"uint32\"},{\"internalType\":\"uint
32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier3\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier4\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier5\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier2\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier3\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier4\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier5\",\"type\":\"uint24\"}],\"internalType\":\"structVRFCoordinatorV2.FeeConfig\",\"name\":\"feeConfig\",\"type\":\"tuple\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x60e06040523480156200001157600080fd5b5060405162005b3b38038062005b3b8339810160408190526200003491620001b1565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000e8565b5050506001600160601b0319606093841b811660805290831b811660a052911b1660c052620001fb565b6001600160a01b038116331415620001435760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001ac57600080fd5b919050565b600080600060608486031215620001c757600080fd5b620001d28462000194565b9250620001e26020850162000194565b9150620001f26040850162000194565b90509250925092565b60805160601c60a05160601c60c05160601c6158d6620002656000396000818161051901526138e70152600081816106030152613e4501526000818161036d015281816114da0152818161237701528181612dae01528181612eea015261350f01526158d66000f3fe608060405234801561001057600080fd5b506004361061025b5760003560e01c80636f64f03f11610145578063ad178361116100bd578063d2f9f9a71161008c578063e72f6e3011610071578063e72f6e30146106e0578063e82ad7d4146106f3578063f2fde38b1461071657600080fd5b8063d2f9f9a7146106ba578063d7ae1d30146106cd57600080fd5b8063ad178361146105fe578063af198b9714610625578063c3f909d414610655578063caf70c4a146106a757600080fd5b80638da5cb5b11610114578063a21a23e4116100f9578063a21a23e4146105c0578063a47c7696146105c8578063a4c0ed36146105eb57600080fd5b80638da5cb5b1461059c5780639f87fad7146105ad57600080fd5b80636f64f03f1461055b5780637341c10c1461056e57806379ba509714610581578063823597401461058957600080fd5b8063356dac71116101d85780635fbbc0d2116101a757806366316d8d1161018c57806366316d8d14610501578063689c45171461051457806369bcd
b7d1461053b57600080fd5b80635fbbc0d2146103f357806364d51a2a146104f957600080fd5b8063356dac71146103a757806340d6bb82146103af5780634cb48a54146103cd5780635d3b1d30146103e057600080fd5b806308821d581161022f57806315c48b841161021457806315c48b841461030e578063181f5a77146103295780631b6b6d231461036857600080fd5b806308821d58146102cf57806312b58349146102e257600080fd5b80620122911461026057806302bcc5b61461028057806304c357cb1461029557806306bfa637146102a8575b600080fd5b610268610729565b604051610277939291906153b5565b60405180910390f35b61029361028e3660046151e8565b6107a5565b005b6102936102a3366004615203565b610837565b60055467ffffffffffffffff165b60405167ffffffffffffffff9091168152602001610277565b6102936102dd366004614ef9565b6109eb565b6005546801000000000000000090046bffffffffffffffffffffffff165b604051908152602001610277565b61031660c881565b60405161ffff9091168152602001610277565b604080518082018252601681527f565246436f6f7264696e61746f72563220312e302e30000000000000000000006020820152905161027791906153a2565b61038f7f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b039091168152602001610277565b600a54610300565b6103b86101f481565b60405163ffffffff9091168152602001610277565b6102936103db366004615092565b610bb0565b6103006103ee366004614f6c565b610fa7565b600c546040805163ffffffff80841682526401000000008404811660208301526801000000000000000084048116928201929092526c010000000000000000000000008304821660608201527001000000000000000000000000000000008304909116608082015262ffffff740100000000000000000000000000000000000000008304811660a0830152770100000000000000000000000000000000000000000000008304811660c08301527a0100000000000000000000000000000000000000000000000000008304811660e08301527d01000000000000000000000000000000000000000000000000000000000090920490911661010082015261012001610277565b610316606481565b61029361050f366004614eb1565b611385565b61038f7f000000000000000000000000000000000000000000000000000000000000000081565b6103006105493660046151cf565b60009081526009602052604090205490565b610293610569366004614df6565
b6115d4565b61029361057c366004615203565b611704565b610293611951565b6102936105973660046151e8565b611a1a565b6000546001600160a01b031661038f565b6102936105bb366004615203565b611be0565b6102b661201f565b6105db6105d63660046151e8565b612202565b6040516102779493929190615553565b6102936105f9366004614e2a565b612325565b61038f7f000000000000000000000000000000000000000000000000000000000000000081565b610638610633366004614fca565b61257c565b6040516bffffffffffffffffffffffff9091168152602001610277565b600b546040805161ffff8316815263ffffffff6201000084048116602083015267010000000000000084048116928201929092526b010000000000000000000000909204166060820152608001610277565b6103006106b5366004614f15565b612a16565b6103b86106c83660046151e8565b612a46565b6102936106db366004615203565b612c3b565b6102936106ee366004614ddb565b612d75565b6107066107013660046151e8565b612fb2565b6040519015158152602001610277565b610293610724366004614ddb565b6131d5565b600b546007805460408051602080840282018101909252828152600094859460609461ffff8316946201000090930463ffffffff1693919283919083018282801561079357602002820191906000526020600020905b81548152602001906001019080831161077f575b50505050509050925092509250909192565b6107ad6131e6565b67ffffffffffffffff81166000908152600360205260409020546001600160a01b0316610806576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff81166000908152600360205260409020546108349082906001600160a01b0316613242565b50565b67ffffffffffffffff821660009081526003602052604090205482906001600160a01b031680610893576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336001600160a01b038216146108e5576040517fd8a3fb520000000000000000000000000000000000000000000000000000000081526001600160a01b03821660048201526024015b60405180910390fd5b600b546601000000000000900460ff161561092c576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff84166000908152600360205260409020600101546001600160a01b0
38481169116146109e55767ffffffffffffffff841660008181526003602090815260409182902060010180547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b0388169081179091558251338152918201527f69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be91015b60405180910390a25b50505050565b6109f36131e6565b604080518082018252600091610a22919084906002908390839080828437600092019190915250612a16915050565b6000818152600660205260409020549091506001600160a01b031680610a77576040517f77f5b84c000000000000000000000000000000000000000000000000000000008152600481018390526024016108dc565b600082815260066020526040812080547fffffffffffffffffffffffff00000000000000000000000000000000000000001690555b600754811015610b67578260078281548110610aca57610aca615823565b90600052602060002001541415610b55576007805460009190610aef906001906156b1565b81548110610aff57610aff615823565b906000526020600020015490508060078381548110610b2057610b20615823565b6000918252602090912001556007805480610b3d57610b3d6157f4565b60019003818190600052602060002001600090559055505b80610b5f81615721565b915050610aac565b50806001600160a01b03167f72be339577868f868798bac2c93e52d6f034fef4689a9848996c14ebb7416c0d83604051610ba391815260200190565b60405180910390a2505050565b610bb86131e6565b60c861ffff87161115610c0b576040517fa738697600000000000000000000000000000000000000000000000000000000815261ffff871660048201819052602482015260c860448201526064016108dc565b60008213610c48576040517f43d4cf66000000000000000000000000000000000000000000000000000000008152600481018390526024016108dc565b6040805160a0808201835261ffff891680835263ffffffff89811660208086018290526000868801528a831660608088018290528b85166080988901819052600b80547fffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000001690971762010000909502949094177fffffffffffffffffffffffffffffffffff000000000000000000ffffffffffff166701000000000000009092027fffffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffff16919091176b010000000000000000000000909302929092179093558651600c80549489015189890151938a01519
78a0151968a015160c08b015160e08c01516101008d01519588167fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000009099169890981764010000000093881693909302929092177fffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff1668010000000000000000958716959095027fffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffff16949094176c0100000000000000000000000098861698909802979097177fffffffffffffffffff00000000000000ffffffffffffffffffffffffffffffff1670010000000000000000000000000000000096909416959095027fffffffffffffffffff000000ffffffffffffffffffffffffffffffffffffffff16929092177401000000000000000000000000000000000000000062ffffff92831602177fffffff000000000000ffffffffffffffffffffffffffffffffffffffffffffff1677010000000000000000000000000000000000000000000000958216959095027fffffff000000ffffffffffffffffffffffffffffffffffffffffffffffffffff16949094177a01000000000000000000000000000000000000000000000000000092851692909202919091177cffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167d0100000000000000000000000000000000000000000000000000000000009390911692909202919091178155600a84905590517fc21e3bd2e0b339d2848f0dd956947a88966c242c0c0c582a33137a5c1ceb5cb291610f97918991899189918991899190615414565b60405180910390a1505050505050565b600b546000906601000000000000900460ff1615610ff1576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff85166000908152600360205260409020546001600160a01b031661104a576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b33600090815260026020908152604080832067ffffffffffffffff808a16855292529091205416806110ba576040517ff0019fe600000000000000000000000000000000000000000000000000000000815267ffffffffffffffff871660048201523360248201526044016108dc565b600b5461ffff90811690861610806110d6575060c861ffff8616115b1561112657600b546040517fa738697600000000000000000000000000000000000000000000000000000000815261ffff8088166004830152909116602482015260c8604482015260640
16108dc565b600b5463ffffffff620100009091048116908516111561118d57600b546040517ff5d7e01e00000000000000000000000000000000000000000000000000000000815263ffffffff80871660048301526201000090920490911660248201526044016108dc565b6101f463ffffffff841611156111df576040517f47386bec00000000000000000000000000000000000000000000000000000000815263ffffffff841660048201526101f460248201526044016108dc565b60006111ec826001615616565b6040805160208082018c9052338284015267ffffffffffffffff808c16606084015284166080808401919091528351808403909101815260a08301845280519082012060c083018d905260e080840182905284518085039091018152610100909301909352815191012091925081611262613667565b60408051602081019390935282015267ffffffffffffffff8a16606082015263ffffffff8089166080830152871660a08201523360c082015260e00160408051808303601f19018152828252805160209182012060008681526009835283902055848352820183905261ffff8a169082015263ffffffff808916606083015287166080820152339067ffffffffffffffff8b16908c907f63373d1c4696214b898952999c9aaec57dac1ee2723cec59bea6888f489a97729060a00160405180910390a45033600090815260026020908152604080832067ffffffffffffffff808d16855292529091208054919093167fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000009091161790915591505095945050505050565b600b546601000000000000900460ff16156113cc576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336000908152600860205260409020546bffffffffffffffffffffffff80831691161015611426576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b33600090815260086020526040812080548392906114539084906bffffffffffffffffffffffff166156c8565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555080600560088282829054906101000a90046bffffffffffffffffffffffff166114aa91906156c8565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055507f00000000000000000000000000000000000000000000000000000000000000006001600160a01b031663a9059cbb838
36040518363ffffffff1660e01b81526004016115489291906001600160a01b039290921682526bffffffffffffffffffffffff16602082015260400190565b602060405180830381600087803b15801561156257600080fd5b505af1158015611576573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061159a9190614f31565b6115d0576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5050565b6115dc6131e6565b60408051808201825260009161160b919084906002908390839080828437600092019190915250612a16915050565b6000818152600660205260409020549091506001600160a01b031615611660576040517f4a0b8fa7000000000000000000000000000000000000000000000000000000008152600481018290526024016108dc565b600081815260066020908152604080832080547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b0388169081179091556007805460018101825594527fa66cc928b5edb82af9bd49922954155ab7b0942694bea4ce44661d9a8736c688909301849055518381527fe729ae16526293f74ade739043022254f1489f616295a25bf72dfb4511ed73b89101610ba3565b67ffffffffffffffff821660009081526003602052604090205482906001600160a01b031680611760576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336001600160a01b038216146117ad576040517fd8a3fb520000000000000000000000000000000000000000000000000000000081526001600160a01b03821660048201526024016108dc565b600b546601000000000000900460ff16156117f4576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff84166000908152600360205260409020600201546064141561184b576040517f05a48e0f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001600160a01b038316600090815260026020908152604080832067ffffffffffffffff80891685529252909120541615611885576109e5565b6001600160a01b038316600081815260026020818152604080842067ffffffffffffffff8a1680865290835281852080547fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000166001908117909155600384528286209094018054948501815
585529382902090920180547fffffffffffffffffffffffff00000000000000000000000000000000000000001685179055905192835290917f43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e091016109dc565b6001546001600160a01b031633146119ab5760405162461bcd60e51b815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016108dc565b60008054337fffffffffffffffffffffffff0000000000000000000000000000000000000000808316821784556001805490911690556040516001600160a01b0390921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b600b546601000000000000900460ff1615611a61576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff81166000908152600360205260409020546001600160a01b0316611aba576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff81166000908152600360205260409020600101546001600160a01b03163314611b425767ffffffffffffffff8116600090815260036020526040908190206001015490517fd084e9750000000000000000000000000000000000000000000000000000000081526001600160a01b0390911660048201526024016108dc565b67ffffffffffffffff81166000818152600360209081526040918290208054337fffffffffffffffffffffffff0000000000000000000000000000000000000000808316821784556001909301805490931690925583516001600160a01b03909116808252928101919091529092917f6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f0910160405180910390a25050565b67ffffffffffffffff821660009081526003602052604090205482906001600160a01b031680611c3c576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336001600160a01b03821614611c89576040517fd8a3fb520000000000000000000000000000000000000000000000000000000081526001600160a01b03821660048201526024016108dc565b600b546601000000000000900460ff1615611cd0576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b611cd984612fb2565
b15611d10576040517fb42f66e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6001600160a01b038316600090815260026020908152604080832067ffffffffffffffff808916855292529091205416611d91576040517ff0019fe600000000000000000000000000000000000000000000000000000000815267ffffffffffffffff851660048201526001600160a01b03841660248201526044016108dc565b67ffffffffffffffff8416600090815260036020908152604080832060020180548251818502810185019093528083529192909190830182828015611dff57602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311611de1575b50505050509050600060018251611e1691906156b1565b905060005b8251811015611f8e57856001600160a01b0316838281518110611e4057611e40615823565b60200260200101516001600160a01b03161415611f7c576000838381518110611e6b57611e6b615823565b6020026020010151905080600360008a67ffffffffffffffff1667ffffffffffffffff1681526020019081526020016000206002018381548110611eb157611eb1615823565b600091825260208083209190910180547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b03949094169390931790925567ffffffffffffffff8a168152600390915260409020600201805480611f1e57611f1e6157f4565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff000000000000000000000000000000000000000016905501905550611f8e565b80611f8681615721565b915050611e1b565b506001600160a01b038516600081815260026020908152604080832067ffffffffffffffff8b168085529083529281902080547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001690555192835290917f182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b91015b60405180910390a2505050505050565b600b546000906601000000000000900460ff1615612069576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6005805467ffffffffffffffff169060006120838361575a565b82546101009290920a67ffffffffffffffff818102199093169183160217909155600554169050600080604051908082528060200260200182016040528
0156120d6578160200160208202803683370190505b506040805180820182526000808252602080830182815267ffffffffffffffff888116808552600484528685209551865493516bffffffffffffffffffffffff9091167fffffffffffffffffffffffff0000000000000000000000000000000000000000948516176c01000000000000000000000000919093160291909117909455845160608101865233815280830184815281870188815295855260038452959093208351815483166001600160a01b03918216178255955160018201805490931696169590951790559151805194955090936121ba9260028501920190614b35565b505060405133815267ffffffffffffffff841691507f464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf9060200160405180910390a250905090565b67ffffffffffffffff8116600090815260036020526040812054819081906060906001600160a01b0316612262576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff80861660009081526004602090815260408083205460038352928190208054600290910180548351818602810186019094528084526bffffffffffffffffffffffff8616966c01000000000000000000000000909604909516946001600160a01b0390921693909291839183018282801561230f57602002820191906000526020600020905b81546001600160a01b031681526001909101906020018083116122f1575b5050505050905093509350935093509193509193565b600b546601000000000000900460ff161561236c576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016146123ce576040517f44b0e3c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60208114612408576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000612416828401846151e8565b67ffffffffffffffff81166000908152600360205260409020549091506001600160a01b0316612472576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff8116600090815260046020526040812080546bffffffffffffffffffffffff16918691906124a98385615639565
b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555084600560088282829054906101000a90046bffffffffffffffffffffffff166125009190615639565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508167ffffffffffffffff167fd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f882878461256791906155fe565b6040805192835260208301919091520161200f565b600b546000906601000000000000900460ff16156125c6576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005a905060008060006125da87876136f7565b9250925092506000866060015163ffffffff1667ffffffffffffffff81111561260557612605615852565b60405190808252806020026020018201604052801561262e578160200160208202803683370190505b50905060005b876060015163ffffffff168110156126a25760408051602081018590529081018290526060016040516020818303038152906040528051906020012060001c82828151811061268557612685615823565b60209081029190910101528061269a81615721565b915050612634565b506000838152600960205260408082208290555181907f1fe543e300000000000000000000000000000000000000000000000000000000906126ea9087908690602401615505565b60408051601f198184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff0000000000000000000000000000000000000000000000000000000090941693909317909252600b80547fffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff166601000000000000179055908a015160808b015191925060009161279a9163ffffffff169084613a05565b600b80547fffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff1690556020808c01805167ffffffffffffffff9081166000908152600490935260408084205492518216845290922080549394506c01000000000000000000000000918290048316936001939192600c9261281e928692900416615616565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555060006128758a600b600001600b9054906101000a900463ffffffff1663ffffffff1661286f85612a46565b3a613a53565b6020808e015167ffffffffffffffff1660009081526004909152604
09020549091506bffffffffffffffffffffffff808316911610156128e1576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6020808d015167ffffffffffffffff166000908152600490915260408120805483929061291d9084906bffffffffffffffffffffffff166156c8565b82546101009290920a6bffffffffffffffffffffffff81810219909316918316021790915560008b8152600660209081526040808320546001600160a01b03168352600890915281208054859450909261297991859116615639565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550877f7dffc5ae5ee4e2e4df1651cf6ad329a73cebdb728f37ea0187b9b17e036756e48883866040516129fc939291909283526bffffffffffffffffffffffff9190911660208301521515604082015260600190565b60405180910390a299505050505050505050505b92915050565b600081604051602001612a299190615394565b604051602081830303815290604052805190602001209050919050565b6040805161012081018252600c5463ffffffff80821683526401000000008204811660208401526801000000000000000082048116938301939093526c010000000000000000000000008104831660608301527001000000000000000000000000000000008104909216608082015262ffffff740100000000000000000000000000000000000000008304811660a08301819052770100000000000000000000000000000000000000000000008404821660c08401527a0100000000000000000000000000000000000000000000000000008404821660e08401527d0100000000000000000000000000000000000000000000000000000000009093041661010082015260009167ffffffffffffffff841611612b64575192915050565b8267ffffffffffffffff168160a0015162ffffff16108015612b9957508060c0015162ffffff168367ffffffffffffffff1611155b15612ba8576020015192915050565b8267ffffffffffffffff168160c0015162ffffff16108015612bdd57508060e0015162ffffff168367ffffffffffffffff1611155b15612bec576040015192915050565b8267ffffffffffffffff168160e0015162ffffff16108015612c22575080610100015162ffffff168367ffffffffffffffff1611155b15612c31576060015192915050565b6080015192915050565b67ffffffffffffffff821660009081526003602052604090205482906001600160a01b031680612c97576040517f1f6a65b600000000000000000000000
000000000000000000000000000000000815260040160405180910390fd5b336001600160a01b03821614612ce4576040517fd8a3fb520000000000000000000000000000000000000000000000000000000081526001600160a01b03821660048201526024016108dc565b600b546601000000000000900460ff1615612d2b576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b612d3484612fb2565b15612d6b576040517fb42f66e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6109e58484613242565b612d7d6131e6565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201526000907f00000000000000000000000000000000000000000000000000000000000000006001600160a01b0316906370a082319060240160206040518083038186803b158015612df857600080fd5b505afa158015612e0c573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612e309190614f53565b6005549091506801000000000000000090046bffffffffffffffffffffffff1681811115612e94576040517fa99da30200000000000000000000000000000000000000000000000000000000815260048101829052602481018390526044016108dc565b81811015612fad576000612ea882846156b1565b6040517fa9059cbb0000000000000000000000000000000000000000000000000000000081526001600160a01b038681166004830152602482018390529192507f00000000000000000000000000000000000000000000000000000000000000009091169063a9059cbb90604401602060405180830381600087803b158015612f3057600080fd5b505af1158015612f44573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612f689190614f31565b50604080516001600160a01b0386168152602081018390527f59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600910160405180910390a1505b505050565b67ffffffffffffffff81166000908152600360209081526040808320815160608101835281546001600160a01b039081168252600183015416818501526002820180548451818702810187018652818152879693958601939092919083018282801561304757602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311613029575b505050505081525050905060005b8160400151518110156131c
b5760005b6007548110156131b85760006131816007838154811061308757613087615823565b9060005260206000200154856040015185815181106130a8576130a8615823565b60200260200101518860026000896040015189815181106130cb576130cb615823565b6020908102919091018101516001600160a01b03168252818101929092526040908101600090812067ffffffffffffffff808f16835293522054166040805160208082018790526001600160a01b03959095168183015267ffffffffffffffff9384166060820152919092166080808301919091528251808303909101815260a08201835280519084012060c082019490945260e080820185905282518083039091018152610100909101909152805191012091565b50600081815260096020526040902054909150156131a55750600195945050505050565b50806131b081615721565b915050613065565b50806131c381615721565b915050613055565b5060009392505050565b6131dd6131e6565b61083481613bab565b6000546001600160a01b031633146132405760405162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016108dc565b565b600b546601000000000000900460ff1615613289576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff82166000908152600360209081526040808320815160608101835281546001600160a01b0390811682526001830154168185015260028201805484518187028101870186528181529295939486019383018282801561331a57602002820191906000526020600020905b81546001600160a01b031681526001909101906020018083116132fc575b5050509190925250505067ffffffffffffffff80851660009081526004602090815260408083208151808301909252546bffffffffffffffffffffffff81168083526c01000000000000000000000000909104909416918101919091529293505b8360400151518110156134145760026000856040015183815181106133a2576133a2615823565b6020908102919091018101516001600160a01b03168252818101929092526040908101600090812067ffffffffffffffff8a168252909252902080547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001690558061340c81615721565b91505061337b565b5067ffffffffffffffff8516600090815260036020526040812080547fffffffffffffffffffffffff00000000000000000000000
00000000000000000908116825560018201805490911690559061346f6002830182614bb2565b505067ffffffffffffffff8516600090815260046020526040902080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055600580548291906008906134df9084906801000000000000000090046bffffffffffffffffffffffff166156c8565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055507f00000000000000000000000000000000000000000000000000000000000000006001600160a01b031663a9059cbb85836bffffffffffffffffffffffff166040518363ffffffff1660e01b815260040161357d9291906001600160a01b03929092168252602082015260400190565b602060405180830381600087803b15801561359757600080fd5b505af11580156135ab573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906135cf9190614f31565b613605576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b604080516001600160a01b03861681526bffffffffffffffffffffffff8316602082015267ffffffffffffffff8716917fe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd49815910160405180910390a25050505050565b60004661367381613c6d565b156136f05760646001600160a01b031663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b1580156136b257600080fd5b505afa1580156136c6573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906136ea9190614f53565b91505090565b4391505090565b60008060006137098560000151612a16565b6000818152600660205260409020549093506001600160a01b03168061375e576040517f77f5b84c000000000000000000000000000000000000000000000000000000008152600481018590526024016108dc565b608086015160405161377d918691602001918252602082015260400190565b60408051601f19818403018152918152815160209283012060008181526009909352912054909350806137dc576040517f3688124a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b85516020808801516040808a015160608b015160808c01519251613848968b96909594910195865267ffffffffffffffff948516602087015292909316604085015263ffffffff908116606085015291909116608
08301526001600160a01b031660a082015260c00190565b604051602081830303815290604052805190602001208114613896576040517fd529142c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006138a58760000151613c90565b9050806139b15786516040517fe9413d3800000000000000000000000000000000000000000000000000000000815267ffffffffffffffff90911660048201527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03169063e9413d389060240160206040518083038186803b15801561393157600080fd5b505afa158015613945573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906139699190614f53565b9050806139b15786516040517f175dadad00000000000000000000000000000000000000000000000000000000815267ffffffffffffffff90911660048201526024016108dc565b60008860800151826040516020016139d3929190918252602082015260400190565b6040516020818303038152906040528051906020012060001c90506139f88982613d8f565b9450505050509250925092565b60005a611388811015613a1757600080fd5b611388810390508460408204820311613a2f57600080fd5b50823b613a3b57600080fd5b60008083516020850160008789f190505b9392505050565b600080613a5e613dfa565b905060008113613a9d576040517f43d4cf66000000000000000000000000000000000000000000000000000000008152600481018290526024016108dc565b6000613adf6000368080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250613f0192505050565b9050600082825a613af08b8b6155fe565b613afa91906156b1565b613b049088615674565b613b0e91906155fe565b613b2090670de0b6b3a7640000615674565b613b2a9190615660565b90506000613b4363ffffffff881664e8d4a51000615674565b9050613b5b816b033b2e3c9fd0803ce80000006156b1565b821115613b94576040517fe80fa38100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b613b9e81836155fe565b9998505050505050505050565b6001600160a01b038116331415613c045760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016108dc565b600180547fffffffffffffffffffffffff0000000
000000000000000000000000000000000166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600061a4b1821480613c81575062066eed82145b80612a1057505062066eee1490565b600046613c9c81613c6d565b15613d7f576101008367ffffffffffffffff16613cb7613667565b613cc191906156b1565b1180613cde5750613cd0613667565b8367ffffffffffffffff1610155b15613cec5750600092915050565b6040517f2b407a8200000000000000000000000000000000000000000000000000000000815267ffffffffffffffff84166004820152606490632b407a82906024015b60206040518083038186803b158015613d4757600080fd5b505afa158015613d5b573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613a4c9190614f53565b505067ffffffffffffffff164090565b6000613dc38360000151846020015185604001518660600151868860a001518960c001518a60e001518b6101000151613fdc565b60038360200151604051602001613ddb9291906154f1565b60408051601f1981840301815291905280516020909101209392505050565b600b54604080517ffeaf968c0000000000000000000000000000000000000000000000000000000081529051600092670100000000000000900463ffffffff169182151591849182917f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03169163feaf968c9160048083019260a0929190829003018186803b158015613e9357600080fd5b505afa158015613ea7573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613ecb919061522d565b509450909250849150508015613eef5750613ee682426156b1565b8463ffffffff16105b15613ef95750600a545b949350505050565b600046613f0d81613c6d565b15613f4c57606c6001600160a01b031663c6f7de0e6040518163ffffffff1660e01b815260040160206040518083038186803b158015613d4757600080fd5b613f5581614217565b15613fd35773420000000000000000000000000000000000000f6001600160a01b03166349948e0e8460405180608001604052806048815260200161588260489139604051602001613fa89291906152d2565b6040516020818303038152906040526040518263ffffffff1660e01b8152600401613d2f91906153a2565b50600092915050565b613fe589614251565b6140315760405162461bcd60e51b815260206004820152601a60248
201527f7075626c6963206b6579206973206e6f74206f6e20637572766500000000000060448201526064016108dc565b61403a88614251565b6140865760405162461bcd60e51b815260206004820152601560248201527f67616d6d61206973206e6f74206f6e206375727665000000000000000000000060448201526064016108dc565b61408f83614251565b6140db5760405162461bcd60e51b815260206004820152601d60248201527f6347616d6d615769746e657373206973206e6f74206f6e20637572766500000060448201526064016108dc565b6140e482614251565b6141305760405162461bcd60e51b815260206004820152601c60248201527f73486173685769746e657373206973206e6f74206f6e2063757276650000000060448201526064016108dc565b61413c878a888761432a565b6141885760405162461bcd60e51b815260206004820152601960248201527f6164647228632a706b2b732a6729213d5f755769746e6573730000000000000060448201526064016108dc565b60006141948a8761447b565b905060006141a7898b878b8689896144df565b905060006141b8838d8d8a8661460b565b9050808a146142095760405162461bcd60e51b815260206004820152600d60248201527f696e76616c69642070726f6f660000000000000000000000000000000000000060448201526064016108dc565b505050505050505050505050565b6000600a82148061422957506101a482145b80614236575062aa37dc82145b80614242575061210582145b80612a1057505062014a331490565b80516000906401000003d019116142aa5760405162461bcd60e51b815260206004820152601260248201527f696e76616c696420782d6f7264696e617465000000000000000000000000000060448201526064016108dc565b60208201516401000003d019116143035760405162461bcd60e51b815260206004820152601260248201527f696e76616c696420792d6f7264696e617465000000000000000000000000000060448201526064016108dc565b60208201516401000003d0199080096143238360005b602002015161464b565b1492915050565b60006001600160a01b0382166143825760405162461bcd60e51b815260206004820152600b60248201527f626164207769746e65737300000000000000000000000000000000000000000060448201526064016108dc565b60208401516000906001161561439957601c61439c565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641418587600060200201510986517ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25
e8cd0364141918203925060009190890987516040805160008082526020820180845287905260ff88169282019290925260608101929092526080820183905291925060019060a0016020604051602081039080840390855afa158015614453573d6000803e3d6000fd5b5050604051601f1901516001600160a01b039081169088161495505050505050949350505050565b614483614bd0565b6144b06001848460405160200161449c93929190615373565b60405160208183030381529060405261466f565b90505b6144bc81614251565b612a105780516040805160208101929092526144d8910161449c565b90506144b3565b6144e7614bd0565b825186516401000003d01990819006910614156145465760405162461bcd60e51b815260206004820152601e60248201527f706f696e747320696e2073756d206d7573742062652064697374696e6374000060448201526064016108dc565b6145518789886146be565b61459d5760405162461bcd60e51b815260206004820152601660248201527f4669727374206d756c20636865636b206661696c65640000000000000000000060448201526064016108dc565b6145a88486856146be565b6145f45760405162461bcd60e51b815260206004820152601760248201527f5365636f6e64206d756c20636865636b206661696c656400000000000000000060448201526064016108dc565b6145ff868484614806565b98975050505050505050565b60006002868686858760405160200161462996959493929190615301565b60408051601f1981840301815291905280516020909101209695505050505050565b6000806401000003d01980848509840990506401000003d019600782089392505050565b614677614bd0565b614680826148cd565b8152614695614690826000614319565b614908565b6020820181905260029006600114156146b9576020810180516401000003d0190390525b919050565b60008261470d5760405162461bcd60e51b815260206004820152600b60248201527f7a65726f207363616c617200000000000000000000000000000000000000000060448201526064016108dc565b8351602085015160009061472390600290615782565b1561472f57601c614732565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641418387096040805160008082526020820180845281905260ff86169282019290925260608101869052608081018390529192509060019060a0016020604051602081039080840390855afa1580156147b2573d6000803e3d6000fd5b5050506020604051035190506000866040516020016147d191906152c0565
b60408051601f1981840301815291905280516020909101206001600160a01b0392831692169190911498975050505050505050565b61480e614bd0565b83516020808601518551918601516000938493849361482f93909190614928565b919450925090506401000003d01985820960011461488f5760405162461bcd60e51b815260206004820152601960248201527f696e765a206d75737420626520696e7665727365206f66207a0000000000000060448201526064016108dc565b60405180604001604052806401000003d019806148ae576148ae6157c5565b87860981526020016401000003d0198785099052979650505050505050565b805160208201205b6401000003d01981106146b9576040805160208082019390935281518082038401815290820190915280519101206148d5565b6000612a108260026149216401000003d01960016155fe565b901c614a08565b60008080600180826401000003d019896401000003d019038808905060006401000003d0198b6401000003d019038a089050600061496883838585614ac8565b909850905061497988828e88614aec565b909850905061498a88828c87614aec565b9098509050600061499d8d878b85614aec565b90985090506149ae88828686614ac8565b90985090506149bf88828e89614aec565b90985090508181146149f4576401000003d019818a0998506401000003d01982890997506401000003d01981830996506149f8565b8196505b5050505050509450945094915050565b600080614a13614bee565b6020808252818101819052604082015260608101859052608081018490526401000003d01960a0820152614a45614c0c565b60208160c08460057ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa925082614abe5760405162461bcd60e51b815260206004820152601260248201527f6269674d6f64457870206661696c75726521000000000000000000000000000060448201526064016108dc565b5195945050505050565b6000806401000003d0198487096401000003d0198487099097909650945050505050565b600080806401000003d019878509905060006401000003d01987876401000003d019030990506401000003d0198183086401000003d01986890990999098509650505050505050565b828054828255906000526020600020908101928215614ba2579160200282015b82811115614ba257825182547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b03909116178255602090920191600190910190614b55565b50614bae929150614c2a565b5090565b5080546000825
5906000526020600020908101906108349190614c2a565b60405180604001604052806002906020820280368337509192915050565b6040518060c001604052806006906020820280368337509192915050565b60405180602001604052806001906020820280368337509192915050565b5b80821115614bae5760008155600101614c2b565b80356001600160a01b03811681146146b957600080fd5b8060408101831015612a1057600080fd5b600082601f830112614c7857600080fd5b6040516040810181811067ffffffffffffffff82111715614c9b57614c9b615852565b8060405250808385604086011115614cb257600080fd5b60005b6002811015614cd4578135835260209283019290910190600101614cb5565b509195945050505050565b600060a08284031215614cf157600080fd5b60405160a0810181811067ffffffffffffffff82111715614d1457614d14615852565b604052905080614d2383614da9565b8152614d3160208401614da9565b6020820152614d4260408401614d95565b6040820152614d5360608401614d95565b6060820152614d6460808401614c3f565b60808201525092915050565b803561ffff811681146146b957600080fd5b803562ffffff811681146146b957600080fd5b803563ffffffff811681146146b957600080fd5b803567ffffffffffffffff811681146146b957600080fd5b805169ffffffffffffffffffff811681146146b957600080fd5b600060208284031215614ded57600080fd5b613a4c82614c3f565b60008060608385031215614e0957600080fd5b614e1283614c3f565b9150614e218460208501614c56565b90509250929050565b60008060008060608587031215614e4057600080fd5b614e4985614c3f565b935060208501359250604085013567ffffffffffffffff80821115614e6d57600080fd5b818701915087601f830112614e8157600080fd5b813581811115614e9057600080fd5b886020828501011115614ea257600080fd5b95989497505060200194505050565b60008060408385031215614ec457600080fd5b614ecd83614c3f565b915060208301356bffffffffffffffffffffffff81168114614eee57600080fd5b809150509250929050565b600060408284031215614f0b57600080fd5b613a4c8383614c56565b600060408284031215614f2757600080fd5b613a4c8383614c67565b600060208284031215614f4357600080fd5b81518015158114613a4c57600080fd5b600060208284031215614f6557600080fd5b5051919050565b600080600080600060a08688031215614f8457600080fd5b85359450614f9460208701614da9565b9350614fa260408701614d705
65b9250614fb060608701614d95565b9150614fbe60808701614d95565b90509295509295909350565b600080828403610240811215614fdf57600080fd5b6101a080821215614fef57600080fd5b614ff76155d4565b91506150038686614c67565b82526150128660408701614c67565b60208301526080850135604083015260a0850135606083015260c0850135608083015261504160e08601614c3f565b60a083015261010061505587828801614c67565b60c0840152615068876101408801614c67565b60e0840152610180860135818401525081935061508786828701614cdf565b925050509250929050565b6000806000806000808688036101c08112156150ad57600080fd5b6150b688614d70565b96506150c460208901614d95565b95506150d260408901614d95565b94506150e060608901614d95565b935060808801359250610120807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff608301121561511b57600080fd5b6151236155d4565b915061513160a08a01614d95565b825261513f60c08a01614d95565b602083015261515060e08a01614d95565b6040830152610100615163818b01614d95565b6060840152615173828b01614d95565b60808401526151856101408b01614d82565b60a08401526151976101608b01614d82565b60c08401526151a96101808b01614d82565b60e08401526151bb6101a08b01614d82565b818401525050809150509295509295509295565b6000602082840312156151e157600080fd5b5035919050565b6000602082840312156151fa57600080fd5b613a4c82614da9565b6000806040838503121561521657600080fd5b61521f83614da9565b9150614e2160208401614c3f565b600080600080600060a0868803121561524557600080fd5b61524e86614dc1565b9450602086015193506040860151925060608601519150614fbe60808701614dc1565b8060005b60028110156109e5578151845260209384019390910190600101615275565b600081518084526152ac8160208601602086016156f5565b601f01601f19169290920160200192915050565b6152ca8183615271565b604001919050565b600083516152e48184602088016156f5565b8351908301906152f88183602088016156f5565b01949350505050565b8681526153116020820187615271565b61531e6060820186615271565b61532b60a0820185615271565b61533860e0820184615271565b60609190911b7fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166101208201526101340195945050505050565b8381526153836020820184615271565b60608101919
0915260800192915050565b60408101612a108284615271565b602081526000613a4c6020830184615294565b60006060820161ffff86168352602063ffffffff86168185015260606040850152818551808452608086019150828701935060005b81811015615406578451835293830193918301916001016153ea565b509098975050505050505050565b60006101c08201905061ffff8816825263ffffffff808816602084015280871660408401528086166060840152846080840152835481811660a085015261546860c08501838360201c1663ffffffff169052565b61547f60e08501838360401c1663ffffffff169052565b6154976101008501838360601c1663ffffffff169052565b6154af6101208501838360801c1663ffffffff169052565b62ffffff60a082901c811661014086015260b882901c811661016086015260d082901c1661018085015260e81c6101a090930192909252979650505050505050565b82815260608101613a4c6020830184615271565b6000604082018483526020604081850152818551808452606086019150828701935060005b818110156155465784518352938301939183019160010161552a565b5090979650505050505050565b6000608082016bffffffffffffffffffffffff87168352602067ffffffffffffffff8716818501526001600160a01b0380871660408601526080606086015282865180855260a087019150838801945060005b818110156155c45785518416835294840194918401916001016155a6565b50909a9950505050505050505050565b604051610120810167ffffffffffffffff811182821017156155f8576155f8615852565b60405290565b6000821982111561561157615611615796565b500190565b600067ffffffffffffffff8083168185168083038211156152f8576152f8615796565b60006bffffffffffffffffffffffff8083168185168083038211156152f8576152f8615796565b60008261566f5761566f6157c5565b500490565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156156ac576156ac615796565b500290565b6000828210156156c3576156c3615796565b500390565b60006bffffffffffffffffffffffff838116908316818110156156ed576156ed615796565b039392505050565b60005b838110156157105781810151838201526020016156f8565b838111156109e55750506000910152565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82141561575357615753615796565b5060010190565b600067ffffffffffffffff80831681811415615778576
15778615796565b6001019392505050565b600082615791576157916157c5565b500690565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfe307866666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000806000a", +} + +var VRFCoordinatorV2ABI = VRFCoordinatorV2MetaData.ABI + +var VRFCoordinatorV2Bin = VRFCoordinatorV2MetaData.Bin + +func DeployVRFCoordinatorV2(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, blockhashStore common.Address, linkEthFeed common.Address) (common.Address, *types.Transaction, *VRFCoordinatorV2, error) { + parsed, err := VRFCoordinatorV2MetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFCoordinatorV2Bin), backend, link, blockhashStore, linkEthFeed) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFCoordinatorV2{address: address, abi: *parsed, VRFCoordinatorV2Caller: VRFCoordinatorV2Caller{contract: contract}, VRFCoordinatorV2Transactor: VRFCoordinatorV2Transactor{contract: contract}, VRFCoordinatorV2Filterer: VRFCoordinatorV2Filterer{contract: contract}}, nil +} + +type VRFCoordinatorV2 struct { + address common.Address + abi abi.ABI + VRFCoordinatorV2Caller + VRFCoordinatorV2Transactor + VRFCoordinatorV2Filterer +} + +type VRFCoordinatorV2Caller struct { + contract 
*bind.BoundContract +} + +type VRFCoordinatorV2Transactor struct { + contract *bind.BoundContract +} + +type VRFCoordinatorV2Filterer struct { + contract *bind.BoundContract +} + +type VRFCoordinatorV2Session struct { + Contract *VRFCoordinatorV2 + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorV2CallerSession struct { + Contract *VRFCoordinatorV2Caller + CallOpts bind.CallOpts +} + +type VRFCoordinatorV2TransactorSession struct { + Contract *VRFCoordinatorV2Transactor + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorV2Raw struct { + Contract *VRFCoordinatorV2 +} + +type VRFCoordinatorV2CallerRaw struct { + Contract *VRFCoordinatorV2Caller +} + +type VRFCoordinatorV2TransactorRaw struct { + Contract *VRFCoordinatorV2Transactor +} + +func NewVRFCoordinatorV2(address common.Address, backend bind.ContractBackend) (*VRFCoordinatorV2, error) { + abi, err := abi.JSON(strings.NewReader(VRFCoordinatorV2ABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFCoordinatorV2(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2{address: address, abi: abi, VRFCoordinatorV2Caller: VRFCoordinatorV2Caller{contract: contract}, VRFCoordinatorV2Transactor: VRFCoordinatorV2Transactor{contract: contract}, VRFCoordinatorV2Filterer: VRFCoordinatorV2Filterer{contract: contract}}, nil +} + +func NewVRFCoordinatorV2Caller(address common.Address, caller bind.ContractCaller) (*VRFCoordinatorV2Caller, error) { + contract, err := bindVRFCoordinatorV2(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2Caller{contract: contract}, nil +} + +func NewVRFCoordinatorV2Transactor(address common.Address, transactor bind.ContractTransactor) (*VRFCoordinatorV2Transactor, error) { + contract, err := bindVRFCoordinatorV2(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2Transactor{contract: contract}, nil +} + 
+func NewVRFCoordinatorV2Filterer(address common.Address, filterer bind.ContractFilterer) (*VRFCoordinatorV2Filterer, error) { + contract, err := bindVRFCoordinatorV2(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2Filterer{contract: contract}, nil +} + +func bindVRFCoordinatorV2(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFCoordinatorV2MetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinatorV2.Contract.VRFCoordinatorV2Caller.contract.Call(opts, result, method, params...) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.VRFCoordinatorV2Transactor.contract.Transfer(opts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.VRFCoordinatorV2Transactor.contract.Transact(opts, method, params...) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinatorV2.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.contract.Transfer(opts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) BLOCKHASHSTORE(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "BLOCKHASH_STORE") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) BLOCKHASHSTORE() (common.Address, error) { + return _VRFCoordinatorV2.Contract.BLOCKHASHSTORE(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) BLOCKHASHSTORE() (common.Address, error) { + return _VRFCoordinatorV2.Contract.BLOCKHASHSTORE(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) PLI() (common.Address, error) { + return _VRFCoordinatorV2.Contract.PLI(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) PLI() (common.Address, error) { + return _VRFCoordinatorV2.Contract.PLI(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) PLIETHFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := 
_VRFCoordinatorV2.contract.Call(opts, &out, "PLI_ETH_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) PLIETHFEED() (common.Address, error) { + return _VRFCoordinatorV2.Contract.PLIETHFEED(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) PLIETHFEED() (common.Address, error) { + return _VRFCoordinatorV2.Contract.PLIETHFEED(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "MAX_CONSUMERS") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) MAXCONSUMERS() (uint16, error) { + return _VRFCoordinatorV2.Contract.MAXCONSUMERS(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) MAXCONSUMERS() (uint16, error) { + return _VRFCoordinatorV2.Contract.MAXCONSUMERS(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) MAXNUMWORDS(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "MAX_NUM_WORDS") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) MAXNUMWORDS() (uint32, error) { + return _VRFCoordinatorV2.Contract.MAXNUMWORDS(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) MAXNUMWORDS() (uint32, error) { + return _VRFCoordinatorV2.Contract.MAXNUMWORDS(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) MAXREQUESTCONFIRMATIONS(opts 
*bind.CallOpts) (uint16, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "MAX_REQUEST_CONFIRMATIONS") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) MAXREQUESTCONFIRMATIONS() (uint16, error) { + return _VRFCoordinatorV2.Contract.MAXREQUESTCONFIRMATIONS(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) MAXREQUESTCONFIRMATIONS() (uint16, error) { + return _VRFCoordinatorV2.Contract.MAXREQUESTCONFIRMATIONS(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) GetCommitment(opts *bind.CallOpts, requestId *big.Int) ([32]byte, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "getCommitment", requestId) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) GetCommitment(requestId *big.Int) ([32]byte, error) { + return _VRFCoordinatorV2.Contract.GetCommitment(&_VRFCoordinatorV2.CallOpts, requestId) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) GetCommitment(requestId *big.Int) ([32]byte, error) { + return _VRFCoordinatorV2.Contract.GetCommitment(&_VRFCoordinatorV2.CallOpts, requestId) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) GetConfig(opts *bind.CallOpts) (GetConfig, + + error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "getConfig") + + outstruct := new(GetConfig) + if err != nil { + return *outstruct, err + } + + outstruct.MinimumRequestConfirmations = *abi.ConvertType(out[0], new(uint16)).(*uint16) + outstruct.MaxGasLimit = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.StalenessSeconds = *abi.ConvertType(out[2], new(uint32)).(*uint32) + 
outstruct.GasAfterPaymentCalculation = *abi.ConvertType(out[3], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) GetConfig() (GetConfig, + + error) { + return _VRFCoordinatorV2.Contract.GetConfig(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) GetConfig() (GetConfig, + + error) { + return _VRFCoordinatorV2.Contract.GetConfig(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) GetCurrentSubId(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "getCurrentSubId") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) GetCurrentSubId() (uint64, error) { + return _VRFCoordinatorV2.Contract.GetCurrentSubId(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) GetCurrentSubId() (uint64, error) { + return _VRFCoordinatorV2.Contract.GetCurrentSubId(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) GetFallbackWeiPerUnitLink(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "getFallbackWeiPerUnitLink") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) GetFallbackWeiPerUnitLink() (*big.Int, error) { + return _VRFCoordinatorV2.Contract.GetFallbackWeiPerUnitLink(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) GetFallbackWeiPerUnitLink() (*big.Int, error) { + return _VRFCoordinatorV2.Contract.GetFallbackWeiPerUnitLink(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) GetFeeConfig(opts 
*bind.CallOpts) (GetFeeConfig, + + error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "getFeeConfig") + + outstruct := new(GetFeeConfig) + if err != nil { + return *outstruct, err + } + + outstruct.FulfillmentFlatFeeLinkPPMTier1 = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeLinkPPMTier2 = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeLinkPPMTier3 = *abi.ConvertType(out[2], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeLinkPPMTier4 = *abi.ConvertType(out[3], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeLinkPPMTier5 = *abi.ConvertType(out[4], new(uint32)).(*uint32) + outstruct.ReqsForTier2 = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + outstruct.ReqsForTier3 = *abi.ConvertType(out[6], new(*big.Int)).(**big.Int) + outstruct.ReqsForTier4 = *abi.ConvertType(out[7], new(*big.Int)).(**big.Int) + outstruct.ReqsForTier5 = *abi.ConvertType(out[8], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) GetFeeConfig() (GetFeeConfig, + + error) { + return _VRFCoordinatorV2.Contract.GetFeeConfig(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) GetFeeConfig() (GetFeeConfig, + + error) { + return _VRFCoordinatorV2.Contract.GetFeeConfig(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) GetFeeTier(opts *bind.CallOpts, reqCount uint64) (uint32, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "getFeeTier", reqCount) + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) GetFeeTier(reqCount uint64) (uint32, error) { + return _VRFCoordinatorV2.Contract.GetFeeTier(&_VRFCoordinatorV2.CallOpts, reqCount) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) 
GetFeeTier(reqCount uint64) (uint32, error) { + return _VRFCoordinatorV2.Contract.GetFeeTier(&_VRFCoordinatorV2.CallOpts, reqCount) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) GetRequestConfig(opts *bind.CallOpts) (uint16, uint32, [][32]byte, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "getRequestConfig") + + if err != nil { + return *new(uint16), *new(uint32), *new([][32]byte), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + out1 := *abi.ConvertType(out[1], new(uint32)).(*uint32) + out2 := *abi.ConvertType(out[2], new([][32]byte)).(*[][32]byte) + + return out0, out1, out2, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) GetRequestConfig() (uint16, uint32, [][32]byte, error) { + return _VRFCoordinatorV2.Contract.GetRequestConfig(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) GetRequestConfig() (uint16, uint32, [][32]byte, error) { + return _VRFCoordinatorV2.Contract.GetRequestConfig(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) GetSubscription(opts *bind.CallOpts, subId uint64) (GetSubscription, + + error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "getSubscription", subId) + + outstruct := new(GetSubscription) + if err != nil { + return *outstruct, err + } + + outstruct.Balance = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.ReqCount = *abi.ConvertType(out[1], new(uint64)).(*uint64) + outstruct.Owner = *abi.ConvertType(out[2], new(common.Address)).(*common.Address) + outstruct.Consumers = *abi.ConvertType(out[3], new([]common.Address)).(*[]common.Address) + + return *outstruct, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) GetSubscription(subId uint64) (GetSubscription, + + error) { + return _VRFCoordinatorV2.Contract.GetSubscription(&_VRFCoordinatorV2.CallOpts, subId) +} + +func (_VRFCoordinatorV2 
*VRFCoordinatorV2CallerSession) GetSubscription(subId uint64) (GetSubscription, + + error) { + return _VRFCoordinatorV2.Contract.GetSubscription(&_VRFCoordinatorV2.CallOpts, subId) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) GetTotalBalance(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "getTotalBalance") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) GetTotalBalance() (*big.Int, error) { + return _VRFCoordinatorV2.Contract.GetTotalBalance(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) GetTotalBalance() (*big.Int, error) { + return _VRFCoordinatorV2.Contract.GetTotalBalance(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) HashOfKey(opts *bind.CallOpts, publicKey [2]*big.Int) ([32]byte, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "hashOfKey", publicKey) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) HashOfKey(publicKey [2]*big.Int) ([32]byte, error) { + return _VRFCoordinatorV2.Contract.HashOfKey(&_VRFCoordinatorV2.CallOpts, publicKey) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) HashOfKey(publicKey [2]*big.Int) ([32]byte, error) { + return _VRFCoordinatorV2.Contract.HashOfKey(&_VRFCoordinatorV2.CallOpts, publicKey) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, 
err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) Owner() (common.Address, error) { + return _VRFCoordinatorV2.Contract.Owner(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) Owner() (common.Address, error) { + return _VRFCoordinatorV2.Contract.Owner(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) PendingRequestExists(opts *bind.CallOpts, subId uint64) (bool, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "pendingRequestExists", subId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) PendingRequestExists(subId uint64) (bool, error) { + return _VRFCoordinatorV2.Contract.PendingRequestExists(&_VRFCoordinatorV2.CallOpts, subId) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) PendingRequestExists(subId uint64) (bool, error) { + return _VRFCoordinatorV2.Contract.PendingRequestExists(&_VRFCoordinatorV2.CallOpts, subId) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Caller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _VRFCoordinatorV2.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) TypeAndVersion() (string, error) { + return _VRFCoordinatorV2.Contract.TypeAndVersion(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2CallerSession) TypeAndVersion() (string, error) { + return _VRFCoordinatorV2.Contract.TypeAndVersion(&_VRFCoordinatorV2.CallOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "acceptOwnership") +} + +func 
(_VRFCoordinatorV2 *VRFCoordinatorV2Session) AcceptOwnership() (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.AcceptOwnership(&_VRFCoordinatorV2.TransactOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.AcceptOwnership(&_VRFCoordinatorV2.TransactOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "acceptSubscriptionOwnerTransfer", subId) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) AcceptSubscriptionOwnerTransfer(subId uint64) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.AcceptSubscriptionOwnerTransfer(&_VRFCoordinatorV2.TransactOpts, subId) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) AcceptSubscriptionOwnerTransfer(subId uint64) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.AcceptSubscriptionOwnerTransfer(&_VRFCoordinatorV2.TransactOpts, subId) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) AddConsumer(opts *bind.TransactOpts, subId uint64, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "addConsumer", subId, consumer) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) AddConsumer(subId uint64, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.AddConsumer(&_VRFCoordinatorV2.TransactOpts, subId, consumer) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) AddConsumer(subId uint64, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.AddConsumer(&_VRFCoordinatorV2.TransactOpts, subId, consumer) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) CancelSubscription(opts *bind.TransactOpts, subId uint64, to common.Address) 
(*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "cancelSubscription", subId, to) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) CancelSubscription(subId uint64, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.CancelSubscription(&_VRFCoordinatorV2.TransactOpts, subId, to) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) CancelSubscription(subId uint64, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.CancelSubscription(&_VRFCoordinatorV2.TransactOpts, subId, to) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "createSubscription") +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) CreateSubscription() (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.CreateSubscription(&_VRFCoordinatorV2.TransactOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) CreateSubscription() (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.CreateSubscription(&_VRFCoordinatorV2.TransactOpts) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) DeregisterProvingKey(opts *bind.TransactOpts, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "deregisterProvingKey", publicProvingKey) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) DeregisterProvingKey(publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.DeregisterProvingKey(&_VRFCoordinatorV2.TransactOpts, publicProvingKey) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) DeregisterProvingKey(publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.DeregisterProvingKey(&_VRFCoordinatorV2.TransactOpts, publicProvingKey) +} + +func (_VRFCoordinatorV2 
*VRFCoordinatorV2Transactor) FulfillRandomWords(opts *bind.TransactOpts, proof VRFProof, rc VRFCoordinatorV2RequestCommitment) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "fulfillRandomWords", proof, rc) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) FulfillRandomWords(proof VRFProof, rc VRFCoordinatorV2RequestCommitment) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.FulfillRandomWords(&_VRFCoordinatorV2.TransactOpts, proof, rc) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) FulfillRandomWords(proof VRFProof, rc VRFCoordinatorV2RequestCommitment) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.FulfillRandomWords(&_VRFCoordinatorV2.TransactOpts, proof, rc) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "onTokenTransfer", arg0, amount, data) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.OnTokenTransfer(&_VRFCoordinatorV2.TransactOpts, arg0, amount, data) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.OnTokenTransfer(&_VRFCoordinatorV2.TransactOpts, arg0, amount, data) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "oracleWithdraw", recipient, amount) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) OracleWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return 
_VRFCoordinatorV2.Contract.OracleWithdraw(&_VRFCoordinatorV2.TransactOpts, recipient, amount) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) OracleWithdraw(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.OracleWithdraw(&_VRFCoordinatorV2.TransactOpts, recipient, amount) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) OwnerCancelSubscription(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "ownerCancelSubscription", subId) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) OwnerCancelSubscription(subId uint64) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.OwnerCancelSubscription(&_VRFCoordinatorV2.TransactOpts, subId) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) OwnerCancelSubscription(subId uint64) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.OwnerCancelSubscription(&_VRFCoordinatorV2.TransactOpts, subId) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "recoverFunds", to) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.RecoverFunds(&_VRFCoordinatorV2.TransactOpts, to) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.RecoverFunds(&_VRFCoordinatorV2.TransactOpts, to) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) RegisterProvingKey(opts *bind.TransactOpts, oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "registerProvingKey", oracle, publicProvingKey) +} + +func (_VRFCoordinatorV2 
*VRFCoordinatorV2Session) RegisterProvingKey(oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.RegisterProvingKey(&_VRFCoordinatorV2.TransactOpts, oracle, publicProvingKey) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) RegisterProvingKey(oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.RegisterProvingKey(&_VRFCoordinatorV2.TransactOpts, oracle, publicProvingKey) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) RemoveConsumer(opts *bind.TransactOpts, subId uint64, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "removeConsumer", subId, consumer) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) RemoveConsumer(subId uint64, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.RemoveConsumer(&_VRFCoordinatorV2.TransactOpts, subId, consumer) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) RemoveConsumer(subId uint64, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.RemoveConsumer(&_VRFCoordinatorV2.TransactOpts, subId, consumer) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) RequestRandomWords(opts *bind.TransactOpts, keyHash [32]byte, subId uint64, requestConfirmations uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "requestRandomWords", keyHash, subId, requestConfirmations, callbackGasLimit, numWords) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) RequestRandomWords(keyHash [32]byte, subId uint64, requestConfirmations uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.RequestRandomWords(&_VRFCoordinatorV2.TransactOpts, keyHash, subId, requestConfirmations, callbackGasLimit, 
numWords) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) RequestRandomWords(keyHash [32]byte, subId uint64, requestConfirmations uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.RequestRandomWords(&_VRFCoordinatorV2.TransactOpts, keyHash, subId, requestConfirmations, callbackGasLimit, numWords) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId uint64, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "requestSubscriptionOwnerTransfer", subId, newOwner) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) RequestSubscriptionOwnerTransfer(subId uint64, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.RequestSubscriptionOwnerTransfer(&_VRFCoordinatorV2.TransactOpts, subId, newOwner) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) RequestSubscriptionOwnerTransfer(subId uint64, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.RequestSubscriptionOwnerTransfer(&_VRFCoordinatorV2.TransactOpts, subId, newOwner) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) SetConfig(opts *bind.TransactOpts, minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig VRFCoordinatorV2FeeConfig) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "setConfig", minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig VRFCoordinatorV2FeeConfig) 
(*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.SetConfig(&_VRFCoordinatorV2.TransactOpts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig VRFCoordinatorV2FeeConfig) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.SetConfig(&_VRFCoordinatorV2.TransactOpts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Transactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Session) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.TransferOwnership(&_VRFCoordinatorV2.TransactOpts, to) +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2TransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2.Contract.TransferOwnership(&_VRFCoordinatorV2.TransactOpts, to) +} + +type VRFCoordinatorV2ConfigSetIterator struct { + Event *VRFCoordinatorV2ConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2ConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2ConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select 
{ + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2ConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2ConfigSetIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2ConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2ConfigSet struct { + MinimumRequestConfirmations uint16 + MaxGasLimit uint32 + StalenessSeconds uint32 + GasAfterPaymentCalculation uint32 + FallbackWeiPerUnitLink *big.Int + FeeConfig VRFCoordinatorV2FeeConfig + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterConfigSet(opts *bind.FilterOpts) (*VRFCoordinatorV2ConfigSetIterator, error) { + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &VRFCoordinatorV2ConfigSetIterator{contract: _VRFCoordinatorV2.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2ConfigSet) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2ConfigSet) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseConfigSet(log types.Log) 
(*VRFCoordinatorV2ConfigSet, error) { + event := new(VRFCoordinatorV2ConfigSet) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2FundsRecoveredIterator struct { + Event *VRFCoordinatorV2FundsRecovered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2FundsRecoveredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2FundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2FundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2FundsRecoveredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2FundsRecoveredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2FundsRecovered struct { + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterFundsRecovered(opts *bind.FilterOpts) (*VRFCoordinatorV2FundsRecoveredIterator, error) { + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return &VRFCoordinatorV2FundsRecoveredIterator{contract: _VRFCoordinatorV2.contract, event: "FundsRecovered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- 
*VRFCoordinatorV2FundsRecovered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2FundsRecovered) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseFundsRecovered(log types.Log) (*VRFCoordinatorV2FundsRecovered, error) { + event := new(VRFCoordinatorV2FundsRecovered) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2OwnershipTransferRequestedIterator struct { + Event *VRFCoordinatorV2OwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2OwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2OwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2OwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + 
return it.Next() + } +} + +func (it *VRFCoordinatorV2OwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2OwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2OwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorV2OwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2OwnershipTransferRequestedIterator{contract: _VRFCoordinatorV2.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2OwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2OwnershipTransferRequested) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil 
{ + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseOwnershipTransferRequested(log types.Log) (*VRFCoordinatorV2OwnershipTransferRequested, error) { + event := new(VRFCoordinatorV2OwnershipTransferRequested) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2OwnershipTransferredIterator struct { + Event *VRFCoordinatorV2OwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2OwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2OwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2OwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2OwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2OwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2OwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterOwnershipTransferred(opts 
*bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorV2OwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2OwnershipTransferredIterator{contract: _VRFCoordinatorV2.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2OwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2OwnershipTransferred) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseOwnershipTransferred(log types.Log) (*VRFCoordinatorV2OwnershipTransferred, error) { + event := new(VRFCoordinatorV2OwnershipTransferred) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "OwnershipTransferred", 
log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2ProvingKeyDeregisteredIterator struct { + Event *VRFCoordinatorV2ProvingKeyDeregistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2ProvingKeyDeregisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2ProvingKeyDeregistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2ProvingKeyDeregistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2ProvingKeyDeregisteredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2ProvingKeyDeregisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2ProvingKeyDeregistered struct { + KeyHash [32]byte + Oracle common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterProvingKeyDeregistered(opts *bind.FilterOpts, oracle []common.Address) (*VRFCoordinatorV2ProvingKeyDeregisteredIterator, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "ProvingKeyDeregistered", oracleRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2ProvingKeyDeregisteredIterator{contract: _VRFCoordinatorV2.contract, event: "ProvingKeyDeregistered", logs: logs, sub: sub}, nil 
+} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchProvingKeyDeregistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2ProvingKeyDeregistered, oracle []common.Address) (event.Subscription, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "ProvingKeyDeregistered", oracleRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2ProvingKeyDeregistered) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "ProvingKeyDeregistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseProvingKeyDeregistered(log types.Log) (*VRFCoordinatorV2ProvingKeyDeregistered, error) { + event := new(VRFCoordinatorV2ProvingKeyDeregistered) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "ProvingKeyDeregistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2ProvingKeyRegisteredIterator struct { + Event *VRFCoordinatorV2ProvingKeyRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2ProvingKeyRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2ProvingKeyRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false 
+ } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2ProvingKeyRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2ProvingKeyRegisteredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2ProvingKeyRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2ProvingKeyRegistered struct { + KeyHash [32]byte + Oracle common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterProvingKeyRegistered(opts *bind.FilterOpts, oracle []common.Address) (*VRFCoordinatorV2ProvingKeyRegisteredIterator, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "ProvingKeyRegistered", oracleRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2ProvingKeyRegisteredIterator{contract: _VRFCoordinatorV2.contract, event: "ProvingKeyRegistered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchProvingKeyRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2ProvingKeyRegistered, oracle []common.Address) (event.Subscription, error) { + + var oracleRule []interface{} + for _, oracleItem := range oracle { + oracleRule = append(oracleRule, oracleItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "ProvingKeyRegistered", oracleRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2ProvingKeyRegistered) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, 
"ProvingKeyRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseProvingKeyRegistered(log types.Log) (*VRFCoordinatorV2ProvingKeyRegistered, error) { + event := new(VRFCoordinatorV2ProvingKeyRegistered) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "ProvingKeyRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2RandomWordsFulfilledIterator struct { + Event *VRFCoordinatorV2RandomWordsFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2RandomWordsFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2RandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2RandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2RandomWordsFulfilledIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2RandomWordsFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2RandomWordsFulfilled struct { + RequestId *big.Int + OutputSeed *big.Int + Payment *big.Int + Success bool + Raw types.Log +} + +func (_VRFCoordinatorV2 
*VRFCoordinatorV2Filterer) FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int) (*VRFCoordinatorV2RandomWordsFulfilledIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "RandomWordsFulfilled", requestIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2RandomWordsFulfilledIterator{contract: _VRFCoordinatorV2.contract, event: "RandomWordsFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2RandomWordsFulfilled, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "RandomWordsFulfilled", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2RandomWordsFulfilled) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseRandomWordsFulfilled(log types.Log) (*VRFCoordinatorV2RandomWordsFulfilled, error) { + event := new(VRFCoordinatorV2RandomWordsFulfilled) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
VRFCoordinatorV2RandomWordsRequestedIterator struct { + Event *VRFCoordinatorV2RandomWordsRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2RandomWordsRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2RandomWordsRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2RandomWordsRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2RandomWordsRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2RandomWordsRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2RandomWordsRequested struct { + KeyHash [32]byte + RequestId *big.Int + PreSeed *big.Int + SubId uint64 + MinimumRequestConfirmations uint16 + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []uint64, sender []common.Address) (*VRFCoordinatorV2RandomWordsRequestedIterator, error) { + + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) 
+ } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "RandomWordsRequested", keyHashRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2RandomWordsRequestedIterator{contract: _VRFCoordinatorV2.contract, event: "RandomWordsRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2RandomWordsRequested, keyHash [][32]byte, subId []uint64, sender []common.Address) (event.Subscription, error) { + + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "RandomWordsRequested", keyHashRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2RandomWordsRequested) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "RandomWordsRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseRandomWordsRequested(log types.Log) (*VRFCoordinatorV2RandomWordsRequested, error) { + event := new(VRFCoordinatorV2RandomWordsRequested) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "RandomWordsRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
VRFCoordinatorV2SubscriptionCanceledIterator struct { + Event *VRFCoordinatorV2SubscriptionCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2SubscriptionCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2SubscriptionCanceledIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2SubscriptionCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2SubscriptionCanceled struct { + SubId uint64 + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterSubscriptionCanceled(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionCanceledIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "SubscriptionCanceled", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2SubscriptionCanceledIterator{contract: _VRFCoordinatorV2.contract, event: "SubscriptionCanceled", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- 
*VRFCoordinatorV2SubscriptionCanceled, subId []uint64) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "SubscriptionCanceled", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2SubscriptionCanceled) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseSubscriptionCanceled(log types.Log) (*VRFCoordinatorV2SubscriptionCanceled, error) { + event := new(VRFCoordinatorV2SubscriptionCanceled) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2SubscriptionConsumerAddedIterator struct { + Event *VRFCoordinatorV2SubscriptionConsumerAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2SubscriptionConsumerAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionConsumerAdded) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2SubscriptionConsumerAddedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2SubscriptionConsumerAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2SubscriptionConsumerAdded struct { + SubId uint64 + Consumer common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionConsumerAddedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "SubscriptionConsumerAdded", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2SubscriptionConsumerAddedIterator{contract: _VRFCoordinatorV2.contract, event: "SubscriptionConsumerAdded", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2SubscriptionConsumerAdded, subId []uint64) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "SubscriptionConsumerAdded", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2SubscriptionConsumerAdded) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink 
<- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseSubscriptionConsumerAdded(log types.Log) (*VRFCoordinatorV2SubscriptionConsumerAdded, error) { + event := new(VRFCoordinatorV2SubscriptionConsumerAdded) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2SubscriptionConsumerRemovedIterator struct { + Event *VRFCoordinatorV2SubscriptionConsumerRemoved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2SubscriptionConsumerRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2SubscriptionConsumerRemovedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2SubscriptionConsumerRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2SubscriptionConsumerRemoved struct { + SubId uint64 + Consumer common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterSubscriptionConsumerRemoved(opts 
*bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionConsumerRemovedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "SubscriptionConsumerRemoved", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2SubscriptionConsumerRemovedIterator{contract: _VRFCoordinatorV2.contract, event: "SubscriptionConsumerRemoved", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2SubscriptionConsumerRemoved, subId []uint64) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "SubscriptionConsumerRemoved", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2SubscriptionConsumerRemoved) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseSubscriptionConsumerRemoved(log types.Log) (*VRFCoordinatorV2SubscriptionConsumerRemoved, error) { + event := new(VRFCoordinatorV2SubscriptionConsumerRemoved) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2SubscriptionCreatedIterator struct { + Event 
*VRFCoordinatorV2SubscriptionCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2SubscriptionCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2SubscriptionCreatedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2SubscriptionCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2SubscriptionCreated struct { + SubId uint64 + Owner common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterSubscriptionCreated(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionCreatedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "SubscriptionCreated", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2SubscriptionCreatedIterator{contract: _VRFCoordinatorV2.contract, event: "SubscriptionCreated", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2SubscriptionCreated, subId []uint64) (event.Subscription, error) { + + var 
subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "SubscriptionCreated", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2SubscriptionCreated) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseSubscriptionCreated(log types.Log) (*VRFCoordinatorV2SubscriptionCreated, error) { + event := new(VRFCoordinatorV2SubscriptionCreated) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2SubscriptionFundedIterator struct { + Event *VRFCoordinatorV2SubscriptionFunded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2SubscriptionFundedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := 
<-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2SubscriptionFundedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2SubscriptionFundedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2SubscriptionFunded struct { + SubId uint64 + OldBalance *big.Int + NewBalance *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterSubscriptionFunded(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionFundedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "SubscriptionFunded", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2SubscriptionFundedIterator{contract: _VRFCoordinatorV2.contract, event: "SubscriptionFunded", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2SubscriptionFunded, subId []uint64) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "SubscriptionFunded", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2SubscriptionFunded) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 
*VRFCoordinatorV2Filterer) ParseSubscriptionFunded(log types.Log) (*VRFCoordinatorV2SubscriptionFunded, error) { + event := new(VRFCoordinatorV2SubscriptionFunded) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2SubscriptionOwnerTransferRequestedIterator struct { + Event *VRFCoordinatorV2SubscriptionOwnerTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2SubscriptionOwnerTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2SubscriptionOwnerTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2SubscriptionOwnerTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2SubscriptionOwnerTransferRequested struct { + SubId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionOwnerTransferRequestedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := 
range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "SubscriptionOwnerTransferRequested", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2SubscriptionOwnerTransferRequestedIterator{contract: _VRFCoordinatorV2.contract, event: "SubscriptionOwnerTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2SubscriptionOwnerTransferRequested, subId []uint64) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "SubscriptionOwnerTransferRequested", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2SubscriptionOwnerTransferRequested) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseSubscriptionOwnerTransferRequested(log types.Log) (*VRFCoordinatorV2SubscriptionOwnerTransferRequested, error) { + event := new(VRFCoordinatorV2SubscriptionOwnerTransferRequested) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2SubscriptionOwnerTransferredIterator struct { + Event *VRFCoordinatorV2SubscriptionOwnerTransferred + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2SubscriptionOwnerTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2SubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2SubscriptionOwnerTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2SubscriptionOwnerTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2SubscriptionOwnerTransferred struct { + SubId uint64 + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionOwnerTransferredIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.FilterLogs(opts, "SubscriptionOwnerTransferred", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2SubscriptionOwnerTransferredIterator{contract: _VRFCoordinatorV2.contract, event: "SubscriptionOwnerTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- 
*VRFCoordinatorV2SubscriptionOwnerTransferred, subId []uint64) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2.contract.WatchLogs(opts, "SubscriptionOwnerTransferred", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2SubscriptionOwnerTransferred) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2Filterer) ParseSubscriptionOwnerTransferred(log types.Log) (*VRFCoordinatorV2SubscriptionOwnerTransferred, error) { + event := new(VRFCoordinatorV2SubscriptionOwnerTransferred) + if err := _VRFCoordinatorV2.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetConfig struct { + MinimumRequestConfirmations uint16 + MaxGasLimit uint32 + StalenessSeconds uint32 + GasAfterPaymentCalculation uint32 +} +type GetFeeConfig struct { + FulfillmentFlatFeeLinkPPMTier1 uint32 + FulfillmentFlatFeeLinkPPMTier2 uint32 + FulfillmentFlatFeeLinkPPMTier3 uint32 + FulfillmentFlatFeeLinkPPMTier4 uint32 + FulfillmentFlatFeeLinkPPMTier5 uint32 + ReqsForTier2 *big.Int + ReqsForTier3 *big.Int + ReqsForTier4 *big.Int + ReqsForTier5 *big.Int +} +type GetSubscription struct { + Balance *big.Int + ReqCount uint64 + Owner common.Address + Consumers []common.Address +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch 
log.Topics[0] { + case _VRFCoordinatorV2.abi.Events["ConfigSet"].ID: + return _VRFCoordinatorV2.ParseConfigSet(log) + case _VRFCoordinatorV2.abi.Events["FundsRecovered"].ID: + return _VRFCoordinatorV2.ParseFundsRecovered(log) + case _VRFCoordinatorV2.abi.Events["OwnershipTransferRequested"].ID: + return _VRFCoordinatorV2.ParseOwnershipTransferRequested(log) + case _VRFCoordinatorV2.abi.Events["OwnershipTransferred"].ID: + return _VRFCoordinatorV2.ParseOwnershipTransferred(log) + case _VRFCoordinatorV2.abi.Events["ProvingKeyDeregistered"].ID: + return _VRFCoordinatorV2.ParseProvingKeyDeregistered(log) + case _VRFCoordinatorV2.abi.Events["ProvingKeyRegistered"].ID: + return _VRFCoordinatorV2.ParseProvingKeyRegistered(log) + case _VRFCoordinatorV2.abi.Events["RandomWordsFulfilled"].ID: + return _VRFCoordinatorV2.ParseRandomWordsFulfilled(log) + case _VRFCoordinatorV2.abi.Events["RandomWordsRequested"].ID: + return _VRFCoordinatorV2.ParseRandomWordsRequested(log) + case _VRFCoordinatorV2.abi.Events["SubscriptionCanceled"].ID: + return _VRFCoordinatorV2.ParseSubscriptionCanceled(log) + case _VRFCoordinatorV2.abi.Events["SubscriptionConsumerAdded"].ID: + return _VRFCoordinatorV2.ParseSubscriptionConsumerAdded(log) + case _VRFCoordinatorV2.abi.Events["SubscriptionConsumerRemoved"].ID: + return _VRFCoordinatorV2.ParseSubscriptionConsumerRemoved(log) + case _VRFCoordinatorV2.abi.Events["SubscriptionCreated"].ID: + return _VRFCoordinatorV2.ParseSubscriptionCreated(log) + case _VRFCoordinatorV2.abi.Events["SubscriptionFunded"].ID: + return _VRFCoordinatorV2.ParseSubscriptionFunded(log) + case _VRFCoordinatorV2.abi.Events["SubscriptionOwnerTransferRequested"].ID: + return _VRFCoordinatorV2.ParseSubscriptionOwnerTransferRequested(log) + case _VRFCoordinatorV2.abi.Events["SubscriptionOwnerTransferred"].ID: + return _VRFCoordinatorV2.ParseSubscriptionOwnerTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} 
+ +func (VRFCoordinatorV2ConfigSet) Topic() common.Hash { + return common.HexToHash("0xc21e3bd2e0b339d2848f0dd956947a88966c242c0c0c582a33137a5c1ceb5cb2") +} + +func (VRFCoordinatorV2FundsRecovered) Topic() common.Hash { + return common.HexToHash("0x59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600") +} + +func (VRFCoordinatorV2OwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFCoordinatorV2OwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFCoordinatorV2ProvingKeyDeregistered) Topic() common.Hash { + return common.HexToHash("0x72be339577868f868798bac2c93e52d6f034fef4689a9848996c14ebb7416c0d") +} + +func (VRFCoordinatorV2ProvingKeyRegistered) Topic() common.Hash { + return common.HexToHash("0xe729ae16526293f74ade739043022254f1489f616295a25bf72dfb4511ed73b8") +} + +func (VRFCoordinatorV2RandomWordsFulfilled) Topic() common.Hash { + return common.HexToHash("0x7dffc5ae5ee4e2e4df1651cf6ad329a73cebdb728f37ea0187b9b17e036756e4") +} + +func (VRFCoordinatorV2RandomWordsRequested) Topic() common.Hash { + return common.HexToHash("0x63373d1c4696214b898952999c9aaec57dac1ee2723cec59bea6888f489a9772") +} + +func (VRFCoordinatorV2SubscriptionCanceled) Topic() common.Hash { + return common.HexToHash("0xe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd49815") +} + +func (VRFCoordinatorV2SubscriptionConsumerAdded) Topic() common.Hash { + return common.HexToHash("0x43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e0") +} + +func (VRFCoordinatorV2SubscriptionConsumerRemoved) Topic() common.Hash { + return common.HexToHash("0x182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b") +} + +func (VRFCoordinatorV2SubscriptionCreated) Topic() common.Hash { + return 
common.HexToHash("0x464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf") +} + +func (VRFCoordinatorV2SubscriptionFunded) Topic() common.Hash { + return common.HexToHash("0xd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f8") +} + +func (VRFCoordinatorV2SubscriptionOwnerTransferRequested) Topic() common.Hash { + return common.HexToHash("0x69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be") +} + +func (VRFCoordinatorV2SubscriptionOwnerTransferred) Topic() common.Hash { + return common.HexToHash("0x6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f0") +} + +func (_VRFCoordinatorV2 *VRFCoordinatorV2) Address() common.Address { + return _VRFCoordinatorV2.address +} + +type VRFCoordinatorV2Interface interface { + BLOCKHASHSTORE(opts *bind.CallOpts) (common.Address, error) + + PLI(opts *bind.CallOpts) (common.Address, error) + + PLIETHFEED(opts *bind.CallOpts) (common.Address, error) + + MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) + + MAXNUMWORDS(opts *bind.CallOpts) (uint32, error) + + MAXREQUESTCONFIRMATIONS(opts *bind.CallOpts) (uint16, error) + + GetCommitment(opts *bind.CallOpts, requestId *big.Int) ([32]byte, error) + + GetConfig(opts *bind.CallOpts) (GetConfig, + + error) + + GetCurrentSubId(opts *bind.CallOpts) (uint64, error) + + GetFallbackWeiPerUnitLink(opts *bind.CallOpts) (*big.Int, error) + + GetFeeConfig(opts *bind.CallOpts) (GetFeeConfig, + + error) + + GetFeeTier(opts *bind.CallOpts, reqCount uint64) (uint32, error) + + GetRequestConfig(opts *bind.CallOpts) (uint16, uint32, [][32]byte, error) + + GetSubscription(opts *bind.CallOpts, subId uint64) (GetSubscription, + + error) + + GetTotalBalance(opts *bind.CallOpts) (*big.Int, error) + + HashOfKey(opts *bind.CallOpts, publicKey [2]*big.Int) ([32]byte, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + PendingRequestExists(opts *bind.CallOpts, subId uint64) (bool, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + 
AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) + + AddConsumer(opts *bind.TransactOpts, subId uint64, consumer common.Address) (*types.Transaction, error) + + CancelSubscription(opts *bind.TransactOpts, subId uint64, to common.Address) (*types.Transaction, error) + + CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) + + DeregisterProvingKey(opts *bind.TransactOpts, publicProvingKey [2]*big.Int) (*types.Transaction, error) + + FulfillRandomWords(opts *bind.TransactOpts, proof VRFProof, rc VRFCoordinatorV2RequestCommitment) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) + + OwnerCancelSubscription(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + RegisterProvingKey(opts *bind.TransactOpts, oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) + + RemoveConsumer(opts *bind.TransactOpts, subId uint64, consumer common.Address) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, keyHash [32]byte, subId uint64, requestConfirmations uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) + + RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId uint64, newOwner common.Address) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig VRFCoordinatorV2FeeConfig) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) 
(*types.Transaction, error) + + FilterConfigSet(opts *bind.FilterOpts) (*VRFCoordinatorV2ConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2ConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*VRFCoordinatorV2ConfigSet, error) + + FilterFundsRecovered(opts *bind.FilterOpts) (*VRFCoordinatorV2FundsRecoveredIterator, error) + + WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2FundsRecovered) (event.Subscription, error) + + ParseFundsRecovered(log types.Log) (*VRFCoordinatorV2FundsRecovered, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorV2OwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2OwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFCoordinatorV2OwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorV2OwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2OwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFCoordinatorV2OwnershipTransferred, error) + + FilterProvingKeyDeregistered(opts *bind.FilterOpts, oracle []common.Address) (*VRFCoordinatorV2ProvingKeyDeregisteredIterator, error) + + WatchProvingKeyDeregistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2ProvingKeyDeregistered, oracle []common.Address) (event.Subscription, error) + + ParseProvingKeyDeregistered(log types.Log) (*VRFCoordinatorV2ProvingKeyDeregistered, error) + + FilterProvingKeyRegistered(opts *bind.FilterOpts, oracle []common.Address) (*VRFCoordinatorV2ProvingKeyRegisteredIterator, error) + + 
WatchProvingKeyRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2ProvingKeyRegistered, oracle []common.Address) (event.Subscription, error) + + ParseProvingKeyRegistered(log types.Log) (*VRFCoordinatorV2ProvingKeyRegistered, error) + + FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int) (*VRFCoordinatorV2RandomWordsFulfilledIterator, error) + + WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2RandomWordsFulfilled, requestId []*big.Int) (event.Subscription, error) + + ParseRandomWordsFulfilled(log types.Log) (*VRFCoordinatorV2RandomWordsFulfilled, error) + + FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []uint64, sender []common.Address) (*VRFCoordinatorV2RandomWordsRequestedIterator, error) + + WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2RandomWordsRequested, keyHash [][32]byte, subId []uint64, sender []common.Address) (event.Subscription, error) + + ParseRandomWordsRequested(log types.Log) (*VRFCoordinatorV2RandomWordsRequested, error) + + FilterSubscriptionCanceled(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionCanceledIterator, error) + + WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2SubscriptionCanceled, subId []uint64) (event.Subscription, error) + + ParseSubscriptionCanceled(log types.Log) (*VRFCoordinatorV2SubscriptionCanceled, error) + + FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionConsumerAddedIterator, error) + + WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2SubscriptionConsumerAdded, subId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerAdded(log types.Log) (*VRFCoordinatorV2SubscriptionConsumerAdded, error) + + FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionConsumerRemovedIterator, error) + + 
WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2SubscriptionConsumerRemoved, subId []uint64) (event.Subscription, error) + + ParseSubscriptionConsumerRemoved(log types.Log) (*VRFCoordinatorV2SubscriptionConsumerRemoved, error) + + FilterSubscriptionCreated(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionCreatedIterator, error) + + WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2SubscriptionCreated, subId []uint64) (event.Subscription, error) + + ParseSubscriptionCreated(log types.Log) (*VRFCoordinatorV2SubscriptionCreated, error) + + FilterSubscriptionFunded(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionFundedIterator, error) + + WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2SubscriptionFunded, subId []uint64) (event.Subscription, error) + + ParseSubscriptionFunded(log types.Log) (*VRFCoordinatorV2SubscriptionFunded, error) + + FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionOwnerTransferRequestedIterator, error) + + WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2SubscriptionOwnerTransferRequested, subId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferRequested(log types.Log) (*VRFCoordinatorV2SubscriptionOwnerTransferRequested, error) + + FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subId []uint64) (*VRFCoordinatorV2SubscriptionOwnerTransferredIterator, error) + + WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2SubscriptionOwnerTransferred, subId []uint64) (event.Subscription, error) + + ParseSubscriptionOwnerTransferred(log types.Log) (*VRFCoordinatorV2SubscriptionOwnerTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git 
a/core/gethwrappers/generated/vrf_coordinator_v2_5/vrf_coordinator_v2_5.go b/core/gethwrappers/generated/vrf_coordinator_v2_5/vrf_coordinator_v2_5.go new file mode 100644 index 00000000..aaa5d961 --- /dev/null +++ b/core/gethwrappers/generated/vrf_coordinator_v2_5/vrf_coordinator_v2_5.go @@ -0,0 +1,3863 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrf_coordinator_v2_5 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type VRFCoordinatorV25RequestCommitment struct { + BlockNum uint64 + SubId *big.Int + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address + ExtraArgs []byte +} + +type VRFProof struct { + Pk [2]*big.Int + Gamma [2]*big.Int + C *big.Int + S *big.Int + Seed *big.Int + UWitness common.Address + CGammaWitness [2]*big.Int + SHashWitness [2]*big.Int + ZInv *big.Int +} + +type VRFV2PlusClientRandomWordsRequest struct { + KeyHash [32]byte + SubId *big.Int + RequestConfirmations uint16 + CallbackGasLimit uint32 + NumWords uint32 + ExtraArgs []byte +} + +var VRFCoordinatorV25MetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"blockhashStore\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"internalBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"externalBalance\",\"type\":\"uint256\"}],\"name\":\"BalanceInvariantViolated\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"}],\"name\":\"BlockhashNotInStore\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinatorAddress\",\"type\":\"address\"}],\"name\":\"CoordinatorAlreadyRegistered\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinatorAddress\",\"type\":\"address\"}],\"name\":\"CoordinatorNotRegistered\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FailedToSendNative\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FailedToTransferLink\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"have\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"want\",\"type\":\"uint32\"}],\"name\":\"GasLimitTooBig\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxGas\",\"type\":\"uint256\"}],\"name\":\"GasPriceExceeded\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectCommitment\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientBalance\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"have\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"want\",\"type\":\"uint256\"}],\"name\":\"InsufficientGasForConsumer\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"consu
mer\",\"type\":\"address\"}],\"name\":\"InvalidConsumer\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidExtraArgsTag\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"linkWei\",\"type\":\"int256\"}],\"name\":\"InvalidLinkWeiPrice\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"have\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"min\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"max\",\"type\":\"uint16\"}],\"name\":\"InvalidRequestConfirmations\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSubscription\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"LinkAlreadySet\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"flatFeeLinkDiscountPPM\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"flatFeeNativePPM\",\"type\":\"uint32\"}],\"name\":\"LinkDiscountTooHigh\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"LinkNotSet\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"proposedOwner\",\"type\":\"address\"}],\"name\":\"MustBeRequestedOwner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"MustBeSubOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoCorrespondingRequest\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"NoSuchProvingKey\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"have\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"want\",\"type\":\"uint32\"}],\"name\":\"NumWordsTooBig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableFromLink\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentTooLarge\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PendingRequestExists\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"ProvingKeyAlreadyRegistered\",
\"type\":\"error\"},{\"inputs\":[],\"name\":\"Reentrant\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyConsumers\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeNativePPM\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkDiscountPPM\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"nativePremiumPercentage\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"linkPremiumPercentage\",\"type\":\"uint8\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"coordinatorAddress\",\"type\":\"address\"}],\"name\":\"CoordinatorDeregistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"coordinatorAddress\",\"type\":\"address\"}],\"name\":\"CoordinatorRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"FundsRecovered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newCoordinator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"subId\",
\"type\":\"uint256\"}],\"name\":\"MigrationCompleted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"NativeFundsRecovered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"maxGas\",\"type\":\"uint64\"}],\"name\":\"ProvingKeyDeregistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"maxGas\",\"type\":\"uint64\"}],\"name\":\"ProvingKeyRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"outputSeed\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"onlyPremium\",\"type\":\"bool\"}],\"name\":\"RandomWordsFulfil
led\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"preSeed\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"extraArgs\",\"type\":\"bytes\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RandomWordsRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amountLink\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amountNative\",\"type\":\"uint256\"}],\"name\":\"SubscriptionCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":tru
e,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"SubscriptionCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"SubscriptionFunded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldNativeBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newNativeBalance\",\"type\":\"uint256\"}],\"name\":\"SubscriptionFundedWithNative\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"BLOCKHASH_STORE\",\"outputs\":[{\"internalType\":\"contractBlockhashStoreInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":
\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI_NATIVE_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_CONSUMERS\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_NUM_WORDS\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_REQUEST_CONFIRMATIONS\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"acceptSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"addConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"cancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"createSubscription\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"}],\"name\":\"deregisterMigratableCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"
type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"publicProvingKey\",\"type\":\"uint256[2]\"}],\"name\":\"deregisterProvingKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"internalType\":\"structVRF.Proof\",\"name\":\"proof\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"blockNum\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"extraArgs\",\"type\":\"bytes\"}],\"internalType\":\"structVRFCoordinatorV2_5.RequestCommitment\",\"name\":\"rc\",\"type\":\"tuple\"},{\"internalType\":\"bool\",\"name\":\"onlyPremium\",\"type\":\"bool\"}],\"name\":\"fulfillRandomWords\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"fundSubscriptionWithNative\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\
"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveSubscriptionIds\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"ids\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"getSubscription\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"nativeBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint64\",\"name\":\"reqCount\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"publicKey\",\"type\":\"uint256[2]\"}],\"name\":\"hashOfKey\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"newCoordinator\",\"type\":\"address\"}],\"name\":\"migrate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"ownerCancelSubs
cription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"pendingRequestExists\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"addresspayable\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"recoverNativeFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"}],\"name\":\"registerMigratableCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"publicProvingKey\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint64\",\"name\":\"maxGas\",\"type\":\"uint64\"}],\"name\":\"registerProvingKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"removeConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"extraArgs\",\"type\":\"bytes\"}],\"internalType\":\"structVRFV2PlusClient.RandomWordsRequest\",\"name\":\"re
q\",\"type\":\"tuple\"}],\"name\":\"requestRandomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"requestSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_config\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"bool\",\"name\":\"reentrancyLock\",\"type\":\"bool\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeNativePPM\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkDiscountPPM\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"nativePremiumPercentage\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"linkPremiumPercentage\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_currentSubNonce\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_fallbackWeiPerUnitLink\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_provingKeyHashes\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"n
ame\":\"\",\"type\":\"bytes32\"}],\"name\":\"s_provingKeys\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"exists\",\"type\":\"bool\"},{\"internalType\":\"uint64\",\"name\":\"maxGas\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requestCommitments\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_totalBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_totalNativeBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"},{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeNativePPM\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkDiscountPPM\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"nativePremiumPercentage\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"linkPremiumPercentage\",\"type\":\"uint8\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkNativeFeed\",\"type\":\"address\"}],\"name\":\"setPLIAndPLINativeFeed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\
"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"addresspayable\",\"name\":\"recipient\",\"type\":\"address\"}],\"name\":\"withdrawNative\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60a06040523480156200001157600080fd5b5060405162005da138038062005da1833981016040819052620000349162000183565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000d7565b50505060601b6001600160601b031916608052620001b5565b6001600160a01b038116331415620001325760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000602082840312156200019657600080fd5b81516001600160a01b0381168114620001ae57600080fd5b9392505050565b60805160601c615bc6620001db6000396000818161055001526131e70152615bc66000f3fe60806040526004361061021c5760003560e01c80638402595e11610124578063b2a7cac5116100a6578063b2a7cac514610732578063bec4c08c14610752578063caf70c4a14610772578063cb63179714610792578063d98e620e146107b2578063da2f2610146107d2578063dac83d2914610831578063dc311dd314610851578063e72f6e3014610882578063ee9d2d38146108a2578063f2fde38b146108cf57600080fd5b80638402595e146105c757806386fe91c7146105e75780638da5cb5b1461060757806395b55cfc146106255780639b1c385e14610638578063
9d40a6fd14610658578063a21a23e414610690578063a4c0ed36146106a5578063a63e0bfb146106c5578063aa433aff146106e5578063aefb212f1461070557600080fd5b8063405b84fa116101ad578063405b84fa1461044e57806340d6bb821461046e57806341af6c871461049957806351cff8d9146104c95780635d06b4ab146104e957806364d51a2a14610509578063659827441461051e578063689c45171461053e57806372e9d5651461057257806379ba5097146105925780637a5a2aef146105a757600080fd5b806304104edb14610221578063043bd6ae14610243578063088070f51461026c57806308821d581461033a5780630ae095401461035a57806315c48b841461037a57806318e3dd27146103a25780631b6b6d23146103e15780632f622e6b1461040e578063301f42e91461042e575b600080fd5b34801561022d57600080fd5b5061024161023c366004614f92565b6108ef565b005b34801561024f57600080fd5b5061025960105481565b6040519081526020015b60405180910390f35b34801561027857600080fd5b50600c546102dd9061ffff81169063ffffffff62010000820481169160ff600160301b8204811692600160381b8304811692600160581b8104821692600160781b8204831692600160981b83041691600160b81b8104821691600160c01b9091041689565b6040805161ffff909a168a5263ffffffff98891660208b01529615159689019690965293861660608801529185166080870152841660a08601529290921660c084015260ff91821660e08401521661010082015261012001610263565b34801561034657600080fd5b50610241610355366004615070565b610a5c565b34801561036657600080fd5b5061024161037536600461535a565b610c06565b34801561038657600080fd5b5061038f60c881565b60405161ffff9091168152602001610263565b3480156103ae57600080fd5b50600a546103c990600160601b90046001600160601b031681565b6040516001600160601b039091168152602001610263565b3480156103ed57600080fd5b50600254610401906001600160a01b031681565b604051610263919061558b565b34801561041a57600080fd5b50610241610429366004614f92565b610c4e565b34801561043a57600080fd5b506103c9610449366004615176565b610d9a565b34801561045a57600080fd5b5061024161046936600461535a565b610fc0565b34801561047a57600080fd5b506104846101f481565b60405163ffffffff9091168152602001610263565b3480156104a557600080fd5b506104b96104b43660046150f9565b611372565b6040519015158152602001610263
565b3480156104d557600080fd5b506102416104e4366004614f92565b611482565b3480156104f557600080fd5b50610241610504366004614f92565b611610565b34801561051557600080fd5b5061038f606481565b34801561052a57600080fd5b50610241610539366004614faf565b6116c7565b34801561054a57600080fd5b506104017f000000000000000000000000000000000000000000000000000000000000000081565b34801561057e57600080fd5b50600354610401906001600160a01b031681565b34801561059e57600080fd5b50610241611727565b3480156105b357600080fd5b506102416105c236600461508c565b6117d1565b3480156105d357600080fd5b506102416105e2366004614f92565b611901565b3480156105f357600080fd5b50600a546103c9906001600160601b031681565b34801561061357600080fd5b506000546001600160a01b0316610401565b6102416106333660046150f9565b611a13565b34801561064457600080fd5b50610259610653366004615264565b611b37565b34801561066457600080fd5b50600754610678906001600160401b031681565b6040516001600160401b039091168152602001610263565b34801561069c57600080fd5b50610259611e78565b3480156106b157600080fd5b506102416106c0366004614fe8565b61204b565b3480156106d157600080fd5b506102416106e03660046152b9565b6121c7565b3480156106f157600080fd5b506102416107003660046150f9565b612444565b34801561071157600080fd5b5061072561072036600461537f565b61248c565b6040516102639190615602565b34801561073e57600080fd5b5061024161074d3660046150f9565b61258e565b34801561075e57600080fd5b5061024161076d36600461535a565b612683565b34801561077e57600080fd5b5061025961078d3660046150c0565b61278f565b34801561079e57600080fd5b506102416107ad36600461535a565b6127bf565b3480156107be57600080fd5b506102596107cd3660046150f9565b612a2e565b3480156107de57600080fd5b506108126107ed3660046150f9565b600d6020526000908152604090205460ff81169061010090046001600160401b031682565b6040805192151583526001600160401b03909116602083015201610263565b34801561083d57600080fd5b5061024161084c36600461535a565b612a4f565b34801561085d57600080fd5b5061087161086c3660046150f9565b612ae6565b6040516102639594939291906157d7565b34801561088e57600080fd5b5061024161089d366004614f92565b612bd4565b3480156108ae57600080fd5b50
6102596108bd3660046150f9565b600f6020526000908152604090205481565b3480156108db57600080fd5b506102416108ea366004614f92565b612da7565b6108f7612db8565b60115460005b81811015610a3457826001600160a01b03166011828154811061092257610922615b22565b6000918252602090912001546001600160a01b03161415610a2457601161094a6001846159d2565b8154811061095a5761095a615b22565b600091825260209091200154601180546001600160a01b03909216918390811061098657610986615b22565b9060005260206000200160006101000a8154816001600160a01b0302191690836001600160a01b0316021790555060118054806109c5576109c5615b0c565b600082815260209020810160001990810180546001600160a01b03191690550190556040517ff80a1a97fd42251f3c33cda98635e7399253033a6774fe37cd3f650b5282af3790610a1790859061558b565b60405180910390a1505050565b610a2d81615a8a565b90506108fd565b5081604051635428d44960e01b8152600401610a50919061558b565b60405180910390fd5b50565b610a64612db8565b604080518082018252600091610a9391908490600290839083908082843760009201919091525061278f915050565b6000818152600d602090815260409182902082518084019093525460ff811615158084526101009091046001600160401b03169183019190915291925090610af157604051631dfd6e1360e21b815260048101839052602401610a50565b6000828152600d6020526040812080546001600160481b0319169055600e54905b81811015610bc25783600e8281548110610b2e57610b2e615b22565b90600052602060002001541415610bb257600e610b4c6001846159d2565b81548110610b5c57610b5c615b22565b9060005260206000200154600e8281548110610b7a57610b7a615b22565b600091825260209091200155600e805480610b9757610b97615b0c565b60019003818190600052602060002001600090559055610bc2565b610bbb81615a8a565b9050610b12565b507f9b6868e0eb737bcd72205360baa6bfd0ba4e4819a33ade2db384e8a8025639a5838360200151604051610bf8929190615615565b60405180910390a150505050565b81610c1081612e0d565b610c18612e6e565b610c2183611372565b15610c3f57604051631685ecdd60e31b815260040160405180910390fd5b610c498383612e99565b505050565b610c56612e6e565b610c5e612db8565b600b54600160601b90046001600160601b0316610c8e57604051631e9acf1760e31b815260040160405180910390fd5b600b8054600160601b
90046001600160601b0316908190600c610cb18380615a0e565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555080600a600c8282829054906101000a90046001600160601b0316610cf99190615a0e565b92506101000a8154816001600160601b0302191690836001600160601b031602179055506000826001600160a01b0316826001600160601b031660405160006040518083038185875af1925050503d8060008114610d73576040519150601f19603f3d011682016040523d82523d6000602084013e610d78565b606091505b5050905080610c495760405163950b247960e01b815260040160405180910390fd5b6000610da4612e6e565b60005a90506000610db5868661304d565b90506000610dcb85836000015160200151613309565b60408301516060888101519293509163ffffffff16806001600160401b03811115610df857610df8615b38565b604051908082528060200260200182016040528015610e21578160200160208202803683370190505b50925060005b81811015610e895760408051602081018590529081018290526060016040516020818303038152906040528051906020012060001c848281518110610e6e57610e6e615b22565b6020908102919091010152610e8281615a8a565b9050610e27565b50506020808501516000818152600f9092526040822082905590610eae828b86613357565b60208b81015160008181526006909252604090912080549293509091601890610ee690600160c01b90046001600160401b0316615aa5565b91906101000a8154816001600160401b0302191690836001600160401b0316021790555060008b60a0015160018d60a0015151610f2391906159d2565b81518110610f3357610f33615b22565b60209101015160f81c6001149050610f4d8988838e6133f2565b9950610f5a8a8284613422565b50604080518581526001600160601b038b166020820152831515818301528b151560608201529051829185917f6c6b5394380e16e41988d8383648010de6f5c2e4814803be5de1c6b1c852db559181900360800190a350505050505050505b9392505050565b610fc8612e6e565b610fd181613575565b610ff05780604051635428d44960e01b8152600401610a50919061558b565b600080600080610fff86612ae6565b945094505093509350336001600160a01b0316826001600160a01b0316146110625760405162461bcd60e51b81526020600482015260166024820152752737ba1039bab139b1b934b83a34b7b71037bbb732b960511b6044820152606401610a50565b61106b86611372565b156110b15760405162461bcd60e51b8152602060
04820152601660248201527550656e64696e6720726571756573742065786973747360501b6044820152606401610a50565b6040805160c0810182526001815260208082018990526001600160a01b03851682840152606082018490526001600160601b038088166080840152861660a0830152915190916000916111069184910161563f565b6040516020818303038152906040529050611120886135e1565b505060405163ce3f471960e01b81526001600160a01b0388169063ce3f4719906001600160601b0388169061115990859060040161562c565b6000604051808303818588803b15801561117257600080fd5b505af1158015611186573d6000803e3d6000fd5b50506002546001600160a01b0316158015935091506111af905057506001600160601b03861615155b156112795760025460405163a9059cbb60e01b81526001600160a01b039091169063a9059cbb906111e6908a908a906004016155d2565b602060405180830381600087803b15801561120057600080fd5b505af1158015611214573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061123891906150dc565b6112795760405162461bcd60e51b8152602060048201526012602482015271696e73756666696369656e742066756e647360701b6044820152606401610a50565b600c805460ff60301b1916600160301b17905560005b8351811015611320578381815181106112aa576112aa615b22565b60200260200101516001600160a01b0316638ea98117896040518263ffffffff1660e01b81526004016112dd919061558b565b600060405180830381600087803b1580156112f757600080fd5b505af115801561130b573d6000803e3d6000fd5b505050508061131990615a8a565b905061128f565b50600c805460ff60301b191690556040517fd63ca8cb945956747ee69bfdc3ea754c24a4caf7418db70e46052f7850be4187906113609089908b9061559f565b60405180910390a15050505050505050565b6000818152600560205260408120600201805480611394575060009392505050565b600e5460005b828110156114765760008482815481106113b6576113b6615b22565b60009182526020822001546001600160a01b031691505b8381101561146357600061142b600e83815481106113ed576113ed615b22565b60009182526020808320909101546001600160a01b03871683526004825260408084208e855290925291205485908c906001600160401b0316613789565b506000818152600f6020526040902054909150156114525750600198975050505050505050565b5061145c81615a8a565b90506113cd565b50508061
146f90615a8a565b905061139a565b50600095945050505050565b61148a612e6e565b611492612db8565b6002546001600160a01b03166114bb5760405163c1f0c0a160e01b815260040160405180910390fd5b600b546001600160601b03166114e457604051631e9acf1760e31b815260040160405180910390fd5b600b80546001600160601b031690819060006115008380615a0e565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555080600a60008282829054906101000a90046001600160601b03166115489190615a0e565b82546001600160601b039182166101009390930a92830291909202199091161790555060025460405163a9059cbb60e01b81526001600160a01b039091169063a9059cbb9061159d90859085906004016155d2565b602060405180830381600087803b1580156115b757600080fd5b505af11580156115cb573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906115ef91906150dc565b61160c57604051631e9acf1760e31b815260040160405180910390fd5b5050565b611618612db8565b61162181613575565b15611641578060405163ac8a27ef60e01b8152600401610a50919061558b565b601180546001810182556000919091527f31ecc21a745e3968a04e9570e4425bc18fa8019c68028196b546d1669c200c680180546001600160a01b0319166001600160a01b0383161790556040517fb7cabbfc11e66731fc77de0444614282023bcbd41d16781c753a431d0af01625906116bc90839061558b565b60405180910390a150565b6116cf612db8565b6002546001600160a01b0316156116f957604051631688c53760e11b815260040160405180910390fd5b600280546001600160a01b039384166001600160a01b03199182161790915560038054929093169116179055565b6001546001600160a01b0316331461177a5760405162461bcd60e51b815260206004820152601660248201527526bab9ba10313290383937b837b9b2b21037bbb732b960511b6044820152606401610a50565b60008054336001600160a01b0319808316821784556001805490911690556040516001600160a01b0390921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6117d9612db8565b60408051808201825260009161180891908590600290839083908082843760009201919091525061278f915050565b6000818152600d602052604090205490915060ff161561183e57604051634a0b8fa760e01b815260048101829052602401610a50565b6040805180820182526001808252600160
0160401b0385811660208085019182526000878152600d9091528581209451855492516001600160481b031990931690151568ffffffffffffffff00191617610100929093169190910291909117909255600e805491820181559091527fbb7b4a454dc3493923482f07822329ed19e8244eff582cc204f8554c3620c3fd01829055517f9b911b2c240bfbef3b6a8f7ed6ee321d1258bb2a3fe6becab52ac1cd3210afd390610a179083908590615615565b611909612db8565b600a544790600160601b90046001600160601b031681811115611949576040516354ced18160e11b81526004810182905260248101839052604401610a50565b81811015610c4957600061195d82846159d2565b90506000846001600160a01b03168260405160006040518083038185875af1925050503d80600081146119ac576040519150601f19603f3d011682016040523d82523d6000602084013e6119b1565b606091505b50509050806119d35760405163950b247960e01b815260040160405180910390fd5b7f4aed7c8eed0496c8c19ea2681fcca25741c1602342e38b045d9f1e8e905d2e9c8583604051611a0492919061559f565b60405180910390a15050505050565b611a1b612e6e565b6000818152600560205260409020546001600160a01b0316611a5057604051630fb532db60e11b815260040160405180910390fd5b60008181526006602052604090208054600160601b90046001600160601b0316903490600c611a7f838561597d565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555034600a600c8282829054906101000a90046001600160601b0316611ac7919061597d565b92506101000a8154816001600160601b0302191690836001600160601b03160217905550817f7603b205d03651ee812f803fccde89f1012e545a9c99f0abfea9cedd0fd8e902823484611b1a919061591e565b604080519283526020830191909152015b60405180910390a25050565b6000611b41612e6e565b602080830135600081815260059092526040909120546001600160a01b0316611b7d57604051630fb532db60e11b815260040160405180910390fd5b33600090815260046020908152604080832084845291829052909120546001600160401b031680611bc55782336040516379bfd40160e01b8152600401610a509291906156b4565b600c5461ffff16611bdc606087016040880161529e565b61ffff161080611bff575060c8611bf9606087016040880161529e565b61ffff16115b15611c4557611c14606086016040870161529e565b600c5460405163539c34bb60e11b815261ffff92831660048201529116602482015260
c86044820152606401610a50565b600c5462010000900463ffffffff16611c6460808701606088016153a1565b63ffffffff161115611cb457611c8060808601606087016153a1565b600c54604051637aebf00f60e11b815263ffffffff9283166004820152620100009091049091166024820152604401610a50565b6101f4611cc760a08701608088016153a1565b63ffffffff161115611d0d57611ce360a08601608087016153a1565b6040516311ce1afb60e21b815263ffffffff90911660048201526101f46024820152604401610a50565b611d1681615aa5565b90506000611d278635338685613789565b90955090506000611d4b611d46611d4160a08a018a61582c565b613802565b61387f565b905085611d566138f0565b86611d6760808b0160608c016153a1565b611d7760a08c0160808d016153a1565b3386604051602001611d8f9796959493929190615737565b60405160208183030381529060405280519060200120600f600088815260200190815260200160002081905550336001600160a01b03168588600001357feb0e3652e0f44f417695e6e90f2f42c99b65cd7169074c5a654b16b9748c3a4e89868c6040016020810190611e02919061529e565b8d6060016020810190611e1591906153a1565b8e6080016020810190611e2891906153a1565b89604051611e3b969594939291906156f8565b60405180910390a450506000928352602091909152604090912080546001600160401b0319166001600160401b039092169190911790555b919050565b6000611e82612e6e565b6007546001600160401b031633611e9a6001436159d2565b6040516001600160601b0319606093841b81166020830152914060348201523090921b1660548201526001600160c01b031960c083901b16606882015260700160408051601f1981840301815291905280516020909101209150611eff816001615936565b600780546001600160401b0319166001600160401b03928316179055604080516000808252608082018352602080830182815283850183815260608086018581528a86526006855287862093518454935191516001600160601b039182166001600160c01b031990951694909417600160601b9190921602176001600160c01b0316600160c01b9290981691909102969096179055835194850184523385528481018281528585018481528884526005835294909220855181546001600160a01b03199081166001600160a01b039283161783559351600183018054909516911617909255925180519294939192611ffd9260028501920190614cac565b5061200d91506008905084613980565b50827f1d3015d7ba850fa198dc7b1a3f5d42
779313a681035f77c8c03764c61005518d3360405161203e919061558b565b60405180910390a2505090565b612053612e6e565b6002546001600160a01b0316331461207e576040516344b0e3c360e01b815260040160405180910390fd5b6020811461209f57604051638129bbcd60e01b815260040160405180910390fd5b60006120ad828401846150f9565b6000818152600560205260409020549091506001600160a01b03166120e557604051630fb532db60e11b815260040160405180910390fd5b600081815260066020526040812080546001600160601b03169186919061210c838561597d565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555084600a60008282829054906101000a90046001600160601b0316612154919061597d565b92506101000a8154816001600160601b0302191690836001600160601b03160217905550817f1ced9348ff549fceab2ac57cd3a9de38edaaab274b725ee82c23e8fc8c4eec7a8287846121a7919061591e565b6040805192835260208301919091520160405180910390a2505050505050565b6121cf612db8565b60c861ffff8a1611156122095760405163539c34bb60e11b815261ffff8a1660048201819052602482015260c86044820152606401610a50565b6000851361222d576040516321ea67b360e11b815260048101869052602401610a50565b60008463ffffffff1611801561224f57508363ffffffff168363ffffffff1610155b1561227d576040516313c06e5960e11b815263ffffffff808516600483015285166024820152604401610a50565b604080516101208101825261ffff8b1680825263ffffffff808c16602084018190526000848601528b8216606085018190528b8316608086018190528a841660a08701819052938a1660c0870181905260ff808b1660e08901819052908a16610100909801889052600c8054600160c01b90990260ff60c01b19600160b81b9093029290921661ffff60b81b19600160981b90940263ffffffff60981b19600160781b9099029890981667ffffffffffffffff60781b19600160581b90960263ffffffff60581b19600160381b9098029790971668ffffffffffffffffff60301b196201000090990265ffffffffffff19909c16909a179a909a1796909616979097179390931791909116959095179290921793909316929092179190911790556010869055517f2c6b6b12413678366b05b145c5f00745bdd00e739131ab5de82484a50c9d78b690612431908b908b908b908b908b908b908b908b908b9061ffff99909916895263ffffffff97881660208a0152958716604089015293861660608801526080870192
909252841660a086015290921660c084015260ff91821660e0840152166101008201526101200190565b60405180910390a1505050505050505050565b61244c612db8565b6000818152600560205260409020546001600160a01b03168061248257604051630fb532db60e11b815260040160405180910390fd5b61160c8282612e99565b6060600061249a600861398c565b90508084106124bc57604051631390f2a160e01b815260040160405180910390fd5b60006124c8848661591e565b9050818111806124d6575083155b6124e057806124e2565b815b905060006124f086836159d2565b9050806001600160401b0381111561250a5761250a615b38565b604051908082528060200260200182016040528015612533578160200160208202803683370190505b50935060005b818110156125835761255661254e888361591e565b600890613996565b85828151811061256857612568615b22565b602090810291909101015261257c81615a8a565b9050612539565b505050505b92915050565b612596612e6e565b6000818152600560205260409020546001600160a01b0316806125cc57604051630fb532db60e11b815260040160405180910390fd5b6000828152600560205260409020600101546001600160a01b03163314612623576000828152600560205260409081902060010154905163d084e97560e01b8152610a50916001600160a01b03169060040161558b565b600082815260056020526040908190208054336001600160a01b031991821681178355600190920180549091169055905183917fd4114ab6e9af9f597c52041f32d62dc57c5c4e4c0d4427006069635e216c938691611b2b9185916155b8565b8161268d81612e0d565b612695612e6e565b60008381526005602052604090206002018054606414156126c9576040516305a48e0f60e01b815260040160405180910390fd5b6001600160a01b038316600090815260046020908152604080832087845291829052909120546001600160401b031615612704575050505050565b600085815260208281526040808320805460016001600160401b0319909116811790915585549081018655858452919092200180546001600160a01b0319166001600160a01b0387161790555185907f1e980d04aa7648e205713e5e8ea3808672ac163d10936d36f91b2c88ac1575e19061278090879061558b565b60405180910390a25050505050565b6000816040516020016127a291906155f4565b604051602081830303815290604052805190602001209050919050565b816127c981612e0d565b6127d1612e6e565b6127da83611372565b156127f857604051631685ecdd60e31b8152600401
60405180910390fd5b6001600160a01b03821660009081526004602090815260408083208684529091529020546001600160401b03166128465782826040516379bfd40160e01b8152600401610a509291906156b4565b6000838152600560209081526040808320600201805482518185028101850190935280835291929091908301828280156128a957602002820191906000526020600020905b81546001600160a01b0316815260019091019060200180831161288b575b505050505090506000600182516128c091906159d2565b905060005b82518110156129ca57846001600160a01b03168382815181106128ea576128ea615b22565b60200260200101516001600160a01b031614156129ba57600083838151811061291557612915615b22565b602002602001015190508060056000898152602001908152602001600020600201838154811061294757612947615b22565b600091825260208083209190910180546001600160a01b0319166001600160a01b03949094169390931790925588815260059091526040902060020180548061299257612992615b0c565b600082815260209020810160001990810180546001600160a01b0319169055019055506129ca565b6129c381615a8a565b90506128c5565b506001600160a01b03841660009081526004602090815260408083208884529091529081902080546001600160401b03191690555185907f32158c6058347c1601b2d12bc696ac6901d8a9a9aa3ba10c27ab0a983e8425a79061278090879061558b565b600e8181548110612a3e57600080fd5b600091825260209091200154905081565b81612a5981612e0d565b612a61612e6e565b600083815260056020526040902060018101546001600160a01b03848116911614612ae0576001810180546001600160a01b0319166001600160a01b03851617905560405184907f21a4dad170a6bf476c31bbcf4a16628295b0e450672eec25d7c93308e05344a190612ad790339087906155b8565b60405180910390a25b50505050565b600081815260056020526040812054819081906001600160a01b0316606081612b2257604051630fb532db60e11b815260040160405180910390fd5b600086815260066020908152604080832054600583529281902060020180548251818502810185019093528083526001600160601b0380861695600160601b810490911694600160c01b9091046001600160401b0316938893929091839190830182828015612bba57602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311612b9c575b505050505090509450945094509450945091939590929450565b612bdc
612db8565b6002546001600160a01b0316612c055760405163c1f0c0a160e01b815260040160405180910390fd5b6002546040516370a0823160e01b81526000916001600160a01b0316906370a0823190612c3690309060040161558b565b60206040518083038186803b158015612c4e57600080fd5b505afa158015612c62573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612c869190615112565b600a549091506001600160601b031681811115612cc0576040516354ced18160e11b81526004810182905260248101839052604401610a50565b81811015610c49576000612cd482846159d2565b60025460405163a9059cbb60e01b81529192506001600160a01b03169063a9059cbb90612d07908790859060040161559f565b602060405180830381600087803b158015612d2157600080fd5b505af1158015612d35573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612d5991906150dc565b612d7657604051631f01ff1360e21b815260040160405180910390fd5b7f59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b4366008482604051610bf892919061559f565b612daf612db8565b610a59816139a2565b6000546001600160a01b03163314612e0b5760405162461bcd60e51b815260206004820152601660248201527527b7363c9031b0b63630b1363290313c9037bbb732b960511b6044820152606401610a50565b565b6000818152600560205260409020546001600160a01b031680612e4357604051630fb532db60e11b815260040160405180910390fd5b336001600160a01b0382161461160c5780604051636c51fda960e11b8152600401610a50919061558b565b600c54600160301b900460ff1615612e0b5760405163769dd35360e11b815260040160405180910390fd5b600080612ea5846135e1565b60025491935091506001600160a01b031615801590612ecc57506001600160601b03821615155b15612f7b5760025460405163a9059cbb60e01b81526001600160a01b039091169063a9059cbb90612f0c9086906001600160601b0387169060040161559f565b602060405180830381600087803b158015612f2657600080fd5b505af1158015612f3a573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612f5e91906150dc565b612f7b57604051631e9acf1760e31b815260040160405180910390fd5b6000836001600160a01b0316826001600160601b031660405160006040518083038185875af1925050503d8060008114612fd1576040519150601f19603f3d0116820160
40523d82523d6000602084013e612fd6565b606091505b5050905080612ff85760405163950b247960e01b815260040160405180910390fd5b604080516001600160a01b03861681526001600160601b03808616602083015284169181019190915285907f8c74ce8b8cf87f5eb001275c8be27eb34ea2b62bfab6814fcc62192bb63e81c490606001612780565b6040805160a08101825260006060820181815260808301829052825260208201819052918101919091526000613086846000015161278f565b6000818152600d602090815260409182902082518084019093525460ff811615158084526101009091046001600160401b031691830191909152919250906130e457604051631dfd6e1360e21b815260048101839052602401610a50565b6000828660800151604051602001613106929190918252602082015260400190565b60408051601f1981840301815291815281516020928301206000818152600f9093529120549091508061314c57604051631b44092560e11b815260040160405180910390fd5b85516020808801516040808a015160608b015160808c015160a08d0151935161317b978a979096959101615783565b6040516020818303038152906040528051906020012081146131b05760405163354a450b60e21b815260040160405180910390fd5b60006131bf8760000151613a46565b905080613297578651604051631d2827a760e31b81526001600160401b0390911660048201527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03169063e9413d389060240160206040518083038186803b15801561323157600080fd5b505afa158015613245573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906132699190615112565b90508061329757865160405163175dadad60e01b81526001600160401b039091166004820152602401610a50565b60008860800151826040516020016132b9929190918252602082015260400190565b6040516020818303038152906040528051906020012060001c905060006132e08a83613b28565b604080516060810182529788526020880196909652948601949094525092979650505050505050565b6000816001600160401b03163a111561334f57821561333257506001600160401b038116612588565b3a8260405163435e532d60e11b8152600401610a50929190615615565b503a92915050565b6000806000631fe543e360e01b86856040516024016133779291906156df565b60408051601f198184030181529181526020820180516001600160e01b03166001600160e01b03199094169390931790
9252600c805460ff60301b1916600160301b1790559086015160808701519192506133db9163ffffffff9091169083613b93565b600c805460ff60301b191690559695505050505050565b6000821561340c57613405858584613bdf565b905061341a565b613417858584613cf0565b90505b949350505050565b600081815260066020526040902082156134e15780546001600160601b03600160601b909104811690851681101561346d57604051631e9acf1760e31b815260040160405180910390fd5b6134778582615a0e565b8254600160601b600160c01b031916600160601b6001600160601b039283168102919091178455600b805488939192600c926134b792869290041661597d565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555050612ae0565b80546001600160601b0390811690851681101561351157604051631e9acf1760e31b815260040160405180910390fd5b61351b8582615a0e565b82546001600160601b0319166001600160601b03918216178355600b8054879260009161354a9185911661597d565b92506101000a8154816001600160601b0302191690836001600160601b031602179055505050505050565b601154600090815b818110156135d757836001600160a01b0316601182815481106135a2576135a2615b22565b6000918252602090912001546001600160a01b031614156135c7575060019392505050565b6135d081615a8a565b905061357d565b5060009392505050565b60008181526005602090815260408083206006909252822054600290910180546001600160601b0380841694600160601b90940416925b81811015613683576004600084838154811061363657613636615b22565b60009182526020808320909101546001600160a01b031683528281019390935260409182018120898252909252902080546001600160401b031916905561367c81615a8a565b9050613618565b50600085815260056020526040812080546001600160a01b031990811682556001820180549091169055906136bb6002830182614d11565b50506000858152600660205260408120556136d7600886613ed9565b506001600160601b0384161561372a57600a80548591906000906137059084906001600160601b0316615a0e565b92506101000a8154816001600160601b0302191690836001600160601b031602179055505b6001600160601b038316156137825782600a600c8282829054906101000a90046001600160601b031661375d9190615a0e565b92506101000a8154816001600160601b0302191690836001600160601b031602179055505b5050915091565b604080516020
8082018790526001600160a01b03959095168183015260608101939093526001600160401b03919091166080808401919091528151808403909101815260a08301825280519084012060c083019490945260e0808301859052815180840390910181526101009092019052805191012091565b6040805160208101909152600081528161382b5750604080516020810190915260008152612588565b63125fa26760e31b61383d8385615a2e565b6001600160e01b0319161461386557604051632923fee760e11b815260040160405180910390fd5b61387282600481866158f4565b810190610fb9919061512b565b60607f92fd13387c7fe7befbc38d303d6468778fb9731bc4583f17d92989c6fcfdeaaa826040516024016138b891511515815260200190565b60408051601f198184030181529190526020810180516001600160e01b03166001600160e01b03199093169290921790915292915050565b6000466138fc81613ee5565b156139795760646001600160a01b031663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b15801561393b57600080fd5b505afa15801561394f573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906139739190615112565b91505090565b4391505090565b6000610fb98383613f08565b6000612588825490565b6000610fb98383613f57565b6001600160a01b0381163314156139f55760405162461bcd60e51b815260206004820152601760248201527621b0b73737ba103a3930b739b332b9103a379039b2b63360491b6044820152606401610a50565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600046613a5281613ee5565b15613b1957610100836001600160401b0316613a6c6138f0565b613a7691906159d2565b1180613a925750613a856138f0565b836001600160401b031610155b15613aa05750600092915050565b6040516315a03d4160e11b81526001600160401b0384166004820152606490632b407a82906024015b60206040518083038186803b158015613ae157600080fd5b505afa158015613af5573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610fb99190615112565b50506001600160401b03164090565b6000613b5c8360000151846020015185604001518660600151868860a001518960c001518a60e001518b6101000151613f81565b60038360200151604051602001613b749291906156cb565b604080
51601f1981840301815291905280516020909101209392505050565b60005a611388811015613ba557600080fd5b611388810390508460408204820311613bbd57600080fd5b50823b613bc957600080fd5b60008083516020850160008789f1949350505050565b600080613c226000368080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061419d92505050565b905060005a600c54613c42908890600160581b900463ffffffff1661591e565b613c4c91906159d2565b613c5690866159b3565b600c54909150600090613c7b90600160781b900463ffffffff1664e8d4a510006159b3565b90508415613cc757600c548190606490600160b81b900460ff16613c9f858761591e565b613ca991906159b3565b613cb3919061599f565b613cbd919061591e565b9350505050610fb9565b600c548190606490613ce390600160b81b900460ff1682615958565b60ff16613c9f858761591e565b600080613cfb61426b565b905060008113613d21576040516321ea67b360e11b815260048101829052602401610a50565b6000613d636000368080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061419d92505050565b9050600082825a600c54613d85908b90600160581b900463ffffffff1661591e565b613d8f91906159d2565b613d9990896159b3565b613da3919061591e565b613db590670de0b6b3a76400006159b3565b613dbf919061599f565b600c54909150600090613de89063ffffffff600160981b8204811691600160781b9004166159e9565b613dfd9063ffffffff1664e8d4a510006159b3565b9050600084613e1483670de0b6b3a76400006159b3565b613e1e919061599f565b905060008715613e5f57600c548290606490613e4490600160c01b900460ff16876159b3565b613e4e919061599f565b613e58919061591e565b9050613e9f565b600c548290606490613e7b90600160c01b900460ff1682615958565b613e889060ff16876159b3565b613e92919061599f565b613e9c919061591e565b90505b6b033b2e3c9fd0803ce8000000811115613ecc5760405163e80fa38160e01b815260040160405180910390fd5b9998505050505050505050565b6000610fb9838361433a565b600061a4b1821480613ef9575062066eed82145b8061258857505062066eee1490565b6000818152600183016020526040812054613f4f57508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155612588565b506000612588565b6000826000018281
548110613f6e57613f6e615b22565b9060005260206000200154905092915050565b613f8a8961442d565b613fd35760405162461bcd60e51b815260206004820152601a6024820152797075626c6963206b6579206973206e6f74206f6e20637572766560301b6044820152606401610a50565b613fdc8861442d565b6140205760405162461bcd60e51b815260206004820152601560248201527467616d6d61206973206e6f74206f6e20637572766560581b6044820152606401610a50565b6140298361442d565b6140755760405162461bcd60e51b815260206004820152601d60248201527f6347616d6d615769746e657373206973206e6f74206f6e2063757276650000006044820152606401610a50565b61407e8261442d565b6140ca5760405162461bcd60e51b815260206004820152601c60248201527f73486173685769746e657373206973206e6f74206f6e206375727665000000006044820152606401610a50565b6140d6878a88876144f0565b61411e5760405162461bcd60e51b81526020600482015260196024820152786164647228632a706b2b732a6729213d5f755769746e65737360381b6044820152606401610a50565b600061412a8a87614613565b9050600061413d898b878b868989614677565b9050600061414e838d8d8a86614796565b9050808a1461418f5760405162461bcd60e51b815260206004820152600d60248201526c34b73b30b634b210383937b7b360991b6044820152606401610a50565b505050505050505050505050565b6000466141a981613ee5565b156141e857606c6001600160a01b031663c6f7de0e6040518163ffffffff1660e01b815260040160206040518083038186803b158015613ae157600080fd5b6141f1816147d6565b1561426257600f602160991b016001600160a01b03166349948e0e84604051806080016040528060488152602001615b72604891396040516020016142379291906154e1565b6040516020818303038152906040526040518263ffffffff1660e01b8152600401613ac9919061562c565b50600092915050565b600c5460035460408051633fabe5a360e21b81529051600093600160381b900463ffffffff169284926001600160a01b039091169163feaf968c9160048082019260a092909190829003018186803b1580156142c657600080fd5b505afa1580156142da573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906142fe91906153bc565b50919550909250505063ffffffff82161580159061432a575061432181426159d2565b8263ffffffff16105b156143355760105492505b505090565b60008181526001830160205260
40812054801561442357600061435e6001836159d2565b8554909150600090614372906001906159d2565b90508181146143d757600086600001828154811061439257614392615b22565b90600052602060002001549050808760000184815481106143b5576143b5615b22565b6000918252602080832090910192909255918252600188019052604090208390555b85548690806143e8576143e8615b0c565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505050612588565b6000915050612588565b80516000906401000003d0191161447b5760405162461bcd60e51b8152602060048201526012602482015271696e76616c696420782d6f7264696e61746560701b6044820152606401610a50565b60208201516401000003d019116144c95760405162461bcd60e51b8152602060048201526012602482015271696e76616c696420792d6f7264696e61746560701b6044820152606401610a50565b60208201516401000003d0199080096144e98360005b6020020151614810565b1492915050565b60006001600160a01b0382166145365760405162461bcd60e51b815260206004820152600b60248201526a626164207769746e65737360a81b6044820152606401610a50565b60208401516000906001161561454d57601c614550565b601b5b9050600070014551231950b75fc4402da1732fc9bebe1985876000602002015109865170014551231950b75fc4402da1732fc9bebe19918203925060009190890987516040805160008082526020820180845287905260ff88169282019290925260608101929092526080820183905291925060019060a0016020604051602081039080840390855afa1580156145eb573d6000803e3d6000fd5b5050604051601f1901516001600160a01b039081169088161495505050505050949350505050565b61461b614d2f565b614648600184846040516020016146349392919061556a565b604051602081830303815290604052614834565b90505b6146548161442d565b6125885780516040805160208101929092526146709101614634565b905061464b565b61467f614d2f565b825186516401000003d01990819006910614156146de5760405162461bcd60e51b815260206004820152601e60248201527f706f696e747320696e2073756d206d7573742062652064697374696e637400006044820152606401610a50565b6146e9878988614882565b61472e5760405162461bcd60e51b8152602060048201526016602482015275119a5c9cdd081b5d5b0818da1958dac819985a5b195960521b6044820152606401610a50565b61
4739848685614882565b61477f5760405162461bcd60e51b815260206004820152601760248201527614d958dbdb99081b5d5b0818da1958dac819985a5b1959604a1b6044820152606401610a50565b61478a8684846149aa565b98975050505050505050565b6000600286868685876040516020016147b496959493929190615510565b60408051601f1981840301815291905280516020909101209695505050505050565b6000600a8214806147e857506101a482145b806147f5575062aa37dc82145b80614801575061210582145b8061258857505062014a331490565b6000806401000003d01980848509840990506401000003d019600782089392505050565b61483c614d2f565b61484582614a6d565b815261485a6148558260006144df565b614aa8565b602082018190526002900660011415611e73576020810180516401000003d019039052919050565b6000826148bf5760405162461bcd60e51b815260206004820152600b60248201526a3d32b9379039b1b0b630b960a91b6044820152606401610a50565b835160208501516000906148d590600290615acc565b156148e157601c6148e4565b601b5b9050600070014551231950b75fc4402da1732fc9bebe198387096040805160008082526020820180845281905260ff86169282019290925260608101869052608081018390529192509060019060a0016020604051602081039080840390855afa158015614956573d6000803e3d6000fd5b50505060206040510351905060008660405160200161497591906154cf565b60408051601f1981840301815291905280516020909101206001600160a01b0392831692169190911498975050505050505050565b6149b2614d2f565b8351602080860151855191860151600093849384936149d393909190614ac8565b919450925090506401000003d019858209600114614a2f5760405162461bcd60e51b815260206004820152601960248201527834b73b2d1036bab9ba1031329034b73b32b939b29037b3103d60391b6044820152606401610a50565b60405180604001604052806401000003d01980614a4e57614a4e615af6565b87860981526020016401000003d0198785099052979650505050505050565b805160208201205b6401000003d0198110611e7357604080516020808201939093528151808203840181529082019091528051910120614a75565b6000612588826002614ac16401000003d019600161591e565b901c614ba8565b60008080600180826401000003d019896401000003d019038808905060006401000003d0198b6401000003d019038a0890506000614b0883838585614c3f565b9098509050614b1988828e88614c63
565b9098509050614b2a88828c87614c63565b90985090506000614b3d8d878b85614c63565b9098509050614b4e88828686614c3f565b9098509050614b5f88828e89614c63565b9098509050818114614b94576401000003d019818a0998506401000003d01982890997506401000003d0198183099650614b98565b8196505b5050505050509450945094915050565b600080614bb3614d4d565b6020808252818101819052604082015260608101859052608081018490526401000003d01960a0820152614be5614d6b565b60208160c0846005600019fa925082614c355760405162461bcd60e51b81526020600482015260126024820152716269674d6f64457870206661696c7572652160701b6044820152606401610a50565b5195945050505050565b6000806401000003d0198487096401000003d0198487099097909650945050505050565b600080806401000003d019878509905060006401000003d01987876401000003d019030990506401000003d0198183086401000003d01986890990999098509650505050505050565b828054828255906000526020600020908101928215614d01579160200282015b82811115614d0157825182546001600160a01b0319166001600160a01b03909116178255602090920191600190910190614ccc565b50614d0d929150614d89565b5090565b5080546000825590600052602060002090810190610a599190614d89565b60405180604001604052806002906020820280368337509192915050565b6040518060c001604052806006906020820280368337509192915050565b60405180602001604052806001906020820280368337509192915050565b5b80821115614d0d5760008155600101614d8a565b8035611e7381615b4e565b806040810183101561258857600080fd5b600082601f830112614dcb57600080fd5b604051604081018181106001600160401b0382111715614ded57614ded615b38565b8060405250808385604086011115614e0457600080fd5b60005b6002811015614e26578135835260209283019290910190600101614e07565b509195945050505050565b8035611e7381615b63565b600060c08284031215614e4e57600080fd5b614e56615879565b9050614e6182614f50565b815260208083013581830152614e7960408401614f3c565b6040830152614e8a60608401614f3c565b60608301526080830135614e9d81615b4e565b608083015260a08301356001600160401b0380821115614ebc57600080fd5b818501915085601f830112614ed057600080fd5b813581811115614ee257614ee2615b38565b614ef4601f8201601f191685016158c4565b9150808252868482850101
1115614f0a57600080fd5b80848401858401376000848284010152508060a085015250505092915050565b803561ffff81168114611e7357600080fd5b803563ffffffff81168114611e7357600080fd5b80356001600160401b0381168114611e7357600080fd5b803560ff81168114611e7357600080fd5b805169ffffffffffffffffffff81168114611e7357600080fd5b600060208284031215614fa457600080fd5b8135610fb981615b4e565b60008060408385031215614fc257600080fd5b8235614fcd81615b4e565b91506020830135614fdd81615b4e565b809150509250929050565b60008060008060608587031215614ffe57600080fd5b843561500981615b4e565b93506020850135925060408501356001600160401b038082111561502c57600080fd5b818701915087601f83011261504057600080fd5b81358181111561504f57600080fd5b88602082850101111561506157600080fd5b95989497505060200194505050565b60006040828403121561508257600080fd5b610fb98383614da9565b6000806060838503121561509f57600080fd5b6150a98484614da9565b91506150b760408401614f50565b90509250929050565b6000604082840312156150d257600080fd5b610fb98383614dba565b6000602082840312156150ee57600080fd5b8151610fb981615b63565b60006020828403121561510b57600080fd5b5035919050565b60006020828403121561512457600080fd5b5051919050565b60006020828403121561513d57600080fd5b604051602081018181106001600160401b038211171561515f5761515f615b38565b604052823561516d81615b63565b81529392505050565b60008060008385036101e081121561518d57600080fd5b6101a08082121561519d57600080fd5b6151a56158a1565b91506151b18787614dba565b82526151c08760408801614dba565b60208301526080860135604083015260a0860135606083015260c086013560808301526151ef60e08701614d9e565b60a083015261010061520388828901614dba565b60c0840152615216886101408901614dba565b60e0840152610180870135908301529093508401356001600160401b0381111561523f57600080fd5b61524b86828701614e3c565b92505061525b6101c08501614e31565b90509250925092565b60006020828403121561527657600080fd5b81356001600160401b0381111561528c57600080fd5b820160c08185031215610fb957600080fd5b6000602082840312156152b057600080fd5b610fb982614f2a565b60008060008060008060008060006101208a8c0312156152d857600080fd5b6152e18a614f2a565b98506152ef60
208b01614f3c565b97506152fd60408b01614f3c565b965061530b60608b01614f3c565b955060808a0135945061532060a08b01614f3c565b935061532e60c08b01614f3c565b925061533c60e08b01614f67565b915061534b6101008b01614f67565b90509295985092959850929598565b6000806040838503121561536d57600080fd5b823591506020830135614fdd81615b4e565b6000806040838503121561539257600080fd5b50508035926020909101359150565b6000602082840312156153b357600080fd5b610fb982614f3c565b600080600080600060a086880312156153d457600080fd5b6153dd86614f78565b945060208601519350604086015192506060860151915061540060808701614f78565b90509295509295909350565b600081518084526020808501945080840160005b838110156154455781516001600160a01b031687529582019590820190600101615420565b509495945050505050565b8060005b6002811015612ae0578151845260209384019390910190600101615454565b600081518084526020808501945080840160005b8381101561544557815187529582019590820190600101615487565b600081518084526154bb816020860160208601615a5e565b601f01601f19169290920160200192915050565b6154d98183615450565b604001919050565b600083516154f3818460208801615a5e565b835190830190615507818360208801615a5e565b01949350505050565b8681526155206020820187615450565b61552d6060820186615450565b61553a60a0820185615450565b61554760e0820184615450565b60609190911b6001600160601b0319166101208201526101340195945050505050565b83815261557a6020820184615450565b606081019190915260800192915050565b6001600160a01b0391909116815260200190565b6001600160a01b03929092168252602082015260400190565b6001600160a01b0392831681529116602082015260400190565b6001600160a01b039290921682526001600160601b0316602082015260400190565b604081016125888284615450565b602081526000610fb96020830184615473565b9182526001600160401b0316602082015260400190565b602081526000610fb960208301846154a3565b6020815260ff82511660208201526020820151604082015260018060a01b0360408301511660608201526000606083015160c0608084015261568460e084018261540c565b60808501516001600160601b0390811660a0868101919091529095015190941660c0909301929092525090919050565b9182526001600160a01b0316602082015260400190565b82815260
608101610fb96020830184615450565b82815260406020820152600061341a6040830184615473565b86815285602082015261ffff85166040820152600063ffffffff808616606084015280851660808401525060c060a083015261478a60c08301846154a3565b878152602081018790526040810186905263ffffffff8581166060830152841660808201526001600160a01b03831660a082015260e060c08201819052600090613ecc908301846154a3565b8781526001600160401b03871660208201526040810186905263ffffffff8581166060830152841660808201526001600160a01b03831660a082015260e060c08201819052600090613ecc908301846154a3565b6001600160601b038681168252851660208201526001600160401b03841660408201526001600160a01b038316606082015260a0608082018190526000906158219083018461540c565b979650505050505050565b6000808335601e1984360301811261584357600080fd5b8301803591506001600160401b0382111561585d57600080fd5b60200191503681900382131561587257600080fd5b9250929050565b60405160c081016001600160401b038111828210171561589b5761589b615b38565b60405290565b60405161012081016001600160401b038111828210171561589b5761589b615b38565b604051601f8201601f191681016001600160401b03811182821017156158ec576158ec615b38565b604052919050565b6000808585111561590457600080fd5b8386111561591157600080fd5b5050820193919092039150565b6000821982111561593157615931615ae0565b500190565b60006001600160401b0380831681851680830382111561550757615507615ae0565b600060ff821660ff84168060ff0382111561597557615975615ae0565b019392505050565b60006001600160601b0382811684821680830382111561550757615507615ae0565b6000826159ae576159ae615af6565b500490565b60008160001904831182151516156159cd576159cd615ae0565b500290565b6000828210156159e4576159e4615ae0565b500390565b600063ffffffff83811690831681811015615a0657615a06615ae0565b039392505050565b60006001600160601b0383811690831681811015615a0657615a06615ae0565b6001600160e01b03198135818116916004851015615a565780818660040360031b1b83161692505b505092915050565b60005b83811015615a79578181015183820152602001615a61565b83811115612ae05750506000910152565b6000600019821415615a9e57615a9e615ae0565b5060010190565b60006001600160401b038083168181141561
5ac257615ac2615ae0565b6001019392505050565b600082615adb57615adb615af6565b500690565b634e487b7160e01b600052601160045260246000fd5b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052603160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b634e487b7160e01b600052604160045260246000fd5b6001600160a01b0381168114610a5957600080fd5b8015158114610a5957600080fdfe307866666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000806000a", +} + +var VRFCoordinatorV25ABI = VRFCoordinatorV25MetaData.ABI + +var VRFCoordinatorV25Bin = VRFCoordinatorV25MetaData.Bin + +func DeployVRFCoordinatorV25(auth *bind.TransactOpts, backend bind.ContractBackend, blockhashStore common.Address) (common.Address, *types.Transaction, *VRFCoordinatorV25, error) { + parsed, err := VRFCoordinatorV25MetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFCoordinatorV25Bin), backend, blockhashStore) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFCoordinatorV25{address: address, abi: *parsed, VRFCoordinatorV25Caller: VRFCoordinatorV25Caller{contract: contract}, VRFCoordinatorV25Transactor: VRFCoordinatorV25Transactor{contract: contract}, VRFCoordinatorV25Filterer: VRFCoordinatorV25Filterer{contract: contract}}, nil +} + +type VRFCoordinatorV25 struct { + address common.Address + abi abi.ABI + VRFCoordinatorV25Caller + VRFCoordinatorV25Transactor + VRFCoordinatorV25Filterer +} + +type VRFCoordinatorV25Caller struct { + contract *bind.BoundContract +} + +type VRFCoordinatorV25Transactor struct { + contract *bind.BoundContract +} + +type VRFCoordinatorV25Filterer struct { + contract *bind.BoundContract +} + +type VRFCoordinatorV25Session struct { + 
Contract *VRFCoordinatorV25 + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorV25CallerSession struct { + Contract *VRFCoordinatorV25Caller + CallOpts bind.CallOpts +} + +type VRFCoordinatorV25TransactorSession struct { + Contract *VRFCoordinatorV25Transactor + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorV25Raw struct { + Contract *VRFCoordinatorV25 +} + +type VRFCoordinatorV25CallerRaw struct { + Contract *VRFCoordinatorV25Caller +} + +type VRFCoordinatorV25TransactorRaw struct { + Contract *VRFCoordinatorV25Transactor +} + +func NewVRFCoordinatorV25(address common.Address, backend bind.ContractBackend) (*VRFCoordinatorV25, error) { + abi, err := abi.JSON(strings.NewReader(VRFCoordinatorV25ABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFCoordinatorV25(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25{address: address, abi: abi, VRFCoordinatorV25Caller: VRFCoordinatorV25Caller{contract: contract}, VRFCoordinatorV25Transactor: VRFCoordinatorV25Transactor{contract: contract}, VRFCoordinatorV25Filterer: VRFCoordinatorV25Filterer{contract: contract}}, nil +} + +func NewVRFCoordinatorV25Caller(address common.Address, caller bind.ContractCaller) (*VRFCoordinatorV25Caller, error) { + contract, err := bindVRFCoordinatorV25(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25Caller{contract: contract}, nil +} + +func NewVRFCoordinatorV25Transactor(address common.Address, transactor bind.ContractTransactor) (*VRFCoordinatorV25Transactor, error) { + contract, err := bindVRFCoordinatorV25(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25Transactor{contract: contract}, nil +} + +func NewVRFCoordinatorV25Filterer(address common.Address, filterer bind.ContractFilterer) (*VRFCoordinatorV25Filterer, error) { + contract, err := bindVRFCoordinatorV25(address, nil, nil, 
filterer) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25Filterer{contract: contract}, nil +} + +func bindVRFCoordinatorV25(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFCoordinatorV25MetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinatorV25.Contract.VRFCoordinatorV25Caller.contract.Call(opts, result, method, params...) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.VRFCoordinatorV25Transactor.contract.Transfer(opts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.VRFCoordinatorV25Transactor.contract.Transact(opts, method, params...) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinatorV25.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.contract.Transfer(opts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) BLOCKHASHSTORE(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "BLOCKHASH_STORE") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) BLOCKHASHSTORE() (common.Address, error) { + return _VRFCoordinatorV25.Contract.BLOCKHASHSTORE(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) BLOCKHASHSTORE() (common.Address, error) { + return _VRFCoordinatorV25.Contract.BLOCKHASHSTORE(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) PLI() (common.Address, error) { + return _VRFCoordinatorV25.Contract.PLI(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) PLI() (common.Address, error) { + return _VRFCoordinatorV25.Contract.PLI(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) PLINATIVEFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "PLI_NATIVE_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) PLINATIVEFEED() (common.Address, error) { + return _VRFCoordinatorV25.Contract.PLINATIVEFEED(&_VRFCoordinatorV25.CallOpts) +} + +func 
(_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) PLINATIVEFEED() (common.Address, error) { + return _VRFCoordinatorV25.Contract.PLINATIVEFEED(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "MAX_CONSUMERS") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) MAXCONSUMERS() (uint16, error) { + return _VRFCoordinatorV25.Contract.MAXCONSUMERS(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) MAXCONSUMERS() (uint16, error) { + return _VRFCoordinatorV25.Contract.MAXCONSUMERS(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) MAXNUMWORDS(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "MAX_NUM_WORDS") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) MAXNUMWORDS() (uint32, error) { + return _VRFCoordinatorV25.Contract.MAXNUMWORDS(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) MAXNUMWORDS() (uint32, error) { + return _VRFCoordinatorV25.Contract.MAXNUMWORDS(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) MAXREQUESTCONFIRMATIONS(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "MAX_REQUEST_CONFIRMATIONS") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) MAXREQUESTCONFIRMATIONS() (uint16, 
error) { + return _VRFCoordinatorV25.Contract.MAXREQUESTCONFIRMATIONS(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) MAXREQUESTCONFIRMATIONS() (uint16, error) { + return _VRFCoordinatorV25.Contract.MAXREQUESTCONFIRMATIONS(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) GetActiveSubscriptionIds(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "getActiveSubscriptionIds", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) GetActiveSubscriptionIds(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VRFCoordinatorV25.Contract.GetActiveSubscriptionIds(&_VRFCoordinatorV25.CallOpts, startIndex, maxCount) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) GetActiveSubscriptionIds(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _VRFCoordinatorV25.Contract.GetActiveSubscriptionIds(&_VRFCoordinatorV25.CallOpts, startIndex, maxCount) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) GetSubscription(opts *bind.CallOpts, subId *big.Int) (GetSubscription, + + error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "getSubscription", subId) + + outstruct := new(GetSubscription) + if err != nil { + return *outstruct, err + } + + outstruct.Balance = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.NativeBalance = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.ReqCount = *abi.ConvertType(out[2], new(uint64)).(*uint64) + outstruct.Owner = *abi.ConvertType(out[3], new(common.Address)).(*common.Address) + outstruct.Consumers = *abi.ConvertType(out[4], new([]common.Address)).(*[]common.Address) + + 
return *outstruct, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) GetSubscription(subId *big.Int) (GetSubscription, + + error) { + return _VRFCoordinatorV25.Contract.GetSubscription(&_VRFCoordinatorV25.CallOpts, subId) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) GetSubscription(subId *big.Int) (GetSubscription, + + error) { + return _VRFCoordinatorV25.Contract.GetSubscription(&_VRFCoordinatorV25.CallOpts, subId) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) HashOfKey(opts *bind.CallOpts, publicKey [2]*big.Int) ([32]byte, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "hashOfKey", publicKey) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) HashOfKey(publicKey [2]*big.Int) ([32]byte, error) { + return _VRFCoordinatorV25.Contract.HashOfKey(&_VRFCoordinatorV25.CallOpts, publicKey) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) HashOfKey(publicKey [2]*big.Int) ([32]byte, error) { + return _VRFCoordinatorV25.Contract.HashOfKey(&_VRFCoordinatorV25.CallOpts, publicKey) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) Owner() (common.Address, error) { + return _VRFCoordinatorV25.Contract.Owner(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) Owner() (common.Address, error) { + return _VRFCoordinatorV25.Contract.Owner(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) 
PendingRequestExists(opts *bind.CallOpts, subId *big.Int) (bool, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "pendingRequestExists", subId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) PendingRequestExists(subId *big.Int) (bool, error) { + return _VRFCoordinatorV25.Contract.PendingRequestExists(&_VRFCoordinatorV25.CallOpts, subId) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) PendingRequestExists(subId *big.Int) (bool, error) { + return _VRFCoordinatorV25.Contract.PendingRequestExists(&_VRFCoordinatorV25.CallOpts, subId) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) SConfig(opts *bind.CallOpts) (SConfig, + + error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "s_config") + + outstruct := new(SConfig) + if err != nil { + return *outstruct, err + } + + outstruct.MinimumRequestConfirmations = *abi.ConvertType(out[0], new(uint16)).(*uint16) + outstruct.MaxGasLimit = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ReentrancyLock = *abi.ConvertType(out[2], new(bool)).(*bool) + outstruct.StalenessSeconds = *abi.ConvertType(out[3], new(uint32)).(*uint32) + outstruct.GasAfterPaymentCalculation = *abi.ConvertType(out[4], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeNativePPM = *abi.ConvertType(out[5], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeLinkDiscountPPM = *abi.ConvertType(out[6], new(uint32)).(*uint32) + outstruct.NativePremiumPercentage = *abi.ConvertType(out[7], new(uint8)).(*uint8) + outstruct.LinkPremiumPercentage = *abi.ConvertType(out[8], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) SConfig() (SConfig, + + error) { + return _VRFCoordinatorV25.Contract.SConfig(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 
*VRFCoordinatorV25CallerSession) SConfig() (SConfig, + + error) { + return _VRFCoordinatorV25.Contract.SConfig(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) SCurrentSubNonce(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "s_currentSubNonce") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) SCurrentSubNonce() (uint64, error) { + return _VRFCoordinatorV25.Contract.SCurrentSubNonce(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) SCurrentSubNonce() (uint64, error) { + return _VRFCoordinatorV25.Contract.SCurrentSubNonce(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) SFallbackWeiPerUnitLink(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "s_fallbackWeiPerUnitLink") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) SFallbackWeiPerUnitLink() (*big.Int, error) { + return _VRFCoordinatorV25.Contract.SFallbackWeiPerUnitLink(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) SFallbackWeiPerUnitLink() (*big.Int, error) { + return _VRFCoordinatorV25.Contract.SFallbackWeiPerUnitLink(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) SProvingKeyHashes(opts *bind.CallOpts, arg0 *big.Int) ([32]byte, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "s_provingKeyHashes", arg0) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + 
+func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) SProvingKeyHashes(arg0 *big.Int) ([32]byte, error) { + return _VRFCoordinatorV25.Contract.SProvingKeyHashes(&_VRFCoordinatorV25.CallOpts, arg0) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) SProvingKeyHashes(arg0 *big.Int) ([32]byte, error) { + return _VRFCoordinatorV25.Contract.SProvingKeyHashes(&_VRFCoordinatorV25.CallOpts, arg0) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) SProvingKeys(opts *bind.CallOpts, arg0 [32]byte) (SProvingKeys, + + error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "s_provingKeys", arg0) + + outstruct := new(SProvingKeys) + if err != nil { + return *outstruct, err + } + + outstruct.Exists = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.MaxGas = *abi.ConvertType(out[1], new(uint64)).(*uint64) + + return *outstruct, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) SProvingKeys(arg0 [32]byte) (SProvingKeys, + + error) { + return _VRFCoordinatorV25.Contract.SProvingKeys(&_VRFCoordinatorV25.CallOpts, arg0) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) SProvingKeys(arg0 [32]byte) (SProvingKeys, + + error) { + return _VRFCoordinatorV25.Contract.SProvingKeys(&_VRFCoordinatorV25.CallOpts, arg0) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) SRequestCommitments(opts *bind.CallOpts, arg0 *big.Int) ([32]byte, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "s_requestCommitments", arg0) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) SRequestCommitments(arg0 *big.Int) ([32]byte, error) { + return _VRFCoordinatorV25.Contract.SRequestCommitments(&_VRFCoordinatorV25.CallOpts, arg0) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) SRequestCommitments(arg0 *big.Int) ([32]byte, error) { 
+ return _VRFCoordinatorV25.Contract.SRequestCommitments(&_VRFCoordinatorV25.CallOpts, arg0) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) STotalBalance(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "s_totalBalance") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) STotalBalance() (*big.Int, error) { + return _VRFCoordinatorV25.Contract.STotalBalance(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) STotalBalance() (*big.Int, error) { + return _VRFCoordinatorV25.Contract.STotalBalance(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Caller) STotalNativeBalance(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinatorV25.contract.Call(opts, &out, "s_totalNativeBalance") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) STotalNativeBalance() (*big.Int, error) { + return _VRFCoordinatorV25.Contract.STotalNativeBalance(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25CallerSession) STotalNativeBalance() (*big.Int, error) { + return _VRFCoordinatorV25.Contract.STotalNativeBalance(&_VRFCoordinatorV25.CallOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) AcceptOwnership() (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.AcceptOwnership(&_VRFCoordinatorV25.TransactOpts) +} + +func (_VRFCoordinatorV25 
*VRFCoordinatorV25TransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.AcceptOwnership(&_VRFCoordinatorV25.TransactOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "acceptSubscriptionOwnerTransfer", subId) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) AcceptSubscriptionOwnerTransfer(subId *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.AcceptSubscriptionOwnerTransfer(&_VRFCoordinatorV25.TransactOpts, subId) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) AcceptSubscriptionOwnerTransfer(subId *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.AcceptSubscriptionOwnerTransfer(&_VRFCoordinatorV25.TransactOpts, subId) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) AddConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "addConsumer", subId, consumer) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) AddConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.AddConsumer(&_VRFCoordinatorV25.TransactOpts, subId, consumer) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) AddConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.AddConsumer(&_VRFCoordinatorV25.TransactOpts, subId, consumer) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) CancelSubscription(opts *bind.TransactOpts, subId *big.Int, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "cancelSubscription", subId, to) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) 
CancelSubscription(subId *big.Int, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.CancelSubscription(&_VRFCoordinatorV25.TransactOpts, subId, to) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) CancelSubscription(subId *big.Int, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.CancelSubscription(&_VRFCoordinatorV25.TransactOpts, subId, to) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "createSubscription") +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) CreateSubscription() (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.CreateSubscription(&_VRFCoordinatorV25.TransactOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) CreateSubscription() (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.CreateSubscription(&_VRFCoordinatorV25.TransactOpts) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) DeregisterMigratableCoordinator(opts *bind.TransactOpts, target common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "deregisterMigratableCoordinator", target) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) DeregisterMigratableCoordinator(target common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.DeregisterMigratableCoordinator(&_VRFCoordinatorV25.TransactOpts, target) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) DeregisterMigratableCoordinator(target common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.DeregisterMigratableCoordinator(&_VRFCoordinatorV25.TransactOpts, target) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) DeregisterProvingKey(opts *bind.TransactOpts, publicProvingKey [2]*big.Int) (*types.Transaction, 
error) { + return _VRFCoordinatorV25.contract.Transact(opts, "deregisterProvingKey", publicProvingKey) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) DeregisterProvingKey(publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.DeregisterProvingKey(&_VRFCoordinatorV25.TransactOpts, publicProvingKey) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) DeregisterProvingKey(publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.DeregisterProvingKey(&_VRFCoordinatorV25.TransactOpts, publicProvingKey) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) FulfillRandomWords(opts *bind.TransactOpts, proof VRFProof, rc VRFCoordinatorV25RequestCommitment, onlyPremium bool) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "fulfillRandomWords", proof, rc, onlyPremium) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) FulfillRandomWords(proof VRFProof, rc VRFCoordinatorV25RequestCommitment, onlyPremium bool) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.FulfillRandomWords(&_VRFCoordinatorV25.TransactOpts, proof, rc, onlyPremium) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) FulfillRandomWords(proof VRFProof, rc VRFCoordinatorV25RequestCommitment, onlyPremium bool) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.FulfillRandomWords(&_VRFCoordinatorV25.TransactOpts, proof, rc, onlyPremium) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) FundSubscriptionWithNative(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "fundSubscriptionWithNative", subId) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) FundSubscriptionWithNative(subId *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.FundSubscriptionWithNative(&_VRFCoordinatorV25.TransactOpts, subId) 
+} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) FundSubscriptionWithNative(subId *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.FundSubscriptionWithNative(&_VRFCoordinatorV25.TransactOpts, subId) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) Migrate(opts *bind.TransactOpts, subId *big.Int, newCoordinator common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "migrate", subId, newCoordinator) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) Migrate(subId *big.Int, newCoordinator common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.Migrate(&_VRFCoordinatorV25.TransactOpts, subId, newCoordinator) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) Migrate(subId *big.Int, newCoordinator common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.Migrate(&_VRFCoordinatorV25.TransactOpts, subId, newCoordinator) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "onTokenTransfer", arg0, amount, data) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.OnTokenTransfer(&_VRFCoordinatorV25.TransactOpts, arg0, amount, data) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.OnTokenTransfer(&_VRFCoordinatorV25.TransactOpts, arg0, amount, data) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) OwnerCancelSubscription(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) { + return 
_VRFCoordinatorV25.contract.Transact(opts, "ownerCancelSubscription", subId) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) OwnerCancelSubscription(subId *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.OwnerCancelSubscription(&_VRFCoordinatorV25.TransactOpts, subId) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) OwnerCancelSubscription(subId *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.OwnerCancelSubscription(&_VRFCoordinatorV25.TransactOpts, subId) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "recoverFunds", to) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RecoverFunds(&_VRFCoordinatorV25.TransactOpts, to) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RecoverFunds(&_VRFCoordinatorV25.TransactOpts, to) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) RecoverNativeFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "recoverNativeFunds", to) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) RecoverNativeFunds(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RecoverNativeFunds(&_VRFCoordinatorV25.TransactOpts, to) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) RecoverNativeFunds(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RecoverNativeFunds(&_VRFCoordinatorV25.TransactOpts, to) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) RegisterMigratableCoordinator(opts *bind.TransactOpts, target 
common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "registerMigratableCoordinator", target) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) RegisterMigratableCoordinator(target common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RegisterMigratableCoordinator(&_VRFCoordinatorV25.TransactOpts, target) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) RegisterMigratableCoordinator(target common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RegisterMigratableCoordinator(&_VRFCoordinatorV25.TransactOpts, target) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) RegisterProvingKey(opts *bind.TransactOpts, publicProvingKey [2]*big.Int, maxGas uint64) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "registerProvingKey", publicProvingKey, maxGas) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) RegisterProvingKey(publicProvingKey [2]*big.Int, maxGas uint64) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RegisterProvingKey(&_VRFCoordinatorV25.TransactOpts, publicProvingKey, maxGas) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) RegisterProvingKey(publicProvingKey [2]*big.Int, maxGas uint64) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RegisterProvingKey(&_VRFCoordinatorV25.TransactOpts, publicProvingKey, maxGas) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) RemoveConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "removeConsumer", subId, consumer) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) RemoveConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RemoveConsumer(&_VRFCoordinatorV25.TransactOpts, subId, consumer) +} + +func 
(_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) RemoveConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RemoveConsumer(&_VRFCoordinatorV25.TransactOpts, subId, consumer) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) RequestRandomWords(opts *bind.TransactOpts, req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "requestRandomWords", req) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) RequestRandomWords(req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RequestRandomWords(&_VRFCoordinatorV25.TransactOpts, req) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) RequestRandomWords(req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RequestRandomWords(&_VRFCoordinatorV25.TransactOpts, req) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "requestSubscriptionOwnerTransfer", subId, newOwner) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) RequestSubscriptionOwnerTransfer(subId *big.Int, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RequestSubscriptionOwnerTransfer(&_VRFCoordinatorV25.TransactOpts, subId, newOwner) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) RequestSubscriptionOwnerTransfer(subId *big.Int, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.RequestSubscriptionOwnerTransfer(&_VRFCoordinatorV25.TransactOpts, subId, newOwner) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) SetConfig(opts *bind.TransactOpts, minimumRequestConfirmations uint16, 
maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, fulfillmentFlatFeeNativePPM uint32, fulfillmentFlatFeeLinkDiscountPPM uint32, nativePremiumPercentage uint8, linkPremiumPercentage uint8) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "setConfig", minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, fulfillmentFlatFeeNativePPM, fulfillmentFlatFeeLinkDiscountPPM, nativePremiumPercentage, linkPremiumPercentage) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, fulfillmentFlatFeeNativePPM uint32, fulfillmentFlatFeeLinkDiscountPPM uint32, nativePremiumPercentage uint8, linkPremiumPercentage uint8) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.SetConfig(&_VRFCoordinatorV25.TransactOpts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, fulfillmentFlatFeeNativePPM, fulfillmentFlatFeeLinkDiscountPPM, nativePremiumPercentage, linkPremiumPercentage) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, fulfillmentFlatFeeNativePPM uint32, fulfillmentFlatFeeLinkDiscountPPM uint32, nativePremiumPercentage uint8, linkPremiumPercentage uint8) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.SetConfig(&_VRFCoordinatorV25.TransactOpts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, fulfillmentFlatFeeNativePPM, fulfillmentFlatFeeLinkDiscountPPM, nativePremiumPercentage, linkPremiumPercentage) +} + +func (_VRFCoordinatorV25 
*VRFCoordinatorV25Transactor) SetPLIAndPLINativeFeed(opts *bind.TransactOpts, link common.Address, linkNativeFeed common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "setPLIAndPLINativeFeed", link, linkNativeFeed) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) SetPLIAndPLINativeFeed(link common.Address, linkNativeFeed common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.SetPLIAndPLINativeFeed(&_VRFCoordinatorV25.TransactOpts, link, linkNativeFeed) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) SetPLIAndPLINativeFeed(link common.Address, linkNativeFeed common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.SetPLIAndPLINativeFeed(&_VRFCoordinatorV25.TransactOpts, link, linkNativeFeed) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.TransferOwnership(&_VRFCoordinatorV25.TransactOpts, to) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.TransferOwnership(&_VRFCoordinatorV25.TransactOpts, to) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) Withdraw(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "withdraw", recipient) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) Withdraw(recipient common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.Withdraw(&_VRFCoordinatorV25.TransactOpts, recipient) +} + +func (_VRFCoordinatorV25 
*VRFCoordinatorV25TransactorSession) Withdraw(recipient common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.Withdraw(&_VRFCoordinatorV25.TransactOpts, recipient) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Transactor) WithdrawNative(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.contract.Transact(opts, "withdrawNative", recipient) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Session) WithdrawNative(recipient common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.WithdrawNative(&_VRFCoordinatorV25.TransactOpts, recipient) +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25TransactorSession) WithdrawNative(recipient common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV25.Contract.WithdrawNative(&_VRFCoordinatorV25.TransactOpts, recipient) +} + +type VRFCoordinatorV25ConfigSetIterator struct { + Event *VRFCoordinatorV25ConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25ConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25ConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25ConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25ConfigSetIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25ConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil 
+} + +type VRFCoordinatorV25ConfigSet struct { + MinimumRequestConfirmations uint16 + MaxGasLimit uint32 + StalenessSeconds uint32 + GasAfterPaymentCalculation uint32 + FallbackWeiPerUnitLink *big.Int + FulfillmentFlatFeeNativePPM uint32 + FulfillmentFlatFeeLinkDiscountPPM uint32 + NativePremiumPercentage uint8 + LinkPremiumPercentage uint8 + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterConfigSet(opts *bind.FilterOpts) (*VRFCoordinatorV25ConfigSetIterator, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &VRFCoordinatorV25ConfigSetIterator{contract: _VRFCoordinatorV25.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25ConfigSet) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25ConfigSet) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseConfigSet(log types.Log) (*VRFCoordinatorV25ConfigSet, error) { + event := new(VRFCoordinatorV25ConfigSet) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25CoordinatorDeregisteredIterator struct { + Event *VRFCoordinatorV25CoordinatorDeregistered + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25CoordinatorDeregisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25CoordinatorDeregistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25CoordinatorDeregistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25CoordinatorDeregisteredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25CoordinatorDeregisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25CoordinatorDeregistered struct { + CoordinatorAddress common.Address + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterCoordinatorDeregistered(opts *bind.FilterOpts) (*VRFCoordinatorV25CoordinatorDeregisteredIterator, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "CoordinatorDeregistered") + if err != nil { + return nil, err + } + return &VRFCoordinatorV25CoordinatorDeregisteredIterator{contract: _VRFCoordinatorV25.contract, event: "CoordinatorDeregistered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchCoordinatorDeregistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25CoordinatorDeregistered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "CoordinatorDeregistered") + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25CoordinatorDeregistered) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "CoordinatorDeregistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseCoordinatorDeregistered(log types.Log) (*VRFCoordinatorV25CoordinatorDeregistered, error) { + event := new(VRFCoordinatorV25CoordinatorDeregistered) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "CoordinatorDeregistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25CoordinatorRegisteredIterator struct { + Event *VRFCoordinatorV25CoordinatorRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25CoordinatorRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25CoordinatorRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25CoordinatorRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25CoordinatorRegisteredIterator) Error() error { + return it.fail +} + +func (it 
*VRFCoordinatorV25CoordinatorRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25CoordinatorRegistered struct { + CoordinatorAddress common.Address + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterCoordinatorRegistered(opts *bind.FilterOpts) (*VRFCoordinatorV25CoordinatorRegisteredIterator, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "CoordinatorRegistered") + if err != nil { + return nil, err + } + return &VRFCoordinatorV25CoordinatorRegisteredIterator{contract: _VRFCoordinatorV25.contract, event: "CoordinatorRegistered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchCoordinatorRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25CoordinatorRegistered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "CoordinatorRegistered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25CoordinatorRegistered) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "CoordinatorRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseCoordinatorRegistered(log types.Log) (*VRFCoordinatorV25CoordinatorRegistered, error) { + event := new(VRFCoordinatorV25CoordinatorRegistered) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "CoordinatorRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25FundsRecoveredIterator struct { + Event *VRFCoordinatorV25FundsRecovered + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25FundsRecoveredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25FundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25FundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25FundsRecoveredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25FundsRecoveredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25FundsRecovered struct { + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterFundsRecovered(opts *bind.FilterOpts) (*VRFCoordinatorV25FundsRecoveredIterator, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return &VRFCoordinatorV25FundsRecoveredIterator{contract: _VRFCoordinatorV25.contract, event: "FundsRecovered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25FundsRecovered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(VRFCoordinatorV25FundsRecovered) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseFundsRecovered(log types.Log) (*VRFCoordinatorV25FundsRecovered, error) { + event := new(VRFCoordinatorV25FundsRecovered) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25MigrationCompletedIterator struct { + Event *VRFCoordinatorV25MigrationCompleted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25MigrationCompletedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25MigrationCompleted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25MigrationCompleted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25MigrationCompletedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25MigrationCompletedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25MigrationCompleted struct { + NewCoordinator common.Address + SubId *big.Int + Raw types.Log +} + +func 
(_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterMigrationCompleted(opts *bind.FilterOpts) (*VRFCoordinatorV25MigrationCompletedIterator, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "MigrationCompleted") + if err != nil { + return nil, err + } + return &VRFCoordinatorV25MigrationCompletedIterator{contract: _VRFCoordinatorV25.contract, event: "MigrationCompleted", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchMigrationCompleted(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25MigrationCompleted) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "MigrationCompleted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25MigrationCompleted) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "MigrationCompleted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseMigrationCompleted(log types.Log) (*VRFCoordinatorV25MigrationCompleted, error) { + event := new(VRFCoordinatorV25MigrationCompleted) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "MigrationCompleted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25NativeFundsRecoveredIterator struct { + Event *VRFCoordinatorV25NativeFundsRecovered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25NativeFundsRecoveredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + 
case log := <-it.logs: + it.Event = new(VRFCoordinatorV25NativeFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25NativeFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25NativeFundsRecoveredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25NativeFundsRecoveredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25NativeFundsRecovered struct { + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterNativeFundsRecovered(opts *bind.FilterOpts) (*VRFCoordinatorV25NativeFundsRecoveredIterator, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "NativeFundsRecovered") + if err != nil { + return nil, err + } + return &VRFCoordinatorV25NativeFundsRecoveredIterator{contract: _VRFCoordinatorV25.contract, event: "NativeFundsRecovered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchNativeFundsRecovered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25NativeFundsRecovered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "NativeFundsRecovered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25NativeFundsRecovered) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "NativeFundsRecovered", log); err != nil { + return err + } + event.Raw = log 
+ + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseNativeFundsRecovered(log types.Log) (*VRFCoordinatorV25NativeFundsRecovered, error) { + event := new(VRFCoordinatorV25NativeFundsRecovered) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "NativeFundsRecovered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25OwnershipTransferRequestedIterator struct { + Event *VRFCoordinatorV25OwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25OwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25OwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25OwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25OwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25OwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25OwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterOwnershipTransferRequested(opts 
*bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorV25OwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25OwnershipTransferRequestedIterator{contract: _VRFCoordinatorV25.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25OwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25OwnershipTransferRequested) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseOwnershipTransferRequested(log types.Log) (*VRFCoordinatorV25OwnershipTransferRequested, error) { + event := 
new(VRFCoordinatorV25OwnershipTransferRequested) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25OwnershipTransferredIterator struct { + Event *VRFCoordinatorV25OwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25OwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25OwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25OwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25OwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25OwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25OwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorV25OwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, 
"OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25OwnershipTransferredIterator{contract: _VRFCoordinatorV25.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25OwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25OwnershipTransferred) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseOwnershipTransferred(log types.Log) (*VRFCoordinatorV25OwnershipTransferred, error) { + event := new(VRFCoordinatorV25OwnershipTransferred) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25ProvingKeyDeregisteredIterator struct { + Event *VRFCoordinatorV25ProvingKeyDeregistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*VRFCoordinatorV25ProvingKeyDeregisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25ProvingKeyDeregistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25ProvingKeyDeregistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25ProvingKeyDeregisteredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25ProvingKeyDeregisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25ProvingKeyDeregistered struct { + KeyHash [32]byte + MaxGas uint64 + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterProvingKeyDeregistered(opts *bind.FilterOpts) (*VRFCoordinatorV25ProvingKeyDeregisteredIterator, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "ProvingKeyDeregistered") + if err != nil { + return nil, err + } + return &VRFCoordinatorV25ProvingKeyDeregisteredIterator{contract: _VRFCoordinatorV25.contract, event: "ProvingKeyDeregistered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchProvingKeyDeregistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25ProvingKeyDeregistered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "ProvingKeyDeregistered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(VRFCoordinatorV25ProvingKeyDeregistered) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "ProvingKeyDeregistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseProvingKeyDeregistered(log types.Log) (*VRFCoordinatorV25ProvingKeyDeregistered, error) { + event := new(VRFCoordinatorV25ProvingKeyDeregistered) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "ProvingKeyDeregistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25ProvingKeyRegisteredIterator struct { + Event *VRFCoordinatorV25ProvingKeyRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25ProvingKeyRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25ProvingKeyRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25ProvingKeyRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25ProvingKeyRegisteredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25ProvingKeyRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25ProvingKeyRegistered struct { + KeyHash 
[32]byte + MaxGas uint64 + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterProvingKeyRegistered(opts *bind.FilterOpts) (*VRFCoordinatorV25ProvingKeyRegisteredIterator, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "ProvingKeyRegistered") + if err != nil { + return nil, err + } + return &VRFCoordinatorV25ProvingKeyRegisteredIterator{contract: _VRFCoordinatorV25.contract, event: "ProvingKeyRegistered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchProvingKeyRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25ProvingKeyRegistered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "ProvingKeyRegistered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25ProvingKeyRegistered) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "ProvingKeyRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseProvingKeyRegistered(log types.Log) (*VRFCoordinatorV25ProvingKeyRegistered, error) { + event := new(VRFCoordinatorV25ProvingKeyRegistered) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "ProvingKeyRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25RandomWordsFulfilledIterator struct { + Event *VRFCoordinatorV25RandomWordsFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25RandomWordsFulfilledIterator) Next() 
bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25RandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25RandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25RandomWordsFulfilledIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25RandomWordsFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25RandomWordsFulfilled struct { + RequestId *big.Int + OutputSeed *big.Int + SubId *big.Int + Payment *big.Int + Success bool + OnlyPremium bool + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int, subId []*big.Int) (*VRFCoordinatorV25RandomWordsFulfilledIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "RandomWordsFulfilled", requestIdRule, subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25RandomWordsFulfilledIterator{contract: _VRFCoordinatorV25.contract, event: "RandomWordsFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25RandomWordsFulfilled, 
requestId []*big.Int, subId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "RandomWordsFulfilled", requestIdRule, subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25RandomWordsFulfilled) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseRandomWordsFulfilled(log types.Log) (*VRFCoordinatorV25RandomWordsFulfilled, error) { + event := new(VRFCoordinatorV25RandomWordsFulfilled) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25RandomWordsRequestedIterator struct { + Event *VRFCoordinatorV25RandomWordsRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25RandomWordsRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25RandomWordsRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false 
+ } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25RandomWordsRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25RandomWordsRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25RandomWordsRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25RandomWordsRequested struct { + KeyHash [32]byte + RequestId *big.Int + PreSeed *big.Int + SubId *big.Int + MinimumRequestConfirmations uint16 + CallbackGasLimit uint32 + NumWords uint32 + ExtraArgs []byte + Sender common.Address + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []*big.Int, sender []common.Address) (*VRFCoordinatorV25RandomWordsRequestedIterator, error) { + + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "RandomWordsRequested", keyHashRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25RandomWordsRequestedIterator{contract: _VRFCoordinatorV25.contract, event: "RandomWordsRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25RandomWordsRequested, keyHash [][32]byte, subId []*big.Int, sender []common.Address) (event.Subscription, error) { + + var 
keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "RandomWordsRequested", keyHashRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25RandomWordsRequested) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "RandomWordsRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseRandomWordsRequested(log types.Log) (*VRFCoordinatorV25RandomWordsRequested, error) { + event := new(VRFCoordinatorV25RandomWordsRequested) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "RandomWordsRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25SubscriptionCanceledIterator struct { + Event *VRFCoordinatorV25SubscriptionCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25SubscriptionCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + 
return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25SubscriptionCanceledIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25SubscriptionCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25SubscriptionCanceled struct { + SubId *big.Int + To common.Address + AmountLink *big.Int + AmountNative *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterSubscriptionCanceled(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionCanceledIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "SubscriptionCanceled", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25SubscriptionCanceledIterator{contract: _VRFCoordinatorV25.contract, event: "SubscriptionCanceled", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionCanceled, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "SubscriptionCanceled", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25SubscriptionCanceled) + if err := 
_VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseSubscriptionCanceled(log types.Log) (*VRFCoordinatorV25SubscriptionCanceled, error) { + event := new(VRFCoordinatorV25SubscriptionCanceled) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25SubscriptionConsumerAddedIterator struct { + Event *VRFCoordinatorV25SubscriptionConsumerAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25SubscriptionConsumerAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25SubscriptionConsumerAddedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25SubscriptionConsumerAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25SubscriptionConsumerAdded struct { + SubId *big.Int + Consumer 
common.Address + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionConsumerAddedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "SubscriptionConsumerAdded", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25SubscriptionConsumerAddedIterator{contract: _VRFCoordinatorV25.contract, event: "SubscriptionConsumerAdded", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionConsumerAdded, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "SubscriptionConsumerAdded", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25SubscriptionConsumerAdded) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseSubscriptionConsumerAdded(log types.Log) (*VRFCoordinatorV25SubscriptionConsumerAdded, error) { + event := new(VRFCoordinatorV25SubscriptionConsumerAdded) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return nil, err + 
} + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25SubscriptionConsumerRemovedIterator struct { + Event *VRFCoordinatorV25SubscriptionConsumerRemoved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25SubscriptionConsumerRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25SubscriptionConsumerRemovedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25SubscriptionConsumerRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25SubscriptionConsumerRemoved struct { + SubId *big.Int + Consumer common.Address + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionConsumerRemovedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "SubscriptionConsumerRemoved", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25SubscriptionConsumerRemovedIterator{contract: _VRFCoordinatorV25.contract, event: "SubscriptionConsumerRemoved", 
logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionConsumerRemoved, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "SubscriptionConsumerRemoved", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25SubscriptionConsumerRemoved) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseSubscriptionConsumerRemoved(log types.Log) (*VRFCoordinatorV25SubscriptionConsumerRemoved, error) { + event := new(VRFCoordinatorV25SubscriptionConsumerRemoved) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25SubscriptionCreatedIterator struct { + Event *VRFCoordinatorV25SubscriptionCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25SubscriptionCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return 
false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25SubscriptionCreatedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25SubscriptionCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25SubscriptionCreated struct { + SubId *big.Int + Owner common.Address + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterSubscriptionCreated(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionCreatedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "SubscriptionCreated", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25SubscriptionCreatedIterator{contract: _VRFCoordinatorV25.contract, event: "SubscriptionCreated", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionCreated, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "SubscriptionCreated", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25SubscriptionCreated) + if err := 
_VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseSubscriptionCreated(log types.Log) (*VRFCoordinatorV25SubscriptionCreated, error) { + event := new(VRFCoordinatorV25SubscriptionCreated) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25SubscriptionFundedIterator struct { + Event *VRFCoordinatorV25SubscriptionFunded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25SubscriptionFundedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25SubscriptionFundedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25SubscriptionFundedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25SubscriptionFunded struct { + SubId *big.Int + OldBalance *big.Int + NewBalance *big.Int + Raw types.Log +} + +func 
(_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterSubscriptionFunded(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionFundedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "SubscriptionFunded", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25SubscriptionFundedIterator{contract: _VRFCoordinatorV25.contract, event: "SubscriptionFunded", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionFunded, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "SubscriptionFunded", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25SubscriptionFunded) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseSubscriptionFunded(log types.Log) (*VRFCoordinatorV25SubscriptionFunded, error) { + event := new(VRFCoordinatorV25SubscriptionFunded) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25SubscriptionFundedWithNativeIterator struct { + Event 
*VRFCoordinatorV25SubscriptionFundedWithNative + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25SubscriptionFundedWithNativeIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionFundedWithNative) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionFundedWithNative) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25SubscriptionFundedWithNativeIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25SubscriptionFundedWithNativeIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25SubscriptionFundedWithNative struct { + SubId *big.Int + OldNativeBalance *big.Int + NewNativeBalance *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterSubscriptionFundedWithNative(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionFundedWithNativeIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "SubscriptionFundedWithNative", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25SubscriptionFundedWithNativeIterator{contract: _VRFCoordinatorV25.contract, event: "SubscriptionFundedWithNative", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 
*VRFCoordinatorV25Filterer) WatchSubscriptionFundedWithNative(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionFundedWithNative, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "SubscriptionFundedWithNative", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25SubscriptionFundedWithNative) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionFundedWithNative", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseSubscriptionFundedWithNative(log types.Log) (*VRFCoordinatorV25SubscriptionFundedWithNative, error) { + event := new(VRFCoordinatorV25SubscriptionFundedWithNative) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionFundedWithNative", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25SubscriptionOwnerTransferRequestedIterator struct { + Event *VRFCoordinatorV25SubscriptionOwnerTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25SubscriptionOwnerTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + 
return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV25SubscriptionOwnerTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25SubscriptionOwnerTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25SubscriptionOwnerTransferRequested struct { + SubId *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionOwnerTransferRequestedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "SubscriptionOwnerTransferRequested", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25SubscriptionOwnerTransferRequestedIterator{contract: _VRFCoordinatorV25.contract, event: "SubscriptionOwnerTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionOwnerTransferRequested, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "SubscriptionOwnerTransferRequested", subIdRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25SubscriptionOwnerTransferRequested) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseSubscriptionOwnerTransferRequested(log types.Log) (*VRFCoordinatorV25SubscriptionOwnerTransferRequested, error) { + event := new(VRFCoordinatorV25SubscriptionOwnerTransferRequested) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV25SubscriptionOwnerTransferredIterator struct { + Event *VRFCoordinatorV25SubscriptionOwnerTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV25SubscriptionOwnerTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV25SubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*VRFCoordinatorV25SubscriptionOwnerTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV25SubscriptionOwnerTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV25SubscriptionOwnerTransferred struct { + SubId *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionOwnerTransferredIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.FilterLogs(opts, "SubscriptionOwnerTransferred", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV25SubscriptionOwnerTransferredIterator{contract: _VRFCoordinatorV25.contract, event: "SubscriptionOwnerTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionOwnerTransferred, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV25.contract.WatchLogs(opts, "SubscriptionOwnerTransferred", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV25SubscriptionOwnerTransferred) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + 
}), nil +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25Filterer) ParseSubscriptionOwnerTransferred(log types.Log) (*VRFCoordinatorV25SubscriptionOwnerTransferred, error) { + event := new(VRFCoordinatorV25SubscriptionOwnerTransferred) + if err := _VRFCoordinatorV25.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetSubscription struct { + Balance *big.Int + NativeBalance *big.Int + ReqCount uint64 + Owner common.Address + Consumers []common.Address +} +type SConfig struct { + MinimumRequestConfirmations uint16 + MaxGasLimit uint32 + ReentrancyLock bool + StalenessSeconds uint32 + GasAfterPaymentCalculation uint32 + FulfillmentFlatFeeNativePPM uint32 + FulfillmentFlatFeeLinkDiscountPPM uint32 + NativePremiumPercentage uint8 + LinkPremiumPercentage uint8 +} +type SProvingKeys struct { + Exists bool + MaxGas uint64 +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFCoordinatorV25.abi.Events["ConfigSet"].ID: + return _VRFCoordinatorV25.ParseConfigSet(log) + case _VRFCoordinatorV25.abi.Events["CoordinatorDeregistered"].ID: + return _VRFCoordinatorV25.ParseCoordinatorDeregistered(log) + case _VRFCoordinatorV25.abi.Events["CoordinatorRegistered"].ID: + return _VRFCoordinatorV25.ParseCoordinatorRegistered(log) + case _VRFCoordinatorV25.abi.Events["FundsRecovered"].ID: + return _VRFCoordinatorV25.ParseFundsRecovered(log) + case _VRFCoordinatorV25.abi.Events["MigrationCompleted"].ID: + return _VRFCoordinatorV25.ParseMigrationCompleted(log) + case _VRFCoordinatorV25.abi.Events["NativeFundsRecovered"].ID: + return _VRFCoordinatorV25.ParseNativeFundsRecovered(log) + case _VRFCoordinatorV25.abi.Events["OwnershipTransferRequested"].ID: + return _VRFCoordinatorV25.ParseOwnershipTransferRequested(log) + case _VRFCoordinatorV25.abi.Events["OwnershipTransferred"].ID: + return 
_VRFCoordinatorV25.ParseOwnershipTransferred(log) + case _VRFCoordinatorV25.abi.Events["ProvingKeyDeregistered"].ID: + return _VRFCoordinatorV25.ParseProvingKeyDeregistered(log) + case _VRFCoordinatorV25.abi.Events["ProvingKeyRegistered"].ID: + return _VRFCoordinatorV25.ParseProvingKeyRegistered(log) + case _VRFCoordinatorV25.abi.Events["RandomWordsFulfilled"].ID: + return _VRFCoordinatorV25.ParseRandomWordsFulfilled(log) + case _VRFCoordinatorV25.abi.Events["RandomWordsRequested"].ID: + return _VRFCoordinatorV25.ParseRandomWordsRequested(log) + case _VRFCoordinatorV25.abi.Events["SubscriptionCanceled"].ID: + return _VRFCoordinatorV25.ParseSubscriptionCanceled(log) + case _VRFCoordinatorV25.abi.Events["SubscriptionConsumerAdded"].ID: + return _VRFCoordinatorV25.ParseSubscriptionConsumerAdded(log) + case _VRFCoordinatorV25.abi.Events["SubscriptionConsumerRemoved"].ID: + return _VRFCoordinatorV25.ParseSubscriptionConsumerRemoved(log) + case _VRFCoordinatorV25.abi.Events["SubscriptionCreated"].ID: + return _VRFCoordinatorV25.ParseSubscriptionCreated(log) + case _VRFCoordinatorV25.abi.Events["SubscriptionFunded"].ID: + return _VRFCoordinatorV25.ParseSubscriptionFunded(log) + case _VRFCoordinatorV25.abi.Events["SubscriptionFundedWithNative"].ID: + return _VRFCoordinatorV25.ParseSubscriptionFundedWithNative(log) + case _VRFCoordinatorV25.abi.Events["SubscriptionOwnerTransferRequested"].ID: + return _VRFCoordinatorV25.ParseSubscriptionOwnerTransferRequested(log) + case _VRFCoordinatorV25.abi.Events["SubscriptionOwnerTransferred"].ID: + return _VRFCoordinatorV25.ParseSubscriptionOwnerTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFCoordinatorV25ConfigSet) Topic() common.Hash { + return common.HexToHash("0x2c6b6b12413678366b05b145c5f00745bdd00e739131ab5de82484a50c9d78b6") +} + +func (VRFCoordinatorV25CoordinatorDeregistered) Topic() common.Hash { + return 
common.HexToHash("0xf80a1a97fd42251f3c33cda98635e7399253033a6774fe37cd3f650b5282af37") +} + +func (VRFCoordinatorV25CoordinatorRegistered) Topic() common.Hash { + return common.HexToHash("0xb7cabbfc11e66731fc77de0444614282023bcbd41d16781c753a431d0af01625") +} + +func (VRFCoordinatorV25FundsRecovered) Topic() common.Hash { + return common.HexToHash("0x59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600") +} + +func (VRFCoordinatorV25MigrationCompleted) Topic() common.Hash { + return common.HexToHash("0xd63ca8cb945956747ee69bfdc3ea754c24a4caf7418db70e46052f7850be4187") +} + +func (VRFCoordinatorV25NativeFundsRecovered) Topic() common.Hash { + return common.HexToHash("0x4aed7c8eed0496c8c19ea2681fcca25741c1602342e38b045d9f1e8e905d2e9c") +} + +func (VRFCoordinatorV25OwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFCoordinatorV25OwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFCoordinatorV25ProvingKeyDeregistered) Topic() common.Hash { + return common.HexToHash("0x9b6868e0eb737bcd72205360baa6bfd0ba4e4819a33ade2db384e8a8025639a5") +} + +func (VRFCoordinatorV25ProvingKeyRegistered) Topic() common.Hash { + return common.HexToHash("0x9b911b2c240bfbef3b6a8f7ed6ee321d1258bb2a3fe6becab52ac1cd3210afd3") +} + +func (VRFCoordinatorV25RandomWordsFulfilled) Topic() common.Hash { + return common.HexToHash("0x6c6b5394380e16e41988d8383648010de6f5c2e4814803be5de1c6b1c852db55") +} + +func (VRFCoordinatorV25RandomWordsRequested) Topic() common.Hash { + return common.HexToHash("0xeb0e3652e0f44f417695e6e90f2f42c99b65cd7169074c5a654b16b9748c3a4e") +} + +func (VRFCoordinatorV25SubscriptionCanceled) Topic() common.Hash { + return common.HexToHash("0x8c74ce8b8cf87f5eb001275c8be27eb34ea2b62bfab6814fcc62192bb63e81c4") +} + +func 
(VRFCoordinatorV25SubscriptionConsumerAdded) Topic() common.Hash { + return common.HexToHash("0x1e980d04aa7648e205713e5e8ea3808672ac163d10936d36f91b2c88ac1575e1") +} + +func (VRFCoordinatorV25SubscriptionConsumerRemoved) Topic() common.Hash { + return common.HexToHash("0x32158c6058347c1601b2d12bc696ac6901d8a9a9aa3ba10c27ab0a983e8425a7") +} + +func (VRFCoordinatorV25SubscriptionCreated) Topic() common.Hash { + return common.HexToHash("0x1d3015d7ba850fa198dc7b1a3f5d42779313a681035f77c8c03764c61005518d") +} + +func (VRFCoordinatorV25SubscriptionFunded) Topic() common.Hash { + return common.HexToHash("0x1ced9348ff549fceab2ac57cd3a9de38edaaab274b725ee82c23e8fc8c4eec7a") +} + +func (VRFCoordinatorV25SubscriptionFundedWithNative) Topic() common.Hash { + return common.HexToHash("0x7603b205d03651ee812f803fccde89f1012e545a9c99f0abfea9cedd0fd8e902") +} + +func (VRFCoordinatorV25SubscriptionOwnerTransferRequested) Topic() common.Hash { + return common.HexToHash("0x21a4dad170a6bf476c31bbcf4a16628295b0e450672eec25d7c93308e05344a1") +} + +func (VRFCoordinatorV25SubscriptionOwnerTransferred) Topic() common.Hash { + return common.HexToHash("0xd4114ab6e9af9f597c52041f32d62dc57c5c4e4c0d4427006069635e216c9386") +} + +func (_VRFCoordinatorV25 *VRFCoordinatorV25) Address() common.Address { + return _VRFCoordinatorV25.address +} + +type VRFCoordinatorV25Interface interface { + BLOCKHASHSTORE(opts *bind.CallOpts) (common.Address, error) + + PLI(opts *bind.CallOpts) (common.Address, error) + + PLINATIVEFEED(opts *bind.CallOpts) (common.Address, error) + + MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) + + MAXNUMWORDS(opts *bind.CallOpts) (uint32, error) + + MAXREQUESTCONFIRMATIONS(opts *bind.CallOpts) (uint16, error) + + GetActiveSubscriptionIds(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetSubscription(opts *bind.CallOpts, subId *big.Int) (GetSubscription, + + error) + + HashOfKey(opts *bind.CallOpts, publicKey [2]*big.Int) ([32]byte, error) + 
+ Owner(opts *bind.CallOpts) (common.Address, error) + + PendingRequestExists(opts *bind.CallOpts, subId *big.Int) (bool, error) + + SConfig(opts *bind.CallOpts) (SConfig, + + error) + + SCurrentSubNonce(opts *bind.CallOpts) (uint64, error) + + SFallbackWeiPerUnitLink(opts *bind.CallOpts) (*big.Int, error) + + SProvingKeyHashes(opts *bind.CallOpts, arg0 *big.Int) ([32]byte, error) + + SProvingKeys(opts *bind.CallOpts, arg0 [32]byte) (SProvingKeys, + + error) + + SRequestCommitments(opts *bind.CallOpts, arg0 *big.Int) ([32]byte, error) + + STotalBalance(opts *bind.CallOpts) (*big.Int, error) + + STotalNativeBalance(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) + + AddConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) + + CancelSubscription(opts *bind.TransactOpts, subId *big.Int, to common.Address) (*types.Transaction, error) + + CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) + + DeregisterMigratableCoordinator(opts *bind.TransactOpts, target common.Address) (*types.Transaction, error) + + DeregisterProvingKey(opts *bind.TransactOpts, publicProvingKey [2]*big.Int) (*types.Transaction, error) + + FulfillRandomWords(opts *bind.TransactOpts, proof VRFProof, rc VRFCoordinatorV25RequestCommitment, onlyPremium bool) (*types.Transaction, error) + + FundSubscriptionWithNative(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) + + Migrate(opts *bind.TransactOpts, subId *big.Int, newCoordinator common.Address) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + OwnerCancelSubscription(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts, to common.Address) 
(*types.Transaction, error) + + RecoverNativeFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + RegisterMigratableCoordinator(opts *bind.TransactOpts, target common.Address) (*types.Transaction, error) + + RegisterProvingKey(opts *bind.TransactOpts, publicProvingKey [2]*big.Int, maxGas uint64) (*types.Transaction, error) + + RemoveConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) + + RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int, newOwner common.Address) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, fulfillmentFlatFeeNativePPM uint32, fulfillmentFlatFeeLinkDiscountPPM uint32, nativePremiumPercentage uint8, linkPremiumPercentage uint8) (*types.Transaction, error) + + SetPLIAndPLINativeFeed(opts *bind.TransactOpts, link common.Address, linkNativeFeed common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Withdraw(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) + + WithdrawNative(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) + + FilterConfigSet(opts *bind.FilterOpts) (*VRFCoordinatorV25ConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25ConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*VRFCoordinatorV25ConfigSet, error) + + FilterCoordinatorDeregistered(opts *bind.FilterOpts) (*VRFCoordinatorV25CoordinatorDeregisteredIterator, error) + + WatchCoordinatorDeregistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25CoordinatorDeregistered) (event.Subscription, 
error) + + ParseCoordinatorDeregistered(log types.Log) (*VRFCoordinatorV25CoordinatorDeregistered, error) + + FilterCoordinatorRegistered(opts *bind.FilterOpts) (*VRFCoordinatorV25CoordinatorRegisteredIterator, error) + + WatchCoordinatorRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25CoordinatorRegistered) (event.Subscription, error) + + ParseCoordinatorRegistered(log types.Log) (*VRFCoordinatorV25CoordinatorRegistered, error) + + FilterFundsRecovered(opts *bind.FilterOpts) (*VRFCoordinatorV25FundsRecoveredIterator, error) + + WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25FundsRecovered) (event.Subscription, error) + + ParseFundsRecovered(log types.Log) (*VRFCoordinatorV25FundsRecovered, error) + + FilterMigrationCompleted(opts *bind.FilterOpts) (*VRFCoordinatorV25MigrationCompletedIterator, error) + + WatchMigrationCompleted(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25MigrationCompleted) (event.Subscription, error) + + ParseMigrationCompleted(log types.Log) (*VRFCoordinatorV25MigrationCompleted, error) + + FilterNativeFundsRecovered(opts *bind.FilterOpts) (*VRFCoordinatorV25NativeFundsRecoveredIterator, error) + + WatchNativeFundsRecovered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25NativeFundsRecovered) (event.Subscription, error) + + ParseNativeFundsRecovered(log types.Log) (*VRFCoordinatorV25NativeFundsRecovered, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorV25OwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25OwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFCoordinatorV25OwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) 
(*VRFCoordinatorV25OwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25OwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFCoordinatorV25OwnershipTransferred, error) + + FilterProvingKeyDeregistered(opts *bind.FilterOpts) (*VRFCoordinatorV25ProvingKeyDeregisteredIterator, error) + + WatchProvingKeyDeregistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25ProvingKeyDeregistered) (event.Subscription, error) + + ParseProvingKeyDeregistered(log types.Log) (*VRFCoordinatorV25ProvingKeyDeregistered, error) + + FilterProvingKeyRegistered(opts *bind.FilterOpts) (*VRFCoordinatorV25ProvingKeyRegisteredIterator, error) + + WatchProvingKeyRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25ProvingKeyRegistered) (event.Subscription, error) + + ParseProvingKeyRegistered(log types.Log) (*VRFCoordinatorV25ProvingKeyRegistered, error) + + FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int, subId []*big.Int) (*VRFCoordinatorV25RandomWordsFulfilledIterator, error) + + WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25RandomWordsFulfilled, requestId []*big.Int, subId []*big.Int) (event.Subscription, error) + + ParseRandomWordsFulfilled(log types.Log) (*VRFCoordinatorV25RandomWordsFulfilled, error) + + FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []*big.Int, sender []common.Address) (*VRFCoordinatorV25RandomWordsRequestedIterator, error) + + WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25RandomWordsRequested, keyHash [][32]byte, subId []*big.Int, sender []common.Address) (event.Subscription, error) + + ParseRandomWordsRequested(log types.Log) (*VRFCoordinatorV25RandomWordsRequested, error) + + FilterSubscriptionCanceled(opts *bind.FilterOpts, subId []*big.Int) 
(*VRFCoordinatorV25SubscriptionCanceledIterator, error) + + WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionCanceled, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionCanceled(log types.Log) (*VRFCoordinatorV25SubscriptionCanceled, error) + + FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionConsumerAddedIterator, error) + + WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionConsumerAdded, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionConsumerAdded(log types.Log) (*VRFCoordinatorV25SubscriptionConsumerAdded, error) + + FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionConsumerRemovedIterator, error) + + WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionConsumerRemoved, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionConsumerRemoved(log types.Log) (*VRFCoordinatorV25SubscriptionConsumerRemoved, error) + + FilterSubscriptionCreated(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionCreatedIterator, error) + + WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionCreated, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionCreated(log types.Log) (*VRFCoordinatorV25SubscriptionCreated, error) + + FilterSubscriptionFunded(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionFundedIterator, error) + + WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionFunded, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionFunded(log types.Log) (*VRFCoordinatorV25SubscriptionFunded, error) + + FilterSubscriptionFundedWithNative(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionFundedWithNativeIterator, error) + + 
WatchSubscriptionFundedWithNative(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionFundedWithNative, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionFundedWithNative(log types.Log) (*VRFCoordinatorV25SubscriptionFundedWithNative, error) + + FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionOwnerTransferRequestedIterator, error) + + WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionOwnerTransferRequested, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionOwnerTransferRequested(log types.Log) (*VRFCoordinatorV25SubscriptionOwnerTransferRequested, error) + + FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV25SubscriptionOwnerTransferredIterator, error) + + WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV25SubscriptionOwnerTransferred, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionOwnerTransferred(log types.Log) (*VRFCoordinatorV25SubscriptionOwnerTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_coordinator_v2_plus_v2_example/vrf_coordinator_v2_plus_v2_example.go b/core/gethwrappers/generated/vrf_coordinator_v2_plus_v2_example/vrf_coordinator_v2_plus_v2_example.go new file mode 100644 index 00000000..3d9efea9 --- /dev/null +++ b/core/gethwrappers/generated/vrf_coordinator_v2_plus_v2_example/vrf_coordinator_v2_plus_v2_example.go @@ -0,0 +1,484 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_coordinator_v2_plus_v2_example + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type VRFV2PlusClientRandomWordsRequest struct { + KeyHash [32]byte + SubId *big.Int + RequestConfirmations uint16 + CallbackGasLimit uint32 + NumWords uint32 + ExtraArgs []byte +} + +var VRFCoordinatorV2PlusV2ExampleMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"prevCoordinator\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"transferredValue\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"expectedValue\",\"type\":\"uint96\"}],\"name\":\"InvalidNativeBalance\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSubscription\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"requestVersion\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"expectedVersion\",\"type\":\"uint8\"}],\"name\":\"InvalidVersion\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"previousCoordinator\",\"type\":\"address\"}],\"name\":\"MustBePreviousCoordinator\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"SubscriptionIDCollisionFound\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"name\":\"fulfillRandomWords\",
\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"}],\"name\":\"generateFakeRandomness\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"getSubscription\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"linkBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"nativeBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint64\",\"name\":\"reqCount\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"encodedData\",\"type\":\"bytes\"}],\"name\":\"onMigration\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"extraArgs\",\"type\":\"bytes\"}],\"internalType\":\"structVRFV2PlusClient.RandomWordsRequest\",\"name\":\"req\",\"type\":\"tuple\"}],\"name\":\"requestRandomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_link\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\
"inputs\":[],\"name\":\"s_prevCoordinator\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requestConsumerMapping\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_subscriptions\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"linkBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"nativeBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint64\",\"name\":\"reqCount\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_totalLinkBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_totalNativeBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x6080604052600060035534801561001557600080fd5b506040516111d73803806111d783398101604081905261003491610081565b600580546001600160a01b039384166001600160a01b031991821617909155600480549290931691161790556100b4565b80516001600160a01b038116811461007c57600080fd5b919050565b6000806040838503121561009457600080fd5b61009d83610065565b91506100ab60208401610065565b90509250929050565b611114806100c36000396000f3fe6080604052600436106100c75760003560e01c8063ce3f471911610074578063dc311dd31161004e578063dc311dd314610361578063e89e106a14610392578063ed8b558f146103a857600080fd5b8063ce3f4719146102ff578063d6100d1c14610314578063da4f5e6d1461033457600080fd5b806386175f58116100a557806386175f581461026157806393f3acb6146102a45780639b1c385e146102d157600080fd5b80630495f265146100cc578063086597b3146101bd57806318e3dd271461020f575b600080fd5b3480156100d857600080fd5b506101636100e7366004610ec8565b600060208190529081526040902080546001909101546bffffffffffffffffffffffff808316926c01000000000000000000000000810490911691780100000000000000000000000000000000000000000000000090910467ffffffffffffffff169073ffffffffffffffffffffffffffffffffffffffff1684565b604080516bffffffffffffffffffffffff958616815294909316602085015267ffffffffffffffff9091169183019190915273ffffffffffffffffffffffffffffffffffffffff1660608201526080015b60405180910390f35b3480156101c957600080fd5b506004546101ea9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016101b4565b34801561021b57600080fd5b50600254610244906c0100000000000000000000000090046bffffffffffffffffffffffff1681565b6040516bffffffffffffffffffffffff90911681526020016101b4565b34801561026d57600080fd5b506101ea61027c366004610ec8565b60016020526000908152604090205473ffffffffffffffffffffffffffffffffffffffff1681565b3480156102b057600080fd5b506102c46102bf366004610ec8565b6103cd565b6040516101b49190610f1c565b3480156102dd57600080fd5b506102f16102ec366004610dca565b610478565b6040519081526020016101b4565b61031261030d366004610d58565b6105aa565b005b34801561032057600080fd5b50610
31261032f366004610ec8565b61095e565b34801561034057600080fd5b506005546101ea9073ffffffffffffffffffffffffffffffffffffffff1681565b34801561036d57600080fd5b5061038161037c366004610ec8565b6109e6565b6040516101b4959493929190610f50565b34801561039e57600080fd5b506102f160035481565b3480156103b457600080fd5b50600254610244906bffffffffffffffffffffffff1681565b60408051600180825281830190925260609160009190602080830190803683370190505090508260405160200161043b918152604060208201819052600a908201527f6e6f742072616e646f6d00000000000000000000000000000000000000000000606082015260800190565b6040516020818303038152906040528051906020012060001c81600081518110610467576104676110a9565b602090810291909101015292915050565b60208181013560009081528082526040808220815160a08101835281546bffffffffffffffffffffffff80821683526c01000000000000000000000000820416828701527801000000000000000000000000000000000000000000000000900467ffffffffffffffff1681840152600182015473ffffffffffffffffffffffffffffffffffffffff166060820152600282018054845181880281018801909552808552949586959294608086019390929183018282801561056f57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610544575b50505050508152505090508060400151600161058b919061102b565b67ffffffffffffffff1660408201526105a333610b42565b9392505050565b60045473ffffffffffffffffffffffffffffffffffffffff16331461062357600480546040517ff5828f73000000000000000000000000000000000000000000000000000000008152339281019290925273ffffffffffffffffffffffffffffffffffffffff1660248201526044015b60405180910390fd5b600061063182840184610e05565b9050806000015160ff166001146106835780516040517f8df4607c00000000000000000000000000000000000000000000000000000000815260ff90911660048201526001602482015260440161061a565b8060a001516bffffffffffffffffffffffff1634146106ea5760a08101516040517f6acf13500000000000000000000000000000000000000000000000000000000081523460048201526bffffffffffffffffffffffff909116602482015260440161061a565b602080820151600090815290819052604090206001015473fffffffffffffffff
fffffffffffffffffffffff161561074e576040517f4d5f486a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805160a080820183526080808501516bffffffffffffffffffffffff9081168452918501518216602080850191825260008587018181528888015173ffffffffffffffffffffffffffffffffffffffff9081166060808a019182528b0151968901968752848b0151845283855298909220875181549551925167ffffffffffffffff1678010000000000000000000000000000000000000000000000000277ffffffffffffffffffffffffffffffffffffffffffffffff9389166c01000000000000000000000000027fffffffffffffffff000000000000000000000000000000000000000000000000909716919098161794909417169490941782559451600182018054919094167fffffffffffffffffffffffff000000000000000000000000000000000000000090911617909255518051929391926108989260028501920190610bae565b50505060a081015160028054600c906108d09084906c0100000000000000000000000090046bffffffffffffffffffffffff16611057565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508060800151600260008282829054906101000a90046bffffffffffffffffffffffff1661092b9190611057565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550505050565b60008181526001602052604090205473ffffffffffffffffffffffffffffffffffffffff1680631fe543e383610993816103cd565b6040518363ffffffff1660e01b81526004016109b0929190610f2f565b600060405180830381600087803b1580156109ca57600080fd5b505af11580156109de573d6000803e3d6000fd5b505050505050565b60008181526020819052604081206001015481908190819060609073ffffffffffffffffffffffffffffffffffffffff16610a4d576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000868152602081815260409182902080546001820154600290920180548551818602810186019096528086526bffffffffffffffffffffffff808416966c01000000000000000000000000850490911695780100000000000000000000000000000000000000000000000090940467ffffffffffffffff169473ffffffffffffffffffffffffffffffffffffffff169390918391830182828015610b28576020028201919
06000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610afd575b505050505090509450945094509450945091939590929450565b60006003546001610b539190611013565b6003819055600081815260016020526040902080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff94909416939093179092555090565b828054828255906000526020600020908101928215610c28579160200282015b82811115610c2857825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190610bce565b50610c34929150610c38565b5090565b5b80821115610c345760008155600101610c39565b803573ffffffffffffffffffffffffffffffffffffffff81168114610c7157600080fd5b919050565b600082601f830112610c8757600080fd5b8135602067ffffffffffffffff80831115610ca457610ca46110d8565b8260051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f83011681018181108482111715610ce757610ce76110d8565b60405284815283810192508684018288018501891015610d0657600080fd5b600092505b85831015610d3057610d1c81610c4d565b845292840192600192909201918401610d0b565b50979650505050505050565b80356bffffffffffffffffffffffff81168114610c7157600080fd5b60008060208385031215610d6b57600080fd5b823567ffffffffffffffff80821115610d8357600080fd5b818501915085601f830112610d9757600080fd5b813581811115610da657600080fd5b866020828501011115610db857600080fd5b60209290920196919550909350505050565b600060208284031215610ddc57600080fd5b813567ffffffffffffffff811115610df357600080fd5b820160c081850312156105a357600080fd5b600060208284031215610e1757600080fd5b813567ffffffffffffffff80821115610e2f57600080fd5b9083019060c08286031215610e4357600080fd5b610e4b610fea565b823560ff81168114610e5c57600080fd5b815260208381013590820152610e7460408401610c4d565b6040820152606083013582811115610e8b57600080fd5b610e9787828601610c76565b606083015250610ea960808401610d3c565b6080820152610eba60a08401610d3c565b60a082015295945050505050565b600060208284031215610eda57600080fd5b5
035919050565b600081518084526020808501945080840160005b83811015610f1157815187529582019590820190600101610ef5565b509495945050505050565b6020815260006105a36020830184610ee1565b828152604060208201526000610f486040830184610ee1565b949350505050565b600060a082016bffffffffffffffffffffffff808916845260208189168186015267ffffffffffffffff8816604086015273ffffffffffffffffffffffffffffffffffffffff9150818716606086015260a0608086015282865180855260c087019150828801945060005b81811015610fd9578551851683529483019491830191600101610fbb565b50909b9a5050505050505050505050565b60405160c0810167ffffffffffffffff8111828210171561100d5761100d6110d8565b60405290565b600082198211156110265761102661107a565b500190565b600067ffffffffffffffff80831681851680830382111561104e5761104e61107a565b01949350505050565b60006bffffffffffffffffffffffff80831681851680830382111561104e5761104e5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFCoordinatorV2PlusV2ExampleABI = VRFCoordinatorV2PlusV2ExampleMetaData.ABI + +var VRFCoordinatorV2PlusV2ExampleBin = VRFCoordinatorV2PlusV2ExampleMetaData.Bin + +func DeployVRFCoordinatorV2PlusV2Example(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, prevCoordinator common.Address) (common.Address, *types.Transaction, *VRFCoordinatorV2PlusV2Example, error) { + parsed, err := VRFCoordinatorV2PlusV2ExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFCoordinatorV2PlusV2ExampleBin), backend, link, prevCoordinator) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, 
&VRFCoordinatorV2PlusV2Example{address: address, abi: *parsed, VRFCoordinatorV2PlusV2ExampleCaller: VRFCoordinatorV2PlusV2ExampleCaller{contract: contract}, VRFCoordinatorV2PlusV2ExampleTransactor: VRFCoordinatorV2PlusV2ExampleTransactor{contract: contract}, VRFCoordinatorV2PlusV2ExampleFilterer: VRFCoordinatorV2PlusV2ExampleFilterer{contract: contract}}, nil +} + +type VRFCoordinatorV2PlusV2Example struct { + address common.Address + abi abi.ABI + VRFCoordinatorV2PlusV2ExampleCaller + VRFCoordinatorV2PlusV2ExampleTransactor + VRFCoordinatorV2PlusV2ExampleFilterer +} + +type VRFCoordinatorV2PlusV2ExampleCaller struct { + contract *bind.BoundContract +} + +type VRFCoordinatorV2PlusV2ExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFCoordinatorV2PlusV2ExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFCoordinatorV2PlusV2ExampleSession struct { + Contract *VRFCoordinatorV2PlusV2Example + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorV2PlusV2ExampleCallerSession struct { + Contract *VRFCoordinatorV2PlusV2ExampleCaller + CallOpts bind.CallOpts +} + +type VRFCoordinatorV2PlusV2ExampleTransactorSession struct { + Contract *VRFCoordinatorV2PlusV2ExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorV2PlusV2ExampleRaw struct { + Contract *VRFCoordinatorV2PlusV2Example +} + +type VRFCoordinatorV2PlusV2ExampleCallerRaw struct { + Contract *VRFCoordinatorV2PlusV2ExampleCaller +} + +type VRFCoordinatorV2PlusV2ExampleTransactorRaw struct { + Contract *VRFCoordinatorV2PlusV2ExampleTransactor +} + +func NewVRFCoordinatorV2PlusV2Example(address common.Address, backend bind.ContractBackend) (*VRFCoordinatorV2PlusV2Example, error) { + abi, err := abi.JSON(strings.NewReader(VRFCoordinatorV2PlusV2ExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFCoordinatorV2PlusV2Example(address, backend, backend, backend) + if err != nil { + return nil, err + } + return 
&VRFCoordinatorV2PlusV2Example{address: address, abi: abi, VRFCoordinatorV2PlusV2ExampleCaller: VRFCoordinatorV2PlusV2ExampleCaller{contract: contract}, VRFCoordinatorV2PlusV2ExampleTransactor: VRFCoordinatorV2PlusV2ExampleTransactor{contract: contract}, VRFCoordinatorV2PlusV2ExampleFilterer: VRFCoordinatorV2PlusV2ExampleFilterer{contract: contract}}, nil +} + +func NewVRFCoordinatorV2PlusV2ExampleCaller(address common.Address, caller bind.ContractCaller) (*VRFCoordinatorV2PlusV2ExampleCaller, error) { + contract, err := bindVRFCoordinatorV2PlusV2Example(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusV2ExampleCaller{contract: contract}, nil +} + +func NewVRFCoordinatorV2PlusV2ExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFCoordinatorV2PlusV2ExampleTransactor, error) { + contract, err := bindVRFCoordinatorV2PlusV2Example(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusV2ExampleTransactor{contract: contract}, nil +} + +func NewVRFCoordinatorV2PlusV2ExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFCoordinatorV2PlusV2ExampleFilterer, error) { + contract, err := bindVRFCoordinatorV2PlusV2Example(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusV2ExampleFilterer{contract: contract}, nil +} + +func bindVRFCoordinatorV2PlusV2Example(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFCoordinatorV2PlusV2ExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return 
_VRFCoordinatorV2PlusV2Example.Contract.VRFCoordinatorV2PlusV2ExampleCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.VRFCoordinatorV2PlusV2ExampleTransactor.contract.Transfer(opts) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.VRFCoordinatorV2PlusV2ExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinatorV2PlusV2Example.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.contract.Transfer(opts) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCaller) GenerateFakeRandomness(opts *bind.CallOpts, requestID *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _VRFCoordinatorV2PlusV2Example.contract.Call(opts, &out, "generateFakeRandomness", requestID) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleSession) GenerateFakeRandomness(requestID *big.Int) ([]*big.Int, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.GenerateFakeRandomness(&_VRFCoordinatorV2PlusV2Example.CallOpts, requestID) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCallerSession) GenerateFakeRandomness(requestID *big.Int) ([]*big.Int, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.GenerateFakeRandomness(&_VRFCoordinatorV2PlusV2Example.CallOpts, requestID) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCaller) GetSubscription(opts *bind.CallOpts, subId *big.Int) (GetSubscription, + + error) { + var out []interface{} + err := _VRFCoordinatorV2PlusV2Example.contract.Call(opts, &out, "getSubscription", subId) + + outstruct := new(GetSubscription) + if err != nil { + return *outstruct, err + } + + outstruct.LinkBalance = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.NativeBalance = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.ReqCount = *abi.ConvertType(out[2], new(uint64)).(*uint64) + outstruct.Owner = *abi.ConvertType(out[3], new(common.Address)).(*common.Address) + outstruct.Consumers = *abi.ConvertType(out[4], new([]common.Address)).(*[]common.Address) + + return *outstruct, err + +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleSession) GetSubscription(subId *big.Int) (GetSubscription, + + error) { + return 
_VRFCoordinatorV2PlusV2Example.Contract.GetSubscription(&_VRFCoordinatorV2PlusV2Example.CallOpts, subId) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCallerSession) GetSubscription(subId *big.Int) (GetSubscription, + + error) { + return _VRFCoordinatorV2PlusV2Example.Contract.GetSubscription(&_VRFCoordinatorV2PlusV2Example.CallOpts, subId) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCaller) SLink(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorV2PlusV2Example.contract.Call(opts, &out, "s_link") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleSession) SLink() (common.Address, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.SLink(&_VRFCoordinatorV2PlusV2Example.CallOpts) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCallerSession) SLink() (common.Address, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.SLink(&_VRFCoordinatorV2PlusV2Example.CallOpts) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCaller) SPrevCoordinator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorV2PlusV2Example.contract.Call(opts, &out, "s_prevCoordinator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleSession) SPrevCoordinator() (common.Address, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.SPrevCoordinator(&_VRFCoordinatorV2PlusV2Example.CallOpts) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCallerSession) SPrevCoordinator() (common.Address, error) { + return 
_VRFCoordinatorV2PlusV2Example.Contract.SPrevCoordinator(&_VRFCoordinatorV2PlusV2Example.CallOpts) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCaller) SRequestConsumerMapping(opts *bind.CallOpts, arg0 *big.Int) (common.Address, error) { + var out []interface{} + err := _VRFCoordinatorV2PlusV2Example.contract.Call(opts, &out, "s_requestConsumerMapping", arg0) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleSession) SRequestConsumerMapping(arg0 *big.Int) (common.Address, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.SRequestConsumerMapping(&_VRFCoordinatorV2PlusV2Example.CallOpts, arg0) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCallerSession) SRequestConsumerMapping(arg0 *big.Int) (common.Address, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.SRequestConsumerMapping(&_VRFCoordinatorV2PlusV2Example.CallOpts, arg0) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCaller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinatorV2PlusV2Example.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleSession) SRequestId() (*big.Int, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.SRequestId(&_VRFCoordinatorV2PlusV2Example.CallOpts) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCallerSession) SRequestId() (*big.Int, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.SRequestId(&_VRFCoordinatorV2PlusV2Example.CallOpts) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCaller) 
SSubscriptions(opts *bind.CallOpts, arg0 *big.Int) (SSubscriptions, + + error) { + var out []interface{} + err := _VRFCoordinatorV2PlusV2Example.contract.Call(opts, &out, "s_subscriptions", arg0) + + outstruct := new(SSubscriptions) + if err != nil { + return *outstruct, err + } + + outstruct.LinkBalance = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.NativeBalance = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.ReqCount = *abi.ConvertType(out[2], new(uint64)).(*uint64) + outstruct.Owner = *abi.ConvertType(out[3], new(common.Address)).(*common.Address) + + return *outstruct, err + +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleSession) SSubscriptions(arg0 *big.Int) (SSubscriptions, + + error) { + return _VRFCoordinatorV2PlusV2Example.Contract.SSubscriptions(&_VRFCoordinatorV2PlusV2Example.CallOpts, arg0) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCallerSession) SSubscriptions(arg0 *big.Int) (SSubscriptions, + + error) { + return _VRFCoordinatorV2PlusV2Example.Contract.SSubscriptions(&_VRFCoordinatorV2PlusV2Example.CallOpts, arg0) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCaller) STotalLinkBalance(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinatorV2PlusV2Example.contract.Call(opts, &out, "s_totalLinkBalance") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleSession) STotalLinkBalance() (*big.Int, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.STotalLinkBalance(&_VRFCoordinatorV2PlusV2Example.CallOpts) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCallerSession) STotalLinkBalance() (*big.Int, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.STotalLinkBalance(&_VRFCoordinatorV2PlusV2Example.CallOpts) 
+} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCaller) STotalNativeBalance(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinatorV2PlusV2Example.contract.Call(opts, &out, "s_totalNativeBalance") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleSession) STotalNativeBalance() (*big.Int, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.STotalNativeBalance(&_VRFCoordinatorV2PlusV2Example.CallOpts) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleCallerSession) STotalNativeBalance() (*big.Int, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.STotalNativeBalance(&_VRFCoordinatorV2PlusV2Example.CallOpts) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleTransactor) FulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.contract.Transact(opts, "fulfillRandomWords", requestId) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleSession) FulfillRandomWords(requestId *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.FulfillRandomWords(&_VRFCoordinatorV2PlusV2Example.TransactOpts, requestId) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleTransactorSession) FulfillRandomWords(requestId *big.Int) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.FulfillRandomWords(&_VRFCoordinatorV2PlusV2Example.TransactOpts, requestId) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleTransactor) OnMigration(opts *bind.TransactOpts, encodedData []byte) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.contract.Transact(opts, "onMigration", encodedData) +} + +func 
(_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleSession) OnMigration(encodedData []byte) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.OnMigration(&_VRFCoordinatorV2PlusV2Example.TransactOpts, encodedData) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleTransactorSession) OnMigration(encodedData []byte) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.OnMigration(&_VRFCoordinatorV2PlusV2Example.TransactOpts, encodedData) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleTransactor) RequestRandomWords(opts *bind.TransactOpts, req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.contract.Transact(opts, "requestRandomWords", req) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleSession) RequestRandomWords(req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.RequestRandomWords(&_VRFCoordinatorV2PlusV2Example.TransactOpts, req) +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2ExampleTransactorSession) RequestRandomWords(req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusV2Example.Contract.RequestRandomWords(&_VRFCoordinatorV2PlusV2Example.TransactOpts, req) +} + +type GetSubscription struct { + LinkBalance *big.Int + NativeBalance *big.Int + ReqCount uint64 + Owner common.Address + Consumers []common.Address +} +type SSubscriptions struct { + LinkBalance *big.Int + NativeBalance *big.Int + ReqCount uint64 + Owner common.Address +} + +func (_VRFCoordinatorV2PlusV2Example *VRFCoordinatorV2PlusV2Example) Address() common.Address { + return _VRFCoordinatorV2PlusV2Example.address +} + +type VRFCoordinatorV2PlusV2ExampleInterface interface { + GenerateFakeRandomness(opts *bind.CallOpts, requestID *big.Int) ([]*big.Int, error) + + 
GetSubscription(opts *bind.CallOpts, subId *big.Int) (GetSubscription, + + error) + + SLink(opts *bind.CallOpts) (common.Address, error) + + SPrevCoordinator(opts *bind.CallOpts) (common.Address, error) + + SRequestConsumerMapping(opts *bind.CallOpts, arg0 *big.Int) (common.Address, error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + SSubscriptions(opts *bind.CallOpts, arg0 *big.Int) (SSubscriptions, + + error) + + STotalLinkBalance(opts *bind.CallOpts) (*big.Int, error) + + STotalNativeBalance(opts *bind.CallOpts) (*big.Int, error) + + FulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int) (*types.Transaction, error) + + OnMigration(opts *bind.TransactOpts, encodedData []byte) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_coordinator_v2plus_interface/vrf_coordinator_v2plus_interface.go b/core/gethwrappers/generated/vrf_coordinator_v2plus_interface/vrf_coordinator_v2plus_interface.go new file mode 100644 index 00000000..ddccb7c1 --- /dev/null +++ b/core/gethwrappers/generated/vrf_coordinator_v2plus_interface/vrf_coordinator_v2plus_interface.go @@ -0,0 +1,789 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_coordinator_v2plus_interface + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type IVRFCoordinatorV2PlusInternalProof struct { + Pk [2]*big.Int + Gamma [2]*big.Int + C *big.Int + S *big.Int + Seed *big.Int + UWitness common.Address + CGammaWitness [2]*big.Int + SHashWitness [2]*big.Int + ZInv *big.Int +} + +type IVRFCoordinatorV2PlusInternalRequestCommitment struct { + BlockNum uint64 + SubId *big.Int + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address + ExtraArgs []byte +} + +type VRFV2PlusClientRandomWordsRequest struct { + KeyHash [32]byte + SubId *big.Int + RequestConfirmations uint16 + CallbackGasLimit uint32 + NumWords uint32 + ExtraArgs []byte +} + +var IVRFCoordinatorV2PlusInternalMetaData = &bind.MetaData{ + ABI: 
"[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"outputSeed\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"onlyPremium\",\"type\":\"bool\"}],\"name\":\"RandomWordsFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"preSeed\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"extraArgs\",\"type\":\"bytes\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RandomWordsRequested\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"PLI_NATIVE_FEED\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"acceptSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\
":\"uint256\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"addConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"cancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"createSubscription\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"internalType\":\"structIVRFCoordinatorV2PlusInternal.Proof\",\"name\":\"proof\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"blockNum\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"extraArgs\",\"type\":\"bytes\"}],\"internalType\":\"structIVRFCoordinatorV2PlusInternal.RequestCommitment\",\"name\":\"rc\",\"type\":\"tuple
\"},{\"internalType\":\"bool\",\"name\":\"onlyPremium\",\"type\":\"bool\"}],\"name\":\"fulfillRandomWords\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"fundSubscriptionWithNative\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveSubscriptionIds\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"getSubscription\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"nativeBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint64\",\"name\":\"reqCount\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"pendingRequestExists\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"removeConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint2
56\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"extraArgs\",\"type\":\"bytes\"}],\"internalType\":\"structVRFV2PlusClient.RandomWordsRequest\",\"name\":\"req\",\"type\":\"tuple\"}],\"name\":\"requestRandomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"requestSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"}],\"name\":\"s_requestCommitments\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", +} + +var IVRFCoordinatorV2PlusInternalABI = IVRFCoordinatorV2PlusInternalMetaData.ABI + +type IVRFCoordinatorV2PlusInternal struct { + address common.Address + abi abi.ABI + IVRFCoordinatorV2PlusInternalCaller + IVRFCoordinatorV2PlusInternalTransactor + IVRFCoordinatorV2PlusInternalFilterer +} + +type IVRFCoordinatorV2PlusInternalCaller struct { + contract *bind.BoundContract +} + +type IVRFCoordinatorV2PlusInternalTransactor struct { + contract *bind.BoundContract +} + +type IVRFCoordinatorV2PlusInternalFilterer struct { + contract *bind.BoundContract +} + +type IVRFCoordinatorV2PlusInternalSession struct { + Contract *IVRFCoordinatorV2PlusInternal + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type IVRFCoordinatorV2PlusInternalCallerSession struct { + Contract *IVRFCoordinatorV2PlusInternalCaller + CallOpts bind.CallOpts +} + +type 
IVRFCoordinatorV2PlusInternalTransactorSession struct { + Contract *IVRFCoordinatorV2PlusInternalTransactor + TransactOpts bind.TransactOpts +} + +type IVRFCoordinatorV2PlusInternalRaw struct { + Contract *IVRFCoordinatorV2PlusInternal +} + +type IVRFCoordinatorV2PlusInternalCallerRaw struct { + Contract *IVRFCoordinatorV2PlusInternalCaller +} + +type IVRFCoordinatorV2PlusInternalTransactorRaw struct { + Contract *IVRFCoordinatorV2PlusInternalTransactor +} + +func NewIVRFCoordinatorV2PlusInternal(address common.Address, backend bind.ContractBackend) (*IVRFCoordinatorV2PlusInternal, error) { + abi, err := abi.JSON(strings.NewReader(IVRFCoordinatorV2PlusInternalABI)) + if err != nil { + return nil, err + } + contract, err := bindIVRFCoordinatorV2PlusInternal(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &IVRFCoordinatorV2PlusInternal{address: address, abi: abi, IVRFCoordinatorV2PlusInternalCaller: IVRFCoordinatorV2PlusInternalCaller{contract: contract}, IVRFCoordinatorV2PlusInternalTransactor: IVRFCoordinatorV2PlusInternalTransactor{contract: contract}, IVRFCoordinatorV2PlusInternalFilterer: IVRFCoordinatorV2PlusInternalFilterer{contract: contract}}, nil +} + +func NewIVRFCoordinatorV2PlusInternalCaller(address common.Address, caller bind.ContractCaller) (*IVRFCoordinatorV2PlusInternalCaller, error) { + contract, err := bindIVRFCoordinatorV2PlusInternal(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &IVRFCoordinatorV2PlusInternalCaller{contract: contract}, nil +} + +func NewIVRFCoordinatorV2PlusInternalTransactor(address common.Address, transactor bind.ContractTransactor) (*IVRFCoordinatorV2PlusInternalTransactor, error) { + contract, err := bindIVRFCoordinatorV2PlusInternal(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &IVRFCoordinatorV2PlusInternalTransactor{contract: contract}, nil +} + +func NewIVRFCoordinatorV2PlusInternalFilterer(address common.Address, 
filterer bind.ContractFilterer) (*IVRFCoordinatorV2PlusInternalFilterer, error) { + contract, err := bindIVRFCoordinatorV2PlusInternal(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &IVRFCoordinatorV2PlusInternalFilterer{contract: contract}, nil +} + +func bindIVRFCoordinatorV2PlusInternal(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := IVRFCoordinatorV2PlusInternalMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _IVRFCoordinatorV2PlusInternal.Contract.IVRFCoordinatorV2PlusInternalCaller.contract.Call(opts, result, method, params...) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.IVRFCoordinatorV2PlusInternalTransactor.contract.Transfer(opts) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.IVRFCoordinatorV2PlusInternalTransactor.contract.Transact(opts, method, params...) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _IVRFCoordinatorV2PlusInternal.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.contract.Transfer(opts) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.contract.Transact(opts, method, params...) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalCaller) PLINATIVEFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _IVRFCoordinatorV2PlusInternal.contract.Call(opts, &out, "PLI_NATIVE_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) PLINATIVEFEED() (common.Address, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.PLINATIVEFEED(&_IVRFCoordinatorV2PlusInternal.CallOpts) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalCallerSession) PLINATIVEFEED() (common.Address, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.PLINATIVEFEED(&_IVRFCoordinatorV2PlusInternal.CallOpts) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalCaller) GetActiveSubscriptionIds(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + var out []interface{} + err := _IVRFCoordinatorV2PlusInternal.contract.Call(opts, &out, "getActiveSubscriptionIds", startIndex, maxCount) + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) GetActiveSubscriptionIds(startIndex *big.Int, maxCount 
*big.Int) ([]*big.Int, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.GetActiveSubscriptionIds(&_IVRFCoordinatorV2PlusInternal.CallOpts, startIndex, maxCount) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalCallerSession) GetActiveSubscriptionIds(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.GetActiveSubscriptionIds(&_IVRFCoordinatorV2PlusInternal.CallOpts, startIndex, maxCount) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalCaller) GetSubscription(opts *bind.CallOpts, subId *big.Int) (GetSubscription, + + error) { + var out []interface{} + err := _IVRFCoordinatorV2PlusInternal.contract.Call(opts, &out, "getSubscription", subId) + + outstruct := new(GetSubscription) + if err != nil { + return *outstruct, err + } + + outstruct.Balance = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.NativeBalance = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.ReqCount = *abi.ConvertType(out[2], new(uint64)).(*uint64) + outstruct.Owner = *abi.ConvertType(out[3], new(common.Address)).(*common.Address) + outstruct.Consumers = *abi.ConvertType(out[4], new([]common.Address)).(*[]common.Address) + + return *outstruct, err + +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) GetSubscription(subId *big.Int) (GetSubscription, + + error) { + return _IVRFCoordinatorV2PlusInternal.Contract.GetSubscription(&_IVRFCoordinatorV2PlusInternal.CallOpts, subId) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalCallerSession) GetSubscription(subId *big.Int) (GetSubscription, + + error) { + return _IVRFCoordinatorV2PlusInternal.Contract.GetSubscription(&_IVRFCoordinatorV2PlusInternal.CallOpts, subId) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalCaller) PendingRequestExists(opts *bind.CallOpts, subId *big.Int) (bool, error) { + var out []interface{} + err := 
_IVRFCoordinatorV2PlusInternal.contract.Call(opts, &out, "pendingRequestExists", subId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) PendingRequestExists(subId *big.Int) (bool, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.PendingRequestExists(&_IVRFCoordinatorV2PlusInternal.CallOpts, subId) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalCallerSession) PendingRequestExists(subId *big.Int) (bool, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.PendingRequestExists(&_IVRFCoordinatorV2PlusInternal.CallOpts, subId) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalCaller) SRequestCommitments(opts *bind.CallOpts, requestID *big.Int) ([32]byte, error) { + var out []interface{} + err := _IVRFCoordinatorV2PlusInternal.contract.Call(opts, &out, "s_requestCommitments", requestID) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) SRequestCommitments(requestID *big.Int) ([32]byte, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.SRequestCommitments(&_IVRFCoordinatorV2PlusInternal.CallOpts, requestID) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalCallerSession) SRequestCommitments(requestID *big.Int) ([32]byte, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.SRequestCommitments(&_IVRFCoordinatorV2PlusInternal.CallOpts, requestID) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactor) AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.contract.Transact(opts, "acceptSubscriptionOwnerTransfer", subId) +} + +func 
(_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) AcceptSubscriptionOwnerTransfer(subId *big.Int) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.AcceptSubscriptionOwnerTransfer(&_IVRFCoordinatorV2PlusInternal.TransactOpts, subId) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactorSession) AcceptSubscriptionOwnerTransfer(subId *big.Int) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.AcceptSubscriptionOwnerTransfer(&_IVRFCoordinatorV2PlusInternal.TransactOpts, subId) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactor) AddConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.contract.Transact(opts, "addConsumer", subId, consumer) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) AddConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.AddConsumer(&_IVRFCoordinatorV2PlusInternal.TransactOpts, subId, consumer) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactorSession) AddConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.AddConsumer(&_IVRFCoordinatorV2PlusInternal.TransactOpts, subId, consumer) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactor) CancelSubscription(opts *bind.TransactOpts, subId *big.Int, to common.Address) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.contract.Transact(opts, "cancelSubscription", subId, to) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) CancelSubscription(subId *big.Int, to common.Address) (*types.Transaction, error) { + return 
_IVRFCoordinatorV2PlusInternal.Contract.CancelSubscription(&_IVRFCoordinatorV2PlusInternal.TransactOpts, subId, to) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactorSession) CancelSubscription(subId *big.Int, to common.Address) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.CancelSubscription(&_IVRFCoordinatorV2PlusInternal.TransactOpts, subId, to) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactor) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.contract.Transact(opts, "createSubscription") +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) CreateSubscription() (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.CreateSubscription(&_IVRFCoordinatorV2PlusInternal.TransactOpts) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactorSession) CreateSubscription() (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.CreateSubscription(&_IVRFCoordinatorV2PlusInternal.TransactOpts) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactor) FulfillRandomWords(opts *bind.TransactOpts, proof IVRFCoordinatorV2PlusInternalProof, rc IVRFCoordinatorV2PlusInternalRequestCommitment, onlyPremium bool) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.contract.Transact(opts, "fulfillRandomWords", proof, rc, onlyPremium) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) FulfillRandomWords(proof IVRFCoordinatorV2PlusInternalProof, rc IVRFCoordinatorV2PlusInternalRequestCommitment, onlyPremium bool) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.FulfillRandomWords(&_IVRFCoordinatorV2PlusInternal.TransactOpts, proof, rc, onlyPremium) +} + +func (_IVRFCoordinatorV2PlusInternal 
*IVRFCoordinatorV2PlusInternalTransactorSession) FulfillRandomWords(proof IVRFCoordinatorV2PlusInternalProof, rc IVRFCoordinatorV2PlusInternalRequestCommitment, onlyPremium bool) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.FulfillRandomWords(&_IVRFCoordinatorV2PlusInternal.TransactOpts, proof, rc, onlyPremium) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactor) FundSubscriptionWithNative(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.contract.Transact(opts, "fundSubscriptionWithNative", subId) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) FundSubscriptionWithNative(subId *big.Int) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.FundSubscriptionWithNative(&_IVRFCoordinatorV2PlusInternal.TransactOpts, subId) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactorSession) FundSubscriptionWithNative(subId *big.Int) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.FundSubscriptionWithNative(&_IVRFCoordinatorV2PlusInternal.TransactOpts, subId) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactor) RemoveConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.contract.Transact(opts, "removeConsumer", subId, consumer) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) RemoveConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.RemoveConsumer(&_IVRFCoordinatorV2PlusInternal.TransactOpts, subId, consumer) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactorSession) RemoveConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return 
_IVRFCoordinatorV2PlusInternal.Contract.RemoveConsumer(&_IVRFCoordinatorV2PlusInternal.TransactOpts, subId, consumer) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactor) RequestRandomWords(opts *bind.TransactOpts, req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.contract.Transact(opts, "requestRandomWords", req) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) RequestRandomWords(req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.RequestRandomWords(&_IVRFCoordinatorV2PlusInternal.TransactOpts, req) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactorSession) RequestRandomWords(req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.RequestRandomWords(&_IVRFCoordinatorV2PlusInternal.TransactOpts, req) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactor) RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int, newOwner common.Address) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.contract.Transact(opts, "requestSubscriptionOwnerTransfer", subId, newOwner) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalSession) RequestSubscriptionOwnerTransfer(subId *big.Int, newOwner common.Address) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.RequestSubscriptionOwnerTransfer(&_IVRFCoordinatorV2PlusInternal.TransactOpts, subId, newOwner) +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalTransactorSession) RequestSubscriptionOwnerTransfer(subId *big.Int, newOwner common.Address) (*types.Transaction, error) { + return _IVRFCoordinatorV2PlusInternal.Contract.RequestSubscriptionOwnerTransfer(&_IVRFCoordinatorV2PlusInternal.TransactOpts, subId, newOwner) 
+} + +type IVRFCoordinatorV2PlusInternalRandomWordsFulfilledIterator struct { + Event *IVRFCoordinatorV2PlusInternalRandomWordsFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IVRFCoordinatorV2PlusInternalRandomWordsFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IVRFCoordinatorV2PlusInternalRandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IVRFCoordinatorV2PlusInternalRandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IVRFCoordinatorV2PlusInternalRandomWordsFulfilledIterator) Error() error { + return it.fail +} + +func (it *IVRFCoordinatorV2PlusInternalRandomWordsFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IVRFCoordinatorV2PlusInternalRandomWordsFulfilled struct { + RequestId *big.Int + OutputSeed *big.Int + SubId *big.Int + Payment *big.Int + Success bool + OnlyPremium bool + Raw types.Log +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalFilterer) FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int, subId []*big.Int) (*IVRFCoordinatorV2PlusInternalRandomWordsFulfilledIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := 
_IVRFCoordinatorV2PlusInternal.contract.FilterLogs(opts, "RandomWordsFulfilled", requestIdRule, subIdRule) + if err != nil { + return nil, err + } + return &IVRFCoordinatorV2PlusInternalRandomWordsFulfilledIterator{contract: _IVRFCoordinatorV2PlusInternal.contract, event: "RandomWordsFulfilled", logs: logs, sub: sub}, nil +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalFilterer) WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *IVRFCoordinatorV2PlusInternalRandomWordsFulfilled, requestId []*big.Int, subId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _IVRFCoordinatorV2PlusInternal.contract.WatchLogs(opts, "RandomWordsFulfilled", requestIdRule, subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IVRFCoordinatorV2PlusInternalRandomWordsFulfilled) + if err := _IVRFCoordinatorV2PlusInternal.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalFilterer) ParseRandomWordsFulfilled(log types.Log) (*IVRFCoordinatorV2PlusInternalRandomWordsFulfilled, error) { + event := new(IVRFCoordinatorV2PlusInternalRandomWordsFulfilled) + if err := _IVRFCoordinatorV2PlusInternal.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
IVRFCoordinatorV2PlusInternalRandomWordsRequestedIterator struct { + Event *IVRFCoordinatorV2PlusInternalRandomWordsRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *IVRFCoordinatorV2PlusInternalRandomWordsRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(IVRFCoordinatorV2PlusInternalRandomWordsRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(IVRFCoordinatorV2PlusInternalRandomWordsRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *IVRFCoordinatorV2PlusInternalRandomWordsRequestedIterator) Error() error { + return it.fail +} + +func (it *IVRFCoordinatorV2PlusInternalRandomWordsRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type IVRFCoordinatorV2PlusInternalRandomWordsRequested struct { + KeyHash [32]byte + RequestId *big.Int + PreSeed *big.Int + SubId *big.Int + MinimumRequestConfirmations uint16 + CallbackGasLimit uint32 + NumWords uint32 + ExtraArgs []byte + Sender common.Address + Raw types.Log +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalFilterer) FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []*big.Int, sender []common.Address) (*IVRFCoordinatorV2PlusInternalRandomWordsRequestedIterator, error) { + + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId 
{ + subIdRule = append(subIdRule, subIdItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _IVRFCoordinatorV2PlusInternal.contract.FilterLogs(opts, "RandomWordsRequested", keyHashRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return &IVRFCoordinatorV2PlusInternalRandomWordsRequestedIterator{contract: _IVRFCoordinatorV2PlusInternal.contract, event: "RandomWordsRequested", logs: logs, sub: sub}, nil +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalFilterer) WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *IVRFCoordinatorV2PlusInternalRandomWordsRequested, keyHash [][32]byte, subId []*big.Int, sender []common.Address) (event.Subscription, error) { + + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _IVRFCoordinatorV2PlusInternal.contract.WatchLogs(opts, "RandomWordsRequested", keyHashRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(IVRFCoordinatorV2PlusInternalRandomWordsRequested) + if err := _IVRFCoordinatorV2PlusInternal.contract.UnpackLog(event, "RandomWordsRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternalFilterer) 
ParseRandomWordsRequested(log types.Log) (*IVRFCoordinatorV2PlusInternalRandomWordsRequested, error) { + event := new(IVRFCoordinatorV2PlusInternalRandomWordsRequested) + if err := _IVRFCoordinatorV2PlusInternal.contract.UnpackLog(event, "RandomWordsRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetSubscription struct { + Balance *big.Int + NativeBalance *big.Int + ReqCount uint64 + Owner common.Address + Consumers []common.Address +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternal) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _IVRFCoordinatorV2PlusInternal.abi.Events["RandomWordsFulfilled"].ID: + return _IVRFCoordinatorV2PlusInternal.ParseRandomWordsFulfilled(log) + case _IVRFCoordinatorV2PlusInternal.abi.Events["RandomWordsRequested"].ID: + return _IVRFCoordinatorV2PlusInternal.ParseRandomWordsRequested(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (IVRFCoordinatorV2PlusInternalRandomWordsFulfilled) Topic() common.Hash { + return common.HexToHash("0x6c6b5394380e16e41988d8383648010de6f5c2e4814803be5de1c6b1c852db55") +} + +func (IVRFCoordinatorV2PlusInternalRandomWordsRequested) Topic() common.Hash { + return common.HexToHash("0xeb0e3652e0f44f417695e6e90f2f42c99b65cd7169074c5a654b16b9748c3a4e") +} + +func (_IVRFCoordinatorV2PlusInternal *IVRFCoordinatorV2PlusInternal) Address() common.Address { + return _IVRFCoordinatorV2PlusInternal.address +} + +type IVRFCoordinatorV2PlusInternalInterface interface { + PLINATIVEFEED(opts *bind.CallOpts) (common.Address, error) + + GetActiveSubscriptionIds(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetSubscription(opts *bind.CallOpts, subId *big.Int) (GetSubscription, + + error) + + PendingRequestExists(opts *bind.CallOpts, subId *big.Int) (bool, error) + + SRequestCommitments(opts *bind.CallOpts, 
requestID *big.Int) ([32]byte, error) + + AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) + + AddConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) + + CancelSubscription(opts *bind.TransactOpts, subId *big.Int, to common.Address) (*types.Transaction, error) + + CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) + + FulfillRandomWords(opts *bind.TransactOpts, proof IVRFCoordinatorV2PlusInternalProof, rc IVRFCoordinatorV2PlusInternalRequestCommitment, onlyPremium bool) (*types.Transaction, error) + + FundSubscriptionWithNative(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) + + RemoveConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) + + RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int, newOwner common.Address) (*types.Transaction, error) + + FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int, subId []*big.Int) (*IVRFCoordinatorV2PlusInternalRandomWordsFulfilledIterator, error) + + WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *IVRFCoordinatorV2PlusInternalRandomWordsFulfilled, requestId []*big.Int, subId []*big.Int) (event.Subscription, error) + + ParseRandomWordsFulfilled(log types.Log) (*IVRFCoordinatorV2PlusInternalRandomWordsFulfilled, error) + + FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []*big.Int, sender []common.Address) (*IVRFCoordinatorV2PlusInternalRandomWordsRequestedIterator, error) + + WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *IVRFCoordinatorV2PlusInternalRandomWordsRequested, keyHash [][32]byte, subId []*big.Int, sender []common.Address) (event.Subscription, error) + + ParseRandomWordsRequested(log types.Log) 
(*IVRFCoordinatorV2PlusInternalRandomWordsRequested, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_external_sub_owner_example/vrf_external_sub_owner_example.go b/core/gethwrappers/generated/vrf_external_sub_owner_example/vrf_external_sub_owner_example.go new file mode 100644 index 00000000..4ab3cdf5 --- /dev/null +++ b/core/gethwrappers/generated/vrf_external_sub_owner_example/vrf_external_sub_owner_example.go @@ -0,0 +1,268 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrf_external_sub_owner_example + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFExternalSubOwnerExampleMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"requestRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60a060405234801561001057600080fd5b5060405161072038038061072083398101604081905261002f9161009e565b6001600160601b0319606083901b16608052600080546001600160a01b03199081166001600160a01b039485161790915560018054929093169181169190911790915560048054909116331790556100d1565b80516001600160a01b038116811461009957600080fd5b919050565b600080604083850312156100b157600080fd5b6100ba83610082565b91506100c860208401610082565b90509250929050565b60805160601c61062b6100f56000396000818160ed0152610155015261062b6000f3fe608060405234801561001057600080fd5b50600436106100675760003560e01c8063e89e106a11610050578063e89e106a14610094578063f2fde38b146100af578063f6eaffc8146100c257600080fd5b80631fe543e31461006c5780639561f02314610081575b600080fd5b61007f61007a36600461048c565b6100d5565b005b61007f61008f36600461057b565b610195565b61009d60035481565b60405190815260200160405180910390f35b61007f6100bd36600461041d565b610295565b61009d6100d036600461045a565b610300565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610187576040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660248201526044015b60405180910390fd5b6101918282610321565b5050565b60045473ffffffffffffffffffffffffffffffffffffffff1633146101b957600080fd5b6000546040517f5d3b1d300000000000000000000000000000000000000000000000000000000081526004810183905267ffffffffffffffff8716602482015261ffff8516604482015263ffffffff80871660648301528416608482015273ffffffffffffffffffffffffffffffffffffffff90911690635d3b1d309060a401602060405180830381600087803b15801561025357600080fd5b505af1158015610267573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061028b9190610473565b6003555050505050565b60045473ffffffffffffffffffffffffffffffffffffffff1633146102b957600080fd5b600480547fffffffffffffffffffffffff00000000000000000000000000000000000000001673fffffffffffffffffffffffffffffffff
fffffff92909216919091179055565b6002818154811061031057600080fd5b600091825260209091200154905081565b600354821461038c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265717565737420494420697320696e636f7272656374000000000000000000604482015260640161017e565b805161039f9060029060208401906103a4565b505050565b8280548282559060005260206000209081019282156103df579160200282015b828111156103df5782518255916020019190600101906103c4565b506103eb9291506103ef565b5090565b5b808211156103eb57600081556001016103f0565b803563ffffffff8116811461041857600080fd5b919050565b60006020828403121561042f57600080fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811461045357600080fd5b9392505050565b60006020828403121561046c57600080fd5b5035919050565b60006020828403121561048557600080fd5b5051919050565b6000806040838503121561049f57600080fd5b8235915060208084013567ffffffffffffffff808211156104bf57600080fd5b818601915086601f8301126104d357600080fd5b8135818111156104e5576104e56105ef565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f83011681018181108582111715610528576105286105ef565b604052828152858101935084860182860187018b101561054757600080fd5b600095505b8386101561056a57803585526001959095019493860193860161054c565b508096505050505050509250929050565b600080600080600060a0868803121561059357600080fd5b853567ffffffffffffffff811681146105ab57600080fd5b94506105b960208701610404565b9350604086013561ffff811681146105d057600080fd5b92506105de60608701610404565b949793965091946080013592915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFExternalSubOwnerExampleABI = VRFExternalSubOwnerExampleMetaData.ABI + +var VRFExternalSubOwnerExampleBin = VRFExternalSubOwnerExampleMetaData.Bin + +func DeployVRFExternalSubOwnerExample(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator common.Address, link common.Address) (common.Address, *types.Transaction, 
*VRFExternalSubOwnerExample, error) { + parsed, err := VRFExternalSubOwnerExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFExternalSubOwnerExampleBin), backend, vrfCoordinator, link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFExternalSubOwnerExample{address: address, abi: *parsed, VRFExternalSubOwnerExampleCaller: VRFExternalSubOwnerExampleCaller{contract: contract}, VRFExternalSubOwnerExampleTransactor: VRFExternalSubOwnerExampleTransactor{contract: contract}, VRFExternalSubOwnerExampleFilterer: VRFExternalSubOwnerExampleFilterer{contract: contract}}, nil +} + +type VRFExternalSubOwnerExample struct { + address common.Address + abi abi.ABI + VRFExternalSubOwnerExampleCaller + VRFExternalSubOwnerExampleTransactor + VRFExternalSubOwnerExampleFilterer +} + +type VRFExternalSubOwnerExampleCaller struct { + contract *bind.BoundContract +} + +type VRFExternalSubOwnerExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFExternalSubOwnerExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFExternalSubOwnerExampleSession struct { + Contract *VRFExternalSubOwnerExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFExternalSubOwnerExampleCallerSession struct { + Contract *VRFExternalSubOwnerExampleCaller + CallOpts bind.CallOpts +} + +type VRFExternalSubOwnerExampleTransactorSession struct { + Contract *VRFExternalSubOwnerExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFExternalSubOwnerExampleRaw struct { + Contract *VRFExternalSubOwnerExample +} + +type VRFExternalSubOwnerExampleCallerRaw struct { + Contract *VRFExternalSubOwnerExampleCaller +} + +type VRFExternalSubOwnerExampleTransactorRaw struct { + Contract 
*VRFExternalSubOwnerExampleTransactor +} + +func NewVRFExternalSubOwnerExample(address common.Address, backend bind.ContractBackend) (*VRFExternalSubOwnerExample, error) { + abi, err := abi.JSON(strings.NewReader(VRFExternalSubOwnerExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFExternalSubOwnerExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFExternalSubOwnerExample{address: address, abi: abi, VRFExternalSubOwnerExampleCaller: VRFExternalSubOwnerExampleCaller{contract: contract}, VRFExternalSubOwnerExampleTransactor: VRFExternalSubOwnerExampleTransactor{contract: contract}, VRFExternalSubOwnerExampleFilterer: VRFExternalSubOwnerExampleFilterer{contract: contract}}, nil +} + +func NewVRFExternalSubOwnerExampleCaller(address common.Address, caller bind.ContractCaller) (*VRFExternalSubOwnerExampleCaller, error) { + contract, err := bindVRFExternalSubOwnerExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFExternalSubOwnerExampleCaller{contract: contract}, nil +} + +func NewVRFExternalSubOwnerExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFExternalSubOwnerExampleTransactor, error) { + contract, err := bindVRFExternalSubOwnerExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFExternalSubOwnerExampleTransactor{contract: contract}, nil +} + +func NewVRFExternalSubOwnerExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFExternalSubOwnerExampleFilterer, error) { + contract, err := bindVRFExternalSubOwnerExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFExternalSubOwnerExampleFilterer{contract: contract}, nil +} + +func bindVRFExternalSubOwnerExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := 
VRFExternalSubOwnerExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFExternalSubOwnerExample.Contract.VRFExternalSubOwnerExampleCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFExternalSubOwnerExample.Contract.VRFExternalSubOwnerExampleTransactor.contract.Transfer(opts) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFExternalSubOwnerExample.Contract.VRFExternalSubOwnerExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFExternalSubOwnerExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFExternalSubOwnerExample.Contract.contract.Transfer(opts) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFExternalSubOwnerExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleCaller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFExternalSubOwnerExample.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFExternalSubOwnerExample.Contract.SRandomWords(&_VRFExternalSubOwnerExample.CallOpts, arg0) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleCallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFExternalSubOwnerExample.Contract.SRandomWords(&_VRFExternalSubOwnerExample.CallOpts, arg0) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleCaller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFExternalSubOwnerExample.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleSession) SRequestId() (*big.Int, error) { + return _VRFExternalSubOwnerExample.Contract.SRequestId(&_VRFExternalSubOwnerExample.CallOpts) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleCallerSession) SRequestId() (*big.Int, error) { + return _VRFExternalSubOwnerExample.Contract.SRequestId(&_VRFExternalSubOwnerExample.CallOpts) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFExternalSubOwnerExample.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFExternalSubOwnerExample 
*VRFExternalSubOwnerExampleSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFExternalSubOwnerExample.Contract.RawFulfillRandomWords(&_VRFExternalSubOwnerExample.TransactOpts, requestId, randomWords) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFExternalSubOwnerExample.Contract.RawFulfillRandomWords(&_VRFExternalSubOwnerExample.TransactOpts, requestId, randomWords) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleTransactor) RequestRandomWords(opts *bind.TransactOpts, subId uint64, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte) (*types.Transaction, error) { + return _VRFExternalSubOwnerExample.contract.Transact(opts, "requestRandomWords", subId, callbackGasLimit, requestConfirmations, numWords, keyHash) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleSession) RequestRandomWords(subId uint64, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte) (*types.Transaction, error) { + return _VRFExternalSubOwnerExample.Contract.RequestRandomWords(&_VRFExternalSubOwnerExample.TransactOpts, subId, callbackGasLimit, requestConfirmations, numWords, keyHash) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleTransactorSession) RequestRandomWords(subId uint64, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte) (*types.Transaction, error) { + return _VRFExternalSubOwnerExample.Contract.RequestRandomWords(&_VRFExternalSubOwnerExample.TransactOpts, subId, callbackGasLimit, requestConfirmations, numWords, keyHash) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleTransactor) TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) { + return 
_VRFExternalSubOwnerExample.contract.Transact(opts, "transferOwnership", newOwner) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _VRFExternalSubOwnerExample.Contract.TransferOwnership(&_VRFExternalSubOwnerExample.TransactOpts, newOwner) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExampleTransactorSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _VRFExternalSubOwnerExample.Contract.TransferOwnership(&_VRFExternalSubOwnerExample.TransactOpts, newOwner) +} + +func (_VRFExternalSubOwnerExample *VRFExternalSubOwnerExample) Address() common.Address { + return _VRFExternalSubOwnerExample.address +} + +type VRFExternalSubOwnerExampleInterface interface { + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, subId uint64, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_load_test_external_sub_owner/vrf_load_test_external_sub_owner.go b/core/gethwrappers/generated/vrf_load_test_external_sub_owner/vrf_load_test_external_sub_owner.go new file mode 100644 index 00000000..9d331ab8 --- /dev/null +++ b/core/gethwrappers/generated/vrf_load_test_external_sub_owner/vrf_load_test_external_sub_owner.go @@ -0,0 +1,638 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_load_test_external_sub_owner + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFLoadTestExternalSubOwnerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"COORDINATOR\",\"outputs\":[{\"internalType\":\"contractVRFCoordinatorV2Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"typ
e\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"_subId\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"_requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint16\",\"name\":\"_requestCount\",\"type\":\"uint16\"}],\"name\":\"requestRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_responseCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60e060405234801561001057600080fd5b50604051610aa2380380610aa283398101604081905261002f916101ae565b6001600160601b0319606083901b1660805233806000816100975760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156100c7576100c7816100e8565b5050506001600160601b0319606092831b811660a052911b1660c0526101e1565b6001600160a01b0381163314156101415760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161008e565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b03811681146101a957600080fd5b919050565b600080604083850312156101c157600080fd5b6101ca83610192565b91506101d860208401610192565b90509250929050565b60805160601c60a05160601c60c05160601c61087c610226600039600060a701526000818161010b01526101f00152600081816102b3015261031b015261087c6000f3fe608060405234801561001057600080fd5b50600436106100885760003560e01c806379ba50971161005b57806379ba50971461012d5780638da5cb5b14610135578063dc1670db14610153578063f2fde38b1461016a57600080fd5b8063096cb17b1461008d5780631b6b6d23146100a25780631fe543e3146100f35780633b2bcbf114610106575b600080fd5b6100a061009b36600461075a565b61017d565b005b6100c97f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100a061010136600461066b565b61029b565b6100c97f000000000000000000000000000000000000000000000000000000000000000081565b6100a061035b565b60005473ffffffffffffffffffffffffffffffffffffffff166100c9565b61015c60025481565b6040519081526020016100ea565b6100a0610178366004610615565b610458565b61018561046c565b60005b8161ffff168161ffff161015610294576040517f5d3b1d30000000000000000000000000000000000000000000000000000000008152600
4810184905267ffffffffffffffff8616602482015261ffff8516604482015261c3506064820152600160848201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1690635d3b1d309060a401602060405180830381600087803b15801561024957600080fd5b505af115801561025d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906102819190610652565b508061028c816107b6565b915050610188565b5050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461034d576040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660248201526044015b60405180910390fd5b61035782826104ef565b5050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146103dc576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610344565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b61046061046c565b61046981610508565b50565b60005473ffffffffffffffffffffffffffffffffffffffff1633146104ed576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610344565b565b600280549060006104ff836107d8565b91905055505050565b73ffffffffffffffffffffffffffffffffffffffff8116331415610588576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610344565b600180547fffffffffffffffffffffffff00000000000000000000000000000
000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b803561ffff8116811461061057600080fd5b919050565b60006020828403121561062757600080fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811461064b57600080fd5b9392505050565b60006020828403121561066457600080fd5b5051919050565b6000806040838503121561067e57600080fd5b8235915060208084013567ffffffffffffffff8082111561069e57600080fd5b818601915086601f8301126106b257600080fd5b8135818111156106c4576106c4610840565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f8301168101818110858211171561070757610707610840565b604052828152858101935084860182860187018b101561072657600080fd5b600095505b8386101561074957803585526001959095019493860193860161072b565b508096505050505050509250929050565b6000806000806080858703121561077057600080fd5b843567ffffffffffffffff8116811461078857600080fd5b9350610796602086016105fe565b9250604085013591506107ab606086016105fe565b905092959194509250565b600061ffff808316818114156107ce576107ce610811565b6001019392505050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82141561080a5761080a610811565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFLoadTestExternalSubOwnerABI = VRFLoadTestExternalSubOwnerMetaData.ABI + +var VRFLoadTestExternalSubOwnerBin = VRFLoadTestExternalSubOwnerMetaData.Bin + +func DeployVRFLoadTestExternalSubOwner(auth *bind.TransactOpts, backend bind.ContractBackend, _vrfCoordinator common.Address, _link common.Address) (common.Address, *types.Transaction, *VRFLoadTestExternalSubOwner, error) { + parsed, err := VRFLoadTestExternalSubOwnerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return 
common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFLoadTestExternalSubOwnerBin), backend, _vrfCoordinator, _link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFLoadTestExternalSubOwner{address: address, abi: *parsed, VRFLoadTestExternalSubOwnerCaller: VRFLoadTestExternalSubOwnerCaller{contract: contract}, VRFLoadTestExternalSubOwnerTransactor: VRFLoadTestExternalSubOwnerTransactor{contract: contract}, VRFLoadTestExternalSubOwnerFilterer: VRFLoadTestExternalSubOwnerFilterer{contract: contract}}, nil +} + +type VRFLoadTestExternalSubOwner struct { + address common.Address + abi abi.ABI + VRFLoadTestExternalSubOwnerCaller + VRFLoadTestExternalSubOwnerTransactor + VRFLoadTestExternalSubOwnerFilterer +} + +type VRFLoadTestExternalSubOwnerCaller struct { + contract *bind.BoundContract +} + +type VRFLoadTestExternalSubOwnerTransactor struct { + contract *bind.BoundContract +} + +type VRFLoadTestExternalSubOwnerFilterer struct { + contract *bind.BoundContract +} + +type VRFLoadTestExternalSubOwnerSession struct { + Contract *VRFLoadTestExternalSubOwner + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFLoadTestExternalSubOwnerCallerSession struct { + Contract *VRFLoadTestExternalSubOwnerCaller + CallOpts bind.CallOpts +} + +type VRFLoadTestExternalSubOwnerTransactorSession struct { + Contract *VRFLoadTestExternalSubOwnerTransactor + TransactOpts bind.TransactOpts +} + +type VRFLoadTestExternalSubOwnerRaw struct { + Contract *VRFLoadTestExternalSubOwner +} + +type VRFLoadTestExternalSubOwnerCallerRaw struct { + Contract *VRFLoadTestExternalSubOwnerCaller +} + +type VRFLoadTestExternalSubOwnerTransactorRaw struct { + Contract *VRFLoadTestExternalSubOwnerTransactor +} + +func NewVRFLoadTestExternalSubOwner(address common.Address, backend bind.ContractBackend) (*VRFLoadTestExternalSubOwner, error) { + abi, err := 
abi.JSON(strings.NewReader(VRFLoadTestExternalSubOwnerABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFLoadTestExternalSubOwner(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFLoadTestExternalSubOwner{address: address, abi: abi, VRFLoadTestExternalSubOwnerCaller: VRFLoadTestExternalSubOwnerCaller{contract: contract}, VRFLoadTestExternalSubOwnerTransactor: VRFLoadTestExternalSubOwnerTransactor{contract: contract}, VRFLoadTestExternalSubOwnerFilterer: VRFLoadTestExternalSubOwnerFilterer{contract: contract}}, nil +} + +func NewVRFLoadTestExternalSubOwnerCaller(address common.Address, caller bind.ContractCaller) (*VRFLoadTestExternalSubOwnerCaller, error) { + contract, err := bindVRFLoadTestExternalSubOwner(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFLoadTestExternalSubOwnerCaller{contract: contract}, nil +} + +func NewVRFLoadTestExternalSubOwnerTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFLoadTestExternalSubOwnerTransactor, error) { + contract, err := bindVRFLoadTestExternalSubOwner(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFLoadTestExternalSubOwnerTransactor{contract: contract}, nil +} + +func NewVRFLoadTestExternalSubOwnerFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFLoadTestExternalSubOwnerFilterer, error) { + contract, err := bindVRFLoadTestExternalSubOwner(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFLoadTestExternalSubOwnerFilterer{contract: contract}, nil +} + +func bindVRFLoadTestExternalSubOwner(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFLoadTestExternalSubOwnerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + 
+func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFLoadTestExternalSubOwner.Contract.VRFLoadTestExternalSubOwnerCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.Contract.VRFLoadTestExternalSubOwnerTransactor.contract.Transfer(opts) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.Contract.VRFLoadTestExternalSubOwnerTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFLoadTestExternalSubOwner.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.Contract.contract.Transfer(opts) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerCaller) COORDINATOR(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFLoadTestExternalSubOwner.contract.Call(opts, &out, "COORDINATOR") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerSession) COORDINATOR() (common.Address, error) { + return _VRFLoadTestExternalSubOwner.Contract.COORDINATOR(&_VRFLoadTestExternalSubOwner.CallOpts) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerCallerSession) COORDINATOR() (common.Address, error) { + return _VRFLoadTestExternalSubOwner.Contract.COORDINATOR(&_VRFLoadTestExternalSubOwner.CallOpts) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerCaller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFLoadTestExternalSubOwner.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerSession) PLI() (common.Address, error) { + return _VRFLoadTestExternalSubOwner.Contract.PLI(&_VRFLoadTestExternalSubOwner.CallOpts) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerCallerSession) PLI() (common.Address, error) { + return _VRFLoadTestExternalSubOwner.Contract.PLI(&_VRFLoadTestExternalSubOwner.CallOpts) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFLoadTestExternalSubOwner.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return 
out0, err + +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerSession) Owner() (common.Address, error) { + return _VRFLoadTestExternalSubOwner.Contract.Owner(&_VRFLoadTestExternalSubOwner.CallOpts) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerCallerSession) Owner() (common.Address, error) { + return _VRFLoadTestExternalSubOwner.Contract.Owner(&_VRFLoadTestExternalSubOwner.CallOpts) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerCaller) SResponseCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFLoadTestExternalSubOwner.contract.Call(opts, &out, "s_responseCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerSession) SResponseCount() (*big.Int, error) { + return _VRFLoadTestExternalSubOwner.Contract.SResponseCount(&_VRFLoadTestExternalSubOwner.CallOpts) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerCallerSession) SResponseCount() (*big.Int, error) { + return _VRFLoadTestExternalSubOwner.Contract.SResponseCount(&_VRFLoadTestExternalSubOwner.CallOpts) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.Contract.AcceptOwnership(&_VRFLoadTestExternalSubOwner.TransactOpts) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.Contract.AcceptOwnership(&_VRFLoadTestExternalSubOwner.TransactOpts) +} + +func (_VRFLoadTestExternalSubOwner 
*VRFLoadTestExternalSubOwnerTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.Contract.RawFulfillRandomWords(&_VRFLoadTestExternalSubOwner.TransactOpts, requestId, randomWords) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.Contract.RawFulfillRandomWords(&_VRFLoadTestExternalSubOwner.TransactOpts, requestId, randomWords) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerTransactor) RequestRandomWords(opts *bind.TransactOpts, _subId uint64, _requestConfirmations uint16, _keyHash [32]byte, _requestCount uint16) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.contract.Transact(opts, "requestRandomWords", _subId, _requestConfirmations, _keyHash, _requestCount) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerSession) RequestRandomWords(_subId uint64, _requestConfirmations uint16, _keyHash [32]byte, _requestCount uint16) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.Contract.RequestRandomWords(&_VRFLoadTestExternalSubOwner.TransactOpts, _subId, _requestConfirmations, _keyHash, _requestCount) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerTransactorSession) RequestRandomWords(_subId uint64, _requestConfirmations uint16, _keyHash [32]byte, _requestCount uint16) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.Contract.RequestRandomWords(&_VRFLoadTestExternalSubOwner.TransactOpts, 
_subId, _requestConfirmations, _keyHash, _requestCount) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.Contract.TransferOwnership(&_VRFLoadTestExternalSubOwner.TransactOpts, to) +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFLoadTestExternalSubOwner.Contract.TransferOwnership(&_VRFLoadTestExternalSubOwner.TransactOpts, to) +} + +type VRFLoadTestExternalSubOwnerOwnershipTransferRequestedIterator struct { + Event *VRFLoadTestExternalSubOwnerOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFLoadTestExternalSubOwnerOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFLoadTestExternalSubOwnerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFLoadTestExternalSubOwnerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFLoadTestExternalSubOwnerOwnershipTransferRequestedIterator) Error() error { + return 
it.fail +} + +func (it *VRFLoadTestExternalSubOwnerOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFLoadTestExternalSubOwnerOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFLoadTestExternalSubOwnerOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFLoadTestExternalSubOwner.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFLoadTestExternalSubOwnerOwnershipTransferRequestedIterator{contract: _VRFLoadTestExternalSubOwner.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFLoadTestExternalSubOwnerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFLoadTestExternalSubOwner.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFLoadTestExternalSubOwnerOwnershipTransferRequested) + if err := _VRFLoadTestExternalSubOwner.contract.UnpackLog(event, 
"OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFLoadTestExternalSubOwnerOwnershipTransferRequested, error) { + event := new(VRFLoadTestExternalSubOwnerOwnershipTransferRequested) + if err := _VRFLoadTestExternalSubOwner.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFLoadTestExternalSubOwnerOwnershipTransferredIterator struct { + Event *VRFLoadTestExternalSubOwnerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFLoadTestExternalSubOwnerOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFLoadTestExternalSubOwnerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFLoadTestExternalSubOwnerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFLoadTestExternalSubOwnerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFLoadTestExternalSubOwnerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
VRFLoadTestExternalSubOwnerOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFLoadTestExternalSubOwnerOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFLoadTestExternalSubOwner.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFLoadTestExternalSubOwnerOwnershipTransferredIterator{contract: _VRFLoadTestExternalSubOwner.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFLoadTestExternalSubOwnerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFLoadTestExternalSubOwner.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFLoadTestExternalSubOwnerOwnershipTransferred) + if err := _VRFLoadTestExternalSubOwner.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + 
return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwnerFilterer) ParseOwnershipTransferred(log types.Log) (*VRFLoadTestExternalSubOwnerOwnershipTransferred, error) { + event := new(VRFLoadTestExternalSubOwnerOwnershipTransferred) + if err := _VRFLoadTestExternalSubOwner.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwner) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFLoadTestExternalSubOwner.abi.Events["OwnershipTransferRequested"].ID: + return _VRFLoadTestExternalSubOwner.ParseOwnershipTransferRequested(log) + case _VRFLoadTestExternalSubOwner.abi.Events["OwnershipTransferred"].ID: + return _VRFLoadTestExternalSubOwner.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFLoadTestExternalSubOwnerOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFLoadTestExternalSubOwnerOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_VRFLoadTestExternalSubOwner *VRFLoadTestExternalSubOwner) Address() common.Address { + return _VRFLoadTestExternalSubOwner.address +} + +type VRFLoadTestExternalSubOwnerInterface interface { + COORDINATOR(opts *bind.CallOpts) (common.Address, error) + + PLI(opts *bind.CallOpts) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SResponseCount(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) 
(*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, _subId uint64, _requestConfirmations uint16, _keyHash [32]byte, _requestCount uint16) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFLoadTestExternalSubOwnerOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFLoadTestExternalSubOwnerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFLoadTestExternalSubOwnerOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFLoadTestExternalSubOwnerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFLoadTestExternalSubOwnerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFLoadTestExternalSubOwnerOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_load_test_ownerless_consumer/vrf_load_test_ownerless_consumer.go b/core/gethwrappers/generated/vrf_load_test_ownerless_consumer/vrf_load_test_ownerless_consumer.go new file mode 100644 index 00000000..fea4360e --- /dev/null +++ b/core/gethwrappers/generated/vrf_load_test_ownerless_consumer/vrf_load_test_ownerless_consumer.go @@ -0,0 +1,254 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_load_test_ownerless_consumer + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFLoadTestOwnerlessConsumerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_price\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"PRICE\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"randomness\",\"type\":\"uint256\"}],\"name\":\"rawFulfillRandomness\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_responseCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x60e060405234801561001057600080fd5b5060405161078d38038061078d83398101604081905261002f9161006e565b6001600160601b0319606093841b811660a0529190921b1660805260c0526100aa565b80516001600160a01b038116811461006957600080fd5b919050565b60008060006060848603121561008357600080fd5b61008c84610052565b925061009a60208501610052565b9150604084015190509250925092565b60805160601c60a05160601c60c05161068d6101006000396000818160560152818161022501528181610255015261027f01526000818160d3015261030c01526000818161018501526102d0015261068d6000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c80638d859f3e1461005157806394985ddd1461008a578063a4c0ed361461009f578063dc1670db146100b2575b600080fd5b6100787f000000000000000000000000000000000000000000000000000000000000000081565b60405190815260200160405180910390f35b61009d610098366004610546565b6100bb565b005b61009d6100ad366004610462565b61016d565b61007860015481565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461015f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f4f6e6c7920565246436f6f7264696e61746f722063616e2066756c66696c6c0060448201526064015b60405180910390fd5b61016982826102b3565b5050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461020c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f6f6e6c792063616c6c61626c652066726f6d204c494e4b0000000000000000006044820152606401610156565b600061021a8284018461052d565b905060005b8461024a7f000000000000000000000000000000000000000000000000000000000000000083610600565b116102ab57610279827f00000000000000000000000000000000000000000000000000000000000000006102cc565b506102a47f000000000000000000000000000000000000000000000000000000000000000082610600565b905061021f565b505050505050565b600180549060006102c383610618565b91905055505050565b60007f000000000000000000000000000000000000000000000
000000000000000000073ffffffffffffffffffffffffffffffffffffffff16634000aea07f000000000000000000000000000000000000000000000000000000000000000084866000604051602001610349929190918252602082015260400190565b6040516020818303038152906040526040518463ffffffff1660e01b815260040161037693929190610568565b602060405180830381600087803b15801561039057600080fd5b505af11580156103a4573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103c89190610504565b5060008381526020818152604080832054815180840188905280830185905230606082015260808082018390528351808303909101815260a090910190925281519183019190912086845292909152610422906001610600565b6000858152602081815260409182902092909255805180830187905280820184905281518082038301815260609091019091528051910120949350505050565b6000806000806060858703121561047857600080fd5b843573ffffffffffffffffffffffffffffffffffffffff8116811461049c57600080fd5b935060208501359250604085013567ffffffffffffffff808211156104c057600080fd5b818701915087601f8301126104d457600080fd5b8135818111156104e357600080fd5b8860208285010111156104f557600080fd5b95989497505060200194505050565b60006020828403121561051657600080fd5b8151801515811461052657600080fd5b9392505050565b60006020828403121561053f57600080fd5b5035919050565b6000806040838503121561055957600080fd5b50508035926020909101359150565b73ffffffffffffffffffffffffffffffffffffffff8416815260006020848184015260606040840152835180606085015260005b818110156105b85785810183015185820160800152820161059c565b818111156105ca576000608083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160800195945050505050565b6000821982111561061357610613610651565b500190565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82141561064a5761064a610651565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fdfea164736f6c6343000806000a", +} + +var VRFLoadTestOwnerlessConsumerABI = VRFLoadTestOwnerlessConsumerMetaData.ABI + +var VRFLoadTestOwnerlessConsumerBin = 
VRFLoadTestOwnerlessConsumerMetaData.Bin + +func DeployVRFLoadTestOwnerlessConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, _vrfCoordinator common.Address, _link common.Address, _price *big.Int) (common.Address, *types.Transaction, *VRFLoadTestOwnerlessConsumer, error) { + parsed, err := VRFLoadTestOwnerlessConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFLoadTestOwnerlessConsumerBin), backend, _vrfCoordinator, _link, _price) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFLoadTestOwnerlessConsumer{address: address, abi: *parsed, VRFLoadTestOwnerlessConsumerCaller: VRFLoadTestOwnerlessConsumerCaller{contract: contract}, VRFLoadTestOwnerlessConsumerTransactor: VRFLoadTestOwnerlessConsumerTransactor{contract: contract}, VRFLoadTestOwnerlessConsumerFilterer: VRFLoadTestOwnerlessConsumerFilterer{contract: contract}}, nil +} + +type VRFLoadTestOwnerlessConsumer struct { + address common.Address + abi abi.ABI + VRFLoadTestOwnerlessConsumerCaller + VRFLoadTestOwnerlessConsumerTransactor + VRFLoadTestOwnerlessConsumerFilterer +} + +type VRFLoadTestOwnerlessConsumerCaller struct { + contract *bind.BoundContract +} + +type VRFLoadTestOwnerlessConsumerTransactor struct { + contract *bind.BoundContract +} + +type VRFLoadTestOwnerlessConsumerFilterer struct { + contract *bind.BoundContract +} + +type VRFLoadTestOwnerlessConsumerSession struct { + Contract *VRFLoadTestOwnerlessConsumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFLoadTestOwnerlessConsumerCallerSession struct { + Contract *VRFLoadTestOwnerlessConsumerCaller + CallOpts bind.CallOpts +} + +type VRFLoadTestOwnerlessConsumerTransactorSession struct { + Contract *VRFLoadTestOwnerlessConsumerTransactor + TransactOpts 
bind.TransactOpts +} + +type VRFLoadTestOwnerlessConsumerRaw struct { + Contract *VRFLoadTestOwnerlessConsumer +} + +type VRFLoadTestOwnerlessConsumerCallerRaw struct { + Contract *VRFLoadTestOwnerlessConsumerCaller +} + +type VRFLoadTestOwnerlessConsumerTransactorRaw struct { + Contract *VRFLoadTestOwnerlessConsumerTransactor +} + +func NewVRFLoadTestOwnerlessConsumer(address common.Address, backend bind.ContractBackend) (*VRFLoadTestOwnerlessConsumer, error) { + abi, err := abi.JSON(strings.NewReader(VRFLoadTestOwnerlessConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFLoadTestOwnerlessConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFLoadTestOwnerlessConsumer{address: address, abi: abi, VRFLoadTestOwnerlessConsumerCaller: VRFLoadTestOwnerlessConsumerCaller{contract: contract}, VRFLoadTestOwnerlessConsumerTransactor: VRFLoadTestOwnerlessConsumerTransactor{contract: contract}, VRFLoadTestOwnerlessConsumerFilterer: VRFLoadTestOwnerlessConsumerFilterer{contract: contract}}, nil +} + +func NewVRFLoadTestOwnerlessConsumerCaller(address common.Address, caller bind.ContractCaller) (*VRFLoadTestOwnerlessConsumerCaller, error) { + contract, err := bindVRFLoadTestOwnerlessConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFLoadTestOwnerlessConsumerCaller{contract: contract}, nil +} + +func NewVRFLoadTestOwnerlessConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFLoadTestOwnerlessConsumerTransactor, error) { + contract, err := bindVRFLoadTestOwnerlessConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFLoadTestOwnerlessConsumerTransactor{contract: contract}, nil +} + +func NewVRFLoadTestOwnerlessConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFLoadTestOwnerlessConsumerFilterer, error) { + contract, err := bindVRFLoadTestOwnerlessConsumer(address, nil, nil, 
filterer) + if err != nil { + return nil, err + } + return &VRFLoadTestOwnerlessConsumerFilterer{contract: contract}, nil +} + +func bindVRFLoadTestOwnerlessConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFLoadTestOwnerlessConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFLoadTestOwnerlessConsumer.Contract.VRFLoadTestOwnerlessConsumerCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFLoadTestOwnerlessConsumer.Contract.VRFLoadTestOwnerlessConsumerTransactor.contract.Transfer(opts) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFLoadTestOwnerlessConsumer.Contract.VRFLoadTestOwnerlessConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFLoadTestOwnerlessConsumer.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFLoadTestOwnerlessConsumer.Contract.contract.Transfer(opts) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFLoadTestOwnerlessConsumer.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerCaller) PRICE(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFLoadTestOwnerlessConsumer.contract.Call(opts, &out, "PRICE") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerSession) PRICE() (*big.Int, error) { + return _VRFLoadTestOwnerlessConsumer.Contract.PRICE(&_VRFLoadTestOwnerlessConsumer.CallOpts) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerCallerSession) PRICE() (*big.Int, error) { + return _VRFLoadTestOwnerlessConsumer.Contract.PRICE(&_VRFLoadTestOwnerlessConsumer.CallOpts) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerCaller) SResponseCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFLoadTestOwnerlessConsumer.contract.Call(opts, &out, "s_responseCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerSession) SResponseCount() (*big.Int, error) { + return _VRFLoadTestOwnerlessConsumer.Contract.SResponseCount(&_VRFLoadTestOwnerlessConsumer.CallOpts) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerCallerSession) SResponseCount() (*big.Int, 
error) { + return _VRFLoadTestOwnerlessConsumer.Contract.SResponseCount(&_VRFLoadTestOwnerlessConsumer.CallOpts) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerTransactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFLoadTestOwnerlessConsumer.contract.Transact(opts, "onTokenTransfer", arg0, _amount, _data) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerSession) OnTokenTransfer(arg0 common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFLoadTestOwnerlessConsumer.Contract.OnTokenTransfer(&_VRFLoadTestOwnerlessConsumer.TransactOpts, arg0, _amount, _data) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerTransactorSession) OnTokenTransfer(arg0 common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFLoadTestOwnerlessConsumer.Contract.OnTokenTransfer(&_VRFLoadTestOwnerlessConsumer.TransactOpts, arg0, _amount, _data) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerTransactor) RawFulfillRandomness(opts *bind.TransactOpts, requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFLoadTestOwnerlessConsumer.contract.Transact(opts, "rawFulfillRandomness", requestId, randomness) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerSession) RawFulfillRandomness(requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFLoadTestOwnerlessConsumer.Contract.RawFulfillRandomness(&_VRFLoadTestOwnerlessConsumer.TransactOpts, requestId, randomness) +} + +func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumerTransactorSession) RawFulfillRandomness(requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFLoadTestOwnerlessConsumer.Contract.RawFulfillRandomness(&_VRFLoadTestOwnerlessConsumer.TransactOpts, requestId, randomness) +} + 
+func (_VRFLoadTestOwnerlessConsumer *VRFLoadTestOwnerlessConsumer) Address() common.Address { + return _VRFLoadTestOwnerlessConsumer.address +} + +type VRFLoadTestOwnerlessConsumerInterface interface { + PRICE(opts *bind.CallOpts) (*big.Int, error) + + SResponseCount(opts *bind.CallOpts) (*big.Int, error) + + OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) + + RawFulfillRandomness(opts *bind.TransactOpts, requestId [32]byte, randomness *big.Int) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_load_test_with_metrics/vrf_load_test_with_metrics.go b/core/gethwrappers/generated/vrf_load_test_with_metrics/vrf_load_test_with_metrics.go new file mode 100644 index 00000000..dbbe61a2 --- /dev/null +++ b/core/gethwrappers/generated/vrf_load_test_with_metrics/vrf_load_test_with_metrics.go @@ -0,0 +1,674 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_load_test_with_metrics + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2LoadTestWithMetricsMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"SubscriptionCreatedFundedAndConsumerAdded\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"COORDINATOR\",\"outputs\":[{\"internalType\":\"contractVRFCoordinatorV2Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLITOKEN\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_requestId\",\"type\":\"uint256\"}],\"name\":\"getRequestStatus\",\"outputs\":[{\"internalType\":\"
bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256\",\"name\":\"requestTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestBlockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentBlockNumber\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"_subId\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"_requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_numWords\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestCount\",\"type\":\"uint16\"}],\"name\":\"requestRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"_requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_numWords\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestCount\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"_subTopUpAmount\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"name\":\"requestRandomWordsWithForceFulfill\",\"outputs\":[],\"s
tateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"reset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_averageFulfillmentInMillions\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_fastestFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_lastRequestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requests\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"requestTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestBlockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentBlockNumber\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_responseCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_slowestFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"_subId\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"addre
ss\",\"name\":\"_link\",\"type\":\"address\"}],\"name\":\"topUpSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60c0604052600060035560006004556103e760055534801561002057600080fd5b5060405161133038038061133083398101604081905261003f91610059565b60601b6001600160601b031916608081905260a052610089565b60006020828403121561006b57600080fd5b81516001600160a01b038116811461008257600080fd5b9392505050565b60805160601c60a05160601c61124b6100e560003960008181610156015281816103e00152818161048c0152818161056b0152818161067d0152818161072a0152610a250152600081816102c4015261032c015261124b6000f3fe608060405234801561001057600080fd5b50600436106100f55760003560e01c806362cce24411610097578063b1e2174911610066578063b1e2174914610256578063d826f88f1461025f578063d8a4676f1461027e578063dc1670db146102a357600080fd5b806362cce244146101c6578063737144bc146101d957806374dba124146101e2578063a168fa89146101eb57600080fd5b80632be555da116100d35780632be555da1461013e5780633b2bcbf11461015157806355380dfb1461019d578063557d2e92146101bd57600080fd5b80631757f11c146100fa5780631fe543e314610116578063271095ef1461012b575b600080fd5b61010360045481565b6040519081526020015b60405180910390f35b610129610124366004610e08565b6102ac565b005b610129610139366004610f14565b61036b565b61012961014c366004610f83565b610381565b6101787f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161010d565b6000546101789073ffffffffffffffffffffffffffffffffffffffff1681565b61010360025481565b6101296101d4366004610d5e565b610488565b61010360035481565b61010360055481565b61022c6101f9366004610dd6565b6008602052600090815260409020805460028201546003830154600484015460059094015460ff90931693919290919085565b6040805195151586526020860194909452928401919091526060830152608082015260a00161010d565b61010360065481565b6101296000600381905560048190556103e76005556002819055600155565b61029161028c366004610dd6565b6107a7565b60405161010d96959493929190611059565b61010360015481565b3373ffff
ffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461035d576040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016602482015260440160405180910390fd5b610367828261088c565b5050565b6103798686868686866109b2565b505050505050565b600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83169081179091556040805167ffffffffffffffff86166020820152634000aea0917f0000000000000000000000000000000000000000000000000000000000000000918691016040516020818303038152906040526040518463ffffffff1660e01b815260040161043093929190610fc1565b602060405180830381600087803b15801561044a57600080fd5b505af115801561045e573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104829190610d35565b50505050565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b1580156104f257600080fd5b505af1158015610506573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061052a9190610ef7565b6040517f7341c10c00000000000000000000000000000000000000000000000000000000815267ffffffffffffffff821660048201523060248201529091507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1690637341c10c90604401600060405180830381600087803b1580156105c457600080fd5b505af11580156105d8573d6000803e3d6000fd5b505050506105e7818484610381565b6040805167ffffffffffffffff831681523060208201529081018490527f56c142509574e8340ca0190b029c74464b84037d2876278ea0ade3ffb1f0042c9060600160405180910390a161063f8189898989896109b2565b6040517f9f87fad700000000000000000000000000000000000000000000000000000000815267ffffffffffffffff821660048201523060248201527f00000000000000000000000000000000000000000000000000000000
0000000073ffffffffffffffffffffffffffffffffffffffff1690639f87fad790604401600060405180830381600087803b1580156106d657600080fd5b505af11580156106ea573d6000803e3d6000fd5b50506040517fd7ae1d3000000000000000000000000000000000000000000000000000000000815267ffffffffffffffff841660048201523360248201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16925063d7ae1d309150604401600060405180830381600087803b15801561078557600080fd5b505af1158015610799573d6000803e3d6000fd5b505050505050505050505050565b6000818152600860209081526040808320815160c081018352815460ff161515815260018201805484518187028101870190955280855260609587958695869586958695919492938584019390929083018282801561082557602002820191906000526020600020905b815481526020019060010190808311610811575b505050505081526020016002820154815260200160038201548152602001600482015481526020016005820154815250509050806000015181602001518260400151836060015184608001518560a001519650965096509650965096505091939550919395565b6000610896610bc2565b600084815260076020526040812054919250906108b39083611155565b905060006108c482620f4240611118565b90506004548211156108d65760048290555b60055482106108e7576005546108e9565b815b6005556001546108f9578061092b565b60018054610906916110c5565b816001546003546109179190611118565b61092191906110c5565b61092b91906110dd565b600355600085815260086020908152604090912080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660019081178255865161097d939290910191870190610c86565b50600085815260086020526040812042600382015560050184905560018054916109a68361118e565b91905055505050505050565b60005b8161ffff168161ffff161015610bb9576040517f5d3b1d300000000000000000000000000000000000000000000000000000000081526004810186905267ffffffffffffffff8816602482015261ffff8716604482015263ffffffff8086166064830152841660848201526000907f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1690635d3b1d309060a401602060405180830381600087803b158015610a7e57600080fd5b505a
f1158015610a92573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610ab69190610def565b600681905590506000610ac7610bc2565b6040805160c08101825260008082528251818152602080820185528084019182524284860152606084018390526080840186905260a084018390528783526008815293909120825181547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169015151781559051805194955091939092610b55926001850192910190610c86565b506040820151600280830191909155606083015160038301556080830151600483015560a0909201516005909101558054906000610b928361118e565b90915550506000918252600760205260409091205580610bb18161116c565b9150506109b5565b50505050505050565b600046610bce81610c5f565b15610c5857606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b158015610c1a57600080fd5b505afa158015610c2e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610c529190610def565b91505090565b4391505090565b600061a4b1821480610c73575062066eed82145b80610c80575062066eee82145b92915050565b828054828255906000526020600020908101928215610cc1579160200282015b82811115610cc1578251825591602001919060010190610ca6565b50610ccd929150610cd1565b5090565b5b80821115610ccd5760008155600101610cd2565b803573ffffffffffffffffffffffffffffffffffffffff81168114610d0a57600080fd5b919050565b803561ffff81168114610d0a57600080fd5b803563ffffffff81168114610d0a57600080fd5b600060208284031215610d4757600080fd5b81518015158114610d5757600080fd5b9392505050565b600080600080600080600060e0888a031215610d7957600080fd5b610d8288610d0f565b965060208801359550610d9760408901610d21565b9450610da560608901610d21565b9350610db360808901610d0f565b925060a08801359150610dc860c08901610ce6565b905092959891949750929550565b600060208284031215610de857600080fd5b5035919050565b600060208284031215610e0157600080fd5b5051919050565b60008060408385031215610e1b57600080fd5b8235915060208084013567ffffffffffffffff80821115610e3b57600080fd5b818601915086601f830112610e4f57600080fd5b813581811115610e6157610e616111f6565b8060051b604051
7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f83011681018181108582111715610ea457610ea46111f6565b604052828152858101935084860182860187018b1015610ec357600080fd5b600095505b83861015610ee6578035855260019590950194938601938601610ec8565b508096505050505050509250929050565b600060208284031215610f0957600080fd5b8151610d5781611225565b60008060008060008060c08789031215610f2d57600080fd5b8635610f3881611225565b9550610f4660208801610d0f565b945060408701359350610f5b60608801610d21565b9250610f6960808801610d21565b9150610f7760a08801610d0f565b90509295509295509295565b600080600060608486031215610f9857600080fd5b8335610fa381611225565b925060208401359150610fb860408501610ce6565b90509250925092565b73ffffffffffffffffffffffffffffffffffffffff8416815260006020848184015260606040840152835180606085015260005b8181101561101157858101830151858201608001528201610ff5565b81811115611023576000608083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160800195945050505050565b600060c082018815158352602060c08185015281895180845260e086019150828b01935060005b8181101561109c57845183529383019391830191600101611080565b505060408501989098525050506060810193909352608083019190915260a09091015292915050565b600082198211156110d8576110d86111c7565b500190565b600082611113577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615611150576111506111c7565b500290565b600082821015611167576111676111c7565b500390565b600061ffff80831681811415611184576111846111c7565b6001019392505050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8214156111c0576111c06111c7565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b67ffffffffffffffff8116811461123b57600080fd5b5056fea164736f6c6343000806000a", +} + 
+var VRFV2LoadTestWithMetricsABI = VRFV2LoadTestWithMetricsMetaData.ABI + +var VRFV2LoadTestWithMetricsBin = VRFV2LoadTestWithMetricsMetaData.Bin + +func DeployVRFV2LoadTestWithMetrics(auth *bind.TransactOpts, backend bind.ContractBackend, _vrfCoordinator common.Address) (common.Address, *types.Transaction, *VRFV2LoadTestWithMetrics, error) { + parsed, err := VRFV2LoadTestWithMetricsMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2LoadTestWithMetricsBin), backend, _vrfCoordinator) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2LoadTestWithMetrics{address: address, abi: *parsed, VRFV2LoadTestWithMetricsCaller: VRFV2LoadTestWithMetricsCaller{contract: contract}, VRFV2LoadTestWithMetricsTransactor: VRFV2LoadTestWithMetricsTransactor{contract: contract}, VRFV2LoadTestWithMetricsFilterer: VRFV2LoadTestWithMetricsFilterer{contract: contract}}, nil +} + +type VRFV2LoadTestWithMetrics struct { + address common.Address + abi abi.ABI + VRFV2LoadTestWithMetricsCaller + VRFV2LoadTestWithMetricsTransactor + VRFV2LoadTestWithMetricsFilterer +} + +type VRFV2LoadTestWithMetricsCaller struct { + contract *bind.BoundContract +} + +type VRFV2LoadTestWithMetricsTransactor struct { + contract *bind.BoundContract +} + +type VRFV2LoadTestWithMetricsFilterer struct { + contract *bind.BoundContract +} + +type VRFV2LoadTestWithMetricsSession struct { + Contract *VRFV2LoadTestWithMetrics + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2LoadTestWithMetricsCallerSession struct { + Contract *VRFV2LoadTestWithMetricsCaller + CallOpts bind.CallOpts +} + +type VRFV2LoadTestWithMetricsTransactorSession struct { + Contract *VRFV2LoadTestWithMetricsTransactor + TransactOpts bind.TransactOpts +} + +type 
VRFV2LoadTestWithMetricsRaw struct { + Contract *VRFV2LoadTestWithMetrics +} + +type VRFV2LoadTestWithMetricsCallerRaw struct { + Contract *VRFV2LoadTestWithMetricsCaller +} + +type VRFV2LoadTestWithMetricsTransactorRaw struct { + Contract *VRFV2LoadTestWithMetricsTransactor +} + +func NewVRFV2LoadTestWithMetrics(address common.Address, backend bind.ContractBackend) (*VRFV2LoadTestWithMetrics, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2LoadTestWithMetricsABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2LoadTestWithMetrics(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2LoadTestWithMetrics{address: address, abi: abi, VRFV2LoadTestWithMetricsCaller: VRFV2LoadTestWithMetricsCaller{contract: contract}, VRFV2LoadTestWithMetricsTransactor: VRFV2LoadTestWithMetricsTransactor{contract: contract}, VRFV2LoadTestWithMetricsFilterer: VRFV2LoadTestWithMetricsFilterer{contract: contract}}, nil +} + +func NewVRFV2LoadTestWithMetricsCaller(address common.Address, caller bind.ContractCaller) (*VRFV2LoadTestWithMetricsCaller, error) { + contract, err := bindVRFV2LoadTestWithMetrics(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2LoadTestWithMetricsCaller{contract: contract}, nil +} + +func NewVRFV2LoadTestWithMetricsTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2LoadTestWithMetricsTransactor, error) { + contract, err := bindVRFV2LoadTestWithMetrics(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2LoadTestWithMetricsTransactor{contract: contract}, nil +} + +func NewVRFV2LoadTestWithMetricsFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2LoadTestWithMetricsFilterer, error) { + contract, err := bindVRFV2LoadTestWithMetrics(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2LoadTestWithMetricsFilterer{contract: contract}, nil +} + +func 
bindVRFV2LoadTestWithMetrics(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2LoadTestWithMetricsMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2LoadTestWithMetrics.Contract.VRFV2LoadTestWithMetricsCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.VRFV2LoadTestWithMetricsTransactor.contract.Transfer(opts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.VRFV2LoadTestWithMetricsTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2LoadTestWithMetrics.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.contract.Transfer(opts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCaller) COORDINATOR(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2LoadTestWithMetrics.contract.Call(opts, &out, "COORDINATOR") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) COORDINATOR() (common.Address, error) { + return _VRFV2LoadTestWithMetrics.Contract.COORDINATOR(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCallerSession) COORDINATOR() (common.Address, error) { + return _VRFV2LoadTestWithMetrics.Contract.COORDINATOR(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCaller) PLITOKEN(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2LoadTestWithMetrics.contract.Call(opts, &out, "PLITOKEN") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) PLITOKEN() (common.Address, error) { + return _VRFV2LoadTestWithMetrics.Contract.PLITOKEN(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCallerSession) PLITOKEN() (common.Address, error) { + return _VRFV2LoadTestWithMetrics.Contract.PLITOKEN(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCaller) GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) { + var out []interface{} + err := _VRFV2LoadTestWithMetrics.contract.Call(opts, &out, "getRequestStatus", _requestId) + + outstruct := new(GetRequestStatus) + if err != nil { + return *outstruct, err + } + + outstruct.Fulfilled = 
*abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.RandomWords = *abi.ConvertType(out[1], new([]*big.Int)).(*[]*big.Int) + outstruct.RequestTimestamp = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.FulfilmentTimestamp = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.RequestBlockNumber = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + outstruct.FulfilmentBlockNumber = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2LoadTestWithMetrics.Contract.GetRequestStatus(&_VRFV2LoadTestWithMetrics.CallOpts, _requestId) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCallerSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2LoadTestWithMetrics.Contract.GetRequestStatus(&_VRFV2LoadTestWithMetrics.CallOpts, _requestId) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCaller) SAverageFulfillmentInMillions(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2LoadTestWithMetrics.contract.Call(opts, &out, "s_averageFulfillmentInMillions") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) SAverageFulfillmentInMillions() (*big.Int, error) { + return _VRFV2LoadTestWithMetrics.Contract.SAverageFulfillmentInMillions(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCallerSession) SAverageFulfillmentInMillions() (*big.Int, error) { + return _VRFV2LoadTestWithMetrics.Contract.SAverageFulfillmentInMillions(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCaller) SFastestFulfillment(opts *bind.CallOpts) 
(*big.Int, error) { + var out []interface{} + err := _VRFV2LoadTestWithMetrics.contract.Call(opts, &out, "s_fastestFulfillment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) SFastestFulfillment() (*big.Int, error) { + return _VRFV2LoadTestWithMetrics.Contract.SFastestFulfillment(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCallerSession) SFastestFulfillment() (*big.Int, error) { + return _VRFV2LoadTestWithMetrics.Contract.SFastestFulfillment(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCaller) SLastRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2LoadTestWithMetrics.contract.Call(opts, &out, "s_lastRequestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) SLastRequestId() (*big.Int, error) { + return _VRFV2LoadTestWithMetrics.Contract.SLastRequestId(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCallerSession) SLastRequestId() (*big.Int, error) { + return _VRFV2LoadTestWithMetrics.Contract.SLastRequestId(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCaller) SRequestCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2LoadTestWithMetrics.contract.Call(opts, &out, "s_requestCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) SRequestCount() (*big.Int, error) { + return 
_VRFV2LoadTestWithMetrics.Contract.SRequestCount(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCallerSession) SRequestCount() (*big.Int, error) { + return _VRFV2LoadTestWithMetrics.Contract.SRequestCount(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCaller) SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) { + var out []interface{} + err := _VRFV2LoadTestWithMetrics.contract.Call(opts, &out, "s_requests", arg0) + + outstruct := new(SRequests) + if err != nil { + return *outstruct, err + } + + outstruct.Fulfilled = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.RequestTimestamp = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.FulfilmentTimestamp = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.RequestBlockNumber = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.FulfilmentBlockNumber = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2LoadTestWithMetrics.Contract.SRequests(&_VRFV2LoadTestWithMetrics.CallOpts, arg0) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCallerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2LoadTestWithMetrics.Contract.SRequests(&_VRFV2LoadTestWithMetrics.CallOpts, arg0) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCaller) SResponseCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2LoadTestWithMetrics.contract.Call(opts, &out, "s_responseCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) SResponseCount() (*big.Int, error) { + return 
_VRFV2LoadTestWithMetrics.Contract.SResponseCount(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCallerSession) SResponseCount() (*big.Int, error) { + return _VRFV2LoadTestWithMetrics.Contract.SResponseCount(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCaller) SSlowestFulfillment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2LoadTestWithMetrics.contract.Call(opts, &out, "s_slowestFulfillment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) SSlowestFulfillment() (*big.Int, error) { + return _VRFV2LoadTestWithMetrics.Contract.SSlowestFulfillment(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsCallerSession) SSlowestFulfillment() (*big.Int, error) { + return _VRFV2LoadTestWithMetrics.Contract.SSlowestFulfillment(&_VRFV2LoadTestWithMetrics.CallOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.RawFulfillRandomWords(&_VRFV2LoadTestWithMetrics.TransactOpts, requestId, randomWords) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return 
_VRFV2LoadTestWithMetrics.Contract.RawFulfillRandomWords(&_VRFV2LoadTestWithMetrics.TransactOpts, requestId, randomWords) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsTransactor) RequestRandomWords(opts *bind.TransactOpts, _subId uint64, _requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.contract.Transact(opts, "requestRandomWords", _subId, _requestConfirmations, _keyHash, _callbackGasLimit, _numWords, _requestCount) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) RequestRandomWords(_subId uint64, _requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.RequestRandomWords(&_VRFV2LoadTestWithMetrics.TransactOpts, _subId, _requestConfirmations, _keyHash, _callbackGasLimit, _numWords, _requestCount) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsTransactorSession) RequestRandomWords(_subId uint64, _requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.RequestRandomWords(&_VRFV2LoadTestWithMetrics.TransactOpts, _subId, _requestConfirmations, _keyHash, _callbackGasLimit, _numWords, _requestCount) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsTransactor) RequestRandomWordsWithForceFulfill(opts *bind.TransactOpts, _requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _numWords uint32, _requestCount uint16, _subTopUpAmount *big.Int, _link common.Address) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.contract.Transact(opts, "requestRandomWordsWithForceFulfill", _requestConfirmations, _keyHash, _callbackGasLimit, _numWords, _requestCount, _subTopUpAmount, _link) +} + +func 
(_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) RequestRandomWordsWithForceFulfill(_requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _numWords uint32, _requestCount uint16, _subTopUpAmount *big.Int, _link common.Address) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.RequestRandomWordsWithForceFulfill(&_VRFV2LoadTestWithMetrics.TransactOpts, _requestConfirmations, _keyHash, _callbackGasLimit, _numWords, _requestCount, _subTopUpAmount, _link) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsTransactorSession) RequestRandomWordsWithForceFulfill(_requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _numWords uint32, _requestCount uint16, _subTopUpAmount *big.Int, _link common.Address) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.RequestRandomWordsWithForceFulfill(&_VRFV2LoadTestWithMetrics.TransactOpts, _requestConfirmations, _keyHash, _callbackGasLimit, _numWords, _requestCount, _subTopUpAmount, _link) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsTransactor) Reset(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.contract.Transact(opts, "reset") +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) Reset() (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.Reset(&_VRFV2LoadTestWithMetrics.TransactOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsTransactorSession) Reset() (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.Reset(&_VRFV2LoadTestWithMetrics.TransactOpts) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsTransactor) TopUpSubscription(opts *bind.TransactOpts, _subId uint64, _amount *big.Int, _link common.Address) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.contract.Transact(opts, "topUpSubscription", _subId, _amount, _link) +} + +func 
(_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsSession) TopUpSubscription(_subId uint64, _amount *big.Int, _link common.Address) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.TopUpSubscription(&_VRFV2LoadTestWithMetrics.TransactOpts, _subId, _amount, _link) +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsTransactorSession) TopUpSubscription(_subId uint64, _amount *big.Int, _link common.Address) (*types.Transaction, error) { + return _VRFV2LoadTestWithMetrics.Contract.TopUpSubscription(&_VRFV2LoadTestWithMetrics.TransactOpts, _subId, _amount, _link) +} + +type VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAddedIterator struct { + Event *VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAddedIterator) Error() error { + return it.fail +} + +func (it *VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAdded struct { + SubId uint64 + Consumer common.Address + Amount *big.Int + Raw types.Log +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsFilterer) FilterSubscriptionCreatedFundedAndConsumerAdded(opts *bind.FilterOpts) (*VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAddedIterator, error) { + + logs, sub, err := _VRFV2LoadTestWithMetrics.contract.FilterLogs(opts, "SubscriptionCreatedFundedAndConsumerAdded") + if err != nil { + return nil, err + } + return &VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAddedIterator{contract: _VRFV2LoadTestWithMetrics.contract, event: "SubscriptionCreatedFundedAndConsumerAdded", logs: logs, sub: sub}, nil +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsFilterer) WatchSubscriptionCreatedFundedAndConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAdded) (event.Subscription, error) { + + logs, sub, err := _VRFV2LoadTestWithMetrics.contract.WatchLogs(opts, "SubscriptionCreatedFundedAndConsumerAdded") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAdded) + if err := _VRFV2LoadTestWithMetrics.contract.UnpackLog(event, "SubscriptionCreatedFundedAndConsumerAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetricsFilterer) ParseSubscriptionCreatedFundedAndConsumerAdded(log types.Log) (*VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAdded, error) { + event := 
new(VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAdded) + if err := _VRFV2LoadTestWithMetrics.contract.UnpackLog(event, "SubscriptionCreatedFundedAndConsumerAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRequestStatus struct { + Fulfilled bool + RandomWords []*big.Int + RequestTimestamp *big.Int + FulfilmentTimestamp *big.Int + RequestBlockNumber *big.Int + FulfilmentBlockNumber *big.Int +} +type SRequests struct { + Fulfilled bool + RequestTimestamp *big.Int + FulfilmentTimestamp *big.Int + RequestBlockNumber *big.Int + FulfilmentBlockNumber *big.Int +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetrics) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2LoadTestWithMetrics.abi.Events["SubscriptionCreatedFundedAndConsumerAdded"].ID: + return _VRFV2LoadTestWithMetrics.ParseSubscriptionCreatedFundedAndConsumerAdded(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAdded) Topic() common.Hash { + return common.HexToHash("0x56c142509574e8340ca0190b029c74464b84037d2876278ea0ade3ffb1f0042c") +} + +func (_VRFV2LoadTestWithMetrics *VRFV2LoadTestWithMetrics) Address() common.Address { + return _VRFV2LoadTestWithMetrics.address +} + +type VRFV2LoadTestWithMetricsInterface interface { + COORDINATOR(opts *bind.CallOpts) (common.Address, error) + + PLITOKEN(opts *bind.CallOpts) (common.Address, error) + + GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) + + SAverageFulfillmentInMillions(opts *bind.CallOpts) (*big.Int, error) + + SFastestFulfillment(opts *bind.CallOpts) (*big.Int, error) + + SLastRequestId(opts *bind.CallOpts) (*big.Int, error) + + SRequestCount(opts *bind.CallOpts) (*big.Int, error) + + SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) + + 
SResponseCount(opts *bind.CallOpts) (*big.Int, error) + + SSlowestFulfillment(opts *bind.CallOpts) (*big.Int, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, _subId uint64, _requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _numWords uint32, _requestCount uint16) (*types.Transaction, error) + + RequestRandomWordsWithForceFulfill(opts *bind.TransactOpts, _requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _numWords uint32, _requestCount uint16, _subTopUpAmount *big.Int, _link common.Address) (*types.Transaction, error) + + Reset(opts *bind.TransactOpts) (*types.Transaction, error) + + TopUpSubscription(opts *bind.TransactOpts, _subId uint64, _amount *big.Int, _link common.Address) (*types.Transaction, error) + + FilterSubscriptionCreatedFundedAndConsumerAdded(opts *bind.FilterOpts) (*VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAddedIterator, error) + + WatchSubscriptionCreatedFundedAndConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAdded) (event.Subscription, error) + + ParseSubscriptionCreatedFundedAndConsumerAdded(log types.Log) (*VRFV2LoadTestWithMetricsSubscriptionCreatedFundedAndConsumerAdded, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_log_emitter/vrf_log_emitter.go b/core/gethwrappers/generated/vrf_log_emitter/vrf_log_emitter.go new file mode 100644 index 00000000..01bb8152 --- /dev/null +++ b/core/gethwrappers/generated/vrf_log_emitter/vrf_log_emitter.go @@ -0,0 +1,526 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_log_emitter + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFLogEmitterMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"outputSeed\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"name\":\"RandomWordsFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"preSeed\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RandomWordsRequested\",\"type\":\"event\"},{\"inputs\":[{\"internalTy
pe\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"outputSeed\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"name\":\"emitRandomWordsFulfilled\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"preSeed\",\"type\":\"uint256\"},{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"emitRandomWordsRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5061027f806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063ca920adb1461003b578063fe62d3e914610050575b600080fd5b61004e61004936600461015b565b610063565b005b61004e61005e366004610212565b6100eb565b604080518881526020810188905261ffff86168183015263ffffffff858116606083015284166080820152905173ffffffffffffffffffffffffffffffffffffffff83169167ffffffffffffffff8816918b917f63373d1c4696214b898952999c9aaec57dac1ee2723cec59bea6888f489a9772919081900360a00190a45050505050505050565b604080518481526bffffffffffffffffffffffff8416602082015282151581830152905185917f7dffc5ae5ee4e2e4df1651cf6ad329a73cebdb728f37ea0187b9b17e036756e4919081900360600190a250505050565b803563ffffffff8116811461015657600080fd5b919050565b600080600080600080600080610100898b03121561017857600080fd5b883597506020890135965060408901359550606089013567ffffffffffffffff811681146101a557600080fd5b9450608089013561ffff811681146101bc57600080fd5b93506101ca60a08a01610142565b92506101d860c08a01610142565b915060e089013573ffffffffffffffffffffffffffffffffffffffff8116811461020157600080fd5b809150509295985092959890939650565b6000806000806080858703121561022857600080fd5b843593506020850135925060408501356bffffffffffffffffffffffff8116811461025257600080fd5b91506060850135801515811461026757600080fd5b93969295509093505056fea164736f6c6343000813000a", +} + +var VRFLogEmitterABI = VRFLogEmitterMetaData.ABI + +var VRFLogEmitterBin = VRFLogEmitterMetaData.Bin + +func DeployVRFLogEmitter(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRFLogEmitter, error) { + parsed, err := VRFLogEmitterMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFLogEmitterBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, 
tx, &VRFLogEmitter{address: address, abi: *parsed, VRFLogEmitterCaller: VRFLogEmitterCaller{contract: contract}, VRFLogEmitterTransactor: VRFLogEmitterTransactor{contract: contract}, VRFLogEmitterFilterer: VRFLogEmitterFilterer{contract: contract}}, nil +} + +type VRFLogEmitter struct { + address common.Address + abi abi.ABI + VRFLogEmitterCaller + VRFLogEmitterTransactor + VRFLogEmitterFilterer +} + +type VRFLogEmitterCaller struct { + contract *bind.BoundContract +} + +type VRFLogEmitterTransactor struct { + contract *bind.BoundContract +} + +type VRFLogEmitterFilterer struct { + contract *bind.BoundContract +} + +type VRFLogEmitterSession struct { + Contract *VRFLogEmitter + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFLogEmitterCallerSession struct { + Contract *VRFLogEmitterCaller + CallOpts bind.CallOpts +} + +type VRFLogEmitterTransactorSession struct { + Contract *VRFLogEmitterTransactor + TransactOpts bind.TransactOpts +} + +type VRFLogEmitterRaw struct { + Contract *VRFLogEmitter +} + +type VRFLogEmitterCallerRaw struct { + Contract *VRFLogEmitterCaller +} + +type VRFLogEmitterTransactorRaw struct { + Contract *VRFLogEmitterTransactor +} + +func NewVRFLogEmitter(address common.Address, backend bind.ContractBackend) (*VRFLogEmitter, error) { + abi, err := abi.JSON(strings.NewReader(VRFLogEmitterABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFLogEmitter(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFLogEmitter{address: address, abi: abi, VRFLogEmitterCaller: VRFLogEmitterCaller{contract: contract}, VRFLogEmitterTransactor: VRFLogEmitterTransactor{contract: contract}, VRFLogEmitterFilterer: VRFLogEmitterFilterer{contract: contract}}, nil +} + +func NewVRFLogEmitterCaller(address common.Address, caller bind.ContractCaller) (*VRFLogEmitterCaller, error) { + contract, err := bindVRFLogEmitter(address, caller, nil, nil) + if err != nil { + return nil, err + } + return 
&VRFLogEmitterCaller{contract: contract}, nil +} + +func NewVRFLogEmitterTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFLogEmitterTransactor, error) { + contract, err := bindVRFLogEmitter(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFLogEmitterTransactor{contract: contract}, nil +} + +func NewVRFLogEmitterFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFLogEmitterFilterer, error) { + contract, err := bindVRFLogEmitter(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFLogEmitterFilterer{contract: contract}, nil +} + +func bindVRFLogEmitter(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFLogEmitterMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFLogEmitter *VRFLogEmitterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFLogEmitter.Contract.VRFLogEmitterCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFLogEmitter *VRFLogEmitterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFLogEmitter.Contract.VRFLogEmitterTransactor.contract.Transfer(opts) +} + +func (_VRFLogEmitter *VRFLogEmitterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFLogEmitter.Contract.VRFLogEmitterTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFLogEmitter *VRFLogEmitterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFLogEmitter.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_VRFLogEmitter *VRFLogEmitterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFLogEmitter.Contract.contract.Transfer(opts) +} + +func (_VRFLogEmitter *VRFLogEmitterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFLogEmitter.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFLogEmitter *VRFLogEmitterTransactor) EmitRandomWordsFulfilled(opts *bind.TransactOpts, requestId *big.Int, outputSeed *big.Int, payment *big.Int, success bool) (*types.Transaction, error) { + return _VRFLogEmitter.contract.Transact(opts, "emitRandomWordsFulfilled", requestId, outputSeed, payment, success) +} + +func (_VRFLogEmitter *VRFLogEmitterSession) EmitRandomWordsFulfilled(requestId *big.Int, outputSeed *big.Int, payment *big.Int, success bool) (*types.Transaction, error) { + return _VRFLogEmitter.Contract.EmitRandomWordsFulfilled(&_VRFLogEmitter.TransactOpts, requestId, outputSeed, payment, success) +} + +func (_VRFLogEmitter *VRFLogEmitterTransactorSession) EmitRandomWordsFulfilled(requestId *big.Int, outputSeed *big.Int, payment *big.Int, success bool) (*types.Transaction, error) { + return _VRFLogEmitter.Contract.EmitRandomWordsFulfilled(&_VRFLogEmitter.TransactOpts, requestId, outputSeed, payment, success) +} + +func (_VRFLogEmitter *VRFLogEmitterTransactor) EmitRandomWordsRequested(opts *bind.TransactOpts, keyHash [32]byte, requestId *big.Int, preSeed *big.Int, subId uint64, minimumRequestConfirmations uint16, callbackGasLimit uint32, numWords uint32, sender common.Address) (*types.Transaction, error) { + return _VRFLogEmitter.contract.Transact(opts, "emitRandomWordsRequested", keyHash, requestId, preSeed, subId, minimumRequestConfirmations, callbackGasLimit, numWords, sender) +} + +func (_VRFLogEmitter *VRFLogEmitterSession) EmitRandomWordsRequested(keyHash [32]byte, requestId *big.Int, preSeed *big.Int, subId uint64, 
minimumRequestConfirmations uint16, callbackGasLimit uint32, numWords uint32, sender common.Address) (*types.Transaction, error) { + return _VRFLogEmitter.Contract.EmitRandomWordsRequested(&_VRFLogEmitter.TransactOpts, keyHash, requestId, preSeed, subId, minimumRequestConfirmations, callbackGasLimit, numWords, sender) +} + +func (_VRFLogEmitter *VRFLogEmitterTransactorSession) EmitRandomWordsRequested(keyHash [32]byte, requestId *big.Int, preSeed *big.Int, subId uint64, minimumRequestConfirmations uint16, callbackGasLimit uint32, numWords uint32, sender common.Address) (*types.Transaction, error) { + return _VRFLogEmitter.Contract.EmitRandomWordsRequested(&_VRFLogEmitter.TransactOpts, keyHash, requestId, preSeed, subId, minimumRequestConfirmations, callbackGasLimit, numWords, sender) +} + +type VRFLogEmitterRandomWordsFulfilledIterator struct { + Event *VRFLogEmitterRandomWordsFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFLogEmitterRandomWordsFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFLogEmitterRandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFLogEmitterRandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFLogEmitterRandomWordsFulfilledIterator) Error() error { + return it.fail +} + +func (it *VRFLogEmitterRandomWordsFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFLogEmitterRandomWordsFulfilled 
struct { + RequestId *big.Int + OutputSeed *big.Int + Payment *big.Int + Success bool + Raw types.Log +} + +func (_VRFLogEmitter *VRFLogEmitterFilterer) FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int) (*VRFLogEmitterRandomWordsFulfilledIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFLogEmitter.contract.FilterLogs(opts, "RandomWordsFulfilled", requestIdRule) + if err != nil { + return nil, err + } + return &VRFLogEmitterRandomWordsFulfilledIterator{contract: _VRFLogEmitter.contract, event: "RandomWordsFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFLogEmitter *VRFLogEmitterFilterer) WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFLogEmitterRandomWordsFulfilled, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFLogEmitter.contract.WatchLogs(opts, "RandomWordsFulfilled", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFLogEmitterRandomWordsFulfilled) + if err := _VRFLogEmitter.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFLogEmitter *VRFLogEmitterFilterer) ParseRandomWordsFulfilled(log types.Log) (*VRFLogEmitterRandomWordsFulfilled, error) { + event := new(VRFLogEmitterRandomWordsFulfilled) + if err := _VRFLogEmitter.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return nil, err + } 
+ event.Raw = log + return event, nil +} + +type VRFLogEmitterRandomWordsRequestedIterator struct { + Event *VRFLogEmitterRandomWordsRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFLogEmitterRandomWordsRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFLogEmitterRandomWordsRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFLogEmitterRandomWordsRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFLogEmitterRandomWordsRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFLogEmitterRandomWordsRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFLogEmitterRandomWordsRequested struct { + KeyHash [32]byte + RequestId *big.Int + PreSeed *big.Int + SubId uint64 + MinimumRequestConfirmations uint16 + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address + Raw types.Log +} + +func (_VRFLogEmitter *VRFLogEmitterFilterer) FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []uint64, sender []common.Address) (*VRFLogEmitterRandomWordsRequestedIterator, error) { + + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = 
append(senderRule, senderItem) + } + + logs, sub, err := _VRFLogEmitter.contract.FilterLogs(opts, "RandomWordsRequested", keyHashRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return &VRFLogEmitterRandomWordsRequestedIterator{contract: _VRFLogEmitter.contract, event: "RandomWordsRequested", logs: logs, sub: sub}, nil +} + +func (_VRFLogEmitter *VRFLogEmitterFilterer) WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *VRFLogEmitterRandomWordsRequested, keyHash [][32]byte, subId []uint64, sender []common.Address) (event.Subscription, error) { + + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _VRFLogEmitter.contract.WatchLogs(opts, "RandomWordsRequested", keyHashRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFLogEmitterRandomWordsRequested) + if err := _VRFLogEmitter.contract.UnpackLog(event, "RandomWordsRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFLogEmitter *VRFLogEmitterFilterer) ParseRandomWordsRequested(log types.Log) (*VRFLogEmitterRandomWordsRequested, error) { + event := new(VRFLogEmitterRandomWordsRequested) + if err := _VRFLogEmitter.contract.UnpackLog(event, "RandomWordsRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VRFLogEmitter 
*VRFLogEmitter) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFLogEmitter.abi.Events["RandomWordsFulfilled"].ID: + return _VRFLogEmitter.ParseRandomWordsFulfilled(log) + case _VRFLogEmitter.abi.Events["RandomWordsRequested"].ID: + return _VRFLogEmitter.ParseRandomWordsRequested(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFLogEmitterRandomWordsFulfilled) Topic() common.Hash { + return common.HexToHash("0x7dffc5ae5ee4e2e4df1651cf6ad329a73cebdb728f37ea0187b9b17e036756e4") +} + +func (VRFLogEmitterRandomWordsRequested) Topic() common.Hash { + return common.HexToHash("0x63373d1c4696214b898952999c9aaec57dac1ee2723cec59bea6888f489a9772") +} + +func (_VRFLogEmitter *VRFLogEmitter) Address() common.Address { + return _VRFLogEmitter.address +} + +type VRFLogEmitterInterface interface { + EmitRandomWordsFulfilled(opts *bind.TransactOpts, requestId *big.Int, outputSeed *big.Int, payment *big.Int, success bool) (*types.Transaction, error) + + EmitRandomWordsRequested(opts *bind.TransactOpts, keyHash [32]byte, requestId *big.Int, preSeed *big.Int, subId uint64, minimumRequestConfirmations uint16, callbackGasLimit uint32, numWords uint32, sender common.Address) (*types.Transaction, error) + + FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int) (*VRFLogEmitterRandomWordsFulfilledIterator, error) + + WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFLogEmitterRandomWordsFulfilled, requestId []*big.Int) (event.Subscription, error) + + ParseRandomWordsFulfilled(log types.Log) (*VRFLogEmitterRandomWordsFulfilled, error) + + FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []uint64, sender []common.Address) (*VRFLogEmitterRandomWordsRequestedIterator, error) + + WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *VRFLogEmitterRandomWordsRequested, keyHash [][32]byte, subId []uint64, sender 
[]common.Address) (event.Subscription, error) + + ParseRandomWordsRequested(log types.Log) (*VRFLogEmitterRandomWordsRequested, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_malicious_consumer_v2/vrf_malicious_consumer_v2.go b/core/gethwrappers/generated/vrf_malicious_consumer_v2/vrf_malicious_consumer_v2.go new file mode 100644 index 00000000..1fe5e61f --- /dev/null +++ b/core/gethwrappers/generated/vrf_malicious_consumer_v2/vrf_malicious_consumer_v2.go @@ -0,0 +1,330 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrf_malicious_consumer_v2 + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFMaliciousConsumerV2MetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"createSubscriptionAndFund\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"requestRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_gasAvailable\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_subId\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\
"address[]\"}],\"name\":\"updateSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60a060405234801561001057600080fd5b50604051610d7c380380610d7c83398101604081905261002f9161008e565b6001600160601b0319606083901b16608052600280546001600160a01b03199081166001600160a01b0394851617909155600380549290931691161790556100c1565b80516001600160a01b038116811461008957600080fd5b919050565b600080604083850312156100a157600080fd5b6100aa83610072565b91506100b860208401610072565b90509250929050565b60805160601c610c966100e66000396000818161017001526101d80152610c966000f3fe608060405234801561001057600080fd5b50600436106100885760003560e01c8063cf62c8ab1161005b578063cf62c8ab14610120578063e89e106a14610133578063f08c5daa1461013c578063f6eaffc81461014557600080fd5b80631fe543e31461008d57806336bfffed146100a25780635e3b709f146100b5578063706da1ca146100db575b600080fd5b6100a061009b3660046109b6565b610158565b005b6100a06100b036600461089c565b610218565b6100c86100c3366004610984565b6103a0565b6040519081526020015b60405180910390f35b6003546101079074010000000000000000000000000000000000000000900467ffffffffffffffff1681565b60405167ffffffffffffffff90911681526020016100d2565b6100a061012e366004610a84565b610492565b6100c860015481565b6100c860045481565b6100c8610153366004610984565b610711565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461020a576040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660248201526044015b60405180910390fd5b6102148282610732565b5050565b60035474010000000000000000000000000000000000000000900467ffffffffffffffff166102a3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f7375624944206e6f7420736574000000000000000000000000000000000000006044820152606401610201565b60005b815181101561021457600254600354835173fffffffffffff
fffffffffffffffffffffffffff90921691637341c10c9174010000000000000000000000000000000000000000900467ffffffffffffffff169085908590811061030b5761030b610c2b565b60200260200101516040518363ffffffff1660e01b815260040161035b92919067ffffffffffffffff92909216825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b600060405180830381600087803b15801561037557600080fd5b505af1158015610389573d6000803e3d6000fd5b50505050808061039890610bcb565b9150506102a6565b60058190556002546003546040517f5d3b1d30000000000000000000000000000000000000000000000000000000008152600481018490527401000000000000000000000000000000000000000090910467ffffffffffffffff1660248201526001604482018190526207a1206064830152608482015260009173ffffffffffffffffffffffffffffffffffffffff1690635d3b1d309060a401602060405180830381600087803b15801561045457600080fd5b505af1158015610468573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061048c919061099d565b92915050565b60035474010000000000000000000000000000000000000000900467ffffffffffffffff1661063d57600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b15801561052557600080fd5b505af1158015610539573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061055d9190610a5a565b600380547fffffffff0000000000000000ffffffffffffffffffffffffffffffffffffffff167401000000000000000000000000000000000000000067ffffffffffffffff938416810291909117918290556002546040517f7341c10c00000000000000000000000000000000000000000000000000000000815291909204909216600483015230602483015273ffffffffffffffffffffffffffffffffffffffff1690637341c10c90604401600060405180830381600087803b15801561062457600080fd5b505af1158015610638573d6000803e3d6000fd5b505050505b6003546002546040805174010000000000000000000000000000000000000000840467ffffffffffffffff16602082015273ffffffffffffffffffffffffffffffffffffffff93841693634000aea09316918591016040516020818303038152906040526040518463f
fffffff1660e01b81526004016106bf93929190610ab2565b602060405180830381600087803b1580156106d957600080fd5b505af11580156106ed573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610214919061095b565b6000818154811061072157600080fd5b600091825260209091200154905081565b5a600455805161074990600090602084019061083c565b5060018281556002546005546003546040517f5d3b1d30000000000000000000000000000000000000000000000000000000008152600481019290925274010000000000000000000000000000000000000000900467ffffffffffffffff1660248201526044810183905262030d406064820152608481019290925273ffffffffffffffffffffffffffffffffffffffff1690635d3b1d309060a401602060405180830381600087803b1580156107ff57600080fd5b505af1158015610813573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610837919061099d565b505050565b828054828255906000526020600020908101928215610877579160200282015b8281111561087757825182559160200191906001019061085c565b50610883929150610887565b5090565b5b808211156108835760008155600101610888565b600060208083850312156108af57600080fd5b823567ffffffffffffffff8111156108c657600080fd5b8301601f810185136108d757600080fd5b80356108ea6108e582610ba7565b610b58565b80828252848201915084840188868560051b870101111561090a57600080fd5b60009450845b8481101561094d57813573ffffffffffffffffffffffffffffffffffffffff8116811461093b578687fd5b84529286019290860190600101610910565b509098975050505050505050565b60006020828403121561096d57600080fd5b8151801515811461097d57600080fd5b9392505050565b60006020828403121561099657600080fd5b5035919050565b6000602082840312156109af57600080fd5b5051919050565b600080604083850312156109c957600080fd5b8235915060208084013567ffffffffffffffff8111156109e857600080fd5b8401601f810186136109f957600080fd5b8035610a076108e582610ba7565b80828252848201915084840189868560051b8701011115610a2757600080fd5b600094505b83851015610a4a578035835260019490940193918501918501610a2c565b5080955050505050509250929050565b600060208284031215610a6c57600080fd5b815167ffffffffffffffff8116811461097d57600080fd5b600060208284031215610
a9657600080fd5b81356bffffffffffffffffffffffff8116811461097d57600080fd5b73ffffffffffffffffffffffffffffffffffffffff84168152600060206bffffffffffffffffffffffff85168184015260606040840152835180606085015260005b81811015610b1057858101830151858201608001528201610af4565b81811115610b22576000608083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160800195945050505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610b9f57610b9f610c5a565b604052919050565b600067ffffffffffffffff821115610bc157610bc1610c5a565b5060051b60200190565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415610c24577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFMaliciousConsumerV2ABI = VRFMaliciousConsumerV2MetaData.ABI + +var VRFMaliciousConsumerV2Bin = VRFMaliciousConsumerV2MetaData.Bin + +func DeployVRFMaliciousConsumerV2(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator common.Address, link common.Address) (common.Address, *types.Transaction, *VRFMaliciousConsumerV2, error) { + parsed, err := VRFMaliciousConsumerV2MetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFMaliciousConsumerV2Bin), backend, vrfCoordinator, link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFMaliciousConsumerV2{address: address, abi: *parsed, VRFMaliciousConsumerV2Caller: VRFMaliciousConsumerV2Caller{contract: contract}, 
VRFMaliciousConsumerV2Transactor: VRFMaliciousConsumerV2Transactor{contract: contract}, VRFMaliciousConsumerV2Filterer: VRFMaliciousConsumerV2Filterer{contract: contract}}, nil +} + +type VRFMaliciousConsumerV2 struct { + address common.Address + abi abi.ABI + VRFMaliciousConsumerV2Caller + VRFMaliciousConsumerV2Transactor + VRFMaliciousConsumerV2Filterer +} + +type VRFMaliciousConsumerV2Caller struct { + contract *bind.BoundContract +} + +type VRFMaliciousConsumerV2Transactor struct { + contract *bind.BoundContract +} + +type VRFMaliciousConsumerV2Filterer struct { + contract *bind.BoundContract +} + +type VRFMaliciousConsumerV2Session struct { + Contract *VRFMaliciousConsumerV2 + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFMaliciousConsumerV2CallerSession struct { + Contract *VRFMaliciousConsumerV2Caller + CallOpts bind.CallOpts +} + +type VRFMaliciousConsumerV2TransactorSession struct { + Contract *VRFMaliciousConsumerV2Transactor + TransactOpts bind.TransactOpts +} + +type VRFMaliciousConsumerV2Raw struct { + Contract *VRFMaliciousConsumerV2 +} + +type VRFMaliciousConsumerV2CallerRaw struct { + Contract *VRFMaliciousConsumerV2Caller +} + +type VRFMaliciousConsumerV2TransactorRaw struct { + Contract *VRFMaliciousConsumerV2Transactor +} + +func NewVRFMaliciousConsumerV2(address common.Address, backend bind.ContractBackend) (*VRFMaliciousConsumerV2, error) { + abi, err := abi.JSON(strings.NewReader(VRFMaliciousConsumerV2ABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFMaliciousConsumerV2(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFMaliciousConsumerV2{address: address, abi: abi, VRFMaliciousConsumerV2Caller: VRFMaliciousConsumerV2Caller{contract: contract}, VRFMaliciousConsumerV2Transactor: VRFMaliciousConsumerV2Transactor{contract: contract}, VRFMaliciousConsumerV2Filterer: VRFMaliciousConsumerV2Filterer{contract: contract}}, nil +} + +func 
NewVRFMaliciousConsumerV2Caller(address common.Address, caller bind.ContractCaller) (*VRFMaliciousConsumerV2Caller, error) { + contract, err := bindVRFMaliciousConsumerV2(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFMaliciousConsumerV2Caller{contract: contract}, nil +} + +func NewVRFMaliciousConsumerV2Transactor(address common.Address, transactor bind.ContractTransactor) (*VRFMaliciousConsumerV2Transactor, error) { + contract, err := bindVRFMaliciousConsumerV2(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFMaliciousConsumerV2Transactor{contract: contract}, nil +} + +func NewVRFMaliciousConsumerV2Filterer(address common.Address, filterer bind.ContractFilterer) (*VRFMaliciousConsumerV2Filterer, error) { + contract, err := bindVRFMaliciousConsumerV2(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFMaliciousConsumerV2Filterer{contract: contract}, nil +} + +func bindVRFMaliciousConsumerV2(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFMaliciousConsumerV2MetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFMaliciousConsumerV2.Contract.VRFMaliciousConsumerV2Caller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.Contract.VRFMaliciousConsumerV2Transactor.contract.Transfer(opts) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.Contract.VRFMaliciousConsumerV2Transactor.contract.Transact(opts, method, params...) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFMaliciousConsumerV2.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.Contract.contract.Transfer(opts) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Caller) SGasAvailable(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFMaliciousConsumerV2.contract.Call(opts, &out, "s_gasAvailable") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Session) SGasAvailable() (*big.Int, error) { + return _VRFMaliciousConsumerV2.Contract.SGasAvailable(&_VRFMaliciousConsumerV2.CallOpts) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2CallerSession) SGasAvailable() (*big.Int, error) { + return _VRFMaliciousConsumerV2.Contract.SGasAvailable(&_VRFMaliciousConsumerV2.CallOpts) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Caller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFMaliciousConsumerV2.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Session) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFMaliciousConsumerV2.Contract.SRandomWords(&_VRFMaliciousConsumerV2.CallOpts, arg0) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2CallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFMaliciousConsumerV2.Contract.SRandomWords(&_VRFMaliciousConsumerV2.CallOpts, arg0) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Caller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFMaliciousConsumerV2.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Session) 
SRequestId() (*big.Int, error) { + return _VRFMaliciousConsumerV2.Contract.SRequestId(&_VRFMaliciousConsumerV2.CallOpts) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2CallerSession) SRequestId() (*big.Int, error) { + return _VRFMaliciousConsumerV2.Contract.SRequestId(&_VRFMaliciousConsumerV2.CallOpts) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Caller) SSubId(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _VRFMaliciousConsumerV2.contract.Call(opts, &out, "s_subId") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Session) SSubId() (uint64, error) { + return _VRFMaliciousConsumerV2.Contract.SSubId(&_VRFMaliciousConsumerV2.CallOpts) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2CallerSession) SSubId() (uint64, error) { + return _VRFMaliciousConsumerV2.Contract.SSubId(&_VRFMaliciousConsumerV2.CallOpts) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Transactor) CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.contract.Transact(opts, "createSubscriptionAndFund", amount) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Session) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.Contract.CreateSubscriptionAndFund(&_VRFMaliciousConsumerV2.TransactOpts, amount) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2TransactorSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.Contract.CreateSubscriptionAndFund(&_VRFMaliciousConsumerV2.TransactOpts, amount) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Transactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + 
return _VRFMaliciousConsumerV2.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Session) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.Contract.RawFulfillRandomWords(&_VRFMaliciousConsumerV2.TransactOpts, requestId, randomWords) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2TransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.Contract.RawFulfillRandomWords(&_VRFMaliciousConsumerV2.TransactOpts, requestId, randomWords) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Transactor) RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.contract.Transact(opts, "requestRandomness", keyHash) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Session) RequestRandomness(keyHash [32]byte) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.Contract.RequestRandomness(&_VRFMaliciousConsumerV2.TransactOpts, keyHash) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2TransactorSession) RequestRandomness(keyHash [32]byte) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.Contract.RequestRandomness(&_VRFMaliciousConsumerV2.TransactOpts, keyHash) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Transactor) UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.contract.Transact(opts, "updateSubscription", consumers) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2Session) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.Contract.UpdateSubscription(&_VRFMaliciousConsumerV2.TransactOpts, consumers) +} + +func (_VRFMaliciousConsumerV2 
*VRFMaliciousConsumerV2TransactorSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2.Contract.UpdateSubscription(&_VRFMaliciousConsumerV2.TransactOpts, consumers) +} + +func (_VRFMaliciousConsumerV2 *VRFMaliciousConsumerV2) Address() common.Address { + return _VRFMaliciousConsumerV2.address +} + +type VRFMaliciousConsumerV2Interface interface { + SGasAvailable(opts *bind.CallOpts) (*big.Int, error) + + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + SSubId(opts *bind.CallOpts) (uint64, error) + + CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte) (*types.Transaction, error) + + UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_malicious_consumer_v2_plus/vrf_malicious_consumer_v2_plus.go b/core/gethwrappers/generated/vrf_malicious_consumer_v2_plus/vrf_malicious_consumer_v2_plus.go new file mode 100644 index 00000000..6c06ee1e --- /dev/null +++ b/core/gethwrappers/generated/vrf_malicious_consumer_v2_plus/vrf_malicious_consumer_v2_plus.go @@ -0,0 +1,704 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_malicious_consumer_v2_plus + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFMaliciousConsumerV2PlusMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"}],\"name\":\"OnlyOwnerOrCoordinator\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroAddress\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTr
ansferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"createSubscriptionAndFund\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"requestRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_gasAvailable\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_vrfCoordinator\",\"outputs\":[{\"internalType\":\"contractIVRFCoordinatorV2Plus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"}],\"name\":\"setCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable
\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"name\":\"updateSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60806040523480156200001157600080fd5b5060405162001239380380620012398339810160408190526200003491620001c2565b8133806000816200008c5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000bf57620000bf81620000f9565b5050600280546001600160a01b039384166001600160a01b0319918216179091556005805494909316931692909217905550620001fa9050565b6001600160a01b038116331415620001545760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000083565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001bd57600080fd5b919050565b60008060408385031215620001d657600080fd5b620001e183620001a5565b9150620001f160208401620001a5565b90509250929050565b61102f806200020a6000396000f3fe608060405234801561001057600080fd5b50600436106100d45760003560e01c80639eccacf611610081578063f08c5daa1161005b578063f08c5daa146101bd578063f2fde38b146101c6578063f6eaffc8146101d957600080fd5b80639eccacf614610181578063cf62c8ab146101a1578063e89e106a146101b457600080fd5b806379ba5097116100b257806379ba5097146101275780638da5cb5b1461012f5780638ea981171461016e57600080fd5b80631fe543e3146100d957806336bfffed146100ee5780635e3b709f14610101575b600080fd5b6100ec6100e7366004610d03565b6101ec565b005b6100ec6100fc366004610c0b565b610272565b61011461010f36600
4610cd1565b6103aa565b6040519081526020015b60405180910390f35b6100ec6104a0565b60005473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161011e565b6100ec61017c366004610bf0565b61059d565b6002546101499073ffffffffffffffffffffffffffffffffffffffff1681565b6100ec6101af366004610da7565b6106a8565b61011460045481565b61011460065481565b6100ec6101d4366004610bf0565b6108ae565b6101146101e7366004610cd1565b6108c2565b60025473ffffffffffffffffffffffffffffffffffffffff163314610264576002546040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff90911660248201526044015b60405180910390fd5b61026e82826108e3565b5050565b6007546102db576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f7375624944206e6f742073657400000000000000000000000000000000000000604482015260640161025b565b60005b815181101561026e57600254600754835173ffffffffffffffffffffffffffffffffffffffff9092169163bec4c08c919085908590811061032157610321610fc4565b60200260200101516040518363ffffffff1660e01b815260040161036592919091825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b600060405180830381600087803b15801561037f57600080fd5b505af1158015610393573d6000803e3d6000fd5b5050505080806103a290610f64565b9150506102de565b60088190556040805160c08101825282815260075460208083019190915260018284018190526207a1206060840152608083015282519081018352600080825260a083019190915260025492517f9b1c385e000000000000000000000000000000000000000000000000000000008152909273ffffffffffffffffffffffffffffffffffffffff1690639b1c385e90610447908490600401610e8c565b602060405180830381600087803b15801561046157600080fd5b505af1158015610475573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104999190610cea565b9392505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610521576040517f08c379a00000000000000000000000000000000000000000000000000000000081526020600482015260166024820
1527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015260640161025b565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60005473ffffffffffffffffffffffffffffffffffffffff1633148015906105dd575060025473ffffffffffffffffffffffffffffffffffffffff163314155b15610661573361060260005473ffffffffffffffffffffffffffffffffffffffff1690565b6002546040517f061db9c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9384166004820152918316602483015291909116604482015260640161025b565b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b6007546107e057600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b15801561071957600080fd5b505af115801561072d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906107519190610cea565b60078190556002546040517fbec4c08c000000000000000000000000000000000000000000000000000000008152600481019290925230602483015273ffffffffffffffffffffffffffffffffffffffff169063bec4c08c90604401600060405180830381600087803b1580156107c757600080fd5b505af11580156107db573d6000803e3d6000fd5b505050505b6005546002546007546040805160208082019390935281518082039093018352808201918290527f4000aea00000000000000000000000000000000000000000000000000000000090915273ffffffffffffffffffffffffffffffffffffffff93841693634000aea09361085c93911691869190604401610e40565b602060405180830381600087803b15801561087657600080fd5b505af115801561088a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061026e9190610caf565b6108b66109ee565b6108bf81610a71565b50565b600381815481106108d257600080fd5b600091825260209
091200154905081565b5a60065580516108fa906003906020840190610b67565b5060048281556040805160c0810182526008548152600754602080830191909152600182840181905262030d4060608401526080830152825190810183526000815260a082015260025491517f9b1c385e000000000000000000000000000000000000000000000000000000008152909273ffffffffffffffffffffffffffffffffffffffff90921691639b1c385e9161099691859101610e8c565b602060405180830381600087803b1580156109b057600080fd5b505af11580156109c4573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906109e89190610cea565b50505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610a6f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161025b565b565b73ffffffffffffffffffffffffffffffffffffffff8116331415610af1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161025b565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b828054828255906000526020600020908101928215610ba2579160200282015b82811115610ba2578251825591602001919060010190610b87565b50610bae929150610bb2565b5090565b5b80821115610bae5760008155600101610bb3565b803573ffffffffffffffffffffffffffffffffffffffff81168114610beb57600080fd5b919050565b600060208284031215610c0257600080fd5b61049982610bc7565b60006020808385031215610c1e57600080fd5b823567ffffffffffffffff811115610c3557600080fd5b8301601f81018513610c4657600080fd5b8035610c59610c5482610f40565b610ef1565b80828252848201915084840188868560051b8701011115610c7957600080fd5b600094505b83851015610ca357610c8f81610bc7565b835260019490940193918501918501610c7e565b50979650505050505050565b600060208284031215610cc157600080fd5b8
151801515811461049957600080fd5b600060208284031215610ce357600080fd5b5035919050565b600060208284031215610cfc57600080fd5b5051919050565b60008060408385031215610d1657600080fd5b8235915060208084013567ffffffffffffffff811115610d3557600080fd5b8401601f81018613610d4657600080fd5b8035610d54610c5482610f40565b80828252848201915084840189868560051b8701011115610d7457600080fd5b600094505b83851015610d97578035835260019490940193918501918501610d79565b5080955050505050509250929050565b600060208284031215610db957600080fd5b81356bffffffffffffffffffffffff8116811461049957600080fd5b6000815180845260005b81811015610dfb57602081850181015186830182015201610ddf565b81811115610e0d576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b73ffffffffffffffffffffffffffffffffffffffff841681526bffffffffffffffffffffffff83166020820152606060408201526000610e836060830184610dd5565b95945050505050565b60208152815160208201526020820151604082015261ffff60408301511660608201526000606083015163ffffffff80821660808501528060808601511660a0850152505060a083015160c080840152610ee960e0840182610dd5565b949350505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610f3857610f38610ff3565b604052919050565b600067ffffffffffffffff821115610f5a57610f5a610ff3565b5060051b60200190565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415610fbd577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFMaliciousConsumerV2PlusABI = VRFMaliciousConsumerV2PlusMetaData.ABI + +var VRFMaliciousConsumerV2PlusBin = VRFMaliciousConsumerV2PlusMetaData.Bin + +func DeployVRFMaliciousConsumerV2Plus(auth *bind.TransactOpts, backend 
bind.ContractBackend, vrfCoordinator common.Address, link common.Address) (common.Address, *types.Transaction, *VRFMaliciousConsumerV2Plus, error) { + parsed, err := VRFMaliciousConsumerV2PlusMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFMaliciousConsumerV2PlusBin), backend, vrfCoordinator, link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFMaliciousConsumerV2Plus{address: address, abi: *parsed, VRFMaliciousConsumerV2PlusCaller: VRFMaliciousConsumerV2PlusCaller{contract: contract}, VRFMaliciousConsumerV2PlusTransactor: VRFMaliciousConsumerV2PlusTransactor{contract: contract}, VRFMaliciousConsumerV2PlusFilterer: VRFMaliciousConsumerV2PlusFilterer{contract: contract}}, nil +} + +type VRFMaliciousConsumerV2Plus struct { + address common.Address + abi abi.ABI + VRFMaliciousConsumerV2PlusCaller + VRFMaliciousConsumerV2PlusTransactor + VRFMaliciousConsumerV2PlusFilterer +} + +type VRFMaliciousConsumerV2PlusCaller struct { + contract *bind.BoundContract +} + +type VRFMaliciousConsumerV2PlusTransactor struct { + contract *bind.BoundContract +} + +type VRFMaliciousConsumerV2PlusFilterer struct { + contract *bind.BoundContract +} + +type VRFMaliciousConsumerV2PlusSession struct { + Contract *VRFMaliciousConsumerV2Plus + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFMaliciousConsumerV2PlusCallerSession struct { + Contract *VRFMaliciousConsumerV2PlusCaller + CallOpts bind.CallOpts +} + +type VRFMaliciousConsumerV2PlusTransactorSession struct { + Contract *VRFMaliciousConsumerV2PlusTransactor + TransactOpts bind.TransactOpts +} + +type VRFMaliciousConsumerV2PlusRaw struct { + Contract *VRFMaliciousConsumerV2Plus +} + +type VRFMaliciousConsumerV2PlusCallerRaw struct { + Contract 
*VRFMaliciousConsumerV2PlusCaller +} + +type VRFMaliciousConsumerV2PlusTransactorRaw struct { + Contract *VRFMaliciousConsumerV2PlusTransactor +} + +func NewVRFMaliciousConsumerV2Plus(address common.Address, backend bind.ContractBackend) (*VRFMaliciousConsumerV2Plus, error) { + abi, err := abi.JSON(strings.NewReader(VRFMaliciousConsumerV2PlusABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFMaliciousConsumerV2Plus(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFMaliciousConsumerV2Plus{address: address, abi: abi, VRFMaliciousConsumerV2PlusCaller: VRFMaliciousConsumerV2PlusCaller{contract: contract}, VRFMaliciousConsumerV2PlusTransactor: VRFMaliciousConsumerV2PlusTransactor{contract: contract}, VRFMaliciousConsumerV2PlusFilterer: VRFMaliciousConsumerV2PlusFilterer{contract: contract}}, nil +} + +func NewVRFMaliciousConsumerV2PlusCaller(address common.Address, caller bind.ContractCaller) (*VRFMaliciousConsumerV2PlusCaller, error) { + contract, err := bindVRFMaliciousConsumerV2Plus(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFMaliciousConsumerV2PlusCaller{contract: contract}, nil +} + +func NewVRFMaliciousConsumerV2PlusTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFMaliciousConsumerV2PlusTransactor, error) { + contract, err := bindVRFMaliciousConsumerV2Plus(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFMaliciousConsumerV2PlusTransactor{contract: contract}, nil +} + +func NewVRFMaliciousConsumerV2PlusFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFMaliciousConsumerV2PlusFilterer, error) { + contract, err := bindVRFMaliciousConsumerV2Plus(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFMaliciousConsumerV2PlusFilterer{contract: contract}, nil +} + +func bindVRFMaliciousConsumerV2Plus(address common.Address, caller bind.ContractCaller, transactor 
bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFMaliciousConsumerV2PlusMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFMaliciousConsumerV2Plus.Contract.VRFMaliciousConsumerV2PlusCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.VRFMaliciousConsumerV2PlusTransactor.contract.Transfer(opts) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.VRFMaliciousConsumerV2PlusTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFMaliciousConsumerV2Plus.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.contract.Transfer(opts) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFMaliciousConsumerV2Plus.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusSession) Owner() (common.Address, error) { + return _VRFMaliciousConsumerV2Plus.Contract.Owner(&_VRFMaliciousConsumerV2Plus.CallOpts) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusCallerSession) Owner() (common.Address, error) { + return _VRFMaliciousConsumerV2Plus.Contract.Owner(&_VRFMaliciousConsumerV2Plus.CallOpts) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusCaller) SGasAvailable(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFMaliciousConsumerV2Plus.contract.Call(opts, &out, "s_gasAvailable") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusSession) SGasAvailable() (*big.Int, error) { + return _VRFMaliciousConsumerV2Plus.Contract.SGasAvailable(&_VRFMaliciousConsumerV2Plus.CallOpts) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusCallerSession) SGasAvailable() (*big.Int, error) { + return _VRFMaliciousConsumerV2Plus.Contract.SGasAvailable(&_VRFMaliciousConsumerV2Plus.CallOpts) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusCaller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFMaliciousConsumerV2Plus.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func 
(_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFMaliciousConsumerV2Plus.Contract.SRandomWords(&_VRFMaliciousConsumerV2Plus.CallOpts, arg0) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusCallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFMaliciousConsumerV2Plus.Contract.SRandomWords(&_VRFMaliciousConsumerV2Plus.CallOpts, arg0) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusCaller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFMaliciousConsumerV2Plus.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusSession) SRequestId() (*big.Int, error) { + return _VRFMaliciousConsumerV2Plus.Contract.SRequestId(&_VRFMaliciousConsumerV2Plus.CallOpts) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusCallerSession) SRequestId() (*big.Int, error) { + return _VRFMaliciousConsumerV2Plus.Contract.SRequestId(&_VRFMaliciousConsumerV2Plus.CallOpts) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusCaller) SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFMaliciousConsumerV2Plus.contract.Call(opts, &out, "s_vrfCoordinator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusSession) SVrfCoordinator() (common.Address, error) { + return _VRFMaliciousConsumerV2Plus.Contract.SVrfCoordinator(&_VRFMaliciousConsumerV2Plus.CallOpts) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusCallerSession) SVrfCoordinator() (common.Address, error) { + return 
_VRFMaliciousConsumerV2Plus.Contract.SVrfCoordinator(&_VRFMaliciousConsumerV2Plus.CallOpts) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.AcceptOwnership(&_VRFMaliciousConsumerV2Plus.TransactOpts) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.AcceptOwnership(&_VRFMaliciousConsumerV2Plus.TransactOpts) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactor) CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.contract.Transact(opts, "createSubscriptionAndFund", amount) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.CreateSubscriptionAndFund(&_VRFMaliciousConsumerV2Plus.TransactOpts, amount) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactorSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.CreateSubscriptionAndFund(&_VRFMaliciousConsumerV2Plus.TransactOpts, amount) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFMaliciousConsumerV2Plus 
*VRFMaliciousConsumerV2PlusSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.RawFulfillRandomWords(&_VRFMaliciousConsumerV2Plus.TransactOpts, requestId, randomWords) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.RawFulfillRandomWords(&_VRFMaliciousConsumerV2Plus.TransactOpts, requestId, randomWords) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactor) RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.contract.Transact(opts, "requestRandomness", keyHash) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusSession) RequestRandomness(keyHash [32]byte) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.RequestRandomness(&_VRFMaliciousConsumerV2Plus.TransactOpts, keyHash) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactorSession) RequestRandomness(keyHash [32]byte) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.RequestRandomness(&_VRFMaliciousConsumerV2Plus.TransactOpts, keyHash) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactor) SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.contract.Transact(opts, "setCoordinator", _vrfCoordinator) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.SetCoordinator(&_VRFMaliciousConsumerV2Plus.TransactOpts, _vrfCoordinator) +} + +func (_VRFMaliciousConsumerV2Plus 
*VRFMaliciousConsumerV2PlusTransactorSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.SetCoordinator(&_VRFMaliciousConsumerV2Plus.TransactOpts, _vrfCoordinator) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.TransferOwnership(&_VRFMaliciousConsumerV2Plus.TransactOpts, to) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.TransferOwnership(&_VRFMaliciousConsumerV2Plus.TransactOpts, to) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactor) UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.contract.Transact(opts, "updateSubscription", consumers) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.UpdateSubscription(&_VRFMaliciousConsumerV2Plus.TransactOpts, consumers) +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusTransactorSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFMaliciousConsumerV2Plus.Contract.UpdateSubscription(&_VRFMaliciousConsumerV2Plus.TransactOpts, consumers) +} + +type VRFMaliciousConsumerV2PlusOwnershipTransferRequestedIterator struct { + Event *VRFMaliciousConsumerV2PlusOwnershipTransferRequested + + 
contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFMaliciousConsumerV2PlusOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFMaliciousConsumerV2PlusOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFMaliciousConsumerV2PlusOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFMaliciousConsumerV2PlusOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFMaliciousConsumerV2PlusOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFMaliciousConsumerV2PlusOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFMaliciousConsumerV2PlusOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFMaliciousConsumerV2Plus.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFMaliciousConsumerV2PlusOwnershipTransferRequestedIterator{contract: 
_VRFMaliciousConsumerV2Plus.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFMaliciousConsumerV2PlusOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFMaliciousConsumerV2Plus.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFMaliciousConsumerV2PlusOwnershipTransferRequested) + if err := _VRFMaliciousConsumerV2Plus.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFMaliciousConsumerV2PlusOwnershipTransferRequested, error) { + event := new(VRFMaliciousConsumerV2PlusOwnershipTransferRequested) + if err := _VRFMaliciousConsumerV2Plus.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFMaliciousConsumerV2PlusOwnershipTransferredIterator struct { + Event *VRFMaliciousConsumerV2PlusOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*VRFMaliciousConsumerV2PlusOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFMaliciousConsumerV2PlusOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFMaliciousConsumerV2PlusOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFMaliciousConsumerV2PlusOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFMaliciousConsumerV2PlusOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFMaliciousConsumerV2PlusOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFMaliciousConsumerV2PlusOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFMaliciousConsumerV2Plus.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFMaliciousConsumerV2PlusOwnershipTransferredIterator{contract: _VRFMaliciousConsumerV2Plus.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink 
chan<- *VRFMaliciousConsumerV2PlusOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFMaliciousConsumerV2Plus.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFMaliciousConsumerV2PlusOwnershipTransferred) + if err := _VRFMaliciousConsumerV2Plus.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2PlusFilterer) ParseOwnershipTransferred(log types.Log) (*VRFMaliciousConsumerV2PlusOwnershipTransferred, error) { + event := new(VRFMaliciousConsumerV2PlusOwnershipTransferred) + if err := _VRFMaliciousConsumerV2Plus.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2Plus) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFMaliciousConsumerV2Plus.abi.Events["OwnershipTransferRequested"].ID: + return _VRFMaliciousConsumerV2Plus.ParseOwnershipTransferRequested(log) + case _VRFMaliciousConsumerV2Plus.abi.Events["OwnershipTransferred"].ID: + return _VRFMaliciousConsumerV2Plus.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func 
(VRFMaliciousConsumerV2PlusOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFMaliciousConsumerV2PlusOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_VRFMaliciousConsumerV2Plus *VRFMaliciousConsumerV2Plus) Address() common.Address { + return _VRFMaliciousConsumerV2Plus.address +} + +type VRFMaliciousConsumerV2PlusInterface interface { + Owner(opts *bind.CallOpts) (common.Address, error) + + SGasAvailable(opts *bind.CallOpts) (*big.Int, error) + + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte) (*types.Transaction, error) + + SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFMaliciousConsumerV2PlusOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFMaliciousConsumerV2PlusOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) 
(*VRFMaliciousConsumerV2PlusOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFMaliciousConsumerV2PlusOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFMaliciousConsumerV2PlusOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFMaliciousConsumerV2PlusOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_migratable_consumer_v2/vrf_migratable_consumer_v2.go b/core/gethwrappers/generated/vrf_migratable_consumer_v2/vrf_migratable_consumer_v2.go new file mode 100644 index 00000000..31defc2a --- /dev/null +++ b/core/gethwrappers/generated/vrf_migratable_consumer_v2/vrf_migratable_consumer_v2.go @@ -0,0 +1,642 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_migratable_consumer_v2 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var MigratableVRFConsumerV2MetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\
",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint16\",\"name\":\"minReqConfs\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bool\",\"name\":\"nativePayment\",\"type\":\"bool\"}],\"name\":\"requestRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"}],\"name\":\"setCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"}],\"name\":\"setSubId\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50604051610ed2380380610ed283398101604081905261002f916101b7565b818133806000816100875760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156100b7576100b78161010d565b5050600280546305d3b1d360e41b6001600160e01b036001600160401b03909516600160a01b026001600160e01b03199092166001600160a01b0390961695909517179290921692909217905550610209915050565b6001600160a01b0381163314156101665760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161007e565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600080604083850312156101ca57600080fd5b82516001600160a01b03811681146101e157600080fd5b60208401519092506001600160401b03811681146101fe57600080fd5b809150509250929050565b610cba806102186000396000f3fe608060405234801561001057600080fd5b50600436106100a35760003560e01c80638da5cb5b11610076578063e826149d1161005b578063e826149d14610139578063e89e106a1461014c578063f2fde38b1461015557600080fd5b80638da5cb5b146100fe5780638ea981171461012657600080fd5b80631fe543e3146100a85780633e2831fe146100bd57806379ba5097146100d05780637b95ba02146100d8575b600080fd5b6100bb6100b6366004610ac3565b610168565b005b6100bb6100cb366004610bd4565b6101ee565b6100bb610249565b6100eb6100e6366004610bb2565b610346565b6040519081526020015b60405180910390f35b60005460405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100f5565b6100bb610134366004610a16565b610377565b6100eb610147366004610a53565b6105ae565b6100eb60045481565b6100bb610163366004610a16565b61077e565b60025473ffffffffffffffffffffffffffffffffffffffff1633146101e0576002546040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff90911
660248201526044015b60405180910390fd5b6101ea8282610792565b5050565b6101f6610824565b6002805467ffffffffffffffff90921674010000000000000000000000000000000000000000027fffffffff0000000000000000ffffffffffffffffffffffffffffffffffffffff909216919091179055565b60015473ffffffffffffffffffffffffffffffffffffffff1633146102ca576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016101d7565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6003602052816000526040600020818154811061036257600080fd5b90600052602060002001600091509150505481565b61037f610824565b604080517f181f5a7700000000000000000000000000000000000000000000000000000000602082015260009101604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815290829052600254909250600091829173ffffffffffffffffffffffffffffffffffffffff1690610409908590610bfe565b6000604051808303816000865af19150503d8060008114610446576040519150601f19603f3d011682016040523d82523d6000602084013e61044b565b606091505b5091509150816104b7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f74797065416e6456657273696f6e206661696c6564000000000000000000000060448201526064016101d7565b6040516020016104f8906020808252601a908201527f565246436f6f7264696e61746f725632506c757320312e302e30000000000000604082015260600190565b604051602081830303815290604052805190602001208180519060200120141561056357600280547bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fefcf1d94000000000000000000000000000000000000000000000000000000001790555b5050600280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff939093169290921790915550565b600254604080516
024810188905274010000000000000000000000000000000000000000830467ffffffffffffffff16604482015261ffff8716606482015263ffffffff8681166084830152851660a482015283151560c4808301919091528251808303909101815260e490910182526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167c0100000000000000000000000000000000000000000000000000000000850460e01b7fffffffff000000000000000000000000000000000000000000000000000000001617905290516000928391829173ffffffffffffffffffffffffffffffffffffffff16906106b5908590610bfe565b6000604051808303816000865af19150503d80600081146106f2576040519150601f19603f3d011682016040523d82523d6000602084013e6106f7565b606091505b509150915081610763576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f726571756573742072616e646f6d20776f726473206661696c6564000000000060448201526064016101d7565b61076c81610c39565b60048190559998505050505050505050565b610786610824565b61078f816108a7565b50565b60045482146107fd576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265717565737420494420697320696e636f727265637400000000000000000060448201526064016101d7565b6004546000908152600360209081526040909120825161081f9284019061099d565b505050565b60005473ffffffffffffffffffffffffffffffffffffffff1633146108a5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016101d7565b565b73ffffffffffffffffffffffffffffffffffffffff8116331415610927576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016101d7565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b8
280548282559060005260206000209081019282156109d8579160200282015b828111156109d85782518255916020019190600101906109bd565b506109e49291506109e8565b5090565b5b808211156109e457600081556001016109e9565b803563ffffffff81168114610a1157600080fd5b919050565b600060208284031215610a2857600080fd5b813573ffffffffffffffffffffffffffffffffffffffff81168114610a4c57600080fd5b9392505050565b600080600080600060a08688031215610a6b57600080fd5b85359450602086013561ffff81168114610a8457600080fd5b9350610a92604087016109fd565b9250610aa0606087016109fd565b915060808601358015158114610ab557600080fd5b809150509295509295909350565b60008060408385031215610ad657600080fd5b8235915060208084013567ffffffffffffffff80821115610af657600080fd5b818601915086601f830112610b0a57600080fd5b813581811115610b1c57610b1c610c7e565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f83011681018181108582111715610b5f57610b5f610c7e565b604052828152858101935084860182860187018b1015610b7e57600080fd5b600095505b83861015610ba1578035855260019590950194938601938601610b83565b508096505050505050509250929050565b60008060408385031215610bc557600080fd5b50508035926020909101359150565b600060208284031215610be657600080fd5b813567ffffffffffffffff81168114610a4c57600080fd5b6000825160005b81811015610c1f5760208186018101518583015201610c05565b81811115610c2e576000828501525b509190910192915050565b80516020808301519190811015610c78577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8160200360031b1b821691505b50919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var MigratableVRFConsumerV2ABI = MigratableVRFConsumerV2MetaData.ABI + +var MigratableVRFConsumerV2Bin = MigratableVRFConsumerV2MetaData.Bin + +func DeployMigratableVRFConsumerV2(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator common.Address, subId uint64) (common.Address, *types.Transaction, *MigratableVRFConsumerV2, error) { + parsed, err := 
MigratableVRFConsumerV2MetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(MigratableVRFConsumerV2Bin), backend, vrfCoordinator, subId) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &MigratableVRFConsumerV2{MigratableVRFConsumerV2Caller: MigratableVRFConsumerV2Caller{contract: contract}, MigratableVRFConsumerV2Transactor: MigratableVRFConsumerV2Transactor{contract: contract}, MigratableVRFConsumerV2Filterer: MigratableVRFConsumerV2Filterer{contract: contract}}, nil +} + +type MigratableVRFConsumerV2 struct { + address common.Address + abi abi.ABI + MigratableVRFConsumerV2Caller + MigratableVRFConsumerV2Transactor + MigratableVRFConsumerV2Filterer +} + +type MigratableVRFConsumerV2Caller struct { + contract *bind.BoundContract +} + +type MigratableVRFConsumerV2Transactor struct { + contract *bind.BoundContract +} + +type MigratableVRFConsumerV2Filterer struct { + contract *bind.BoundContract +} + +type MigratableVRFConsumerV2Session struct { + Contract *MigratableVRFConsumerV2 + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type MigratableVRFConsumerV2CallerSession struct { + Contract *MigratableVRFConsumerV2Caller + CallOpts bind.CallOpts +} + +type MigratableVRFConsumerV2TransactorSession struct { + Contract *MigratableVRFConsumerV2Transactor + TransactOpts bind.TransactOpts +} + +type MigratableVRFConsumerV2Raw struct { + Contract *MigratableVRFConsumerV2 +} + +type MigratableVRFConsumerV2CallerRaw struct { + Contract *MigratableVRFConsumerV2Caller +} + +type MigratableVRFConsumerV2TransactorRaw struct { + Contract *MigratableVRFConsumerV2Transactor +} + +func NewMigratableVRFConsumerV2(address common.Address, backend bind.ContractBackend) (*MigratableVRFConsumerV2, error) { + abi, err := 
abi.JSON(strings.NewReader(MigratableVRFConsumerV2ABI)) + if err != nil { + return nil, err + } + contract, err := bindMigratableVRFConsumerV2(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &MigratableVRFConsumerV2{address: address, abi: abi, MigratableVRFConsumerV2Caller: MigratableVRFConsumerV2Caller{contract: contract}, MigratableVRFConsumerV2Transactor: MigratableVRFConsumerV2Transactor{contract: contract}, MigratableVRFConsumerV2Filterer: MigratableVRFConsumerV2Filterer{contract: contract}}, nil +} + +func NewMigratableVRFConsumerV2Caller(address common.Address, caller bind.ContractCaller) (*MigratableVRFConsumerV2Caller, error) { + contract, err := bindMigratableVRFConsumerV2(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &MigratableVRFConsumerV2Caller{contract: contract}, nil +} + +func NewMigratableVRFConsumerV2Transactor(address common.Address, transactor bind.ContractTransactor) (*MigratableVRFConsumerV2Transactor, error) { + contract, err := bindMigratableVRFConsumerV2(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &MigratableVRFConsumerV2Transactor{contract: contract}, nil +} + +func NewMigratableVRFConsumerV2Filterer(address common.Address, filterer bind.ContractFilterer) (*MigratableVRFConsumerV2Filterer, error) { + contract, err := bindMigratableVRFConsumerV2(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &MigratableVRFConsumerV2Filterer{contract: contract}, nil +} + +func bindMigratableVRFConsumerV2(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := MigratableVRFConsumerV2MetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Raw) Call(opts *bind.CallOpts, 
result *[]interface{}, method string, params ...interface{}) error { + return _MigratableVRFConsumerV2.Contract.MigratableVRFConsumerV2Caller.contract.Call(opts, result, method, params...) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.MigratableVRFConsumerV2Transactor.contract.Transfer(opts) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.MigratableVRFConsumerV2Transactor.contract.Transact(opts, method, params...) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _MigratableVRFConsumerV2.Contract.contract.Call(opts, result, method, params...) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.contract.Transfer(opts) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.contract.Transact(opts, method, params...) 
+} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Caller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _MigratableVRFConsumerV2.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Session) Owner() (common.Address, error) { + return _MigratableVRFConsumerV2.Contract.Owner(&_MigratableVRFConsumerV2.CallOpts) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2CallerSession) Owner() (common.Address, error) { + return _MigratableVRFConsumerV2.Contract.Owner(&_MigratableVRFConsumerV2.CallOpts) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Caller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + var out []interface{} + err := _MigratableVRFConsumerV2.contract.Call(opts, &out, "s_randomWords", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Session) SRandomWords(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _MigratableVRFConsumerV2.Contract.SRandomWords(&_MigratableVRFConsumerV2.CallOpts, arg0, arg1) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2CallerSession) SRandomWords(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _MigratableVRFConsumerV2.Contract.SRandomWords(&_MigratableVRFConsumerV2.CallOpts, arg0, arg1) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Caller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _MigratableVRFConsumerV2.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, 
err + +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Session) SRequestId() (*big.Int, error) { + return _MigratableVRFConsumerV2.Contract.SRequestId(&_MigratableVRFConsumerV2.CallOpts) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2CallerSession) SRequestId() (*big.Int, error) { + return _MigratableVRFConsumerV2.Contract.SRequestId(&_MigratableVRFConsumerV2.CallOpts) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Transactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.contract.Transact(opts, "acceptOwnership") +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Session) AcceptOwnership() (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.AcceptOwnership(&_MigratableVRFConsumerV2.TransactOpts) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2TransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.AcceptOwnership(&_MigratableVRFConsumerV2.TransactOpts) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Transactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Session) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.RawFulfillRandomWords(&_MigratableVRFConsumerV2.TransactOpts, requestId, randomWords) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2TransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.RawFulfillRandomWords(&_MigratableVRFConsumerV2.TransactOpts, requestId, randomWords) +} + +func 
(_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Transactor) RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, minReqConfs uint16, callbackGasLimit uint32, numWords uint32, nativePayment bool) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.contract.Transact(opts, "requestRandomness", keyHash, minReqConfs, callbackGasLimit, numWords, nativePayment) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Session) RequestRandomness(keyHash [32]byte, minReqConfs uint16, callbackGasLimit uint32, numWords uint32, nativePayment bool) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.RequestRandomness(&_MigratableVRFConsumerV2.TransactOpts, keyHash, minReqConfs, callbackGasLimit, numWords, nativePayment) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2TransactorSession) RequestRandomness(keyHash [32]byte, minReqConfs uint16, callbackGasLimit uint32, numWords uint32, nativePayment bool) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.RequestRandomness(&_MigratableVRFConsumerV2.TransactOpts, keyHash, minReqConfs, callbackGasLimit, numWords, nativePayment) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Transactor) SetCoordinator(opts *bind.TransactOpts, coordinator common.Address) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.contract.Transact(opts, "setCoordinator", coordinator) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Session) SetCoordinator(coordinator common.Address) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.SetCoordinator(&_MigratableVRFConsumerV2.TransactOpts, coordinator) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2TransactorSession) SetCoordinator(coordinator common.Address) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.SetCoordinator(&_MigratableVRFConsumerV2.TransactOpts, coordinator) +} + +func (_MigratableVRFConsumerV2 
*MigratableVRFConsumerV2Transactor) SetSubId(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.contract.Transact(opts, "setSubId", subId) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Session) SetSubId(subId uint64) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.SetSubId(&_MigratableVRFConsumerV2.TransactOpts, subId) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2TransactorSession) SetSubId(subId uint64) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.SetSubId(&_MigratableVRFConsumerV2.TransactOpts, subId) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Transactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.contract.Transact(opts, "transferOwnership", to) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Session) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.TransferOwnership(&_MigratableVRFConsumerV2.TransactOpts, to) +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2TransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _MigratableVRFConsumerV2.Contract.TransferOwnership(&_MigratableVRFConsumerV2.TransactOpts, to) +} + +type MigratableVRFConsumerV2OwnershipTransferRequestedIterator struct { + Event *MigratableVRFConsumerV2OwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *MigratableVRFConsumerV2OwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(MigratableVRFConsumerV2OwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return 
false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(MigratableVRFConsumerV2OwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *MigratableVRFConsumerV2OwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *MigratableVRFConsumerV2OwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type MigratableVRFConsumerV2OwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Filterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*MigratableVRFConsumerV2OwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _MigratableVRFConsumerV2.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &MigratableVRFConsumerV2OwnershipTransferRequestedIterator{contract: _MigratableVRFConsumerV2.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Filterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *MigratableVRFConsumerV2OwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + 
toRule = append(toRule, toItem) + } + + logs, sub, err := _MigratableVRFConsumerV2.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(MigratableVRFConsumerV2OwnershipTransferRequested) + if err := _MigratableVRFConsumerV2.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Filterer) ParseOwnershipTransferRequested(log types.Log) (*MigratableVRFConsumerV2OwnershipTransferRequested, error) { + event := new(MigratableVRFConsumerV2OwnershipTransferRequested) + if err := _MigratableVRFConsumerV2.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type MigratableVRFConsumerV2OwnershipTransferredIterator struct { + Event *MigratableVRFConsumerV2OwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *MigratableVRFConsumerV2OwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(MigratableVRFConsumerV2OwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(MigratableVRFConsumerV2OwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + 
it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *MigratableVRFConsumerV2OwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *MigratableVRFConsumerV2OwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type MigratableVRFConsumerV2OwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Filterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*MigratableVRFConsumerV2OwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _MigratableVRFConsumerV2.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &MigratableVRFConsumerV2OwnershipTransferredIterator{contract: _MigratableVRFConsumerV2.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Filterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *MigratableVRFConsumerV2OwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _MigratableVRFConsumerV2.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + 
event := new(MigratableVRFConsumerV2OwnershipTransferred) + if err := _MigratableVRFConsumerV2.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2Filterer) ParseOwnershipTransferred(log types.Log) (*MigratableVRFConsumerV2OwnershipTransferred, error) { + event := new(MigratableVRFConsumerV2OwnershipTransferred) + if err := _MigratableVRFConsumerV2.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _MigratableVRFConsumerV2.abi.Events["OwnershipTransferRequested"].ID: + return _MigratableVRFConsumerV2.ParseOwnershipTransferRequested(log) + case _MigratableVRFConsumerV2.abi.Events["OwnershipTransferred"].ID: + return _MigratableVRFConsumerV2.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (MigratableVRFConsumerV2OwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (MigratableVRFConsumerV2OwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_MigratableVRFConsumerV2 *MigratableVRFConsumerV2) Address() common.Address { + return _MigratableVRFConsumerV2.address +} + +type MigratableVRFConsumerV2Interface interface { + Owner(opts *bind.CallOpts) (common.Address, error) + + SRandomWords(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, 
error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, minReqConfs uint16, callbackGasLimit uint32, numWords uint32, nativePayment bool) (*types.Transaction, error) + + SetCoordinator(opts *bind.TransactOpts, coordinator common.Address) (*types.Transaction, error) + + SetSubId(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*MigratableVRFConsumerV2OwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *MigratableVRFConsumerV2OwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*MigratableVRFConsumerV2OwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*MigratableVRFConsumerV2OwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *MigratableVRFConsumerV2OwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*MigratableVRFConsumerV2OwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_mock_ethlink_aggregator/vrf_mock_ethlink_aggregator.go b/core/gethwrappers/generated/vrf_mock_ethlink_aggregator/vrf_mock_ethlink_aggregator.go new file mode 100644 index 00000000..5f1c02f1 --- /dev/null +++ 
b/core/gethwrappers/generated/vrf_mock_ethlink_aggregator/vrf_mock_ethlink_aggregator.go @@ -0,0 +1,377 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrf_mock_ethlink_aggregator + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFMockETHPLIAggregatorMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"_answer\",\"type\":\"int256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"answer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"description\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint80\",\"name\":\"_roundId\",\"type\":\"uint80\"}],\"name\":\"getRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"ans\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stat
eMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"ans\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_blockTimestampDeduction\",\"type\":\"uint256\"}],\"name\":\"setBlockTimestampDeduction\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: "0x6080604052600060015534801561001557600080fd5b506040516103383803806103388339810160408190526100349161003c565b600055610055565b60006020828403121561004e57600080fd5b5051919050565b6102d4806100646000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c806385bb7d691161005b57806385bb7d69146100e65780639a6fc8f5146100ef578063f0ad37df14610139578063feaf968c1461014e57600080fd5b8063313ce5671461008257806354fd4d50146100965780637284e416146100a7575b600080fd5b604051601281526020015b60405180910390f35b60015b60405190815260200161008d565b604080518082018252601881527f5652464d6f636b4554484c494e4b41676772656761746f7200000000000000006020820152905161008d9190610216565b61009960005481565b6101026100fd3660046101e3565b610156565b6040805169ffffffffffffffffffff968716815260208101959095528401929092526060830152909116608082015260a00161008d565b61014c6101473660046101ca565b600155565b005b610102610186565b6000806000806000600160005461016b6101b5565b6101736101b5565b9299919850965090945060019350915050565b6000806000806000600160005461019b6101b5565b6101a36101b5565b92989197509550909350600192509050565b600
0600154426101c59190610289565b905090565b6000602082840312156101dc57600080fd5b5035919050565b6000602082840312156101f557600080fd5b813569ffffffffffffffffffff8116811461020f57600080fd5b9392505050565b600060208083528351808285015260005b8181101561024357858101830151858201604001528201610227565b81811115610255576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b6000828210156102c2577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b50039056fea164736f6c6343000806000a", +} + +var VRFMockETHPLIAggregatorABI = VRFMockETHPLIAggregatorMetaData.ABI + +var VRFMockETHPLIAggregatorBin = VRFMockETHPLIAggregatorMetaData.Bin + +func DeployVRFMockETHPLIAggregator(auth *bind.TransactOpts, backend bind.ContractBackend, _answer *big.Int) (common.Address, *types.Transaction, *VRFMockETHPLIAggregator, error) { + parsed, err := VRFMockETHPLIAggregatorMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFMockETHPLIAggregatorBin), backend, _answer) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFMockETHPLIAggregator{address: address, abi: *parsed, VRFMockETHPLIAggregatorCaller: VRFMockETHPLIAggregatorCaller{contract: contract}, VRFMockETHPLIAggregatorTransactor: VRFMockETHPLIAggregatorTransactor{contract: contract}, VRFMockETHPLIAggregatorFilterer: VRFMockETHPLIAggregatorFilterer{contract: contract}}, nil +} + +type VRFMockETHPLIAggregator struct { + address common.Address + abi abi.ABI + VRFMockETHPLIAggregatorCaller + VRFMockETHPLIAggregatorTransactor + VRFMockETHPLIAggregatorFilterer +} + +type VRFMockETHPLIAggregatorCaller struct { + contract *bind.BoundContract +} + +type VRFMockETHPLIAggregatorTransactor struct { + contract 
*bind.BoundContract +} + +type VRFMockETHPLIAggregatorFilterer struct { + contract *bind.BoundContract +} + +type VRFMockETHPLIAggregatorSession struct { + Contract *VRFMockETHPLIAggregator + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFMockETHPLIAggregatorCallerSession struct { + Contract *VRFMockETHPLIAggregatorCaller + CallOpts bind.CallOpts +} + +type VRFMockETHPLIAggregatorTransactorSession struct { + Contract *VRFMockETHPLIAggregatorTransactor + TransactOpts bind.TransactOpts +} + +type VRFMockETHPLIAggregatorRaw struct { + Contract *VRFMockETHPLIAggregator +} + +type VRFMockETHPLIAggregatorCallerRaw struct { + Contract *VRFMockETHPLIAggregatorCaller +} + +type VRFMockETHPLIAggregatorTransactorRaw struct { + Contract *VRFMockETHPLIAggregatorTransactor +} + +func NewVRFMockETHPLIAggregator(address common.Address, backend bind.ContractBackend) (*VRFMockETHPLIAggregator, error) { + abi, err := abi.JSON(strings.NewReader(VRFMockETHPLIAggregatorABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFMockETHPLIAggregator(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFMockETHPLIAggregator{address: address, abi: abi, VRFMockETHPLIAggregatorCaller: VRFMockETHPLIAggregatorCaller{contract: contract}, VRFMockETHPLIAggregatorTransactor: VRFMockETHPLIAggregatorTransactor{contract: contract}, VRFMockETHPLIAggregatorFilterer: VRFMockETHPLIAggregatorFilterer{contract: contract}}, nil +} + +func NewVRFMockETHPLIAggregatorCaller(address common.Address, caller bind.ContractCaller) (*VRFMockETHPLIAggregatorCaller, error) { + contract, err := bindVRFMockETHPLIAggregator(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFMockETHPLIAggregatorCaller{contract: contract}, nil +} + +func NewVRFMockETHPLIAggregatorTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFMockETHPLIAggregatorTransactor, error) { + contract, err := 
bindVRFMockETHPLIAggregator(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFMockETHPLIAggregatorTransactor{contract: contract}, nil +} + +func NewVRFMockETHPLIAggregatorFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFMockETHPLIAggregatorFilterer, error) { + contract, err := bindVRFMockETHPLIAggregator(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFMockETHPLIAggregatorFilterer{contract: contract}, nil +} + +func bindVRFMockETHPLIAggregator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFMockETHPLIAggregatorMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFMockETHPLIAggregator.Contract.VRFMockETHPLIAggregatorCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFMockETHPLIAggregator.Contract.VRFMockETHPLIAggregatorTransactor.contract.Transfer(opts) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFMockETHPLIAggregator.Contract.VRFMockETHPLIAggregatorTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFMockETHPLIAggregator.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFMockETHPLIAggregator.Contract.contract.Transfer(opts) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFMockETHPLIAggregator.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCaller) Answer(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFMockETHPLIAggregator.contract.Call(opts, &out, "answer") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorSession) Answer() (*big.Int, error) { + return _VRFMockETHPLIAggregator.Contract.Answer(&_VRFMockETHPLIAggregator.CallOpts) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCallerSession) Answer() (*big.Int, error) { + return _VRFMockETHPLIAggregator.Contract.Answer(&_VRFMockETHPLIAggregator.CallOpts) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _VRFMockETHPLIAggregator.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorSession) Decimals() (uint8, error) { + return _VRFMockETHPLIAggregator.Contract.Decimals(&_VRFMockETHPLIAggregator.CallOpts) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCallerSession) Decimals() (uint8, error) { + return _VRFMockETHPLIAggregator.Contract.Decimals(&_VRFMockETHPLIAggregator.CallOpts) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCaller) 
Description(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _VRFMockETHPLIAggregator.contract.Call(opts, &out, "description") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorSession) Description() (string, error) { + return _VRFMockETHPLIAggregator.Contract.Description(&_VRFMockETHPLIAggregator.CallOpts) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCallerSession) Description() (string, error) { + return _VRFMockETHPLIAggregator.Contract.Description(&_VRFMockETHPLIAggregator.CallOpts) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCaller) GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) { + var out []interface{} + err := _VRFMockETHPLIAggregator.contract.Call(opts, &out, "getRoundData", _roundId) + + outstruct := new(GetRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Ans = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _VRFMockETHPLIAggregator.Contract.GetRoundData(&_VRFMockETHPLIAggregator.CallOpts, _roundId) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCallerSession) GetRoundData(_roundId *big.Int) (GetRoundData, + + error) { + return _VRFMockETHPLIAggregator.Contract.GetRoundData(&_VRFMockETHPLIAggregator.CallOpts, _roundId) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCaller) LatestRoundData(opts 
*bind.CallOpts) (LatestRoundData, + + error) { + var out []interface{} + err := _VRFMockETHPLIAggregator.contract.Call(opts, &out, "latestRoundData") + + outstruct := new(LatestRoundData) + if err != nil { + return *outstruct, err + } + + outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Ans = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorSession) LatestRoundData() (LatestRoundData, + + error) { + return _VRFMockETHPLIAggregator.Contract.LatestRoundData(&_VRFMockETHPLIAggregator.CallOpts) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCallerSession) LatestRoundData() (LatestRoundData, + + error) { + return _VRFMockETHPLIAggregator.Contract.LatestRoundData(&_VRFMockETHPLIAggregator.CallOpts) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCaller) Version(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFMockETHPLIAggregator.contract.Call(opts, &out, "version") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorSession) Version() (*big.Int, error) { + return _VRFMockETHPLIAggregator.Contract.Version(&_VRFMockETHPLIAggregator.CallOpts) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorCallerSession) Version() (*big.Int, error) { + return _VRFMockETHPLIAggregator.Contract.Version(&_VRFMockETHPLIAggregator.CallOpts) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorTransactor) SetBlockTimestampDeduction(opts *bind.TransactOpts, _blockTimestampDeduction *big.Int) (*types.Transaction, error) 
{ + return _VRFMockETHPLIAggregator.contract.Transact(opts, "setBlockTimestampDeduction", _blockTimestampDeduction) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorSession) SetBlockTimestampDeduction(_blockTimestampDeduction *big.Int) (*types.Transaction, error) { + return _VRFMockETHPLIAggregator.Contract.SetBlockTimestampDeduction(&_VRFMockETHPLIAggregator.TransactOpts, _blockTimestampDeduction) +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregatorTransactorSession) SetBlockTimestampDeduction(_blockTimestampDeduction *big.Int) (*types.Transaction, error) { + return _VRFMockETHPLIAggregator.Contract.SetBlockTimestampDeduction(&_VRFMockETHPLIAggregator.TransactOpts, _blockTimestampDeduction) +} + +type GetRoundData struct { + RoundId *big.Int + Ans *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} +type LatestRoundData struct { + RoundId *big.Int + Ans *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} + +func (_VRFMockETHPLIAggregator *VRFMockETHPLIAggregator) Address() common.Address { + return _VRFMockETHPLIAggregator.address +} + +type VRFMockETHPLIAggregatorInterface interface { + Answer(opts *bind.CallOpts) (*big.Int, error) + + Decimals(opts *bind.CallOpts) (uint8, error) + + Description(opts *bind.CallOpts) (string, error) + + GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) + + LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) + + Version(opts *bind.CallOpts) (*big.Int, error) + + SetBlockTimestampDeduction(opts *bind.TransactOpts, _blockTimestampDeduction *big.Int) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_owner/vrf_owner.go b/core/gethwrappers/generated/vrf_owner/vrf_owner.go new file mode 100644 index 00000000..f0d9592a --- /dev/null +++ b/core/gethwrappers/generated/vrf_owner/vrf_owner.go @@ -0,0 +1,1055 @@ +// Code generated - DO NOT EDIT. 
+// This file is a generated binding and any manual changes will be lost. + +package vrf_owner + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type FeeConfig struct { + FulfillmentFlatFeeLinkPPMTier1 uint32 + FulfillmentFlatFeeLinkPPMTier2 uint32 + FulfillmentFlatFeeLinkPPMTier3 uint32 + FulfillmentFlatFeeLinkPPMTier4 uint32 + FulfillmentFlatFeeLinkPPMTier5 uint32 + ReqsForTier2 *big.Int + ReqsForTier3 *big.Int + ReqsForTier4 *big.Int + ReqsForTier5 *big.Int +} + +type VRFTypesProof struct { + Pk [2]*big.Int + Gamma [2]*big.Int + C *big.Int + S *big.Int + Seed *big.Int + UWitness common.Address + CGammaWitness [2]*big.Int + SHashWitness [2]*big.Int + ZInv *big.Int +} + +type VRFTypesRequestCommitment struct { + BlockNum uint64 + SubId uint64 + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address +} + +var VRFOwnerMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"EmptySendersList\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotAllowedToSetSenders\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnauthorizedSender\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"changedBy\",\"type\":\"address\"}],\"name\":\"AuthorizedSendersChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RandomWordsForced\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptVRFOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"publicProvingKey\",\"type\":\"uint256[2]\"}],\"name\":\"deregisterProvingKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"in
ternalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"internalType\":\"structVRFTypes.Proof\",\"name\":\"proof\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"blockNum\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"internalType\":\"structVRFTypes.RequestCommitment\",\"name\":\"rc\",\"type\":\"tuple\"}],\"name\":\"fulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAuthorizedSenders\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getVRFCoordinator\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"isAuthorizedSender\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"ad
dress\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"}],\"name\":\"ownerCancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"oracle\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"publicProvingKey\",\"type\":\"uint256[2]\"}],\"name\":\"registerProvingKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"senders\",\"type\":\"address[]\"}],\"name\":\"setAuthorizedSenders\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"},{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier1\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier2\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier3\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier4\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPMTier5\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier2\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier3\"
,\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier4\",\"type\":\"uint24\"},{\"internalType\":\"uint24\",\"name\":\"reqsForTier5\",\"type\":\"uint24\"}],\"internalType\":\"structFeeConfig\",\"name\":\"feeConfig\",\"type\":\"tuple\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferVRFOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60806040523480156200001157600080fd5b5060405162002007380380620020078339810160408190526200003491620001fc565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be8162000150565b5050506001600160a01b0381166200012a5760405162461bcd60e51b815260206004820152602860248201527f76726620636f6f7264696e61746f722061646472657373206d757374206265206044820152676e6f6e2d7a65726f60c01b606482015260840162000082565b600580546001600160a01b0319166001600160a01b03929092169190911790556200022e565b6001600160a01b038116331415620001ab5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000602082840312156200020f57600080fd5b81516001600160a01b03811681146200022757600080fd5b9392505050565b611dc9806200023e6000396000f3fe608060405234801561001057600080fd5b50600436106100f55760003560e01c8063984e804711610097578063e72f6e3011610066578063
e72f6e30146101f9578063ee56997b1461020c578063f2fde38b1461021f578063fa00763a1461023257600080fd5b8063984e8047146101ad578063a378f371146101b5578063af198b97146101d3578063c2df03e4146101e657600080fd5b80634cb48a54116100d35780634cb48a54146101405780636f64f03f1461015357806379ba5097146101665780638da5cb5b1461016e57600080fd5b806302bcc5b6146100fa57806308821d581461010f5780632408afaa14610122575b600080fd5b61010d6101083660046118f6565b610255565b005b61010d61011d366004611591565b6102ee565b61012a61034c565b6040516101379190611a0a565b60405180910390f35b61010d61014e3660046116ed565b6103bb565b61010d6101613660046114e8565b61045d565b61010d6104f3565b60005473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610137565b61010d6105f5565b60055473ffffffffffffffffffffffffffffffffffffffff16610188565b61010d6101e13660046115c6565b610681565b61010d6101f43660046114cd565b610859565b61010d6102073660046114cd565b6108b9565b61010d61021a36600461151c565b610919565b61010d61022d3660046114cd565b610a8c565b6102456102403660046114cd565b610aa0565b6040519015158152602001610137565b61025d610ab3565b6005546040517f02bcc5b600000000000000000000000000000000000000000000000000000000815267ffffffffffffffff8316600482015273ffffffffffffffffffffffffffffffffffffffff909116906302bcc5b6906024015b600060405180830381600087803b1580156102d357600080fd5b505af11580156102e7573d6000803e3d6000fd5b5050505050565b6102f6610ab3565b6005546040517f08821d5800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff909116906308821d58906102b9908490600401611a64565b606060048054806020026020016040519081016040528092919081815260200182805480156103b157602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610386575b5050505050905090565b6103c3610ab3565b6005546040517f4cb48a5400000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690634cb48a54906104239089908990899089908990899060040161
1b82565b600060405180830381600087803b15801561043d57600080fd5b505af1158015610451573d6000803e3d6000fd5b50505050505050505050565b610465610ab3565b6005546040517f6f64f03f00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690636f64f03f906104bd9085908590600401611962565b600060405180830381600087803b1580156104d757600080fd5b505af11580156104eb573d6000803e3d6000fd5b505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610579576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6105fd610ab3565b600560009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166379ba50976040518163ffffffff1660e01b8152600401600060405180830381600087803b15801561066757600080fd5b505af115801561067b573d6000803e3d6000fd5b50505050565b610689610b36565b600061069d83600001518460800151610b75565b905060006106a9610c7f565b805160208083015160408051610120810182526000808252938101849052908101839052606081018390526080810183905260a0810183905260c0810183905260e08101839052610100810183905293945061072b9390916001917f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff906103c3565b6005546040517faf198b9700000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091169063af198b97906107839087908790600401611a85565b602060405180830381600087803b15801561079d57600080fd5b505af11580156107b1573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906107d59190611911565b506107fc816000015182602001518360400151846060015185608001518660a001516103c3565b826080015173ffffffffff
ffffffffffffffffffffffffffffff16836020015167ffffffffffffffff16837fabbcd646b939d78de3053d035798eb5c9818ea1836a2fbdbad335331df51e01d60405160405180910390a450505050565b610861610ab3565b6005546040517ff2fde38b00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff83811660048301529091169063f2fde38b906024016102b9565b6108c1610ab3565b6005546040517fe72f6e3000000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff83811660048301529091169063e72f6e30906024016102b9565b610921610fcb565b610957576040517fad77f06100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8061098e576040517f75158c3b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b6004548110156109ee576109db600482815481106109b1576109b1611d2b565b60009182526020909120015460029073ffffffffffffffffffffffffffffffffffffffff16611009565b50806109e681611c94565b915050610991565b5060005b81811015610a3f57610a2c838383818110610a0f57610a0f611d2b565b9050602002016020810190610a2491906114cd565b600290611032565b5080610a3781611c94565b9150506109f2565b50610a4c600483836112bb565b507ff263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0828233604051610a8093929190611992565b60405180910390a15050565b610a94610ab3565b610a9d81611054565b50565b6000610aad60028361114a565b92915050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610b34576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610570565b565b610b3f33610aa0565b610b34576040517f0809490800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6005546040517fcaf70c4a000000000000000000000000000000000000000000000000000000008152600091829173ffffffffffffffffffffffffffffffffffffffff9091169063caf70c4a90610bd0908790600401611a77565b60206040518083038186803b158015610be857600080fd5b505afa15801561
0bfc573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610c2091906115ad565b905060008184604051602001610c40929190918252602082015260400190565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0018152919052805160209091012095945050505050565b6040805160c080820183526000808352602080840182905283850182905260608085018390526080808601849052865161012081018852848152928301849052958201839052810182905293840181905260a080850182905291840181905260e08401819052610100840152810191909152600080600080600560009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663c3f909d46040518163ffffffff1660e01b815260040160806040518083038186803b158015610d5f57600080fd5b505afa158015610d73573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d97919061168e565b93509350935093506000806000806000806000806000600560009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16635fbbc0d26040518163ffffffff1660e01b81526004016101206040518083038186803b158015610e1657600080fd5b505afa158015610e2a573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610e4e9190611832565b9850985098509850985098509850985098506000600560009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663356dac716040518163ffffffff1660e01b815260040160206040518083038186803b158015610eca57600080fd5b505afa158015610ede573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610f0291906115ad565b90506040518060c001604052808f61ffff1681526020018e63ffffffff1681526020018d63ffffffff1681526020018c63ffffffff1681526020018281526020016040518061012001604052808d63ffffffff1681526020018c63ffffffff1681526020018b63ffffffff1681526020018a63ffffffff1681526020018963ffffffff1681526020018862ffffff1681526020018762ffffff1681526020018662ffffff1681526020018562ffffff168152508152509e50505050505050505050505050505090565b600033610fed60005473ffff
ffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff1614905090565b600061102b8373ffffffffffffffffffffffffffffffffffffffff8416611179565b9392505050565b600061102b8373ffffffffffffffffffffffffffffffffffffffff841661126c565b73ffffffffffffffffffffffffffffffffffffffff81163314156110d4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610570565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b73ffffffffffffffffffffffffffffffffffffffff81166000908152600183016020526040812054151561102b565b6000818152600183016020526040812054801561126257600061119d600183611c7d565b85549091506000906111b190600190611c7d565b90508181146112165760008660000182815481106111d1576111d1611d2b565b90600052602060002001549050808760000184815481106111f4576111f4611d2b565b6000918252602080832090910192909255918252600188019052604090208390555b855486908061122757611227611cfc565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505050610aad565b6000915050610aad565b60008181526001830160205260408120546112b357508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155610aad565b506000610aad565b828054828255906000526020600020908101928215611333579160200282015b828111156113335781547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8435161782556020909201916001909101906112db565b5061133f929150611343565b5090565b5b8082111561133f5760008155600101611344565b803573ffffffffffffffffffffffffffffffffffffffff8116811461137c57600080fd5b919050565b8060408101831015610aad57600080fd5b600082601f8301126113a357600080fd5b6040516040810181811067ffffffffffffffff821117156113c65761
13c6611d5a565b80604052508083856040860111156113dd57600080fd5b60005b60028110156113ff5781358352602092830192909101906001016113e0565b509195945050505050565b600060a0828403121561141c57600080fd5b60405160a0810181811067ffffffffffffffff8211171561143f5761143f611d5a565b60405290508061144e836114b5565b815261145c602084016114b5565b6020820152604083013561146f81611daa565b6040820152606083013561148281611daa565b606082015261149360808401611358565b60808201525092915050565b803561137c81611d99565b803561137c81611daa565b803567ffffffffffffffff8116811461137c57600080fd5b6000602082840312156114df57600080fd5b61102b82611358565b600080606083850312156114fb57600080fd5b61150483611358565b91506115138460208501611381565b90509250929050565b6000806020838503121561152f57600080fd5b823567ffffffffffffffff8082111561154757600080fd5b818501915085601f83011261155b57600080fd5b81358181111561156a57600080fd5b8660208260051b850101111561157f57600080fd5b60209290920196919550909350505050565b6000604082840312156115a357600080fd5b61102b8383611381565b6000602082840312156115bf57600080fd5b5051919050565b6000808284036102408112156115db57600080fd5b6101a0808212156115eb57600080fd5b6115f3611c53565b91506115ff8686611392565b825261160e8660408701611392565b60208301526080850135604083015260a0850135606083015260c0850135608083015261163d60e08601611358565b60a083015261010061165187828801611392565b60c0840152611664876101408801611392565b60e084015261018086013581840152508193506116838682870161140a565b925050509250929050565b600080600080608085870312156116a457600080fd5b84516116af81611d89565b60208601519094506116c081611daa565b60408601519093506116d181611daa565b60608601519092506116e281611daa565b939692955090935050565b6000806000806000808688036101c081121561170857600080fd5b873561171381611d89565b9650602088013561172381611daa565b9550604088013561173381611daa565b9450606088013561174381611daa565b9350608088013592506101207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60820181131561177e57600080fd5b611786611c53565b915061179460a08a016114aa565b82526117a260c08a016114aa565b6020830152
6117b360e08a016114aa565b60408301526101006117c6818b016114aa565b60608401526117d6828b016114aa565b60808401526117e86101408b0161149f565b60a08401526117fa6101608b0161149f565b60c084015261180c6101808b0161149f565b60e084015261181e6101a08b0161149f565b818401525050809150509295509295509295565b60008060008060008060008060006101208a8c03121561185157600080fd5b895161185c81611daa565b60208b015190995061186d81611daa565b60408b015190985061187e81611daa565b60608b015190975061188f81611daa565b60808b01519096506118a081611daa565b60a08b01519095506118b181611d99565b60c08b01519094506118c281611d99565b60e08b01519093506118d381611d99565b6101008b01519092506118e581611d99565b809150509295985092959850929598565b60006020828403121561190857600080fd5b61102b826114b5565b60006020828403121561192357600080fd5b81516bffffffffffffffffffffffff8116811461102b57600080fd5b8060005b600281101561067b578151845260209384019390910190600101611943565b73ffffffffffffffffffffffffffffffffffffffff83168152606081016040836020840137600081529392505050565b6040808252810183905260008460608301825b868110156119e05773ffffffffffffffffffffffffffffffffffffffff6119cb84611358565b168252602092830192909101906001016119a5565b50809250505073ffffffffffffffffffffffffffffffffffffffff83166020830152949350505050565b6020808252825182820181905260009190848201906040850190845b81811015611a5857835173ffffffffffffffffffffffffffffffffffffffff1683529284019291840191600101611a26565b50909695505050505050565b6040818101908383376000815292915050565b60408101610aad828461193f565b600061024082019050611a9982855161193f565b6020840151611aab604084018261193f565b5060408401516080830152606084015160a0830152608084015160c083015273ffffffffffffffffffffffffffffffffffffffff60a08501511660e083015260c0840151610100611afe8185018361193f565b60e08601519150611b1361014085018361193f565b85015161018084015250825167ffffffffffffffff9081166101a08401526020840151166101c0830152604083015163ffffffff9081166101e0840152606084015116610200830152608083015173ffffffffffffffffffffffffffffffffffffffff1661022083015261102b565b60006101c08201905061ffff88
16825263ffffffff8088166020840152808716604084015280861660608401528460808401528084511660a08401528060208501511660c0840152506040830151611be260e084018263ffffffff169052565b506060830151610100611bfc8185018363ffffffff169052565b608085015163ffffffff1661012085015260a085015162ffffff90811661014086015260c0860151811661016086015260e086015181166101808601529401519093166101a0909201919091529695505050505050565b604051610120810167ffffffffffffffff81118282101715611c7757611c77611d5a565b60405290565b600082821015611c8f57611c8f611ccd565b500390565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415611cc657611cc6611ccd565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61ffff81168114610a9d57600080fd5b62ffffff81168114610a9d57600080fd5b63ffffffff81168114610a9d57600080fdfea164736f6c6343000806000a", +} + +var VRFOwnerABI = VRFOwnerMetaData.ABI + +var VRFOwnerBin = VRFOwnerMetaData.Bin + +func DeployVRFOwner(auth *bind.TransactOpts, backend bind.ContractBackend, _vrfCoordinator common.Address) (common.Address, *types.Transaction, *VRFOwner, error) { + parsed, err := VRFOwnerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFOwnerBin), backend, _vrfCoordinator) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFOwner{address: address, abi: *parsed, VRFOwnerCaller: VRFOwnerCaller{contract: contract}, VRFOwnerTransactor: VRFOwnerTransactor{contract: contract}, VRFOwnerFilterer: 
VRFOwnerFilterer{contract: contract}}, nil +} + +type VRFOwner struct { + address common.Address + abi abi.ABI + VRFOwnerCaller + VRFOwnerTransactor + VRFOwnerFilterer +} + +type VRFOwnerCaller struct { + contract *bind.BoundContract +} + +type VRFOwnerTransactor struct { + contract *bind.BoundContract +} + +type VRFOwnerFilterer struct { + contract *bind.BoundContract +} + +type VRFOwnerSession struct { + Contract *VRFOwner + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFOwnerCallerSession struct { + Contract *VRFOwnerCaller + CallOpts bind.CallOpts +} + +type VRFOwnerTransactorSession struct { + Contract *VRFOwnerTransactor + TransactOpts bind.TransactOpts +} + +type VRFOwnerRaw struct { + Contract *VRFOwner +} + +type VRFOwnerCallerRaw struct { + Contract *VRFOwnerCaller +} + +type VRFOwnerTransactorRaw struct { + Contract *VRFOwnerTransactor +} + +func NewVRFOwner(address common.Address, backend bind.ContractBackend) (*VRFOwner, error) { + abi, err := abi.JSON(strings.NewReader(VRFOwnerABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFOwner(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFOwner{address: address, abi: abi, VRFOwnerCaller: VRFOwnerCaller{contract: contract}, VRFOwnerTransactor: VRFOwnerTransactor{contract: contract}, VRFOwnerFilterer: VRFOwnerFilterer{contract: contract}}, nil +} + +func NewVRFOwnerCaller(address common.Address, caller bind.ContractCaller) (*VRFOwnerCaller, error) { + contract, err := bindVRFOwner(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFOwnerCaller{contract: contract}, nil +} + +func NewVRFOwnerTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFOwnerTransactor, error) { + contract, err := bindVRFOwner(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFOwnerTransactor{contract: contract}, nil +} + +func NewVRFOwnerFilterer(address common.Address, 
filterer bind.ContractFilterer) (*VRFOwnerFilterer, error) { + contract, err := bindVRFOwner(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFOwnerFilterer{contract: contract}, nil +} + +func bindVRFOwner(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFOwnerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFOwner *VRFOwnerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFOwner.Contract.VRFOwnerCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFOwner *VRFOwnerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFOwner.Contract.VRFOwnerTransactor.contract.Transfer(opts) +} + +func (_VRFOwner *VRFOwnerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFOwner.Contract.VRFOwnerTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFOwner *VRFOwnerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFOwner.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFOwner *VRFOwnerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFOwner.Contract.contract.Transfer(opts) +} + +func (_VRFOwner *VRFOwnerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFOwner.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFOwner *VRFOwnerCaller) GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _VRFOwner.contract.Call(opts, &out, "getAuthorizedSenders") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_VRFOwner *VRFOwnerSession) GetAuthorizedSenders() ([]common.Address, error) { + return _VRFOwner.Contract.GetAuthorizedSenders(&_VRFOwner.CallOpts) +} + +func (_VRFOwner *VRFOwnerCallerSession) GetAuthorizedSenders() ([]common.Address, error) { + return _VRFOwner.Contract.GetAuthorizedSenders(&_VRFOwner.CallOpts) +} + +func (_VRFOwner *VRFOwnerCaller) GetVRFCoordinator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFOwner.contract.Call(opts, &out, "getVRFCoordinator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFOwner *VRFOwnerSession) GetVRFCoordinator() (common.Address, error) { + return _VRFOwner.Contract.GetVRFCoordinator(&_VRFOwner.CallOpts) +} + +func (_VRFOwner *VRFOwnerCallerSession) GetVRFCoordinator() (common.Address, error) { + return _VRFOwner.Contract.GetVRFCoordinator(&_VRFOwner.CallOpts) +} + +func (_VRFOwner *VRFOwnerCaller) IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) { + var out []interface{} + err := _VRFOwner.contract.Call(opts, &out, "isAuthorizedSender", sender) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFOwner *VRFOwnerSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return _VRFOwner.Contract.IsAuthorizedSender(&_VRFOwner.CallOpts, sender) +} + +func (_VRFOwner *VRFOwnerCallerSession) IsAuthorizedSender(sender common.Address) (bool, error) { + return 
_VRFOwner.Contract.IsAuthorizedSender(&_VRFOwner.CallOpts, sender) +} + +func (_VRFOwner *VRFOwnerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFOwner.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFOwner *VRFOwnerSession) Owner() (common.Address, error) { + return _VRFOwner.Contract.Owner(&_VRFOwner.CallOpts) +} + +func (_VRFOwner *VRFOwnerCallerSession) Owner() (common.Address, error) { + return _VRFOwner.Contract.Owner(&_VRFOwner.CallOpts) +} + +func (_VRFOwner *VRFOwnerTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFOwner.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFOwner *VRFOwnerSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFOwner.Contract.AcceptOwnership(&_VRFOwner.TransactOpts) +} + +func (_VRFOwner *VRFOwnerTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFOwner.Contract.AcceptOwnership(&_VRFOwner.TransactOpts) +} + +func (_VRFOwner *VRFOwnerTransactor) AcceptVRFOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFOwner.contract.Transact(opts, "acceptVRFOwnership") +} + +func (_VRFOwner *VRFOwnerSession) AcceptVRFOwnership() (*types.Transaction, error) { + return _VRFOwner.Contract.AcceptVRFOwnership(&_VRFOwner.TransactOpts) +} + +func (_VRFOwner *VRFOwnerTransactorSession) AcceptVRFOwnership() (*types.Transaction, error) { + return _VRFOwner.Contract.AcceptVRFOwnership(&_VRFOwner.TransactOpts) +} + +func (_VRFOwner *VRFOwnerTransactor) DeregisterProvingKey(opts *bind.TransactOpts, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFOwner.contract.Transact(opts, "deregisterProvingKey", publicProvingKey) +} + +func (_VRFOwner *VRFOwnerSession) DeregisterProvingKey(publicProvingKey 
[2]*big.Int) (*types.Transaction, error) { + return _VRFOwner.Contract.DeregisterProvingKey(&_VRFOwner.TransactOpts, publicProvingKey) +} + +func (_VRFOwner *VRFOwnerTransactorSession) DeregisterProvingKey(publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFOwner.Contract.DeregisterProvingKey(&_VRFOwner.TransactOpts, publicProvingKey) +} + +func (_VRFOwner *VRFOwnerTransactor) FulfillRandomWords(opts *bind.TransactOpts, proof VRFTypesProof, rc VRFTypesRequestCommitment) (*types.Transaction, error) { + return _VRFOwner.contract.Transact(opts, "fulfillRandomWords", proof, rc) +} + +func (_VRFOwner *VRFOwnerSession) FulfillRandomWords(proof VRFTypesProof, rc VRFTypesRequestCommitment) (*types.Transaction, error) { + return _VRFOwner.Contract.FulfillRandomWords(&_VRFOwner.TransactOpts, proof, rc) +} + +func (_VRFOwner *VRFOwnerTransactorSession) FulfillRandomWords(proof VRFTypesProof, rc VRFTypesRequestCommitment) (*types.Transaction, error) { + return _VRFOwner.Contract.FulfillRandomWords(&_VRFOwner.TransactOpts, proof, rc) +} + +func (_VRFOwner *VRFOwnerTransactor) OwnerCancelSubscription(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) { + return _VRFOwner.contract.Transact(opts, "ownerCancelSubscription", subId) +} + +func (_VRFOwner *VRFOwnerSession) OwnerCancelSubscription(subId uint64) (*types.Transaction, error) { + return _VRFOwner.Contract.OwnerCancelSubscription(&_VRFOwner.TransactOpts, subId) +} + +func (_VRFOwner *VRFOwnerTransactorSession) OwnerCancelSubscription(subId uint64) (*types.Transaction, error) { + return _VRFOwner.Contract.OwnerCancelSubscription(&_VRFOwner.TransactOpts, subId) +} + +func (_VRFOwner *VRFOwnerTransactor) RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFOwner.contract.Transact(opts, "recoverFunds", to) +} + +func (_VRFOwner *VRFOwnerSession) RecoverFunds(to common.Address) (*types.Transaction, error) { + return 
_VRFOwner.Contract.RecoverFunds(&_VRFOwner.TransactOpts, to) +} + +func (_VRFOwner *VRFOwnerTransactorSession) RecoverFunds(to common.Address) (*types.Transaction, error) { + return _VRFOwner.Contract.RecoverFunds(&_VRFOwner.TransactOpts, to) +} + +func (_VRFOwner *VRFOwnerTransactor) RegisterProvingKey(opts *bind.TransactOpts, oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFOwner.contract.Transact(opts, "registerProvingKey", oracle, publicProvingKey) +} + +func (_VRFOwner *VRFOwnerSession) RegisterProvingKey(oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFOwner.Contract.RegisterProvingKey(&_VRFOwner.TransactOpts, oracle, publicProvingKey) +} + +func (_VRFOwner *VRFOwnerTransactorSession) RegisterProvingKey(oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + return _VRFOwner.Contract.RegisterProvingKey(&_VRFOwner.TransactOpts, oracle, publicProvingKey) +} + +func (_VRFOwner *VRFOwnerTransactor) SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) { + return _VRFOwner.contract.Transact(opts, "setAuthorizedSenders", senders) +} + +func (_VRFOwner *VRFOwnerSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _VRFOwner.Contract.SetAuthorizedSenders(&_VRFOwner.TransactOpts, senders) +} + +func (_VRFOwner *VRFOwnerTransactorSession) SetAuthorizedSenders(senders []common.Address) (*types.Transaction, error) { + return _VRFOwner.Contract.SetAuthorizedSenders(&_VRFOwner.TransactOpts, senders) +} + +func (_VRFOwner *VRFOwnerTransactor) SetConfig(opts *bind.TransactOpts, minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig FeeConfig) (*types.Transaction, error) { + return _VRFOwner.contract.Transact(opts, "setConfig", minimumRequestConfirmations, 
maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) +} + +func (_VRFOwner *VRFOwnerSession) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig FeeConfig) (*types.Transaction, error) { + return _VRFOwner.Contract.SetConfig(&_VRFOwner.TransactOpts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) +} + +func (_VRFOwner *VRFOwnerTransactorSession) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig FeeConfig) (*types.Transaction, error) { + return _VRFOwner.Contract.SetConfig(&_VRFOwner.TransactOpts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) +} + +func (_VRFOwner *VRFOwnerTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFOwner.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFOwner *VRFOwnerSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFOwner.Contract.TransferOwnership(&_VRFOwner.TransactOpts, to) +} + +func (_VRFOwner *VRFOwnerTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFOwner.Contract.TransferOwnership(&_VRFOwner.TransactOpts, to) +} + +func (_VRFOwner *VRFOwnerTransactor) TransferVRFOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFOwner.contract.Transact(opts, "transferVRFOwnership", to) +} + +func (_VRFOwner *VRFOwnerSession) TransferVRFOwnership(to common.Address) (*types.Transaction, error) { + return _VRFOwner.Contract.TransferVRFOwnership(&_VRFOwner.TransactOpts, to) +} + +func (_VRFOwner *VRFOwnerTransactorSession) 
TransferVRFOwnership(to common.Address) (*types.Transaction, error) { + return _VRFOwner.Contract.TransferVRFOwnership(&_VRFOwner.TransactOpts, to) +} + +type VRFOwnerAuthorizedSendersChangedIterator struct { + Event *VRFOwnerAuthorizedSendersChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFOwnerAuthorizedSendersChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFOwnerAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFOwnerAuthorizedSendersChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFOwnerAuthorizedSendersChangedIterator) Error() error { + return it.fail +} + +func (it *VRFOwnerAuthorizedSendersChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFOwnerAuthorizedSendersChanged struct { + Senders []common.Address + ChangedBy common.Address + Raw types.Log +} + +func (_VRFOwner *VRFOwnerFilterer) FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*VRFOwnerAuthorizedSendersChangedIterator, error) { + + logs, sub, err := _VRFOwner.contract.FilterLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return &VRFOwnerAuthorizedSendersChangedIterator{contract: _VRFOwner.contract, event: "AuthorizedSendersChanged", logs: logs, sub: sub}, nil +} + +func (_VRFOwner *VRFOwnerFilterer) WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *VRFOwnerAuthorizedSendersChanged) (event.Subscription, 
error) { + + logs, sub, err := _VRFOwner.contract.WatchLogs(opts, "AuthorizedSendersChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFOwnerAuthorizedSendersChanged) + if err := _VRFOwner.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFOwner *VRFOwnerFilterer) ParseAuthorizedSendersChanged(log types.Log) (*VRFOwnerAuthorizedSendersChanged, error) { + event := new(VRFOwnerAuthorizedSendersChanged) + if err := _VRFOwner.contract.UnpackLog(event, "AuthorizedSendersChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFOwnerOwnershipTransferRequestedIterator struct { + Event *VRFOwnerOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFOwnerOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFOwnerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFOwnerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFOwnerOwnershipTransferRequestedIterator) Error() 
error { + return it.fail +} + +func (it *VRFOwnerOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFOwnerOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFOwner *VRFOwnerFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFOwnerOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFOwner.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFOwnerOwnershipTransferRequestedIterator{contract: _VRFOwner.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFOwner *VRFOwnerFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFOwnerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFOwner.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFOwnerOwnershipTransferRequested) + if err := _VRFOwner.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return 
nil + } + } + }), nil +} + +func (_VRFOwner *VRFOwnerFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFOwnerOwnershipTransferRequested, error) { + event := new(VRFOwnerOwnershipTransferRequested) + if err := _VRFOwner.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFOwnerOwnershipTransferredIterator struct { + Event *VRFOwnerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFOwnerOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFOwnerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFOwnerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFOwnerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFOwnerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFOwnerOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFOwner *VRFOwnerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFOwnerOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err 
:= _VRFOwner.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFOwnerOwnershipTransferredIterator{contract: _VRFOwner.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFOwner *VRFOwnerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFOwnerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFOwner.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFOwnerOwnershipTransferred) + if err := _VRFOwner.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFOwner *VRFOwnerFilterer) ParseOwnershipTransferred(log types.Log) (*VRFOwnerOwnershipTransferred, error) { + event := new(VRFOwnerOwnershipTransferred) + if err := _VRFOwner.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFOwnerRandomWordsForcedIterator struct { + Event *VRFOwnerRandomWordsForced + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFOwnerRandomWordsForcedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + 
it.Event = new(VRFOwnerRandomWordsForced) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFOwnerRandomWordsForced) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFOwnerRandomWordsForcedIterator) Error() error { + return it.fail +} + +func (it *VRFOwnerRandomWordsForcedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFOwnerRandomWordsForced struct { + RequestId *big.Int + SubId uint64 + Sender common.Address + Raw types.Log +} + +func (_VRFOwner *VRFOwnerFilterer) FilterRandomWordsForced(opts *bind.FilterOpts, requestId []*big.Int, subId []uint64, sender []common.Address) (*VRFOwnerRandomWordsForcedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _VRFOwner.contract.FilterLogs(opts, "RandomWordsForced", requestIdRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return &VRFOwnerRandomWordsForcedIterator{contract: _VRFOwner.contract, event: "RandomWordsForced", logs: logs, sub: sub}, nil +} + +func (_VRFOwner *VRFOwnerFilterer) WatchRandomWordsForced(opts *bind.WatchOpts, sink chan<- *VRFOwnerRandomWordsForced, requestId []*big.Int, subId []uint64, sender []common.Address) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + 
requestIdRule = append(requestIdRule, requestIdItem) + } + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _VRFOwner.contract.WatchLogs(opts, "RandomWordsForced", requestIdRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFOwnerRandomWordsForced) + if err := _VRFOwner.contract.UnpackLog(event, "RandomWordsForced", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFOwner *VRFOwnerFilterer) ParseRandomWordsForced(log types.Log) (*VRFOwnerRandomWordsForced, error) { + event := new(VRFOwnerRandomWordsForced) + if err := _VRFOwner.contract.UnpackLog(event, "RandomWordsForced", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VRFOwner *VRFOwner) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFOwner.abi.Events["AuthorizedSendersChanged"].ID: + return _VRFOwner.ParseAuthorizedSendersChanged(log) + case _VRFOwner.abi.Events["OwnershipTransferRequested"].ID: + return _VRFOwner.ParseOwnershipTransferRequested(log) + case _VRFOwner.abi.Events["OwnershipTransferred"].ID: + return _VRFOwner.ParseOwnershipTransferred(log) + case _VRFOwner.abi.Events["RandomWordsForced"].ID: + return _VRFOwner.ParseRandomWordsForced(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFOwnerAuthorizedSendersChanged) Topic() common.Hash { + return 
common.HexToHash("0xf263cfb3e4298332e776194610cf9fdc09ccb3ada8b9aa39764d882e11fbf0a0") +} + +func (VRFOwnerOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFOwnerOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFOwnerRandomWordsForced) Topic() common.Hash { + return common.HexToHash("0xabbcd646b939d78de3053d035798eb5c9818ea1836a2fbdbad335331df51e01d") +} + +func (_VRFOwner *VRFOwner) Address() common.Address { + return _VRFOwner.address +} + +type VRFOwnerInterface interface { + GetAuthorizedSenders(opts *bind.CallOpts) ([]common.Address, error) + + GetVRFCoordinator(opts *bind.CallOpts) (common.Address, error) + + IsAuthorizedSender(opts *bind.CallOpts, sender common.Address) (bool, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptVRFOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + DeregisterProvingKey(opts *bind.TransactOpts, publicProvingKey [2]*big.Int) (*types.Transaction, error) + + FulfillRandomWords(opts *bind.TransactOpts, proof VRFTypesProof, rc VRFTypesRequestCommitment) (*types.Transaction, error) + + OwnerCancelSubscription(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + RegisterProvingKey(opts *bind.TransactOpts, oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) + + SetAuthorizedSenders(opts *bind.TransactOpts, senders []common.Address) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig FeeConfig) 
(*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + TransferVRFOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterAuthorizedSendersChanged(opts *bind.FilterOpts) (*VRFOwnerAuthorizedSendersChangedIterator, error) + + WatchAuthorizedSendersChanged(opts *bind.WatchOpts, sink chan<- *VRFOwnerAuthorizedSendersChanged) (event.Subscription, error) + + ParseAuthorizedSendersChanged(log types.Log) (*VRFOwnerAuthorizedSendersChanged, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFOwnerOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFOwnerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFOwnerOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFOwnerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFOwnerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFOwnerOwnershipTransferred, error) + + FilterRandomWordsForced(opts *bind.FilterOpts, requestId []*big.Int, subId []uint64, sender []common.Address) (*VRFOwnerRandomWordsForcedIterator, error) + + WatchRandomWordsForced(opts *bind.WatchOpts, sink chan<- *VRFOwnerRandomWordsForced, requestId []*big.Int, subId []uint64, sender []common.Address) (event.Subscription, error) + + ParseRandomWordsForced(log types.Log) (*VRFOwnerRandomWordsForced, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_owner_test_consumer/vrf_owner_test_consumer.go 
b/core/gethwrappers/generated/vrf_owner_test_consumer/vrf_owner_test_consumer.go new file mode 100644 index 00000000..738cc840 --- /dev/null +++ b/core/gethwrappers/generated/vrf_owner_test_consumer/vrf_owner_test_consumer.go @@ -0,0 +1,1032 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrf_owner_test_consumer + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2OwnerTestConsumerMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"SubscriptionCreatedFundedAndConsumerAdded\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"COORDINATOR\",\"outputs\":[{\"internalType\":\"contractVRFCoordinatorV2Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLITOKEN\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_requestId\",\"type\":\"uint256\"}],\"name\":\"getRequestStatus\",\"outputs\":[{\"internalType\":
\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256\",\"name\":\"requestTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestBlockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentBlockNumber\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"_requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_numWords\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestCount\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"_subTopUpAmount\",\"type\":\"uint256\"}],\"name\":\"requestRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"reset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_averageFulfillmentInMillions\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_fastestFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutabilit
y\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_lastRequestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requests\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"requestTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestBlockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentBlockNumber\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_responseCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_slowestFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"subId\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"topUpSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60a0604052600060065560006007556103e76008553480156200002157600080fd5b50604051620016ee380380620016ee8339810160408190526200004491620001e2565b6001600160601b0319606083901b166080523380600081620000ad5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000e057620000e08162000119565b5050600280546001600160a01b039485166001600160a01b0319918216179091556003805493909416921691909117909155506200021a565b6001600160a01b038116331415620001745760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620000a4565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001dd57600080fd5b919050565b60008060408385031215620001f657600080fd5b6200020183620001c5565b91506200021160208401620001c5565b90509250929050565b60805160601c6114ae620002406000396000818161036901526103d101526114ae6000f3fe608060405234801561001057600080fd5b50600436106101365760003560e01c80638da5cb5b116100b2578063d8a4676f11610081578063eb1d28bb11610066578063eb1d28bb146102e6578063f2fde38b1461032b578063f82d24381461033e57600080fd5b8063d8a4676f146102b8578063dc1670db146102dd57600080fd5b80638da5cb5b14610207578063a168fa8914610225578063b1e2174914610290578063d826f88f1461029957600080fd5b8063557d2e921161010957806374dba124116100ee57806374dba124146101e357806379ba5097146101ec57806386850e93146101f457600080fd5b8063557d2e92146101d1578063737144bc146101da57600080fd5b80631757f11c1461013b5780631fe543e3146101575780633b2bcbf11461016c57806355380dfb146101b1575b600080fd5b61014460075481565b6040519081526020015b60405180910390f35b61016a610165366004611124565b610351565b005b60025461018c9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff909
116815260200161014e565b60035461018c9073ffffffffffffffffffffffffffffffffffffffff1681565b61014460055481565b61014460065481565b61014460085481565b61016a610411565b61016a6102023660046110f2565b61050e565b60005473ffffffffffffffffffffffffffffffffffffffff1661018c565b6102666102333660046110f2565b600b602052600090815260409020805460028201546003830154600484015460059094015460ff90931693919290919085565b6040805195151586526020860194909452928401919091526060830152608082015260a00161014e565b61014460095481565b61016a6000600681905560078190556103e76008556005819055600455565b6102cb6102c63660046110f2565b6105ea565b60405161014e969594939291906112d5565b61014460045481565b6003546103129074010000000000000000000000000000000000000000900467ffffffffffffffff1681565b60405167ffffffffffffffff909116815260200161014e565b61016a61033936600461102d565b6106cf565b61016a61034c36600461108c565b6106e3565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610403576040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660248201526044015b60405180910390fd5b61040d8282610c3e565b5050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610492576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016103fa565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610516610d65565b6003546002546040805174010000000000000000000000000000000000000000840467ffffffffffffffff16602082015273ffffffffffffffffffffffffffffffffffffffff93841693634000aea09316918591016040516020818303038152906040526040518463ffffffff1660e01b8152600401610
5989392919061123d565b602060405180830381600087803b1580156105b257600080fd5b505af11580156105c6573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061040d919061106a565b6000818152600b60209081526040808320815160c081018352815460ff161515815260018201805484518187028101870190955280855260609587958695869586958695919492938584019390929083018282801561066857602002820191906000526020600020905b815481526020019060010190808311610654575b505050505081526020016002820154815260200160038201548152602001600482015481526020016005820154815250509050806000015181602001518260400151836060015184608001518560a001519650965096509650965096505091939550919395565b6106d7610d65565b6106e081610de8565b50565b6106eb610d65565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b15801561075557600080fd5b505af1158015610769573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061078d9190611213565b600380547fffffffff0000000000000000ffffffffffffffffffffffffffffffffffffffff167401000000000000000000000000000000000000000067ffffffffffffffff938416810291909117918290556002546040517f7341c10c00000000000000000000000000000000000000000000000000000000815291909204909216600483015230602483015273ffffffffffffffffffffffffffffffffffffffff1690637341c10c90604401600060405180830381600087803b15801561085457600080fd5b505af1158015610868573d6000803e3d6000fd5b505050506108758161050e565b600354604080517401000000000000000000000000000000000000000090920467ffffffffffffffff16825230602083015281018290527f56c142509574e8340ca0190b029c74464b84037d2876278ea0ade3ffb1f0042c9060600160405180910390a160005b8261ffff168161ffff161015610ad9576002546003546040517f5d3b1d30000000000000000000000000000000000000000000000000000000008152600481018990527401000000000000000000000000000000000000000090910467ffffffffffffffff16602482015261ffff8916604482015263ffffffff80881660648301528616608482015260009173fffffffffffffffffffffffff
fffffffffffffff1690635d3b1d309060a401602060405180830381600087803b1580156109a257600080fd5b505af11580156109b6573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906109da919061110b565b6009819055905060006109eb610ede565b6040805160c08101825260008082528251818152602080820185528084019182524284860152606084018390526080840186905260a08401839052878352600b815293909120825181547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169015151781559051805194955091939092610a79926001850192910190610fa2565b5060408201516002820155606082015160038201556080820151600482015560a0909101516005918201558054906000610ab28361140a565b90915550506000918252600a60205260409091205580610ad1816113e8565b9150506108dc565b506002546003546040517f9f87fad70000000000000000000000000000000000000000000000000000000081527401000000000000000000000000000000000000000090910467ffffffffffffffff16600482015230602482015273ffffffffffffffffffffffffffffffffffffffff90911690639f87fad790604401600060405180830381600087803b158015610b7057600080fd5b505af1158015610b84573d6000803e3d6000fd5b50506002546003546040517fd7ae1d300000000000000000000000000000000000000000000000000000000081527401000000000000000000000000000000000000000090910467ffffffffffffffff16600482015233602482015273ffffffffffffffffffffffffffffffffffffffff909116925063d7ae1d309150604401600060405180830381600087803b158015610c1e57600080fd5b505af1158015610c32573d6000803e3d6000fd5b50505050505050505050565b6000610c48610ede565b6000848152600a602052604081205491925090610c6590836113d1565b90506000610c7682620f4240611394565b9050600754821115610c885760078290555b6008548210610c9957600854610c9b565b815b600855600454610cab5780610cde565b600454610cb9906001611341565b81600454600654610cca9190611394565b610cd49190611341565b610cde9190611359565b6006556000858152600b6020908152604090912080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600190811782558651610d30939290910191870190610fa2565b506000858152600b602052604081204260038201556005018490556004805491610d598361140a565b919050555
05050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610de6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016103fa565b565b73ffffffffffffffffffffffffffffffffffffffff8116331415610e68576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016103fa565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600046610eea81610f7b565b15610f7457606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b158015610f3657600080fd5b505afa158015610f4a573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610f6e919061110b565b91505090565b4391505090565b600061a4b1821480610f8f575062066eed82145b80610f9c575062066eee82145b92915050565b828054828255906000526020600020908101928215610fdd579160200282015b82811115610fdd578251825591602001919060010190610fc2565b50610fe9929150610fed565b5090565b5b80821115610fe95760008155600101610fee565b803561ffff8116811461101457600080fd5b919050565b803563ffffffff8116811461101457600080fd5b60006020828403121561103f57600080fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811461106357600080fd5b9392505050565b60006020828403121561107c57600080fd5b8151801515811461106357600080fd5b60008060008060008060c087890312156110a557600080fd5b6110ae87611002565b9550602087013594506110c360408801611019565b93506110d160608801611019565b92506110df60808801611002565b915060a087013590509295509295509295565b60006020828403121561110457600080fd5b5035919050565b60006020828403121561111d57600080fd5b5051919050565b6000806040838503121561113757600080fd5b8235915060208084013567fffffff
fffffffff8082111561115757600080fd5b818601915086601f83011261116b57600080fd5b81358181111561117d5761117d611472565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f830116810181811085821117156111c0576111c0611472565b604052828152858101935084860182860187018b10156111df57600080fd5b600095505b838610156112025780358552600195909501949386019386016111e4565b508096505050505050509250929050565b60006020828403121561122557600080fd5b815167ffffffffffffffff8116811461106357600080fd5b73ffffffffffffffffffffffffffffffffffffffff8416815260006020848184015260606040840152835180606085015260005b8181101561128d57858101830151858201608001528201611271565b8181111561129f576000608083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160800195945050505050565b600060c082018815158352602060c08185015281895180845260e086019150828b01935060005b81811015611318578451835293830193918301916001016112fc565b505060408501989098525050506060810193909352608083019190915260a09091015292915050565b6000821982111561135457611354611443565b500190565b60008261138f577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156113cc576113cc611443565b500290565b6000828210156113e3576113e3611443565b500390565b600061ffff8083168181141561140057611400611443565b6001019392505050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82141561143c5761143c611443565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFV2OwnerTestConsumerABI = VRFV2OwnerTestConsumerMetaData.ABI + +var VRFV2OwnerTestConsumerBin = VRFV2OwnerTestConsumerMetaData.Bin + +func DeployVRFV2OwnerTestConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, _vrfCoordinator 
common.Address, _link common.Address) (common.Address, *types.Transaction, *VRFV2OwnerTestConsumer, error) { + parsed, err := VRFV2OwnerTestConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2OwnerTestConsumerBin), backend, _vrfCoordinator, _link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2OwnerTestConsumer{address: address, abi: *parsed, VRFV2OwnerTestConsumerCaller: VRFV2OwnerTestConsumerCaller{contract: contract}, VRFV2OwnerTestConsumerTransactor: VRFV2OwnerTestConsumerTransactor{contract: contract}, VRFV2OwnerTestConsumerFilterer: VRFV2OwnerTestConsumerFilterer{contract: contract}}, nil +} + +type VRFV2OwnerTestConsumer struct { + address common.Address + abi abi.ABI + VRFV2OwnerTestConsumerCaller + VRFV2OwnerTestConsumerTransactor + VRFV2OwnerTestConsumerFilterer +} + +type VRFV2OwnerTestConsumerCaller struct { + contract *bind.BoundContract +} + +type VRFV2OwnerTestConsumerTransactor struct { + contract *bind.BoundContract +} + +type VRFV2OwnerTestConsumerFilterer struct { + contract *bind.BoundContract +} + +type VRFV2OwnerTestConsumerSession struct { + Contract *VRFV2OwnerTestConsumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2OwnerTestConsumerCallerSession struct { + Contract *VRFV2OwnerTestConsumerCaller + CallOpts bind.CallOpts +} + +type VRFV2OwnerTestConsumerTransactorSession struct { + Contract *VRFV2OwnerTestConsumerTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2OwnerTestConsumerRaw struct { + Contract *VRFV2OwnerTestConsumer +} + +type VRFV2OwnerTestConsumerCallerRaw struct { + Contract *VRFV2OwnerTestConsumerCaller +} + +type VRFV2OwnerTestConsumerTransactorRaw struct { + Contract *VRFV2OwnerTestConsumerTransactor +} + +func 
NewVRFV2OwnerTestConsumer(address common.Address, backend bind.ContractBackend) (*VRFV2OwnerTestConsumer, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2OwnerTestConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2OwnerTestConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2OwnerTestConsumer{address: address, abi: abi, VRFV2OwnerTestConsumerCaller: VRFV2OwnerTestConsumerCaller{contract: contract}, VRFV2OwnerTestConsumerTransactor: VRFV2OwnerTestConsumerTransactor{contract: contract}, VRFV2OwnerTestConsumerFilterer: VRFV2OwnerTestConsumerFilterer{contract: contract}}, nil +} + +func NewVRFV2OwnerTestConsumerCaller(address common.Address, caller bind.ContractCaller) (*VRFV2OwnerTestConsumerCaller, error) { + contract, err := bindVRFV2OwnerTestConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2OwnerTestConsumerCaller{contract: contract}, nil +} + +func NewVRFV2OwnerTestConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2OwnerTestConsumerTransactor, error) { + contract, err := bindVRFV2OwnerTestConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2OwnerTestConsumerTransactor{contract: contract}, nil +} + +func NewVRFV2OwnerTestConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2OwnerTestConsumerFilterer, error) { + contract, err := bindVRFV2OwnerTestConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2OwnerTestConsumerFilterer{contract: contract}, nil +} + +func bindVRFV2OwnerTestConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2OwnerTestConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, 
filterer), nil +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2OwnerTestConsumer.Contract.VRFV2OwnerTestConsumerCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.VRFV2OwnerTestConsumerTransactor.contract.Transfer(opts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.VRFV2OwnerTestConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2OwnerTestConsumer.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.contract.Transfer(opts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCaller) COORDINATOR(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2OwnerTestConsumer.contract.Call(opts, &out, "COORDINATOR") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) COORDINATOR() (common.Address, error) { + return _VRFV2OwnerTestConsumer.Contract.COORDINATOR(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerSession) COORDINATOR() (common.Address, error) { + return _VRFV2OwnerTestConsumer.Contract.COORDINATOR(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCaller) PLITOKEN(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2OwnerTestConsumer.contract.Call(opts, &out, "PLITOKEN") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) PLITOKEN() (common.Address, error) { + return _VRFV2OwnerTestConsumer.Contract.PLITOKEN(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerSession) PLITOKEN() (common.Address, error) { + return _VRFV2OwnerTestConsumer.Contract.PLITOKEN(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCaller) GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) { + var out []interface{} + err := _VRFV2OwnerTestConsumer.contract.Call(opts, &out, "getRequestStatus", _requestId) + + outstruct := new(GetRequestStatus) + if err != nil { + return *outstruct, err + } + + outstruct.Fulfilled = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.RandomWords 
= *abi.ConvertType(out[1], new([]*big.Int)).(*[]*big.Int) + outstruct.RequestTimestamp = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.FulfilmentTimestamp = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.RequestBlockNumber = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + outstruct.FulfilmentBlockNumber = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2OwnerTestConsumer.Contract.GetRequestStatus(&_VRFV2OwnerTestConsumer.CallOpts, _requestId) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2OwnerTestConsumer.Contract.GetRequestStatus(&_VRFV2OwnerTestConsumer.CallOpts, _requestId) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2OwnerTestConsumer.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) Owner() (common.Address, error) { + return _VRFV2OwnerTestConsumer.Contract.Owner(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerSession) Owner() (common.Address, error) { + return _VRFV2OwnerTestConsumer.Contract.Owner(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCaller) SAverageFulfillmentInMillions(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2OwnerTestConsumer.contract.Call(opts, &out, "s_averageFulfillmentInMillions") + + if err != nil { + return *new(*big.Int), err + } + + out0 := 
*abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) SAverageFulfillmentInMillions() (*big.Int, error) { + return _VRFV2OwnerTestConsumer.Contract.SAverageFulfillmentInMillions(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerSession) SAverageFulfillmentInMillions() (*big.Int, error) { + return _VRFV2OwnerTestConsumer.Contract.SAverageFulfillmentInMillions(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCaller) SFastestFulfillment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2OwnerTestConsumer.contract.Call(opts, &out, "s_fastestFulfillment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) SFastestFulfillment() (*big.Int, error) { + return _VRFV2OwnerTestConsumer.Contract.SFastestFulfillment(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerSession) SFastestFulfillment() (*big.Int, error) { + return _VRFV2OwnerTestConsumer.Contract.SFastestFulfillment(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCaller) SLastRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2OwnerTestConsumer.contract.Call(opts, &out, "s_lastRequestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) SLastRequestId() (*big.Int, error) { + return _VRFV2OwnerTestConsumer.Contract.SLastRequestId(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerSession) SLastRequestId() (*big.Int, 
error) { + return _VRFV2OwnerTestConsumer.Contract.SLastRequestId(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCaller) SRequestCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2OwnerTestConsumer.contract.Call(opts, &out, "s_requestCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) SRequestCount() (*big.Int, error) { + return _VRFV2OwnerTestConsumer.Contract.SRequestCount(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerSession) SRequestCount() (*big.Int, error) { + return _VRFV2OwnerTestConsumer.Contract.SRequestCount(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCaller) SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) { + var out []interface{} + err := _VRFV2OwnerTestConsumer.contract.Call(opts, &out, "s_requests", arg0) + + outstruct := new(SRequests) + if err != nil { + return *outstruct, err + } + + outstruct.Fulfilled = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.RequestTimestamp = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.FulfilmentTimestamp = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.RequestBlockNumber = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.FulfilmentBlockNumber = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2OwnerTestConsumer.Contract.SRequests(&_VRFV2OwnerTestConsumer.CallOpts, arg0) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return 
_VRFV2OwnerTestConsumer.Contract.SRequests(&_VRFV2OwnerTestConsumer.CallOpts, arg0) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCaller) SResponseCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2OwnerTestConsumer.contract.Call(opts, &out, "s_responseCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) SResponseCount() (*big.Int, error) { + return _VRFV2OwnerTestConsumer.Contract.SResponseCount(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerSession) SResponseCount() (*big.Int, error) { + return _VRFV2OwnerTestConsumer.Contract.SResponseCount(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCaller) SSlowestFulfillment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2OwnerTestConsumer.contract.Call(opts, &out, "s_slowestFulfillment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) SSlowestFulfillment() (*big.Int, error) { + return _VRFV2OwnerTestConsumer.Contract.SSlowestFulfillment(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerSession) SSlowestFulfillment() (*big.Int, error) { + return _VRFV2OwnerTestConsumer.Contract.SSlowestFulfillment(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCaller) SubId(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _VRFV2OwnerTestConsumer.contract.Call(opts, &out, "subId") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func 
(_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) SubId() (uint64, error) { + return _VRFV2OwnerTestConsumer.Contract.SubId(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerCallerSession) SubId() (uint64, error) { + return _VRFV2OwnerTestConsumer.Contract.SubId(&_VRFV2OwnerTestConsumer.CallOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.AcceptOwnership(&_VRFV2OwnerTestConsumer.TransactOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.AcceptOwnership(&_VRFV2OwnerTestConsumer.TransactOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.RawFulfillRandomWords(&_VRFV2OwnerTestConsumer.TransactOpts, requestId, randomWords) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.RawFulfillRandomWords(&_VRFV2OwnerTestConsumer.TransactOpts, requestId, randomWords) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactor) RequestRandomWords(opts 
*bind.TransactOpts, _requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _numWords uint32, _requestCount uint16, _subTopUpAmount *big.Int) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.contract.Transact(opts, "requestRandomWords", _requestConfirmations, _keyHash, _callbackGasLimit, _numWords, _requestCount, _subTopUpAmount) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) RequestRandomWords(_requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _numWords uint32, _requestCount uint16, _subTopUpAmount *big.Int) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.RequestRandomWords(&_VRFV2OwnerTestConsumer.TransactOpts, _requestConfirmations, _keyHash, _callbackGasLimit, _numWords, _requestCount, _subTopUpAmount) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactorSession) RequestRandomWords(_requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _numWords uint32, _requestCount uint16, _subTopUpAmount *big.Int) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.RequestRandomWords(&_VRFV2OwnerTestConsumer.TransactOpts, _requestConfirmations, _keyHash, _callbackGasLimit, _numWords, _requestCount, _subTopUpAmount) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactor) Reset(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.contract.Transact(opts, "reset") +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) Reset() (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.Reset(&_VRFV2OwnerTestConsumer.TransactOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactorSession) Reset() (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.Reset(&_VRFV2OwnerTestConsumer.TransactOpts) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactor) TopUpSubscription(opts *bind.TransactOpts, amount 
*big.Int) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.contract.Transact(opts, "topUpSubscription", amount) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.TopUpSubscription(&_VRFV2OwnerTestConsumer.TransactOpts, amount) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactorSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.TopUpSubscription(&_VRFV2OwnerTestConsumer.TransactOpts, amount) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.TransferOwnership(&_VRFV2OwnerTestConsumer.TransactOpts, to) +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2OwnerTestConsumer.Contract.TransferOwnership(&_VRFV2OwnerTestConsumer.TransactOpts, to) +} + +type VRFV2OwnerTestConsumerOwnershipTransferRequestedIterator struct { + Event *VRFV2OwnerTestConsumerOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2OwnerTestConsumerOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2OwnerTestConsumerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return 
true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2OwnerTestConsumerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2OwnerTestConsumerOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2OwnerTestConsumerOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2OwnerTestConsumerOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2OwnerTestConsumerOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2OwnerTestConsumer.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2OwnerTestConsumerOwnershipTransferRequestedIterator{contract: _VRFV2OwnerTestConsumer.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2OwnerTestConsumerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := 
_VRFV2OwnerTestConsumer.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2OwnerTestConsumerOwnershipTransferRequested) + if err := _VRFV2OwnerTestConsumer.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2OwnerTestConsumerOwnershipTransferRequested, error) { + event := new(VRFV2OwnerTestConsumerOwnershipTransferRequested) + if err := _VRFV2OwnerTestConsumer.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2OwnerTestConsumerOwnershipTransferredIterator struct { + Event *VRFV2OwnerTestConsumerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2OwnerTestConsumerOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2OwnerTestConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2OwnerTestConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + 
+ case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2OwnerTestConsumerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2OwnerTestConsumerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2OwnerTestConsumerOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2OwnerTestConsumerOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2OwnerTestConsumer.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2OwnerTestConsumerOwnershipTransferredIterator{contract: _VRFV2OwnerTestConsumer.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2OwnerTestConsumerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2OwnerTestConsumer.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2OwnerTestConsumerOwnershipTransferred) + if err := 
_VRFV2OwnerTestConsumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2OwnerTestConsumerOwnershipTransferred, error) { + event := new(VRFV2OwnerTestConsumerOwnershipTransferred) + if err := _VRFV2OwnerTestConsumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAddedIterator struct { + Event *VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAddedIterator) Error() error { + return it.fail +} + +func (it 
*VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAdded struct { + SubId uint64 + Consumer common.Address + Amount *big.Int + Raw types.Log +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerFilterer) FilterSubscriptionCreatedFundedAndConsumerAdded(opts *bind.FilterOpts) (*VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAddedIterator, error) { + + logs, sub, err := _VRFV2OwnerTestConsumer.contract.FilterLogs(opts, "SubscriptionCreatedFundedAndConsumerAdded") + if err != nil { + return nil, err + } + return &VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAddedIterator{contract: _VRFV2OwnerTestConsumer.contract, event: "SubscriptionCreatedFundedAndConsumerAdded", logs: logs, sub: sub}, nil +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerFilterer) WatchSubscriptionCreatedFundedAndConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAdded) (event.Subscription, error) { + + logs, sub, err := _VRFV2OwnerTestConsumer.contract.WatchLogs(opts, "SubscriptionCreatedFundedAndConsumerAdded") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAdded) + if err := _VRFV2OwnerTestConsumer.contract.UnpackLog(event, "SubscriptionCreatedFundedAndConsumerAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumerFilterer) ParseSubscriptionCreatedFundedAndConsumerAdded(log types.Log) 
(*VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAdded, error) { + event := new(VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAdded) + if err := _VRFV2OwnerTestConsumer.contract.UnpackLog(event, "SubscriptionCreatedFundedAndConsumerAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRequestStatus struct { + Fulfilled bool + RandomWords []*big.Int + RequestTimestamp *big.Int + FulfilmentTimestamp *big.Int + RequestBlockNumber *big.Int + FulfilmentBlockNumber *big.Int +} +type SRequests struct { + Fulfilled bool + RequestTimestamp *big.Int + FulfilmentTimestamp *big.Int + RequestBlockNumber *big.Int + FulfilmentBlockNumber *big.Int +} + +func (_VRFV2OwnerTestConsumer *VRFV2OwnerTestConsumer) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2OwnerTestConsumer.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2OwnerTestConsumer.ParseOwnershipTransferRequested(log) + case _VRFV2OwnerTestConsumer.abi.Events["OwnershipTransferred"].ID: + return _VRFV2OwnerTestConsumer.ParseOwnershipTransferred(log) + case _VRFV2OwnerTestConsumer.abi.Events["SubscriptionCreatedFundedAndConsumerAdded"].ID: + return _VRFV2OwnerTestConsumer.ParseSubscriptionCreatedFundedAndConsumerAdded(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2OwnerTestConsumerOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFV2OwnerTestConsumerOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAdded) Topic() common.Hash { + return common.HexToHash("0x56c142509574e8340ca0190b029c74464b84037d2876278ea0ade3ffb1f0042c") +} + +func (_VRFV2OwnerTestConsumer 
*VRFV2OwnerTestConsumer) Address() common.Address { + return _VRFV2OwnerTestConsumer.address +} + +type VRFV2OwnerTestConsumerInterface interface { + COORDINATOR(opts *bind.CallOpts) (common.Address, error) + + PLITOKEN(opts *bind.CallOpts) (common.Address, error) + + GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SAverageFulfillmentInMillions(opts *bind.CallOpts) (*big.Int, error) + + SFastestFulfillment(opts *bind.CallOpts) (*big.Int, error) + + SLastRequestId(opts *bind.CallOpts) (*big.Int, error) + + SRequestCount(opts *bind.CallOpts) (*big.Int, error) + + SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) + + SResponseCount(opts *bind.CallOpts) (*big.Int, error) + + SSlowestFulfillment(opts *bind.CallOpts) (*big.Int, error) + + SubId(opts *bind.CallOpts) (uint64, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, _requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _numWords uint32, _requestCount uint16, _subTopUpAmount *big.Int) (*types.Transaction, error) + + Reset(opts *bind.TransactOpts) (*types.Transaction, error) + + TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2OwnerTestConsumerOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2OwnerTestConsumerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) 
(*VRFV2OwnerTestConsumerOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2OwnerTestConsumerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2OwnerTestConsumerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2OwnerTestConsumerOwnershipTransferred, error) + + FilterSubscriptionCreatedFundedAndConsumerAdded(opts *bind.FilterOpts) (*VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAddedIterator, error) + + WatchSubscriptionCreatedFundedAndConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAdded) (event.Subscription, error) + + ParseSubscriptionCreatedFundedAndConsumerAdded(log types.Log) (*VRFV2OwnerTestConsumerSubscriptionCreatedFundedAndConsumerAdded, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_ownerless_consumer_example/vrf_ownerless_consumer_example.go b/core/gethwrappers/generated/vrf_ownerless_consumer_example/vrf_ownerless_consumer_example.go new file mode 100644 index 00000000..697a8625 --- /dev/null +++ b/core/gethwrappers/generated/vrf_ownerless_consumer_example/vrf_ownerless_consumer_example.go @@ -0,0 +1,254 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_ownerless_consumer_example + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFOwnerlessConsumerExampleMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"OnlyCallableFromLink\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"randomness\",\"type\":\"uint256\"}],\"name\":\"rawFulfillRandomness\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_randomnessOutput\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x60c060405234801561001057600080fd5b506040516106a73803806106a783398101604081905261002f91610069565b6001600160601b0319606092831b811660a052911b1660805261009c565b80516001600160a01b038116811461006457600080fd5b919050565b6000806040838503121561007c57600080fd5b6100858361004d565b91506100936020840161004d565b90509250929050565b60805160601c60a05160601c6105d36100d46000396000818160b50152610293015260008181610167015261025701526105d36000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c80635eb797831461005157806394985ddd1461006c578063a4c0ed3614610081578063e89e106a14610094575b600080fd5b61005a60015481565b60405190815260200160405180910390f35b61007f61007a3660046104cd565b61009d565b005b61007f61008f3660046103e9565b61014f565b61005a60025481565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610141576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f4f6e6c7920565246436f6f7264696e61746f722063616e2066756c66696c6c0060448201526064015b60405180910390fd5b61014b82826101e2565b5050565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146101be576040517f44b0e3c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006101cc828401846104b4565b90506101d88185610253565b6002555050505050565b600254821461024d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265717565737420494420697320696e636f72726563740000000000000000006044820152606401610138565b60015550565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16634000aea07f0000000000000000000000000000000000000000000000000000000000000000848660006040516020016102d0929190918252602082015260400190565b6040516020818303038152906040526040518463ffffffff1660e01b81526004016102fd939291906104ef565b602060405180830381600087803b1580156103175
7600080fd5b505af115801561032b573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061034f919061048b565b5060008381526020818152604080832054815180840188905280830185905230606082015260808082018390528351808303909101815260a0909101909252815191830191909120868452929091526103a9906001610587565b6000858152602081815260409182902092909255805180830187905280820184905281518082038301815260609091019091528051910120949350505050565b600080600080606085870312156103ff57600080fd5b843573ffffffffffffffffffffffffffffffffffffffff8116811461042357600080fd5b935060208501359250604085013567ffffffffffffffff8082111561044757600080fd5b818701915087601f83011261045b57600080fd5b81358181111561046a57600080fd5b88602082850101111561047c57600080fd5b95989497505060200194505050565b60006020828403121561049d57600080fd5b815180151581146104ad57600080fd5b9392505050565b6000602082840312156104c657600080fd5b5035919050565b600080604083850312156104e057600080fd5b50508035926020909101359150565b73ffffffffffffffffffffffffffffffffffffffff8416815260006020848184015260606040840152835180606085015260005b8181101561053f57858101830151858201608001528201610523565b81811115610551576000608083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160800195945050505050565b600082198211156105c1577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b50019056fea164736f6c6343000806000a", +} + +var VRFOwnerlessConsumerExampleABI = VRFOwnerlessConsumerExampleMetaData.ABI + +var VRFOwnerlessConsumerExampleBin = VRFOwnerlessConsumerExampleMetaData.Bin + +func DeployVRFOwnerlessConsumerExample(auth *bind.TransactOpts, backend bind.ContractBackend, _vrfCoordinator common.Address, _link common.Address) (common.Address, *types.Transaction, *VRFOwnerlessConsumerExample, error) { + parsed, err := VRFOwnerlessConsumerExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, 
errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFOwnerlessConsumerExampleBin), backend, _vrfCoordinator, _link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFOwnerlessConsumerExample{address: address, abi: *parsed, VRFOwnerlessConsumerExampleCaller: VRFOwnerlessConsumerExampleCaller{contract: contract}, VRFOwnerlessConsumerExampleTransactor: VRFOwnerlessConsumerExampleTransactor{contract: contract}, VRFOwnerlessConsumerExampleFilterer: VRFOwnerlessConsumerExampleFilterer{contract: contract}}, nil +} + +type VRFOwnerlessConsumerExample struct { + address common.Address + abi abi.ABI + VRFOwnerlessConsumerExampleCaller + VRFOwnerlessConsumerExampleTransactor + VRFOwnerlessConsumerExampleFilterer +} + +type VRFOwnerlessConsumerExampleCaller struct { + contract *bind.BoundContract +} + +type VRFOwnerlessConsumerExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFOwnerlessConsumerExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFOwnerlessConsumerExampleSession struct { + Contract *VRFOwnerlessConsumerExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFOwnerlessConsumerExampleCallerSession struct { + Contract *VRFOwnerlessConsumerExampleCaller + CallOpts bind.CallOpts +} + +type VRFOwnerlessConsumerExampleTransactorSession struct { + Contract *VRFOwnerlessConsumerExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFOwnerlessConsumerExampleRaw struct { + Contract *VRFOwnerlessConsumerExample +} + +type VRFOwnerlessConsumerExampleCallerRaw struct { + Contract *VRFOwnerlessConsumerExampleCaller +} + +type VRFOwnerlessConsumerExampleTransactorRaw struct { + Contract *VRFOwnerlessConsumerExampleTransactor +} + +func NewVRFOwnerlessConsumerExample(address common.Address, backend bind.ContractBackend) (*VRFOwnerlessConsumerExample, error) { + abi, err := 
abi.JSON(strings.NewReader(VRFOwnerlessConsumerExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFOwnerlessConsumerExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFOwnerlessConsumerExample{address: address, abi: abi, VRFOwnerlessConsumerExampleCaller: VRFOwnerlessConsumerExampleCaller{contract: contract}, VRFOwnerlessConsumerExampleTransactor: VRFOwnerlessConsumerExampleTransactor{contract: contract}, VRFOwnerlessConsumerExampleFilterer: VRFOwnerlessConsumerExampleFilterer{contract: contract}}, nil +} + +func NewVRFOwnerlessConsumerExampleCaller(address common.Address, caller bind.ContractCaller) (*VRFOwnerlessConsumerExampleCaller, error) { + contract, err := bindVRFOwnerlessConsumerExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFOwnerlessConsumerExampleCaller{contract: contract}, nil +} + +func NewVRFOwnerlessConsumerExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFOwnerlessConsumerExampleTransactor, error) { + contract, err := bindVRFOwnerlessConsumerExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFOwnerlessConsumerExampleTransactor{contract: contract}, nil +} + +func NewVRFOwnerlessConsumerExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFOwnerlessConsumerExampleFilterer, error) { + contract, err := bindVRFOwnerlessConsumerExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFOwnerlessConsumerExampleFilterer{contract: contract}, nil +} + +func bindVRFOwnerlessConsumerExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFOwnerlessConsumerExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + 
+func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFOwnerlessConsumerExample.Contract.VRFOwnerlessConsumerExampleCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFOwnerlessConsumerExample.Contract.VRFOwnerlessConsumerExampleTransactor.contract.Transfer(opts) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFOwnerlessConsumerExample.Contract.VRFOwnerlessConsumerExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFOwnerlessConsumerExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFOwnerlessConsumerExample.Contract.contract.Transfer(opts) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFOwnerlessConsumerExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleCaller) SRandomnessOutput(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFOwnerlessConsumerExample.contract.Call(opts, &out, "s_randomnessOutput") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleSession) SRandomnessOutput() (*big.Int, error) { + return _VRFOwnerlessConsumerExample.Contract.SRandomnessOutput(&_VRFOwnerlessConsumerExample.CallOpts) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleCallerSession) SRandomnessOutput() (*big.Int, error) { + return _VRFOwnerlessConsumerExample.Contract.SRandomnessOutput(&_VRFOwnerlessConsumerExample.CallOpts) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleCaller) SRequestId(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _VRFOwnerlessConsumerExample.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleSession) SRequestId() ([32]byte, error) { + return _VRFOwnerlessConsumerExample.Contract.SRequestId(&_VRFOwnerlessConsumerExample.CallOpts) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleCallerSession) SRequestId() ([32]byte, error) { + return _VRFOwnerlessConsumerExample.Contract.SRequestId(&_VRFOwnerlessConsumerExample.CallOpts) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleTransactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFOwnerlessConsumerExample.contract.Transact(opts, "onTokenTransfer", arg0, _amount, _data) +} + +func (_VRFOwnerlessConsumerExample 
*VRFOwnerlessConsumerExampleSession) OnTokenTransfer(arg0 common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFOwnerlessConsumerExample.Contract.OnTokenTransfer(&_VRFOwnerlessConsumerExample.TransactOpts, arg0, _amount, _data) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleTransactorSession) OnTokenTransfer(arg0 common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFOwnerlessConsumerExample.Contract.OnTokenTransfer(&_VRFOwnerlessConsumerExample.TransactOpts, arg0, _amount, _data) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleTransactor) RawFulfillRandomness(opts *bind.TransactOpts, requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFOwnerlessConsumerExample.contract.Transact(opts, "rawFulfillRandomness", requestId, randomness) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleSession) RawFulfillRandomness(requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFOwnerlessConsumerExample.Contract.RawFulfillRandomness(&_VRFOwnerlessConsumerExample.TransactOpts, requestId, randomness) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExampleTransactorSession) RawFulfillRandomness(requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFOwnerlessConsumerExample.Contract.RawFulfillRandomness(&_VRFOwnerlessConsumerExample.TransactOpts, requestId, randomness) +} + +func (_VRFOwnerlessConsumerExample *VRFOwnerlessConsumerExample) Address() common.Address { + return _VRFOwnerlessConsumerExample.address +} + +type VRFOwnerlessConsumerExampleInterface interface { + SRandomnessOutput(opts *bind.CallOpts) (*big.Int, error) + + SRequestId(opts *bind.CallOpts) ([32]byte, error) + + OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) + + RawFulfillRandomness(opts 
*bind.TransactOpts, requestId [32]byte, randomness *big.Int) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_single_consumer_example/vrf_single_consumer_example.go b/core/gethwrappers/generated/vrf_single_consumer_example/vrf_single_consumer_example.go new file mode 100644 index 00000000..ac5c081a --- /dev/null +++ b/core/gethwrappers/generated/vrf_single_consumer_example/vrf_single_consumer_example.go @@ -0,0 +1,369 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrf_single_consumer_example + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFSingleConsumerExampleMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"fundAndRequestRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"requestRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestConfig\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"t
ype\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"subscribe\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"topUpSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"unsubscribe\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60a06040523480156200001157600080fd5b50604051620013383803806200133883398101604081905262000034916200031a565b606086811b6001600160601b0319166080908152600080546001600160a01b03808b166001600160a01b0319928316178355600180548316918b16919091179055600680543392169190911790556040805160a08101825291825263ffffffff8881166020840181905261ffff891692840183905290871694830185905291909201849052600280546001600160701b0319166801000000000000000090920261ffff60601b1916919091176c010000000000000000000000009092029190911763ffffffff60701b1916600160701b90920291909117905560038190556200011c62000128565b505050505050620003e2565b6006546001600160a01b031633146200014057600080fd5b604080516001808252818301909252600091602080830190803683370190505090503081600081518110620001795762000179620003cc565b60200260200101906001600160a01b031690816001600160a01b03168152505060008054906101000a90046001600160a01b03166001600160a01b031663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b158015620001e857600080fd5b505af1158015620001fd573d6000803e3d6
000fd5b505050506040513d601f19601f820116820180604052508101906200022391906200039a565b600280546001600160401b0319166001600160401b039290921691821790556000805483516001600160a01b0390911692637341c10c9290918591906200026e576200026e620003cc565b60200260200101516040518363ffffffff1660e01b8152600401620002b19291906001600160401b039290921682526001600160a01b0316602082015260400190565b600060405180830381600087803b158015620002cc57600080fd5b505af1158015620002e1573d6000803e3d6000fd5b5050505050565b80516001600160a01b03811681146200030057600080fd5b919050565b805163ffffffff811681146200030057600080fd5b60008060008060008060c087890312156200033457600080fd5b6200033f87620002e8565b95506200034f60208801620002e8565b94506200035f6040880162000305565b9350606087015161ffff811681146200037757600080fd5b9250620003876080880162000305565b915060a087015190509295509295509295565b600060208284031215620003ad57600080fd5b81516001600160401b0381168114620003c557600080fd5b9392505050565b634e487b7160e01b600052603260045260246000fd5b60805160601c610f3062000408600039600081816102ea01526103520152610f306000f3fe608060405234801561001057600080fd5b50600436106100bd5760003560e01c806386850e9311610076578063e0c862891161005b578063e0c86289146101cb578063e89e106a146101d3578063f6eaffc8146101ea57600080fd5b806386850e93146101b05780638f449a05146101c357600080fd5b80636fd700bb116100a75780636fd700bb146100ea5780637262561c146100fd5780637db9263f1461011057600080fd5b8062f714ce146100c25780631fe543e3146100d7575b600080fd5b6100d56100d0366004610ce8565b6101fd565b005b6100d56100e5366004610d14565b6102d2565b6100d56100f8366004610cb6565b610392565b6100d561010b366004610c72565b6105e0565b6002546003546101699167ffffffffffffffff81169163ffffffff68010000000000000000830481169261ffff6c01000000000000000000000000820416926e0100000000000000000000000000009091049091169085565b6040805167ffffffffffffffff909616865263ffffffff948516602087015261ffff90931692850192909252919091166060830152608082015260a0015b60405180910390f35b6100d56101be366004610cb6565b6106c8565b6100d56107ad565b6100d56109d4565b6101dc6005548
1565b6040519081526020016101a7565b6101dc6101f8366004610cb6565b610b35565b60065473ffffffffffffffffffffffffffffffffffffffff16331461022157600080fd5b6001546040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8381166004830152602482018590529091169063a9059cbb90604401602060405180830381600087803b15801561029557600080fd5b505af11580156102a9573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906102cd9190610c94565b505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610384576040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660248201526044015b60405180910390fd5b61038e8282610b56565b5050565b60065473ffffffffffffffffffffffffffffffffffffffff1633146103b657600080fd5b6040805160a08101825260025467ffffffffffffffff811680835263ffffffff680100000000000000008304811660208086019190915261ffff6c01000000000000000000000000850416858701526e010000000000000000000000000000909304166060840152600354608084015260015460005485518085019390935285518084039094018452828601958690527f4000aea000000000000000000000000000000000000000000000000000000000909552929373ffffffffffffffffffffffffffffffffffffffff93841693634000aea09361049e9391909216918791604401610e2d565b602060405180830381600087803b1580156104b857600080fd5b505af11580156104cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104f09190610c94565b50600054608082015182516040808501516020860151606087015192517f5d3b1d30000000000000000000000000000000000000000000000000000000008152600481019590955267ffffffffffffffff909316602485015261ffff16604484015263ffffffff918216606484015216608482015273ffffffffffffffffffffffffffffffffffffffff90911690635d3b1d309060a401602060405180830381600087803b1580156105a157600080fd5b505af11580156105b5573d6000803e3d6000fd5b505050506040513d601f19601
f820116820180604052508101906105d99190610ccf565b6005555050565b60065473ffffffffffffffffffffffffffffffffffffffff16331461060457600080fd5b6000546002546040517fd7ae1d3000000000000000000000000000000000000000000000000000000000815267ffffffffffffffff909116600482015273ffffffffffffffffffffffffffffffffffffffff83811660248301529091169063d7ae1d3090604401600060405180830381600087803b15801561068557600080fd5b505af1158015610699573d6000803e3d6000fd5b5050600280547fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000169055505050565b60065473ffffffffffffffffffffffffffffffffffffffff1633146106ec57600080fd5b6001546000546002546040805167ffffffffffffffff909216602083015273ffffffffffffffffffffffffffffffffffffffff93841693634000aea09316918591016040516020818303038152906040526040518463ffffffff1660e01b815260040161075b93929190610e2d565b602060405180830381600087803b15801561077557600080fd5b505af1158015610789573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061038e9190610c94565b60065473ffffffffffffffffffffffffffffffffffffffff1633146107d157600080fd5b60408051600180825281830190925260009160208083019080368337019050509050308160008151811061080757610807610ec5565b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505060008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b1580156108a957600080fd5b505af11580156108bd573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108e19190610e03565b600280547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff92909216918217905560008054835173ffffffffffffffffffffffffffffffffffffffff90911692637341c10c92909185919061094f5761094f610ec5565b60200260200101516040518363ffffffff1660e01b815260040161099f92919067ffffffffffffffff92909216825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b600060405180830381600
087803b1580156109b957600080fd5b505af11580156109cd573d6000803e3d6000fd5b5050505050565b60065473ffffffffffffffffffffffffffffffffffffffff1633146109f857600080fd5b6040805160a08101825260025467ffffffffffffffff811680835263ffffffff68010000000000000000830481166020850181905261ffff6c010000000000000000000000008504168587018190526e010000000000000000000000000000909404909116606085018190526003546080860181905260005496517f5d3b1d3000000000000000000000000000000000000000000000000000000000815260048101919091526024810193909352604483019390935260648201526084810191909152909173ffffffffffffffffffffffffffffffffffffffff1690635d3b1d309060a401602060405180830381600087803b158015610af757600080fd5b505af1158015610b0b573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610b2f9190610ccf565b60055550565b60048181548110610b4557600080fd5b600091825260209091200154905081565b6005548214610bc1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265717565737420494420697320696e636f7272656374000000000000000000604482015260640161037b565b80516004805482825560008290526102cd927f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b91820191602086018215610c24579160200282015b82811115610c24578251825591602001919060010190610c09565b50610c30929150610c34565b5090565b5b80821115610c305760008155600101610c35565b803573ffffffffffffffffffffffffffffffffffffffff81168114610c6d57600080fd5b919050565b600060208284031215610c8457600080fd5b610c8d82610c49565b9392505050565b600060208284031215610ca657600080fd5b81518015158114610c8d57600080fd5b600060208284031215610cc857600080fd5b5035919050565b600060208284031215610ce157600080fd5b5051919050565b60008060408385031215610cfb57600080fd5b82359150610d0b60208401610c49565b90509250929050565b60008060408385031215610d2757600080fd5b8235915060208084013567ffffffffffffffff80821115610d4757600080fd5b818601915086601f830112610d5b57600080fd5b813581811115610d6d57610d6d610ef4565b8060051b6040517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffe0603f83011681018181108582111715610db057610db0610ef4565b604052828152858101935084860182860187018b1015610dcf57600080fd5b600095505b83861015610df2578035855260019590950194938601938601610dd4565b508096505050505050509250929050565b600060208284031215610e1557600080fd5b815167ffffffffffffffff81168114610c8d57600080fd5b73ffffffffffffffffffffffffffffffffffffffff8416815260006020848184015260606040840152835180606085015260005b81811015610e7d57858101830151858201608001528201610e61565b81811115610e8f576000608083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160800195945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFSingleConsumerExampleABI = VRFSingleConsumerExampleMetaData.ABI + +var VRFSingleConsumerExampleBin = VRFSingleConsumerExampleMetaData.Bin + +func DeployVRFSingleConsumerExample(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator common.Address, link common.Address, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte) (common.Address, *types.Transaction, *VRFSingleConsumerExample, error) { + parsed, err := VRFSingleConsumerExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFSingleConsumerExampleBin), backend, vrfCoordinator, link, callbackGasLimit, requestConfirmations, numWords, keyHash) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFSingleConsumerExample{address: address, abi: *parsed, VRFSingleConsumerExampleCaller: VRFSingleConsumerExampleCaller{contract: contract}, VRFSingleConsumerExampleTransactor: 
VRFSingleConsumerExampleTransactor{contract: contract}, VRFSingleConsumerExampleFilterer: VRFSingleConsumerExampleFilterer{contract: contract}}, nil +} + +type VRFSingleConsumerExample struct { + address common.Address + abi abi.ABI + VRFSingleConsumerExampleCaller + VRFSingleConsumerExampleTransactor + VRFSingleConsumerExampleFilterer +} + +type VRFSingleConsumerExampleCaller struct { + contract *bind.BoundContract +} + +type VRFSingleConsumerExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFSingleConsumerExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFSingleConsumerExampleSession struct { + Contract *VRFSingleConsumerExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFSingleConsumerExampleCallerSession struct { + Contract *VRFSingleConsumerExampleCaller + CallOpts bind.CallOpts +} + +type VRFSingleConsumerExampleTransactorSession struct { + Contract *VRFSingleConsumerExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFSingleConsumerExampleRaw struct { + Contract *VRFSingleConsumerExample +} + +type VRFSingleConsumerExampleCallerRaw struct { + Contract *VRFSingleConsumerExampleCaller +} + +type VRFSingleConsumerExampleTransactorRaw struct { + Contract *VRFSingleConsumerExampleTransactor +} + +func NewVRFSingleConsumerExample(address common.Address, backend bind.ContractBackend) (*VRFSingleConsumerExample, error) { + abi, err := abi.JSON(strings.NewReader(VRFSingleConsumerExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFSingleConsumerExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFSingleConsumerExample{address: address, abi: abi, VRFSingleConsumerExampleCaller: VRFSingleConsumerExampleCaller{contract: contract}, VRFSingleConsumerExampleTransactor: VRFSingleConsumerExampleTransactor{contract: contract}, VRFSingleConsumerExampleFilterer: VRFSingleConsumerExampleFilterer{contract: contract}}, nil +} + +func 
NewVRFSingleConsumerExampleCaller(address common.Address, caller bind.ContractCaller) (*VRFSingleConsumerExampleCaller, error) { + contract, err := bindVRFSingleConsumerExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFSingleConsumerExampleCaller{contract: contract}, nil +} + +func NewVRFSingleConsumerExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFSingleConsumerExampleTransactor, error) { + contract, err := bindVRFSingleConsumerExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFSingleConsumerExampleTransactor{contract: contract}, nil +} + +func NewVRFSingleConsumerExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFSingleConsumerExampleFilterer, error) { + contract, err := bindVRFSingleConsumerExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFSingleConsumerExampleFilterer{contract: contract}, nil +} + +func bindVRFSingleConsumerExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFSingleConsumerExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFSingleConsumerExample.Contract.VRFSingleConsumerExampleCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.VRFSingleConsumerExampleTransactor.contract.Transfer(opts) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.VRFSingleConsumerExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFSingleConsumerExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.contract.Transfer(opts) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleCaller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFSingleConsumerExample.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFSingleConsumerExample.Contract.SRandomWords(&_VRFSingleConsumerExample.CallOpts, arg0) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleCallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFSingleConsumerExample.Contract.SRandomWords(&_VRFSingleConsumerExample.CallOpts, arg0) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleCaller) SRequestConfig(opts *bind.CallOpts) (SRequestConfig, + + error) { + var out []interface{} + err := _VRFSingleConsumerExample.contract.Call(opts, &out, "s_requestConfig") + + outstruct := new(SRequestConfig) + if err != nil { + return *outstruct, err + } + + outstruct.SubId = *abi.ConvertType(out[0], new(uint64)).(*uint64) + outstruct.CallbackGasLimit = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.RequestConfirmations = *abi.ConvertType(out[2], new(uint16)).(*uint16) + outstruct.NumWords = *abi.ConvertType(out[3], new(uint32)).(*uint32) + outstruct.KeyHash = *abi.ConvertType(out[4], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleSession) SRequestConfig() (SRequestConfig, + + error) { + return _VRFSingleConsumerExample.Contract.SRequestConfig(&_VRFSingleConsumerExample.CallOpts) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleCallerSession) SRequestConfig() (SRequestConfig, + + error) { + return 
_VRFSingleConsumerExample.Contract.SRequestConfig(&_VRFSingleConsumerExample.CallOpts) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleCaller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFSingleConsumerExample.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleSession) SRequestId() (*big.Int, error) { + return _VRFSingleConsumerExample.Contract.SRequestId(&_VRFSingleConsumerExample.CallOpts) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleCallerSession) SRequestId() (*big.Int, error) { + return _VRFSingleConsumerExample.Contract.SRequestId(&_VRFSingleConsumerExample.CallOpts) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactor) FundAndRequestRandomWords(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFSingleConsumerExample.contract.Transact(opts, "fundAndRequestRandomWords", amount) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleSession) FundAndRequestRandomWords(amount *big.Int) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.FundAndRequestRandomWords(&_VRFSingleConsumerExample.TransactOpts, amount) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactorSession) FundAndRequestRandomWords(amount *big.Int) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.FundAndRequestRandomWords(&_VRFSingleConsumerExample.TransactOpts, amount) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFSingleConsumerExample.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFSingleConsumerExample 
*VRFSingleConsumerExampleSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.RawFulfillRandomWords(&_VRFSingleConsumerExample.TransactOpts, requestId, randomWords) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.RawFulfillRandomWords(&_VRFSingleConsumerExample.TransactOpts, requestId, randomWords) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactor) RequestRandomWords(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFSingleConsumerExample.contract.Transact(opts, "requestRandomWords") +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleSession) RequestRandomWords() (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.RequestRandomWords(&_VRFSingleConsumerExample.TransactOpts) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactorSession) RequestRandomWords() (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.RequestRandomWords(&_VRFSingleConsumerExample.TransactOpts) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactor) Subscribe(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFSingleConsumerExample.contract.Transact(opts, "subscribe") +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleSession) Subscribe() (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.Subscribe(&_VRFSingleConsumerExample.TransactOpts) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactorSession) Subscribe() (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.Subscribe(&_VRFSingleConsumerExample.TransactOpts) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactor) 
TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFSingleConsumerExample.contract.Transact(opts, "topUpSubscription", amount) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.TopUpSubscription(&_VRFSingleConsumerExample.TransactOpts, amount) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactorSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.TopUpSubscription(&_VRFSingleConsumerExample.TransactOpts, amount) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactor) Unsubscribe(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFSingleConsumerExample.contract.Transact(opts, "unsubscribe", to) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleSession) Unsubscribe(to common.Address) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.Unsubscribe(&_VRFSingleConsumerExample.TransactOpts, to) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactorSession) Unsubscribe(to common.Address) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.Unsubscribe(&_VRFSingleConsumerExample.TransactOpts, to) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactor) Withdraw(opts *bind.TransactOpts, amount *big.Int, to common.Address) (*types.Transaction, error) { + return _VRFSingleConsumerExample.contract.Transact(opts, "withdraw", amount, to) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleSession) Withdraw(amount *big.Int, to common.Address) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.Withdraw(&_VRFSingleConsumerExample.TransactOpts, amount, to) +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExampleTransactorSession) 
Withdraw(amount *big.Int, to common.Address) (*types.Transaction, error) { + return _VRFSingleConsumerExample.Contract.Withdraw(&_VRFSingleConsumerExample.TransactOpts, amount, to) +} + +type SRequestConfig struct { + SubId uint64 + CallbackGasLimit uint32 + RequestConfirmations uint16 + NumWords uint32 + KeyHash [32]byte +} + +func (_VRFSingleConsumerExample *VRFSingleConsumerExample) Address() common.Address { + return _VRFSingleConsumerExample.address +} + +type VRFSingleConsumerExampleInterface interface { + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequestConfig(opts *bind.CallOpts) (SRequestConfig, + + error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + FundAndRequestRandomWords(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts) (*types.Transaction, error) + + Subscribe(opts *bind.TransactOpts) (*types.Transaction, error) + + TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + Unsubscribe(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Withdraw(opts *bind.TransactOpts, amount *big.Int, to common.Address) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_testnet_d20/vrf_testnet_d20.go b/core/gethwrappers/generated/vrf_testnet_d20/vrf_testnet_d20.go new file mode 100644 index 00000000..4ee9da4d --- /dev/null +++ b/core/gethwrappers/generated/vrf_testnet_d20/vrf_testnet_d20.go @@ -0,0 +1,324 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_testnet_d20 + +import ( + "math/big" + "strings" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// VRFTestnetD20ABI is the input ABI used to generate the binding from. +const VRFTestnetD20ABI = "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"d20Results\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRoll\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"d20result\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"nonces\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"randomness\",\"type\":\"uint256\"}],\"name\":\"rawFulfillRandomness\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"userProvidedSeed\",\"type\":\"uint256\"}],\"na
me\":\"rollDice\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" + +// VRFTestnetD20Bin is the compiled bytecode used for deploying new contracts. +var VRFTestnetD20Bin = "0x60c060405234801561001057600080fd5b5060405161081b38038061081b8339818101604052606081101561003357600080fd5b50805160208201516040909201516001600160601b0319606083811b821660a05284901b16608052600255670de0b6b3a76400006003556001600160a01b03918216911661077d61009e6000398061013852806104005250806101eb52806103c4525061077d6000f3fe608060405234801561001057600080fd5b50600436106100675760003560e01c80639e317f12116100505780639e317f12146100c0578063acfff377146100dd578063ae383a4d146100fa57610067565b80634ab5fc501461006c57806394985ddd1461009b575b600080fd5b6100896004803603602081101561008257600080fd5b5035610102565b60408051918252519081900360200190f35b6100be600480360360408110156100b157600080fd5b5080359060200135610120565b005b610089600480360360208110156100d657600080fd5b50356101d2565b610089600480360360208110156100f357600080fd5b50356101e4565b610089610321565b6001818154811061010f57fe5b600091825260209091200154905081565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146101c457604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f4f6e6c7920565246436f6f7264696e61746f722063616e2066756c66696c6c00604482015290519081900360640190fd5b6101ce8282610365565b5050565b60006020819052908152604090205481565b60006003547f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff166370a08231306040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b15801561028657600080fd5b505afa15801561029a573d6000803e3d6000fd5b505050506040513d60208110156102b057600080fd5b50511015610309576040517f08c379
a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602b81526020018061071d602b913960400191505060405180910390fd5b600061031a600254600354856103c0565b9392505050565b60018054600091907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff810190811061035557fe5b9060005260206000200154905090565b6000610389600161037d84601463ffffffff6105a916565b9063ffffffff61062816565b6001805480820182556000919091527fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf60155505050565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16634000aea07f000000000000000000000000000000000000000000000000000000000000000085878660405160200180838152602001828152602001925050506040516020818303038152906040526040518463ffffffff1660e01b8152600401808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b838110156104cc5781810151838201526020016104b4565b50505050905090810190601f1680156104f95780820380516001836020036101000a031916815260200191505b50945050505050602060405180830381600087803b15801561051a57600080fd5b505af115801561052e573d6000803e3d6000fd5b505050506040513d602081101561054457600080fd5b50506000848152602081905260408120546105649086908590309061069c565b60008681526020819052604090205490915061058790600163ffffffff61062816565b6000868152602081905260409020556105a085826106f0565b95945050505050565b60008161061757604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f536166654d6174683a206d6f64756c6f206279207a65726f0000000000000000604482015290519081900360640190fd5b81838161062057fe5b069392505050565b60008282018381101561031a57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b6040805160208082019690
96528082019490945273ffffffffffffffffffffffffffffffffffffffff9290921660608401526080808401919091528151808403909101815260a09092019052805191012090565b60408051602080820194909452808201929092528051808303820181526060909201905280519101209056fe4e6f7420656e6f756768204c494e4b202d2066696c6c20636f6e7472616374207769746820666175636574a264697066735822beefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeef64736f6c6343decafe0033" + +// DeployVRFTestnetD20 deploys a new Ethereum contract, binding an instance of VRFTestnetD20 to it. +func DeployVRFTestnetD20(auth *bind.TransactOpts, backend bind.ContractBackend, _vrfCoordinator common.Address, _link common.Address, _keyHash [32]byte) (common.Address, *types.Transaction, *VRFTestnetD20, error) { + parsed, err := abi.JSON(strings.NewReader(VRFTestnetD20ABI)) + if err != nil { + return common.Address{}, nil, nil, err + } + + address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(VRFTestnetD20Bin), backend, _vrfCoordinator, _link, _keyHash) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFTestnetD20{VRFTestnetD20Caller: VRFTestnetD20Caller{contract: contract}, VRFTestnetD20Transactor: VRFTestnetD20Transactor{contract: contract}, VRFTestnetD20Filterer: VRFTestnetD20Filterer{contract: contract}}, nil +} + +// VRFTestnetD20 is an auto generated Go binding around an Ethereum contract. +type VRFTestnetD20 struct { + VRFTestnetD20Caller // Read-only binding to the contract + VRFTestnetD20Transactor // Write-only binding to the contract + VRFTestnetD20Filterer // Log filterer for contract events +} + +// VRFTestnetD20Caller is an auto generated read-only Go binding around an Ethereum contract. +type VRFTestnetD20Caller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFTestnetD20Transactor is an auto generated write-only Go binding around an Ethereum contract. 
+type VRFTestnetD20Transactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFTestnetD20Filterer is an auto generated log filtering Go binding around an Ethereum contract events. +type VRFTestnetD20Filterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFTestnetD20Session is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type VRFTestnetD20Session struct { + Contract *VRFTestnetD20 // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// VRFTestnetD20CallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type VRFTestnetD20CallerSession struct { + Contract *VRFTestnetD20Caller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// VRFTestnetD20TransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type VRFTestnetD20TransactorSession struct { + Contract *VRFTestnetD20Transactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// VRFTestnetD20Raw is an auto generated low-level Go binding around an Ethereum contract. +type VRFTestnetD20Raw struct { + Contract *VRFTestnetD20 // Generic contract binding to access the raw methods on +} + +// VRFTestnetD20CallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. 
+type VRFTestnetD20CallerRaw struct { + Contract *VRFTestnetD20Caller // Generic read-only contract binding to access the raw methods on +} + +// VRFTestnetD20TransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type VRFTestnetD20TransactorRaw struct { + Contract *VRFTestnetD20Transactor // Generic write-only contract binding to access the raw methods on +} + +// NewVRFTestnetD20 creates a new instance of VRFTestnetD20, bound to a specific deployed contract. +func NewVRFTestnetD20(address common.Address, backend bind.ContractBackend) (*VRFTestnetD20, error) { + contract, err := bindVRFTestnetD20(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFTestnetD20{VRFTestnetD20Caller: VRFTestnetD20Caller{contract: contract}, VRFTestnetD20Transactor: VRFTestnetD20Transactor{contract: contract}, VRFTestnetD20Filterer: VRFTestnetD20Filterer{contract: contract}}, nil +} + +// NewVRFTestnetD20Caller creates a new read-only instance of VRFTestnetD20, bound to a specific deployed contract. +func NewVRFTestnetD20Caller(address common.Address, caller bind.ContractCaller) (*VRFTestnetD20Caller, error) { + contract, err := bindVRFTestnetD20(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFTestnetD20Caller{contract: contract}, nil +} + +// NewVRFTestnetD20Transactor creates a new write-only instance of VRFTestnetD20, bound to a specific deployed contract. +func NewVRFTestnetD20Transactor(address common.Address, transactor bind.ContractTransactor) (*VRFTestnetD20Transactor, error) { + contract, err := bindVRFTestnetD20(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFTestnetD20Transactor{contract: contract}, nil +} + +// NewVRFTestnetD20Filterer creates a new log filterer instance of VRFTestnetD20, bound to a specific deployed contract. 
+func NewVRFTestnetD20Filterer(address common.Address, filterer bind.ContractFilterer) (*VRFTestnetD20Filterer, error) { + contract, err := bindVRFTestnetD20(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFTestnetD20Filterer{contract: contract}, nil +} + +// bindVRFTestnetD20 binds a generic wrapper to an already deployed contract. +func bindVRFTestnetD20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(VRFTestnetD20ABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_VRFTestnetD20 *VRFTestnetD20Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFTestnetD20.Contract.VRFTestnetD20Caller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_VRFTestnetD20 *VRFTestnetD20Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFTestnetD20.Contract.VRFTestnetD20Transactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_VRFTestnetD20 *VRFTestnetD20Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFTestnetD20.Contract.VRFTestnetD20Transactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. 
The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_VRFTestnetD20 *VRFTestnetD20CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFTestnetD20.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_VRFTestnetD20 *VRFTestnetD20TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFTestnetD20.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_VRFTestnetD20 *VRFTestnetD20TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFTestnetD20.Contract.contract.Transact(opts, method, params...) +} + +// D20Results is a free data retrieval call binding the contract method 0x4ab5fc50. +// +// Solidity: function d20Results(uint256 ) view returns(uint256) +func (_VRFTestnetD20 *VRFTestnetD20Caller) D20Results(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFTestnetD20.contract.Call(opts, &out, "d20Results", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// D20Results is a free data retrieval call binding the contract method 0x4ab5fc50. +// +// Solidity: function d20Results(uint256 ) view returns(uint256) +func (_VRFTestnetD20 *VRFTestnetD20Session) D20Results(arg0 *big.Int) (*big.Int, error) { + return _VRFTestnetD20.Contract.D20Results(&_VRFTestnetD20.CallOpts, arg0) +} + +// D20Results is a free data retrieval call binding the contract method 0x4ab5fc50. 
+// +// Solidity: function d20Results(uint256 ) view returns(uint256) +func (_VRFTestnetD20 *VRFTestnetD20CallerSession) D20Results(arg0 *big.Int) (*big.Int, error) { + return _VRFTestnetD20.Contract.D20Results(&_VRFTestnetD20.CallOpts, arg0) +} + +// LatestRoll is a free data retrieval call binding the contract method 0xae383a4d. +// +// Solidity: function latestRoll() view returns(uint256 d20result) +func (_VRFTestnetD20 *VRFTestnetD20Caller) LatestRoll(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFTestnetD20.contract.Call(opts, &out, "latestRoll") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// LatestRoll is a free data retrieval call binding the contract method 0xae383a4d. +// +// Solidity: function latestRoll() view returns(uint256 d20result) +func (_VRFTestnetD20 *VRFTestnetD20Session) LatestRoll() (*big.Int, error) { + return _VRFTestnetD20.Contract.LatestRoll(&_VRFTestnetD20.CallOpts) +} + +// LatestRoll is a free data retrieval call binding the contract method 0xae383a4d. +// +// Solidity: function latestRoll() view returns(uint256 d20result) +func (_VRFTestnetD20 *VRFTestnetD20CallerSession) LatestRoll() (*big.Int, error) { + return _VRFTestnetD20.Contract.LatestRoll(&_VRFTestnetD20.CallOpts) +} + +// Nonces is a free data retrieval call binding the contract method 0x9e317f12. +// +// Solidity: function nonces(bytes32 ) view returns(uint256) +func (_VRFTestnetD20 *VRFTestnetD20Caller) Nonces(opts *bind.CallOpts, arg0 [32]byte) (*big.Int, error) { + var out []interface{} + err := _VRFTestnetD20.contract.Call(opts, &out, "nonces", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// Nonces is a free data retrieval call binding the contract method 0x9e317f12. 
+// +// Solidity: function nonces(bytes32 ) view returns(uint256) +func (_VRFTestnetD20 *VRFTestnetD20Session) Nonces(arg0 [32]byte) (*big.Int, error) { + return _VRFTestnetD20.Contract.Nonces(&_VRFTestnetD20.CallOpts, arg0) +} + +// Nonces is a free data retrieval call binding the contract method 0x9e317f12. +// +// Solidity: function nonces(bytes32 ) view returns(uint256) +func (_VRFTestnetD20 *VRFTestnetD20CallerSession) Nonces(arg0 [32]byte) (*big.Int, error) { + return _VRFTestnetD20.Contract.Nonces(&_VRFTestnetD20.CallOpts, arg0) +} + +// RawFulfillRandomness is a paid mutator transaction binding the contract method 0x94985ddd. +// +// Solidity: function rawFulfillRandomness(bytes32 requestId, uint256 randomness) returns() +func (_VRFTestnetD20 *VRFTestnetD20Transactor) RawFulfillRandomness(opts *bind.TransactOpts, requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFTestnetD20.contract.Transact(opts, "rawFulfillRandomness", requestId, randomness) +} + +// RawFulfillRandomness is a paid mutator transaction binding the contract method 0x94985ddd. +// +// Solidity: function rawFulfillRandomness(bytes32 requestId, uint256 randomness) returns() +func (_VRFTestnetD20 *VRFTestnetD20Session) RawFulfillRandomness(requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFTestnetD20.Contract.RawFulfillRandomness(&_VRFTestnetD20.TransactOpts, requestId, randomness) +} + +// RawFulfillRandomness is a paid mutator transaction binding the contract method 0x94985ddd. +// +// Solidity: function rawFulfillRandomness(bytes32 requestId, uint256 randomness) returns() +func (_VRFTestnetD20 *VRFTestnetD20TransactorSession) RawFulfillRandomness(requestId [32]byte, randomness *big.Int) (*types.Transaction, error) { + return _VRFTestnetD20.Contract.RawFulfillRandomness(&_VRFTestnetD20.TransactOpts, requestId, randomness) +} + +// RollDice is a paid mutator transaction binding the contract method 0xacfff377. 
+// +// Solidity: function rollDice(uint256 userProvidedSeed) returns(bytes32 requestId) +func (_VRFTestnetD20 *VRFTestnetD20Transactor) RollDice(opts *bind.TransactOpts, userProvidedSeed *big.Int) (*types.Transaction, error) { + return _VRFTestnetD20.contract.Transact(opts, "rollDice", userProvidedSeed) +} + +// RollDice is a paid mutator transaction binding the contract method 0xacfff377. +// +// Solidity: function rollDice(uint256 userProvidedSeed) returns(bytes32 requestId) +func (_VRFTestnetD20 *VRFTestnetD20Session) RollDice(userProvidedSeed *big.Int) (*types.Transaction, error) { + return _VRFTestnetD20.Contract.RollDice(&_VRFTestnetD20.TransactOpts, userProvidedSeed) +} + +// RollDice is a paid mutator transaction binding the contract method 0xacfff377. +// +// Solidity: function rollDice(uint256 userProvidedSeed) returns(bytes32 requestId) +func (_VRFTestnetD20 *VRFTestnetD20TransactorSession) RollDice(userProvidedSeed *big.Int) (*types.Transaction, error) { + return _VRFTestnetD20.Contract.RollDice(&_VRFTestnetD20.TransactOpts, userProvidedSeed) +} diff --git a/core/gethwrappers/generated/vrf_v2_consumer_wrapper/vrf_v2_consumer_wrapper.go b/core/gethwrappers/generated/vrf_v2_consumer_wrapper/vrf_v2_consumer_wrapper.go new file mode 100644 index 00000000..6c724d3e --- /dev/null +++ b/core/gethwrappers/generated/vrf_v2_consumer_wrapper/vrf_v2_consumer_wrapper.go @@ -0,0 +1,951 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_v2_consumer_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFv2ConsumerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"RequestFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"i
ndexed\":false,\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"}],\"name\":\"RequestSent\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_requestId\",\"type\":\"uint256\"}],\"name\":\"getRequestStatus\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastRequestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"requestIds\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"requestRandomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"fu
nction\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requests\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"exists\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60a060405234801561001057600080fd5b50604051610e31380380610e3183398101604081905261002f9161019a565b6001600160601b0319606082901b1660805233806000816100975760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156100c7576100c7816100f0565b5050600380546001600160a01b0319166001600160a01b039390931692909217909155506101ca565b6001600160a01b0381163314156101495760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161008e565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000602082840312156101ac57600080fd5b81516001600160a01b03811681146101c357600080fd5b9392505050565b60805160601c610c426101ef600039600081816101be01526102260152610c426000f3fe608060405234801561001057600080fd5b50600436106100a35760003560e01c80639561f02311610076578063d8a4676f1161005b578063d8a4676f14610169578063f2fde38b1461018a578063fc2a88c31461019d57600080fd5b80639561f02314610113578063a168fa891461012657600080fd5b80631fe543e3146100a857806379ba5097146100bd5780638796ba8c146100c55780638da5cb5b146100eb575b600080fd5b6100bb6100b6366004610a2c565b6101a6565b005b6100bb610266565b6100d86100d33660046109fa565b610363565b6040519081526020015b60405180910390f35b60005460405173ffffffffff
ffffffffffffffffffffffffffffff90911681526020016100e2565b6100d8610121366004610b1b565b610384565b6101526101343660046109fa565b60026020526000908152604090205460ff8082169161010090041682565b6040805192151583529015156020830152016100e2565b61017c6101773660046109fa565b610593565b6040516100e2929190610bca565b6100bb6101983660046109bd565b6106ad565b6100d860055481565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610258576040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660248201526044015b60405180910390fd5b61026282826106c1565b5050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146102e7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015260640161024f565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6004818154811061037357600080fd5b600091825260209091200154905081565b600061038e6107cb565b6003546040517f5d3b1d300000000000000000000000000000000000000000000000000000000081526004810184905267ffffffffffffffff8816602482015261ffff8616604482015263ffffffff80881660648301528516608482015273ffffffffffffffffffffffffffffffffffffffff90911690635d3b1d309060a401602060405180830381600087803b15801561042857600080fd5b505af115801561043c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104609190610a13565b604080516060810182526000808252600160208084018281528551848152808301875285870190815287855260028352959093208451815494517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00009095169015157fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ff00ff161761010094151594909402939093178355935180519596509294919361050f9391850192910190610944565b5050600480546001810182556000919091527f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b018290555060058190556040805182815263ffffffff851660208201527fcc58b13ad3eab50626c6a6300b1d139cd6ebb1688a7cced9461c2f7e762665ee910160405180910390a195945050505050565b600081815260026020526040812054606090610100900460ff16610613576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f72657175657374206e6f7420666f756e64000000000000000000000000000000604482015260640161024f565b60008381526002602090815260408083208151606081018352815460ff808216151583526101009091041615158185015260018201805484518187028101870186528181529295939486019383018282801561068e57602002820191906000526020600020905b81548152602001906001019080831161067a575b5050505050815250509050806000015181604001519250925050915091565b6106b56107cb565b6106be8161084e565b50565b600082815260026020526040902054610100900460ff1661073e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f72657175657374206e6f7420666f756e64000000000000000000000000000000604482015260640161024f565b600082815260026020908152604090912080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660019081178255835161078d939290910191840190610944565b507ffe2e2d779dba245964d4e3ef9b994be63856fd568bf7d3ca9e224755cb1bd54d82826040516107bf929190610bed565b60405180910390a15050565b60005473ffffffffffffffffffffffffffffffffffffffff16331461084c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161024f565b565b73ffffffffffffffffffffffffffffffffffffffff81163314156108ce576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260
640161024f565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b82805482825590600052602060002090810192821561097f579160200282015b8281111561097f578251825591602001919060010190610964565b5061098b92915061098f565b5090565b5b8082111561098b5760008155600101610990565b803563ffffffff811681146109b857600080fd5b919050565b6000602082840312156109cf57600080fd5b813573ffffffffffffffffffffffffffffffffffffffff811681146109f357600080fd5b9392505050565b600060208284031215610a0c57600080fd5b5035919050565b600060208284031215610a2557600080fd5b5051919050565b60008060408385031215610a3f57600080fd5b8235915060208084013567ffffffffffffffff80821115610a5f57600080fd5b818601915086601f830112610a7357600080fd5b813581811115610a8557610a85610c06565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f83011681018181108582111715610ac857610ac8610c06565b604052828152858101935084860182860187018b1015610ae757600080fd5b600095505b83861015610b0a578035855260019590950194938601938601610aec565b508096505050505050509250929050565b600080600080600060a08688031215610b3357600080fd5b853567ffffffffffffffff81168114610b4b57600080fd5b9450610b59602087016109a4565b9350604086013561ffff81168114610b7057600080fd5b9250610b7e606087016109a4565b949793965091946080013592915050565b600081518084526020808501945080840160005b83811015610bbf57815187529582019590820190600101610ba3565b509495945050505050565b8215158152604060208201526000610be56040830184610b8f565b949350505050565b828152604060208201526000610be56040830184610b8f565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFv2ConsumerABI = VRFv2ConsumerMetaData.ABI + +var VRFv2ConsumerBin = VRFv2ConsumerMetaData.Bin + +func DeployVRFv2Consumer(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator 
common.Address) (common.Address, *types.Transaction, *VRFv2Consumer, error) { + parsed, err := VRFv2ConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFv2ConsumerBin), backend, vrfCoordinator) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFv2Consumer{address: address, abi: *parsed, VRFv2ConsumerCaller: VRFv2ConsumerCaller{contract: contract}, VRFv2ConsumerTransactor: VRFv2ConsumerTransactor{contract: contract}, VRFv2ConsumerFilterer: VRFv2ConsumerFilterer{contract: contract}}, nil +} + +type VRFv2Consumer struct { + address common.Address + abi abi.ABI + VRFv2ConsumerCaller + VRFv2ConsumerTransactor + VRFv2ConsumerFilterer +} + +type VRFv2ConsumerCaller struct { + contract *bind.BoundContract +} + +type VRFv2ConsumerTransactor struct { + contract *bind.BoundContract +} + +type VRFv2ConsumerFilterer struct { + contract *bind.BoundContract +} + +type VRFv2ConsumerSession struct { + Contract *VRFv2Consumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFv2ConsumerCallerSession struct { + Contract *VRFv2ConsumerCaller + CallOpts bind.CallOpts +} + +type VRFv2ConsumerTransactorSession struct { + Contract *VRFv2ConsumerTransactor + TransactOpts bind.TransactOpts +} + +type VRFv2ConsumerRaw struct { + Contract *VRFv2Consumer +} + +type VRFv2ConsumerCallerRaw struct { + Contract *VRFv2ConsumerCaller +} + +type VRFv2ConsumerTransactorRaw struct { + Contract *VRFv2ConsumerTransactor +} + +func NewVRFv2Consumer(address common.Address, backend bind.ContractBackend) (*VRFv2Consumer, error) { + abi, err := abi.JSON(strings.NewReader(VRFv2ConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFv2Consumer(address, backend, backend, backend) + if err != nil { + return nil, err 
+ } + return &VRFv2Consumer{address: address, abi: abi, VRFv2ConsumerCaller: VRFv2ConsumerCaller{contract: contract}, VRFv2ConsumerTransactor: VRFv2ConsumerTransactor{contract: contract}, VRFv2ConsumerFilterer: VRFv2ConsumerFilterer{contract: contract}}, nil +} + +func NewVRFv2ConsumerCaller(address common.Address, caller bind.ContractCaller) (*VRFv2ConsumerCaller, error) { + contract, err := bindVRFv2Consumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFv2ConsumerCaller{contract: contract}, nil +} + +func NewVRFv2ConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFv2ConsumerTransactor, error) { + contract, err := bindVRFv2Consumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFv2ConsumerTransactor{contract: contract}, nil +} + +func NewVRFv2ConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFv2ConsumerFilterer, error) { + contract, err := bindVRFv2Consumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFv2ConsumerFilterer{contract: contract}, nil +} + +func bindVRFv2Consumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFv2ConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFv2Consumer *VRFv2ConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFv2Consumer.Contract.VRFv2ConsumerCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFv2Consumer *VRFv2ConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFv2Consumer.Contract.VRFv2ConsumerTransactor.contract.Transfer(opts) +} + +func (_VRFv2Consumer *VRFv2ConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFv2Consumer.Contract.VRFv2ConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFv2Consumer *VRFv2ConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFv2Consumer.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFv2Consumer *VRFv2ConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFv2Consumer.Contract.contract.Transfer(opts) +} + +func (_VRFv2Consumer *VRFv2ConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFv2Consumer.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFv2Consumer *VRFv2ConsumerCaller) GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) { + var out []interface{} + err := _VRFv2Consumer.contract.Call(opts, &out, "getRequestStatus", _requestId) + + outstruct := new(GetRequestStatus) + if err != nil { + return *outstruct, err + } + + outstruct.Fulfilled = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.RandomWords = *abi.ConvertType(out[1], new([]*big.Int)).(*[]*big.Int) + + return *outstruct, err + +} + +func (_VRFv2Consumer *VRFv2ConsumerSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFv2Consumer.Contract.GetRequestStatus(&_VRFv2Consumer.CallOpts, _requestId) +} + +func (_VRFv2Consumer *VRFv2ConsumerCallerSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFv2Consumer.Contract.GetRequestStatus(&_VRFv2Consumer.CallOpts, _requestId) +} + +func (_VRFv2Consumer *VRFv2ConsumerCaller) LastRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFv2Consumer.contract.Call(opts, &out, "lastRequestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFv2Consumer *VRFv2ConsumerSession) LastRequestId() (*big.Int, error) { + return _VRFv2Consumer.Contract.LastRequestId(&_VRFv2Consumer.CallOpts) +} + +func (_VRFv2Consumer *VRFv2ConsumerCallerSession) LastRequestId() (*big.Int, error) { + return _VRFv2Consumer.Contract.LastRequestId(&_VRFv2Consumer.CallOpts) +} + +func (_VRFv2Consumer *VRFv2ConsumerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFv2Consumer.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFv2Consumer *VRFv2ConsumerSession) Owner() 
(common.Address, error) { + return _VRFv2Consumer.Contract.Owner(&_VRFv2Consumer.CallOpts) +} + +func (_VRFv2Consumer *VRFv2ConsumerCallerSession) Owner() (common.Address, error) { + return _VRFv2Consumer.Contract.Owner(&_VRFv2Consumer.CallOpts) +} + +func (_VRFv2Consumer *VRFv2ConsumerCaller) RequestIds(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFv2Consumer.contract.Call(opts, &out, "requestIds", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFv2Consumer *VRFv2ConsumerSession) RequestIds(arg0 *big.Int) (*big.Int, error) { + return _VRFv2Consumer.Contract.RequestIds(&_VRFv2Consumer.CallOpts, arg0) +} + +func (_VRFv2Consumer *VRFv2ConsumerCallerSession) RequestIds(arg0 *big.Int) (*big.Int, error) { + return _VRFv2Consumer.Contract.RequestIds(&_VRFv2Consumer.CallOpts, arg0) +} + +func (_VRFv2Consumer *VRFv2ConsumerCaller) SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) { + var out []interface{} + err := _VRFv2Consumer.contract.Call(opts, &out, "s_requests", arg0) + + outstruct := new(SRequests) + if err != nil { + return *outstruct, err + } + + outstruct.Fulfilled = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.Exists = *abi.ConvertType(out[1], new(bool)).(*bool) + + return *outstruct, err + +} + +func (_VRFv2Consumer *VRFv2ConsumerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFv2Consumer.Contract.SRequests(&_VRFv2Consumer.CallOpts, arg0) +} + +func (_VRFv2Consumer *VRFv2ConsumerCallerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFv2Consumer.Contract.SRequests(&_VRFv2Consumer.CallOpts, arg0) +} + +func (_VRFv2Consumer *VRFv2ConsumerTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFv2Consumer.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFv2Consumer *VRFv2ConsumerSession) 
AcceptOwnership() (*types.Transaction, error) { + return _VRFv2Consumer.Contract.AcceptOwnership(&_VRFv2Consumer.TransactOpts) +} + +func (_VRFv2Consumer *VRFv2ConsumerTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFv2Consumer.Contract.AcceptOwnership(&_VRFv2Consumer.TransactOpts) +} + +func (_VRFv2Consumer *VRFv2ConsumerTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFv2Consumer.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFv2Consumer *VRFv2ConsumerSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFv2Consumer.Contract.RawFulfillRandomWords(&_VRFv2Consumer.TransactOpts, requestId, randomWords) +} + +func (_VRFv2Consumer *VRFv2ConsumerTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFv2Consumer.Contract.RawFulfillRandomWords(&_VRFv2Consumer.TransactOpts, requestId, randomWords) +} + +func (_VRFv2Consumer *VRFv2ConsumerTransactor) RequestRandomWords(opts *bind.TransactOpts, subId uint64, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte) (*types.Transaction, error) { + return _VRFv2Consumer.contract.Transact(opts, "requestRandomWords", subId, callbackGasLimit, requestConfirmations, numWords, keyHash) +} + +func (_VRFv2Consumer *VRFv2ConsumerSession) RequestRandomWords(subId uint64, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte) (*types.Transaction, error) { + return _VRFv2Consumer.Contract.RequestRandomWords(&_VRFv2Consumer.TransactOpts, subId, callbackGasLimit, requestConfirmations, numWords, keyHash) +} + +func (_VRFv2Consumer *VRFv2ConsumerTransactorSession) RequestRandomWords(subId uint64, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash 
[32]byte) (*types.Transaction, error) { + return _VRFv2Consumer.Contract.RequestRandomWords(&_VRFv2Consumer.TransactOpts, subId, callbackGasLimit, requestConfirmations, numWords, keyHash) +} + +func (_VRFv2Consumer *VRFv2ConsumerTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFv2Consumer.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFv2Consumer *VRFv2ConsumerSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFv2Consumer.Contract.TransferOwnership(&_VRFv2Consumer.TransactOpts, to) +} + +func (_VRFv2Consumer *VRFv2ConsumerTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFv2Consumer.Contract.TransferOwnership(&_VRFv2Consumer.TransactOpts, to) +} + +type VRFv2ConsumerOwnershipTransferRequestedIterator struct { + Event *VRFv2ConsumerOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFv2ConsumerOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFv2ConsumerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFv2ConsumerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFv2ConsumerOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFv2ConsumerOwnershipTransferRequestedIterator) Close() error { + 
it.sub.Unsubscribe() + return nil +} + +type VRFv2ConsumerOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFv2Consumer *VRFv2ConsumerFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFv2ConsumerOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFv2Consumer.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFv2ConsumerOwnershipTransferRequestedIterator{contract: _VRFv2Consumer.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFv2Consumer *VRFv2ConsumerFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFv2ConsumerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFv2Consumer.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFv2ConsumerOwnershipTransferRequested) + if err := _VRFv2Consumer.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func 
(_VRFv2Consumer *VRFv2ConsumerFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFv2ConsumerOwnershipTransferRequested, error) { + event := new(VRFv2ConsumerOwnershipTransferRequested) + if err := _VRFv2Consumer.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFv2ConsumerOwnershipTransferredIterator struct { + Event *VRFv2ConsumerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFv2ConsumerOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFv2ConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFv2ConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFv2ConsumerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFv2ConsumerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFv2ConsumerOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFv2Consumer *VRFv2ConsumerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFv2ConsumerOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = 
append(toRule, toItem) + } + + logs, sub, err := _VRFv2Consumer.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFv2ConsumerOwnershipTransferredIterator{contract: _VRFv2Consumer.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFv2Consumer *VRFv2ConsumerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFv2ConsumerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFv2Consumer.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFv2ConsumerOwnershipTransferred) + if err := _VRFv2Consumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFv2Consumer *VRFv2ConsumerFilterer) ParseOwnershipTransferred(log types.Log) (*VRFv2ConsumerOwnershipTransferred, error) { + event := new(VRFv2ConsumerOwnershipTransferred) + if err := _VRFv2Consumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFv2ConsumerRequestFulfilledIterator struct { + Event *VRFv2ConsumerRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*VRFv2ConsumerRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFv2ConsumerRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFv2ConsumerRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFv2ConsumerRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *VRFv2ConsumerRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFv2ConsumerRequestFulfilled struct { + RequestId *big.Int + RandomWords []*big.Int + Raw types.Log +} + +func (_VRFv2Consumer *VRFv2ConsumerFilterer) FilterRequestFulfilled(opts *bind.FilterOpts) (*VRFv2ConsumerRequestFulfilledIterator, error) { + + logs, sub, err := _VRFv2Consumer.contract.FilterLogs(opts, "RequestFulfilled") + if err != nil { + return nil, err + } + return &VRFv2ConsumerRequestFulfilledIterator{contract: _VRFv2Consumer.contract, event: "RequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFv2Consumer *VRFv2ConsumerFilterer) WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *VRFv2ConsumerRequestFulfilled) (event.Subscription, error) { + + logs, sub, err := _VRFv2Consumer.contract.WatchLogs(opts, "RequestFulfilled") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFv2ConsumerRequestFulfilled) + if err := _VRFv2Consumer.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return err + } + 
event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFv2Consumer *VRFv2ConsumerFilterer) ParseRequestFulfilled(log types.Log) (*VRFv2ConsumerRequestFulfilled, error) { + event := new(VRFv2ConsumerRequestFulfilled) + if err := _VRFv2Consumer.contract.UnpackLog(event, "RequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFv2ConsumerRequestSentIterator struct { + Event *VRFv2ConsumerRequestSent + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFv2ConsumerRequestSentIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFv2ConsumerRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFv2ConsumerRequestSent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFv2ConsumerRequestSentIterator) Error() error { + return it.fail +} + +func (it *VRFv2ConsumerRequestSentIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFv2ConsumerRequestSent struct { + RequestId *big.Int + NumWords uint32 + Raw types.Log +} + +func (_VRFv2Consumer *VRFv2ConsumerFilterer) FilterRequestSent(opts *bind.FilterOpts) (*VRFv2ConsumerRequestSentIterator, error) { + + logs, sub, err := _VRFv2Consumer.contract.FilterLogs(opts, "RequestSent") + if err != nil { + return nil, err + } + return 
&VRFv2ConsumerRequestSentIterator{contract: _VRFv2Consumer.contract, event: "RequestSent", logs: logs, sub: sub}, nil +} + +func (_VRFv2Consumer *VRFv2ConsumerFilterer) WatchRequestSent(opts *bind.WatchOpts, sink chan<- *VRFv2ConsumerRequestSent) (event.Subscription, error) { + + logs, sub, err := _VRFv2Consumer.contract.WatchLogs(opts, "RequestSent") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFv2ConsumerRequestSent) + if err := _VRFv2Consumer.contract.UnpackLog(event, "RequestSent", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFv2Consumer *VRFv2ConsumerFilterer) ParseRequestSent(log types.Log) (*VRFv2ConsumerRequestSent, error) { + event := new(VRFv2ConsumerRequestSent) + if err := _VRFv2Consumer.contract.UnpackLog(event, "RequestSent", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRequestStatus struct { + Fulfilled bool + RandomWords []*big.Int +} +type SRequests struct { + Fulfilled bool + Exists bool +} + +func (_VRFv2Consumer *VRFv2Consumer) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFv2Consumer.abi.Events["OwnershipTransferRequested"].ID: + return _VRFv2Consumer.ParseOwnershipTransferRequested(log) + case _VRFv2Consumer.abi.Events["OwnershipTransferred"].ID: + return _VRFv2Consumer.ParseOwnershipTransferred(log) + case _VRFv2Consumer.abi.Events["RequestFulfilled"].ID: + return _VRFv2Consumer.ParseRequestFulfilled(log) + case _VRFv2Consumer.abi.Events["RequestSent"].ID: + return _VRFv2Consumer.ParseRequestSent(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", 
log.Topics[0]) + } +} + +func (VRFv2ConsumerOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFv2ConsumerOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFv2ConsumerRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0xfe2e2d779dba245964d4e3ef9b994be63856fd568bf7d3ca9e224755cb1bd54d") +} + +func (VRFv2ConsumerRequestSent) Topic() common.Hash { + return common.HexToHash("0xcc58b13ad3eab50626c6a6300b1d139cd6ebb1688a7cced9461c2f7e762665ee") +} + +func (_VRFv2Consumer *VRFv2Consumer) Address() common.Address { + return _VRFv2Consumer.address +} + +type VRFv2ConsumerInterface interface { + GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) + + LastRequestId(opts *bind.CallOpts) (*big.Int, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + RequestIds(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, subId uint64, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFv2ConsumerOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFv2ConsumerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + 
ParseOwnershipTransferRequested(log types.Log) (*VRFv2ConsumerOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFv2ConsumerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFv2ConsumerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFv2ConsumerOwnershipTransferred, error) + + FilterRequestFulfilled(opts *bind.FilterOpts) (*VRFv2ConsumerRequestFulfilledIterator, error) + + WatchRequestFulfilled(opts *bind.WatchOpts, sink chan<- *VRFv2ConsumerRequestFulfilled) (event.Subscription, error) + + ParseRequestFulfilled(log types.Log) (*VRFv2ConsumerRequestFulfilled, error) + + FilterRequestSent(opts *bind.FilterOpts) (*VRFv2ConsumerRequestSentIterator, error) + + WatchRequestSent(opts *bind.WatchOpts, sink chan<- *VRFv2ConsumerRequestSent) (event.Subscription, error) + + ParseRequestSent(log types.Log) (*VRFv2ConsumerRequestSent, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_v2plus_load_test_with_metrics/vrf_v2plus_load_test_with_metrics.go b/core/gethwrappers/generated/vrf_v2plus_load_test_with_metrics/vrf_v2plus_load_test_with_metrics.go new file mode 100644 index 00000000..9688632c --- /dev/null +++ b/core/gethwrappers/generated/vrf_v2plus_load_test_with_metrics/vrf_v2plus_load_test_with_metrics.go @@ -0,0 +1,853 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_v2plus_load_test_with_metrics + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2PlusLoadTestWithMetricsMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"}],\"name\":\"OnlyOwnerOrCoordinator\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroAddress\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"accept
Ownership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_requestId\",\"type\":\"uint256\"}],\"name\":\"getRequestStatus\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256\",\"name\":\"requestTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestBlockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentBlockNumber\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_subId\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"_requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"bool\",\"name\":\"_nativePayment\",\"type\":\"bool\"},{\"internalType\":\"uint32\",\"name\":\"_numWords\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestCount\",\"type\":\"uint16\"}],\"name\":\"requestRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"reset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_average
FulfillmentInMillions\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_fastestFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_lastRequestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requests\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"requestTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestBlockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentBlockNumber\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_responseCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_slowestFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_vrfCoordinator\",\"outputs\":[{\"internalType\":\"contractIVRFCoordinatorV2Plus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"}],\"name\":\"setCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type
\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x6080604052600060055560006006556103e760075534801561002057600080fd5b5060405161136538038061136583398101604081905261003f9161019b565b8033806000816100965760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156100c6576100c6816100f1565b5050600280546001600160a01b0319166001600160a01b039390931692909217909155506101cb9050565b6001600160a01b03811633141561014a5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161008d565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000602082840312156101ad57600080fd5b81516001600160a01b03811681146101c457600080fd5b9392505050565b61118b806101da6000396000f3fe608060405234801561001057600080fd5b50600436106101005760003560e01c80638ea9811711610097578063d826f88f11610066578063d826f88f14610252578063d8a4676f14610271578063dc1670db14610296578063f2fde38b1461029f57600080fd5b80638ea98117146101ab5780639eccacf6146101be578063a168fa89146101de578063b1e217491461024957600080fd5b8063737144bc116100d3578063737144bc1461015257806374dba1241461015b57806379ba5097146101645780638da5cb5b1461016c57600080fd5b80631757f11c146101055780631fe543e314610121578063557d2e92146101365780636846de201461013f575b600080fd5b61010e60065481565b6040519081526020015b60405180910390f35b61013461012f366004610d84565b6102b2565b005b61010e60045481565b61013461014d366004610e73565b610338565b61010e60055481565b61010e60075481565b610134610565565b60005473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff909116
8152602001610118565b6101346101b9366004610d15565b610662565b6002546101869073ffffffffffffffffffffffffffffffffffffffff1681565b61021f6101ec366004610d52565b600a602052600090815260409020805460028201546003830154600484015460059094015460ff90931693919290919085565b6040805195151586526020860194909452928401919091526060830152608082015260a001610118565b61010e60085481565b6101346000600581905560068190556103e76007556004819055600355565b61028461027f366004610d52565b61076d565b60405161011896959493929190610ef2565b61010e60035481565b6101346102ad366004610d15565b610852565b60025473ffffffffffffffffffffffffffffffffffffffff16331461032a576002546040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff90911660248201526044015b60405180910390fd5b6103348282610866565b5050565b610340610991565b60005b8161ffff168161ffff16101561055b5760006040518060c001604052808881526020018a81526020018961ffff1681526020018763ffffffff1681526020018563ffffffff1681526020016103a76040518060200160405280891515815250610a14565b90526002546040517f9b1c385e00000000000000000000000000000000000000000000000000000000815291925060009173ffffffffffffffffffffffffffffffffffffffff90911690639b1c385e90610405908590600401610f5e565b602060405180830381600087803b15801561041f57600080fd5b505af1158015610433573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104579190610d6b565b600881905590506000610468610ad0565b6040805160c08101825260008082528251818152602080820185528084019182524284860152606084018390526080840186905260a08401839052878352600a815293909120825181547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690151517815590518051949550919390926104f6926001850192910190610c8a565b506040820151600282015560608201516003820155608082015160048083019190915560a0909201516005909101558054906000610533836110e7565b9091555050600091825260096020526040909120555080610553816110c5565b915050610343565b5050505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146105e6576040517f08
c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610321565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60005473ffffffffffffffffffffffffffffffffffffffff1633148015906106a2575060025473ffffffffffffffffffffffffffffffffffffffff163314155b1561072657336106c760005473ffffffffffffffffffffffffffffffffffffffff1690565b6002546040517f061db9c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff93841660048201529183166024830152919091166044820152606401610321565b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b6000818152600a60209081526040808320815160c081018352815460ff16151581526001820180548451818702810187019095528085526060958795869586958695869591949293858401939092908301828280156107eb57602002820191906000526020600020905b8154815260200190600101908083116107d7575b505050505081526020016002820154815260200160038201548152602001600482015481526020016005820154815250509050806000015181602001518260400151836060015184608001518560a001519650965096509650965096505091939550919395565b61085a610991565b61086381610b6d565b50565b6000610870610ad0565b6000848152600960205260408120549192509061088d90836110ae565b9050600061089e82620f4240611071565b90506006548211156108b05760068290555b60075482106108c1576007546108c3565b815b6007556003546108d35780610906565b6003546108e190600161101e565b816003546005546108f29190611071565b6108fc919061101e565b6109069190611036565b6005556000858152600a6020908152604090912080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600190811782558651610958939290910191870190610c8a565b506000858152600a6020526040812042600380830191909155
6005909101859055805491610985836110e7565b91905055505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610a12576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610321565b565b60607f92fd13387c7fe7befbc38d303d6468778fb9731bc4583f17d92989c6fcfdeaaa82604051602401610a4d91511515815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009093169290921790915292915050565b600046610adc81610c63565b15610b6657606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b158015610b2857600080fd5b505afa158015610b3c573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610b609190610d6b565b91505090565b4391505090565b73ffffffffffffffffffffffffffffffffffffffff8116331415610bed576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610321565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600061a4b1821480610c77575062066eed82145b80610c84575062066eee82145b92915050565b828054828255906000526020600020908101928215610cc5579160200282015b82811115610cc5578251825591602001919060010190610caa565b50610cd1929150610cd5565b5090565b5b80821115610cd15760008155600101610cd6565b803561ffff81168114610cfc57600080fd5b919050565b803563ffffffff81168114610cfc57600080fd5b600060208284031215610d2757600080fd5b813573ffffffffffffffffffffffffffffffffffffffff81168114610d4b57600080fd5b9392505050565b60006020828403
1215610d6457600080fd5b5035919050565b600060208284031215610d7d57600080fd5b5051919050565b60008060408385031215610d9757600080fd5b8235915060208084013567ffffffffffffffff80821115610db757600080fd5b818601915086601f830112610dcb57600080fd5b813581811115610ddd57610ddd61114f565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f83011681018181108582111715610e2057610e2061114f565b604052828152858101935084860182860187018b1015610e3f57600080fd5b600095505b83861015610e62578035855260019590950194938601938601610e44565b508096505050505050509250929050565b600080600080600080600060e0888a031215610e8e57600080fd5b87359650610e9e60208901610cea565b955060408801359450610eb360608901610d01565b935060808801358015158114610ec857600080fd5b9250610ed660a08901610d01565b9150610ee460c08901610cea565b905092959891949750929550565b600060c082018815158352602060c08185015281895180845260e086019150828b01935060005b81811015610f3557845183529383019391830191600101610f19565b505060408501989098525050506060810193909352608083019190915260a09091015292915050565b6000602080835283518184015280840151604084015261ffff6040850151166060840152606084015163ffffffff80821660808601528060808701511660a0860152505060a084015160c08085015280518060e086015260005b81811015610fd55782810184015186820161010001528301610fb8565b81811115610fe857600061010083880101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169390930161010001949350505050565b6000821982111561103157611031611120565b500190565b60008261106c577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156110a9576110a9611120565b500290565b6000828210156110c0576110c0611120565b500390565b600061ffff808316818114156110dd576110dd611120565b6001019392505050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82141561111957611119611120565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000
600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFV2PlusLoadTestWithMetricsABI = VRFV2PlusLoadTestWithMetricsMetaData.ABI + +var VRFV2PlusLoadTestWithMetricsBin = VRFV2PlusLoadTestWithMetricsMetaData.Bin + +func DeployVRFV2PlusLoadTestWithMetrics(auth *bind.TransactOpts, backend bind.ContractBackend, _vrfCoordinator common.Address) (common.Address, *types.Transaction, *VRFV2PlusLoadTestWithMetrics, error) { + parsed, err := VRFV2PlusLoadTestWithMetricsMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2PlusLoadTestWithMetricsBin), backend, _vrfCoordinator) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2PlusLoadTestWithMetrics{address: address, abi: *parsed, VRFV2PlusLoadTestWithMetricsCaller: VRFV2PlusLoadTestWithMetricsCaller{contract: contract}, VRFV2PlusLoadTestWithMetricsTransactor: VRFV2PlusLoadTestWithMetricsTransactor{contract: contract}, VRFV2PlusLoadTestWithMetricsFilterer: VRFV2PlusLoadTestWithMetricsFilterer{contract: contract}}, nil +} + +type VRFV2PlusLoadTestWithMetrics struct { + address common.Address + abi abi.ABI + VRFV2PlusLoadTestWithMetricsCaller + VRFV2PlusLoadTestWithMetricsTransactor + VRFV2PlusLoadTestWithMetricsFilterer +} + +type VRFV2PlusLoadTestWithMetricsCaller struct { + contract *bind.BoundContract +} + +type VRFV2PlusLoadTestWithMetricsTransactor struct { + contract *bind.BoundContract +} + +type VRFV2PlusLoadTestWithMetricsFilterer struct { + contract *bind.BoundContract +} + +type VRFV2PlusLoadTestWithMetricsSession struct { + Contract *VRFV2PlusLoadTestWithMetrics + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type 
VRFV2PlusLoadTestWithMetricsCallerSession struct { + Contract *VRFV2PlusLoadTestWithMetricsCaller + CallOpts bind.CallOpts +} + +type VRFV2PlusLoadTestWithMetricsTransactorSession struct { + Contract *VRFV2PlusLoadTestWithMetricsTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2PlusLoadTestWithMetricsRaw struct { + Contract *VRFV2PlusLoadTestWithMetrics +} + +type VRFV2PlusLoadTestWithMetricsCallerRaw struct { + Contract *VRFV2PlusLoadTestWithMetricsCaller +} + +type VRFV2PlusLoadTestWithMetricsTransactorRaw struct { + Contract *VRFV2PlusLoadTestWithMetricsTransactor +} + +func NewVRFV2PlusLoadTestWithMetrics(address common.Address, backend bind.ContractBackend) (*VRFV2PlusLoadTestWithMetrics, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2PlusLoadTestWithMetricsABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2PlusLoadTestWithMetrics(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2PlusLoadTestWithMetrics{address: address, abi: abi, VRFV2PlusLoadTestWithMetricsCaller: VRFV2PlusLoadTestWithMetricsCaller{contract: contract}, VRFV2PlusLoadTestWithMetricsTransactor: VRFV2PlusLoadTestWithMetricsTransactor{contract: contract}, VRFV2PlusLoadTestWithMetricsFilterer: VRFV2PlusLoadTestWithMetricsFilterer{contract: contract}}, nil +} + +func NewVRFV2PlusLoadTestWithMetricsCaller(address common.Address, caller bind.ContractCaller) (*VRFV2PlusLoadTestWithMetricsCaller, error) { + contract, err := bindVRFV2PlusLoadTestWithMetrics(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusLoadTestWithMetricsCaller{contract: contract}, nil +} + +func NewVRFV2PlusLoadTestWithMetricsTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2PlusLoadTestWithMetricsTransactor, error) { + contract, err := bindVRFV2PlusLoadTestWithMetrics(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return 
&VRFV2PlusLoadTestWithMetricsTransactor{contract: contract}, nil +} + +func NewVRFV2PlusLoadTestWithMetricsFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2PlusLoadTestWithMetricsFilterer, error) { + contract, err := bindVRFV2PlusLoadTestWithMetrics(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2PlusLoadTestWithMetricsFilterer{contract: contract}, nil +} + +func bindVRFV2PlusLoadTestWithMetrics(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2PlusLoadTestWithMetricsMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusLoadTestWithMetrics.Contract.VRFV2PlusLoadTestWithMetricsCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.VRFV2PlusLoadTestWithMetricsTransactor.contract.Transfer(opts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.VRFV2PlusLoadTestWithMetricsTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusLoadTestWithMetrics.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.contract.Transfer(opts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCaller) GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) { + var out []interface{} + err := _VRFV2PlusLoadTestWithMetrics.contract.Call(opts, &out, "getRequestStatus", _requestId) + + outstruct := new(GetRequestStatus) + if err != nil { + return *outstruct, err + } + + outstruct.Fulfilled = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.RandomWords = *abi.ConvertType(out[1], new([]*big.Int)).(*[]*big.Int) + outstruct.RequestTimestamp = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.FulfilmentTimestamp = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.RequestBlockNumber = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + outstruct.FulfilmentBlockNumber = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.GetRequestStatus(&_VRFV2PlusLoadTestWithMetrics.CallOpts, _requestId) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCallerSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.GetRequestStatus(&_VRFV2PlusLoadTestWithMetrics.CallOpts, _requestId) +} + +func (_VRFV2PlusLoadTestWithMetrics 
*VRFV2PlusLoadTestWithMetricsCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusLoadTestWithMetrics.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) Owner() (common.Address, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.Owner(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCallerSession) Owner() (common.Address, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.Owner(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCaller) SAverageFulfillmentInMillions(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusLoadTestWithMetrics.contract.Call(opts, &out, "s_averageFulfillmentInMillions") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) SAverageFulfillmentInMillions() (*big.Int, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SAverageFulfillmentInMillions(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCallerSession) SAverageFulfillmentInMillions() (*big.Int, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SAverageFulfillmentInMillions(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCaller) SFastestFulfillment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusLoadTestWithMetrics.contract.Call(opts, &out, "s_fastestFulfillment") + + if err != nil { + return *new(*big.Int), err + } + 
+ out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) SFastestFulfillment() (*big.Int, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SFastestFulfillment(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCallerSession) SFastestFulfillment() (*big.Int, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SFastestFulfillment(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCaller) SLastRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusLoadTestWithMetrics.contract.Call(opts, &out, "s_lastRequestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) SLastRequestId() (*big.Int, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SLastRequestId(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCallerSession) SLastRequestId() (*big.Int, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SLastRequestId(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCaller) SRequestCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusLoadTestWithMetrics.contract.Call(opts, &out, "s_requestCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) SRequestCount() (*big.Int, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SRequestCount(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func 
(_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCallerSession) SRequestCount() (*big.Int, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SRequestCount(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCaller) SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) { + var out []interface{} + err := _VRFV2PlusLoadTestWithMetrics.contract.Call(opts, &out, "s_requests", arg0) + + outstruct := new(SRequests) + if err != nil { + return *outstruct, err + } + + outstruct.Fulfilled = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.RequestTimestamp = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.FulfilmentTimestamp = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.RequestBlockNumber = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.FulfilmentBlockNumber = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SRequests(&_VRFV2PlusLoadTestWithMetrics.CallOpts, arg0) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCallerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SRequests(&_VRFV2PlusLoadTestWithMetrics.CallOpts, arg0) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCaller) SResponseCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusLoadTestWithMetrics.contract.Call(opts, &out, "s_responseCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) SResponseCount() (*big.Int, error) { + return 
_VRFV2PlusLoadTestWithMetrics.Contract.SResponseCount(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCallerSession) SResponseCount() (*big.Int, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SResponseCount(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCaller) SSlowestFulfillment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusLoadTestWithMetrics.contract.Call(opts, &out, "s_slowestFulfillment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) SSlowestFulfillment() (*big.Int, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SSlowestFulfillment(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCallerSession) SSlowestFulfillment() (*big.Int, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SSlowestFulfillment(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCaller) SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusLoadTestWithMetrics.contract.Call(opts, &out, "s_vrfCoordinator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SVrfCoordinator(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsCallerSession) SVrfCoordinator() (common.Address, error) { + return 
_VRFV2PlusLoadTestWithMetrics.Contract.SVrfCoordinator(&_VRFV2PlusLoadTestWithMetrics.CallOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.AcceptOwnership(&_VRFV2PlusLoadTestWithMetrics.TransactOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.AcceptOwnership(&_VRFV2PlusLoadTestWithMetrics.TransactOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.RawFulfillRandomWords(&_VRFV2PlusLoadTestWithMetrics.TransactOpts, requestId, randomWords) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.RawFulfillRandomWords(&_VRFV2PlusLoadTestWithMetrics.TransactOpts, requestId, randomWords) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactor) RequestRandomWords(opts *bind.TransactOpts, _subId *big.Int, _requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, 
_nativePayment bool, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.contract.Transact(opts, "requestRandomWords", _subId, _requestConfirmations, _keyHash, _callbackGasLimit, _nativePayment, _numWords, _requestCount) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) RequestRandomWords(_subId *big.Int, _requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _nativePayment bool, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.RequestRandomWords(&_VRFV2PlusLoadTestWithMetrics.TransactOpts, _subId, _requestConfirmations, _keyHash, _callbackGasLimit, _nativePayment, _numWords, _requestCount) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactorSession) RequestRandomWords(_subId *big.Int, _requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _nativePayment bool, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.RequestRandomWords(&_VRFV2PlusLoadTestWithMetrics.TransactOpts, _subId, _requestConfirmations, _keyHash, _callbackGasLimit, _nativePayment, _numWords, _requestCount) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactor) Reset(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.contract.Transact(opts, "reset") +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) Reset() (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.Reset(&_VRFV2PlusLoadTestWithMetrics.TransactOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactorSession) Reset() (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.Reset(&_VRFV2PlusLoadTestWithMetrics.TransactOpts) +} + +func (_VRFV2PlusLoadTestWithMetrics 
*VRFV2PlusLoadTestWithMetricsTransactor) SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.contract.Transact(opts, "setCoordinator", _vrfCoordinator) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SetCoordinator(&_VRFV2PlusLoadTestWithMetrics.TransactOpts, _vrfCoordinator) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactorSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.SetCoordinator(&_VRFV2PlusLoadTestWithMetrics.TransactOpts, _vrfCoordinator) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.TransferOwnership(&_VRFV2PlusLoadTestWithMetrics.TransactOpts, to) +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusLoadTestWithMetrics.Contract.TransferOwnership(&_VRFV2PlusLoadTestWithMetrics.TransactOpts, to) +} + +type VRFV2PlusLoadTestWithMetricsOwnershipTransferRequestedIterator struct { + Event *VRFV2PlusLoadTestWithMetricsOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusLoadTestWithMetricsOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil 
{ + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusLoadTestWithMetricsOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusLoadTestWithMetricsOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusLoadTestWithMetricsOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusLoadTestWithMetricsOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusLoadTestWithMetricsOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusLoadTestWithMetricsOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusLoadTestWithMetrics.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusLoadTestWithMetricsOwnershipTransferRequestedIterator{contract: _VRFV2PlusLoadTestWithMetrics.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink 
chan<- *VRFV2PlusLoadTestWithMetricsOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusLoadTestWithMetrics.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusLoadTestWithMetricsOwnershipTransferRequested) + if err := _VRFV2PlusLoadTestWithMetrics.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusLoadTestWithMetricsOwnershipTransferRequested, error) { + event := new(VRFV2PlusLoadTestWithMetricsOwnershipTransferRequested) + if err := _VRFV2PlusLoadTestWithMetrics.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusLoadTestWithMetricsOwnershipTransferredIterator struct { + Event *VRFV2PlusLoadTestWithMetricsOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusLoadTestWithMetricsOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(VRFV2PlusLoadTestWithMetricsOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusLoadTestWithMetricsOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusLoadTestWithMetricsOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusLoadTestWithMetricsOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusLoadTestWithMetricsOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusLoadTestWithMetricsOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusLoadTestWithMetrics.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusLoadTestWithMetricsOwnershipTransferredIterator{contract: _VRFV2PlusLoadTestWithMetrics.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusLoadTestWithMetricsOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule 
[]interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusLoadTestWithMetrics.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusLoadTestWithMetricsOwnershipTransferred) + if err := _VRFV2PlusLoadTestWithMetrics.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetricsFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2PlusLoadTestWithMetricsOwnershipTransferred, error) { + event := new(VRFV2PlusLoadTestWithMetricsOwnershipTransferred) + if err := _VRFV2PlusLoadTestWithMetrics.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRequestStatus struct { + Fulfilled bool + RandomWords []*big.Int + RequestTimestamp *big.Int + FulfilmentTimestamp *big.Int + RequestBlockNumber *big.Int + FulfilmentBlockNumber *big.Int +} +type SRequests struct { + Fulfilled bool + RequestTimestamp *big.Int + FulfilmentTimestamp *big.Int + RequestBlockNumber *big.Int + FulfilmentBlockNumber *big.Int +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetrics) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2PlusLoadTestWithMetrics.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2PlusLoadTestWithMetrics.ParseOwnershipTransferRequested(log) + case 
_VRFV2PlusLoadTestWithMetrics.abi.Events["OwnershipTransferred"].ID: + return _VRFV2PlusLoadTestWithMetrics.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2PlusLoadTestWithMetricsOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFV2PlusLoadTestWithMetricsOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_VRFV2PlusLoadTestWithMetrics *VRFV2PlusLoadTestWithMetrics) Address() common.Address { + return _VRFV2PlusLoadTestWithMetrics.address +} + +type VRFV2PlusLoadTestWithMetricsInterface interface { + GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SAverageFulfillmentInMillions(opts *bind.CallOpts) (*big.Int, error) + + SFastestFulfillment(opts *bind.CallOpts) (*big.Int, error) + + SLastRequestId(opts *bind.CallOpts) (*big.Int, error) + + SRequestCount(opts *bind.CallOpts) (*big.Int, error) + + SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) + + SResponseCount(opts *bind.CallOpts) (*big.Int, error) + + SSlowestFulfillment(opts *bind.CallOpts) (*big.Int, error) + + SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, _subId *big.Int, _requestConfirmations uint16, _keyHash [32]byte, _callbackGasLimit uint32, _nativePayment bool, _numWords uint32, _requestCount uint16) (*types.Transaction, error) + + Reset(opts *bind.TransactOpts) (*types.Transaction, error) + + SetCoordinator(opts 
*bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusLoadTestWithMetricsOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusLoadTestWithMetricsOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusLoadTestWithMetricsOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusLoadTestWithMetricsOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusLoadTestWithMetricsOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2PlusLoadTestWithMetricsOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_v2plus_single_consumer/vrf_v2plus_single_consumer.go b/core/gethwrappers/generated/vrf_v2plus_single_consumer/vrf_v2plus_single_consumer.go new file mode 100644 index 00000000..f9922547 --- /dev/null +++ b/core/gethwrappers/generated/vrf_v2plus_single_consumer/vrf_v2plus_single_consumer.go @@ -0,0 +1,769 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_v2plus_single_consumer + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2PlusSingleConsumerExampleMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"bool\",\"name\":\"nativePayment\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"}],\"name\":\"OnlyOwnerOrCoordinator\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroAddress\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"add
ress\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"fundAndRequestRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"requestRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestConfig\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"bool\",\"na
me\":\"nativePayment\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_vrfCoordinator\",\"outputs\":[{\"internalType\":\"contractIVRFCoordinatorV2Plus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"}],\"name\":\"setCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"subscribe\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"topUpSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"unsubscribe\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60806040523480156200001157600080fd5b506040516200185238038062001852833981016040819052620000349162000458565b8633806000816200008c5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000bf57620000bf81620001a8565b5050600280546001600160a01b03199081166001600160a01b0394851617909155600380548216938a169390931790925550600a80543392169190911790556040805160c081018252600080825263ffffffff8881166020840181905261ffff8916948401859052908716606084018190526080840187905285151560a09094018490526004929092556005805465ffffffffffff19169091176401000000009094029390931763ffffffff60301b191666010000000000009091021790915560068390556007805460ff191690911790556200019b62000254565b5050505050505062000524565b6001600160a01b038116331415620002035760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000083565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6200025e620003c8565b6040805160018082528183019092526000916020808301908036833701905050905030816000815181106200029757620002976200050e565b6001600160a01b039283166020918202929092018101919091526002546040805163288688f960e21b81529051919093169263a21a23e49260048083019391928290030181600087803b158015620002ee57600080fd5b505af115801562000303573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620003299190620004f4565b600481905560025482516001600160a01b039091169163bec4c08c9184906000906200035957620003596200050e565b60200260200101516040518363ffffffff1660e01b8152600401620003919291909182526001600160a01b0316602082015260400190565b600060405180830381600087803b158015620003ac57600080fd5b505af1158015620003c1573d6000803e3d6000fd5b5050505050565b6000546001600160a01b03163314620004245760405162461bcd60e51b8
15260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640162000083565b565b80516001600160a01b03811681146200043e57600080fd5b919050565b805163ffffffff811681146200043e57600080fd5b600080600080600080600060e0888a0312156200047457600080fd5b6200047f8862000426565b96506200048f6020890162000426565b95506200049f6040890162000443565b9450606088015161ffff81168114620004b757600080fd5b9350620004c76080890162000443565b925060a0880151915060c08801518015158114620004e457600080fd5b8091505092959891949750929550565b6000602082840312156200050757600080fd5b5051919050565b634e487b7160e01b600052603260045260246000fd5b61131e80620005346000396000f3fe608060405234801561001057600080fd5b50600436106100f45760003560e01c80638da5cb5b11610097578063e0c8628911610066578063e0c862891461025c578063e89e106a14610264578063f2fde38b1461027b578063f6eaffc81461028e57600080fd5b80638da5cb5b146101e25780638ea98117146102215780638f449a05146102345780639eccacf61461023c57600080fd5b80637262561c116100d35780637262561c1461013457806379ba5097146101475780637db9263f1461014f57806386850e93146101cf57600080fd5b8062f714ce146100f95780631fe543e31461010e5780636fd700bb14610121575b600080fd5b61010c61010736600461108a565b6102a1565b005b61010c61011c3660046110b6565b61035a565b61010c61012f366004611058565b6103e0565b61010c610142366004611014565b610616565b61010c6106b3565b60045460055460065460075461018b939263ffffffff8082169361ffff6401000000008404169366010000000000009093049091169160ff1686565b6040805196875263ffffffff958616602088015261ffff90941693860193909352921660608401526080830191909152151560a082015260c0015b60405180910390f35b61010c6101dd366004611058565b6107b0565b60005473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016101c6565b61010c61022f366004611014565b610886565b61010c610991565b6002546101fc9073ffffffffffffffffffffffffffffffffffffffff1681565b61010c610b36565b61026d60095481565b6040519081526020016101c6565b61010c610289366004611014565b610ca3565b61026d61029c36600461105
8565b610cb7565b6102a9610cd8565b6003546040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8381166004830152602482018590529091169063a9059cbb90604401602060405180830381600087803b15801561031d57600080fd5b505af1158015610331573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103559190611036565b505050565b60025473ffffffffffffffffffffffffffffffffffffffff1633146103d2576002546040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff90911660248201526044015b60405180910390fd5b6103dc8282610d5b565b5050565b6103e8610cd8565b6040805160c08101825260045480825260055463ffffffff808216602080860191909152640100000000830461ffff16858701526601000000000000909204166060840152600654608084015260075460ff16151560a0840152600354600254855192830193909352929373ffffffffffffffffffffffffffffffffffffffff93841693634000aea09316918691016040516020818303038152906040526040518463ffffffff1660e01b81526004016104a493929190611210565b602060405180830381600087803b1580156104be57600080fd5b505af11580156104d2573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104f69190611036565b5060006040518060c001604052808360800151815260200183600001518152602001836040015161ffff168152602001836020015163ffffffff168152602001836060015163ffffffff16815260200161056360405180602001604052808660a001511515815250610dd9565b90526002546040517f9b1c385e00000000000000000000000000000000000000000000000000000000815291925073ffffffffffffffffffffffffffffffffffffffff1690639b1c385e906105bc90849060040161124e565b602060405180830381600087803b1580156105d657600080fd5b505af11580156105ea573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061060e9190611071565b600955505050565b61061e610cd8565b600254600480546040517f0ae095400000000000000000000000000000000000000000000000000000000081529182015273ffffffffffffffffffffffffffffffffffffffff838116602483015290911690630ae095409060440160006040518083038
1600087803b15801561069357600080fd5b505af11580156106a7573d6000803e3d6000fd5b50506000600455505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610734576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016103c9565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6107b8610cd8565b6003546002546004546040805160208082019390935281518082039093018352808201918290527f4000aea00000000000000000000000000000000000000000000000000000000090915273ffffffffffffffffffffffffffffffffffffffff93841693634000aea09361083493911691869190604401611210565b602060405180830381600087803b15801561084e57600080fd5b505af1158015610862573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103dc9190611036565b60005473ffffffffffffffffffffffffffffffffffffffff1633148015906108c6575060025473ffffffffffffffffffffffffffffffffffffffff163314155b1561094a57336108eb60005473ffffffffffffffffffffffffffffffffffffffff1690565b6002546040517f061db9c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff938416600482015291831660248301529190911660448201526064016103c9565b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b610999610cd8565b6040805160018082528183019092526000916020808301908036833701905050905030816000815181106109cf576109cf6112b3565b73ffffffffffffffffffffffffffffffffffffffff928316602091820292909201810191909152600254604080517fa21a23e40000000000000000000000000000000000000000000000000000000081529051919093169263a21a23e49260048083019391928290030181600087803b158015610a4b57600080fd5b505af1158015610a5f573d6000803e3d6000fd5b505050506040513d601f1
9601f82011682018060405250810190610a839190611071565b6004819055600254825173ffffffffffffffffffffffffffffffffffffffff9091169163bec4c08c918490600090610abd57610abd6112b3565b60200260200101516040518363ffffffff1660e01b8152600401610b0192919091825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b600060405180830381600087803b158015610b1b57600080fd5b505af1158015610b2f573d6000803e3d6000fd5b5050505050565b610b3e610cd8565b6040805160c08082018352600454825260055463ffffffff808216602080860191825261ffff640100000000850481168789019081526601000000000000909504841660608089019182526006546080808b0191825260075460ff16151560a0808d019182528d519b8c018e5292518b528b518b8801529851909416898c0152945186169088015251909316928501929092528551918201909552905115158152919260009290820190610bf190610dd9565b90526002546040517f9b1c385e00000000000000000000000000000000000000000000000000000000815291925073ffffffffffffffffffffffffffffffffffffffff1690639b1c385e90610c4a90849060040161124e565b602060405180830381600087803b158015610c6457600080fd5b505af1158015610c78573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610c9c9190611071565b6009555050565b610cab610cd8565b610cb481610e95565b50565b60088181548110610cc757600080fd5b600091825260209091200154905081565b60005473ffffffffffffffffffffffffffffffffffffffff163314610d59576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016103c9565b565b6009548214610dc6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265717565737420494420697320696e636f727265637400000000000000000060448201526064016103c9565b8051610355906008906020840190610f8b565b60607f92fd13387c7fe7befbc38d303d6468778fb9731bc4583f17d92989c6fcfdeaaa82604051602401610e1291511515815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bfffffffffffffffffffff
fffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009093169290921790915292915050565b73ffffffffffffffffffffffffffffffffffffffff8116331415610f15576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016103c9565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b828054828255906000526020600020908101928215610fc6579160200282015b82811115610fc6578251825591602001919060010190610fab565b50610fd2929150610fd6565b5090565b5b80821115610fd25760008155600101610fd7565b803573ffffffffffffffffffffffffffffffffffffffff8116811461100f57600080fd5b919050565b60006020828403121561102657600080fd5b61102f82610feb565b9392505050565b60006020828403121561104857600080fd5b8151801515811461102f57600080fd5b60006020828403121561106a57600080fd5b5035919050565b60006020828403121561108357600080fd5b5051919050565b6000806040838503121561109d57600080fd5b823591506110ad60208401610feb565b90509250929050565b600080604083850312156110c957600080fd5b8235915060208084013567ffffffffffffffff808211156110e957600080fd5b818601915086601f8301126110fd57600080fd5b81358181111561110f5761110f6112e2565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f83011681018181108582111715611152576111526112e2565b604052828152858101935084860182860187018b101561117157600080fd5b600095505b83861015611194578035855260019590950194938601938601611176565b508096505050505050509250929050565b6000815180845260005b818110156111cb576020818501810151868301820152016111af565b818111156111dd576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b73ffffffffffffffffffffffffffffffffffffffff84168152826020820152606060408201526000611
24560608301846111a5565b95945050505050565b60208152815160208201526020820151604082015261ffff60408301511660608201526000606083015163ffffffff80821660808501528060808601511660a0850152505060a083015160c0808401526112ab60e08401826111a5565b949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFV2PlusSingleConsumerExampleABI = VRFV2PlusSingleConsumerExampleMetaData.ABI + +var VRFV2PlusSingleConsumerExampleBin = VRFV2PlusSingleConsumerExampleMetaData.Bin + +func DeployVRFV2PlusSingleConsumerExample(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator common.Address, link common.Address, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte, nativePayment bool) (common.Address, *types.Transaction, *VRFV2PlusSingleConsumerExample, error) { + parsed, err := VRFV2PlusSingleConsumerExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2PlusSingleConsumerExampleBin), backend, vrfCoordinator, link, callbackGasLimit, requestConfirmations, numWords, keyHash, nativePayment) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2PlusSingleConsumerExample{address: address, abi: *parsed, VRFV2PlusSingleConsumerExampleCaller: VRFV2PlusSingleConsumerExampleCaller{contract: contract}, VRFV2PlusSingleConsumerExampleTransactor: VRFV2PlusSingleConsumerExampleTransactor{contract: contract}, VRFV2PlusSingleConsumerExampleFilterer: VRFV2PlusSingleConsumerExampleFilterer{contract: contract}}, nil +} + +type VRFV2PlusSingleConsumerExample struct { + address common.Address + abi abi.ABI + 
VRFV2PlusSingleConsumerExampleCaller + VRFV2PlusSingleConsumerExampleTransactor + VRFV2PlusSingleConsumerExampleFilterer +} + +type VRFV2PlusSingleConsumerExampleCaller struct { + contract *bind.BoundContract +} + +type VRFV2PlusSingleConsumerExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFV2PlusSingleConsumerExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFV2PlusSingleConsumerExampleSession struct { + Contract *VRFV2PlusSingleConsumerExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2PlusSingleConsumerExampleCallerSession struct { + Contract *VRFV2PlusSingleConsumerExampleCaller + CallOpts bind.CallOpts +} + +type VRFV2PlusSingleConsumerExampleTransactorSession struct { + Contract *VRFV2PlusSingleConsumerExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2PlusSingleConsumerExampleRaw struct { + Contract *VRFV2PlusSingleConsumerExample +} + +type VRFV2PlusSingleConsumerExampleCallerRaw struct { + Contract *VRFV2PlusSingleConsumerExampleCaller +} + +type VRFV2PlusSingleConsumerExampleTransactorRaw struct { + Contract *VRFV2PlusSingleConsumerExampleTransactor +} + +func NewVRFV2PlusSingleConsumerExample(address common.Address, backend bind.ContractBackend) (*VRFV2PlusSingleConsumerExample, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2PlusSingleConsumerExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2PlusSingleConsumerExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2PlusSingleConsumerExample{address: address, abi: abi, VRFV2PlusSingleConsumerExampleCaller: VRFV2PlusSingleConsumerExampleCaller{contract: contract}, VRFV2PlusSingleConsumerExampleTransactor: VRFV2PlusSingleConsumerExampleTransactor{contract: contract}, VRFV2PlusSingleConsumerExampleFilterer: VRFV2PlusSingleConsumerExampleFilterer{contract: contract}}, nil +} + +func NewVRFV2PlusSingleConsumerExampleCaller(address 
common.Address, caller bind.ContractCaller) (*VRFV2PlusSingleConsumerExampleCaller, error) { + contract, err := bindVRFV2PlusSingleConsumerExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusSingleConsumerExampleCaller{contract: contract}, nil +} + +func NewVRFV2PlusSingleConsumerExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2PlusSingleConsumerExampleTransactor, error) { + contract, err := bindVRFV2PlusSingleConsumerExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusSingleConsumerExampleTransactor{contract: contract}, nil +} + +func NewVRFV2PlusSingleConsumerExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2PlusSingleConsumerExampleFilterer, error) { + contract, err := bindVRFV2PlusSingleConsumerExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2PlusSingleConsumerExampleFilterer{contract: contract}, nil +} + +func bindVRFV2PlusSingleConsumerExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2PlusSingleConsumerExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusSingleConsumerExample.Contract.VRFV2PlusSingleConsumerExampleCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.VRFV2PlusSingleConsumerExampleTransactor.contract.Transfer(opts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.VRFV2PlusSingleConsumerExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusSingleConsumerExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.contract.Transfer(opts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusSingleConsumerExample.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) Owner() (common.Address, error) { + return _VRFV2PlusSingleConsumerExample.Contract.Owner(&_VRFV2PlusSingleConsumerExample.CallOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleCallerSession) Owner() (common.Address, error) { + return _VRFV2PlusSingleConsumerExample.Contract.Owner(&_VRFV2PlusSingleConsumerExample.CallOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleCaller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusSingleConsumerExample.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFV2PlusSingleConsumerExample.Contract.SRandomWords(&_VRFV2PlusSingleConsumerExample.CallOpts, arg0) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleCallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFV2PlusSingleConsumerExample.Contract.SRandomWords(&_VRFV2PlusSingleConsumerExample.CallOpts, arg0) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleCaller) SRequestConfig(opts *bind.CallOpts) (SRequestConfig, + + error) { + var out []interface{} + err := _VRFV2PlusSingleConsumerExample.contract.Call(opts, &out, "s_requestConfig") + + 
outstruct := new(SRequestConfig) + if err != nil { + return *outstruct, err + } + + outstruct.SubId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.CallbackGasLimit = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.RequestConfirmations = *abi.ConvertType(out[2], new(uint16)).(*uint16) + outstruct.NumWords = *abi.ConvertType(out[3], new(uint32)).(*uint32) + outstruct.KeyHash = *abi.ConvertType(out[4], new([32]byte)).(*[32]byte) + outstruct.NativePayment = *abi.ConvertType(out[5], new(bool)).(*bool) + + return *outstruct, err + +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) SRequestConfig() (SRequestConfig, + + error) { + return _VRFV2PlusSingleConsumerExample.Contract.SRequestConfig(&_VRFV2PlusSingleConsumerExample.CallOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleCallerSession) SRequestConfig() (SRequestConfig, + + error) { + return _VRFV2PlusSingleConsumerExample.Contract.SRequestConfig(&_VRFV2PlusSingleConsumerExample.CallOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleCaller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusSingleConsumerExample.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) SRequestId() (*big.Int, error) { + return _VRFV2PlusSingleConsumerExample.Contract.SRequestId(&_VRFV2PlusSingleConsumerExample.CallOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleCallerSession) SRequestId() (*big.Int, error) { + return _VRFV2PlusSingleConsumerExample.Contract.SRequestId(&_VRFV2PlusSingleConsumerExample.CallOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleCaller) SVrfCoordinator(opts *bind.CallOpts) 
(common.Address, error) { + var out []interface{} + err := _VRFV2PlusSingleConsumerExample.contract.Call(opts, &out, "s_vrfCoordinator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusSingleConsumerExample.Contract.SVrfCoordinator(&_VRFV2PlusSingleConsumerExample.CallOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleCallerSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusSingleConsumerExample.Contract.SVrfCoordinator(&_VRFV2PlusSingleConsumerExample.CallOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.AcceptOwnership(&_VRFV2PlusSingleConsumerExample.TransactOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.AcceptOwnership(&_VRFV2PlusSingleConsumerExample.TransactOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactor) FundAndRequestRandomWords(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.contract.Transact(opts, "fundAndRequestRandomWords", amount) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) FundAndRequestRandomWords(amount *big.Int) (*types.Transaction, error) { + return 
_VRFV2PlusSingleConsumerExample.Contract.FundAndRequestRandomWords(&_VRFV2PlusSingleConsumerExample.TransactOpts, amount) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactorSession) FundAndRequestRandomWords(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.FundAndRequestRandomWords(&_VRFV2PlusSingleConsumerExample.TransactOpts, amount) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.RawFulfillRandomWords(&_VRFV2PlusSingleConsumerExample.TransactOpts, requestId, randomWords) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.RawFulfillRandomWords(&_VRFV2PlusSingleConsumerExample.TransactOpts, requestId, randomWords) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactor) RequestRandomWords(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.contract.Transact(opts, "requestRandomWords") +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) RequestRandomWords() (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.RequestRandomWords(&_VRFV2PlusSingleConsumerExample.TransactOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactorSession) 
RequestRandomWords() (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.RequestRandomWords(&_VRFV2PlusSingleConsumerExample.TransactOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactor) SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.contract.Transact(opts, "setCoordinator", _vrfCoordinator) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.SetCoordinator(&_VRFV2PlusSingleConsumerExample.TransactOpts, _vrfCoordinator) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactorSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.SetCoordinator(&_VRFV2PlusSingleConsumerExample.TransactOpts, _vrfCoordinator) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactor) Subscribe(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.contract.Transact(opts, "subscribe") +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) Subscribe() (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.Subscribe(&_VRFV2PlusSingleConsumerExample.TransactOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactorSession) Subscribe() (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.Subscribe(&_VRFV2PlusSingleConsumerExample.TransactOpts) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactor) TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.contract.Transact(opts, 
"topUpSubscription", amount) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.TopUpSubscription(&_VRFV2PlusSingleConsumerExample.TransactOpts, amount) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactorSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.TopUpSubscription(&_VRFV2PlusSingleConsumerExample.TransactOpts, amount) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.TransferOwnership(&_VRFV2PlusSingleConsumerExample.TransactOpts, to) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.TransferOwnership(&_VRFV2PlusSingleConsumerExample.TransactOpts, to) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactor) Unsubscribe(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.contract.Transact(opts, "unsubscribe", to) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) Unsubscribe(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.Unsubscribe(&_VRFV2PlusSingleConsumerExample.TransactOpts, to) +} + +func (_VRFV2PlusSingleConsumerExample 
*VRFV2PlusSingleConsumerExampleTransactorSession) Unsubscribe(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.Unsubscribe(&_VRFV2PlusSingleConsumerExample.TransactOpts, to) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactor) Withdraw(opts *bind.TransactOpts, amount *big.Int, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.contract.Transact(opts, "withdraw", amount, to) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleSession) Withdraw(amount *big.Int, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.Withdraw(&_VRFV2PlusSingleConsumerExample.TransactOpts, amount, to) +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleTransactorSession) Withdraw(amount *big.Int, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusSingleConsumerExample.Contract.Withdraw(&_VRFV2PlusSingleConsumerExample.TransactOpts, amount, to) +} + +type VRFV2PlusSingleConsumerExampleOwnershipTransferRequestedIterator struct { + Event *VRFV2PlusSingleConsumerExampleOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusSingleConsumerExampleOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusSingleConsumerExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusSingleConsumerExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return 
false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusSingleConsumerExampleOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusSingleConsumerExampleOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusSingleConsumerExampleOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusSingleConsumerExampleOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusSingleConsumerExample.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusSingleConsumerExampleOwnershipTransferRequestedIterator{contract: _VRFV2PlusSingleConsumerExample.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusSingleConsumerExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusSingleConsumerExample.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + 
return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusSingleConsumerExampleOwnershipTransferRequested) + if err := _VRFV2PlusSingleConsumerExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusSingleConsumerExampleOwnershipTransferRequested, error) { + event := new(VRFV2PlusSingleConsumerExampleOwnershipTransferRequested) + if err := _VRFV2PlusSingleConsumerExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusSingleConsumerExampleOwnershipTransferredIterator struct { + Event *VRFV2PlusSingleConsumerExampleOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusSingleConsumerExampleOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusSingleConsumerExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusSingleConsumerExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = 
true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusSingleConsumerExampleOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusSingleConsumerExampleOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusSingleConsumerExampleOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusSingleConsumerExampleOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusSingleConsumerExample.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusSingleConsumerExampleOwnershipTransferredIterator{contract: _VRFV2PlusSingleConsumerExample.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusSingleConsumerExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusSingleConsumerExample.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(VRFV2PlusSingleConsumerExampleOwnershipTransferred) + if err := _VRFV2PlusSingleConsumerExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExampleFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2PlusSingleConsumerExampleOwnershipTransferred, error) { + event := new(VRFV2PlusSingleConsumerExampleOwnershipTransferred) + if err := _VRFV2PlusSingleConsumerExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type SRequestConfig struct { + SubId *big.Int + CallbackGasLimit uint32 + RequestConfirmations uint16 + NumWords uint32 + KeyHash [32]byte + NativePayment bool +} + +func (_VRFV2PlusSingleConsumerExample *VRFV2PlusSingleConsumerExample) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2PlusSingleConsumerExample.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2PlusSingleConsumerExample.ParseOwnershipTransferRequested(log) + case _VRFV2PlusSingleConsumerExample.abi.Events["OwnershipTransferred"].ID: + return _VRFV2PlusSingleConsumerExample.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2PlusSingleConsumerExampleOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFV2PlusSingleConsumerExampleOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_VRFV2PlusSingleConsumerExample 
*VRFV2PlusSingleConsumerExample) Address() common.Address { + return _VRFV2PlusSingleConsumerExample.address +} + +type VRFV2PlusSingleConsumerExampleInterface interface { + Owner(opts *bind.CallOpts) (common.Address, error) + + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequestConfig(opts *bind.CallOpts) (SRequestConfig, + + error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + FundAndRequestRandomWords(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts) (*types.Transaction, error) + + SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) + + Subscribe(opts *bind.TransactOpts) (*types.Transaction, error) + + TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Unsubscribe(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Withdraw(opts *bind.TransactOpts, amount *big.Int, to common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusSingleConsumerExampleOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusSingleConsumerExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusSingleConsumerExampleOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) 
(*VRFV2PlusSingleConsumerExampleOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusSingleConsumerExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2PlusSingleConsumerExampleOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_v2plus_sub_owner/vrf_v2plus_sub_owner.go b/core/gethwrappers/generated/vrf_v2plus_sub_owner/vrf_v2plus_sub_owner.go new file mode 100644 index 00000000..f76961f5 --- /dev/null +++ b/core/gethwrappers/generated/vrf_v2plus_sub_owner/vrf_v2plus_sub_owner.go @@ -0,0 +1,652 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrf_v2plus_sub_owner + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2PlusExternalSubOwnerExampleMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"}],\"name\":\"OnlyOwnerOrCoordinator\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroAddress\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"c
allbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"bool\",\"name\":\"nativePayment\",\"type\":\"bool\"}],\"name\":\"requestRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_vrfCoordinator\",\"outputs\":[{\"internalType\":\"contractIVRFCoordinatorV2Plus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"}],\"name\":\"setCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50604051610d68380380610d6883398101604081905261002f916101c1565b8133806000816100865760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156100b6576100b6816100fb565b5050600280546001600160a01b039384166001600160a01b031991821617909155600380549490931693811693909317909155506006805490911633179055506101f4565b6001600160a01b0381163314156101545760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161007d565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b03811681146101bc57600080fd5b919050565b600080604083850312156101d457600080fd5b6101dd836101a5565b91506101eb602084016101a5565b90509250929050565b610b65806102036000396000f3fe608060405234801561001057600080fd5b50600436106100a35760003560e01c80638ea9811711610076578063e89e106a1161005b578063e89e106a1461014f578063f2fde38b14610166578063f6eaffc81461017957600080fd5b80638ea981171461011c5780639eccacf61461012f57600080fd5b80631fe543e3146100a85780635b6c5de8146100bd57806379ba5097146100d05780638da5cb5b146100d8575b600080fd5b6100bb6100b6366004610902565b61018c565b005b6100bb6100cb3660046109f1565b610212565b6100bb610325565b60005473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100bb61012a366004610893565b610422565b6002546100f29073ffffffffffffffffffffffffffffffffffffffff1681565b61015860055481565b604051908152602001610113565b6100bb610174366004610893565b61052d565b6101586101873660046108d0565b610541565b60025473ffffffffffffffffffffffffffffffffffffffff163314610204576002546040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273fffffffffffffff
fffffffffffffffffffffffff90911660248201526044015b60405180910390fd5b61020e8282610562565b5050565b61021a6105e5565b60006040518060c001604052808481526020018881526020018661ffff1681526020018763ffffffff1681526020018563ffffffff16815260200161026e6040518060200160405280861515815250610668565b90526002546040517f9b1c385e00000000000000000000000000000000000000000000000000000000815291925073ffffffffffffffffffffffffffffffffffffffff1690639b1c385e906102c7908490600401610a69565b602060405180830381600087803b1580156102e157600080fd5b505af11580156102f5573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061031991906108e9565b60055550505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146103a6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016101fb565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60005473ffffffffffffffffffffffffffffffffffffffff163314801590610462575060025473ffffffffffffffffffffffffffffffffffffffff163314155b156104e6573361048760005473ffffffffffffffffffffffffffffffffffffffff1690565b6002546040517f061db9c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff938416600482015291831660248301529190911660448201526064016101fb565b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b6105356105e5565b61053e81610724565b50565b6004818154811061055157600080fd5b600091825260209091200154905081565b60055482146105cd576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265717565737420494420697320696e636f727265637400000000000000000060448201526064016101f
b565b80516105e090600490602084019061081a565b505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610666576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016101fb565b565b60607f92fd13387c7fe7befbc38d303d6468778fb9731bc4583f17d92989c6fcfdeaaa826040516024016106a191511515815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009093169290921790915292915050565b73ffffffffffffffffffffffffffffffffffffffff81163314156107a4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016101fb565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b828054828255906000526020600020908101928215610855579160200282015b8281111561085557825182559160200191906001019061083a565b50610861929150610865565b5090565b5b808211156108615760008155600101610866565b803563ffffffff8116811461088e57600080fd5b919050565b6000602082840312156108a557600080fd5b813573ffffffffffffffffffffffffffffffffffffffff811681146108c957600080fd5b9392505050565b6000602082840312156108e257600080fd5b5035919050565b6000602082840312156108fb57600080fd5b5051919050565b6000806040838503121561091557600080fd5b8235915060208084013567ffffffffffffffff8082111561093557600080fd5b818601915086601f83011261094957600080fd5b81358181111561095b5761095b610b29565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f8301168101818110858211171561099e5761099e610b29565b604052828152858101935084860182860187018b1
0156109bd57600080fd5b600095505b838610156109e05780358552600195909501949386019386016109c2565b508096505050505050509250929050565b60008060008060008060c08789031215610a0a57600080fd5b86359550610a1a6020880161087a565b9450604087013561ffff81168114610a3157600080fd5b9350610a3f6060880161087a565b92506080870135915060a08701358015158114610a5b57600080fd5b809150509295509295509295565b6000602080835283518184015280840151604084015261ffff6040850151166060840152606084015163ffffffff80821660808601528060808701511660a0860152505060a084015160c08085015280518060e086015260005b81811015610ae05782810184015186820161010001528301610ac3565b81811115610af357600061010083880101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169390930161010001949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFV2PlusExternalSubOwnerExampleABI = VRFV2PlusExternalSubOwnerExampleMetaData.ABI + +var VRFV2PlusExternalSubOwnerExampleBin = VRFV2PlusExternalSubOwnerExampleMetaData.Bin + +func DeployVRFV2PlusExternalSubOwnerExample(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator common.Address, link common.Address) (common.Address, *types.Transaction, *VRFV2PlusExternalSubOwnerExample, error) { + parsed, err := VRFV2PlusExternalSubOwnerExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2PlusExternalSubOwnerExampleBin), backend, vrfCoordinator, link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2PlusExternalSubOwnerExample{address: address, abi: *parsed, VRFV2PlusExternalSubOwnerExampleCaller: VRFV2PlusExternalSubOwnerExampleCaller{contract: contract}, VRFV2PlusExternalSubOwnerExampleTransactor: 
VRFV2PlusExternalSubOwnerExampleTransactor{contract: contract}, VRFV2PlusExternalSubOwnerExampleFilterer: VRFV2PlusExternalSubOwnerExampleFilterer{contract: contract}}, nil +} + +type VRFV2PlusExternalSubOwnerExample struct { + address common.Address + abi abi.ABI + VRFV2PlusExternalSubOwnerExampleCaller + VRFV2PlusExternalSubOwnerExampleTransactor + VRFV2PlusExternalSubOwnerExampleFilterer +} + +type VRFV2PlusExternalSubOwnerExampleCaller struct { + contract *bind.BoundContract +} + +type VRFV2PlusExternalSubOwnerExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFV2PlusExternalSubOwnerExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFV2PlusExternalSubOwnerExampleSession struct { + Contract *VRFV2PlusExternalSubOwnerExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2PlusExternalSubOwnerExampleCallerSession struct { + Contract *VRFV2PlusExternalSubOwnerExampleCaller + CallOpts bind.CallOpts +} + +type VRFV2PlusExternalSubOwnerExampleTransactorSession struct { + Contract *VRFV2PlusExternalSubOwnerExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2PlusExternalSubOwnerExampleRaw struct { + Contract *VRFV2PlusExternalSubOwnerExample +} + +type VRFV2PlusExternalSubOwnerExampleCallerRaw struct { + Contract *VRFV2PlusExternalSubOwnerExampleCaller +} + +type VRFV2PlusExternalSubOwnerExampleTransactorRaw struct { + Contract *VRFV2PlusExternalSubOwnerExampleTransactor +} + +func NewVRFV2PlusExternalSubOwnerExample(address common.Address, backend bind.ContractBackend) (*VRFV2PlusExternalSubOwnerExample, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2PlusExternalSubOwnerExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2PlusExternalSubOwnerExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2PlusExternalSubOwnerExample{address: address, abi: abi, VRFV2PlusExternalSubOwnerExampleCaller: 
VRFV2PlusExternalSubOwnerExampleCaller{contract: contract}, VRFV2PlusExternalSubOwnerExampleTransactor: VRFV2PlusExternalSubOwnerExampleTransactor{contract: contract}, VRFV2PlusExternalSubOwnerExampleFilterer: VRFV2PlusExternalSubOwnerExampleFilterer{contract: contract}}, nil +} + +func NewVRFV2PlusExternalSubOwnerExampleCaller(address common.Address, caller bind.ContractCaller) (*VRFV2PlusExternalSubOwnerExampleCaller, error) { + contract, err := bindVRFV2PlusExternalSubOwnerExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusExternalSubOwnerExampleCaller{contract: contract}, nil +} + +func NewVRFV2PlusExternalSubOwnerExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2PlusExternalSubOwnerExampleTransactor, error) { + contract, err := bindVRFV2PlusExternalSubOwnerExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusExternalSubOwnerExampleTransactor{contract: contract}, nil +} + +func NewVRFV2PlusExternalSubOwnerExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2PlusExternalSubOwnerExampleFilterer, error) { + contract, err := bindVRFV2PlusExternalSubOwnerExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2PlusExternalSubOwnerExampleFilterer{contract: contract}, nil +} + +func bindVRFV2PlusExternalSubOwnerExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2PlusExternalSubOwnerExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return 
_VRFV2PlusExternalSubOwnerExample.Contract.VRFV2PlusExternalSubOwnerExampleCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.VRFV2PlusExternalSubOwnerExampleTransactor.contract.Transfer(opts) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.VRFV2PlusExternalSubOwnerExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusExternalSubOwnerExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.contract.Transfer(opts) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusExternalSubOwnerExample.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleSession) Owner() (common.Address, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.Owner(&_VRFV2PlusExternalSubOwnerExample.CallOpts) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleCallerSession) Owner() (common.Address, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.Owner(&_VRFV2PlusExternalSubOwnerExample.CallOpts) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleCaller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusExternalSubOwnerExample.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.SRandomWords(&_VRFV2PlusExternalSubOwnerExample.CallOpts, arg0) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleCallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.SRandomWords(&_VRFV2PlusExternalSubOwnerExample.CallOpts, arg0) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleCaller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := 
_VRFV2PlusExternalSubOwnerExample.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleSession) SRequestId() (*big.Int, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.SRequestId(&_VRFV2PlusExternalSubOwnerExample.CallOpts) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleCallerSession) SRequestId() (*big.Int, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.SRequestId(&_VRFV2PlusExternalSubOwnerExample.CallOpts) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleCaller) SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusExternalSubOwnerExample.contract.Call(opts, &out, "s_vrfCoordinator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.SVrfCoordinator(&_VRFV2PlusExternalSubOwnerExample.CallOpts) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleCallerSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.SVrfCoordinator(&_VRFV2PlusExternalSubOwnerExample.CallOpts) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleSession) AcceptOwnership() (*types.Transaction, error) { + return 
_VRFV2PlusExternalSubOwnerExample.Contract.AcceptOwnership(&_VRFV2PlusExternalSubOwnerExample.TransactOpts) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.AcceptOwnership(&_VRFV2PlusExternalSubOwnerExample.TransactOpts) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.RawFulfillRandomWords(&_VRFV2PlusExternalSubOwnerExample.TransactOpts, requestId, randomWords) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.RawFulfillRandomWords(&_VRFV2PlusExternalSubOwnerExample.TransactOpts, requestId, randomWords) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleTransactor) RequestRandomWords(opts *bind.TransactOpts, subId *big.Int, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte, nativePayment bool) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.contract.Transact(opts, "requestRandomWords", subId, callbackGasLimit, requestConfirmations, numWords, keyHash, nativePayment) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleSession) RequestRandomWords(subId *big.Int, callbackGasLimit uint32, requestConfirmations 
uint16, numWords uint32, keyHash [32]byte, nativePayment bool) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.RequestRandomWords(&_VRFV2PlusExternalSubOwnerExample.TransactOpts, subId, callbackGasLimit, requestConfirmations, numWords, keyHash, nativePayment) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleTransactorSession) RequestRandomWords(subId *big.Int, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte, nativePayment bool) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.RequestRandomWords(&_VRFV2PlusExternalSubOwnerExample.TransactOpts, subId, callbackGasLimit, requestConfirmations, numWords, keyHash, nativePayment) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleTransactor) SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.contract.Transact(opts, "setCoordinator", _vrfCoordinator) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.SetCoordinator(&_VRFV2PlusExternalSubOwnerExample.TransactOpts, _vrfCoordinator) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleTransactorSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.SetCoordinator(&_VRFV2PlusExternalSubOwnerExample.TransactOpts, _vrfCoordinator) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFV2PlusExternalSubOwnerExample 
*VRFV2PlusExternalSubOwnerExampleSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.TransferOwnership(&_VRFV2PlusExternalSubOwnerExample.TransactOpts, to) +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusExternalSubOwnerExample.Contract.TransferOwnership(&_VRFV2PlusExternalSubOwnerExample.TransactOpts, to) +} + +type VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequestedIterator struct { + Event *VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func 
(_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusExternalSubOwnerExample.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequestedIterator{contract: _VRFV2PlusExternalSubOwnerExample.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusExternalSubOwnerExample.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequested) + if err := _VRFV2PlusExternalSubOwnerExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return 
err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequested, error) { + event := new(VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequested) + if err := _VRFV2PlusExternalSubOwnerExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusExternalSubOwnerExampleOwnershipTransferredIterator struct { + Event *VRFV2PlusExternalSubOwnerExampleOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusExternalSubOwnerExampleOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusExternalSubOwnerExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusExternalSubOwnerExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusExternalSubOwnerExampleOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusExternalSubOwnerExampleOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusExternalSubOwnerExampleOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusExternalSubOwnerExample 
*VRFV2PlusExternalSubOwnerExampleFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusExternalSubOwnerExampleOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusExternalSubOwnerExample.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusExternalSubOwnerExampleOwnershipTransferredIterator{contract: _VRFV2PlusExternalSubOwnerExample.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExampleFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusExternalSubOwnerExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusExternalSubOwnerExample.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusExternalSubOwnerExampleOwnershipTransferred) + if err := _VRFV2PlusExternalSubOwnerExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusExternalSubOwnerExample 
*VRFV2PlusExternalSubOwnerExampleFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2PlusExternalSubOwnerExampleOwnershipTransferred, error) { + event := new(VRFV2PlusExternalSubOwnerExampleOwnershipTransferred) + if err := _VRFV2PlusExternalSubOwnerExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExample) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2PlusExternalSubOwnerExample.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2PlusExternalSubOwnerExample.ParseOwnershipTransferRequested(log) + case _VRFV2PlusExternalSubOwnerExample.abi.Events["OwnershipTransferred"].ID: + return _VRFV2PlusExternalSubOwnerExample.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFV2PlusExternalSubOwnerExampleOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_VRFV2PlusExternalSubOwnerExample *VRFV2PlusExternalSubOwnerExample) Address() common.Address { + return _VRFV2PlusExternalSubOwnerExample.address +} + +type VRFV2PlusExternalSubOwnerExampleInterface interface { + Owner(opts *bind.CallOpts) (common.Address, error) + + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) 
(*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, subId *big.Int, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte, nativePayment bool) (*types.Transaction, error) + + SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusExternalSubOwnerExampleOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusExternalSubOwnerExampleOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusExternalSubOwnerExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2PlusExternalSubOwnerExampleOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrf_v2plus_upgraded_version/vrf_v2plus_upgraded_version.go b/core/gethwrappers/generated/vrf_v2plus_upgraded_version/vrf_v2plus_upgraded_version.go new file mode 100644 index 00000000..346707fa --- /dev/null +++ b/core/gethwrappers/generated/vrf_v2plus_upgraded_version/vrf_v2plus_upgraded_version.go @@ -0,0 +1,3574 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_v2plus_upgraded_version + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type VRFCoordinatorV2PlusUpgradedVersionRequestCommitment struct { + BlockNum uint64 + SubId *big.Int + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address + ExtraArgs []byte +} + +type VRFProof struct { + Pk [2]*big.Int + Gamma [2]*big.Int + C *big.Int + S *big.Int + Seed *big.Int + UWitness common.Address + CGammaWitness [2]*big.Int + SHashWitness [2]*big.Int + ZInv *big.Int +} + +type VRFV2PlusClientRandomWordsRequest struct { + KeyHash [32]byte + SubId *big.Int + RequestConfirmations uint16 + CallbackGasLimit uint32 + NumWords uint32 + ExtraArgs []byte +} + +var VRFCoordinatorV2PlusUpgradedVersionMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"blockhashStore\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"internalBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"externalBalance\",\"type\":\"uint256\"}],\"name\":\"BalanceInvariantViolated\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNum\",\"type\":\"uint256\"}],\"name\":\"BlockhashNotInStore\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinatorAddress\",\"type\":\"address\"}],\"name\":\"CoordinatorAlreadyRegistered\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinatorAddress\",\"type\":\"address\"}],\"name\":\"CoordinatorNotRegistered\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FailedToSendNative\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FailedToTransferLink\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"have\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"want\",\"type\":\"uint32\"}],\"name\":\"GasLimitTooBig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectCommitment\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InsufficientBalance\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"InvalidConsumer\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidExtraArgsTag\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"linkWei\",\"type\":\"int256\"}],\"name\":\"InvalidLinkWeiPrice\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"transferredValue\",\"type\":\"uint256\"},{\"internalType\":\"uint96\",\"name\":\"expec
tedValue\",\"type\":\"uint96\"}],\"name\":\"InvalidNativeBalance\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"have\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"min\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"max\",\"type\":\"uint16\"}],\"name\":\"InvalidRequestConfirmations\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSubscription\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"requestVersion\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"expectedVersion\",\"type\":\"uint8\"}],\"name\":\"InvalidVersion\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"LinkAlreadySet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"LinkNotSet\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"proposedOwner\",\"type\":\"address\"}],\"name\":\"MustBeRequestedOwner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"MustBeSubOwner\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoCorrespondingRequest\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"NoSuchProvingKey\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"have\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"want\",\"type\":\"uint32\"}],\"name\":\"NumWordsTooBig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableFromLink\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PaymentTooLarge\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PendingRequestExists\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"ProvingKeyAlreadyRegistered\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"Reentrant\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"SubscriptionIDCollisionFound\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyConsumers\",\"type\":\"erro
r\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"nativePremiumPercentage\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"linkPremiumPercentage\",\"type\":\"uint8\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"coordinatorAddress\",\"type\":\"address\"}],\"name\":\"CoordinatorRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"FundsRecovered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newCoordinator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"MigrationCompleted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"NativeFundsRecovered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\
"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"}],\"name\":\"ProvingKeyRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"outputSeed\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint96\",\"name\":\"payment\",\"type\":\"uint96\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"name\":\"RandomWordsFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"preSeed\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"extraArgs\",\"type\":\"bytes\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"RandomWordsRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true
,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amountLink\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amountNative\",\"type\":\"uint256\"}],\"name\":\"SubscriptionCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"SubscriptionCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"SubscriptionFunded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldNativeBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newNativeBalance\",\"type\":\"uint256\"}],\"name\":\"SubscriptionFundedWithNative\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"inde
xed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"BLOCKHASH_STORE\",\"outputs\":[{\"internalType\":\"contractBlockhashStoreInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI_NATIVE_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_CONSUMERS\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_NUM_WORDS\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_REQUEST_CONFIRMATIONS\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\
":\"acceptSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"addConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"cancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"createSubscription\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"internalType\":\"structVRF.Proof\",\"name\":\"proof\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"blockNum\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"bytes
\",\"name\":\"extraArgs\",\"type\":\"bytes\"}],\"internalType\":\"structVRFCoordinatorV2PlusUpgradedVersion.RequestCommitment\",\"name\":\"rc\",\"type\":\"tuple\"},{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"name\":\"fulfillRandomWords\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"fundSubscriptionWithNative\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCount\",\"type\":\"uint256\"}],\"name\":\"getActiveSubscriptionIds\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"ids\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRequestConfig\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"},{\"internalType\":\"bytes32[]\",\"name\":\"\",\"type\":\"bytes32[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"getSubscription\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"uint96\",\"name\":\"nativeBalance\",\"type\":\"uint96\"},{\"internalType\":\"uint64\",\"name\":\"reqCount\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"publicKey\",\"type\":\"uint256[2]\"}],\"name\":\"hashOfKey\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutabil
ity\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"newCoordinator\",\"type\":\"address\"}],\"name\":\"migrate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"migrationVersion\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"encodedData\",\"type\":\"bytes\"}],\"name\":\"onMigration\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"ownerCancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"pendingRequestExists\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"recoverFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"addresspayable\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"recoverNativeFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address
\",\"name\":\"target\",\"type\":\"address\"}],\"name\":\"registerMigratableCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"publicProvingKey\",\"type\":\"uint256[2]\"}],\"name\":\"registerProvingKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"removeConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"extraArgs\",\"type\":\"bytes\"}],\"internalType\":\"structVRFV2PlusClient.RandomWordsRequest\",\"name\":\"req\",\"type\":\"tuple\"}],\"name\":\"requestRandomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"requestSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_config\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"bool\",\"name\":\"reentrancyLock\",\"type\":\"bool\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSe
conds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeNativePPM\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkDiscountPPM\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"nativePremiumPercentage\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"linkPremiumPercentage\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_currentSubNonce\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_provingKeyHashes\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requestCommitments\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_totalBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_totalNativeBalance\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"minimumRequestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"maxGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"gasAfterPaymentCalculation\",\"type\":\"uint32\"},{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"internalType\":\
"uint32\",\"name\":\"fulfillmentFlatFeeNativePPM\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkDiscountPPM\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"nativePremiumPercentage\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"linkPremiumPercentage\",\"type\":\"uint8\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"linkNativeFeed\",\"type\":\"address\"}],\"name\":\"setPLIAndPLINativeFeed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"addresspayable\",\"name\":\"recipient\",\"type\":\"address\"}],\"name\":\"withdrawNative\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60a06040523480156200001157600080fd5b5060405162005dec38038062005dec833981016040819052620000349162000183565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000d7565b50505060601b6001600160601b031916608052620001b5565b6001600160a01b038116331415620001325760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000602082840312156200019657600080fd5b81516001600160a01b0381168114620001ae57600080fd5b9392505050565b60805160601c615c11620001db600039600081816104f401526133f80152615c116000f3fe6080604052600436106101e05760003560e01c8062012291146101e5578063088070f5146102125780630ae09540146102e057806315c48b841461030257806318e3dd271461032a5780631b6b6d2314610369578063294daa49146103965780632f622e6b146103b2578063301f42e9146103d2578063405b84fa146103f257806340d6bb821461041257806341af6c871461043d57806351cff8d91461046d5780635d06b4ab1461048d57806364d51a2a146104ad57806365982744146104c2578063689c4517146104e257806372e9d5651461051657806379ba5097146105365780637bce14d11461054b5780638402595e1461056b57806386fe91c71461058b5780638da5cb5b146105ab57806395b55cfc146105c95780639b1c385e146105dc5780639d40a6fd1461060a578063a21a23e414610637578063a4c0ed361461064c578063a63e0bfb1461066c578063aa433aff1461068c578063aefb212f146106ac578063b2a7cac5146106d9578063bec4c08c146106f9578063caf70c4a14610719578063cb63179714610739578063ce3f471914610759578063d98e620e1461076c578063dac83d291461078c578063dc311dd3146107ac578063e72f6e30146107dd578063ee9d2d38146107fd578063f2fde38b1461082a575b600080fd5b3480156101f157600080fd5b506101fa61084a565b604051610209939291906156e5565b604
05180910390f35b34801561021e57600080fd5b50600c546102839061ffff81169063ffffffff62010000820481169160ff600160301b8204811692600160381b8304811692600160581b8104821692600160781b8204831692600160981b83041691600160b81b8104821691600160c01b9091041689565b6040805161ffff909a168a5263ffffffff98891660208b01529615159689019690965293861660608801529185166080870152841660a08601529290921660c084015260ff91821660e08401521661010082015261012001610209565b3480156102ec57600080fd5b506103006102fb366004615358565b6108c6565b005b34801561030e57600080fd5b5061031760c881565b60405161ffff9091168152602001610209565b34801561033657600080fd5b50600a5461035190600160601b90046001600160601b031681565b6040516001600160601b039091168152602001610209565b34801561037557600080fd5b50600254610389906001600160a01b031681565b6040516102099190615589565b3480156103a257600080fd5b5060405160028152602001610209565b3480156103be57600080fd5b506103006103cd366004614e72565b61090e565b3480156103de57600080fd5b506103516103ed366004615029565b610a5a565b3480156103fe57600080fd5b5061030061040d366004615358565b610ef0565b34801561041e57600080fd5b506104286101f481565b60405163ffffffff9091168152602001610209565b34801561044957600080fd5b5061045d61045836600461533f565b6112c1565b6040519015158152602001610209565b34801561047957600080fd5b50610300610488366004614e72565b611462565b34801561049957600080fd5b506103006104a8366004614e72565b6115f0565b3480156104b957600080fd5b50610317606481565b3480156104ce57600080fd5b506103006104dd366004614e8f565b6116a7565b3480156104ee57600080fd5b506103897f000000000000000000000000000000000000000000000000000000000000000081565b34801561052257600080fd5b50600354610389906001600160a01b031681565b34801561054257600080fd5b50610300611707565b34801561055757600080fd5b50610300610566366004614f23565b6117b1565b34801561057757600080fd5b50610300610586366004614e72565b6118aa565b34801561059757600080fd5b50600a54610351906001600160601b031681565b3480156105b757600080fd5b506000546001600160a01b0316610389565b6103006105d736600461533f565b6119b6565b3480156105e857600080fd5b506105fc6105f73660046
15117565b611ad7565b604051908152602001610209565b34801561061657600080fd5b5060075461062a906001600160401b031681565b604051610209919061587e565b34801561064357600080fd5b506105fc611e23565b34801561065857600080fd5b50610300610667366004614ec8565b611ff6565b34801561067857600080fd5b5061030061068736600461529e565b612170565b34801561069857600080fd5b506103006106a736600461533f565b612379565b3480156106b857600080fd5b506106cc6106c736600461537d565b6123c1565b6040516102099190615600565b3480156106e557600080fd5b506103006106f436600461533f565b6124c3565b34801561070557600080fd5b50610300610714366004615358565b6125b8565b34801561072557600080fd5b506105fc610734366004614f4b565b6126c4565b34801561074557600080fd5b50610300610754366004615358565b6126f4565b610300610767366004614f9d565b612965565b34801561077857600080fd5b506105fc61078736600461533f565b612c7c565b34801561079857600080fd5b506103006107a7366004615358565b612c9d565b3480156107b857600080fd5b506107cc6107c736600461533f565b612d33565b604051610209959493929190615892565b3480156107e957600080fd5b506103006107f8366004614e72565b612e21565b34801561080957600080fd5b506105fc61081836600461533f565b600f6020526000908152604090205481565b34801561083657600080fd5b50610300610845366004614e72565b612ffc565b600c54600e805460408051602080840282018101909252828152600094859460609461ffff8316946201000090930463ffffffff169391928391908301828280156108b457602002820191906000526020600020905b8154815260200190600101908083116108a0575b50505050509050925092509250909192565b816108d081613010565b6108d8613071565b6108e1836112c1565b156108ff57604051631685ecdd60e31b815260040160405180910390fd5b610909838361309e565b505050565b610916613071565b61091e613252565b600b54600160601b90046001600160601b031661094e57604051631e9acf1760e31b815260040160405180910390fd5b600b8054600160601b90046001600160601b0316908190600c6109718380615a78565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555080600a600c8282829054906101000a90046001600160601b03166109b99190615a78565b92506101000a8154816001600160601b0302191690836001600160601b031602179
055506000826001600160a01b0316826001600160601b031660405160006040518083038185875af1925050503d8060008114610a33576040519150601f19603f3d011682016040523d82523d6000602084013e610a38565b606091505b50509050806109095760405163950b247960e01b815260040160405180910390fd5b6000610a64613071565b60005a90506000610a7586866132a5565b90506000856060015163ffffffff166001600160401b03811115610a9b57610a9b615b83565b604051908082528060200260200182016040528015610ac4578160200160208202803683370190505b50905060005b866060015163ffffffff16811015610b3b57826040015181604051602001610af3929190615613565b6040516020818303038152906040528051906020012060001c828281518110610b1e57610b1e615b6d565b602090810291909101015280610b3381615afc565b915050610aca565b50602080830180516000908152600f9092526040808320839055905190518291631fe543e360e01b91610b739190869060240161576f565b60408051601f198184030181529181526020820180516001600160e01b03166001600160e01b031990941693909317909252600c805460ff60301b1916600160301b1790559089015160808a0151919250600091610bd89163ffffffff169084613510565b600c805460ff60301b1916905560208a810151600090815260069091526040902054909150600160c01b90046001600160401b0316610c188160016159ea565b6020808c0151600090815260069091526040812080546001600160401b0393909316600160c01b026001600160c01b039093169290921790915560a08b01518051610c6590600190615a61565b81518110610c7557610c75615b6d565b602091010151600c5460f89190911c6001149150600090610ca6908a90600160581b900463ffffffff163a8561355c565b90508115610d9e576020808d01516000908152600690915260409020546001600160601b03808316600160601b909204161015610cf657604051631e9acf1760e31b815260040160405180910390fd5b60208c81015160009081526006909152604090208054829190600c90610d2d908490600160601b90046001600160601b0316615a78565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555080600b600c8282829054906101000a90046001600160601b0316610d759190615a0c565b92506101000a8154816001600160601b0302191690836001600160601b03160217905550610e79565b6020808d01516000908152600690915260409020546001600160601b03808316911610156
10ddf57604051631e9acf1760e31b815260040160405180910390fd5b6020808d015160009081526006909152604081208054839290610e0c9084906001600160601b0316615a78565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555080600b60008282829054906101000a90046001600160601b0316610e549190615a0c565b92506101000a8154816001600160601b0302191690836001600160601b031602179055505b8b6020015188602001517f49580fdfd9497e1ed5c1b1cec0495087ae8e3f1267470ec2fb015db32e3d6aa78a604001518488604051610ed6939291909283526001600160601b039190911660208301521515604082015260600190565b60405180910390a3985050505050505050505b9392505050565b610ef8613071565b610f01816135ab565b610f295780604051635428d44960e01b8152600401610f209190615589565b60405180910390fd5b600080600080610f3886612d33565b945094505093509350336001600160a01b0316826001600160a01b031614610f9b5760405162461bcd60e51b81526020600482015260166024820152752737ba1039bab139b1b934b83a34b7b71037bbb732b960511b6044820152606401610f20565b610fa4866112c1565b15610fea5760405162461bcd60e51b815260206004820152601660248201527550656e64696e6720726571756573742065786973747360501b6044820152606401610f20565b60006040518060c00160405280610fff600290565b60ff168152602001888152602001846001600160a01b03168152602001838152602001866001600160601b03168152602001856001600160601b031681525090506000816040516020016110539190615652565b604051602081830303815290604052905061106d88613615565b505060405163ce3f471960e01b81526001600160a01b0388169063ce3f4719906001600160601b038816906110a690859060040161563f565b6000604051808303818588803b1580156110bf57600080fd5b505af11580156110d3573d6000803e3d6000fd5b50506002546001600160a01b0316158015935091506110fc905057506001600160601b03861615155b156111c65760025460405163a9059cbb60e01b81526001600160a01b039091169063a9059cbb90611133908a908a906004016155d0565b602060405180830381600087803b15801561114d57600080fd5b505af1158015611161573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906111859190614f67565b6111c65760405162461bcd60e51b8152602060048201526012602482015271696e737566666
96369656e742066756e647360701b6044820152606401610f20565b600c805460ff60301b1916600160301b17905560005b835181101561126f578381815181106111f7576111f7615b6d565b60200260200101516001600160a01b0316638ea98117896040518263ffffffff1660e01b815260040161122a9190615589565b600060405180830381600087803b15801561124457600080fd5b505af1158015611258573d6000803e3d6000fd5b50505050808061126790615afc565b9150506111dc565b50600c805460ff60301b191690556040517fd63ca8cb945956747ee69bfdc3ea754c24a4caf7418db70e46052f7850be4187906112af9089908b9061559d565b60405180910390a15050505050505050565b6000818152600560209081526040808320815160608101835281546001600160a01b039081168252600183015416818501526002820180548451818702810187018652818152879693958601939092919083018282801561134b57602002820191906000526020600020905b81546001600160a01b0316815260019091019060200180831161132d575b505050505081525050905060005b8160400151518110156114585760005b600e5481101561144557600061140e600e838154811061138b5761138b615b6d565b9060005260206000200154856040015185815181106113ac576113ac615b6d565b60200260200101518860046000896040015189815181106113cf576113cf615b6d565b6020908102919091018101516001600160a01b0316825281810192909252604090810160009081208d82529092529020546001600160401b03166137bd565b506000818152600f6020526040902054909150156114325750600195945050505050565b508061143d81615afc565b915050611369565b508061145081615afc565b915050611359565b5060009392505050565b61146a613071565b611472613252565b6002546001600160a01b031661149b5760405163c1f0c0a160e01b815260040160405180910390fd5b600b546001600160601b03166114c457604051631e9acf1760e31b815260040160405180910390fd5b600b80546001600160601b031690819060006114e08380615a78565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555080600a60008282829054906101000a90046001600160601b03166115289190615a78565b82546001600160601b039182166101009390930a92830291909202199091161790555060025460405163a9059cbb60e01b81526001600160a01b039091169063a9059cbb9061157d90859085906004016155d0565b602060405180830381600087803b158015611597576
00080fd5b505af11580156115ab573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906115cf9190614f67565b6115ec57604051631e9acf1760e31b815260040160405180910390fd5b5050565b6115f8613252565b611601816135ab565b15611621578060405163ac8a27ef60e01b8152600401610f209190615589565b601280546001810182556000919091527fbb8a6a4669ba250d26cd7a459eca9d215f8307e33aebe50379bc5a3617ec34440180546001600160a01b0319166001600160a01b0383161790556040517fb7cabbfc11e66731fc77de0444614282023bcbd41d16781c753a431d0af016259061169c908390615589565b60405180910390a150565b6116af613252565b6002546001600160a01b0316156116d957604051631688c53760e11b815260040160405180910390fd5b600280546001600160a01b039384166001600160a01b03199182161790915560038054929093169116179055565b6001546001600160a01b0316331461175a5760405162461bcd60e51b815260206004820152601660248201527526bab9ba10313290383937b837b9b2b21037bbb732b960511b6044820152606401610f20565b60008054336001600160a01b0319808316821784556001805490911690556040516001600160a01b0390921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6117b9613252565b6040805180820182526000916117e89190849060029083908390808284376000920191909152506126c4915050565b6000818152600d602052604090205490915060ff161561181e57604051634a0b8fa760e01b815260048101829052602401610f20565b6000818152600d6020526040808220805460ff19166001908117909155600e805491820181559092527fbb7b4a454dc3493923482f07822329ed19e8244eff582cc204f8554c3620c3fd909101829055517fc9583fd3afa3d7f16eb0b88d0268e7d05c09bafa4b21e092cbd1320e1bc8089d9061189e9083815260200190565b60405180910390a15050565b6118b2613252565b600a544790600160601b90046001600160601b0316818111156118ec5780826040516354ced18160e11b8152600401610f20929190615613565b818110156109095760006119008284615a61565b90506000846001600160a01b03168260405160006040518083038185875af1925050503d806000811461194f576040519150601f19603f3d011682016040523d82523d6000602084013e611954565b606091505b50509050806119765760405163950b247960e01b815260040160405180910390fd5b7f4aed7c8
eed0496c8c19ea2681fcca25741c1602342e38b045d9f1e8e905d2e9c85836040516119a792919061559d565b60405180910390a15050505050565b6119be613071565b6000818152600560205260409020546001600160a01b03166119f357604051630fb532db60e11b815260040160405180910390fd5b60008181526006602052604090208054600160601b90046001600160601b0316903490600c611a228385615a0c565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555034600a600c8282829054906101000a90046001600160601b0316611a6a9190615a0c565b92506101000a8154816001600160601b0302191690836001600160601b03160217905550817f7603b205d03651ee812f803fccde89f1012e545a9c99f0abfea9cedd0fd8e902823484611abd91906159d2565b604051611acb929190615613565b60405180910390a25050565b6000611ae1613071565b6020808301356000908152600590915260409020546001600160a01b0316611b1c57604051630fb532db60e11b815260040160405180910390fd5b3360009081526004602090815260408083208583013584529091529020546001600160401b031680611b69578260200135336040516379bfd40160e01b8152600401610f20929190615744565b600c5461ffff16611b806060850160408601615283565b61ffff161080611ba3575060c8611b9d6060850160408601615283565b61ffff16115b15611bdd57611bb86060840160408501615283565b600c5460405163539c34bb60e11b8152610f20929161ffff169060c8906004016156c7565b600c5462010000900463ffffffff16611bfc608085016060860161539f565b63ffffffff161115611c4257611c18608084016060850161539f565b600c54604051637aebf00f60e11b8152610f20929162010000900463ffffffff1690600401615867565b6101f4611c5560a085016080860161539f565b63ffffffff161115611c8f57611c7160a084016080850161539f565b6101f46040516311ce1afb60e21b8152600401610f20929190615867565b6000611c9c8260016159ea565b9050600080611cb28635336020890135866137bd565b90925090506000611cce611cc960a08901896158e7565b613846565b90506000611cdb826138c3565b905083611ce6613934565b60208a0135611cfb60808c0160608d0161539f565b611d0b60a08d0160808e0161539f565b3386604051602001611d2397969594939291906157c7565b60405160208183030381529060405280519060200120600f600086815260200190815260200160002081905550336001600160a01b031688602001358960000
1357feb0e3652e0f44f417695e6e90f2f42c99b65cd7169074c5a654b16b9748c3a4e87878d6040016020810190611d9a9190615283565b8e6060016020810190611dad919061539f565b8f6080016020810190611dc0919061539f565b89604051611dd396959493929190615788565b60405180910390a45050336000908152600460209081526040808320898301358452909152902080546001600160401b0319166001600160401b039490941693909317909255925050505b919050565b6000611e2d613071565b6007546001600160401b031633611e45600143615a61565b6040516001600160601b0319606093841b81166020830152914060348201523090921b1660548201526001600160c01b031960c083901b16606882015260700160408051601f1981840301815291905280516020909101209150611eaa8160016159ea565b600780546001600160401b0319166001600160401b03928316179055604080516000808252608082018352602080830182815283850183815260608086018581528a86526006855287862093518454935191516001600160601b039182166001600160c01b031990951694909417600160601b9190921602176001600160c01b0316600160c01b9290981691909102969096179055835194850184523385528481018281528585018481528884526005835294909220855181546001600160a01b03199081166001600160a01b039283161783559351600183018054909516911617909255925180519294939192611fa89260028501920190614b36565b50611fb8915060089050846139c4565b50827f1d3015d7ba850fa198dc7b1a3f5d42779313a681035f77c8c03764c61005518d33604051611fe99190615589565b60405180910390a2505090565b611ffe613071565b6002546001600160a01b03163314612029576040516344b0e3c360e01b815260040160405180910390fd5b6020811461204a57604051638129bbcd60e01b815260040160405180910390fd5b60006120588284018461533f565b6000818152600560205260409020549091506001600160a01b031661209057604051630fb532db60e11b815260040160405180910390fd5b600081815260066020526040812080546001600160601b0316918691906120b78385615a0c565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555084600a60008282829054906101000a90046001600160601b03166120ff9190615a0c565b92506101000a8154816001600160601b0302191690836001600160601b03160217905550817f1ced9348ff549fceab2ac57cd3a9de38edaaab274b725ee82c23e8fc8c4eec7a82878461215
291906159d2565b604051612160929190615613565b60405180910390a2505050505050565b612178613252565b60c861ffff8a1611156121a557888960c860405163539c34bb60e11b8152600401610f20939291906156c7565b600085136121c9576040516321ea67b360e11b815260048101869052602401610f20565b604080516101208101825261ffff8b1680825263ffffffff808c16602084018190526000848601528b8216606085018190528b8316608086018190528a841660a08701819052938a1660c0870181905260ff808b1660e08901819052908a16610100909801889052600c8054600160c01b90990260ff60c01b19600160b81b9093029290921661ffff60b81b19600160981b90940263ffffffff60981b19600160781b90990298909816600160781b600160b81b0319600160581b90960263ffffffff60581b19600160381b90980297909716600160301b600160781b03196201000090990265ffffffffffff19909c16909a179a909a1796909616979097179390931791909116959095179290921793909316929092179190911790556010869055517f95cb2ddab6d2297c29a4861691de69b3969c464aa4a9c44258b101ff02ff375a90612366908b908b908b908b908b908990899061ffff97909716875263ffffffff95861660208801529385166040870152919093166060850152608084019290925260ff91821660a08401521660c082015260e00190565b60405180910390a1505050505050505050565b612381613252565b6000818152600560205260409020546001600160a01b0316806123b757604051630fb532db60e11b815260040160405180910390fd5b6115ec828261309e565b606060006123cf60086139d0565b90508084106123f157604051631390f2a160e01b815260040160405180910390fd5b60006123fd84866159d2565b90508181118061240b575083155b6124155780612417565b815b905060006124258683615a61565b9050806001600160401b0381111561243f5761243f615b83565b604051908082528060200260200182016040528015612468578160200160208202803683370190505b50935060005b818110156124b85761248b61248388836159d2565b6008906139da565b85828151811061249d5761249d615b6d565b60209081029190910101526124b181615afc565b905061246e565b505050505b92915050565b6124cb613071565b6000818152600560205260409020546001600160a01b03168061250157604051630fb532db60e11b815260040160405180910390fd5b6000828152600560205260409020600101546001600160a01b0316331461255857600082815260056020526040908190206
0010154905163d084e97560e01b8152610f20916001600160a01b031690600401615589565b600082815260056020526040908190208054336001600160a01b031991821681178355600190920180549091169055905183917fd4114ab6e9af9f597c52041f32d62dc57c5c4e4c0d4427006069635e216c938691611acb9185916155b6565b816125c281613010565b6125ca613071565b60008381526005602052604090206002018054606414156125fe576040516305a48e0f60e01b815260040160405180910390fd5b6001600160a01b038316600090815260046020908152604080832087845291829052909120546001600160401b031615612639575050505050565b600085815260208281526040808320805460016001600160401b0319909116811790915585549081018655858452919092200180546001600160a01b0319166001600160a01b0387161790555185907f1e980d04aa7648e205713e5e8ea3808672ac163d10936d36f91b2c88ac1575e1906126b5908790615589565b60405180910390a25050505050565b6000816040516020016126d791906155f2565b604051602081830303815290604052805190602001209050919050565b816126fe81613010565b612706613071565b61270f836112c1565b1561272d57604051631685ecdd60e31b815260040160405180910390fd5b6001600160a01b03821660009081526004602090815260408083208684529091529020546001600160401b031661277b5782826040516379bfd40160e01b8152600401610f20929190615744565b6000838152600560209081526040808320600201805482518185028101850190935280835291929091908301828280156127de57602002820191906000526020600020905b81546001600160a01b031681526001909101906020018083116127c0575b505050505090506000600182516127f59190615a61565b905060005b825181101561290157846001600160a01b031683828151811061281f5761281f615b6d565b60200260200101516001600160a01b031614156128ef57600083838151811061284a5761284a615b6d565b602002602001015190508060056000898152602001908152602001600020600201838154811061287c5761287c615b6d565b600091825260208083209190910180546001600160a01b0319166001600160a01b0394909416939093179092558881526005909152604090206002018054806128c7576128c7615b57565b600082815260209020810160001990810180546001600160a01b031916905501905550612901565b806128f981615afc565b9150506127fa565b506001600160a01b03841660009081526004602090815260408
083208884529091529081902080546001600160401b03191690555185907f32158c6058347c1601b2d12bc696ac6901d8a9a9aa3ba10c27ab0a983e8425a7906126b5908790615589565b600061297382840184615151565b9050806000015160ff166001146129ac57805160405163237d181f60e21b815260ff909116600482015260016024820152604401610f20565b8060a001516001600160601b031634146129f05760a08101516040516306acf13560e41b81523460048201526001600160601b039091166024820152604401610f20565b6020808201516000908152600590915260409020546001600160a01b031615612a2c576040516326afa43560e11b815260040160405180910390fd5b60005b816060015151811015612acc5760016004600084606001518481518110612a5857612a58615b6d565b60200260200101516001600160a01b03166001600160a01b0316815260200190815260200160002060008460200151815260200190815260200160002060006101000a8154816001600160401b0302191690836001600160401b031602179055508080612ac490615afc565b915050612a2f565b50604080516060808201835260808401516001600160601b03908116835260a0850151811660208085019182526000858701818152828901805183526006845288832097518854955192516001600160401b0316600160c01b026001600160c01b03938816600160601b026001600160c01b0319909716919097161794909417169390931790945584518084018652868601516001600160a01b03908116825281860184815294880151828801908152925184526005865295909220825181549087166001600160a01b0319918216178255935160018201805491909716941693909317909455925180519192612bcb92600285019290910190614b36565b5050506080810151600a8054600090612bee9084906001600160601b0316615a0c565b92506101000a8154816001600160601b0302191690836001600160601b031602179055508060a00151600a600c8282829054906101000a90046001600160601b0316612c3a9190615a0c565b92506101000a8154816001600160601b0302191690836001600160601b03160217905550612c76816020015160086139c490919063ffffffff16565b50505050565b600e8181548110612c8c57600080fd5b600091825260209091200154905081565b81612ca781613010565b612caf613071565b600083815260056020526040902060018101546001600160a01b03848116911614612c76576001810180546001600160a01b0319166001600160a01b03851617905560405184907f21a4dad170a6bf476c31bbc
f4a16628295b0e450672eec25d7c93308e05344a190612d2590339087906155b6565b60405180910390a250505050565b600081815260056020526040812054819081906001600160a01b0316606081612d6f57604051630fb532db60e11b815260040160405180910390fd5b600086815260066020908152604080832054600583529281902060020180548251818502810185019093528083526001600160601b0380861695600160601b810490911694600160c01b9091046001600160401b0316938893929091839190830182828015612e0757602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311612de9575b505050505090509450945094509450945091939590929450565b612e29613252565b6002546001600160a01b0316612e525760405163c1f0c0a160e01b815260040160405180910390fd5b6002546040516370a0823160e01b81526000916001600160a01b0316906370a0823190612e83903090600401615589565b60206040518083038186803b158015612e9b57600080fd5b505afa158015612eaf573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612ed39190614f84565b600a549091506001600160601b031681811115612f075780826040516354ced18160e11b8152600401610f20929190615613565b81811015610909576000612f1b8284615a61565b60025460405163a9059cbb60e01b81529192506001600160a01b03169063a9059cbb90612f4e908790859060040161559d565b602060405180830381600087803b158015612f6857600080fd5b505af1158015612f7c573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612fa09190614f67565b612fbd57604051631f01ff1360e21b815260040160405180910390fd5b7f59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b4366008482604051612fee92919061559d565b60405180910390a150505050565b613004613252565b61300d816139e6565b50565b6000818152600560205260409020546001600160a01b03168061304657604051630fb532db60e11b815260040160405180910390fd5b336001600160a01b038216146115ec5780604051636c51fda960e11b8152600401610f209190615589565b600c54600160301b900460ff161561309c5760405163769dd35360e11b815260040160405180910390fd5b565b6000806130aa84613615565b60025491935091506001600160a01b0316158015906130d157506001600160601b03821615155b156131805760025460405163a9059cbb60e01b8152600160016
0a01b039091169063a9059cbb906131119086906001600160601b0387169060040161559d565b602060405180830381600087803b15801561312b57600080fd5b505af115801561313f573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906131639190614f67565b61318057604051631e9acf1760e31b815260040160405180910390fd5b6000836001600160a01b0316826001600160601b031660405160006040518083038185875af1925050503d80600081146131d6576040519150601f19603f3d011682016040523d82523d6000602084013e6131db565b606091505b50509050806131fd5760405163950b247960e01b815260040160405180910390fd5b604080516001600160a01b03861681526001600160601b03808616602083015284169181019190915285907f8c74ce8b8cf87f5eb001275c8be27eb34ea2b62bfab6814fcc62192bb63e81c4906060016126b5565b6000546001600160a01b0316331461309c5760405162461bcd60e51b815260206004820152601660248201527527b7363c9031b0b63630b1363290313c9037bbb732b960511b6044820152606401610f20565b604080516060810182526000808252602082018190529181019190915260006132d184600001516126c4565b6000818152600d602052604090205490915060ff1661330657604051631dfd6e1360e21b815260048101829052602401610f20565b600081856080015160405160200161331f929190615613565b60408051601f1981840301815291815281516020928301206000818152600f9093529120549091508061336557604051631b44092560e11b815260040160405180910390fd5b845160208087015160408089015160608a015160808b015160a08c01519351613394978a979096959101615813565b6040516020818303038152906040528051906020012081146133c95760405163354a450b60e21b815260040160405180910390fd5b60006133d88660000151613a8a565b90508061349f578551604051631d2827a760e31b81526001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169163e9413d389161342c919060040161587e565b60206040518083038186803b15801561344457600080fd5b505afa158015613458573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061347c9190614f84565b90508061349f57855160405163175dadad60e01b8152610f20919060040161587e565b60008760800151826040516020016134c1929190918252602082015260400190565b60405160208183030381529060405280519
06020012060001c905060006134e88983613b67565b6040805160608101825297885260208801969096529486019490945250929695505050505050565b60005a61138881101561352257600080fd5b61138881039050846040820482031161353a57600080fd5b50823b61354657600080fd5b60008083516020850160008789f1949350505050565b60008115613589576011546135829086908690600160201b900463ffffffff1686613bd2565b90506135a3565b6011546135a0908690869063ffffffff1686613c74565b90505b949350505050565b6000805b60125481101561360c57826001600160a01b0316601282815481106135d6576135d6615b6d565b6000918252602090912001546001600160a01b031614156135fa5750600192915050565b8061360481615afc565b9150506135af565b50600092915050565b60008181526005602090815260408083206006909252822054600290910180546001600160601b0380841694600160601b90940416925b818110156136b7576004600084838154811061366a5761366a615b6d565b60009182526020808320909101546001600160a01b031683528281019390935260409182018120898252909252902080546001600160401b03191690556136b081615afc565b905061364c565b50600085815260056020526040812080546001600160a01b031990811682556001820180549091169055906136ef6002830182614b9b565b505060008581526006602052604081205561370b600886613d99565b506001600160601b0384161561375e57600a80548591906000906137399084906001600160601b0316615a78565b92506101000a8154816001600160601b0302191690836001600160601b031602179055505b6001600160601b038316156137b65782600a600c8282829054906101000a90046001600160601b03166137919190615a78565b92506101000a8154816001600160601b0302191690836001600160601b031602179055505b5050915091565b60408051602081018690526001600160a01b03851691810191909152606081018390526001600160401b03821660808201526000908190819060a00160408051601f198184030181529082905280516020918201209250613822918991849101615613565b60408051808303601f19018152919052805160209091012097909650945050505050565b6040805160208101909152600081528161386f57506040805160208101909152600081526124bd565b63125fa26760e31b6138818385615aa0565b6001600160e01b031916146138a957604051632923fee760e11b815260040160405180910390fd5b6138b682600481866159a8565b810190610
ee99190614fde565b60607f92fd13387c7fe7befbc38d303d6468778fb9731bc4583f17d92989c6fcfdeaaa826040516024016138fc91511515815260200190565b60408051601f198184030181529190526020810180516001600160e01b03166001600160e01b03199093169290921790915292915050565b60004661394081613da5565b156139bd5760646001600160a01b031663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b15801561397f57600080fd5b505afa158015613993573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906139b79190614f84565b91505090565b4391505090565b6000610ee98383613dc8565b60006124bd825490565b6000610ee98383613e17565b6001600160a01b038116331415613a395760405162461bcd60e51b815260206004820152601760248201527621b0b73737ba103a3930b739b332b9103a379039b2b63360491b6044820152606401610f20565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600046613a9681613da5565b15613b5857610100836001600160401b0316613ab0613934565b613aba9190615a61565b1180613ad65750613ac9613934565b836001600160401b031610155b15613ae45750600092915050565b6040516315a03d4160e11b8152606490632b407a8290613b0890869060040161587e565b60206040518083038186803b158015613b2057600080fd5b505afa158015613b34573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610ee99190614f84565b50506001600160401b03164090565b6000613b9b8360000151846020015185604001518660600151868860a001518960c001518a60e001518b6101000151613e41565b60038360200151604051602001613bb392919061575b565b60408051601f1981840301815291905280516020909101209392505050565b600080613c156000368080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061405c92505050565b905060005a613c2488886159d2565b613c2e9190615a61565b613c389085615a42565b90506000613c5163ffffffff871664e8d4a51000615a42565b905082613c5e82846159d2565b613c6891906159d2565b98975050505050505050565b600080613c7f614121565b905060008113613ca5576040516321ea67b360e11b815260048101829052602401610f2
0565b6000613ce76000368080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061405c92505050565b9050600082825a613cf88b8b6159d2565b613d029190615a61565b613d0c9088615a42565b613d1691906159d2565b613d2890670de0b6b3a7640000615a42565b613d329190615a2e565b90506000613d4b63ffffffff881664e8d4a51000615a42565b9050613d6281676765c793fa10079d601b1b615a61565b821115613d825760405163e80fa38160e01b815260040160405180910390fd5b613d8c81836159d2565b9998505050505050505050565b6000610ee983836141ec565b600061a4b1821480613db9575062066eed82145b806124bd57505062066eee1490565b6000818152600183016020526040812054613e0f575081546001818101845560008481526020808220909301849055845484825282860190935260409020919091556124bd565b5060006124bd565b6000826000018281548110613e2e57613e2e615b6d565b9060005260206000200154905092915050565b613e4a896142df565b613e935760405162461bcd60e51b815260206004820152601a6024820152797075626c6963206b6579206973206e6f74206f6e20637572766560301b6044820152606401610f20565b613e9c886142df565b613ee05760405162461bcd60e51b815260206004820152601560248201527467616d6d61206973206e6f74206f6e20637572766560581b6044820152606401610f20565b613ee9836142df565b613f355760405162461bcd60e51b815260206004820152601d60248201527f6347616d6d615769746e657373206973206e6f74206f6e2063757276650000006044820152606401610f20565b613f3e826142df565b613f895760405162461bcd60e51b815260206004820152601c60248201527b73486173685769746e657373206973206e6f74206f6e20637572766560201b6044820152606401610f20565b613f95878a88876143a2565b613fdd5760405162461bcd60e51b81526020600482015260196024820152786164647228632a706b2b732a6729213d5f755769746e65737360381b6044820152606401610f20565b6000613fe98a876144b6565b90506000613ffc898b878b86898961451a565b9050600061400d838d8d8a8661462d565b9050808a1461404e5760405162461bcd60e51b815260206004820152600d60248201526c34b73b30b634b210383937b7b360991b6044820152606401610f20565b505050505050505050505050565b60004661406881613da5565b156140a757606c6001600160a01b031663c6f7de0e6040518163ffffffff1660e01b8
15260040160206040518083038186803b158015613b2057600080fd5b6140b08161466d565b1561360c57600f602160991b016001600160a01b03166349948e0e84604051806080016040528060488152602001615bbd604891396040516020016140f69291906154df565b6040516020818303038152906040526040518263ffffffff1660e01b8152600401613b08919061563f565b600c5460035460408051633fabe5a360e21b81529051600093600160381b900463ffffffff169283151592859283926001600160a01b03169163feaf968c9160048083019260a0929190829003018186803b15801561417f57600080fd5b505afa158015614193573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906141b791906153ba565b5094509092508491505080156141db57506141d28242615a61565b8463ffffffff16105b156135a35750601054949350505050565b600081815260018301602052604081205480156142d5576000614210600183615a61565b855490915060009061422490600190615a61565b905081811461428957600086600001828154811061424457614244615b6d565b906000526020600020015490508087600001848154811061426757614267615b6d565b6000918252602080832090910192909255918252600188019052604090208390555b855486908061429a5761429a615b57565b6001900381819060005260206000200160009055905585600101600086815260200190815260200160002060009055600193505050506124bd565b60009150506124bd565b80516000906401000003d0191161432d5760405162461bcd60e51b8152602060048201526012602482015271696e76616c696420782d6f7264696e61746560701b6044820152606401610f20565b60208201516401000003d0191161437b5760405162461bcd60e51b8152602060048201526012602482015271696e76616c696420792d6f7264696e61746560701b6044820152606401610f20565b60208201516401000003d01990800961439b8360005b60200201516146a7565b1492915050565b60006001600160a01b0382166143e85760405162461bcd60e51b815260206004820152600b60248201526a626164207769746e65737360a81b6044820152606401610f20565b6020840151600090600116156143ff57601c614402565b601b5b9050600070014551231950b75fc4402da1732fc9bebe1985876000602002015109865170014551231950b75fc4402da1732fc9bebe199182039250600091908909875160408051600080825260209091019182905292935060019161446c91869188918790615621565b6020604051602
081039080840390855afa15801561448e573d6000803e3d6000fd5b5050604051601f1901516001600160a01b039081169088161495505050505050949350505050565b6144be614bb9565b6144eb600184846040516020016144d793929190615568565b6040516020818303038152906040526146cb565b90505b6144f7816142df565b6124bd57805160408051602081019290925261451391016144d7565b90506144ee565b614522614bb9565b825186516401000003d01990819006910614156145815760405162461bcd60e51b815260206004820152601e60248201527f706f696e747320696e2073756d206d7573742062652064697374696e637400006044820152606401610f20565b61458c878988614719565b6145d15760405162461bcd60e51b8152602060048201526016602482015275119a5c9cdd081b5d5b0818da1958dac819985a5b195960521b6044820152606401610f20565b6145dc848685614719565b6146225760405162461bcd60e51b815260206004820152601760248201527614d958dbdb99081b5d5b0818da1958dac819985a5b1959604a1b6044820152606401610f20565b613c68868484614834565b60006002868686858760405160200161464b9695949392919061550e565b60408051601f1981840301815291905280516020909101209695505050505050565b6000600a82148061467f57506101a482145b8061468c575062aa37dc82145b80614698575061210582145b806124bd57505062014a331490565b6000806401000003d01980848509840990506401000003d019600782089392505050565b6146d3614bb9565b6146dc826148f7565b81526146f16146ec826000614391565b614932565b602082018190526002900660011415611e1e576020810180516401000003d019039052919050565b6000826147565760405162461bcd60e51b815260206004820152600b60248201526a3d32b9379039b1b0b630b960a91b6044820152606401610f20565b8351602085015160009061476c90600290615b17565b1561477857601c61477b565b601b5b9050600070014551231950b75fc4402da1732fc9bebe198387096040805160008082526020909101918290529192506001906147be908390869088908790615621565b6020604051602081039080840390855afa1580156147e0573d6000803e3d6000fd5b5050506020604051035190506000866040516020016147ff91906154cd565b60408051601f1981840301815291905280516020909101206001600160a01b0392831692169190911498975050505050505050565b61483c614bb9565b83516020808601518551918601516000938493849361485d9390919061495
2565b919450925090506401000003d0198582096001146148b95760405162461bcd60e51b815260206004820152601960248201527834b73b2d1036bab9ba1031329034b73b32b939b29037b3103d60391b6044820152606401610f20565b60405180604001604052806401000003d019806148d8576148d8615b41565b87860981526020016401000003d0198785099052979650505050505050565b805160208201205b6401000003d0198110611e1e576040805160208082019390935281518082038401815290820190915280519101206148ff565b60006124bd82600261494b6401000003d01960016159d2565b901c614a32565b60008080600180826401000003d019896401000003d019038808905060006401000003d0198b6401000003d019038a089050600061499283838585614ac9565b90985090506149a388828e88614aed565b90985090506149b488828c87614aed565b909850905060006149c78d878b85614aed565b90985090506149d888828686614ac9565b90985090506149e988828e89614aed565b9098509050818114614a1e576401000003d019818a0998506401000003d01982890997506401000003d0198183099650614a22565b8196505b5050505050509450945094915050565b600080614a3d614bd7565b6020808252818101819052604082015260608101859052608081018490526401000003d01960a0820152614a6f614bf5565b60208160c0846005600019fa925082614abf5760405162461bcd60e51b81526020600482015260126024820152716269674d6f64457870206661696c7572652160701b6044820152606401610f20565b5195945050505050565b6000806401000003d0198487096401000003d0198487099097909650945050505050565b600080806401000003d019878509905060006401000003d01987876401000003d019030990506401000003d0198183086401000003d01986890990999098509650505050505050565b828054828255906000526020600020908101928215614b8b579160200282015b82811115614b8b57825182546001600160a01b0319166001600160a01b03909116178255602090920191600190910190614b56565b50614b97929150614c13565b5090565b508054600082559060005260206000209081019061300d9190614c13565b60405180604001604052806002906020820280368337509192915050565b6040518060c001604052806006906020820280368337509192915050565b60405180602001604052806001906020820280368337509192915050565b5b80821115614b975760008155600101614c14565b8035611e1e81615b99565b600082601f830112614c4457600080f
d5b604080519081016001600160401b0381118282101715614c6657614c66615b83565b8060405250808385604086011115614c7d57600080fd5b60005b6002811015614c9f578135835260209283019290910190600101614c80565b509195945050505050565b8035611e1e81615bae565b60008083601f840112614cc757600080fd5b5081356001600160401b03811115614cde57600080fd5b602083019150836020828501011115614cf657600080fd5b9250929050565b600082601f830112614d0e57600080fd5b81356001600160401b03811115614d2757614d27615b83565b614d3a601f8201601f1916602001615978565b818152846020838601011115614d4f57600080fd5b816020850160208301376000918101602001919091529392505050565b600060c08284031215614d7e57600080fd5b614d8661592d565b905081356001600160401b038082168214614da057600080fd5b81835260208401356020840152614db960408501614e1f565b6040840152614dca60608501614e1f565b6060840152614ddb60808501614c28565b608084015260a0840135915080821115614df457600080fd5b50614e0184828501614cfd565b60a08301525092915050565b803561ffff81168114611e1e57600080fd5b803563ffffffff81168114611e1e57600080fd5b803560ff81168114611e1e57600080fd5b80516001600160501b0381168114611e1e57600080fd5b80356001600160601b0381168114611e1e57600080fd5b600060208284031215614e8457600080fd5b8135610ee981615b99565b60008060408385031215614ea257600080fd5b8235614ead81615b99565b91506020830135614ebd81615b99565b809150509250929050565b60008060008060608587031215614ede57600080fd5b8435614ee981615b99565b93506020850135925060408501356001600160401b03811115614f0b57600080fd5b614f1787828801614cb5565b95989497509550505050565b600060408284031215614f3557600080fd5b82604083011115614f4557600080fd5b50919050565b600060408284031215614f5d57600080fd5b610ee98383614c33565b600060208284031215614f7957600080fd5b8151610ee981615bae565b600060208284031215614f9657600080fd5b5051919050565b60008060208385031215614fb057600080fd5b82356001600160401b03811115614fc657600080fd5b614fd285828601614cb5565b90969095509350505050565b600060208284031215614ff057600080fd5b604051602081016001600160401b038111828210171561501257615012615b83565b604052823561502081615bae565b81529392505050565b600
08060008385036101e081121561504057600080fd5b6101a08082121561505057600080fd5b615058615955565b91506150648787614c33565b82526150738760408801614c33565b60208301526080860135604083015260a0860135606083015260c086013560808301526150a260e08701614c28565b60a08301526101006150b688828901614c33565b60c08401526150c9886101408901614c33565b60e0840152610180870135908301529093508401356001600160401b038111156150f257600080fd5b6150fe86828701614d6c565b92505061510e6101c08501614caa565b90509250925092565b60006020828403121561512957600080fd5b81356001600160401b0381111561513f57600080fd5b820160c08185031215610ee957600080fd5b6000602080838503121561516457600080fd5b82356001600160401b038082111561517b57600080fd5b9084019060c0828703121561518f57600080fd5b61519761592d565b6151a083614e33565b8152838301358482015260408301356151b881615b99565b60408201526060830135828111156151cf57600080fd5b8301601f810188136151e057600080fd5b8035838111156151f2576151f2615b83565b8060051b9350615203868501615978565b8181528681019083880186850189018c101561521e57600080fd5b600096505b8387101561524d578035945061523885615b99565b84835260019690960195918801918801615223565b5060608501525061526391505060808401614e5b565b608082015261527460a08401614e5b565b60a08201529695505050505050565b60006020828403121561529557600080fd5b610ee982614e0d565b60008060008060008060008060006101208a8c0312156152bd57600080fd5b6152c68a614e0d565b98506152d460208b01614e1f565b97506152e260408b01614e1f565b96506152f060608b01614e1f565b955060808a0135945061530560a08b01614e1f565b935061531360c08b01614e1f565b925061532160e08b01614e33565b91506153306101008b01614e33565b90509295985092959850929598565b60006020828403121561535157600080fd5b5035919050565b6000806040838503121561536b57600080fd5b823591506020830135614ebd81615b99565b6000806040838503121561539057600080fd5b50508035926020909101359150565b6000602082840312156153b157600080fd5b610ee982614e1f565b600080600080600060a086880312156153d257600080fd5b6153db86614e44565b94506020860151935060408601519250606086015191506153fe60808701614e44565b90509295509295909350565b60008151808452602
0808501945080840160005b838110156154435781516001600160a01b03168752958201959082019060010161541e565b509495945050505050565b8060005b6002811015612c76578151845260209384019390910190600101615452565b600081518084526020808501945080840160005b8381101561544357815187529582019590820190600101615485565b600081518084526154b9816020860160208601615ad0565b601f01601f19169290920160200192915050565b6154d7818361544e565b604001919050565b600083516154f1818460208801615ad0565b835190830190615505818360208801615ad0565b01949350505050565b86815261551e602082018761544e565b61552b606082018661544e565b61553860a082018561544e565b61554560e082018461544e565b60609190911b6001600160601b0319166101208201526101340195945050505050565b838152615578602082018461544e565b606081019190915260800192915050565b6001600160a01b0391909116815260200190565b6001600160a01b03929092168252602082015260400190565b6001600160a01b0392831681529116602082015260400190565b6001600160a01b039290921682526001600160601b0316602082015260400190565b604081016124bd828461544e565b602081526000610ee96020830184615471565b918252602082015260400190565b93845260ff9290921660208401526040830152606082015260800190565b602081526000610ee960208301846154a1565b6020815260ff82511660208201526020820151604082015260018060a01b0360408301511660608201526000606083015160c0608084015261569760e084018261540a565b60808501516001600160601b0390811660a0868101919091529095015190941660c0909301929092525090919050565b61ffff93841681529183166020830152909116604082015260600190565b60006060820161ffff86168352602063ffffffff86168185015260606040850152818551808452608086019150828701935060005b818110156157365784518352938301939183019160010161571a565b509098975050505050505050565b9182526001600160a01b0316602082015260400190565b82815260608101610ee9602083018461544e565b8281526040602082015260006135a36040830184615471565b86815285602082015261ffff85166040820152600063ffffffff808616606084015280851660808401525060c060a0830152613c6860c08301846154a1565b878152602081018790526040810186905263ffffffff8581166060830152841660808201526001600160a01b03831660a082015
260e060c08201819052600090613d8c908301846154a1565b8781526001600160401b03871660208201526040810186905263ffffffff8581166060830152841660808201526001600160a01b03831660a082015260e060c08201819052600090613d8c908301846154a1565b63ffffffff92831681529116602082015260400190565b6001600160401b0391909116815260200190565b6001600160601b038681168252851660208201526001600160401b03841660408201526001600160a01b038316606082015260a0608082018190526000906158dc9083018461540a565b979650505050505050565b6000808335601e198436030181126158fe57600080fd5b8301803591506001600160401b0382111561591857600080fd5b602001915036819003821315614cf657600080fd5b60405160c081016001600160401b038111828210171561594f5761594f615b83565b60405290565b60405161012081016001600160401b038111828210171561594f5761594f615b83565b604051601f8201601f191681016001600160401b03811182821017156159a0576159a0615b83565b604052919050565b600080858511156159b857600080fd5b838611156159c557600080fd5b5050820193919092039150565b600082198211156159e5576159e5615b2b565b500190565b60006001600160401b0382811684821680830382111561550557615505615b2b565b60006001600160601b0382811684821680830382111561550557615505615b2b565b600082615a3d57615a3d615b41565b500490565b6000816000190483118215151615615a5c57615a5c615b2b565b500290565b600082821015615a7357615a73615b2b565b500390565b60006001600160601b0383811690831681811015615a9857615a98615b2b565b039392505050565b6001600160e01b03198135818116916004851015615ac85780818660040360031b1b83161692505b505092915050565b60005b83811015615aeb578181015183820152602001615ad3565b83811115612c765750506000910152565b6000600019821415615b1057615b10615b2b565b5060010190565b600082615b2657615b26615b41565b500690565b634e487b7160e01b600052601160045260246000fd5b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052603160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b634e487b7160e01b600052604160045260246000fd5b6001600160a01b038116811461300d57600080fd5b801515811461300d57600080fdfe30786666666666666666666666666666666666666666666666666666666666666666666666666
6666666666666666666666666666666666666666666666666666666666666666666a164736f6c6343000806000a", +} + +var VRFCoordinatorV2PlusUpgradedVersionABI = VRFCoordinatorV2PlusUpgradedVersionMetaData.ABI + +var VRFCoordinatorV2PlusUpgradedVersionBin = VRFCoordinatorV2PlusUpgradedVersionMetaData.Bin + +func DeployVRFCoordinatorV2PlusUpgradedVersion(auth *bind.TransactOpts, backend bind.ContractBackend, blockhashStore common.Address) (common.Address, *types.Transaction, *VRFCoordinatorV2PlusUpgradedVersion, error) { + parsed, err := VRFCoordinatorV2PlusUpgradedVersionMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFCoordinatorV2PlusUpgradedVersionBin), backend, blockhashStore) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFCoordinatorV2PlusUpgradedVersion{address: address, abi: *parsed, VRFCoordinatorV2PlusUpgradedVersionCaller: VRFCoordinatorV2PlusUpgradedVersionCaller{contract: contract}, VRFCoordinatorV2PlusUpgradedVersionTransactor: VRFCoordinatorV2PlusUpgradedVersionTransactor{contract: contract}, VRFCoordinatorV2PlusUpgradedVersionFilterer: VRFCoordinatorV2PlusUpgradedVersionFilterer{contract: contract}}, nil +} + +type VRFCoordinatorV2PlusUpgradedVersion struct { + address common.Address + abi abi.ABI + VRFCoordinatorV2PlusUpgradedVersionCaller + VRFCoordinatorV2PlusUpgradedVersionTransactor + VRFCoordinatorV2PlusUpgradedVersionFilterer +} + +type VRFCoordinatorV2PlusUpgradedVersionCaller struct { + contract *bind.BoundContract +} + +type VRFCoordinatorV2PlusUpgradedVersionTransactor struct { + contract *bind.BoundContract +} + +type VRFCoordinatorV2PlusUpgradedVersionFilterer struct { + contract *bind.BoundContract +} + +type VRFCoordinatorV2PlusUpgradedVersionSession struct { + Contract 
*VRFCoordinatorV2PlusUpgradedVersion + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorV2PlusUpgradedVersionCallerSession struct { + Contract *VRFCoordinatorV2PlusUpgradedVersionCaller + CallOpts bind.CallOpts +} + +type VRFCoordinatorV2PlusUpgradedVersionTransactorSession struct { + Contract *VRFCoordinatorV2PlusUpgradedVersionTransactor + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorV2PlusUpgradedVersionRaw struct { + Contract *VRFCoordinatorV2PlusUpgradedVersion +} + +type VRFCoordinatorV2PlusUpgradedVersionCallerRaw struct { + Contract *VRFCoordinatorV2PlusUpgradedVersionCaller +} + +type VRFCoordinatorV2PlusUpgradedVersionTransactorRaw struct { + Contract *VRFCoordinatorV2PlusUpgradedVersionTransactor +} + +func NewVRFCoordinatorV2PlusUpgradedVersion(address common.Address, backend bind.ContractBackend) (*VRFCoordinatorV2PlusUpgradedVersion, error) { + abi, err := abi.JSON(strings.NewReader(VRFCoordinatorV2PlusUpgradedVersionABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFCoordinatorV2PlusUpgradedVersion(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersion{address: address, abi: abi, VRFCoordinatorV2PlusUpgradedVersionCaller: VRFCoordinatorV2PlusUpgradedVersionCaller{contract: contract}, VRFCoordinatorV2PlusUpgradedVersionTransactor: VRFCoordinatorV2PlusUpgradedVersionTransactor{contract: contract}, VRFCoordinatorV2PlusUpgradedVersionFilterer: VRFCoordinatorV2PlusUpgradedVersionFilterer{contract: contract}}, nil +} + +func NewVRFCoordinatorV2PlusUpgradedVersionCaller(address common.Address, caller bind.ContractCaller) (*VRFCoordinatorV2PlusUpgradedVersionCaller, error) { + contract, err := bindVRFCoordinatorV2PlusUpgradedVersion(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionCaller{contract: contract}, nil +} + +func 
NewVRFCoordinatorV2PlusUpgradedVersionTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFCoordinatorV2PlusUpgradedVersionTransactor, error) {
	contract, err := bindVRFCoordinatorV2PlusUpgradedVersion(address, nil, transactor, nil)
	if err != nil {
		return nil, err
	}
	return &VRFCoordinatorV2PlusUpgradedVersionTransactor{contract: contract}, nil
}

// NewVRFCoordinatorV2PlusUpgradedVersionFilterer creates a new log filterer instance of the
// contract, bound to a specific deployed contract address.
// NOTE(review): this file appears to be abigen-generated; prefer regenerating over hand edits.
func NewVRFCoordinatorV2PlusUpgradedVersionFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFCoordinatorV2PlusUpgradedVersionFilterer, error) {
	contract, err := bindVRFCoordinatorV2PlusUpgradedVersion(address, nil, nil, filterer)
	if err != nil {
		return nil, err
	}
	return &VRFCoordinatorV2PlusUpgradedVersionFilterer{contract: contract}, nil
}

// bindVRFCoordinatorV2PlusUpgradedVersion binds a generic wrapper to an already deployed
// contract using the ABI from VRFCoordinatorV2PlusUpgradedVersionMetaData.
func bindVRFCoordinatorV2PlusUpgradedVersion(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
	parsed, err := VRFCoordinatorV2PlusUpgradedVersionMetaData.GetAbi()
	if err != nil {
		return nil, err
	}
	return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
}

// Call invokes the (constant) contract method with params as input values and sets the
// output to result.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.VRFCoordinatorV2PlusUpgradedVersionCaller.contract.Call(opts, result, method, params...)
}

// Transfer initiates a plain transaction to move funds to the contract, calling its
// default method if one is available.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.VRFCoordinatorV2PlusUpgradedVersionTransactor.contract.Transfer(opts)
}

// Transact invokes the (paid) contract method with params as input values.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.VRFCoordinatorV2PlusUpgradedVersionTransactor.contract.Transact(opts, method, params...)
}

// Call invokes the (constant) contract method with params as input values and sets the
// output to result.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.contract.Call(opts, result, method, params...)
}

// Transfer initiates a plain transaction to move funds to the contract, calling its
// default method if one is available.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.contract.Transfer(opts)
}

// Transact invokes the (paid) contract method with params as input values.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.contract.Transact(opts, method, params...)
}

// BLOCKHASHSTORE is a free data retrieval call binding the contract method BLOCKHASH_STORE.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) BLOCKHASHSTORE(opts *bind.CallOpts) (common.Address, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "BLOCKHASH_STORE")

	if err != nil {
		return *new(common.Address), err
	}

	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)

	return out0, err

}

// BLOCKHASHSTORE is a free data retrieval call binding the contract method BLOCKHASH_STORE.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) BLOCKHASHSTORE() (common.Address, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.BLOCKHASHSTORE(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// BLOCKHASHSTORE is a free data retrieval call binding the contract method BLOCKHASH_STORE.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) BLOCKHASHSTORE() (common.Address, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.BLOCKHASHSTORE(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// PLI is a free data retrieval call binding the contract method PLI.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) PLI(opts *bind.CallOpts) (common.Address, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "PLI")

	if err != nil {
		return *new(common.Address), err
	}

	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)

	return out0, err

}

// PLI is a free data retrieval call binding the contract method PLI.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) PLI() (common.Address, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.PLI(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// PLI is a free data retrieval call binding the contract method PLI.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) PLI() (common.Address, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.PLI(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// PLINATIVEFEED is a free data retrieval call binding the contract method PLI_NATIVE_FEED.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) PLINATIVEFEED(opts *bind.CallOpts) (common.Address, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "PLI_NATIVE_FEED")

	if err != nil {
		return *new(common.Address), err
	}

	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)

	return out0, err

}

// PLINATIVEFEED is a free data retrieval call binding the contract method PLI_NATIVE_FEED.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) PLINATIVEFEED() (common.Address, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.PLINATIVEFEED(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// PLINATIVEFEED is a free data retrieval call binding the contract method PLI_NATIVE_FEED.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) PLINATIVEFEED() (common.Address, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.PLINATIVEFEED(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// MAXCONSUMERS is a free data retrieval call binding the contract method MAX_CONSUMERS.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "MAX_CONSUMERS")

	if err != nil {
		return *new(uint16), err
	}

	out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16)

	return out0, err

}

// MAXCONSUMERS is a free data retrieval call binding the contract method MAX_CONSUMERS.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) MAXCONSUMERS() (uint16, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.MAXCONSUMERS(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// MAXCONSUMERS is a free data retrieval call binding the contract method MAX_CONSUMERS.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) MAXCONSUMERS() (uint16, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.MAXCONSUMERS(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// MAXNUMWORDS is a free data retrieval call binding the contract method MAX_NUM_WORDS.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) MAXNUMWORDS(opts *bind.CallOpts) (uint32, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "MAX_NUM_WORDS")

	if err != nil {
		return *new(uint32), err
	}

	out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32)

	return out0, err

}

// MAXNUMWORDS is a free data retrieval call binding the contract method MAX_NUM_WORDS.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) MAXNUMWORDS() (uint32, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.MAXNUMWORDS(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// MAXNUMWORDS is a free data retrieval call binding the contract method MAX_NUM_WORDS.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) MAXNUMWORDS() (uint32, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.MAXNUMWORDS(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// MAXREQUESTCONFIRMATIONS is a free data retrieval call binding the contract method MAX_REQUEST_CONFIRMATIONS.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) MAXREQUESTCONFIRMATIONS(opts *bind.CallOpts) (uint16, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "MAX_REQUEST_CONFIRMATIONS")

	if err != nil {
		return *new(uint16), err
	}

	out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16)

	return out0, err

}

// MAXREQUESTCONFIRMATIONS is a free data retrieval call binding the contract method MAX_REQUEST_CONFIRMATIONS.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) MAXREQUESTCONFIRMATIONS() (uint16, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.MAXREQUESTCONFIRMATIONS(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// MAXREQUESTCONFIRMATIONS is a free data retrieval call binding the contract method MAX_REQUEST_CONFIRMATIONS.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) MAXREQUESTCONFIRMATIONS() (uint16, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.MAXREQUESTCONFIRMATIONS(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// GetActiveSubscriptionIds is a free data retrieval call binding the contract method getActiveSubscriptionIds.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) GetActiveSubscriptionIds(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "getActiveSubscriptionIds", startIndex, maxCount)

	if err != nil {
		return *new([]*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int)

	return out0, err

}

// GetActiveSubscriptionIds is a free data retrieval call binding the contract method getActiveSubscriptionIds.
func
(_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) GetActiveSubscriptionIds(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.GetActiveSubscriptionIds(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts, startIndex, maxCount)
}

// GetActiveSubscriptionIds is a free data retrieval call binding the contract method getActiveSubscriptionIds.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) GetActiveSubscriptionIds(startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.GetActiveSubscriptionIds(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts, startIndex, maxCount)
}

// GetRequestConfig is a free data retrieval call binding the contract method getRequestConfig.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) GetRequestConfig(opts *bind.CallOpts) (uint16, uint32, [][32]byte, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "getRequestConfig")

	if err != nil {
		return *new(uint16), *new(uint32), *new([][32]byte), err
	}

	out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16)
	out1 := *abi.ConvertType(out[1], new(uint32)).(*uint32)
	out2 := *abi.ConvertType(out[2], new([][32]byte)).(*[][32]byte)

	return out0, out1, out2, err

}

// GetRequestConfig is a free data retrieval call binding the contract method getRequestConfig.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) GetRequestConfig() (uint16, uint32, [][32]byte, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.GetRequestConfig(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// GetRequestConfig is a free data retrieval call binding the contract method getRequestConfig.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) GetRequestConfig() (uint16, uint32, [][32]byte, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.GetRequestConfig(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// GetSubscription is a free data retrieval call binding the contract method getSubscription,
// returning the decoded fields as a GetSubscription struct.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) GetSubscription(opts *bind.CallOpts, subId *big.Int) (GetSubscription,

	error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "getSubscription", subId)

	outstruct := new(GetSubscription)
	if err != nil {
		return *outstruct, err
	}

	outstruct.Balance = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
	outstruct.NativeBalance = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)
	outstruct.ReqCount = *abi.ConvertType(out[2], new(uint64)).(*uint64)
	outstruct.Owner = *abi.ConvertType(out[3], new(common.Address)).(*common.Address)
	outstruct.Consumers = *abi.ConvertType(out[4], new([]common.Address)).(*[]common.Address)

	return *outstruct, err

}

// GetSubscription is a free data retrieval call binding the contract method getSubscription.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) GetSubscription(subId *big.Int) (GetSubscription,

	error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.GetSubscription(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts, subId)
}

// GetSubscription is a free data retrieval call binding the contract method getSubscription.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) GetSubscription(subId *big.Int) (GetSubscription,

	error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.GetSubscription(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts, subId)
}

// HashOfKey is a free data retrieval call binding the contract method hashOfKey.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) HashOfKey(opts *bind.CallOpts, publicKey [2]*big.Int) ([32]byte, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "hashOfKey", publicKey)

	if err != nil {
		return *new([32]byte), err
	}

	out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)

	return out0, err

}

// HashOfKey is a free data retrieval call binding the contract method hashOfKey.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) HashOfKey(publicKey [2]*big.Int) ([32]byte, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.HashOfKey(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts, publicKey)
}

// HashOfKey is a free data retrieval call binding the contract method hashOfKey.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) HashOfKey(publicKey [2]*big.Int) ([32]byte, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.HashOfKey(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts, publicKey)
}

// MigrationVersion is a free data retrieval call binding the contract method migrationVersion.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) MigrationVersion(opts *bind.CallOpts) (uint8, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "migrationVersion")

	if err != nil {
		return *new(uint8), err
	}

	out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8)

	return out0, err

}

// MigrationVersion is a free data retrieval call binding the contract method migrationVersion.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) MigrationVersion() (uint8, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.MigrationVersion(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// MigrationVersion is a free data retrieval call binding the contract method migrationVersion.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) MigrationVersion() (uint8, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.MigrationVersion(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// Owner is a free data retrieval call binding the contract method owner.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) Owner(opts *bind.CallOpts) (common.Address, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "owner")

	if err != nil {
		return *new(common.Address), err
	}

	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)

	return out0, err

}

// Owner is a free data retrieval call binding the contract method owner.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) Owner() (common.Address, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.Owner(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// Owner is a free data retrieval call binding the contract method owner.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) Owner() (common.Address, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.Owner(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// PendingRequestExists is a free data retrieval call binding the contract method pendingRequestExists.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) PendingRequestExists(opts *bind.CallOpts, subId *big.Int) (bool, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "pendingRequestExists", subId)

	if err != nil {
		return *new(bool), err
	}

	out0 := *abi.ConvertType(out[0], new(bool)).(*bool)

	return out0, err

}

// PendingRequestExists is a free data retrieval call binding the contract method pendingRequestExists.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) PendingRequestExists(subId *big.Int) (bool, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.PendingRequestExists(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts, subId)
}

// PendingRequestExists is a free data retrieval call binding the contract method pendingRequestExists.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) PendingRequestExists(subId *big.Int) (bool, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.PendingRequestExists(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts, subId)
}

// SConfig is a free data retrieval call binding the contract method s_config,
// returning the decoded fields as an SConfig struct.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) SConfig(opts *bind.CallOpts) (SConfig,

	error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "s_config")

	outstruct := new(SConfig)
	if err != nil {
		return *outstruct, err
	}

	outstruct.MinimumRequestConfirmations = *abi.ConvertType(out[0], new(uint16)).(*uint16)
	outstruct.MaxGasLimit = *abi.ConvertType(out[1], new(uint32)).(*uint32)
	outstruct.ReentrancyLock = *abi.ConvertType(out[2], new(bool)).(*bool)
	outstruct.StalenessSeconds = *abi.ConvertType(out[3], new(uint32)).(*uint32)
	outstruct.GasAfterPaymentCalculation = *abi.ConvertType(out[4], new(uint32)).(*uint32)
	outstruct.FulfillmentFlatFeeNativePPM = *abi.ConvertType(out[5], new(uint32)).(*uint32)
	outstruct.FulfillmentFlatFeeLinkDiscountPPM = *abi.ConvertType(out[6], new(uint32)).(*uint32)
	outstruct.NativePremiumPercentage = *abi.ConvertType(out[7], new(uint8)).(*uint8)
	outstruct.LinkPremiumPercentage = *abi.ConvertType(out[8], new(uint8)).(*uint8)

	return *outstruct, err

}

// SConfig is a free data retrieval call binding the contract method s_config.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) SConfig() (SConfig,

	error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.SConfig(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// SConfig is a free data retrieval call binding the contract method s_config.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) SConfig() (SConfig,

	error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.SConfig(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// SCurrentSubNonce is a free data retrieval call binding the contract method s_currentSubNonce.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) SCurrentSubNonce(opts *bind.CallOpts) (uint64, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "s_currentSubNonce")

	if err != nil {
		return *new(uint64), err
	}

	out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64)

	return out0, err

}

// SCurrentSubNonce is a free data retrieval call binding the contract method s_currentSubNonce.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) SCurrentSubNonce() (uint64, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.SCurrentSubNonce(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// SCurrentSubNonce is a free data retrieval call binding the contract method s_currentSubNonce.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) SCurrentSubNonce() (uint64, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.SCurrentSubNonce(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// SProvingKeyHashes is a free data retrieval call binding the contract method s_provingKeyHashes.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) SProvingKeyHashes(opts *bind.CallOpts, arg0 *big.Int) ([32]byte, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "s_provingKeyHashes", arg0)

	if err != nil {
		return *new([32]byte), err
	}

	out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)

	return out0, err

}

// SProvingKeyHashes is a free data retrieval call binding the contract method s_provingKeyHashes.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) SProvingKeyHashes(arg0 *big.Int) ([32]byte, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.SProvingKeyHashes(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts, arg0)
}

// SProvingKeyHashes is a free data retrieval call binding the contract method s_provingKeyHashes.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) SProvingKeyHashes(arg0 *big.Int) ([32]byte, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.SProvingKeyHashes(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts, arg0)
}

// SRequestCommitments is a free data retrieval call binding the contract method s_requestCommitments.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) SRequestCommitments(opts *bind.CallOpts, arg0 *big.Int) ([32]byte, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "s_requestCommitments", arg0)

	if err != nil {
		return *new([32]byte), err
	}

	out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)

	return out0, err

}

// SRequestCommitments is a free data retrieval call binding the contract method s_requestCommitments.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) SRequestCommitments(arg0 *big.Int) ([32]byte, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.SRequestCommitments(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts, arg0)
}

// SRequestCommitments is a free data retrieval call binding the contract method s_requestCommitments.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) SRequestCommitments(arg0 *big.Int) ([32]byte, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.SRequestCommitments(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts, arg0)
}

// STotalBalance is a free data retrieval call binding the contract method s_totalBalance.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) STotalBalance(opts *bind.CallOpts) (*big.Int, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "s_totalBalance")

	if err != nil {
		return *new(*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)

	return out0, err

}

// STotalBalance is a free data retrieval call binding the contract method s_totalBalance.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) STotalBalance() (*big.Int, error) {
	return
_VRFCoordinatorV2PlusUpgradedVersion.Contract.STotalBalance(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// STotalBalance is a free data retrieval call binding the contract method s_totalBalance.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) STotalBalance() (*big.Int, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.STotalBalance(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// STotalNativeBalance is a free data retrieval call binding the contract method s_totalNativeBalance.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCaller) STotalNativeBalance(opts *bind.CallOpts) (*big.Int, error) {
	var out []interface{}
	err := _VRFCoordinatorV2PlusUpgradedVersion.contract.Call(opts, &out, "s_totalNativeBalance")

	if err != nil {
		return *new(*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)

	return out0, err

}

// STotalNativeBalance is a free data retrieval call binding the contract method s_totalNativeBalance.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) STotalNativeBalance() (*big.Int, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.STotalNativeBalance(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// STotalNativeBalance is a free data retrieval call binding the contract method s_totalNativeBalance.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionCallerSession) STotalNativeBalance() (*big.Int, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.STotalNativeBalance(&_VRFCoordinatorV2PlusUpgradedVersion.CallOpts)
}

// AcceptOwnership is a paid mutator transaction binding the contract method acceptOwnership.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "acceptOwnership")
}

// AcceptOwnership is a paid mutator transaction binding the contract method acceptOwnership.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) AcceptOwnership() (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.AcceptOwnership(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts)
}

// AcceptOwnership is a paid mutator transaction binding the contract method acceptOwnership.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) AcceptOwnership() (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.AcceptOwnership(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts)
}

// AcceptSubscriptionOwnerTransfer is a paid mutator transaction binding the contract method acceptSubscriptionOwnerTransfer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "acceptSubscriptionOwnerTransfer", subId)
}

// AcceptSubscriptionOwnerTransfer is a paid mutator transaction binding the contract method acceptSubscriptionOwnerTransfer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) AcceptSubscriptionOwnerTransfer(subId *big.Int) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.AcceptSubscriptionOwnerTransfer(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId)
}

// AcceptSubscriptionOwnerTransfer is a paid mutator transaction binding the contract method acceptSubscriptionOwnerTransfer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) AcceptSubscriptionOwnerTransfer(subId *big.Int) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.AcceptSubscriptionOwnerTransfer(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId)
}

// AddConsumer is a paid mutator transaction binding the contract method addConsumer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) AddConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "addConsumer", subId, consumer)
}

// AddConsumer is a paid mutator transaction binding the contract method addConsumer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) AddConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.AddConsumer(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId, consumer)
}

// AddConsumer is a paid mutator transaction binding the contract method addConsumer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) AddConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.AddConsumer(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId, consumer)
}

// CancelSubscription is a paid mutator transaction binding the contract method cancelSubscription.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) CancelSubscription(opts *bind.TransactOpts, subId *big.Int, to common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "cancelSubscription", subId, to)
}

// CancelSubscription is a paid mutator transaction binding the contract method cancelSubscription.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) CancelSubscription(subId *big.Int, to common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.CancelSubscription(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId, to)
}

// CancelSubscription is a paid mutator transaction binding the contract method cancelSubscription.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) CancelSubscription(subId *big.Int, to common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.CancelSubscription(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId, to)
}

// CreateSubscription is a paid mutator transaction binding the contract method createSubscription.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "createSubscription")
}

// CreateSubscription is a paid mutator transaction binding the contract method createSubscription.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) CreateSubscription() (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.CreateSubscription(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts)
}

// CreateSubscription is a paid mutator transaction binding the contract method createSubscription.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) CreateSubscription() (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.CreateSubscription(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts)
}

// FulfillRandomWords is a paid mutator transaction binding the contract method fulfillRandomWords.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) FulfillRandomWords(opts *bind.TransactOpts, proof VRFProof, rc VRFCoordinatorV2PlusUpgradedVersionRequestCommitment, arg2 bool) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "fulfillRandomWords", proof, rc, arg2)
}

// FulfillRandomWords is a paid mutator transaction binding the contract method fulfillRandomWords.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) FulfillRandomWords(proof VRFProof, rc VRFCoordinatorV2PlusUpgradedVersionRequestCommitment, arg2 bool) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.FulfillRandomWords(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, proof, rc, arg2)
}

// FulfillRandomWords is a paid mutator transaction binding the contract method fulfillRandomWords.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) FulfillRandomWords(proof VRFProof, rc VRFCoordinatorV2PlusUpgradedVersionRequestCommitment, arg2 bool) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.FulfillRandomWords(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, proof, rc, arg2)
}

// FundSubscriptionWithNative is a paid mutator transaction binding the contract method fundSubscriptionWithNative.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) FundSubscriptionWithNative(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "fundSubscriptionWithNative", subId)
}

// FundSubscriptionWithNative is a paid mutator transaction binding the contract method fundSubscriptionWithNative.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) FundSubscriptionWithNative(subId *big.Int) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.FundSubscriptionWithNative(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId)
}

// FundSubscriptionWithNative is a paid mutator transaction binding the contract method fundSubscriptionWithNative.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) FundSubscriptionWithNative(subId *big.Int) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.FundSubscriptionWithNative(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId)
}

// Migrate is a paid mutator transaction binding the contract method migrate.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) Migrate(opts *bind.TransactOpts, subId *big.Int, newCoordinator common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "migrate", subId, newCoordinator)
}

// Migrate is a paid mutator transaction binding the contract method migrate.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) Migrate(subId *big.Int, newCoordinator common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.Migrate(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId, newCoordinator)
}

// Migrate is a paid mutator transaction binding the contract method migrate.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) Migrate(subId *big.Int, newCoordinator common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.Migrate(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId, newCoordinator)
}

// OnMigration is a paid mutator transaction binding the contract method onMigration.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) OnMigration(opts *bind.TransactOpts, encodedData []byte) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "onMigration", encodedData)
}

// OnMigration is a paid mutator transaction binding the contract method onMigration.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) OnMigration(encodedData []byte) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.OnMigration(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, encodedData)
}

// OnMigration is a paid mutator transaction binding the contract method onMigration.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) OnMigration(encodedData []byte) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.OnMigration(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, encodedData)
}

// OnTokenTransfer is a paid mutator transaction binding the contract method onTokenTransfer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "onTokenTransfer", arg0, amount, data)
}

// OnTokenTransfer is a paid mutator transaction binding the contract method onTokenTransfer.
func
(_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.OnTokenTransfer(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, arg0, amount, data)
}

// OnTokenTransfer is a paid mutator transaction binding the contract method onTokenTransfer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.OnTokenTransfer(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, arg0, amount, data)
}

// OwnerCancelSubscription is a paid mutator transaction binding the contract method ownerCancelSubscription.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) OwnerCancelSubscription(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "ownerCancelSubscription", subId)
}

// OwnerCancelSubscription is a paid mutator transaction binding the contract method ownerCancelSubscription.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) OwnerCancelSubscription(subId *big.Int) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.OwnerCancelSubscription(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId)
}

// OwnerCancelSubscription is a paid mutator transaction binding the contract method ownerCancelSubscription.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) OwnerCancelSubscription(subId *big.Int) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.OwnerCancelSubscription(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId)
}

// RecoverFunds is a paid mutator transaction binding the contract method recoverFunds.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "recoverFunds", to)
}

// RecoverFunds is a paid mutator transaction binding the contract method recoverFunds.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) RecoverFunds(to common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RecoverFunds(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, to)
}

// RecoverFunds is a paid mutator transaction binding the contract method recoverFunds.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) RecoverFunds(to common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RecoverFunds(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, to)
}

// RecoverNativeFunds is a paid mutator transaction binding the contract method recoverNativeFunds.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) RecoverNativeFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "recoverNativeFunds", to)
}

// RecoverNativeFunds is a paid mutator transaction binding the contract method recoverNativeFunds.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) RecoverNativeFunds(to common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RecoverNativeFunds(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, to)
}

// RecoverNativeFunds is a paid mutator transaction binding the contract method recoverNativeFunds.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) RecoverNativeFunds(to common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RecoverNativeFunds(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, to)
}

// RegisterMigratableCoordinator is a paid mutator transaction binding the contract method registerMigratableCoordinator.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) RegisterMigratableCoordinator(opts *bind.TransactOpts, target common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "registerMigratableCoordinator", target)
}

// RegisterMigratableCoordinator is a paid mutator transaction binding the contract method registerMigratableCoordinator.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) RegisterMigratableCoordinator(target common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RegisterMigratableCoordinator(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, target)
}

// RegisterMigratableCoordinator is a paid mutator transaction binding the contract method registerMigratableCoordinator.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) RegisterMigratableCoordinator(target common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RegisterMigratableCoordinator(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, target)
}

// RegisterProvingKey is a paid mutator transaction binding the contract method registerProvingKey.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) RegisterProvingKey(opts *bind.TransactOpts, publicProvingKey [2]*big.Int) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "registerProvingKey", publicProvingKey)
}

// RegisterProvingKey is a paid mutator transaction binding the contract method registerProvingKey.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) RegisterProvingKey(publicProvingKey [2]*big.Int) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RegisterProvingKey(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, publicProvingKey)
}

// RegisterProvingKey is a paid mutator transaction binding the contract method registerProvingKey.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) RegisterProvingKey(publicProvingKey [2]*big.Int) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RegisterProvingKey(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, publicProvingKey)
}

// RemoveConsumer is a paid mutator transaction binding the contract method removeConsumer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) RemoveConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "removeConsumer", subId, consumer)
}

// RemoveConsumer is a paid mutator transaction binding the contract method removeConsumer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) RemoveConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RemoveConsumer(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId, consumer)
}

// RemoveConsumer is a paid mutator transaction binding the contract method removeConsumer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) RemoveConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RemoveConsumer(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId, consumer)
}

// RequestRandomWords is a paid mutator transaction binding the contract method requestRandomWords.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) RequestRandomWords(opts *bind.TransactOpts, req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "requestRandomWords", req)
}

// RequestRandomWords is a paid mutator transaction binding the contract method requestRandomWords.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) RequestRandomWords(req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RequestRandomWords(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, req)
}

// RequestRandomWords is a paid mutator transaction binding the contract method requestRandomWords.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) RequestRandomWords(req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RequestRandomWords(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, req)
}

// RequestSubscriptionOwnerTransfer is a paid mutator transaction binding the contract method requestSubscriptionOwnerTransfer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int, newOwner common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "requestSubscriptionOwnerTransfer", subId, newOwner)
}

// RequestSubscriptionOwnerTransfer is a paid mutator transaction binding the contract method requestSubscriptionOwnerTransfer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) RequestSubscriptionOwnerTransfer(subId *big.Int, newOwner common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RequestSubscriptionOwnerTransfer(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId, newOwner)
}

// RequestSubscriptionOwnerTransfer is a paid mutator transaction binding the contract method requestSubscriptionOwnerTransfer.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) RequestSubscriptionOwnerTransfer(subId *big.Int, newOwner common.Address) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.RequestSubscriptionOwnerTransfer(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, subId, newOwner)
}

// SetConfig is a paid mutator transaction binding the contract method setConfig.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) SetConfig(opts *bind.TransactOpts, minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, fulfillmentFlatFeeNativePPM uint32, fulfillmentFlatFeeLinkDiscountPPM uint32, nativePremiumPercentage uint8, linkPremiumPercentage uint8) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "setConfig", minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, fulfillmentFlatFeeNativePPM, fulfillmentFlatFeeLinkDiscountPPM, nativePremiumPercentage, linkPremiumPercentage)
}

// SetConfig is a paid mutator transaction binding the contract method setConfig.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, fulfillmentFlatFeeNativePPM uint32, fulfillmentFlatFeeLinkDiscountPPM uint32, nativePremiumPercentage uint8, linkPremiumPercentage uint8) (*types.Transaction, error) {
	return _VRFCoordinatorV2PlusUpgradedVersion.Contract.SetConfig(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, fulfillmentFlatFeeNativePPM, fulfillmentFlatFeeLinkDiscountPPM, nativePremiumPercentage, linkPremiumPercentage)
}

// SetConfig is a paid mutator transaction binding the contract method setConfig.
func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink
*big.Int, fulfillmentFlatFeeNativePPM uint32, fulfillmentFlatFeeLinkDiscountPPM uint32, nativePremiumPercentage uint8, linkPremiumPercentage uint8) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.Contract.SetConfig(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, fulfillmentFlatFeeNativePPM, fulfillmentFlatFeeLinkDiscountPPM, nativePremiumPercentage, linkPremiumPercentage) +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) SetPLIAndPLINativeFeed(opts *bind.TransactOpts, link common.Address, linkNativeFeed common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "setPLIAndPLINativeFeed", link, linkNativeFeed) +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) SetPLIAndPLINativeFeed(link common.Address, linkNativeFeed common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.Contract.SetPLIAndPLINativeFeed(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, link, linkNativeFeed) +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) SetPLIAndPLINativeFeed(link common.Address, linkNativeFeed common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.Contract.SetPLIAndPLINativeFeed(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, link, linkNativeFeed) +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) TransferOwnership(to common.Address) 
(*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.Contract.TransferOwnership(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, to) +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.Contract.TransferOwnership(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, to) +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) Withdraw(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "withdraw", recipient) +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) Withdraw(recipient common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.Contract.Withdraw(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, recipient) +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactorSession) Withdraw(recipient common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.Contract.Withdraw(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, recipient) +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionTransactor) WithdrawNative(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.contract.Transact(opts, "withdrawNative", recipient) +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionSession) WithdrawNative(recipient common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.Contract.WithdrawNative(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, recipient) +} + +func (_VRFCoordinatorV2PlusUpgradedVersion 
*VRFCoordinatorV2PlusUpgradedVersionTransactorSession) WithdrawNative(recipient common.Address) (*types.Transaction, error) { + return _VRFCoordinatorV2PlusUpgradedVersion.Contract.WithdrawNative(&_VRFCoordinatorV2PlusUpgradedVersion.TransactOpts, recipient) +} + +type VRFCoordinatorV2PlusUpgradedVersionConfigSetIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionConfigSetIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionConfigSet struct { + MinimumRequestConfirmations uint16 + MaxGasLimit uint32 + StalenessSeconds uint32 + GasAfterPaymentCalculation uint32 + FallbackWeiPerUnitLink *big.Int + NativePremiumPercentage uint8 + LinkPremiumPercentage uint8 + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterConfigSet(opts *bind.FilterOpts) (*VRFCoordinatorV2PlusUpgradedVersionConfigSetIterator, error) { + + logs, 
sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionConfigSetIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionConfigSet) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionConfigSet) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseConfigSet(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionConfigSet, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionConfigSet) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegisteredIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegisteredIterator) Next() bool { + + if it.fail != nil { + return 
false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegisteredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegistered struct { + CoordinatorAddress common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterCoordinatorRegistered(opts *bind.FilterOpts) (*VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegisteredIterator, error) { + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "CoordinatorRegistered") + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegisteredIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "CoordinatorRegistered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchCoordinatorRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegistered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "CoordinatorRegistered") + if err != nil { + return nil, err + } + 
return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegistered) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "CoordinatorRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseCoordinatorRegistered(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegistered, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegistered) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "CoordinatorRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionFundsRecoveredIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionFundsRecovered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionFundsRecoveredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = 
true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionFundsRecoveredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionFundsRecoveredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionFundsRecovered struct { + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterFundsRecovered(opts *bind.FilterOpts) (*VRFCoordinatorV2PlusUpgradedVersionFundsRecoveredIterator, error) { + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionFundsRecoveredIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "FundsRecovered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionFundsRecovered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "FundsRecovered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionFundsRecovered) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseFundsRecovered(log types.Log) 
(*VRFCoordinatorV2PlusUpgradedVersionFundsRecovered, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionFundsRecovered) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "FundsRecovered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionMigrationCompletedIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionMigrationCompletedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionMigrationCompletedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionMigrationCompletedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted struct { + NewCoordinator common.Address + SubId *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterMigrationCompleted(opts *bind.FilterOpts) (*VRFCoordinatorV2PlusUpgradedVersionMigrationCompletedIterator, error) { + + logs, sub, err := 
_VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "MigrationCompleted") + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionMigrationCompletedIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "MigrationCompleted", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchMigrationCompleted(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "MigrationCompleted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "MigrationCompleted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseMigrationCompleted(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "MigrationCompleted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecoveredIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecovered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecoveredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecovered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecoveredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecoveredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecovered struct { + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterNativeFundsRecovered(opts *bind.FilterOpts) (*VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecoveredIterator, error) { + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "NativeFundsRecovered") + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecoveredIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "NativeFundsRecovered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchNativeFundsRecovered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecovered) (event.Subscription, error) { + + logs, sub, err := 
_VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "NativeFundsRecovered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecovered) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "NativeFundsRecovered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseNativeFundsRecovered(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecovered, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecovered) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "NativeFundsRecovered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequestedIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequested) + if 
err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequestedIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, 
err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequested) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequested, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequested) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferredIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferredIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, 
sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferred) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseOwnershipTransferred(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferred, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferred) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegisteredIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegistered) + if 
err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegisteredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegistered struct { + KeyHash [32]byte + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterProvingKeyRegistered(opts *bind.FilterOpts) (*VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegisteredIterator, error) { + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "ProvingKeyRegistered") + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegisteredIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "ProvingKeyRegistered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchProvingKeyRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegistered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "ProvingKeyRegistered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegistered) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "ProvingKeyRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + 
} + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseProvingKeyRegistered(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegistered, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegistered) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "ProvingKeyRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilledIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilledIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled struct { + RequestId *big.Int + OutputSeed *big.Int + SubID *big.Int + Payment 
*big.Int + Success bool + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int, subID []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilledIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + var subIDRule []interface{} + for _, subIDItem := range subID { + subIDRule = append(subIDRule, subIDItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "RandomWordsFulfilled", requestIdRule, subIDRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilledIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "RandomWordsFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled, requestId []*big.Int, subID []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + var subIDRule []interface{} + for _, subIDItem := range subID { + subIDRule = append(subIDRule, subIDItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "RandomWordsFulfilled", requestIdRule, subIDRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return err + } + event.Raw = log + + 
select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseRandomWordsFulfilled(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequestedIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested struct { + KeyHash [32]byte + RequestId *big.Int + PreSeed *big.Int + SubId *big.Int + MinimumRequestConfirmations uint16 + CallbackGasLimit uint32 + NumWords uint32 + ExtraArgs []byte + Sender common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []*big.Int, sender []common.Address) (*VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequestedIterator, error) { + + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "RandomWordsRequested", keyHashRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequestedIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "RandomWordsRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested, keyHash [][32]byte, subId []*big.Int, sender []common.Address) (event.Subscription, error) { + + var keyHashRule []interface{} + for _, keyHashItem := range keyHash { + keyHashRule = append(keyHashRule, keyHashItem) + } + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := 
_VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "RandomWordsRequested", keyHashRule, subIdRule, senderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "RandomWordsRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseRandomWordsRequested(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "RandomWordsRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceledIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceled) 
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceledIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceled struct { + SubId *big.Int + To common.Address + AmountLink *big.Int + AmountNative *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterSubscriptionCanceled(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceledIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "SubscriptionCanceled", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceledIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "SubscriptionCanceled", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceled, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "SubscriptionCanceled", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select 
{ + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceled) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseSubscriptionCanceled(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceled, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceled) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAddedIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAddedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAdded struct { + SubId *big.Int + Consumer common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAddedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "SubscriptionConsumerAdded", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAddedIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "SubscriptionConsumerAdded", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAdded, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "SubscriptionConsumerAdded", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAdded) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + 
return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseSubscriptionConsumerAdded(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAdded, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAdded) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemovedIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemoved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemovedIterator) Error() error { + return it.fail +} + +func (it 
*VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemoved struct { + SubId *big.Int + Consumer common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemovedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "SubscriptionConsumerRemoved", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemovedIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "SubscriptionConsumerRemoved", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemoved, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "SubscriptionConsumerRemoved", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemoved) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + 
return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseSubscriptionConsumerRemoved(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemoved, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemoved) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreatedIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreatedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreated struct { + SubId 
*big.Int + Owner common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterSubscriptionCreated(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreatedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "SubscriptionCreated", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreatedIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "SubscriptionCreated", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreated, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "SubscriptionCreated", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreated) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseSubscriptionCreated(log types.Log) 
(*VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreated, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreated) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionSubscriptionFunded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionFunded struct { + SubId *big.Int + OldBalance *big.Int + NewBalance *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterSubscriptionFunded(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedIterator, error) { + + 
var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "SubscriptionFunded", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "SubscriptionFunded", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionFunded, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "SubscriptionFunded", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionFunded) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseSubscriptionFunded(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionFunded, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionFunded) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNativeIterator struct { + Event *VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNative + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNativeIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNative) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNative) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNativeIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNativeIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNative struct { + SubId *big.Int + OldNativeBalance *big.Int + NewNativeBalance *big.Int + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterSubscriptionFundedWithNative(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNativeIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, 
"SubscriptionFundedWithNative", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNativeIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "SubscriptionFundedWithNative", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchSubscriptionFundedWithNative(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNative, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "SubscriptionFundedWithNative", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNative) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionFundedWithNative", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseSubscriptionFundedWithNative(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNative, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNative) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionFundedWithNative", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequestedIterator struct { 
+ Event *VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequested struct { + SubId *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequestedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "SubscriptionOwnerTransferRequested", subIdRule) + if err 
!= nil { + return nil, err + } + return &VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequestedIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "SubscriptionOwnerTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequested, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "SubscriptionOwnerTransferRequested", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequested) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseSubscriptionOwnerTransferRequested(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequested, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequested) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferredIterator 
struct { + Event *VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferred struct { + SubId *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferredIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.FilterLogs(opts, "SubscriptionOwnerTransferred", subIdRule) + if err != nil { + return nil, err + } + return 
&VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferredIterator{contract: _VRFCoordinatorV2PlusUpgradedVersion.contract, event: "SubscriptionOwnerTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferred, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinatorV2PlusUpgradedVersion.contract.WatchLogs(opts, "SubscriptionOwnerTransferred", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferred) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersionFilterer) ParseSubscriptionOwnerTransferred(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferred, error) { + event := new(VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferred) + if err := _VRFCoordinatorV2PlusUpgradedVersion.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetSubscription struct { + Balance *big.Int + NativeBalance *big.Int + ReqCount uint64 + Owner common.Address + Consumers []common.Address +} +type SConfig struct { + 
MinimumRequestConfirmations uint16 + MaxGasLimit uint32 + ReentrancyLock bool + StalenessSeconds uint32 + GasAfterPaymentCalculation uint32 + FulfillmentFlatFeeNativePPM uint32 + FulfillmentFlatFeeLinkDiscountPPM uint32 + NativePremiumPercentage uint8 + LinkPremiumPercentage uint8 +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersion) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["ConfigSet"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseConfigSet(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["CoordinatorRegistered"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseCoordinatorRegistered(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["FundsRecovered"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseFundsRecovered(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["MigrationCompleted"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseMigrationCompleted(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["NativeFundsRecovered"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseNativeFundsRecovered(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["OwnershipTransferRequested"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseOwnershipTransferRequested(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["OwnershipTransferred"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseOwnershipTransferred(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["ProvingKeyRegistered"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseProvingKeyRegistered(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["RandomWordsFulfilled"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseRandomWordsFulfilled(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["RandomWordsRequested"].ID: + return 
_VRFCoordinatorV2PlusUpgradedVersion.ParseRandomWordsRequested(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["SubscriptionCanceled"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseSubscriptionCanceled(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["SubscriptionConsumerAdded"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseSubscriptionConsumerAdded(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["SubscriptionConsumerRemoved"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseSubscriptionConsumerRemoved(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["SubscriptionCreated"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseSubscriptionCreated(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["SubscriptionFunded"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseSubscriptionFunded(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["SubscriptionFundedWithNative"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseSubscriptionFundedWithNative(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["SubscriptionOwnerTransferRequested"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseSubscriptionOwnerTransferRequested(log) + case _VRFCoordinatorV2PlusUpgradedVersion.abi.Events["SubscriptionOwnerTransferred"].ID: + return _VRFCoordinatorV2PlusUpgradedVersion.ParseSubscriptionOwnerTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFCoordinatorV2PlusUpgradedVersionConfigSet) Topic() common.Hash { + return common.HexToHash("0x95cb2ddab6d2297c29a4861691de69b3969c464aa4a9c44258b101ff02ff375a") +} + +func (VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegistered) Topic() common.Hash { + return common.HexToHash("0xb7cabbfc11e66731fc77de0444614282023bcbd41d16781c753a431d0af01625") +} + +func (VRFCoordinatorV2PlusUpgradedVersionFundsRecovered) Topic() common.Hash { + return 
common.HexToHash("0x59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600") +} + +func (VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted) Topic() common.Hash { + return common.HexToHash("0xd63ca8cb945956747ee69bfdc3ea754c24a4caf7418db70e46052f7850be4187") +} + +func (VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecovered) Topic() common.Hash { + return common.HexToHash("0x4aed7c8eed0496c8c19ea2681fcca25741c1602342e38b045d9f1e8e905d2e9c") +} + +func (VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegistered) Topic() common.Hash { + return common.HexToHash("0xc9583fd3afa3d7f16eb0b88d0268e7d05c09bafa4b21e092cbd1320e1bc8089d") +} + +func (VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled) Topic() common.Hash { + return common.HexToHash("0x49580fdfd9497e1ed5c1b1cec0495087ae8e3f1267470ec2fb015db32e3d6aa7") +} + +func (VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested) Topic() common.Hash { + return common.HexToHash("0xeb0e3652e0f44f417695e6e90f2f42c99b65cd7169074c5a654b16b9748c3a4e") +} + +func (VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceled) Topic() common.Hash { + return common.HexToHash("0x8c74ce8b8cf87f5eb001275c8be27eb34ea2b62bfab6814fcc62192bb63e81c4") +} + +func (VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAdded) Topic() common.Hash { + return common.HexToHash("0x1e980d04aa7648e205713e5e8ea3808672ac163d10936d36f91b2c88ac1575e1") +} + +func (VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemoved) Topic() common.Hash { + return common.HexToHash("0x32158c6058347c1601b2d12bc696ac6901d8a9a9aa3ba10c27ab0a983e8425a7") +} + +func 
(VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreated) Topic() common.Hash { + return common.HexToHash("0x1d3015d7ba850fa198dc7b1a3f5d42779313a681035f77c8c03764c61005518d") +} + +func (VRFCoordinatorV2PlusUpgradedVersionSubscriptionFunded) Topic() common.Hash { + return common.HexToHash("0x1ced9348ff549fceab2ac57cd3a9de38edaaab274b725ee82c23e8fc8c4eec7a") +} + +func (VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNative) Topic() common.Hash { + return common.HexToHash("0x7603b205d03651ee812f803fccde89f1012e545a9c99f0abfea9cedd0fd8e902") +} + +func (VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequested) Topic() common.Hash { + return common.HexToHash("0x21a4dad170a6bf476c31bbcf4a16628295b0e450672eec25d7c93308e05344a1") +} + +func (VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferred) Topic() common.Hash { + return common.HexToHash("0xd4114ab6e9af9f597c52041f32d62dc57c5c4e4c0d4427006069635e216c9386") +} + +func (_VRFCoordinatorV2PlusUpgradedVersion *VRFCoordinatorV2PlusUpgradedVersion) Address() common.Address { + return _VRFCoordinatorV2PlusUpgradedVersion.address +} + +type VRFCoordinatorV2PlusUpgradedVersionInterface interface { + BLOCKHASHSTORE(opts *bind.CallOpts) (common.Address, error) + + PLI(opts *bind.CallOpts) (common.Address, error) + + PLINATIVEFEED(opts *bind.CallOpts) (common.Address, error) + + MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) + + MAXNUMWORDS(opts *bind.CallOpts) (uint32, error) + + MAXREQUESTCONFIRMATIONS(opts *bind.CallOpts) (uint16, error) + + GetActiveSubscriptionIds(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + + GetRequestConfig(opts *bind.CallOpts) (uint16, uint32, [][32]byte, error) + + GetSubscription(opts *bind.CallOpts, subId *big.Int) (GetSubscription, + + error) + + HashOfKey(opts *bind.CallOpts, publicKey [2]*big.Int) ([32]byte, error) + + MigrationVersion(opts *bind.CallOpts) (uint8, error) + + Owner(opts *bind.CallOpts) (common.Address, error) 
+ + PendingRequestExists(opts *bind.CallOpts, subId *big.Int) (bool, error) + + SConfig(opts *bind.CallOpts) (SConfig, + + error) + + SCurrentSubNonce(opts *bind.CallOpts) (uint64, error) + + SProvingKeyHashes(opts *bind.CallOpts, arg0 *big.Int) ([32]byte, error) + + SRequestCommitments(opts *bind.CallOpts, arg0 *big.Int) ([32]byte, error) + + STotalBalance(opts *bind.CallOpts) (*big.Int, error) + + STotalNativeBalance(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) + + AddConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) + + CancelSubscription(opts *bind.TransactOpts, subId *big.Int, to common.Address) (*types.Transaction, error) + + CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) + + FulfillRandomWords(opts *bind.TransactOpts, proof VRFProof, rc VRFCoordinatorV2PlusUpgradedVersionRequestCommitment, arg2 bool) (*types.Transaction, error) + + FundSubscriptionWithNative(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) + + Migrate(opts *bind.TransactOpts, subId *big.Int, newCoordinator common.Address) (*types.Transaction, error) + + OnMigration(opts *bind.TransactOpts, encodedData []byte) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + OwnerCancelSubscription(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) + + RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + RecoverNativeFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + RegisterMigratableCoordinator(opts *bind.TransactOpts, target common.Address) (*types.Transaction, error) + + RegisterProvingKey(opts *bind.TransactOpts, publicProvingKey [2]*big.Int) 
(*types.Transaction, error) + + RemoveConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, req VRFV2PlusClientRandomWordsRequest) (*types.Transaction, error) + + RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int, newOwner common.Address) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, fulfillmentFlatFeeNativePPM uint32, fulfillmentFlatFeeLinkDiscountPPM uint32, nativePremiumPercentage uint8, linkPremiumPercentage uint8) (*types.Transaction, error) + + SetPLIAndPLINativeFeed(opts *bind.TransactOpts, link common.Address, linkNativeFeed common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Withdraw(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) + + WithdrawNative(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) + + FilterConfigSet(opts *bind.FilterOpts) (*VRFCoordinatorV2PlusUpgradedVersionConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionConfigSet, error) + + FilterCoordinatorRegistered(opts *bind.FilterOpts) (*VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegisteredIterator, error) + + WatchCoordinatorRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegistered) (event.Subscription, error) + + ParseCoordinatorRegistered(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionCoordinatorRegistered, error) + + FilterFundsRecovered(opts *bind.FilterOpts) (*VRFCoordinatorV2PlusUpgradedVersionFundsRecoveredIterator, error) + + 
WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionFundsRecovered) (event.Subscription, error) + + ParseFundsRecovered(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionFundsRecovered, error) + + FilterMigrationCompleted(opts *bind.FilterOpts) (*VRFCoordinatorV2PlusUpgradedVersionMigrationCompletedIterator, error) + + WatchMigrationCompleted(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted) (event.Subscription, error) + + ParseMigrationCompleted(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted, error) + + FilterNativeFundsRecovered(opts *bind.FilterOpts) (*VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecoveredIterator, error) + + WatchNativeFundsRecovered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecovered) (event.Subscription, error) + + ParseNativeFundsRecovered(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionNativeFundsRecovered, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionOwnershipTransferred, error) + + 
FilterProvingKeyRegistered(opts *bind.FilterOpts) (*VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegisteredIterator, error) + + WatchProvingKeyRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegistered) (event.Subscription, error) + + ParseProvingKeyRegistered(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionProvingKeyRegistered, error) + + FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int, subID []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilledIterator, error) + + WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled, requestId []*big.Int, subID []*big.Int) (event.Subscription, error) + + ParseRandomWordsFulfilled(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled, error) + + FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []*big.Int, sender []common.Address) (*VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequestedIterator, error) + + WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested, keyHash [][32]byte, subId []*big.Int, sender []common.Address) (event.Subscription, error) + + ParseRandomWordsRequested(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested, error) + + FilterSubscriptionCanceled(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceledIterator, error) + + WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceled, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionCanceled(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionCanceled, error) + + FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAddedIterator, error) + + WatchSubscriptionConsumerAdded(opts 
*bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAdded, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionConsumerAdded(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerAdded, error) + + FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemovedIterator, error) + + WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemoved, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionConsumerRemoved(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionConsumerRemoved, error) + + FilterSubscriptionCreated(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreatedIterator, error) + + WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreated, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionCreated(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionCreated, error) + + FilterSubscriptionFunded(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedIterator, error) + + WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionFunded, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionFunded(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionFunded, error) + + FilterSubscriptionFundedWithNative(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNativeIterator, error) + + WatchSubscriptionFundedWithNative(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNative, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionFundedWithNative(log types.Log) 
(*VRFCoordinatorV2PlusUpgradedVersionSubscriptionFundedWithNative, error) + + FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequestedIterator, error) + + WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequested, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionOwnerTransferRequested(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferRequested, error) + + FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferredIterator, error) + + WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferred, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionOwnerTransferred(log types.Log) (*VRFCoordinatorV2PlusUpgradedVersionSubscriptionOwnerTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2_proxy_admin/vrfv2_proxy_admin.go b/core/gethwrappers/generated/vrfv2_proxy_admin/vrfv2_proxy_admin.go new file mode 100644 index 00000000..2837ca04 --- /dev/null +++ b/core/gethwrappers/generated/vrfv2_proxy_admin/vrfv2_proxy_admin.go @@ -0,0 +1,480 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrfv2_proxy_admin + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2ProxyAdminMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"contractITransparentUpgradeableProxy\",\"name\":\"proxy\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"}],\"name\":\"changeProxyAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractITransparentUpgradeableProxy\",\"name\":\"proxy\",\"type\":\"address\"}],\"name\":\"getProxyAdmin\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractITransparentUpgradeableProxy\",\"name\":\"proxy\",\"type\":\"address\"}],\"name\":\"getProxyImplementation\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"
name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractITransparentUpgradeableProxy\",\"name\":\"proxy\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"implementation\",\"type\":\"address\"}],\"name\":\"upgrade\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractITransparentUpgradeableProxy\",\"name\":\"proxy\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"implementation\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"upgradeAndCall\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b5061001a3361001f565b61006f565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b6108438061007e6000396000f3fe60806040526004361061007b5760003560e01c80639623609d1161004e5780639623609d1461012b57806399a88ec41461013e578063f2fde38b1461015e578063f3b7dead1461017e57600080fd5b8063204e1c7a14610080578063715018a6146100c95780637eff275e146100e05780638da5cb5b14610100575b600080fd5b34801561008c57600080fd5b506100a061009b3660046105e6565b61019e565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b3480156100d557600080fd5b506100de610255565b005b3480156100ec57600080fd5b506100de6100fb366004610627565b610269565b34801561010c57600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff166100a0565b6100de610139366004610660565b6102f7565b34801561014a57600080fd5b506100de610159366004610627565b61038c565b34801561016a57600080fd5b506100de6101793660046105e6565b6103e8565b34801561018a57600080fd5b50610
0a06101993660046105e6565b6104a4565b60008060008373ffffffffffffffffffffffffffffffffffffffff166040516101ea907f5c60da1b00000000000000000000000000000000000000000000000000000000815260040190565b600060405180830381855afa9150503d8060008114610225576040519150601f19603f3d011682016040523d82523d6000602084013e61022a565b606091505b50915091508161023957600080fd5b8080602001905181019061024d919061060a565b949350505050565b61025d6104f0565b6102676000610571565b565b6102716104f0565b6040517f8f28397000000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8281166004830152831690638f283970906024015b600060405180830381600087803b1580156102db57600080fd5b505af11580156102ef573d6000803e3d6000fd5b505050505050565b6102ff6104f0565b6040517f4f1ef28600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff841690634f1ef2869034906103559086908690600401610754565b6000604051808303818588803b15801561036e57600080fd5b505af1158015610382573d6000803e3d6000fd5b5050505050505050565b6103946104f0565b6040517f3659cfe600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8281166004830152831690633659cfe6906024016102c1565b6103f06104f0565b73ffffffffffffffffffffffffffffffffffffffff8116610498576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084015b60405180910390fd5b6104a181610571565b50565b60008060008373ffffffffffffffffffffffffffffffffffffffff166040516101ea907ff851a44000000000000000000000000000000000000000000000000000000000815260040190565b60005473ffffffffffffffffffffffffffffffffffffffff163314610267576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e6572604482015260640161048f565b6000805
473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b6000602082840312156105f857600080fd5b813561060381610814565b9392505050565b60006020828403121561061c57600080fd5b815161060381610814565b6000806040838503121561063a57600080fd5b823561064581610814565b9150602083013561065581610814565b809150509250929050565b60008060006060848603121561067557600080fd5b833561068081610814565b9250602084013561069081610814565b9150604084013567ffffffffffffffff808211156106ad57600080fd5b818601915086601f8301126106c157600080fd5b8135818111156106d3576106d36107e5565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908382118183101715610719576107196107e5565b8160405282815289602084870101111561073257600080fd5b8260208601602083013760006020848301015280955050505050509250925092565b73ffffffffffffffffffffffffffffffffffffffff8316815260006020604081840152835180604085015260005b8181101561079e57858101830151858201606001528201610782565b818111156107b0576000606083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01692909201606001949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b73ffffffffffffffffffffffffffffffffffffffff811681146104a157600080fdfea164736f6c6343000806000a", +} + +var VRFV2ProxyAdminABI = VRFV2ProxyAdminMetaData.ABI + +var VRFV2ProxyAdminBin = VRFV2ProxyAdminMetaData.Bin + +func DeployVRFV2ProxyAdmin(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRFV2ProxyAdmin, error) { + parsed, err := VRFV2ProxyAdminMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, 
common.FromHex(VRFV2ProxyAdminBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2ProxyAdmin{address: address, abi: *parsed, VRFV2ProxyAdminCaller: VRFV2ProxyAdminCaller{contract: contract}, VRFV2ProxyAdminTransactor: VRFV2ProxyAdminTransactor{contract: contract}, VRFV2ProxyAdminFilterer: VRFV2ProxyAdminFilterer{contract: contract}}, nil +} + +type VRFV2ProxyAdmin struct { + address common.Address + abi abi.ABI + VRFV2ProxyAdminCaller + VRFV2ProxyAdminTransactor + VRFV2ProxyAdminFilterer +} + +type VRFV2ProxyAdminCaller struct { + contract *bind.BoundContract +} + +type VRFV2ProxyAdminTransactor struct { + contract *bind.BoundContract +} + +type VRFV2ProxyAdminFilterer struct { + contract *bind.BoundContract +} + +type VRFV2ProxyAdminSession struct { + Contract *VRFV2ProxyAdmin + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2ProxyAdminCallerSession struct { + Contract *VRFV2ProxyAdminCaller + CallOpts bind.CallOpts +} + +type VRFV2ProxyAdminTransactorSession struct { + Contract *VRFV2ProxyAdminTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2ProxyAdminRaw struct { + Contract *VRFV2ProxyAdmin +} + +type VRFV2ProxyAdminCallerRaw struct { + Contract *VRFV2ProxyAdminCaller +} + +type VRFV2ProxyAdminTransactorRaw struct { + Contract *VRFV2ProxyAdminTransactor +} + +func NewVRFV2ProxyAdmin(address common.Address, backend bind.ContractBackend) (*VRFV2ProxyAdmin, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2ProxyAdminABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2ProxyAdmin(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2ProxyAdmin{address: address, abi: abi, VRFV2ProxyAdminCaller: VRFV2ProxyAdminCaller{contract: contract}, VRFV2ProxyAdminTransactor: VRFV2ProxyAdminTransactor{contract: contract}, VRFV2ProxyAdminFilterer: VRFV2ProxyAdminFilterer{contract: contract}}, nil +} + +func 
NewVRFV2ProxyAdminCaller(address common.Address, caller bind.ContractCaller) (*VRFV2ProxyAdminCaller, error) { + contract, err := bindVRFV2ProxyAdmin(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2ProxyAdminCaller{contract: contract}, nil +} + +func NewVRFV2ProxyAdminTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2ProxyAdminTransactor, error) { + contract, err := bindVRFV2ProxyAdmin(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2ProxyAdminTransactor{contract: contract}, nil +} + +func NewVRFV2ProxyAdminFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2ProxyAdminFilterer, error) { + contract, err := bindVRFV2ProxyAdmin(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2ProxyAdminFilterer{contract: contract}, nil +} + +func bindVRFV2ProxyAdmin(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2ProxyAdminMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2ProxyAdmin.Contract.VRFV2ProxyAdminCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.VRFV2ProxyAdminTransactor.contract.Transfer(opts) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.VRFV2ProxyAdminTransactor.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2ProxyAdmin.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.contract.Transfer(opts) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminCaller) GetProxyAdmin(opts *bind.CallOpts, proxy common.Address) (common.Address, error) { + var out []interface{} + err := _VRFV2ProxyAdmin.contract.Call(opts, &out, "getProxyAdmin", proxy) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminSession) GetProxyAdmin(proxy common.Address) (common.Address, error) { + return _VRFV2ProxyAdmin.Contract.GetProxyAdmin(&_VRFV2ProxyAdmin.CallOpts, proxy) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminCallerSession) GetProxyAdmin(proxy common.Address) (common.Address, error) { + return _VRFV2ProxyAdmin.Contract.GetProxyAdmin(&_VRFV2ProxyAdmin.CallOpts, proxy) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminCaller) GetProxyImplementation(opts *bind.CallOpts, proxy common.Address) (common.Address, error) { + var out []interface{} + err := _VRFV2ProxyAdmin.contract.Call(opts, &out, "getProxyImplementation", proxy) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminSession) GetProxyImplementation(proxy common.Address) (common.Address, error) { + 
return _VRFV2ProxyAdmin.Contract.GetProxyImplementation(&_VRFV2ProxyAdmin.CallOpts, proxy) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminCallerSession) GetProxyImplementation(proxy common.Address) (common.Address, error) { + return _VRFV2ProxyAdmin.Contract.GetProxyImplementation(&_VRFV2ProxyAdmin.CallOpts, proxy) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2ProxyAdmin.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminSession) Owner() (common.Address, error) { + return _VRFV2ProxyAdmin.Contract.Owner(&_VRFV2ProxyAdmin.CallOpts) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminCallerSession) Owner() (common.Address, error) { + return _VRFV2ProxyAdmin.Contract.Owner(&_VRFV2ProxyAdmin.CallOpts) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminTransactor) ChangeProxyAdmin(opts *bind.TransactOpts, proxy common.Address, newAdmin common.Address) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.contract.Transact(opts, "changeProxyAdmin", proxy, newAdmin) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminSession) ChangeProxyAdmin(proxy common.Address, newAdmin common.Address) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.ChangeProxyAdmin(&_VRFV2ProxyAdmin.TransactOpts, proxy, newAdmin) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminTransactorSession) ChangeProxyAdmin(proxy common.Address, newAdmin common.Address) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.ChangeProxyAdmin(&_VRFV2ProxyAdmin.TransactOpts, proxy, newAdmin) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminTransactor) RenounceOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.contract.Transact(opts, "renounceOwnership") +} + +func (_VRFV2ProxyAdmin 
*VRFV2ProxyAdminSession) RenounceOwnership() (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.RenounceOwnership(&_VRFV2ProxyAdmin.TransactOpts) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminTransactorSession) RenounceOwnership() (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.RenounceOwnership(&_VRFV2ProxyAdmin.TransactOpts) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminTransactor) TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.contract.Transact(opts, "transferOwnership", newOwner) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.TransferOwnership(&_VRFV2ProxyAdmin.TransactOpts, newOwner) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminTransactorSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.TransferOwnership(&_VRFV2ProxyAdmin.TransactOpts, newOwner) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminTransactor) Upgrade(opts *bind.TransactOpts, proxy common.Address, implementation common.Address) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.contract.Transact(opts, "upgrade", proxy, implementation) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminSession) Upgrade(proxy common.Address, implementation common.Address) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.Upgrade(&_VRFV2ProxyAdmin.TransactOpts, proxy, implementation) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminTransactorSession) Upgrade(proxy common.Address, implementation common.Address) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.Upgrade(&_VRFV2ProxyAdmin.TransactOpts, proxy, implementation) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminTransactor) UpgradeAndCall(opts *bind.TransactOpts, proxy common.Address, implementation common.Address, data []byte) (*types.Transaction, error) { + 
return _VRFV2ProxyAdmin.contract.Transact(opts, "upgradeAndCall", proxy, implementation, data) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminSession) UpgradeAndCall(proxy common.Address, implementation common.Address, data []byte) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.UpgradeAndCall(&_VRFV2ProxyAdmin.TransactOpts, proxy, implementation, data) +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminTransactorSession) UpgradeAndCall(proxy common.Address, implementation common.Address, data []byte) (*types.Transaction, error) { + return _VRFV2ProxyAdmin.Contract.UpgradeAndCall(&_VRFV2ProxyAdmin.TransactOpts, proxy, implementation, data) +} + +type VRFV2ProxyAdminOwnershipTransferredIterator struct { + Event *VRFV2ProxyAdminOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2ProxyAdminOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2ProxyAdminOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2ProxyAdminOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2ProxyAdminOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2ProxyAdminOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2ProxyAdminOwnershipTransferred struct { + PreviousOwner common.Address + NewOwner common.Address + Raw types.Log +} + +func (_VRFV2ProxyAdmin 
*VRFV2ProxyAdminFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*VRFV2ProxyAdminOwnershipTransferredIterator, error) { + + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) + } + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) + } + + logs, sub, err := _VRFV2ProxyAdmin.contract.FilterLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + if err != nil { + return nil, err + } + return &VRFV2ProxyAdminOwnershipTransferredIterator{contract: _VRFV2ProxyAdmin.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2ProxyAdminOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { + + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) + } + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) + } + + logs, sub, err := _VRFV2ProxyAdmin.contract.WatchLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2ProxyAdminOwnershipTransferred) + if err := _VRFV2ProxyAdmin.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + 
}), nil +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdminFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2ProxyAdminOwnershipTransferred, error) { + event := new(VRFV2ProxyAdminOwnershipTransferred) + if err := _VRFV2ProxyAdmin.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdmin) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2ProxyAdmin.abi.Events["OwnershipTransferred"].ID: + return _VRFV2ProxyAdmin.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2ProxyAdminOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_VRFV2ProxyAdmin *VRFV2ProxyAdmin) Address() common.Address { + return _VRFV2ProxyAdmin.address +} + +type VRFV2ProxyAdminInterface interface { + GetProxyAdmin(opts *bind.CallOpts, proxy common.Address) (common.Address, error) + + GetProxyImplementation(opts *bind.CallOpts, proxy common.Address) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + ChangeProxyAdmin(opts *bind.TransactOpts, proxy common.Address, newAdmin common.Address) (*types.Transaction, error) + + RenounceOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) + + Upgrade(opts *bind.TransactOpts, proxy common.Address, implementation common.Address) (*types.Transaction, error) + + UpgradeAndCall(opts *bind.TransactOpts, proxy common.Address, implementation common.Address, data []byte) (*types.Transaction, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*VRFV2ProxyAdminOwnershipTransferredIterator, error) + + 
WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2ProxyAdminOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2ProxyAdminOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2_reverting_example/vrfv2_reverting_example.go b/core/gethwrappers/generated/vrfv2_reverting_example/vrfv2_reverting_example.go new file mode 100644 index 00000000..facfc931 --- /dev/null +++ b/core/gethwrappers/generated/vrfv2_reverting_example/vrfv2_reverting_example.go @@ -0,0 +1,344 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrfv2_reverting_example + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2RevertingExampleMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"createSubscriptionAndFund\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"minReqConfs\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"}],\"name\":\"requestRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_gasAvailable\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],
\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_subId\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"topUpSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"name\":\"updateSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60a060405234801561001057600080fd5b50604051610da6380380610da683398101604081905261002f9161008e565b6001600160601b0319606083901b16608052600280546001600160a01b03199081166001600160a01b0394851617909155600380549290931691161790556100c1565b80516001600160a01b038116811461008957600080fd5b919050565b600080604083850312156100a157600080fd5b6100aa83610072565b91506100b860208401610072565b90509250929050565b60805160601c610cc06100e66000396000818161028001526102e80152610cc06000f3fe608060405234801561001057600080fd5b50600436106100a35760003560e01c8063706da1ca11610076578063e89e106a1161005b578063e89e106a14610161578063f08c5daa1461016a578063f6eaffc81461017357600080fd5b8063706da1ca14610109578063cf62c8ab1461014e57600080fd5b8063177b9692146100a85780631fe543e3146100ce5780632fa4e442146100e357806336bfffed146100f6575b600080fd5b6100bb6100b6366004610939565b610186565b6040519081526020015b60405180910390f35b6100e16100dc3660046109d4565b610268565b005b6100e16100f1366004610a95565b610328565b6100e1610104366004610851565b610488565b6003546101359074010000000000000000000000000000000000000000900467ffffffffffffffff1681565b60405167ffffffffffffffff90911681526020016100c5565b6100e161015c366004610a95565b610610565b6100bb60015481565b6100bb60045481565b6100bb6101813660046109a2565b610817565b6002546040517f5d3b1d300000000000000000000000000000000000000000000000000000000081526004810187905267ffffffffffffffff8616602482015261ffff8516604
482015263ffffffff80851660648301528316608482015260009173ffffffffffffffffffffffffffffffffffffffff1690635d3b1d309060a401602060405180830381600087803b15801561022157600080fd5b505af1158015610235573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061025991906109bb565b60018190559695505050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461031a576040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660248201526044015b60405180910390fd5b6103248282600080fd5b5050565b60035474010000000000000000000000000000000000000000900467ffffffffffffffff166103b3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f737562206e6f74207365740000000000000000000000000000000000000000006044820152606401610311565b6003546002546040805174010000000000000000000000000000000000000000840467ffffffffffffffff16602082015273ffffffffffffffffffffffffffffffffffffffff93841693634000aea09316918591015b6040516020818303038152906040526040518463ffffffff1660e01b815260040161043693929190610ac3565b602060405180830381600087803b15801561045057600080fd5b505af1158015610464573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103249190610910565b60035474010000000000000000000000000000000000000000900467ffffffffffffffff16610513576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f7375624944206e6f7420736574000000000000000000000000000000000000006044820152606401610311565b60005b815181101561032457600254600354835173ffffffffffffffffffffffffffffffffffffffff90921691637341c10c9174010000000000000000000000000000000000000000900467ffffffffffffffff169085908590811061057b5761057b610c3c565b60200260200101516040518363ffffffff1660e01b81526004016105cb92919067ffffffffffffffff92909216825273fffffffffffffffffff
fffffffffffffffffffff16602082015260400190565b600060405180830381600087803b1580156105e557600080fd5b505af11580156105f9573d6000803e3d6000fd5b50505050808061060890610bdc565b915050610516565b60035474010000000000000000000000000000000000000000900467ffffffffffffffff166103b357600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b1580156106a357600080fd5b505af11580156106b7573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906106db9190610a78565b600380547fffffffff0000000000000000ffffffffffffffffffffffffffffffffffffffff167401000000000000000000000000000000000000000067ffffffffffffffff938416810291909117918290556002546040517f7341c10c00000000000000000000000000000000000000000000000000000000815291909204909216600483015230602483015273ffffffffffffffffffffffffffffffffffffffff1690637341c10c90604401600060405180830381600087803b1580156107a257600080fd5b505af11580156107b6573d6000803e3d6000fd5b50506003546002546040805174010000000000000000000000000000000000000000840467ffffffffffffffff16602082015273ffffffffffffffffffffffffffffffffffffffff9384169550634000aea094509290911691859101610409565b6000818154811061082757600080fd5b600091825260209091200154905081565b803563ffffffff8116811461084c57600080fd5b919050565b6000602080838503121561086457600080fd5b823567ffffffffffffffff81111561087b57600080fd5b8301601f8101851361088c57600080fd5b803561089f61089a82610bb8565b610b69565b80828252848201915084840188868560051b87010111156108bf57600080fd5b60009450845b8481101561090257813573ffffffffffffffffffffffffffffffffffffffff811681146108f0578687fd5b845292860192908601906001016108c5565b509098975050505050505050565b60006020828403121561092257600080fd5b8151801515811461093257600080fd5b9392505050565b600080600080600060a0868803121561095157600080fd5b85359450602086013561096381610c9a565b9350604086013561ffff8116811461097a57600080fd5b925061098860608701610838565b915061099660808701610838565b9050929550929
5909350565b6000602082840312156109b457600080fd5b5035919050565b6000602082840312156109cd57600080fd5b5051919050565b600080604083850312156109e757600080fd5b8235915060208084013567ffffffffffffffff811115610a0657600080fd5b8401601f81018613610a1757600080fd5b8035610a2561089a82610bb8565b80828252848201915084840189868560051b8701011115610a4557600080fd5b600094505b83851015610a68578035835260019490940193918501918501610a4a565b5080955050505050509250929050565b600060208284031215610a8a57600080fd5b815161093281610c9a565b600060208284031215610aa757600080fd5b81356bffffffffffffffffffffffff8116811461093257600080fd5b73ffffffffffffffffffffffffffffffffffffffff84168152600060206bffffffffffffffffffffffff85168184015260606040840152835180606085015260005b81811015610b2157858101830151858201608001528201610b05565b81811115610b33576000608083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160800195945050505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610bb057610bb0610c6b565b604052919050565b600067ffffffffffffffff821115610bd257610bd2610c6b565b5060051b60200190565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415610c35577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b67ffffffffffffffff81168114610cb057600080fd5b5056fea164736f6c6343000806000a", +} + +var VRFV2RevertingExampleABI = VRFV2RevertingExampleMetaData.ABI + +var VRFV2RevertingExampleBin = VRFV2RevertingExampleMetaData.Bin + +func DeployVRFV2RevertingExample(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator common.Address, link common.Address) (common.Address, *types.Transaction, *VRFV2RevertingExample, error) { + parsed, err := 
VRFV2RevertingExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2RevertingExampleBin), backend, vrfCoordinator, link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2RevertingExample{address: address, abi: *parsed, VRFV2RevertingExampleCaller: VRFV2RevertingExampleCaller{contract: contract}, VRFV2RevertingExampleTransactor: VRFV2RevertingExampleTransactor{contract: contract}, VRFV2RevertingExampleFilterer: VRFV2RevertingExampleFilterer{contract: contract}}, nil +} + +type VRFV2RevertingExample struct { + address common.Address + abi abi.ABI + VRFV2RevertingExampleCaller + VRFV2RevertingExampleTransactor + VRFV2RevertingExampleFilterer +} + +type VRFV2RevertingExampleCaller struct { + contract *bind.BoundContract +} + +type VRFV2RevertingExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFV2RevertingExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFV2RevertingExampleSession struct { + Contract *VRFV2RevertingExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2RevertingExampleCallerSession struct { + Contract *VRFV2RevertingExampleCaller + CallOpts bind.CallOpts +} + +type VRFV2RevertingExampleTransactorSession struct { + Contract *VRFV2RevertingExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2RevertingExampleRaw struct { + Contract *VRFV2RevertingExample +} + +type VRFV2RevertingExampleCallerRaw struct { + Contract *VRFV2RevertingExampleCaller +} + +type VRFV2RevertingExampleTransactorRaw struct { + Contract *VRFV2RevertingExampleTransactor +} + +func NewVRFV2RevertingExample(address common.Address, backend bind.ContractBackend) (*VRFV2RevertingExample, error) { + abi, err := 
abi.JSON(strings.NewReader(VRFV2RevertingExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2RevertingExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2RevertingExample{address: address, abi: abi, VRFV2RevertingExampleCaller: VRFV2RevertingExampleCaller{contract: contract}, VRFV2RevertingExampleTransactor: VRFV2RevertingExampleTransactor{contract: contract}, VRFV2RevertingExampleFilterer: VRFV2RevertingExampleFilterer{contract: contract}}, nil +} + +func NewVRFV2RevertingExampleCaller(address common.Address, caller bind.ContractCaller) (*VRFV2RevertingExampleCaller, error) { + contract, err := bindVRFV2RevertingExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2RevertingExampleCaller{contract: contract}, nil +} + +func NewVRFV2RevertingExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2RevertingExampleTransactor, error) { + contract, err := bindVRFV2RevertingExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2RevertingExampleTransactor{contract: contract}, nil +} + +func NewVRFV2RevertingExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2RevertingExampleFilterer, error) { + contract, err := bindVRFV2RevertingExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2RevertingExampleFilterer{contract: contract}, nil +} + +func bindVRFV2RevertingExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2RevertingExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params 
...interface{}) error { + return _VRFV2RevertingExample.Contract.VRFV2RevertingExampleCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.VRFV2RevertingExampleTransactor.contract.Transfer(opts) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.VRFV2RevertingExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2RevertingExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.contract.Transfer(opts) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleCaller) SGasAvailable(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2RevertingExample.contract.Call(opts, &out, "s_gasAvailable") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleSession) SGasAvailable() (*big.Int, error) { + return _VRFV2RevertingExample.Contract.SGasAvailable(&_VRFV2RevertingExample.CallOpts) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleCallerSession) SGasAvailable() (*big.Int, error) { + return _VRFV2RevertingExample.Contract.SGasAvailable(&_VRFV2RevertingExample.CallOpts) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleCaller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV2RevertingExample.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFV2RevertingExample.Contract.SRandomWords(&_VRFV2RevertingExample.CallOpts, arg0) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleCallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFV2RevertingExample.Contract.SRandomWords(&_VRFV2RevertingExample.CallOpts, arg0) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleCaller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2RevertingExample.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleSession) SRequestId() (*big.Int, error) 
{ + return _VRFV2RevertingExample.Contract.SRequestId(&_VRFV2RevertingExample.CallOpts) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleCallerSession) SRequestId() (*big.Int, error) { + return _VRFV2RevertingExample.Contract.SRequestId(&_VRFV2RevertingExample.CallOpts) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleCaller) SSubId(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _VRFV2RevertingExample.contract.Call(opts, &out, "s_subId") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleSession) SSubId() (uint64, error) { + return _VRFV2RevertingExample.Contract.SSubId(&_VRFV2RevertingExample.CallOpts) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleCallerSession) SSubId() (uint64, error) { + return _VRFV2RevertingExample.Contract.SSubId(&_VRFV2RevertingExample.CallOpts) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleTransactor) CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2RevertingExample.contract.Transact(opts, "createSubscriptionAndFund", amount) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.CreateSubscriptionAndFund(&_VRFV2RevertingExample.TransactOpts, amount) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleTransactorSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.CreateSubscriptionAndFund(&_VRFV2RevertingExample.TransactOpts, amount) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2RevertingExample.contract.Transact(opts, 
"rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.RawFulfillRandomWords(&_VRFV2RevertingExample.TransactOpts, requestId, randomWords) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.RawFulfillRandomWords(&_VRFV2RevertingExample.TransactOpts, requestId, randomWords) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleTransactor) RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, subId uint64, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFV2RevertingExample.contract.Transact(opts, "requestRandomness", keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleSession) RequestRandomness(keyHash [32]byte, subId uint64, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.RequestRandomness(&_VRFV2RevertingExample.TransactOpts, keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleTransactorSession) RequestRandomness(keyHash [32]byte, subId uint64, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.RequestRandomness(&_VRFV2RevertingExample.TransactOpts, keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleTransactor) TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2RevertingExample.contract.Transact(opts, "topUpSubscription", amount) +} + +func 
(_VRFV2RevertingExample *VRFV2RevertingExampleSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.TopUpSubscription(&_VRFV2RevertingExample.TransactOpts, amount) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleTransactorSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.TopUpSubscription(&_VRFV2RevertingExample.TransactOpts, amount) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleTransactor) UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) { + return _VRFV2RevertingExample.contract.Transact(opts, "updateSubscription", consumers) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.UpdateSubscription(&_VRFV2RevertingExample.TransactOpts, consumers) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExampleTransactorSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFV2RevertingExample.Contract.UpdateSubscription(&_VRFV2RevertingExample.TransactOpts, consumers) +} + +func (_VRFV2RevertingExample *VRFV2RevertingExample) Address() common.Address { + return _VRFV2RevertingExample.address +} + +type VRFV2RevertingExampleInterface interface { + SGasAvailable(opts *bind.CallOpts) (*big.Int, error) + + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + SSubId(opts *bind.CallOpts) (uint64, error) + + CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, subId uint64, minReqConfs uint16, callbackGasLimit uint32, 
numWords uint32) (*types.Transaction, error) + + TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2_transparent_upgradeable_proxy/vrfv2_transparent_upgradeable_proxy.go b/core/gethwrappers/generated/vrfv2_transparent_upgradeable_proxy/vrfv2_transparent_upgradeable_proxy.go new file mode 100644 index 00000000..c370b19e --- /dev/null +++ b/core/gethwrappers/generated/vrfv2_transparent_upgradeable_proxy/vrfv2_transparent_upgradeable_proxy.go @@ -0,0 +1,626 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrfv2_transparent_upgradeable_proxy + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2TransparentUpgradeableProxyMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_logic\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"admin_\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"stateMutability\":\"payable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"previousAdmin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"}],\"name\":\"AdminChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"beacon\",\"type\":\"address\"}],\"name\":\"BeaconUpgraded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"implementation\",\"type\":\"address\"}],\"name\":\"Upgraded\",\"type\":\"event\"},{\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", + Bin: 
"0x6080604052604051620011863803806200118683398101604081905262000026916200045e565b8282828281620000398282600062000053565b506200004790508262000090565b505050505050620005d6565b6200005e83620000eb565b6000825111806200006c5750805b156200008b576200008983836200012d60201b620002bd1760201c565b505b505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f620000bb6200015c565b604080516001600160a01b03928316815291841660208301520160405180910390a1620000e88162000195565b50565b620000f6816200024a565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b60606200015583836040518060600160405280602781526020016200115f60279139620002fe565b9392505050565b6000620001866000805160206200113f83398151915260001b6200037d60201b620002e91760201c565b546001600160a01b0316919050565b6001600160a01b038116620002005760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b60648201526084015b60405180910390fd5b80620002296000805160206200113f83398151915260001b6200037d60201b620002e91760201c565b80546001600160a01b0319166001600160a01b039290921691909117905550565b62000260816200038060201b620002ec1760201c565b620002c45760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b6064820152608401620001f7565b80620002297f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc60001b6200037d60201b620002e91760201c565b6060600080856001600160a01b0316856040516200031d91906200053e565b600060405180830381855af49150503d80600081146200035a576040519150601f19603f3d011682016040523d82523d6000602084013e6200035f565b606091505b50909250905062000373868383876200038f565b9695505050505050565b90565b6001600160a01b03163b151590565b6060831562000400578251620003f8576001600160a01b0385163b620003f85760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616
3740000006044820152606401620001f7565b50816200040c565b6200040c838362000414565b949350505050565b815115620004255781518083602001fd5b8060405162461bcd60e51b8152600401620001f791906200055c565b80516001600160a01b03811681146200045957600080fd5b919050565b6000806000606084860312156200047457600080fd5b6200047f8462000441565b92506200048f6020850162000441565b60408501519092506001600160401b0380821115620004ad57600080fd5b818601915086601f830112620004c257600080fd5b815181811115620004d757620004d7620005c0565b604051601f8201601f19908116603f01168101908382118183101715620005025762000502620005c0565b816040528281528960208487010111156200051c57600080fd5b6200052f83602083016020880162000591565b80955050505050509250925092565b600082516200055281846020870162000591565b9190910192915050565b60208152600082518060208401526200057d81604085016020870162000591565b601f01601f19169190910160400192915050565b60005b83811015620005ae57818101518382015260200162000594565b83811115620000895750506000910152565b634e487b7160e01b600052604160045260246000fd5b610b5980620005e66000396000f3fe60806040523661001357610011610017565b005b6100115b61001f610308565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614156102b35760607fffffffff00000000000000000000000000000000000000000000000000000000600035167f3659cfe6000000000000000000000000000000000000000000000000000000008114156100b0576100a9610348565b91506102ab565b7fffffffff0000000000000000000000000000000000000000000000000000000081167f4f1ef286000000000000000000000000000000000000000000000000000000001415610102576100a961039f565b7fffffffff0000000000000000000000000000000000000000000000000000000081167f8f283970000000000000000000000000000000000000000000000000000000001415610154576100a96103e5565b7fffffffff0000000000000000000000000000000000000000000000000000000081167ff851a4400000000000000000000000000000000000000000000000000000000014156101a6576100a9610416565b7fffffffff0000000000000000000000000000000000000000000000000000000081167f5c60da1b000000000000000000000000000000000000000000000000000
0000014156101f8576100a9610463565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f7879207461726760648201527f6574000000000000000000000000000000000000000000000000000000000000608482015260a4015b60405180910390fd5b815160208301f35b6102bb610477565b565b60606102e28383604051806060016040528060278152602001610b2660279139610487565b9392505050565b90565b73ffffffffffffffffffffffffffffffffffffffff163b151590565b60007fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b5473ffffffffffffffffffffffffffffffffffffffff16919050565b606061035261050c565b60006103613660048184610aa0565b81019061036e9190610938565b905061038b81604051806020016040528060008152506000610517565b505060408051602081019091526000815290565b60606000806103b13660048184610aa0565b8101906103be9190610953565b915091506103ce82826001610517565b604051806020016040528060008152509250505090565b60606103ef61050c565b60006103fe3660048184610aa0565b81019061040b9190610938565b905061038b81610543565b606061042061050c565b600061042a610308565b6040805173ffffffffffffffffffffffffffffffffffffffff831660208201529192500160405160208183030381529060405291505090565b606061046d61050c565b600061042a6105a7565b6102bb6104826105a7565b6105b6565b60606000808573ffffffffffffffffffffffffffffffffffffffff16856040516104b19190610a33565b600060405180830381855af49150503d80600081146104ec576040519150601f19603f3d011682016040523d82523d6000602084013e6104f1565b606091505b5091509150610502868383876105da565b9695505050505050565b34156102bb57600080fd5b6105208361067f565b60008251118061052d5750805b1561053e5761053c83836102bd565b505b505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f61056c610308565b6040805173ffffffffffffffffffffffffffffffffffffffff928316815291841660208301520160405180910390a16105a4816106cc565b50565b60006105b16107d8565b905090565b3660008037600080366000845af43d6000803e8080156105d55
73d6000f35b3d6000fd5b6060831561066d5782516106665773ffffffffffffffffffffffffffffffffffffffff85163b610666576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e747261637400000060448201526064016102a2565b5081610677565b6106778383610800565b949350505050565b61068881610844565b60405173ffffffffffffffffffffffffffffffffffffffff8216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b73ffffffffffffffffffffffffffffffffffffffff811661076f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084016102a2565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9290921691909117905550565b60007f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61032c565b8151156108105781518083602001fd5b806040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016102a29190610a4f565b73ffffffffffffffffffffffffffffffffffffffff81163b6108e8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201527f6f74206120636f6e74726163740000000000000000000000000000000000000060648201526084016102a2565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc610792565b803573ffffffffffffffffffffffffffffffffffffffff8116811461093357600080fd5b919050565b60006020828403121561094a57600080fd5b6102e28261090f565b6000806040838503121561096657600080fd5b61096f8361090f565b9150602083013567ffffffffffffffff8082111561098c57600080fd5b818501915085601f8301126109a057600080fd5b8135818111156109b2576109b2610af6565b604051601f8
2017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156109f8576109f8610af6565b81604052828152886020848701011115610a1157600080fd5b8260208601602083013760006020848301015280955050505050509250929050565b60008251610a45818460208701610aca565b9190910192915050565b6020815260008251806020840152610a6e816040850160208701610aca565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b60008085851115610ab057600080fd5b83861115610abd57600080fd5b5050820193919092039150565b60005b83811015610ae5578181015183820152602001610acd565b8381111561053c5750506000910152565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a164736f6c6343000806000ab53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564", +} + +var VRFV2TransparentUpgradeableProxyABI = VRFV2TransparentUpgradeableProxyMetaData.ABI + +var VRFV2TransparentUpgradeableProxyBin = VRFV2TransparentUpgradeableProxyMetaData.Bin + +func DeployVRFV2TransparentUpgradeableProxy(auth *bind.TransactOpts, backend bind.ContractBackend, _logic common.Address, admin_ common.Address, _data []byte) (common.Address, *types.Transaction, *VRFV2TransparentUpgradeableProxy, error) { + parsed, err := VRFV2TransparentUpgradeableProxyMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2TransparentUpgradeableProxyBin), backend, _logic, admin_, _data) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2TransparentUpgradeableProxy{address: address, abi: *parsed, VRFV2TransparentUpgradeableProxyCaller: 
VRFV2TransparentUpgradeableProxyCaller{contract: contract}, VRFV2TransparentUpgradeableProxyTransactor: VRFV2TransparentUpgradeableProxyTransactor{contract: contract}, VRFV2TransparentUpgradeableProxyFilterer: VRFV2TransparentUpgradeableProxyFilterer{contract: contract}}, nil +} + +type VRFV2TransparentUpgradeableProxy struct { + address common.Address + abi abi.ABI + VRFV2TransparentUpgradeableProxyCaller + VRFV2TransparentUpgradeableProxyTransactor + VRFV2TransparentUpgradeableProxyFilterer +} + +type VRFV2TransparentUpgradeableProxyCaller struct { + contract *bind.BoundContract +} + +type VRFV2TransparentUpgradeableProxyTransactor struct { + contract *bind.BoundContract +} + +type VRFV2TransparentUpgradeableProxyFilterer struct { + contract *bind.BoundContract +} + +type VRFV2TransparentUpgradeableProxySession struct { + Contract *VRFV2TransparentUpgradeableProxy + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2TransparentUpgradeableProxyCallerSession struct { + Contract *VRFV2TransparentUpgradeableProxyCaller + CallOpts bind.CallOpts +} + +type VRFV2TransparentUpgradeableProxyTransactorSession struct { + Contract *VRFV2TransparentUpgradeableProxyTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2TransparentUpgradeableProxyRaw struct { + Contract *VRFV2TransparentUpgradeableProxy +} + +type VRFV2TransparentUpgradeableProxyCallerRaw struct { + Contract *VRFV2TransparentUpgradeableProxyCaller +} + +type VRFV2TransparentUpgradeableProxyTransactorRaw struct { + Contract *VRFV2TransparentUpgradeableProxyTransactor +} + +func NewVRFV2TransparentUpgradeableProxy(address common.Address, backend bind.ContractBackend) (*VRFV2TransparentUpgradeableProxy, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2TransparentUpgradeableProxyABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2TransparentUpgradeableProxy(address, backend, backend, backend) + if err != nil { + return nil, err + } + return 
&VRFV2TransparentUpgradeableProxy{address: address, abi: abi, VRFV2TransparentUpgradeableProxyCaller: VRFV2TransparentUpgradeableProxyCaller{contract: contract}, VRFV2TransparentUpgradeableProxyTransactor: VRFV2TransparentUpgradeableProxyTransactor{contract: contract}, VRFV2TransparentUpgradeableProxyFilterer: VRFV2TransparentUpgradeableProxyFilterer{contract: contract}}, nil +} + +func NewVRFV2TransparentUpgradeableProxyCaller(address common.Address, caller bind.ContractCaller) (*VRFV2TransparentUpgradeableProxyCaller, error) { + contract, err := bindVRFV2TransparentUpgradeableProxy(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2TransparentUpgradeableProxyCaller{contract: contract}, nil +} + +func NewVRFV2TransparentUpgradeableProxyTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2TransparentUpgradeableProxyTransactor, error) { + contract, err := bindVRFV2TransparentUpgradeableProxy(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2TransparentUpgradeableProxyTransactor{contract: contract}, nil +} + +func NewVRFV2TransparentUpgradeableProxyFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2TransparentUpgradeableProxyFilterer, error) { + contract, err := bindVRFV2TransparentUpgradeableProxy(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2TransparentUpgradeableProxyFilterer{contract: contract}, nil +} + +func bindVRFV2TransparentUpgradeableProxy(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2TransparentUpgradeableProxyMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyRaw) Call(opts *bind.CallOpts, result *[]interface{}, 
method string, params ...interface{}) error { + return _VRFV2TransparentUpgradeableProxy.Contract.VRFV2TransparentUpgradeableProxyCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2TransparentUpgradeableProxy.Contract.VRFV2TransparentUpgradeableProxyTransactor.contract.Transfer(opts) +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2TransparentUpgradeableProxy.Contract.VRFV2TransparentUpgradeableProxyTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2TransparentUpgradeableProxy.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2TransparentUpgradeableProxy.Contract.contract.Transfer(opts) +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2TransparentUpgradeableProxy.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _VRFV2TransparentUpgradeableProxy.contract.RawTransact(opts, calldata) +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxySession) Fallback(calldata []byte) (*types.Transaction, error) { + return _VRFV2TransparentUpgradeableProxy.Contract.Fallback(&_VRFV2TransparentUpgradeableProxy.TransactOpts, calldata) +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _VRFV2TransparentUpgradeableProxy.Contract.Fallback(&_VRFV2TransparentUpgradeableProxy.TransactOpts, calldata) +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2TransparentUpgradeableProxy.contract.RawTransact(opts, nil) +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxySession) Receive() (*types.Transaction, error) { + return _VRFV2TransparentUpgradeableProxy.Contract.Receive(&_VRFV2TransparentUpgradeableProxy.TransactOpts) +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyTransactorSession) Receive() (*types.Transaction, error) { + return _VRFV2TransparentUpgradeableProxy.Contract.Receive(&_VRFV2TransparentUpgradeableProxy.TransactOpts) +} + +type VRFV2TransparentUpgradeableProxyAdminChangedIterator struct { + Event *VRFV2TransparentUpgradeableProxyAdminChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2TransparentUpgradeableProxyAdminChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2TransparentUpgradeableProxyAdminChanged) + 
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2TransparentUpgradeableProxyAdminChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2TransparentUpgradeableProxyAdminChangedIterator) Error() error { + return it.fail +} + +func (it *VRFV2TransparentUpgradeableProxyAdminChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2TransparentUpgradeableProxyAdminChanged struct { + PreviousAdmin common.Address + NewAdmin common.Address + Raw types.Log +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyFilterer) FilterAdminChanged(opts *bind.FilterOpts) (*VRFV2TransparentUpgradeableProxyAdminChangedIterator, error) { + + logs, sub, err := _VRFV2TransparentUpgradeableProxy.contract.FilterLogs(opts, "AdminChanged") + if err != nil { + return nil, err + } + return &VRFV2TransparentUpgradeableProxyAdminChangedIterator{contract: _VRFV2TransparentUpgradeableProxy.contract, event: "AdminChanged", logs: logs, sub: sub}, nil +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyFilterer) WatchAdminChanged(opts *bind.WatchOpts, sink chan<- *VRFV2TransparentUpgradeableProxyAdminChanged) (event.Subscription, error) { + + logs, sub, err := _VRFV2TransparentUpgradeableProxy.contract.WatchLogs(opts, "AdminChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2TransparentUpgradeableProxyAdminChanged) + if err := _VRFV2TransparentUpgradeableProxy.contract.UnpackLog(event, 
"AdminChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyFilterer) ParseAdminChanged(log types.Log) (*VRFV2TransparentUpgradeableProxyAdminChanged, error) { + event := new(VRFV2TransparentUpgradeableProxyAdminChanged) + if err := _VRFV2TransparentUpgradeableProxy.contract.UnpackLog(event, "AdminChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2TransparentUpgradeableProxyBeaconUpgradedIterator struct { + Event *VRFV2TransparentUpgradeableProxyBeaconUpgraded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2TransparentUpgradeableProxyBeaconUpgradedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2TransparentUpgradeableProxyBeaconUpgraded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2TransparentUpgradeableProxyBeaconUpgraded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2TransparentUpgradeableProxyBeaconUpgradedIterator) Error() error { + return it.fail +} + +func (it *VRFV2TransparentUpgradeableProxyBeaconUpgradedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2TransparentUpgradeableProxyBeaconUpgraded struct { + Beacon 
common.Address + Raw types.Log +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyFilterer) FilterBeaconUpgraded(opts *bind.FilterOpts, beacon []common.Address) (*VRFV2TransparentUpgradeableProxyBeaconUpgradedIterator, error) { + + var beaconRule []interface{} + for _, beaconItem := range beacon { + beaconRule = append(beaconRule, beaconItem) + } + + logs, sub, err := _VRFV2TransparentUpgradeableProxy.contract.FilterLogs(opts, "BeaconUpgraded", beaconRule) + if err != nil { + return nil, err + } + return &VRFV2TransparentUpgradeableProxyBeaconUpgradedIterator{contract: _VRFV2TransparentUpgradeableProxy.contract, event: "BeaconUpgraded", logs: logs, sub: sub}, nil +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyFilterer) WatchBeaconUpgraded(opts *bind.WatchOpts, sink chan<- *VRFV2TransparentUpgradeableProxyBeaconUpgraded, beacon []common.Address) (event.Subscription, error) { + + var beaconRule []interface{} + for _, beaconItem := range beacon { + beaconRule = append(beaconRule, beaconItem) + } + + logs, sub, err := _VRFV2TransparentUpgradeableProxy.contract.WatchLogs(opts, "BeaconUpgraded", beaconRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2TransparentUpgradeableProxyBeaconUpgraded) + if err := _VRFV2TransparentUpgradeableProxy.contract.UnpackLog(event, "BeaconUpgraded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyFilterer) ParseBeaconUpgraded(log types.Log) (*VRFV2TransparentUpgradeableProxyBeaconUpgraded, error) { + event := new(VRFV2TransparentUpgradeableProxyBeaconUpgraded) 
+ if err := _VRFV2TransparentUpgradeableProxy.contract.UnpackLog(event, "BeaconUpgraded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2TransparentUpgradeableProxyUpgradedIterator struct { + Event *VRFV2TransparentUpgradeableProxyUpgraded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2TransparentUpgradeableProxyUpgradedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2TransparentUpgradeableProxyUpgraded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2TransparentUpgradeableProxyUpgraded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2TransparentUpgradeableProxyUpgradedIterator) Error() error { + return it.fail +} + +func (it *VRFV2TransparentUpgradeableProxyUpgradedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2TransparentUpgradeableProxyUpgraded struct { + Implementation common.Address + Raw types.Log +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyFilterer) FilterUpgraded(opts *bind.FilterOpts, implementation []common.Address) (*VRFV2TransparentUpgradeableProxyUpgradedIterator, error) { + + var implementationRule []interface{} + for _, implementationItem := range implementation { + implementationRule = append(implementationRule, implementationItem) + } + + logs, sub, err := _VRFV2TransparentUpgradeableProxy.contract.FilterLogs(opts, "Upgraded", implementationRule) + if err != 
nil { + return nil, err + } + return &VRFV2TransparentUpgradeableProxyUpgradedIterator{contract: _VRFV2TransparentUpgradeableProxy.contract, event: "Upgraded", logs: logs, sub: sub}, nil +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyFilterer) WatchUpgraded(opts *bind.WatchOpts, sink chan<- *VRFV2TransparentUpgradeableProxyUpgraded, implementation []common.Address) (event.Subscription, error) { + + var implementationRule []interface{} + for _, implementationItem := range implementation { + implementationRule = append(implementationRule, implementationItem) + } + + logs, sub, err := _VRFV2TransparentUpgradeableProxy.contract.WatchLogs(opts, "Upgraded", implementationRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2TransparentUpgradeableProxyUpgraded) + if err := _VRFV2TransparentUpgradeableProxy.contract.UnpackLog(event, "Upgraded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxyFilterer) ParseUpgraded(log types.Log) (*VRFV2TransparentUpgradeableProxyUpgraded, error) { + event := new(VRFV2TransparentUpgradeableProxyUpgraded) + if err := _VRFV2TransparentUpgradeableProxy.contract.UnpackLog(event, "Upgraded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxy) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2TransparentUpgradeableProxy.abi.Events["AdminChanged"].ID: + return _VRFV2TransparentUpgradeableProxy.ParseAdminChanged(log) + case 
_VRFV2TransparentUpgradeableProxy.abi.Events["BeaconUpgraded"].ID: + return _VRFV2TransparentUpgradeableProxy.ParseBeaconUpgraded(log) + case _VRFV2TransparentUpgradeableProxy.abi.Events["Upgraded"].ID: + return _VRFV2TransparentUpgradeableProxy.ParseUpgraded(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2TransparentUpgradeableProxyAdminChanged) Topic() common.Hash { + return common.HexToHash("0x7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f") +} + +func (VRFV2TransparentUpgradeableProxyBeaconUpgraded) Topic() common.Hash { + return common.HexToHash("0x1cf3b03a6cf19fa2baba4df148e9dcabedea7f8a5c07840e207e5c089be95d3e") +} + +func (VRFV2TransparentUpgradeableProxyUpgraded) Topic() common.Hash { + return common.HexToHash("0xbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b") +} + +func (_VRFV2TransparentUpgradeableProxy *VRFV2TransparentUpgradeableProxy) Address() common.Address { + return _VRFV2TransparentUpgradeableProxy.address +} + +type VRFV2TransparentUpgradeableProxyInterface interface { + Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) + + Receive(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterAdminChanged(opts *bind.FilterOpts) (*VRFV2TransparentUpgradeableProxyAdminChangedIterator, error) + + WatchAdminChanged(opts *bind.WatchOpts, sink chan<- *VRFV2TransparentUpgradeableProxyAdminChanged) (event.Subscription, error) + + ParseAdminChanged(log types.Log) (*VRFV2TransparentUpgradeableProxyAdminChanged, error) + + FilterBeaconUpgraded(opts *bind.FilterOpts, beacon []common.Address) (*VRFV2TransparentUpgradeableProxyBeaconUpgradedIterator, error) + + WatchBeaconUpgraded(opts *bind.WatchOpts, sink chan<- *VRFV2TransparentUpgradeableProxyBeaconUpgraded, beacon []common.Address) (event.Subscription, error) + + ParseBeaconUpgraded(log types.Log) (*VRFV2TransparentUpgradeableProxyBeaconUpgraded, error) + + 
FilterUpgraded(opts *bind.FilterOpts, implementation []common.Address) (*VRFV2TransparentUpgradeableProxyUpgradedIterator, error) + + WatchUpgraded(opts *bind.WatchOpts, sink chan<- *VRFV2TransparentUpgradeableProxyUpgraded, implementation []common.Address) (event.Subscription, error) + + ParseUpgraded(log types.Log) (*VRFV2TransparentUpgradeableProxyUpgraded, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2_wrapper/vrfv2_wrapper.go b/core/gethwrappers/generated/vrfv2_wrapper/vrfv2_wrapper.go new file mode 100644 index 00000000..681bde09 --- /dev/null +++ b/core/gethwrappers/generated/vrfv2_wrapper/vrfv2_wrapper.go @@ -0,0 +1,1143 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrfv2_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2WrapperMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_linkEthFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_coordinator\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"WrapperFulfillmentFailed\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"COORDINATOR\",\"outputs\":[{\"internalType\":\"contractExtendedVRFCoordinatorV2Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PLI_ETH_FEED\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"SUBSCRIPTION_ID\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\
",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"}],\"name\":\"calculateRequestPrice\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"disable\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"enable\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"_requestGasPriceWei\",\"type\":\"uint256\"}],\"name\":\"estimateRequestPrice\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPM\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"wrapperGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"coordinatorGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"wrapperPremiumPercentage\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint8\",\"name\":\"maxNumWords\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastRequestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name
\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_callbacks\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"callbackAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"requestGasPrice\",\"type\":\"uint256\"},{\"internalType\":\"int256\",\"name\":\"requestWeiPerUnitLink\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"juelsPaid\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_configured\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_disabled\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_fulfillmentTxSizeBytes\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_wrapperGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_coordinatorGasOverhead\",\"type\":\"uint32\"},{\"in
ternalType\":\"uint8\",\"name\":\"_wrapperPremiumPercentage\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint8\",\"name\":\"_maxNumWords\",\"type\":\"uint8\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"size\",\"type\":\"uint32\"}],\"name\":\"setFulfillmentTxSize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x6101206040526001805463ffffffff60a01b1916609160a21b1790553480156200002857600080fd5b5060405162002a3c38038062002a3c8339810160408190526200004b91620002d8565b803380600081620000a35760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000d657620000d6816200020f565b5050506001600160601b0319606091821b811660805284821b811660a05283821b811660c0529082901b1660e0526040805163288688f960e21b815290516000916001600160a01b0384169163a21a23e49160048082019260209290919082900301818787803b1580156200014a57600080fd5b505af11580156200015f573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019062000185919062000322565b60c081901b6001600160c01b03191661010052604051631cd0704360e21b81526001600160401b03821660048201523060248201529091506001600160a01b03831690637341c10c90604401600060405180830381600087803b158015620001ec57600080fd5b505af115801562000201573d6000803e3d6000fd5b505050505050505062000354565b6001600160a01b0381163314156200026a5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016200009a565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620002d357600080fd5b919050565b600080600060608486031215620002ee57600080fd5b620002f984620002bb565b92506200030960208501620002bb565b91506200031960408501620002bb565b90509250925092565b6000602082840312156200033557600080fd5b81516001600160401b03811681146200034d57600080fd5b9392505050565b60805160601c60a05160601c60c05160601c60e05160601c6101005160c01c612655620003e7600039600081816101970152610c5701526000818161028401528181610c1801528181610fee015281816110cf015261116a0152600081816103fd015261161e01526000818161021b01528181610a6a01526112bc01526000818161055801526105c001526126556000f3f
e608060405234801561001057600080fd5b506004361061018d5760003560e01c80638da5cb5b116100e3578063c15ce4d71161008c578063f2fde38b11610066578063f2fde38b14610511578063f3fef3a314610524578063fc2a88c31461053757600080fd5b8063c15ce4d714610432578063c3f909d414610445578063cdd8d885146104d457600080fd5b8063a608a1e1116100bd578063a608a1e1146103e6578063ad178361146103f8578063bf17e5591461041f57600080fd5b80638da5cb5b146103ad578063a3907d71146103cb578063a4c0ed36146103d357600080fd5b80633b2bcbf11161014557806357a8070a1161011f57806357a8070a1461037557806379ba5097146103925780637fb5d19d1461039a57600080fd5b80633b2bcbf11461027f5780634306d354146102a657806348baa1c5146102c757600080fd5b80631b6b6d23116101765780631b6b6d23146102165780631fe543e3146102625780632f2770db1461027757600080fd5b8063030932bb14610192578063181f5a77146101d7575b600080fd5b6101b97f000000000000000000000000000000000000000000000000000000000000000081565b60405167ffffffffffffffff90911681526020015b60405180910390f35b604080518082018252601281527f56524656325772617070657220312e302e300000000000000000000000000000602082015290516101ce91906122c1565b61023d7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016101ce565b610275610270366004611fa6565b610540565b005b610275610600565b61023d7f000000000000000000000000000000000000000000000000000000000000000081565b6102b96102b43660046120df565b610636565b6040519081526020016101ce565b6103316102d5366004611f8d565b600860205260009081526040902080546001820154600283015460039093015473ffffffffffffffffffffffffffffffffffffffff8316937401000000000000000000000000000000000000000090930463ffffffff16929085565b6040805173ffffffffffffffffffffffffffffffffffffffff909616865263ffffffff9094166020860152928401919091526060830152608082015260a0016101ce565b6003546103829060ff1681565b60405190151581526020016101ce565b61027561073d565b6102b96103a8366004612147565b61083a565b60005473ffffffffffffffffffffffffffffffffffffffff1661023d565b610275610942565b6102756103e1366004611e6c565b610974565b6
0035461038290610100900460ff1681565b61023d7f000000000000000000000000000000000000000000000000000000000000000081565b61027561042d3660046120df565b610e52565b61027561044036600461221b565b610ea9565b6004546005546006546007546040805194855263ffffffff80851660208701526401000000008504811691860191909152680100000000000000008404811660608601526c01000000000000000000000000840416608085015260ff700100000000000000000000000000000000909304831660a085015260c08401919091521660e0820152610100016101ce565b6001546104fc9074010000000000000000000000000000000000000000900463ffffffff1681565b60405163ffffffff90911681526020016101ce565b61027561051f366004611e27565b611254565b610275610532366004611e42565b611268565b6102b960025481565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146105f2576040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660248201526044015b60405180910390fd5b6105fc828261133d565b5050565b610608611548565b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff16610100179055565b60035460009060ff166106a5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f77726170706572206973206e6f7420636f6e666967757265640000000000000060448201526064016105e9565b600354610100900460ff1615610717576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f777261707065722069732064697361626c65640000000000000000000000000060448201526064016105e9565b60006107216115cb565b90506107348363ffffffff163a8361173f565b9150505b919050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146107be576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016105e9565b60008054337ffffffffffffffffffffff
fff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60035460009060ff166108a9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f77726170706572206973206e6f7420636f6e666967757265640000000000000060448201526064016105e9565b600354610100900460ff161561091b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f777261707065722069732064697361626c65640000000000000000000000000060448201526064016105e9565b60006109256115cb565b90506109388463ffffffff16848361173f565b9150505b92915050565b61094a611548565b600380547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055565b60035460ff166109e0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f77726170706572206973206e6f7420636f6e666967757265640000000000000060448201526064016105e9565b600354610100900460ff1615610a52576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f777261707065722069732064697361626c65640000000000000000000000000060448201526064016105e9565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610af1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f6f6e6c792063616c6c61626c652066726f6d204c494e4b00000000000000000060448201526064016105e9565b60008080610b01848601866120fc565b9250925092506000610b1284611860565b90506000610b1e6115cb565b90506000610b338663ffffffff163a8461173f565b905080891015610b9f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f66656520746f6f206c6f7700000000000000000000000000000000000000000060448201526064016105e9565b60075460ff1663ffffffff85161115610c14576040517f0
8c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f6e756d576f72647320746f6f206869676800000000000000000000000000000060448201526064016105e9565b60007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16635d3b1d306006547f000000000000000000000000000000000000000000000000000000000000000089600560089054906101000a900463ffffffff16898d610c96919061239a565b610ca0919061239a565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e087901b168152600481019490945267ffffffffffffffff909216602484015261ffff16604483015263ffffffff90811660648301528816608482015260a401602060405180830381600087803b158015610d1f57600080fd5b505af1158015610d33573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d579190611f15565b90506040518060a001604052808c73ffffffffffffffffffffffffffffffffffffffff1681526020018863ffffffff1681526020013a81526020018481526020018b8152506008600083815260200190815260200160002060008201518160000160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060208201518160000160146101000a81548163ffffffff021916908363ffffffff160217905550604082015181600101556060820151816002015560808201518160030155905050806002819055505050505050505050505050565b610e5a611548565b6001805463ffffffff90921674010000000000000000000000000000000000000000027fffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffff909216919091179055565b610eb1611548565b6005805460ff808616700100000000000000000000000000000000027fffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffffffff63ffffffff8981166c01000000000000000000000000027fffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffff918c166801000000000000000002919091167fffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff909516949094179390931792909216919091179091556006839055600780549183167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0092831
6179055600380549091166001179055604080517fc3f909d4000000000000000000000000000000000000000000000000000000008152905173ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169163c3f909d4916004828101926080929190829003018186803b15801561103457600080fd5b505afa158015611048573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061106c9190611f2e565b50600580547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff929092169190911790555050604080517f356dac7100000000000000000000000000000000000000000000000000000000815290517f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169163356dac71916004808301926020929190829003018186803b15801561112a57600080fd5b505afa15801561113e573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906111629190611f15565b6004819055507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16635fbbc0d26040518163ffffffff1660e01b81526004016101206040518083038186803b1580156111cf57600080fd5b505afa1580156111e3573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906112079190612165565b50506005805463ffffffff909816640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff909816979097179096555050505050505050505050565b61125c611548565b61126581611878565b50565b611270611548565b6040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8381166004830152602482018390527f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb90604401602060405180830381600087803b15801561130057600080fd5b505af1158015611314573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906113389190611ef3565b505050565b6000828152600860208181526040808420815160a081018352815473ffffffffffffffffffffffffffffffffffffffff808216835263ffffffff7401000000000000000000000000000
00000000000008304168387015260018401805495840195909552600284018054606085015260038501805460808601528b8a52979096527fffffffffffffffff00000000000000000000000000000000000000000000000090911690925591859055918490559290915581511661145a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f72657175657374206e6f7420666f756e6400000000000000000000000000000060448201526064016105e9565b600080631fe543e360e01b8585604051602401611478929190612334565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050905060006114f2846020015163ffffffff1685600001518461196e565b90508061154057835160405173ffffffffffffffffffffffffffffffffffffffff9091169087907fc551b83c151f2d1c7eeb938ac59008e0409f1c1dc1e2f112449d4d79b458902290600090a35b505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff1633146115c9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016105e9565b565b600554604080517ffeaf968c000000000000000000000000000000000000000000000000000000008152905160009263ffffffff161515918391829173ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169163feaf968c9160048082019260a092909190829003018186803b15801561166557600080fd5b505afa158015611679573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061169d919061227d565b5094509092508491505080156116c357506116b88242612582565b60055463ffffffff16105b156116cd57506004545b6000811215611738576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f496e76616c6964204c494e4b207765692070726963650000000000000000000060448201526064016105e9565b9392505050565b600154600090819061176e9074010000000000000000000000000000000000000000900463ffffffff166119ba565b6
0055463ffffffff6c0100000000000000000000000082048116916117a191680100000000000000009091041688612382565b6117ab9190612382565b6117b59086612545565b6117bf9190612382565b90506000836117d683670de0b6b3a7640000612545565b6117e091906123e7565b60055490915060009060649061180d90700100000000000000000000000000000000900460ff16826123c2565b61181a9060ff1684612545565b61182491906123e7565b60055490915060009061184a90640100000000900463ffffffff1664e8d4a51000612545565b6118549083612382565b98975050505050505050565b600061186d603f836123fb565b61093c90600161239a565b73ffffffffffffffffffffffffffffffffffffffff81163314156118f8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016105e9565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60005a61138881101561198057600080fd5b61138881039050846040820482031161199857600080fd5b50823b6119a457600080fd5b60008083516020850160008789f1949350505050565b6000466119c681611a92565b15611a72576000606c73ffffffffffffffffffffffffffffffffffffffff166341b247a86040518163ffffffff1660e01b815260040160c06040518083038186803b158015611a1457600080fd5b505afa158015611a28573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611a4c9190612095565b5050505091505083608c611a609190612382565b611a6a9082612545565b949350505050565b611a7b81611ab5565b15611a895761073483611aef565b50600092915050565b600061a4b1821480611aa6575062066eed82145b8061093c57505062066eee1490565b6000600a821480611ac757506101a482145b80611ad4575062aa37dc82145b80611ae0575061210582145b8061093c57505062014a331490565b60008073420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff1663519b4bd36040518163ffffffff1660e01b815260040160206040518083038186803b158015611b4c57600080fd5b505afa158015611b60573
d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611b849190611f15565b9050600080611b938186612582565b90506000611ba2826010612545565b611bad846004612545565b611bb79190612382565b9050600073420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff16630c18c1626040518163ffffffff1660e01b815260040160206040518083038186803b158015611c1557600080fd5b505afa158015611c29573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611c4d9190611f15565b9050600073420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff1663f45e65d86040518163ffffffff1660e01b815260040160206040518083038186803b158015611cab57600080fd5b505afa158015611cbf573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611ce39190611f15565b9050600073420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff1663313ce5676040518163ffffffff1660e01b815260040160206040518083038186803b158015611d4157600080fd5b505afa158015611d55573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611d799190611f15565b90506000611d8882600a61247f565b905060008184611d988789612382565b611da2908c612545565b611dac9190612545565b611db691906123e7565b9b9a5050505050505050505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461073857600080fd5b805162ffffff8116811461073857600080fd5b803560ff8116811461073857600080fd5b805169ffffffffffffffffffff8116811461073857600080fd5b600060208284031215611e3957600080fd5b61173882611dc5565b60008060408385031215611e5557600080fd5b611e5e83611dc5565b946020939093013593505050565b60008060008060608587031215611e8257600080fd5b611e8b85611dc5565b935060208501359250604085013567ffffffffffffffff80821115611eaf57600080fd5b818701915087601f830112611ec357600080fd5b813581811115611ed257600080fd5b886020828501011115611ee457600080fd5b95989497505060200194505050565b600060208284031215611f0557600080fd5b8151801515811461173857600080fd5b600060208284031215611f2757600080fd5b5051919050565b60008060008060808587031215611f445
7600080fd5b8451611f4f81612626565b6020860151909450611f6081612636565b6040860151909350611f7181612636565b6060860151909250611f8281612636565b939692955090935050565b600060208284031215611f9f57600080fd5b5035919050565b60008060408385031215611fb957600080fd5b8235915060208084013567ffffffffffffffff80821115611fd957600080fd5b818601915086601f830112611fed57600080fd5b813581811115611fff57611fff6125f7565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f83011681018181108582111715612042576120426125f7565b604052828152858101935084860182860187018b101561206157600080fd5b600095505b83861015612084578035855260019590950194938601938601612066565b508096505050505050509250929050565b60008060008060008060c087890312156120ae57600080fd5b865195506020870151945060408701519350606087015192506080870151915060a087015190509295509295509295565b6000602082840312156120f157600080fd5b813561173881612636565b60008060006060848603121561211157600080fd5b833561211c81612636565b9250602084013561212c81612626565b9150604084013561213c81612636565b809150509250925092565b6000806040838503121561215a57600080fd5b8235611e5e81612636565b60008060008060008060008060006101208a8c03121561218457600080fd5b895161218f81612636565b60208b01519099506121a081612636565b60408b01519098506121b181612636565b60608b01519097506121c281612636565b60808b01519096506121d381612636565b94506121e160a08b01611de9565b93506121ef60c08b01611de9565b92506121fd60e08b01611de9565b915061220c6101008b01611de9565b90509295985092959850929598565b600080600080600060a0868803121561223357600080fd5b853561223e81612636565b9450602086013561224e81612636565b935061225c60408701611dfc565b92506060860135915061227160808701611dfc565b90509295509295909350565b600080600080600060a0868803121561229557600080fd5b61229e86611e0d565b945060208601519350604086015192506060860151915061227160808701611e0d565b600060208083528351808285015260005b818110156122ee578581018301518582016040015282016122d2565b81811115612300576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169
29092016040019392505050565b6000604082018483526020604081850152818551808452606086019150828701935060005b8181101561237557845183529383019391830191600101612359565b5090979650505050505050565b6000821982111561239557612395612599565b500190565b600063ffffffff8083168185168083038211156123b9576123b9612599565b01949350505050565b600060ff821660ff84168060ff038211156123df576123df612599565b019392505050565b6000826123f6576123f66125c8565b500490565b600063ffffffff80841680612412576124126125c8565b92169190910492915050565b600181815b8085111561247757817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0482111561245d5761245d612599565b8085161561246a57918102915b93841c9390800290612423565b509250929050565b600061173883836000826124955750600161093c565b816124a25750600061093c565b81600181146124b857600281146124c2576124de565b600191505061093c565b60ff8411156124d3576124d3612599565b50506001821b61093c565b5060208310610133831016604e8410600b8410161715612501575081810a61093c565b61250b838361241e565b807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0482111561253d5761253d612599565b029392505050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff048311821515161561257d5761257d612599565b500290565b60008282101561259457612594612599565b500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61ffff8116811461126557600080fd5b63ffffffff8116811461126557600080fdfea164736f6c6343000806000a", +} + +var VRFV2WrapperABI = VRFV2WrapperMetaData.ABI + +var VRFV2WrapperBin = VRFV2WrapperMetaData.Bin + +func DeployVRFV2Wrapper(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address, _linkEthFeed common.Address, _coordinator common.Address) (common.Address, *types.Transaction, *VRFV2Wrapper, error) { + parsed, err := 
VRFV2WrapperMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2WrapperBin), backend, _link, _linkEthFeed, _coordinator) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2Wrapper{address: address, abi: *parsed, VRFV2WrapperCaller: VRFV2WrapperCaller{contract: contract}, VRFV2WrapperTransactor: VRFV2WrapperTransactor{contract: contract}, VRFV2WrapperFilterer: VRFV2WrapperFilterer{contract: contract}}, nil +} + +type VRFV2Wrapper struct { + address common.Address + abi abi.ABI + VRFV2WrapperCaller + VRFV2WrapperTransactor + VRFV2WrapperFilterer +} + +type VRFV2WrapperCaller struct { + contract *bind.BoundContract +} + +type VRFV2WrapperTransactor struct { + contract *bind.BoundContract +} + +type VRFV2WrapperFilterer struct { + contract *bind.BoundContract +} + +type VRFV2WrapperSession struct { + Contract *VRFV2Wrapper + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2WrapperCallerSession struct { + Contract *VRFV2WrapperCaller + CallOpts bind.CallOpts +} + +type VRFV2WrapperTransactorSession struct { + Contract *VRFV2WrapperTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2WrapperRaw struct { + Contract *VRFV2Wrapper +} + +type VRFV2WrapperCallerRaw struct { + Contract *VRFV2WrapperCaller +} + +type VRFV2WrapperTransactorRaw struct { + Contract *VRFV2WrapperTransactor +} + +func NewVRFV2Wrapper(address common.Address, backend bind.ContractBackend) (*VRFV2Wrapper, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2WrapperABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2Wrapper(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2Wrapper{address: address, abi: abi, VRFV2WrapperCaller: VRFV2WrapperCaller{contract: 
contract}, VRFV2WrapperTransactor: VRFV2WrapperTransactor{contract: contract}, VRFV2WrapperFilterer: VRFV2WrapperFilterer{contract: contract}}, nil +} + +func NewVRFV2WrapperCaller(address common.Address, caller bind.ContractCaller) (*VRFV2WrapperCaller, error) { + contract, err := bindVRFV2Wrapper(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2WrapperCaller{contract: contract}, nil +} + +func NewVRFV2WrapperTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2WrapperTransactor, error) { + contract, err := bindVRFV2Wrapper(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2WrapperTransactor{contract: contract}, nil +} + +func NewVRFV2WrapperFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2WrapperFilterer, error) { + contract, err := bindVRFV2Wrapper(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2WrapperFilterer{contract: contract}, nil +} + +func bindVRFV2Wrapper(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2WrapperMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2Wrapper *VRFV2WrapperRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2Wrapper.Contract.VRFV2WrapperCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFV2Wrapper *VRFV2WrapperRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.VRFV2WrapperTransactor.contract.Transfer(opts) +} + +func (_VRFV2Wrapper *VRFV2WrapperRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.VRFV2WrapperTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2Wrapper.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.contract.Transfer(opts) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) COORDINATOR(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "COORDINATOR") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) COORDINATOR() (common.Address, error) { + return _VRFV2Wrapper.Contract.COORDINATOR(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) COORDINATOR() (common.Address, error) { + return _VRFV2Wrapper.Contract.COORDINATOR(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) PLI(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "PLI") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) PLI() (common.Address, error) { + return _VRFV2Wrapper.Contract.PLI(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) PLI() (common.Address, error) { + return _VRFV2Wrapper.Contract.PLI(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) PLIETHFEED(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "PLI_ETH_FEED") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) PLIETHFEED() (common.Address, error) { + return _VRFV2Wrapper.Contract.PLIETHFEED(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) PLIETHFEED() (common.Address, error) { + return _VRFV2Wrapper.Contract.PLIETHFEED(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper 
*VRFV2WrapperCaller) SUBSCRIPTIONID(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "SUBSCRIPTION_ID") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) SUBSCRIPTIONID() (uint64, error) { + return _VRFV2Wrapper.Contract.SUBSCRIPTIONID(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) SUBSCRIPTIONID() (uint64, error) { + return _VRFV2Wrapper.Contract.SUBSCRIPTIONID(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) CalculateRequestPrice(opts *bind.CallOpts, _callbackGasLimit uint32) (*big.Int, error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "calculateRequestPrice", _callbackGasLimit) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) CalculateRequestPrice(_callbackGasLimit uint32) (*big.Int, error) { + return _VRFV2Wrapper.Contract.CalculateRequestPrice(&_VRFV2Wrapper.CallOpts, _callbackGasLimit) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) CalculateRequestPrice(_callbackGasLimit uint32) (*big.Int, error) { + return _VRFV2Wrapper.Contract.CalculateRequestPrice(&_VRFV2Wrapper.CallOpts, _callbackGasLimit) +} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) EstimateRequestPrice(opts *bind.CallOpts, _callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "estimateRequestPrice", _callbackGasLimit, _requestGasPriceWei) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) EstimateRequestPrice(_callbackGasLimit uint32, 
_requestGasPriceWei *big.Int) (*big.Int, error) { + return _VRFV2Wrapper.Contract.EstimateRequestPrice(&_VRFV2Wrapper.CallOpts, _callbackGasLimit, _requestGasPriceWei) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) EstimateRequestPrice(_callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) { + return _VRFV2Wrapper.Contract.EstimateRequestPrice(&_VRFV2Wrapper.CallOpts, _callbackGasLimit, _requestGasPriceWei) +} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) GetConfig(opts *bind.CallOpts) (GetConfig, + + error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "getConfig") + + outstruct := new(GetConfig) + if err != nil { + return *outstruct, err + } + + outstruct.FallbackWeiPerUnitLink = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.StalenessSeconds = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeLinkPPM = *abi.ConvertType(out[2], new(uint32)).(*uint32) + outstruct.WrapperGasOverhead = *abi.ConvertType(out[3], new(uint32)).(*uint32) + outstruct.CoordinatorGasOverhead = *abi.ConvertType(out[4], new(uint32)).(*uint32) + outstruct.WrapperPremiumPercentage = *abi.ConvertType(out[5], new(uint8)).(*uint8) + outstruct.KeyHash = *abi.ConvertType(out[6], new([32]byte)).(*[32]byte) + outstruct.MaxNumWords = *abi.ConvertType(out[7], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) GetConfig() (GetConfig, + + error) { + return _VRFV2Wrapper.Contract.GetConfig(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) GetConfig() (GetConfig, + + error) { + return _VRFV2Wrapper.Contract.GetConfig(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) LastRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "lastRequestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], 
new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) LastRequestId() (*big.Int, error) { + return _VRFV2Wrapper.Contract.LastRequestId(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) LastRequestId() (*big.Int, error) { + return _VRFV2Wrapper.Contract.LastRequestId(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) Owner() (common.Address, error) { + return _VRFV2Wrapper.Contract.Owner(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) Owner() (common.Address, error) { + return _VRFV2Wrapper.Contract.Owner(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) SCallbacks(opts *bind.CallOpts, arg0 *big.Int) (SCallbacks, + + error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "s_callbacks", arg0) + + outstruct := new(SCallbacks) + if err != nil { + return *outstruct, err + } + + outstruct.CallbackAddress = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.CallbackGasLimit = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.RequestGasPrice = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.RequestWeiPerUnitLink = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.JuelsPaid = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) SCallbacks(arg0 *big.Int) (SCallbacks, + + error) { + return _VRFV2Wrapper.Contract.SCallbacks(&_VRFV2Wrapper.CallOpts, arg0) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) SCallbacks(arg0 *big.Int) 
(SCallbacks, + + error) { + return _VRFV2Wrapper.Contract.SCallbacks(&_VRFV2Wrapper.CallOpts, arg0) +} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) SConfigured(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "s_configured") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) SConfigured() (bool, error) { + return _VRFV2Wrapper.Contract.SConfigured(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) SConfigured() (bool, error) { + return _VRFV2Wrapper.Contract.SConfigured(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) SDisabled(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "s_disabled") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) SDisabled() (bool, error) { + return _VRFV2Wrapper.Contract.SDisabled(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) SDisabled() (bool, error) { + return _VRFV2Wrapper.Contract.SDisabled(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) SFulfillmentTxSizeBytes(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "s_fulfillmentTxSizeBytes") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) SFulfillmentTxSizeBytes() (uint32, error) { + return _VRFV2Wrapper.Contract.SFulfillmentTxSizeBytes(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) SFulfillmentTxSizeBytes() (uint32, error) { + return 
_VRFV2Wrapper.Contract.SFulfillmentTxSizeBytes(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _VRFV2Wrapper.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) TypeAndVersion() (string, error) { + return _VRFV2Wrapper.Contract.TypeAndVersion(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperCallerSession) TypeAndVersion() (string, error) { + return _VRFV2Wrapper.Contract.TypeAndVersion(&_VRFV2Wrapper.CallOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2Wrapper.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.AcceptOwnership(&_VRFV2Wrapper.TransactOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.AcceptOwnership(&_VRFV2Wrapper.TransactOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactor) Disable(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2Wrapper.contract.Transact(opts, "disable") +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) Disable() (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.Disable(&_VRFV2Wrapper.TransactOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactorSession) Disable() (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.Disable(&_VRFV2Wrapper.TransactOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactor) Enable(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2Wrapper.contract.Transact(opts, "enable") +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) Enable() 
(*types.Transaction, error) { + return _VRFV2Wrapper.Contract.Enable(&_VRFV2Wrapper.TransactOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactorSession) Enable() (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.Enable(&_VRFV2Wrapper.TransactOpts) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactor) OnTokenTransfer(opts *bind.TransactOpts, _sender common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFV2Wrapper.contract.Transact(opts, "onTokenTransfer", _sender, _amount, _data) +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) OnTokenTransfer(_sender common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.OnTokenTransfer(&_VRFV2Wrapper.TransactOpts, _sender, _amount, _data) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactorSession) OnTokenTransfer(_sender common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.OnTokenTransfer(&_VRFV2Wrapper.TransactOpts, _sender, _amount, _data) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2Wrapper.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.RawFulfillRandomWords(&_VRFV2Wrapper.TransactOpts, requestId, randomWords) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.RawFulfillRandomWords(&_VRFV2Wrapper.TransactOpts, requestId, randomWords) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactor) SetConfig(opts *bind.TransactOpts, _wrapperGasOverhead uint32, _coordinatorGasOverhead uint32, _wrapperPremiumPercentage 
uint8, _keyHash [32]byte, _maxNumWords uint8) (*types.Transaction, error) { + return _VRFV2Wrapper.contract.Transact(opts, "setConfig", _wrapperGasOverhead, _coordinatorGasOverhead, _wrapperPremiumPercentage, _keyHash, _maxNumWords) +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) SetConfig(_wrapperGasOverhead uint32, _coordinatorGasOverhead uint32, _wrapperPremiumPercentage uint8, _keyHash [32]byte, _maxNumWords uint8) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.SetConfig(&_VRFV2Wrapper.TransactOpts, _wrapperGasOverhead, _coordinatorGasOverhead, _wrapperPremiumPercentage, _keyHash, _maxNumWords) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactorSession) SetConfig(_wrapperGasOverhead uint32, _coordinatorGasOverhead uint32, _wrapperPremiumPercentage uint8, _keyHash [32]byte, _maxNumWords uint8) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.SetConfig(&_VRFV2Wrapper.TransactOpts, _wrapperGasOverhead, _coordinatorGasOverhead, _wrapperPremiumPercentage, _keyHash, _maxNumWords) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactor) SetFulfillmentTxSize(opts *bind.TransactOpts, size uint32) (*types.Transaction, error) { + return _VRFV2Wrapper.contract.Transact(opts, "setFulfillmentTxSize", size) +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) SetFulfillmentTxSize(size uint32) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.SetFulfillmentTxSize(&_VRFV2Wrapper.TransactOpts, size) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactorSession) SetFulfillmentTxSize(size uint32) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.SetFulfillmentTxSize(&_VRFV2Wrapper.TransactOpts, size) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2Wrapper.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return 
_VRFV2Wrapper.Contract.TransferOwnership(&_VRFV2Wrapper.TransactOpts, to) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.TransferOwnership(&_VRFV2Wrapper.TransactOpts, to) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactor) Withdraw(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFV2Wrapper.contract.Transact(opts, "withdraw", _recipient, _amount) +} + +func (_VRFV2Wrapper *VRFV2WrapperSession) Withdraw(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.Withdraw(&_VRFV2Wrapper.TransactOpts, _recipient, _amount) +} + +func (_VRFV2Wrapper *VRFV2WrapperTransactorSession) Withdraw(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFV2Wrapper.Contract.Withdraw(&_VRFV2Wrapper.TransactOpts, _recipient, _amount) +} + +type VRFV2WrapperOwnershipTransferRequestedIterator struct { + Event *VRFV2WrapperOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2WrapperOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*VRFV2WrapperOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2WrapperOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2WrapperOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2Wrapper *VRFV2WrapperFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2WrapperOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2Wrapper.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2WrapperOwnershipTransferRequestedIterator{contract: _VRFV2Wrapper.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFV2Wrapper *VRFV2WrapperFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2Wrapper.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2WrapperOwnershipTransferRequested) + if err := _VRFV2Wrapper.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2Wrapper *VRFV2WrapperFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2WrapperOwnershipTransferRequested, error) { + event := new(VRFV2WrapperOwnershipTransferRequested) + if err := _VRFV2Wrapper.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2WrapperOwnershipTransferredIterator struct { + Event *VRFV2WrapperOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2WrapperOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2WrapperOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2WrapperOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2WrapperOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2Wrapper *VRFV2WrapperFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2WrapperOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for 
_, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2Wrapper.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2WrapperOwnershipTransferredIterator{contract: _VRFV2Wrapper.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2Wrapper *VRFV2WrapperFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2Wrapper.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2WrapperOwnershipTransferred) + if err := _VRFV2Wrapper.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2Wrapper *VRFV2WrapperFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2WrapperOwnershipTransferred, error) { + event := new(VRFV2WrapperOwnershipTransferred) + if err := _VRFV2Wrapper.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2WrapperWrapperFulfillmentFailedIterator struct { + Event *VRFV2WrapperWrapperFulfillmentFailed + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2WrapperWrapperFulfillmentFailedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperWrapperFulfillmentFailed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperWrapperFulfillmentFailed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2WrapperWrapperFulfillmentFailedIterator) Error() error { + return it.fail +} + +func (it *VRFV2WrapperWrapperFulfillmentFailedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2WrapperWrapperFulfillmentFailed struct { + RequestId *big.Int + Consumer common.Address + Raw types.Log +} + +func (_VRFV2Wrapper *VRFV2WrapperFilterer) FilterWrapperFulfillmentFailed(opts *bind.FilterOpts, requestId []*big.Int, consumer []common.Address) (*VRFV2WrapperWrapperFulfillmentFailedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var consumerRule []interface{} + for _, consumerItem := range consumer { + consumerRule = append(consumerRule, consumerItem) + } + + logs, sub, err := _VRFV2Wrapper.contract.FilterLogs(opts, "WrapperFulfillmentFailed", requestIdRule, consumerRule) + if err != nil { + return nil, err + } + return &VRFV2WrapperWrapperFulfillmentFailedIterator{contract: _VRFV2Wrapper.contract, event: "WrapperFulfillmentFailed", logs: logs, sub: sub}, nil +} + +func (_VRFV2Wrapper 
*VRFV2WrapperFilterer) WatchWrapperFulfillmentFailed(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperWrapperFulfillmentFailed, requestId []*big.Int, consumer []common.Address) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var consumerRule []interface{} + for _, consumerItem := range consumer { + consumerRule = append(consumerRule, consumerItem) + } + + logs, sub, err := _VRFV2Wrapper.contract.WatchLogs(opts, "WrapperFulfillmentFailed", requestIdRule, consumerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2WrapperWrapperFulfillmentFailed) + if err := _VRFV2Wrapper.contract.UnpackLog(event, "WrapperFulfillmentFailed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2Wrapper *VRFV2WrapperFilterer) ParseWrapperFulfillmentFailed(log types.Log) (*VRFV2WrapperWrapperFulfillmentFailed, error) { + event := new(VRFV2WrapperWrapperFulfillmentFailed) + if err := _VRFV2Wrapper.contract.UnpackLog(event, "WrapperFulfillmentFailed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetConfig struct { + FallbackWeiPerUnitLink *big.Int + StalenessSeconds uint32 + FulfillmentFlatFeeLinkPPM uint32 + WrapperGasOverhead uint32 + CoordinatorGasOverhead uint32 + WrapperPremiumPercentage uint8 + KeyHash [32]byte + MaxNumWords uint8 +} +type SCallbacks struct { + CallbackAddress common.Address + CallbackGasLimit uint32 + RequestGasPrice *big.Int + RequestWeiPerUnitLink *big.Int + JuelsPaid *big.Int +} + +func (_VRFV2Wrapper *VRFV2Wrapper) ParseLog(log types.Log) 
(generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2Wrapper.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2Wrapper.ParseOwnershipTransferRequested(log) + case _VRFV2Wrapper.abi.Events["OwnershipTransferred"].ID: + return _VRFV2Wrapper.ParseOwnershipTransferred(log) + case _VRFV2Wrapper.abi.Events["WrapperFulfillmentFailed"].ID: + return _VRFV2Wrapper.ParseWrapperFulfillmentFailed(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2WrapperOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFV2WrapperOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFV2WrapperWrapperFulfillmentFailed) Topic() common.Hash { + return common.HexToHash("0xc551b83c151f2d1c7eeb938ac59008e0409f1c1dc1e2f112449d4d79b4589022") +} + +func (_VRFV2Wrapper *VRFV2Wrapper) Address() common.Address { + return _VRFV2Wrapper.address +} + +type VRFV2WrapperInterface interface { + COORDINATOR(opts *bind.CallOpts) (common.Address, error) + + PLI(opts *bind.CallOpts) (common.Address, error) + + PLIETHFEED(opts *bind.CallOpts) (common.Address, error) + + SUBSCRIPTIONID(opts *bind.CallOpts) (uint64, error) + + CalculateRequestPrice(opts *bind.CallOpts, _callbackGasLimit uint32) (*big.Int, error) + + EstimateRequestPrice(opts *bind.CallOpts, _callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) + + GetConfig(opts *bind.CallOpts) (GetConfig, + + error) + + LastRequestId(opts *bind.CallOpts) (*big.Int, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SCallbacks(opts *bind.CallOpts, arg0 *big.Int) (SCallbacks, + + error) + + SConfigured(opts *bind.CallOpts) (bool, error) + + SDisabled(opts *bind.CallOpts) (bool, error) + + SFulfillmentTxSizeBytes(opts 
*bind.CallOpts) (uint32, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + Disable(opts *bind.TransactOpts) (*types.Transaction, error) + + Enable(opts *bind.TransactOpts) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, _sender common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, _wrapperGasOverhead uint32, _coordinatorGasOverhead uint32, _wrapperPremiumPercentage uint8, _keyHash [32]byte, _maxNumWords uint8) (*types.Transaction, error) + + SetFulfillmentTxSize(opts *bind.TransactOpts, size uint32) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Withdraw(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2WrapperOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFV2WrapperOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2WrapperOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2WrapperOwnershipTransferred, error) + + FilterWrapperFulfillmentFailed(opts *bind.FilterOpts, requestId []*big.Int, consumer []common.Address) 
(*VRFV2WrapperWrapperFulfillmentFailedIterator, error) + + WatchWrapperFulfillmentFailed(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperWrapperFulfillmentFailed, requestId []*big.Int, consumer []common.Address) (event.Subscription, error) + + ParseWrapperFulfillmentFailed(log types.Log) (*VRFV2WrapperWrapperFulfillmentFailed, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2_wrapper_consumer_example/vrfv2_wrapper_consumer_example.go b/core/gethwrappers/generated/vrfv2_wrapper_consumer_example/vrfv2_wrapper_consumer_example.go new file mode 100644 index 00000000..587e137c --- /dev/null +++ b/core/gethwrappers/generated/vrfv2_wrapper_consumer_example/vrfv2_wrapper_consumer_example.go @@ -0,0 +1,930 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrfv2_wrapper_consumer_example + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2WrapperConsumerExampleMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_vrfV2Wrapper\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"payment\",\"type\":\"uint256\"}],\"name\":\"WrappedRequestFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"}],\"name\":\"WrapperRequestMade\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_requestId\",\"type\":\"uint256\"}],\"name\":\"getRequestStatus\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"
uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"_numWords\",\"type\":\"uint32\"}],\"name\":\"makeRequest\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"_randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requests\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"withdrawLink\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60c06040523480156200001157600080fd5b50604051620011a3380380620011a38339810160408190526200003491620001ac565b6001600160601b0319606083811b821660805282901b1660a0523380600081620000a55760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000d857620000d881620000e3565b5050505050620001e4565b6001600160a01b0381163314156200013e5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016200009c565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001a757600080fd5b919050565b60008060408385031215620001c057600080fd5b620001cb836200018f565b9150620001db602084016200018f565b90509250929050565b60805160601c60a05160601c610f776200022c600039600081816101e30152818161033a015281816107f8015261091f0152600081816104db01526107ce0152610f776000f3fe608060405234801561001057600080fd5b50600436106100885760003560e01c80638da5cb5b1161005b5780638da5cb5b146100e3578063a168fa891461010b578063d8a4676f1461014a578063f2fde38b1461016c57600080fd5b80630c09b8321461008d5780631fe543e3146100b357806379ba5097146100c85780637a8042bd146100d0575b600080fd5b6100a061009b366004610dc9565b61017f565b6040519081526020015b60405180910390f35b6100c66100c1366004610cda565b610322565b005b6100c66103d4565b6100c66100de366004610ca8565b6104d1565b60005460405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100aa565b610135610119366004610ca8565b6002602052600090815260409020805460019091015460ff1682565b604080519283529015156020830152016100aa565b61015d610158366004610ca8565b6105d9565b6040516100aa93929190610f11565b6100c661017a366004610c49565b6106eb565b60006101896106ff565b610194848484610782565b6040517f4306d35400000000000000000000000000000000000000000000000000000000815
263ffffffff8616600482015290915060009073ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001690634306d3549060240160206040518083038186803b15801561022557600080fd5b505afa158015610239573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061025d9190610cc1565b6040805160608101825282815260006020808301828152845183815280830186528486019081528884526002808452959093208451815590516001820180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001691151591909117905591518051959650929491936102e29390850192910190610bd0565b50506040518281528391507f5f56b4c20db9f5b294cbf6f681368de4a992a27e2de2ee702dcf2cbbfa791ec49060200160405180910390a2509392505050565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146103c6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f6f6e6c792056524620563220777261707065722063616e2066756c66696c6c0060448201526064015b60405180910390fd5b6103d082826109c3565b5050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610455576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064016103bd565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6104d96106ff565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb61053460005473ffffffffffffffffffffffffffffffffffffffff1690565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e084901b16815273ffffffffffffffffffffffffffffffffffffffff909116600482015260248101849052604401602060405180830381600087803b1580156105a157600080fd5b5
05af11580156105b5573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103d09190610c86565b6000818152600260205260408120548190606090610653576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f72657175657374206e6f7420666f756e6400000000000000000000000000000060448201526064016103bd565b6000848152600260208181526040808420815160608101835281548152600182015460ff16151581850152938101805483518186028101860185528181529294938601938301828280156106c657602002820191906000526020600020905b8154815260200190600101908083116106b2575b5050509190925250508151602083015160409093015190989297509550909350505050565b6106f36106ff565b6106fc81610ada565b50565b60005473ffffffffffffffffffffffffffffffffffffffff163314610780576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016103bd565b565b6040517f4306d35400000000000000000000000000000000000000000000000000000000815263ffffffff8416600482015260009073ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000811691634000aea0917f00000000000000000000000000000000000000000000000000000000000000009190821690634306d3549060240160206040518083038186803b15801561083d57600080fd5b505afa158015610851573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108759190610cc1565b6040805163ffffffff808b16602083015261ffff8a169282019290925290871660608201526080016040516020818303038152906040526040518463ffffffff1660e01b81526004016108ca93929190610e50565b602060405180830381600087803b1580156108e457600080fd5b505af11580156108f8573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061091c9190610c86565b507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663fc2a88c36040518163ffffffff1660e01b815260040160206040518083038186803b15801561098357600080fd5b505afa1580156
10997573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906109bb9190610cc1565b949350505050565b600082815260026020526040902054610a38576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f72657175657374206e6f7420666f756e6400000000000000000000000000000060448201526064016103bd565b6000828152600260208181526040909220600181810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690911790558351610a8b93919092019190840190610bd0565b50600082815260026020526040908190205490517f6c84e12b4c188e61f1b4727024a5cf05c025fa58467e5eedf763c0744c89da7b91610ace9185918591610ee8565b60405180910390a15050565b73ffffffffffffffffffffffffffffffffffffffff8116331415610b5a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016103bd565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b828054828255906000526020600020908101928215610c0b579160200282015b82811115610c0b578251825591602001919060010190610bf0565b50610c17929150610c1b565b5090565b5b80821115610c175760008155600101610c1c565b803563ffffffff81168114610c4457600080fd5b919050565b600060208284031215610c5b57600080fd5b813573ffffffffffffffffffffffffffffffffffffffff81168114610c7f57600080fd5b9392505050565b600060208284031215610c9857600080fd5b81518015158114610c7f57600080fd5b600060208284031215610cba57600080fd5b5035919050565b600060208284031215610cd357600080fd5b5051919050565b60008060408385031215610ced57600080fd5b8235915060208084013567ffffffffffffffff80821115610d0d57600080fd5b818601915086601f830112610d2157600080fd5b813581811115610d3357610d33610f3b565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f83011681018181108582111715610d7657610d766
10f3b565b604052828152858101935084860182860187018b1015610d9557600080fd5b600095505b83861015610db8578035855260019590950194938601938601610d9a565b508096505050505050509250929050565b600080600060608486031215610dde57600080fd5b610de784610c30565b9250602084013561ffff81168114610dfe57600080fd5b9150610e0c60408501610c30565b90509250925092565b600081518084526020808501945080840160005b83811015610e4557815187529582019590820190600101610e29565b509495945050505050565b73ffffffffffffffffffffffffffffffffffffffff8416815260006020848184015260606040840152835180606085015260005b81811015610ea057858101830151858201608001528201610e84565b81811115610eb2576000608083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160800195945050505050565b838152606060208201526000610f016060830185610e15565b9050826040830152949350505050565b8381528215156020820152606060408201526000610f326060830184610e15565b95945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFV2WrapperConsumerExampleABI = VRFV2WrapperConsumerExampleMetaData.ABI + +var VRFV2WrapperConsumerExampleBin = VRFV2WrapperConsumerExampleMetaData.Bin + +func DeployVRFV2WrapperConsumerExample(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address, _vrfV2Wrapper common.Address) (common.Address, *types.Transaction, *VRFV2WrapperConsumerExample, error) { + parsed, err := VRFV2WrapperConsumerExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2WrapperConsumerExampleBin), backend, _link, _vrfV2Wrapper) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2WrapperConsumerExample{address: address, abi: *parsed, VRFV2WrapperConsumerExampleCaller: 
VRFV2WrapperConsumerExampleCaller{contract: contract}, VRFV2WrapperConsumerExampleTransactor: VRFV2WrapperConsumerExampleTransactor{contract: contract}, VRFV2WrapperConsumerExampleFilterer: VRFV2WrapperConsumerExampleFilterer{contract: contract}}, nil +} + +type VRFV2WrapperConsumerExample struct { + address common.Address + abi abi.ABI + VRFV2WrapperConsumerExampleCaller + VRFV2WrapperConsumerExampleTransactor + VRFV2WrapperConsumerExampleFilterer +} + +type VRFV2WrapperConsumerExampleCaller struct { + contract *bind.BoundContract +} + +type VRFV2WrapperConsumerExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFV2WrapperConsumerExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFV2WrapperConsumerExampleSession struct { + Contract *VRFV2WrapperConsumerExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2WrapperConsumerExampleCallerSession struct { + Contract *VRFV2WrapperConsumerExampleCaller + CallOpts bind.CallOpts +} + +type VRFV2WrapperConsumerExampleTransactorSession struct { + Contract *VRFV2WrapperConsumerExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2WrapperConsumerExampleRaw struct { + Contract *VRFV2WrapperConsumerExample +} + +type VRFV2WrapperConsumerExampleCallerRaw struct { + Contract *VRFV2WrapperConsumerExampleCaller +} + +type VRFV2WrapperConsumerExampleTransactorRaw struct { + Contract *VRFV2WrapperConsumerExampleTransactor +} + +func NewVRFV2WrapperConsumerExample(address common.Address, backend bind.ContractBackend) (*VRFV2WrapperConsumerExample, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2WrapperConsumerExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2WrapperConsumerExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2WrapperConsumerExample{address: address, abi: abi, VRFV2WrapperConsumerExampleCaller: VRFV2WrapperConsumerExampleCaller{contract: contract}, 
VRFV2WrapperConsumerExampleTransactor: VRFV2WrapperConsumerExampleTransactor{contract: contract}, VRFV2WrapperConsumerExampleFilterer: VRFV2WrapperConsumerExampleFilterer{contract: contract}}, nil +} + +func NewVRFV2WrapperConsumerExampleCaller(address common.Address, caller bind.ContractCaller) (*VRFV2WrapperConsumerExampleCaller, error) { + contract, err := bindVRFV2WrapperConsumerExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2WrapperConsumerExampleCaller{contract: contract}, nil +} + +func NewVRFV2WrapperConsumerExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2WrapperConsumerExampleTransactor, error) { + contract, err := bindVRFV2WrapperConsumerExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2WrapperConsumerExampleTransactor{contract: contract}, nil +} + +func NewVRFV2WrapperConsumerExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2WrapperConsumerExampleFilterer, error) { + contract, err := bindVRFV2WrapperConsumerExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2WrapperConsumerExampleFilterer{contract: contract}, nil +} + +func bindVRFV2WrapperConsumerExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2WrapperConsumerExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2WrapperConsumerExample.Contract.VRFV2WrapperConsumerExampleCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.VRFV2WrapperConsumerExampleTransactor.contract.Transfer(opts) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.VRFV2WrapperConsumerExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2WrapperConsumerExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.contract.Transfer(opts) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleCaller) GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) { + var out []interface{} + err := _VRFV2WrapperConsumerExample.contract.Call(opts, &out, "getRequestStatus", _requestId) + + outstruct := new(GetRequestStatus) + if err != nil { + return *outstruct, err + } + + outstruct.Paid = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Fulfilled = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.RandomWords = *abi.ConvertType(out[2], new([]*big.Int)).(*[]*big.Int) + + return *outstruct, err + +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2WrapperConsumerExample.Contract.GetRequestStatus(&_VRFV2WrapperConsumerExample.CallOpts, _requestId) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleCallerSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2WrapperConsumerExample.Contract.GetRequestStatus(&_VRFV2WrapperConsumerExample.CallOpts, _requestId) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2WrapperConsumerExample.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleSession) Owner() (common.Address, error) { + return _VRFV2WrapperConsumerExample.Contract.Owner(&_VRFV2WrapperConsumerExample.CallOpts) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleCallerSession) Owner() (common.Address, error) { + return _VRFV2WrapperConsumerExample.Contract.Owner(&_VRFV2WrapperConsumerExample.CallOpts) +} + +func 
(_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleCaller) SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) { + var out []interface{} + err := _VRFV2WrapperConsumerExample.contract.Call(opts, &out, "s_requests", arg0) + + outstruct := new(SRequests) + if err != nil { + return *outstruct, err + } + + outstruct.Paid = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Fulfilled = *abi.ConvertType(out[1], new(bool)).(*bool) + + return *outstruct, err + +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2WrapperConsumerExample.Contract.SRequests(&_VRFV2WrapperConsumerExample.CallOpts, arg0) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleCallerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2WrapperConsumerExample.Contract.SRequests(&_VRFV2WrapperConsumerExample.CallOpts, arg0) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.AcceptOwnership(&_VRFV2WrapperConsumerExample.TransactOpts) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.AcceptOwnership(&_VRFV2WrapperConsumerExample.TransactOpts) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleTransactor) MakeRequest(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.contract.Transact(opts, "makeRequest", _callbackGasLimit, 
_requestConfirmations, _numWords) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleSession) MakeRequest(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.MakeRequest(&_VRFV2WrapperConsumerExample.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleTransactorSession) MakeRequest(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.MakeRequest(&_VRFV2WrapperConsumerExample.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, _requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.contract.Transact(opts, "rawFulfillRandomWords", _requestId, _randomWords) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleSession) RawFulfillRandomWords(_requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.RawFulfillRandomWords(&_VRFV2WrapperConsumerExample.TransactOpts, _requestId, _randomWords) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleTransactorSession) RawFulfillRandomWords(_requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.RawFulfillRandomWords(&_VRFV2WrapperConsumerExample.TransactOpts, _requestId, _randomWords) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.contract.Transact(opts, "transferOwnership", to) +} + +func 
(_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.TransferOwnership(&_VRFV2WrapperConsumerExample.TransactOpts, to) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.TransferOwnership(&_VRFV2WrapperConsumerExample.TransactOpts, to) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleTransactor) WithdrawLink(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.contract.Transact(opts, "withdrawLink", amount) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleSession) WithdrawLink(amount *big.Int) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.WithdrawLink(&_VRFV2WrapperConsumerExample.TransactOpts, amount) +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleTransactorSession) WithdrawLink(amount *big.Int) (*types.Transaction, error) { + return _VRFV2WrapperConsumerExample.Contract.WithdrawLink(&_VRFV2WrapperConsumerExample.TransactOpts, amount) +} + +type VRFV2WrapperConsumerExampleOwnershipTransferRequestedIterator struct { + Event *VRFV2WrapperConsumerExampleOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2WrapperConsumerExampleOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperConsumerExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { 
+ case log := <-it.logs: + it.Event = new(VRFV2WrapperConsumerExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2WrapperConsumerExampleOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2WrapperConsumerExampleOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2WrapperConsumerExampleOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2WrapperConsumerExampleOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2WrapperConsumerExample.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2WrapperConsumerExampleOwnershipTransferRequestedIterator{contract: _VRFV2WrapperConsumerExample.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperConsumerExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, 
sub, err := _VRFV2WrapperConsumerExample.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2WrapperConsumerExampleOwnershipTransferRequested) + if err := _VRFV2WrapperConsumerExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2WrapperConsumerExampleOwnershipTransferRequested, error) { + event := new(VRFV2WrapperConsumerExampleOwnershipTransferRequested) + if err := _VRFV2WrapperConsumerExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2WrapperConsumerExampleOwnershipTransferredIterator struct { + Event *VRFV2WrapperConsumerExampleOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2WrapperConsumerExampleOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperConsumerExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperConsumerExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != 
nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2WrapperConsumerExampleOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2WrapperConsumerExampleOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2WrapperConsumerExampleOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2WrapperConsumerExampleOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2WrapperConsumerExample.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2WrapperConsumerExampleOwnershipTransferredIterator{contract: _VRFV2WrapperConsumerExample.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperConsumerExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2WrapperConsumerExample.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer 
sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2WrapperConsumerExampleOwnershipTransferred) + if err := _VRFV2WrapperConsumerExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2WrapperConsumerExampleOwnershipTransferred, error) { + event := new(VRFV2WrapperConsumerExampleOwnershipTransferred) + if err := _VRFV2WrapperConsumerExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2WrapperConsumerExampleWrappedRequestFulfilledIterator struct { + Event *VRFV2WrapperConsumerExampleWrappedRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2WrapperConsumerExampleWrappedRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperConsumerExampleWrappedRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperConsumerExampleWrappedRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2WrapperConsumerExampleWrappedRequestFulfilledIterator) Error() 
error { + return it.fail +} + +func (it *VRFV2WrapperConsumerExampleWrappedRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2WrapperConsumerExampleWrappedRequestFulfilled struct { + RequestId *big.Int + RandomWords []*big.Int + Payment *big.Int + Raw types.Log +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleFilterer) FilterWrappedRequestFulfilled(opts *bind.FilterOpts) (*VRFV2WrapperConsumerExampleWrappedRequestFulfilledIterator, error) { + + logs, sub, err := _VRFV2WrapperConsumerExample.contract.FilterLogs(opts, "WrappedRequestFulfilled") + if err != nil { + return nil, err + } + return &VRFV2WrapperConsumerExampleWrappedRequestFulfilledIterator{contract: _VRFV2WrapperConsumerExample.contract, event: "WrappedRequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleFilterer) WatchWrappedRequestFulfilled(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperConsumerExampleWrappedRequestFulfilled) (event.Subscription, error) { + + logs, sub, err := _VRFV2WrapperConsumerExample.contract.WatchLogs(opts, "WrappedRequestFulfilled") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2WrapperConsumerExampleWrappedRequestFulfilled) + if err := _VRFV2WrapperConsumerExample.contract.UnpackLog(event, "WrappedRequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleFilterer) ParseWrappedRequestFulfilled(log types.Log) (*VRFV2WrapperConsumerExampleWrappedRequestFulfilled, error) { + event := new(VRFV2WrapperConsumerExampleWrappedRequestFulfilled) + if 
err := _VRFV2WrapperConsumerExample.contract.UnpackLog(event, "WrappedRequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2WrapperConsumerExampleWrapperRequestMadeIterator struct { + Event *VRFV2WrapperConsumerExampleWrapperRequestMade + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2WrapperConsumerExampleWrapperRequestMadeIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperConsumerExampleWrapperRequestMade) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperConsumerExampleWrapperRequestMade) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2WrapperConsumerExampleWrapperRequestMadeIterator) Error() error { + return it.fail +} + +func (it *VRFV2WrapperConsumerExampleWrapperRequestMadeIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2WrapperConsumerExampleWrapperRequestMade struct { + RequestId *big.Int + Paid *big.Int + Raw types.Log +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleFilterer) FilterWrapperRequestMade(opts *bind.FilterOpts, requestId []*big.Int) (*VRFV2WrapperConsumerExampleWrapperRequestMadeIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFV2WrapperConsumerExample.contract.FilterLogs(opts, "WrapperRequestMade", requestIdRule) + if 
err != nil { + return nil, err + } + return &VRFV2WrapperConsumerExampleWrapperRequestMadeIterator{contract: _VRFV2WrapperConsumerExample.contract, event: "WrapperRequestMade", logs: logs, sub: sub}, nil +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleFilterer) WatchWrapperRequestMade(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperConsumerExampleWrapperRequestMade, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFV2WrapperConsumerExample.contract.WatchLogs(opts, "WrapperRequestMade", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2WrapperConsumerExampleWrapperRequestMade) + if err := _VRFV2WrapperConsumerExample.contract.UnpackLog(event, "WrapperRequestMade", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExampleFilterer) ParseWrapperRequestMade(log types.Log) (*VRFV2WrapperConsumerExampleWrapperRequestMade, error) { + event := new(VRFV2WrapperConsumerExampleWrapperRequestMade) + if err := _VRFV2WrapperConsumerExample.contract.UnpackLog(event, "WrapperRequestMade", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRequestStatus struct { + Paid *big.Int + Fulfilled bool + RandomWords []*big.Int +} +type SRequests struct { + Paid *big.Int + Fulfilled bool +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExample) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case 
_VRFV2WrapperConsumerExample.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2WrapperConsumerExample.ParseOwnershipTransferRequested(log) + case _VRFV2WrapperConsumerExample.abi.Events["OwnershipTransferred"].ID: + return _VRFV2WrapperConsumerExample.ParseOwnershipTransferred(log) + case _VRFV2WrapperConsumerExample.abi.Events["WrappedRequestFulfilled"].ID: + return _VRFV2WrapperConsumerExample.ParseWrappedRequestFulfilled(log) + case _VRFV2WrapperConsumerExample.abi.Events["WrapperRequestMade"].ID: + return _VRFV2WrapperConsumerExample.ParseWrapperRequestMade(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2WrapperConsumerExampleOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFV2WrapperConsumerExampleOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFV2WrapperConsumerExampleWrappedRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0x6c84e12b4c188e61f1b4727024a5cf05c025fa58467e5eedf763c0744c89da7b") +} + +func (VRFV2WrapperConsumerExampleWrapperRequestMade) Topic() common.Hash { + return common.HexToHash("0x5f56b4c20db9f5b294cbf6f681368de4a992a27e2de2ee702dcf2cbbfa791ec4") +} + +func (_VRFV2WrapperConsumerExample *VRFV2WrapperConsumerExample) Address() common.Address { + return _VRFV2WrapperConsumerExample.address +} + +type VRFV2WrapperConsumerExampleInterface interface { + GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + MakeRequest(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations 
uint16, _numWords uint32) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, _requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + WithdrawLink(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2WrapperConsumerExampleOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperConsumerExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFV2WrapperConsumerExampleOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2WrapperConsumerExampleOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperConsumerExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2WrapperConsumerExampleOwnershipTransferred, error) + + FilterWrappedRequestFulfilled(opts *bind.FilterOpts) (*VRFV2WrapperConsumerExampleWrappedRequestFulfilledIterator, error) + + WatchWrappedRequestFulfilled(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperConsumerExampleWrappedRequestFulfilled) (event.Subscription, error) + + ParseWrappedRequestFulfilled(log types.Log) (*VRFV2WrapperConsumerExampleWrappedRequestFulfilled, error) + + FilterWrapperRequestMade(opts *bind.FilterOpts, requestId []*big.Int) (*VRFV2WrapperConsumerExampleWrapperRequestMadeIterator, error) + + WatchWrapperRequestMade(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperConsumerExampleWrapperRequestMade, requestId []*big.Int) (event.Subscription, error) + + 
ParseWrapperRequestMade(log types.Log) (*VRFV2WrapperConsumerExampleWrapperRequestMade, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2_wrapper_interface/vrfv2_wrapper_interface.go b/core/gethwrappers/generated/vrfv2_wrapper_interface/vrfv2_wrapper_interface.go new file mode 100644 index 00000000..3505e8e1 --- /dev/null +++ b/core/gethwrappers/generated/vrfv2_wrapper_interface/vrfv2_wrapper_interface.go @@ -0,0 +1,231 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrfv2_wrapper_interface + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2WrapperInterfaceMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"}],\"name\":\"calculateRequestPrice\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"_requestGasPriceWei\",\"type\":\"uint256\"}],\"name\":\"estimateRequestPrice\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastRequestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", +} + +var VRFV2WrapperInterfaceABI = VRFV2WrapperInterfaceMetaData.ABI + +type VRFV2WrapperInterface struct { + address common.Address + abi abi.ABI + VRFV2WrapperInterfaceCaller + VRFV2WrapperInterfaceTransactor + VRFV2WrapperInterfaceFilterer +} + +type VRFV2WrapperInterfaceCaller struct { + contract *bind.BoundContract +} + +type VRFV2WrapperInterfaceTransactor struct { + contract *bind.BoundContract +} + +type VRFV2WrapperInterfaceFilterer struct { + contract *bind.BoundContract +} + +type VRFV2WrapperInterfaceSession struct { + Contract *VRFV2WrapperInterface + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2WrapperInterfaceCallerSession struct { + Contract *VRFV2WrapperInterfaceCaller + CallOpts bind.CallOpts +} + +type VRFV2WrapperInterfaceTransactorSession struct { + Contract *VRFV2WrapperInterfaceTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2WrapperInterfaceRaw struct { + Contract *VRFV2WrapperInterface +} + +type VRFV2WrapperInterfaceCallerRaw struct { + Contract *VRFV2WrapperInterfaceCaller +} + +type VRFV2WrapperInterfaceTransactorRaw struct { + Contract *VRFV2WrapperInterfaceTransactor +} + +func NewVRFV2WrapperInterface(address 
common.Address, backend bind.ContractBackend) (*VRFV2WrapperInterface, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2WrapperInterfaceABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2WrapperInterface(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2WrapperInterface{address: address, abi: abi, VRFV2WrapperInterfaceCaller: VRFV2WrapperInterfaceCaller{contract: contract}, VRFV2WrapperInterfaceTransactor: VRFV2WrapperInterfaceTransactor{contract: contract}, VRFV2WrapperInterfaceFilterer: VRFV2WrapperInterfaceFilterer{contract: contract}}, nil +} + +func NewVRFV2WrapperInterfaceCaller(address common.Address, caller bind.ContractCaller) (*VRFV2WrapperInterfaceCaller, error) { + contract, err := bindVRFV2WrapperInterface(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2WrapperInterfaceCaller{contract: contract}, nil +} + +func NewVRFV2WrapperInterfaceTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2WrapperInterfaceTransactor, error) { + contract, err := bindVRFV2WrapperInterface(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2WrapperInterfaceTransactor{contract: contract}, nil +} + +func NewVRFV2WrapperInterfaceFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2WrapperInterfaceFilterer, error) { + contract, err := bindVRFV2WrapperInterface(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2WrapperInterfaceFilterer{contract: contract}, nil +} + +func bindVRFV2WrapperInterface(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2WrapperInterfaceMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2WrapperInterface 
*VRFV2WrapperInterfaceRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2WrapperInterface.Contract.VRFV2WrapperInterfaceCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2WrapperInterface.Contract.VRFV2WrapperInterfaceTransactor.contract.Transfer(opts) +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2WrapperInterface.Contract.VRFV2WrapperInterfaceTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2WrapperInterface.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2WrapperInterface.Contract.contract.Transfer(opts) +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2WrapperInterface.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceCaller) CalculateRequestPrice(opts *bind.CallOpts, _callbackGasLimit uint32) (*big.Int, error) { + var out []interface{} + err := _VRFV2WrapperInterface.contract.Call(opts, &out, "calculateRequestPrice", _callbackGasLimit) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceSession) CalculateRequestPrice(_callbackGasLimit uint32) (*big.Int, error) { + return _VRFV2WrapperInterface.Contract.CalculateRequestPrice(&_VRFV2WrapperInterface.CallOpts, _callbackGasLimit) +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceCallerSession) CalculateRequestPrice(_callbackGasLimit uint32) (*big.Int, error) { + return _VRFV2WrapperInterface.Contract.CalculateRequestPrice(&_VRFV2WrapperInterface.CallOpts, _callbackGasLimit) +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceCaller) EstimateRequestPrice(opts *bind.CallOpts, _callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV2WrapperInterface.contract.Call(opts, &out, "estimateRequestPrice", _callbackGasLimit, _requestGasPriceWei) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceSession) EstimateRequestPrice(_callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) { + return _VRFV2WrapperInterface.Contract.EstimateRequestPrice(&_VRFV2WrapperInterface.CallOpts, _callbackGasLimit, _requestGasPriceWei) +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceCallerSession) EstimateRequestPrice(_callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) { + return _VRFV2WrapperInterface.Contract.EstimateRequestPrice(&_VRFV2WrapperInterface.CallOpts, _callbackGasLimit, _requestGasPriceWei) 
+} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceCaller) LastRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2WrapperInterface.contract.Call(opts, &out, "lastRequestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceSession) LastRequestId() (*big.Int, error) { + return _VRFV2WrapperInterface.Contract.LastRequestId(&_VRFV2WrapperInterface.CallOpts) +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterfaceCallerSession) LastRequestId() (*big.Int, error) { + return _VRFV2WrapperInterface.Contract.LastRequestId(&_VRFV2WrapperInterface.CallOpts) +} + +func (_VRFV2WrapperInterface *VRFV2WrapperInterface) Address() common.Address { + return _VRFV2WrapperInterface.address +} + +type VRFV2WrapperInterfaceInterface interface { + CalculateRequestPrice(opts *bind.CallOpts, _callbackGasLimit uint32) (*big.Int, error) + + EstimateRequestPrice(opts *bind.CallOpts, _callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) + + LastRequestId(opts *bind.CallOpts) (*big.Int, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2_wrapper_load_test_consumer/vrfv2_wrapper_load_test_consumer.go b/core/gethwrappers/generated/vrfv2_wrapper_load_test_consumer/vrfv2_wrapper_load_test_consumer.go new file mode 100644 index 00000000..9417e662 --- /dev/null +++ b/core/gethwrappers/generated/vrfv2_wrapper_load_test_consumer/vrfv2_wrapper_load_test_consumer.go @@ -0,0 +1,1142 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrfv2_wrapper_load_test_consumer + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2WrapperLoadTestConsumerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_vrfV2Wrapper\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"payment\",\"type\":\"uint256\"}],\"name\":\"WrappedRequestFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"inter
nalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"}],\"name\":\"WrapperRequestMade\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_requestId\",\"type\":\"uint256\"}],\"name\":\"getRequestStatus\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256\",\"name\":\"requestTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestBlockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentBlockNumber\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_vrfV2Wrapper\",\"outputs\":[{\"internalType\":\"contractVRFV2WrapperInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"_numWords\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestCount\",\"type\":\"uint16\"}],\"name\":\"makeRequests\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"_randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMut
ability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"reset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_averageFulfillmentInMillions\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_fastestFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_lastRequestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requests\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"requestTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestBlockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentBlockNumber\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_responseCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_slowestFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"out
puts\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"withdrawLink\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", + Bin: "0x60e0604052600060045560006005556103e76006553480156200002157600080fd5b50604051620017d6380380620017d68339810160408190526200004491620001cb565b6001600160601b0319606083811b821660805282901b1660a0523380600081620000b55760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000e857620000e88162000102565b50505060601b6001600160601b03191660c0525062000203565b6001600160a01b0381163314156200015d5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620000ac565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001c657600080fd5b919050565b60008060408385031215620001df57600080fd5b620001ea83620001ae565b9150620001fa60208401620001ae565b90509250929050565b60805160601c60a05160601c60c05160601c61157e6200025860003960006101600152600081816103ad015281816106d401528181610cea0152610e1101526000818161054e0152610cc0015261157e6000f3fe6080604052600436106100f75760003560e01c80638da5cb5b1161008a578063d826f88f11610059578063d826f88f14610300578063d8a4676f1461032c578063dc1670db1461035f578063f2fde38b1461037557600080fd5b80638da5cb5b1461021e578063a168fa8914610249578063afacbf9c146102ca578063b1e21749146102ea57600080fd5b8063737144bc116100c6578063737144bc146101bd57806374dba124146101d357806379ba5097146101e95780637a8042bd146101fe57600080fd5b80631757f11c146101035780631fe543e31461012c5780632353f2381461014e578063557d2e92146
101a757600080fd5b366100fe57005b600080fd5b34801561010f57600080fd5b5061011960055481565b6040519081526020015b60405180910390f35b34801561013857600080fd5b5061014c61014736600461118b565b610395565b005b34801561015a57600080fd5b506101827f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610123565b3480156101b357600080fd5b5061011960035481565b3480156101c957600080fd5b5061011960045481565b3480156101df57600080fd5b5061011960065481565b3480156101f557600080fd5b5061014c610447565b34801561020a57600080fd5b5061014c610219366004611159565b610544565b34801561022a57600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff16610182565b34801561025557600080fd5b5061029d610264366004611159565b600960205260009081526040902080546001820154600383015460048401546005850154600690950154939460ff909316939192909186565b604080519687529415156020870152938501929092526060840152608083015260a082015260c001610123565b3480156102d657600080fd5b5061014c6102e536600461127a565b61064c565b3480156102f657600080fd5b5061011960075481565b34801561030c57600080fd5b5061014c6000600481905560058190556103e76006556003819055600255565b34801561033857600080fd5b5061034c610347366004611159565b61089a565b60405161012397969594939291906113ca565b34801561036b57600080fd5b5061011960025481565b34801561038157600080fd5b5061014c6103903660046110fa565b610a01565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610439576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f6f6e6c792056524620563220777261707065722063616e2066756c66696c6c0060448201526064015b60405180910390fd5b6104438282610a15565b5050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146104c8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610430565b60008054337ffffffffffffffffff
fffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b61054c610bf1565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb6105a760005473ffffffffffffffffffffffffffffffffffffffff1690565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e084901b16815273ffffffffffffffffffffffffffffffffffffffff909116600482015260248101849052604401602060405180830381600087803b15801561061457600080fd5b505af1158015610628573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104439190611137565b610654610bf1565b60005b8161ffff168161ffff161015610893576000610674868686610c74565b600781905590506000610685610eb5565b6040517f4306d35400000000000000000000000000000000000000000000000000000000815263ffffffff8916600482015290915060009073ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001690634306d3549060240160206040518083038186803b15801561071657600080fd5b505afa15801561072a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061074e9190611172565b6040805160e08101825282815260006020808301828152845183815280830186528486019081524260608601526080850184905260a0850189905260c0850184905289845260098352949092208351815591516001830180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001691151591909117905592518051949550919390926107ee92600285019291019061106f565b5060608201516003808301919091556080830151600483015560a0830151600583015560c090920151600690910155805490600061082b836114da565b9091555050600083815260086020526040908190208390555183907f5f56b4c20db9f5b294cbf6f681368de4a992a27e2de2ee702dcf2cbbfa791ec4906108759084815260200190565b60405180910390a2505050808061088b906114b8565b915050610657565b5050505050565b6000818152600960205260408120548190606090829081908190819061091c576040517f0
8c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f72657175657374206e6f7420666f756e640000000000000000000000000000006044820152606401610430565b6000888152600960209081526040808320815160e08101835281548152600182015460ff1615158185015260028201805484518187028101870186528181529295939486019383018282801561099157602002820191906000526020600020905b81548152602001906001019080831161097d575b505050505081526020016003820154815260200160048201548152602001600582015481526020016006820154815250509050806000015181602001518260400151836060015184608001518560a001518660c00151975097509750975097509750975050919395979092949650565b610a09610bf1565b610a1281610f52565b50565b600082815260096020526040902054610a8a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f72657175657374206e6f7420666f756e640000000000000000000000000000006044820152606401610430565b6000610a94610eb5565b60008481526008602052604081205491925090610ab190836114a1565b90506000610ac282620f4240611464565b9050600554821115610ad45760058290555b600654821015610ae45760068290555b600060025411610af45780610b27565b600254610b02906001611411565b81600254600454610b139190611464565b610b1d9190611411565b610b279190611429565b60045560028054906000610b3a836114da565b90915550506000858152600960209081526040909120600181810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690911790558551610b929260029092019187019061106f565b5060008581526009602052604090819020426004820155600681018590555490517f6c84e12b4c188e61f1b4727024a5cf05c025fa58467e5eedf763c0744c89da7b91610be291889188916113a1565b60405180910390a15050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610c72576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610430565b565b6040517f4306d35400000000000000000000000000000000000000000000000000000000815263ffffffff841660048201526
0009073ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000811691634000aea0917f00000000000000000000000000000000000000000000000000000000000000009190821690634306d3549060240160206040518083038186803b158015610d2f57600080fd5b505afa158015610d43573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d679190611172565b6040805163ffffffff808b16602083015261ffff8a169282019290925290871660608201526080016040516020818303038152906040526040518463ffffffff1660e01b8152600401610dbc93929190611309565b602060405180830381600087803b158015610dd657600080fd5b505af1158015610dea573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610e0e9190611137565b507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663fc2a88c36040518163ffffffff1660e01b815260040160206040518083038186803b158015610e7557600080fd5b505afa158015610e89573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610ead9190611172565b949350505050565b600046610ec181611048565b15610f4b57606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b158015610f0d57600080fd5b505afa158015610f21573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610f459190611172565b91505090565b4391505090565b73ffffffffffffffffffffffffffffffffffffffff8116331415610fd2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610430565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600061a4b182148061105c575062066eed82145b80611069575062066eee82145b92915050565b8280548282559060005260206000209081019282156110aa579160200282015
b828111156110aa57825182559160200191906001019061108f565b506110b69291506110ba565b5090565b5b808211156110b657600081556001016110bb565b803561ffff811681146110e157600080fd5b919050565b803563ffffffff811681146110e157600080fd5b60006020828403121561110c57600080fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811461113057600080fd5b9392505050565b60006020828403121561114957600080fd5b8151801515811461113057600080fd5b60006020828403121561116b57600080fd5b5035919050565b60006020828403121561118457600080fd5b5051919050565b6000806040838503121561119e57600080fd5b8235915060208084013567ffffffffffffffff808211156111be57600080fd5b818601915086601f8301126111d257600080fd5b8135818111156111e4576111e4611542565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f8301168101818110858211171561122757611227611542565b604052828152858101935084860182860187018b101561124657600080fd5b600095505b8386101561126957803585526001959095019493860193860161124b565b508096505050505050509250929050565b6000806000806080858703121561129057600080fd5b611299856110e6565b93506112a7602086016110cf565b92506112b5604086016110e6565b91506112c3606086016110cf565b905092959194509250565b600081518084526020808501945080840160005b838110156112fe578151875295820195908201906001016112e2565b509495945050505050565b73ffffffffffffffffffffffffffffffffffffffff8416815260006020848184015260606040840152835180606085015260005b818110156113595785810183015185820160800152820161133d565b8181111561136b576000608083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160800195945050505050565b8381526060602082015260006113ba60608301856112ce565b9050826040830152949350505050565b878152861515602082015260e0604082015260006113eb60e08301886112ce565b90508560608301528460808301528360a08301528260c083015298975050505050505050565b6000821982111561142457611424611513565b500190565b60008261145f577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b6000817ffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffff048311821515161561149c5761149c611513565b500290565b6000828210156114b3576114b3611513565b500390565b600061ffff808316818114156114d0576114d0611513565b6001019392505050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82141561150c5761150c611513565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFV2WrapperLoadTestConsumerABI = VRFV2WrapperLoadTestConsumerMetaData.ABI + +var VRFV2WrapperLoadTestConsumerBin = VRFV2WrapperLoadTestConsumerMetaData.Bin + +func DeployVRFV2WrapperLoadTestConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address, _vrfV2Wrapper common.Address) (common.Address, *types.Transaction, *VRFV2WrapperLoadTestConsumer, error) { + parsed, err := VRFV2WrapperLoadTestConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2WrapperLoadTestConsumerBin), backend, _link, _vrfV2Wrapper) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2WrapperLoadTestConsumer{address: address, abi: *parsed, VRFV2WrapperLoadTestConsumerCaller: VRFV2WrapperLoadTestConsumerCaller{contract: contract}, VRFV2WrapperLoadTestConsumerTransactor: VRFV2WrapperLoadTestConsumerTransactor{contract: contract}, VRFV2WrapperLoadTestConsumerFilterer: VRFV2WrapperLoadTestConsumerFilterer{contract: contract}}, nil +} + +type VRFV2WrapperLoadTestConsumer struct { + address common.Address + abi abi.ABI + VRFV2WrapperLoadTestConsumerCaller + VRFV2WrapperLoadTestConsumerTransactor + VRFV2WrapperLoadTestConsumerFilterer +} + +type VRFV2WrapperLoadTestConsumerCaller struct { + 
contract *bind.BoundContract +} + +type VRFV2WrapperLoadTestConsumerTransactor struct { + contract *bind.BoundContract +} + +type VRFV2WrapperLoadTestConsumerFilterer struct { + contract *bind.BoundContract +} + +type VRFV2WrapperLoadTestConsumerSession struct { + Contract *VRFV2WrapperLoadTestConsumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2WrapperLoadTestConsumerCallerSession struct { + Contract *VRFV2WrapperLoadTestConsumerCaller + CallOpts bind.CallOpts +} + +type VRFV2WrapperLoadTestConsumerTransactorSession struct { + Contract *VRFV2WrapperLoadTestConsumerTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2WrapperLoadTestConsumerRaw struct { + Contract *VRFV2WrapperLoadTestConsumer +} + +type VRFV2WrapperLoadTestConsumerCallerRaw struct { + Contract *VRFV2WrapperLoadTestConsumerCaller +} + +type VRFV2WrapperLoadTestConsumerTransactorRaw struct { + Contract *VRFV2WrapperLoadTestConsumerTransactor +} + +func NewVRFV2WrapperLoadTestConsumer(address common.Address, backend bind.ContractBackend) (*VRFV2WrapperLoadTestConsumer, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2WrapperLoadTestConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2WrapperLoadTestConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2WrapperLoadTestConsumer{address: address, abi: abi, VRFV2WrapperLoadTestConsumerCaller: VRFV2WrapperLoadTestConsumerCaller{contract: contract}, VRFV2WrapperLoadTestConsumerTransactor: VRFV2WrapperLoadTestConsumerTransactor{contract: contract}, VRFV2WrapperLoadTestConsumerFilterer: VRFV2WrapperLoadTestConsumerFilterer{contract: contract}}, nil +} + +func NewVRFV2WrapperLoadTestConsumerCaller(address common.Address, caller bind.ContractCaller) (*VRFV2WrapperLoadTestConsumerCaller, error) { + contract, err := bindVRFV2WrapperLoadTestConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return 
&VRFV2WrapperLoadTestConsumerCaller{contract: contract}, nil +} + +func NewVRFV2WrapperLoadTestConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2WrapperLoadTestConsumerTransactor, error) { + contract, err := bindVRFV2WrapperLoadTestConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2WrapperLoadTestConsumerTransactor{contract: contract}, nil +} + +func NewVRFV2WrapperLoadTestConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2WrapperLoadTestConsumerFilterer, error) { + contract, err := bindVRFV2WrapperLoadTestConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2WrapperLoadTestConsumerFilterer{contract: contract}, nil +} + +func bindVRFV2WrapperLoadTestConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2WrapperLoadTestConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2WrapperLoadTestConsumer.Contract.VRFV2WrapperLoadTestConsumerCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.VRFV2WrapperLoadTestConsumerTransactor.contract.Transfer(opts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.VRFV2WrapperLoadTestConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2WrapperLoadTestConsumer.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.contract.Transfer(opts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCaller) GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) { + var out []interface{} + err := _VRFV2WrapperLoadTestConsumer.contract.Call(opts, &out, "getRequestStatus", _requestId) + + outstruct := new(GetRequestStatus) + if err != nil { + return *outstruct, err + } + + outstruct.Paid = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Fulfilled = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.RandomWords = *abi.ConvertType(out[2], new([]*big.Int)).(*[]*big.Int) + outstruct.RequestTimestamp = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.FulfilmentTimestamp = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + outstruct.RequestBlockNumber = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + outstruct.FulfilmentBlockNumber = *abi.ConvertType(out[6], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2WrapperLoadTestConsumer.Contract.GetRequestStatus(&_VRFV2WrapperLoadTestConsumer.CallOpts, _requestId) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCallerSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2WrapperLoadTestConsumer.Contract.GetRequestStatus(&_VRFV2WrapperLoadTestConsumer.CallOpts, _requestId) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCaller) IVrfV2Wrapper(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2WrapperLoadTestConsumer.contract.Call(opts, &out, "i_vrfV2Wrapper") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) 
IVrfV2Wrapper() (common.Address, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.IVrfV2Wrapper(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCallerSession) IVrfV2Wrapper() (common.Address, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.IVrfV2Wrapper(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2WrapperLoadTestConsumer.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) Owner() (common.Address, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.Owner(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCallerSession) Owner() (common.Address, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.Owner(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCaller) SAverageFulfillmentInMillions(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2WrapperLoadTestConsumer.contract.Call(opts, &out, "s_averageFulfillmentInMillions") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) SAverageFulfillmentInMillions() (*big.Int, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SAverageFulfillmentInMillions(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCallerSession) SAverageFulfillmentInMillions() (*big.Int, error) { + return 
_VRFV2WrapperLoadTestConsumer.Contract.SAverageFulfillmentInMillions(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCaller) SFastestFulfillment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2WrapperLoadTestConsumer.contract.Call(opts, &out, "s_fastestFulfillment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) SFastestFulfillment() (*big.Int, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SFastestFulfillment(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCallerSession) SFastestFulfillment() (*big.Int, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SFastestFulfillment(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCaller) SLastRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2WrapperLoadTestConsumer.contract.Call(opts, &out, "s_lastRequestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) SLastRequestId() (*big.Int, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SLastRequestId(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCallerSession) SLastRequestId() (*big.Int, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SLastRequestId(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCaller) SRequestCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := 
_VRFV2WrapperLoadTestConsumer.contract.Call(opts, &out, "s_requestCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) SRequestCount() (*big.Int, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SRequestCount(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCallerSession) SRequestCount() (*big.Int, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SRequestCount(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCaller) SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) { + var out []interface{} + err := _VRFV2WrapperLoadTestConsumer.contract.Call(opts, &out, "s_requests", arg0) + + outstruct := new(SRequests) + if err != nil { + return *outstruct, err + } + + outstruct.Paid = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Fulfilled = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.RequestTimestamp = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.FulfilmentTimestamp = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.RequestBlockNumber = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + outstruct.FulfilmentBlockNumber = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SRequests(&_VRFV2WrapperLoadTestConsumer.CallOpts, arg0) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCallerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SRequests(&_VRFV2WrapperLoadTestConsumer.CallOpts, arg0) +} + +func 
(_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCaller) SResponseCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2WrapperLoadTestConsumer.contract.Call(opts, &out, "s_responseCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) SResponseCount() (*big.Int, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SResponseCount(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCallerSession) SResponseCount() (*big.Int, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SResponseCount(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCaller) SSlowestFulfillment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2WrapperLoadTestConsumer.contract.Call(opts, &out, "s_slowestFulfillment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) SSlowestFulfillment() (*big.Int, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SSlowestFulfillment(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerCallerSession) SSlowestFulfillment() (*big.Int, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.SSlowestFulfillment(&_VRFV2WrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) 
AcceptOwnership() (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.AcceptOwnership(&_VRFV2WrapperLoadTestConsumer.TransactOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.AcceptOwnership(&_VRFV2WrapperLoadTestConsumer.TransactOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactor) MakeRequests(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.contract.Transact(opts, "makeRequests", _callbackGasLimit, _requestConfirmations, _numWords, _requestCount) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) MakeRequests(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.MakeRequests(&_VRFV2WrapperLoadTestConsumer.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords, _requestCount) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactorSession) MakeRequests(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.MakeRequests(&_VRFV2WrapperLoadTestConsumer.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords, _requestCount) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, _requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.contract.Transact(opts, "rawFulfillRandomWords", _requestId, _randomWords) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) 
RawFulfillRandomWords(_requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.RawFulfillRandomWords(&_VRFV2WrapperLoadTestConsumer.TransactOpts, _requestId, _randomWords) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactorSession) RawFulfillRandomWords(_requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.RawFulfillRandomWords(&_VRFV2WrapperLoadTestConsumer.TransactOpts, _requestId, _randomWords) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactor) Reset(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.contract.Transact(opts, "reset") +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) Reset() (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.Reset(&_VRFV2WrapperLoadTestConsumer.TransactOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactorSession) Reset() (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.Reset(&_VRFV2WrapperLoadTestConsumer.TransactOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.TransferOwnership(&_VRFV2WrapperLoadTestConsumer.TransactOpts, to) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return 
_VRFV2WrapperLoadTestConsumer.Contract.TransferOwnership(&_VRFV2WrapperLoadTestConsumer.TransactOpts, to) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactor) WithdrawLink(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.contract.Transact(opts, "withdrawLink", amount) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) WithdrawLink(amount *big.Int) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.WithdrawLink(&_VRFV2WrapperLoadTestConsumer.TransactOpts, amount) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactorSession) WithdrawLink(amount *big.Int) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.WithdrawLink(&_VRFV2WrapperLoadTestConsumer.TransactOpts, amount) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.contract.RawTransact(opts, nil) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerSession) Receive() (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.Receive(&_VRFV2WrapperLoadTestConsumer.TransactOpts) +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerTransactorSession) Receive() (*types.Transaction, error) { + return _VRFV2WrapperLoadTestConsumer.Contract.Receive(&_VRFV2WrapperLoadTestConsumer.TransactOpts) +} + +type VRFV2WrapperLoadTestConsumerOwnershipTransferRequestedIterator struct { + Event *VRFV2WrapperLoadTestConsumerOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2WrapperLoadTestConsumerOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := 
<-it.logs: + it.Event = new(VRFV2WrapperLoadTestConsumerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperLoadTestConsumerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2WrapperLoadTestConsumerOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2WrapperLoadTestConsumerOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2WrapperLoadTestConsumerOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2WrapperLoadTestConsumerOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2WrapperLoadTestConsumer.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2WrapperLoadTestConsumerOwnershipTransferRequestedIterator{contract: _VRFV2WrapperLoadTestConsumer.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperLoadTestConsumerOwnershipTransferRequested, 
from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2WrapperLoadTestConsumer.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2WrapperLoadTestConsumerOwnershipTransferRequested) + if err := _VRFV2WrapperLoadTestConsumer.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2WrapperLoadTestConsumerOwnershipTransferRequested, error) { + event := new(VRFV2WrapperLoadTestConsumerOwnershipTransferRequested) + if err := _VRFV2WrapperLoadTestConsumer.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2WrapperLoadTestConsumerOwnershipTransferredIterator struct { + Event *VRFV2WrapperLoadTestConsumerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2WrapperLoadTestConsumerOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperLoadTestConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, 
it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperLoadTestConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2WrapperLoadTestConsumerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2WrapperLoadTestConsumerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2WrapperLoadTestConsumerOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2WrapperLoadTestConsumerOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2WrapperLoadTestConsumer.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2WrapperLoadTestConsumerOwnershipTransferredIterator{contract: _VRFV2WrapperLoadTestConsumer.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperLoadTestConsumerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var 
toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2WrapperLoadTestConsumer.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2WrapperLoadTestConsumerOwnershipTransferred) + if err := _VRFV2WrapperLoadTestConsumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2WrapperLoadTestConsumerOwnershipTransferred, error) { + event := new(VRFV2WrapperLoadTestConsumerOwnershipTransferred) + if err := _VRFV2WrapperLoadTestConsumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2WrapperLoadTestConsumerWrappedRequestFulfilledIterator struct { + Event *VRFV2WrapperLoadTestConsumerWrappedRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2WrapperLoadTestConsumerWrappedRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperLoadTestConsumerWrappedRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(VRFV2WrapperLoadTestConsumerWrappedRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2WrapperLoadTestConsumerWrappedRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *VRFV2WrapperLoadTestConsumerWrappedRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2WrapperLoadTestConsumerWrappedRequestFulfilled struct { + RequestId *big.Int + RandomWords []*big.Int + Payment *big.Int + Raw types.Log +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerFilterer) FilterWrappedRequestFulfilled(opts *bind.FilterOpts) (*VRFV2WrapperLoadTestConsumerWrappedRequestFulfilledIterator, error) { + + logs, sub, err := _VRFV2WrapperLoadTestConsumer.contract.FilterLogs(opts, "WrappedRequestFulfilled") + if err != nil { + return nil, err + } + return &VRFV2WrapperLoadTestConsumerWrappedRequestFulfilledIterator{contract: _VRFV2WrapperLoadTestConsumer.contract, event: "WrappedRequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerFilterer) WatchWrappedRequestFulfilled(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperLoadTestConsumerWrappedRequestFulfilled) (event.Subscription, error) { + + logs, sub, err := _VRFV2WrapperLoadTestConsumer.contract.WatchLogs(opts, "WrappedRequestFulfilled") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2WrapperLoadTestConsumerWrappedRequestFulfilled) + if err := _VRFV2WrapperLoadTestConsumer.contract.UnpackLog(event, "WrappedRequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + 
return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerFilterer) ParseWrappedRequestFulfilled(log types.Log) (*VRFV2WrapperLoadTestConsumerWrappedRequestFulfilled, error) { + event := new(VRFV2WrapperLoadTestConsumerWrappedRequestFulfilled) + if err := _VRFV2WrapperLoadTestConsumer.contract.UnpackLog(event, "WrappedRequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2WrapperLoadTestConsumerWrapperRequestMadeIterator struct { + Event *VRFV2WrapperLoadTestConsumerWrapperRequestMade + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2WrapperLoadTestConsumerWrapperRequestMadeIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperLoadTestConsumerWrapperRequestMade) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2WrapperLoadTestConsumerWrapperRequestMade) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2WrapperLoadTestConsumerWrapperRequestMadeIterator) Error() error { + return it.fail +} + +func (it *VRFV2WrapperLoadTestConsumerWrapperRequestMadeIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2WrapperLoadTestConsumerWrapperRequestMade struct { + RequestId *big.Int + Paid *big.Int + Raw types.Log +} + +func (_VRFV2WrapperLoadTestConsumer 
*VRFV2WrapperLoadTestConsumerFilterer) FilterWrapperRequestMade(opts *bind.FilterOpts, requestId []*big.Int) (*VRFV2WrapperLoadTestConsumerWrapperRequestMadeIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFV2WrapperLoadTestConsumer.contract.FilterLogs(opts, "WrapperRequestMade", requestIdRule) + if err != nil { + return nil, err + } + return &VRFV2WrapperLoadTestConsumerWrapperRequestMadeIterator{contract: _VRFV2WrapperLoadTestConsumer.contract, event: "WrapperRequestMade", logs: logs, sub: sub}, nil +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerFilterer) WatchWrapperRequestMade(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperLoadTestConsumerWrapperRequestMade, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFV2WrapperLoadTestConsumer.contract.WatchLogs(opts, "WrapperRequestMade", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2WrapperLoadTestConsumerWrapperRequestMade) + if err := _VRFV2WrapperLoadTestConsumer.contract.UnpackLog(event, "WrapperRequestMade", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumerFilterer) ParseWrapperRequestMade(log types.Log) (*VRFV2WrapperLoadTestConsumerWrapperRequestMade, error) { + event := new(VRFV2WrapperLoadTestConsumerWrapperRequestMade) + if err := 
_VRFV2WrapperLoadTestConsumer.contract.UnpackLog(event, "WrapperRequestMade", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRequestStatus struct { + Paid *big.Int + Fulfilled bool + RandomWords []*big.Int + RequestTimestamp *big.Int + FulfilmentTimestamp *big.Int + RequestBlockNumber *big.Int + FulfilmentBlockNumber *big.Int +} +type SRequests struct { + Paid *big.Int + Fulfilled bool + RequestTimestamp *big.Int + FulfilmentTimestamp *big.Int + RequestBlockNumber *big.Int + FulfilmentBlockNumber *big.Int +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumer) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2WrapperLoadTestConsumer.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2WrapperLoadTestConsumer.ParseOwnershipTransferRequested(log) + case _VRFV2WrapperLoadTestConsumer.abi.Events["OwnershipTransferred"].ID: + return _VRFV2WrapperLoadTestConsumer.ParseOwnershipTransferred(log) + case _VRFV2WrapperLoadTestConsumer.abi.Events["WrappedRequestFulfilled"].ID: + return _VRFV2WrapperLoadTestConsumer.ParseWrappedRequestFulfilled(log) + case _VRFV2WrapperLoadTestConsumer.abi.Events["WrapperRequestMade"].ID: + return _VRFV2WrapperLoadTestConsumer.ParseWrapperRequestMade(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2WrapperLoadTestConsumerOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFV2WrapperLoadTestConsumerOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFV2WrapperLoadTestConsumerWrappedRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0x6c84e12b4c188e61f1b4727024a5cf05c025fa58467e5eedf763c0744c89da7b") +} + +func 
(VRFV2WrapperLoadTestConsumerWrapperRequestMade) Topic() common.Hash { + return common.HexToHash("0x5f56b4c20db9f5b294cbf6f681368de4a992a27e2de2ee702dcf2cbbfa791ec4") +} + +func (_VRFV2WrapperLoadTestConsumer *VRFV2WrapperLoadTestConsumer) Address() common.Address { + return _VRFV2WrapperLoadTestConsumer.address +} + +type VRFV2WrapperLoadTestConsumerInterface interface { + GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) + + IVrfV2Wrapper(opts *bind.CallOpts) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SAverageFulfillmentInMillions(opts *bind.CallOpts) (*big.Int, error) + + SFastestFulfillment(opts *bind.CallOpts) (*big.Int, error) + + SLastRequestId(opts *bind.CallOpts) (*big.Int, error) + + SRequestCount(opts *bind.CallOpts) (*big.Int, error) + + SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) + + SResponseCount(opts *bind.CallOpts) (*big.Int, error) + + SSlowestFulfillment(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + MakeRequests(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, _requestCount uint16) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, _requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) + + Reset(opts *bind.TransactOpts) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + WithdrawLink(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + Receive(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2WrapperLoadTestConsumerOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- 
*VRFV2WrapperLoadTestConsumerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFV2WrapperLoadTestConsumerOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2WrapperLoadTestConsumerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperLoadTestConsumerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2WrapperLoadTestConsumerOwnershipTransferred, error) + + FilterWrappedRequestFulfilled(opts *bind.FilterOpts) (*VRFV2WrapperLoadTestConsumerWrappedRequestFulfilledIterator, error) + + WatchWrappedRequestFulfilled(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperLoadTestConsumerWrappedRequestFulfilled) (event.Subscription, error) + + ParseWrappedRequestFulfilled(log types.Log) (*VRFV2WrapperLoadTestConsumerWrappedRequestFulfilled, error) + + FilterWrapperRequestMade(opts *bind.FilterOpts, requestId []*big.Int) (*VRFV2WrapperLoadTestConsumerWrapperRequestMadeIterator, error) + + WatchWrapperRequestMade(opts *bind.WatchOpts, sink chan<- *VRFV2WrapperLoadTestConsumerWrapperRequestMade, requestId []*big.Int) (event.Subscription, error) + + ParseWrapperRequestMade(log types.Log) (*VRFV2WrapperLoadTestConsumerWrapperRequestMade, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2plus_client/vrfv2plus_client.go b/core/gethwrappers/generated/vrfv2plus_client/vrfv2plus_client.go new file mode 100644 index 00000000..f6a65a63 --- /dev/null +++ b/core/gethwrappers/generated/vrfv2plus_client/vrfv2plus_client.go @@ -0,0 +1,202 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrfv2plus_client + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2PlusClientMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"EXTRA_ARGS_V1_TAG\",\"outputs\":[{\"internalType\":\"bytes4\",\"name\":\"\",\"type\":\"bytes4\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: "0x60a0610038600b82828239805160001a607314602b57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe730000000000000000000000000000000000000000301460806040526004361060335760003560e01c8063f7514ab4146038575b600080fd5b605e7f92fd13387c7fe7befbc38d303d6468778fb9731bc4583f17d92989c6fcfdeaaa81565b6040517fffffffff00000000000000000000000000000000000000000000000000000000909116815260200160405180910390f3fea164736f6c6343000806000a", +} + +var VRFV2PlusClientABI = VRFV2PlusClientMetaData.ABI + +var VRFV2PlusClientBin = VRFV2PlusClientMetaData.Bin + +func DeployVRFV2PlusClient(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRFV2PlusClient, error) { + parsed, err := VRFV2PlusClientMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2PlusClientBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2PlusClient{address: address, abi: *parsed, VRFV2PlusClientCaller: 
VRFV2PlusClientCaller{contract: contract}, VRFV2PlusClientTransactor: VRFV2PlusClientTransactor{contract: contract}, VRFV2PlusClientFilterer: VRFV2PlusClientFilterer{contract: contract}}, nil +} + +type VRFV2PlusClient struct { + address common.Address + abi abi.ABI + VRFV2PlusClientCaller + VRFV2PlusClientTransactor + VRFV2PlusClientFilterer +} + +type VRFV2PlusClientCaller struct { + contract *bind.BoundContract +} + +type VRFV2PlusClientTransactor struct { + contract *bind.BoundContract +} + +type VRFV2PlusClientFilterer struct { + contract *bind.BoundContract +} + +type VRFV2PlusClientSession struct { + Contract *VRFV2PlusClient + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2PlusClientCallerSession struct { + Contract *VRFV2PlusClientCaller + CallOpts bind.CallOpts +} + +type VRFV2PlusClientTransactorSession struct { + Contract *VRFV2PlusClientTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2PlusClientRaw struct { + Contract *VRFV2PlusClient +} + +type VRFV2PlusClientCallerRaw struct { + Contract *VRFV2PlusClientCaller +} + +type VRFV2PlusClientTransactorRaw struct { + Contract *VRFV2PlusClientTransactor +} + +func NewVRFV2PlusClient(address common.Address, backend bind.ContractBackend) (*VRFV2PlusClient, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2PlusClientABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2PlusClient(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2PlusClient{address: address, abi: abi, VRFV2PlusClientCaller: VRFV2PlusClientCaller{contract: contract}, VRFV2PlusClientTransactor: VRFV2PlusClientTransactor{contract: contract}, VRFV2PlusClientFilterer: VRFV2PlusClientFilterer{contract: contract}}, nil +} + +func NewVRFV2PlusClientCaller(address common.Address, caller bind.ContractCaller) (*VRFV2PlusClientCaller, error) { + contract, err := bindVRFV2PlusClient(address, caller, nil, nil) + if err != nil { + return nil, err + } + return 
&VRFV2PlusClientCaller{contract: contract}, nil +} + +func NewVRFV2PlusClientTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2PlusClientTransactor, error) { + contract, err := bindVRFV2PlusClient(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusClientTransactor{contract: contract}, nil +} + +func NewVRFV2PlusClientFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2PlusClientFilterer, error) { + contract, err := bindVRFV2PlusClient(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2PlusClientFilterer{contract: contract}, nil +} + +func bindVRFV2PlusClient(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2PlusClientMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2PlusClient *VRFV2PlusClientRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusClient.Contract.VRFV2PlusClientCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusClient *VRFV2PlusClientRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusClient.Contract.VRFV2PlusClientTransactor.contract.Transfer(opts) +} + +func (_VRFV2PlusClient *VRFV2PlusClientRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusClient.Contract.VRFV2PlusClientTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusClient *VRFV2PlusClientCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusClient.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_VRFV2PlusClient *VRFV2PlusClientTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusClient.Contract.contract.Transfer(opts) +} + +func (_VRFV2PlusClient *VRFV2PlusClientTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusClient.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusClient *VRFV2PlusClientCaller) EXTRAARGSV1TAG(opts *bind.CallOpts) ([4]byte, error) { + var out []interface{} + err := _VRFV2PlusClient.contract.Call(opts, &out, "EXTRA_ARGS_V1_TAG") + + if err != nil { + return *new([4]byte), err + } + + out0 := *abi.ConvertType(out[0], new([4]byte)).(*[4]byte) + + return out0, err + +} + +func (_VRFV2PlusClient *VRFV2PlusClientSession) EXTRAARGSV1TAG() ([4]byte, error) { + return _VRFV2PlusClient.Contract.EXTRAARGSV1TAG(&_VRFV2PlusClient.CallOpts) +} + +func (_VRFV2PlusClient *VRFV2PlusClientCallerSession) EXTRAARGSV1TAG() ([4]byte, error) { + return _VRFV2PlusClient.Contract.EXTRAARGSV1TAG(&_VRFV2PlusClient.CallOpts) +} + +func (_VRFV2PlusClient *VRFV2PlusClient) Address() common.Address { + return _VRFV2PlusClient.address +} + +type VRFV2PlusClientInterface interface { + EXTRAARGSV1TAG(opts *bind.CallOpts) ([4]byte, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2plus_consumer_example/vrfv2plus_consumer_example.go b/core/gethwrappers/generated/vrfv2plus_consumer_example/vrfv2plus_consumer_example.go new file mode 100644 index 00000000..929ab433 --- /dev/null +++ b/core/gethwrappers/generated/vrfv2plus_consumer_example/vrfv2plus_consumer_example.go @@ -0,0 +1,849 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrfv2plus_consumer_example + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2PlusConsumerExampleMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"}],\"name\":\"OnlyOwnerOrCoordinator\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroAddress\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransfer
red\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"createSubscriptionAndFund\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"createSubscriptionAndFundNative\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"idx\",\"type\":\"uint256\"}],\"name\":\"getRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"randomWord\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"bool\",\"name\":\"nativePayment\",\"type\":\"bool\"}],\"name\":\"requestRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_linkToken\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_recentRequestId\",\"outputs\":[{\"intern
alType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requests\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_subId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_vrfCoordinator\",\"outputs\":[{\"internalType\":\"contractIVRFCoordinatorV2Plus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_vrfCoordinatorApiV1\",\"outputs\":[{\"internalType\":\"contractIVRFCoordinatorV2Plus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"}],\"name\":\"setCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"setSubId\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"topUpSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"topUpSubscriptionNative\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"co
nsumers\",\"type\":\"address[]\"}],\"name\":\"updateSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60806040523480156200001157600080fd5b50604051620019c6380380620019c68339810160408190526200003491620001cc565b8133806000816200008c5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000bf57620000bf8162000103565b5050600280546001600160a01b03199081166001600160a01b0394851617909155600580548216958416959095179094555060038054909316911617905562000204565b6001600160a01b0381163314156200015e5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000083565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001c757600080fd5b919050565b60008060408385031215620001e057600080fd5b620001eb83620001af565b9150620001fb60208401620001af565b90509250929050565b6117b280620002146000396000f3fe6080604052600436106101445760003560e01c806380980043116100c0578063b96dbba711610074578063de367c8e11610059578063de367c8e146103c0578063eff27017146103ed578063f2fde38b1461040d57600080fd5b8063b96dbba714610398578063cf62c8ab146103a057600080fd5b80638ea98117116100a55780638ea98117146102c45780639eccacf6146102e4578063a168fa891461031157600080fd5b806380980043146102795780638da5cb5b1461029957600080fd5b806336bfffed11610117578063706da1ca116100fc578063706da1ca146101fc5780637725135b1461021257806379ba50971461026457600080fd5b806336bfffed146101c65780635d7d53e3146101e657600080fd5b80631d2b2afd146101495780631fe543e31461015357806329e5d831146101735780632fa4e442146101a6575b600080fd5b61015161042d565b005b34801561015f57600080fd5b5061015161016e3660046113eb565b610528565b34801561017f57600080fd5b5061019361018e3
6600461148f565b6105a9565b6040519081526020015b60405180910390f35b3480156101b257600080fd5b506101516101c136600461151c565b6106e6565b3480156101d257600080fd5b506101516101e13660046112f8565b610808565b3480156101f257600080fd5b5061019360045481565b34801561020857600080fd5b5061019360065481565b34801561021e57600080fd5b5060035461023f9073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161019d565b34801561027057600080fd5b50610151610940565b34801561028557600080fd5b506101516102943660046113b9565b600655565b3480156102a557600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff1661023f565b3480156102d057600080fd5b506101516102df3660046112d6565b610a3d565b3480156102f057600080fd5b5060025461023f9073ffffffffffffffffffffffffffffffffffffffff1681565b34801561031d57600080fd5b5061036661032c3660046113b9565b6007602052600090815260409020805460019091015460ff821691610100900473ffffffffffffffffffffffffffffffffffffffff169083565b60408051931515845273ffffffffffffffffffffffffffffffffffffffff90921660208401529082015260600161019d565b610151610b48565b3480156103ac57600080fd5b506101516103bb36600461151c565b610bae565b3480156103cc57600080fd5b5060055461023f9073ffffffffffffffffffffffffffffffffffffffff1681565b3480156103f957600080fd5b506101516104083660046114b1565b610bf5565b34801561041957600080fd5b506101516104283660046112d6565b610de0565b60065461049b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f737562206e6f742073657400000000000000000000000000000000000000000060448201526064015b60405180910390fd5b6005546006546040517f95b55cfc000000000000000000000000000000000000000000000000000000008152600481019190915273ffffffffffffffffffffffffffffffffffffffff909116906395b55cfc9034906024015b6000604051808303818588803b15801561050d57600080fd5b505af1158015610521573d6000803e3d6000fd5b5050505050565b60025473ffffffffffffffffffffffffffffffffffffffff16331461059b576002546040517f1cf993f4000000000000000000000000000000000000000000000000000000008
15233600482015273ffffffffffffffffffffffffffffffffffffffff9091166024820152604401610492565b6105a58282610df4565b5050565b60008281526007602090815260408083208151608081018352815460ff811615158252610100900473ffffffffffffffffffffffffffffffffffffffff16818501526001820154818401526002820180548451818702810187019095528085528695929460608601939092919083018282801561064557602002820191906000526020600020905b815481526020019060010190808311610631575b50505050508152505090508060400151600014156106bf576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265717565737420494420697320696e636f72726563740000000000000000006044820152606401610492565b806060015183815181106106d5576106d5611739565b602002602001015191505092915050565b60065461074f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f737562206e6f74207365740000000000000000000000000000000000000000006044820152606401610492565b60035460025460065460408051602081019290925273ffffffffffffffffffffffffffffffffffffffff93841693634000aea09316918591015b6040516020818303038152906040526040518463ffffffff1660e01b81526004016107b6939291906115b5565b602060405180830381600087803b1580156107d057600080fd5b505af11580156107e4573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906105a5919061139c565b600654610871576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f7375624944206e6f7420736574000000000000000000000000000000000000006044820152606401610492565b60005b81518110156105a557600554600654835173ffffffffffffffffffffffffffffffffffffffff9092169163bec4c08c91908590859081106108b7576108b7611739565b60200260200101516040518363ffffffff1660e01b81526004016108fb92919091825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b600060405180830381600087803b15801561091557600080fd5b505af1158015610929573d6000803e3d6000fd5b505050508080610938906116d9565b915050610874565b60015473fffffffffffffffffffffffffffffffffffff
fff1633146109c1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610492565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60005473ffffffffffffffffffffffffffffffffffffffff163314801590610a7d575060025473ffffffffffffffffffffffffffffffffffffffff163314155b15610b015733610aa260005473ffffffffffffffffffffffffffffffffffffffff1690565b6002546040517f061db9c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff93841660048201529183166024830152919091166044820152606401610492565b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b610b50610ebf565b506005546006546040517f95b55cfc000000000000000000000000000000000000000000000000000000008152600481019190915273ffffffffffffffffffffffffffffffffffffffff909116906395b55cfc9034906024016104f4565b610bb6610ebf565b5060035460025460065460408051602081019290925273ffffffffffffffffffffffffffffffffffffffff93841693634000aea0931691859101610789565b60006040518060c0016040528084815260200160065481526020018661ffff1681526020018763ffffffff1681526020018563ffffffff168152602001610c4b6040518060200160405280861515815250611004565b90526002546040517f9b1c385e00000000000000000000000000000000000000000000000000000000815291925060009173ffffffffffffffffffffffffffffffffffffffff90911690639b1c385e90610ca9908590600401611601565b602060405180830381600087803b158015610cc357600080fd5b505af1158015610cd7573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610cfb91906113d2565b604080516080810182526000808252336020808401918252838501868152855184815280830187526060860190815287855260078352959093208451815493517ffffff
fffffffffffffffff0000000000000000000000000000000000000000009094169015157fffffffffffffffffffffff0000000000000000000000000000000000000000ff161761010073ffffffffffffffffffffffffffffffffffffffff9094169390930292909217825591516001820155925180519495509193849392610dce926002850192910190611239565b50505060049190915550505050505050565b610de86110c0565b610df181611143565b50565b6004548214610e5f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f7265717565737420494420697320696e636f72726563740000000000000000006044820152606401610492565b60008281526007602090815260409091208251610e8492600290920191840190611239565b5050600090815260076020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055565b600060065460001415610ffd57600560009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b158015610f3657600080fd5b505af1158015610f4a573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610f6e91906113d2565b60068190556005546040517fbec4c08c000000000000000000000000000000000000000000000000000000008152600481019290925230602483015273ffffffffffffffffffffffffffffffffffffffff169063bec4c08c90604401600060405180830381600087803b158015610fe457600080fd5b505af1158015610ff8573d6000803e3d6000fd5b505050505b5060065490565b60607f92fd13387c7fe7befbc38d303d6468778fb9731bc4583f17d92989c6fcfdeaaa8260405160240161103d91511515815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009093169290921790915292915050565b60005473ffffffffffffffffffffffffffffffffffffffff163314611141576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f7
76e6572000000000000000000006044820152606401610492565b565b73ffffffffffffffffffffffffffffffffffffffff81163314156111c3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610492565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b828054828255906000526020600020908101928215611274579160200282015b82811115611274578251825591602001919060010190611259565b50611280929150611284565b5090565b5b808211156112805760008155600101611285565b803573ffffffffffffffffffffffffffffffffffffffff811681146112bd57600080fd5b919050565b803563ffffffff811681146112bd57600080fd5b6000602082840312156112e857600080fd5b6112f182611299565b9392505050565b6000602080838503121561130b57600080fd5b823567ffffffffffffffff81111561132257600080fd5b8301601f8101851361133357600080fd5b8035611346611341826116b5565b611666565b80828252848201915084840188868560051b870101111561136657600080fd5b600094505b838510156113905761137c81611299565b83526001949094019391850191850161136b565b50979650505050505050565b6000602082840312156113ae57600080fd5b81516112f181611797565b6000602082840312156113cb57600080fd5b5035919050565b6000602082840312156113e457600080fd5b5051919050565b600080604083850312156113fe57600080fd5b8235915060208084013567ffffffffffffffff81111561141d57600080fd5b8401601f8101861361142e57600080fd5b803561143c611341826116b5565b80828252848201915084840189868560051b870101111561145c57600080fd5b600094505b8385101561147f578035835260019490940193918501918501611461565b5080955050505050509250929050565b600080604083850312156114a257600080fd5b50508035926020909101359150565b600080600080600060a086880312156114c957600080fd5b6114d2866112c2565b9450602086013561ffff811681146114e957600080fd5b93506114f7604087016112c2565b925060608601359150608086013561150e81611797565b8
09150509295509295909350565b60006020828403121561152e57600080fd5b81356bffffffffffffffffffffffff811681146112f157600080fd5b6000815180845260005b8181101561157057602081850181015186830182015201611554565b81811115611582576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b73ffffffffffffffffffffffffffffffffffffffff841681526bffffffffffffffffffffffff831660208201526060604082015260006115f8606083018461154a565b95945050505050565b60208152815160208201526020820151604082015261ffff60408301511660608201526000606083015163ffffffff80821660808501528060808601511660a0850152505060a083015160c08084015261165e60e084018261154a565b949350505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156116ad576116ad611768565b604052919050565b600067ffffffffffffffff8211156116cf576116cf611768565b5060051b60200190565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415611732577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b8015158114610df157600080fdfea164736f6c6343000806000a", +} + +var VRFV2PlusConsumerExampleABI = VRFV2PlusConsumerExampleMetaData.ABI + +var VRFV2PlusConsumerExampleBin = VRFV2PlusConsumerExampleMetaData.Bin + +func DeployVRFV2PlusConsumerExample(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator common.Address, link common.Address) (common.Address, *types.Transaction, *VRFV2PlusConsumerExample, error) { + parsed, err := VRFV2PlusConsumerExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, 
*parsed, common.FromHex(VRFV2PlusConsumerExampleBin), backend, vrfCoordinator, link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2PlusConsumerExample{address: address, abi: *parsed, VRFV2PlusConsumerExampleCaller: VRFV2PlusConsumerExampleCaller{contract: contract}, VRFV2PlusConsumerExampleTransactor: VRFV2PlusConsumerExampleTransactor{contract: contract}, VRFV2PlusConsumerExampleFilterer: VRFV2PlusConsumerExampleFilterer{contract: contract}}, nil +} + +type VRFV2PlusConsumerExample struct { + address common.Address + abi abi.ABI + VRFV2PlusConsumerExampleCaller + VRFV2PlusConsumerExampleTransactor + VRFV2PlusConsumerExampleFilterer +} + +type VRFV2PlusConsumerExampleCaller struct { + contract *bind.BoundContract +} + +type VRFV2PlusConsumerExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFV2PlusConsumerExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFV2PlusConsumerExampleSession struct { + Contract *VRFV2PlusConsumerExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2PlusConsumerExampleCallerSession struct { + Contract *VRFV2PlusConsumerExampleCaller + CallOpts bind.CallOpts +} + +type VRFV2PlusConsumerExampleTransactorSession struct { + Contract *VRFV2PlusConsumerExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2PlusConsumerExampleRaw struct { + Contract *VRFV2PlusConsumerExample +} + +type VRFV2PlusConsumerExampleCallerRaw struct { + Contract *VRFV2PlusConsumerExampleCaller +} + +type VRFV2PlusConsumerExampleTransactorRaw struct { + Contract *VRFV2PlusConsumerExampleTransactor +} + +func NewVRFV2PlusConsumerExample(address common.Address, backend bind.ContractBackend) (*VRFV2PlusConsumerExample, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2PlusConsumerExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2PlusConsumerExample(address, backend, backend, backend) + if err != nil { + return nil, 
err + } + return &VRFV2PlusConsumerExample{address: address, abi: abi, VRFV2PlusConsumerExampleCaller: VRFV2PlusConsumerExampleCaller{contract: contract}, VRFV2PlusConsumerExampleTransactor: VRFV2PlusConsumerExampleTransactor{contract: contract}, VRFV2PlusConsumerExampleFilterer: VRFV2PlusConsumerExampleFilterer{contract: contract}}, nil +} + +func NewVRFV2PlusConsumerExampleCaller(address common.Address, caller bind.ContractCaller) (*VRFV2PlusConsumerExampleCaller, error) { + contract, err := bindVRFV2PlusConsumerExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusConsumerExampleCaller{contract: contract}, nil +} + +func NewVRFV2PlusConsumerExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2PlusConsumerExampleTransactor, error) { + contract, err := bindVRFV2PlusConsumerExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusConsumerExampleTransactor{contract: contract}, nil +} + +func NewVRFV2PlusConsumerExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2PlusConsumerExampleFilterer, error) { + contract, err := bindVRFV2PlusConsumerExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2PlusConsumerExampleFilterer{contract: contract}, nil +} + +func bindVRFV2PlusConsumerExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2PlusConsumerExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusConsumerExample.Contract.VRFV2PlusConsumerExampleCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.VRFV2PlusConsumerExampleTransactor.contract.Transfer(opts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.VRFV2PlusConsumerExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusConsumerExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.contract.Transfer(opts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCaller) GetRandomness(opts *bind.CallOpts, requestId *big.Int, idx *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusConsumerExample.contract.Call(opts, &out, "getRandomness", requestId, idx) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) GetRandomness(requestId *big.Int, idx *big.Int) (*big.Int, error) { + return _VRFV2PlusConsumerExample.Contract.GetRandomness(&_VRFV2PlusConsumerExample.CallOpts, requestId, idx) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCallerSession) GetRandomness(requestId *big.Int, idx *big.Int) (*big.Int, error) { + return _VRFV2PlusConsumerExample.Contract.GetRandomness(&_VRFV2PlusConsumerExample.CallOpts, requestId, idx) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusConsumerExample.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) Owner() (common.Address, error) { + return _VRFV2PlusConsumerExample.Contract.Owner(&_VRFV2PlusConsumerExample.CallOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCallerSession) Owner() (common.Address, error) { + return _VRFV2PlusConsumerExample.Contract.Owner(&_VRFV2PlusConsumerExample.CallOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCaller) SLinkToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusConsumerExample.contract.Call(opts, &out, "s_linkToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := 
*abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) SLinkToken() (common.Address, error) { + return _VRFV2PlusConsumerExample.Contract.SLinkToken(&_VRFV2PlusConsumerExample.CallOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCallerSession) SLinkToken() (common.Address, error) { + return _VRFV2PlusConsumerExample.Contract.SLinkToken(&_VRFV2PlusConsumerExample.CallOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCaller) SRecentRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusConsumerExample.contract.Call(opts, &out, "s_recentRequestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) SRecentRequestId() (*big.Int, error) { + return _VRFV2PlusConsumerExample.Contract.SRecentRequestId(&_VRFV2PlusConsumerExample.CallOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCallerSession) SRecentRequestId() (*big.Int, error) { + return _VRFV2PlusConsumerExample.Contract.SRecentRequestId(&_VRFV2PlusConsumerExample.CallOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCaller) SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) { + var out []interface{} + err := _VRFV2PlusConsumerExample.contract.Call(opts, &out, "s_requests", arg0) + + outstruct := new(SRequests) + if err != nil { + return *outstruct, err + } + + outstruct.Fulfilled = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.Requester = *abi.ConvertType(out[1], new(common.Address)).(*common.Address) + outstruct.RequestId = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) SRequests(arg0 *big.Int) (SRequests, + + 
error) { + return _VRFV2PlusConsumerExample.Contract.SRequests(&_VRFV2PlusConsumerExample.CallOpts, arg0) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCallerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2PlusConsumerExample.Contract.SRequests(&_VRFV2PlusConsumerExample.CallOpts, arg0) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCaller) SSubId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusConsumerExample.contract.Call(opts, &out, "s_subId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) SSubId() (*big.Int, error) { + return _VRFV2PlusConsumerExample.Contract.SSubId(&_VRFV2PlusConsumerExample.CallOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCallerSession) SSubId() (*big.Int, error) { + return _VRFV2PlusConsumerExample.Contract.SSubId(&_VRFV2PlusConsumerExample.CallOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCaller) SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusConsumerExample.contract.Call(opts, &out, "s_vrfCoordinator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusConsumerExample.Contract.SVrfCoordinator(&_VRFV2PlusConsumerExample.CallOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCallerSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusConsumerExample.Contract.SVrfCoordinator(&_VRFV2PlusConsumerExample.CallOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCaller) SVrfCoordinatorApiV1(opts 
*bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusConsumerExample.contract.Call(opts, &out, "s_vrfCoordinatorApiV1") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) SVrfCoordinatorApiV1() (common.Address, error) { + return _VRFV2PlusConsumerExample.Contract.SVrfCoordinatorApiV1(&_VRFV2PlusConsumerExample.CallOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleCallerSession) SVrfCoordinatorApiV1() (common.Address, error) { + return _VRFV2PlusConsumerExample.Contract.SVrfCoordinatorApiV1(&_VRFV2PlusConsumerExample.CallOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.AcceptOwnership(&_VRFV2PlusConsumerExample.TransactOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.AcceptOwnership(&_VRFV2PlusConsumerExample.TransactOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactor) CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.contract.Transact(opts, "createSubscriptionAndFund", amount) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.CreateSubscriptionAndFund(&_VRFV2PlusConsumerExample.TransactOpts, amount) +} + +func 
(_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactorSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.CreateSubscriptionAndFund(&_VRFV2PlusConsumerExample.TransactOpts, amount) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactor) CreateSubscriptionAndFundNative(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.contract.Transact(opts, "createSubscriptionAndFundNative") +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) CreateSubscriptionAndFundNative() (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.CreateSubscriptionAndFundNative(&_VRFV2PlusConsumerExample.TransactOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactorSession) CreateSubscriptionAndFundNative() (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.CreateSubscriptionAndFundNative(&_VRFV2PlusConsumerExample.TransactOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.RawFulfillRandomWords(&_VRFV2PlusConsumerExample.TransactOpts, requestId, randomWords) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.RawFulfillRandomWords(&_VRFV2PlusConsumerExample.TransactOpts, requestId, randomWords) +} + +func (_VRFV2PlusConsumerExample 
*VRFV2PlusConsumerExampleTransactor) RequestRandomWords(opts *bind.TransactOpts, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte, nativePayment bool) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.contract.Transact(opts, "requestRandomWords", callbackGasLimit, requestConfirmations, numWords, keyHash, nativePayment) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) RequestRandomWords(callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte, nativePayment bool) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.RequestRandomWords(&_VRFV2PlusConsumerExample.TransactOpts, callbackGasLimit, requestConfirmations, numWords, keyHash, nativePayment) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactorSession) RequestRandomWords(callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte, nativePayment bool) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.RequestRandomWords(&_VRFV2PlusConsumerExample.TransactOpts, callbackGasLimit, requestConfirmations, numWords, keyHash, nativePayment) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactor) SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.contract.Transact(opts, "setCoordinator", _vrfCoordinator) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.SetCoordinator(&_VRFV2PlusConsumerExample.TransactOpts, _vrfCoordinator) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactorSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.SetCoordinator(&_VRFV2PlusConsumerExample.TransactOpts, 
_vrfCoordinator) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactor) SetSubId(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.contract.Transact(opts, "setSubId", subId) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) SetSubId(subId *big.Int) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.SetSubId(&_VRFV2PlusConsumerExample.TransactOpts, subId) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactorSession) SetSubId(subId *big.Int) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.SetSubId(&_VRFV2PlusConsumerExample.TransactOpts, subId) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactor) TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.contract.Transact(opts, "topUpSubscription", amount) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.TopUpSubscription(&_VRFV2PlusConsumerExample.TransactOpts, amount) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactorSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.TopUpSubscription(&_VRFV2PlusConsumerExample.TransactOpts, amount) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactor) TopUpSubscriptionNative(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.contract.Transact(opts, "topUpSubscriptionNative") +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) TopUpSubscriptionNative() (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.TopUpSubscriptionNative(&_VRFV2PlusConsumerExample.TransactOpts) +} + +func (_VRFV2PlusConsumerExample 
*VRFV2PlusConsumerExampleTransactorSession) TopUpSubscriptionNative() (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.TopUpSubscriptionNative(&_VRFV2PlusConsumerExample.TransactOpts) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.TransferOwnership(&_VRFV2PlusConsumerExample.TransactOpts, to) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.TransferOwnership(&_VRFV2PlusConsumerExample.TransactOpts, to) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactor) UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.contract.Transact(opts, "updateSubscription", consumers) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.UpdateSubscription(&_VRFV2PlusConsumerExample.TransactOpts, consumers) +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleTransactorSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFV2PlusConsumerExample.Contract.UpdateSubscription(&_VRFV2PlusConsumerExample.TransactOpts, consumers) +} + +type VRFV2PlusConsumerExampleOwnershipTransferRequestedIterator struct { + Event *VRFV2PlusConsumerExampleOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub 
ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusConsumerExampleOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusConsumerExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusConsumerExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusConsumerExampleOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusConsumerExampleOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusConsumerExampleOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusConsumerExampleOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusConsumerExample.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusConsumerExampleOwnershipTransferRequestedIterator{contract: _VRFV2PlusConsumerExample.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func 
(_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusConsumerExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusConsumerExample.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusConsumerExampleOwnershipTransferRequested) + if err := _VRFV2PlusConsumerExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusConsumerExampleOwnershipTransferRequested, error) { + event := new(VRFV2PlusConsumerExampleOwnershipTransferRequested) + if err := _VRFV2PlusConsumerExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusConsumerExampleOwnershipTransferredIterator struct { + Event *VRFV2PlusConsumerExampleOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusConsumerExampleOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case 
log := <-it.logs: + it.Event = new(VRFV2PlusConsumerExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusConsumerExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusConsumerExampleOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusConsumerExampleOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusConsumerExampleOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusConsumerExampleOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusConsumerExample.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusConsumerExampleOwnershipTransferredIterator{contract: _VRFV2PlusConsumerExample.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusConsumerExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, 
fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusConsumerExample.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusConsumerExampleOwnershipTransferred) + if err := _VRFV2PlusConsumerExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExampleFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2PlusConsumerExampleOwnershipTransferred, error) { + event := new(VRFV2PlusConsumerExampleOwnershipTransferred) + if err := _VRFV2PlusConsumerExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type SRequests struct { + Fulfilled bool + Requester common.Address + RequestId *big.Int +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExample) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2PlusConsumerExample.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2PlusConsumerExample.ParseOwnershipTransferRequested(log) + case _VRFV2PlusConsumerExample.abi.Events["OwnershipTransferred"].ID: + return _VRFV2PlusConsumerExample.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2PlusConsumerExampleOwnershipTransferRequested) Topic() common.Hash { + return 
common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFV2PlusConsumerExampleOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_VRFV2PlusConsumerExample *VRFV2PlusConsumerExample) Address() common.Address { + return _VRFV2PlusConsumerExample.address +} + +type VRFV2PlusConsumerExampleInterface interface { + GetRandomness(opts *bind.CallOpts, requestId *big.Int, idx *big.Int) (*big.Int, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SLinkToken(opts *bind.CallOpts) (common.Address, error) + + SRecentRequestId(opts *bind.CallOpts) (*big.Int, error) + + SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) + + SSubId(opts *bind.CallOpts) (*big.Int, error) + + SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) + + SVrfCoordinatorApiV1(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + CreateSubscriptionAndFundNative(opts *bind.TransactOpts) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomWords(opts *bind.TransactOpts, callbackGasLimit uint32, requestConfirmations uint16, numWords uint32, keyHash [32]byte, nativePayment bool) (*types.Transaction, error) + + SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) + + SetSubId(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) + + TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + TopUpSubscriptionNative(opts *bind.TransactOpts) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) 
+ + UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusConsumerExampleOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusConsumerExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusConsumerExampleOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusConsumerExampleOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusConsumerExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2PlusConsumerExampleOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2plus_malicious_migrator/vrfv2plus_malicious_migrator.go b/core/gethwrappers/generated/vrfv2plus_malicious_migrator/vrfv2plus_malicious_migrator.go new file mode 100644 index 00000000..03c5ffd8 --- /dev/null +++ b/core/gethwrappers/generated/vrfv2plus_malicious_migrator/vrfv2plus_malicious_migrator.go @@ -0,0 +1,192 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrfv2plus_malicious_migrator + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2PlusMaliciousMigratorMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"setCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b506040516102e03803806102e083398101604081905261002f91610054565b600080546001600160a01b0319166001600160a01b0392909216919091179055610084565b60006020828403121561006657600080fd5b81516001600160a01b038116811461007d57600080fd5b9392505050565b61024d806100936000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c80638ea9811714610030575b600080fd5b61004361003e36600461012a565b610045565b005b600080546040805160c081018252838152602080820185905281830185905260608201859052608082018590528251908101835293845260a0810193909352517f9b1c385e00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911691639b1c385e916100d49190600401610180565b602060405180830381600087803b1580156100ee57600080fd5b505af1158015610102573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906101269190610167565b5050565b60006020828403121561013c57600080fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811461016057600080fd5b939250
5050565b60006020828403121561017957600080fd5b5051919050565b6000602080835283518184015280840151604084015261ffff6040850151166060840152606084015163ffffffff80821660808601528060808701511660a0860152505060a084015160c08085015280518060e086015260005b818110156101f757828101840151868201610100015283016101da565b8181111561020a57600061010083880101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016939093016101000194935050505056fea164736f6c6343000806000a", +} + +var VRFV2PlusMaliciousMigratorABI = VRFV2PlusMaliciousMigratorMetaData.ABI + +var VRFV2PlusMaliciousMigratorBin = VRFV2PlusMaliciousMigratorMetaData.Bin + +func DeployVRFV2PlusMaliciousMigrator(auth *bind.TransactOpts, backend bind.ContractBackend, _vrfCoordinator common.Address) (common.Address, *types.Transaction, *VRFV2PlusMaliciousMigrator, error) { + parsed, err := VRFV2PlusMaliciousMigratorMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2PlusMaliciousMigratorBin), backend, _vrfCoordinator) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2PlusMaliciousMigrator{address: address, abi: *parsed, VRFV2PlusMaliciousMigratorCaller: VRFV2PlusMaliciousMigratorCaller{contract: contract}, VRFV2PlusMaliciousMigratorTransactor: VRFV2PlusMaliciousMigratorTransactor{contract: contract}, VRFV2PlusMaliciousMigratorFilterer: VRFV2PlusMaliciousMigratorFilterer{contract: contract}}, nil +} + +type VRFV2PlusMaliciousMigrator struct { + address common.Address + abi abi.ABI + VRFV2PlusMaliciousMigratorCaller + VRFV2PlusMaliciousMigratorTransactor + VRFV2PlusMaliciousMigratorFilterer +} + +type VRFV2PlusMaliciousMigratorCaller struct { + contract *bind.BoundContract +} + +type VRFV2PlusMaliciousMigratorTransactor struct { + contract *bind.BoundContract +} + +type 
VRFV2PlusMaliciousMigratorFilterer struct { + contract *bind.BoundContract +} + +type VRFV2PlusMaliciousMigratorSession struct { + Contract *VRFV2PlusMaliciousMigrator + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2PlusMaliciousMigratorCallerSession struct { + Contract *VRFV2PlusMaliciousMigratorCaller + CallOpts bind.CallOpts +} + +type VRFV2PlusMaliciousMigratorTransactorSession struct { + Contract *VRFV2PlusMaliciousMigratorTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2PlusMaliciousMigratorRaw struct { + Contract *VRFV2PlusMaliciousMigrator +} + +type VRFV2PlusMaliciousMigratorCallerRaw struct { + Contract *VRFV2PlusMaliciousMigratorCaller +} + +type VRFV2PlusMaliciousMigratorTransactorRaw struct { + Contract *VRFV2PlusMaliciousMigratorTransactor +} + +func NewVRFV2PlusMaliciousMigrator(address common.Address, backend bind.ContractBackend) (*VRFV2PlusMaliciousMigrator, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2PlusMaliciousMigratorABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2PlusMaliciousMigrator(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2PlusMaliciousMigrator{address: address, abi: abi, VRFV2PlusMaliciousMigratorCaller: VRFV2PlusMaliciousMigratorCaller{contract: contract}, VRFV2PlusMaliciousMigratorTransactor: VRFV2PlusMaliciousMigratorTransactor{contract: contract}, VRFV2PlusMaliciousMigratorFilterer: VRFV2PlusMaliciousMigratorFilterer{contract: contract}}, nil +} + +func NewVRFV2PlusMaliciousMigratorCaller(address common.Address, caller bind.ContractCaller) (*VRFV2PlusMaliciousMigratorCaller, error) { + contract, err := bindVRFV2PlusMaliciousMigrator(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusMaliciousMigratorCaller{contract: contract}, nil +} + +func NewVRFV2PlusMaliciousMigratorTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2PlusMaliciousMigratorTransactor, 
error) { + contract, err := bindVRFV2PlusMaliciousMigrator(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusMaliciousMigratorTransactor{contract: contract}, nil +} + +func NewVRFV2PlusMaliciousMigratorFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2PlusMaliciousMigratorFilterer, error) { + contract, err := bindVRFV2PlusMaliciousMigrator(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2PlusMaliciousMigratorFilterer{contract: contract}, nil +} + +func bindVRFV2PlusMaliciousMigrator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2PlusMaliciousMigratorMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2PlusMaliciousMigrator *VRFV2PlusMaliciousMigratorRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusMaliciousMigrator.Contract.VRFV2PlusMaliciousMigratorCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusMaliciousMigrator *VRFV2PlusMaliciousMigratorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusMaliciousMigrator.Contract.VRFV2PlusMaliciousMigratorTransactor.contract.Transfer(opts) +} + +func (_VRFV2PlusMaliciousMigrator *VRFV2PlusMaliciousMigratorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusMaliciousMigrator.Contract.VRFV2PlusMaliciousMigratorTransactor.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2PlusMaliciousMigrator *VRFV2PlusMaliciousMigratorCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusMaliciousMigrator.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusMaliciousMigrator *VRFV2PlusMaliciousMigratorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusMaliciousMigrator.Contract.contract.Transfer(opts) +} + +func (_VRFV2PlusMaliciousMigrator *VRFV2PlusMaliciousMigratorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusMaliciousMigrator.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusMaliciousMigrator *VRFV2PlusMaliciousMigratorTransactor) SetCoordinator(opts *bind.TransactOpts, arg0 common.Address) (*types.Transaction, error) { + return _VRFV2PlusMaliciousMigrator.contract.Transact(opts, "setCoordinator", arg0) +} + +func (_VRFV2PlusMaliciousMigrator *VRFV2PlusMaliciousMigratorSession) SetCoordinator(arg0 common.Address) (*types.Transaction, error) { + return _VRFV2PlusMaliciousMigrator.Contract.SetCoordinator(&_VRFV2PlusMaliciousMigrator.TransactOpts, arg0) +} + +func (_VRFV2PlusMaliciousMigrator *VRFV2PlusMaliciousMigratorTransactorSession) SetCoordinator(arg0 common.Address) (*types.Transaction, error) { + return _VRFV2PlusMaliciousMigrator.Contract.SetCoordinator(&_VRFV2PlusMaliciousMigrator.TransactOpts, arg0) +} + +func (_VRFV2PlusMaliciousMigrator *VRFV2PlusMaliciousMigrator) Address() common.Address { + return _VRFV2PlusMaliciousMigrator.address +} + +type VRFV2PlusMaliciousMigratorInterface interface { + SetCoordinator(opts *bind.TransactOpts, arg0 common.Address) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2plus_reverting_example/vrfv2plus_reverting_example.go 
b/core/gethwrappers/generated/vrfv2plus_reverting_example/vrfv2plus_reverting_example.go new file mode 100644 index 00000000..dc09c7c1 --- /dev/null +++ b/core/gethwrappers/generated/vrfv2plus_reverting_example/vrfv2plus_reverting_example.go @@ -0,0 +1,742 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrfv2plus_reverting_example + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2PlusRevertingExampleMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"}],\"name\":\"OnlyOwnerOrCoordinator\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroAddress\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"createSubscriptionAndFund\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"
outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"minReqConfs\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"numWords\",\"type\":\"uint32\"}],\"name\":\"requestRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_gasAvailable\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_subId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_vrfCoordinator\",\"outputs\":[{\"internalType\":\"contractIVRFCoordinatorV2Plus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"}],\"name\":\"setCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint96\",\"name\":\"amount\",\"type\":\"uint96\"}],\"name\":\"topUpSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"addre
ss\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"name\":\"updateSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60806040523480156200001157600080fd5b5060405162001215380380620012158339810160408190526200003491620001c2565b8133806000816200008c5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000bf57620000bf81620000f9565b5050600280546001600160a01b039384166001600160a01b0319918216179091556005805494909316931692909217905550620001fa9050565b6001600160a01b038116331415620001545760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000083565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001bd57600080fd5b919050565b60008060408385031215620001d657600080fd5b620001e183620001a5565b9150620001f160208401620001a5565b90509250929050565b61100b806200020a6000396000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c80638ea981171161008c578063e89e106a11610066578063e89e106a146101e6578063f08c5daa146101ef578063f2fde38b146101f8578063f6eaffc81461020b57600080fd5b80638ea98117146101a05780639eccacf6146101b3578063cf62c8ab146101d357600080fd5b806336bfffed116100c857806336bfffed1461013d578063706da1ca1461015057806379ba5097146101595780638da5cb5b1461016157600080fd5b80631fe543e3146100ef5780632e75964e146101045780632fa4e4421461012a575b600080fd5b6101026100fd366004610cdf565b61021e565b005b610117610112366004610c4d565b6102a4565b6040519081526020015b60405180910390f35
b610102610138366004610d83565b6103a1565b61010261014b366004610b87565b6104c3565b61011760065481565b6101026105fb565b60005473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610121565b6101026101ae366004610b65565b6106f8565b60025461017b9073ffffffffffffffffffffffffffffffffffffffff1681565b6101026101e1366004610d83565b610803565b61011760045481565b61011760075481565b610102610206366004610b65565b61097a565b610117610219366004610cad565b61098e565b60025473ffffffffffffffffffffffffffffffffffffffff163314610296576002546040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff90911660248201526044015b60405180910390fd5b6102a08282600080fd5b5050565b6040805160c081018252868152602080820187905261ffff86168284015263ffffffff80861660608401528416608083015282519081018352600080825260a083019190915260025492517f9b1c385e000000000000000000000000000000000000000000000000000000008152909273ffffffffffffffffffffffffffffffffffffffff1690639b1c385e9061033f908490600401610e68565b602060405180830381600087803b15801561035957600080fd5b505af115801561036d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103919190610cc6565b6004819055979650505050505050565b60065461040a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f737562206e6f7420736574000000000000000000000000000000000000000000604482015260640161028d565b60055460025460065460408051602081019290925273ffffffffffffffffffffffffffffffffffffffff93841693634000aea09316918591015b6040516020818303038152906040526040518463ffffffff1660e01b815260040161047193929190610e1c565b602060405180830381600087803b15801561048b57600080fd5b505af115801561049f573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906102a09190610c2b565b60065461052c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f7375624944206e6f742073657400000000000
000000000000000000000000000604482015260640161028d565b60005b81518110156102a057600254600654835173ffffffffffffffffffffffffffffffffffffffff9092169163bec4c08c919085908590811061057257610572610fa0565b60200260200101516040518363ffffffff1660e01b81526004016105b692919091825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b600060405180830381600087803b1580156105d057600080fd5b505af11580156105e4573d6000803e3d6000fd5b5050505080806105f390610f40565b91505061052f565b60015473ffffffffffffffffffffffffffffffffffffffff16331461067c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015260640161028d565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60005473ffffffffffffffffffffffffffffffffffffffff163314801590610738575060025473ffffffffffffffffffffffffffffffffffffffff163314155b156107bc573361075d60005473ffffffffffffffffffffffffffffffffffffffff1690565b6002546040517f061db9c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9384166004820152918316602483015291909116604482015260640161028d565b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60065461040a57600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b15801561087457600080fd5b505af1158015610888573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108ac9190610cc6565b60068190556002546040517fbec4c08c000000000000000000000000000000000000000000000000000000008152600481019290925230602483015273fffffffffffffffffffffffffffffffff
fffffff169063bec4c08c90604401600060405180830381600087803b15801561092257600080fd5b505af1158015610936573d6000803e3d6000fd5b5050505060055460025460065460405173ffffffffffffffffffffffffffffffffffffffff93841693634000aea09316918591610444919060200190815260200190565b6109826109af565b61098b81610a32565b50565b6003818154811061099e57600080fd5b600091825260209091200154905081565b60005473ffffffffffffffffffffffffffffffffffffffff163314610a30576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161028d565b565b73ffffffffffffffffffffffffffffffffffffffff8116331415610ab2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161028d565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b803573ffffffffffffffffffffffffffffffffffffffff81168114610b4c57600080fd5b919050565b803563ffffffff81168114610b4c57600080fd5b600060208284031215610b7757600080fd5b610b8082610b28565b9392505050565b60006020808385031215610b9a57600080fd5b823567ffffffffffffffff811115610bb157600080fd5b8301601f81018513610bc257600080fd5b8035610bd5610bd082610f1c565b610ecd565b80828252848201915084840188868560051b8701011115610bf557600080fd5b600094505b83851015610c1f57610c0b81610b28565b835260019490940193918501918501610bfa565b50979650505050505050565b600060208284031215610c3d57600080fd5b81518015158114610b8057600080fd5b600080600080600060a08688031215610c6557600080fd5b8535945060208601359350604086013561ffff81168114610c8557600080fd5b9250610c9360608701610b51565b9150610ca160808701610b51565b90509295509295909350565b600060208284031215610cbf57600080fd5b5035919050565b600060208284031215610cd857600080fd5b5051919050565b60008
060408385031215610cf257600080fd5b8235915060208084013567ffffffffffffffff811115610d1157600080fd5b8401601f81018613610d2257600080fd5b8035610d30610bd082610f1c565b80828252848201915084840189868560051b8701011115610d5057600080fd5b600094505b83851015610d73578035835260019490940193918501918501610d55565b5080955050505050509250929050565b600060208284031215610d9557600080fd5b81356bffffffffffffffffffffffff81168114610b8057600080fd5b6000815180845260005b81811015610dd757602081850181015186830182015201610dbb565b81811115610de9576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b73ffffffffffffffffffffffffffffffffffffffff841681526bffffffffffffffffffffffff83166020820152606060408201526000610e5f6060830184610db1565b95945050505050565b60208152815160208201526020820151604082015261ffff60408301511660608201526000606083015163ffffffff80821660808501528060808601511660a0850152505060a083015160c080840152610ec560e0840182610db1565b949350505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610f1457610f14610fcf565b604052919050565b600067ffffffffffffffff821115610f3657610f36610fcf565b5060051b60200190565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415610f99577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFV2PlusRevertingExampleABI = VRFV2PlusRevertingExampleMetaData.ABI + +var VRFV2PlusRevertingExampleBin = VRFV2PlusRevertingExampleMetaData.Bin + +func DeployVRFV2PlusRevertingExample(auth *bind.TransactOpts, backend bind.ContractBackend, vrfCoordinator common.Address, link common.Address) (common.Address, *types.Transaction, *VRFV2PlusRevertingExample, error) { + 
parsed, err := VRFV2PlusRevertingExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2PlusRevertingExampleBin), backend, vrfCoordinator, link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2PlusRevertingExample{address: address, abi: *parsed, VRFV2PlusRevertingExampleCaller: VRFV2PlusRevertingExampleCaller{contract: contract}, VRFV2PlusRevertingExampleTransactor: VRFV2PlusRevertingExampleTransactor{contract: contract}, VRFV2PlusRevertingExampleFilterer: VRFV2PlusRevertingExampleFilterer{contract: contract}}, nil +} + +type VRFV2PlusRevertingExample struct { + address common.Address + abi abi.ABI + VRFV2PlusRevertingExampleCaller + VRFV2PlusRevertingExampleTransactor + VRFV2PlusRevertingExampleFilterer +} + +type VRFV2PlusRevertingExampleCaller struct { + contract *bind.BoundContract +} + +type VRFV2PlusRevertingExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFV2PlusRevertingExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFV2PlusRevertingExampleSession struct { + Contract *VRFV2PlusRevertingExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2PlusRevertingExampleCallerSession struct { + Contract *VRFV2PlusRevertingExampleCaller + CallOpts bind.CallOpts +} + +type VRFV2PlusRevertingExampleTransactorSession struct { + Contract *VRFV2PlusRevertingExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2PlusRevertingExampleRaw struct { + Contract *VRFV2PlusRevertingExample +} + +type VRFV2PlusRevertingExampleCallerRaw struct { + Contract *VRFV2PlusRevertingExampleCaller +} + +type VRFV2PlusRevertingExampleTransactorRaw struct { + Contract *VRFV2PlusRevertingExampleTransactor +} + +func NewVRFV2PlusRevertingExample(address 
common.Address, backend bind.ContractBackend) (*VRFV2PlusRevertingExample, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2PlusRevertingExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2PlusRevertingExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2PlusRevertingExample{address: address, abi: abi, VRFV2PlusRevertingExampleCaller: VRFV2PlusRevertingExampleCaller{contract: contract}, VRFV2PlusRevertingExampleTransactor: VRFV2PlusRevertingExampleTransactor{contract: contract}, VRFV2PlusRevertingExampleFilterer: VRFV2PlusRevertingExampleFilterer{contract: contract}}, nil +} + +func NewVRFV2PlusRevertingExampleCaller(address common.Address, caller bind.ContractCaller) (*VRFV2PlusRevertingExampleCaller, error) { + contract, err := bindVRFV2PlusRevertingExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusRevertingExampleCaller{contract: contract}, nil +} + +func NewVRFV2PlusRevertingExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2PlusRevertingExampleTransactor, error) { + contract, err := bindVRFV2PlusRevertingExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusRevertingExampleTransactor{contract: contract}, nil +} + +func NewVRFV2PlusRevertingExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2PlusRevertingExampleFilterer, error) { + contract, err := bindVRFV2PlusRevertingExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2PlusRevertingExampleFilterer{contract: contract}, nil +} + +func bindVRFV2PlusRevertingExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2PlusRevertingExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return 
bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusRevertingExample.Contract.VRFV2PlusRevertingExampleCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.VRFV2PlusRevertingExampleTransactor.contract.Transfer(opts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.VRFV2PlusRevertingExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusRevertingExample.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.contract.Transfer(opts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusRevertingExample.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) Owner() (common.Address, error) { + return _VRFV2PlusRevertingExample.Contract.Owner(&_VRFV2PlusRevertingExample.CallOpts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCallerSession) Owner() (common.Address, error) { + return _VRFV2PlusRevertingExample.Contract.Owner(&_VRFV2PlusRevertingExample.CallOpts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCaller) SGasAvailable(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusRevertingExample.contract.Call(opts, &out, "s_gasAvailable") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) SGasAvailable() (*big.Int, error) { + return _VRFV2PlusRevertingExample.Contract.SGasAvailable(&_VRFV2PlusRevertingExample.CallOpts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCallerSession) SGasAvailable() (*big.Int, error) { + return _VRFV2PlusRevertingExample.Contract.SGasAvailable(&_VRFV2PlusRevertingExample.CallOpts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCaller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusRevertingExample.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func 
(_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFV2PlusRevertingExample.Contract.SRandomWords(&_VRFV2PlusRevertingExample.CallOpts, arg0) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _VRFV2PlusRevertingExample.Contract.SRandomWords(&_VRFV2PlusRevertingExample.CallOpts, arg0) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCaller) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusRevertingExample.contract.Call(opts, &out, "s_requestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) SRequestId() (*big.Int, error) { + return _VRFV2PlusRevertingExample.Contract.SRequestId(&_VRFV2PlusRevertingExample.CallOpts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCallerSession) SRequestId() (*big.Int, error) { + return _VRFV2PlusRevertingExample.Contract.SRequestId(&_VRFV2PlusRevertingExample.CallOpts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCaller) SSubId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusRevertingExample.contract.Call(opts, &out, "s_subId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) SSubId() (*big.Int, error) { + return _VRFV2PlusRevertingExample.Contract.SSubId(&_VRFV2PlusRevertingExample.CallOpts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCallerSession) SSubId() (*big.Int, error) { + return _VRFV2PlusRevertingExample.Contract.SSubId(&_VRFV2PlusRevertingExample.CallOpts) +} + +func 
(_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCaller) SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusRevertingExample.contract.Call(opts, &out, "s_vrfCoordinator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusRevertingExample.Contract.SVrfCoordinator(&_VRFV2PlusRevertingExample.CallOpts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleCallerSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusRevertingExample.Contract.SVrfCoordinator(&_VRFV2PlusRevertingExample.CallOpts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.AcceptOwnership(&_VRFV2PlusRevertingExample.TransactOpts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.AcceptOwnership(&_VRFV2PlusRevertingExample.TransactOpts) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactor) CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.contract.Transact(opts, "createSubscriptionAndFund", amount) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return 
_VRFV2PlusRevertingExample.Contract.CreateSubscriptionAndFund(&_VRFV2PlusRevertingExample.TransactOpts, amount) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactorSession) CreateSubscriptionAndFund(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.CreateSubscriptionAndFund(&_VRFV2PlusRevertingExample.TransactOpts, amount) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.RawFulfillRandomWords(&_VRFV2PlusRevertingExample.TransactOpts, requestId, randomWords) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.RawFulfillRandomWords(&_VRFV2PlusRevertingExample.TransactOpts, requestId, randomWords) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactor) RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, subId *big.Int, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.contract.Transact(opts, "requestRandomness", keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) RequestRandomness(keyHash [32]byte, subId *big.Int, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return 
_VRFV2PlusRevertingExample.Contract.RequestRandomness(&_VRFV2PlusRevertingExample.TransactOpts, keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactorSession) RequestRandomness(keyHash [32]byte, subId *big.Int, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.RequestRandomness(&_VRFV2PlusRevertingExample.TransactOpts, keyHash, subId, minReqConfs, callbackGasLimit, numWords) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactor) SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.contract.Transact(opts, "setCoordinator", _vrfCoordinator) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.SetCoordinator(&_VRFV2PlusRevertingExample.TransactOpts, _vrfCoordinator) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactorSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.SetCoordinator(&_VRFV2PlusRevertingExample.TransactOpts, _vrfCoordinator) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactor) TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.contract.Transact(opts, "topUpSubscription", amount) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) TopUpSubscription(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.TopUpSubscription(&_VRFV2PlusRevertingExample.TransactOpts, amount) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactorSession) TopUpSubscription(amount *big.Int) 
(*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.TopUpSubscription(&_VRFV2PlusRevertingExample.TransactOpts, amount) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.TransferOwnership(&_VRFV2PlusRevertingExample.TransactOpts, to) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.TransferOwnership(&_VRFV2PlusRevertingExample.TransactOpts, to) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactor) UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.contract.Transact(opts, "updateSubscription", consumers) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.UpdateSubscription(&_VRFV2PlusRevertingExample.TransactOpts, consumers) +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleTransactorSession) UpdateSubscription(consumers []common.Address) (*types.Transaction, error) { + return _VRFV2PlusRevertingExample.Contract.UpdateSubscription(&_VRFV2PlusRevertingExample.TransactOpts, consumers) +} + +type VRFV2PlusRevertingExampleOwnershipTransferRequestedIterator struct { + Event *VRFV2PlusRevertingExampleOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + 
+func (it *VRFV2PlusRevertingExampleOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusRevertingExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusRevertingExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusRevertingExampleOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusRevertingExampleOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusRevertingExampleOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusRevertingExampleOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusRevertingExample.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusRevertingExampleOwnershipTransferRequestedIterator{contract: _VRFV2PlusRevertingExample.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusRevertingExample 
*VRFV2PlusRevertingExampleFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusRevertingExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusRevertingExample.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusRevertingExampleOwnershipTransferRequested) + if err := _VRFV2PlusRevertingExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusRevertingExampleOwnershipTransferRequested, error) { + event := new(VRFV2PlusRevertingExampleOwnershipTransferRequested) + if err := _VRFV2PlusRevertingExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusRevertingExampleOwnershipTransferredIterator struct { + Event *VRFV2PlusRevertingExampleOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusRevertingExampleOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := 
<-it.logs: + it.Event = new(VRFV2PlusRevertingExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusRevertingExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusRevertingExampleOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusRevertingExampleOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusRevertingExampleOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusRevertingExampleOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusRevertingExample.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusRevertingExampleOwnershipTransferredIterator{contract: _VRFV2PlusRevertingExample.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusRevertingExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, 
fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusRevertingExample.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusRevertingExampleOwnershipTransferred) + if err := _VRFV2PlusRevertingExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExampleFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2PlusRevertingExampleOwnershipTransferred, error) { + event := new(VRFV2PlusRevertingExampleOwnershipTransferred) + if err := _VRFV2PlusRevertingExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExample) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2PlusRevertingExample.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2PlusRevertingExample.ParseOwnershipTransferRequested(log) + case _VRFV2PlusRevertingExample.abi.Events["OwnershipTransferred"].ID: + return _VRFV2PlusRevertingExample.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2PlusRevertingExampleOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func 
(VRFV2PlusRevertingExampleOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_VRFV2PlusRevertingExample *VRFV2PlusRevertingExample) Address() common.Address { + return _VRFV2PlusRevertingExample.address +} + +type VRFV2PlusRevertingExampleInterface interface { + Owner(opts *bind.CallOpts) (common.Address, error) + + SGasAvailable(opts *bind.CallOpts) (*big.Int, error) + + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequestId(opts *bind.CallOpts) (*big.Int, error) + + SSubId(opts *bind.CallOpts) (*big.Int, error) + + SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + CreateSubscriptionAndFund(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, subId *big.Int, minReqConfs uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) + + SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) + + TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusRevertingExampleOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusRevertingExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) 
(*VRFV2PlusRevertingExampleOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusRevertingExampleOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusRevertingExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2PlusRevertingExampleOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2plus_subscription_manager/vrfv2plus_subscription_manager.go b/core/gethwrappers/generated/vrfv2plus_subscription_manager/vrfv2plus_subscription_manager.go new file mode 100644 index 00000000..53b2f662 --- /dev/null +++ b/core/gethwrappers/generated/vrfv2plus_subscription_manager/vrfv2plus_subscription_manager.go @@ -0,0 +1,792 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrfv2plus_subscription_manager + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2PlusSubscriptionManagerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"}],\"name\":\"acceptSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"addConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"cancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"createSubscription\",\"out
puts\":[{\"internalType\":\"uint64\",\"name\":\"subId\",\"type\":\"uint64\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"fundSubscriptionWithEth\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"fundSubscriptionWithLink\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newCoordinator\",\"type\":\"address\"}],\"name\":\"migrateToNewCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"removeConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"requestSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_linkToken\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_subId\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_vrfCoordinator\",\"outputs\":[{\"internalType\":\"contractIVRFSubscriptionV2Plus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"linkToken\",\"type\":\"address\"}],\"name\":\"setLinkToken\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"a
ddress\",\"name\":\"vrfCoordinator\",\"type\":\"address\"}],\"name\":\"setVRFCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawEth\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawLink\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b5033806000816100675760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615610097576100978161009f565b505050610149565b6001600160a01b0381163314156100f85760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161005e565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b611a8280620001596000396000f3fe6080604052600436106101445760003560e01c806382359740116100c0578063a0ef91df11610074578063decbca6811610059578063decbca6814610395578063e41cfad61461039d578063f2fde38b146103bd57600080fd5b8063a0ef91df1461036b578063a21a23e41461038057600080fd5b80638dc654a2116100a55780638dc654a2146103095780639c24ea401461031e5780639eccacf61461033e57600080fd5b806382359740146102be5780638da5cb5b146102de57600080fd5b80633d96303511610117578063706da1ca116100fc578063706da1ca146102005780637725135b1461025757806379ba5097146102a957600080fd5b80633d963035146101c057806344ff81ce146101e057600080fd5b80630e27e3df14610149578063112940f91461016b57806324e9edb01461018b57806337ea7367146101a0575b600080fd5b34801561015557600080fd5b506101696101643660046116c2565b6103dd
565b005b34801561017757600080fd5b506101696101863660046116c2565b610499565b34801561019757600080fd5b50610169610524565b3480156101ac57600080fd5b506101696101bb3660046116c2565b6105dc565b3480156101cc57600080fd5b506101696101db3660046116c2565b610ba4565b3480156101ec57600080fd5b506101696101fb3660046116c2565b610c2f565b34801561020c57600080fd5b506001546102399074010000000000000000000000000000000000000000900467ffffffffffffffff1681565b60405167ffffffffffffffff90911681526020015b60405180910390f35b34801561026357600080fd5b506003546102849073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161024e565b3480156102b557600080fd5b50610169610cfb565b3480156102ca57600080fd5b506101696102d936600461173a565b610df8565b3480156102ea57600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff16610284565b34801561031557600080fd5b50610169610ee1565b34801561032a57600080fd5b506101696103393660046116c2565b61109f565b34801561034a57600080fd5b506002546102849073ffffffffffffffffffffffffffffffffffffffff1681565b34801561037757600080fd5b5061016961116b565b34801561038c57600080fd5b50610239611228565b610169611324565b3480156103a957600080fd5b506101696103b8366004611708565b6113bd565b3480156103c957600080fd5b506101696103d83660046116c2565b61150e565b6103e561151f565b6002546001546040517f9f87fad70000000000000000000000000000000000000000000000000000000081527401000000000000000000000000000000000000000090910467ffffffffffffffff16600482015273ffffffffffffffffffffffffffffffffffffffff838116602483015290911690639f87fad7906044015b600060405180830381600087803b15801561047e57600080fd5b505af1158015610492573d6000803e3d6000fd5b5050505050565b6104a161151f565b6002546001546040517f7341c10c0000000000000000000000000000000000000000000000000000000081527401000000000000000000000000000000000000000090910467ffffffffffffffff16600482015273ffffffffffffffffffffffffffffffffffffffff838116602483015290911690637341c10c90604401610464565b61052c61151f565b6002546001546040517fd7ae1d30000000000000000000000000000000000000000000
0000000000000081527401000000000000000000000000000000000000000090910467ffffffffffffffff16600482015230602482015273ffffffffffffffffffffffffffffffffffffffff9091169063d7ae1d3090604401600060405180830381600087803b1580156105c257600080fd5b505af11580156105d6573d6000803e3d6000fd5b50505050565b6105e461151f565b6002546001546040517fa47c76960000000000000000000000000000000000000000000000000000000081527401000000000000000000000000000000000000000090910467ffffffffffffffff16600482015260009182918291829173ffffffffffffffffffffffffffffffffffffffff9091169063a47c76969060240160006040518083038186803b15801561067b57600080fd5b505afa15801561068f573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526106d59190810190611774565b93509350935093503073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614610777576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600960248201527f4e6f74206f776e6572000000000000000000000000000000000000000000000060448201526064015b60405180910390fd5b61077f610524565b600085905060008173ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b1580156107ce57600080fd5b505af11580156107e2573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108069190611757565b90506bffffffffffffffffffffffff8616156108d8576003546040805167ffffffffffffffff8416602082015273ffffffffffffffffffffffffffffffffffffffff90921691634000aea0918a918a91016040516020818303038152906040526040518463ffffffff1660e01b81526004016108849392919061193c565b602060405180830381600087803b15801561089e57600080fd5b505af11580156108b2573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108d691906116e6565b505b6bffffffffffffffffffffffff851615610986576040517f3697af8b00000000000000000000000000000000000000000000000000000000815267ffffffffffffffff8216600482015273ffffffffffffffffffffffffffffffffffffffff
831690633697af8b906bffffffffffffffffffffffff8816906024016000604051808303818588803b15801561096c57600080fd5b505af1158015610980573d6000803e3d6000fd5b50505050505b60005b8351811015610b05578273ffffffffffffffffffffffffffffffffffffffff16637341c10c838684815181106109c1576109c16119df565b60200260200101516040518363ffffffff1660e01b8152600401610a1192919067ffffffffffffffff92909216825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b600060405180830381600087803b158015610a2b57600080fd5b505af1158015610a3f573d6000803e3d6000fd5b50505050838181518110610a5557610a556119df565b60209081029190910101516040517f2d6d99f300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8a8116600483015267ffffffffffffffff8516602483015290911690632d6d99f390604401600060405180830381600087803b158015610ada57600080fd5b505af1158015610aee573d6000803e3d6000fd5b505050508080610afd9061197f565b915050610989565b506001805467ffffffffffffffff90921674010000000000000000000000000000000000000000027fffffffff0000000000000000ffffffffffffffffffffffffffffffffffffffff9092169190911790556002805473ffffffffffffffffffffffffffffffffffffffff9092167fffffffffffffffffffffffff00000000000000000000000000000000000000009092169190911790555050505050565b610bac61151f565b6002546001546040517f04c357cb0000000000000000000000000000000000000000000000000000000081527401000000000000000000000000000000000000000090910467ffffffffffffffff16600482015273ffffffffffffffffffffffffffffffffffffffff8381166024830152909116906304c357cb90604401610464565b610c3761151f565b73ffffffffffffffffffffffffffffffffffffffff8116610cb4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600f60248201527f496e76616c696420616464726573730000000000000000000000000000000000604482015260640161076e565b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60015473ffffffffffffffffffffffffffffffffffffffff163314610d7c576040517f08
c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015260640161076e565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610e0061151f565b6002546040517f8235974000000000000000000000000000000000000000000000000000000000815267ffffffffffffffff8316600482015273ffffffffffffffffffffffffffffffffffffffff90911690638235974090602401600060405180830381600087803b158015610e7557600080fd5b505af1158015610e89573d6000803e3d6000fd5b50506001805467ffffffffffffffff90941674010000000000000000000000000000000000000000027fffffffff0000000000000000ffffffffffffffffffffffffffffffffffffffff909416939093179092555050565b610ee961151f565b6003546040517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015273ffffffffffffffffffffffffffffffffffffffff9091169063a9059cbb90339083906370a082319060240160206040518083038186803b158015610f5c57600080fd5b505afa158015610f70573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610f949190611721565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e085901b16815273ffffffffffffffffffffffffffffffffffffffff90921660048301526024820152604401602060405180830381600087803b158015610fff57600080fd5b505af1158015611013573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061103791906116e6565b61109d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f556e61626c6520746f207472616e736665720000000000000000000000000000604482015260640161076e565b565b6110a761151f565b73ffffffffffffffffffffffffffffffffffffffff8116611124576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600f60248201527f496e76616c6964206164647265737300
00000000000000000000000000000000604482015260640161076e565b600380547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b61117361151f565b604051600090339047908381818185875af1925050503d80600081146111b5576040519150601f19603f3d011682016040523d82523d6000602084013e6111ba565b606091505b5050905080611225576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f556e61626c6520746f207472616e736665720000000000000000000000000000604482015260640161076e565b50565b600061123261151f565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a21a23e46040518163ffffffff1660e01b8152600401602060405180830381600087803b15801561129c57600080fd5b505af11580156112b0573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906112d49190611757565b600180547fffffffff0000000000000000ffffffffffffffffffffffffffffffffffffffff167401000000000000000000000000000000000000000067ffffffffffffffff841602179055919050565b61132c61151f565b6002546001546040517f3697af8b0000000000000000000000000000000000000000000000000000000081527401000000000000000000000000000000000000000090910467ffffffffffffffff16600482015273ffffffffffffffffffffffffffffffffffffffff90911690633697af8b9034906024016000604051808303818588803b15801561047e57600080fd5b6113c561151f565b600354600254600154604080517401000000000000000000000000000000000000000090920467ffffffffffffffff16602083015260009373ffffffffffffffffffffffffffffffffffffffff90811693634000aea0939116918691016040516020818303038152906040526040518463ffffffff1660e01b815260040161144f939291906118fe565b602060405180830381600087803b15801561146957600080fd5b505af115801561147d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906114a191906116e6565b90508061150a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600f60248201527f5472616e73666572206661696c6564000000
0000000000000000000000000000604482015260640161076e565b5050565b61151661151f565b611225816115a0565b60005473ffffffffffffffffffffffffffffffffffffffff16331461109d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161076e565b73ffffffffffffffffffffffffffffffffffffffff8116331415611620576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161076e565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516116a181611a3d565b919050565b80516bffffffffffffffffffffffff811681146116a157600080fd5b6000602082840312156116d457600080fd5b81356116df81611a3d565b9392505050565b6000602082840312156116f857600080fd5b815180151581146116df57600080fd5b60006020828403121561171a57600080fd5b5035919050565b60006020828403121561173357600080fd5b5051919050565b60006020828403121561174c57600080fd5b81356116df81611a5f565b60006020828403121561176957600080fd5b81516116df81611a5f565b6000806000806080858703121561178a57600080fd5b611793856116a6565b935060206117a28187016116a6565b935060408601516117b281611a3d565b606087015190935067ffffffffffffffff808211156117d057600080fd5b818801915088601f8301126117e457600080fd5b8151818111156117f6576117f6611a0e565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f8301168101818110858211171561183957611839611a0e565b604052828152858101935084860182860187018d101561185857600080fd5b600095505b838610156118825761186e81611696565b85526001959095019493860193860161185d565b50989b979a50959850505050505050565b6000815180845260005b818110156118b95760208185018101518683018201520161189d565b818111156118cb576000602083870101525b50601f017fffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b73ffffffffffffffffffffffffffffffffffffffff841681528260208201526060604082015260006119336060830184611893565b95945050505050565b73ffffffffffffffffffffffffffffffffffffffff841681526bffffffffffffffffffffffff831660208201526060604082015260006119336060830184611893565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8214156119d8577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b73ffffffffffffffffffffffffffffffffffffffff8116811461122557600080fd5b67ffffffffffffffff8116811461122557600080fdfea164736f6c6343000806000a", +} + +var VRFV2PlusSubscriptionManagerABI = VRFV2PlusSubscriptionManagerMetaData.ABI + +var VRFV2PlusSubscriptionManagerBin = VRFV2PlusSubscriptionManagerMetaData.Bin + +func DeployVRFV2PlusSubscriptionManager(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRFV2PlusSubscriptionManager, error) { + parsed, err := VRFV2PlusSubscriptionManagerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2PlusSubscriptionManagerBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2PlusSubscriptionManager{VRFV2PlusSubscriptionManagerCaller: VRFV2PlusSubscriptionManagerCaller{contract: contract}, VRFV2PlusSubscriptionManagerTransactor: VRFV2PlusSubscriptionManagerTransactor{contract: contract}, VRFV2PlusSubscriptionManagerFilterer: VRFV2PlusSubscriptionManagerFilterer{contract: contract}}, nil +} + +type VRFV2PlusSubscriptionManager 
struct { + address common.Address + abi abi.ABI + VRFV2PlusSubscriptionManagerCaller + VRFV2PlusSubscriptionManagerTransactor + VRFV2PlusSubscriptionManagerFilterer +} + +type VRFV2PlusSubscriptionManagerCaller struct { + contract *bind.BoundContract +} + +type VRFV2PlusSubscriptionManagerTransactor struct { + contract *bind.BoundContract +} + +type VRFV2PlusSubscriptionManagerFilterer struct { + contract *bind.BoundContract +} + +type VRFV2PlusSubscriptionManagerSession struct { + Contract *VRFV2PlusSubscriptionManager + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2PlusSubscriptionManagerCallerSession struct { + Contract *VRFV2PlusSubscriptionManagerCaller + CallOpts bind.CallOpts +} + +type VRFV2PlusSubscriptionManagerTransactorSession struct { + Contract *VRFV2PlusSubscriptionManagerTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2PlusSubscriptionManagerRaw struct { + Contract *VRFV2PlusSubscriptionManager +} + +type VRFV2PlusSubscriptionManagerCallerRaw struct { + Contract *VRFV2PlusSubscriptionManagerCaller +} + +type VRFV2PlusSubscriptionManagerTransactorRaw struct { + Contract *VRFV2PlusSubscriptionManagerTransactor +} + +func NewVRFV2PlusSubscriptionManager(address common.Address, backend bind.ContractBackend) (*VRFV2PlusSubscriptionManager, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2PlusSubscriptionManagerABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2PlusSubscriptionManager(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2PlusSubscriptionManager{address: address, abi: abi, VRFV2PlusSubscriptionManagerCaller: VRFV2PlusSubscriptionManagerCaller{contract: contract}, VRFV2PlusSubscriptionManagerTransactor: VRFV2PlusSubscriptionManagerTransactor{contract: contract}, VRFV2PlusSubscriptionManagerFilterer: VRFV2PlusSubscriptionManagerFilterer{contract: contract}}, nil +} + +func NewVRFV2PlusSubscriptionManagerCaller(address common.Address, caller 
bind.ContractCaller) (*VRFV2PlusSubscriptionManagerCaller, error) { + contract, err := bindVRFV2PlusSubscriptionManager(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusSubscriptionManagerCaller{contract: contract}, nil +} + +func NewVRFV2PlusSubscriptionManagerTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2PlusSubscriptionManagerTransactor, error) { + contract, err := bindVRFV2PlusSubscriptionManager(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusSubscriptionManagerTransactor{contract: contract}, nil +} + +func NewVRFV2PlusSubscriptionManagerFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2PlusSubscriptionManagerFilterer, error) { + contract, err := bindVRFV2PlusSubscriptionManager(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2PlusSubscriptionManagerFilterer{contract: contract}, nil +} + +func bindVRFV2PlusSubscriptionManager(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2PlusSubscriptionManagerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusSubscriptionManager.Contract.VRFV2PlusSubscriptionManagerCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.VRFV2PlusSubscriptionManagerTransactor.contract.Transfer(opts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.VRFV2PlusSubscriptionManagerTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusSubscriptionManager.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.contract.Transfer(opts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusSubscriptionManager.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) Owner() (common.Address, error) { + return _VRFV2PlusSubscriptionManager.Contract.Owner(&_VRFV2PlusSubscriptionManager.CallOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerCallerSession) Owner() (common.Address, error) { + return _VRFV2PlusSubscriptionManager.Contract.Owner(&_VRFV2PlusSubscriptionManager.CallOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerCaller) SLinkToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusSubscriptionManager.contract.Call(opts, &out, "s_linkToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) SLinkToken() (common.Address, error) { + return _VRFV2PlusSubscriptionManager.Contract.SLinkToken(&_VRFV2PlusSubscriptionManager.CallOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerCallerSession) SLinkToken() (common.Address, error) { + return _VRFV2PlusSubscriptionManager.Contract.SLinkToken(&_VRFV2PlusSubscriptionManager.CallOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerCaller) SSubId(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _VRFV2PlusSubscriptionManager.contract.Call(opts, &out, "s_subId") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + 
return out0, err + +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) SSubId() (uint64, error) { + return _VRFV2PlusSubscriptionManager.Contract.SSubId(&_VRFV2PlusSubscriptionManager.CallOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerCallerSession) SSubId() (uint64, error) { + return _VRFV2PlusSubscriptionManager.Contract.SSubId(&_VRFV2PlusSubscriptionManager.CallOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerCaller) SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusSubscriptionManager.contract.Call(opts, &out, "s_vrfCoordinator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusSubscriptionManager.Contract.SVrfCoordinator(&_VRFV2PlusSubscriptionManager.CallOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerCallerSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusSubscriptionManager.Contract.SVrfCoordinator(&_VRFV2PlusSubscriptionManager.CallOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.AcceptOwnership(&_VRFV2PlusSubscriptionManager.TransactOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return 
_VRFV2PlusSubscriptionManager.Contract.AcceptOwnership(&_VRFV2PlusSubscriptionManager.TransactOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "acceptSubscriptionOwnerTransfer", subId) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) AcceptSubscriptionOwnerTransfer(subId uint64) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.AcceptSubscriptionOwnerTransfer(&_VRFV2PlusSubscriptionManager.TransactOpts, subId) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) AcceptSubscriptionOwnerTransfer(subId uint64) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.AcceptSubscriptionOwnerTransfer(&_VRFV2PlusSubscriptionManager.TransactOpts, subId) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) AddConsumer(opts *bind.TransactOpts, consumer common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "addConsumer", consumer) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) AddConsumer(consumer common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.AddConsumer(&_VRFV2PlusSubscriptionManager.TransactOpts, consumer) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) AddConsumer(consumer common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.AddConsumer(&_VRFV2PlusSubscriptionManager.TransactOpts, consumer) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) CancelSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, 
"cancelSubscription") +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) CancelSubscription() (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.CancelSubscription(&_VRFV2PlusSubscriptionManager.TransactOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) CancelSubscription() (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.CancelSubscription(&_VRFV2PlusSubscriptionManager.TransactOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "createSubscription") +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) CreateSubscription() (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.CreateSubscription(&_VRFV2PlusSubscriptionManager.TransactOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) CreateSubscription() (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.CreateSubscription(&_VRFV2PlusSubscriptionManager.TransactOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) FundSubscriptionWithEth(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "fundSubscriptionWithEth") +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) FundSubscriptionWithEth() (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.FundSubscriptionWithEth(&_VRFV2PlusSubscriptionManager.TransactOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) FundSubscriptionWithEth() (*types.Transaction, error) { + return 
_VRFV2PlusSubscriptionManager.Contract.FundSubscriptionWithEth(&_VRFV2PlusSubscriptionManager.TransactOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) FundSubscriptionWithLink(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "fundSubscriptionWithLink", amount) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) FundSubscriptionWithLink(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.FundSubscriptionWithLink(&_VRFV2PlusSubscriptionManager.TransactOpts, amount) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) FundSubscriptionWithLink(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.FundSubscriptionWithLink(&_VRFV2PlusSubscriptionManager.TransactOpts, amount) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) MigrateToNewCoordinator(opts *bind.TransactOpts, newCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "migrateToNewCoordinator", newCoordinator) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) MigrateToNewCoordinator(newCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.MigrateToNewCoordinator(&_VRFV2PlusSubscriptionManager.TransactOpts, newCoordinator) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) MigrateToNewCoordinator(newCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.MigrateToNewCoordinator(&_VRFV2PlusSubscriptionManager.TransactOpts, newCoordinator) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) RemoveConsumer(opts *bind.TransactOpts, consumer 
common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "removeConsumer", consumer) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) RemoveConsumer(consumer common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.RemoveConsumer(&_VRFV2PlusSubscriptionManager.TransactOpts, consumer) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) RemoveConsumer(consumer common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.RemoveConsumer(&_VRFV2PlusSubscriptionManager.TransactOpts, consumer) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "requestSubscriptionOwnerTransfer", newOwner) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) RequestSubscriptionOwnerTransfer(newOwner common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.RequestSubscriptionOwnerTransfer(&_VRFV2PlusSubscriptionManager.TransactOpts, newOwner) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) RequestSubscriptionOwnerTransfer(newOwner common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.RequestSubscriptionOwnerTransfer(&_VRFV2PlusSubscriptionManager.TransactOpts, newOwner) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) SetLinkToken(opts *bind.TransactOpts, linkToken common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "setLinkToken", linkToken) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) SetLinkToken(linkToken common.Address) 
(*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.SetLinkToken(&_VRFV2PlusSubscriptionManager.TransactOpts, linkToken) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) SetLinkToken(linkToken common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.SetLinkToken(&_VRFV2PlusSubscriptionManager.TransactOpts, linkToken) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) SetVRFCoordinator(opts *bind.TransactOpts, vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "setVRFCoordinator", vrfCoordinator) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) SetVRFCoordinator(vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.SetVRFCoordinator(&_VRFV2PlusSubscriptionManager.TransactOpts, vrfCoordinator) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) SetVRFCoordinator(vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.SetVRFCoordinator(&_VRFV2PlusSubscriptionManager.TransactOpts, vrfCoordinator) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.TransferOwnership(&_VRFV2PlusSubscriptionManager.TransactOpts, to) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return 
_VRFV2PlusSubscriptionManager.Contract.TransferOwnership(&_VRFV2PlusSubscriptionManager.TransactOpts, to) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) WithdrawEth(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "withdrawEth") +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) WithdrawEth() (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.WithdrawEth(&_VRFV2PlusSubscriptionManager.TransactOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) WithdrawEth() (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.WithdrawEth(&_VRFV2PlusSubscriptionManager.TransactOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactor) WithdrawLink(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.contract.Transact(opts, "withdrawLink") +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerSession) WithdrawLink() (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.WithdrawLink(&_VRFV2PlusSubscriptionManager.TransactOpts) +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerTransactorSession) WithdrawLink() (*types.Transaction, error) { + return _VRFV2PlusSubscriptionManager.Contract.WithdrawLink(&_VRFV2PlusSubscriptionManager.TransactOpts) +} + +type VRFV2PlusSubscriptionManagerOwnershipTransferRequestedIterator struct { + Event *VRFV2PlusSubscriptionManagerOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusSubscriptionManagerOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(VRFV2PlusSubscriptionManagerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusSubscriptionManagerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusSubscriptionManagerOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusSubscriptionManagerOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusSubscriptionManagerOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusSubscriptionManagerOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusSubscriptionManager.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusSubscriptionManagerOwnershipTransferRequestedIterator{contract: _VRFV2PlusSubscriptionManager.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusSubscriptionManagerOwnershipTransferRequested, from []common.Address, 
to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusSubscriptionManager.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusSubscriptionManagerOwnershipTransferRequested) + if err := _VRFV2PlusSubscriptionManager.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusSubscriptionManagerOwnershipTransferRequested, error) { + event := new(VRFV2PlusSubscriptionManagerOwnershipTransferRequested) + if err := _VRFV2PlusSubscriptionManager.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusSubscriptionManagerOwnershipTransferredIterator struct { + Event *VRFV2PlusSubscriptionManagerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusSubscriptionManagerOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusSubscriptionManagerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + 
it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusSubscriptionManagerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusSubscriptionManagerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusSubscriptionManagerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusSubscriptionManagerOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusSubscriptionManagerOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusSubscriptionManager.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusSubscriptionManagerOwnershipTransferredIterator{contract: _VRFV2PlusSubscriptionManager.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusSubscriptionManagerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, 
toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusSubscriptionManager.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusSubscriptionManagerOwnershipTransferred) + if err := _VRFV2PlusSubscriptionManager.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManagerFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2PlusSubscriptionManagerOwnershipTransferred, error) { + event := new(VRFV2PlusSubscriptionManagerOwnershipTransferred) + if err := _VRFV2PlusSubscriptionManager.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManager) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2PlusSubscriptionManager.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2PlusSubscriptionManager.ParseOwnershipTransferRequested(log) + case _VRFV2PlusSubscriptionManager.abi.Events["OwnershipTransferred"].ID: + return _VRFV2PlusSubscriptionManager.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2PlusSubscriptionManagerOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFV2PlusSubscriptionManagerOwnershipTransferred) Topic() 
common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_VRFV2PlusSubscriptionManager *VRFV2PlusSubscriptionManager) Address() common.Address { + return _VRFV2PlusSubscriptionManager.address +} + +type VRFV2PlusSubscriptionManagerInterface interface { + Owner(opts *bind.CallOpts) (common.Address, error) + + SLinkToken(opts *bind.CallOpts) (common.Address, error) + + SSubId(opts *bind.CallOpts) (uint64, error) + + SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) + + AddConsumer(opts *bind.TransactOpts, consumer common.Address) (*types.Transaction, error) + + CancelSubscription(opts *bind.TransactOpts) (*types.Transaction, error) + + CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) + + FundSubscriptionWithEth(opts *bind.TransactOpts) (*types.Transaction, error) + + FundSubscriptionWithLink(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + MigrateToNewCoordinator(opts *bind.TransactOpts, newCoordinator common.Address) (*types.Transaction, error) + + RemoveConsumer(opts *bind.TransactOpts, consumer common.Address) (*types.Transaction, error) + + RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) + + SetLinkToken(opts *bind.TransactOpts, linkToken common.Address) (*types.Transaction, error) + + SetVRFCoordinator(opts *bind.TransactOpts, vrfCoordinator common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + WithdrawEth(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawLink(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to 
[]common.Address) (*VRFV2PlusSubscriptionManagerOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusSubscriptionManagerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusSubscriptionManagerOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusSubscriptionManagerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusSubscriptionManagerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2PlusSubscriptionManagerOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2plus_wrapper/vrfv2plus_wrapper.go b/core/gethwrappers/generated/vrfv2plus_wrapper/vrfv2plus_wrapper.go new file mode 100644 index 00000000..ac18541e --- /dev/null +++ b/core/gethwrappers/generated/vrfv2plus_wrapper/vrfv2plus_wrapper.go @@ -0,0 +1,1295 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrfv2plus_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2PlusWrapperMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_linkNativeFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_coordinator\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"FailedToTransferLink\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"expectedMinimumLength\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"actualLength\",\"type\":\"uint16\"}],\"name\":\"IncorrectExtraArgsLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PLIPaymentInRequestRandomWordsInNative\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"LinkAlreadySet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NativePaymentInOnTokenTransfer\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyCoordinatorCanFulfill\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"}],\"name\":\"OnlyOwnerOr
Coordinator\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroAddress\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"WrapperFulfillmentFailed\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"SUBSCRIPTION_ID\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"}],\"name\":\"calculateRequestPrice\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"}],\"name\":\"calculateRequestPriceNative\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"extraArgs\",\"type\":\"bytes\"},{\"internalType\":\"bool\",\"name\":\"isLinkMode\",\"type\":\"bool\"}],\"name\":\"checkPaymentMode\",\"outputs\":[],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"disa
ble\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"enable\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"_requestGasPriceWei\",\"type\":\"uint256\"}],\"name\":\"estimateRequestPrice\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"_requestGasPriceWei\",\"type\":\"uint256\"}],\"name\":\"estimateRequestPriceNative\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfig\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeLinkPPM\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"fulfillmentFlatFeeNativePPM\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"wrapperGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"coordinatorGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"wrapperPremiumPercentage\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint8\",\"name\":\"maxNumWords\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastRequestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newCoordinator\",\"type\":\"address\"
}],\"name\":\"migrate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"_numWords\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"extraArgs\",\"type\":\"bytes\"}],\"name\":\"requestRandomWordsInNative\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_callbacks\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"callbackAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"requestGasPrice\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_configured\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_disabled\",\"out
puts\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_fulfillmentTxSizeBytes\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_link\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_linkNativeFeed\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_vrfCoordinator\",\"outputs\":[{\"internalType\":\"contractIVRFCoordinatorV2Plus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_wrapperGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_coordinatorGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"_wrapperPremiumPercentage\",\"type\":\"uint8\"},{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint8\",\"name\":\"_maxNumWords\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"_stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"int256\",\"name\":\"_fallbackWeiPerUnitLink\",\"type\":\"int256\"},{\"internalType\":\"uint32\",\"name\":\"_fulfillmentFlatFeeLinkPPM\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_fulfillmentFlatFeeNativePPM\",\"type\":\"uint32\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"}],\"name\":\"setCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":
\"size\",\"type\":\"uint32\"}],\"name\":\"setFulfillmentTxSize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"link\",\"type\":\"address\"}],\"name\":\"setPLI\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"linkNativeFeed\",\"type\":\"address\"}],\"name\":\"setLinkNativeFeed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"withdrawNative\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60a06040526007805463ffffffff60201b1916650244000000001790553480156200002957600080fd5b506040516200366e3803806200366e8339810160408190526200004c9162000323565b803380600081620000a45760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000d757620000d7816200025a565b5050600280546001600160a01b0319166001600160a01b03938416179055508316156200012857600680546001600160601b03166c010000000000000000000000006001600160a01b038616021790555b6001600160a01b038216156200016257600780546001600160601b03166c010000000000000000000000006001600160a01b038516021790555b6002546040805163288688f960e21b815290516000926001600160a01b03169163a21a23e491600480830192602092919082900301818787803b158015620001a957600080fd5b505af1158015620001be573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001e491906200036d565b6080819052600254604051632fb1302360e21b8152600481018390523060248201529192506001600160a01b03169063bec4c08c90604401600060405180830381600087803b1580156200023757600080fd5b505af11580156200024c573d6000803e3d6000fd5b505050505050505062000387565b6001600160a01b038116331415620002b55760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016200009b565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b03811681146200031e57600080fd5b919050565b6000806000606084860312156200033957600080fd5b620003448462000306565b9250620003546020850162000306565b9150620003646040850162000306565b90509250925092565b6000602082840312156200038057600080fd5b5051919050565b6080516132b6620003b8600039600081816101fa015281816112690152818161183a0152611c3201526132b66000f3fe6080604052600436106101e35760003560e01c80639cfc058e11610102578063c3f909d411610095578063f254bdc
711610064578063f254bdc71461072c578063f2fde38b14610769578063f3fef3a314610789578063fc2a88c3146107a957600080fd5b8063c3f909d4146105fc578063cdd8d88514610695578063ce5494bb146106cf578063da4f5e6d146106ef57600080fd5b8063a4c0ed36116100d1578063a4c0ed361461057d578063a608a1e11461059d578063bed41a93146105bc578063bf17e559146105dc57600080fd5b80639cfc058e146105085780639eccacf61461051b578063a02e061614610548578063a3907d711461056857600080fd5b806348baa1c51161017a57806379ba50971161014957806379ba5097146104675780637fb5d19d1461047c5780638da5cb5b1461049c5780638ea98117146104e857600080fd5b806348baa1c5146103325780634b160935146103fd57806357a8070a1461041d578063650596541461044757600080fd5b80631fe543e3116101b65780631fe543e3146102bd5780632f2770db146102dd5780633255c456146102f25780634306d3541461031257600080fd5b8063030932bb146101e857806307b18bde1461022f578063181f5a771461025157806318b6f4c81461029d575b600080fd5b3480156101f457600080fd5b5061021c7f000000000000000000000000000000000000000000000000000000000000000081565b6040519081526020015b60405180910390f35b34801561023b57600080fd5b5061024f61024a366004612a22565b6107bf565b005b34801561025d57600080fd5b50604080518082018252601281527f56524656325772617070657220312e302e300000000000000000000000000000602082015290516102269190612ebb565b3480156102a957600080fd5b5061024f6102b8366004612ac3565b61089b565b3480156102c957600080fd5b5061024f6102d8366004612b47565b610a12565b3480156102e957600080fd5b5061024f610a8f565b3480156102fe57600080fd5b5061021c61030d366004612d4a565b610ac5565b34801561031e57600080fd5b5061021c61032d366004612c4a565b610bbf565b34801561033e57600080fd5b506103bc61034d366004612b15565b60096020526000908152604090205473ffffffffffffffffffffffffffffffffffffffff81169074010000000000000000000000000000000000000000810463ffffffff16907801000000000000000000000000000000000000000000000000900467ffffffffffffffff1683565b6040805173ffffffffffffffffffffffffffffffffffffffff909416845263ffffffff909216602084015267ffffffffffffffff1690820152606001610226565b34801561040957600080fd5b5061021c610418366004612c4
a565b610cc6565b34801561042957600080fd5b506008546104379060ff1681565b6040519015158152602001610226565b34801561045357600080fd5b5061024f610462366004612a07565b610db7565b34801561047357600080fd5b5061024f610e02565b34801561048857600080fd5b5061021c610497366004612d4a565b610eff565b3480156104a857600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610226565b3480156104f457600080fd5b5061024f610503366004612a07565b611005565b61021c610516366004612c65565b611110565b34801561052757600080fd5b506002546104c39073ffffffffffffffffffffffffffffffffffffffff1681565b34801561055457600080fd5b5061024f610563366004612a07565b6114a5565b34801561057457600080fd5b5061024f611550565b34801561058957600080fd5b5061024f610598366004612a4c565b611582565b3480156105a957600080fd5b5060085461043790610100900460ff1681565b3480156105c857600080fd5b5061024f6105d7366004612d66565b611a62565b3480156105e857600080fd5b5061024f6105f7366004612c4a565b611bb8565b34801561060857600080fd5b506005546006546007546008546003546040805195865263ffffffff8086166020880152640100000000860481169187019190915268010000000000000000948590048116606087015280841660808701529390920490921660a084015260ff620100008304811660c085015260e084019190915263010000009091041661010082015261012001610226565b3480156106a157600080fd5b506007546106ba90640100000000900463ffffffff1681565b60405163ffffffff9091168152602001610226565b3480156106db57600080fd5b5061024f6106ea366004612a07565b611bff565b3480156106fb57600080fd5b506006546104c3906c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1681565b34801561073857600080fd5b506007546104c3906c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1681565b34801561077557600080fd5b5061024f610784366004612a07565b611cb5565b34801561079557600080fd5b5061024f6107a4366004612a22565b611cc9565b3480156107b557600080fd5b5061021c60045481565b6107c7611dc4565b60008273ffffffffffffffffffffffffffffffffffffffff168260405160006040518083038185875af1925050503d806000811461082
1576040519150601f19603f3d011682016040523d82523d6000602084013e610826565b606091505b5050905080610896576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f6661696c656420746f207769746864726177206e61746976650000000000000060448201526064015b60405180910390fd5b505050565b81516108dc57806108d8576040517f6b81746e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5050565b81516024111561092d5781516040517f51200dce00000000000000000000000000000000000000000000000000000000815261088d9160249160040161ffff92831681529116602082015260400190565b6000826023815181106109425761094261323d565b6020910101517fff00000000000000000000000000000000000000000000000000000000000000167f01000000000000000000000000000000000000000000000000000000000000001490508080156109985750815b156109cf576040517f6048aa6800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b801580156109db575081155b15610896576040517f6b81746e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60025473ffffffffffffffffffffffffffffffffffffffff163314610a85576002546040517f1cf993f400000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff909116602482015260440161088d565b6108d88282611e47565b610a97611dc4565b600880547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff16610100179055565b60085460009060ff16610b34576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f77726170706572206973206e6f7420636f6e6669677572656400000000000000604482015260640161088d565b600854610100900460ff1615610ba6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f777261707065722069732064697361626c656400000000000000000000000000604482015260640161088d565b610bb68363ffffffff168361202f565b90505b92915050565b60085460009060ff16610c2e576040517f08c379a000000000000000000000000000000000000
000000000000000000000815260206004820152601960248201527f77726170706572206973206e6f7420636f6e6669677572656400000000000000604482015260640161088d565b600854610100900460ff1615610ca0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f777261707065722069732064697361626c656400000000000000000000000000604482015260640161088d565b6000610caa612105565b9050610cbd8363ffffffff163a8361226c565b9150505b919050565b60085460009060ff16610d35576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f77726170706572206973206e6f7420636f6e6669677572656400000000000000604482015260640161088d565b600854610100900460ff1615610da7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f777261707065722069732064697361626c656400000000000000000000000000604482015260640161088d565b610bb98263ffffffff163a61202f565b610dbf611dc4565b6007805473ffffffffffffffffffffffffffffffffffffffff9092166c01000000000000000000000000026bffffffffffffffffffffffff909216919091179055565b60015473ffffffffffffffffffffffffffffffffffffffff163314610e83576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015260640161088d565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60085460009060ff16610f6e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f77726170706572206973206e6f7420636f6e6669677572656400000000000000604482015260640161088d565b600854610100900460ff1615610fe0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f777261707065722069732064697361626c6564000000000000000
00000000000604482015260640161088d565b6000610fea612105565b9050610ffd8463ffffffff16848361226c565b949350505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314801590611045575060025473ffffffffffffffffffffffffffffffffffffffff163314155b156110c9573361106a60005473ffffffffffffffffffffffffffffffffffffffff1690565b6002546040517f061db9c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9384166004820152918316602483015291909116604482015260640161088d565b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b600061115183838080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920182905250925061089b915050565b600061115c8761235e565b905060006111708863ffffffff163a61202f565b9050803410156111dc576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f66656520746f6f206c6f77000000000000000000000000000000000000000000604482015260640161088d565b6008546301000000900460ff1663ffffffff87161115611258576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f6e756d576f72647320746f6f2068696768000000000000000000000000000000604482015260640161088d565b6040805160c08101825260035481527f0000000000000000000000000000000000000000000000000000000000000000602082015261ffff89169181019190915260075460009190606082019063ffffffff166112b5868d612fe0565b6112bf9190612fe0565b63ffffffff1681526020018863ffffffff16815260200187878080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509152506002546040517f9b1c385e00000000000000000000000000000000000000000000000000000000815291925073ffffffffffffffffffffffffffffffffffffffff1690639b1c385e90611365908490600401612ece565b602060405180830381600087803b15801561137f57600080fd5b505af1158015611393573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906113b79190612b2e5
65b6040805160608101825233815263ffffffff808d16602080840191825267ffffffffffffffff3a81168587019081526000888152600990935295909120935184549251955190911678010000000000000000000000000000000000000000000000000277ffffffffffffffffffffffffffffffffffffffffffffffff9590931674010000000000000000000000000000000000000000027fffffffffffffffff00000000000000000000000000000000000000000000000090921673ffffffffffffffffffffffffffffffffffffffff91909116171792909216919091179055935050505095945050505050565b6114ad611dc4565b6006546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff161561150d576040517f2d118a6e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6006805473ffffffffffffffffffffffffffffffffffffffff9092166c01000000000000000000000000026bffffffffffffffffffffffff909216919091179055565b611558611dc4565b600880547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055565b60085460ff166115ee576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f77726170706572206973206e6f7420636f6e6669677572656400000000000000604482015260640161088d565b600854610100900460ff1615611660576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f777261707065722069732064697361626c656400000000000000000000000000604482015260640161088d565b6006546c01000000000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1633146116f1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f6f6e6c792063616c6c61626c652066726f6d204c494e4b000000000000000000604482015260640161088d565b600080808061170285870187612cdb565b935093509350935061171581600161089b565b60006117208561235e565b9050600061172c612105565b905060006117418763ffffffff163a8461226c565b9050808a10156117ad576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f66656520746f6f206c6f770000000000000000000000000000000000000
00000604482015260640161088d565b6008546301000000900460ff1663ffffffff86161115611829576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f6e756d576f72647320746f6f2068696768000000000000000000000000000000604482015260640161088d565b6040805160c08101825260035481527f0000000000000000000000000000000000000000000000000000000000000000602082015261ffff88169181019190915260075460009190606082019063ffffffff16611886878c612fe0565b6118909190612fe0565b63ffffffff908116825288166020820152604090810187905260025490517f9b1c385e00000000000000000000000000000000000000000000000000000000815291925060009173ffffffffffffffffffffffffffffffffffffffff90911690639b1c385e90611904908590600401612ece565b602060405180830381600087803b15801561191e57600080fd5b505af1158015611932573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906119569190612b2e565b905060405180606001604052808e73ffffffffffffffffffffffffffffffffffffffff1681526020018a63ffffffff1681526020013a67ffffffffffffffff168152506009600083815260200190815260200160002060008201518160000160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060208201518160000160146101000a81548163ffffffff021916908363ffffffff16021790555060408201518160000160186101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055509050508060048190555050505050505050505050505050565b611a6a611dc4565b6007805463ffffffff9a8b167fffffffffffffffffffffffffffffffffffffffff00000000ffffffff000000009091161768010000000000000000998b168a02179055600880546003979097557fffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000ffff9096166201000060ff988916027fffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffff161763010000009590971694909402959095177fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117909355600680546005949094557fffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffff9187167ffffffffffffffffffffffffffffffffffffffffffff
fffff00000000000000009094169390931764010000000094871694909402939093179290921691909316909102179055565b611bc0611dc4565b6007805463ffffffff909216640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff909216919091179055565b611c07611dc4565b6002546040517f405b84fa0000000000000000000000000000000000000000000000000000000081527f0000000000000000000000000000000000000000000000000000000000000000600482015273ffffffffffffffffffffffffffffffffffffffff83811660248301529091169063405b84fa90604401600060405180830381600087803b158015611c9a57600080fd5b505af1158015611cae573d6000803e3d6000fd5b5050505050565b611cbd611dc4565b611cc681612376565b50565b611cd1611dc4565b6006546040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8481166004830152602482018490526c010000000000000000000000009092049091169063a9059cbb90604401602060405180830381600087803b158015611d5657600080fd5b505af1158015611d6a573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611d8e9190612aa6565b6108d8576040517f7c07fc4c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005473ffffffffffffffffffffffffffffffffffffffff163314611e45576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161088d565b565b60008281526009602081815260408084208151606081018352815473ffffffffffffffffffffffffffffffffffffffff808216835274010000000000000000000000000000000000000000820463ffffffff1683870152780100000000000000000000000000000000000000000000000090910467ffffffffffffffff1693820193909352878652939092529290558051909116611f41576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f72657175657374206e6f7420666f756e64000000000000000000000000000000604482015260640161088d565b600080631fe543e360e01b8585604051602401611f5f929190612f2b565b60405160208183030381529060405
2907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090506000611fd9846020015163ffffffff1685600001518461246c565b90508061202757835160405173ffffffffffffffffffffffffffffffffffffffff9091169087907fc551b83c151f2d1c7eeb938ac59008e0409f1c1dc1e2f112449d4d79b458902290600090a35b505050505050565b600754600090819061204e90640100000000900463ffffffff166124b8565b60075463ffffffff680100000000000000008204811691612070911687612fc8565b61207a9190612fc8565b612084908561318b565b61208e9190612fc8565b60085490915081906000906064906120af9062010000900460ff1682613008565b6120bc9060ff168461318b565b6120c6919061302d565b6006549091506000906120f09068010000000000000000900463ffffffff1664e8d4a5100061318b565b6120fa9083612fc8565b979650505050505050565b600654600754604080517ffeaf968c000000000000000000000000000000000000000000000000000000008152905160009363ffffffff16151592849283926c0100000000000000000000000090920473ffffffffffffffffffffffffffffffffffffffff169163feaf968c9160048082019260a092909190829003018186803b15801561219257600080fd5b505afa1580156121a6573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906121ca9190612e00565b5094509092508491505080156121f057506121e582426131c8565b60065463ffffffff16105b156121fa57506005545b6000811215612265576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f496e76616c6964204c494e4b2077656920707269636500000000000000000000604482015260640161088d565b9392505050565b600754600090819061228b90640100000000900463ffffffff166124b8565b60075463ffffffff6801000000000000000082048116916122ad911688612fc8565b6122b79190612fc8565b6122c1908661318b565b6122cb9190612fc8565b90506000836122e283670de0b6b3a764000061318b565b6122ec919061302d565b60085490915060009060649061230b9062010000900460ff1682613008565b6123189060ff168461318b565b612322919061302d565b60065490915060009061234890640100000000900463ffffffff1664e8d4a5100061318b565b6123529083612fc8565b9897505
0505050505050565b600061236b603f83613041565b610bb9906001612fe0565b73ffffffffffffffffffffffffffffffffffffffff81163314156123f6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161088d565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60005a61138881101561247e57600080fd5b61138881039050846040820482031161249657600080fd5b50823b6124a257600080fd5b60008083516020850160008789f1949350505050565b6000466124c481612588565b15612568576000606c73ffffffffffffffffffffffffffffffffffffffff166341b247a86040518163ffffffff1660e01b815260040160c06040518083038186803b15801561251257600080fd5b505afa158015612526573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061254a9190612c00565b5050505091505083608c61255e9190612fc8565b610ffd908261318b565b612571816125ab565b1561257f57610cbd836125e5565b50600092915050565b600061a4b182148061259c575062066eed82145b80610bb957505062066eee1490565b6000600a8214806125bd57506101a482145b806125ca575062aa37dc82145b806125d6575061210582145b80610bb957505062014a331490565b60008073420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff1663519b4bd36040518163ffffffff1660e01b815260040160206040518083038186803b15801561264257600080fd5b505afa158015612656573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061267a9190612b2e565b905060008061268981866131c8565b9050600061269882601061318b565b6126a384600461318b565b6126ad9190612fc8565b9050600073420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff16630c18c1626040518163ffffffff1660e01b815260040160206040518083038186803b15801561270b57600080fd5b505afa15801561271f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612743919
0612b2e565b9050600073420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff1663f45e65d86040518163ffffffff1660e01b815260040160206040518083038186803b1580156127a157600080fd5b505afa1580156127b5573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906127d99190612b2e565b9050600073420000000000000000000000000000000000000f73ffffffffffffffffffffffffffffffffffffffff1663313ce5676040518163ffffffff1660e01b815260040160206040518083038186803b15801561283757600080fd5b505afa15801561284b573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061286f9190612b2e565b9050600061287e82600a6130c5565b90506000818461288e8789612fc8565b612898908c61318b565b6128a2919061318b565b6128ac919061302d565b9b9a5050505050505050505050565b803573ffffffffffffffffffffffffffffffffffffffff81168114610cc157600080fd5b60008083601f8401126128f157600080fd5b50813567ffffffffffffffff81111561290957600080fd5b60208301915083602082850101111561292157600080fd5b9250929050565b600082601f83011261293957600080fd5b813567ffffffffffffffff8111156129535761295361326c565b61298460207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601612f79565b81815284602083860101111561299957600080fd5b816020850160208301376000918101602001919091529392505050565b803561ffff81168114610cc157600080fd5b803563ffffffff81168114610cc157600080fd5b803560ff81168114610cc157600080fd5b805169ffffffffffffffffffff81168114610cc157600080fd5b600060208284031215612a1957600080fd5b610bb6826128bb565b60008060408385031215612a3557600080fd5b612a3e836128bb565b946020939093013593505050565b60008060008060608587031215612a6257600080fd5b612a6b856128bb565b935060208501359250604085013567ffffffffffffffff811115612a8e57600080fd5b612a9a878288016128df565b95989497509550505050565b600060208284031215612ab857600080fd5b81516122658161329b565b60008060408385031215612ad657600080fd5b823567ffffffffffffffff811115612aed57600080fd5b612af985828601612928565b9250506020830135612b0a8161329b565b809150509250929050565b600060208284031215612b2757600080f
d5b5035919050565b600060208284031215612b4057600080fd5b5051919050565b60008060408385031215612b5a57600080fd5b8235915060208084013567ffffffffffffffff80821115612b7a57600080fd5b818601915086601f830112612b8e57600080fd5b813581811115612ba057612ba061326c565b8060051b9150612bb1848301612f79565b8181528481019084860184860187018b1015612bcc57600080fd5b600095505b83861015612bef578035835260019590950194918601918601612bd1565b508096505050505050509250929050565b60008060008060008060c08789031215612c1957600080fd5b865195506020870151945060408701519350606087015192506080870151915060a087015190509295509295509295565b600060208284031215612c5c57600080fd5b610bb6826129c8565b600080600080600060808688031215612c7d57600080fd5b612c86866129c8565b9450612c94602087016129b6565b9350612ca2604087016129c8565b9250606086013567ffffffffffffffff811115612cbe57600080fd5b612cca888289016128df565b969995985093965092949392505050565b60008060008060808587031215612cf157600080fd5b612cfa856129c8565b9350612d08602086016129b6565b9250612d16604086016129c8565b9150606085013567ffffffffffffffff811115612d3257600080fd5b612d3e87828801612928565b91505092959194509250565b60008060408385031215612d5d57600080fd5b612a3e836129c8565b60008060008060008060008060006101208a8c031215612d8557600080fd5b612d8e8a6129c8565b9850612d9c60208b016129c8565b9750612daa60408b016129dc565b965060608a01359550612dbf60808b016129dc565b9450612dcd60a08b016129c8565b935060c08a01359250612de260e08b016129c8565b9150612df16101008b016129c8565b90509295985092959850929598565b600080600080600060a08688031215612e1857600080fd5b612e21866129ed565b9450602086015193506040860151925060608601519150612e44608087016129ed565b90509295509295909350565b6000815180845260005b81811015612e7657602081850181015186830182015201612e5a565b81811115612e88576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000610bb66020830184612e50565b60208152815160208201526020820151604082015261ffff60408301511660608201526000606083015163ffffffff80821660808501528060808601511660a
0850152505060a083015160c080840152610ffd60e0840182612e50565b6000604082018483526020604081850152818551808452606086019150828701935060005b81811015612f6c57845183529383019391830191600101612f50565b5090979650505050505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715612fc057612fc061326c565b604052919050565b60008219821115612fdb57612fdb6131df565b500190565b600063ffffffff808316818516808303821115612fff57612fff6131df565b01949350505050565b600060ff821660ff84168060ff03821115613025576130256131df565b019392505050565b60008261303c5761303c61320e565b500490565b600063ffffffff808416806130585761305861320e565b92169190910492915050565b600181815b808511156130bd57817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff048211156130a3576130a36131df565b808516156130b057918102915b93841c9390800290613069565b509250929050565b6000610bb683836000826130db57506001610bb9565b816130e857506000610bb9565b81600181146130fe576002811461310857613124565b6001915050610bb9565b60ff841115613119576131196131df565b50506001821b610bb9565b5060208310610133831016604e8410600b8410161715613147575081810a610bb9565b6131518383613064565b807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04821115613183576131836131df565b029392505050565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04831182151516156131c3576131c36131df565b500290565b6000828210156131da576131da6131df565b500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b8015158114611cc657600080fdfea164736f6c6343000806000a", +} + +var VRFV2PlusWrapperABI = VRFV2PlusWrapperMetaData.ABI + +var VRFV2PlusWrapperBin = VRFV2PlusWrapperMetaData.Bin + 
+func DeployVRFV2PlusWrapper(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address, _linkNativeFeed common.Address, _coordinator common.Address) (common.Address, *types.Transaction, *VRFV2PlusWrapper, error) { + parsed, err := VRFV2PlusWrapperMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2PlusWrapperBin), backend, _link, _linkNativeFeed, _coordinator) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2PlusWrapper{address: address, abi: *parsed, VRFV2PlusWrapperCaller: VRFV2PlusWrapperCaller{contract: contract}, VRFV2PlusWrapperTransactor: VRFV2PlusWrapperTransactor{contract: contract}, VRFV2PlusWrapperFilterer: VRFV2PlusWrapperFilterer{contract: contract}}, nil +} + +type VRFV2PlusWrapper struct { + address common.Address + abi abi.ABI + VRFV2PlusWrapperCaller + VRFV2PlusWrapperTransactor + VRFV2PlusWrapperFilterer +} + +type VRFV2PlusWrapperCaller struct { + contract *bind.BoundContract +} + +type VRFV2PlusWrapperTransactor struct { + contract *bind.BoundContract +} + +type VRFV2PlusWrapperFilterer struct { + contract *bind.BoundContract +} + +type VRFV2PlusWrapperSession struct { + Contract *VRFV2PlusWrapper + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2PlusWrapperCallerSession struct { + Contract *VRFV2PlusWrapperCaller + CallOpts bind.CallOpts +} + +type VRFV2PlusWrapperTransactorSession struct { + Contract *VRFV2PlusWrapperTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2PlusWrapperRaw struct { + Contract *VRFV2PlusWrapper +} + +type VRFV2PlusWrapperCallerRaw struct { + Contract *VRFV2PlusWrapperCaller +} + +type VRFV2PlusWrapperTransactorRaw struct { + Contract *VRFV2PlusWrapperTransactor +} + +func NewVRFV2PlusWrapper(address common.Address, 
backend bind.ContractBackend) (*VRFV2PlusWrapper, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2PlusWrapperABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2PlusWrapper(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapper{address: address, abi: abi, VRFV2PlusWrapperCaller: VRFV2PlusWrapperCaller{contract: contract}, VRFV2PlusWrapperTransactor: VRFV2PlusWrapperTransactor{contract: contract}, VRFV2PlusWrapperFilterer: VRFV2PlusWrapperFilterer{contract: contract}}, nil +} + +func NewVRFV2PlusWrapperCaller(address common.Address, caller bind.ContractCaller) (*VRFV2PlusWrapperCaller, error) { + contract, err := bindVRFV2PlusWrapper(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperCaller{contract: contract}, nil +} + +func NewVRFV2PlusWrapperTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2PlusWrapperTransactor, error) { + contract, err := bindVRFV2PlusWrapper(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperTransactor{contract: contract}, nil +} + +func NewVRFV2PlusWrapperFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2PlusWrapperFilterer, error) { + contract, err := bindVRFV2PlusWrapper(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperFilterer{contract: contract}, nil +} + +func bindVRFV2PlusWrapper(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2PlusWrapperMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return 
_VRFV2PlusWrapper.Contract.VRFV2PlusWrapperCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.VRFV2PlusWrapperTransactor.contract.Transfer(opts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.VRFV2PlusWrapperTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusWrapper.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.contract.Transfer(opts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) SUBSCRIPTIONID(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "SUBSCRIPTION_ID") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SUBSCRIPTIONID() (*big.Int, error) { + return _VRFV2PlusWrapper.Contract.SUBSCRIPTIONID(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) SUBSCRIPTIONID() (*big.Int, error) { + return _VRFV2PlusWrapper.Contract.SUBSCRIPTIONID(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) CalculateRequestPrice(opts *bind.CallOpts, _callbackGasLimit uint32) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "calculateRequestPrice", _callbackGasLimit) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) CalculateRequestPrice(_callbackGasLimit uint32) (*big.Int, error) { + return _VRFV2PlusWrapper.Contract.CalculateRequestPrice(&_VRFV2PlusWrapper.CallOpts, _callbackGasLimit) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) CalculateRequestPrice(_callbackGasLimit uint32) (*big.Int, error) { + return _VRFV2PlusWrapper.Contract.CalculateRequestPrice(&_VRFV2PlusWrapper.CallOpts, _callbackGasLimit) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) CalculateRequestPriceNative(opts *bind.CallOpts, _callbackGasLimit uint32) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "calculateRequestPriceNative", _callbackGasLimit) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func 
(_VRFV2PlusWrapper *VRFV2PlusWrapperSession) CalculateRequestPriceNative(_callbackGasLimit uint32) (*big.Int, error) { + return _VRFV2PlusWrapper.Contract.CalculateRequestPriceNative(&_VRFV2PlusWrapper.CallOpts, _callbackGasLimit) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) CalculateRequestPriceNative(_callbackGasLimit uint32) (*big.Int, error) { + return _VRFV2PlusWrapper.Contract.CalculateRequestPriceNative(&_VRFV2PlusWrapper.CallOpts, _callbackGasLimit) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) CheckPaymentMode(opts *bind.CallOpts, extraArgs []byte, isLinkMode bool) error { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "checkPaymentMode", extraArgs, isLinkMode) + + if err != nil { + return err + } + + return err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) CheckPaymentMode(extraArgs []byte, isLinkMode bool) error { + return _VRFV2PlusWrapper.Contract.CheckPaymentMode(&_VRFV2PlusWrapper.CallOpts, extraArgs, isLinkMode) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) CheckPaymentMode(extraArgs []byte, isLinkMode bool) error { + return _VRFV2PlusWrapper.Contract.CheckPaymentMode(&_VRFV2PlusWrapper.CallOpts, extraArgs, isLinkMode) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) EstimateRequestPrice(opts *bind.CallOpts, _callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "estimateRequestPrice", _callbackGasLimit, _requestGasPriceWei) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) EstimateRequestPrice(_callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) { + return _VRFV2PlusWrapper.Contract.EstimateRequestPrice(&_VRFV2PlusWrapper.CallOpts, _callbackGasLimit, _requestGasPriceWei) +} + +func (_VRFV2PlusWrapper 
*VRFV2PlusWrapperCallerSession) EstimateRequestPrice(_callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) { + return _VRFV2PlusWrapper.Contract.EstimateRequestPrice(&_VRFV2PlusWrapper.CallOpts, _callbackGasLimit, _requestGasPriceWei) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) EstimateRequestPriceNative(opts *bind.CallOpts, _callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "estimateRequestPriceNative", _callbackGasLimit, _requestGasPriceWei) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) EstimateRequestPriceNative(_callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) { + return _VRFV2PlusWrapper.Contract.EstimateRequestPriceNative(&_VRFV2PlusWrapper.CallOpts, _callbackGasLimit, _requestGasPriceWei) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) EstimateRequestPriceNative(_callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) { + return _VRFV2PlusWrapper.Contract.EstimateRequestPriceNative(&_VRFV2PlusWrapper.CallOpts, _callbackGasLimit, _requestGasPriceWei) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) GetConfig(opts *bind.CallOpts) (GetConfig, + + error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "getConfig") + + outstruct := new(GetConfig) + if err != nil { + return *outstruct, err + } + + outstruct.FallbackWeiPerUnitLink = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.StalenessSeconds = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeLinkPPM = *abi.ConvertType(out[2], new(uint32)).(*uint32) + outstruct.FulfillmentFlatFeeNativePPM = *abi.ConvertType(out[3], new(uint32)).(*uint32) + outstruct.WrapperGasOverhead = *abi.ConvertType(out[4], 
new(uint32)).(*uint32) + outstruct.CoordinatorGasOverhead = *abi.ConvertType(out[5], new(uint32)).(*uint32) + outstruct.WrapperPremiumPercentage = *abi.ConvertType(out[6], new(uint8)).(*uint8) + outstruct.KeyHash = *abi.ConvertType(out[7], new([32]byte)).(*[32]byte) + outstruct.MaxNumWords = *abi.ConvertType(out[8], new(uint8)).(*uint8) + + return *outstruct, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) GetConfig() (GetConfig, + + error) { + return _VRFV2PlusWrapper.Contract.GetConfig(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) GetConfig() (GetConfig, + + error) { + return _VRFV2PlusWrapper.Contract.GetConfig(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) LastRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "lastRequestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) LastRequestId() (*big.Int, error) { + return _VRFV2PlusWrapper.Contract.LastRequestId(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) LastRequestId() (*big.Int, error) { + return _VRFV2PlusWrapper.Contract.LastRequestId(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) Owner() (common.Address, error) { + return _VRFV2PlusWrapper.Contract.Owner(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) Owner() 
(common.Address, error) { + return _VRFV2PlusWrapper.Contract.Owner(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) SCallbacks(opts *bind.CallOpts, arg0 *big.Int) (SCallbacks, + + error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "s_callbacks", arg0) + + outstruct := new(SCallbacks) + if err != nil { + return *outstruct, err + } + + outstruct.CallbackAddress = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + outstruct.CallbackGasLimit = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.RequestGasPrice = *abi.ConvertType(out[2], new(uint64)).(*uint64) + + return *outstruct, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SCallbacks(arg0 *big.Int) (SCallbacks, + + error) { + return _VRFV2PlusWrapper.Contract.SCallbacks(&_VRFV2PlusWrapper.CallOpts, arg0) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) SCallbacks(arg0 *big.Int) (SCallbacks, + + error) { + return _VRFV2PlusWrapper.Contract.SCallbacks(&_VRFV2PlusWrapper.CallOpts, arg0) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) SConfigured(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "s_configured") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SConfigured() (bool, error) { + return _VRFV2PlusWrapper.Contract.SConfigured(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) SConfigured() (bool, error) { + return _VRFV2PlusWrapper.Contract.SConfigured(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) SDisabled(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "s_disabled") + + if err != nil { + return *new(bool), err + } + + out0 := 
*abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SDisabled() (bool, error) { + return _VRFV2PlusWrapper.Contract.SDisabled(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) SDisabled() (bool, error) { + return _VRFV2PlusWrapper.Contract.SDisabled(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) SFulfillmentTxSizeBytes(opts *bind.CallOpts) (uint32, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "s_fulfillmentTxSizeBytes") + + if err != nil { + return *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SFulfillmentTxSizeBytes() (uint32, error) { + return _VRFV2PlusWrapper.Contract.SFulfillmentTxSizeBytes(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) SFulfillmentTxSizeBytes() (uint32, error) { + return _VRFV2PlusWrapper.Contract.SFulfillmentTxSizeBytes(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) SLink(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "s_link") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SLink() (common.Address, error) { + return _VRFV2PlusWrapper.Contract.SLink(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) SLink() (common.Address, error) { + return _VRFV2PlusWrapper.Contract.SLink(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) SLinkNativeFeed(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := 
_VRFV2PlusWrapper.contract.Call(opts, &out, "s_linkNativeFeed") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SLinkNativeFeed() (common.Address, error) { + return _VRFV2PlusWrapper.Contract.SLinkNativeFeed(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) SLinkNativeFeed() (common.Address, error) { + return _VRFV2PlusWrapper.Contract.SLinkNativeFeed(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "s_vrfCoordinator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusWrapper.Contract.SVrfCoordinator(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) SVrfCoordinator() (common.Address, error) { + return _VRFV2PlusWrapper.Contract.SVrfCoordinator(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _VRFV2PlusWrapper.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) TypeAndVersion() (string, error) { + return _VRFV2PlusWrapper.Contract.TypeAndVersion(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperCallerSession) TypeAndVersion() (string, error) { + return 
_VRFV2PlusWrapper.Contract.TypeAndVersion(&_VRFV2PlusWrapper.CallOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.AcceptOwnership(&_VRFV2PlusWrapper.TransactOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.AcceptOwnership(&_VRFV2PlusWrapper.TransactOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) Disable(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "disable") +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) Disable() (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.Disable(&_VRFV2PlusWrapper.TransactOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) Disable() (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.Disable(&_VRFV2PlusWrapper.TransactOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) Enable(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "enable") +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) Enable() (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.Enable(&_VRFV2PlusWrapper.TransactOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) Enable() (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.Enable(&_VRFV2PlusWrapper.TransactOpts) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) Migrate(opts *bind.TransactOpts, newCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "migrate", newCoordinator) +} + +func (_VRFV2PlusWrapper 
*VRFV2PlusWrapperSession) Migrate(newCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.Migrate(&_VRFV2PlusWrapper.TransactOpts, newCoordinator) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) Migrate(newCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.Migrate(&_VRFV2PlusWrapper.TransactOpts, newCoordinator) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) OnTokenTransfer(opts *bind.TransactOpts, _sender common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "onTokenTransfer", _sender, _amount, _data) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) OnTokenTransfer(_sender common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.OnTokenTransfer(&_VRFV2PlusWrapper.TransactOpts, _sender, _amount, _data) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) OnTokenTransfer(_sender common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.OnTokenTransfer(&_VRFV2PlusWrapper.TransactOpts, _sender, _amount, _data) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "rawFulfillRandomWords", requestId, randomWords) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.RawFulfillRandomWords(&_VRFV2PlusWrapper.TransactOpts, requestId, randomWords) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) RawFulfillRandomWords(requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) { + return 
_VRFV2PlusWrapper.Contract.RawFulfillRandomWords(&_VRFV2PlusWrapper.TransactOpts, requestId, randomWords) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) RequestRandomWordsInNative(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, extraArgs []byte) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "requestRandomWordsInNative", _callbackGasLimit, _requestConfirmations, _numWords, extraArgs) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) RequestRandomWordsInNative(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, extraArgs []byte) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.RequestRandomWordsInNative(&_VRFV2PlusWrapper.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords, extraArgs) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) RequestRandomWordsInNative(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, extraArgs []byte) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.RequestRandomWordsInNative(&_VRFV2PlusWrapper.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords, extraArgs) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) SetConfig(opts *bind.TransactOpts, _wrapperGasOverhead uint32, _coordinatorGasOverhead uint32, _wrapperPremiumPercentage uint8, _keyHash [32]byte, _maxNumWords uint8, _stalenessSeconds uint32, _fallbackWeiPerUnitLink *big.Int, _fulfillmentFlatFeeLinkPPM uint32, _fulfillmentFlatFeeNativePPM uint32) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "setConfig", _wrapperGasOverhead, _coordinatorGasOverhead, _wrapperPremiumPercentage, _keyHash, _maxNumWords, _stalenessSeconds, _fallbackWeiPerUnitLink, _fulfillmentFlatFeeLinkPPM, _fulfillmentFlatFeeNativePPM) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SetConfig(_wrapperGasOverhead uint32, _coordinatorGasOverhead 
uint32, _wrapperPremiumPercentage uint8, _keyHash [32]byte, _maxNumWords uint8, _stalenessSeconds uint32, _fallbackWeiPerUnitLink *big.Int, _fulfillmentFlatFeeLinkPPM uint32, _fulfillmentFlatFeeNativePPM uint32) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.SetConfig(&_VRFV2PlusWrapper.TransactOpts, _wrapperGasOverhead, _coordinatorGasOverhead, _wrapperPremiumPercentage, _keyHash, _maxNumWords, _stalenessSeconds, _fallbackWeiPerUnitLink, _fulfillmentFlatFeeLinkPPM, _fulfillmentFlatFeeNativePPM) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) SetConfig(_wrapperGasOverhead uint32, _coordinatorGasOverhead uint32, _wrapperPremiumPercentage uint8, _keyHash [32]byte, _maxNumWords uint8, _stalenessSeconds uint32, _fallbackWeiPerUnitLink *big.Int, _fulfillmentFlatFeeLinkPPM uint32, _fulfillmentFlatFeeNativePPM uint32) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.SetConfig(&_VRFV2PlusWrapper.TransactOpts, _wrapperGasOverhead, _coordinatorGasOverhead, _wrapperPremiumPercentage, _keyHash, _maxNumWords, _stalenessSeconds, _fallbackWeiPerUnitLink, _fulfillmentFlatFeeLinkPPM, _fulfillmentFlatFeeNativePPM) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "setCoordinator", _vrfCoordinator) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.SetCoordinator(&_VRFV2PlusWrapper.TransactOpts, _vrfCoordinator) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) SetCoordinator(_vrfCoordinator common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.SetCoordinator(&_VRFV2PlusWrapper.TransactOpts, _vrfCoordinator) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) SetFulfillmentTxSize(opts *bind.TransactOpts, 
size uint32) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "setFulfillmentTxSize", size) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SetFulfillmentTxSize(size uint32) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.SetFulfillmentTxSize(&_VRFV2PlusWrapper.TransactOpts, size) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) SetFulfillmentTxSize(size uint32) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.SetFulfillmentTxSize(&_VRFV2PlusWrapper.TransactOpts, size) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) SetPLI(opts *bind.TransactOpts, link common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "setPLI", link) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SetPLI(link common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.SetPLI(&_VRFV2PlusWrapper.TransactOpts, link) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) SetPLI(link common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.SetPLI(&_VRFV2PlusWrapper.TransactOpts, link) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) SetLinkNativeFeed(opts *bind.TransactOpts, linkNativeFeed common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "setLinkNativeFeed", linkNativeFeed) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) SetLinkNativeFeed(linkNativeFeed common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.SetLinkNativeFeed(&_VRFV2PlusWrapper.TransactOpts, linkNativeFeed) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) SetLinkNativeFeed(linkNativeFeed common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.SetLinkNativeFeed(&_VRFV2PlusWrapper.TransactOpts, linkNativeFeed) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) TransferOwnership(opts 
*bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.TransferOwnership(&_VRFV2PlusWrapper.TransactOpts, to) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.TransferOwnership(&_VRFV2PlusWrapper.TransactOpts, to) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) Withdraw(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "withdraw", _recipient, _amount) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) Withdraw(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.Withdraw(&_VRFV2PlusWrapper.TransactOpts, _recipient, _amount) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) Withdraw(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.Withdraw(&_VRFV2PlusWrapper.TransactOpts, _recipient, _amount) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactor) WithdrawNative(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapper.contract.Transact(opts, "withdrawNative", _recipient, _amount) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperSession) WithdrawNative(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapper.Contract.WithdrawNative(&_VRFV2PlusWrapper.TransactOpts, _recipient, _amount) +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperTransactorSession) WithdrawNative(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + 
return _VRFV2PlusWrapper.Contract.WithdrawNative(&_VRFV2PlusWrapper.TransactOpts, _recipient, _amount) +} + +type VRFV2PlusWrapperOwnershipTransferRequestedIterator struct { + Event *VRFV2PlusWrapperOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusWrapperOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusWrapperOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusWrapperOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusWrapperOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusWrapperOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusWrapper.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + 
return nil, err + } + return &VRFV2PlusWrapperOwnershipTransferRequestedIterator{contract: _VRFV2PlusWrapper.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusWrapper.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusWrapperOwnershipTransferRequested) + if err := _VRFV2PlusWrapper.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusWrapperOwnershipTransferRequested, error) { + event := new(VRFV2PlusWrapperOwnershipTransferRequested) + if err := _VRFV2PlusWrapper.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusWrapperOwnershipTransferredIterator struct { + Event *VRFV2PlusWrapperOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusWrapperOwnershipTransferredIterator) Next() 
bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusWrapperOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusWrapperOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusWrapperOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusWrapperOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusWrapper.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperOwnershipTransferredIterator{contract: _VRFV2PlusWrapper.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + 
fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusWrapper.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusWrapperOwnershipTransferred) + if err := _VRFV2PlusWrapper.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2PlusWrapperOwnershipTransferred, error) { + event := new(VRFV2PlusWrapperOwnershipTransferred) + if err := _VRFV2PlusWrapper.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusWrapperWrapperFulfillmentFailedIterator struct { + Event *VRFV2PlusWrapperWrapperFulfillmentFailed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusWrapperWrapperFulfillmentFailedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperWrapperFulfillmentFailed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperWrapperFulfillmentFailed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = 
err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusWrapperWrapperFulfillmentFailedIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusWrapperWrapperFulfillmentFailedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusWrapperWrapperFulfillmentFailed struct { + RequestId *big.Int + Consumer common.Address + Raw types.Log +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperFilterer) FilterWrapperFulfillmentFailed(opts *bind.FilterOpts, requestId []*big.Int, consumer []common.Address) (*VRFV2PlusWrapperWrapperFulfillmentFailedIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var consumerRule []interface{} + for _, consumerItem := range consumer { + consumerRule = append(consumerRule, consumerItem) + } + + logs, sub, err := _VRFV2PlusWrapper.contract.FilterLogs(opts, "WrapperFulfillmentFailed", requestIdRule, consumerRule) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperWrapperFulfillmentFailedIterator{contract: _VRFV2PlusWrapper.contract, event: "WrapperFulfillmentFailed", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperFilterer) WatchWrapperFulfillmentFailed(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperWrapperFulfillmentFailed, requestId []*big.Int, consumer []common.Address) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + var consumerRule []interface{} + for _, consumerItem := range consumer { + consumerRule = append(consumerRule, consumerItem) + } + + logs, sub, err := _VRFV2PlusWrapper.contract.WatchLogs(opts, "WrapperFulfillmentFailed", requestIdRule, consumerRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusWrapperWrapperFulfillmentFailed) + if err := _VRFV2PlusWrapper.contract.UnpackLog(event, "WrapperFulfillmentFailed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapperFilterer) ParseWrapperFulfillmentFailed(log types.Log) (*VRFV2PlusWrapperWrapperFulfillmentFailed, error) { + event := new(VRFV2PlusWrapperWrapperFulfillmentFailed) + if err := _VRFV2PlusWrapper.contract.UnpackLog(event, "WrapperFulfillmentFailed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetConfig struct { + FallbackWeiPerUnitLink *big.Int + StalenessSeconds uint32 + FulfillmentFlatFeeLinkPPM uint32 + FulfillmentFlatFeeNativePPM uint32 + WrapperGasOverhead uint32 + CoordinatorGasOverhead uint32 + WrapperPremiumPercentage uint8 + KeyHash [32]byte + MaxNumWords uint8 +} +type SCallbacks struct { + CallbackAddress common.Address + CallbackGasLimit uint32 + RequestGasPrice uint64 +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapper) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2PlusWrapper.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2PlusWrapper.ParseOwnershipTransferRequested(log) + case _VRFV2PlusWrapper.abi.Events["OwnershipTransferred"].ID: + return _VRFV2PlusWrapper.ParseOwnershipTransferred(log) + case _VRFV2PlusWrapper.abi.Events["WrapperFulfillmentFailed"].ID: + return _VRFV2PlusWrapper.ParseWrapperFulfillmentFailed(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2PlusWrapperOwnershipTransferRequested) Topic() common.Hash { + 
return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFV2PlusWrapperOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFV2PlusWrapperWrapperFulfillmentFailed) Topic() common.Hash { + return common.HexToHash("0xc551b83c151f2d1c7eeb938ac59008e0409f1c1dc1e2f112449d4d79b4589022") +} + +func (_VRFV2PlusWrapper *VRFV2PlusWrapper) Address() common.Address { + return _VRFV2PlusWrapper.address +} + +type VRFV2PlusWrapperInterface interface { + SUBSCRIPTIONID(opts *bind.CallOpts) (*big.Int, error) + + CalculateRequestPrice(opts *bind.CallOpts, _callbackGasLimit uint32) (*big.Int, error) + + CalculateRequestPriceNative(opts *bind.CallOpts, _callbackGasLimit uint32) (*big.Int, error) + + CheckPaymentMode(opts *bind.CallOpts, extraArgs []byte, isLinkMode bool) error + + EstimateRequestPrice(opts *bind.CallOpts, _callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) + + EstimateRequestPriceNative(opts *bind.CallOpts, _callbackGasLimit uint32, _requestGasPriceWei *big.Int) (*big.Int, error) + + GetConfig(opts *bind.CallOpts) (GetConfig, + + error) + + LastRequestId(opts *bind.CallOpts) (*big.Int, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SCallbacks(opts *bind.CallOpts, arg0 *big.Int) (SCallbacks, + + error) + + SConfigured(opts *bind.CallOpts) (bool, error) + + SDisabled(opts *bind.CallOpts) (bool, error) + + SFulfillmentTxSizeBytes(opts *bind.CallOpts) (uint32, error) + + SLink(opts *bind.CallOpts) (common.Address, error) + + SLinkNativeFeed(opts *bind.CallOpts) (common.Address, error) + + SVrfCoordinator(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + Disable(opts *bind.TransactOpts) (*types.Transaction, error) + + Enable(opts *bind.TransactOpts) 
(*types.Transaction, error) + + Migrate(opts *bind.TransactOpts, newCoordinator common.Address) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, _sender common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestId *big.Int, randomWords []*big.Int) (*types.Transaction, error) + + RequestRandomWordsInNative(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, extraArgs []byte) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, _wrapperGasOverhead uint32, _coordinatorGasOverhead uint32, _wrapperPremiumPercentage uint8, _keyHash [32]byte, _maxNumWords uint8, _stalenessSeconds uint32, _fallbackWeiPerUnitLink *big.Int, _fulfillmentFlatFeeLinkPPM uint32, _fulfillmentFlatFeeNativePPM uint32) (*types.Transaction, error) + + SetCoordinator(opts *bind.TransactOpts, _vrfCoordinator common.Address) (*types.Transaction, error) + + SetFulfillmentTxSize(opts *bind.TransactOpts, size uint32) (*types.Transaction, error) + + SetPLI(opts *bind.TransactOpts, link common.Address) (*types.Transaction, error) + + SetLinkNativeFeed(opts *bind.TransactOpts, linkNativeFeed common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Withdraw(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) + + WithdrawNative(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusWrapperOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) 
(*VRFV2PlusWrapperOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusWrapperOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2PlusWrapperOwnershipTransferred, error) + + FilterWrapperFulfillmentFailed(opts *bind.FilterOpts, requestId []*big.Int, consumer []common.Address) (*VRFV2PlusWrapperWrapperFulfillmentFailedIterator, error) + + WatchWrapperFulfillmentFailed(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperWrapperFulfillmentFailed, requestId []*big.Int, consumer []common.Address) (event.Subscription, error) + + ParseWrapperFulfillmentFailed(log types.Log) (*VRFV2PlusWrapperWrapperFulfillmentFailed, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2plus_wrapper_consumer_example/vrfv2plus_wrapper_consumer_example.go b/core/gethwrappers/generated/vrfv2plus_wrapper_consumer_example/vrfv2plus_wrapper_consumer_example.go new file mode 100644 index 00000000..6cfc0065 --- /dev/null +++ b/core/gethwrappers/generated/vrfv2plus_wrapper_consumer_example/vrfv2plus_wrapper_consumer_example.go @@ -0,0 +1,1046 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrfv2plus_wrapper_consumer_example + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2PlusWrapperConsumerExampleMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_vrfV2Wrapper\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"PLIAlreadySet\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyVRFWrapperCanFulfill\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"},{\"indexed\":false,\"
internalType\":\"uint256\",\"name\":\"payment\",\"type\":\"uint256\"}],\"name\":\"WrappedRequestFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"}],\"name\":\"WrapperRequestMade\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getBalance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkToken\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_requestId\",\"type\":\"uint256\"}],\"name\":\"getRequestStatus\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_vrfV2PlusWrapper\",\"outputs\":[{\"internalType\":\"contractIVRFV2PlusWrapper\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"_numWords\",\"type\":\"uint32\"}],\"name\":\"makeRequest\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"i
nternalType\":\"uint16\",\"name\":\"_requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"_numWords\",\"type\":\"uint32\"}],\"name\":\"makeRequestNative\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"_randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requests\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"native\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"name\":\"setLinkToken\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"withdrawLink\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"withdrawNative\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60a06040523480156200001157600080fd5b5060405162001743380380620017438339810160408190526200003491620001db565b3380600084846001600160a01b038216156200006657600080546001600160a01b0319166001600160a01b0384161790555b60601b6001600160601b031916608052506001600160a01b038216620000d35760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600180546001600160a01b0319166001600160a01b03848116919091179091558116156200010657620001068162000111565b505050505062000213565b6001600160a01b0381163314156200016c5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620000ca565b600280546001600160a01b0319166001600160a01b03838116918217909255600154604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b80516001600160a01b0381168114620001d657600080fd5b919050565b60008060408385031215620001ef57600080fd5b620001fa83620001be565b91506200020a60208401620001be565b90509250929050565b60805160601c6114e76200025c600039600081816101c80152818161049701528181610b7001528181610c1201528181610cca01528181610dbf0152610e3d01526114e76000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c80638da5cb5b1161008c578063a168fa8911610066578063a168fa89146101ea578063d8a4676f1461023c578063e76d51681461025e578063f2fde38b1461027c57600080fd5b80638da5cb5b146101715780639c24ea40146101b05780639ed0868d146101c357600080fd5b80631fe543e3116100c85780631fe543e31461012e57806379ba5097146101435780637a8042bd1461014b57806384276d811461015e57600080fd5b80630c09b832146100ef57806312065fe0146101155780631e1a34991461011b575b600080fd5b6101026100fd3660046112f4565b61028f565b6040519081526020015b60405180910390f35b47610102565b6101026101293660046112f4565b6103cc565b61014161013c366004611205565b610495565b005b610141610537565b6101416101593660046111d3565b610638565b61014161016c3660046111d3565b610726565b60015473ffffffffffffffffffffffffffffffffffffffff1
65b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161010c565b6101416101be366004611174565b610816565b61018b7f000000000000000000000000000000000000000000000000000000000000000081565b61021f6101f83660046111d3565b600360208190526000918252604090912080546001820154919092015460ff918216911683565b60408051938452911515602084015215159082015260600161010c565b61024f61024a3660046111d3565b6108ad565b60405161010c9392919061144d565b60005473ffffffffffffffffffffffffffffffffffffffff1661018b565b61014161028a366004611174565b6109cf565b60006102996109e3565b60006102b5604051806020016040528060001515815250610a66565b905060006102c586868685610b22565b6040805160808101825282815260006020808301828152845183815280830186528486019081526060850184905287845260038352949092208351815591516001830180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001691151591909117905592518051959850939550909390926103549260028501929101906110fb565b5060609190910151600390910180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001691151591909117905560405181815283907f5f56b4c20db9f5b294cbf6f681368de4a992a27e2de2ee702dcf2cbbfa791ec49060200160405180910390a250509392505050565b60006103d66109e3565b60006103f2604051806020016040528060011515815250610a66565b9050600061040286868685610d71565b604080516080810182528281526000602080830182815284518381528083018652848601908152600160608601819052888552600384529590932084518155905194810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001695151595909517909455905180519598509395509093919261035492600285019291909101906110fb565b7f00000000000000000000000000000000000000000000000000000000000000003373ffffffffffffffffffffffffffffffffffffffff821614610528576040517f8ba9316e00000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff821660248201526044015b60405180910390fd5b6105328383610eed565b505050565b60025473ffffffffffffffffffffffffffffffffffffffff1633146105b8576040517f08c379a0000000000000000000000000000000000000000
00000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015260640161051f565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000008082163390811790935560028054909116905560405173ffffffffffffffffffffffffffffffffffffffff909116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a350565b6106406109e3565b60005473ffffffffffffffffffffffffffffffffffffffff1663a9059cbb61067d60015473ffffffffffffffffffffffffffffffffffffffff1690565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e084901b16815273ffffffffffffffffffffffffffffffffffffffff909116600482015260248101849052604401602060405180830381600087803b1580156106ea57600080fd5b505af11580156106fe573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061072291906111b1565b5050565b61072e6109e3565b600061074f60015473ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff168260405160006040518083038185875af1925050503d80600081146107a6576040519150601f19603f3d011682016040523d82523d6000602084013e6107ab565b606091505b5050905080610722576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f77697468647261774e6174697665206661696c65640000000000000000000000604482015260640161051f565b60005473ffffffffffffffffffffffffffffffffffffffff1615610866576040517f64f778ae00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b6000818152600360205260408120548190606090610927576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f72657175657374206e6f7420666f756e64000000000000000000000000000000604482015260640161051f565b6000848152600360209081526040808320815160808101835281548152600182015460ff16151581850152600282018054845
18187028101870186528181529295939486019383018282801561099c57602002820191906000526020600020905b815481526020019060010190808311610988575b50505091835250506003919091015460ff1615156020918201528151908201516040909201519097919650945092505050565b6109d76109e3565b6109e081611004565b50565b60015473ffffffffffffffffffffffffffffffffffffffff163314610a64576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161051f565b565b60607f92fd13387c7fe7befbc38d303d6468778fb9731bc4583f17d92989c6fcfdeaaa82604051602401610a9f91511515815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009093169290921790915292915050565b6040517f4306d35400000000000000000000000000000000000000000000000000000000815263ffffffff85166004820152600090819073ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001690634306d3549060240160206040518083038186803b158015610bb257600080fd5b505afa158015610bc6573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610bea91906111ec565b60005460405191925073ffffffffffffffffffffffffffffffffffffffff1690634000aea0907f0000000000000000000000000000000000000000000000000000000000000000908490610c48908b908b908b908b9060200161146e565b6040516020818303038152906040526040518463ffffffff1660e01b8152600401610c75939291906113e6565b602060405180830381600087803b158015610c8f57600080fd5b505af1158015610ca3573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610cc791906111b1565b507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663fc2a88c36040518163ffffffff1660e01b815260040160206040518083038186803b158015610d2e57600080fd5b505afa158015610d42573d6000803e3d6000fd5
b505050506040513d601f19601f82011682018060405250810190610d6691906111ec565b915094509492505050565b6040517f4b16093500000000000000000000000000000000000000000000000000000000815263ffffffff85166004820152600090819073ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001690634b1609359060240160206040518083038186803b158015610e0157600080fd5b505afa158015610e15573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610e3991906111ec565b90507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16639cfc058e82888888886040518663ffffffff1660e01b8152600401610e9b949392919061146e565b6020604051808303818588803b158015610eb457600080fd5b505af1158015610ec8573d6000803e3d6000fd5b50505050506040513d601f19601f82011682018060405250810190610d6691906111ec565b600082815260036020526040902054610f62576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f72657175657374206e6f7420666f756e64000000000000000000000000000000604482015260640161051f565b6000828152600360209081526040909120600181810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690911790558251610fb5926002909201918401906110fb565b50600082815260036020526040908190205490517f6c84e12b4c188e61f1b4727024a5cf05c025fa58467e5eedf763c0744c89da7b91610ff89185918591611424565b60405180910390a15050565b73ffffffffffffffffffffffffffffffffffffffff8116331415611084576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161051f565b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff838116918217909255600154604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b828054828255906000526020600020908101928215611136579160200282015b82811115611136578251825591602
00191906001019061111b565b50611142929150611146565b5090565b5b808211156111425760008155600101611147565b803563ffffffff8116811461116f57600080fd5b919050565b60006020828403121561118657600080fd5b813573ffffffffffffffffffffffffffffffffffffffff811681146111aa57600080fd5b9392505050565b6000602082840312156111c357600080fd5b815180151581146111aa57600080fd5b6000602082840312156111e557600080fd5b5035919050565b6000602082840312156111fe57600080fd5b5051919050565b6000806040838503121561121857600080fd5b8235915060208084013567ffffffffffffffff8082111561123857600080fd5b818601915086601f83011261124c57600080fd5b81358181111561125e5761125e6114ab565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f830116810181811085821117156112a1576112a16114ab565b604052828152858101935084860182860187018b10156112c057600080fd5b600095505b838610156112e35780358552600195909501949386019386016112c5565b508096505050505050509250929050565b60008060006060848603121561130957600080fd5b6113128461115b565b9250602084013561ffff8116811461132957600080fd5b91506113376040850161115b565b90509250925092565b600081518084526020808501945080840160005b8381101561137057815187529582019590820190600101611354565b509495945050505050565b6000815180845260005b818110156113a157602081850181015186830182015201611385565b818111156113b3576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b73ffffffffffffffffffffffffffffffffffffffff8416815282602082015260606040820152600061141b606083018461137b565b95945050505050565b83815260606020820152600061143d6060830185611340565b9050826040830152949350505050565b838152821515602082015260606040820152600061141b6060830184611340565b600063ffffffff808716835261ffff86166020840152808516604084015250608060608301526114a1608083018461137b565b9695505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFV2PlusWrapperConsumerExampleABI = 
VRFV2PlusWrapperConsumerExampleMetaData.ABI + +var VRFV2PlusWrapperConsumerExampleBin = VRFV2PlusWrapperConsumerExampleMetaData.Bin + +func DeployVRFV2PlusWrapperConsumerExample(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address, _vrfV2Wrapper common.Address) (common.Address, *types.Transaction, *VRFV2PlusWrapperConsumerExample, error) { + parsed, err := VRFV2PlusWrapperConsumerExampleMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2PlusWrapperConsumerExampleBin), backend, _link, _vrfV2Wrapper) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2PlusWrapperConsumerExample{address: address, abi: *parsed, VRFV2PlusWrapperConsumerExampleCaller: VRFV2PlusWrapperConsumerExampleCaller{contract: contract}, VRFV2PlusWrapperConsumerExampleTransactor: VRFV2PlusWrapperConsumerExampleTransactor{contract: contract}, VRFV2PlusWrapperConsumerExampleFilterer: VRFV2PlusWrapperConsumerExampleFilterer{contract: contract}}, nil +} + +type VRFV2PlusWrapperConsumerExample struct { + address common.Address + abi abi.ABI + VRFV2PlusWrapperConsumerExampleCaller + VRFV2PlusWrapperConsumerExampleTransactor + VRFV2PlusWrapperConsumerExampleFilterer +} + +type VRFV2PlusWrapperConsumerExampleCaller struct { + contract *bind.BoundContract +} + +type VRFV2PlusWrapperConsumerExampleTransactor struct { + contract *bind.BoundContract +} + +type VRFV2PlusWrapperConsumerExampleFilterer struct { + contract *bind.BoundContract +} + +type VRFV2PlusWrapperConsumerExampleSession struct { + Contract *VRFV2PlusWrapperConsumerExample + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2PlusWrapperConsumerExampleCallerSession struct { + Contract *VRFV2PlusWrapperConsumerExampleCaller + CallOpts bind.CallOpts +} + 
+type VRFV2PlusWrapperConsumerExampleTransactorSession struct { + Contract *VRFV2PlusWrapperConsumerExampleTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2PlusWrapperConsumerExampleRaw struct { + Contract *VRFV2PlusWrapperConsumerExample +} + +type VRFV2PlusWrapperConsumerExampleCallerRaw struct { + Contract *VRFV2PlusWrapperConsumerExampleCaller +} + +type VRFV2PlusWrapperConsumerExampleTransactorRaw struct { + Contract *VRFV2PlusWrapperConsumerExampleTransactor +} + +func NewVRFV2PlusWrapperConsumerExample(address common.Address, backend bind.ContractBackend) (*VRFV2PlusWrapperConsumerExample, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2PlusWrapperConsumerExampleABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2PlusWrapperConsumerExample(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperConsumerExample{address: address, abi: abi, VRFV2PlusWrapperConsumerExampleCaller: VRFV2PlusWrapperConsumerExampleCaller{contract: contract}, VRFV2PlusWrapperConsumerExampleTransactor: VRFV2PlusWrapperConsumerExampleTransactor{contract: contract}, VRFV2PlusWrapperConsumerExampleFilterer: VRFV2PlusWrapperConsumerExampleFilterer{contract: contract}}, nil +} + +func NewVRFV2PlusWrapperConsumerExampleCaller(address common.Address, caller bind.ContractCaller) (*VRFV2PlusWrapperConsumerExampleCaller, error) { + contract, err := bindVRFV2PlusWrapperConsumerExample(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperConsumerExampleCaller{contract: contract}, nil +} + +func NewVRFV2PlusWrapperConsumerExampleTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2PlusWrapperConsumerExampleTransactor, error) { + contract, err := bindVRFV2PlusWrapperConsumerExample(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperConsumerExampleTransactor{contract: contract}, nil +} + +func 
NewVRFV2PlusWrapperConsumerExampleFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2PlusWrapperConsumerExampleFilterer, error) { + contract, err := bindVRFV2PlusWrapperConsumerExample(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperConsumerExampleFilterer{contract: contract}, nil +} + +func bindVRFV2PlusWrapperConsumerExample(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2PlusWrapperConsumerExampleMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusWrapperConsumerExample.Contract.VRFV2PlusWrapperConsumerExampleCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.VRFV2PlusWrapperConsumerExampleTransactor.contract.Transfer(opts) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.VRFV2PlusWrapperConsumerExampleTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusWrapperConsumerExample.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.contract.Transfer(opts) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCaller) GetBalance(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapperConsumerExample.contract.Call(opts, &out, "getBalance") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) GetBalance() (*big.Int, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.GetBalance(&_VRFV2PlusWrapperConsumerExample.CallOpts) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCallerSession) GetBalance() (*big.Int, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.GetBalance(&_VRFV2PlusWrapperConsumerExample.CallOpts) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCaller) GetLinkToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusWrapperConsumerExample.contract.Call(opts, &out, "getLinkToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) GetLinkToken() (common.Address, error) { + return 
_VRFV2PlusWrapperConsumerExample.Contract.GetLinkToken(&_VRFV2PlusWrapperConsumerExample.CallOpts) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCallerSession) GetLinkToken() (common.Address, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.GetLinkToken(&_VRFV2PlusWrapperConsumerExample.CallOpts) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCaller) GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) { + var out []interface{} + err := _VRFV2PlusWrapperConsumerExample.contract.Call(opts, &out, "getRequestStatus", _requestId) + + outstruct := new(GetRequestStatus) + if err != nil { + return *outstruct, err + } + + outstruct.Paid = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Fulfilled = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.RandomWords = *abi.ConvertType(out[2], new([]*big.Int)).(*[]*big.Int) + + return *outstruct, err + +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2PlusWrapperConsumerExample.Contract.GetRequestStatus(&_VRFV2PlusWrapperConsumerExample.CallOpts, _requestId) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCallerSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2PlusWrapperConsumerExample.Contract.GetRequestStatus(&_VRFV2PlusWrapperConsumerExample.CallOpts, _requestId) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCaller) IVrfV2PlusWrapper(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusWrapperConsumerExample.contract.Call(opts, &out, "i_vrfV2PlusWrapper") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func 
(_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) IVrfV2PlusWrapper() (common.Address, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.IVrfV2PlusWrapper(&_VRFV2PlusWrapperConsumerExample.CallOpts) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCallerSession) IVrfV2PlusWrapper() (common.Address, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.IVrfV2PlusWrapper(&_VRFV2PlusWrapperConsumerExample.CallOpts) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusWrapperConsumerExample.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) Owner() (common.Address, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.Owner(&_VRFV2PlusWrapperConsumerExample.CallOpts) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCallerSession) Owner() (common.Address, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.Owner(&_VRFV2PlusWrapperConsumerExample.CallOpts) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCaller) SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) { + var out []interface{} + err := _VRFV2PlusWrapperConsumerExample.contract.Call(opts, &out, "s_requests", arg0) + + outstruct := new(SRequests) + if err != nil { + return *outstruct, err + } + + outstruct.Paid = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Fulfilled = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.Native = *abi.ConvertType(out[2], new(bool)).(*bool) + + return *outstruct, err + +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) 
SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2PlusWrapperConsumerExample.Contract.SRequests(&_VRFV2PlusWrapperConsumerExample.CallOpts, arg0) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleCallerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2PlusWrapperConsumerExample.Contract.SRequests(&_VRFV2PlusWrapperConsumerExample.CallOpts, arg0) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.AcceptOwnership(&_VRFV2PlusWrapperConsumerExample.TransactOpts) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.AcceptOwnership(&_VRFV2PlusWrapperConsumerExample.TransactOpts) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactor) MakeRequest(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.contract.Transact(opts, "makeRequest", _callbackGasLimit, _requestConfirmations, _numWords) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) MakeRequest(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.MakeRequest(&_VRFV2PlusWrapperConsumerExample.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactorSession) 
MakeRequest(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.MakeRequest(&_VRFV2PlusWrapperConsumerExample.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactor) MakeRequestNative(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.contract.Transact(opts, "makeRequestNative", _callbackGasLimit, _requestConfirmations, _numWords) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) MakeRequestNative(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.MakeRequestNative(&_VRFV2PlusWrapperConsumerExample.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactorSession) MakeRequestNative(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.MakeRequestNative(&_VRFV2PlusWrapperConsumerExample.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, _requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.contract.Transact(opts, "rawFulfillRandomWords", _requestId, _randomWords) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) RawFulfillRandomWords(_requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) { + return 
_VRFV2PlusWrapperConsumerExample.Contract.RawFulfillRandomWords(&_VRFV2PlusWrapperConsumerExample.TransactOpts, _requestId, _randomWords) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactorSession) RawFulfillRandomWords(_requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.RawFulfillRandomWords(&_VRFV2PlusWrapperConsumerExample.TransactOpts, _requestId, _randomWords) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactor) SetLinkToken(opts *bind.TransactOpts, _link common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.contract.Transact(opts, "setLinkToken", _link) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) SetLinkToken(_link common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.SetLinkToken(&_VRFV2PlusWrapperConsumerExample.TransactOpts, _link) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactorSession) SetLinkToken(_link common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.SetLinkToken(&_VRFV2PlusWrapperConsumerExample.TransactOpts, _link) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.TransferOwnership(&_VRFV2PlusWrapperConsumerExample.TransactOpts, to) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, 
error) { + return _VRFV2PlusWrapperConsumerExample.Contract.TransferOwnership(&_VRFV2PlusWrapperConsumerExample.TransactOpts, to) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactor) WithdrawLink(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.contract.Transact(opts, "withdrawLink", amount) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) WithdrawLink(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.WithdrawLink(&_VRFV2PlusWrapperConsumerExample.TransactOpts, amount) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactorSession) WithdrawLink(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.WithdrawLink(&_VRFV2PlusWrapperConsumerExample.TransactOpts, amount) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactor) WithdrawNative(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.contract.Transact(opts, "withdrawNative", amount) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleSession) WithdrawNative(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.WithdrawNative(&_VRFV2PlusWrapperConsumerExample.TransactOpts, amount) +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleTransactorSession) WithdrawNative(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperConsumerExample.Contract.WithdrawNative(&_VRFV2PlusWrapperConsumerExample.TransactOpts, amount) +} + +type VRFV2PlusWrapperConsumerExampleOwnershipTransferRequestedIterator struct { + Event *VRFV2PlusWrapperConsumerExampleOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub 
ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusWrapperConsumerExampleOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperConsumerExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperConsumerExampleOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusWrapperConsumerExampleOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusWrapperConsumerExampleOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusWrapperConsumerExampleOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusWrapperConsumerExampleOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusWrapperConsumerExample.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperConsumerExampleOwnershipTransferRequestedIterator{contract: _VRFV2PlusWrapperConsumerExample.contract, 
event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperConsumerExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusWrapperConsumerExample.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusWrapperConsumerExampleOwnershipTransferRequested) + if err := _VRFV2PlusWrapperConsumerExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusWrapperConsumerExampleOwnershipTransferRequested, error) { + event := new(VRFV2PlusWrapperConsumerExampleOwnershipTransferRequested) + if err := _VRFV2PlusWrapperConsumerExample.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusWrapperConsumerExampleOwnershipTransferredIterator struct { + Event *VRFV2PlusWrapperConsumerExampleOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail 
error +} + +func (it *VRFV2PlusWrapperConsumerExampleOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperConsumerExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperConsumerExampleOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusWrapperConsumerExampleOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusWrapperConsumerExampleOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusWrapperConsumerExampleOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusWrapperConsumerExampleOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusWrapperConsumerExample.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperConsumerExampleOwnershipTransferredIterator{contract: _VRFV2PlusWrapperConsumerExample.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusWrapperConsumerExample 
*VRFV2PlusWrapperConsumerExampleFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperConsumerExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusWrapperConsumerExample.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusWrapperConsumerExampleOwnershipTransferred) + if err := _VRFV2PlusWrapperConsumerExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2PlusWrapperConsumerExampleOwnershipTransferred, error) { + event := new(VRFV2PlusWrapperConsumerExampleOwnershipTransferred) + if err := _VRFV2PlusWrapperConsumerExample.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilledIterator struct { + Event *VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done 
{ + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilled struct { + RequestId *big.Int + RandomWords []*big.Int + Payment *big.Int + Raw types.Log +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleFilterer) FilterWrappedRequestFulfilled(opts *bind.FilterOpts) (*VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilledIterator, error) { + + logs, sub, err := _VRFV2PlusWrapperConsumerExample.contract.FilterLogs(opts, "WrappedRequestFulfilled") + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilledIterator{contract: _VRFV2PlusWrapperConsumerExample.contract, event: "WrappedRequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleFilterer) WatchWrappedRequestFulfilled(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilled) (event.Subscription, error) { + + logs, sub, err := _VRFV2PlusWrapperConsumerExample.contract.WatchLogs(opts, "WrappedRequestFulfilled") + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilled) + if err := _VRFV2PlusWrapperConsumerExample.contract.UnpackLog(event, "WrappedRequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleFilterer) ParseWrappedRequestFulfilled(log types.Log) (*VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilled, error) { + event := new(VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilled) + if err := _VRFV2PlusWrapperConsumerExample.contract.UnpackLog(event, "WrappedRequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusWrapperConsumerExampleWrapperRequestMadeIterator struct { + Event *VRFV2PlusWrapperConsumerExampleWrapperRequestMade + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusWrapperConsumerExampleWrapperRequestMadeIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperConsumerExampleWrapperRequestMade) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperConsumerExampleWrapperRequestMade) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + 
return it.Next() + } +} + +func (it *VRFV2PlusWrapperConsumerExampleWrapperRequestMadeIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusWrapperConsumerExampleWrapperRequestMadeIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusWrapperConsumerExampleWrapperRequestMade struct { + RequestId *big.Int + Paid *big.Int + Raw types.Log +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleFilterer) FilterWrapperRequestMade(opts *bind.FilterOpts, requestId []*big.Int) (*VRFV2PlusWrapperConsumerExampleWrapperRequestMadeIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFV2PlusWrapperConsumerExample.contract.FilterLogs(opts, "WrapperRequestMade", requestIdRule) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperConsumerExampleWrapperRequestMadeIterator{contract: _VRFV2PlusWrapperConsumerExample.contract, event: "WrapperRequestMade", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleFilterer) WatchWrapperRequestMade(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperConsumerExampleWrapperRequestMade, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFV2PlusWrapperConsumerExample.contract.WatchLogs(opts, "WrapperRequestMade", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusWrapperConsumerExampleWrapperRequestMade) + if err := _VRFV2PlusWrapperConsumerExample.contract.UnpackLog(event, "WrapperRequestMade", log); err != nil { + return err + } + event.Raw = log + + select { + case sink 
<- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExampleFilterer) ParseWrapperRequestMade(log types.Log) (*VRFV2PlusWrapperConsumerExampleWrapperRequestMade, error) { + event := new(VRFV2PlusWrapperConsumerExampleWrapperRequestMade) + if err := _VRFV2PlusWrapperConsumerExample.contract.UnpackLog(event, "WrapperRequestMade", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRequestStatus struct { + Paid *big.Int + Fulfilled bool + RandomWords []*big.Int +} +type SRequests struct { + Paid *big.Int + Fulfilled bool + Native bool +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExample) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2PlusWrapperConsumerExample.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2PlusWrapperConsumerExample.ParseOwnershipTransferRequested(log) + case _VRFV2PlusWrapperConsumerExample.abi.Events["OwnershipTransferred"].ID: + return _VRFV2PlusWrapperConsumerExample.ParseOwnershipTransferred(log) + case _VRFV2PlusWrapperConsumerExample.abi.Events["WrappedRequestFulfilled"].ID: + return _VRFV2PlusWrapperConsumerExample.ParseWrappedRequestFulfilled(log) + case _VRFV2PlusWrapperConsumerExample.abi.Events["WrapperRequestMade"].ID: + return _VRFV2PlusWrapperConsumerExample.ParseWrapperRequestMade(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFV2PlusWrapperConsumerExampleOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFV2PlusWrapperConsumerExampleOwnershipTransferred) Topic() common.Hash { + return 
common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0x6c84e12b4c188e61f1b4727024a5cf05c025fa58467e5eedf763c0744c89da7b") +} + +func (VRFV2PlusWrapperConsumerExampleWrapperRequestMade) Topic() common.Hash { + return common.HexToHash("0x5f56b4c20db9f5b294cbf6f681368de4a992a27e2de2ee702dcf2cbbfa791ec4") +} + +func (_VRFV2PlusWrapperConsumerExample *VRFV2PlusWrapperConsumerExample) Address() common.Address { + return _VRFV2PlusWrapperConsumerExample.address +} + +type VRFV2PlusWrapperConsumerExampleInterface interface { + GetBalance(opts *bind.CallOpts) (*big.Int, error) + + GetLinkToken(opts *bind.CallOpts) (common.Address, error) + + GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) + + IVrfV2PlusWrapper(opts *bind.CallOpts) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + MakeRequest(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32) (*types.Transaction, error) + + MakeRequestNative(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, _requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) + + SetLinkToken(opts *bind.TransactOpts, _link common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + WithdrawLink(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + WithdrawNative(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from 
[]common.Address, to []common.Address) (*VRFV2PlusWrapperConsumerExampleOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperConsumerExampleOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusWrapperConsumerExampleOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusWrapperConsumerExampleOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperConsumerExampleOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2PlusWrapperConsumerExampleOwnershipTransferred, error) + + FilterWrappedRequestFulfilled(opts *bind.FilterOpts) (*VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilledIterator, error) + + WatchWrappedRequestFulfilled(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilled) (event.Subscription, error) + + ParseWrappedRequestFulfilled(log types.Log) (*VRFV2PlusWrapperConsumerExampleWrappedRequestFulfilled, error) + + FilterWrapperRequestMade(opts *bind.FilterOpts, requestId []*big.Int) (*VRFV2PlusWrapperConsumerExampleWrapperRequestMadeIterator, error) + + WatchWrapperRequestMade(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperConsumerExampleWrapperRequestMade, requestId []*big.Int) (event.Subscription, error) + + ParseWrapperRequestMade(log types.Log) (*VRFV2PlusWrapperConsumerExampleWrapperRequestMade, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generated/vrfv2plus_wrapper_load_test_consumer/vrfv2plus_wrapper_load_test_consumer.go 
b/core/gethwrappers/generated/vrfv2plus_wrapper_load_test_consumer/vrfv2plus_wrapper_load_test_consumer.go new file mode 100644 index 00000000..cfe19ae1 --- /dev/null +++ b/core/gethwrappers/generated/vrfv2plus_wrapper_load_test_consumer/vrfv2plus_wrapper_load_test_consumer.go @@ -0,0 +1,1234 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrfv2plus_wrapper_load_test_consumer + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var VRFV2PlusWrapperLoadTestConsumerMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_vrfV2PlusWrapper\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"PLIAlreadySet\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"have\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"want\",\"type\":\"address\"}],\"name\":\"OnlyVRFWrapperCanFulfill\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"payment\",\"type\":\"uint256\"}],\"name\":\"WrappedRequestFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"}],\"name\":\"WrapperRequestMade\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getBalance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLinkToken\",
\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_requestId\",\"type\":\"uint256\"}],\"name\":\"getRequestStatus\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256\",\"name\":\"requestTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestBlockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentBlockNumber\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_vrfV2PlusWrapper\",\"outputs\":[{\"internalType\":\"contractIVRFV2PlusWrapper\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"_numWords\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestCount\",\"type\":\"uint16\"}],\"name\":\"makeRequests\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestConfirmations\",\"type\":\"uint16\"},{\"internalType\":\"uint32\",\"name\":\"_numWords\",\"type\":\"uint32\"},{\"internalType\":\"uint16\",\"name\":\"_requestCount\",\"type\":\"uint16\"}],\"name\":\"makeRequestsNative\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\
":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_requestId\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"_randomWords\",\"type\":\"uint256[]\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"reset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_averageFulfillmentInMillions\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_fastestFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_lastRequestId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_requestCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requests\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"fulfilled\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"requestTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestBlockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"fulfilmentBlockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"native\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_responseCount\",\"outputs\"
:[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_slowestFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"name\":\"setLinkToken\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"withdrawLink\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"withdrawNative\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", + Bin: 
"0x60a0604052600060055560006006556103e76007553480156200002157600080fd5b5060405162001e9838038062001e988339810160408190526200004491620001eb565b3380600084846001600160a01b038216156200007657600080546001600160a01b0319166001600160a01b0384161790555b60601b6001600160601b031916608052506001600160a01b038216620000e35760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600180546001600160a01b0319166001600160a01b03848116919091179091558116156200011657620001168162000121565b505050505062000223565b6001600160a01b0381163314156200017c5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620000da565b600280546001600160a01b0319166001600160a01b03838116918217909255600154604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b80516001600160a01b0381168114620001e657600080fd5b919050565b60008060408385031215620001ff57600080fd5b6200020a83620001ce565b91506200021a60208401620001ce565b90509250929050565b60805160601c611c2c6200026c600039600081816102e9015281816104b80152818161119701528181611239015281816112f10152818161148301526115010152611c2c6000f3fe60806040526004361061016e5760003560e01c80639c24ea40116100cb578063d826f88f1161007f578063e76d516811610059578063e76d51681461044b578063f176596214610476578063f2fde38b1461049657600080fd5b8063d826f88f146103d6578063d8a4676f14610402578063dc1670db1461043557600080fd5b8063a168fa89116100b0578063a168fa891461030b578063afacbf9c146103a0578063b1e21749146103c057600080fd5b80639c24ea40146102b75780639ed0868d146102d757600080fd5b806374dba124116101225780637a8042bd116101075780637a8042bd1461022b57806384276d811461024b5780638da5cb5b1461026b57600080fd5b806374dba1241461020057806379ba50971461021657600080fd5b80631fe543e3116101535780631fe543e3146101b2578063557d2e92146101d4578063737144bc146101ea57600080fd5b806312065fe01461017a5780631757f11c1461019c57600080fd5b3661017557005b600080fd5
b34801561018657600080fd5b50475b6040519081526020015b60405180910390f35b3480156101a857600080fd5b5061018960065481565b3480156101be57600080fd5b506101d26101cd3660046117eb565b6104b6565b005b3480156101e057600080fd5b5061018960045481565b3480156101f657600080fd5b5061018960055481565b34801561020c57600080fd5b5061018960075481565b34801561022257600080fd5b506101d2610558565b34801561023757600080fd5b506101d26102463660046117b9565b610659565b34801561025757600080fd5b506101d26102663660046117b9565b610747565b34801561027757600080fd5b5060015473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610193565b3480156102c357600080fd5b506101d26102d236600461175a565b610837565b3480156102e357600080fd5b506102927f000000000000000000000000000000000000000000000000000000000000000081565b34801561031757600080fd5b506103696103263660046117b9565b600a602052600090815260409020805460018201546003830154600484015460058501546006860154600790960154949560ff9485169593949293919290911687565b604080519788529515156020880152948601939093526060850191909152608084015260a0830152151560c082015260e001610193565b3480156103ac57600080fd5b506101d26103bb3660046118da565b6108ce565b3480156103cc57600080fd5b5061018960085481565b3480156103e257600080fd5b506101d26000600581905560068190556103e76007556004819055600355565b34801561040e57600080fd5b5061042261041d3660046117b9565b610ab5565b6040516101939796959493929190611a3b565b34801561044157600080fd5b5061018960035481565b34801561045757600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff16610292565b34801561048257600080fd5b506101d26104913660046118da565b610c38565b3480156104a257600080fd5b506101d26104b136600461175a565b610e17565b7f00000000000000000000000000000000000000000000000000000000000000003373ffffffffffffffffffffffffffffffffffffffff821614610549576040517f8ba9316e00000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff821660248201526044015b60405180910390fd5b6105538383610e2b565b505050565b60025473fffffffffff
fffffffffffffffffffffffffffff1633146105d9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610540565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000008082163390811790935560028054909116905560405173ffffffffffffffffffffffffffffffffffffffff909116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a350565b61066161100a565b60005473ffffffffffffffffffffffffffffffffffffffff1663a9059cbb61069e60015473ffffffffffffffffffffffffffffffffffffffff1690565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e084901b16815273ffffffffffffffffffffffffffffffffffffffff909116600482015260248101849052604401602060405180830381600087803b15801561070b57600080fd5b505af115801561071f573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906107439190611797565b5050565b61074f61100a565b600061077060015473ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff168260405160006040518083038185875af1925050503d80600081146107c7576040519150601f19603f3d011682016040523d82523d6000602084013e6107cc565b606091505b5050905080610743576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f77697468647261774e6174697665206661696c656400000000000000000000006044820152606401610540565b60005473ffffffffffffffffffffffffffffffffffffffff1615610887576040517f64f778ae00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b6108d661100a565b60005b8161ffff168161ffff161015610aae57600061090560405180602001604052806000151581525061108d565b905060008061091688888886611149565b60088290559092509050600061092a611398565b604080516101008101825284815260006020808301828152845183815280830
186528486019081524260608601526080850184905260a0850187905260c0850184905260e08501849052898452600a8352949092208351815591516001830180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001691151591909117905592518051949550919390926109d29260028501929101906116cf565b5060608201516003820155608082015160048083019190915560a0830151600583015560c0830151600683015560e090920151600790910180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169115159190911790558054906000610a4583611b88565b9091555050600083815260096020526040908190208290555183907f5f56b4c20db9f5b294cbf6f681368de4a992a27e2de2ee702dcf2cbbfa791ec490610a8f9085815260200190565b60405180910390a2505050508080610aa690611b66565b9150506108d9565b5050505050565b6000818152600a602052604081205481906060908290819081908190610b37576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f72657175657374206e6f7420666f756e640000000000000000000000000000006044820152606401610540565b6000888152600a6020908152604080832081516101008101835281548152600182015460ff16151581850152600282018054845181870281018701865281815292959394860193830182828015610bad57602002820191906000526020600020905b815481526020019060010190808311610b99575b50505050508152602001600382015481526020016004820154815260200160058201548152602001600682015481526020016007820160009054906101000a900460ff1615151515815250509050806000015181602001518260400151836060015184608001518560a001518660c00151975097509750975097509750975050919395979092949650565b610c4061100a565b60005b8161ffff168161ffff161015610aae576000610c6f60405180602001604052806001151581525061108d565b9050600080610c8088888886611435565b600882905590925090506000610c94611398565b604080516101008101825284815260006020808301828152845183815280830186528486019081524260608601526080850184905260a0850187905260c08501849052600160e086018190528a8552600a84529590932084518155905194810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001695151595909517909455905180519495509193610d3b9260028
5019201906116cf565b5060608201516003820155608082015160048083019190915560a0830151600583015560c0830151600683015560e090920151600790910180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169115159190911790558054906000610dae83611b88565b9091555050600083815260096020526040908190208290555183907f5f56b4c20db9f5b294cbf6f681368de4a992a27e2de2ee702dcf2cbbfa791ec490610df89085815260200190565b60405180910390a2505050508080610e0f90611b66565b915050610c43565b610e1f61100a565b610e28816115b1565b50565b6000828152600a6020526040902054610ea0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f72657175657374206e6f7420666f756e640000000000000000000000000000006044820152606401610540565b6000610eaa611398565b60008481526009602052604081205491925090610ec79083611b4f565b90506000610ed882620f4240611b12565b9050600654821115610eea5760068290555b6007548210610efb57600754610efd565b815b600755600354610f0d5780610f40565b600354610f1b906001611abf565b81600354600554610f2c9190611b12565b610f369190611abf565b610f409190611ad7565b60055560038054906000610f5383611b88565b90915550506000858152600a60209081526040909120600181810180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690911790558551610fab926002909201918701906116cf565b506000858152600a602052604090819020426004820155600681018590555490517f6c84e12b4c188e61f1b4727024a5cf05c025fa58467e5eedf763c0744c89da7b91610ffb9188918891611a12565b60405180910390a15050505050565b60015473ffffffffffffffffffffffffffffffffffffffff16331461108b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610540565b565b60607f92fd13387c7fe7befbc38d303d6468778fb9731bc4583f17d92989c6fcfdeaaa826040516024016110c691511515815260200190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167ff
fffffff000000000000000000000000000000000000000000000000000000009093169290921790915292915050565b6040517f4306d35400000000000000000000000000000000000000000000000000000000815263ffffffff85166004820152600090819073ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001690634306d3549060240160206040518083038186803b1580156111d957600080fd5b505afa1580156111ed573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061121191906117d2565b60005460405191925073ffffffffffffffffffffffffffffffffffffffff1690634000aea0907f000000000000000000000000000000000000000000000000000000000000000090849061126f908b908b908b908b90602001611a82565b6040516020818303038152906040526040518463ffffffff1660e01b815260040161129c939291906119d4565b602060405180830381600087803b1580156112b657600080fd5b505af11580156112ca573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906112ee9190611797565b507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663fc2a88c36040518163ffffffff1660e01b815260040160206040518083038186803b15801561135557600080fd5b505afa158015611369573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061138d91906117d2565b915094509492505050565b6000466113a4816116a8565b1561142e57606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b1580156113f057600080fd5b505afa158015611404573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061142891906117d2565b91505090565b4391505090565b6040517f4b16093500000000000000000000000000000000000000000000000000000000815263ffffffff85166004820152600090819073ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001690634b1609359060240160206040518083038186803b1580156114c557600080fd5b505afa1580156114d9573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906114fd91906117d2565b90507f000
000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16639cfc058e82888888886040518663ffffffff1660e01b815260040161155f9493929190611a82565b6020604051808303818588803b15801561157857600080fd5b505af115801561158c573d6000803e3d6000fd5b50505050506040513d601f19601f8201168201806040525081019061138d91906117d2565b73ffffffffffffffffffffffffffffffffffffffff8116331415611631576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610540565b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff838116918217909255600154604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b600061a4b18214806116bc575062066eed82145b806116c9575062066eee82145b92915050565b82805482825590600052602060002090810192821561170a579160200282015b8281111561170a5782518255916020019190600101906116ef565b5061171692915061171a565b5090565b5b80821115611716576000815560010161171b565b803561ffff8116811461174157600080fd5b919050565b803563ffffffff8116811461174157600080fd5b60006020828403121561176c57600080fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811461179057600080fd5b9392505050565b6000602082840312156117a957600080fd5b8151801515811461179057600080fd5b6000602082840312156117cb57600080fd5b5035919050565b6000602082840312156117e457600080fd5b5051919050565b600080604083850312156117fe57600080fd5b8235915060208084013567ffffffffffffffff8082111561181e57600080fd5b818601915086601f83011261183257600080fd5b81358181111561184457611844611bf0565b8060051b6040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0603f8301168101818110858211171561188757611887611bf0565b604052828152858101935084860182860187018b10156118a657600080fd5b600095505b838610156118c95780358552600195909501949386019386016118ab565b508096505050505050509250929050565b600080600080608085870312156118f
057600080fd5b6118f985611746565b93506119076020860161172f565b925061191560408601611746565b91506119236060860161172f565b905092959194509250565b600081518084526020808501945080840160005b8381101561195e57815187529582019590820190600101611942565b509495945050505050565b6000815180845260005b8181101561198f57602081850181015186830182015201611973565b818111156119a1576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b73ffffffffffffffffffffffffffffffffffffffff84168152826020820152606060408201526000611a096060830184611969565b95945050505050565b838152606060208201526000611a2b606083018561192e565b9050826040830152949350505050565b878152861515602082015260e060408201526000611a5c60e083018861192e565b90508560608301528460808301528360a08301528260c083015298975050505050505050565b600063ffffffff808716835261ffff8616602084015280851660408401525060806060830152611ab56080830184611969565b9695505050505050565b60008219821115611ad257611ad2611bc1565b500190565b600082611b0d577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615611b4a57611b4a611bc1565b500290565b600082821015611b6157611b61611bc1565b500390565b600061ffff80831681811415611b7e57611b7e611bc1565b6001019392505050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415611bba57611bba611bc1565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var VRFV2PlusWrapperLoadTestConsumerABI = VRFV2PlusWrapperLoadTestConsumerMetaData.ABI + +var VRFV2PlusWrapperLoadTestConsumerBin = VRFV2PlusWrapperLoadTestConsumerMetaData.Bin + +func DeployVRFV2PlusWrapperLoadTestConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address, _vrfV2PlusWrapper 
common.Address) (common.Address, *types.Transaction, *VRFV2PlusWrapperLoadTestConsumer, error) { + parsed, err := VRFV2PlusWrapperLoadTestConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFV2PlusWrapperLoadTestConsumerBin), backend, _link, _vrfV2PlusWrapper) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFV2PlusWrapperLoadTestConsumer{address: address, abi: *parsed, VRFV2PlusWrapperLoadTestConsumerCaller: VRFV2PlusWrapperLoadTestConsumerCaller{contract: contract}, VRFV2PlusWrapperLoadTestConsumerTransactor: VRFV2PlusWrapperLoadTestConsumerTransactor{contract: contract}, VRFV2PlusWrapperLoadTestConsumerFilterer: VRFV2PlusWrapperLoadTestConsumerFilterer{contract: contract}}, nil +} + +type VRFV2PlusWrapperLoadTestConsumer struct { + address common.Address + abi abi.ABI + VRFV2PlusWrapperLoadTestConsumerCaller + VRFV2PlusWrapperLoadTestConsumerTransactor + VRFV2PlusWrapperLoadTestConsumerFilterer +} + +type VRFV2PlusWrapperLoadTestConsumerCaller struct { + contract *bind.BoundContract +} + +type VRFV2PlusWrapperLoadTestConsumerTransactor struct { + contract *bind.BoundContract +} + +type VRFV2PlusWrapperLoadTestConsumerFilterer struct { + contract *bind.BoundContract +} + +type VRFV2PlusWrapperLoadTestConsumerSession struct { + Contract *VRFV2PlusWrapperLoadTestConsumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFV2PlusWrapperLoadTestConsumerCallerSession struct { + Contract *VRFV2PlusWrapperLoadTestConsumerCaller + CallOpts bind.CallOpts +} + +type VRFV2PlusWrapperLoadTestConsumerTransactorSession struct { + Contract *VRFV2PlusWrapperLoadTestConsumerTransactor + TransactOpts bind.TransactOpts +} + +type VRFV2PlusWrapperLoadTestConsumerRaw struct { + Contract 
*VRFV2PlusWrapperLoadTestConsumer +} + +type VRFV2PlusWrapperLoadTestConsumerCallerRaw struct { + Contract *VRFV2PlusWrapperLoadTestConsumerCaller +} + +type VRFV2PlusWrapperLoadTestConsumerTransactorRaw struct { + Contract *VRFV2PlusWrapperLoadTestConsumerTransactor +} + +func NewVRFV2PlusWrapperLoadTestConsumer(address common.Address, backend bind.ContractBackend) (*VRFV2PlusWrapperLoadTestConsumer, error) { + abi, err := abi.JSON(strings.NewReader(VRFV2PlusWrapperLoadTestConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFV2PlusWrapperLoadTestConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperLoadTestConsumer{address: address, abi: abi, VRFV2PlusWrapperLoadTestConsumerCaller: VRFV2PlusWrapperLoadTestConsumerCaller{contract: contract}, VRFV2PlusWrapperLoadTestConsumerTransactor: VRFV2PlusWrapperLoadTestConsumerTransactor{contract: contract}, VRFV2PlusWrapperLoadTestConsumerFilterer: VRFV2PlusWrapperLoadTestConsumerFilterer{contract: contract}}, nil +} + +func NewVRFV2PlusWrapperLoadTestConsumerCaller(address common.Address, caller bind.ContractCaller) (*VRFV2PlusWrapperLoadTestConsumerCaller, error) { + contract, err := bindVRFV2PlusWrapperLoadTestConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperLoadTestConsumerCaller{contract: contract}, nil +} + +func NewVRFV2PlusWrapperLoadTestConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFV2PlusWrapperLoadTestConsumerTransactor, error) { + contract, err := bindVRFV2PlusWrapperLoadTestConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperLoadTestConsumerTransactor{contract: contract}, nil +} + +func NewVRFV2PlusWrapperLoadTestConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFV2PlusWrapperLoadTestConsumerFilterer, error) { + contract, err := 
bindVRFV2PlusWrapperLoadTestConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperLoadTestConsumerFilterer{contract: contract}, nil +} + +func bindVRFV2PlusWrapperLoadTestConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFV2PlusWrapperLoadTestConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.VRFV2PlusWrapperLoadTestConsumerCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.VRFV2PlusWrapperLoadTestConsumerTransactor.contract.Transfer(opts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.VRFV2PlusWrapperLoadTestConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.contract.Transfer(opts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCaller) GetBalance(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapperLoadTestConsumer.contract.Call(opts, &out, "getBalance") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) GetBalance() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.GetBalance(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerSession) GetBalance() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.GetBalance(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCaller) GetLinkToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusWrapperLoadTestConsumer.contract.Call(opts, &out, "getLinkToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) GetLinkToken() (common.Address, error) { + return 
_VRFV2PlusWrapperLoadTestConsumer.Contract.GetLinkToken(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerSession) GetLinkToken() (common.Address, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.GetLinkToken(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCaller) GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) { + var out []interface{} + err := _VRFV2PlusWrapperLoadTestConsumer.contract.Call(opts, &out, "getRequestStatus", _requestId) + + outstruct := new(GetRequestStatus) + if err != nil { + return *outstruct, err + } + + outstruct.Paid = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Fulfilled = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.RandomWords = *abi.ConvertType(out[2], new([]*big.Int)).(*[]*big.Int) + outstruct.RequestTimestamp = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + outstruct.FulfilmentTimestamp = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + outstruct.RequestBlockNumber = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + outstruct.FulfilmentBlockNumber = *abi.ConvertType(out[6], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.GetRequestStatus(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts, _requestId) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerSession) GetRequestStatus(_requestId *big.Int) (GetRequestStatus, + + error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.GetRequestStatus(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts, _requestId) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCaller) 
IVrfV2PlusWrapper(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusWrapperLoadTestConsumer.contract.Call(opts, &out, "i_vrfV2PlusWrapper") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) IVrfV2PlusWrapper() (common.Address, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.IVrfV2PlusWrapper(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerSession) IVrfV2PlusWrapper() (common.Address, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.IVrfV2PlusWrapper(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFV2PlusWrapperLoadTestConsumer.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) Owner() (common.Address, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.Owner(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerSession) Owner() (common.Address, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.Owner(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCaller) SAverageFulfillmentInMillions(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapperLoadTestConsumer.contract.Call(opts, &out, "s_averageFulfillmentInMillions") + + if err != 
nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) SAverageFulfillmentInMillions() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SAverageFulfillmentInMillions(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerSession) SAverageFulfillmentInMillions() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SAverageFulfillmentInMillions(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCaller) SFastestFulfillment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapperLoadTestConsumer.contract.Call(opts, &out, "s_fastestFulfillment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) SFastestFulfillment() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SFastestFulfillment(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerSession) SFastestFulfillment() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SFastestFulfillment(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCaller) SLastRequestId(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapperLoadTestConsumer.contract.Call(opts, &out, "s_lastRequestId") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func 
(_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) SLastRequestId() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SLastRequestId(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerSession) SLastRequestId() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SLastRequestId(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCaller) SRequestCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapperLoadTestConsumer.contract.Call(opts, &out, "s_requestCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) SRequestCount() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SRequestCount(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerSession) SRequestCount() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SRequestCount(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCaller) SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) { + var out []interface{} + err := _VRFV2PlusWrapperLoadTestConsumer.contract.Call(opts, &out, "s_requests", arg0) + + outstruct := new(SRequests) + if err != nil { + return *outstruct, err + } + + outstruct.Paid = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Fulfilled = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.RequestTimestamp = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.FulfilmentTimestamp = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + 
outstruct.RequestBlockNumber = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + outstruct.FulfilmentBlockNumber = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + outstruct.Native = *abi.ConvertType(out[6], new(bool)).(*bool) + + return *outstruct, err + +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SRequests(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts, arg0) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerSession) SRequests(arg0 *big.Int) (SRequests, + + error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SRequests(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts, arg0) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCaller) SResponseCount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapperLoadTestConsumer.contract.Call(opts, &out, "s_responseCount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) SResponseCount() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SResponseCount(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerSession) SResponseCount() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SResponseCount(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCaller) SSlowestFulfillment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFV2PlusWrapperLoadTestConsumer.contract.Call(opts, &out, "s_slowestFulfillment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := 
*abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) SSlowestFulfillment() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SSlowestFulfillment(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerCallerSession) SSlowestFulfillment() (*big.Int, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SSlowestFulfillment(&_VRFV2PlusWrapperLoadTestConsumer.CallOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.AcceptOwnership(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.AcceptOwnership(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactor) MakeRequests(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.contract.Transact(opts, "makeRequests", _callbackGasLimit, _requestConfirmations, _numWords, _requestCount) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) MakeRequests(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return 
_VRFV2PlusWrapperLoadTestConsumer.Contract.MakeRequests(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords, _requestCount) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactorSession) MakeRequests(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.MakeRequests(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords, _requestCount) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactor) MakeRequestsNative(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.contract.Transact(opts, "makeRequestsNative", _callbackGasLimit, _requestConfirmations, _numWords, _requestCount) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) MakeRequestsNative(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.MakeRequestsNative(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords, _requestCount) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactorSession) MakeRequestsNative(_callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, _requestCount uint16) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.MakeRequestsNative(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, _callbackGasLimit, _requestConfirmations, _numWords, _requestCount) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, 
_requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.contract.Transact(opts, "rawFulfillRandomWords", _requestId, _randomWords) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) RawFulfillRandomWords(_requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.RawFulfillRandomWords(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, _requestId, _randomWords) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactorSession) RawFulfillRandomWords(_requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.RawFulfillRandomWords(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, _requestId, _randomWords) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactor) Reset(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.contract.Transact(opts, "reset") +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) Reset() (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.Reset(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactorSession) Reset() (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.Reset(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactor) SetLinkToken(opts *bind.TransactOpts, _link common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.contract.Transact(opts, "setLinkToken", _link) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) SetLinkToken(_link common.Address) (*types.Transaction, error) 
{ + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SetLinkToken(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, _link) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactorSession) SetLinkToken(_link common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.SetLinkToken(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, _link) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.TransferOwnership(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, to) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.TransferOwnership(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, to) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactor) WithdrawLink(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.contract.Transact(opts, "withdrawLink", amount) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) WithdrawLink(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.WithdrawLink(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, amount) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactorSession) WithdrawLink(amount *big.Int) (*types.Transaction, error) { + return 
_VRFV2PlusWrapperLoadTestConsumer.Contract.WithdrawLink(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, amount) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactor) WithdrawNative(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.contract.Transact(opts, "withdrawNative", amount) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) WithdrawNative(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.WithdrawNative(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, amount) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactorSession) WithdrawNative(amount *big.Int) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.WithdrawNative(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts, amount) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.contract.RawTransact(opts, nil) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerSession) Receive() (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.Receive(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts) +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerTransactorSession) Receive() (*types.Transaction, error) { + return _VRFV2PlusWrapperLoadTestConsumer.Contract.Receive(&_VRFV2PlusWrapperLoadTestConsumer.TransactOpts) +} + +type VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequestedIterator struct { + Event *VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusWrapperLoadTestConsumer.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequestedIterator{contract: _VRFV2PlusWrapperLoadTestConsumer.contract, event: "OwnershipTransferRequested", logs: logs, sub: 
sub}, nil +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusWrapperLoadTestConsumer.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequested) + if err := _VRFV2PlusWrapperLoadTestConsumer.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequested, error) { + event := new(VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequested) + if err := _VRFV2PlusWrapperLoadTestConsumer.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusWrapperLoadTestConsumerOwnershipTransferredIterator struct { + Event *VRFV2PlusWrapperLoadTestConsumerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*VRFV2PlusWrapperLoadTestConsumerOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperLoadTestConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperLoadTestConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusWrapperLoadTestConsumerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusWrapperLoadTestConsumerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusWrapperLoadTestConsumerOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusWrapperLoadTestConsumerOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusWrapperLoadTestConsumer.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperLoadTestConsumerOwnershipTransferredIterator{contract: _VRFV2PlusWrapperLoadTestConsumer.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusWrapperLoadTestConsumer 
*VRFV2PlusWrapperLoadTestConsumerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperLoadTestConsumerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFV2PlusWrapperLoadTestConsumer.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusWrapperLoadTestConsumerOwnershipTransferred) + if err := _VRFV2PlusWrapperLoadTestConsumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerFilterer) ParseOwnershipTransferred(log types.Log) (*VRFV2PlusWrapperLoadTestConsumerOwnershipTransferred, error) { + event := new(VRFV2PlusWrapperLoadTestConsumerOwnershipTransferred) + if err := _VRFV2PlusWrapperLoadTestConsumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilledIterator struct { + Event *VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + 
+ if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilledIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilled struct { + RequestId *big.Int + RandomWords []*big.Int + Payment *big.Int + Raw types.Log +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerFilterer) FilterWrappedRequestFulfilled(opts *bind.FilterOpts) (*VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilledIterator, error) { + + logs, sub, err := _VRFV2PlusWrapperLoadTestConsumer.contract.FilterLogs(opts, "WrappedRequestFulfilled") + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilledIterator{contract: _VRFV2PlusWrapperLoadTestConsumer.contract, event: "WrappedRequestFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerFilterer) WatchWrappedRequestFulfilled(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilled) (event.Subscription, error) { + + logs, sub, err := _VRFV2PlusWrapperLoadTestConsumer.contract.WatchLogs(opts, "WrappedRequestFulfilled") + if err != nil { + return nil, err + } + 
return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilled) + if err := _VRFV2PlusWrapperLoadTestConsumer.contract.UnpackLog(event, "WrappedRequestFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerFilterer) ParseWrappedRequestFulfilled(log types.Log) (*VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilled, error) { + event := new(VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilled) + if err := _VRFV2PlusWrapperLoadTestConsumer.contract.UnpackLog(event, "WrappedRequestFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFV2PlusWrapperLoadTestConsumerWrapperRequestMadeIterator struct { + Event *VRFV2PlusWrapperLoadTestConsumerWrapperRequestMade + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFV2PlusWrapperLoadTestConsumerWrapperRequestMadeIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperLoadTestConsumerWrapperRequestMade) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFV2PlusWrapperLoadTestConsumerWrapperRequestMade) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true 
+ it.fail = err + return it.Next() + } +} + +func (it *VRFV2PlusWrapperLoadTestConsumerWrapperRequestMadeIterator) Error() error { + return it.fail +} + +func (it *VRFV2PlusWrapperLoadTestConsumerWrapperRequestMadeIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFV2PlusWrapperLoadTestConsumerWrapperRequestMade struct { + RequestId *big.Int + Paid *big.Int + Raw types.Log +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerFilterer) FilterWrapperRequestMade(opts *bind.FilterOpts, requestId []*big.Int) (*VRFV2PlusWrapperLoadTestConsumerWrapperRequestMadeIterator, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFV2PlusWrapperLoadTestConsumer.contract.FilterLogs(opts, "WrapperRequestMade", requestIdRule) + if err != nil { + return nil, err + } + return &VRFV2PlusWrapperLoadTestConsumerWrapperRequestMadeIterator{contract: _VRFV2PlusWrapperLoadTestConsumer.contract, event: "WrapperRequestMade", logs: logs, sub: sub}, nil +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerFilterer) WatchWrapperRequestMade(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperLoadTestConsumerWrapperRequestMade, requestId []*big.Int) (event.Subscription, error) { + + var requestIdRule []interface{} + for _, requestIdItem := range requestId { + requestIdRule = append(requestIdRule, requestIdItem) + } + + logs, sub, err := _VRFV2PlusWrapperLoadTestConsumer.contract.WatchLogs(opts, "WrapperRequestMade", requestIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFV2PlusWrapperLoadTestConsumerWrapperRequestMade) + if err := _VRFV2PlusWrapperLoadTestConsumer.contract.UnpackLog(event, "WrapperRequestMade", log); err != nil { + return err + } + event.Raw 
= log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumerFilterer) ParseWrapperRequestMade(log types.Log) (*VRFV2PlusWrapperLoadTestConsumerWrapperRequestMade, error) { + event := new(VRFV2PlusWrapperLoadTestConsumerWrapperRequestMade) + if err := _VRFV2PlusWrapperLoadTestConsumer.contract.UnpackLog(event, "WrapperRequestMade", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetRequestStatus struct { + Paid *big.Int + Fulfilled bool + RandomWords []*big.Int + RequestTimestamp *big.Int + FulfilmentTimestamp *big.Int + RequestBlockNumber *big.Int + FulfilmentBlockNumber *big.Int +} +type SRequests struct { + Paid *big.Int + Fulfilled bool + RequestTimestamp *big.Int + FulfilmentTimestamp *big.Int + RequestBlockNumber *big.Int + FulfilmentBlockNumber *big.Int + Native bool +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumer) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFV2PlusWrapperLoadTestConsumer.abi.Events["OwnershipTransferRequested"].ID: + return _VRFV2PlusWrapperLoadTestConsumer.ParseOwnershipTransferRequested(log) + case _VRFV2PlusWrapperLoadTestConsumer.abi.Events["OwnershipTransferred"].ID: + return _VRFV2PlusWrapperLoadTestConsumer.ParseOwnershipTransferred(log) + case _VRFV2PlusWrapperLoadTestConsumer.abi.Events["WrappedRequestFulfilled"].ID: + return _VRFV2PlusWrapperLoadTestConsumer.ParseWrappedRequestFulfilled(log) + case _VRFV2PlusWrapperLoadTestConsumer.abi.Events["WrapperRequestMade"].ID: + return _VRFV2PlusWrapperLoadTestConsumer.ParseWrapperRequestMade(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func 
(VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFV2PlusWrapperLoadTestConsumerOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilled) Topic() common.Hash { + return common.HexToHash("0x6c84e12b4c188e61f1b4727024a5cf05c025fa58467e5eedf763c0744c89da7b") +} + +func (VRFV2PlusWrapperLoadTestConsumerWrapperRequestMade) Topic() common.Hash { + return common.HexToHash("0x5f56b4c20db9f5b294cbf6f681368de4a992a27e2de2ee702dcf2cbbfa791ec4") +} + +func (_VRFV2PlusWrapperLoadTestConsumer *VRFV2PlusWrapperLoadTestConsumer) Address() common.Address { + return _VRFV2PlusWrapperLoadTestConsumer.address +} + +type VRFV2PlusWrapperLoadTestConsumerInterface interface { + GetBalance(opts *bind.CallOpts) (*big.Int, error) + + GetLinkToken(opts *bind.CallOpts) (common.Address, error) + + GetRequestStatus(opts *bind.CallOpts, _requestId *big.Int) (GetRequestStatus, + + error) + + IVrfV2PlusWrapper(opts *bind.CallOpts) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SAverageFulfillmentInMillions(opts *bind.CallOpts) (*big.Int, error) + + SFastestFulfillment(opts *bind.CallOpts) (*big.Int, error) + + SLastRequestId(opts *bind.CallOpts) (*big.Int, error) + + SRequestCount(opts *bind.CallOpts) (*big.Int, error) + + SRequests(opts *bind.CallOpts, arg0 *big.Int) (SRequests, + + error) + + SResponseCount(opts *bind.CallOpts) (*big.Int, error) + + SSlowestFulfillment(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + MakeRequests(opts *bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, _requestCount uint16) (*types.Transaction, error) + + MakeRequestsNative(opts 
*bind.TransactOpts, _callbackGasLimit uint32, _requestConfirmations uint16, _numWords uint32, _requestCount uint16) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, _requestId *big.Int, _randomWords []*big.Int) (*types.Transaction, error) + + Reset(opts *bind.TransactOpts) (*types.Transaction, error) + + SetLinkToken(opts *bind.TransactOpts, _link common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + WithdrawLink(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + WithdrawNative(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + Receive(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFV2PlusWrapperLoadTestConsumerOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFV2PlusWrapperLoadTestConsumerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperLoadTestConsumerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFV2PlusWrapperLoadTestConsumerOwnershipTransferred, error) + + FilterWrappedRequestFulfilled(opts *bind.FilterOpts) (*VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilledIterator, error) + + WatchWrappedRequestFulfilled(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilled) 
(event.Subscription, error) + + ParseWrappedRequestFulfilled(log types.Log) (*VRFV2PlusWrapperLoadTestConsumerWrappedRequestFulfilled, error) + + FilterWrapperRequestMade(opts *bind.FilterOpts, requestId []*big.Int) (*VRFV2PlusWrapperLoadTestConsumerWrapperRequestMadeIterator, error) + + WatchWrapperRequestMade(opts *bind.WatchOpts, sink chan<- *VRFV2PlusWrapperLoadTestConsumerWrapperRequestMade, requestId []*big.Int) (event.Subscription, error) + + ParseWrapperRequestMade(log types.Log) (*VRFV2PlusWrapperLoadTestConsumerWrapperRequestMade, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/generation/compile_contracts.sh b/core/gethwrappers/generation/compile_contracts.sh new file mode 100644 index 00000000..627e0d52 --- /dev/null +++ b/core/gethwrappers/generation/compile_contracts.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +echo "compiling contracts" + +CDIR="$(dirname "$0")" +COMPILE_COMMAND="$CDIR/../../../contracts/scripts/native_solc_compile_all" + + +# Only print compilation output on failure. +OUT="$(bash -c "${COMPILE_COMMAND}" 2>&1)" +ERR="$?" + +# shellcheck disable=SC2181 +if [ "$ERR" != "0" ]; then + echo + echo "↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓" + echo "Error while compiling solidity contracts. See below for output." + echo "You can reproduce this error directly by running the command" + echo + echo " " "$COMPILE_COMMAND" + echo + echo "in the directory $SOLIDITY_DIR" + echo + echo "This is probably a problem with a solidity contract, under the" + echo "directory contracts/src/." 
+ echo "↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑" + echo + echo "$OUT" + exit 1 +fi + +echo "finished compilation" diff --git a/core/gethwrappers/generation/compile_event_mock_contract.sh b/core/gethwrappers/generation/compile_event_mock_contract.sh new file mode 100644 index 00000000..a576a16a --- /dev/null +++ b/core/gethwrappers/generation/compile_event_mock_contract.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +set -euo pipefail + +echo "compiling contracts" + +CDIR="$(dirname "$0")" +COMPILE_COMMAND="$CDIR/../../../contracts/scripts/native_solc_compile_all_events_mock" + + +# Only print compilation output on failure. +OUT="$(bash -c "${COMPILE_COMMAND}" 2>&1)" +ERR="$?" + +# shellcheck disable=SC2181 +if [ "$ERR" != "0" ]; then + echo + echo "↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓" + echo "Error while compiling solidity contracts. See below for output." + echo "You can reproduce this error directly by running the command" + echo + echo " " "$COMPILE_COMMAND" + echo + echo "in the directory $SOLIDITY_DIR" + echo + echo "This is probably a problem with a solidity contract, under the" + echo "directory contracts/src/." + echo "↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑" + echo + echo "$OUT" + exit 1 +fi + +echo "finished compilation" diff --git a/core/gethwrappers/generation/fastgen.py b/core/gethwrappers/generation/fastgen.py new file mode 100644 index 00000000..5390e259 --- /dev/null +++ b/core/gethwrappers/generation/fastgen.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 + +'''Quick-and-dirty (very quick!) compiler and generator of go contract wrappers + +Usage: {fastgen_dir}/fastgen.py [ ...] + +DO NOT check in the outputs from this script! Instead, run `go generate` in the +parent directory. We are using solc-select for compilation of solidity contracts, and +using the abi files it outputs as a single source of truth. 
+ +However, this is much faster and more reliable, for actual development of +contracts which interact in intricate ways with go code. Once you're done with +development, be a good citizen before you push and replace the wrappers from +this script with those generated by `go generate` as described above. +(`../go_generate_test.go` will remind you with a CI failure if you forget.) + +This requires the correct versions of abigen and the correct version of solc on +your path, which can be installed as described in `../go_generate.go`. + +''' + +import os, pathlib, sys + +thisdir = os.path.abspath(os.path.dirname(sys.argv[0])) +godir = os.path.dirname(thisdir) +gogenpath = os.path.join(godir, 'go_generate.go') + +abigenpath = 'go run ./generation/generate/wrap.go' + +pkg_to_src = {} + +for line in open(gogenpath): + if abigenpath in line: + abipath, pkgname = line.split(abigenpath)[-1].strip().split() + srcpath = os.path.abspath(os.path.join(godir, abipath)).replace( + '/abi/', '/src/').replace('.json', '.sol') + if not os.path.exists(srcpath): + srcpath = os.path.join(os.path.dirname(srcpath), 'dev', + os.path.basename(srcpath)) + if not os.path.exists(srcpath): + srcpath = srcpath.replace('/dev/', '/tests/') + if os.path.basename(srcpath) != 'OffchainAggregator.sol': + assert os.path.exists(srcpath), 'could not find ' + \ + os.path.basename(srcpath) + pkg_to_src[pkgname] = srcpath + +args = sys.argv[1:] + +if len(args) == 0 or any(p not in pkg_to_src for p in args): + print(__doc__.format(fastgen_dir=thisdir)) + print("Here is the list of packages you can build. 
(You can add more by") + print("updating %s)" % gogenpath) + print() + longest = max(len(p) for p in pkg_to_src) + colwidth = longest + 4 + header = "Package name".ljust(colwidth) + "Contract Source" + print(header) + print('-' * len(header)) + for pkgname, contractpath in pkg_to_src.items(): + print(pkgname.ljust(colwidth) + contractpath) + sys.exit(1) + +for pkgname in args: + solidity_path = pkg_to_src[pkgname] + outpath = os.path.abspath(os.path.join(godir, 'generated', pkgname, + pkgname + '.go')) + pathlib.Path(os.path.dirname(outpath)).mkdir(exist_ok=True) + # assert not os.system( + # f'abigen -sol {solidity_path} -pkg {pkgname} -out {outpath}') + cmd = f'abigen -sol {solidity_path} -pkg {pkgname} -out {outpath}' + assert not os.system(cmd), 'Command "%s" failed' % cmd diff --git a/core/gethwrappers/generation/generate/wrap.go b/core/gethwrappers/generation/generate/wrap.go new file mode 100644 index 00000000..5d278be6 --- /dev/null +++ b/core/gethwrappers/generation/generate/wrap.go @@ -0,0 +1,48 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + gethParams "github.com/ethereum/go-ethereum/params" + + gethwrappers2 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers" +) + +func main() { + abiPath := os.Args[1] + binPath := os.Args[2] + className := os.Args[3] + pkgName := os.Args[4] + fmt.Println("Generating", pkgName, "contract wrapper") + + cwd, err := os.Getwd() // gethwrappers directory + if err != nil { + gethwrappers2.Exit("could not get working directory", err) + } + outDir := filepath.Join(cwd, "generated", pkgName) + // Check the MkdirAll error itself (mkdErr), not the stale Getwd err, + // so a failed directory creation aborts instead of being ignored. + if mkdErr := os.MkdirAll(outDir, 0700); mkdErr != nil { + gethwrappers2.Exit("failed to create wrapper dir", mkdErr) + } + outPath := filepath.Join(outDir, pkgName+".go") + + gethwrappers2.Abigen(gethwrappers2.AbigenArgs{ + Bin: binPath, ABI: abiPath, Out: outPath, Type: className, Pkg: pkgName, + }) + + // Build succeeded, so update the versions db with the new contract data + versions, err := 
gethwrappers2.ReadVersionsDB() + if err != nil { + gethwrappers2.Exit("could not read current versions database", err) + } + versions.GethVersion = gethParams.Version + versions.ContractVersions[pkgName] = gethwrappers2.ContractVersion{ + Hash: gethwrappers2.VersionHash(abiPath, binPath), + AbiPath: abiPath, + BinaryPath: binPath, + } + if err := gethwrappers2.WriteVersionsDB(versions); err != nil { + gethwrappers2.Exit("could not save versions db", err) + } +} diff --git a/core/gethwrappers/generation/generate_events_mock/create_events_mock_contract.go b/core/gethwrappers/generation/generate_events_mock/create_events_mock_contract.go new file mode 100644 index 00000000..0c72cfc0 --- /dev/null +++ b/core/gethwrappers/generation/generate_events_mock/create_events_mock_contract.go @@ -0,0 +1,294 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "text/template" + + "github.com/Masterminds/sprig/v3" +) + +type AbiEvent struct { + Type string `json:"type"` + Name string `json:"name"` + Inputs []AbiInputs `json:"inputs"` +} + +type AbiInputs struct { + Name string `json:"name"` + Type string `json:"type"` + Indexed bool `json:"indexed"` + InternalType string `json:"internalType,omitempty"` + Components []AbiComponents `json:"components,omitempty"` +} + +type AbiComponents struct { + Name string `json:"name"` + Type string `json:"type"` + InternalType string `json:"internalType"` +} + +type SolEvent struct { + Name string + Params []SolEventParam +} + +type SolEventParam struct { + Type string + InternalType string + Name string + Indexed bool +} + +type SolStruct struct { + Name string + SolStructParams []SolStructParam +} + +type SolStructParam struct { + Type string + Name string +} + +type SolFunction struct { + Name string + EventName string + Params []SolFunctionParam +} + +type SolFunctionParam struct { + Type string + Memory bool + Name string +} + +type TemplateData struct { + ContractName string + Events 
[]SolEvent + Structs []SolStruct + Functions []SolFunction +} + +func getABIFiles(root string) ([]string, error) { + var files []string + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() && strings.HasSuffix(path, ".abi") { + files = append(files, path) + } + return nil + }) + if err != nil { + return nil, err + } + return files, nil +} + +func extractEventsAndStructs(abiJSON []byte) ([]SolEvent, []SolStruct, error) { + var parsedABI []AbiEvent + err := json.Unmarshal(abiJSON, &parsedABI) + if err != nil { + return nil, nil, err + } + + var solEvents []SolEvent + var solStructs []SolStruct + + for _, item := range parsedABI { + if item.Type == "event" { + eventName := item.Name + var eventParams []SolEventParam + + for i, input := range item.Inputs { + if input.Name == "" { + input.Name = "param" + fmt.Sprintf("%d", i+1) + } + if input.Type == "tuple" && strings.Contains(input.InternalType, "struct") { + internalType := strings.TrimPrefix(input.InternalType, "struct ") + if strings.Contains(internalType, ".") { + internalType = strings.Split(internalType, ".")[1] + } + structName := internalType + + var solStructParams []SolStructParam + for _, component := range input.Components { + solStructParam := SolStructParam{ + Type: component.Type, + Name: component.Name, + } + solStructParams = append(solStructParams, solStructParam) + } + + solStruct := SolStruct{ + Name: structName, + SolStructParams: solStructParams, + } + solStructs = append(solStructs, solStruct) + + eventParams = append(eventParams, SolEventParam{ + Type: structName, + Name: input.Name, + Indexed: input.Indexed, + InternalType: "struct", + }) + } else { + eventParams = append(eventParams, SolEventParam{ + Type: input.Type, + Name: input.Name, + Indexed: input.Indexed, + }) + } + } + + solEvents = append(solEvents, SolEvent{ + Name: eventName, + Params: eventParams, + }) + } + } + + return solEvents, solStructs, 
nil +} + +func generateFunctions(solEvents []SolEvent) ([]SolFunction, error) { + var solFunctions []SolFunction + + for _, event := range solEvents { + var solParams []SolFunctionParam + + for _, eventParam := range event.Params { + memory := false + if eventParam.Type == "bytes" || eventParam.Type == "string" || strings.HasSuffix(eventParam.Type, "[]") || + eventParam.InternalType == "struct" { + memory = true + } + funcParam := SolFunctionParam{ + Type: eventParam.Type, + Memory: memory, + Name: eventParam.Name, + } + solParams = append(solParams, funcParam) + } + + solFunctions = append(solFunctions, SolFunction{ + Name: "emit" + event.Name, + EventName: event.Name, + Params: solParams, + }) + } + + return solFunctions, nil +} + +func generateContract(data TemplateData) (string, error) { + const templateStr = `// SPDX-License-Identifier: MIT +// Warning: this is an autogenerated file! DO NOT EDIT. + +pragma solidity ^0.8.6; + +contract {{ .ContractName }} { + {{- range .Structs }} + struct {{ .Name }} { {{- range .SolStructParams }}{{ .Type }} {{ .Name }}; {{- end }} } + {{- end }} + + {{- range $event := .Events }} + event {{ $event.Name }}({{- range $paramIndex, $param := $event.Params }}{{ $param.Type }}{{ if $param.Indexed }} indexed{{ end }} {{ $param.Name }}{{- if lt $paramIndex (sub1 (len $event.Params)) }},{{ end }}{{ end }}); + {{- end }} + + {{- range $function := .Functions }} + function {{ $function.Name }}({{- range $paramIndex, $param := $function.Params }}{{ $param.Type }}{{ if $param.Memory }} memory{{ end }} {{ $param.Name }}{{- if lt $paramIndex (sub1 (len $function.Params)) }},{{- end }}{{ end }}) public { + emit {{ $function.EventName }}({{- range $paramIndex, $param := $function.Params }}{{ $param.Name }}{{- if lt $paramIndex (sub1 (len $function.Params)) }},{{ end }}{{ end }}); + } + {{- end }} +} +` + + funcMap := template.FuncMap{ + "add1": func(x int) int { + return x + 1 + }, + "sub1": func(x int) int { + return x - 1 + }, + } + + 
tmpl, err := template.New("mockContract").Funcs(funcMap).Funcs(sprig.TxtFuncMap()).Parse(templateStr) + if err != nil { + return "", err + } + + var buf bytes.Buffer + err = tmpl.Execute(&buf, data) + if err != nil { + return "", err + } + + return buf.String(), nil +} + +func main() { + abiPath := os.Args[1] + solPath := os.Args[2] + contractName := os.Args[3] + + abiFiles, err := getABIFiles(abiPath) + if err != nil { + fmt.Println("Error finding ABI files:", err) + os.Exit(1) + } + + events := []SolEvent{} + structs := []SolStruct{} + functions := []SolFunction{} + + for _, abiFile := range abiFiles { + abiJSON, err2 := os.ReadFile(abiFile) + if err2 != nil { + fmt.Println("Error reading ABI file:", err2) + os.Exit(1) + } + + fileEvents, fileStructs, err2 := extractEventsAndStructs(abiJSON) + if err2 != nil { + fmt.Println("Error parsing events:", err2) + os.Exit(1) + } + fileFunctions, err2 := generateFunctions(fileEvents) + if err2 != nil { + fmt.Println("Error generating functions:", err2) + os.Exit(1) + } + + events = append(events, fileEvents...) + structs = append(structs, fileStructs...) + functions = append(functions, fileFunctions...) 
+ } + + // Generate the contract + data := TemplateData{ + ContractName: contractName, + Events: events, + Structs: structs, + Functions: functions, + } + contract, err := generateContract(data) + if err != nil { + fmt.Println("Error generating mock contract:", err) + os.Exit(1) + } + + // Save the mock contract to a file + err = os.WriteFile(solPath, []byte(contract), 0600) + if err != nil { + fmt.Println("Error writing mock contract to a file:", err) + os.Exit(1) + } + + fmt.Printf("Generated %s.sol mock contract!\n", contractName) +} diff --git a/core/gethwrappers/generation/generate_link/wrap_link.go b/core/gethwrappers/generation/generate_link/wrap_link.go new file mode 100644 index 00000000..1667f476 --- /dev/null +++ b/core/gethwrappers/generation/generate_link/wrap_link.go @@ -0,0 +1,77 @@ +// package main is a script for generating a geth golang contract wrapper for +// the PLI token contract. +// +// Usage: +// +// With core/gethwrappers as your working directory, run +// +// go run generation/generate_link/wrap_link.go +// +// This will output the generated file to +// generated/link_token_interface/link_token_interface.go + +package main + +import ( + "crypto/sha256" + "fmt" + "os" + "path/filepath" + + "github.com/tidwall/gjson" + + gethwrappers2 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func main() { + pkgName := "link_token_interface" + fmt.Println("Generating", pkgName, "contract wrapper") + className := "LinkToken" + tmpDir, cleanup := gethwrappers2.TempDir(className) + defer cleanup() + linkDetails, err := os.ReadFile(filepath.Join( + gethwrappers2.GetProjectRoot(), "contracts/LinkToken.json")) + if err != nil { + gethwrappers2.Exit("could not read PLI contract details", err) + } + if fmt.Sprintf("%x", sha256.Sum256(linkDetails)) != + "27c0e17a79553fccc63a4400c6bbe415ff710d9cc7c25757bff0f7580205c922" { + gethwrappers2.Exit("PLI details should never change!", nil) + } + abi, err :=
utils.NormalizedJSON([]byte( + gjson.Get(string(linkDetails), "abi").String())) + if err != nil || abi == "" { + gethwrappers2.Exit("could not extract PLI ABI", err) + } + abiPath := filepath.Join(tmpDir, "abi") + if aErr := os.WriteFile(abiPath, []byte(abi), 0600); aErr != nil { + gethwrappers2.Exit("could not write contract ABI to temp dir.", aErr) + } + bin := gjson.Get(string(linkDetails), "bytecode").String() + if bin == "" { + gethwrappers2.Exit("could not extract PLI bytecode", nil) + } + binPath := filepath.Join(tmpDir, "bin") + if bErr := os.WriteFile(binPath, []byte(bin), 0600); bErr != nil { + gethwrappers2.Exit("could not write contract binary to temp dir.", bErr) + } + cwd, err := os.Getwd() + if err != nil { + gethwrappers2.Exit("could not get working directory", nil) + } + if filepath.Base(cwd) != "gethwrappers" { + gethwrappers2.Exit("must be run from gethwrappers directory", nil) + } + outDir := filepath.Join(cwd, "generated", pkgName) + if err := os.MkdirAll(outDir, 0700); err != nil { + gethwrappers2.Exit("failed to create wrapper dir", err) + } + gethwrappers2.Abigen(gethwrappers2.AbigenArgs{ + Bin: binPath, + ABI: abiPath, + Out: filepath.Join(outDir, pkgName+".go"), + Type: className, + Pkg: pkgName, + }) +} diff --git a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt new file mode 100644 index 00000000..ca56bbf9 --- /dev/null +++ b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -0,0 +1,119 @@ +GETH_VERSION: 1.13.8 +aggregator_v2v3_interface: ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV2V3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV2V3Interface.bin 95e8814b408bb05bf21742ef580d98698b7db6a9bac6a35c3de12b23aec4ee28 +aggregator_v3_interface: ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV3Interface.abi 
../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV3Interface.bin 351b55d3b0f04af67db6dfb5c92f1c64479400ca1fec77afc20bc0ce65cb49ab +authorized_forwarder: ../../contracts/solc/v0.8.19/AuthorizedForwarder/AuthorizedForwarder.abi ../../contracts/solc/v0.8.19/AuthorizedForwarder/AuthorizedForwarder.bin 8ea76c883d460f8353a45a493f2aebeb5a2d9a7b4619d1bc4fff5fb590bb3e10 +authorized_receiver: ../../contracts/solc/v0.8.19/AuthorizedReceiver/AuthorizedReceiver.abi ../../contracts/solc/v0.8.19/AuthorizedReceiver/AuthorizedReceiver.bin 18e8969ba3234b027e1b16c11a783aca58d0ea5c2361010ec597f134b7bf1c4f +automation_consumer_benchmark: ../../contracts/solc/v0.8.16/AutomationConsumerBenchmark/AutomationConsumerBenchmark.abi ../../contracts/solc/v0.8.16/AutomationConsumerBenchmark/AutomationConsumerBenchmark.bin f52c76f1aaed4be541d82d97189d70f5aa027fc9838037dd7a7d21910c8c488e +automation_forwarder_logic: ../../contracts/solc/v0.8.16/AutomationForwarderLogic/AutomationForwarderLogic.abi ../../contracts/solc/v0.8.16/AutomationForwarderLogic/AutomationForwarderLogic.bin 15ae0c367297955fdab4b552dbb10e1f2be80a8fde0efec4a4d398693e9d72b5 +automation_registrar_wrapper2_1: ../../contracts/solc/v0.8.16/AutomationRegistrar2_1/AutomationRegistrar2_1.abi ../../contracts/solc/v0.8.16/AutomationRegistrar2_1/AutomationRegistrar2_1.bin eb06d853aab39d3196c593b03e555851cbe8386e0fe54a74c2479f62d14b3c42 +automation_registrar_wrapper2_2: ../../contracts/solc/v0.8.19/AutomationRegistrar2_2/AutomationRegistrar2_2.abi ../../contracts/solc/v0.8.19/AutomationRegistrar2_2/AutomationRegistrar2_2.bin 7c61908c1bb1bfd05a4da22bb73d62c0e2c05240f3f8fb5e06331603ff2246a9 +automation_registry_logic_a_wrapper_2_2: ../../contracts/solc/v0.8.19/AutomationRegistryLogicA2_2/AutomationRegistryLogicA2_2.abi ../../contracts/solc/v0.8.19/AutomationRegistryLogicA2_2/AutomationRegistryLogicA2_2.bin 38036d73155b89d241e6f17aab0af78be21e90dfa9e455cd575fe02d1a6474f9 +automation_registry_logic_b_wrapper_2_2: 
../../contracts/solc/v0.8.19/AutomationRegistryLogicB2_2/AutomationRegistryLogicB2_2.abi ../../contracts/solc/v0.8.19/AutomationRegistryLogicB2_2/AutomationRegistryLogicB2_2.bin e5669214a6b747b17331ebbf8f2d13cf7100d3313d652c6f1304ccf158441fc6 +automation_registry_wrapper_2_2: ../../contracts/solc/v0.8.19/AutomationRegistry2_2/AutomationRegistry2_2.abi ../../contracts/solc/v0.8.19/AutomationRegistry2_2/AutomationRegistry2_2.bin eca1187a878b622ef3fced041a28a4229d45dd797d95630838ff6351b6afc437 +automation_utils_2_1: ../../contracts/solc/v0.8.16/AutomationUtils2_1/AutomationUtils2_1.abi ../../contracts/solc/v0.8.16/AutomationUtils2_1/AutomationUtils2_1.bin 331bfa79685aee6ddf63b64c0747abee556c454cae3fb8175edff425b615d8aa +automation_utils_2_2: ../../contracts/solc/v0.8.19/AutomationUtils2_2/AutomationUtils2_2.abi ../../contracts/solc/v0.8.19/AutomationUtils2_2/AutomationUtils2_2.bin 6fe2e41b1d3b74bee4013a48c10d84da25e559f28e22749aa13efabbf2cc2ee8 +batch_blockhash_store: ../../contracts/solc/v0.8.6/BatchBlockhashStore/BatchBlockhashStore.abi ../../contracts/solc/v0.8.6/BatchBlockhashStore/BatchBlockhashStore.bin 14356c48ef70f66ef74f22f644450dbf3b2a147c1b68deaa7e7d1eb8ffab15db +batch_vrf_coordinator_v2: ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2/BatchVRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2/BatchVRFCoordinatorV2.bin d0a54963260d8c1f1bbd984b758285e6027cfb5a7e42701bcb562ab123219332 +batch_vrf_coordinator_v2plus: ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.bin 73cb626b5cb2c3464655b61b8ac42fe7a1963fe25e6a5eea40b8e4d5bff3de36 +blockhash_store: ../../contracts/solc/v0.8.6/BlockhashStore/BlockhashStore.abi ../../contracts/solc/v0.8.6/BlockhashStore/BlockhashStore.bin 12b0662f1636a341c8863bdec7a20f2ddd97c3a4fd1a7ae353fe316609face4e +chain_reader_example: 
../../contracts/solc/v0.8.19/ChainReaderTestContract/LatestValueHolder.abi ../../contracts/solc/v0.8.19/ChainReaderTestContract/LatestValueHolder.bin de88c7e68de36b96aa2bec844bdc96fcd7c9017b38e25062b3b9f9cec42c814f +chain_specific_util_helper: ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper/ChainSpecificUtilHelper.abi ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper/ChainSpecificUtilHelper.bin 5f10664e31abc768f4a37901cae7a3bef90146180f97303e5a1bde5a08d84595 +consumer_wrapper: ../../contracts/solc/v0.7/Consumer/Consumer.abi ../../contracts/solc/v0.7/Consumer/Consumer.bin 894d1cbd920dccbd36d92918c1037c6ded34f66f417ccb18ec3f33c64ef83ec5 +cron_upkeep_factory_wrapper: ../../contracts/solc/v0.8.6/CronUpkeepFactory/CronUpkeepFactory.abi - dacb0f8cdf54ae9d2781c5e720fc314b32ed5e58eddccff512c75d6067292cd7 +cron_upkeep_wrapper: ../../contracts/solc/v0.8.6/CronUpkeepFactory/CronUpkeep.abi - 362fcfcf30a6ab3acff83095ea4b2b9056dd5e9dcb94bc5411aae58995d22709 +dummy_protocol_wrapper: ../../contracts/solc/v0.8.16/DummyProtocol/DummyProtocol.abi ../../contracts/solc/v0.8.16/DummyProtocol/DummyProtocol.bin 583a448170b13abf7ed64e406e8177d78c9e55ab44efd141eee60de23a71ee3b +flags_wrapper: ../../contracts/solc/v0.6/Flags/Flags.abi ../../contracts/solc/v0.6/Flags/Flags.bin 2034d1b562ca37a63068851915e3703980276e8d5f7db6db8a3351a49d69fc4a +flux_aggregator_wrapper: ../../contracts/solc/v0.6/FluxAggregator/FluxAggregator.abi ../../contracts/solc/v0.6/FluxAggregator/FluxAggregator.bin a3b0a6396c4aa3b5ee39b3c4bd45efc89789d4859379a8a92caca3a0496c5794 +gas_wrapper: ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.abi ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.bin 4a5dcdac486d18fcd58e3488c15c1710ae76b977556a3f3191bd269a4bc75723 +gas_wrapper_mock: 
../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.abi ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.bin a9b08f18da59125c6fc305855710241f3d35161b8b9f3e3f635a7b1d5c6da9c8 +i_automation_registry_master_wrapper_2_2: ../../contracts/solc/v0.8.19/IAutomationRegistryMaster/IAutomationRegistryMaster.abi ../../contracts/solc/v0.8.19/IAutomationRegistryMaster/IAutomationRegistryMaster.bin 0886dd1df1f4dcf5b08012f8adcf30fd96caab28999610e70ce02beb2170c92f +i_keeper_registry_master_wrapper_2_1: ../../contracts/solc/v0.8.16/IKeeperRegistryMaster/IKeeperRegistryMaster.abi ../../contracts/solc/v0.8.16/IKeeperRegistryMaster/IKeeperRegistryMaster.bin 6501bb9bcf5048bab2737b00685c6984a24867e234ddf5b60a65904eee9a4ebc +i_log_automation: ../../contracts/solc/v0.8.16/ILogAutomation/ILogAutomation.abi ../../contracts/solc/v0.8.16/ILogAutomation/ILogAutomation.bin 296beccb6af655d6fc3a6e676b244831cce2da6688d3afc4f21f8738ae59e03e +keeper_consumer_performance_wrapper: ../../contracts/solc/v0.8.16/KeeperConsumerPerformance/KeeperConsumerPerformance.abi ../../contracts/solc/v0.8.16/KeeperConsumerPerformance/KeeperConsumerPerformance.bin eeda39f5d3e1c8ffa0fb6cd1803731b98a4bc262d41833458e3fe8b40933ae90 +keeper_consumer_wrapper: ../../contracts/solc/v0.8.16/KeeperConsumer/KeeperConsumer.abi ../../contracts/solc/v0.8.16/KeeperConsumer/KeeperConsumer.bin 2c6163b145082fbab74b7343577a9cec8fda8b0da9daccf2a82581b1f5a84b83 +keeper_registrar_wrapper1_2: ../../contracts/solc/v0.8.6/KeeperRegistrar1_2/KeeperRegistrar.abi ../../contracts/solc/v0.8.6/KeeperRegistrar1_2/KeeperRegistrar.bin e49b2f8b23da17af1ed2209b8ae0968cc04350554d636711e6c24a3ad3118692 +keeper_registrar_wrapper1_2_mock: ../../contracts/solc/v0.8.6/KeeperRegistrar1_2Mock/KeeperRegistrar1_2Mock.abi ../../contracts/solc/v0.8.6/KeeperRegistrar1_2Mock/KeeperRegistrar1_2Mock.bin 
5b155a7cb3def309fd7525de1d7cd364ebf8491bdc3060eac08ea0ff55ab29bc +keeper_registrar_wrapper2_0: ../../contracts/solc/v0.8.6/KeeperRegistrar2_0/KeeperRegistrar2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistrar2_0/KeeperRegistrar2_0.bin 647f125c2f0dafabcdc545cb77b15dc2ec3ea9429357806813179b1fd555c2d2 +keeper_registry_logic1_3: ../../contracts/solc/v0.8.6/KeeperRegistryLogic1_3/KeeperRegistryLogic1_3.abi ../../contracts/solc/v0.8.6/KeeperRegistryLogic1_3/KeeperRegistryLogic1_3.bin 903f8b9c8e25425ca6d0b81b89e339d695a83630bfbfa24a6f3b38869676bc5a +keeper_registry_logic2_0: ../../contracts/solc/v0.8.6/KeeperRegistryLogic2_0/KeeperRegistryLogic2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistryLogic2_0/KeeperRegistryLogic2_0.bin d69d2bc8e4844293dbc2d45abcddc50b84c88554ecccfa4fa77c0ca45ec80871 +keeper_registry_logic_a_wrapper_2_1: ../../contracts/solc/v0.8.16/KeeperRegistryLogicA2_1/KeeperRegistryLogicA2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistryLogicA2_1/KeeperRegistryLogicA2_1.bin 77481ab75c9aa86a62a7b2a708599b5ea1a6346ed1c0def6d4826e7ae523f1ee +keeper_registry_logic_b_wrapper_2_1: ../../contracts/solc/v0.8.16/KeeperRegistryLogicB2_1/KeeperRegistryLogicB2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistryLogicB2_1/KeeperRegistryLogicB2_1.bin 467d10741a04601b136553a2b1c6ab37f2a65d809366faf03180a22ff26be215 +keeper_registry_wrapper1_1: ../../contracts/solc/v0.7/KeeperRegistry1_1/KeeperRegistry1_1.abi ../../contracts/solc/v0.7/KeeperRegistry1_1/KeeperRegistry1_1.bin 6ce079f2738f015f7374673a2816e8e9787143d00b780ea7652c8aa9ad9e1e20 +keeper_registry_wrapper1_1_mock: ../../contracts/solc/v0.7/KeeperRegistry1_1Mock/KeeperRegistry1_1Mock.abi ../../contracts/solc/v0.7/KeeperRegistry1_1Mock/KeeperRegistry1_1Mock.bin 98ddb3680e86359de3b5d17e648253ba29a84703f087a1b52237824003a8c6df +keeper_registry_wrapper1_2: ../../contracts/solc/v0.8.6/KeeperRegistry1_2/KeeperRegistry1_2.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_2/KeeperRegistry1_2.bin 
a40ff877dd7c280f984cbbb2b428e160662b0c295e881d5f778f941c0088ca22 +keeper_registry_wrapper1_3: ../../contracts/solc/v0.8.6/KeeperRegistry1_3/KeeperRegistry1_3.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_3/KeeperRegistry1_3.bin d4dc760b767ae274ee25c4a604ea371e1fa603a7b6421b69efb2088ad9e8abb3 +keeper_registry_wrapper2_0: ../../contracts/solc/v0.8.6/KeeperRegistry2_0/KeeperRegistry2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistry2_0/KeeperRegistry2_0.bin c32dea7d5ef66b7c58ddc84ddf69aa44df1b3ae8601fbc271c95be4ff5853056 +keeper_registry_wrapper_2_1: ../../contracts/solc/v0.8.16/KeeperRegistry2_1/KeeperRegistry2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistry2_1/KeeperRegistry2_1.bin 604e4a0cd980c713929b523b999462a3aa0ed06f96ff563a4c8566cf59c8445b +keepers_vrf_consumer: ../../contracts/solc/v0.8.6/KeepersVRFConsumer/KeepersVRFConsumer.abi ../../contracts/solc/v0.8.6/KeepersVRFConsumer/KeepersVRFConsumer.bin fa75572e689c9e84705c63e8dbe1b7b8aa1a8fe82d66356c4873d024bb9166e8 +log_emitter: ../../contracts/solc/v0.8.19/LogEmitter/LogEmitter.abi ../../contracts/solc/v0.8.19/LogEmitter/LogEmitter.bin 4b129ab93432c95ff9143f0631323e189887668889e0b36ccccf18a571e41ccf +log_triggered_streams_lookup_wrapper: ../../contracts/solc/v0.8.16/LogTriggeredStreamsLookup/LogTriggeredStreamsLookup.abi ../../contracts/solc/v0.8.16/LogTriggeredStreamsLookup/LogTriggeredStreamsLookup.bin f8da43a927c1a66238a9f4fd5d5dd7e280e361daa0444da1f7f79498ace901e1 +log_upkeep_counter_wrapper: ../../contracts/solc/v0.8.6/LogUpkeepCounter/LogUpkeepCounter.abi ../../contracts/solc/v0.8.6/LogUpkeepCounter/LogUpkeepCounter.bin 42426bbb83f96dfbe55fc576d6c65020eaeed690e2289cf99b0c4aa810a5f4ec +mock_aggregator_proxy: ../../contracts/solc/v0.8.6/MockAggregatorProxy/MockAggregatorProxy.abi ../../contracts/solc/v0.8.6/MockAggregatorProxy/MockAggregatorProxy.bin b16c108f3dd384c342ddff5e94da7c0a8d39d1be5e3d8f2cf61ecc7f0e50ff42 +mock_ethlink_aggregator_wrapper: 
../../contracts/solc/v0.6/MockETHPLIAggregator/MockETHPLIAggregator.abi ../../contracts/solc/v0.6/MockETHPLIAggregator/MockETHPLIAggregator.bin 1c52c24f797b8482aa12b8251dcea1c072827bd5b3426b822621261944b99ca0 +mock_gas_aggregator_wrapper: ../../contracts/solc/v0.6/MockGASAggregator/MockGASAggregator.abi ../../contracts/solc/v0.6/MockGASAggregator/MockGASAggregator.bin bacbb1ea4dc6beac0db8a13ca5c75e2fd61b903d70feea9b3b1c8b10fe8df4f3 +multiwordconsumer_wrapper: ../../contracts/solc/v0.7/MultiWordConsumer/MultiWordConsumer.abi ../../contracts/solc/v0.7/MultiWordConsumer/MultiWordConsumer.bin 6e68abdf614e3ed0f5066c1b5f9d7c1199f1e7c5c5251fe8a471344a59afc6ba +offchain_aggregator_wrapper: OffchainAggregator/OffchainAggregator.abi - 5c8d6562e94166d4790f1ee6e4321d359d9f7262e6c5452a712b1f1c896f45cf +operator_factory: ../../contracts/solc/v0.8.19/OperatorFactory/OperatorFactory.abi ../../contracts/solc/v0.8.19/OperatorFactory/OperatorFactory.bin 357203fabe3df436eb015e2d5094374c6967a9fc922ac8edc265b27aac4d67cf +operator_wrapper: ../../contracts/solc/v0.8.19/Operator/Operator.abi ../../contracts/solc/v0.8.19/Operator/Operator.bin c5e1db81070d940a82ef100b0bce38e055593cbeebbc73abf9d45c30d6020cd2 +oracle_wrapper: ../../contracts/solc/v0.6/Oracle/Oracle.abi ../../contracts/solc/v0.6/Oracle/Oracle.bin 7af2fbac22a6e8c2847e8e685a5400cac5101d72ddf5365213beb79e4dede43a +perform_data_checker_wrapper: ../../contracts/solc/v0.8.16/PerformDataChecker/PerformDataChecker.abi ../../contracts/solc/v0.8.16/PerformDataChecker/PerformDataChecker.bin 48d8309c2117c29a24e1155917ab0b780956b2cd6a8a39ef06ae66a7f6d94f73 +simple_log_upkeep_counter_wrapper: ../../contracts/solc/v0.8.6/SimpleLogUpkeepCounter/SimpleLogUpkeepCounter.abi ../../contracts/solc/v0.8.6/SimpleLogUpkeepCounter/SimpleLogUpkeepCounter.bin a2532ca73e227f846be39b52fa63cfa9d088116c3cfc311d972fe8db886fa915 +solidity_vrf_consumer_interface: ../../contracts/solc/v0.6/VRFConsumer/VRFConsumer.abi 
../../contracts/solc/v0.6/VRFConsumer/VRFConsumer.bin ecc99378aa798014de9db42b2eb81320778b0663dbe208008dad75ccdc1d4366 +solidity_vrf_consumer_interface_v08: ../../contracts/solc/v0.8.6/VRFConsumer/VRFConsumer.abi ../../contracts/solc/v0.8.6/VRFConsumer/VRFConsumer.bin b14f9136b15e3dc9d6154d5700f3ed4cf88ddc4f70f20c3bb57fc46050904c8f +solidity_vrf_coordinator_interface: ../../contracts/solc/v0.6/VRFCoordinator/VRFCoordinator.abi ../../contracts/solc/v0.6/VRFCoordinator/VRFCoordinator.bin a23d3c395156804788c7f6fbda2994e8f7184304c0f0c9f2c4ddeaf073d346d2 +solidity_vrf_request_id: ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.bin 383b59e861732c1911ddb7b002c6158608496ce889979296527215fd0366b318 +solidity_vrf_request_id_v08: ../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.bin f2559015d6f3e5d285c57b011be9b2300632e93dd6c4524e58202d6200f09edc +solidity_vrf_v08_verifier_wrapper: ../../contracts/solc/v0.8.6/VRFTestHelper/VRFTestHelper.abi ../../contracts/solc/v0.8.6/VRFTestHelper/VRFTestHelper.bin f37f8b21a81c113085c6137835a2246db6ebda07da455c4f2b5c7ec60c725c3b +solidity_vrf_verifier_wrapper: ../../contracts/solc/v0.6/VRFTestHelper/VRFTestHelper.abi ../../contracts/solc/v0.6/VRFTestHelper/VRFTestHelper.bin 44c2b67d8d2990ab580453deb29d63508c6147a3dc49908a1db563bef06e6474 +solidity_vrf_wrapper: ../../contracts/solc/v0.6/VRF/VRF.abi ../../contracts/solc/v0.6/VRF/VRF.bin 04ede5b83c06ba5b76ef99c081c72928007d8a7aaefcf21449a46a07cbd4bfc2 +streams_lookup_compatible_interface: ../../contracts/solc/v0.8.16/StreamsLookupCompatibleInterface/StreamsLookupCompatibleInterface.abi ../../contracts/solc/v0.8.16/StreamsLookupCompatibleInterface/StreamsLookupCompatibleInterface.bin feb92cc666df21ea04ab9d7a588a513847b01b2f66fc167d06ab28ef2b17e015 
+streams_lookup_upkeep_wrapper: ../../contracts/solc/v0.8.16/StreamsLookupUpkeep/StreamsLookupUpkeep.abi ../../contracts/solc/v0.8.16/StreamsLookupUpkeep/StreamsLookupUpkeep.bin b1a598963cacac51ed4706538d0f142bdc0d94b9a4b13e2d402131cdf05c9bcf +test_api_consumer_wrapper: ../../contracts/solc/v0.6/TestAPIConsumer/TestAPIConsumer.abi ../../contracts/solc/v0.6/TestAPIConsumer/TestAPIConsumer.bin ed10893cb18894c18e275302329c955f14ea2de37ee044f84aa1e067ac5ea71e +trusted_blockhash_store: ../../contracts/solc/v0.8.6/TrustedBlockhashStore/TrustedBlockhashStore.abi ../../contracts/solc/v0.8.6/TrustedBlockhashStore/TrustedBlockhashStore.bin 98cb0dc06c15af5dcd3b53bdfc98e7ed2489edc96a42203294ac2fc0efdda02b +type_and_version_interface_wrapper: ../../contracts/solc/v0.8.6/KeeperRegistry1_2/TypeAndVersionInterface.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_2/TypeAndVersionInterface.bin bc9c3a6e73e3ebd5b58754df0deeb3b33f4bb404d5709bb904aed51d32f4b45e +upkeep_counter_wrapper: ../../contracts/solc/v0.8.16/UpkeepCounter/UpkeepCounter.abi ../../contracts/solc/v0.8.16/UpkeepCounter/UpkeepCounter.bin 77f000229a501f638dd2dc439859257f632894c728b31e68aea4f6d6c52f1b71 +upkeep_perform_counter_restrictive_wrapper: ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive/UpkeepPerformCounterRestrictive.abi ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive/UpkeepPerformCounterRestrictive.bin 20955b21acceb58355fa287b29194a73edf5937067ba7140667301017cb2b24c +upkeep_transcoder: ../../contracts/solc/v0.8.6/UpkeepTranscoder/UpkeepTranscoder.abi ../../contracts/solc/v0.8.6/UpkeepTranscoder/UpkeepTranscoder.bin 336c92a981597be26508455f81a908a0784a817b129a59686c5b2c4afcba730a +verifiable_load_log_trigger_upkeep_wrapper: ../../contracts/solc/v0.8.16/VerifiableLoadLogTriggerUpkeep/VerifiableLoadLogTriggerUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadLogTriggerUpkeep/VerifiableLoadLogTriggerUpkeep.bin fb674ba44c0e8f3b385cd10b2f7dea5cd07b5f38df08066747e8b1542e152557 
+verifiable_load_streams_lookup_upkeep_wrapper: ../../contracts/solc/v0.8.16/VerifiableLoadStreamsLookupUpkeep/VerifiableLoadStreamsLookupUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadStreamsLookupUpkeep/VerifiableLoadStreamsLookupUpkeep.bin 785f68c44bfff070505eaa65e38a1af94046e5f9afc1189bcf2c8cfcd1102d66 +verifiable_load_upkeep_wrapper: ../../contracts/solc/v0.8.16/VerifiableLoadUpkeep/VerifiableLoadUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadUpkeep/VerifiableLoadUpkeep.bin a3e02c43756ea91e7ce4b81e48c11648f1d12f6663c236780147e41dfa36ebee +vrf_consumer_v2: ../../contracts/solc/v0.8.6/VRFConsumerV2/VRFConsumerV2.abi ../../contracts/solc/v0.8.6/VRFConsumerV2/VRFConsumerV2.bin 9ef258bf8e9f8d880fd229ceb145593d91e24fc89366baa0bf19169c5787d15f +vrf_consumer_v2_plus_upgradeable_example: ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample/VRFConsumerV2PlusUpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample/VRFConsumerV2PlusUpgradeableExample.bin 3155c611e4d6882e9324b6e975033b31356776ea8b031ca63d63da37589d583b +vrf_consumer_v2_upgradeable_example: ../../contracts/solc/v0.8.6/VRFConsumerV2UpgradeableExample/VRFConsumerV2UpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2UpgradeableExample/VRFConsumerV2UpgradeableExample.bin f1790a9a2f2a04c730593e483459709cb89e897f8a19d7a3ac0cfe6a97265e6e +vrf_coordinator_mock: ../../contracts/solc/v0.8.6/VRFCoordinatorMock/VRFCoordinatorMock.abi ../../contracts/solc/v0.8.6/VRFCoordinatorMock/VRFCoordinatorMock.bin 5c495cf8df1f46d8736b9150cdf174cce358cb8352f60f0d5bb9581e23920501 +vrf_coordinator_test_v2: ../../contracts/solc/v0.8.6/VRFCoordinatorTestV2/VRFCoordinatorTestV2.abi ../../contracts/solc/v0.8.6/VRFCoordinatorTestV2/VRFCoordinatorTestV2.bin eaefd785c38bac67fb11a7fc2737ab2da68c988ca170e7db8ff235c80893e01c +vrf_coordinator_v2: ../../contracts/solc/v0.8.6/VRFCoordinatorV2/VRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2/VRFCoordinatorV2.bin 
295f35ce282060317dfd01f45959f5a2b05ba26913e422fbd4fb6bf90b107006 +vrf_coordinator_v2_5: ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5/VRFCoordinatorV2_5.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5/VRFCoordinatorV2_5.bin aa5875f42461b4f128483ee0fd8b1f1b72a395ee857e6153197e92bcb21d149f +vrf_coordinator_v2_plus_v2_example: ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example/VRFCoordinatorV2Plus_V2Example.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example/VRFCoordinatorV2Plus_V2Example.bin 4a5b86701983b1b65f0a8dfa116b3f6d75f8f706fa274004b57bdf5992e4cec3 +vrf_coordinator_v2plus_interface: ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal/IVRFCoordinatorV2PlusInternal.abi ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal/IVRFCoordinatorV2PlusInternal.bin 86b8e23aab28c5b98e3d2384dc4f702b093e382dc985c88101278e6e4bf6f7b8 +vrf_external_sub_owner_example: ../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample/VRFExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample/VRFExternalSubOwnerExample.bin 14f888eb313930b50233a6f01ea31eba0206b7f41a41f6311670da8bb8a26963 +vrf_load_test_external_sub_owner: ../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner/VRFLoadTestExternalSubOwner.abi ../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner/VRFLoadTestExternalSubOwner.bin 2097faa70265e420036cc8a3efb1f1e0836ad2d7323b295b9a26a125dbbe6c7d +vrf_load_test_ownerless_consumer: ../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer/VRFLoadTestOwnerlessConsumer.abi ../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer/VRFLoadTestOwnerlessConsumer.bin 74f914843cbc70b9c3079c3e1c709382ce415225e8bb40113e7ac018bfcb0f5c +vrf_load_test_with_metrics: ../../contracts/solc/v0.8.6/VRFV2LoadTestWithMetrics/VRFV2LoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2LoadTestWithMetrics/VRFV2LoadTestWithMetrics.bin c9621c52d216a090ff6bbe942f1b75d2bce8658a27323c3789e5e14b523277ee +vrf_log_emitter: 
../../contracts/solc/v0.8.19/VRFLogEmitter/VRFLogEmitter.abi ../../contracts/solc/v0.8.19/VRFLogEmitter/VRFLogEmitter.bin 15f491d445ac4d0c712d1cbe4e5054c759b080bf20de7d54bfe2a82cde4dcf06 +vrf_malicious_consumer_v2: ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2/VRFMaliciousConsumerV2.abi ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2/VRFMaliciousConsumerV2.bin 9755fa8ffc7f5f0b337d5d413d77b0c9f6cd6f68c31727d49acdf9d4a51bc522 +vrf_malicious_consumer_v2_plus: ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus/VRFMaliciousConsumerV2Plus.abi ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus/VRFMaliciousConsumerV2Plus.bin e2a72638e11da807b6533d037e7e5aaeed695efd5035777b8e20d2f8973a574c +vrf_mock_ethlink_aggregator: ../../contracts/solc/v0.8.6/VRFMockETHPLIAggregator/VRFMockETHPLIAggregator.abi ../../contracts/solc/v0.8.6/VRFMockETHPLIAggregator/VRFMockETHPLIAggregator.bin a6e753984eeec8107e205ae517f74d4616bf23cffda50a25538ffc16ac4b036f +vrf_owner: ../../contracts/solc/v0.8.6/VRFOwner/VRFOwner.abi ../../contracts/solc/v0.8.6/VRFOwner/VRFOwner.bin eccfae5ee295b5850e22f61240c469f79752b8d9a3bac5d64aec7ac8def2f6cb +vrf_owner_test_consumer: ../../contracts/solc/v0.8.6/VRFV2OwnerTestConsumer/VRFV2OwnerTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2OwnerTestConsumer/VRFV2OwnerTestConsumer.bin 6969de242efe8f366ae4097fc279d9375c8e2d0307aaa322e31f2ce6b8c1909a +vrf_ownerless_consumer_example: ../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample/VRFOwnerlessConsumerExample.abi ../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample/VRFOwnerlessConsumerExample.bin 9893b3805863273917fb282eed32274e32aa3d5c2a67a911510133e1218132be +vrf_single_consumer_example: ../../contracts/solc/v0.8.6/VRFSingleConsumerExample/VRFSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFSingleConsumerExample/VRFSingleConsumerExample.bin 892a5ed35da2e933f7fd7835cd6f7f70ef3aa63a9c03a22c5b1fd026711b0ece +vrf_v2_consumer_wrapper: 
../../contracts/solc/v0.8.6/VRFv2Consumer/VRFv2Consumer.abi ../../contracts/solc/v0.8.6/VRFv2Consumer/VRFv2Consumer.bin 12368b3b5e06392440143a13b94c0ea2f79c4c897becc3b060982559e10ace40 +vrf_v2plus_load_test_with_metrics: ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics/VRFV2PlusLoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics/VRFV2PlusLoadTestWithMetrics.bin 0a89cb7ed9dfb42f91e559b03dc351ccdbe14d281a7ab71c63bd3f47eeed7711 +vrf_v2plus_single_consumer: ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample/VRFV2PlusSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample/VRFV2PlusSingleConsumerExample.bin 6226d05afa1664033b182bfbdde11d5dfb1d4c8e3eb0bd0448c8bfb76f5b96e4 +vrf_v2plus_sub_owner: ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample/VRFV2PlusExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample/VRFV2PlusExternalSubOwnerExample.bin 7541f986571b8a5671a256edc27ae9b8df9bcdff45ac3b96e5609bbfcc320e4e +vrf_v2plus_upgraded_version: ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion/VRFCoordinatorV2PlusUpgradedVersion.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion/VRFCoordinatorV2PlusUpgradedVersion.bin 09e4186c64cdaf1e5d36405467fb86996d7e4177cb08ecec425a4352d4246140 +vrfv2_proxy_admin: ../../contracts/solc/v0.8.6/VRFV2ProxyAdmin/VRFV2ProxyAdmin.abi ../../contracts/solc/v0.8.6/VRFV2ProxyAdmin/VRFV2ProxyAdmin.bin 402b1103087ffe1aa598854a8f8b38f8cd3de2e3aaa86369e28017a9157f4980 +vrfv2_reverting_example: ../../contracts/solc/v0.8.6/VRFV2RevertingExample/VRFV2RevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2RevertingExample/VRFV2RevertingExample.bin 1ae46f80351d428bd85ba58b9041b2a608a1845300d79a8fed83edf96606de87 +vrfv2_transparent_upgradeable_proxy: ../../contracts/solc/v0.8.6/VRFV2TransparentUpgradeableProxy/VRFV2TransparentUpgradeableProxy.abi 
../../contracts/solc/v0.8.6/VRFV2TransparentUpgradeableProxy/VRFV2TransparentUpgradeableProxy.bin fe1a8e6852fbd06d91f64315c5cede86d340891f5b5cc981fb5b86563f7eac3f +vrfv2_wrapper: ../../contracts/solc/v0.8.6/VRFV2Wrapper/VRFV2Wrapper.abi ../../contracts/solc/v0.8.6/VRFV2Wrapper/VRFV2Wrapper.bin d5e9a982325d2d4f517c4f2bc818795f61555408ef4b38fb59b923d144970e38 +vrfv2_wrapper_consumer_example: ../../contracts/solc/v0.8.6/VRFV2WrapperConsumerExample/VRFV2WrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2WrapperConsumerExample/VRFV2WrapperConsumerExample.bin 3c5c9f1c501e697a7e77e959b48767e2a0bb1372393fd7686f7aaef3eb794231 +vrfv2_wrapper_interface: ../../contracts/solc/v0.8.6/VRFV2WrapperInterface/VRFV2WrapperInterface.abi ../../contracts/solc/v0.8.6/VRFV2WrapperInterface/VRFV2WrapperInterface.bin ff8560169de171a68b360b7438d13863682d07040d984fd0fb096b2379421003 +vrfv2_wrapper_load_test_consumer: ../../contracts/solc/v0.8.6/VRFV2WrapperLoadTestConsumer/VRFV2WrapperLoadTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2WrapperLoadTestConsumer/VRFV2WrapperLoadTestConsumer.bin 664ca7fdf4dd65cc183bc25f20708c4b369c3401bba3ee12797a93bcd70138b6 +vrfv2plus_client: ../../contracts/solc/v0.8.6/VRFV2PlusClient/VRFV2PlusClient.abi ../../contracts/solc/v0.8.6/VRFV2PlusClient/VRFV2PlusClient.bin 3ffbfa4971a7e5f46051a26b1722613f265d89ea1867547ecec58500953a9501 +vrfv2plus_consumer_example: ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample/VRFV2PlusConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample/VRFV2PlusConsumerExample.bin 2c480a6d7955d33a00690fdd943486d95802e48a03f3cc243df314448e4ddb2c +vrfv2plus_malicious_migrator: ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator/VRFV2PlusMaliciousMigrator.abi ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator/VRFV2PlusMaliciousMigrator.bin 80dbc98be5e42246960c889d29488f978d3db0127e95e9b295352c481d8c9b07 +vrfv2plus_reverting_example: 
../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample/VRFV2PlusRevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample/VRFV2PlusRevertingExample.bin 6c9053a94f90b8151964d3311310478b57744fbbd153e8ee742ed570e1e49798 +vrfv2plus_wrapper: ../../contracts/solc/v0.8.6/VRFV2PlusWrapper/VRFV2PlusWrapper.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapper/VRFV2PlusWrapper.bin 934bafba386b934f491827e535306726069f4cafef9125079ea88abf0d808877 +vrfv2plus_wrapper_consumer_example: ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample/VRFV2PlusWrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample/VRFV2PlusWrapperConsumerExample.bin a14c4c6e2299cd963a8f0ed069e61dd135af5aad4c13a94f6ea7e086eced7191 +vrfv2plus_wrapper_load_test_consumer: ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer/VRFV2PlusWrapperLoadTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer/VRFV2PlusWrapperLoadTestConsumer.bin 55e3bd534045125fb6579a201ab766185e9b0fac5737b4f37897bb69c9f599fa diff --git a/core/gethwrappers/go_generate.go b/core/gethwrappers/go_generate.go new file mode 100644 index 00000000..4e10b5ef --- /dev/null +++ b/core/gethwrappers/go_generate.go @@ -0,0 +1,176 @@ +// Package gethwrappers provides tools for wrapping solidity contracts with +// golang packages, using abigen. +package gethwrappers + +// Make sure solidity compiler artifacts are up-to-date. Only output stdout on failure. 
+//go:generate ./generation/compile_contracts.sh + +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/FluxAggregator/FluxAggregator.abi ../../contracts/solc/v0.6/FluxAggregator/FluxAggregator.bin FluxAggregator flux_aggregator_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRF/VRF.abi ../../contracts/solc/v0.6/VRF/VRF.bin VRF solidity_vrf_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFTestHelper/VRFTestHelper.abi ../../contracts/solc/v0.6/VRFTestHelper/VRFTestHelper.bin VRFTestHelper solidity_vrf_verifier_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFCoordinator/VRFCoordinator.abi ../../contracts/solc/v0.6/VRFCoordinator/VRFCoordinator.bin VRFCoordinator solidity_vrf_coordinator_interface +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFConsumer/VRFConsumer.abi ../../contracts/solc/v0.6/VRFConsumer/VRFConsumer.bin VRFConsumer solidity_vrf_consumer_interface +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.bin VRFRequestIDBaseTestHelper solidity_vrf_request_id +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/Flags/Flags.abi ../../contracts/solc/v0.6/Flags/Flags.bin Flags flags_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/Oracle/Oracle.abi ../../contracts/solc/v0.6/Oracle/Oracle.bin Oracle oracle_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/TestAPIConsumer/TestAPIConsumer.abi ../../contracts/solc/v0.6/TestAPIConsumer/TestAPIConsumer.bin TestAPIConsumer test_api_consumer_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/MockETHPLIAggregator/MockETHPLIAggregator.abi 
../../contracts/solc/v0.6/MockETHPLIAggregator/MockETHPLIAggregator.bin MockETHPLIAggregator mock_ethlink_aggregator_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/MockGASAggregator/MockGASAggregator.abi ../../contracts/solc/v0.6/MockGASAggregator/MockGASAggregator.bin MockGASAggregator mock_gas_aggregator_wrapper + +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/Consumer/Consumer.abi ../../contracts/solc/v0.7/Consumer/Consumer.bin Consumer consumer_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/MultiWordConsumer/MultiWordConsumer.abi ../../contracts/solc/v0.7/MultiWordConsumer/MultiWordConsumer.bin MultiWordConsumer multiwordconsumer_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/Operator/Operator.abi ../../contracts/solc/v0.8.19/Operator/Operator.bin Operator operator_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/OperatorFactory/OperatorFactory.abi ../../contracts/solc/v0.8.19/OperatorFactory/OperatorFactory.bin OperatorFactory operator_factory +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/AuthorizedForwarder/AuthorizedForwarder.abi ../../contracts/solc/v0.8.19/AuthorizedForwarder/AuthorizedForwarder.bin AuthorizedForwarder authorized_forwarder +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/AuthorizedReceiver/AuthorizedReceiver.abi ../../contracts/solc/v0.8.19/AuthorizedReceiver/AuthorizedReceiver.bin AuthorizedReceiver authorized_receiver +//go:generate go run ./generation/generate/wrap.go OffchainAggregator/OffchainAggregator.abi - OffchainAggregator offchain_aggregator_wrapper + +// Automation +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/KeeperRegistry1_1/KeeperRegistry1_1.abi ../../contracts/solc/v0.7/KeeperRegistry1_1/KeeperRegistry1_1.bin KeeperRegistry 
keeper_registry_wrapper1_1 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/KeeperRegistry1_1Mock/KeeperRegistry1_1Mock.abi ../../contracts/solc/v0.7/KeeperRegistry1_1Mock/KeeperRegistry1_1Mock.bin KeeperRegistryMock keeper_registry_wrapper1_1_mock +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/UpkeepPerformCounterRestrictive/UpkeepPerformCounterRestrictive.abi ../../contracts/solc/v0.7/UpkeepPerformCounterRestrictive/UpkeepPerformCounterRestrictive.bin UpkeepPerformCounterRestrictive upkeep_perform_counter_restrictive_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/UpkeepCounter/UpkeepCounter.abi ../../contracts/solc/v0.7/UpkeepCounter/UpkeepCounter.bin UpkeepCounter upkeep_counter_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/CronUpkeepFactory/CronUpkeepFactory.abi - CronUpkeepFactory cron_upkeep_factory_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/CronUpkeepFactory/CronUpkeep.abi - CronUpkeep cron_upkeep_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistrar1_2/KeeperRegistrar.abi ../../contracts/solc/v0.8.6/KeeperRegistrar1_2/KeeperRegistrar.bin KeeperRegistrar keeper_registrar_wrapper1_2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistrar1_2Mock/KeeperRegistrar1_2Mock.abi ../../contracts/solc/v0.8.6/KeeperRegistrar1_2Mock/KeeperRegistrar1_2Mock.bin KeeperRegistrarMock keeper_registrar_wrapper1_2_mock +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistry1_2/KeeperRegistry1_2.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_2/KeeperRegistry1_2.bin KeeperRegistry keeper_registry_wrapper1_2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistry1_2/TypeAndVersionInterface.abi 
../../contracts/solc/v0.8.6/KeeperRegistry1_2/TypeAndVersionInterface.bin TypeAndVersionInterface type_and_version_interface_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.abi ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2/KeeperRegistryCheckUpkeepGasUsageWrapper1_2.bin KeeperRegistryCheckUpkeepGasUsageWrapper gas_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.abi ../../contracts/solc/v0.8.6/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock/KeeperRegistryCheckUpkeepGasUsageWrapper1_2Mock.bin KeeperRegistryCheckUpkeepGasUsageWrapperMock gas_wrapper_mock +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistry1_3/KeeperRegistry1_3.abi ../../contracts/solc/v0.8.6/KeeperRegistry1_3/KeeperRegistry1_3.bin KeeperRegistry keeper_registry_wrapper1_3 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistryLogic1_3/KeeperRegistryLogic1_3.abi ../../contracts/solc/v0.8.6/KeeperRegistryLogic1_3/KeeperRegistryLogic1_3.bin KeeperRegistryLogic keeper_registry_logic1_3 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistrar2_0/KeeperRegistrar2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistrar2_0/KeeperRegistrar2_0.bin KeeperRegistrar keeper_registrar_wrapper2_0 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistry2_0/KeeperRegistry2_0.abi ../../contracts/solc/v0.8.6/KeeperRegistry2_0/KeeperRegistry2_0.bin KeeperRegistry keeper_registry_wrapper2_0 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeeperRegistryLogic2_0/KeeperRegistryLogic2_0.abi 
../../contracts/solc/v0.8.6/KeeperRegistryLogic2_0/KeeperRegistryLogic2_0.bin KeeperRegistryLogic keeper_registry_logic2_0 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/UpkeepTranscoder/UpkeepTranscoder.abi ../../contracts/solc/v0.8.6/UpkeepTranscoder/UpkeepTranscoder.bin UpkeepTranscoder upkeep_transcoder +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/VerifiableLoadUpkeep/VerifiableLoadUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadUpkeep/VerifiableLoadUpkeep.bin VerifiableLoadUpkeep verifiable_load_upkeep_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/VerifiableLoadStreamsLookupUpkeep/VerifiableLoadStreamsLookupUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadStreamsLookupUpkeep/VerifiableLoadStreamsLookupUpkeep.bin VerifiableLoadStreamsLookupUpkeep verifiable_load_streams_lookup_upkeep_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/VerifiableLoadLogTriggerUpkeep/VerifiableLoadLogTriggerUpkeep.abi ../../contracts/solc/v0.8.16/VerifiableLoadLogTriggerUpkeep/VerifiableLoadLogTriggerUpkeep.bin VerifiableLoadLogTriggerUpkeep verifiable_load_log_trigger_upkeep_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/StreamsLookupUpkeep/StreamsLookupUpkeep.abi ../../contracts/solc/v0.8.16/StreamsLookupUpkeep/StreamsLookupUpkeep.bin StreamsLookupUpkeep streams_lookup_upkeep_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/StreamsLookupCompatibleInterface/StreamsLookupCompatibleInterface.abi ../../contracts/solc/v0.8.16/StreamsLookupCompatibleInterface/StreamsLookupCompatibleInterface.bin StreamsLookupCompatibleInterface streams_lookup_compatible_interface +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/AutomationConsumerBenchmark/AutomationConsumerBenchmark.abi 
../../contracts/solc/v0.8.16/AutomationConsumerBenchmark/AutomationConsumerBenchmark.bin AutomationConsumerBenchmark automation_consumer_benchmark +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/AutomationRegistrar2_1/AutomationRegistrar2_1.abi ../../contracts/solc/v0.8.16/AutomationRegistrar2_1/AutomationRegistrar2_1.bin AutomationRegistrar automation_registrar_wrapper2_1 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperRegistry2_1/KeeperRegistry2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistry2_1/KeeperRegistry2_1.bin KeeperRegistry keeper_registry_wrapper_2_1 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperRegistryLogicA2_1/KeeperRegistryLogicA2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistryLogicA2_1/KeeperRegistryLogicA2_1.bin KeeperRegistryLogicA keeper_registry_logic_a_wrapper_2_1 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperRegistryLogicB2_1/KeeperRegistryLogicB2_1.abi ../../contracts/solc/v0.8.16/KeeperRegistryLogicB2_1/KeeperRegistryLogicB2_1.bin KeeperRegistryLogicB keeper_registry_logic_b_wrapper_2_1 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/IKeeperRegistryMaster/IKeeperRegistryMaster.abi ../../contracts/solc/v0.8.16/IKeeperRegistryMaster/IKeeperRegistryMaster.bin IKeeperRegistryMaster i_keeper_registry_master_wrapper_2_1 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/AutomationUtils2_1/AutomationUtils2_1.abi ../../contracts/solc/v0.8.16/AutomationUtils2_1/AutomationUtils2_1.bin AutomationUtils automation_utils_2_1 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/AutomationRegistrar2_2/AutomationRegistrar2_2.abi ../../contracts/solc/v0.8.19/AutomationRegistrar2_2/AutomationRegistrar2_2.bin AutomationRegistrar automation_registrar_wrapper2_2 +//go:generate go run ./generation/generate/wrap.go 
../../contracts/solc/v0.8.19/AutomationRegistry2_2/AutomationRegistry2_2.abi ../../contracts/solc/v0.8.19/AutomationRegistry2_2/AutomationRegistry2_2.bin AutomationRegistry automation_registry_wrapper_2_2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/AutomationRegistryLogicA2_2/AutomationRegistryLogicA2_2.abi ../../contracts/solc/v0.8.19/AutomationRegistryLogicA2_2/AutomationRegistryLogicA2_2.bin AutomationRegistryLogicA automation_registry_logic_a_wrapper_2_2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/AutomationRegistryLogicB2_2/AutomationRegistryLogicB2_2.abi ../../contracts/solc/v0.8.19/AutomationRegistryLogicB2_2/AutomationRegistryLogicB2_2.bin AutomationRegistryLogicB automation_registry_logic_b_wrapper_2_2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/IAutomationRegistryMaster/IAutomationRegistryMaster.abi ../../contracts/solc/v0.8.19/IAutomationRegistryMaster/IAutomationRegistryMaster.bin IAutomationRegistryMaster i_automation_registry_master_wrapper_2_2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/AutomationUtils2_2/AutomationUtils2_2.abi ../../contracts/solc/v0.8.19/AutomationUtils2_2/AutomationUtils2_2.bin AutomationUtils automation_utils_2_2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/ILogAutomation/ILogAutomation.abi ../../contracts/solc/v0.8.16/ILogAutomation/ILogAutomation.bin ILogAutomation i_log_automation +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/AutomationForwarderLogic/AutomationForwarderLogic.abi ../../contracts/solc/v0.8.16/AutomationForwarderLogic/AutomationForwarderLogic.bin AutomationForwarderLogic automation_forwarder_logic +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/LogUpkeepCounter/LogUpkeepCounter.abi ../../contracts/solc/v0.8.6/LogUpkeepCounter/LogUpkeepCounter.bin LogUpkeepCounter 
log_upkeep_counter_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/SimpleLogUpkeepCounter/SimpleLogUpkeepCounter.abi ../../contracts/solc/v0.8.6/SimpleLogUpkeepCounter/SimpleLogUpkeepCounter.bin SimpleLogUpkeepCounter simple_log_upkeep_counter_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/LogTriggeredStreamsLookup/LogTriggeredStreamsLookup.abi ../../contracts/solc/v0.8.16/LogTriggeredStreamsLookup/LogTriggeredStreamsLookup.bin LogTriggeredStreamsLookup log_triggered_streams_lookup_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/DummyProtocol/DummyProtocol.abi ../../contracts/solc/v0.8.16/DummyProtocol/DummyProtocol.bin DummyProtocol dummy_protocol_wrapper + +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperConsumer/KeeperConsumer.abi ../../contracts/solc/v0.8.16/KeeperConsumer/KeeperConsumer.bin KeeperConsumer keeper_consumer_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/KeeperConsumerPerformance/KeeperConsumerPerformance.abi ../../contracts/solc/v0.8.16/KeeperConsumerPerformance/KeeperConsumerPerformance.bin KeeperConsumerPerformance keeper_consumer_performance_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/PerformDataChecker/PerformDataChecker.abi ../../contracts/solc/v0.8.16/PerformDataChecker/PerformDataChecker.bin PerformDataChecker perform_data_checker_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/UpkeepCounter/UpkeepCounter.abi ../../contracts/solc/v0.8.16/UpkeepCounter/UpkeepCounter.bin UpkeepCounter upkeep_counter_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive/UpkeepPerformCounterRestrictive.abi ../../contracts/solc/v0.8.16/UpkeepPerformCounterRestrictive/UpkeepPerformCounterRestrictive.bin 
UpkeepPerformCounterRestrictive upkeep_perform_counter_restrictive_wrapper + +// v0.8.6 VRFConsumer +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorMock/VRFCoordinatorMock.abi ../../contracts/solc/v0.8.6/VRFCoordinatorMock/VRFCoordinatorMock.bin VRFCoordinatorMock vrf_coordinator_mock +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumer/VRFConsumer.abi ../../contracts/solc/v0.8.6/VRFConsumer/VRFConsumer.bin VRFConsumer solidity_vrf_consumer_interface_v08 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.8.6/VRFRequestIDBaseTestHelper/VRFRequestIDBaseTestHelper.bin VRFRequestIDBaseTestHelper solidity_vrf_request_id_v08 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample/VRFOwnerlessConsumerExample.abi ../../contracts/solc/v0.8.6/VRFOwnerlessConsumerExample/VRFOwnerlessConsumerExample.bin VRFOwnerlessConsumerExample vrf_ownerless_consumer_example +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer/VRFLoadTestOwnerlessConsumer.abi ../../contracts/solc/v0.8.6/VRFLoadTestOwnerlessConsumer/VRFLoadTestOwnerlessConsumer.bin VRFLoadTestOwnerlessConsumer vrf_load_test_ownerless_consumer +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner/VRFLoadTestExternalSubOwner.abi ../../contracts/solc/v0.8.6/VRFLoadTestExternalSubOwner/VRFLoadTestExternalSubOwner.bin VRFLoadTestExternalSubOwner vrf_load_test_external_sub_owner +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2LoadTestWithMetrics/VRFV2LoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2LoadTestWithMetrics/VRFV2LoadTestWithMetrics.bin VRFV2LoadTestWithMetrics vrf_load_test_with_metrics +//go:generate go run 
./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2OwnerTestConsumer/VRFV2OwnerTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2OwnerTestConsumer/VRFV2OwnerTestConsumer.bin VRFV2OwnerTestConsumer vrf_owner_test_consumer +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFv2Consumer/VRFv2Consumer.abi ../../contracts/solc/v0.8.6/VRFv2Consumer/VRFv2Consumer.bin VRFv2Consumer vrf_v2_consumer_wrapper + +//go:generate go run ./generation/generate_link/wrap_link.go + +// VRF V2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BlockhashStore/BlockhashStore.abi ../../contracts/solc/v0.8.6/BlockhashStore/BlockhashStore.bin BlockhashStore blockhash_store +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchBlockhashStore/BatchBlockhashStore.abi ../../contracts/solc/v0.8.6/BatchBlockhashStore/BatchBlockhashStore.bin BatchBlockhashStore batch_blockhash_store +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2/BatchVRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2/BatchVRFCoordinatorV2.bin BatchVRFCoordinatorV2 batch_vrf_coordinator_v2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFOwner/VRFOwner.abi ../../contracts/solc/v0.8.6/VRFOwner/VRFOwner.bin VRFOwner vrf_owner +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2/VRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2/VRFCoordinatorV2.bin VRFCoordinatorV2 vrf_coordinator_v2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumerV2/VRFConsumerV2.abi ../../contracts/solc/v0.8.6/VRFConsumerV2/VRFConsumerV2.bin VRFConsumerV2 vrf_consumer_v2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2/VRFMaliciousConsumerV2.abi 
../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2/VRFMaliciousConsumerV2.bin VRFMaliciousConsumerV2 vrf_malicious_consumer_v2 + +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFTestHelper/VRFTestHelper.abi ../../contracts/solc/v0.8.6/VRFTestHelper/VRFTestHelper.bin VRFV08TestHelper solidity_vrf_v08_verifier_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFSingleConsumerExample/VRFSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFSingleConsumerExample/VRFSingleConsumerExample.bin VRFSingleConsumerExample vrf_single_consumer_example + +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample/VRFExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFExternalSubOwnerExample/VRFExternalSubOwnerExample.bin VRFExternalSubOwnerExample vrf_external_sub_owner_example + +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2RevertingExample/VRFV2RevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2RevertingExample/VRFV2RevertingExample.bin VRFV2RevertingExample vrfv2_reverting_example + +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumerV2UpgradeableExample/VRFConsumerV2UpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2UpgradeableExample/VRFConsumerV2UpgradeableExample.bin VRFConsumerV2UpgradeableExample vrf_consumer_v2_upgradeable_example + +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2TransparentUpgradeableProxy/VRFV2TransparentUpgradeableProxy.abi ../../contracts/solc/v0.8.6/VRFV2TransparentUpgradeableProxy/VRFV2TransparentUpgradeableProxy.bin VRFV2TransparentUpgradeableProxy vrfv2_transparent_upgradeable_proxy +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2ProxyAdmin/VRFV2ProxyAdmin.abi ../../contracts/solc/v0.8.6/VRFV2ProxyAdmin/VRFV2ProxyAdmin.bin VRFV2ProxyAdmin 
vrfv2_proxy_admin +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper/ChainSpecificUtilHelper.abi ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper/ChainSpecificUtilHelper.bin ChainSpecificUtilHelper chain_specific_util_helper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorTestV2/VRFCoordinatorTestV2.abi ../../contracts/solc/v0.8.6/VRFCoordinatorTestV2/VRFCoordinatorTestV2.bin VRFCoordinatorTestV2 vrf_coordinator_test_v2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFMockETHPLIAggregator/VRFMockETHPLIAggregator.abi ../../contracts/solc/v0.8.6/VRFMockETHPLIAggregator/VRFMockETHPLIAggregator.bin VRFMockETHPLIAggregator vrf_mock_ethlink_aggregator + +// VRF V2 Wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2Wrapper/VRFV2Wrapper.abi ../../contracts/solc/v0.8.6/VRFV2Wrapper/VRFV2Wrapper.bin VRFV2Wrapper vrfv2_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2WrapperInterface/VRFV2WrapperInterface.abi ../../contracts/solc/v0.8.6/VRFV2WrapperInterface/VRFV2WrapperInterface.bin VRFV2WrapperInterface vrfv2_wrapper_interface +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2WrapperConsumerExample/VRFV2WrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2WrapperConsumerExample/VRFV2WrapperConsumerExample.bin VRFV2WrapperConsumerExample vrfv2_wrapper_consumer_example +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2WrapperLoadTestConsumer/VRFV2WrapperLoadTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2WrapperLoadTestConsumer/VRFV2WrapperLoadTestConsumer.bin VRFV2WrapperLoadTestConsumer vrfv2_wrapper_load_test_consumer + +// Keepers X VRF v2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/KeepersVRFConsumer/KeepersVRFConsumer.abi 
../../contracts/solc/v0.8.6/KeepersVRFConsumer/KeepersVRFConsumer.bin KeepersVRFConsumer keepers_vrf_consumer + +// VRF V2Plus +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal/IVRFCoordinatorV2PlusInternal.abi ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal/IVRFCoordinatorV2PlusInternal.bin IVRFCoordinatorV2PlusInternal vrf_coordinator_v2plus_interface +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.bin BatchVRFCoordinatorV2Plus batch_vrf_coordinator_v2plus +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/TrustedBlockhashStore/TrustedBlockhashStore.abi ../../contracts/solc/v0.8.6/TrustedBlockhashStore/TrustedBlockhashStore.bin TrustedBlockhashStore trusted_blockhash_store +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample/VRFV2PlusConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample/VRFV2PlusConsumerExample.bin VRFV2PlusConsumerExample vrfv2plus_consumer_example +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5/VRFCoordinatorV2_5.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5/VRFCoordinatorV2_5.bin VRFCoordinatorV2_5 vrf_coordinator_v2_5 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusWrapper/VRFV2PlusWrapper.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapper/VRFV2PlusWrapper.bin VRFV2PlusWrapper vrfv2plus_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample/VRFV2PlusWrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample/VRFV2PlusWrapperConsumerExample.bin VRFV2PlusWrapperConsumerExample vrfv2plus_wrapper_consumer_example +//go:generate go 
run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus/VRFMaliciousConsumerV2Plus.abi ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus/VRFMaliciousConsumerV2Plus.bin VRFMaliciousConsumerV2Plus vrf_malicious_consumer_v2_plus +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample/VRFV2PlusSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample/VRFV2PlusSingleConsumerExample.bin VRFV2PlusSingleConsumerExample vrf_v2plus_single_consumer +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample/VRFV2PlusExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample/VRFV2PlusExternalSubOwnerExample.bin VRFV2PlusExternalSubOwnerExample vrf_v2plus_sub_owner +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample/VRFV2PlusRevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample/VRFV2PlusRevertingExample.bin VRFV2PlusRevertingExample vrfv2plus_reverting_example +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample/VRFConsumerV2PlusUpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample/VRFConsumerV2PlusUpgradeableExample.bin VRFConsumerV2PlusUpgradeableExample vrf_consumer_v2_plus_upgradeable_example +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusClient/VRFV2PlusClient.abi ../../contracts/solc/v0.8.6/VRFV2PlusClient/VRFV2PlusClient.bin VRFV2PlusClient vrfv2plus_client +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example/VRFCoordinatorV2Plus_V2Example.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example/VRFCoordinatorV2Plus_V2Example.bin VRFCoordinatorV2Plus_V2Example vrf_coordinator_v2_plus_v2_example 
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator/VRFV2PlusMaliciousMigrator.abi ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator/VRFV2PlusMaliciousMigrator.bin VRFV2PlusMaliciousMigrator vrfv2plus_malicious_migrator +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics/VRFV2PlusLoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics/VRFV2PlusLoadTestWithMetrics.bin VRFV2PlusLoadTestWithMetrics vrf_v2plus_load_test_with_metrics +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion/VRFCoordinatorV2PlusUpgradedVersion.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion/VRFCoordinatorV2PlusUpgradedVersion.bin VRFCoordinatorV2PlusUpgradedVersion vrf_v2plus_upgraded_version +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer/VRFV2PlusWrapperLoadTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer/VRFV2PlusWrapperLoadTestConsumer.bin VRFV2PlusWrapperLoadTestConsumer vrfv2plus_wrapper_load_test_consumer + +// Aggregators +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV2V3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV2V3Interface.bin AggregatorV2V3Interface aggregator_v2v3_interface +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV2V3Interface/AggregatorV3Interface.bin AggregatorV3Interface aggregator_v3_interface +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/MockAggregatorProxy/MockAggregatorProxy.abi ../../contracts/solc/v0.8.6/MockAggregatorProxy/MockAggregatorProxy.bin MockAggregatorProxy mock_aggregator_proxy + +// Log 
tester + +// ChainReader test contract +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/ChainReaderTestContract/LatestValueHolder.abi ../../contracts/solc/v0.8.19/ChainReaderTestContract/LatestValueHolder.bin LatestValueHolder chain_reader_example + +// Plugin Functions +//go:generate go generate ./functions + +// Plugin Keystone +//go:generate go generate ./keystone + +// Mercury +//go:generate go generate ./llo-feeds + +// Shared +//go:generate go generate ./shared + +// Mocks that contain only events and functions to emit them +// These contracts are used in testing Atlas flows. The contracts contain no logic, only events, structures, and functions to emit them. +// The flow is as follows: +// 1. Compile all non events mock contracts. +// 2. Generate events mock .sol files based on ABI of compiled contracts. +// 3. Compile events mock contracts. ./generation/compile_event_mock_contract.sh calls contracts/scripts/native_solc_compile_all_events_mock to compile events mock contracts. +// 4. Generate wrappers for events mock contracts. +//go:generate ./generation/compile_event_mock_contract.sh + +// Transmission +//go:generate go generate ./transmission diff --git a/core/gethwrappers/go_generate_logpoller.go b/core/gethwrappers/go_generate_logpoller.go new file mode 100644 index 00000000..b28b8205 --- /dev/null +++ b/core/gethwrappers/go_generate_logpoller.go @@ -0,0 +1,7 @@ +// Package gethwrappers provides tools for wrapping solidity contracts with +// golang packages, using abigen. 
+package gethwrappers + +// Log tester +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/LogEmitter/LogEmitter.abi ../../contracts/solc/v0.8.19/LogEmitter/LogEmitter.bin LogEmitter log_emitter +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.19/VRFLogEmitter/VRFLogEmitter.abi ../../contracts/solc/v0.8.19/VRFLogEmitter/VRFLogEmitter.bin VRFLogEmitter vrf_log_emitter diff --git a/core/gethwrappers/go_generate_test.go b/core/gethwrappers/go_generate_test.go new file mode 100644 index 00000000..d12e01f7 --- /dev/null +++ b/core/gethwrappers/go_generate_test.go @@ -0,0 +1,148 @@ +// package gethwrappers_test verifies correct and up-to-date generation of golang wrappers +// for solidity contracts. See go_generate.go for the actual generation. +package gethwrappers + +import ( + "crypto/sha256" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + + gethParams "github.com/ethereum/go-ethereum/params" + "github.com/fatih/color" + + cutils "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/utils" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const compileCommand = "../../contracts/scripts/native_solc_compile_all" + +// TestCheckContractHashesFromLastGoGenerate compares the abi and bytecode of the +// contract artifacts in contracts/solc with the abi and bytecode stored in the +// contract wrapper +func TestCheckContractHashesFromLastGoGenerate(t *testing.T) { + versions, err := ReadVersionsDB() + require.NoError(t, err) + require.NotEmpty(t, versions.GethVersion, `version DB should have a "GETH_VERSION:" line`) + + wd, err := os.Getwd() + if err != nil { + wd = "" + } + require.Equal(t, versions.GethVersion, gethParams.Version, + color.HiRedString(utils.BoxOutput("please re-run `go generate %s` and commit the"+ + "changes", wd))) + + for _, contractVersionInfo := range versions.ContractVersions { + if 
isOCRContract(contractVersionInfo.AbiPath) || isVRFV2Contract(contractVersionInfo.AbiPath) { + continue + } + compareCurrentCompilerArtifactAgainstRecordsAndSoliditySources(t, contractVersionInfo) + } + // Just check that LinkToken details haven't changed (they never ought to) + linkDetails, err := os.ReadFile(filepath.Join(getProjectRoot(t), "contracts/LinkToken.json")) + require.NoError(t, err, "could not read link contract details") + require.Equal(t, fmt.Sprintf("%x", sha256.Sum256(linkDetails)), + "27c0e17a79553fccc63a4400c6bbe415ff710d9cc7c25757bff0f7580205c922", + "should never differ!") +} + +func isOCRContract(fullpath string) bool { + return strings.Contains(fullpath, "OffchainAggregator") +} + +// VRFv2 currently uses revert error types which are not supported by abigen +// and so we have to manually modify the abi to remove them. +func isVRFV2Contract(fullpath string) bool { + return strings.Contains(fullpath, "VRFCoordinatorV2") +} + +// rootDir is the local plugin root working directory +var rootDir string + +func init() { // compute rootDir + var err error + thisDir, err := os.Getwd() + if err != nil { + panic(err) + } + rootDir, err = filepath.Abs(filepath.Join(thisDir, "../..")) + if err != nil { + panic(err) + } +} + +// compareCurrentCompilerArtifactAgainstRecordsAndSoliditySources checks that +// the file at each ContractVersion.AbiPath and ContractVersion.BinaryPath hashes to its +// ContractVersion.Hash, and that the solidity source code recorded in the +// compiler artifact matches the current solidity contracts. +// +// Most of the compiler artifacts should contain output from sol-compiler, or +// "pnpm compile". The relevant parts of its schema are +// +// { "sourceCodes": { "": "", ... } } +// +// where is the path to the contract, below the truffle contracts/ +// directory, and is the source code of the contract at the time the JSON +// file was generated. 
+func compareCurrentCompilerArtifactAgainstRecordsAndSoliditySources( + t *testing.T, versionInfo ContractVersion, +) { + hash := VersionHash(versionInfo.AbiPath, versionInfo.BinaryPath) + recompileCommand := fmt.Sprintf("(cd %s/contracts; make wrappers-all)", rootDir) + assert.Equal(t, versionInfo.Hash, hash, + utils.BoxOutput(`compiled %s and/or %s has changed; please rerun +%s, +and commit the changes`, versionInfo.AbiPath, versionInfo.BinaryPath, recompileCommand)) +} + +// Ensure that solidity compiler artifacts are present before running this test, +// by compiling them if necessary. +func init() { + db, err := versionsDBLineReader() + if err != nil { + panic(err) + } + var solidityArtifactsMissing []string + for db.Scan() { + line := strings.Fields(db.Text()) + if stripTrailingColon(line[0], "") != "GETH_VERSION" { + if os.IsNotExist(cutils.JustError(os.Stat(line[1]))) { + solidityArtifactsMissing = append(solidityArtifactsMissing, line[1]) + } + } + } + if len(solidityArtifactsMissing) == 0 { + return + } + fmt.Printf("some solidity artifacts missing (%s); rebuilding...", + solidityArtifactsMissing) + // Don't want to run "make wrappers-all" here, because that would + // result in an infinite loop + cmd := exec.Command("bash", "-c", compileCommand) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + panic(err) + } +} + +// getProjectRoot returns the root of the plugin project +func getProjectRoot(t *testing.T) (rootPath string) { + root, err := os.Getwd() + require.NoError(t, err, "could not get current working directory") + for root != "/" { // Walk up path to find dir containing go.mod + if _, err := os.Stat(filepath.Join(root, "go.mod")); !os.IsNotExist(err) { + return root + } + root = filepath.Dir(root) + } + t.Fatal("could not find project root") + return +} diff --git a/core/gethwrappers/go_generate_vrfv2plus.go b/core/gethwrappers/go_generate_vrfv2plus.go new file mode 100644 index 00000000..efd11050 --- /dev/null 
+++ b/core/gethwrappers/go_generate_vrfv2plus.go @@ -0,0 +1,21 @@ +package gethwrappers + +// VRF V2Plus +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal/IVRFCoordinatorV2PlusInternal.abi ../../contracts/solc/v0.8.6/IVRFCoordinatorV2PlusInternal/IVRFCoordinatorV2PlusInternal.bin IVRFCoordinatorV2PlusInternal vrf_coordinator_v2plus_interface +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.bin BatchVRFCoordinatorV2Plus batch_vrf_coordinator_v2plus +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/TrustedBlockhashStore/TrustedBlockhashStore.abi ../../contracts/solc/v0.8.6/TrustedBlockhashStore/TrustedBlockhashStore.bin TrustedBlockhashStore trusted_blockhash_store +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample/VRFV2PlusConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusConsumerExample/VRFV2PlusConsumerExample.bin VRFV2PlusConsumerExample vrfv2plus_consumer_example +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5/VRFCoordinatorV2_5.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2_5/VRFCoordinatorV2_5.bin VRFCoordinatorV2_5 vrf_coordinator_v2_5 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusWrapper/VRFV2PlusWrapper.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapper/VRFV2PlusWrapper.bin VRFV2PlusWrapper vrfv2plus_wrapper +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample/VRFV2PlusWrapperConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperConsumerExample/VRFV2PlusWrapperConsumerExample.bin VRFV2PlusWrapperConsumerExample vrfv2plus_wrapper_consumer_example +//go:generate go run 
./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus/VRFMaliciousConsumerV2Plus.abi ../../contracts/solc/v0.8.6/VRFMaliciousConsumerV2Plus/VRFMaliciousConsumerV2Plus.bin VRFMaliciousConsumerV2Plus vrf_malicious_consumer_v2_plus +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample/VRFV2PlusSingleConsumerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusSingleConsumerExample/VRFV2PlusSingleConsumerExample.bin VRFV2PlusSingleConsumerExample vrf_v2plus_single_consumer +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample/VRFV2PlusExternalSubOwnerExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusExternalSubOwnerExample/VRFV2PlusExternalSubOwnerExample.bin VRFV2PlusExternalSubOwnerExample vrf_v2plus_sub_owner +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample/VRFV2PlusRevertingExample.abi ../../contracts/solc/v0.8.6/VRFV2PlusRevertingExample/VRFV2PlusRevertingExample.bin VRFV2PlusRevertingExample vrfv2plus_reverting_example +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample/VRFConsumerV2PlusUpgradeableExample.abi ../../contracts/solc/v0.8.6/VRFConsumerV2PlusUpgradeableExample/VRFConsumerV2PlusUpgradeableExample.bin VRFConsumerV2PlusUpgradeableExample vrf_consumer_v2_plus_upgradeable_example +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusClient/VRFV2PlusClient.abi ../../contracts/solc/v0.8.6/VRFV2PlusClient/VRFV2PlusClient.bin VRFV2PlusClient vrfv2plus_client +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example/VRFCoordinatorV2Plus_V2Example.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2Plus_V2Example/VRFCoordinatorV2Plus_V2Example.bin VRFCoordinatorV2Plus_V2Example vrf_coordinator_v2_plus_v2_example 
+//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator/VRFV2PlusMaliciousMigrator.abi ../../contracts/solc/v0.8.6/VRFV2PlusMaliciousMigrator/VRFV2PlusMaliciousMigrator.bin VRFV2PlusMaliciousMigrator vrfv2plus_malicious_migrator +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics/VRFV2PlusLoadTestWithMetrics.abi ../../contracts/solc/v0.8.6/VRFV2PlusLoadTestWithMetrics/VRFV2PlusLoadTestWithMetrics.bin VRFV2PlusLoadTestWithMetrics vrf_v2plus_load_test_with_metrics +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion/VRFCoordinatorV2PlusUpgradedVersion.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2PlusUpgradedVersion/VRFCoordinatorV2PlusUpgradedVersion.bin VRFCoordinatorV2PlusUpgradedVersion vrf_v2plus_upgraded_version +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer/VRFV2PlusWrapperLoadTestConsumer.abi ../../contracts/solc/v0.8.6/VRFV2PlusWrapperLoadTestConsumer/VRFV2PlusWrapperLoadTestConsumer.bin VRFV2PlusWrapperLoadTestConsumer vrfv2plus_wrapper_load_test_consumer diff --git a/core/gethwrappers/keystone/generated/forwarder/forwarder.go b/core/gethwrappers/keystone/generated/forwarder/forwarder.go new file mode 100644 index 00000000..de0bb508 --- /dev/null +++ b/core/gethwrappers/keystone/generated/forwarder/forwarder.go @@ -0,0 +1,600 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package forwarder + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var KeystoneForwarderMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ReentrantCall\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"workflowExecutionId\",\"type\":\"bytes32\"}],\"name\":\"getTransmitter\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"targetAddress\",\"type\":\"address\"},{\"internalT
ype\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"},{\"internalType\":\"bytes[]\",\"name\":\"signatures\",\"type\":\"bytes[]\"}],\"name\":\"report\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b5033806000816100675760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615610097576100978161009f565b505050610148565b336001600160a01b038216036100f75760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161005e565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b610c12806101576000396000f3fe608060405234801561001057600080fd5b50600436106100725760003560e01c8063c0965dc311610050578063c0965dc314610108578063e6b714581461012b578063f2fde38b1461016157600080fd5b8063181f5a771461007757806379ba5097146100bf5780638da5cb5b146100c9575b600080fd5b604080518082018252601781527f4b657973746f6e65466f7277617264657220312e302e30000000000000000000602082015290516100b69190610827565b60405180910390f35b6100c7610174565b005b60005473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100b6565b61011b6101163660046108bc565b610276565b60405190151581526020016100b6565b6100e3610139366004610998565b6000908152600360205260409020547
3ffffffffffffffffffffffffffffffffffffffff1690565b6100c761016f3660046109b1565b61058e565b60015473ffffffffffffffffffffffffffffffffffffffff1633146101fa576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60025460009060ff16156102b6576040517f37ed32e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600280547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790556044841161034b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f696e76616c69642064617461206c656e6774680000000000000000000000000060448201526064016101f1565b600061035a85600481896109d3565b8101906103679190610a2c565b8051602082012090915060005b848110156104655760008060006103e289898681811061039657610396610afb565b90506020028101906103a89190610b2a565b8080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506105a292505050565b925092509250600060018683868660405160008152602001604052604051610426949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa158015610448573d6000803e3d6000fd5b5086955061045d9450859350610b9692505050565b915050610374565b5060008061047284610630565b600081815260036020526040902054919350915073ffffffffffffffffffffffffffffffffffffffff16156104ae57600094505050505061055d565b6000808b73ffffffffffffffffffffffffffffffffffffffff168b8b6040516104d8929190610bf5565b6000604051808303816000865af19150503d8060008114610515576040519150601f19603f3d011682016040523d82523d6000602084013e61051a565b606091505b5050506000928352505060036020526040902080547ff
fffffffffffffffffffffff00000000000000000000000000000000000000001633179055506001925050505b600280547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016905595945050505050565b6105966106af565b61059f81610732565b50565b60008060008351604114610612576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f696e76616c6964207369676e6174757265206c656e677468000000000000000060448201526064016101f1565b50505060208101516040820151606090920151909260009190911a90565b600080604083511161069e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f696e76616c6964207265706f7274206c656e677468000000000000000000000060448201526064016101f1565b505060208101516040909101519091565b60005473ffffffffffffffffffffffffffffffffffffffff163314610730576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016101f1565b565b3373ffffffffffffffffffffffffffffffffffffffff8216036107b1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016101f1565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600060208083528351808285015260005b8181101561085457858101830151858201604001528201610838565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b803573ffffffffffffffffffffffffffffffffffffffff811681146108b757600080fd5b919050565b6000806000806000606086880312156108d457600080fd5b6108dd86610893565b9450602086013567ffffffffffffffff808211156108fa57600080fd5b818801915088601f83011261090e57600080fd5b81358181111561091d576
00080fd5b89602082850101111561092f57600080fd5b60208301965080955050604088013591508082111561094d57600080fd5b818801915088601f83011261096157600080fd5b81358181111561097057600080fd5b8960208260051b850101111561098557600080fd5b9699959850939650602001949392505050565b6000602082840312156109aa57600080fd5b5035919050565b6000602082840312156109c357600080fd5b6109cc82610893565b9392505050565b600080858511156109e357600080fd5b838611156109f057600080fd5b5050820193919092039150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600060208284031215610a3e57600080fd5b813567ffffffffffffffff80821115610a5657600080fd5b818401915084601f830112610a6a57600080fd5b813581811115610a7c57610a7c6109fd565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908382118183101715610ac257610ac26109fd565b81604052828152876020848701011115610adb57600080fd5b826020860160208301376000928101602001929092525095945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610b5f57600080fd5b83018035915067ffffffffffffffff821115610b7a57600080fd5b602001915036819003821315610b8f57600080fd5b9250929050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610bee577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b818382376000910190815291905056fea164736f6c6343000813000a", +} + +var KeystoneForwarderABI = KeystoneForwarderMetaData.ABI + +var KeystoneForwarderBin = KeystoneForwarderMetaData.Bin + +func DeployKeystoneForwarder(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *KeystoneForwarder, error) { + parsed, err := KeystoneForwarderMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI 
returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(KeystoneForwarderBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &KeystoneForwarder{address: address, abi: *parsed, KeystoneForwarderCaller: KeystoneForwarderCaller{contract: contract}, KeystoneForwarderTransactor: KeystoneForwarderTransactor{contract: contract}, KeystoneForwarderFilterer: KeystoneForwarderFilterer{contract: contract}}, nil +} + +type KeystoneForwarder struct { + address common.Address + abi abi.ABI + KeystoneForwarderCaller + KeystoneForwarderTransactor + KeystoneForwarderFilterer +} + +type KeystoneForwarderCaller struct { + contract *bind.BoundContract +} + +type KeystoneForwarderTransactor struct { + contract *bind.BoundContract +} + +type KeystoneForwarderFilterer struct { + contract *bind.BoundContract +} + +type KeystoneForwarderSession struct { + Contract *KeystoneForwarder + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type KeystoneForwarderCallerSession struct { + Contract *KeystoneForwarderCaller + CallOpts bind.CallOpts +} + +type KeystoneForwarderTransactorSession struct { + Contract *KeystoneForwarderTransactor + TransactOpts bind.TransactOpts +} + +type KeystoneForwarderRaw struct { + Contract *KeystoneForwarder +} + +type KeystoneForwarderCallerRaw struct { + Contract *KeystoneForwarderCaller +} + +type KeystoneForwarderTransactorRaw struct { + Contract *KeystoneForwarderTransactor +} + +func NewKeystoneForwarder(address common.Address, backend bind.ContractBackend) (*KeystoneForwarder, error) { + abi, err := abi.JSON(strings.NewReader(KeystoneForwarderABI)) + if err != nil { + return nil, err + } + contract, err := bindKeystoneForwarder(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &KeystoneForwarder{address: address, abi: abi, KeystoneForwarderCaller: KeystoneForwarderCaller{contract: contract}, KeystoneForwarderTransactor: 
KeystoneForwarderTransactor{contract: contract}, KeystoneForwarderFilterer: KeystoneForwarderFilterer{contract: contract}}, nil +} + +func NewKeystoneForwarderCaller(address common.Address, caller bind.ContractCaller) (*KeystoneForwarderCaller, error) { + contract, err := bindKeystoneForwarder(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &KeystoneForwarderCaller{contract: contract}, nil +} + +func NewKeystoneForwarderTransactor(address common.Address, transactor bind.ContractTransactor) (*KeystoneForwarderTransactor, error) { + contract, err := bindKeystoneForwarder(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &KeystoneForwarderTransactor{contract: contract}, nil +} + +func NewKeystoneForwarderFilterer(address common.Address, filterer bind.ContractFilterer) (*KeystoneForwarderFilterer, error) { + contract, err := bindKeystoneForwarder(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &KeystoneForwarderFilterer{contract: contract}, nil +} + +func bindKeystoneForwarder(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := KeystoneForwarderMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_KeystoneForwarder *KeystoneForwarderRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeystoneForwarder.Contract.KeystoneForwarderCaller.contract.Call(opts, result, method, params...) 
+} + +func (_KeystoneForwarder *KeystoneForwarderRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeystoneForwarder.Contract.KeystoneForwarderTransactor.contract.Transfer(opts) +} + +func (_KeystoneForwarder *KeystoneForwarderRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeystoneForwarder.Contract.KeystoneForwarderTransactor.contract.Transact(opts, method, params...) +} + +func (_KeystoneForwarder *KeystoneForwarderCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _KeystoneForwarder.Contract.contract.Call(opts, result, method, params...) +} + +func (_KeystoneForwarder *KeystoneForwarderTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeystoneForwarder.Contract.contract.Transfer(opts) +} + +func (_KeystoneForwarder *KeystoneForwarderTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _KeystoneForwarder.Contract.contract.Transact(opts, method, params...) 
+} + +func (_KeystoneForwarder *KeystoneForwarderCaller) GetTransmitter(opts *bind.CallOpts, workflowExecutionId [32]byte) (common.Address, error) { + var out []interface{} + err := _KeystoneForwarder.contract.Call(opts, &out, "getTransmitter", workflowExecutionId) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeystoneForwarder *KeystoneForwarderSession) GetTransmitter(workflowExecutionId [32]byte) (common.Address, error) { + return _KeystoneForwarder.Contract.GetTransmitter(&_KeystoneForwarder.CallOpts, workflowExecutionId) +} + +func (_KeystoneForwarder *KeystoneForwarderCallerSession) GetTransmitter(workflowExecutionId [32]byte) (common.Address, error) { + return _KeystoneForwarder.Contract.GetTransmitter(&_KeystoneForwarder.CallOpts, workflowExecutionId) +} + +func (_KeystoneForwarder *KeystoneForwarderCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _KeystoneForwarder.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_KeystoneForwarder *KeystoneForwarderSession) Owner() (common.Address, error) { + return _KeystoneForwarder.Contract.Owner(&_KeystoneForwarder.CallOpts) +} + +func (_KeystoneForwarder *KeystoneForwarderCallerSession) Owner() (common.Address, error) { + return _KeystoneForwarder.Contract.Owner(&_KeystoneForwarder.CallOpts) +} + +func (_KeystoneForwarder *KeystoneForwarderCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _KeystoneForwarder.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_KeystoneForwarder *KeystoneForwarderSession) 
TypeAndVersion() (string, error) { + return _KeystoneForwarder.Contract.TypeAndVersion(&_KeystoneForwarder.CallOpts) +} + +func (_KeystoneForwarder *KeystoneForwarderCallerSession) TypeAndVersion() (string, error) { + return _KeystoneForwarder.Contract.TypeAndVersion(&_KeystoneForwarder.CallOpts) +} + +func (_KeystoneForwarder *KeystoneForwarderTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _KeystoneForwarder.contract.Transact(opts, "acceptOwnership") +} + +func (_KeystoneForwarder *KeystoneForwarderSession) AcceptOwnership() (*types.Transaction, error) { + return _KeystoneForwarder.Contract.AcceptOwnership(&_KeystoneForwarder.TransactOpts) +} + +func (_KeystoneForwarder *KeystoneForwarderTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _KeystoneForwarder.Contract.AcceptOwnership(&_KeystoneForwarder.TransactOpts) +} + +func (_KeystoneForwarder *KeystoneForwarderTransactor) Report(opts *bind.TransactOpts, targetAddress common.Address, data []byte, signatures [][]byte) (*types.Transaction, error) { + return _KeystoneForwarder.contract.Transact(opts, "report", targetAddress, data, signatures) +} + +func (_KeystoneForwarder *KeystoneForwarderSession) Report(targetAddress common.Address, data []byte, signatures [][]byte) (*types.Transaction, error) { + return _KeystoneForwarder.Contract.Report(&_KeystoneForwarder.TransactOpts, targetAddress, data, signatures) +} + +func (_KeystoneForwarder *KeystoneForwarderTransactorSession) Report(targetAddress common.Address, data []byte, signatures [][]byte) (*types.Transaction, error) { + return _KeystoneForwarder.Contract.Report(&_KeystoneForwarder.TransactOpts, targetAddress, data, signatures) +} + +func (_KeystoneForwarder *KeystoneForwarderTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _KeystoneForwarder.contract.Transact(opts, "transferOwnership", to) +} + +func (_KeystoneForwarder 
*KeystoneForwarderSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeystoneForwarder.Contract.TransferOwnership(&_KeystoneForwarder.TransactOpts, to) +} + +func (_KeystoneForwarder *KeystoneForwarderTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _KeystoneForwarder.Contract.TransferOwnership(&_KeystoneForwarder.TransactOpts, to) +} + +type KeystoneForwarderOwnershipTransferRequestedIterator struct { + Event *KeystoneForwarderOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *KeystoneForwarderOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(KeystoneForwarderOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(KeystoneForwarderOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *KeystoneForwarderOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *KeystoneForwarderOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type KeystoneForwarderOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_KeystoneForwarder *KeystoneForwarderFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeystoneForwarderOwnershipTransferRequestedIterator, error) { + + var 
fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeystoneForwarder.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &KeystoneForwarderOwnershipTransferRequestedIterator{contract: _KeystoneForwarder.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_KeystoneForwarder *KeystoneForwarderFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeystoneForwarderOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _KeystoneForwarder.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(KeystoneForwarderOwnershipTransferRequested) + if err := _KeystoneForwarder.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_KeystoneForwarder *KeystoneForwarderFilterer) ParseOwnershipTransferRequested(log types.Log) (*KeystoneForwarderOwnershipTransferRequested, error) { + event := new(KeystoneForwarderOwnershipTransferRequested) + if err := _KeystoneForwarder.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + 
	// Tail of ParseOwnershipTransferRequested (header precedes this chunk):
	// attach the raw log to the decoded event and return it.
	event.Raw = log
	return event, nil
}

// KeystoneForwarderOwnershipTransferredIterator is returned from
// FilterOwnershipTransferred and is used to iterate over the raw logs and
// unpacked data for OwnershipTransferred events raised by the
// KeystoneForwarder contract.
//
// NOTE(review): abigen-generated code ("DO NOT EDIT") — manual changes here
// will be lost on the next `go generate` run.
type KeystoneForwarderOwnershipTransferredIterator struct {
	Event *KeystoneForwarderOwnershipTransferred // event containing the contract specifics and raw log

	contract *bind.BoundContract // generic contract to use for unpacking event data
	event    string              // event name to use for unpacking event data

	logs chan types.Log        // log channel receiving the found contract events
	sub  ethereum.Subscription // subscription for errors, completion and termination
	done bool                  // whether the subscription completed delivering logs
	fail error                 // occurred error to stop iteration
}

// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *KeystoneForwarderOwnershipTransferredIterator) Next() bool {

	// If the iterator failed, stop iterating
	if it.fail != nil {
		return false
	}

	// If the iterator completed, deliver directly whatever is still buffered
	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(KeystoneForwarderOwnershipTransferred)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}

	// Iterator still in progress: wait for either a data or an error event
	select {
	case log := <-it.logs:
		it.Event = new(KeystoneForwarderOwnershipTransferred)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		// Subscription ended; mark done and re-enter to drain buffered logs.
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error occurred during filtering.
func (it *KeystoneForwarderOwnershipTransferredIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *KeystoneForwarderOwnershipTransferredIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// KeystoneForwarderOwnershipTransferred represents an OwnershipTransferred
// event raised by the KeystoneForwarder contract.
type KeystoneForwarderOwnershipTransferred struct {
	From common.Address
	To   common.Address
	Raw  types.Log // blockchain specific contextual infos
}

// FilterOwnershipTransferred retrieves historical OwnershipTransferred logs,
// optionally filtered by the indexed `from` and `to` addresses.
func (_KeystoneForwarder *KeystoneForwarderFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeystoneForwarderOwnershipTransferredIterator, error) {

	var fromRule []interface{}
	for _, fromItem := range from {
		fromRule = append(fromRule, fromItem)
	}
	var toRule []interface{}
	for _, toItem := range to {
		toRule = append(toRule, toItem)
	}

	logs, sub, err := _KeystoneForwarder.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule)
	if err != nil {
		return nil, err
	}
	return &KeystoneForwarderOwnershipTransferredIterator{contract: _KeystoneForwarder.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil
}

// WatchOwnershipTransferred subscribes to future OwnershipTransferred logs,
// delivering each decoded event on sink until the subscription terminates.
func (_KeystoneForwarder *KeystoneForwarderFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeystoneForwarderOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) {

	var fromRule []interface{}
	for _, fromItem := range from {
		fromRule = append(fromRule, fromItem)
	}
	var toRule []interface{}
	for _, toItem := range to {
		toRule = append(toRule, toItem)
	}

	logs, sub, err := _KeystoneForwarder.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule)
	if err != nil {
		return nil, err
	}
	return event.NewSubscription(func(quit <-chan struct{}) error {
		defer sub.Unsubscribe()
		for {
			select {
			case log := <-logs:
				// New log arrived: parse the event and forward to the user.
				event := new(KeystoneForwarderOwnershipTransferred)
				if err := _KeystoneForwarder.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil {
					return err
				}
				event.Raw = log

				select {
				case sink <- event:
				case err := <-sub.Err():
					return err
				case <-quit:
					return nil
				}
			case err := <-sub.Err():
				return err
			case <-quit:
				return nil
			}
		}
	}), nil
}

// ParseOwnershipTransferred decodes a single OwnershipTransferred log.
func (_KeystoneForwarder *KeystoneForwarderFilterer) ParseOwnershipTransferred(log types.Log) (*KeystoneForwarderOwnershipTransferred, error) {
	event := new(KeystoneForwarderOwnershipTransferred)
	if err := _KeystoneForwarder.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil {
		return nil, err
	}
	event.Raw = log
	return event, nil
}

// ParseLog dispatches a raw log to the matching Parse* helper based on its
// first topic (the event signature hash).
func (_KeystoneForwarder *KeystoneForwarder) ParseLog(log types.Log) (generated.AbigenLog, error) {
	switch log.Topics[0] {
	case _KeystoneForwarder.abi.Events["OwnershipTransferRequested"].ID:
		return _KeystoneForwarder.ParseOwnershipTransferRequested(log)
	case _KeystoneForwarder.abi.Events["OwnershipTransferred"].ID:
		return _KeystoneForwarder.ParseOwnershipTransferred(log)

	default:
		return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0])
	}
}

// Topic returns the event signature hash for OwnershipTransferRequested.
func (KeystoneForwarderOwnershipTransferRequested) Topic() common.Hash {
	return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278")
}

// Topic returns the event signature hash for OwnershipTransferred.
func (KeystoneForwarderOwnershipTransferred) Topic() common.Hash {
	return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0")
}

// Address returns the deployed contract address this wrapper is bound to.
func (_KeystoneForwarder *KeystoneForwarder) Address() common.Address {
	return _KeystoneForwarder.address
}

// KeystoneForwarderInterface captures the full generated binding surface of
// the KeystoneForwarder contract: calls, transactions and event plumbing.
type KeystoneForwarderInterface interface {
	GetTransmitter(opts *bind.CallOpts, workflowExecutionId [32]byte) (common.Address, error)

	Owner(opts *bind.CallOpts) (common.Address, error)

	TypeAndVersion(opts *bind.CallOpts) (string, error)

	AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error)

	Report(opts *bind.TransactOpts, targetAddress common.Address, data []byte, signatures [][]byte) (*types.Transaction, error)

	TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error)

	FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeystoneForwarderOwnershipTransferRequestedIterator, error)

	WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *KeystoneForwarderOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error)

	ParseOwnershipTransferRequested(log types.Log) (*KeystoneForwarderOwnershipTransferRequested, error)

	FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*KeystoneForwarderOwnershipTransferredIterator, error)

	WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *KeystoneForwarderOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error)

	ParseOwnershipTransferred(log types.Log) (*KeystoneForwarderOwnershipTransferred, error)

	ParseLog(log types.Log) (generated.AbigenLog, error)

	Address() common.Address
}
diff --git a/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt new file mode 100644 index 00000000..8dad729b --- /dev/null +++ b/core/gethwrappers/keystone/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -0,0 +1,2 @@ +GETH_VERSION: 1.13.8 +forwarder: ../../../contracts/solc/v0.8.19/KeystoneForwarder/KeystoneForwarder.abi ../../../contracts/solc/v0.8.19/KeystoneForwarder/KeystoneForwarder.bin 4886b538e1fdc8aaf860901de36269e0c35acfd3e6eb190654d693ff9dbd4b6d diff --git a/core/gethwrappers/keystone/go_generate.go b/core/gethwrappers/keystone/go_generate.go new file mode 100644 index 00000000..75800132 --- /dev/null +++ b/core/gethwrappers/keystone/go_generate.go @@ -0,0 +1,7 @@ +// Package gethwrappers provides tools for wrapping solidity contracts with +// golang packages, using abigen. +package gethwrappers + +// Keystone + +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/KeystoneForwarder/KeystoneForwarder.abi ../../../contracts/solc/v0.8.19/KeystoneForwarder/KeystoneForwarder.bin KeystoneForwarder forwarder diff --git a/core/gethwrappers/llo-feeds/generated/channel_config_store/channel_config_store.go b/core/gethwrappers/llo-feeds/generated/channel_config_store/channel_config_store.go new file mode 100644 index 00000000..bbd784d5 --- /dev/null +++ b/core/gethwrappers/llo-feeds/generated/channel_config_store/channel_config_store.go @@ -0,0 +1,1032 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package channel_config_store + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type IChannelConfigStoreChannelDefinition struct { + ReportFormat [8]byte + ChainSelector uint64 + StreamIDs []uint32 +} + +var ChannelConfigStoreMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ChannelDefinitionNotFound\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"EmptyStreamIDs\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableByEOA\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"StagingConfigAlreadyPromoted\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroChainSelector\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroReportFormat\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"channelId\",\"type\":\"uint32\"}],\"name\":\"ChannelDefinitionRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"channelId\",\"type\":\"uint32\"},{\"components\":[{\"internalType\":\"bytes8\",\"name\":\"reportFormat\",\"type\":\"bytes8\"},{\"internalType\":\"uint64\",\"name\":\"chainSelector\",\"type\":\"uint64\"},{\"internalType\":\"uint32[]\",\"name\":\"streamIDs\",\"type\":\"uint32[]\"}],\"indexed\":false,\"internalType\":\"structIChannelConfigStore.ChannelDefinition\",\"name\":\"channelDefinition\",\"type\":\"tuple\"
}],\"name\":\"NewChannelDefinition\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"channelId\",\"type\":\"uint32\"}],\"name\":\"PromoteStagingConfig\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"channelId\",\"type\":\"uint32\"},{\"components\":[{\"internalType\":\"bytes8\",\"name\":\"reportFormat\",\"type\":\"bytes8\"},{\"internalType\":\"uint64\",\"name\":\"chainSelector\",\"type\":\"uint64\"},{\"internalType\":\"uint32[]\",\"name\":\"streamIDs\",\"type\":\"uint32[]\"}],\"internalType\":\"structIChannelConfigStore.ChannelDefinition\",\"name\":\"channelDefinition\",\"type\":\"tuple\"}],\"name\":\"addChannel\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"channelId\",\"type\":\"uint32\"}],\"name\":\"getChannelDefinitions\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes8\",\"name\":\"reportFormat\",\"type\":\"bytes8\"},{\"internalType\":\"uint64\",\"name\":\"chainSelector\",\"type\":\"uint64\"},{\"internalType\":\"uint32[]\",\"name\":\"streamIDs\",\"type\":\"uint32[]\"}],\"internalType\":\"structIChannelConfigStore.ChannelDefinition\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"ou
tputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"channelId\",\"type\":\"uint32\"}],\"name\":\"removeChannel\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b5033806000816100675760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615610097576100978161009f565b505050610148565b336001600160a01b038216036100f75760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161005e565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b610f1e806101576000396000f3fe608060405234801561001057600080fd5b50600436106100885760003560e01c80637e37e7191161005b5780637e37e719146101535780638da5cb5b14610166578063f2fde38b1461018e578063f5810719146101a157600080fd5b806301ffc9a71461008d578063181f5a77146100f757806322d9780c1461013657806379ba50971461014b575b600080fd5b6100e261009b366004610816565b7fffffffff00000000000000000000000000000000000000000000000000000000167
fa96f980c000000000000000000000000000000000000000000000000000000001490565b60405190151581526020015b60405180910390f35b604080518082018252601881527f4368616e6e656c436f6e66696753746f726520302e302e300000000000000000602082015290516100ee919061085f565b6101496101443660046108dd565b6101c1565b005b61014961032d565b610149610161366004610934565b61042f565b60005460405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100ee565b61014961019c366004610951565b61050f565b6101b46101af366004610934565b610523565b6040516100ee9190610987565b6101c9610665565b6101d66040820182610a1f565b9050600003610211576040517f4b620e2400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6102216040820160208301610aa4565b67ffffffffffffffff16600003610264576040517ff89d762900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6102716020820182610aef565b7fffffffffffffffff000000000000000000000000000000000000000000000000166000036102cc576040517febd3ef0200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b63ffffffff8216600090815260026020526040902081906102ed8282610ce5565b9050507fbf2cd44714205d633d3f888ac72ea66d53cd12d4c4e8723a80d9c0bc36484a548282604051610321929190610e2e565b60405180910390a15050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146103b3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610437610665565b63ffffffff81166000908152600260205260408120600101549003610488576040517fd1a751e200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b63ffffffff8116600090815260026020526040812080547ffffffffffff
fffffffffffffffffffff00000000000000000000000000000000168155906104d160018301826107dd565b505060405163ffffffff821681527f334e877e9691ecae0660510061973bebaa8b4fb37332ed6090052e630c9798619060200160405180910390a150565b610517610665565b610520816106e8565b50565b60408051606080820183526000808352602083015291810191909152333214610578576040517f74e2cd5100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b63ffffffff82166000908152600260209081526040918290208251606081018452815460c081901b7fffffffffffffffff00000000000000000000000000000000000000000000000016825268010000000000000000900467ffffffffffffffff16818401526001820180548551818602810186018752818152929593949386019383018282801561065557602002820191906000526020600020906000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116106185790505b5050505050815250509050919050565b60005473ffffffffffffffffffffffffffffffffffffffff1633146106e6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016103aa565b565b3373ffffffffffffffffffffffffffffffffffffffff821603610767576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c6600000000000000000060448201526064016103aa565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b50805460008255600701600890049060005260206000209081019061052091905b8082111561081257600081556001016107fe565b5090565b60006020828403121561082857600080fd5b81357fffffffff000000000000000000000000000000000000000000000000000000008116811461085857600080fd5b9392505050565b600060208083528351808285015260005b8181101561088c57858101830151858201604001528201610870565b506
0006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b63ffffffff8116811461052057600080fd5b600080604083850312156108f057600080fd5b82356108fb816108cb565b9150602083013567ffffffffffffffff81111561091757600080fd5b83016060818603121561092957600080fd5b809150509250929050565b60006020828403121561094657600080fd5b8135610858816108cb565b60006020828403121561096357600080fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811461085857600080fd5b60006020808352608083017fffffffffffffffff0000000000000000000000000000000000000000000000008551168285015267ffffffffffffffff82860151166040850152604085015160608086015281815180845260a0870191508483019350600092505b80831015610a1457835163ffffffff1682529284019260019290920191908401906109ee565b509695505050505050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610a5457600080fd5b83018035915067ffffffffffffffff821115610a6f57600080fd5b6020019150600581901b3603821315610a8757600080fd5b9250929050565b67ffffffffffffffff8116811461052057600080fd5b600060208284031215610ab657600080fd5b813561085881610a8e565b7fffffffffffffffff0000000000000000000000000000000000000000000000008116811461052057600080fd5b600060208284031215610b0157600080fd5b813561085881610ac1565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b68010000000000000000821115610b5457610b54610b0c565b805482825580831015610bd9576000828152602081206007850160031c81016007840160031c82019150601c8660021b168015610bc0577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8083018054828460200360031b1c16815550505b505b81811015610bd557828155600101610bc2565b5050505b505050565b60008135610beb816108cb565b92915050565b67ffffffffffffffff831115610c0957610c09610b0c565b610c138382610b3b565b60008181526020902082908460031c60005b81811015610c7e576000805b6008811015610c7157610c60610c4687610bde565b63ffffffff908116600584901b90811b91901b1984161790565b602096909601959150600101610c31565b508382015
5600101610c25565b507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff88616808703818814610cdb576000805b82811015610cd557610cc4610c4688610bde565b602097909701969150600101610cb0565b50848401555b5050505050505050565b8135610cf081610ac1565b8060c01c90508154817fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000082161783556020840135610d2d81610a8e565b6fffffffffffffffff00000000000000008160401b16837fffffffffffffffffffffffffffffffff0000000000000000000000000000000084161717845550505060408201357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1833603018112610da357600080fd5b8201803567ffffffffffffffff811115610dbc57600080fd5b6020820191508060051b3603821315610dd457600080fd5b610de2818360018601610bf1565b50505050565b8183526000602080850194508260005b85811015610e23578135610e0b816108cb565b63ffffffff1687529582019590820190600101610df8565b509495945050505050565b63ffffffff831681526040602082015260008235610e4b81610ac1565b7fffffffffffffffff0000000000000000000000000000000000000000000000001660408301526020830135610e8081610a8e565b67ffffffffffffffff8082166060850152604085013591507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1853603018212610ec857600080fd5b6020918501918201913581811115610edf57600080fd5b8060051b3603831315610ef157600080fd5b60606080860152610f0660a086018285610de8565b97965050505050505056fea164736f6c6343000813000a", +} + +var ChannelConfigStoreABI = ChannelConfigStoreMetaData.ABI + +var ChannelConfigStoreBin = ChannelConfigStoreMetaData.Bin + +func DeployChannelConfigStore(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ChannelConfigStore, error) { + parsed, err := ChannelConfigStoreMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(ChannelConfigStoreBin), backend) + if err != nil { + return 
common.Address{}, nil, nil, err
	}
	return address, tx, &ChannelConfigStore{address: address, abi: *parsed, ChannelConfigStoreCaller: ChannelConfigStoreCaller{contract: contract}, ChannelConfigStoreTransactor: ChannelConfigStoreTransactor{contract: contract}, ChannelConfigStoreFilterer: ChannelConfigStoreFilterer{contract: contract}}, nil
}

// ChannelConfigStore is an auto generated Go binding around an Ethereum
// contract, combining the caller, transactor and filterer facets.
//
// NOTE(review): abigen-generated code ("DO NOT EDIT") — manual changes here
// will be lost on the next `go generate` run.
type ChannelConfigStore struct {
	address common.Address
	abi     abi.ABI
	ChannelConfigStoreCaller     // read-only binding to the contract
	ChannelConfigStoreTransactor // write-only binding to the contract
	ChannelConfigStoreFilterer   // log filterer for contract events
}

// ChannelConfigStoreCaller is an auto generated read-only Go binding.
type ChannelConfigStoreCaller struct {
	contract *bind.BoundContract
}

// ChannelConfigStoreTransactor is an auto generated write-only Go binding.
type ChannelConfigStoreTransactor struct {
	contract *bind.BoundContract
}

// ChannelConfigStoreFilterer is an auto generated log filtering Go binding.
type ChannelConfigStoreFilterer struct {
	contract *bind.BoundContract
}

// ChannelConfigStoreSession is a binding with pre-set call and transact options.
type ChannelConfigStoreSession struct {
	Contract     *ChannelConfigStore
	CallOpts     bind.CallOpts
	TransactOpts bind.TransactOpts
}

// ChannelConfigStoreCallerSession is a read-only binding with pre-set call options.
type ChannelConfigStoreCallerSession struct {
	Contract *ChannelConfigStoreCaller
	CallOpts bind.CallOpts
}

// ChannelConfigStoreTransactorSession is a write-only binding with pre-set transact options.
type ChannelConfigStoreTransactorSession struct {
	Contract     *ChannelConfigStoreTransactor
	TransactOpts bind.TransactOpts
}

// ChannelConfigStoreRaw is an auto generated low-level Go binding.
type ChannelConfigStoreRaw struct {
	Contract *ChannelConfigStore
}

// ChannelConfigStoreCallerRaw is an auto generated low-level read-only Go binding.
type ChannelConfigStoreCallerRaw struct {
	Contract *ChannelConfigStoreCaller
}

// ChannelConfigStoreTransactorRaw is an auto generated low-level write-only Go binding.
type ChannelConfigStoreTransactorRaw struct {
	Contract *ChannelConfigStoreTransactor
}

// NewChannelConfigStore creates a new instance of ChannelConfigStore, bound to
// a specific deployed contract.
func NewChannelConfigStore(address common.Address, backend bind.ContractBackend) (*ChannelConfigStore, error) {
	abi, err := abi.JSON(strings.NewReader(ChannelConfigStoreABI))
	if err != nil {
		return nil, err
	}
	contract, err := bindChannelConfigStore(address, backend, backend, backend)
	if err != nil {
		return nil, err
	}
	return &ChannelConfigStore{address: address, abi: abi, ChannelConfigStoreCaller: ChannelConfigStoreCaller{contract: contract}, ChannelConfigStoreTransactor: ChannelConfigStoreTransactor{contract: contract}, ChannelConfigStoreFilterer: ChannelConfigStoreFilterer{contract: contract}}, nil
}

// NewChannelConfigStoreCaller creates a new read-only instance bound to a deployed contract.
func NewChannelConfigStoreCaller(address common.Address, caller bind.ContractCaller) (*ChannelConfigStoreCaller, error) {
	contract, err := bindChannelConfigStore(address, caller, nil, nil)
	if err != nil {
		return nil, err
	}
	return &ChannelConfigStoreCaller{contract: contract}, nil
}

// NewChannelConfigStoreTransactor creates a new write-only instance bound to a deployed contract.
func NewChannelConfigStoreTransactor(address common.Address, transactor bind.ContractTransactor) (*ChannelConfigStoreTransactor, error) {
	contract, err := bindChannelConfigStore(address, nil, transactor, nil)
	if err != nil {
		return nil, err
	}
	return &ChannelConfigStoreTransactor{contract: contract}, nil
}

// NewChannelConfigStoreFilterer creates a new log filterer instance bound to a deployed contract.
func NewChannelConfigStoreFilterer(address common.Address, filterer bind.ContractFilterer) (*ChannelConfigStoreFilterer, error) {
	contract, err := bindChannelConfigStore(address, nil, nil, filterer)
	if err != nil {
		return nil, err
	}
	return &ChannelConfigStoreFilterer{contract: contract}, nil
}

// bindChannelConfigStore binds a generic wrapper to an already deployed contract.
func bindChannelConfigStore(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
	parsed, err := ChannelConfigStoreMetaData.GetAbi()
	if err != nil {
		return nil, err
	}
	return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
}

// Call invokes the (constant) contract method with params as input values and
// sets the output to result.
func (_ChannelConfigStore *ChannelConfigStoreRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
	return _ChannelConfigStore.Contract.ChannelConfigStoreCaller.contract.Call(opts, result, method, params...)
}

// Transfer initiates a plain transaction to move funds to the contract.
func (_ChannelConfigStore *ChannelConfigStoreRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _ChannelConfigStore.Contract.ChannelConfigStoreTransactor.contract.Transfer(opts)
}

// Transact invokes the (paid) contract method with params as input values.
func (_ChannelConfigStore *ChannelConfigStoreRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
	return _ChannelConfigStore.Contract.ChannelConfigStoreTransactor.contract.Transact(opts, method, params...)
}

// Call invokes the (constant) contract method with params as input values and
// sets the output to result.
func (_ChannelConfigStore *ChannelConfigStoreCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
	return _ChannelConfigStore.Contract.contract.Call(opts, result, method, params...)
}

// Transfer initiates a plain transaction to move funds to the contract.
func (_ChannelConfigStore *ChannelConfigStoreTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _ChannelConfigStore.Contract.contract.Transfer(opts)
}

// Transact invokes the (paid) contract method with params as input values.
// (Closing brace follows on the next original line.)
func (_ChannelConfigStore *ChannelConfigStoreTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
	return _ChannelConfigStore.Contract.contract.Transact(opts, method, params...)
}

// GetChannelDefinitions is a free data retrieval call binding the contract
// method getChannelDefinitions(uint32).
func (_ChannelConfigStore *ChannelConfigStoreCaller) GetChannelDefinitions(opts *bind.CallOpts, channelId uint32) (IChannelConfigStoreChannelDefinition, error) {
	var out []interface{}
	err := _ChannelConfigStore.contract.Call(opts, &out, "getChannelDefinitions", channelId)

	if err != nil {
		return *new(IChannelConfigStoreChannelDefinition), err
	}

	out0 := *abi.ConvertType(out[0], new(IChannelConfigStoreChannelDefinition)).(*IChannelConfigStoreChannelDefinition)

	return out0, err

}

// GetChannelDefinitions delegates to the caller binding using the session's pre-set call options.
func (_ChannelConfigStore *ChannelConfigStoreSession) GetChannelDefinitions(channelId uint32) (IChannelConfigStoreChannelDefinition, error) {
	return _ChannelConfigStore.Contract.GetChannelDefinitions(&_ChannelConfigStore.CallOpts, channelId)
}

// GetChannelDefinitions delegates to the caller binding using the session's pre-set call options.
func (_ChannelConfigStore *ChannelConfigStoreCallerSession) GetChannelDefinitions(channelId uint32) (IChannelConfigStoreChannelDefinition, error) {
	return _ChannelConfigStore.Contract.GetChannelDefinitions(&_ChannelConfigStore.CallOpts, channelId)
}

// Owner is a free data retrieval call binding the contract method owner().
func (_ChannelConfigStore *ChannelConfigStoreCaller) Owner(opts *bind.CallOpts) (common.Address, error) {
	var out []interface{}
	err := _ChannelConfigStore.contract.Call(opts, &out, "owner")

	if err != nil {
		return *new(common.Address), err
	}

	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)

	return out0, err

}

// Owner delegates to the caller binding using the session's pre-set call options.
func (_ChannelConfigStore *ChannelConfigStoreSession) Owner() (common.Address, error) {
	return _ChannelConfigStore.Contract.Owner(&_ChannelConfigStore.CallOpts)
}

// Owner delegates to the caller binding using the session's pre-set call options.
func (_ChannelConfigStore *ChannelConfigStoreCallerSession) Owner() (common.Address, error) {
	return _ChannelConfigStore.Contract.Owner(&_ChannelConfigStore.CallOpts)
}

// SupportsInterface is a free data retrieval call binding the contract method
// supportsInterface(bytes4) (ERC-165 style interface detection).
func (_ChannelConfigStore *ChannelConfigStoreCaller) SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) {
	var out []interface{}
	err := _ChannelConfigStore.contract.Call(opts, &out, "supportsInterface", interfaceId)

	if err != nil {
		return *new(bool), err
	}

	out0 := *abi.ConvertType(out[0], new(bool)).(*bool)

	return out0, err

}

// SupportsInterface delegates to the caller binding using the session's pre-set call options.
func (_ChannelConfigStore *ChannelConfigStoreSession) SupportsInterface(interfaceId [4]byte) (bool, error) {
	return _ChannelConfigStore.Contract.SupportsInterface(&_ChannelConfigStore.CallOpts, interfaceId)
}

// SupportsInterface delegates to the caller binding using the session's pre-set call options.
func (_ChannelConfigStore *ChannelConfigStoreCallerSession) SupportsInterface(interfaceId [4]byte) (bool, error) {
	return _ChannelConfigStore.Contract.SupportsInterface(&_ChannelConfigStore.CallOpts, interfaceId)
}

// TypeAndVersion is a free data retrieval call binding the contract method typeAndVersion().
func (_ChannelConfigStore *ChannelConfigStoreCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) {
	var out []interface{}
	err := _ChannelConfigStore.contract.Call(opts, &out, "typeAndVersion")

	if err != nil {
		return *new(string), err
	}

	out0 := *abi.ConvertType(out[0], new(string)).(*string)

	return out0, err

}

// TypeAndVersion delegates to the caller binding using the session's pre-set call options.
func (_ChannelConfigStore *ChannelConfigStoreSession) TypeAndVersion() (string, error) {
	return _ChannelConfigStore.Contract.TypeAndVersion(&_ChannelConfigStore.CallOpts)
}

// TypeAndVersion delegates to the caller binding using the session's pre-set call options.
func (_ChannelConfigStore *ChannelConfigStoreCallerSession) TypeAndVersion() (string, error) {
	return _ChannelConfigStore.Contract.TypeAndVersion(&_ChannelConfigStore.CallOpts)
}

// AcceptOwnership is a paid mutator transaction binding the contract method acceptOwnership().
func (_ChannelConfigStore *ChannelConfigStoreTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _ChannelConfigStore.contract.Transact(opts, "acceptOwnership")
}

// AcceptOwnership delegates to the transactor binding using the session's pre-set transact options.
func (_ChannelConfigStore *ChannelConfigStoreSession) AcceptOwnership() (*types.Transaction, error) {
	return _ChannelConfigStore.Contract.AcceptOwnership(&_ChannelConfigStore.TransactOpts)
}

// AcceptOwnership delegates to the transactor binding using the session's pre-set transact options.
func (_ChannelConfigStore *ChannelConfigStoreTransactorSession) AcceptOwnership() (*types.Transaction, error) {
	return _ChannelConfigStore.Contract.AcceptOwnership(&_ChannelConfigStore.TransactOpts)
}

// AddChannel is a paid mutator transaction binding the contract method
// addChannel(uint32,(bytes8,uint64,uint32[])).
func (_ChannelConfigStore *ChannelConfigStoreTransactor) AddChannel(opts *bind.TransactOpts, channelId uint32, channelDefinition IChannelConfigStoreChannelDefinition) (*types.Transaction, error) {
	return _ChannelConfigStore.contract.Transact(opts, "addChannel", channelId, channelDefinition)
}

// AddChannel delegates to the transactor binding using the session's pre-set transact options.
func (_ChannelConfigStore *ChannelConfigStoreSession) AddChannel(channelId uint32, channelDefinition IChannelConfigStoreChannelDefinition) (*types.Transaction, error) {
	return _ChannelConfigStore.Contract.AddChannel(&_ChannelConfigStore.TransactOpts, channelId, channelDefinition)
}

// AddChannel delegates to the transactor binding using the session's pre-set transact options.
func (_ChannelConfigStore *ChannelConfigStoreTransactorSession) AddChannel(channelId uint32, channelDefinition IChannelConfigStoreChannelDefinition) (*types.Transaction, error) {
	return _ChannelConfigStore.Contract.AddChannel(&_ChannelConfigStore.TransactOpts, channelId, channelDefinition)
}

// RemoveChannel is a paid mutator transaction binding the contract method removeChannel(uint32).
func (_ChannelConfigStore *ChannelConfigStoreTransactor) RemoveChannel(opts *bind.TransactOpts, channelId uint32) (*types.Transaction, error) {
	return _ChannelConfigStore.contract.Transact(opts, "removeChannel", channelId)
}

// RemoveChannel delegates to the transactor binding using the session's pre-set transact options.
func (_ChannelConfigStore *ChannelConfigStoreSession) RemoveChannel(channelId uint32) (*types.Transaction, error) {
	return _ChannelConfigStore.Contract.RemoveChannel(&_ChannelConfigStore.TransactOpts, channelId)
}

// RemoveChannel delegates to the transactor binding using the session's pre-set transact options.
func (_ChannelConfigStore *ChannelConfigStoreTransactorSession) RemoveChannel(channelId uint32) (*types.Transaction, error) {
	return _ChannelConfigStore.Contract.RemoveChannel(&_ChannelConfigStore.TransactOpts, channelId)
}

// TransferOwnership is a paid mutator transaction binding the contract method transferOwnership(address).
func (_ChannelConfigStore *ChannelConfigStoreTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) {
	return _ChannelConfigStore.contract.Transact(opts, "transferOwnership", to)
}

// TransferOwnership delegates to the transactor binding using the session's pre-set transact options.
func (_ChannelConfigStore *ChannelConfigStoreSession) TransferOwnership(to common.Address) (*types.Transaction, error) {
	return _ChannelConfigStore.Contract.TransferOwnership(&_ChannelConfigStore.TransactOpts, to)
}

// TransferOwnership delegates to the transactor binding using the session's
// pre-set transact options. (Signature continues on the next original line.)
func (_ChannelConfigStore *ChannelConfigStoreTransactorSession) TransferOwnership(to common.Address)
(*types.Transaction, error) { + return _ChannelConfigStore.Contract.TransferOwnership(&_ChannelConfigStore.TransactOpts, to) +} + +type ChannelConfigStoreChannelDefinitionRemovedIterator struct { + Event *ChannelConfigStoreChannelDefinitionRemoved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelConfigStoreChannelDefinitionRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelConfigStoreChannelDefinitionRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChannelConfigStoreChannelDefinitionRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChannelConfigStoreChannelDefinitionRemovedIterator) Error() error { + return it.fail +} + +func (it *ChannelConfigStoreChannelDefinitionRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelConfigStoreChannelDefinitionRemoved struct { + ChannelId uint32 + Raw types.Log +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) FilterChannelDefinitionRemoved(opts *bind.FilterOpts) (*ChannelConfigStoreChannelDefinitionRemovedIterator, error) { + + logs, sub, err := _ChannelConfigStore.contract.FilterLogs(opts, "ChannelDefinitionRemoved") + if err != nil { + return nil, err + } + return &ChannelConfigStoreChannelDefinitionRemovedIterator{contract: _ChannelConfigStore.contract, event: "ChannelDefinitionRemoved", logs: logs, sub: sub}, nil +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) 
WatchChannelDefinitionRemoved(opts *bind.WatchOpts, sink chan<- *ChannelConfigStoreChannelDefinitionRemoved) (event.Subscription, error) { + + logs, sub, err := _ChannelConfigStore.contract.WatchLogs(opts, "ChannelDefinitionRemoved") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChannelConfigStoreChannelDefinitionRemoved) + if err := _ChannelConfigStore.contract.UnpackLog(event, "ChannelDefinitionRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) ParseChannelDefinitionRemoved(log types.Log) (*ChannelConfigStoreChannelDefinitionRemoved, error) { + event := new(ChannelConfigStoreChannelDefinitionRemoved) + if err := _ChannelConfigStore.contract.UnpackLog(event, "ChannelDefinitionRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ChannelConfigStoreNewChannelDefinitionIterator struct { + Event *ChannelConfigStoreNewChannelDefinition + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelConfigStoreNewChannelDefinitionIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelConfigStoreNewChannelDefinition) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChannelConfigStoreNewChannelDefinition) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != 
nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChannelConfigStoreNewChannelDefinitionIterator) Error() error { + return it.fail +} + +func (it *ChannelConfigStoreNewChannelDefinitionIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelConfigStoreNewChannelDefinition struct { + ChannelId uint32 + ChannelDefinition IChannelConfigStoreChannelDefinition + Raw types.Log +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) FilterNewChannelDefinition(opts *bind.FilterOpts) (*ChannelConfigStoreNewChannelDefinitionIterator, error) { + + logs, sub, err := _ChannelConfigStore.contract.FilterLogs(opts, "NewChannelDefinition") + if err != nil { + return nil, err + } + return &ChannelConfigStoreNewChannelDefinitionIterator{contract: _ChannelConfigStore.contract, event: "NewChannelDefinition", logs: logs, sub: sub}, nil +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) WatchNewChannelDefinition(opts *bind.WatchOpts, sink chan<- *ChannelConfigStoreNewChannelDefinition) (event.Subscription, error) { + + logs, sub, err := _ChannelConfigStore.contract.WatchLogs(opts, "NewChannelDefinition") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChannelConfigStoreNewChannelDefinition) + if err := _ChannelConfigStore.contract.UnpackLog(event, "NewChannelDefinition", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) ParseNewChannelDefinition(log types.Log) (*ChannelConfigStoreNewChannelDefinition, error) { + event := 
new(ChannelConfigStoreNewChannelDefinition) + if err := _ChannelConfigStore.contract.UnpackLog(event, "NewChannelDefinition", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ChannelConfigStoreOwnershipTransferRequestedIterator struct { + Event *ChannelConfigStoreOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelConfigStoreOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelConfigStoreOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChannelConfigStoreOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChannelConfigStoreOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *ChannelConfigStoreOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelConfigStoreOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*ChannelConfigStoreOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, 
sub, err := _ChannelConfigStore.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &ChannelConfigStoreOwnershipTransferRequestedIterator{contract: _ChannelConfigStore.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *ChannelConfigStoreOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _ChannelConfigStore.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChannelConfigStoreOwnershipTransferRequested) + if err := _ChannelConfigStore.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) ParseOwnershipTransferRequested(log types.Log) (*ChannelConfigStoreOwnershipTransferRequested, error) { + event := new(ChannelConfigStoreOwnershipTransferRequested) + if err := _ChannelConfigStore.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ChannelConfigStoreOwnershipTransferredIterator struct { + Event *ChannelConfigStoreOwnershipTransferred + + contract *bind.BoundContract + event 
string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelConfigStoreOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelConfigStoreOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChannelConfigStoreOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChannelConfigStoreOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *ChannelConfigStoreOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelConfigStoreOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*ChannelConfigStoreOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _ChannelConfigStore.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &ChannelConfigStoreOwnershipTransferredIterator{contract: _ChannelConfigStore.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- 
*ChannelConfigStoreOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _ChannelConfigStore.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChannelConfigStoreOwnershipTransferred) + if err := _ChannelConfigStore.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) ParseOwnershipTransferred(log types.Log) (*ChannelConfigStoreOwnershipTransferred, error) { + event := new(ChannelConfigStoreOwnershipTransferred) + if err := _ChannelConfigStore.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ChannelConfigStorePromoteStagingConfigIterator struct { + Event *ChannelConfigStorePromoteStagingConfig + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelConfigStorePromoteStagingConfigIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelConfigStorePromoteStagingConfig) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + 
} + } + + select { + case log := <-it.logs: + it.Event = new(ChannelConfigStorePromoteStagingConfig) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChannelConfigStorePromoteStagingConfigIterator) Error() error { + return it.fail +} + +func (it *ChannelConfigStorePromoteStagingConfigIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelConfigStorePromoteStagingConfig struct { + ChannelId uint32 + Raw types.Log +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) FilterPromoteStagingConfig(opts *bind.FilterOpts) (*ChannelConfigStorePromoteStagingConfigIterator, error) { + + logs, sub, err := _ChannelConfigStore.contract.FilterLogs(opts, "PromoteStagingConfig") + if err != nil { + return nil, err + } + return &ChannelConfigStorePromoteStagingConfigIterator{contract: _ChannelConfigStore.contract, event: "PromoteStagingConfig", logs: logs, sub: sub}, nil +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) WatchPromoteStagingConfig(opts *bind.WatchOpts, sink chan<- *ChannelConfigStorePromoteStagingConfig) (event.Subscription, error) { + + logs, sub, err := _ChannelConfigStore.contract.WatchLogs(opts, "PromoteStagingConfig") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChannelConfigStorePromoteStagingConfig) + if err := _ChannelConfigStore.contract.UnpackLog(event, "PromoteStagingConfig", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelConfigStore *ChannelConfigStoreFilterer) 
ParsePromoteStagingConfig(log types.Log) (*ChannelConfigStorePromoteStagingConfig, error) { + event := new(ChannelConfigStorePromoteStagingConfig) + if err := _ChannelConfigStore.contract.UnpackLog(event, "PromoteStagingConfig", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_ChannelConfigStore *ChannelConfigStore) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _ChannelConfigStore.abi.Events["ChannelDefinitionRemoved"].ID: + return _ChannelConfigStore.ParseChannelDefinitionRemoved(log) + case _ChannelConfigStore.abi.Events["NewChannelDefinition"].ID: + return _ChannelConfigStore.ParseNewChannelDefinition(log) + case _ChannelConfigStore.abi.Events["OwnershipTransferRequested"].ID: + return _ChannelConfigStore.ParseOwnershipTransferRequested(log) + case _ChannelConfigStore.abi.Events["OwnershipTransferred"].ID: + return _ChannelConfigStore.ParseOwnershipTransferred(log) + case _ChannelConfigStore.abi.Events["PromoteStagingConfig"].ID: + return _ChannelConfigStore.ParsePromoteStagingConfig(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (ChannelConfigStoreChannelDefinitionRemoved) Topic() common.Hash { + return common.HexToHash("0x334e877e9691ecae0660510061973bebaa8b4fb37332ed6090052e630c979861") +} + +func (ChannelConfigStoreNewChannelDefinition) Topic() common.Hash { + return common.HexToHash("0xbf2cd44714205d633d3f888ac72ea66d53cd12d4c4e8723a80d9c0bc36484a54") +} + +func (ChannelConfigStoreOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (ChannelConfigStoreOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (ChannelConfigStorePromoteStagingConfig) Topic() common.Hash { + return 
common.HexToHash("0xbdd8ee023f9979bf23e8af6fd7241f484024e83fb0fabd11bb7fd5e9bed7308a") +} + +func (_ChannelConfigStore *ChannelConfigStore) Address() common.Address { + return _ChannelConfigStore.address +} + +type ChannelConfigStoreInterface interface { + GetChannelDefinitions(opts *bind.CallOpts, channelId uint32) (IChannelConfigStoreChannelDefinition, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AddChannel(opts *bind.TransactOpts, channelId uint32, channelDefinition IChannelConfigStoreChannelDefinition) (*types.Transaction, error) + + RemoveChannel(opts *bind.TransactOpts, channelId uint32) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterChannelDefinitionRemoved(opts *bind.FilterOpts) (*ChannelConfigStoreChannelDefinitionRemovedIterator, error) + + WatchChannelDefinitionRemoved(opts *bind.WatchOpts, sink chan<- *ChannelConfigStoreChannelDefinitionRemoved) (event.Subscription, error) + + ParseChannelDefinitionRemoved(log types.Log) (*ChannelConfigStoreChannelDefinitionRemoved, error) + + FilterNewChannelDefinition(opts *bind.FilterOpts) (*ChannelConfigStoreNewChannelDefinitionIterator, error) + + WatchNewChannelDefinition(opts *bind.WatchOpts, sink chan<- *ChannelConfigStoreNewChannelDefinition) (event.Subscription, error) + + ParseNewChannelDefinition(log types.Log) (*ChannelConfigStoreNewChannelDefinition, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*ChannelConfigStoreOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *ChannelConfigStoreOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, 
error) + + ParseOwnershipTransferRequested(log types.Log) (*ChannelConfigStoreOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*ChannelConfigStoreOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *ChannelConfigStoreOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*ChannelConfigStoreOwnershipTransferred, error) + + FilterPromoteStagingConfig(opts *bind.FilterOpts) (*ChannelConfigStorePromoteStagingConfigIterator, error) + + WatchPromoteStagingConfig(opts *bind.WatchOpts, sink chan<- *ChannelConfigStorePromoteStagingConfig) (event.Subscription, error) + + ParsePromoteStagingConfig(log types.Log) (*ChannelConfigStorePromoteStagingConfig, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/llo-feeds/generated/channel_verifier/channel_verifier.go b/core/gethwrappers/llo-feeds/generated/channel_verifier/channel_verifier.go new file mode 100644 index 00000000..886e17b5 --- /dev/null +++ b/core/gethwrappers/llo-feeds/generated/channel_verifier/channel_verifier.go @@ -0,0 +1,1583 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package channel_verifier + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type CommonAddressAndWeight struct { + Addr common.Address + Weight uint64 +} + +var ChannelVerifierMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"verifierProxyAddr\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AccessForbidden\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"BadVerification\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"CannotDeactivateLatestConfig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DigestEmpty\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"DigestInactive\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"DigestNotSet\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numSigners\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxSigners\",\"type\":\"uint256\"}],\"name\":\"ExcessSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FaultToleranceMustBePositive\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FeedIdEmpty\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\
"InactiveFeed\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numSigners\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"expectedNumSigners\",\"type\":\"uint256\"}],\"name\":\"IncorrectSignatureCount\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numSigners\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"minSigners\",\"type\":\"uint256\"}],\"name\":\"InsufficientSigners\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"InvalidFeed\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"rsLength\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"ssLength\",\"type\":\"uint256\"}],\"name\":\"MismatchedSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NonUniqueSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroAddress\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"ConfigActivated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"ConfigDeactivated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"bytes32[]\",\"name\":\"offchainTransmitters\",\"type\":\"bytes32[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainCo
nfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"FeedActivated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"FeedDeactivated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"}],\"name\":\"ReportVerified\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"activateConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"activateFeed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"de
activateConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"deactivateFeed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"offchainTransmitters\",\"type\":\"bytes32[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"weight\",\"type\":\"uint64\"}],\"internalType\":\"structCommon.AddressAndWeight[]\",\"name\":\"recipientAddressesAndWeights\",\"type\":\"tuple[]\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType
\":\"uint256\",\"name\":\"sourceChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"sourceAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"newConfigCount\",\"type\":\"uint32\"},{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"offchainTransmitters\",\"type\":\"bytes32[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"weight\",\"type\":\"uint64\"}],\"internalType\":\"structCommon.AddressAndWeight[]\",\"name\":\"recipientAddressesAndWeights\",\"type\":\"tuple[]\"}],\"name\":\"setConfigFromSource\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"isVerifier\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"signedReport\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"verify\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"verifierResponse\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":
\"function\"}]", + Bin: "0x60a06040523480156200001157600080fd5b50604051620020ed380380620020ed8339810160408190526200003491620001a6565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000fb565b5050506001600160a01b038116620000e95760405163d92e233d60e01b815260040160405180910390fd5b6001600160a01b0316608052620001d8565b336001600160a01b03821603620001555760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600060208284031215620001b957600080fd5b81516001600160a01b0381168114620001d157600080fd5b9392505050565b608051611ef9620001f4600039600061051a0152611ef96000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c8063564a0a7a1161008c5780638da5cb5b116100665780638da5cb5b14610247578063afcb95d71461026f578063eb1dc803146102a4578063f2fde38b146102b757600080fd5b8063564a0a7a146101fc57806379ba50971461020f57806381ff70481461021757600080fd5b8063181f5a77116100c8578063181f5a77146101815780633d3ac1b5146101c35780633dd86430146101d657806354e68a81146101e957600080fd5b806301ffc9a7146100ef5780630d1d79af146101595780630f672ef41461016e575b600080fd5b6101446100fd3660046113d0565b7fffffffff00000000000000000000000000000000000000000000000000000000167f3d3ac1b5000000000000000000000000000000000000000000000000000000001490565b60405190151581526020015b60405180910390f35b61016c610167366004611419565b6102ca565b005b61016c61017c366004611419565b6103d0565b60408051808201909152601581527f4368616e6e656c566572696669657220302e302e30000000000000000000000060208201525b6040516101509190611496565b6101b66101d13660046114d2565b610500565b61016c6101e4366004611419565b610
680565b61016c6101f7366004611866565b6106fd565b61016c61020a366004611419565b61080d565b61016c61088d565b6002546003546040805163ffffffff80851682526401000000009094049093166020840152820152606001610150565b60005460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610150565b600354600254604080516000815260208101939093526801000000000000000090910463ffffffff1690820152606001610150565b61016c6102b2366004611986565b61098a565b61016c6102c5366004611a79565b610a52565b6102d2610a63565b80610309576040517fe332262700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008181526005602052604081205460ff16900361035b576040517f74eb4b93000000000000000000000000000000000000000000000000000000008152600481018290526024015b60405180910390fd5b6000818152600560205260409081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff16610100179055517fa543797a0501218bba8a3daf75a71c8df8d1a7f791f4e44d40e43b6450183cea906103c59083815260200190565b60405180910390a150565b6103d8610a63565b8061040f576040517fe332262700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008181526005602052604081205460ff16900361045c576040517f74eb4b9300000000000000000000000000000000000000000000000000000000815260048101829052602401610352565b600354810361049a576040517f67863f4400000000000000000000000000000000000000000000000000000000815260048101829052602401610352565b6000818152600560205260409081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055517f5bfaab86edc1b932e3c334327a591c9ded067cb521abae19b95ca927d6076579906103c59083815260200190565b60603373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610571576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600080808080610583888a018a611a94565b9450945094509450945060008461059990611b6f565b60008181526004602052604090205490915060ff16156105e8576040517f36dbe7480000000000000000000000000000000000000
0000000000000000000815260048101829052602401610352565b8551600081815260056020526040902061060482878784610ae6565b61060f886002610bda565b86516020880120610624818a89898987610c42565b60405173ffffffffffffffffffffffffffffffffffffffff8c16815284907f58ca9502e98a536e06e72d680fcc251e5d10b72291a281665a2c2dc0ac30fcc59060200160405180910390a250959b9a5050505050505050505050565b610688610a63565b60008181526004602052604090205460ff166106fa5760008181526004602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001690555182917ff438564f793525caa89c6e3a26d41e16aa39d1e589747595751e3f3df75cb2b491a25b50565b86518560ff168060000361073d576040517f0743bae600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601f821115610782576040517f61750f4000000000000000000000000000000000000000000000000000000000815260048101839052601f6024820152604401610352565b61078d816003611c12565b82116107e5578161079f826003611c12565b6107aa906001611c2f565b6040517f9dd9e6d800000000000000000000000000000000000000000000000000000000815260048101929092526024820152604401610352565b6107ed610a63565b6107ff8c8c8c8c8c8c8c8c8c8c610ebe565b505050505050505050505050565b610815610a63565b60008181526004602052604090205460ff16156106fa5760008181526004602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790555182917ffc4f79b8c65b6be1773063461984c0974400d1e99654c79477a092ace83fd06191a250565b60015473ffffffffffffffffffffffffffffffffffffffff16331461090e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610352565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b86518560ff16806000036109ca576040517f0743bae60000000000000000000000000000000000000000000
0000000000000815260040160405180910390fd5b601f821115610a0f576040517f61750f4000000000000000000000000000000000000000000000000000000000815260048101839052601f6024820152604401610352565b610a1a816003611c12565b8211610a2c578161079f826003611c12565b610a34610a63565b610a47463060008c8c8c8c8c8c8c610ebe565b505050505050505050565b610a5a610a63565b6106fa81611230565b60005473ffffffffffffffffffffffffffffffffffffffff163314610ae4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610352565b565b8054600090610af99060ff166001611c42565b8254909150610100900460ff16610b3f576040517fd990d62100000000000000000000000000000000000000000000000000000000815260048101869052602401610352565b8060ff16845114610b8b5783516040517f5348a282000000000000000000000000000000000000000000000000000000008152600481019190915260ff82166024820152604401610352565b8251845114610bd357835183516040517ff0d3140800000000000000000000000000000000000000000000000000000000815260048101929092526024820152604401610352565b5050505050565b6020820151815463ffffffff600883901c81169168010000000000000000900416811115610c3c5782547fffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffff166801000000000000000063ffffffff8316021783555b50505050565b60008686604051602001610c57929190611c5b565b6040516020818303038152906040528051906020012090506000610c8b604080518082019091526000808252602082015290565b8651600090815b81811015610e5657600186898360208110610caf57610caf611bb4565b610cbc91901a601b611c42565b8c8481518110610cce57610cce611bb4565b60200260200101518c8581518110610ce857610ce8611bb4565b602002602001015160405160008152602001604052604051610d26949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa158015610d48573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015173ffffffffffffffffffffffffffffffffffffffff811660009081526001808d01602090815291859020848
601909552845460ff808216865293995093955090850192610100900490911690811115610dcd57610dcd611c97565b6001811115610dde57610dde611c97565b9052509350600184602001516001811115610dfb57610dfb611c97565b14610e32576040517f4df18f0700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b836000015160080260ff166001901b8501945080610e4f90611cc6565b9050610c92565b50837e01010101010101010101010101010101010101010101010101010101010101851614610eb1576040517f4df18f0700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5050505050505050505050565b63ffffffff881615610eff57600280547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff8a16179055610f35565b6002805463ffffffff16906000610f1583611cfe565b91906101000a81548163ffffffff021916908363ffffffff160217905550505b600254600090610f54908c908c9063ffffffff168b8b8b8b8b8b611325565b600081815260056020526040812080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001660ff8a16176101001790559091505b88518160ff16101561118f576000898260ff1681518110610fb857610fb8611bb4565b60200260200101519050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603611028576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600080600085815260056020908152604080832073ffffffffffffffffffffffffffffffffffffffff87168452600190810190925290912054610100900460ff169081111561107957611079611c97565b14801591506110b4576040517ff67bc7c400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805180820190915260ff841681526020810160019052600085815260056020908152604080832073ffffffffffffffffffffffffffffffffffffffff871684526001908101835292208351815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00821681178355928501519193919284927fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000909216179061010090849081111561117457611174611c97565b021790555090505050508061118890611d2
1565b9050610f95565b506002546040517f1074b4b9a073f79bd1f7f5c808348125ce0f25c27188df7efcaa7a08276051b3916111e29163ffffffff640100000000830481169286929116908d908d908d908d908d908d90611dc1565b60405180910390a1600280547fffffffffffffffffffffffffffffffffffffffff0000000000000000ffffffff1664010000000063ffffffff43160217905560035550505050505050505050565b3373ffffffffffffffffffffffffffffffffffffffff8216036112af576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610352565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000808a8a8a8a8a8a8a8a8a60405160200161134999989796959493929190611e57565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e09000000000000000000000000000000000000000000000000000000000000179150509998505050505050505050565b6000602082840312156113e257600080fd5b81357fffffffff000000000000000000000000000000000000000000000000000000008116811461141257600080fd5b9392505050565b60006020828403121561142b57600080fd5b5035919050565b6000815180845260005b818110156114585760208185018101518683018201520161143c565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b6020815260006114126020830184611432565b803573ffffffffffffffffffffffffffffffffffffffff811681146114cd57600080fd5b919050565b6000806000604084860312156114e757600080fd5b833567ffffffffffffffff808211156114ff57600080fd5b818601915086601f83011261151357600080fd5b81358181111561152257600080fd5b87602082850101111561153457600080fd5b60209283019550935061154a91860190506114a9565b90509250925092565b803563ffffffff811681146114cd57600080fd5b7f4e487b71000
00000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040805190810167ffffffffffffffff811182821017156115b9576115b9611567565b60405290565b6040516060810167ffffffffffffffff811182821017156115b9576115b9611567565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff8111828210171561162957611629611567565b604052919050565b600067ffffffffffffffff82111561164b5761164b611567565b5060051b60200190565b600082601f83011261166657600080fd5b8135602061167b61167683611631565b6115e2565b82815260059290921b8401810191818101908684111561169a57600080fd5b8286015b848110156116bc576116af816114a9565b835291830191830161169e565b509695505050505050565b600082601f8301126116d857600080fd5b813560206116e861167683611631565b82815260059290921b8401810191818101908684111561170757600080fd5b8286015b848110156116bc578035835291830191830161170b565b803560ff811681146114cd57600080fd5b600082601f83011261174457600080fd5b813567ffffffffffffffff81111561175e5761175e611567565b61178f60207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116016115e2565b8181528460208386010111156117a457600080fd5b816020850160208301376000918101602001919091529392505050565b803567ffffffffffffffff811681146114cd57600080fd5b600082601f8301126117ea57600080fd5b813560206117fa61167683611631565b82815260069290921b8401810191818101908684111561181957600080fd5b8286015b848110156116bc57604081890312156118365760008081fd5b61183e611596565b611847826114a9565b81526118548583016117c1565b8186015283529183019160400161181d565b6000806000806000806000806000806101408b8d03121561188657600080fd5b8a35995061189660208c016114a9565b98506118a460408c01611553565b975060608b013567ffffffffffffffff808211156118c157600080fd5b6118cd8e838f01611655565b985060808d01359150808211156118e357600080fd5b6118ef8e838f016116c7565b97506118fd60a08e01611722565b965060c08d013591508082111561191357600080fd5b61191f8e838f01611733565b955061192d60e08e016117c1565b94506101008d013591508082111561194457600080fd5b6119508e838f01611733565b93506101208
d013591508082111561196757600080fd5b506119748d828e016117d9565b9150509295989b9194979a5092959850565b600080600080600080600060e0888a0312156119a157600080fd5b873567ffffffffffffffff808211156119b957600080fd5b6119c58b838c01611655565b985060208a01359150808211156119db57600080fd5b6119e78b838c016116c7565b97506119f560408b01611722565b965060608a0135915080821115611a0b57600080fd5b611a178b838c01611733565b9550611a2560808b016117c1565b945060a08a0135915080821115611a3b57600080fd5b611a478b838c01611733565b935060c08a0135915080821115611a5d57600080fd5b50611a6a8a828b016117d9565b91505092959891949750929550565b600060208284031215611a8b57600080fd5b611412826114a9565b600080600080600060e08688031215611aac57600080fd5b86601f870112611abb57600080fd5b611ac36115bf565b806060880189811115611ad557600080fd5b885b81811015611aef578035845260209384019301611ad7565b5090965035905067ffffffffffffffff80821115611b0c57600080fd5b611b1889838a01611733565b95506080880135915080821115611b2e57600080fd5b611b3a89838a016116c7565b945060a0880135915080821115611b5057600080fd5b50611b5d888289016116c7565b9598949750929560c001359392505050565b80516020808301519190811015611bae577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8160200360031b1b821691505b50919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b8082028115828204841417611c2957611c29611be3565b92915050565b80820180821115611c2957611c29611be3565b60ff8181168382160190811115611c2957611c29611be3565b828152600060208083018460005b6003811015611c8657815183529183019190830190600101611c69565b505050506080820190509392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611cf757611cf7611be3565b5060010190565b600063ffffffff808316818103611d1757611d17611be3565b6001019392505050565b600060ff821660ff8103611d3757611d37611be3565b6001019291505
0565b600081518084526020808501945080840160005b83811015611d8657815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101611d54565b509495945050505050565b600081518084526020808501945080840160005b83811015611d8657815187529582019590820190600101611da5565b600061012063ffffffff808d1684528b6020850152808b16604085015250806060840152611df18184018a611d40565b90508281036080840152611e058189611d91565b905060ff871660a084015282810360c0840152611e228187611432565b905067ffffffffffffffff851660e0840152828103610100840152611e478185611432565b9c9b505050505050505050505050565b60006101208b835273ffffffffffffffffffffffffffffffffffffffff8b16602084015267ffffffffffffffff808b166040850152816060850152611e9e8285018b611d40565b91508382036080850152611eb2828a611d91565b915060ff881660a085015283820360c0850152611ecf8288611432565b90861660e08501528381036101008501529050611e47818561143256fea164736f6c6343000813000a", +} + +var ChannelVerifierABI = ChannelVerifierMetaData.ABI + +var ChannelVerifierBin = ChannelVerifierMetaData.Bin + +func DeployChannelVerifier(auth *bind.TransactOpts, backend bind.ContractBackend, verifierProxyAddr common.Address) (common.Address, *types.Transaction, *ChannelVerifier, error) { + parsed, err := ChannelVerifierMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(ChannelVerifierBin), backend, verifierProxyAddr) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &ChannelVerifier{address: address, abi: *parsed, ChannelVerifierCaller: ChannelVerifierCaller{contract: contract}, ChannelVerifierTransactor: ChannelVerifierTransactor{contract: contract}, ChannelVerifierFilterer: ChannelVerifierFilterer{contract: contract}}, nil +} + +type ChannelVerifier struct { + address common.Address + abi abi.ABI + ChannelVerifierCaller + 
ChannelVerifierTransactor + ChannelVerifierFilterer +} + +type ChannelVerifierCaller struct { + contract *bind.BoundContract +} + +type ChannelVerifierTransactor struct { + contract *bind.BoundContract +} + +type ChannelVerifierFilterer struct { + contract *bind.BoundContract +} + +type ChannelVerifierSession struct { + Contract *ChannelVerifier + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type ChannelVerifierCallerSession struct { + Contract *ChannelVerifierCaller + CallOpts bind.CallOpts +} + +type ChannelVerifierTransactorSession struct { + Contract *ChannelVerifierTransactor + TransactOpts bind.TransactOpts +} + +type ChannelVerifierRaw struct { + Contract *ChannelVerifier +} + +type ChannelVerifierCallerRaw struct { + Contract *ChannelVerifierCaller +} + +type ChannelVerifierTransactorRaw struct { + Contract *ChannelVerifierTransactor +} + +func NewChannelVerifier(address common.Address, backend bind.ContractBackend) (*ChannelVerifier, error) { + abi, err := abi.JSON(strings.NewReader(ChannelVerifierABI)) + if err != nil { + return nil, err + } + contract, err := bindChannelVerifier(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &ChannelVerifier{address: address, abi: abi, ChannelVerifierCaller: ChannelVerifierCaller{contract: contract}, ChannelVerifierTransactor: ChannelVerifierTransactor{contract: contract}, ChannelVerifierFilterer: ChannelVerifierFilterer{contract: contract}}, nil +} + +func NewChannelVerifierCaller(address common.Address, caller bind.ContractCaller) (*ChannelVerifierCaller, error) { + contract, err := bindChannelVerifier(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ChannelVerifierCaller{contract: contract}, nil +} + +func NewChannelVerifierTransactor(address common.Address, transactor bind.ContractTransactor) (*ChannelVerifierTransactor, error) { + contract, err := bindChannelVerifier(address, nil, transactor, nil) + if err != nil { + return nil, err + 
} + return &ChannelVerifierTransactor{contract: contract}, nil +} + +func NewChannelVerifierFilterer(address common.Address, filterer bind.ContractFilterer) (*ChannelVerifierFilterer, error) { + contract, err := bindChannelVerifier(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ChannelVerifierFilterer{contract: contract}, nil +} + +func bindChannelVerifier(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ChannelVerifierMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_ChannelVerifier *ChannelVerifierRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ChannelVerifier.Contract.ChannelVerifierCaller.contract.Call(opts, result, method, params...) +} + +func (_ChannelVerifier *ChannelVerifierRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ChannelVerifier.Contract.ChannelVerifierTransactor.contract.Transfer(opts) +} + +func (_ChannelVerifier *ChannelVerifierRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ChannelVerifier.Contract.ChannelVerifierTransactor.contract.Transact(opts, method, params...) +} + +func (_ChannelVerifier *ChannelVerifierCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ChannelVerifier.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_ChannelVerifier *ChannelVerifierTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ChannelVerifier.Contract.contract.Transfer(opts) +} + +func (_ChannelVerifier *ChannelVerifierTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ChannelVerifier.Contract.contract.Transact(opts, method, params...) +} + +func (_ChannelVerifier *ChannelVerifierCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := _ChannelVerifier.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_ChannelVerifier *ChannelVerifierSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _ChannelVerifier.Contract.LatestConfigDetails(&_ChannelVerifier.CallOpts) +} + +func (_ChannelVerifier *ChannelVerifierCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _ChannelVerifier.Contract.LatestConfigDetails(&_ChannelVerifier.CallOpts) +} + +func (_ChannelVerifier *ChannelVerifierCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) { + var out []interface{} + err := _ChannelVerifier.contract.Call(opts, &out, "latestConfigDigestAndEpoch") + + outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func 
(_ChannelVerifier *ChannelVerifierSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _ChannelVerifier.Contract.LatestConfigDigestAndEpoch(&_ChannelVerifier.CallOpts) +} + +func (_ChannelVerifier *ChannelVerifierCallerSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _ChannelVerifier.Contract.LatestConfigDigestAndEpoch(&_ChannelVerifier.CallOpts) +} + +func (_ChannelVerifier *ChannelVerifierCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _ChannelVerifier.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_ChannelVerifier *ChannelVerifierSession) Owner() (common.Address, error) { + return _ChannelVerifier.Contract.Owner(&_ChannelVerifier.CallOpts) +} + +func (_ChannelVerifier *ChannelVerifierCallerSession) Owner() (common.Address, error) { + return _ChannelVerifier.Contract.Owner(&_ChannelVerifier.CallOpts) +} + +func (_ChannelVerifier *ChannelVerifierCaller) SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) { + var out []interface{} + err := _ChannelVerifier.contract.Call(opts, &out, "supportsInterface", interfaceId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_ChannelVerifier *ChannelVerifierSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _ChannelVerifier.Contract.SupportsInterface(&_ChannelVerifier.CallOpts, interfaceId) +} + +func (_ChannelVerifier *ChannelVerifierCallerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _ChannelVerifier.Contract.SupportsInterface(&_ChannelVerifier.CallOpts, interfaceId) +} + +func (_ChannelVerifier *ChannelVerifierCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out 
[]interface{} + err := _ChannelVerifier.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_ChannelVerifier *ChannelVerifierSession) TypeAndVersion() (string, error) { + return _ChannelVerifier.Contract.TypeAndVersion(&_ChannelVerifier.CallOpts) +} + +func (_ChannelVerifier *ChannelVerifierCallerSession) TypeAndVersion() (string, error) { + return _ChannelVerifier.Contract.TypeAndVersion(&_ChannelVerifier.CallOpts) +} + +func (_ChannelVerifier *ChannelVerifierTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ChannelVerifier.contract.Transact(opts, "acceptOwnership") +} + +func (_ChannelVerifier *ChannelVerifierSession) AcceptOwnership() (*types.Transaction, error) { + return _ChannelVerifier.Contract.AcceptOwnership(&_ChannelVerifier.TransactOpts) +} + +func (_ChannelVerifier *ChannelVerifierTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _ChannelVerifier.Contract.AcceptOwnership(&_ChannelVerifier.TransactOpts) +} + +func (_ChannelVerifier *ChannelVerifierTransactor) ActivateConfig(opts *bind.TransactOpts, configDigest [32]byte) (*types.Transaction, error) { + return _ChannelVerifier.contract.Transact(opts, "activateConfig", configDigest) +} + +func (_ChannelVerifier *ChannelVerifierSession) ActivateConfig(configDigest [32]byte) (*types.Transaction, error) { + return _ChannelVerifier.Contract.ActivateConfig(&_ChannelVerifier.TransactOpts, configDigest) +} + +func (_ChannelVerifier *ChannelVerifierTransactorSession) ActivateConfig(configDigest [32]byte) (*types.Transaction, error) { + return _ChannelVerifier.Contract.ActivateConfig(&_ChannelVerifier.TransactOpts, configDigest) +} + +func (_ChannelVerifier *ChannelVerifierTransactor) ActivateFeed(opts *bind.TransactOpts, feedId [32]byte) (*types.Transaction, error) { + return 
_ChannelVerifier.contract.Transact(opts, "activateFeed", feedId) +} + +func (_ChannelVerifier *ChannelVerifierSession) ActivateFeed(feedId [32]byte) (*types.Transaction, error) { + return _ChannelVerifier.Contract.ActivateFeed(&_ChannelVerifier.TransactOpts, feedId) +} + +func (_ChannelVerifier *ChannelVerifierTransactorSession) ActivateFeed(feedId [32]byte) (*types.Transaction, error) { + return _ChannelVerifier.Contract.ActivateFeed(&_ChannelVerifier.TransactOpts, feedId) +} + +func (_ChannelVerifier *ChannelVerifierTransactor) DeactivateConfig(opts *bind.TransactOpts, configDigest [32]byte) (*types.Transaction, error) { + return _ChannelVerifier.contract.Transact(opts, "deactivateConfig", configDigest) +} + +func (_ChannelVerifier *ChannelVerifierSession) DeactivateConfig(configDigest [32]byte) (*types.Transaction, error) { + return _ChannelVerifier.Contract.DeactivateConfig(&_ChannelVerifier.TransactOpts, configDigest) +} + +func (_ChannelVerifier *ChannelVerifierTransactorSession) DeactivateConfig(configDigest [32]byte) (*types.Transaction, error) { + return _ChannelVerifier.Contract.DeactivateConfig(&_ChannelVerifier.TransactOpts, configDigest) +} + +func (_ChannelVerifier *ChannelVerifierTransactor) DeactivateFeed(opts *bind.TransactOpts, feedId [32]byte) (*types.Transaction, error) { + return _ChannelVerifier.contract.Transact(opts, "deactivateFeed", feedId) +} + +func (_ChannelVerifier *ChannelVerifierSession) DeactivateFeed(feedId [32]byte) (*types.Transaction, error) { + return _ChannelVerifier.Contract.DeactivateFeed(&_ChannelVerifier.TransactOpts, feedId) +} + +func (_ChannelVerifier *ChannelVerifierTransactorSession) DeactivateFeed(feedId [32]byte) (*types.Transaction, error) { + return _ChannelVerifier.Contract.DeactivateFeed(&_ChannelVerifier.TransactOpts, feedId) +} + +func (_ChannelVerifier *ChannelVerifierTransactor) SetConfig(opts *bind.TransactOpts, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, 
offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _ChannelVerifier.contract.Transact(opts, "setConfig", signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) +} + +func (_ChannelVerifier *ChannelVerifierSession) SetConfig(signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _ChannelVerifier.Contract.SetConfig(&_ChannelVerifier.TransactOpts, signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) +} + +func (_ChannelVerifier *ChannelVerifierTransactorSession) SetConfig(signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _ChannelVerifier.Contract.SetConfig(&_ChannelVerifier.TransactOpts, signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) +} + +func (_ChannelVerifier *ChannelVerifierTransactor) SetConfigFromSource(opts *bind.TransactOpts, sourceChainId *big.Int, sourceAddress common.Address, newConfigCount uint32, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _ChannelVerifier.contract.Transact(opts, "setConfigFromSource", sourceChainId, sourceAddress, newConfigCount, signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) +} + +func (_ChannelVerifier 
*ChannelVerifierSession) SetConfigFromSource(sourceChainId *big.Int, sourceAddress common.Address, newConfigCount uint32, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _ChannelVerifier.Contract.SetConfigFromSource(&_ChannelVerifier.TransactOpts, sourceChainId, sourceAddress, newConfigCount, signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) +} + +func (_ChannelVerifier *ChannelVerifierTransactorSession) SetConfigFromSource(sourceChainId *big.Int, sourceAddress common.Address, newConfigCount uint32, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _ChannelVerifier.Contract.SetConfigFromSource(&_ChannelVerifier.TransactOpts, sourceChainId, sourceAddress, newConfigCount, signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) +} + +func (_ChannelVerifier *ChannelVerifierTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _ChannelVerifier.contract.Transact(opts, "transferOwnership", to) +} + +func (_ChannelVerifier *ChannelVerifierSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _ChannelVerifier.Contract.TransferOwnership(&_ChannelVerifier.TransactOpts, to) +} + +func (_ChannelVerifier *ChannelVerifierTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _ChannelVerifier.Contract.TransferOwnership(&_ChannelVerifier.TransactOpts, to) +} + +func (_ChannelVerifier *ChannelVerifierTransactor) Verify(opts *bind.TransactOpts, signedReport 
[]byte, sender common.Address) (*types.Transaction, error) { + return _ChannelVerifier.contract.Transact(opts, "verify", signedReport, sender) +} + +func (_ChannelVerifier *ChannelVerifierSession) Verify(signedReport []byte, sender common.Address) (*types.Transaction, error) { + return _ChannelVerifier.Contract.Verify(&_ChannelVerifier.TransactOpts, signedReport, sender) +} + +func (_ChannelVerifier *ChannelVerifierTransactorSession) Verify(signedReport []byte, sender common.Address) (*types.Transaction, error) { + return _ChannelVerifier.Contract.Verify(&_ChannelVerifier.TransactOpts, signedReport, sender) +} + +type ChannelVerifierConfigActivatedIterator struct { + Event *ChannelVerifierConfigActivated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelVerifierConfigActivatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierConfigActivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierConfigActivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChannelVerifierConfigActivatedIterator) Error() error { + return it.fail +} + +func (it *ChannelVerifierConfigActivatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelVerifierConfigActivated struct { + ConfigDigest [32]byte + Raw types.Log +} + +func (_ChannelVerifier *ChannelVerifierFilterer) FilterConfigActivated(opts *bind.FilterOpts) (*ChannelVerifierConfigActivatedIterator, error) { + + logs, 
sub, err := _ChannelVerifier.contract.FilterLogs(opts, "ConfigActivated") + if err != nil { + return nil, err + } + return &ChannelVerifierConfigActivatedIterator{contract: _ChannelVerifier.contract, event: "ConfigActivated", logs: logs, sub: sub}, nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) WatchConfigActivated(opts *bind.WatchOpts, sink chan<- *ChannelVerifierConfigActivated) (event.Subscription, error) { + + logs, sub, err := _ChannelVerifier.contract.WatchLogs(opts, "ConfigActivated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChannelVerifierConfigActivated) + if err := _ChannelVerifier.contract.UnpackLog(event, "ConfigActivated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) ParseConfigActivated(log types.Log) (*ChannelVerifierConfigActivated, error) { + event := new(ChannelVerifierConfigActivated) + if err := _ChannelVerifier.contract.UnpackLog(event, "ConfigActivated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ChannelVerifierConfigDeactivatedIterator struct { + Event *ChannelVerifierConfigDeactivated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelVerifierConfigDeactivatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierConfigDeactivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return 
false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierConfigDeactivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChannelVerifierConfigDeactivatedIterator) Error() error { + return it.fail +} + +func (it *ChannelVerifierConfigDeactivatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelVerifierConfigDeactivated struct { + ConfigDigest [32]byte + Raw types.Log +} + +func (_ChannelVerifier *ChannelVerifierFilterer) FilterConfigDeactivated(opts *bind.FilterOpts) (*ChannelVerifierConfigDeactivatedIterator, error) { + + logs, sub, err := _ChannelVerifier.contract.FilterLogs(opts, "ConfigDeactivated") + if err != nil { + return nil, err + } + return &ChannelVerifierConfigDeactivatedIterator{contract: _ChannelVerifier.contract, event: "ConfigDeactivated", logs: logs, sub: sub}, nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) WatchConfigDeactivated(opts *bind.WatchOpts, sink chan<- *ChannelVerifierConfigDeactivated) (event.Subscription, error) { + + logs, sub, err := _ChannelVerifier.contract.WatchLogs(opts, "ConfigDeactivated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChannelVerifierConfigDeactivated) + if err := _ChannelVerifier.contract.UnpackLog(event, "ConfigDeactivated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) ParseConfigDeactivated(log types.Log) (*ChannelVerifierConfigDeactivated, error) { 
+ event := new(ChannelVerifierConfigDeactivated) + if err := _ChannelVerifier.contract.UnpackLog(event, "ConfigDeactivated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ChannelVerifierConfigSetIterator struct { + Event *ChannelVerifierConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelVerifierConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChannelVerifierConfigSetIterator) Error() error { + return it.fail +} + +func (it *ChannelVerifierConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelVerifierConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + OffchainTransmitters [][32]byte + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_ChannelVerifier *ChannelVerifierFilterer) FilterConfigSet(opts *bind.FilterOpts) (*ChannelVerifierConfigSetIterator, error) { + + logs, sub, err := _ChannelVerifier.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &ChannelVerifierConfigSetIterator{contract: _ChannelVerifier.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_ChannelVerifier 
*ChannelVerifierFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *ChannelVerifierConfigSet) (event.Subscription, error) { + + logs, sub, err := _ChannelVerifier.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChannelVerifierConfigSet) + if err := _ChannelVerifier.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) ParseConfigSet(log types.Log) (*ChannelVerifierConfigSet, error) { + event := new(ChannelVerifierConfigSet) + if err := _ChannelVerifier.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ChannelVerifierFeedActivatedIterator struct { + Event *ChannelVerifierFeedActivated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelVerifierFeedActivatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierFeedActivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierFeedActivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*ChannelVerifierFeedActivatedIterator) Error() error { + return it.fail +} + +func (it *ChannelVerifierFeedActivatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelVerifierFeedActivated struct { + FeedId [32]byte + Raw types.Log +} + +func (_ChannelVerifier *ChannelVerifierFilterer) FilterFeedActivated(opts *bind.FilterOpts, feedId [][32]byte) (*ChannelVerifierFeedActivatedIterator, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _ChannelVerifier.contract.FilterLogs(opts, "FeedActivated", feedIdRule) + if err != nil { + return nil, err + } + return &ChannelVerifierFeedActivatedIterator{contract: _ChannelVerifier.contract, event: "FeedActivated", logs: logs, sub: sub}, nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) WatchFeedActivated(opts *bind.WatchOpts, sink chan<- *ChannelVerifierFeedActivated, feedId [][32]byte) (event.Subscription, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _ChannelVerifier.contract.WatchLogs(opts, "FeedActivated", feedIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChannelVerifierFeedActivated) + if err := _ChannelVerifier.contract.UnpackLog(event, "FeedActivated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) ParseFeedActivated(log types.Log) (*ChannelVerifierFeedActivated, error) { + event := new(ChannelVerifierFeedActivated) + if err := _ChannelVerifier.contract.UnpackLog(event, 
"FeedActivated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ChannelVerifierFeedDeactivatedIterator struct { + Event *ChannelVerifierFeedDeactivated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelVerifierFeedDeactivatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierFeedDeactivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierFeedDeactivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChannelVerifierFeedDeactivatedIterator) Error() error { + return it.fail +} + +func (it *ChannelVerifierFeedDeactivatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelVerifierFeedDeactivated struct { + FeedId [32]byte + Raw types.Log +} + +func (_ChannelVerifier *ChannelVerifierFilterer) FilterFeedDeactivated(opts *bind.FilterOpts, feedId [][32]byte) (*ChannelVerifierFeedDeactivatedIterator, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _ChannelVerifier.contract.FilterLogs(opts, "FeedDeactivated", feedIdRule) + if err != nil { + return nil, err + } + return &ChannelVerifierFeedDeactivatedIterator{contract: _ChannelVerifier.contract, event: "FeedDeactivated", logs: logs, sub: sub}, nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) WatchFeedDeactivated(opts *bind.WatchOpts, sink chan<- 
*ChannelVerifierFeedDeactivated, feedId [][32]byte) (event.Subscription, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _ChannelVerifier.contract.WatchLogs(opts, "FeedDeactivated", feedIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChannelVerifierFeedDeactivated) + if err := _ChannelVerifier.contract.UnpackLog(event, "FeedDeactivated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) ParseFeedDeactivated(log types.Log) (*ChannelVerifierFeedDeactivated, error) { + event := new(ChannelVerifierFeedDeactivated) + if err := _ChannelVerifier.contract.UnpackLog(event, "FeedDeactivated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ChannelVerifierOwnershipTransferRequestedIterator struct { + Event *ChannelVerifierOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelVerifierOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err 
!= nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChannelVerifierOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *ChannelVerifierOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelVerifierOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_ChannelVerifier *ChannelVerifierFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*ChannelVerifierOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _ChannelVerifier.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &ChannelVerifierOwnershipTransferRequestedIterator{contract: _ChannelVerifier.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *ChannelVerifierOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _ChannelVerifier.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(ChannelVerifierOwnershipTransferRequested) + if err := _ChannelVerifier.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) ParseOwnershipTransferRequested(log types.Log) (*ChannelVerifierOwnershipTransferRequested, error) { + event := new(ChannelVerifierOwnershipTransferRequested) + if err := _ChannelVerifier.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ChannelVerifierOwnershipTransferredIterator struct { + Event *ChannelVerifierOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelVerifierOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChannelVerifierOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *ChannelVerifierOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelVerifierOwnershipTransferred struct { + From 
common.Address + To common.Address + Raw types.Log +} + +func (_ChannelVerifier *ChannelVerifierFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*ChannelVerifierOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _ChannelVerifier.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &ChannelVerifierOwnershipTransferredIterator{contract: _ChannelVerifier.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *ChannelVerifierOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _ChannelVerifier.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChannelVerifierOwnershipTransferred) + if err := _ChannelVerifier.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) ParseOwnershipTransferred(log types.Log) (*ChannelVerifierOwnershipTransferred, error) { + event 
:= new(ChannelVerifierOwnershipTransferred) + if err := _ChannelVerifier.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ChannelVerifierReportVerifiedIterator struct { + Event *ChannelVerifierReportVerified + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChannelVerifierReportVerifiedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierReportVerified) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChannelVerifierReportVerified) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChannelVerifierReportVerifiedIterator) Error() error { + return it.fail +} + +func (it *ChannelVerifierReportVerifiedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChannelVerifierReportVerified struct { + FeedId [32]byte + Requester common.Address + Raw types.Log +} + +func (_ChannelVerifier *ChannelVerifierFilterer) FilterReportVerified(opts *bind.FilterOpts, feedId [][32]byte) (*ChannelVerifierReportVerifiedIterator, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _ChannelVerifier.contract.FilterLogs(opts, "ReportVerified", feedIdRule) + if err != nil { + return nil, err + } + return &ChannelVerifierReportVerifiedIterator{contract: _ChannelVerifier.contract, event: "ReportVerified", logs: logs, sub: sub}, nil 
+} + +func (_ChannelVerifier *ChannelVerifierFilterer) WatchReportVerified(opts *bind.WatchOpts, sink chan<- *ChannelVerifierReportVerified, feedId [][32]byte) (event.Subscription, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _ChannelVerifier.contract.WatchLogs(opts, "ReportVerified", feedIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChannelVerifierReportVerified) + if err := _ChannelVerifier.contract.UnpackLog(event, "ReportVerified", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChannelVerifier *ChannelVerifierFilterer) ParseReportVerified(log types.Log) (*ChannelVerifierReportVerified, error) { + event := new(ChannelVerifierReportVerified) + if err := _ChannelVerifier.contract.UnpackLog(event, "ReportVerified", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LatestConfigDetails struct { + ConfigCount uint32 + BlockNumber uint32 + ConfigDigest [32]byte +} +type LatestConfigDigestAndEpoch struct { + ScanLogs bool + ConfigDigest [32]byte + Epoch uint32 +} + +func (_ChannelVerifier *ChannelVerifier) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _ChannelVerifier.abi.Events["ConfigActivated"].ID: + return _ChannelVerifier.ParseConfigActivated(log) + case _ChannelVerifier.abi.Events["ConfigDeactivated"].ID: + return _ChannelVerifier.ParseConfigDeactivated(log) + case _ChannelVerifier.abi.Events["ConfigSet"].ID: + return _ChannelVerifier.ParseConfigSet(log) + case _ChannelVerifier.abi.Events["FeedActivated"].ID: + return 
_ChannelVerifier.ParseFeedActivated(log) + case _ChannelVerifier.abi.Events["FeedDeactivated"].ID: + return _ChannelVerifier.ParseFeedDeactivated(log) + case _ChannelVerifier.abi.Events["OwnershipTransferRequested"].ID: + return _ChannelVerifier.ParseOwnershipTransferRequested(log) + case _ChannelVerifier.abi.Events["OwnershipTransferred"].ID: + return _ChannelVerifier.ParseOwnershipTransferred(log) + case _ChannelVerifier.abi.Events["ReportVerified"].ID: + return _ChannelVerifier.ParseReportVerified(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (ChannelVerifierConfigActivated) Topic() common.Hash { + return common.HexToHash("0xa543797a0501218bba8a3daf75a71c8df8d1a7f791f4e44d40e43b6450183cea") +} + +func (ChannelVerifierConfigDeactivated) Topic() common.Hash { + return common.HexToHash("0x5bfaab86edc1b932e3c334327a591c9ded067cb521abae19b95ca927d6076579") +} + +func (ChannelVerifierConfigSet) Topic() common.Hash { + return common.HexToHash("0x1074b4b9a073f79bd1f7f5c808348125ce0f25c27188df7efcaa7a08276051b3") +} + +func (ChannelVerifierFeedActivated) Topic() common.Hash { + return common.HexToHash("0xf438564f793525caa89c6e3a26d41e16aa39d1e589747595751e3f3df75cb2b4") +} + +func (ChannelVerifierFeedDeactivated) Topic() common.Hash { + return common.HexToHash("0xfc4f79b8c65b6be1773063461984c0974400d1e99654c79477a092ace83fd061") +} + +func (ChannelVerifierOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (ChannelVerifierOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (ChannelVerifierReportVerified) Topic() common.Hash { + return common.HexToHash("0x58ca9502e98a536e06e72d680fcc251e5d10b72291a281665a2c2dc0ac30fcc5") +} + +func (_ChannelVerifier *ChannelVerifier) Address() common.Address { + 
return _ChannelVerifier.address +} + +type ChannelVerifierInterface interface { + LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) + + LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + ActivateConfig(opts *bind.TransactOpts, configDigest [32]byte) (*types.Transaction, error) + + ActivateFeed(opts *bind.TransactOpts, feedId [32]byte) (*types.Transaction, error) + + DeactivateConfig(opts *bind.TransactOpts, configDigest [32]byte) (*types.Transaction, error) + + DeactivateFeed(opts *bind.TransactOpts, feedId [32]byte) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) + + SetConfigFromSource(opts *bind.TransactOpts, sourceChainId *big.Int, sourceAddress common.Address, newConfigCount uint32, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Verify(opts *bind.TransactOpts, signedReport []byte, sender common.Address) (*types.Transaction, error) + + FilterConfigActivated(opts *bind.FilterOpts) (*ChannelVerifierConfigActivatedIterator, error) + + WatchConfigActivated(opts *bind.WatchOpts, sink chan<- *ChannelVerifierConfigActivated) (event.Subscription, error) + + ParseConfigActivated(log types.Log) (*ChannelVerifierConfigActivated, error) + + 
FilterConfigDeactivated(opts *bind.FilterOpts) (*ChannelVerifierConfigDeactivatedIterator, error) + + WatchConfigDeactivated(opts *bind.WatchOpts, sink chan<- *ChannelVerifierConfigDeactivated) (event.Subscription, error) + + ParseConfigDeactivated(log types.Log) (*ChannelVerifierConfigDeactivated, error) + + FilterConfigSet(opts *bind.FilterOpts) (*ChannelVerifierConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *ChannelVerifierConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*ChannelVerifierConfigSet, error) + + FilterFeedActivated(opts *bind.FilterOpts, feedId [][32]byte) (*ChannelVerifierFeedActivatedIterator, error) + + WatchFeedActivated(opts *bind.WatchOpts, sink chan<- *ChannelVerifierFeedActivated, feedId [][32]byte) (event.Subscription, error) + + ParseFeedActivated(log types.Log) (*ChannelVerifierFeedActivated, error) + + FilterFeedDeactivated(opts *bind.FilterOpts, feedId [][32]byte) (*ChannelVerifierFeedDeactivatedIterator, error) + + WatchFeedDeactivated(opts *bind.WatchOpts, sink chan<- *ChannelVerifierFeedDeactivated, feedId [][32]byte) (event.Subscription, error) + + ParseFeedDeactivated(log types.Log) (*ChannelVerifierFeedDeactivated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*ChannelVerifierOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *ChannelVerifierOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*ChannelVerifierOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*ChannelVerifierOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *ChannelVerifierOwnershipTransferred, from []common.Address, to []common.Address) 
(event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*ChannelVerifierOwnershipTransferred, error) + + FilterReportVerified(opts *bind.FilterOpts, feedId [][32]byte) (*ChannelVerifierReportVerifiedIterator, error) + + WatchReportVerified(opts *bind.WatchOpts, sink chan<- *ChannelVerifierReportVerified, feedId [][32]byte) (event.Subscription, error) + + ParseReportVerified(log types.Log) (*ChannelVerifierReportVerified, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/llo-feeds/generated/errored_verifier/errored_verifier.go b/core/gethwrappers/llo-feeds/generated/errored_verifier/errored_verifier.go new file mode 100644 index 00000000..4d140ea0 --- /dev/null +++ b/core/gethwrappers/llo-feeds/generated/errored_verifier/errored_verifier.go @@ -0,0 +1,415 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package errored_verifier + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type CommonAddressAndWeight struct { + Addr common.Address + Weight uint64 +} + +var ErroredVerifierMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"activateConfig\",\"outputs\":[],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"activateFeed\",\"outputs\":[],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"deactivateConfig\",\"outputs\":[],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"deactivateFeed\",\"outputs\":[],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"\",\"type\":\"bytes32[]\"},{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"},{\"interna
lType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"weight\",\"type\":\"uint64\"}],\"internalType\":\"structCommon.AddressAndWeight[]\",\"name\":\"\",\"type\":\"tuple[]\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"},{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"\",\"type\":\"bytes32[]\"},{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"weight\",\"type\":\"uint64\"}],\"internalType\":\"structCommon.AddressAndWeight[]\",\"name\":\"\",\"type\":\"tuple[]\"}],\"name\":\"setConfigFromSource\",\"outputs\":[],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"verify\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610c2e806100206000396000f3fe608060405234801561001057600080fd5b50600436106100be5760003560e01c8063b70d929d11610076578063e7db9c2a1161005b578063e7db9c2a146101d1578063e84f128e146101e4578063f01072211461021a57600080fd5b8063b70d929d14610188578063ded6307c146101be57600080fd5b80633dd86430116100a75780633dd864301461014d578063564a0a7a1461016257806394d959801461017557600080fd5b806301ffc9a7146100c35780633d3ac1b51461012d575b600080fd5b6101186100d136600461059a565b7fffffffff00000000000000000000000000000000000000000000000000000000167f3d3ac1b5000000000000000000000000000000000000000000000000000000001490565b60405190151581526020015b60405180910390f35b61014061013b366004610741565b610228565b604051610124919061078f565b61016061015b3660046107fb565b610292565b005b6101606101703660046107fb565b6102f4565b610160610183366004610814565b610356565b61019b6101963660046107fb565b6103b8565b604080519315158452602084019290925263ffffffff1690820152606001610124565b6101606101cc366004610814565b610447565b6101606101df3660046109f1565b6104a9565b6101f76101f23660046107fb565b61050b565b6040805163ffffffff948516815293909216602084015290820152606001610124565b6101606101df366004610b24565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f4661696c656420746f207665726966790000000000000000000000000000000060448201526060906064015b60405180910390fd5b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4661696c656420746f20616374697661746520666565640000000000000000006044820152606401610289565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f4661696c656420746f20646561637469766174652066656564000000000000006044820152606401610289565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f4661696c656420746f206465616374697661746520636f6e66696700000000006044820152606401610289565b60008060006040517f08c379a00
00000000000000000000000000000000000000000000000000000008152600401610289906020808252602c908201527f4661696c656420746f20676574206c617465737420636f6e666967206469676560408201527f737420616e642065706f63680000000000000000000000000000000000000000606082015260800190565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f4661696c656420746f20616374697661746520636f6e666967000000000000006044820152606401610289565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f4661696c656420746f2073657420636f6e6669670000000000000000000000006044820152606401610289565b60008060006040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016102899060208082526023908201527f4661696c656420746f20676574206c617465737420636f6e666967206465746160408201527f696c730000000000000000000000000000000000000000000000000000000000606082015260800190565b6000602082840312156105ac57600080fd5b81357fffffffff00000000000000000000000000000000000000000000000000000000811681146105dc57600080fd5b9392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040805190810167ffffffffffffffff81118282101715610635576106356105e3565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610682576106826105e3565b604052919050565b600082601f83011261069b57600080fd5b813567ffffffffffffffff8111156106b5576106b56105e3565b6106e660207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8401160161063b565b8181528460208386010111156106fb57600080fd5b816020850160208301376000918101602001919091529392505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461073c57600080fd5b919050565b6000806040838503121561075457600080fd5b823567ffffffffffffffff81111561076b57600080fd5b6107778582860161068a565b92505061078660208401610718565b90509250929050565b600060208083528351808285015260005b818110156107bc5785810183015
18582016040015282016107a0565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b60006020828403121561080d57600080fd5b5035919050565b6000806040838503121561082757600080fd5b50508035926020909101359150565b803563ffffffff8116811461073c57600080fd5b600067ffffffffffffffff821115610864576108646105e3565b5060051b60200190565b600082601f83011261087f57600080fd5b8135602061089461088f8361084a565b61063b565b82815260059290921b840181019181810190868411156108b357600080fd5b8286015b848110156108d5576108c881610718565b83529183019183016108b7565b509695505050505050565b600082601f8301126108f157600080fd5b8135602061090161088f8361084a565b82815260059290921b8401810191818101908684111561092057600080fd5b8286015b848110156108d55780358352918301918301610924565b803560ff8116811461073c57600080fd5b803567ffffffffffffffff8116811461073c57600080fd5b600082601f83011261097557600080fd5b8135602061098561088f8361084a565b82815260069290921b840181019181810190868411156109a457600080fd5b8286015b848110156108d557604081890312156109c15760008081fd5b6109c9610612565b6109d282610718565b81526109df85830161094c565b818601528352918301916040016109a8565b60008060008060008060008060008060006101608c8e031215610a1357600080fd5b8b359a5060208c01359950610a2a60408d01610718565b9850610a3860608d01610836565b975067ffffffffffffffff8060808e01351115610a5457600080fd5b610a648e60808f01358f0161086e565b97508060a08e01351115610a7757600080fd5b610a878e60a08f01358f016108e0565b9650610a9560c08e0161093b565b95508060e08e01351115610aa857600080fd5b610ab88e60e08f01358f0161068a565b9450610ac76101008e0161094c565b9350806101208e01351115610adb57600080fd5b610aec8e6101208f01358f0161068a565b9250806101408e01351115610b0057600080fd5b50610b128d6101408e01358e01610964565b90509295989b509295989b9093969950565b600080600080600080600080610100898b031215610b4157600080fd5b88359750602089013567ffffffffffffffff80821115610b6057600080fd5b610b6c8c838d0161086e565b985060408b0135915080821115610b8257600080fd5b610b8e8c838d016108e0565b9750610b9c60608
c0161093b565b965060808b0135915080821115610bb257600080fd5b610bbe8c838d0161068a565b9550610bcc60a08c0161094c565b945060c08b0135915080821115610be257600080fd5b610bee8c838d0161068a565b935060e08b0135915080821115610c0457600080fd5b50610c118b828c01610964565b915050929598509295989093965056fea164736f6c6343000813000a", +} + +var ErroredVerifierABI = ErroredVerifierMetaData.ABI + +var ErroredVerifierBin = ErroredVerifierMetaData.Bin + +func DeployErroredVerifier(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ErroredVerifier, error) { + parsed, err := ErroredVerifierMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(ErroredVerifierBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &ErroredVerifier{address: address, abi: *parsed, ErroredVerifierCaller: ErroredVerifierCaller{contract: contract}, ErroredVerifierTransactor: ErroredVerifierTransactor{contract: contract}, ErroredVerifierFilterer: ErroredVerifierFilterer{contract: contract}}, nil +} + +type ErroredVerifier struct { + address common.Address + abi abi.ABI + ErroredVerifierCaller + ErroredVerifierTransactor + ErroredVerifierFilterer +} + +type ErroredVerifierCaller struct { + contract *bind.BoundContract +} + +type ErroredVerifierTransactor struct { + contract *bind.BoundContract +} + +type ErroredVerifierFilterer struct { + contract *bind.BoundContract +} + +type ErroredVerifierSession struct { + Contract *ErroredVerifier + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type ErroredVerifierCallerSession struct { + Contract *ErroredVerifierCaller + CallOpts bind.CallOpts +} + +type ErroredVerifierTransactorSession struct { + Contract *ErroredVerifierTransactor + TransactOpts bind.TransactOpts +} + +type 
ErroredVerifierRaw struct { + Contract *ErroredVerifier +} + +type ErroredVerifierCallerRaw struct { + Contract *ErroredVerifierCaller +} + +type ErroredVerifierTransactorRaw struct { + Contract *ErroredVerifierTransactor +} + +func NewErroredVerifier(address common.Address, backend bind.ContractBackend) (*ErroredVerifier, error) { + abi, err := abi.JSON(strings.NewReader(ErroredVerifierABI)) + if err != nil { + return nil, err + } + contract, err := bindErroredVerifier(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &ErroredVerifier{address: address, abi: abi, ErroredVerifierCaller: ErroredVerifierCaller{contract: contract}, ErroredVerifierTransactor: ErroredVerifierTransactor{contract: contract}, ErroredVerifierFilterer: ErroredVerifierFilterer{contract: contract}}, nil +} + +func NewErroredVerifierCaller(address common.Address, caller bind.ContractCaller) (*ErroredVerifierCaller, error) { + contract, err := bindErroredVerifier(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ErroredVerifierCaller{contract: contract}, nil +} + +func NewErroredVerifierTransactor(address common.Address, transactor bind.ContractTransactor) (*ErroredVerifierTransactor, error) { + contract, err := bindErroredVerifier(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ErroredVerifierTransactor{contract: contract}, nil +} + +func NewErroredVerifierFilterer(address common.Address, filterer bind.ContractFilterer) (*ErroredVerifierFilterer, error) { + contract, err := bindErroredVerifier(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ErroredVerifierFilterer{contract: contract}, nil +} + +func bindErroredVerifier(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ErroredVerifierMetaData.GetAbi() + if err != nil { + return nil, err + } + return 
bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_ErroredVerifier *ErroredVerifierRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ErroredVerifier.Contract.ErroredVerifierCaller.contract.Call(opts, result, method, params...) +} + +func (_ErroredVerifier *ErroredVerifierRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ErroredVerifier.Contract.ErroredVerifierTransactor.contract.Transfer(opts) +} + +func (_ErroredVerifier *ErroredVerifierRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ErroredVerifier.Contract.ErroredVerifierTransactor.contract.Transact(opts, method, params...) +} + +func (_ErroredVerifier *ErroredVerifierCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ErroredVerifier.Contract.contract.Call(opts, result, method, params...) +} + +func (_ErroredVerifier *ErroredVerifierTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ErroredVerifier.Contract.contract.Transfer(opts) +} + +func (_ErroredVerifier *ErroredVerifierTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ErroredVerifier.Contract.contract.Transact(opts, method, params...) 
+} + +func (_ErroredVerifier *ErroredVerifierCaller) ActivateConfig(opts *bind.CallOpts, arg0 [32]byte, arg1 [32]byte) error { + var out []interface{} + err := _ErroredVerifier.contract.Call(opts, &out, "activateConfig", arg0, arg1) + + if err != nil { + return err + } + + return err + +} + +func (_ErroredVerifier *ErroredVerifierSession) ActivateConfig(arg0 [32]byte, arg1 [32]byte) error { + return _ErroredVerifier.Contract.ActivateConfig(&_ErroredVerifier.CallOpts, arg0, arg1) +} + +func (_ErroredVerifier *ErroredVerifierCallerSession) ActivateConfig(arg0 [32]byte, arg1 [32]byte) error { + return _ErroredVerifier.Contract.ActivateConfig(&_ErroredVerifier.CallOpts, arg0, arg1) +} + +func (_ErroredVerifier *ErroredVerifierCaller) ActivateFeed(opts *bind.CallOpts, arg0 [32]byte) error { + var out []interface{} + err := _ErroredVerifier.contract.Call(opts, &out, "activateFeed", arg0) + + if err != nil { + return err + } + + return err + +} + +func (_ErroredVerifier *ErroredVerifierSession) ActivateFeed(arg0 [32]byte) error { + return _ErroredVerifier.Contract.ActivateFeed(&_ErroredVerifier.CallOpts, arg0) +} + +func (_ErroredVerifier *ErroredVerifierCallerSession) ActivateFeed(arg0 [32]byte) error { + return _ErroredVerifier.Contract.ActivateFeed(&_ErroredVerifier.CallOpts, arg0) +} + +func (_ErroredVerifier *ErroredVerifierCaller) DeactivateConfig(opts *bind.CallOpts, arg0 [32]byte, arg1 [32]byte) error { + var out []interface{} + err := _ErroredVerifier.contract.Call(opts, &out, "deactivateConfig", arg0, arg1) + + if err != nil { + return err + } + + return err + +} + +func (_ErroredVerifier *ErroredVerifierSession) DeactivateConfig(arg0 [32]byte, arg1 [32]byte) error { + return _ErroredVerifier.Contract.DeactivateConfig(&_ErroredVerifier.CallOpts, arg0, arg1) +} + +func (_ErroredVerifier *ErroredVerifierCallerSession) DeactivateConfig(arg0 [32]byte, arg1 [32]byte) error { + return _ErroredVerifier.Contract.DeactivateConfig(&_ErroredVerifier.CallOpts, arg0, arg1) 
+} + +func (_ErroredVerifier *ErroredVerifierCaller) DeactivateFeed(opts *bind.CallOpts, arg0 [32]byte) error { + var out []interface{} + err := _ErroredVerifier.contract.Call(opts, &out, "deactivateFeed", arg0) + + if err != nil { + return err + } + + return err + +} + +func (_ErroredVerifier *ErroredVerifierSession) DeactivateFeed(arg0 [32]byte) error { + return _ErroredVerifier.Contract.DeactivateFeed(&_ErroredVerifier.CallOpts, arg0) +} + +func (_ErroredVerifier *ErroredVerifierCallerSession) DeactivateFeed(arg0 [32]byte) error { + return _ErroredVerifier.Contract.DeactivateFeed(&_ErroredVerifier.CallOpts, arg0) +} + +func (_ErroredVerifier *ErroredVerifierCaller) LatestConfigDetails(opts *bind.CallOpts, arg0 [32]byte) (uint32, uint32, [32]byte, error) { + var out []interface{} + err := _ErroredVerifier.contract.Call(opts, &out, "latestConfigDetails", arg0) + + if err != nil { + return *new(uint32), *new(uint32), *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new(uint32)).(*uint32) + out1 := *abi.ConvertType(out[1], new(uint32)).(*uint32) + out2 := *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return out0, out1, out2, err + +} + +func (_ErroredVerifier *ErroredVerifierSession) LatestConfigDetails(arg0 [32]byte) (uint32, uint32, [32]byte, error) { + return _ErroredVerifier.Contract.LatestConfigDetails(&_ErroredVerifier.CallOpts, arg0) +} + +func (_ErroredVerifier *ErroredVerifierCallerSession) LatestConfigDetails(arg0 [32]byte) (uint32, uint32, [32]byte, error) { + return _ErroredVerifier.Contract.LatestConfigDetails(&_ErroredVerifier.CallOpts, arg0) +} + +func (_ErroredVerifier *ErroredVerifierCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts, arg0 [32]byte) (bool, [32]byte, uint32, error) { + var out []interface{} + err := _ErroredVerifier.contract.Call(opts, &out, "latestConfigDigestAndEpoch", arg0) + + if err != nil { + return *new(bool), *new([32]byte), *new(uint32), err + } + + out0 := *abi.ConvertType(out[0], 
new(bool)).(*bool) + out1 := *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + out2 := *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return out0, out1, out2, err + +} + +func (_ErroredVerifier *ErroredVerifierSession) LatestConfigDigestAndEpoch(arg0 [32]byte) (bool, [32]byte, uint32, error) { + return _ErroredVerifier.Contract.LatestConfigDigestAndEpoch(&_ErroredVerifier.CallOpts, arg0) +} + +func (_ErroredVerifier *ErroredVerifierCallerSession) LatestConfigDigestAndEpoch(arg0 [32]byte) (bool, [32]byte, uint32, error) { + return _ErroredVerifier.Contract.LatestConfigDigestAndEpoch(&_ErroredVerifier.CallOpts, arg0) +} + +func (_ErroredVerifier *ErroredVerifierCaller) SetConfig(opts *bind.CallOpts, arg0 [32]byte, arg1 []common.Address, arg2 [][32]byte, arg3 uint8, arg4 []byte, arg5 uint64, arg6 []byte, arg7 []CommonAddressAndWeight) error { + var out []interface{} + err := _ErroredVerifier.contract.Call(opts, &out, "setConfig", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + + if err != nil { + return err + } + + return err + +} + +func (_ErroredVerifier *ErroredVerifierSession) SetConfig(arg0 [32]byte, arg1 []common.Address, arg2 [][32]byte, arg3 uint8, arg4 []byte, arg5 uint64, arg6 []byte, arg7 []CommonAddressAndWeight) error { + return _ErroredVerifier.Contract.SetConfig(&_ErroredVerifier.CallOpts, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +} + +func (_ErroredVerifier *ErroredVerifierCallerSession) SetConfig(arg0 [32]byte, arg1 []common.Address, arg2 [][32]byte, arg3 uint8, arg4 []byte, arg5 uint64, arg6 []byte, arg7 []CommonAddressAndWeight) error { + return _ErroredVerifier.Contract.SetConfig(&_ErroredVerifier.CallOpts, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +} + +func (_ErroredVerifier *ErroredVerifierCaller) SetConfigFromSource(opts *bind.CallOpts, arg0 [32]byte, arg1 *big.Int, arg2 common.Address, arg3 uint32, arg4 []common.Address, arg5 [][32]byte, arg6 uint8, arg7 []byte, arg8 uint64, arg9 []byte, arg10 []CommonAddressAndWeight) 
error { + var out []interface{} + err := _ErroredVerifier.contract.Call(opts, &out, "setConfigFromSource", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) + + if err != nil { + return err + } + + return err + +} + +func (_ErroredVerifier *ErroredVerifierSession) SetConfigFromSource(arg0 [32]byte, arg1 *big.Int, arg2 common.Address, arg3 uint32, arg4 []common.Address, arg5 [][32]byte, arg6 uint8, arg7 []byte, arg8 uint64, arg9 []byte, arg10 []CommonAddressAndWeight) error { + return _ErroredVerifier.Contract.SetConfigFromSource(&_ErroredVerifier.CallOpts, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) +} + +func (_ErroredVerifier *ErroredVerifierCallerSession) SetConfigFromSource(arg0 [32]byte, arg1 *big.Int, arg2 common.Address, arg3 uint32, arg4 []common.Address, arg5 [][32]byte, arg6 uint8, arg7 []byte, arg8 uint64, arg9 []byte, arg10 []CommonAddressAndWeight) error { + return _ErroredVerifier.Contract.SetConfigFromSource(&_ErroredVerifier.CallOpts, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) +} + +func (_ErroredVerifier *ErroredVerifierCaller) SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) { + var out []interface{} + err := _ErroredVerifier.contract.Call(opts, &out, "supportsInterface", interfaceId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_ErroredVerifier *ErroredVerifierSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _ErroredVerifier.Contract.SupportsInterface(&_ErroredVerifier.CallOpts, interfaceId) +} + +func (_ErroredVerifier *ErroredVerifierCallerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _ErroredVerifier.Contract.SupportsInterface(&_ErroredVerifier.CallOpts, interfaceId) +} + +func (_ErroredVerifier *ErroredVerifierCaller) Verify(opts *bind.CallOpts, arg0 []byte, arg1 common.Address) ([]byte, error) { + var out 
[]interface{} + err := _ErroredVerifier.contract.Call(opts, &out, "verify", arg0, arg1) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_ErroredVerifier *ErroredVerifierSession) Verify(arg0 []byte, arg1 common.Address) ([]byte, error) { + return _ErroredVerifier.Contract.Verify(&_ErroredVerifier.CallOpts, arg0, arg1) +} + +func (_ErroredVerifier *ErroredVerifierCallerSession) Verify(arg0 []byte, arg1 common.Address) ([]byte, error) { + return _ErroredVerifier.Contract.Verify(&_ErroredVerifier.CallOpts, arg0, arg1) +} + +func (_ErroredVerifier *ErroredVerifier) Address() common.Address { + return _ErroredVerifier.address +} + +type ErroredVerifierInterface interface { + ActivateConfig(opts *bind.CallOpts, arg0 [32]byte, arg1 [32]byte) error + + ActivateFeed(opts *bind.CallOpts, arg0 [32]byte) error + + DeactivateConfig(opts *bind.CallOpts, arg0 [32]byte, arg1 [32]byte) error + + DeactivateFeed(opts *bind.CallOpts, arg0 [32]byte) error + + LatestConfigDetails(opts *bind.CallOpts, arg0 [32]byte) (uint32, uint32, [32]byte, error) + + LatestConfigDigestAndEpoch(opts *bind.CallOpts, arg0 [32]byte) (bool, [32]byte, uint32, error) + + SetConfig(opts *bind.CallOpts, arg0 [32]byte, arg1 []common.Address, arg2 [][32]byte, arg3 uint8, arg4 []byte, arg5 uint64, arg6 []byte, arg7 []CommonAddressAndWeight) error + + SetConfigFromSource(opts *bind.CallOpts, arg0 [32]byte, arg1 *big.Int, arg2 common.Address, arg3 uint32, arg4 []common.Address, arg5 [][32]byte, arg6 uint8, arg7 []byte, arg8 uint64, arg9 []byte, arg10 []CommonAddressAndWeight) error + + SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) + + Verify(opts *bind.CallOpts, arg0 []byte, arg1 common.Address) ([]byte, error) + + Address() common.Address +} diff --git a/core/gethwrappers/llo-feeds/generated/exposed_channel_verifier/exposed_channel_verifier.go 
b/core/gethwrappers/llo-feeds/generated/exposed_channel_verifier/exposed_channel_verifier.go new file mode 100644 index 00000000..e516b9a2 --- /dev/null +++ b/core/gethwrappers/llo-feeds/generated/exposed_channel_verifier/exposed_channel_verifier.go @@ -0,0 +1,202 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package exposed_channel_verifier + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var ExposedChannelVerifierMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_chainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_contractAddress\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"_configCount\",\"type\":\"uint64\"},{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"_offchainTransmitters\",\"type\":\"bytes32[]\"},{\"internalType\":\"uint8\",\"name\":\"_f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"_encodedConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_encodedConfig\",\"type\":\"bytes\"}],\"name\":\"exposedConfigDigestFromConfigData\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5061067e806100206000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063b05a355014610030575b600080fd5b61004361003e3660046103f2565b610055565b60405190815260200160405180910390f35b60006100a08b8b8b8b8b8b8b8b8080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508d92508c91506100af9050565b9b9a5050505050505050505050565b6000808a8a8a8a8a8a8a8a8a6040516020016100d399989796959493929190610594565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e09000000000000000000000000000000000000000000000000000000000000179150509998505050505050505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461017e57600080fd5b919050565b803567ffffffffffffffff8116811461017e57600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156102115761021161019b565b604052919050565b600067ffffffffffffffff8211156102335761023361019b565b5060051b60200190565b600082601f83011261024e57600080fd5b8135602061026361025e83610219565b6101ca565b82815260059290921b8401810191818101908684111561028257600080fd5b8286015b848110156102a4576102978161015a565b8352918301918301610286565b509695505050505050565b600082601f8301126102c057600080fd5b813560206102d061025e83610219565b82815260059290921b840181019181810190868411156102ef57600080fd5b8286015b848110156102a457803583529183019183016102f3565b803560ff8116811461017e57600080fd5b60008083601f84011261032d57600080fd5b50813567ffffffffffffffff81111561034557600080fd5b60208301915083602082850101111561035d57600080fd5b9250929050565b600082601f83011261037557600080fd5b813567ffffffffffffffff81111561038f5761038f61019b565b6103c060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116016101ca565b81815284602
08386010111156103d557600080fd5b816020850160208301376000918101602001919091529392505050565b6000806000806000806000806000806101208b8d03121561041257600080fd5b8a35995061042260208c0161015a565b985061043060408c01610183565b975060608b013567ffffffffffffffff8082111561044d57600080fd5b6104598e838f0161023d565b985060808d013591508082111561046f57600080fd5b61047b8e838f016102af565b975061048960a08e0161030a565b965060c08d013591508082111561049f57600080fd5b6104ab8e838f0161031b565b90965094508491506104bf60e08e01610183565b93506101008d01359150808211156104d657600080fd5b506104e38d828e01610364565b9150509295989b9194979a5092959850565b600081518084526020808501945080840160005b8381101561052557815187529582019590820190600101610509565b509495945050505050565b6000815180845260005b818110156105565760208185018101518683018201520161053a565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b60006101208083018c8452602073ffffffffffffffffffffffffffffffffffffffff808e168287015267ffffffffffffffff8d1660408701528360608701528293508b5180845261014087019450828d01935060005b818110156106085784518316865294830194938301936001016105ea565b5050505050828103608084015261061f81896104f5565b60ff881660a0850152905082810360c084015261063c8187610530565b67ffffffffffffffff861660e085015290508281036101008401526106618185610530565b9c9b50505050505050505050505056fea164736f6c6343000813000a", +} + +var ExposedChannelVerifierABI = ExposedChannelVerifierMetaData.ABI + +var ExposedChannelVerifierBin = ExposedChannelVerifierMetaData.Bin + +func DeployExposedChannelVerifier(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ExposedChannelVerifier, error) { + parsed, err := ExposedChannelVerifierMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, 
common.FromHex(ExposedChannelVerifierBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &ExposedChannelVerifier{address: address, abi: *parsed, ExposedChannelVerifierCaller: ExposedChannelVerifierCaller{contract: contract}, ExposedChannelVerifierTransactor: ExposedChannelVerifierTransactor{contract: contract}, ExposedChannelVerifierFilterer: ExposedChannelVerifierFilterer{contract: contract}}, nil +} + +type ExposedChannelVerifier struct { + address common.Address + abi abi.ABI + ExposedChannelVerifierCaller + ExposedChannelVerifierTransactor + ExposedChannelVerifierFilterer +} + +type ExposedChannelVerifierCaller struct { + contract *bind.BoundContract +} + +type ExposedChannelVerifierTransactor struct { + contract *bind.BoundContract +} + +type ExposedChannelVerifierFilterer struct { + contract *bind.BoundContract +} + +type ExposedChannelVerifierSession struct { + Contract *ExposedChannelVerifier + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type ExposedChannelVerifierCallerSession struct { + Contract *ExposedChannelVerifierCaller + CallOpts bind.CallOpts +} + +type ExposedChannelVerifierTransactorSession struct { + Contract *ExposedChannelVerifierTransactor + TransactOpts bind.TransactOpts +} + +type ExposedChannelVerifierRaw struct { + Contract *ExposedChannelVerifier +} + +type ExposedChannelVerifierCallerRaw struct { + Contract *ExposedChannelVerifierCaller +} + +type ExposedChannelVerifierTransactorRaw struct { + Contract *ExposedChannelVerifierTransactor +} + +func NewExposedChannelVerifier(address common.Address, backend bind.ContractBackend) (*ExposedChannelVerifier, error) { + abi, err := abi.JSON(strings.NewReader(ExposedChannelVerifierABI)) + if err != nil { + return nil, err + } + contract, err := bindExposedChannelVerifier(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &ExposedChannelVerifier{address: address, abi: abi, 
ExposedChannelVerifierCaller: ExposedChannelVerifierCaller{contract: contract}, ExposedChannelVerifierTransactor: ExposedChannelVerifierTransactor{contract: contract}, ExposedChannelVerifierFilterer: ExposedChannelVerifierFilterer{contract: contract}}, nil +} + +func NewExposedChannelVerifierCaller(address common.Address, caller bind.ContractCaller) (*ExposedChannelVerifierCaller, error) { + contract, err := bindExposedChannelVerifier(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ExposedChannelVerifierCaller{contract: contract}, nil +} + +func NewExposedChannelVerifierTransactor(address common.Address, transactor bind.ContractTransactor) (*ExposedChannelVerifierTransactor, error) { + contract, err := bindExposedChannelVerifier(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ExposedChannelVerifierTransactor{contract: contract}, nil +} + +func NewExposedChannelVerifierFilterer(address common.Address, filterer bind.ContractFilterer) (*ExposedChannelVerifierFilterer, error) { + contract, err := bindExposedChannelVerifier(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ExposedChannelVerifierFilterer{contract: contract}, nil +} + +func bindExposedChannelVerifier(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ExposedChannelVerifierMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_ExposedChannelVerifier *ExposedChannelVerifierRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ExposedChannelVerifier.Contract.ExposedChannelVerifierCaller.contract.Call(opts, result, method, params...) 
+} + +func (_ExposedChannelVerifier *ExposedChannelVerifierRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ExposedChannelVerifier.Contract.ExposedChannelVerifierTransactor.contract.Transfer(opts) +} + +func (_ExposedChannelVerifier *ExposedChannelVerifierRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ExposedChannelVerifier.Contract.ExposedChannelVerifierTransactor.contract.Transact(opts, method, params...) +} + +func (_ExposedChannelVerifier *ExposedChannelVerifierCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ExposedChannelVerifier.Contract.contract.Call(opts, result, method, params...) +} + +func (_ExposedChannelVerifier *ExposedChannelVerifierTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ExposedChannelVerifier.Contract.contract.Transfer(opts) +} + +func (_ExposedChannelVerifier *ExposedChannelVerifierTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ExposedChannelVerifier.Contract.contract.Transact(opts, method, params...) 
+} + +func (_ExposedChannelVerifier *ExposedChannelVerifierCaller) ExposedConfigDigestFromConfigData(opts *bind.CallOpts, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) { + var out []interface{} + err := _ExposedChannelVerifier.contract.Call(opts, &out, "exposedConfigDigestFromConfigData", _chainId, _contractAddress, _configCount, _signers, _offchainTransmitters, _f, _onchainConfig, _encodedConfigVersion, _encodedConfig) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_ExposedChannelVerifier *ExposedChannelVerifierSession) ExposedConfigDigestFromConfigData(_chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) { + return _ExposedChannelVerifier.Contract.ExposedConfigDigestFromConfigData(&_ExposedChannelVerifier.CallOpts, _chainId, _contractAddress, _configCount, _signers, _offchainTransmitters, _f, _onchainConfig, _encodedConfigVersion, _encodedConfig) +} + +func (_ExposedChannelVerifier *ExposedChannelVerifierCallerSession) ExposedConfigDigestFromConfigData(_chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) { + return _ExposedChannelVerifier.Contract.ExposedConfigDigestFromConfigData(&_ExposedChannelVerifier.CallOpts, _chainId, _contractAddress, _configCount, _signers, _offchainTransmitters, _f, _onchainConfig, _encodedConfigVersion, _encodedConfig) +} + +func (_ExposedChannelVerifier *ExposedChannelVerifier) 
Address() common.Address { + return _ExposedChannelVerifier.address +} + +type ExposedChannelVerifierInterface interface { + ExposedConfigDigestFromConfigData(opts *bind.CallOpts, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) + + Address() common.Address +} diff --git a/core/gethwrappers/llo-feeds/generated/exposed_verifier/exposed_verifier.go b/core/gethwrappers/llo-feeds/generated/exposed_verifier/exposed_verifier.go new file mode 100644 index 00000000..c7ee746b --- /dev/null +++ b/core/gethwrappers/llo-feeds/generated/exposed_verifier/exposed_verifier.go @@ -0,0 +1,202 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package exposed_verifier + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var ExposedVerifierMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_feedId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_chainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_contractAddress\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"_configCount\",\"type\":\"uint64\"},{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"_offchainTransmitters\",\"type\":\"bytes32[]\"},{\"internalType\":\"uint8\",\"name\":\"_f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"_encodedConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_encodedConfig\",\"type\":\"bytes\"}],\"name\":\"exposedConfigDigestFromConfigData\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610696806100206000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c80630ebd702314610030575b600080fd5b61004361003e3660046103f7565b610055565b60405190815260200160405180910390f35b60006100a18c8c8c8c8c8c8c8c8c8080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508e92508d91506100b19050565b9c9b505050505050505050505050565b6000808b8b8b8b8b8b8b8b8b8b6040516020016100d79a999897969594939291906105a7565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e06000000000000000000000000000000000000000000000000000000000000179150509a9950505050505050505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461018357600080fd5b919050565b803567ffffffffffffffff8116811461018357600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610216576102166101a0565b604052919050565b600067ffffffffffffffff821115610238576102386101a0565b5060051b60200190565b600082601f83011261025357600080fd5b813560206102686102638361021e565b6101cf565b82815260059290921b8401810191818101908684111561028757600080fd5b8286015b848110156102a95761029c8161015f565b835291830191830161028b565b509695505050505050565b600082601f8301126102c557600080fd5b813560206102d56102638361021e565b82815260059290921b840181019181810190868411156102f457600080fd5b8286015b848110156102a957803583529183019183016102f8565b803560ff8116811461018357600080fd5b60008083601f84011261033257600080fd5b50813567ffffffffffffffff81111561034a57600080fd5b60208301915083602082850101111561036257600080fd5b9250929050565b600082601f83011261037a57600080fd5b813567ffffffffffffffff811115610394576103946101a0565b6103c560207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116016101cf565b8
181528460208386010111156103da57600080fd5b816020850160208301376000918101602001919091529392505050565b60008060008060008060008060008060006101408c8e03121561041957600080fd5b8b359a5060208c0135995061043060408d0161015f565b985061043e60608d01610188565b975067ffffffffffffffff8060808e0135111561045a57600080fd5b61046a8e60808f01358f01610242565b97508060a08e0135111561047d57600080fd5b61048d8e60a08f01358f016102b4565b965061049b60c08e0161030f565b95508060e08e013511156104ae57600080fd5b6104be8e60e08f01358f01610320565b90955093506104d06101008e01610188565b9250806101208e013511156104e457600080fd5b506104f68d6101208e01358e01610369565b90509295989b509295989b9093969950565b600081518084526020808501945080840160005b838110156105385781518752958201959082019060010161051c565b509495945050505050565b6000815180845260005b818110156105695760208185018101518683018201520161054d565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b8a815260208082018b905273ffffffffffffffffffffffffffffffffffffffff8a8116604084015267ffffffffffffffff8a1660608401526101406080840181905289519084018190526000926101608501928b820192855b8181101561061e578451831686529483019493830193600101610600565b505050505082810360a08401526106358189610508565b60ff881660c0850152905082810360e08401526106528187610543565b67ffffffffffffffff861661010085015290508281036101208401526106788185610543565b9d9c5050505050505050505050505056fea164736f6c6343000813000a", +} + +var ExposedVerifierABI = ExposedVerifierMetaData.ABI + +var ExposedVerifierBin = ExposedVerifierMetaData.Bin + +func DeployExposedVerifier(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *ExposedVerifier, error) { + parsed, err := ExposedVerifierMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, 
common.FromHex(ExposedVerifierBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &ExposedVerifier{address: address, abi: *parsed, ExposedVerifierCaller: ExposedVerifierCaller{contract: contract}, ExposedVerifierTransactor: ExposedVerifierTransactor{contract: contract}, ExposedVerifierFilterer: ExposedVerifierFilterer{contract: contract}}, nil +} + +type ExposedVerifier struct { + address common.Address + abi abi.ABI + ExposedVerifierCaller + ExposedVerifierTransactor + ExposedVerifierFilterer +} + +type ExposedVerifierCaller struct { + contract *bind.BoundContract +} + +type ExposedVerifierTransactor struct { + contract *bind.BoundContract +} + +type ExposedVerifierFilterer struct { + contract *bind.BoundContract +} + +type ExposedVerifierSession struct { + Contract *ExposedVerifier + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type ExposedVerifierCallerSession struct { + Contract *ExposedVerifierCaller + CallOpts bind.CallOpts +} + +type ExposedVerifierTransactorSession struct { + Contract *ExposedVerifierTransactor + TransactOpts bind.TransactOpts +} + +type ExposedVerifierRaw struct { + Contract *ExposedVerifier +} + +type ExposedVerifierCallerRaw struct { + Contract *ExposedVerifierCaller +} + +type ExposedVerifierTransactorRaw struct { + Contract *ExposedVerifierTransactor +} + +func NewExposedVerifier(address common.Address, backend bind.ContractBackend) (*ExposedVerifier, error) { + abi, err := abi.JSON(strings.NewReader(ExposedVerifierABI)) + if err != nil { + return nil, err + } + contract, err := bindExposedVerifier(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &ExposedVerifier{address: address, abi: abi, ExposedVerifierCaller: ExposedVerifierCaller{contract: contract}, ExposedVerifierTransactor: ExposedVerifierTransactor{contract: contract}, ExposedVerifierFilterer: ExposedVerifierFilterer{contract: contract}}, nil +} + +func 
NewExposedVerifierCaller(address common.Address, caller bind.ContractCaller) (*ExposedVerifierCaller, error) { + contract, err := bindExposedVerifier(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ExposedVerifierCaller{contract: contract}, nil +} + +func NewExposedVerifierTransactor(address common.Address, transactor bind.ContractTransactor) (*ExposedVerifierTransactor, error) { + contract, err := bindExposedVerifier(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ExposedVerifierTransactor{contract: contract}, nil +} + +func NewExposedVerifierFilterer(address common.Address, filterer bind.ContractFilterer) (*ExposedVerifierFilterer, error) { + contract, err := bindExposedVerifier(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ExposedVerifierFilterer{contract: contract}, nil +} + +func bindExposedVerifier(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ExposedVerifierMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_ExposedVerifier *ExposedVerifierRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ExposedVerifier.Contract.ExposedVerifierCaller.contract.Call(opts, result, method, params...) +} + +func (_ExposedVerifier *ExposedVerifierRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ExposedVerifier.Contract.ExposedVerifierTransactor.contract.Transfer(opts) +} + +func (_ExposedVerifier *ExposedVerifierRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ExposedVerifier.Contract.ExposedVerifierTransactor.contract.Transact(opts, method, params...) 
+} + +func (_ExposedVerifier *ExposedVerifierCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ExposedVerifier.Contract.contract.Call(opts, result, method, params...) +} + +func (_ExposedVerifier *ExposedVerifierTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ExposedVerifier.Contract.contract.Transfer(opts) +} + +func (_ExposedVerifier *ExposedVerifierTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ExposedVerifier.Contract.contract.Transact(opts, method, params...) +} + +func (_ExposedVerifier *ExposedVerifierCaller) ExposedConfigDigestFromConfigData(opts *bind.CallOpts, _feedId [32]byte, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) { + var out []interface{} + err := _ExposedVerifier.contract.Call(opts, &out, "exposedConfigDigestFromConfigData", _feedId, _chainId, _contractAddress, _configCount, _signers, _offchainTransmitters, _f, _onchainConfig, _encodedConfigVersion, _encodedConfig) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_ExposedVerifier *ExposedVerifierSession) ExposedConfigDigestFromConfigData(_feedId [32]byte, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) { + return _ExposedVerifier.Contract.ExposedConfigDigestFromConfigData(&_ExposedVerifier.CallOpts, _feedId, _chainId, _contractAddress, _configCount, _signers, _offchainTransmitters, _f, _onchainConfig, _encodedConfigVersion, 
_encodedConfig) +} + +func (_ExposedVerifier *ExposedVerifierCallerSession) ExposedConfigDigestFromConfigData(_feedId [32]byte, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) { + return _ExposedVerifier.Contract.ExposedConfigDigestFromConfigData(&_ExposedVerifier.CallOpts, _feedId, _chainId, _contractAddress, _configCount, _signers, _offchainTransmitters, _f, _onchainConfig, _encodedConfigVersion, _encodedConfig) +} + +func (_ExposedVerifier *ExposedVerifier) Address() common.Address { + return _ExposedVerifier.address +} + +type ExposedVerifierInterface interface { + ExposedConfigDigestFromConfigData(opts *bind.CallOpts, _feedId [32]byte, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) + + Address() common.Address +} diff --git a/core/gethwrappers/llo-feeds/generated/fee_manager/fee_manager.go b/core/gethwrappers/llo-feeds/generated/fee_manager/fee_manager.go new file mode 100644 index 00000000..a3255013 --- /dev/null +++ b/core/gethwrappers/llo-feeds/generated/fee_manager/fee_manager.go @@ -0,0 +1,1748 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package fee_manager + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type CommonAddressAndWeight struct { + Addr common.Address + Weight uint64 +} + +type CommonAsset struct { + AssetAddress common.Address + Amount *big.Int +} + +type IRewardManagerFeePayment struct { + PoolId [32]byte + Amount *big.Int +} + +var FeeManagerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_linkAddress\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_nativeAddress\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_proxyAddress\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_rewardManagerAddress\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ExpiredReport\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidAddress\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDeposit\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDiscount\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidQuote\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidReceivingAddress\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSurcharge\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"Unauthorized\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroDeficit\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"configDigest
\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"subscriber\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"assetAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCommon.Asset\",\"name\":\"fee\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"assetAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCommon.Asset\",\"name\":\"reward\",\"type\":\"tuple\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"appliedDiscount\",\"type\":\"uint256\"}],\"name\":\"DiscountApplied\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"poolId\",\"type\":\"bytes32\"},{\"internalType\":\"uint192\",\"name\":\"amount\",\"type\":\"uint192\"}],\"indexed\":false,\"internalType\":\"structIRewardManager.FeePayment[]\",\"name\":\"rewards\",\"type\":\"tuple[]\"}],\"name\":\"InsufficientLink\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"linkQuantity\",\"type\":\"uint256\"}],\"name\":\"LinkDeficitCleared\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"newSurcharge\",\"type\":\"uint64\"}],\"name\":\"NativeSurchargeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"addres
s\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"subscriber\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"discount\",\"type\":\"uint64\"}],\"name\":\"SubscriberDiscountUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"assetAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint192\",\"name\":\"quantity\",\"type\":\"uint192\"}],\"name\":\"Withdraw\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"subscriber\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"report\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"quoteAddress\",\"type\":\"address\"}],\"name\":\"getFeeAndReward\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"assetAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"internalType\":\"structCommon.Asset\",\"name\":\"\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"assetAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"internalType\":\"structCommon.Asset\",\"name\":\"\",\"type\":\"tuple\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"
stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_linkAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_nativeAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_proxyAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_rewardManager\",\"outputs\":[{\"internalType\":\"contractIRewardManager\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"linkAvailableForPayment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"payLinkDeficit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"payload\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"parameterPayload\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"subscriber\",\"type\":\"address\"}],\"name\":\"processFee\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes[]\",\"name\":\"payloads\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"parameterPayload\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"subscriber\",\"type\":\"address\"}],\"name\":\"processFeeBulk\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"inte
rnalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"s_linkDeficit\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_nativeSurcharge\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"s_subscriberDiscounts\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"weight\",\"type\":\"uint64\"}],\"internalType\":\"structCommon.AddressAndWeight[]\",\"name\":\"rewardRecipientAndWeights\",\"type\":\"tuple[]\"}],\"name\":\"setFeeRecipients\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"surcharge\",\"type\":\"uint64\"}],\"name\":\"setNativeSurcharge\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutab
ility\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"subscriber\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"discount\",\"type\":\"uint64\"}],\"name\":\"updateSubscriberDiscount\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"assetAddress\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint192\",\"name\":\"quantity\",\"type\":\"uint192\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x6101006040523480156200001257600080fd5b506040516200347338038062003473833981016040819052620000359162000288565b33806000816200008c5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000bf57620000bf81620001c0565b5050506001600160a01b0384161580620000e057506001600160a01b038316155b80620000f357506001600160a01b038216155b806200010657506001600160a01b038116155b15620001255760405163e6c4247b60e01b815260040160405180910390fd5b6001600160a01b03848116608081905284821660a05283821660c05290821660e081905260405163095ea7b360e01b81526004810191909152600019602482015263095ea7b3906044016020604051808303816000875af11580156200018f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001b59190620002e5565b505050505062000310565b336001600160a01b038216036200021a5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000083565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0e
b3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b03811681146200028357600080fd5b919050565b600080600080608085870312156200029f57600080fd5b620002aa856200026b565b9350620002ba602086016200026b565b9250620002ca604086016200026b565b9150620002da606086016200026b565b905092959194509250565b600060208284031215620002f857600080fd5b815180151581146200030957600080fd5b9392505050565b60805160a05160c05160e051613081620003f26000396000818161027501528181611462015281816115f001528181611e0b0152612059015260008181610335015281816107fb01528181610db601526115360152600081816102ee01528181610bd901528181610fd30152818161102a015281816112d701528181611d310152611dda0152600081816104ba0152818161097601528181610b8201528181610d1901528181610ec201528181610ff801528181611081015281816111c60152818161123301528181611273015281816119ac0152611ecc01526130816000f3fe60806040526004361061016a5760003560e01c806379ba5097116100cb578063dba45fe01161007f578063ea4b861b11610059578063ea4b861b146104a8578063f2fde38b146104dc578063f65df962146104fc57600080fd5b8063dba45fe01461040a578063e03dab1a1461041d578063e389d9a41461048857600080fd5b80638da5cb5b116100b05780638da5cb5b146103aa578063ce7817d1146103d5578063d09dc339146103f557600080fd5b806379ba50971461035757806387d6d8431461036c57600080fd5b80633aa5ac0711610122578063638786681161010757806363878668146102dc5780636c2f1a17146103105780636d1342cb1461032357600080fd5b80633aa5ac071461026357806350538094146102bc57600080fd5b8063181f5a7711610153578063181f5a77146101df5780631d4d84a21461022b57806332f5f7461461024d57600080fd5b8063013f542b1461016f57806301ffc9a7146101af575b600080fd5b34801561017b57600080fd5b5061019c61018a36600461265b565b60036020526000908152604090205481565b6040519081526020015b60405180910390f35b3480156101bb57600080fd5b506101cf6101ca366004612674565b61051c565b60405190151581526020016101a6565b3480156101eb57600080fd5b50604080518082018252601081527f4665654d616e6167657220322e302e3000000000000000000000000000000000602082015290516101a691906126da565b34801561023757600080fd5b5061024b610246366004612783565b6105b5565
b005b34801561025957600080fd5b5061019c60045481565b34801561026f57600080fd5b506102977f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016101a6565b3480156102c857600080fd5b5061024b6102d73660046127e6565b610749565b3480156102e857600080fd5b506102977f000000000000000000000000000000000000000000000000000000000000000081565b61024b61031e36600461284a565b6107e3565b34801561032f57600080fd5b506102977f000000000000000000000000000000000000000000000000000000000000000081565b34801561036357600080fd5b5061024b610a2a565b34801561037857600080fd5b5061019c6103873660046128f9565b600260209081526000938452604080852082529284528284209052825290205481565b3480156103b657600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff16610297565b3480156103e157600080fd5b5061024b6103f0366004612930565b610b2c565b34801561040157600080fd5b5061019c610ce8565b61024b610418366004612981565b610d9e565b34801561042957600080fd5b5061043d610438366004612adf565b610f3a565b60408051845173ffffffffffffffffffffffffffffffffffffffff9081168252602095860151868301528451169181019190915292909101516060830152608082015260a0016101a6565b34801561049457600080fd5b5061024b6104a336600461265b565b611339565b3480156104b457600080fd5b506102977f000000000000000000000000000000000000000000000000000000000000000081565b3480156104e857600080fd5b5061024b6104f7366004612b38565b61150a565b34801561050857600080fd5b5061024b610517366004612b55565b61151e565b60007fffffffff0000000000000000000000000000000000000000000000000000000082167fdba45fe00000000000000000000000000000000000000000000000000000000014806105af57507fffffffff0000000000000000000000000000000000000000000000000000000082167f6c2f1a1700000000000000000000000000000000000000000000000000000000145b92915050565b6105bd611660565b73ffffffffffffffffffffffffffffffffffffffff83166106925760008273ffffffffffffffffffffffffffffffffffffffff168277ffffffffffffffffffffffffffffffffffffffffffffffff1660405160006040518083038185875af1925050503d806000811461064c576040519150601f196
03f3d011682016040523d82523d6000602084013e610651565b606091505b505090508061068c576040517fef2af20100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b50505050565b6106cd73ffffffffffffffffffffffffffffffffffffffff84168377ffffffffffffffffffffffffffffffffffffffffffffffff84166116e3565b6040805133815273ffffffffffffffffffffffffffffffffffffffff848116602083015285168183015277ffffffffffffffffffffffffffffffffffffffffffffffff8316606082015290517f7ff78a71698bdb18dcca96f52ab25e0a1b146fb6a49adf8e6845299e49021f299181900360800190a15b505050565b610751611660565b670de0b6b3a764000067ffffffffffffffff8216111561079d576040517f05e8ac2900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff811660048190556040519081527f08f7c0d17932ddb8523bc06754d42ff19ebc77d76a8b9bfde02c28ab1ed3d6399060200160405180910390a150565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610852576040517f82b4290000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008467ffffffffffffffff81111561086d5761086d612a05565b6040519080825280602002602001820160405280156108a657816020015b6108936125ce565b81526020019060019003908161088b5790505b5090506000806000805b888110156109f15760008060006108ec8d8d868181106108d2576108d2612bd4565b90506020028101906108e49190612c03565b8d8d8d6117b7565b92509250925082602001516000146109dd5760405180608001604052808e8e8781811061091b5761091b612bd4565b905060200281019061092d9190612c03565b61093691612c68565b81526020018481526020018381526020018281525088868061095790612cd3565b97508151811061096957610969612bd4565b60200260200101819052507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16836000015173ffffffffffffffffffffffffffffffffffffffff16036109d6578660010196506109dd565b8560010195505b505050806109ea90612cd3565b90506108b0565b50821515806109ff57508115155b15610a1557610a10858585856118c7565b610a1f565b610a1f85346
120db565b505050505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610ab0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610b34611660565b670de0b6b3a764000067ffffffffffffffff82161115610b80576040517f997ea36000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614158015610c2857507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b15610c5f576040517fe6c4247b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff848116600081815260026020908152604080832088845282528083209487168084529482529182902067ffffffffffffffff86169081905582519485529084015285927f5eba5a8afa39780f0f99b6cbeb95f3da6a7040ca00abd46bdc91a0a060134139910160405180910390a350505050565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201526000907f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906370a0823190602401602060405180830381865afa158015610d75573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d999190612d0b565b905090565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610e0d576040517f82b42900000000000000000000000000000000000000000000000000000000008152600
40160405180910390fd5b6000806000610e1f88888888886117b7565b9250925092508260200151600003610e4357610e3b84346120db565b505050610f33565b604080516001808252818301909252600091816020015b610e626125ce565b815260200190600190039081610e5a575050604080516080810190915290915080610e8d8a8c612c68565b81526020018581526020018481526020018381525081600081518110610eb557610eb5612bd4565b60200260200101819052507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16846000015173ffffffffffffffffffffffffffffffffffffffff1603610f2557610a108582600160006118c7565b610a1f8582600060016118c7565b5050505050565b6040805180820182526000808252602080830182905283518085018552828152808201839052845180860186528381528083018490528551808701909652838652918501839052929382610f8d88612d24565b90507fffff00000000000000000000000000000000000000000000000000000000000080821690810161102857505073ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000811683527f0000000000000000000000000000000000000000000000000000000000000000168152909350915060009050611330565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168873ffffffffffffffffffffffffffffffffffffffff16141580156110d057507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168873ffffffffffffffffffffffffffffffffffffffff1614155b15611107576040517ff861803000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008060008b8060200190518101906111209190612d7d565b77ffffffffffffffffffffffffffffffffffffffffffffffff91821698509116955063ffffffff1693505050428210159050611188576040517fb6c405f500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff808e16600090815260026020908152604080832089845282528083208f851684529091529020547f000000000000000000000000000000000000000000000000000000000000000090911687526
112176111ff82670de0b6b3a7640000612def565b6112099086612e02565b670de0b6b3a7640000612128565b602088015273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000008116908d16036112a45773ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016885260208088015190890152611321565b6004546000906112c0906111ff90670de0b6b3a7640000612e19565b73ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000168a52905061131a61131083670de0b6b3a7640000612def565b6112099083612e02565b60208a0152505b96995094975094955050505050505b93509350939050565b611341611660565b6000818152600360205260408120549081900361138a576040517f03aad31200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000828152600360205260408082208290558051600180825281830190925290816020015b60408051808201909152600080825260208201528152602001906001900390816113af57905050905060405180604001604052808481526020018377ffffffffffffffffffffffffffffffffffffffffffffffff168152508160008151811061141a5761141a612bd4565b60209081029190910101526040517fb0d9fa1900000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169063b0d9fa19906114999084903090600401612e8c565b600060405180830381600087803b1580156114b357600080fd5b505af11580156114c7573d6000803e3d6000fd5b50505050827f843f0b103e50b42b08f9d30f12f961845a6d02623730872e24644899c0dd9895836040516114fd91815260200190565b60405180910390a2505050565b611512611660565b61151b81612160565b50565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161480159061157c575060005473ffffffffffffffffffffffffffffffffffffffff163314155b156115b3576040517f82b4290000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040517f14060f2300000000000000000000000000000000000000000000000
000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016906314060f239061162990869086908690600401612ec4565b600060405180830381600087803b15801561164357600080fd5b505af1158015611657573d6000803e3d6000fd5b50505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff1633146116e1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610aa7565b565b60405173ffffffffffffffffffffffffffffffffffffffff83166024820152604481018290526107449084907fa9059cbb00000000000000000000000000000000000000000000000000000000906064015b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff0000000000000000000000000000000000000000000000000000000090931692909217909152612255565b6040805180820190915260008082526020820152604080518082019091526000808252602082015260003073ffffffffffffffffffffffffffffffffffffffff851603611830576040517fe6c4247b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061183e888a018a612f44565b91505060008161184d90612d24565b905060007e010000000000000000000000000000000000000000000000000000000000007fffff0000000000000000000000000000000000000000000000000000000000008316146118a8576118a5888a018a612b38565b90505b6118b3878483610f3a565b955095509550505050955095509592505050565b60008267ffffffffffffffff8111156118e2576118e2612a05565b60405190808252806020026020018201604052801561192757816020015b60408051808201909152600080825260208201528152602001906001900390816119005790505b50905060008267ffffffffffffffff81111561194557611945612a05565b60405190808252806020026020018201604052801561198a57816020015b60408051808201909152600080825260208201528152602001906001900390816119635790505b50905060008080808061199d888a612e19565b905060005b81811015611cec577f0000000000000000000000000000000
00000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168b82815181106119f3576119f3612bd4565b6020026020010151602001516000015173ffffffffffffffffffffffffffffffffffffffff1603611ab95760405180604001604052808c8381518110611a3b57611a3b612bd4565b60200260200101516000015181526020018c8381518110611a5e57611a5e612bd4565b6020026020010151604001516020015177ffffffffffffffffffffffffffffffffffffffffffffffff16815250888580611a9790612cd3565b965081518110611aa957611aa9612bd4565b6020026020010181905250611bae565b60405180604001604052808c8381518110611ad657611ad6612bd4565b60200260200101516000015181526020018c8381518110611af957611af9612bd4565b6020026020010151604001516020015177ffffffffffffffffffffffffffffffffffffffffffffffff16815250878480611b3290612cd3565b955081518110611b4457611b44612bd4565b60200260200101819052508a8181518110611b6157611b61612bd4565b6020026020010151602001516020015186611b7c9190612e19565b95508a8181518110611b9057611b90612bd4565b6020026020010151604001516020015185611bab9190612e19565b94505b8a8181518110611bc057611bc0612bd4565b602002602001015160600151600014611cdc578b73ffffffffffffffffffffffffffffffffffffffff168b8281518110611bfc57611bfc612bd4565b6020026020010151600001517f88b15eb682210089cddf967648e2cb2a4535aeadc8f8f36050922e33c04e71258d8481518110611c3b57611c3b612bd4565b6020026020010151602001518e8581518110611c5957611c59612bd4565b6020026020010151604001518f8681518110611c7757611c77612bd4565b602002602001015160600151604051611cd393929190835173ffffffffffffffffffffffffffffffffffffffff908116825260209485015185830152835116604082015291909201516060820152608081019190915260a00190565b60405180910390a35b611ce581612cd3565b90506119a2565b5060003415611dba5734861115611d2f576040517fb2e532de00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663d0e30db0876040518263ffffffff1660e01b81526004016000604051808303818588803b158015611d9757600080fd5b505af1158015611dab573d6
000803e3d6000fd5b50505050508534039050611e02565b8515611e0257611e0273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000168d3089612361565b875115611e97577f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663b0d9fa19898e6040518363ffffffff1660e01b8152600401611e64929190612e8c565b600060405180830381600087803b158015611e7e57600080fd5b505af1158015611e92573d6000803e3d6000fd5b505050505b8651156120c3576040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906370a0823190602401602060405180830381865afa158015611f28573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611f4c9190612d0b565b85111561201c5760005b8751811015611fdf57878181518110611f7157611f71612bd4565b60200260200101516020015177ffffffffffffffffffffffffffffffffffffffffffffffff16600360008a8481518110611fad57611fad612bd4565b60209081029190910181015151825281019190915260400160002080549091019055611fd881612cd3565b9050611f56565b507ff52e5907b69d97c33392936c12d78b494463b78c5b72df50b4c497eee5720b678760405161200f9190612fe8565b60405180910390a16120c3565b6040517fb0d9fa1900000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169063b0d9fa1990612090908a903090600401612e8c565b600060405180830381600087803b1580156120aa57600080fd5b505af11580156120be573d6000803e3d6000fd5b505050505b6120cd8c826120db565b505050505050505050505050565b80156121245760405173ffffffffffffffffffffffffffffffffffffffff83169082156108fc029083906000818181858888f19350505050158015610744573d6000803e3d6000fd5b5050565b60008215612156578161213c600185612def565b6121469190612ffb565b612151906001612e19565b612159565b60005b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff8216036121df576040517f08c379a000000000000
000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610aa7565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60006122b7826040518060400160405280602081526020017f5361666545524332303a206c6f772d6c6576656c2063616c6c206661696c65648152508573ffffffffffffffffffffffffffffffffffffffff166123bf9092919063ffffffff16565b80519091501561074457808060200190518101906122d59190613036565b610744576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f5361666545524332303a204552433230206f7065726174696f6e20646964206e60448201527f6f742073756363656564000000000000000000000000000000000000000000006064820152608401610aa7565b60405173ffffffffffffffffffffffffffffffffffffffff8085166024830152831660448201526064810182905261068c9085907f23b872dd0000000000000000000000000000000000000000000000000000000090608401611735565b60606123ce84846000856123d6565b949350505050565b606082471015612468576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f416464726573733a20696e73756666696369656e742062616c616e636520666f60448201527f722063616c6c00000000000000000000000000000000000000000000000000006064820152608401610aa7565b6000808673ffffffffffffffffffffffffffffffffffffffff1685876040516124919190613058565b60006040518083038185875af1925050503d80600081146124ce576040519150601f19603f3d011682016040523d82523d6000602084013e6124d3565b606091505b50915091506124e4878383876124ef565b979650505050505050565b6060831561258557825160000361257e5773ffffffffffffffffffffffffffffffffffffffff85163b61257e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e7472616374000000604
4820152606401610aa7565b50816123ce565b6123ce838381511561259a5781518083602001fd5b806040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610aa791906126da565b6040518060800160405280600080191681526020016126166040518060400160405280600073ffffffffffffffffffffffffffffffffffffffff168152602001600081525090565b815260200161264e6040518060400160405280600073ffffffffffffffffffffffffffffffffffffffff168152602001600081525090565b8152602001600081525090565b60006020828403121561266d57600080fd5b5035919050565b60006020828403121561268657600080fd5b81357fffffffff000000000000000000000000000000000000000000000000000000008116811461215957600080fd5b60005b838110156126d15781810151838201526020016126b9565b50506000910152565b60208152600082518060208401526126f98160408501602087016126b6565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b73ffffffffffffffffffffffffffffffffffffffff8116811461151b57600080fd5b80356127588161272b565b919050565b77ffffffffffffffffffffffffffffffffffffffffffffffff8116811461151b57600080fd5b60008060006060848603121561279857600080fd5b83356127a38161272b565b925060208401356127b38161272b565b915060408401356127c38161275d565b809150509250925092565b803567ffffffffffffffff8116811461275857600080fd5b6000602082840312156127f857600080fd5b612159826127ce565b60008083601f84011261281357600080fd5b50813567ffffffffffffffff81111561282b57600080fd5b60208301915083602082850101111561284357600080fd5b9250929050565b60008060008060006060868803121561286257600080fd5b853567ffffffffffffffff8082111561287a57600080fd5b818801915088601f83011261288e57600080fd5b81358181111561289d57600080fd5b8960208260051b85010111156128b257600080fd5b6020928301975095509087013590808211156128cd57600080fd5b506128da88828901612801565b90945092506128ed90506040870161274d565b90509295509295909350565b60008060006060848603121561290e57600080fd5b83356129198161272b565b92506020840135915060408401356127c38161272b565b6000806000806080858703121561294657600080fd5b84356129518161272b565b9350602085013592506
0408501356129688161272b565b9150612976606086016127ce565b905092959194509250565b60008060008060006060868803121561299957600080fd5b853567ffffffffffffffff808211156129b157600080fd5b6129bd89838a01612801565b909750955060208801359150808211156129d657600080fd5b506129e388828901612801565b90945092505060408601356129f78161272b565b809150509295509295909350565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600082601f830112612a4557600080fd5b813567ffffffffffffffff80821115612a6057612a60612a05565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908282118183101715612aa657612aa6612a05565b81604052838152866020858801011115612abf57600080fd5b836020870160208301376000602085830101528094505050505092915050565b600080600060608486031215612af457600080fd5b8335612aff8161272b565b9250602084013567ffffffffffffffff811115612b1b57600080fd5b612b2786828701612a34565b92505060408401356127c38161272b565b600060208284031215612b4a57600080fd5b81356121598161272b565b600080600060408486031215612b6a57600080fd5b83359250602084013567ffffffffffffffff80821115612b8957600080fd5b818601915086601f830112612b9d57600080fd5b813581811115612bac57600080fd5b8760208260061b8501011115612bc157600080fd5b6020830194508093505050509250925092565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112612c3857600080fd5b83018035915067ffffffffffffffff821115612c5357600080fd5b60200191503681900382131561284357600080fd5b803560208310156105af577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff602084900360031b1b1692915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203612d0457612d04612ca4565b5060010190565b600060208284031215612d1d57600080fd5b5051919050565b80516020808301519190811015612d63577ffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffff8160200360031b1b821691505b50919050565b805163ffffffff8116811461275857600080fd5b60008060008060008060c08789031215612d9657600080fd5b86519550612da660208801612d69565b9450612db460408801612d69565b93506060870151612dc48161275d565b6080880151909350612dd58161275d565b9150612de360a08801612d69565b90509295509295509295565b818103818111156105af576105af612ca4565b80820281158282048414176105af576105af612ca4565b808201808211156105af576105af612ca4565b600081518084526020808501945080840160005b83811015612e815781518051885283015177ffffffffffffffffffffffffffffffffffffffffffffffff168388015260409096019590820190600101612e40565b509495945050505050565b604081526000612e9f6040830185612e2c565b905073ffffffffffffffffffffffffffffffffffffffff831660208301529392505050565b8381526040602080830182905282820184905260009190859060608501845b87811015612f37578335612ef68161272b565b73ffffffffffffffffffffffffffffffffffffffff16825267ffffffffffffffff612f228585016127ce565b16828401529284019290840190600101612ee3565b5098975050505050505050565b60008060808385031215612f5757600080fd5b83601f840112612f6657600080fd5b6040516060810167ffffffffffffffff8282108183111715612f8a57612f8a612a05565b816040528291506060860187811115612fa257600080fd5b865b81811015612fbc578035845260209384019301612fa4565b5092945091359180831115612fd057600080fd5b5050612fde85828601612a34565b9150509250929050565b6020815260006121596020830184612e2c565b600082613031577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b60006020828403121561304857600080fd5b8151801515811461215957600080fd5b6000825161306a8184602087016126b6565b919091019291505056fea164736f6c6343000813000a", +} + +var FeeManagerABI = FeeManagerMetaData.ABI + +var FeeManagerBin = FeeManagerMetaData.Bin + +func DeployFeeManager(auth *bind.TransactOpts, backend bind.ContractBackend, _linkAddress common.Address, _nativeAddress common.Address, _proxyAddress common.Address, _rewardManagerAddress common.Address) (common.Address, 
*types.Transaction, *FeeManager, error) { + parsed, err := FeeManagerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(FeeManagerBin), backend, _linkAddress, _nativeAddress, _proxyAddress, _rewardManagerAddress) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &FeeManager{address: address, abi: *parsed, FeeManagerCaller: FeeManagerCaller{contract: contract}, FeeManagerTransactor: FeeManagerTransactor{contract: contract}, FeeManagerFilterer: FeeManagerFilterer{contract: contract}}, nil +} + +type FeeManager struct { + address common.Address + abi abi.ABI + FeeManagerCaller + FeeManagerTransactor + FeeManagerFilterer +} + +type FeeManagerCaller struct { + contract *bind.BoundContract +} + +type FeeManagerTransactor struct { + contract *bind.BoundContract +} + +type FeeManagerFilterer struct { + contract *bind.BoundContract +} + +type FeeManagerSession struct { + Contract *FeeManager + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type FeeManagerCallerSession struct { + Contract *FeeManagerCaller + CallOpts bind.CallOpts +} + +type FeeManagerTransactorSession struct { + Contract *FeeManagerTransactor + TransactOpts bind.TransactOpts +} + +type FeeManagerRaw struct { + Contract *FeeManager +} + +type FeeManagerCallerRaw struct { + Contract *FeeManagerCaller +} + +type FeeManagerTransactorRaw struct { + Contract *FeeManagerTransactor +} + +func NewFeeManager(address common.Address, backend bind.ContractBackend) (*FeeManager, error) { + abi, err := abi.JSON(strings.NewReader(FeeManagerABI)) + if err != nil { + return nil, err + } + contract, err := bindFeeManager(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &FeeManager{address: address, abi: abi, FeeManagerCaller: 
FeeManagerCaller{contract: contract}, FeeManagerTransactor: FeeManagerTransactor{contract: contract}, FeeManagerFilterer: FeeManagerFilterer{contract: contract}}, nil +} + +func NewFeeManagerCaller(address common.Address, caller bind.ContractCaller) (*FeeManagerCaller, error) { + contract, err := bindFeeManager(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &FeeManagerCaller{contract: contract}, nil +} + +func NewFeeManagerTransactor(address common.Address, transactor bind.ContractTransactor) (*FeeManagerTransactor, error) { + contract, err := bindFeeManager(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &FeeManagerTransactor{contract: contract}, nil +} + +func NewFeeManagerFilterer(address common.Address, filterer bind.ContractFilterer) (*FeeManagerFilterer, error) { + contract, err := bindFeeManager(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &FeeManagerFilterer{contract: contract}, nil +} + +func bindFeeManager(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := FeeManagerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_FeeManager *FeeManagerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FeeManager.Contract.FeeManagerCaller.contract.Call(opts, result, method, params...) +} + +func (_FeeManager *FeeManagerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FeeManager.Contract.FeeManagerTransactor.contract.Transfer(opts) +} + +func (_FeeManager *FeeManagerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FeeManager.Contract.FeeManagerTransactor.contract.Transact(opts, method, params...) 
+} + +func (_FeeManager *FeeManagerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _FeeManager.Contract.contract.Call(opts, result, method, params...) +} + +func (_FeeManager *FeeManagerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FeeManager.Contract.contract.Transfer(opts) +} + +func (_FeeManager *FeeManagerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _FeeManager.Contract.contract.Transact(opts, method, params...) +} + +func (_FeeManager *FeeManagerCaller) GetFeeAndReward(opts *bind.CallOpts, subscriber common.Address, report []byte, quoteAddress common.Address) (CommonAsset, CommonAsset, *big.Int, error) { + var out []interface{} + err := _FeeManager.contract.Call(opts, &out, "getFeeAndReward", subscriber, report, quoteAddress) + + if err != nil { + return *new(CommonAsset), *new(CommonAsset), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(CommonAsset)).(*CommonAsset) + out1 := *abi.ConvertType(out[1], new(CommonAsset)).(*CommonAsset) + out2 := *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + + return out0, out1, out2, err + +} + +func (_FeeManager *FeeManagerSession) GetFeeAndReward(subscriber common.Address, report []byte, quoteAddress common.Address) (CommonAsset, CommonAsset, *big.Int, error) { + return _FeeManager.Contract.GetFeeAndReward(&_FeeManager.CallOpts, subscriber, report, quoteAddress) +} + +func (_FeeManager *FeeManagerCallerSession) GetFeeAndReward(subscriber common.Address, report []byte, quoteAddress common.Address) (CommonAsset, CommonAsset, *big.Int, error) { + return _FeeManager.Contract.GetFeeAndReward(&_FeeManager.CallOpts, subscriber, report, quoteAddress) +} + +func (_FeeManager *FeeManagerCaller) ILinkAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _FeeManager.contract.Call(opts, &out, 
"i_linkAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FeeManager *FeeManagerSession) ILinkAddress() (common.Address, error) { + return _FeeManager.Contract.ILinkAddress(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCallerSession) ILinkAddress() (common.Address, error) { + return _FeeManager.Contract.ILinkAddress(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCaller) INativeAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _FeeManager.contract.Call(opts, &out, "i_nativeAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FeeManager *FeeManagerSession) INativeAddress() (common.Address, error) { + return _FeeManager.Contract.INativeAddress(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCallerSession) INativeAddress() (common.Address, error) { + return _FeeManager.Contract.INativeAddress(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCaller) IProxyAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _FeeManager.contract.Call(opts, &out, "i_proxyAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FeeManager *FeeManagerSession) IProxyAddress() (common.Address, error) { + return _FeeManager.Contract.IProxyAddress(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCallerSession) IProxyAddress() (common.Address, error) { + return _FeeManager.Contract.IProxyAddress(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCaller) IRewardManager(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _FeeManager.contract.Call(opts, &out, 
"i_rewardManager") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FeeManager *FeeManagerSession) IRewardManager() (common.Address, error) { + return _FeeManager.Contract.IRewardManager(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCallerSession) IRewardManager() (common.Address, error) { + return _FeeManager.Contract.IRewardManager(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCaller) LinkAvailableForPayment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FeeManager.contract.Call(opts, &out, "linkAvailableForPayment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FeeManager *FeeManagerSession) LinkAvailableForPayment() (*big.Int, error) { + return _FeeManager.Contract.LinkAvailableForPayment(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCallerSession) LinkAvailableForPayment() (*big.Int, error) { + return _FeeManager.Contract.LinkAvailableForPayment(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _FeeManager.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_FeeManager *FeeManagerSession) Owner() (common.Address, error) { + return _FeeManager.Contract.Owner(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCallerSession) Owner() (common.Address, error) { + return _FeeManager.Contract.Owner(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCaller) SLinkDeficit(opts *bind.CallOpts, arg0 [32]byte) (*big.Int, error) { + var out []interface{} + err := _FeeManager.contract.Call(opts, &out, "s_linkDeficit", arg0) 
+ + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FeeManager *FeeManagerSession) SLinkDeficit(arg0 [32]byte) (*big.Int, error) { + return _FeeManager.Contract.SLinkDeficit(&_FeeManager.CallOpts, arg0) +} + +func (_FeeManager *FeeManagerCallerSession) SLinkDeficit(arg0 [32]byte) (*big.Int, error) { + return _FeeManager.Contract.SLinkDeficit(&_FeeManager.CallOpts, arg0) +} + +func (_FeeManager *FeeManagerCaller) SNativeSurcharge(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _FeeManager.contract.Call(opts, &out, "s_nativeSurcharge") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FeeManager *FeeManagerSession) SNativeSurcharge() (*big.Int, error) { + return _FeeManager.Contract.SNativeSurcharge(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCallerSession) SNativeSurcharge() (*big.Int, error) { + return _FeeManager.Contract.SNativeSurcharge(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCaller) SSubscriberDiscounts(opts *bind.CallOpts, arg0 common.Address, arg1 [32]byte, arg2 common.Address) (*big.Int, error) { + var out []interface{} + err := _FeeManager.contract.Call(opts, &out, "s_subscriberDiscounts", arg0, arg1, arg2) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_FeeManager *FeeManagerSession) SSubscriberDiscounts(arg0 common.Address, arg1 [32]byte, arg2 common.Address) (*big.Int, error) { + return _FeeManager.Contract.SSubscriberDiscounts(&_FeeManager.CallOpts, arg0, arg1, arg2) +} + +func (_FeeManager *FeeManagerCallerSession) SSubscriberDiscounts(arg0 common.Address, arg1 [32]byte, arg2 common.Address) (*big.Int, error) { + return _FeeManager.Contract.SSubscriberDiscounts(&_FeeManager.CallOpts, arg0, 
arg1, arg2) +} + +func (_FeeManager *FeeManagerCaller) SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) { + var out []interface{} + err := _FeeManager.contract.Call(opts, &out, "supportsInterface", interfaceId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_FeeManager *FeeManagerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _FeeManager.Contract.SupportsInterface(&_FeeManager.CallOpts, interfaceId) +} + +func (_FeeManager *FeeManagerCallerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _FeeManager.Contract.SupportsInterface(&_FeeManager.CallOpts, interfaceId) +} + +func (_FeeManager *FeeManagerCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _FeeManager.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_FeeManager *FeeManagerSession) TypeAndVersion() (string, error) { + return _FeeManager.Contract.TypeAndVersion(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerCallerSession) TypeAndVersion() (string, error) { + return _FeeManager.Contract.TypeAndVersion(&_FeeManager.CallOpts) +} + +func (_FeeManager *FeeManagerTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _FeeManager.contract.Transact(opts, "acceptOwnership") +} + +func (_FeeManager *FeeManagerSession) AcceptOwnership() (*types.Transaction, error) { + return _FeeManager.Contract.AcceptOwnership(&_FeeManager.TransactOpts) +} + +func (_FeeManager *FeeManagerTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _FeeManager.Contract.AcceptOwnership(&_FeeManager.TransactOpts) +} + +func (_FeeManager *FeeManagerTransactor) PayLinkDeficit(opts *bind.TransactOpts, configDigest [32]byte) 
(*types.Transaction, error) { + return _FeeManager.contract.Transact(opts, "payLinkDeficit", configDigest) +} + +func (_FeeManager *FeeManagerSession) PayLinkDeficit(configDigest [32]byte) (*types.Transaction, error) { + return _FeeManager.Contract.PayLinkDeficit(&_FeeManager.TransactOpts, configDigest) +} + +func (_FeeManager *FeeManagerTransactorSession) PayLinkDeficit(configDigest [32]byte) (*types.Transaction, error) { + return _FeeManager.Contract.PayLinkDeficit(&_FeeManager.TransactOpts, configDigest) +} + +func (_FeeManager *FeeManagerTransactor) ProcessFee(opts *bind.TransactOpts, payload []byte, parameterPayload []byte, subscriber common.Address) (*types.Transaction, error) { + return _FeeManager.contract.Transact(opts, "processFee", payload, parameterPayload, subscriber) +} + +func (_FeeManager *FeeManagerSession) ProcessFee(payload []byte, parameterPayload []byte, subscriber common.Address) (*types.Transaction, error) { + return _FeeManager.Contract.ProcessFee(&_FeeManager.TransactOpts, payload, parameterPayload, subscriber) +} + +func (_FeeManager *FeeManagerTransactorSession) ProcessFee(payload []byte, parameterPayload []byte, subscriber common.Address) (*types.Transaction, error) { + return _FeeManager.Contract.ProcessFee(&_FeeManager.TransactOpts, payload, parameterPayload, subscriber) +} + +func (_FeeManager *FeeManagerTransactor) ProcessFeeBulk(opts *bind.TransactOpts, payloads [][]byte, parameterPayload []byte, subscriber common.Address) (*types.Transaction, error) { + return _FeeManager.contract.Transact(opts, "processFeeBulk", payloads, parameterPayload, subscriber) +} + +func (_FeeManager *FeeManagerSession) ProcessFeeBulk(payloads [][]byte, parameterPayload []byte, subscriber common.Address) (*types.Transaction, error) { + return _FeeManager.Contract.ProcessFeeBulk(&_FeeManager.TransactOpts, payloads, parameterPayload, subscriber) +} + +func (_FeeManager *FeeManagerTransactorSession) ProcessFeeBulk(payloads [][]byte, parameterPayload []byte, 
subscriber common.Address) (*types.Transaction, error) { + return _FeeManager.Contract.ProcessFeeBulk(&_FeeManager.TransactOpts, payloads, parameterPayload, subscriber) +} + +func (_FeeManager *FeeManagerTransactor) SetFeeRecipients(opts *bind.TransactOpts, configDigest [32]byte, rewardRecipientAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _FeeManager.contract.Transact(opts, "setFeeRecipients", configDigest, rewardRecipientAndWeights) +} + +func (_FeeManager *FeeManagerSession) SetFeeRecipients(configDigest [32]byte, rewardRecipientAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _FeeManager.Contract.SetFeeRecipients(&_FeeManager.TransactOpts, configDigest, rewardRecipientAndWeights) +} + +func (_FeeManager *FeeManagerTransactorSession) SetFeeRecipients(configDigest [32]byte, rewardRecipientAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _FeeManager.Contract.SetFeeRecipients(&_FeeManager.TransactOpts, configDigest, rewardRecipientAndWeights) +} + +func (_FeeManager *FeeManagerTransactor) SetNativeSurcharge(opts *bind.TransactOpts, surcharge uint64) (*types.Transaction, error) { + return _FeeManager.contract.Transact(opts, "setNativeSurcharge", surcharge) +} + +func (_FeeManager *FeeManagerSession) SetNativeSurcharge(surcharge uint64) (*types.Transaction, error) { + return _FeeManager.Contract.SetNativeSurcharge(&_FeeManager.TransactOpts, surcharge) +} + +func (_FeeManager *FeeManagerTransactorSession) SetNativeSurcharge(surcharge uint64) (*types.Transaction, error) { + return _FeeManager.Contract.SetNativeSurcharge(&_FeeManager.TransactOpts, surcharge) +} + +func (_FeeManager *FeeManagerTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _FeeManager.contract.Transact(opts, "transferOwnership", to) +} + +func (_FeeManager *FeeManagerSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return 
_FeeManager.Contract.TransferOwnership(&_FeeManager.TransactOpts, to) +} + +func (_FeeManager *FeeManagerTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _FeeManager.Contract.TransferOwnership(&_FeeManager.TransactOpts, to) +} + +func (_FeeManager *FeeManagerTransactor) UpdateSubscriberDiscount(opts *bind.TransactOpts, subscriber common.Address, feedId [32]byte, token common.Address, discount uint64) (*types.Transaction, error) { + return _FeeManager.contract.Transact(opts, "updateSubscriberDiscount", subscriber, feedId, token, discount) +} + +func (_FeeManager *FeeManagerSession) UpdateSubscriberDiscount(subscriber common.Address, feedId [32]byte, token common.Address, discount uint64) (*types.Transaction, error) { + return _FeeManager.Contract.UpdateSubscriberDiscount(&_FeeManager.TransactOpts, subscriber, feedId, token, discount) +} + +func (_FeeManager *FeeManagerTransactorSession) UpdateSubscriberDiscount(subscriber common.Address, feedId [32]byte, token common.Address, discount uint64) (*types.Transaction, error) { + return _FeeManager.Contract.UpdateSubscriberDiscount(&_FeeManager.TransactOpts, subscriber, feedId, token, discount) +} + +func (_FeeManager *FeeManagerTransactor) Withdraw(opts *bind.TransactOpts, assetAddress common.Address, recipient common.Address, quantity *big.Int) (*types.Transaction, error) { + return _FeeManager.contract.Transact(opts, "withdraw", assetAddress, recipient, quantity) +} + +func (_FeeManager *FeeManagerSession) Withdraw(assetAddress common.Address, recipient common.Address, quantity *big.Int) (*types.Transaction, error) { + return _FeeManager.Contract.Withdraw(&_FeeManager.TransactOpts, assetAddress, recipient, quantity) +} + +func (_FeeManager *FeeManagerTransactorSession) Withdraw(assetAddress common.Address, recipient common.Address, quantity *big.Int) (*types.Transaction, error) { + return _FeeManager.Contract.Withdraw(&_FeeManager.TransactOpts, assetAddress, recipient, 
quantity) +} + +type FeeManagerDiscountAppliedIterator struct { + Event *FeeManagerDiscountApplied + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FeeManagerDiscountAppliedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FeeManagerDiscountApplied) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FeeManagerDiscountApplied) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FeeManagerDiscountAppliedIterator) Error() error { + return it.fail +} + +func (it *FeeManagerDiscountAppliedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FeeManagerDiscountApplied struct { + ConfigDigest [32]byte + Subscriber common.Address + Fee CommonAsset + Reward CommonAsset + AppliedDiscount *big.Int + Raw types.Log +} + +func (_FeeManager *FeeManagerFilterer) FilterDiscountApplied(opts *bind.FilterOpts, configDigest [][32]byte, subscriber []common.Address) (*FeeManagerDiscountAppliedIterator, error) { + + var configDigestRule []interface{} + for _, configDigestItem := range configDigest { + configDigestRule = append(configDigestRule, configDigestItem) + } + var subscriberRule []interface{} + for _, subscriberItem := range subscriber { + subscriberRule = append(subscriberRule, subscriberItem) + } + + logs, sub, err := _FeeManager.contract.FilterLogs(opts, "DiscountApplied", configDigestRule, subscriberRule) + if err != nil { + return nil, err + } + return &FeeManagerDiscountAppliedIterator{contract: 
_FeeManager.contract, event: "DiscountApplied", logs: logs, sub: sub}, nil +} + +func (_FeeManager *FeeManagerFilterer) WatchDiscountApplied(opts *bind.WatchOpts, sink chan<- *FeeManagerDiscountApplied, configDigest [][32]byte, subscriber []common.Address) (event.Subscription, error) { + + var configDigestRule []interface{} + for _, configDigestItem := range configDigest { + configDigestRule = append(configDigestRule, configDigestItem) + } + var subscriberRule []interface{} + for _, subscriberItem := range subscriber { + subscriberRule = append(subscriberRule, subscriberItem) + } + + logs, sub, err := _FeeManager.contract.WatchLogs(opts, "DiscountApplied", configDigestRule, subscriberRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FeeManagerDiscountApplied) + if err := _FeeManager.contract.UnpackLog(event, "DiscountApplied", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FeeManager *FeeManagerFilterer) ParseDiscountApplied(log types.Log) (*FeeManagerDiscountApplied, error) { + event := new(FeeManagerDiscountApplied) + if err := _FeeManager.contract.UnpackLog(event, "DiscountApplied", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FeeManagerInsufficientLinkIterator struct { + Event *FeeManagerInsufficientLink + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FeeManagerInsufficientLinkIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FeeManagerInsufficientLink) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FeeManagerInsufficientLink) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FeeManagerInsufficientLinkIterator) Error() error { + return it.fail +} + +func (it *FeeManagerInsufficientLinkIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FeeManagerInsufficientLink struct { + Rewards []IRewardManagerFeePayment + Raw types.Log +} + +func (_FeeManager *FeeManagerFilterer) FilterInsufficientLink(opts *bind.FilterOpts) (*FeeManagerInsufficientLinkIterator, error) { + + logs, sub, err := _FeeManager.contract.FilterLogs(opts, "InsufficientLink") + if err != nil { + return nil, err + } + return &FeeManagerInsufficientLinkIterator{contract: _FeeManager.contract, event: "InsufficientLink", logs: logs, sub: sub}, nil +} + +func (_FeeManager *FeeManagerFilterer) WatchInsufficientLink(opts *bind.WatchOpts, sink chan<- *FeeManagerInsufficientLink) (event.Subscription, error) { + + logs, sub, err := _FeeManager.contract.WatchLogs(opts, "InsufficientLink") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FeeManagerInsufficientLink) + if err := _FeeManager.contract.UnpackLog(event, "InsufficientLink", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FeeManager *FeeManagerFilterer) 
ParseInsufficientLink(log types.Log) (*FeeManagerInsufficientLink, error) { + event := new(FeeManagerInsufficientLink) + if err := _FeeManager.contract.UnpackLog(event, "InsufficientLink", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FeeManagerLinkDeficitClearedIterator struct { + Event *FeeManagerLinkDeficitCleared + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FeeManagerLinkDeficitClearedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FeeManagerLinkDeficitCleared) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FeeManagerLinkDeficitCleared) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FeeManagerLinkDeficitClearedIterator) Error() error { + return it.fail +} + +func (it *FeeManagerLinkDeficitClearedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FeeManagerLinkDeficitCleared struct { + ConfigDigest [32]byte + LinkQuantity *big.Int + Raw types.Log +} + +func (_FeeManager *FeeManagerFilterer) FilterLinkDeficitCleared(opts *bind.FilterOpts, configDigest [][32]byte) (*FeeManagerLinkDeficitClearedIterator, error) { + + var configDigestRule []interface{} + for _, configDigestItem := range configDigest { + configDigestRule = append(configDigestRule, configDigestItem) + } + + logs, sub, err := _FeeManager.contract.FilterLogs(opts, "LinkDeficitCleared", configDigestRule) + if err != nil { + return nil, err + } + return 
&FeeManagerLinkDeficitClearedIterator{contract: _FeeManager.contract, event: "LinkDeficitCleared", logs: logs, sub: sub}, nil +} + +func (_FeeManager *FeeManagerFilterer) WatchLinkDeficitCleared(opts *bind.WatchOpts, sink chan<- *FeeManagerLinkDeficitCleared, configDigest [][32]byte) (event.Subscription, error) { + + var configDigestRule []interface{} + for _, configDigestItem := range configDigest { + configDigestRule = append(configDigestRule, configDigestItem) + } + + logs, sub, err := _FeeManager.contract.WatchLogs(opts, "LinkDeficitCleared", configDigestRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FeeManagerLinkDeficitCleared) + if err := _FeeManager.contract.UnpackLog(event, "LinkDeficitCleared", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FeeManager *FeeManagerFilterer) ParseLinkDeficitCleared(log types.Log) (*FeeManagerLinkDeficitCleared, error) { + event := new(FeeManagerLinkDeficitCleared) + if err := _FeeManager.contract.UnpackLog(event, "LinkDeficitCleared", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FeeManagerNativeSurchargeUpdatedIterator struct { + Event *FeeManagerNativeSurchargeUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FeeManagerNativeSurchargeUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FeeManagerNativeSurchargeUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + 
it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FeeManagerNativeSurchargeUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FeeManagerNativeSurchargeUpdatedIterator) Error() error { + return it.fail +} + +func (it *FeeManagerNativeSurchargeUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FeeManagerNativeSurchargeUpdated struct { + NewSurcharge uint64 + Raw types.Log +} + +func (_FeeManager *FeeManagerFilterer) FilterNativeSurchargeUpdated(opts *bind.FilterOpts) (*FeeManagerNativeSurchargeUpdatedIterator, error) { + + logs, sub, err := _FeeManager.contract.FilterLogs(opts, "NativeSurchargeUpdated") + if err != nil { + return nil, err + } + return &FeeManagerNativeSurchargeUpdatedIterator{contract: _FeeManager.contract, event: "NativeSurchargeUpdated", logs: logs, sub: sub}, nil +} + +func (_FeeManager *FeeManagerFilterer) WatchNativeSurchargeUpdated(opts *bind.WatchOpts, sink chan<- *FeeManagerNativeSurchargeUpdated) (event.Subscription, error) { + + logs, sub, err := _FeeManager.contract.WatchLogs(opts, "NativeSurchargeUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FeeManagerNativeSurchargeUpdated) + if err := _FeeManager.contract.UnpackLog(event, "NativeSurchargeUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FeeManager *FeeManagerFilterer) ParseNativeSurchargeUpdated(log types.Log) 
(*FeeManagerNativeSurchargeUpdated, error) { + event := new(FeeManagerNativeSurchargeUpdated) + if err := _FeeManager.contract.UnpackLog(event, "NativeSurchargeUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FeeManagerOwnershipTransferRequestedIterator struct { + Event *FeeManagerOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FeeManagerOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FeeManagerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FeeManagerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FeeManagerOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *FeeManagerOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FeeManagerOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FeeManager *FeeManagerFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FeeManagerOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := 
_FeeManager.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &FeeManagerOwnershipTransferRequestedIterator{contract: _FeeManager.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_FeeManager *FeeManagerFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FeeManagerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FeeManager.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FeeManagerOwnershipTransferRequested) + if err := _FeeManager.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FeeManager *FeeManagerFilterer) ParseOwnershipTransferRequested(log types.Log) (*FeeManagerOwnershipTransferRequested, error) { + event := new(FeeManagerOwnershipTransferRequested) + if err := _FeeManager.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FeeManagerOwnershipTransferredIterator struct { + Event *FeeManagerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FeeManagerOwnershipTransferredIterator) 
Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FeeManagerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FeeManagerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FeeManagerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *FeeManagerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FeeManagerOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_FeeManager *FeeManagerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FeeManagerOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FeeManager.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &FeeManagerOwnershipTransferredIterator{contract: _FeeManager.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_FeeManager *FeeManagerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FeeManagerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, 
toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _FeeManager.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FeeManagerOwnershipTransferred) + if err := _FeeManager.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FeeManager *FeeManagerFilterer) ParseOwnershipTransferred(log types.Log) (*FeeManagerOwnershipTransferred, error) { + event := new(FeeManagerOwnershipTransferred) + if err := _FeeManager.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FeeManagerSubscriberDiscountUpdatedIterator struct { + Event *FeeManagerSubscriberDiscountUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FeeManagerSubscriberDiscountUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FeeManagerSubscriberDiscountUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FeeManagerSubscriberDiscountUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + 
+func (it *FeeManagerSubscriberDiscountUpdatedIterator) Error() error { + return it.fail +} + +func (it *FeeManagerSubscriberDiscountUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FeeManagerSubscriberDiscountUpdated struct { + Subscriber common.Address + FeedId [32]byte + Token common.Address + Discount uint64 + Raw types.Log +} + +func (_FeeManager *FeeManagerFilterer) FilterSubscriberDiscountUpdated(opts *bind.FilterOpts, subscriber []common.Address, feedId [][32]byte) (*FeeManagerSubscriberDiscountUpdatedIterator, error) { + + var subscriberRule []interface{} + for _, subscriberItem := range subscriber { + subscriberRule = append(subscriberRule, subscriberItem) + } + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _FeeManager.contract.FilterLogs(opts, "SubscriberDiscountUpdated", subscriberRule, feedIdRule) + if err != nil { + return nil, err + } + return &FeeManagerSubscriberDiscountUpdatedIterator{contract: _FeeManager.contract, event: "SubscriberDiscountUpdated", logs: logs, sub: sub}, nil +} + +func (_FeeManager *FeeManagerFilterer) WatchSubscriberDiscountUpdated(opts *bind.WatchOpts, sink chan<- *FeeManagerSubscriberDiscountUpdated, subscriber []common.Address, feedId [][32]byte) (event.Subscription, error) { + + var subscriberRule []interface{} + for _, subscriberItem := range subscriber { + subscriberRule = append(subscriberRule, subscriberItem) + } + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _FeeManager.contract.WatchLogs(opts, "SubscriberDiscountUpdated", subscriberRule, feedIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FeeManagerSubscriberDiscountUpdated) + if err := 
_FeeManager.contract.UnpackLog(event, "SubscriberDiscountUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FeeManager *FeeManagerFilterer) ParseSubscriberDiscountUpdated(log types.Log) (*FeeManagerSubscriberDiscountUpdated, error) { + event := new(FeeManagerSubscriberDiscountUpdated) + if err := _FeeManager.contract.UnpackLog(event, "SubscriberDiscountUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type FeeManagerWithdrawIterator struct { + Event *FeeManagerWithdraw + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *FeeManagerWithdrawIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(FeeManagerWithdraw) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(FeeManagerWithdraw) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *FeeManagerWithdrawIterator) Error() error { + return it.fail +} + +func (it *FeeManagerWithdrawIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type FeeManagerWithdraw struct { + AdminAddress common.Address + Recipient common.Address + AssetAddress common.Address + Quantity *big.Int + Raw types.Log +} + +func (_FeeManager *FeeManagerFilterer) FilterWithdraw(opts *bind.FilterOpts) (*FeeManagerWithdrawIterator, error) { + + 
logs, sub, err := _FeeManager.contract.FilterLogs(opts, "Withdraw") + if err != nil { + return nil, err + } + return &FeeManagerWithdrawIterator{contract: _FeeManager.contract, event: "Withdraw", logs: logs, sub: sub}, nil +} + +func (_FeeManager *FeeManagerFilterer) WatchWithdraw(opts *bind.WatchOpts, sink chan<- *FeeManagerWithdraw) (event.Subscription, error) { + + logs, sub, err := _FeeManager.contract.WatchLogs(opts, "Withdraw") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(FeeManagerWithdraw) + if err := _FeeManager.contract.UnpackLog(event, "Withdraw", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_FeeManager *FeeManagerFilterer) ParseWithdraw(log types.Log) (*FeeManagerWithdraw, error) { + event := new(FeeManagerWithdraw) + if err := _FeeManager.contract.UnpackLog(event, "Withdraw", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_FeeManager *FeeManager) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _FeeManager.abi.Events["DiscountApplied"].ID: + return _FeeManager.ParseDiscountApplied(log) + case _FeeManager.abi.Events["InsufficientLink"].ID: + return _FeeManager.ParseInsufficientLink(log) + case _FeeManager.abi.Events["LinkDeficitCleared"].ID: + return _FeeManager.ParseLinkDeficitCleared(log) + case _FeeManager.abi.Events["NativeSurchargeUpdated"].ID: + return _FeeManager.ParseNativeSurchargeUpdated(log) + case _FeeManager.abi.Events["OwnershipTransferRequested"].ID: + return _FeeManager.ParseOwnershipTransferRequested(log) + case _FeeManager.abi.Events["OwnershipTransferred"].ID: + return 
_FeeManager.ParseOwnershipTransferred(log) + case _FeeManager.abi.Events["SubscriberDiscountUpdated"].ID: + return _FeeManager.ParseSubscriberDiscountUpdated(log) + case _FeeManager.abi.Events["Withdraw"].ID: + return _FeeManager.ParseWithdraw(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (FeeManagerDiscountApplied) Topic() common.Hash { + return common.HexToHash("0x88b15eb682210089cddf967648e2cb2a4535aeadc8f8f36050922e33c04e7125") +} + +func (FeeManagerInsufficientLink) Topic() common.Hash { + return common.HexToHash("0xf52e5907b69d97c33392936c12d78b494463b78c5b72df50b4c497eee5720b67") +} + +func (FeeManagerLinkDeficitCleared) Topic() common.Hash { + return common.HexToHash("0x843f0b103e50b42b08f9d30f12f961845a6d02623730872e24644899c0dd9895") +} + +func (FeeManagerNativeSurchargeUpdated) Topic() common.Hash { + return common.HexToHash("0x08f7c0d17932ddb8523bc06754d42ff19ebc77d76a8b9bfde02c28ab1ed3d639") +} + +func (FeeManagerOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (FeeManagerOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (FeeManagerSubscriberDiscountUpdated) Topic() common.Hash { + return common.HexToHash("0x5eba5a8afa39780f0f99b6cbeb95f3da6a7040ca00abd46bdc91a0a060134139") +} + +func (FeeManagerWithdraw) Topic() common.Hash { + return common.HexToHash("0x7ff78a71698bdb18dcca96f52ab25e0a1b146fb6a49adf8e6845299e49021f29") +} + +func (_FeeManager *FeeManager) Address() common.Address { + return _FeeManager.address +} + +type FeeManagerInterface interface { + GetFeeAndReward(opts *bind.CallOpts, subscriber common.Address, report []byte, quoteAddress common.Address) (CommonAsset, CommonAsset, *big.Int, error) + + ILinkAddress(opts *bind.CallOpts) (common.Address, error) + + 
INativeAddress(opts *bind.CallOpts) (common.Address, error) + + IProxyAddress(opts *bind.CallOpts) (common.Address, error) + + IRewardManager(opts *bind.CallOpts) (common.Address, error) + + LinkAvailableForPayment(opts *bind.CallOpts) (*big.Int, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SLinkDeficit(opts *bind.CallOpts, arg0 [32]byte) (*big.Int, error) + + SNativeSurcharge(opts *bind.CallOpts) (*big.Int, error) + + SSubscriberDiscounts(opts *bind.CallOpts, arg0 common.Address, arg1 [32]byte, arg2 common.Address) (*big.Int, error) + + SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + PayLinkDeficit(opts *bind.TransactOpts, configDigest [32]byte) (*types.Transaction, error) + + ProcessFee(opts *bind.TransactOpts, payload []byte, parameterPayload []byte, subscriber common.Address) (*types.Transaction, error) + + ProcessFeeBulk(opts *bind.TransactOpts, payloads [][]byte, parameterPayload []byte, subscriber common.Address) (*types.Transaction, error) + + SetFeeRecipients(opts *bind.TransactOpts, configDigest [32]byte, rewardRecipientAndWeights []CommonAddressAndWeight) (*types.Transaction, error) + + SetNativeSurcharge(opts *bind.TransactOpts, surcharge uint64) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + UpdateSubscriberDiscount(opts *bind.TransactOpts, subscriber common.Address, feedId [32]byte, token common.Address, discount uint64) (*types.Transaction, error) + + Withdraw(opts *bind.TransactOpts, assetAddress common.Address, recipient common.Address, quantity *big.Int) (*types.Transaction, error) + + FilterDiscountApplied(opts *bind.FilterOpts, configDigest [][32]byte, subscriber []common.Address) (*FeeManagerDiscountAppliedIterator, error) + + WatchDiscountApplied(opts *bind.WatchOpts, sink chan<- 
*FeeManagerDiscountApplied, configDigest [][32]byte, subscriber []common.Address) (event.Subscription, error) + + ParseDiscountApplied(log types.Log) (*FeeManagerDiscountApplied, error) + + FilterInsufficientLink(opts *bind.FilterOpts) (*FeeManagerInsufficientLinkIterator, error) + + WatchInsufficientLink(opts *bind.WatchOpts, sink chan<- *FeeManagerInsufficientLink) (event.Subscription, error) + + ParseInsufficientLink(log types.Log) (*FeeManagerInsufficientLink, error) + + FilterLinkDeficitCleared(opts *bind.FilterOpts, configDigest [][32]byte) (*FeeManagerLinkDeficitClearedIterator, error) + + WatchLinkDeficitCleared(opts *bind.WatchOpts, sink chan<- *FeeManagerLinkDeficitCleared, configDigest [][32]byte) (event.Subscription, error) + + ParseLinkDeficitCleared(log types.Log) (*FeeManagerLinkDeficitCleared, error) + + FilterNativeSurchargeUpdated(opts *bind.FilterOpts) (*FeeManagerNativeSurchargeUpdatedIterator, error) + + WatchNativeSurchargeUpdated(opts *bind.WatchOpts, sink chan<- *FeeManagerNativeSurchargeUpdated) (event.Subscription, error) + + ParseNativeSurchargeUpdated(log types.Log) (*FeeManagerNativeSurchargeUpdated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FeeManagerOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *FeeManagerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*FeeManagerOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*FeeManagerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *FeeManagerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*FeeManagerOwnershipTransferred, error) + + 
FilterSubscriberDiscountUpdated(opts *bind.FilterOpts, subscriber []common.Address, feedId [][32]byte) (*FeeManagerSubscriberDiscountUpdatedIterator, error) + + WatchSubscriberDiscountUpdated(opts *bind.WatchOpts, sink chan<- *FeeManagerSubscriberDiscountUpdated, subscriber []common.Address, feedId [][32]byte) (event.Subscription, error) + + ParseSubscriberDiscountUpdated(log types.Log) (*FeeManagerSubscriberDiscountUpdated, error) + + FilterWithdraw(opts *bind.FilterOpts) (*FeeManagerWithdrawIterator, error) + + WatchWithdraw(opts *bind.WatchOpts, sink chan<- *FeeManagerWithdraw) (event.Subscription, error) + + ParseWithdraw(log types.Log) (*FeeManagerWithdraw, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/llo-feeds/generated/llo_feeds/llo_feeds.go b/core/gethwrappers/llo-feeds/generated/llo_feeds/llo_feeds.go new file mode 100644 index 00000000..a3a47222 --- /dev/null +++ b/core/gethwrappers/llo-feeds/generated/llo_feeds/llo_feeds.go @@ -0,0 +1,1318 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package llo_feeds + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type CommonAddressAndWeight struct { + Addr common.Address + Weight *big.Int +} + +type CommonAsset struct { + AssetAddress common.Address + Amount *big.Int +} + +type IFeeManagerQuote struct { + QuoteAddress common.Address +} + +var LLOFeeManagerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_linkAddress\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_nativeAddress\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_proxyAddress\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_rewardManagerAddress\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"ExpiredReport\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidAddress\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDeposit\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDiscount\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidQuote\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidSurcharge\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidToken\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"linkQuantity\",\"type\":\"uint256\"},{\"indexed\":false,\"i
nternalType\":\"uint256\",\"name\":\"nativeQuantity\",\"type\":\"uint256\"}],\"name\":\"InsufficientLink\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newSurcharge\",\"type\":\"uint256\"}],\"name\":\"NativeSurchargeSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"subscriber\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"discount\",\"type\":\"uint256\"}],\"name\":\"SubscriberDiscountUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"adminAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"assetAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"quantity\",\"type\":\"uint256\"}],\"name\":\"Withdraw\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"subscriber\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"report\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"quoteAddress\",\"
type\":\"address\"}],\"internalType\":\"structIFeeManager.Quote\",\"name\":\"quote\",\"type\":\"tuple\"}],\"name\":\"getFeeAndReward\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"assetAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"internalType\":\"structCommon.Asset\",\"name\":\"\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"assetAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"internalType\":\"structCommon.Asset\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"linkAvailableForPayment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nativeSurcharge\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"payload\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"subscriber\",\"type\":\"address\"}],\"name\":\"processFee\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"weight\",\"type\":\"uint256\"}],\"internalType\":\"structCommon.AddressAndWeight[]\",\"name\":\"rewardRecipientAndWeights\",\"type\":\"tuple[]\"}],\"name\":\"setFeeRecipients\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"surcharge\",\
"type\":\"uint256\"}],\"name\":\"setNativeSurcharge\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"subscriberDiscounts\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"subscriber\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"token\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"discount\",\"type\":\"uint256\"}],\"name\":\"updateSubscriberDiscount\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"assetAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"quantity\",\"type\":\"uint256\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x6101006040523480156200001257600080fd5b50604051620024d4380380620024d4833981016040819052620000359162000288565b33806000816200008c5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000bf57620000bf81620001c0565b5050506001600160a01b0384161580620000e057506001600160a01b038316155b80620000f357506001600160a01b038216155b806200010657506001600160a01b038116155b15620001255760405163e6c4247b60e01b815260040160405180910390fd5b6001600160a01b03848116608081905284821660a05283821660c05290821660e081905260405163095ea7b360e01b81526004810191909152600019602482015263095ea7b3906044016020604051808303816000875af11580156200018f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190620001b59190620002e5565b505050505062000310565b336001600160a01b038216036200021a5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000083565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b03811681146200028357600080fd5b919050565b600080600080608085870312156200029f57600080fd5b620002aa856200026b565b9350620002ba602086016200026b565b9250620002ca604086016200026b565b9150620002da606086016200026b565b905092959194509250565b600060208284031215620002f857600080fd5b815180151581146200030957600080fd5b9392505050565b60805160a05160c05160e051612128620003ac6000396000818161044c01528181610e5401526110b20152600081816103820152610b1b0152600081816106af015281816107020152818161090f01528181610c6901528181610d3001526112170152600081816106d40152818161075d0152818161089a01528181610a5d01528181610def01528181610fa301526111c001526121286000f3fe6080604052600436106100dd5760003560e01c806393798be71161007f578063f1387e1611610059578063f1387e16146102d6578063f237f1a81
46102e9578063f2fde38b14610309578063f3fef3a31461032957600080fd5b806393798be714610255578063c541cbde14610293578063d09dc339146102c157600080fd5b80636b54d8a6116100bb5780636b54d8a6146101c757806379ba5097146101e75780637d75cc49146101fc5780638da5cb5b1461022057600080fd5b806301ffc9a7146100e2578063181f5a771461015957806369fd2b34146101a5575b600080fd5b3480156100ee57600080fd5b506101446100fd36600461167b565b7fffffffff00000000000000000000000000000000000000000000000000000000167ff1387e16000000000000000000000000000000000000000000000000000000001490565b60405190151581526020015b60405180910390f35b34801561016557600080fd5b50604080518082018252601081527f4665654d616e6167657220302e302e31000000000000000000000000000000006020820152905161015091906116c4565b3480156101b157600080fd5b506101c56101c0366004611730565b610349565b005b3480156101d357600080fd5b506101c56101e23660046117af565b6104bc565b3480156101f357600080fd5b506101c5610541565b34801561020857600080fd5b5061021260035481565b604051908152602001610150565b34801561022c57600080fd5b5060005460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610150565b34801561026157600080fd5b506102126102703660046117ea565b600260209081526000938452604080852082529284528284209052825290205481565b34801561029f57600080fd5b506102b36102ae366004611961565b61063e565b604051610150929190611a02565b3480156102cd57600080fd5b50610212610a2c565b6101c56102e4366004611a56565b610ae2565b3480156102f557600080fd5b506101c5610304366004611ace565b611174565b34801561031557600080fd5b506101c5610324366004611b16565b61131b565b34801561033557600080fd5b506101c5610344366004611b33565b61132f565b60005473ffffffffffffffffffffffffffffffffffffffff163314806103a457503373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016145b61040f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f4f6e6c79206f776e6572206f722070726f78790000000000000000000000000060448201526064015b60405180910390fd5b6040517f633b5f6e000000000000000000000
00000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169063633b5f6e9061048590869086908690600401611b5f565b600060405180830381600087803b15801561049f57600080fd5b505af11580156104b3573d6000803e3d6000fd5b50505050505050565b6104c46114c9565b670de0b6b3a7640000811115610506576040517f05e8ac2900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60038190556040518181527f4fbcc2b7f4cb5518be923bd7c7c887e29e07b001e2a4a0fdd47c494696d8a1479060200160405180910390a150565b60015473ffffffffffffffffffffffffffffffffffffffff1633146105c2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610406565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60408051808201909152600080825260208201526040805180820190915260008082526020820152604080518082019091526000808252602082015260408051808201909152600080825260208201528551610120106107005773ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000811683527f00000000000000000000000000000000000000000000000000000000000000001681529092509050610a24565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16856000015173ffffffffffffffffffffffffffffffffffffffff16141580156107b057507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16856000015173ffffffffffffffffffffffffffffffffffffffff1614155b156107e7576040517ff861803000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000806000888060200190518101906108009190611c39565b63ffffffff169b5077f
fffffffffffffffffffffffffffffffffffffffffffffff169b5077ffffffffffffffffffffffffffffffffffffffffffffffff169b5050505050505050505042811015610883576040517fb6c405f500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000008116808652602086018590528951909116036108f857835173ffffffffffffffffffffffffffffffffffffffff16855260208085015190860152610968565b73ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001685526003546109629061094a90670de0b6b3a7640000611d36565b6109549084611d49565b670de0b6b3a764000061154c565b60208601525b60006109738a611d86565b73ffffffffffffffffffffffffffffffffffffffff808d16600090815260026020908152604080832085845282528083208e519094168352928152919020549088015191925090670de0b6b3a7640000906109cf908390611d49565b6109d99190611dcb565b87602001516109e89190611e06565b876020018181525050610a048187602001516109549190611d49565b8660200151610a139190611e06565b602087015250949650929450505050505b935093915050565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201526000907f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906370a0823190602401602060405180830381865afa158015610ab9573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610add9190611e19565b905090565b60005473ffffffffffffffffffffffffffffffffffffffff16331480610b3d57503373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016145b610ba3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f4f6e6c79206f776e6572206f722070726f7879000000000000000000000000006044820152606401610406565b3073ffffffffffffffffffffffffffffffffffffffff821603610bf2576040517fe6c4247b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5
b6000610c0083850185611ea0565b604080516020810190915260008152909250905081516101201015610c4c576000610c2d85870187611f6f565b9550505050505080806020019051810190610c489190612037565b9150505b600080610c5a33858561063e565b909250905060003415610dbd577f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16836000015173ffffffffffffffffffffffffffffffffffffffff1614610cf0576040517fb2e532de00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3483602001511115610d2e576040517fb2e532de00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663d0e30db084602001516040518263ffffffff1660e01b81526004016000604051808303818588803b158015610d9a57600080fd5b505af1158015610dae573d6000803e3d6000fd5b50505050508260200151340390505b6000610dc9888a612065565b60208501519091501561111e57835173ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000008116911603610ec4576040517f84afb76e00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016906384afb76e90610e8d9084908b9088906004016120a1565b600060405180830381600087803b158015610ea757600080fd5b505af1158015610ebb573d6000803e3d6000fd5b5050505061111e565b34600003610f7557835160208501516040517f23b872dd00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8a8116600483015230602483015260448201929092529116906323b872dd906064016020604051808303816000875af1158015610f4f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610f7391906120f9565b505b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906
370a0823190602401602060405180830381865afa158015610fff573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906110239190611e19565b836020015111156110755760208084015185820151604080519283529282015282917feb6f22018570d97db6df12dc94f202b4e2b2888a6a5d4bd179422c91b29dcdf7910160405180910390a261111e565b6040517f84afb76e00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016906384afb76e906110eb908490309088906004016120a1565b600060405180830381600087803b15801561110557600080fd5b505af1158015611119573d6000803e3d6000fd5b505050505b81156111695760405173ffffffffffffffffffffffffffffffffffffffff88169083156108fc029084906000818181858888f19350505050158015611167573d6000803e3d6000fd5b505b505050505050505050565b61117c6114c9565b670de0b6b3a76400008111156111be576040517f997ea36000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415801561126657507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b1561129d576040517fe6c4247b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff84811660008181526002602090815260408083208884528252808320948716808452948252918290208590558151938452830184905285927f41eb9ccd292d5906dc1f0ec108bed3e2b966e3071e033df938f7215f6d30ca84910160405180910390a350505050565b6113236114c9565b61132c81611586565b50565b6113376114c9565b73ffffffffffffffffffffffffffffffffffffffff821661139d576000805460405173ffffffffffffffffffffffffffffffffffffffff9091169183156108fc02918491818181858888f19350505050158015611398573d6000803e3d6000fd5b505050565b8173ffffffffffffffffffffffffffffffffffffffff1663a9059cbb6113d860005473fff
fffffffffffffffffffffffffffffffffffff1690565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e084901b16815273ffffffffffffffffffffffffffffffffffffffff9091166004820152602481018490526044016020604051808303816000875af115801561144a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061146e91906120f9565b506040805133815273ffffffffffffffffffffffffffffffffffffffff841660208201529081018290527f9b1bfa7fa9ee420a16e124f794c35ac9f90472acc99140eb2f6447c714cad8eb9060600160405180910390a15050565b60005473ffffffffffffffffffffffffffffffffffffffff16331461154a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610406565b565b6000821561157a5781611560600185611e06565b61156a9190611dcb565b611575906001611d36565b61157d565b60005b90505b92915050565b3373ffffffffffffffffffffffffffffffffffffffff821603611605576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610406565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60006020828403121561168d57600080fd5b81357fffffffff00000000000000000000000000000000000000000000000000000000811681146116bd57600080fd5b9392505050565b600060208083528351808285015260005b818110156116f1578581018301518582016040015282016116d5565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b60008060006040848603121561174557600080fd5b83359250602084013567ffffffffffffffff8082111561176457600080fd5b818601915086601f83011261177857600080fd5b81358181111561178757600080fd5b8760208260061b850101111561179c57600080fd5b602083019450809350505050925
0925092565b6000602082840312156117c157600080fd5b5035919050565b73ffffffffffffffffffffffffffffffffffffffff8116811461132c57600080fd5b6000806000606084860312156117ff57600080fd5b833561180a816117c8565b9250602084013591506040840135611821816117c8565b809150509250925092565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040516020810167ffffffffffffffff8111828210171561187e5761187e61182c565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156118cb576118cb61182c565b604052919050565b600082601f8301126118e457600080fd5b813567ffffffffffffffff8111156118fe576118fe61182c565b61192f60207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601611884565b81815284602083860101111561194457600080fd5b816020850160208301376000918101602001919091529392505050565b6000806000838503606081121561197757600080fd5b8435611982816117c8565b9350602085013567ffffffffffffffff81111561199e57600080fd5b6119aa878288016118d3565b93505060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc0820112156119dd57600080fd5b506119e661185b565b60408501356119f4816117c8565b815292959194509192509050565b825173ffffffffffffffffffffffffffffffffffffffff1681526020808401519082015260808101825173ffffffffffffffffffffffffffffffffffffffff166040830152602083015160608301526116bd565b600080600060408486031215611a6b57600080fd5b833567ffffffffffffffff80821115611a8357600080fd5b818601915086601f830112611a9757600080fd5b813581811115611aa657600080fd5b876020828501011115611ab857600080fd5b60209283019550935050840135611821816117c8565b60008060008060808587031215611ae457600080fd5b8435611aef816117c8565b9350602085013592506040850135611b06816117c8565b9396929550929360600135925050565b600060208284031215611b2857600080fd5b81356116bd816117c8565b60008060408385031215611b4657600080fd5b8235611b51816117c8565b946020939093013593505050565b8381526040602080830182905282820184905260009190859060608501845b87811015611bc1578335611b91816117c8565b7
3ffffffffffffffffffffffffffffffffffffffff16825283830135838301529284019290840190600101611b7e565b5098975050505050505050565b805163ffffffff81168114611be257600080fd5b919050565b8051601781900b8114611be257600080fd5b805167ffffffffffffffff81168114611be257600080fd5b805177ffffffffffffffffffffffffffffffffffffffffffffffff81168114611be257600080fd5b6000806000806000806000806000806000806101808d8f031215611c5c57600080fd5b8c519b50611c6c60208e01611bce565b9a50611c7a60408e01611be7565b9950611c8860608e01611be7565b9850611c9660808e01611be7565b9750611ca460a08e01611bf9565b965060c08d01519550611cb960e08e01611bf9565b9450611cc86101008e01611bf9565b9350611cd76101208e01611c11565b9250611ce66101408e01611c11565b9150611cf56101608e01611bce565b90509295989b509295989b509295989b565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b8082018082111561158057611580611d07565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615611d8157611d81611d07565b500290565b80516020808301519190811015611dc5577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8160200360031b1b821691505b50919050565b600082611e01577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b8181038181111561158057611580611d07565b600060208284031215611e2b57600080fd5b5051919050565b600082601f830112611e4357600080fd5b6040516060810181811067ffffffffffffffff82111715611e6657611e6661182c565b604052806060840185811115611e7b57600080fd5b845b81811015611e95578035835260209283019201611e7d565b509195945050505050565b60008060808385031215611eb357600080fd5b611ebd8484611e32565b9150606083013567ffffffffffffffff811115611ed957600080fd5b611ee5858286016118d3565b9150509250929050565b600082601f830112611f0057600080fd5b8135602067ffffffffffffffff821115611f1c57611f1c61182c565b8160051b611f2b828201611884565b9283528481018201928281019087851115611f4557600080fd5b83870192505b84831015611f6457823582529183019190830190611f4b565b979650505050505050565b600080600080600080610
1008789031215611f8957600080fd5b611f938888611e32565b9550606087013567ffffffffffffffff80821115611fb057600080fd5b611fbc8a838b016118d3565b96506080890135915080821115611fd257600080fd5b611fde8a838b01611eef565b955060a0890135915080821115611ff457600080fd5b6120008a838b01611eef565b945060c0890135935060e089013591508082111561201d57600080fd5b5061202a89828a016118d3565b9150509295509295509295565b60006020828403121561204957600080fd5b61205161185b565b825161205c816117c8565b81529392505050565b80356020831015611580577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff602084900360031b1b1692915050565b83815273ffffffffffffffffffffffffffffffffffffffff83166020820152608081016120f16040830184805173ffffffffffffffffffffffffffffffffffffffff168252602090810151910152565b949350505050565b60006020828403121561210b57600080fd5b815180151581146116bd57600080fdfea164736f6c6343000810000a", +} + +var LLOFeeManagerABI = LLOFeeManagerMetaData.ABI + +var LLOFeeManagerBin = LLOFeeManagerMetaData.Bin + +func DeployLLOFeeManager(auth *bind.TransactOpts, backend bind.ContractBackend, _linkAddress common.Address, _nativeAddress common.Address, _proxyAddress common.Address, _rewardManagerAddress common.Address) (common.Address, *types.Transaction, *LLOFeeManager, error) { + parsed, err := LLOFeeManagerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(LLOFeeManagerBin), backend, _linkAddress, _nativeAddress, _proxyAddress, _rewardManagerAddress) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LLOFeeManager{LLOFeeManagerCaller: LLOFeeManagerCaller{contract: contract}, LLOFeeManagerTransactor: LLOFeeManagerTransactor{contract: contract}, LLOFeeManagerFilterer: LLOFeeManagerFilterer{contract: contract}}, nil +} + +type LLOFeeManager struct { + address common.Address + abi 
abi.ABI + LLOFeeManagerCaller + LLOFeeManagerTransactor + LLOFeeManagerFilterer +} + +type LLOFeeManagerCaller struct { + contract *bind.BoundContract +} + +type LLOFeeManagerTransactor struct { + contract *bind.BoundContract +} + +type LLOFeeManagerFilterer struct { + contract *bind.BoundContract +} + +type LLOFeeManagerSession struct { + Contract *LLOFeeManager + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type LLOFeeManagerCallerSession struct { + Contract *LLOFeeManagerCaller + CallOpts bind.CallOpts +} + +type LLOFeeManagerTransactorSession struct { + Contract *LLOFeeManagerTransactor + TransactOpts bind.TransactOpts +} + +type LLOFeeManagerRaw struct { + Contract *LLOFeeManager +} + +type LLOFeeManagerCallerRaw struct { + Contract *LLOFeeManagerCaller +} + +type LLOFeeManagerTransactorRaw struct { + Contract *LLOFeeManagerTransactor +} + +func NewLLOFeeManager(address common.Address, backend bind.ContractBackend) (*LLOFeeManager, error) { + abi, err := abi.JSON(strings.NewReader(LLOFeeManagerABI)) + if err != nil { + return nil, err + } + contract, err := bindLLOFeeManager(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LLOFeeManager{address: address, abi: abi, LLOFeeManagerCaller: LLOFeeManagerCaller{contract: contract}, LLOFeeManagerTransactor: LLOFeeManagerTransactor{contract: contract}, LLOFeeManagerFilterer: LLOFeeManagerFilterer{contract: contract}}, nil +} + +func NewLLOFeeManagerCaller(address common.Address, caller bind.ContractCaller) (*LLOFeeManagerCaller, error) { + contract, err := bindLLOFeeManager(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LLOFeeManagerCaller{contract: contract}, nil +} + +func NewLLOFeeManagerTransactor(address common.Address, transactor bind.ContractTransactor) (*LLOFeeManagerTransactor, error) { + contract, err := bindLLOFeeManager(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return 
&LLOFeeManagerTransactor{contract: contract}, nil +} + +func NewLLOFeeManagerFilterer(address common.Address, filterer bind.ContractFilterer) (*LLOFeeManagerFilterer, error) { + contract, err := bindLLOFeeManager(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LLOFeeManagerFilterer{contract: contract}, nil +} + +func bindLLOFeeManager(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LLOFeeManagerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_LLOFeeManager *LLOFeeManagerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LLOFeeManager.Contract.LLOFeeManagerCaller.contract.Call(opts, result, method, params...) +} + +func (_LLOFeeManager *LLOFeeManagerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LLOFeeManager.Contract.LLOFeeManagerTransactor.contract.Transfer(opts) +} + +func (_LLOFeeManager *LLOFeeManagerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LLOFeeManager.Contract.LLOFeeManagerTransactor.contract.Transact(opts, method, params...) +} + +func (_LLOFeeManager *LLOFeeManagerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LLOFeeManager.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_LLOFeeManager *LLOFeeManagerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LLOFeeManager.Contract.contract.Transfer(opts) +} + +func (_LLOFeeManager *LLOFeeManagerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LLOFeeManager.Contract.contract.Transact(opts, method, params...) +} + +func (_LLOFeeManager *LLOFeeManagerCaller) GetFeeAndReward(opts *bind.CallOpts, subscriber common.Address, report []byte, quote IFeeManagerQuote) (CommonAsset, CommonAsset, error) { + var out []interface{} + err := _LLOFeeManager.contract.Call(opts, &out, "getFeeAndReward", subscriber, report, quote) + + if err != nil { + return *new(CommonAsset), *new(CommonAsset), err + } + + out0 := *abi.ConvertType(out[0], new(CommonAsset)).(*CommonAsset) + out1 := *abi.ConvertType(out[1], new(CommonAsset)).(*CommonAsset) + + return out0, out1, err + +} + +func (_LLOFeeManager *LLOFeeManagerSession) GetFeeAndReward(subscriber common.Address, report []byte, quote IFeeManagerQuote) (CommonAsset, CommonAsset, error) { + return _LLOFeeManager.Contract.GetFeeAndReward(&_LLOFeeManager.CallOpts, subscriber, report, quote) +} + +func (_LLOFeeManager *LLOFeeManagerCallerSession) GetFeeAndReward(subscriber common.Address, report []byte, quote IFeeManagerQuote) (CommonAsset, CommonAsset, error) { + return _LLOFeeManager.Contract.GetFeeAndReward(&_LLOFeeManager.CallOpts, subscriber, report, quote) +} + +func (_LLOFeeManager *LLOFeeManagerCaller) LinkAvailableForPayment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LLOFeeManager.contract.Call(opts, &out, "linkAvailableForPayment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LLOFeeManager *LLOFeeManagerSession) LinkAvailableForPayment() (*big.Int, error) { + return 
_LLOFeeManager.Contract.LinkAvailableForPayment(&_LLOFeeManager.CallOpts) +} + +func (_LLOFeeManager *LLOFeeManagerCallerSession) LinkAvailableForPayment() (*big.Int, error) { + return _LLOFeeManager.Contract.LinkAvailableForPayment(&_LLOFeeManager.CallOpts) +} + +func (_LLOFeeManager *LLOFeeManagerCaller) NativeSurcharge(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LLOFeeManager.contract.Call(opts, &out, "nativeSurcharge") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LLOFeeManager *LLOFeeManagerSession) NativeSurcharge() (*big.Int, error) { + return _LLOFeeManager.Contract.NativeSurcharge(&_LLOFeeManager.CallOpts) +} + +func (_LLOFeeManager *LLOFeeManagerCallerSession) NativeSurcharge() (*big.Int, error) { + return _LLOFeeManager.Contract.NativeSurcharge(&_LLOFeeManager.CallOpts) +} + +func (_LLOFeeManager *LLOFeeManagerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _LLOFeeManager.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_LLOFeeManager *LLOFeeManagerSession) Owner() (common.Address, error) { + return _LLOFeeManager.Contract.Owner(&_LLOFeeManager.CallOpts) +} + +func (_LLOFeeManager *LLOFeeManagerCallerSession) Owner() (common.Address, error) { + return _LLOFeeManager.Contract.Owner(&_LLOFeeManager.CallOpts) +} + +func (_LLOFeeManager *LLOFeeManagerCaller) SubscriberDiscounts(opts *bind.CallOpts, arg0 common.Address, arg1 [32]byte, arg2 common.Address) (*big.Int, error) { + var out []interface{} + err := _LLOFeeManager.contract.Call(opts, &out, "subscriberDiscounts", arg0, arg1, arg2) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + 
+} + +func (_LLOFeeManager *LLOFeeManagerSession) SubscriberDiscounts(arg0 common.Address, arg1 [32]byte, arg2 common.Address) (*big.Int, error) { + return _LLOFeeManager.Contract.SubscriberDiscounts(&_LLOFeeManager.CallOpts, arg0, arg1, arg2) +} + +func (_LLOFeeManager *LLOFeeManagerCallerSession) SubscriberDiscounts(arg0 common.Address, arg1 [32]byte, arg2 common.Address) (*big.Int, error) { + return _LLOFeeManager.Contract.SubscriberDiscounts(&_LLOFeeManager.CallOpts, arg0, arg1, arg2) +} + +func (_LLOFeeManager *LLOFeeManagerCaller) SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) { + var out []interface{} + err := _LLOFeeManager.contract.Call(opts, &out, "supportsInterface", interfaceId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_LLOFeeManager *LLOFeeManagerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _LLOFeeManager.Contract.SupportsInterface(&_LLOFeeManager.CallOpts, interfaceId) +} + +func (_LLOFeeManager *LLOFeeManagerCallerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _LLOFeeManager.Contract.SupportsInterface(&_LLOFeeManager.CallOpts, interfaceId) +} + +func (_LLOFeeManager *LLOFeeManagerCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _LLOFeeManager.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_LLOFeeManager *LLOFeeManagerSession) TypeAndVersion() (string, error) { + return _LLOFeeManager.Contract.TypeAndVersion(&_LLOFeeManager.CallOpts) +} + +func (_LLOFeeManager *LLOFeeManagerCallerSession) TypeAndVersion() (string, error) { + return _LLOFeeManager.Contract.TypeAndVersion(&_LLOFeeManager.CallOpts) +} + +func (_LLOFeeManager *LLOFeeManagerTransactor) AcceptOwnership(opts *bind.TransactOpts) 
(*types.Transaction, error) { + return _LLOFeeManager.contract.Transact(opts, "acceptOwnership") +} + +func (_LLOFeeManager *LLOFeeManagerSession) AcceptOwnership() (*types.Transaction, error) { + return _LLOFeeManager.Contract.AcceptOwnership(&_LLOFeeManager.TransactOpts) +} + +func (_LLOFeeManager *LLOFeeManagerTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _LLOFeeManager.Contract.AcceptOwnership(&_LLOFeeManager.TransactOpts) +} + +func (_LLOFeeManager *LLOFeeManagerTransactor) ProcessFee(opts *bind.TransactOpts, payload []byte, subscriber common.Address) (*types.Transaction, error) { + return _LLOFeeManager.contract.Transact(opts, "processFee", payload, subscriber) +} + +func (_LLOFeeManager *LLOFeeManagerSession) ProcessFee(payload []byte, subscriber common.Address) (*types.Transaction, error) { + return _LLOFeeManager.Contract.ProcessFee(&_LLOFeeManager.TransactOpts, payload, subscriber) +} + +func (_LLOFeeManager *LLOFeeManagerTransactorSession) ProcessFee(payload []byte, subscriber common.Address) (*types.Transaction, error) { + return _LLOFeeManager.Contract.ProcessFee(&_LLOFeeManager.TransactOpts, payload, subscriber) +} + +func (_LLOFeeManager *LLOFeeManagerTransactor) SetFeeRecipients(opts *bind.TransactOpts, configDigest [32]byte, rewardRecipientAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _LLOFeeManager.contract.Transact(opts, "setFeeRecipients", configDigest, rewardRecipientAndWeights) +} + +func (_LLOFeeManager *LLOFeeManagerSession) SetFeeRecipients(configDigest [32]byte, rewardRecipientAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _LLOFeeManager.Contract.SetFeeRecipients(&_LLOFeeManager.TransactOpts, configDigest, rewardRecipientAndWeights) +} + +func (_LLOFeeManager *LLOFeeManagerTransactorSession) SetFeeRecipients(configDigest [32]byte, rewardRecipientAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return 
_LLOFeeManager.Contract.SetFeeRecipients(&_LLOFeeManager.TransactOpts, configDigest, rewardRecipientAndWeights) +} + +func (_LLOFeeManager *LLOFeeManagerTransactor) SetNativeSurcharge(opts *bind.TransactOpts, surcharge *big.Int) (*types.Transaction, error) { + return _LLOFeeManager.contract.Transact(opts, "setNativeSurcharge", surcharge) +} + +func (_LLOFeeManager *LLOFeeManagerSession) SetNativeSurcharge(surcharge *big.Int) (*types.Transaction, error) { + return _LLOFeeManager.Contract.SetNativeSurcharge(&_LLOFeeManager.TransactOpts, surcharge) +} + +func (_LLOFeeManager *LLOFeeManagerTransactorSession) SetNativeSurcharge(surcharge *big.Int) (*types.Transaction, error) { + return _LLOFeeManager.Contract.SetNativeSurcharge(&_LLOFeeManager.TransactOpts, surcharge) +} + +func (_LLOFeeManager *LLOFeeManagerTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _LLOFeeManager.contract.Transact(opts, "transferOwnership", to) +} + +func (_LLOFeeManager *LLOFeeManagerSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _LLOFeeManager.Contract.TransferOwnership(&_LLOFeeManager.TransactOpts, to) +} + +func (_LLOFeeManager *LLOFeeManagerTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _LLOFeeManager.Contract.TransferOwnership(&_LLOFeeManager.TransactOpts, to) +} + +func (_LLOFeeManager *LLOFeeManagerTransactor) UpdateSubscriberDiscount(opts *bind.TransactOpts, subscriber common.Address, feedId [32]byte, token common.Address, discount *big.Int) (*types.Transaction, error) { + return _LLOFeeManager.contract.Transact(opts, "updateSubscriberDiscount", subscriber, feedId, token, discount) +} + +func (_LLOFeeManager *LLOFeeManagerSession) UpdateSubscriberDiscount(subscriber common.Address, feedId [32]byte, token common.Address, discount *big.Int) (*types.Transaction, error) { + return 
_LLOFeeManager.Contract.UpdateSubscriberDiscount(&_LLOFeeManager.TransactOpts, subscriber, feedId, token, discount) +} + +func (_LLOFeeManager *LLOFeeManagerTransactorSession) UpdateSubscriberDiscount(subscriber common.Address, feedId [32]byte, token common.Address, discount *big.Int) (*types.Transaction, error) { + return _LLOFeeManager.Contract.UpdateSubscriberDiscount(&_LLOFeeManager.TransactOpts, subscriber, feedId, token, discount) +} + +func (_LLOFeeManager *LLOFeeManagerTransactor) Withdraw(opts *bind.TransactOpts, assetAddress common.Address, quantity *big.Int) (*types.Transaction, error) { + return _LLOFeeManager.contract.Transact(opts, "withdraw", assetAddress, quantity) +} + +func (_LLOFeeManager *LLOFeeManagerSession) Withdraw(assetAddress common.Address, quantity *big.Int) (*types.Transaction, error) { + return _LLOFeeManager.Contract.Withdraw(&_LLOFeeManager.TransactOpts, assetAddress, quantity) +} + +func (_LLOFeeManager *LLOFeeManagerTransactorSession) Withdraw(assetAddress common.Address, quantity *big.Int) (*types.Transaction, error) { + return _LLOFeeManager.Contract.Withdraw(&_LLOFeeManager.TransactOpts, assetAddress, quantity) +} + +type LLOFeeManagerInsufficientLinkIterator struct { + Event *LLOFeeManagerInsufficientLink + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LLOFeeManagerInsufficientLinkIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LLOFeeManagerInsufficientLink) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LLOFeeManagerInsufficientLink) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + 
return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LLOFeeManagerInsufficientLinkIterator) Error() error { + return it.fail +} + +func (it *LLOFeeManagerInsufficientLinkIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LLOFeeManagerInsufficientLink struct { + ConfigDigest [32]byte + LinkQuantity *big.Int + NativeQuantity *big.Int + Raw types.Log +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) FilterInsufficientLink(opts *bind.FilterOpts, configDigest [][32]byte) (*LLOFeeManagerInsufficientLinkIterator, error) { + + var configDigestRule []interface{} + for _, configDigestItem := range configDigest { + configDigestRule = append(configDigestRule, configDigestItem) + } + + logs, sub, err := _LLOFeeManager.contract.FilterLogs(opts, "InsufficientLink", configDigestRule) + if err != nil { + return nil, err + } + return &LLOFeeManagerInsufficientLinkIterator{contract: _LLOFeeManager.contract, event: "InsufficientLink", logs: logs, sub: sub}, nil +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) WatchInsufficientLink(opts *bind.WatchOpts, sink chan<- *LLOFeeManagerInsufficientLink, configDigest [][32]byte) (event.Subscription, error) { + + var configDigestRule []interface{} + for _, configDigestItem := range configDigest { + configDigestRule = append(configDigestRule, configDigestItem) + } + + logs, sub, err := _LLOFeeManager.contract.WatchLogs(opts, "InsufficientLink", configDigestRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LLOFeeManagerInsufficientLink) + if err := _LLOFeeManager.contract.UnpackLog(event, "InsufficientLink", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case 
<-quit: + return nil + } + } + }), nil +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) ParseInsufficientLink(log types.Log) (*LLOFeeManagerInsufficientLink, error) { + event := new(LLOFeeManagerInsufficientLink) + if err := _LLOFeeManager.contract.UnpackLog(event, "InsufficientLink", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LLOFeeManagerNativeSurchargeSetIterator struct { + Event *LLOFeeManagerNativeSurchargeSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LLOFeeManagerNativeSurchargeSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LLOFeeManagerNativeSurchargeSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LLOFeeManagerNativeSurchargeSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LLOFeeManagerNativeSurchargeSetIterator) Error() error { + return it.fail +} + +func (it *LLOFeeManagerNativeSurchargeSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LLOFeeManagerNativeSurchargeSet struct { + NewSurcharge *big.Int + Raw types.Log +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) FilterNativeSurchargeSet(opts *bind.FilterOpts) (*LLOFeeManagerNativeSurchargeSetIterator, error) { + + logs, sub, err := _LLOFeeManager.contract.FilterLogs(opts, "NativeSurchargeSet") + if err != nil { + return nil, err + } + return &LLOFeeManagerNativeSurchargeSetIterator{contract: _LLOFeeManager.contract, event: "NativeSurchargeSet", logs: 
logs, sub: sub}, nil +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) WatchNativeSurchargeSet(opts *bind.WatchOpts, sink chan<- *LLOFeeManagerNativeSurchargeSet) (event.Subscription, error) { + + logs, sub, err := _LLOFeeManager.contract.WatchLogs(opts, "NativeSurchargeSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LLOFeeManagerNativeSurchargeSet) + if err := _LLOFeeManager.contract.UnpackLog(event, "NativeSurchargeSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) ParseNativeSurchargeSet(log types.Log) (*LLOFeeManagerNativeSurchargeSet, error) { + event := new(LLOFeeManagerNativeSurchargeSet) + if err := _LLOFeeManager.contract.UnpackLog(event, "NativeSurchargeSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LLOFeeManagerOwnershipTransferRequestedIterator struct { + Event *LLOFeeManagerOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LLOFeeManagerOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LLOFeeManagerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LLOFeeManagerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + 
return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LLOFeeManagerOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *LLOFeeManagerOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LLOFeeManagerOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LLOFeeManagerOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LLOFeeManager.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &LLOFeeManagerOwnershipTransferRequestedIterator{contract: _LLOFeeManager.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *LLOFeeManagerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LLOFeeManager.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LLOFeeManagerOwnershipTransferRequested) + if err := 
_LLOFeeManager.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) ParseOwnershipTransferRequested(log types.Log) (*LLOFeeManagerOwnershipTransferRequested, error) { + event := new(LLOFeeManagerOwnershipTransferRequested) + if err := _LLOFeeManager.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LLOFeeManagerOwnershipTransferredIterator struct { + Event *LLOFeeManagerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LLOFeeManagerOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LLOFeeManagerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LLOFeeManagerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LLOFeeManagerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *LLOFeeManagerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LLOFeeManagerOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_LLOFeeManager 
*LLOFeeManagerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LLOFeeManagerOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LLOFeeManager.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &LLOFeeManagerOwnershipTransferredIterator{contract: _LLOFeeManager.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *LLOFeeManagerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LLOFeeManager.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LLOFeeManagerOwnershipTransferred) + if err := _LLOFeeManager.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) ParseOwnershipTransferred(log types.Log) (*LLOFeeManagerOwnershipTransferred, error) { + event := new(LLOFeeManagerOwnershipTransferred) + if err := _LLOFeeManager.contract.UnpackLog(event, 
"OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LLOFeeManagerSubscriberDiscountUpdatedIterator struct { + Event *LLOFeeManagerSubscriberDiscountUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LLOFeeManagerSubscriberDiscountUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LLOFeeManagerSubscriberDiscountUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LLOFeeManagerSubscriberDiscountUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LLOFeeManagerSubscriberDiscountUpdatedIterator) Error() error { + return it.fail +} + +func (it *LLOFeeManagerSubscriberDiscountUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LLOFeeManagerSubscriberDiscountUpdated struct { + Subscriber common.Address + FeedId [32]byte + Token common.Address + Discount *big.Int + Raw types.Log +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) FilterSubscriberDiscountUpdated(opts *bind.FilterOpts, subscriber []common.Address, feedId [][32]byte) (*LLOFeeManagerSubscriberDiscountUpdatedIterator, error) { + + var subscriberRule []interface{} + for _, subscriberItem := range subscriber { + subscriberRule = append(subscriberRule, subscriberItem) + } + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _LLOFeeManager.contract.FilterLogs(opts, 
"SubscriberDiscountUpdated", subscriberRule, feedIdRule) + if err != nil { + return nil, err + } + return &LLOFeeManagerSubscriberDiscountUpdatedIterator{contract: _LLOFeeManager.contract, event: "SubscriberDiscountUpdated", logs: logs, sub: sub}, nil +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) WatchSubscriberDiscountUpdated(opts *bind.WatchOpts, sink chan<- *LLOFeeManagerSubscriberDiscountUpdated, subscriber []common.Address, feedId [][32]byte) (event.Subscription, error) { + + var subscriberRule []interface{} + for _, subscriberItem := range subscriber { + subscriberRule = append(subscriberRule, subscriberItem) + } + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _LLOFeeManager.contract.WatchLogs(opts, "SubscriberDiscountUpdated", subscriberRule, feedIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LLOFeeManagerSubscriberDiscountUpdated) + if err := _LLOFeeManager.contract.UnpackLog(event, "SubscriberDiscountUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) ParseSubscriberDiscountUpdated(log types.Log) (*LLOFeeManagerSubscriberDiscountUpdated, error) { + event := new(LLOFeeManagerSubscriberDiscountUpdated) + if err := _LLOFeeManager.contract.UnpackLog(event, "SubscriberDiscountUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LLOFeeManagerWithdrawIterator struct { + Event *LLOFeeManagerWithdraw + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error 
+} + +func (it *LLOFeeManagerWithdrawIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LLOFeeManagerWithdraw) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LLOFeeManagerWithdraw) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LLOFeeManagerWithdrawIterator) Error() error { + return it.fail +} + +func (it *LLOFeeManagerWithdrawIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LLOFeeManagerWithdraw struct { + AdminAddress common.Address + AssetAddress common.Address + Quantity *big.Int + Raw types.Log +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) FilterWithdraw(opts *bind.FilterOpts) (*LLOFeeManagerWithdrawIterator, error) { + + logs, sub, err := _LLOFeeManager.contract.FilterLogs(opts, "Withdraw") + if err != nil { + return nil, err + } + return &LLOFeeManagerWithdrawIterator{contract: _LLOFeeManager.contract, event: "Withdraw", logs: logs, sub: sub}, nil +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) WatchWithdraw(opts *bind.WatchOpts, sink chan<- *LLOFeeManagerWithdraw) (event.Subscription, error) { + + logs, sub, err := _LLOFeeManager.contract.WatchLogs(opts, "Withdraw") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LLOFeeManagerWithdraw) + if err := _LLOFeeManager.contract.UnpackLog(event, "Withdraw", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return 
err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LLOFeeManager *LLOFeeManagerFilterer) ParseWithdraw(log types.Log) (*LLOFeeManagerWithdraw, error) { + event := new(LLOFeeManagerWithdraw) + if err := _LLOFeeManager.contract.UnpackLog(event, "Withdraw", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_LLOFeeManager *LLOFeeManager) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _LLOFeeManager.abi.Events["InsufficientLink"].ID: + return _LLOFeeManager.ParseInsufficientLink(log) + case _LLOFeeManager.abi.Events["NativeSurchargeSet"].ID: + return _LLOFeeManager.ParseNativeSurchargeSet(log) + case _LLOFeeManager.abi.Events["OwnershipTransferRequested"].ID: + return _LLOFeeManager.ParseOwnershipTransferRequested(log) + case _LLOFeeManager.abi.Events["OwnershipTransferred"].ID: + return _LLOFeeManager.ParseOwnershipTransferred(log) + case _LLOFeeManager.abi.Events["SubscriberDiscountUpdated"].ID: + return _LLOFeeManager.ParseSubscriberDiscountUpdated(log) + case _LLOFeeManager.abi.Events["Withdraw"].ID: + return _LLOFeeManager.ParseWithdraw(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (LLOFeeManagerInsufficientLink) Topic() common.Hash { + return common.HexToHash("0xeb6f22018570d97db6df12dc94f202b4e2b2888a6a5d4bd179422c91b29dcdf7") +} + +func (LLOFeeManagerNativeSurchargeSet) Topic() common.Hash { + return common.HexToHash("0x4fbcc2b7f4cb5518be923bd7c7c887e29e07b001e2a4a0fdd47c494696d8a147") +} + +func (LLOFeeManagerOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (LLOFeeManagerOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func 
(LLOFeeManagerSubscriberDiscountUpdated) Topic() common.Hash { + return common.HexToHash("0x41eb9ccd292d5906dc1f0ec108bed3e2b966e3071e033df938f7215f6d30ca84") +} + +func (LLOFeeManagerWithdraw) Topic() common.Hash { + return common.HexToHash("0x9b1bfa7fa9ee420a16e124f794c35ac9f90472acc99140eb2f6447c714cad8eb") +} + +func (_LLOFeeManager *LLOFeeManager) Address() common.Address { + return _LLOFeeManager.address +} + +type LLOFeeManagerInterface interface { + GetFeeAndReward(opts *bind.CallOpts, subscriber common.Address, report []byte, quote IFeeManagerQuote) (CommonAsset, CommonAsset, error) + + LinkAvailableForPayment(opts *bind.CallOpts) (*big.Int, error) + + NativeSurcharge(opts *bind.CallOpts) (*big.Int, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SubscriberDiscounts(opts *bind.CallOpts, arg0 common.Address, arg1 [32]byte, arg2 common.Address) (*big.Int, error) + + SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + ProcessFee(opts *bind.TransactOpts, payload []byte, subscriber common.Address) (*types.Transaction, error) + + SetFeeRecipients(opts *bind.TransactOpts, configDigest [32]byte, rewardRecipientAndWeights []CommonAddressAndWeight) (*types.Transaction, error) + + SetNativeSurcharge(opts *bind.TransactOpts, surcharge *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + UpdateSubscriberDiscount(opts *bind.TransactOpts, subscriber common.Address, feedId [32]byte, token common.Address, discount *big.Int) (*types.Transaction, error) + + Withdraw(opts *bind.TransactOpts, assetAddress common.Address, quantity *big.Int) (*types.Transaction, error) + + FilterInsufficientLink(opts *bind.FilterOpts, configDigest [][32]byte) (*LLOFeeManagerInsufficientLinkIterator, error) + + WatchInsufficientLink(opts 
*bind.WatchOpts, sink chan<- *LLOFeeManagerInsufficientLink, configDigest [][32]byte) (event.Subscription, error) + + ParseInsufficientLink(log types.Log) (*LLOFeeManagerInsufficientLink, error) + + FilterNativeSurchargeSet(opts *bind.FilterOpts) (*LLOFeeManagerNativeSurchargeSetIterator, error) + + WatchNativeSurchargeSet(opts *bind.WatchOpts, sink chan<- *LLOFeeManagerNativeSurchargeSet) (event.Subscription, error) + + ParseNativeSurchargeSet(log types.Log) (*LLOFeeManagerNativeSurchargeSet, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LLOFeeManagerOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *LLOFeeManagerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*LLOFeeManagerOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LLOFeeManagerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *LLOFeeManagerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*LLOFeeManagerOwnershipTransferred, error) + + FilterSubscriberDiscountUpdated(opts *bind.FilterOpts, subscriber []common.Address, feedId [][32]byte) (*LLOFeeManagerSubscriberDiscountUpdatedIterator, error) + + WatchSubscriberDiscountUpdated(opts *bind.WatchOpts, sink chan<- *LLOFeeManagerSubscriberDiscountUpdated, subscriber []common.Address, feedId [][32]byte) (event.Subscription, error) + + ParseSubscriberDiscountUpdated(log types.Log) (*LLOFeeManagerSubscriberDiscountUpdated, error) + + FilterWithdraw(opts *bind.FilterOpts) (*LLOFeeManagerWithdrawIterator, error) + + WatchWithdraw(opts *bind.WatchOpts, sink chan<- *LLOFeeManagerWithdraw) (event.Subscription, error) 
+ + ParseWithdraw(log types.Log) (*LLOFeeManagerWithdraw, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/llo-feeds/generated/llo_feeds_test/llo_feeds_test.go b/core/gethwrappers/llo-feeds/generated/llo_feeds_test/llo_feeds_test.go new file mode 100644 index 00000000..51f0f6e4 --- /dev/null +++ b/core/gethwrappers/llo-feeds/generated/llo_feeds_test/llo_feeds_test.go @@ -0,0 +1,202 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package llo_feeds_test + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var LLOExposedVerifierMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_feedId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_chainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_contractAddress\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"_configCount\",\"type\":\"uint64\"},{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"_offchainTransmitters\",\"type\":\"bytes32[]\"},{\"internalType\":\"uint8\",\"name\":\"_f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"_encodedConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_encodedConfig\",\"type\":\"bytes\"}],\"name\":\"exposedConfigDigestFromConfigData\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610696806100206000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c80630ebd702314610030575b600080fd5b61004361003e3660046103f7565b610055565b60405190815260200160405180910390f35b60006100a18c8c8c8c8c8c8c8c8c8080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508e92508d91506100b19050565b9c9b505050505050505050505050565b6000808b8b8b8b8b8b8b8b8b8b6040516020016100d79a999897969594939291906105a7565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e06000000000000000000000000000000000000000000000000000000000000179150509a9950505050505050505050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461018357600080fd5b919050565b803567ffffffffffffffff8116811461018357600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610216576102166101a0565b604052919050565b600067ffffffffffffffff821115610238576102386101a0565b5060051b60200190565b600082601f83011261025357600080fd5b813560206102686102638361021e565b6101cf565b82815260059290921b8401810191818101908684111561028757600080fd5b8286015b848110156102a95761029c8161015f565b835291830191830161028b565b509695505050505050565b600082601f8301126102c557600080fd5b813560206102d56102638361021e565b82815260059290921b840181019181810190868411156102f457600080fd5b8286015b848110156102a957803583529183019183016102f8565b803560ff8116811461018357600080fd5b60008083601f84011261033257600080fd5b50813567ffffffffffffffff81111561034a57600080fd5b60208301915083602082850101111561036257600080fd5b9250929050565b600082601f83011261037a57600080fd5b813567ffffffffffffffff811115610394576103946101a0565b6103c560207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116016101cf565b8
181528460208386010111156103da57600080fd5b816020850160208301376000918101602001919091529392505050565b60008060008060008060008060008060006101408c8e03121561041957600080fd5b8b359a5060208c0135995061043060408d0161015f565b985061043e60608d01610188565b975067ffffffffffffffff8060808e0135111561045a57600080fd5b61046a8e60808f01358f01610242565b97508060a08e0135111561047d57600080fd5b61048d8e60a08f01358f016102b4565b965061049b60c08e0161030f565b95508060e08e013511156104ae57600080fd5b6104be8e60e08f01358f01610320565b90955093506104d06101008e01610188565b9250806101208e013511156104e457600080fd5b506104f68d6101208e01358e01610369565b90509295989b509295989b9093969950565b600081518084526020808501945080840160005b838110156105385781518752958201959082019060010161051c565b509495945050505050565b6000815180845260005b818110156105695760208185018101518683018201520161054d565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b8a815260208082018b905273ffffffffffffffffffffffffffffffffffffffff8a8116604084015267ffffffffffffffff8a1660608401526101406080840181905289519084018190526000926101608501928b820192855b8181101561061e578451831686529483019493830193600101610600565b505050505082810360a08401526106358189610508565b60ff881660c0850152905082810360e08401526106528187610543565b67ffffffffffffffff861661010085015290508281036101208401526106788185610543565b9d9c5050505050505050505050505056fea164736f6c6343000810000a", +} + +var LLOExposedVerifierABI = LLOExposedVerifierMetaData.ABI + +var LLOExposedVerifierBin = LLOExposedVerifierMetaData.Bin + +func DeployLLOExposedVerifier(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *LLOExposedVerifier, error) { + parsed, err := LLOExposedVerifierMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, 
*parsed, common.FromHex(LLOExposedVerifierBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LLOExposedVerifier{LLOExposedVerifierCaller: LLOExposedVerifierCaller{contract: contract}, LLOExposedVerifierTransactor: LLOExposedVerifierTransactor{contract: contract}, LLOExposedVerifierFilterer: LLOExposedVerifierFilterer{contract: contract}}, nil +} + +type LLOExposedVerifier struct { + address common.Address + abi abi.ABI + LLOExposedVerifierCaller + LLOExposedVerifierTransactor + LLOExposedVerifierFilterer +} + +type LLOExposedVerifierCaller struct { + contract *bind.BoundContract +} + +type LLOExposedVerifierTransactor struct { + contract *bind.BoundContract +} + +type LLOExposedVerifierFilterer struct { + contract *bind.BoundContract +} + +type LLOExposedVerifierSession struct { + Contract *LLOExposedVerifier + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type LLOExposedVerifierCallerSession struct { + Contract *LLOExposedVerifierCaller + CallOpts bind.CallOpts +} + +type LLOExposedVerifierTransactorSession struct { + Contract *LLOExposedVerifierTransactor + TransactOpts bind.TransactOpts +} + +type LLOExposedVerifierRaw struct { + Contract *LLOExposedVerifier +} + +type LLOExposedVerifierCallerRaw struct { + Contract *LLOExposedVerifierCaller +} + +type LLOExposedVerifierTransactorRaw struct { + Contract *LLOExposedVerifierTransactor +} + +func NewLLOExposedVerifier(address common.Address, backend bind.ContractBackend) (*LLOExposedVerifier, error) { + abi, err := abi.JSON(strings.NewReader(LLOExposedVerifierABI)) + if err != nil { + return nil, err + } + contract, err := bindLLOExposedVerifier(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LLOExposedVerifier{address: address, abi: abi, LLOExposedVerifierCaller: LLOExposedVerifierCaller{contract: contract}, LLOExposedVerifierTransactor: LLOExposedVerifierTransactor{contract: contract}, 
LLOExposedVerifierFilterer: LLOExposedVerifierFilterer{contract: contract}}, nil +} + +func NewLLOExposedVerifierCaller(address common.Address, caller bind.ContractCaller) (*LLOExposedVerifierCaller, error) { + contract, err := bindLLOExposedVerifier(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LLOExposedVerifierCaller{contract: contract}, nil +} + +func NewLLOExposedVerifierTransactor(address common.Address, transactor bind.ContractTransactor) (*LLOExposedVerifierTransactor, error) { + contract, err := bindLLOExposedVerifier(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LLOExposedVerifierTransactor{contract: contract}, nil +} + +func NewLLOExposedVerifierFilterer(address common.Address, filterer bind.ContractFilterer) (*LLOExposedVerifierFilterer, error) { + contract, err := bindLLOExposedVerifier(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LLOExposedVerifierFilterer{contract: contract}, nil +} + +func bindLLOExposedVerifier(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LLOExposedVerifierMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_LLOExposedVerifier *LLOExposedVerifierRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LLOExposedVerifier.Contract.LLOExposedVerifierCaller.contract.Call(opts, result, method, params...) 
+} + +func (_LLOExposedVerifier *LLOExposedVerifierRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LLOExposedVerifier.Contract.LLOExposedVerifierTransactor.contract.Transfer(opts) +} + +func (_LLOExposedVerifier *LLOExposedVerifierRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LLOExposedVerifier.Contract.LLOExposedVerifierTransactor.contract.Transact(opts, method, params...) +} + +func (_LLOExposedVerifier *LLOExposedVerifierCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LLOExposedVerifier.Contract.contract.Call(opts, result, method, params...) +} + +func (_LLOExposedVerifier *LLOExposedVerifierTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LLOExposedVerifier.Contract.contract.Transfer(opts) +} + +func (_LLOExposedVerifier *LLOExposedVerifierTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LLOExposedVerifier.Contract.contract.Transact(opts, method, params...) 
+} + +func (_LLOExposedVerifier *LLOExposedVerifierCaller) ExposedConfigDigestFromConfigData(opts *bind.CallOpts, _feedId [32]byte, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) { + var out []interface{} + err := _LLOExposedVerifier.contract.Call(opts, &out, "exposedConfigDigestFromConfigData", _feedId, _chainId, _contractAddress, _configCount, _signers, _offchainTransmitters, _f, _onchainConfig, _encodedConfigVersion, _encodedConfig) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_LLOExposedVerifier *LLOExposedVerifierSession) ExposedConfigDigestFromConfigData(_feedId [32]byte, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) { + return _LLOExposedVerifier.Contract.ExposedConfigDigestFromConfigData(&_LLOExposedVerifier.CallOpts, _feedId, _chainId, _contractAddress, _configCount, _signers, _offchainTransmitters, _f, _onchainConfig, _encodedConfigVersion, _encodedConfig) +} + +func (_LLOExposedVerifier *LLOExposedVerifierCallerSession) ExposedConfigDigestFromConfigData(_feedId [32]byte, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) { + return _LLOExposedVerifier.Contract.ExposedConfigDigestFromConfigData(&_LLOExposedVerifier.CallOpts, _feedId, _chainId, _contractAddress, _configCount, _signers, _offchainTransmitters, _f, _onchainConfig, _encodedConfigVersion, _encodedConfig) +} + +func 
(_LLOExposedVerifier *LLOExposedVerifier) Address() common.Address { + return _LLOExposedVerifier.address +} + +type LLOExposedVerifierInterface interface { + ExposedConfigDigestFromConfigData(opts *bind.CallOpts, _feedId [32]byte, _chainId *big.Int, _contractAddress common.Address, _configCount uint64, _signers []common.Address, _offchainTransmitters [][32]byte, _f uint8, _onchainConfig []byte, _encodedConfigVersion uint64, _encodedConfig []byte) ([32]byte, error) + + Address() common.Address +} diff --git a/core/gethwrappers/llo-feeds/generated/reward_manager/reward_manager.go b/core/gethwrappers/llo-feeds/generated/reward_manager/reward_manager.go new file mode 100644 index 00000000..420db949 --- /dev/null +++ b/core/gethwrappers/llo-feeds/generated/reward_manager/reward_manager.go @@ -0,0 +1,1420 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package reward_manager + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type CommonAddressAndWeight struct { + Addr common.Address + Weight uint64 +} + +type IRewardManagerFeePayment struct { + PoolId [32]byte + Amount *big.Int +} + +var RewardManagerMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"linkAddress\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"InvalidAddress\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPoolId\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPoolLength\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidWeights\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"Unauthorized\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newFeeManagerAddress\",\"type\":\"address\"}],\"name\":\"FeeManagerUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"poolId\",\"type\":\"bytes32\"},{\"internalType\":\"uint192\",\"name\":\"amount\",\"type\":\"uint192\"}],\"indexed\":false,\"internalType\":\"structIRewardManager.FeePayment[]\",\"name\":\"payments\",\"type\":\"tuple[]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"payer\",\"type\":\"address\"}],\"name\":\"FeePaid\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"poolId\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"weight\",\"type\":\"uint64\"}],\"indexed\":false,\"internalType\":\"structCommon.AddressAndWeight[]\",\"name\":\"newRewardRe
cipients\",\"type\":\"tuple[]\"}],\"name\":\"RewardRecipientsUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"poolId\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint192\",\"name\":\"quantity\",\"type\":\"uint192\"}],\"name\":\"RewardsClaimed\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[]\",\"name\":\"poolIds\",\"type\":\"bytes32[]\"}],\"name\":\"claimRewards\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"endIndex\",\"type\":\"uint256\"}],\"name\":\"getAvailableRewardPoolIds\",\"outputs\":[{\"internalType\":\"bytes32[]\",\"name\":\"\",\"type\":\"bytes32[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_linkAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"poolId\",\"type\":\"bytes32\"},{\"internalType\":\"uint192\",\"name\":\"amount\",\"type\":\"uint192\"}],\"internalType\":\"structIRewardManager.FeePayment[]\",\"name\":\"payments\",\"type\":\"tuple[]\"},{\"internalType\":\"address\",\"name\":\"payer\",\"type\":\"address\"}],\"name\":\"onFeePaid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\"
:\"poolId\",\"type\":\"bytes32\"},{\"internalType\":\"address[]\",\"name\":\"recipients\",\"type\":\"address[]\"}],\"name\":\"payRecipients\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_feeManagerAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_registeredPoolIds\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"s_rewardRecipientWeights\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"s_rewardRecipientWeightsSet\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"s_totalRewardRecipientFees\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"s_totalRewardRecipientFeesLastClaimedAmounts\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newFeeManagerAddress\",\"type\":\"address\"}],\"name\":\"setFeeManager\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs
\":[{\"internalType\":\"bytes32\",\"name\":\"poolId\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"weight\",\"type\":\"uint64\"}],\"internalType\":\"structCommon.AddressAndWeight[]\",\"name\":\"rewardRecipientAndWeights\",\"type\":\"tuple[]\"}],\"name\":\"setRewardRecipients\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"poolId\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"weight\",\"type\":\"uint64\"}],\"internalType\":\"structCommon.AddressAndWeight[]\",\"name\":\"newRewardRecipients\",\"type\":\"tuple[]\"}],\"name\":\"updateRewardRecipients\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60a06040523480156200001157600080fd5b50604051620020c2380380620020c28339810160408190526200003491620001a6565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000fb565b5050506001600160a01b038116620000e95760405163e6c4247b60e01b815260040160405180910390fd5b6001600160a01b0316608052620001d8565b336001600160a01b03821603620001555760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600060208284031215620001b957600080fd5b81516001600160a01b0381168114620001d157600080fd5b9392505050565b608051611ec062000202600039600081816103bd01528181610ce30152610f1e0152611ec06000f3fe608060405234801561001057600080fd5b50600436106101515760003560e01c80634d322084116100cd5780638da5cb5b11610081578063cd5f729211610066578063cd5f7292146103a5578063ea4b861b146103b8578063f2fde38b146103df57600080fd5b80638da5cb5b14610374578063b0d9fa191461039257600080fd5b806360122608116100b2578063601226081461031657806379ba5097146103415780638ac85a5c1461034957600080fd5b80634d322084146102e057806359256201146102f357600080fd5b8063276e7660116101245780634722647511610109578063472264751461029a578063472d35b9146102ba5780634944832f146102cd57600080fd5b8063276e76601461022757806339ee81e11461026c57600080fd5b806301ffc9a7146101565780630f3c34d1146101c057806314060f23146101d5578063181f5a77146101e8575b600080fd5b6101ab6101643660046117ac565b7fffffffff00000000000000000000000000000000000000000000000000000000167fb0d9fa19000000000000000000000000000000000000000000000000000000001490565b60405190151581526020015b60405180910390f35b6101d36101ce36600461186c565b6103f2565b005b6101d36101e336600461195e565b6
10400565b604080518082018252601381527f5265776172644d616e6167657220312e312e3000000000000000000000000000602082015290516101b791906119ce565b6007546102479073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016101b7565b61028c61027a366004611a1f565b60026020526000908152604090205481565b6040519081526020016101b7565b6102ad6102a8366004611a61565b6105b6565b6040516101b79190611a94565b6101d36102c8366004611ad8565b610740565b6101d36102db36600461195e565b61080e565b6101d36102ee366004611afa565b610957565b6101ab610301366004611a1f565b60056020526000908152604090205460ff1681565b61028c610324366004611b79565b600360209081526000928352604080842090915290825290205481565b6101d3610a96565b61028c610357366004611b79565b600460209081526000928352604080842090915290825290205481565b60005473ffffffffffffffffffffffffffffffffffffffff16610247565b6101d36103a0366004611ba5565b610b98565b61028c6103b3366004611a1f565b610d4c565b6102477f000000000000000000000000000000000000000000000000000000000000000081565b6101d36103ed366004611ad8565b610d6d565b6103fc3382610d81565b5050565b60005473ffffffffffffffffffffffffffffffffffffffff163314801590610440575060075473ffffffffffffffffffffffffffffffffffffffff163314155b15610477576040517f82b4290000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008190036104b2576040517fe6c4247b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008381526005602052604090205460ff16156104fb576040517f0afa7ee800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6006805460018181019092557ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f01849055600084815260056020526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169091179055610577838383670de0b6b3a7640000610f4e565b827f8f668d6090683f98b3373a8b83d214da45737f7486cb7de554cc07b54e61cfe683836040516105a9929190611c11565b60405180910390a2505050565b60065460609060008184116105cb57836105cd565b8
15b905080851115610609576040517fa22caccc00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006106158683611ca8565b67ffffffffffffffff81111561062d5761062d6117ee565b604051908082528060200260200182016040528015610656578160200160208202803683370190505b5090506000865b838110156107335760006006828154811061067a5761067a611cbb565b600091825260208083209091015480835260048252604080842073ffffffffffffffffffffffffffffffffffffffff8f16855290925291205490915015610722576000818152600260209081526040808320546003835281842073ffffffffffffffffffffffffffffffffffffffff8f168552909252909120548114610720578185858060010196508151811061071357610713611cbb565b6020026020010181815250505b505b5061072c81611cea565b905061065d565b5090979650505050505050565b610748611125565b73ffffffffffffffffffffffffffffffffffffffff8116610795576040517fe6c4247b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600780547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83169081179091556040519081527fe45f5e140399b0a7e12971ab020724b828fbed8ac408c420884dc7d1bbe506b49060200160405180910390a150565b610816611125565b60408051600180825281830190925260009160208083019080368337019050509050838160008151811061084c5761084c611cbb565b6020026020010181815250506000805b8381101561090957600085858381811061087857610878611cbb565b61088e9260206040909202019081019150611ad8565b600088815260046020908152604080832073ffffffffffffffffffffffffffffffffffffffff851684529091529020549091506108f28787858181106108d6576108d6611cbb565b6108ec9260206040909202019081019150611ad8565b86610d81565b5092909201915061090281611cea565b905061085c565b5061091685858584610f4e565b847f8f668d6090683f98b3373a8b83d214da45737f7486cb7de554cc07b54e61cfe68585604051610948929190611c11565b60405180910390a25050505050565b8261097760005473ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141580156109c957506000818152600
460209081526040808320338452909152902054155b15610a00576040517f82b4290000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b604080516001808252818301909252600091602080830190803683370190505090508481600081518110610a3657610a36611cbb565b60200260200101818152505060005b83811015610a8e57610a7d858583818110610a6257610a62611cbb565b9050602002016020810190610a779190611ad8565b83610d81565b50610a8781611cea565b9050610a45565b505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610b1c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60075473ffffffffffffffffffffffffffffffffffffffff163314610be9576040517f82b4290000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000805b83811015610cc857848482818110610c0757610c07611cbb565b9050604002016020016020810190610c1f9190611d4a565b77ffffffffffffffffffffffffffffffffffffffffffffffff1660026000878785818110610c4f57610c4f611cbb565b6040908102929092013583525060208201929092520160002080549091019055848482818110610c8157610c81611cbb565b9050604002016020016020810190610c999190611d4a565b77ffffffffffffffffffffffffffffffffffffffffffffffff168201915080610cc190611cea565b9050610bed565b50610d0b73ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000168330846111a8565b7fa1cc025ea76bacce5d740ee4bc331899375dc2c5f2ab33933aaacbd9ba001b66848484604051610d3e93929190611d65565b60405180910390a150505050565b60068181548110610d5c57600080fd5b600091825260209091200154905081565b610d75611125565b610d7e8161128a565b50565b60008060005b8351811015610efd576000848281518110610da45
7610da4611cbb565b6020026020010151905060006002600083815260200190815260200160002054905080600003610dd5575050610eed565b600082815260036020908152604080832073ffffffffffffffffffffffffffffffffffffffff8b16808552908352818420548685526004845282852091855292528220549083039190670de0b6b3a764000090830204905080600003610e3e5750505050610eed565b600084815260036020908152604080832073ffffffffffffffffffffffffffffffffffffffff8d168085529252909120849055885196820196899087908110610e8957610e89611cbb565b60200260200101517f989969655bc1d593922527fe85d71347bb8e12fa423cc71f362dd8ef7cb10ef283604051610ee0919077ffffffffffffffffffffffffffffffffffffffffffffffff91909116815260200190565b60405180910390a3505050505b610ef681611cea565b9050610d87565b508015610f4557610f4573ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016858361137f565b90505b92915050565b610fa98383808060200260200160405190810160405280939291908181526020016000905b82821015610f9f57610f9060408302860136819003810190611dec565b81526020019060010190610f73565b50505050506113da565b15610fe0576040517fe6c4247b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000805b838110156110e457600085858381811061100057611000611cbb565b90506040020160200160208101906110189190611e47565b67ffffffffffffffff169050600086868481811061103857611038611cbb565b61104e9260206040909202019081019150611ad8565b905073ffffffffffffffffffffffffffffffffffffffff811661109d576040517fe6c4247b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600088815260046020908152604080832073ffffffffffffffffffffffffffffffffffffffff90941683529290522081905591909101906110dd81611cea565b9050610fe4565b5081811461111e576040517f84677ce800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5050505050565b60005473ffffffffffffffffffffffffffffffffffffffff1633146111a6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c616
26c65206279206f776e6572000000000000000000006044820152606401610b13565b565b60405173ffffffffffffffffffffffffffffffffffffffff808516602483015283166044820152606481018290526112849085907f23b872dd00000000000000000000000000000000000000000000000000000000906084015b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff0000000000000000000000000000000000000000000000000000000090931692909217909152611491565b50505050565b3373ffffffffffffffffffffffffffffffffffffffff821603611309576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610b13565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60405173ffffffffffffffffffffffffffffffffffffffff83166024820152604481018290526113d59084907fa9059cbb0000000000000000000000000000000000000000000000000000000090606401611202565b505050565b6000805b82518110156114885760006113f4826001611e62565b90505b835181101561147f5783818151811061141257611412611cbb565b60200260200101516000015173ffffffffffffffffffffffffffffffffffffffff1684838151811061144657611446611cbb565b60200260200101516000015173ffffffffffffffffffffffffffffffffffffffff1603611477575060019392505050565b6001016113f7565b506001016113de565b50600092915050565b60006114f3826040518060400160405280602081526020017f5361666545524332303a206c6f772d6c6576656c2063616c6c206661696c65648152508573ffffffffffffffffffffffffffffffffffffffff1661159d9092919063ffffffff16565b8051909150156113d557808060200190518101906115119190611e75565b6113d5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f5361666545524332303a204552433230206f7065726174696f6e20646964206e604
48201527f6f742073756363656564000000000000000000000000000000000000000000006064820152608401610b13565b60606115ac84846000856115b4565b949350505050565b606082471015611646576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f416464726573733a20696e73756666696369656e742062616c616e636520666f60448201527f722063616c6c00000000000000000000000000000000000000000000000000006064820152608401610b13565b6000808673ffffffffffffffffffffffffffffffffffffffff16858760405161166f9190611e97565b60006040518083038185875af1925050503d80600081146116ac576040519150601f19603f3d011682016040523d82523d6000602084013e6116b1565b606091505b50915091506116c2878383876116cd565b979650505050505050565b6060831561176357825160000361175c5773ffffffffffffffffffffffffffffffffffffffff85163b61175c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e74726163740000006044820152606401610b13565b50816115ac565b6115ac83838151156117785781518083602001fd5b806040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b1391906119ce565b6000602082840312156117be57600080fd5b81357fffffffff0000000000000000000000000000000000000000000000000000000081168114610f4557600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715611864576118646117ee565b604052919050565b6000602080838503121561187f57600080fd5b823567ffffffffffffffff8082111561189757600080fd5b818501915085601f8301126118ab57600080fd5b8135818111156118bd576118bd6117ee565b8060051b91506118ce84830161181d565b81815291830184019184810190888411156118e857600080fd5b938501935b83851015611906578435825293850193908501906118ed565b98975050505050505050565b60008083601f84011261192457600080fd5b50813567ffffffffffffffff81111561193c57600080fd5b6020830191508360208260061b850101111561195757600
080fd5b9250929050565b60008060006040848603121561197357600080fd5b83359250602084013567ffffffffffffffff81111561199157600080fd5b61199d86828701611912565b9497909650939450505050565b60005b838110156119c55781810151838201526020016119ad565b50506000910152565b60208152600082518060208401526119ed8160408501602087016119aa565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b600060208284031215611a3157600080fd5b5035919050565b803573ffffffffffffffffffffffffffffffffffffffff81168114611a5c57600080fd5b919050565b600080600060608486031215611a7657600080fd5b611a7f84611a38565b95602085013595506040909401359392505050565b6020808252825182820181905260009190848201906040850190845b81811015611acc57835183529284019291840191600101611ab0565b50909695505050505050565b600060208284031215611aea57600080fd5b611af382611a38565b9392505050565b600080600060408486031215611b0f57600080fd5b83359250602084013567ffffffffffffffff80821115611b2e57600080fd5b818601915086601f830112611b4257600080fd5b813581811115611b5157600080fd5b8760208260051b8501011115611b6657600080fd5b6020830194508093505050509250925092565b60008060408385031215611b8c57600080fd5b82359150611b9c60208401611a38565b90509250929050565b600080600060408486031215611bba57600080fd5b833567ffffffffffffffff811115611bd157600080fd5b611bdd86828701611912565b9094509250611bf0905060208501611a38565b90509250925092565b803567ffffffffffffffff81168114611a5c57600080fd5b6020808252818101839052600090604080840186845b878110156107335773ffffffffffffffffffffffffffffffffffffffff611c4d83611a38565b16835267ffffffffffffffff611c64868401611bf9565b16838601529183019190830190600101611c27565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b81810381811115610f4857610f48611c79565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611d1b57611d1b611c79565b5060010190565b803577fffffffffffffffffffffffffffffffffffffffffffff
fff81168114611a5c57600080fd5b600060208284031215611d5c57600080fd5b611af382611d22565b60408082528181018490526000908560608401835b87811015611dc15782358252602077ffffffffffffffffffffffffffffffffffffffffffffffff611dac828601611d22565b16908301529183019190830190600101611d7a565b5080935050505073ffffffffffffffffffffffffffffffffffffffff83166020830152949350505050565b600060408284031215611dfe57600080fd5b6040516040810181811067ffffffffffffffff82111715611e2157611e216117ee565b604052611e2d83611a38565b8152611e3b60208401611bf9565b60208201529392505050565b600060208284031215611e5957600080fd5b611af382611bf9565b80820180821115610f4857610f48611c79565b600060208284031215611e8757600080fd5b81518015158114610f4557600080fd5b60008251611ea98184602087016119aa565b919091019291505056fea164736f6c6343000813000a", +} + +var RewardManagerABI = RewardManagerMetaData.ABI + +var RewardManagerBin = RewardManagerMetaData.Bin + +func DeployRewardManager(auth *bind.TransactOpts, backend bind.ContractBackend, linkAddress common.Address) (common.Address, *types.Transaction, *RewardManager, error) { + parsed, err := RewardManagerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(RewardManagerBin), backend, linkAddress) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &RewardManager{address: address, abi: *parsed, RewardManagerCaller: RewardManagerCaller{contract: contract}, RewardManagerTransactor: RewardManagerTransactor{contract: contract}, RewardManagerFilterer: RewardManagerFilterer{contract: contract}}, nil +} + +type RewardManager struct { + address common.Address + abi abi.ABI + RewardManagerCaller + RewardManagerTransactor + RewardManagerFilterer +} + +type RewardManagerCaller struct { + contract *bind.BoundContract +} + +type RewardManagerTransactor struct { + contract 
*bind.BoundContract +} + +type RewardManagerFilterer struct { + contract *bind.BoundContract +} + +type RewardManagerSession struct { + Contract *RewardManager + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type RewardManagerCallerSession struct { + Contract *RewardManagerCaller + CallOpts bind.CallOpts +} + +type RewardManagerTransactorSession struct { + Contract *RewardManagerTransactor + TransactOpts bind.TransactOpts +} + +type RewardManagerRaw struct { + Contract *RewardManager +} + +type RewardManagerCallerRaw struct { + Contract *RewardManagerCaller +} + +type RewardManagerTransactorRaw struct { + Contract *RewardManagerTransactor +} + +func NewRewardManager(address common.Address, backend bind.ContractBackend) (*RewardManager, error) { + abi, err := abi.JSON(strings.NewReader(RewardManagerABI)) + if err != nil { + return nil, err + } + contract, err := bindRewardManager(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &RewardManager{address: address, abi: abi, RewardManagerCaller: RewardManagerCaller{contract: contract}, RewardManagerTransactor: RewardManagerTransactor{contract: contract}, RewardManagerFilterer: RewardManagerFilterer{contract: contract}}, nil +} + +func NewRewardManagerCaller(address common.Address, caller bind.ContractCaller) (*RewardManagerCaller, error) { + contract, err := bindRewardManager(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &RewardManagerCaller{contract: contract}, nil +} + +func NewRewardManagerTransactor(address common.Address, transactor bind.ContractTransactor) (*RewardManagerTransactor, error) { + contract, err := bindRewardManager(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &RewardManagerTransactor{contract: contract}, nil +} + +func NewRewardManagerFilterer(address common.Address, filterer bind.ContractFilterer) (*RewardManagerFilterer, error) { + contract, err := bindRewardManager(address, nil, nil, 
filterer) + if err != nil { + return nil, err + } + return &RewardManagerFilterer{contract: contract}, nil +} + +func bindRewardManager(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := RewardManagerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_RewardManager *RewardManagerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _RewardManager.Contract.RewardManagerCaller.contract.Call(opts, result, method, params...) +} + +func (_RewardManager *RewardManagerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _RewardManager.Contract.RewardManagerTransactor.contract.Transfer(opts) +} + +func (_RewardManager *RewardManagerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _RewardManager.Contract.RewardManagerTransactor.contract.Transact(opts, method, params...) +} + +func (_RewardManager *RewardManagerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _RewardManager.Contract.contract.Call(opts, result, method, params...) +} + +func (_RewardManager *RewardManagerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _RewardManager.Contract.contract.Transfer(opts) +} + +func (_RewardManager *RewardManagerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _RewardManager.Contract.contract.Transact(opts, method, params...) 
+} + +func (_RewardManager *RewardManagerCaller) GetAvailableRewardPoolIds(opts *bind.CallOpts, recipient common.Address, startIndex *big.Int, endIndex *big.Int) ([][32]byte, error) { + var out []interface{} + err := _RewardManager.contract.Call(opts, &out, "getAvailableRewardPoolIds", recipient, startIndex, endIndex) + + if err != nil { + return *new([][32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([][32]byte)).(*[][32]byte) + + return out0, err + +} + +func (_RewardManager *RewardManagerSession) GetAvailableRewardPoolIds(recipient common.Address, startIndex *big.Int, endIndex *big.Int) ([][32]byte, error) { + return _RewardManager.Contract.GetAvailableRewardPoolIds(&_RewardManager.CallOpts, recipient, startIndex, endIndex) +} + +func (_RewardManager *RewardManagerCallerSession) GetAvailableRewardPoolIds(recipient common.Address, startIndex *big.Int, endIndex *big.Int) ([][32]byte, error) { + return _RewardManager.Contract.GetAvailableRewardPoolIds(&_RewardManager.CallOpts, recipient, startIndex, endIndex) +} + +func (_RewardManager *RewardManagerCaller) ILinkAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _RewardManager.contract.Call(opts, &out, "i_linkAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_RewardManager *RewardManagerSession) ILinkAddress() (common.Address, error) { + return _RewardManager.Contract.ILinkAddress(&_RewardManager.CallOpts) +} + +func (_RewardManager *RewardManagerCallerSession) ILinkAddress() (common.Address, error) { + return _RewardManager.Contract.ILinkAddress(&_RewardManager.CallOpts) +} + +func (_RewardManager *RewardManagerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _RewardManager.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := 
*abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_RewardManager *RewardManagerSession) Owner() (common.Address, error) { + return _RewardManager.Contract.Owner(&_RewardManager.CallOpts) +} + +func (_RewardManager *RewardManagerCallerSession) Owner() (common.Address, error) { + return _RewardManager.Contract.Owner(&_RewardManager.CallOpts) +} + +func (_RewardManager *RewardManagerCaller) SFeeManagerAddress(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _RewardManager.contract.Call(opts, &out, "s_feeManagerAddress") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_RewardManager *RewardManagerSession) SFeeManagerAddress() (common.Address, error) { + return _RewardManager.Contract.SFeeManagerAddress(&_RewardManager.CallOpts) +} + +func (_RewardManager *RewardManagerCallerSession) SFeeManagerAddress() (common.Address, error) { + return _RewardManager.Contract.SFeeManagerAddress(&_RewardManager.CallOpts) +} + +func (_RewardManager *RewardManagerCaller) SRegisteredPoolIds(opts *bind.CallOpts, arg0 *big.Int) ([32]byte, error) { + var out []interface{} + err := _RewardManager.contract.Call(opts, &out, "s_registeredPoolIds", arg0) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_RewardManager *RewardManagerSession) SRegisteredPoolIds(arg0 *big.Int) ([32]byte, error) { + return _RewardManager.Contract.SRegisteredPoolIds(&_RewardManager.CallOpts, arg0) +} + +func (_RewardManager *RewardManagerCallerSession) SRegisteredPoolIds(arg0 *big.Int) ([32]byte, error) { + return _RewardManager.Contract.SRegisteredPoolIds(&_RewardManager.CallOpts, arg0) +} + +func (_RewardManager *RewardManagerCaller) SRewardRecipientWeights(opts *bind.CallOpts, arg0 [32]byte, arg1 common.Address) 
(*big.Int, error) { + var out []interface{} + err := _RewardManager.contract.Call(opts, &out, "s_rewardRecipientWeights", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_RewardManager *RewardManagerSession) SRewardRecipientWeights(arg0 [32]byte, arg1 common.Address) (*big.Int, error) { + return _RewardManager.Contract.SRewardRecipientWeights(&_RewardManager.CallOpts, arg0, arg1) +} + +func (_RewardManager *RewardManagerCallerSession) SRewardRecipientWeights(arg0 [32]byte, arg1 common.Address) (*big.Int, error) { + return _RewardManager.Contract.SRewardRecipientWeights(&_RewardManager.CallOpts, arg0, arg1) +} + +func (_RewardManager *RewardManagerCaller) SRewardRecipientWeightsSet(opts *bind.CallOpts, arg0 [32]byte) (bool, error) { + var out []interface{} + err := _RewardManager.contract.Call(opts, &out, "s_rewardRecipientWeightsSet", arg0) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_RewardManager *RewardManagerSession) SRewardRecipientWeightsSet(arg0 [32]byte) (bool, error) { + return _RewardManager.Contract.SRewardRecipientWeightsSet(&_RewardManager.CallOpts, arg0) +} + +func (_RewardManager *RewardManagerCallerSession) SRewardRecipientWeightsSet(arg0 [32]byte) (bool, error) { + return _RewardManager.Contract.SRewardRecipientWeightsSet(&_RewardManager.CallOpts, arg0) +} + +func (_RewardManager *RewardManagerCaller) STotalRewardRecipientFees(opts *bind.CallOpts, arg0 [32]byte) (*big.Int, error) { + var out []interface{} + err := _RewardManager.contract.Call(opts, &out, "s_totalRewardRecipientFees", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_RewardManager *RewardManagerSession) STotalRewardRecipientFees(arg0 [32]byte) (*big.Int, error) { + 
return _RewardManager.Contract.STotalRewardRecipientFees(&_RewardManager.CallOpts, arg0) +} + +func (_RewardManager *RewardManagerCallerSession) STotalRewardRecipientFees(arg0 [32]byte) (*big.Int, error) { + return _RewardManager.Contract.STotalRewardRecipientFees(&_RewardManager.CallOpts, arg0) +} + +func (_RewardManager *RewardManagerCaller) STotalRewardRecipientFeesLastClaimedAmounts(opts *bind.CallOpts, arg0 [32]byte, arg1 common.Address) (*big.Int, error) { + var out []interface{} + err := _RewardManager.contract.Call(opts, &out, "s_totalRewardRecipientFeesLastClaimedAmounts", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_RewardManager *RewardManagerSession) STotalRewardRecipientFeesLastClaimedAmounts(arg0 [32]byte, arg1 common.Address) (*big.Int, error) { + return _RewardManager.Contract.STotalRewardRecipientFeesLastClaimedAmounts(&_RewardManager.CallOpts, arg0, arg1) +} + +func (_RewardManager *RewardManagerCallerSession) STotalRewardRecipientFeesLastClaimedAmounts(arg0 [32]byte, arg1 common.Address) (*big.Int, error) { + return _RewardManager.Contract.STotalRewardRecipientFeesLastClaimedAmounts(&_RewardManager.CallOpts, arg0, arg1) +} + +func (_RewardManager *RewardManagerCaller) SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) { + var out []interface{} + err := _RewardManager.contract.Call(opts, &out, "supportsInterface", interfaceId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_RewardManager *RewardManagerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _RewardManager.Contract.SupportsInterface(&_RewardManager.CallOpts, interfaceId) +} + +func (_RewardManager *RewardManagerCallerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return 
_RewardManager.Contract.SupportsInterface(&_RewardManager.CallOpts, interfaceId) +} + +func (_RewardManager *RewardManagerCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _RewardManager.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_RewardManager *RewardManagerSession) TypeAndVersion() (string, error) { + return _RewardManager.Contract.TypeAndVersion(&_RewardManager.CallOpts) +} + +func (_RewardManager *RewardManagerCallerSession) TypeAndVersion() (string, error) { + return _RewardManager.Contract.TypeAndVersion(&_RewardManager.CallOpts) +} + +func (_RewardManager *RewardManagerTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _RewardManager.contract.Transact(opts, "acceptOwnership") +} + +func (_RewardManager *RewardManagerSession) AcceptOwnership() (*types.Transaction, error) { + return _RewardManager.Contract.AcceptOwnership(&_RewardManager.TransactOpts) +} + +func (_RewardManager *RewardManagerTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _RewardManager.Contract.AcceptOwnership(&_RewardManager.TransactOpts) +} + +func (_RewardManager *RewardManagerTransactor) ClaimRewards(opts *bind.TransactOpts, poolIds [][32]byte) (*types.Transaction, error) { + return _RewardManager.contract.Transact(opts, "claimRewards", poolIds) +} + +func (_RewardManager *RewardManagerSession) ClaimRewards(poolIds [][32]byte) (*types.Transaction, error) { + return _RewardManager.Contract.ClaimRewards(&_RewardManager.TransactOpts, poolIds) +} + +func (_RewardManager *RewardManagerTransactorSession) ClaimRewards(poolIds [][32]byte) (*types.Transaction, error) { + return _RewardManager.Contract.ClaimRewards(&_RewardManager.TransactOpts, poolIds) +} + +func (_RewardManager *RewardManagerTransactor) OnFeePaid(opts *bind.TransactOpts, 
payments []IRewardManagerFeePayment, payer common.Address) (*types.Transaction, error) { + return _RewardManager.contract.Transact(opts, "onFeePaid", payments, payer) +} + +func (_RewardManager *RewardManagerSession) OnFeePaid(payments []IRewardManagerFeePayment, payer common.Address) (*types.Transaction, error) { + return _RewardManager.Contract.OnFeePaid(&_RewardManager.TransactOpts, payments, payer) +} + +func (_RewardManager *RewardManagerTransactorSession) OnFeePaid(payments []IRewardManagerFeePayment, payer common.Address) (*types.Transaction, error) { + return _RewardManager.Contract.OnFeePaid(&_RewardManager.TransactOpts, payments, payer) +} + +func (_RewardManager *RewardManagerTransactor) PayRecipients(opts *bind.TransactOpts, poolId [32]byte, recipients []common.Address) (*types.Transaction, error) { + return _RewardManager.contract.Transact(opts, "payRecipients", poolId, recipients) +} + +func (_RewardManager *RewardManagerSession) PayRecipients(poolId [32]byte, recipients []common.Address) (*types.Transaction, error) { + return _RewardManager.Contract.PayRecipients(&_RewardManager.TransactOpts, poolId, recipients) +} + +func (_RewardManager *RewardManagerTransactorSession) PayRecipients(poolId [32]byte, recipients []common.Address) (*types.Transaction, error) { + return _RewardManager.Contract.PayRecipients(&_RewardManager.TransactOpts, poolId, recipients) +} + +func (_RewardManager *RewardManagerTransactor) SetFeeManager(opts *bind.TransactOpts, newFeeManagerAddress common.Address) (*types.Transaction, error) { + return _RewardManager.contract.Transact(opts, "setFeeManager", newFeeManagerAddress) +} + +func (_RewardManager *RewardManagerSession) SetFeeManager(newFeeManagerAddress common.Address) (*types.Transaction, error) { + return _RewardManager.Contract.SetFeeManager(&_RewardManager.TransactOpts, newFeeManagerAddress) +} + +func (_RewardManager *RewardManagerTransactorSession) SetFeeManager(newFeeManagerAddress common.Address) (*types.Transaction, 
error) { + return _RewardManager.Contract.SetFeeManager(&_RewardManager.TransactOpts, newFeeManagerAddress) +} + +func (_RewardManager *RewardManagerTransactor) SetRewardRecipients(opts *bind.TransactOpts, poolId [32]byte, rewardRecipientAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _RewardManager.contract.Transact(opts, "setRewardRecipients", poolId, rewardRecipientAndWeights) +} + +func (_RewardManager *RewardManagerSession) SetRewardRecipients(poolId [32]byte, rewardRecipientAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _RewardManager.Contract.SetRewardRecipients(&_RewardManager.TransactOpts, poolId, rewardRecipientAndWeights) +} + +func (_RewardManager *RewardManagerTransactorSession) SetRewardRecipients(poolId [32]byte, rewardRecipientAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _RewardManager.Contract.SetRewardRecipients(&_RewardManager.TransactOpts, poolId, rewardRecipientAndWeights) +} + +func (_RewardManager *RewardManagerTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _RewardManager.contract.Transact(opts, "transferOwnership", to) +} + +func (_RewardManager *RewardManagerSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _RewardManager.Contract.TransferOwnership(&_RewardManager.TransactOpts, to) +} + +func (_RewardManager *RewardManagerTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _RewardManager.Contract.TransferOwnership(&_RewardManager.TransactOpts, to) +} + +func (_RewardManager *RewardManagerTransactor) UpdateRewardRecipients(opts *bind.TransactOpts, poolId [32]byte, newRewardRecipients []CommonAddressAndWeight) (*types.Transaction, error) { + return _RewardManager.contract.Transact(opts, "updateRewardRecipients", poolId, newRewardRecipients) +} + +func (_RewardManager *RewardManagerSession) 
UpdateRewardRecipients(poolId [32]byte, newRewardRecipients []CommonAddressAndWeight) (*types.Transaction, error) { + return _RewardManager.Contract.UpdateRewardRecipients(&_RewardManager.TransactOpts, poolId, newRewardRecipients) +} + +func (_RewardManager *RewardManagerTransactorSession) UpdateRewardRecipients(poolId [32]byte, newRewardRecipients []CommonAddressAndWeight) (*types.Transaction, error) { + return _RewardManager.Contract.UpdateRewardRecipients(&_RewardManager.TransactOpts, poolId, newRewardRecipients) +} + +type RewardManagerFeeManagerUpdatedIterator struct { + Event *RewardManagerFeeManagerUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *RewardManagerFeeManagerUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(RewardManagerFeeManagerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(RewardManagerFeeManagerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *RewardManagerFeeManagerUpdatedIterator) Error() error { + return it.fail +} + +func (it *RewardManagerFeeManagerUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type RewardManagerFeeManagerUpdated struct { + NewFeeManagerAddress common.Address + Raw types.Log +} + +func (_RewardManager *RewardManagerFilterer) FilterFeeManagerUpdated(opts *bind.FilterOpts) (*RewardManagerFeeManagerUpdatedIterator, error) { + + logs, sub, err := _RewardManager.contract.FilterLogs(opts, "FeeManagerUpdated") + if 
err != nil { + return nil, err + } + return &RewardManagerFeeManagerUpdatedIterator{contract: _RewardManager.contract, event: "FeeManagerUpdated", logs: logs, sub: sub}, nil +} + +func (_RewardManager *RewardManagerFilterer) WatchFeeManagerUpdated(opts *bind.WatchOpts, sink chan<- *RewardManagerFeeManagerUpdated) (event.Subscription, error) { + + logs, sub, err := _RewardManager.contract.WatchLogs(opts, "FeeManagerUpdated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(RewardManagerFeeManagerUpdated) + if err := _RewardManager.contract.UnpackLog(event, "FeeManagerUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_RewardManager *RewardManagerFilterer) ParseFeeManagerUpdated(log types.Log) (*RewardManagerFeeManagerUpdated, error) { + event := new(RewardManagerFeeManagerUpdated) + if err := _RewardManager.contract.UnpackLog(event, "FeeManagerUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type RewardManagerFeePaidIterator struct { + Event *RewardManagerFeePaid + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *RewardManagerFeePaidIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(RewardManagerFeePaid) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(RewardManagerFeePaid) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *RewardManagerFeePaidIterator) Error() error { + return it.fail +} + +func (it *RewardManagerFeePaidIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type RewardManagerFeePaid struct { + Payments []IRewardManagerFeePayment + Payer common.Address + Raw types.Log +} + +func (_RewardManager *RewardManagerFilterer) FilterFeePaid(opts *bind.FilterOpts) (*RewardManagerFeePaidIterator, error) { + + logs, sub, err := _RewardManager.contract.FilterLogs(opts, "FeePaid") + if err != nil { + return nil, err + } + return &RewardManagerFeePaidIterator{contract: _RewardManager.contract, event: "FeePaid", logs: logs, sub: sub}, nil +} + +func (_RewardManager *RewardManagerFilterer) WatchFeePaid(opts *bind.WatchOpts, sink chan<- *RewardManagerFeePaid) (event.Subscription, error) { + + logs, sub, err := _RewardManager.contract.WatchLogs(opts, "FeePaid") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(RewardManagerFeePaid) + if err := _RewardManager.contract.UnpackLog(event, "FeePaid", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_RewardManager *RewardManagerFilterer) ParseFeePaid(log types.Log) (*RewardManagerFeePaid, error) { + event := new(RewardManagerFeePaid) + if err := _RewardManager.contract.UnpackLog(event, "FeePaid", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type RewardManagerOwnershipTransferRequestedIterator struct { + Event 
*RewardManagerOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *RewardManagerOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(RewardManagerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(RewardManagerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *RewardManagerOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *RewardManagerOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type RewardManagerOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_RewardManager *RewardManagerFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*RewardManagerOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _RewardManager.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &RewardManagerOwnershipTransferRequestedIterator{contract: _RewardManager.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_RewardManager 
*RewardManagerFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *RewardManagerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _RewardManager.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(RewardManagerOwnershipTransferRequested) + if err := _RewardManager.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_RewardManager *RewardManagerFilterer) ParseOwnershipTransferRequested(log types.Log) (*RewardManagerOwnershipTransferRequested, error) { + event := new(RewardManagerOwnershipTransferRequested) + if err := _RewardManager.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type RewardManagerOwnershipTransferredIterator struct { + Event *RewardManagerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *RewardManagerOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(RewardManagerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return 
false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(RewardManagerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *RewardManagerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *RewardManagerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type RewardManagerOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_RewardManager *RewardManagerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*RewardManagerOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _RewardManager.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &RewardManagerOwnershipTransferredIterator{contract: _RewardManager.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_RewardManager *RewardManagerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *RewardManagerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _RewardManager.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(RewardManagerOwnershipTransferred) + if err := _RewardManager.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_RewardManager *RewardManagerFilterer) ParseOwnershipTransferred(log types.Log) (*RewardManagerOwnershipTransferred, error) { + event := new(RewardManagerOwnershipTransferred) + if err := _RewardManager.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type RewardManagerRewardRecipientsUpdatedIterator struct { + Event *RewardManagerRewardRecipientsUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *RewardManagerRewardRecipientsUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(RewardManagerRewardRecipientsUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(RewardManagerRewardRecipientsUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *RewardManagerRewardRecipientsUpdatedIterator) Error() error { + return it.fail +} + +func (it *RewardManagerRewardRecipientsUpdatedIterator) Close() error { + 
it.sub.Unsubscribe() + return nil +} + +type RewardManagerRewardRecipientsUpdated struct { + PoolId [32]byte + NewRewardRecipients []CommonAddressAndWeight + Raw types.Log +} + +func (_RewardManager *RewardManagerFilterer) FilterRewardRecipientsUpdated(opts *bind.FilterOpts, poolId [][32]byte) (*RewardManagerRewardRecipientsUpdatedIterator, error) { + + var poolIdRule []interface{} + for _, poolIdItem := range poolId { + poolIdRule = append(poolIdRule, poolIdItem) + } + + logs, sub, err := _RewardManager.contract.FilterLogs(opts, "RewardRecipientsUpdated", poolIdRule) + if err != nil { + return nil, err + } + return &RewardManagerRewardRecipientsUpdatedIterator{contract: _RewardManager.contract, event: "RewardRecipientsUpdated", logs: logs, sub: sub}, nil +} + +func (_RewardManager *RewardManagerFilterer) WatchRewardRecipientsUpdated(opts *bind.WatchOpts, sink chan<- *RewardManagerRewardRecipientsUpdated, poolId [][32]byte) (event.Subscription, error) { + + var poolIdRule []interface{} + for _, poolIdItem := range poolId { + poolIdRule = append(poolIdRule, poolIdItem) + } + + logs, sub, err := _RewardManager.contract.WatchLogs(opts, "RewardRecipientsUpdated", poolIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(RewardManagerRewardRecipientsUpdated) + if err := _RewardManager.contract.UnpackLog(event, "RewardRecipientsUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_RewardManager *RewardManagerFilterer) ParseRewardRecipientsUpdated(log types.Log) (*RewardManagerRewardRecipientsUpdated, error) { + event := new(RewardManagerRewardRecipientsUpdated) + if err := _RewardManager.contract.UnpackLog(event, 
"RewardRecipientsUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type RewardManagerRewardsClaimedIterator struct { + Event *RewardManagerRewardsClaimed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *RewardManagerRewardsClaimedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(RewardManagerRewardsClaimed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(RewardManagerRewardsClaimed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *RewardManagerRewardsClaimedIterator) Error() error { + return it.fail +} + +func (it *RewardManagerRewardsClaimedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type RewardManagerRewardsClaimed struct { + PoolId [32]byte + Recipient common.Address + Quantity *big.Int + Raw types.Log +} + +func (_RewardManager *RewardManagerFilterer) FilterRewardsClaimed(opts *bind.FilterOpts, poolId [][32]byte, recipient []common.Address) (*RewardManagerRewardsClaimedIterator, error) { + + var poolIdRule []interface{} + for _, poolIdItem := range poolId { + poolIdRule = append(poolIdRule, poolIdItem) + } + var recipientRule []interface{} + for _, recipientItem := range recipient { + recipientRule = append(recipientRule, recipientItem) + } + + logs, sub, err := _RewardManager.contract.FilterLogs(opts, "RewardsClaimed", poolIdRule, recipientRule) + if err != nil { + return nil, err + } + return &RewardManagerRewardsClaimedIterator{contract: 
_RewardManager.contract, event: "RewardsClaimed", logs: logs, sub: sub}, nil +} + +func (_RewardManager *RewardManagerFilterer) WatchRewardsClaimed(opts *bind.WatchOpts, sink chan<- *RewardManagerRewardsClaimed, poolId [][32]byte, recipient []common.Address) (event.Subscription, error) { + + var poolIdRule []interface{} + for _, poolIdItem := range poolId { + poolIdRule = append(poolIdRule, poolIdItem) + } + var recipientRule []interface{} + for _, recipientItem := range recipient { + recipientRule = append(recipientRule, recipientItem) + } + + logs, sub, err := _RewardManager.contract.WatchLogs(opts, "RewardsClaimed", poolIdRule, recipientRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(RewardManagerRewardsClaimed) + if err := _RewardManager.contract.UnpackLog(event, "RewardsClaimed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_RewardManager *RewardManagerFilterer) ParseRewardsClaimed(log types.Log) (*RewardManagerRewardsClaimed, error) { + event := new(RewardManagerRewardsClaimed) + if err := _RewardManager.contract.UnpackLog(event, "RewardsClaimed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_RewardManager *RewardManager) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _RewardManager.abi.Events["FeeManagerUpdated"].ID: + return _RewardManager.ParseFeeManagerUpdated(log) + case _RewardManager.abi.Events["FeePaid"].ID: + return _RewardManager.ParseFeePaid(log) + case _RewardManager.abi.Events["OwnershipTransferRequested"].ID: + return _RewardManager.ParseOwnershipTransferRequested(log) + case 
_RewardManager.abi.Events["OwnershipTransferred"].ID: + return _RewardManager.ParseOwnershipTransferred(log) + case _RewardManager.abi.Events["RewardRecipientsUpdated"].ID: + return _RewardManager.ParseRewardRecipientsUpdated(log) + case _RewardManager.abi.Events["RewardsClaimed"].ID: + return _RewardManager.ParseRewardsClaimed(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (RewardManagerFeeManagerUpdated) Topic() common.Hash { + return common.HexToHash("0xe45f5e140399b0a7e12971ab020724b828fbed8ac408c420884dc7d1bbe506b4") +} + +func (RewardManagerFeePaid) Topic() common.Hash { + return common.HexToHash("0xa1cc025ea76bacce5d740ee4bc331899375dc2c5f2ab33933aaacbd9ba001b66") +} + +func (RewardManagerOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (RewardManagerOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (RewardManagerRewardRecipientsUpdated) Topic() common.Hash { + return common.HexToHash("0x8f668d6090683f98b3373a8b83d214da45737f7486cb7de554cc07b54e61cfe6") +} + +func (RewardManagerRewardsClaimed) Topic() common.Hash { + return common.HexToHash("0x989969655bc1d593922527fe85d71347bb8e12fa423cc71f362dd8ef7cb10ef2") +} + +func (_RewardManager *RewardManager) Address() common.Address { + return _RewardManager.address +} + +type RewardManagerInterface interface { + GetAvailableRewardPoolIds(opts *bind.CallOpts, recipient common.Address, startIndex *big.Int, endIndex *big.Int) ([][32]byte, error) + + ILinkAddress(opts *bind.CallOpts) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SFeeManagerAddress(opts *bind.CallOpts) (common.Address, error) + + SRegisteredPoolIds(opts *bind.CallOpts, arg0 *big.Int) ([32]byte, error) + + SRewardRecipientWeights(opts 
*bind.CallOpts, arg0 [32]byte, arg1 common.Address) (*big.Int, error) + + SRewardRecipientWeightsSet(opts *bind.CallOpts, arg0 [32]byte) (bool, error) + + STotalRewardRecipientFees(opts *bind.CallOpts, arg0 [32]byte) (*big.Int, error) + + STotalRewardRecipientFeesLastClaimedAmounts(opts *bind.CallOpts, arg0 [32]byte, arg1 common.Address) (*big.Int, error) + + SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + ClaimRewards(opts *bind.TransactOpts, poolIds [][32]byte) (*types.Transaction, error) + + OnFeePaid(opts *bind.TransactOpts, payments []IRewardManagerFeePayment, payer common.Address) (*types.Transaction, error) + + PayRecipients(opts *bind.TransactOpts, poolId [32]byte, recipients []common.Address) (*types.Transaction, error) + + SetFeeManager(opts *bind.TransactOpts, newFeeManagerAddress common.Address) (*types.Transaction, error) + + SetRewardRecipients(opts *bind.TransactOpts, poolId [32]byte, rewardRecipientAndWeights []CommonAddressAndWeight) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + UpdateRewardRecipients(opts *bind.TransactOpts, poolId [32]byte, newRewardRecipients []CommonAddressAndWeight) (*types.Transaction, error) + + FilterFeeManagerUpdated(opts *bind.FilterOpts) (*RewardManagerFeeManagerUpdatedIterator, error) + + WatchFeeManagerUpdated(opts *bind.WatchOpts, sink chan<- *RewardManagerFeeManagerUpdated) (event.Subscription, error) + + ParseFeeManagerUpdated(log types.Log) (*RewardManagerFeeManagerUpdated, error) + + FilterFeePaid(opts *bind.FilterOpts) (*RewardManagerFeePaidIterator, error) + + WatchFeePaid(opts *bind.WatchOpts, sink chan<- *RewardManagerFeePaid) (event.Subscription, error) + + ParseFeePaid(log types.Log) (*RewardManagerFeePaid, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, 
from []common.Address, to []common.Address) (*RewardManagerOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *RewardManagerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*RewardManagerOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*RewardManagerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *RewardManagerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*RewardManagerOwnershipTransferred, error) + + FilterRewardRecipientsUpdated(opts *bind.FilterOpts, poolId [][32]byte) (*RewardManagerRewardRecipientsUpdatedIterator, error) + + WatchRewardRecipientsUpdated(opts *bind.WatchOpts, sink chan<- *RewardManagerRewardRecipientsUpdated, poolId [][32]byte) (event.Subscription, error) + + ParseRewardRecipientsUpdated(log types.Log) (*RewardManagerRewardRecipientsUpdated, error) + + FilterRewardsClaimed(opts *bind.FilterOpts, poolId [][32]byte, recipient []common.Address) (*RewardManagerRewardsClaimedIterator, error) + + WatchRewardsClaimed(opts *bind.WatchOpts, sink chan<- *RewardManagerRewardsClaimed, poolId [][32]byte, recipient []common.Address) (event.Subscription, error) + + ParseRewardsClaimed(log types.Log) (*RewardManagerRewardsClaimed, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/llo-feeds/generated/verifier/verifier.go b/core/gethwrappers/llo-feeds/generated/verifier/verifier.go new file mode 100644 index 00000000..7ce275f9 --- /dev/null +++ b/core/gethwrappers/llo-feeds/generated/verifier/verifier.go @@ -0,0 +1,1616 @@ +// Code generated - DO NOT EDIT. 
+// This file is a generated binding and any manual changes will be lost. + +package verifier + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type CommonAddressAndWeight struct { + Addr common.Address + Weight uint64 +} + +var VerifierMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"verifierProxyAddr\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AccessForbidden\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"BadVerification\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"CannotDeactivateLatestConfig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DigestEmpty\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"DigestInactive\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"DigestNotSet\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numSigners\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxSigners\",\"type\":\"uint256\"}],\"nam
e\":\"ExcessSigners\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FaultToleranceMustBePositive\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FeedIdEmpty\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"InactiveFeed\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numSigners\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"expectedNumSigners\",\"type\":\"uint256\"}],\"name\":\"IncorrectSignatureCount\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numSigners\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"minSigners\",\"type\":\"uint256\"}],\"name\":\"InsufficientSigners\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"InvalidFeed\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"rsLength\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"ssLength\",\"type\":\"uint256\"}],\"name\":\"MismatchedSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NonUniqueSignatures\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroAddress\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"ConfigActivated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"ConfigDeactivated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\
"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"bytes32[]\",\"name\":\"offchainTransmitters\",\"type\":\"bytes32[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"FeedActivated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"FeedDeactivated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"}],\"name\":\"ReportVerified\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[]
,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"activateConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"activateFeed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"deactivateConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"deactivateFeed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"}],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\"
:\"feedId\",\"type\":\"bytes32\"},{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"offchainTransmitters\",\"type\":\"bytes32[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"weight\",\"type\":\"uint64\"}],\"internalType\":\"structCommon.AddressAndWeight[]\",\"name\":\"recipientAddressesAndWeights\",\"type\":\"tuple[]\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"feedId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"sourceChainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"sourceAddress\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"newConfigCount\",\"type\":\"uint32\"},{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"offchainTransmitters\",\"type\":\"bytes32[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"weight\",\"type\":\"uint64\"}],\"internalType\":\"structCommon.AddressAndWeight[]\",\"name\":\"recipientAddressesAndWeights\",\"type\":\"tuple[]\"}],\"name\":\"setConfigFromSource\",\"outputs\":[],\"stateMutability
\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"isVerifier\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"signedReport\",\"type\":\"bytes\"},{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"verify\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"verifierResponse\",\"type\":\"bytes\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60a06040523480156200001157600080fd5b50604051620023d5380380620023d58339810160408190526200003491620001a6565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000fb565b5050506001600160a01b038116620000e95760405163d92e233d60e01b815260040160405180910390fd5b6001600160a01b0316608052620001d8565b336001600160a01b03821603620001555760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600060208284031215620001b957600080fd5b81516001600160a01b0381168114620001d157600080fd5b9392505050565b6080516121da620001fb6000396000818161033b015261131501526
121da6000f3fe608060405234801561001057600080fd5b50600436106100ea5760003560e01c806394d959801161008c578063e7db9c2a11610066578063e7db9c2a1461028b578063e84f128e1461029e578063f0107221146102fb578063f2fde38b1461030e57600080fd5b806394d9598014610206578063b70d929d14610219578063ded6307c1461027857600080fd5b80633dd86430116100c85780633dd86430146101ae578063564a0a7a146101c357806379ba5097146101d65780638da5cb5b146101de57600080fd5b806301ffc9a7146100ef578063181f5a77146101595780633d3ac1b51461019b575b600080fd5b6101446100fd3660046115da565b7fffffffff00000000000000000000000000000000000000000000000000000000167f3d3ac1b5000000000000000000000000000000000000000000000000000000001490565b60405190151581526020015b60405180910390f35b60408051808201909152600e81527f566572696669657220312e322e3000000000000000000000000000000000000060208201525b6040516101509190611687565b61018e6101a93660046116c3565b610321565b6101c16101bc366004611744565b6104bb565b005b6101c16101d1366004611744565b61056d565b6101c161062e565b60005460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610150565b6101c161021436600461175d565b61072b565b610255610227366004611744565b6000908152600260205260408120600181015490549192909168010000000000000000900463ffffffff1690565b604080519315158452602084019290925263ffffffff1690820152606001610150565b6101c161028636600461175d565b61088c565b6101c1610299366004611a92565b61099d565b6102d86102ac366004611744565b6000908152600260205260409020805460019091015463ffffffff808316936401000000009093041691565b6040805163ffffffff948516815293909216602084015290820152606001610150565b6101c1610309366004611bc5565b610aaf565b6101c161031c366004611cc2565b610b79565b60603373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610392576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000808080806103a4888a018a611cdd565b945094509450945094506000846103ba90611db8565b60008181526002602052604090208054919250906c01000000000000000000000000900460ff1
615610420576040517f36dbe748000000000000000000000000000000000000000000000000000000008152600481018390526024015b60405180910390fd5b86516000818152600283016020526040902061043f8483898985610b8d565b6104498984610c89565b8751602089012061045e818b8a8a8a87610cf1565b60405173ffffffffffffffffffffffffffffffffffffffff8d16815285907f58ca9502e98a536e06e72d680fcc251e5d10b72291a281665a2c2dc0ac30fcc59060200160405180910390a250969c9b505050505050505050505050565b6104c3610f6d565b60008181526002602052604081208054909163ffffffff9091169003610518576040517fa25b0b9600000000000000000000000000000000000000000000000000000000815260048101839052602401610417565b80547fffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffff16815560405182907ff438564f793525caa89c6e3a26d41e16aa39d1e589747595751e3f3df75cb2b490600090a25050565b610575610f6d565b60008181526002602052604081208054909163ffffffff90911690036105ca576040517fa25b0b9600000000000000000000000000000000000000000000000000000000815260048101839052602401610417565b80547fffffffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffff166c0100000000000000000000000017815560405182907ffc4f79b8c65b6be1773063461984c0974400d1e99654c79477a092ace83fd06190600090a25050565b60015473ffffffffffffffffffffffffffffffffffffffff1633146106af576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610417565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b610733610f6d565b600082815260026020526040902081610778576040517fe332262700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260028201602052604081205460ff1690036107ce576040517f8bca6311000000000000000000000000000000000000000000000000000000008152600481018490526024810183905260440
1610417565b80600101548203610815576040517fa403c0160000000000000000000000000000000000000000000000000000000081526004810184905260248101839052604401610417565b60008281526002820160205260409081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff1690555183907f0e173bea63a8c59ec70bf87043f2a729693790183f16a1a54b705de9e989cc4c9061087f9085815260200190565b60405180910390a2505050565b610894610f6d565b6000828152600260205260409020816108d9576040517fe332262700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600082815260028201602052604081205460ff16900361092f576040517f8bca63110000000000000000000000000000000000000000000000000000000081526004810184905260248101839052604401610417565b60008281526002820160205260409081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555183907f54f8872b9b94ebea6577f33576d55847bd8ea22641ccc886b965f6e50bfe77469061087f9085815260200190565b86518560ff16806000036109dd576040517f0743bae600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601f821115610a22576040517f61750f4000000000000000000000000000000000000000000000000000000000815260048101839052601f6024820152604401610417565b610a2d816003611e5b565b8211610a855781610a3f826003611e5b565b610a4a906001611e78565b6040517f9dd9e6d800000000000000000000000000000000000000000000000000000000815260048101929092526024820152604401610417565b610a8d610f6d565b610aa08d8d8d8d8d8d8d8d8d8d8d610ff0565b50505050505050505050505050565b86518560ff1680600003610aef576040517f0743bae600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b601f821115610b34576040517f61750f4000000000000000000000000000000000000000000000000000000000815260048101839052601f6024820152604401610417565b610b3f816003611e5b565b8211610b515781610a3f826003611e5b565b610b59610f6d565b610b6d8a463060008d8d8d8d8d8d8d610ff0565b50505050505050505050565b610b81610f6d565b610b8a81611437565b50565b8054600090610ba09060ff166001611e8b565b8254909150610100900460ff16610bed5
76040517ffc10a2830000000000000000000000000000000000000000000000000000000081526004810187905260248101869052604401610417565b8060ff16845114610c395783516040517f5348a282000000000000000000000000000000000000000000000000000000008152600481019190915260ff82166024820152604401610417565b8251845114610c8157835183516040517ff0d3140800000000000000000000000000000000000000000000000000000000815260048101929092526024820152604401610417565b505050505050565b6020820151815463ffffffff600883901c81169168010000000000000000900416811115610ceb5782547fffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffff166801000000000000000063ffffffff8316021783555b50505050565b60008686604051602001610d06929190611ea4565b6040516020818303038152906040528051906020012090506000610d3a604080518082019091526000808252602082015290565b8651600090815b81811015610f0557600186898360208110610d5e57610d5e611dfd565b610d6b91901a601b611e8b565b8c8481518110610d7d57610d7d611dfd565b60200260200101518c8581518110610d9757610d97611dfd565b602002602001015160405160008152602001604052604051610dd5949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa158015610df7573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015173ffffffffffffffffffffffffffffffffffffffff811660009081526001808d01602090815291859020848601909552845460ff808216865293995093955090850192610100900490911690811115610e7c57610e7c611ee0565b6001811115610e8d57610e8d611ee0565b9052509350600184602001516001811115610eaa57610eaa611ee0565b14610ee1576040517f4df18f0700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b836000015160080260ff166001901b8501945080610efe90611f0f565b9050610d41565b50837e01010101010101010101010101010101010101010101010101010101010101851614610f60576040517f4df18f0700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5050505050505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610fee576040517f08c379a0000000000
00000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610417565b565b60008b815260026020526040902063ffffffff89161561103d5780547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff8a16178155611071565b805463ffffffff1681600061105183611f47565b91906101000a81548163ffffffff021916908363ffffffff160217905550505b8054600090611091908e908e908e9063ffffffff168d8d8d8d8d8d61152c565b6000818152600284016020526040812080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001660ff8b16176101001790559091505b89518160ff1610156112d25760008a8260ff16815181106110f7576110f7611dfd565b60200260200101519050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1603611167576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000806000858152600287016020908152604080832073ffffffffffffffffffffffffffffffffffffffff87168452600190810190925290912054610100900460ff16908111156111ba576111ba611ee0565b14801591506111f5576040517ff67bc7c400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805180820190915260ff8416815260208101600190526000858152600287016020908152604080832073ffffffffffffffffffffffffffffffffffffffff871684526001908101835292208351815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00821681178355928501519193919284927fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000090921617906101009084908111156112b7576112b7611ee0565b02179055509050505050806112cb90611f6a565b90506110d4565b5060018201546040517fb011b24700000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169163b011b2479161134d919085908890600401611f89565b600060405180830381600087803b15801561136757600080fd5b505af115801561137b573d6000803e3d6000fd5b505050508c7
fa23a88453230b183877098801ff5a8f771a120e2573eea559ce6c4c2e305a4da8360000160049054906101000a900463ffffffff16838560000160009054906101000a900463ffffffff168d8d8d8d8d8d6040516113e79998979695949392919061208a565b60405180910390a281547fffffffffffffffffffffffffffffffffffffffff0000000000000000ffffffff1664010000000063ffffffff4316021782556001909101555050505050505050505050565b3373ffffffffffffffffffffffffffffffffffffffff8216036114b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610417565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000808b8b8b8b8b8b8b8b8b8b6040516020016115529a99989796959493929190612120565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e06000000000000000000000000000000000000000000000000000000000000179150509a9950505050505050505050565b6000602082840312156115ec57600080fd5b81357fffffffff000000000000000000000000000000000000000000000000000000008116811461161c57600080fd5b9392505050565b6000815180845260005b818110156116495760208185018101518683018201520161162d565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b60208152600061161c6020830184611623565b803573ffffffffffffffffffffffffffffffffffffffff811681146116be57600080fd5b919050565b6000806000604084860312156116d857600080fd5b833567ffffffffffffffff808211156116f057600080fd5b818601915086601f83011261170457600080fd5b81358181111561171357600080fd5b87602082850101111561172557600080fd5b60209283019550935061173b918601905061169a565b90509250925092565b60006020828403121561175657600080fd5b5035919050565b60008060408385031215611
77057600080fd5b50508035926020909101359150565b803563ffffffff811681146116be57600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040805190810167ffffffffffffffff811182821017156117e5576117e5611793565b60405290565b6040516060810167ffffffffffffffff811182821017156117e5576117e5611793565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff8111828210171561185557611855611793565b604052919050565b600067ffffffffffffffff82111561187757611877611793565b5060051b60200190565b600082601f83011261189257600080fd5b813560206118a76118a28361185d565b61180e565b82815260059290921b840181019181810190868411156118c657600080fd5b8286015b848110156118e8576118db8161169a565b83529183019183016118ca565b509695505050505050565b600082601f83011261190457600080fd5b813560206119146118a28361185d565b82815260059290921b8401810191818101908684111561193357600080fd5b8286015b848110156118e85780358352918301918301611937565b803560ff811681146116be57600080fd5b600082601f83011261197057600080fd5b813567ffffffffffffffff81111561198a5761198a611793565b6119bb60207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8401160161180e565b8181528460208386010111156119d057600080fd5b816020850160208301376000918101602001919091529392505050565b803567ffffffffffffffff811681146116be57600080fd5b600082601f830112611a1657600080fd5b81356020611a266118a28361185d565b82815260069290921b84018101918181019086841115611a4557600080fd5b8286015b848110156118e85760408189031215611a625760008081fd5b611a6a6117c2565b611a738261169a565b8152611a808583016119ed565b81860152835291830191604001611a49565b60008060008060008060008060008060006101608c8e031215611ab457600080fd5b8b359a5060208c01359950611acb60408d0161169a565b9850611ad960608d0161177f565b975067ffffffffffffffff8060808e01351115611af557600080fd5b611b058e60808f01358f01611881565b97508060a08e01351115611b1857600080fd5b611b288e60a08f01358f016118f3565b9650611b3660c08e0161194e565b95508060e08e01351115611b4957600080fd5b611b598e60e08f0
1358f0161195f565b9450611b686101008e016119ed565b9350806101208e01351115611b7c57600080fd5b611b8d8e6101208f01358f0161195f565b9250806101408e01351115611ba157600080fd5b50611bb38d6101408e01358e01611a05565b90509295989b509295989b9093969950565b600080600080600080600080610100898b031215611be257600080fd5b88359750602089013567ffffffffffffffff80821115611c0157600080fd5b611c0d8c838d01611881565b985060408b0135915080821115611c2357600080fd5b611c2f8c838d016118f3565b9750611c3d60608c0161194e565b965060808b0135915080821115611c5357600080fd5b611c5f8c838d0161195f565b9550611c6d60a08c016119ed565b945060c08b0135915080821115611c8357600080fd5b611c8f8c838d0161195f565b935060e08b0135915080821115611ca557600080fd5b50611cb28b828c01611a05565b9150509295985092959890939650565b600060208284031215611cd457600080fd5b61161c8261169a565b600080600080600060e08688031215611cf557600080fd5b86601f870112611d0457600080fd5b611d0c6117eb565b806060880189811115611d1e57600080fd5b885b81811015611d38578035845260209384019301611d20565b5090965035905067ffffffffffffffff80821115611d5557600080fd5b611d6189838a0161195f565b95506080880135915080821115611d7757600080fd5b611d8389838a016118f3565b945060a0880135915080821115611d9957600080fd5b50611da6888289016118f3565b9598949750929560c001359392505050565b80516020808301519190811015611df7577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8160200360031b1b821691505b50919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b8082028115828204841417611e7257611e72611e2c565b92915050565b80820180821115611e7257611e72611e2c565b60ff8181168382160190811115611e7257611e72611e2c565b828152600060208083018460005b6003811015611ecf57815183529183019190830190600101611eb2565b505050506080820190509392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611f40576
11f40611e2c565b5060010190565b600063ffffffff808316818103611f6057611f60611e2c565b6001019392505050565b600060ff821660ff8103611f8057611f80611e2c565b60010192915050565b600060608201858352602085818501526040606081860152828651808552608087019150838801945060005b81811015611ffa578551805173ffffffffffffffffffffffffffffffffffffffff16845285015167ffffffffffffffff16858401529484019491830191600101611fb5565b50909998505050505050505050565b600081518084526020808501945080840160005b8381101561204f57815173ffffffffffffffffffffffffffffffffffffffff168752958201959082019060010161201d565b509495945050505050565b600081518084526020808501945080840160005b8381101561204f5781518752958201959082019060010161206e565b600061012063ffffffff808d1684528b6020850152808b166040850152508060608401526120ba8184018a612009565b905082810360808401526120ce818961205a565b905060ff871660a084015282810360c08401526120eb8187611623565b905067ffffffffffffffff851660e08401528281036101008401526121108185611623565b9c9b505050505050505050505050565b60006101408c83528b602084015273ffffffffffffffffffffffffffffffffffffffff8b16604084015267ffffffffffffffff808b16606085015281608085015261216d8285018b612009565b915083820360a0850152612181828a61205a565b915060ff881660c085015283820360e085015261219e8288611623565b90861661010085015283810361012085015290506121bc8185611623565b9d9c5050505050505050505050505056fea164736f6c6343000813000a", +} + +var VerifierABI = VerifierMetaData.ABI + +var VerifierBin = VerifierMetaData.Bin + +func DeployVerifier(auth *bind.TransactOpts, backend bind.ContractBackend, verifierProxyAddr common.Address) (common.Address, *types.Transaction, *Verifier, error) { + parsed, err := VerifierMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VerifierBin), backend, verifierProxyAddr) + if err != nil { + return common.Address{}, nil, nil, err + } + 
return address, tx, &Verifier{address: address, abi: *parsed, VerifierCaller: VerifierCaller{contract: contract}, VerifierTransactor: VerifierTransactor{contract: contract}, VerifierFilterer: VerifierFilterer{contract: contract}}, nil +} + +type Verifier struct { + address common.Address + abi abi.ABI + VerifierCaller + VerifierTransactor + VerifierFilterer +} + +type VerifierCaller struct { + contract *bind.BoundContract +} + +type VerifierTransactor struct { + contract *bind.BoundContract +} + +type VerifierFilterer struct { + contract *bind.BoundContract +} + +type VerifierSession struct { + Contract *Verifier + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VerifierCallerSession struct { + Contract *VerifierCaller + CallOpts bind.CallOpts +} + +type VerifierTransactorSession struct { + Contract *VerifierTransactor + TransactOpts bind.TransactOpts +} + +type VerifierRaw struct { + Contract *Verifier +} + +type VerifierCallerRaw struct { + Contract *VerifierCaller +} + +type VerifierTransactorRaw struct { + Contract *VerifierTransactor +} + +func NewVerifier(address common.Address, backend bind.ContractBackend) (*Verifier, error) { + abi, err := abi.JSON(strings.NewReader(VerifierABI)) + if err != nil { + return nil, err + } + contract, err := bindVerifier(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Verifier{address: address, abi: abi, VerifierCaller: VerifierCaller{contract: contract}, VerifierTransactor: VerifierTransactor{contract: contract}, VerifierFilterer: VerifierFilterer{contract: contract}}, nil +} + +func NewVerifierCaller(address common.Address, caller bind.ContractCaller) (*VerifierCaller, error) { + contract, err := bindVerifier(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VerifierCaller{contract: contract}, nil +} + +func NewVerifierTransactor(address common.Address, transactor bind.ContractTransactor) (*VerifierTransactor, error) { + contract, err := 
bindVerifier(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VerifierTransactor{contract: contract}, nil +} + +func NewVerifierFilterer(address common.Address, filterer bind.ContractFilterer) (*VerifierFilterer, error) { + contract, err := bindVerifier(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VerifierFilterer{contract: contract}, nil +} + +func bindVerifier(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VerifierMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_Verifier *VerifierRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Verifier.Contract.VerifierCaller.contract.Call(opts, result, method, params...) +} + +func (_Verifier *VerifierRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Verifier.Contract.VerifierTransactor.contract.Transfer(opts) +} + +func (_Verifier *VerifierRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Verifier.Contract.VerifierTransactor.contract.Transact(opts, method, params...) +} + +func (_Verifier *VerifierCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Verifier.Contract.contract.Call(opts, result, method, params...) +} + +func (_Verifier *VerifierTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Verifier.Contract.contract.Transfer(opts) +} + +func (_Verifier *VerifierTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Verifier.Contract.contract.Transact(opts, method, params...) 
+} + +func (_Verifier *VerifierCaller) LatestConfigDetails(opts *bind.CallOpts, feedId [32]byte) (LatestConfigDetails, + + error) { + var out []interface{} + err := _Verifier.contract.Call(opts, &out, "latestConfigDetails", feedId) + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_Verifier *VerifierSession) LatestConfigDetails(feedId [32]byte) (LatestConfigDetails, + + error) { + return _Verifier.Contract.LatestConfigDetails(&_Verifier.CallOpts, feedId) +} + +func (_Verifier *VerifierCallerSession) LatestConfigDetails(feedId [32]byte) (LatestConfigDetails, + + error) { + return _Verifier.Contract.LatestConfigDetails(&_Verifier.CallOpts, feedId) +} + +func (_Verifier *VerifierCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts, feedId [32]byte) (LatestConfigDigestAndEpoch, + + error) { + var out []interface{} + err := _Verifier.contract.Call(opts, &out, "latestConfigDigestAndEpoch", feedId) + + outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_Verifier *VerifierSession) LatestConfigDigestAndEpoch(feedId [32]byte) (LatestConfigDigestAndEpoch, + + error) { + return _Verifier.Contract.LatestConfigDigestAndEpoch(&_Verifier.CallOpts, feedId) +} + +func (_Verifier *VerifierCallerSession) LatestConfigDigestAndEpoch(feedId [32]byte) (LatestConfigDigestAndEpoch, + + error) { + return _Verifier.Contract.LatestConfigDigestAndEpoch(&_Verifier.CallOpts, feedId) +} + +func 
(_Verifier *VerifierCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Verifier.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_Verifier *VerifierSession) Owner() (common.Address, error) { + return _Verifier.Contract.Owner(&_Verifier.CallOpts) +} + +func (_Verifier *VerifierCallerSession) Owner() (common.Address, error) { + return _Verifier.Contract.Owner(&_Verifier.CallOpts) +} + +func (_Verifier *VerifierCaller) SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) { + var out []interface{} + err := _Verifier.contract.Call(opts, &out, "supportsInterface", interfaceId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_Verifier *VerifierSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _Verifier.Contract.SupportsInterface(&_Verifier.CallOpts, interfaceId) +} + +func (_Verifier *VerifierCallerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _Verifier.Contract.SupportsInterface(&_Verifier.CallOpts, interfaceId) +} + +func (_Verifier *VerifierCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _Verifier.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_Verifier *VerifierSession) TypeAndVersion() (string, error) { + return _Verifier.Contract.TypeAndVersion(&_Verifier.CallOpts) +} + +func (_Verifier *VerifierCallerSession) TypeAndVersion() (string, error) { + return _Verifier.Contract.TypeAndVersion(&_Verifier.CallOpts) +} + +func (_Verifier *VerifierTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + 
return _Verifier.contract.Transact(opts, "acceptOwnership") +} + +func (_Verifier *VerifierSession) AcceptOwnership() (*types.Transaction, error) { + return _Verifier.Contract.AcceptOwnership(&_Verifier.TransactOpts) +} + +func (_Verifier *VerifierTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _Verifier.Contract.AcceptOwnership(&_Verifier.TransactOpts) +} + +func (_Verifier *VerifierTransactor) ActivateConfig(opts *bind.TransactOpts, feedId [32]byte, configDigest [32]byte) (*types.Transaction, error) { + return _Verifier.contract.Transact(opts, "activateConfig", feedId, configDigest) +} + +func (_Verifier *VerifierSession) ActivateConfig(feedId [32]byte, configDigest [32]byte) (*types.Transaction, error) { + return _Verifier.Contract.ActivateConfig(&_Verifier.TransactOpts, feedId, configDigest) +} + +func (_Verifier *VerifierTransactorSession) ActivateConfig(feedId [32]byte, configDigest [32]byte) (*types.Transaction, error) { + return _Verifier.Contract.ActivateConfig(&_Verifier.TransactOpts, feedId, configDigest) +} + +func (_Verifier *VerifierTransactor) ActivateFeed(opts *bind.TransactOpts, feedId [32]byte) (*types.Transaction, error) { + return _Verifier.contract.Transact(opts, "activateFeed", feedId) +} + +func (_Verifier *VerifierSession) ActivateFeed(feedId [32]byte) (*types.Transaction, error) { + return _Verifier.Contract.ActivateFeed(&_Verifier.TransactOpts, feedId) +} + +func (_Verifier *VerifierTransactorSession) ActivateFeed(feedId [32]byte) (*types.Transaction, error) { + return _Verifier.Contract.ActivateFeed(&_Verifier.TransactOpts, feedId) +} + +func (_Verifier *VerifierTransactor) DeactivateConfig(opts *bind.TransactOpts, feedId [32]byte, configDigest [32]byte) (*types.Transaction, error) { + return _Verifier.contract.Transact(opts, "deactivateConfig", feedId, configDigest) +} + +func (_Verifier *VerifierSession) DeactivateConfig(feedId [32]byte, configDigest [32]byte) (*types.Transaction, error) { + return 
_Verifier.Contract.DeactivateConfig(&_Verifier.TransactOpts, feedId, configDigest) +} + +func (_Verifier *VerifierTransactorSession) DeactivateConfig(feedId [32]byte, configDigest [32]byte) (*types.Transaction, error) { + return _Verifier.Contract.DeactivateConfig(&_Verifier.TransactOpts, feedId, configDigest) +} + +func (_Verifier *VerifierTransactor) DeactivateFeed(opts *bind.TransactOpts, feedId [32]byte) (*types.Transaction, error) { + return _Verifier.contract.Transact(opts, "deactivateFeed", feedId) +} + +func (_Verifier *VerifierSession) DeactivateFeed(feedId [32]byte) (*types.Transaction, error) { + return _Verifier.Contract.DeactivateFeed(&_Verifier.TransactOpts, feedId) +} + +func (_Verifier *VerifierTransactorSession) DeactivateFeed(feedId [32]byte) (*types.Transaction, error) { + return _Verifier.Contract.DeactivateFeed(&_Verifier.TransactOpts, feedId) +} + +func (_Verifier *VerifierTransactor) SetConfig(opts *bind.TransactOpts, feedId [32]byte, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _Verifier.contract.Transact(opts, "setConfig", feedId, signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) +} + +func (_Verifier *VerifierSession) SetConfig(feedId [32]byte, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _Verifier.Contract.SetConfig(&_Verifier.TransactOpts, feedId, signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) +} + +func (_Verifier *VerifierTransactorSession) SetConfig(feedId [32]byte, signers []common.Address, offchainTransmitters 
[][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _Verifier.Contract.SetConfig(&_Verifier.TransactOpts, feedId, signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) +} + +func (_Verifier *VerifierTransactor) SetConfigFromSource(opts *bind.TransactOpts, feedId [32]byte, sourceChainId *big.Int, sourceAddress common.Address, newConfigCount uint32, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _Verifier.contract.Transact(opts, "setConfigFromSource", feedId, sourceChainId, sourceAddress, newConfigCount, signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) +} + +func (_Verifier *VerifierSession) SetConfigFromSource(feedId [32]byte, sourceChainId *big.Int, sourceAddress common.Address, newConfigCount uint32, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _Verifier.Contract.SetConfigFromSource(&_Verifier.TransactOpts, feedId, sourceChainId, sourceAddress, newConfigCount, signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) +} + +func (_Verifier *VerifierTransactorSession) SetConfigFromSource(feedId [32]byte, sourceChainId *big.Int, sourceAddress common.Address, newConfigCount uint32, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights 
[]CommonAddressAndWeight) (*types.Transaction, error) { + return _Verifier.Contract.SetConfigFromSource(&_Verifier.TransactOpts, feedId, sourceChainId, sourceAddress, newConfigCount, signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) +} + +func (_Verifier *VerifierTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _Verifier.contract.Transact(opts, "transferOwnership", to) +} + +func (_Verifier *VerifierSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _Verifier.Contract.TransferOwnership(&_Verifier.TransactOpts, to) +} + +func (_Verifier *VerifierTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _Verifier.Contract.TransferOwnership(&_Verifier.TransactOpts, to) +} + +func (_Verifier *VerifierTransactor) Verify(opts *bind.TransactOpts, signedReport []byte, sender common.Address) (*types.Transaction, error) { + return _Verifier.contract.Transact(opts, "verify", signedReport, sender) +} + +func (_Verifier *VerifierSession) Verify(signedReport []byte, sender common.Address) (*types.Transaction, error) { + return _Verifier.Contract.Verify(&_Verifier.TransactOpts, signedReport, sender) +} + +func (_Verifier *VerifierTransactorSession) Verify(signedReport []byte, sender common.Address) (*types.Transaction, error) { + return _Verifier.Contract.Verify(&_Verifier.TransactOpts, signedReport, sender) +} + +type VerifierConfigActivatedIterator struct { + Event *VerifierConfigActivated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierConfigActivatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierConfigActivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil 
{ + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierConfigActivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierConfigActivatedIterator) Error() error { + return it.fail +} + +func (it *VerifierConfigActivatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierConfigActivated struct { + FeedId [32]byte + ConfigDigest [32]byte + Raw types.Log +} + +func (_Verifier *VerifierFilterer) FilterConfigActivated(opts *bind.FilterOpts, feedId [][32]byte) (*VerifierConfigActivatedIterator, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _Verifier.contract.FilterLogs(opts, "ConfigActivated", feedIdRule) + if err != nil { + return nil, err + } + return &VerifierConfigActivatedIterator{contract: _Verifier.contract, event: "ConfigActivated", logs: logs, sub: sub}, nil +} + +func (_Verifier *VerifierFilterer) WatchConfigActivated(opts *bind.WatchOpts, sink chan<- *VerifierConfigActivated, feedId [][32]byte) (event.Subscription, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _Verifier.contract.WatchLogs(opts, "ConfigActivated", feedIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierConfigActivated) + if err := _Verifier.contract.UnpackLog(event, "ConfigActivated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Verifier *VerifierFilterer) ParseConfigActivated(log types.Log) (*VerifierConfigActivated, error) { + event := new(VerifierConfigActivated) + if err := _Verifier.contract.UnpackLog(event, "ConfigActivated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifierConfigDeactivatedIterator struct { + Event *VerifierConfigDeactivated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierConfigDeactivatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierConfigDeactivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierConfigDeactivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierConfigDeactivatedIterator) Error() error { + return it.fail +} + +func (it *VerifierConfigDeactivatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierConfigDeactivated struct { + FeedId [32]byte + ConfigDigest [32]byte + Raw types.Log +} + +func (_Verifier *VerifierFilterer) FilterConfigDeactivated(opts *bind.FilterOpts, feedId [][32]byte) (*VerifierConfigDeactivatedIterator, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _Verifier.contract.FilterLogs(opts, "ConfigDeactivated", 
feedIdRule) + if err != nil { + return nil, err + } + return &VerifierConfigDeactivatedIterator{contract: _Verifier.contract, event: "ConfigDeactivated", logs: logs, sub: sub}, nil +} + +func (_Verifier *VerifierFilterer) WatchConfigDeactivated(opts *bind.WatchOpts, sink chan<- *VerifierConfigDeactivated, feedId [][32]byte) (event.Subscription, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _Verifier.contract.WatchLogs(opts, "ConfigDeactivated", feedIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierConfigDeactivated) + if err := _Verifier.contract.UnpackLog(event, "ConfigDeactivated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Verifier *VerifierFilterer) ParseConfigDeactivated(log types.Log) (*VerifierConfigDeactivated, error) { + event := new(VerifierConfigDeactivated) + if err := _Verifier.contract.UnpackLog(event, "ConfigDeactivated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifierConfigSetIterator struct { + Event *VerifierConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := 
<-it.logs: + it.Event = new(VerifierConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierConfigSetIterator) Error() error { + return it.fail +} + +func (it *VerifierConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierConfigSet struct { + FeedId [32]byte + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + OffchainTransmitters [][32]byte + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_Verifier *VerifierFilterer) FilterConfigSet(opts *bind.FilterOpts, feedId [][32]byte) (*VerifierConfigSetIterator, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _Verifier.contract.FilterLogs(opts, "ConfigSet", feedIdRule) + if err != nil { + return nil, err + } + return &VerifierConfigSetIterator{contract: _Verifier.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_Verifier *VerifierFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *VerifierConfigSet, feedId [][32]byte) (event.Subscription, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _Verifier.contract.WatchLogs(opts, "ConfigSet", feedIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierConfigSet) + if err := _Verifier.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + 
return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Verifier *VerifierFilterer) ParseConfigSet(log types.Log) (*VerifierConfigSet, error) { + event := new(VerifierConfigSet) + if err := _Verifier.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifierFeedActivatedIterator struct { + Event *VerifierFeedActivated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierFeedActivatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierFeedActivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierFeedActivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierFeedActivatedIterator) Error() error { + return it.fail +} + +func (it *VerifierFeedActivatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierFeedActivated struct { + FeedId [32]byte + Raw types.Log +} + +func (_Verifier *VerifierFilterer) FilterFeedActivated(opts *bind.FilterOpts, feedId [][32]byte) (*VerifierFeedActivatedIterator, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _Verifier.contract.FilterLogs(opts, "FeedActivated", feedIdRule) + if err != nil { + return nil, err + } + return &VerifierFeedActivatedIterator{contract: 
_Verifier.contract, event: "FeedActivated", logs: logs, sub: sub}, nil +} + +func (_Verifier *VerifierFilterer) WatchFeedActivated(opts *bind.WatchOpts, sink chan<- *VerifierFeedActivated, feedId [][32]byte) (event.Subscription, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _Verifier.contract.WatchLogs(opts, "FeedActivated", feedIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierFeedActivated) + if err := _Verifier.contract.UnpackLog(event, "FeedActivated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Verifier *VerifierFilterer) ParseFeedActivated(log types.Log) (*VerifierFeedActivated, error) { + event := new(VerifierFeedActivated) + if err := _Verifier.contract.UnpackLog(event, "FeedActivated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifierFeedDeactivatedIterator struct { + Event *VerifierFeedDeactivated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierFeedDeactivatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierFeedDeactivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierFeedDeactivated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != 
nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierFeedDeactivatedIterator) Error() error { + return it.fail +} + +func (it *VerifierFeedDeactivatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierFeedDeactivated struct { + FeedId [32]byte + Raw types.Log +} + +func (_Verifier *VerifierFilterer) FilterFeedDeactivated(opts *bind.FilterOpts, feedId [][32]byte) (*VerifierFeedDeactivatedIterator, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _Verifier.contract.FilterLogs(opts, "FeedDeactivated", feedIdRule) + if err != nil { + return nil, err + } + return &VerifierFeedDeactivatedIterator{contract: _Verifier.contract, event: "FeedDeactivated", logs: logs, sub: sub}, nil +} + +func (_Verifier *VerifierFilterer) WatchFeedDeactivated(opts *bind.WatchOpts, sink chan<- *VerifierFeedDeactivated, feedId [][32]byte) (event.Subscription, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _Verifier.contract.WatchLogs(opts, "FeedDeactivated", feedIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierFeedDeactivated) + if err := _Verifier.contract.UnpackLog(event, "FeedDeactivated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Verifier *VerifierFilterer) ParseFeedDeactivated(log types.Log) (*VerifierFeedDeactivated, error) { + event := 
new(VerifierFeedDeactivated) + if err := _Verifier.contract.UnpackLog(event, "FeedDeactivated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifierOwnershipTransferRequestedIterator struct { + Event *VerifierOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VerifierOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_Verifier *VerifierFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifierOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Verifier.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + 
return &VerifierOwnershipTransferRequestedIterator{contract: _Verifier.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_Verifier *VerifierFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VerifierOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Verifier.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierOwnershipTransferRequested) + if err := _Verifier.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Verifier *VerifierFilterer) ParseOwnershipTransferRequested(log types.Log) (*VerifierOwnershipTransferRequested, error) { + event := new(VerifierOwnershipTransferRequested) + if err := _Verifier.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifierOwnershipTransferredIterator struct { + Event *VerifierOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierOwnershipTransferred) + 
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VerifierOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_Verifier *VerifierFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifierOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Verifier.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VerifierOwnershipTransferredIterator{contract: _Verifier.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_Verifier *VerifierFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VerifierOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Verifier.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, 
err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierOwnershipTransferred) + if err := _Verifier.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Verifier *VerifierFilterer) ParseOwnershipTransferred(log types.Log) (*VerifierOwnershipTransferred, error) { + event := new(VerifierOwnershipTransferred) + if err := _Verifier.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifierReportVerifiedIterator struct { + Event *VerifierReportVerified + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierReportVerifiedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierReportVerified) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierReportVerified) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierReportVerifiedIterator) Error() error { + return it.fail +} + +func (it *VerifierReportVerifiedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierReportVerified struct { + FeedId [32]byte + Requester common.Address + Raw 
types.Log +} + +func (_Verifier *VerifierFilterer) FilterReportVerified(opts *bind.FilterOpts, feedId [][32]byte) (*VerifierReportVerifiedIterator, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _Verifier.contract.FilterLogs(opts, "ReportVerified", feedIdRule) + if err != nil { + return nil, err + } + return &VerifierReportVerifiedIterator{contract: _Verifier.contract, event: "ReportVerified", logs: logs, sub: sub}, nil +} + +func (_Verifier *VerifierFilterer) WatchReportVerified(opts *bind.WatchOpts, sink chan<- *VerifierReportVerified, feedId [][32]byte) (event.Subscription, error) { + + var feedIdRule []interface{} + for _, feedIdItem := range feedId { + feedIdRule = append(feedIdRule, feedIdItem) + } + + logs, sub, err := _Verifier.contract.WatchLogs(opts, "ReportVerified", feedIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierReportVerified) + if err := _Verifier.contract.UnpackLog(event, "ReportVerified", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Verifier *VerifierFilterer) ParseReportVerified(log types.Log) (*VerifierReportVerified, error) { + event := new(VerifierReportVerified) + if err := _Verifier.contract.UnpackLog(event, "ReportVerified", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LatestConfigDetails struct { + ConfigCount uint32 + BlockNumber uint32 + ConfigDigest [32]byte +} +type LatestConfigDigestAndEpoch struct { + ScanLogs bool + ConfigDigest [32]byte + Epoch uint32 +} + +func (_Verifier *Verifier) ParseLog(log types.Log) 
(generated.AbigenLog, error) { + switch log.Topics[0] { + case _Verifier.abi.Events["ConfigActivated"].ID: + return _Verifier.ParseConfigActivated(log) + case _Verifier.abi.Events["ConfigDeactivated"].ID: + return _Verifier.ParseConfigDeactivated(log) + case _Verifier.abi.Events["ConfigSet"].ID: + return _Verifier.ParseConfigSet(log) + case _Verifier.abi.Events["FeedActivated"].ID: + return _Verifier.ParseFeedActivated(log) + case _Verifier.abi.Events["FeedDeactivated"].ID: + return _Verifier.ParseFeedDeactivated(log) + case _Verifier.abi.Events["OwnershipTransferRequested"].ID: + return _Verifier.ParseOwnershipTransferRequested(log) + case _Verifier.abi.Events["OwnershipTransferred"].ID: + return _Verifier.ParseOwnershipTransferred(log) + case _Verifier.abi.Events["ReportVerified"].ID: + return _Verifier.ParseReportVerified(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VerifierConfigActivated) Topic() common.Hash { + return common.HexToHash("0x54f8872b9b94ebea6577f33576d55847bd8ea22641ccc886b965f6e50bfe7746") +} + +func (VerifierConfigDeactivated) Topic() common.Hash { + return common.HexToHash("0x0e173bea63a8c59ec70bf87043f2a729693790183f16a1a54b705de9e989cc4c") +} + +func (VerifierConfigSet) Topic() common.Hash { + return common.HexToHash("0xa23a88453230b183877098801ff5a8f771a120e2573eea559ce6c4c2e305a4da") +} + +func (VerifierFeedActivated) Topic() common.Hash { + return common.HexToHash("0xf438564f793525caa89c6e3a26d41e16aa39d1e589747595751e3f3df75cb2b4") +} + +func (VerifierFeedDeactivated) Topic() common.Hash { + return common.HexToHash("0xfc4f79b8c65b6be1773063461984c0974400d1e99654c79477a092ace83fd061") +} + +func (VerifierOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VerifierOwnershipTransferred) Topic() common.Hash { + return 
common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VerifierReportVerified) Topic() common.Hash { + return common.HexToHash("0x58ca9502e98a536e06e72d680fcc251e5d10b72291a281665a2c2dc0ac30fcc5") +} + +func (_Verifier *Verifier) Address() common.Address { + return _Verifier.address +} + +type VerifierInterface interface { + LatestConfigDetails(opts *bind.CallOpts, feedId [32]byte) (LatestConfigDetails, + + error) + + LatestConfigDigestAndEpoch(opts *bind.CallOpts, feedId [32]byte) (LatestConfigDigestAndEpoch, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + ActivateConfig(opts *bind.TransactOpts, feedId [32]byte, configDigest [32]byte) (*types.Transaction, error) + + ActivateFeed(opts *bind.TransactOpts, feedId [32]byte) (*types.Transaction, error) + + DeactivateConfig(opts *bind.TransactOpts, feedId [32]byte, configDigest [32]byte) (*types.Transaction, error) + + DeactivateFeed(opts *bind.TransactOpts, feedId [32]byte) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, feedId [32]byte, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) + + SetConfigFromSource(opts *bind.TransactOpts, feedId [32]byte, sourceChainId *big.Int, sourceAddress common.Address, newConfigCount uint32, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Verify(opts 
*bind.TransactOpts, signedReport []byte, sender common.Address) (*types.Transaction, error) + + FilterConfigActivated(opts *bind.FilterOpts, feedId [][32]byte) (*VerifierConfigActivatedIterator, error) + + WatchConfigActivated(opts *bind.WatchOpts, sink chan<- *VerifierConfigActivated, feedId [][32]byte) (event.Subscription, error) + + ParseConfigActivated(log types.Log) (*VerifierConfigActivated, error) + + FilterConfigDeactivated(opts *bind.FilterOpts, feedId [][32]byte) (*VerifierConfigDeactivatedIterator, error) + + WatchConfigDeactivated(opts *bind.WatchOpts, sink chan<- *VerifierConfigDeactivated, feedId [][32]byte) (event.Subscription, error) + + ParseConfigDeactivated(log types.Log) (*VerifierConfigDeactivated, error) + + FilterConfigSet(opts *bind.FilterOpts, feedId [][32]byte) (*VerifierConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *VerifierConfigSet, feedId [][32]byte) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*VerifierConfigSet, error) + + FilterFeedActivated(opts *bind.FilterOpts, feedId [][32]byte) (*VerifierFeedActivatedIterator, error) + + WatchFeedActivated(opts *bind.WatchOpts, sink chan<- *VerifierFeedActivated, feedId [][32]byte) (event.Subscription, error) + + ParseFeedActivated(log types.Log) (*VerifierFeedActivated, error) + + FilterFeedDeactivated(opts *bind.FilterOpts, feedId [][32]byte) (*VerifierFeedDeactivatedIterator, error) + + WatchFeedDeactivated(opts *bind.WatchOpts, sink chan<- *VerifierFeedDeactivated, feedId [][32]byte) (event.Subscription, error) + + ParseFeedDeactivated(log types.Log) (*VerifierFeedDeactivated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifierOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VerifierOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + 
ParseOwnershipTransferRequested(log types.Log) (*VerifierOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifierOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VerifierOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VerifierOwnershipTransferred, error) + + FilterReportVerified(opts *bind.FilterOpts, feedId [][32]byte) (*VerifierReportVerifiedIterator, error) + + WatchReportVerified(opts *bind.WatchOpts, sink chan<- *VerifierReportVerified, feedId [][32]byte) (event.Subscription, error) + + ParseReportVerified(log types.Log) (*VerifierReportVerified, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/llo-feeds/generated/verifier_proxy/verifier_proxy.go b/core/gethwrappers/llo-feeds/generated/verifier_proxy/verifier_proxy.go new file mode 100644 index 00000000..e9255fe1 --- /dev/null +++ b/core/gethwrappers/llo-feeds/generated/verifier_proxy/verifier_proxy.go @@ -0,0 +1,1387 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package verifier_proxy + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type CommonAddressAndWeight struct { + Addr common.Address + Weight uint64 +} + +var VerifierProxyMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"accessController\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AccessForbidden\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"BadVerification\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"verifier\",\"type\":\"address\"}],\"name\":\"ConfigDigestAlreadySet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"FeeManagerInvalid\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"verifier\",\"type\":\"address\"}],\"name\":\"VerifierAlreadyInitialized\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"VerifierInvalid\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"VerifierNotFound\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ZeroAddress\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"oldAccessController\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newAccessController\
",\"type\":\"address\"}],\"name\":\"AccessControllerSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"oldFeeManager\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newFeeManager\",\"type\":\"address\"}],\"name\":\"FeeManagerSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"verifierAddress\",\"type\":\"address\"}],\"name\":\"VerifierInitialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"oldConfigDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"newConfigDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"verifierAddress\",\"type\":\"address\"}],\"name\":\"VerifierSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"verifierAddress\",\"type\":\"address\"}],\"name\":\"VerifierUnset\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"getVerifier\",\"outputs\":[{\"internalType\":\"address\",\"name
\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"verifierAddress\",\"type\":\"address\"}],\"name\":\"initializeVerifier\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_accessController\",\"outputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_feeManager\",\"outputs\":[{\"internalType\":\"contractIVerifierFeeManager\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"accessController\",\"type\":\"address\"}],\"name\":\"setAccessController\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractIVerifierFeeManager\",\"name\":\"feeManager\",\"type\":\"address\"}],\"name\":\"setFeeManager\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"currentConfigDigest\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newConfigDigest\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"},{\"internalType\":\"uint64\",\"name\":\"weight\",\"type\":\"uint64\"}],\"internalType\":\"structCommon.AddressAndWeight[]\",\"name\":\"addressesAndWeights\",\"type\":\"tuple[]\"}],\"name\":\"setVerifier\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\"
:\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"unsetVerifier\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"payload\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"parameterPayload\",\"type\":\"bytes\"}],\"name\":\"verify\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes[]\",\"name\":\"payloads\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"parameterPayload\",\"type\":\"bytes\"}],\"name\":\"verifyBulk\",\"outputs\":[{\"internalType\":\"bytes[]\",\"name\":\"verifiedReports\",\"type\":\"bytes[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"}]", + Bin: 
"0x60806040523480156200001157600080fd5b5060405162001d3638038062001d36833981016040819052620000349162000193565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000e8565b5050600480546001600160a01b0319166001600160a01b03939093169290921790915550620001c5565b336001600160a01b03821603620001425760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600060208284031215620001a657600080fd5b81516001600160a01b0381168114620001be57600080fd5b9392505050565b611b6180620001d56000396000f3fe6080604052600436106100dd5760003560e01c806394ba28461161007f578063f08391d811610059578063f08391d8146102be578063f2fde38b146102de578063f7e83aee146102fe578063f873a61c1461031157600080fd5b806394ba28461461022e578063b011b2471461025b578063eeb7b2481461027b57600080fd5b80636e914094116100bb5780636e914094146101ae57806379ba5097146101ce5780638c2a4d53146101e35780638da5cb5b1461020357600080fd5b8063181f5a77146100e257806338416b5b1461013a578063472d35b91461018c575b600080fd5b3480156100ee57600080fd5b5060408051808201909152601381527f566572696669657250726f787920322e302e300000000000000000000000000060208201525b60405161013191906113c3565b60405180910390f35b34801561014657600080fd5b506005546101679073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610131565b34801561019857600080fd5b506101ac6101a73660046113ff565b610331565b005b3480156101ba57600080fd5b506101ac6101c936600461141c565b6105a9565b3480156101da57600080fd5b506101ac61069a565b3480156101ef57600080fd5b506101ac6101fe3660046113ff565b610797565b34801561020f57600080fd5b506
0005473ffffffffffffffffffffffffffffffffffffffff16610167565b34801561023a57600080fd5b506004546101679073ffffffffffffffffffffffffffffffffffffffff1681565b34801561026757600080fd5b506101ac610276366004611435565b6109c8565b34801561028757600080fd5b5061016761029636600461141c565b60009081526003602052604090205473ffffffffffffffffffffffffffffffffffffffff1690565b3480156102ca57600080fd5b506101ac6102d93660046113ff565b610bee565b3480156102ea57600080fd5b506101ac6102f93660046113ff565b610c75565b61012461030c366004611501565b610c89565b61032461031f36600461156d565b610e43565b60405161013191906115ee565b6103396110a7565b73ffffffffffffffffffffffffffffffffffffffff8116610386576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040517f01ffc9a70000000000000000000000000000000000000000000000000000000081527fdba45fe000000000000000000000000000000000000000000000000000000000600482015273ffffffffffffffffffffffffffffffffffffffff8216906301ffc9a790602401602060405180830381865afa158015610410573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610434919061166e565b15806104eb57506040517f01ffc9a70000000000000000000000000000000000000000000000000000000081527f6c2f1a1700000000000000000000000000000000000000000000000000000000600482015273ffffffffffffffffffffffffffffffffffffffff8216906301ffc9a790602401602060405180830381865afa1580156104c5573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104e9919061166e565b155b15610522576040517f8238941900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6005805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff000000000000000000000000000000000000000083168117909355604080519190921680825260208201939093527f04628abcaa6b1674651352125cb94b65b289145bc2bc4d67720bb7d966372f0391015b60405180910390a15050565b6105b16110a7565b60008181526003602052604090205473ffffffffffffffffffffffffffffffffffffffff1680610615576040517fb151802b0000000000000000000000000000000000000000000
00000000000008152600481018390526024015b60405180910390fd5b6000828152600360205260409081902080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055517f11dc15c4b8ac2b183166cc8427e5385a5ece8308217a4217338c6a7614845c4c9061059d908490849091825273ffffffffffffffffffffffffffffffffffffffff16602082015260400190565b60015473ffffffffffffffffffffffffffffffffffffffff16331461071b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015260640161060c565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b61079f6110a7565b8073ffffffffffffffffffffffffffffffffffffffff81166107ed576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040517f01ffc9a70000000000000000000000000000000000000000000000000000000081527f3d3ac1b500000000000000000000000000000000000000000000000000000000600482015273ffffffffffffffffffffffffffffffffffffffff8216906301ffc9a790602401602060405180830381865afa158015610877573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061089b919061166e565b6108d1576040517f75b0527a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff821660009081526002602052604090205460ff1615610949576040517f4e01ccfd00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8316600482015260240161060c565b73ffffffffffffffffffffffffffffffffffffffff821660008181526002602090815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905590519182527f1f2cd7c97f4d801b5efe26cc409617c1fd6c5ef786e79aacb90af40923e4e8e9910161059d565b600083815260036020526040902054839073fffffffffffff
fffffffffffffffffffffffffff168015610a46576040517f375d1fe60000000000000000000000000000000000000000000000000000000081526004810183905273ffffffffffffffffffffffffffffffffffffffff8216602482015260440161060c565b3360009081526002602052604090205460ff16610a8f576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600085815260036020526040902080547fffffffffffffffffffffffff000000000000000000000000000000000000000016331790558215610ba75760055473ffffffffffffffffffffffffffffffffffffffff16610b1a576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6005546040517ff65df96200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091169063f65df96290610b7490889088908890600401611690565b600060405180830381600087803b158015610b8e57600080fd5b505af1158015610ba2573d6000803e3d6000fd5b505050505b6040805187815260208101879052338183015290517fbeb513e532542a562ac35699e7cd9ae7d198dcd3eee15bada6c857d28ceaddcf9181900360600190a1505050505050565b610bf66110a7565b6004805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff000000000000000000000000000000000000000083168117909355604080519190921680825260208201939093527f953e92b1a6442e9c3242531154a3f6f6eb00b4e9c719ba8118fa6235e4ce89b6910161059d565b610c7d6110a7565b610c868161112a565b50565b60045460609073ffffffffffffffffffffffffffffffffffffffff168015801590610d4957506040517f6b14daf800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821690636b14daf890610d069033906000903690600401611762565b602060405180830381865afa158015610d23573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d47919061166e565b155b15610d80576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60055473ffffffffffffffffffffffffffffffffffffffff168015610e2e576040517fdba45fe000000000000000000000000000000000000000000000000000000000815273fffff
fffffffffffffffffffffffffffffffffff82169063dba45fe0903490610dfb908b908b908b908b90339060040161179b565b6000604051808303818588803b158015610e1457600080fd5b505af1158015610e28573d6000803e3d6000fd5b50505050505b610e38878761121f565b979650505050505050565b60045460609073ffffffffffffffffffffffffffffffffffffffff168015801590610f0357506040517f6b14daf800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821690636b14daf890610ec09033906000903690600401611762565b602060405180830381865afa158015610edd573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610f01919061166e565b155b15610f3a576040517fef67f5d800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60055473ffffffffffffffffffffffffffffffffffffffff168015610fe8576040517f6c2f1a1700000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821690636c2f1a17903490610fb5908b908b908b908b9033906004016117eb565b6000604051808303818588803b158015610fce57600080fd5b505af1158015610fe2573d6000803e3d6000fd5b50505050505b8567ffffffffffffffff811115611001576110016118fc565b60405190808252806020026020018201604052801561103457816020015b606081526020019060019003908161101f5790505b50925060005b8681101561109c5761106e8888838181106110575761105761192b565b9050602002810190611069919061195a565b61121f565b8482815181106110805761108061192b565b602002602001018190525080611095906119bf565b905061103a565b505050949350505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314611128576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161060c565b565b3373ffffffffffffffffffffffffffffffffffffffff8216036111a9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161060c565b600180547ffffff
fffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6060600061122d8385611a1e565b60008181526003602052604090205490915073ffffffffffffffffffffffffffffffffffffffff168061128f576040517fb151802b0000000000000000000000000000000000000000000000000000000081526004810183905260240161060c565b6040517f3d3ac1b500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821690633d3ac1b5906112e590889088903390600401611a5a565b6000604051808303816000875af1158015611304573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261134a9190810190611a94565b925050505b92915050565b60005b83811015611370578181015183820152602001611358565b50506000910152565b60008151808452611391816020860160208601611355565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006113d66020830184611379565b9392505050565b73ffffffffffffffffffffffffffffffffffffffff81168114610c8657600080fd5b60006020828403121561141157600080fd5b81356113d6816113dd565b60006020828403121561142e57600080fd5b5035919050565b6000806000806060858703121561144b57600080fd5b8435935060208501359250604085013567ffffffffffffffff8082111561147157600080fd5b818701915087601f83011261148557600080fd5b81358181111561149457600080fd5b8860208260061b85010111156114a957600080fd5b95989497505060200194505050565b60008083601f8401126114ca57600080fd5b50813567ffffffffffffffff8111156114e257600080fd5b6020830191508360208285010111156114fa57600080fd5b9250929050565b6000806000806040858703121561151757600080fd5b843567ffffffffffffffff8082111561152f57600080fd5b61153b888389016114b8565b9096509450602087013591508082111561155457600080fd5b50611561878288016114b8565b95989497509550505050565b6000806000806040858703121561158357600080fd5b843567ffffffffffffffff8082111561159b5760008
0fd5b818701915087601f8301126115af57600080fd5b8135818111156115be57600080fd5b8860208260051b85010111156115d357600080fd5b60209283019650945090860135908082111561155457600080fd5b6000602080830181845280855180835260408601915060408160051b870101925083870160005b82811015611661577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc088860301845261164f858351611379565b94509285019290850190600101611615565b5092979650505050505050565b60006020828403121561168057600080fd5b815180151581146113d657600080fd5b838152604060208083018290528282018490526000919085906060850184805b8881101561170a5784356116c3816113dd565b73ffffffffffffffffffffffffffffffffffffffff1683528484013567ffffffffffffffff81168082146116f5578384fd5b848601525093850193918501916001016116b0565b50909998505050505050505050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b73ffffffffffffffffffffffffffffffffffffffff84168152604060208201526000611792604083018486611719565b95945050505050565b6060815260006117af606083018789611719565b82810360208401526117c2818688611719565b91505073ffffffffffffffffffffffffffffffffffffffff831660408301529695505050505050565b6060808252810185905260006080600587901b8301810190830188835b898110156118b7577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8086850301835281357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18c360301811261186957600080fd5b8b01602081810191359067ffffffffffffffff82111561188857600080fd5b81360383131561189757600080fd5b6118a2878385611719565b96509485019493909301925050600101611808565b50505082810360208401526118cd818688611719565b9150506118f2604083018473ffffffffffffffffffffffffffffffffffffffff169052565b9695505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18
4360301811261198f57600080fd5b83018035915067ffffffffffffffff8211156119aa57600080fd5b6020019150368190038213156114fa57600080fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611a17577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b5060010190565b8035602083101561134f577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff602084900360031b1b1692915050565b604081526000611a6e604083018587611719565b905073ffffffffffffffffffffffffffffffffffffffff83166020830152949350505050565b600060208284031215611aa657600080fd5b815167ffffffffffffffff80821115611abe57600080fd5b818401915084601f830112611ad257600080fd5b815181811115611ae457611ae46118fc565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908382118183101715611b2a57611b2a6118fc565b81604052828152876020848701011115611b4357600080fd5b610e3883602083016020880161135556fea164736f6c6343000813000a", +} + +var VerifierProxyABI = VerifierProxyMetaData.ABI + +var VerifierProxyBin = VerifierProxyMetaData.Bin + +func DeployVerifierProxy(auth *bind.TransactOpts, backend bind.ContractBackend, accessController common.Address) (common.Address, *types.Transaction, *VerifierProxy, error) { + parsed, err := VerifierProxyMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VerifierProxyBin), backend, accessController) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VerifierProxy{address: address, abi: *parsed, VerifierProxyCaller: VerifierProxyCaller{contract: contract}, VerifierProxyTransactor: VerifierProxyTransactor{contract: contract}, VerifierProxyFilterer: VerifierProxyFilterer{contract: contract}}, nil +} + +type VerifierProxy struct { + address common.Address + abi abi.ABI + 
VerifierProxyCaller + VerifierProxyTransactor + VerifierProxyFilterer +} + +type VerifierProxyCaller struct { + contract *bind.BoundContract +} + +type VerifierProxyTransactor struct { + contract *bind.BoundContract +} + +type VerifierProxyFilterer struct { + contract *bind.BoundContract +} + +type VerifierProxySession struct { + Contract *VerifierProxy + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VerifierProxyCallerSession struct { + Contract *VerifierProxyCaller + CallOpts bind.CallOpts +} + +type VerifierProxyTransactorSession struct { + Contract *VerifierProxyTransactor + TransactOpts bind.TransactOpts +} + +type VerifierProxyRaw struct { + Contract *VerifierProxy +} + +type VerifierProxyCallerRaw struct { + Contract *VerifierProxyCaller +} + +type VerifierProxyTransactorRaw struct { + Contract *VerifierProxyTransactor +} + +func NewVerifierProxy(address common.Address, backend bind.ContractBackend) (*VerifierProxy, error) { + abi, err := abi.JSON(strings.NewReader(VerifierProxyABI)) + if err != nil { + return nil, err + } + contract, err := bindVerifierProxy(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VerifierProxy{address: address, abi: abi, VerifierProxyCaller: VerifierProxyCaller{contract: contract}, VerifierProxyTransactor: VerifierProxyTransactor{contract: contract}, VerifierProxyFilterer: VerifierProxyFilterer{contract: contract}}, nil +} + +func NewVerifierProxyCaller(address common.Address, caller bind.ContractCaller) (*VerifierProxyCaller, error) { + contract, err := bindVerifierProxy(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VerifierProxyCaller{contract: contract}, nil +} + +func NewVerifierProxyTransactor(address common.Address, transactor bind.ContractTransactor) (*VerifierProxyTransactor, error) { + contract, err := bindVerifierProxy(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VerifierProxyTransactor{contract: 
contract}, nil +} + +func NewVerifierProxyFilterer(address common.Address, filterer bind.ContractFilterer) (*VerifierProxyFilterer, error) { + contract, err := bindVerifierProxy(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VerifierProxyFilterer{contract: contract}, nil +} + +func bindVerifierProxy(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VerifierProxyMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VerifierProxy *VerifierProxyRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VerifierProxy.Contract.VerifierProxyCaller.contract.Call(opts, result, method, params...) +} + +func (_VerifierProxy *VerifierProxyRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifierProxy.Contract.VerifierProxyTransactor.contract.Transfer(opts) +} + +func (_VerifierProxy *VerifierProxyRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VerifierProxy.Contract.VerifierProxyTransactor.contract.Transact(opts, method, params...) +} + +func (_VerifierProxy *VerifierProxyCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VerifierProxy.Contract.contract.Call(opts, result, method, params...) +} + +func (_VerifierProxy *VerifierProxyTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifierProxy.Contract.contract.Transfer(opts) +} + +func (_VerifierProxy *VerifierProxyTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VerifierProxy.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VerifierProxy *VerifierProxyCaller) GetVerifier(opts *bind.CallOpts, configDigest [32]byte) (common.Address, error) { + var out []interface{} + err := _VerifierProxy.contract.Call(opts, &out, "getVerifier", configDigest) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifierProxy *VerifierProxySession) GetVerifier(configDigest [32]byte) (common.Address, error) { + return _VerifierProxy.Contract.GetVerifier(&_VerifierProxy.CallOpts, configDigest) +} + +func (_VerifierProxy *VerifierProxyCallerSession) GetVerifier(configDigest [32]byte) (common.Address, error) { + return _VerifierProxy.Contract.GetVerifier(&_VerifierProxy.CallOpts, configDigest) +} + +func (_VerifierProxy *VerifierProxyCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifierProxy.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifierProxy *VerifierProxySession) Owner() (common.Address, error) { + return _VerifierProxy.Contract.Owner(&_VerifierProxy.CallOpts) +} + +func (_VerifierProxy *VerifierProxyCallerSession) Owner() (common.Address, error) { + return _VerifierProxy.Contract.Owner(&_VerifierProxy.CallOpts) +} + +func (_VerifierProxy *VerifierProxyCaller) SAccessController(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifierProxy.contract.Call(opts, &out, "s_accessController") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifierProxy *VerifierProxySession) SAccessController() (common.Address, error) { + return _VerifierProxy.Contract.SAccessController(&_VerifierProxy.CallOpts) +} + +func 
(_VerifierProxy *VerifierProxyCallerSession) SAccessController() (common.Address, error) { + return _VerifierProxy.Contract.SAccessController(&_VerifierProxy.CallOpts) +} + +func (_VerifierProxy *VerifierProxyCaller) SFeeManager(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VerifierProxy.contract.Call(opts, &out, "s_feeManager") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VerifierProxy *VerifierProxySession) SFeeManager() (common.Address, error) { + return _VerifierProxy.Contract.SFeeManager(&_VerifierProxy.CallOpts) +} + +func (_VerifierProxy *VerifierProxyCallerSession) SFeeManager() (common.Address, error) { + return _VerifierProxy.Contract.SFeeManager(&_VerifierProxy.CallOpts) +} + +func (_VerifierProxy *VerifierProxyCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _VerifierProxy.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VerifierProxy *VerifierProxySession) TypeAndVersion() (string, error) { + return _VerifierProxy.Contract.TypeAndVersion(&_VerifierProxy.CallOpts) +} + +func (_VerifierProxy *VerifierProxyCallerSession) TypeAndVersion() (string, error) { + return _VerifierProxy.Contract.TypeAndVersion(&_VerifierProxy.CallOpts) +} + +func (_VerifierProxy *VerifierProxyTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VerifierProxy.contract.Transact(opts, "acceptOwnership") +} + +func (_VerifierProxy *VerifierProxySession) AcceptOwnership() (*types.Transaction, error) { + return _VerifierProxy.Contract.AcceptOwnership(&_VerifierProxy.TransactOpts) +} + +func (_VerifierProxy *VerifierProxyTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return 
_VerifierProxy.Contract.AcceptOwnership(&_VerifierProxy.TransactOpts) +} + +func (_VerifierProxy *VerifierProxyTransactor) InitializeVerifier(opts *bind.TransactOpts, verifierAddress common.Address) (*types.Transaction, error) { + return _VerifierProxy.contract.Transact(opts, "initializeVerifier", verifierAddress) +} + +func (_VerifierProxy *VerifierProxySession) InitializeVerifier(verifierAddress common.Address) (*types.Transaction, error) { + return _VerifierProxy.Contract.InitializeVerifier(&_VerifierProxy.TransactOpts, verifierAddress) +} + +func (_VerifierProxy *VerifierProxyTransactorSession) InitializeVerifier(verifierAddress common.Address) (*types.Transaction, error) { + return _VerifierProxy.Contract.InitializeVerifier(&_VerifierProxy.TransactOpts, verifierAddress) +} + +func (_VerifierProxy *VerifierProxyTransactor) SetAccessController(opts *bind.TransactOpts, accessController common.Address) (*types.Transaction, error) { + return _VerifierProxy.contract.Transact(opts, "setAccessController", accessController) +} + +func (_VerifierProxy *VerifierProxySession) SetAccessController(accessController common.Address) (*types.Transaction, error) { + return _VerifierProxy.Contract.SetAccessController(&_VerifierProxy.TransactOpts, accessController) +} + +func (_VerifierProxy *VerifierProxyTransactorSession) SetAccessController(accessController common.Address) (*types.Transaction, error) { + return _VerifierProxy.Contract.SetAccessController(&_VerifierProxy.TransactOpts, accessController) +} + +func (_VerifierProxy *VerifierProxyTransactor) SetFeeManager(opts *bind.TransactOpts, feeManager common.Address) (*types.Transaction, error) { + return _VerifierProxy.contract.Transact(opts, "setFeeManager", feeManager) +} + +func (_VerifierProxy *VerifierProxySession) SetFeeManager(feeManager common.Address) (*types.Transaction, error) { + return _VerifierProxy.Contract.SetFeeManager(&_VerifierProxy.TransactOpts, feeManager) +} + +func (_VerifierProxy 
*VerifierProxyTransactorSession) SetFeeManager(feeManager common.Address) (*types.Transaction, error) { + return _VerifierProxy.Contract.SetFeeManager(&_VerifierProxy.TransactOpts, feeManager) +} + +func (_VerifierProxy *VerifierProxyTransactor) SetVerifier(opts *bind.TransactOpts, currentConfigDigest [32]byte, newConfigDigest [32]byte, addressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _VerifierProxy.contract.Transact(opts, "setVerifier", currentConfigDigest, newConfigDigest, addressesAndWeights) +} + +func (_VerifierProxy *VerifierProxySession) SetVerifier(currentConfigDigest [32]byte, newConfigDigest [32]byte, addressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _VerifierProxy.Contract.SetVerifier(&_VerifierProxy.TransactOpts, currentConfigDigest, newConfigDigest, addressesAndWeights) +} + +func (_VerifierProxy *VerifierProxyTransactorSession) SetVerifier(currentConfigDigest [32]byte, newConfigDigest [32]byte, addressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) { + return _VerifierProxy.Contract.SetVerifier(&_VerifierProxy.TransactOpts, currentConfigDigest, newConfigDigest, addressesAndWeights) +} + +func (_VerifierProxy *VerifierProxyTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VerifierProxy.contract.Transact(opts, "transferOwnership", to) +} + +func (_VerifierProxy *VerifierProxySession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VerifierProxy.Contract.TransferOwnership(&_VerifierProxy.TransactOpts, to) +} + +func (_VerifierProxy *VerifierProxyTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VerifierProxy.Contract.TransferOwnership(&_VerifierProxy.TransactOpts, to) +} + +func (_VerifierProxy *VerifierProxyTransactor) UnsetVerifier(opts *bind.TransactOpts, configDigest [32]byte) (*types.Transaction, error) { + return 
_VerifierProxy.contract.Transact(opts, "unsetVerifier", configDigest) +} + +func (_VerifierProxy *VerifierProxySession) UnsetVerifier(configDigest [32]byte) (*types.Transaction, error) { + return _VerifierProxy.Contract.UnsetVerifier(&_VerifierProxy.TransactOpts, configDigest) +} + +func (_VerifierProxy *VerifierProxyTransactorSession) UnsetVerifier(configDigest [32]byte) (*types.Transaction, error) { + return _VerifierProxy.Contract.UnsetVerifier(&_VerifierProxy.TransactOpts, configDigest) +} + +func (_VerifierProxy *VerifierProxyTransactor) Verify(opts *bind.TransactOpts, payload []byte, parameterPayload []byte) (*types.Transaction, error) { + return _VerifierProxy.contract.Transact(opts, "verify", payload, parameterPayload) +} + +func (_VerifierProxy *VerifierProxySession) Verify(payload []byte, parameterPayload []byte) (*types.Transaction, error) { + return _VerifierProxy.Contract.Verify(&_VerifierProxy.TransactOpts, payload, parameterPayload) +} + +func (_VerifierProxy *VerifierProxyTransactorSession) Verify(payload []byte, parameterPayload []byte) (*types.Transaction, error) { + return _VerifierProxy.Contract.Verify(&_VerifierProxy.TransactOpts, payload, parameterPayload) +} + +func (_VerifierProxy *VerifierProxyTransactor) VerifyBulk(opts *bind.TransactOpts, payloads [][]byte, parameterPayload []byte) (*types.Transaction, error) { + return _VerifierProxy.contract.Transact(opts, "verifyBulk", payloads, parameterPayload) +} + +func (_VerifierProxy *VerifierProxySession) VerifyBulk(payloads [][]byte, parameterPayload []byte) (*types.Transaction, error) { + return _VerifierProxy.Contract.VerifyBulk(&_VerifierProxy.TransactOpts, payloads, parameterPayload) +} + +func (_VerifierProxy *VerifierProxyTransactorSession) VerifyBulk(payloads [][]byte, parameterPayload []byte) (*types.Transaction, error) { + return _VerifierProxy.Contract.VerifyBulk(&_VerifierProxy.TransactOpts, payloads, parameterPayload) +} + +type VerifierProxyAccessControllerSetIterator struct { + 
Event *VerifierProxyAccessControllerSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierProxyAccessControllerSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierProxyAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierProxyAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierProxyAccessControllerSetIterator) Error() error { + return it.fail +} + +func (it *VerifierProxyAccessControllerSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierProxyAccessControllerSet struct { + OldAccessController common.Address + NewAccessController common.Address + Raw types.Log +} + +func (_VerifierProxy *VerifierProxyFilterer) FilterAccessControllerSet(opts *bind.FilterOpts) (*VerifierProxyAccessControllerSetIterator, error) { + + logs, sub, err := _VerifierProxy.contract.FilterLogs(opts, "AccessControllerSet") + if err != nil { + return nil, err + } + return &VerifierProxyAccessControllerSetIterator{contract: _VerifierProxy.contract, event: "AccessControllerSet", logs: logs, sub: sub}, nil +} + +func (_VerifierProxy *VerifierProxyFilterer) WatchAccessControllerSet(opts *bind.WatchOpts, sink chan<- *VerifierProxyAccessControllerSet) (event.Subscription, error) { + + logs, sub, err := _VerifierProxy.contract.WatchLogs(opts, "AccessControllerSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan 
struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierProxyAccessControllerSet) + if err := _VerifierProxy.contract.UnpackLog(event, "AccessControllerSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifierProxy *VerifierProxyFilterer) ParseAccessControllerSet(log types.Log) (*VerifierProxyAccessControllerSet, error) { + event := new(VerifierProxyAccessControllerSet) + if err := _VerifierProxy.contract.UnpackLog(event, "AccessControllerSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifierProxyFeeManagerSetIterator struct { + Event *VerifierProxyFeeManagerSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierProxyFeeManagerSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierProxyFeeManagerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierProxyFeeManagerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierProxyFeeManagerSetIterator) Error() error { + return it.fail +} + +func (it *VerifierProxyFeeManagerSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierProxyFeeManagerSet struct { + OldFeeManager common.Address + NewFeeManager 
common.Address + Raw types.Log +} + +func (_VerifierProxy *VerifierProxyFilterer) FilterFeeManagerSet(opts *bind.FilterOpts) (*VerifierProxyFeeManagerSetIterator, error) { + + logs, sub, err := _VerifierProxy.contract.FilterLogs(opts, "FeeManagerSet") + if err != nil { + return nil, err + } + return &VerifierProxyFeeManagerSetIterator{contract: _VerifierProxy.contract, event: "FeeManagerSet", logs: logs, sub: sub}, nil +} + +func (_VerifierProxy *VerifierProxyFilterer) WatchFeeManagerSet(opts *bind.WatchOpts, sink chan<- *VerifierProxyFeeManagerSet) (event.Subscription, error) { + + logs, sub, err := _VerifierProxy.contract.WatchLogs(opts, "FeeManagerSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierProxyFeeManagerSet) + if err := _VerifierProxy.contract.UnpackLog(event, "FeeManagerSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifierProxy *VerifierProxyFilterer) ParseFeeManagerSet(log types.Log) (*VerifierProxyFeeManagerSet, error) { + event := new(VerifierProxyFeeManagerSet) + if err := _VerifierProxy.contract.UnpackLog(event, "FeeManagerSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifierProxyOwnershipTransferRequestedIterator struct { + Event *VerifierProxyOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierProxyOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierProxyOwnershipTransferRequested) + if err 
:= it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierProxyOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierProxyOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VerifierProxyOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierProxyOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VerifierProxy *VerifierProxyFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifierProxyOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifierProxy.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VerifierProxyOwnershipTransferRequestedIterator{contract: _VerifierProxy.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VerifierProxy *VerifierProxyFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VerifierProxyOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, 
sub, err := _VerifierProxy.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierProxyOwnershipTransferRequested) + if err := _VerifierProxy.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifierProxy *VerifierProxyFilterer) ParseOwnershipTransferRequested(log types.Log) (*VerifierProxyOwnershipTransferRequested, error) { + event := new(VerifierProxyOwnershipTransferRequested) + if err := _VerifierProxy.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifierProxyOwnershipTransferredIterator struct { + Event *VerifierProxyOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierProxyOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierProxyOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierProxyOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*VerifierProxyOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VerifierProxyOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierProxyOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VerifierProxy *VerifierProxyFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifierProxyOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifierProxy.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VerifierProxyOwnershipTransferredIterator{contract: _VerifierProxy.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VerifierProxy *VerifierProxyFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VerifierProxyOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VerifierProxy.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierProxyOwnershipTransferred) + if err := _VerifierProxy.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifierProxy *VerifierProxyFilterer) ParseOwnershipTransferred(log types.Log) (*VerifierProxyOwnershipTransferred, error) { + event := new(VerifierProxyOwnershipTransferred) + if err := _VerifierProxy.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifierProxyVerifierInitializedIterator struct { + Event *VerifierProxyVerifierInitialized + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierProxyVerifierInitializedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierProxyVerifierInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierProxyVerifierInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierProxyVerifierInitializedIterator) Error() error { + return it.fail +} + +func (it *VerifierProxyVerifierInitializedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierProxyVerifierInitialized struct { + VerifierAddress common.Address + Raw types.Log +} + +func (_VerifierProxy *VerifierProxyFilterer) FilterVerifierInitialized(opts *bind.FilterOpts) (*VerifierProxyVerifierInitializedIterator, error) { + + logs, sub, err := _VerifierProxy.contract.FilterLogs(opts, "VerifierInitialized") + if err != nil { + return nil, err + } + return 
&VerifierProxyVerifierInitializedIterator{contract: _VerifierProxy.contract, event: "VerifierInitialized", logs: logs, sub: sub}, nil +} + +func (_VerifierProxy *VerifierProxyFilterer) WatchVerifierInitialized(opts *bind.WatchOpts, sink chan<- *VerifierProxyVerifierInitialized) (event.Subscription, error) { + + logs, sub, err := _VerifierProxy.contract.WatchLogs(opts, "VerifierInitialized") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierProxyVerifierInitialized) + if err := _VerifierProxy.contract.UnpackLog(event, "VerifierInitialized", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifierProxy *VerifierProxyFilterer) ParseVerifierInitialized(log types.Log) (*VerifierProxyVerifierInitialized, error) { + event := new(VerifierProxyVerifierInitialized) + if err := _VerifierProxy.contract.UnpackLog(event, "VerifierInitialized", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VerifierProxyVerifierSetIterator struct { + Event *VerifierProxyVerifierSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierProxyVerifierSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierProxyVerifierSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierProxyVerifierSet) + if err := it.contract.UnpackLog(it.Event, 
it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierProxyVerifierSetIterator) Error() error { + return it.fail +} + +func (it *VerifierProxyVerifierSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierProxyVerifierSet struct { + OldConfigDigest [32]byte + NewConfigDigest [32]byte + VerifierAddress common.Address + Raw types.Log +} + +func (_VerifierProxy *VerifierProxyFilterer) FilterVerifierSet(opts *bind.FilterOpts) (*VerifierProxyVerifierSetIterator, error) { + + logs, sub, err := _VerifierProxy.contract.FilterLogs(opts, "VerifierSet") + if err != nil { + return nil, err + } + return &VerifierProxyVerifierSetIterator{contract: _VerifierProxy.contract, event: "VerifierSet", logs: logs, sub: sub}, nil +} + +func (_VerifierProxy *VerifierProxyFilterer) WatchVerifierSet(opts *bind.WatchOpts, sink chan<- *VerifierProxyVerifierSet) (event.Subscription, error) { + + logs, sub, err := _VerifierProxy.contract.WatchLogs(opts, "VerifierSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VerifierProxyVerifierSet) + if err := _VerifierProxy.contract.UnpackLog(event, "VerifierSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifierProxy *VerifierProxyFilterer) ParseVerifierSet(log types.Log) (*VerifierProxyVerifierSet, error) { + event := new(VerifierProxyVerifierSet) + if err := _VerifierProxy.contract.UnpackLog(event, "VerifierSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
VerifierProxyVerifierUnsetIterator struct { + Event *VerifierProxyVerifierUnset + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VerifierProxyVerifierUnsetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VerifierProxyVerifierUnset) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VerifierProxyVerifierUnset) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VerifierProxyVerifierUnsetIterator) Error() error { + return it.fail +} + +func (it *VerifierProxyVerifierUnsetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VerifierProxyVerifierUnset struct { + ConfigDigest [32]byte + VerifierAddress common.Address + Raw types.Log +} + +func (_VerifierProxy *VerifierProxyFilterer) FilterVerifierUnset(opts *bind.FilterOpts) (*VerifierProxyVerifierUnsetIterator, error) { + + logs, sub, err := _VerifierProxy.contract.FilterLogs(opts, "VerifierUnset") + if err != nil { + return nil, err + } + return &VerifierProxyVerifierUnsetIterator{contract: _VerifierProxy.contract, event: "VerifierUnset", logs: logs, sub: sub}, nil +} + +func (_VerifierProxy *VerifierProxyFilterer) WatchVerifierUnset(opts *bind.WatchOpts, sink chan<- *VerifierProxyVerifierUnset) (event.Subscription, error) { + + logs, sub, err := _VerifierProxy.contract.WatchLogs(opts, "VerifierUnset") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { 
+ case log := <-logs: + + event := new(VerifierProxyVerifierUnset) + if err := _VerifierProxy.contract.UnpackLog(event, "VerifierUnset", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VerifierProxy *VerifierProxyFilterer) ParseVerifierUnset(log types.Log) (*VerifierProxyVerifierUnset, error) { + event := new(VerifierProxyVerifierUnset) + if err := _VerifierProxy.contract.UnpackLog(event, "VerifierUnset", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_VerifierProxy *VerifierProxy) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VerifierProxy.abi.Events["AccessControllerSet"].ID: + return _VerifierProxy.ParseAccessControllerSet(log) + case _VerifierProxy.abi.Events["FeeManagerSet"].ID: + return _VerifierProxy.ParseFeeManagerSet(log) + case _VerifierProxy.abi.Events["OwnershipTransferRequested"].ID: + return _VerifierProxy.ParseOwnershipTransferRequested(log) + case _VerifierProxy.abi.Events["OwnershipTransferred"].ID: + return _VerifierProxy.ParseOwnershipTransferred(log) + case _VerifierProxy.abi.Events["VerifierInitialized"].ID: + return _VerifierProxy.ParseVerifierInitialized(log) + case _VerifierProxy.abi.Events["VerifierSet"].ID: + return _VerifierProxy.ParseVerifierSet(log) + case _VerifierProxy.abi.Events["VerifierUnset"].ID: + return _VerifierProxy.ParseVerifierUnset(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VerifierProxyAccessControllerSet) Topic() common.Hash { + return common.HexToHash("0x953e92b1a6442e9c3242531154a3f6f6eb00b4e9c719ba8118fa6235e4ce89b6") +} + +func (VerifierProxyFeeManagerSet) Topic() common.Hash { + return 
common.HexToHash("0x04628abcaa6b1674651352125cb94b65b289145bc2bc4d67720bb7d966372f03") +} + +func (VerifierProxyOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VerifierProxyOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VerifierProxyVerifierInitialized) Topic() common.Hash { + return common.HexToHash("0x1f2cd7c97f4d801b5efe26cc409617c1fd6c5ef786e79aacb90af40923e4e8e9") +} + +func (VerifierProxyVerifierSet) Topic() common.Hash { + return common.HexToHash("0xbeb513e532542a562ac35699e7cd9ae7d198dcd3eee15bada6c857d28ceaddcf") +} + +func (VerifierProxyVerifierUnset) Topic() common.Hash { + return common.HexToHash("0x11dc15c4b8ac2b183166cc8427e5385a5ece8308217a4217338c6a7614845c4c") +} + +func (_VerifierProxy *VerifierProxy) Address() common.Address { + return _VerifierProxy.address +} + +type VerifierProxyInterface interface { + GetVerifier(opts *bind.CallOpts, configDigest [32]byte) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SAccessController(opts *bind.CallOpts) (common.Address, error) + + SFeeManager(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + InitializeVerifier(opts *bind.TransactOpts, verifierAddress common.Address) (*types.Transaction, error) + + SetAccessController(opts *bind.TransactOpts, accessController common.Address) (*types.Transaction, error) + + SetFeeManager(opts *bind.TransactOpts, feeManager common.Address) (*types.Transaction, error) + + SetVerifier(opts *bind.TransactOpts, currentConfigDigest [32]byte, newConfigDigest [32]byte, addressesAndWeights []CommonAddressAndWeight) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) 
(*types.Transaction, error) + + UnsetVerifier(opts *bind.TransactOpts, configDigest [32]byte) (*types.Transaction, error) + + Verify(opts *bind.TransactOpts, payload []byte, parameterPayload []byte) (*types.Transaction, error) + + VerifyBulk(opts *bind.TransactOpts, payloads [][]byte, parameterPayload []byte) (*types.Transaction, error) + + FilterAccessControllerSet(opts *bind.FilterOpts) (*VerifierProxyAccessControllerSetIterator, error) + + WatchAccessControllerSet(opts *bind.WatchOpts, sink chan<- *VerifierProxyAccessControllerSet) (event.Subscription, error) + + ParseAccessControllerSet(log types.Log) (*VerifierProxyAccessControllerSet, error) + + FilterFeeManagerSet(opts *bind.FilterOpts) (*VerifierProxyFeeManagerSetIterator, error) + + WatchFeeManagerSet(opts *bind.WatchOpts, sink chan<- *VerifierProxyFeeManagerSet) (event.Subscription, error) + + ParseFeeManagerSet(log types.Log) (*VerifierProxyFeeManagerSet, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifierProxyOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VerifierProxyOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VerifierProxyOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VerifierProxyOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VerifierProxyOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VerifierProxyOwnershipTransferred, error) + + FilterVerifierInitialized(opts *bind.FilterOpts) (*VerifierProxyVerifierInitializedIterator, error) + + WatchVerifierInitialized(opts *bind.WatchOpts, sink chan<- *VerifierProxyVerifierInitialized) 
(event.Subscription, error) + + ParseVerifierInitialized(log types.Log) (*VerifierProxyVerifierInitialized, error) + + FilterVerifierSet(opts *bind.FilterOpts) (*VerifierProxyVerifierSetIterator, error) + + WatchVerifierSet(opts *bind.WatchOpts, sink chan<- *VerifierProxyVerifierSet) (event.Subscription, error) + + ParseVerifierSet(log types.Log) (*VerifierProxyVerifierSet, error) + + FilterVerifierUnset(opts *bind.FilterOpts) (*VerifierProxyVerifierUnsetIterator, error) + + WatchVerifierUnset(opts *bind.WatchOpts, sink chan<- *VerifierProxyVerifierUnset) (event.Subscription, error) + + ParseVerifierUnset(log types.Log) (*VerifierProxyVerifierUnset, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/llo-feeds/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/llo-feeds/generation/generated-wrapper-dependency-versions-do-not-edit.txt new file mode 100644 index 00000000..31333e92 --- /dev/null +++ b/core/gethwrappers/llo-feeds/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -0,0 +1,15 @@ +GETH_VERSION: 1.13.8 +channel_config_store: ../../../contracts/solc/v0.8.19/ChannelConfigStore/ChannelConfigStore.abi ../../../contracts/solc/v0.8.19/ChannelConfigStore/ChannelConfigStore.bin 4ae3e6ca866fdf48850d67c0c7a4bdaf4905c81a4e3ce5efb9ef9613a55d8454 +channel_config_verifier_proxy: ../../../contracts/solc/v0.8.19/ChannelVerifierProxy/ChannelVerifierProxy.abi ../../../contracts/solc/v0.8.19/ChannelVerifierProxy/ChannelVerifierProxy.bin 655658e5f61dfadfe3268de04f948b7e690ad03ca45676e645d6cd6018154661 +channel_verifier: ../../../contracts/solc/v0.8.19/ChannelVerifier/ChannelVerifier.abi ../../../contracts/solc/v0.8.19/ChannelVerifier/ChannelVerifier.bin e6020553bd8e3e6b250fcaffe7efd22aea955c8c1a0eb05d282fdeb0ab6550b7 +errored_verifier: ../../../contracts/solc/v0.8.19/ErroredVerifier/ErroredVerifier.abi 
../../../contracts/solc/v0.8.19/ErroredVerifier/ErroredVerifier.bin a3e5a77262e13ee30fe8d35551b32a3452d71929e43fd780bbfefeaf4aa62e43 +exposed_channel_verifier: ../../../contracts/solc/v0.8.19/ExposedChannelVerifier/ExposedChannelVerifier.abi ../../../contracts/solc/v0.8.19/ExposedChannelVerifier/ExposedChannelVerifier.bin c21cde078900241c06de69e2bc5d906c5ef558b52db66caa68bed065940a2253 +exposed_verifier: ../../../contracts/solc/v0.8.19/ExposedVerifier/ExposedVerifier.abi ../../../contracts/solc/v0.8.19/ExposedVerifier/ExposedVerifier.bin 00816ab345f768e522c79abadeadf9155c2c688067e18f8f73e5d6ab71037663 +fee_manager: ../../../contracts/solc/v0.8.19/FeeManager/FeeManager.abi ../../../contracts/solc/v0.8.19/FeeManager/FeeManager.bin edc85f34294ae7c90d45c4c71eb5c105c60a4842dfbbf700c692870ffcc403a1 +llo_feeds: ../../../contracts/solc/v0.8.19/FeeManager.abi ../../../contracts/solc/v0.8.19/FeeManager.bin cb71e018f67e49d7bc0e194c822204dfd59f79ff42e4fc8fd8ab63f3acd71361 +llo_feeds_test: ../../../contracts/solc/v0.8.19/ExposedVerifier.abi ../../../contracts/solc/v0.8.19/ExposedVerifier.bin 6932cea8f2738e874d3ec9e1a4231d2421704030c071d9e15dd2f7f08482c246 +reward_manager: ../../../contracts/solc/v0.8.19/RewardManager/RewardManager.abi ../../../contracts/solc/v0.8.19/RewardManager/RewardManager.bin 7996cbc89a7f9af85b1ca4079ecf782d7138626b3f4bdb3bfa996248c9ccb9f4 +stream_config_store: ../../../contracts/solc/v0.8.19/StreamConfigStore/StreamConfigStore.abi ../../../contracts/solc/v0.8.19/StreamConfigStore/StreamConfigStore.bin 45ae1b0a45a90b3dee076023052aef73c212c8ef8825b829397f751f6b0a1598 +verifier: ../../../contracts/solc/v0.8.19/Verifier/Verifier.abi ../../../contracts/solc/v0.8.19/Verifier/Verifier.bin 413406be1578e9fb73e664ceb1967e6aedf5cf7c4701a2b81fe7c42b03f13573 +verifier_proxy: ../../../contracts/solc/v0.8.19/VerifierProxy/VerifierProxy.abi ../../../contracts/solc/v0.8.19/VerifierProxy/VerifierProxy.bin aca18e93b0129114f20c4c0fbaeb61c86bc0ca0724bc438ec7ae11c158038ea7 
+werc20_mock: ../../../contracts/solc/v0.8.19/WERC20Mock.abi ../../../contracts/solc/v0.8.19/WERC20Mock.bin ff2ca3928b2aa9c412c892cb8226c4d754c73eeb291bb7481c32c48791b2aa94 diff --git a/core/gethwrappers/llo-feeds/go_generate.go b/core/gethwrappers/llo-feeds/go_generate.go new file mode 100644 index 00000000..fddb52c9 --- /dev/null +++ b/core/gethwrappers/llo-feeds/go_generate.go @@ -0,0 +1,14 @@ +// Package gethwrappers provides tools for wrapping solidity contracts with +// golang packages, using abigen. +package gethwrappers + +// Plugin LLO +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/Verifier/Verifier.abi ../../../contracts/solc/v0.8.19/Verifier/Verifier.bin Verifier verifier +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/VerifierProxy/VerifierProxy.abi ../../../contracts/solc/v0.8.19/VerifierProxy/VerifierProxy.bin VerifierProxy verifier_proxy +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/ErroredVerifier/ErroredVerifier.abi ../../../contracts/solc/v0.8.19/ErroredVerifier/ErroredVerifier.bin ErroredVerifier errored_verifier +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/ExposedVerifier/ExposedVerifier.abi ../../../contracts/solc/v0.8.19/ExposedVerifier/ExposedVerifier.bin ExposedVerifier exposed_verifier +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/RewardManager/RewardManager.abi ../../../contracts/solc/v0.8.19/RewardManager/RewardManager.bin RewardManager reward_manager +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/FeeManager/FeeManager.abi ../../../contracts/solc/v0.8.19/FeeManager/FeeManager.bin FeeManager fee_manager +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/ChannelConfigStore/ChannelConfigStore.abi ../../../contracts/solc/v0.8.19/ChannelConfigStore/ChannelConfigStore.bin 
ChannelConfigStore channel_config_store +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/ChannelVerifier/ChannelVerifier.abi ../../../contracts/solc/v0.8.19/ChannelVerifier/ChannelVerifier.bin ChannelVerifier channel_verifier +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/ExposedChannelVerifier/ExposedChannelVerifier.abi ../../../contracts/solc/v0.8.19/ExposedChannelVerifier/ExposedChannelVerifier.bin ExposedChannelVerifier exposed_channel_verifier diff --git a/core/gethwrappers/ocr2vrf/generated/dkg/dkg.go b/core/gethwrappers/ocr2vrf/generated/dkg/dkg.go new file mode 100644 index 00000000..8f4af71d --- /dev/null +++ b/core/gethwrappers/ocr2vrf/generated/dkg/dkg.go @@ -0,0 +1,1274 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package dkg + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type KeyDataStructKeyData struct { + PublicKey []byte + Hashes [][32]byte +} + +var DKGMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"expectedLength\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"actualLength\",\"type\":\"uint256\"}],\"name\":\"CalldataLengthMismatch\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"expected\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"actual\",\"type\":\"bytes32\"}],\"name\":\"ConfigDigestMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfFaultyOracles\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"expectedNumSignatures\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"rsLength\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"ssLength\",\"type\":\"uint256\"}],\"name\":\"IncorrectNumberOfSignatures\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"expectedLength\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"actualLength\",\"type\":\"uint256\"}],\"name\":\"InvalidOnchainConfigLength\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"signer\",\"type\":\"address\"}],\"name\":\"InvalidSigner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"InvalidTransmitter\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"KeyIDCopyFailed\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NonUniqueSignature\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"numFaultyOracles\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"numSigners\",\"type\":\"uint256\"}],\"name\":\"NumberOfFaultyOraclesTooHigh\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"repeatedSignerAddress\",\"type\":\"address\"}],\"name\":\"RepeatedSigner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"repeatedTransmitterAddress\",\"type\":\"address\"}],\"name\":\"RepeatedTransmitter\",\"type\":\"err
or\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numSigners\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"numTransmitters\",\"type\":\"uint256\"}],\"name\":\"SignersTransmittersMismatch\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"maxOracles\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"providedOracles\",\"type\":\"uint256\"}],\"name\":\"TooManyOracles\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"contractDKGClient\",\"name\":\"client\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"errorData\",\"type\":\"bytes\"}],\"name\":\"DKGClientError\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"keyID\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\
"name\":\"hashes\",\"type\":\"bytes32[]\"}],\"indexed\":false,\"internalType\":\"structKeyDataStruct.KeyData\",\"name\":\"key\",\"type\":\"tuple\"}],\"name\":\"KeyGenerated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"name\":\"Transmitted\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyID\",\"type\":\"bytes32\"},{\"internalType\":\"contractDKGClient\",\"name\":\"clientAddress\",\"type\":\"address\"}],\"name\":\"addClient\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_keyID\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_configDigest\",\"type\":\"bytes32\"}],\"name\":\"getKey\",\"outputs\":[{\"components\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"hashes\",\"type\":\"bytes32[]\"}],\"internalType\":\"structKeyDataStruct.KeyData\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint3
2\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"keyID\",\"type\":\"bytes32\"},{\"internalType\":\"contractDKGClient\",\"name\":\"clientAddress\",\"type\":\"address\"}],\"name\":\"removeClient\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"_f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"_offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"report\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\
":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: "0x60806040523480156200001157600080fd5b503380600081620000695760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b03848116919091179091558116156200009c576200009c81620000a5565b50505062000150565b336001600160a01b03821603620000ff5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000060565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b612b8080620001606000396000f3fe608060405234801561001057600080fd5b50600436106100c95760003560e01c80638da5cb5b11610081578063c3105a6b1161005b578063c3105a6b146101db578063e3d0e712146101fb578063f2fde38b1461020e57600080fd5b80638da5cb5b14610176578063afcb95d71461019e578063b1dc65a4146101c857600080fd5b806379ba5097116100b257806379ba50971461012b5780637bf1ffc51461013357806381ff70481461014657600080fd5b8063181f5a77146100ce5780635429a79e14610116575b600080fd5b604080518082018252600981527f444b4720302e302e3100000000000000000000000000000000000000000000006020820152905161010d9190611fc8565b60405180910390f35b610129610124366004612004565b610221565b005b6101296104b0565b610129610141366004612004565b6105b2565b6007546005546040805163ffffffff8085168252640100000000909404909316602084015282015260600161010d565b60005460405173ffffffffffffffffffffffffffffffffffffffff909116815260200161010d565b6005546004546040805160008152602081019390935263ffffffff909
1169082015260600161010d565b6101296101d6366004612080565b61061e565b6101ee6101e9366004612165565b610761565b60405161010d9190612187565b6101296102093660046123dd565b61088a565b61012961021c3660046124aa565b6111ec565b610229611200565b60008281526002602090815260408083208054825181850281018501909352808352919290919083018282801561029657602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff16815260019091019060200180831161026b575b505050505090506000815167ffffffffffffffff8111156102b9576102b961220d565b6040519080825280602002602001820160405280156102e2578160200160208202803683370190505b5090506000805b83518110156103b9578473ffffffffffffffffffffffffffffffffffffffff1684828151811061031b5761031b6124c7565b602002602001015173ffffffffffffffffffffffffffffffffffffffff161461039957848361034a8484612525565b8151811061035a5761035a6124c7565b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250506103a7565b816103a381612538565b9250505b806103b181612538565b9150506102e9565b5060008184516103c99190612525565b67ffffffffffffffff8111156103e1576103e161220d565b60405190808252806020026020018201604052801561040a578160200160208202803683370190505b50905060005b82855161041d9190612525565b81101561048757838181518110610436576104366124c7565b6020026020010151828281518110610450576104506124c7565b73ffffffffffffffffffffffffffffffffffffffff909216602092830291909101909101528061047f81612538565b915050610410565b50600086815260026020908152604090912082516104a792840190611e61565b50505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff163314610536576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a
3daafe3b4186f6b6457e091a350565b6105ba611200565b600091825260026020908152604083208054600181018255908452922090910180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909216919091179055565b60005a604080516020601f8b018190048102820181019092528981529192508a3591818c01359161066e9184918491908e908e908190840183828082843760009201919091525061128392505050565b6040805183815263ffffffff600884901c1660208201527fb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62910160405180910390a16040805160608101825260055480825260065460ff808216602085015261010090910416928201929092529083146107215780516040517f93df584c00000000000000000000000000000000000000000000000000000000815260048101919091526024810184905260440161052d565b61072f8b8b8b8b8b8b611512565b6107408c8c8c8c8c8c8c8c89611599565b50505063ffffffff811061075657610756612570565b505050505050505050565b604080518082019091526060808252602082015260008381526003602090815260408083208584529091529081902081518083019092528054829082906107a79061259f565b80601f01602080910402602001604051908101604052809291908181526020018280546107d39061259f565b80156108205780601f106107f557610100808354040283529160200191610820565b820191906000526020600020905b81548152906001019060200180831161080357829003601f168201915b505050505081526020016001820180548060200260200160405190810160405280929190818152602001828054801561087857602002820191906000526020600020905b815481526020019060010190808311610864575b50505050508152505090505b92915050565b8551855185601f8311156108d4576040517f809fc428000000000000000000000000000000000000000000000000000000008152601f60048201526024810184905260440161052d565b818314610917576040517f988a0804000000000000000000000000000000000000000000000000000000008152600481018490526024810183905260440161052d565b6109228160036125f2565b60ff168311610969576040517ffda9db7800000000000000000000000000000000000000000000000000000000815260ff821660048201526024810184905260440161052d565b8060ff166000036109a6576040517fe77dba5600000000000000000000000000000
000000000000000000000000000815260040160405180910390fd5b6109ae611200565b6040805160c0810182528a8152602081018a905260ff8916918101919091526060810187905267ffffffffffffffff8616608082015260a081018590525b60095415610ba157600954600090610a0690600190612525565b9050600060098281548110610a1d57610a1d6124c7565b6000918252602082200154600a805473ffffffffffffffffffffffffffffffffffffffff90921693509084908110610a5757610a576124c7565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff85811684526008909252604080842080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000090811690915592909116808452922080549091169055600980549192509080610ad757610ad7612615565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055019055600a805480610b4057610b40612615565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055019055506109ec915050565b60005b81515181101561101c5760006008600084600001518481518110610bca57610bca6124c7565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff166002811115610c1457610c14612644565b14610c84578151805182908110610c2d57610c2d6124c7565b60200260200101516040517f7451f83e00000000000000000000000000000000000000000000000000000000815260040161052d919073ffffffffffffffffffffffffffffffffffffffff91909116815260200190565b6040805180820190915260ff82168152600160208201528251805160089160009185908110610cb557610cb56124c7565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001617610100836002811115610d5657610d56612644565b021790555060009150610d669050565b6008600084602001518481518
110610d8057610d806124c7565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054610100900460ff166002811115610dca57610dca612644565b14610e3c5781602001518181518110610de557610de56124c7565b60200260200101516040517fe8d2989900000000000000000000000000000000000000000000000000000000815260040161052d919073ffffffffffffffffffffffffffffffffffffffff91909116815260200190565b6040805180820190915260ff821681526020810160028152506008600084602001518481518110610e6f57610e6f6124c7565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040016000208251815460ff9091167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0082168117835592840151919283917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001617610100836002811115610f1057610f10612644565b021790555050825180516009925083908110610f2e57610f2e6124c7565b602090810291909101810151825460018101845560009384529282902090920180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909316929092179091558201518051600a919083908110610faa57610faa6124c7565b60209081029190910181015182546001810184556000938452919092200180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9092169190911790558061101481612538565b915050610ba4565b506040810151600680547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff909216919091179055600754640100000000900463ffffffff1661106c611a2f565b6007805463ffffffff928316640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff8216811783556001936000926110bd928692908116911617612673565b92506101000a81548163ffffffff021916908363ffffffff160217905550600061111e4630600760009054906101000a900463ffffffff1663ffffffff1686600001518760200151886040015189606001518a608001518b60a00151611ac6565b6005819055835180516006805460ff909216610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff90921691909
11790556007546020860151604080880151606089015160808a015160a08b015193519798507f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05976111c3978b978b9763ffffffff9091169691959094909390929091906126e1565b60405180910390a16111de8360400151846060015183611b71565b505050505050505050505050565b6111f4611200565b6111fd81611d6c565b50565b60005473ffffffffffffffffffffffffffffffffffffffff163314611281576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161052d565b565b60006060808380602001905181019061129c9190612777565b60408051808201825283815260208082018490526000868152600282528381208054855181850281018501909652808652979a509598509396509094929391929083018282801561132357602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116112f8575b5050505050905060005b815181101561144d57818181518110611348576113486124c7565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1663bf2732c7846040518263ffffffff1660e01b81526004016113889190612187565b600060405180830381600087803b1580156113a257600080fd5b505af19250505080156113b3575060015b61143b573d8080156113e1576040519150601f19603f3d011682016040523d82523d6000602084013e6113e6565b606091505b507f116391732f5df106193bda7cedf1728f3b07b62f6cdcdd611c9eeec44efcae5483838151811061141a5761141a6124c7565b602002602001015182604051611431929190612875565b60405180910390a1505b8061144581612538565b91505061132d565b5060008581526003602090815260408083208b845290915290208251839190819061147890826128fb565b5060208281015180516114919260018501920190611eeb565b5090505084887fc8db841f5b2231ccf7190311f440aa197b161e369f3b40b023508160cc555656846040516114c69190612187565b60405180910390a350506004805460089690961c63ffffffff167fffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000909616959095179094555050505050565b600061151f826020612a15565b61152a856020612a15565b61153688610144612a2c565b6115409190612a2c565b61154
a9190612a2c565b611555906000612a2c565b90503681146104a7576040517ff7b94f0a0000000000000000000000000000000000000000000000000000000081526004810182905236602482015260440161052d565b60006002826020015183604001516115b19190612a3f565b6115bb9190612a58565b6115c6906001612a3f565b60408051600180825281830190925260ff929092169250600091906020820181803683370190505090508160f81b81600081518110611607576116076124c7565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535086821415806116455750868514155b1561168d576040517fe307bd5700000000000000000000000000000000000000000000000000000000815260048101839052602481018890526044810186905260640161052d565b3360009081526008602090815260408083208151808301909252805460ff808216845292939192918401916101009091041660028111156116d0576116d0612644565b60028111156116e1576116e1612644565b90525090506002816020015160028111156116fe576116fe612644565b1415806117465750600a816000015160ff1681548110611720576117206124c7565b60009182526020909120015473ffffffffffffffffffffffffffffffffffffffff163314155b1561177f576040517f2d0f0c0f00000000000000000000000000000000000000000000000000000000815233600482015260240161052d565b50505060008888604051611794929190612aa1565b6040519081900381206117ab918c90602001612ab1565b6040516020818303038152906040528051906020012090506117cb611f26565b604080518082019091526000808252602082015260005b88811015611a20576000600185888460208110611801576118016124c7565b61180e91901a601b612a3f565b8d8d86818110611820576118206124c7565b905060200201358c8c87818110611839576118396124c7565b9050602002013560405160008152602001604052604051611876949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa158015611898573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015173ffffffffffffffffffffffffffffffffffffffff811660009081526008602090815290849020838501909452835460ff8082168552929650929450840191610100900416600281111561191857611918612644565b600281111561192957611929612644565b905
250925060018360200151600281111561194657611946612644565b14611995576040517fbf18af4300000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8216600482015260240161052d565b8251849060ff16601f81106119ac576119ac6124c7565b6020020151156119e8576040517f21cf3b4400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600184846000015160ff16601f8110611a0357611a036124c7565b911515602090920201525080611a1881612538565b9150506117e2565b50505050505050505050505050565b60004661a4b1811480611a44575062066eed81145b15611abf57606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa158015611a95573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611ab99190612ac5565b91505090565b4391505090565b6000808a8a8a8a8a8a8a8a8a604051602001611aea99989796959493929190612ade565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e01000000000000000000000000000000000000000000000000000000000000179150509998505050505050505050565b6000808351602014611bbc5783516040517f1625adfe00000000000000000000000000000000000000000000000000000000815260206004820152602481019190915260440161052d565b60208401519150808203611bfc576040517faf5e77d000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60408051808201909152606080825260208201526000838152600360209081526040808320878452909152902081518291908190611c3a90826128fb565b506020828101518051611c539260018501920190611eeb565b505050600083815260026020908152604080832080548251818502810185019093528083529192909190830182828015611cc357602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311611c98575b5050505050905060005b8151811015611d6257818181518110611ce857611ce86124c7565b602002602001015173ffffffffffffffffffffffffffffffffffffffff166355e487496040518163fffff
fff1660e01b8152600401600060405180830381600087803b158015611d3757600080fd5b505af1158015611d4b573d6000803e3d6000fd5b505050508080611d5a90612538565b915050611ccd565b5050505050505050565b3373ffffffffffffffffffffffffffffffffffffffff821603611deb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161052d565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b828054828255906000526020600020908101928215611edb579160200282015b82811115611edb57825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190611e81565b50611ee7929150611f45565b5090565b828054828255906000526020600020908101928215611edb579160200282015b82811115611edb578251825591602001919060010190611f0b565b604051806103e00160405280601f906020820280368337509192915050565b5b80821115611ee75760008155600101611f46565b60005b83811015611f75578181015183820152602001611f5d565b50506000910152565b60008151808452611f96816020860160208601611f5a565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000611fdb6020830184611f7e565b9392505050565b73ffffffffffffffffffffffffffffffffffffffff811681146111fd57600080fd5b6000806040838503121561201757600080fd5b82359150602083013561202981611fe2565b809150509250929050565b60008083601f84011261204657600080fd5b50813567ffffffffffffffff81111561205e57600080fd5b6020830191508360208260051b850101111561207957600080fd5b9250929050565b60008060008060008060008060e0898b03121561209c57600080fd5b606089018a8111156120ad57600080fd5b8998503567ffffffffffffffff808211156120c757600080fd5b818b0191508b601f8301126120db57600080fd5b8135818111156120ea57600080fd5b8c60208285010111156120fc57600080f
d5b6020830199508098505060808b013591508082111561211a57600080fd5b6121268c838d01612034565b909750955060a08b013591508082111561213f57600080fd5b5061214c8b828c01612034565b999c989b50969995989497949560c00135949350505050565b6000806040838503121561217857600080fd5b50508035926020909101359150565b6000602080835283516040828501526121a36060850182611f7e565b858301518582037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0016040870152805180835290840192506000918401905b8083101561220257835182529284019260019290920191908401906121e2565b509695505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156122835761228361220d565b604052919050565b600067ffffffffffffffff8211156122a5576122a561220d565b5060051b60200190565b600082601f8301126122c057600080fd5b813560206122d56122d08361228b565b61223c565b82815260059290921b840181019181810190868411156122f457600080fd5b8286015b8481101561220257803561230b81611fe2565b83529183019183016122f8565b803560ff8116811461232957600080fd5b919050565b600067ffffffffffffffff8211156123485761234861220d565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600082601f83011261238557600080fd5b81356123936122d08261232e565b8181528460208386010111156123a857600080fd5b816020850160208301376000918101602001919091529392505050565b803567ffffffffffffffff8116811461232957600080fd5b60008060008060008060c087890312156123f657600080fd5b863567ffffffffffffffff8082111561240e57600080fd5b61241a8a838b016122af565b9750602089013591508082111561243057600080fd5b61243c8a838b016122af565b965061244a60408a01612318565b9550606089013591508082111561246057600080fd5b61246c8a838b01612374565b945061247a60808a016123c5565b935060a089013591508082111561249057600080fd5b5061249d89828a01612374565b9150509295509295509295565b6000602082840312156124bc57600080fd5b8135611fdb81611fe2565b7f4e487b71000000000000000000000000000000000000000000000
00000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b81810381811115610884576108846124f6565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203612569576125696124f6565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052600160045260246000fd5b600181811c908216806125b357607f821691505b6020821081036125ec577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b60ff818116838216029081169081811461260e5761260e6124f6565b5092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b63ffffffff81811683821601908082111561260e5761260e6124f6565b600081518084526020808501945080840160005b838110156126d657815173ffffffffffffffffffffffffffffffffffffffff16875295820195908201906001016126a4565b509495945050505050565b600061012063ffffffff808d1684528b6020850152808b166040850152508060608401526127118184018a612690565b905082810360808401526127258189612690565b905060ff871660a084015282810360c08401526127428187611f7e565b905067ffffffffffffffff851660e08401528281036101008401526127678185611f7e565b9c9b505050505050505050505050565b60008060006060848603121561278c57600080fd5b8351925060208085015167ffffffffffffffff808211156127ac57600080fd5b818701915087601f8301126127c057600080fd5b81516127ce6122d08261232e565b81815289858386010111156127e257600080fd5b6127f182868301878701611f5a565b60408901519096509250508082111561280957600080fd5b508501601f8101871361281b57600080fd5b80516128296122d08261228b565b81815260059190911b8201830190838101908983111561284857600080fd5b928401925b828410156128665783518252928401929084019061284d565b80955050505050509250925092565b73ffffffffffffffffffffffffffffffffffffffff831681526040602082015260006128a46040830184611f7e565b949350505050565b601f8211156128f657600081815260208120601f850160051c81016020861
0156128d35750805b601f850160051c820191505b818110156128f2578281556001016128df565b5050505b505050565b815167ffffffffffffffff8111156129155761291561220d565b61292981612923845461259f565b846128ac565b602080601f83116001811461297c57600084156129465750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b1785556128f2565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b828110156129c9578886015182559484019460019091019084016129aa565b5085821015612a0557878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b8082028115828204841417610884576108846124f6565b80820180821115610884576108846124f6565b60ff8181168382160190811115610884576108846124f6565b600060ff831680612a92577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b8060ff84160491505092915050565b8183823760009101908152919050565b828152606082602083013760800192915050565b600060208284031215612ad757600080fd5b5051919050565b60006101208b835273ffffffffffffffffffffffffffffffffffffffff8b16602084015267ffffffffffffffff808b166040850152816060850152612b258285018b612690565b91508382036080850152612b39828a612690565b915060ff881660a085015283820360c0850152612b568288611f7e565b90861660e085015283810361010085015290506127678185611f7e56fea164736f6c6343000813000a", +} + +var DKGABI = DKGMetaData.ABI + +var DKGBin = DKGMetaData.Bin + +func DeployDKG(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *DKG, error) { + parsed, err := DKGMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(DKGBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &DKG{DKGCaller: DKGCaller{contract: 
contract}, DKGTransactor: DKGTransactor{contract: contract}, DKGFilterer: DKGFilterer{contract: contract}}, nil +} + +type DKG struct { + address common.Address + abi abi.ABI + DKGCaller + DKGTransactor + DKGFilterer +} + +type DKGCaller struct { + contract *bind.BoundContract +} + +type DKGTransactor struct { + contract *bind.BoundContract +} + +type DKGFilterer struct { + contract *bind.BoundContract +} + +type DKGSession struct { + Contract *DKG + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type DKGCallerSession struct { + Contract *DKGCaller + CallOpts bind.CallOpts +} + +type DKGTransactorSession struct { + Contract *DKGTransactor + TransactOpts bind.TransactOpts +} + +type DKGRaw struct { + Contract *DKG +} + +type DKGCallerRaw struct { + Contract *DKGCaller +} + +type DKGTransactorRaw struct { + Contract *DKGTransactor +} + +func NewDKG(address common.Address, backend bind.ContractBackend) (*DKG, error) { + abi, err := abi.JSON(strings.NewReader(DKGABI)) + if err != nil { + return nil, err + } + contract, err := bindDKG(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &DKG{address: address, abi: abi, DKGCaller: DKGCaller{contract: contract}, DKGTransactor: DKGTransactor{contract: contract}, DKGFilterer: DKGFilterer{contract: contract}}, nil +} + +func NewDKGCaller(address common.Address, caller bind.ContractCaller) (*DKGCaller, error) { + contract, err := bindDKG(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &DKGCaller{contract: contract}, nil +} + +func NewDKGTransactor(address common.Address, transactor bind.ContractTransactor) (*DKGTransactor, error) { + contract, err := bindDKG(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &DKGTransactor{contract: contract}, nil +} + +func NewDKGFilterer(address common.Address, filterer bind.ContractFilterer) (*DKGFilterer, error) { + contract, err := bindDKG(address, nil, nil, filterer) + if err != nil { 
+ return nil, err + } + return &DKGFilterer{contract: contract}, nil +} + +func bindDKG(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := DKGMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_DKG *DKGRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _DKG.Contract.DKGCaller.contract.Call(opts, result, method, params...) +} + +func (_DKG *DKGRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _DKG.Contract.DKGTransactor.contract.Transfer(opts) +} + +func (_DKG *DKGRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _DKG.Contract.DKGTransactor.contract.Transact(opts, method, params...) +} + +func (_DKG *DKGCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _DKG.Contract.contract.Call(opts, result, method, params...) +} + +func (_DKG *DKGTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _DKG.Contract.contract.Transfer(opts) +} + +func (_DKG *DKGTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _DKG.Contract.contract.Transact(opts, method, params...) 
+} + +func (_DKG *DKGCaller) GetKey(opts *bind.CallOpts, _keyID [32]byte, _configDigest [32]byte) (KeyDataStructKeyData, error) { + var out []interface{} + err := _DKG.contract.Call(opts, &out, "getKey", _keyID, _configDigest) + + if err != nil { + return *new(KeyDataStructKeyData), err + } + + out0 := *abi.ConvertType(out[0], new(KeyDataStructKeyData)).(*KeyDataStructKeyData) + + return out0, err + +} + +func (_DKG *DKGSession) GetKey(_keyID [32]byte, _configDigest [32]byte) (KeyDataStructKeyData, error) { + return _DKG.Contract.GetKey(&_DKG.CallOpts, _keyID, _configDigest) +} + +func (_DKG *DKGCallerSession) GetKey(_keyID [32]byte, _configDigest [32]byte) (KeyDataStructKeyData, error) { + return _DKG.Contract.GetKey(&_DKG.CallOpts, _keyID, _configDigest) +} + +func (_DKG *DKGCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := _DKG.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_DKG *DKGSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _DKG.Contract.LatestConfigDetails(&_DKG.CallOpts) +} + +func (_DKG *DKGCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _DKG.Contract.LatestConfigDetails(&_DKG.CallOpts) +} + +func (_DKG *DKGCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) { + var out []interface{} + err := _DKG.contract.Call(opts, &out, "latestConfigDigestAndEpoch") + + outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + 
outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_DKG *DKGSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _DKG.Contract.LatestConfigDigestAndEpoch(&_DKG.CallOpts) +} + +func (_DKG *DKGCallerSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _DKG.Contract.LatestConfigDigestAndEpoch(&_DKG.CallOpts) +} + +func (_DKG *DKGCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _DKG.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_DKG *DKGSession) Owner() (common.Address, error) { + return _DKG.Contract.Owner(&_DKG.CallOpts) +} + +func (_DKG *DKGCallerSession) Owner() (common.Address, error) { + return _DKG.Contract.Owner(&_DKG.CallOpts) +} + +func (_DKG *DKGCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _DKG.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_DKG *DKGSession) TypeAndVersion() (string, error) { + return _DKG.Contract.TypeAndVersion(&_DKG.CallOpts) +} + +func (_DKG *DKGCallerSession) TypeAndVersion() (string, error) { + return _DKG.Contract.TypeAndVersion(&_DKG.CallOpts) +} + +func (_DKG *DKGTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _DKG.contract.Transact(opts, "acceptOwnership") +} + +func (_DKG *DKGSession) AcceptOwnership() (*types.Transaction, error) { + return _DKG.Contract.AcceptOwnership(&_DKG.TransactOpts) +} + +func (_DKG *DKGTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return 
_DKG.Contract.AcceptOwnership(&_DKG.TransactOpts) +} + +func (_DKG *DKGTransactor) AddClient(opts *bind.TransactOpts, keyID [32]byte, clientAddress common.Address) (*types.Transaction, error) { + return _DKG.contract.Transact(opts, "addClient", keyID, clientAddress) +} + +func (_DKG *DKGSession) AddClient(keyID [32]byte, clientAddress common.Address) (*types.Transaction, error) { + return _DKG.Contract.AddClient(&_DKG.TransactOpts, keyID, clientAddress) +} + +func (_DKG *DKGTransactorSession) AddClient(keyID [32]byte, clientAddress common.Address) (*types.Transaction, error) { + return _DKG.Contract.AddClient(&_DKG.TransactOpts, keyID, clientAddress) +} + +func (_DKG *DKGTransactor) RemoveClient(opts *bind.TransactOpts, keyID [32]byte, clientAddress common.Address) (*types.Transaction, error) { + return _DKG.contract.Transact(opts, "removeClient", keyID, clientAddress) +} + +func (_DKG *DKGSession) RemoveClient(keyID [32]byte, clientAddress common.Address) (*types.Transaction, error) { + return _DKG.Contract.RemoveClient(&_DKG.TransactOpts, keyID, clientAddress) +} + +func (_DKG *DKGTransactorSession) RemoveClient(keyID [32]byte, clientAddress common.Address) (*types.Transaction, error) { + return _DKG.Contract.RemoveClient(&_DKG.TransactOpts, keyID, clientAddress) +} + +func (_DKG *DKGTransactor) SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _DKG.contract.Transact(opts, "setConfig", _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_DKG *DKGSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _DKG.Contract.SetConfig(&_DKG.TransactOpts, _signers, _transmitters, _f, _onchainConfig, 
_offchainConfigVersion, _offchainConfig) +} + +func (_DKG *DKGTransactorSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) { + return _DKG.Contract.SetConfig(&_DKG.TransactOpts, _signers, _transmitters, _f, _onchainConfig, _offchainConfigVersion, _offchainConfig) +} + +func (_DKG *DKGTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _DKG.contract.Transact(opts, "transferOwnership", to) +} + +func (_DKG *DKGSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _DKG.Contract.TransferOwnership(&_DKG.TransactOpts, to) +} + +func (_DKG *DKGTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _DKG.Contract.TransferOwnership(&_DKG.TransactOpts, to) +} + +func (_DKG *DKGTransactor) Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _DKG.contract.Transact(opts, "transmit", reportContext, report, rs, ss, rawVs) +} + +func (_DKG *DKGSession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _DKG.Contract.Transmit(&_DKG.TransactOpts, reportContext, report, rs, ss, rawVs) +} + +func (_DKG *DKGTransactorSession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _DKG.Contract.Transmit(&_DKG.TransactOpts, reportContext, report, rs, ss, rawVs) +} + +type DKGConfigSetIterator struct { + Event *DKGConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *DKGConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if 
it.done { + select { + case log := <-it.logs: + it.Event = new(DKGConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(DKGConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *DKGConfigSetIterator) Error() error { + return it.fail +} + +func (it *DKGConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type DKGConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_DKG *DKGFilterer) FilterConfigSet(opts *bind.FilterOpts) (*DKGConfigSetIterator, error) { + + logs, sub, err := _DKG.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &DKGConfigSetIterator{contract: _DKG.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_DKG *DKGFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *DKGConfigSet) (event.Subscription, error) { + + logs, sub, err := _DKG.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(DKGConfigSet) + if err := _DKG.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), 
nil +} + +func (_DKG *DKGFilterer) ParseConfigSet(log types.Log) (*DKGConfigSet, error) { + event := new(DKGConfigSet) + if err := _DKG.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type DKGDKGClientErrorIterator struct { + Event *DKGDKGClientError + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *DKGDKGClientErrorIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(DKGDKGClientError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(DKGDKGClientError) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *DKGDKGClientErrorIterator) Error() error { + return it.fail +} + +func (it *DKGDKGClientErrorIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type DKGDKGClientError struct { + Client common.Address + ErrorData []byte + Raw types.Log +} + +func (_DKG *DKGFilterer) FilterDKGClientError(opts *bind.FilterOpts) (*DKGDKGClientErrorIterator, error) { + + logs, sub, err := _DKG.contract.FilterLogs(opts, "DKGClientError") + if err != nil { + return nil, err + } + return &DKGDKGClientErrorIterator{contract: _DKG.contract, event: "DKGClientError", logs: logs, sub: sub}, nil +} + +func (_DKG *DKGFilterer) WatchDKGClientError(opts *bind.WatchOpts, sink chan<- *DKGDKGClientError) (event.Subscription, error) { + + logs, sub, err := _DKG.contract.WatchLogs(opts, "DKGClientError") + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(DKGDKGClientError) + if err := _DKG.contract.UnpackLog(event, "DKGClientError", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_DKG *DKGFilterer) ParseDKGClientError(log types.Log) (*DKGDKGClientError, error) { + event := new(DKGDKGClientError) + if err := _DKG.contract.UnpackLog(event, "DKGClientError", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type DKGKeyGeneratedIterator struct { + Event *DKGKeyGenerated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *DKGKeyGeneratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(DKGKeyGenerated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(DKGKeyGenerated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *DKGKeyGeneratedIterator) Error() error { + return it.fail +} + +func (it *DKGKeyGeneratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type DKGKeyGenerated struct { + ConfigDigest [32]byte + KeyID [32]byte + Key KeyDataStructKeyData + Raw types.Log +} + +func (_DKG *DKGFilterer) FilterKeyGenerated(opts *bind.FilterOpts, configDigest [][32]byte, keyID 
[][32]byte) (*DKGKeyGeneratedIterator, error) { + + var configDigestRule []interface{} + for _, configDigestItem := range configDigest { + configDigestRule = append(configDigestRule, configDigestItem) + } + var keyIDRule []interface{} + for _, keyIDItem := range keyID { + keyIDRule = append(keyIDRule, keyIDItem) + } + + logs, sub, err := _DKG.contract.FilterLogs(opts, "KeyGenerated", configDigestRule, keyIDRule) + if err != nil { + return nil, err + } + return &DKGKeyGeneratedIterator{contract: _DKG.contract, event: "KeyGenerated", logs: logs, sub: sub}, nil +} + +func (_DKG *DKGFilterer) WatchKeyGenerated(opts *bind.WatchOpts, sink chan<- *DKGKeyGenerated, configDigest [][32]byte, keyID [][32]byte) (event.Subscription, error) { + + var configDigestRule []interface{} + for _, configDigestItem := range configDigest { + configDigestRule = append(configDigestRule, configDigestItem) + } + var keyIDRule []interface{} + for _, keyIDItem := range keyID { + keyIDRule = append(keyIDRule, keyIDItem) + } + + logs, sub, err := _DKG.contract.WatchLogs(opts, "KeyGenerated", configDigestRule, keyIDRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(DKGKeyGenerated) + if err := _DKG.contract.UnpackLog(event, "KeyGenerated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_DKG *DKGFilterer) ParseKeyGenerated(log types.Log) (*DKGKeyGenerated, error) { + event := new(DKGKeyGenerated) + if err := _DKG.contract.UnpackLog(event, "KeyGenerated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type DKGOwnershipTransferRequestedIterator struct { + Event *DKGOwnershipTransferRequested + + contract 
*bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *DKGOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(DKGOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(DKGOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *DKGOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *DKGOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type DKGOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_DKG *DKGFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*DKGOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _DKG.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &DKGOwnershipTransferRequestedIterator{contract: _DKG.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_DKG *DKGFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *DKGOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var 
fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _DKG.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(DKGOwnershipTransferRequested) + if err := _DKG.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_DKG *DKGFilterer) ParseOwnershipTransferRequested(log types.Log) (*DKGOwnershipTransferRequested, error) { + event := new(DKGOwnershipTransferRequested) + if err := _DKG.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type DKGOwnershipTransferredIterator struct { + Event *DKGOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *DKGOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(DKGOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(DKGOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): 
+ it.done = true + it.fail = err + return it.Next() + } +} + +func (it *DKGOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *DKGOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type DKGOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_DKG *DKGFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*DKGOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _DKG.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &DKGOwnershipTransferredIterator{contract: _DKG.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_DKG *DKGFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *DKGOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _DKG.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(DKGOwnershipTransferred) + if err := _DKG.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + 
+func (_DKG *DKGFilterer) ParseOwnershipTransferred(log types.Log) (*DKGOwnershipTransferred, error) { + event := new(DKGOwnershipTransferred) + if err := _DKG.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type DKGTransmittedIterator struct { + Event *DKGTransmitted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *DKGTransmittedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(DKGTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(DKGTransmitted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *DKGTransmittedIterator) Error() error { + return it.fail +} + +func (it *DKGTransmittedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type DKGTransmitted struct { + ConfigDigest [32]byte + Epoch uint32 + Raw types.Log +} + +func (_DKG *DKGFilterer) FilterTransmitted(opts *bind.FilterOpts) (*DKGTransmittedIterator, error) { + + logs, sub, err := _DKG.contract.FilterLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return &DKGTransmittedIterator{contract: _DKG.contract, event: "Transmitted", logs: logs, sub: sub}, nil +} + +func (_DKG *DKGFilterer) WatchTransmitted(opts *bind.WatchOpts, sink chan<- *DKGTransmitted) (event.Subscription, error) { + + logs, sub, err := _DKG.contract.WatchLogs(opts, "Transmitted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit 
<-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(DKGTransmitted) + if err := _DKG.contract.UnpackLog(event, "Transmitted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_DKG *DKGFilterer) ParseTransmitted(log types.Log) (*DKGTransmitted, error) { + event := new(DKGTransmitted) + if err := _DKG.contract.UnpackLog(event, "Transmitted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LatestConfigDetails struct { + ConfigCount uint32 + BlockNumber uint32 + ConfigDigest [32]byte +} +type LatestConfigDigestAndEpoch struct { + ScanLogs bool + ConfigDigest [32]byte + Epoch uint32 +} + +func (_DKG *DKG) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _DKG.abi.Events["ConfigSet"].ID: + return _DKG.ParseConfigSet(log) + case _DKG.abi.Events["DKGClientError"].ID: + return _DKG.ParseDKGClientError(log) + case _DKG.abi.Events["KeyGenerated"].ID: + return _DKG.ParseKeyGenerated(log) + case _DKG.abi.Events["OwnershipTransferRequested"].ID: + return _DKG.ParseOwnershipTransferRequested(log) + case _DKG.abi.Events["OwnershipTransferred"].ID: + return _DKG.ParseOwnershipTransferred(log) + case _DKG.abi.Events["Transmitted"].ID: + return _DKG.ParseTransmitted(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (DKGConfigSet) Topic() common.Hash { + return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05") +} + +func (DKGDKGClientError) Topic() common.Hash { + return common.HexToHash("0x116391732f5df106193bda7cedf1728f3b07b62f6cdcdd611c9eeec44efcae54") +} + +func (DKGKeyGenerated) Topic() common.Hash { + return 
common.HexToHash("0xc8db841f5b2231ccf7190311f440aa197b161e369f3b40b023508160cc555656") +} + +func (DKGOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (DKGOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (DKGTransmitted) Topic() common.Hash { + return common.HexToHash("0xb04e63db38c49950639fa09d29872f21f5d49d614f3a969d8adf3d4b52e41a62") +} + +func (_DKG *DKG) Address() common.Address { + return _DKG.address +} + +type DKGInterface interface { + GetKey(opts *bind.CallOpts, _keyID [32]byte, _configDigest [32]byte) (KeyDataStructKeyData, error) + + LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) + + LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AddClient(opts *bind.TransactOpts, keyID [32]byte, clientAddress common.Address) (*types.Transaction, error) + + RemoveClient(opts *bind.TransactOpts, keyID [32]byte, clientAddress common.Address) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _f uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) + + FilterConfigSet(opts *bind.FilterOpts) (*DKGConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *DKGConfigSet) (event.Subscription, error) + + ParseConfigSet(log 
types.Log) (*DKGConfigSet, error) + + FilterDKGClientError(opts *bind.FilterOpts) (*DKGDKGClientErrorIterator, error) + + WatchDKGClientError(opts *bind.WatchOpts, sink chan<- *DKGDKGClientError) (event.Subscription, error) + + ParseDKGClientError(log types.Log) (*DKGDKGClientError, error) + + FilterKeyGenerated(opts *bind.FilterOpts, configDigest [][32]byte, keyID [][32]byte) (*DKGKeyGeneratedIterator, error) + + WatchKeyGenerated(opts *bind.WatchOpts, sink chan<- *DKGKeyGenerated, configDigest [][32]byte, keyID [][32]byte) (event.Subscription, error) + + ParseKeyGenerated(log types.Log) (*DKGKeyGenerated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*DKGOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *DKGOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*DKGOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*DKGOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *DKGOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*DKGOwnershipTransferred, error) + + FilterTransmitted(opts *bind.FilterOpts) (*DKGTransmittedIterator, error) + + WatchTransmitted(opts *bind.WatchOpts, sink chan<- *DKGTransmitted) (event.Subscription, error) + + ParseTransmitted(log types.Log) (*DKGTransmitted, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/ocr2vrf/generated/load_test_beacon_consumer/load_test_beacon_consumer.go b/core/gethwrappers/ocr2vrf/generated/load_test_beacon_consumer/load_test_beacon_consumer.go new file mode 100644 index 00000000..898c03c5 --- 
/dev/null +++ b/core/gethwrappers/ocr2vrf/generated/load_test_beacon_consumer/load_test_beacon_consumer.go @@ -0,0 +1,1422 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package load_test_beacon_consumer + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var LoadTestBeaconVRFConsumerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"shouldFail\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"beaconPeriodBlocks\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"MustBeCoordinator\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MustBeOwnerOrCoordinator\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"}],\"name\":\"CoordinatorUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"addr
ess\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"fail\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"reqID\",\"type\":\"uint256\"}],\"name\":\"getFulfillmentDurationByRequestID\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"reqID\",\"type\":\"uint256\"}],\"name\":\"getRawFulfillmentDurationByRequestID\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_beaconPeriodBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pendingRequests\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"},{\"internalType\":\"bytes\",\"name\":\"arguments\",\"type\":\"bytes\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"requestHeights\",\"outputs\":[{\"internalType\":\"uint256\",\
"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"reset\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_ReceivedRandomnessByRequestID\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_arguments\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_averageFulfillmentInMillions\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_fastestFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_fulfillmentDurationInBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_gasAvailable\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_mostRecentRequestID\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_myBeaconRequests\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"slotNumber\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"confirmationDelay\",\"typ
e\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_rawFulfillmentDurationInBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requestIDs\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_requestOutputHeights\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint24\",\"name\":\"\",\"type\":\"uint24\"}],\"name\":\"s_requestsIDs\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_resetCounter\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_slowestFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\
"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_slowestRequestID\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_subId\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_totalFulfilled\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_totalRequests\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"}],\"name\":\"setCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"shouldFail\",\"type\":\"bool\"}],\"name\":\"setFail\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"reqId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"height\",\"type\":\"uint256\"},{\"internalType\":\"uint24\",\"name\":\"delay\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"}],\"name\":\"storeBeaconRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"}],\"name\":\"testRedeemRandomness\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"internalType\":\"uint24\",\"name\":\"confirmati
onDelayArg\",\"type\":\"uint24\"}],\"name\":\"testRequestRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"internalType\":\"uint24\",\"name\":\"confDelay\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"arguments\",\"type\":\"bytes\"}],\"name\":\"testRequestRandomnessFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"internalType\":\"uint24\",\"name\":\"confirmationDelayArg\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"arguments\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"batchSize\",\"type\":\"uint256\"}],\"name\":\"testRequestRandomnessFulfillmentBatch\",\"outputs\":[{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60806040526000600d556000600e556103e7600f556000601055600060115560006012553480156200003057600080fd5b5060405162001f6138038062001f618339810160408190526200005391620001d0565b828282823380600081620000ae5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000e157620000e18162000125565b5050600280546001600160a01b0319166001600160a01b03939093169290921790915550600b805460ff191692151592909217909155600c55506200022792505050565b336001600160a01b038216036200017f5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620000a5565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600080600060608486031215620001e657600080fd5b83516001600160a01b0381168114620001fe57600080fd5b602085015190935080151581146200021557600080fd5b80925050604084015190509250925092565b611d2a80620002376000396000f3fe608060405234801561001057600080fd5b50600436106102775760003560e01c806379ba509711610160578063d0705f04116100d8578063f2fde38b1161008c578063f6eaffc811610071578063f6eaffc8146105bc578063fc7fea37146105cf578063ffe97ca4146105d857600080fd5b8063f2fde38b1461057e578063f371829b1461059157600080fd5b8063d826f88f116100bd578063d826f88f1461055a578063ea7502ab14610562578063f08c5daa1461057557600080fd5b8063d0705f0414610534578063d21ea8fd1461054757600080fd5b80638ea981171161012f578063a9cc471811610114578063a9cc4718146104fb578063c6d6130114610518578063cd0593df1461052b57600080fd5b80638ea98117146104a95780639d769402146104bc57600080fd5b806379ba5097146104675780638866c6bd1461046f5780638d0e3165146104785780638da5cb5b1461048157600080fd5b80635a947873116101f35780636df57cc3116101c2578063737144bc116101a7578063737144bc1461044057806374dba124146104495780637716cdaa1461045257600080fd5b80636df57cc3146
10400578063706da1ca1461041357600080fd5b80635a947873146103b05780635f15cccc146103c3578063601201d3146103ee578063689b77ab146103f757600080fd5b80632b1a21301161024a578063341867a21161022f578063341867a21461035b578063353e0f60146103705780634a0aee291461039b57600080fd5b80632b1a21301461031d5780632fe8fa311461033057600080fd5b80631591950a1461027c5780631757f11c146102ba578063195e0d75146102c35780631e87f20e146102f0575b600080fd5b6102a761028a366004611503565b601560209081526000928352604080842090915290825290205481565b6040519081526020015b60405180910390f35b6102a7600e5481565b6102a76102d1366004611525565b6012546000908152601860209081526040808320938352929052205490565b6102a76102fe366004611525565b6012546000908152601760209081526040808320938352929052205490565b6102a761032b366004611503565b61068b565b6102a761033e366004611503565b601760209081526000928352604080842090915290825290205481565b61036e610369366004611503565b6106bc565b005b6102a761037e366004611503565b601660209081526000928352604080842090915290825290205481565b6103a36107b1565b6040516102b1919061153e565b6103a36103be3660046116cc565b6108c1565b6102a76103d136600461174d565b600460209081526000928352604080842090915290825290205481565b6102a760115481565b6102a760085481565b61036e61040e366004611779565b610a1f565b6009546104279067ffffffffffffffff1681565b60405167ffffffffffffffff90911681526020016102b1565b6102a7600d5481565b6102a7600f5481565b61045a610b5a565b6040516102b19190611823565b61036e610be8565b6102a760105481565b6102a760135481565b60005460405173ffffffffffffffffffffffffffffffffffffffff90911681526020016102b1565b61036e6104b736600461183d565b610cea565b61036e6104ca366004611873565b600b80547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016911515919091179055565b600b546105089060ff1681565b60405190151581526020016102b1565b6102a7610526366004611895565b610dd0565b6102a7600c5481565b6102a7610542366004611503565b610eda565b61036e6105553660046118f5565b610ef6565b61036e610f57565b6102a76105703660046119be565b610f8d565b6102a7600a5481565b61036e61058c36600461183d565b61109d565b6102a761059
f366004611503565b601860209081526000928352604080842090915290825290205481565b6102a76105ca366004611525565b6110b1565b6102a760125481565b6106416105e6366004611525565b60056020526000908152604090205463ffffffff811690640100000000810462ffffff1690670100000000000000810461ffff16906901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1684565b6040805163ffffffff909516855262ffffff909316602085015261ffff9091169183019190915273ffffffffffffffffffffffffffffffffffffffff1660608201526080016102b1565b601460205281600052604060002081815481106106a757600080fd5b90600052602060002001600091509150505481565b60025460408051602081018252600080825291517facfc6cdd000000000000000000000000000000000000000000000000000000008152919273ffffffffffffffffffffffffffffffffffffffff169163acfc6cdd916107229187918791600401611a37565b6000604051808303816000875af1158015610741573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526107879190810190611a5f565b600083815260066020908152604090912082519293506107ab9290918401906114a3565b50505050565b6012546000908152601460205260408120546060919067ffffffffffffffff8111156107df576107df6115c0565b604051908082528060200260200182016040528015610808578160200160208202803683370190505b5090506000805b6012546000908152601460205260409020548110156108b957601254600090815260146020526040812080548390811061084b5761084b611af0565b600091825260208083209091015460125483526017825260408084208285529092529082205490925090036108a6578084848151811061088d5761088d611af0565b6020908102919091010152826108a281611b4e565b9350505b50806108b181611b4e565b91505061080f565b508152919050565b606060008267ffffffffffffffff8111156108de576108de6115c0565b604051908082528060200260200182016040528015610907578160200160208202803683370190505b5090506000600c546109176110d2565b6109219190611bb5565b9050600081600c546109316110d2565b61093b9190611bc9565b6109459190611be2565b905060005b85811015610a105760006109618c8c8c8c8c610f8d565b60108054919250600061097383611b4e565b90915550506012546000908
152601560209081526040808320848452909152902083905561099f6110d2565b60128054600090815260166020908152604080832086845282528083209490945591548152601482529182208054600181018255908352912001819055845181908690849081106109f2576109f2611af0565b60209081029190910101525080610a0881611b4e565b91505061094a565b50919998505050505050505050565b600083815260046020908152604080832062ffffff861684529091528120859055600c54610a4d9085611bf5565b6040805160808101825263ffffffff928316815262ffffff958616602080830191825261ffff968716838501908152306060850190815260009b8c526005909252939099209151825491519351995173ffffffffffffffffffffffffffffffffffffffff166901000000000000000000027fffffff0000000000000000000000000000000000000000ffffffffffffffffff9a90971667010000000000000002999099167fffffff00000000000000000000000000000000000000000000ffffffffffffff93909716640100000000027fffffffffffffffffffffffffffffffffffffffffffffffffff000000000000009091169890931697909717919091171692909217179092555050565b60078054610b6790611c09565b80601f0160208091040260200160405190810160405280929190818152602001828054610b9390611c09565b8015610be05780601f10610bb557610100808354040283529160200191610be0565b820191906000526020600020905b815481529060010190602001808311610bc357829003601f168201915b505050505081565b60015473ffffffffffffffffffffffffffffffffffffffff163314610c6e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60005473ffffffffffffffffffffffffffffffffffffffff163314801590610d2a575060025473ffffffffffffffffffffffffffffffffffffffff163314155b15610d61576040517fd4e06fd700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600280547ff
fffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83169081179091556040517fc258faa9a17ddfdf4130b4acff63a289202e7d5f9e42f366add65368575486bc90600090a250565b600080600c54610dde6110d2565b610de89190611bb5565b9050600081600c54610df86110d2565b610e029190611bc9565b610e0c9190611be2565b60025460408051602081018252600080825291517f4ffac83a000000000000000000000000000000000000000000000000000000008152939450909273ffffffffffffffffffffffffffffffffffffffff90921691634ffac83a91610e7a918a918c918b9190600401611c5c565b6020604051808303816000875af1158015610e99573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610ebd9190611c94565b9050610ecb8183878a610a1f565b60088190559695505050505050565b600660205281600052604060002081815481106106a757600080fd5b60025473ffffffffffffffffffffffffffffffffffffffff163314610f47576040517f66bf9c7200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610f52838383611169565b505050565b6000600d819055600e8190556103e7600f556010819055601181905560138190556012805491610f8683611b4e565b9190505550565b600080600c54610f9b6110d2565b610fa59190611bb5565b9050600081600c54610fb56110d2565b610fbf9190611bc9565b610fc99190611be2565b60025460408051602081018252600080825291517fdb972c8b000000000000000000000000000000000000000000000000000000008152939450909273ffffffffffffffffffffffffffffffffffffffff9092169163db972c8b9161103b918d918d918d918d918d9190600401611cad565b6020604051808303816000875af115801561105a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061107e9190611c94565b905061108c8183898b610a1f565b600881905598975050505050505050565b6110a561132b565b6110ae816113ae565b50565b600381815481106110c157600080fd5b600091825260209091200154905081565b60004661a4b18114806110e7575062066eed81145b1561116257606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa158015611138573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525
081019061115c9190611c94565b91505090565b4391505090565b600b5460ff16156111d6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f206661696c656420696e2066756c66696c6c52616e646f6d576f7264730000006044820152606401610c65565b600083815260066020908152604090912083516111f5928501906114a3565b50601254600090815260156020908152604080832086845290915281205461121b6110d2565b6112259190611be2565b60125460009081526016602090815260408083208884529091528120549192509061124e6110d2565b6112589190611be2565b9050600061126983620f4240611d06565b9050600e5483111561128057600e83905560138690555b600f54831061129157600f54611293565b825b600f556011546112a357806112d6565b6011546112b1906001611bc9565b81601154600d546112c29190611d06565b6112cc9190611bc9565b6112d69190611bf5565b600d55601180549060006112e983611b4e565b90915550506012805460009081526017602090815260408083208a84528252808320969096559154815260188252848120978152969052509320929092555050565b60005473ffffffffffffffffffffffffffffffffffffffff1633146113ac576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610c65565b565b3373ffffffffffffffffffffffffffffffffffffffff82160361142d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610c65565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b8280548282559060005260206000209081019282156114de579160200282015b828111156114de5782518255916020019190600101906114c3565b506114ea9291506114ee565b5090565b5b808211156114ea57600081556001016114ef565b6000806040838503121561151657600080fd5b50508035926020909101359150565b60006020828403121561153757600080fd5
b5035919050565b6020808252825182820181905260009190848201906040850190845b818110156115765783518352928401929184019160010161155a565b50909695505050505050565b803561ffff8116811461159457600080fd5b919050565b803562ffffff8116811461159457600080fd5b803563ffffffff8116811461159457600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715611636576116366115c0565b604052919050565b600082601f83011261164f57600080fd5b813567ffffffffffffffff811115611669576116696115c0565b61169a60207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116016115ef565b8181528460208386010111156116af57600080fd5b816020850160208301376000918101602001919091529392505050565b60008060008060008060c087890312156116e557600080fd5b863595506116f560208801611582565b945061170360408801611599565b9350611711606088016115ac565b9250608087013567ffffffffffffffff81111561172d57600080fd5b61173989828a0161163e565b92505060a087013590509295509295509295565b6000806040838503121561176057600080fd5b8235915061177060208401611599565b90509250929050565b6000806000806080858703121561178f57600080fd5b84359350602085013592506117a660408601611599565b91506117b460608601611582565b905092959194509250565b6000815180845260005b818110156117e5576020818501810151868301820152016117c9565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b60208152600061183660208301846117bf565b9392505050565b60006020828403121561184f57600080fd5b813573ffffffffffffffffffffffffffffffffffffffff8116811461183657600080fd5b60006020828403121561188557600080fd5b8135801515811461183657600080fd5b6000806000606084860312156118aa57600080fd5b6118b384611582565b9250602084013591506118c860408501611599565b90509250925092565b600067ffffffffffffffff8211156118eb576118eb6115c0565b5060051b60200190565b60008060006060848603121561190a57600080fd5b8335925060208085013567ffffffffffffffff808
2111561192a57600080fd5b818701915087601f83011261193e57600080fd5b813561195161194c826118d1565b6115ef565b81815260059190911b8301840190848101908a83111561197057600080fd5b938501935b8285101561198e57843582529385019390850190611975565b9650505060408701359250808311156119a657600080fd5b50506119b48682870161163e565b9150509250925092565b600080600080600060a086880312156119d657600080fd5b853594506119e660208701611582565b93506119f460408701611599565b9250611a02606087016115ac565b9150608086013567ffffffffffffffff811115611a1e57600080fd5b611a2a8882890161163e565b9150509295509295909350565b838152826020820152606060408201526000611a5660608301846117bf565b95945050505050565b60006020808385031215611a7257600080fd5b825167ffffffffffffffff811115611a8957600080fd5b8301601f81018513611a9a57600080fd5b8051611aa861194c826118d1565b81815260059190911b82018301908381019087831115611ac757600080fd5b928401925b82841015611ae557835182529284019290840190611acc565b979650505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611b7f57611b7f611b1f565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b600082611bc457611bc4611b86565b500690565b80820180821115611bdc57611bdc611b1f565b92915050565b81810381811115611bdc57611bdc611b1f565b600082611c0457611c04611b86565b500490565b600181811c90821680611c1d57607f821691505b602082108103611c56577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b84815261ffff8416602082015262ffffff83166040820152608060608201526000611c8a60808301846117bf565b9695505050505050565b600060208284031215611ca657600080fd5b5051919050565b86815261ffff8616602082015262ffffff8516604082015263ffffffff8416606082015260c060808201526000611ce760c08301856117bf565b82810360a0840152611cf981856117bf565b9998505050505050505050565b8
082028115828204841417611bdc57611bdc611b1f56fea164736f6c6343000813000a", +} + +var LoadTestBeaconVRFConsumerABI = LoadTestBeaconVRFConsumerMetaData.ABI + +var LoadTestBeaconVRFConsumerBin = LoadTestBeaconVRFConsumerMetaData.Bin + +func DeployLoadTestBeaconVRFConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, coordinator common.Address, shouldFail bool, beaconPeriodBlocks *big.Int) (common.Address, *types.Transaction, *LoadTestBeaconVRFConsumer, error) { + parsed, err := LoadTestBeaconVRFConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(LoadTestBeaconVRFConsumerBin), backend, coordinator, shouldFail, beaconPeriodBlocks) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LoadTestBeaconVRFConsumer{LoadTestBeaconVRFConsumerCaller: LoadTestBeaconVRFConsumerCaller{contract: contract}, LoadTestBeaconVRFConsumerTransactor: LoadTestBeaconVRFConsumerTransactor{contract: contract}, LoadTestBeaconVRFConsumerFilterer: LoadTestBeaconVRFConsumerFilterer{contract: contract}}, nil +} + +type LoadTestBeaconVRFConsumer struct { + address common.Address + abi abi.ABI + LoadTestBeaconVRFConsumerCaller + LoadTestBeaconVRFConsumerTransactor + LoadTestBeaconVRFConsumerFilterer +} + +type LoadTestBeaconVRFConsumerCaller struct { + contract *bind.BoundContract +} + +type LoadTestBeaconVRFConsumerTransactor struct { + contract *bind.BoundContract +} + +type LoadTestBeaconVRFConsumerFilterer struct { + contract *bind.BoundContract +} + +type LoadTestBeaconVRFConsumerSession struct { + Contract *LoadTestBeaconVRFConsumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type LoadTestBeaconVRFConsumerCallerSession struct { + Contract *LoadTestBeaconVRFConsumerCaller + CallOpts bind.CallOpts +} + +type 
LoadTestBeaconVRFConsumerTransactorSession struct { + Contract *LoadTestBeaconVRFConsumerTransactor + TransactOpts bind.TransactOpts +} + +type LoadTestBeaconVRFConsumerRaw struct { + Contract *LoadTestBeaconVRFConsumer +} + +type LoadTestBeaconVRFConsumerCallerRaw struct { + Contract *LoadTestBeaconVRFConsumerCaller +} + +type LoadTestBeaconVRFConsumerTransactorRaw struct { + Contract *LoadTestBeaconVRFConsumerTransactor +} + +func NewLoadTestBeaconVRFConsumer(address common.Address, backend bind.ContractBackend) (*LoadTestBeaconVRFConsumer, error) { + abi, err := abi.JSON(strings.NewReader(LoadTestBeaconVRFConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindLoadTestBeaconVRFConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LoadTestBeaconVRFConsumer{address: address, abi: abi, LoadTestBeaconVRFConsumerCaller: LoadTestBeaconVRFConsumerCaller{contract: contract}, LoadTestBeaconVRFConsumerTransactor: LoadTestBeaconVRFConsumerTransactor{contract: contract}, LoadTestBeaconVRFConsumerFilterer: LoadTestBeaconVRFConsumerFilterer{contract: contract}}, nil +} + +func NewLoadTestBeaconVRFConsumerCaller(address common.Address, caller bind.ContractCaller) (*LoadTestBeaconVRFConsumerCaller, error) { + contract, err := bindLoadTestBeaconVRFConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LoadTestBeaconVRFConsumerCaller{contract: contract}, nil +} + +func NewLoadTestBeaconVRFConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*LoadTestBeaconVRFConsumerTransactor, error) { + contract, err := bindLoadTestBeaconVRFConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LoadTestBeaconVRFConsumerTransactor{contract: contract}, nil +} + +func NewLoadTestBeaconVRFConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*LoadTestBeaconVRFConsumerFilterer, error) { + contract, err := 
bindLoadTestBeaconVRFConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LoadTestBeaconVRFConsumerFilterer{contract: contract}, nil +} + +func bindLoadTestBeaconVRFConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LoadTestBeaconVRFConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LoadTestBeaconVRFConsumer.Contract.LoadTestBeaconVRFConsumerCaller.contract.Call(opts, result, method, params...) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.LoadTestBeaconVRFConsumerTransactor.contract.Transfer(opts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.LoadTestBeaconVRFConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LoadTestBeaconVRFConsumer.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.contract.Transfer(opts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.contract.Transact(opts, method, params...) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) Fail(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "fail") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) Fail() (bool, error) { + return _LoadTestBeaconVRFConsumer.Contract.Fail(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) Fail() (bool, error) { + return _LoadTestBeaconVRFConsumer.Contract.Fail(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) GetFulfillmentDurationByRequestID(opts *bind.CallOpts, reqID *big.Int) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "getFulfillmentDurationByRequestID", reqID) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) GetFulfillmentDurationByRequestID(reqID *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.GetFulfillmentDurationByRequestID(&_LoadTestBeaconVRFConsumer.CallOpts, reqID) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) 
GetFulfillmentDurationByRequestID(reqID *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.GetFulfillmentDurationByRequestID(&_LoadTestBeaconVRFConsumer.CallOpts, reqID) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) GetRawFulfillmentDurationByRequestID(opts *bind.CallOpts, reqID *big.Int) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "getRawFulfillmentDurationByRequestID", reqID) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) GetRawFulfillmentDurationByRequestID(reqID *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.GetRawFulfillmentDurationByRequestID(&_LoadTestBeaconVRFConsumer.CallOpts, reqID) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) GetRawFulfillmentDurationByRequestID(reqID *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.GetRawFulfillmentDurationByRequestID(&_LoadTestBeaconVRFConsumer.CallOpts, reqID) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) IBeaconPeriodBlocks(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "i_beaconPeriodBlocks") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) IBeaconPeriodBlocks() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.IBeaconPeriodBlocks(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) IBeaconPeriodBlocks() (*big.Int, error) { + return 
_LoadTestBeaconVRFConsumer.Contract.IBeaconPeriodBlocks(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) Owner() (common.Address, error) { + return _LoadTestBeaconVRFConsumer.Contract.Owner(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) Owner() (common.Address, error) { + return _LoadTestBeaconVRFConsumer.Contract.Owner(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) PendingRequests(opts *bind.CallOpts) ([]*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "pendingRequests") + + if err != nil { + return *new([]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([]*big.Int)).(*[]*big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) PendingRequests() ([]*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.PendingRequests(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) PendingRequests() ([]*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.PendingRequests(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) RequestHeights(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "requestHeights", arg0, arg1) + + if err != nil { + return 
*new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) RequestHeights(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.RequestHeights(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) RequestHeights(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.RequestHeights(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SReceivedRandomnessByRequestID(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_ReceivedRandomnessByRequestID", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SReceivedRandomnessByRequestID(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SReceivedRandomnessByRequestID(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SReceivedRandomnessByRequestID(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SReceivedRandomnessByRequestID(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SArguments(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_arguments") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func 
(_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SArguments() ([]byte, error) { + return _LoadTestBeaconVRFConsumer.Contract.SArguments(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SArguments() ([]byte, error) { + return _LoadTestBeaconVRFConsumer.Contract.SArguments(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SAverageFulfillmentInMillions(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_averageFulfillmentInMillions") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SAverageFulfillmentInMillions() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SAverageFulfillmentInMillions(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SAverageFulfillmentInMillions() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SAverageFulfillmentInMillions(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SFastestFulfillment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_fastestFulfillment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SFastestFulfillment() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SFastestFulfillment(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SFastestFulfillment() (*big.Int, 
error) { + return _LoadTestBeaconVRFConsumer.Contract.SFastestFulfillment(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SFulfillmentDurationInBlocks(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_fulfillmentDurationInBlocks", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SFulfillmentDurationInBlocks(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SFulfillmentDurationInBlocks(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SFulfillmentDurationInBlocks(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SFulfillmentDurationInBlocks(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SGasAvailable(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_gasAvailable") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SGasAvailable() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SGasAvailable(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SGasAvailable() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SGasAvailable(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) 
SMostRecentRequestID(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_mostRecentRequestID") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SMostRecentRequestID() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SMostRecentRequestID(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SMostRecentRequestID() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SMostRecentRequestID(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SMyBeaconRequests(opts *bind.CallOpts, arg0 *big.Int) (SMyBeaconRequests, + + error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_myBeaconRequests", arg0) + + outstruct := new(SMyBeaconRequests) + if err != nil { + return *outstruct, err + } + + outstruct.SlotNumber = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.ConfirmationDelay = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.NumWords = *abi.ConvertType(out[2], new(uint16)).(*uint16) + outstruct.Requester = *abi.ConvertType(out[3], new(common.Address)).(*common.Address) + + return *outstruct, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SMyBeaconRequests(arg0 *big.Int) (SMyBeaconRequests, + + error) { + return _LoadTestBeaconVRFConsumer.Contract.SMyBeaconRequests(&_LoadTestBeaconVRFConsumer.CallOpts, arg0) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SMyBeaconRequests(arg0 *big.Int) (SMyBeaconRequests, + + error) { + return _LoadTestBeaconVRFConsumer.Contract.SMyBeaconRequests(&_LoadTestBeaconVRFConsumer.CallOpts, arg0) +} + +func 
(_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SRandomWords(&_LoadTestBeaconVRFConsumer.CallOpts, arg0) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SRandomWords(&_LoadTestBeaconVRFConsumer.CallOpts, arg0) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SRawFulfillmentDurationInBlocks(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_rawFulfillmentDurationInBlocks", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SRawFulfillmentDurationInBlocks(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SRawFulfillmentDurationInBlocks(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SRawFulfillmentDurationInBlocks(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SRawFulfillmentDurationInBlocks(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SRequestIDs(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) 
(*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_requestIDs", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SRequestIDs(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SRequestIDs(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SRequestIDs(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SRequestIDs(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SRequestOutputHeights(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_requestOutputHeights", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SRequestOutputHeights(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SRequestOutputHeights(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SRequestOutputHeights(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SRequestOutputHeights(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SRequestsIDs(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_requestsIDs", arg0, arg1) 
+ + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SRequestsIDs(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SRequestsIDs(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SRequestsIDs(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SRequestsIDs(&_LoadTestBeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SResetCounter(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_resetCounter") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SResetCounter() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SResetCounter(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SResetCounter() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SResetCounter(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SSlowestFulfillment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_slowestFulfillment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SSlowestFulfillment() (*big.Int, error) { + return 
_LoadTestBeaconVRFConsumer.Contract.SSlowestFulfillment(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SSlowestFulfillment() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SSlowestFulfillment(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SSlowestRequestID(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_slowestRequestID") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SSlowestRequestID() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SSlowestRequestID(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SSlowestRequestID() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.SSlowestRequestID(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) SSubId(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_subId") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SSubId() (uint64, error) { + return _LoadTestBeaconVRFConsumer.Contract.SSubId(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) SSubId() (uint64, error) { + return _LoadTestBeaconVRFConsumer.Contract.SSubId(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) STotalFulfilled(opts *bind.CallOpts) (*big.Int, 
error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_totalFulfilled") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) STotalFulfilled() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.STotalFulfilled(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) STotalFulfilled() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.STotalFulfilled(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCaller) STotalRequests(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LoadTestBeaconVRFConsumer.contract.Call(opts, &out, "s_totalRequests") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) STotalRequests() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.STotalRequests(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerCallerSession) STotalRequests() (*big.Int, error) { + return _LoadTestBeaconVRFConsumer.Contract.STotalRequests(&_LoadTestBeaconVRFConsumer.CallOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.contract.Transact(opts, "acceptOwnership") +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) AcceptOwnership() (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.AcceptOwnership(&_LoadTestBeaconVRFConsumer.TransactOpts) +} + +func (_LoadTestBeaconVRFConsumer 
*LoadTestBeaconVRFConsumerTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.AcceptOwnership(&_LoadTestBeaconVRFConsumer.TransactOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestID *big.Int, randomWords []*big.Int, arguments []byte) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.contract.Transact(opts, "rawFulfillRandomWords", requestID, randomWords, arguments) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) RawFulfillRandomWords(requestID *big.Int, randomWords []*big.Int, arguments []byte) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.RawFulfillRandomWords(&_LoadTestBeaconVRFConsumer.TransactOpts, requestID, randomWords, arguments) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactorSession) RawFulfillRandomWords(requestID *big.Int, randomWords []*big.Int, arguments []byte) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.RawFulfillRandomWords(&_LoadTestBeaconVRFConsumer.TransactOpts, requestID, randomWords, arguments) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactor) Reset(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.contract.Transact(opts, "reset") +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) Reset() (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.Reset(&_LoadTestBeaconVRFConsumer.TransactOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactorSession) Reset() (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.Reset(&_LoadTestBeaconVRFConsumer.TransactOpts) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactor) SetCoordinator(opts *bind.TransactOpts, coordinator common.Address) (*types.Transaction, 
error) { + return _LoadTestBeaconVRFConsumer.contract.Transact(opts, "setCoordinator", coordinator) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SetCoordinator(coordinator common.Address) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.SetCoordinator(&_LoadTestBeaconVRFConsumer.TransactOpts, coordinator) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactorSession) SetCoordinator(coordinator common.Address) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.SetCoordinator(&_LoadTestBeaconVRFConsumer.TransactOpts, coordinator) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactor) SetFail(opts *bind.TransactOpts, shouldFail bool) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.contract.Transact(opts, "setFail", shouldFail) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) SetFail(shouldFail bool) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.SetFail(&_LoadTestBeaconVRFConsumer.TransactOpts, shouldFail) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactorSession) SetFail(shouldFail bool) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.SetFail(&_LoadTestBeaconVRFConsumer.TransactOpts, shouldFail) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactor) StoreBeaconRequest(opts *bind.TransactOpts, reqId *big.Int, height *big.Int, delay *big.Int, numWords uint16) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.contract.Transact(opts, "storeBeaconRequest", reqId, height, delay, numWords) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) StoreBeaconRequest(reqId *big.Int, height *big.Int, delay *big.Int, numWords uint16) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.StoreBeaconRequest(&_LoadTestBeaconVRFConsumer.TransactOpts, reqId, height, 
delay, numWords) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactorSession) StoreBeaconRequest(reqId *big.Int, height *big.Int, delay *big.Int, numWords uint16) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.StoreBeaconRequest(&_LoadTestBeaconVRFConsumer.TransactOpts, reqId, height, delay, numWords) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactor) TestRedeemRandomness(opts *bind.TransactOpts, subID *big.Int, requestID *big.Int) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.contract.Transact(opts, "testRedeemRandomness", subID, requestID) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) TestRedeemRandomness(subID *big.Int, requestID *big.Int) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.TestRedeemRandomness(&_LoadTestBeaconVRFConsumer.TransactOpts, subID, requestID) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactorSession) TestRedeemRandomness(subID *big.Int, requestID *big.Int) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.TestRedeemRandomness(&_LoadTestBeaconVRFConsumer.TransactOpts, subID, requestID) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactor) TestRequestRandomness(opts *bind.TransactOpts, numWords uint16, subID *big.Int, confirmationDelayArg *big.Int) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.contract.Transact(opts, "testRequestRandomness", numWords, subID, confirmationDelayArg) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) TestRequestRandomness(numWords uint16, subID *big.Int, confirmationDelayArg *big.Int) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.TestRequestRandomness(&_LoadTestBeaconVRFConsumer.TransactOpts, numWords, subID, confirmationDelayArg) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactorSession) 
TestRequestRandomness(numWords uint16, subID *big.Int, confirmationDelayArg *big.Int) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.TestRequestRandomness(&_LoadTestBeaconVRFConsumer.TransactOpts, numWords, subID, confirmationDelayArg) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactor) TestRequestRandomnessFulfillment(opts *bind.TransactOpts, subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.contract.Transact(opts, "testRequestRandomnessFulfillment", subID, numWords, confDelay, callbackGasLimit, arguments) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) TestRequestRandomnessFulfillment(subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.TestRequestRandomnessFulfillment(&_LoadTestBeaconVRFConsumer.TransactOpts, subID, numWords, confDelay, callbackGasLimit, arguments) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactorSession) TestRequestRandomnessFulfillment(subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.TestRequestRandomnessFulfillment(&_LoadTestBeaconVRFConsumer.TransactOpts, subID, numWords, confDelay, callbackGasLimit, arguments) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactor) TestRequestRandomnessFulfillmentBatch(opts *bind.TransactOpts, subID *big.Int, numWords uint16, confirmationDelayArg *big.Int, callbackGasLimit uint32, arguments []byte, batchSize *big.Int) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.contract.Transact(opts, "testRequestRandomnessFulfillmentBatch", subID, numWords, confirmationDelayArg, callbackGasLimit, arguments, batchSize) +} + +func 
(_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) TestRequestRandomnessFulfillmentBatch(subID *big.Int, numWords uint16, confirmationDelayArg *big.Int, callbackGasLimit uint32, arguments []byte, batchSize *big.Int) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.TestRequestRandomnessFulfillmentBatch(&_LoadTestBeaconVRFConsumer.TransactOpts, subID, numWords, confirmationDelayArg, callbackGasLimit, arguments, batchSize) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactorSession) TestRequestRandomnessFulfillmentBatch(subID *big.Int, numWords uint16, confirmationDelayArg *big.Int, callbackGasLimit uint32, arguments []byte, batchSize *big.Int) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.TestRequestRandomnessFulfillmentBatch(&_LoadTestBeaconVRFConsumer.TransactOpts, subID, numWords, confirmationDelayArg, callbackGasLimit, arguments, batchSize) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.contract.Transact(opts, "transferOwnership", to) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.TransferOwnership(&_LoadTestBeaconVRFConsumer.TransactOpts, to) +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _LoadTestBeaconVRFConsumer.Contract.TransferOwnership(&_LoadTestBeaconVRFConsumer.TransactOpts, to) +} + +type LoadTestBeaconVRFConsumerCoordinatorUpdatedIterator struct { + Event *LoadTestBeaconVRFConsumerCoordinatorUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*LoadTestBeaconVRFConsumerCoordinatorUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LoadTestBeaconVRFConsumerCoordinatorUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LoadTestBeaconVRFConsumerCoordinatorUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LoadTestBeaconVRFConsumerCoordinatorUpdatedIterator) Error() error { + return it.fail +} + +func (it *LoadTestBeaconVRFConsumerCoordinatorUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LoadTestBeaconVRFConsumerCoordinatorUpdated struct { + Coordinator common.Address + Raw types.Log +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerFilterer) FilterCoordinatorUpdated(opts *bind.FilterOpts, coordinator []common.Address) (*LoadTestBeaconVRFConsumerCoordinatorUpdatedIterator, error) { + + var coordinatorRule []interface{} + for _, coordinatorItem := range coordinator { + coordinatorRule = append(coordinatorRule, coordinatorItem) + } + + logs, sub, err := _LoadTestBeaconVRFConsumer.contract.FilterLogs(opts, "CoordinatorUpdated", coordinatorRule) + if err != nil { + return nil, err + } + return &LoadTestBeaconVRFConsumerCoordinatorUpdatedIterator{contract: _LoadTestBeaconVRFConsumer.contract, event: "CoordinatorUpdated", logs: logs, sub: sub}, nil +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerFilterer) WatchCoordinatorUpdated(opts *bind.WatchOpts, sink chan<- *LoadTestBeaconVRFConsumerCoordinatorUpdated, coordinator []common.Address) (event.Subscription, error) { + + 
var coordinatorRule []interface{} + for _, coordinatorItem := range coordinator { + coordinatorRule = append(coordinatorRule, coordinatorItem) + } + + logs, sub, err := _LoadTestBeaconVRFConsumer.contract.WatchLogs(opts, "CoordinatorUpdated", coordinatorRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LoadTestBeaconVRFConsumerCoordinatorUpdated) + if err := _LoadTestBeaconVRFConsumer.contract.UnpackLog(event, "CoordinatorUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerFilterer) ParseCoordinatorUpdated(log types.Log) (*LoadTestBeaconVRFConsumerCoordinatorUpdated, error) { + event := new(LoadTestBeaconVRFConsumerCoordinatorUpdated) + if err := _LoadTestBeaconVRFConsumer.contract.UnpackLog(event, "CoordinatorUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LoadTestBeaconVRFConsumerOwnershipTransferRequestedIterator struct { + Event *LoadTestBeaconVRFConsumerOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LoadTestBeaconVRFConsumerOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LoadTestBeaconVRFConsumerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(LoadTestBeaconVRFConsumerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LoadTestBeaconVRFConsumerOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *LoadTestBeaconVRFConsumerOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LoadTestBeaconVRFConsumerOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LoadTestBeaconVRFConsumerOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LoadTestBeaconVRFConsumer.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &LoadTestBeaconVRFConsumerOwnershipTransferRequestedIterator{contract: _LoadTestBeaconVRFConsumer.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *LoadTestBeaconVRFConsumerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LoadTestBeaconVRFConsumer.contract.WatchLogs(opts, 
"OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LoadTestBeaconVRFConsumerOwnershipTransferRequested) + if err := _LoadTestBeaconVRFConsumer.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerFilterer) ParseOwnershipTransferRequested(log types.Log) (*LoadTestBeaconVRFConsumerOwnershipTransferRequested, error) { + event := new(LoadTestBeaconVRFConsumerOwnershipTransferRequested) + if err := _LoadTestBeaconVRFConsumer.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LoadTestBeaconVRFConsumerOwnershipTransferredIterator struct { + Event *LoadTestBeaconVRFConsumerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LoadTestBeaconVRFConsumerOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LoadTestBeaconVRFConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LoadTestBeaconVRFConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := 
<-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LoadTestBeaconVRFConsumerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *LoadTestBeaconVRFConsumerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LoadTestBeaconVRFConsumerOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LoadTestBeaconVRFConsumerOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LoadTestBeaconVRFConsumer.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &LoadTestBeaconVRFConsumerOwnershipTransferredIterator{contract: _LoadTestBeaconVRFConsumer.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *LoadTestBeaconVRFConsumerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LoadTestBeaconVRFConsumer.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(LoadTestBeaconVRFConsumerOwnershipTransferred) + if err := _LoadTestBeaconVRFConsumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumerFilterer) ParseOwnershipTransferred(log types.Log) (*LoadTestBeaconVRFConsumerOwnershipTransferred, error) { + event := new(LoadTestBeaconVRFConsumerOwnershipTransferred) + if err := _LoadTestBeaconVRFConsumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type SMyBeaconRequests struct { + SlotNumber uint32 + ConfirmationDelay *big.Int + NumWords uint16 + Requester common.Address +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumer) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _LoadTestBeaconVRFConsumer.abi.Events["CoordinatorUpdated"].ID: + return _LoadTestBeaconVRFConsumer.ParseCoordinatorUpdated(log) + case _LoadTestBeaconVRFConsumer.abi.Events["OwnershipTransferRequested"].ID: + return _LoadTestBeaconVRFConsumer.ParseOwnershipTransferRequested(log) + case _LoadTestBeaconVRFConsumer.abi.Events["OwnershipTransferred"].ID: + return _LoadTestBeaconVRFConsumer.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (LoadTestBeaconVRFConsumerCoordinatorUpdated) Topic() common.Hash { + return common.HexToHash("0xc258faa9a17ddfdf4130b4acff63a289202e7d5f9e42f366add65368575486bc") +} + +func (LoadTestBeaconVRFConsumerOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func 
(LoadTestBeaconVRFConsumerOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_LoadTestBeaconVRFConsumer *LoadTestBeaconVRFConsumer) Address() common.Address { + return _LoadTestBeaconVRFConsumer.address +} + +type LoadTestBeaconVRFConsumerInterface interface { + Fail(opts *bind.CallOpts) (bool, error) + + GetFulfillmentDurationByRequestID(opts *bind.CallOpts, reqID *big.Int) (*big.Int, error) + + GetRawFulfillmentDurationByRequestID(opts *bind.CallOpts, reqID *big.Int) (*big.Int, error) + + IBeaconPeriodBlocks(opts *bind.CallOpts) (*big.Int, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + PendingRequests(opts *bind.CallOpts) ([]*big.Int, error) + + RequestHeights(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) + + SReceivedRandomnessByRequestID(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) + + SArguments(opts *bind.CallOpts) ([]byte, error) + + SAverageFulfillmentInMillions(opts *bind.CallOpts) (*big.Int, error) + + SFastestFulfillment(opts *bind.CallOpts) (*big.Int, error) + + SFulfillmentDurationInBlocks(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) + + SGasAvailable(opts *bind.CallOpts) (*big.Int, error) + + SMostRecentRequestID(opts *bind.CallOpts) (*big.Int, error) + + SMyBeaconRequests(opts *bind.CallOpts, arg0 *big.Int) (SMyBeaconRequests, + + error) + + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRawFulfillmentDurationInBlocks(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) + + SRequestIDs(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) + + SRequestOutputHeights(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) + + SRequestsIDs(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) + + SResetCounter(opts *bind.CallOpts) (*big.Int, error) + + SSlowestFulfillment(opts 
*bind.CallOpts) (*big.Int, error) + + SSlowestRequestID(opts *bind.CallOpts) (*big.Int, error) + + SSubId(opts *bind.CallOpts) (uint64, error) + + STotalFulfilled(opts *bind.CallOpts) (*big.Int, error) + + STotalRequests(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestID *big.Int, randomWords []*big.Int, arguments []byte) (*types.Transaction, error) + + Reset(opts *bind.TransactOpts) (*types.Transaction, error) + + SetCoordinator(opts *bind.TransactOpts, coordinator common.Address) (*types.Transaction, error) + + SetFail(opts *bind.TransactOpts, shouldFail bool) (*types.Transaction, error) + + StoreBeaconRequest(opts *bind.TransactOpts, reqId *big.Int, height *big.Int, delay *big.Int, numWords uint16) (*types.Transaction, error) + + TestRedeemRandomness(opts *bind.TransactOpts, subID *big.Int, requestID *big.Int) (*types.Transaction, error) + + TestRequestRandomness(opts *bind.TransactOpts, numWords uint16, subID *big.Int, confirmationDelayArg *big.Int) (*types.Transaction, error) + + TestRequestRandomnessFulfillment(opts *bind.TransactOpts, subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte) (*types.Transaction, error) + + TestRequestRandomnessFulfillmentBatch(opts *bind.TransactOpts, subID *big.Int, numWords uint16, confirmationDelayArg *big.Int, callbackGasLimit uint32, arguments []byte, batchSize *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterCoordinatorUpdated(opts *bind.FilterOpts, coordinator []common.Address) (*LoadTestBeaconVRFConsumerCoordinatorUpdatedIterator, error) + + WatchCoordinatorUpdated(opts *bind.WatchOpts, sink chan<- *LoadTestBeaconVRFConsumerCoordinatorUpdated, coordinator []common.Address) (event.Subscription, error) + + ParseCoordinatorUpdated(log types.Log) 
(*LoadTestBeaconVRFConsumerCoordinatorUpdated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LoadTestBeaconVRFConsumerOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *LoadTestBeaconVRFConsumerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*LoadTestBeaconVRFConsumerOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LoadTestBeaconVRFConsumerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *LoadTestBeaconVRFConsumerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*LoadTestBeaconVRFConsumerOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/ocr2vrf/generated/vrf_beacon/vrf_beacon.go b/core/gethwrappers/ocr2vrf/generated/vrf_beacon/vrf_beacon.go new file mode 100644 index 00000000..fb586d31 --- /dev/null +++ b/core/gethwrappers/ocr2vrf/generated/vrf_beacon/vrf_beacon.go @@ -0,0 +1,2846 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_beacon + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type ECCArithmeticG1Point struct { + P [2]*big.Int +} + +type KeyDataStructKeyData struct { + PublicKey []byte + Hashes [][32]byte +} + +type VRFBeaconReportReport struct { + Outputs []VRFBeaconTypesVRFOutput + JuelsPerFeeCoin *big.Int + ReasonableGasPrice uint64 + RecentBlockHeight uint64 + RecentBlockHash [32]byte +} + +type VRFBeaconTypesCallback struct { + RequestID *big.Int + NumWords uint16 + Requester common.Address + Arguments []byte + GasAllowance *big.Int + SubID *big.Int + GasPrice *big.Int + WeiPerUnitLink *big.Int +} + +type VRFBeaconTypesCostedCallback struct { + Callback VRFBeaconTypesCallback + Price *big.Int +} + +type VRFBeaconTypesOutputServed struct { + Height uint64 + ConfirmationDelay *big.Int + ProofG1X *big.Int + ProofG1Y *big.Int +} + +type VRFBeaconTypesVRFOutput struct { + BlockHeight uint64 + ConfirmationDelay *big.Int + VrfOutput ECCArithmeticG1Point + Callbacks []VRFBeaconTypesCostedCallback + ShouldStore bool +} + +var VRFBeaconMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"link\",\"type\":\"address\"},{\"internalType\":\"contractIVRFCoordinatorProducerAPI\",\"name\":\"coordinator\",\"type\":\"address\"},{\"internalType\":\"contractDKG\",\"name\":\"keyProvider\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"keyID\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"expectedLength\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"actualLength\",\"type\":\"uint256\"}],\"name\":\"CalldataLengthMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CannotAcceptPayeeship\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"expected\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"actual\",\"type\":\"bytes32\"}],\"name\":\"ConfigDigestMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"DuplicateSigners\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"providedHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"onchainHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"blockHeight\",\"type\":\"uint64\"}],\"name\":\"HistoryDomainSeparatorWrong\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectNumberOfFaultyOracles\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numTransmitters\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"numPayees\",\"type\":\"uint256\"}],\"name\":\"IncorrectNumberOfPayees\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"expectedNumSignatures\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"rsLength\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"ssLength\",\"type\":\"uint256\"}],\"name\":\"IncorrectNumberOfSignatures\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"actualBalance\",\"type\":\"uin
t256\"},{\"internalType\":\"uint256\",\"name\":\"requiredBalance\",\"type\":\"uint256\"}],\"name\":\"InsufficientBalance\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidPayee\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"keyProvider\",\"type\":\"address\"}],\"name\":\"KeyInfoMustComeFromProvider\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"LeftGasExceedsInitialGas\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MustBeOwnerOrBillingAdmin\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"numFaultyOracles\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"numSigners\",\"type\":\"uint256\"}],\"name\":\"NumberOfFaultyOraclesTooHigh\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"config\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"expectedLength\",\"type\":\"uint256\"}],\"name\":\"OnchainConfigHasWrongLength\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"signer\",\"type\":\"address\"}],\"name\":\"OnlyActiveSigners\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"OnlyActiveTransmitters\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCurrentPayee\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"existingPayee\",\"type\":\"address\"}],\"name\":\"PayeeAlreadySet\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"repeatedSignerAddress\",\"type\":\"address\"}],\"name\":\"RepeatedSigner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"repeatedTransmitterAddress\",\"type\":\"address\"}],\"name\":\"RepeatedTransmitter\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReportDoesNotContainNewOutputs\",\"type\":\"error\"},{\"inputs\":[{\"i
nternalType\":\"uint256\",\"name\":\"numSigners\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"numTransmitters\",\"type\":\"uint256\"}],\"name\":\"SignersTransmittersMismatch\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"maxOracles\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"providedOracles\",\"type\":\"uint256\"}],\"name\":\"TooManyOracles\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"ocrVersion\",\"type\":\"uint64\"}],\"name\":\"UnknownConfigVersion\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"contractAccessControllerInterface\",\"name\":\"old\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"contractAccessControllerInterface\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"BillingAccessControllerSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"maximumGasPrice\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"reasonableGasPrice\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"observationPayment\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"transmissionPayment\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint24\",\"name\":\"accountingGas\",\"type\":\"uint24\"}],\"name\":\"BillingSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexe
d\":false,\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint40\",\"name\":\"epochAndRound\",\"type\":\"uint40\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint192\",\"name\":\"juelsPerFeeCoin\",\"type\":\"uint192\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"reasonableGasPrice\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"name\":\"NewTransmission\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"contractLinkTokenInterface\",\"name\":\"linkToken\",\"type\":\"address\"}],\"name\":\"OraclePaid\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"recentBlockHeight\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint192\",\"name\":\"juelsPerFeeCoin\",\"type\":\"uint192\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"reasonableGasPrice\",\"type\":\"uint64\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"height\",\"type\":\"uint64\"},{\"internalType\":\"uint24\",\"name\":\"confirmationDelay\",\"type\":\"uint24\"},{\"internalType\":\"uint256\",\"name\":\"proofG1X\",\"type\":\"uint
256\"},{\"internalType\":\"uint256\",\"name\":\"proofG1Y\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structVRFBeaconTypes.OutputServed[]\",\"name\":\"outputsServed\",\"type\":\"tuple[]\"}],\"name\":\"OutputsServed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previous\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"requestIDs\",\"type\":\"uint256[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"successfulFulfillment\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes[]\",\"name\":\"truncatedErrorData\",\"type\":\"bytes[]\"},{\"indexed\":false,\"internalType\":\"uint96[]\",\"name\":\"subBalances\",\"type\":\"uint96[]\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\
":\"subIDs\",\"type\":\"uint256[]\"}],\"name\":\"RandomWordsFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"nextBeaconOutputHeight\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint24\",\"name\":\"confDelay\",\"type\":\"uint24\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasAllowance\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"weiPerUnitLink\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"arguments\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"costJuels\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newSubBalance\",\"type\":\"uint256\"}],\"name\":\"RandomnessFulfillmentRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"}],\"name\":\"RandomnessRedeemed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"nextBeaconOutputHeight\",\"type\":\"uint64\"},{\"inde
xed\":false,\"internalType\":\"uint24\",\"name\":\"confDelay\",\"type\":\"uint24\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"costJuels\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newSubBalance\",\"type\":\"uint256\"}],\"name\":\"RandomnessRequested\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"NUM_CONF_DELAYS\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"components\":[{\"internalType\":\"uint64\",\"name\":\"blockHeight\",\"type\":\"uint64\"},{\"internalType\":\"uint24\",\"name\":\"confirmationDelay\",\"type\":\"uint24\"},{\"components\":[{\"internalType\":\"uint256[2]\",\"name\":\"p\",\"type\":\"uint256[2]\"}],\"internalType\":\"structECCArithmetic.G1Point\",\"name\":\"vrfOutput\",\"type\":\"tuple\"},{\"components\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"arguments\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"gasAllowance\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"weiPerUnitLink\",\"type\":\"uint256\
"}],\"internalType\":\"structVRFBeaconTypes.Callback\",\"name\":\"callback\",\"type\":\"tuple\"},{\"internalType\":\"uint96\",\"name\":\"price\",\"type\":\"uint96\"}],\"internalType\":\"structVRFBeaconTypes.CostedCallback[]\",\"name\":\"callbacks\",\"type\":\"tuple[]\"},{\"internalType\":\"bool\",\"name\":\"shouldStore\",\"type\":\"bool\"}],\"internalType\":\"structVRFBeaconTypes.VRFOutput[]\",\"name\":\"outputs\",\"type\":\"tuple[]\"},{\"internalType\":\"uint192\",\"name\":\"juelsPerFeeCoin\",\"type\":\"uint192\"},{\"internalType\":\"uint64\",\"name\":\"reasonableGasPrice\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"recentBlockHeight\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"recentBlockHash\",\"type\":\"bytes32\"}],\"internalType\":\"structVRFBeaconReport.Report\",\"name\":\"\",\"type\":\"tuple\"}],\"name\":\"exposeType\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getBilling\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"maximumGasPrice\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"reasonableGasPrice\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"observationPayment\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"transmissionPayment\",\"type\":\"uint64\"},{\"internalType\":\"uint24\",\"name\":\"accountingGas\",\"type\":\"uint24\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getBillingAccessController\",\"outputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_coordinator\",\"outputs\":[{\"internalType\":\"contractIVRFCoordinatorProducerAPI\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_link\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\
"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"hashes\",\"type\":\"bytes32[]\"}],\"internalType\":\"structKeyDataStruct.KeyData\",\"name\":\"kd\",\"type\":\"tuple\"}],\"name\":\"keyGenerated\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDigestAndEpoch\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"scanLogs\",\"type\":\"bool\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"linkAvailableForPayment\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"availableBalance\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"newKeyRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitterAddress\",\"type\":\"address\"}],\"name\":\"owedPayment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_keyID\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}
,{\"inputs\":[],\"name\":\"s_keyProvider\",\"outputs\":[{\"internalType\":\"contractDKG\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_provingKeyHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"maximumGasPrice\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"reasonableGasPrice\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"observationPayment\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"transmissionPayment\",\"type\":\"uint64\"},{\"internalType\":\"uint24\",\"name\":\"accountingGas\",\"type\":\"uint24\"}],\"name\":\"setBilling\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"_billingAccessController\",\"type\":\"address\"}],\"name\":\"setBillingAccessController\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"f\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"payees\",\"type\":\"address[]\"}],\"name\":\"setPayees\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\"
:\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"report\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60e06040523480156200001157600080fd5b5060405162004dfe38038062004dfe8339810160408190526200003491620001c7565b8181858581813380600081620000915760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000c457620000c48162000103565b5050506001600160a01b03918216608052811660a052600e80546001600160a01b03191695909116949094179093555060c05250620002219350505050565b336001600160a01b038216036200015d5760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000088565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6001600160a01b0381168114620001c457600080fd5b50565b60008060008060808587031215620001de57600080fd5b8451620001eb81620001ae565b6020860151909450620001fe81620001ae565b60408601519093506200021181620001ae565b6060959095015193969295505050565b60805160a05160c051614b28620002d660003960006104810152600081816103820152818161114e015281816112260152818161131c0152818161141d015281816114bb01528181612216015281816122ee015281816124650152818161275a01528181612b8301528181612c5b0152818161316501526136cd01526000818161032b015281816112540152818161144a0152818161231c015281816124c301528181612c8901526130500152614b286000f3fe608060405234801561001057600080fd5b50600436106101c45760003560e01c8063afcb95d7116100f9578063d09dc33911610097578063e53bbc9a11610071578063e53bbc9a146104c7578063eb5dcd6c146104da578063f2fde38b146104ed578063fbffd2c11461050057600080fd5b8063d09dc339146104a3578063d57fc45a146104ab578063e3d0e712146104b457600080fd5b8063bf2732c7116100d3578063bf2732c714610438578063c10753291461044b578063c4c92b371461045e578063cc31f7dd1461047c57600080fd5b8063afcb95d7146103e8578063b121e14714610412578063b1dc65a41461042557600080fd5b806379ba5097116101665780638a1b177211610140578
0638a1b17721461037d5780638ac28d5a146103a45780638da5cb5b146103b75780639c849b30146103d557600080fd5b806379ba50971461031e5780637d253aff1461032657806381ff70481461034d57600080fd5b806329937268116101a257806329937268146102415780632f7527cc146102b757806355e48749146102d15780635f27026f146102d957600080fd5b806305aeed58146101c95780630eafb25b146101dc578063181f5a7714610202575b600080fd5b6101da6101d73660046137f7565b50565b005b6101ef6101ea366004613854565b610513565b6040519081526020015b60405180910390f35b604080518082018252600f81527f565246426561636f6e20312e302e300000000000000000000000000000000000602082015290516101f991906138df565b60025460035460408051610100840467ffffffffffffffff9081168252690100000000000000000085048116602083015271010000000000000000000000000000000000909404841691810191909152918116606083015268010000000000000000900462ffffff16608082015260a0016101f9565b6102bf600881565b60405160ff90911681526020016101f9565b6101da61059f565b600e546102f99073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016101f9565b6101da61061c565b6102f97f000000000000000000000000000000000000000000000000000000000000000081565b6004546005546040805163ffffffff808516825264010000000090940490931660208401528201526060016101f9565b6102f97f000000000000000000000000000000000000000000000000000000000000000081565b6101da6103b2366004613854565b610719565b60005473ffffffffffffffffffffffffffffffffffffffff166102f9565b6101da6103e336600461393e565b610782565b6005546006546040805160008152602081019390935263ffffffff909116908201526060016101f9565b6101da610420366004613854565b6109d4565b6101da6104333660046139ec565b610acc565b6101da610446366004613c89565b610f87565b6101da610459366004613d56565b611046565b600d5473ffffffffffffffffffffffffffffffffffffffff166102f9565b6101ef7f000000000000000000000000000000000000000000000000000000000000000081565b6101ef6113e0565b6101ef600f5481565b6101da6104c2366004613db9565b611573565b6101da6104d5366004613eb8565b611dc9565b6101da6104e8366004613f29565b61201b565b6101da6104fb366004613
854565b612174565b6101da61050e366004613854565b612185565b73ffffffffffffffffffffffffffffffffffffffff811660009081526007602090815260408083208151606081018352905460ff80821615158084526101008304909116948301949094526201000090046bffffffffffffffffffffffff1691810191909152906105875750600092915050565b604001516bffffffffffffffffffffffff1692915050565b600e5473ffffffffffffffffffffffffffffffffffffffff16338114610614576040517f292f4fb500000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff821660248201526044015b60405180910390fd5b506000600f55565b60015473ffffffffffffffffffffffffffffffffffffffff16331461069d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015260640161060b565b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b73ffffffffffffffffffffffffffffffffffffffff8181166000908152600b6020526040902054163314610779576040517fdce38c2400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6101d781612196565b61078a612569565b8281146107cd576040517f36d20459000000000000000000000000000000000000000000000000000000008152600481018490526024810182905260440161060b565b60005b838110156109cd5760008585838181106107ec576107ec613f62565b90506020020160208101906108019190613854565b9050600084848481811061081757610817613f62565b905060200201602081019061082c9190613854565b73ffffffffffffffffffffffffffffffffffffffff8084166000908152600b602052604090205491925016801580158161089257508273ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b156108e9576040517febdf175600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff808616600483015283166024820
15260440161060b565b73ffffffffffffffffffffffffffffffffffffffff8481166000908152600b6020526040902080547fffffffffffffffffffffffff000000000000000000000000000000000000000016858316908117909155908316146109b6578273ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b360405160405180910390a45b5050505080806109c590613fc0565b9150506107d0565b5050505050565b73ffffffffffffffffffffffffffffffffffffffff8181166000908152600c6020526040902054163314610a34576040517f9d12ec4f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8181166000818152600b602090815260408083208054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217909355600c909452828520805490921690915590519416939092849290917f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b39190a45050565b60005a6040805160c08101825260025460ff808216835267ffffffffffffffff61010083048116602080860191909152690100000000000000000084048216858701527101000000000000000000000000000000000090930481166060850152600354908116608085015262ffffff680100000000000000009091041660a08401523360009081526007835293909320549394509092908c01359116610ba0576040517fb1c1f68e00000000000000000000000000000000000000000000000000000000815233600482015260240161060b565b6005548b3514610bea576005546040517f93df584c00000000000000000000000000000000000000000000000000000000815260048101919091528b35602482015260440161060b565b610bf88a8a8a8a8a8a6125ec565b8151610c05906001613ff8565b60ff1687141580610c165750868514155b15610c6e578151610c28906001613ff8565b6040517ffc33647500000000000000000000000000000000000000000000000000000000815260ff9091166004820152602481018890526044810186905260640161060b565b60008a8a604051610c80929190614011565b604051908190038120610c97918e90602001614021565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815282825280516
0209182012083830190925260008084529083018190529092509060005b8a811015610e885760006001858a8460208110610d0457610d04613f62565b610d1191901a601b613ff8565b8f8f86818110610d2357610d23613f62565b905060200201358e8e87818110610d3c57610d3c613f62565b9050602002013560405160008152602001604052604051610d79949392919093845260ff9290921660208401526040830152606082015260800190565b6020604051602081039080840390855afa158015610d9b573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081015173ffffffffffffffffffffffffffffffffffffffff811660009081526008602090815290849020838501909452925460ff8082161515808552610100909204169383019390935290955092509050610e61576040517f20fb74ee00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8216600482015260240161060b565b826020015160080260ff166001901b84019350508080610e8090613fc0565b915050610ce5565b5081827e010101010101010101010101010101010101010101010101010101010101011614610ee3576040517fc103be2e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5060009150819050610f328d826020020135848e8e8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061267c92505050565b600680547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff600888901c161790559092509050610f7884838388336128c6565b50505050505050505050505050565b600e5473ffffffffffffffffffffffffffffffffffffffff16338114610ff7576040517f292f4fb500000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff8216602482015260440161060b565b81516040516110099190602001614035565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190528051602090910120600f555050565b60005473ffffffffffffffffffffffffffffffffffffffff1633148015906111075750600d546040517f6b14daf800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690636b14daf
8906110c4903390600090369060040161409a565b602060405180830381865afa1580156110e1573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061110591906140da565b155b1561113e576040517fc04ecc2800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000611148612a24565b905060007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663597d2f3c6040518163ffffffff1660e01b8152600401602060405180830381865afa1580156111b7573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906111db91906140f5565b905060006111e9828461410e565b6040517f70a0823100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000811660048301529192506000917f000000000000000000000000000000000000000000000000000000000000000016906370a0823190602401602060405180830381865afa15801561129b573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906112bf91906140f5565b905081811015611305576040517fcf479181000000000000000000000000000000000000000000000000000000008152600481018290526024810183905260440161060b565b73ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001663f99b1d688761135561134f8686614121565b89612b1f565b6040517fffffffff0000000000000000000000000000000000000000000000000000000060e085901b16815273ffffffffffffffffffffffffffffffffffffffff90921660048301526024820152604401600060405180830381600087803b1580156113c057600080fd5b505af11580156113d4573d6000803e3d6000fd5b50505050505050505050565b6040517f70a0823100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000008116600483015260009182917f000000000000000000000000000000000000000000000000000000000000000016906370a0823190602401602060405180830381865afa158015611491573d6000803e3d6000fd5b5050505060405
13d601f19601f820116820180604052508101906114b591906140f5565b905060007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663597d2f3c6040518163ffffffff1660e01b8152600401602060405180830381865afa158015611524573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061154891906140f5565b90506000611554612a24565b9050816115618285614134565b61156b9190614134565b935050505090565b888787601f8311156115bb576040517f809fc428000000000000000000000000000000000000000000000000000000008152601f60048201526024810184905260440161060b565b8183146115fe576040517f988a0804000000000000000000000000000000000000000000000000000000008152600481018490526024810183905260440161060b565b61160981600361415b565b60ff168311611650576040517ffda9db7800000000000000000000000000000000000000000000000000000000815260ff821660048201526024810184905260440161060b565b61165c8160ff16612b39565b611664612569565b60006040518060c001604052808f8f80806020026020016040519081016040528093929190818152602001838360200280828437600081840152601f19601f8201169050808301925050505050505081526020018d8d8080602002602001604051908101604052809392919081815260200183836020028082843760009201919091525050509082525060ff8c1660208083019190915260408051601f8d0183900483028101830182528c8152920191908c908c908190840183828082843760009201919091525050509082525067ffffffffffffffff891660208083019190915260408051601f8a01839004830281018301825289815292019190899089908190840183828082843760009201919091525050509152509050611786612b73565b60095460005b8181101561187f576000600982815481106117a9576117a9613f62565b6000918252602082200154600a805473ffffffffffffffffffffffffffffffffffffffff909216935090849081106117e3576117e3613f62565b600091825260208083209091015473ffffffffffffffffffffffffffffffffffffffff948516835260088252604080842080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000016905594168252600790529190912080547fffffffffffffffffffffffffffffffffffff0000000000000000000000000000169055508061187781613fc0565b91505061178c565b5
061188c6009600061373a565b611898600a600061373a565b60005b825151811015611c215760086000846000015183815181106118bf576118bf613f62565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff1682528101919091526040016000205460ff161561196357825180518290811061190c5761190c613f62565b60200260200101516040517f7451f83e00000000000000000000000000000000000000000000000000000000815260040161060b919073ffffffffffffffffffffffffffffffffffffffff91909116815260200190565b604080518082019091526001815260ff82166020820152835180516008916000918590811061199457611994613f62565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281810192909252604001600090812083518154948401517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00009095169015157fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff161761010060ff90951694909402939093179092558401518051600792919084908110611a4657611a46613f62565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff1682528101919091526040016000205460ff1615611aec5782602001518181518110611a9557611a95613f62565b60200260200101516040517fe8d2989900000000000000000000000000000000000000000000000000000000815260040161060b919073ffffffffffffffffffffffffffffffffffffffff91909116815260200190565b60405180606001604052806001151581526020018260ff16815260200160006bffffffffffffffffffffffff168152506007600085602001518481518110611b3657611b36613f62565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040908101600020835181549385015194909201516bffffffffffffffffffffffff1662010000027fffffffffffffffffffffffffffffffffffff000000000000000000000000ffff60ff95909516610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff931515939093167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000090941693909317919091179290921617905580611c1981613fc0565b91505061189b565b5081518051611c3891600991602090910190613758565b506020808301518051611c4f92600a920190613758565b506040820151600280547ffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffff001660ff909216919091179055600454640100000000900463ffffffff16611c9f6131d5565b6004805463ffffffff928316640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff82168117909255600092611ced9281169116176001614177565b905080600460006101000a81548163ffffffff021916908363ffffffff1602179055506000611d4146308463ffffffff16886000015189602001518a604001518b606001518c608001518d60a0015161326c565b9050806005819055507f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e058360055484886000015189602001518a604001518b606001518c608001518d60a00151604051611da3999897969594939291906141e5565b60405180910390a1611db58d8d613317565b505050505050505050505050505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314801590611e8a5750600d546040517f6b14daf800000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690636b14daf890611e47903390600090369060040161409a565b602060405180830381865afa158015611e64573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611e8891906140da565b155b15611ec1576040517fc04ecc2800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b611ec9612b73565b6002805467ffffffffffffffff8581167101000000000000000000000000000000000081027fffffffffffffff0000000000000000ffffffffffffffffffffffffffffffffff898416690100000000000000000081027fffffffffffffffffffffffffffffff0000000000000000ffffffffffffffffff8d87166101008102919091167fffffffffffffffffffffffffffffff00000000000000000000000000000000ff909816979097171791909116919091179094556003805462ffffff87166801000000000000000081027fffffffffffffffffffffffffffffffffffffffffff00000000000000000000009092169489169485179190911790915560408051948552602085019590955293830152606082015260808101919091527f49275ddcdfc9c0519b3d094308c8bf675f06070a754ce90c152163cb6e66e8a09060a00160405180910390a15050505050565b73ffffffffffffffffffffffffffffffffffffffff8281166000908152600b602052604090205416331461207b576040517fdce38
c2400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff811633036120ca576040517fb387a23800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8083166000908152600c6020526040902080548383167fffffffffffffffffffffffff00000000000000000000000000000000000000008216811790925590911690811461216f5760405173ffffffffffffffffffffffffffffffffffffffff8084169133918616907f84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e3836790600090a45b505050565b61217c612569565b6101d781613325565b61218d612569565b6101d78161341a565b73ffffffffffffffffffffffffffffffffffffffff81166000908152600760209081526040918290208251606081018452905460ff80821615158084526101008304909116938301939093526201000090046bffffffffffffffffffffffff1692810192909252612205575050565b600061221083610513565b905060007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663597d2f3c6040518163ffffffff1660e01b8152600401602060405180830381865afa15801561227f573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906122a391906140f5565b905060006122b1828461410e565b6040517f70a0823100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000811660048301529192506000917f000000000000000000000000000000000000000000000000000000000000000016906370a0823190602401602060405180830381865afa158015612363573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061238791906140f5565b9050818110156123cd576040517fcf479181000000000000000000000000000000000000000000000000000000008152600481018290526024810183905260440161060b565b83156125615773ffffffffffffffffffffffffffffffffffffffff8681166000908152600b602090815260408083205460079092529182902080547fffffffffffffffffffffffffffffffffffff000000000000000000000000ffff16905590517ff99b1d68000000000
0000000000000000000000000000000000000000000000081529082166004820181905260248201879052917f0000000000000000000000000000000000000000000000000000000000000000169063f99b1d6890604401600060405180830381600087803b1580156124a957600080fd5b505af11580156124bd573d6000803e3d6000fd5b505050507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff168873ffffffffffffffffffffffffffffffffffffffff167fd0b1dac935d85bd54cf0a33b0d41d39f8cf53a968465fc7ea2377526b8ac712c8860405161255791815260200190565b60405180910390a4505b505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff1633146125ea576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015260640161060b565b565b60006125f982602061427b565b61260485602061427b565b6126108861014461410e565b61261a919061410e565b612624919061410e565b61262f90600061410e565b9050368114612673576040517ff7b94f0a0000000000000000000000000000000000000000000000000000000081526004810182905236602482015260440161060b565b50505050505050565b60008060008380602001905181019061269591906144bf565b905060006126a682606001516134c2565b90508082608001511461270957608082015160608301516040517faed0afe500000000000000000000000000000000000000000000000000000000815260048101929092526024820183905267ffffffffffffffff16604482015260640161060b565b81516020830151604080850151606086015191517f76f2e3f400000000000000000000000000000000000000000000000000000000815260009473ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016946376f2e3f4946127929492939192916004016147fa565b6020604051808303816000875af11580156127b1573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906127d591906140da565b90508061280e576040517f69c920fa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8664ffffffffff167ffc3c7a7927e878a0f
ca37c904953c3c75cee3ca1d1640184a0ab1c65eec6274333856020015186604001518c6040516128a5949392919073ffffffffffffffffffffffffffffffffffffffff94909416845277ffffffffffffffffffffffffffffffffffffffffffffffff92909216602084015267ffffffffffffffff166040830152606082015260800190565b60405180910390a28260200151836040015194509450505050935093915050565b60006128f23a67ffffffffffffffff8616156128e257856128e8565b87604001515b88602001516135a5565b90506010360260005a9050600061291b8663ffffffff1685858c60a0015162ffffff16866135f6565b90506000670de0b6b3a764000077ffffffffffffffffffffffffffffffffffffffffffffffff8a16830273ffffffffffffffffffffffffffffffffffffffff881660009081526007602052604090205460808d01519290910492506201000090046bffffffffffffffffffffffff9081169167ffffffffffffffff16828401019081168211156129b157505050505050506109cd565b73ffffffffffffffffffffffffffffffffffffffff8816600090815260076020526040902080546bffffffffffffffffffffffff90921662010000027fffffffffffffffffffffffffffffffffffff000000000000000000000000ffff9092169190911790555050505050505050505050565b600080600a805480602002602001604051908101604052809291908181526020018280548015612a8a57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311612a5f575b505083519394506000925050505b81811015612b195760076000848381518110612ab657612ab6613f62565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002054612b05906201000090046bffffffffffffffffffffffff168561410e565b935080612b1181613fc0565b915050612a98565b50505090565b600081831015612b30575081612b33565b50805b92915050565b806000036101d7576040517fe77dba5600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000612b7d612a24565b905060007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663597d2f3c6040518163ffffffff1660e01b8152600401602060405180830381865afa158015612bec573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612
c1091906140f5565b90506000612c1e828461410e565b6040517f70a0823100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000811660048301529192506000917f000000000000000000000000000000000000000000000000000000000000000016906370a0823190602401602060405180830381865afa158015612cd0573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612cf491906140f5565b905081811015612d3a576040517fcf479181000000000000000000000000000000000000000000000000000000008152600481018290526024810183905260440161060b565b6000600a805480602002602001604051908101604052809291908181526020018280548015612d9f57602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311612d74575b5050505050905060008151905060008167ffffffffffffffff811115612dc757612dc7613aa3565b604051908082528060200260200182016040528015612df0578160200160208202803683370190505b50905060008267ffffffffffffffff811115612e0e57612e0e613aa3565b604051908082528060200260200182016040528015612e37578160200160208202803683370190505b5090506000805b8481101561311157600060076000888481518110612e5e57612e5e613f62565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060000160029054906101000a90046bffffffffffffffffffffffff166bffffffffffffffffffffffff169050600060076000898581518110612ee457612ee4613f62565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff16825281019190915260400160002080546bffffffffffffffffffffffff9290921662010000027fffffffffffffffffffffffffffffffffffff000000000000000000000000ffff909216919091179055808015613107576000600b60008a8681518110612f7557612f75613f62565b602002602001015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905080878681518110612fed57612fed613f62565b60200
2602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250508186868151811061303a5761303a613f62565b60200260200101818152505084806001019550507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff168a86815181106130ae576130ae613f62565b602002602001015173ffffffffffffffffffffffffffffffffffffffff167fd0b1dac935d85bd54cf0a33b0d41d39f8cf53a968465fc7ea2377526b8ac712c856040516130fd91815260200190565b60405180910390a4505b5050600101612e3e565b5081518114613121578082528083525b8151156131ca576040517f73433a2f00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016906373433a2f9061319c908690869060040161496a565b600060405180830381600087803b1580156131b657600080fd5b505af1158015610f78573d6000803e3d6000fd5b505050505050505050565b60004661a4b18114806131ea575062066eed81145b1561326557606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa15801561323b573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061325f91906140f5565b91505090565b4391505090565b6000808a8a8a8a8a8a8a8a8a604051602001613290999897969594939291906149c1565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e01000000000000000000000000000000000000000000000000000000000000179150509998505050505050505050565b613321828261363e565b5050565b3373ffffffffffffffffffffffffffffffffffffffff8216036133a4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640161060b565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673fffffffffffffffffffffffff
fffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600d5473ffffffffffffffffffffffffffffffffffffffff908116908216811461332157600d80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff84811691821790925560408051928416835260208301919091527f793cb73064f3c8cde7e187ae515511e6e56d1ee89bf08b82fa60fb70f8d48912910160405180910390a15050565b60004661a4b18114806134d7575062066eed81145b15613595576101008367ffffffffffffffff166134f26131d5565b6134fc9190614121565b111561350b5750600092915050565b6040517f2b407a8200000000000000000000000000000000000000000000000000000000815267ffffffffffffffff84166004820152606490632b407a8290602401602060405180830381865afa15801561356a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061358e91906140f5565b9392505050565b505067ffffffffffffffff164090565b60008367ffffffffffffffff84168110156135d9576002858567ffffffffffffffff1603816135d6576135d661493b565b04015b6135ed818467ffffffffffffffff16612b1f565b95945050505050565b600081861015613632576040517f3fef97df00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b50909303019091010290565b610100818114613680578282826040517f418a179b00000000000000000000000000000000000000000000000000000000815260040161060b93929190614a56565b600061368e83850185614a7a565b90506040517f8eef585f00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001690638eef585f90613702908490600401614ae4565b600060405180830381600087803b15801561371c57600080fd5b505af1158015613730573d6000803e3d6000fd5b5050505050505050565b50805460008255906000526020600020908101906101d791906137e2565b8280548282559060005260206000209081019282156137d2579160200282015b828111156137d257825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178
255602090920191600190910190613778565b506137de9291506137e2565b5090565b5b808211156137de57600081556001016137e3565b60006020828403121561380957600080fd5b813567ffffffffffffffff81111561382057600080fd5b820160a0818503121561358e57600080fd5b73ffffffffffffffffffffffffffffffffffffffff811681146101d757600080fd5b60006020828403121561386657600080fd5b813561358e81613832565b60005b8381101561388c578181015183820152602001613874565b50506000910152565b600081518084526138ad816020860160208601613871565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061358e6020830184613895565b60008083601f84011261390457600080fd5b50813567ffffffffffffffff81111561391c57600080fd5b6020830191508360208260051b850101111561393757600080fd5b9250929050565b6000806000806040858703121561395457600080fd5b843567ffffffffffffffff8082111561396c57600080fd5b613978888389016138f2565b9096509450602087013591508082111561399157600080fd5b5061399e878288016138f2565b95989497509550505050565b60008083601f8401126139bc57600080fd5b50813567ffffffffffffffff8111156139d457600080fd5b60208301915083602082850101111561393757600080fd5b60008060008060008060008060e0898b031215613a0857600080fd5b606089018a811115613a1957600080fd5b8998503567ffffffffffffffff80821115613a3357600080fd5b613a3f8c838d016139aa565b909950975060808b0135915080821115613a5857600080fd5b613a648c838d016138f2565b909750955060a08b0135915080821115613a7d57600080fd5b50613a8a8b828c016138f2565b999c989b50969995989497949560c00135949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040805190810167ffffffffffffffff81118282101715613af557613af5613aa3565b60405290565b604051610100810167ffffffffffffffff81118282101715613af557613af5613aa3565b60405160a0810167ffffffffffffffff81118282101715613af557613af5613aa3565b6040516020810167ffffffffffffffff81118282101715613af557613af5613aa3565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715613bac57613bac6
13aa3565b604052919050565b600067ffffffffffffffff821115613bce57613bce613aa3565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600067ffffffffffffffff821115613c1457613c14613aa3565b5060051b60200190565b600082601f830112613c2f57600080fd5b81356020613c44613c3f83613bfa565b613b65565b82815260059290921b84018101918181019086841115613c6357600080fd5b8286015b84811015613c7e5780358352918301918301613c67565b509695505050505050565b60006020808385031215613c9c57600080fd5b823567ffffffffffffffff80821115613cb457600080fd5b9084019060408287031215613cc857600080fd5b613cd0613ad2565b823582811115613cdf57600080fd5b8301601f81018813613cf057600080fd5b8035613cfe613c3f82613bb4565b8181528987838501011115613d1257600080fd5b818784018883013760008783830101528084525050508383013582811115613d3957600080fd5b613d4588828601613c1e565b948201949094529695505050505050565b60008060408385031215613d6957600080fd5b8235613d7481613832565b946020939093013593505050565b803560ff81168114613d9357600080fd5b919050565b67ffffffffffffffff811681146101d757600080fd5b8035613d9381613d98565b60008060008060008060008060008060c08b8d031215613dd857600080fd5b8a3567ffffffffffffffff80821115613df057600080fd5b613dfc8e838f016138f2565b909c509a5060208d0135915080821115613e1557600080fd5b613e218e838f016138f2565b909a509850889150613e3560408e01613d82565b975060608d0135915080821115613e4b57600080fd5b613e578e838f016139aa565b9097509550859150613e6b60808e01613dae565b945060a08d0135915080821115613e8157600080fd5b50613e8e8d828e016139aa565b915080935050809150509295989b9194979a5092959850565b62ffffff811681146101d757600080fd5b600080600080600060a08688031215613ed057600080fd5b8535613edb81613d98565b94506020860135613eeb81613d98565b93506040860135613efb81613d98565b92506060860135613f0b81613d98565b91506080860135613f1b81613ea7565b809150509295509295909350565b60008060408385031215613f3c57600080fd5b8235613f4781613832565b91506020830135613f5781613832565b809150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000f
d5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203613ff157613ff1613f91565b5060010190565b60ff8181168382160190811115612b3357612b33613f91565b8183823760009101908152919050565b828152606082602083013760800192915050565b60008251614047818460208701613871565b9190910192915050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b73ffffffffffffffffffffffffffffffffffffffff841681526040602082015260006135ed604083018486614051565b80518015158114613d9357600080fd5b6000602082840312156140ec57600080fd5b61358e826140ca565b60006020828403121561410757600080fd5b5051919050565b80820180821115612b3357612b33613f91565b81810381811115612b3357612b33613f91565b818103600083128015838313168383128216171561415457614154613f91565b5092915050565b60ff818116838216029081169081811461415457614154613f91565b63ffffffff81811683821601908082111561415457614154613f91565b600081518084526020808501945080840160005b838110156141da57815173ffffffffffffffffffffffffffffffffffffffff16875295820195908201906001016141a8565b509495945050505050565b600061012063ffffffff808d1684528b6020850152808b166040850152508060608401526142158184018a614194565b905082810360808401526142298189614194565b905060ff871660a084015282810360c08401526142468187613895565b905067ffffffffffffffff851660e084015282810361010084015261426b8185613895565b9c9b505050505050505050505050565b8082028115828204841417612b3357612b33613f91565b8051613d9381613d98565b805161ffff81168114613d9357600080fd5b8051613d9381613832565b600082601f8301126142cb57600080fd5b81516142d9613c3f82613bb4565b8181528460208386010111156142ee57600080fd5b6142ff826020830160208701613871565b949350505050565b80516bffffffffffffffffffffffff81168114613d9357600080fd5b600082601f83011261433457600080fd5b81516020614344613c3f83613bfa565b82815260059290921b8401810191818101908684111561436357600080fd5b8286015b84811015613c7e57805167fffffffffff
fffff8082111561438757600080fd5b908801907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe06040838c03820112156143be57600080fd5b6143c6613ad2565b87840151838111156143d757600080fd5b8401610100818e03840112156143ec57600080fd5b6143f4613afb565b92508881015183526144086040820161429d565b89840152614418606082016142af565b604084015260808101518481111561442f57600080fd5b61443d8e8b838501016142ba565b60608501525061444f60a08201614307565b608084015260c081015160a084015260e081015160c084015261010081015160e08401525081815261448360408501614307565b818901528652505050918301918301614367565b805177ffffffffffffffffffffffffffffffffffffffffffffffff81168114613d9357600080fd5b6000602082840312156144d157600080fd5b815167ffffffffffffffff808211156144e957600080fd5b9083019060a082860312156144fd57600080fd5b614505613b1f565b82518281111561451457600080fd5b8301601f8101871361452557600080fd5b8051614533613c3f82613bfa565b8082825260208201915060208360051b85010192508983111561455557600080fd5b602084015b838110156146a25780518781111561457157600080fd5b850160c0818d037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00112156145a557600080fd5b6145ad613b1f565b60208201516145bb81613d98565b815260408201516145cb81613ea7565b60208201526040828e037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa001121561460257600080fd5b61460a613b42565b8d607f84011261461957600080fd5b614621613ad2565b808f60a08601111561463257600080fd5b606085015b60a08601811015614652578051835260209283019201614637565b50825250604082015260a08201518981111561466d57600080fd5b61467c8e602083860101614323565b60608301525061468e60c083016140ca565b60808201528452506020928301920161455a565b508452506146b591505060208401614497565b60208201526146c660408401614292565b60408201526146d760608401614292565b60608201526080830151608082015280935050505092915050565b600081518084526020808501808196508360051b8101915082860160005b858110156147ed57828403895281516040815181875280518288015287810151606061ffff8216818a01528383015193506080915073ffffffffffffffffffffffffffffffffffffffff8416828a015
28083015193505061010060a081818b015261477f6101408b0186613895565b9284015192945060c06147a18b8201856bffffffffffffffffffffffff169052565b9084015160e08b81019190915290840151918a01919091529091015161012088015250908601516bffffffffffffffffffffffff16948601949094529784019790840190600101614710565b5091979650505050505050565b6000608080830181845280885180835260a092508286019150828160051b8701016020808c016000805b858110156148df578a85037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600187528251805167ffffffffffffffff1686528481015162ffffff16858701526040808201515160c091859089015b600282101561489e57825181529188019160019190910190880161487f565b5050506060820151818c8901526148b7828901826146f2565b928c0151801515898d01529291506148cc9050565b9785019795505091830191600101614824565b50505081965061490a8189018c77ffffffffffffffffffffffffffffffffffffffffffffffff169052565b505050505050614926604083018567ffffffffffffffff169052565b67ffffffffffffffff831660608301526135ed565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b60408152600061497d6040830185614194565b82810360208481019190915284518083528582019282019060005b818110156149b457845183529383019391830191600101614998565b5090979650505050505050565b60006101208b835273ffffffffffffffffffffffffffffffffffffffff8b16602084015267ffffffffffffffff808b166040850152816060850152614a088285018b614194565b91508382036080850152614a1c828a614194565b915060ff881660a085015283820360c0850152614a398288613895565b90861660e0850152838103610100850152905061426b8185613895565b604081526000614a6a604083018587614051565b9050826020830152949350505050565b6000610100808385031215614a8e57600080fd5b83601f840112614a9d57600080fd5b614aa5613afb565b908301908085831115614ab757600080fd5b845b83811015614ada578035614acc81613ea7565b835260209283019201614ab9565b5095945050505050565b6101008101818360005b6008811015614b1257815162ffffff16835260209283019290910190600101614aee565b5050509291505056fea164736f6c6343000813000a", +} + +var VRFBeaconABI = VRFBeaconMetaData.ABI + +var 
VRFBeaconBin = VRFBeaconMetaData.Bin + +func DeployVRFBeacon(auth *bind.TransactOpts, backend bind.ContractBackend, link common.Address, coordinator common.Address, keyProvider common.Address, keyID [32]byte) (common.Address, *types.Transaction, *VRFBeacon, error) { + parsed, err := VRFBeaconMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFBeaconBin), backend, link, coordinator, keyProvider, keyID) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFBeacon{VRFBeaconCaller: VRFBeaconCaller{contract: contract}, VRFBeaconTransactor: VRFBeaconTransactor{contract: contract}, VRFBeaconFilterer: VRFBeaconFilterer{contract: contract}}, nil +} + +type VRFBeacon struct { + address common.Address + abi abi.ABI + VRFBeaconCaller + VRFBeaconTransactor + VRFBeaconFilterer +} + +type VRFBeaconCaller struct { + contract *bind.BoundContract +} + +type VRFBeaconTransactor struct { + contract *bind.BoundContract +} + +type VRFBeaconFilterer struct { + contract *bind.BoundContract +} + +type VRFBeaconSession struct { + Contract *VRFBeacon + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFBeaconCallerSession struct { + Contract *VRFBeaconCaller + CallOpts bind.CallOpts +} + +type VRFBeaconTransactorSession struct { + Contract *VRFBeaconTransactor + TransactOpts bind.TransactOpts +} + +type VRFBeaconRaw struct { + Contract *VRFBeacon +} + +type VRFBeaconCallerRaw struct { + Contract *VRFBeaconCaller +} + +type VRFBeaconTransactorRaw struct { + Contract *VRFBeaconTransactor +} + +func NewVRFBeacon(address common.Address, backend bind.ContractBackend) (*VRFBeacon, error) { + abi, err := abi.JSON(strings.NewReader(VRFBeaconABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFBeacon(address, backend, 
backend, backend) + if err != nil { + return nil, err + } + return &VRFBeacon{address: address, abi: abi, VRFBeaconCaller: VRFBeaconCaller{contract: contract}, VRFBeaconTransactor: VRFBeaconTransactor{contract: contract}, VRFBeaconFilterer: VRFBeaconFilterer{contract: contract}}, nil +} + +func NewVRFBeaconCaller(address common.Address, caller bind.ContractCaller) (*VRFBeaconCaller, error) { + contract, err := bindVRFBeacon(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFBeaconCaller{contract: contract}, nil +} + +func NewVRFBeaconTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFBeaconTransactor, error) { + contract, err := bindVRFBeacon(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFBeaconTransactor{contract: contract}, nil +} + +func NewVRFBeaconFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFBeaconFilterer, error) { + contract, err := bindVRFBeacon(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFBeaconFilterer{contract: contract}, nil +} + +func bindVRFBeacon(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFBeaconMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFBeacon *VRFBeaconRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFBeacon.Contract.VRFBeaconCaller.contract.Call(opts, result, method, params...) 
+} + +func (_VRFBeacon *VRFBeaconRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFBeacon.Contract.VRFBeaconTransactor.contract.Transfer(opts) +} + +func (_VRFBeacon *VRFBeaconRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFBeacon.Contract.VRFBeaconTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFBeacon *VRFBeaconCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFBeacon.Contract.contract.Call(opts, result, method, params...) +} + +func (_VRFBeacon *VRFBeaconTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFBeacon.Contract.contract.Transfer(opts) +} + +func (_VRFBeacon *VRFBeaconTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFBeacon.Contract.contract.Transact(opts, method, params...) 
+} + +func (_VRFBeacon *VRFBeaconCaller) NUMCONFDELAYS(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "NUM_CONF_DELAYS") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_VRFBeacon *VRFBeaconSession) NUMCONFDELAYS() (uint8, error) { + return _VRFBeacon.Contract.NUMCONFDELAYS(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCallerSession) NUMCONFDELAYS() (uint8, error) { + return _VRFBeacon.Contract.NUMCONFDELAYS(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCaller) GetBilling(opts *bind.CallOpts) (GetBilling, + + error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "getBilling") + + outstruct := new(GetBilling) + if err != nil { + return *outstruct, err + } + + outstruct.MaximumGasPrice = *abi.ConvertType(out[0], new(uint64)).(*uint64) + outstruct.ReasonableGasPrice = *abi.ConvertType(out[1], new(uint64)).(*uint64) + outstruct.ObservationPayment = *abi.ConvertType(out[2], new(uint64)).(*uint64) + outstruct.TransmissionPayment = *abi.ConvertType(out[3], new(uint64)).(*uint64) + outstruct.AccountingGas = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFBeacon *VRFBeaconSession) GetBilling() (GetBilling, + + error) { + return _VRFBeacon.Contract.GetBilling(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCallerSession) GetBilling() (GetBilling, + + error) { + return _VRFBeacon.Contract.GetBilling(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCaller) GetBillingAccessController(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "getBillingAccessController") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFBeacon 
*VRFBeaconSession) GetBillingAccessController() (common.Address, error) { + return _VRFBeacon.Contract.GetBillingAccessController(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCallerSession) GetBillingAccessController() (common.Address, error) { + return _VRFBeacon.Contract.GetBillingAccessController(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCaller) ICoordinator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "i_coordinator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFBeacon *VRFBeaconSession) ICoordinator() (common.Address, error) { + return _VRFBeacon.Contract.ICoordinator(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCallerSession) ICoordinator() (common.Address, error) { + return _VRFBeacon.Contract.ICoordinator(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCaller) ILink(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "i_link") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFBeacon *VRFBeaconSession) ILink() (common.Address, error) { + return _VRFBeacon.Contract.ILink(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCallerSession) ILink() (common.Address, error) { + return _VRFBeacon.Contract.ILink(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "latestConfigDetails") + + outstruct := new(LatestConfigDetails) + if err != nil { + return *outstruct, err + } + + outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.BlockNumber = 
*abi.ConvertType(out[1], new(uint32)).(*uint32) + outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte) + + return *outstruct, err + +} + +func (_VRFBeacon *VRFBeaconSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _VRFBeacon.Contract.LatestConfigDetails(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCallerSession) LatestConfigDetails() (LatestConfigDetails, + + error) { + return _VRFBeacon.Contract.LatestConfigDetails(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCaller) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "latestConfigDigestAndEpoch") + + outstruct := new(LatestConfigDigestAndEpoch) + if err != nil { + return *outstruct, err + } + + outstruct.ScanLogs = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ConfigDigest = *abi.ConvertType(out[1], new([32]byte)).(*[32]byte) + outstruct.Epoch = *abi.ConvertType(out[2], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_VRFBeacon *VRFBeaconSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _VRFBeacon.Contract.LatestConfigDigestAndEpoch(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCallerSession) LatestConfigDigestAndEpoch() (LatestConfigDigestAndEpoch, + + error) { + return _VRFBeacon.Contract.LatestConfigDigestAndEpoch(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCaller) LinkAvailableForPayment(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "linkAvailableForPayment") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFBeacon *VRFBeaconSession) LinkAvailableForPayment() (*big.Int, error) { + return _VRFBeacon.Contract.LinkAvailableForPayment(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon 
*VRFBeaconCallerSession) LinkAvailableForPayment() (*big.Int, error) { + return _VRFBeacon.Contract.LinkAvailableForPayment(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCaller) OwedPayment(opts *bind.CallOpts, transmitterAddress common.Address) (*big.Int, error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "owedPayment", transmitterAddress) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFBeacon *VRFBeaconSession) OwedPayment(transmitterAddress common.Address) (*big.Int, error) { + return _VRFBeacon.Contract.OwedPayment(&_VRFBeacon.CallOpts, transmitterAddress) +} + +func (_VRFBeacon *VRFBeaconCallerSession) OwedPayment(transmitterAddress common.Address) (*big.Int, error) { + return _VRFBeacon.Contract.OwedPayment(&_VRFBeacon.CallOpts, transmitterAddress) +} + +func (_VRFBeacon *VRFBeaconCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFBeacon *VRFBeaconSession) Owner() (common.Address, error) { + return _VRFBeacon.Contract.Owner(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCallerSession) Owner() (common.Address, error) { + return _VRFBeacon.Contract.Owner(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCaller) SKeyID(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "s_keyID") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFBeacon *VRFBeaconSession) SKeyID() ([32]byte, error) { + return _VRFBeacon.Contract.SKeyID(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCallerSession) 
SKeyID() ([32]byte, error) { + return _VRFBeacon.Contract.SKeyID(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCaller) SKeyProvider(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "s_keyProvider") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFBeacon *VRFBeaconSession) SKeyProvider() (common.Address, error) { + return _VRFBeacon.Contract.SKeyProvider(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCallerSession) SKeyProvider() (common.Address, error) { + return _VRFBeacon.Contract.SKeyProvider(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCaller) SProvingKeyHash(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "s_provingKeyHash") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFBeacon *VRFBeaconSession) SProvingKeyHash() ([32]byte, error) { + return _VRFBeacon.Contract.SProvingKeyHash(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCallerSession) SProvingKeyHash() ([32]byte, error) { + return _VRFBeacon.Contract.SProvingKeyHash(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _VRFBeacon.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_VRFBeacon *VRFBeaconSession) TypeAndVersion() (string, error) { + return _VRFBeacon.Contract.TypeAndVersion(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon *VRFBeaconCallerSession) TypeAndVersion() (string, error) { + return _VRFBeacon.Contract.TypeAndVersion(&_VRFBeacon.CallOpts) +} + +func (_VRFBeacon 
*VRFBeaconTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFBeacon *VRFBeaconSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFBeacon.Contract.AcceptOwnership(&_VRFBeacon.TransactOpts) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFBeacon.Contract.AcceptOwnership(&_VRFBeacon.TransactOpts) +} + +func (_VRFBeacon *VRFBeaconTransactor) AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "acceptPayeeship", transmitter) +} + +func (_VRFBeacon *VRFBeaconSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) { + return _VRFBeacon.Contract.AcceptPayeeship(&_VRFBeacon.TransactOpts, transmitter) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) AcceptPayeeship(transmitter common.Address) (*types.Transaction, error) { + return _VRFBeacon.Contract.AcceptPayeeship(&_VRFBeacon.TransactOpts, transmitter) +} + +func (_VRFBeacon *VRFBeaconTransactor) ExposeType(opts *bind.TransactOpts, arg0 VRFBeaconReportReport) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "exposeType", arg0) +} + +func (_VRFBeacon *VRFBeaconSession) ExposeType(arg0 VRFBeaconReportReport) (*types.Transaction, error) { + return _VRFBeacon.Contract.ExposeType(&_VRFBeacon.TransactOpts, arg0) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) ExposeType(arg0 VRFBeaconReportReport) (*types.Transaction, error) { + return _VRFBeacon.Contract.ExposeType(&_VRFBeacon.TransactOpts, arg0) +} + +func (_VRFBeacon *VRFBeaconTransactor) KeyGenerated(opts *bind.TransactOpts, kd KeyDataStructKeyData) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "keyGenerated", kd) +} + +func (_VRFBeacon *VRFBeaconSession) KeyGenerated(kd KeyDataStructKeyData) (*types.Transaction, 
error) { + return _VRFBeacon.Contract.KeyGenerated(&_VRFBeacon.TransactOpts, kd) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) KeyGenerated(kd KeyDataStructKeyData) (*types.Transaction, error) { + return _VRFBeacon.Contract.KeyGenerated(&_VRFBeacon.TransactOpts, kd) +} + +func (_VRFBeacon *VRFBeaconTransactor) NewKeyRequested(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "newKeyRequested") +} + +func (_VRFBeacon *VRFBeaconSession) NewKeyRequested() (*types.Transaction, error) { + return _VRFBeacon.Contract.NewKeyRequested(&_VRFBeacon.TransactOpts) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) NewKeyRequested() (*types.Transaction, error) { + return _VRFBeacon.Contract.NewKeyRequested(&_VRFBeacon.TransactOpts) +} + +func (_VRFBeacon *VRFBeaconTransactor) SetBilling(opts *bind.TransactOpts, maximumGasPrice uint64, reasonableGasPrice uint64, observationPayment uint64, transmissionPayment uint64, accountingGas *big.Int) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "setBilling", maximumGasPrice, reasonableGasPrice, observationPayment, transmissionPayment, accountingGas) +} + +func (_VRFBeacon *VRFBeaconSession) SetBilling(maximumGasPrice uint64, reasonableGasPrice uint64, observationPayment uint64, transmissionPayment uint64, accountingGas *big.Int) (*types.Transaction, error) { + return _VRFBeacon.Contract.SetBilling(&_VRFBeacon.TransactOpts, maximumGasPrice, reasonableGasPrice, observationPayment, transmissionPayment, accountingGas) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) SetBilling(maximumGasPrice uint64, reasonableGasPrice uint64, observationPayment uint64, transmissionPayment uint64, accountingGas *big.Int) (*types.Transaction, error) { + return _VRFBeacon.Contract.SetBilling(&_VRFBeacon.TransactOpts, maximumGasPrice, reasonableGasPrice, observationPayment, transmissionPayment, accountingGas) +} + +func (_VRFBeacon *VRFBeaconTransactor) 
SetBillingAccessController(opts *bind.TransactOpts, _billingAccessController common.Address) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "setBillingAccessController", _billingAccessController) +} + +func (_VRFBeacon *VRFBeaconSession) SetBillingAccessController(_billingAccessController common.Address) (*types.Transaction, error) { + return _VRFBeacon.Contract.SetBillingAccessController(&_VRFBeacon.TransactOpts, _billingAccessController) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) SetBillingAccessController(_billingAccessController common.Address) (*types.Transaction, error) { + return _VRFBeacon.Contract.SetBillingAccessController(&_VRFBeacon.TransactOpts, _billingAccessController) +} + +func (_VRFBeacon *VRFBeaconTransactor) SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "setConfig", signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_VRFBeacon *VRFBeaconSession) SetConfig(signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _VRFBeacon.Contract.SetConfig(&_VRFBeacon.TransactOpts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) SetConfig(signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + return _VRFBeacon.Contract.SetConfig(&_VRFBeacon.TransactOpts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (_VRFBeacon *VRFBeaconTransactor) SetPayees(opts *bind.TransactOpts, transmitters []common.Address, payees []common.Address) 
(*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "setPayees", transmitters, payees) +} + +func (_VRFBeacon *VRFBeaconSession) SetPayees(transmitters []common.Address, payees []common.Address) (*types.Transaction, error) { + return _VRFBeacon.Contract.SetPayees(&_VRFBeacon.TransactOpts, transmitters, payees) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) SetPayees(transmitters []common.Address, payees []common.Address) (*types.Transaction, error) { + return _VRFBeacon.Contract.SetPayees(&_VRFBeacon.TransactOpts, transmitters, payees) +} + +func (_VRFBeacon *VRFBeaconTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFBeacon *VRFBeaconSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFBeacon.Contract.TransferOwnership(&_VRFBeacon.TransactOpts, to) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFBeacon.Contract.TransferOwnership(&_VRFBeacon.TransactOpts, to) +} + +func (_VRFBeacon *VRFBeaconTransactor) TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "transferPayeeship", transmitter, proposed) +} + +func (_VRFBeacon *VRFBeaconSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _VRFBeacon.Contract.TransferPayeeship(&_VRFBeacon.TransactOpts, transmitter, proposed) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) TransferPayeeship(transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + return _VRFBeacon.Contract.TransferPayeeship(&_VRFBeacon.TransactOpts, transmitter, proposed) +} + +func (_VRFBeacon *VRFBeaconTransactor) Transmit(opts *bind.TransactOpts, reportContext 
[3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "transmit", reportContext, report, rs, ss, rawVs) +} + +func (_VRFBeacon *VRFBeaconSession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _VRFBeacon.Contract.Transmit(&_VRFBeacon.TransactOpts, reportContext, report, rs, ss, rawVs) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + return _VRFBeacon.Contract.Transmit(&_VRFBeacon.TransactOpts, reportContext, report, rs, ss, rawVs) +} + +func (_VRFBeacon *VRFBeaconTransactor) WithdrawFunds(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "withdrawFunds", recipient, amount) +} + +func (_VRFBeacon *VRFBeaconSession) WithdrawFunds(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _VRFBeacon.Contract.WithdrawFunds(&_VRFBeacon.TransactOpts, recipient, amount) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) WithdrawFunds(recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return _VRFBeacon.Contract.WithdrawFunds(&_VRFBeacon.TransactOpts, recipient, amount) +} + +func (_VRFBeacon *VRFBeaconTransactor) WithdrawPayment(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) { + return _VRFBeacon.contract.Transact(opts, "withdrawPayment", transmitter) +} + +func (_VRFBeacon *VRFBeaconSession) WithdrawPayment(transmitter common.Address) (*types.Transaction, error) { + return _VRFBeacon.Contract.WithdrawPayment(&_VRFBeacon.TransactOpts, transmitter) +} + +func (_VRFBeacon *VRFBeaconTransactorSession) WithdrawPayment(transmitter common.Address) (*types.Transaction, error) { + return 
_VRFBeacon.Contract.WithdrawPayment(&_VRFBeacon.TransactOpts, transmitter) +} + +type VRFBeaconBillingAccessControllerSetIterator struct { + Event *VRFBeaconBillingAccessControllerSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconBillingAccessControllerSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconBillingAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconBillingAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconBillingAccessControllerSetIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconBillingAccessControllerSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconBillingAccessControllerSet struct { + Old common.Address + Current common.Address + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterBillingAccessControllerSet(opts *bind.FilterOpts) (*VRFBeaconBillingAccessControllerSetIterator, error) { + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "BillingAccessControllerSet") + if err != nil { + return nil, err + } + return &VRFBeaconBillingAccessControllerSetIterator{contract: _VRFBeacon.contract, event: "BillingAccessControllerSet", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchBillingAccessControllerSet(opts *bind.WatchOpts, sink chan<- *VRFBeaconBillingAccessControllerSet) (event.Subscription, error) { + + logs, sub, err := 
_VRFBeacon.contract.WatchLogs(opts, "BillingAccessControllerSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconBillingAccessControllerSet) + if err := _VRFBeacon.contract.UnpackLog(event, "BillingAccessControllerSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParseBillingAccessControllerSet(log types.Log) (*VRFBeaconBillingAccessControllerSet, error) { + event := new(VRFBeaconBillingAccessControllerSet) + if err := _VRFBeacon.contract.UnpackLog(event, "BillingAccessControllerSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconBillingSetIterator struct { + Event *VRFBeaconBillingSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconBillingSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconBillingSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconBillingSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconBillingSetIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconBillingSetIterator) Close() error { + 
it.sub.Unsubscribe() + return nil +} + +type VRFBeaconBillingSet struct { + MaximumGasPrice uint64 + ReasonableGasPrice uint64 + ObservationPayment uint64 + TransmissionPayment uint64 + AccountingGas *big.Int + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterBillingSet(opts *bind.FilterOpts) (*VRFBeaconBillingSetIterator, error) { + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "BillingSet") + if err != nil { + return nil, err + } + return &VRFBeaconBillingSetIterator{contract: _VRFBeacon.contract, event: "BillingSet", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchBillingSet(opts *bind.WatchOpts, sink chan<- *VRFBeaconBillingSet) (event.Subscription, error) { + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "BillingSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconBillingSet) + if err := _VRFBeacon.contract.UnpackLog(event, "BillingSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParseBillingSet(log types.Log) (*VRFBeaconBillingSet, error) { + event := new(VRFBeaconBillingSet) + if err := _VRFBeacon.contract.UnpackLog(event, "BillingSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconConfigSetIterator struct { + Event *VRFBeaconConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconConfigSet) + if err 
:= it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconConfigSetIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterConfigSet(opts *bind.FilterOpts) (*VRFBeaconConfigSetIterator, error) { + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &VRFBeaconConfigSetIterator{contract: _VRFBeacon.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *VRFBeaconConfigSet) (event.Subscription, error) { + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconConfigSet) + if err := _VRFBeacon.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + 
} + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParseConfigSet(log types.Log) (*VRFBeaconConfigSet, error) { + event := new(VRFBeaconConfigSet) + if err := _VRFBeacon.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconNewTransmissionIterator struct { + Event *VRFBeaconNewTransmission + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconNewTransmissionIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconNewTransmission) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconNewTransmission) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconNewTransmissionIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconNewTransmissionIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconNewTransmission struct { + EpochAndRound *big.Int + Transmitter common.Address + JuelsPerFeeCoin *big.Int + ReasonableGasPrice uint64 + ConfigDigest [32]byte + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterNewTransmission(opts *bind.FilterOpts, epochAndRound []*big.Int) (*VRFBeaconNewTransmissionIterator, error) { + + var epochAndRoundRule []interface{} + for _, epochAndRoundItem := range epochAndRound { + epochAndRoundRule = append(epochAndRoundRule, epochAndRoundItem) + } + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "NewTransmission", epochAndRoundRule) + 
if err != nil { + return nil, err + } + return &VRFBeaconNewTransmissionIterator{contract: _VRFBeacon.contract, event: "NewTransmission", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchNewTransmission(opts *bind.WatchOpts, sink chan<- *VRFBeaconNewTransmission, epochAndRound []*big.Int) (event.Subscription, error) { + + var epochAndRoundRule []interface{} + for _, epochAndRoundItem := range epochAndRound { + epochAndRoundRule = append(epochAndRoundRule, epochAndRoundItem) + } + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "NewTransmission", epochAndRoundRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconNewTransmission) + if err := _VRFBeacon.contract.UnpackLog(event, "NewTransmission", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParseNewTransmission(log types.Log) (*VRFBeaconNewTransmission, error) { + event := new(VRFBeaconNewTransmission) + if err := _VRFBeacon.contract.UnpackLog(event, "NewTransmission", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconOraclePaidIterator struct { + Event *VRFBeaconOraclePaid + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconOraclePaidIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconOraclePaid) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return 
false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconOraclePaid) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconOraclePaidIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconOraclePaidIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconOraclePaid struct { + Transmitter common.Address + Payee common.Address + Amount *big.Int + LinkToken common.Address + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterOraclePaid(opts *bind.FilterOpts, transmitter []common.Address, payee []common.Address, linkToken []common.Address) (*VRFBeaconOraclePaidIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var payeeRule []interface{} + for _, payeeItem := range payee { + payeeRule = append(payeeRule, payeeItem) + } + + var linkTokenRule []interface{} + for _, linkTokenItem := range linkToken { + linkTokenRule = append(linkTokenRule, linkTokenItem) + } + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "OraclePaid", transmitterRule, payeeRule, linkTokenRule) + if err != nil { + return nil, err + } + return &VRFBeaconOraclePaidIterator{contract: _VRFBeacon.contract, event: "OraclePaid", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchOraclePaid(opts *bind.WatchOpts, sink chan<- *VRFBeaconOraclePaid, transmitter []common.Address, payee []common.Address, linkToken []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var payeeRule []interface{} + for _, payeeItem := range payee { + payeeRule = 
append(payeeRule, payeeItem) + } + + var linkTokenRule []interface{} + for _, linkTokenItem := range linkToken { + linkTokenRule = append(linkTokenRule, linkTokenItem) + } + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "OraclePaid", transmitterRule, payeeRule, linkTokenRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconOraclePaid) + if err := _VRFBeacon.contract.UnpackLog(event, "OraclePaid", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParseOraclePaid(log types.Log) (*VRFBeaconOraclePaid, error) { + event := new(VRFBeaconOraclePaid) + if err := _VRFBeacon.contract.UnpackLog(event, "OraclePaid", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconOutputsServedIterator struct { + Event *VRFBeaconOutputsServed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconOutputsServedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconOutputsServed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconOutputsServed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func 
(it *VRFBeaconOutputsServedIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconOutputsServedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconOutputsServed struct { + RecentBlockHeight uint64 + JuelsPerFeeCoin *big.Int + ReasonableGasPrice uint64 + OutputsServed []VRFBeaconTypesOutputServed + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterOutputsServed(opts *bind.FilterOpts) (*VRFBeaconOutputsServedIterator, error) { + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "OutputsServed") + if err != nil { + return nil, err + } + return &VRFBeaconOutputsServedIterator{contract: _VRFBeacon.contract, event: "OutputsServed", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchOutputsServed(opts *bind.WatchOpts, sink chan<- *VRFBeaconOutputsServed) (event.Subscription, error) { + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "OutputsServed") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconOutputsServed) + if err := _VRFBeacon.contract.UnpackLog(event, "OutputsServed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParseOutputsServed(log types.Log) (*VRFBeaconOutputsServed, error) { + event := new(VRFBeaconOutputsServed) + if err := _VRFBeacon.contract.UnpackLog(event, "OutputsServed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconOwnershipTransferRequestedIterator struct { + Event *VRFBeaconOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + 
fail error +} + +func (it *VRFBeaconOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFBeaconOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFBeaconOwnershipTransferRequestedIterator{contract: _VRFBeacon.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFBeaconOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule 
[]interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconOwnershipTransferRequested) + if err := _VRFBeacon.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFBeaconOwnershipTransferRequested, error) { + event := new(VRFBeaconOwnershipTransferRequested) + if err := _VRFBeacon.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconOwnershipTransferredIterator struct { + Event *VRFBeaconOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + 
} + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFBeaconOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFBeaconOwnershipTransferredIterator{contract: _VRFBeacon.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFBeaconOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconOwnershipTransferred) + if err := _VRFBeacon.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case 
err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParseOwnershipTransferred(log types.Log) (*VRFBeaconOwnershipTransferred, error) { + event := new(VRFBeaconOwnershipTransferred) + if err := _VRFBeacon.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconPayeeshipTransferRequestedIterator struct { + Event *VRFBeaconPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconPayeeshipTransferRequested struct { + Transmitter common.Address + Current common.Address + Proposed common.Address + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, current []common.Address, proposed []common.Address) 
(*VRFBeaconPayeeshipTransferRequestedIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var proposedRule []interface{} + for _, proposedItem := range proposed { + proposedRule = append(proposedRule, proposedItem) + } + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, currentRule, proposedRule) + if err != nil { + return nil, err + } + return &VRFBeaconPayeeshipTransferRequestedIterator{contract: _VRFBeacon.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFBeaconPayeeshipTransferRequested, transmitter []common.Address, current []common.Address, proposed []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var proposedRule []interface{} + for _, proposedItem := range proposed { + proposedRule = append(proposedRule, proposedItem) + } + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, currentRule, proposedRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconPayeeshipTransferRequested) + if err := _VRFBeacon.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParsePayeeshipTransferRequested(log types.Log) (*VRFBeaconPayeeshipTransferRequested, error) { + event := new(VRFBeaconPayeeshipTransferRequested) + if err := _VRFBeacon.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconPayeeshipTransferredIterator struct { + Event *VRFBeaconPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconPayeeshipTransferred struct { + Transmitter common.Address + Previous common.Address + Current common.Address + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, previous []common.Address, current []common.Address) (*VRFBeaconPayeeshipTransferredIterator, error) { + + var 
transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, previousRule, currentRule) + if err != nil { + return nil, err + } + return &VRFBeaconPayeeshipTransferredIterator{contract: _VRFBeacon.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *VRFBeaconPayeeshipTransferred, transmitter []common.Address, previous []common.Address, current []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, previousRule, currentRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconPayeeshipTransferred) + if err := _VRFBeacon.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + 
return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParsePayeeshipTransferred(log types.Log) (*VRFBeaconPayeeshipTransferred, error) { + event := new(VRFBeaconPayeeshipTransferred) + if err := _VRFBeacon.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconRandomWordsFulfilledIterator struct { + Event *VRFBeaconRandomWordsFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconRandomWordsFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconRandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconRandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconRandomWordsFulfilledIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconRandomWordsFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconRandomWordsFulfilled struct { + RequestIDs []*big.Int + SuccessfulFulfillment []byte + TruncatedErrorData [][]byte + SubBalances []*big.Int + SubIDs []*big.Int + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterRandomWordsFulfilled(opts *bind.FilterOpts) (*VRFBeaconRandomWordsFulfilledIterator, error) { + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "RandomWordsFulfilled") + if err != nil { + return nil, err + } + return &VRFBeaconRandomWordsFulfilledIterator{contract: 
_VRFBeacon.contract, event: "RandomWordsFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFBeaconRandomWordsFulfilled) (event.Subscription, error) { + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "RandomWordsFulfilled") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconRandomWordsFulfilled) + if err := _VRFBeacon.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParseRandomWordsFulfilled(log types.Log) (*VRFBeaconRandomWordsFulfilled, error) { + event := new(VRFBeaconRandomWordsFulfilled) + if err := _VRFBeacon.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconRandomnessFulfillmentRequestedIterator struct { + Event *VRFBeaconRandomnessFulfillmentRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconRandomnessFulfillmentRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconRandomnessFulfillmentRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconRandomnessFulfillmentRequested) + if err := it.contract.UnpackLog(it.Event, it.event, 
log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconRandomnessFulfillmentRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconRandomnessFulfillmentRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconRandomnessFulfillmentRequested struct { + RequestID *big.Int + Requester common.Address + NextBeaconOutputHeight uint64 + ConfDelay *big.Int + SubID *big.Int + NumWords uint16 + GasAllowance uint32 + GasPrice *big.Int + WeiPerUnitLink *big.Int + Arguments []byte + CostJuels *big.Int + NewSubBalance *big.Int + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterRandomnessFulfillmentRequested(opts *bind.FilterOpts, requestID []*big.Int) (*VRFBeaconRandomnessFulfillmentRequestedIterator, error) { + + var requestIDRule []interface{} + for _, requestIDItem := range requestID { + requestIDRule = append(requestIDRule, requestIDItem) + } + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "RandomnessFulfillmentRequested", requestIDRule) + if err != nil { + return nil, err + } + return &VRFBeaconRandomnessFulfillmentRequestedIterator{contract: _VRFBeacon.contract, event: "RandomnessFulfillmentRequested", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchRandomnessFulfillmentRequested(opts *bind.WatchOpts, sink chan<- *VRFBeaconRandomnessFulfillmentRequested, requestID []*big.Int) (event.Subscription, error) { + + var requestIDRule []interface{} + for _, requestIDItem := range requestID { + requestIDRule = append(requestIDRule, requestIDItem) + } + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "RandomnessFulfillmentRequested", requestIDRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + 
event := new(VRFBeaconRandomnessFulfillmentRequested) + if err := _VRFBeacon.contract.UnpackLog(event, "RandomnessFulfillmentRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParseRandomnessFulfillmentRequested(log types.Log) (*VRFBeaconRandomnessFulfillmentRequested, error) { + event := new(VRFBeaconRandomnessFulfillmentRequested) + if err := _VRFBeacon.contract.UnpackLog(event, "RandomnessFulfillmentRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconRandomnessRedeemedIterator struct { + Event *VRFBeaconRandomnessRedeemed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconRandomnessRedeemedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconRandomnessRedeemed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconRandomnessRedeemed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconRandomnessRedeemedIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconRandomnessRedeemedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconRandomnessRedeemed struct { + RequestID *big.Int + Requester common.Address + SubID *big.Int + Raw types.Log +} + +func 
(_VRFBeacon *VRFBeaconFilterer) FilterRandomnessRedeemed(opts *bind.FilterOpts, requestID []*big.Int, requester []common.Address) (*VRFBeaconRandomnessRedeemedIterator, error) { + + var requestIDRule []interface{} + for _, requestIDItem := range requestID { + requestIDRule = append(requestIDRule, requestIDItem) + } + var requesterRule []interface{} + for _, requesterItem := range requester { + requesterRule = append(requesterRule, requesterItem) + } + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "RandomnessRedeemed", requestIDRule, requesterRule) + if err != nil { + return nil, err + } + return &VRFBeaconRandomnessRedeemedIterator{contract: _VRFBeacon.contract, event: "RandomnessRedeemed", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchRandomnessRedeemed(opts *bind.WatchOpts, sink chan<- *VRFBeaconRandomnessRedeemed, requestID []*big.Int, requester []common.Address) (event.Subscription, error) { + + var requestIDRule []interface{} + for _, requestIDItem := range requestID { + requestIDRule = append(requestIDRule, requestIDItem) + } + var requesterRule []interface{} + for _, requesterItem := range requester { + requesterRule = append(requesterRule, requesterItem) + } + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "RandomnessRedeemed", requestIDRule, requesterRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconRandomnessRedeemed) + if err := _VRFBeacon.contract.UnpackLog(event, "RandomnessRedeemed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParseRandomnessRedeemed(log types.Log) (*VRFBeaconRandomnessRedeemed, error) { + 
event := new(VRFBeaconRandomnessRedeemed) + if err := _VRFBeacon.contract.UnpackLog(event, "RandomnessRedeemed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFBeaconRandomnessRequestedIterator struct { + Event *VRFBeaconRandomnessRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFBeaconRandomnessRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFBeaconRandomnessRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFBeaconRandomnessRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFBeaconRandomnessRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFBeaconRandomnessRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFBeaconRandomnessRequested struct { + RequestID *big.Int + Requester common.Address + NextBeaconOutputHeight uint64 + ConfDelay *big.Int + SubID *big.Int + NumWords uint16 + CostJuels *big.Int + NewSubBalance *big.Int + Raw types.Log +} + +func (_VRFBeacon *VRFBeaconFilterer) FilterRandomnessRequested(opts *bind.FilterOpts, requestID []*big.Int) (*VRFBeaconRandomnessRequestedIterator, error) { + + var requestIDRule []interface{} + for _, requestIDItem := range requestID { + requestIDRule = append(requestIDRule, requestIDItem) + } + + logs, sub, err := _VRFBeacon.contract.FilterLogs(opts, "RandomnessRequested", requestIDRule) + if err != nil { + return nil, err + } + 
return &VRFBeaconRandomnessRequestedIterator{contract: _VRFBeacon.contract, event: "RandomnessRequested", logs: logs, sub: sub}, nil +} + +func (_VRFBeacon *VRFBeaconFilterer) WatchRandomnessRequested(opts *bind.WatchOpts, sink chan<- *VRFBeaconRandomnessRequested, requestID []*big.Int) (event.Subscription, error) { + + var requestIDRule []interface{} + for _, requestIDItem := range requestID { + requestIDRule = append(requestIDRule, requestIDItem) + } + + logs, sub, err := _VRFBeacon.contract.WatchLogs(opts, "RandomnessRequested", requestIDRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFBeaconRandomnessRequested) + if err := _VRFBeacon.contract.UnpackLog(event, "RandomnessRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFBeacon *VRFBeaconFilterer) ParseRandomnessRequested(log types.Log) (*VRFBeaconRandomnessRequested, error) { + event := new(VRFBeaconRandomnessRequested) + if err := _VRFBeacon.contract.UnpackLog(event, "RandomnessRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetBilling struct { + MaximumGasPrice uint64 + ReasonableGasPrice uint64 + ObservationPayment uint64 + TransmissionPayment uint64 + AccountingGas *big.Int +} +type LatestConfigDetails struct { + ConfigCount uint32 + BlockNumber uint32 + ConfigDigest [32]byte +} +type LatestConfigDigestAndEpoch struct { + ScanLogs bool + ConfigDigest [32]byte + Epoch uint32 +} + +func (_VRFBeacon *VRFBeacon) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFBeacon.abi.Events["BillingAccessControllerSet"].ID: + return 
_VRFBeacon.ParseBillingAccessControllerSet(log) + case _VRFBeacon.abi.Events["BillingSet"].ID: + return _VRFBeacon.ParseBillingSet(log) + case _VRFBeacon.abi.Events["ConfigSet"].ID: + return _VRFBeacon.ParseConfigSet(log) + case _VRFBeacon.abi.Events["NewTransmission"].ID: + return _VRFBeacon.ParseNewTransmission(log) + case _VRFBeacon.abi.Events["OraclePaid"].ID: + return _VRFBeacon.ParseOraclePaid(log) + case _VRFBeacon.abi.Events["OutputsServed"].ID: + return _VRFBeacon.ParseOutputsServed(log) + case _VRFBeacon.abi.Events["OwnershipTransferRequested"].ID: + return _VRFBeacon.ParseOwnershipTransferRequested(log) + case _VRFBeacon.abi.Events["OwnershipTransferred"].ID: + return _VRFBeacon.ParseOwnershipTransferred(log) + case _VRFBeacon.abi.Events["PayeeshipTransferRequested"].ID: + return _VRFBeacon.ParsePayeeshipTransferRequested(log) + case _VRFBeacon.abi.Events["PayeeshipTransferred"].ID: + return _VRFBeacon.ParsePayeeshipTransferred(log) + case _VRFBeacon.abi.Events["RandomWordsFulfilled"].ID: + return _VRFBeacon.ParseRandomWordsFulfilled(log) + case _VRFBeacon.abi.Events["RandomnessFulfillmentRequested"].ID: + return _VRFBeacon.ParseRandomnessFulfillmentRequested(log) + case _VRFBeacon.abi.Events["RandomnessRedeemed"].ID: + return _VRFBeacon.ParseRandomnessRedeemed(log) + case _VRFBeacon.abi.Events["RandomnessRequested"].ID: + return _VRFBeacon.ParseRandomnessRequested(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFBeaconBillingAccessControllerSet) Topic() common.Hash { + return common.HexToHash("0x793cb73064f3c8cde7e187ae515511e6e56d1ee89bf08b82fa60fb70f8d48912") +} + +func (VRFBeaconBillingSet) Topic() common.Hash { + return common.HexToHash("0x49275ddcdfc9c0519b3d094308c8bf675f06070a754ce90c152163cb6e66e8a0") +} + +func (VRFBeaconConfigSet) Topic() common.Hash { + return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05") +} + +func 
(VRFBeaconNewTransmission) Topic() common.Hash { + return common.HexToHash("0xfc3c7a7927e878a0fca37c904953c3c75cee3ca1d1640184a0ab1c65eec62743") +} + +func (VRFBeaconOraclePaid) Topic() common.Hash { + return common.HexToHash("0xd0b1dac935d85bd54cf0a33b0d41d39f8cf53a968465fc7ea2377526b8ac712c") +} + +func (VRFBeaconOutputsServed) Topic() common.Hash { + return common.HexToHash("0xf10ea936d00579b4c52035ee33bf46929646b3aa87554c565d8fb2c7aa549c44") +} + +func (VRFBeaconOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFBeaconOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFBeaconPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (VRFBeaconPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (VRFBeaconRandomWordsFulfilled) Topic() common.Hash { + return common.HexToHash("0x8f79f730779e875ce76c428039cc2052b5b5918c2a55c598fab251c1198aec54") +} + +func (VRFBeaconRandomnessFulfillmentRequested) Topic() common.Hash { + return common.HexToHash("0x01872fb9c7d6d68af06a17347935e04412da302a377224c205e672c26e18c37f") +} + +func (VRFBeaconRandomnessRedeemed) Topic() common.Hash { + return common.HexToHash("0x16f3f633197fafab10a5df69e6f3f2f7f20092f08d8d47de0a91c0f4b96a1a25") +} + +func (VRFBeaconRandomnessRequested) Topic() common.Hash { + return common.HexToHash("0xb7933fba96b6b452eb44f99fdc08052a45dff82363d59abaff0456931c3d2459") +} + +func (_VRFBeacon *VRFBeacon) Address() common.Address { + return _VRFBeacon.address +} + +type VRFBeaconInterface interface { + NUMCONFDELAYS(opts *bind.CallOpts) (uint8, error) + + GetBilling(opts *bind.CallOpts) (GetBilling, + + 
error) + + GetBillingAccessController(opts *bind.CallOpts) (common.Address, error) + + ICoordinator(opts *bind.CallOpts) (common.Address, error) + + ILink(opts *bind.CallOpts) (common.Address, error) + + LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) + + LatestConfigDigestAndEpoch(opts *bind.CallOpts) (LatestConfigDigestAndEpoch, + + error) + + LinkAvailableForPayment(opts *bind.CallOpts) (*big.Int, error) + + OwedPayment(opts *bind.CallOpts, transmitterAddress common.Address) (*big.Int, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SKeyID(opts *bind.CallOpts) ([32]byte, error) + + SKeyProvider(opts *bind.CallOpts) (common.Address, error) + + SProvingKeyHash(opts *bind.CallOpts) ([32]byte, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) + + ExposeType(opts *bind.TransactOpts, arg0 VRFBeaconReportReport) (*types.Transaction, error) + + KeyGenerated(opts *bind.TransactOpts, kd KeyDataStructKeyData) (*types.Transaction, error) + + NewKeyRequested(opts *bind.TransactOpts) (*types.Transaction, error) + + SetBilling(opts *bind.TransactOpts, maximumGasPrice uint64, reasonableGasPrice uint64, observationPayment uint64, transmissionPayment uint64, accountingGas *big.Int) (*types.Transaction, error) + + SetBillingAccessController(opts *bind.TransactOpts, _billingAccessController common.Address) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) + + SetPayees(opts *bind.TransactOpts, transmitters []common.Address, payees []common.Address) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + 
TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) + + Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) + + WithdrawFunds(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) + + WithdrawPayment(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) + + FilterBillingAccessControllerSet(opts *bind.FilterOpts) (*VRFBeaconBillingAccessControllerSetIterator, error) + + WatchBillingAccessControllerSet(opts *bind.WatchOpts, sink chan<- *VRFBeaconBillingAccessControllerSet) (event.Subscription, error) + + ParseBillingAccessControllerSet(log types.Log) (*VRFBeaconBillingAccessControllerSet, error) + + FilterBillingSet(opts *bind.FilterOpts) (*VRFBeaconBillingSetIterator, error) + + WatchBillingSet(opts *bind.WatchOpts, sink chan<- *VRFBeaconBillingSet) (event.Subscription, error) + + ParseBillingSet(log types.Log) (*VRFBeaconBillingSet, error) + + FilterConfigSet(opts *bind.FilterOpts) (*VRFBeaconConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *VRFBeaconConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*VRFBeaconConfigSet, error) + + FilterNewTransmission(opts *bind.FilterOpts, epochAndRound []*big.Int) (*VRFBeaconNewTransmissionIterator, error) + + WatchNewTransmission(opts *bind.WatchOpts, sink chan<- *VRFBeaconNewTransmission, epochAndRound []*big.Int) (event.Subscription, error) + + ParseNewTransmission(log types.Log) (*VRFBeaconNewTransmission, error) + + FilterOraclePaid(opts *bind.FilterOpts, transmitter []common.Address, payee []common.Address, linkToken []common.Address) (*VRFBeaconOraclePaidIterator, error) + + WatchOraclePaid(opts *bind.WatchOpts, sink chan<- *VRFBeaconOraclePaid, transmitter []common.Address, payee []common.Address, linkToken []common.Address) 
(event.Subscription, error) + + ParseOraclePaid(log types.Log) (*VRFBeaconOraclePaid, error) + + FilterOutputsServed(opts *bind.FilterOpts) (*VRFBeaconOutputsServedIterator, error) + + WatchOutputsServed(opts *bind.WatchOpts, sink chan<- *VRFBeaconOutputsServed) (event.Subscription, error) + + ParseOutputsServed(log types.Log) (*VRFBeaconOutputsServed, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFBeaconOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFBeaconOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFBeaconOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFBeaconOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFBeaconOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFBeaconOwnershipTransferred, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, current []common.Address, proposed []common.Address) (*VRFBeaconPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFBeaconPayeeshipTransferRequested, transmitter []common.Address, current []common.Address, proposed []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*VRFBeaconPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, previous []common.Address, current []common.Address) (*VRFBeaconPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *VRFBeaconPayeeshipTransferred, 
transmitter []common.Address, previous []common.Address, current []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*VRFBeaconPayeeshipTransferred, error) + + FilterRandomWordsFulfilled(opts *bind.FilterOpts) (*VRFBeaconRandomWordsFulfilledIterator, error) + + WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFBeaconRandomWordsFulfilled) (event.Subscription, error) + + ParseRandomWordsFulfilled(log types.Log) (*VRFBeaconRandomWordsFulfilled, error) + + FilterRandomnessFulfillmentRequested(opts *bind.FilterOpts, requestID []*big.Int) (*VRFBeaconRandomnessFulfillmentRequestedIterator, error) + + WatchRandomnessFulfillmentRequested(opts *bind.WatchOpts, sink chan<- *VRFBeaconRandomnessFulfillmentRequested, requestID []*big.Int) (event.Subscription, error) + + ParseRandomnessFulfillmentRequested(log types.Log) (*VRFBeaconRandomnessFulfillmentRequested, error) + + FilterRandomnessRedeemed(opts *bind.FilterOpts, requestID []*big.Int, requester []common.Address) (*VRFBeaconRandomnessRedeemedIterator, error) + + WatchRandomnessRedeemed(opts *bind.WatchOpts, sink chan<- *VRFBeaconRandomnessRedeemed, requestID []*big.Int, requester []common.Address) (event.Subscription, error) + + ParseRandomnessRedeemed(log types.Log) (*VRFBeaconRandomnessRedeemed, error) + + FilterRandomnessRequested(opts *bind.FilterOpts, requestID []*big.Int) (*VRFBeaconRandomnessRequestedIterator, error) + + WatchRandomnessRequested(opts *bind.WatchOpts, sink chan<- *VRFBeaconRandomnessRequested, requestID []*big.Int) (event.Subscription, error) + + ParseRandomnessRequested(log types.Log) (*VRFBeaconRandomnessRequested, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/ocr2vrf/generated/vrf_beacon_consumer/vrf_beacon_consumer.go b/core/gethwrappers/ocr2vrf/generated/vrf_beacon_consumer/vrf_beacon_consumer.go new file mode 100644 index 00000000..2cd54eb5 --- /dev/null +++ 
b/core/gethwrappers/ocr2vrf/generated/vrf_beacon_consumer/vrf_beacon_consumer.go @@ -0,0 +1,1034 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package vrf_beacon_consumer + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var BeaconVRFConsumerMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"shouldFail\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"beaconPeriodBlocks\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"MustBeCoordinator\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MustBeOwnerOrCoordinator\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"}],\"name\":\"CoordinatorUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address
\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"fail\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_beaconPeriodBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"},{\"internalType\":\"uint256[]\",\"name\":\"randomWords\",\"type\":\"uint256[]\"},{\"internalType\":\"bytes\",\"name\":\"arguments\",\"type\":\"bytes\"}],\"name\":\"rawFulfillRandomWords\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_ReceivedRandomnessByRequestID\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_arguments\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_gasAvailable\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_mostRecentRequestID\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_myBeaconRequests\",\"outputs
\":[{\"internalType\":\"uint32\",\"name\":\"slotNumber\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"confirmationDelay\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_randomWords\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint24\",\"name\":\"\",\"type\":\"uint24\"}],\"name\":\"s_requestsIDs\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_subId\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinator\",\"type\":\"address\"}],\"name\":\"setCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"shouldFail\",\"type\":\"bool\"}],\"name\":\"setFail\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"reqId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"height\",\"type\":\"uint256\"},{\"internalType\":\"uint24\",\"name\":\"delay\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"}],\"name\":\"storeBeaconRequest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"}],
\"name\":\"testRedeemRandomness\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"internalType\":\"uint24\",\"name\":\"confirmationDelayArg\",\"type\":\"uint24\"}],\"name\":\"testRequestRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"internalType\":\"uint24\",\"name\":\"confDelay\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"arguments\",\"type\":\"bytes\"}],\"name\":\"testRequestRandomnessFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60806040523480156200001157600080fd5b50604051620018db380380620018db8339810160408190526200003491620001aa565b8233806000816200008c5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000bf57620000bf81620000ff565b5050600280546001600160a01b0319166001600160a01b03939093169290921790915550600b805460ff191692151592909217909155600c555062000201565b336001600160a01b03821603620001595760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000083565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600080600060608486031215620001c057600080fd5b83516001600160a01b0381168114620001d857600080fd5b60208501519093508015158114620001ef57600080fd5b80925050604084015190509250925092565b6116ca80620002116000396000f3fe608060405234801561001057600080fd5b506004361061016c5760003560e01c8063a9cc4718116100cd578063ea7502ab11610081578063f2fde38b11610066578063f2fde38b1461031f578063f6eaffc814610332578063ffe97ca41461034557600080fd5b8063ea7502ab14610303578063f08c5daa1461031657600080fd5b8063cd0593df116100b2578063cd0593df146102d4578063d0705f04146102dd578063d21ea8fd146102f057600080fd5b8063a9cc4718146102a4578063c6d61301146102c157600080fd5b80637716cdaa116101245780638da5cb5b116101095780638da5cb5b1461022a5780638ea98117146102525780639d7694021461026557600080fd5b80637716cdaa1461020d57806379ba50971461022257600080fd5b8063689b77ab11610155578063689b77ab146101c45780636df57cc3146101cd578063706da1ca146101e057600080fd5b8063341867a2146101715780635f15cccc14610186575b600080fd5b61018461017f366004610e87565b6103f8565b005b6101b1610194366004610ec1565b600460209081526000928352604080842090915290825290205481565b6040519081526020015b60405180910390f35b6101b160085481565b6101846101db366
004610eff565b6104ed565b6009546101f49067ffffffffffffffff1681565b60405167ffffffffffffffff90911681526020016101bb565b610215610628565b6040516101bb9190610fa9565b6101846106b6565b60005460405173ffffffffffffffffffffffffffffffffffffffff90911681526020016101bb565b610184610260366004610fc3565b6107b8565b610184610273366004610ff9565b600b80547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016911515919091179055565b600b546102b19060ff1681565b60405190151581526020016101bb565b6101b16102cf36600461101b565b61089e565b6101b1600c5481565b6101b16102eb366004610e87565b6109a8565b6101846102fe366004611187565b6109d9565b6101b1610311366004611250565b610a3a565b6101b1600a5481565b61018461032d366004610fc3565b610b4a565b6101b16103403660046112d4565b610b5e565b6103ae6103533660046112d4565b60056020526000908152604090205463ffffffff811690640100000000810462ffffff1690670100000000000000810461ffff16906901000000000000000000900473ffffffffffffffffffffffffffffffffffffffff1684565b6040805163ffffffff909516855262ffffff909316602085015261ffff9091169183019190915273ffffffffffffffffffffffffffffffffffffffff1660608201526080016101bb565b60025460408051602081018252600080825291517facfc6cdd000000000000000000000000000000000000000000000000000000008152919273ffffffffffffffffffffffffffffffffffffffff169163acfc6cdd9161045e91879187916004016112ed565b6000604051808303816000875af115801561047d573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526104c39190810190611315565b600083815260066020908152604090912082519293506104e7929091840190610e27565b50505050565b600083815260046020908152604080832062ffffff861684529091528120859055600c5461051b9085611404565b6040805160808101825263ffffffff928316815262ffffff958616602080830191825261ffff968716838501908152306060850190815260009b8c526005909252939099209151825491519351995173ffffffffffffffffffffffffffffffffffffffff166901000000000000000000027fffffff0000000000000000000000000000000000000000ffffffffffffffffff9a90971667010000000000000002999
099167fffffff00000000000000000000000000000000000000000000ffffffffffffff93909716640100000000027fffffffffffffffffffffffffffffffffffffffffffffffffff000000000000009091169890931697909717919091171692909217179092555050565b6007805461063590611418565b80601f016020809104026020016040519081016040528092919081815260200182805461066190611418565b80156106ae5780601f10610683576101008083540402835291602001916106ae565b820191906000526020600020905b81548152906001019060200180831161069157829003601f168201915b505050505081565b60015473ffffffffffffffffffffffffffffffffffffffff16331461073c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b60005473ffffffffffffffffffffffffffffffffffffffff1633148015906107f8575060025473ffffffffffffffffffffffffffffffffffffffff163314155b1561082f576040517fd4e06fd700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600280547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83169081179091556040517fc258faa9a17ddfdf4130b4acff63a289202e7d5f9e42f366add65368575486bc90600090a250565b600080600c546108ac610b7f565b6108b6919061146b565b9050600081600c546108c6610b7f565b6108d0919061147f565b6108da9190611498565b60025460408051602081018252600080825291517f4ffac83a000000000000000000000000000000000000000000000000000000008152939450909273ffffffffffffffffffffffffffffffffffffffff90921691634ffac83a91610948918a918c918b91906004016114ab565b6020604051808303816000875af1158015610967573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061098b91906114e3565b90506109998183878a6104ed565b60088190559695505050505
050565b600660205281600052604060002081815481106109c457600080fd5b90600052602060002001600091509150505481565b60025473ffffffffffffffffffffffffffffffffffffffff163314610a2a576040517f66bf9c7200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610a35838383610c16565b505050565b600080600c54610a48610b7f565b610a52919061146b565b9050600081600c54610a62610b7f565b610a6c919061147f565b610a769190611498565b60025460408051602081018252600080825291517fdb972c8b000000000000000000000000000000000000000000000000000000008152939450909273ffffffffffffffffffffffffffffffffffffffff9092169163db972c8b91610ae8918d918d918d918d918d91906004016114fc565b6020604051808303816000875af1158015610b07573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610b2b91906114e3565b9050610b398183898b6104ed565b600881905598975050505050505050565b610b52610caf565b610b5b81610d32565b50565b60038181548110610b6e57600080fd5b600091825260209091200154905081565b60004661a4b1811480610b94575062066eed81145b15610c0f57606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610be5573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610c0991906114e3565b91505090565b4391505090565b600b5460ff1615610c83576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f206661696c656420696e2066756c66696c6c52616e646f6d576f7264730000006044820152606401610733565b60008381526006602090815260409091208351610ca292850190610e27565b5060076104e782826115a3565b60005473ffffffffffffffffffffffffffffffffffffffff163314610d30576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610733565b565b3373ffffffffffffffffffffffffffffffffffffffff821603610db1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f7420747
2616e7366657220746f2073656c660000000000000000006044820152606401610733565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b828054828255906000526020600020908101928215610e62579160200282015b82811115610e62578251825591602001919060010190610e47565b50610e6e929150610e72565b5090565b5b80821115610e6e5760008155600101610e73565b60008060408385031215610e9a57600080fd5b50508035926020909101359150565b803562ffffff81168114610ebc57600080fd5b919050565b60008060408385031215610ed457600080fd5b82359150610ee460208401610ea9565b90509250929050565b803561ffff81168114610ebc57600080fd5b60008060008060808587031215610f1557600080fd5b8435935060208501359250610f2c60408601610ea9565b9150610f3a60608601610eed565b905092959194509250565b6000815180845260005b81811015610f6b57602081850181015186830182015201610f4f565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081526000610fbc6020830184610f45565b9392505050565b600060208284031215610fd557600080fd5b813573ffffffffffffffffffffffffffffffffffffffff81168114610fbc57600080fd5b60006020828403121561100b57600080fd5b81358015158114610fbc57600080fd5b60008060006060848603121561103057600080fd5b61103984610eed565b92506020840135915061104e60408501610ea9565b90509250925092565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156110cd576110cd611057565b604052919050565b600067ffffffffffffffff8211156110ef576110ef611057565b5060051b60200190565b600082601f83011261110a57600080fd5b813567ffffffffffffffff81111561112457611124611057565b61115560207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601611086565b81815284602083860101111561116a57600080fd5b816020850160208301376
000918101602001919091529392505050565b60008060006060848603121561119c57600080fd5b8335925060208085013567ffffffffffffffff808211156111bc57600080fd5b818701915087601f8301126111d057600080fd5b81356111e36111de826110d5565b611086565b81815260059190911b8301840190848101908a83111561120257600080fd5b938501935b8285101561122057843582529385019390850190611207565b96505050604087013592508083111561123857600080fd5b5050611246868287016110f9565b9150509250925092565b600080600080600060a0868803121561126857600080fd5b8535945061127860208701610eed565b935061128660408701610ea9565b9250606086013563ffffffff8116811461129f57600080fd5b9150608086013567ffffffffffffffff8111156112bb57600080fd5b6112c7888289016110f9565b9150509295509295909350565b6000602082840312156112e657600080fd5b5035919050565b83815282602082015260606040820152600061130c6060830184610f45565b95945050505050565b6000602080838503121561132857600080fd5b825167ffffffffffffffff81111561133f57600080fd5b8301601f8101851361135057600080fd5b805161135e6111de826110d5565b81815260059190911b8201830190838101908783111561137d57600080fd5b928401925b8284101561139b57835182529284019290840190611382565b979650505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600082611413576114136113a6565b500490565b600181811c9082168061142c57607f821691505b602082108103611465577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b60008261147a5761147a6113a6565b500690565b80820180821115611492576114926113d5565b92915050565b81810381811115611492576114926113d5565b84815261ffff8416602082015262ffffff831660408201526080606082015260006114d96080830184610f45565b9695505050505050565b6000602082840312156114f557600080fd5b5051919050565b86815261ffff8616602082015262ffffff8516604082015263ffffffff8416606082015260c06080820152600061153660c0830185610f45565b82810360a08401526115488185610f45565b9998505050505050505050565b601f821115610a3
557600081815260208120601f850160051c8101602086101561157c5750805b601f850160051c820191505b8181101561159b57828155600101611588565b505050505050565b815167ffffffffffffffff8111156115bd576115bd611057565b6115d1816115cb8454611418565b84611555565b602080601f83116001811461162457600084156115ee5750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b17855561159b565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b8281101561167157888601518255948401946001909101908401611652565b50858210156116ad57878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b0190555056fea164736f6c6343000813000a", +} + +var BeaconVRFConsumerABI = BeaconVRFConsumerMetaData.ABI + +var BeaconVRFConsumerBin = BeaconVRFConsumerMetaData.Bin + +func DeployBeaconVRFConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, coordinator common.Address, shouldFail bool, beaconPeriodBlocks *big.Int) (common.Address, *types.Transaction, *BeaconVRFConsumer, error) { + parsed, err := BeaconVRFConsumerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(BeaconVRFConsumerBin), backend, coordinator, shouldFail, beaconPeriodBlocks) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &BeaconVRFConsumer{BeaconVRFConsumerCaller: BeaconVRFConsumerCaller{contract: contract}, BeaconVRFConsumerTransactor: BeaconVRFConsumerTransactor{contract: contract}, BeaconVRFConsumerFilterer: BeaconVRFConsumerFilterer{contract: contract}}, nil +} + +type BeaconVRFConsumer struct { + address common.Address + abi abi.ABI + BeaconVRFConsumerCaller + BeaconVRFConsumerTransactor + BeaconVRFConsumerFilterer +} + +type BeaconVRFConsumerCaller struct { + 
contract *bind.BoundContract +} + +type BeaconVRFConsumerTransactor struct { + contract *bind.BoundContract +} + +type BeaconVRFConsumerFilterer struct { + contract *bind.BoundContract +} + +type BeaconVRFConsumerSession struct { + Contract *BeaconVRFConsumer + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type BeaconVRFConsumerCallerSession struct { + Contract *BeaconVRFConsumerCaller + CallOpts bind.CallOpts +} + +type BeaconVRFConsumerTransactorSession struct { + Contract *BeaconVRFConsumerTransactor + TransactOpts bind.TransactOpts +} + +type BeaconVRFConsumerRaw struct { + Contract *BeaconVRFConsumer +} + +type BeaconVRFConsumerCallerRaw struct { + Contract *BeaconVRFConsumerCaller +} + +type BeaconVRFConsumerTransactorRaw struct { + Contract *BeaconVRFConsumerTransactor +} + +func NewBeaconVRFConsumer(address common.Address, backend bind.ContractBackend) (*BeaconVRFConsumer, error) { + abi, err := abi.JSON(strings.NewReader(BeaconVRFConsumerABI)) + if err != nil { + return nil, err + } + contract, err := bindBeaconVRFConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &BeaconVRFConsumer{address: address, abi: abi, BeaconVRFConsumerCaller: BeaconVRFConsumerCaller{contract: contract}, BeaconVRFConsumerTransactor: BeaconVRFConsumerTransactor{contract: contract}, BeaconVRFConsumerFilterer: BeaconVRFConsumerFilterer{contract: contract}}, nil +} + +func NewBeaconVRFConsumerCaller(address common.Address, caller bind.ContractCaller) (*BeaconVRFConsumerCaller, error) { + contract, err := bindBeaconVRFConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &BeaconVRFConsumerCaller{contract: contract}, nil +} + +func NewBeaconVRFConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*BeaconVRFConsumerTransactor, error) { + contract, err := bindBeaconVRFConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return 
&BeaconVRFConsumerTransactor{contract: contract}, nil +} + +func NewBeaconVRFConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*BeaconVRFConsumerFilterer, error) { + contract, err := bindBeaconVRFConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &BeaconVRFConsumerFilterer{contract: contract}, nil +} + +func bindBeaconVRFConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := BeaconVRFConsumerMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BeaconVRFConsumer.Contract.BeaconVRFConsumerCaller.contract.Call(opts, result, method, params...) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.BeaconVRFConsumerTransactor.contract.Transfer(opts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.BeaconVRFConsumerTransactor.contract.Transact(opts, method, params...) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BeaconVRFConsumer.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.contract.Transfer(opts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.contract.Transact(opts, method, params...) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCaller) Fail(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _BeaconVRFConsumer.contract.Call(opts, &out, "fail") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) Fail() (bool, error) { + return _BeaconVRFConsumer.Contract.Fail(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCallerSession) Fail() (bool, error) { + return _BeaconVRFConsumer.Contract.Fail(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCaller) IBeaconPeriodBlocks(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _BeaconVRFConsumer.contract.Call(opts, &out, "i_beaconPeriodBlocks") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) IBeaconPeriodBlocks() (*big.Int, error) { + return _BeaconVRFConsumer.Contract.IBeaconPeriodBlocks(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCallerSession) IBeaconPeriodBlocks() (*big.Int, error) { + return _BeaconVRFConsumer.Contract.IBeaconPeriodBlocks(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _BeaconVRFConsumer.contract.Call(opts, &out, "owner") + 
+ if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) Owner() (common.Address, error) { + return _BeaconVRFConsumer.Contract.Owner(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCallerSession) Owner() (common.Address, error) { + return _BeaconVRFConsumer.Contract.Owner(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCaller) SReceivedRandomnessByRequestID(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + var out []interface{} + err := _BeaconVRFConsumer.contract.Call(opts, &out, "s_ReceivedRandomnessByRequestID", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) SReceivedRandomnessByRequestID(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _BeaconVRFConsumer.Contract.SReceivedRandomnessByRequestID(&_BeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCallerSession) SReceivedRandomnessByRequestID(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _BeaconVRFConsumer.Contract.SReceivedRandomnessByRequestID(&_BeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCaller) SArguments(opts *bind.CallOpts) ([]byte, error) { + var out []interface{} + err := _BeaconVRFConsumer.contract.Call(opts, &out, "s_arguments") + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) SArguments() ([]byte, error) { + return _BeaconVRFConsumer.Contract.SArguments(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCallerSession) SArguments() 
([]byte, error) { + return _BeaconVRFConsumer.Contract.SArguments(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCaller) SGasAvailable(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _BeaconVRFConsumer.contract.Call(opts, &out, "s_gasAvailable") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) SGasAvailable() (*big.Int, error) { + return _BeaconVRFConsumer.Contract.SGasAvailable(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCallerSession) SGasAvailable() (*big.Int, error) { + return _BeaconVRFConsumer.Contract.SGasAvailable(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCaller) SMostRecentRequestID(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _BeaconVRFConsumer.contract.Call(opts, &out, "s_mostRecentRequestID") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) SMostRecentRequestID() (*big.Int, error) { + return _BeaconVRFConsumer.Contract.SMostRecentRequestID(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCallerSession) SMostRecentRequestID() (*big.Int, error) { + return _BeaconVRFConsumer.Contract.SMostRecentRequestID(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCaller) SMyBeaconRequests(opts *bind.CallOpts, arg0 *big.Int) (SMyBeaconRequests, + + error) { + var out []interface{} + err := _BeaconVRFConsumer.contract.Call(opts, &out, "s_myBeaconRequests", arg0) + + outstruct := new(SMyBeaconRequests) + if err != nil { + return *outstruct, err + } + + outstruct.SlotNumber = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.ConfirmationDelay = 
*abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.NumWords = *abi.ConvertType(out[2], new(uint16)).(*uint16) + outstruct.Requester = *abi.ConvertType(out[3], new(common.Address)).(*common.Address) + + return *outstruct, err + +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) SMyBeaconRequests(arg0 *big.Int) (SMyBeaconRequests, + + error) { + return _BeaconVRFConsumer.Contract.SMyBeaconRequests(&_BeaconVRFConsumer.CallOpts, arg0) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCallerSession) SMyBeaconRequests(arg0 *big.Int) (SMyBeaconRequests, + + error) { + return _BeaconVRFConsumer.Contract.SMyBeaconRequests(&_BeaconVRFConsumer.CallOpts, arg0) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCaller) SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) { + var out []interface{} + err := _BeaconVRFConsumer.contract.Call(opts, &out, "s_randomWords", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _BeaconVRFConsumer.Contract.SRandomWords(&_BeaconVRFConsumer.CallOpts, arg0) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCallerSession) SRandomWords(arg0 *big.Int) (*big.Int, error) { + return _BeaconVRFConsumer.Contract.SRandomWords(&_BeaconVRFConsumer.CallOpts, arg0) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCaller) SRequestsIDs(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + var out []interface{} + err := _BeaconVRFConsumer.contract.Call(opts, &out, "s_requestsIDs", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) SRequestsIDs(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return 
_BeaconVRFConsumer.Contract.SRequestsIDs(&_BeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCallerSession) SRequestsIDs(arg0 *big.Int, arg1 *big.Int) (*big.Int, error) { + return _BeaconVRFConsumer.Contract.SRequestsIDs(&_BeaconVRFConsumer.CallOpts, arg0, arg1) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCaller) SSubId(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _BeaconVRFConsumer.contract.Call(opts, &out, "s_subId") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) SSubId() (uint64, error) { + return _BeaconVRFConsumer.Contract.SSubId(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerCallerSession) SSubId() (uint64, error) { + return _BeaconVRFConsumer.Contract.SSubId(&_BeaconVRFConsumer.CallOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BeaconVRFConsumer.contract.Transact(opts, "acceptOwnership") +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) AcceptOwnership() (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.AcceptOwnership(&_BeaconVRFConsumer.TransactOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.AcceptOwnership(&_BeaconVRFConsumer.TransactOpts) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactor) RawFulfillRandomWords(opts *bind.TransactOpts, requestID *big.Int, randomWords []*big.Int, arguments []byte) (*types.Transaction, error) { + return _BeaconVRFConsumer.contract.Transact(opts, "rawFulfillRandomWords", requestID, randomWords, arguments) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) RawFulfillRandomWords(requestID *big.Int, randomWords []*big.Int, arguments 
[]byte) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.RawFulfillRandomWords(&_BeaconVRFConsumer.TransactOpts, requestID, randomWords, arguments) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactorSession) RawFulfillRandomWords(requestID *big.Int, randomWords []*big.Int, arguments []byte) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.RawFulfillRandomWords(&_BeaconVRFConsumer.TransactOpts, requestID, randomWords, arguments) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactor) SetCoordinator(opts *bind.TransactOpts, coordinator common.Address) (*types.Transaction, error) { + return _BeaconVRFConsumer.contract.Transact(opts, "setCoordinator", coordinator) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) SetCoordinator(coordinator common.Address) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.SetCoordinator(&_BeaconVRFConsumer.TransactOpts, coordinator) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactorSession) SetCoordinator(coordinator common.Address) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.SetCoordinator(&_BeaconVRFConsumer.TransactOpts, coordinator) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactor) SetFail(opts *bind.TransactOpts, shouldFail bool) (*types.Transaction, error) { + return _BeaconVRFConsumer.contract.Transact(opts, "setFail", shouldFail) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) SetFail(shouldFail bool) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.SetFail(&_BeaconVRFConsumer.TransactOpts, shouldFail) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactorSession) SetFail(shouldFail bool) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.SetFail(&_BeaconVRFConsumer.TransactOpts, shouldFail) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactor) StoreBeaconRequest(opts *bind.TransactOpts, reqId *big.Int, height *big.Int, delay *big.Int, numWords uint16) 
(*types.Transaction, error) { + return _BeaconVRFConsumer.contract.Transact(opts, "storeBeaconRequest", reqId, height, delay, numWords) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) StoreBeaconRequest(reqId *big.Int, height *big.Int, delay *big.Int, numWords uint16) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.StoreBeaconRequest(&_BeaconVRFConsumer.TransactOpts, reqId, height, delay, numWords) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactorSession) StoreBeaconRequest(reqId *big.Int, height *big.Int, delay *big.Int, numWords uint16) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.StoreBeaconRequest(&_BeaconVRFConsumer.TransactOpts, reqId, height, delay, numWords) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactor) TestRedeemRandomness(opts *bind.TransactOpts, subID *big.Int, requestID *big.Int) (*types.Transaction, error) { + return _BeaconVRFConsumer.contract.Transact(opts, "testRedeemRandomness", subID, requestID) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) TestRedeemRandomness(subID *big.Int, requestID *big.Int) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.TestRedeemRandomness(&_BeaconVRFConsumer.TransactOpts, subID, requestID) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactorSession) TestRedeemRandomness(subID *big.Int, requestID *big.Int) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.TestRedeemRandomness(&_BeaconVRFConsumer.TransactOpts, subID, requestID) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactor) TestRequestRandomness(opts *bind.TransactOpts, numWords uint16, subID *big.Int, confirmationDelayArg *big.Int) (*types.Transaction, error) { + return _BeaconVRFConsumer.contract.Transact(opts, "testRequestRandomness", numWords, subID, confirmationDelayArg) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) TestRequestRandomness(numWords uint16, subID *big.Int, confirmationDelayArg *big.Int) 
(*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.TestRequestRandomness(&_BeaconVRFConsumer.TransactOpts, numWords, subID, confirmationDelayArg) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactorSession) TestRequestRandomness(numWords uint16, subID *big.Int, confirmationDelayArg *big.Int) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.TestRequestRandomness(&_BeaconVRFConsumer.TransactOpts, numWords, subID, confirmationDelayArg) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactor) TestRequestRandomnessFulfillment(opts *bind.TransactOpts, subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte) (*types.Transaction, error) { + return _BeaconVRFConsumer.contract.Transact(opts, "testRequestRandomnessFulfillment", subID, numWords, confDelay, callbackGasLimit, arguments) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) TestRequestRandomnessFulfillment(subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.TestRequestRandomnessFulfillment(&_BeaconVRFConsumer.TransactOpts, subID, numWords, confDelay, callbackGasLimit, arguments) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactorSession) TestRequestRandomnessFulfillment(subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.TestRequestRandomnessFulfillment(&_BeaconVRFConsumer.TransactOpts, subID, numWords, confDelay, callbackGasLimit, arguments) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _BeaconVRFConsumer.contract.Transact(opts, "transferOwnership", to) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return 
_BeaconVRFConsumer.Contract.TransferOwnership(&_BeaconVRFConsumer.TransactOpts, to) +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _BeaconVRFConsumer.Contract.TransferOwnership(&_BeaconVRFConsumer.TransactOpts, to) +} + +type BeaconVRFConsumerCoordinatorUpdatedIterator struct { + Event *BeaconVRFConsumerCoordinatorUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BeaconVRFConsumerCoordinatorUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BeaconVRFConsumerCoordinatorUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BeaconVRFConsumerCoordinatorUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BeaconVRFConsumerCoordinatorUpdatedIterator) Error() error { + return it.fail +} + +func (it *BeaconVRFConsumerCoordinatorUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BeaconVRFConsumerCoordinatorUpdated struct { + Coordinator common.Address + Raw types.Log +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerFilterer) FilterCoordinatorUpdated(opts *bind.FilterOpts, coordinator []common.Address) (*BeaconVRFConsumerCoordinatorUpdatedIterator, error) { + + var coordinatorRule []interface{} + for _, coordinatorItem := range coordinator { + coordinatorRule = append(coordinatorRule, coordinatorItem) + } + + logs, sub, err := _BeaconVRFConsumer.contract.FilterLogs(opts, 
"CoordinatorUpdated", coordinatorRule) + if err != nil { + return nil, err + } + return &BeaconVRFConsumerCoordinatorUpdatedIterator{contract: _BeaconVRFConsumer.contract, event: "CoordinatorUpdated", logs: logs, sub: sub}, nil +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerFilterer) WatchCoordinatorUpdated(opts *bind.WatchOpts, sink chan<- *BeaconVRFConsumerCoordinatorUpdated, coordinator []common.Address) (event.Subscription, error) { + + var coordinatorRule []interface{} + for _, coordinatorItem := range coordinator { + coordinatorRule = append(coordinatorRule, coordinatorItem) + } + + logs, sub, err := _BeaconVRFConsumer.contract.WatchLogs(opts, "CoordinatorUpdated", coordinatorRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BeaconVRFConsumerCoordinatorUpdated) + if err := _BeaconVRFConsumer.contract.UnpackLog(event, "CoordinatorUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerFilterer) ParseCoordinatorUpdated(log types.Log) (*BeaconVRFConsumerCoordinatorUpdated, error) { + event := new(BeaconVRFConsumerCoordinatorUpdated) + if err := _BeaconVRFConsumer.contract.UnpackLog(event, "CoordinatorUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BeaconVRFConsumerOwnershipTransferRequestedIterator struct { + Event *BeaconVRFConsumerOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BeaconVRFConsumerOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done 
{ + select { + case log := <-it.logs: + it.Event = new(BeaconVRFConsumerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BeaconVRFConsumerOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BeaconVRFConsumerOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *BeaconVRFConsumerOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BeaconVRFConsumerOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BeaconVRFConsumerOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _BeaconVRFConsumer.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &BeaconVRFConsumerOwnershipTransferRequestedIterator{contract: _BeaconVRFConsumer.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *BeaconVRFConsumerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := 
range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _BeaconVRFConsumer.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BeaconVRFConsumerOwnershipTransferRequested) + if err := _BeaconVRFConsumer.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerFilterer) ParseOwnershipTransferRequested(log types.Log) (*BeaconVRFConsumerOwnershipTransferRequested, error) { + event := new(BeaconVRFConsumerOwnershipTransferRequested) + if err := _BeaconVRFConsumer.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BeaconVRFConsumerOwnershipTransferredIterator struct { + Event *BeaconVRFConsumerOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BeaconVRFConsumerOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BeaconVRFConsumerOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BeaconVRFConsumerOwnershipTransferred) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BeaconVRFConsumerOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *BeaconVRFConsumerOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BeaconVRFConsumerOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BeaconVRFConsumerOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _BeaconVRFConsumer.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &BeaconVRFConsumerOwnershipTransferredIterator{contract: _BeaconVRFConsumer.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *BeaconVRFConsumerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _BeaconVRFConsumer.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(BeaconVRFConsumerOwnershipTransferred) + if err := _BeaconVRFConsumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BeaconVRFConsumer *BeaconVRFConsumerFilterer) ParseOwnershipTransferred(log types.Log) (*BeaconVRFConsumerOwnershipTransferred, error) { + event := new(BeaconVRFConsumerOwnershipTransferred) + if err := _BeaconVRFConsumer.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type SMyBeaconRequests struct { + SlotNumber uint32 + ConfirmationDelay *big.Int + NumWords uint16 + Requester common.Address +} + +func (_BeaconVRFConsumer *BeaconVRFConsumer) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _BeaconVRFConsumer.abi.Events["CoordinatorUpdated"].ID: + return _BeaconVRFConsumer.ParseCoordinatorUpdated(log) + case _BeaconVRFConsumer.abi.Events["OwnershipTransferRequested"].ID: + return _BeaconVRFConsumer.ParseOwnershipTransferRequested(log) + case _BeaconVRFConsumer.abi.Events["OwnershipTransferred"].ID: + return _BeaconVRFConsumer.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (BeaconVRFConsumerCoordinatorUpdated) Topic() common.Hash { + return common.HexToHash("0xc258faa9a17ddfdf4130b4acff63a289202e7d5f9e42f366add65368575486bc") +} + +func (BeaconVRFConsumerOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (BeaconVRFConsumerOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + 
+func (_BeaconVRFConsumer *BeaconVRFConsumer) Address() common.Address { + return _BeaconVRFConsumer.address +} + +type BeaconVRFConsumerInterface interface { + Fail(opts *bind.CallOpts) (bool, error) + + IBeaconPeriodBlocks(opts *bind.CallOpts) (*big.Int, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SReceivedRandomnessByRequestID(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) + + SArguments(opts *bind.CallOpts) ([]byte, error) + + SGasAvailable(opts *bind.CallOpts) (*big.Int, error) + + SMostRecentRequestID(opts *bind.CallOpts) (*big.Int, error) + + SMyBeaconRequests(opts *bind.CallOpts, arg0 *big.Int) (SMyBeaconRequests, + + error) + + SRandomWords(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) + + SRequestsIDs(opts *bind.CallOpts, arg0 *big.Int, arg1 *big.Int) (*big.Int, error) + + SSubId(opts *bind.CallOpts) (uint64, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + RawFulfillRandomWords(opts *bind.TransactOpts, requestID *big.Int, randomWords []*big.Int, arguments []byte) (*types.Transaction, error) + + SetCoordinator(opts *bind.TransactOpts, coordinator common.Address) (*types.Transaction, error) + + SetFail(opts *bind.TransactOpts, shouldFail bool) (*types.Transaction, error) + + StoreBeaconRequest(opts *bind.TransactOpts, reqId *big.Int, height *big.Int, delay *big.Int, numWords uint16) (*types.Transaction, error) + + TestRedeemRandomness(opts *bind.TransactOpts, subID *big.Int, requestID *big.Int) (*types.Transaction, error) + + TestRequestRandomness(opts *bind.TransactOpts, numWords uint16, subID *big.Int, confirmationDelayArg *big.Int) (*types.Transaction, error) + + TestRequestRandomnessFulfillment(opts *bind.TransactOpts, subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterCoordinatorUpdated(opts 
*bind.FilterOpts, coordinator []common.Address) (*BeaconVRFConsumerCoordinatorUpdatedIterator, error) + + WatchCoordinatorUpdated(opts *bind.WatchOpts, sink chan<- *BeaconVRFConsumerCoordinatorUpdated, coordinator []common.Address) (event.Subscription, error) + + ParseCoordinatorUpdated(log types.Log) (*BeaconVRFConsumerCoordinatorUpdated, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BeaconVRFConsumerOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *BeaconVRFConsumerOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*BeaconVRFConsumerOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BeaconVRFConsumerOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *BeaconVRFConsumerOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*BeaconVRFConsumerOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/ocr2vrf/generated/vrf_coordinator/vrf_coordinator.go b/core/gethwrappers/ocr2vrf/generated/vrf_coordinator/vrf_coordinator.go new file mode 100644 index 00000000..bd78d6ba --- /dev/null +++ b/core/gethwrappers/ocr2vrf/generated/vrf_coordinator/vrf_coordinator.go @@ -0,0 +1,3870 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package vrf_coordinator + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type ECCArithmeticG1Point struct { + P [2]*big.Int +} + +type VRFBeaconTypesCallback struct { + RequestID *big.Int + NumWords uint16 + Requester common.Address + Arguments []byte + GasAllowance *big.Int + SubID *big.Int + GasPrice *big.Int + WeiPerUnitLink *big.Int +} + +type VRFBeaconTypesCoordinatorConfig struct { + UseReasonableGasPrice bool + ReentrancyLock bool + Paused bool + PremiumPercentage uint8 + UnusedGasPenaltyPercent uint8 + StalenessSeconds uint32 + RedeemableRequestGasOverhead uint32 + CallbackRequestGasOverhead uint32 + ReasonableGasPriceStalenessBlocks uint32 + FallbackWeiPerUnitLink *big.Int +} + +type VRFBeaconTypesCostedCallback struct { + Callback VRFBeaconTypesCallback + Price *big.Int +} + +type VRFBeaconTypesOutputServed struct { + Height uint64 + ConfirmationDelay *big.Int + ProofG1X *big.Int + ProofG1Y *big.Int +} + +type VRFBeaconTypesVRFOutput struct { + BlockHeight uint64 + ConfirmationDelay *big.Int + VrfOutput ECCArithmeticG1Point + Callbacks []VRFBeaconTypesCostedCallback + ShouldStore bool +} + +type VRFCoordinatorCallbackConfig struct { + MaxCallbackGasLimit uint32 + MaxCallbackArgumentsLength uint32 +} + +var VRFCoordinatorMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"beaconPeriodBlocksArg\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"linkToken\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"BeaconPeriodMustBePositive\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestHeight\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"earliestAllowed\",\"type\":\"uint256\"}],\"name\":\"BlockTooRecent\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint16[10]\",\"name\":\"confirmationDelays\",\"type\":\"uint16[10]\"},{\"internalType\":\"uint8\",\"name\":\"violatingIndex\",\"type\":\"uint8\"}],\"name\":\"ConfirmationDelaysNotIncreasing\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ContractPaused\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinatorAddress\",\"type\":\"address\"}],\"name\":\"CoordinatorAlreadyRegistered\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"coordinatorAddress\",\"type\":\"address\"}],\"name\":\"CoordinatorNotRegistered\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"gasAllowance\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasLeft\",\"type\":\"uint256\"}],\"name\":\"GasAllowanceExceedsGasLeft\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"reportHeight\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"separatorHeight\",\"type\":\"uint64\"}],\"name\":\"HistoryDomainSeparatorTooOld\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"actualBalance\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requiredBalance\",\"type\":\"uint256\"}],\"name\":\"InsufficientBalance\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"expectedLength\",\"type\":\"uint16\"},{\"internalType\":\"uint256\",\"name\":\"actualLength\",\"type\":\
"uint256\"}],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"InvalidConsumer\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidCoordinatorConfig\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidJuelsConversion\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numRecipients\",\"type\":\"uint256\"}],\"name\":\"InvalidNumberOfRecipients\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestedSubID\",\"type\":\"uint256\"}],\"name\":\"InvalidSubscription\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"requestedVersion\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"coordinatorVersion\",\"type\":\"uint8\"}],\"name\":\"MigrationVersionMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MustBeProducer\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"proposedOwner\",\"type\":\"address\"}],\"name\":\"MustBeRequestedOwner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"MustBeSubOwner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"NativePaymentGiven\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NoWordsRequested\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint16[10]\",\"name\":\"confDelays\",\"type\":\"uint16[10]\"}],\"name\":\"NonZeroDelayAfterZeroDelay\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnMigrationNotSupported\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableFromLink\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PendingRequestExists\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"producer\",\"type\":\"address\"}],\"name\":\"ProducerAlreadyInitialized\",\"type\":\"error\
"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestHeight\",\"type\":\"uint256\"}],\"name\":\"RandomnessNotAvailable\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestHeight\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"confDelay\",\"type\":\"uint256\"}],\"name\":\"RandomnessSeedNotFoundForCallbacks\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"numRecipients\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"numPayments\",\"type\":\"uint256\"}],\"name\":\"RecipientsPaymentsMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"Reentrant\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"expected\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"actual\",\"type\":\"address\"}],\"name\":\"ResponseMustBeRetrievedByRequester\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyConsumers\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManyRequestsReplaceContract\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TooManySlotsReplaceContract\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requested\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"max\",\"type\":\"uint256\"}],\"name\":\"TooManyWords\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"blockHeight\",\"type\":\"uint256\"}],\"name\":\"UniverseHasEndedBangBangBang\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"maxCallbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCallbackArgumentsLength\",\"type\":\"uint32\"}],\"indexed\":false,\"internalType\":\"structVRFCoordinator.CallbackConfig\",\"name\":\"newConfig\",\"type\":\"tuple\"}],\"name\":\"CallbackConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"components\":[
{\"internalType\":\"bool\",\"name\":\"useReasonableGasPrice\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"reentrancyLock\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"premiumPercentage\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"unusedGasPenaltyPercent\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"redeemableRequestGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"callbackRequestGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"reasonableGasPriceStalenessBlocks\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"uint96\"}],\"indexed\":false,\"internalType\":\"structVRFBeaconTypes.CoordinatorConfig\",\"name\":\"coordinatorConfig\",\"type\":\"tuple\"}],\"name\":\"CoordinatorConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"coordinatorAddress\",\"type\":\"address\"}],\"name\":\"CoordinatorDeregistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"coordinatorAddress\",\"type\":\"address\"}],\"name\":\"CoordinatorRegistered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint8\",\"name\":\"newVersion\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newCoordinator\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"}],\"name\":\"MigrationCompleted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"recentBlockHeight\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint192\",\"name\":\"juelsPerFeeCoin\",\"type\":\"uint192\"},{\"indexe
d\":false,\"internalType\":\"uint64\",\"name\":\"reasonableGasPrice\",\"type\":\"uint64\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"height\",\"type\":\"uint64\"},{\"internalType\":\"uint24\",\"name\":\"confirmationDelay\",\"type\":\"uint24\"},{\"internalType\":\"uint256\",\"name\":\"proofG1X\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"proofG1Y\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structVRFBeaconTypes.OutputServed[]\",\"name\":\"outputsServed\",\"type\":\"tuple[]\"}],\"name\":\"OutputsServed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"}],\"name\":\"PauseFlagChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"requestIDs\",\"type\":\"uint256[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"successfulFulfillment\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes[]\",\"name\":\"truncatedErrorData\",\"type\":\"bytes[]\"},{\"indexed\":false,\"internalType\":\"uint96[]\",\"name\":\"subBalances\",\"type\":\"uint96[]\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"subIDs\",\"type\":\"uint256[]\"}],\"name\":\"RandomWordsFulfilled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"},{\"indexed\":false,\"
internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"nextBeaconOutputHeight\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint24\",\"name\":\"confDelay\",\"type\":\"uint24\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"gasAllowance\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"weiPerUnitLink\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"arguments\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"costJuels\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newSubBalance\",\"type\":\"uint256\"}],\"name\":\"RandomnessFulfillmentRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"}],\"name\":\"RandomnessRedeemed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"nextBeaconOutputHeight\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint24\",\"name\":\"confDelay\",\"type\":\"uint24\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"numWord
s\",\"type\":\"uint16\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"costJuels\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newSubBalance\",\"type\":\"uint256\"}],\"name\":\"RandomnessRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"SubscriptionCanceled\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"SubscriptionConsumerRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"SubscriptionCreated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldBalance\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newBalance\",\"type\":\"uint256\"}],\"name\":\"SubscriptionFunded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\
"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"SubscriptionOwnerTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"MAX_CONSUMERS\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"MAX_NUM_WORDS\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"NUM_CONF_DELAYS\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"acceptSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"addConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"recipients\",\"type\":\"address[]\"},{\"internalType\":\"uint256[]\",\"name\":\"paymentsInJuels\",\"type\":\"uint256[]\"}],\"name\":\"batchTransferLink\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"ad
dress\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"cancelSubscription\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"createSubscription\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"}],\"name\":\"deregisterMigratableCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requestId\",\"type\":\"uint256\"}],\"name\":\"getCallbackMemo\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConfirmationDelays\",\"outputs\":[{\"internalType\":\"uint24[8]\",\"name\":\"\",\"type\":\"uint24[8]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"getFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"arguments\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"getFulfillmentFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"}],\"name\":\"getSubscription\",\"outputs\":[{\"internalType\":\"uint96\",\"name\":\"balance\",\"type\":\"uint96\"},{\"internalType\":\"uint64\",\"name\":\"pendingFulfillments\",\"type\":\"ui
nt64\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"consumers\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getSubscriptionLinkBalance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_beaconPeriodBlocks\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_link\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractIVRFMigration\",\"name\":\"newCoordinator\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"encodedRequest\",\"type\":\"bytes\"}],\"name\":\"migrate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"migrationVersion\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"onMigration\",\"outputs\":[],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint64\",\"name\":\"blockHeight\",\"type\":\"uint64\"},{\"internal
Type\":\"uint24\",\"name\":\"confirmationDelay\",\"type\":\"uint24\"},{\"components\":[{\"internalType\":\"uint256[2]\",\"name\":\"p\",\"type\":\"uint256[2]\"}],\"internalType\":\"structECCArithmetic.G1Point\",\"name\":\"vrfOutput\",\"type\":\"tuple\"},{\"components\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"arguments\",\"type\":\"bytes\"},{\"internalType\":\"uint96\",\"name\":\"gasAllowance\",\"type\":\"uint96\"},{\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"gasPrice\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"weiPerUnitLink\",\"type\":\"uint256\"}],\"internalType\":\"structVRFBeaconTypes.Callback\",\"name\":\"callback\",\"type\":\"tuple\"},{\"internalType\":\"uint96\",\"name\":\"price\",\"type\":\"uint96\"}],\"internalType\":\"structVRFBeaconTypes.CostedCallback[]\",\"name\":\"callbacks\",\"type\":\"tuple[]\"},{\"internalType\":\"bool\",\"name\":\"shouldStore\",\"type\":\"bool\"}],\"internalType\":\"structVRFBeaconTypes.VRFOutput[]\",\"name\":\"vrfOutputs\",\"type\":\"tuple[]\"},{\"internalType\":\"uint192\",\"name\":\"juelsPerFeeCoin\",\"type\":\"uint192\"},{\"internalType\":\"uint64\",\"name\":\"reasonableGasPrice\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"blockHeight\",\"type\":\"uint64\"}],\"name\":\"processVRFOutputs\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"containsNewOutputs\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requestID\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"redeemRandomness\",\"outpu
ts\":[{\"internalType\":\"uint256[]\",\"name\":\"randomness\",\"type\":\"uint256[]\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"}],\"name\":\"registerMigratableCoordinator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"consumer\",\"type\":\"address\"}],\"name\":\"removeConsumer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"internalType\":\"uint24\",\"name\":\"confDelay\",\"type\":\"uint24\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"requestRandomness\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subID\",\"type\":\"uint256\"},{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"internalType\":\"uint24\",\"name\":\"confDelay\",\"type\":\"uint24\"},{\"internalType\":\"uint32\",\"name\":\"callbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"bytes\",\"name\":\"arguments\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"requestRandomnessFulfillment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"subId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"requestSubscriptionOwnerTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_callbackC
onfig\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"maxCallbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCallbackArgumentsLength\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_coordinatorConfig\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"useReasonableGasPrice\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"reentrancyLock\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"premiumPercentage\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"unusedGasPenaltyPercent\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"redeemableRequestGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"callbackRequestGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"reasonableGasPriceStalenessBlocks\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"uint96\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"s_pendingRequests\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"slotNumber\",\"type\":\"uint32\"},{\"internalType\":\"uint24\",\"name\":\"confirmationDelay\",\"type\":\"uint24\"},{\"internalType\":\"uint16\",\"name\":\"numWords\",\"type\":\"uint16\"},{\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_producer\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"maxCallbackGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"maxCal
lbackArgumentsLength\",\"type\":\"uint32\"}],\"internalType\":\"structVRFCoordinator.CallbackConfig\",\"name\":\"config\",\"type\":\"tuple\"}],\"name\":\"setCallbackConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint24[8]\",\"name\":\"confDelays\",\"type\":\"uint24[8]\"}],\"name\":\"setConfirmationDelays\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bool\",\"name\":\"useReasonableGasPrice\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"reentrancyLock\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"paused\",\"type\":\"bool\"},{\"internalType\":\"uint8\",\"name\":\"premiumPercentage\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"unusedGasPenaltyPercent\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"redeemableRequestGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"callbackRequestGasOverhead\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"reasonableGasPriceStalenessBlocks\",\"type\":\"uint32\"},{\"internalType\":\"uint96\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"uint96\"}],\"internalType\":\"structVRFBeaconTypes.CoordinatorConfig\",\"name\":\"coordinatorConfig\",\"type\":\"tuple\"}],\"name\":\"setCoordinatorConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"pause\",\"type\":\"bool\"}],\"name\":\"setPauseFlag\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"producer\",\"type\":\"address\"}],\"name\":\"setProducer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\
"uint256\",\"name\":\"juelsAmount\",\"type\":\"uint256\"}],\"name\":\"transferLink\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60c06040523480156200001157600080fd5b50604051620062f5380380620062f5833981016040819052620000349162000239565b8033806000816200008c5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000bf57620000bf816200018e565b5050506001600160a01b03166080908152604080519182018152600080835260208301819052908201819052662386f26fc10000606090920191909152642386f26fc160b01b6006556004805463ffffffff60281b191668ffffffff00000000001790558290036200014457604051632abc297960e01b815260040160405180910390fd5b60a0829052600e805465ffffffffffff16906000620001638362000278565b91906101000a81548165ffffffffffff021916908365ffffffffffff160217905550505050620002ac565b336001600160a01b03821603620001e85760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000083565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600080604083850312156200024d57600080fd5b825160208401519092506001600160a01b03811681146200026d57600080fd5b809150509250929050565b600065ffffffffffff808316818103620002a257634e487b7160e01b600052601160045260246000fd5b6001019392505050565b60805160a051615fdd620003186000396000818161075f01528181611ca301528181613cb301528181613ce201528181613d1a015261430e01526000818161047501528181610bb501528181611a410152818161237b01528181612d610152612df60152615fdd6000f3fe6080604052600436106101d65760003560e01c806304104edb146101db5
780630ae09540146101fd57806316f6ee9a1461021d578063294daa491461025d5780632b38bafc1461027f5780632f7527cc1461029f5780633e79167f146102b457806340d6bb82146102d457806347c3e2cb146102ea5780634ffac83a14610385578063597d2f3c146103985780635d06b4ab146103b657806364d51a2a146103d657806373433a2f146103fe57806376f2e3f41461041e57806379ba50971461044e5780637d253aff1461046357806385c64e11146104a45780638c7cba66146104c65780638da5cb5b146104e65780638da92e71146105045780638eef585f146105245780639e20103614610544578063a21a23e414610564578063a4c0ed3614610579578063acfc6cdd14610599578063b2a7cac5146105c6578063b79fa6f7146105e6578063bd58017f146106cd578063bec4c08c146106ed578063c3fbb6fd1461070d578063cb6317971461072d578063cd0593df1461074d578063ce3f471914610781578063dac83d29146107a1578063db972c8b146107c1578063dc311dd3146107d4578063e30afa4a14610804578063f2fde38b14610849578063f99b1d6814610869578063f9c45ced14610889575b600080fd5b3480156101e757600080fd5b506101fb6101f6366004614abd565b6108a9565b005b34801561020957600080fd5b506101fb610218366004614ae1565b610a5e565b34801561022957600080fd5b5061024a610238366004614b11565b6000908152600c602052604090205490565b6040519081526020015b60405180910390f35b34801561026957600080fd5b5060015b60405160ff9091168152602001610254565b34801561028b57600080fd5b506101fb61029a366004614abd565b610cc0565b3480156102ab57600080fd5b5061026d600881565b3480156102c057600080fd5b506101fb6102cf366004614b2a565b610d21565b3480156102e057600080fd5b5061024a6103e881565b3480156102f657600080fd5b50610348610305366004614b11565b60106020526000908152604090205463ffffffff811690600160201b810462ffffff1690600160381b810461ffff1690600160481b90046001600160a01b031684565b6040805163ffffffff909516855262ffffff909316602085015261ffff909116918301919091526001600160a01b03166060820152608001610254565b61024a610393366004614cac565b610dd2565b3480156103a457600080fd5b506002546001600160601b031661024a565b3480156103c257600080fd5b506101fb6103d1366004614abd565b610f7e565b3480156103e257600080fd5b506103eb606481565b60405161ffff9091168152602001610254565b34801561040a5
7600080fd5b506101fb610419366004614d57565b61102a565b34801561042a57600080fd5b5061043e610439366004614dd9565b611115565b6040519015158152602001610254565b34801561045a57600080fd5b506101fb611453565b34801561046f57600080fd5b506104977f000000000000000000000000000000000000000000000000000000000000000081565b6040516102549190614e5b565b3480156104b057600080fd5b506104b96114fd565b6040516102549190614e6f565b3480156104d257600080fd5b506101fb6104e1366004614ec3565b611562565b3480156104f257600080fd5b506000546001600160a01b0316610497565b34801561051057600080fd5b506101fb61051f366004614f1d565b6115d6565b34801561053057600080fd5b506101fb61053f366004614f3a565b611640565b34801561055057600080fd5b5061024a61055f366004614f65565b61167c565b34801561057057600080fd5b5061024a61179d565b34801561058557600080fd5b506101fb610594366004615019565b6119e3565b3480156105a557600080fd5b506105b96105b4366004615068565b611bce565b60405161025491906150f2565b3480156105d257600080fd5b506101fb6105e1366004614b11565b611dd2565b3480156105f257600080fd5b506004546005546106629160ff80821692610100830482169262010000810483169263010000008204811692600160201b83049091169163ffffffff600160281b8204811692600160481b8304821692600160681b8104831692600160881b90910416906001600160601b03168a565b604080519a15158b5298151560208b01529615159789019790975260ff948516606089015292909316608087015263ffffffff90811660a087015291821660c0860152811660e08501529091166101008301526001600160601b031661012082015261014001610254565b3480156106d957600080fd5b50600a54610497906001600160a01b031681565b3480156106f957600080fd5b506101fb610708366004614ae1565b611f03565b34801561071957600080fd5b506101fb610728366004615105565b6120bf565b34801561073957600080fd5b506101fb610748366004614ae1565b6125ac565b34801561075957600080fd5b5061024a7f000000000000000000000000000000000000000000000000000000000000000081565b34801561078d57600080fd5b506101fb61079c366004615159565b612899565b3480156107ad57600080fd5b506101fb6107bc366004614ae1565b6128b2565b61024a6107cf36600461519a565b6129c3565b3480156107e057600080fd5b506107f46107ef366004614b1
1565b612c21565b6040516102549493929190615272565b34801561081057600080fd5b50600b5461082c9063ffffffff80821691600160201b90041682565b6040805163ffffffff938416815292909116602083015201610254565b34801561085557600080fd5b506101fb610864366004614abd565b612d0e565b34801561087557600080fd5b506101fb6108843660046152be565b612d1f565b34801561089557600080fd5b5061024a6108a43660046152ea565b612e88565b6108b1612f9f565b60095460005b81811015610a3657826001600160a01b0316600982815481106108dc576108dc615330565b6000918252602090912001546001600160a01b031603610a2457600961090360018461535c565b8154811061091357610913615330565b600091825260209091200154600980546001600160a01b03909216918390811061093f5761093f615330565b600091825260209091200180546001600160a01b0319166001600160a01b039290921691909117905582600961097660018561535c565b8154811061098657610986615330565b9060005260206000200160006101000a8154816001600160a01b0302191690836001600160a01b0316021790555060098054806109c5576109c561536f565b600082815260209020810160001990810180546001600160a01b03191690550190556040517ff80a1a97fd42251f3c33cda98635e7399253033a6774fe37cd3f650b5282af3790610a17908590614e5b565b60405180910390a1505050565b80610a2e81615385565b9150506108b7565b5081604051635428d44960e01b8152600401610a529190614e5b565b60405180910390fd5b50565b60008281526007602052604090205482906001600160a01b031680610a995760405163c5171ee960e01b815260048101839052602401610a52565b336001600160a01b03821614610ac45780604051636c51fda960e11b8152600401610a529190614e5b565b600454610100900460ff1615610aed5760405163769dd35360e11b815260040160405180910390fd5b600084815260086020526040902054600160601b90046001600160401b031615610b2a57604051631685ecdd60e31b815260040160405180910390fd5b6000848152600860209081526040918290208251808401909352546001600160601b038116808452600160601b9091046001600160401b031691830191909152610b7386612ff4565b600280546001600160601b03169082906000610b8f838561539e565b92506101000a8154816001600160601b0302191690836001600160601b031602179055507f00000000000000000000000000000000000000000000000000000000000000006
001600160a01b031663a9059cbb87846001600160601b03166040518363ffffffff1660e01b8152600401610c0a9291906153c5565b6020604051808303816000875af1158015610c29573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610c4d91906153de565b610c7d5760405163cf47918160e01b81526001600160601b03808316600483015283166024820152604401610a52565b867f3784f77e8e883de95b5d47cd713ced01229fa74d118c0a462224bcb0516d43f18784604051610caf9291906153fb565b60405180910390a250505050505050565b610cc8612f9f565b600a546001600160a01b031615610cff57600a5460405163ea6d390560e01b8152610a52916001600160a01b031690600401614e5b565b600a80546001600160a01b0319166001600160a01b0392909216919091179055565b610d29612f9f565b6064610d3b60a0830160808401615437565b60ff161180610d555750610d556040820160208301614f1d565b80610d6b5750610d6b6060820160408301614f1d565b15610d895760405163b0e7bd8360e01b815260040160405180910390fd5b806004610d96828261549d565b9050507e28d3a46e95e67def989d41c66eb331add9809460b95b5fb4eb006157728fc581604051610dc79190615670565b60405180910390a150565b60045460009062010000900460ff1615610dff5760405163ab35696f60e01b815260040160405180910390fd5b600454610100900460ff1615610e285760405163769dd35360e11b815260040160405180910390fd5b3415610e4957604051630b829bad60e21b8152346004820152602401610a52565b6000806000610e5a88338989613143565b925092509250600080610e6d338b61328e565b600087815260106020908152604091829020885181548a8401518b8601516060808e015163ffffffff90951666ffffffffffffff1990941693909317600160201b62ffffff9384160217600160381b600160e81b031916600160381b61ffff92831602600160481b600160e81b03191617600160481b6001600160a01b03909516949094029390931790935584513381526001600160401b038b1694810194909452918e169383019390935281018e9052908c16608082015260a081018390526001600160601b03821660c0820152919350915085907fb7933fba96b6b452eb44f99fdc08052a45dff82363d59abaff0456931c3d24599060e00160405180910390a2509298975050505050505050565b610f86612f9f565b610f8f8161346a565b15610faf578060405163ac8a27ef60e01b8152600401610a529190614e5b565b600980546001810182556000919
091527f6e1540171b6c0c960b71a7020d9f60077f6af931a8bbf590da0223dacf75c7af0180546001600160a01b0319166001600160a01b0383161790556040517fb7cabbfc11e66731fc77de0444614282023bcbd41d16781c753a431d0af0162590610dc7908390614e5b565b600a546001600160a01b0316331461105557604051634bea32db60e11b815260040160405180910390fd5b828015806110635750601f81115b1561108457604051634ecc4fef60e01b815260048101829052602401610a52565b8082146110a85760405163339f8a9d60e01b8152610a529082908490600401615758565b60005b8181101561110d576110fb8686838181106110c8576110c8615330565b90506020020160208101906110dd9190614abd565b8585848181106110ef576110ef615330565b90506020020135612d1f565b8061110581615385565b9150506110ab565b505050505050565b600a546000906001600160a01b0316331461114357604051634bea32db60e11b815260040160405180910390fd5b60045462010000900460ff161561116d5760405163ab35696f60e01b815260040160405180910390fd5b6001600160c01b038416156111aa57600680546001600160601b038616600160a01b02600160201b600160a01b0390911663ffffffff4216171790555b6001600160401b038316156111ff5760068054436001600160401b03908116600160201b02600160201b600160601b0319918716600160601b0291909116600160201b600160a01b0319909216919091171790555b600080866001600160401b0381111561121a5761121a614b68565b60405190808252806020026020018201604052801561125357816020015b6112406148f6565b8152602001906001900390816112385790505b50905060005b8781101561135557600089898381811061127557611275615330565b90506020028101906112879190615766565b611290906158ea565b9050600061129f82888b6134d3565b905085806112aa5750805b604083015151519096501515806112c957506040820151516020015115155b15611340576040805160808101825283516001600160401b0316815260208085015162ffffff168183015284830180515151938301939093529151519091015160608201528451859061ffff881690811061132657611326615330565b6020026020010181905250848061133c906159d0565b9550505b5050808061134d90615385565b915050611259565b5060008261ffff166001600160401b0381111561137457611374614b68565b6040519080825280602002602001820160405280156113ad57816020015b61139a6148f6565b815260200190600190039
0816113925790505b50905060005b8361ffff16811015611409578281815181106113d1576113d1615330565b60200260200101518282815181106113eb576113eb615330565b6020026020010181905250808061140190615385565b9150506113b3565b507ff10ea936d00579b4c52035ee33bf46929646b3aa87554c565d8fb2c7aa549c448588888460405161143f94939291906159f1565b60405180910390a150505095945050505050565b6001546001600160a01b031633146114a65760405162461bcd60e51b815260206004820152601660248201527526bab9ba10313290383937b837b9b2b21037bbb732b960511b6044820152606401610a52565b60008054336001600160a01b0319808316821784556001805490911690556040516001600160a01b0390921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b61150561492c565b6040805161010081019182905290600f90600890826000855b82829054906101000a900462ffffff1662ffffff168152602001906003019060208260020104928301926001038202915080841161151e5790505050505050905090565b61156a612f9f565b8051600b80546020808501805163ffffffff908116600160201b026001600160401b031990941695811695861793909317909355604080519485529251909116908301527f0cc54509a45ab33cd67614d4a2892c083ecf8fb43b9d29f6ea8130b9023e51df9101610dc7565b6115de612f9f565b60045460ff6201000090910416151581151514610a5b5760048054821515620100000262ff0000199091161790556040517f49ba7c1de2d8853088b6270e43df2118516b217f38b917dd2b80dea360860fbe90610dc790831515815260200190565b600a546001600160a01b0316331461166b57604051634bea32db60e11b815260040160405180910390fd5b611678600f82600861494b565b5050565b604080516101408101825260045460ff80821615158352610100808304821615156020808601919091526201000084048316151585870152630100000084048316606080870191909152600160201b808604909416608080880191909152600160281b860463ffffffff90811660a0890152600160481b8704811660c0890152600160681b8704811660e0890152600160881b9096048616938701939093526005546001600160601b039081166101208801528751938401885260065480871685526001600160401b03958104861693850193909352600160601b830490941696830196909652600160a01b90049091169381019390935260009283926117889288169187919061366b565b505060016
00160601b03169695505050505050565b600454600090610100900460ff16156117c95760405163769dd35360e11b815260040160405180910390fd5b60045462010000900460ff16156117f35760405163ab35696f60e01b815260040160405180910390fd5b60003361180160014361535c565b6001546040516001600160601b0319606094851b81166020830152924060348201523090931b90911660548301526001600160c01b0319600160a01b90910460c01b16606882015260700160408051808303601f19018152919052805160209091012060018054919250600160a01b9091046001600160401b031690601461188883615a86565b91906101000a8154816001600160401b0302191690836001600160401b03160217905550506000806001600160401b038111156118c7576118c7614b68565b6040519080825280602002602001820160405280156118f0578160200160208202803683370190505b5060408051808201825260008082526020808301828152878352600882528483209351845491516001600160601b039091166001600160a01b031992831617600160601b6001600160401b039092169190910217909355835160608101855233815280820183815281860187815289855260078452959093208151815486166001600160a01b03918216178255935160018201805490961694169390931790935592518051949550919390926119ad9260028501929101906149e9565b505060405133915083907f1d3015d7ba850fa198dc7b1a3f5d42779313a681035f77c8c03764c61005518d90600090a350905090565b600454610100900460ff1615611a0c5760405163769dd35360e11b815260040160405180910390fd5b60045462010000900460ff1615611a365760405163ab35696f60e01b815260040160405180910390fd5b336001600160a01b037f00000000000000000000000000000000000000000000000000000000000000001614611a7f576040516344b0e3c360e01b815260040160405180910390fd5b60208114611aa557604051636865567560e01b8152610a52906020908390600401615aaa565b6000611ab382840184614b11565b6000818152600760205260409020549091506001600160a01b0316611aee5760405163c5171ee960e01b815260048101829052602401610a52565b600081815260086020526040812080546001600160601b031691869190611b158385615abe565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555084600260008282829054906101000a90046001600160601b0316611b5d9190615abe565b92506101000a8154816001600160601b030219169083600
1600160601b03160217905550817f1ced9348ff549fceab2ac57cd3a9de38edaaab274b725ee82c23e8fc8c4eec7a828784611bb09190615ade565b604051611bbe929190615758565b60405180910390a2505050505050565b600454606090610100900460ff1615611bfa5760405163769dd35360e11b815260040160405180910390fd5b60008381526010602081815260408084208151608081018352815463ffffffff8116825262ffffff600160201b8204168286015261ffff600160381b820416938201939093526001600160a01b03600160481b8404811660608301908152968a9052949093526001600160e81b031990911690559151163314611c9857806060015133604051638e30e82360e01b8152600401610a52929190615af1565b8051600090611cce907f00000000000000000000000000000000000000000000000000000000000000009063ffffffff16615b0b565b90506000611cda613715565b90506000836020015162ffffff1682611cf3919061535c565b9050808310611d385782846020015162ffffff1684611d129190615ade565b611d1d906001615ade565b6040516315ad27c360e01b8152600401610a52929190615758565b6001600160401b03831115611d63576040516302c6ef8160e11b815260048101849052602401610a52565b604051888152339088907f16f3f633197fafab10a5df69e6f3f2f7f20092f08d8d47de0a91c0f4b96a1a259060200160405180910390a3611dc68785600d6000611db1888a6020015161379f565b815260200190815260200160002054866137ae565b98975050505050505050565b600454610100900460ff1615611dfb5760405163769dd35360e11b815260040160405180910390fd5b6000818152600760205260409020546001600160a01b0316611e335760405163c5171ee960e01b815260048101829052602401610a52565b6000818152600760205260409020600101546001600160a01b03163314611e8a576000818152600760205260409081902060010154905163d084e97560e01b8152610a52916001600160a01b031690600401614e5b565b6000818152600760205260409081902080546001600160a01b031980821633908117845560019093018054909116905591516001600160a01b039092169183917fd4114ab6e9af9f597c52041f32d62dc57c5c4e4c0d4427006069635e216c938691611ef7918591615af1565b60405180910390a25050565b60008281526007602052604090205482906001600160a01b031680611f3e5760405163c5171ee960e01b815260048101839052602401610a52565b336001600160a01b03821614611f695780604051636c51fda960e11b81526
00401610a529190614e5b565b600454610100900460ff1615611f925760405163769dd35360e11b815260040160405180910390fd5b60045462010000900460ff1615611fbc5760405163ab35696f60e01b815260040160405180910390fd5b60008481526007602052604090206002015460631901611fef576040516305a48e0f60e01b815260040160405180910390fd5b60036000611ffd8587613967565b815260208101919091526040016000205460ff166120b9576001600360006120258688613967565b815260208082019290925260409081016000908120805460ff191694151594909417909355868352600782528083206002018054600181018255908452919092200180546001600160a01b0319166001600160a01b0386161790555184907f1e980d04aa7648e205713e5e8ea3808672ac163d10936d36f91b2c88ac1575e1906120b0908690614e5b565b60405180910390a25b50505050565b600454610100900460ff16156120e85760405163769dd35360e11b815260040160405180910390fd5b6120f18361346a565b6121105782604051635428d44960e01b8152600401610a529190614e5b565b604081146121355760408051636865567560e01b8152610a5291908390600401615aaa565b600061214382840184615b22565b90506000806000806121588560200151612c21565b9350935093509350816001600160a01b0316336001600160a01b0316146121945781604051636c51fda960e11b8152600401610a529190614e5b565b876001600160a01b031663294daa496040518163ffffffff1660e01b8152600401602060405180830381865afa1580156121d2573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906121f69190615b5c565b60ff16856000015160ff1614612293578460000151886001600160a01b031663294daa496040518163ffffffff1660e01b8152600401602060405180830381865afa158015612249573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061226d9190615b5c565b60405163e7aada9560e01b815260ff928316600482015291166024820152604401610a52565b6001600160401b038316156122bb57604051631685ecdd60e31b815260040160405180910390fd5b60006040518060a001604052806122d0600190565b60ff16815260200187602001518152602001846001600160a01b03168152602001838152602001866001600160601b031681525090506000816040516020016123199190615b79565b60405160208183030381529060405290506123378760200151612ff4565b600280548791906000906123559
084906001600160601b031661539e565b92506101000a8154816001600160601b0302191690836001600160601b031602179055507f00000000000000000000000000000000000000000000000000000000000000006001600160a01b031663a9059cbb8b886040518363ffffffff1660e01b81526004016123c79291906153fb565b6020604051808303816000875af11580156123e6573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061240a91906153de565b61244b5760405162461bcd60e51b8152602060048201526012602482015271696e73756666696369656e742066756e647360701b6044820152606401610a52565b60405163ce3f471960e01b81526001600160a01b038b169063ce3f471990612477908490600401615c25565b600060405180830381600087803b15801561249157600080fd5b505af11580156124a5573d6000803e3d6000fd5b50506004805461ff00191661010017905550600090505b835181101561254f578381815181106124d7576124d7615330565b60200260200101516001600160a01b0316638ea981178c6040518263ffffffff1660e01b815260040161250a9190614e5b565b600060405180830381600087803b15801561252457600080fd5b505af1158015612538573d6000803e3d6000fd5b50505050808061254790615385565b9150506124bc565b506004805461ff00191690556020870151875160405160ff909116907fbd89b747474d3fc04664dfbd1d56ae7ffbe46ee097cdb9979c13916bb76269ce90612598908e90614e5b565b60405180910390a350505050505050505050565b60008281526007602052604090205482906001600160a01b0316806125e75760405163c5171ee960e01b815260048101839052602401610a52565b336001600160a01b038216146126125780604051636c51fda960e11b8152600401610a529190614e5b565b600454610100900460ff161561263b5760405163769dd35360e11b815260040160405180910390fd5b600084815260086020526040902054600160601b90046001600160401b03161561267857604051631685ecdd60e31b815260040160405180910390fd5b600360006126868587613967565b815260208101919091526040016000205460ff166126bb5783836040516379bfd40160e01b8152600401610a52929190615c38565b60008481526007602090815260408083206002018054825181850281018501909352808352919290919083018282801561271e57602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311612700575b50505050509050600060018251612
735919061535c565b905060005b825181101561284057856001600160a01b031683828151811061275f5761275f615330565b60200260200101516001600160a01b03160361282e57600083838151811061278957612789615330565b6020026020010151905080600760008a815260200190815260200160002060020183815481106127bb576127bb615330565b600091825260208083209190910180546001600160a01b0319166001600160a01b0394909416939093179092558981526007909152604090206002018054806128065761280661536f565b600082815260209020810160001990810180546001600160a01b031916905501905550612840565b8061283881615385565b91505061273a565b506003600061284f8789613967565b815260208101919091526040908101600020805460ff191690555186907f32158c6058347c1601b2d12bc696ac6901d8a9a9aa3ba10c27ab0a983e8425a790611bbe908890614e5b565b604051632cb6686f60e01b815260040160405180910390fd5b60008281526007602052604090205482906001600160a01b0316806128ed5760405163c5171ee960e01b815260048101839052602401610a52565b336001600160a01b038216146129185780604051636c51fda960e11b8152600401610a529190614e5b565b600454610100900460ff16156129415760405163769dd35360e11b815260040160405180910390fd5b6000848152600760205260409020600101546001600160a01b038481169116146120b9576000848152600760205260409081902060010180546001600160a01b0319166001600160a01b0386161790555184907f21a4dad170a6bf476c31bbcf4a16628295b0e450672eec25d7c93308e05344a1906120b09033908790615af1565b60045460009062010000900460ff16156129f05760405163ab35696f60e01b815260040160405180910390fd5b600454610100900460ff1615612a195760405163769dd35360e11b815260040160405180910390fd5b3415612a3a57604051630b829bad60e21b8152346004820152602401610a52565b600080612a4989338a8a613143565b925050915060006040518061010001604052808481526020018a61ffff168152602001336001600160a01b031681526020018781526020018863ffffffff166001600160601b031681526020018b81526020016000815260200160008152509050600080612ab68361397d565b60c087019190915260e08601919091526040519193509150612ae29085908c908f908790602001615c4f565b60405160208183030381529060405280519060200120600c60008781526020019081526020016000208190555060006040518
06101600160405280878152602001336001600160a01b03168152602001866001600160401b031681526020018c62ffffff1681526020018e81526020018d61ffff1681526020018b63ffffffff1681526020018581526020018a8152602001848152602001836001600160601b0316815250905080600001517f01872fb9c7d6d68af06a17347935e04412da302a377224c205e672c26e18c37f82602001518360400151846060015185608001518660a001518760c001518860e0015160c001518960e0015160e001518a61010001518b61012001518c6101400151604051612c089b9a99989796959493929190615d02565b60405180910390a250939b9a5050505050505050505050565b600081815260076020526040812054819081906060906001600160a01b0316612c605760405163c5171ee960e01b815260048101869052602401610a52565b60008581526008602090815260408083205460078352928190208054600290910180548351818602810186019094528084526001600160601b03861695600160601b90046001600160401b0316946001600160a01b03909316939192839190830182828015612cf857602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311612cda575b5050505050905093509350935093509193509193565b612d16612f9f565b610a5b81613bb4565b600a546001600160a01b03163314612d4a57604051634bea32db60e11b815260040160405180910390fd5b60405163a9059cbb60e01b81526001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb90612d9890859085906004016153c5565b6020604051808303816000875af1158015612db7573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612ddb91906153de565b611678576040516370a0823160e01b81526001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016906370a0823190612e2b903090600401614e5b565b602060405180830381865afa158015612e48573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612e6c9190615d98565b8160405163cf47918160e01b8152600401610a52929190615758565b604080516101408101825260045460ff80821615158352610100808304821615156020808601919091526201000084048316151585870152630100000084048316606080870191909152600160201b80860490941660808088019190915263ffffffff600160281b8704811660a08901526001604
81b8704811660c0890152600160681b8704811660e0890152600160881b9096048616938701939093526005546001600160601b039081166101208801528751938401885260065495861684526001600160401b03948604851692840192909252600160601b850490931695820195909552600160a01b90920490931692810192909252600091612f8d9190613c57565b6001600160601b031690505b92915050565b6000546001600160a01b03163314612ff25760405162461bcd60e51b815260206004820152601660248201527527b7363c9031b0b63630b1363290313c9037bbb732b960511b6044820152606401610a52565b565b6000818152600760209081526040808320815160608101835281546001600160a01b0390811682526001830154168185015260028201805484518187028101870186528181529295939486019383018282801561307a57602002820191906000526020600020905b81546001600160a01b0316815260019091019060200180831161305c575b505050505081525050905060005b8160400151518110156130ea57600360006130c0846040015184815181106130b2576130b2615330565b602002602001015186613967565b81526020810191909152604001600020805460ff19169055806130e281615385565b915050613088565b50600082815260076020526040812080546001600160a01b031990811682556001820180549091169055906131226002830182614a3e565b505050600090815260086020526040902080546001600160a01b0319169055565b600061314d614a5c565b60006103e88561ffff16111561317c57846103e8604051634a90778560e01b8152600401610a52929190615aaa565b8461ffff166000036131a1576040516308fad2a760e01b815260040160405180910390fd5b6000806131ac613c9d565b600e54919350915065ffffffffffff1660006132178b8b84604080513060208201529081018490526001600160a01b038316606082015265ffffffffffff8216608082015260009060a00160408051601f198184030181529190528051602090910120949350505050565b9050613224826001615db1565b600e805465ffffffffffff9290921665ffffffffffff199092169190911790556040805160808101825263ffffffff909416845262ffffff8916602085015261ffff8a16908401526001600160a01b038a1660608401529550909350909150509450945094915050565b604080516080808201835260065463ffffffff8082168452600160201b8083046001600160401b03908116602080880191909152600160601b850490911686880152600160a01b9093046001600160601b03908116606
08088019190915287516101408101895260045460ff808216151583526101008083048216151598840198909852620100008204811615159a83019a909a52630100000081048a169282019290925292810490971694820194909452600160281b8604821660a0820152600160481b8604821660c0820152600160681b8604821660e0820152600160881b9095041690840152600554166101208301526000918291906003836133948888613967565b815260208101919091526040016000205460ff166133c95784866040516379bfd40160e01b8152600401610a52929190615c38565b60006133d58284613c57565b600087815260086020526040902080546001600160601b0392831693509091168281101561342457815460405163cf47918160e01b8152610a52916001600160601b0316908590600401615dd0565b81546001600160601b0319908116918490036001600160601b038181169390931790935560028054918216918316859003909216179055909450925050505b9250929050565b6000805b6009548110156134ca57826001600160a01b03166009828154811061349557613495615330565b6000918252602090912001546001600160a01b0316036134b85750600192915050565b806134c281615385565b91505061346e565b50600092915050565b6000826001600160401b031684600001516001600160401b0316111561352257835160405163012d824d60e01b81526001600160401b0380861660048301529091166024820152604401610a52565b606084015151604080860151905160009161353f91602001615de9565b60405160208183030381529060405280519060200120905085604001516000015160006002811061357257613572615330565b602002015115801561358b575060408601515160200151155b156135c557600d60006135af88600001516001600160401b0316896020015161379f565b8152602001908152602001600020549050613648565b856080015115613648576000600d60006135f089600001516001600160401b03168a6020015161379f565b81526020810191909152604001600020549050806136425781600d60006136288a600001516001600160401b03168b6020015161379f565b815260208101919091526040016000205560019350613646565b8091505b505b6000613655838389613d70565b905083806136605750805b979650505050505050565b60008060008061367b8686614189565b6001600160401b031690506000613693826010615b0b565b9050600060146136a4836015615b0b565b6136ae9190615e29565b89516136ba9190615b0b565b838960e0015163ffffffff168c6136d191906
15abe565b6001600160601b03166136e49190615b0b565b6136ee9190615ade565b90506000806137008360008c8c614202565b909d909c50949a509398505050505050505050565b60004661a4b181148061372a575062066eed81145b156137985760646001600160a01b031663a3b1b31d6040518163ffffffff1660e01b8152600401602060405180830381865afa15801561376e573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906137929190615d98565b91505090565b4391505090565b62ffffff1660189190911b1790565b6060826137e05760405163220a34e960e11b8152600481018690526001600160401b0383166024820152604401610a52565b604080516020808201889052865163ffffffff168284015286015162ffffff166060808301919091529186015161ffff166080820152908501516001600160a01b031660a082015260c0810184905260009060e0016040516020818303038152906040528051906020012090506103e8856040015161ffff1611156138825784604001516103e8604051634a90778560e01b8152600401610a52929190615aaa565b6000856040015161ffff166001600160401b038111156138a4576138a4614b68565b6040519080825280602002602001820160405280156138cd578160200160208202803683370190505b50905060005b866040015161ffff168161ffff16101561395c57828160405160200161391092919091825260f01b6001600160f01b031916602082015260220190565b6040516020818303038152906040528051906020012060001c828261ffff168151811061393f5761393f615330565b602090810291909101015280613954816159d0565b9150506138d3565b509695505050505050565b60a081901b6001600160a01b0383161792915050565b6000806000806003600061399987604001518860a00151613967565b815260208101919091526040016000205460ff166139d6578460a0015185604001516040516379bfd40160e01b8152600401610a52929190615c38565b604080516080808201835260065463ffffffff80821684526001600160401b03600160201b8084048216602080880191909152600160601b8504909216868801526001600160601b03600160a01b909404841660608088019190915287516101408101895260045460ff808216151583526101008083048216151596840196909652620100008204811615159a83019a909a52630100000081048a168284015292830490981688870152600160281b8204841660a0890152600160481b8204841660c0890152600160681b8204841660e0890152600160881b90910490921
690860152600554909116610120850152908801519088015191929160009182918291613ae291868861366b565b60a08d0151600090815260086020526040902080546001600160601b0394851697509295509093509116841115613b3a57805460405163cf47918160e01b8152610a52916001600160601b0316908690600401615dd0565b80546001600160601b0360016001600160401b03600160601b8085048216929092011602818116828416178790038083166001600160601b03199283166001600160a01b03199095169490941793909317909355600280548083168890039092169190931617909155929a91995097509095509350505050565b336001600160a01b03821603613c065760405162461bcd60e51b815260206004820152601760248201527621b0b73737ba103a3930b739b332b9103a379039b2b63360491b6044820152606401610a52565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b600080613c648484614189565b8460c0015163ffffffff16613c799190615e3d565b6001600160401b031690506000613c938260008787614202565b5095945050505050565b6000806000613caa613715565b90506000613cd87f000000000000000000000000000000000000000000000000000000000000000083615e60565b9050600081613d077f000000000000000000000000000000000000000000000000000000000000000085615ade565b613d11919061535c565b90506000613d3f7f000000000000000000000000000000000000000000000000000000000000000083615e29565b905063ffffffff8110613d65576040516307b2a52360e41b815260040160405180910390fd5b909590945092505050565b6000806040518060c00160405280866001600160401b03811115613d9657613d96614b68565b604051908082528060200260200182016040528015613dbf578160200160208202803683370190505b508152602001866001600160401b03811115613ddd57613ddd614b68565b6040519080825280601f01601f191660200182016040528015613e07576020820181803683370190505b508152602001866001600160401b03811115613e2557613e25614b68565b604051908082528060200260200182016040528015613e5857816020015b6060815260200190600190039081613e435790505b50815260006020820152604001866001600160401b03811115613e7d57613e7d614b68565b604051908082528060200260200182016040528015613ea6578160200160208
202803683370190505b508152602001866001600160401b03811115613ec457613ec4614b68565b604051908082528060200260200182016040528015613eed578160200160208202803683370190505b509052905060005b8581101561406c57600084606001518281518110613f1557613f15615330565b60200260200101519050600080600080613f3989600001518a602001518c8861424d565b93509350935093508315613f8d57828760400151886060015161ffff1681518110613f6657613f66615330565b602090810291909101015260608701805190613f81826159d0565b61ffff16905250613fc0565b600160f81b87602001518781518110613fa857613fa8615330565b60200101906001600160f81b031916908160001a9053505b8780613fca575080155b85515188518051929a50909188908110613fe657613fe6615330565b602002602001018181525050818760800151878151811061400957614009615330565b60200260200101906001600160601b031690816001600160601b031681525050846000015160a001518760a00151878151811061404857614048615330565b6020026020010181815250505050505050808061406490615385565b915050613ef5565b5060608301515115614181576000816060015161ffff166001600160401b0381111561409a5761409a614b68565b6040519080825280602002602001820160405280156140cd57816020015b60608152602001906001900390816140b85790505b50905060005b826060015161ffff1681101561413157826040015181815181106140f9576140f9615330565b602002602001015182828151811061411357614113615330565b6020026020010181905250808061412990615385565b9150506140d3565b5081516020830151608084015160a08501516040517f8f79f730779e875ce76c428039cc2052b5b5918c2a55c598fab251c1198aec549461417794909390928792615ead565b60405180910390a1505b509392505050565b815160009080156141a6575060408201516001600160401b031615155b156141fa5761010083015163ffffffff16431080806141e757506101008401516141d69063ffffffff164361535c565b83602001516001600160401b031610155b156141f85750506040810151612f99565b505b503a92915050565b600080600060648560600151606461421a9190615f50565b6142279060ff1689615b0b565b6142319190615e29565b905061423f818787876145bc565b925092505094509492505050565b805160a09081015160009081526008602090815260408083208551948501519151939460609486948594859261428b928e928e929091879
101615c4f565b60408051601f19818403018152918152815160209283012084516000908152600c90935291205490915081146142fe5750505460408051808201909152601081526f756e6b6e6f776e2063616c6c6261636b60801b60208201526001955093506001600160601b031691508390506145b1565b50614307614a5c565b600061433c7f00000000000000000000000000000000000000000000000000000000000000006001600160401b038e16615e29565b6040805160808101825263ffffffff909216825262ffffff8d1660208084019190915285015161ffff16828201528401516001600160a01b0316606082015291508990506143d0575050604080518082019091526016815275756e617661696c61626c652072616e646f6d6e65737360501b60208201529054600195509093506001600160601b03169150600090506145b1565b60006143e28360000151838c8f6137ae565b606080840151855191860151604051939450909260009263d21ea8fd60e01b9261441192879190602401615f69565b60408051601f198184030181529190526020810180516001600160e01b03166001600160e01b0319909316929092179091526004805461ff00191661010017905590506000805a9050600061447c8e60000151608001516001600160601b0316896040015186614630565b9093509050806144b1578d516080015160405163aad1598360e01b8152610a52916001600160601b0316908490600401615758565b506000610bb85a6144c29190615ade565b6004805461ff00191690559050818110156144eb576144eb6144e4828461535c565b8f5161466f565b8854600160601b90046001600160401b031689600c61450983615f94565b82546001600160401b039182166101009390930a92830291909202199091161790555087516000908152600c60205260408120558261457f5760408051808201909152601081526f195e1958dd5d1a5bdb8819985a5b195960821b60208201528954600191906001600160601b0316600061459f565b604080516020810190915260008082528a549091906001600160601b0316825b9c509c509c509c505050505050505050505b945094509450949050565b6000808085156145cc57856145d6565b6145d6858561489b565b90506000816145ed89670de0b6b3a7640000615b0b565b6145f79190615e29565b9050676765c793fa10079d601b1b8111156146245760405162de437160e81b815260040160405180910390fd5b97909650945050505050565b6000805a610bb8811061466657610bb881039050856040820482031115614666576000808551602087016000898bf19250600191505b5093509391505
0565b80608001516001600160601b0316821115614688575050565b6004546000906064906146a590600160201b900460ff1682615fb7565b60ff168360c001518585608001516001600160601b03166146c6919061535c565b6146d09190615b0b565b6146da9190615b0b565b6146e49190615e29565b60e080840151604080516101408101825260045460ff80821615158352610100808304821615156020808601919091526201000084048316151585870152630100000084048316606080870191909152600160201b80860490941660808088019190915263ffffffff600160281b8704811660a0890152600160481b8704811660c0890152600160681b870481169a88019a909a52600160881b9095048916928601929092526005546001600160601b039081166101208701528651948501875260065498891685526001600160401b03938904841691850191909152600160601b880490921694830194909452600160a01b909504909416918401919091529293506000926147f792859291906145bc565b5060a08401516000908152600860205260408120805492935083929091906148299084906001600160601b0316615abe565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555080600260008282829054906101000a90046001600160601b03166148719190615abe565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555050505050565b60a0820151606082015160009190600163ffffffff831611908180156148d7575084516148ce9063ffffffff164261535c565b8363ffffffff16105b156148e457506101208501515b6001600160601b031695945050505050565b604051806080016040528060006001600160401b03168152602001600062ffffff16815260200160008152602001600081525090565b6040518061010001604052806008906020820280368337509192915050565b6001830191839082156149d95791602002820160005b838211156149a857833562ffffff1683826101000a81548162ffffff021916908362ffffff1602179055509260200192600301602081600201049283019260010302614961565b80156149d75782816101000a81549062ffffff02191690556003016020816002010492830192600103026149a8565b505b506149e5929150614a83565b5090565b8280548282559060005260206000209081019282156149d9579160200282015b828111156149d957825182546001600160a01b0319166001600160a01b03909116178255602090920191600190910190614a09565b5080546000825590600052602060002090810190610a5
b9190614a83565b60408051608081018252600080825260208201819052918101829052606081019190915290565b5b808211156149e55760008155600101614a84565b6001600160a01b0381168114610a5b57600080fd5b8035614ab881614a98565b919050565b600060208284031215614acf57600080fd5b8135614ada81614a98565b9392505050565b60008060408385031215614af457600080fd5b823591506020830135614b0681614a98565b809150509250929050565b600060208284031215614b2357600080fd5b5035919050565b60006101408284031215614b3d57600080fd5b50919050565b803561ffff81168114614ab857600080fd5b803562ffffff81168114614ab857600080fd5b634e487b7160e01b600052604160045260246000fd5b604080519081016001600160401b0381118282101715614ba057614ba0614b68565b60405290565b60405161010081016001600160401b0381118282101715614ba057614ba0614b68565b60405160a081016001600160401b0381118282101715614ba057614ba0614b68565b604051602081016001600160401b0381118282101715614ba057614ba0614b68565b604051601f8201601f191681016001600160401b0381118282101715614c3557614c35614b68565b604052919050565b600082601f830112614c4e57600080fd5b81356001600160401b03811115614c6757614c67614b68565b614c7a601f8201601f1916602001614c0d565b818152846020838601011115614c8f57600080fd5b816020850160208301376000918101602001919091529392505050565b60008060008060808587031215614cc257600080fd5b84359350614cd260208601614b43565b9250614ce060408601614b55565b915060608501356001600160401b03811115614cfb57600080fd5b614d0787828801614c3d565b91505092959194509250565b60008083601f840112614d2557600080fd5b5081356001600160401b03811115614d3c57600080fd5b6020830191508360208260051b850101111561346357600080fd5b60008060008060408587031215614d6d57600080fd5b84356001600160401b0380821115614d8457600080fd5b614d9088838901614d13565b90965094506020870135915080821115614da957600080fd5b50614db687828801614d13565b95989497509550505050565b80356001600160401b0381168114614ab857600080fd5b600080600080600060808688031215614df157600080fd5b85356001600160401b03811115614e0757600080fd5b614e1388828901614d13565b90965094505060208601356001600160c01b0381168114614e3357600080fd5b9250614e41604087016
14dc2565b9150614e4f60608701614dc2565b90509295509295909350565b6001600160a01b0391909116815260200190565b6101008101818360005b6008811015614e9d57815162ffffff16835260209283019290910190600101614e79565b50505092915050565b63ffffffff81168114610a5b57600080fd5b8035614ab881614ea6565b600060408284031215614ed557600080fd5b614edd614b7e565b8235614ee881614ea6565b81526020830135614ef881614ea6565b60208201529392505050565b8015158114610a5b57600080fd5b8035614ab881614f04565b600060208284031215614f2f57600080fd5b8135614ada81614f04565b6000610100808385031215614f4e57600080fd5b838184011115614f5d57600080fd5b509092915050565b60008060008060808587031215614f7b57600080fd5b843593506020850135614f8d81614ea6565b925060408501356001600160401b0380821115614fa957600080fd5b614fb588838901614c3d565b93506060870135915080821115614fcb57600080fd5b50614d0787828801614c3d565b60008083601f840112614fea57600080fd5b5081356001600160401b0381111561500157600080fd5b60208301915083602082850101111561346357600080fd5b6000806000806060858703121561502f57600080fd5b843561503a81614a98565b93506020850135925060408501356001600160401b0381111561505c57600080fd5b614db687828801614fd8565b60008060006060848603121561507d57600080fd5b833592506020840135915060408401356001600160401b038111156150a157600080fd5b6150ad86828701614c3d565b9150509250925092565b600081518084526020808501945080840160005b838110156150e7578151875295820195908201906001016150cb565b509495945050505050565b602081526000614ada60208301846150b7565b60008060006040848603121561511a57600080fd5b833561512581614a98565b925060208401356001600160401b0381111561514057600080fd5b61514c86828701614fd8565b9497909650939450505050565b6000806020838503121561516c57600080fd5b82356001600160401b0381111561518257600080fd5b61518e85828601614fd8565b90969095509350505050565b60008060008060008060c087890312156151b357600080fd5b863595506151c360208801614b43565b94506151d160408801614b55565b935060608701356151e181614ea6565b925060808701356001600160401b03808211156151fd57600080fd5b6152098a838b01614c3d565b935060a089013591508082111561521f57600080fd5b5061522c898
28a01614c3d565b9150509295509295509295565b600081518084526020808501945080840160005b838110156150e75781516001600160a01b03168752958201959082019060010161524d565b6001600160601b03851681526001600160401b03841660208201526001600160a01b03831660408201526080606082018190526000906152b490830184615239565b9695505050505050565b600080604083850312156152d157600080fd5b82356152dc81614a98565b946020939093013593505050565b600080604083850312156152fd57600080fd5b8235915060208301356001600160401b0381111561531a57600080fd5b61532685828601614c3d565b9150509250929050565b634e487b7160e01b600052603260045260246000fd5b634e487b7160e01b600052601160045260246000fd5b81810381811115612f9957612f99615346565b634e487b7160e01b600052603160045260246000fd5b60006001820161539757615397615346565b5060010190565b6001600160601b038281168282160390808211156153be576153be615346565b5092915050565b6001600160a01b03929092168252602082015260400190565b6000602082840312156153f057600080fd5b8151614ada81614f04565b6001600160a01b039290921682526001600160601b0316602082015260400190565b60ff81168114610a5b57600080fd5b8035614ab88161541d565b60006020828403121561544957600080fd5b8135614ada8161541d565b60008135612f9981614f04565b60008135612f998161541d565b60008135612f9981614ea6565b6001600160601b0381168114610a5b57600080fd5b60008135612f998161547b565b81356154a881614f04565b815490151560ff1660ff19919091161781556154e36154c960208401615454565b82805461ff00191691151560081b61ff0016919091179055565b61550e6154f260408401615454565b82805462ff0000191691151560101b62ff000016919091179055565b61553761551d60608401615461565b825463ff000000191660189190911b63ff00000016178255565b61556461554660808401615461565b82805460ff60201b191660209290921b60ff60201b16919091179055565b61559761557360a0840161546e565b82805463ffffffff60281b191660289290921b63ffffffff60281b16919091179055565b6155ca6155a660c0840161546e565b82805463ffffffff60481b191660489290921b63ffffffff60481b16919091179055565b6155fd6155d960e0840161546e565b82805463ffffffff60681b191660689290921b63ffffffff60681b16919091179055565b61563161560d610100840161546e565
b82805463ffffffff60881b191660889290921b63ffffffff60881b16919091179055565b6116786156416101208401615490565b6001830180546001600160601b0319166001600160601b0392909216919091179055565b8035614ab88161547b565b61014081016156888261568285614f12565b15159052565b61569460208401614f12565b151560208301526156a760408401614f12565b151560408301526156ba6060840161542c565b60ff1660608301526156ce6080840161542c565b60ff1660808301526156e260a08401614eb8565b63ffffffff1660a08301526156f960c08401614eb8565b63ffffffff1660c083015261571060e08401614eb8565b63ffffffff1660e0830152610100615729848201614eb8565b63ffffffff1690830152610120615741848201615665565b6001600160601b038116848301525b505092915050565b918252602082015260400190565b6000823560be1983360301811261577c57600080fd5b9190910192915050565b600082601f83011261579757600080fd5b813560206001600160401b03808311156157b3576157b3614b68565b8260051b6157c2838201614c0d565b93845285810183019383810190888611156157dc57600080fd5b84880192505b85831015611dc6578235848111156157f957600080fd5b8801601f196040828c038201121561581057600080fd5b615818614b7e565b878301358781111561582957600080fd5b8301610100818e038401121561583e57600080fd5b615846614ba6565b925088810135835261585a60408201614b43565b8984015261586a60608201614aad565b604084015260808101358881111561588157600080fd5b61588f8e8b83850101614c3d565b6060850152506158a160a08201615665565b608084015260c081013560a084015260e081013560c084015261010081013560e0840152508181526158d560408401615665565b818901528452505091840191908401906157e2565b600081360360c08112156158fd57600080fd5b615905614bc9565b61590e84614dc2565b8152602061591d818601614b55565b828201526040603f198401121561593357600080fd5b61593b614beb565b925036605f86011261594c57600080fd5b615954614b7e565b80608087013681111561596657600080fd5b604088015b81811015615982578035845292840192840161596b565b50908552604084019490945250509035906001600160401b038211156159a757600080fd5b6159b336838601615786565b60608201526159c460a08501614f12565b60808201529392505050565b600061ffff8083168181036159e7576159e7615346565b6001019392505050565b6000608
080830160018060401b038089168552602060018060c01b038916818701526040828916818801526060858189015284895180875260a08a019150848b01965060005b81811015615a735787518051881684528681015162ffffff16878501528581015186850152840151848401529685019691880191600101615a37565b50909d9c50505050505050505050505050565b60006001600160401b038281166002600160401b031981016159e7576159e7615346565b61ffff929092168252602082015260400190565b6001600160601b038181168382160190808211156153be576153be615346565b80820180821115612f9957612f99615346565b6001600160a01b0392831681529116602082015260400190565b8082028115828204841417612f9957612f99615346565b600060408284031215615b3457600080fd5b615b3c614b7e565b8235615b478161541d565b81526020928301359281019290925250919050565b600060208284031215615b6e57600080fd5b8151614ada8161541d565b6020815260ff82511660208201526020820151604082015260018060a01b0360408301511660608201526000606083015160a06080840152615bbe60c0840182615239565b608094909401516001600160601b031660a093909301929092525090919050565b6000815180845260005b81811015615c0557602081850181015186830182015201615be9565b506000602082860101526020601f19601f83011685010191505092915050565b602081526000614ada6020830184615bdf565b9182526001600160a01b0316602082015260400190565b60018060401b038516815262ffffff84166020820152826040820152608060608201528151608082015261ffff60208301511660a082015260018060a01b0360408301511660c0820152600060608301516101008060e0850152615cb7610180850183615bdf565b91506080850151615cd2828601826001600160601b03169052565b505060a084015161012084015260c084015161014084015260e08401516101608401528091505095945050505050565b6001600160a01b038c1681526001600160401b038b16602082015262ffffff8a1660408201526060810189905261ffff8816608082015263ffffffff871660a082015260c0810186905260e081018590526101606101008201819052600090615d6d83820187615bdf565b61012084019590955250506001600160601b0391909116610140909101529998505050505050505050565b600060208284031215615daa57600080fd5b5051919050565b65ffffffffffff8181168382160190808211156153be576153be615346565b6001600160601b03929092168
252602082015260400190565b815160408201908260005b6002811015614e9d578251825260209283019290910190600101615df4565b634e487b7160e01b600052601260045260246000fd5b600082615e3857615e38615e13565b500490565b6001600160401b0381811683821602808216919082811461575057615750615346565b600082615e6f57615e6f615e13565b500690565b600081518084526020808501945080840160005b838110156150e75781516001600160601b031687529582019590820190600101615e88565b60a081526000615ec060a08301886150b7565b602083820381850152615ed38289615bdf565b915083820360408501528187518084528284019150828160051b850101838a0160005b83811015615f2457601f19878403018552615f12838351615bdf565b94860194925090850190600101615ef6565b50508681036060880152615f38818a615e74565b9450505050508281036080840152611dc681856150b7565b60ff8181168382160190811115612f9957612f99615346565b838152606060208201526000615f8260608301856150b7565b82810360408401526152b48185615bdf565b60006001600160401b03821680615fad57615fad615346565b6000190192915050565b60ff8281168282160390811115612f9957612f9961534656fea164736f6c6343000813000a", +} + +var VRFCoordinatorABI = VRFCoordinatorMetaData.ABI + +var VRFCoordinatorBin = VRFCoordinatorMetaData.Bin + +func DeployVRFCoordinator(auth *bind.TransactOpts, backend bind.ContractBackend, beaconPeriodBlocksArg *big.Int, linkToken common.Address) (common.Address, *types.Transaction, *VRFCoordinator, error) { + parsed, err := VRFCoordinatorMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(VRFCoordinatorBin), backend, beaconPeriodBlocksArg, linkToken) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFCoordinator{VRFCoordinatorCaller: VRFCoordinatorCaller{contract: contract}, VRFCoordinatorTransactor: VRFCoordinatorTransactor{contract: contract}, VRFCoordinatorFilterer: VRFCoordinatorFilterer{contract: 
contract}}, nil +} + +type VRFCoordinator struct { + address common.Address + abi abi.ABI + VRFCoordinatorCaller + VRFCoordinatorTransactor + VRFCoordinatorFilterer +} + +type VRFCoordinatorCaller struct { + contract *bind.BoundContract +} + +type VRFCoordinatorTransactor struct { + contract *bind.BoundContract +} + +type VRFCoordinatorFilterer struct { + contract *bind.BoundContract +} + +type VRFCoordinatorSession struct { + Contract *VRFCoordinator + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorCallerSession struct { + Contract *VRFCoordinatorCaller + CallOpts bind.CallOpts +} + +type VRFCoordinatorTransactorSession struct { + Contract *VRFCoordinatorTransactor + TransactOpts bind.TransactOpts +} + +type VRFCoordinatorRaw struct { + Contract *VRFCoordinator +} + +type VRFCoordinatorCallerRaw struct { + Contract *VRFCoordinatorCaller +} + +type VRFCoordinatorTransactorRaw struct { + Contract *VRFCoordinatorTransactor +} + +func NewVRFCoordinator(address common.Address, backend bind.ContractBackend) (*VRFCoordinator, error) { + abi, err := abi.JSON(strings.NewReader(VRFCoordinatorABI)) + if err != nil { + return nil, err + } + contract, err := bindVRFCoordinator(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFCoordinator{address: address, abi: abi, VRFCoordinatorCaller: VRFCoordinatorCaller{contract: contract}, VRFCoordinatorTransactor: VRFCoordinatorTransactor{contract: contract}, VRFCoordinatorFilterer: VRFCoordinatorFilterer{contract: contract}}, nil +} + +func NewVRFCoordinatorCaller(address common.Address, caller bind.ContractCaller) (*VRFCoordinatorCaller, error) { + contract, err := bindVRFCoordinator(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorCaller{contract: contract}, nil +} + +func NewVRFCoordinatorTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFCoordinatorTransactor, error) { + contract, err := 
bindVRFCoordinator(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorTransactor{contract: contract}, nil +} + +func NewVRFCoordinatorFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFCoordinatorFilterer, error) { + contract, err := bindVRFCoordinator(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFCoordinatorFilterer{contract: contract}, nil +} + +func bindVRFCoordinator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := VRFCoordinatorMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_VRFCoordinator *VRFCoordinatorRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinator.Contract.VRFCoordinatorCaller.contract.Call(opts, result, method, params...) +} + +func (_VRFCoordinator *VRFCoordinatorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinator.Contract.VRFCoordinatorTransactor.contract.Transfer(opts) +} + +func (_VRFCoordinator *VRFCoordinatorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinator.Contract.VRFCoordinatorTransactor.contract.Transact(opts, method, params...) +} + +func (_VRFCoordinator *VRFCoordinatorCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _VRFCoordinator.Contract.contract.Call(opts, result, method, params...) 
+} + +func (_VRFCoordinator *VRFCoordinatorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinator.Contract.contract.Transfer(opts) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinator.Contract.contract.Transact(opts, method, params...) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "MAX_CONSUMERS") + + if err != nil { + return *new(uint16), err + } + + out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) MAXCONSUMERS() (uint16, error) { + return _VRFCoordinator.Contract.MAXCONSUMERS(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) MAXCONSUMERS() (uint16, error) { + return _VRFCoordinator.Contract.MAXCONSUMERS(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) MAXNUMWORDS(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "MAX_NUM_WORDS") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) MAXNUMWORDS() (*big.Int, error) { + return _VRFCoordinator.Contract.MAXNUMWORDS(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) MAXNUMWORDS() (*big.Int, error) { + return _VRFCoordinator.Contract.MAXNUMWORDS(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) NUMCONFDELAYS(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "NUM_CONF_DELAYS") + + if err != nil { + return *new(uint8), err + } + + out0 := 
*abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) NUMCONFDELAYS() (uint8, error) { + return _VRFCoordinator.Contract.NUMCONFDELAYS(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) NUMCONFDELAYS() (uint8, error) { + return _VRFCoordinator.Contract.NUMCONFDELAYS(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) GetCallbackMemo(opts *bind.CallOpts, requestId *big.Int) ([32]byte, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "getCallbackMemo", requestId) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) GetCallbackMemo(requestId *big.Int) ([32]byte, error) { + return _VRFCoordinator.Contract.GetCallbackMemo(&_VRFCoordinator.CallOpts, requestId) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) GetCallbackMemo(requestId *big.Int) ([32]byte, error) { + return _VRFCoordinator.Contract.GetCallbackMemo(&_VRFCoordinator.CallOpts, requestId) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) GetConfirmationDelays(opts *bind.CallOpts) ([8]*big.Int, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "getConfirmationDelays") + + if err != nil { + return *new([8]*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new([8]*big.Int)).(*[8]*big.Int) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) GetConfirmationDelays() ([8]*big.Int, error) { + return _VRFCoordinator.Contract.GetConfirmationDelays(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) GetConfirmationDelays() ([8]*big.Int, error) { + return _VRFCoordinator.Contract.GetConfirmationDelays(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) GetFee(opts *bind.CallOpts, arg0 *big.Int, 
arg1 []byte) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "getFee", arg0, arg1) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) GetFee(arg0 *big.Int, arg1 []byte) (*big.Int, error) { + return _VRFCoordinator.Contract.GetFee(&_VRFCoordinator.CallOpts, arg0, arg1) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) GetFee(arg0 *big.Int, arg1 []byte) (*big.Int, error) { + return _VRFCoordinator.Contract.GetFee(&_VRFCoordinator.CallOpts, arg0, arg1) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) GetFulfillmentFee(opts *bind.CallOpts, arg0 *big.Int, callbackGasLimit uint32, arguments []byte, arg3 []byte) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "getFulfillmentFee", arg0, callbackGasLimit, arguments, arg3) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) GetFulfillmentFee(arg0 *big.Int, callbackGasLimit uint32, arguments []byte, arg3 []byte) (*big.Int, error) { + return _VRFCoordinator.Contract.GetFulfillmentFee(&_VRFCoordinator.CallOpts, arg0, callbackGasLimit, arguments, arg3) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) GetFulfillmentFee(arg0 *big.Int, callbackGasLimit uint32, arguments []byte, arg3 []byte) (*big.Int, error) { + return _VRFCoordinator.Contract.GetFulfillmentFee(&_VRFCoordinator.CallOpts, arg0, callbackGasLimit, arguments, arg3) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) GetSubscription(opts *bind.CallOpts, subId *big.Int) (GetSubscription, + + error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "getSubscription", subId) + + outstruct := new(GetSubscription) + if err != nil { + return *outstruct, err + } + 
+ outstruct.Balance = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.PendingFulfillments = *abi.ConvertType(out[1], new(uint64)).(*uint64) + outstruct.Owner = *abi.ConvertType(out[2], new(common.Address)).(*common.Address) + outstruct.Consumers = *abi.ConvertType(out[3], new([]common.Address)).(*[]common.Address) + + return *outstruct, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) GetSubscription(subId *big.Int) (GetSubscription, + + error) { + return _VRFCoordinator.Contract.GetSubscription(&_VRFCoordinator.CallOpts, subId) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) GetSubscription(subId *big.Int) (GetSubscription, + + error) { + return _VRFCoordinator.Contract.GetSubscription(&_VRFCoordinator.CallOpts, subId) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) GetSubscriptionLinkBalance(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "getSubscriptionLinkBalance") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) GetSubscriptionLinkBalance() (*big.Int, error) { + return _VRFCoordinator.Contract.GetSubscriptionLinkBalance(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) GetSubscriptionLinkBalance() (*big.Int, error) { + return _VRFCoordinator.Contract.GetSubscriptionLinkBalance(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) IBeaconPeriodBlocks(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "i_beaconPeriodBlocks") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) IBeaconPeriodBlocks() (*big.Int, error) { + return 
_VRFCoordinator.Contract.IBeaconPeriodBlocks(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) IBeaconPeriodBlocks() (*big.Int, error) { + return _VRFCoordinator.Contract.IBeaconPeriodBlocks(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) ILink(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "i_link") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) ILink() (common.Address, error) { + return _VRFCoordinator.Contract.ILink(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) ILink() (common.Address, error) { + return _VRFCoordinator.Contract.ILink(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) MigrationVersion(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "migrationVersion") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) MigrationVersion() (uint8, error) { + return _VRFCoordinator.Contract.MigrationVersion(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) MigrationVersion() (uint8, error) { + return _VRFCoordinator.Contract.MigrationVersion(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) OnMigration(opts *bind.CallOpts, arg0 []byte) error { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "onMigration", arg0) + + if err != nil { + return err + } + + return err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) OnMigration(arg0 []byte) error { + return _VRFCoordinator.Contract.OnMigration(&_VRFCoordinator.CallOpts, 
arg0) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) OnMigration(arg0 []byte) error { + return _VRFCoordinator.Contract.OnMigration(&_VRFCoordinator.CallOpts, arg0) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) Owner() (common.Address, error) { + return _VRFCoordinator.Contract.Owner(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) Owner() (common.Address, error) { + return _VRFCoordinator.Contract.Owner(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) SCallbackConfig(opts *bind.CallOpts) (SCallbackConfig, + + error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "s_callbackConfig") + + outstruct := new(SCallbackConfig) + if err != nil { + return *outstruct, err + } + + outstruct.MaxCallbackGasLimit = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.MaxCallbackArgumentsLength = *abi.ConvertType(out[1], new(uint32)).(*uint32) + + return *outstruct, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) SCallbackConfig() (SCallbackConfig, + + error) { + return _VRFCoordinator.Contract.SCallbackConfig(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) SCallbackConfig() (SCallbackConfig, + + error) { + return _VRFCoordinator.Contract.SCallbackConfig(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) SCoordinatorConfig(opts *bind.CallOpts) (SCoordinatorConfig, + + error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "s_coordinatorConfig") + + outstruct := new(SCoordinatorConfig) + if err != nil { + return *outstruct, 
err + } + + outstruct.UseReasonableGasPrice = *abi.ConvertType(out[0], new(bool)).(*bool) + outstruct.ReentrancyLock = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.Paused = *abi.ConvertType(out[2], new(bool)).(*bool) + outstruct.PremiumPercentage = *abi.ConvertType(out[3], new(uint8)).(*uint8) + outstruct.UnusedGasPenaltyPercent = *abi.ConvertType(out[4], new(uint8)).(*uint8) + outstruct.StalenessSeconds = *abi.ConvertType(out[5], new(uint32)).(*uint32) + outstruct.RedeemableRequestGasOverhead = *abi.ConvertType(out[6], new(uint32)).(*uint32) + outstruct.CallbackRequestGasOverhead = *abi.ConvertType(out[7], new(uint32)).(*uint32) + outstruct.ReasonableGasPriceStalenessBlocks = *abi.ConvertType(out[8], new(uint32)).(*uint32) + outstruct.FallbackWeiPerUnitLink = *abi.ConvertType(out[9], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) SCoordinatorConfig() (SCoordinatorConfig, + + error) { + return _VRFCoordinator.Contract.SCoordinatorConfig(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) SCoordinatorConfig() (SCoordinatorConfig, + + error) { + return _VRFCoordinator.Contract.SCoordinatorConfig(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) SPendingRequests(opts *bind.CallOpts, arg0 *big.Int) (SPendingRequests, + + error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "s_pendingRequests", arg0) + + outstruct := new(SPendingRequests) + if err != nil { + return *outstruct, err + } + + outstruct.SlotNumber = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.ConfirmationDelay = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + outstruct.NumWords = *abi.ConvertType(out[2], new(uint16)).(*uint16) + outstruct.Requester = *abi.ConvertType(out[3], new(common.Address)).(*common.Address) + + return *outstruct, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) SPendingRequests(arg0 *big.Int) 
(SPendingRequests, + + error) { + return _VRFCoordinator.Contract.SPendingRequests(&_VRFCoordinator.CallOpts, arg0) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) SPendingRequests(arg0 *big.Int) (SPendingRequests, + + error) { + return _VRFCoordinator.Contract.SPendingRequests(&_VRFCoordinator.CallOpts, arg0) +} + +func (_VRFCoordinator *VRFCoordinatorCaller) SProducer(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _VRFCoordinator.contract.Call(opts, &out, "s_producer") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_VRFCoordinator *VRFCoordinatorSession) SProducer() (common.Address, error) { + return _VRFCoordinator.Contract.SProducer(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorCallerSession) SProducer() (common.Address, error) { + return _VRFCoordinator.Contract.SProducer(&_VRFCoordinator.CallOpts) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "acceptOwnership") +} + +func (_VRFCoordinator *VRFCoordinatorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFCoordinator.Contract.AcceptOwnership(&_VRFCoordinator.TransactOpts) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _VRFCoordinator.Contract.AcceptOwnership(&_VRFCoordinator.TransactOpts) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "acceptSubscriptionOwnerTransfer", subId) +} + +func (_VRFCoordinator *VRFCoordinatorSession) AcceptSubscriptionOwnerTransfer(subId *big.Int) (*types.Transaction, error) { + return 
_VRFCoordinator.Contract.AcceptSubscriptionOwnerTransfer(&_VRFCoordinator.TransactOpts, subId) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) AcceptSubscriptionOwnerTransfer(subId *big.Int) (*types.Transaction, error) { + return _VRFCoordinator.Contract.AcceptSubscriptionOwnerTransfer(&_VRFCoordinator.TransactOpts, subId) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) AddConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "addConsumer", subId, consumer) +} + +func (_VRFCoordinator *VRFCoordinatorSession) AddConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.AddConsumer(&_VRFCoordinator.TransactOpts, subId, consumer) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) AddConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.AddConsumer(&_VRFCoordinator.TransactOpts, subId, consumer) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) BatchTransferLink(opts *bind.TransactOpts, recipients []common.Address, paymentsInJuels []*big.Int) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "batchTransferLink", recipients, paymentsInJuels) +} + +func (_VRFCoordinator *VRFCoordinatorSession) BatchTransferLink(recipients []common.Address, paymentsInJuels []*big.Int) (*types.Transaction, error) { + return _VRFCoordinator.Contract.BatchTransferLink(&_VRFCoordinator.TransactOpts, recipients, paymentsInJuels) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) BatchTransferLink(recipients []common.Address, paymentsInJuels []*big.Int) (*types.Transaction, error) { + return _VRFCoordinator.Contract.BatchTransferLink(&_VRFCoordinator.TransactOpts, recipients, paymentsInJuels) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) CancelSubscription(opts *bind.TransactOpts, subId *big.Int, 
to common.Address) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "cancelSubscription", subId, to) +} + +func (_VRFCoordinator *VRFCoordinatorSession) CancelSubscription(subId *big.Int, to common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.CancelSubscription(&_VRFCoordinator.TransactOpts, subId, to) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) CancelSubscription(subId *big.Int, to common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.CancelSubscription(&_VRFCoordinator.TransactOpts, subId, to) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "createSubscription") +} + +func (_VRFCoordinator *VRFCoordinatorSession) CreateSubscription() (*types.Transaction, error) { + return _VRFCoordinator.Contract.CreateSubscription(&_VRFCoordinator.TransactOpts) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) CreateSubscription() (*types.Transaction, error) { + return _VRFCoordinator.Contract.CreateSubscription(&_VRFCoordinator.TransactOpts) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) DeregisterMigratableCoordinator(opts *bind.TransactOpts, target common.Address) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "deregisterMigratableCoordinator", target) +} + +func (_VRFCoordinator *VRFCoordinatorSession) DeregisterMigratableCoordinator(target common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.DeregisterMigratableCoordinator(&_VRFCoordinator.TransactOpts, target) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) DeregisterMigratableCoordinator(target common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.DeregisterMigratableCoordinator(&_VRFCoordinator.TransactOpts, target) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) 
Migrate(opts *bind.TransactOpts, newCoordinator common.Address, encodedRequest []byte) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "migrate", newCoordinator, encodedRequest) +} + +func (_VRFCoordinator *VRFCoordinatorSession) Migrate(newCoordinator common.Address, encodedRequest []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.Migrate(&_VRFCoordinator.TransactOpts, newCoordinator, encodedRequest) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) Migrate(newCoordinator common.Address, encodedRequest []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.Migrate(&_VRFCoordinator.TransactOpts, newCoordinator, encodedRequest) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "onTokenTransfer", arg0, amount, data) +} + +func (_VRFCoordinator *VRFCoordinatorSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.OnTokenTransfer(&_VRFCoordinator.TransactOpts, arg0, amount, data) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) OnTokenTransfer(arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.OnTokenTransfer(&_VRFCoordinator.TransactOpts, arg0, amount, data) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) ProcessVRFOutputs(opts *bind.TransactOpts, vrfOutputs []VRFBeaconTypesVRFOutput, juelsPerFeeCoin *big.Int, reasonableGasPrice uint64, blockHeight uint64) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "processVRFOutputs", vrfOutputs, juelsPerFeeCoin, reasonableGasPrice, blockHeight) +} + +func (_VRFCoordinator *VRFCoordinatorSession) ProcessVRFOutputs(vrfOutputs []VRFBeaconTypesVRFOutput, juelsPerFeeCoin *big.Int, 
reasonableGasPrice uint64, blockHeight uint64) (*types.Transaction, error) { + return _VRFCoordinator.Contract.ProcessVRFOutputs(&_VRFCoordinator.TransactOpts, vrfOutputs, juelsPerFeeCoin, reasonableGasPrice, blockHeight) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) ProcessVRFOutputs(vrfOutputs []VRFBeaconTypesVRFOutput, juelsPerFeeCoin *big.Int, reasonableGasPrice uint64, blockHeight uint64) (*types.Transaction, error) { + return _VRFCoordinator.Contract.ProcessVRFOutputs(&_VRFCoordinator.TransactOpts, vrfOutputs, juelsPerFeeCoin, reasonableGasPrice, blockHeight) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) RedeemRandomness(opts *bind.TransactOpts, subID *big.Int, requestID *big.Int, arg2 []byte) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "redeemRandomness", subID, requestID, arg2) +} + +func (_VRFCoordinator *VRFCoordinatorSession) RedeemRandomness(subID *big.Int, requestID *big.Int, arg2 []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RedeemRandomness(&_VRFCoordinator.TransactOpts, subID, requestID, arg2) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) RedeemRandomness(subID *big.Int, requestID *big.Int, arg2 []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RedeemRandomness(&_VRFCoordinator.TransactOpts, subID, requestID, arg2) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) RegisterMigratableCoordinator(opts *bind.TransactOpts, target common.Address) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "registerMigratableCoordinator", target) +} + +func (_VRFCoordinator *VRFCoordinatorSession) RegisterMigratableCoordinator(target common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RegisterMigratableCoordinator(&_VRFCoordinator.TransactOpts, target) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) RegisterMigratableCoordinator(target common.Address) 
(*types.Transaction, error) { + return _VRFCoordinator.Contract.RegisterMigratableCoordinator(&_VRFCoordinator.TransactOpts, target) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) RemoveConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "removeConsumer", subId, consumer) +} + +func (_VRFCoordinator *VRFCoordinatorSession) RemoveConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RemoveConsumer(&_VRFCoordinator.TransactOpts, subId, consumer) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) RemoveConsumer(subId *big.Int, consumer common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RemoveConsumer(&_VRFCoordinator.TransactOpts, subId, consumer) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) RequestRandomness(opts *bind.TransactOpts, subID *big.Int, numWords uint16, confDelay *big.Int, arg3 []byte) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "requestRandomness", subID, numWords, confDelay, arg3) +} + +func (_VRFCoordinator *VRFCoordinatorSession) RequestRandomness(subID *big.Int, numWords uint16, confDelay *big.Int, arg3 []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RequestRandomness(&_VRFCoordinator.TransactOpts, subID, numWords, confDelay, arg3) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) RequestRandomness(subID *big.Int, numWords uint16, confDelay *big.Int, arg3 []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RequestRandomness(&_VRFCoordinator.TransactOpts, subID, numWords, confDelay, arg3) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) RequestRandomnessFulfillment(opts *bind.TransactOpts, subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte, arg5 []byte) (*types.Transaction, error) { + return 
_VRFCoordinator.contract.Transact(opts, "requestRandomnessFulfillment", subID, numWords, confDelay, callbackGasLimit, arguments, arg5) +} + +func (_VRFCoordinator *VRFCoordinatorSession) RequestRandomnessFulfillment(subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte, arg5 []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RequestRandomnessFulfillment(&_VRFCoordinator.TransactOpts, subID, numWords, confDelay, callbackGasLimit, arguments, arg5) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) RequestRandomnessFulfillment(subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte, arg5 []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RequestRandomnessFulfillment(&_VRFCoordinator.TransactOpts, subID, numWords, confDelay, callbackGasLimit, arguments, arg5) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "requestSubscriptionOwnerTransfer", subId, newOwner) +} + +func (_VRFCoordinator *VRFCoordinatorSession) RequestSubscriptionOwnerTransfer(subId *big.Int, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RequestSubscriptionOwnerTransfer(&_VRFCoordinator.TransactOpts, subId, newOwner) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) RequestSubscriptionOwnerTransfer(subId *big.Int, newOwner common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RequestSubscriptionOwnerTransfer(&_VRFCoordinator.TransactOpts, subId, newOwner) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) SetCallbackConfig(opts *bind.TransactOpts, config VRFCoordinatorCallbackConfig) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "setCallbackConfig", config) +} + +func 
(_VRFCoordinator *VRFCoordinatorSession) SetCallbackConfig(config VRFCoordinatorCallbackConfig) (*types.Transaction, error) { + return _VRFCoordinator.Contract.SetCallbackConfig(&_VRFCoordinator.TransactOpts, config) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) SetCallbackConfig(config VRFCoordinatorCallbackConfig) (*types.Transaction, error) { + return _VRFCoordinator.Contract.SetCallbackConfig(&_VRFCoordinator.TransactOpts, config) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) SetConfirmationDelays(opts *bind.TransactOpts, confDelays [8]*big.Int) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "setConfirmationDelays", confDelays) +} + +func (_VRFCoordinator *VRFCoordinatorSession) SetConfirmationDelays(confDelays [8]*big.Int) (*types.Transaction, error) { + return _VRFCoordinator.Contract.SetConfirmationDelays(&_VRFCoordinator.TransactOpts, confDelays) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) SetConfirmationDelays(confDelays [8]*big.Int) (*types.Transaction, error) { + return _VRFCoordinator.Contract.SetConfirmationDelays(&_VRFCoordinator.TransactOpts, confDelays) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) SetCoordinatorConfig(opts *bind.TransactOpts, coordinatorConfig VRFBeaconTypesCoordinatorConfig) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "setCoordinatorConfig", coordinatorConfig) +} + +func (_VRFCoordinator *VRFCoordinatorSession) SetCoordinatorConfig(coordinatorConfig VRFBeaconTypesCoordinatorConfig) (*types.Transaction, error) { + return _VRFCoordinator.Contract.SetCoordinatorConfig(&_VRFCoordinator.TransactOpts, coordinatorConfig) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) SetCoordinatorConfig(coordinatorConfig VRFBeaconTypesCoordinatorConfig) (*types.Transaction, error) { + return _VRFCoordinator.Contract.SetCoordinatorConfig(&_VRFCoordinator.TransactOpts, coordinatorConfig) +} + +func (_VRFCoordinator 
*VRFCoordinatorTransactor) SetPauseFlag(opts *bind.TransactOpts, pause bool) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "setPauseFlag", pause) +} + +func (_VRFCoordinator *VRFCoordinatorSession) SetPauseFlag(pause bool) (*types.Transaction, error) { + return _VRFCoordinator.Contract.SetPauseFlag(&_VRFCoordinator.TransactOpts, pause) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) SetPauseFlag(pause bool) (*types.Transaction, error) { + return _VRFCoordinator.Contract.SetPauseFlag(&_VRFCoordinator.TransactOpts, pause) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) SetProducer(opts *bind.TransactOpts, producer common.Address) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "setProducer", producer) +} + +func (_VRFCoordinator *VRFCoordinatorSession) SetProducer(producer common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.SetProducer(&_VRFCoordinator.TransactOpts, producer) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) SetProducer(producer common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.SetProducer(&_VRFCoordinator.TransactOpts, producer) +} + +func (_VRFCoordinator *VRFCoordinatorTransactor) TransferLink(opts *bind.TransactOpts, recipient common.Address, juelsAmount *big.Int) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "transferLink", recipient, juelsAmount) +} + +func (_VRFCoordinator *VRFCoordinatorSession) TransferLink(recipient common.Address, juelsAmount *big.Int) (*types.Transaction, error) { + return _VRFCoordinator.Contract.TransferLink(&_VRFCoordinator.TransactOpts, recipient, juelsAmount) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) TransferLink(recipient common.Address, juelsAmount *big.Int) (*types.Transaction, error) { + return _VRFCoordinator.Contract.TransferLink(&_VRFCoordinator.TransactOpts, recipient, juelsAmount) +} + +func 
(_VRFCoordinator *VRFCoordinatorTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "transferOwnership", to) +} + +func (_VRFCoordinator *VRFCoordinatorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.TransferOwnership(&_VRFCoordinator.TransactOpts, to) +} + +func (_VRFCoordinator *VRFCoordinatorTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _VRFCoordinator.Contract.TransferOwnership(&_VRFCoordinator.TransactOpts, to) +} + +type VRFCoordinatorCallbackConfigSetIterator struct { + Event *VRFCoordinatorCallbackConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorCallbackConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorCallbackConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorCallbackConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorCallbackConfigSetIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorCallbackConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorCallbackConfigSet struct { + NewConfig VRFCoordinatorCallbackConfig + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterCallbackConfigSet(opts *bind.FilterOpts) 
(*VRFCoordinatorCallbackConfigSetIterator, error) { + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "CallbackConfigSet") + if err != nil { + return nil, err + } + return &VRFCoordinatorCallbackConfigSetIterator{contract: _VRFCoordinator.contract, event: "CallbackConfigSet", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchCallbackConfigSet(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorCallbackConfigSet) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "CallbackConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorCallbackConfigSet) + if err := _VRFCoordinator.contract.UnpackLog(event, "CallbackConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseCallbackConfigSet(log types.Log) (*VRFCoordinatorCallbackConfigSet, error) { + event := new(VRFCoordinatorCallbackConfigSet) + if err := _VRFCoordinator.contract.UnpackLog(event, "CallbackConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorCoordinatorConfigSetIterator struct { + Event *VRFCoordinatorCoordinatorConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorCoordinatorConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorCoordinatorConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = 
err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorCoordinatorConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorCoordinatorConfigSetIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorCoordinatorConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorCoordinatorConfigSet struct { + CoordinatorConfig VRFBeaconTypesCoordinatorConfig + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterCoordinatorConfigSet(opts *bind.FilterOpts) (*VRFCoordinatorCoordinatorConfigSetIterator, error) { + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "CoordinatorConfigSet") + if err != nil { + return nil, err + } + return &VRFCoordinatorCoordinatorConfigSetIterator{contract: _VRFCoordinator.contract, event: "CoordinatorConfigSet", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchCoordinatorConfigSet(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorCoordinatorConfigSet) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "CoordinatorConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorCoordinatorConfigSet) + if err := _VRFCoordinator.contract.UnpackLog(event, "CoordinatorConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + 
+func (_VRFCoordinator *VRFCoordinatorFilterer) ParseCoordinatorConfigSet(log types.Log) (*VRFCoordinatorCoordinatorConfigSet, error) { + event := new(VRFCoordinatorCoordinatorConfigSet) + if err := _VRFCoordinator.contract.UnpackLog(event, "CoordinatorConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorCoordinatorDeregisteredIterator struct { + Event *VRFCoordinatorCoordinatorDeregistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorCoordinatorDeregisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorCoordinatorDeregistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorCoordinatorDeregistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorCoordinatorDeregisteredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorCoordinatorDeregisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorCoordinatorDeregistered struct { + CoordinatorAddress common.Address + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterCoordinatorDeregistered(opts *bind.FilterOpts) (*VRFCoordinatorCoordinatorDeregisteredIterator, error) { + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "CoordinatorDeregistered") + if err != nil { + return nil, err + } + return &VRFCoordinatorCoordinatorDeregisteredIterator{contract: 
_VRFCoordinator.contract, event: "CoordinatorDeregistered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchCoordinatorDeregistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorCoordinatorDeregistered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "CoordinatorDeregistered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorCoordinatorDeregistered) + if err := _VRFCoordinator.contract.UnpackLog(event, "CoordinatorDeregistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseCoordinatorDeregistered(log types.Log) (*VRFCoordinatorCoordinatorDeregistered, error) { + event := new(VRFCoordinatorCoordinatorDeregistered) + if err := _VRFCoordinator.contract.UnpackLog(event, "CoordinatorDeregistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorCoordinatorRegisteredIterator struct { + Event *VRFCoordinatorCoordinatorRegistered + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorCoordinatorRegisteredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorCoordinatorRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(VRFCoordinatorCoordinatorRegistered) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorCoordinatorRegisteredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorCoordinatorRegisteredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorCoordinatorRegistered struct { + CoordinatorAddress common.Address + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterCoordinatorRegistered(opts *bind.FilterOpts) (*VRFCoordinatorCoordinatorRegisteredIterator, error) { + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "CoordinatorRegistered") + if err != nil { + return nil, err + } + return &VRFCoordinatorCoordinatorRegisteredIterator{contract: _VRFCoordinator.contract, event: "CoordinatorRegistered", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchCoordinatorRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorCoordinatorRegistered) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "CoordinatorRegistered") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorCoordinatorRegistered) + if err := _VRFCoordinator.contract.UnpackLog(event, "CoordinatorRegistered", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseCoordinatorRegistered(log types.Log) (*VRFCoordinatorCoordinatorRegistered, error) { + event 
:= new(VRFCoordinatorCoordinatorRegistered) + if err := _VRFCoordinator.contract.UnpackLog(event, "CoordinatorRegistered", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorMigrationCompletedIterator struct { + Event *VRFCoordinatorMigrationCompleted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorMigrationCompletedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorMigrationCompleted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorMigrationCompleted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorMigrationCompletedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorMigrationCompletedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorMigrationCompleted struct { + NewVersion uint8 + NewCoordinator common.Address + SubID *big.Int + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterMigrationCompleted(opts *bind.FilterOpts, newVersion []uint8, subID []*big.Int) (*VRFCoordinatorMigrationCompletedIterator, error) { + + var newVersionRule []interface{} + for _, newVersionItem := range newVersion { + newVersionRule = append(newVersionRule, newVersionItem) + } + + var subIDRule []interface{} + for _, subIDItem := range subID { + subIDRule = append(subIDRule, subIDItem) + } + + logs, sub, err := 
_VRFCoordinator.contract.FilterLogs(opts, "MigrationCompleted", newVersionRule, subIDRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorMigrationCompletedIterator{contract: _VRFCoordinator.contract, event: "MigrationCompleted", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchMigrationCompleted(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorMigrationCompleted, newVersion []uint8, subID []*big.Int) (event.Subscription, error) { + + var newVersionRule []interface{} + for _, newVersionItem := range newVersion { + newVersionRule = append(newVersionRule, newVersionItem) + } + + var subIDRule []interface{} + for _, subIDItem := range subID { + subIDRule = append(subIDRule, subIDItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "MigrationCompleted", newVersionRule, subIDRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorMigrationCompleted) + if err := _VRFCoordinator.contract.UnpackLog(event, "MigrationCompleted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseMigrationCompleted(log types.Log) (*VRFCoordinatorMigrationCompleted, error) { + event := new(VRFCoordinatorMigrationCompleted) + if err := _VRFCoordinator.contract.UnpackLog(event, "MigrationCompleted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorOutputsServedIterator struct { + Event *VRFCoordinatorOutputsServed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it 
*VRFCoordinatorOutputsServedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorOutputsServed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorOutputsServed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorOutputsServedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorOutputsServedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorOutputsServed struct { + RecentBlockHeight uint64 + JuelsPerFeeCoin *big.Int + ReasonableGasPrice uint64 + OutputsServed []VRFBeaconTypesOutputServed + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterOutputsServed(opts *bind.FilterOpts) (*VRFCoordinatorOutputsServedIterator, error) { + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "OutputsServed") + if err != nil { + return nil, err + } + return &VRFCoordinatorOutputsServedIterator{contract: _VRFCoordinator.contract, event: "OutputsServed", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchOutputsServed(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorOutputsServed) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "OutputsServed") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorOutputsServed) + if err := _VRFCoordinator.contract.UnpackLog(event, 
"OutputsServed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseOutputsServed(log types.Log) (*VRFCoordinatorOutputsServed, error) { + event := new(VRFCoordinatorOutputsServed) + if err := _VRFCoordinator.contract.UnpackLog(event, "OutputsServed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorOwnershipTransferRequestedIterator struct { + Event *VRFCoordinatorOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterOwnershipTransferRequested(opts 
*bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorOwnershipTransferRequestedIterator{contract: _VRFCoordinator.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorOwnershipTransferRequested) + if err := _VRFCoordinator.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseOwnershipTransferRequested(log types.Log) (*VRFCoordinatorOwnershipTransferRequested, error) { + event := new(VRFCoordinatorOwnershipTransferRequested) + if err := 
_VRFCoordinator.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorOwnershipTransferredIterator struct { + Event *VRFCoordinatorOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return 
&VRFCoordinatorOwnershipTransferredIterator{contract: _VRFCoordinator.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorOwnershipTransferred) + if err := _VRFCoordinator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseOwnershipTransferred(log types.Log) (*VRFCoordinatorOwnershipTransferred, error) { + event := new(VRFCoordinatorOwnershipTransferred) + if err := _VRFCoordinator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorPauseFlagChangedIterator struct { + Event *VRFCoordinatorPauseFlagChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorPauseFlagChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(VRFCoordinatorPauseFlagChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorPauseFlagChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorPauseFlagChangedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorPauseFlagChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorPauseFlagChanged struct { + Paused bool + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterPauseFlagChanged(opts *bind.FilterOpts) (*VRFCoordinatorPauseFlagChangedIterator, error) { + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "PauseFlagChanged") + if err != nil { + return nil, err + } + return &VRFCoordinatorPauseFlagChangedIterator{contract: _VRFCoordinator.contract, event: "PauseFlagChanged", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchPauseFlagChanged(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorPauseFlagChanged) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "PauseFlagChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorPauseFlagChanged) + if err := _VRFCoordinator.contract.UnpackLog(event, "PauseFlagChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return 
nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParsePauseFlagChanged(log types.Log) (*VRFCoordinatorPauseFlagChanged, error) { + event := new(VRFCoordinatorPauseFlagChanged) + if err := _VRFCoordinator.contract.UnpackLog(event, "PauseFlagChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorRandomWordsFulfilledIterator struct { + Event *VRFCoordinatorRandomWordsFulfilled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorRandomWordsFulfilledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomWordsFulfilled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorRandomWordsFulfilledIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorRandomWordsFulfilledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorRandomWordsFulfilled struct { + RequestIDs []*big.Int + SuccessfulFulfillment []byte + TruncatedErrorData [][]byte + SubBalances []*big.Int + SubIDs []*big.Int + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterRandomWordsFulfilled(opts *bind.FilterOpts) (*VRFCoordinatorRandomWordsFulfilledIterator, error) { + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "RandomWordsFulfilled") + if err != nil { + return nil, err 
+ } + return &VRFCoordinatorRandomWordsFulfilledIterator{contract: _VRFCoordinator.contract, event: "RandomWordsFulfilled", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomWordsFulfilled) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "RandomWordsFulfilled") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorRandomWordsFulfilled) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseRandomWordsFulfilled(log types.Log) (*VRFCoordinatorRandomWordsFulfilled, error) { + event := new(VRFCoordinatorRandomWordsFulfilled) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomWordsFulfilled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorRandomnessFulfillmentRequestedIterator struct { + Event *VRFCoordinatorRandomnessFulfillmentRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorRandomnessFulfillmentRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomnessFulfillmentRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + 
select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomnessFulfillmentRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorRandomnessFulfillmentRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorRandomnessFulfillmentRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorRandomnessFulfillmentRequested struct { + RequestID *big.Int + Requester common.Address + NextBeaconOutputHeight uint64 + ConfDelay *big.Int + SubID *big.Int + NumWords uint16 + GasAllowance uint32 + GasPrice *big.Int + WeiPerUnitLink *big.Int + Arguments []byte + CostJuels *big.Int + NewSubBalance *big.Int + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterRandomnessFulfillmentRequested(opts *bind.FilterOpts, requestID []*big.Int) (*VRFCoordinatorRandomnessFulfillmentRequestedIterator, error) { + + var requestIDRule []interface{} + for _, requestIDItem := range requestID { + requestIDRule = append(requestIDRule, requestIDItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "RandomnessFulfillmentRequested", requestIDRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorRandomnessFulfillmentRequestedIterator{contract: _VRFCoordinator.contract, event: "RandomnessFulfillmentRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchRandomnessFulfillmentRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomnessFulfillmentRequested, requestID []*big.Int) (event.Subscription, error) { + + var requestIDRule []interface{} + for _, requestIDItem := range requestID { + requestIDRule = append(requestIDRule, requestIDItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, 
"RandomnessFulfillmentRequested", requestIDRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorRandomnessFulfillmentRequested) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomnessFulfillmentRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseRandomnessFulfillmentRequested(log types.Log) (*VRFCoordinatorRandomnessFulfillmentRequested, error) { + event := new(VRFCoordinatorRandomnessFulfillmentRequested) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomnessFulfillmentRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorRandomnessRedeemedIterator struct { + Event *VRFCoordinatorRandomnessRedeemed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorRandomnessRedeemedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomnessRedeemed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomnessRedeemed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*VRFCoordinatorRandomnessRedeemedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorRandomnessRedeemedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorRandomnessRedeemed struct { + RequestID *big.Int + Requester common.Address + SubID *big.Int + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterRandomnessRedeemed(opts *bind.FilterOpts, requestID []*big.Int, requester []common.Address) (*VRFCoordinatorRandomnessRedeemedIterator, error) { + + var requestIDRule []interface{} + for _, requestIDItem := range requestID { + requestIDRule = append(requestIDRule, requestIDItem) + } + var requesterRule []interface{} + for _, requesterItem := range requester { + requesterRule = append(requesterRule, requesterItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "RandomnessRedeemed", requestIDRule, requesterRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorRandomnessRedeemedIterator{contract: _VRFCoordinator.contract, event: "RandomnessRedeemed", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchRandomnessRedeemed(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomnessRedeemed, requestID []*big.Int, requester []common.Address) (event.Subscription, error) { + + var requestIDRule []interface{} + for _, requestIDItem := range requestID { + requestIDRule = append(requestIDRule, requestIDItem) + } + var requesterRule []interface{} + for _, requesterItem := range requester { + requesterRule = append(requesterRule, requesterItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "RandomnessRedeemed", requestIDRule, requesterRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorRandomnessRedeemed) + if err := _VRFCoordinator.contract.UnpackLog(event, 
"RandomnessRedeemed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseRandomnessRedeemed(log types.Log) (*VRFCoordinatorRandomnessRedeemed, error) { + event := new(VRFCoordinatorRandomnessRedeemed) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomnessRedeemed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorRandomnessRequestedIterator struct { + Event *VRFCoordinatorRandomnessRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorRandomnessRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomnessRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomnessRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorRandomnessRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorRandomnessRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorRandomnessRequested struct { + RequestID *big.Int + Requester common.Address + NextBeaconOutputHeight uint64 + ConfDelay *big.Int + SubID *big.Int + NumWords uint16 + CostJuels *big.Int + NewSubBalance *big.Int 
+ Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterRandomnessRequested(opts *bind.FilterOpts, requestID []*big.Int) (*VRFCoordinatorRandomnessRequestedIterator, error) { + + var requestIDRule []interface{} + for _, requestIDItem := range requestID { + requestIDRule = append(requestIDRule, requestIDItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "RandomnessRequested", requestIDRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorRandomnessRequestedIterator{contract: _VRFCoordinator.contract, event: "RandomnessRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchRandomnessRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomnessRequested, requestID []*big.Int) (event.Subscription, error) { + + var requestIDRule []interface{} + for _, requestIDItem := range requestID { + requestIDRule = append(requestIDRule, requestIDItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "RandomnessRequested", requestIDRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorRandomnessRequested) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomnessRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseRandomnessRequested(log types.Log) (*VRFCoordinatorRandomnessRequested, error) { + event := new(VRFCoordinatorRandomnessRequested) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomnessRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
VRFCoordinatorSubscriptionCanceledIterator struct { + Event *VRFCoordinatorSubscriptionCanceled + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorSubscriptionCanceledIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionCanceled) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorSubscriptionCanceledIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorSubscriptionCanceledIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorSubscriptionCanceled struct { + SubId *big.Int + To common.Address + Amount *big.Int + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterSubscriptionCanceled(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorSubscriptionCanceledIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "SubscriptionCanceled", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorSubscriptionCanceledIterator{contract: _VRFCoordinator.contract, event: "SubscriptionCanceled", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- 
*VRFCoordinatorSubscriptionCanceled, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "SubscriptionCanceled", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorSubscriptionCanceled) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseSubscriptionCanceled(log types.Log) (*VRFCoordinatorSubscriptionCanceled, error) { + event := new(VRFCoordinatorSubscriptionCanceled) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionCanceled", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorSubscriptionConsumerAddedIterator struct { + Event *VRFCoordinatorSubscriptionConsumerAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorSubscriptionConsumerAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionConsumerAdded) + if err := it.contract.UnpackLog(it.Event, 
it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorSubscriptionConsumerAddedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorSubscriptionConsumerAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorSubscriptionConsumerAdded struct { + SubId *big.Int + Consumer common.Address + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorSubscriptionConsumerAddedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "SubscriptionConsumerAdded", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorSubscriptionConsumerAddedIterator{contract: _VRFCoordinator.contract, event: "SubscriptionConsumerAdded", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionConsumerAdded, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "SubscriptionConsumerAdded", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorSubscriptionConsumerAdded) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case 
<-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseSubscriptionConsumerAdded(log types.Log) (*VRFCoordinatorSubscriptionConsumerAdded, error) { + event := new(VRFCoordinatorSubscriptionConsumerAdded) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionConsumerAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorSubscriptionConsumerRemovedIterator struct { + Event *VRFCoordinatorSubscriptionConsumerRemoved + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorSubscriptionConsumerRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionConsumerRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorSubscriptionConsumerRemovedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorSubscriptionConsumerRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorSubscriptionConsumerRemoved struct { + SubId *big.Int + Consumer common.Address + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorSubscriptionConsumerRemovedIterator, error) 
{ + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "SubscriptionConsumerRemoved", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorSubscriptionConsumerRemovedIterator{contract: _VRFCoordinator.contract, event: "SubscriptionConsumerRemoved", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionConsumerRemoved, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "SubscriptionConsumerRemoved", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorSubscriptionConsumerRemoved) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseSubscriptionConsumerRemoved(log types.Log) (*VRFCoordinatorSubscriptionConsumerRemoved, error) { + event := new(VRFCoordinatorSubscriptionConsumerRemoved) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionConsumerRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorSubscriptionCreatedIterator struct { + Event *VRFCoordinatorSubscriptionCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub 
ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorSubscriptionCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorSubscriptionCreatedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorSubscriptionCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorSubscriptionCreated struct { + SubId *big.Int + Owner common.Address + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterSubscriptionCreated(opts *bind.FilterOpts, subId []*big.Int, owner []common.Address) (*VRFCoordinatorSubscriptionCreatedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "SubscriptionCreated", subIdRule, ownerRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorSubscriptionCreatedIterator{contract: _VRFCoordinator.contract, event: "SubscriptionCreated", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionCreated, subId []*big.Int, owner []common.Address) 
(event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "SubscriptionCreated", subIdRule, ownerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorSubscriptionCreated) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseSubscriptionCreated(log types.Log) (*VRFCoordinatorSubscriptionCreated, error) { + event := new(VRFCoordinatorSubscriptionCreated) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorSubscriptionFundedIterator struct { + Event *VRFCoordinatorSubscriptionFunded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorSubscriptionFundedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionFunded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionFunded) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorSubscriptionFundedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorSubscriptionFundedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorSubscriptionFunded struct { + SubId *big.Int + OldBalance *big.Int + NewBalance *big.Int + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterSubscriptionFunded(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorSubscriptionFundedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "SubscriptionFunded", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorSubscriptionFundedIterator{contract: _VRFCoordinator.contract, event: "SubscriptionFunded", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionFunded, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "SubscriptionFunded", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorSubscriptionFunded) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseSubscriptionFunded(log types.Log) (*VRFCoordinatorSubscriptionFunded, error) { + event := new(VRFCoordinatorSubscriptionFunded) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionFunded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type VRFCoordinatorSubscriptionOwnerTransferRequestedIterator struct { + Event *VRFCoordinatorSubscriptionOwnerTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorSubscriptionOwnerTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionOwnerTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorSubscriptionOwnerTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorSubscriptionOwnerTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorSubscriptionOwnerTransferRequested struct { + SubId *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subId []*big.Int) 
(*VRFCoordinatorSubscriptionOwnerTransferRequestedIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "SubscriptionOwnerTransferRequested", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorSubscriptionOwnerTransferRequestedIterator{contract: _VRFCoordinator.contract, event: "SubscriptionOwnerTransferRequested", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionOwnerTransferRequested, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "SubscriptionOwnerTransferRequested", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorSubscriptionOwnerTransferRequested) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseSubscriptionOwnerTransferRequested(log types.Log) (*VRFCoordinatorSubscriptionOwnerTransferRequested, error) { + event := new(VRFCoordinatorSubscriptionOwnerTransferRequested) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionOwnerTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
VRFCoordinatorSubscriptionOwnerTransferredIterator struct { + Event *VRFCoordinatorSubscriptionOwnerTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *VRFCoordinatorSubscriptionOwnerTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorSubscriptionOwnerTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *VRFCoordinatorSubscriptionOwnerTransferredIterator) Error() error { + return it.fail +} + +func (it *VRFCoordinatorSubscriptionOwnerTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type VRFCoordinatorSubscriptionOwnerTransferred struct { + SubId *big.Int + From common.Address + To common.Address + Raw types.Log +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorSubscriptionOwnerTransferredIterator, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "SubscriptionOwnerTransferred", subIdRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorSubscriptionOwnerTransferredIterator{contract: _VRFCoordinator.contract, event: "SubscriptionOwnerTransferred", logs: logs, sub: sub}, nil +} + +func (_VRFCoordinator 
*VRFCoordinatorFilterer) WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionOwnerTransferred, subId []*big.Int) (event.Subscription, error) { + + var subIdRule []interface{} + for _, subIdItem := range subId { + subIdRule = append(subIdRule, subIdItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "SubscriptionOwnerTransferred", subIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(VRFCoordinatorSubscriptionOwnerTransferred) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseSubscriptionOwnerTransferred(log types.Log) (*VRFCoordinatorSubscriptionOwnerTransferred, error) { + event := new(VRFCoordinatorSubscriptionOwnerTransferred) + if err := _VRFCoordinator.contract.UnpackLog(event, "SubscriptionOwnerTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetSubscription struct { + Balance *big.Int + PendingFulfillments uint64 + Owner common.Address + Consumers []common.Address +} +type SCallbackConfig struct { + MaxCallbackGasLimit uint32 + MaxCallbackArgumentsLength uint32 +} +type SCoordinatorConfig struct { + UseReasonableGasPrice bool + ReentrancyLock bool + Paused bool + PremiumPercentage uint8 + UnusedGasPenaltyPercent uint8 + StalenessSeconds uint32 + RedeemableRequestGasOverhead uint32 + CallbackRequestGasOverhead uint32 + ReasonableGasPriceStalenessBlocks uint32 + FallbackWeiPerUnitLink *big.Int +} +type SPendingRequests struct { + SlotNumber uint32 + 
ConfirmationDelay *big.Int + NumWords uint16 + Requester common.Address +} + +func (_VRFCoordinator *VRFCoordinator) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _VRFCoordinator.abi.Events["CallbackConfigSet"].ID: + return _VRFCoordinator.ParseCallbackConfigSet(log) + case _VRFCoordinator.abi.Events["CoordinatorConfigSet"].ID: + return _VRFCoordinator.ParseCoordinatorConfigSet(log) + case _VRFCoordinator.abi.Events["CoordinatorDeregistered"].ID: + return _VRFCoordinator.ParseCoordinatorDeregistered(log) + case _VRFCoordinator.abi.Events["CoordinatorRegistered"].ID: + return _VRFCoordinator.ParseCoordinatorRegistered(log) + case _VRFCoordinator.abi.Events["MigrationCompleted"].ID: + return _VRFCoordinator.ParseMigrationCompleted(log) + case _VRFCoordinator.abi.Events["OutputsServed"].ID: + return _VRFCoordinator.ParseOutputsServed(log) + case _VRFCoordinator.abi.Events["OwnershipTransferRequested"].ID: + return _VRFCoordinator.ParseOwnershipTransferRequested(log) + case _VRFCoordinator.abi.Events["OwnershipTransferred"].ID: + return _VRFCoordinator.ParseOwnershipTransferred(log) + case _VRFCoordinator.abi.Events["PauseFlagChanged"].ID: + return _VRFCoordinator.ParsePauseFlagChanged(log) + case _VRFCoordinator.abi.Events["RandomWordsFulfilled"].ID: + return _VRFCoordinator.ParseRandomWordsFulfilled(log) + case _VRFCoordinator.abi.Events["RandomnessFulfillmentRequested"].ID: + return _VRFCoordinator.ParseRandomnessFulfillmentRequested(log) + case _VRFCoordinator.abi.Events["RandomnessRedeemed"].ID: + return _VRFCoordinator.ParseRandomnessRedeemed(log) + case _VRFCoordinator.abi.Events["RandomnessRequested"].ID: + return _VRFCoordinator.ParseRandomnessRequested(log) + case _VRFCoordinator.abi.Events["SubscriptionCanceled"].ID: + return _VRFCoordinator.ParseSubscriptionCanceled(log) + case _VRFCoordinator.abi.Events["SubscriptionConsumerAdded"].ID: + return _VRFCoordinator.ParseSubscriptionConsumerAdded(log) + case 
_VRFCoordinator.abi.Events["SubscriptionConsumerRemoved"].ID: + return _VRFCoordinator.ParseSubscriptionConsumerRemoved(log) + case _VRFCoordinator.abi.Events["SubscriptionCreated"].ID: + return _VRFCoordinator.ParseSubscriptionCreated(log) + case _VRFCoordinator.abi.Events["SubscriptionFunded"].ID: + return _VRFCoordinator.ParseSubscriptionFunded(log) + case _VRFCoordinator.abi.Events["SubscriptionOwnerTransferRequested"].ID: + return _VRFCoordinator.ParseSubscriptionOwnerTransferRequested(log) + case _VRFCoordinator.abi.Events["SubscriptionOwnerTransferred"].ID: + return _VRFCoordinator.ParseSubscriptionOwnerTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (VRFCoordinatorCallbackConfigSet) Topic() common.Hash { + return common.HexToHash("0x0cc54509a45ab33cd67614d4a2892c083ecf8fb43b9d29f6ea8130b9023e51df") +} + +func (VRFCoordinatorCoordinatorConfigSet) Topic() common.Hash { + return common.HexToHash("0x0028d3a46e95e67def989d41c66eb331add9809460b95b5fb4eb006157728fc5") +} + +func (VRFCoordinatorCoordinatorDeregistered) Topic() common.Hash { + return common.HexToHash("0xf80a1a97fd42251f3c33cda98635e7399253033a6774fe37cd3f650b5282af37") +} + +func (VRFCoordinatorCoordinatorRegistered) Topic() common.Hash { + return common.HexToHash("0xb7cabbfc11e66731fc77de0444614282023bcbd41d16781c753a431d0af01625") +} + +func (VRFCoordinatorMigrationCompleted) Topic() common.Hash { + return common.HexToHash("0xbd89b747474d3fc04664dfbd1d56ae7ffbe46ee097cdb9979c13916bb76269ce") +} + +func (VRFCoordinatorOutputsServed) Topic() common.Hash { + return common.HexToHash("0xf10ea936d00579b4c52035ee33bf46929646b3aa87554c565d8fb2c7aa549c44") +} + +func (VRFCoordinatorOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (VRFCoordinatorOwnershipTransferred) Topic() common.Hash { + return 
common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (VRFCoordinatorPauseFlagChanged) Topic() common.Hash { + return common.HexToHash("0x49ba7c1de2d8853088b6270e43df2118516b217f38b917dd2b80dea360860fbe") +} + +func (VRFCoordinatorRandomWordsFulfilled) Topic() common.Hash { + return common.HexToHash("0x8f79f730779e875ce76c428039cc2052b5b5918c2a55c598fab251c1198aec54") +} + +func (VRFCoordinatorRandomnessFulfillmentRequested) Topic() common.Hash { + return common.HexToHash("0x01872fb9c7d6d68af06a17347935e04412da302a377224c205e672c26e18c37f") +} + +func (VRFCoordinatorRandomnessRedeemed) Topic() common.Hash { + return common.HexToHash("0x16f3f633197fafab10a5df69e6f3f2f7f20092f08d8d47de0a91c0f4b96a1a25") +} + +func (VRFCoordinatorRandomnessRequested) Topic() common.Hash { + return common.HexToHash("0xb7933fba96b6b452eb44f99fdc08052a45dff82363d59abaff0456931c3d2459") +} + +func (VRFCoordinatorSubscriptionCanceled) Topic() common.Hash { + return common.HexToHash("0x3784f77e8e883de95b5d47cd713ced01229fa74d118c0a462224bcb0516d43f1") +} + +func (VRFCoordinatorSubscriptionConsumerAdded) Topic() common.Hash { + return common.HexToHash("0x1e980d04aa7648e205713e5e8ea3808672ac163d10936d36f91b2c88ac1575e1") +} + +func (VRFCoordinatorSubscriptionConsumerRemoved) Topic() common.Hash { + return common.HexToHash("0x32158c6058347c1601b2d12bc696ac6901d8a9a9aa3ba10c27ab0a983e8425a7") +} + +func (VRFCoordinatorSubscriptionCreated) Topic() common.Hash { + return common.HexToHash("0x1d3015d7ba850fa198dc7b1a3f5d42779313a681035f77c8c03764c61005518d") +} + +func (VRFCoordinatorSubscriptionFunded) Topic() common.Hash { + return common.HexToHash("0x1ced9348ff549fceab2ac57cd3a9de38edaaab274b725ee82c23e8fc8c4eec7a") +} + +func (VRFCoordinatorSubscriptionOwnerTransferRequested) Topic() common.Hash { + return common.HexToHash("0x21a4dad170a6bf476c31bbcf4a16628295b0e450672eec25d7c93308e05344a1") +} + +func (VRFCoordinatorSubscriptionOwnerTransferred) 
Topic() common.Hash { + return common.HexToHash("0xd4114ab6e9af9f597c52041f32d62dc57c5c4e4c0d4427006069635e216c9386") +} + +func (_VRFCoordinator *VRFCoordinator) Address() common.Address { + return _VRFCoordinator.address +} + +type VRFCoordinatorInterface interface { + MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) + + MAXNUMWORDS(opts *bind.CallOpts) (*big.Int, error) + + NUMCONFDELAYS(opts *bind.CallOpts) (uint8, error) + + GetCallbackMemo(opts *bind.CallOpts, requestId *big.Int) ([32]byte, error) + + GetConfirmationDelays(opts *bind.CallOpts) ([8]*big.Int, error) + + GetFee(opts *bind.CallOpts, arg0 *big.Int, arg1 []byte) (*big.Int, error) + + GetFulfillmentFee(opts *bind.CallOpts, arg0 *big.Int, callbackGasLimit uint32, arguments []byte, arg3 []byte) (*big.Int, error) + + GetSubscription(opts *bind.CallOpts, subId *big.Int) (GetSubscription, + + error) + + GetSubscriptionLinkBalance(opts *bind.CallOpts) (*big.Int, error) + + IBeaconPeriodBlocks(opts *bind.CallOpts) (*big.Int, error) + + ILink(opts *bind.CallOpts) (common.Address, error) + + MigrationVersion(opts *bind.CallOpts) (uint8, error) + + OnMigration(opts *bind.CallOpts, arg0 []byte) error + + Owner(opts *bind.CallOpts) (common.Address, error) + + SCallbackConfig(opts *bind.CallOpts) (SCallbackConfig, + + error) + + SCoordinatorConfig(opts *bind.CallOpts) (SCoordinatorConfig, + + error) + + SPendingRequests(opts *bind.CallOpts, arg0 *big.Int) (SPendingRequests, + + error) + + SProducer(opts *bind.CallOpts) (common.Address, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) + + AddConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) + + BatchTransferLink(opts *bind.TransactOpts, recipients []common.Address, paymentsInJuels []*big.Int) (*types.Transaction, error) + + CancelSubscription(opts *bind.TransactOpts, subId 
*big.Int, to common.Address) (*types.Transaction, error) + + CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) + + DeregisterMigratableCoordinator(opts *bind.TransactOpts, target common.Address) (*types.Transaction, error) + + Migrate(opts *bind.TransactOpts, newCoordinator common.Address, encodedRequest []byte) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + ProcessVRFOutputs(opts *bind.TransactOpts, vrfOutputs []VRFBeaconTypesVRFOutput, juelsPerFeeCoin *big.Int, reasonableGasPrice uint64, blockHeight uint64) (*types.Transaction, error) + + RedeemRandomness(opts *bind.TransactOpts, subID *big.Int, requestID *big.Int, arg2 []byte) (*types.Transaction, error) + + RegisterMigratableCoordinator(opts *bind.TransactOpts, target common.Address) (*types.Transaction, error) + + RemoveConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) + + RequestRandomness(opts *bind.TransactOpts, subID *big.Int, numWords uint16, confDelay *big.Int, arg3 []byte) (*types.Transaction, error) + + RequestRandomnessFulfillment(opts *bind.TransactOpts, subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte, arg5 []byte) (*types.Transaction, error) + + RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int, newOwner common.Address) (*types.Transaction, error) + + SetCallbackConfig(opts *bind.TransactOpts, config VRFCoordinatorCallbackConfig) (*types.Transaction, error) + + SetConfirmationDelays(opts *bind.TransactOpts, confDelays [8]*big.Int) (*types.Transaction, error) + + SetCoordinatorConfig(opts *bind.TransactOpts, coordinatorConfig VRFBeaconTypesCoordinatorConfig) (*types.Transaction, error) + + SetPauseFlag(opts *bind.TransactOpts, pause bool) (*types.Transaction, error) + + SetProducer(opts *bind.TransactOpts, producer common.Address) (*types.Transaction, 
error) + + TransferLink(opts *bind.TransactOpts, recipient common.Address, juelsAmount *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterCallbackConfigSet(opts *bind.FilterOpts) (*VRFCoordinatorCallbackConfigSetIterator, error) + + WatchCallbackConfigSet(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorCallbackConfigSet) (event.Subscription, error) + + ParseCallbackConfigSet(log types.Log) (*VRFCoordinatorCallbackConfigSet, error) + + FilterCoordinatorConfigSet(opts *bind.FilterOpts) (*VRFCoordinatorCoordinatorConfigSetIterator, error) + + WatchCoordinatorConfigSet(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorCoordinatorConfigSet) (event.Subscription, error) + + ParseCoordinatorConfigSet(log types.Log) (*VRFCoordinatorCoordinatorConfigSet, error) + + FilterCoordinatorDeregistered(opts *bind.FilterOpts) (*VRFCoordinatorCoordinatorDeregisteredIterator, error) + + WatchCoordinatorDeregistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorCoordinatorDeregistered) (event.Subscription, error) + + ParseCoordinatorDeregistered(log types.Log) (*VRFCoordinatorCoordinatorDeregistered, error) + + FilterCoordinatorRegistered(opts *bind.FilterOpts) (*VRFCoordinatorCoordinatorRegisteredIterator, error) + + WatchCoordinatorRegistered(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorCoordinatorRegistered) (event.Subscription, error) + + ParseCoordinatorRegistered(log types.Log) (*VRFCoordinatorCoordinatorRegistered, error) + + FilterMigrationCompleted(opts *bind.FilterOpts, newVersion []uint8, subID []*big.Int) (*VRFCoordinatorMigrationCompletedIterator, error) + + WatchMigrationCompleted(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorMigrationCompleted, newVersion []uint8, subID []*big.Int) (event.Subscription, error) + + ParseMigrationCompleted(log types.Log) (*VRFCoordinatorMigrationCompleted, error) + + FilterOutputsServed(opts *bind.FilterOpts) 
(*VRFCoordinatorOutputsServedIterator, error) + + WatchOutputsServed(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorOutputsServed) (event.Subscription, error) + + ParseOutputsServed(log types.Log) (*VRFCoordinatorOutputsServed, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*VRFCoordinatorOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*VRFCoordinatorOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*VRFCoordinatorOwnershipTransferred, error) + + FilterPauseFlagChanged(opts *bind.FilterOpts) (*VRFCoordinatorPauseFlagChangedIterator, error) + + WatchPauseFlagChanged(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorPauseFlagChanged) (event.Subscription, error) + + ParsePauseFlagChanged(log types.Log) (*VRFCoordinatorPauseFlagChanged, error) + + FilterRandomWordsFulfilled(opts *bind.FilterOpts) (*VRFCoordinatorRandomWordsFulfilledIterator, error) + + WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomWordsFulfilled) (event.Subscription, error) + + ParseRandomWordsFulfilled(log types.Log) (*VRFCoordinatorRandomWordsFulfilled, error) + + FilterRandomnessFulfillmentRequested(opts *bind.FilterOpts, requestID []*big.Int) (*VRFCoordinatorRandomnessFulfillmentRequestedIterator, error) + + WatchRandomnessFulfillmentRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomnessFulfillmentRequested, 
requestID []*big.Int) (event.Subscription, error) + + ParseRandomnessFulfillmentRequested(log types.Log) (*VRFCoordinatorRandomnessFulfillmentRequested, error) + + FilterRandomnessRedeemed(opts *bind.FilterOpts, requestID []*big.Int, requester []common.Address) (*VRFCoordinatorRandomnessRedeemedIterator, error) + + WatchRandomnessRedeemed(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomnessRedeemed, requestID []*big.Int, requester []common.Address) (event.Subscription, error) + + ParseRandomnessRedeemed(log types.Log) (*VRFCoordinatorRandomnessRedeemed, error) + + FilterRandomnessRequested(opts *bind.FilterOpts, requestID []*big.Int) (*VRFCoordinatorRandomnessRequestedIterator, error) + + WatchRandomnessRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomnessRequested, requestID []*big.Int) (event.Subscription, error) + + ParseRandomnessRequested(log types.Log) (*VRFCoordinatorRandomnessRequested, error) + + FilterSubscriptionCanceled(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorSubscriptionCanceledIterator, error) + + WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionCanceled, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionCanceled(log types.Log) (*VRFCoordinatorSubscriptionCanceled, error) + + FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorSubscriptionConsumerAddedIterator, error) + + WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionConsumerAdded, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionConsumerAdded(log types.Log) (*VRFCoordinatorSubscriptionConsumerAdded, error) + + FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorSubscriptionConsumerRemovedIterator, error) + + WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionConsumerRemoved, subId []*big.Int) (event.Subscription, error) + + 
ParseSubscriptionConsumerRemoved(log types.Log) (*VRFCoordinatorSubscriptionConsumerRemoved, error) + + FilterSubscriptionCreated(opts *bind.FilterOpts, subId []*big.Int, owner []common.Address) (*VRFCoordinatorSubscriptionCreatedIterator, error) + + WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionCreated, subId []*big.Int, owner []common.Address) (event.Subscription, error) + + ParseSubscriptionCreated(log types.Log) (*VRFCoordinatorSubscriptionCreated, error) + + FilterSubscriptionFunded(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorSubscriptionFundedIterator, error) + + WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionFunded, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionFunded(log types.Log) (*VRFCoordinatorSubscriptionFunded, error) + + FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorSubscriptionOwnerTransferRequestedIterator, error) + + WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionOwnerTransferRequested, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionOwnerTransferRequested(log types.Log) (*VRFCoordinatorSubscriptionOwnerTransferRequested, error) + + FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subId []*big.Int) (*VRFCoordinatorSubscriptionOwnerTransferredIterator, error) + + WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorSubscriptionOwnerTransferred, subId []*big.Int) (event.Subscription, error) + + ParseSubscriptionOwnerTransferred(log types.Log) (*VRFCoordinatorSubscriptionOwnerTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/ocr2vrf/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/ocr2vrf/generation/generated-wrapper-dependency-versions-do-not-edit.txt new 
file mode 100644 index 00000000..ce0b1a09 --- /dev/null +++ b/core/gethwrappers/ocr2vrf/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -0,0 +1,7 @@ +GETH_VERSION: 1.12.0 +dkg: ../../../contracts/solc/v0.8.19/DKG.abi ../../../contracts/solc/v0.8.19/DKG.bin 02549733c46e50ba393c2521e39d4ec55b6a5d9a66baf4406b1a515b20470425 +load_test_beacon_consumer: ../../../contracts/solc/v0.8.19/LoadTestBeaconVRFConsumer.abi ../../../contracts/solc/v0.8.19/LoadTestBeaconVRFConsumer.bin 7306576bc1db6c0a4f0a8a83dd4c08e3078afa73b72858f7d1eaa410d1128fd2 +vrf_beacon: ../../../contracts/solc/v0.8.19/VRFBeacon.abi ../../../contracts/solc/v0.8.19/VRFBeacon.bin 63107992adf02024afccbe77fdf973777548dcd4d9af1484c8449aca6de30f4c +vrf_beacon_consumer: ../../../contracts/solc/v0.8.19/BeaconVRFConsumer.abi ../../../contracts/solc/v0.8.19/BeaconVRFConsumer.bin 520f1c24e4d926a4eb6c9504506b55b79a35ae8cc65ee02d28309a7d5b735a53 +vrf_beacon_coordinator: ../../../contracts/solc/v0.8.15/VRFBeaconCoordinator.abi ../../../contracts/solc/v0.8.15/VRFBeaconCoordinator.bin 08da747a3488fcd318ddc0db75fd0df7c07a100b2e19061f0efcb12a7180ecde +vrf_coordinator: ../../../contracts/solc/v0.8.19/VRFCoordinator.abi ../../../contracts/solc/v0.8.19/VRFCoordinator.bin 295bec795ab8c1ef08b6b27a67bab7f06233660e8a2f389211e470cc2b58c5ea diff --git a/core/gethwrappers/ocr2vrf/go_generate.go b/core/gethwrappers/ocr2vrf/go_generate.go new file mode 100644 index 00000000..475bf7e8 --- /dev/null +++ b/core/gethwrappers/ocr2vrf/go_generate.go @@ -0,0 +1,10 @@ +// Package gethwrappers provides tools for wrapping solidity contracts with +// golang packages, using abigen. +package gethwrappers + +// OCR2VRF - remove the _disabled tag to run these locally. 
+//go:generate_disabled go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/DKG.abi ../../../contracts/solc/v0.8.19/DKG.bin DKG dkg +//go:generate_disabled go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/VRFCoordinator.abi ../../../contracts/solc/v0.8.19/VRFCoordinator.bin VRFCoordinator vrf_coordinator +//go:generate_disabled go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/VRFBeacon.abi ../../../contracts/solc/v0.8.19/VRFBeacon.bin VRFBeacon vrf_beacon +//go:generate_disabled go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/BeaconVRFConsumer.abi ../../../contracts/solc/v0.8.19/BeaconVRFConsumer.bin BeaconVRFConsumer vrf_beacon_consumer +//go:generate_disabled go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/LoadTestBeaconVRFConsumer.abi ../../../contracts/solc/v0.8.19/LoadTestBeaconVRFConsumer.bin LoadTestBeaconVRFConsumer load_test_beacon_consumer diff --git a/core/gethwrappers/shared/generated/burn_mint_erc677/burn_mint_erc677.go b/core/gethwrappers/shared/generated/burn_mint_erc677/burn_mint_erc677.go new file mode 100644 index 00000000..9a03d039 --- /dev/null +++ b/core/gethwrappers/shared/generated/burn_mint_erc677/burn_mint_erc677.go @@ -0,0 +1,2068 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package burn_mint_erc677 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var BurnMintERC677MetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"string\",\"name\":\"name\",\"type\":\"string\"},{\"internalType\":\"string\",\"name\":\"symbol\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"decimals_\",\"type\":\"uint8\"},{\"internalType\":\"uint256\",\"name\":\"maxSupply_\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"supplyAfterMint\",\"type\":\"uint256\"}],\"name\":\"MaxSupplyExceeded\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"SenderNotBurner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"SenderNotMinter\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"Approval\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"burner\",\"type\":\"address\"}],\"name\":\"BurnAccessGranted\",\"type\":\"event\"},{\"anonymous\
":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"burner\",\"type\":\"address\"}],\"name\":\"BurnAccessRevoked\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"minter\",\"type\":\"address\"}],\"name\":\"MintAccessGranted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"minter\",\"type\":\"address\"}],\"name\":\"MintAccessRevoked\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"Transfer\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"Transfer\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"int
ernalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"}],\"name\":\"allowance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"approve\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"balanceOf\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"burn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"burn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"burnFrom\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"subtractedValue\",\"type\":\"uint256\"}],\"name\":\"decreaseAllowance\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"n
ame\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"subtractedValue\",\"type\":\"uint256\"}],\"name\":\"decreaseApproval\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getBurners\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMinters\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"burner\",\"type\":\"address\"}],\"name\":\"grantBurnRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"burnAndMinter\",\"type\":\"address\"}],\"name\":\"grantMintAndBurnRoles\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"minter\",\"type\":\"address\"}],\"name\":\"grantMintRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"addedValue\",\"type\":\"uint256\"}],\"name\":\"increaseAllowance\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"addedValue\",\"type\":\"uint256\"}],\"name\":\"increaseApproval\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"burner\",\"type\":\"address\"}],\"name\":\"isBurner\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutab
ility\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"minter\",\"type\":\"address\"}],\"name\":\"isMinter\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maxSupply\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"mint\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"name\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"burner\",\"type\":\"address\"}],\"name\":\"revokeBurnRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"minter\",\"type\":\"address\"}],\"name\":\"revokeMintRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"symbol\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"totalSupply\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"inte
rnalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"transfer\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"transferAndCall\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"transferFrom\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60c06040523480156200001157600080fd5b50604051620022dd380380620022dd833981016040819052620000349162000277565b338060008686818160036200004a838262000391565b50600462000059828262000391565b5050506001600160a01b0384169150620000bc90505760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600580546001600160a01b0319166001600160a01b0384811691909117909155811615620000ef57620000ef8162000106565b50505060ff90911660805260a052506200045d9050565b336001600160a01b03821603620001605760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620000b3565b600680546001600160a01b0319166001600160a01b03838116918217909255600554604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b634e487b7160e01b600052604160045260246000fd5b600082601f830112620001da57600080fd5b81516001600160401b0380821115620001f757620001f7620001b2565b604051601f8301601f19908116603f01168101908282118183101715620002225762000222620001b2565b816040528381526020925086838588010111156200023f57600080fd5b600091505b8382101562000263578582018301518183018401529082019062000244565b600093810190920192909252949350505050565b600080600080608085870312156200028e57600080fd5b84516001600160401b0380821115620002a657600080fd5b620002b488838901620001c8565b95506020870151915080821115620002cb57600080fd5b50620002da87828801620001c8565b935050604085015160ff81168114620002f257600080fd5b6060959095015193969295505050565b600181811c908216806200031757607f821691505b6020821081036200033857634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200038c57600081815260208120601f850160051c81016020861015620003675750805b601f850160051c820191505b81811015620003885782815560010162000373565b5050505b505050565b81516001600160401b03811115620003ad57620003ad620001b2565b620003c581620003be845462000302565b846200033e565b602080601f831160018114620003fd5760008415620003e45750858301515b600
019600386901b1c1916600185901b17855562000388565b600085815260208120601f198616915b828110156200042e578886015182559484019460019091019084016200040d565b50858210156200044d5787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b60805160a051611e4c6200049160003960008181610447015281816108c301526108ed015260006102710152611e4c6000f3fe608060405234801561001057600080fd5b50600436106101f05760003560e01c806379cc67901161010f578063c2e3273d116100a2578063d73dd62311610071578063d73dd6231461046b578063dd62ed3e1461047e578063f2fde38b146104c4578063f81094f3146104d757600080fd5b8063c2e3273d1461040c578063c630948d1461041f578063c64d0ebc14610432578063d5abeb011461044557600080fd5b80639dc29fac116100de5780639dc29fac146103c0578063a457c2d7146103d3578063a9059cbb146103e6578063aa271e1a146103f957600080fd5b806379cc67901461037557806386fe8b43146103885780638da5cb5b1461039057806395d89b41146103b857600080fd5b806340c10f19116101875780636618846311610156578063661884631461030f5780636b32810b1461032257806370a082311461033757806379ba50971461036d57600080fd5b806340c10f19146102c157806342966c68146102d65780634334614a146102e95780634f5632f8146102fc57600080fd5b806323b872dd116101c357806323b872dd14610257578063313ce5671461026a578063395093511461029b5780634000aea0146102ae57600080fd5b806301ffc9a7146101f557806306fdde031461021d578063095ea7b31461023257806318160ddd14610245575b600080fd5b6102086102033660046119b9565b6104ea565b60405190151581526020015b60405180910390f35b61022561061b565b6040516102149190611a5f565b610208610240366004611a9b565b6106ad565b6002545b604051908152602001610214565b610208610265366004611ac5565b6106c5565b60405160ff7f0000000000000000000000000000000000000000000000000000000000000000168152602001610214565b6102086102a9366004611a9b565b6106e9565b6102086102bc366004611b30565b610735565b6102d46102cf366004611a9b565b610858565b005b6102d46102e4366004611c19565b61097f565b6102086102f7366004611c32565b6109cc565b6102d461030a366004611c32565b6109d9565b61020861031d366004611a9b565b610a35565b61032a610a48565b6040516102149190611c4d565b61024961034
5366004611c32565b73ffffffffffffffffffffffffffffffffffffffff1660009081526020819052604090205490565b6102d4610a59565b6102d4610383366004611a9b565b610b5a565b61032a610ba9565b60055460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610214565b610225610bb5565b6102d46103ce366004611a9b565b610bc4565b6102086103e1366004611a9b565b610bce565b6102086103f4366004611a9b565b610c9f565b610208610407366004611c32565b610cad565b6102d461041a366004611c32565b610cba565b6102d461042d366004611c32565b610d16565b6102d4610440366004611c32565b610d24565b7f0000000000000000000000000000000000000000000000000000000000000000610249565b6102d4610479366004611a9b565b610d80565b61024961048c366004611ca7565b73ffffffffffffffffffffffffffffffffffffffff918216600090815260016020908152604080832093909416825291909152205490565b6102d46104d2366004611c32565b610d8a565b6102d46104e5366004611c32565b610d9b565b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f36372b0700000000000000000000000000000000000000000000000000000000148061057d57507fffffffff0000000000000000000000000000000000000000000000000000000082167f4000aea000000000000000000000000000000000000000000000000000000000145b806105c957507fffffffff0000000000000000000000000000000000000000000000000000000082167fe6599b4d00000000000000000000000000000000000000000000000000000000145b8061061557507fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a700000000000000000000000000000000000000000000000000000000145b92915050565b60606003805461062a90611cda565b80601f016020809104026020016040519081016040528092919081815260200182805461065690611cda565b80156106a35780601f10610678576101008083540402835291602001916106a3565b820191906000526020600020905b81548152906001019060200180831161068657829003601f168201915b5050505050905090565b6000336106bb818585610df7565b5060019392505050565b6000336106d3858285610e2b565b6106de858585610efc565b506001949350505050565b33600081815260016020908152604080832073ffffffffffffffffffffffffffffffffffffffff871684529091528120549091906106bb908290869
0610730908790611d5c565b610df7565b60006107418484610c9f565b508373ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c1685856040516107a1929190611d6f565b60405180910390a373ffffffffffffffffffffffffffffffffffffffff84163b156106bb576040517fa4c0ed3600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff85169063a4c0ed369061081c90339087908790600401611d90565b600060405180830381600087803b15801561083657600080fd5b505af115801561084a573d6000803e3d6000fd5b505050505060019392505050565b61086133610cad565b61089e576040517fe2c8c9d50000000000000000000000000000000000000000000000000000000081523360048201526024015b60405180910390fd5b813073ffffffffffffffffffffffffffffffffffffffff8216036108c157600080fd5b7f00000000000000000000000000000000000000000000000000000000000000001580159061092257507f00000000000000000000000000000000000000000000000000000000000000008261091660025490565b6109209190611d5c565b115b15610970578161093160025490565b61093b9190611d5c565b6040517fcbbf111300000000000000000000000000000000000000000000000000000000815260040161089591815260200190565b61097a8383610f2a565b505050565b610988336109cc565b6109c0576040517fc820b10b000000000000000000000000000000000000000000000000000000008152336004820152602401610895565b6109c98161101d565b50565b6000610615600983611027565b6109e1611056565b6109ec6009826110d9565b156109c95760405173ffffffffffffffffffffffffffffffffffffffff8216907f0a675452746933cefe3d74182e78db7afe57ba60eaa4234b5d85e9aa41b0610c90600090a250565b6000610a418383610bce565b9392505050565b6060610a5460076110fb565b905090565b60065473ffffffffffffffffffffffffffffffffffffffff163314610ada576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610895565b600580547fffffffffffffffffffffffff0000000000000000000000000000000000000000808216339081179093556006805
4909116905560405173ffffffffffffffffffffffffffffffffffffffff909116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a350565b610b63336109cc565b610b9b576040517fc820b10b000000000000000000000000000000000000000000000000000000008152336004820152602401610895565b610ba58282611108565b5050565b6060610a5460096110fb565b60606004805461062a90611cda565b610ba58282610b5a565b33600081815260016020908152604080832073ffffffffffffffffffffffffffffffffffffffff8716845290915281205490919083811015610c92576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f7760448201527f207a65726f0000000000000000000000000000000000000000000000000000006064820152608401610895565b6106de8286868403610df7565b6000336106bb818585610efc565b6000610615600783611027565b610cc2611056565b610ccd60078261111d565b156109c95760405173ffffffffffffffffffffffffffffffffffffffff8216907fe46fef8bbff1389d9010703cf8ebb363fb3daf5bf56edc27080b67bc8d9251ea90600090a250565b610d1f81610cba565b6109c9815b610d2c611056565b610d3760098261111d565b156109c95760405173ffffffffffffffffffffffffffffffffffffffff8216907f92308bb7573b2a3d17ddb868b39d8ebec433f3194421abc22d084f89658c9bad90600090a250565b61097a82826106e9565b610d92611056565b6109c98161113f565b610da3611056565b610dae6007826110d9565b156109c95760405173ffffffffffffffffffffffffffffffffffffffff8216907fed998b960f6340d045f620c119730f7aa7995e7425c2401d3a5b64ff998a59e990600090a250565b813073ffffffffffffffffffffffffffffffffffffffff821603610e1a57600080fd5b610e25848484611235565b50505050565b73ffffffffffffffffffffffffffffffffffffffff8381166000908152600160209081526040808320938616835292905220547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8114610e255781811015610eef576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e63650000006044820152606401610895565b610e2584848
48403610df7565b813073ffffffffffffffffffffffffffffffffffffffff821603610f1f57600080fd5b610e258484846113e8565b73ffffffffffffffffffffffffffffffffffffffff8216610fa7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f45524332303a206d696e7420746f20746865207a65726f2061646472657373006044820152606401610895565b8060026000828254610fb99190611d5c565b909155505073ffffffffffffffffffffffffffffffffffffffff8216600081815260208181526040808320805486019055518481527fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a35050565b6109c93382611657565b73ffffffffffffffffffffffffffffffffffffffff811660009081526001830160205260408120541515610a41565b60055473ffffffffffffffffffffffffffffffffffffffff1633146110d7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610895565b565b6000610a418373ffffffffffffffffffffffffffffffffffffffff841661181b565b60606000610a418361190e565b611113823383610e2b565b610ba58282611657565b6000610a418373ffffffffffffffffffffffffffffffffffffffff841661196a565b3373ffffffffffffffffffffffffffffffffffffffff8216036111be576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610895565b600680547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff838116918217909255600554604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b73ffffffffffffffffffffffffffffffffffffffff83166112d7576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f2061646460448201527f72657373000000000000000000000000000000000000000000000000000000006064820152608401610895565b73fffffffff
fffffffffffffffffffffffffffffff821661137a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f20616464726560448201527f73730000000000000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffffffffffffffffffffffffffffffffff83811660008181526001602090815260408083209487168084529482529182902085905590518481527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925910160405180910390a3505050565b73ffffffffffffffffffffffffffffffffffffffff831661148b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f20616460448201527f64726573730000000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffffffffffffffffffffffffffffffffff821661152e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201527f65737300000000000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffffffffffffffffffffffffffffffffff8316600090815260208190526040902054818110156115e4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e742065786365656473206260448201527f616c616e636500000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffffffffffffffffffffffffffffffffff848116600081815260208181526040808320878703905593871680835291849020805487019055925185815290927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a3610e25565b73ffffffffffffffffffffffffffffffffffffffff82166116fa576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602160248201527f45524332303a206275726e2066726f6d20746865207a65726f206164647
2657360448201527f73000000000000000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffffffffffffffffffffffffffffffffff8216600090815260208190526040902054818110156117b0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a206275726e20616d6f756e7420657863656564732062616c616e60448201527f63650000000000000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffffffffffffffffffffffffffffffffff83166000818152602081815260408083208686039055600280548790039055518581529192917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a3505050565b6000818152600183016020526040812054801561190457600061183f600183611dce565b855490915060009061185390600190611dce565b90508181146118b857600086600001828154811061187357611873611de1565b906000526020600020015490508087600001848154811061189657611896611de1565b6000918252602080832090910192909255918252600188019052604090208390555b85548690806118c9576118c9611e10565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505050610615565b6000915050610615565b60608160000180548060200260200160405190810160405280929190818152602001828054801561195e57602002820191906000526020600020905b81548152602001906001019080831161194a575b50505050509050919050565b60008181526001830160205260408120546119b157508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155610615565b506000610615565b6000602082840312156119cb57600080fd5b81357fffffffff0000000000000000000000000000000000000000000000000000000081168114610a4157600080fd5b6000815180845260005b81811015611a2157602081850181015186830182015201611a05565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081526000610a4160208301846119fb565b803573ffffffffffffffffffffffffffffffffffffffff81168114611a9657600080fd5b919050565b60008060408385031215611aae57600080f
d5b611ab783611a72565b946020939093013593505050565b600080600060608486031215611ada57600080fd5b611ae384611a72565b9250611af160208501611a72565b9150604084013590509250925092565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600080600060608486031215611b4557600080fd5b611b4e84611a72565b925060208401359150604084013567ffffffffffffffff80821115611b7257600080fd5b818601915086601f830112611b8657600080fd5b813581811115611b9857611b98611b01565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908382118183101715611bde57611bde611b01565b81604052828152896020848701011115611bf757600080fd5b8260208601602083013760006020848301015280955050505050509250925092565b600060208284031215611c2b57600080fd5b5035919050565b600060208284031215611c4457600080fd5b610a4182611a72565b6020808252825182820181905260009190848201906040850190845b81811015611c9b57835173ffffffffffffffffffffffffffffffffffffffff1683529284019291840191600101611c69565b50909695505050505050565b60008060408385031215611cba57600080fd5b611cc383611a72565b9150611cd160208401611a72565b90509250929050565b600181811c90821680611cee57607f821691505b602082108103611d27577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b8082018082111561061557610615611d2d565b828152604060208201526000611d8860408301846119fb565b949350505050565b73ffffffffffffffffffffffffffffffffffffffff84168152826020820152606060408201526000611dc560608301846119fb565b95945050505050565b8181038181111561061557610615611d2d565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fdfea164736f6c6343000813000a", +} + +var BurnMintERC677ABI = BurnMintERC677MetaData.ABI + +var BurnMintERC677Bin = BurnMintERC677MetaData.Bin + +func 
DeployBurnMintERC677(auth *bind.TransactOpts, backend bind.ContractBackend, name string, symbol string, decimals_ uint8, maxSupply_ *big.Int) (common.Address, *types.Transaction, *BurnMintERC677, error) { + parsed, err := BurnMintERC677MetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(BurnMintERC677Bin), backend, name, symbol, decimals_, maxSupply_) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &BurnMintERC677{address: address, abi: *parsed, BurnMintERC677Caller: BurnMintERC677Caller{contract: contract}, BurnMintERC677Transactor: BurnMintERC677Transactor{contract: contract}, BurnMintERC677Filterer: BurnMintERC677Filterer{contract: contract}}, nil +} + +type BurnMintERC677 struct { + address common.Address + abi abi.ABI + BurnMintERC677Caller + BurnMintERC677Transactor + BurnMintERC677Filterer +} + +type BurnMintERC677Caller struct { + contract *bind.BoundContract +} + +type BurnMintERC677Transactor struct { + contract *bind.BoundContract +} + +type BurnMintERC677Filterer struct { + contract *bind.BoundContract +} + +type BurnMintERC677Session struct { + Contract *BurnMintERC677 + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type BurnMintERC677CallerSession struct { + Contract *BurnMintERC677Caller + CallOpts bind.CallOpts +} + +type BurnMintERC677TransactorSession struct { + Contract *BurnMintERC677Transactor + TransactOpts bind.TransactOpts +} + +type BurnMintERC677Raw struct { + Contract *BurnMintERC677 +} + +type BurnMintERC677CallerRaw struct { + Contract *BurnMintERC677Caller +} + +type BurnMintERC677TransactorRaw struct { + Contract *BurnMintERC677Transactor +} + +func NewBurnMintERC677(address common.Address, backend bind.ContractBackend) (*BurnMintERC677, error) { + abi, err := 
abi.JSON(strings.NewReader(BurnMintERC677ABI)) + if err != nil { + return nil, err + } + contract, err := bindBurnMintERC677(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &BurnMintERC677{address: address, abi: abi, BurnMintERC677Caller: BurnMintERC677Caller{contract: contract}, BurnMintERC677Transactor: BurnMintERC677Transactor{contract: contract}, BurnMintERC677Filterer: BurnMintERC677Filterer{contract: contract}}, nil +} + +func NewBurnMintERC677Caller(address common.Address, caller bind.ContractCaller) (*BurnMintERC677Caller, error) { + contract, err := bindBurnMintERC677(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &BurnMintERC677Caller{contract: contract}, nil +} + +func NewBurnMintERC677Transactor(address common.Address, transactor bind.ContractTransactor) (*BurnMintERC677Transactor, error) { + contract, err := bindBurnMintERC677(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &BurnMintERC677Transactor{contract: contract}, nil +} + +func NewBurnMintERC677Filterer(address common.Address, filterer bind.ContractFilterer) (*BurnMintERC677Filterer, error) { + contract, err := bindBurnMintERC677(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &BurnMintERC677Filterer{contract: contract}, nil +} + +func bindBurnMintERC677(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := BurnMintERC677MetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_BurnMintERC677 *BurnMintERC677Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BurnMintERC677.Contract.BurnMintERC677Caller.contract.Call(opts, result, method, params...) 
+} + +func (_BurnMintERC677 *BurnMintERC677Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BurnMintERC677.Contract.BurnMintERC677Transactor.contract.Transfer(opts) +} + +func (_BurnMintERC677 *BurnMintERC677Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BurnMintERC677.Contract.BurnMintERC677Transactor.contract.Transact(opts, method, params...) +} + +func (_BurnMintERC677 *BurnMintERC677CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _BurnMintERC677.Contract.contract.Call(opts, result, method, params...) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BurnMintERC677.Contract.contract.Transfer(opts) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _BurnMintERC677.Contract.contract.Transact(opts, method, params...) 
+} + +func (_BurnMintERC677 *BurnMintERC677Caller) Allowance(opts *bind.CallOpts, owner common.Address, spender common.Address) (*big.Int, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "allowance", owner, spender) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_BurnMintERC677 *BurnMintERC677Session) Allowance(owner common.Address, spender common.Address) (*big.Int, error) { + return _BurnMintERC677.Contract.Allowance(&_BurnMintERC677.CallOpts, owner, spender) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) Allowance(owner common.Address, spender common.Address) (*big.Int, error) { + return _BurnMintERC677.Contract.Allowance(&_BurnMintERC677.CallOpts, owner, spender) +} + +func (_BurnMintERC677 *BurnMintERC677Caller) BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "balanceOf", account) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_BurnMintERC677 *BurnMintERC677Session) BalanceOf(account common.Address) (*big.Int, error) { + return _BurnMintERC677.Contract.BalanceOf(&_BurnMintERC677.CallOpts, account) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) BalanceOf(account common.Address) (*big.Int, error) { + return _BurnMintERC677.Contract.BalanceOf(&_BurnMintERC677.CallOpts, account) +} + +func (_BurnMintERC677 *BurnMintERC677Caller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_BurnMintERC677 *BurnMintERC677Session) Decimals() (uint8, error) { + return 
_BurnMintERC677.Contract.Decimals(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) Decimals() (uint8, error) { + return _BurnMintERC677.Contract.Decimals(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677Caller) GetBurners(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "getBurners") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_BurnMintERC677 *BurnMintERC677Session) GetBurners() ([]common.Address, error) { + return _BurnMintERC677.Contract.GetBurners(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) GetBurners() ([]common.Address, error) { + return _BurnMintERC677.Contract.GetBurners(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677Caller) GetMinters(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "getMinters") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_BurnMintERC677 *BurnMintERC677Session) GetMinters() ([]common.Address, error) { + return _BurnMintERC677.Contract.GetMinters(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) GetMinters() ([]common.Address, error) { + return _BurnMintERC677.Contract.GetMinters(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677Caller) IsBurner(opts *bind.CallOpts, burner common.Address) (bool, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "isBurner", burner) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_BurnMintERC677 
*BurnMintERC677Session) IsBurner(burner common.Address) (bool, error) { + return _BurnMintERC677.Contract.IsBurner(&_BurnMintERC677.CallOpts, burner) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) IsBurner(burner common.Address) (bool, error) { + return _BurnMintERC677.Contract.IsBurner(&_BurnMintERC677.CallOpts, burner) +} + +func (_BurnMintERC677 *BurnMintERC677Caller) IsMinter(opts *bind.CallOpts, minter common.Address) (bool, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "isMinter", minter) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_BurnMintERC677 *BurnMintERC677Session) IsMinter(minter common.Address) (bool, error) { + return _BurnMintERC677.Contract.IsMinter(&_BurnMintERC677.CallOpts, minter) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) IsMinter(minter common.Address) (bool, error) { + return _BurnMintERC677.Contract.IsMinter(&_BurnMintERC677.CallOpts, minter) +} + +func (_BurnMintERC677 *BurnMintERC677Caller) MaxSupply(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "maxSupply") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_BurnMintERC677 *BurnMintERC677Session) MaxSupply() (*big.Int, error) { + return _BurnMintERC677.Contract.MaxSupply(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) MaxSupply() (*big.Int, error) { + return _BurnMintERC677.Contract.MaxSupply(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677Caller) Name(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "name") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} 
+ +func (_BurnMintERC677 *BurnMintERC677Session) Name() (string, error) { + return _BurnMintERC677.Contract.Name(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) Name() (string, error) { + return _BurnMintERC677.Contract.Name(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677Caller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_BurnMintERC677 *BurnMintERC677Session) Owner() (common.Address, error) { + return _BurnMintERC677.Contract.Owner(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) Owner() (common.Address, error) { + return _BurnMintERC677.Contract.Owner(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677Caller) SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "supportsInterface", interfaceId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_BurnMintERC677 *BurnMintERC677Session) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _BurnMintERC677.Contract.SupportsInterface(&_BurnMintERC677.CallOpts, interfaceId) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _BurnMintERC677.Contract.SupportsInterface(&_BurnMintERC677.CallOpts, interfaceId) +} + +func (_BurnMintERC677 *BurnMintERC677Caller) Symbol(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "symbol") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], 
new(string)).(*string) + + return out0, err + +} + +func (_BurnMintERC677 *BurnMintERC677Session) Symbol() (string, error) { + return _BurnMintERC677.Contract.Symbol(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) Symbol() (string, error) { + return _BurnMintERC677.Contract.Symbol(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677Caller) TotalSupply(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _BurnMintERC677.contract.Call(opts, &out, "totalSupply") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_BurnMintERC677 *BurnMintERC677Session) TotalSupply() (*big.Int, error) { + return _BurnMintERC677.Contract.TotalSupply(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677CallerSession) TotalSupply() (*big.Int, error) { + return _BurnMintERC677.Contract.TotalSupply(&_BurnMintERC677.CallOpts) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "acceptOwnership") +} + +func (_BurnMintERC677 *BurnMintERC677Session) AcceptOwnership() (*types.Transaction, error) { + return _BurnMintERC677.Contract.AcceptOwnership(&_BurnMintERC677.TransactOpts) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _BurnMintERC677.Contract.AcceptOwnership(&_BurnMintERC677.TransactOpts) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) Approve(opts *bind.TransactOpts, spender common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "approve", spender, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Session) Approve(spender common.Address, amount *big.Int) (*types.Transaction, error) { + return 
_BurnMintERC677.Contract.Approve(&_BurnMintERC677.TransactOpts, spender, amount) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) Approve(spender common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.Approve(&_BurnMintERC677.TransactOpts, spender, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) Burn(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "burn", amount) +} + +func (_BurnMintERC677 *BurnMintERC677Session) Burn(amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.Burn(&_BurnMintERC677.TransactOpts, amount) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) Burn(amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.Burn(&_BurnMintERC677.TransactOpts, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) Burn0(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "burn0", account, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Session) Burn0(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.Burn0(&_BurnMintERC677.TransactOpts, account, amount) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) Burn0(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.Burn0(&_BurnMintERC677.TransactOpts, account, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) BurnFrom(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "burnFrom", account, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Session) BurnFrom(account common.Address, amount *big.Int) (*types.Transaction, error) { + return 
_BurnMintERC677.Contract.BurnFrom(&_BurnMintERC677.TransactOpts, account, amount) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) BurnFrom(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.BurnFrom(&_BurnMintERC677.TransactOpts, account, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) DecreaseAllowance(opts *bind.TransactOpts, spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "decreaseAllowance", spender, subtractedValue) +} + +func (_BurnMintERC677 *BurnMintERC677Session) DecreaseAllowance(spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.DecreaseAllowance(&_BurnMintERC677.TransactOpts, spender, subtractedValue) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) DecreaseAllowance(spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.DecreaseAllowance(&_BurnMintERC677.TransactOpts, spender, subtractedValue) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) DecreaseApproval(opts *bind.TransactOpts, spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "decreaseApproval", spender, subtractedValue) +} + +func (_BurnMintERC677 *BurnMintERC677Session) DecreaseApproval(spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.DecreaseApproval(&_BurnMintERC677.TransactOpts, spender, subtractedValue) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) DecreaseApproval(spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.DecreaseApproval(&_BurnMintERC677.TransactOpts, spender, subtractedValue) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) GrantBurnRole(opts 
*bind.TransactOpts, burner common.Address) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "grantBurnRole", burner) +} + +func (_BurnMintERC677 *BurnMintERC677Session) GrantBurnRole(burner common.Address) (*types.Transaction, error) { + return _BurnMintERC677.Contract.GrantBurnRole(&_BurnMintERC677.TransactOpts, burner) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) GrantBurnRole(burner common.Address) (*types.Transaction, error) { + return _BurnMintERC677.Contract.GrantBurnRole(&_BurnMintERC677.TransactOpts, burner) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) GrantMintAndBurnRoles(opts *bind.TransactOpts, burnAndMinter common.Address) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "grantMintAndBurnRoles", burnAndMinter) +} + +func (_BurnMintERC677 *BurnMintERC677Session) GrantMintAndBurnRoles(burnAndMinter common.Address) (*types.Transaction, error) { + return _BurnMintERC677.Contract.GrantMintAndBurnRoles(&_BurnMintERC677.TransactOpts, burnAndMinter) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) GrantMintAndBurnRoles(burnAndMinter common.Address) (*types.Transaction, error) { + return _BurnMintERC677.Contract.GrantMintAndBurnRoles(&_BurnMintERC677.TransactOpts, burnAndMinter) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) GrantMintRole(opts *bind.TransactOpts, minter common.Address) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "grantMintRole", minter) +} + +func (_BurnMintERC677 *BurnMintERC677Session) GrantMintRole(minter common.Address) (*types.Transaction, error) { + return _BurnMintERC677.Contract.GrantMintRole(&_BurnMintERC677.TransactOpts, minter) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) GrantMintRole(minter common.Address) (*types.Transaction, error) { + return _BurnMintERC677.Contract.GrantMintRole(&_BurnMintERC677.TransactOpts, minter) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) 
IncreaseAllowance(opts *bind.TransactOpts, spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "increaseAllowance", spender, addedValue) +} + +func (_BurnMintERC677 *BurnMintERC677Session) IncreaseAllowance(spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.IncreaseAllowance(&_BurnMintERC677.TransactOpts, spender, addedValue) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) IncreaseAllowance(spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.IncreaseAllowance(&_BurnMintERC677.TransactOpts, spender, addedValue) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) IncreaseApproval(opts *bind.TransactOpts, spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "increaseApproval", spender, addedValue) +} + +func (_BurnMintERC677 *BurnMintERC677Session) IncreaseApproval(spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.IncreaseApproval(&_BurnMintERC677.TransactOpts, spender, addedValue) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) IncreaseApproval(spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.IncreaseApproval(&_BurnMintERC677.TransactOpts, spender, addedValue) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) Mint(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "mint", account, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Session) Mint(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.Mint(&_BurnMintERC677.TransactOpts, account, amount) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) 
Mint(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.Mint(&_BurnMintERC677.TransactOpts, account, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) RevokeBurnRole(opts *bind.TransactOpts, burner common.Address) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "revokeBurnRole", burner) +} + +func (_BurnMintERC677 *BurnMintERC677Session) RevokeBurnRole(burner common.Address) (*types.Transaction, error) { + return _BurnMintERC677.Contract.RevokeBurnRole(&_BurnMintERC677.TransactOpts, burner) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) RevokeBurnRole(burner common.Address) (*types.Transaction, error) { + return _BurnMintERC677.Contract.RevokeBurnRole(&_BurnMintERC677.TransactOpts, burner) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) RevokeMintRole(opts *bind.TransactOpts, minter common.Address) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "revokeMintRole", minter) +} + +func (_BurnMintERC677 *BurnMintERC677Session) RevokeMintRole(minter common.Address) (*types.Transaction, error) { + return _BurnMintERC677.Contract.RevokeMintRole(&_BurnMintERC677.TransactOpts, minter) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) RevokeMintRole(minter common.Address) (*types.Transaction, error) { + return _BurnMintERC677.Contract.RevokeMintRole(&_BurnMintERC677.TransactOpts, minter) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) Transfer(opts *bind.TransactOpts, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "transfer", to, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Session) Transfer(to common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.Transfer(&_BurnMintERC677.TransactOpts, to, amount) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) Transfer(to common.Address, amount 
*big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.Transfer(&_BurnMintERC677.TransactOpts, to, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) TransferAndCall(opts *bind.TransactOpts, to common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "transferAndCall", to, amount, data) +} + +func (_BurnMintERC677 *BurnMintERC677Session) TransferAndCall(to common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _BurnMintERC677.Contract.TransferAndCall(&_BurnMintERC677.TransactOpts, to, amount, data) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) TransferAndCall(to common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _BurnMintERC677.Contract.TransferAndCall(&_BurnMintERC677.TransactOpts, to, amount, data) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) TransferFrom(opts *bind.TransactOpts, from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "transferFrom", from, to, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Session) TransferFrom(from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.TransferFrom(&_BurnMintERC677.TransactOpts, from, to, amount) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) TransferFrom(from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _BurnMintERC677.Contract.TransferFrom(&_BurnMintERC677.TransactOpts, from, to, amount) +} + +func (_BurnMintERC677 *BurnMintERC677Transactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _BurnMintERC677.contract.Transact(opts, "transferOwnership", to) +} + +func (_BurnMintERC677 *BurnMintERC677Session) TransferOwnership(to common.Address) (*types.Transaction, error) { + 
return _BurnMintERC677.Contract.TransferOwnership(&_BurnMintERC677.TransactOpts, to) +} + +func (_BurnMintERC677 *BurnMintERC677TransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _BurnMintERC677.Contract.TransferOwnership(&_BurnMintERC677.TransactOpts, to) +} + +type BurnMintERC677ApprovalIterator struct { + Event *BurnMintERC677Approval + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BurnMintERC677ApprovalIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677Approval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677Approval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BurnMintERC677ApprovalIterator) Error() error { + return it.fail +} + +func (it *BurnMintERC677ApprovalIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BurnMintERC677Approval struct { + Owner common.Address + Spender common.Address + Value *big.Int + Raw types.Log +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) FilterApproval(opts *bind.FilterOpts, owner []common.Address, spender []common.Address) (*BurnMintERC677ApprovalIterator, error) { + + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var spenderRule []interface{} + for _, spenderItem := range spender { + spenderRule = append(spenderRule, spenderItem) + } + + logs, sub, err := _BurnMintERC677.contract.FilterLogs(opts, 
"Approval", ownerRule, spenderRule) + if err != nil { + return nil, err + } + return &BurnMintERC677ApprovalIterator{contract: _BurnMintERC677.contract, event: "Approval", logs: logs, sub: sub}, nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) WatchApproval(opts *bind.WatchOpts, sink chan<- *BurnMintERC677Approval, owner []common.Address, spender []common.Address) (event.Subscription, error) { + + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var spenderRule []interface{} + for _, spenderItem := range spender { + spenderRule = append(spenderRule, spenderItem) + } + + logs, sub, err := _BurnMintERC677.contract.WatchLogs(opts, "Approval", ownerRule, spenderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BurnMintERC677Approval) + if err := _BurnMintERC677.contract.UnpackLog(event, "Approval", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) ParseApproval(log types.Log) (*BurnMintERC677Approval, error) { + event := new(BurnMintERC677Approval) + if err := _BurnMintERC677.contract.UnpackLog(event, "Approval", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BurnMintERC677BurnAccessGrantedIterator struct { + Event *BurnMintERC677BurnAccessGranted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BurnMintERC677BurnAccessGrantedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(BurnMintERC677BurnAccessGranted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677BurnAccessGranted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BurnMintERC677BurnAccessGrantedIterator) Error() error { + return it.fail +} + +func (it *BurnMintERC677BurnAccessGrantedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BurnMintERC677BurnAccessGranted struct { + Burner common.Address + Raw types.Log +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) FilterBurnAccessGranted(opts *bind.FilterOpts, burner []common.Address) (*BurnMintERC677BurnAccessGrantedIterator, error) { + + var burnerRule []interface{} + for _, burnerItem := range burner { + burnerRule = append(burnerRule, burnerItem) + } + + logs, sub, err := _BurnMintERC677.contract.FilterLogs(opts, "BurnAccessGranted", burnerRule) + if err != nil { + return nil, err + } + return &BurnMintERC677BurnAccessGrantedIterator{contract: _BurnMintERC677.contract, event: "BurnAccessGranted", logs: logs, sub: sub}, nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) WatchBurnAccessGranted(opts *bind.WatchOpts, sink chan<- *BurnMintERC677BurnAccessGranted, burner []common.Address) (event.Subscription, error) { + + var burnerRule []interface{} + for _, burnerItem := range burner { + burnerRule = append(burnerRule, burnerItem) + } + + logs, sub, err := _BurnMintERC677.contract.WatchLogs(opts, "BurnAccessGranted", burnerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + 
event := new(BurnMintERC677BurnAccessGranted) + if err := _BurnMintERC677.contract.UnpackLog(event, "BurnAccessGranted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) ParseBurnAccessGranted(log types.Log) (*BurnMintERC677BurnAccessGranted, error) { + event := new(BurnMintERC677BurnAccessGranted) + if err := _BurnMintERC677.contract.UnpackLog(event, "BurnAccessGranted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BurnMintERC677BurnAccessRevokedIterator struct { + Event *BurnMintERC677BurnAccessRevoked + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BurnMintERC677BurnAccessRevokedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677BurnAccessRevoked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677BurnAccessRevoked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BurnMintERC677BurnAccessRevokedIterator) Error() error { + return it.fail +} + +func (it *BurnMintERC677BurnAccessRevokedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BurnMintERC677BurnAccessRevoked struct { + Burner common.Address + Raw types.Log +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) 
FilterBurnAccessRevoked(opts *bind.FilterOpts, burner []common.Address) (*BurnMintERC677BurnAccessRevokedIterator, error) { + + var burnerRule []interface{} + for _, burnerItem := range burner { + burnerRule = append(burnerRule, burnerItem) + } + + logs, sub, err := _BurnMintERC677.contract.FilterLogs(opts, "BurnAccessRevoked", burnerRule) + if err != nil { + return nil, err + } + return &BurnMintERC677BurnAccessRevokedIterator{contract: _BurnMintERC677.contract, event: "BurnAccessRevoked", logs: logs, sub: sub}, nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) WatchBurnAccessRevoked(opts *bind.WatchOpts, sink chan<- *BurnMintERC677BurnAccessRevoked, burner []common.Address) (event.Subscription, error) { + + var burnerRule []interface{} + for _, burnerItem := range burner { + burnerRule = append(burnerRule, burnerItem) + } + + logs, sub, err := _BurnMintERC677.contract.WatchLogs(opts, "BurnAccessRevoked", burnerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BurnMintERC677BurnAccessRevoked) + if err := _BurnMintERC677.contract.UnpackLog(event, "BurnAccessRevoked", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) ParseBurnAccessRevoked(log types.Log) (*BurnMintERC677BurnAccessRevoked, error) { + event := new(BurnMintERC677BurnAccessRevoked) + if err := _BurnMintERC677.contract.UnpackLog(event, "BurnAccessRevoked", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BurnMintERC677MintAccessGrantedIterator struct { + Event *BurnMintERC677MintAccessGranted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub 
ethereum.Subscription + done bool + fail error +} + +func (it *BurnMintERC677MintAccessGrantedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677MintAccessGranted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677MintAccessGranted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BurnMintERC677MintAccessGrantedIterator) Error() error { + return it.fail +} + +func (it *BurnMintERC677MintAccessGrantedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BurnMintERC677MintAccessGranted struct { + Minter common.Address + Raw types.Log +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) FilterMintAccessGranted(opts *bind.FilterOpts, minter []common.Address) (*BurnMintERC677MintAccessGrantedIterator, error) { + + var minterRule []interface{} + for _, minterItem := range minter { + minterRule = append(minterRule, minterItem) + } + + logs, sub, err := _BurnMintERC677.contract.FilterLogs(opts, "MintAccessGranted", minterRule) + if err != nil { + return nil, err + } + return &BurnMintERC677MintAccessGrantedIterator{contract: _BurnMintERC677.contract, event: "MintAccessGranted", logs: logs, sub: sub}, nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) WatchMintAccessGranted(opts *bind.WatchOpts, sink chan<- *BurnMintERC677MintAccessGranted, minter []common.Address) (event.Subscription, error) { + + var minterRule []interface{} + for _, minterItem := range minter { + minterRule = append(minterRule, minterItem) + } + + logs, sub, err := 
_BurnMintERC677.contract.WatchLogs(opts, "MintAccessGranted", minterRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BurnMintERC677MintAccessGranted) + if err := _BurnMintERC677.contract.UnpackLog(event, "MintAccessGranted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) ParseMintAccessGranted(log types.Log) (*BurnMintERC677MintAccessGranted, error) { + event := new(BurnMintERC677MintAccessGranted) + if err := _BurnMintERC677.contract.UnpackLog(event, "MintAccessGranted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BurnMintERC677MintAccessRevokedIterator struct { + Event *BurnMintERC677MintAccessRevoked + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BurnMintERC677MintAccessRevokedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677MintAccessRevoked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677MintAccessRevoked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BurnMintERC677MintAccessRevokedIterator) Error() error { + return it.fail +} + +func 
(it *BurnMintERC677MintAccessRevokedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BurnMintERC677MintAccessRevoked struct { + Minter common.Address + Raw types.Log +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) FilterMintAccessRevoked(opts *bind.FilterOpts, minter []common.Address) (*BurnMintERC677MintAccessRevokedIterator, error) { + + var minterRule []interface{} + for _, minterItem := range minter { + minterRule = append(minterRule, minterItem) + } + + logs, sub, err := _BurnMintERC677.contract.FilterLogs(opts, "MintAccessRevoked", minterRule) + if err != nil { + return nil, err + } + return &BurnMintERC677MintAccessRevokedIterator{contract: _BurnMintERC677.contract, event: "MintAccessRevoked", logs: logs, sub: sub}, nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) WatchMintAccessRevoked(opts *bind.WatchOpts, sink chan<- *BurnMintERC677MintAccessRevoked, minter []common.Address) (event.Subscription, error) { + + var minterRule []interface{} + for _, minterItem := range minter { + minterRule = append(minterRule, minterItem) + } + + logs, sub, err := _BurnMintERC677.contract.WatchLogs(opts, "MintAccessRevoked", minterRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BurnMintERC677MintAccessRevoked) + if err := _BurnMintERC677.contract.UnpackLog(event, "MintAccessRevoked", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) ParseMintAccessRevoked(log types.Log) (*BurnMintERC677MintAccessRevoked, error) { + event := new(BurnMintERC677MintAccessRevoked) + if err := _BurnMintERC677.contract.UnpackLog(event, "MintAccessRevoked", log); err != 
nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BurnMintERC677OwnershipTransferRequestedIterator struct { + Event *BurnMintERC677OwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BurnMintERC677OwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677OwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677OwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BurnMintERC677OwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *BurnMintERC677OwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BurnMintERC677OwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BurnMintERC677OwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _BurnMintERC677.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return 
&BurnMintERC677OwnershipTransferRequestedIterator{contract: _BurnMintERC677.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *BurnMintERC677OwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _BurnMintERC677.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BurnMintERC677OwnershipTransferRequested) + if err := _BurnMintERC677.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) ParseOwnershipTransferRequested(log types.Log) (*BurnMintERC677OwnershipTransferRequested, error) { + event := new(BurnMintERC677OwnershipTransferRequested) + if err := _BurnMintERC677.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BurnMintERC677OwnershipTransferredIterator struct { + Event *BurnMintERC677OwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BurnMintERC677OwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if 
it.done { + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677OwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677OwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BurnMintERC677OwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *BurnMintERC677OwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BurnMintERC677OwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BurnMintERC677OwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _BurnMintERC677.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &BurnMintERC677OwnershipTransferredIterator{contract: _BurnMintERC677.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *BurnMintERC677OwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := 
range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _BurnMintERC677.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BurnMintERC677OwnershipTransferred) + if err := _BurnMintERC677.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) ParseOwnershipTransferred(log types.Log) (*BurnMintERC677OwnershipTransferred, error) { + event := new(BurnMintERC677OwnershipTransferred) + if err := _BurnMintERC677.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BurnMintERC677TransferIterator struct { + Event *BurnMintERC677Transfer + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BurnMintERC677TransferIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677Transfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677Transfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BurnMintERC677TransferIterator) 
Error() error { + return it.fail +} + +func (it *BurnMintERC677TransferIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BurnMintERC677Transfer struct { + From common.Address + To common.Address + Value *big.Int + Raw types.Log +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) FilterTransfer(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BurnMintERC677TransferIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _BurnMintERC677.contract.FilterLogs(opts, "Transfer", fromRule, toRule) + if err != nil { + return nil, err + } + return &BurnMintERC677TransferIterator{contract: _BurnMintERC677.contract, event: "Transfer", logs: logs, sub: sub}, nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) WatchTransfer(opts *bind.WatchOpts, sink chan<- *BurnMintERC677Transfer, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _BurnMintERC677.contract.WatchLogs(opts, "Transfer", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BurnMintERC677Transfer) + if err := _BurnMintERC677.contract.UnpackLog(event, "Transfer", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) ParseTransfer(log types.Log) 
(*BurnMintERC677Transfer, error) { + event := new(BurnMintERC677Transfer) + if err := _BurnMintERC677.contract.UnpackLog(event, "Transfer", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type BurnMintERC677Transfer0Iterator struct { + Event *BurnMintERC677Transfer0 + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *BurnMintERC677Transfer0Iterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677Transfer0) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(BurnMintERC677Transfer0) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *BurnMintERC677Transfer0Iterator) Error() error { + return it.fail +} + +func (it *BurnMintERC677Transfer0Iterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type BurnMintERC677Transfer0 struct { + From common.Address + To common.Address + Value *big.Int + Data []byte + Raw types.Log +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) FilterTransfer0(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BurnMintERC677Transfer0Iterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _BurnMintERC677.contract.FilterLogs(opts, "Transfer0", fromRule, toRule) + if err != nil { + return nil, err + } + return 
&BurnMintERC677Transfer0Iterator{contract: _BurnMintERC677.contract, event: "Transfer0", logs: logs, sub: sub}, nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) WatchTransfer0(opts *bind.WatchOpts, sink chan<- *BurnMintERC677Transfer0, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _BurnMintERC677.contract.WatchLogs(opts, "Transfer0", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(BurnMintERC677Transfer0) + if err := _BurnMintERC677.contract.UnpackLog(event, "Transfer0", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_BurnMintERC677 *BurnMintERC677Filterer) ParseTransfer0(log types.Log) (*BurnMintERC677Transfer0, error) { + event := new(BurnMintERC677Transfer0) + if err := _BurnMintERC677.contract.UnpackLog(event, "Transfer0", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_BurnMintERC677 *BurnMintERC677) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _BurnMintERC677.abi.Events["Approval"].ID: + return _BurnMintERC677.ParseApproval(log) + case _BurnMintERC677.abi.Events["BurnAccessGranted"].ID: + return _BurnMintERC677.ParseBurnAccessGranted(log) + case _BurnMintERC677.abi.Events["BurnAccessRevoked"].ID: + return _BurnMintERC677.ParseBurnAccessRevoked(log) + case _BurnMintERC677.abi.Events["MintAccessGranted"].ID: + return 
_BurnMintERC677.ParseMintAccessGranted(log) + case _BurnMintERC677.abi.Events["MintAccessRevoked"].ID: + return _BurnMintERC677.ParseMintAccessRevoked(log) + case _BurnMintERC677.abi.Events["OwnershipTransferRequested"].ID: + return _BurnMintERC677.ParseOwnershipTransferRequested(log) + case _BurnMintERC677.abi.Events["OwnershipTransferred"].ID: + return _BurnMintERC677.ParseOwnershipTransferred(log) + case _BurnMintERC677.abi.Events["Transfer"].ID: + return _BurnMintERC677.ParseTransfer(log) + case _BurnMintERC677.abi.Events["Transfer0"].ID: + return _BurnMintERC677.ParseTransfer0(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (BurnMintERC677Approval) Topic() common.Hash { + return common.HexToHash("0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925") +} + +func (BurnMintERC677BurnAccessGranted) Topic() common.Hash { + return common.HexToHash("0x92308bb7573b2a3d17ddb868b39d8ebec433f3194421abc22d084f89658c9bad") +} + +func (BurnMintERC677BurnAccessRevoked) Topic() common.Hash { + return common.HexToHash("0x0a675452746933cefe3d74182e78db7afe57ba60eaa4234b5d85e9aa41b0610c") +} + +func (BurnMintERC677MintAccessGranted) Topic() common.Hash { + return common.HexToHash("0xe46fef8bbff1389d9010703cf8ebb363fb3daf5bf56edc27080b67bc8d9251ea") +} + +func (BurnMintERC677MintAccessRevoked) Topic() common.Hash { + return common.HexToHash("0xed998b960f6340d045f620c119730f7aa7995e7425c2401d3a5b64ff998a59e9") +} + +func (BurnMintERC677OwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (BurnMintERC677OwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (BurnMintERC677Transfer) Topic() common.Hash { + return common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") 
+} + +func (BurnMintERC677Transfer0) Topic() common.Hash { + return common.HexToHash("0xe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16") +} + +func (_BurnMintERC677 *BurnMintERC677) Address() common.Address { + return _BurnMintERC677.address +} + +type BurnMintERC677Interface interface { + Allowance(opts *bind.CallOpts, owner common.Address, spender common.Address) (*big.Int, error) + + BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) + + Decimals(opts *bind.CallOpts) (uint8, error) + + GetBurners(opts *bind.CallOpts) ([]common.Address, error) + + GetMinters(opts *bind.CallOpts) ([]common.Address, error) + + IsBurner(opts *bind.CallOpts, burner common.Address) (bool, error) + + IsMinter(opts *bind.CallOpts, minter common.Address) (bool, error) + + MaxSupply(opts *bind.CallOpts) (*big.Int, error) + + Name(opts *bind.CallOpts) (string, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) + + Symbol(opts *bind.CallOpts) (string, error) + + TotalSupply(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + Approve(opts *bind.TransactOpts, spender common.Address, amount *big.Int) (*types.Transaction, error) + + Burn(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + Burn0(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) + + BurnFrom(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) + + DecreaseAllowance(opts *bind.TransactOpts, spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) + + DecreaseApproval(opts *bind.TransactOpts, spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) + + GrantBurnRole(opts *bind.TransactOpts, burner common.Address) (*types.Transaction, error) + + GrantMintAndBurnRoles(opts *bind.TransactOpts, 
burnAndMinter common.Address) (*types.Transaction, error) + + GrantMintRole(opts *bind.TransactOpts, minter common.Address) (*types.Transaction, error) + + IncreaseAllowance(opts *bind.TransactOpts, spender common.Address, addedValue *big.Int) (*types.Transaction, error) + + IncreaseApproval(opts *bind.TransactOpts, spender common.Address, addedValue *big.Int) (*types.Transaction, error) + + Mint(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) + + RevokeBurnRole(opts *bind.TransactOpts, burner common.Address) (*types.Transaction, error) + + RevokeMintRole(opts *bind.TransactOpts, minter common.Address) (*types.Transaction, error) + + Transfer(opts *bind.TransactOpts, to common.Address, amount *big.Int) (*types.Transaction, error) + + TransferAndCall(opts *bind.TransactOpts, to common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + TransferFrom(opts *bind.TransactOpts, from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + FilterApproval(opts *bind.FilterOpts, owner []common.Address, spender []common.Address) (*BurnMintERC677ApprovalIterator, error) + + WatchApproval(opts *bind.WatchOpts, sink chan<- *BurnMintERC677Approval, owner []common.Address, spender []common.Address) (event.Subscription, error) + + ParseApproval(log types.Log) (*BurnMintERC677Approval, error) + + FilterBurnAccessGranted(opts *bind.FilterOpts, burner []common.Address) (*BurnMintERC677BurnAccessGrantedIterator, error) + + WatchBurnAccessGranted(opts *bind.WatchOpts, sink chan<- *BurnMintERC677BurnAccessGranted, burner []common.Address) (event.Subscription, error) + + ParseBurnAccessGranted(log types.Log) (*BurnMintERC677BurnAccessGranted, error) + + FilterBurnAccessRevoked(opts *bind.FilterOpts, burner []common.Address) (*BurnMintERC677BurnAccessRevokedIterator, error) + + WatchBurnAccessRevoked(opts 
*bind.WatchOpts, sink chan<- *BurnMintERC677BurnAccessRevoked, burner []common.Address) (event.Subscription, error) + + ParseBurnAccessRevoked(log types.Log) (*BurnMintERC677BurnAccessRevoked, error) + + FilterMintAccessGranted(opts *bind.FilterOpts, minter []common.Address) (*BurnMintERC677MintAccessGrantedIterator, error) + + WatchMintAccessGranted(opts *bind.WatchOpts, sink chan<- *BurnMintERC677MintAccessGranted, minter []common.Address) (event.Subscription, error) + + ParseMintAccessGranted(log types.Log) (*BurnMintERC677MintAccessGranted, error) + + FilterMintAccessRevoked(opts *bind.FilterOpts, minter []common.Address) (*BurnMintERC677MintAccessRevokedIterator, error) + + WatchMintAccessRevoked(opts *bind.WatchOpts, sink chan<- *BurnMintERC677MintAccessRevoked, minter []common.Address) (event.Subscription, error) + + ParseMintAccessRevoked(log types.Log) (*BurnMintERC677MintAccessRevoked, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BurnMintERC677OwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *BurnMintERC677OwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*BurnMintERC677OwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BurnMintERC677OwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *BurnMintERC677OwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*BurnMintERC677OwnershipTransferred, error) + + FilterTransfer(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BurnMintERC677TransferIterator, error) + + WatchTransfer(opts *bind.WatchOpts, sink chan<- *BurnMintERC677Transfer, from 
[]common.Address, to []common.Address) (event.Subscription, error) + + ParseTransfer(log types.Log) (*BurnMintERC677Transfer, error) + + FilterTransfer0(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*BurnMintERC677Transfer0Iterator, error) + + WatchTransfer0(opts *bind.WatchOpts, sink chan<- *BurnMintERC677Transfer0, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseTransfer0(log types.Log) (*BurnMintERC677Transfer0, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/shared/generated/erc20/erc20.go b/core/gethwrappers/shared/generated/erc20/erc20.go new file mode 100644 index 00000000..5a009a9e --- /dev/null +++ b/core/gethwrappers/shared/generated/erc20/erc20.go @@ -0,0 +1,702 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package erc20 + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var ERC20MetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"string\",\"name\":\"name_\",\"type\":\"string\"},{\"internalType\":\"string\",\"name\":\"symbol_\",\"type\":\"string\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"Approval\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"Transfer\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"}],\"name\":\"allowance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"approve\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"balanceOf\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\
":\"address\"},{\"internalType\":\"uint256\",\"name\":\"subtractedValue\",\"type\":\"uint256\"}],\"name\":\"decreaseAllowance\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"addedValue\",\"type\":\"uint256\"}],\"name\":\"increaseAllowance\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"name\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"symbol\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"totalSupply\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"transfer\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"transferFrom\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60806040523480156200001157600080fd5b5060405162000de638038062000de683398101604081905262000034916200011f565b600362000042838262000218565b50600462000051828262000218565b505050620002e4565b634e487b7160e01b600052604160045260246000fd5b600082601f8301126200008257600080fd5b81516001600160401b03808211156200009f576200009f6200005a565b604051601f8301601f19908116603f01168101908282118183101715620000ca57620000ca6200005a565b81604052838152602092508683858801011115620000e757600080fd5b600091505b838210156200010b5785820183015181830184015290820190620000ec565b600093810190920192909252949350505050565b600080604083850312156200013357600080fd5b82516001600160401b03808211156200014b57600080fd5b620001598683870162000070565b935060208501519150808211156200017057600080fd5b506200017f8582860162000070565b9150509250929050565b600181811c908216806200019e57607f821691505b602082108103620001bf57634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200021357600081815260208120601f850160051c81016020861015620001ee5750805b601f850160051c820191505b818110156200020f57828155600101620001fa565b5050505b505050565b81516001600160401b038111156200023457620002346200005a565b6200024c8162000245845462000189565b84620001c5565b602080601f8311600181146200028457600084156200026b5750858301515b600019600386901b1c1916600185901b1785556200020f565b600085815260208120601f198616915b82811015620002b55788860151825594840194600190910190840162000294565b5085821015620002d45787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b610af280620002f46000396000f3fe608060405234801561001057600080fd5b50600436106100c95760003560e01c80633950935111610081578063a457c2d71161005b578063a457c2d714610194578063a9059cbb146101a7578063dd62ed3e146101ba57600080fd5b8063395093511461014357806370a082311461015657806395d89b411461018c57600080fd5b806318160ddd116100b257806318160ddd1461010f57806323b872dd14610121578063313ce5671461013457600080fd5b806306fdde03146100ce578063095ea7b3146100ec575b600080fd5b6100d6610200565b6040516100e39190610908565b60405180910390f35b6100ff6100f
a36600461099d565b610292565b60405190151581526020016100e3565b6002545b6040519081526020016100e3565b6100ff61012f3660046109c7565b6102ac565b604051601281526020016100e3565b6100ff61015136600461099d565b6102d0565b610113610164366004610a03565b73ffffffffffffffffffffffffffffffffffffffff1660009081526020819052604090205490565b6100d661031c565b6100ff6101a236600461099d565b61032b565b6100ff6101b536600461099d565b610401565b6101136101c8366004610a25565b73ffffffffffffffffffffffffffffffffffffffff918216600090815260016020908152604080832093909416825291909152205490565b60606003805461020f90610a58565b80601f016020809104026020016040519081016040528092919081815260200182805461023b90610a58565b80156102885780601f1061025d57610100808354040283529160200191610288565b820191906000526020600020905b81548152906001019060200180831161026b57829003601f168201915b5050505050905090565b6000336102a081858561040f565b60019150505b92915050565b6000336102ba8582856105c2565b6102c5858585610699565b506001949350505050565b33600081815260016020908152604080832073ffffffffffffffffffffffffffffffffffffffff871684529091528120549091906102a09082908690610317908790610aab565b61040f565b60606004805461020f90610a58565b33600081815260016020908152604080832073ffffffffffffffffffffffffffffffffffffffff87168452909152812054909190838110156103f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f7760448201527f207a65726f00000000000000000000000000000000000000000000000000000060648201526084015b60405180910390fd5b6102c5828686840361040f565b6000336102a0818585610699565b73ffffffffffffffffffffffffffffffffffffffff83166104b1576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f2061646460448201527f726573730000000000000000000000000000000000000000000000000000000060648201526084016103eb565b73ffffffffffffffffffffffffffffffffffffffff8216610554576040517f08c379a000000000000
000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f20616464726560448201527f737300000000000000000000000000000000000000000000000000000000000060648201526084016103eb565b73ffffffffffffffffffffffffffffffffffffffff83811660008181526001602090815260408083209487168084529482529182902085905590518481527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925910160405180910390a3505050565b73ffffffffffffffffffffffffffffffffffffffff8381166000908152600160209081526040808320938616835292905220547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff81146106935781811015610686576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e636500000060448201526064016103eb565b610693848484840361040f565b50505050565b73ffffffffffffffffffffffffffffffffffffffff831661073c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f20616460448201527f647265737300000000000000000000000000000000000000000000000000000060648201526084016103eb565b73ffffffffffffffffffffffffffffffffffffffff82166107df576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201527f657373000000000000000000000000000000000000000000000000000000000060648201526084016103eb565b73ffffffffffffffffffffffffffffffffffffffff831660009081526020819052604090205481811015610895576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e742065786365656473206260448201527f616c616e6365000000000000000000000000000000000000000000000000000060648201526084016103eb565b73ffffffffffffffffffffffffffffffffffffffff84811660008181526020818152604080832087870390559
3871680835291849020805487019055925185815290927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a3610693565b600060208083528351808285015260005b8181101561093557858101830151858201604001528201610919565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461099857600080fd5b919050565b600080604083850312156109b057600080fd5b6109b983610974565b946020939093013593505050565b6000806000606084860312156109dc57600080fd5b6109e584610974565b92506109f360208501610974565b9150604084013590509250925092565b600060208284031215610a1557600080fd5b610a1e82610974565b9392505050565b60008060408385031215610a3857600080fd5b610a4183610974565b9150610a4f60208401610974565b90509250929050565b600181811c90821680610a6c57607f821691505b602082108103610aa5577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b808201808211156102a6577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fdfea164736f6c6343000813000a", +} + +var ERC20ABI = ERC20MetaData.ABI + +var ERC20Bin = ERC20MetaData.Bin + +func DeployERC20(auth *bind.TransactOpts, backend bind.ContractBackend, name_ string, symbol_ string) (common.Address, *types.Transaction, *ERC20, error) { + parsed, err := ERC20MetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(ERC20Bin), backend, name_, symbol_) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &ERC20{address: address, abi: *parsed, ERC20Caller: ERC20Caller{contract: contract}, ERC20Transactor: ERC20Transactor{contract: contract}, ERC20Filterer: ERC20Filterer{contract: contract}}, nil +} + +type ERC20 struct { + address 
common.Address + abi abi.ABI + ERC20Caller + ERC20Transactor + ERC20Filterer +} + +type ERC20Caller struct { + contract *bind.BoundContract +} + +type ERC20Transactor struct { + contract *bind.BoundContract +} + +type ERC20Filterer struct { + contract *bind.BoundContract +} + +type ERC20Session struct { + Contract *ERC20 + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type ERC20CallerSession struct { + Contract *ERC20Caller + CallOpts bind.CallOpts +} + +type ERC20TransactorSession struct { + Contract *ERC20Transactor + TransactOpts bind.TransactOpts +} + +type ERC20Raw struct { + Contract *ERC20 +} + +type ERC20CallerRaw struct { + Contract *ERC20Caller +} + +type ERC20TransactorRaw struct { + Contract *ERC20Transactor +} + +func NewERC20(address common.Address, backend bind.ContractBackend) (*ERC20, error) { + abi, err := abi.JSON(strings.NewReader(ERC20ABI)) + if err != nil { + return nil, err + } + contract, err := bindERC20(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &ERC20{address: address, abi: abi, ERC20Caller: ERC20Caller{contract: contract}, ERC20Transactor: ERC20Transactor{contract: contract}, ERC20Filterer: ERC20Filterer{contract: contract}}, nil +} + +func NewERC20Caller(address common.Address, caller bind.ContractCaller) (*ERC20Caller, error) { + contract, err := bindERC20(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ERC20Caller{contract: contract}, nil +} + +func NewERC20Transactor(address common.Address, transactor bind.ContractTransactor) (*ERC20Transactor, error) { + contract, err := bindERC20(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ERC20Transactor{contract: contract}, nil +} + +func NewERC20Filterer(address common.Address, filterer bind.ContractFilterer) (*ERC20Filterer, error) { + contract, err := bindERC20(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ERC20Filterer{contract: 
contract}, nil +} + +func bindERC20(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ERC20MetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_ERC20 *ERC20Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ERC20.Contract.ERC20Caller.contract.Call(opts, result, method, params...) +} + +func (_ERC20 *ERC20Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ERC20.Contract.ERC20Transactor.contract.Transfer(opts) +} + +func (_ERC20 *ERC20Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ERC20.Contract.ERC20Transactor.contract.Transact(opts, method, params...) +} + +func (_ERC20 *ERC20CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _ERC20.Contract.contract.Call(opts, result, method, params...) +} + +func (_ERC20 *ERC20TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _ERC20.Contract.contract.Transfer(opts) +} + +func (_ERC20 *ERC20TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _ERC20.Contract.contract.Transact(opts, method, params...) 
+} + +func (_ERC20 *ERC20Caller) Allowance(opts *bind.CallOpts, owner common.Address, spender common.Address) (*big.Int, error) { + var out []interface{} + err := _ERC20.contract.Call(opts, &out, "allowance", owner, spender) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_ERC20 *ERC20Session) Allowance(owner common.Address, spender common.Address) (*big.Int, error) { + return _ERC20.Contract.Allowance(&_ERC20.CallOpts, owner, spender) +} + +func (_ERC20 *ERC20CallerSession) Allowance(owner common.Address, spender common.Address) (*big.Int, error) { + return _ERC20.Contract.Allowance(&_ERC20.CallOpts, owner, spender) +} + +func (_ERC20 *ERC20Caller) BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) { + var out []interface{} + err := _ERC20.contract.Call(opts, &out, "balanceOf", account) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_ERC20 *ERC20Session) BalanceOf(account common.Address) (*big.Int, error) { + return _ERC20.Contract.BalanceOf(&_ERC20.CallOpts, account) +} + +func (_ERC20 *ERC20CallerSession) BalanceOf(account common.Address) (*big.Int, error) { + return _ERC20.Contract.BalanceOf(&_ERC20.CallOpts, account) +} + +func (_ERC20 *ERC20Caller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _ERC20.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_ERC20 *ERC20Session) Decimals() (uint8, error) { + return _ERC20.Contract.Decimals(&_ERC20.CallOpts) +} + +func (_ERC20 *ERC20CallerSession) Decimals() (uint8, error) { + return _ERC20.Contract.Decimals(&_ERC20.CallOpts) +} + +func (_ERC20 *ERC20Caller) Name(opts *bind.CallOpts) (string, error) { + var out []interface{} + 
err := _ERC20.contract.Call(opts, &out, "name") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_ERC20 *ERC20Session) Name() (string, error) { + return _ERC20.Contract.Name(&_ERC20.CallOpts) +} + +func (_ERC20 *ERC20CallerSession) Name() (string, error) { + return _ERC20.Contract.Name(&_ERC20.CallOpts) +} + +func (_ERC20 *ERC20Caller) Symbol(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _ERC20.contract.Call(opts, &out, "symbol") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_ERC20 *ERC20Session) Symbol() (string, error) { + return _ERC20.Contract.Symbol(&_ERC20.CallOpts) +} + +func (_ERC20 *ERC20CallerSession) Symbol() (string, error) { + return _ERC20.Contract.Symbol(&_ERC20.CallOpts) +} + +func (_ERC20 *ERC20Caller) TotalSupply(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _ERC20.contract.Call(opts, &out, "totalSupply") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_ERC20 *ERC20Session) TotalSupply() (*big.Int, error) { + return _ERC20.Contract.TotalSupply(&_ERC20.CallOpts) +} + +func (_ERC20 *ERC20CallerSession) TotalSupply() (*big.Int, error) { + return _ERC20.Contract.TotalSupply(&_ERC20.CallOpts) +} + +func (_ERC20 *ERC20Transactor) Approve(opts *bind.TransactOpts, spender common.Address, amount *big.Int) (*types.Transaction, error) { + return _ERC20.contract.Transact(opts, "approve", spender, amount) +} + +func (_ERC20 *ERC20Session) Approve(spender common.Address, amount *big.Int) (*types.Transaction, error) { + return _ERC20.Contract.Approve(&_ERC20.TransactOpts, spender, amount) +} + +func (_ERC20 *ERC20TransactorSession) Approve(spender common.Address, amount *big.Int) (*types.Transaction, error) { + 
return _ERC20.Contract.Approve(&_ERC20.TransactOpts, spender, amount) +} + +func (_ERC20 *ERC20Transactor) DecreaseAllowance(opts *bind.TransactOpts, spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _ERC20.contract.Transact(opts, "decreaseAllowance", spender, subtractedValue) +} + +func (_ERC20 *ERC20Session) DecreaseAllowance(spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _ERC20.Contract.DecreaseAllowance(&_ERC20.TransactOpts, spender, subtractedValue) +} + +func (_ERC20 *ERC20TransactorSession) DecreaseAllowance(spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _ERC20.Contract.DecreaseAllowance(&_ERC20.TransactOpts, spender, subtractedValue) +} + +func (_ERC20 *ERC20Transactor) IncreaseAllowance(opts *bind.TransactOpts, spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _ERC20.contract.Transact(opts, "increaseAllowance", spender, addedValue) +} + +func (_ERC20 *ERC20Session) IncreaseAllowance(spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _ERC20.Contract.IncreaseAllowance(&_ERC20.TransactOpts, spender, addedValue) +} + +func (_ERC20 *ERC20TransactorSession) IncreaseAllowance(spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _ERC20.Contract.IncreaseAllowance(&_ERC20.TransactOpts, spender, addedValue) +} + +func (_ERC20 *ERC20Transactor) Transfer(opts *bind.TransactOpts, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _ERC20.contract.Transact(opts, "transfer", to, amount) +} + +func (_ERC20 *ERC20Session) Transfer(to common.Address, amount *big.Int) (*types.Transaction, error) { + return _ERC20.Contract.Transfer(&_ERC20.TransactOpts, to, amount) +} + +func (_ERC20 *ERC20TransactorSession) Transfer(to common.Address, amount *big.Int) (*types.Transaction, error) { + return _ERC20.Contract.Transfer(&_ERC20.TransactOpts, 
to, amount) +} + +func (_ERC20 *ERC20Transactor) TransferFrom(opts *bind.TransactOpts, from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _ERC20.contract.Transact(opts, "transferFrom", from, to, amount) +} + +func (_ERC20 *ERC20Session) TransferFrom(from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _ERC20.Contract.TransferFrom(&_ERC20.TransactOpts, from, to, amount) +} + +func (_ERC20 *ERC20TransactorSession) TransferFrom(from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _ERC20.Contract.TransferFrom(&_ERC20.TransactOpts, from, to, amount) +} + +type ERC20ApprovalIterator struct { + Event *ERC20Approval + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ERC20ApprovalIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ERC20Approval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ERC20Approval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ERC20ApprovalIterator) Error() error { + return it.fail +} + +func (it *ERC20ApprovalIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ERC20Approval struct { + Owner common.Address + Spender common.Address + Value *big.Int + Raw types.Log +} + +func (_ERC20 *ERC20Filterer) FilterApproval(opts *bind.FilterOpts, owner []common.Address, spender []common.Address) (*ERC20ApprovalIterator, error) { + + var ownerRule []interface{} 
+ for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var spenderRule []interface{} + for _, spenderItem := range spender { + spenderRule = append(spenderRule, spenderItem) + } + + logs, sub, err := _ERC20.contract.FilterLogs(opts, "Approval", ownerRule, spenderRule) + if err != nil { + return nil, err + } + return &ERC20ApprovalIterator{contract: _ERC20.contract, event: "Approval", logs: logs, sub: sub}, nil +} + +func (_ERC20 *ERC20Filterer) WatchApproval(opts *bind.WatchOpts, sink chan<- *ERC20Approval, owner []common.Address, spender []common.Address) (event.Subscription, error) { + + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var spenderRule []interface{} + for _, spenderItem := range spender { + spenderRule = append(spenderRule, spenderItem) + } + + logs, sub, err := _ERC20.contract.WatchLogs(opts, "Approval", ownerRule, spenderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ERC20Approval) + if err := _ERC20.contract.UnpackLog(event, "Approval", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ERC20 *ERC20Filterer) ParseApproval(log types.Log) (*ERC20Approval, error) { + event := new(ERC20Approval) + if err := _ERC20.contract.UnpackLog(event, "Approval", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type ERC20TransferIterator struct { + Event *ERC20Transfer + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ERC20TransferIterator) Next() bool { + + if it.fail != nil { + return false 
+ } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ERC20Transfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ERC20Transfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ERC20TransferIterator) Error() error { + return it.fail +} + +func (it *ERC20TransferIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ERC20Transfer struct { + From common.Address + To common.Address + Value *big.Int + Raw types.Log +} + +func (_ERC20 *ERC20Filterer) FilterTransfer(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*ERC20TransferIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _ERC20.contract.FilterLogs(opts, "Transfer", fromRule, toRule) + if err != nil { + return nil, err + } + return &ERC20TransferIterator{contract: _ERC20.contract, event: "Transfer", logs: logs, sub: sub}, nil +} + +func (_ERC20 *ERC20Filterer) WatchTransfer(opts *bind.WatchOpts, sink chan<- *ERC20Transfer, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _ERC20.contract.WatchLogs(opts, "Transfer", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + 
defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ERC20Transfer) + if err := _ERC20.contract.UnpackLog(event, "Transfer", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ERC20 *ERC20Filterer) ParseTransfer(log types.Log) (*ERC20Transfer, error) { + event := new(ERC20Transfer) + if err := _ERC20.contract.UnpackLog(event, "Transfer", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_ERC20 *ERC20) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _ERC20.abi.Events["Approval"].ID: + return _ERC20.ParseApproval(log) + case _ERC20.abi.Events["Transfer"].ID: + return _ERC20.ParseTransfer(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (ERC20Approval) Topic() common.Hash { + return common.HexToHash("0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925") +} + +func (ERC20Transfer) Topic() common.Hash { + return common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") +} + +func (_ERC20 *ERC20) Address() common.Address { + return _ERC20.address +} + +type ERC20Interface interface { + Allowance(opts *bind.CallOpts, owner common.Address, spender common.Address) (*big.Int, error) + + BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) + + Decimals(opts *bind.CallOpts) (uint8, error) + + Name(opts *bind.CallOpts) (string, error) + + Symbol(opts *bind.CallOpts) (string, error) + + TotalSupply(opts *bind.CallOpts) (*big.Int, error) + + Approve(opts *bind.TransactOpts, spender common.Address, amount *big.Int) (*types.Transaction, error) + + DecreaseAllowance(opts *bind.TransactOpts, spender common.Address, subtractedValue 
*big.Int) (*types.Transaction, error) + + IncreaseAllowance(opts *bind.TransactOpts, spender common.Address, addedValue *big.Int) (*types.Transaction, error) + + Transfer(opts *bind.TransactOpts, to common.Address, amount *big.Int) (*types.Transaction, error) + + TransferFrom(opts *bind.TransactOpts, from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) + + FilterApproval(opts *bind.FilterOpts, owner []common.Address, spender []common.Address) (*ERC20ApprovalIterator, error) + + WatchApproval(opts *bind.WatchOpts, sink chan<- *ERC20Approval, owner []common.Address, spender []common.Address) (event.Subscription, error) + + ParseApproval(log types.Log) (*ERC20Approval, error) + + FilterTransfer(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*ERC20TransferIterator, error) + + WatchTransfer(opts *bind.WatchOpts, sink chan<- *ERC20Transfer, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseTransfer(log types.Log) (*ERC20Transfer, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/shared/generated/link_token/link_token.go b/core/gethwrappers/shared/generated/link_token/link_token.go new file mode 100644 index 00000000..cbd563da --- /dev/null +++ b/core/gethwrappers/shared/generated/link_token/link_token.go @@ -0,0 +1,2068 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package link_token + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var LinkTokenMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"supplyAfterMint\",\"type\":\"uint256\"}],\"name\":\"MaxSupplyExceeded\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"SenderNotBurner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"SenderNotMinter\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"Approval\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"burner\",\"type\":\"address\"}],\"name\":\"BurnAccessGranted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"burner\",\"type\":\"address\"}],\"name\":\"BurnAccessRevoked\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"minter\",\"type\":\"address\"}],\"name\":\"
MintAccessGranted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"minter\",\"type\":\"address\"}],\"name\":\"MintAccessRevoked\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"Transfer\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"Transfer\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"}],\"name\":\"allowance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\"
:\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"approve\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"balanceOf\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"burn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"burn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"burnFrom\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"subtractedValue\",\"type\":\"uint256\"}],\"name\":\"decreaseAllowance\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"subtractedValue\",\"type\":\"uint256\"}],\"name\":\"decreaseApproval\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"i
nputs\":[],\"name\":\"getBurners\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMinters\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"burner\",\"type\":\"address\"}],\"name\":\"grantBurnRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"burnAndMinter\",\"type\":\"address\"}],\"name\":\"grantMintAndBurnRoles\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"minter\",\"type\":\"address\"}],\"name\":\"grantMintRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"addedValue\",\"type\":\"uint256\"}],\"name\":\"increaseAllowance\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"addedValue\",\"type\":\"uint256\"}],\"name\":\"increaseApproval\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"burner\",\"type\":\"address\"}],\"name\":\"isBurner\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"minter\",\"type\":\"address\"}],\"name\":\"isMinter\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name
\":\"maxSupply\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"mint\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"name\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"burner\",\"type\":\"address\"}],\"name\":\"revokeBurnRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"minter\",\"type\":\"address\"}],\"name\":\"revokeMintRole\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"symbol\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"totalSupply\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"transfer\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inpu
ts\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"transferAndCall\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"transferFrom\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60c06040523480156200001157600080fd5b506040518060400160405280600f81526020016e21b430b4b72634b735902a37b5b2b760891b815250604051806040016040528060048152602001634c494e4b60e01b81525060126b033b2e3c9fd0803ce8000000338060008686818181600390816200007f91906200028c565b5060046200008e82826200028c565b5050506001600160a01b0384169150620000f190505760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600580546001600160a01b0319166001600160a01b0384811691909117909155811615620001245762000124816200013b565b50505060ff90911660805260a05250620003589050565b336001600160a01b03821603620001955760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401620000e8565b600680546001600160a01b0319166001600160a01b03838116918217909255600554604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b634e487b7160e01b600052604160045260246000fd5b600181811c90821680
6200021257607f821691505b6020821081036200023357634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200028757600081815260208120601f850160051c81016020861015620002625750805b601f850160051c820191505b8181101562000283578281556001016200026e565b5050505b505050565b81516001600160401b03811115620002a857620002a8620001e7565b620002c081620002b98454620001fd565b8462000239565b602080601f831160018114620002f85760008415620002df5750858301515b600019600386901b1c1916600185901b17855562000283565b600085815260208120601f198616915b82811015620003295788860151825594840194600190910190840162000308565b5085821015620003485787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b60805160a051611e4c6200038c60003960008181610447015281816108c301526108ed015260006102710152611e4c6000f3fe608060405234801561001057600080fd5b50600436106101f05760003560e01c806379cc67901161010f578063c2e3273d116100a2578063d73dd62311610071578063d73dd6231461046b578063dd62ed3e1461047e578063f2fde38b146104c4578063f81094f3146104d757600080fd5b8063c2e3273d1461040c578063c630948d1461041f578063c64d0ebc14610432578063d5abeb011461044557600080fd5b80639dc29fac116100de5780639dc29fac146103c0578063a457c2d7146103d3578063a9059cbb146103e6578063aa271e1a146103f957600080fd5b806379cc67901461037557806386fe8b43146103885780638da5cb5b1461039057806395d89b41146103b857600080fd5b806340c10f19116101875780636618846311610156578063661884631461030f5780636b32810b1461032257806370a082311461033757806379ba50971461036d57600080fd5b806340c10f19146102c157806342966c68146102d65780634334614a146102e95780634f5632f8146102fc57600080fd5b806323b872dd116101c357806323b872dd14610257578063313ce5671461026a578063395093511461029b5780634000aea0146102ae57600080fd5b806301ffc9a7146101f557806306fdde031461021d578063095ea7b31461023257806318160ddd14610245575b600080fd5b6102086102033660046119b9565b6104ea565b60405190151581526020015b60405180910390f35b61022561061b565b6040516102149190611a5f565b610208610240366004611a9b565b6106ad565b6002545b604051908152602001610214565b610208610265366004611a
c5565b6106c5565b60405160ff7f0000000000000000000000000000000000000000000000000000000000000000168152602001610214565b6102086102a9366004611a9b565b6106e9565b6102086102bc366004611b30565b610735565b6102d46102cf366004611a9b565b610858565b005b6102d46102e4366004611c19565b61097f565b6102086102f7366004611c32565b6109cc565b6102d461030a366004611c32565b6109d9565b61020861031d366004611a9b565b610a35565b61032a610a48565b6040516102149190611c4d565b610249610345366004611c32565b73ffffffffffffffffffffffffffffffffffffffff1660009081526020819052604090205490565b6102d4610a59565b6102d4610383366004611a9b565b610b5a565b61032a610ba9565b60055460405173ffffffffffffffffffffffffffffffffffffffff9091168152602001610214565b610225610bb5565b6102d46103ce366004611a9b565b610bc4565b6102086103e1366004611a9b565b610bce565b6102086103f4366004611a9b565b610c9f565b610208610407366004611c32565b610cad565b6102d461041a366004611c32565b610cba565b6102d461042d366004611c32565b610d16565b6102d4610440366004611c32565b610d24565b7f0000000000000000000000000000000000000000000000000000000000000000610249565b6102d4610479366004611a9b565b610d80565b61024961048c366004611ca7565b73ffffffffffffffffffffffffffffffffffffffff918216600090815260016020908152604080832093909416825291909152205490565b6102d46104d2366004611c32565b610d8a565b6102d46104e5366004611c32565b610d9b565b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f36372b0700000000000000000000000000000000000000000000000000000000148061057d57507fffffffff0000000000000000000000000000000000000000000000000000000082167f4000aea000000000000000000000000000000000000000000000000000000000145b806105c957507fffffffff0000000000000000000000000000000000000000000000000000000082167fe6599b4d00000000000000000000000000000000000000000000000000000000145b8061061557507fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a700000000000000000000000000000000000000000000000000000000145b92915050565b60606003805461062a90611cda565b80601f0160208091040260200160405190810160405280929190818152602001
82805461065690611cda565b80156106a35780601f10610678576101008083540402835291602001916106a3565b820191906000526020600020905b81548152906001019060200180831161068657829003601f168201915b5050505050905090565b6000336106bb818585610df7565b5060019392505050565b6000336106d3858285610e2b565b6106de858585610efc565b506001949350505050565b33600081815260016020908152604080832073ffffffffffffffffffffffffffffffffffffffff871684529091528120549091906106bb9082908690610730908790611d5c565b610df7565b60006107418484610c9f565b508373ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c1685856040516107a1929190611d6f565b60405180910390a373ffffffffffffffffffffffffffffffffffffffff84163b156106bb576040517fa4c0ed3600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff85169063a4c0ed369061081c90339087908790600401611d90565b600060405180830381600087803b15801561083657600080fd5b505af115801561084a573d6000803e3d6000fd5b505050505060019392505050565b61086133610cad565b61089e576040517fe2c8c9d50000000000000000000000000000000000000000000000000000000081523360048201526024015b60405180910390fd5b813073ffffffffffffffffffffffffffffffffffffffff8216036108c157600080fd5b7f00000000000000000000000000000000000000000000000000000000000000001580159061092257507f00000000000000000000000000000000000000000000000000000000000000008261091660025490565b6109209190611d5c565b115b15610970578161093160025490565b61093b9190611d5c565b6040517fcbbf111300000000000000000000000000000000000000000000000000000000815260040161089591815260200190565b61097a8383610f2a565b505050565b610988336109cc565b6109c0576040517fc820b10b000000000000000000000000000000000000000000000000000000008152336004820152602401610895565b6109c98161101d565b50565b6000610615600983611027565b6109e1611056565b6109ec6009826110d9565b156109c95760405173ffffffffffffffffffffffffffffffffffffffff8216907f0a675452746933cefe3d74182e78db7afe57ba60eaa4234b5d85e9aa41b0610c90600090a2
50565b6000610a418383610bce565b9392505050565b6060610a5460076110fb565b905090565b60065473ffffffffffffffffffffffffffffffffffffffff163314610ada576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610895565b600580547fffffffffffffffffffffffff00000000000000000000000000000000000000008082163390811790935560068054909116905560405173ffffffffffffffffffffffffffffffffffffffff909116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a350565b610b63336109cc565b610b9b576040517fc820b10b000000000000000000000000000000000000000000000000000000008152336004820152602401610895565b610ba58282611108565b5050565b6060610a5460096110fb565b60606004805461062a90611cda565b610ba58282610b5a565b33600081815260016020908152604080832073ffffffffffffffffffffffffffffffffffffffff8716845290915281205490919083811015610c92576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f7760448201527f207a65726f0000000000000000000000000000000000000000000000000000006064820152608401610895565b6106de8286868403610df7565b6000336106bb818585610efc565b6000610615600783611027565b610cc2611056565b610ccd60078261111d565b156109c95760405173ffffffffffffffffffffffffffffffffffffffff8216907fe46fef8bbff1389d9010703cf8ebb363fb3daf5bf56edc27080b67bc8d9251ea90600090a250565b610d1f81610cba565b6109c9815b610d2c611056565b610d3760098261111d565b156109c95760405173ffffffffffffffffffffffffffffffffffffffff8216907f92308bb7573b2a3d17ddb868b39d8ebec433f3194421abc22d084f89658c9bad90600090a250565b61097a82826106e9565b610d92611056565b6109c98161113f565b610da3611056565b610dae6007826110d9565b156109c95760405173ffffffffffffffffffffffffffffffffffffffff8216907fed998b960f6340d045f620c119730f7aa7995e7425c2401d3a5b64ff998a59e990600090a250565b813073ffffffffffffffffffffffffffffffffffffffff821603610e1a57600080fd5b
610e25848484611235565b50505050565b73ffffffffffffffffffffffffffffffffffffffff8381166000908152600160209081526040808320938616835292905220547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8114610e255781811015610eef576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e63650000006044820152606401610895565b610e258484848403610df7565b813073ffffffffffffffffffffffffffffffffffffffff821603610f1f57600080fd5b610e258484846113e8565b73ffffffffffffffffffffffffffffffffffffffff8216610fa7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f45524332303a206d696e7420746f20746865207a65726f2061646472657373006044820152606401610895565b8060026000828254610fb99190611d5c565b909155505073ffffffffffffffffffffffffffffffffffffffff8216600081815260208181526040808320805486019055518481527fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a35050565b6109c93382611657565b73ffffffffffffffffffffffffffffffffffffffff811660009081526001830160205260408120541515610a41565b60055473ffffffffffffffffffffffffffffffffffffffff1633146110d7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610895565b565b6000610a418373ffffffffffffffffffffffffffffffffffffffff841661181b565b60606000610a418361190e565b611113823383610e2b565b610ba58282611657565b6000610a418373ffffffffffffffffffffffffffffffffffffffff841661196a565b3373ffffffffffffffffffffffffffffffffffffffff8216036111be576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610895565b600680547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff838116918217909255
600554604051919216907fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127890600090a350565b73ffffffffffffffffffffffffffffffffffffffff83166112d7576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f2061646460448201527f72657373000000000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffffffffffffffffffffffffffffffffff821661137a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f20616464726560448201527f73730000000000000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffffffffffffffffffffffffffffffffff83811660008181526001602090815260408083209487168084529482529182902085905590518481527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925910160405180910390a3505050565b73ffffffffffffffffffffffffffffffffffffffff831661148b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f20616460448201527f64726573730000000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffffffffffffffffffffffffffffffffff821661152e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201527f65737300000000000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffffffffffffffffffffffffffffffffff8316600090815260208190526040902054818110156115e4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e742065786365656473206260448201527f616c616e636500000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffff
ffffffffffffffffffffffffffffff848116600081815260208181526040808320878703905593871680835291849020805487019055925185815290927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a3610e25565b73ffffffffffffffffffffffffffffffffffffffff82166116fa576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602160248201527f45524332303a206275726e2066726f6d20746865207a65726f2061646472657360448201527f73000000000000000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffffffffffffffffffffffffffffffffff8216600090815260208190526040902054818110156117b0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a206275726e20616d6f756e7420657863656564732062616c616e60448201527f63650000000000000000000000000000000000000000000000000000000000006064820152608401610895565b73ffffffffffffffffffffffffffffffffffffffff83166000818152602081815260408083208686039055600280548790039055518581529192917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a3505050565b6000818152600183016020526040812054801561190457600061183f600183611dce565b855490915060009061185390600190611dce565b90508181146118b857600086600001828154811061187357611873611de1565b906000526020600020015490508087600001848154811061189657611896611de1565b6000918252602080832090910192909255918252600188019052604090208390555b85548690806118c9576118c9611e10565b600190038181906000526020600020016000905590558560010160008681526020019081526020016000206000905560019350505050610615565b6000915050610615565b60608160000180548060200260200160405190810160405280929190818152602001828054801561195e57602002820191906000526020600020905b81548152602001906001019080831161194a575b50505050509050919050565b60008181526001830160205260408120546119b157508154600181810184556000848152602080822090930184905584548482528286019093526040902091909155610615565b506000610615565b6000602082840312156119cb57600080fd5b81357fffffffff
0000000000000000000000000000000000000000000000000000000081168114610a4157600080fd5b6000815180845260005b81811015611a2157602081850181015186830182015201611a05565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081526000610a4160208301846119fb565b803573ffffffffffffffffffffffffffffffffffffffff81168114611a9657600080fd5b919050565b60008060408385031215611aae57600080fd5b611ab783611a72565b946020939093013593505050565b600080600060608486031215611ada57600080fd5b611ae384611a72565b9250611af160208501611a72565b9150604084013590509250925092565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600080600060608486031215611b4557600080fd5b611b4e84611a72565b925060208401359150604084013567ffffffffffffffff80821115611b7257600080fd5b818601915086601f830112611b8657600080fd5b813581811115611b9857611b98611b01565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908382118183101715611bde57611bde611b01565b81604052828152896020848701011115611bf757600080fd5b8260208601602083013760006020848301015280955050505050509250925092565b600060208284031215611c2b57600080fd5b5035919050565b600060208284031215611c4457600080fd5b610a4182611a72565b6020808252825182820181905260009190848201906040850190845b81811015611c9b57835173ffffffffffffffffffffffffffffffffffffffff1683529284019291840191600101611c69565b50909695505050505050565b60008060408385031215611cba57600080fd5b611cc383611a72565b9150611cd160208401611a72565b90509250929050565b600181811c90821680611cee57607f821691505b602082108103611d27577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b8082018082111561061557610615611d2d565b828152604060208201526000611d8860408301846119fb565b949350505050565b73ffffffffffffffffffffffffffffffffffffffff84168152826020820152606060408201526000611d
c560608301846119fb565b95945050505050565b8181038181111561061557610615611d2d565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fdfea164736f6c6343000813000a", +} + +var LinkTokenABI = LinkTokenMetaData.ABI + +var LinkTokenBin = LinkTokenMetaData.Bin + +func DeployLinkToken(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *LinkToken, error) { + parsed, err := LinkTokenMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(LinkTokenBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LinkToken{address: address, abi: *parsed, LinkTokenCaller: LinkTokenCaller{contract: contract}, LinkTokenTransactor: LinkTokenTransactor{contract: contract}, LinkTokenFilterer: LinkTokenFilterer{contract: contract}}, nil +} + +type LinkToken struct { + address common.Address + abi abi.ABI + LinkTokenCaller + LinkTokenTransactor + LinkTokenFilterer +} + +type LinkTokenCaller struct { + contract *bind.BoundContract +} + +type LinkTokenTransactor struct { + contract *bind.BoundContract +} + +type LinkTokenFilterer struct { + contract *bind.BoundContract +} + +type LinkTokenSession struct { + Contract *LinkToken + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type LinkTokenCallerSession struct { + Contract *LinkTokenCaller + CallOpts bind.CallOpts +} + +type LinkTokenTransactorSession struct { + Contract *LinkTokenTransactor + TransactOpts bind.TransactOpts +} + +type LinkTokenRaw struct { + Contract *LinkToken +} + +type LinkTokenCallerRaw struct { + Contract *LinkTokenCaller +} + +type LinkTokenTransactorRaw struct { + Contract *LinkTokenTransactor +} 
+ +func NewLinkToken(address common.Address, backend bind.ContractBackend) (*LinkToken, error) { + abi, err := abi.JSON(strings.NewReader(LinkTokenABI)) + if err != nil { + return nil, err + } + contract, err := bindLinkToken(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LinkToken{address: address, abi: abi, LinkTokenCaller: LinkTokenCaller{contract: contract}, LinkTokenTransactor: LinkTokenTransactor{contract: contract}, LinkTokenFilterer: LinkTokenFilterer{contract: contract}}, nil +} + +func NewLinkTokenCaller(address common.Address, caller bind.ContractCaller) (*LinkTokenCaller, error) { + contract, err := bindLinkToken(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LinkTokenCaller{contract: contract}, nil +} + +func NewLinkTokenTransactor(address common.Address, transactor bind.ContractTransactor) (*LinkTokenTransactor, error) { + contract, err := bindLinkToken(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LinkTokenTransactor{contract: contract}, nil +} + +func NewLinkTokenFilterer(address common.Address, filterer bind.ContractFilterer) (*LinkTokenFilterer, error) { + contract, err := bindLinkToken(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LinkTokenFilterer{contract: contract}, nil +} + +func bindLinkToken(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := LinkTokenMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_LinkToken *LinkTokenRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LinkToken.Contract.LinkTokenCaller.contract.Call(opts, result, method, params...) 
+} + +func (_LinkToken *LinkTokenRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LinkToken.Contract.LinkTokenTransactor.contract.Transfer(opts) +} + +func (_LinkToken *LinkTokenRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LinkToken.Contract.LinkTokenTransactor.contract.Transact(opts, method, params...) +} + +func (_LinkToken *LinkTokenCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _LinkToken.Contract.contract.Call(opts, result, method, params...) +} + +func (_LinkToken *LinkTokenTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LinkToken.Contract.contract.Transfer(opts) +} + +func (_LinkToken *LinkTokenTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LinkToken.Contract.contract.Transact(opts, method, params...) 
+} + +func (_LinkToken *LinkTokenCaller) Allowance(opts *bind.CallOpts, owner common.Address, spender common.Address) (*big.Int, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "allowance", owner, spender) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) Allowance(owner common.Address, spender common.Address) (*big.Int, error) { + return _LinkToken.Contract.Allowance(&_LinkToken.CallOpts, owner, spender) +} + +func (_LinkToken *LinkTokenCallerSession) Allowance(owner common.Address, spender common.Address) (*big.Int, error) { + return _LinkToken.Contract.Allowance(&_LinkToken.CallOpts, owner, spender) +} + +func (_LinkToken *LinkTokenCaller) BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "balanceOf", account) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) BalanceOf(account common.Address) (*big.Int, error) { + return _LinkToken.Contract.BalanceOf(&_LinkToken.CallOpts, account) +} + +func (_LinkToken *LinkTokenCallerSession) BalanceOf(account common.Address) (*big.Int, error) { + return _LinkToken.Contract.BalanceOf(&_LinkToken.CallOpts, account) +} + +func (_LinkToken *LinkTokenCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) Decimals() (uint8, error) { + return _LinkToken.Contract.Decimals(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCallerSession) Decimals() (uint8, error) { + return 
_LinkToken.Contract.Decimals(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCaller) GetBurners(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "getBurners") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) GetBurners() ([]common.Address, error) { + return _LinkToken.Contract.GetBurners(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCallerSession) GetBurners() ([]common.Address, error) { + return _LinkToken.Contract.GetBurners(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCaller) GetMinters(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "getMinters") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) GetMinters() ([]common.Address, error) { + return _LinkToken.Contract.GetMinters(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCallerSession) GetMinters() ([]common.Address, error) { + return _LinkToken.Contract.GetMinters(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCaller) IsBurner(opts *bind.CallOpts, burner common.Address) (bool, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "isBurner", burner) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) IsBurner(burner common.Address) (bool, error) { + return _LinkToken.Contract.IsBurner(&_LinkToken.CallOpts, burner) +} + +func (_LinkToken *LinkTokenCallerSession) IsBurner(burner common.Address) (bool, error) { + return _LinkToken.Contract.IsBurner(&_LinkToken.CallOpts, burner) +} + 
+func (_LinkToken *LinkTokenCaller) IsMinter(opts *bind.CallOpts, minter common.Address) (bool, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "isMinter", minter) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) IsMinter(minter common.Address) (bool, error) { + return _LinkToken.Contract.IsMinter(&_LinkToken.CallOpts, minter) +} + +func (_LinkToken *LinkTokenCallerSession) IsMinter(minter common.Address) (bool, error) { + return _LinkToken.Contract.IsMinter(&_LinkToken.CallOpts, minter) +} + +func (_LinkToken *LinkTokenCaller) MaxSupply(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "maxSupply") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) MaxSupply() (*big.Int, error) { + return _LinkToken.Contract.MaxSupply(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCallerSession) MaxSupply() (*big.Int, error) { + return _LinkToken.Contract.MaxSupply(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCaller) Name(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "name") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) Name() (string, error) { + return _LinkToken.Contract.Name(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCallerSession) Name() (string, error) { + return _LinkToken.Contract.Name(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "owner") + + if err != nil { + return 
*new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) Owner() (common.Address, error) { + return _LinkToken.Contract.Owner(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCallerSession) Owner() (common.Address, error) { + return _LinkToken.Contract.Owner(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCaller) SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "supportsInterface", interfaceId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _LinkToken.Contract.SupportsInterface(&_LinkToken.CallOpts, interfaceId) +} + +func (_LinkToken *LinkTokenCallerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _LinkToken.Contract.SupportsInterface(&_LinkToken.CallOpts, interfaceId) +} + +func (_LinkToken *LinkTokenCaller) Symbol(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "symbol") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_LinkToken *LinkTokenSession) Symbol() (string, error) { + return _LinkToken.Contract.Symbol(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCallerSession) Symbol() (string, error) { + return _LinkToken.Contract.Symbol(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCaller) TotalSupply(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _LinkToken.contract.Call(opts, &out, "totalSupply") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + 
+func (_LinkToken *LinkTokenSession) TotalSupply() (*big.Int, error) { + return _LinkToken.Contract.TotalSupply(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenCallerSession) TotalSupply() (*big.Int, error) { + return _LinkToken.Contract.TotalSupply(&_LinkToken.CallOpts) +} + +func (_LinkToken *LinkTokenTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "acceptOwnership") +} + +func (_LinkToken *LinkTokenSession) AcceptOwnership() (*types.Transaction, error) { + return _LinkToken.Contract.AcceptOwnership(&_LinkToken.TransactOpts) +} + +func (_LinkToken *LinkTokenTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _LinkToken.Contract.AcceptOwnership(&_LinkToken.TransactOpts) +} + +func (_LinkToken *LinkTokenTransactor) Approve(opts *bind.TransactOpts, spender common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "approve", spender, amount) +} + +func (_LinkToken *LinkTokenSession) Approve(spender common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Approve(&_LinkToken.TransactOpts, spender, amount) +} + +func (_LinkToken *LinkTokenTransactorSession) Approve(spender common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Approve(&_LinkToken.TransactOpts, spender, amount) +} + +func (_LinkToken *LinkTokenTransactor) Burn(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "burn", amount) +} + +func (_LinkToken *LinkTokenSession) Burn(amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Burn(&_LinkToken.TransactOpts, amount) +} + +func (_LinkToken *LinkTokenTransactorSession) Burn(amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Burn(&_LinkToken.TransactOpts, amount) +} + +func (_LinkToken *LinkTokenTransactor) Burn0(opts 
*bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "burn0", account, amount) +} + +func (_LinkToken *LinkTokenSession) Burn0(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Burn0(&_LinkToken.TransactOpts, account, amount) +} + +func (_LinkToken *LinkTokenTransactorSession) Burn0(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Burn0(&_LinkToken.TransactOpts, account, amount) +} + +func (_LinkToken *LinkTokenTransactor) BurnFrom(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "burnFrom", account, amount) +} + +func (_LinkToken *LinkTokenSession) BurnFrom(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.BurnFrom(&_LinkToken.TransactOpts, account, amount) +} + +func (_LinkToken *LinkTokenTransactorSession) BurnFrom(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.BurnFrom(&_LinkToken.TransactOpts, account, amount) +} + +func (_LinkToken *LinkTokenTransactor) DecreaseAllowance(opts *bind.TransactOpts, spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "decreaseAllowance", spender, subtractedValue) +} + +func (_LinkToken *LinkTokenSession) DecreaseAllowance(spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.DecreaseAllowance(&_LinkToken.TransactOpts, spender, subtractedValue) +} + +func (_LinkToken *LinkTokenTransactorSession) DecreaseAllowance(spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.DecreaseAllowance(&_LinkToken.TransactOpts, spender, subtractedValue) +} + +func (_LinkToken *LinkTokenTransactor) 
DecreaseApproval(opts *bind.TransactOpts, spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "decreaseApproval", spender, subtractedValue) +} + +func (_LinkToken *LinkTokenSession) DecreaseApproval(spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.DecreaseApproval(&_LinkToken.TransactOpts, spender, subtractedValue) +} + +func (_LinkToken *LinkTokenTransactorSession) DecreaseApproval(spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.DecreaseApproval(&_LinkToken.TransactOpts, spender, subtractedValue) +} + +func (_LinkToken *LinkTokenTransactor) GrantBurnRole(opts *bind.TransactOpts, burner common.Address) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "grantBurnRole", burner) +} + +func (_LinkToken *LinkTokenSession) GrantBurnRole(burner common.Address) (*types.Transaction, error) { + return _LinkToken.Contract.GrantBurnRole(&_LinkToken.TransactOpts, burner) +} + +func (_LinkToken *LinkTokenTransactorSession) GrantBurnRole(burner common.Address) (*types.Transaction, error) { + return _LinkToken.Contract.GrantBurnRole(&_LinkToken.TransactOpts, burner) +} + +func (_LinkToken *LinkTokenTransactor) GrantMintAndBurnRoles(opts *bind.TransactOpts, burnAndMinter common.Address) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "grantMintAndBurnRoles", burnAndMinter) +} + +func (_LinkToken *LinkTokenSession) GrantMintAndBurnRoles(burnAndMinter common.Address) (*types.Transaction, error) { + return _LinkToken.Contract.GrantMintAndBurnRoles(&_LinkToken.TransactOpts, burnAndMinter) +} + +func (_LinkToken *LinkTokenTransactorSession) GrantMintAndBurnRoles(burnAndMinter common.Address) (*types.Transaction, error) { + return _LinkToken.Contract.GrantMintAndBurnRoles(&_LinkToken.TransactOpts, burnAndMinter) +} + +func (_LinkToken 
*LinkTokenTransactor) GrantMintRole(opts *bind.TransactOpts, minter common.Address) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "grantMintRole", minter) +} + +func (_LinkToken *LinkTokenSession) GrantMintRole(minter common.Address) (*types.Transaction, error) { + return _LinkToken.Contract.GrantMintRole(&_LinkToken.TransactOpts, minter) +} + +func (_LinkToken *LinkTokenTransactorSession) GrantMintRole(minter common.Address) (*types.Transaction, error) { + return _LinkToken.Contract.GrantMintRole(&_LinkToken.TransactOpts, minter) +} + +func (_LinkToken *LinkTokenTransactor) IncreaseAllowance(opts *bind.TransactOpts, spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "increaseAllowance", spender, addedValue) +} + +func (_LinkToken *LinkTokenSession) IncreaseAllowance(spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.IncreaseAllowance(&_LinkToken.TransactOpts, spender, addedValue) +} + +func (_LinkToken *LinkTokenTransactorSession) IncreaseAllowance(spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.IncreaseAllowance(&_LinkToken.TransactOpts, spender, addedValue) +} + +func (_LinkToken *LinkTokenTransactor) IncreaseApproval(opts *bind.TransactOpts, spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "increaseApproval", spender, addedValue) +} + +func (_LinkToken *LinkTokenSession) IncreaseApproval(spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.IncreaseApproval(&_LinkToken.TransactOpts, spender, addedValue) +} + +func (_LinkToken *LinkTokenTransactorSession) IncreaseApproval(spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.IncreaseApproval(&_LinkToken.TransactOpts, spender, addedValue) +} + 
+func (_LinkToken *LinkTokenTransactor) Mint(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "mint", account, amount) +} + +func (_LinkToken *LinkTokenSession) Mint(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Mint(&_LinkToken.TransactOpts, account, amount) +} + +func (_LinkToken *LinkTokenTransactorSession) Mint(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Mint(&_LinkToken.TransactOpts, account, amount) +} + +func (_LinkToken *LinkTokenTransactor) RevokeBurnRole(opts *bind.TransactOpts, burner common.Address) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "revokeBurnRole", burner) +} + +func (_LinkToken *LinkTokenSession) RevokeBurnRole(burner common.Address) (*types.Transaction, error) { + return _LinkToken.Contract.RevokeBurnRole(&_LinkToken.TransactOpts, burner) +} + +func (_LinkToken *LinkTokenTransactorSession) RevokeBurnRole(burner common.Address) (*types.Transaction, error) { + return _LinkToken.Contract.RevokeBurnRole(&_LinkToken.TransactOpts, burner) +} + +func (_LinkToken *LinkTokenTransactor) RevokeMintRole(opts *bind.TransactOpts, minter common.Address) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "revokeMintRole", minter) +} + +func (_LinkToken *LinkTokenSession) RevokeMintRole(minter common.Address) (*types.Transaction, error) { + return _LinkToken.Contract.RevokeMintRole(&_LinkToken.TransactOpts, minter) +} + +func (_LinkToken *LinkTokenTransactorSession) RevokeMintRole(minter common.Address) (*types.Transaction, error) { + return _LinkToken.Contract.RevokeMintRole(&_LinkToken.TransactOpts, minter) +} + +func (_LinkToken *LinkTokenTransactor) Transfer(opts *bind.TransactOpts, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "transfer", 
to, amount) +} + +func (_LinkToken *LinkTokenSession) Transfer(to common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Transfer(&_LinkToken.TransactOpts, to, amount) +} + +func (_LinkToken *LinkTokenTransactorSession) Transfer(to common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Transfer(&_LinkToken.TransactOpts, to, amount) +} + +func (_LinkToken *LinkTokenTransactor) TransferAndCall(opts *bind.TransactOpts, to common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "transferAndCall", to, amount, data) +} + +func (_LinkToken *LinkTokenSession) TransferAndCall(to common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _LinkToken.Contract.TransferAndCall(&_LinkToken.TransactOpts, to, amount, data) +} + +func (_LinkToken *LinkTokenTransactorSession) TransferAndCall(to common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _LinkToken.Contract.TransferAndCall(&_LinkToken.TransactOpts, to, amount, data) +} + +func (_LinkToken *LinkTokenTransactor) TransferFrom(opts *bind.TransactOpts, from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "transferFrom", from, to, amount) +} + +func (_LinkToken *LinkTokenSession) TransferFrom(from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.TransferFrom(&_LinkToken.TransactOpts, from, to, amount) +} + +func (_LinkToken *LinkTokenTransactorSession) TransferFrom(from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.TransferFrom(&_LinkToken.TransactOpts, from, to, amount) +} + +func (_LinkToken *LinkTokenTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return 
_LinkToken.contract.Transact(opts, "transferOwnership", to) +} + +func (_LinkToken *LinkTokenSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _LinkToken.Contract.TransferOwnership(&_LinkToken.TransactOpts, to) +} + +func (_LinkToken *LinkTokenTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _LinkToken.Contract.TransferOwnership(&_LinkToken.TransactOpts, to) +} + +type LinkTokenApprovalIterator struct { + Event *LinkTokenApproval + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LinkTokenApprovalIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LinkTokenApproval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LinkTokenApproval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LinkTokenApprovalIterator) Error() error { + return it.fail +} + +func (it *LinkTokenApprovalIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LinkTokenApproval struct { + Owner common.Address + Spender common.Address + Value *big.Int + Raw types.Log +} + +func (_LinkToken *LinkTokenFilterer) FilterApproval(opts *bind.FilterOpts, owner []common.Address, spender []common.Address) (*LinkTokenApprovalIterator, error) { + + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var spenderRule []interface{} + for _, spenderItem := range spender { + spenderRule = append(spenderRule, 
spenderItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "Approval", ownerRule, spenderRule) + if err != nil { + return nil, err + } + return &LinkTokenApprovalIterator{contract: _LinkToken.contract, event: "Approval", logs: logs, sub: sub}, nil +} + +func (_LinkToken *LinkTokenFilterer) WatchApproval(opts *bind.WatchOpts, sink chan<- *LinkTokenApproval, owner []common.Address, spender []common.Address) (event.Subscription, error) { + + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var spenderRule []interface{} + for _, spenderItem := range spender { + spenderRule = append(spenderRule, spenderItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "Approval", ownerRule, spenderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LinkTokenApproval) + if err := _LinkToken.contract.UnpackLog(event, "Approval", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LinkToken *LinkTokenFilterer) ParseApproval(log types.Log) (*LinkTokenApproval, error) { + event := new(LinkTokenApproval) + if err := _LinkToken.contract.UnpackLog(event, "Approval", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LinkTokenBurnAccessGrantedIterator struct { + Event *LinkTokenBurnAccessGranted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LinkTokenBurnAccessGrantedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(LinkTokenBurnAccessGranted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LinkTokenBurnAccessGranted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LinkTokenBurnAccessGrantedIterator) Error() error { + return it.fail +} + +func (it *LinkTokenBurnAccessGrantedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LinkTokenBurnAccessGranted struct { + Burner common.Address + Raw types.Log +} + +func (_LinkToken *LinkTokenFilterer) FilterBurnAccessGranted(opts *bind.FilterOpts, burner []common.Address) (*LinkTokenBurnAccessGrantedIterator, error) { + + var burnerRule []interface{} + for _, burnerItem := range burner { + burnerRule = append(burnerRule, burnerItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "BurnAccessGranted", burnerRule) + if err != nil { + return nil, err + } + return &LinkTokenBurnAccessGrantedIterator{contract: _LinkToken.contract, event: "BurnAccessGranted", logs: logs, sub: sub}, nil +} + +func (_LinkToken *LinkTokenFilterer) WatchBurnAccessGranted(opts *bind.WatchOpts, sink chan<- *LinkTokenBurnAccessGranted, burner []common.Address) (event.Subscription, error) { + + var burnerRule []interface{} + for _, burnerItem := range burner { + burnerRule = append(burnerRule, burnerItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "BurnAccessGranted", burnerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LinkTokenBurnAccessGranted) + if err := 
_LinkToken.contract.UnpackLog(event, "BurnAccessGranted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LinkToken *LinkTokenFilterer) ParseBurnAccessGranted(log types.Log) (*LinkTokenBurnAccessGranted, error) { + event := new(LinkTokenBurnAccessGranted) + if err := _LinkToken.contract.UnpackLog(event, "BurnAccessGranted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LinkTokenBurnAccessRevokedIterator struct { + Event *LinkTokenBurnAccessRevoked + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LinkTokenBurnAccessRevokedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LinkTokenBurnAccessRevoked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LinkTokenBurnAccessRevoked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LinkTokenBurnAccessRevokedIterator) Error() error { + return it.fail +} + +func (it *LinkTokenBurnAccessRevokedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LinkTokenBurnAccessRevoked struct { + Burner common.Address + Raw types.Log +} + +func (_LinkToken *LinkTokenFilterer) FilterBurnAccessRevoked(opts *bind.FilterOpts, burner []common.Address) (*LinkTokenBurnAccessRevokedIterator, error) { + + var burnerRule 
[]interface{} + for _, burnerItem := range burner { + burnerRule = append(burnerRule, burnerItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "BurnAccessRevoked", burnerRule) + if err != nil { + return nil, err + } + return &LinkTokenBurnAccessRevokedIterator{contract: _LinkToken.contract, event: "BurnAccessRevoked", logs: logs, sub: sub}, nil +} + +func (_LinkToken *LinkTokenFilterer) WatchBurnAccessRevoked(opts *bind.WatchOpts, sink chan<- *LinkTokenBurnAccessRevoked, burner []common.Address) (event.Subscription, error) { + + var burnerRule []interface{} + for _, burnerItem := range burner { + burnerRule = append(burnerRule, burnerItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "BurnAccessRevoked", burnerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LinkTokenBurnAccessRevoked) + if err := _LinkToken.contract.UnpackLog(event, "BurnAccessRevoked", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LinkToken *LinkTokenFilterer) ParseBurnAccessRevoked(log types.Log) (*LinkTokenBurnAccessRevoked, error) { + event := new(LinkTokenBurnAccessRevoked) + if err := _LinkToken.contract.UnpackLog(event, "BurnAccessRevoked", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LinkTokenMintAccessGrantedIterator struct { + Event *LinkTokenMintAccessGranted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LinkTokenMintAccessGrantedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = 
new(LinkTokenMintAccessGranted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LinkTokenMintAccessGranted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LinkTokenMintAccessGrantedIterator) Error() error { + return it.fail +} + +func (it *LinkTokenMintAccessGrantedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LinkTokenMintAccessGranted struct { + Minter common.Address + Raw types.Log +} + +func (_LinkToken *LinkTokenFilterer) FilterMintAccessGranted(opts *bind.FilterOpts, minter []common.Address) (*LinkTokenMintAccessGrantedIterator, error) { + + var minterRule []interface{} + for _, minterItem := range minter { + minterRule = append(minterRule, minterItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "MintAccessGranted", minterRule) + if err != nil { + return nil, err + } + return &LinkTokenMintAccessGrantedIterator{contract: _LinkToken.contract, event: "MintAccessGranted", logs: logs, sub: sub}, nil +} + +func (_LinkToken *LinkTokenFilterer) WatchMintAccessGranted(opts *bind.WatchOpts, sink chan<- *LinkTokenMintAccessGranted, minter []common.Address) (event.Subscription, error) { + + var minterRule []interface{} + for _, minterItem := range minter { + minterRule = append(minterRule, minterItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "MintAccessGranted", minterRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LinkTokenMintAccessGranted) + if err := 
_LinkToken.contract.UnpackLog(event, "MintAccessGranted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LinkToken *LinkTokenFilterer) ParseMintAccessGranted(log types.Log) (*LinkTokenMintAccessGranted, error) { + event := new(LinkTokenMintAccessGranted) + if err := _LinkToken.contract.UnpackLog(event, "MintAccessGranted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LinkTokenMintAccessRevokedIterator struct { + Event *LinkTokenMintAccessRevoked + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LinkTokenMintAccessRevokedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LinkTokenMintAccessRevoked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LinkTokenMintAccessRevoked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LinkTokenMintAccessRevokedIterator) Error() error { + return it.fail +} + +func (it *LinkTokenMintAccessRevokedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LinkTokenMintAccessRevoked struct { + Minter common.Address + Raw types.Log +} + +func (_LinkToken *LinkTokenFilterer) FilterMintAccessRevoked(opts *bind.FilterOpts, minter []common.Address) (*LinkTokenMintAccessRevokedIterator, error) { + + var minterRule 
[]interface{} + for _, minterItem := range minter { + minterRule = append(minterRule, minterItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "MintAccessRevoked", minterRule) + if err != nil { + return nil, err + } + return &LinkTokenMintAccessRevokedIterator{contract: _LinkToken.contract, event: "MintAccessRevoked", logs: logs, sub: sub}, nil +} + +func (_LinkToken *LinkTokenFilterer) WatchMintAccessRevoked(opts *bind.WatchOpts, sink chan<- *LinkTokenMintAccessRevoked, minter []common.Address) (event.Subscription, error) { + + var minterRule []interface{} + for _, minterItem := range minter { + minterRule = append(minterRule, minterItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "MintAccessRevoked", minterRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LinkTokenMintAccessRevoked) + if err := _LinkToken.contract.UnpackLog(event, "MintAccessRevoked", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LinkToken *LinkTokenFilterer) ParseMintAccessRevoked(log types.Log) (*LinkTokenMintAccessRevoked, error) { + event := new(LinkTokenMintAccessRevoked) + if err := _LinkToken.contract.UnpackLog(event, "MintAccessRevoked", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LinkTokenOwnershipTransferRequestedIterator struct { + Event *LinkTokenOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LinkTokenOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log 
:= <-it.logs: + it.Event = new(LinkTokenOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LinkTokenOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LinkTokenOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *LinkTokenOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LinkTokenOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_LinkToken *LinkTokenFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LinkTokenOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &LinkTokenOwnershipTransferRequestedIterator{contract: _LinkToken.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_LinkToken *LinkTokenFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *LinkTokenOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = 
append(toRule, toItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LinkTokenOwnershipTransferRequested) + if err := _LinkToken.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LinkToken *LinkTokenFilterer) ParseOwnershipTransferRequested(log types.Log) (*LinkTokenOwnershipTransferRequested, error) { + event := new(LinkTokenOwnershipTransferRequested) + if err := _LinkToken.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LinkTokenOwnershipTransferredIterator struct { + Event *LinkTokenOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LinkTokenOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LinkTokenOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LinkTokenOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it 
*LinkTokenOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *LinkTokenOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LinkTokenOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_LinkToken *LinkTokenFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LinkTokenOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &LinkTokenOwnershipTransferredIterator{contract: _LinkToken.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_LinkToken *LinkTokenFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *LinkTokenOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LinkTokenOwnershipTransferred) + if err := _LinkToken.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + 
} + }), nil +} + +func (_LinkToken *LinkTokenFilterer) ParseOwnershipTransferred(log types.Log) (*LinkTokenOwnershipTransferred, error) { + event := new(LinkTokenOwnershipTransferred) + if err := _LinkToken.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LinkTokenTransferIterator struct { + Event *LinkTokenTransfer + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LinkTokenTransferIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LinkTokenTransfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(LinkTokenTransfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LinkTokenTransferIterator) Error() error { + return it.fail +} + +func (it *LinkTokenTransferIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LinkTokenTransfer struct { + From common.Address + To common.Address + Value *big.Int + Raw types.Log +} + +func (_LinkToken *LinkTokenFilterer) FilterTransfer(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LinkTokenTransferIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "Transfer", fromRule, toRule) + if err != nil { + return nil, err + } + return 
&LinkTokenTransferIterator{contract: _LinkToken.contract, event: "Transfer", logs: logs, sub: sub}, nil +} + +func (_LinkToken *LinkTokenFilterer) WatchTransfer(opts *bind.WatchOpts, sink chan<- *LinkTokenTransfer, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "Transfer", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LinkTokenTransfer) + if err := _LinkToken.contract.UnpackLog(event, "Transfer", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LinkToken *LinkTokenFilterer) ParseTransfer(log types.Log) (*LinkTokenTransfer, error) { + event := new(LinkTokenTransfer) + if err := _LinkToken.contract.UnpackLog(event, "Transfer", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type LinkTokenTransfer0Iterator struct { + Event *LinkTokenTransfer0 + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *LinkTokenTransfer0Iterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(LinkTokenTransfer0) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = 
new(LinkTokenTransfer0) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *LinkTokenTransfer0Iterator) Error() error { + return it.fail +} + +func (it *LinkTokenTransfer0Iterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type LinkTokenTransfer0 struct { + From common.Address + To common.Address + Value *big.Int + Data []byte + Raw types.Log +} + +func (_LinkToken *LinkTokenFilterer) FilterTransfer0(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LinkTokenTransfer0Iterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "Transfer0", fromRule, toRule) + if err != nil { + return nil, err + } + return &LinkTokenTransfer0Iterator{contract: _LinkToken.contract, event: "Transfer0", logs: logs, sub: sub}, nil +} + +func (_LinkToken *LinkTokenFilterer) WatchTransfer0(opts *bind.WatchOpts, sink chan<- *LinkTokenTransfer0, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "Transfer0", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(LinkTokenTransfer0) + if err := _LinkToken.contract.UnpackLog(event, "Transfer0", log); err != nil { + return err + } + event.Raw = log + + select { + case 
sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_LinkToken *LinkTokenFilterer) ParseTransfer0(log types.Log) (*LinkTokenTransfer0, error) { + event := new(LinkTokenTransfer0) + if err := _LinkToken.contract.UnpackLog(event, "Transfer0", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_LinkToken *LinkToken) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _LinkToken.abi.Events["Approval"].ID: + return _LinkToken.ParseApproval(log) + case _LinkToken.abi.Events["BurnAccessGranted"].ID: + return _LinkToken.ParseBurnAccessGranted(log) + case _LinkToken.abi.Events["BurnAccessRevoked"].ID: + return _LinkToken.ParseBurnAccessRevoked(log) + case _LinkToken.abi.Events["MintAccessGranted"].ID: + return _LinkToken.ParseMintAccessGranted(log) + case _LinkToken.abi.Events["MintAccessRevoked"].ID: + return _LinkToken.ParseMintAccessRevoked(log) + case _LinkToken.abi.Events["OwnershipTransferRequested"].ID: + return _LinkToken.ParseOwnershipTransferRequested(log) + case _LinkToken.abi.Events["OwnershipTransferred"].ID: + return _LinkToken.ParseOwnershipTransferred(log) + case _LinkToken.abi.Events["Transfer"].ID: + return _LinkToken.ParseTransfer(log) + case _LinkToken.abi.Events["Transfer0"].ID: + return _LinkToken.ParseTransfer0(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (LinkTokenApproval) Topic() common.Hash { + return common.HexToHash("0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925") +} + +func (LinkTokenBurnAccessGranted) Topic() common.Hash { + return common.HexToHash("0x92308bb7573b2a3d17ddb868b39d8ebec433f3194421abc22d084f89658c9bad") +} + +func (LinkTokenBurnAccessRevoked) Topic() common.Hash { + return 
common.HexToHash("0x0a675452746933cefe3d74182e78db7afe57ba60eaa4234b5d85e9aa41b0610c") +} + +func (LinkTokenMintAccessGranted) Topic() common.Hash { + return common.HexToHash("0xe46fef8bbff1389d9010703cf8ebb363fb3daf5bf56edc27080b67bc8d9251ea") +} + +func (LinkTokenMintAccessRevoked) Topic() common.Hash { + return common.HexToHash("0xed998b960f6340d045f620c119730f7aa7995e7425c2401d3a5b64ff998a59e9") +} + +func (LinkTokenOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (LinkTokenOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (LinkTokenTransfer) Topic() common.Hash { + return common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") +} + +func (LinkTokenTransfer0) Topic() common.Hash { + return common.HexToHash("0xe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16") +} + +func (_LinkToken *LinkToken) Address() common.Address { + return _LinkToken.address +} + +type LinkTokenInterface interface { + Allowance(opts *bind.CallOpts, owner common.Address, spender common.Address) (*big.Int, error) + + BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) + + Decimals(opts *bind.CallOpts) (uint8, error) + + GetBurners(opts *bind.CallOpts) ([]common.Address, error) + + GetMinters(opts *bind.CallOpts) ([]common.Address, error) + + IsBurner(opts *bind.CallOpts, burner common.Address) (bool, error) + + IsMinter(opts *bind.CallOpts, minter common.Address) (bool, error) + + MaxSupply(opts *bind.CallOpts) (*big.Int, error) + + Name(opts *bind.CallOpts) (string, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) + + Symbol(opts *bind.CallOpts) (string, error) + + TotalSupply(opts *bind.CallOpts) (*big.Int, error) + + 
AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + Approve(opts *bind.TransactOpts, spender common.Address, amount *big.Int) (*types.Transaction, error) + + Burn(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + Burn0(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) + + BurnFrom(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) + + DecreaseAllowance(opts *bind.TransactOpts, spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) + + DecreaseApproval(opts *bind.TransactOpts, spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) + + GrantBurnRole(opts *bind.TransactOpts, burner common.Address) (*types.Transaction, error) + + GrantMintAndBurnRoles(opts *bind.TransactOpts, burnAndMinter common.Address) (*types.Transaction, error) + + GrantMintRole(opts *bind.TransactOpts, minter common.Address) (*types.Transaction, error) + + IncreaseAllowance(opts *bind.TransactOpts, spender common.Address, addedValue *big.Int) (*types.Transaction, error) + + IncreaseApproval(opts *bind.TransactOpts, spender common.Address, addedValue *big.Int) (*types.Transaction, error) + + Mint(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) + + RevokeBurnRole(opts *bind.TransactOpts, burner common.Address) (*types.Transaction, error) + + RevokeMintRole(opts *bind.TransactOpts, minter common.Address) (*types.Transaction, error) + + Transfer(opts *bind.TransactOpts, to common.Address, amount *big.Int) (*types.Transaction, error) + + TransferAndCall(opts *bind.TransactOpts, to common.Address, amount *big.Int, data []byte) (*types.Transaction, error) + + TransferFrom(opts *bind.TransactOpts, from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + 
FilterApproval(opts *bind.FilterOpts, owner []common.Address, spender []common.Address) (*LinkTokenApprovalIterator, error) + + WatchApproval(opts *bind.WatchOpts, sink chan<- *LinkTokenApproval, owner []common.Address, spender []common.Address) (event.Subscription, error) + + ParseApproval(log types.Log) (*LinkTokenApproval, error) + + FilterBurnAccessGranted(opts *bind.FilterOpts, burner []common.Address) (*LinkTokenBurnAccessGrantedIterator, error) + + WatchBurnAccessGranted(opts *bind.WatchOpts, sink chan<- *LinkTokenBurnAccessGranted, burner []common.Address) (event.Subscription, error) + + ParseBurnAccessGranted(log types.Log) (*LinkTokenBurnAccessGranted, error) + + FilterBurnAccessRevoked(opts *bind.FilterOpts, burner []common.Address) (*LinkTokenBurnAccessRevokedIterator, error) + + WatchBurnAccessRevoked(opts *bind.WatchOpts, sink chan<- *LinkTokenBurnAccessRevoked, burner []common.Address) (event.Subscription, error) + + ParseBurnAccessRevoked(log types.Log) (*LinkTokenBurnAccessRevoked, error) + + FilterMintAccessGranted(opts *bind.FilterOpts, minter []common.Address) (*LinkTokenMintAccessGrantedIterator, error) + + WatchMintAccessGranted(opts *bind.WatchOpts, sink chan<- *LinkTokenMintAccessGranted, minter []common.Address) (event.Subscription, error) + + ParseMintAccessGranted(log types.Log) (*LinkTokenMintAccessGranted, error) + + FilterMintAccessRevoked(opts *bind.FilterOpts, minter []common.Address) (*LinkTokenMintAccessRevokedIterator, error) + + WatchMintAccessRevoked(opts *bind.WatchOpts, sink chan<- *LinkTokenMintAccessRevoked, minter []common.Address) (event.Subscription, error) + + ParseMintAccessRevoked(log types.Log) (*LinkTokenMintAccessRevoked, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LinkTokenOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *LinkTokenOwnershipTransferRequested, from []common.Address, to 
[]common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*LinkTokenOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LinkTokenOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *LinkTokenOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*LinkTokenOwnershipTransferred, error) + + FilterTransfer(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LinkTokenTransferIterator, error) + + WatchTransfer(opts *bind.WatchOpts, sink chan<- *LinkTokenTransfer, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseTransfer(log types.Log) (*LinkTokenTransfer, error) + + FilterTransfer0(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LinkTokenTransfer0Iterator, error) + + WatchTransfer0(opts *bind.WatchOpts, sink chan<- *LinkTokenTransfer0, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseTransfer0(log types.Log) (*LinkTokenTransfer0, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/shared/generated/werc20_mock/werc20_mock.go b/core/gethwrappers/shared/generated/werc20_mock/werc20_mock.go new file mode 100644 index 00000000..f35203f3 --- /dev/null +++ b/core/gethwrappers/shared/generated/werc20_mock/werc20_mock.go @@ -0,0 +1,1052 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package werc20_mock + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var WERC20MockMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"Approval\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"dst\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"wad\",\"type\":\"uint256\"}],\"name\":\"Deposit\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"Transfer\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"src\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"wad\",\"type\":\"uint256\"}],\"name\":\"Withdrawal\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\
",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"}],\"name\":\"allowance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"approve\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"balanceOf\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"burn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"subtractedValue\",\"type\":\"uint256\"}],\"name\":\"decreaseAllowance\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"deposit\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"spender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"addedValue\",\"type\":\"uint256\"}],\"name\":\"increaseAllowance\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\
"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"mint\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"name\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"symbol\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"totalSupply\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"transfer\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"transferFrom\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"wad\",\"type\":\"uint256\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", + Bin: 
"0x60806040523480156200001157600080fd5b506040518060400160405280600a8152602001695745524332304d6f636b60b01b815250604051806040016040528060048152602001635745524360e01b815250816003908162000063919062000120565b50600462000072828262000120565b505050620001ec565b634e487b7160e01b600052604160045260246000fd5b600181811c90821680620000a657607f821691505b602082108103620000c757634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200011b57600081815260208120601f850160051c81016020861015620000f65750805b601f850160051c820191505b81811015620001175782815560010162000102565b5050505b505050565b81516001600160401b038111156200013c576200013c6200007b565b62000154816200014d845462000091565b84620000cd565b602080601f8311600181146200018c5760008415620001735750858301515b600019600386901b1c1916600185901b17855562000117565b600085815260208120601f198616915b82811015620001bd578886015182559484019460019091019084016200019c565b5085821015620001dc5787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b610fc980620001fc6000396000f3fe6080604052600436106100ec5760003560e01c806340c10f191161008a578063a457c2d711610059578063a457c2d71461028e578063a9059cbb146102ae578063d0e30db0146102ce578063dd62ed3e146102d657600080fd5b806340c10f19146101f657806370a082311461021657806395d89b41146102595780639dc29fac1461026e57600080fd5b806323b872dd116100c657806323b872dd1461017a5780632e1a7d4d1461019a578063313ce567146101ba57806339509351146101d657600080fd5b806306fdde0314610100578063095ea7b31461012b57806318160ddd1461015b57600080fd5b366100fb576100f9610329565b005b600080fd5b34801561010c57600080fd5b5061011561036a565b6040516101229190610dc6565b60405180910390f35b34801561013757600080fd5b5061014b610146366004610e5b565b6103fc565b6040519015158152602001610122565b34801561016757600080fd5b506002545b604051908152602001610122565b34801561018657600080fd5b5061014b610195366004610e85565b610416565b3480156101a657600080fd5b506100f96101b5366004610ec1565b61043a565b3480156101c657600080fd5b5060405160128152602001610122565b3480156101e257600080fd5b5061014b6101f13
66004610e5b565b6104c6565b34801561020257600080fd5b506100f9610211366004610e5b565b610512565b34801561022257600080fd5b5061016c610231366004610eda565b73ffffffffffffffffffffffffffffffffffffffff1660009081526020819052604090205490565b34801561026557600080fd5b50610115610520565b34801561027a57600080fd5b506100f9610289366004610e5b565b61052f565b34801561029a57600080fd5b5061014b6102a9366004610e5b565b610539565b3480156102ba57600080fd5b5061014b6102c9366004610e5b565b61060f565b6100f9610329565b3480156102e257600080fd5b5061016c6102f1366004610efc565b73ffffffffffffffffffffffffffffffffffffffff918216600090815260016020908152604080832093909416825291909152205490565b610333333461061d565b60405134815233907fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c9060200160405180910390a2565b60606003805461037990610f2f565b80601f01602080910402602001604051908101604052809291908181526020018280546103a590610f2f565b80156103f25780601f106103c7576101008083540402835291602001916103f2565b820191906000526020600020905b8154815290600101906020018083116103d557829003601f168201915b5050505050905090565b60003361040a818585610710565b60019150505b92915050565b6000336104248582856108c4565b61042f85858561099b565b506001949350505050565b3360009081526020819052604090205481111561045657600080fd5b6104603382610c0a565b604051339082156108fc029083906000818181858888f1935050505015801561048d573d6000803e3d6000fd5b5060405181815233907f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b659060200160405180910390a250565b33600081815260016020908152604080832073ffffffffffffffffffffffffffffffffffffffff8716845290915281205490919061040a908290869061050d908790610f82565b610710565b61051c828261061d565b5050565b60606004805461037990610f2f565b61051c8282610c0a565b33600081815260016020908152604080832073ffffffffffffffffffffffffffffffffffffffff8716845290915281205490919083811015610602576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f7760448201527f207
a65726f00000000000000000000000000000000000000000000000000000060648201526084015b60405180910390fd5b61042f8286868403610710565b60003361040a81858561099b565b73ffffffffffffffffffffffffffffffffffffffff821661069a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f45524332303a206d696e7420746f20746865207a65726f20616464726573730060448201526064016105f9565b80600260008282546106ac9190610f82565b909155505073ffffffffffffffffffffffffffffffffffffffff8216600081815260208181526040808320805486019055518481527fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a35050565b73ffffffffffffffffffffffffffffffffffffffff83166107b2576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f2061646460448201527f726573730000000000000000000000000000000000000000000000000000000060648201526084016105f9565b73ffffffffffffffffffffffffffffffffffffffff8216610855576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f20616464726560448201527f737300000000000000000000000000000000000000000000000000000000000060648201526084016105f9565b73ffffffffffffffffffffffffffffffffffffffff83811660008181526001602090815260408083209487168084529482529182902085905590518481527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92591015b60405180910390a3505050565b73ffffffffffffffffffffffffffffffffffffffff8381166000908152600160209081526040808320938616835292905220547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff81146109955781811015610988576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e636500000060448201526064016105f9565b6109958484848403610710565b50505050565b73ffffffffffffffffffffffffffffffffffffffff8316610a3e576040517
f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f20616460448201527f647265737300000000000000000000000000000000000000000000000000000060648201526084016105f9565b73ffffffffffffffffffffffffffffffffffffffff8216610ae1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201527f657373000000000000000000000000000000000000000000000000000000000060648201526084016105f9565b73ffffffffffffffffffffffffffffffffffffffff831660009081526020819052604090205481811015610b97576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e742065786365656473206260448201527f616c616e6365000000000000000000000000000000000000000000000000000060648201526084016105f9565b73ffffffffffffffffffffffffffffffffffffffff848116600081815260208181526040808320878703905593871680835291849020805487019055925185815290927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef910160405180910390a3610995565b73ffffffffffffffffffffffffffffffffffffffff8216610cad576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602160248201527f45524332303a206275726e2066726f6d20746865207a65726f2061646472657360448201527f730000000000000000000000000000000000000000000000000000000000000060648201526084016105f9565b73ffffffffffffffffffffffffffffffffffffffff821660009081526020819052604090205481811015610d63576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a206275726e20616d6f756e7420657863656564732062616c616e60448201527f636500000000000000000000000000000000000000000000000000000000000060648201526084016105f9565b73ffffffffffffffffffffffffffffffffffffffff83166000818152602081815260408083208686039055600280548790039055518581529192917fddf25
2ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef91016108b7565b600060208083528351808285015260005b81811015610df357858101830151858201604001528201610dd7565b5060006040828601015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8301168501019250505092915050565b803573ffffffffffffffffffffffffffffffffffffffff81168114610e5657600080fd5b919050565b60008060408385031215610e6e57600080fd5b610e7783610e32565b946020939093013593505050565b600080600060608486031215610e9a57600080fd5b610ea384610e32565b9250610eb160208501610e32565b9150604084013590509250925092565b600060208284031215610ed357600080fd5b5035919050565b600060208284031215610eec57600080fd5b610ef582610e32565b9392505050565b60008060408385031215610f0f57600080fd5b610f1883610e32565b9150610f2660208401610e32565b90509250929050565b600181811c90821680610f4357607f821691505b602082108103610f7c577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b80820180821115610410577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fdfea164736f6c6343000813000a", +} + +var WERC20MockABI = WERC20MockMetaData.ABI + +var WERC20MockBin = WERC20MockMetaData.Bin + +func DeployWERC20Mock(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *WERC20Mock, error) { + parsed, err := WERC20MockMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(WERC20MockBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &WERC20Mock{address: address, abi: *parsed, WERC20MockCaller: WERC20MockCaller{contract: contract}, WERC20MockTransactor: WERC20MockTransactor{contract: contract}, WERC20MockFilterer: WERC20MockFilterer{contract: contract}}, nil +} + +type WERC20Mock struct { + 
address common.Address + abi abi.ABI + WERC20MockCaller + WERC20MockTransactor + WERC20MockFilterer +} + +type WERC20MockCaller struct { + contract *bind.BoundContract +} + +type WERC20MockTransactor struct { + contract *bind.BoundContract +} + +type WERC20MockFilterer struct { + contract *bind.BoundContract +} + +type WERC20MockSession struct { + Contract *WERC20Mock + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type WERC20MockCallerSession struct { + Contract *WERC20MockCaller + CallOpts bind.CallOpts +} + +type WERC20MockTransactorSession struct { + Contract *WERC20MockTransactor + TransactOpts bind.TransactOpts +} + +type WERC20MockRaw struct { + Contract *WERC20Mock +} + +type WERC20MockCallerRaw struct { + Contract *WERC20MockCaller +} + +type WERC20MockTransactorRaw struct { + Contract *WERC20MockTransactor +} + +func NewWERC20Mock(address common.Address, backend bind.ContractBackend) (*WERC20Mock, error) { + abi, err := abi.JSON(strings.NewReader(WERC20MockABI)) + if err != nil { + return nil, err + } + contract, err := bindWERC20Mock(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &WERC20Mock{address: address, abi: abi, WERC20MockCaller: WERC20MockCaller{contract: contract}, WERC20MockTransactor: WERC20MockTransactor{contract: contract}, WERC20MockFilterer: WERC20MockFilterer{contract: contract}}, nil +} + +func NewWERC20MockCaller(address common.Address, caller bind.ContractCaller) (*WERC20MockCaller, error) { + contract, err := bindWERC20Mock(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &WERC20MockCaller{contract: contract}, nil +} + +func NewWERC20MockTransactor(address common.Address, transactor bind.ContractTransactor) (*WERC20MockTransactor, error) { + contract, err := bindWERC20Mock(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &WERC20MockTransactor{contract: contract}, nil +} + +func NewWERC20MockFilterer(address common.Address, 
filterer bind.ContractFilterer) (*WERC20MockFilterer, error) { + contract, err := bindWERC20Mock(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &WERC20MockFilterer{contract: contract}, nil +} + +func bindWERC20Mock(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := WERC20MockMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_WERC20Mock *WERC20MockRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _WERC20Mock.Contract.WERC20MockCaller.contract.Call(opts, result, method, params...) +} + +func (_WERC20Mock *WERC20MockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _WERC20Mock.Contract.WERC20MockTransactor.contract.Transfer(opts) +} + +func (_WERC20Mock *WERC20MockRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _WERC20Mock.Contract.WERC20MockTransactor.contract.Transact(opts, method, params...) +} + +func (_WERC20Mock *WERC20MockCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _WERC20Mock.Contract.contract.Call(opts, result, method, params...) +} + +func (_WERC20Mock *WERC20MockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _WERC20Mock.Contract.contract.Transfer(opts) +} + +func (_WERC20Mock *WERC20MockTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _WERC20Mock.Contract.contract.Transact(opts, method, params...) 
+} + +func (_WERC20Mock *WERC20MockCaller) Allowance(opts *bind.CallOpts, owner common.Address, spender common.Address) (*big.Int, error) { + var out []interface{} + err := _WERC20Mock.contract.Call(opts, &out, "allowance", owner, spender) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_WERC20Mock *WERC20MockSession) Allowance(owner common.Address, spender common.Address) (*big.Int, error) { + return _WERC20Mock.Contract.Allowance(&_WERC20Mock.CallOpts, owner, spender) +} + +func (_WERC20Mock *WERC20MockCallerSession) Allowance(owner common.Address, spender common.Address) (*big.Int, error) { + return _WERC20Mock.Contract.Allowance(&_WERC20Mock.CallOpts, owner, spender) +} + +func (_WERC20Mock *WERC20MockCaller) BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) { + var out []interface{} + err := _WERC20Mock.contract.Call(opts, &out, "balanceOf", account) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_WERC20Mock *WERC20MockSession) BalanceOf(account common.Address) (*big.Int, error) { + return _WERC20Mock.Contract.BalanceOf(&_WERC20Mock.CallOpts, account) +} + +func (_WERC20Mock *WERC20MockCallerSession) BalanceOf(account common.Address) (*big.Int, error) { + return _WERC20Mock.Contract.BalanceOf(&_WERC20Mock.CallOpts, account) +} + +func (_WERC20Mock *WERC20MockCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _WERC20Mock.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +func (_WERC20Mock *WERC20MockSession) Decimals() (uint8, error) { + return _WERC20Mock.Contract.Decimals(&_WERC20Mock.CallOpts) +} + +func (_WERC20Mock *WERC20MockCallerSession) Decimals() (uint8, error) { + 
return _WERC20Mock.Contract.Decimals(&_WERC20Mock.CallOpts) +} + +func (_WERC20Mock *WERC20MockCaller) Name(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _WERC20Mock.contract.Call(opts, &out, "name") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_WERC20Mock *WERC20MockSession) Name() (string, error) { + return _WERC20Mock.Contract.Name(&_WERC20Mock.CallOpts) +} + +func (_WERC20Mock *WERC20MockCallerSession) Name() (string, error) { + return _WERC20Mock.Contract.Name(&_WERC20Mock.CallOpts) +} + +func (_WERC20Mock *WERC20MockCaller) Symbol(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _WERC20Mock.contract.Call(opts, &out, "symbol") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_WERC20Mock *WERC20MockSession) Symbol() (string, error) { + return _WERC20Mock.Contract.Symbol(&_WERC20Mock.CallOpts) +} + +func (_WERC20Mock *WERC20MockCallerSession) Symbol() (string, error) { + return _WERC20Mock.Contract.Symbol(&_WERC20Mock.CallOpts) +} + +func (_WERC20Mock *WERC20MockCaller) TotalSupply(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _WERC20Mock.contract.Call(opts, &out, "totalSupply") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_WERC20Mock *WERC20MockSession) TotalSupply() (*big.Int, error) { + return _WERC20Mock.Contract.TotalSupply(&_WERC20Mock.CallOpts) +} + +func (_WERC20Mock *WERC20MockCallerSession) TotalSupply() (*big.Int, error) { + return _WERC20Mock.Contract.TotalSupply(&_WERC20Mock.CallOpts) +} + +func (_WERC20Mock *WERC20MockTransactor) Approve(opts *bind.TransactOpts, spender common.Address, amount *big.Int) (*types.Transaction, error) { + return 
_WERC20Mock.contract.Transact(opts, "approve", spender, amount) +} + +func (_WERC20Mock *WERC20MockSession) Approve(spender common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.Approve(&_WERC20Mock.TransactOpts, spender, amount) +} + +func (_WERC20Mock *WERC20MockTransactorSession) Approve(spender common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.Approve(&_WERC20Mock.TransactOpts, spender, amount) +} + +func (_WERC20Mock *WERC20MockTransactor) Burn(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.contract.Transact(opts, "burn", account, amount) +} + +func (_WERC20Mock *WERC20MockSession) Burn(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.Burn(&_WERC20Mock.TransactOpts, account, amount) +} + +func (_WERC20Mock *WERC20MockTransactorSession) Burn(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.Burn(&_WERC20Mock.TransactOpts, account, amount) +} + +func (_WERC20Mock *WERC20MockTransactor) DecreaseAllowance(opts *bind.TransactOpts, spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _WERC20Mock.contract.Transact(opts, "decreaseAllowance", spender, subtractedValue) +} + +func (_WERC20Mock *WERC20MockSession) DecreaseAllowance(spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.DecreaseAllowance(&_WERC20Mock.TransactOpts, spender, subtractedValue) +} + +func (_WERC20Mock *WERC20MockTransactorSession) DecreaseAllowance(spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.DecreaseAllowance(&_WERC20Mock.TransactOpts, spender, subtractedValue) +} + +func (_WERC20Mock *WERC20MockTransactor) Deposit(opts *bind.TransactOpts) (*types.Transaction, error) { + return 
_WERC20Mock.contract.Transact(opts, "deposit") +} + +func (_WERC20Mock *WERC20MockSession) Deposit() (*types.Transaction, error) { + return _WERC20Mock.Contract.Deposit(&_WERC20Mock.TransactOpts) +} + +func (_WERC20Mock *WERC20MockTransactorSession) Deposit() (*types.Transaction, error) { + return _WERC20Mock.Contract.Deposit(&_WERC20Mock.TransactOpts) +} + +func (_WERC20Mock *WERC20MockTransactor) IncreaseAllowance(opts *bind.TransactOpts, spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _WERC20Mock.contract.Transact(opts, "increaseAllowance", spender, addedValue) +} + +func (_WERC20Mock *WERC20MockSession) IncreaseAllowance(spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.IncreaseAllowance(&_WERC20Mock.TransactOpts, spender, addedValue) +} + +func (_WERC20Mock *WERC20MockTransactorSession) IncreaseAllowance(spender common.Address, addedValue *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.IncreaseAllowance(&_WERC20Mock.TransactOpts, spender, addedValue) +} + +func (_WERC20Mock *WERC20MockTransactor) Mint(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.contract.Transact(opts, "mint", account, amount) +} + +func (_WERC20Mock *WERC20MockSession) Mint(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.Mint(&_WERC20Mock.TransactOpts, account, amount) +} + +func (_WERC20Mock *WERC20MockTransactorSession) Mint(account common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.Mint(&_WERC20Mock.TransactOpts, account, amount) +} + +func (_WERC20Mock *WERC20MockTransactor) Transfer(opts *bind.TransactOpts, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.contract.Transact(opts, "transfer", to, amount) +} + +func (_WERC20Mock *WERC20MockSession) Transfer(to common.Address, 
amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.Transfer(&_WERC20Mock.TransactOpts, to, amount) +} + +func (_WERC20Mock *WERC20MockTransactorSession) Transfer(to common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.Transfer(&_WERC20Mock.TransactOpts, to, amount) +} + +func (_WERC20Mock *WERC20MockTransactor) TransferFrom(opts *bind.TransactOpts, from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.contract.Transact(opts, "transferFrom", from, to, amount) +} + +func (_WERC20Mock *WERC20MockSession) TransferFrom(from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.TransferFrom(&_WERC20Mock.TransactOpts, from, to, amount) +} + +func (_WERC20Mock *WERC20MockTransactorSession) TransferFrom(from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.TransferFrom(&_WERC20Mock.TransactOpts, from, to, amount) +} + +func (_WERC20Mock *WERC20MockTransactor) Withdraw(opts *bind.TransactOpts, wad *big.Int) (*types.Transaction, error) { + return _WERC20Mock.contract.Transact(opts, "withdraw", wad) +} + +func (_WERC20Mock *WERC20MockSession) Withdraw(wad *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.Withdraw(&_WERC20Mock.TransactOpts, wad) +} + +func (_WERC20Mock *WERC20MockTransactorSession) Withdraw(wad *big.Int) (*types.Transaction, error) { + return _WERC20Mock.Contract.Withdraw(&_WERC20Mock.TransactOpts, wad) +} + +func (_WERC20Mock *WERC20MockTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _WERC20Mock.contract.RawTransact(opts, nil) +} + +func (_WERC20Mock *WERC20MockSession) Receive() (*types.Transaction, error) { + return _WERC20Mock.Contract.Receive(&_WERC20Mock.TransactOpts) +} + +func (_WERC20Mock *WERC20MockTransactorSession) Receive() (*types.Transaction, error) { + 
return _WERC20Mock.Contract.Receive(&_WERC20Mock.TransactOpts) +} + +type WERC20MockApprovalIterator struct { + Event *WERC20MockApproval + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *WERC20MockApprovalIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(WERC20MockApproval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(WERC20MockApproval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *WERC20MockApprovalIterator) Error() error { + return it.fail +} + +func (it *WERC20MockApprovalIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type WERC20MockApproval struct { + Owner common.Address + Spender common.Address + Value *big.Int + Raw types.Log +} + +func (_WERC20Mock *WERC20MockFilterer) FilterApproval(opts *bind.FilterOpts, owner []common.Address, spender []common.Address) (*WERC20MockApprovalIterator, error) { + + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var spenderRule []interface{} + for _, spenderItem := range spender { + spenderRule = append(spenderRule, spenderItem) + } + + logs, sub, err := _WERC20Mock.contract.FilterLogs(opts, "Approval", ownerRule, spenderRule) + if err != nil { + return nil, err + } + return &WERC20MockApprovalIterator{contract: _WERC20Mock.contract, event: "Approval", logs: logs, sub: sub}, nil +} + +func (_WERC20Mock *WERC20MockFilterer) WatchApproval(opts *bind.WatchOpts, sink chan<- 
*WERC20MockApproval, owner []common.Address, spender []common.Address) (event.Subscription, error) { + + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var spenderRule []interface{} + for _, spenderItem := range spender { + spenderRule = append(spenderRule, spenderItem) + } + + logs, sub, err := _WERC20Mock.contract.WatchLogs(opts, "Approval", ownerRule, spenderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(WERC20MockApproval) + if err := _WERC20Mock.contract.UnpackLog(event, "Approval", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_WERC20Mock *WERC20MockFilterer) ParseApproval(log types.Log) (*WERC20MockApproval, error) { + event := new(WERC20MockApproval) + if err := _WERC20Mock.contract.UnpackLog(event, "Approval", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type WERC20MockDepositIterator struct { + Event *WERC20MockDeposit + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *WERC20MockDepositIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(WERC20MockDeposit) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(WERC20MockDeposit) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw 
= log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *WERC20MockDepositIterator) Error() error { + return it.fail +} + +func (it *WERC20MockDepositIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type WERC20MockDeposit struct { + Dst common.Address + Wad *big.Int + Raw types.Log +} + +func (_WERC20Mock *WERC20MockFilterer) FilterDeposit(opts *bind.FilterOpts, dst []common.Address) (*WERC20MockDepositIterator, error) { + + var dstRule []interface{} + for _, dstItem := range dst { + dstRule = append(dstRule, dstItem) + } + + logs, sub, err := _WERC20Mock.contract.FilterLogs(opts, "Deposit", dstRule) + if err != nil { + return nil, err + } + return &WERC20MockDepositIterator{contract: _WERC20Mock.contract, event: "Deposit", logs: logs, sub: sub}, nil +} + +func (_WERC20Mock *WERC20MockFilterer) WatchDeposit(opts *bind.WatchOpts, sink chan<- *WERC20MockDeposit, dst []common.Address) (event.Subscription, error) { + + var dstRule []interface{} + for _, dstItem := range dst { + dstRule = append(dstRule, dstItem) + } + + logs, sub, err := _WERC20Mock.contract.WatchLogs(opts, "Deposit", dstRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(WERC20MockDeposit) + if err := _WERC20Mock.contract.UnpackLog(event, "Deposit", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_WERC20Mock *WERC20MockFilterer) ParseDeposit(log types.Log) (*WERC20MockDeposit, error) { + event := new(WERC20MockDeposit) + if err := _WERC20Mock.contract.UnpackLog(event, "Deposit", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
WERC20MockTransferIterator struct { + Event *WERC20MockTransfer + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *WERC20MockTransferIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(WERC20MockTransfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(WERC20MockTransfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *WERC20MockTransferIterator) Error() error { + return it.fail +} + +func (it *WERC20MockTransferIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type WERC20MockTransfer struct { + From common.Address + To common.Address + Value *big.Int + Raw types.Log +} + +func (_WERC20Mock *WERC20MockFilterer) FilterTransfer(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*WERC20MockTransferIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _WERC20Mock.contract.FilterLogs(opts, "Transfer", fromRule, toRule) + if err != nil { + return nil, err + } + return &WERC20MockTransferIterator{contract: _WERC20Mock.contract, event: "Transfer", logs: logs, sub: sub}, nil +} + +func (_WERC20Mock *WERC20MockFilterer) WatchTransfer(opts *bind.WatchOpts, sink chan<- *WERC20MockTransfer, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, 
fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _WERC20Mock.contract.WatchLogs(opts, "Transfer", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(WERC20MockTransfer) + if err := _WERC20Mock.contract.UnpackLog(event, "Transfer", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_WERC20Mock *WERC20MockFilterer) ParseTransfer(log types.Log) (*WERC20MockTransfer, error) { + event := new(WERC20MockTransfer) + if err := _WERC20Mock.contract.UnpackLog(event, "Transfer", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type WERC20MockWithdrawalIterator struct { + Event *WERC20MockWithdrawal + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *WERC20MockWithdrawalIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(WERC20MockWithdrawal) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(WERC20MockWithdrawal) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *WERC20MockWithdrawalIterator) Error() error 
{ + return it.fail +} + +func (it *WERC20MockWithdrawalIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type WERC20MockWithdrawal struct { + Src common.Address + Wad *big.Int + Raw types.Log +} + +func (_WERC20Mock *WERC20MockFilterer) FilterWithdrawal(opts *bind.FilterOpts, src []common.Address) (*WERC20MockWithdrawalIterator, error) { + + var srcRule []interface{} + for _, srcItem := range src { + srcRule = append(srcRule, srcItem) + } + + logs, sub, err := _WERC20Mock.contract.FilterLogs(opts, "Withdrawal", srcRule) + if err != nil { + return nil, err + } + return &WERC20MockWithdrawalIterator{contract: _WERC20Mock.contract, event: "Withdrawal", logs: logs, sub: sub}, nil +} + +func (_WERC20Mock *WERC20MockFilterer) WatchWithdrawal(opts *bind.WatchOpts, sink chan<- *WERC20MockWithdrawal, src []common.Address) (event.Subscription, error) { + + var srcRule []interface{} + for _, srcItem := range src { + srcRule = append(srcRule, srcItem) + } + + logs, sub, err := _WERC20Mock.contract.WatchLogs(opts, "Withdrawal", srcRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(WERC20MockWithdrawal) + if err := _WERC20Mock.contract.UnpackLog(event, "Withdrawal", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_WERC20Mock *WERC20MockFilterer) ParseWithdrawal(log types.Log) (*WERC20MockWithdrawal, error) { + event := new(WERC20MockWithdrawal) + if err := _WERC20Mock.contract.UnpackLog(event, "Withdrawal", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_WERC20Mock *WERC20Mock) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case 
_WERC20Mock.abi.Events["Approval"].ID: + return _WERC20Mock.ParseApproval(log) + case _WERC20Mock.abi.Events["Deposit"].ID: + return _WERC20Mock.ParseDeposit(log) + case _WERC20Mock.abi.Events["Transfer"].ID: + return _WERC20Mock.ParseTransfer(log) + case _WERC20Mock.abi.Events["Withdrawal"].ID: + return _WERC20Mock.ParseWithdrawal(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (WERC20MockApproval) Topic() common.Hash { + return common.HexToHash("0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925") +} + +func (WERC20MockDeposit) Topic() common.Hash { + return common.HexToHash("0xe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c") +} + +func (WERC20MockTransfer) Topic() common.Hash { + return common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") +} + +func (WERC20MockWithdrawal) Topic() common.Hash { + return common.HexToHash("0x7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65") +} + +func (_WERC20Mock *WERC20Mock) Address() common.Address { + return _WERC20Mock.address +} + +type WERC20MockInterface interface { + Allowance(opts *bind.CallOpts, owner common.Address, spender common.Address) (*big.Int, error) + + BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) + + Decimals(opts *bind.CallOpts) (uint8, error) + + Name(opts *bind.CallOpts) (string, error) + + Symbol(opts *bind.CallOpts) (string, error) + + TotalSupply(opts *bind.CallOpts) (*big.Int, error) + + Approve(opts *bind.TransactOpts, spender common.Address, amount *big.Int) (*types.Transaction, error) + + Burn(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) + + DecreaseAllowance(opts *bind.TransactOpts, spender common.Address, subtractedValue *big.Int) (*types.Transaction, error) + + Deposit(opts *bind.TransactOpts) (*types.Transaction, error) + + IncreaseAllowance(opts *bind.TransactOpts, 
spender common.Address, addedValue *big.Int) (*types.Transaction, error) + + Mint(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) + + Transfer(opts *bind.TransactOpts, to common.Address, amount *big.Int) (*types.Transaction, error) + + TransferFrom(opts *bind.TransactOpts, from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) + + Withdraw(opts *bind.TransactOpts, wad *big.Int) (*types.Transaction, error) + + Receive(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterApproval(opts *bind.FilterOpts, owner []common.Address, spender []common.Address) (*WERC20MockApprovalIterator, error) + + WatchApproval(opts *bind.WatchOpts, sink chan<- *WERC20MockApproval, owner []common.Address, spender []common.Address) (event.Subscription, error) + + ParseApproval(log types.Log) (*WERC20MockApproval, error) + + FilterDeposit(opts *bind.FilterOpts, dst []common.Address) (*WERC20MockDepositIterator, error) + + WatchDeposit(opts *bind.WatchOpts, sink chan<- *WERC20MockDeposit, dst []common.Address) (event.Subscription, error) + + ParseDeposit(log types.Log) (*WERC20MockDeposit, error) + + FilterTransfer(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*WERC20MockTransferIterator, error) + + WatchTransfer(opts *bind.WatchOpts, sink chan<- *WERC20MockTransfer, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseTransfer(log types.Log) (*WERC20MockTransfer, error) + + FilterWithdrawal(opts *bind.FilterOpts, src []common.Address) (*WERC20MockWithdrawalIterator, error) + + WatchWithdrawal(opts *bind.WatchOpts, sink chan<- *WERC20MockWithdrawal, src []common.Address) (event.Subscription, error) + + ParseWithdrawal(log types.Log) (*WERC20MockWithdrawal, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/shared/generation/generated-wrapper-dependency-versions-do-not-edit.txt 
b/core/gethwrappers/shared/generation/generated-wrapper-dependency-versions-do-not-edit.txt new file mode 100644 index 00000000..3268bb55 --- /dev/null +++ b/core/gethwrappers/shared/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -0,0 +1,5 @@ +GETH_VERSION: 1.13.8 +burn_mint_erc677: ../../../contracts/solc/v0.8.19/BurnMintERC677/BurnMintERC677.abi ../../../contracts/solc/v0.8.19/BurnMintERC677/BurnMintERC677.bin 405c9016171e614b17e10588653ef8d33dcea21dd569c3fddc596a46fcff68a3 +erc20: ../../../contracts/solc/v0.8.19/ERC20/ERC20.abi ../../../contracts/solc/v0.8.19/ERC20/ERC20.bin 5b1a93d9b24f250e49a730c96335a8113c3f7010365cba578f313b483001d4fc +link_token: ../../../contracts/solc/v0.8.19/LinkToken/LinkToken.abi ../../../contracts/solc/v0.8.19/LinkToken/LinkToken.bin c0ef9b507103aae541ebc31d87d051c2764ba9d843076b30ec505d37cdfffaba +werc20_mock: ../../../contracts/solc/v0.8.19/WERC20Mock/WERC20Mock.abi ../../../contracts/solc/v0.8.19/WERC20Mock/WERC20Mock.bin ff2ca3928b2aa9c412c892cb8226c4d754c73eeb291bb7481c32c48791b2aa94 diff --git a/core/gethwrappers/shared/go_generate.go b/core/gethwrappers/shared/go_generate.go new file mode 100644 index 00000000..6f3bead7 --- /dev/null +++ b/core/gethwrappers/shared/go_generate.go @@ -0,0 +1,8 @@ +// Package gethwrappers provides tools for wrapping solidity contracts with +// golang packages, using abigen. 
+package gethwrappers + +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/BurnMintERC677/BurnMintERC677.abi ../../../contracts/solc/v0.8.19/BurnMintERC677/BurnMintERC677.bin BurnMintERC677 burn_mint_erc677 +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/LinkToken/LinkToken.abi ../../../contracts/solc/v0.8.19/LinkToken/LinkToken.bin LinkToken link_token +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/ERC20/ERC20.abi ../../../contracts/solc/v0.8.19/ERC20/ERC20.bin ERC20 erc20 +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.19/WERC20Mock/WERC20Mock.abi ../../../contracts/solc/v0.8.19/WERC20Mock/WERC20Mock.bin WERC20Mock werc20_mock diff --git a/core/gethwrappers/transmission/generated/entry_point/entry_point.go b/core/gethwrappers/transmission/generated/entry_point/entry_point.go new file mode 100644 index 00000000..8c755e1b --- /dev/null +++ b/core/gethwrappers/transmission/generated/entry_point/entry_point.go @@ -0,0 +1,1871 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package entry_point + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type EntryPointMemoryUserOp struct { + Sender common.Address + Nonce *big.Int + CallGasLimit *big.Int + VerificationGasLimit *big.Int + PreVerificationGas *big.Int + Paymaster common.Address + MaxFeePerGas *big.Int + MaxPriorityFeePerGas *big.Int +} + +type EntryPointUserOpInfo struct { + MUserOp EntryPointMemoryUserOp + UserOpHash [32]byte + Prefund *big.Int + ContextOffset *big.Int + PreOpGas *big.Int +} + +type IEntryPointUserOpsPerAggregator struct { + UserOps []UserOperation + Aggregator common.Address + Signature []byte +} + +type IStakeManagerDepositInfo struct { + Deposit *big.Int + Staked bool + Stake *big.Int + UnstakeDelaySec uint32 + WithdrawTime *big.Int +} + +type UserOperation struct { + Sender common.Address + Nonce *big.Int + InitCode []byte + CallData []byte + CallGasLimit *big.Int + VerificationGasLimit *big.Int + PreVerificationGas *big.Int + MaxFeePerGas *big.Int + MaxPriorityFeePerGas *big.Int + PaymasterAndData []byte + Signature []byte +} + +var EntryPointMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"preOpGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"paid\",\"type\":\"uint256\"},{\"internalType\":\"uint48\",\"name\":\"validAfter\",\"type\":\"uint48\"},{\"internalType\":\"uint48\",\"name\":\"validUntil\",\"type\":\"uint48\"},{\"internalType\":\"bool\",\"name\":\"targetSuccess\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"targetResult\",\"type\":\"bytes\"}],\"name\":\"ExecutionResult\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"opIndex\",\"type\":\"uint256\"},{\"internalType\":\"string\",\"name\":\"reason\",\"type\":\"string\"}],\"name\":\"FailedOp\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"SenderAddressResult\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"aggregator\",\"type\":\"address\"}],\"name\":\"SignatureValidationFailed\",\"type\":\"error\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"preOpGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"prefund\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"sigFailed\",\"type\":\"bool\"},{\"internalType\":\"uint48\",\"name\":\"validAfter\",\"type\":\"uint48\"},{\"internalType\":\"uint48\",\"name\":\"validUntil\",\"type\":\"uint48\"},{\"internalType\":\"bytes\",\"name\":\"paymasterContext\",\"type\":\"bytes\"}],\"internalType\":\"structIEntryPoint.ReturnInfo\",\"name\":\"returnInfo\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"stake\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"unstakeDelaySec\",\"type\":\"uint256\"}],\"internalType\":\"structIStakeManager.StakeInfo\",\"name\":\"senderInfo\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"stake\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"unstakeDelaySec\",\"type\":\"uint256\"}],\"inter
nalType\":\"structIStakeManager.StakeInfo\",\"name\":\"factoryInfo\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"stake\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"unstakeDelaySec\",\"type\":\"uint256\"}],\"internalType\":\"structIStakeManager.StakeInfo\",\"name\":\"paymasterInfo\",\"type\":\"tuple\"}],\"name\":\"ValidationResult\",\"type\":\"error\"},{\"inputs\":[{\"components\":[{\"internalType\":\"uint256\",\"name\":\"preOpGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"prefund\",\"type\":\"uint256\"},{\"internalType\":\"bool\",\"name\":\"sigFailed\",\"type\":\"bool\"},{\"internalType\":\"uint48\",\"name\":\"validAfter\",\"type\":\"uint48\"},{\"internalType\":\"uint48\",\"name\":\"validUntil\",\"type\":\"uint48\"},{\"internalType\":\"bytes\",\"name\":\"paymasterContext\",\"type\":\"bytes\"}],\"internalType\":\"structIEntryPoint.ReturnInfo\",\"name\":\"returnInfo\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"stake\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"unstakeDelaySec\",\"type\":\"uint256\"}],\"internalType\":\"structIStakeManager.StakeInfo\",\"name\":\"senderInfo\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"stake\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"unstakeDelaySec\",\"type\":\"uint256\"}],\"internalType\":\"structIStakeManager.StakeInfo\",\"name\":\"factoryInfo\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"stake\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"unstakeDelaySec\",\"type\":\"uint256\"}],\"internalType\":\"structIStakeManager.StakeInfo\",\"name\":\"paymasterInfo\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"aggregator\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint256\",\"name\":\"stake\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"unst
akeDelaySec\",\"type\":\"uint256\"}],\"internalType\":\"structIStakeManager.StakeInfo\",\"name\":\"stakeInfo\",\"type\":\"tuple\"}],\"internalType\":\"structIEntryPoint.AggregatorStakeInfo\",\"name\":\"aggregatorInfo\",\"type\":\"tuple\"}],\"name\":\"ValidationResultWithAggregation\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"userOpHash\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"factory\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"paymaster\",\"type\":\"address\"}],\"name\":\"AccountDeployed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"totalDeposit\",\"type\":\"uint256\"}],\"name\":\"Deposited\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"aggregator\",\"type\":\"address\"}],\"name\":\"SignatureAggregatorChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"totalStaked\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"unstakeDelaySec\",\"type\":\"uint256\"}],\"name\":\"StakeLocked\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"withdrawTime\",\"type\":\"uint256\"}],\"name\":\"StakeUnlocked\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"addre
ss\",\"name\":\"withdrawAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"StakeWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"userOpHash\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"paymaster\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"actualGasCost\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"actualGasUsed\",\"type\":\"uint256\"}],\"name\":\"UserOperationEvent\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"userOpHash\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"revertReason\",\"type\":\"bytes\"}],\"name\":\"UserOperationRevertReason\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"withdrawAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"Withdrawn\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"SIG_VALIDATION_FAILED\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"initCode\",\"type\":\"bytes\"},
{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"paymasterAndData\",\"type\":\"bytes\"}],\"name\":\"_validateSenderAndPaymaster\",\"outputs\":[],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"unstakeDelaySec\",\"type\":\"uint32\"}],\"name\":\"addStake\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"balanceOf\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"depositTo\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"deposits\",\"outputs\":[{\"internalType\":\"uint112\",\"name\":\"deposit\",\"type\":\"uint112\"},{\"internalType\":\"bool\",\"name\":\"staked\",\"type\":\"bool\"},{\"internalType\":\"uint112\",\"name\":\"stake\",\"type\":\"uint112\"},{\"internalType\":\"uint32\",\"name\":\"unstakeDelaySec\",\"type\":\"uint32\"},{\"internalType\":\"uint48\",\"name\":\"withdrawTime\",\"type\":\"uint48\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"getDepositInfo\",\"outputs\":[{\"components\":[{\"internalType\":\"uint112\",\"name\":\"deposit\",\"type\":\"uint112\"},{\"internalType\":\"bool\",\"name\":\"staked\",\"type\":\"bool\"},{\"internalType\":\"uint112\",\"name\":\"stake\",\"type\":\"uint112\"},{\"internalType\":\"uint32\",\"name\":\"unstakeDelaySec\",\"type\":\"uint32\"},{\"internalType\":\"uint48\",\"name\":\"withdrawTime\",\"type\":\"uint48\"}],\"internalType\":\"structIStakeManager.DepositInfo\",\"name\":\"info\",\"type\":\"tuple\"}
],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"initCode\",\"type\":\"bytes\"}],\"name\":\"getSenderAddress\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"initCode\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"callGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"verificationGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"preVerificationGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxPriorityFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"paymasterAndData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"signature\",\"type\":\"bytes\"}],\"internalType\":\"structUserOperation\",\"name\":\"userOp\",\"type\":\"tuple\"}],\"name\":\"getUserOpHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"initCode\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"callGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"verificationGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"preVerificationGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxFeePerGas\",\"type\":
\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxPriorityFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"paymasterAndData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"signature\",\"type\":\"bytes\"}],\"internalType\":\"structUserOperation[]\",\"name\":\"userOps\",\"type\":\"tuple[]\"},{\"internalType\":\"contractIAggregator\",\"name\":\"aggregator\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"signature\",\"type\":\"bytes\"}],\"internalType\":\"structIEntryPoint.UserOpsPerAggregator[]\",\"name\":\"opsPerAggregator\",\"type\":\"tuple[]\"},{\"internalType\":\"addresspayable\",\"name\":\"beneficiary\",\"type\":\"address\"}],\"name\":\"handleAggregatedOps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"initCode\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"callGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"verificationGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"preVerificationGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxPriorityFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"paymasterAndData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"signature\",\"type\":\"bytes\"}],\"internalType\":\"structUserOperation[]\",\"name\":\"ops\",\"type\":\"tuple[]\"},{\"internalType\":\"addresspayable\",\"name\":\"beneficiary\",\"type\":\"address\"}],\"name\":\"handleOps\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"callData\"
,\"type\":\"bytes\"},{\"components\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"callGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"verificationGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"preVerificationGas\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"paymaster\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"maxFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxPriorityFeePerGas\",\"type\":\"uint256\"}],\"internalType\":\"structEntryPoint.MemoryUserOp\",\"name\":\"mUserOp\",\"type\":\"tuple\"},{\"internalType\":\"bytes32\",\"name\":\"userOpHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"prefund\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"contextOffset\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"preOpGas\",\"type\":\"uint256\"}],\"internalType\":\"structEntryPoint.UserOpInfo\",\"name\":\"opInfo\",\"type\":\"tuple\"},{\"internalType\":\"bytes\",\"name\":\"context\",\"type\":\"bytes\"}],\"name\":\"innerHandleOp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"actualGasCost\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"initCode\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"callGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"verificationGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"preVerificationGas\",\"type\":\"uint256\"},{\"internalType\":\"u
int256\",\"name\":\"maxFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxPriorityFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"paymasterAndData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"signature\",\"type\":\"bytes\"}],\"internalType\":\"structUserOperation\",\"name\":\"op\",\"type\":\"tuple\"},{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"targetCallData\",\"type\":\"bytes\"}],\"name\":\"simulateHandleOp\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"initCode\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"callGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"verificationGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"preVerificationGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxPriorityFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"paymasterAndData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"signature\",\"type\":\"bytes\"}],\"internalType\":\"structUserOperation\",\"name\":\"userOp\",\"type\":\"tuple\"}],\"name\":\"simulateValidation\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unlockStake\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"addresspayable\",\"name\":\"withdrawAddress\",\"type\":\"address\"}],\"name\":\"withdrawStake\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{
\"inputs\":[{\"internalType\":\"addresspayable\",\"name\":\"withdrawAddress\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"withdrawAmount\",\"type\":\"uint256\"}],\"name\":\"withdrawTo\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", + Bin: "0x60a0604052604051620000129062000050565b604051809103906000f0801580156200002f573d6000803e3d6000fd5b506001600160a01b03166080523480156200004957600080fd5b506200005e565b61020a8062004b1883390190565b608051614a97620000816000396000818161146e015261363d0152614a976000f3fe6080604052600436106101125760003560e01c8063957122ab116100a5578063bb9fe6bf11610074578063d6383f9411610059578063d6383f941461042c578063ee2194231461044c578063fc7e286d1461046c57600080fd5b8063bb9fe6bf146103f7578063c23a5cea1461040c57600080fd5b8063957122ab146103845780639b249f69146103a4578063a6193531146103c4578063b760faf9146103e457600080fd5b80634b1d7cf5116100e15780634b1d7cf5146101ad5780635287ce12146101cd57806370a082311461031c5780638f41ec5a1461036f57600080fd5b80630396cb60146101275780631d7327561461013a5780631fad948c1461016d578063205c28781461018d57600080fd5b366101225761012033610546565b005b600080fd5b6101206101353660046139b0565b6105c1565b34801561014657600080fd5b5061015a610155366004613c27565b610944565b6040519081526020015b60405180910390f35b34801561017957600080fd5b50610120610188366004613d32565b610af7565b34801561019957600080fd5b506101206101a8366004613d89565b610c38565b3480156101b957600080fd5b506101206101c8366004613d32565b610e3a565b3480156101d957600080fd5b506102bd6101e8366004613db5565b6040805160a0810182526000808252602082018190529181018290526060810182905260808101919091525073ffffffffffffffffffffffffffffffffffffffff1660009081526020818152604091829020825160a08101845281546dffffffffffffffffffffffffffff80821683526e010000000000000000000000000000820460ff161515948301949094526f0100000000000000000000000000000090049092169282019290925260019091015463ffffffff81166060830152640100000000900465ffffffffffff1660808201
5290565b6040805182516dffffffffffffffffffffffffffff908116825260208085015115159083015283830151169181019190915260608083015163ffffffff169082015260809182015165ffffffffffff169181019190915260a001610164565b34801561032857600080fd5b5061015a610337366004613db5565b73ffffffffffffffffffffffffffffffffffffffff166000908152602081905260409020546dffffffffffffffffffffffffffff1690565b34801561037b57600080fd5b5061015a600181565b34801561039057600080fd5b5061012061039f366004613dd2565b6112d9565b3480156103b057600080fd5b506101206103bf366004613e57565b611431565b3480156103d057600080fd5b5061015a6103df366004613eb2565b611533565b6101206103f2366004613db5565b610546565b34801561040357600080fd5b50610120611575565b34801561041857600080fd5b50610120610427366004613db5565b61172c565b34801561043857600080fd5b50610120610447366004613ee7565b611a2c565b34801561045857600080fd5b50610120610467366004613eb2565b611b5a565b34801561047857600080fd5b506104f9610487366004613db5565b600060208190529081526040902080546001909101546dffffffffffffffffffffffffffff808316926e010000000000000000000000000000810460ff16926f010000000000000000000000000000009091049091169063ffffffff811690640100000000900465ffffffffffff1685565b604080516dffffffffffffffffffffffffffff96871681529415156020860152929094169183019190915263ffffffff16606082015265ffffffffffff909116608082015260a001610164565b6105508134611ec2565b73ffffffffffffffffffffffffffffffffffffffff811660008181526020818152604091829020805492516dffffffffffffffffffffffffffff909316835292917f2da466a7b24304f47e87fa2e1e5a81b9831ce54fec19055ce277ca2f39ba42c491015b60405180910390a25050565b33600090815260208190526040902063ffffffff8216610642576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f6d757374207370656369667920756e7374616b652064656c617900000000000060448201526064015b60405180910390fd5b600181015463ffffffff90811690831610156106ba576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f63616e6e6f7420646563726561736520756e7374
616b652074696d65000000006044820152606401610639565b80546000906106ed9034906f0100000000000000000000000000000090046dffffffffffffffffffffffffffff16613f78565b905060008111610759576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6e6f207374616b652073706563696669656400000000000000000000000000006044820152606401610639565b6dffffffffffffffffffffffffffff8111156107d1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600e60248201527f7374616b65206f766572666c6f770000000000000000000000000000000000006044820152606401610639565b6040805160a08101825283546dffffffffffffffffffffffffffff90811682526001602080840182815286841685870190815263ffffffff808b16606088019081526000608089018181523380835296829052908a9020985189549551945189166f01000000000000000000000000000000027fffffff0000000000000000000000000000ffffffffffffffffffffffffffffff9515156e010000000000000000000000000000027fffffffffffffffffffffffffffffffffff0000000000000000000000000000009097169190991617949094179290921695909517865551949092018054925165ffffffffffff16640100000000027fffffffffffffffffffffffffffffffffffffffffffff00000000000000000000909316949093169390931717905590517fa5ae833d0bb1dcd632d98a8b70973e8516812898e19bf27b70071ebc8dc52c0190610937908490879091825263ffffffff16602082015260400190565b60405180910390a2505050565b6000805a90503330146109b3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4141393220696e7465726e616c2063616c6c206f6e6c790000000000000000006044820152606401610639565b8451604081015160608201518101611388015a10156109f6577fdeaddead0000000000000000000000000000000000000000000000000000000060005260206000fd5b875160009015610a97576000610a13846000015160008c86611fbf565b905080610a95576000610a27610800611fd7565b805190915015610a8f57846000015173ffffffffffffffffffffffffffffffffffffffff168a602001517f1c4fada7374c0a9ee8841fc38afe82932dc0f8e69012e927f061a8bae611a201876020015184604051610a86929190614006565b604051
80910390a35b60019250505b505b600088608001515a8603019050610ae96000838b8b8b8080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250889250612003915050565b9a9950505050505050505050565b8160008167ffffffffffffffff811115610b1357610b136139d6565b604051908082528060200260200182016040528015610b4c57816020015b610b3961390c565b815260200190600190039081610b315790505b50905060005b82811015610bc5576000828281518110610b6e57610b6e61401f565b60200260200101519050600080610ba9848a8a87818110610b9157610b9161401f565b9050602002810190610ba3919061404e565b856123e1565b91509150610bba84838360006125a3565b505050600101610b52565b506000805b83811015610c2557610c1981888884818110610be857610be861401f565b9050602002810190610bfa919061404e565b858481518110610c0c57610c0c61401f565b60200260200101516127f8565b90910190600101610bca565b50610c30848261297d565b505050505050565b33600090815260208190526040902080546dffffffffffffffffffffffffffff16821115610cc2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f576974686472617720616d6f756e7420746f6f206c61726765000000000000006044820152606401610639565b8054610cdf9083906dffffffffffffffffffffffffffff1661408c565b81547fffffffffffffffffffffffffffffffffffff0000000000000000000000000000166dffffffffffffffffffffffffffff919091161781556040805173ffffffffffffffffffffffffffffffffffffffff851681526020810184905233917fd1c19fbcd4551a5edfb66d43d2e337c04837afda3482b42bdf569a8fccdae5fb910160405180910390a260008373ffffffffffffffffffffffffffffffffffffffff168360405160006040518083038185875af1925050503d8060008114610dc4576040519150601f19603f3d011682016040523d82523d6000602084013e610dc9565b606091505b5050905080610e34576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6661696c656420746f20776974686472617700000000000000000000000000006044820152606401610639565b50505050565b816000805b828110156110335736868683818110610e5a57610e5a61401f565b9050602002810190610e6c91906140a3565b9050366000
610e7b83806140d7565b90925090506000610e926040850160208601613db5565b90507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff73ffffffffffffffffffffffffffffffffffffffff821601610f33576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4141393620696e76616c69642061676772656761746f720000000000000000006044820152606401610639565b73ffffffffffffffffffffffffffffffffffffffff8116156110105773ffffffffffffffffffffffffffffffffffffffff811663e3563a4f8484610f7a604089018961413f565b6040518563ffffffff1660e01b8152600401610f999493929190614355565b60006040518083038186803b158015610fb157600080fd5b505afa925050508015610fc2575060015b611010576040517f86a9f75000000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610639565b61101a8287613f78565b955050505050808061102b9061440c565b915050610e3f565b5060008167ffffffffffffffff81111561104f5761104f6139d6565b60405190808252806020026020018201604052801561108857816020015b61107561390c565b81526020019060019003908161106d5790505b5090506000805b8481101561117357368888838181106110aa576110aa61401f565b90506020028101906110bc91906140a3565b90503660006110cb83806140d7565b909250905060006110e26040850160208601613db5565b90508160005b8181101561115a5760008989815181106111045761110461401f565b602002602001015190506000806111278b898987818110610b9157610b9161401f565b91509150611137848383896125a3565b8a6111418161440c565b9b505050505080806111529061440c565b9150506110e8565b505050505050808061116b9061440c565b91505061108f565b50600080915060005b8581101561129957368989838181106111975761119761401f565b90506020028101906111a991906140a3565b90506111bb6040820160208301613db5565b73ffffffffffffffffffffffffffffffffffffffff167f575ff3acadd5ab348fe1855e217e0f3678f8d767d7494c9f9fefbee2e17cca4d60405160405180910390a236600061120a83806140d7565b90925090508060005b8181101561128157611255888585848181106112315761123161401f565b9050602002810190611243919061404e565b8b8b81518110610c0c57610c0c61401f565b61125f90
88613f78565b96508761126b8161440c565b98505080806112799061440c565b915050611213565b505050505080806112919061440c565b91505061117c565b506040516000907f575ff3acadd5ab348fe1855e217e0f3678f8d767d7494c9f9fefbee2e17cca4d908290a26112cf868261297d565b5050505050505050565b831580156112fc575073ffffffffffffffffffffffffffffffffffffffff83163b155b15611363576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f41413230206163636f756e74206e6f74206465706c6f796564000000000000006044820152606401610639565b601481106113f557600061137a6014828486614444565b6113839161446e565b60601c9050803b6000036113f3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f41413330207061796d6173746572206e6f74206465706c6f79656400000000006044820152606401610639565b505b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526020600482015260006024820152604401610639565b6040517f570e1a3600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169063570e1a36906114a590859085906004016144b6565b6020604051808303816000875af11580156114c4573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906114e891906144ca565b6040517f6ca7b80600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091166004820152602401610639565b600061153e82612ac9565b6040805160208101929092523090820152466060820152608001604051602081830303815290604052805190602001209050919050565b3360009081526020819052604081206001810154909163ffffffff90911690036115fb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600a60248201527f6e6f74207374616b6564000000000000000000000000000000000000000000006044820152606401610639565b80546e010000000000000000000000000000900460ff16611678576040517f08c379a00000000000000000000000000000000000000000000000000000000081526020600482
0152601160248201527f616c726561647920756e7374616b696e670000000000000000000000000000006044820152606401610639565b60018101546000906116909063ffffffff16426144e7565b6001830180547fffffffffffffffffffffffffffffffffffffffffffff000000000000ffffffff1664010000000065ffffffffffff84169081029190911790915583547fffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffff16845560405190815290915033907ffa9b3c14cc825c412c9ed81b3ba365a5b459439403f18829e572ed53a4180f0a906020016105b5565b33600090815260208190526040902080546f0100000000000000000000000000000090046dffffffffffffffffffffffffffff16806117c7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f4e6f207374616b6520746f2077697468647261770000000000000000000000006044820152606401610639565b6001820154640100000000900465ffffffffffff16611842576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f6d7573742063616c6c20756e6c6f636b5374616b6528292066697273740000006044820152606401610639565b60018201544264010000000090910465ffffffffffff1611156118c1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f5374616b65207769746864726177616c206973206e6f742064756500000000006044820152606401610639565b6001820180547fffffffffffffffffffffffffffffffffffffffffffff0000000000000000000016905581547fffffff0000000000000000000000000000ffffffffffffffffffffffffffffff1682556040805173ffffffffffffffffffffffffffffffffffffffff851681526020810183905233917fb7c918e0e249f999e965cafeb6c664271b3f4317d296461500e71da39f0cbda3910160405180910390a260008373ffffffffffffffffffffffffffffffffffffffff168260405160006040518083038185875af1925050503d80600081146119bc576040519150601f19603f3d011682016040523d82523d6000602084013e6119c1565b606091505b5050905080610e34576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f6661696c656420746f207769746864726177207374616b65000000000000000060448201526064016106
39565b611a3461390c565b611a3d85612ae2565b600080611a4c600088856123e1565b915091506000611a5c8383612bd5565b9050611a6743600052565b6000611a7560008a876127f8565b9050611a8043600052565b6000606073ffffffffffffffffffffffffffffffffffffffff8a1615611b10578973ffffffffffffffffffffffffffffffffffffffff168989604051611ac7929190614511565b6000604051808303816000865af19150503d8060008114611b04576040519150601f19603f3d011682016040523d82523d6000602084013e611b09565b606091505b5090925090505b8660800151838560200151866040015185856040517f8b7ac98000000000000000000000000000000000000000000000000000000000815260040161063996959493929190614521565b611b6261390c565b611b6b82612ae2565b600080611b7a600085856123e1565b845160a001516040805180820182526000808252602080830182815273ffffffffffffffffffffffffffffffffffffffff958616835282825284832080546dffffffffffffffffffffffffffff6f01000000000000000000000000000000918290048116875260019283015463ffffffff9081169094528d51518851808a018a5287815280870188815291909a16875286865288872080549390930490911689529101549091169052835180850190945281845283015293955091935090366000611c4860408a018a61413f565b909250905060006014821015611c5f576000611c7a565b611c6d601460008486614444565b611c769161446e565b60601c5b6040805180820182526000808252602080830182815273ffffffffffffffffffffffffffffffffffffffff861683529082905292902080546f0100000000000000000000000000000090046dffffffffffffffffffffffffffff1682526001015463ffffffff1690915290915093505050506000611cf88686612bd5565b90506000816000015190506000600173ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614905060006040518060c001604052808b6080015181526020018b6040015181526020018315158152602001856020015165ffffffffffff168152602001856040015165ffffffffffff168152602001611d8f8c6060015190565b9052905073ffffffffffffffffffffffffffffffffffffffff831615801590611dcf575073ffffffffffffffffffffffffffffffffffffffff8316600114155b15611e885760408051808201825273ffffffffffffffffffffffffffffffffffffffff85168082528251808401845260008082526020808301828152938252818152
9085902080546f0100000000000000000000000000000090046dffffffffffffffffffffffffffff1683526001015463ffffffff169092529082015290517ffaecb4e4000000000000000000000000000000000000000000000000000000008152610639908390899089908c9086906004016145c3565b808686896040517fe0cff05f0000000000000000000000000000000000000000000000000000000081526004016106399493929190614650565b73ffffffffffffffffffffffffffffffffffffffff821660009081526020819052604081208054909190611f079084906dffffffffffffffffffffffffffff16613f78565b90506dffffffffffffffffffffffffffff811115611f81576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f6465706f736974206f766572666c6f77000000000000000000000000000000006044820152606401610639565b81547fffffffffffffffffffffffffffffffffffff0000000000000000000000000000166dffffffffffffffffffffffffffff919091161790555050565b6000806000845160208601878987f195945050505050565b60603d82811115611fe55750815b604051602082018101604052818152816000602083013e9392505050565b6000805a85519091506000908161201982612cbb565b60a083015190915073ffffffffffffffffffffffffffffffffffffffff81166120455782519350612293565b80935060008851111561229357868202955060028a600281111561206b5761206b6146a7565b146121035760608301516040517fa9a2340900000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff83169163a9a23409916120cb908e908d908c906004016146d6565b600060405180830381600088803b1580156120e557600080fd5b5087f11580156120f9573d6000803e3d6000fd5b5050505050612293565b60608301516040517fa9a2340900000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff83169163a9a234099161215e908e908d908c906004016146d6565b600060405180830381600088803b15801561217857600080fd5b5087f19350505050801561218a575060015b61229357612196614736565b806308c379a00361222657506121aa614752565b806121b55750612228565b8b816040516020016121c791906147fa565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152908290527f2202
66b60000000000000000000000000000000000000000000000000000000082526106399291600401614006565b505b8a6040517f220266b60000000000000000000000000000000000000000000000000000000081526004016106399181526040602082018190526012908201527f4141353020706f73744f70207265766572740000000000000000000000000000606082015260800190565b5a85038701965081870295508589604001511015612315578a6040517f220266b600000000000000000000000000000000000000000000000000000000815260040161063991815260406020808301829052908201527f414135312070726566756e642062656c6f772061637475616c476173436f7374606082015260800190565b60408901518690036123278582611ec2565b6000808c600281111561233c5761233c6146a7565b1490508460a0015173ffffffffffffffffffffffffffffffffffffffff16856000015173ffffffffffffffffffffffffffffffffffffffff168c602001517f49628fd1471006c1482da88028e9ce4dbb080b815c9b0344d39e5a8e6ec1419f8860200151858d8f6040516123c9949392919093845291151560208401526040830152606082015260800190565b60405180910390a45050505050505095945050505050565b60008060005a84519091506123f68682612ceb565b6123ff86611533565b6020860152604081015160608201516080830151171760e087013517610100870135176effffffffffffffffffffffffffffff81111561249b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f41413934206761732076616c756573206f766572666c6f7700000000000000006044820152606401610639565b6000806124a784612e0b565b90506124b58a8a8a84612e65565b975091506124c243600052565b60a084015160609073ffffffffffffffffffffffffffffffffffffffff16156124f7576124f28b8b8b858761317b565b975090505b60005a87039050808b60a001351015612575578b6040517f220266b6000000000000000000000000000000000000000000000000000000008152600401610639918152604060208201819052601e908201527f41413430206f76657220766572696669636174696f6e4761734c696d69740000606082015260800190565b60408a018390528160608b015260c08b01355a8803018a608001818152505050505050505050935093915050565b6000806125af8561343e565b915091508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff16
1461265157856040517f220266b60000000000000000000000000000000000000000000000000000000081526004016106399181526040602082018190526014908201527f41413234207369676e6174757265206572726f72000000000000000000000000606082015260800190565b80156126c257856040517f220266b60000000000000000000000000000000000000000000000000000000081526004016106399181526040602082018190526017908201527f414132322065787069726564206f72206e6f7420647565000000000000000000606082015260800190565b60006126cd8561343e565b9250905073ffffffffffffffffffffffffffffffffffffffff81161561275857866040517f220266b60000000000000000000000000000000000000000000000000000000081526004016106399181526040602082018190526014908201527f41413334207369676e6174757265206572726f72000000000000000000000000606082015260800190565b81156127ef57866040517f220266b60000000000000000000000000000000000000000000000000000000081526004016106399181526040602082018190526021908201527f41413332207061796d61737465722065787069726564206f72206e6f7420647560608201527f6500000000000000000000000000000000000000000000000000000000000000608082015260a00190565b50505050505050565b6000805a9050600061280b846060015190565b905030631d732756612820606088018861413f565b87856040518563ffffffff1660e01b8152600401612841949392919061483f565b6020604051808303816000875af192505050801561289a575060408051601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820190925261289791810190614900565b60015b61297157600060206000803e506000517f2152215300000000000000000000000000000000000000000000000000000000810161293c57866040517f220266b6000000000000000000000000000000000000000000000000000000008152600401610639918152604060208201819052600f908201527f41413935206f7574206f66206761730000000000000000000000000000000000606082015260800190565b600085608001515a61294e908661408c565b6129589190613f78565b9050612968886002888685612003565b94505050612974565b92505b50509392505050565b73ffffffffffffffffffffffffffffffffffffffff82166129fa576040517f08c379a00000000000000000000000000000000000000000000000000000000081526020600482015260186024
8201527f4141393020696e76616c69642062656e656669636961727900000000000000006044820152606401610639565b60008273ffffffffffffffffffffffffffffffffffffffff168260405160006040518083038185875af1925050503d8060008114612a54576040519150601f19603f3d011682016040523d82523d6000602084013e612a59565b606091505b5050905080612ac4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f41413931206661696c65642073656e6420746f2062656e6566696369617279006044820152606401610639565b505050565b6000612ad482613491565b805190602001209050919050565b3063957122ab612af5604084018461413f565b612b026020860186613db5565b612b1061012087018761413f565b6040518663ffffffff1660e01b8152600401612b30959493929190614919565b60006040518083038186803b158015612b4857600080fd5b505afa925050508015612b59575060015b612bd257612b65614736565b806308c379a003612bc65750612b79614752565b80612b845750612bc8565b805115612bc2576000816040517f220266b6000000000000000000000000000000000000000000000000000000008152600401610639929190614006565b5050565b505b3d6000803e3d6000fd5b50565b6040805160608101825260008082526020820181905291810182905290612bfb846134d0565b90506000612c08846134d0565b825190915073ffffffffffffffffffffffffffffffffffffffff8116612c2c575080515b602080840151604080860151928501519085015191929165ffffffffffff8083169085161015612c5a578193505b8065ffffffffffff168365ffffffffffff161115612c76578092505b50506040805160608101825273ffffffffffffffffffffffffffffffffffffffff909416845265ffffffffffff92831660208501529116908201529250505092915050565b60c081015160e082015160009190808203612cd7575092915050565b612ce38248830161354e565b949350505050565b612cf86020830183613db5565b73ffffffffffffffffffffffffffffffffffffffff16815260208083013590820152608080830135604083015260a0830135606083015260c0808401359183019190915260e0808401359183019190915261010083013590820152366000612d6461012085018561413f565b90925090508015612dfe576014811015612dda576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f41413933
20696e76616c6964207061796d6173746572416e64446174610000006044820152606401610639565b612de8601460008385614444565b612df19161446e565b60601c60a0840152610e34565b600060a084015250505050565b60a0810151600090819073ffffffffffffffffffffffffffffffffffffffff16612e36576001612e39565b60035b60ff16905060008360800151828560600151028560400151010190508360c00151810292505050919050565b60008060005a8551805191925090612e8a8988612e8560408c018c61413f565b613566565b60a0820151612e9843600052565b600073ffffffffffffffffffffffffffffffffffffffff8216612f015773ffffffffffffffffffffffffffffffffffffffff83166000908152602081905260409020546dffffffffffffffffffffffffffff16888111612efa57808903612efd565b60005b9150505b606084015160208a01516040517f3a871cdd00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff861692633a871cdd929091612f61918f91879060040161495c565b60206040518083038160008887f193505050508015612fbb575060408051601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201909252612fb891810190614900565b60015b61306557612fc7614736565b806308c379a003612ff85750612fdb614752565b80612fe65750612ffa565b8b816040516020016121c79190614981565b505b8a6040517f220266b60000000000000000000000000000000000000000000000000000000081526004016106399181526040602082018190526016908201527f4141323320726576657274656420286f72204f4f472900000000000000000000606082015260800190565b955073ffffffffffffffffffffffffffffffffffffffff82166131685773ffffffffffffffffffffffffffffffffffffffff8316600090815260208190526040902080546dffffffffffffffffffffffffffff16808a111561312c578c6040517f220266b60000000000000000000000000000000000000000000000000000000081526004016106399181526040602082018190526017908201527f41413231206469646e2774207061792070726566756e64000000000000000000606082015260800190565b81547fffffffffffffffffffffffffffffffffffff000000000000000000000000000016908a90036dffffffffffffffffffffffffffff161790555b5a85039650505050505094509492505050565b825160608181015190916000918481116131f1576040517f08c379a00000
0000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f4141343120746f6f206c6974746c6520766572696669636174696f6e476173006044820152606401610639565b60a082015173ffffffffffffffffffffffffffffffffffffffff8116600090815260208190526040902080548784039291906dffffffffffffffffffffffffffff16898110156132a6578c6040517f220266b6000000000000000000000000000000000000000000000000000000008152600401610639918152604060208201819052601e908201527f41413331207061796d6173746572206465706f73697420746f6f206c6f770000606082015260800190565b8981038260000160006101000a8154816dffffffffffffffffffffffffffff02191690836dffffffffffffffffffffffffffff1602179055508273ffffffffffffffffffffffffffffffffffffffff1663f465c77e858e8e602001518e6040518563ffffffff1660e01b81526004016133219392919061495c565b60006040518083038160008887f19350505050801561338057506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261337d91908101906149c6565b60015b61342a5761338c614736565b806308c379a0036133bd57506133a0614752565b806133ab57506133bf565b8d816040516020016121c79190614a52565b505b8c6040517f220266b60000000000000000000000000000000000000000000000000000000081526004016106399181526040602082018190526016908201527f4141333320726576657274656420286f72204f4f472900000000000000000000606082015260800190565b909e909d509b505050505050505050505050565b6000808260000361345457506000928392509050565b600061345f846134d0565b9050806040015165ffffffffffff164211806134865750806020015165ffffffffffff1642105b905194909350915050565b60603660006134a461014085018561413f565b915091508360208184030360405194506020810185016040528085528082602087013750505050919050565b60408051606081018252600080825260208201819052918101919091528160a081901c65ffffffffffff811660000361350c575065ffffffffffff5b6040805160608101825273ffffffffffffffffffffffffffffffffffffffff909316835260d09490941c602083015265ffffffffffff16928101929092525090565b600081831061355d578161355f565b825b9392505050565b8015610e345782515173ffffffffffffffffffffffffffffffffff
ffffff81163b156135f757846040517f220266b6000000000000000000000000000000000000000000000000000000008152600401610639918152604060208201819052601f908201527f414131302073656e64657220616c726561647920636f6e737472756374656400606082015260800190565b8351606001516040517f570e1a3600000000000000000000000000000000000000000000000000000000815260009173ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169163570e1a36919061367590889088906004016144b6565b60206040518083038160008887f1158015613694573d6000803e3d6000fd5b50505050506040513d601f19601f820116820180604052508101906136b991906144ca565b905073ffffffffffffffffffffffffffffffffffffffff811661374157856040517f220266b6000000000000000000000000000000000000000000000000000000008152600401610639918152604060208201819052601b908201527f4141313320696e6974436f6465206661696c6564206f72204f4f470000000000606082015260800190565b8173ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16146137de57856040517f220266b600000000000000000000000000000000000000000000000000000000815260040161063991815260406020808301829052908201527f4141313420696e6974436f6465206d7573742072657475726e2073656e646572606082015260800190565b8073ffffffffffffffffffffffffffffffffffffffff163b60000361386757856040517f220266b600000000000000000000000000000000000000000000000000000000815260040161063991815260406020808301829052908201527f4141313520696e6974436f6465206d757374206372656174652073656e646572606082015260800190565b60006138766014828688614444565b61387f9161446e565b60601c90508273ffffffffffffffffffffffffffffffffffffffff1686602001517fd51a9c61267aa6196961883ecf5ff2da6619c37dac0fa92122513fb32c032d2d83896000015160a001516040516138fb92919073ffffffffffffffffffffffffffffffffffffffff92831681529116602082015260400190565b60405180910390a350505050505050565b6040518060a0016040528061398b604051806101000160405280600073ffffffffffffffffffffffffffffffffffffffff16815260200160008152602001600081526020016000815260200160008152602001600073ffffffffffffff
ffffffffffffffffffffffffff16815260200160008152602001600081525090565b8152602001600080191681526020016000815260200160008152602001600081525090565b6000602082840312156139c257600080fd5b813563ffffffff8116811461355f57600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60a0810181811067ffffffffffffffff82111715613a2557613a256139d6565b60405250565b610100810181811067ffffffffffffffff82111715613a2557613a256139d6565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f830116810181811067ffffffffffffffff82111715613a9057613a906139d6565b6040525050565b600067ffffffffffffffff821115613ab157613ab16139d6565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b73ffffffffffffffffffffffffffffffffffffffff81168114612bd257600080fd5b8035613b0a81613add565b919050565b6000818303610180811215613b2357600080fd5b604051613b2f81613a05565b80925061010080831215613b4257600080fd5b6040519250613b5083613a2b565b613b5985613aff565b835260208501356020840152604085013560408401526060850135606084015260808501356080840152613b8f60a08601613aff565b60a084015260c085013560c084015260e085013560e084015282825280850135602083015250610120840135604082015261014084013560608201526101608401356080820152505092915050565b60008083601f840112613bf057600080fd5b50813567ffffffffffffffff811115613c0857600080fd5b602083019150836020828501011115613c2057600080fd5b9250929050565b6000806000806101c08587031215613c3e57600080fd5b843567ffffffffffffffff80821115613c5657600080fd5b818701915087601f830112613c6a57600080fd5b8135613c7581613a97565b604051613c828282613a4c565b8281528a6020848701011115613c9757600080fd5b82602086016020830137600060208483010152809850505050613cbd8860208901613b0f565b94506101a0870135915080821115613cd457600080fd5b50613ce187828801613bde565b95989497509550505050565b60008083601f840112613cff57600080fd5b50813567ffffffffffffffff811115613d1757600080fd5b6020830191508360208260051b8501011115613c2057600080fd5b600080600060408486031215613d4757600080fd5b833567ffffffffffffff
ff811115613d5e57600080fd5b613d6a86828701613ced565b9094509250506020840135613d7e81613add565b809150509250925092565b60008060408385031215613d9c57600080fd5b8235613da781613add565b946020939093013593505050565b600060208284031215613dc757600080fd5b813561355f81613add565b600080600080600060608688031215613dea57600080fd5b853567ffffffffffffffff80821115613e0257600080fd5b613e0e89838a01613bde565b909750955060208801359150613e2382613add565b90935060408701359080821115613e3957600080fd5b50613e4688828901613bde565b969995985093965092949392505050565b60008060208385031215613e6a57600080fd5b823567ffffffffffffffff811115613e8157600080fd5b613e8d85828601613bde565b90969095509350505050565b60006101608284031215613eac57600080fd5b50919050565b600060208284031215613ec457600080fd5b813567ffffffffffffffff811115613edb57600080fd5b612ce384828501613e99565b60008060008060608587031215613efd57600080fd5b843567ffffffffffffffff80821115613f1557600080fd5b613f2188838901613e99565b955060208701359150613f3382613add565b90935060408601359080821115613cd457600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60008219821115613f8b57613f8b613f49565b500190565b60005b83811015613fab578181015183820152602001613f93565b83811115610e345750506000910152565b60008151808452613fd4816020860160208601613f90565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b828152604060208201526000612ce36040830184613fbc565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600082357ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffea183360301811261408257600080fd5b9190910192915050565b60008282101561409e5761409e613f49565b500390565b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa183360301811261408257600080fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe184360301811261410c57600080fd5b83018035915067ffffffffffffffff82111561412757600080fd5b6020019150600581901b3603821315613c20
57600080fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe184360301811261417457600080fd5b83018035915067ffffffffffffffff82111561418f57600080fd5b602001915036819003821315613c2057600080fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18436030181126141d957600080fd5b830160208101925035905067ffffffffffffffff8111156141f957600080fd5b803603821315613c2057600080fd5b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b600061016061427d8461426385613aff565b73ffffffffffffffffffffffffffffffffffffffff169052565b6020830135602085015261429460408401846141a4565b8260408701526142a78387018284614208565b925050506142b860608401846141a4565b85830360608701526142cb838284614208565b925050506080830135608085015260a083013560a085015260c083013560c085015260e083013560e0850152610100808401358186015250610120614312818501856141a4565b86840383880152614324848284614208565b9350505050610140614338818501856141a4565b8684038388015261434a848284614208565b979650505050505050565b6040808252810184905260006060600586901b830181019083018783805b898110156143f5577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa087860301845282357ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffea18c36030181126143d3578283fd5b6143df868d8301614251565b9550506020938401939290920191600101614373565b50505050828103602084015261434a818587614208565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361443d5761443d613f49565b5060010190565b6000808585111561445457600080fd5b8386111561446157600080fd5b5050820193919092039150565b7fffffffffffffffffffffffffffffffffffffffff00000000000000000000000081358181169160148510156144ae5780818660140360031b1b83161692505b505092915050565b602081526000612ce3602083018486614208565b6000602082840312156144dc57600080fd5b815161355f81613add565b600065ffffffffffff80831681851680830382111561450857614508613f49565b01949350505050565b8183823760009101908152919050565b
868152856020820152600065ffffffffffff8087166040840152808616606084015250831515608083015260c060a083015261456060c0830184613fbc565b98975050505050505050565b80518252602081015160208301526040810151151560408301526000606082015165ffffffffffff8082166060860152806080850151166080860152505060a082015160c060a0850152612ce360c0850182613fbc565b60006101408083526145d78184018961456c565b9150506145f1602083018780518252602090810151910152565b845160608301526020948501516080830152835160a08301529284015160c0820152815173ffffffffffffffffffffffffffffffffffffffff1660e0820152908301518051610100830152909201516101209092019190915292915050565b60e08152600061466360e083018761456c565b905061467c602083018680518252602090810151910152565b8351606083015260208401516080830152825160a0830152602083015160c083015295945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b60006003851061470f577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b848252606060208301526147266060830185613fbc565b9050826040830152949350505050565b600060033d111561474f5760046000803e5060005160e01c5b90565b600060443d10156147605790565b6040517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc803d016004833e81513d67ffffffffffffffff81602484011181841117156147ae57505050505090565b82850191508151818111156147c65750505050505090565b843d87010160208285010111156147e05750505050505090565b6147ef60208286010187613a4c565b509095945050505050565b7f4141353020706f73744f702072657665727465643a2000000000000000000000815260008251614832816016850160208701613f90565b9190910160160192915050565b60006101c08083526148548184018789614208565b9050845173ffffffffffffffffffffffffffffffffffffffff808251166020860152602082015160408601526040820151606086015260608201516080860152608082015160a08601528060a08301511660c08601525060c081015160e085015260e08101516101008501525060208501516101208401526040850151610140840152606085015161016084015260808501516101808401528281036101a084015261434a8185613fbc565b600060208284031215
61491257600080fd5b5051919050565b60608152600061492d606083018789614208565b73ffffffffffffffffffffffffffffffffffffffff861660208401528281036040840152614560818587614208565b60608152600061496f6060830186614251565b60208301949094525060400152919050565b7f414132332072657665727465643a2000000000000000000000000000000000008152600082516149b981600f850160208701613f90565b91909101600f0192915050565b600080604083850312156149d957600080fd5b825167ffffffffffffffff8111156149f057600080fd5b8301601f81018513614a0157600080fd5b8051614a0c81613a97565b604051614a198282613a4c565b828152876020848601011115614a2e57600080fd5b614a3f836020830160208701613f90565b6020969096015195979596505050505050565b7f414133332072657665727465643a2000000000000000000000000000000000008152600082516149b981600f850160208701613f9056fea164736f6c634300080f000a608060405234801561001057600080fd5b506101ea806100206000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063570e1a3614610030575b600080fd5b61004361003e3660046100f9565b61006c565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b60008061007c601482858761016b565b61008591610195565b60601c90506000610099846014818861016b565b8080601f016020809104026020016040519081016040528093929190818152602001838380828437600092018290525084519495509360209350849250905082850182875af190506000519350806100f057600093505b50505092915050565b6000806020838503121561010c57600080fd5b823567ffffffffffffffff8082111561012457600080fd5b818501915085601f83011261013857600080fd5b81358181111561014757600080fd5b86602082850101111561015957600080fd5b60209290920196919550909350505050565b6000808585111561017b57600080fd5b8386111561018857600080fd5b5050820193919092039150565b7fffffffffffffffffffffffffffffffffffffffff00000000000000000000000081358181169160148510156101d55780818660140360031b1b83161692505b50509291505056fea164736f6c634300080f000a", +} + +var EntryPointABI = EntryPointMetaData.ABI + +var EntryPointBin = EntryPointMetaData.Bin + +func DeployEntryPoint(auth *bind.TransactOpts, backend 
bind.ContractBackend) (common.Address, *types.Transaction, *EntryPoint, error) { + parsed, err := EntryPointMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(EntryPointBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &EntryPoint{address: address, abi: *parsed, EntryPointCaller: EntryPointCaller{contract: contract}, EntryPointTransactor: EntryPointTransactor{contract: contract}, EntryPointFilterer: EntryPointFilterer{contract: contract}}, nil +} + +type EntryPoint struct { + address common.Address + abi abi.ABI + EntryPointCaller + EntryPointTransactor + EntryPointFilterer +} + +type EntryPointCaller struct { + contract *bind.BoundContract +} + +type EntryPointTransactor struct { + contract *bind.BoundContract +} + +type EntryPointFilterer struct { + contract *bind.BoundContract +} + +type EntryPointSession struct { + Contract *EntryPoint + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type EntryPointCallerSession struct { + Contract *EntryPointCaller + CallOpts bind.CallOpts +} + +type EntryPointTransactorSession struct { + Contract *EntryPointTransactor + TransactOpts bind.TransactOpts +} + +type EntryPointRaw struct { + Contract *EntryPoint +} + +type EntryPointCallerRaw struct { + Contract *EntryPointCaller +} + +type EntryPointTransactorRaw struct { + Contract *EntryPointTransactor +} + +func NewEntryPoint(address common.Address, backend bind.ContractBackend) (*EntryPoint, error) { + abi, err := abi.JSON(strings.NewReader(EntryPointABI)) + if err != nil { + return nil, err + } + contract, err := bindEntryPoint(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &EntryPoint{address: address, abi: abi, EntryPointCaller: EntryPointCaller{contract: contract}, 
EntryPointTransactor: EntryPointTransactor{contract: contract}, EntryPointFilterer: EntryPointFilterer{contract: contract}}, nil +} + +func NewEntryPointCaller(address common.Address, caller bind.ContractCaller) (*EntryPointCaller, error) { + contract, err := bindEntryPoint(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &EntryPointCaller{contract: contract}, nil +} + +func NewEntryPointTransactor(address common.Address, transactor bind.ContractTransactor) (*EntryPointTransactor, error) { + contract, err := bindEntryPoint(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &EntryPointTransactor{contract: contract}, nil +} + +func NewEntryPointFilterer(address common.Address, filterer bind.ContractFilterer) (*EntryPointFilterer, error) { + contract, err := bindEntryPoint(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &EntryPointFilterer{contract: contract}, nil +} + +func bindEntryPoint(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := EntryPointMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_EntryPoint *EntryPointRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _EntryPoint.Contract.EntryPointCaller.contract.Call(opts, result, method, params...) +} + +func (_EntryPoint *EntryPointRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _EntryPoint.Contract.EntryPointTransactor.contract.Transfer(opts) +} + +func (_EntryPoint *EntryPointRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _EntryPoint.Contract.EntryPointTransactor.contract.Transact(opts, method, params...) 
+} + +func (_EntryPoint *EntryPointCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _EntryPoint.Contract.contract.Call(opts, result, method, params...) +} + +func (_EntryPoint *EntryPointTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _EntryPoint.Contract.contract.Transfer(opts) +} + +func (_EntryPoint *EntryPointTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _EntryPoint.Contract.contract.Transact(opts, method, params...) +} + +func (_EntryPoint *EntryPointCaller) SIGVALIDATIONFAILED(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _EntryPoint.contract.Call(opts, &out, "SIG_VALIDATION_FAILED") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_EntryPoint *EntryPointSession) SIGVALIDATIONFAILED() (*big.Int, error) { + return _EntryPoint.Contract.SIGVALIDATIONFAILED(&_EntryPoint.CallOpts) +} + +func (_EntryPoint *EntryPointCallerSession) SIGVALIDATIONFAILED() (*big.Int, error) { + return _EntryPoint.Contract.SIGVALIDATIONFAILED(&_EntryPoint.CallOpts) +} + +func (_EntryPoint *EntryPointCaller) ValidateSenderAndPaymaster(opts *bind.CallOpts, initCode []byte, sender common.Address, paymasterAndData []byte) error { + var out []interface{} + err := _EntryPoint.contract.Call(opts, &out, "_validateSenderAndPaymaster", initCode, sender, paymasterAndData) + + if err != nil { + return err + } + + return err + +} + +func (_EntryPoint *EntryPointSession) ValidateSenderAndPaymaster(initCode []byte, sender common.Address, paymasterAndData []byte) error { + return _EntryPoint.Contract.ValidateSenderAndPaymaster(&_EntryPoint.CallOpts, initCode, sender, paymasterAndData) +} + +func (_EntryPoint *EntryPointCallerSession) ValidateSenderAndPaymaster(initCode []byte, sender 
common.Address, paymasterAndData []byte) error { + return _EntryPoint.Contract.ValidateSenderAndPaymaster(&_EntryPoint.CallOpts, initCode, sender, paymasterAndData) +} + +func (_EntryPoint *EntryPointCaller) BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) { + var out []interface{} + err := _EntryPoint.contract.Call(opts, &out, "balanceOf", account) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_EntryPoint *EntryPointSession) BalanceOf(account common.Address) (*big.Int, error) { + return _EntryPoint.Contract.BalanceOf(&_EntryPoint.CallOpts, account) +} + +func (_EntryPoint *EntryPointCallerSession) BalanceOf(account common.Address) (*big.Int, error) { + return _EntryPoint.Contract.BalanceOf(&_EntryPoint.CallOpts, account) +} + +func (_EntryPoint *EntryPointCaller) Deposits(opts *bind.CallOpts, arg0 common.Address) (Deposits, + + error) { + var out []interface{} + err := _EntryPoint.contract.Call(opts, &out, "deposits", arg0) + + outstruct := new(Deposits) + if err != nil { + return *outstruct, err + } + + outstruct.Deposit = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + outstruct.Staked = *abi.ConvertType(out[1], new(bool)).(*bool) + outstruct.Stake = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) + outstruct.UnstakeDelaySec = *abi.ConvertType(out[3], new(uint32)).(*uint32) + outstruct.WithdrawTime = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_EntryPoint *EntryPointSession) Deposits(arg0 common.Address) (Deposits, + + error) { + return _EntryPoint.Contract.Deposits(&_EntryPoint.CallOpts, arg0) +} + +func (_EntryPoint *EntryPointCallerSession) Deposits(arg0 common.Address) (Deposits, + + error) { + return _EntryPoint.Contract.Deposits(&_EntryPoint.CallOpts, arg0) +} + +func (_EntryPoint *EntryPointCaller) GetDepositInfo(opts *bind.CallOpts, account common.Address) 
(IStakeManagerDepositInfo, error) { + var out []interface{} + err := _EntryPoint.contract.Call(opts, &out, "getDepositInfo", account) + + if err != nil { + return *new(IStakeManagerDepositInfo), err + } + + out0 := *abi.ConvertType(out[0], new(IStakeManagerDepositInfo)).(*IStakeManagerDepositInfo) + + return out0, err + +} + +func (_EntryPoint *EntryPointSession) GetDepositInfo(account common.Address) (IStakeManagerDepositInfo, error) { + return _EntryPoint.Contract.GetDepositInfo(&_EntryPoint.CallOpts, account) +} + +func (_EntryPoint *EntryPointCallerSession) GetDepositInfo(account common.Address) (IStakeManagerDepositInfo, error) { + return _EntryPoint.Contract.GetDepositInfo(&_EntryPoint.CallOpts, account) +} + +func (_EntryPoint *EntryPointCaller) GetUserOpHash(opts *bind.CallOpts, userOp UserOperation) ([32]byte, error) { + var out []interface{} + err := _EntryPoint.contract.Call(opts, &out, "getUserOpHash", userOp) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_EntryPoint *EntryPointSession) GetUserOpHash(userOp UserOperation) ([32]byte, error) { + return _EntryPoint.Contract.GetUserOpHash(&_EntryPoint.CallOpts, userOp) +} + +func (_EntryPoint *EntryPointCallerSession) GetUserOpHash(userOp UserOperation) ([32]byte, error) { + return _EntryPoint.Contract.GetUserOpHash(&_EntryPoint.CallOpts, userOp) +} + +func (_EntryPoint *EntryPointTransactor) AddStake(opts *bind.TransactOpts, unstakeDelaySec uint32) (*types.Transaction, error) { + return _EntryPoint.contract.Transact(opts, "addStake", unstakeDelaySec) +} + +func (_EntryPoint *EntryPointSession) AddStake(unstakeDelaySec uint32) (*types.Transaction, error) { + return _EntryPoint.Contract.AddStake(&_EntryPoint.TransactOpts, unstakeDelaySec) +} + +func (_EntryPoint *EntryPointTransactorSession) AddStake(unstakeDelaySec uint32) (*types.Transaction, error) { + return 
_EntryPoint.Contract.AddStake(&_EntryPoint.TransactOpts, unstakeDelaySec) +} + +func (_EntryPoint *EntryPointTransactor) DepositTo(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) { + return _EntryPoint.contract.Transact(opts, "depositTo", account) +} + +func (_EntryPoint *EntryPointSession) DepositTo(account common.Address) (*types.Transaction, error) { + return _EntryPoint.Contract.DepositTo(&_EntryPoint.TransactOpts, account) +} + +func (_EntryPoint *EntryPointTransactorSession) DepositTo(account common.Address) (*types.Transaction, error) { + return _EntryPoint.Contract.DepositTo(&_EntryPoint.TransactOpts, account) +} + +func (_EntryPoint *EntryPointTransactor) GetSenderAddress(opts *bind.TransactOpts, initCode []byte) (*types.Transaction, error) { + return _EntryPoint.contract.Transact(opts, "getSenderAddress", initCode) +} + +func (_EntryPoint *EntryPointSession) GetSenderAddress(initCode []byte) (*types.Transaction, error) { + return _EntryPoint.Contract.GetSenderAddress(&_EntryPoint.TransactOpts, initCode) +} + +func (_EntryPoint *EntryPointTransactorSession) GetSenderAddress(initCode []byte) (*types.Transaction, error) { + return _EntryPoint.Contract.GetSenderAddress(&_EntryPoint.TransactOpts, initCode) +} + +func (_EntryPoint *EntryPointTransactor) HandleAggregatedOps(opts *bind.TransactOpts, opsPerAggregator []IEntryPointUserOpsPerAggregator, beneficiary common.Address) (*types.Transaction, error) { + return _EntryPoint.contract.Transact(opts, "handleAggregatedOps", opsPerAggregator, beneficiary) +} + +func (_EntryPoint *EntryPointSession) HandleAggregatedOps(opsPerAggregator []IEntryPointUserOpsPerAggregator, beneficiary common.Address) (*types.Transaction, error) { + return _EntryPoint.Contract.HandleAggregatedOps(&_EntryPoint.TransactOpts, opsPerAggregator, beneficiary) +} + +func (_EntryPoint *EntryPointTransactorSession) HandleAggregatedOps(opsPerAggregator []IEntryPointUserOpsPerAggregator, beneficiary common.Address) 
(*types.Transaction, error) { + return _EntryPoint.Contract.HandleAggregatedOps(&_EntryPoint.TransactOpts, opsPerAggregator, beneficiary) +} + +func (_EntryPoint *EntryPointTransactor) HandleOps(opts *bind.TransactOpts, ops []UserOperation, beneficiary common.Address) (*types.Transaction, error) { + return _EntryPoint.contract.Transact(opts, "handleOps", ops, beneficiary) +} + +func (_EntryPoint *EntryPointSession) HandleOps(ops []UserOperation, beneficiary common.Address) (*types.Transaction, error) { + return _EntryPoint.Contract.HandleOps(&_EntryPoint.TransactOpts, ops, beneficiary) +} + +func (_EntryPoint *EntryPointTransactorSession) HandleOps(ops []UserOperation, beneficiary common.Address) (*types.Transaction, error) { + return _EntryPoint.Contract.HandleOps(&_EntryPoint.TransactOpts, ops, beneficiary) +} + +func (_EntryPoint *EntryPointTransactor) InnerHandleOp(opts *bind.TransactOpts, callData []byte, opInfo EntryPointUserOpInfo, context []byte) (*types.Transaction, error) { + return _EntryPoint.contract.Transact(opts, "innerHandleOp", callData, opInfo, context) +} + +func (_EntryPoint *EntryPointSession) InnerHandleOp(callData []byte, opInfo EntryPointUserOpInfo, context []byte) (*types.Transaction, error) { + return _EntryPoint.Contract.InnerHandleOp(&_EntryPoint.TransactOpts, callData, opInfo, context) +} + +func (_EntryPoint *EntryPointTransactorSession) InnerHandleOp(callData []byte, opInfo EntryPointUserOpInfo, context []byte) (*types.Transaction, error) { + return _EntryPoint.Contract.InnerHandleOp(&_EntryPoint.TransactOpts, callData, opInfo, context) +} + +func (_EntryPoint *EntryPointTransactor) SimulateHandleOp(opts *bind.TransactOpts, op UserOperation, target common.Address, targetCallData []byte) (*types.Transaction, error) { + return _EntryPoint.contract.Transact(opts, "simulateHandleOp", op, target, targetCallData) +} + +func (_EntryPoint *EntryPointSession) SimulateHandleOp(op UserOperation, target common.Address, targetCallData []byte) 
(*types.Transaction, error) { + return _EntryPoint.Contract.SimulateHandleOp(&_EntryPoint.TransactOpts, op, target, targetCallData) +} + +func (_EntryPoint *EntryPointTransactorSession) SimulateHandleOp(op UserOperation, target common.Address, targetCallData []byte) (*types.Transaction, error) { + return _EntryPoint.Contract.SimulateHandleOp(&_EntryPoint.TransactOpts, op, target, targetCallData) +} + +func (_EntryPoint *EntryPointTransactor) SimulateValidation(opts *bind.TransactOpts, userOp UserOperation) (*types.Transaction, error) { + return _EntryPoint.contract.Transact(opts, "simulateValidation", userOp) +} + +func (_EntryPoint *EntryPointSession) SimulateValidation(userOp UserOperation) (*types.Transaction, error) { + return _EntryPoint.Contract.SimulateValidation(&_EntryPoint.TransactOpts, userOp) +} + +func (_EntryPoint *EntryPointTransactorSession) SimulateValidation(userOp UserOperation) (*types.Transaction, error) { + return _EntryPoint.Contract.SimulateValidation(&_EntryPoint.TransactOpts, userOp) +} + +func (_EntryPoint *EntryPointTransactor) UnlockStake(opts *bind.TransactOpts) (*types.Transaction, error) { + return _EntryPoint.contract.Transact(opts, "unlockStake") +} + +func (_EntryPoint *EntryPointSession) UnlockStake() (*types.Transaction, error) { + return _EntryPoint.Contract.UnlockStake(&_EntryPoint.TransactOpts) +} + +func (_EntryPoint *EntryPointTransactorSession) UnlockStake() (*types.Transaction, error) { + return _EntryPoint.Contract.UnlockStake(&_EntryPoint.TransactOpts) +} + +func (_EntryPoint *EntryPointTransactor) WithdrawStake(opts *bind.TransactOpts, withdrawAddress common.Address) (*types.Transaction, error) { + return _EntryPoint.contract.Transact(opts, "withdrawStake", withdrawAddress) +} + +func (_EntryPoint *EntryPointSession) WithdrawStake(withdrawAddress common.Address) (*types.Transaction, error) { + return _EntryPoint.Contract.WithdrawStake(&_EntryPoint.TransactOpts, withdrawAddress) +} + +func (_EntryPoint 
*EntryPointTransactorSession) WithdrawStake(withdrawAddress common.Address) (*types.Transaction, error) { + return _EntryPoint.Contract.WithdrawStake(&_EntryPoint.TransactOpts, withdrawAddress) +} + +func (_EntryPoint *EntryPointTransactor) WithdrawTo(opts *bind.TransactOpts, withdrawAddress common.Address, withdrawAmount *big.Int) (*types.Transaction, error) { + return _EntryPoint.contract.Transact(opts, "withdrawTo", withdrawAddress, withdrawAmount) +} + +func (_EntryPoint *EntryPointSession) WithdrawTo(withdrawAddress common.Address, withdrawAmount *big.Int) (*types.Transaction, error) { + return _EntryPoint.Contract.WithdrawTo(&_EntryPoint.TransactOpts, withdrawAddress, withdrawAmount) +} + +func (_EntryPoint *EntryPointTransactorSession) WithdrawTo(withdrawAddress common.Address, withdrawAmount *big.Int) (*types.Transaction, error) { + return _EntryPoint.Contract.WithdrawTo(&_EntryPoint.TransactOpts, withdrawAddress, withdrawAmount) +} + +func (_EntryPoint *EntryPointTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _EntryPoint.contract.RawTransact(opts, nil) +} + +func (_EntryPoint *EntryPointSession) Receive() (*types.Transaction, error) { + return _EntryPoint.Contract.Receive(&_EntryPoint.TransactOpts) +} + +func (_EntryPoint *EntryPointTransactorSession) Receive() (*types.Transaction, error) { + return _EntryPoint.Contract.Receive(&_EntryPoint.TransactOpts) +} + +type EntryPointAccountDeployedIterator struct { + Event *EntryPointAccountDeployed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *EntryPointAccountDeployedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(EntryPointAccountDeployed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + 
return false + } + } + + select { + case log := <-it.logs: + it.Event = new(EntryPointAccountDeployed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *EntryPointAccountDeployedIterator) Error() error { + return it.fail +} + +func (it *EntryPointAccountDeployedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type EntryPointAccountDeployed struct { + UserOpHash [32]byte + Sender common.Address + Factory common.Address + Paymaster common.Address + Raw types.Log +} + +func (_EntryPoint *EntryPointFilterer) FilterAccountDeployed(opts *bind.FilterOpts, userOpHash [][32]byte, sender []common.Address) (*EntryPointAccountDeployedIterator, error) { + + var userOpHashRule []interface{} + for _, userOpHashItem := range userOpHash { + userOpHashRule = append(userOpHashRule, userOpHashItem) + } + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _EntryPoint.contract.FilterLogs(opts, "AccountDeployed", userOpHashRule, senderRule) + if err != nil { + return nil, err + } + return &EntryPointAccountDeployedIterator{contract: _EntryPoint.contract, event: "AccountDeployed", logs: logs, sub: sub}, nil +} + +func (_EntryPoint *EntryPointFilterer) WatchAccountDeployed(opts *bind.WatchOpts, sink chan<- *EntryPointAccountDeployed, userOpHash [][32]byte, sender []common.Address) (event.Subscription, error) { + + var userOpHashRule []interface{} + for _, userOpHashItem := range userOpHash { + userOpHashRule = append(userOpHashRule, userOpHashItem) + } + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _EntryPoint.contract.WatchLogs(opts, "AccountDeployed", userOpHashRule, senderRule) + if err != nil { 
+ return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(EntryPointAccountDeployed) + if err := _EntryPoint.contract.UnpackLog(event, "AccountDeployed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_EntryPoint *EntryPointFilterer) ParseAccountDeployed(log types.Log) (*EntryPointAccountDeployed, error) { + event := new(EntryPointAccountDeployed) + if err := _EntryPoint.contract.UnpackLog(event, "AccountDeployed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type EntryPointDepositedIterator struct { + Event *EntryPointDeposited + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *EntryPointDepositedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(EntryPointDeposited) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(EntryPointDeposited) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *EntryPointDepositedIterator) Error() error { + return it.fail +} + +func (it *EntryPointDepositedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type EntryPointDeposited struct { + Account common.Address + TotalDeposit *big.Int + Raw types.Log +} + +func 
(_EntryPoint *EntryPointFilterer) FilterDeposited(opts *bind.FilterOpts, account []common.Address) (*EntryPointDepositedIterator, error) { + + var accountRule []interface{} + for _, accountItem := range account { + accountRule = append(accountRule, accountItem) + } + + logs, sub, err := _EntryPoint.contract.FilterLogs(opts, "Deposited", accountRule) + if err != nil { + return nil, err + } + return &EntryPointDepositedIterator{contract: _EntryPoint.contract, event: "Deposited", logs: logs, sub: sub}, nil +} + +func (_EntryPoint *EntryPointFilterer) WatchDeposited(opts *bind.WatchOpts, sink chan<- *EntryPointDeposited, account []common.Address) (event.Subscription, error) { + + var accountRule []interface{} + for _, accountItem := range account { + accountRule = append(accountRule, accountItem) + } + + logs, sub, err := _EntryPoint.contract.WatchLogs(opts, "Deposited", accountRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(EntryPointDeposited) + if err := _EntryPoint.contract.UnpackLog(event, "Deposited", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_EntryPoint *EntryPointFilterer) ParseDeposited(log types.Log) (*EntryPointDeposited, error) { + event := new(EntryPointDeposited) + if err := _EntryPoint.contract.UnpackLog(event, "Deposited", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type EntryPointSignatureAggregatorChangedIterator struct { + Event *EntryPointSignatureAggregatorChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *EntryPointSignatureAggregatorChangedIterator) 
Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(EntryPointSignatureAggregatorChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(EntryPointSignatureAggregatorChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *EntryPointSignatureAggregatorChangedIterator) Error() error { + return it.fail +} + +func (it *EntryPointSignatureAggregatorChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type EntryPointSignatureAggregatorChanged struct { + Aggregator common.Address + Raw types.Log +} + +func (_EntryPoint *EntryPointFilterer) FilterSignatureAggregatorChanged(opts *bind.FilterOpts, aggregator []common.Address) (*EntryPointSignatureAggregatorChangedIterator, error) { + + var aggregatorRule []interface{} + for _, aggregatorItem := range aggregator { + aggregatorRule = append(aggregatorRule, aggregatorItem) + } + + logs, sub, err := _EntryPoint.contract.FilterLogs(opts, "SignatureAggregatorChanged", aggregatorRule) + if err != nil { + return nil, err + } + return &EntryPointSignatureAggregatorChangedIterator{contract: _EntryPoint.contract, event: "SignatureAggregatorChanged", logs: logs, sub: sub}, nil +} + +func (_EntryPoint *EntryPointFilterer) WatchSignatureAggregatorChanged(opts *bind.WatchOpts, sink chan<- *EntryPointSignatureAggregatorChanged, aggregator []common.Address) (event.Subscription, error) { + + var aggregatorRule []interface{} + for _, aggregatorItem := range aggregator { + aggregatorRule = append(aggregatorRule, aggregatorItem) + } + + logs, sub, err := 
_EntryPoint.contract.WatchLogs(opts, "SignatureAggregatorChanged", aggregatorRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(EntryPointSignatureAggregatorChanged) + if err := _EntryPoint.contract.UnpackLog(event, "SignatureAggregatorChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_EntryPoint *EntryPointFilterer) ParseSignatureAggregatorChanged(log types.Log) (*EntryPointSignatureAggregatorChanged, error) { + event := new(EntryPointSignatureAggregatorChanged) + if err := _EntryPoint.contract.UnpackLog(event, "SignatureAggregatorChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type EntryPointStakeLockedIterator struct { + Event *EntryPointStakeLocked + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *EntryPointStakeLockedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(EntryPointStakeLocked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(EntryPointStakeLocked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *EntryPointStakeLockedIterator) Error() error { + return it.fail +} + +func (it 
*EntryPointStakeLockedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type EntryPointStakeLocked struct { + Account common.Address + TotalStaked *big.Int + UnstakeDelaySec *big.Int + Raw types.Log +} + +func (_EntryPoint *EntryPointFilterer) FilterStakeLocked(opts *bind.FilterOpts, account []common.Address) (*EntryPointStakeLockedIterator, error) { + + var accountRule []interface{} + for _, accountItem := range account { + accountRule = append(accountRule, accountItem) + } + + logs, sub, err := _EntryPoint.contract.FilterLogs(opts, "StakeLocked", accountRule) + if err != nil { + return nil, err + } + return &EntryPointStakeLockedIterator{contract: _EntryPoint.contract, event: "StakeLocked", logs: logs, sub: sub}, nil +} + +func (_EntryPoint *EntryPointFilterer) WatchStakeLocked(opts *bind.WatchOpts, sink chan<- *EntryPointStakeLocked, account []common.Address) (event.Subscription, error) { + + var accountRule []interface{} + for _, accountItem := range account { + accountRule = append(accountRule, accountItem) + } + + logs, sub, err := _EntryPoint.contract.WatchLogs(opts, "StakeLocked", accountRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(EntryPointStakeLocked) + if err := _EntryPoint.contract.UnpackLog(event, "StakeLocked", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_EntryPoint *EntryPointFilterer) ParseStakeLocked(log types.Log) (*EntryPointStakeLocked, error) { + event := new(EntryPointStakeLocked) + if err := _EntryPoint.contract.UnpackLog(event, "StakeLocked", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type EntryPointStakeUnlockedIterator 
struct { + Event *EntryPointStakeUnlocked + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *EntryPointStakeUnlockedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(EntryPointStakeUnlocked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(EntryPointStakeUnlocked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *EntryPointStakeUnlockedIterator) Error() error { + return it.fail +} + +func (it *EntryPointStakeUnlockedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type EntryPointStakeUnlocked struct { + Account common.Address + WithdrawTime *big.Int + Raw types.Log +} + +func (_EntryPoint *EntryPointFilterer) FilterStakeUnlocked(opts *bind.FilterOpts, account []common.Address) (*EntryPointStakeUnlockedIterator, error) { + + var accountRule []interface{} + for _, accountItem := range account { + accountRule = append(accountRule, accountItem) + } + + logs, sub, err := _EntryPoint.contract.FilterLogs(opts, "StakeUnlocked", accountRule) + if err != nil { + return nil, err + } + return &EntryPointStakeUnlockedIterator{contract: _EntryPoint.contract, event: "StakeUnlocked", logs: logs, sub: sub}, nil +} + +func (_EntryPoint *EntryPointFilterer) WatchStakeUnlocked(opts *bind.WatchOpts, sink chan<- *EntryPointStakeUnlocked, account []common.Address) (event.Subscription, error) { + + var accountRule []interface{} + for _, accountItem := range account { + accountRule = append(accountRule, accountItem) + } + 
+ logs, sub, err := _EntryPoint.contract.WatchLogs(opts, "StakeUnlocked", accountRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(EntryPointStakeUnlocked) + if err := _EntryPoint.contract.UnpackLog(event, "StakeUnlocked", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_EntryPoint *EntryPointFilterer) ParseStakeUnlocked(log types.Log) (*EntryPointStakeUnlocked, error) { + event := new(EntryPointStakeUnlocked) + if err := _EntryPoint.contract.UnpackLog(event, "StakeUnlocked", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type EntryPointStakeWithdrawnIterator struct { + Event *EntryPointStakeWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *EntryPointStakeWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(EntryPointStakeWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(EntryPointStakeWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *EntryPointStakeWithdrawnIterator) Error() error { + return it.fail +} + +func (it *EntryPointStakeWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + 
return nil +} + +type EntryPointStakeWithdrawn struct { + Account common.Address + WithdrawAddress common.Address + Amount *big.Int + Raw types.Log +} + +func (_EntryPoint *EntryPointFilterer) FilterStakeWithdrawn(opts *bind.FilterOpts, account []common.Address) (*EntryPointStakeWithdrawnIterator, error) { + + var accountRule []interface{} + for _, accountItem := range account { + accountRule = append(accountRule, accountItem) + } + + logs, sub, err := _EntryPoint.contract.FilterLogs(opts, "StakeWithdrawn", accountRule) + if err != nil { + return nil, err + } + return &EntryPointStakeWithdrawnIterator{contract: _EntryPoint.contract, event: "StakeWithdrawn", logs: logs, sub: sub}, nil +} + +func (_EntryPoint *EntryPointFilterer) WatchStakeWithdrawn(opts *bind.WatchOpts, sink chan<- *EntryPointStakeWithdrawn, account []common.Address) (event.Subscription, error) { + + var accountRule []interface{} + for _, accountItem := range account { + accountRule = append(accountRule, accountItem) + } + + logs, sub, err := _EntryPoint.contract.WatchLogs(opts, "StakeWithdrawn", accountRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(EntryPointStakeWithdrawn) + if err := _EntryPoint.contract.UnpackLog(event, "StakeWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_EntryPoint *EntryPointFilterer) ParseStakeWithdrawn(log types.Log) (*EntryPointStakeWithdrawn, error) { + event := new(EntryPointStakeWithdrawn) + if err := _EntryPoint.contract.UnpackLog(event, "StakeWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type EntryPointUserOperationEventIterator struct { + Event 
*EntryPointUserOperationEvent + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *EntryPointUserOperationEventIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(EntryPointUserOperationEvent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(EntryPointUserOperationEvent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *EntryPointUserOperationEventIterator) Error() error { + return it.fail +} + +func (it *EntryPointUserOperationEventIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type EntryPointUserOperationEvent struct { + UserOpHash [32]byte + Sender common.Address + Paymaster common.Address + Nonce *big.Int + Success bool + ActualGasCost *big.Int + ActualGasUsed *big.Int + Raw types.Log +} + +func (_EntryPoint *EntryPointFilterer) FilterUserOperationEvent(opts *bind.FilterOpts, userOpHash [][32]byte, sender []common.Address, paymaster []common.Address) (*EntryPointUserOperationEventIterator, error) { + + var userOpHashRule []interface{} + for _, userOpHashItem := range userOpHash { + userOpHashRule = append(userOpHashRule, userOpHashItem) + } + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + var paymasterRule []interface{} + for _, paymasterItem := range paymaster { + paymasterRule = append(paymasterRule, paymasterItem) + } + + logs, sub, err := _EntryPoint.contract.FilterLogs(opts, "UserOperationEvent", userOpHashRule, 
senderRule, paymasterRule) + if err != nil { + return nil, err + } + return &EntryPointUserOperationEventIterator{contract: _EntryPoint.contract, event: "UserOperationEvent", logs: logs, sub: sub}, nil +} + +func (_EntryPoint *EntryPointFilterer) WatchUserOperationEvent(opts *bind.WatchOpts, sink chan<- *EntryPointUserOperationEvent, userOpHash [][32]byte, sender []common.Address, paymaster []common.Address) (event.Subscription, error) { + + var userOpHashRule []interface{} + for _, userOpHashItem := range userOpHash { + userOpHashRule = append(userOpHashRule, userOpHashItem) + } + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + var paymasterRule []interface{} + for _, paymasterItem := range paymaster { + paymasterRule = append(paymasterRule, paymasterItem) + } + + logs, sub, err := _EntryPoint.contract.WatchLogs(opts, "UserOperationEvent", userOpHashRule, senderRule, paymasterRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(EntryPointUserOperationEvent) + if err := _EntryPoint.contract.UnpackLog(event, "UserOperationEvent", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_EntryPoint *EntryPointFilterer) ParseUserOperationEvent(log types.Log) (*EntryPointUserOperationEvent, error) { + event := new(EntryPointUserOperationEvent) + if err := _EntryPoint.contract.UnpackLog(event, "UserOperationEvent", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type EntryPointUserOperationRevertReasonIterator struct { + Event *EntryPointUserOperationRevertReason + + contract *bind.BoundContract + event string + + logs chan 
types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *EntryPointUserOperationRevertReasonIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(EntryPointUserOperationRevertReason) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(EntryPointUserOperationRevertReason) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *EntryPointUserOperationRevertReasonIterator) Error() error { + return it.fail +} + +func (it *EntryPointUserOperationRevertReasonIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type EntryPointUserOperationRevertReason struct { + UserOpHash [32]byte + Sender common.Address + Nonce *big.Int + RevertReason []byte + Raw types.Log +} + +func (_EntryPoint *EntryPointFilterer) FilterUserOperationRevertReason(opts *bind.FilterOpts, userOpHash [][32]byte, sender []common.Address) (*EntryPointUserOperationRevertReasonIterator, error) { + + var userOpHashRule []interface{} + for _, userOpHashItem := range userOpHash { + userOpHashRule = append(userOpHashRule, userOpHashItem) + } + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _EntryPoint.contract.FilterLogs(opts, "UserOperationRevertReason", userOpHashRule, senderRule) + if err != nil { + return nil, err + } + return &EntryPointUserOperationRevertReasonIterator{contract: _EntryPoint.contract, event: "UserOperationRevertReason", logs: logs, sub: sub}, nil +} + +func (_EntryPoint *EntryPointFilterer) 
WatchUserOperationRevertReason(opts *bind.WatchOpts, sink chan<- *EntryPointUserOperationRevertReason, userOpHash [][32]byte, sender []common.Address) (event.Subscription, error) { + + var userOpHashRule []interface{} + for _, userOpHashItem := range userOpHash { + userOpHashRule = append(userOpHashRule, userOpHashItem) + } + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _EntryPoint.contract.WatchLogs(opts, "UserOperationRevertReason", userOpHashRule, senderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(EntryPointUserOperationRevertReason) + if err := _EntryPoint.contract.UnpackLog(event, "UserOperationRevertReason", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_EntryPoint *EntryPointFilterer) ParseUserOperationRevertReason(log types.Log) (*EntryPointUserOperationRevertReason, error) { + event := new(EntryPointUserOperationRevertReason) + if err := _EntryPoint.contract.UnpackLog(event, "UserOperationRevertReason", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type EntryPointWithdrawnIterator struct { + Event *EntryPointWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *EntryPointWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(EntryPointWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return 
true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(EntryPointWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *EntryPointWithdrawnIterator) Error() error { + return it.fail +} + +func (it *EntryPointWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type EntryPointWithdrawn struct { + Account common.Address + WithdrawAddress common.Address + Amount *big.Int + Raw types.Log +} + +func (_EntryPoint *EntryPointFilterer) FilterWithdrawn(opts *bind.FilterOpts, account []common.Address) (*EntryPointWithdrawnIterator, error) { + + var accountRule []interface{} + for _, accountItem := range account { + accountRule = append(accountRule, accountItem) + } + + logs, sub, err := _EntryPoint.contract.FilterLogs(opts, "Withdrawn", accountRule) + if err != nil { + return nil, err + } + return &EntryPointWithdrawnIterator{contract: _EntryPoint.contract, event: "Withdrawn", logs: logs, sub: sub}, nil +} + +func (_EntryPoint *EntryPointFilterer) WatchWithdrawn(opts *bind.WatchOpts, sink chan<- *EntryPointWithdrawn, account []common.Address) (event.Subscription, error) { + + var accountRule []interface{} + for _, accountItem := range account { + accountRule = append(accountRule, accountItem) + } + + logs, sub, err := _EntryPoint.contract.WatchLogs(opts, "Withdrawn", accountRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(EntryPointWithdrawn) + if err := _EntryPoint.contract.UnpackLog(event, "Withdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + 
case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_EntryPoint *EntryPointFilterer) ParseWithdrawn(log types.Log) (*EntryPointWithdrawn, error) { + event := new(EntryPointWithdrawn) + if err := _EntryPoint.contract.UnpackLog(event, "Withdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type Deposits struct { + Deposit *big.Int + Staked bool + Stake *big.Int + UnstakeDelaySec uint32 + WithdrawTime *big.Int +} + +func (_EntryPoint *EntryPoint) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _EntryPoint.abi.Events["AccountDeployed"].ID: + return _EntryPoint.ParseAccountDeployed(log) + case _EntryPoint.abi.Events["Deposited"].ID: + return _EntryPoint.ParseDeposited(log) + case _EntryPoint.abi.Events["SignatureAggregatorChanged"].ID: + return _EntryPoint.ParseSignatureAggregatorChanged(log) + case _EntryPoint.abi.Events["StakeLocked"].ID: + return _EntryPoint.ParseStakeLocked(log) + case _EntryPoint.abi.Events["StakeUnlocked"].ID: + return _EntryPoint.ParseStakeUnlocked(log) + case _EntryPoint.abi.Events["StakeWithdrawn"].ID: + return _EntryPoint.ParseStakeWithdrawn(log) + case _EntryPoint.abi.Events["UserOperationEvent"].ID: + return _EntryPoint.ParseUserOperationEvent(log) + case _EntryPoint.abi.Events["UserOperationRevertReason"].ID: + return _EntryPoint.ParseUserOperationRevertReason(log) + case _EntryPoint.abi.Events["Withdrawn"].ID: + return _EntryPoint.ParseWithdrawn(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (EntryPointAccountDeployed) Topic() common.Hash { + return common.HexToHash("0xd51a9c61267aa6196961883ecf5ff2da6619c37dac0fa92122513fb32c032d2d") +} + +func (EntryPointDeposited) Topic() common.Hash { + return common.HexToHash("0x2da466a7b24304f47e87fa2e1e5a81b9831ce54fec19055ce277ca2f39ba42c4") +} + +func (EntryPointSignatureAggregatorChanged) Topic() 
common.Hash { + return common.HexToHash("0x575ff3acadd5ab348fe1855e217e0f3678f8d767d7494c9f9fefbee2e17cca4d") +} + +func (EntryPointStakeLocked) Topic() common.Hash { + return common.HexToHash("0xa5ae833d0bb1dcd632d98a8b70973e8516812898e19bf27b70071ebc8dc52c01") +} + +func (EntryPointStakeUnlocked) Topic() common.Hash { + return common.HexToHash("0xfa9b3c14cc825c412c9ed81b3ba365a5b459439403f18829e572ed53a4180f0a") +} + +func (EntryPointStakeWithdrawn) Topic() common.Hash { + return common.HexToHash("0xb7c918e0e249f999e965cafeb6c664271b3f4317d296461500e71da39f0cbda3") +} + +func (EntryPointUserOperationEvent) Topic() common.Hash { + return common.HexToHash("0x49628fd1471006c1482da88028e9ce4dbb080b815c9b0344d39e5a8e6ec1419f") +} + +func (EntryPointUserOperationRevertReason) Topic() common.Hash { + return common.HexToHash("0x1c4fada7374c0a9ee8841fc38afe82932dc0f8e69012e927f061a8bae611a201") +} + +func (EntryPointWithdrawn) Topic() common.Hash { + return common.HexToHash("0xd1c19fbcd4551a5edfb66d43d2e337c04837afda3482b42bdf569a8fccdae5fb") +} + +func (_EntryPoint *EntryPoint) Address() common.Address { + return _EntryPoint.address +} + +type EntryPointInterface interface { + SIGVALIDATIONFAILED(opts *bind.CallOpts) (*big.Int, error) + + ValidateSenderAndPaymaster(opts *bind.CallOpts, initCode []byte, sender common.Address, paymasterAndData []byte) error + + BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) + + Deposits(opts *bind.CallOpts, arg0 common.Address) (Deposits, + + error) + + GetDepositInfo(opts *bind.CallOpts, account common.Address) (IStakeManagerDepositInfo, error) + + GetUserOpHash(opts *bind.CallOpts, userOp UserOperation) ([32]byte, error) + + AddStake(opts *bind.TransactOpts, unstakeDelaySec uint32) (*types.Transaction, error) + + DepositTo(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) + + GetSenderAddress(opts *bind.TransactOpts, initCode []byte) (*types.Transaction, error) + + 
HandleAggregatedOps(opts *bind.TransactOpts, opsPerAggregator []IEntryPointUserOpsPerAggregator, beneficiary common.Address) (*types.Transaction, error) + + HandleOps(opts *bind.TransactOpts, ops []UserOperation, beneficiary common.Address) (*types.Transaction, error) + + InnerHandleOp(opts *bind.TransactOpts, callData []byte, opInfo EntryPointUserOpInfo, context []byte) (*types.Transaction, error) + + SimulateHandleOp(opts *bind.TransactOpts, op UserOperation, target common.Address, targetCallData []byte) (*types.Transaction, error) + + SimulateValidation(opts *bind.TransactOpts, userOp UserOperation) (*types.Transaction, error) + + UnlockStake(opts *bind.TransactOpts) (*types.Transaction, error) + + WithdrawStake(opts *bind.TransactOpts, withdrawAddress common.Address) (*types.Transaction, error) + + WithdrawTo(opts *bind.TransactOpts, withdrawAddress common.Address, withdrawAmount *big.Int) (*types.Transaction, error) + + Receive(opts *bind.TransactOpts) (*types.Transaction, error) + + FilterAccountDeployed(opts *bind.FilterOpts, userOpHash [][32]byte, sender []common.Address) (*EntryPointAccountDeployedIterator, error) + + WatchAccountDeployed(opts *bind.WatchOpts, sink chan<- *EntryPointAccountDeployed, userOpHash [][32]byte, sender []common.Address) (event.Subscription, error) + + ParseAccountDeployed(log types.Log) (*EntryPointAccountDeployed, error) + + FilterDeposited(opts *bind.FilterOpts, account []common.Address) (*EntryPointDepositedIterator, error) + + WatchDeposited(opts *bind.WatchOpts, sink chan<- *EntryPointDeposited, account []common.Address) (event.Subscription, error) + + ParseDeposited(log types.Log) (*EntryPointDeposited, error) + + FilterSignatureAggregatorChanged(opts *bind.FilterOpts, aggregator []common.Address) (*EntryPointSignatureAggregatorChangedIterator, error) + + WatchSignatureAggregatorChanged(opts *bind.WatchOpts, sink chan<- *EntryPointSignatureAggregatorChanged, aggregator []common.Address) (event.Subscription, error) + + 
ParseSignatureAggregatorChanged(log types.Log) (*EntryPointSignatureAggregatorChanged, error) + + FilterStakeLocked(opts *bind.FilterOpts, account []common.Address) (*EntryPointStakeLockedIterator, error) + + WatchStakeLocked(opts *bind.WatchOpts, sink chan<- *EntryPointStakeLocked, account []common.Address) (event.Subscription, error) + + ParseStakeLocked(log types.Log) (*EntryPointStakeLocked, error) + + FilterStakeUnlocked(opts *bind.FilterOpts, account []common.Address) (*EntryPointStakeUnlockedIterator, error) + + WatchStakeUnlocked(opts *bind.WatchOpts, sink chan<- *EntryPointStakeUnlocked, account []common.Address) (event.Subscription, error) + + ParseStakeUnlocked(log types.Log) (*EntryPointStakeUnlocked, error) + + FilterStakeWithdrawn(opts *bind.FilterOpts, account []common.Address) (*EntryPointStakeWithdrawnIterator, error) + + WatchStakeWithdrawn(opts *bind.WatchOpts, sink chan<- *EntryPointStakeWithdrawn, account []common.Address) (event.Subscription, error) + + ParseStakeWithdrawn(log types.Log) (*EntryPointStakeWithdrawn, error) + + FilterUserOperationEvent(opts *bind.FilterOpts, userOpHash [][32]byte, sender []common.Address, paymaster []common.Address) (*EntryPointUserOperationEventIterator, error) + + WatchUserOperationEvent(opts *bind.WatchOpts, sink chan<- *EntryPointUserOperationEvent, userOpHash [][32]byte, sender []common.Address, paymaster []common.Address) (event.Subscription, error) + + ParseUserOperationEvent(log types.Log) (*EntryPointUserOperationEvent, error) + + FilterUserOperationRevertReason(opts *bind.FilterOpts, userOpHash [][32]byte, sender []common.Address) (*EntryPointUserOperationRevertReasonIterator, error) + + WatchUserOperationRevertReason(opts *bind.WatchOpts, sink chan<- *EntryPointUserOperationRevertReason, userOpHash [][32]byte, sender []common.Address) (event.Subscription, error) + + ParseUserOperationRevertReason(log types.Log) (*EntryPointUserOperationRevertReason, error) + + FilterWithdrawn(opts *bind.FilterOpts, 
account []common.Address) (*EntryPointWithdrawnIterator, error) + + WatchWithdrawn(opts *bind.WatchOpts, sink chan<- *EntryPointWithdrawn, account []common.Address) (event.Subscription, error) + + ParseWithdrawn(log types.Log) (*EntryPointWithdrawn, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/transmission/generated/greeter_wrapper/greeter_wrapper.go b/core/gethwrappers/transmission/generated/greeter_wrapper/greeter_wrapper.go new file mode 100644 index 00000000..9814c6a1 --- /dev/null +++ b/core/gethwrappers/transmission/generated/greeter_wrapper/greeter_wrapper.go @@ -0,0 +1,216 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package greeter_wrapper + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var GreeterMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"getGreeting\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"greeting\",\"type\":\"string\"}],\"name\":\"setGreeting\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5061044a806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063a41368621461003b578063fe50cc7214610050575b600080fd5b61004e61004936600461013f565b61006e565b005b61005861007e565b604051610065919061020e565b60405180910390f35b600061007a8282610323565b5050565b60606000805461008d90610281565b80601f01602080910402602001604051908101604052809291908181526020018280546100b990610281565b80156101065780601f106100db57610100808354040283529160200191610106565b820191906000526020600020905b8154815290600101906020018083116100e957829003601f168201915b5050505050905090565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60006020828403121561015157600080fd5b813567ffffffffffffffff8082111561016957600080fd5b818401915084601f83011261017d57600080fd5b81358181111561018f5761018f610110565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156101d5576101d5610110565b816040528281528760208487010111156101ee57600080fd5b826020860160208301376000928101602001929092525095945050505050565b600060208083528351808285015260005b8181101561023b5785810183015185820160400152820161021f565b8181111561024d576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b600181811c9082168061029557607f821691505b6020821081036102ce577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f82111561031e57600081815260208120601f850160051c810160208610156102fb5750805b601f850160051c820191505b8181101561031a57828155600101610307565b5050505b505050565b815167ffffffffffffffff81111561033d5761033d610110565b6103518161034b8454610281565b846102d4565b602080601f8311600181146103a4576000841561036e5750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b17855561031a565b6000858152602081207ffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffe08616915b828110156103f1578886015182559484019460019091019084016103d2565b508582101561042d57878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b0190555056fea164736f6c634300080f000a", +} + +var GreeterABI = GreeterMetaData.ABI + +var GreeterBin = GreeterMetaData.Bin + +func DeployGreeter(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Greeter, error) { + parsed, err := GreeterMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(GreeterBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Greeter{address: address, abi: *parsed, GreeterCaller: GreeterCaller{contract: contract}, GreeterTransactor: GreeterTransactor{contract: contract}, GreeterFilterer: GreeterFilterer{contract: contract}}, nil +} + +type Greeter struct { + address common.Address + abi abi.ABI + GreeterCaller + GreeterTransactor + GreeterFilterer +} + +type GreeterCaller struct { + contract *bind.BoundContract +} + +type GreeterTransactor struct { + contract *bind.BoundContract +} + +type GreeterFilterer struct { + contract *bind.BoundContract +} + +type GreeterSession struct { + Contract *Greeter + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type GreeterCallerSession struct { + Contract *GreeterCaller + CallOpts bind.CallOpts +} + +type GreeterTransactorSession struct { + Contract *GreeterTransactor + TransactOpts bind.TransactOpts +} + +type GreeterRaw struct { + Contract *Greeter +} + +type GreeterCallerRaw struct { + Contract *GreeterCaller +} + +type GreeterTransactorRaw struct { + Contract *GreeterTransactor +} + +func NewGreeter(address common.Address, backend bind.ContractBackend) (*Greeter, error) { + 
abi, err := abi.JSON(strings.NewReader(GreeterABI)) + if err != nil { + return nil, err + } + contract, err := bindGreeter(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Greeter{address: address, abi: abi, GreeterCaller: GreeterCaller{contract: contract}, GreeterTransactor: GreeterTransactor{contract: contract}, GreeterFilterer: GreeterFilterer{contract: contract}}, nil +} + +func NewGreeterCaller(address common.Address, caller bind.ContractCaller) (*GreeterCaller, error) { + contract, err := bindGreeter(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &GreeterCaller{contract: contract}, nil +} + +func NewGreeterTransactor(address common.Address, transactor bind.ContractTransactor) (*GreeterTransactor, error) { + contract, err := bindGreeter(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &GreeterTransactor{contract: contract}, nil +} + +func NewGreeterFilterer(address common.Address, filterer bind.ContractFilterer) (*GreeterFilterer, error) { + contract, err := bindGreeter(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &GreeterFilterer{contract: contract}, nil +} + +func bindGreeter(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := GreeterMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_Greeter *GreeterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Greeter.Contract.GreeterCaller.contract.Call(opts, result, method, params...) 
+} + +func (_Greeter *GreeterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Greeter.Contract.GreeterTransactor.contract.Transfer(opts) +} + +func (_Greeter *GreeterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Greeter.Contract.GreeterTransactor.contract.Transact(opts, method, params...) +} + +func (_Greeter *GreeterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Greeter.Contract.contract.Call(opts, result, method, params...) +} + +func (_Greeter *GreeterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Greeter.Contract.contract.Transfer(opts) +} + +func (_Greeter *GreeterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Greeter.Contract.contract.Transact(opts, method, params...) +} + +func (_Greeter *GreeterCaller) GetGreeting(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _Greeter.contract.Call(opts, &out, "getGreeting") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +func (_Greeter *GreeterSession) GetGreeting() (string, error) { + return _Greeter.Contract.GetGreeting(&_Greeter.CallOpts) +} + +func (_Greeter *GreeterCallerSession) GetGreeting() (string, error) { + return _Greeter.Contract.GetGreeting(&_Greeter.CallOpts) +} + +func (_Greeter *GreeterTransactor) SetGreeting(opts *bind.TransactOpts, greeting string) (*types.Transaction, error) { + return _Greeter.contract.Transact(opts, "setGreeting", greeting) +} + +func (_Greeter *GreeterSession) SetGreeting(greeting string) (*types.Transaction, error) { + return _Greeter.Contract.SetGreeting(&_Greeter.TransactOpts, greeting) +} + +func (_Greeter *GreeterTransactorSession) SetGreeting(greeting string) 
(*types.Transaction, error) { + return _Greeter.Contract.SetGreeting(&_Greeter.TransactOpts, greeting) +} + +func (_Greeter *Greeter) Address() common.Address { + return _Greeter.address +} + +type GreeterInterface interface { + GetGreeting(opts *bind.CallOpts) (string, error) + + SetGreeting(opts *bind.TransactOpts, greeting string) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/transmission/generated/paymaster_wrapper/paymaster_wrapper.go b/core/gethwrappers/transmission/generated/paymaster_wrapper/paymaster_wrapper.go new file mode 100644 index 00000000..ef25d431 --- /dev/null +++ b/core/gethwrappers/transmission/generated/paymaster_wrapper/paymaster_wrapper.go @@ -0,0 +1,719 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package paymaster_wrapper + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type UserOperation struct { + Sender common.Address + Nonce *big.Int + InitCode []byte + CallData []byte + CallGasLimit *big.Int + VerificationGasLimit *big.Int + PreVerificationGas *big.Int + MaxFeePerGas *big.Int + MaxPriorityFeePerGas *big.Int + PaymasterAndData []byte + Signature []byte +} + +var PaymasterMetaData = &bind.MetaData{ + ABI: 
"[{\"inputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"linkToken\",\"type\":\"address\"},{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"linkEthFeed\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"entryPoint\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"juelsNeeded\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"subscriptionBalance\",\"type\":\"uint256\"}],\"name\":\"InsufficientFunds\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidCalldata\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyCallableFromLink\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"validator\",\"type\":\"address\"}],\"name\":\"Unauthorized\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"userOpHash\",\"type\":\"bytes32\"}],\"name\":\"UserOperationAlreadyTried\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_entryPoint\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_linkEthFeed\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\
"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_linkToken\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"enumIPaymaster.PostOpMode\",\"name\":\"\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"context\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"actualGasCost\",\"type\":\"uint256\"}],\"name\":\"postOp\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_config\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"stalenessSeconds\",\"type\":\"uint32\"},{\"internalType\":\"int256\",\"name\":\"fallbackWeiPerUnitLink\",\"type\":\"int256\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"int
ernalType\":\"bytes\",\"name\":\"initCode\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"callGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"verificationGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"preVerificationGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxPriorityFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"paymasterAndData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"signature\",\"type\":\"bytes\"}],\"internalType\":\"structUserOperation\",\"name\":\"userOp\",\"type\":\"tuple\"},{\"internalType\":\"bytes32\",\"name\":\"userOpHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"maxCost\",\"type\":\"uint256\"}],\"name\":\"validatePaymasterUserOp\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"context\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"validationData\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60e06040523480156200001157600080fd5b50604051620014fb380380620014fb8339810160408190526200003491620001a3565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000df565b5050506001600160a01b0392831660805290821660a0521660c052620001f7565b336001600160a01b03821603620001395760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6001600160a01b0381168114620001a057600080fd5b50565b600080600060608486031215620001b957600080fd5b8351620001c6816200018a565b6020850151909350620001d9816200018a565b6040850151909250620001ec816200018a565b809150509250925092565b60805160a05160c05161129c6200025f600039600081816101080152818161049f01528181610507015281816105cd015261063501526000818161018f0152610cb60152600081816101dc015281816103a201528181610ac90152610b81015261129c6000f3fe608060405234801561001057600080fd5b50600436106100c95760003560e01c80639b9bd4de11610081578063db37983b1161005b578063db37983b146101d7578063f2fde38b146101fe578063f465c77e1461021157600080fd5b80639b9bd4de1461018a578063a4c0ed36146101b1578063a9a23409146101c457600080fd5b806379ba5097116100b257806379ba50971461014f5780638a38f365146101595780638da5cb5b1461016c57600080fd5b8063088070f5146100ce578063140fcfb114610103575b600080fd5b6002546003546100e29163ffffffff169082565b6040805163ffffffff90931683526020830191909152015b60405180910390f35b61012a7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100fa565b610157610232565b005b610157610167366004610d5b565b610334565b60005473ffffffffffffffffffffffffffffffffffffffff16610
12a565b61012a7f000000000000000000000000000000000000000000000000000000000000000081565b6101576101bf366004610dfb565b61038a565b6101576101d2366004610e57565b610487565b61012a7f000000000000000000000000000000000000000000000000000000000000000081565b61015761020c366004610eb7565b61059d565b61022461021f366004610edb565b6105b1565b6040516100fa929190610f2f565b60015473ffffffffffffffffffffffffffffffffffffffff1633146102b8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e65720000000000000000000060448201526064015b60405180910390fd5b60008054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b61033c610849565b6040805180820190915263ffffffff9092168083526020909201819052600280547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000016909217909155600355565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146103f9576040517f44b0e3c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60208114610433576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061044182840184610eb7565b73ffffffffffffffffffffffffffffffffffffffff811660009081526005602052604081208054929350869290919061047b908490610fd9565b90915550505050505050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610534576040517f295a81c100000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660248201526044016102af565b60008061054384860186610ff1565b9150915080610551846108cc565b61055b9190610fd9565b73fffffffffffffffffffffffffffffffffffff
fff83166000908152600560205260408120805490919061059090849061100f565b9091555050505050505050565b6105a5610849565b6105ae816108f8565b50565b606060003373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610662576040517f295a81c100000000000000000000000000000000000000000000000000000000815233600482015273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660248201526044016102af565b60008481526004602052604090205460ff16156106ae576040517f7413dcf8000000000000000000000000000000000000000000000000000000008152600481018590526024016102af565b60006106b9866109ed565b90506000816106c7866108cc565b6106d19190610fd9565b905080600560006106e560208b018b610eb7565b73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410156107b057806005600061073860208b018b610eb7565b73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020546040517f03eb8b540000000000000000000000000000000000000000000000000000000081526004016102af929190918252602082015260400190565b600086815260046020908152604090912080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790556107f690880188610eb7565b6040805173ffffffffffffffffffffffffffffffffffffffff9092166020830152810183905260600160405160208183030381529060405261083b6000806000610c29565b935093505050935093915050565b60005473ffffffffffffffffffffffffffffffffffffffff1633146108ca576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e65720000000000000000000060448201526064016102af565b565b60006108d6610c61565b6108e883670de0b6b3a7640000611026565b6108f29190611063565b92915050565b3373ffffffffffffffffffffffffffffffffffffffff821603610977576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e736
6657220746f2073656c6600000000000000000060448201526064016102af565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b60006109fd61012083018361109e565b9050601403610a0e57506000919050565b6000610a1e61012084018461109e565b6014818110610a2f57610a2f611103565b919091013560f81c9150819050610c23576000610a5061012085018561109e565b610a5e916015908290611132565b810190610a6b919061115c565b90508060200151600014158015610b385750602081015181516040517f70a0823100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff91821660048201527f0000000000000000000000000000000000000000000000000000000000000000909116906370a0823190602401602060405180830381865afa158015610b12573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610b3691906111e5565b105b15610c2157805160408083015190517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169263a9059cbb92610bd59260040173ffffffffffffffffffffffffffffffffffffffff929092168252602082015260400190565b6020604051808303816000875af1158015610bf4573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610c1891906111fe565b50806040015192505b505b50919050565b600060d08265ffffffffffff16901b60a08465ffffffffffff16901b85610c51576000610c54565b60015b60ff161717949350505050565b600254604080517ffeaf968c000000000000000000000000000000000000000000000000000000008152905160009263ffffffff1691821515918491829173ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000169163feaf968c9160048083019260a09291908290030181865afa158015610d01573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d25919061123f565b509450909250849150508015610d495750610d40824261100f5
65b8463ffffffff16105b15610d5357506003545b949350505050565b60008060408385031215610d6e57600080fd5b823563ffffffff81168114610d8257600080fd5b946020939093013593505050565b73ffffffffffffffffffffffffffffffffffffffff811681146105ae57600080fd5b60008083601f840112610dc457600080fd5b50813567ffffffffffffffff811115610ddc57600080fd5b602083019150836020828501011115610df457600080fd5b9250929050565b60008060008060608587031215610e1157600080fd5b8435610e1c81610d90565b935060208501359250604085013567ffffffffffffffff811115610e3f57600080fd5b610e4b87828801610db2565b95989497509550505050565b60008060008060608587031215610e6d57600080fd5b843560038110610e7c57600080fd5b9350602085013567ffffffffffffffff811115610e9857600080fd5b610ea487828801610db2565b9598909750949560400135949350505050565b600060208284031215610ec957600080fd5b8135610ed481610d90565b9392505050565b600080600060608486031215610ef057600080fd5b833567ffffffffffffffff811115610f0757600080fd5b84016101608187031215610f1a57600080fd5b95602085013595506040909401359392505050565b604081526000835180604084015260005b81811015610f5d5760208187018101516060868401015201610f40565b81811115610f6f576000606083860101525b50602083019390935250601f919091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01601606001919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60008219821115610fec57610fec610faa565b500190565b6000806040838503121561100457600080fd5b8235610d8281610d90565b60008282101561102157611021610faa565b500390565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff048311821515161561105e5761105e610faa565b500290565b600082611099577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18436030181126110d357600080fd5b83018035915067ffffffffffffffff8211156110ee57600080fd5b602001915036819003821315610df457600080fd5b7f4e487b710000000000000000000000000000000000000000000000000000000060005
2603260045260246000fd5b6000808585111561114257600080fd5b8386111561114f57600080fd5b5050820193919092039150565b60006060828403121561116e57600080fd5b6040516060810181811067ffffffffffffffff821117156111b8577f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60405282356111c681610d90565b8152602083810135908201526040928301359281019290925250919050565b6000602082840312156111f757600080fd5b5051919050565b60006020828403121561121057600080fd5b81518015158114610ed457600080fd5b805169ffffffffffffffffffff8116811461123a57600080fd5b919050565b600080600080600060a0868803121561125757600080fd5b61126086611220565b945060208601519350604086015192506060860151915061128360808701611220565b9050929550929590935056fea164736f6c634300080f000a", +} + +var PaymasterABI = PaymasterMetaData.ABI + +var PaymasterBin = PaymasterMetaData.Bin + +func DeployPaymaster(auth *bind.TransactOpts, backend bind.ContractBackend, linkToken common.Address, linkEthFeed common.Address, entryPoint common.Address) (common.Address, *types.Transaction, *Paymaster, error) { + parsed, err := PaymasterMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(PaymasterBin), backend, linkToken, linkEthFeed, entryPoint) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Paymaster{address: address, abi: *parsed, PaymasterCaller: PaymasterCaller{contract: contract}, PaymasterTransactor: PaymasterTransactor{contract: contract}, PaymasterFilterer: PaymasterFilterer{contract: contract}}, nil +} + +type Paymaster struct { + address common.Address + abi abi.ABI + PaymasterCaller + PaymasterTransactor + PaymasterFilterer +} + +type PaymasterCaller struct { + contract *bind.BoundContract +} + +type PaymasterTransactor struct { + contract *bind.BoundContract +} + +type 
PaymasterFilterer struct { + contract *bind.BoundContract +} + +type PaymasterSession struct { + Contract *Paymaster + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type PaymasterCallerSession struct { + Contract *PaymasterCaller + CallOpts bind.CallOpts +} + +type PaymasterTransactorSession struct { + Contract *PaymasterTransactor + TransactOpts bind.TransactOpts +} + +type PaymasterRaw struct { + Contract *Paymaster +} + +type PaymasterCallerRaw struct { + Contract *PaymasterCaller +} + +type PaymasterTransactorRaw struct { + Contract *PaymasterTransactor +} + +func NewPaymaster(address common.Address, backend bind.ContractBackend) (*Paymaster, error) { + abi, err := abi.JSON(strings.NewReader(PaymasterABI)) + if err != nil { + return nil, err + } + contract, err := bindPaymaster(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Paymaster{address: address, abi: abi, PaymasterCaller: PaymasterCaller{contract: contract}, PaymasterTransactor: PaymasterTransactor{contract: contract}, PaymasterFilterer: PaymasterFilterer{contract: contract}}, nil +} + +func NewPaymasterCaller(address common.Address, caller bind.ContractCaller) (*PaymasterCaller, error) { + contract, err := bindPaymaster(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &PaymasterCaller{contract: contract}, nil +} + +func NewPaymasterTransactor(address common.Address, transactor bind.ContractTransactor) (*PaymasterTransactor, error) { + contract, err := bindPaymaster(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &PaymasterTransactor{contract: contract}, nil +} + +func NewPaymasterFilterer(address common.Address, filterer bind.ContractFilterer) (*PaymasterFilterer, error) { + contract, err := bindPaymaster(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &PaymasterFilterer{contract: contract}, nil +} + +func bindPaymaster(address common.Address, caller 
bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := PaymasterMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_Paymaster *PaymasterRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Paymaster.Contract.PaymasterCaller.contract.Call(opts, result, method, params...) +} + +func (_Paymaster *PaymasterRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Paymaster.Contract.PaymasterTransactor.contract.Transfer(opts) +} + +func (_Paymaster *PaymasterRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Paymaster.Contract.PaymasterTransactor.contract.Transact(opts, method, params...) +} + +func (_Paymaster *PaymasterCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Paymaster.Contract.contract.Call(opts, result, method, params...) +} + +func (_Paymaster *PaymasterTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Paymaster.Contract.contract.Transfer(opts) +} + +func (_Paymaster *PaymasterTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Paymaster.Contract.contract.Transact(opts, method, params...) 
+} + +func (_Paymaster *PaymasterCaller) IEntryPoint(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Paymaster.contract.Call(opts, &out, "i_entryPoint") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_Paymaster *PaymasterSession) IEntryPoint() (common.Address, error) { + return _Paymaster.Contract.IEntryPoint(&_Paymaster.CallOpts) +} + +func (_Paymaster *PaymasterCallerSession) IEntryPoint() (common.Address, error) { + return _Paymaster.Contract.IEntryPoint(&_Paymaster.CallOpts) +} + +func (_Paymaster *PaymasterCaller) ILinkEthFeed(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Paymaster.contract.Call(opts, &out, "i_linkEthFeed") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_Paymaster *PaymasterSession) ILinkEthFeed() (common.Address, error) { + return _Paymaster.Contract.ILinkEthFeed(&_Paymaster.CallOpts) +} + +func (_Paymaster *PaymasterCallerSession) ILinkEthFeed() (common.Address, error) { + return _Paymaster.Contract.ILinkEthFeed(&_Paymaster.CallOpts) +} + +func (_Paymaster *PaymasterCaller) ILinkToken(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Paymaster.contract.Call(opts, &out, "i_linkToken") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_Paymaster *PaymasterSession) ILinkToken() (common.Address, error) { + return _Paymaster.Contract.ILinkToken(&_Paymaster.CallOpts) +} + +func (_Paymaster *PaymasterCallerSession) ILinkToken() (common.Address, error) { + return _Paymaster.Contract.ILinkToken(&_Paymaster.CallOpts) +} + +func (_Paymaster *PaymasterCaller) Owner(opts *bind.CallOpts) 
(common.Address, error) { + var out []interface{} + err := _Paymaster.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_Paymaster *PaymasterSession) Owner() (common.Address, error) { + return _Paymaster.Contract.Owner(&_Paymaster.CallOpts) +} + +func (_Paymaster *PaymasterCallerSession) Owner() (common.Address, error) { + return _Paymaster.Contract.Owner(&_Paymaster.CallOpts) +} + +func (_Paymaster *PaymasterCaller) SConfig(opts *bind.CallOpts) (SConfig, + + error) { + var out []interface{} + err := _Paymaster.contract.Call(opts, &out, "s_config") + + outstruct := new(SConfig) + if err != nil { + return *outstruct, err + } + + outstruct.StalenessSeconds = *abi.ConvertType(out[0], new(uint32)).(*uint32) + outstruct.FallbackWeiPerUnitLink = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return *outstruct, err + +} + +func (_Paymaster *PaymasterSession) SConfig() (SConfig, + + error) { + return _Paymaster.Contract.SConfig(&_Paymaster.CallOpts) +} + +func (_Paymaster *PaymasterCallerSession) SConfig() (SConfig, + + error) { + return _Paymaster.Contract.SConfig(&_Paymaster.CallOpts) +} + +func (_Paymaster *PaymasterTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Paymaster.contract.Transact(opts, "acceptOwnership") +} + +func (_Paymaster *PaymasterSession) AcceptOwnership() (*types.Transaction, error) { + return _Paymaster.Contract.AcceptOwnership(&_Paymaster.TransactOpts) +} + +func (_Paymaster *PaymasterTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _Paymaster.Contract.AcceptOwnership(&_Paymaster.TransactOpts) +} + +func (_Paymaster *PaymasterTransactor) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _Paymaster.contract.Transact(opts, 
"onTokenTransfer", arg0, _amount, _data) +} + +func (_Paymaster *PaymasterSession) OnTokenTransfer(arg0 common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _Paymaster.Contract.OnTokenTransfer(&_Paymaster.TransactOpts, arg0, _amount, _data) +} + +func (_Paymaster *PaymasterTransactorSession) OnTokenTransfer(arg0 common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) { + return _Paymaster.Contract.OnTokenTransfer(&_Paymaster.TransactOpts, arg0, _amount, _data) +} + +func (_Paymaster *PaymasterTransactor) PostOp(opts *bind.TransactOpts, arg0 uint8, context []byte, actualGasCost *big.Int) (*types.Transaction, error) { + return _Paymaster.contract.Transact(opts, "postOp", arg0, context, actualGasCost) +} + +func (_Paymaster *PaymasterSession) PostOp(arg0 uint8, context []byte, actualGasCost *big.Int) (*types.Transaction, error) { + return _Paymaster.Contract.PostOp(&_Paymaster.TransactOpts, arg0, context, actualGasCost) +} + +func (_Paymaster *PaymasterTransactorSession) PostOp(arg0 uint8, context []byte, actualGasCost *big.Int) (*types.Transaction, error) { + return _Paymaster.Contract.PostOp(&_Paymaster.TransactOpts, arg0, context, actualGasCost) +} + +func (_Paymaster *PaymasterTransactor) SetConfig(opts *bind.TransactOpts, stalenessSeconds uint32, fallbackWeiPerUnitLink *big.Int) (*types.Transaction, error) { + return _Paymaster.contract.Transact(opts, "setConfig", stalenessSeconds, fallbackWeiPerUnitLink) +} + +func (_Paymaster *PaymasterSession) SetConfig(stalenessSeconds uint32, fallbackWeiPerUnitLink *big.Int) (*types.Transaction, error) { + return _Paymaster.Contract.SetConfig(&_Paymaster.TransactOpts, stalenessSeconds, fallbackWeiPerUnitLink) +} + +func (_Paymaster *PaymasterTransactorSession) SetConfig(stalenessSeconds uint32, fallbackWeiPerUnitLink *big.Int) (*types.Transaction, error) { + return _Paymaster.Contract.SetConfig(&_Paymaster.TransactOpts, stalenessSeconds, fallbackWeiPerUnitLink) +} + +func 
(_Paymaster *PaymasterTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _Paymaster.contract.Transact(opts, "transferOwnership", to) +} + +func (_Paymaster *PaymasterSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _Paymaster.Contract.TransferOwnership(&_Paymaster.TransactOpts, to) +} + +func (_Paymaster *PaymasterTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _Paymaster.Contract.TransferOwnership(&_Paymaster.TransactOpts, to) +} + +func (_Paymaster *PaymasterTransactor) ValidatePaymasterUserOp(opts *bind.TransactOpts, userOp UserOperation, userOpHash [32]byte, maxCost *big.Int) (*types.Transaction, error) { + return _Paymaster.contract.Transact(opts, "validatePaymasterUserOp", userOp, userOpHash, maxCost) +} + +func (_Paymaster *PaymasterSession) ValidatePaymasterUserOp(userOp UserOperation, userOpHash [32]byte, maxCost *big.Int) (*types.Transaction, error) { + return _Paymaster.Contract.ValidatePaymasterUserOp(&_Paymaster.TransactOpts, userOp, userOpHash, maxCost) +} + +func (_Paymaster *PaymasterTransactorSession) ValidatePaymasterUserOp(userOp UserOperation, userOpHash [32]byte, maxCost *big.Int) (*types.Transaction, error) { + return _Paymaster.Contract.ValidatePaymasterUserOp(&_Paymaster.TransactOpts, userOp, userOpHash, maxCost) +} + +type PaymasterOwnershipTransferRequestedIterator struct { + Event *PaymasterOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *PaymasterOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(PaymasterOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return 
true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(PaymasterOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *PaymasterOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *PaymasterOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type PaymasterOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_Paymaster *PaymasterFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*PaymasterOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Paymaster.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &PaymasterOwnershipTransferRequestedIterator{contract: _Paymaster.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_Paymaster *PaymasterFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *PaymasterOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Paymaster.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit 
<-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(PaymasterOwnershipTransferRequested) + if err := _Paymaster.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Paymaster *PaymasterFilterer) ParseOwnershipTransferRequested(log types.Log) (*PaymasterOwnershipTransferRequested, error) { + event := new(PaymasterOwnershipTransferRequested) + if err := _Paymaster.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type PaymasterOwnershipTransferredIterator struct { + Event *PaymasterOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *PaymasterOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(PaymasterOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(PaymasterOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *PaymasterOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *PaymasterOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type PaymasterOwnershipTransferred struct { + 
From common.Address + To common.Address + Raw types.Log +} + +func (_Paymaster *PaymasterFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*PaymasterOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Paymaster.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &PaymasterOwnershipTransferredIterator{contract: _Paymaster.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_Paymaster *PaymasterFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *PaymasterOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Paymaster.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(PaymasterOwnershipTransferred) + if err := _Paymaster.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_Paymaster *PaymasterFilterer) ParseOwnershipTransferred(log types.Log) (*PaymasterOwnershipTransferred, error) { + event := new(PaymasterOwnershipTransferred) + if err := _Paymaster.contract.UnpackLog(event, 
"OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type SConfig struct { + StalenessSeconds uint32 + FallbackWeiPerUnitLink *big.Int +} + +func (_Paymaster *Paymaster) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _Paymaster.abi.Events["OwnershipTransferRequested"].ID: + return _Paymaster.ParseOwnershipTransferRequested(log) + case _Paymaster.abi.Events["OwnershipTransferred"].ID: + return _Paymaster.ParseOwnershipTransferred(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (PaymasterOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (PaymasterOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (_Paymaster *Paymaster) Address() common.Address { + return _Paymaster.address +} + +type PaymasterInterface interface { + IEntryPoint(opts *bind.CallOpts) (common.Address, error) + + ILinkEthFeed(opts *bind.CallOpts) (common.Address, error) + + ILinkToken(opts *bind.CallOpts) (common.Address, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + SConfig(opts *bind.CallOpts) (SConfig, + + error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, _amount *big.Int, _data []byte) (*types.Transaction, error) + + PostOp(opts *bind.TransactOpts, arg0 uint8, context []byte, actualGasCost *big.Int) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, stalenessSeconds uint32, fallbackWeiPerUnitLink *big.Int) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + + ValidatePaymasterUserOp(opts *bind.TransactOpts, userOp UserOperation, 
userOpHash [32]byte, maxCost *big.Int) (*types.Transaction, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*PaymasterOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *PaymasterOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*PaymasterOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*PaymasterOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *PaymasterOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*PaymasterOwnershipTransferred, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/transmission/generated/sca_wrapper/sca_wrapper.go b/core/gethwrappers/transmission/generated/sca_wrapper/sca_wrapper.go new file mode 100644 index 00000000..55a31077 --- /dev/null +++ b/core/gethwrappers/transmission/generated/sca_wrapper/sca_wrapper.go @@ -0,0 +1,292 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package sca_wrapper + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +type UserOperation struct { + Sender common.Address + Nonce *big.Int + InitCode []byte + CallData []byte + CallGasLimit *big.Int + VerificationGasLimit *big.Int + PreVerificationGas *big.Int + MaxFeePerGas *big.Int + MaxPriorityFeePerGas *big.Int + PaymasterAndData []byte + Signature []byte +} + +var SCAMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"entryPoint\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"BadFormatOrOOG\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"currentNonce\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"nonceGiven\",\"type\":\"uint256\"}],\"name\":\"IncorrectNonce\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"operationHash\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"InvalidSignature\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"}],\"name\":\"NotAuthorized\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"deadline\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"currentTimestamp\",\"type\":\"uint256\"}],\"name\":\"TransactionExpired\",\"type\":\"error\"},{\"inputs\":[{\"internalType
\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint48\",\"name\":\"deadline\",\"type\":\"uint48\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"executeTransactionFromEntryPoint\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_entryPoint\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"i_owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"s_nonce\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"initCode\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"callGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"verificationGasLimit\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"preVerificationGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxPriorityFeePerGas\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"paymasterAndData\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"signature\",\"type\":\"bytes\"}],\"internalType\":\"structUserOperation\",\"name\":\"userOp\",\"type\":\"tuple\"},{\"internalType\":\"bytes32\",\"name\":\"userOpHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\
"validateUserOp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"validationData\",\"type\":\"uint256\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x60c060405234801561001057600080fd5b50604051610acb380380610acb83398101604081905261002f91610062565b6001600160a01b039182166080521660a052610095565b80516001600160a01b038116811461005d57600080fd5b919050565b6000806040838503121561007557600080fd5b61007e83610046565b915061008c60208401610046565b90509250929050565b60805160a051610a046100c760003960008181607101526103c301526000818161010101526102ee0152610a046000f3fe608060405234801561001057600080fd5b50600436106100675760003560e01c80637eccf63e116100505780637eccf63e146100de57806389553be4146100e7578063dba6335f146100fc57600080fd5b8063140fcfb11461006c5780633a871cdd146100bd575b600080fd5b6100937f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100d06100cb36600461063a565b610123565b6040519081526020016100b4565b6100d060005481565b6100fa6100f53660046106ce565b6103ab565b005b6100937f000000000000000000000000000000000000000000000000000000000000000081565b60008054846020013514610179576000546040517f7ba633940000000000000000000000000000000000000000000000000000000081526004810191909152602085013560248201526044015b60405180910390fd5b60006102908430604080517f4750045d47fce615521b32cee713ff8db50147e98aec5ca94926b52651ca3fa060208083019190915281830194909452815180820383018152606080830184528151918601919091207f190000000000000000000000000000000000000000000000000000000000000060808401527f010000000000000000000000000000000000000000000000000000000000000060818401527f1c7d3b72b37a35523e273aaadd7b4cd66f618bb81429ab053412d51f50ccea6160828401524660a284015293901b7fffffffffffffffffffffffffffffffffffffffff0000000000000000000000001660c282015260d6808201939093528151808203909301835260f6019052805191012090565b905060006102a261014087018761076b565b8080601f0160208091040260200160405190810160405280939291908181526020018
383808284376000920191909152509293505073ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016915061031c90508284610544565b73ffffffffffffffffffffffffffffffffffffffff161461034d576103446001600080610602565b925050506103a4565b60008054908061035c83610806565b9091555060009050610371606088018861076b565b61037f91600490829061083e565b81019061038c9190610897565b509250505061039e6000826000610602565b93505050505b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461041c576040517f4a0bfec1000000000000000000000000000000000000000000000000000000008152336004820152602401610170565b65ffffffffffff83161580159061043a57508265ffffffffffff1642115b15610481576040517f300249d700000000000000000000000000000000000000000000000000000000815265ffffffffffff84166004820152426024820152604401610170565b6000808673ffffffffffffffffffffffffffffffffffffffff168685856040516104ac929190610993565b60006040518083038185875af1925050503d80600081146104e9576040519150601f19603f3d011682016040523d82523d6000602084013e6104ee565b606091505b50915091508161053b578051600003610533576040517f20e9b5d200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805181602001fd5b50505050505050565b602082015160408084015184516000939284918791908110610568576105686109a3565b016020015160f81c905060018561058083601b6109d2565b6040805160008152602081018083529390935260ff90911690820152606081018590526080810184905260a0016020604051602081039080840390855afa1580156105cf573d6000803e3d6000fd5b50506040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00151979650505050505050565b600060d08265ffffffffffff16901b60a08465ffffffffffff16901b8561062a57600061062d565b60015b60ff161717949350505050565b60008060006060848603121561064f57600080fd5b833567ffffffffffffffff81111561066657600080fd5b8401610160818703121561067957600080fd5b95602085013595506040909401359392505050565b73ffffffffffffffffffffffffffffffffffffffff811681146106b057600
080fd5b50565b803565ffffffffffff811681146106c957600080fd5b919050565b6000806000806000608086880312156106e657600080fd5b85356106f18161068e565b945060208601359350610706604087016106b3565b9250606086013567ffffffffffffffff8082111561072357600080fd5b818801915088601f83011261073757600080fd5b81358181111561074657600080fd5b89602082850101111561075857600080fd5b9699959850939650602001949392505050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18436030181126107a057600080fd5b83018035915067ffffffffffffffff8211156107bb57600080fd5b6020019150368190038213156107d057600080fd5b9250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610837576108376107d7565b5060010190565b6000808585111561084e57600080fd5b8386111561085b57600080fd5b5050820193919092039150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600080600080608085870312156108ad57600080fd5b84356108b88161068e565b9350602085013592506108cd604086016106b3565b9150606085013567ffffffffffffffff808211156108ea57600080fd5b818701915087601f8301126108fe57600080fd5b81358181111561091057610910610868565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190838211818310171561095657610956610868565b816040528281528a602084870101111561096f57600080fd5b82602086016020830137600060208483010152809550505050505092959194509250565b8183823760009101908152919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600060ff821660ff84168060ff038211156109ef576109ef6107d7565b01939250505056fea164736f6c634300080f000a", +} + +var SCAABI = SCAMetaData.ABI + +var SCABin = SCAMetaData.Bin + +func DeploySCA(auth *bind.TransactOpts, backend bind.ContractBackend, owner common.Address, entryPoint common.Address) (common.Address, *types.Transaction, *SCA, error) { + parsed, err := SCAMetaData.GetAbi() + if err 
!= nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(SCABin), backend, owner, entryPoint) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &SCA{address: address, abi: *parsed, SCACaller: SCACaller{contract: contract}, SCATransactor: SCATransactor{contract: contract}, SCAFilterer: SCAFilterer{contract: contract}}, nil +} + +type SCA struct { + address common.Address + abi abi.ABI + SCACaller + SCATransactor + SCAFilterer +} + +type SCACaller struct { + contract *bind.BoundContract +} + +type SCATransactor struct { + contract *bind.BoundContract +} + +type SCAFilterer struct { + contract *bind.BoundContract +} + +type SCASession struct { + Contract *SCA + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type SCACallerSession struct { + Contract *SCACaller + CallOpts bind.CallOpts +} + +type SCATransactorSession struct { + Contract *SCATransactor + TransactOpts bind.TransactOpts +} + +type SCARaw struct { + Contract *SCA +} + +type SCACallerRaw struct { + Contract *SCACaller +} + +type SCATransactorRaw struct { + Contract *SCATransactor +} + +func NewSCA(address common.Address, backend bind.ContractBackend) (*SCA, error) { + abi, err := abi.JSON(strings.NewReader(SCAABI)) + if err != nil { + return nil, err + } + contract, err := bindSCA(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &SCA{address: address, abi: abi, SCACaller: SCACaller{contract: contract}, SCATransactor: SCATransactor{contract: contract}, SCAFilterer: SCAFilterer{contract: contract}}, nil +} + +func NewSCACaller(address common.Address, caller bind.ContractCaller) (*SCACaller, error) { + contract, err := bindSCA(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &SCACaller{contract: contract}, nil +} + +func 
NewSCATransactor(address common.Address, transactor bind.ContractTransactor) (*SCATransactor, error) { + contract, err := bindSCA(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &SCATransactor{contract: contract}, nil +} + +func NewSCAFilterer(address common.Address, filterer bind.ContractFilterer) (*SCAFilterer, error) { + contract, err := bindSCA(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &SCAFilterer{contract: contract}, nil +} + +func bindSCA(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := SCAMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_SCA *SCARaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _SCA.Contract.SCACaller.contract.Call(opts, result, method, params...) +} + +func (_SCA *SCARaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _SCA.Contract.SCATransactor.contract.Transfer(opts) +} + +func (_SCA *SCARaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _SCA.Contract.SCATransactor.contract.Transact(opts, method, params...) +} + +func (_SCA *SCACallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _SCA.Contract.contract.Call(opts, result, method, params...) +} + +func (_SCA *SCATransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _SCA.Contract.contract.Transfer(opts) +} + +func (_SCA *SCATransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _SCA.Contract.contract.Transact(opts, method, params...) 
+} + +func (_SCA *SCACaller) IEntryPoint(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _SCA.contract.Call(opts, &out, "i_entryPoint") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_SCA *SCASession) IEntryPoint() (common.Address, error) { + return _SCA.Contract.IEntryPoint(&_SCA.CallOpts) +} + +func (_SCA *SCACallerSession) IEntryPoint() (common.Address, error) { + return _SCA.Contract.IEntryPoint(&_SCA.CallOpts) +} + +func (_SCA *SCACaller) IOwner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _SCA.contract.Call(opts, &out, "i_owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_SCA *SCASession) IOwner() (common.Address, error) { + return _SCA.Contract.IOwner(&_SCA.CallOpts) +} + +func (_SCA *SCACallerSession) IOwner() (common.Address, error) { + return _SCA.Contract.IOwner(&_SCA.CallOpts) +} + +func (_SCA *SCACaller) SNonce(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _SCA.contract.Call(opts, &out, "s_nonce") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +func (_SCA *SCASession) SNonce() (*big.Int, error) { + return _SCA.Contract.SNonce(&_SCA.CallOpts) +} + +func (_SCA *SCACallerSession) SNonce() (*big.Int, error) { + return _SCA.Contract.SNonce(&_SCA.CallOpts) +} + +func (_SCA *SCATransactor) ExecuteTransactionFromEntryPoint(opts *bind.TransactOpts, to common.Address, value *big.Int, deadline *big.Int, data []byte) (*types.Transaction, error) { + return _SCA.contract.Transact(opts, "executeTransactionFromEntryPoint", to, value, deadline, data) +} + +func (_SCA *SCASession) ExecuteTransactionFromEntryPoint(to common.Address, 
value *big.Int, deadline *big.Int, data []byte) (*types.Transaction, error) { + return _SCA.Contract.ExecuteTransactionFromEntryPoint(&_SCA.TransactOpts, to, value, deadline, data) +} + +func (_SCA *SCATransactorSession) ExecuteTransactionFromEntryPoint(to common.Address, value *big.Int, deadline *big.Int, data []byte) (*types.Transaction, error) { + return _SCA.Contract.ExecuteTransactionFromEntryPoint(&_SCA.TransactOpts, to, value, deadline, data) +} + +func (_SCA *SCATransactor) ValidateUserOp(opts *bind.TransactOpts, userOp UserOperation, userOpHash [32]byte, arg2 *big.Int) (*types.Transaction, error) { + return _SCA.contract.Transact(opts, "validateUserOp", userOp, userOpHash, arg2) +} + +func (_SCA *SCASession) ValidateUserOp(userOp UserOperation, userOpHash [32]byte, arg2 *big.Int) (*types.Transaction, error) { + return _SCA.Contract.ValidateUserOp(&_SCA.TransactOpts, userOp, userOpHash, arg2) +} + +func (_SCA *SCATransactorSession) ValidateUserOp(userOp UserOperation, userOpHash [32]byte, arg2 *big.Int) (*types.Transaction, error) { + return _SCA.Contract.ValidateUserOp(&_SCA.TransactOpts, userOp, userOpHash, arg2) +} + +func (_SCA *SCA) Address() common.Address { + return _SCA.address +} + +type SCAInterface interface { + IEntryPoint(opts *bind.CallOpts) (common.Address, error) + + IOwner(opts *bind.CallOpts) (common.Address, error) + + SNonce(opts *bind.CallOpts) (*big.Int, error) + + ExecuteTransactionFromEntryPoint(opts *bind.TransactOpts, to common.Address, value *big.Int, deadline *big.Int, data []byte) (*types.Transaction, error) + + ValidateUserOp(opts *bind.TransactOpts, userOp UserOperation, userOpHash [32]byte, arg2 *big.Int) (*types.Transaction, error) + + Address() common.Address +} diff --git a/core/gethwrappers/transmission/generated/smart_contract_account_factory/smart_contract_account_factory.go b/core/gethwrappers/transmission/generated/smart_contract_account_factory/smart_contract_account_factory.go new file mode 100644 index 
00000000..263d5d1f --- /dev/null +++ b/core/gethwrappers/transmission/generated/smart_contract_account_factory/smart_contract_account_factory.go @@ -0,0 +1,333 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package smart_contract_account_factory + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var SmartContractAccountFactoryMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"DeploymentFailed\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"scaAddress\",\"type\":\"address\"}],\"name\":\"ContractCreated\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"abiEncodedOwnerAddress\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"initCode\",\"type\":\"bytes\"}],\"name\":\"deploySmartContractAccount\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"scaAddress\",\"type\":\"address\"}],\"stateMutability\":\"payable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b5061021e806100206000396000f3fe60806040526004361061001e5760003560e01c80630af4926f14610023575b600080fd5b610036610031366004610138565b61005f565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200160405180910390f35b6000828251836020016000f5905073ffffffffffffffffffffffffffffffffffffffff81166100ba576040517f3011642500000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60405173ffffffffffffffffffffffffffffffffffffffff821681527fcf78cf0d6f3d8371e1075c69c492ab4ec5d8cf23a1a239b6a51a1d00be7ca3129060200160405180910390a192915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000806040838503121561014b57600080fd5b82359150602083013567ffffffffffffffff8082111561016a57600080fd5b818501915085601f83011261017e57600080fd5b81358181111561019057610190610109565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156101d6576101d6610109565b816040528281528860208487010111156101ef57600080fd5b826020860160208301376000602084830101528095505050505050925092905056fea164736f6c634300080f000a", +} + +var SmartContractAccountFactoryABI = SmartContractAccountFactoryMetaData.ABI + +var SmartContractAccountFactoryBin = SmartContractAccountFactoryMetaData.Bin + +func DeploySmartContractAccountFactory(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *SmartContractAccountFactory, error) { + parsed, err := SmartContractAccountFactoryMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(SmartContractAccountFactoryBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &SmartContractAccountFactory{address: address, abi: *parsed, 
SmartContractAccountFactoryCaller: SmartContractAccountFactoryCaller{contract: contract}, SmartContractAccountFactoryTransactor: SmartContractAccountFactoryTransactor{contract: contract}, SmartContractAccountFactoryFilterer: SmartContractAccountFactoryFilterer{contract: contract}}, nil +} + +type SmartContractAccountFactory struct { + address common.Address + abi abi.ABI + SmartContractAccountFactoryCaller + SmartContractAccountFactoryTransactor + SmartContractAccountFactoryFilterer +} + +type SmartContractAccountFactoryCaller struct { + contract *bind.BoundContract +} + +type SmartContractAccountFactoryTransactor struct { + contract *bind.BoundContract +} + +type SmartContractAccountFactoryFilterer struct { + contract *bind.BoundContract +} + +type SmartContractAccountFactorySession struct { + Contract *SmartContractAccountFactory + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type SmartContractAccountFactoryCallerSession struct { + Contract *SmartContractAccountFactoryCaller + CallOpts bind.CallOpts +} + +type SmartContractAccountFactoryTransactorSession struct { + Contract *SmartContractAccountFactoryTransactor + TransactOpts bind.TransactOpts +} + +type SmartContractAccountFactoryRaw struct { + Contract *SmartContractAccountFactory +} + +type SmartContractAccountFactoryCallerRaw struct { + Contract *SmartContractAccountFactoryCaller +} + +type SmartContractAccountFactoryTransactorRaw struct { + Contract *SmartContractAccountFactoryTransactor +} + +func NewSmartContractAccountFactory(address common.Address, backend bind.ContractBackend) (*SmartContractAccountFactory, error) { + abi, err := abi.JSON(strings.NewReader(SmartContractAccountFactoryABI)) + if err != nil { + return nil, err + } + contract, err := bindSmartContractAccountFactory(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &SmartContractAccountFactory{address: address, abi: abi, SmartContractAccountFactoryCaller: 
SmartContractAccountFactoryCaller{contract: contract}, SmartContractAccountFactoryTransactor: SmartContractAccountFactoryTransactor{contract: contract}, SmartContractAccountFactoryFilterer: SmartContractAccountFactoryFilterer{contract: contract}}, nil +} + +func NewSmartContractAccountFactoryCaller(address common.Address, caller bind.ContractCaller) (*SmartContractAccountFactoryCaller, error) { + contract, err := bindSmartContractAccountFactory(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &SmartContractAccountFactoryCaller{contract: contract}, nil +} + +func NewSmartContractAccountFactoryTransactor(address common.Address, transactor bind.ContractTransactor) (*SmartContractAccountFactoryTransactor, error) { + contract, err := bindSmartContractAccountFactory(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &SmartContractAccountFactoryTransactor{contract: contract}, nil +} + +func NewSmartContractAccountFactoryFilterer(address common.Address, filterer bind.ContractFilterer) (*SmartContractAccountFactoryFilterer, error) { + contract, err := bindSmartContractAccountFactory(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &SmartContractAccountFactoryFilterer{contract: contract}, nil +} + +func bindSmartContractAccountFactory(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := SmartContractAccountFactoryMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_SmartContractAccountFactory *SmartContractAccountFactoryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _SmartContractAccountFactory.Contract.SmartContractAccountFactoryCaller.contract.Call(opts, result, method, params...) 
+} + +func (_SmartContractAccountFactory *SmartContractAccountFactoryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _SmartContractAccountFactory.Contract.SmartContractAccountFactoryTransactor.contract.Transfer(opts) +} + +func (_SmartContractAccountFactory *SmartContractAccountFactoryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _SmartContractAccountFactory.Contract.SmartContractAccountFactoryTransactor.contract.Transact(opts, method, params...) +} + +func (_SmartContractAccountFactory *SmartContractAccountFactoryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _SmartContractAccountFactory.Contract.contract.Call(opts, result, method, params...) +} + +func (_SmartContractAccountFactory *SmartContractAccountFactoryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _SmartContractAccountFactory.Contract.contract.Transfer(opts) +} + +func (_SmartContractAccountFactory *SmartContractAccountFactoryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _SmartContractAccountFactory.Contract.contract.Transact(opts, method, params...) 
+} + +func (_SmartContractAccountFactory *SmartContractAccountFactoryTransactor) DeploySmartContractAccount(opts *bind.TransactOpts, abiEncodedOwnerAddress [32]byte, initCode []byte) (*types.Transaction, error) { + return _SmartContractAccountFactory.contract.Transact(opts, "deploySmartContractAccount", abiEncodedOwnerAddress, initCode) +} + +func (_SmartContractAccountFactory *SmartContractAccountFactorySession) DeploySmartContractAccount(abiEncodedOwnerAddress [32]byte, initCode []byte) (*types.Transaction, error) { + return _SmartContractAccountFactory.Contract.DeploySmartContractAccount(&_SmartContractAccountFactory.TransactOpts, abiEncodedOwnerAddress, initCode) +} + +func (_SmartContractAccountFactory *SmartContractAccountFactoryTransactorSession) DeploySmartContractAccount(abiEncodedOwnerAddress [32]byte, initCode []byte) (*types.Transaction, error) { + return _SmartContractAccountFactory.Contract.DeploySmartContractAccount(&_SmartContractAccountFactory.TransactOpts, abiEncodedOwnerAddress, initCode) +} + +type SmartContractAccountFactoryContractCreatedIterator struct { + Event *SmartContractAccountFactoryContractCreated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *SmartContractAccountFactoryContractCreatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(SmartContractAccountFactoryContractCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(SmartContractAccountFactoryContractCreated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + 
return it.Next() + } +} + +func (it *SmartContractAccountFactoryContractCreatedIterator) Error() error { + return it.fail +} + +func (it *SmartContractAccountFactoryContractCreatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type SmartContractAccountFactoryContractCreated struct { + ScaAddress common.Address + Raw types.Log +} + +func (_SmartContractAccountFactory *SmartContractAccountFactoryFilterer) FilterContractCreated(opts *bind.FilterOpts) (*SmartContractAccountFactoryContractCreatedIterator, error) { + + logs, sub, err := _SmartContractAccountFactory.contract.FilterLogs(opts, "ContractCreated") + if err != nil { + return nil, err + } + return &SmartContractAccountFactoryContractCreatedIterator{contract: _SmartContractAccountFactory.contract, event: "ContractCreated", logs: logs, sub: sub}, nil +} + +func (_SmartContractAccountFactory *SmartContractAccountFactoryFilterer) WatchContractCreated(opts *bind.WatchOpts, sink chan<- *SmartContractAccountFactoryContractCreated) (event.Subscription, error) { + + logs, sub, err := _SmartContractAccountFactory.contract.WatchLogs(opts, "ContractCreated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(SmartContractAccountFactoryContractCreated) + if err := _SmartContractAccountFactory.contract.UnpackLog(event, "ContractCreated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_SmartContractAccountFactory *SmartContractAccountFactoryFilterer) ParseContractCreated(log types.Log) (*SmartContractAccountFactoryContractCreated, error) { + event := new(SmartContractAccountFactoryContractCreated) + if err := 
_SmartContractAccountFactory.contract.UnpackLog(event, "ContractCreated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_SmartContractAccountFactory *SmartContractAccountFactory) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _SmartContractAccountFactory.abi.Events["ContractCreated"].ID: + return _SmartContractAccountFactory.ParseContractCreated(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (SmartContractAccountFactoryContractCreated) Topic() common.Hash { + return common.HexToHash("0xcf78cf0d6f3d8371e1075c69c492ab4ec5d8cf23a1a239b6a51a1d00be7ca312") +} + +func (_SmartContractAccountFactory *SmartContractAccountFactory) Address() common.Address { + return _SmartContractAccountFactory.address +} + +type SmartContractAccountFactoryInterface interface { + DeploySmartContractAccount(opts *bind.TransactOpts, abiEncodedOwnerAddress [32]byte, initCode []byte) (*types.Transaction, error) + + FilterContractCreated(opts *bind.FilterOpts) (*SmartContractAccountFactoryContractCreatedIterator, error) + + WatchContractCreated(opts *bind.WatchOpts, sink chan<- *SmartContractAccountFactoryContractCreated) (event.Subscription, error) + + ParseContractCreated(log types.Log) (*SmartContractAccountFactoryContractCreated, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/gethwrappers/transmission/generated/smart_contract_account_helper/smart_contract_account_helper.go b/core/gethwrappers/transmission/generated/smart_contract_account_helper/smart_contract_account_helper.go new file mode 100644 index 00000000..d951227c --- /dev/null +++ b/core/gethwrappers/transmission/generated/smart_contract_account_helper/smart_contract_account_helper.go @@ -0,0 +1,322 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package smart_contract_account_helper + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var SmartContractAccountHelperMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"entryPoint\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"factory\",\"type\":\"address\"}],\"name\":\"calculateSmartContractAccountAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"topupThreshold\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"topupAmount\",\"type\":\"uint256\"}],\"name\":\"getAbiEncodedDirectRequestData\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"endContract\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"deadline\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getFullEndTxEncoding\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"encoding\",\"type\":\"bytes\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name
\":\"userOpHash\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"scaAddress\",\"type\":\"address\"}],\"name\":\"getFullHashForSigning\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"factory\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"entryPoint\",\"type\":\"address\"}],\"name\":\"getInitCode\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"initCode\",\"type\":\"bytes\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"entryPoint\",\"type\":\"address\"}],\"name\":\"getSCAInitCodeWithConstructor\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"initCode\",\"type\":\"bytes\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]", + Bin: 
"0x61162261003a600b82828239805160001a60731461002d57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe730000000000000000000000000000000000000000301460806040526004361061007c5760003560e01c8063e0237bef1161005a578063e0237bef14610134578063e464b3631461016c578063fc59bac31461017f57600080fd5b80632c86cb35146100815780634b770f561461010057806382311e3314610113575b600080fd5b6100ea61008f36600461076b565b604080516060808201835273ffffffffffffffffffffffffffffffffffffffff959095168082526020808301958652918301938452825191820152925183820152905182840152805180830390930183526080909101905290565b6040516100f79190610818565b60405180910390f35b6100ea61010e36600461082b565b610192565b61012661012136600461086e565b610336565b6040519081526020016100f7565b61014761014236600461082b565b610454565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100f7565b6100ea61017a36600461089a565b6105df565b6100ea61018d3660046108f3565b61069d565b6040516060907fffffffffffffffffffffffffffffffffffffffff00000000000000000000000084831b16906000906101cd60208201610735565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f90910116604081815273ffffffffffffffffffffffffffffffffffffffff8881166020840152871690820152606001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529082905261026292916020016109e6565b60405160208183030381529060405290508560601b630af4926f60e01b8383604051602401610292929190610a15565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152918152602080830180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff00000000000000000000000000000000000000000000000000000000909516949094179093525161031c939201610a36565b604051602081830303815290604052925050509392505050565b600061044d8383604080517f4750045d47fce615521b32cee713ff8db50147e98aec5ca94926b52651ca3fa060208083019190915281830194909452815180820383018152606080830184528151918601919091207f190000000000000000000000000000000000000000000000000000000000000
060808401527f010000000000000000000000000000000000000000000000000000000000000060818401527f1c7d3b72b37a35523e273aaadd7b4cd66f618bb81429ab053412d51f50ccea6160828401524660a284015293901b7fffffffffffffffffffffffffffffffffffffffff0000000000000000000000001660c282015260d6808201939093528151808203909301835260f6019052805191012090565b9392505050565b6040516000907fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606086901b1690829061049060208201610735565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f90910116604081815273ffffffffffffffffffffffffffffffffffffffff8981166020840152881690820152606001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529082905261052592916020016109e6565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001815282825280516020918201207fff000000000000000000000000000000000000000000000000000000000000008285015260609790971b7fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166021840152603583019490945260558083019690965280518083039096018652607590910190525082519201919091209392505050565b6060604051806020016105f190610735565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f90910116604081815273ffffffffffffffffffffffffffffffffffffffff8681166020840152851690820152606001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529082905261068692916020016109e6565b604051602081830303815290604052905092915050565b60607f89553be40000000000000000000000000000000000000000000000000000000085856106cc8642610a7e565b856040516020016106e09493929190610abd565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529082905261071c9291602001610b02565b6040516020818303038152906040529050949350505050565b610acb80610b4b83390190565b803573ffffffffffffffffffffffffffffffffffffffff8116811461076657600080fd5b919050565b60008060006060848603121561078057600080fd5b61078984610742565b956020850135955060409
09401359392505050565b60005b838110156107b95781810151838201526020016107a1565b838111156107c8576000848401525b50505050565b600081518084526107e681602086016020860161079e565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061044d60208301846107ce565b60008060006060848603121561084057600080fd5b61084984610742565b925061085760208501610742565b915061086560408501610742565b90509250925092565b6000806040838503121561088157600080fd5b8235915061089160208401610742565b90509250929050565b600080604083850312156108ad57600080fd5b6108b683610742565b915061089160208401610742565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000806000806080858703121561090957600080fd5b61091285610742565b93506020850135925060408501359150606085013567ffffffffffffffff8082111561093d57600080fd5b818701915087601f83011261095157600080fd5b813581811115610963576109636108c4565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156109a9576109a96108c4565b816040528281528a60208487010111156109c257600080fd5b82602086016020830137600060208483010152809550505050505092959194509250565b600083516109f881846020880161079e565b835190830190610a0c81836020880161079e565b01949350505050565b828152604060208201526000610a2e60408301846107ce565b949350505050565b7fffffffffffffffffffffffffffffffffffffffff0000000000000000000000008316815260008251610a7081601485016020870161079e565b919091016014019392505050565b60008219821115610ab8577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b500190565b73ffffffffffffffffffffffffffffffffffffffff85168152836020820152826040820152608060608201526000610af860808301846107ce565b9695505050505050565b7fffffffff000000000000000000000000000000000000000000000000000000008316815260008251610b3c81600485016020870161079e565b91909101600401939250505056fe60c060405234801561001057600080fd5b50604051610acb380380610acb83398101604081905261002f91610062565b600
1600160a01b039182166080521660a052610095565b80516001600160a01b038116811461005d57600080fd5b919050565b6000806040838503121561007557600080fd5b61007e83610046565b915061008c60208401610046565b90509250929050565b60805160a051610a046100c760003960008181607101526103c301526000818161010101526102ee0152610a046000f3fe608060405234801561001057600080fd5b50600436106100675760003560e01c80637eccf63e116100505780637eccf63e146100de57806389553be4146100e7578063dba6335f146100fc57600080fd5b8063140fcfb11461006c5780633a871cdd146100bd575b600080fd5b6100937f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100d06100cb36600461063a565b610123565b6040519081526020016100b4565b6100d060005481565b6100fa6100f53660046106ce565b6103ab565b005b6100937f000000000000000000000000000000000000000000000000000000000000000081565b60008054846020013514610179576000546040517f7ba633940000000000000000000000000000000000000000000000000000000081526004810191909152602085013560248201526044015b60405180910390fd5b60006102908430604080517f4750045d47fce615521b32cee713ff8db50147e98aec5ca94926b52651ca3fa060208083019190915281830194909452815180820383018152606080830184528151918601919091207f190000000000000000000000000000000000000000000000000000000000000060808401527f010000000000000000000000000000000000000000000000000000000000000060818401527f1c7d3b72b37a35523e273aaadd7b4cd66f618bb81429ab053412d51f50ccea6160828401524660a284015293901b7fffffffffffffffffffffffffffffffffffffffff0000000000000000000000001660c282015260d6808201939093528151808203909301835260f6019052805191012090565b905060006102a261014087018761076b565b8080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509293505073ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016915061031c90508284610544565b73ffffffffffffffffffffffffffffffffffffffff161461034d576103446001600080610602565b925050506103a4565b600080549
08061035c83610806565b9091555060009050610371606088018861076b565b61037f91600490829061083e565b81019061038c9190610897565b509250505061039e6000826000610602565b93505050505b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161461041c576040517f4a0bfec1000000000000000000000000000000000000000000000000000000008152336004820152602401610170565b65ffffffffffff83161580159061043a57508265ffffffffffff1642115b15610481576040517f300249d700000000000000000000000000000000000000000000000000000000815265ffffffffffff84166004820152426024820152604401610170565b6000808673ffffffffffffffffffffffffffffffffffffffff168685856040516104ac929190610993565b60006040518083038185875af1925050503d80600081146104e9576040519150601f19603f3d011682016040523d82523d6000602084013e6104ee565b606091505b50915091508161053b578051600003610533576040517f20e9b5d200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805181602001fd5b50505050505050565b602082015160408084015184516000939284918791908110610568576105686109a3565b016020015160f81c905060018561058083601b6109d2565b6040805160008152602081018083529390935260ff90911690820152606081018590526080810184905260a0016020604051602081039080840390855afa1580156105cf573d6000803e3d6000fd5b50506040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00151979650505050505050565b600060d08265ffffffffffff16901b60a08465ffffffffffff16901b8561062a57600061062d565b60015b60ff161717949350505050565b60008060006060848603121561064f57600080fd5b833567ffffffffffffffff81111561066657600080fd5b8401610160818703121561067957600080fd5b95602085013595506040909401359392505050565b73ffffffffffffffffffffffffffffffffffffffff811681146106b057600080fd5b50565b803565ffffffffffff811681146106c957600080fd5b919050565b6000806000806000608086880312156106e657600080fd5b85356106f18161068e565b945060208601359350610706604087016106b3565b9250606086013567ffffffffffffffff8082111561072357600080fd5b818801915088601f83011261073757600080fd5b81358
181111561074657600080fd5b89602082850101111561075857600080fd5b9699959850939650602001949392505050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18436030181126107a057600080fd5b83018035915067ffffffffffffffff8211156107bb57600080fd5b6020019150368190038213156107d057600080fd5b9250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610837576108376107d7565b5060010190565b6000808585111561084e57600080fd5b8386111561085b57600080fd5b5050820193919092039150565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600080600080608085870312156108ad57600080fd5b84356108b88161068e565b9350602085013592506108cd604086016106b3565b9150606085013567ffffffffffffffff808211156108ea57600080fd5b818701915087601f8301126108fe57600080fd5b81358181111561091057610910610868565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190838211818310171561095657610956610868565b816040528281528a602084870101111561096f57600080fd5b82602086016020830137600060208483010152809550505050505092959194509250565b8183823760009101908152919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600060ff821660ff84168060ff038211156109ef576109ef6107d7565b01939250505056fea164736f6c634300080f000aa164736f6c634300080f000a", +} + +var SmartContractAccountHelperABI = SmartContractAccountHelperMetaData.ABI + +var SmartContractAccountHelperBin = SmartContractAccountHelperMetaData.Bin + +func DeploySmartContractAccountHelper(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *SmartContractAccountHelper, error) { + parsed, err := SmartContractAccountHelperMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") 
+ } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(SmartContractAccountHelperBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &SmartContractAccountHelper{address: address, abi: *parsed, SmartContractAccountHelperCaller: SmartContractAccountHelperCaller{contract: contract}, SmartContractAccountHelperTransactor: SmartContractAccountHelperTransactor{contract: contract}, SmartContractAccountHelperFilterer: SmartContractAccountHelperFilterer{contract: contract}}, nil +} + +type SmartContractAccountHelper struct { + address common.Address + abi abi.ABI + SmartContractAccountHelperCaller + SmartContractAccountHelperTransactor + SmartContractAccountHelperFilterer +} + +type SmartContractAccountHelperCaller struct { + contract *bind.BoundContract +} + +type SmartContractAccountHelperTransactor struct { + contract *bind.BoundContract +} + +type SmartContractAccountHelperFilterer struct { + contract *bind.BoundContract +} + +type SmartContractAccountHelperSession struct { + Contract *SmartContractAccountHelper + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type SmartContractAccountHelperCallerSession struct { + Contract *SmartContractAccountHelperCaller + CallOpts bind.CallOpts +} + +type SmartContractAccountHelperTransactorSession struct { + Contract *SmartContractAccountHelperTransactor + TransactOpts bind.TransactOpts +} + +type SmartContractAccountHelperRaw struct { + Contract *SmartContractAccountHelper +} + +type SmartContractAccountHelperCallerRaw struct { + Contract *SmartContractAccountHelperCaller +} + +type SmartContractAccountHelperTransactorRaw struct { + Contract *SmartContractAccountHelperTransactor +} + +func NewSmartContractAccountHelper(address common.Address, backend bind.ContractBackend) (*SmartContractAccountHelper, error) { + abi, err := abi.JSON(strings.NewReader(SmartContractAccountHelperABI)) + if err != nil { + return nil, err + } + contract, err 
:= bindSmartContractAccountHelper(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &SmartContractAccountHelper{address: address, abi: abi, SmartContractAccountHelperCaller: SmartContractAccountHelperCaller{contract: contract}, SmartContractAccountHelperTransactor: SmartContractAccountHelperTransactor{contract: contract}, SmartContractAccountHelperFilterer: SmartContractAccountHelperFilterer{contract: contract}}, nil +} + +func NewSmartContractAccountHelperCaller(address common.Address, caller bind.ContractCaller) (*SmartContractAccountHelperCaller, error) { + contract, err := bindSmartContractAccountHelper(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &SmartContractAccountHelperCaller{contract: contract}, nil +} + +func NewSmartContractAccountHelperTransactor(address common.Address, transactor bind.ContractTransactor) (*SmartContractAccountHelperTransactor, error) { + contract, err := bindSmartContractAccountHelper(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &SmartContractAccountHelperTransactor{contract: contract}, nil +} + +func NewSmartContractAccountHelperFilterer(address common.Address, filterer bind.ContractFilterer) (*SmartContractAccountHelperFilterer, error) { + contract, err := bindSmartContractAccountHelper(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &SmartContractAccountHelperFilterer{contract: contract}, nil +} + +func bindSmartContractAccountHelper(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := SmartContractAccountHelperMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params 
...interface{}) error { + return _SmartContractAccountHelper.Contract.SmartContractAccountHelperCaller.contract.Call(opts, result, method, params...) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _SmartContractAccountHelper.Contract.SmartContractAccountHelperTransactor.contract.Transfer(opts) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _SmartContractAccountHelper.Contract.SmartContractAccountHelperTransactor.contract.Transact(opts, method, params...) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _SmartContractAccountHelper.Contract.contract.Call(opts, result, method, params...) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _SmartContractAccountHelper.Contract.contract.Transfer(opts) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _SmartContractAccountHelper.Contract.contract.Transact(opts, method, params...) 
+} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCaller) CalculateSmartContractAccountAddress(opts *bind.CallOpts, owner common.Address, entryPoint common.Address, factory common.Address) (common.Address, error) { + var out []interface{} + err := _SmartContractAccountHelper.contract.Call(opts, &out, "calculateSmartContractAccountAddress", owner, entryPoint, factory) + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperSession) CalculateSmartContractAccountAddress(owner common.Address, entryPoint common.Address, factory common.Address) (common.Address, error) { + return _SmartContractAccountHelper.Contract.CalculateSmartContractAccountAddress(&_SmartContractAccountHelper.CallOpts, owner, entryPoint, factory) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCallerSession) CalculateSmartContractAccountAddress(owner common.Address, entryPoint common.Address, factory common.Address) (common.Address, error) { + return _SmartContractAccountHelper.Contract.CalculateSmartContractAccountAddress(&_SmartContractAccountHelper.CallOpts, owner, entryPoint, factory) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCaller) GetAbiEncodedDirectRequestData(opts *bind.CallOpts, recipient common.Address, topupThreshold *big.Int, topupAmount *big.Int) ([]byte, error) { + var out []interface{} + err := _SmartContractAccountHelper.contract.Call(opts, &out, "getAbiEncodedDirectRequestData", recipient, topupThreshold, topupAmount) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperSession) GetAbiEncodedDirectRequestData(recipient common.Address, topupThreshold *big.Int, topupAmount *big.Int) ([]byte, error) { + return 
_SmartContractAccountHelper.Contract.GetAbiEncodedDirectRequestData(&_SmartContractAccountHelper.CallOpts, recipient, topupThreshold, topupAmount) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCallerSession) GetAbiEncodedDirectRequestData(recipient common.Address, topupThreshold *big.Int, topupAmount *big.Int) ([]byte, error) { + return _SmartContractAccountHelper.Contract.GetAbiEncodedDirectRequestData(&_SmartContractAccountHelper.CallOpts, recipient, topupThreshold, topupAmount) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCaller) GetFullEndTxEncoding(opts *bind.CallOpts, endContract common.Address, value *big.Int, deadline *big.Int, data []byte) ([]byte, error) { + var out []interface{} + err := _SmartContractAccountHelper.contract.Call(opts, &out, "getFullEndTxEncoding", endContract, value, deadline, data) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperSession) GetFullEndTxEncoding(endContract common.Address, value *big.Int, deadline *big.Int, data []byte) ([]byte, error) { + return _SmartContractAccountHelper.Contract.GetFullEndTxEncoding(&_SmartContractAccountHelper.CallOpts, endContract, value, deadline, data) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCallerSession) GetFullEndTxEncoding(endContract common.Address, value *big.Int, deadline *big.Int, data []byte) ([]byte, error) { + return _SmartContractAccountHelper.Contract.GetFullEndTxEncoding(&_SmartContractAccountHelper.CallOpts, endContract, value, deadline, data) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCaller) GetFullHashForSigning(opts *bind.CallOpts, userOpHash [32]byte, scaAddress common.Address) ([32]byte, error) { + var out []interface{} + err := _SmartContractAccountHelper.contract.Call(opts, &out, "getFullHashForSigning", userOpHash, scaAddress) + + if 
err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperSession) GetFullHashForSigning(userOpHash [32]byte, scaAddress common.Address) ([32]byte, error) { + return _SmartContractAccountHelper.Contract.GetFullHashForSigning(&_SmartContractAccountHelper.CallOpts, userOpHash, scaAddress) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCallerSession) GetFullHashForSigning(userOpHash [32]byte, scaAddress common.Address) ([32]byte, error) { + return _SmartContractAccountHelper.Contract.GetFullHashForSigning(&_SmartContractAccountHelper.CallOpts, userOpHash, scaAddress) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCaller) GetInitCode(opts *bind.CallOpts, factory common.Address, owner common.Address, entryPoint common.Address) ([]byte, error) { + var out []interface{} + err := _SmartContractAccountHelper.contract.Call(opts, &out, "getInitCode", factory, owner, entryPoint) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperSession) GetInitCode(factory common.Address, owner common.Address, entryPoint common.Address) ([]byte, error) { + return _SmartContractAccountHelper.Contract.GetInitCode(&_SmartContractAccountHelper.CallOpts, factory, owner, entryPoint) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCallerSession) GetInitCode(factory common.Address, owner common.Address, entryPoint common.Address) ([]byte, error) { + return _SmartContractAccountHelper.Contract.GetInitCode(&_SmartContractAccountHelper.CallOpts, factory, owner, entryPoint) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCaller) GetSCAInitCodeWithConstructor(opts *bind.CallOpts, owner common.Address, entryPoint common.Address) ([]byte, 
error) { + var out []interface{} + err := _SmartContractAccountHelper.contract.Call(opts, &out, "getSCAInitCodeWithConstructor", owner, entryPoint) + + if err != nil { + return *new([]byte), err + } + + out0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return out0, err + +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperSession) GetSCAInitCodeWithConstructor(owner common.Address, entryPoint common.Address) ([]byte, error) { + return _SmartContractAccountHelper.Contract.GetSCAInitCodeWithConstructor(&_SmartContractAccountHelper.CallOpts, owner, entryPoint) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelperCallerSession) GetSCAInitCodeWithConstructor(owner common.Address, entryPoint common.Address) ([]byte, error) { + return _SmartContractAccountHelper.Contract.GetSCAInitCodeWithConstructor(&_SmartContractAccountHelper.CallOpts, owner, entryPoint) +} + +func (_SmartContractAccountHelper *SmartContractAccountHelper) Address() common.Address { + return _SmartContractAccountHelper.address +} + +type SmartContractAccountHelperInterface interface { + CalculateSmartContractAccountAddress(opts *bind.CallOpts, owner common.Address, entryPoint common.Address, factory common.Address) (common.Address, error) + + GetAbiEncodedDirectRequestData(opts *bind.CallOpts, recipient common.Address, topupThreshold *big.Int, topupAmount *big.Int) ([]byte, error) + + GetFullEndTxEncoding(opts *bind.CallOpts, endContract common.Address, value *big.Int, deadline *big.Int, data []byte) ([]byte, error) + + GetFullHashForSigning(opts *bind.CallOpts, userOpHash [32]byte, scaAddress common.Address) ([32]byte, error) + + GetInitCode(opts *bind.CallOpts, factory common.Address, owner common.Address, entryPoint common.Address) ([]byte, error) + + GetSCAInitCodeWithConstructor(opts *bind.CallOpts, owner common.Address, entryPoint common.Address) ([]byte, error) + + Address() common.Address +} diff --git 
a/core/gethwrappers/transmission/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/transmission/generation/generated-wrapper-dependency-versions-do-not-edit.txt new file mode 100644 index 00000000..6d5b5c22 --- /dev/null +++ b/core/gethwrappers/transmission/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -0,0 +1,9 @@ +GETH_VERSION: 1.13.8 +entry_point: ../../../contracts/solc/v0.8.15/EntryPoint/EntryPoint.abi ../../../contracts/solc/v0.8.15/EntryPoint/EntryPoint.bin 2cb4bb2ba3efa8df3dfb0a57eb3727d17b68fe202682024fa7cfb4faf026833e +greeter: ../../../contracts/solc/v0.8.15/Greeter.abi ../../../contracts/solc/v0.8.15/Greeter.bin 653dcba5c33a46292073939ce1e639372cf521c0ec2814d4c9f20c72f796f18c +greeter_wrapper: ../../../contracts/solc/v0.8.15/Greeter/Greeter.abi ../../../contracts/solc/v0.8.15/Greeter/Greeter.bin 653dcba5c33a46292073939ce1e639372cf521c0ec2814d4c9f20c72f796f18c +paymaster_wrapper: ../../../contracts/solc/v0.8.15/Paymaster/Paymaster.abi ../../../contracts/solc/v0.8.15/Paymaster/Paymaster.bin 189ef817a5b7a6ff53ddf35b1988465b8aec479c47b77236fe20bf7e67d48100 +sca: ../../../contracts/solc/v0.8.15/SCA.abi ../../../contracts/solc/v0.8.15/SCA.bin ae0f860cdac87d4ac505edbd228bd3ea1108550453aba67aebcb61f09cf70d0b +sca_wrapper: ../../../contracts/solc/v0.8.15/SCA/SCA.abi ../../../contracts/solc/v0.8.15/SCA/SCA.bin 2a8100fbdb41e6ce917ed333a624eaa4a8984b07e2d8d8ca6bba9bc9f74b05d7 +smart_contract_account_factory: ../../../contracts/solc/v0.8.15/SmartContractAccountFactory/SmartContractAccountFactory.abi ../../../contracts/solc/v0.8.15/SmartContractAccountFactory/SmartContractAccountFactory.bin a44d6fa2dbf9cb3441d6d637d89e1cd656f28b6bf4146f58d508067474bf845b +smart_contract_account_helper: ../../../contracts/solc/v0.8.15/SmartContractAccountHelper/SmartContractAccountHelper.abi ../../../contracts/solc/v0.8.15/SmartContractAccountHelper/SmartContractAccountHelper.bin 
22f960a74bd1581a12aa4f8f438a3f265f32f43682f5c1897ca50707b9982d56 diff --git a/core/gethwrappers/transmission/go_generate.go b/core/gethwrappers/transmission/go_generate.go new file mode 100644 index 00000000..54c6ecf9 --- /dev/null +++ b/core/gethwrappers/transmission/go_generate.go @@ -0,0 +1,11 @@ +// Package gethwrappers provides tools for wrapping solidity contracts with +// golang packages, using abigen. +package gethwrappers + +// Transmission +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/Greeter/Greeter.abi ../../../contracts/solc/v0.8.15/Greeter/Greeter.bin Greeter greeter_wrapper +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/SmartContractAccountFactory/SmartContractAccountFactory.abi ../../../contracts/solc/v0.8.15/SmartContractAccountFactory/SmartContractAccountFactory.bin SmartContractAccountFactory smart_contract_account_factory +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/EntryPoint/EntryPoint.abi ../../../contracts/solc/v0.8.15/EntryPoint/EntryPoint.bin EntryPoint entry_point +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/SmartContractAccountHelper/SmartContractAccountHelper.abi ../../../contracts/solc/v0.8.15/SmartContractAccountHelper/SmartContractAccountHelper.bin SmartContractAccountHelper smart_contract_account_helper +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/SCA/SCA.abi ../../../contracts/solc/v0.8.15/SCA/SCA.bin SCA sca_wrapper +//go:generate go run ../generation/generate/wrap.go ../../../contracts/solc/v0.8.15/Paymaster/Paymaster.abi ../../../contracts/solc/v0.8.15/Paymaster/Paymaster.bin Paymaster paymaster_wrapper diff --git a/core/gethwrappers/utils.go b/core/gethwrappers/utils.go new file mode 100644 index 00000000..045f8e9c --- /dev/null +++ b/core/gethwrappers/utils.go @@ -0,0 +1,86 @@ +package gethwrappers + +import ( + "crypto/sha256" + 
"fmt" + "os" + "path/filepath" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// VersionHash is the hash used to detect changes in the underlying contract +func VersionHash(abiPath string, binPath string) (hash string) { + abi, err := os.ReadFile(abiPath) + if err != nil { + Exit("Could not read abi path to create version hash", err) + } + bin := []byte("") + if binPath != "-" { + bin, err = os.ReadFile(binPath) + if err != nil { + Exit("Could not read abi path to create version hash", err) + } + } + hashMsg := string(abi) + string(bin) + "\n" + return fmt.Sprintf("%x", sha256.Sum256([]byte(hashMsg))) +} + +func Exit(msg string, err error) { + if err != nil { + fmt.Println(msg+":", err) + } else { + fmt.Println(msg) + } + os.Exit(1) +} + +// GetProjectRoot returns the root of the plugin project +func GetProjectRoot() (rootPath string) { + root, err := os.Getwd() + if err != nil { + Exit("could not get current working directory while seeking project root", + err) + } + for root != "/" { // Walk up path to find dir containing go.mod + if _, err := os.Stat(filepath.Join(root, "go.mod")); !os.IsNotExist(err) { + return root + } + root = filepath.Dir(root) + } + Exit("could not find project root", nil) + panic("can't get here") +} + +func TempDir(dirPrefix string) (string, func()) { + tmpDir, err := os.MkdirTemp("", dirPrefix+"-contractWrapper") + if err != nil { + Exit("failed to create temporary working directory", err) + } + return tmpDir, func() { + if err := os.RemoveAll(tmpDir); err != nil { + fmt.Println("failure while cleaning up temporary working directory:", err) + } + } +} + +func DeepCopyLog(l types.Log) types.Log { + var cpy types.Log + cpy.Address = l.Address + if l.Topics != nil { + cpy.Topics = make([]common.Hash, len(l.Topics)) + copy(cpy.Topics, l.Topics) + } + if l.Data != nil { + cpy.Data = make([]byte, len(l.Data)) + copy(cpy.Data, l.Data) + } + cpy.BlockNumber = l.BlockNumber + cpy.TxHash = 
l.TxHash + cpy.TxIndex = l.TxIndex + cpy.BlockHash = l.BlockHash + cpy.Index = l.Index + cpy.Removed = l.Removed + return cpy +} diff --git a/core/gethwrappers/versions.go b/core/gethwrappers/versions.go new file mode 100644 index 00000000..acdefd06 --- /dev/null +++ b/core/gethwrappers/versions.go @@ -0,0 +1,142 @@ +package gethwrappers + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + + "github.com/pkg/errors" + "go.uber.org/multierr" +) + +// ContractVersion records information about the solidity compiler artifact a +// golang contract wrapper package depends on. +type ContractVersion struct { + // Hash of the artifact at the timem the wrapper was last generated + Hash string + // Path to compiled abi file + AbiPath string + // Path to compiled bin file (if exists, this can be empty) + BinaryPath string +} + +// IntegratedVersion carries the full versioning information checked in this test +type IntegratedVersion struct { + // Version of geth last used to generate the wrappers + GethVersion string + // { golang-pkg-name: version_info } + ContractVersions map[string]ContractVersion +} + +func dbPath() (path string, err error) { + dirOfThisTest, err := os.Getwd() + if err != nil { + return "", err + } + dBBasename := "generated-wrapper-dependency-versions-do-not-edit.txt" + return filepath.Join(dirOfThisTest, "generation", dBBasename), nil +} + +func versionsDBLineReader() (*bufio.Scanner, error) { + versionsDBPath, err := dbPath() + if err != nil { + return nil, errors.Wrapf(err, "could not construct versions DB path") + } + versionsDBFile, err := os.Open(versionsDBPath) + if err != nil { + return nil, errors.Wrapf(err, "could not open versions database") + } + return bufio.NewScanner(versionsDBFile), nil + +} + +// ReadVersionsDB populates an IntegratedVersion with all the info in the +// versions DB +func ReadVersionsDB() (*IntegratedVersion, error) { + rv := IntegratedVersion{} + rv.ContractVersions = 
make(map[string]ContractVersion) + db, err := versionsDBLineReader() + if err != nil { + return nil, err + } + for db.Scan() { + line := strings.Fields(db.Text()) + if !strings.HasSuffix(line[0], ":") { + return nil, errors.Errorf( + `each line in versions.txt should start with "$TOPIC:"`) + } + topic := stripTrailingColon(line[0], "") + if topic == "GETH_VERSION" { + if len(line) != 2 { + return nil, errors.Errorf("GETH_VERSION line should contain geth "+ + "version, and only that: %s", line) + } + if rv.GethVersion != "" { + return nil, errors.Errorf("more than one geth version") + } + rv.GethVersion = line[1] + } else { // It's a wrapper from a compiler artifact + if len(line) != 4 { + return nil, errors.Errorf(`"%s" should have four elements `+ + `": "`, + db.Text()) + } + _, alreadyExists := rv.ContractVersions[topic] + if alreadyExists { + return nil, errors.Errorf(`topic "%s" already mentioned`, topic) + } + rv.ContractVersions[topic] = ContractVersion{ + AbiPath: line[1], BinaryPath: line[2], Hash: line[3], + } + } + } + return &rv, nil +} + +var stripTrailingColon = regexp.MustCompile(":$").ReplaceAllString + +func WriteVersionsDB(db *IntegratedVersion) (err error) { + versionsDBPath, err := dbPath() + if err != nil { + return errors.Wrap(err, "could not construct path to versions DB") + } + f, err := os.Create(versionsDBPath) + if err != nil { + return errors.Wrapf(err, "while opening %s", versionsDBPath) + } + defer func() { + if cerr := f.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + gethLine := "GETH_VERSION: " + db.GethVersion + "\n" + n, err := f.WriteString(gethLine) + if err != nil { + return errors.Wrapf(err, "while recording geth version line") + } + if n != len(gethLine) { + return errors.Errorf("failed to write entire geth version line, %s", gethLine) + } + var pkgNames []string + for name := range db.ContractVersions { + pkgNames = append(pkgNames, name) + } + sort.Strings(pkgNames) + for _, name := range pkgNames { + 
vinfo := db.ContractVersions[name] + versionLine := fmt.Sprintf("%s: %s %s %s\n", name, + vinfo.AbiPath, vinfo.BinaryPath, vinfo.Hash) + n, err = f.WriteString(versionLine) + if err != nil { + return errors.Wrapf(err, "while recording %s version line", name) + } + if n != len(versionLine) { + return errors.Errorf("failed to write entire version line %s", versionLine) + } + } + return nil +} diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go new file mode 100644 index 00000000..9f159cca --- /dev/null +++ b/core/internal/cltest/cltest.go @@ -0,0 +1,1574 @@ +package cltest + +import ( + "bytes" + "context" + crand "crypto/rand" + "encoding/json" + "errors" + "fmt" + "io" + "math/big" + "math/rand" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/gorilla/securecookie" + "github.com/gorilla/sessions" + "github.com/manyminds/api2go/jsonapi" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + + "github.com/jmoiron/sqlx" + + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/common/client" + commonmocks "github.com/goplugin/pluginv3.0/v2/common/types/mocks" + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + evmconfig 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + clhttptest "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/httptest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/keystest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/cosmoskey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgencryptkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgsignkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/starkkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + 
"github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/cache" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + clsessions "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/static" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web" + webauth "github.com/goplugin/pluginv3.0/v2/core/web/auth" + webpresenters "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + "github.com/goplugin/pluginv3.0/v2/plugins" + + // Force import of pgtest to ensure that txdb is registered as a DB driver + _ "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" +) + +const ( + // Collection of test fixture DB user emails per role + APIEmailAdmin = "apiuser@plugin.test" + APIEmailEdit = "apiuser-edit@plugin.test" + APIEmailRun = "apiuser-run@plugin.test" + APIEmailViewOnly = "apiuser-view-only@plugin.test" + // Password just a password we use everywhere for testing + Password = testutils.Password + // SessionSecret is the hardcoded secret solely used for test + SessionSecret = "clsession_test_secret" + // DefaultPeerID is the peer ID of the default p2p key + DefaultPeerID = configtest.DefaultPeerID + // DefaultOCRKeyBundleID is the ID of the default ocr key bundle + DefaultOCRKeyBundleID = "f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5" + // DefaultOCR2KeyBundleID is the ID of the fixture ocr2 key bundle + DefaultOCR2KeyBundleID = "92be59c45d0d7b192ef88d391f444ea7c78644f8607f567aab11d53668c27a4d" + // Private key seed of test keys created with `big.NewInt(1)`, representations of value present in `scrub_logs` script + KeyBigIntSeed = 1 +) + +var ( + DefaultP2PPeerID p2pkey.PeerID + FixtureChainID = 
*testutils.FixtureChainID + + DefaultCosmosKey = cosmoskey.MustNewInsecure(keystest.NewRandReaderFromSeed(KeyBigIntSeed)) + DefaultCSAKey = csakey.MustNewV2XXXTestingOnly(big.NewInt(KeyBigIntSeed)) + DefaultOCRKey = ocrkey.MustNewV2XXXTestingOnly(big.NewInt(KeyBigIntSeed)) + DefaultOCR2Key = ocr2key.MustNewInsecure(keystest.NewRandReaderFromSeed(KeyBigIntSeed), "evm") + DefaultP2PKey = p2pkey.MustNewV2XXXTestingOnly(big.NewInt(KeyBigIntSeed)) + DefaultSolanaKey = solkey.MustNewInsecure(keystest.NewRandReaderFromSeed(KeyBigIntSeed)) + DefaultStarkNetKey = starkkey.MustNewInsecure(keystest.NewRandReaderFromSeed(KeyBigIntSeed)) + DefaultVRFKey = vrfkey.MustNewV2XXXTestingOnly(big.NewInt(KeyBigIntSeed)) + DefaultDKGSignKey = dkgsignkey.MustNewXXXTestingOnly(big.NewInt(KeyBigIntSeed)) + DefaultDKGEncryptKey = dkgencryptkey.MustNewXXXTestingOnly(big.NewInt(KeyBigIntSeed)) +) + +func init() { + gin.SetMode(gin.TestMode) + + gomega.SetDefaultEventuallyTimeout(testutils.DefaultWaitTimeout) + gomega.SetDefaultEventuallyPollingInterval(DBPollingInterval) + gomega.SetDefaultConsistentlyDuration(time.Second) + gomega.SetDefaultConsistentlyPollingInterval(100 * time.Millisecond) + + logger.InitColor(true) + gin.DebugPrintRouteFunc = func(httpMethod, absolutePath, handlerName string, nuHandlers int) { + fmt.Printf("[gin] %-6s %-25s --> %s (%d handlers)\n", httpMethod, absolutePath, handlerName, nuHandlers) + } + + err := DefaultP2PPeerID.UnmarshalString(configtest.DefaultPeerID) + if err != nil { + panic(err) + } +} + +func NewRandomPositiveInt64() int64 { + id := rand.Int63() + return id +} + +func MustRandomBytes(t *testing.T, l int) (b []byte) { + t.Helper() + + b = make([]byte, l) + _, err := crand.Read(b) + if err != nil { + t.Fatal(err) + } + return b +} + +func FormatWithPrefixedChainID(chainID, id string) string { + return fmt.Sprintf("%s/%s", chainID, id) +} + +type JobPipelineV2TestHelper struct { + Prm pipeline.ORM + Jrm job.ORM + Pr pipeline.Runner +} + +type 
JobPipelineConfig interface { + pipeline.Config + MaxSuccessfulRuns() uint64 +} + +func NewJobPipelineV2(t testing.TB, cfg pipeline.BridgeConfig, jpcfg JobPipelineConfig, dbCfg pg.QConfig, legacyChains legacyevm.LegacyChainContainer, db *sqlx.DB, keyStore keystore.Master, restrictedHTTPClient, unrestrictedHTTPClient *http.Client) JobPipelineV2TestHelper { + lggr := logger.TestLogger(t) + prm := pipeline.NewORM(db, lggr, dbCfg, jpcfg.MaxSuccessfulRuns()) + btORM := bridges.NewORM(db, lggr, dbCfg) + jrm := job.NewORM(db, prm, btORM, keyStore, lggr, dbCfg) + pr := pipeline.NewRunner(prm, btORM, jpcfg, cfg, legacyChains, keyStore.Eth(), keyStore.VRF(), lggr, restrictedHTTPClient, unrestrictedHTTPClient) + return JobPipelineV2TestHelper{ + prm, + jrm, + pr, + } +} + +// TestApplication holds the test application and test servers +type TestApplication struct { + t testing.TB + *plugin.PluginApplication + Logger logger.Logger + Server *httptest.Server + Started bool + Backend *backends.SimulatedBackend + Keys []ethkey.KeyV2 +} + +// NewApplicationEVMDisabled creates a new application with default config but EVM disabled +// Useful for testing controllers +func NewApplicationEVMDisabled(t *testing.T) *TestApplication { + t.Helper() + + c := configtest.NewGeneralConfig(t, nil) + + return NewApplicationWithConfig(t, c) +} + +// NewApplication creates a New TestApplication along with a NewConfig +// It mocks the keystore with no keys or accounts by default +func NewApplication(t testing.TB, flagsAndDeps ...interface{}) *TestApplication { + t.Helper() + + c := configtest.NewGeneralConfig(t, nil) + + return NewApplicationWithConfig(t, c, flagsAndDeps...) 
+} + +// NewApplicationWithKey creates a new TestApplication along with a new config +// It uses the native keystore and will load any keys that are in the database +func NewApplicationWithKey(t *testing.T, flagsAndDeps ...interface{}) *TestApplication { + t.Helper() + + config := configtest.NewGeneralConfig(t, nil) + return NewApplicationWithConfigAndKey(t, config, flagsAndDeps...) +} + +// NewApplicationWithConfigAndKey creates a new TestApplication with the given testorm +// it will also provide an unlocked account on the keystore +func NewApplicationWithConfigAndKey(t testing.TB, c plugin.GeneralConfig, flagsAndDeps ...interface{}) *TestApplication { + app := NewApplicationWithConfig(t, c, flagsAndDeps...) + + chainID := *ubig.New(&FixtureChainID) + for _, dep := range flagsAndDeps { + switch v := dep.(type) { + case *ubig.Big: + chainID = *v + } + } + + if len(app.Keys) == 0 { + k, _ := MustInsertRandomKey(t, app.KeyStore.Eth(), chainID) + app.Keys = []ethkey.KeyV2{k} + } else { + id, ks := chainID.ToInt(), app.KeyStore.Eth() + for _, k := range app.Keys { + ks.XXXTestingOnlyAdd(k) + require.NoError(t, ks.Add(k.Address, id)) + require.NoError(t, ks.Enable(k.Address, id)) + } + } + + return app +} + +func setKeys(t testing.TB, app *TestApplication, flagsAndDeps ...interface{}) (chainID ubig.Big) { + require.NoError(t, app.KeyStore.Unlock(Password)) + + for _, dep := range flagsAndDeps { + switch v := dep.(type) { + case ethkey.KeyV2: + app.Keys = append(app.Keys, v) + case p2pkey.KeyV2: + require.NoError(t, app.GetKeyStore().P2P().Add(v)) + case csakey.KeyV2: + require.NoError(t, app.GetKeyStore().CSA().Add(v)) + case ocr2key.KeyBundle: + require.NoError(t, app.GetKeyStore().OCR2().Add(v)) + } + } + + return +} + +const ( + UseRealExternalInitiatorManager = "UseRealExternalInitiatorManager" +) + +// NewApplicationWithConfig creates a New TestApplication with specified test config. +// This should only be used in full integration tests. 
For controller tests, see NewApplicationEVMDisabled. +func NewApplicationWithConfig(t testing.TB, cfg plugin.GeneralConfig, flagsAndDeps ...interface{}) *TestApplication { + t.Helper() + testutils.SkipShortDB(t) + + var lggr logger.Logger + for _, dep := range flagsAndDeps { + argLggr, is := dep.(logger.Logger) + if is { + lggr = argLggr + break + } + } + if lggr == nil { + lggr = logger.TestLogger(t) + } + + var auditLogger audit.AuditLogger + for _, dep := range flagsAndDeps { + audLgger, is := dep.(audit.AuditLogger) + if is { + auditLogger = audLgger + break + } + } + + if auditLogger == nil { + auditLogger = audit.NoopLogger + } + + url := cfg.Database().URL() + db, err := pg.NewConnection(url.String(), cfg.Database().Dialect(), cfg.Database()) + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, db.Close()) }) + + var ethClient evmclient.Client + var externalInitiatorManager webhook.ExternalInitiatorManager + externalInitiatorManager = &webhook.NullExternalInitiatorManager{} + var useRealExternalInitiatorManager bool + + for _, flag := range flagsAndDeps { + switch dep := flag.(type) { + case evmclient.Client: + ethClient = dep + case webhook.ExternalInitiatorManager: + externalInitiatorManager = dep + default: + switch flag { + case UseRealExternalInitiatorManager: + externalInitiatorManager = webhook.NewExternalInitiatorManager(db, clhttptest.NewTestLocalOnlyHTTPClient(), lggr, cfg.Database()) + } + + } + } + + keyStore := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg.Database()) + + mailMon := mailbox.NewMonitor(cfg.AppID().String(), lggr.Named("Mailbox")) + loopRegistry := plugins.NewLoopRegistry(lggr, nil) + + mercuryPool := wsrpc.NewPool(lggr, cache.Config{ + LatestReportTTL: cfg.Mercury().Cache().LatestReportTTL(), + MaxStaleAge: cfg.Mercury().Cache().MaxStaleAge(), + LatestReportDeadline: cfg.Mercury().Cache().LatestReportDeadline(), + }) + + relayerFactory := plugin.RelayerFactory{ + Logger: lggr, + LoopRegistry: 
loopRegistry, + GRPCOpts: loop.GRPCOpts{}, + MercuryPool: mercuryPool, + } + + evmOpts := plugin.EVMFactoryConfig{ + ChainOpts: legacyevm.ChainOpts{ + AppConfig: cfg, + MailMon: mailMon, + DB: db, + }, + CSAETHKeystore: keyStore, + } + + if cfg.EVMEnabled() { + if ethClient == nil { + ethClient = evmclient.NewNullClient(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), lggr) + } + chainId := ethClient.ConfiguredChainID() + evmOpts.GenEthClient = func(_ *big.Int) evmclient.Client { + if chainId.Cmp(evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs())) != 0 { + t.Fatalf("expected eth client ChainID %d to match evm config chain id %d", chainId, evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs())) + } + return ethClient + } + } + + testCtx := testutils.Context(t) + // evm alway enabled for backward compatibility + initOps := []plugin.CoreRelayerChainInitFunc{plugin.InitEVM(testCtx, relayerFactory, evmOpts)} + + if cfg.CosmosEnabled() { + cosmosCfg := plugin.CosmosFactoryConfig{ + Keystore: keyStore.Cosmos(), + TOMLConfigs: cfg.CosmosConfigs(), + DB: db, + QConfig: cfg.Database(), + } + initOps = append(initOps, plugin.InitCosmos(testCtx, relayerFactory, cosmosCfg)) + } + if cfg.SolanaEnabled() { + solanaCfg := plugin.SolanaFactoryConfig{ + Keystore: keyStore.Solana(), + TOMLConfigs: cfg.SolanaConfigs(), + } + initOps = append(initOps, plugin.InitSolana(testCtx, relayerFactory, solanaCfg)) + } + if cfg.StarkNetEnabled() { + starkCfg := plugin.StarkNetFactoryConfig{ + Keystore: keyStore.StarkNet(), + TOMLConfigs: cfg.StarknetConfigs(), + } + initOps = append(initOps, plugin.InitStarknet(testCtx, relayerFactory, starkCfg)) + + } + relayChainInterops, err := plugin.NewCoreRelayerChainInteroperators(initOps...) 
+ if err != nil { + t.Fatal(err) + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + appInstance, err := plugin.NewApplication(plugin.ApplicationOpts{ + Config: cfg, + MailMon: mailMon, + SqlxDB: db, + KeyStore: keyStore, + RelayerChainInteroperators: relayChainInterops, + Logger: lggr, + AuditLogger: auditLogger, + CloseLogger: lggr.Sync, + ExternalInitiatorManager: externalInitiatorManager, + RestrictedHTTPClient: c, + UnrestrictedHTTPClient: c, + SecretGenerator: MockSecretGenerator{}, + LoopRegistry: plugins.NewLoopRegistry(lggr, nil), + MercuryPool: mercuryPool, + }) + require.NoError(t, err) + app := appInstance.(*plugin.PluginApplication) + ta := &TestApplication{ + t: t, + PluginApplication: app, + Logger: lggr, + } + + srvr := httptest.NewUnstartedServer(web.Router(t, app, nil)) + srvr.Config.WriteTimeout = cfg.WebServer().HTTPWriteTimeout() + srvr.Start() + ta.Server = srvr + + if !useRealExternalInitiatorManager { + app.ExternalInitiatorManager = externalInitiatorManager + } + + setKeys(t, ta, flagsAndDeps...) 
+ + return ta +} + +func NewEthMocksWithDefaultChain(t testing.TB) (c *evmclimocks.Client) { + testutils.SkipShortDB(t) + c = NewEthMocks(t) + c.On("ConfiguredChainID").Return(&FixtureChainID).Maybe() + return +} + +func NewEthMocks(t testing.TB) *evmclimocks.Client { + return evmclimocks.NewClient(t) +} + +func NewEthMocksWithStartupAssertions(t testing.TB) *evmclimocks.Client { + testutils.SkipShort(t, "long test") + c := NewEthMocks(t) + c.On("Dial", mock.Anything).Maybe().Return(nil) + c.On("SubscribeNewHead", mock.Anything, mock.Anything).Maybe().Return(EmptyMockSubscription(t), nil) + c.On("SendTransaction", mock.Anything, mock.Anything).Maybe().Return(nil) + c.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Maybe().Return(Head(0), nil) + c.On("ConfiguredChainID").Maybe().Return(&FixtureChainID) + c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Maybe().Return([]byte{}, nil) + c.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(nil, errors.New("mocked")) + c.On("CodeAt", mock.Anything, mock.Anything, mock.Anything).Maybe().Return([]byte{}, nil) + c.On("Close").Maybe().Return() + + block := &types.Header{ + Number: big.NewInt(100), + } + c.On("HeaderByNumber", mock.Anything, mock.Anything).Maybe().Return(block, nil) + + return c +} + +// NewEthMocksWithTransactionsOnBlocksAssertions sets an Eth mock with transactions on blocks +func NewEthMocksWithTransactionsOnBlocksAssertions(t testing.TB) *evmclimocks.Client { + testutils.SkipShort(t, "long test") + c := NewEthMocks(t) + c.On("Dial", mock.Anything).Maybe().Return(nil) + c.On("SubscribeNewHead", mock.Anything, mock.Anything).Maybe().Return(EmptyMockSubscription(t), nil) + c.On("SendTransaction", mock.Anything, mock.Anything).Maybe().Return(nil) + c.On("SendTransactionReturnCode", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(client.Successful, nil) + // Construct chain + h2 := Head(2) + h1 := HeadWithHash(1, h2.ParentHash) + h0 := 
HeadWithHash(0, h1.ParentHash) + c.On("HeadByNumber", mock.Anything, (*big.Int)(nil)).Maybe().Return(h2, nil) + c.On("HeadByHash", mock.Anything, h1.Hash).Maybe().Return(h1, nil) + c.On("HeadByHash", mock.Anything, h0.Hash).Maybe().Return(h0, nil) + c.On("BatchCallContext", mock.Anything, mock.Anything).Maybe().Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + if len(elems) > 0 { + elems[0].Result = &evmtypes.Block{ + Number: 42, + Hash: evmutils.NewHash(), + Transactions: LegacyTransactionsFromGasPrices(9001, 9002), + } + } + if len(elems) > 1 { + elems[1].Result = &evmtypes.Block{ + Number: 41, + Hash: evmutils.NewHash(), + Transactions: LegacyTransactionsFromGasPrices(9003, 9004), + } + } + }) + c.On("ConfiguredChainID").Maybe().Return(&FixtureChainID) + c.On("Close").Maybe().Return() + + block := &types.Header{ + Number: big.NewInt(100), + } + c.On("HeaderByHash", mock.Anything, mock.Anything).Maybe().Return(block, nil) + + return c +} + +// Start starts the plugin app and registers Stop to clean up at end of test. +func (ta *TestApplication) Start(ctx context.Context) error { + ta.t.Helper() + ta.Started = true + err := ta.PluginApplication.KeyStore.Unlock(Password) + if err != nil { + return err + } + + err = ta.PluginApplication.Start(ctx) + if err != nil { + return err + } + ta.t.Cleanup(func() { require.NoError(ta.t, ta.Stop()) }) + return nil +} + +// Stop will stop the test application and perform cleanup +func (ta *TestApplication) Stop() error { + ta.t.Helper() + + if !ta.Started { + ta.t.Fatal("TestApplication Stop() called on an unstarted application") + } + + // TODO: Here we double close, which is less than ideal. + // We would prefer to invoke a method on an interface that + // cleans up only in test. 
+ // FIXME: TestApplication probably needs to simply be removed + err := ta.PluginApplication.StopIfStarted() + if ta.Server != nil { + ta.Server.Close() + } + return err +} + +func (ta *TestApplication) MustSeedNewSession(email string) (id string) { + session := NewSession() + ta.Logger.Infof("TestApplication creating session (id: %s, email: %s, last used: %s)", session.ID, email, session.LastUsed.String()) + err := ta.GetSqlxDB().Get(&id, `INSERT INTO sessions (id, email, last_used, created_at) VALUES ($1, $2, $3, NOW()) RETURNING id`, session.ID, email, session.LastUsed) + require.NoError(ta.t, err) + return id +} + +// ImportKey adds private key to the application keystore and database +func (ta *TestApplication) Import(content string) { + require.NoError(ta.t, ta.KeyStore.Unlock(Password)) + _, err := ta.KeyStore.Eth().Import([]byte(content), Password, &FixtureChainID) + require.NoError(ta.t, err) +} + +type User struct { + Email string + Role clsessions.UserRole +} + +func (ta *TestApplication) NewHTTPClient(user *User) HTTPClientCleaner { + ta.t.Helper() + + if user == nil { + user = &User{} + } + + if user.Email == "" { + user.Email = fmt.Sprintf("%s@plugin.test", uuid.New()) + } + + if user.Role == "" { + user.Role = clsessions.UserRoleAdmin + } + + u, err := clsessions.NewUser(user.Email, Password, user.Role) + require.NoError(ta.t, err) + + err = ta.BasicAdminUsersORM().CreateUser(&u) + require.NoError(ta.t, err) + + sessionID := ta.MustSeedNewSession(user.Email) + + return HTTPClientCleaner{ + HTTPClient: NewMockAuthenticatedHTTPClient(ta.Logger, ta.NewClientOpts(), sessionID), + t: ta.t, + } +} + +func (ta *TestApplication) NewClientOpts() cmd.ClientOpts { + return cmd.ClientOpts{RemoteNodeURL: *MustParseURL(ta.t, ta.Server.URL), InsecureSkipVerify: true} +} + +// NewShellAndRenderer creates a new cmd.Shell for the test application +func (ta *TestApplication) NewShellAndRenderer() (*cmd.Shell, *RendererMock) { + hc := ta.NewHTTPClient(nil) + r := 
&RendererMock{} + lggr := logger.TestLogger(ta.t) + client := &cmd.Shell{ + Renderer: r, + Config: ta.GetConfig(), + Logger: lggr, + AppFactory: seededAppFactory{ta.PluginApplication}, + FallbackAPIInitializer: NewMockAPIInitializer(ta.t), + Runner: EmptyRunner{}, + HTTP: hc.HTTPClient, + CookieAuthenticator: MockCookieAuthenticator{t: ta.t}, + FileSessionRequestBuilder: &MockSessionRequestBuilder{}, + PromptingSessionRequestBuilder: &MockSessionRequestBuilder{}, + ChangePasswordPrompter: &MockChangePasswordPrompter{}, + } + return client, r +} + +func (ta *TestApplication) NewAuthenticatingShell(prompter cmd.Prompter) *cmd.Shell { + lggr := logger.TestLogger(ta.t) + cookieAuth := cmd.NewSessionCookieAuthenticator(ta.NewClientOpts(), &cmd.MemoryCookieStore{}, lggr) + client := &cmd.Shell{ + Renderer: &RendererMock{}, + Config: ta.GetConfig(), + Logger: lggr, + AppFactory: seededAppFactory{ta.PluginApplication}, + FallbackAPIInitializer: NewMockAPIInitializer(ta.t), + Runner: EmptyRunner{}, + HTTP: cmd.NewAuthenticatedHTTPClient(ta.Logger, ta.NewClientOpts(), cookieAuth, clsessions.SessionRequest{}), + CookieAuthenticator: cookieAuth, + FileSessionRequestBuilder: cmd.NewFileSessionRequestBuilder(lggr), + PromptingSessionRequestBuilder: cmd.NewPromptingSessionRequestBuilder(prompter), + ChangePasswordPrompter: &MockChangePasswordPrompter{}, + } + return client +} + +// NewKeyStore returns a new, unlocked keystore +func NewKeyStore(t testing.TB, db *sqlx.DB, cfg pg.QConfig) keystore.Master { + keystore := keystore.NewInMemory(db, utils.FastScryptParams, logger.TestLogger(t), cfg) + require.NoError(t, keystore.Unlock(Password)) + return keystore +} + +func ParseJSON(t testing.TB, body io.Reader) models.JSON { + t.Helper() + + b, err := io.ReadAll(body) + require.NoError(t, err) + return models.JSON{Result: gjson.ParseBytes(b)} +} + +func ParseJSONAPIErrors(t testing.TB, body io.Reader) *models.JSONAPIErrors { + t.Helper() + + b, err := io.ReadAll(body) + 
require.NoError(t, err) + var respJSON models.JSONAPIErrors + err = json.Unmarshal(b, &respJSON) + require.NoError(t, err) + return &respJSON +} + +// MustReadFile loads a file but should never fail +func MustReadFile(t testing.TB, file string) []byte { + t.Helper() + + content, err := os.ReadFile(file) + require.NoError(t, err) + return content +} + +type HTTPClientCleaner struct { + HTTPClient cmd.HTTPClient + t testing.TB +} + +func (r *HTTPClientCleaner) Get(path string, headers ...map[string]string) (*http.Response, func()) { + resp, err := r.HTTPClient.Get(testutils.Context(r.t), path, headers...) + return bodyCleaner(r.t, resp, err) +} + +func (r *HTTPClientCleaner) Post(path string, body io.Reader) (*http.Response, func()) { + resp, err := r.HTTPClient.Post(testutils.Context(r.t), path, body) + return bodyCleaner(r.t, resp, err) +} + +func (r *HTTPClientCleaner) Put(path string, body io.Reader) (*http.Response, func()) { + resp, err := r.HTTPClient.Put(testutils.Context(r.t), path, body) + return bodyCleaner(r.t, resp, err) +} + +func (r *HTTPClientCleaner) Patch(path string, body io.Reader, headers ...map[string]string) (*http.Response, func()) { + resp, err := r.HTTPClient.Patch(testutils.Context(r.t), path, body, headers...) + return bodyCleaner(r.t, resp, err) +} + +func (r *HTTPClientCleaner) Delete(path string) (*http.Response, func()) { + resp, err := r.HTTPClient.Delete(testutils.Context(r.t), path) + return bodyCleaner(r.t, resp, err) +} + +func bodyCleaner(t testing.TB, resp *http.Response, err error) (*http.Response, func()) { + t.Helper() + + require.NoError(t, err) + return resp, func() { require.NoError(t, resp.Body.Close()) } +} + +// ParseResponseBody will parse the given response into a byte slice +func ParseResponseBody(t testing.TB, resp *http.Response) []byte { + t.Helper() + + b, err := io.ReadAll(resp.Body) + require.NoError(t, err) + return b +} + +// ParseJSONAPIResponse parses the response and returns the JSONAPI resource. 
+func ParseJSONAPIResponse(t testing.TB, resp *http.Response, resource interface{}) error { + t.Helper() + + input := ParseResponseBody(t, resp) + err := jsonapi.Unmarshal(input, resource) + if err != nil { + return fmt.Errorf("web: unable to unmarshal data, %+v", err) + } + + return nil +} + +// ParseJSONAPIResponseMeta parses the bytes of the root document and returns a +// map of *json.RawMessage's within the 'meta' key. +func ParseJSONAPIResponseMeta(input []byte) (map[string]*json.RawMessage, error) { + var root map[string]*json.RawMessage + err := json.Unmarshal(input, &root) + if err != nil { + return root, err + } + + var meta map[string]*json.RawMessage + err = json.Unmarshal(*root["meta"], &meta) + return meta, err +} + +// ParseJSONAPIResponseMetaCount parses the bytes of the root document and +// returns the value of the 'count' key from the 'meta' section. +func ParseJSONAPIResponseMetaCount(input []byte) (int, error) { + meta, err := ParseJSONAPIResponseMeta(input) + if err != nil { + return -1, err + } + + var metaCount int + err = json.Unmarshal(*meta["count"], &metaCount) + return metaCount, err +} + +func CreateJobViaWeb(t testing.TB, app *TestApplication, request []byte) job.Job { + t.Helper() + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Post("/v2/jobs", bytes.NewBuffer(request)) + defer cleanup() + AssertServerResponse(t, resp, http.StatusOK) + + var createdJob job.Job + err := ParseJSONAPIResponse(t, resp, &createdJob) + require.NoError(t, err) + return createdJob +} + +func CreateJobViaWeb2(t testing.TB, app *TestApplication, spec string) webpresenters.JobResource { + t.Helper() + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Post("/v2/jobs", bytes.NewBufferString(spec)) + defer cleanup() + AssertServerResponse(t, resp, http.StatusOK) + + var jobResponse webpresenters.JobResource + err := ParseJSONAPIResponse(t, resp, &jobResponse) + require.NoError(t, err) + return jobResponse +} + +func DeleteJobViaWeb(t 
testing.TB, app *TestApplication, jobID int32) { + t.Helper() + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Delete(fmt.Sprintf("/v2/jobs/%v", jobID)) + defer cleanup() + AssertServerResponse(t, resp, http.StatusNoContent) +} + +func AwaitJobActive(t testing.TB, jobSpawner job.Spawner, jobID int32, waitFor time.Duration) { + t.Helper() + require.Eventually(t, func() bool { + _, exists := jobSpawner.ActiveJobs()[jobID] + return exists + }, waitFor, 100*time.Millisecond) +} + +func CreateJobRunViaExternalInitiatorV2( + t testing.TB, + app *TestApplication, + jobID uuid.UUID, + eia auth.Token, + body string, +) webpresenters.PipelineRunResource { + t.Helper() + + headers := make(map[string]string) + headers[static.ExternalInitiatorAccessKeyHeader] = eia.AccessKey + headers[static.ExternalInitiatorSecretHeader] = eia.Secret + + url := app.Server.URL + "/v2/jobs/" + jobID.String() + "/runs" + bodyBuf := bytes.NewBufferString(body) + resp, cleanup := UnauthenticatedPost(t, url, bodyBuf, headers) + defer cleanup() + AssertServerResponse(t, resp, 200) + var pr webpresenters.PipelineRunResource + err := ParseJSONAPIResponse(t, resp, &pr) + require.NoError(t, err) + + // assert.Equal(t, j.ID, pr.JobSpecID) + return pr +} + +func CreateJobRunViaUser( + t testing.TB, + app *TestApplication, + jobID uuid.UUID, + body string, +) webpresenters.PipelineRunResource { + t.Helper() + + bodyBuf := bytes.NewBufferString(body) + client := app.NewHTTPClient(nil) + resp, cleanup := client.Post("/v2/jobs/"+jobID.String()+"/runs", bodyBuf) + defer cleanup() + AssertServerResponse(t, resp, 200) + var pr webpresenters.PipelineRunResource + err := ParseJSONAPIResponse(t, resp, &pr) + require.NoError(t, err) + + return pr +} + +// CreateExternalInitiatorViaWeb creates a bridgetype via web using /v2/bridge_types +func CreateExternalInitiatorViaWeb( + t testing.TB, + app *TestApplication, + payload string, +) *webpresenters.ExternalInitiatorAuthentication { + t.Helper() + + client 
:= app.NewHTTPClient(nil) + resp, cleanup := client.Post("/v2/external_initiators", bytes.NewBufferString(payload)) + defer cleanup() + AssertServerResponse(t, resp, http.StatusCreated) + ei := &webpresenters.ExternalInitiatorAuthentication{} + err := ParseJSONAPIResponse(t, resp, ei) + require.NoError(t, err) + + return ei +} + +const ( + // DBPollingInterval can't be too short to avoid DOSing the test database + DBPollingInterval = 100 * time.Millisecond + // AssertNoActionTimeout shouldn't be too long, or it will slow down tests + AssertNoActionTimeout = 3 * time.Second +) + +// WaitForSpecErrorV2 polls until the passed in jobID has count number +// of job spec errors. +func WaitForSpecErrorV2(t *testing.T, db *sqlx.DB, jobID int32, count int) []job.SpecError { + t.Helper() + + g := gomega.NewWithT(t) + var jse []job.SpecError + g.Eventually(func() []job.SpecError { + err := db.Select(&jse, `SELECT * FROM job_spec_errors WHERE job_id = $1`, jobID) + assert.NoError(t, err) + return jse + }, testutils.WaitTimeout(t), DBPollingInterval).Should(gomega.HaveLen(count)) + return jse +} + +func WaitForPipelineError(t testing.TB, nodeID int, jobID int32, expectedPipelineRuns int, expectedTaskRuns int, jo job.ORM, timeout, poll time.Duration) []pipeline.Run { + t.Helper() + return WaitForPipeline(t, nodeID, jobID, expectedPipelineRuns, expectedTaskRuns, jo, timeout, poll, pipeline.RunStatusErrored) +} +func WaitForPipelineComplete(t testing.TB, nodeID int, jobID int32, expectedPipelineRuns int, expectedTaskRuns int, jo job.ORM, timeout, poll time.Duration) []pipeline.Run { + t.Helper() + return WaitForPipeline(t, nodeID, jobID, expectedPipelineRuns, expectedTaskRuns, jo, timeout, poll, pipeline.RunStatusCompleted) +} + +func WaitForPipeline(t testing.TB, nodeID int, jobID int32, expectedPipelineRuns int, expectedTaskRuns int, jo job.ORM, timeout, poll time.Duration, state pipeline.RunStatus) []pipeline.Run { + t.Helper() + + var pr []pipeline.Run + 
gomega.NewWithT(t).Eventually(func() bool { + prs, _, err := jo.PipelineRuns(&jobID, 0, 1000) + require.NoError(t, err) + + var matched []pipeline.Run + for _, pr := range prs { + if !pr.State.Finished() || pr.State != state { + continue + } + + // txdb effectively ignores transactionality of queries, so we need to explicitly expect a number of task runs + // (if the read occurs mid-transaction and a job run is inserted but task runs not yet). + if len(pr.PipelineTaskRuns) == expectedTaskRuns { + matched = append(matched, pr) + } + } + if len(matched) >= expectedPipelineRuns { + pr = matched + return true + } + return false + }, timeout, poll).Should( + gomega.BeTrue(), + fmt.Sprintf(`expected at least %d runs with status "%s" on node %d for job %d, total runs %d`, + expectedPipelineRuns, + state, + nodeID, + jobID, + len(pr), + ), + ) + return pr +} + +// AssertPipelineRunsStays asserts that the number of pipeline runs for a particular job remains at the provided values +func AssertPipelineRunsStays(t testing.TB, pipelineSpecID int32, db *sqlx.DB, want int) []pipeline.Run { + t.Helper() + g := gomega.NewWithT(t) + + var prs []pipeline.Run + g.Consistently(func() []pipeline.Run { + err := db.Select(&prs, `SELECT * FROM pipeline_runs WHERE pipeline_spec_id = $1`, pipelineSpecID) + assert.NoError(t, err) + return prs + }, AssertNoActionTimeout, DBPollingInterval).Should(gomega.HaveLen(want)) + return prs +} + +// AssertEthTxAttemptCountStays asserts that the number of tx attempts remains at the provided value +func AssertEthTxAttemptCountStays(t testing.TB, txStore txmgr.TestEvmTxStore, want int) []int64 { + g := gomega.NewWithT(t) + + var txaIds []int64 + g.Consistently(func() []txmgr.TxAttempt { + attempts, err := txStore.GetAllTxAttempts(testutils.Context(t)) + assert.NoError(t, err) + return attempts + }, AssertNoActionTimeout, DBPollingInterval).Should(gomega.HaveLen(want)) + return txaIds +} + +// Head given the value convert it into an Head +func Head(val 
interface{}) *evmtypes.Head { + var h evmtypes.Head + time := uint64(0) + switch t := val.(type) { + case int: + h = evmtypes.NewHead(big.NewInt(int64(t)), evmutils.NewHash(), evmutils.NewHash(), time, ubig.New(&FixtureChainID)) + case uint64: + h = evmtypes.NewHead(big.NewInt(int64(t)), evmutils.NewHash(), evmutils.NewHash(), time, ubig.New(&FixtureChainID)) + case int64: + h = evmtypes.NewHead(big.NewInt(t), evmutils.NewHash(), evmutils.NewHash(), time, ubig.New(&FixtureChainID)) + case *big.Int: + h = evmtypes.NewHead(t, evmutils.NewHash(), evmutils.NewHash(), time, ubig.New(&FixtureChainID)) + default: + panic(fmt.Sprintf("Could not convert %v of type %T to Head", val, val)) + } + return &h +} + +func HeadWithHash(n int64, hash common.Hash) *evmtypes.Head { + var h evmtypes.Head + time := uint64(0) + h = evmtypes.NewHead(big.NewInt(n), hash, evmutils.NewHash(), time, ubig.New(&FixtureChainID)) + return &h + +} + +// LegacyTransactionsFromGasPrices returns transactions matching the given gas prices +func LegacyTransactionsFromGasPrices(gasPrices ...int64) []evmtypes.Transaction { + return LegacyTransactionsFromGasPricesTxType(0x0, gasPrices...) +} + +func LegacyTransactionsFromGasPricesTxType(code evmtypes.TxType, gasPrices ...int64) []evmtypes.Transaction { + txs := make([]evmtypes.Transaction, len(gasPrices)) + for i, gasPrice := range gasPrices { + txs[i] = evmtypes.Transaction{Type: code, GasPrice: assets.NewWeiI(gasPrice), GasLimit: 42} + } + return txs +} + +// DynamicFeeTransactionsFromTipCaps returns EIP-1559 transactions with the +// given TipCaps (FeeCap is arbitrary) +func DynamicFeeTransactionsFromTipCaps(tipCaps ...int64) []evmtypes.Transaction { + return DynamicFeeTransactionsFromTipCapsTxType(0x02, tipCaps...) 
+} + +func DynamicFeeTransactionsFromTipCapsTxType(code evmtypes.TxType, tipCaps ...int64) []evmtypes.Transaction { + txs := make([]evmtypes.Transaction, len(tipCaps)) + for i, tipCap := range tipCaps { + txs[i] = evmtypes.Transaction{Type: code, MaxPriorityFeePerGas: assets.NewWeiI(tipCap), GasLimit: 42, MaxFeePerGas: assets.GWei(5000)} + } + return txs +} + +type TransactionReceipter interface { + TransactionReceipt(context.Context, common.Hash) (*types.Receipt, error) +} + +func RequireTxSuccessful(t testing.TB, client TransactionReceipter, txHash common.Hash) *types.Receipt { + t.Helper() + r, err := client.TransactionReceipt(testutils.Context(t), txHash) + require.NoError(t, err) + require.NotNil(t, r) + require.Equal(t, uint64(1), r.Status) + return r +} + +// AssertServerResponse is used to match against a client response, will print +// any errors returned if the request fails. +func AssertServerResponse(t testing.TB, resp *http.Response, expectedStatusCode int) { + t.Helper() + + if resp.StatusCode == expectedStatusCode { + return + } + + t.Logf("expected status code %s got %s", http.StatusText(expectedStatusCode), http.StatusText(resp.StatusCode)) + + if resp.StatusCode >= 300 && resp.StatusCode < 600 { + b, err := io.ReadAll(resp.Body) + if err != nil { + assert.FailNowf(t, "Unable to read body", err.Error()) + } + + var result *models.JSONAPIErrors + err = json.Unmarshal(b, &result) + if err != nil { + assert.FailNowf(t, fmt.Sprintf("Unable to unmarshal json from body '%s'", string(b)), err.Error()) + } + + assert.FailNowf(t, "Request failed", "Expected %d response, got %d with errors: %s", expectedStatusCode, resp.StatusCode, result.Errors) + } else { + assert.FailNowf(t, "Unexpected response", "Expected %d response, got %d", expectedStatusCode, resp.StatusCode) + } +} + +func DecodeSessionCookie(value string) (string, error) { + var decrypted map[interface{}]interface{} + codecs := securecookie.CodecsFromPairs([]byte(SessionSecret)) + err := 
securecookie.DecodeMulti(webauth.SessionName, value, &decrypted, codecs...) + if err != nil { + return "", err + } + value, ok := decrypted[webauth.SessionIDKey].(string) + if !ok { + return "", fmt.Errorf("decrypted[web.SessionIDKey] is not a string (%v)", value) + } + return value, nil +} + +func MustGenerateSessionCookie(t testing.TB, value string) *http.Cookie { + decrypted := map[interface{}]interface{}{webauth.SessionIDKey: value} + codecs := securecookie.CodecsFromPairs([]byte(SessionSecret)) + encoded, err := securecookie.EncodeMulti(webauth.SessionName, decrypted, codecs...) + if err != nil { + logger.TestLogger(t).Panic(err) + } + return sessions.NewCookie(webauth.SessionName, encoded, &sessions.Options{}) +} + +func AssertError(t testing.TB, want bool, err error) { + t.Helper() + + if want { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } +} + +func UnauthenticatedPost(t testing.TB, url string, body io.Reader, headers map[string]string) (*http.Response, func()) { + t.Helper() + return unauthenticatedHTTP(t, "POST", url, body, headers) +} + +func UnauthenticatedGet(t testing.TB, url string, headers map[string]string) (*http.Response, func()) { + t.Helper() + return unauthenticatedHTTP(t, "GET", url, nil, headers) +} + +func unauthenticatedHTTP(t testing.TB, method string, url string, body io.Reader, headers map[string]string) (*http.Response, func()) { + t.Helper() + + client := clhttptest.NewTestLocalOnlyHTTPClient() + request, err := http.NewRequestWithContext(testutils.Context(t), method, url, body) + require.NoError(t, err) + request.Header.Set("Content-Type", "application/json") + for key, value := range headers { + request.Header.Add(key, value) + } + resp, err := client.Do(request) + require.NoError(t, err) + return resp, func() { resp.Body.Close() } +} + +func MustParseDuration(t testing.TB, durationStr string) time.Duration { + t.Helper() + + duration, err := time.ParseDuration(durationStr) + require.NoError(t, err) + return 
duration +} + +func NewSession(optionalSessionID ...string) clsessions.Session { + session := clsessions.NewSession() + if len(optionalSessionID) > 0 { + session.ID = optionalSessionID[0] + } + return session +} + +func AllExternalInitiators(t testing.TB, db *sqlx.DB) []bridges.ExternalInitiator { + t.Helper() + + var all []bridges.ExternalInitiator + err := db.Select(&all, `SELECT * FROM external_initiators`) + require.NoError(t, err) + return all +} + +type Awaiter chan struct{} + +func NewAwaiter() Awaiter { return make(Awaiter) } + +func (a Awaiter) ItHappened() { close(a) } + +func (a Awaiter) AssertHappened(t *testing.T, expected bool) { + t.Helper() + select { + case <-a: + if !expected { + t.Fatal("It happened") + } + default: + if expected { + t.Fatal("It didn't happen") + } + } +} + +func (a Awaiter) AwaitOrFail(t testing.TB, durationParams ...time.Duration) { + t.Helper() + + duration := 10 * time.Second + if len(durationParams) > 0 { + duration = durationParams[0] + } + + select { + case <-a: + case <-time.After(duration): + t.Fatal("Timed out waiting for Awaiter to get ItHappened") + } +} + +func CallbackOrTimeout(t testing.TB, msg string, callback func(), durationParams ...time.Duration) { + t.Helper() + + duration := 100 * time.Millisecond + if len(durationParams) > 0 { + duration = durationParams[0] + } + + done := make(chan struct{}) + go func() { + callback() + close(done) + }() + + select { + case <-done: + case <-time.After(duration): + t.Fatalf("CallbackOrTimeout: %s timed out", msg) + } +} + +func MustParseURL(t testing.TB, input string) *url.URL { + return testutils.MustParseURL(t, input) +} + +// EthereumLogIterator is the interface provided by gethwrapper representations of EVM +// logs. +type EthereumLogIterator interface{ Next() bool } + +// GetLogs drains logs of EVM log representations. Since those log +// representations don't fit into a type hierarchy, this API is a bit awkward. 
// It returns the logs as a slice of blank interface{}s, and if rv is non-nil,
// it must be a pointer to a slice for elements of the same type as the logs,
// in which case GetLogs will append the logs to it.
func GetLogs(t *testing.T, rv interface{}, logs EthereumLogIterator) []interface{} {
	// reflect.ValueOf(nil) yields an invalid Value (Kind == Invalid), so the
	// Ptr/Slice checks below are only meaningful when rv was actually supplied;
	// the `rv == nil ||` short-circuit keeps the nil case valid.
	v := reflect.ValueOf(rv)
	require.True(t, rv == nil ||
		v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Slice,
		"must pass a slice to receive logs")
	var e reflect.Value
	if rv != nil {
		e = v.Elem()
	}
	var irv []interface{}
	for logs.Next() {
		// Each gethwrapper iterator exposes the decoded log under an "Event"
		// field; access it reflectively since the concrete iterator types vary.
		log := reflect.Indirect(reflect.ValueOf(logs)).FieldByName("Event")
		if v.Kind() == reflect.Ptr {
			// Caller passed a slice pointer: append the typed event into it too.
			e.Set(reflect.Append(e, log))
		}
		irv = append(irv, log.Interface())
	}
	return irv
}
interface{}, method string) bool { + return req.Method == method && + len(req.Args) == 1 && req.Args[0] == arg +} + +func BatchElemMustMatchParams(t *testing.T, req rpc.BatchElem, hash common.Hash, method string) { + t.Helper() + if !BatchElemMatchesParams(req, hash, method) { + t.Fatalf("Batch hash %v does not match expected %v", req.Args[0], hash) + } +} + +// SimulateIncomingHeads spawns a goroutine which sends a stream of heads and closes the returned channel when finished. +func SimulateIncomingHeads(t *testing.T, heads []*evmtypes.Head, headTrackables ...httypes.HeadTrackable) (done chan struct{}) { + // Build the full chain of heads + ctx := testutils.Context(t) + done = make(chan struct{}) + go func(t *testing.T) { + defer close(done) + ticker := time.NewTicker(250 * time.Millisecond) + defer ticker.Stop() + + for _, h := range heads { + select { + case <-ctx.Done(): + return + case <-ticker.C: + t.Logf("Sending head: %d", h.Number) + for _, ht := range headTrackables { + ht.OnNewLongestChain(ctx, h) + } + } + } + }(t) + return done +} + +// Blocks - a helper logic to construct a range of linked heads +// and an ability to fork and create logs from them +type Blocks struct { + t *testing.T + Hashes []common.Hash + mHashes map[int64]common.Hash + Heads map[int64]*evmtypes.Head +} + +func (b *Blocks) LogOnBlockNum(i uint64, addr common.Address) types.Log { + return RawNewRoundLog(b.t, addr, b.Hashes[i], i, 0, false) +} + +func (b *Blocks) LogOnBlockNumRemoved(i uint64, addr common.Address) types.Log { + return RawNewRoundLog(b.t, addr, b.Hashes[i], i, 0, true) +} + +func (b *Blocks) LogOnBlockNumWithIndex(i uint64, logIndex uint, addr common.Address) types.Log { + return RawNewRoundLog(b.t, addr, b.Hashes[i], i, logIndex, false) +} + +func (b *Blocks) LogOnBlockNumWithIndexRemoved(i uint64, logIndex uint, addr common.Address) types.Log { + return RawNewRoundLog(b.t, addr, b.Hashes[i], i, logIndex, true) +} + +func (b *Blocks) LogOnBlockNumWithTopics(i uint64, 
logIndex uint, addr common.Address, topics []common.Hash) types.Log { + return RawNewRoundLogWithTopics(b.t, addr, b.Hashes[i], i, logIndex, false, topics) +} + +func (b *Blocks) HashesMap() map[int64]common.Hash { + return b.mHashes +} + +func (b *Blocks) Head(number uint64) *evmtypes.Head { + return b.Heads[int64(number)] +} + +func (b *Blocks) ForkAt(t *testing.T, blockNum int64, numHashes int) *Blocks { + forked := NewBlocks(t, len(b.Heads)+numHashes) + if _, exists := forked.Heads[blockNum]; !exists { + t.Fatalf("Not enough length for block num: %v", blockNum) + } + + for i := int64(0); i < blockNum; i++ { + forked.Heads[i] = b.Heads[i] + } + + forked.Heads[blockNum].ParentHash = b.Heads[blockNum].ParentHash + forked.Heads[blockNum].Parent = b.Heads[blockNum].Parent + return forked +} + +func (b *Blocks) NewHead(number uint64) *evmtypes.Head { + parentNumber := number - 1 + parent, ok := b.Heads[int64(parentNumber)] + if !ok { + b.t.Fatalf("Can't find parent block at index: %v", parentNumber) + } + head := &evmtypes.Head{ + Number: parent.Number + 1, + Hash: evmutils.NewHash(), + ParentHash: parent.Hash, + Parent: parent, + Timestamp: time.Unix(parent.Number+1, 0), + EVMChainID: ubig.New(&FixtureChainID), + } + return head +} + +// Slice returns a slice of heads from number i to j. Set j < 0 for all remaining. 
+func (b *Blocks) Slice(i, j int) []*evmtypes.Head { + b.t.Logf("Slicing heads from %v to %v...", i, j) + + if j > 0 && j-i > len(b.Heads) { + b.t.Fatalf("invalid configuration: too few blocks %d for range length %d", len(b.Heads), j-i) + } + return b.slice(i, j) +} + +func (b *Blocks) slice(i, j int) (heads []*evmtypes.Head) { + if j > 0 { + heads = make([]*evmtypes.Head, 0, j-i) + } + for n := i; j < 0 || n < j; n++ { + h, ok := b.Heads[int64(n)] + if !ok { + if j < 0 { + break // done + } + b.t.Fatalf("invalid configuration: block %d not found", n) + } + heads = append(heads, h) + } + return +} + +func NewBlocks(t *testing.T, numHashes int) *Blocks { + hashes := make([]common.Hash, 0) + heads := make(map[int64]*evmtypes.Head) + for i := int64(0); i < int64(numHashes); i++ { + hash := evmutils.NewHash() + hashes = append(hashes, hash) + + heads[i] = &evmtypes.Head{Hash: hash, Number: i, Timestamp: time.Unix(i, 0), EVMChainID: ubig.New(&FixtureChainID)} + if i > 0 { + parent := heads[i-1] + heads[i].Parent = parent + heads[i].ParentHash = parent.Hash + } + } + + hashesMap := make(map[int64]common.Hash) + for i := 0; i < len(hashes); i++ { + hashesMap[int64(i)] = hashes[i] + } + + return &Blocks{ + t: t, + Hashes: hashes, + mHashes: hashesMap, + Heads: heads, + } +} + +// HeadBuffer - stores heads in sequence, with increasing timestamps +type HeadBuffer struct { + t *testing.T + Heads []*evmtypes.Head +} + +func NewHeadBuffer(t *testing.T) *HeadBuffer { + return &HeadBuffer{ + t: t, + Heads: make([]*evmtypes.Head, 0), + } +} + +func (hb *HeadBuffer) Append(head *evmtypes.Head) { + cloned := &evmtypes.Head{ + Number: head.Number, + Hash: head.Hash, + ParentHash: head.ParentHash, + Parent: head.Parent, + Timestamp: time.Unix(int64(len(hb.Heads)), 0), + EVMChainID: head.EVMChainID, + } + hb.Heads = append(hb.Heads, cloned) +} + +type HeadTrackableFunc func(context.Context, *evmtypes.Head) + +func (fn HeadTrackableFunc) OnNewLongestChain(ctx context.Context, head 
*evmtypes.Head) { + fn(ctx, head) +} + +type testifyExpectationsAsserter interface { + AssertExpectations(t mock.TestingT) bool +} + +type fakeT struct{} + +func (ft fakeT) Logf(format string, args ...interface{}) {} +func (ft fakeT) Errorf(format string, args ...interface{}) {} +func (ft fakeT) FailNow() {} + +func EventuallyExpectationsMet(t *testing.T, mock testifyExpectationsAsserter, timeout time.Duration, interval time.Duration) { + t.Helper() + + chTimeout := time.After(timeout) + for { + var ft fakeT + success := mock.AssertExpectations(ft) + if success { + return + } + select { + case <-chTimeout: + mock.AssertExpectations(t) + t.FailNow() + default: + time.Sleep(interval) + } + } +} + +func AssertCount(t *testing.T, db *sqlx.DB, tableName string, expected int64) { + testutils.AssertCount(t, db, tableName, expected) +} + +func WaitForCount(t *testing.T, db *sqlx.DB, tableName string, want int64) { + t.Helper() + g := gomega.NewWithT(t) + var count int64 + var err error + g.Eventually(func() int64 { + err = db.Get(&count, fmt.Sprintf(`SELECT count(*) FROM %s;`, tableName)) + assert.NoError(t, err) + return count + }, testutils.WaitTimeout(t), DBPollingInterval).Should(gomega.Equal(want)) +} + +func AssertCountStays(t testing.TB, db *sqlx.DB, tableName string, want int64) { + t.Helper() + g := gomega.NewWithT(t) + var count int64 + var err error + g.Consistently(func() int64 { + err = db.Get(&count, fmt.Sprintf(`SELECT count(*) FROM %s`, tableName)) + assert.NoError(t, err) + return count + }, AssertNoActionTimeout, DBPollingInterval).Should(gomega.Equal(want)) +} + +func AssertRecordEventually(t *testing.T, db *sqlx.DB, model interface{}, stmt string, check func() bool) { + t.Helper() + g := gomega.NewWithT(t) + g.Eventually(func() bool { + err := db.Get(model, stmt) + require.NoError(t, err, "unable to find record in DB") + return check() + }, testutils.WaitTimeout(t), DBPollingInterval).Should(gomega.BeTrue()) +} + +func MustWebURL(t *testing.T, s string) 
*models.WebURL { + uri, err := url.Parse(s) + require.NoError(t, err) + return (*models.WebURL)(uri) +} + +func NewTestChainScopedConfig(t testing.TB) evmconfig.ChainScopedConfig { + cfg := configtest.NewGeneralConfig(t, nil) + return evmtest.NewChainScopedConfig(t, cfg) +} + +func NewTestTxStore(t *testing.T, db *sqlx.DB, cfg pg.QConfig) txmgr.TestEvmTxStore { + return txmgr.NewTxStore(db, logger.TestLogger(t), cfg) +} + +// ClearDBTables deletes all rows from the given tables +func ClearDBTables(t *testing.T, db *sqlx.DB, tables ...string) { + tx, err := db.Beginx() + require.NoError(t, err) + + for _, table := range tables { + _, err = tx.Exec(fmt.Sprintf("DELETE FROM %s", table)) + require.NoError(t, err) + } + + err = tx.Commit() + require.NoError(t, err) +} diff --git a/core/internal/cltest/contract_mock_receiver.go b/core/internal/cltest/contract_mock_receiver.go new file mode 100644 index 00000000..2d1ba7fb --- /dev/null +++ b/core/internal/cltest/contract_mock_receiver.go @@ -0,0 +1,127 @@ +package cltest + +import ( + "errors" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" +) + +// funcSigLength is the length of the function signature (including the 0x) +// ex: 0x1234ABCD +const funcSigLength = 10 + +func NewContractMockReceiver(t *testing.T, ethMock *evmclimocks.Client, abi abi.ABI, address common.Address) contractMockReceiver { + return contractMockReceiver{ + t: t, + ethMock: ethMock, + abi: abi, + address: address, + } +} + +type contractMockReceiver struct { + t *testing.T + ethMock *evmclimocks.Client + abi abi.ABI + address common.Address +} + +func (receiver contractMockReceiver) MockResponse(funcName string, responseArgs ...interface{}) 
*mock.Call { + funcSig := hexutil.Encode(receiver.abi.Methods[funcName].ID) + if len(funcSig) != funcSigLength { + receiver.t.Fatalf("Unable to find Registry contract function with name %s", funcName) + } + + encoded := receiver.mustEncodeResponse(funcName, responseArgs...) + + return receiver.ethMock. + On( + "CallContract", + mock.Anything, + mock.MatchedBy(func(callArgs ethereum.CallMsg) bool { + return *callArgs.To == receiver.address && + hexutil.Encode(callArgs.Data)[0:funcSigLength] == funcSig + }), + mock.Anything). + Return(encoded, nil) +} + +func (receiver contractMockReceiver) MockMatchedResponse(funcName string, matcher func(callArgs ethereum.CallMsg) bool, responseArgs ...interface{}) *mock.Call { + funcSig := hexutil.Encode(receiver.abi.Methods[funcName].ID) + if len(funcSig) != funcSigLength { + receiver.t.Fatalf("Unable to find Registry contract function with name %s", funcName) + } + + encoded := receiver.mustEncodeResponse(funcName, responseArgs...) + + return receiver.ethMock. + On( + "CallContract", + mock.Anything, + mock.MatchedBy(func(callArgs ethereum.CallMsg) bool { + return *callArgs.To == receiver.address && + hexutil.Encode(callArgs.Data)[0:funcSigLength] == funcSig && + matcher(callArgs) + }), + mock.Anything). + Return(encoded, nil) +} + +func (receiver contractMockReceiver) MockRevertResponse(funcName string) *mock.Call { + funcSig := hexutil.Encode(receiver.abi.Methods[funcName].ID) + if len(funcSig) != funcSigLength { + receiver.t.Fatalf("Unable to find Registry contract function with name %s", funcName) + } + + return receiver.ethMock. + On( + "CallContract", + mock.Anything, + mock.MatchedBy(func(callArgs ethereum.CallMsg) bool { + return *callArgs.To == receiver.address && + hexutil.Encode(callArgs.Data)[0:funcSigLength] == funcSig + }), + mock.Anything). 
// structToInterfaceSlice flattens the fields of a struct value into a slice of
// interface{}, preserving declaration order.
func structToInterfaceSlice(structArg interface{}) []interface{} {
	val := reflect.ValueOf(structArg)
	out := make([]interface{}, val.NumField())
	for i := range out {
		out[i] = val.Field(i).Interface()
	}
	return out
}
+package cltest diff --git a/core/internal/cltest/factories.go b/core/internal/cltest/factories.go new file mode 100644 index 00000000..b019e85b --- /dev/null +++ b/core/internal/cltest/factories.go @@ -0,0 +1,560 @@ +package cltest + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "math/big" + mathrand "math/rand" + "net/url" + "strconv" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/google/uuid" + ragep2ptypes "github.com/goplugin/libocr/ragep2p/types" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + "gopkg.in/guregu/null.v4" + + "github.com/jmoiron/sqlx" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + 
// NewBridgeType builds (without persisting) a bridge type from opts,
// generating a random name and URL for any field left empty.
func NewBridgeType(t testing.TB, opts BridgeOpts) (*bridges.BridgeTypeAuthentication, *bridges.BridgeType) {
	btr := &bridges.BridgeTypeRequest{}

	// Must randomise default to avoid unique constraint conflicts with other parallel tests
	rnd := uuid.New().String()

	if opts.Name != "" {
		btr.Name = bridges.MustParseBridgeName(opts.Name)
	} else {
		btr.Name = bridges.MustParseBridgeName(fmt.Sprintf("test_bridge_%s", rnd))
	}

	if opts.URL != "" {
		btr.URL = WebURL(t, opts.URL)
	} else {
		// Random query string keeps default URLs unique per test as well.
		btr.URL = WebURL(t, fmt.Sprintf("https://bridge.example.com/api?%s", rnd))
	}

	bta, bt, err := bridges.NewBridgeType(btr)
	require.NoError(t, err)
	return bta, bt
}
// MustInsertUnconfirmedEthTx inserts an evm tx in the "unconfirmed" state with
// the given nonce and from-address, and returns it. Variadic opts may include
// a time.Time (broadcast time, defaults to now) and/or a *big.Int (chain ID,
// defaults to FixtureChainID); any other opt types are silently ignored.
func MustInsertUnconfirmedEthTx(t *testing.T, txStore txmgr.TestEvmTxStore, nonce int64, fromAddress common.Address, opts ...interface{}) txmgr.Tx {
	broadcastAt := time.Now()
	chainID := &FixtureChainID
	for _, opt := range opts {
		switch v := opt.(type) {
		case time.Time:
			broadcastAt = v
		case *big.Int:
			chainID = v
		}
	}
	etx := NewEthTx(fromAddress)

	etx.BroadcastAt = &broadcastAt
	etx.InitialBroadcastAt = &broadcastAt
	// A sequence (nonce) must be assigned for the tx to be in the unconfirmed state.
	n := evmtypes.Nonce(nonce)
	etx.Sequence = &n
	etx.State = txmgrcommon.TxUnconfirmed
	etx.ChainID = chainID
	require.NoError(t, txStore.InsertTx(&etx))
	return etx
}
// MustInsertConfirmedEthTxWithLegacyAttempt inserts a confirmed evm tx with
// the given nonce plus a single broadcast legacy (pre-EIP-1559) attempt marked
// as broadcast before broadcastBeforeBlockNum, and returns the tx with the
// attempt appended.
func MustInsertConfirmedEthTxWithLegacyAttempt(t *testing.T, txStore txmgr.TestEvmTxStore, nonce int64, broadcastBeforeBlockNum int64, fromAddress common.Address) txmgr.Tx {
	timeNow := time.Now()
	etx := NewEthTx(fromAddress)

	etx.BroadcastAt = &timeNow
	etx.InitialBroadcastAt = &timeNow
	n := evmtypes.Nonce(nonce)
	etx.Sequence = &n
	etx.State = txmgrcommon.TxConfirmed
	// Arbitrary non-zero confirmation requirement; 6 is a conventional depth.
	etx.MinConfirmations.SetValid(6)
	require.NoError(t, txStore.InsertTx(&etx))
	attempt := NewLegacyEthTxAttempt(t, etx.ID)
	attempt.BroadcastBeforeBlockNum = &broadcastBeforeBlockNum
	attempt.State = txmgrtypes.TxAttemptBroadcast
	require.NoError(t, txStore.InsertTxAttempt(&attempt))
	// Append in-memory so the caller sees the attempt without re-fetching.
	etx.TxAttempts = append(etx.TxAttempts, attempt)
	return etx
}
// MustInsert generates a random eth key, adds it to the keystore, and
// enables it for each configured chain ID (defaulting to the fixture chain
// when r.chainIDs is nil). If r.Disabled is set, the key is disabled again
// after being enabled, leaving it registered but inactive.
// Returns the key and its address.
func (r RandomKey) MustInsert(t testing.TB, keystore keystore.Eth) (ethkey.KeyV2, common.Address) {
	if r.chainIDs == nil {
		// nil means "use the fixture chain"; an explicitly empty slice means no chains.
		r.chainIDs = []ubig.Big{*ubig.New(&FixtureChainID)}
	}

	key := MustGenerateRandomKey(t)
	keystore.XXXTestingOnlyAdd(key)

	for _, cid := range r.chainIDs {
		require.NoError(t, keystore.Add(key.Address, cid.ToInt()))
		require.NoError(t, keystore.Enable(key.Address, cid.ToInt()))
		if r.Disabled {
			require.NoError(t, keystore.Disable(key.Address, cid.ToInt()))
		}
	}

	return key, key.Address
}
:= keystore.GetStateForKey(k) + require.NoError(t, err) + return state, address +} + +// MustInsertRandomKey inserts a randomly generated (not cryptographically secure) key for testing. +// By default, it is enabled for the fixture chain. Pass chainIDs to override. +// Use MustInsertRandomKeyNoChains for a key associate with no chains. +func MustInsertRandomKey(t testing.TB, keystore keystore.Eth, chainIDs ...ubig.Big) (ethkey.KeyV2, common.Address) { + r := RandomKey{} + if len(chainIDs) > 0 { + r.chainIDs = chainIDs + } + return r.MustInsert(t, keystore) +} + +func MustInsertRandomKeyNoChains(t testing.TB, keystore keystore.Eth) (ethkey.KeyV2, common.Address) { + return RandomKey{chainIDs: []ubig.Big{}}.MustInsert(t, keystore) +} + +func MustInsertRandomKeyReturningState(t testing.TB, keystore keystore.Eth) (ethkey.State, common.Address) { + return RandomKey{}.MustInsertWithState(t, keystore) +} + +func MustGenerateRandomKey(t testing.TB) ethkey.KeyV2 { + key, err := ethkey.NewV2() + require.NoError(t, err) + return key +} + +func MustGenerateRandomKeyState(_ testing.TB) ethkey.State { + return ethkey.State{Address: NewEIP55Address()} +} + +func MustInsertHead(t *testing.T, db *sqlx.DB, cfg pg.QConfig, number int64) evmtypes.Head { + h := evmtypes.NewHead(big.NewInt(number), evmutils.NewHash(), evmutils.NewHash(), 0, ubig.New(&FixtureChainID)) + horm := headtracker.NewORM(db, logger.TestLogger(t), cfg, FixtureChainID) + + err := horm.IdempotentInsertHead(testutils.Context(t), &h) + require.NoError(t, err) + return h +} + +func MustInsertV2JobSpec(t *testing.T, db *sqlx.DB, transmitterAddress common.Address) job.Job { + t.Helper() + + addr, err := ethkey.NewEIP55Address(transmitterAddress.Hex()) + require.NoError(t, err) + + pipelineSpec := pipeline.Spec{} + err = db.Get(&pipelineSpec, `INSERT INTO pipeline_specs (dot_dag_source,created_at) VALUES ('',NOW()) RETURNING *`) + require.NoError(t, err) + + oracleSpec := MustInsertOffchainreportingOracleSpec(t, db, 
addr) + jb := job.Job{ + OCROracleSpec: &oracleSpec, + OCROracleSpecID: &oracleSpec.ID, + ExternalJobID: uuid.New(), + Type: job.OffchainReporting, + SchemaVersion: 1, + PipelineSpec: &pipelineSpec, + PipelineSpecID: pipelineSpec.ID, + } + + jorm := job.NewORM(db, nil, nil, nil, logger.TestLogger(t), configtest.NewTestGeneralConfig(t).Database()) + err = jorm.InsertJob(&jb) + require.NoError(t, err) + return jb +} + +func MustInsertOffchainreportingOracleSpec(t *testing.T, db *sqlx.DB, transmitterAddress ethkey.EIP55Address) job.OCROracleSpec { + t.Helper() + + ocrKeyID := models.MustSha256HashFromHex(DefaultOCRKeyBundleID) + spec := job.OCROracleSpec{} + require.NoError(t, db.Get(&spec, `INSERT INTO ocr_oracle_specs (created_at, updated_at, contract_address, p2pv2_bootstrappers, is_bootstrap_peer, encrypted_ocr_key_bundle_id, transmitter_address, observation_timeout, blockchain_timeout, contract_config_tracker_subscribe_interval, contract_config_tracker_poll_interval, contract_config_confirmations, database_timeout, observation_grace_period, contract_transmitter_transmit_timeout, evm_chain_id) VALUES ( +NOW(),NOW(),$1,'{}',false,$2,$3,0,0,0,0,0,0,0,0,0 +) RETURNING *`, NewEIP55Address(), &ocrKeyID, &transmitterAddress)) + return spec +} + +func MakeDirectRequestJobSpec(t *testing.T) *job.Job { + t.Helper() + drs := &job.DirectRequestSpec{EVMChainID: (*ubig.Big)(testutils.FixtureChainID)} + spec := &job.Job{ + Type: job.DirectRequest, + SchemaVersion: 1, + ExternalJobID: uuid.New(), + DirectRequestSpec: drs, + Pipeline: pipeline.Pipeline{}, + PipelineSpec: &pipeline.Spec{}, + } + return spec +} + +func MustInsertKeeperJob(t *testing.T, db *sqlx.DB, korm keeper.ORM, from ethkey.EIP55Address, contract ethkey.EIP55Address) job.Job { + t.Helper() + + var keeperSpec job.KeeperSpec + err := korm.Q().Get(&keeperSpec, `INSERT INTO keeper_specs (contract_address, from_address, created_at, updated_at,evm_chain_id) VALUES ($1, $2, NOW(), NOW(), $3) RETURNING *`, contract, 
from, testutils.SimulatedChainID.Int64()) + require.NoError(t, err) + + var pipelineSpec pipeline.Spec + err = korm.Q().Get(&pipelineSpec, `INSERT INTO pipeline_specs (dot_dag_source,created_at) VALUES ('',NOW()) RETURNING *`) + require.NoError(t, err) + + jb := job.Job{ + KeeperSpec: &keeperSpec, + KeeperSpecID: &keeperSpec.ID, + ExternalJobID: uuid.New(), + Type: job.Keeper, + SchemaVersion: 1, + PipelineSpec: &pipelineSpec, + PipelineSpecID: pipelineSpec.ID, + } + + cfg := configtest.NewTestGeneralConfig(t) + tlg := logger.TestLogger(t) + prm := pipeline.NewORM(db, tlg, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + btORM := bridges.NewORM(db, tlg, cfg.Database()) + jrm := job.NewORM(db, prm, btORM, nil, tlg, cfg.Database()) + err = jrm.InsertJob(&jb) + require.NoError(t, err) + return jb +} + +func MustInsertKeeperRegistry(t *testing.T, db *sqlx.DB, korm keeper.ORM, ethKeyStore keystore.Eth, keeperIndex, numKeepers, blockCountPerTurn int32) (keeper.Registry, job.Job) { + key, _ := MustInsertRandomKey(t, ethKeyStore, *ubig.New(testutils.SimulatedChainID)) + from := key.EIP55Address + t.Helper() + contractAddress := NewEIP55Address() + job := MustInsertKeeperJob(t, db, korm, from, contractAddress) + registry := keeper.Registry{ + ContractAddress: contractAddress, + BlockCountPerTurn: blockCountPerTurn, + CheckGas: 150_000, + FromAddress: from, + JobID: job.ID, + KeeperIndex: keeperIndex, + NumKeepers: numKeepers, + KeeperIndexMap: map[ethkey.EIP55Address]int32{ + from: keeperIndex, + }, + } + err := korm.UpsertRegistry(®istry) + require.NoError(t, err) + return registry, job +} + +func MustInsertUpkeepForRegistry(t *testing.T, db *sqlx.DB, cfg pg.QConfig, registry keeper.Registry) keeper.UpkeepRegistration { + korm := keeper.NewORM(db, logger.TestLogger(t), cfg) + upkeepID := ubig.NewI(int64(mathrand.Uint32())) + upkeep := keeper.UpkeepRegistration{ + UpkeepID: upkeepID, + ExecuteGas: uint32(150_000), + Registry: registry, + RegistryID: registry.ID, + 
CheckData: common.Hex2Bytes("ABC123"), + } + positioningConstant, err := keeper.CalcPositioningConstant(upkeepID, registry.ContractAddress) + require.NoError(t, err) + upkeep.PositioningConstant = positioningConstant + err = korm.UpsertUpkeep(&upkeep) + require.NoError(t, err) + return upkeep +} + +func MustInsertPipelineRun(t *testing.T, db *sqlx.DB) (run pipeline.Run) { + require.NoError(t, db.Get(&run, `INSERT INTO pipeline_runs (state,pipeline_spec_id,created_at) VALUES ($1, 0, NOW()) RETURNING *`, pipeline.RunStatusRunning)) + return run +} + +func MustInsertPipelineRunWithStatus(t *testing.T, db *sqlx.DB, pipelineSpecID int32, status pipeline.RunStatus) (run pipeline.Run) { + var finishedAt *time.Time + var outputs pipeline.JSONSerializable + var allErrors pipeline.RunErrors + var fatalErrors pipeline.RunErrors + now := time.Now() + switch status { + case pipeline.RunStatusCompleted: + finishedAt = &now + outputs = pipeline.JSONSerializable{ + Val: "foo", + Valid: true, + } + case pipeline.RunStatusErrored: + finishedAt = &now + allErrors = []null.String{null.StringFrom("oh no!")} + fatalErrors = []null.String{null.StringFrom("oh no!")} + case pipeline.RunStatusRunning, pipeline.RunStatusSuspended: + // leave empty + default: + t.Fatalf("unknown status: %s", status) + } + require.NoError(t, db.Get(&run, `INSERT INTO pipeline_runs (state,pipeline_spec_id,finished_at,outputs,all_errors,fatal_errors,created_at) VALUES ($1, $2, $3, $4, $5, $6, NOW()) RETURNING *`, status, pipelineSpecID, finishedAt, outputs, allErrors, fatalErrors)) + return run +} + +func MustInsertPipelineSpec(t *testing.T, db *sqlx.DB) (spec pipeline.Spec) { + err := db.Get(&spec, `INSERT INTO pipeline_specs (dot_dag_source,created_at) VALUES ('',NOW()) RETURNING *`) + require.NoError(t, err) + return +} + +func MustInsertUnfinishedPipelineTaskRun(t *testing.T, db *sqlx.DB, pipelineRunID int64) (tr pipeline.TaskRun) { + /* #nosec G404 */ + require.NoError(t, db.Get(&tr, `INSERT INTO 
pipeline_task_runs (dot_id, pipeline_run_id, id, type, created_at) VALUES ($1,$2,$3, '', NOW()) RETURNING *`, strconv.Itoa(mathrand.Int()), pipelineRunID, uuid.New())) + return tr +} + +func RandomLog(t *testing.T) types.Log { + t.Helper() + + topics := make([]common.Hash, 4) + for i := range topics { + topics[i] = evmutils.NewHash() + } + + return types.Log{ + Address: testutils.NewAddress(), + BlockHash: evmutils.NewHash(), + BlockNumber: uint64(mathrand.Intn(9999999)), + Index: uint(mathrand.Intn(9999999)), + Data: MustRandomBytes(t, 512), + Topics: []common.Hash{evmutils.NewHash(), evmutils.NewHash(), evmutils.NewHash(), evmutils.NewHash()}, + } +} + +func RawNewRoundLog(t *testing.T, contractAddr common.Address, blockHash common.Hash, blockNumber uint64, logIndex uint, removed bool) types.Log { + t.Helper() + topic := (flux_aggregator_wrapper.FluxAggregatorNewRound{}).Topic() + topics := []common.Hash{topic, evmutils.NewHash(), evmutils.NewHash()} + return RawNewRoundLogWithTopics(t, contractAddr, blockHash, blockNumber, logIndex, removed, topics) +} + +func RawNewRoundLogWithTopics(t *testing.T, contractAddr common.Address, blockHash common.Hash, blockNumber uint64, logIndex uint, removed bool, topics []common.Hash) types.Log { + t.Helper() + return types.Log{ + Address: contractAddr, + BlockHash: blockHash, + BlockNumber: blockNumber, + Index: logIndex, + Topics: topics, + Data: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + Removed: removed, + } +} + +func MustInsertExternalInitiator(t *testing.T, orm bridges.ORM) (ei bridges.ExternalInitiator) { + return MustInsertExternalInitiatorWithOpts(t, orm, ExternalInitiatorOpts{}) +} + +type ExternalInitiatorOpts struct { + NamePrefix string + URL *models.WebURL + OutgoingSecret string + OutgoingToken string +} + +func MustInsertExternalInitiatorWithOpts(t *testing.T, orm bridges.ORM, opts ExternalInitiatorOpts) (ei bridges.ExternalInitiator) { + var prefix string + if opts.NamePrefix != "" { + prefix = 
opts.NamePrefix + } else { + prefix = "ei" + } + ei.Name = fmt.Sprintf("%s-%s", prefix, uuid.New()) + ei.URL = opts.URL + ei.OutgoingSecret = opts.OutgoingSecret + ei.OutgoingToken = opts.OutgoingToken + token := auth.NewToken() + ei.AccessKey = token.AccessKey + ei.Salt = utils.NewSecret(utils.DefaultSecretSize) + hashedSecret, err := auth.HashedSecret(token, ei.Salt) + require.NoError(t, err) + ei.HashedSecret = hashedSecret + err = orm.CreateExternalInitiator(&ei) + require.NoError(t, err) + return ei +} diff --git a/core/internal/cltest/fixtures.go b/core/internal/cltest/fixtures.go new file mode 100644 index 00000000..86a91a54 --- /dev/null +++ b/core/internal/cltest/fixtures.go @@ -0,0 +1,37 @@ +package cltest + +import ( + "encoding/json" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/store/models" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" +) + +// JSONFromFixture create models.JSON from file path +func JSONFromFixture(t *testing.T, path string) models.JSON { + return JSONFromBytes(t, MustReadFile(t, path)) +} + +// LogFromFixture create ethtypes.log from file path +func LogFromFixture(t *testing.T, path string) types.Log { + value := gjson.Get(string(MustReadFile(t, path)), "params.result") + var el types.Log + require.NoError(t, json.Unmarshal([]byte(value.String()), &el)) + + return el +} + +// TxReceiptFromFixture create ethtypes.log from file path +func TxReceiptFromFixture(t *testing.T, path string) *types.Receipt { + jsonStr := JSONFromFixture(t, path).Get("result").String() + + var receipt types.Receipt + err := json.Unmarshal([]byte(jsonStr), &receipt) + require.NoError(t, err) + + return &receipt +} diff --git a/core/internal/cltest/heavyweight/orm.go b/core/internal/cltest/heavyweight/orm.go new file mode 100644 index 00000000..dcefcf8c --- /dev/null +++ b/core/internal/cltest/heavyweight/orm.go @@ -0,0 +1,96 @@ +// Package heavyweight contains test helpers that 
are costly and you should +// think **real carefully** before using in your tests. +package heavyweight + +import ( + "os" + "path" + "runtime" + "strings" + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/internal/testdb" +) + +// FullTestDBV2 creates a pristine DB which runs in a separate database than the normal +// unit tests, so you can do things like use other Postgres connection types with it. +func FullTestDBV2(t testing.TB, overrideFn func(c *plugin.Config, s *plugin.Secrets)) (plugin.GeneralConfig, *sqlx.DB) { + return KindFixtures.PrepareDB(t, overrideFn) +} + +// FullTestDBNoFixturesV2 is the same as FullTestDB, but it does not load fixtures. +func FullTestDBNoFixturesV2(t testing.TB, overrideFn func(c *plugin.Config, s *plugin.Secrets)) (plugin.GeneralConfig, *sqlx.DB) { + return KindTemplate.PrepareDB(t, overrideFn) +} + +// FullTestDBEmptyV2 creates an empty DB (without migrations). 
+func FullTestDBEmptyV2(t testing.TB, overrideFn func(c *plugin.Config, s *plugin.Secrets)) (plugin.GeneralConfig, *sqlx.DB) { + return KindEmpty.PrepareDB(t, overrideFn) +} + +func generateName() string { + return strings.ReplaceAll(uuid.New().String(), "-", "") +} + +type Kind int + +const ( + KindEmpty Kind = iota + KindTemplate + KindFixtures +) + +func (c Kind) PrepareDB(t testing.TB, overrideFn func(c *plugin.Config, s *plugin.Secrets)) (plugin.GeneralConfig, *sqlx.DB) { + testutils.SkipShort(t, "FullTestDB") + + gcfg := configtest.NewGeneralConfigSimulated(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Database.Dialect = dialects.Postgres + if overrideFn != nil { + overrideFn(c, s) + } + }) + + require.NoError(t, os.MkdirAll(gcfg.RootDir(), 0700)) + migrationTestDBURL, err := testdb.CreateOrReplace(gcfg.Database().URL(), generateName(), c != KindEmpty) + require.NoError(t, err) + db, err := pg.NewConnection(migrationTestDBURL, dialects.Postgres, gcfg.Database()) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, db.Close()) + os.RemoveAll(gcfg.RootDir()) + }) + + gcfg = configtest.NewGeneralConfigSimulated(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Database.Dialect = dialects.Postgres + s.Database.URL = models.MustSecretURL(migrationTestDBURL) + if overrideFn != nil { + overrideFn(c, s) + } + }) + + if c == KindFixtures { + _, filename, _, ok := runtime.Caller(1) + if !ok { + t.Fatal("could not get runtime.Caller(1)") + } + filepath := path.Join(path.Dir(filename), "../../../store/fixtures/fixtures.sql") + fixturesSQL, err := os.ReadFile(filepath) + require.NoError(t, err) + _, err = db.Exec(string(fixturesSQL)) + require.NoError(t, err) + } + + return gcfg, db +} diff --git a/core/internal/cltest/job_factories.go b/core/internal/cltest/job_factories.go new file mode 100644 index 00000000..c4f336d7 --- /dev/null +++ b/core/internal/cltest/job_factories.go @@ -0,0 +1,70 @@ +package cltest + +import ( + "fmt" + "testing" + + 
"github.com/google/uuid" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +const ( + minimalOCRNonBootstrapTemplate = ` + type = "offchainreporting" + schemaVersion = 1 + contractAddress = "%s" + evmChainID = "0" + p2pPeerID = "%s" + p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] + isBootstrapPeer = false + transmitterAddress = "%s" + keyBundleID = "%s" + observationTimeout = "10s" + observationSource = """ + ds1 [type=http method=GET url="http://data.com"]; + ds1_parse [type=jsonparse path="USD" lax=true]; + ds1 -> ds1_parse; + """ + ` +) + +func MinimalOCRNonBootstrapSpec(contractAddress, transmitterAddress ethkey.EIP55Address, peerID p2pkey.PeerID, keyBundleID string) string { + return fmt.Sprintf(minimalOCRNonBootstrapTemplate, contractAddress, peerID, transmitterAddress.Hex(), keyBundleID) +} + +func MustInsertWebhookSpec(t *testing.T, db *sqlx.DB) (job.Job, job.WebhookSpec) { + jobORM, pipelineORM := getORMs(t, db) + webhookSpec := job.WebhookSpec{} + require.NoError(t, jobORM.InsertWebhookSpec(&webhookSpec)) + + pSpec := pipeline.Pipeline{} + pipelineSpecID, err := pipelineORM.CreateSpec(pSpec, 0) + require.NoError(t, err) + + createdJob := job.Job{WebhookSpecID: &webhookSpec.ID, WebhookSpec: &webhookSpec, SchemaVersion: 1, Type: "webhook", + ExternalJobID: uuid.New(), PipelineSpecID: pipelineSpecID} + require.NoError(t, jobORM.InsertJob(&createdJob)) + + return createdJob, webhookSpec +} + +func getORMs(t *testing.T, db *sqlx.DB) (jobORM job.ORM, 
pipelineORM pipeline.ORM) { + config := configtest.NewTestGeneralConfig(t) + keyStore := NewKeyStore(t, db, config.Database()) + lggr := logger.TestLogger(t) + pipelineORM = pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgeORM := bridges.NewORM(db, lggr, config.Database()) + jobORM = job.NewORM(db, pipelineORM, bridgeORM, keyStore, lggr, config.Database()) + t.Cleanup(func() { jobORM.Close() }) + return +} diff --git a/core/internal/cltest/mocks.go b/core/internal/cltest/mocks.go new file mode 100644 index 00000000..c3650a39 --- /dev/null +++ b/core/internal/cltest/mocks.go @@ -0,0 +1,429 @@ +package cltest + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + + "github.com/jmoiron/sqlx" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + evmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm/mocks" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/web" + + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/robfig/cron/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// MockSubscription a mock subscription +type MockSubscription struct { + t testing.TB + mut sync.Mutex + channel interface{} + unsubscribed bool + Errors chan error +} + +// EmptyMockSubscription return empty MockSubscription +func EmptyMockSubscription(t testing.TB) *MockSubscription { + return &MockSubscription{t: t, Errors: make(chan error, 1), 
channel: make(chan struct{})} +} + +// Err returns error channel from mes +func (mes *MockSubscription) Err() <-chan error { return mes.Errors } + +// Unsubscribe closes the subscription +func (mes *MockSubscription) Unsubscribe() { + mes.mut.Lock() + defer mes.mut.Unlock() + + if mes.unsubscribed { + return + } + mes.unsubscribed = true + switch mes.channel.(type) { + case chan struct{}: + close(mes.channel.(chan struct{})) + case chan gethTypes.Log: + close(mes.channel.(chan gethTypes.Log)) + case chan *evmtypes.Head: + close(mes.channel.(chan *evmtypes.Head)) + default: + logger.TestLogger(mes.t).Fatalf("Unable to close MockSubscription channel of type %T", mes.channel) + } + close(mes.Errors) +} + +// RendererMock a mock renderer +type RendererMock struct { + Renders []interface{} +} + +// Render appends values to renderer mock +func (rm *RendererMock) Render(v interface{}, headers ...string) error { + rm.Renders = append(rm.Renders, v) + return nil +} + +// InstanceAppFactory is an InstanceAppFactory +type InstanceAppFactory struct { + App plugin.Application +} + +// NewApplication creates a new application with specified config +func (f InstanceAppFactory) NewApplication(context.Context, plugin.GeneralConfig, logger.Logger, *sqlx.DB) (plugin.Application, error) { + return f.App, nil +} + +type seededAppFactory struct { + Application plugin.Application +} + +func (s seededAppFactory) NewApplication(context.Context, plugin.GeneralConfig, logger.Logger, *sqlx.DB) (plugin.Application, error) { + return noopStopApplication{s.Application}, nil +} + +type noopStopApplication struct { + plugin.Application +} + +// FIXME: Why bother with this wrapper? 
+func (a noopStopApplication) Stop() error { + return nil +} + +// BlockedRunner is a Runner that blocks until its channel is posted to +type BlockedRunner struct { + Done chan struct{} +} + +// Run runs the blocked runner, doesn't return until the channel is signalled +func (r BlockedRunner) Run(context.Context, plugin.Application) error { + <-r.Done + return nil +} + +// EmptyRunner is an EmptyRunner +type EmptyRunner struct{} + +// Run runs the empty runner +func (r EmptyRunner) Run(context.Context, plugin.Application) error { + return nil +} + +// MockCountingPrompter is a mock counting prompt +type MockCountingPrompter struct { + T *testing.T + EnteredStrings []string + Count int + NotTerminal bool +} + +// Prompt returns an entered string +func (p *MockCountingPrompter) Prompt(string) string { return p.prompt() } + +func (p *MockCountingPrompter) prompt() string { + i := p.Count + p.Count++ + if len(p.EnteredStrings)-1 < i { + p.T.Errorf("Not enough passwords supplied to MockCountingPrompter, wanted %d", i) + p.T.FailNow() + } + return p.EnteredStrings[i] +} + +// PasswordPrompt returns an entered string +func (p *MockCountingPrompter) PasswordPrompt(string) string { return p.prompt() } + +// IsTerminal always returns true in tests +func (p *MockCountingPrompter) IsTerminal() bool { + return !p.NotTerminal +} + +// NewHTTPMockServer create http test server with passed in parameters +func NewHTTPMockServer( + t *testing.T, + status int, + wantMethod string, + response string, + callback ...func(http.Header, string), +) *httptest.Server { + called := false + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b, err := io.ReadAll(r.Body) + assert.NoError(t, err) + assert.Equal(t, wantMethod, r.Method) + if len(callback) > 0 { + callback[0](r.Header, string(b)) + } + called = true + + w.WriteHeader(status) + _, _ = io.WriteString(w, response) // Assignment for errcheck. Only used in tests so we can ignore. 
+ }) + + server := httptest.NewServer(handler) + t.Cleanup(func() { + server.Close() + assert.True(t, called, "expected call Mock HTTP endpoint '%s'", server.URL) + }) + return server +} + +// NewHTTPMockServerWithRequest creates http test server that makes the request +// available in the callback +func NewHTTPMockServerWithRequest( + t *testing.T, + status int, + response string, + callback func(r *http.Request), +) *httptest.Server { + called := false + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + callback(r) + called = true + + w.WriteHeader(status) + _, _ = io.WriteString(w, response) // Assignment for errcheck. Only used in tests so we can ignore. + }) + + server := httptest.NewServer(handler) + t.Cleanup(func() { + server.Close() + assert.True(t, called, "expected call Mock HTTP endpoint '%s'", server.URL) + }) + return server +} + +func NewHTTPMockServerWithAlterableResponse( + t *testing.T, response func() string) (server *httptest.Server) { + server = httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = io.WriteString(w, response()) + })) + return server +} + +func NewHTTPMockServerWithAlterableResponseAndRequest(t *testing.T, response func() string, callback func(r *http.Request)) (server *httptest.Server) { + server = httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + callback(r) + w.WriteHeader(http.StatusOK) + _, _ = io.WriteString(w, response()) + })) + return server +} + +// MockCron represents a mock cron +type MockCron struct { + Entries []MockCronEntry + nextID cron.EntryID +} + +// Start starts the mockcron +func (*MockCron) Start() {} + +// Stop stops the mockcron +func (*MockCron) Stop() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + return ctx +} + +// AddFunc appends a schedule to mockcron entries +func (mc *MockCron) AddFunc(schd string, fn func()) 
(cron.EntryID, error) { + mc.Entries = append(mc.Entries, MockCronEntry{ + Schedule: schd, + Function: fn, + }) + mc.nextID++ + return mc.nextID, nil +} + +// RunEntries run every function for each mockcron entry +func (mc *MockCron) RunEntries() { + for _, entry := range mc.Entries { + entry.Function() + } +} + +// MockCronEntry a cron schedule and function +type MockCronEntry struct { + Schedule string + Function func() +} + +// MockHeadTrackable allows you to mock HeadTrackable +type MockHeadTrackable struct { + onNewHeadCount atomic.Int32 +} + +// OnNewLongestChain increases the OnNewLongestChainCount count by one +func (m *MockHeadTrackable) OnNewLongestChain(context.Context, *evmtypes.Head) { + m.onNewHeadCount.Add(1) +} + +// OnNewLongestChainCount returns the count of new heads, safely. +func (m *MockHeadTrackable) OnNewLongestChainCount() int32 { + return m.onNewHeadCount.Load() +} + +// NeverSleeper is a struct that never sleeps +type NeverSleeper struct{} + +// Reset resets the never sleeper +func (ns NeverSleeper) Reset() {} + +// Sleep puts the never sleeper to sleep +func (ns NeverSleeper) Sleep() {} + +// After returns a duration +func (ns NeverSleeper) After() time.Duration { return 0 * time.Microsecond } + +// Duration returns a duration +func (ns NeverSleeper) Duration() time.Duration { return 0 * time.Microsecond } + +// MustRandomUser inserts a new admin user with a random email into the test DB +func MustRandomUser(t testing.TB) sessions.User { + email := fmt.Sprintf("user-%v@plugin.test", NewRandomPositiveInt64()) + r, err := sessions.NewUser(email, Password, sessions.UserRoleAdmin) + if err != nil { + logger.TestLogger(t).Panic(err) + } + return r +} + +func NewUserWithSession(t testing.TB, orm sessions.AuthenticationProvider) sessions.User { + u := MustRandomUser(t) + require.NoError(t, orm.CreateUser(&u)) + + _, err := orm.CreateSession(sessions.SessionRequest{ + Email: u.Email, + Password: Password, + }) + require.NoError(t, err) + return 
u +} + +type MockAPIInitializer struct { + t testing.TB + Count int +} + +func NewMockAPIInitializer(t testing.TB) *MockAPIInitializer { + return &MockAPIInitializer{t: t} +} + +func (m *MockAPIInitializer) Initialize(orm sessions.BasicAdminUsersORM, lggr logger.Logger) (sessions.User, error) { + if user, err := orm.FindUser(APIEmailAdmin); err == nil { + return user, err + } + m.Count++ + user := MustRandomUser(m.t) + return user, orm.CreateUser(&user) +} + +func NewMockAuthenticatedHTTPClient(lggr logger.Logger, cfg cmd.ClientOpts, sessionID string) cmd.HTTPClient { + return cmd.NewAuthenticatedHTTPClient(lggr, cfg, MockCookieAuthenticator{SessionID: sessionID}, sessions.SessionRequest{}) +} + +type MockCookieAuthenticator struct { + t testing.TB + SessionID string + Error error +} + +func (m MockCookieAuthenticator) Cookie() (*http.Cookie, error) { + return MustGenerateSessionCookie(m.t, m.SessionID), m.Error +} + +func (m MockCookieAuthenticator) Authenticate(context.Context, sessions.SessionRequest) (*http.Cookie, error) { + return MustGenerateSessionCookie(m.t, m.SessionID), m.Error +} + +func (m MockCookieAuthenticator) Logout() error { + return nil +} + +type MockSessionRequestBuilder struct { + Count int + Error error +} + +func (m *MockSessionRequestBuilder) Build(string) (sessions.SessionRequest, error) { + m.Count++ + if m.Error != nil { + return sessions.SessionRequest{}, m.Error + } + return sessions.SessionRequest{Email: APIEmailAdmin, Password: Password}, nil +} + +type MockSecretGenerator struct{} + +func (m MockSecretGenerator) Generate(string) ([]byte, error) { + return []byte(SessionSecret), nil +} + +type MockChangePasswordPrompter struct { + web.UpdatePasswordRequest + err error +} + +func (m MockChangePasswordPrompter) Prompt() (web.UpdatePasswordRequest, error) { + return m.UpdatePasswordRequest, m.err +} + +type MockPasswordPrompter struct { + Password string +} + +func (m MockPasswordPrompter) Prompt() string { + return m.Password +} + 
+func NewLegacyChainsWithMockChain(t testing.TB, ethClient evmclient.Client, cfg legacyevm.AppConfig) legacyevm.LegacyChainContainer { + ch := new(evmmocks.Chain) + ch.On("Client").Return(ethClient) + ch.On("Logger").Return(logger.TestLogger(t)) + scopedCfg := evmtest.NewChainScopedConfig(t, cfg) + ch.On("ID").Return(scopedCfg.EVM().ChainID()) + ch.On("Config").Return(scopedCfg) + + return NewLegacyChainsWithChain(ch, cfg) + +} + +func NewLegacyChainsWithMockChainAndTxManager(t testing.TB, ethClient evmclient.Client, cfg legacyevm.AppConfig, txm txmgr.TxManager) legacyevm.LegacyChainContainer { + ch := new(evmmocks.Chain) + ch.On("Client").Return(ethClient) + ch.On("Logger").Return(logger.TestLogger(t)) + scopedCfg := evmtest.NewChainScopedConfig(t, cfg) + ch.On("ID").Return(scopedCfg.EVM().ChainID()) + ch.On("Config").Return(scopedCfg) + ch.On("TxManager").Return(txm) + + return NewLegacyChainsWithChain(ch, cfg) +} + +func NewLegacyChainsWithChain(ch legacyevm.Chain, cfg legacyevm.AppConfig) legacyevm.LegacyChainContainer { + m := map[string]legacyevm.Chain{ch.ID().String(): ch} + return legacyevm.NewLegacyChains(m, cfg.EVMConfigs()) +} diff --git a/core/internal/cltest/simulated_backend.go b/core/internal/cltest/simulated_backend.go new file mode 100644 index 00000000..32393b35 --- /dev/null +++ b/core/internal/cltest/simulated_backend.go @@ -0,0 +1,91 @@ +package cltest + +import ( + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func NewSimulatedBackend(t *testing.T, 
alloc core.GenesisAlloc, gasLimit uint32) *backends.SimulatedBackend { + backend := backends.NewSimulatedBackend(alloc, uint64(gasLimit)) + // NOTE: Make sure to finish closing any application/client before + // backend.Close or they can hang + t.Cleanup(func() { + logger.TestLogger(t).ErrorIfFn(backend.Close, "Error closing simulated backend") + }) + return backend +} +func NewApplicationWithConfigV2OnSimulatedBlockchain( + t testing.TB, + cfg plugin.GeneralConfig, + backend *backends.SimulatedBackend, + flagsAndDeps ...interface{}, +) *TestApplication { + if bid := backend.Blockchain().Config().ChainID; bid.Cmp(testutils.SimulatedChainID) != 0 { + t.Fatalf("expected backend chain ID to be %s but it was %s", testutils.SimulatedChainID.String(), bid.String()) + } + + require.Zero(t, evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()).Cmp(testutils.SimulatedChainID)) + chainID := big.New(testutils.SimulatedChainID) + client := client.NewSimulatedBackendClient(t, backend, testutils.SimulatedChainID) + + flagsAndDeps = append(flagsAndDeps, client, chainID) + + // app.Stop() will call client.Close on the simulated backend + app := NewApplicationWithConfig(t, cfg, flagsAndDeps...) + + return app +} + +// NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain is like NewApplicationWithConfigAndKeyOnSimulatedBlockchain +// but cfg should be v2, and configtest.NewGeneralConfigSimulated used to include the simulated chain (testutils.SimulatedChainID). 
+func NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain( + t testing.TB, + cfg plugin.GeneralConfig, + backend *backends.SimulatedBackend, + flagsAndDeps ...interface{}, +) *TestApplication { + if bid := backend.Blockchain().Config().ChainID; bid.Cmp(testutils.SimulatedChainID) != 0 { + t.Fatalf("expected backend chain ID to be %s but it was %s", testutils.SimulatedChainID.String(), bid.String()) + } + + require.Zero(t, evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()).Cmp(testutils.SimulatedChainID)) + chainID := big.New(testutils.SimulatedChainID) + client := client.NewSimulatedBackendClient(t, backend, testutils.SimulatedChainID) + + flagsAndDeps = append(flagsAndDeps, client, chainID) + + // app.Stop() will call client.Close on the simulated backend + return NewApplicationWithConfigAndKey(t, cfg, flagsAndDeps...) +} + +// Mine forces the simulated backend to produce a new block every X seconds +func Mine(backend *backends.SimulatedBackend, blockTime time.Duration) (stopMining func()) { + timer := time.NewTicker(blockTime) + chStop := make(chan struct{}) + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + for { + select { + case <-timer.C: + backend.Commit() + case <-chStop: + wg.Done() + return + } + } + }() + return func() { close(chStop); timer.Stop(); wg.Wait() } +} diff --git a/core/internal/features/features_test.go b/core/internal/features/features_test.go new file mode 100644 index 00000000..65e58935 --- /dev/null +++ b/core/internal/features/features_test.go @@ -0,0 +1,1397 @@ +package features_test + +import ( + "bytes" + "context" + _ "embed" + "encoding/json" + "fmt" + "io" + "math/big" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + 
"github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/rpc" + "github.com/google/uuid" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" + "gopkg.in/guregu/null.v4" + + ocrcommontypes "github.com/goplugin/libocr/commontypes" + "github.com/goplugin/libocr/gethwrappers/offchainaggregator" + "github.com/goplugin/libocr/gethwrappers/testoffchainaggregator" + "github.com/goplugin/libocr/offchainreporting/confighelper" + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_forwarder" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/consumer_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flags_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/multiwordconsumer_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/operator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/keystest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + "github.com/goplugin/pluginv3.0/v2/core/static" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web" + webauth "github.com/goplugin/pluginv3.0/v2/core/web/auth" +) + +var oneETH = assets.Eth(*big.NewInt(1000000000000000000)) + +func TestIntegration_ExternalInitiatorV2(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.ExternalInitiatorsEnabled = ptr(true) + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(10 * time.Millisecond) + }) + + app := cltest.NewApplicationWithConfig(t, cfg, ethClient, cltest.UseRealExternalInitiatorManager) + require.NoError(t, app.Start(testutils.Context(t))) + + var ( + eiName = "substrate-ei" + eiSpec = map[string]interface{}{"foo": "bar"} + eiRequest = map[string]interface{}{"result": 42} + + jobUUID = uuid.MustParse("0EEC7E1D-D0D2-476C-A1A8-72DFB6633F46") + + expectedCreateJobRequest = 
map[string]interface{}{ + "jobId": jobUUID.String(), + "type": eiName, + "params": eiSpec, + } + ) + + // Setup EI + var eiURL string + var eiNotifiedOfCreate bool + var eiNotifiedOfDelete bool + { + mockEI := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !eiNotifiedOfCreate { + require.Equal(t, http.MethodPost, r.Method) + + eiNotifiedOfCreate = true + defer r.Body.Close() + + var gotCreateJobRequest map[string]interface{} + err := json.NewDecoder(r.Body).Decode(&gotCreateJobRequest) + require.NoError(t, err) + + require.Equal(t, expectedCreateJobRequest, gotCreateJobRequest) + w.WriteHeader(http.StatusOK) + } else { + require.Equal(t, http.MethodDelete, r.Method) + + eiNotifiedOfDelete = true + defer r.Body.Close() + + require.Equal(t, fmt.Sprintf("/%v", jobUUID.String()), r.URL.Path) + } + })) + defer mockEI.Close() + eiURL = mockEI.URL + } + + // Create the EI record on the Core node + var eia *auth.Token + { + eiCreate := map[string]string{ + "name": eiName, + "url": eiURL, + } + eiCreateJSON, err := json.Marshal(eiCreate) + require.NoError(t, err) + eip := cltest.CreateExternalInitiatorViaWeb(t, app, string(eiCreateJSON)) + eia = &auth.Token{ + AccessKey: eip.AccessKey, + Secret: eip.Secret, + } + } + + // Create the bridge on the Core node + var bridgeCalled bool + { + bridgeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + bridgeCalled = true + defer r.Body.Close() + + var gotBridgeRequest map[string]interface{} + err := json.NewDecoder(r.Body).Decode(&gotBridgeRequest) + require.NoError(t, err) + + expectedBridgeRequest := map[string]interface{}{ + "value": float64(42), + } + require.Equal(t, expectedBridgeRequest, gotBridgeRequest) + + w.WriteHeader(http.StatusOK) + require.NoError(t, err) + _, err = io.WriteString(w, `{}`) + require.NoError(t, err) + })) + u, _ := url.Parse(bridgeServer.URL) + err := app.BridgeORM().CreateBridgeType(&bridges.BridgeType{ + Name: 
bridges.BridgeName("substrate-adapter1"), + URL: models.WebURL(*u), + }) + require.NoError(t, err) + defer bridgeServer.Close() + } + + // Create the job spec on the Core node + var jobID int32 + { + tomlSpec := fmt.Sprintf(` +type = "webhook" +schemaVersion = 1 +externalJobID = "%v" +externalInitiators = [ + { + name = "%s", + spec = """ + %s +""" + } +] +observationSource = """ + parse [type=jsonparse path="result" data="$(jobRun.requestBody)"] + submit [type=bridge name="substrate-adapter1" requestData=<{ "value": $(parse) }>] + parse -> submit +""" + `, jobUUID, eiName, cltest.MustJSONMarshal(t, eiSpec)) + + _, err := webhook.ValidatedWebhookSpec(tomlSpec, app.GetExternalInitiatorManager()) + require.NoError(t, err) + job := cltest.CreateJobViaWeb(t, app, []byte(cltest.MustJSONMarshal(t, web.CreateJobRequest{TOML: tomlSpec}))) + jobID = job.ID + t.Log("JOB created", job.WebhookSpecID) + + require.Eventually(t, func() bool { return eiNotifiedOfCreate }, 5*time.Second, 10*time.Millisecond, "expected external initiator to be notified of new job") + } + + t.Run("calling webhook_spec with non-matching external_initiator_id returns unauthorized", func(t *testing.T) { + eiaWrong := auth.NewToken() + body := cltest.MustJSONMarshal(t, eiRequest) + headers := make(map[string]string) + headers[static.ExternalInitiatorAccessKeyHeader] = eiaWrong.AccessKey + headers[static.ExternalInitiatorSecretHeader] = eiaWrong.Secret + + url := app.Server.URL + "/v2/jobs/" + jobUUID.String() + "/runs" + bodyBuf := bytes.NewBufferString(body) + resp, cleanup := cltest.UnauthenticatedPost(t, url, bodyBuf, headers) + defer cleanup() + cltest.AssertServerResponse(t, resp, 401) + + cltest.AssertCountStays(t, app.GetSqlxDB(), "pipeline_runs", 0) + }) + + t.Run("calling webhook_spec with matching external_initiator_id works", func(t *testing.T) { + // Simulate request from EI -> Core node + cltest.AwaitJobActive(t, app.JobSpawner(), jobID, 3*time.Second) + + _ = 
cltest.CreateJobRunViaExternalInitiatorV2(t, app, jobUUID, *eia, cltest.MustJSONMarshal(t, eiRequest)) + + pipelineORM := pipeline.NewORM(app.GetSqlxDB(), logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + bridgeORM := bridges.NewORM(app.GetSqlxDB(), logger.TestLogger(t), cfg.Database()) + jobORM := job.NewORM(app.GetSqlxDB(), pipelineORM, bridgeORM, app.KeyStore, logger.TestLogger(t), cfg.Database()) + + runs := cltest.WaitForPipelineComplete(t, 0, jobID, 1, 2, jobORM, 5*time.Second, 300*time.Millisecond) + require.Len(t, runs, 1) + run := runs[0] + require.Len(t, run.PipelineTaskRuns, 2) + require.Empty(t, run.PipelineTaskRuns[0].Error) + require.Empty(t, run.PipelineTaskRuns[1].Error) + + assert.True(t, bridgeCalled, "expected bridge server to be called") + }) + + // Delete the job + { + cltest.DeleteJobViaWeb(t, app, jobID) + require.Eventually(t, func() bool { return eiNotifiedOfDelete }, 5*time.Second, 10*time.Millisecond, "expected external initiator to be notified of deleted job") + } +} + +func TestIntegration_AuthToken(t *testing.T) { + t.Parallel() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + + // set up user + mockUser := cltest.MustRandomUser(t) + key, secret := uuid.New().String(), uuid.New().String() + apiToken := auth.Token{AccessKey: key, Secret: secret} + orm := app.AuthenticationProvider() + require.NoError(t, orm.CreateUser(&mockUser)) + require.NoError(t, orm.SetAuthToken(&mockUser, &apiToken)) + + url := app.Server.URL + "/users" + headers := make(map[string]string) + headers[webauth.APIKey] = key + headers[webauth.APISecret] = secret + + resp, cleanup := cltest.UnauthenticatedGet(t, url, headers) + defer cleanup() + cltest.AssertServerResponse(t, resp, http.StatusOK) +} + +type OperatorContracts struct { + user *bind.TransactOpts + multiWordConsumerAddress common.Address + singleWordConsumerAddress common.Address + operatorAddress common.Address + linkTokenAddress 
common.Address + linkToken *link_token_interface.LinkToken + multiWord *multiwordconsumer_wrapper.MultiWordConsumer + singleWord *consumer_wrapper.Consumer + operator *operator_wrapper.Operator + sim *backends.SimulatedBackend +} + +func setupOperatorContracts(t *testing.T) OperatorContracts { + user := testutils.MustNewSimTransactor(t) + genesisData := core.GenesisAlloc{ + user.From: {Balance: assets.Ether(1000).ToInt()}, + } + gasLimit := uint32(ethconfig.Defaults.Miner.GasCeil * 2) + b := cltest.NewSimulatedBackend(t, genesisData, gasLimit) + linkTokenAddress, _, linkContract, err := link_token_interface.DeployLinkToken(user, b) + require.NoError(t, err) + b.Commit() + + operatorAddress, _, operatorContract, err := operator_wrapper.DeployOperator(user, b, linkTokenAddress, user.From) + require.NoError(t, err) + b.Commit() + + var empty [32]byte + multiWordConsumerAddress, _, multiWordConsumerContract, err := multiwordconsumer_wrapper.DeployMultiWordConsumer(user, b, linkTokenAddress, operatorAddress, empty) + require.NoError(t, err) + b.Commit() + + singleConsumerAddress, _, singleConsumerContract, err := consumer_wrapper.DeployConsumer(user, b, linkTokenAddress, operatorAddress, empty) + require.NoError(t, err) + b.Commit() + + // The consumer contract needs to have link in it to be able to pay + // for the data request. 
+ _, err = linkContract.Transfer(user, multiWordConsumerAddress, big.NewInt(1000)) + require.NoError(t, err) + _, err = linkContract.Transfer(user, singleConsumerAddress, big.NewInt(1000)) + require.NoError(t, err) + + return OperatorContracts{ + user: user, + multiWordConsumerAddress: multiWordConsumerAddress, + singleWordConsumerAddress: singleConsumerAddress, + linkToken: linkContract, + linkTokenAddress: linkTokenAddress, + multiWord: multiWordConsumerContract, + singleWord: singleConsumerContract, + operator: operatorContract, + operatorAddress: operatorAddress, + sim: b, + } +} + +//go:embed singleword-spec-template.yml +var singleWordSpecTemplate string + +//go:embed multiword-spec-template.yml +var multiWordSpecTemplate string + +// Tests both single and multiple word responses - +// i.e. both fulfillOracleRequest2 and fulfillOracleRequest. +func TestIntegration_DirectRequest(t *testing.T) { + t.Parallel() + tests := []struct { + name string + eip1559 bool + }{ + {"legacy mode", false}, + {"eip1559 mode", true}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + // Simulate a consumer contract calling to obtain ETH quotes in 3 different currencies + // in a single callback. 
+ config := configtest.NewGeneralConfigSimulated(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(100 * time.Millisecond) + c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true) + }) + operatorContracts := setupOperatorContracts(t) + b := operatorContracts.sim + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, b) + + sendingKeys, err := app.KeyStore.Eth().EnabledKeysForChain(testutils.SimulatedChainID) + require.NoError(t, err) + authorizedSenders := []common.Address{sendingKeys[0].Address} + tx, err := operatorContracts.operator.SetAuthorizedSenders(operatorContracts.user, authorizedSenders) + require.NoError(t, err) + b.Commit() + cltest.RequireTxSuccessful(t, b, tx.Hash()) + + // Fund node account with ETH. + n, err := b.NonceAt(testutils.Context(t), operatorContracts.user.From, nil) + require.NoError(t, err) + tx = cltest.NewLegacyTransaction(n, sendingKeys[0].Address, assets.Ether(100).ToInt(), 21000, big.NewInt(1000000000), nil) + signedTx, err := operatorContracts.user.Signer(operatorContracts.user.From, tx) + require.NoError(t, err) + err = b.SendTransaction(testutils.Context(t), signedTx) + require.NoError(t, err) + b.Commit() + + err = app.Start(testutils.Context(t)) + require.NoError(t, err) + + mockServerUSD := cltest.NewHTTPMockServer(t, 200, "GET", `{"USD": 614.64}`) + mockServerEUR := cltest.NewHTTPMockServer(t, 200, "GET", `{"EUR": 507.07}`) + mockServerJPY := cltest.NewHTTPMockServer(t, 200, "GET", `{"JPY": 63818.86}`) + + nameAndExternalJobID := uuid.New() + addr := operatorContracts.operatorAddress.Hex() + spec := fmt.Sprintf(multiWordSpecTemplate, nameAndExternalJobID, addr, nameAndExternalJobID, addr) + j := cltest.CreateJobViaWeb(t, app, []byte(cltest.MustJSONMarshal(t, web.CreateJobRequest{TOML: spec}))) + cltest.AwaitJobActive(t, app.JobSpawner(), j.ID, 5*time.Second) + + var jobID [32]byte + copy(jobID[:], j.ExternalJobID[:]) + tx, err = 
operatorContracts.multiWord.SetSpecID(operatorContracts.user, jobID) + require.NoError(t, err) + b.Commit() + cltest.RequireTxSuccessful(t, b, tx.Hash()) + + operatorContracts.user.GasLimit = 1000000 + tx, err = operatorContracts.multiWord.RequestMultipleParametersWithCustomURLs(operatorContracts.user, + mockServerUSD.URL, "USD", + mockServerEUR.URL, "EUR", + mockServerJPY.URL, "JPY", + big.NewInt(1000), + ) + require.NoError(t, err) + b.Commit() + cltest.RequireTxSuccessful(t, b, tx.Hash()) + + empty := big.NewInt(0) + assertPricesUint256(t, empty, empty, empty, operatorContracts.multiWord) + + stopBlocks := utils.FiniteTicker(100*time.Millisecond, func() { + triggerAllKeys(t, app) + b.Commit() + }) + defer stopBlocks() + + pipelineRuns := cltest.WaitForPipelineComplete(t, 0, j.ID, 1, 14, app.JobORM(), testutils.WaitTimeout(t)/2, time.Second) + pipelineRun := pipelineRuns[0] + assertPipelineTaskRunsSuccessful(t, pipelineRun.PipelineTaskRuns) + assertPricesUint256(t, big.NewInt(61464), big.NewInt(50707), big.NewInt(6381886), operatorContracts.multiWord) + + nameAndExternalJobID = uuid.New() + singleWordSpec := fmt.Sprintf(singleWordSpecTemplate, nameAndExternalJobID, addr, nameAndExternalJobID, addr) + jobSingleWord := cltest.CreateJobViaWeb(t, app, []byte(cltest.MustJSONMarshal(t, web.CreateJobRequest{TOML: singleWordSpec}))) + cltest.AwaitJobActive(t, app.JobSpawner(), jobSingleWord.ID, 5*time.Second) + + var jobIDSingleWord [32]byte + copy(jobIDSingleWord[:], jobSingleWord.ExternalJobID[:]) + tx, err = operatorContracts.singleWord.SetSpecID(operatorContracts.user, jobIDSingleWord) + require.NoError(t, err) + b.Commit() + cltest.RequireTxSuccessful(t, b, tx.Hash()) + mockServerUSD2 := cltest.NewHTTPMockServer(t, 200, "GET", `{"USD": 614.64}`) + tx, err = operatorContracts.singleWord.RequestMultipleParametersWithCustomURLs(operatorContracts.user, + mockServerUSD2.URL, "USD", + big.NewInt(1000), + ) + require.NoError(t, err) + b.Commit() + 
cltest.RequireTxSuccessful(t, b, tx.Hash()) + + pipelineRuns = cltest.WaitForPipelineComplete(t, 0, jobSingleWord.ID, 1, 8, app.JobORM(), testutils.WaitTimeout(t), time.Second) + pipelineRun = pipelineRuns[0] + assertPipelineTaskRunsSuccessful(t, pipelineRun.PipelineTaskRuns) + v, err := operatorContracts.singleWord.CurrentPriceInt(nil) + require.NoError(t, err) + assert.Equal(t, big.NewInt(61464), v) + }) + } +} + +func setupAppForEthTx(t *testing.T, operatorContracts OperatorContracts) (app *cltest.TestApplication, sendingAddress common.Address, o *observer.ObservedLogs) { + b := operatorContracts.sim + lggr, o := logger.TestLoggerObserved(t, zapcore.DebugLevel) + + cfg := configtest.NewGeneralConfigSimulated(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(100 * time.Millisecond) + }) + app = cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, cfg, b, lggr) + b.Commit() + + sendingKeys, err := app.KeyStore.Eth().EnabledKeysForChain(testutils.SimulatedChainID) + require.NoError(t, err) + require.Len(t, sendingKeys, 1) + + // Fund node account with ETH. 
+ n, err := b.NonceAt(testutils.Context(t), operatorContracts.user.From, nil) + require.NoError(t, err) + tx := cltest.NewLegacyTransaction(n, sendingKeys[0].Address, assets.Ether(100).ToInt(), 21000, big.NewInt(1000000000), nil) + signedTx, err := operatorContracts.user.Signer(operatorContracts.user.From, tx) + require.NoError(t, err) + err = b.SendTransaction(testutils.Context(t), signedTx) + require.NoError(t, err) + b.Commit() + + err = app.Start(testutils.Context(t)) + require.NoError(t, err) + + testutils.WaitForLogMessage(t, o, "Subscribing to new heads on chain 1337") + testutils.WaitForLogMessage(t, o, "Subscribed to heads on chain 1337") + + return app, sendingKeys[0].Address, o +} + +func TestIntegration_AsyncEthTx(t *testing.T) { + t.Parallel() + operatorContracts := setupOperatorContracts(t) + b := operatorContracts.sim + + t.Run("with FailOnRevert enabled, run succeeds when transaction is successful", func(t *testing.T) { + app, sendingAddr, o := setupAppForEthTx(t, operatorContracts) + tomlSpec := ` +type = "webhook" +schemaVersion = 1 +observationSource = """ + submit_tx [type=ethtx to="%s" + data="%s" + minConfirmations="2" + failOnRevert=false + evmChainID="%s" + from="[\\"%s\\"]" + ] +""" +` + // This succeeds for whatever reason + revertingData := "0xdeadbeef" + tomlSpec = fmt.Sprintf(tomlSpec, operatorContracts.linkTokenAddress.String(), revertingData, testutils.SimulatedChainID.String(), sendingAddr) + j := cltest.CreateJobViaWeb(t, app, []byte(cltest.MustJSONMarshal(t, web.CreateJobRequest{TOML: tomlSpec}))) + cltest.AwaitJobActive(t, app.JobSpawner(), j.ID, testutils.WaitTimeout(t)) + + run := cltest.CreateJobRunViaUser(t, app, j.ExternalJobID, "") + assert.Equal(t, []*string(nil), run.Outputs) + assert.Equal(t, []*string(nil), run.Errors) + + testutils.WaitForLogMessage(t, o, "Sending transaction") + b.Commit() // Needs at least two confirmations + b.Commit() // Needs at least two confirmations + b.Commit() // Needs at least two 
confirmations + testutils.WaitForLogMessage(t, o, "Resume run success") + + pipelineRuns := cltest.WaitForPipelineComplete(t, 0, j.ID, 1, 1, app.JobORM(), testutils.WaitTimeout(t), time.Second) + + // The run should have succeeded but with the receipt detailing the reverted transaction + pipelineRun := pipelineRuns[0] + assertPipelineTaskRunsSuccessful(t, pipelineRun.PipelineTaskRuns) + + outputs := pipelineRun.Outputs.Val.([]interface{}) + require.Len(t, outputs, 1) + output := outputs[0] + receipt := output.(map[string]interface{}) + assert.Equal(t, "0x7", receipt["blockNumber"]) + assert.Equal(t, "0x538f", receipt["gasUsed"]) + assert.Equal(t, "0x0", receipt["status"]) // success + }) + + t.Run("with FailOnRevert enabled, run fails with transaction reverted error", func(t *testing.T) { + app, sendingAddr, o := setupAppForEthTx(t, operatorContracts) + tomlSpec := ` +type = "webhook" +schemaVersion = 1 +observationSource = """ + submit_tx [type=ethtx to="%s" + data="%s" + minConfirmations="2" + failOnRevert=true + evmChainID="%s" + from="[\\"%s\\"]" + ] +""" +` + // This data is a call to link token's `transfer` function and will revert due to insufficient PLI on the sender address + revertingData := "0xa9059cbb000000000000000000000000526485b5abdd8ae9c6a63548e0215a83e7135e6100000000000000000000000000000000000000000000000db069932ea4fe1400" + tomlSpec = fmt.Sprintf(tomlSpec, operatorContracts.linkTokenAddress.String(), revertingData, testutils.SimulatedChainID.String(), sendingAddr) + j := cltest.CreateJobViaWeb(t, app, []byte(cltest.MustJSONMarshal(t, web.CreateJobRequest{TOML: tomlSpec}))) + cltest.AwaitJobActive(t, app.JobSpawner(), j.ID, testutils.WaitTimeout(t)) + + run := cltest.CreateJobRunViaUser(t, app, j.ExternalJobID, "") + assert.Equal(t, []*string(nil), run.Outputs) + assert.Equal(t, []*string(nil), run.Errors) + + testutils.WaitForLogMessage(t, o, "Sending transaction") + b.Commit() // Needs at least two confirmations + b.Commit() // Needs at least two 
confirmations + b.Commit() // Needs at least two confirmations + testutils.WaitForLogMessage(t, o, "Resume run success") + + pipelineRuns := cltest.WaitForPipelineError(t, 0, j.ID, 1, 1, app.JobORM(), testutils.WaitTimeout(t), time.Second) + + // The run should have failed as a revert + pipelineRun := pipelineRuns[0] + assertPipelineTaskRunsErrored(t, pipelineRun.PipelineTaskRuns) + }) + + t.Run("with FailOnRevert disabled, run succeeds with output being reverted receipt", func(t *testing.T) { + app, sendingAddr, o := setupAppForEthTx(t, operatorContracts) + tomlSpec := ` +type = "webhook" +schemaVersion = 1 +observationSource = """ + submit_tx [type=ethtx to="%s" + data="%s" + minConfirmations="2" + failOnRevert=false + evmChainID="%s" + from="[\\"%s\\"]" + ] +""" +` + // This data is a call to link token's `transfer` function and will revert due to insufficient PLI on the sender address + revertingData := "0xa9059cbb000000000000000000000000526485b5abdd8ae9c6a63548e0215a83e7135e6100000000000000000000000000000000000000000000000db069932ea4fe1400" + tomlSpec = fmt.Sprintf(tomlSpec, operatorContracts.linkTokenAddress.String(), revertingData, testutils.SimulatedChainID.String(), sendingAddr) + j := cltest.CreateJobViaWeb(t, app, []byte(cltest.MustJSONMarshal(t, web.CreateJobRequest{TOML: tomlSpec}))) + cltest.AwaitJobActive(t, app.JobSpawner(), j.ID, testutils.WaitTimeout(t)) + + run := cltest.CreateJobRunViaUser(t, app, j.ExternalJobID, "") + assert.Equal(t, []*string(nil), run.Outputs) + assert.Equal(t, []*string(nil), run.Errors) + + testutils.WaitForLogMessage(t, o, "Sending transaction") + b.Commit() // Needs at least two confirmations + b.Commit() // Needs at least two confirmations + b.Commit() // Needs at least two confirmations + testutils.WaitForLogMessage(t, o, "Resume run success") + + pipelineRuns := cltest.WaitForPipelineComplete(t, 0, j.ID, 1, 1, app.JobORM(), testutils.WaitTimeout(t), time.Second) + + // The run should have succeeded but with the 
receipt detailing the reverted transaction + pipelineRun := pipelineRuns[0] + assertPipelineTaskRunsSuccessful(t, pipelineRun.PipelineTaskRuns) + + outputs := pipelineRun.Outputs.Val.([]interface{}) + require.Len(t, outputs, 1) + output := outputs[0] + receipt := output.(map[string]interface{}) + assert.Equal(t, "0x11", receipt["blockNumber"]) + assert.Equal(t, "0x7a120", receipt["gasUsed"]) + assert.Equal(t, "0x0", receipt["status"]) + }) +} + +func setupOCRContracts(t *testing.T) (*bind.TransactOpts, *backends.SimulatedBackend, common.Address, *offchainaggregator.OffchainAggregator, *flags_wrapper.Flags, common.Address) { + owner := testutils.MustNewSimTransactor(t) + sb := new(big.Int) + sb, _ = sb.SetString("100000000000000000000000", 10) // 1000 eth + genesisData := core.GenesisAlloc{ + owner.From: {Balance: sb}, + } + gasLimit := uint32(ethconfig.Defaults.Miner.GasCeil * 2) + b := cltest.NewSimulatedBackend(t, genesisData, gasLimit) + linkTokenAddress, _, linkContract, err := link_token_interface.DeployLinkToken(owner, b) + require.NoError(t, err) + accessAddress, _, _, err := + testoffchainaggregator.DeploySimpleWriteAccessController(owner, b) + require.NoError(t, err, "failed to deploy test access controller contract") + b.Commit() + + min, max := new(big.Int), new(big.Int) + min.Exp(big.NewInt(-2), big.NewInt(191), nil) + max.Exp(big.NewInt(2), big.NewInt(191), nil) + max.Sub(max, big.NewInt(1)) + ocrContractAddress, _, ocrContract, err := offchainaggregator.DeployOffchainAggregator(owner, b, + 1000, // _maximumGasPrice uint32, + 200, //_reasonableGasPrice uint32, + 3.6e7, // 3.6e7 microPLI, or 36 PLI + 1e8, // _linkGweiPerObservation uint32, + 4e8, // _linkGweiPerTransmission uint32, + linkTokenAddress, //_link common.Address, + min, // -2**191 + max, // 2**191 - 1 + accessAddress, + accessAddress, + 0, + "TEST") + require.NoError(t, err) + _, err = linkContract.Transfer(owner, ocrContractAddress, big.NewInt(1000)) + require.NoError(t, err) + + 
flagsContractAddress, _, flagsContract, err := flags_wrapper.DeployFlags(owner, b, owner.From) + require.NoError(t, err, "failed to deploy flags contract to simulated ethereum blockchain") + + b.Commit() + return owner, b, ocrContractAddress, ocrContract, flagsContract, flagsContractAddress +} + +func setupNode(t *testing.T, owner *bind.TransactOpts, portV2 int, + b *backends.SimulatedBackend, overrides func(c *plugin.Config, s *plugin.Secrets), +) (*cltest.TestApplication, string, common.Address, ocrkey.KeyV2) { + p2pKey := keystest.NewP2PKeyV2(t) + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Insecure.OCRDevelopmentMode = ptr(true) // Disables ocr spec validation so we can have fast polling for the test. + + c.OCR.Enabled = ptr(true) + c.OCR2.Enabled = ptr(true) + + c.P2P.PeerID = ptr(p2pKey.PeerID()) + + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", portV2)} + c.P2P.V2.DeltaReconcile = commonconfig.MustNewDuration(5 * time.Second) + + // GracePeriod < ObservationTimeout + c.EVM[0].OCR.ObservationGracePeriod = commonconfig.MustNewDuration(100 * time.Millisecond) + + if overrides != nil { + overrides(c, s) + } + }) + + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, b, p2pKey) + + sendingKeys, err := app.KeyStore.Eth().EnabledKeysForChain(testutils.SimulatedChainID) + require.NoError(t, err) + transmitter := sendingKeys[0].Address + + // Fund the transmitter address with some ETH + n, err := b.NonceAt(testutils.Context(t), owner.From, nil) + require.NoError(t, err) + + tx := cltest.NewLegacyTransaction(n, transmitter, assets.Ether(100).ToInt(), 21000, big.NewInt(1000000000), nil) + signedTx, err := owner.Signer(owner.From, tx) + require.NoError(t, err) + err = b.SendTransaction(testutils.Context(t), signedTx) + require.NoError(t, err) + b.Commit() + + key, err := app.GetKeyStore().OCR().Create() + require.NoError(t, err) + return app, 
p2pKey.PeerID().Raw(), transmitter, key +} + +func setupForwarderEnabledNode(t *testing.T, owner *bind.TransactOpts, portV2 int, b *backends.SimulatedBackend, overrides func(c *plugin.Config, s *plugin.Secrets)) (*cltest.TestApplication, string, common.Address, common.Address, ocrkey.KeyV2) { + p2pKey := keystest.NewP2PKeyV2(t) + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Insecure.OCRDevelopmentMode = ptr(true) // Disables ocr spec validation so we can have fast polling for the test. + + c.OCR.Enabled = ptr(true) + c.OCR2.Enabled = ptr(true) + + c.P2P.PeerID = ptr(p2pKey.PeerID()) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", portV2)} + c.P2P.V2.DeltaReconcile = commonconfig.MustNewDuration(5 * time.Second) + + c.EVM[0].Transactions.ForwardersEnabled = ptr(true) + + if overrides != nil { + overrides(c, s) + } + }) + + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, b, p2pKey) + + sendingKeys, err := app.KeyStore.Eth().EnabledKeysForChain(testutils.SimulatedChainID) + require.NoError(t, err) + transmitter := sendingKeys[0].Address + + // Fund the transmitter address with some ETH + n, err := b.NonceAt(testutils.Context(t), owner.From, nil) + require.NoError(t, err) + + tx := cltest.NewLegacyTransaction(n, transmitter, assets.Ether(100).ToInt(), 21000, big.NewInt(1000000000), nil) + signedTx, err := owner.Signer(owner.From, tx) + require.NoError(t, err) + err = b.SendTransaction(testutils.Context(t), signedTx) + require.NoError(t, err) + b.Commit() + + key, err := app.GetKeyStore().OCR().Create() + require.NoError(t, err) + + // deploy a forwarder + forwarder, _, authorizedForwarder, err := authorized_forwarder.DeployAuthorizedForwarder(owner, b, common.HexToAddress("0x326C977E6efc84E512bB9C30f76E30c160eD06FB"), owner.From, common.Address{}, []byte{}) + require.NoError(t, err) + + // set EOA as an authorized sender for the forwarder + _, 
err = authorizedForwarder.SetAuthorizedSenders(owner, []common.Address{transmitter}) + require.NoError(t, err) + b.Commit() + + // add forwarder address to be tracked in db + forwarderORM := forwarders.NewORM(app.GetSqlxDB(), logger.TestLogger(t), config.Database()) + chainID := ubig.Big(*b.Blockchain().Config().ChainID) + _, err = forwarderORM.CreateForwarder(forwarder, chainID) + require.NoError(t, err) + + return app, p2pKey.PeerID().Raw(), transmitter, forwarder, key +} + +func TestIntegration_OCR(t *testing.T) { + testutils.SkipShort(t, "long test") + t.Parallel() + tests := []struct { + id int + name string + eip1559 bool + }{ + {1, "legacy mode", false}, + {2, "eip1559 mode", true}, + } + + numOracles := 4 + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + t.Parallel() + bootstrapNodePortV2 := freeport.GetOne(t) + g := gomega.NewWithT(t) + owner, b, ocrContractAddress, ocrContract, flagsContract, flagsContractAddress := setupOCRContracts(t) + + // Note it's plausible these ports could be occupied on a CI machine. + // May need a port randomize + retry approach if we observe collisions. 
+ appBootstrap, bootstrapPeerID, _, _ := setupNode(t, owner, bootstrapNodePortV2, b, nil) + var ( + oracles []confighelper.OracleIdentityExtra + transmitters []common.Address + keys []ocrkey.KeyV2 + apps []*cltest.TestApplication + ) + ports := freeport.GetN(t, numOracles) + for i := 0; i < numOracles; i++ { + app, peerID, transmitter, key := setupNode(t, owner, ports[i], b, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].FlagsContractAddress = ptr(ethkey.EIP55AddressFromAddress(flagsContractAddress)) + c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(test.eip1559) + + c.P2P.V2.DefaultBootstrappers = &[]ocrcommontypes.BootstrapperLocator{ + {PeerID: bootstrapPeerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapNodePortV2)}}, + } + }) + + keys = append(keys, key) + apps = append(apps, app) + transmitters = append(transmitters, transmitter) + + oracles = append(oracles, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnChainSigningAddress: ocrtypes.OnChainSigningAddress(key.OnChainSigning.Address()), + TransmitAddress: transmitter, + OffchainPublicKey: key.PublicKeyOffChain(), + PeerID: peerID, + }, + SharedSecretEncryptionPublicKey: key.PublicKeyConfig(), + }) + } + + stopBlocks := utils.FiniteTicker(time.Second, func() { + b.Commit() + }) + defer stopBlocks() + + _, err := ocrContract.SetPayees(owner, + transmitters, + transmitters, + ) + require.NoError(t, err) + signers, transmitters, threshold, encodedConfigVersion, encodedConfig, err := confighelper.ContractSetConfigArgsForIntegrationTest( + oracles, + 1, + 1000000000/100, // threshold PPB + ) + require.NoError(t, err) + _, err = ocrContract.SetConfig(owner, + signers, + transmitters, + threshold, + encodedConfigVersion, + encodedConfig, + ) + require.NoError(t, err) + b.Commit() + + err = appBootstrap.Start(testutils.Context(t)) + require.NoError(t, err) + + jb, err := ocr.ValidatedOracleSpecToml(appBootstrap.GetRelayers().LegacyEVMChains(), fmt.Sprintf(` +type = 
"offchainreporting" +schemaVersion = 1 +name = "boot" +contractAddress = "%s" +evmChainID = "%s" +isBootstrapPeer = true +`, ocrContractAddress, testutils.SimulatedChainID.String())) + require.NoError(t, err) + jb.Name = null.NewString("boot", true) + err = appBootstrap.AddJobV2(testutils.Context(t), &jb) + require.NoError(t, err) + + // Raising flags to initiate hibernation + _, err = flagsContract.RaiseFlag(owner, ocrContractAddress) + require.NoError(t, err, "failed to raise flag for ocrContractAddress") + _, err = flagsContract.RaiseFlag(owner, evmutils.ZeroAddress) + require.NoError(t, err, "failed to raise flag for ZeroAddress") + + b.Commit() + + var jids []int32 + var servers, slowServers = make([]*httptest.Server, 4), make([]*httptest.Server, 4) + // We expect metadata of: + // latestAnswer:nil // First call + // latestAnswer:0 + // latestAnswer:10 + // latestAnswer:20 + // latestAnswer:30 + var metaLock sync.Mutex + expectedMeta := map[string]struct{}{ + "0": {}, "10": {}, "20": {}, "30": {}, + } + for i := 0; i < numOracles; i++ { + err = apps[i].Start(testutils.Context(t)) + require.NoError(t, err) + + // Since this API speed is > ObservationTimeout we should ignore it and still produce values. 
+ slowServers[i] = httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + time.Sleep(5 * time.Second) + res.WriteHeader(http.StatusOK) + _, err := res.Write([]byte(`{"data":10}`)) + require.NoError(t, err) + })) + t.Cleanup(slowServers[i].Close) + servers[i] = httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + b, err := io.ReadAll(req.Body) + require.NoError(t, err) + var m bridges.BridgeMetaDataJSON + require.NoError(t, json.Unmarshal(b, &m)) + if m.Meta.LatestAnswer != nil && m.Meta.UpdatedAt != nil { + metaLock.Lock() + delete(expectedMeta, m.Meta.LatestAnswer.String()) + metaLock.Unlock() + } + res.WriteHeader(http.StatusOK) + _, err = res.Write([]byte(`{"data":10}`)) + require.NoError(t, err) + })) + t.Cleanup(servers[i].Close) + u, _ := url.Parse(servers[i].URL) + err := apps[i].BridgeORM().CreateBridgeType(&bridges.BridgeType{ + Name: bridges.BridgeName(fmt.Sprintf("bridge%d", i)), + URL: models.WebURL(*u), + }) + require.NoError(t, err) + + // Note we need: observationTimeout + observationGracePeriod + DeltaGrace (500ms) < DeltaRound (1s) + // So 200ms + 200ms + 500ms < 1s + jb, err := ocr.ValidatedOracleSpecToml(apps[i].GetRelayers().LegacyEVMChains(), fmt.Sprintf(` +type = "offchainreporting" +schemaVersion = 1 +name = "web oracle spec" +contractAddress = "%s" +evmChainID = "%s" +isBootstrapPeer = false +keyBundleID = "%s" +transmitterAddress = "%s" +observationTimeout = "100ms" +contractConfigConfirmations = 1 +contractConfigTrackerPollInterval = "1s" +observationSource = """ + // data source 1 + ds1 [type=bridge name="%s"]; + ds1_parse [type=jsonparse path="data"]; + ds1_multiply [type=multiply times=%d]; + + // data source 2 + ds2 [type=http method=GET url="%s"]; + ds2_parse [type=jsonparse path="data"]; + ds2_multiply [type=multiply times=%d]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0]; +""" +`, 
ocrContractAddress, testutils.SimulatedChainID.String(), keys[i].ID(), transmitters[i], fmt.Sprintf("bridge%d", i), i, slowServers[i].URL, i)) + require.NoError(t, err) + jb.Name = null.NewString("testocr", true) + err = apps[i].AddJobV2(testutils.Context(t), &jb) + require.NoError(t, err) + jids = append(jids, jb.ID) + } + + // Assert that all the OCR jobs get a run with valid values eventually. + for i := 0; i < numOracles; i++ { + // Want at least 2 runs so we see all the metadata. + pr := cltest.WaitForPipelineComplete(t, i, jids[i], + 2, 7, apps[i].JobORM(), time.Minute, time.Second) + jb, err := pr[0].Outputs.MarshalJSON() + require.NoError(t, err) + assert.Equal(t, []byte(fmt.Sprintf("[\"%d\"]", 10*i)), jb, "pr[0] %+v pr[1] %+v", pr[0], pr[1]) + require.NoError(t, err) + } + + // 4 oracles reporting 0, 10, 20, 30. Answer should be 20 (results[4/2]). + g.Eventually(func() string { + answer, err := ocrContract.LatestAnswer(nil) + require.NoError(t, err) + return answer.String() + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal("20")) + + for _, app := range apps { + jobs, _, err := app.JobORM().FindJobs(0, 1000) + require.NoError(t, err) + // No spec errors + for _, j := range jobs { + ignore := 0 + for i := range j.JobSpecErrors { + // Non-fatal timing related error, ignore for testing. 
+ if strings.Contains(j.JobSpecErrors[i].Description, "leader's phase conflicts tGrace timeout") { + ignore++ + } + } + require.Len(t, j.JobSpecErrors, ignore) + } + } + metaLock.Lock() + defer metaLock.Unlock() + assert.Len(t, expectedMeta, 0, "expected metadata %v", expectedMeta) + }) + } +} + +func TestIntegration_OCR_ForwarderFlow(t *testing.T) { + testutils.SkipShort(t, "long test") + t.Parallel() + numOracles := 4 + t.Run("ocr_forwarder_flow", func(t *testing.T) { + bootstrapNodePortV2 := freeport.GetOne(t) + g := gomega.NewWithT(t) + owner, b, ocrContractAddress, ocrContract, flagsContract, flagsContractAddress := setupOCRContracts(t) + + // Note it's plausible these ports could be occupied on a CI machine. + // May need a port randomize + retry approach if we observe collisions. + appBootstrap, bootstrapPeerID, _, _ := setupNode(t, owner, bootstrapNodePortV2, b, nil) + + var ( + oracles []confighelper.OracleIdentityExtra + transmitters []common.Address + forwardersContracts []common.Address + keys []ocrkey.KeyV2 + apps []*cltest.TestApplication + ) + ports := freeport.GetN(t, numOracles) + for i := 0; i < numOracles; i++ { + app, peerID, transmitter, forwarder, key := setupForwarderEnabledNode(t, owner, ports[i], b, func(c *plugin.Config, s *plugin.Secrets) { + c.Feature.LogPoller = ptr(true) + c.EVM[0].FlagsContractAddress = ptr(ethkey.EIP55AddressFromAddress(flagsContractAddress)) + c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true) + c.P2P.V2.DefaultBootstrappers = &[]ocrcommontypes.BootstrapperLocator{ + {PeerID: bootstrapPeerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapNodePortV2)}}, + } + }) + + keys = append(keys, key) + apps = append(apps, app) + forwardersContracts = append(forwardersContracts, forwarder) + transmitters = append(transmitters, transmitter) + + oracles = append(oracles, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnChainSigningAddress: 
ocrtypes.OnChainSigningAddress(key.OnChainSigning.Address()), + TransmitAddress: forwarder, + OffchainPublicKey: key.PublicKeyOffChain(), + PeerID: peerID, + }, + SharedSecretEncryptionPublicKey: key.PublicKeyConfig(), + }) + } + + stopBlocks := utils.FiniteTicker(time.Second, func() { + b.Commit() + }) + defer stopBlocks() + + _, err := ocrContract.SetPayees(owner, + forwardersContracts, + transmitters, + ) + require.NoError(t, err) + b.Commit() + + signers, effectiveTransmitters, threshold, encodedConfigVersion, encodedConfig, err := confighelper.ContractSetConfigArgsForIntegrationTest( + oracles, + 1, + 1000000000/100, // threshold PPB + ) + require.NoError(t, err) + require.Equal(t, effectiveTransmitters, forwardersContracts) + _, err = ocrContract.SetConfig(owner, + signers, + effectiveTransmitters, // forwarder Addresses + threshold, + encodedConfigVersion, + encodedConfig, + ) + require.NoError(t, err) + b.Commit() + + err = appBootstrap.Start(testutils.Context(t)) + require.NoError(t, err) + + // set forwardingAllowed = true + jb, err := ocr.ValidatedOracleSpecToml(appBootstrap.GetRelayers().LegacyEVMChains(), fmt.Sprintf(` +type = "offchainreporting" +schemaVersion = 1 +name = "boot" +contractAddress = "%s" +evmChainID = "%s" +forwardingAllowed = true +isBootstrapPeer = true +`, ocrContractAddress, testutils.SimulatedChainID.String())) + require.NoError(t, err) + jb.Name = null.NewString("boot", true) + err = appBootstrap.AddJobV2(testutils.Context(t), &jb) + require.NoError(t, err) + + // Raising flags to initiate hibernation + _, err = flagsContract.RaiseFlag(owner, ocrContractAddress) + require.NoError(t, err, "failed to raise flag for ocrContractAddress") + _, err = flagsContract.RaiseFlag(owner, evmutils.ZeroAddress) + require.NoError(t, err, "failed to raise flag for ZeroAddress") + + b.Commit() + + var jids []int32 + var servers, slowServers = make([]*httptest.Server, 4), make([]*httptest.Server, 4) + // We expect metadata of: + // latestAnswer:nil 
// First call + // latestAnswer:0 + // latestAnswer:10 + // latestAnswer:20 + // latestAnswer:30 + var metaLock sync.Mutex + expectedMeta := map[string]struct{}{ + "0": {}, "10": {}, "20": {}, "30": {}, + } + for i := 0; i < numOracles; i++ { + err = apps[i].Start(testutils.Context(t)) + require.NoError(t, err) + + // Since this API speed is > ObservationTimeout we should ignore it and still produce values. + slowServers[i] = httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + time.Sleep(5 * time.Second) + res.WriteHeader(http.StatusOK) + _, err := res.Write([]byte(`{"data":10}`)) + require.NoError(t, err) + })) + t.Cleanup(slowServers[i].Close) + servers[i] = httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + b, err := io.ReadAll(req.Body) + require.NoError(t, err) + var m bridges.BridgeMetaDataJSON + require.NoError(t, json.Unmarshal(b, &m)) + if m.Meta.LatestAnswer != nil && m.Meta.UpdatedAt != nil { + metaLock.Lock() + delete(expectedMeta, m.Meta.LatestAnswer.String()) + metaLock.Unlock() + } + res.WriteHeader(http.StatusOK) + _, err = res.Write([]byte(`{"data":10}`)) + require.NoError(t, err) + })) + t.Cleanup(servers[i].Close) + u, _ := url.Parse(servers[i].URL) + err := apps[i].BridgeORM().CreateBridgeType(&bridges.BridgeType{ + Name: bridges.BridgeName(fmt.Sprintf("bridge%d", i)), + URL: models.WebURL(*u), + }) + require.NoError(t, err) + + // Note we need: observationTimeout + observationGracePeriod + DeltaGrace (500ms) < DeltaRound (1s) + // So 200ms + 200ms + 500ms < 1s + // forwardingAllowed = true + jb, err := ocr.ValidatedOracleSpecToml(apps[i].GetRelayers().LegacyEVMChains(), fmt.Sprintf(` +type = "offchainreporting" +schemaVersion = 1 +name = "web oracle spec" +contractAddress = "%s" +evmChainID = "%s" +forwardingAllowed = true +isBootstrapPeer = false +keyBundleID = "%s" +transmitterAddress = "%s" +observationTimeout = "100ms" +contractConfigConfirmations = 1 
+contractConfigTrackerPollInterval = "1s" +observationSource = """ + // data source 1 + ds1 [type=bridge name="%s"]; + ds1_parse [type=jsonparse path="data"]; + ds1_multiply [type=multiply times=%d]; + + // data source 2 + ds2 [type=http method=GET url="%s"]; + ds2_parse [type=jsonparse path="data"]; + ds2_multiply [type=multiply times=%d]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0]; +""" +`, ocrContractAddress, testutils.SimulatedChainID.String(), keys[i].ID(), transmitters[i], fmt.Sprintf("bridge%d", i), i, slowServers[i].URL, i)) + require.NoError(t, err) + jb.Name = null.NewString("testocr", true) + err = apps[i].AddJobV2(testutils.Context(t), &jb) + require.NoError(t, err) + jids = append(jids, jb.ID) + } + + // Assert that all the OCR jobs get a run with valid values eventually. + for i := 0; i < numOracles; i++ { + // Want at least 2 runs so we see all the metadata. + pr := cltest.WaitForPipelineComplete(t, i, jids[i], + 2, 7, apps[i].JobORM(), time.Minute, time.Second) + jb, err := pr[0].Outputs.MarshalJSON() + require.NoError(t, err) + assert.Equal(t, []byte(fmt.Sprintf("[\"%d\"]", 10*i)), jb, "pr[0] %+v pr[1] %+v", pr[0], pr[1]) + require.NoError(t, err) + } + + // 4 oracles reporting 0, 10, 20, 30. Answer should be 20 (results[4/2]). + g.Eventually(func() string { + answer, err := ocrContract.LatestAnswer(nil) + require.NoError(t, err) + return answer.String() + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal("20")) + + for _, app := range apps { + jobs, _, err := app.JobORM().FindJobs(0, 1000) + require.NoError(t, err) + // No spec errors + for _, j := range jobs { + ignore := 0 + for i := range j.JobSpecErrors { + // Non-fatal timing related error, ignore for testing. 
+ if strings.Contains(j.JobSpecErrors[i].Description, "leader's phase conflicts tGrace timeout") { + ignore++ + } + } + require.Len(t, j.JobSpecErrors, ignore) + } + } + metaLock.Lock() + defer metaLock.Unlock() + assert.Len(t, expectedMeta, 0, "expected metadata %v", expectedMeta) + }) +} + +func TestIntegration_BlockHistoryEstimator(t *testing.T) { + t.Parallel() + + var initialDefaultGasPrice int64 = 5_000_000_000 + maxGasPrice := assets.NewWeiI(10 * initialDefaultGasPrice) + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + c.EVM[0].GasEstimator.BlockHistory.CheckInclusionBlocks = ptr[uint16](0) + c.EVM[0].GasEstimator.PriceDefault = assets.NewWeiI(initialDefaultGasPrice) + c.EVM[0].GasEstimator.Mode = ptr("BlockHistory") + c.EVM[0].RPCBlockQueryDelay = ptr[uint16](0) + c.EVM[0].GasEstimator.BlockHistory.BlockHistorySize = ptr[uint16](2) + c.EVM[0].FinalityDepth = ptr[uint32](3) + }) + + ethClient := cltest.NewEthMocks(t) + ethClient.On("ConfiguredChainID").Return(big.NewInt(client.NullClientChainID)).Maybe() + chchNewHeads := make(chan evmtest.RawSub[*evmtypes.Head], 1) + + db := pgtest.NewSqlxDB(t) + kst := cltest.NewKeyStore(t, db, cfg.Database()) + require.NoError(t, kst.Unlock(cltest.Password)) + + cc := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, KeyStore: kst.Eth(), Client: ethClient, GeneralConfig: cfg}) + + b41 := evmtypes.Block{ + Number: 41, + Hash: evmutils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(41_000_000_000, 41_500_000_000), + } + b42 := evmtypes.Block{ + Number: 42, + Hash: evmutils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(44_000_000_000, 45_000_000_000), + } + b43 := evmtypes.Block{ + Number: 43, + Hash: evmutils.NewHash(), + Transactions: cltest.LegacyTransactionsFromGasPrices(48_000_000_000, 49_000_000_000, 31_000_000_000), + } + + evmChainID := ubig.New(evmtest.MustGetDefaultChainID(t, 
cfg.EVMConfigs())) + h40 := evmtypes.Head{Hash: evmutils.NewHash(), Number: 40, EVMChainID: evmChainID} + h41 := evmtypes.Head{Hash: b41.Hash, ParentHash: h40.Hash, Number: 41, EVMChainID: evmChainID} + h42 := evmtypes.Head{Hash: b42.Hash, ParentHash: h41.Hash, Number: 42, EVMChainID: evmChainID} + + mockEth := &evmtest.MockEth{EthClient: ethClient} + ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). + Return( + func(ctx context.Context, ch chan<- *evmtypes.Head) ethereum.Subscription { + sub := mockEth.NewSub(t) + chchNewHeads <- evmtest.NewRawSub(ch, sub.Err()) + return sub + }, + func(ctx context.Context, ch chan<- *evmtypes.Head) error { return nil }, + ) + // Nonce syncer + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Maybe().Return(uint64(0), nil) + + // BlockHistoryEstimator boot calls + ethClient.On("HeadByNumber", mock.Anything, mock.AnythingOfType("*big.Int")).Return(&h42, nil) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == "0x2a" && + b[1].Method == "eth_getBlockByNumber" && b[1].Args[0] == "0x29" + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &b42 + elems[1].Result = &b41 + }) + + ethClient.On("Dial", mock.Anything).Return(nil) + ethClient.On("ConfiguredChainID", mock.Anything).Return(*evmtest.MustGetDefaultChainID(t, cfg.EVMConfigs()), nil) + ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(oneETH.ToInt(), nil) + // HeadTracker backfill + ethClient.On("HeadByHash", mock.Anything, h40.Hash).Return(&h40, nil).Maybe() + ethClient.On("HeadByHash", mock.Anything, h41.Hash).Return(&h41, nil).Maybe() + ethClient.On("HeadByHash", mock.Anything, h42.Hash).Return(&h42, nil).Maybe() + + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(cc) + for _, re := range cc.Slice() { + servicetest.Run(t, 
re) + } + var newHeads evmtest.RawSub[*evmtypes.Head] + select { + case newHeads = <-chchNewHeads: + case <-time.After(10 * time.Second): + t.Fatal("timed out waiting for app to subscribe") + } + + chain := evmtest.MustGetDefaultChain(t, legacyChains) + estimator := chain.GasEstimator() + gasPrice, gasLimit, err := estimator.GetFee(testutils.Context(t), nil, 500_000, maxGasPrice) + require.NoError(t, err) + assert.Equal(t, uint32(500000), gasLimit) + assert.Equal(t, "41.5 gwei", gasPrice.Legacy.String()) + assert.Equal(t, initialDefaultGasPrice, chain.Config().EVM().GasEstimator().PriceDefault().Int64()) // unchanged + + // BlockHistoryEstimator new blocks + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 1 && b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == "0x2b" + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &b43 + }) + ethClient.On("Close").Return().Once() + + // Simulate one new head and check the gas price got updated + h43 := cltest.Head(43) + h43.ParentHash = h42.Hash + newHeads.TrySend(h43) + + gomega.NewWithT(t).Eventually(func() string { + gasPrice, _, err := estimator.GetFee(testutils.Context(t), nil, 500000, maxGasPrice) + require.NoError(t, err) + return gasPrice.Legacy.String() + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal("45 gwei")) +} + +func triggerAllKeys(t *testing.T, app *cltest.TestApplication) { + for _, chain := range app.GetRelayers().LegacyEVMChains().Slice() { + keys, err := app.KeyStore.Eth().EnabledKeysForChain(chain.ID()) + require.NoError(t, err) + for _, k := range keys { + chain.TxManager().Trigger(k.Address) + } + } +} + +func assertPricesUint256(t *testing.T, usd, eur, jpy *big.Int, consumer *multiwordconsumer_wrapper.MultiWordConsumer) { + haveUsd, err := consumer.UsdInt(nil) + require.NoError(t, err) + assert.True(t, usd.Cmp(haveUsd) == 0) + haveEur, err := 
consumer.EurInt(nil) + require.NoError(t, err) + assert.True(t, eur.Cmp(haveEur) == 0) + haveJpy, err := consumer.JpyInt(nil) + require.NoError(t, err) + assert.True(t, jpy.Cmp(haveJpy) == 0) +} + +func ptr[T any](v T) *T { return &v } + +func assertPipelineTaskRunsSuccessful(t testing.TB, runs []pipeline.TaskRun) { + t.Helper() + for i, run := range runs { + require.True(t, run.Error.IsZero(), fmt.Sprintf("pipeline.Task run failed (idx: %v, dotID: %v, error: '%v')", i, run.GetDotID(), run.Error.ValueOrZero())) + } +} + +func assertPipelineTaskRunsErrored(t testing.TB, runs []pipeline.TaskRun) { + t.Helper() + for i, run := range runs { + require.False(t, run.Error.IsZero(), fmt.Sprintf("expected pipeline.Task run to have failed, but it succeeded (idx: %v, dotID: %v, output: '%v')", i, run.GetDotID(), run.Output)) + } +} diff --git a/core/internal/features/multiword-spec-template.yml b/core/internal/features/multiword-spec-template.yml new file mode 100644 index 00000000..8479212f --- /dev/null +++ b/core/internal/features/multiword-spec-template.yml @@ -0,0 +1,49 @@ +type = "directrequest" +schemaVersion = 1 +name = "%s" +contractAddress = "%s" +evmChainID = 1337 +externalJobID = "%s" +observationSource = """ + decode_log [type=ethabidecodelog + abi="OracleRequest(bytes32 indexed specId, address requester, bytes32 requestId, uint256 payment, address callbackAddr, bytes4 callbackFunctionId, uint256 cancelExpiration, uint256 dataVersion, bytes data)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] + decode_cbor [type=cborparse data="$(decode_log.data)"] + decode_log -> decode_cbor + decode_cbor -> usd + decode_cbor -> eur + decode_cbor -> jpy + usd [type=http method=GET url="$(decode_cbor.urlUSD)" allowunrestrictednetworkaccess="true"] + usd_parse [type=jsonparse path="$(decode_cbor.pathUSD)"] + usd_multiply [type=multiply value="$(usd_parse)", times="100"] + usd -> usd_parse -> usd_multiply + eur [type=http method=GET url="$(decode_cbor.urlEUR)" 
allowunrestrictednetworkaccess="true"] + eur_parse [type=jsonparse path="$(decode_cbor.pathEUR)"] + eur_multiply [type=multiply value="$(eur_parse)", times="100"] + eur -> eur_parse -> eur_multiply + jpy [type=http method=GET url="$(decode_cbor.urlJPY)" allowunrestrictednetworkaccess="true"] + jpy_parse [type=jsonparse path="$(decode_cbor.pathJPY)"] + jpy_multiply [type=multiply value="$(jpy_parse)", times="100"] + jpy -> jpy_parse -> jpy_multiply + usd_multiply -> encode_mwr + eur_multiply -> encode_mwr + jpy_multiply -> encode_mwr + encode_mwr [type=ethabiencode + abi="(bytes32 requestId, uint256 usd, uint256 eur, uint256 jpy)" + data=<{ + "requestId": $(decode_log.requestId), + "usd": $(usd_multiply), + "eur": $(eur_multiply), + "jpy": $(jpy_multiply)}>] + encode_tx [type=ethabiencode + abi="fulfillOracleRequest2(bytes32 requestId, uint256 payment, address callbackAddress, bytes4 callbackFunctionId, uint256 expiration, bytes calldata data)" + data=<{"requestId": $(decode_log.requestId), + "payment": $(decode_log.payment), + "callbackAddress": $(decode_log.callbackAddr), + "callbackFunctionId": $(decode_log.callbackFunctionId), + "expiration": $(decode_log.cancelExpiration), + "data": $(encode_mwr)}>] + submit_tx [type=ethtx to="%s" data="$(encode_tx)" minConfirmations="2"] + encode_mwr -> encode_tx -> submit_tx +""" diff --git a/core/internal/features/ocr2/features_ocr2_plugin_test.go b/core/internal/features/ocr2/features_ocr2_plugin_test.go new file mode 100644 index 00000000..184c4d26 --- /dev/null +++ b/core/internal/features/ocr2/features_ocr2_plugin_test.go @@ -0,0 +1,14 @@ +//go:build integration + +package ocr2_test + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/config/env" +) + +func TestIntegration_OCR2_plugins(t *testing.T) { + t.Setenv(string(env.MedianPlugin.Cmd), "plugin-feeds") + testIntegration_OCR2(t) +} diff --git a/core/internal/features/ocr2/features_ocr2_test.go b/core/internal/features/ocr2/features_ocr2_test.go new 
file mode 100644 index 00000000..8a83bed2 --- /dev/null +++ b/core/internal/features/ocr2/features_ocr2_test.go @@ -0,0 +1,915 @@ +package ocr2_test + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "maps" + "math/big" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/commontypes" + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + + testoffchainaggregator2 "github.com/goplugin/libocr/gethwrappers2/testocr2aggregator" + confighelper2 "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + ocrtypes2 "github.com/goplugin/libocr/offchainreporting2plus/types" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_forwarder" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/keystest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/testhelpers" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrbootstrap" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +type ocr2Node struct { + app *cltest.TestApplication + peerID string + transmitter common.Address + effectiveTransmitter common.Address + keybundle ocr2key.KeyBundle +} + +func setupOCR2Contracts(t *testing.T) (*bind.TransactOpts, *backends.SimulatedBackend, common.Address, *ocr2aggregator.OCR2Aggregator) { + owner := testutils.MustNewSimTransactor(t) + sb := new(big.Int) + sb, _ = sb.SetString("100000000000000000000", 10) // 1 eth + genesisData := core.GenesisAlloc{owner.From: {Balance: sb}} + gasLimit := ethconfig.Defaults.Miner.GasCeil * 2 + b := backends.NewSimulatedBackend(genesisData, gasLimit) + linkTokenAddress, _, linkContract, err := link_token_interface.DeployLinkToken(owner, b) + require.NoError(t, err) + accessAddress, _, _, err := testoffchainaggregator2.DeploySimpleWriteAccessController(owner, b) + require.NoError(t, err, "failed to deploy test access controller contract") + b.Commit() + + minAnswer, maxAnswer := new(big.Int), new(big.Int) + minAnswer.Exp(big.NewInt(-2), big.NewInt(191), nil) + maxAnswer.Exp(big.NewInt(2), big.NewInt(191), nil) + maxAnswer.Sub(maxAnswer, big.NewInt(1)) + ocrContractAddress, _, ocrContract, err := ocr2aggregator.DeployOCR2Aggregator( + owner, + b, + linkTokenAddress, //_link common.Address, + minAnswer, // -2**191 + maxAnswer, // 2**191 - 1 + accessAddress, + accessAddress, + 9, + "TEST", + ) + // Ensure we have finality depth worth of blocks to start. 
+ for i := 0; i < 20; i++ { + b.Commit() + } + require.NoError(t, err) + _, err = linkContract.Transfer(owner, ocrContractAddress, big.NewInt(1000)) + require.NoError(t, err) + b.Commit() + return owner, b, ocrContractAddress, ocrContract +} + +func setupNodeOCR2( + t *testing.T, + owner *bind.TransactOpts, + port int, + useForwarder bool, + b *backends.SimulatedBackend, + p2pV2Bootstrappers []commontypes.BootstrapperLocator, +) *ocr2Node { + p2pKey := keystest.NewP2PKeyV2(t) + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Insecure.OCRDevelopmentMode = ptr(true) // Disables ocr spec validation so we can have fast polling for the test. + + c.Feature.LogPoller = ptr(true) + + c.OCR.Enabled = ptr(false) + c.OCR2.Enabled = ptr(true) + + c.P2P.PeerID = ptr(p2pKey.PeerID()) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.DeltaDial = commonconfig.MustNewDuration(500 * time.Millisecond) + c.P2P.V2.DeltaReconcile = commonconfig.MustNewDuration(5 * time.Second) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", port)} + if len(p2pV2Bootstrappers) > 0 { + c.P2P.V2.DefaultBootstrappers = &p2pV2Bootstrappers + } + + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(5 * time.Second) + c.EVM[0].Transactions.ForwardersEnabled = &useForwarder + }) + + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, b, p2pKey) + + sendingKeys, err := app.KeyStore.Eth().EnabledKeysForChain(testutils.SimulatedChainID) + require.NoError(t, err) + require.Len(t, sendingKeys, 1) + transmitter := sendingKeys[0].Address + effectiveTransmitter := sendingKeys[0].Address + + // Fund the transmitter address with some ETH + n, err := b.NonceAt(testutils.Context(t), owner.From, nil) + require.NoError(t, err) + + tx := cltest.NewLegacyTransaction( + n, transmitter, + assets.Ether(1).ToInt(), + 21000, + assets.GWei(1).ToInt(), + nil) + signedTx, err := owner.Signer(owner.From, tx) + require.NoError(t, err) + err = 
b.SendTransaction(testutils.Context(t), signedTx) + require.NoError(t, err) + b.Commit() + + kb, err := app.GetKeyStore().OCR2().Create("evm") + require.NoError(t, err) + + if useForwarder { + // deploy a forwarder + faddr, _, authorizedForwarder, err2 := authorized_forwarder.DeployAuthorizedForwarder(owner, b, common.HexToAddress("0x326C977E6efc84E512bB9C30f76E30c160eD06FB"), owner.From, common.Address{}, []byte{}) + require.NoError(t, err2) + + // set EOA as an authorized sender for the forwarder + _, err2 = authorizedForwarder.SetAuthorizedSenders(owner, []common.Address{transmitter}) + require.NoError(t, err2) + b.Commit() + + // add forwarder address to be tracked in db + forwarderORM := forwarders.NewORM(app.GetSqlxDB(), logger.TestLogger(t), config.Database()) + chainID := ubig.Big(*b.Blockchain().Config().ChainID) + _, err2 = forwarderORM.CreateForwarder(faddr, chainID) + require.NoError(t, err2) + + effectiveTransmitter = faddr + } + return &ocr2Node{ + app: app, + peerID: p2pKey.PeerID().Raw(), + transmitter: transmitter, + effectiveTransmitter: effectiveTransmitter, + keybundle: kb, + } +} + +func TestIntegration_OCR2(t *testing.T) { + t.Parallel() + testIntegration_OCR2(t) +} + +func testIntegration_OCR2(t *testing.T) { + for _, test := range []struct { + name string + chainReaderAndCodec bool + }{ + {"legacy", false}, + {"chain-reader", true}, + } { + test := test + t.Run(test.name, func(t *testing.T) { + owner, b, ocrContractAddress, ocrContract := setupOCR2Contracts(t) + + lggr := logger.TestLogger(t) + bootstrapNodePort := freeport.GetOne(t) + bootstrapNode := setupNodeOCR2(t, owner, bootstrapNodePort, false /* useForwarders */, b, nil) + + var ( + oracles []confighelper2.OracleIdentityExtra + transmitters []common.Address + kbs []ocr2key.KeyBundle + apps []*cltest.TestApplication + ) + ports := freeport.GetN(t, 4) + for i := 0; i < 4; i++ { + node := setupNodeOCR2(t, owner, ports[i], false /* useForwarders */, b, []commontypes.BootstrapperLocator{ 
+ // Supply the bootstrap IP and port as a V2 peer address + {PeerID: bootstrapNode.peerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapNodePort)}}, + }) + + kbs = append(kbs, node.keybundle) + apps = append(apps, node.app) + transmitters = append(transmitters, node.transmitter) + + oracles = append(oracles, confighelper2.OracleIdentityExtra{ + OracleIdentity: confighelper2.OracleIdentity{ + OnchainPublicKey: node.keybundle.PublicKey(), + TransmitAccount: ocrtypes2.Account(node.transmitter.String()), + OffchainPublicKey: node.keybundle.OffchainPublicKey(), + PeerID: node.peerID, + }, + ConfigEncryptionPublicKey: node.keybundle.ConfigEncryptionPublicKey(), + }) + } + + tick := time.NewTicker(1 * time.Second) + defer tick.Stop() + go func() { + for range tick.C { + b.Commit() + } + }() + + blockBeforeConfig := initOCR2(t, lggr, b, ocrContract, owner, bootstrapNode, oracles, transmitters, transmitters, func(blockNum int64) string { + return fmt.Sprintf(` +type = "bootstrap" +name = "bootstrap" +relay = "evm" +schemaVersion = 1 +contractID = "%s" +[relayConfig] +chainID = 1337 +fromBlock = %d +`, ocrContractAddress, blockNum) + }) + + var jids []int32 + var servers, slowServers = make([]*httptest.Server, 4), make([]*httptest.Server, 4) + // We expect metadata of: + // latestAnswer:nil // First call + // latestAnswer:0 + // latestAnswer:10 + // latestAnswer:20 + // latestAnswer:30 + var metaLock sync.Mutex + expectedMeta := map[string]struct{}{ + "0": {}, "10": {}, "20": {}, "30": {}, + } + returnData := int(10) + for i := 0; i < 4; i++ { + s := i + require.NoError(t, apps[i].Start(testutils.Context(t))) + + // API speed is > observation timeout set in ContractSetConfigArgsForIntegrationTest + slowServers[i] = httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + time.Sleep(5 * time.Second) + var result string + metaLock.Lock() + result = fmt.Sprintf(`{"data":%d}`, returnData) + metaLock.Unlock() + 
res.WriteHeader(http.StatusOK) + t.Logf("Slow Bridge %d returning data:10", s) + _, err := res.Write([]byte(result)) + require.NoError(t, err) + })) + t.Cleanup(slowServers[s].Close) + servers[i] = httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + b, err := io.ReadAll(req.Body) + require.NoError(t, err) + var m bridges.BridgeMetaDataJSON + require.NoError(t, json.Unmarshal(b, &m)) + var result string + metaLock.Lock() + result = fmt.Sprintf(`{"data":%d}`, returnData) + metaLock.Unlock() + if m.Meta.LatestAnswer != nil && m.Meta.UpdatedAt != nil { + t.Logf("Bridge %d deleting %s, from request body: %s", s, m.Meta.LatestAnswer, b) + metaLock.Lock() + delete(expectedMeta, m.Meta.LatestAnswer.String()) + metaLock.Unlock() + } + res.WriteHeader(http.StatusOK) + _, err = res.Write([]byte(result)) + require.NoError(t, err) + })) + t.Cleanup(servers[s].Close) + u, _ := url.Parse(servers[i].URL) + require.NoError(t, apps[i].BridgeORM().CreateBridgeType(&bridges.BridgeType{ + Name: bridges.BridgeName(fmt.Sprintf("bridge%d", i)), + URL: models.WebURL(*u), + })) + + var chainReaderSpec string + if test.chainReaderAndCodec { + chainReaderSpec = ` +[relayConfig.chainReader.contracts.median] +contractABI = ''' +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "requester", + "type": "address" + }, + { + "indexed": false, + "internalType": "bytes32", + "name": "configDigest", + "type": "bytes32" + }, + { + "indexed": false, + "internalType": "uint32", + "name": "epoch", + "type": "uint32" + }, + { + "indexed": false, + "internalType": "uint8", + "name": "round", + "type": "uint8" + } + ], + "name": "RoundRequested", + "type": "event" + }, + { + "inputs": [], + "name": "latestTransmissionDetails", + "outputs": [ + { + "internalType": "bytes32", + "name": "configDigest", + "type": "bytes32" + }, + { + "internalType": "uint32", + "name": "epoch", + "type": "uint32" + }, + { + "internalType": 
"uint8", + "name": "round", + "type": "uint8" + }, + { + "internalType": "int192", + "name": "latestAnswer_", + "type": "int192" + }, + { + "internalType": "uint64", + "name": "latestTimestamp_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + } +] +''' + +[relayConfig.chainReader.contracts.median.configs] +LatestRoundRequested = ''' +{ + "chainSpecificName": "RoundRequested", + "readType": "event" +} +''' +LatestTransmissionDetails = ''' +{ + "chainSpecificName": "latestTransmissionDetails", + "outputModifications": [ + { + "Fields": [ + "LatestTimestamp_" + ], + "type": "epoch to time" + }, + { + "Fields": { + "LatestAnswer_": "LatestAnswer", + "LatestTimestamp_": "LatestTimestamp" + }, + "type": "rename" + } + ] +} +''' + +[relayConfig.codec.configs.MedianReport] +typeABI = ''' +[ + { + "Name": "Timestamp", + "Type": "uint32" + }, + { + "Name": "Observers", + "Type": "bytes32" + }, + { + "Name": "Observations", + "Type": "int192[]" + }, + { + "Name": "JuelsPerFeeCoin", + "Type": "int192" + } +] +''' +` + } + ocrJob, err := validate.ValidatedOracleSpecToml(apps[i].Config.OCR2(), apps[i].Config.Insecure(), fmt.Sprintf(` +type = "offchainreporting2" +relay = "evm" +schemaVersion = 1 +pluginType = "median" +name = "web oracle spec" +contractID = "%s" +ocrKeyBundleID = "%s" +transmitterID = "%s" +contractConfigConfirmations = 1 +contractConfigTrackerPollInterval = "1s" +observationSource = """ + // data source 1 + ds1 [type=bridge name="%s"]; + ds1_parse [type=jsonparse path="data"]; + ds1_multiply [type=multiply times=%d]; + + // data source 2 + ds2 [type=http method=GET url="%s"]; + ds2_parse [type=jsonparse path="data"]; + ds2_multiply [type=multiply times=%d]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0]; +""" + +[relayConfig] +chainID = 1337 +fromBlock = %d +%s + +[pluginConfig] +juelsPerFeeCoinSource = """ + // data source 1 + ds1 [type=bridge 
name="%s"]; + ds1_parse [type=jsonparse path="data"]; + ds1_multiply [type=multiply times=%d]; + + // data source 2 + ds2 [type=http method=GET url="%s"]; + ds2_parse [type=jsonparse path="data"]; + ds2_multiply [type=multiply times=%d]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0]; +""" +`, ocrContractAddress, kbs[i].ID(), transmitters[i], fmt.Sprintf("bridge%d", i), i, slowServers[i].URL, i, blockBeforeConfig.Number().Int64(), chainReaderSpec, fmt.Sprintf("bridge%d", i), i, slowServers[i].URL, i)) + require.NoError(t, err) + err = apps[i].AddJobV2(testutils.Context(t), &ocrJob) + require.NoError(t, err) + jids = append(jids, ocrJob.ID) + } + + // Watch for OCR2AggregatorTransmitted events + start := uint64(0) + txEvents := make(chan *ocr2aggregator.OCR2AggregatorTransmitted) + _, err := ocrContract.WatchTransmitted(&bind.WatchOpts{Start: &start, Context: testutils.Context(t)}, txEvents) + require.NoError(t, err) + newTxEvents := make(chan *ocr2aggregator.OCR2AggregatorNewTransmission) + _, err = ocrContract.WatchNewTransmission(&bind.WatchOpts{Start: &start, Context: testutils.Context(t)}, newTxEvents, []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + require.NoError(t, err) + + go func() { + var newTxEvent *ocr2aggregator.OCR2AggregatorNewTransmission + select { + case txEvent := <-txEvents: + t.Logf("txEvent: %v", txEvent) + if newTxEvent != nil { + assert.Equal(t, txEvent.Epoch, uint32(newTxEvent.EpochAndRound.Uint64())) + } + case newTxEvent = <-newTxEvents: + t.Logf("newTxEvent: %v", newTxEvent) + } + }() + + for trial := 0; trial < 2; trial++ { + var retVal int + + metaLock.Lock() + returnData = 10 * (trial + 1) + retVal = returnData + for i := 0; i < 4; i++ { + expectedMeta[fmt.Sprintf("%d", returnData*i)] = struct{}{} + } + metaLock.Unlock() + + // Assert that all the OCR jobs get a run with valid values eventually. 
+ var wg sync.WaitGroup + for i := 0; i < 4; i++ { + ic := i + wg.Add(1) + go func() { + defer wg.Done() + completedRuns, err2 := apps[ic].JobORM().FindPipelineRunIDsByJobID(jids[ic], 0, 1000) + require.NoError(t, err2) + // Want at least 2 runs so we see all the metadata. + pr := cltest.WaitForPipelineComplete(t, ic, jids[ic], len(completedRuns)+2, 7, apps[ic].JobORM(), 2*time.Minute, 5*time.Second) + jb, err2 := pr[0].Outputs.MarshalJSON() + require.NoError(t, err2) + assert.Equal(t, []byte(fmt.Sprintf("[\"%d\"]", retVal*ic)), jb, "pr[0] %+v pr[1] %+v", pr[0], pr[1]) + require.NoError(t, err2) + }() + } + wg.Wait() + + // Trail #1: 4 oracles reporting 0, 10, 20, 30. Answer should be 20 (results[4/2]). + // Trial #2: 4 oracles reporting 0, 20, 40, 60. Answer should be 40 (results[4/2]). + gomega.NewGomegaWithT(t).Eventually(func() string { + answer, err2 := ocrContract.LatestAnswer(nil) + require.NoError(t, err2) + return answer.String() + }, 1*time.Minute, 200*time.Millisecond).Should(gomega.Equal(fmt.Sprintf("%d", 2*retVal))) + + for _, app := range apps { + jobs, _, err2 := app.JobORM().FindJobs(0, 1000) + require.NoError(t, err2) + // No spec errors + for _, j := range jobs { + ignore := 0 + for i := range j.JobSpecErrors { + // Non-fatal timing related error, ignore for testing. 
+ if strings.Contains(j.JobSpecErrors[i].Description, "leader's phase conflicts tGrace timeout") { + ignore++ + } + } + require.Len(t, j.JobSpecErrors, ignore) + } + } + em := map[string]struct{}{} + metaLock.Lock() + maps.Copy(em, expectedMeta) + metaLock.Unlock() + assert.Len(t, em, 0, "expected metadata %v", em) + + t.Logf("======= Summary =======") + roundId, err2 := ocrContract.LatestRound(nil) + require.NoError(t, err2) + for i := 0; i <= int(roundId.Int64()); i++ { + roundData, err3 := ocrContract.GetRoundData(nil, big.NewInt(int64(i))) + require.NoError(t, err3) + t.Logf("RoundId: %d, AnsweredInRound: %d, Answer: %d, StartedAt: %v, UpdatedAt: %v", roundData.RoundId, roundData.AnsweredInRound, roundData.Answer, roundData.StartedAt, roundData.UpdatedAt) + } + + expectedAnswer := big.NewInt(2 * int64(retVal)) + + // Assert we can read the latest config digest and epoch after a report has been submitted. + contractABI, err2 := abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) + require.NoError(t, err2) + apps[0].GetRelayers().LegacyEVMChains().Slice() + ct, err2 := evm.NewOCRContractTransmitter(ocrContractAddress, apps[0].GetRelayers().LegacyEVMChains().Slice()[0].Client(), contractABI, nil, apps[0].GetRelayers().LegacyEVMChains().Slice()[0].LogPoller(), lggr, nil) + require.NoError(t, err2) + configDigest, epoch, err2 := ct.LatestConfigDigestAndEpoch(testutils.Context(t)) + require.NoError(t, err2) + details, err2 := ocrContract.LatestConfigDetails(nil) + require.NoError(t, err2) + assert.True(t, bytes.Equal(configDigest[:], details.ConfigDigest[:])) + digestAndEpoch, err2 := ocrContract.LatestConfigDigestAndEpoch(nil) + require.NoError(t, err2) + assert.Equal(t, digestAndEpoch.Epoch, epoch) + latestTransmissionDetails, err2 := ocrContract.LatestTransmissionDetails(nil) + require.NoError(t, err2) + assert.Equal(t, expectedAnswer, latestTransmissionDetails.LatestAnswer) + require.NoError(t, err2) + newTransmissionEvents, err2 := 
ocrContract.FilterTransmitted(&bind.FilterOpts{Start: 0, End: nil}) + require.NoError(t, err2) + for newTransmissionEvents.Next() { + assert.Equal(t, 3, newTransmissionEvents.Event.Epoch) + } + } + }) + } +} + +func initOCR2(t *testing.T, lggr logger.Logger, b *backends.SimulatedBackend, + ocrContract *ocr2aggregator.OCR2Aggregator, + owner *bind.TransactOpts, + bootstrapNode *ocr2Node, + oracles []confighelper2.OracleIdentityExtra, + transmitters []common.Address, + payees []common.Address, + specFn func(int64) string, +) ( + blockBeforeConfig *types.Block, +) { + lggr.Debugw("Setting Payees on OraclePlugin Contract", "transmitters", payees) + _, err := ocrContract.SetPayees( + owner, + transmitters, + payees, + ) + require.NoError(t, err) + blockBeforeConfig, err = b.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + signers, effectiveTransmitters, threshold, _, encodedConfigVersion, encodedConfig, err := confighelper2.ContractSetConfigArgsForEthereumIntegrationTest( + oracles, + 1, + 1000000000/100, // threshold PPB + ) + require.NoError(t, err) + + minAnswer, maxAnswer := new(big.Int), new(big.Int) + minAnswer.Exp(big.NewInt(-2), big.NewInt(191), nil) + maxAnswer.Exp(big.NewInt(2), big.NewInt(191), nil) + maxAnswer.Sub(maxAnswer, big.NewInt(1)) + + onchainConfig, err := testhelpers.GenerateDefaultOCR2OnchainConfig(minAnswer, maxAnswer) + require.NoError(t, err) + + lggr.Debugw("Setting Config on Oracle Contract", + "signers", signers, + "transmitters", transmitters, + "effectiveTransmitters", effectiveTransmitters, + "threshold", threshold, + "onchainConfig", onchainConfig, + "encodedConfigVersion", encodedConfigVersion, + ) + _, err = ocrContract.SetConfig( + owner, + signers, + effectiveTransmitters, + threshold, + onchainConfig, + encodedConfigVersion, + encodedConfig, + ) + require.NoError(t, err) + b.Commit() + + err = bootstrapNode.app.Start(testutils.Context(t)) + require.NoError(t, err) + + chainSet := 
bootstrapNode.app.GetRelayers().LegacyEVMChains() + require.NotNil(t, chainSet) + ocrJob, err := ocrbootstrap.ValidatedBootstrapSpecToml(specFn(blockBeforeConfig.Number().Int64())) + require.NoError(t, err) + err = bootstrapNode.app.AddJobV2(testutils.Context(t), &ocrJob) + require.NoError(t, err) + return +} + +func TestIntegration_OCR2_ForwarderFlow(t *testing.T) { + t.Parallel() + owner, b, ocrContractAddress, ocrContract := setupOCR2Contracts(t) + + lggr := logger.TestLogger(t) + bootstrapNodePort := freeport.GetOne(t) + bootstrapNode := setupNodeOCR2(t, owner, bootstrapNodePort, true /* useForwarders */, b, nil) + + var ( + oracles []confighelper2.OracleIdentityExtra + transmitters []common.Address + forwarderContracts []common.Address + kbs []ocr2key.KeyBundle + apps []*cltest.TestApplication + ) + ports := freeport.GetN(t, 4) + for i := uint16(0); i < 4; i++ { + node := setupNodeOCR2(t, owner, ports[i], true /* useForwarders */, b, []commontypes.BootstrapperLocator{ + // Supply the bootstrap IP and port as a V2 peer address + {PeerID: bootstrapNode.peerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapNodePort)}}, + }) + + // Effective transmitter should be a forwarder not an EOA. 
+ require.NotEqual(t, node.effectiveTransmitter, node.transmitter) + + kbs = append(kbs, node.keybundle) + apps = append(apps, node.app) + forwarderContracts = append(forwarderContracts, node.effectiveTransmitter) + transmitters = append(transmitters, node.transmitter) + + oracles = append(oracles, confighelper2.OracleIdentityExtra{ + OracleIdentity: confighelper2.OracleIdentity{ + OnchainPublicKey: node.keybundle.PublicKey(), + TransmitAccount: ocrtypes2.Account(node.effectiveTransmitter.String()), + OffchainPublicKey: node.keybundle.OffchainPublicKey(), + PeerID: node.peerID, + }, + ConfigEncryptionPublicKey: node.keybundle.ConfigEncryptionPublicKey(), + }) + } + + tick := time.NewTicker(1 * time.Second) + defer tick.Stop() + go func() { + for range tick.C { + b.Commit() + } + }() + + blockBeforeConfig := initOCR2(t, lggr, b, ocrContract, owner, bootstrapNode, oracles, forwarderContracts, transmitters, func(int64) string { + return fmt.Sprintf(` +type = "bootstrap" +name = "bootstrap" +relay = "evm" +schemaVersion = 1 +forwardingAllowed = true +contractID = "%s" +[relayConfig] +chainID = 1337 +`, ocrContractAddress) + }) + + var jids []int32 + var servers, slowServers = make([]*httptest.Server, 4), make([]*httptest.Server, 4) + // We expect metadata of: + // latestAnswer:nil // First call + // latestAnswer:0 + // latestAnswer:10 + // latestAnswer:20 + // latestAnswer:30 + var metaLock sync.Mutex + expectedMeta := map[string]struct{}{ + "0": {}, "10": {}, "20": {}, "30": {}, + } + for i := 0; i < 4; i++ { + s := i + require.NoError(t, apps[i].Start(testutils.Context(t))) + + // API speed is > observation timeout set in ContractSetConfigArgsForIntegrationTest + slowServers[i] = httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + time.Sleep(5 * time.Second) + res.WriteHeader(http.StatusOK) + _, err := res.Write([]byte(`{"data":10}`)) + require.NoError(t, err) + })) + t.Cleanup(func() { + slowServers[s].Close() + }) + servers[i] = 
httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + b, err := io.ReadAll(req.Body) + require.NoError(t, err) + var m bridges.BridgeMetaDataJSON + require.NoError(t, json.Unmarshal(b, &m)) + if m.Meta.LatestAnswer != nil && m.Meta.UpdatedAt != nil { + metaLock.Lock() + delete(expectedMeta, m.Meta.LatestAnswer.String()) + metaLock.Unlock() + } + res.WriteHeader(http.StatusOK) + _, err = res.Write([]byte(`{"data":10}`)) + require.NoError(t, err) + })) + t.Cleanup(func() { + servers[s].Close() + }) + u, _ := url.Parse(servers[i].URL) + require.NoError(t, apps[i].BridgeORM().CreateBridgeType(&bridges.BridgeType{ + Name: bridges.BridgeName(fmt.Sprintf("bridge%d", i)), + URL: models.WebURL(*u), + })) + + ocrJob, err := validate.ValidatedOracleSpecToml(apps[i].Config.OCR2(), apps[i].Config.Insecure(), fmt.Sprintf(` +type = "offchainreporting2" +relay = "evm" +schemaVersion = 1 +pluginType = "median" +name = "web oracle spec" +forwardingAllowed = true +contractID = "%s" +ocrKeyBundleID = "%s" +transmitterID = "%s" +contractConfigConfirmations = 1 +contractConfigTrackerPollInterval = "1s" +observationSource = """ + // data source 1 + ds1 [type=bridge name="%s"]; + ds1_parse [type=jsonparse path="data"]; + ds1_multiply [type=multiply times=%d]; + + // data source 2 + ds2 [type=http method=GET url="%s"]; + ds2_parse [type=jsonparse path="data"]; + ds2_multiply [type=multiply times=%d]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0]; +""" +[relayConfig] +chainID = 1337 +[pluginConfig] +juelsPerFeeCoinSource = """ + // data source 1 + ds1 [type=bridge name="%s"]; + ds1_parse [type=jsonparse path="data"]; + ds1_multiply [type=multiply times=%d]; + + // data source 2 + ds2 [type=http method=GET url="%s"]; + ds2_parse [type=jsonparse path="data"]; + ds2_multiply [type=multiply times=%d]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> 
ds2_multiply -> answer1; + + answer1 [type=median index=0]; +""" +`, ocrContractAddress, kbs[i].ID(), transmitters[i], fmt.Sprintf("bridge%d", i), i, slowServers[i].URL, i, fmt.Sprintf("bridge%d", i), i, slowServers[i].URL, i)) + require.NoError(t, err) + err = apps[i].AddJobV2(testutils.Context(t), &ocrJob) + require.NoError(t, err) + jids = append(jids, ocrJob.ID) + } + + // Once all the jobs are added, replay to ensure we have the configSet logs. + for _, app := range apps { + require.NoError(t, app.GetRelayers().LegacyEVMChains().Slice()[0].LogPoller().Replay(testutils.Context(t), blockBeforeConfig.Number().Int64())) + } + require.NoError(t, bootstrapNode.app.GetRelayers().LegacyEVMChains().Slice()[0].LogPoller().Replay(testutils.Context(t), blockBeforeConfig.Number().Int64())) + + // Assert that all the OCR jobs get a run with valid values eventually. + var wg sync.WaitGroup + for i := 0; i < 4; i++ { + ic := i + wg.Add(1) + go func() { + defer wg.Done() + // Want at least 2 runs so we see all the metadata. + pr := cltest.WaitForPipelineComplete(t, ic, jids[ic], 2, 7, apps[ic].JobORM(), 2*time.Minute, 5*time.Second) + jb, err := pr[0].Outputs.MarshalJSON() + require.NoError(t, err) + assert.Equal(t, []byte(fmt.Sprintf("[\"%d\"]", 10*ic)), jb, "pr[0] %+v pr[1] %+v", pr[0], pr[1]) + require.NoError(t, err) + }() + } + wg.Wait() + + // 4 oracles reporting 0, 10, 20, 30. Answer should be 20 (results[4/2]). + gomega.NewGomegaWithT(t).Eventually(func() string { + answer, err := ocrContract.LatestAnswer(nil) + require.NoError(t, err) + return answer.String() + }, 1*time.Minute, 200*time.Millisecond).Should(gomega.Equal("20")) + + for _, app := range apps { + jobs, _, err := app.JobORM().FindJobs(0, 1000) + require.NoError(t, err) + // No spec errors + for _, j := range jobs { + ignore := 0 + for i := range j.JobSpecErrors { + // Non-fatal timing related error, ignore for testing. 
+ if strings.Contains(j.JobSpecErrors[i].Description, "leader's phase conflicts tGrace timeout") { + ignore++ + } + } + require.Len(t, j.JobSpecErrors, ignore) + } + } + em := map[string]struct{}{} + metaLock.Lock() + maps.Copy(em, expectedMeta) + metaLock.Unlock() + assert.Len(t, em, 0, "expected metadata %v", em) + + // Assert we can read the latest config digest and epoch after a report has been submitted. + contractABI, err := abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) + require.NoError(t, err) + ct, err := evm.NewOCRContractTransmitter(ocrContractAddress, apps[0].GetRelayers().LegacyEVMChains().Slice()[0].Client(), contractABI, nil, apps[0].GetRelayers().LegacyEVMChains().Slice()[0].LogPoller(), lggr, nil) + require.NoError(t, err) + configDigest, epoch, err := ct.LatestConfigDigestAndEpoch(testutils.Context(t)) + require.NoError(t, err) + details, err := ocrContract.LatestConfigDetails(nil) + require.NoError(t, err) + assert.True(t, bytes.Equal(configDigest[:], details.ConfigDigest[:])) + digestAndEpoch, err := ocrContract.LatestConfigDigestAndEpoch(nil) + require.NoError(t, err) + assert.Equal(t, digestAndEpoch.Epoch, epoch) +} + +func ptr[T any](v T) *T { return &v } diff --git a/core/internal/features/singleword-spec-template.yml b/core/internal/features/singleword-spec-template.yml new file mode 100644 index 00000000..4ceef574 --- /dev/null +++ b/core/internal/features/singleword-spec-template.yml @@ -0,0 +1,27 @@ +type = "directrequest" +schemaVersion = 1 +name = "%s" +contractAddress = "%s" +externalJobID = "%s" +evmChainID = 1337 +observationSource = """ + decode_log [type=ethabidecodelog + abi="OracleRequest(bytes32 indexed specId, address requester, bytes32 requestId, uint256 payment, address callbackAddr, bytes4 callbackFunctionId, uint256 cancelExpiration, uint256 dataVersion, bytes data)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] + decode_cbor [type=cborparse data="$(decode_log.data)"] + ds1 [type=http 
method=GET url="$(decode_cbor.urlUSD)" allowunrestrictednetworkaccess="true"]; + ds1_parse [type=jsonparse path="$(decode_cbor.pathUSD)"]; + ds1_multiply [type=multiply value="$(ds1_parse)" times=100]; + encode_data [type=ethabiencode abi="(uint256 value)" data=<{"value": $(ds1_multiply)}>] + encode_tx [type=ethabiencode + abi="fulfillOracleRequest(bytes32 requestId, uint256 payment, address callbackAddress, bytes4 callbackFunctionId, uint256 expiration, bytes32 data)" + data=<{"requestId": $(decode_log.requestId), + "payment": $(decode_log.payment), + "callbackAddress": $(decode_log.callbackAddr), + "callbackFunctionId": $(decode_log.callbackFunctionId), + "expiration": $(decode_log.cancelExpiration), + "data": $(encode_data)}>] + submit [type=ethtx to="%s" data="$(encode_tx)" minConfirmations="2"] + decode_log->decode_cbor->ds1 -> ds1_parse -> ds1_multiply->encode_data->encode_tx->submit; +""" diff --git a/core/internal/fixtures/apicredentials b/core/internal/fixtures/apicredentials new file mode 100644 index 00000000..87407672 --- /dev/null +++ b/core/internal/fixtures/apicredentials @@ -0,0 +1,2 @@ +apiuser@plugin.test +16charlengthp4SsW0rD1!@#_ diff --git a/core/internal/fixtures/badcookie/cookie b/core/internal/fixtures/badcookie/cookie new file mode 100644 index 00000000..e69de29b diff --git a/core/internal/fixtures/cookie b/core/internal/fixtures/cookie new file mode 100644 index 00000000..e2e92d11 --- /dev/null +++ b/core/internal/fixtures/cookie @@ -0,0 +1 @@ +clsession=MTUzMTkyMzI0NHxEdi1CQkFFQ180SUFBUkFCRUFBQVJ2LUNBQUVHYzNSeWFXNW5EQTRBREdOc2MyVnpjMmx2Ymw5cFpBWnpkSEpwYm1jTUlnQWdNR016TWpobVptWmhaVFkxTkRNd05UaGpNelF3WXpZMFlqTTJNVGM1WkRBPXzDcp8k2N2ylrXA_3AQZlqiYpQqJIZOlpnh9XkQnj2HFQ==; Path=/; Expires=Fri, 17 Aug 2018 14:14:04 GMT; Max-Age=2592000 diff --git a/core/internal/fixtures/correct_password.txt b/core/internal/fixtures/correct_password.txt new file mode 100644 index 00000000..a6f42a5b --- /dev/null +++ b/core/internal/fixtures/correct_password.txt 
@@ -0,0 +1 @@ +16charlengthp4SsW0rD1!@#_ diff --git a/core/internal/fixtures/incorrect_password.txt b/core/internal/fixtures/incorrect_password.txt new file mode 100644 index 00000000..6881a875 --- /dev/null +++ b/core/internal/fixtures/incorrect_password.txt @@ -0,0 +1 @@ +thisisawrongpassword diff --git a/core/internal/fixtures/keys/3cb8e3fd9d27e39a5e9e6852b0e96160061fd4ea.json b/core/internal/fixtures/keys/3cb8e3fd9d27e39a5e9e6852b0e96160061fd4ea.json new file mode 100644 index 00000000..741d2716 --- /dev/null +++ b/core/internal/fixtures/keys/3cb8e3fd9d27e39a5e9e6852b0e96160061fd4ea.json @@ -0,0 +1 @@ +{"address":"3cb8e3FD9d27e39a5e9e6852b0e96160061fd4ea","crypto":{"cipher":"aes-128-ctr","ciphertext":"7515678239ccbeeaaaf0b103f0fba46a979bf6b2a52260015f35b9eb5fed5c17","cipherparams":{"iv":"87e5a5db334305e1e4fb8b3538ceea12"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"d89ac837b5dcdce5690af764762fe349d8162bb0086cea2bc3a4289c47853f96"},"mac":"57a7f4ada10d3d89644f541c91f89b5bde73e15e827ee40565e2d1f88bb0ac96"},"id":"c8cb9bc7-0a51-43bd-8348-8a67fd1ec52c","version":3} diff --git a/core/internal/fixtures/keys/7fc66c61f88A61DFB670627cA715Fe808057123e.json b/core/internal/fixtures/keys/7fc66c61f88A61DFB670627cA715Fe808057123e.json new file mode 100644 index 00000000..2ee0a0e8 --- /dev/null +++ b/core/internal/fixtures/keys/7fc66c61f88A61DFB670627cA715Fe808057123e.json @@ -0,0 +1,2 @@ +{"id": "6afac18b-29b6-47ab-9009-ca8c95c46776", "crypto": {"kdf": "scrypt", "mac": "cde427ce063d9e17d1b1d49063ef9a257cdc293f83af61f50369bc32f6d0ac82", "cipher": "aes-128-ctr", "kdfparams": {"n": 262144, "p": 1, "r": 8, "salt": "4efba5a3dfbb826f4dcaf1665224d4a63fdd14f056ad086984523f38f3137c17", "dklen": 32}, "ciphertext": "a72c7e9777778d2fcf373bb9876966894cb4868bdbfffd6ff93d8197d6ce1dff", "cipherparams": {"iv": "1235e4b205e69b9dc7505ac199d03c58"}}, "address": "7fc66c61f88a61dfb670627ca715fe808057123e", "version": 3} + diff --git 
a/core/internal/fixtures/keys/testkey-0x69Ca211a68100E18B40683E96b55cD217AC95006.json b/core/internal/fixtures/keys/testkey-0x69Ca211a68100E18B40683E96b55cD217AC95006.json new file mode 100644 index 00000000..6259dba9 --- /dev/null +++ b/core/internal/fixtures/keys/testkey-0x69Ca211a68100E18B40683E96b55cD217AC95006.json @@ -0,0 +1 @@ +{"address":"69ca211a68100e18b40683e96b55cd217ac95006","crypto":{"cipher":"aes-128-ctr","ciphertext":"f2c49f0673ed93cddb5940abe1b8e8ed64cf1d1e7f61cdc0106a8d0575442e61","cipherparams":{"iv":"c21faabfdc6c6991713d36aeb404751a"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"d1eeaf0c5b039da9ee3bdc15da408694dd8ee53be96ae73867228f3f5daf149a"},"mac":"6a9980f114a04e22ef02a442723c40f9c5d117f68fc2995a3ac4860d7f0862d2"},"id":"879d7c0a-ddf9-4644-aeab-7f626ec37030","version":3} \ No newline at end of file diff --git a/core/internal/fixtures/keys/testkey-0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4.json b/core/internal/fixtures/keys/testkey-0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4.json new file mode 100644 index 00000000..506e1597 --- /dev/null +++ b/core/internal/fixtures/keys/testkey-0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4.json @@ -0,0 +1 @@ +{"address":"f67d0290337bca0847005c7ffd1bc75ba9aae6e4","crypto":{"cipher":"aes-128-ctr","ciphertext":"57b56a26f50c1fffb2380d18e64a85c74c48ea60fcf085e33360aa293a8118b2","cipherparams":{"iv":"2b2269c3d4dad1e30af5fdb078e904bf"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":2,"p":2,"r":8,"salt":"90c23401c52f1b57a1a37d56438598a1781f5e176c6b73b8e7c17063987069d3"},"mac":"28fdff9c725a74b6bebdad969003929564457a10c7c0a31c672378c9d174fe87"},"id":"62ba4104-0f3e-4cef-8a0e-84f62912f85d","version":3} \ No newline at end of file diff --git a/core/internal/fixtures/new_password.txt b/core/internal/fixtures/new_password.txt new file mode 100644 index 00000000..b781c882 --- /dev/null +++ b/core/internal/fixtures/new_password.txt @@ -0,0 +1 @@ +new_p@55word!!! 
diff --git a/core/internal/fixtures/wasm/checketh.wat b/core/internal/fixtures/wasm/checketh.wat new file mode 100644 index 00000000..6fce589d --- /dev/null +++ b/core/internal/fixtures/wasm/checketh.wat @@ -0,0 +1,6 @@ +(module + (func $perform (param $value i64) (result i32) + (i64.lt_s (i64.const 450) (get_local $value)) + ) + (export "perform" (func $perform)) +) diff --git a/core/internal/fixtures/wasm/checkethf.wat b/core/internal/fixtures/wasm/checkethf.wat new file mode 100644 index 00000000..0b5b819b --- /dev/null +++ b/core/internal/fixtures/wasm/checkethf.wat @@ -0,0 +1,6 @@ +(module + (func $perform (param $value f64) (result i32) + (f64.lt (f64.const 450.0) (get_local $value)) + ) + (export "perform" (func $perform)) +) diff --git a/core/internal/fixtures/wasm/helloworld.wat b/core/internal/fixtures/wasm/helloworld.wat new file mode 100644 index 00000000..9eab847b --- /dev/null +++ b/core/internal/fixtures/wasm/helloworld.wat @@ -0,0 +1,10 @@ +(module + ;; Allocate a page of linear memory (64kb). Export it as "memory" + (memory (export "memory") 1) + + ;; Write the string at the start of the linear memory. + (data (i32.const 0) "Hello, world!") ;; write string at location 0 + + ;; Export the position and length of the string. 
+ (global (export "length") i32 (i32.const 12)) + (global (export "position") i32 (i32.const 0))) diff --git a/core/internal/gethwrappers2/compile.sh b/core/internal/gethwrappers2/compile.sh new file mode 100644 index 00000000..03619da3 --- /dev/null +++ b/core/internal/gethwrappers2/compile.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +set -ex + +optimize_runs="$1" +solpath="$2" +solcoptions=("--optimize" "--optimize-runs" "$optimize_runs" "--metadata-hash" "none") + +basefilename="$(basename "$solpath" .sol)" +pkgname="$(echo $basefilename | tr '[:upper:]' '[:lower:]')" + +here="$(dirname $0)" +pkgdir="${here}/${pkgname}" +mkdir -p "$pkgdir" +outpath="${pkgdir}/${pkgname}.go" +abi="${pkgdir}/${basefilename}.abi" +bin="${pkgdir}/${basefilename}.bin" + +solc-select use 0.7.6 +solc --version | grep 0.7.6 || ( echo "You need solc version 0.7.6" && exit 1 ) + +# FIXME: solc seems to find and compile every .sol file in this path, so invoking this once for every file produces n*3 artifacts +solc "$solpath" ${solcoptions[@]} --abi --bin --combined-json bin,bin-runtime,srcmap-runtime --overwrite -o "$(dirname $outpath)" + +go run wrap.go "$abi" "$bin" "$basefilename" "$pkgname" diff --git a/core/internal/gethwrappers2/generated/offchainaggregator/offchainaggregator.go b/core/internal/gethwrappers2/generated/offchainaggregator/offchainaggregator.go new file mode 100644 index 00000000..9cbb040b --- /dev/null +++ b/core/internal/gethwrappers2/generated/offchainaggregator/offchainaggregator.go @@ -0,0 +1,3286 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package offchainaggregator + +import ( + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +const OffchainAggregatorABI = "[{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_maximumGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_reasonableGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_microLinkPerEth\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_linkGweiPerObservation\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_linkGweiPerTransmission\",\"type\":\"uint32\"},{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"_link\",\"type\":\"address\"},{\"internalType\":\"int192\",\"name\":\"_minAnswer\",\"type\":\"int192\"},{\"internalType\":\"int192\",\"name\":\"_maxAnswer\",\"type\":\"int192\"},{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"_billingAccessController\",\"type\":\"address\"},{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"_requesterAccessController\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"_decimals\",\"type\":\"uint8\"},{\"internalType\":\"string\",\"name\":\"_description\",\"type\":\"string\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int256\",\"name\":\"current\",\"type\":\"int256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"u
pdatedAt\",\"type\":\"uint256\"}],\"name\":\"AnswerUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"contractAccessControllerInterface\",\"name\":\"old\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"contractAccessControllerInterface\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"BillingAccessControllerSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"maximumGasPrice\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"reasonableGasPrice\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"microLinkPerEth\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"linkGweiPerObservation\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"linkGweiPerTransmission\",\"type\":\"uint32\"}],\"name\":\"BillingSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"threshold\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"onchainConfig\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"encodedConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"encoded\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"contr
actLinkTokenInterface\",\"name\":\"_oldLinkToken\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"contractLinkTokenInterface\",\"name\":\"_newLinkToken\",\"type\":\"address\"}],\"name\":\"LinkTokenSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"startedBy\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"}],\"name\":\"NewRound\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint32\",\"name\":\"aggregatorRoundId\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"int192\",\"name\":\"answer\",\"type\":\"int192\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"int192[]\",\"name\":\"observations\",\"type\":\"int192[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"observers\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint40\",\"name\":\"epochAndRound\",\"type\":\"uint40\"}],\"name\":\"NewTransmission\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"contractLinkTokenInterface\",\"name\":\"linkToken\",\"type\":\"address\"}],\"name\":\"OraclePaid\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"nam
e\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previous\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"contractAccessControllerInterface\",\"name\":\"old\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"contractAccessControllerInterface\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"RequesterAccessControllerSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"round\",\"type\":\"uint8\"}],\"name\":\"RoundRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"contractAggregatorValidatorInterface\",\"name\":\"previousValidator\",\"type\":\"address\"},{\"indexed\":false,\"in
ternalType\":\"uint32\",\"name\":\"previousGasLimit\",\"type\":\"uint32\"},{\"indexed\":true,\"internalType\":\"contractAggregatorValidatorInterface\",\"name\":\"currentValidator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"currentGasLimit\",\"type\":\"uint32\"}],\"name\":\"ValidatorConfigSet\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_transmitter\",\"type\":\"address\"}],\"name\":\"acceptPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"billingAccessController\",\"outputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"description\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_roundId\",\"type\":\"uint256\"}],\"name\":\"getAnswer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getBilling\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"maximumGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"reasonableGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"microLinkPerEth\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"linkGweiPerObservation\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"linkGweiPerTransmission\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLin
kToken\",\"outputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"linkToken\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint80\",\"name\":\"_roundId\",\"type\":\"uint80\"}],\"name\":\"getRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_roundId\",\"type\":\"uint256\"}],\"name\":\"getTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestAnswer\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestConfigDetails\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"configCount\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blockNumber\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRound\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"},{\"i
nternalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestTransmissionDetails\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"configDigest\",\"type\":\"bytes32\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"round\",\"type\":\"uint8\"},{\"internalType\":\"int192\",\"name\":\"latestAnswer\",\"type\":\"int192\"},{\"internalType\":\"uint64\",\"name\":\"latestTimestamp\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"linkAvailableForPayment\",\"outputs\":[{\"internalType\":\"int256\",\"name\":\"availableBalance\",\"type\":\"int256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maxAnswer\",\"outputs\":[{\"internalType\":\"int192\",\"name\":\"\",\"type\":\"int192\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"minAnswer\",\"outputs\":[{\"internalType\":\"int192\",\"name\":\"\",\"type\":\"int192\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_signerOrTransmitter\",\"type\":\"address\"}],\"name\":\"oracleObservationCount\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_transmitter\",\"type\":\"address\"}],\"name\":\"owedPayment\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"addresspayable\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\"
:\"function\"},{\"inputs\":[],\"name\":\"requestNewRound\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"\",\"type\":\"uint80\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"requesterAccessController\",\"outputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_maximumGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_reasonableGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_microLinkPerEth\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_linkGweiPerObservation\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_linkGweiPerTransmission\",\"type\":\"uint32\"}],\"name\":\"setBilling\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"_billingAccessController\",\"type\":\"address\"}],\"name\":\"setBillingAccessController\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"_threshold\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"_onchainConfig\",\"type\":\"bytes\"},{\"internalType\":\"uint64\",\"name\":\"_offchainConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"_offchainConfig\",\"type\":\"bytes\"}],\"name\":\"setConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"_linkToken\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_recipient\",\"type\":\"address\"}],\"name\":\"setLinkToken\",\"outputs\":[],\"stateMutabi
lity\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"_transmitters\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"_payees\",\"type\":\"address[]\"}],\"name\":\"setPayees\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractAccessControllerInterface\",\"name\":\"_requesterAccessController\",\"type\":\"address\"}],\"name\":\"setRequesterAccessController\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractAggregatorValidatorInterface\",\"name\":\"_newValidator\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"_newGasLimit\",\"type\":\"uint32\"}],\"name\":\"setValidatorConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_proposed\",\"type\":\"address\"}],\"name\":\"transferPayeeship\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32[3]\",\"name\":\"reportContext\",\"type\":\"bytes32[3]\"},{\"internalType\":\"bytes\",\"name\":\"report\",\"type\":\"bytes\"},{\"internalType\":\"bytes32[]\",\"name\":\"rs\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"ss\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32\",\"name\":\"rawVs\",\"type\":\"bytes32\"}],\"name\":\"transmit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"transmitters\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\"
:\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"validatorConfig\",\"outputs\":[{\"internalType\":\"contractAggregatorValidatorInterface\",\"name\":\"validator\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"gasLimit\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"withdrawFunds\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_transmitter\",\"type\":\"address\"}],\"name\":\"withdrawPayment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" + +var OffchainAggregatorBin = 
"0x6101006040523480156200001257600080fd5b5060405162005add38038062005add83398181016040526101808110156200003957600080fd5b815160208301516040808501516060860151608087015160a088015160c089015160e08a01516101008b01516101208c01516101408d01516101608e0180519a519c9e9b9d999c989b979a969995989497939692959194939182019284640100000000821115620000a957600080fd5b908301906020820185811115620000bf57600080fd5b8251640100000000811182820188101715620000da57600080fd5b82525081516020918201929091019080838360005b8381101562000109578181015183820152602001620000ef565b50505050905090810190601f168015620001375780820380516001836020036101000a031916815260200191505b506040525050600080546001600160a01b03191633178155608052508b8b8b8b8b8b89620001698787878787620002ce565b600980546001600160a01b0319166001600160a01b0384169081179091556040516000907f4966a50c93f855342ccf6c5c0d358b85b91335b2acedc7da0932f691f351711a908290a3620001bd81620003c0565b620001c762000657565b620001d162000657565b60005b601f8160ff16101562000221576001838260ff16601f8110620001f357fe5b61ffff909216602092909202015260018260ff8316601f81106200021357fe5b6020020152600101620001d4565b5062000231600b83601f62000676565b5062000241600f82601f62000713565b505050505060f887901b7fff000000000000000000000000000000000000000000000000000000000000001660e052505083516200028a93506032925060208501915062000744565b50620002968362000439565b620002a360008062000511565b50505050601791820b820b604090811b60a05290820b90910b901b60c05250620007dd945050505050565b6040805160a0808201835263ffffffff88811680845288821660208086018290528984168688018190528985166060808901829052958a1660809889018190526008805463ffffffff1916871763ffffffff60201b191664010000000087021763ffffffff60401b19166801000000000000000085021763ffffffff60601b19166c0100000000000000000000000084021763ffffffff60801b1916600160801b830217905589519586529285019390935283880152928201529283015291517fd0d9486a2c673e2a4b57fc82e4c8a556b3e2b82dd5db07e2c04a920ca0f469b6929181900390910190a15050505050565b600a546001600160a01b0390811690821681146200043557600a80546001600160a01b03191
66001600160a01b03848116918217909255604080519284168352602083019190915280517f793cb73064f3c8cde7e187ae515511e6e56d1ee89bf08b82fa60fb70f8d489129281900390910190a15b5050565b6000546001600160a01b0316331462000499576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b6031546001600160a01b0390811690821681146200043557603180546001600160a01b0319166001600160a01b03848116918217909255604080519284168352602083019190915280517f27b89aede8b560578baaa25ee5ce3852c5eecad1e114b941bbd89e1eb4bae6349281900390910190a15050565b6000546001600160a01b0316331462000571576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b604080518082019091526030546001600160a01b03808216808452600160a01b90920463ffffffff1660208401528416141580620005bf57508163ffffffff16816020015163ffffffff1614155b1562000652576040805180820182526001600160a01b0385811680835263ffffffff8681166020948501819052603080546001600160a01b031916841763ffffffff60a01b1916600160a01b8302179055865187860151875193168352948201528451919493909216927fb04e3a37abe9c0fcdfebdeae019a8e2b12ddf53f5d55ffb0caccc1bedaca1541928290030190a35b505050565b604051806103e00160405280601f906020820280368337509192915050565b600283019183908215620007015791602002820160005b83821115620006cf57835183826101000a81548161ffff021916908361ffff16021790555092602001926002016020816001010492830192600103026200068d565b8015620006ff5782816101000a81549061ffff0219169055600201602081600101049283019260010302620006cf565b505b506200070f929150620007c6565b5090565b82601f810192821562000701579160200282015b828111156200070157825182559160200191906001019062000727565b828054600181600116156101000203166002900490600052602060002090601f0160209004810192826200077c576000855562000701565b82601f106200079757805160ff191683800117855562000701565b828001600101855582156200070157918201828111156200070157825182559160200191906001019062000727565b5b8082111562000
70f5760008155600101620007c7565b60805160f81c60a05160401c60c05160401c60e05160f81c6152b46200082960003980610ea25250806111ef528061416d525080610e015280614140525080611d6c52506152b46000f3fe608060405234801561001057600080fd5b50600436106102c85760003560e01c8063996e82981161017b578063d09dc339116100d8578063eb4571631161008c578063f2fde38b11610071578063f2fde38b14610c29578063fbffd2c114610c4f578063feaf968c14610c75576102c8565b8063eb45716314610bc9578063eb5dcd6c14610bfb576102c8565b8063e4902f82116100bd578063e4902f8214610b3d578063e5fe457714610b7a578063e76d516814610bc1576102c8565b8063d09dc339146108e3578063e3d0e712146108eb576102c8565b8063b1dc65a41161012f578063b633620c11610114578063b633620c14610855578063bd82470614610872578063c1075329146108b7576102c8565b8063b1dc65a414610721578063b5ab58dc14610838576102c8565b80639c849b30116101605780639c849b30146106135780639e3ceeab146106d5578063b121e147146106fb576102c8565b8063996e8298146105985780639a6fc8f5146105a0576102c8565b806370efdf2d116102295780638205bf6a116101dd5780638da5cb5b116101c25780638da5cb5b146105395780638e0566de1461054157806398e5b12a14610571576102c8565b80638205bf6a1461050b5780638ac28d5a14610513576102c8565b806379ba50971161020e57806379ba50971461047c578063814118341461048457806381ff7048146104dc576102c8565b806370efdf2d146104505780637284e41614610474576102c8565b80634fb174701161028057806354fd4d501161026557806354fd4d5014610438578063668a0f021461044057806370da2f6714610448576102c8565b80634fb174701461040057806350d25bcd14610430576102c8565b806322adbc78116102b157806322adbc781461038257806329937268146103a1578063313ce567146103e2576102c8565b80630eafb25b146102cd578063181f5a7714610305575b600080fd5b6102f3600480360360208110156102e357600080fd5b50356001600160a01b0316610c7d565b60408051918252519081900360200190f35b61030d610dc8565b6040805160208082528351818301528351919283929083019185019080838360005b8381101561034757818101518382015260200161032f565b50505050905090810190601f1680156103745780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b61038a610dff565b60408
05160179290920b8252519081900360200190f35b6103a9610e23565b6040805163ffffffff96871681529486166020860152928516848401529084166060840152909216608082015290519081900360a00190f35b6103ea610ea0565b6040805160ff9092168252519081900360200190f35b61042e6004803603604081101561041657600080fd5b506001600160a01b0381358116916020013516610ec4565b005b6102f36111a8565b6102f36111d3565b6102f36111d8565b61038a6111ed565b610458611211565b604080516001600160a01b039092168252519081900360200190f35b61030d611220565b61042e6112d4565b61048c6113a2565b60408051602080825283518183015283519192839290830191858101910280838360005b838110156104c85781810151838201526020016104b0565b505050509050019250505060405180910390f35b6104e4611403565b6040805163ffffffff94851681529290931660208301528183015290519081900360600190f35b6102f361141f565b61042e6004803603602081101561052957600080fd5b50356001600160a01b0316611469565b6104586114e3565b6105496114f2565b604080516001600160a01b03909316835263ffffffff90911660208301528051918290030190f35b610579611536565b6040805169ffffffffffffffffffff9092168252519081900360200190f35b610458611711565b6105c9600480360360208110156105b657600080fd5b503569ffffffffffffffffffff16611720565b604051808669ffffffffffffffffffff1681526020018581526020018481526020018381526020018269ffffffffffffffffffff1681526020019550505050505060405180910390f35b61042e6004803603604081101561062957600080fd5b81019060208101813564010000000081111561064457600080fd5b82018360208201111561065657600080fd5b8035906020019184602083028401116401000000008311171561067857600080fd5b91939092909160208101903564010000000081111561069657600080fd5b8201836020820111156106a857600080fd5b803590602001918460208302840111640100000000831117156106ca57600080fd5b50909250905061186b565b61042e600480360360208110156106eb57600080fd5b50356001600160a01b0316611aa4565b61042e6004803603602081101561071157600080fd5b50356001600160a01b0316611b92565b61042e600480360360e081101561073757600080fd5b81018160808101606082013564010000000081111561075557600080fd5b82018360208201111561076757600080fd5b803590602001918460018302840
1116401000000008311171561078957600080fd5b9193909290916020810190356401000000008111156107a757600080fd5b8201836020820111156107b957600080fd5b803590602001918460208302840111640100000000831117156107db57600080fd5b9193909290916020810190356401000000008111156107f957600080fd5b82018360208201111561080b57600080fd5b8035906020019184602083028401116401000000008311171561082d57600080fd5b919350915035611c8b565b6102f36004803603602081101561084e57600080fd5b50356121f1565b6102f36004803603602081101561086b57600080fd5b5035612227565b61042e600480360360a081101561088857600080fd5b5063ffffffff81358116916020810135821691604082013581169160608101358216916080909101351661227c565b61042e600480360360408110156108cd57600080fd5b506001600160a01b0381351690602001356123e2565b6102f361270a565b61042e600480360360c081101561090157600080fd5b81019060208101813564010000000081111561091c57600080fd5b82018360208201111561092e57600080fd5b8035906020019184602083028401116401000000008311171561095057600080fd5b91908080602002602001604051908101604052809392919081815260200183836020028082843760009201919091525092959493602081019350359150506401000000008111156109a057600080fd5b8201836020820111156109b257600080fd5b803590602001918460208302840111640100000000831117156109d457600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929560ff853516959094909350604081019250602001359050640100000000811115610a2f57600080fd5b820183602082011115610a4157600080fd5b80359060200191846001830284011164010000000083111715610a6357600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929567ffffffffffffffff853516959094909350604081019250602001359050640100000000811115610ac857600080fd5b820183602082011115610ada57600080fd5b80359060200191846001830284011164010000000083111715610afc57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295506127b4945050505050565b610b6360048036036020811015610b5357600080fd5b50356001600160a01b0
3166131f8565b6040805161ffff9092168252519081900360200190f35b610b826132a5565b6040805195865263ffffffff909416602086015260ff9092168484015260170b606084015267ffffffffffffffff166080830152519081900360a00190f35b61045861336e565b61042e60048036036040811015610bdf57600080fd5b5080356001600160a01b0316906020013563ffffffff1661337d565b61042e60048036036040811015610c1157600080fd5b506001600160a01b0381358116916020013516613512565b61042e60048036036020811015610c3f57600080fd5b50356001600160a01b031661366d565b61042e60048036036020811015610c6557600080fd5b50356001600160a01b0316613735565b6105c961379d565b6001600160a01b03811660009081526005602090815260408083208151808301909252805460ff808216845285948401916101009004166002811115610cbf57fe5b6002811115610cca57fe5b9052509050600081602001516002811115610ce157fe5b1415610cf1576000915050610dc3565b6040805160a08101825260085463ffffffff80821683526401000000008204811660208401526801000000000000000082048116938301939093526c010000000000000000000000008104831660608301819052700100000000000000000000000000000000909104909216608082015282519091600091600190600b9060ff16601f8110610d7c57fe5b601091828204019190066002029054906101000a900461ffff160361ffff1602633b9aca000290506001600f846000015160ff16601f8110610dba57fe5b01540301925050505b919050565b60408051808201909152601881527f4f6666636861696e41676772656761746f7220332e302e300000000000000000602082015290565b7f000000000000000000000000000000000000000000000000000000000000000081565b6040805160a08101825260085463ffffffff808216808452640100000000830482166020850181905268010000000000000000840483169585018690526c01000000000000000000000000840483166060860181905270010000000000000000000000000000000090940490921660809094018490529490939290565b7f000000000000000000000000000000000000000000000000000000000000000081565b6000546001600160a01b03163314610f23576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b6009546001600160a01b03908116908316811415610f4157506111a4565b6040805
17f70a0823100000000000000000000000000000000000000000000000000000000815230600482015290516001600160a01b038516916370a08231916024808301926020929190829003018186803b158015610fa057600080fd5b505afa158015610fb4573d6000803e3d6000fd5b505050506040513d6020811015610fca57600080fd5b50610fd59050613810565b6000816001600160a01b03166370a08231306040518263ffffffff1660e01b815260040180826001600160a01b0316815260200191505060206040518083038186803b15801561102457600080fd5b505afa158015611038573d6000803e3d6000fd5b505050506040513d602081101561104e57600080fd5b5051604080517fa9059cbb0000000000000000000000000000000000000000000000000000000081526001600160a01b0386811660048301526024820184905291519293509084169163a9059cbb916044808201926020929091908290030181600087803b1580156110bf57600080fd5b505af11580156110d3573d6000803e3d6000fd5b505050506040513d60208110156110e957600080fd5b505161113c576040805162461bcd60e51b815260206004820152601f60248201527f7472616e736665722072656d61696e696e672066756e6473206661696c656400604482015290519081900360640190fd5b600980547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b0386811691821790925560405190918416907f4966a50c93f855342ccf6c5c0d358b85b91335b2acedc7da0932f691f351711a90600090a350505b5050565b602e5465010000000000900463ffffffff166000908152602f6020526040902054601790810b900b90565b600481565b602e5465010000000000900463ffffffff1690565b7f000000000000000000000000000000000000000000000000000000000000000081565b6031546001600160a01b031690565b60328054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6101006001881615020190951694909404938401819004810282018101909252828152606093909290918301828280156112ca5780601f1061129f576101008083540402835291602001916112ca565b820191906000526020600020905b8154815290600101906020018083116112ad57829003601f168201915b5050505050905090565b6001546001600160a01b03163314611333576040805162461bcd60e51b815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e657200000000000000000000604482015290519081900
360640190fd5b60008054337fffffffffffffffffffffffff0000000000000000000000000000000000000000808316821784556001805490911690556040516001600160a01b0390921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b606060078054806020026020016040519081016040528092919081815260200182805480156112ca57602002820191906000526020600020905b81546001600160a01b031681526001909101906020018083116113dc575050505050905090565b60045460025463ffffffff808316926401000000009004169192565b602e5465010000000000900463ffffffff166000908152602f60205260409020547801000000000000000000000000000000000000000000000000900467ffffffffffffffff1690565b6001600160a01b038181166000908152600d60205260409020541633146114d7576040805162461bcd60e51b815260206004820152601760248201527f4f6e6c792070617965652063616e207769746864726177000000000000000000604482015290519081900360640190fd5b6114e081613bc0565b50565b6000546001600160a01b031681565b604080518082019091526030546001600160a01b0381168083527401000000000000000000000000000000000000000090910463ffffffff16602090920182905291565b600080546001600160a01b03163314806116305750603154604080517f6b14daf800000000000000000000000000000000000000000000000000000000815233600482018181526024830193845236604484018190526001600160a01b0390951694636b14daf894929360009391929190606401848480828437600083820152604051601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016909201965060209550909350505081840390508186803b15801561160357600080fd5b505afa158015611617573d6000803e3d6000fd5b505050506040513d602081101561162d57600080fd5b50515b611681576040805162461bcd60e51b815260206004820152601d60248201527f4f6e6c79206f776e6572267265717565737465722063616e2063616c6c000000604482015290519081900360640190fd5b604080518082018252602e5464ffffffffff8116825263ffffffff65010000000000820481166020808501919091526002548551908152600884901c9092169082015260ff909116818401529151909133917f41e3990591fd372502daa15842da15bc7f41c75309ab3ff4f56f1848c178825c9181900360600190a2806020015160010163ffffffff1691505090565b600a546
001600160a01b031690565b600080600080600063ffffffff8669ffffffffffffffffffff1611156040518060400160405280600f81526020017f4e6f20646174612070726573656e740000000000000000000000000000000000815250906117fb5760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b838110156117c05781810151838201526020016117a8565b50505050905090810190601f1680156117ed5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b5050505063ffffffff83166000908152602f6020908152604091829020825180840190935254601781810b810b810b808552780100000000000000000000000000000000000000000000000090920467ffffffffffffffff1693909201839052949594900b939092508291508490565b6000546001600160a01b031633146118ca576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b82811461191e576040805162461bcd60e51b815260206004820181905260248201527f7472616e736d6974746572732e73697a6520213d207061796565732e73697a65604482015290519081900360640190fd5b60005b83811015611a9d57600085858381811061193757fe5b905060200201356001600160a01b03169050600084848481811061195757fe5b6001600160a01b038581166000908152600d602090815260409091205492029390930135831693509091169050801580806119a35750826001600160a01b0316826001600160a01b0316145b6119f4576040805162461bcd60e51b815260206004820152601160248201527f706179656520616c726561647920736574000000000000000000000000000000604482015290519081900360640190fd5b6001600160a01b038481166000908152600d6020526040902080547fffffffffffffffffffffffff00000000000000000000000000000000000000001685831690811790915590831614611a8d57826001600160a01b0316826001600160a01b0316856001600160a01b03167f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b360405160405180910390a45b5050600190920191506119219050565b5050505050565b6000546001600160a01b03163314611b03576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900
360640190fd5b6031546001600160a01b0390811690821681146111a457603180547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b03848116918217909255604080519284168352602083019190915280517f27b89aede8b560578baaa25ee5ce3852c5eecad1e114b941bbd89e1eb4bae6349281900390910190a15050565b6001600160a01b038181166000908152600e6020526040902054163314611c00576040805162461bcd60e51b815260206004820152601f60248201527f6f6e6c792070726f706f736564207061796565732063616e2061636365707400604482015290519081900360640190fd5b6001600160a01b038181166000818152600d602090815260408083208054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217909355600e909452828520805490921690915590519416939092849290917f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b39190a45050565b60005a604080516020601f8b018190048102820181019092528981529192508a3591818c013591611cdb9184918491908e908e9081908401838280828437600092019190915250613dcc92505050565b6040805160608101825260025480825260035460ff80821660208501526101009091041692820192909252908314611d5a576040805162461bcd60e51b815260206004820152601560248201527f636f6e666967446967657374206d69736d617463680000000000000000000000604482015290519081900360640190fd5b611d688b8b8b8b8b8b614505565b60007f000000000000000000000000000000000000000000000000000000000000000015611db5576002826020015183604001510160ff1681611da757fe5b0460010160ff169050611dc3565b816020015160010160ff1690505b888114611e17576040805162461bcd60e51b815260206004820152601a60248201527f77726f6e67206e756d626572206f66207369676e617475726573000000000000604482015290519081900360640190fd5b888714611e6b576040805162461bcd60e51b815260206004820152601e60248201527f7369676e617475726573206f7574206f6620726567697374726174696f6e0000604482015290519081900360640190fd5b3360009081526005602090815260408083208151808301909252805460ff80821684529293919291840191610100909104166002811115611ea857fe5b6002811115611eb357fe5b9052509050600281602001516002811115611eca57fe5b148015611efe57506007816000015160ff1681548110611ee657f
e5b6000918252602090912001546001600160a01b031633145b611f4f576040805162461bcd60e51b815260206004820152601860248201527f756e617574686f72697a6564207472616e736d69747465720000000000000000604482015290519081900360640190fd5b50505050506000888860405180838380828437808301925050509250505060405180910390208a60405160200180838152602001826003602002808284378083019250505092505050604051602081830303815290604052805190602001209050611fb8615174565b611fc0615193565b60005b888110156121cb576000600185888460208110611fdc57fe5b1a601b018d8d86818110611fec57fe5b905060200201358c8c87818110611fff57fe5b9050602002013560405160008152602001604052604051808581526020018460ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa15801561205a573d6000803e3d6000fd5b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08101516001600160a01b03811660009081526005602090815290849020838501909452835460ff808216855292965092945084019161010090041660028111156120c757fe5b60028111156120d257fe5b90525092506001836020015160028111156120e957fe5b1461213b576040805162461bcd60e51b815260206004820152601e60248201527f61646472657373206e6f7420617574686f72697a656420746f207369676e0000604482015290519081900360640190fd5b8251849060ff16601f811061214c57fe5b6020020151156121a3576040805162461bcd60e51b815260206004820152601460248201527f6e6f6e2d756e69717565207369676e6174757265000000000000000000000000604482015290519081900360640190fd5b600184846000015160ff16601f81106121b857fe5b9115156020909202015250600101611fc3565b5050505063ffffffff81106121dc57fe5b6121e68133614571565b505050505050505050565b600063ffffffff82111561220757506000610dc3565b5063ffffffff166000908152602f6020526040902054601790810b900b90565b600063ffffffff82111561223d57506000610dc3565b5063ffffffff166000908152602f60205260409020547801000000000000000000000000000000000000000000000000900467ffffffffffffffff1690565b600a546000546001600160a01b0391821691163314806123745750604080517f6b14daf800000000000000000000000000000000000000000000000000000000815233600482018181526024830193845
236604484018190526001600160a01b03861694636b14daf8946000939190606401848480828437600083820152604051601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016909201965060209550909350505081840390508186803b15801561234757600080fd5b505afa15801561235b573d6000803e3d6000fd5b505050506040513d602081101561237157600080fd5b50515b6123c5576040805162461bcd60e51b815260206004820181905260248201527f4f6e6c79206f776e65722662696c6c696e6741646d696e2063616e2063616c6c604482015290519081900360640190fd5b6123cd613810565b6123da8686868686614704565b505050505050565b6000546001600160a01b03163314806124db5750600a54604080517f6b14daf800000000000000000000000000000000000000000000000000000000815233600482018181526024830193845236604484018190526001600160a01b0390951694636b14daf894929360009391929190606401848480828437600083820152604051601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016909201965060209550909350505081840390508186803b1580156124ae57600080fd5b505afa1580156124c2573d6000803e3d6000fd5b505050506040513d60208110156124d857600080fd5b50515b61252c576040805162461bcd60e51b815260206004820181905260248201527f4f6e6c79206f776e65722662696c6c696e6741646d696e2063616e2063616c6c604482015290519081900360640190fd5b600061253661487e565b600954604080517f70a0823100000000000000000000000000000000000000000000000000000000815230600482015290519293506000926001600160a01b03909216916370a0823191602480820192602092909190829003018186803b1580156125a057600080fd5b505afa1580156125b4573d6000803e3d6000fd5b505050506040513d60208110156125ca57600080fd5b5051905081811015612623576040805162461bcd60e51b815260206004820152601460248201527f696e73756666696369656e742062616c616e6365000000000000000000000000604482015290519081900360640190fd5b6009546001600160a01b031663a9059cbb8561264185850387614a4e565b6040518363ffffffff1660e01b815260040180836001600160a01b0316815260200182815260200192505050602060405180830381600087803b15801561268757600080fd5b505af115801561269b573d6000803e3d6000fd5b505050506040513d60208110156126b157600080fd5b5051612
704576040805162461bcd60e51b815260206004820152601260248201527f696e73756666696369656e742066756e64730000000000000000000000000000604482015290519081900360640190fd5b50505050565b600954604080517f70a08231000000000000000000000000000000000000000000000000000000008152306004820152905160009283926001600160a01b03909116916370a0823191602480820192602092909190829003018186803b15801561277357600080fd5b505afa158015612787573d6000803e3d6000fd5b505050506040513d602081101561279d57600080fd5b5051905060006127ab61487e565b90910391505090565b855185518560ff16601f831115612812576040805162461bcd60e51b815260206004820152601060248201527f746f6f206d616e79207369676e65727300000000000000000000000000000000604482015290519081900360640190fd5b60008111612867576040805162461bcd60e51b815260206004820152601a60248201527f7468726573686f6c64206d75737420626520706f736974697665000000000000604482015290519081900360640190fd5b8183146128a55760405162461bcd60e51b81526004018080602001828103825260248152602001806152846024913960400191505060405180910390fd5b8060030283116128fc576040805162461bcd60e51b815260206004820181905260248201527f6661756c74792d6f7261636c65207468726573686f6c6420746f6f2068696768604482015290519081900360640190fd5b6000546001600160a01b0316331461295b576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b6040805160c0810182528a8152602081018a905260ff89169181018290526060810188905267ffffffffffffffff8716608082015260a08101869052906129a29088614a68565b60065415612b3757600680547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff810191600091839081106129df57fe5b6000918252602082200154600780546001600160a01b0390921693509084908110612a0657fe5b60009182526020808320909101546001600160a01b0385811684526005909252604080842080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000090811690915592909116808452922080549091169055600680549192509080612a7357fe5b60008281526020902081017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffff90810180547fffffffffffffffffffffffff00000000000000000000000000000000000000001690550190556007805480612ad657fe5b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055019055506129a2915050565b60005b815151811015612ede5760006005600084600001518481518110612b5a57fe5b6020908102919091018101516001600160a01b0316825281019190915260400160002054610100900460ff166002811115612b9157fe5b14612be3576040805162461bcd60e51b815260206004820152601760248201527f7265706561746564207369676e65722061646472657373000000000000000000604482015290519081900360640190fd5b6040805180820190915260ff82168152600160208201528251805160059160009185908110612c0e57fe5b6020908102919091018101516001600160a01b0316825281810192909252604001600020825181547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff9091161780825591830151909182907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff16610100836002811115612c9a57fe5b021790555060009150612caa9050565b6005600084602001518481518110612cbe57fe5b6020908102919091018101516001600160a01b0316825281019190915260400160002054610100900460ff166002811115612cf557fe5b14612d47576040805162461bcd60e51b815260206004820152601c60248201527f7265706561746564207472616e736d6974746572206164647265737300000000604482015290519081900360640190fd5b6040805180820190915260ff821681526020810160028152506005600084602001518481518110612d7457fe5b6020908102919091018101516001600160a01b0316825281810192909252604001600020825181547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff9091161780825591830151909182907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff16610100836002811115612e0057fe5b021790555050825180516006925083908110612e1857fe5b602090810291909101810151825460018101845560009384529282902090920180547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b039093169290921790915582015180516007919083908110612e8157fe5b6020908
1029190910181015182546001808201855560009485529290932090920180547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b039093169290921790915501612b3a565b5060408101516003805460ff83167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00909116179055600480544363ffffffff9081166401000000009081027fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff84161780831660010183167fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000009091161793849055855160208701516060880151608089015160a08a0151949096048516974697612fb09789973097921695949391614a9c565b60026000018190555050816000015151600260010160016101000a81548160ff021916908360ff1602179055507f1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e0581600260000154600460009054906101000a900463ffffffff16856000015186602001518760400151886060015189608001518a60a00151604051808a63ffffffff1681526020018981526020018863ffffffff16815260200180602001806020018760ff168152602001806020018667ffffffffffffffff1681526020018060200185810385528b818151815260200191508051906020019060200280838360005b838110156130b95781810151838201526020016130a1565b5050505090500185810384528a818151815260200191508051906020019060200280838360005b838110156130f85781810151838201526020016130e0565b50505050905001858103835288818151815260200191508051906020019080838360005b8381101561313457818101518382015260200161311c565b50505050905090810190601f1680156131615780820380516001836020036101000a031916815260200191505b50858103825286518152865160209182019188019080838360005b8381101561319457818101518382015260200161317c565b50505050905090810190601f1680156131c15780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a16131eb826040015183606001516111a4565b5050505050505050505050565b6001600160a01b03811660009081526005602090815260408083208151808301909252805460ff80821684528594840191610100900416600281111561323a57fe5b600281111561324557fe5b905250905060008160200151600281111561325c57fe5b141561326c576000915050610dc3565b6
001600b826000015160ff16601f811061328257fe5b601091828204019190066002029054906101000a900461ffff1603915050919050565b6000808080803332146132ff576040805162461bcd60e51b815260206004820152601460248201527f4f6e6c792063616c6c61626c6520627920454f41000000000000000000000000604482015290519081900360640190fd5b5050600254602e5463ffffffff65010000000000820481166000908152602f60205260409020549296600883901c909116955064ffffffffff9091169350601782900b9250780100000000000000000000000000000000000000000000000090910467ffffffffffffffff1690565b6009546001600160a01b031690565b6000546001600160a01b031633146133dc576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b604080518082019091526030546001600160a01b038082168084527401000000000000000000000000000000000000000090920463ffffffff166020840152841614158061343a57508163ffffffff16816020015163ffffffff1614155b1561350d576040805180820182526001600160a01b0385811680835263ffffffff8681166020948501819052603080547fffffffffffffffffffffffff00000000000000000000000000000000000000001684177fffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffff16740100000000000000000000000000000000000000008302179055865187860151875193168352948201528451919493909216927fb04e3a37abe9c0fcdfebdeae019a8e2b12ddf53f5d55ffb0caccc1bedaca1541928290030190a35b505050565b6001600160a01b038281166000908152600d6020526040902054163314613580576040805162461bcd60e51b815260206004820152601d60248201527f6f6e6c792063757272656e742070617965652063616e20757064617465000000604482015290519081900360640190fd5b336001600160a01b03821614156135de576040805162461bcd60e51b815260206004820152601760248201527f63616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015290519081900360640190fd5b6001600160a01b038083166000908152600e6020526040902080548383167fffffffffffffffffffffffff00000000000000000000000000000000000000008216811790925590911690811461350d576040516001600160a01b038084169133918616907f84f7c7c80bb8ed2279b4aab5f61cd05e637
4073d38f46d7f32de8c30e9e3836790600090a4505050565b6000546001600160a01b031633146136cc576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b600180547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000546001600160a01b03163314613794576040805162461bcd60e51b815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e657200000000000000000000604482015290519081900360640190fd5b6114e081614cd3565b602e5465010000000000900463ffffffff166000818152602f6020908152604091829020825180840190935254601781810b810b810b808552780100000000000000000000000000000000000000000000000090920467ffffffffffffffff1693909201839052929392900b9181908490565b6040805160a08101825260085463ffffffff80821683526401000000008204811660208401526801000000000000000082048116838501526c0100000000000000000000000082048116606084015270010000000000000000000000000000000090910416608082015260095482516103e081019384905291926001600160a01b0390911691600091600b90601f908285855b82829054906101000a900461ffff1661ffff16815260200190600201906020826001010492830192600103820291508084116138a3575050604080516103e08101918290529596506000959450600f9350601f9250905082845b8154815260200190600101908083116138fd57505050505090506000600780548060200260200160405190810160405280929190818152602001828054801561396f57602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311613951575b5050505050905060005b8151811015613ba457600060018483601f811061399257fe5b6020020151039050600060018684601f81106139aa57fe5b60200201510361ffff169050600082896060015163ffffffff168302633b9aca00020190506000811115613b99576000600d60008787815181106139ea57fe5b60200260200101516001600160a01b03166001600160a01b0316815260200190815260200160002060009054906101000a90046001600160a01b03169050886001600160a01b031663a9059cbb82846040518
363ffffffff1660e01b815260040180836001600160a01b0316815260200182815260200192505050602060405180830381600087803b158015613a7f57600080fd5b505af1158015613a93573d6000803e3d6000fd5b505050506040513d6020811015613aa957600080fd5b5051613afc576040805162461bcd60e51b815260206004820152601260248201527f696e73756666696369656e742066756e64730000000000000000000000000000604482015290519081900360640190fd5b60018886601f8110613b0a57fe5b61ffff909216602092909202015260018786601f8110613b2657fe5b602002018181525050886001600160a01b0316816001600160a01b0316878781518110613b4f57fe5b60200260200101516001600160a01b03167fd0b1dac935d85bd54cf0a33b0d41d39f8cf53a968465fc7ea2377526b8ac712c856040518082815260200191505060405180910390a4505b505050600101613979565b50613bb2600b84601f6151aa565b506123da600f83601f615240565b6001600160a01b03811660009081526005602090815260408083208151808301909252805460ff80821684529293919291840191610100909104166002811115613c0657fe5b6002811115613c1157fe5b90525090506000613c2183610c7d565b9050801561350d576001600160a01b038084166000908152600d602090815260408083205460095482517fa9059cbb000000000000000000000000000000000000000000000000000000008152918616600483018190526024830188905292519295169363a9059cbb9360448084019491939192918390030190829087803b158015613cac57600080fd5b505af1158015613cc0573d6000803e3d6000fd5b505050506040513d6020811015613cd657600080fd5b5051613d29576040805162461bcd60e51b815260206004820152601260248201527f696e73756666696369656e742066756e64730000000000000000000000000000604482015290519081900360640190fd5b6001600b846000015160ff16601f8110613d3f57fe5b601091828204019190066002026101000a81548161ffff021916908361ffff1602179055506001600f846000015160ff16601f8110613d7a57fe5b01556009546040805184815290516001600160a01b039283169284811692908816917fd0b1dac935d85bd54cf0a33b0d41d39f8cf53a968465fc7ea2377526b8ac712c9181900360200190a450505050565b60408051808201909152602e5464ffffffffff8082168084526501000000000090920463ffffffff166020840152841611613e4e576040805162461bcd60e51b815260206004820152600c60248201527f7374616c6520726
5706f72740000000000000000000000000000000000000000604482015290519081900360640190fd5b60006060613e5b84614d62565b9092509050613e6a8482614e1b565b600354815160ff90911690601f1015613eca576040805162461bcd60e51b815260206004820152601e60248201527f6e756d206f62736572766174696f6e73206f7574206f6620626f756e64730000604482015290519081900360640190fd5b80600202825111613f22576040805162461bcd60e51b815260206004820152601e60248201527f746f6f206665772076616c75657320746f207472757374206d656469616e0000604482015290519081900360640190fd5b6000825167ffffffffffffffff81118015613f3c57600080fd5b506040519080825280601f01601f191660200182016040528015613f67576020820181803683370190505b509050613f72615174565b60005b8451811015614061576000868260208110613f8c57fe5b1a90508281601f8110613f9b57fe5b602002015115613ff2576040805162461bcd60e51b815260206004820152601760248201527f6f6273657276657220696e646578207265706561746564000000000000000000604482015290519081900360640190fd5b60018382601f811061400057fe5b9115156020928302919091015287908390811061401957fe5b1a60f81b84838151811061402957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050600101613f75565b5061406b82614e7f565b64ffffffffff8816865260005b600185510381101561411657600085826001018151811061409557fe5b602002602001015160170b8683815181106140ac57fe5b602002602001015160170b131590508061410d576040805162461bcd60e51b815260206004820152601760248201527f6f62736572766174696f6e73206e6f7420736f72746564000000000000000000604482015290519081900360640190fd5b50600101614078565b50600084600286518161412557fe5b048151811061413057fe5b602002602001015190508060170b7f000000000000000000000000000000000000000000000000000000000000000060170b1315801561419657507f000000000000000000000000000000000000000000000000000000000000000060170b8160170b13155b6141e7576040805162461bcd60e51b815260206004820152601e60248201527f6d656469616e206973206f7574206f66206d696e2d6d61782072616e67650000604482015290519081900360640190fd5b86602001805180919060010163ffffffff1663ffffffff16815250506040518060400160405
2808260170b81526020014267ffffffffffffffff16815250602f6000896020015163ffffffff1663ffffffff16815260200190815260200160002060008201518160000160006101000a81548177ffffffffffffffffffffffffffffffffffffffffffffffff021916908360170b77ffffffffffffffffffffffffffffffffffffffffffffffff16021790555060208201518160000160186101000a81548167ffffffffffffffff021916908367ffffffffffffffff160217905550905050866020015163ffffffff167f8235efcbf95cfe12e2d5afec1e5e568dc529cb92d6a9b4195da079f1411244f8823388878f8f604051808760170b8152602001866001600160a01b0316815260200180602001806020018581526020018464ffffffffff168152602001838103835287818151815260200191508051906020019060200280838360005b8381101561436d578181015183820152602001614355565b50505050905001838103825286818151815260200191508051906020019080838360005b838110156143a9578181015183820152602001614391565b50505050905090810190601f1680156143d65780820380516001836020036101000a031916815260200191505b509850505050505050505060405180910390a260208088015160408051428152905160009363ffffffff909316927f0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271928290030190a3866020015163ffffffff168160170b7f0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f426040518082815260200191505060405180910390a361448387602001518260170b614eee565b50508451602e805460209097015163ffffffff1665010000000000027fffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffff64ffffffffff9093167fffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000090981697909717919091169590951790945550505050505050565b602083810286019082020161014401368114614568576040805162461bcd60e51b815260206004820152601860248201527f63616c6c64617461206c656e677468206d69736d617463680000000000000000604482015290519081900360640190fd5b50505050505050565b6001600160a01b03811660009081526005602090815260408083208151808301909252805460ff808216845292939192918401916101009091041660028111156145b757fe5b60028111156145c257fe5b90525090506002816020015160028111156145d957fe5b146145e357600080fd5b6040805160a08101825260085463fffffff
f80821680845264010000000083048216602085018190526801000000000000000084048316958501959095526c01000000000000000000000000830482166060850152700100000000000000000000000000000000909204166080830152909160009161466991633b9aca003a049161502e565b90506010360260005a905060006146888863ffffffff16858585615054565b6fffffffffffffffffffffffffffffffff1690506000620f4240866040015163ffffffff168302816146b657fe5b049050856080015163ffffffff16633b9aca000281600f896000015160ff16601f81106146df57fe5b01540101600f886000015160ff16601f81106146f757fe5b0155505050505050505050565b6040805160a0808201835263ffffffff88811680845288821660208086018290528984168688018190528985166060808901829052958a166080988901819052600880547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001687177fffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffff166401000000008702177fffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffff16680100000000000000008502177fffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffff166c010000000000000000000000008402177fffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffffff16700100000000000000000000000000000000830217905589519586529285019390935283880152928201529283015291517fd0d9486a2c673e2a4b57fc82e4c8a556b3e2b82dd5db07e2c04a920ca0f469b6929181900390910190a15050505050565b604080516103e0810191829052600091829190600b90601f908285855b82829054906101000a900461ffff1661ffff168152602001906002019060208260010104928301926001038202915080841161489b5790505050505050905060005b601f81101561490b5760018282601f81106148f457fe5b60200201510361ffff1692909201916001016148dd565b506040805160a08101825260085463ffffffff8082168352640100000000820481166020808501919091526801000000000000000083048216848601526c01000000000000000000000000830482166060850181905270010000000000000000000000000000000090930490911660808401526007805485518184028101840190965280865296909202633b9aca00029592936000939092918301828280156149dd57602002820191906000526020600020905b81546001600160a01b031681526001909101906020018083116149bf575
b5050604080516103e08101918290529495506000949350600f9250601f915082845b8154815260200190600101908083116149ff575050505050905060005b8251811015614a465760018282601f8110614a3357fe5b6020020151039590950194600101614a1c565b505050505090565b600081831015614a5f575081614a62565b50805b92915050565b614a70613810565b5050602e80547fffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000169055565b6000808a8a8a8a8a8a8a8a8a604051602001808a8152602001896001600160a01b031681526020018867ffffffffffffffff16815260200180602001806020018760ff168152602001806020018667ffffffffffffffff1681526020018060200185810385528b818151815260200191508051906020019060200280838360005b83811015614b35578181015183820152602001614b1d565b5050505090500185810384528a818151815260200191508051906020019060200280838360005b83811015614b74578181015183820152602001614b5c565b50505050905001858103835288818151815260200191508051906020019080838360005b83811015614bb0578181015183820152602001614b98565b50505050905090810190601f168015614bdd5780820380516001836020036101000a031916815260200191505b50858103825286518152865160209182019188019080838360005b83811015614c10578181015183820152602001614bf8565b50505050905090810190601f168015614c3d5780820380516001836020036101000a031916815260200191505b50604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e01000000000000000000000000000000000000000000000000000000000000179f505050505050505050505050505050509998505050505050505050565b600a546001600160a01b0390811690821681146111a457600a80547fffffffffffffffffffffffff0000000000000000000000000000000000000000166001600160a01b03848116918217909255604080519284168352602083019190915280517f793cb73064f3c8cde7e187ae515511e6e56d1ee89bf08b82fa60fb70f8d489129281900390910190a15050565b60006060828060200190516040811015614d7b57600080fd5b815160208301805160405192949293830192919084640100000000821115614da257600080fd5b908301906020820185811115614db757600080fd5b825186602082028301116401000
0000082111715614dd457600080fd5b82525081516020918201928201910280838360005b83811015614e01578181015183820152602001614de9565b505050509050016040525050508092508193505050915091565b6000815160200260600160000190508083511461350d576040805162461bcd60e51b815260206004820152601660248201527f7265706f7274206c656e677468206d69736d6174636800000000000000000000604482015290519081900360640190fd5b604080516103e0810191829052614ee091839190600b90601f90826000855b82829054906101000a900461ffff1661ffff1681526020019060020190602082600101049283019260010382029150808411614e9e57905050505050506150e0565b6111a490600b90601f6151aa565b604080518082019091526030546001600160a01b0381168083527401000000000000000000000000000000000000000090910463ffffffff166020830152614f3657506111a4565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff830163ffffffff8181166000818152602f602090815260408083205487518884015183517fbeed9b510000000000000000000000000000000000000000000000000000000081526004810197909752601792830b90920b602487018190528b88166044880152606487018b9052925192966001600160a01b039091169563beed9b51959290911693608480830194919391928390030190829088803b158015614fff57600080fd5b5087f19350505050801561502557506040513d602081101561502057600080fd5b505160015b6123da57611a9d565b6000838381101561504157600285850304015b61504b8184614a4e565b95945050505050565b6000818510156150ab576040805162461bcd60e51b815260206004820181905260248201527f6761734c6566742063616e6e6f742065786365656420696e697469616c476173604482015290519081900360640190fd5b818503830161179301633b9aca00858202026fffffffffffffffffffffffffffffffff81106150d657fe5b9695505050505050565b6150e8615174565b60005b835181101561514d57600084828151811061510257fe5b016020015160f81c90506151278482601f811061511b57fe5b60200201516001615155565b848260ff16601f811061513657fe5b61ffff9092166020929092020152506001016150eb565b509092915050565b600061516d8261ffff168461ffff160161ffff614a4e565b9392505050565b604051806103e00160405280601f906020820280368337509192915050565b604080518082019091526000808252602082015290565b60028
30191839082156152305791602002820160005b8382111561520057835183826101000a81548161ffff021916908361ffff16021790555092602001926002016020816001010492830192600103026151c0565b801561522e5782816101000a81549061ffff0219169055600201602081600101049283019260010302615200565b505b5061523c92915061526e565b5090565b82601f8101928215615230579160200282015b82811115615230578251825591602001919060010190615253565b5b8082111561523c576000815560010161526f56fe6f7261636c6520616464726573736573206f7574206f6620726567697374726174696f6ea164736f6c6343000706000a" + +func DeployOffchainAggregator(auth *bind.TransactOpts, backend bind.ContractBackend, _maximumGasPrice uint32, _reasonableGasPrice uint32, _microLinkPerEth uint32, _linkGweiPerObservation uint32, _linkGweiPerTransmission uint32, _link common.Address, _minAnswer *big.Int, _maxAnswer *big.Int, _billingAccessController common.Address, _requesterAccessController common.Address, _decimals uint8, _description string) (common.Address, *types.Transaction, *OffchainAggregator, error) { + parsed, err := abi.JSON(strings.NewReader(OffchainAggregatorABI)) + if err != nil { + return common.Address{}, nil, nil, err + } + + address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(OffchainAggregatorBin), backend, _maximumGasPrice, _reasonableGasPrice, _microLinkPerEth, _linkGweiPerObservation, _linkGweiPerTransmission, _link, _minAnswer, _maxAnswer, _billingAccessController, _requesterAccessController, _decimals, _description) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OffchainAggregator{OffchainAggregatorCaller: OffchainAggregatorCaller{contract: contract}, OffchainAggregatorTransactor: OffchainAggregatorTransactor{contract: contract}, OffchainAggregatorFilterer: OffchainAggregatorFilterer{contract: contract}}, nil +} + +type OffchainAggregator struct { + address common.Address + abi abi.ABI + OffchainAggregatorCaller + OffchainAggregatorTransactor + OffchainAggregatorFilterer +} + +type 
OffchainAggregatorCaller struct {
	contract *bind.BoundContract // generic read-only contract binding to access the raw methods on
}

// OffchainAggregatorTransactor is an auto generated write-only Go binding
// around the OffchainAggregator contract.
type OffchainAggregatorTransactor struct {
	contract *bind.BoundContract // generic write-only contract binding to access the raw methods on
}

// OffchainAggregatorFilterer is an auto generated log-filtering Go binding
// around the OffchainAggregator contract events.
type OffchainAggregatorFilterer struct {
	contract *bind.BoundContract // generic contract binding to filter events with
}

// OffchainAggregatorSession is a full binding pre-set with call and transact
// options, so they do not have to be passed on every invocation.
type OffchainAggregatorSession struct {
	Contract     *OffchainAggregator // generic contract binding to set the session for
	CallOpts     bind.CallOpts       // call options reused throughout this session
	TransactOpts bind.TransactOpts   // transaction auth options reused throughout this session
}

// OffchainAggregatorCallerSession is a read-only binding pre-set with call options.
type OffchainAggregatorCallerSession struct {
	Contract *OffchainAggregatorCaller // generic read-only contract binding to set the session for
	CallOpts bind.CallOpts             // call options reused throughout this session
}

// OffchainAggregatorTransactorSession is a write-only binding pre-set with
// transaction auth options.
type OffchainAggregatorTransactorSession struct {
	Contract     *OffchainAggregatorTransactor // generic write-only contract binding to set the session for
	TransactOpts bind.TransactOpts             // transaction auth options reused throughout this session
}

// OffchainAggregatorRaw exposes the low-level Call/Transfer/Transact entry
// points of the full binding.
type OffchainAggregatorRaw struct {
	Contract *OffchainAggregator // generic contract binding to access the raw methods on
}

// OffchainAggregatorCallerRaw exposes the low-level Call entry point of the
// read-only binding.
type OffchainAggregatorCallerRaw struct {
	Contract *OffchainAggregatorCaller // generic read-only contract binding to access the raw methods on
}

// OffchainAggregatorTransactorRaw exposes the low-level Transfer/Transact
// entry points of the write-only binding.
type OffchainAggregatorTransactorRaw struct {
	Contract *OffchainAggregatorTransactor // generic write-only contract binding to access the raw methods on
}

// NewOffchainAggregator creates a new instance of OffchainAggregator, bound to
// a specific deployed contract.
func NewOffchainAggregator(address common.Address, backend bind.ContractBackend) (*OffchainAggregator, error) {
	abi, err := abi.JSON(strings.NewReader(OffchainAggregatorABI))
	if err != nil {
		return nil, err
	}
	contract, err := bindOffchainAggregator(address, backend, backend, backend)
	if err != nil {
		return nil, err
	}
	return &OffchainAggregator{address: address, abi: abi, OffchainAggregatorCaller: OffchainAggregatorCaller{contract: contract}, OffchainAggregatorTransactor: OffchainAggregatorTransactor{contract: contract}, OffchainAggregatorFilterer: OffchainAggregatorFilterer{contract: contract}}, nil
}

// NewOffchainAggregatorCaller creates a new read-only instance of
// OffchainAggregator, bound to a specific deployed contract.
func NewOffchainAggregatorCaller(address common.Address, caller bind.ContractCaller) (*OffchainAggregatorCaller, error) {
	contract, err := bindOffchainAggregator(address, caller, nil, nil)
	if err != nil {
		return nil, err
	}
	return &OffchainAggregatorCaller{contract: contract}, nil
}

// NewOffchainAggregatorTransactor creates a new write-only instance of
// OffchainAggregator, bound to a specific deployed contract.
func NewOffchainAggregatorTransactor(address common.Address, transactor bind.ContractTransactor) (*OffchainAggregatorTransactor, error) {
	contract, err := bindOffchainAggregator(address, nil, transactor, nil)

	if err != nil {
		return nil, err
	}
	return &OffchainAggregatorTransactor{contract: contract}, nil
}

// NewOffchainAggregatorFilterer creates a new log filterer instance of
// OffchainAggregator, bound to a specific deployed contract.
func NewOffchainAggregatorFilterer(address common.Address, filterer bind.ContractFilterer) (*OffchainAggregatorFilterer, error) {
	contract, err := bindOffchainAggregator(address, nil, nil, filterer)
	if err != nil {
		return nil, err
	}
	return &OffchainAggregatorFilterer{contract: contract}, nil
}

// bindOffchainAggregator binds a generic wrapper to an already deployed contract.
func bindOffchainAggregator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
	parsed, err := abi.JSON(strings.NewReader(OffchainAggregatorABI))
	if err != nil {
		return nil, err
	}
	return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
}

// Call invokes the (constant) contract method with params as input values and
// sets the output to result.
func (_OffchainAggregator *OffchainAggregatorRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
	return _OffchainAggregator.Contract.OffchainAggregatorCaller.contract.Call(opts, result, method, params...)
}

// Transfer initiates a plain transaction to move funds to the contract.
func (_OffchainAggregator *OffchainAggregatorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.OffchainAggregatorTransactor.contract.Transfer(opts)
}

// Transact invokes the (paid) contract method with params as input values.
func (_OffchainAggregator *OffchainAggregatorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.OffchainAggregatorTransactor.contract.Transact(opts, method, params...)
}

// Call invokes the (constant) contract method with params as input values and
// sets the output to result.
func (_OffchainAggregator *OffchainAggregatorCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
	return _OffchainAggregator.Contract.contract.Call(opts, result, method, params...)
}

// Transfer initiates a plain transaction to move funds to the contract.
func (_OffchainAggregator *OffchainAggregatorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.contract.Transfer(opts)
}

// Transact invokes the (paid) contract method with params as input values.
func (_OffchainAggregator *OffchainAggregatorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.contract.Transact(opts, method, params...)
}

// BillingAccessController is a free data retrieval call binding the contract
// method "billingAccessController".
func (_OffchainAggregator *OffchainAggregatorCaller) BillingAccessController(opts *bind.CallOpts) (common.Address, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "billingAccessController")

	if err != nil {
		return *new(common.Address), err
	}

	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)

	return out0, err

}

// BillingAccessController calls "billingAccessController" with the session's
// pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) BillingAccessController() (common.Address, error) {
	return _OffchainAggregator.Contract.BillingAccessController(&_OffchainAggregator.CallOpts)
}

// BillingAccessController calls "billingAccessController" with the session's
// pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) BillingAccessController() (common.Address, error) {
	return _OffchainAggregator.Contract.BillingAccessController(&_OffchainAggregator.CallOpts)
}

// Decimals is a free data retrieval call binding the contract method "decimals".
func (_OffchainAggregator *OffchainAggregatorCaller) Decimals(opts *bind.CallOpts) (uint8, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "decimals")

	if err != nil {
		return *new(uint8), err
	}

	out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8)

	return out0, err

}

// Decimals calls "decimals" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) Decimals() (uint8, error) {
	return _OffchainAggregator.Contract.Decimals(&_OffchainAggregator.CallOpts)
}

// Decimals calls "decimals" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) Decimals() (uint8, error) {
	return _OffchainAggregator.Contract.Decimals(&_OffchainAggregator.CallOpts)
}

// Description is a free data retrieval call binding the contract method "description".
func (_OffchainAggregator *OffchainAggregatorCaller) Description(opts *bind.CallOpts) (string, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "description")

	if err != nil {
		return *new(string), err
	}

	out0 := *abi.ConvertType(out[0], new(string)).(*string)

	return out0, err

}

// Description calls "description" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) Description() (string, error) {
	return _OffchainAggregator.Contract.Description(&_OffchainAggregator.CallOpts)
}

// Description calls "description" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) Description() (string, error) {
	return _OffchainAggregator.Contract.Description(&_OffchainAggregator.CallOpts)
}

// GetAnswer is a free data retrieval call binding the contract method "getAnswer".
func (_OffchainAggregator *OffchainAggregatorCaller) GetAnswer(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "getAnswer", _roundId)

	if err != nil {
		return *new(*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)

	return out0, err

}

// GetAnswer calls "getAnswer" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) GetAnswer(_roundId *big.Int) (*big.Int, error) {
	return _OffchainAggregator.Contract.GetAnswer(&_OffchainAggregator.CallOpts, _roundId)
}

// GetAnswer calls "getAnswer" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) GetAnswer(_roundId *big.Int) (*big.Int, error) {
	return _OffchainAggregator.Contract.GetAnswer(&_OffchainAggregator.CallOpts, _roundId)
}

// GetBilling is a free data retrieval call binding the contract method
// "getBilling". The multiple return values are packed into a GetBilling struct.
func (_OffchainAggregator *OffchainAggregatorCaller) GetBilling(opts *bind.CallOpts) (GetBilling, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "getBilling")

	outstruct := new(GetBilling)
	if err != nil {
		return *outstruct, err
	}

	outstruct.MaximumGasPrice = *abi.ConvertType(out[0], new(uint32)).(*uint32)
	outstruct.ReasonableGasPrice = *abi.ConvertType(out[1], new(uint32)).(*uint32)
	outstruct.MicroLinkPerEth = *abi.ConvertType(out[2], new(uint32)).(*uint32)
	outstruct.LinkGweiPerObservation = *abi.ConvertType(out[3], new(uint32)).(*uint32)
	outstruct.LinkGweiPerTransmission = *abi.ConvertType(out[4], new(uint32)).(*uint32)

	return *outstruct, err

}

// GetBilling calls "getBilling" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) GetBilling() (GetBilling, error) {
	return _OffchainAggregator.Contract.GetBilling(&_OffchainAggregator.CallOpts)
}

// GetBilling calls "getBilling" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) GetBilling() (GetBilling, error) {
	return _OffchainAggregator.Contract.GetBilling(&_OffchainAggregator.CallOpts)
}

// GetLinkToken is a free data retrieval call binding the contract method "getLinkToken".
func (_OffchainAggregator *OffchainAggregatorCaller) GetLinkToken(opts *bind.CallOpts) (common.Address, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "getLinkToken")

	if err != nil {
		return *new(common.Address), err
	}

	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)

	return out0, err

}

// GetLinkToken calls "getLinkToken" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) GetLinkToken() (common.Address, error) {
	return _OffchainAggregator.Contract.GetLinkToken(&_OffchainAggregator.CallOpts)
}

// GetLinkToken calls "getLinkToken" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) GetLinkToken() (common.Address, error) {
	return _OffchainAggregator.Contract.GetLinkToken(&_OffchainAggregator.CallOpts)
}

// GetRoundData is a free data retrieval call binding the contract method
// "getRoundData". The multiple return values are packed into a GetRoundData struct.
func (_OffchainAggregator *OffchainAggregatorCaller) GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "getRoundData", _roundId)

	outstruct := new(GetRoundData)
	if err != nil {
		return *outstruct, err
	}

	outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
	outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)
	outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int)
	outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int)
	outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int)

	return *outstruct, err

}

func
(_OffchainAggregator *OffchainAggregatorSession) GetRoundData(_roundId *big.Int) (GetRoundData, error) {
	return _OffchainAggregator.Contract.GetRoundData(&_OffchainAggregator.CallOpts, _roundId)
}

// GetRoundData calls "getRoundData" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) GetRoundData(_roundId *big.Int) (GetRoundData, error) {
	return _OffchainAggregator.Contract.GetRoundData(&_OffchainAggregator.CallOpts, _roundId)
}

// GetTimestamp is a free data retrieval call binding the contract method "getTimestamp".
func (_OffchainAggregator *OffchainAggregatorCaller) GetTimestamp(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "getTimestamp", _roundId)

	if err != nil {
		return *new(*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)

	return out0, err

}

// GetTimestamp calls "getTimestamp" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) GetTimestamp(_roundId *big.Int) (*big.Int, error) {
	return _OffchainAggregator.Contract.GetTimestamp(&_OffchainAggregator.CallOpts, _roundId)
}

// GetTimestamp calls "getTimestamp" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) GetTimestamp(_roundId *big.Int) (*big.Int, error) {
	return _OffchainAggregator.Contract.GetTimestamp(&_OffchainAggregator.CallOpts, _roundId)
}

// LatestAnswer is a free data retrieval call binding the contract method "latestAnswer".
func (_OffchainAggregator *OffchainAggregatorCaller) LatestAnswer(opts *bind.CallOpts) (*big.Int, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "latestAnswer")

	if err != nil {
		return *new(*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)

	return out0, err

}

// LatestAnswer calls "latestAnswer" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) LatestAnswer() (*big.Int, error) {
	return _OffchainAggregator.Contract.LatestAnswer(&_OffchainAggregator.CallOpts)
}

// LatestAnswer calls "latestAnswer" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) LatestAnswer() (*big.Int, error) {
	return _OffchainAggregator.Contract.LatestAnswer(&_OffchainAggregator.CallOpts)
}

// LatestConfigDetails is a free data retrieval call binding the contract method
// "latestConfigDetails". The multiple return values are packed into a
// LatestConfigDetails struct.
func (_OffchainAggregator *OffchainAggregatorCaller) LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "latestConfigDetails")

	outstruct := new(LatestConfigDetails)
	if err != nil {
		return *outstruct, err
	}

	outstruct.ConfigCount = *abi.ConvertType(out[0], new(uint32)).(*uint32)
	outstruct.BlockNumber = *abi.ConvertType(out[1], new(uint32)).(*uint32)
	outstruct.ConfigDigest = *abi.ConvertType(out[2], new([32]byte)).(*[32]byte)

	return *outstruct, err

}

// LatestConfigDetails calls "latestConfigDetails" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) LatestConfigDetails() (LatestConfigDetails, error) {
	return _OffchainAggregator.Contract.LatestConfigDetails(&_OffchainAggregator.CallOpts)
}

// LatestConfigDetails calls "latestConfigDetails" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) LatestConfigDetails() (LatestConfigDetails, error) {
	return _OffchainAggregator.Contract.LatestConfigDetails(&_OffchainAggregator.CallOpts)
}

// LatestRound is a free data retrieval call binding the contract method "latestRound".
func (_OffchainAggregator *OffchainAggregatorCaller) LatestRound(opts *bind.CallOpts) (*big.Int, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "latestRound")

	if err != nil {
		return *new(*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)

	return out0, err

}

// LatestRound calls "latestRound" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) LatestRound() (*big.Int, error) {
	return _OffchainAggregator.Contract.LatestRound(&_OffchainAggregator.CallOpts)
}

// LatestRound calls "latestRound" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) LatestRound() (*big.Int, error) {
	return _OffchainAggregator.Contract.LatestRound(&_OffchainAggregator.CallOpts)
}

// LatestRoundData is a free data retrieval call binding the contract method
// "latestRoundData". The multiple return values are packed into a
// LatestRoundData struct.
func (_OffchainAggregator *OffchainAggregatorCaller) LatestRoundData(opts *bind.CallOpts) (LatestRoundData, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "latestRoundData")

	outstruct := new(LatestRoundData)
	if err != nil {
		return *outstruct, err
	}

	outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
	outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)
	outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int)
	outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int)
	outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int)

	return *outstruct, err

}

// LatestRoundData calls "latestRoundData" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) LatestRoundData() (LatestRoundData, error) {
	return _OffchainAggregator.Contract.LatestRoundData(&_OffchainAggregator.CallOpts)
}

// LatestRoundData calls "latestRoundData" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) LatestRoundData() (LatestRoundData, error) {
	return _OffchainAggregator.Contract.LatestRoundData(&_OffchainAggregator.CallOpts)
}

// LatestTimestamp is a free data retrieval call binding the contract method "latestTimestamp".
func (_OffchainAggregator *OffchainAggregatorCaller) LatestTimestamp(opts *bind.CallOpts) (*big.Int, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "latestTimestamp")

	if err != nil {
		return *new(*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)

	return out0, err

}

// LatestTimestamp calls "latestTimestamp" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) LatestTimestamp() (*big.Int, error) {
	return _OffchainAggregator.Contract.LatestTimestamp(&_OffchainAggregator.CallOpts)
}

// LatestTimestamp calls "latestTimestamp" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) LatestTimestamp() (*big.Int, error) {
	return _OffchainAggregator.Contract.LatestTimestamp(&_OffchainAggregator.CallOpts)
}

// LatestTransmissionDetails is a free data retrieval call binding the contract
// method "latestTransmissionDetails". The multiple return values are packed
// into a LatestTransmissionDetails struct.
func (_OffchainAggregator *OffchainAggregatorCaller) LatestTransmissionDetails(opts *bind.CallOpts) (LatestTransmissionDetails, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "latestTransmissionDetails")

	outstruct := new(LatestTransmissionDetails)
	if err != nil {
		return *outstruct, err
	}

	outstruct.ConfigDigest = *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)
	outstruct.Epoch = *abi.ConvertType(out[1], new(uint32)).(*uint32)
	outstruct.Round = *abi.ConvertType(out[2], new(uint8)).(*uint8)
	outstruct.LatestAnswer = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int)
	outstruct.LatestTimestamp = *abi.ConvertType(out[4], new(uint64)).(*uint64)

	return *outstruct, err

}

// LatestTransmissionDetails calls "latestTransmissionDetails" with the
// session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) LatestTransmissionDetails() (LatestTransmissionDetails, error) {
	return _OffchainAggregator.Contract.LatestTransmissionDetails(&_OffchainAggregator.CallOpts)
}

// LatestTransmissionDetails calls "latestTransmissionDetails" with the
// session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) LatestTransmissionDetails() (LatestTransmissionDetails, error) {
	return _OffchainAggregator.Contract.LatestTransmissionDetails(&_OffchainAggregator.CallOpts)
}

// LinkAvailableForPayment is a free data retrieval call binding the contract
// method "linkAvailableForPayment".
func (_OffchainAggregator *OffchainAggregatorCaller) LinkAvailableForPayment(opts *bind.CallOpts) (*big.Int, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "linkAvailableForPayment")

	if err != nil {
		return *new(*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)

	return out0, err

}

// LinkAvailableForPayment calls "linkAvailableForPayment" with the session's
// pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) LinkAvailableForPayment() (*big.Int, error) {
	return _OffchainAggregator.Contract.LinkAvailableForPayment(&_OffchainAggregator.CallOpts)
}

// LinkAvailableForPayment calls "linkAvailableForPayment" with the session's
// pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) LinkAvailableForPayment() (*big.Int, error) {
	return _OffchainAggregator.Contract.LinkAvailableForPayment(&_OffchainAggregator.CallOpts)
}

// MaxAnswer is a free data retrieval call binding the contract method "maxAnswer".
func (_OffchainAggregator *OffchainAggregatorCaller) MaxAnswer(opts *bind.CallOpts) (*big.Int, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "maxAnswer")

	if err != nil {
		return *new(*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)

	return out0, err

}

// MaxAnswer calls "maxAnswer" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) MaxAnswer() (*big.Int, error) {
	return _OffchainAggregator.Contract.MaxAnswer(&_OffchainAggregator.CallOpts)
}

// MaxAnswer calls "maxAnswer" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) MaxAnswer() (*big.Int, error) {
	return _OffchainAggregator.Contract.MaxAnswer(&_OffchainAggregator.CallOpts)
}

// MinAnswer is a free data retrieval call binding the contract method "minAnswer".
func (_OffchainAggregator *OffchainAggregatorCaller) MinAnswer(opts *bind.CallOpts) (*big.Int, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "minAnswer")

	if err != nil {
		return *new(*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)

	return out0, err

}

// MinAnswer calls "minAnswer" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) MinAnswer() (*big.Int, error) {
	return _OffchainAggregator.Contract.MinAnswer(&_OffchainAggregator.CallOpts)
}

// MinAnswer calls "minAnswer" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) MinAnswer() (*big.Int, error) {
	return _OffchainAggregator.Contract.MinAnswer(&_OffchainAggregator.CallOpts)
}

// OracleObservationCount is a free data retrieval call binding the contract
// method "oracleObservationCount".
func (_OffchainAggregator *OffchainAggregatorCaller) OracleObservationCount(opts *bind.CallOpts, _signerOrTransmitter common.Address) (uint16, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "oracleObservationCount", _signerOrTransmitter)

	if err != nil {
		return *new(uint16), err
	}

	out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16)

	return out0, err

}

// OracleObservationCount calls "oracleObservationCount" with the session's
// pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) OracleObservationCount(_signerOrTransmitter common.Address) (uint16, error) {
	return _OffchainAggregator.Contract.OracleObservationCount(&_OffchainAggregator.CallOpts, _signerOrTransmitter)
}

// OracleObservationCount calls "oracleObservationCount" with the session's
// pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) OracleObservationCount(_signerOrTransmitter common.Address) (uint16, error) {
	return _OffchainAggregator.Contract.OracleObservationCount(&_OffchainAggregator.CallOpts, _signerOrTransmitter)
}

// OwedPayment is a free data retrieval call binding the contract method "owedPayment".
func (_OffchainAggregator *OffchainAggregatorCaller) OwedPayment(opts *bind.CallOpts, _transmitter common.Address) (*big.Int,
error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "owedPayment", _transmitter)

	if err != nil {
		return *new(*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)

	return out0, err

}

// OwedPayment calls "owedPayment" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) OwedPayment(_transmitter common.Address) (*big.Int, error) {
	return _OffchainAggregator.Contract.OwedPayment(&_OffchainAggregator.CallOpts, _transmitter)
}

// OwedPayment calls "owedPayment" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) OwedPayment(_transmitter common.Address) (*big.Int, error) {
	return _OffchainAggregator.Contract.OwedPayment(&_OffchainAggregator.CallOpts, _transmitter)
}

// Owner is a free data retrieval call binding the contract method "owner".
func (_OffchainAggregator *OffchainAggregatorCaller) Owner(opts *bind.CallOpts) (common.Address, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "owner")

	if err != nil {
		return *new(common.Address), err
	}

	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)

	return out0, err

}

// Owner calls "owner" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) Owner() (common.Address, error) {
	return _OffchainAggregator.Contract.Owner(&_OffchainAggregator.CallOpts)
}

// Owner calls "owner" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) Owner() (common.Address, error) {
	return _OffchainAggregator.Contract.Owner(&_OffchainAggregator.CallOpts)
}

// RequesterAccessController is a free data retrieval call binding the contract
// method "requesterAccessController".
func (_OffchainAggregator *OffchainAggregatorCaller) RequesterAccessController(opts *bind.CallOpts) (common.Address, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "requesterAccessController")

	if err != nil {
		return *new(common.Address), err
	}

	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)

	return out0, err

}

// RequesterAccessController calls "requesterAccessController" with the
// session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) RequesterAccessController() (common.Address, error) {
	return _OffchainAggregator.Contract.RequesterAccessController(&_OffchainAggregator.CallOpts)
}

// RequesterAccessController calls "requesterAccessController" with the
// session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) RequesterAccessController() (common.Address, error) {
	return _OffchainAggregator.Contract.RequesterAccessController(&_OffchainAggregator.CallOpts)
}

// Transmitters is a free data retrieval call binding the contract method "transmitters".
func (_OffchainAggregator *OffchainAggregatorCaller) Transmitters(opts *bind.CallOpts) ([]common.Address, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "transmitters")

	if err != nil {
		return *new([]common.Address), err
	}

	out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address)

	return out0, err

}

// Transmitters calls "transmitters" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) Transmitters() ([]common.Address, error) {
	return _OffchainAggregator.Contract.Transmitters(&_OffchainAggregator.CallOpts)
}

// Transmitters calls "transmitters" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) Transmitters() ([]common.Address, error) {
	return _OffchainAggregator.Contract.Transmitters(&_OffchainAggregator.CallOpts)
}

// TypeAndVersion is a free data retrieval call binding the contract method "typeAndVersion".
func (_OffchainAggregator *OffchainAggregatorCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "typeAndVersion")

	if err != nil {
		return *new(string), err
	}

	out0 := *abi.ConvertType(out[0], new(string)).(*string)

	return out0, err

}

// TypeAndVersion calls "typeAndVersion" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) TypeAndVersion() (string, error) {
	return _OffchainAggregator.Contract.TypeAndVersion(&_OffchainAggregator.CallOpts)
}

// TypeAndVersion calls "typeAndVersion" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) TypeAndVersion() (string, error) {
	return _OffchainAggregator.Contract.TypeAndVersion(&_OffchainAggregator.CallOpts)
}

// ValidatorConfig is a free data retrieval call binding the contract method
// "validatorConfig". The multiple return values are packed into a
// ValidatorConfig struct.
func (_OffchainAggregator *OffchainAggregatorCaller) ValidatorConfig(opts *bind.CallOpts) (ValidatorConfig, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "validatorConfig")

	outstruct := new(ValidatorConfig)
	if err != nil {
		return *outstruct, err
	}

	outstruct.Validator = *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
	outstruct.GasLimit = *abi.ConvertType(out[1], new(uint32)).(*uint32)

	return *outstruct, err

}

// ValidatorConfig calls "validatorConfig" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) ValidatorConfig() (ValidatorConfig, error) {
	return _OffchainAggregator.Contract.ValidatorConfig(&_OffchainAggregator.CallOpts)
}

// ValidatorConfig calls "validatorConfig" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) ValidatorConfig() (ValidatorConfig, error) {
	return _OffchainAggregator.Contract.ValidatorConfig(&_OffchainAggregator.CallOpts)
}

// Version is a free data retrieval call binding the contract method "version".
func (_OffchainAggregator *OffchainAggregatorCaller) Version(opts *bind.CallOpts) (*big.Int, error) {
	var out []interface{}
	err := _OffchainAggregator.contract.Call(opts, &out, "version")

	if err != nil {
		return *new(*big.Int), err
	}

	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)

	return out0, err

}

// Version calls "version" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorSession) Version() (*big.Int, error) {
	return _OffchainAggregator.Contract.Version(&_OffchainAggregator.CallOpts)
}

// Version calls "version" with the session's pre-set call options.
func (_OffchainAggregator *OffchainAggregatorCallerSession) Version() (*big.Int, error) {
	return _OffchainAggregator.Contract.Version(&_OffchainAggregator.CallOpts)
}

// AcceptOwnership is a paid mutator transaction binding the contract method
// "acceptOwnership".
func (_OffchainAggregator *OffchainAggregatorTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "acceptOwnership")
}

// AcceptOwnership sends "acceptOwnership" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorSession) AcceptOwnership() (*types.Transaction, error) {
	return _OffchainAggregator.Contract.AcceptOwnership(&_OffchainAggregator.TransactOpts)
}

// AcceptOwnership sends "acceptOwnership" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) AcceptOwnership() (*types.Transaction, error) {
	return _OffchainAggregator.Contract.AcceptOwnership(&_OffchainAggregator.TransactOpts)
}

// AcceptPayeeship is a paid mutator transaction binding the contract method
// "acceptPayeeship".
func (_OffchainAggregator *OffchainAggregatorTransactor) AcceptPayeeship(opts *bind.TransactOpts, _transmitter common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "acceptPayeeship", _transmitter)
}

// AcceptPayeeship sends "acceptPayeeship" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorSession) AcceptPayeeship(_transmitter common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.AcceptPayeeship(&_OffchainAggregator.TransactOpts, _transmitter)
}

// AcceptPayeeship sends "acceptPayeeship" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) AcceptPayeeship(_transmitter common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.AcceptPayeeship(&_OffchainAggregator.TransactOpts, _transmitter)
}

// RequestNewRound is a paid mutator transaction binding the contract method
// "requestNewRound".
func (_OffchainAggregator *OffchainAggregatorTransactor) RequestNewRound(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "requestNewRound")
}

// RequestNewRound sends "requestNewRound" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorSession) RequestNewRound() (*types.Transaction, error) {
	return _OffchainAggregator.Contract.RequestNewRound(&_OffchainAggregator.TransactOpts)
}

// RequestNewRound sends "requestNewRound" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) RequestNewRound() (*types.Transaction, error) {
	return _OffchainAggregator.Contract.RequestNewRound(&_OffchainAggregator.TransactOpts)
}

// SetBilling is a paid mutator transaction binding the contract method "setBilling".
func (_OffchainAggregator *OffchainAggregatorTransactor) SetBilling(opts *bind.TransactOpts, _maximumGasPrice uint32, _reasonableGasPrice uint32, _microLinkPerEth uint32, _linkGweiPerObservation uint32, _linkGweiPerTransmission uint32) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "setBilling", _maximumGasPrice, _reasonableGasPrice, _microLinkPerEth, _linkGweiPerObservation, _linkGweiPerTransmission)
}

// SetBilling sends "setBilling" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorSession) SetBilling(_maximumGasPrice uint32, _reasonableGasPrice uint32, _microLinkPerEth uint32, _linkGweiPerObservation uint32, _linkGweiPerTransmission uint32) (*types.Transaction,
error) {
	return _OffchainAggregator.Contract.SetBilling(&_OffchainAggregator.TransactOpts, _maximumGasPrice, _reasonableGasPrice, _microLinkPerEth, _linkGweiPerObservation, _linkGweiPerTransmission)
}

// SetBilling sends "setBilling" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetBilling(_maximumGasPrice uint32, _reasonableGasPrice uint32, _microLinkPerEth uint32, _linkGweiPerObservation uint32, _linkGweiPerTransmission uint32) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetBilling(&_OffchainAggregator.TransactOpts, _maximumGasPrice, _reasonableGasPrice, _microLinkPerEth, _linkGweiPerObservation, _linkGweiPerTransmission)
}

// SetBillingAccessController is a paid mutator transaction binding the
// contract method "setBillingAccessController".
func (_OffchainAggregator *OffchainAggregatorTransactor) SetBillingAccessController(opts *bind.TransactOpts, _billingAccessController common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "setBillingAccessController", _billingAccessController)
}

// SetBillingAccessController sends "setBillingAccessController" with the
// session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorSession) SetBillingAccessController(_billingAccessController common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetBillingAccessController(&_OffchainAggregator.TransactOpts, _billingAccessController)
}

// SetBillingAccessController sends "setBillingAccessController" with the
// session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetBillingAccessController(_billingAccessController common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetBillingAccessController(&_OffchainAggregator.TransactOpts, _billingAccessController)
}

// SetConfig is a paid mutator transaction binding the contract method "setConfig".
func (_OffchainAggregator *OffchainAggregatorTransactor) SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _threshold uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "setConfig", _signers, _transmitters, _threshold, _onchainConfig, _offchainConfigVersion, _offchainConfig)
}

// SetConfig sends "setConfig" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _threshold uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetConfig(&_OffchainAggregator.TransactOpts, _signers, _transmitters, _threshold, _onchainConfig, _offchainConfigVersion, _offchainConfig)
}

// SetConfig sends "setConfig" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetConfig(_signers []common.Address, _transmitters []common.Address, _threshold uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetConfig(&_OffchainAggregator.TransactOpts, _signers, _transmitters, _threshold, _onchainConfig, _offchainConfigVersion, _offchainConfig)
}

// SetLinkToken is a paid mutator transaction binding the contract method "setLinkToken".
func (_OffchainAggregator *OffchainAggregatorTransactor) SetLinkToken(opts *bind.TransactOpts, _linkToken common.Address, _recipient common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "setLinkToken", _linkToken, _recipient)
}

// SetLinkToken sends "setLinkToken" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorSession) SetLinkToken(_linkToken common.Address, _recipient common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetLinkToken(&_OffchainAggregator.TransactOpts, _linkToken, _recipient)
}

// SetLinkToken sends "setLinkToken" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetLinkToken(_linkToken common.Address, _recipient common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetLinkToken(&_OffchainAggregator.TransactOpts, _linkToken, _recipient)
}

// SetPayees is a paid mutator transaction binding the contract method "setPayees".
func (_OffchainAggregator *OffchainAggregatorTransactor) SetPayees(opts *bind.TransactOpts, _transmitters []common.Address, _payees []common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "setPayees", _transmitters, _payees)
}

// SetPayees sends "setPayees" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorSession) SetPayees(_transmitters []common.Address, _payees []common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetPayees(&_OffchainAggregator.TransactOpts, _transmitters, _payees)
}

// SetPayees sends "setPayees" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetPayees(_transmitters []common.Address, _payees []common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetPayees(&_OffchainAggregator.TransactOpts, _transmitters, _payees)
}

// SetRequesterAccessController is a paid mutator transaction binding the
// contract method "setRequesterAccessController".
func (_OffchainAggregator *OffchainAggregatorTransactor) SetRequesterAccessController(opts *bind.TransactOpts, _requesterAccessController common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "setRequesterAccessController", _requesterAccessController)
}

// SetRequesterAccessController sends "setRequesterAccessController" with the
// session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorSession) SetRequesterAccessController(_requesterAccessController common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetRequesterAccessController(&_OffchainAggregator.TransactOpts, _requesterAccessController)
}

// SetRequesterAccessController sends "setRequesterAccessController" with the
// session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetRequesterAccessController(_requesterAccessController common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetRequesterAccessController(&_OffchainAggregator.TransactOpts, _requesterAccessController)
}

// SetValidatorConfig is a paid mutator transaction binding the contract method
// "setValidatorConfig".
func (_OffchainAggregator *OffchainAggregatorTransactor) SetValidatorConfig(opts *bind.TransactOpts, _newValidator common.Address, _newGasLimit uint32) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "setValidatorConfig", _newValidator, _newGasLimit)
}

// SetValidatorConfig sends "setValidatorConfig" with the session's pre-set
// transact options.
func (_OffchainAggregator *OffchainAggregatorSession) SetValidatorConfig(_newValidator common.Address, _newGasLimit uint32) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetValidatorConfig(&_OffchainAggregator.TransactOpts, _newValidator, _newGasLimit)
}

// SetValidatorConfig sends "setValidatorConfig" with the session's pre-set
// transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) SetValidatorConfig(_newValidator common.Address, _newGasLimit uint32) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.SetValidatorConfig(&_OffchainAggregator.TransactOpts, _newValidator, _newGasLimit)
}

// TransferOwnership is a paid mutator transaction binding the contract method
// "transferOwnership".
func (_OffchainAggregator *OffchainAggregatorTransactor) TransferOwnership(opts *bind.TransactOpts, _to common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "transferOwnership", _to)
}

// TransferOwnership sends "transferOwnership" with the session's pre-set
// transact options.
func (_OffchainAggregator *OffchainAggregatorSession) TransferOwnership(_to common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.TransferOwnership(&_OffchainAggregator.TransactOpts, _to)
}

// TransferOwnership sends "transferOwnership" with the session's pre-set
// transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) TransferOwnership(_to common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.TransferOwnership(&_OffchainAggregator.TransactOpts, _to)
}

// TransferPayeeship is a paid mutator transaction binding the contract method
// "transferPayeeship".
func (_OffchainAggregator *OffchainAggregatorTransactor) TransferPayeeship(opts *bind.TransactOpts, _transmitter common.Address, _proposed common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "transferPayeeship", _transmitter, _proposed)
}

// TransferPayeeship sends "transferPayeeship" with the session's pre-set
// transact options.
func (_OffchainAggregator *OffchainAggregatorSession) TransferPayeeship(_transmitter common.Address, _proposed common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.TransferPayeeship(&_OffchainAggregator.TransactOpts, _transmitter, _proposed)
}

// TransferPayeeship sends "transferPayeeship" with the session's pre-set
// transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) TransferPayeeship(_transmitter common.Address, _proposed common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.TransferPayeeship(&_OffchainAggregator.TransactOpts, _transmitter, _proposed)
}

// Transmit is a paid mutator transaction binding the contract method "transmit".
func (_OffchainAggregator *OffchainAggregatorTransactor) Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "transmit", reportContext, report, rs, ss, rawVs)
}

// Transmit sends "transmit" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorSession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.Transmit(&_OffchainAggregator.TransactOpts, reportContext, report, rs, ss, rawVs)
}

// Transmit sends "transmit" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) Transmit(reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.Transmit(&_OffchainAggregator.TransactOpts, reportContext, report, rs, ss, rawVs)
}

// WithdrawFunds is a paid mutator transaction binding the contract method
// "withdrawFunds".
func (_OffchainAggregator *OffchainAggregatorTransactor) WithdrawFunds(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "withdrawFunds", _recipient, _amount)
}

// WithdrawFunds sends "withdrawFunds" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorSession) WithdrawFunds(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.WithdrawFunds(&_OffchainAggregator.TransactOpts, _recipient, _amount)
}

// WithdrawFunds sends "withdrawFunds" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) WithdrawFunds(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.WithdrawFunds(&_OffchainAggregator.TransactOpts, _recipient, _amount)
}

// WithdrawPayment is a paid mutator transaction binding the contract method
// "withdrawPayment".
func (_OffchainAggregator *OffchainAggregatorTransactor) WithdrawPayment(opts *bind.TransactOpts, _transmitter common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.contract.Transact(opts, "withdrawPayment", _transmitter)
}

// WithdrawPayment sends "withdrawPayment" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorSession) WithdrawPayment(_transmitter common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.WithdrawPayment(&_OffchainAggregator.TransactOpts, _transmitter)
}

// WithdrawPayment sends "withdrawPayment" with the session's pre-set transact options.
func (_OffchainAggregator *OffchainAggregatorTransactorSession) WithdrawPayment(_transmitter common.Address) (*types.Transaction, error) {
	return _OffchainAggregator.Contract.WithdrawPayment(&_OffchainAggregator.TransactOpts, _transmitter)
}

// OffchainAggregatorAnswerUpdatedIterator is returned from FilterAnswerUpdated
// and is used to iterate over the raw logs and unpacked data for AnswerUpdated
// events raised by the OffchainAggregator contract.
type OffchainAggregatorAnswerUpdatedIterator struct {
	Event *OffchainAggregatorAnswerUpdated // Event containing the contract specifics and raw log

	contract *bind.BoundContract // Generic contract to use for unpacking event data
	event    string              // Event name to use for unpacking event data

	logs chan types.Log        // Log channel receiving the found contract events
	sub  ethereum.Subscription // Subscription for errors, completion and termination
	done bool                  // Whether the subscription completed delivering logs
	fail error                 // Occurred error to stop iteration with
}

// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *OffchainAggregatorAnswerUpdatedIterator) Next() bool {

	// If the iterator failed, stop iterating
	if it.fail != nil {
		return false
	}

	// If the iterator completed, deliver directly whatever is available
	if it.done {
		select {
		case log := <-it.logs:
			it.Event = new(OffchainAggregatorAnswerUpdated)
			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
				it.fail = err
				return false
			}
			it.Event.Raw = log
			return true

		default:
			return false
		}
	}

	// Iterator still in progress, wait for either a data or an error event
	select {
	case log := <-it.logs:
		it.Event = new(OffchainAggregatorAnswerUpdated)
		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
			it.fail = err
			return false
		}
		it.Event.Raw = log
		return true

	case err := <-it.sub.Err():
		// Subscription ended: record the reason and drain any buffered logs
		// via the recursive Next call above.
		it.done = true
		it.fail = err
		return it.Next()
	}
}

// Error returns any retrieval or parsing error occurred during filtering.
func (it *OffchainAggregatorAnswerUpdatedIterator) Error() error {
	return it.fail
}

// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *OffchainAggregatorAnswerUpdatedIterator) Close() error {
	it.sub.Unsubscribe()
	return nil
}

// OffchainAggregatorAnswerUpdated represents an AnswerUpdated event raised by
// the OffchainAggregator contract.
type OffchainAggregatorAnswerUpdated struct {
	Current   *big.Int  // indexed event argument
	RoundId   *big.Int  // indexed event argument
	UpdatedAt *big.Int  // non-indexed event argument
	Raw       types.Log // Blockchain specific contextual infos
}

// FilterAnswerUpdated retrieves AnswerUpdated logs, optionally filtered by the
// indexed current/roundId values (empty slices match everything).
func (_OffchainAggregator *OffchainAggregatorFilterer) FilterAnswerUpdated(opts *bind.FilterOpts, current []*big.Int, roundId []*big.Int) (*OffchainAggregatorAnswerUpdatedIterator, error) {

	var currentRule []interface{}
	for _, currentItem := range current {
		currentRule = append(currentRule, currentItem)
	}
	var roundIdRule []interface{}
	for _, roundIdItem := range
roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "AnswerUpdated", currentRule, roundIdRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorAnswerUpdatedIterator{contract: _OffchainAggregator.contract, event: "AnswerUpdated", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchAnswerUpdated(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorAnswerUpdated, current []*big.Int, roundId []*big.Int) (event.Subscription, error) { + + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "AnswerUpdated", currentRule, roundIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorAnswerUpdated) + if err := _OffchainAggregator.contract.UnpackLog(event, "AnswerUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseAnswerUpdated(log types.Log) (*OffchainAggregatorAnswerUpdated, error) { + event := new(OffchainAggregatorAnswerUpdated) + if err := _OffchainAggregator.contract.UnpackLog(event, "AnswerUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorBillingAccessControllerSetIterator struct { + Event *OffchainAggregatorBillingAccessControllerSet + + contract *bind.BoundContract + event string + + 
logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorBillingAccessControllerSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorBillingAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorBillingAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorBillingAccessControllerSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorBillingAccessControllerSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorBillingAccessControllerSet struct { + Old common.Address + Current common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterBillingAccessControllerSet(opts *bind.FilterOpts) (*OffchainAggregatorBillingAccessControllerSetIterator, error) { + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "BillingAccessControllerSet") + if err != nil { + return nil, err + } + return &OffchainAggregatorBillingAccessControllerSetIterator{contract: _OffchainAggregator.contract, event: "BillingAccessControllerSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchBillingAccessControllerSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorBillingAccessControllerSet) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "BillingAccessControllerSet") + if err != nil { + return nil, 
err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorBillingAccessControllerSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "BillingAccessControllerSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseBillingAccessControllerSet(log types.Log) (*OffchainAggregatorBillingAccessControllerSet, error) { + event := new(OffchainAggregatorBillingAccessControllerSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "BillingAccessControllerSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorBillingSetIterator struct { + Event *OffchainAggregatorBillingSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorBillingSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorBillingSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorBillingSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorBillingSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorBillingSetIterator) 
Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorBillingSet struct { + MaximumGasPrice uint32 + ReasonableGasPrice uint32 + MicroLinkPerEth uint32 + LinkGweiPerObservation uint32 + LinkGweiPerTransmission uint32 + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterBillingSet(opts *bind.FilterOpts) (*OffchainAggregatorBillingSetIterator, error) { + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "BillingSet") + if err != nil { + return nil, err + } + return &OffchainAggregatorBillingSetIterator{contract: _OffchainAggregator.contract, event: "BillingSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchBillingSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorBillingSet) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "BillingSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorBillingSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "BillingSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseBillingSet(log types.Log) (*OffchainAggregatorBillingSet, error) { + event := new(OffchainAggregatorBillingSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "BillingSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorConfigSetIterator struct { + Event *OffchainAggregatorConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail 
error +} + +func (it *OffchainAggregatorConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorConfigSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigDigest [32]byte + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + Threshold uint8 + OnchainConfig []byte + EncodedConfigVersion uint64 + Encoded []byte + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterConfigSet(opts *bind.FilterOpts) (*OffchainAggregatorConfigSetIterator, error) { + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &OffchainAggregatorConfigSetIterator{contract: _OffchainAggregator.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorConfigSet) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + 
case log := <-logs: + + event := new(OffchainAggregatorConfigSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseConfigSet(log types.Log) (*OffchainAggregatorConfigSet, error) { + event := new(OffchainAggregatorConfigSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorLinkTokenSetIterator struct { + Event *OffchainAggregatorLinkTokenSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorLinkTokenSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorLinkTokenSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorLinkTokenSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorLinkTokenSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorLinkTokenSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorLinkTokenSet struct { + OldLinkToken common.Address + NewLinkToken common.Address + Raw types.Log +} + +func 
(_OffchainAggregator *OffchainAggregatorFilterer) FilterLinkTokenSet(opts *bind.FilterOpts, _oldLinkToken []common.Address, _newLinkToken []common.Address) (*OffchainAggregatorLinkTokenSetIterator, error) { + + var _oldLinkTokenRule []interface{} + for _, _oldLinkTokenItem := range _oldLinkToken { + _oldLinkTokenRule = append(_oldLinkTokenRule, _oldLinkTokenItem) + } + var _newLinkTokenRule []interface{} + for _, _newLinkTokenItem := range _newLinkToken { + _newLinkTokenRule = append(_newLinkTokenRule, _newLinkTokenItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "LinkTokenSet", _oldLinkTokenRule, _newLinkTokenRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorLinkTokenSetIterator{contract: _OffchainAggregator.contract, event: "LinkTokenSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchLinkTokenSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorLinkTokenSet, _oldLinkToken []common.Address, _newLinkToken []common.Address) (event.Subscription, error) { + + var _oldLinkTokenRule []interface{} + for _, _oldLinkTokenItem := range _oldLinkToken { + _oldLinkTokenRule = append(_oldLinkTokenRule, _oldLinkTokenItem) + } + var _newLinkTokenRule []interface{} + for _, _newLinkTokenItem := range _newLinkToken { + _newLinkTokenRule = append(_newLinkTokenRule, _newLinkTokenItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "LinkTokenSet", _oldLinkTokenRule, _newLinkTokenRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorLinkTokenSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "LinkTokenSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseLinkTokenSet(log types.Log) (*OffchainAggregatorLinkTokenSet, error) { + event := new(OffchainAggregatorLinkTokenSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "LinkTokenSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorNewRoundIterator struct { + Event *OffchainAggregatorNewRound + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorNewRoundIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorNewRound) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorNewRound) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorNewRoundIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorNewRoundIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorNewRound struct { + RoundId *big.Int + StartedBy common.Address + StartedAt *big.Int + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterNewRound(opts *bind.FilterOpts, roundId []*big.Int, startedBy []common.Address) (*OffchainAggregatorNewRoundIterator, error) { + + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + var startedByRule []interface{} 
+ for _, startedByItem := range startedBy { + startedByRule = append(startedByRule, startedByItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "NewRound", roundIdRule, startedByRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorNewRoundIterator{contract: _OffchainAggregator.contract, event: "NewRound", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchNewRound(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorNewRound, roundId []*big.Int, startedBy []common.Address) (event.Subscription, error) { + + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + var startedByRule []interface{} + for _, startedByItem := range startedBy { + startedByRule = append(startedByRule, startedByItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "NewRound", roundIdRule, startedByRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorNewRound) + if err := _OffchainAggregator.contract.UnpackLog(event, "NewRound", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseNewRound(log types.Log) (*OffchainAggregatorNewRound, error) { + event := new(OffchainAggregatorNewRound) + if err := _OffchainAggregator.contract.UnpackLog(event, "NewRound", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorNewTransmissionIterator struct { + Event *OffchainAggregatorNewTransmission + + contract *bind.BoundContract + event string + + logs chan 
types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorNewTransmissionIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorNewTransmission) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorNewTransmission) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorNewTransmissionIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorNewTransmissionIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorNewTransmission struct { + AggregatorRoundId uint32 + Answer *big.Int + Transmitter common.Address + Observations []*big.Int + Observers []byte + ConfigDigest [32]byte + EpochAndRound *big.Int + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterNewTransmission(opts *bind.FilterOpts, aggregatorRoundId []uint32) (*OffchainAggregatorNewTransmissionIterator, error) { + + var aggregatorRoundIdRule []interface{} + for _, aggregatorRoundIdItem := range aggregatorRoundId { + aggregatorRoundIdRule = append(aggregatorRoundIdRule, aggregatorRoundIdItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "NewTransmission", aggregatorRoundIdRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorNewTransmissionIterator{contract: _OffchainAggregator.contract, event: "NewTransmission", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchNewTransmission(opts *bind.WatchOpts, 
sink chan<- *OffchainAggregatorNewTransmission, aggregatorRoundId []uint32) (event.Subscription, error) { + + var aggregatorRoundIdRule []interface{} + for _, aggregatorRoundIdItem := range aggregatorRoundId { + aggregatorRoundIdRule = append(aggregatorRoundIdRule, aggregatorRoundIdItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "NewTransmission", aggregatorRoundIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorNewTransmission) + if err := _OffchainAggregator.contract.UnpackLog(event, "NewTransmission", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseNewTransmission(log types.Log) (*OffchainAggregatorNewTransmission, error) { + event := new(OffchainAggregatorNewTransmission) + if err := _OffchainAggregator.contract.UnpackLog(event, "NewTransmission", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorOraclePaidIterator struct { + Event *OffchainAggregatorOraclePaid + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorOraclePaidIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorOraclePaid) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorOraclePaid) + if 
err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorOraclePaidIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorOraclePaidIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorOraclePaid struct { + Transmitter common.Address + Payee common.Address + Amount *big.Int + LinkToken common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterOraclePaid(opts *bind.FilterOpts, transmitter []common.Address, payee []common.Address, linkToken []common.Address) (*OffchainAggregatorOraclePaidIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var payeeRule []interface{} + for _, payeeItem := range payee { + payeeRule = append(payeeRule, payeeItem) + } + + var linkTokenRule []interface{} + for _, linkTokenItem := range linkToken { + linkTokenRule = append(linkTokenRule, linkTokenItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "OraclePaid", transmitterRule, payeeRule, linkTokenRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorOraclePaidIterator{contract: _OffchainAggregator.contract, event: "OraclePaid", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchOraclePaid(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorOraclePaid, transmitter []common.Address, payee []common.Address, linkToken []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var payeeRule []interface{} + for _, payeeItem := range payee { + payeeRule 
= append(payeeRule, payeeItem) + } + + var linkTokenRule []interface{} + for _, linkTokenItem := range linkToken { + linkTokenRule = append(linkTokenRule, linkTokenItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "OraclePaid", transmitterRule, payeeRule, linkTokenRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorOraclePaid) + if err := _OffchainAggregator.contract.UnpackLog(event, "OraclePaid", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseOraclePaid(log types.Log) (*OffchainAggregatorOraclePaid, error) { + event := new(OffchainAggregatorOraclePaid) + if err := _OffchainAggregator.contract.UnpackLog(event, "OraclePaid", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorOwnershipTransferRequestedIterator struct { + Event *OffchainAggregatorOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, 
log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OffchainAggregatorOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorOwnershipTransferRequestedIterator{contract: _OffchainAggregator.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + 
case log := <-logs: + + event := new(OffchainAggregatorOwnershipTransferRequested) + if err := _OffchainAggregator.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseOwnershipTransferRequested(log types.Log) (*OffchainAggregatorOwnershipTransferRequested, error) { + event := new(OffchainAggregatorOwnershipTransferRequested) + if err := _OffchainAggregator.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorOwnershipTransferredIterator struct { + Event *OffchainAggregatorOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil 
+} + +type OffchainAggregatorOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OffchainAggregatorOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorOwnershipTransferredIterator{contract: _OffchainAggregator.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorOwnershipTransferred) + if err := _OffchainAggregator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator 
*OffchainAggregatorFilterer) ParseOwnershipTransferred(log types.Log) (*OffchainAggregatorOwnershipTransferred, error) { + event := new(OffchainAggregatorOwnershipTransferred) + if err := _OffchainAggregator.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorPayeeshipTransferRequestedIterator struct { + Event *OffchainAggregatorPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorPayeeshipTransferRequested struct { + Transmitter common.Address + Current common.Address + Proposed common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, current []common.Address, proposed []common.Address) (*OffchainAggregatorPayeeshipTransferRequestedIterator, error) { + + 
var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var proposedRule []interface{} + for _, proposedItem := range proposed { + proposedRule = append(proposedRule, proposedItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, currentRule, proposedRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorPayeeshipTransferRequestedIterator{contract: _OffchainAggregator.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorPayeeshipTransferRequested, transmitter []common.Address, current []common.Address, proposed []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var proposedRule []interface{} + for _, proposedItem := range proposed { + proposedRule = append(proposedRule, proposedItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, currentRule, proposedRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorPayeeshipTransferRequested) + if err := _OffchainAggregator.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: 
+ case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParsePayeeshipTransferRequested(log types.Log) (*OffchainAggregatorPayeeshipTransferRequested, error) { + event := new(OffchainAggregatorPayeeshipTransferRequested) + if err := _OffchainAggregator.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorPayeeshipTransferredIterator struct { + Event *OffchainAggregatorPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorPayeeshipTransferred struct { + Transmitter common.Address + Previous common.Address + Current common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterPayeeshipTransferred(opts 
*bind.FilterOpts, transmitter []common.Address, previous []common.Address, current []common.Address) (*OffchainAggregatorPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, previousRule, currentRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorPayeeshipTransferredIterator{contract: _OffchainAggregator.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorPayeeshipTransferred, transmitter []common.Address, previous []common.Address, current []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, previousRule, currentRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorPayeeshipTransferred) + if err := 
_OffchainAggregator.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParsePayeeshipTransferred(log types.Log) (*OffchainAggregatorPayeeshipTransferred, error) { + event := new(OffchainAggregatorPayeeshipTransferred) + if err := _OffchainAggregator.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorRequesterAccessControllerSetIterator struct { + Event *OffchainAggregatorRequesterAccessControllerSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorRequesterAccessControllerSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorRequesterAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorRequesterAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorRequesterAccessControllerSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorRequesterAccessControllerSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorRequesterAccessControllerSet struct { + 
Old common.Address + Current common.Address + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterRequesterAccessControllerSet(opts *bind.FilterOpts) (*OffchainAggregatorRequesterAccessControllerSetIterator, error) { + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "RequesterAccessControllerSet") + if err != nil { + return nil, err + } + return &OffchainAggregatorRequesterAccessControllerSetIterator{contract: _OffchainAggregator.contract, event: "RequesterAccessControllerSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchRequesterAccessControllerSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorRequesterAccessControllerSet) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "RequesterAccessControllerSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorRequesterAccessControllerSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "RequesterAccessControllerSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseRequesterAccessControllerSet(log types.Log) (*OffchainAggregatorRequesterAccessControllerSet, error) { + event := new(OffchainAggregatorRequesterAccessControllerSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "RequesterAccessControllerSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorRoundRequestedIterator struct { + Event *OffchainAggregatorRoundRequested + + contract *bind.BoundContract + event string + + logs 
chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorRoundRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorRoundRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorRoundRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorRoundRequestedIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorRoundRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorRoundRequested struct { + Requester common.Address + ConfigDigest [32]byte + Epoch uint32 + Round uint8 + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterRoundRequested(opts *bind.FilterOpts, requester []common.Address) (*OffchainAggregatorRoundRequestedIterator, error) { + + var requesterRule []interface{} + for _, requesterItem := range requester { + requesterRule = append(requesterRule, requesterItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "RoundRequested", requesterRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorRoundRequestedIterator{contract: _OffchainAggregator.contract, event: "RoundRequested", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchRoundRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorRoundRequested, requester []common.Address) (event.Subscription, error) { + + var requesterRule []interface{} + for _, 
requesterItem := range requester { + requesterRule = append(requesterRule, requesterItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "RoundRequested", requesterRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorRoundRequested) + if err := _OffchainAggregator.contract.UnpackLog(event, "RoundRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseRoundRequested(log types.Log) (*OffchainAggregatorRoundRequested, error) { + event := new(OffchainAggregatorRoundRequested) + if err := _OffchainAggregator.contract.UnpackLog(event, "RoundRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorValidatorConfigSetIterator struct { + Event *OffchainAggregatorValidatorConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorValidatorConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorValidatorConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorValidatorConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + 
it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorValidatorConfigSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorValidatorConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorValidatorConfigSet struct { + PreviousValidator common.Address + PreviousGasLimit uint32 + CurrentValidator common.Address + CurrentGasLimit uint32 + Raw types.Log +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) FilterValidatorConfigSet(opts *bind.FilterOpts, previousValidator []common.Address, currentValidator []common.Address) (*OffchainAggregatorValidatorConfigSetIterator, error) { + + var previousValidatorRule []interface{} + for _, previousValidatorItem := range previousValidator { + previousValidatorRule = append(previousValidatorRule, previousValidatorItem) + } + + var currentValidatorRule []interface{} + for _, currentValidatorItem := range currentValidator { + currentValidatorRule = append(currentValidatorRule, currentValidatorItem) + } + + logs, sub, err := _OffchainAggregator.contract.FilterLogs(opts, "ValidatorConfigSet", previousValidatorRule, currentValidatorRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorValidatorConfigSetIterator{contract: _OffchainAggregator.contract, event: "ValidatorConfigSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) WatchValidatorConfigSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorValidatorConfigSet, previousValidator []common.Address, currentValidator []common.Address) (event.Subscription, error) { + + var previousValidatorRule []interface{} + for _, previousValidatorItem := range previousValidator { + previousValidatorRule = append(previousValidatorRule, previousValidatorItem) + } + + var currentValidatorRule []interface{} + for _, currentValidatorItem := range currentValidator { + currentValidatorRule = append(currentValidatorRule, 
currentValidatorItem) + } + + logs, sub, err := _OffchainAggregator.contract.WatchLogs(opts, "ValidatorConfigSet", previousValidatorRule, currentValidatorRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorValidatorConfigSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "ValidatorConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregator *OffchainAggregatorFilterer) ParseValidatorConfigSet(log types.Log) (*OffchainAggregatorValidatorConfigSet, error) { + event := new(OffchainAggregatorValidatorConfigSet) + if err := _OffchainAggregator.contract.UnpackLog(event, "ValidatorConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type GetBilling struct { + MaximumGasPrice uint32 + ReasonableGasPrice uint32 + MicroLinkPerEth uint32 + LinkGweiPerObservation uint32 + LinkGweiPerTransmission uint32 +} +type GetRoundData struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} +type LatestConfigDetails struct { + ConfigCount uint32 + BlockNumber uint32 + ConfigDigest [32]byte +} +type LatestRoundData struct { + RoundId *big.Int + Answer *big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} +type LatestTransmissionDetails struct { + ConfigDigest [32]byte + Epoch uint32 + Round uint8 + LatestAnswer *big.Int + LatestTimestamp uint64 +} +type ValidatorConfig struct { + Validator common.Address + GasLimit uint32 +} + +func (_OffchainAggregator *OffchainAggregator) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case 
_OffchainAggregator.abi.Events["AnswerUpdated"].ID: + return _OffchainAggregator.ParseAnswerUpdated(log) + case _OffchainAggregator.abi.Events["BillingAccessControllerSet"].ID: + return _OffchainAggregator.ParseBillingAccessControllerSet(log) + case _OffchainAggregator.abi.Events["BillingSet"].ID: + return _OffchainAggregator.ParseBillingSet(log) + case _OffchainAggregator.abi.Events["ConfigSet"].ID: + return _OffchainAggregator.ParseConfigSet(log) + case _OffchainAggregator.abi.Events["LinkTokenSet"].ID: + return _OffchainAggregator.ParseLinkTokenSet(log) + case _OffchainAggregator.abi.Events["NewRound"].ID: + return _OffchainAggregator.ParseNewRound(log) + case _OffchainAggregator.abi.Events["NewTransmission"].ID: + return _OffchainAggregator.ParseNewTransmission(log) + case _OffchainAggregator.abi.Events["OraclePaid"].ID: + return _OffchainAggregator.ParseOraclePaid(log) + case _OffchainAggregator.abi.Events["OwnershipTransferRequested"].ID: + return _OffchainAggregator.ParseOwnershipTransferRequested(log) + case _OffchainAggregator.abi.Events["OwnershipTransferred"].ID: + return _OffchainAggregator.ParseOwnershipTransferred(log) + case _OffchainAggregator.abi.Events["PayeeshipTransferRequested"].ID: + return _OffchainAggregator.ParsePayeeshipTransferRequested(log) + case _OffchainAggregator.abi.Events["PayeeshipTransferred"].ID: + return _OffchainAggregator.ParsePayeeshipTransferred(log) + case _OffchainAggregator.abi.Events["RequesterAccessControllerSet"].ID: + return _OffchainAggregator.ParseRequesterAccessControllerSet(log) + case _OffchainAggregator.abi.Events["RoundRequested"].ID: + return _OffchainAggregator.ParseRoundRequested(log) + case _OffchainAggregator.abi.Events["ValidatorConfigSet"].ID: + return _OffchainAggregator.ParseValidatorConfigSet(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (OffchainAggregatorAnswerUpdated) Topic() common.Hash { + return 
common.HexToHash("0x0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f") +} + +func (OffchainAggregatorBillingAccessControllerSet) Topic() common.Hash { + return common.HexToHash("0x793cb73064f3c8cde7e187ae515511e6e56d1ee89bf08b82fa60fb70f8d48912") +} + +func (OffchainAggregatorBillingSet) Topic() common.Hash { + return common.HexToHash("0xd0d9486a2c673e2a4b57fc82e4c8a556b3e2b82dd5db07e2c04a920ca0f469b6") +} + +func (OffchainAggregatorConfigSet) Topic() common.Hash { + return common.HexToHash("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05") +} + +func (OffchainAggregatorLinkTokenSet) Topic() common.Hash { + return common.HexToHash("0x4966a50c93f855342ccf6c5c0d358b85b91335b2acedc7da0932f691f351711a") +} + +func (OffchainAggregatorNewRound) Topic() common.Hash { + return common.HexToHash("0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271") +} + +func (OffchainAggregatorNewTransmission) Topic() common.Hash { + return common.HexToHash("0x8235efcbf95cfe12e2d5afec1e5e568dc529cb92d6a9b4195da079f1411244f8") +} + +func (OffchainAggregatorOraclePaid) Topic() common.Hash { + return common.HexToHash("0xd0b1dac935d85bd54cf0a33b0d41d39f8cf53a968465fc7ea2377526b8ac712c") +} + +func (OffchainAggregatorOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (OffchainAggregatorOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (OffchainAggregatorPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (OffchainAggregatorPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (OffchainAggregatorRequesterAccessControllerSet) Topic() common.Hash 
{ + return common.HexToHash("0x27b89aede8b560578baaa25ee5ce3852c5eecad1e114b941bbd89e1eb4bae634") +} + +func (OffchainAggregatorRoundRequested) Topic() common.Hash { + return common.HexToHash("0x41e3990591fd372502daa15842da15bc7f41c75309ab3ff4f56f1848c178825c") +} + +func (OffchainAggregatorValidatorConfigSet) Topic() common.Hash { + return common.HexToHash("0xb04e3a37abe9c0fcdfebdeae019a8e2b12ddf53f5d55ffb0caccc1bedaca1541") +} + +func (_OffchainAggregator *OffchainAggregator) Address() common.Address { + return _OffchainAggregator.address +} + +type OffchainAggregatorInterface interface { + BillingAccessController(opts *bind.CallOpts) (common.Address, error) + + Decimals(opts *bind.CallOpts) (uint8, error) + + Description(opts *bind.CallOpts) (string, error) + + GetAnswer(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) + + GetBilling(opts *bind.CallOpts) (GetBilling, + + error) + + GetLinkToken(opts *bind.CallOpts) (common.Address, error) + + GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (GetRoundData, + + error) + + GetTimestamp(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) + + LatestAnswer(opts *bind.CallOpts) (*big.Int, error) + + LatestConfigDetails(opts *bind.CallOpts) (LatestConfigDetails, + + error) + + LatestRound(opts *bind.CallOpts) (*big.Int, error) + + LatestRoundData(opts *bind.CallOpts) (LatestRoundData, + + error) + + LatestTimestamp(opts *bind.CallOpts) (*big.Int, error) + + LatestTransmissionDetails(opts *bind.CallOpts) (LatestTransmissionDetails, + + error) + + LinkAvailableForPayment(opts *bind.CallOpts) (*big.Int, error) + + MaxAnswer(opts *bind.CallOpts) (*big.Int, error) + + MinAnswer(opts *bind.CallOpts) (*big.Int, error) + + OracleObservationCount(opts *bind.CallOpts, _signerOrTransmitter common.Address) (uint16, error) + + OwedPayment(opts *bind.CallOpts, _transmitter common.Address) (*big.Int, error) + + Owner(opts *bind.CallOpts) (common.Address, error) + + RequesterAccessController(opts *bind.CallOpts) 
(common.Address, error) + + Transmitters(opts *bind.CallOpts) ([]common.Address, error) + + TypeAndVersion(opts *bind.CallOpts) (string, error) + + ValidatorConfig(opts *bind.CallOpts) (ValidatorConfig, + + error) + + Version(opts *bind.CallOpts) (*big.Int, error) + + AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) + + AcceptPayeeship(opts *bind.TransactOpts, _transmitter common.Address) (*types.Transaction, error) + + RequestNewRound(opts *bind.TransactOpts) (*types.Transaction, error) + + SetBilling(opts *bind.TransactOpts, _maximumGasPrice uint32, _reasonableGasPrice uint32, _microLinkPerEth uint32, _linkGweiPerObservation uint32, _linkGweiPerTransmission uint32) (*types.Transaction, error) + + SetBillingAccessController(opts *bind.TransactOpts, _billingAccessController common.Address) (*types.Transaction, error) + + SetConfig(opts *bind.TransactOpts, _signers []common.Address, _transmitters []common.Address, _threshold uint8, _onchainConfig []byte, _offchainConfigVersion uint64, _offchainConfig []byte) (*types.Transaction, error) + + SetLinkToken(opts *bind.TransactOpts, _linkToken common.Address, _recipient common.Address) (*types.Transaction, error) + + SetPayees(opts *bind.TransactOpts, _transmitters []common.Address, _payees []common.Address) (*types.Transaction, error) + + SetRequesterAccessController(opts *bind.TransactOpts, _requesterAccessController common.Address) (*types.Transaction, error) + + SetValidatorConfig(opts *bind.TransactOpts, _newValidator common.Address, _newGasLimit uint32) (*types.Transaction, error) + + TransferOwnership(opts *bind.TransactOpts, _to common.Address) (*types.Transaction, error) + + TransferPayeeship(opts *bind.TransactOpts, _transmitter common.Address, _proposed common.Address) (*types.Transaction, error) + + Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) + + WithdrawFunds(opts *bind.TransactOpts, 
_recipient common.Address, _amount *big.Int) (*types.Transaction, error) + + WithdrawPayment(opts *bind.TransactOpts, _transmitter common.Address) (*types.Transaction, error) + + FilterAnswerUpdated(opts *bind.FilterOpts, current []*big.Int, roundId []*big.Int) (*OffchainAggregatorAnswerUpdatedIterator, error) + + WatchAnswerUpdated(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorAnswerUpdated, current []*big.Int, roundId []*big.Int) (event.Subscription, error) + + ParseAnswerUpdated(log types.Log) (*OffchainAggregatorAnswerUpdated, error) + + FilterBillingAccessControllerSet(opts *bind.FilterOpts) (*OffchainAggregatorBillingAccessControllerSetIterator, error) + + WatchBillingAccessControllerSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorBillingAccessControllerSet) (event.Subscription, error) + + ParseBillingAccessControllerSet(log types.Log) (*OffchainAggregatorBillingAccessControllerSet, error) + + FilterBillingSet(opts *bind.FilterOpts) (*OffchainAggregatorBillingSetIterator, error) + + WatchBillingSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorBillingSet) (event.Subscription, error) + + ParseBillingSet(log types.Log) (*OffchainAggregatorBillingSet, error) + + FilterConfigSet(opts *bind.FilterOpts) (*OffchainAggregatorConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*OffchainAggregatorConfigSet, error) + + FilterLinkTokenSet(opts *bind.FilterOpts, _oldLinkToken []common.Address, _newLinkToken []common.Address) (*OffchainAggregatorLinkTokenSetIterator, error) + + WatchLinkTokenSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorLinkTokenSet, _oldLinkToken []common.Address, _newLinkToken []common.Address) (event.Subscription, error) + + ParseLinkTokenSet(log types.Log) (*OffchainAggregatorLinkTokenSet, error) + + FilterNewRound(opts *bind.FilterOpts, roundId []*big.Int, startedBy []common.Address) 
(*OffchainAggregatorNewRoundIterator, error) + + WatchNewRound(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorNewRound, roundId []*big.Int, startedBy []common.Address) (event.Subscription, error) + + ParseNewRound(log types.Log) (*OffchainAggregatorNewRound, error) + + FilterNewTransmission(opts *bind.FilterOpts, aggregatorRoundId []uint32) (*OffchainAggregatorNewTransmissionIterator, error) + + WatchNewTransmission(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorNewTransmission, aggregatorRoundId []uint32) (event.Subscription, error) + + ParseNewTransmission(log types.Log) (*OffchainAggregatorNewTransmission, error) + + FilterOraclePaid(opts *bind.FilterOpts, transmitter []common.Address, payee []common.Address, linkToken []common.Address) (*OffchainAggregatorOraclePaidIterator, error) + + WatchOraclePaid(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorOraclePaid, transmitter []common.Address, payee []common.Address, linkToken []common.Address) (event.Subscription, error) + + ParseOraclePaid(log types.Log) (*OffchainAggregatorOraclePaid, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OffchainAggregatorOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*OffchainAggregatorOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OffchainAggregatorOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*OffchainAggregatorOwnershipTransferred, error) + + 
FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, current []common.Address, proposed []common.Address) (*OffchainAggregatorPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorPayeeshipTransferRequested, transmitter []common.Address, current []common.Address, proposed []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*OffchainAggregatorPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, previous []common.Address, current []common.Address) (*OffchainAggregatorPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorPayeeshipTransferred, transmitter []common.Address, previous []common.Address, current []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*OffchainAggregatorPayeeshipTransferred, error) + + FilterRequesterAccessControllerSet(opts *bind.FilterOpts) (*OffchainAggregatorRequesterAccessControllerSetIterator, error) + + WatchRequesterAccessControllerSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorRequesterAccessControllerSet) (event.Subscription, error) + + ParseRequesterAccessControllerSet(log types.Log) (*OffchainAggregatorRequesterAccessControllerSet, error) + + FilterRoundRequested(opts *bind.FilterOpts, requester []common.Address) (*OffchainAggregatorRoundRequestedIterator, error) + + WatchRoundRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorRoundRequested, requester []common.Address) (event.Subscription, error) + + ParseRoundRequested(log types.Log) (*OffchainAggregatorRoundRequested, error) + + FilterValidatorConfigSet(opts *bind.FilterOpts, previousValidator []common.Address, currentValidator []common.Address) (*OffchainAggregatorValidatorConfigSetIterator, error) + + WatchValidatorConfigSet(opts 
*bind.WatchOpts, sink chan<- *OffchainAggregatorValidatorConfigSet, previousValidator []common.Address, currentValidator []common.Address) (event.Subscription, error) + + ParseValidatorConfigSet(log types.Log) (*OffchainAggregatorValidatorConfigSet, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/core/internal/gethwrappers2/go_generate.go b/core/internal/gethwrappers2/go_generate.go new file mode 100644 index 00000000..80c3a829 --- /dev/null +++ b/core/internal/gethwrappers2/go_generate.go @@ -0,0 +1,4 @@ +// Package gethwrappers keeps track of the golang wrappers of the solidity contracts +package main + +//TODO how is OffchainAggregator generated?! https://smartcontract-it.atlassian.net/browse/BCF-1930 diff --git a/core/internal/gethwrappers2/wrap.go b/core/internal/gethwrappers2/wrap.go new file mode 100644 index 00000000..5d278be6 --- /dev/null +++ b/core/internal/gethwrappers2/wrap.go @@ -0,0 +1,48 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + gethParams "github.com/ethereum/go-ethereum/params" + + gethwrappers2 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers" +) + +func main() { + abiPath := os.Args[1] + binPath := os.Args[2] + className := os.Args[3] + pkgName := os.Args[4] + fmt.Println("Generating", pkgName, "contract wrapper") + + cwd, err := os.Getwd() // gethwrappers directory + if err != nil { + gethwrappers2.Exit("could not get working directory", err) + } + outDir := filepath.Join(cwd, "generated", pkgName) + if mkdErr := os.MkdirAll(outDir, 0700); mkdErr != nil { + gethwrappers2.Exit("failed to create wrapper dir", mkdErr) + } + outPath := filepath.Join(outDir, pkgName+".go") + + gethwrappers2.Abigen(gethwrappers2.AbigenArgs{ + Bin: binPath, ABI: abiPath, Out: outPath, Type: className, Pkg: pkgName, + }) + + // Build succeeded, so update the versions db with the new contract data + versions, err := gethwrappers2.ReadVersionsDB() + if err != nil { + 
gethwrappers2.Exit("could not read current versions database", err) + } + versions.GethVersion = gethParams.Version + versions.ContractVersions[pkgName] = gethwrappers2.ContractVersion{ + Hash: gethwrappers2.VersionHash(abiPath, binPath), + AbiPath: abiPath, + BinaryPath: binPath, + } + if err := gethwrappers2.WriteVersionsDB(versions); err != nil { + gethwrappers2.Exit("could not save versions db", err) + } +} diff --git a/core/internal/mocks/application.go b/core/internal/mocks/application.go new file mode 100644 index 00000000..3a9e2293 --- /dev/null +++ b/core/internal/mocks/application.go @@ -0,0 +1,669 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + audit "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + + bridges "github.com/goplugin/pluginv3.0/v2/core/bridges" + + plugin "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + + context "context" + + feeds "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + + job "github.com/goplugin/pluginv3.0/v2/core/services/job" + + keystore "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + + logger "github.com/goplugin/pluginv3.0/v2/core/logger" + + mock "github.com/stretchr/testify/mock" + + pipeline "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + + plugins "github.com/goplugin/pluginv3.0/v2/plugins" + + services "github.com/goplugin/pluginv3.0/v2/core/services" + + sessions "github.com/goplugin/pluginv3.0/v2/core/sessions" + + sqlx "github.com/jmoiron/sqlx" + + txmgr "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + + types "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + + uuid "github.com/google/uuid" + + webhook "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + + zapcore "go.uber.org/zap/zapcore" +) + +// Application is an autogenerated mock type for the Application type +type Application struct { + mock.Mock +} + +// AddJobV2 provides a mock function with given fields: ctx, _a1 +func (_m 
*Application) AddJobV2(ctx context.Context, _a1 *job.Job) error { + ret := _m.Called(ctx, _a1) + + if len(ret) == 0 { + panic("no return value specified for AddJobV2") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *job.Job) error); ok { + r0 = rf(ctx, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AuthenticationProvider provides a mock function with given fields: +func (_m *Application) AuthenticationProvider() sessions.AuthenticationProvider { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AuthenticationProvider") + } + + var r0 sessions.AuthenticationProvider + if rf, ok := ret.Get(0).(func() sessions.AuthenticationProvider); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(sessions.AuthenticationProvider) + } + } + + return r0 +} + +// BasicAdminUsersORM provides a mock function with given fields: +func (_m *Application) BasicAdminUsersORM() sessions.BasicAdminUsersORM { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BasicAdminUsersORM") + } + + var r0 sessions.BasicAdminUsersORM + if rf, ok := ret.Get(0).(func() sessions.BasicAdminUsersORM); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(sessions.BasicAdminUsersORM) + } + } + + return r0 +} + +// BridgeORM provides a mock function with given fields: +func (_m *Application) BridgeORM() bridges.ORM { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for BridgeORM") + } + + var r0 bridges.ORM + if rf, ok := ret.Get(0).(func() bridges.ORM); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(bridges.ORM) + } + } + + return r0 +} + +// DeleteJob provides a mock function with given fields: ctx, jobID +func (_m *Application) DeleteJob(ctx context.Context, jobID int32) error { + ret := _m.Called(ctx, jobID) + + if len(ret) == 0 { + panic("no return value specified for DeleteJob") + } + + var r0 error + if rf, ok 
:= ret.Get(0).(func(context.Context, int32) error); ok { + r0 = rf(ctx, jobID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EVMORM provides a mock function with given fields: +func (_m *Application) EVMORM() types.Configs { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EVMORM") + } + + var r0 types.Configs + if rf, ok := ret.Get(0).(func() types.Configs); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Configs) + } + } + + return r0 +} + +// GetAuditLogger provides a mock function with given fields: +func (_m *Application) GetAuditLogger() audit.AuditLogger { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAuditLogger") + } + + var r0 audit.AuditLogger + if rf, ok := ret.Get(0).(func() audit.AuditLogger); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(audit.AuditLogger) + } + } + + return r0 +} + +// GetConfig provides a mock function with given fields: +func (_m *Application) GetConfig() plugin.GeneralConfig { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetConfig") + } + + var r0 plugin.GeneralConfig + if rf, ok := ret.Get(0).(func() plugin.GeneralConfig); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(plugin.GeneralConfig) + } + } + + return r0 +} + +// GetExternalInitiatorManager provides a mock function with given fields: +func (_m *Application) GetExternalInitiatorManager() webhook.ExternalInitiatorManager { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExternalInitiatorManager") + } + + var r0 webhook.ExternalInitiatorManager + if rf, ok := ret.Get(0).(func() webhook.ExternalInitiatorManager); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(webhook.ExternalInitiatorManager) + } + } + + return r0 +} + +// GetFeedsService provides a mock function with given fields: +func (_m *Application) 
GetFeedsService() feeds.Service { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetFeedsService") + } + + var r0 feeds.Service + if rf, ok := ret.Get(0).(func() feeds.Service); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(feeds.Service) + } + } + + return r0 +} + +// GetHealthChecker provides a mock function with given fields: +func (_m *Application) GetHealthChecker() services.Checker { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetHealthChecker") + } + + var r0 services.Checker + if rf, ok := ret.Get(0).(func() services.Checker); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(services.Checker) + } + } + + return r0 +} + +// GetKeyStore provides a mock function with given fields: +func (_m *Application) GetKeyStore() keystore.Master { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetKeyStore") + } + + var r0 keystore.Master + if rf, ok := ret.Get(0).(func() keystore.Master); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keystore.Master) + } + } + + return r0 +} + +// GetLogger provides a mock function with given fields: +func (_m *Application) GetLogger() logger.SugaredLogger { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLogger") + } + + var r0 logger.SugaredLogger + if rf, ok := ret.Get(0).(func() logger.SugaredLogger); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(logger.SugaredLogger) + } + } + + return r0 +} + +// GetLoopRegistry provides a mock function with given fields: +func (_m *Application) GetLoopRegistry() *plugins.LoopRegistry { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLoopRegistry") + } + + var r0 *plugins.LoopRegistry + if rf, ok := ret.Get(0).(func() *plugins.LoopRegistry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*plugins.LoopRegistry) + } + } + + return r0 +} + +// GetRelayers provides a mock function with given fields: +func (_m *Application) GetRelayers() plugin.RelayerChainInteroperators { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetRelayers") + } + + var r0 plugin.RelayerChainInteroperators + if rf, ok := ret.Get(0).(func() plugin.RelayerChainInteroperators); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(plugin.RelayerChainInteroperators) + } + } + + return r0 +} + +// GetSqlxDB provides a mock function with given fields: +func (_m *Application) GetSqlxDB() *sqlx.DB { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetSqlxDB") + } + + var r0 *sqlx.DB + if rf, ok := ret.Get(0).(func() *sqlx.DB); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sqlx.DB) + } + } + + return r0 +} + +// GetWebAuthnConfiguration provides a mock function with given fields: +func (_m *Application) GetWebAuthnConfiguration() sessions.WebAuthnConfiguration { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetWebAuthnConfiguration") + } + + var r0 sessions.WebAuthnConfiguration + if rf, ok := ret.Get(0).(func() sessions.WebAuthnConfiguration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(sessions.WebAuthnConfiguration) + } + + return r0 +} + +// ID provides a mock function with given fields: +func (_m *Application) ID() uuid.UUID { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 uuid.UUID + if rf, ok := ret.Get(0).(func() uuid.UUID); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(uuid.UUID) + } + } + + return r0 +} + +// JobORM provides a mock function with given fields: +func (_m *Application) JobORM() job.ORM { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for JobORM") + } + + var r0 job.ORM + if rf, ok := 
ret.Get(0).(func() job.ORM); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(job.ORM) + } + } + + return r0 +} + +// JobSpawner provides a mock function with given fields: +func (_m *Application) JobSpawner() job.Spawner { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for JobSpawner") + } + + var r0 job.Spawner + if rf, ok := ret.Get(0).(func() job.Spawner); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(job.Spawner) + } + } + + return r0 +} + +// PipelineORM provides a mock function with given fields: +func (_m *Application) PipelineORM() pipeline.ORM { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PipelineORM") + } + + var r0 pipeline.ORM + if rf, ok := ret.Get(0).(func() pipeline.ORM); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pipeline.ORM) + } + } + + return r0 +} + +// ReplayFromBlock provides a mock function with given fields: chainID, number, forceBroadcast +func (_m *Application) ReplayFromBlock(chainID *big.Int, number uint64, forceBroadcast bool) error { + ret := _m.Called(chainID, number, forceBroadcast) + + if len(ret) == 0 { + panic("no return value specified for ReplayFromBlock") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*big.Int, uint64, bool) error); ok { + r0 = rf(chainID, number, forceBroadcast) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ResumeJobV2 provides a mock function with given fields: ctx, taskID, result +func (_m *Application) ResumeJobV2(ctx context.Context, taskID uuid.UUID, result pipeline.Result) error { + ret := _m.Called(ctx, taskID, result) + + if len(ret) == 0 { + panic("no return value specified for ResumeJobV2") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uuid.UUID, pipeline.Result) error); ok { + r0 = rf(ctx, taskID, result) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RunJobV2 provides a mock function with given 
fields: ctx, jobID, meta +func (_m *Application) RunJobV2(ctx context.Context, jobID int32, meta map[string]interface{}) (int64, error) { + ret := _m.Called(ctx, jobID, meta) + + if len(ret) == 0 { + panic("no return value specified for RunJobV2") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int32, map[string]interface{}) (int64, error)); ok { + return rf(ctx, jobID, meta) + } + if rf, ok := ret.Get(0).(func(context.Context, int32, map[string]interface{}) int64); ok { + r0 = rf(ctx, jobID, meta) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, int32, map[string]interface{}) error); ok { + r1 = rf(ctx, jobID, meta) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RunWebhookJobV2 provides a mock function with given fields: ctx, jobUUID, requestBody, meta +func (_m *Application) RunWebhookJobV2(ctx context.Context, jobUUID uuid.UUID, requestBody string, meta pipeline.JSONSerializable) (int64, error) { + ret := _m.Called(ctx, jobUUID, requestBody, meta) + + if len(ret) == 0 { + panic("no return value specified for RunWebhookJobV2") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uuid.UUID, string, pipeline.JSONSerializable) (int64, error)); ok { + return rf(ctx, jobUUID, requestBody, meta) + } + if rf, ok := ret.Get(0).(func(context.Context, uuid.UUID, string, pipeline.JSONSerializable) int64); ok { + r0 = rf(ctx, jobUUID, requestBody, meta) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, uuid.UUID, string, pipeline.JSONSerializable) error); ok { + r1 = rf(ctx, jobUUID, requestBody, meta) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SecretGenerator provides a mock function with given fields: +func (_m *Application) SecretGenerator() plugin.SecretGenerator { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SecretGenerator") + } + + var r0 
plugin.SecretGenerator + if rf, ok := ret.Get(0).(func() plugin.SecretGenerator); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(plugin.SecretGenerator) + } + } + + return r0 +} + +// SetLogLevel provides a mock function with given fields: lvl +func (_m *Application) SetLogLevel(lvl zapcore.Level) error { + ret := _m.Called(lvl) + + if len(ret) == 0 { + panic("no return value specified for SetLogLevel") + } + + var r0 error + if rf, ok := ret.Get(0).(func(zapcore.Level) error); ok { + r0 = rf(lvl) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: ctx +func (_m *Application) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Stop provides a mock function with given fields: +func (_m *Application) Stop() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Stop") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TxmStorageService provides a mock function with given fields: +func (_m *Application) TxmStorageService() txmgr.EvmTxStore { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TxmStorageService") + } + + var r0 txmgr.EvmTxStore + if rf, ok := ret.Get(0).(func() txmgr.EvmTxStore); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(txmgr.EvmTxStore) + } + } + + return r0 +} + +// WakeSessionReaper provides a mock function with given fields: +func (_m *Application) WakeSessionReaper() { + _m.Called() +} + +// NewApplication creates a new instance of Application. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewApplication(t interface { + mock.TestingT + Cleanup(func()) +}) *Application { + mock := &Application{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/internal/mocks/flags.go b/core/internal/mocks/flags.go new file mode 100644 index 00000000..9f760fb5 --- /dev/null +++ b/core/internal/mocks/flags.go @@ -0,0 +1,1371 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + bind "github.com/ethereum/go-ethereum/accounts/abi/bind" + common "github.com/ethereum/go-ethereum/common" + + event "github.com/ethereum/go-ethereum/event" + + flags_wrapper "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flags_wrapper" + + generated "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// Flags is an autogenerated mock type for the FlagsInterface type +type Flags struct { + mock.Mock +} + +// AcceptOwnership provides a mock function with given fields: opts +func (_m *Flags) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for AcceptOwnership") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) (*types.Transaction, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) *types.Transaction); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AddAccess provides a mock function with given fields: opts, _user +func (_m *Flags) AddAccess(opts *bind.TransactOpts, _user common.Address) (*types.Transaction, error) { + ret := 
_m.Called(opts, _user) + + if len(ret) == 0 { + panic("no return value specified for AddAccess") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, _user) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, _user) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, _user) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Address provides a mock function with given fields: +func (_m *Flags) Address() common.Address { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 common.Address + if rf, ok := ret.Get(0).(func() common.Address); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + return r0 +} + +// CheckEnabled provides a mock function with given fields: opts +func (_m *Flags) CheckEnabled(opts *bind.CallOpts) (bool, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for CheckEnabled") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (bool, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) bool); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DisableAccessCheck provides a mock function with given fields: opts +func (_m *Flags) DisableAccessCheck(opts *bind.TransactOpts) (*types.Transaction, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for DisableAccessCheck") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.TransactOpts) (*types.Transaction, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) *types.Transaction); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnableAccessCheck provides a mock function with given fields: opts +func (_m *Flags) EnableAccessCheck(opts *bind.TransactOpts) (*types.Transaction, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for EnableAccessCheck") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) (*types.Transaction, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) *types.Transaction); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterAddedAccess provides a mock function with given fields: opts +func (_m *Flags) FilterAddedAccess(opts *bind.FilterOpts) (*flags_wrapper.FlagsAddedAccessIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterAddedAccess") + } + + var r0 *flags_wrapper.FlagsAddedAccessIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*flags_wrapper.FlagsAddedAccessIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *flags_wrapper.FlagsAddedAccessIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsAddedAccessIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + 
+// FilterCheckAccessDisabled provides a mock function with given fields: opts +func (_m *Flags) FilterCheckAccessDisabled(opts *bind.FilterOpts) (*flags_wrapper.FlagsCheckAccessDisabledIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterCheckAccessDisabled") + } + + var r0 *flags_wrapper.FlagsCheckAccessDisabledIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*flags_wrapper.FlagsCheckAccessDisabledIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *flags_wrapper.FlagsCheckAccessDisabledIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsCheckAccessDisabledIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterCheckAccessEnabled provides a mock function with given fields: opts +func (_m *Flags) FilterCheckAccessEnabled(opts *bind.FilterOpts) (*flags_wrapper.FlagsCheckAccessEnabledIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterCheckAccessEnabled") + } + + var r0 *flags_wrapper.FlagsCheckAccessEnabledIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*flags_wrapper.FlagsCheckAccessEnabledIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *flags_wrapper.FlagsCheckAccessEnabledIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsCheckAccessEnabledIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterFlagLowered provides a mock function with given fields: opts, subject +func (_m *Flags) FilterFlagLowered(opts *bind.FilterOpts, subject []common.Address) 
(*flags_wrapper.FlagsFlagLoweredIterator, error) { + ret := _m.Called(opts, subject) + + if len(ret) == 0 { + panic("no return value specified for FilterFlagLowered") + } + + var r0 *flags_wrapper.FlagsFlagLoweredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address) (*flags_wrapper.FlagsFlagLoweredIterator, error)); ok { + return rf(opts, subject) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address) *flags_wrapper.FlagsFlagLoweredIterator); ok { + r0 = rf(opts, subject) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsFlagLoweredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address) error); ok { + r1 = rf(opts, subject) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterFlagRaised provides a mock function with given fields: opts, subject +func (_m *Flags) FilterFlagRaised(opts *bind.FilterOpts, subject []common.Address) (*flags_wrapper.FlagsFlagRaisedIterator, error) { + ret := _m.Called(opts, subject) + + if len(ret) == 0 { + panic("no return value specified for FilterFlagRaised") + } + + var r0 *flags_wrapper.FlagsFlagRaisedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address) (*flags_wrapper.FlagsFlagRaisedIterator, error)); ok { + return rf(opts, subject) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address) *flags_wrapper.FlagsFlagRaisedIterator); ok { + r0 = rf(opts, subject) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsFlagRaisedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address) error); ok { + r1 = rf(opts, subject) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOwnershipTransferRequested provides a mock function with given fields: opts, from, to +func (_m *Flags) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) 
(*flags_wrapper.FlagsOwnershipTransferRequestedIterator, error) { + ret := _m.Called(opts, from, to) + + if len(ret) == 0 { + panic("no return value specified for FilterOwnershipTransferRequested") + } + + var r0 *flags_wrapper.FlagsOwnershipTransferRequestedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*flags_wrapper.FlagsOwnershipTransferRequestedIterator, error)); ok { + return rf(opts, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *flags_wrapper.FlagsOwnershipTransferRequestedIterator); ok { + r0 = rf(opts, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsOwnershipTransferRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOwnershipTransferred provides a mock function with given fields: opts, from, to +func (_m *Flags) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*flags_wrapper.FlagsOwnershipTransferredIterator, error) { + ret := _m.Called(opts, from, to) + + if len(ret) == 0 { + panic("no return value specified for FilterOwnershipTransferred") + } + + var r0 *flags_wrapper.FlagsOwnershipTransferredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*flags_wrapper.FlagsOwnershipTransferredIterator, error)); ok { + return rf(opts, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *flags_wrapper.FlagsOwnershipTransferredIterator); ok { + r0 = rf(opts, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsOwnershipTransferredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, 
from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRaisingAccessControllerUpdated provides a mock function with given fields: opts, previous, current +func (_m *Flags) FilterRaisingAccessControllerUpdated(opts *bind.FilterOpts, previous []common.Address, current []common.Address) (*flags_wrapper.FlagsRaisingAccessControllerUpdatedIterator, error) { + ret := _m.Called(opts, previous, current) + + if len(ret) == 0 { + panic("no return value specified for FilterRaisingAccessControllerUpdated") + } + + var r0 *flags_wrapper.FlagsRaisingAccessControllerUpdatedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*flags_wrapper.FlagsRaisingAccessControllerUpdatedIterator, error)); ok { + return rf(opts, previous, current) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *flags_wrapper.FlagsRaisingAccessControllerUpdatedIterator); ok { + r0 = rf(opts, previous, current) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsRaisingAccessControllerUpdatedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, previous, current) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRemovedAccess provides a mock function with given fields: opts +func (_m *Flags) FilterRemovedAccess(opts *bind.FilterOpts) (*flags_wrapper.FlagsRemovedAccessIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterRemovedAccess") + } + + var r0 *flags_wrapper.FlagsRemovedAccessIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*flags_wrapper.FlagsRemovedAccessIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *flags_wrapper.FlagsRemovedAccessIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*flags_wrapper.FlagsRemovedAccessIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetFlag provides a mock function with given fields: opts, subject +func (_m *Flags) GetFlag(opts *bind.CallOpts, subject common.Address) (bool, error) { + ret := _m.Called(opts, subject) + + if len(ret) == 0 { + panic("no return value specified for GetFlag") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, common.Address) (bool, error)); ok { + return rf(opts, subject) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, common.Address) bool); ok { + r0 = rf(opts, subject) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, common.Address) error); ok { + r1 = rf(opts, subject) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetFlags provides a mock function with given fields: opts, subjects +func (_m *Flags) GetFlags(opts *bind.CallOpts, subjects []common.Address) ([]bool, error) { + ret := _m.Called(opts, subjects) + + if len(ret) == 0 { + panic("no return value specified for GetFlags") + } + + var r0 []bool + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, []common.Address) ([]bool, error)); ok { + return rf(opts, subjects) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, []common.Address) []bool); ok { + r0 = rf(opts, subjects) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]bool) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, []common.Address) error); ok { + r1 = rf(opts, subjects) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HasAccess provides a mock function with given fields: opts, _user, _calldata +func (_m *Flags) HasAccess(opts *bind.CallOpts, _user common.Address, _calldata []byte) (bool, error) { + ret := _m.Called(opts, _user, _calldata) + + if len(ret) == 0 { + panic("no return value specified for HasAccess") + } 
+ + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, common.Address, []byte) (bool, error)); ok { + return rf(opts, _user, _calldata) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, common.Address, []byte) bool); ok { + r0 = rf(opts, _user, _calldata) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, common.Address, []byte) error); ok { + r1 = rf(opts, _user, _calldata) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LowerFlags provides a mock function with given fields: opts, subjects +func (_m *Flags) LowerFlags(opts *bind.TransactOpts, subjects []common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, subjects) + + if len(ret) == 0 { + panic("no return value specified for LowerFlags") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []common.Address) (*types.Transaction, error)); ok { + return rf(opts, subjects) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []common.Address) *types.Transaction); ok { + r0 = rf(opts, subjects) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, []common.Address) error); ok { + r1 = rf(opts, subjects) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Owner provides a mock function with given fields: opts +func (_m *Flags) Owner(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Owner") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + +// ParseAddedAccess provides a mock function with given fields: log +func (_m *Flags) ParseAddedAccess(log types.Log) (*flags_wrapper.FlagsAddedAccess, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseAddedAccess") + } + + var r0 *flags_wrapper.FlagsAddedAccess + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flags_wrapper.FlagsAddedAccess, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flags_wrapper.FlagsAddedAccess); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsAddedAccess) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseCheckAccessDisabled provides a mock function with given fields: log +func (_m *Flags) ParseCheckAccessDisabled(log types.Log) (*flags_wrapper.FlagsCheckAccessDisabled, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseCheckAccessDisabled") + } + + var r0 *flags_wrapper.FlagsCheckAccessDisabled + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flags_wrapper.FlagsCheckAccessDisabled, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flags_wrapper.FlagsCheckAccessDisabled); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsCheckAccessDisabled) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseCheckAccessEnabled provides a mock function with given fields: log +func (_m *Flags) ParseCheckAccessEnabled(log types.Log) (*flags_wrapper.FlagsCheckAccessEnabled, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseCheckAccessEnabled") + } + + var r0 *flags_wrapper.FlagsCheckAccessEnabled + var r1 error + 
if rf, ok := ret.Get(0).(func(types.Log) (*flags_wrapper.FlagsCheckAccessEnabled, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flags_wrapper.FlagsCheckAccessEnabled); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsCheckAccessEnabled) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseFlagLowered provides a mock function with given fields: log +func (_m *Flags) ParseFlagLowered(log types.Log) (*flags_wrapper.FlagsFlagLowered, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseFlagLowered") + } + + var r0 *flags_wrapper.FlagsFlagLowered + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flags_wrapper.FlagsFlagLowered, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flags_wrapper.FlagsFlagLowered); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsFlagLowered) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseFlagRaised provides a mock function with given fields: log +func (_m *Flags) ParseFlagRaised(log types.Log) (*flags_wrapper.FlagsFlagRaised, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseFlagRaised") + } + + var r0 *flags_wrapper.FlagsFlagRaised + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flags_wrapper.FlagsFlagRaised, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flags_wrapper.FlagsFlagRaised); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsFlagRaised) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseLog provides 
a mock function with given fields: log +func (_m *Flags) ParseLog(log types.Log) (generated.AbigenLog, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseLog") + } + + var r0 generated.AbigenLog + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (generated.AbigenLog, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) generated.AbigenLog); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(generated.AbigenLog) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOwnershipTransferRequested provides a mock function with given fields: log +func (_m *Flags) ParseOwnershipTransferRequested(log types.Log) (*flags_wrapper.FlagsOwnershipTransferRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOwnershipTransferRequested") + } + + var r0 *flags_wrapper.FlagsOwnershipTransferRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flags_wrapper.FlagsOwnershipTransferRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flags_wrapper.FlagsOwnershipTransferRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsOwnershipTransferRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOwnershipTransferred provides a mock function with given fields: log +func (_m *Flags) ParseOwnershipTransferred(log types.Log) (*flags_wrapper.FlagsOwnershipTransferred, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOwnershipTransferred") + } + + var r0 *flags_wrapper.FlagsOwnershipTransferred + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) 
(*flags_wrapper.FlagsOwnershipTransferred, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flags_wrapper.FlagsOwnershipTransferred); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsOwnershipTransferred) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRaisingAccessControllerUpdated provides a mock function with given fields: log +func (_m *Flags) ParseRaisingAccessControllerUpdated(log types.Log) (*flags_wrapper.FlagsRaisingAccessControllerUpdated, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRaisingAccessControllerUpdated") + } + + var r0 *flags_wrapper.FlagsRaisingAccessControllerUpdated + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flags_wrapper.FlagsRaisingAccessControllerUpdated, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flags_wrapper.FlagsRaisingAccessControllerUpdated); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsRaisingAccessControllerUpdated) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRemovedAccess provides a mock function with given fields: log +func (_m *Flags) ParseRemovedAccess(log types.Log) (*flags_wrapper.FlagsRemovedAccess, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRemovedAccess") + } + + var r0 *flags_wrapper.FlagsRemovedAccess + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flags_wrapper.FlagsRemovedAccess, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flags_wrapper.FlagsRemovedAccess); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flags_wrapper.FlagsRemovedAccess) + } + } + + if rf, 
ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RaiseFlag provides a mock function with given fields: opts, subject +func (_m *Flags) RaiseFlag(opts *bind.TransactOpts, subject common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, subject) + + if len(ret) == 0 { + panic("no return value specified for RaiseFlag") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, subject) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, subject) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, subject) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RaiseFlags provides a mock function with given fields: opts, subjects +func (_m *Flags) RaiseFlags(opts *bind.TransactOpts, subjects []common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, subjects) + + if len(ret) == 0 { + panic("no return value specified for RaiseFlags") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []common.Address) (*types.Transaction, error)); ok { + return rf(opts, subjects) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []common.Address) *types.Transaction); ok { + r0 = rf(opts, subjects) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, []common.Address) error); ok { + r1 = rf(opts, subjects) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RaisingAccessController provides a mock function with given fields: opts +func (_m *Flags) RaisingAccessController(opts *bind.CallOpts) (common.Address, error) { + ret := 
_m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for RaisingAccessController") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoveAccess provides a mock function with given fields: opts, _user +func (_m *Flags) RemoveAccess(opts *bind.TransactOpts, _user common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, _user) + + if len(ret) == 0 { + panic("no return value specified for RemoveAccess") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, _user) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, _user) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, _user) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetRaisingAccessController provides a mock function with given fields: opts, racAddress +func (_m *Flags) SetRaisingAccessController(opts *bind.TransactOpts, racAddress common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, racAddress) + + if len(ret) == 0 { + panic("no return value specified for SetRaisingAccessController") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, racAddress) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) 
*types.Transaction); ok { + r0 = rf(opts, racAddress) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, racAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransferOwnership provides a mock function with given fields: opts, _to +func (_m *Flags) TransferOwnership(opts *bind.TransactOpts, _to common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, _to) + + if len(ret) == 0 { + panic("no return value specified for TransferOwnership") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, _to) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, _to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, _to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchAddedAccess provides a mock function with given fields: opts, sink +func (_m *Flags) WatchAddedAccess(opts *bind.WatchOpts, sink chan<- *flags_wrapper.FlagsAddedAccess) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchAddedAccess") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsAddedAccess) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsAddedAccess) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsAddedAccess) error); ok { + r1 = 
rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchCheckAccessDisabled provides a mock function with given fields: opts, sink +func (_m *Flags) WatchCheckAccessDisabled(opts *bind.WatchOpts, sink chan<- *flags_wrapper.FlagsCheckAccessDisabled) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchCheckAccessDisabled") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsCheckAccessDisabled) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsCheckAccessDisabled) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsCheckAccessDisabled) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchCheckAccessEnabled provides a mock function with given fields: opts, sink +func (_m *Flags) WatchCheckAccessEnabled(opts *bind.WatchOpts, sink chan<- *flags_wrapper.FlagsCheckAccessEnabled) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchCheckAccessEnabled") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsCheckAccessEnabled) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsCheckAccessEnabled) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsCheckAccessEnabled) error); ok { + r1 = rf(opts, sink) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + +// WatchFlagLowered provides a mock function with given fields: opts, sink, subject +func (_m *Flags) WatchFlagLowered(opts *bind.WatchOpts, sink chan<- *flags_wrapper.FlagsFlagLowered, subject []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, subject) + + if len(ret) == 0 { + panic("no return value specified for WatchFlagLowered") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsFlagLowered, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, subject) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsFlagLowered, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, subject) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsFlagLowered, []common.Address) error); ok { + r1 = rf(opts, sink, subject) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchFlagRaised provides a mock function with given fields: opts, sink, subject +func (_m *Flags) WatchFlagRaised(opts *bind.WatchOpts, sink chan<- *flags_wrapper.FlagsFlagRaised, subject []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, subject) + + if len(ret) == 0 { + panic("no return value specified for WatchFlagRaised") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsFlagRaised, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, subject) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsFlagRaised, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, subject) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- 
*flags_wrapper.FlagsFlagRaised, []common.Address) error); ok { + r1 = rf(opts, sink, subject) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOwnershipTransferRequested provides a mock function with given fields: opts, sink, from, to +func (_m *Flags) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *flags_wrapper.FlagsOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, from, to) + + if len(ret) == 0 { + panic("no return value specified for WatchOwnershipTransferRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsOwnershipTransferRequested, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsOwnershipTransferRequested, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsOwnershipTransferRequested, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOwnershipTransferred provides a mock function with given fields: opts, sink, from, to +func (_m *Flags) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *flags_wrapper.FlagsOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, from, to) + + if len(ret) == 0 { + panic("no return value specified for WatchOwnershipTransferred") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsOwnershipTransferred, []common.Address, []common.Address) 
(event.Subscription, error)); ok { + return rf(opts, sink, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsOwnershipTransferred, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsOwnershipTransferred, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRaisingAccessControllerUpdated provides a mock function with given fields: opts, sink, previous, current +func (_m *Flags) WatchRaisingAccessControllerUpdated(opts *bind.WatchOpts, sink chan<- *flags_wrapper.FlagsRaisingAccessControllerUpdated, previous []common.Address, current []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, previous, current) + + if len(ret) == 0 { + panic("no return value specified for WatchRaisingAccessControllerUpdated") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsRaisingAccessControllerUpdated, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, previous, current) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsRaisingAccessControllerUpdated, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, previous, current) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsRaisingAccessControllerUpdated, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, previous, current) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRemovedAccess provides a mock function with given fields: opts, sink +func (_m *Flags) 
WatchRemovedAccess(opts *bind.WatchOpts, sink chan<- *flags_wrapper.FlagsRemovedAccess) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchRemovedAccess") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsRemovedAccess) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsRemovedAccess) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flags_wrapper.FlagsRemovedAccess) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewFlags creates a new instance of Flags. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFlags(t interface { + mock.TestingT + Cleanup(func()) +}) *Flags { + mock := &Flags{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/internal/mocks/flux_aggregator.go b/core/internal/mocks/flux_aggregator.go new file mode 100644 index 00000000..e845e7d4 --- /dev/null +++ b/core/internal/mocks/flux_aggregator.go @@ -0,0 +1,2379 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + bind "github.com/ethereum/go-ethereum/accounts/abi/bind" + common "github.com/ethereum/go-ethereum/common" + + event "github.com/ethereum/go-ethereum/event" + + flux_aggregator_wrapper "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + + generated "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// FluxAggregator is an autogenerated mock type for the FluxAggregatorInterface type +type FluxAggregator struct { + mock.Mock +} + +// AcceptAdmin provides a mock function with given fields: opts, _oracle +func (_m *FluxAggregator) AcceptAdmin(opts *bind.TransactOpts, _oracle common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, _oracle) + + if len(ret) == 0 { + panic("no return value specified for AcceptAdmin") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, _oracle) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, _oracle) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, _oracle) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AcceptOwnership provides a mock function with given fields: opts +func (_m *FluxAggregator) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for AcceptOwnership") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) (*types.Transaction, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) *types.Transaction); ok { 
+ r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Address provides a mock function with given fields: +func (_m *FluxAggregator) Address() common.Address { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 common.Address + if rf, ok := ret.Get(0).(func() common.Address); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + return r0 +} + +// AllocatedFunds provides a mock function with given fields: opts +func (_m *FluxAggregator) AllocatedFunds(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for AllocatedFunds") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AvailableFunds provides a mock function with given fields: opts +func (_m *FluxAggregator) AvailableFunds(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for AvailableFunds") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + 
+// ChangeOracles provides a mock function with given fields: opts, _removed, _added, _addedAdmins, _minSubmissions, _maxSubmissions, _restartDelay +func (_m *FluxAggregator) ChangeOracles(opts *bind.TransactOpts, _removed []common.Address, _added []common.Address, _addedAdmins []common.Address, _minSubmissions uint32, _maxSubmissions uint32, _restartDelay uint32) (*types.Transaction, error) { + ret := _m.Called(opts, _removed, _added, _addedAdmins, _minSubmissions, _maxSubmissions, _restartDelay) + + if len(ret) == 0 { + panic("no return value specified for ChangeOracles") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []common.Address, []common.Address, []common.Address, uint32, uint32, uint32) (*types.Transaction, error)); ok { + return rf(opts, _removed, _added, _addedAdmins, _minSubmissions, _maxSubmissions, _restartDelay) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []common.Address, []common.Address, []common.Address, uint32, uint32, uint32) *types.Transaction); ok { + r0 = rf(opts, _removed, _added, _addedAdmins, _minSubmissions, _maxSubmissions, _restartDelay) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, []common.Address, []common.Address, []common.Address, uint32, uint32, uint32) error); ok { + r1 = rf(opts, _removed, _added, _addedAdmins, _minSubmissions, _maxSubmissions, _restartDelay) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Decimals provides a mock function with given fields: opts +func (_m *FluxAggregator) Decimals(opts *bind.CallOpts) (uint8, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Decimals") + } + + var r0 uint8 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint8, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint8); ok { + r0 = rf(opts) + } else { + r0 = 
ret.Get(0).(uint8) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Description provides a mock function with given fields: opts +func (_m *FluxAggregator) Description(opts *bind.CallOpts) (string, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Description") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (string, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) string); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterAnswerUpdated provides a mock function with given fields: opts, current, roundId +func (_m *FluxAggregator) FilterAnswerUpdated(opts *bind.FilterOpts, current []*big.Int, roundId []*big.Int) (*flux_aggregator_wrapper.FluxAggregatorAnswerUpdatedIterator, error) { + ret := _m.Called(opts, current, roundId) + + if len(ret) == 0 { + panic("no return value specified for FilterAnswerUpdated") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorAnswerUpdatedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []*big.Int) (*flux_aggregator_wrapper.FluxAggregatorAnswerUpdatedIterator, error)); ok { + return rf(opts, current, roundId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []*big.Int) *flux_aggregator_wrapper.FluxAggregatorAnswerUpdatedIterator); ok { + r0 = rf(opts, current, roundId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorAnswerUpdatedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int, []*big.Int) error); ok { + r1 = rf(opts, current, roundId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterAvailableFundsUpdated provides a 
mock function with given fields: opts, amount +func (_m *FluxAggregator) FilterAvailableFundsUpdated(opts *bind.FilterOpts, amount []*big.Int) (*flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdatedIterator, error) { + ret := _m.Called(opts, amount) + + if len(ret) == 0 { + panic("no return value specified for FilterAvailableFundsUpdated") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdatedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdatedIterator, error)); ok { + return rf(opts, amount) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdatedIterator); ok { + r0 = rf(opts, amount) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdatedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, amount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterNewRound provides a mock function with given fields: opts, roundId, startedBy +func (_m *FluxAggregator) FilterNewRound(opts *bind.FilterOpts, roundId []*big.Int, startedBy []common.Address) (*flux_aggregator_wrapper.FluxAggregatorNewRoundIterator, error) { + ret := _m.Called(opts, roundId, startedBy) + + if len(ret) == 0 { + panic("no return value specified for FilterNewRound") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorNewRoundIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []common.Address) (*flux_aggregator_wrapper.FluxAggregatorNewRoundIterator, error)); ok { + return rf(opts, roundId, startedBy) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []common.Address) *flux_aggregator_wrapper.FluxAggregatorNewRoundIterator); ok { + r0 = rf(opts, roundId, startedBy) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorNewRoundIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int, []common.Address) error); ok { + r1 = rf(opts, roundId, startedBy) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOracleAdminUpdateRequested provides a mock function with given fields: opts, oracle +func (_m *FluxAggregator) FilterOracleAdminUpdateRequested(opts *bind.FilterOpts, oracle []common.Address) (*flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequestedIterator, error) { + ret := _m.Called(opts, oracle) + + if len(ret) == 0 { + panic("no return value specified for FilterOracleAdminUpdateRequested") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequestedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address) (*flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequestedIterator, error)); ok { + return rf(opts, oracle) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address) *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequestedIterator); ok { + r0 = rf(opts, oracle) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address) error); ok { + r1 = rf(opts, oracle) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOracleAdminUpdated provides a mock function with given fields: opts, oracle, newAdmin +func (_m *FluxAggregator) FilterOracleAdminUpdated(opts *bind.FilterOpts, oracle []common.Address, newAdmin []common.Address) (*flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdatedIterator, error) { + ret := _m.Called(opts, oracle, newAdmin) + + if len(ret) == 0 { + panic("no return value specified for FilterOracleAdminUpdated") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdatedIterator + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdatedIterator, error)); ok { + return rf(opts, oracle, newAdmin) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdatedIterator); ok { + r0 = rf(opts, oracle, newAdmin) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdatedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, oracle, newAdmin) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOraclePermissionsUpdated provides a mock function with given fields: opts, oracle, whitelisted +func (_m *FluxAggregator) FilterOraclePermissionsUpdated(opts *bind.FilterOpts, oracle []common.Address, whitelisted []bool) (*flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdatedIterator, error) { + ret := _m.Called(opts, oracle, whitelisted) + + if len(ret) == 0 { + panic("no return value specified for FilterOraclePermissionsUpdated") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdatedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []bool) (*flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdatedIterator, error)); ok { + return rf(opts, oracle, whitelisted) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []bool) *flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdatedIterator); ok { + r0 = rf(opts, oracle, whitelisted) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdatedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []bool) error); ok { + r1 = rf(opts, oracle, whitelisted) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
FilterOwnershipTransferRequested provides a mock function with given fields: opts, from, to +func (_m *FluxAggregator) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequestedIterator, error) { + ret := _m.Called(opts, from, to) + + if len(ret) == 0 { + panic("no return value specified for FilterOwnershipTransferRequested") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequestedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequestedIterator, error)); ok { + return rf(opts, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequestedIterator); ok { + r0 = rf(opts, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOwnershipTransferred provides a mock function with given fields: opts, from, to +func (_m *FluxAggregator) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferredIterator, error) { + ret := _m.Called(opts, from, to) + + if len(ret) == 0 { + panic("no return value specified for FilterOwnershipTransferred") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferredIterator, error)); ok { + return rf(opts, from, to) + } + if rf, ok := 
ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferredIterator); ok { + r0 = rf(opts, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRequesterPermissionsSet provides a mock function with given fields: opts, requester +func (_m *FluxAggregator) FilterRequesterPermissionsSet(opts *bind.FilterOpts, requester []common.Address) (*flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSetIterator, error) { + ret := _m.Called(opts, requester) + + if len(ret) == 0 { + panic("no return value specified for FilterRequesterPermissionsSet") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSetIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address) (*flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSetIterator, error)); ok { + return rf(opts, requester) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address) *flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSetIterator); ok { + r0 = rf(opts, requester) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSetIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address) error); ok { + r1 = rf(opts, requester) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRoundDetailsUpdated provides a mock function with given fields: opts, paymentAmount, minSubmissionCount, maxSubmissionCount +func (_m *FluxAggregator) FilterRoundDetailsUpdated(opts *bind.FilterOpts, paymentAmount []*big.Int, minSubmissionCount []uint32, maxSubmissionCount []uint32) 
(*flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdatedIterator, error) { + ret := _m.Called(opts, paymentAmount, minSubmissionCount, maxSubmissionCount) + + if len(ret) == 0 { + panic("no return value specified for FilterRoundDetailsUpdated") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdatedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []uint32, []uint32) (*flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdatedIterator, error)); ok { + return rf(opts, paymentAmount, minSubmissionCount, maxSubmissionCount) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []uint32, []uint32) *flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdatedIterator); ok { + r0 = rf(opts, paymentAmount, minSubmissionCount, maxSubmissionCount) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdatedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int, []uint32, []uint32) error); ok { + r1 = rf(opts, paymentAmount, minSubmissionCount, maxSubmissionCount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubmissionReceived provides a mock function with given fields: opts, submission, round, oracle +func (_m *FluxAggregator) FilterSubmissionReceived(opts *bind.FilterOpts, submission []*big.Int, round []uint32, oracle []common.Address) (*flux_aggregator_wrapper.FluxAggregatorSubmissionReceivedIterator, error) { + ret := _m.Called(opts, submission, round, oracle) + + if len(ret) == 0 { + panic("no return value specified for FilterSubmissionReceived") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorSubmissionReceivedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []uint32, []common.Address) (*flux_aggregator_wrapper.FluxAggregatorSubmissionReceivedIterator, error)); ok { + return rf(opts, submission, round, oracle) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, 
[]*big.Int, []uint32, []common.Address) *flux_aggregator_wrapper.FluxAggregatorSubmissionReceivedIterator); ok { + r0 = rf(opts, submission, round, oracle) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorSubmissionReceivedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int, []uint32, []common.Address) error); ok { + r1 = rf(opts, submission, round, oracle) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterValidatorUpdated provides a mock function with given fields: opts, previous, current +func (_m *FluxAggregator) FilterValidatorUpdated(opts *bind.FilterOpts, previous []common.Address, current []common.Address) (*flux_aggregator_wrapper.FluxAggregatorValidatorUpdatedIterator, error) { + ret := _m.Called(opts, previous, current) + + if len(ret) == 0 { + panic("no return value specified for FilterValidatorUpdated") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorValidatorUpdatedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*flux_aggregator_wrapper.FluxAggregatorValidatorUpdatedIterator, error)); ok { + return rf(opts, previous, current) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *flux_aggregator_wrapper.FluxAggregatorValidatorUpdatedIterator); ok { + r0 = rf(opts, previous, current) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorValidatorUpdatedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, previous, current) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAdmin provides a mock function with given fields: opts, _oracle +func (_m *FluxAggregator) GetAdmin(opts *bind.CallOpts, _oracle common.Address) (common.Address, error) { + ret := _m.Called(opts, _oracle) + + if len(ret) == 0 { + panic("no return value 
specified for GetAdmin") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, common.Address) (common.Address, error)); ok { + return rf(opts, _oracle) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, common.Address) common.Address); ok { + r0 = rf(opts, _oracle) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, common.Address) error); ok { + r1 = rf(opts, _oracle) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAnswer provides a mock function with given fields: opts, _roundId +func (_m *FluxAggregator) GetAnswer(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) { + ret := _m.Called(opts, _roundId) + + if len(ret) == 0 { + panic("no return value specified for GetAnswer") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) (*big.Int, error)); ok { + return rf(opts, _roundId) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) *big.Int); ok { + r0 = rf(opts, _roundId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); ok { + r1 = rf(opts, _roundId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetOracles provides a mock function with given fields: opts +func (_m *FluxAggregator) GetOracles(opts *bind.CallOpts) ([]common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for GetOracles") + } + + var r0 []common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) ([]common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) []common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + 
return r0, r1 +} + +// GetRoundData provides a mock function with given fields: opts, _roundId +func (_m *FluxAggregator) GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (flux_aggregator_wrapper.GetRoundData, error) { + ret := _m.Called(opts, _roundId) + + if len(ret) == 0 { + panic("no return value specified for GetRoundData") + } + + var r0 flux_aggregator_wrapper.GetRoundData + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) (flux_aggregator_wrapper.GetRoundData, error)); ok { + return rf(opts, _roundId) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) flux_aggregator_wrapper.GetRoundData); ok { + r0 = rf(opts, _roundId) + } else { + r0 = ret.Get(0).(flux_aggregator_wrapper.GetRoundData) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); ok { + r1 = rf(opts, _roundId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTimestamp provides a mock function with given fields: opts, _roundId +func (_m *FluxAggregator) GetTimestamp(opts *bind.CallOpts, _roundId *big.Int) (*big.Int, error) { + ret := _m.Called(opts, _roundId) + + if len(ret) == 0 { + panic("no return value specified for GetTimestamp") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) (*big.Int, error)); ok { + return rf(opts, _roundId) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) *big.Int); ok { + r0 = rf(opts, _roundId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); ok { + r1 = rf(opts, _roundId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestAnswer provides a mock function with given fields: opts +func (_m *FluxAggregator) LatestAnswer(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for LatestAnswer") + } + + var r0 *big.Int + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestRound provides a mock function with given fields: opts +func (_m *FluxAggregator) LatestRound(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for LatestRound") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestRoundData provides a mock function with given fields: opts +func (_m *FluxAggregator) LatestRoundData(opts *bind.CallOpts) (flux_aggregator_wrapper.LatestRoundData, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for LatestRoundData") + } + + var r0 flux_aggregator_wrapper.LatestRoundData + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (flux_aggregator_wrapper.LatestRoundData, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) flux_aggregator_wrapper.LatestRoundData); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(flux_aggregator_wrapper.LatestRoundData) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestTimestamp provides a mock function with given fields: opts +func (_m *FluxAggregator) LatestTimestamp(opts *bind.CallOpts) (*big.Int, error) 
{ + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for LatestTimestamp") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LinkToken provides a mock function with given fields: opts +func (_m *FluxAggregator) LinkToken(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for LinkToken") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MaxSubmissionCount provides a mock function with given fields: opts +func (_m *FluxAggregator) MaxSubmissionCount(opts *bind.CallOpts) (uint32, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for MaxSubmissionCount") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint32, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint32); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MaxSubmissionValue provides a mock function with given fields: opts +func (_m *FluxAggregator) MaxSubmissionValue(opts 
*bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for MaxSubmissionValue") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MinSubmissionCount provides a mock function with given fields: opts +func (_m *FluxAggregator) MinSubmissionCount(opts *bind.CallOpts) (uint32, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for MinSubmissionCount") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint32, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint32); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MinSubmissionValue provides a mock function with given fields: opts +func (_m *FluxAggregator) MinSubmissionValue(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for MinSubmissionValue") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OnTokenTransfer provides a mock function with given fields: opts, arg0, arg1, _data 
+func (_m *FluxAggregator) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, arg1 *big.Int, _data []byte) (*types.Transaction, error) { + ret := _m.Called(opts, arg0, arg1, _data) + + if len(ret) == 0 { + panic("no return value specified for OnTokenTransfer") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int, []byte) (*types.Transaction, error)); ok { + return rf(opts, arg0, arg1, _data) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int, []byte) *types.Transaction); ok { + r0 = rf(opts, arg0, arg1, _data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, *big.Int, []byte) error); ok { + r1 = rf(opts, arg0, arg1, _data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OracleCount provides a mock function with given fields: opts +func (_m *FluxAggregator) OracleCount(opts *bind.CallOpts) (uint8, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for OracleCount") + } + + var r0 uint8 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint8, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint8); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint8) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OracleRoundState provides a mock function with given fields: opts, _oracle, _queriedRoundId +func (_m *FluxAggregator) OracleRoundState(opts *bind.CallOpts, _oracle common.Address, _queriedRoundId uint32) (flux_aggregator_wrapper.OracleRoundState, error) { + ret := _m.Called(opts, _oracle, _queriedRoundId) + + if len(ret) == 0 { + panic("no return value specified for OracleRoundState") + } + + var r0 flux_aggregator_wrapper.OracleRoundState + var r1 error + 
if rf, ok := ret.Get(0).(func(*bind.CallOpts, common.Address, uint32) (flux_aggregator_wrapper.OracleRoundState, error)); ok { + return rf(opts, _oracle, _queriedRoundId) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, common.Address, uint32) flux_aggregator_wrapper.OracleRoundState); ok { + r0 = rf(opts, _oracle, _queriedRoundId) + } else { + r0 = ret.Get(0).(flux_aggregator_wrapper.OracleRoundState) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, common.Address, uint32) error); ok { + r1 = rf(opts, _oracle, _queriedRoundId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Owner provides a mock function with given fields: opts +func (_m *FluxAggregator) Owner(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Owner") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseAnswerUpdated provides a mock function with given fields: log +func (_m *FluxAggregator) ParseAnswerUpdated(log types.Log) (*flux_aggregator_wrapper.FluxAggregatorAnswerUpdated, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseAnswerUpdated") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorAnswerUpdated + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flux_aggregator_wrapper.FluxAggregatorAnswerUpdated, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flux_aggregator_wrapper.FluxAggregatorAnswerUpdated); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorAnswerUpdated) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseAvailableFundsUpdated provides a mock function with given fields: log +func (_m *FluxAggregator) ParseAvailableFundsUpdated(log types.Log) (*flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdated, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseAvailableFundsUpdated") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdated + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdated, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdated); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdated) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseLog provides a mock function with given fields: log +func (_m *FluxAggregator) ParseLog(log types.Log) (generated.AbigenLog, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseLog") + } + + var r0 generated.AbigenLog + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (generated.AbigenLog, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) generated.AbigenLog); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(generated.AbigenLog) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseNewRound provides a mock function with given fields: log +func (_m *FluxAggregator) ParseNewRound(log types.Log) 
(*flux_aggregator_wrapper.FluxAggregatorNewRound, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseNewRound") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorNewRound + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flux_aggregator_wrapper.FluxAggregatorNewRound, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flux_aggregator_wrapper.FluxAggregatorNewRound); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorNewRound) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOracleAdminUpdateRequested provides a mock function with given fields: log +func (_m *FluxAggregator) ParseOracleAdminUpdateRequested(log types.Log) (*flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOracleAdminUpdateRequested") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOracleAdminUpdated provides a mock function with given fields: log +func (_m *FluxAggregator) ParseOracleAdminUpdated(log types.Log) (*flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdated, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value 
specified for ParseOracleAdminUpdated") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdated + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdated, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdated); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdated) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOraclePermissionsUpdated provides a mock function with given fields: log +func (_m *FluxAggregator) ParseOraclePermissionsUpdated(log types.Log) (*flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdated, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOraclePermissionsUpdated") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdated + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdated, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdated); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdated) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOwnershipTransferRequested provides a mock function with given fields: log +func (_m *FluxAggregator) ParseOwnershipTransferRequested(log types.Log) (*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOwnershipTransferRequested") + } + + var r0 
*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOwnershipTransferred provides a mock function with given fields: log +func (_m *FluxAggregator) ParseOwnershipTransferred(log types.Log) (*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferred, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOwnershipTransferred") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferred + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferred, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferred); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferred) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRequesterPermissionsSet provides a mock function with given fields: log +func (_m *FluxAggregator) ParseRequesterPermissionsSet(log types.Log) (*flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSet, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRequesterPermissionsSet") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSet + var r1 error + 
if rf, ok := ret.Get(0).(func(types.Log) (*flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSet, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSet); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSet) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRoundDetailsUpdated provides a mock function with given fields: log +func (_m *FluxAggregator) ParseRoundDetailsUpdated(log types.Log) (*flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdated, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRoundDetailsUpdated") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdated + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdated, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdated); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdated) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubmissionReceived provides a mock function with given fields: log +func (_m *FluxAggregator) ParseSubmissionReceived(log types.Log) (*flux_aggregator_wrapper.FluxAggregatorSubmissionReceived, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubmissionReceived") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorSubmissionReceived + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flux_aggregator_wrapper.FluxAggregatorSubmissionReceived, error)); ok { + return 
rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flux_aggregator_wrapper.FluxAggregatorSubmissionReceived); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorSubmissionReceived) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseValidatorUpdated provides a mock function with given fields: log +func (_m *FluxAggregator) ParseValidatorUpdated(log types.Log) (*flux_aggregator_wrapper.FluxAggregatorValidatorUpdated, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseValidatorUpdated") + } + + var r0 *flux_aggregator_wrapper.FluxAggregatorValidatorUpdated + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*flux_aggregator_wrapper.FluxAggregatorValidatorUpdated, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *flux_aggregator_wrapper.FluxAggregatorValidatorUpdated); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flux_aggregator_wrapper.FluxAggregatorValidatorUpdated) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PaymentAmount provides a mock function with given fields: opts +func (_m *FluxAggregator) PaymentAmount(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for PaymentAmount") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RequestNewRound 
provides a mock function with given fields: opts +func (_m *FluxAggregator) RequestNewRound(opts *bind.TransactOpts) (*types.Transaction, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for RequestNewRound") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) (*types.Transaction, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) *types.Transaction); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RestartDelay provides a mock function with given fields: opts +func (_m *FluxAggregator) RestartDelay(opts *bind.CallOpts) (uint32, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for RestartDelay") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint32, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint32); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetRequesterPermissions provides a mock function with given fields: opts, _requester, _authorized, _delay +func (_m *FluxAggregator) SetRequesterPermissions(opts *bind.TransactOpts, _requester common.Address, _authorized bool, _delay uint32) (*types.Transaction, error) { + ret := _m.Called(opts, _requester, _authorized, _delay) + + if len(ret) == 0 { + panic("no return value specified for SetRequesterPermissions") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, bool, uint32) (*types.Transaction, error)); ok { + return rf(opts, _requester, _authorized, 
_delay) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, bool, uint32) *types.Transaction); ok { + r0 = rf(opts, _requester, _authorized, _delay) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, bool, uint32) error); ok { + r1 = rf(opts, _requester, _authorized, _delay) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetValidator provides a mock function with given fields: opts, _newValidator +func (_m *FluxAggregator) SetValidator(opts *bind.TransactOpts, _newValidator common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, _newValidator) + + if len(ret) == 0 { + panic("no return value specified for SetValidator") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, _newValidator) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, _newValidator) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, _newValidator) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Submit provides a mock function with given fields: opts, _roundId, _submission +func (_m *FluxAggregator) Submit(opts *bind.TransactOpts, _roundId *big.Int, _submission *big.Int) (*types.Transaction, error) { + ret := _m.Called(opts, _roundId, _submission) + + if len(ret) == 0 { + panic("no return value specified for Submit") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, *big.Int) (*types.Transaction, error)); ok { + return rf(opts, _roundId, _submission) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, *big.Int) *types.Transaction); ok { + r0 = rf(opts, 
_roundId, _submission) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, *big.Int, *big.Int) error); ok { + r1 = rf(opts, _roundId, _submission) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Timeout provides a mock function with given fields: opts +func (_m *FluxAggregator) Timeout(opts *bind.CallOpts) (uint32, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Timeout") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint32, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint32); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransferAdmin provides a mock function with given fields: opts, _oracle, _newAdmin +func (_m *FluxAggregator) TransferAdmin(opts *bind.TransactOpts, _oracle common.Address, _newAdmin common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, _oracle, _newAdmin) + + if len(ret) == 0 { + panic("no return value specified for TransferAdmin") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, common.Address) (*types.Transaction, error)); ok { + return rf(opts, _oracle, _newAdmin) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, common.Address) *types.Transaction); ok { + r0 = rf(opts, _oracle, _newAdmin) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, common.Address) error); ok { + r1 = rf(opts, _oracle, _newAdmin) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransferOwnership provides a mock function with given fields: opts, _to +func (_m *FluxAggregator) 
TransferOwnership(opts *bind.TransactOpts, _to common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, _to) + + if len(ret) == 0 { + panic("no return value specified for TransferOwnership") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, _to) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, _to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, _to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateAvailableFunds provides a mock function with given fields: opts +func (_m *FluxAggregator) UpdateAvailableFunds(opts *bind.TransactOpts) (*types.Transaction, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for UpdateAvailableFunds") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) (*types.Transaction, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) *types.Transaction); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateFutureRounds provides a mock function with given fields: opts, _paymentAmount, _minSubmissions, _maxSubmissions, _restartDelay, _timeout +func (_m *FluxAggregator) UpdateFutureRounds(opts *bind.TransactOpts, _paymentAmount *big.Int, _minSubmissions uint32, _maxSubmissions uint32, _restartDelay uint32, _timeout uint32) (*types.Transaction, error) { + ret := _m.Called(opts, _paymentAmount, _minSubmissions, _maxSubmissions, _restartDelay, _timeout) + + if len(ret) == 0 { + 
panic("no return value specified for UpdateFutureRounds") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, uint32, uint32, uint32, uint32) (*types.Transaction, error)); ok { + return rf(opts, _paymentAmount, _minSubmissions, _maxSubmissions, _restartDelay, _timeout) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, uint32, uint32, uint32, uint32) *types.Transaction); ok { + r0 = rf(opts, _paymentAmount, _minSubmissions, _maxSubmissions, _restartDelay, _timeout) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, *big.Int, uint32, uint32, uint32, uint32) error); ok { + r1 = rf(opts, _paymentAmount, _minSubmissions, _maxSubmissions, _restartDelay, _timeout) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Validator provides a mock function with given fields: opts +func (_m *FluxAggregator) Validator(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Validator") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Version provides a mock function with given fields: opts +func (_m *FluxAggregator) Version(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Version") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) 
*big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchAnswerUpdated provides a mock function with given fields: opts, sink, current, roundId +func (_m *FluxAggregator) WatchAnswerUpdated(opts *bind.WatchOpts, sink chan<- *flux_aggregator_wrapper.FluxAggregatorAnswerUpdated, current []*big.Int, roundId []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, current, roundId) + + if len(ret) == 0 { + panic("no return value specified for WatchAnswerUpdated") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorAnswerUpdated, []*big.Int, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, current, roundId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorAnswerUpdated, []*big.Int, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, current, roundId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorAnswerUpdated, []*big.Int, []*big.Int) error); ok { + r1 = rf(opts, sink, current, roundId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchAvailableFundsUpdated provides a mock function with given fields: opts, sink, amount +func (_m *FluxAggregator) WatchAvailableFundsUpdated(opts *bind.WatchOpts, sink chan<- *flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdated, amount []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, amount) + + if len(ret) == 0 { + panic("no return value specified for WatchAvailableFundsUpdated") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- 
*flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdated, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, amount) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdated, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, amount) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorAvailableFundsUpdated, []*big.Int) error); ok { + r1 = rf(opts, sink, amount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchNewRound provides a mock function with given fields: opts, sink, roundId, startedBy +func (_m *FluxAggregator) WatchNewRound(opts *bind.WatchOpts, sink chan<- *flux_aggregator_wrapper.FluxAggregatorNewRound, roundId []*big.Int, startedBy []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, roundId, startedBy) + + if len(ret) == 0 { + panic("no return value specified for WatchNewRound") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorNewRound, []*big.Int, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, roundId, startedBy) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorNewRound, []*big.Int, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, roundId, startedBy) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorNewRound, []*big.Int, []common.Address) error); ok { + r1 = rf(opts, sink, roundId, startedBy) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOracleAdminUpdateRequested provides a mock function with given fields: opts, sink, oracle +func (_m *FluxAggregator) 
WatchOracleAdminUpdateRequested(opts *bind.WatchOpts, sink chan<- *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequested, oracle []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, oracle) + + if len(ret) == 0 { + panic("no return value specified for WatchOracleAdminUpdateRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequested, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, oracle) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequested, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, oracle) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdateRequested, []common.Address) error); ok { + r1 = rf(opts, sink, oracle) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOracleAdminUpdated provides a mock function with given fields: opts, sink, oracle, newAdmin +func (_m *FluxAggregator) WatchOracleAdminUpdated(opts *bind.WatchOpts, sink chan<- *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdated, oracle []common.Address, newAdmin []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, oracle, newAdmin) + + if len(ret) == 0 { + panic("no return value specified for WatchOracleAdminUpdated") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdated, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, oracle, newAdmin) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdated, []common.Address, 
[]common.Address) event.Subscription); ok { + r0 = rf(opts, sink, oracle, newAdmin) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOracleAdminUpdated, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, oracle, newAdmin) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOraclePermissionsUpdated provides a mock function with given fields: opts, sink, oracle, whitelisted +func (_m *FluxAggregator) WatchOraclePermissionsUpdated(opts *bind.WatchOpts, sink chan<- *flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdated, oracle []common.Address, whitelisted []bool) (event.Subscription, error) { + ret := _m.Called(opts, sink, oracle, whitelisted) + + if len(ret) == 0 { + panic("no return value specified for WatchOraclePermissionsUpdated") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdated, []common.Address, []bool) (event.Subscription, error)); ok { + return rf(opts, sink, oracle, whitelisted) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdated, []common.Address, []bool) event.Subscription); ok { + r0 = rf(opts, sink, oracle, whitelisted) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOraclePermissionsUpdated, []common.Address, []bool) error); ok { + r1 = rf(opts, sink, oracle, whitelisted) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOwnershipTransferRequested provides a mock function with given fields: opts, sink, from, to +func (_m *FluxAggregator) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- 
*flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, from, to) + + if len(ret) == 0 { + panic("no return value specified for WatchOwnershipTransferRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequested, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequested, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferRequested, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOwnershipTransferred provides a mock function with given fields: opts, sink, from, to +func (_m *FluxAggregator) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, from, to) + + if len(ret) == 0 { + panic("no return value specified for WatchOwnershipTransferred") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferred, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferred, []common.Address, []common.Address) 
event.Subscription); ok { + r0 = rf(opts, sink, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorOwnershipTransferred, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRequesterPermissionsSet provides a mock function with given fields: opts, sink, requester +func (_m *FluxAggregator) WatchRequesterPermissionsSet(opts *bind.WatchOpts, sink chan<- *flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSet, requester []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, requester) + + if len(ret) == 0 { + panic("no return value specified for WatchRequesterPermissionsSet") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSet, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, requester) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSet, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, requester) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorRequesterPermissionsSet, []common.Address) error); ok { + r1 = rf(opts, sink, requester) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRoundDetailsUpdated provides a mock function with given fields: opts, sink, paymentAmount, minSubmissionCount, maxSubmissionCount +func (_m *FluxAggregator) WatchRoundDetailsUpdated(opts *bind.WatchOpts, sink chan<- *flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdated, paymentAmount []*big.Int, minSubmissionCount []uint32, maxSubmissionCount []uint32) 
(event.Subscription, error) { + ret := _m.Called(opts, sink, paymentAmount, minSubmissionCount, maxSubmissionCount) + + if len(ret) == 0 { + panic("no return value specified for WatchRoundDetailsUpdated") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdated, []*big.Int, []uint32, []uint32) (event.Subscription, error)); ok { + return rf(opts, sink, paymentAmount, minSubmissionCount, maxSubmissionCount) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdated, []*big.Int, []uint32, []uint32) event.Subscription); ok { + r0 = rf(opts, sink, paymentAmount, minSubmissionCount, maxSubmissionCount) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorRoundDetailsUpdated, []*big.Int, []uint32, []uint32) error); ok { + r1 = rf(opts, sink, paymentAmount, minSubmissionCount, maxSubmissionCount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubmissionReceived provides a mock function with given fields: opts, sink, submission, round, oracle +func (_m *FluxAggregator) WatchSubmissionReceived(opts *bind.WatchOpts, sink chan<- *flux_aggregator_wrapper.FluxAggregatorSubmissionReceived, submission []*big.Int, round []uint32, oracle []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, submission, round, oracle) + + if len(ret) == 0 { + panic("no return value specified for WatchSubmissionReceived") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorSubmissionReceived, []*big.Int, []uint32, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, submission, round, oracle) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- 
*flux_aggregator_wrapper.FluxAggregatorSubmissionReceived, []*big.Int, []uint32, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, submission, round, oracle) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorSubmissionReceived, []*big.Int, []uint32, []common.Address) error); ok { + r1 = rf(opts, sink, submission, round, oracle) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchValidatorUpdated provides a mock function with given fields: opts, sink, previous, current +func (_m *FluxAggregator) WatchValidatorUpdated(opts *bind.WatchOpts, sink chan<- *flux_aggregator_wrapper.FluxAggregatorValidatorUpdated, previous []common.Address, current []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, previous, current) + + if len(ret) == 0 { + panic("no return value specified for WatchValidatorUpdated") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorValidatorUpdated, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, previous, current) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorValidatorUpdated, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, previous, current) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *flux_aggregator_wrapper.FluxAggregatorValidatorUpdated, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, previous, current) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WithdrawFunds provides a mock function with given fields: opts, _recipient, _amount +func (_m *FluxAggregator) WithdrawFunds(opts *bind.TransactOpts, _recipient 
common.Address, _amount *big.Int) (*types.Transaction, error) { + ret := _m.Called(opts, _recipient, _amount) + + if len(ret) == 0 { + panic("no return value specified for WithdrawFunds") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int) (*types.Transaction, error)); ok { + return rf(opts, _recipient, _amount) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int) *types.Transaction); ok { + r0 = rf(opts, _recipient, _amount) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, *big.Int) error); ok { + r1 = rf(opts, _recipient, _amount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WithdrawPayment provides a mock function with given fields: opts, _oracle, _recipient, _amount +func (_m *FluxAggregator) WithdrawPayment(opts *bind.TransactOpts, _oracle common.Address, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + ret := _m.Called(opts, _oracle, _recipient, _amount) + + if len(ret) == 0 { + panic("no return value specified for WithdrawPayment") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, common.Address, *big.Int) (*types.Transaction, error)); ok { + return rf(opts, _oracle, _recipient, _amount) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, common.Address, *big.Int) *types.Transaction); ok { + r0 = rf(opts, _oracle, _recipient, _amount) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, common.Address, *big.Int) error); ok { + r1 = rf(opts, _oracle, _recipient, _amount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WithdrawablePayment provides a mock function with given fields: opts, _oracle +func (_m 
*FluxAggregator) WithdrawablePayment(opts *bind.CallOpts, _oracle common.Address) (*big.Int, error) { + ret := _m.Called(opts, _oracle) + + if len(ret) == 0 { + panic("no return value specified for WithdrawablePayment") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, common.Address) (*big.Int, error)); ok { + return rf(opts, _oracle) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, common.Address) *big.Int); ok { + r0 = rf(opts, _oracle) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, common.Address) error); ok { + r1 = rf(opts, _oracle) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewFluxAggregator creates a new instance of FluxAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFluxAggregator(t interface { + mock.TestingT + Cleanup(func()) +}) *FluxAggregator { + mock := &FluxAggregator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/internal/mocks/go_generate.go b/core/internal/mocks/go_generate.go new file mode 100644 index 00000000..4e1827df --- /dev/null +++ b/core/internal/mocks/go_generate.go @@ -0,0 +1,8 @@ +package mocks + +//go:generate mockery --quiet --srcpkg github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper --name FluxAggregatorInterface --output . --case=underscore --structname FluxAggregator --filename flux_aggregator.go +//go:generate mockery --quiet --srcpkg github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flags_wrapper --name FlagsInterface --output . 
--case=underscore --structname Flags --filename flags.go +//go:generate mockery --quiet --srcpkg github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/aggregator_v3_interface --name AggregatorV3InterfaceInterface --output ../../services/vrf/mocks/ --case=underscore --structname AggregatorV3Interface --filename aggregator_v3_interface.go +//go:generate mockery --quiet --srcpkg github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2 --name VRFCoordinatorV2Interface --output ../../services/vrf/mocks/ --case=underscore --structname VRFCoordinatorV2Interface --filename vrf_coordinator_v2.go +//go:generate mockery --quiet --srcpkg github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon --name VRFBeaconInterface --output ../../services/ocr2/plugins/ocr2vrf/coordinator/mocks --case=underscore --structname VRFBeaconInterface --filename vrf_beacon.go +//go:generate mockery --quiet --srcpkg github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_coordinator --name VRFCoordinatorInterface --output ../../services/ocr2/plugins/ocr2vrf/coordinator/mocks --case=underscore --structname VRFCoordinatorInterface --filename vrf_coordinator.go diff --git a/core/internal/mocks/prometheus_backend.go b/core/internal/mocks/prometheus_backend.go new file mode 100644 index 00000000..6573dbaf --- /dev/null +++ b/core/internal/mocks/prometheus_backend.go @@ -0,0 +1,53 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + mock "github.com/stretchr/testify/mock" +) + +// PrometheusBackend is an autogenerated mock type for the PrometheusBackend type +type PrometheusBackend struct { + mock.Mock +} + +// SetMaxUnconfirmedAge provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetMaxUnconfirmedAge(_a0 *big.Int, _a1 float64) { + _m.Called(_a0, _a1) +} + +// SetMaxUnconfirmedBlocks provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetMaxUnconfirmedBlocks(_a0 *big.Int, _a1 int64) { + _m.Called(_a0, _a1) +} + +// SetPipelineRunsQueued provides a mock function with given fields: n +func (_m *PrometheusBackend) SetPipelineRunsQueued(n int) { + _m.Called(n) +} + +// SetPipelineTaskRunsQueued provides a mock function with given fields: n +func (_m *PrometheusBackend) SetPipelineTaskRunsQueued(n int) { + _m.Called(n) +} + +// SetUnconfirmedTransactions provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetUnconfirmedTransactions(_a0 *big.Int, _a1 int64) { + _m.Called(_a0, _a1) +} + +// NewPrometheusBackend creates a new instance of PrometheusBackend. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewPrometheusBackend(t interface { + mock.TestingT + Cleanup(func()) +}) *PrometheusBackend { + mock := &PrometheusBackend{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/internal/testutils/configtest/general_config.go b/core/internal/testutils/configtest/general_config.go new file mode 100644 index 00000000..4fc92a67 --- /dev/null +++ b/core/internal/testutils/configtest/general_config.go @@ -0,0 +1,126 @@ +package configtest + +import ( + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +const DefaultPeerID = "12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X" + +// NewTestGeneralConfig returns a new plugin.GeneralConfig with default test overrides and one chain with evmclient.NullClientChainID. +func NewTestGeneralConfig(t testing.TB) plugin.GeneralConfig { return NewGeneralConfig(t, nil) } + +// NewGeneralConfig returns a new plugin.GeneralConfig with overrides. +// The default test overrides are applied before overrideFn, and include one chain with evmclient.NullClientChainID. 
+func NewGeneralConfig(t testing.TB, overrideFn func(*plugin.Config, *plugin.Secrets)) plugin.GeneralConfig { + tempDir := t.TempDir() + g, err := plugin.GeneralConfigOpts{ + OverrideFn: func(c *plugin.Config, s *plugin.Secrets) { + overrides(c, s) + c.RootDir = &tempDir + if fn := overrideFn; fn != nil { + fn(c, s) + } + }, + }.New() + require.NoError(t, err) + return g +} + +// overrides applies some test config settings and adds a default chain with evmclient.NullClientChainID. +func overrides(c *plugin.Config, s *plugin.Secrets) { + s.Password.Keystore = models.NewSecret("dummy-to-pass-validation") + + c.Insecure.OCRDevelopmentMode = ptr(true) + c.InsecureFastScrypt = ptr(true) + c.ShutdownGracePeriod = commonconfig.MustNewDuration(testutils.DefaultWaitTimeout) + + c.Database.Dialect = dialects.TransactionWrappedPostgres + c.Database.Lock.Enabled = ptr(false) + c.Database.MaxIdleConns = ptr[int64](20) + c.Database.MaxOpenConns = ptr[int64](20) + c.Database.MigrateOnStartup = ptr(false) + c.Database.DefaultLockTimeout = commonconfig.MustNewDuration(1 * time.Minute) + + c.JobPipeline.ReaperInterval = commonconfig.MustNewDuration(0) + + c.P2P.V2.Enabled = ptr(false) + + c.WebServer.SessionTimeout = commonconfig.MustNewDuration(2 * time.Minute) + c.WebServer.BridgeResponseURL = commonconfig.MustParseURL("http://localhost:6688") + testIP := net.ParseIP("127.0.0.1") + c.WebServer.ListenIP = &testIP + c.WebServer.TLS.ListenIP = &testIP + + chainID := big.NewI(evmclient.NullClientChainID) + + chainCfg := evmcfg.Defaults(chainID) + chainCfg.LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) // speed it up from the standard 15s for tests + + c.EVM = append(c.EVM, &evmcfg.EVMConfig{ + ChainID: chainID, + Chain: chainCfg, + Nodes: evmcfg.EVMNodes{ + &evmcfg.Node{ + Name: ptr("test"), + WSURL: &commonconfig.URL{}, + HTTPURL: &commonconfig.URL{}, + SendOnly: new(bool), + Order: ptr[int32](100), + }, + }, + }) +} + +// NewGeneralConfigSimulated returns a new 
plugin.GeneralConfig with overrides, including the simulated EVM chain. +// The default test overrides are applied before overrideFn. +// The simulated chain (testutils.SimulatedChainID) replaces the null chain (evmclient.NullClientChainID). +func NewGeneralConfigSimulated(t testing.TB, overrideFn func(*plugin.Config, *plugin.Secrets)) plugin.GeneralConfig { + return NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + simulated(c, s) + if fn := overrideFn; fn != nil { + fn(c, s) + } + }) +} + +// simulated is a config override func that appends the simulated EVM chain (testutils.SimulatedChainID), +// or replaces the null chain (client.NullClientChainID) if that is the only entry. +func simulated(c *plugin.Config, s *plugin.Secrets) { + chainID := big.New(testutils.SimulatedChainID) + enabled := true + cfg := evmcfg.EVMConfig{ + ChainID: chainID, + Chain: evmcfg.Defaults(chainID), + Enabled: &enabled, + Nodes: evmcfg.EVMNodes{&validTestNode}, + } + if len(c.EVM) == 1 && c.EVM[0].ChainID.Cmp(big.NewI(client.NullClientChainID)) == 0 { + c.EVM[0] = &cfg // replace null, if only entry + } else { + c.EVM = append(c.EVM, &cfg) + } +} + +var validTestNode = evmcfg.Node{ + Name: ptr("simulated-node"), + WSURL: commonconfig.MustParseURL("WSS://simulated-wss.com/ws"), + HTTPURL: commonconfig.MustParseURL("http://simulated.com"), + SendOnly: nil, + Order: ptr(int32(1)), +} + +func ptr[T any](v T) *T { return &v } diff --git a/core/internal/testutils/configtest/toml.go b/core/internal/testutils/configtest/toml.go new file mode 100644 index 00000000..78db05f9 --- /dev/null +++ b/core/internal/testutils/configtest/toml.go @@ -0,0 +1,23 @@ +package configtest + +import ( + "os" + "path/filepath" + "testing" + + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/require" +) + +// WriteTOMLFile is used in tests to output toml config types to a toml string. +// Secret values are redacted. 
+func WriteTOMLFile(t *testing.T, contents any, fileName string) string { + d := t.TempDir() + p := filepath.Join(d, fileName) + + b, err := toml.Marshal(contents) + require.NoError(t, err) + + require.NoError(t, os.WriteFile(p, b, 0600)) + return p +} diff --git a/core/internal/testutils/cosmostest/cosmostest.go b/core/internal/testutils/cosmostest/cosmostest.go new file mode 100644 index 00000000..833c7279 --- /dev/null +++ b/core/internal/testutils/cosmostest/cosmostest.go @@ -0,0 +1,12 @@ +package cosmostest + +import ( + "fmt" + + "github.com/google/uuid" +) + +// RandomChainID returns a random chain id for testing. Use this instead of a constant to prevent DB collisions. +func RandomChainID() string { + return fmt.Sprintf("Plugintest-%s", uuid.New()) +} diff --git a/core/internal/testutils/evmtest/evmtest.go b/core/internal/testutils/evmtest/evmtest.go new file mode 100644 index 00000000..0079ed10 --- /dev/null +++ b/core/internal/testutils/evmtest/evmtest.go @@ -0,0 +1,371 @@ +package evmtest + +import ( + "fmt" + "math/big" + "slices" + "sync" + "sync/atomic" + "testing" + + "github.com/ethereum/go-ethereum" + "github.com/jmoiron/sqlx" + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + "github.com/goplugin/plugin-common/pkg/utils/mailbox/mailboxtest" + + commonmocks "github.com/goplugin/pluginv3.0/v2/common/types/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + evmtoml "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +func NewChainScopedConfig(t testing.TB, cfg legacyevm.AppConfig) evmconfig.ChainScopedConfig { + var evmCfg *evmtoml.EVMConfig + if len(cfg.EVMConfigs()) > 0 { + evmCfg = cfg.EVMConfigs()[0] + } else { + var chainID = (*ubig.Big)(testutils.FixtureChainID) + evmCfg = &evmtoml.EVMConfig{ + ChainID: chainID, + Chain: evmtoml.Defaults(chainID), + } + } + + return evmconfig.NewTOMLChainScopedConfig(cfg, evmCfg, logger.TestLogger(t)) + +} + +type TestChainOpts struct { + Client evmclient.Client + LogBroadcaster log.Broadcaster + LogPoller logpoller.LogPoller + GeneralConfig legacyevm.AppConfig + HeadTracker httypes.HeadTracker + DB *sqlx.DB + TxManager txmgr.TxManager + KeyStore keystore.Eth + MailMon *mailbox.Monitor + GasEstimator gas.EvmFeeEstimator +} + +// NewChainRelayExtenders returns a simple chain collection with one chain and +// allows to mock client/config on that chain +func NewChainRelayExtenders(t testing.TB, testopts TestChainOpts) *evmrelay.ChainRelayerExtenders { + opts := NewChainRelayExtOpts(t, testopts) + cc, err := evmrelay.NewChainRelayerExtenders(testutils.Context(t), opts) + require.NoError(t, err) + return cc +} + +func 
NewChainRelayExtOpts(t testing.TB, testopts TestChainOpts) legacyevm.ChainRelayExtenderConfig { + require.NotNil(t, testopts.KeyStore) + lggr := logger.TestLogger(t) + opts := legacyevm.ChainRelayExtenderConfig{ + Logger: lggr, + KeyStore: testopts.KeyStore, + ChainOpts: legacyevm.ChainOpts{ + AppConfig: testopts.GeneralConfig, + MailMon: testopts.MailMon, + GasEstimator: testopts.GasEstimator, + DB: testopts.DB, + }, + } + opts.GenEthClient = func(*big.Int) evmclient.Client { + if testopts.Client != nil { + return testopts.Client + } + return evmclient.NewNullClient(MustGetDefaultChainID(t, testopts.GeneralConfig.EVMConfigs()), logger.TestLogger(t)) + } + if testopts.LogBroadcaster != nil { + opts.GenLogBroadcaster = func(*big.Int) log.Broadcaster { + return testopts.LogBroadcaster + } + } + if testopts.LogPoller != nil { + opts.GenLogPoller = func(*big.Int) logpoller.LogPoller { + return testopts.LogPoller + } + } + if testopts.HeadTracker != nil { + opts.GenHeadTracker = func(*big.Int, httypes.HeadBroadcaster) httypes.HeadTracker { + return testopts.HeadTracker + } + } + if testopts.TxManager != nil { + opts.GenTxManager = func(*big.Int) txmgr.TxManager { + return testopts.TxManager + } + } + if opts.MailMon == nil { + opts.MailMon = servicetest.Run(t, mailboxtest.NewMonitor(t)) + } + if testopts.GasEstimator != nil { + opts.GenGasEstimator = func(*big.Int) gas.EvmFeeEstimator { + return testopts.GasEstimator + } + } + + return opts +} + +// Deprecated, this is a replacement function for tests for now removed default evmChainID logic +func MustGetDefaultChainID(t testing.TB, evmCfgs evmtoml.EVMConfigs) *big.Int { + if len(evmCfgs) == 0 { + t.Fatalf("at least one evm chain config must be defined") + } + return evmCfgs[0].ChainID.ToInt() +} + +// Deprecated, this is a replacement function for tests for now removed default chain logic +func MustGetDefaultChain(t testing.TB, cc legacyevm.LegacyChainContainer) legacyevm.Chain { + if len(cc.Slice()) == 0 { + 
t.Fatalf("at least one evm chain container must be defined") + } + + return cc.Slice()[0] +} + +type TestConfigs struct { + mu sync.RWMutex + evmtoml.EVMConfigs +} + +var _ evmtypes.Configs = &TestConfigs{} + +func NewTestConfigs(cs ...*evmtoml.EVMConfig) *TestConfigs { + return &TestConfigs{EVMConfigs: evmtoml.EVMConfigs(cs)} +} + +func (mo *TestConfigs) PutChains(cs ...evmtoml.EVMConfig) { + mo.mu.Lock() + defer mo.mu.Unlock() +chains: + for i := range cs { + id := cs[i].ChainID + for j, c2 := range mo.EVMConfigs { + if c2.ChainID == id { + mo.EVMConfigs[j] = &cs[i] // replace + continue chains + } + } + mo.EVMConfigs = append(mo.EVMConfigs, &cs[i]) + } +} + +func (mo *TestConfigs) Chains(ids ...relay.ChainID) (cs []types.ChainStatus, count int, err error) { + mo.mu.RLock() + defer mo.mu.RUnlock() + if len(ids) == 0 { + for _, c := range mo.EVMConfigs { + c2 := types.ChainStatus{ + ID: c.ChainID.String(), + Enabled: c.IsEnabled(), + } + c2.Config, err = c.TOMLString() + if err != nil { + return + } + cs = append(cs, c2) + } + count = len(cs) + return + } + for i := range mo.EVMConfigs { + c := mo.EVMConfigs[i] + chainID := c.ChainID.String() + if !slices.Contains(ids, chainID) { + continue + } + c2 := types.ChainStatus{ + ID: chainID, + Enabled: c.IsEnabled(), + } + c2.Config, err = c.TOMLString() + if err != nil { + return + } + cs = append(cs, c2) + } + count = len(cs) + return +} + +// Nodes implements evmtypes.Configs +func (mo *TestConfigs) Nodes(id relay.ChainID) (nodes []evmtypes.Node, err error) { + mo.mu.RLock() + defer mo.mu.RUnlock() + + for i := range mo.EVMConfigs { + c := mo.EVMConfigs[i] + if id == c.ChainID.String() { + for _, n := range c.Nodes { + nodes = append(nodes, legacyNode(n, c.ChainID)) + } + } + } + err = fmt.Errorf("no nodes: chain %s: %w", id, chains.ErrNotFound) + return +} + +func (mo *TestConfigs) Node(name string) (evmtypes.Node, error) { + mo.mu.RLock() + defer mo.mu.RUnlock() + + for i := range mo.EVMConfigs { + c := 
mo.EVMConfigs[i] + for _, n := range c.Nodes { + if *n.Name == name { + return legacyNode(n, c.ChainID), nil + } + } + } + return evmtypes.Node{}, fmt.Errorf("node %s: %w", name, chains.ErrNotFound) +} + +func (mo *TestConfigs) NodeStatusesPaged(offset int, limit int, chainIDs ...string) (nodes []types.NodeStatus, cnt int, err error) { + mo.mu.RLock() + defer mo.mu.RUnlock() + + for i := range mo.EVMConfigs { + c := mo.EVMConfigs[i] + id := c.ChainID.String() + if !slices.Contains(chainIDs, id) { + continue + } + for _, n := range c.Nodes { + var n2 types.NodeStatus + n2, err = nodeStatus(n, id) + if err != nil { + return + } + nodes = append(nodes, n2) + } + } + cnt = len(nodes) + return +} + +func legacyNode(n *evmtoml.Node, chainID *ubig.Big) (v2 evmtypes.Node) { + v2.Name = *n.Name + v2.EVMChainID = *chainID + if n.HTTPURL != nil { + v2.HTTPURL = null.StringFrom(n.HTTPURL.String()) + } + if n.WSURL != nil { + v2.WSURL = null.StringFrom(n.WSURL.String()) + } + if n.SendOnly != nil { + v2.SendOnly = *n.SendOnly + } + return +} + +func nodeStatus(n *evmtoml.Node, chainID string) (types.NodeStatus, error) { + var s types.NodeStatus + s.ChainID = chainID + s.Name = *n.Name + b, err := toml.Marshal(n) + if err != nil { + return types.NodeStatus{}, err + } + s.Config = string(b) + return s, nil +} + +func NewEthClientMock(t *testing.T) *evmclimocks.Client { + return evmclimocks.NewClient(t) +} + +func NewEthClientMockWithDefaultChain(t *testing.T) *evmclimocks.Client { + c := NewEthClientMock(t) + c.On("ConfiguredChainID").Return(testutils.FixtureChainID).Maybe() + c.On("IsL2").Return(false).Maybe() + return c +} + +type MockEth struct { + EthClient *evmclimocks.Client + CheckFilterLogs func(int64, int64) + + subsMu sync.RWMutex + subs []*commonmocks.Subscription + errChs []chan error + subscribeCalls atomic.Int32 + unsubscribeCalls atomic.Int32 +} + +func (m *MockEth) SubscribeCallCount() int32 { + return m.subscribeCalls.Load() +} + +func (m *MockEth) 
UnsubscribeCallCount() int32 { + return m.unsubscribeCalls.Load() +} + +func (m *MockEth) NewSub(t *testing.T) ethereum.Subscription { + m.subscribeCalls.Add(1) + sub := commonmocks.NewSubscription(t) + errCh := make(chan error) + sub.On("Err"). + Return(func() <-chan error { return errCh }).Maybe() + sub.On("Unsubscribe"). + Run(func(mock.Arguments) { + m.unsubscribeCalls.Add(1) + close(errCh) + }).Return().Maybe() + m.subsMu.Lock() + m.subs = append(m.subs, sub) + m.errChs = append(m.errChs, errCh) + m.subsMu.Unlock() + return sub +} + +func (m *MockEth) SubsErr(err error) { + m.subsMu.Lock() + defer m.subsMu.Unlock() + for _, errCh := range m.errChs { + errCh <- err + } +} + +type RawSub[T any] struct { + ch chan<- T + err <-chan error +} + +func NewRawSub[T any](ch chan<- T, err <-chan error) RawSub[T] { + return RawSub[T]{ch: ch, err: err} +} + +func (r *RawSub[T]) CloseCh() { + close(r.ch) +} + +func (r *RawSub[T]) TrySend(t T) { + select { + case <-r.err: + case r.ch <- t: + } +} diff --git a/core/internal/testutils/evmtest/v2/evmtest.go b/core/internal/testutils/evmtest/v2/evmtest.go new file mode 100644 index 00000000..45044a27 --- /dev/null +++ b/core/internal/testutils/evmtest/v2/evmtest.go @@ -0,0 +1,22 @@ +package v2 + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func ChainEthMainnet(t *testing.T) config.ChainScopedConfig { return scopedConfig(t, 1) } +func ChainOptimismMainnet(t *testing.T) config.ChainScopedConfig { return scopedConfig(t, 10) } +func ChainArbitrumMainnet(t *testing.T) config.ChainScopedConfig { return scopedConfig(t, 42161) } +func ChainArbitrumRinkeby(t *testing.T) config.ChainScopedConfig { return scopedConfig(t, 421611) } + +func 
scopedConfig(t *testing.T, chainID int64) config.ChainScopedConfig { + id := big.NewI(chainID) + evmCfg := toml.EVMConfig{ChainID: id, Chain: toml.Defaults(id)} + return config.NewTOMLChainScopedConfig(configtest.NewTestGeneralConfig(t), &evmCfg, logger.TestLogger(t)) +} diff --git a/core/internal/testutils/httptest/httptest.go b/core/internal/testutils/httptest/httptest.go new file mode 100644 index 00000000..7607ca75 --- /dev/null +++ b/core/internal/testutils/httptest/httptest.go @@ -0,0 +1,37 @@ +package httptest + +import ( + "context" + "net" + "net/http" + "time" + + "github.com/pkg/errors" +) + +// NewTestHTTPClient returns a real HTTP client that may only make requests to +// localhost +func NewTestLocalOnlyHTTPClient() *http.Client { + tr := http.DefaultTransport.(*http.Transport).Clone() + tr.DialContext = testDialContext + tr.DisableCompression = true + return &http.Client{Transport: tr} +} + +func testDialContext(ctx context.Context, network, address string) (net.Conn, error) { + con, err := (&net.Dialer{ + // Defaults from GoLang standard http package + // https://golang.org/pkg/net/http/#RoundTripper + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext(ctx, network, address) + if err != nil { + return con, err + } + a := con.RemoteAddr().(*net.TCPAddr) + if a != nil && !a.IP.IsLoopback() { + return nil, errors.Errorf("Test HTTP client may only dial localhost, got address: %v", a.String()) + } + return con, err +} diff --git a/core/internal/testutils/keystest/keystest.go b/core/internal/testutils/keystest/keystest.go new file mode 100644 index 00000000..beb7e580 --- /dev/null +++ b/core/internal/testutils/keystest/keystest.go @@ -0,0 +1,30 @@ +package keystest + +import ( + "crypto/ecdsa" + "crypto/rand" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/crypto" + "github.com/google/uuid" + "github.com/pkg/errors" +) + +// NewKey pulled from geth +func NewKey() (key 
keystore.Key, err error) { + privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + if err != nil { + return key, err + } + + id, err := uuid.NewRandom() + if err != nil { + return key, errors.Errorf("Could not create random uuid: %v", err) + } + + return keystore.Key{ + Id: id, + Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey), + PrivateKey: privateKeyECDSA, + }, nil +} diff --git a/core/internal/testutils/keystest/seedable_rand_reader.go b/core/internal/testutils/keystest/seedable_rand_reader.go new file mode 100644 index 00000000..34692924 --- /dev/null +++ b/core/internal/testutils/keystest/seedable_rand_reader.go @@ -0,0 +1,13 @@ +package keystest + +import ( + "io" + "math/rand" +) + +// NewRandReaderFromSeed returns a seedable random io reader, producing deterministic +// output. This is useful for deterministically producing keys for tests. This is an +// insecure source of randomness and therefor should only be used in tests. +func NewRandReaderFromSeed(seed int64) io.Reader { + return rand.New(rand.NewSource(seed)) +} diff --git a/core/internal/testutils/logger.go b/core/internal/testutils/logger.go new file mode 100644 index 00000000..fb67421c --- /dev/null +++ b/core/internal/testutils/logger.go @@ -0,0 +1,36 @@ +package testutils + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// LoggerAssertMaxLevel returns a test logger which is observed on cleanup +// and asserts that no lines were logged at a higher level. 
+func LoggerAssertMaxLevel(t *testing.T, lvl zapcore.Level) logger.Logger { + if lvl >= zapcore.FatalLevel { + t.Fatalf("no levels exist after %s", zapcore.FatalLevel) + } + lggr, o := logger.TestLoggerObserved(t, lvl+1) + t.Cleanup(func() { + assert.Empty(t, o.Len(), fmt.Sprintf("logger contains entries with levels above %q:\n%s", lvl, loggedEntries(o.All()))) + }) + return lggr +} + +type loggedEntries []observer.LoggedEntry + +func (logs loggedEntries) String() string { + var sb strings.Builder + for _, l := range logs { + fmt.Fprintln(&sb, l) + } + return sb.String() +} diff --git a/core/internal/testutils/pgtest/pgtest.go b/core/internal/testutils/pgtest/pgtest.go new file mode 100644 index 00000000..588f9a41 --- /dev/null +++ b/core/internal/testutils/pgtest/pgtest.go @@ -0,0 +1,53 @@ +package pgtest + +import ( + "database/sql" + "testing" + + "github.com/google/uuid" + "github.com/jmoiron/sqlx" + "github.com/scylladb/go-reflectx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" +) + +func NewQConfig(logSQL bool) pg.QConfig { + return pg.NewQConfig(logSQL) +} + +func NewSqlDB(t *testing.T) *sql.DB { + testutils.SkipShortDB(t) + db, err := sql.Open(string(dialects.TransactionWrappedPostgres), uuid.New().String()) + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, db.Close()) }) + + return db +} + +func NewSqlxDB(t testing.TB) *sqlx.DB { + testutils.SkipShortDB(t) + db, err := sqlx.Open(string(dialects.TransactionWrappedPostgres), uuid.New().String()) + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, db.Close()) }) + db.MapperFunc(reflectx.CamelToSnakeASCII) + + return db +} + +func MustExec(t *testing.T, db *sqlx.DB, stmt string, args ...interface{}) { + require.NoError(t, 
utils.JustError(db.Exec(stmt, args...))) +} + +func MustSelect(t *testing.T, db *sqlx.DB, dest interface{}, stmt string, args ...interface{}) { + require.NoError(t, db.Select(dest, stmt, args...)) +} + +func MustCount(t *testing.T, db *sqlx.DB, stmt string, args ...interface{}) (cnt int) { + require.NoError(t, db.Get(&cnt, stmt, args...)) + return +} diff --git a/core/internal/testutils/pgtest/txdb.go b/core/internal/testutils/pgtest/txdb.go new file mode 100644 index 00000000..46967a4f --- /dev/null +++ b/core/internal/testutils/pgtest/txdb.go @@ -0,0 +1,509 @@ +package pgtest + +import ( + "context" + "database/sql" + "database/sql/driver" + "flag" + "fmt" + "io" + "net/url" + "strings" + "sync" + "testing" + + "github.com/jmoiron/sqlx" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" +) + +// txdb is a simplified version of https://github.com/DATA-DOG/go-txdb +// +// The original lib has various problems and is hard to understand because it +// tries to be more general. The version in this file is more tightly focused +// to our needs and should be easier to reason about and less likely to have +// subtle bugs/races. +// +// It doesn't currently support savepoints but could be made to if necessary. +// +// Transaction BEGIN/ROLLBACK effectively becomes a no-op, this should have no +// negative impact on normal test operation. +// +// If you MUST test BEGIN/ROLLBACK behaviour, you will have to configure your +// store to use the raw DialectPostgres dialect and setup a one-use database. +// See heavyweight.FullTestDB() as a convenience function to help you do this, +// but please use sparingly because as it's name implies, it is expensive. 
+func init() { + testing.Init() + if !flag.Parsed() { + flag.Parse() + } + if testing.Short() { + // -short tests don't need a DB + return + } + dbURL := string(env.DatabaseURL.Get()) + if dbURL == "" { + panic("you must provide a CL_DATABASE_URL environment variable") + } + + parsed, err := url.Parse(dbURL) + if err != nil { + panic(err) + } + if parsed.Path == "" { + msg := fmt.Sprintf("invalid %[1]s: `%[2]s`. You must set %[1]s env var to point to your test database. Note that the test database MUST end in `_test` to differentiate from a possible production DB. HINT: Try %[1]s=postgresql://postgres@localhost:5432/plugin_test?sslmode=disable", env.DatabaseURL, parsed.String()) + panic(msg) + } + if !strings.HasSuffix(parsed.Path, "_test") { + msg := fmt.Sprintf("cannot run tests against database named `%s`. Note that the test database MUST end in `_test` to differentiate from a possible production DB. HINT: Try %s=postgresql://postgres@localhost:5432/plugin_test?sslmode=disable", parsed.Path[1:], env.DatabaseURL) + panic(msg) + } + name := string(dialects.TransactionWrappedPostgres) + sql.Register(name, &txDriver{ + dbURL: dbURL, + conns: make(map[string]*conn), + }) + sqlx.BindDriver(name, sqlx.DOLLAR) +} + +var _ driver.Conn = &conn{} + +var _ driver.Validator = &conn{} +var _ driver.SessionResetter = &conn{} + +// txDriver is an sql driver which runs on single transaction +// when the Close is called, transaction is rolled back +type txDriver struct { + sync.Mutex + db *sql.DB + conns map[string]*conn + + dbURL string +} + +func (d *txDriver) Open(dsn string) (driver.Conn, error) { + d.Lock() + defer d.Unlock() + // Open real db connection if its the first call + if d.db == nil { + db, err := sql.Open("pgx", d.dbURL) + if err != nil { + return nil, err + } + d.db = db + } + c, exists := d.conns[dsn] + if !exists || !c.tryOpen() { + tx, err := d.db.Begin() + if err != nil { + return nil, err + } + c = &conn{tx: tx, opened: 1, dsn: dsn} + c.removeSelf = func() 
error { + return d.deleteConn(c) + } + d.conns[dsn] = c + } + return c, nil +} + +// deleteConn is called by connection when it is closed +// It also auto-closes the DB when the last checked out connection is closed +func (d *txDriver) deleteConn(c *conn) error { + // must lock here to avoid racing with Open + d.Lock() + defer d.Unlock() + + if d.conns[c.dsn] != c { + return nil // already been replaced + } + delete(d.conns, c.dsn) + if len(d.conns) == 0 && d.db != nil { + if err := d.db.Close(); err != nil { + return err + } + d.db = nil + } + return nil +} + +type conn struct { + sync.Mutex + dsn string + tx *sql.Tx // tx may be shared by many conns, definitive one lives in the map keyed by DSN on the txDriver. Do not modify from conn + closed bool + opened int + removeSelf func() error +} + +func (c *conn) Begin() (driver.Tx, error) { + c.Lock() + defer c.Unlock() + if c.closed { + panic("conn is closed") + } + // Begin is a noop because the transaction was already opened + return tx{c.tx}, nil +} + +// Implement the "ConnBeginTx" interface +func (c *conn) BeginTx(_ context.Context, opts driver.TxOptions) (driver.Tx, error) { + // Context is ignored, because single transaction is shared by all callers, thus caller should not be able to + // control it with local context + return c.Begin() +} + +// Prepare returns a prepared statement, bound to this connection. 
+func (c *conn) Prepare(query string) (driver.Stmt, error) { + return c.PrepareContext(context.Background(), query) +} + +// Implement the "ConnPrepareContext" interface +func (c *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + c.Lock() + defer c.Unlock() + if c.closed { + panic("conn is closed") + } + + // TODO: Fix context handling + // FIXME: It is not safe to give the passed in context to the tx directly + // because the tx is shared by many conns and cancelling the context will + // destroy the tx which can affect other conns + st, err := c.tx.PrepareContext(context.Background(), query) + if err != nil { + return nil, err + } + return &stmt{st, c}, nil +} + +// IsValid is called prior to placing the connection into the +// connection pool by database/sql. The connection will be discarded if false is returned. +func (c *conn) IsValid() bool { + c.Lock() + defer c.Unlock() + return !c.closed +} + +func (c *conn) ResetSession(ctx context.Context) error { + // Ensure bad connections are reported: From database/sql/driver: + // If a connection is never returned to the connection pool but immediately reused, then + // ResetSession is called prior to reuse but IsValid is not called. + c.Lock() + defer c.Unlock() + if c.closed { + return driver.ErrBadConn + } + + return nil +} + +// pgx returns nil +func (c *conn) CheckNamedValue(nv *driver.NamedValue) error { + return nil +} + +// Implement the "QueryerContext" interface +func (c *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + c.Lock() + defer c.Unlock() + if c.closed { + panic("conn is closed") + } + + // TODO: Fix context handling + rs, err := c.tx.QueryContext(context.Background(), query, mapNamedArgs(args)...) 
+ if err != nil { + return nil, err + } + defer rs.Close() + + return buildRows(rs) +} + +// Implement the "ExecerContext" interface +func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + c.Lock() + defer c.Unlock() + if c.closed { + panic("conn is closed") + } + // TODO: Fix context handling + return c.tx.ExecContext(context.Background(), query, mapNamedArgs(args)...) +} + +// tryOpen attempts to increment the open count, but returns false if closed. +func (c *conn) tryOpen() bool { + c.Lock() + defer c.Unlock() + if c.closed { + return false + } + c.opened++ + return true +} + +// Close invalidates and potentially stops any current +// prepared statements and transactions, marking this +// connection as no longer in use. +// +// Because the sql package maintains a free pool of +// connections and only calls Close when there's a surplus of +// idle connections, it shouldn't be necessary for drivers to +// do their own connection caching. +// +// Drivers must ensure all network calls made by Close +// do not block indefinitely (e.g. apply a timeout). +func (c *conn) Close() (err error) { + if !c.close() { + return + } + // Wait to remove self to avoid nesting locks. 
+ if err := c.removeSelf(); err != nil { + panic(err) + } + return +} + +func (c *conn) close() bool { + c.Lock() + defer c.Unlock() + if c.closed { + // Double close, should be a safe to make this a noop + // PGX allows double close + // See: https://github.com/jackc/pgx/blob/a457da8bffa4f90ad672fa093ee87f20cf06687b/conn.go#L249 + return false + } + + c.opened-- + if c.opened > 0 { + return false + } + if c.tx != nil { + if err := c.tx.Rollback(); err != nil { + panic(err) + } + c.tx = nil + } + c.closed = true + return true +} + +type tx struct { + tx *sql.Tx +} + +func (tx tx) Commit() error { + // Commit is a noop because the transaction will be rolled back at the end + return nil +} + +func (tx tx) Rollback() error { + // Rollback is a noop because the transaction will be rolled back at the end + return nil +} + +type stmt struct { + st *sql.Stmt + conn *conn +} + +func (s stmt) Exec(args []driver.Value) (driver.Result, error) { + s.conn.Lock() + defer s.conn.Unlock() + if s.conn.closed { + panic("conn is closed") + } + return s.st.Exec(mapArgs(args)...) +} + +// Implement the "StmtExecContext" interface +func (s *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + s.conn.Lock() + defer s.conn.Unlock() + if s.conn.closed { + panic("conn is closed") + } + // TODO: Fix context handling + return s.st.ExecContext(context.Background(), mapNamedArgs(args)...) +} + +func mapArgs(args []driver.Value) (res []interface{}) { + res = make([]interface{}, len(args)) + for i := range args { + res[i] = args[i] + } + return +} + +func (s stmt) NumInput() int { + return -1 +} + +func (s stmt) Query(args []driver.Value) (driver.Rows, error) { + s.conn.Lock() + defer s.conn.Unlock() + if s.conn.closed { + panic("conn is closed") + } + rows, err := s.st.Query(mapArgs(args)...) 
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		err = multierr.Combine(err, rows.Close())
+	}()
+	return buildRows(rows)
+}
+
+// Implement the "StmtQueryContext" interface
+func (s *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+	s.conn.Lock()
+	defer s.conn.Unlock()
+	if s.conn.closed {
+		panic("conn is closed")
+	}
+	// TODO: Fix context handling
+	rows, err := s.st.QueryContext(context.Background(), mapNamedArgs(args)...)
+	if err != nil {
+		return nil, err
+	}
+	return buildRows(rows)
+}
+
+func (s stmt) Close() error {
+	s.conn.Lock()
+	defer s.conn.Unlock()
+	return s.st.Close()
+}
+
+// buildRows eagerly reads every result set out of r into an in-memory
+// rowSets so the *sql.Rows can be closed before the driver.Rows is used.
+func buildRows(r *sql.Rows) (driver.Rows, error) {
+	set := &rowSets{}
+	rs := &rows{}
+	if err := rs.read(r); err != nil {
+		return set, err
+	}
+	set.sets = append(set.sets, rs)
+	for r.NextResultSet() {
+		rss := &rows{}
+		if err := rss.read(r); err != nil {
+			return set, err
+		}
+		set.sets = append(set.sets, rss)
+	}
+	return set, nil
+}
+
+// Implement the "RowsNextResultSet" interface
+func (rs *rowSets) HasNextResultSet() bool {
+	return rs.pos+1 < len(rs.sets)
+}
+
+// Implement the "RowsNextResultSet" interface
+func (rs *rowSets) NextResultSet() error {
+	if !rs.HasNextResultSet() {
+		return io.EOF
+	}
+
+	rs.pos++
+	return nil
+}
+
+// rows is a fully-materialized, in-memory implementation of driver.Rows.
+type rows struct {
+	rows     [][]driver.Value
+	pos      int
+	cols     []string
+	colTypes []*sql.ColumnType
+}
+
+func (r *rows) Columns() []string {
+	return r.cols
+}
+
+func (r *rows) ColumnTypeDatabaseTypeName(index int) string {
+	return r.colTypes[index].DatabaseTypeName()
+}
+
+func (r *rows) Next(dest []driver.Value) error {
+	r.pos++
+	if r.pos > len(r.rows) {
+		return io.EOF
+	}
+
+	for i, val := range r.rows[r.pos-1] {
+		dest[i] = *(val.(*interface{}))
+	}
+
+	return nil
+}
+
+func (r *rows) Close() error {
+	return nil
+}
+
+// read drains all rows from rs into r. It does not close rs.
+func (r *rows) read(rs *sql.Rows) error {
+	var err error
+	r.cols, err = rs.Columns()
+	if err != nil {
+		return err
+	}
+
+	r.colTypes, err = rs.ColumnTypes()
+	
if err != nil { + return err + } + + for rs.Next() { + values := make([]interface{}, len(r.cols)) + for i := range values { + values[i] = new(interface{}) + } + if err := rs.Scan(values...); err != nil { + return err + } + row := make([]driver.Value, len(r.cols)) + for i, v := range values { + row[i] = driver.Value(v) + } + r.rows = append(r.rows, row) + } + return rs.Err() +} + +type rowSets struct { + sets []*rows + pos int +} + +func (rs *rowSets) Columns() []string { + return rs.sets[rs.pos].cols +} + +func (rs *rowSets) ColumnTypeDatabaseTypeName(index int) string { + return rs.sets[rs.pos].ColumnTypeDatabaseTypeName(index) +} + +func (rs *rowSets) Close() error { + return nil +} + +// advances to next row +func (rs *rowSets) Next(dest []driver.Value) error { + return rs.sets[rs.pos].Next(dest) +} + +func mapNamedArgs(args []driver.NamedValue) (res []interface{}) { + res = make([]interface{}, len(args)) + for i := range args { + name := args[i].Name + if name != "" { + res[i] = sql.Named(name, args[i].Value) + } else { + res[i] = args[i].Value + } + } + return +} diff --git a/core/internal/testutils/pgtest/txdb_test.go b/core/internal/testutils/pgtest/txdb_test.go new file mode 100644 index 00000000..5a416536 --- /dev/null +++ b/core/internal/testutils/pgtest/txdb_test.go @@ -0,0 +1,50 @@ +package pgtest + +import ( + "context" + "testing" + "time" + + "github.com/google/uuid" + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestTxDBDriver(t *testing.T) { + db := NewSqlxDB(t) + dropTable := func() error { + _, err := db.Exec(`DROP TABLE IF EXISTS txdb_test`) + return err + } + // clean up, if previous tests failed + err := dropTable() + assert.NoError(t, err) + _, err = db.Exec(`CREATE TABLE txdb_test (id TEXT NOT NULL)`) + assert.NoError(t, err) + t.Cleanup(func() { + _ = dropTable() + }) + _, err = db.Exec(`INSERT INTO txdb_test VALUES ($1)`, uuid.New().String()) + 
assert.NoError(t, err) + ensureValuesPresent := func(t *testing.T, db *sqlx.DB) { + var ids []string + err = db.Select(&ids, `SELECT id from txdb_test`) + assert.NoError(t, err) + assert.Len(t, ids, 1) + } + + ensureValuesPresent(t, db) + t.Run("Cancel of tx's context does not trigger rollback of driver's tx", func(t *testing.T) { + ctx, cancel := context.WithCancel(testutils.Context(t)) + _, err := db.BeginTx(ctx, nil) + assert.NoError(t, err) + cancel() + // BeginTx spawns separate goroutine that rollbacks the tx and tries to close underlying connection, unless + // db driver says that connection is still active. + // This approach is not ideal, but there is no better way to wait for independent goroutine to complete + time.Sleep(time.Second * 10) + ensureValuesPresent(t, db) + }) +} diff --git a/core/internal/testutils/solanatest/solanatest.go b/core/internal/testutils/solanatest/solanatest.go new file mode 100644 index 00000000..f6a4968c --- /dev/null +++ b/core/internal/testutils/solanatest/solanatest.go @@ -0,0 +1,10 @@ +package solanatest + +import ( + "github.com/google/uuid" +) + +// RandomChainID returns a random uuid id for testing. Use this instead of a constant to prevent DB collisions. 
+func RandomChainID() string { + return uuid.New().String() +} diff --git a/core/internal/testutils/testutils.go b/core/internal/testutils/testutils.go new file mode 100644 index 00000000..48e3a210 --- /dev/null +++ b/core/internal/testutils/testutils.go @@ -0,0 +1,457 @@ +package testutils + +import ( + "context" + "crypto/ecdsa" + "crypto/rand" + "encoding/base64" + "flag" + "fmt" + "math" + "math/big" + mrand "math/rand" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/google/uuid" + "github.com/gorilla/websocket" + "github.com/tidwall/gjson" + "go.uber.org/zap/zaptest/observer" + + "github.com/jmoiron/sqlx" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + // NOTE: To avoid circular dependencies, this package MUST NOT import + // anything from "github.com/goplugin/pluginv3.0/v2/core" +) + +const ( + // Password just a password we use everywhere for testing + Password = "16charlengthp4SsW0rD1!@#_" +) + +// FixtureChainID matches the chain always added by fixtures.sql +// It is set to 0 since no real chain ever has this ID and allows a virtual +// "test" chain ID to be used without clashes +var FixtureChainID = big.NewInt(0) + +// SimulatedChainID is the chain ID for the go-ethereum simulated backend +var SimulatedChainID = big.NewInt(1337) + +// MustNewSimTransactor returns a transactor for interacting with the +// geth simulated backend. 
+func MustNewSimTransactor(t testing.TB) *bind.TransactOpts { + key, err := crypto.GenerateKey() + require.NoError(t, err) + transactor, err := bind.NewKeyedTransactorWithChainID(key, SimulatedChainID) + require.NoError(t, err) + return transactor +} + +// NewAddress return a random new address +func NewAddress() common.Address { + return common.BytesToAddress(randomBytes(20)) +} + +func NewAddressPtr() *common.Address { + a := common.BytesToAddress(randomBytes(20)) + return &a +} + +// NewPrivateKeyAndAddress returns a new private key and the corresponding address +func NewPrivateKeyAndAddress(t testing.TB) (*ecdsa.PrivateKey, common.Address) { + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + + publicKey := privateKey.Public() + publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) + require.True(t, ok) + + address := crypto.PubkeyToAddress(*publicKeyECDSA) + return privateKey, address +} + +// NewRandomPositiveInt64 returns a (non-cryptographically secure) random positive int64 +func NewRandomPositiveInt64() int64 { + id := mrand.Int63() + return id +} + +// NewRandomEVMChainID returns a suitable random chain ID that will not conflict +// with fixtures +func NewRandomEVMChainID() *big.Int { + id := mrand.Int63n(math.MaxInt32) + 10000 + return big.NewInt(id) +} + +func randomBytes(n int) []byte { + b := make([]byte, n) + _, err := rand.Read(b) + if err != nil { + panic(err) + } + return b +} + +// Random32Byte returns a random [32]byte +func Random32Byte() (b [32]byte) { + copy(b[:], randomBytes(32)) + return b +} + +// RandomizeName appends a random UUID to the provided name +func RandomizeName(n string) string { + id := uuid.New().String() + return n + id +} + +// DefaultWaitTimeout is the default wait timeout. If you have a *testing.T, use WaitTimeout instead. +const DefaultWaitTimeout = 30 * time.Second + +// WaitTimeout returns a timeout based on the test's Deadline, if available. 
+// Especially important to use in parallel tests, as their individual execution +// can get paused for arbitrary amounts of time. +func WaitTimeout(t *testing.T) time.Duration { + if d, ok := t.Deadline(); ok { + // 10% buffer for cleanup and scheduling delay + return time.Until(d) * 9 / 10 + } + return DefaultWaitTimeout +} + +// AfterWaitTimeout returns a channel that will send a time value when the +// WaitTimeout is reached +func AfterWaitTimeout(t *testing.T) <-chan time.Time { + return time.After(WaitTimeout(t)) +} + +// Context returns a context with the test's deadline, if available. +func Context(tb testing.TB) context.Context { + ctx := context.Background() + var cancel func() + switch t := tb.(type) { + case *testing.T: + if d, ok := t.Deadline(); ok { + ctx, cancel = context.WithDeadline(ctx, d) + } + } + if cancel == nil { + ctx, cancel = context.WithCancel(ctx) + } + tb.Cleanup(cancel) + return ctx +} + +// MustParseURL parses the URL or fails the test +func MustParseURL(t testing.TB, input string) *url.URL { + u, err := url.Parse(input) + require.NoError(t, err) + return u +} + +// MustParseBigInt parses a big int value from string or fails the test +func MustParseBigInt(t *testing.T, input string) *big.Int { + i := new(big.Int) + _, err := fmt.Sscan(input, i) + require.NoError(t, err) + return i +} + +// JSONRPCHandler is called with the method and request param(s). +// respResult will be sent immediately. notifyResult is optional, and sent after a short delay. +type JSONRPCHandler func(reqMethod string, reqParams gjson.Result) JSONRPCResponse + +type JSONRPCResponse struct { + Result, Notify string // raw JSON (i.e. quoted strings etc.) + + Error struct { + Code int + Message string + } +} + +type testWSServer struct { + t *testing.T + s *httptest.Server + mu sync.RWMutex + wsconns []*websocket.Conn + wg sync.WaitGroup +} + +// NewWSServer starts a websocket server which invokes callback for each message received. 
+// If chainID is set, then eth_chainId calls will be automatically handled. +func NewWSServer(t *testing.T, chainID *big.Int, callback JSONRPCHandler) (ts *testWSServer) { + ts = new(testWSServer) + ts.t = t + ts.wsconns = make([]*websocket.Conn, 0) + handler := ts.newWSHandler(chainID, callback) + ts.s = httptest.NewServer(handler) + t.Cleanup(ts.Close) + return +} + +func (ts *testWSServer) Close() { + if func() bool { + ts.mu.Lock() + defer ts.mu.Unlock() + if ts.wsconns == nil { + ts.t.Log("Test WS server already closed") + return false + } + ts.s.CloseClientConnections() + ts.s.Close() + for _, ws := range ts.wsconns { + ws.Close() + } + ts.wsconns = nil // nil indicates server closed + return true + }() { + ts.wg.Wait() + } +} + +func (ts *testWSServer) WSURL() *url.URL { + return WSServerURL(ts.t, ts.s) +} + +func (ts *testWSServer) MustWriteBinaryMessageSync(t *testing.T, msg string) { + ts.mu.Lock() + defer ts.mu.Unlock() + conns := ts.wsconns + if len(conns) != 1 { + t.Fatalf("expected 1 conn, got %d", len(conns)) + } + conn := conns[0] + err := conn.WriteMessage(websocket.BinaryMessage, []byte(msg)) + require.NoError(t, err) +} + +func (ts *testWSServer) newWSHandler(chainID *big.Int, callback JSONRPCHandler) (handler http.HandlerFunc) { + if callback == nil { + callback = func(method string, params gjson.Result) (resp JSONRPCResponse) { return } + } + t := ts.t + upgrader := websocket.Upgrader{ + CheckOrigin: func(r *http.Request) bool { return true }, + } + return func(w http.ResponseWriter, r *http.Request) { + ts.mu.Lock() + if ts.wsconns == nil { // closed + ts.mu.Unlock() + return + } + ts.wg.Add(1) + defer ts.wg.Done() + conn, err := upgrader.Upgrade(w, r, nil) + if !assert.NoError(t, err, "Failed to upgrade WS connection") { + ts.mu.Unlock() + return + } + defer conn.Close() + ts.wsconns = append(ts.wsconns, conn) + ts.mu.Unlock() + + for { + _, data, err := conn.ReadMessage() + if err != nil { + if websocket.IsCloseError(err, 
websocket.CloseNormalClosure, websocket.CloseAbnormalClosure) { + ts.t.Log("Websocket closing") + return + } + ts.t.Logf("Failed to read message: %v", err) + return + } + ts.t.Log("Received message", string(data)) + req := gjson.ParseBytes(data) + if !req.IsObject() { + ts.t.Logf("Request must be object: %v", req.Type) + return + } + if e := req.Get("error"); e.Exists() { + ts.t.Logf("Received jsonrpc error: %v", e) + continue + } + m := req.Get("method") + if m.Type != gjson.String { + ts.t.Logf("Method must be string: %v", m.Type) + return + } + + var resp JSONRPCResponse + if chainID != nil && m.String() == "eth_chainId" { + resp.Result = `"0x` + chainID.Text(16) + `"` + } else { + resp = callback(m.String(), req.Get("params")) + } + id := req.Get("id") + var msg string + if resp.Error.Message != "" { + msg = fmt.Sprintf(`{"jsonrpc":"2.0","id":%s,"error":{"code":%d,"message":"%s"}}`, id, resp.Error.Code, resp.Error.Message) + } else { + msg = fmt.Sprintf(`{"jsonrpc":"2.0","id":%s,"result":%s}`, id, resp.Result) + } + ts.t.Logf("Sending message: %v", msg) + ts.mu.Lock() + err = conn.WriteMessage(websocket.BinaryMessage, []byte(msg)) + ts.mu.Unlock() + if err != nil { + ts.t.Logf("Failed to write message: %v", err) + return + } + + if resp.Notify != "" { + time.Sleep(100 * time.Millisecond) + msg := fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_subscription","params":{"subscription":"0x00","result":%s}}`, resp.Notify) + ts.t.Log("Sending message", msg) + ts.mu.Lock() + err = conn.WriteMessage(websocket.BinaryMessage, []byte(msg)) + ts.mu.Unlock() + if err != nil { + ts.t.Logf("Failed to write message: %v", err) + return + } + } + } + } +} + +// WaitWithTimeout waits for the channel to close (or receive anything) and +// fatals the test if the default wait timeout is exceeded +func WaitWithTimeout(t *testing.T, ch <-chan struct{}, failMsg string) { + select { + case <-ch: + case <-time.After(WaitTimeout(t)): + t.Fatal(failMsg) + } +} + +// WSServerURL returns a ws:// 
url for the server +func WSServerURL(t *testing.T, s *httptest.Server) *url.URL { + u, err := url.Parse(s.URL) + require.NoError(t, err, "Failed to parse url") + u.Scheme = "ws" + return u +} + +// IntToHex converts int to geth-compatible hex +func IntToHex(n int) string { + return hexutil.EncodeBig(big.NewInt(int64(n))) +} + +// TestInterval is just a sensible poll interval that gives fast tests without +// risk of spamming +const TestInterval = 100 * time.Millisecond + +// AssertEventually waits for f to return true +func AssertEventually(t *testing.T, f func() bool) { + assert.Eventually(t, f, WaitTimeout(t), TestInterval/2) +} + +// RequireLogMessage fails the test if emitted logs don't contain the given message +func RequireLogMessage(t *testing.T, observedLogs *observer.ObservedLogs, msg string) { + for _, l := range observedLogs.All() { + if strings.Contains(l.Message, msg) { + return + } + } + t.Log("observed logs", observedLogs.All()) + t.Fatalf("expected observed logs to contain msg %q, but it didn't", msg) +} + +// WaitForLogMessage waits until at least one log message containing the +// specified msg is emitted. +// NOTE: This does not "pop" messages so it cannot be used multiple times to +// check for new instances of the same msg. See WaitForLogMessageCount instead. 
+// +// Get a *observer.ObservedLogs like so: +// +// observedZapCore, observedLogs := observer.New(zap.DebugLevel) +// lggr := logger.TestLogger(t, observedZapCore) +func WaitForLogMessage(t *testing.T, observedLogs *observer.ObservedLogs, msg string) { + AssertEventually(t, func() bool { + for _, l := range observedLogs.All() { + if strings.Contains(l.Message, msg) { + return true + } + } + return false + }) +} + +// WaitForLogMessageCount waits until at least count log message containing the +// specified msg is emitted +func WaitForLogMessageCount(t *testing.T, observedLogs *observer.ObservedLogs, msg string, count int) { + AssertEventually(t, func() bool { + i := 0 + for _, l := range observedLogs.All() { + if strings.Contains(l.Message, msg) { + i++ + if i >= count { + return true + } + } + } + return false + }) +} + +// SkipShort skips tb during -short runs, and notes why. +func SkipShort(tb testing.TB, why string) { + if testing.Short() { + tb.Skipf("skipping: %s", why) + } +} + +// SkipShortDB skips tb during -short runs, and notes the DB dependency. 
+func SkipShortDB(tb testing.TB) { + SkipShort(tb, "DB dependency") +} + +func AssertCount(t *testing.T, db *sqlx.DB, tableName string, expected int64) { + t.Helper() + var count int64 + err := db.Get(&count, fmt.Sprintf(`SELECT count(*) FROM %s;`, tableName)) + require.NoError(t, err) + require.Equal(t, expected, count) +} + +func NewTestFlagSet() *flag.FlagSet { + return flag.NewFlagSet("test", flag.PanicOnError) +} + +// Ptr takes pointer of anything +func Ptr[T any](v T) *T { + return &v +} + +func MustDecodeBase64(s string) (b []byte) { + var err error + b, err = base64.StdEncoding.DecodeString(s) + if err != nil { + panic(err) + } + return +} + +func SkipFlakey(t *testing.T, ticketURL string) { + t.Skip("Flakey", ticketURL) +} + +func MustRandBytes(n int) (b []byte) { + b = make([]byte, n) + _, err := rand.Read(b) + if err != nil { + panic(err) + } + return +} diff --git a/core/logger/audit/audit_logger.go b/core/logger/audit/audit_logger.go new file mode 100644 index 00000000..e376aa22 --- /dev/null +++ b/core/logger/audit/audit_logger.go @@ -0,0 +1,273 @@ +package audit + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "time" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +const bufferCapacity = 2048 +const webRequestTimeout = 10 + +type Data = map[string]any + +type AuditLogger interface { + services.Service + + Audit(eventID EventID, data Data) +} + +type HTTPAuditLoggerInterface interface { + Do(req *http.Request) (*http.Response, error) +} + +type AuditLoggerService struct { + logger logger.Logger // The standard logger configured in the node + enabled bool // Whether the audit logger is enabled or not + forwardToUrl commonconfig.URL // Location we are going 
to send logs to + headers []models.ServiceHeader // Headers to be sent along with logs for identification/authentication + jsonWrapperKey string // Wrap audit data as a map under this key if present + environmentName string // Decorate the environment this is coming from + hostname string // The self-reported hostname of the machine + localIP string // A non-loopback IP address as reported by the machine + loggingClient HTTPAuditLoggerInterface // Abstract type for sending logs onward + + loggingChannel chan wrappedAuditLog + chStop services.StopChan + chDone chan struct{} +} + +type wrappedAuditLog struct { + eventID EventID + data Data +} + +var NoopLogger AuditLogger = &AuditLoggerService{} + +// NewAuditLogger returns a buffer push system that ingests audit log events and +// asynchronously pushes them up to an HTTP log service. +// Parses and validates the AUDIT_LOGS_* environment values and returns an enabled +// AuditLogger instance. If the environment variables are not set, the logger +// is disabled and short circuits execution via enabled flag. +func NewAuditLogger(logger logger.Logger, config config.AuditLogger) (AuditLogger, error) { + // If the unverified config is nil, then we assume this came from the + // configuration system and return a nil logger. 
+	if config == nil || !config.Enabled() {
+		return &AuditLoggerService{}, nil
+	}
+
+	hostname, err := os.Hostname()
+	if err != nil {
+		return nil, fmt.Errorf("initialization error - unable to get hostname: %w", err)
+	}
+
+	// NOTE(review): errors from ForwardToUrl/Headers are swallowed below and a
+	// disabled logger is returned with a nil error — confirm silent disablement
+	// (rather than surfacing the config error) is intended.
+	forwardToUrl, err := config.ForwardToUrl()
+	if err != nil {
+		return &AuditLoggerService{}, nil
+	}
+
+	headers, err := config.Headers()
+	if err != nil {
+		return &AuditLoggerService{}, nil
+	}
+
+	loggingChannel := make(chan wrappedAuditLog, bufferCapacity)
+
+	// Create new AuditLoggerService
+	auditLogger := AuditLoggerService{
+		logger:          logger.Helper(1),
+		enabled:         true,
+		forwardToUrl:    forwardToUrl,
+		headers:         headers,
+		jsonWrapperKey:  config.JsonWrapperKey(),
+		environmentName: config.Environment(),
+		hostname:        hostname,
+		localIP:         getLocalIP(),
+		loggingClient:   &http.Client{Timeout: time.Second * webRequestTimeout},
+
+		loggingChannel: loggingChannel,
+		chStop:         make(chan struct{}),
+		chDone:         make(chan struct{}),
+	}
+
+	return &auditLogger, nil
+}
+
+// SetLoggingClient swaps the HTTP client used to ship logs; used by tests to
+// inject a mock transport.
+func (l *AuditLoggerService) SetLoggingClient(newClient HTTPAuditLoggerInterface) {
+	l.loggingClient = newClient
+}
+
+// Audit is the entrypoint for new audit logs. All incoming logs are buffered
+// on a channel and sent out by the goroutine that was started when the
+// AuditLoggerService was created. If this service was not enabled, this
+// immediately returns.
+//
+// This function never blocks: if the buffer is full the log is dropped and an
+// error is logged instead.
+func (l *AuditLoggerService) Audit(eventID EventID, data Data) {
+	if !l.enabled {
+		return
+	}
+
+	wrappedLog := wrappedAuditLog{
+		eventID: eventID,
+		data:    data,
+	}
+
+	select {
+	case l.loggingChannel <- wrappedLog:
+	default:
+		l.logger.Errorf("buffer is full. Dropping log with eventID: %s", eventID)
+	}
+}
+
+// Start the audit logger and begin processing logs on the channel
+func (l *AuditLoggerService) Start(context.Context) error {
+	if !l.enabled {
+		return errors.New("The audit logger is not enabled")
+	}
+
+	go l.runLoop()
+	return nil
+}
+
+// Close stops the logger: it closes the stop channel and waits for the run
+// loop goroutine to drain and exit. 
+func (l *AuditLoggerService) Close() error {
+	if !l.enabled {
+		return errors.New("The audit logger is not enabled")
+	}
+
+	l.logger.Warnf("Disabled the audit logger service")
+	close(l.chStop)
+	<-l.chDone
+
+	return nil
+}
+
+// Name reports the logger's name for the services framework.
+// NOTE(review): this dereferences l.logger, which is nil on the package-level
+// NoopLogger — calling Name/HealthReport on NoopLogger would panic; confirm
+// those are never invoked on the disabled instance.
+func (l *AuditLoggerService) Name() string {
+	return l.logger.Name()
+}
+
+// HealthReport reports unhealthy when disabled or when the buffered channel
+// has reached capacity (further Audit calls would drop logs).
+func (l *AuditLoggerService) HealthReport() map[string]error {
+	var err error
+	if !l.enabled {
+		err = errors.New("the audit logger is not enabled")
+	} else if len(l.loggingChannel) == bufferCapacity {
+		err = errors.New("buffer is full")
+	}
+	return map[string]error{l.Name(): err}
+}
+
+func (l *AuditLoggerService) Ready() error {
+	if !l.enabled {
+		return errors.New("the audit logger is not enabled")
+	}
+
+	return nil
+}
+
+// Entrypoint for our log handling goroutine. This waits on the channel and sends out
+// logs as they come in.
+//
+// This function calls postLogToLogService which blocks.
+func (l *AuditLoggerService) runLoop() {
+	defer close(l.chDone)
+
+	for {
+		select {
+		case <-l.chStop:
+			l.logger.Warn("The audit logger is shutting down")
+			return
+		case event := <-l.loggingChannel:
+			l.postLogToLogService(event.eventID, event.data)
+		}
+	}
+}
+
+// Takes an EventID and associated data and sends it to the configured logging
+// endpoint. This function blocks on the send but times out after a period of
+// several seconds. This helps us prevent getting stuck on a single log
+// due to transient network errors.
+//
+// This function blocks when called. 
+func (l *AuditLoggerService) postLogToLogService(eventID EventID, data Data) {
+	// Audit log JSON data
+	logItem := map[string]interface{}{
+		"eventID":  eventID,
+		"hostname": l.hostname,
+		"localIP":  l.localIP,
+		"env":      l.environmentName,
+		"data":     data,
+	}
+
+	// Optionally wrap audit log data into JSON object to help dynamically structure for an HTTP log service call
+	if l.jsonWrapperKey != "" {
+		logItem = map[string]interface{}{l.jsonWrapperKey: logItem}
+	}
+
+	serializedLog, err := json.Marshal(logItem)
+	if err != nil {
+		l.logger.Errorw("unable to serialize wrapped audit log item to JSON", "err", err, "logItem", logItem)
+		return
+	}
+	ctx, cancel := l.chStop.NewCtx()
+	defer cancel()
+
+	// Send to remote service
+	req, err := http.NewRequestWithContext(ctx, "POST", (*url.URL)(&l.forwardToUrl).String(), bytes.NewReader(serializedLog))
+	if err != nil {
+		l.logger.Error("failed to create request to remote logging service!")
+		// Must return: continuing with a nil req would panic on req.Header.Add below.
+		return
+	}
+	for _, header := range l.headers {
+		req.Header.Add(header.Header, header.Value)
+	}
+	resp, err := l.loggingClient.Do(req)
+	if err != nil {
+		l.logger.Errorw("failed to send audit log to HTTP log service", "err", err, "logItem", logItem)
+		return
+	}
+	// Close the body so the transport can reuse the connection. Guarded with a
+	// nil check because mocked HTTPAuditLoggerInterface implementations (see
+	// tests) may return a response with a nil Body.
+	if resp.Body != nil {
+		defer resp.Body.Close()
+	}
+	if resp.StatusCode != 200 {
+		if resp.Body == nil {
+			l.logger.Errorw("no body to read. 
Possibly an error occurred sending", "logItem", logItem) + return + } + + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + l.logger.Errorw("error reading errored HTTP log service webhook response body", "err", err, "logItem", logItem) + return + } + l.logger.Errorw("error sending log to HTTP log service", "statusCode", resp.StatusCode, "bodyString", string(bodyBytes)) + return + + } +} + +// getLocalIP returns the first non-loopback local IP of the host +func getLocalIP() string { + addrs, err := net.InterfaceAddrs() + if err != nil { + return "" + } + for _, address := range addrs { + // filter and return address types for first non loopback address + if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { + if ipnet.IP.To4() != nil { + return ipnet.IP.String() + } + } + } + return "" +} diff --git a/core/logger/audit/audit_logger_test.go b/core/logger/audit/audit_logger_test.go new file mode 100644 index 00000000..4feed8d3 --- /dev/null +++ b/core/logger/audit/audit_logger_test.go @@ -0,0 +1,146 @@ +package audit_test + +import ( + "encoding/json" + "flag" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +type MockedHTTPEvent struct { + body string +} + +type MockHTTPClient struct { + audit.HTTPAuditLoggerInterface + + loggingChannel chan MockedHTTPEvent +} + +type LoginData struct { + Email string `json:"email"` +} + +type LoginLogItem struct { + EventID string `json:"eventID"` + Env string `json:"env"` + Data LoginData `json:"data"` +} + +func (mock *MockHTTPClient) Do(req *http.Request) 
(*http.Response, error) { + b, err := io.ReadAll(req.Body) + + if err != nil { + return nil, err + } + + message := MockedHTTPEvent{ + body: string(b), + } + + mock.loggingChannel <- message + + return &http.Response{}, nil +} + +type Config struct{} + +func (c Config) Enabled() bool { + return true +} + +func (c Config) Environment() string { + return "test" +} + +func (c Config) ForwardToUrl() (commonconfig.URL, error) { + url, err := commonconfig.ParseURL("http://localhost:9898") + if err != nil { + return commonconfig.URL{}, err + } + return *url, nil +} + +func (c Config) Headers() (models.ServiceHeaders, error) { + return make(models.ServiceHeaders, 0), nil +} + +func (c Config) JsonWrapperKey() string { + return "" +} + +func TestCheckLoginAuditLog(t *testing.T) { + t.Parallel() + + // Create a channel that will be used instead of an HTTP client + loggingChannel := make(chan MockedHTTPEvent, 2048) + + // Create the mock structure that will be used + mockHTTPClient := MockHTTPClient{ + loggingChannel: loggingChannel, + } + + // Create a test logger because the audit logger relies on this logger + // as well + logger := logger.TestLogger(t) + + auditLoggerTestConfig := Config{} + + // Create new AuditLoggerService + auditLogger, err := audit.NewAuditLogger(logger.Named("AuditLogger"), &auditLoggerTestConfig) + assert.NoError(t, err) + + // Cast to concrete type so we can swap out the internals + auditLoggerService, ok := auditLogger.(*audit.AuditLoggerService) + assert.True(t, ok) + + // Swap the internals with a testing handler + auditLoggerService.SetLoggingClient(&mockHTTPClient) + assert.NoError(t, auditLoggerService.Ready()) + + // Create a new plugin test application passing in our test logger + // and audit logger + app := cltest.NewApplication(t, logger, auditLogger) + require.NoError(t, app.Start(testutils.Context(t))) + + enteredStrings := []string{cltest.APIEmailAdmin, cltest.Password} + prompter := &cltest.MockCountingPrompter{T: t, EnteredStrings: 
enteredStrings} + client := app.NewAuthenticatingShell(prompter) + + set := flag.NewFlagSet("test", 0) + set.Bool("bypass-version-check", true, "") + set.String("admin-credentials-file", "", "") + c := cli.NewContext(nil, set, nil) + + // Login + err = client.RemoteLogin(c) + assert.NoError(t, err) + + select { + case event := <-loggingChannel: + deserialized := &LoginLogItem{} + assert.NoError(t, json.Unmarshal([]byte(event.body), deserialized)) + + assert.Equal(t, deserialized.Data.Email, cltest.APIEmailAdmin) + assert.Equal(t, deserialized.Env, "test") + + assert.Equal(t, deserialized.EventID, "AUTH_LOGIN_SUCCESS_NO_2FA") + return + case <-time.After(5 * time.Second): + } + + assert.True(t, false) +} diff --git a/core/logger/audit/audit_types.go b/core/logger/audit/audit_types.go new file mode 100644 index 00000000..8ce2017f --- /dev/null +++ b/core/logger/audit/audit_types.go @@ -0,0 +1,91 @@ +package audit + +type EventID string + +// Static audit log event type constants +const ( + AuthLoginFailedEmail EventID = "AUTH_LOGIN_FAILED_EMAIL" + AuthLoginFailedPassword EventID = "AUTH_LOGIN_FAILED_PASSWORD" + AuthLoginFailed2FA EventID = "AUTH_LOGIN_FAILED_2FA" + AuthLoginSuccessWith2FA EventID = "AUTH_LOGIN_SUCCESS_WITH_2FA" + AuthLoginSuccessNo2FA EventID = "AUTH_LOGIN_SUCCESS_NO_2FA" + Auth2FAEnrolled EventID = "AUTH_2FA_ENROLLED" + AuthSessionDeleted EventID = "SESSION_DELETED" + + PasswordResetAttemptFailedMismatch EventID = "PASSWORD_RESET_ATTEMPT_FAILED_MISMATCH" + PasswordResetSuccess EventID = "PASSWORD_RESET_SUCCESS" + + APITokenCreateAttemptPasswordMismatch EventID = "API_TOKEN_CREATE_ATTEMPT_PASSWORD_MISMATCH" + APITokenCreated EventID = "API_TOKEN_CREATED" + APITokenDeleteAttemptPasswordMismatch EventID = "API_TOKEN_DELETE_ATTEMPT_PASSWORD_MISMATCH" + APITokenDeleted EventID = "API_TOKEN_DELETED" + + FeedsManCreated EventID = "FEEDS_MAN_CREATED" + FeedsManUpdated EventID = "FEEDS_MAN_UPDATED" + + FeedsManChainConfigCreated EventID = 
"FEEDS_MAN_CHAIN_CONFIG_CREATED" + FeedsManChainConfigUpdated EventID = "FEEDS_MAN_CHAIN_CONFIG_UPDATED" + FeedsManChainConfigDeleted EventID = "FEEDS_MAN_CHAIN_CONFIG_DELETED" + + CSAKeyCreated EventID = "CSA_KEY_CREATED" + CSAKeyImported EventID = "CSA_KEY_IMPORTED" + CSAKeyExported EventID = "CSA_KEY_EXPORTED" + CSAKeyDeleted EventID = "CSA_KEY_DELETED" + + OCRKeyBundleCreated EventID = "OCR_KEY_BUNDLE_CREATED" + OCRKeyBundleImported EventID = "OCR_KEY_BUNDLE_IMPORTED" + OCRKeyBundleExported EventID = "OCR_KEY_BUNDLE_EXPORTED" + OCRKeyBundleDeleted EventID = "OCR_KEY_BUNDLE_DELETED" + + OCR2KeyBundleCreated EventID = "OCR2_KEY_BUNDLE_CREATED" + OCR2KeyBundleImported EventID = "OCR2_KEY_BUNDLE_IMPORTED" + OCR2KeyBundleExported EventID = "OCR2_KEY_BUNDLE_EXPORTED" + OCR2KeyBundleDeleted EventID = "OCR2_KEY_BUNDLE_DELETED" + + KeyCreated EventID = "KEY_CREATED" + KeyUpdated EventID = "KEY_UPDATED" + KeyImported EventID = "KEY_IMPORTED" + KeyExported EventID = "KEY_EXPORTED" + KeyDeleted EventID = "KEY_DELETED" + + EthTransactionCreated EventID = "ETH_TRANSACTION_CREATED" + CosmosTransactionCreated EventID = "COSMOS_TRANSACTION_CREATED" + SolanaTransactionCreated EventID = "SOLANA_TRANSACTION_CREATED" + + JobCreated EventID = "JOB_CREATED" + JobDeleted EventID = "JOB_DELETED" + + ChainAdded EventID = "CHAIN_ADDED" + ChainSpecUpdated EventID = "CHAIN_SPEC_UPDATED" + ChainDeleted EventID = "CHAIN_DELETED" + + ChainRpcNodeAdded EventID = "CHAIN_RPC_NODE_ADDED" + ChainRpcNodeDeleted EventID = "CHAIN_RPC_NODE_DELETED" + + BridgeCreated EventID = "BRIDGE_CREATED" + BridgeUpdated EventID = "BRIDGE_UPDATED" + BridgeDeleted EventID = "BRIDGE_DELETED" + + ForwarderCreated EventID = "FORWARDER_CREATED" + ForwarderDeleted EventID = "FORWARDER_DELETED" + + ExternalInitiatorCreated EventID = "EXTERNAL_INITIATOR_CREATED" + ExternalInitiatorDeleted EventID = "EXTERNAL_INITIATOR_DELETED" + + JobProposalSpecApproved EventID = "JOB_PROPOSAL_SPEC_APPROVED" + JobProposalSpecUpdated 
EventID = "JOB_PROPOSAL_SPEC_UPDATED" + JobProposalSpecCanceled EventID = "JOB_PROPOSAL_SPEC_CANCELED" + JobProposalSpecRejected EventID = "JOB_PROPOSAL_SPEC_REJECTED" + + ConfigUpdated EventID = "CONFIG_UPDATED" + ConfigSqlLoggingEnabled EventID = "CONFIG_SQL_LOGGING_ENABLED" + ConfigSqlLoggingDisabled EventID = "CONFIG_SQL_LOGGING_DISABLED" + GlobalLogLevelSet EventID = "GLOBAL_LOG_LEVEL_SET" + + JobErrorDismissed EventID = "JOB_ERROR_DISMISSED" + JobRunSet EventID = "JOB_RUN_SET" + + EnvNoncriticalEnvDumped EventID = "ENV_NONCRITICAL_ENV_DUMPED" + + UnauthedRunResumed EventID = "UNAUTHED_RUN_RESUMED" +) diff --git a/core/logger/critical.go b/core/logger/critical.go new file mode 100644 index 00000000..7904b189 --- /dev/null +++ b/core/logger/critical.go @@ -0,0 +1,25 @@ +package logger + +import "go.uber.org/zap/zapcore" + +// encodeLevel is a zapcore.EncodeLevel that encodes 'crit' in place of dpanic for our custom Critical* level. +func encodeLevel(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) { + if l == zapcore.DPanicLevel { + enc.AppendString("crit") + } else { + zapcore.LowercaseLevelEncoder(l, enc) + } +} + +func (l *zapLogger) Critical(args ...interface{}) { + // DPanic is used for the appropriate numerical level (between error and panic), but we never actually panic. + l.sugaredHelper(1).DPanic(args...) +} + +func (l *zapLogger) Criticalf(format string, values ...interface{}) { + l.sugaredHelper(1).DPanicf(format, values...) +} + +func (l *zapLogger) Criticalw(msg string, keysAndValues ...interface{}) { + l.sugaredHelper(1).DPanicw(msg, keysAndValues...) +} diff --git a/core/logger/disk_stats.go b/core/logger/disk_stats.go new file mode 100644 index 00000000..984e4d99 --- /dev/null +++ b/core/logger/disk_stats.go @@ -0,0 +1,20 @@ +package logger + +import ( + "github.com/shirou/gopsutil/v3/disk" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// diskSpaceAvailableFn is used for testing to replace the default diskSpaceAvailable. 
+type diskSpaceAvailableFn func(path string) (utils.FileSize, error)
+
+// diskSpaceAvailable returns the available/free disk space in the requested `path`. Returns an error if it fails to find the path.
+func diskSpaceAvailable(path string) (utils.FileSize, error) {
+	diskUsage, err := disk.Usage(path)
+	if err != nil {
+		return 0, err
+	}
+
+	return utils.FileSize(diskUsage.Free), nil
+}
diff --git a/core/logger/disk_stats_test.go b/core/logger/disk_stats_test.go
new file mode 100644
index 00000000..ee0e5bbf
--- /dev/null
+++ b/core/logger/disk_stats_test.go
+package logger
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// Test_diskSpaceAvailable verifies a valid path ("." ) reports a nonzero free
+// size and that an empty path yields an error.
+func Test_diskSpaceAvailable(t *testing.T) {
+	t.Parallel()
+
+	size, err := diskSpaceAvailable(".")
+	assert.NoError(t, err)
+	assert.NotZero(t, size)
+
+	_, err = diskSpaceAvailable("")
+	assert.Error(t, err)
+}
diff --git a/core/logger/fields.go b/core/logger/fields.go
new file mode 100644
index 00000000..b2fc2353
--- /dev/null
+++ b/core/logger/fields.go
+package logger
+
+// Fields is a set of structured-logging key/value pairs keyed by string.
+type Fields map[string]interface{}
+
+// With returns a copy of f extended with the given alternating key/value
+// arguments. It panics if xs has an odd length; entries whose key is not a
+// string are silently skipped. The receiver is never modified.
+func (f Fields) With(xs ...interface{}) Fields {
+	if len(xs)%2 != 0 {
+		panic("expected even number of arguments")
+	}
+	f2 := make(Fields, len(f)+(len(xs)/2))
+	for k, v := range f {
+		f2[k] = v
+	}
+	for i := 0; i < len(xs)/2; i++ {
+		key, is := xs[i*2].(string)
+		if !is {
+			// non-string key: drop this pair rather than panic
+			continue
+		}
+		val := xs[i*2+1]
+		f2[key] = val
+	}
+	return f2
+}
+
+// Merge returns a new Fields containing every entry of f and f2. On duplicate
+// keys the value from f2 wins (it is copied last). Neither input is modified.
+func (f Fields) Merge(f2 Fields) Fields {
+	f3 := make(Fields, len(f)+len(f2))
+	for k, v := range f {
+		f3[k] = v
+	}
+	for k, v := range f2 {
+		f3[k] = v
+	}
+	return f3
+}
+
+// Slice flattens f into an alternating key/value slice (k1, v1, k2, v2, ...)
+// of length 2*len(f). Ordering follows Go map iteration and is unspecified.
+func (f Fields) Slice() []interface{} {
+	s := make([]interface{}, len(f)*2)
+	var i int
+	for k, v := range f {
+		s[i*2] = k
+		s[i*2+1] = v
+		i++
+	}
+	return s
+}
diff --git a/core/logger/fields_test.go b/core/logger/fields_test.go
new file mode 100644
index 00000000..95a692a3
--- /dev/null
+++ b/core/logger/fields_test.go
+package logger_test
+
+import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func TestFields_Merge(t *testing.T) { + t.Parallel() + + f1 := make(logger.Fields) + f1["key1"] = "value1" + f2 := make(logger.Fields) + f2["key2"] = "value2" + + merged := f1.Merge(f2) + assert.Len(t, merged, 2) + + v1, ok1 := merged["key1"] + assert.True(t, ok1) + assert.Equal(t, "value1", v1) + + v2, ok2 := merged["key2"] + assert.True(t, ok2) + assert.Equal(t, "value2", v2) + + t.Run("self merge", func(t *testing.T) { + t.Parallel() + + merged := f1.Merge(f1) + assert.Len(t, merged, 1) + assert.Equal(t, f1, merged) + }) +} + +func TestFields_Slice(t *testing.T) { + t.Parallel() + + f := make(logger.Fields) + f["str"] = "foo" + f["int"] = 123 + + s := f.Slice() + assert.Len(t, s, 4) + for i := 0; i < len(s); i += 2 { + switch s[i] { + case "int": + assert.Equal(t, 123, s[i+1]) + case "str": + assert.Equal(t, "foo", s[i+1]) + } + } + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + empty := make(logger.Fields) + assert.Empty(t, empty.Slice()) + }) +} + +func TestFields_With(t *testing.T) { + t.Parallel() + + f := make(logger.Fields) + f["str"] = "foo" + f["int"] = 123 + + w := f.With("bool", true, "float", 3.14) + assert.Len(t, w, 4) + + t.Run("single", func(t *testing.T) { + t.Parallel() + + assert.Panics(t, func() { + //lint:ignore SA5012 we expect panic here + _ = f.With("xyz") + }, "expected even number of arguments") + }) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + empty := make(logger.Fields).With() + assert.Empty(t, empty) + }) +} diff --git a/core/logger/internal/colortest/prettyconsole_test.go b/core/logger/internal/colortest/prettyconsole_test.go new file mode 100644 index 00000000..bbe5c22f --- /dev/null +++ b/core/logger/internal/colortest/prettyconsole_test.go @@ -0,0 +1,99 @@ +package colortest + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + 
"github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func init() { + logger.InitColor(true) +} + +func TestPrettyConsole_Write(t *testing.T) { + tests := []struct { + name string + input string + want string + wantError bool + }{ + { + "debug", + `{"ts":1523537728, "level":"debug", "msg":"top level", "details":"nuances"}`, + "2018-04-12T12:55:28Z \x1b[32m[DEBUG] \x1b[0mtop level \x1b[34m\x1b[0m \x1b[32mdetails\x1b[0m=nuances \n", + false, + }, + { + "info", + `{"ts":1523537728.7260377, "level":"info", "msg":"top level"}`, + "2018-04-12T12:55:28Z \x1b[37m[INFO] \x1b[0mtop level \x1b[34m\x1b[0m \n", + false, + }, + { + "warn", + `{"ts":1523537728, "level":"warn", "msg":"top level", "hash":"nuances"}`, + "2018-04-12T12:55:28Z \x1b[33m[WARN] \x1b[0mtop level \x1b[34m\x1b[0m \n", + false, + }, + { + "error", + `{"ts":1523537728, "level":"error", "msg":"top level", "hash":"nuances"}`, + "2018-04-12T12:55:28Z \x1b[31m[ERROR] \x1b[0mtop level \x1b[34m\x1b[0m \n", + false, + }, + { + "critical", + `{"ts":1523537728, "level":"crit", "msg":"top level", "hash":"nuances"}`, + "2018-04-12T12:55:28Z \x1b[91m[CRIT] \x1b[0mtop level \x1b[34m\x1b[0m \n", + false, + }, + { + "panic", + `{"ts":1523537728, "level":"panic", "msg":"top level", "hash":"nuances"}`, + "2018-04-12T12:55:28Z \x1b[91m[PANIC] \x1b[0mtop level \x1b[34m\x1b[0m \n", + false, + }, + { + "fatal", + `{"ts":1523537728, "level":"fatal", "msg":"top level", "hash":"nuances"}`, + "2018-04-12T12:55:28Z \x1b[91m[FATAL] \x1b[0mtop level \x1b[34m\x1b[0m \n", + false, + }, + { + "control", + `{"ts":1523537728, "level":"fatal", "msg":"\u0008\t\n\r\u000b\u000c\ufffd\ufffd", "hash":"nuances"}`, + "2018-04-12T12:55:28Z \x1b[91m[FATAL] \x1b[0m\\b\t\n\r\\v\\f�� \x1b[34m\x1b[0m \n", + false, + }, + {"broken", `{"broken":}`, `{}`, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tr := &testReader{} + pc := logger.PrettyConsole{Sink: tr} + _, err := pc.Write([]byte(tt.input)) + + if tt.wantError { + 
assert.Error(t, err) + } else { + t.Log(tr.Written) + assert.Equal(t, tt.want, tr.Written) + } + }) + } +} + +type testReader struct { + Written string +} + +func (*testReader) Sync() error { return nil } +func (*testReader) Close() error { return nil } + +func (tr *testReader) Write(b []byte) (int, error) { + tr.Written = string(b) + return 0, nil +} diff --git a/core/logger/logger.go b/core/logger/logger.go new file mode 100644 index 00000000..bdf858d0 --- /dev/null +++ b/core/logger/logger.go @@ -0,0 +1,311 @@ +package logger + +import ( + "errors" + "fmt" + "log" + "os" + "path/filepath" + + "github.com/fatih/color" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" + + common "github.com/goplugin/plugin-common/pkg/logger" + + "github.com/goplugin/pluginv3.0/v2/core/static" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// logsFile describes the logs file name +const logsFile = "plugin_debug.log" + +// Create a standard error writer to avoid test issues around os.Stderr being +// reassigned when verbose logging is enabled +type stderrWriter struct{} + +func (sw stderrWriter) Write(p []byte) (n int, err error) { + return os.Stderr.Write(p) +} +func (sw stderrWriter) Close() error { + return nil // never close stderr +} +func (sw stderrWriter) Sync() error { + return os.Stderr.Sync() +} + +func init() { + err := zap.RegisterSink("pretty", prettyConsoleSink(stderrWriter{})) + if err != nil { + log.Fatalf("failed to register pretty printer %+v", err) + } + err = registerOSSinks() + if err != nil { + log.Fatalf("failed to register os specific sinks %+v", err) + } + if os.Getenv("CL_LOG_COLOR") != "true" { + InitColor(false) + } +} + +var _ common.Logger = (Logger)(nil) + +//go:generate mockery --quiet --name Logger --output . --filename logger_mock_test.go --inpackage --case=underscore +//go:generate mockery --quiet --name Logger --output ./mocks/ --case=underscore + +// Logger is the main interface of this package. 
+// It implements uber/zap's SugaredLogger interface and adds conditional logging helpers. +// +// Loggers should be injected (and usually Named as well): e.g. lggr.Named("") +// +// Tests +// - Tests should use a TestLogger, with NewLogger being reserved for actual +// runtime and limited direct testing. +// +// Levels +// - Fatal: Logs and then calls os.Exit(1). Be careful about using this since it does NOT unwind the stack and may exit uncleanly. +// - Panic: Unrecoverable error. Example: invariant violation, programmer error +// - Critical: Requires quick action from the node op, obviously these should happen extremely rarely. Example: failed to listen on TCP port +// - Error: Something bad happened, and it was clearly on the node op side. No need for immediate action though. Example: database write timed out +// - Warn: Something bad happened, not clear who/what is at fault. Node ops should have a rough look at these once in a while to see whether anything stands out. Example: connection to peer was closed unexpectedly. observation timed out. +// - Info: High level information. First level we’d expect node ops to look at. Example: entered new epoch with leader, made an observation with value, etc. +// - Debug: Useful for forensic debugging, but we don't expect nops to look at this. Example: Got a message, dropped a message, ... +// - Trace: Only included if compiled with the trace tag. For example: go test -tags trace ... +// +// Node Operator Docs: https://docs.chain.link/docs/configuration-variables/#log_level +type Logger interface { + // With creates a new Logger with the given arguments + With(args ...interface{}) Logger + // Named creates a new Logger sub-scoped with name. + // Names are inherited and dot-separated. + // a := l.Named("A") // logger=A + // b := a.Named("A") // logger=A.B + // Names are generally `MixedCaps`, without spaces, like Go names. 
+ Named(name string) Logger + + // SetLogLevel changes the log level for this and all connected Loggers. + SetLogLevel(zapcore.Level) + + Trace(args ...interface{}) + Debug(args ...interface{}) + Info(args ...interface{}) + Warn(args ...interface{}) + Error(args ...interface{}) + Critical(args ...interface{}) + Panic(args ...interface{}) + // Fatal logs and then calls os.Exit(1) + // Be careful about using this since it does NOT unwind the stack and may + // exit uncleanly + Fatal(args ...interface{}) + + Tracef(format string, values ...interface{}) + Debugf(format string, values ...interface{}) + Infof(format string, values ...interface{}) + Warnf(format string, values ...interface{}) + Errorf(format string, values ...interface{}) + Criticalf(format string, values ...interface{}) + Panicf(format string, values ...interface{}) + Fatalf(format string, values ...interface{}) + + Tracew(msg string, keysAndValues ...interface{}) + Debugw(msg string, keysAndValues ...interface{}) + Infow(msg string, keysAndValues ...interface{}) + Warnw(msg string, keysAndValues ...interface{}) + Errorw(msg string, keysAndValues ...interface{}) + Criticalw(msg string, keysAndValues ...interface{}) + Panicw(msg string, keysAndValues ...interface{}) + Fatalw(msg string, keysAndValues ...interface{}) + + // Sync flushes any buffered log entries. + // Some insignificant errors are suppressed. + Sync() error + + // Helper creates a new logger with the number of callers skipped by caller annotation increased by skip. + // This allows wrappers and helpers to point higher up the stack (like testing.T.Helper()). + Helper(skip int) Logger + + // Name returns the fully qualified name of the logger. + Name() string + + // Recover reports recovered panics; this is useful because it avoids + // double-reporting to sentry + Recover(panicErr interface{}) +} + +// newZapConfigProd returns a new production zap.Config. 
+func newZapConfigProd(jsonConsole bool, unixTS bool) zap.Config { + config := newZapConfigBase() + if !unixTS { + config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + } + if !jsonConsole { + config.OutputPaths = []string{"pretty://console"} + } + return config +} + +func verShaNameStatic() string { + sha, ver := static.Short() + return fmt.Sprintf("%s@%s", ver, sha) +} + +// NewLogger returns a new Logger with default configuration. +// Tests should use TestLogger. +func NewLogger() (Logger, func() error) { + var c Config + return c.New() +} + +type Config struct { + LogLevel zapcore.Level + Dir string + JsonConsole bool + UnixTS bool + FileMaxSizeMB int + FileMaxAgeDays int + FileMaxBackups int // files + + diskSpaceAvailableFn diskSpaceAvailableFn + diskPollConfig zapDiskPollConfig + // This is for tests only + testDiskLogLvlChan chan zapcore.Level +} + +// New returns a new Logger with pretty printing to stdout, prometheus counters, and sentry forwarding. +// Tests should use TestLogger. 
+func (c *Config) New() (Logger, func() error) { + if c.diskSpaceAvailableFn == nil { + c.diskSpaceAvailableFn = diskSpaceAvailable + } + if !c.diskPollConfig.isSet() { + c.diskPollConfig = newDiskPollConfig(diskPollInterval) + } + + cfg := newZapConfigProd(c.JsonConsole, c.UnixTS) + cfg.Level.SetLevel(c.LogLevel) + var ( + l Logger + closeLogger func() error + err error + ) + if !c.DebugLogsToDisk() { + l, closeLogger, err = newDefaultLogger(cfg, c.UnixTS) + } else { + l, closeLogger, err = newRotatingFileLogger(cfg, *c) + } + if err != nil { + log.Fatal(err) + } + + l = newSentryLogger(l) + l = newPrometheusLogger(l) + l = l.With("version", verShaNameStatic()) + return l, closeLogger +} + +// DebugLogsToDisk returns whether debug logs should be stored in disk +func (c Config) DebugLogsToDisk() bool { + return c.FileMaxSizeMB > 0 +} + +// RequiredDiskSpace returns the required disk space in order to allow debug logs to be stored in disk +func (c Config) RequiredDiskSpace() utils.FileSize { + return utils.FileSize(c.FileMaxSizeMB * utils.MB * (c.FileMaxBackups + 1)) +} + +func (c *Config) DiskSpaceAvailable(path string) (utils.FileSize, error) { + if c.diskSpaceAvailableFn == nil { + c.diskSpaceAvailableFn = diskSpaceAvailable + } + + return c.diskSpaceAvailableFn(path) +} + +func (c Config) LogsFile() string { + return filepath.Join(c.Dir, logsFile) +} + +// InitColor explicitly sets the global color.NoColor option. +// Not safe for concurrent use. Only to be called from init(). +func InitColor(c bool) { + color.NoColor = !c +} + +// newZapConfigBase returns a zap.NewProductionConfig with sampling disabled and a modified level encoder. 
+func newZapConfigBase() zap.Config { + cfg := zap.NewProductionConfig() + cfg.Sampling = nil + cfg.EncoderConfig.EncodeLevel = encodeLevel + return cfg +} + +func newDefaultLogger(zcfg zap.Config, unixTS bool) (Logger, func() error, error) { + core, coreCloseFn, err := newDefaultLoggingCore(zcfg, unixTS) + if err != nil { + return nil, nil, err + } + + l, loggerCloseFn, err := newLoggerForCore(zcfg, core) + if err != nil { + coreCloseFn() + return nil, nil, err + } + + return l, func() error { + coreCloseFn() + loggerCloseFn() + return nil + }, nil +} + +func newLoggerForCore(zcfg zap.Config, core zapcore.Core) (*zapLogger, func(), error) { + errSink, closeFn, err := zap.Open(zcfg.ErrorOutputPaths...) + if err != nil { + return nil, nil, err + } + + return &zapLogger{ + level: zcfg.Level, + SugaredLogger: zap.New(core, zap.ErrorOutput(errSink), zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel)).Sugar(), + }, closeFn, nil +} + +func newDefaultLoggingCore(zcfg zap.Config, unixTS bool) (zapcore.Core, func(), error) { + encoder := zapcore.NewJSONEncoder(makeEncoderConfig(unixTS)) + + sink, closeOut, err := zap.Open(zcfg.OutputPaths...) 
+ if err != nil { + return nil, nil, err + } + + if zcfg.Level == (zap.AtomicLevel{}) { + return nil, nil, errors.New("missing Level") + } + + filteredLogLevels := zap.LevelEnablerFunc(zcfg.Level.Enabled) + + core := zapcore.NewCore(encoder, sink, filteredLogLevels) + return core, closeOut, nil +} + +func newDiskCore(diskLogLevel zap.AtomicLevel, local Config) (zapcore.Core, error) { + diskUsage, err := local.DiskSpaceAvailable(local.Dir) + if err != nil || diskUsage < local.RequiredDiskSpace() { + diskLogLevel.SetLevel(disabledLevel) + } + + var ( + encoder = zapcore.NewConsoleEncoder(makeEncoderConfig(local.UnixTS)) + sink = zapcore.AddSync(&lumberjack.Logger{ + Filename: local.logFileURI(), + MaxSize: local.FileMaxSizeMB, + MaxAge: local.FileMaxAgeDays, + MaxBackups: local.FileMaxBackups, + Compress: true, + }) + allLogLevels = zap.LevelEnablerFunc(diskLogLevel.Enabled) + ) + + return zapcore.NewCore(encoder, sink, allLogLevels), nil +} diff --git a/core/logger/logger_mock_test.go b/core/logger/logger_mock_test.go new file mode 100644 index 00000000..afddd031 --- /dev/null +++ b/core/logger/logger_mock_test.go @@ -0,0 +1,319 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package logger + +import ( + mock "github.com/stretchr/testify/mock" + zapcore "go.uber.org/zap/zapcore" +) + +// MockLogger is an autogenerated mock type for the Logger type +type MockLogger struct { + mock.Mock +} + +// Critical provides a mock function with given fields: args +func (_m *MockLogger) Critical(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Criticalf provides a mock function with given fields: format, values +func (_m *MockLogger) Criticalf(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) 
+} + +// Criticalw provides a mock function with given fields: msg, keysAndValues +func (_m *MockLogger) Criticalw(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) +} + +// Debug provides a mock function with given fields: args +func (_m *MockLogger) Debug(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Debugf provides a mock function with given fields: format, values +func (_m *MockLogger) Debugf(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Debugw provides a mock function with given fields: msg, keysAndValues +func (_m *MockLogger) Debugw(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) +} + +// Error provides a mock function with given fields: args +func (_m *MockLogger) Error(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Errorf provides a mock function with given fields: format, values +func (_m *MockLogger) Errorf(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Errorw provides a mock function with given fields: msg, keysAndValues +func (_m *MockLogger) Errorw(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) +} + +// Fatal provides a mock function with given fields: args +func (_m *MockLogger) Fatal(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) 
+} + +// Fatalf provides a mock function with given fields: format, values +func (_m *MockLogger) Fatalf(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Fatalw provides a mock function with given fields: msg, keysAndValues +func (_m *MockLogger) Fatalw(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) +} + +// Helper provides a mock function with given fields: skip +func (_m *MockLogger) Helper(skip int) Logger { + ret := _m.Called(skip) + + if len(ret) == 0 { + panic("no return value specified for Helper") + } + + var r0 Logger + if rf, ok := ret.Get(0).(func(int) Logger); ok { + r0 = rf(skip) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(Logger) + } + } + + return r0 +} + +// Info provides a mock function with given fields: args +func (_m *MockLogger) Info(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Infof provides a mock function with given fields: format, values +func (_m *MockLogger) Infof(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Infow provides a mock function with given fields: msg, keysAndValues +func (_m *MockLogger) Infow(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) 
+} + +// Name provides a mock function with given fields: +func (_m *MockLogger) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Named provides a mock function with given fields: name +func (_m *MockLogger) Named(name string) Logger { + ret := _m.Called(name) + + if len(ret) == 0 { + panic("no return value specified for Named") + } + + var r0 Logger + if rf, ok := ret.Get(0).(func(string) Logger); ok { + r0 = rf(name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(Logger) + } + } + + return r0 +} + +// Panic provides a mock function with given fields: args +func (_m *MockLogger) Panic(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Panicf provides a mock function with given fields: format, values +func (_m *MockLogger) Panicf(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Panicw provides a mock function with given fields: msg, keysAndValues +func (_m *MockLogger) Panicw(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) 
+} + +// Recover provides a mock function with given fields: panicErr +func (_m *MockLogger) Recover(panicErr interface{}) { + _m.Called(panicErr) +} + +// SetLogLevel provides a mock function with given fields: _a0 +func (_m *MockLogger) SetLogLevel(_a0 zapcore.Level) { + _m.Called(_a0) +} + +// Sync provides a mock function with given fields: +func (_m *MockLogger) Sync() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Sync") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Trace provides a mock function with given fields: args +func (_m *MockLogger) Trace(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Tracef provides a mock function with given fields: format, values +func (_m *MockLogger) Tracef(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Tracew provides a mock function with given fields: msg, keysAndValues +func (_m *MockLogger) Tracew(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) +} + +// Warn provides a mock function with given fields: args +func (_m *MockLogger) Warn(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Warnf provides a mock function with given fields: format, values +func (_m *MockLogger) Warnf(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Warnw provides a mock function with given fields: msg, keysAndValues +func (_m *MockLogger) Warnw(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) 
+} + +// With provides a mock function with given fields: args +func (_m *MockLogger) With(args ...interface{}) Logger { + var _ca []interface{} + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for With") + } + + var r0 Logger + if rf, ok := ret.Get(0).(func(...interface{}) Logger); ok { + r0 = rf(args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(Logger) + } + } + + return r0 +} + +// NewMockLogger creates a new instance of MockLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockLogger(t interface { + mock.TestingT + Cleanup(func()) +}) *MockLogger { + mock := &MockLogger{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/logger/logger_test.go b/core/logger/logger_test.go new file mode 100644 index 00000000..dc7558a5 --- /dev/null +++ b/core/logger/logger_test.go @@ -0,0 +1,30 @@ +package logger + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestConfig(t *testing.T) { + // no sampling + assert.Nil(t, newZapConfigBase().Sampling) + assert.Nil(t, newZapConfigProd(false, false).Sampling) + + // not development, which would trigger panics for Critical level + assert.False(t, newZapConfigBase().Development) + assert.False(t, newZapConfigProd(false, false).Development) +} + +func TestStderrWriter(t *testing.T) { + sw := stderrWriter{} + + // Test Write + n, err := sw.Write([]byte("Hello, World!")) + assert.NoError(t, err) + assert.Equal(t, 13, n, "Expected 13 bytes written") + + // Test Close + err = sw.Close() + assert.NoError(t, err) +} diff --git a/core/logger/logger_unix.go b/core/logger/logger_unix.go new file mode 100644 index 00000000..77034468 --- /dev/null +++ b/core/logger/logger_unix.go @@ -0,0 +1,16 @@ +//go:build !windows +// +build !windows + +package logger + 
+import "path/filepath" + +func registerOSSinks() error { + return nil +} + +// logFileURI returns the full path to the file the +// NewLogger logs to, and uses zap's built in default file sink. +func (c Config) logFileURI() string { + return filepath.ToSlash(c.LogsFile()) +} diff --git a/core/logger/logger_windows.go b/core/logger/logger_windows.go new file mode 100644 index 00000000..d067bcd2 --- /dev/null +++ b/core/logger/logger_windows.go @@ -0,0 +1,30 @@ +//go:build windows +// +build windows + +package logger + +import ( + "net/url" + "os" + "path/filepath" + + "go.uber.org/zap" +) + +// logFileURI returns the scheme and path to the log file for the passed +// directory, with a custom scheme winfile:/// specifically tailored for +// Windows to get around their handling of the file:// schema in uber.org/zap. +// https://github.com/uber-go/zap/issues/621 +func (c Config) logFileURI() string { + return "winfile:///" + filepath.ToSlash(c.LogsFile()) +} + +func registerOSSinks() error { + return zap.RegisterSink("winfile", newWinFileSink) +} + +func newWinFileSink(u *url.URL) (zap.Sink, error) { + // https://github.com/uber-go/zap/issues/621 + // Remove leading slash left by url.Parse() + return os.OpenFile(u.Path[1:], os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) +} diff --git a/core/logger/mocks/logger.go b/core/logger/mocks/logger.go new file mode 100644 index 00000000..2d35df7a --- /dev/null +++ b/core/logger/mocks/logger.go @@ -0,0 +1,321 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + logger "github.com/goplugin/pluginv3.0/v2/core/logger" + mock "github.com/stretchr/testify/mock" + + zapcore "go.uber.org/zap/zapcore" +) + +// Logger is an autogenerated mock type for the Logger type +type Logger struct { + mock.Mock +} + +// Critical provides a mock function with given fields: args +func (_m *Logger) Critical(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) 
+} + +// Criticalf provides a mock function with given fields: format, values +func (_m *Logger) Criticalf(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Criticalw provides a mock function with given fields: msg, keysAndValues +func (_m *Logger) Criticalw(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) +} + +// Debug provides a mock function with given fields: args +func (_m *Logger) Debug(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Debugf provides a mock function with given fields: format, values +func (_m *Logger) Debugf(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Debugw provides a mock function with given fields: msg, keysAndValues +func (_m *Logger) Debugw(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) +} + +// Error provides a mock function with given fields: args +func (_m *Logger) Error(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Errorf provides a mock function with given fields: format, values +func (_m *Logger) Errorf(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Errorw provides a mock function with given fields: msg, keysAndValues +func (_m *Logger) Errorw(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) 
+} + +// Fatal provides a mock function with given fields: args +func (_m *Logger) Fatal(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Fatalf provides a mock function with given fields: format, values +func (_m *Logger) Fatalf(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Fatalw provides a mock function with given fields: msg, keysAndValues +func (_m *Logger) Fatalw(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) +} + +// Helper provides a mock function with given fields: skip +func (_m *Logger) Helper(skip int) logger.Logger { + ret := _m.Called(skip) + + if len(ret) == 0 { + panic("no return value specified for Helper") + } + + var r0 logger.Logger + if rf, ok := ret.Get(0).(func(int) logger.Logger); ok { + r0 = rf(skip) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(logger.Logger) + } + } + + return r0 +} + +// Info provides a mock function with given fields: args +func (_m *Logger) Info(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Infof provides a mock function with given fields: format, values +func (_m *Logger) Infof(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Infow provides a mock function with given fields: msg, keysAndValues +func (_m *Logger) Infow(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) 
+} + +// Name provides a mock function with given fields: +func (_m *Logger) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Named provides a mock function with given fields: name +func (_m *Logger) Named(name string) logger.Logger { + ret := _m.Called(name) + + if len(ret) == 0 { + panic("no return value specified for Named") + } + + var r0 logger.Logger + if rf, ok := ret.Get(0).(func(string) logger.Logger); ok { + r0 = rf(name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(logger.Logger) + } + } + + return r0 +} + +// Panic provides a mock function with given fields: args +func (_m *Logger) Panic(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Panicf provides a mock function with given fields: format, values +func (_m *Logger) Panicf(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Panicw provides a mock function with given fields: msg, keysAndValues +func (_m *Logger) Panicw(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) 
+} + +// Recover provides a mock function with given fields: panicErr +func (_m *Logger) Recover(panicErr interface{}) { + _m.Called(panicErr) +} + +// SetLogLevel provides a mock function with given fields: _a0 +func (_m *Logger) SetLogLevel(_a0 zapcore.Level) { + _m.Called(_a0) +} + +// Sync provides a mock function with given fields: +func (_m *Logger) Sync() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Sync") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Trace provides a mock function with given fields: args +func (_m *Logger) Trace(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Tracef provides a mock function with given fields: format, values +func (_m *Logger) Tracef(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Tracew provides a mock function with given fields: msg, keysAndValues +func (_m *Logger) Tracew(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) +} + +// Warn provides a mock function with given fields: args +func (_m *Logger) Warn(args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Warnf provides a mock function with given fields: format, values +func (_m *Logger) Warnf(format string, values ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, values...) + _m.Called(_ca...) +} + +// Warnw provides a mock function with given fields: msg, keysAndValues +func (_m *Logger) Warnw(msg string, keysAndValues ...interface{}) { + var _ca []interface{} + _ca = append(_ca, msg) + _ca = append(_ca, keysAndValues...) + _m.Called(_ca...) 
+} + +// With provides a mock function with given fields: args +func (_m *Logger) With(args ...interface{}) logger.Logger { + var _ca []interface{} + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for With") + } + + var r0 logger.Logger + if rf, ok := ret.Get(0).(func(...interface{}) logger.Logger); ok { + r0 = rf(args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(logger.Logger) + } + } + + return r0 +} + +// NewLogger creates a new instance of Logger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLogger(t interface { + mock.TestingT + Cleanup(func()) +}) *Logger { + mock := &Logger{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/logger/null_logger.go b/core/logger/null_logger.go new file mode 100644 index 00000000..9bddd9b3 --- /dev/null +++ b/core/logger/null_logger.go @@ -0,0 +1,47 @@ +package logger + +import ( + "go.uber.org/zap/zapcore" +) + +// nolint +var NullLogger Logger = &nullLogger{} + +type nullLogger struct{} + +func (l *nullLogger) With(args ...interface{}) Logger { return l } +func (l *nullLogger) Named(name string) Logger { return l } +func (l *nullLogger) SetLogLevel(_ zapcore.Level) {} + +func (l *nullLogger) Trace(args ...interface{}) {} +func (l *nullLogger) Debug(args ...interface{}) {} +func (l *nullLogger) Info(args ...interface{}) {} +func (l *nullLogger) Warn(args ...interface{}) {} +func (l *nullLogger) Error(args ...interface{}) {} +func (l *nullLogger) Critical(args ...interface{}) {} +func (l *nullLogger) Panic(args ...interface{}) {} +func (l *nullLogger) Fatal(args ...interface{}) {} + +func (l *nullLogger) Tracef(format string, values ...interface{}) {} +func (l *nullLogger) Debugf(format string, values ...interface{}) {} +func (l *nullLogger) Infof(format string, 
values ...interface{}) {} +func (l *nullLogger) Warnf(format string, values ...interface{}) {} +func (l *nullLogger) Errorf(format string, values ...interface{}) {} +func (l *nullLogger) Criticalf(format string, values ...interface{}) {} +func (l *nullLogger) Panicf(format string, values ...interface{}) {} +func (l *nullLogger) Fatalf(format string, values ...interface{}) {} + +func (l *nullLogger) Tracew(msg string, keysAndValues ...interface{}) {} +func (l *nullLogger) Debugw(msg string, keysAndValues ...interface{}) {} +func (l *nullLogger) Infow(msg string, keysAndValues ...interface{}) {} +func (l *nullLogger) Warnw(msg string, keysAndValues ...interface{}) {} +func (l *nullLogger) Errorw(msg string, keysAndValues ...interface{}) {} +func (l *nullLogger) Criticalw(msg string, keysAndValues ...interface{}) {} +func (l *nullLogger) Panicw(msg string, keysAndValues ...interface{}) {} +func (l *nullLogger) Fatalw(msg string, keysAndValues ...interface{}) {} + +func (l *nullLogger) Sync() error { return nil } +func (l *nullLogger) Helper(skip int) Logger { return l } +func (l *nullLogger) Name() string { return "nullLogger" } + +func (l *nullLogger) Recover(panicErr interface{}) {} diff --git a/core/logger/null_logger_test.go b/core/logger/null_logger_test.go new file mode 100644 index 00000000..80b85dec --- /dev/null +++ b/core/logger/null_logger_test.go @@ -0,0 +1,56 @@ +package logger_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func TestNullLogger(t *testing.T) { + t.Parallel() + + t.Run("names", func(t *testing.T) { + t.Parallel() + + l := logger.NullLogger + assert.Equal(t, l, l.Named("foo")) + assert.Equal(t, l, l.With("foo")) + assert.Equal(t, l, l.Helper(123)) + }) + + t.Run("no-op", func(t *testing.T) { + t.Parallel() + + l := logger.NullLogger + l.SetLogLevel(zapcore.DebugLevel) + l.Trace() + l.Debug() + l.Info() + l.Warn() + l.Error() + 
l.Critical() + l.Panic() + l.Fatal() + l.Tracef("msg") + l.Debugf("msg") + l.Infof("msg") + l.Warnf("msg") + l.Errorf("msg") + l.Criticalf("msg") + l.Panicf("msg") + l.Fatalf("msg") + l.Tracew("msg") + l.Debugw("msg") + l.Infow("msg") + l.Warnw("msg") + l.Errorw("msg") + l.Criticalw("msg") + l.Panicw("msg") + l.Fatalw("msg") + l.Recover(nil) + assert.Nil(t, l.Sync()) + }) +} diff --git a/core/logger/passthrough_test.go b/core/logger/passthrough_test.go new file mode 100644 index 00000000..c3ebbb09 --- /dev/null +++ b/core/logger/passthrough_test.go @@ -0,0 +1,117 @@ +package logger + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" +) + +var errTest = errors.New("error") + +func TestLogger_Passthrough(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + create func(passthrough Logger) Logger + }{ + {"prometheus", newPrometheusLogger}, + {"sentry", newSentryLogger}, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + m := setupMockLogger(t) + l := test.create(m) + + l.With() + l.Named("xxx") + l.SetLogLevel(zapcore.DebugLevel) + + l.Trace() + l.Debug() + l.Info() + l.Warn() + l.Error() + l.Critical() + l.Panic() + l.Fatal() + + l.Tracef("msg") + l.Debugf("msg") + l.Infof("msg") + l.Warnf("msg") + l.Errorf("msg") + l.Criticalf("msg") + l.Panicf("msg") + l.Fatalf("msg") + + l.Tracew("msg") + l.Debugw("msg") + l.Infow("msg") + l.Warnw("msg") + l.Errorw("msg") + l.Criticalw("msg") + l.Panicw("msg") + l.Fatalw("msg") + + nm := l.Name() + require.Equal(t, "mockLogger", nm) + + err := l.Sync() + assert.ErrorIs(t, err, errTest) + + l.Recover(errTest) + }) + } +} + +func setupMockLogger(t *testing.T) *MockLogger { + ml := NewMockLogger(t) + + ml.On("Helper", 1).Return(ml).Once() + ml.On("With", mock.Anything, mock.Anything).Return(ml) + ml.On("Named", 
"xxx").Return(ml).Once() + ml.On("SetLogLevel", zapcore.DebugLevel).Once() + + ml.On("Trace").Once() + ml.On("Debug").Once() + ml.On("Info").Once() + ml.On("Warn").Once() + ml.On("Error").Once() + ml.On("Critical").Once() + ml.On("Panic").Once() + ml.On("Fatal").Once() + + ml.On("Tracef", "msg").Once() + ml.On("Debugf", "msg").Once() + ml.On("Infof", "msg").Once() + ml.On("Warnf", "msg").Once() + ml.On("Errorf", "msg").Once() + ml.On("Criticalf", "msg").Once() + ml.On("Panicf", "msg").Once() + ml.On("Fatalf", "msg").Once() + + ml.On("Tracew", "msg").Once() + ml.On("Debugw", "msg").Once() + ml.On("Infow", "msg").Once() + ml.On("Warnw", "msg").Once() + ml.On("Errorw", "msg", mock.Anything, mock.Anything).Once() + ml.On("Criticalw", "msg", mock.Anything, mock.Anything).Once() + ml.On("Panicw", "msg", mock.Anything, mock.Anything).Once() + ml.On("Fatalw", "msg", mock.Anything, mock.Anything).Once() + + ml.On("Sync").Return(errTest).Once() + ml.On("Name").Return("mockLogger").Once() + ml.On("Recover", errTest).Once() + + return ml +} diff --git a/core/logger/prettyconsole.go b/core/logger/prettyconsole.go new file mode 100644 index 00000000..5150f1f3 --- /dev/null +++ b/core/logger/prettyconsole.go @@ -0,0 +1,156 @@ +package logger + +import ( + "fmt" + "math" + "net/url" + "os" + "sort" + "strconv" + "strings" + "time" + "unicode" + + "github.com/fatih/color" + "github.com/tidwall/gjson" + "go.uber.org/zap" +) + +var levelColors = map[string]func(...interface{}) string{ + "default": color.New(color.FgWhite).SprintFunc(), + "debug": color.New(color.FgGreen).SprintFunc(), + "info": color.New(color.FgWhite).SprintFunc(), + "warn": color.New(color.FgYellow).SprintFunc(), + "error": color.New(color.FgRed).SprintFunc(), + "panic": color.New(color.FgHiRed).SprintFunc(), + "crit": color.New(color.FgHiRed).SprintFunc(), + "fatal": color.New(color.FgHiRed).SprintFunc(), +} + +var blue = color.New(color.FgBlue).SprintFunc() +var green = color.New(color.FgGreen).SprintFunc() + +// 
PrettyConsole wraps a Sink (Writer, Syncer, Closer), usually stdout, and +// formats the incoming json bytes with colors and white space for readability +// before passing on to the underlying Writer in Sink. +type PrettyConsole struct { + zap.Sink +} + +// Write reformats the incoming json bytes with colors, newlines and whitespace +// for better readability in console. +func (pc PrettyConsole) Write(b []byte) (int, error) { + if !gjson.ValidBytes(b) { + return 0, fmt.Errorf("unable to parse json for pretty console: %s", string(b)) + } + js := gjson.ParseBytes(b) + headline := generateHeadline(js) + details := generateDetails(js) + return pc.Sink.Write([]byte(fmt.Sprintln(headline, details))) +} + +// Close is overridden to prevent accidental closure of stderr/stdout +func (pc PrettyConsole) Close() error { + switch pc.Sink { + case os.Stderr, os.Stdout: + // Never close Stderr/Stdout because this will break any future go runtime logging from panics etc + return nil + default: + return pc.Sink.Close() + } +} + +func generateHeadline(js gjson.Result) string { + ts := js.Get("ts") + var tsStr string + if f := ts.Float(); f > 1 { + sec, dec := math.Modf(f) + tsStr = iso8601UTC(time.Unix(int64(sec), int64(dec*(1e9)))) + } else { + // assume already formatted + tsStr = ts.Str + } + headline := []interface{}{ + tsStr, + " ", + coloredLevel(js.Get("level")), + fmt.Sprintf("%-50s", sanitized(js.Get("msg").String())), + " ", + fmt.Sprintf("%-32s", blue(js.Get("caller"))), + } + return fmt.Sprint(headline...) +} + +// detailsBlacklist of keys to show in details. This does not +// exclude it from being present in other logger sinks, like .jsonl files. 
+var detailsBlacklist = map[string]bool{ + "level": true, + "ts": true, + "msg": true, + "caller": true, + "hash": true, +} + +func generateDetails(js gjson.Result) string { + data := js.Map() + keys := []string{} + + for k := range data { + if detailsBlacklist[k] || len(data[k].String()) == 0 { + continue + } + keys = append(keys, k) + } + + sort.Strings(keys) + + var details strings.Builder + + for _, v := range keys { + details.WriteString(fmt.Sprintf("%s=%v ", green(sanitized(v)), sanitized(data[v].String()))) + } + + return details.String() +} + +func coloredLevel(level gjson.Result) string { + color, ok := levelColors[level.String()] + if !ok { + color = levelColors["default"] + } + return color(fmt.Sprintf("%-8s", fmt.Sprint("[", strings.ToUpper(level.String()), "]"))) +} + +// iso8601UTC formats given time to ISO8601. +func iso8601UTC(t time.Time) string { + return t.UTC().Format(time.RFC3339) +} + +func prettyConsoleSink(s zap.Sink) func(*url.URL) (zap.Sink, error) { + return func(*url.URL) (zap.Sink, error) { + return PrettyConsole{s}, nil + } +} + +type sanitized string + +// String replaces control characters with Go escape sequences, except for newlines and tabs. +// See strconv.QuoteRune. 
+func (s sanitized) String() string { + var out string + for _, r := range s { + switch r { + case '\n', '\r', '\t': + // allowed + default: + // escape others + if unicode.IsControl(r) { + q := strconv.QuoteRune(r) + out += q[1 : len(q)-1] // trim quotes + continue + } + } + out += string(r) + } + return out +} diff --git a/core/logger/prometheus.go b/core/logger/prometheus.go new file mode 100644 index 00000000..95503001 --- /dev/null +++ b/core/logger/prometheus.go @@ -0,0 +1,219 @@ +package logger + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/zap/zapcore" +) + +var warnCounter = promauto.NewCounter(prometheus.CounterOpts{ + Name: "log_warn_count", + Help: "Number of warning messages in log", +}) +var errorCounter = promauto.NewCounter(prometheus.CounterOpts{ + Name: "log_error_count", + Help: "Number of error messages in log", +}) +var criticalCounter = promauto.NewCounter(prometheus.CounterOpts{ + Name: "log_critical_count", + Help: "Number of critical messages in log", +}) +var panicCounter = promauto.NewCounter(prometheus.CounterOpts{ + Name: "log_panic_count", + Help: "Number of panic messages in log", +}) +var fatalCounter = promauto.NewCounter(prometheus.CounterOpts{ + Name: "log_fatal_count", + Help: "Number of fatal messages in log", +}) + +type prometheusLogger struct { + h Logger + warnCnt prometheus.Counter + errorCnt prometheus.Counter + criticalCnt prometheus.Counter + panicCnt prometheus.Counter + fatalCnt prometheus.Counter +} + +func newPrometheusLoggerWithCounters( + l Logger, + warnCounter prometheus.Counter, + errorCounter prometheus.Counter, + criticalCounter prometheus.Counter, + panicCounter prometheus.Counter, + fatalCounter prometheus.Counter) Logger { + return &prometheusLogger{ + h: l.Helper(1), + warnCnt: warnCounter, + errorCnt: errorCounter, + criticalCnt: criticalCounter, + panicCnt: panicCounter, + fatalCnt: fatalCounter, + } +} + +func 
newPrometheusLogger(l Logger) Logger { + return newPrometheusLoggerWithCounters(l, warnCounter, errorCounter, criticalCounter, panicCounter, fatalCounter) +} + +func (s *prometheusLogger) With(args ...interface{}) Logger { + return &prometheusLogger{ + h: s.h.With(args...), + warnCnt: s.warnCnt, + errorCnt: s.errorCnt, + criticalCnt: s.criticalCnt, + panicCnt: s.panicCnt, + fatalCnt: s.fatalCnt, + } +} + +func (s *prometheusLogger) Named(name string) Logger { + return &prometheusLogger{ + h: s.h.Named(name), + warnCnt: s.warnCnt, + errorCnt: s.errorCnt, + criticalCnt: s.criticalCnt, + panicCnt: s.panicCnt, + fatalCnt: s.fatalCnt, + } +} + +func (s *prometheusLogger) Name() string { + return s.h.Name() +} + +func (s *prometheusLogger) SetLogLevel(level zapcore.Level) { + s.h.SetLogLevel(level) +} + +func (s *prometheusLogger) Trace(args ...interface{}) { + s.h.Trace(args...) +} + +func (s *prometheusLogger) Debug(args ...interface{}) { + s.h.Debug(args...) +} + +func (s *prometheusLogger) Info(args ...interface{}) { + s.h.Info(args...) +} + +func (s *prometheusLogger) Warn(args ...interface{}) { + s.warnCnt.Inc() + s.h.Warn(args...) +} + +func (s *prometheusLogger) Error(args ...interface{}) { + s.errorCnt.Inc() + s.h.Error(args...) +} + +func (s *prometheusLogger) Critical(args ...interface{}) { + s.criticalCnt.Inc() + s.h.Critical(args...) +} + +func (s *prometheusLogger) Panic(args ...interface{}) { + s.panicCnt.Inc() + s.h.Panic(args...) +} + +func (s *prometheusLogger) Fatal(args ...interface{}) { + s.fatalCnt.Inc() + s.h.Fatal(args...) +} + +func (s *prometheusLogger) Tracef(format string, values ...interface{}) { + s.h.Tracef(format, values...) +} + +func (s *prometheusLogger) Debugf(format string, values ...interface{}) { + s.h.Debugf(format, values...) +} + +func (s *prometheusLogger) Infof(format string, values ...interface{}) { + s.h.Infof(format, values...) 
+} + +func (s *prometheusLogger) Warnf(format string, values ...interface{}) { + s.warnCnt.Inc() + s.h.Warnf(format, values...) +} + +func (s *prometheusLogger) Errorf(format string, values ...interface{}) { + s.errorCnt.Inc() + s.h.Errorf(format, values...) +} + +func (s *prometheusLogger) Criticalf(format string, values ...interface{}) { + s.criticalCnt.Inc() + s.h.Criticalf(format, values...) +} + +func (s *prometheusLogger) Panicf(format string, values ...interface{}) { + s.panicCnt.Inc() + s.h.Panicf(format, values...) +} + +func (s *prometheusLogger) Fatalf(format string, values ...interface{}) { + s.fatalCnt.Inc() + s.h.Fatalf(format, values...) +} + +func (s *prometheusLogger) Tracew(msg string, keysAndValues ...interface{}) { + s.h.Tracew(msg, keysAndValues...) +} + +func (s *prometheusLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.h.Debugw(msg, keysAndValues...) +} + +func (s *prometheusLogger) Infow(msg string, keysAndValues ...interface{}) { + s.h.Infow(msg, keysAndValues...) +} + +func (s *prometheusLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.warnCnt.Inc() + s.h.Warnw(msg, keysAndValues...) +} + +func (s *prometheusLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.errorCnt.Inc() + s.h.Errorw(msg, keysAndValues...) +} + +func (s *prometheusLogger) Criticalw(msg string, keysAndValues ...interface{}) { + s.criticalCnt.Inc() + s.h.Criticalw(msg, keysAndValues...) +} + +func (s *prometheusLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.panicCnt.Inc() + s.h.Panicw(msg, keysAndValues...) +} + +func (s *prometheusLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.fatalCnt.Inc() + s.h.Fatalw(msg, keysAndValues...) 
+} + +func (s *prometheusLogger) Sync() error { + return s.h.Sync() +} + +func (s *prometheusLogger) Helper(add int) Logger { + return &prometheusLogger{ + s.h.Helper(add), + s.warnCnt, + s.errorCnt, + s.criticalCnt, + s.panicCnt, + s.fatalCnt, + } +} + +func (s *prometheusLogger) Recover(panicErr interface{}) { + s.panicCnt.Inc() + s.h.Recover(panicErr) +} diff --git a/core/logger/prometheus_test.go b/core/logger/prometheus_test.go new file mode 100644 index 00000000..c1342643 --- /dev/null +++ b/core/logger/prometheus_test.go @@ -0,0 +1,100 @@ +package logger + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + io_prometheus_client "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" +) + +func TestPrometheusLogger_Counters(t *testing.T) { + t.Parallel() + + createRandomNameCounter := func() prometheus.Counter { + name := fmt.Sprintf("test_counter_%d", rand.Int31n(999999)) + return promauto.NewCounter(prometheus.CounterOpts{Name: name}) + } + + warnCounter := createRandomNameCounter() + errorCounter := createRandomNameCounter() + criticalCounter := createRandomNameCounter() + panicCounter := createRandomNameCounter() + fatalCounter := createRandomNameCounter() + + l := newPrometheusLoggerWithCounters(NullLogger, warnCounter, errorCounter, criticalCounter, panicCounter, fatalCounter) + repeat(l.Warn, 1) + repeat(l.Error, 2) + repeat(l.Critical, 3) + repeat(l.Panic, 4) + repeat(l.Fatal, 5) + + assertCounterValue(t, warnCounter, 1) + assertCounterValue(t, errorCounter, 2) + assertCounterValue(t, criticalCounter, 3) + assertCounterValue(t, panicCounter, 4) + assertCounterValue(t, fatalCounter, 5) + + nl := l.Named("foo") // reusing counters + repeat(nl.Warn, 1) + repeat(nl.Error, 1) + repeat(nl.Critical, 1) + repeat(nl.Panic, 1) + repeat(nl.Fatal, 1) + + assertCounterValue(t, warnCounter, 2) + assertCounterValue(t, errorCounter, 3) + 
assertCounterValue(t, criticalCounter, 4) + assertCounterValue(t, panicCounter, 5) + assertCounterValue(t, fatalCounter, 6) + + wl := l.With("bar") // reusing counters + repeat(wl.Warn, 1) + repeat(wl.Error, 1) + repeat(wl.Critical, 1) + repeat(wl.Panic, 1) + repeat(wl.Fatal, 1) + + assertCounterValue(t, warnCounter, 3) + assertCounterValue(t, errorCounter, 4) + assertCounterValue(t, criticalCounter, 5) + assertCounterValue(t, panicCounter, 6) + assertCounterValue(t, fatalCounter, 7) + + l.Warnf("msg") + l.Warnw("msg") + assertCounterValue(t, warnCounter, 5) + + l.Errorf("msg") + l.Errorf("msg") + assertCounterValue(t, errorCounter, 6) + + l.Criticalf("msg") + l.Criticalw("msg") + assertCounterValue(t, criticalCounter, 7) + + l.Panicf("msg") + l.Panicw("msg") + l.Recover(nil) + assertCounterValue(t, panicCounter, 9) + + l.Fatalf("msg") + l.Fatalw("msg") + assertCounterValue(t, fatalCounter, 9) +} + +func assertCounterValue(t *testing.T, c prometheus.Counter, v int) { + var m io_prometheus_client.Metric + err := c.Write(&m) + assert.NoError(t, err) + assert.Equal(t, v, int(m.GetCounter().GetValue())) +} + +func repeat(f func(args ...interface{}), c int) { + for ; c > 0; c-- { + f() + } +} diff --git a/core/logger/pyroscope.go b/core/logger/pyroscope.go new file mode 100644 index 00000000..5016907c --- /dev/null +++ b/core/logger/pyroscope.go @@ -0,0 +1,56 @@ +package logger + +import ( + "runtime" + + "github.com/grafana/pyroscope-go" + + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/static" +) + +type PprofConfig interface { + BlockProfileRate() int + MutexProfileFraction() int +} + +// StartPyroscope starts continuous profiling of the Plugin Node +func StartPyroscope(pyroConfig config.Pyroscope, pprofConfig PprofConfig) (*pyroscope.Profiler, error) { + runtime.SetBlockProfileRate(pprofConfig.BlockProfileRate()) + runtime.SetMutexProfileFraction(pprofConfig.MutexProfileFraction()) + + sha, ver := static.Short() + + return 
pyroscope.Start(pyroscope.Config{ + // Maybe configurable to identify the specific NOP - TBD + ApplicationName: "plugin-node", + + ServerAddress: pyroConfig.ServerAddress(), + AuthToken: pyroConfig.AuthToken(), + + // We disable logging the profiling info, it will be in the Pyroscope instance anyways... + Logger: nil, + + Tags: map[string]string{ + "SHA": sha, + "Version": ver, + "Environment": pyroConfig.Environment(), + }, + + ProfileTypes: []pyroscope.ProfileType{ + // these profile types are enabled by default: + pyroscope.ProfileCPU, + pyroscope.ProfileAllocObjects, + pyroscope.ProfileAllocSpace, + pyroscope.ProfileInuseObjects, + pyroscope.ProfileInuseSpace, + + // these profile types are optional: + pyroscope.ProfileGoroutines, + pyroscope.ProfileMutexCount, + pyroscope.ProfileMutexDuration, + pyroscope.ProfileBlockCount, + pyroscope.ProfileBlockDuration, + }, + }) +} diff --git a/core/logger/sentry.go b/core/logger/sentry.go new file mode 100644 index 00000000..ac28e9ea --- /dev/null +++ b/core/logger/sentry.go @@ -0,0 +1,272 @@ +package logger + +import ( + "fmt" + "time" + + "github.com/getsentry/sentry-go" + "go.uber.org/zap/zapcore" +) + +const ( + // SentryFlushDeadline indicates the maximum amount of time we allow sentry to + // flush events on manual flush + SentryFlushDeadline = 5 * time.Second + + loggerContextName = "Logger" +) + +type sentryLogger struct { + h Logger +} + +func newSentryLogger(l Logger) Logger { + return &sentryLogger{h: l.Helper(1)} +} + +func (s *sentryLogger) With(args ...interface{}) Logger { + return &sentryLogger{ + h: s.h.With(args...), + } +} + +func (s *sentryLogger) Named(name string) Logger { + return &sentryLogger{ + h: s.h.Named(name), + } +} + +func (s *sentryLogger) Name() string { + return s.h.Name() +} + +func (s *sentryLogger) SetLogLevel(level zapcore.Level) { + s.h.SetLogLevel(level) +} + +func (s *sentryLogger) Trace(args ...interface{}) { + s.h.Trace(args...) 
+} + +func (s *sentryLogger) Debug(args ...interface{}) { + s.h.Debug(args...) +} + +func (s *sentryLogger) Info(args ...interface{}) { + s.h.Info(args...) +} + +func (s *sentryLogger) Warn(args ...interface{}) { + s.h.Warn(args...) +} + +func (s *sentryLogger) Error(args ...interface{}) { + hub := sentry.CurrentHub().Clone() + hub.ConfigureScope(func(scope *sentry.Scope) { + scope.SetContext(loggerContextName, map[string]interface{}{ + "args": args, + }) + scope.SetLevel(sentry.LevelError) + }) + eid := hub.CaptureMessage(fmt.Sprintf("%v", args)) + s.h.With("sentryEventID", eid).Error(args...) +} + +func (s *sentryLogger) Critical(args ...interface{}) { + defer sentry.Flush(SentryFlushDeadline) + hub := sentry.CurrentHub().Clone() + hub.ConfigureScope(func(scope *sentry.Scope) { + scope.SetContext(loggerContextName, map[string]interface{}{ + "args": args, + }) + scope.SetLevel(sentry.LevelFatal) + }) + eid := hub.CaptureMessage(fmt.Sprintf("%v", args)) + s.h.With("sentryEventID", eid).Critical(args...) +} + +func (s *sentryLogger) Panic(args ...interface{}) { + defer sentry.Flush(SentryFlushDeadline) + hub := sentry.CurrentHub().Clone() + hub.ConfigureScope(func(scope *sentry.Scope) { + scope.SetContext(loggerContextName, map[string]interface{}{ + "args": args, + }) + scope.SetLevel(sentry.LevelFatal) + }) + eid := hub.CaptureMessage(fmt.Sprintf("%v", args)) + s.h.With("sentryEventID", eid).Panic(args...) +} + +func (s *sentryLogger) Fatal(args ...interface{}) { + defer sentry.Flush(SentryFlushDeadline) + hub := sentry.CurrentHub().Clone() + hub.ConfigureScope(func(scope *sentry.Scope) { + scope.SetContext(loggerContextName, map[string]interface{}{ + "args": args, + }) + scope.SetLevel(sentry.LevelFatal) + }) + eid := hub.CaptureMessage(fmt.Sprintf("%v", args)) + s.h.With("sentryEventID", eid).Fatal(args...) +} + +func (s *sentryLogger) Tracef(format string, values ...interface{}) { + s.h.Tracef(format, values...) 
+} + +func (s *sentryLogger) Debugf(format string, values ...interface{}) { + s.h.Debugf(format, values...) +} + +func (s *sentryLogger) Infof(format string, values ...interface{}) { + s.h.Infof(format, values...) +} + +func (s *sentryLogger) Warnf(format string, values ...interface{}) { + s.h.Warnf(format, values...) +} + +func (s *sentryLogger) Errorf(format string, values ...interface{}) { + hub := sentry.CurrentHub().Clone() + hub.ConfigureScope(func(scope *sentry.Scope) { + scope.SetContext(loggerContextName, map[string]interface{}{ + "values": values, + }) + scope.SetLevel(sentry.LevelError) + }) + eid := hub.CaptureMessage(fmt.Sprintf(format, values...)) + s.h.With("sentryEventID", eid).Errorf(format, values...) +} + +func (s *sentryLogger) Criticalf(format string, values ...interface{}) { + defer sentry.Flush(SentryFlushDeadline) + hub := sentry.CurrentHub().Clone() + hub.ConfigureScope(func(scope *sentry.Scope) { + scope.SetContext(loggerContextName, map[string]interface{}{ + "values": values, + }) + scope.SetLevel(sentry.LevelFatal) + }) + eid := hub.CaptureMessage(fmt.Sprintf(format, values...)) + s.h.With("sentryEventID", eid).Criticalf(format, values...) +} + +func (s *sentryLogger) Panicf(format string, values ...interface{}) { + defer sentry.Flush(SentryFlushDeadline) + hub := sentry.CurrentHub().Clone() + hub.ConfigureScope(func(scope *sentry.Scope) { + scope.SetContext(loggerContextName, map[string]interface{}{ + "values": values, + }) + scope.SetLevel(sentry.LevelFatal) + }) + eid := hub.CaptureMessage(fmt.Sprintf(format, values...)) + s.h.With("sentryEventID", eid).Panicf(format, values...) 
+} + +func (s *sentryLogger) Fatalf(format string, values ...interface{}) { + defer sentry.Flush(SentryFlushDeadline) + hub := sentry.CurrentHub().Clone() + hub.ConfigureScope(func(scope *sentry.Scope) { + scope.SetContext(loggerContextName, map[string]interface{}{ + "values": values, + }) + scope.SetLevel(sentry.LevelFatal) + }) + eid := hub.CaptureMessage(fmt.Sprintf(format, values...)) + s.h.With("sentryEventID", eid).Fatalf(format, values...) +} + +func (s *sentryLogger) Tracew(msg string, keysAndValues ...interface{}) { + s.h.Tracew(msg, keysAndValues...) +} + +func (s *sentryLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.h.Debugw(msg, keysAndValues...) +} + +func (s *sentryLogger) Infow(msg string, keysAndValues ...interface{}) { + s.h.Infow(msg, keysAndValues...) +} + +func (s *sentryLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.h.Warnw(msg, keysAndValues...) +} + +func (s *sentryLogger) Errorw(msg string, keysAndValues ...interface{}) { + hub := sentry.CurrentHub().Clone() + hub.ConfigureScope(func(scope *sentry.Scope) { + scope.SetContext(loggerContextName, toMap(keysAndValues)) + scope.SetLevel(sentry.LevelError) + }) + eid := hub.CaptureMessage(msg) + s.h.Errorw(msg, append(keysAndValues, "sentryEventID", eid)...) +} + +func (s *sentryLogger) Criticalw(msg string, keysAndValues ...interface{}) { + defer sentry.Flush(SentryFlushDeadline) + hub := sentry.CurrentHub().Clone() + hub.ConfigureScope(func(scope *sentry.Scope) { + scope.SetContext(loggerContextName, toMap(keysAndValues)) + scope.SetLevel(sentry.LevelFatal) + }) + eid := hub.CaptureMessage(msg) + s.h.Criticalw(msg, append(keysAndValues, "sentryEventID", eid)...) 
// toMap converts a flat key-value argument list into a map, pairing each
// even-indexed element (the key) with the element that follows it (the value).
// Elements with non-string keys are skipped, and a trailing key with no
// matching value is dropped.
func toMap(args []interface{}) map[string]interface{} {
	m := make(map[string]interface{}, len(args)/2)
	// Stop one short of the end so a dangling key is never consumed.
	for i := 0; i+1 < len(args); i += 2 {
		if key, ok := args[i].(string); ok {
			m[key] = args[i+1]
		}
	}
	return m
}
+ // Unlike ErrorIf, this can be deffered inline, since the function call is delayed. + ErrorIfFn(fn func() error, msg string) +} + +// Sugared returns a new SugaredLogger wrapping the given Logger. +func Sugared(l Logger) SugaredLogger { + return &sugared{ + Logger: l, + h: l.Helper(1), + } +} + +type sugared struct { + Logger + h Logger // helper with stack trace skip level +} + +// AssumptionViolation wraps Error logs with assumption violation tag. +func (s *sugared) AssumptionViolation(args ...interface{}) { + s.h.Error(append([]interface{}{"AssumptionViolation:"}, args...)) +} + +// AssumptionViolationf wraps Errorf logs with assumption violation tag. +func (s *sugared) AssumptionViolationf(format string, vals ...interface{}) { + s.h.Errorf("AssumptionViolation: "+format, vals...) +} + +// AssumptionViolationw wraps Errorw logs with assumption violation tag. +func (s *sugared) AssumptionViolationw(msg string, keyvals ...interface{}) { + s.h.Errorw("AssumptionViolation: "+msg, keyvals...) +} + +func (s *sugared) ErrorIf(err error, msg string) { + if err != nil { + s.h.Errorw(msg, "err", err) + } +} + +func (s *sugared) ErrorIfFn(fn func() error, msg string) { + if err := fn(); err != nil { + s.h.Errorw(msg, "err", err) + } +} diff --git a/core/logger/test_logger.go b/core/logger/test_logger.go new file mode 100644 index 00000000..a18c30e5 --- /dev/null +++ b/core/logger/test_logger.go @@ -0,0 +1,46 @@ +package logger + +import ( + "testing" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest" + "go.uber.org/zap/zaptest/observer" +) + +// TestLogger creates a logger that directs output to PrettyConsole configured +// for test output, and to the buffer testMemoryLog. t is optional. +// Log level is DEBUG by default. +// +// Note: It is not necessary to Sync(). 
+func TestLogger(tb testing.TB) SugaredLogger { + return testLogger(tb, nil) +} + +// TestLoggerObserved creates a logger with an observer that can be used to +// test emitted logs at the given level or above +// +// Note: It is not necessary to Sync(). +func TestLoggerObserved(tb testing.TB, lvl zapcore.Level) (Logger, *observer.ObservedLogs) { + observedZapCore, observedLogs := observer.New(lvl) + return testLogger(tb, observedZapCore), observedLogs +} + +// testLogger returns a new SugaredLogger for tests. core is optional. +func testLogger(tb testing.TB, core zapcore.Core) SugaredLogger { + a := zap.NewAtomicLevelAt(zap.DebugLevel) + opts := []zaptest.LoggerOption{zaptest.Level(a)} + zapOpts := []zap.Option{zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel)} + if core != nil { + zapOpts = append(zapOpts, zap.WrapCore(func(c zapcore.Core) zapcore.Core { + return zapcore.NewTee(c, core) + })) + } + opts = append(opts, zaptest.WrapOptions(zapOpts...)) + l := &zapLogger{ + level: a, + SugaredLogger: zaptest.NewLogger(tb, opts...).Sugar(), + } + return Sugared(l.With("version", verShaNameStatic())) +} diff --git a/core/logger/test_logger_test.go b/core/logger/test_logger_test.go new file mode 100644 index 00000000..feafbc69 --- /dev/null +++ b/core/logger/test_logger_test.go @@ -0,0 +1,81 @@ +package logger + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func init() { + InitColor(false) +} + +func TestTestLogger(t *testing.T) { + lgr, observed := TestLoggerObserved(t, zapcore.DebugLevel) + + const ( + testMessage = "Test message" + ) + lgr.Warn(testMessage) + // [WARN] Test message logger/test_logger_test.go:23 version=unset@unset + logs := observed.TakeAll() + require.Len(t, logs, 1) + log := logs[0] + assert.Equal(t, zap.WarnLevel, log.Level) + assert.Equal(t, testMessage, log.Message) + assert.Equal(t, "", log.LoggerName) + ver := 
log.ContextMap()["version"] + assert.Contains(t, ver, "@") + + const ( + serviceName = "ServiceName" + serviceMessage = "Service message" + key, value = "key", "value" + ) + srvLgr := lgr.Named(serviceName) + srvLgr.SetLogLevel(zapcore.DebugLevel) + srvLgr.Debugw(serviceMessage, key, value) + // [DEBUG] Service message logger/test_logger_test.go:35 version=unset@unset key=value logger=ServiceName + logs = observed.TakeAll() + require.Len(t, logs, 1) + log = logs[0] + assert.Equal(t, zap.DebugLevel, log.Level) + assert.Equal(t, serviceMessage, log.Message) + assert.Equal(t, serviceName, log.LoggerName) + assert.Equal(t, value, log.ContextMap()[key]) + assert.Contains(t, log.Caller.String(), "core/logger/test_logger_test.go") + assert.Equal(t, log.Caller.Line, 41) + + const ( + workerName = "WorkerName" + workerMessage = "Did some work" + idKey, workerId = "workerId", "42" + resultKey, resultVal = "result", "success" + ) + wrkLgr := srvLgr.Named(workerName).With(idKey, workerId) + wrkLgr.Infow(workerMessage, resultKey, resultVal) + // [INFO] Did some work logger/test_logger_test.go:49 version=unset@unset logger=ServiceName.WorkerName result=success workerId=42 + logs = observed.TakeAll() + require.Len(t, logs, 1) + log = logs[0] + assert.Equal(t, zap.InfoLevel, log.Level) + assert.Equal(t, workerMessage, log.Message) + assert.Equal(t, fmt.Sprintf("%s.%s", serviceName, workerName), log.LoggerName) + assert.Equal(t, workerId, log.ContextMap()[idKey]) + assert.Equal(t, resultVal, log.ContextMap()[resultKey]) + + const ( + critMsg = "Critical error" + ) + lgr.Critical(critMsg) + logs = observed.TakeAll() + require.Len(t, logs, 1) + log = logs[0] + assert.Equal(t, zap.DPanicLevel, log.Level) + assert.Equal(t, critMsg, log.Message) + assert.Equal(t, "", log.LoggerName) +} diff --git a/core/logger/trace.go b/core/logger/trace.go new file mode 100644 index 00000000..c5f36fbc --- /dev/null +++ b/core/logger/trace.go @@ -0,0 +1,20 @@ +//go:build trace + +package logger + 
+import "fmt" + +const tracePrefix = "[TRACE] " + +func (l *zapLogger) Trace(args ...interface{}) { + args[0] = fmt.Sprint(tracePrefix, args[0]) + l.sugaredHelper(1).Debug(args...) +} + +func (l *zapLogger) Tracef(format string, values ...interface{}) { + l.sugaredHelper(1).Debugf(fmt.Sprint(tracePrefix, format), values...) +} + +func (l *zapLogger) Tracew(msg string, keysAndValues ...interface{}) { + l.sugaredHelper(1).Debugw(fmt.Sprint(tracePrefix, msg), keysAndValues...) +} diff --git a/core/logger/trace_noop.go b/core/logger/trace_noop.go new file mode 100644 index 00000000..e2423697 --- /dev/null +++ b/core/logger/trace_noop.go @@ -0,0 +1,9 @@ +//go:build !trace + +package logger + +func (l *zapLogger) Trace(args ...interface{}) {} + +func (l *zapLogger) Tracef(format string, values ...interface{}) {} + +func (l *zapLogger) Tracew(msg string, keysAndValues ...interface{}) {} diff --git a/core/logger/trace_noop_test.go b/core/logger/trace_noop_test.go new file mode 100644 index 00000000..0a261290 --- /dev/null +++ b/core/logger/trace_noop_test.go @@ -0,0 +1,27 @@ +//go:build !trace + +package logger + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" +) + +func TestTrace(t *testing.T) { + lgr, observed := TestLoggerObserved(t, zapcore.InfoLevel) + + const ( + testName = "TestTrace" + testMessage = "Trace message" + ) + lgr.Trace(testMessage) + // [DEBUG] [TRACE] Trace message logger/test_logger_test.go:23 logger=TestLogger + require.Empty(t, observed.TakeAll()) + + lgr.SetLogLevel(zapcore.DebugLevel) + lgr.Trace(testMessage) + // [DEBUG] [TRACE] Trace message logger/test_logger_test.go:23 logger=TestLogger + require.Empty(t, observed.TakeAll()) +} diff --git a/core/logger/trace_test.go b/core/logger/trace_test.go new file mode 100644 index 00000000..bbaebf01 --- /dev/null +++ b/core/logger/trace_test.go @@ -0,0 +1,31 @@ +//go:build trace + +package logger + +import ( + "testing" + + "github.com/stretchr/testify/require" 
// copyFields returns a new slice holding fields followed by add. Neither
// input is aliased or modified, so the result is safe to retain.
func copyFields(fields []interface{}, add ...interface{}) []interface{} {
	out := make([]interface{}, len(fields), len(fields)+len(add))
	copy(out, fields)
	return append(out, add...)
}
+ disabledLevel = zapcore.FatalLevel + 1 + + diskPollInterval = 1 * time.Minute +) + +type zapDiskPollConfig struct { + stop func() + pollChan <-chan time.Time +} + +func (c zapDiskPollConfig) isSet() bool { + return c.stop != nil || c.pollChan != nil +} + +func newDiskPollConfig(interval time.Duration) zapDiskPollConfig { + ticker := time.NewTicker(utils.WithJitter(interval)) + + return zapDiskPollConfig{ + pollChan: ticker.C, + stop: ticker.Stop, + } +} + +var _ Logger = &zapDiskLogger{} + +type zapDiskLogger struct { + zapLogger + config Config + diskLogLevel zap.AtomicLevel + pollDiskSpaceStop chan struct{} + pollDiskSpaceDone chan struct{} +} + +func (l *zapDiskLogger) pollDiskSpace() { + defer l.config.diskPollConfig.stop() + defer close(l.pollDiskSpaceDone) + + for { + select { + case <-l.pollDiskSpaceStop: + return + case <-l.config.diskPollConfig.pollChan: + lvl := zapcore.DebugLevel + + diskUsage, err := l.config.DiskSpaceAvailable(l.config.Dir) + if err != nil { + // Will no longer log to disk + lvl = disabledLevel + l.Warnw("Error getting disk space available for logging", "err", err) + } else if diskUsage < l.config.RequiredDiskSpace() { + // Will no longer log to disk + lvl = disabledLevel + l.Warnf( + "Disk space is not enough to log into disk any longer, required disk space: %s, Available disk space: %s", + l.config.RequiredDiskSpace(), + diskUsage, + ) + } + + lvlBefore := l.diskLogLevel.Level() + + l.diskLogLevel.SetLevel(lvl) + + if lvlBefore == disabledLevel && lvl == zapcore.DebugLevel { + l.Info("Resuming disk logs, disk has enough space") + } + + if l.config.testDiskLogLvlChan != nil { + l.config.testDiskLogLvlChan <- lvl + } + } + } +} + +func newRotatingFileLogger(zcfg zap.Config, c Config, cores ...zapcore.Core) (*zapDiskLogger, func() error, error) { + defaultCore, defaultCloseFn, err := newDefaultLoggingCore(zcfg, c.UnixTS) + if err != nil { + return nil, nil, err + } + cores = append(cores, defaultCore) + + diskLogLevel := 
zap.NewAtomicLevelAt(zapcore.DebugLevel) + diskCore, diskErr := newDiskCore(diskLogLevel, c) + if diskErr != nil { + defaultCloseFn() + return nil, nil, diskErr + } + cores = append(cores, diskCore) + + core := zapcore.NewTee(cores...) + l, diskCloseFn, err := newLoggerForCore(zcfg, core) + if err != nil { + defaultCloseFn() + return nil, nil, err + } + + lggr := &zapDiskLogger{ + config: c, + + pollDiskSpaceStop: make(chan struct{}), + pollDiskSpaceDone: make(chan struct{}), + zapLogger: *l, + diskLogLevel: diskLogLevel, + } + + go lggr.pollDiskSpace() + + closeLogger := sync.OnceValue(func() error { + defer defaultCloseFn() + defer diskCloseFn() + + close(lggr.pollDiskSpaceStop) + <-lggr.pollDiskSpaceDone + + return lggr.Sync() + }) + + return lggr, closeLogger, err +} diff --git a/core/logger/zap_test.go b/core/logger/zap_test.go new file mode 100644 index 00000000..4f24d07d --- /dev/null +++ b/core/logger/zap_test.go @@ -0,0 +1,269 @@ +package logger + +import ( + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/utils" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" +) + +func newTestLogger(t *testing.T, cfg Config) Logger { + lggr, closeFn := cfg.New() + t.Cleanup(func() { + assert.NoError(t, closeFn()) + }) + return lggr +} + +func TestZapLogger_OutOfDiskSpace(t *testing.T) { + maxSize := utils.FileSize(5 * utils.MB) + + logsDir := t.TempDir() + tmpFile, err := os.CreateTemp(logsDir, "*") + assert.NoError(t, err) + defer func() { assert.NoError(t, tmpFile.Close()) }() + + var logFileSize utils.FileSize + err = logFileSize.UnmarshalText([]byte("100mb")) + assert.NoError(t, err) + + pollCfg := newDiskPollConfig(1 * time.Second) + + local := Config{ + Dir: logsDir, + FileMaxAgeDays: 0, + FileMaxBackups: 1, + FileMaxSizeMB: int(logFileSize / utils.MB), + + diskPollConfig: pollCfg, + testDiskLogLvlChan: make(chan zapcore.Level), + } + + t.Run("on logger creation", 
func(t *testing.T) { + pollChan := make(chan time.Time) + stop := func() { + close(pollChan) + } + + local.diskSpaceAvailableFn = func(path string) (utils.FileSize, error) { + assert.Equal(t, logsDir, path) + return maxSize, nil + } + local.diskPollConfig = zapDiskPollConfig{ + stop: stop, + pollChan: pollChan, + } + local.FileMaxSizeMB = int(maxSize/utils.MB) * 2 + + lggr := newTestLogger(t, local) + + pollChan <- time.Now() + <-local.testDiskLogLvlChan + + lggr.Debug("trying to write to disk when the disk logs should not be created") + + logFile := local.LogsFile() + _, err = os.ReadFile(logFile) + + require.Error(t, err) + require.Contains(t, err.Error(), "no such file or directory") + }) + + t.Run("on logger creation generic error", func(t *testing.T) { + pollChan := make(chan time.Time) + stop := func() { + close(pollChan) + } + + local.diskSpaceAvailableFn = func(path string) (utils.FileSize, error) { + assert.Equal(t, logsDir, path) + return 0, nil + } + local.diskPollConfig = zapDiskPollConfig{ + stop: stop, + pollChan: pollChan, + } + local.FileMaxSizeMB = int(maxSize/utils.MB) * 2 + + lggr := newTestLogger(t, local) + + pollChan <- time.Now() + <-local.testDiskLogLvlChan + + lggr.Debug("trying to write to disk when the disk logs should not be created - generic error") + + logFile := local.LogsFile() + _, err = os.ReadFile(logFile) + + require.Error(t, err) + require.Contains(t, err.Error(), "no such file or directory") + }) + + t.Run("after logger is created", func(t *testing.T) { + pollChan := make(chan time.Time) + stop := func() { + close(pollChan) + } + + available := maxSize * 10 + local.diskSpaceAvailableFn = func(path string) (utils.FileSize, error) { + assert.Equal(t, logsDir, path) + return available, nil + } + local.diskPollConfig = zapDiskPollConfig{ + stop: stop, + pollChan: pollChan, + } + local.FileMaxSizeMB = int(maxSize/utils.MB) * 2 + + lggr := newTestLogger(t, local) + + lggr.Debug("writing to disk on test") + + available = maxSize + + 
pollChan <- time.Now() + <-local.testDiskLogLvlChan + + lggr.SetLogLevel(zapcore.WarnLevel) + lggr.Debug("writing to disk on test again") + lggr.Warn("writing to disk on test again") + + logFile := local.LogsFile() + b, err := os.ReadFile(logFile) + assert.NoError(t, err) + + logs := string(b) + lines := strings.Split(logs, "\n") + // the last line is a blank line, hence why using len(lines) - 2 makes sense + actualMessage := lines[len(lines)-2] + expectedMessage := fmt.Sprintf( + "Disk space is not enough to log into disk any longer, required disk space: %s, Available disk space: %s", + local.RequiredDiskSpace(), + maxSize, + ) + + require.Contains(t, actualMessage, expectedMessage) + }) + + t.Run("after logger is created, recovers disk space", func(t *testing.T) { + pollChan := make(chan time.Time) + stop := func() { + close(pollChan) + } + + available := maxSize * 10 + + local.diskSpaceAvailableFn = func(path string) (utils.FileSize, error) { + assert.Equal(t, logsDir, path) + return available, nil + } + local.diskPollConfig = zapDiskPollConfig{ + stop: stop, + pollChan: pollChan, + } + local.FileMaxSizeMB = int(maxSize/utils.MB) * 2 + + lggr := newTestLogger(t, local) + + lggr.Debug("test") + + available = maxSize + + pollChan <- time.Now() + <-local.testDiskLogLvlChan + + available = maxSize * 12 + + pollChan <- time.Now() + <-local.testDiskLogLvlChan + + lggr.Debug("test again") + + logFile := local.LogsFile() + b, err := os.ReadFile(logFile) + assert.NoError(t, err) + + logs := string(b) + lines := strings.Split(logs, "\n") + expectedMessage := fmt.Sprintf( + "Disk space is not enough to log into disk any longer, required disk space: %s, Available disk space: %s", + local.RequiredDiskSpace(), + maxSize, + ) + + // the last line is a blank line, hence why using len(lines) - N makes sense + require.Contains(t, lines[len(lines)-4], expectedMessage) + require.Contains(t, lines[len(lines)-3], "Resuming disk logs, disk has enough space") + require.Contains(t, 
lines[len(lines)-2], "test again") + }) +} + +func TestZapLogger_LogCaller(t *testing.T) { + maxSize := utils.FileSize(5 * utils.MB) + + logsDir := t.TempDir() + tmpFile, err := os.CreateTemp(logsDir, "*") + assert.NoError(t, err) + defer func() { assert.NoError(t, tmpFile.Close()) }() + + var logFileSize utils.FileSize + err = logFileSize.UnmarshalText([]byte("100mb")) + assert.NoError(t, err) + + pollChan := make(chan time.Time) + stop := func() { + close(pollChan) + } + local := Config{ + Dir: logsDir, + FileMaxAgeDays: 1, + FileMaxBackups: 1, + FileMaxSizeMB: int(logFileSize / utils.MB), + + diskPollConfig: zapDiskPollConfig{ + stop: stop, + pollChan: pollChan, + }, + testDiskLogLvlChan: make(chan zapcore.Level), + } + + local.diskSpaceAvailableFn = func(path string) (utils.FileSize, error) { + assert.Equal(t, logsDir, path) + return maxSize * 10, nil + } + local.FileMaxSizeMB = int(maxSize/utils.MB) * 2 + + lggr := newTestLogger(t, local) + + lggr.Debug("test message with caller") + + pollChan <- time.Now() + <-local.testDiskLogLvlChan + + logFile := local.LogsFile() + b, err := os.ReadFile(logFile) + assert.NoError(t, err) + + logs := string(b) + lines := strings.Split(logs, "\n") + + require.Contains(t, lines[0], "logger/zap_test.go:246") +} + +func TestZapLogger_Name(t *testing.T) { + cfg := Config{} + lggr := newTestLogger(t, cfg) + require.Equal(t, "", lggr.Name()) + lggr1 := lggr.Named("Lggr1") + require.Equal(t, "Lggr1", lggr1.Name()) + lggr2 := lggr1.Named("Lggr2") + require.Equal(t, "Lggr1.Lggr2", lggr2.Name()) +} diff --git a/core/main.go b/core/main.go new file mode 100644 index 00000000..763e4a8f --- /dev/null +++ b/core/main.go @@ -0,0 +1,52 @@ +package core + +import ( + "fmt" + "log" + "os" + + "github.com/Masterminds/semver/v3" + + "github.com/goplugin/pluginv3.0/v2/core/build" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/recovery" + "github.com/goplugin/pluginv3.0/v2/core/static" +) + +func init() { 
+ // check version + if static.Version == static.Unset { + if !build.IsProd() { + return + } + log.Println(`Version was unset on production build. Plugin should be built with static.Version set to a valid semver for production builds.`) + } else if _, err := semver.NewVersion(static.Version); err != nil { + panic(fmt.Sprintf("Version invalid: %q is not valid semver", static.Version)) + } +} + +func Main() (code int) { + recovery.ReportPanics(func() { + app := cmd.NewApp(newProductionClient()) + if err := app.Run(os.Args); err != nil { + fmt.Fprintf(os.Stderr, "Error running app: %v\n", err) + code = 1 + } + }) + return +} + +// newProductionClient configures an instance of the CLI to be used in production. +func newProductionClient() *cmd.Shell { + prompter := cmd.NewTerminalPrompter() + return &cmd.Shell{ + Renderer: cmd.RendererTable{Writer: os.Stdout}, + AppFactory: cmd.PluginAppFactory{}, + KeyStoreAuthenticator: cmd.TerminalKeyStoreAuthenticator{Prompter: prompter}, + FallbackAPIInitializer: cmd.NewPromptingAPIInitializer(prompter), + Runner: cmd.PluginRunner{}, + PromptingSessionRequestBuilder: cmd.NewPromptingSessionRequestBuilder(prompter), + ChangePasswordPrompter: cmd.NewChangePasswordPrompter(), + PasswordPrompter: cmd.NewPasswordPrompter(), + } +} diff --git a/core/null/int64.go b/core/null/int64.go new file mode 100644 index 00000000..ca236784 --- /dev/null +++ b/core/null/int64.go @@ -0,0 +1,158 @@ +package null + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" +) + +// Int64 encapsulates the value and validity (not null) of a int64 value, +// to differentiate nil from 0 in json and sql. +type Int64 struct { + Int64 int64 + Valid bool +} + +// NewInt64 returns an instance of Int64 with the passed parameters. +func NewInt64(i int64, valid bool) Int64 { + return Int64{ + Int64: i, + Valid: valid, + } +} + +// Int64From creates a new Int64 that will always be valid. 
+func Int64From(i int64) Int64 { + return NewInt64(i, true) +} + +// UnmarshalJSON implements json.Unmarshaler. +// It supports number and null input. +// 0 will not be considered a null Int. +func (i *Int64) UnmarshalJSON(data []byte) error { + var err error + var v interface{} + if err = json.Unmarshal(data, &v); err != nil { + return err + } + switch x := v.(type) { + case float64: + // Unmarshal again, directly to value, to avoid intermediate float64 + err = json.Unmarshal(data, &i.Int64) + case string: + str := x + if len(str) == 0 { + i.Valid = false + return nil + } + i.Int64, err = parse64(str) + case nil: + i.Valid = false + return nil + default: + err = fmt.Errorf("json: cannot unmarshal %v into Go value of type null.Int64", reflect.TypeOf(v).Name()) + } + i.Valid = err == nil + return err +} + +// UnmarshalText implements encoding.TextUnmarshaler. +// It will unmarshal to a null Int64 if the input is a blank or not an integer. +// It will return an error if the input is not an integer, blank, or "null". +func (i *Int64) UnmarshalText(text []byte) error { + str := string(text) + if str == "" || str == "null" { + i.Valid = false + return nil + } + var err error + i.Int64, err = parse64(string(text)) + i.Valid = err == nil + return err +} + +func parse64(str string) (int64, error) { + v, err := strconv.ParseInt(str, 10, 64) + return v, err +} + +// MarshalJSON implements json.Marshaler. +// It will encode null if this Int64 is null. +func (i Int64) MarshalJSON() ([]byte, error) { + if !i.Valid { + return []byte("null"), nil + } + return []byte(strconv.FormatInt(i.Int64, 10)), nil +} + +// MarshalText implements encoding.TextMarshaler. +// It will encode a blank string if this Int64 is null. +func (i Int64) MarshalText() ([]byte, error) { + if !i.Valid { + return []byte{}, nil + } + return []byte(strconv.FormatInt(i.Int64, 10)), nil +} + +// SetValid changes this Int64's value and also sets it to be non-null. 
+func (i *Int64) SetValid(n int64) { + i.Int64 = n + i.Valid = true +} + +// Value returns this instance serialized for database storage. +func (i Int64) Value() (driver.Value, error) { + if !i.Valid { + return nil, nil + } + + // golang's sql driver types as determined by IsValue only supports: + // []byte, bool, float64, int64, string, time.Time + // https://golang.org/src/database/sql/driver/types.go + return i.Int64, nil +} + +// Scan reads the database value and returns an instance. +func (i *Int64) Scan(value interface{}) error { + if value == nil { + *i = Int64{} + return nil + } + + switch typed := value.(type) { + case int: + safe := int64(typed) + *i = Int64From(safe) + case int32: + safe := int64(typed) + *i = Int64From(safe) + case int64: + safe := typed + *i = Int64From(safe) + case uint: + if typed > uint(math.MaxInt64) { + return fmt.Errorf("unable to convert %v of %T to Int64; overflow", value, value) + } + safe := int64(typed) + *i = Int64From(safe) + case uint64: + if typed > uint64(math.MaxInt64) { + return fmt.Errorf("unable to convert %v of %T to Int64; overflow", value, value) + } + safe := int64(typed) + *i = Int64From(safe) + default: + return fmt.Errorf("unable to convert %v of %T to Int64", value, value) + } + return nil +} + +func (i Int64) Ptr() *int64 { + if i.Valid { + return &i.Int64 + } + return nil +} diff --git a/core/null/int64_test.go b/core/null/int64_test.go new file mode 100644 index 00000000..d22bb025 --- /dev/null +++ b/core/null/int64_test.go @@ -0,0 +1,204 @@ +package null_test + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/null" +) + +func TestInt64From(t *testing.T) { + tests := []struct { + input int64 + }{ + {12345}, + {0}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.input), func(t *testing.T) { + i := null.Int64From(test.input) + assert.True(t, 
i.Valid) + assert.Equal(t, test.input, i.Int64) + }) + } +} + +func TestUnmarshalInt64_Valid(t *testing.T) { + tests := []struct { + name, input string + }{ + {"int json", `12345`}, + {"int string json", `"12345"`}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var i null.Int64 + err := json.Unmarshal([]byte(test.input), &i) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, int64(12345), i.Int64) + }) + } +} + +func TestUnmarshalInt64_Invalid(t *testing.T) { + tests := []struct { + name, input string + }{ + {"blank json string", `""`}, + {"null json", `null`}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var i null.Int64 + err := json.Unmarshal([]byte(test.input), &i) + require.NoError(t, err) + assert.False(t, i.Valid) + }) + } +} + +func TestUnmarshalInt64_Error(t *testing.T) { + tests := []struct { + name, input string + }{ + {"wrong type json", `true`}, + {"invalid json", `:)`}, + {"float", `1.2345`}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var i null.Int64 + err := json.Unmarshal([]byte(test.input), &i) + require.Error(t, err) + assert.False(t, i.Valid) + }) + } +} + +func TestUnmarshalUint64Overflow(t *testing.T) { + // Max int64 should decode successfully + var i null.Int64 + err := json.Unmarshal([]byte(strconv.FormatInt(math.MaxInt64, 10)), &i) + require.NoError(t, err) + + // Attempt to overflow + err = json.Unmarshal([]byte(strconv.FormatUint(math.MaxUint64, 10)), &i) + require.Error(t, err) +} + +func TestTextUnmarshalInt64_Valid(t *testing.T) { + var i null.Int64 + err := i.UnmarshalText([]byte("12345")) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, int64(12345), i.Int64) +} + +func TestTextUnmarshalInt64_Invalid(t *testing.T) { + tests := []struct { + name, input string + }{ + {"empty", ""}, + {"null", "null"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var i 
null.Int64 + err := i.UnmarshalText([]byte(test.input)) + require.NoError(t, err) + assert.False(t, i.Valid) + }) + } +} + +func TestMarshalInt64(t *testing.T) { + i := null.Int64From(12345) + data, err := json.Marshal(i) + require.NoError(t, err) + assertJSONEquals(t, data, "12345", "non-empty json marshal") + + // invalid values should be encoded as null + null := null.NewInt64(0, false) + data, err = json.Marshal(null) + require.NoError(t, err) + assertJSONEquals(t, data, "null", "null json marshal") +} + +func TestMarshalInt64Text(t *testing.T) { + i := null.Int64From(12345) + data, err := i.MarshalText() + require.NoError(t, err) + assertJSONEquals(t, data, "12345", "non-empty text marshal") + + // invalid values should be encoded as null + null := null.NewInt64(0, false) + data, err = null.MarshalText() + require.NoError(t, err) + assertJSONEquals(t, data, "", "null text marshal") +} + +func TestInt64SetValid(t *testing.T) { + change := null.NewInt64(0, false) + change.SetValid(12345) + assert.True(t, change.Valid) + assert.Equal(t, int64(12345), change.Int64) +} + +func TestInt64Scan(t *testing.T) { + var i null.Int64 + err := i.Scan(int(12345)) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, int64(12345), i.Int64) + + err = i.Scan(int32(12345)) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, int64(12345), i.Int64) + + err = i.Scan(int64(12345)) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, int64(12345), i.Int64) + + err = i.Scan(math.MaxInt64) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, int64(math.MaxInt64), i.Int64) + + err = i.Scan(uint(12345)) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, int64(12345), i.Int64) + + err = i.Scan(uint64(12345)) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, int64(12345), i.Int64) + + // uint64 overflows int64 + overflowingUint64 := uint64(math.MaxInt64) + 1 + err = 
i.Scan(overflowingUint64) + require.Error(t, err) + + // uint overflows int64 + overflowingUint := uint(math.MaxInt64) + 1 + err = i.Scan(overflowingUint) + require.Error(t, err) + + err = i.Scan(nil) + require.NoError(t, err) + assert.False(t, i.Valid) +} diff --git a/core/null/uint32.go b/core/null/uint32.go new file mode 100644 index 00000000..1d4fbc15 --- /dev/null +++ b/core/null/uint32.go @@ -0,0 +1,150 @@ +package null + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "reflect" + "strconv" +) + +// Uint32 encapsulates the value and validity (not null) of a uint32 value, +// to differentiate nil from 0 in json and sql. +type Uint32 struct { + Uint32 uint32 + Valid bool +} + +// NewUint32 returns an instance of Uint32 with the passed parameters. +func NewUint32(i uint32, valid bool) Uint32 { + return Uint32{ + Uint32: i, + Valid: valid, + } +} + +// Uint32From creates a new Uint32 that will always be valid. +func Uint32From(i uint32) Uint32 { + return NewUint32(i, true) +} + +// UnmarshalJSON implements json.Unmarshaler. +// It supports number and null input. +// 0 will not be considered a null Int. +func (i *Uint32) UnmarshalJSON(data []byte) error { + var err error + var v interface{} + if err = json.Unmarshal(data, &v); err != nil { + return err + } + switch x := v.(type) { + case float64: + // Unmarshal again, directly to value, to avoid intermediate float64 + err = json.Unmarshal(data, &i.Uint32) + case string: + str := x + if len(str) == 0 { + i.Valid = false + return nil + } + i.Uint32, err = parse(str) + case nil: + i.Valid = false + return nil + default: + err = fmt.Errorf("json: cannot unmarshal %v into Go value of type null.Uint32", reflect.TypeOf(v).Name()) + } + i.Valid = err == nil + return err +} + +// UnmarshalText implements encoding.TextUnmarshaler. +// It will unmarshal to a null Uint32 if the input is a blank or not an integer. +// It will return an error if the input is not an integer, blank, or "null". 
+func (i *Uint32) UnmarshalText(text []byte) error { + str := string(text) + if str == "" || str == "null" { + i.Valid = false + return nil + } + var err error + i.Uint32, err = parse(string(text)) + i.Valid = err == nil + return err +} + +func parse(str string) (uint32, error) { + v, err := strconv.ParseUint(str, 10, 32) + return uint32(v), err +} + +// MarshalJSON implements json.Marshaler. +// It will encode null if this Uint32 is null. +func (i Uint32) MarshalJSON() ([]byte, error) { + if !i.Valid { + return []byte("null"), nil + } + return []byte(strconv.FormatUint(uint64(i.Uint32), 10)), nil +} + +// MarshalText implements encoding.TextMarshaler. +// It will encode a blank string if this Uint32 is null. +func (i Uint32) MarshalText() ([]byte, error) { + if !i.Valid { + return []byte{}, nil + } + return []byte(strconv.FormatUint(uint64(i.Uint32), 10)), nil +} + +// SetValid changes this Uint32's value and also sets it to be non-null. +func (i *Uint32) SetValid(n uint32) { + i.Uint32 = n + i.Valid = true +} + +// Value returns this instance serialized for database storage. +func (i Uint32) Value() (driver.Value, error) { + if !i.Valid { + return nil, nil + } + + // golang's sql driver types as determined by IsValue only supports: + // []byte, bool, float64, int64, string, time.Time + // https://golang.org/src/database/sql/driver/types.go + return int64(i.Uint32), nil +} + +// Scan reads the database value and returns an instance. 
+func (i *Uint32) Scan(value interface{}) error { + if value == nil { + *i = Uint32{} + return nil + } + + switch typed := value.(type) { + case int: + safe := uint32(typed) + if int(safe) != typed { + return fmt.Errorf("unable to convert %v of %T to Uint32; overflow", value, value) + } + *i = Uint32From(safe) + case int64: + safe := uint32(typed) + if int64(safe) != typed { + return fmt.Errorf("unable to convert %v of %T to Uint32; overflow", value, value) + } + *i = Uint32From(safe) + case uint: + safe := uint32(typed) + if uint(safe) != typed { + return fmt.Errorf("unable to convert %v of %T to Uint32; overflow", value, value) + } + *i = Uint32From(safe) + case uint32: + safe := typed + *i = Uint32From(safe) + default: + return fmt.Errorf("unable to convert %v of %T to Uint32", value, value) + } + return nil +} diff --git a/core/null/uint32_test.go b/core/null/uint32_test.go new file mode 100644 index 00000000..08b3b8ed --- /dev/null +++ b/core/null/uint32_test.go @@ -0,0 +1,200 @@ +package null_test + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/null" +) + +func TestUint32From(t *testing.T) { + tests := []struct { + input uint32 + }{ + {12345}, + {0}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.input), func(t *testing.T) { + i := null.Uint32From(test.input) + assert.True(t, i.Valid) + assert.Equal(t, test.input, i.Uint32) + }) + } +} + +func TestUnmarshalUint32_Valid(t *testing.T) { + tests := []struct { + name, input string + }{ + {"int json", `12345`}, + {"int string json", `"12345"`}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var i null.Uint32 + err := json.Unmarshal([]byte(test.input), &i) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, uint32(12345), i.Uint32) + }) + } +} + +func TestUnmarshalUint32_Invalid(t *testing.T) { + 
tests := []struct { + name, input string + }{ + {"blank json string", `""`}, + {"null json", `null`}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var i null.Uint32 + err := json.Unmarshal([]byte(test.input), &i) + require.NoError(t, err) + assert.False(t, i.Valid) + }) + } +} + +func TestUnmarshalUint32_Error(t *testing.T) { + tests := []struct { + name, input string + }{ + {"wrong type json", `true`}, + {"invalid json", `:)`}, + {"float", `1.2345`}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var i null.Uint32 + err := json.Unmarshal([]byte(test.input), &i) + require.Error(t, err) + assert.False(t, i.Valid) + }) + } +} + +func TestUnmarshalUint32Overflow(t *testing.T) { + maxUint32 := uint64(math.MaxUint32) + + // Max uint32 should decode successfully + var i null.Uint32 + err := json.Unmarshal([]byte(strconv.FormatUint(maxUint32, 10)), &i) + require.NoError(t, err) + + // Attempt to overflow + err = json.Unmarshal([]byte(strconv.FormatUint(maxUint32+1, 10)), &i) + require.Error(t, err) +} + +func TestTextUnmarshalInt_Valid(t *testing.T) { + var i null.Uint32 + err := i.UnmarshalText([]byte("12345")) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, uint32(12345), i.Uint32) +} + +func TestTextUnmarshalInt_Invalid(t *testing.T) { + tests := []struct { + name, input string + }{ + {"empty", ""}, + {"null", "null"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var i null.Uint32 + err := i.UnmarshalText([]byte(test.input)) + require.NoError(t, err) + assert.False(t, i.Valid) + }) + } +} + +func TestMarshalInt(t *testing.T) { + i := null.Uint32From(12345) + data, err := json.Marshal(i) + require.NoError(t, err) + assertJSONEquals(t, data, "12345", "non-empty json marshal") + + // invalid values should be encoded as null + null := null.NewUint32(0, false) + data, err = json.Marshal(null) + require.NoError(t, err) + assertJSONEquals(t, data, "null", 
"null json marshal") +} + +func TestMarshalIntText(t *testing.T) { + i := null.Uint32From(12345) + data, err := i.MarshalText() + require.NoError(t, err) + assertJSONEquals(t, data, "12345", "non-empty text marshal") + + // invalid values should be encoded as null + null := null.NewUint32(0, false) + data, err = null.MarshalText() + require.NoError(t, err) + assertJSONEquals(t, data, "", "null text marshal") +} + +func TestUint32SetValid(t *testing.T) { + change := null.NewUint32(0, false) + change.SetValid(12345) + assert.True(t, change.Valid) + assert.Equal(t, uint32(12345), change.Uint32) +} + +func TestUint32Scan(t *testing.T) { + var i null.Uint32 + err := i.Scan(12345) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, uint32(12345), i.Uint32) + + err = i.Scan(int64(12345)) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, uint32(12345), i.Uint32) + + // int64 overflows uint32 + err = i.Scan(int64(math.MaxInt64)) + require.Error(t, err) + + err = i.Scan(uint(12345)) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, uint32(12345), i.Uint32) + + // uint overflows uint32 + err = i.Scan(uint(math.MaxUint64)) + require.Error(t, err) + + err = i.Scan(uint32(12345)) + require.NoError(t, err) + assert.True(t, i.Valid) + assert.Equal(t, uint32(12345), i.Uint32) + + err = i.Scan(nil) + require.NoError(t, err) + assert.False(t, i.Valid) +} + +func assertJSONEquals(t *testing.T, data []byte, cmp string, from string) { + if string(data) != cmp { + t.Errorf("bad %s data: %s ≠ %s\n", from, data, cmp) + } +} diff --git a/core/plugin.Dockerfile b/core/plugin.Dockerfile new file mode 100644 index 00000000..400a4ab2 --- /dev/null +++ b/core/plugin.Dockerfile @@ -0,0 +1,73 @@ +# Build image: Plugin binary +FROM golang:1.21-bullseye as buildgo +RUN go version +WORKDIR /plugin + +COPY GNUmakefile VERSION ./ +COPY tools/bin/ldflags ./tools/bin/ + +ADD go.mod go.sum ./ +RUN go mod download + +# Env vars needed for plugin 
build +ARG COMMIT_SHA + +COPY . . + +# Build the golang binary +RUN make install-plugin + +# Link LOOP Plugin source dirs with simple names +RUN go list -m -f "{{.Dir}}" github.com/goplugin/plugin-feeds | xargs -I % ln -s % /plugin-feeds +RUN go list -m -f "{{.Dir}}" github.com/goplugin/plugin-solana | xargs -I % ln -s % /plugin-solana + +# Build image: Plugins +FROM golang:1.21-bullseye as buildplugins +RUN go version + +WORKDIR /plugin-feeds +COPY --from=buildgo /plugin-feeds . +RUN go install ./cmd/plugin-feeds + +WORKDIR /plugin-solana +COPY --from=buildgo /plugin-solana . +RUN go install ./pkg/solana/cmd/plugin-solana + +# Final image: ubuntu with plugin binary +FROM ubuntu:20.04 + +ARG PLUGIN_USER=root +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update && apt-get install -y ca-certificates gnupg lsb-release curl + +# Install Postgres for CLI tools, needed specifically for DB backups +RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + && echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" |tee /etc/apt/sources.list.d/pgdg.list \ + && apt-get update && apt-get install -y postgresql-client-15 \ + && apt-get clean all + +COPY --from=buildgo /go/bin/plugin /usr/local/bin/ + +# Install (but don't enable) LOOP Plugins +COPY --from=buildplugins /go/bin/plugin-feeds /usr/local/bin/ +COPY --from=buildplugins /go/bin/plugin-solana /usr/local/bin/ + +# Dependency of CosmWasm/wasmd +COPY --from=buildgo /go/pkg/mod/github.com/\!cosm\!wasm/wasmvm@v*/internal/api/libwasmvm.*.so /usr/lib/ +RUN chmod 755 /usr/lib/libwasmvm.*.so + +RUN if [ ${PLUGIN_USER} != root ]; then \ + useradd --uid 14933 --create-home ${PLUGIN_USER}; \ + fi +USER ${PLUGIN_USER} +WORKDIR /home/${PLUGIN_USER} +# explicit set the cache dir. 
needed so both root and non-root user has an explicit location +ENV XDG_CACHE_HOME /home/${PLUGIN_USER}/.cache +RUN mkdir -p ${XDG_CACHE_HOME} + +EXPOSE 6688 +ENTRYPOINT ["plugin"] + +HEALTHCHECK CMD curl -f http://localhost:6688/health || exit 1 + +CMD ["local", "node"] diff --git a/core/plugin.devspace.Dockerfile b/core/plugin.devspace.Dockerfile new file mode 100644 index 00000000..fa6ed766 --- /dev/null +++ b/core/plugin.devspace.Dockerfile @@ -0,0 +1,73 @@ +# Build image: Plugin binary +FROM golang:1.21-bullseye AS buildgo +RUN go version +WORKDIR /plugin + +COPY GNUmakefile VERSION ./ +COPY tools/bin/ldflags ./tools/bin/ + +ADD go.mod go.sum ./ +RUN go mod download + +# Env vars needed for plugin build +ARG COMMIT_SHA + +COPY . . + +# Build the golang binary +RUN make install-plugin + +# Link LOOP Plugin source dirs with simple names +RUN go list -m -f "{{.Dir}}" github.com/goplugin/plugin-feeds | xargs -I % ln -s % /plugin-feeds +RUN go list -m -f "{{.Dir}}" github.com/goplugin/plugin-solana | xargs -I % ln -s % /plugin-solana + +# Build image: Plugins +FROM golang:1.21-bullseye AS buildplugins +RUN go version + +WORKDIR /plugin-feeds +COPY --from=buildgo /plugin-feeds . +RUN go install ./cmd/plugin-feeds + +WORKDIR /plugin-solana +COPY --from=buildgo /plugin-solana . 
+RUN go install ./pkg/solana/cmd/plugin-solana + +# Final image: ubuntu with plugin binary +FROM --platform=linux/amd64 golang:1.21-bullseye + +ARG PLUGIN_USER=plugin +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update && apt-get install -y ca-certificates gnupg lsb-release curl + +# Install Postgres for CLI tools, needed specifically for DB backups +RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + && echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" |tee /etc/apt/sources.list.d/pgdg.list \ + && apt-get update && apt-get install -y postgresql-client-15 \ + && apt-get clean all + +COPY --from=buildgo /go/bin/plugin /usr/local/bin/ + +# Install (but don't enable) LOOP Plugins +COPY --from=buildplugins /go/bin/plugin-feeds /usr/local/bin/ +COPY --from=buildplugins /go/bin/plugin-solana /usr/local/bin/ + +# Dependency of CosmWasm/wasmd +COPY --from=buildgo /go/pkg/mod/github.com/\!cosm\!wasm/wasmvm@v*/internal/api/libwasmvm.*.so /usr/lib/ +RUN chmod 755 /usr/lib/libwasmvm.*.so + +RUN if [ ${PLUGIN_USER} != root ]; then \ + useradd --uid 14933 --create-home ${PLUGIN_USER}; \ + fi +USER ${PLUGIN_USER} +WORKDIR /home/${PLUGIN_USER} +# explicit set the cache dir. 
needed so both root and non-root user has an explicit location +ENV XDG_CACHE_HOME /home/${PLUGIN_USER}/.cache +RUN mkdir -p ${XDG_CACHE_HOME} + +EXPOSE 6688 +ENTRYPOINT ["plugin"] + +HEALTHCHECK CMD curl -f http://localhost:6688/health || exit 1 + +CMD ["local", "node"] diff --git a/core/plugin.goreleaser.Dockerfile b/core/plugin.goreleaser.Dockerfile new file mode 100644 index 00000000..7638db3a --- /dev/null +++ b/core/plugin.goreleaser.Dockerfile @@ -0,0 +1,35 @@ +# This will replace plugin.Dockerfile once all builds are migrated to goreleaser + +# Final image: ubuntu with plugin binary +FROM ubuntu:20.04 + +ARG PLUGIN_USER=root +ARG TARGETARCH +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update && apt-get install -y ca-certificates gnupg lsb-release curl + +# Install Postgres for CLI tools, needed specifically for DB backups +RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + && echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" |tee /etc/apt/sources.list.d/pgdg.list \ + && apt-get update && apt-get install -y postgresql-client-15 \ + && apt-get clean all + +COPY . /usr/local/bin/ +# Copy native libs if cgo is enabled +COPY ./tmp/linux_${TARGETARCH}/libs /usr/local/bin/libs + +RUN if [ ${PLUGIN_USER} != root ]; then \ + useradd --uid 14933 --create-home ${PLUGIN_USER}; \ + fi +USER ${PLUGIN_USER} +WORKDIR /home/${PLUGIN_USER} +# explicit set the cache dir. 
needed so both root and non-root user has an explicit location +ENV XDG_CACHE_HOME /home/${PLUGIN_USER}/.cache +RUN mkdir -p ${XDG_CACHE_HOME} + +EXPOSE 6688 +ENTRYPOINT ["plugin"] + +HEALTHCHECK CMD curl -f http://localhost:6688/health || exit 1 + +CMD ["local", "node"] diff --git a/core/recovery/recover.go b/core/recovery/recover.go new file mode 100644 index 00000000..eaf6307c --- /dev/null +++ b/core/recovery/recover.go @@ -0,0 +1,41 @@ +package recovery + +import ( + "github.com/getsentry/sentry-go" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func ReportPanics(fn func()) { + defer func() { + if err := recover(); err != nil { + sentry.CurrentHub().Recover(err) + sentry.Flush(logger.SentryFlushDeadline) + + panic(err) + } + }() + fn() +} + +func WrapRecover(lggr logger.Logger, fn func()) { + defer func() { + if err := recover(); err != nil { + lggr.Recover(err) + } + }() + fn() +} + +func WrapRecoverHandle(lggr logger.Logger, fn func(), onPanic func(interface{})) { + defer func() { + if err := recover(); err != nil { + lggr.Recover(err) + + if onPanic != nil { + onPanic(err) + } + } + }() + fn() +} diff --git a/core/scripts/chaincli/.gitignore b/core/scripts/chaincli/.gitignore new file mode 100644 index 00000000..63400aa2 --- /dev/null +++ b/core/scripts/chaincli/.gitignore @@ -0,0 +1,2 @@ +*.csv +*.txt \ No newline at end of file diff --git a/core/scripts/chaincli/DEBUGGING.md b/core/scripts/chaincli/DEBUGGING.md new file mode 100644 index 00000000..092f7a30 --- /dev/null +++ b/core/scripts/chaincli/DEBUGGING.md @@ -0,0 +1,95 @@ +## Automation Debugging Script + +### Context + +The debugging script is a tool within ChainCLI designed to facilitate the debugging of upkeeps in Automation v21, covering both conditional and log-based scenarios. + +### Setup + +Before starting, you will need: +1. Git clone this plugin [repo](https://github.com/goplugin/pluginv3.0) +2. A working [Go](https://go.dev/doc/install) installation +2. 
Change directory to `core/scripts/chaincli` and create a `.env` file based on the example `.env.debugging.example` + +### Configuration in `.env` File + +#### Mandatory Fields + +Ensure the following fields are provided in your `.env` file: + +- `NODE_URL`: Archival node URL +- `KEEPER_REGISTRY_ADDRESS`: Address of the Keeper Registry contract. Refer to the [Supported Networks](https://docs.chain.link/plugin-automation/overview/supported-networks#configurations) doc for addresses. + +#### Optional Fields (Streams Lookup) + +If your targeted upkeep involves streams lookup, please provide the following details. If you are using Data Streams v0.3 (which is likely), only provide the DATA_STREAMS_URL. The DATA_STREAMS_LEGACY_URL is specifically for Data Streams v0.2. + +- `DATA_STREAMS_ID` +- `DATA_STREAMS_KEY` +- `DATA_STREAMS_LEGACY_URL` +- `DATA_STREAMS_URL` + +#### Optional Fields (Tenderly Integration) + +For detailed transaction simulation logs, set up Tenderly credentials. Refer to the [Tenderly Documentation](https://docs.tenderly.co/other/platform-access/how-to-generate-api-access-tokens) for creating an API key, account name, and project name. + +- `TENDERLY_KEY` +- `TENDERLY_ACCOUNT_NAME` +- `TENDERLY_PROJECT_NAME` + +### Usage + +Execute the following command based on your upkeep type: + +- For conditional upkeep, if a block number is given we use that block, otherwise we use the latest block: + + ```bash + go run main.go keeper debug UPKEEP_ID [OPTIONAL BLOCK_NUMBER] + ``` + +- For log trigger upkeep: + + ```bash + go run main.go keeper debug UPKEEP_ID TX_HASH LOG_INDEX + ``` + +### Checks Performed by the Debugging Script + +1. **Fetch and Sanity Check Upkeep:** + - Verify upkeep status: active, paused, or canceled + - Check upkeep balance + +2. **For Conditional Upkeep:** + - Check conditional upkeep + - Simulate `performUpkeep` + +3. 
**For Log Trigger Upkeep:**
+   - Check if the upkeep has already run for log-trigger-based upkeep
+   - Verify if log matches trigger configuration
+   - Check upkeep
+   - If check result indicates a streams lookup is required (TargetCheckReverted):
+     - Verify if the upkeep is allowed to use Mercury
+     - Execute Mercury request
+     - Execute check callback
+
+   - Simulate `performUpkeep`
+
+### Examples
+- Eligible and log trigger based and using mercury lookup v0.3:
+
+  ```bash
+  go run main.go keeper debug 5591498142036749453487419299781783197030971023186134955311257372668222176389 0xdc6d0e547a5aa85fefa5b0f3a37e3493eafb5aeba8b5f3071ce53c9e9a539e9c 0
+  ```
+
+- Ineligible and conditional upkeep:
+
+  ```bash
+  go run main.go keeper debug 52635131310730056105456985154251306793887717546629785340977553840883117540096
+  ```
+
+- Ineligible and Log does not match trigger config:
+
+  ```bash
+  go run main.go keeper debug 5591498142036749453487419299781783197030971023186134955311257372668222176389 0xc0686ae85d2a7a976ef46df6c613517b9fd46f23340ac583be4e44f5c8b7a186 1
+  ```
+---
\ No newline at end of file
diff --git a/core/scripts/chaincli/README.md b/core/scripts/chaincli/README.md
new file mode 100644
index 00000000..c9d5e4a4
--- /dev/null
+++ b/core/scripts/chaincli/README.md
@@ -0,0 +1,126 @@
+## Setup
+
+Before starting, you will need:
+1. A working [Go](https://go.dev/doc/install) installation
+2. EVM chain endpoint URLs
+   - The endpoint can be a local node, or an externally hosted node, e.g. [alchemy](https://www.alchemy.com) or [infura](https://www.infura.io)
+   - Both the HTTPS and WSS URLs of your endpoint are needed
+3. The chain ID corresponding to your chain, you can find the chain ID for your chosen chain [here](https://chainlist.org/)
+4. 
The private key of an account funded with PLI, and the chain's native token (to pay transaction fees) + - Steps for exporting your private key from Metamask can be found [here](https://metamask.zendesk.com/hc/en-us/articles/360015289632-How-to-Export-an-Account-Private-Key) +5. The PLI address, PLI-ETH feed address, fast gas feed address for your chain +6. Install [docker](https://docs.docker.com/get-docker/) for CLI and GUI (optional) +7. \[Optional\] get a [tenderly API key](https://docs.tenderly.co/other/platform-access/how-to-generate-api-access-tokens) and find your [username / project name](https://docs.tenderly.co/other/platform-access/how-to-find-the-project-slug-username-and-organization-name). + +The example .env in this repo is for the Polygon Mumbai testnet. You can use [this faucet](https://faucets.chain.link/mumbai) to send testnet PLI +to your wallet ahead of executing the next steps + +>Note: Be careful with your key. When using testnets, it's best to use a separate account that does not hold real funds. + +## Run OCR2Keepers locally + +Build a local copy of the plugin docker image by running this command in the root directory of the plugin repo: + +```bash +docker build -t plugin:local -f ./core/plugin.Dockerfile . 
+``` + +Next, from the root directory again, `cd` into the chaincli directory: + +```shell +cd core/scripts/chaincli +``` + +Build `chaincli` by running the following command: + +```shell +go build +``` + +Create the `.env` file based on the example `.env.example`, adding the node endpoint URLs and the private key of your wallet + +### Keeper Registry +Next, use chaincli to deploy the registry: + +Example: +```shell +./chaincli keeper registry deploy +``` + +Other options include: +- `./chaincli keeper registry update`: update existing keeper registry +- `./chaincli keeper registry withdraw`: cancel upkeeps and withdraw funds from registry +- `./chaincli keeper registry verify `: verify keeper registry contract + +As the `keeper registry deploy` command executes, _two_ address are written to the terminal: + +- KeeperRegistry2.0 Logic _(can be ignored)_ +- KeeperRegistry2.0 + +The second address, `KeeperRegistry2.0` is the address you need; in the `.env` file, set `KEEPER_REGISTRY_ADDRESS` variable to the `KeeperRegistry2.0` address. + +Note that this command doesn't run contract verification by default. If you want to run verification (eth, op and arb supported), config your .env and add the `--verify=true` flag in command. + +If you already have keeper registry contract deployed and want to run only contract verification, you can use the following command: + +```shell +./chaincli keeper registry verify +``` + +### Bootstrap Nodes +Run the following `bootstrap` command to start bootstrap nodes: + +Example: +```shell +./chaincli bootstrap +``` + +Other options include: +- `--ui-port`: default `5688`, the Plugin node UI listen port +- `--p2pv2-port`: default `8000`, the Plugin node P2P listen port +- `--force | -f`: default `false`, if existing containers should be forcefully removed + +The output of this command will show the tcp address of the deployed bootstrap node in the following format: `@bootstrap:8000`. 
+Copy this entire string, including the `@bootstrap:8000` suffix, and then set the `BOOTSTRAP_NODE_ADDR` variable to this address in the `.env` file.
+
+### Keeper launch and test
+Once the bootstrap node is running, run the following command to launch the ocr2keeper nodes:
+
+Example:
+```shell
+./chaincli keeper launch-and-test
+```
+
+Other options include:
+- `--withdraw | -w`: default `true`, if funds should be withdrawn and upkeeps should be canceled after the test
+- `--export-logs | -l`: default `false`, if container logs should be exported to ./ directory
+- `--force | -f`: default `false`, if existing containers should be forcefully removed
+
+You can also combine the `bootstrap` and `launch-and-test` commands into a single command:
+
+```shell
+./chaincli keeper launch-and-test --bootstrap
+```
+In the output of this command, you will see the http address of the nodes, e.g. `http://localhost:6688`. This is the Plugin Operator GUI. You can use the default username `notreal@fakeemail.ch` and password `fj293fbBnlQ!f9vNs~#` to log in.
+
+### Logs
+Now that the nodes are running, you can use the `logs` subcommand to stream the output of the containers to your local terminal:
+
+Example:
+```shell
+./chaincli keeper logs
+```
+
+Other options include:
+- `--container-pattern`: default `^/keeper-\d+$`, regex pattern of container names to listen to for logs
+- `--grep [string terms]`: default `empty string`, comma separated list of terms logs must include
+- `--grepv [string terms]`: default `empty string`, comma separated list of terms logs must not include
+
+
+You can use the `grep` and `grepv` flags to filter log lines, e.g. 
to only show output of the ocr2keepers plugin across the nodes, run: + +```shell +./chaincli keeper logs --grep keepers-plugin +``` + +--- \ No newline at end of file diff --git a/core/scripts/chaincli/command/bootstrap.go b/core/scripts/chaincli/command/bootstrap.go new file mode 100644 index 00000000..7d7ee16f --- /dev/null +++ b/core/scripts/chaincli/command/bootstrap.go @@ -0,0 +1,44 @@ +package command + +import ( + "log" + + "github.com/spf13/cobra" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/handler" +) + +// BootstrapNodeCmd launches a plugin node with a bootstrap job +var BootstrapNodeCmd = &cobra.Command{ + Use: "bootstrap [ui-port] [p2pv2-port]", + Short: "Setup a bootstrap node.", + Long: `This commands launches a plugin node inside the docker container and sets up the bootstrap job`, + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + baseHandler := handler.NewBaseHandler(cfg) + + force, err := cmd.Flags().GetBool("force") + if err != nil { + log.Fatal("failed to get force flag: ", err) + } + + uiPort, err := cmd.Flags().GetInt("ui-port") + if err != nil { + log.Fatal("failed to get ui-port flag: ", err) + } + + p2pv2Port, err := cmd.Flags().GetInt("p2pv2-port") + if err != nil { + log.Fatal("failed to get p2pv2-port flag: ", err) + } + + baseHandler.StartBootstrapNode(cmd.Context(), cfg.RegistryAddress, uiPort, p2pv2Port, force) + }, +} + +func init() { + BootstrapNodeCmd.Flags().BoolP("force", "f", false, "Specify if existing containers should be forcefully removed") + BootstrapNodeCmd.Flags().Int("ui-port", 5688, "Plugin node UI listen port") + BootstrapNodeCmd.Flags().Int("p2pv2-port", 8000, "Plugin node P2P listen port") +} diff --git a/core/scripts/chaincli/command/keeper/debug.go b/core/scripts/chaincli/command/keeper/debug.go new file mode 100644 index 00000000..d41814bf --- /dev/null +++ b/core/scripts/chaincli/command/keeper/debug.go @@ -0,0 
+1,20 @@ +package keeper + +import ( + "github.com/spf13/cobra" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/handler" +) + +// jobCmd represents the command to run the service +var debugCmd = &cobra.Command{ + Use: "debug", + Short: "Debug an upkeep", + Long: `This command debugs an upkeep on the povided registry to figure out why it is not performing`, + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + hdlr := handler.NewKeeper(cfg) + hdlr.Debug(cmd.Context(), args) + }, +} diff --git a/core/scripts/chaincli/command/keeper/deploy.go b/core/scripts/chaincli/command/keeper/deploy.go new file mode 100644 index 00000000..242d29ef --- /dev/null +++ b/core/scripts/chaincli/command/keeper/deploy.go @@ -0,0 +1,26 @@ +package keeper + +import ( + "log" + + "github.com/spf13/cobra" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/handler" +) + +// deployCmd represents the command to run the service +var deployCmd = &cobra.Command{ + Use: "deploy", + Short: "Deploy keepers", + Long: `This command deploys keepers (keeper registry + upkeeps).`, + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + if err := cfg.Validate(); err != nil { + log.Fatal(err) + } + + hdlr := handler.NewKeeper(cfg) + hdlr.DeployKeepers(cmd.Context()) + }, +} diff --git a/core/scripts/chaincli/command/keeper/jobs.go b/core/scripts/chaincli/command/keeper/jobs.go new file mode 100644 index 00000000..65f4b1eb --- /dev/null +++ b/core/scripts/chaincli/command/keeper/jobs.go @@ -0,0 +1,20 @@ +package keeper + +import ( + "github.com/spf13/cobra" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/handler" +) + +// jobCmd represents the command to run the service +var jobCmd = &cobra.Command{ + Use: "jobs", + Short: "Add job to keeper nodes", + Long: 
`This command creates a job on keepers.`, + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + hdlr := handler.NewKeeper(cfg) + hdlr.CreateJob(cmd.Context()) + }, +} diff --git a/core/scripts/chaincli/command/keeper/launch.go b/core/scripts/chaincli/command/keeper/launch.go new file mode 100644 index 00000000..99d2da6b --- /dev/null +++ b/core/scripts/chaincli/command/keeper/launch.go @@ -0,0 +1,53 @@ +package keeper + +import ( + "log" + + "github.com/spf13/cobra" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/handler" +) + +var launchAndTestCmd = &cobra.Command{ + Use: "launch-and-test", + Short: "Launches keepers and starts performing", + Long: `This command launches plugin nodes, keeper setup and starts performing upkeeps.`, + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + if err := cfg.Validate(); err != nil { + log.Fatal(err) + } + + hdlr := handler.NewKeeper(cfg) + + withdraw, err := cmd.Flags().GetBool("withdraw") + if err != nil { + log.Fatal("failed to get withdraw flag: ", err) + } + + bootstrap, err := cmd.Flags().GetBool("bootstrap") + if err != nil { + log.Fatal("failed to get bootstrap flag: ", err) + } + + printLogs, err := cmd.Flags().GetBool("export-logs") + if err != nil { + log.Fatal("failed to get export-logs flag: ", err) + } + + force, err := cmd.Flags().GetBool("force") + if err != nil { + log.Fatal("failed to get force flag: ", err) + } + + hdlr.LaunchAndTest(cmd.Context(), withdraw, printLogs, force, bootstrap) + }, +} + +func init() { + launchAndTestCmd.Flags().BoolP("withdraw", "w", true, "Specify if funds should be withdrawn and upkeeps should be canceled") + launchAndTestCmd.Flags().BoolP("bootstrap", "b", false, "Specify if launching bootstrap node is required. 
Default listen ports(5688, 8000) are used, if you need to use custom ports, please use bootstrap command") + launchAndTestCmd.Flags().BoolP("export-logs", "l", false, "Specify if container logs should be exported to ./") + launchAndTestCmd.Flags().BoolP("force", "f", false, "Specify if existing containers should be forcefully removed ./") +} diff --git a/core/scripts/chaincli/command/keeper/logs.go b/core/scripts/chaincli/command/keeper/logs.go new file mode 100644 index 00000000..aeb11a4b --- /dev/null +++ b/core/scripts/chaincli/command/keeper/logs.go @@ -0,0 +1,39 @@ +package keeper + +import ( + "github.com/spf13/cobra" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/handler" +) + +var logsCmd = &cobra.Command{ + Use: "logs", + Short: "Print the logs of your keeper nodes", + Long: `This command prints the logs of all keeper nodes.`, + + Run: func(cmd *cobra.Command, args []string) { + containerPattern, err := cmd.Flags().GetString("container-pattern") + if err != nil { + panic(err) + } + grep, err := cmd.Flags().GetStringSlice("grep") + if err != nil { + panic(err) + } + grepv, err := cmd.Flags().GetStringSlice("grepv") + if err != nil { + panic(err) + } + cfg := config.New() + keeper := handler.NewKeeper(cfg) + + keeper.PrintLogs(cmd.Context(), containerPattern, grep, grepv) + }, +} + +func init() { + logsCmd.Flags().String("container-pattern", `^/keeper-\d+$`, "Regex pattern of container names to listen to for logs") + logsCmd.Flags().StringSlice("grep", []string{}, "comma separated list of terms logs must include") + logsCmd.Flags().StringSlice("grepv", []string{}, "comma separated list of terms logs must not include") +} diff --git a/core/scripts/chaincli/command/keeper/registry.go b/core/scripts/chaincli/command/keeper/registry.go new file mode 100644 index 00000000..af6297a7 --- /dev/null +++ b/core/scripts/chaincli/command/keeper/registry.go @@ -0,0 +1,78 @@ +package keeper + 
+import ( + "log" + + "github.com/spf13/cobra" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/handler" +) + +var registryCmd = &cobra.Command{ + Use: "registry", + Short: "Keeper registry management", + Long: `This command provides an interface to manage keeper registry.`, +} + +var deployRegistryCmd = &cobra.Command{ + Use: "deploy", + Short: "Deploy keeper registry", + Long: `This command deploys a new keeper registry.`, + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + hdlr := handler.NewKeeper(cfg) + + verify, err := cmd.Flags().GetBool("verify") + if err != nil { + log.Fatal("failed to get verify flag: ", err) + } + + hdlr.DeployRegistry(cmd.Context(), verify) + }, +} + +var verifyRegistryCmd = &cobra.Command{ + Use: "verify", + Short: "Verify keeper registry", + Long: `This command verifys a keeper registry.`, + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + hdlr := handler.NewKeeper(cfg) + + hdlr.VerifyContract(args...) + }, +} + +var updateRegistryCmd = &cobra.Command{ + Use: "update", + Short: "Update keeper registry", + Long: `This command updates existing keeper registry.`, + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + hdlr := handler.NewKeeper(cfg) + + hdlr.UpdateRegistry(cmd.Context()) + }, +} + +var withdrawFromRegistryCmd = &cobra.Command{ + Use: "withdraw", + Short: "cancel upkeeps and withdraw funds from registry", + Long: `This command will cancel all registered upkeeps and withdraw the funds left. 
args = Registry address`, + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + hdlr := handler.NewKeeper(cfg) + + hdlr.Withdraw(cmd.Context(), args[0]) + }, +} + +func init() { + deployRegistryCmd.Flags().BoolP("verify", "v", false, "Specify if contracts should be verified on Etherscan") + registryCmd.AddCommand(deployRegistryCmd) + registryCmd.AddCommand(verifyRegistryCmd) + registryCmd.AddCommand(updateRegistryCmd) + registryCmd.AddCommand(withdrawFromRegistryCmd) +} diff --git a/core/scripts/chaincli/command/keeper/root.go b/core/scripts/chaincli/command/keeper/root.go new file mode 100644 index 00000000..e00052e3 --- /dev/null +++ b/core/scripts/chaincli/command/keeper/root.go @@ -0,0 +1,27 @@ +package keeper + +import ( + "github.com/spf13/cobra" +) + +// RootCmd represents the root keeper sub-command to manage keepers +var RootCmd = &cobra.Command{ + Use: "keeper", + Short: "Manage keepers", + Long: `This command represents a CLI interface to manage keepers.`, +} + +func init() { + RootCmd.AddCommand(deployCmd) + RootCmd.AddCommand(debugCmd) + RootCmd.AddCommand(jobCmd) + RootCmd.AddCommand(logsCmd) + RootCmd.AddCommand(registryCmd) + RootCmd.AddCommand(launchAndTestCmd) + RootCmd.AddCommand(upkeepEventsCmd) + RootCmd.AddCommand(upkeepHistoryCmd) + RootCmd.AddCommand(ocr2UpkeepReportHistoryCmd) + RootCmd.AddCommand(ocr2UpdateConfigCmd) + RootCmd.AddCommand(scrapeNodes) + RootCmd.AddCommand(verifiableLoad) +} diff --git a/core/scripts/chaincli/command/keeper/scrape_node_config.go b/core/scripts/chaincli/command/keeper/scrape_node_config.go new file mode 100644 index 00000000..d0d5563d --- /dev/null +++ b/core/scripts/chaincli/command/keeper/scrape_node_config.go @@ -0,0 +1,20 @@ +package keeper + +import ( + "github.com/spf13/cobra" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/handler" +) + +// scrapeNodes represents the command to run 
the service +var scrapeNodes = &cobra.Command{ + Use: "scrape-node-config", + Short: "Scrape OCR2 node configs", + Long: `This command scrape OCR2 node configs. Users need to provide node URLs, emails, passwords, node URL etc as env vars.`, + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + hdlr := handler.NewBaseHandler(cfg) + hdlr.ScrapeNodes() + }, +} diff --git a/core/scripts/chaincli/command/keeper/upkeep.go b/core/scripts/chaincli/command/keeper/upkeep.go new file mode 100644 index 00000000..ae3f9470 --- /dev/null +++ b/core/scripts/chaincli/command/keeper/upkeep.go @@ -0,0 +1,159 @@ +package keeper + +import ( + "encoding/csv" + "fmt" + "log" + "os" + "strconv" + + "github.com/spf13/cobra" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/handler" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" +) + +// upkeepEventsCmd represents the command to run the upkeep events counter command +// In order to use this command, deploy, register, and fund the UpkeepCounter contract and run this command after it +// emits events on chain. +var upkeepEventsCmd = &cobra.Command{ + Use: "upkeep-events", + Short: "Print upkeep perform events(stdout and csv file)", + Long: `Print upkeep perform events and write to a csv file. 
args = hexaddr, fromBlock, toBlock`, + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + hdlr := handler.NewKeeper(cfg) + fromBlock, err := strconv.ParseUint(args[1], 10, 64) + if err != nil { + log.Fatal(err) + } + toBlock, err := strconv.ParseUint(args[2], 10, 64) + if err != nil { + log.Fatal(err) + } + hdlr.UpkeepCounterEvents(cmd.Context(), args[0], fromBlock, toBlock) + }, +} + +// upkeepHistoryCmd represents the command to run the upkeep history command +var upkeepHistoryCmd = &cobra.Command{ + Use: "upkeep-history", + Short: "Print checkUpkeep history", + Long: `Print checkUpkeep status and keeper responsibility for a given upkeep in a set block range`, + Run: func(cmd *cobra.Command, args []string) { + upkeepIdStr, err := cmd.Flags().GetString("upkeep-id") + if err != nil { + log.Fatal("failed to get 'upkeep-id' flag: ", err) + } + upkeepId, ok := keeper.ParseUpkeepId(upkeepIdStr) + if !ok { + log.Fatal("failed to parse upkeep-id") + } + + fromBlock, err := cmd.Flags().GetUint64("from") + if err != nil { + log.Fatal("failed to get 'from' flag: ", err) + } + + toBlock, err := cmd.Flags().GetUint64("to") + if err != nil { + log.Fatal("failed to get 'to' flag: ", err) + } + + gasPrice, err := cmd.Flags().GetUint64("gas-price") + if err != nil { + log.Fatal("failed to get 'gas-price' flag: ", err) + } + + cfg := config.New() + hdlr := handler.NewKeeper(cfg) + + hdlr.UpkeepHistory(cmd.Context(), upkeepId, fromBlock, toBlock, gasPrice) + }, +} + +var ocr2UpkeepReportHistoryCmd = &cobra.Command{ + Use: "ocr2-reports", + Short: "Print ocr2 automation reports", + Long: "Print ocr2 automation reports within specified range for registry address", + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + hdlr := handler.NewBaseHandler(cfg) + + var hashes []string + path, err := cmd.Flags().GetString("csv") + if err == nil && len(path) != 0 { + rec, err2 := readCsvFile(path) + if err2 != nil { + log.Fatal(err2) + } + + if len(rec) < 
1 { + log.Fatal("not enough records") + } + + hashes = make([]string, len(rec)) + for i := 0; i < len(rec); i++ { + hashes[i] = rec[i][0] + } + } else { + hashes, err = cmd.Flags().GetStringSlice("tx-hashes") + if err != nil { + log.Fatalf("failed to get transaction hashes from input: %s", err) + } + } + + if err = handler.OCR2AutomationReports(hdlr, hashes); err != nil { + log.Fatalf("failed to collect transaction data: %s", err) + } + }, +} + +var ocr2UpdateConfigCmd = &cobra.Command{ + Use: "ocr2-get-config", + Short: "Get OCR2 config parameters", + Long: "Get latest OCR2 config parameters from registry contract address", + Run: func(cmd *cobra.Command, args []string) { + + cfg := config.New() + hdlr := handler.NewBaseHandler(cfg) + + if err := handler.OCR2GetConfig(hdlr, cfg.RegistryAddress); err != nil { + log.Fatalf("failed to get config data: %s", err) + } + }, +} + +func init() { + upkeepHistoryCmd.Flags().String("upkeep-id", "", "upkeep ID") + upkeepHistoryCmd.Flags().Uint64("from", 0, "from block") + upkeepHistoryCmd.Flags().Uint64("to", 0, "to block") + upkeepHistoryCmd.Flags().Uint64("gas-price", 0, "gas price to use") + + ocr2UpkeepReportHistoryCmd.Flags().StringSlice("tx-hashes", []string{}, "list of transaction hashes to get information for") + ocr2UpkeepReportHistoryCmd.Flags().String("csv", "", "path to csv file containing transaction hashes; first element per line should be transaction hash; file should not have headers") + + ocr2UpdateConfigCmd.Flags().String("tx", "", "transaction of last config update") +} + +func readCsvFile(filePath string) ([][]string, error) { + var records [][]string + var err error + + f, err := os.Open(filePath) + if err != nil { + return records, fmt.Errorf("Unable to read input file "+filePath, err) + } + defer f.Close() + + csvReader := csv.NewReader(f) + csvReader.FieldsPerRecord = 0 + csvReader.LazyQuotes = false + records, err = csvReader.ReadAll() + if err != nil { + return records, fmt.Errorf("Unable to parse 
file as CSV for "+filePath, err) + } + + return records, nil +} diff --git a/core/scripts/chaincli/command/keeper/verifiable_load.go b/core/scripts/chaincli/command/keeper/verifiable_load.go new file mode 100644 index 00000000..c57793ca --- /dev/null +++ b/core/scripts/chaincli/command/keeper/verifiable_load.go @@ -0,0 +1,30 @@ +package keeper + +import ( + "log" + + "github.com/spf13/cobra" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/handler" +) + +// verifiableLoad represents the command to get verifiable load testing details +var verifiableLoad = &cobra.Command{ + Use: "verifiable-load", + Short: "Print verifiable load testing details to console", + Long: `Print verifiable load testing details to console, including details of every active upkeep and total result`, + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + hdlr := handler.NewKeeper(cfg) + csv, err := cmd.Flags().GetBool("csv") + if err != nil { + log.Fatal("failed to get verify flag: ", err) + } + hdlr.PrintVerifiableLoadStats(cmd.Context(), csv) + }, +} + +func init() { + verifiableLoad.Flags().BoolP("csv", "c", false, "Specify if stats should be output as CSV") +} diff --git a/core/scripts/chaincli/command/revert_reason.go b/core/scripts/chaincli/command/revert_reason.go new file mode 100644 index 00000000..4d358e7b --- /dev/null +++ b/core/scripts/chaincli/command/revert_reason.go @@ -0,0 +1,21 @@ +package command + +import ( + "github.com/spf13/cobra" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/handler" +) + +// RevertReasonCmd takes in a failed tx hash and tries to give you the reason +var RevertReasonCmd = &cobra.Command{ + Use: "reason", + Short: "Revert reason for failed TX.", + Long: `Given a failed TX tries to find the revert reason. 
args = tx hex address`, + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, args []string) { + cfg := config.New() + baseHandler := handler.NewBaseHandler(cfg) + baseHandler.RevertReason(args[0]) + }, +} diff --git a/core/scripts/chaincli/command/root.go b/core/scripts/chaincli/command/root.go new file mode 100644 index 00000000..610b37aa --- /dev/null +++ b/core/scripts/chaincli/command/root.go @@ -0,0 +1,38 @@ +package command + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/command/keeper" +) + +var configFile string + +// RootCmd represents the base command when called without any subcommands +var RootCmd = &cobra.Command{ + Use: "chaincli", + Short: "Plugin CLI tool to manage products such as keeper, vrf, etc.", + Long: `chaincli is a CLI for running the product management commands, e.g. keepers deployment.`, +} + +// Execute adds all child commands to the root command and sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the RootCmd. 
+func Execute() { + if err := RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func init() { + RootCmd.PersistentFlags().StringVar(&configFile, "config", "", "config file (default is .env)") + _ = viper.BindPFlag("config", RootCmd.PersistentFlags().Lookup("config")) + + RootCmd.AddCommand(keeper.RootCmd) + RootCmd.AddCommand(BootstrapNodeCmd) + RootCmd.AddCommand(RevertReasonCmd) +} diff --git a/core/scripts/chaincli/config/config.go b/core/scripts/chaincli/config/config.go new file mode 100644 index 00000000..a2a791be --- /dev/null +++ b/core/scripts/chaincli/config/config.go @@ -0,0 +1,183 @@ +package config + +import ( + "fmt" + "log" + + "github.com/spf13/viper" + + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" +) + +// UpkeepType represents an upkeep type +type UpkeepType int + +const ( + Conditional UpkeepType = iota + Mercury + LogTrigger + LogTriggeredFeedLookup +) + +// Config represents configuration fields +type Config struct { + NodeURL string `mapstructure:"NODE_URL"` + NodeHttpURL string `mapstructure:"NODE_HTTP_URL"` + ExplorerAPIKey string `mapstructure:"EXPLORER_API_KEY"` + NetworkName string `mapstructure:"NETWORK_NAME"` + ChainID int64 `mapstructure:"CHAIN_ID"` + PrivateKey string `mapstructure:"PRIVATE_KEY"` + LinkTokenAddr string `mapstructure:"PLI_TOKEN_ADDR"` + Keepers []string `mapstructure:"KEEPERS"` + KeeperURLs []string `mapstructure:"KEEPER_URLS"` + KeeperEmails []string `mapstructure:"KEEPER_EMAILS"` + KeeperPasswords []string `mapstructure:"KEEPER_PASSWORDS"` + KeeperKeys []string `mapstructure:"KEEPER_KEYS"` + ApproveAmount string `mapstructure:"APPROVE_AMOUNT"` + GasLimit uint64 `mapstructure:"GAS_LIMIT"` + FundNodeAmount string `mapstructure:"FUND_PLUGIN_NODE"` + PluginDockerImage string `mapstructure:"PLUGIN_DOCKER_IMAGE"` + PostgresDockerImage string `mapstructure:"POSTGRES_DOCKER_IMAGE"` + + // OCR Config + BootstrapNodeAddr string `mapstructure:"BOOTSTRAP_NODE_ADDR"` + OCR2Keepers bool 
`mapstructure:"KEEPER_OCR2"` + + // Keeper config + Mode uint8 `mapstructure:"MODE"` + LinkETHFeedAddr string `mapstructure:"PLI_ETH_FEED"` + FastGasFeedAddr string `mapstructure:"FAST_GAS_FEED"` + PaymentPremiumPBB uint32 `mapstructure:"PAYMENT_PREMIUM_PBB"` + FlatFeeMicroLink uint32 `mapstructure:"FLAT_FEE_MICRO_PLI"` + BlockCountPerTurn int64 `mapstructure:"BLOCK_COUNT_PER_TURN"` + CheckGasLimit uint32 `mapstructure:"CHECK_GAS_LIMIT"` + StalenessSeconds int64 `mapstructure:"STALENESS_SECONDS"` + GasCeilingMultiplier uint16 `mapstructure:"GAS_CEILING_MULTIPLIER"` + MinUpkeepSpend int64 `mapstructure:"MIN_UPKEEP_SPEND"` + MaxPerformGas uint32 `mapstructure:"MAX_PERFORM_GAS"` + MaxCheckDataSize uint32 `mapstructure:"MAX_CHECK_DATA_SIZE"` + MaxPerformDataSize uint32 `mapstructure:"MAX_PERFORM_DATA_SIZE"` + MaxRevertDataSize uint32 `mapstructure:"MAX_REVERT_DATA_SIZE"` + FallbackGasPrice int64 `mapstructure:"FALLBACK_GAS_PRICE"` + FallbackLinkPrice int64 `mapstructure:"FALLBACK_PLI_PRICE"` + Transcoder string `mapstructure:"TRANSCODER"` + Registrar string `mapstructure:"REGISTRAR"` + UpkeepPrivilegeManager string `mapstructure:"UPKEEP_PRIVILEGE_MANAGER"` + + // Upkeep Config + RegistryVersion keeper.RegistryVersion `mapstructure:"KEEPER_REGISTRY_VERSION"` + RegistryAddress string `mapstructure:"KEEPER_REGISTRY_ADDRESS"` + RegistryConfigUpdate bool `mapstructure:"KEEPER_CONFIG_UPDATE"` + KeepersCount int `mapstructure:"KEEPERS_COUNT"` + UpkeepTestRange int64 `mapstructure:"UPKEEP_TEST_RANGE"` + UpkeepAverageEligibilityCadence int64 `mapstructure:"UPKEEP_AVERAGE_ELIGIBILITY_CADENCE"` + UpkeepInterval int64 `mapstructure:"UPKEEP_INTERVAL"` + UpkeepCheckData string `mapstructure:"UPKEEP_CHECK_DATA"` + UpkeepGasLimit uint32 `mapstructure:"UPKEEP_GAS_LIMIT"` + UpkeepCount int64 `mapstructure:"UPKEEP_COUNT"` + AddFundsAmount string `mapstructure:"UPKEEP_ADD_FUNDS_AMOUNT"` + VerifiableLoadTest bool `mapstructure:"VERIFIABLE_LOAD_TEST"` + UseArbBlockNumber bool 
`mapstructure:"USE_ARB_BLOCK_NUMBER"` + VerifiableLoadContractAddress string `mapstructure:"VERIFIABLE_LOAD_CONTRACT_ADDRESS"` + UpkeepType UpkeepType `mapstructure:"UPKEEP_TYPE"` + + // Node config scraping and verification + NodeConfigURL string `mapstructure:"NODE_CONFIG_URL"` + VerifyNodes bool `mapstructure:"VERIFY_NODES"` + + // Feeds config + FeedBaseAddr string `mapstructure:"FEED_BASE_ADDR"` + FeedQuoteAddr string `mapstructure:"FEED_QUOTE_ADDR"` + FeedDecimals uint8 `mapstructure:"FEED_DECIMALS"` + + // Data Streams Config + DataStreamsURL string `mapstructure:"DATA_STREAMS_URL"` + DataStreamsLegacyURL string `mapstructure:"DATA_STREAMS_LEGACY_URL"` + DataStreamsID string `mapstructure:"DATA_STREAMS_ID"` + DataStreamsKey string `mapstructure:"DATA_STREAMS_KEY"` + DataStreamsCredName string `mapstructure:"DATA_STREAMS_CRED_NAME"` + + // Tenderly + TenderlyKey string `mapstructure:"TENDERLY_KEY"` + TenderlyAccountName string `mapstructure:"TENDERLY_ACCOUNT_NAME"` + TenderlyProjectName string `mapstructure:"TENDERLY_PROJECT_NAME"` +} + +// New creates a new config +func New() *Config { + var cfg Config + configFile := viper.GetString("config") + if configFile != "" { + log.Println("Using config file", configFile) + // Use config file from the flag. 
+ viper.SetConfigFile(configFile) + } else { + log.Println("Using config file .env") + viper.SetConfigFile(".env") + } + viper.AutomaticEnv() + if err := viper.ReadInConfig(); err != nil { + log.Fatal("failed to read config: ", err) + } + if err := viper.Unmarshal(&cfg); err != nil { + log.Fatal("failed to unmarshal config: ", err) + } + + return &cfg +} + +// Validate validates the given config +func (c *Config) Validate() error { + // OCR2Keeper job could be ran only with the registry 2.0 + if c.OCR2Keepers && c.RegistryVersion < keeper.RegistryVersion_2_0 { + return fmt.Errorf("ocr2keeper job could be ran only with the registry 2.0, but %s specified", c.RegistryVersion) + } + + // validate keepers env vars + keepersFields := [][]string{c.KeeperURLs, c.KeeperEmails, c.KeeperPasswords, c.KeeperKeys} + for i := 0; i < len(keepersFields); i++ { + if len(keepersFields[i]) != 0 && len(keepersFields[i]) != c.KeepersCount { + return fmt.Errorf("keepers config length doesn't match expected keeper count, check keeper env vars") + } + } + + if c.UpkeepType > 4 { + return fmt.Errorf("unknown upkeep type") + } + + return nil +} + +func init() { + viper.SetDefault("APPROVE_AMOUNT", "100000000000000000000000") // 1000 PLI + viper.SetDefault("GAS_LIMIT", 8000000) + viper.SetDefault("PAYMENT_PREMIUM_PBB", 200000000) + viper.SetDefault("FLAT_FEE_MICRO_PLI", 0) + viper.SetDefault("BLOCK_COUNT_PER_TURN", 1) + viper.SetDefault("CHECK_GAS_LIMIT", 650000000) + viper.SetDefault("STALENESS_SECONDS", 90000) + viper.SetDefault("GAS_CEILING_MULTIPLIER", 1) + viper.SetDefault("FALLBACK_GAS_PRICE", 200000000000) + viper.SetDefault("FALLBACK_PLI_PRICE", 20000000000000000) + viper.SetDefault("PLUGIN_DOCKER_IMAGE", "smartcontract/plugin:1.13.0-root") + viper.SetDefault("POSTGRES_DOCKER_IMAGE", "postgres:latest") + + viper.SetDefault("UPKEEP_ADD_FUNDS_AMOUNT", "100000000000000000000") // 100 PLI + viper.SetDefault("UPKEEP_TEST_RANGE", 1) + viper.SetDefault("UPKEEP_INTERVAL", 10) + 
viper.SetDefault("UPKEEP_CHECK_DATA", "0x00") + viper.SetDefault("UPKEEP_GAS_LIMIT", 500000) + viper.SetDefault("UPKEEP_COUNT", 5) + viper.SetDefault("UPKEEP_TYPE", 0) // conditional upkeep + viper.SetDefault("KEEPERS_COUNT", 2) + + viper.SetDefault("FEED_DECIMALS", 18) + viper.SetDefault("MUST_TAKE_TURNS", true) + + viper.SetDefault("MIN_UPKEEP_SPEND", 0) + viper.SetDefault("MAX_PERFORM_GAS", 5000000) + viper.SetDefault("TRANSCODER", "0x0000000000000000000000000000000000000000") + viper.SetDefault("REGISTRAR", "0x0000000000000000000000000000000000000000") + viper.SetDefault("KEEPER_REGISTRY_VERSION", 2) + viper.SetDefault("FUND_PLUGIN_NODE", "20000000000000000000") +} diff --git a/core/scripts/chaincli/handler/bootstrap.go b/core/scripts/chaincli/handler/bootstrap.go new file mode 100644 index 00000000..adb383e3 --- /dev/null +++ b/core/scripts/chaincli/handler/bootstrap.go @@ -0,0 +1,95 @@ +package handler + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "log" + "strconv" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/web" +) + +const ( + bootstrapJobSpec = `type = "bootstrap" +schemaVersion = 1 +name = "ocr2keeper bootstrap node" +contractID = "%s" +relay = "evm" + +[relayConfig] +chainID = %d` + + bootstrapTOML = `[P2P] +[P2P.V2] +ListenAddresses = ["0.0.0.0:%s"]` +) + +// StartBootstrapNode starts the ocr2 bootstrap node with the given contract address, returns the tcp address of the node +func (h *baseHandler) StartBootstrapNode(ctx context.Context, addr string, uiPort, p2pv2Port int, force bool) string { + lggr, closeLggr := logger.NewLogger() + logger.Sugared(lggr).ErrorIfFn(closeLggr, "Failed to close logger") + + const containerName = "bootstrap" + + urlRaw, _, err := h.launchPluginNode( + ctx, + uiPort, + containerName, + fmt.Sprintf(bootstrapTOML, strconv.Itoa(p2pv2Port)), + force, + ) + if err != nil { + lggr.Fatal("Failed to launch plugin 
node, ", err) + } + + cl, err := authenticate(ctx, urlRaw, defaultPluginNodeLogin, defaultPluginNodePassword, lggr) + if err != nil { + lggr.Fatal("Authentication failed, ", err) + } + + p2pKeyID, err := getP2PKeyID(ctx, cl) + if err != nil { + lggr.Fatal("Failed to get P2P key ID, ", err) + } + + if err = h.createBootstrapJob(ctx, cl, addr); err != nil { + lggr.Fatal("Failed to create keeper job: ", err) + } + + tcpAddr := fmt.Sprintf("%s@%s:%d", p2pKeyID, containerName, p2pv2Port) + lggr.Info("Bootstrap job has been successfully created in the Plugin node with address ", urlRaw, ", tcp: ", tcpAddr) + + return tcpAddr +} + +// createBootstrapJob creates a bootstrap job in the plugin node by the given address +func (h *baseHandler) createBootstrapJob(ctx context.Context, client cmd.HTTPClient, contractAddr string) error { + request, err := json.Marshal(web.CreateJobRequest{ + TOML: fmt.Sprintf(bootstrapJobSpec, contractAddr, h.cfg.ChainID), + }) + if err != nil { + return fmt.Errorf("failed to marshal request: %s", err) + } + + resp, err := client.Post(ctx, "/v2/jobs", bytes.NewReader(request)) + if err != nil { + return fmt.Errorf("failed to create bootstrap job: %s", err) + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + body, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("failed to read error response body: %s", err) + } + + return fmt.Errorf("unable to create bootstrap job: '%v' [%d]", string(body), resp.StatusCode) + } + log.Println("Bootstrap job has been successfully created in the Plugin node") + return nil +} diff --git a/core/scripts/chaincli/handler/debug.go b/core/scripts/chaincli/handler/debug.go new file mode 100644 index 00000000..ae31fb27 --- /dev/null +++ b/core/scripts/chaincli/handler/debug.go @@ -0,0 +1,635 @@ +package handler + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "log" + "math" + "math/big" + "net/http" + "os" + "strconv" + + types2 
"github.com/goplugin/plugin-common/pkg/types" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + gethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + evm21 "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21" + + commonhex "github.com/goplugin/plugin-common/pkg/utils/hex" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + "github.com/goplugin/pluginv3.0/core/scripts/common" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_utils_2_1" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams" + bigmath "github.com/goplugin/pluginv3.0/v2/core/utils/big_math" +) + +const ( + ConditionTrigger uint8 = iota + LogTrigger + expectedTypeAndVersion = "KeeperRegistry 2.1.0" +) + +var mercuryPacker = mercury.NewAbiPacker() +var packer = encoding.NewAbiPacker() + +var links []string + +func (k *Keeper) Debug(ctx context.Context, args []string) { + if len(args) < 1 { + failCheckArgs("no upkeepID supplied", nil) + } + + // test that we are connected to an archive node + _, err := k.client.BalanceAt(ctx, gethcommon.Address{}, big.NewInt(1)) + if err != nil { + failCheckConfig("you are not connected to an archive node; try using infura or alchemy", err) + } + + chainIDBig, err 
:= k.client.ChainID(ctx) + if err != nil { + failUnknown("unable to retrieve chainID from rpc client", err) + } + chainID := chainIDBig.Int64() + + // Log triggers: always use block from tx + // Conditional: use latest block if no block number is provided, otherwise use block from user input + var triggerCallOpts *bind.CallOpts // use a certain block + latestCallOpts := &bind.CallOpts{Context: ctx} // use the latest block + + // connect to registry contract + registryAddress := gethcommon.HexToAddress(k.cfg.RegistryAddress) + keeperRegistry21, err := iregistry21.NewIKeeperRegistryMaster(registryAddress, k.client) + if err != nil { + failUnknown("failed to connect to the registry contract", err) + } + + // verify contract is correct + typeAndVersion, err := keeperRegistry21.TypeAndVersion(latestCallOpts) + if err != nil { + failCheckConfig("failed to get typeAndVersion: make sure your registry contract address and archive node are valid", err) + } + if typeAndVersion != expectedTypeAndVersion { + failCheckConfig(fmt.Sprintf("invalid registry contract: this command can only debug %s, got: %s", expectedTypeAndVersion, typeAndVersion), nil) + } + // get upkeepID from command args + upkeepID := big.NewInt(0) + upkeepIDNoPrefix := commonhex.TrimPrefix(args[0]) + _, wasBase10 := upkeepID.SetString(upkeepIDNoPrefix, 10) + if !wasBase10 { + _, wasBase16 := upkeepID.SetString(upkeepIDNoPrefix, 16) + if !wasBase16 { + failCheckArgs("invalid upkeep ID", nil) + } + } + // get upkeep info + triggerType, err := keeperRegistry21.GetTriggerType(latestCallOpts, upkeepID) + if err != nil { + failUnknown("failed to get trigger type: ", err) + } + upkeepInfo, err := keeperRegistry21.GetUpkeep(latestCallOpts, upkeepID) + if err != nil { + failUnknown("failed to get trigger type: ", err) + } + minBalance, err := keeperRegistry21.GetMinBalance(latestCallOpts, upkeepID) + if err != nil { + failUnknown("failed to get min balance: ", err) + } + // do basic sanity checks + if 
(upkeepInfo.Target == gethcommon.Address{}) { + failCheckArgs("this upkeep does not exist on this registry", nil) + } + addLink("upkeep link", common.UpkeepLink(chainID, upkeepID)) + addLink("upkeep contract address", common.ContractExplorerLink(chainID, upkeepInfo.Target)) + if upkeepInfo.Paused { + resolveIneligible("upkeep is paused") + } + if upkeepInfo.MaxValidBlocknumber != math.MaxUint32 { + resolveIneligible("upkeep is cancelled") + } + message("upkeep is active (not paused or cancelled)") + if upkeepInfo.Balance.Cmp(minBalance) == -1 { + resolveIneligible("upkeep balance is < minBalance") + } + message("upkeep is funded above the min balance") + if bigmath.Div(bigmath.Mul(bigmath.Sub(upkeepInfo.Balance, minBalance), big.NewInt(100)), minBalance).Cmp(big.NewInt(5)) == -1 { + warning("upkeep balance is < 5% larger than minBalance") + } + // local state for pipeline results + var checkResult iregistry21.CheckUpkeep + var blockNum uint64 + var performData []byte + var workID [32]byte + var trigger ocr2keepers.Trigger + upkeepNeeded := false + // check upkeep + if triggerType == ConditionTrigger { + message("upkeep identified as conditional trigger") + + if len(args) > 1 { + // if a block number is provided, use that block for both checkUpkeep and simulatePerformUpkeep + blockNum, err = strconv.ParseUint(args[1], 10, 64) + if err != nil { + failCheckArgs("unable to parse block number", err) + } + triggerCallOpts = &bind.CallOpts{Context: ctx, BlockNumber: new(big.Int).SetUint64(blockNum)} + } else { + // if no block number is provided, use latest block for both checkUpkeep and simulatePerformUpkeep + triggerCallOpts = latestCallOpts + } + + var tmpCheckResult iregistry21.CheckUpkeep0 + tmpCheckResult, err = keeperRegistry21.CheckUpkeep0(triggerCallOpts, upkeepID) + if err != nil { + failUnknown("failed to check upkeep: ", err) + } + checkResult = iregistry21.CheckUpkeep(tmpCheckResult) + // do tenderly simulation + var rawCall []byte + rawCall, err = 
core.RegistryABI.Pack("checkUpkeep", upkeepID, []byte{}) + if err != nil { + failUnknown("failed to pack raw checkUpkeep call", err) + } + addLink("checkUpkeep simulation", tenderlySimLink(ctx, k.cfg, chainID, 0, rawCall, registryAddress)) + } else if triggerType == LogTrigger { + // validate inputs + message("upkeep identified as log trigger") + if len(args) != 3 { + failCheckArgs("txHash and log index must be supplied to command in order to debug log triggered upkeeps", nil) + } + txHash := gethcommon.HexToHash(args[1]) + + var logIndex int64 + logIndex, err = strconv.ParseInt(args[2], 10, 64) + if err != nil { + failCheckArgs("unable to parse log index", err) + } + + // check that tx is confirmed + var isPending bool + _, isPending, err = k.client.TransactionByHash(ctx, txHash) + if err != nil { + log.Fatal("failed to get tx by hash", err) + } + if isPending { + resolveIneligible(fmt.Sprintf("tx %s is still pending confirmation", txHash)) + } + + // find transaction receipt + var receipt *types.Receipt + receipt, err = k.client.TransactionReceipt(ctx, txHash) + if err != nil { + failCheckArgs("failed to fetch tx receipt", err) + } + addLink("trigger transaction", common.ExplorerLink(chainID, txHash)) + blockNum = receipt.BlockNumber.Uint64() + // find matching log event in tx + var triggeringEvent *types.Log + for i, log := range receipt.Logs { + if log.Index == uint(logIndex) { + triggeringEvent = receipt.Logs[i] + } + } + if triggeringEvent == nil { + failCheckArgs(fmt.Sprintf("unable to find log with index %d in transaction", logIndex), nil) + } + // check that tx for this upkeep / tx was not already performed + message(fmt.Sprintf("LogTrigger{blockNum: %d, blockHash: %s, txHash: %s, logIndex: %d}", blockNum, receipt.BlockHash.Hex(), txHash, logIndex)) + trigger = mustAutomationTrigger(txHash, logIndex, blockNum, receipt.BlockHash) + workID = mustUpkeepWorkID(upkeepID, trigger) + message(fmt.Sprintf("workID computed: %s", hex.EncodeToString(workID[:]))) + var 
hasKey bool + hasKey, err = keeperRegistry21.HasDedupKey(latestCallOpts, workID) + if err != nil { + failUnknown("failed to check if upkeep was already performed: ", err) + } + if hasKey { + resolveIneligible("upkeep was already performed") + } + triggerCallOpts = &bind.CallOpts{Context: ctx, BlockNumber: big.NewInt(receipt.BlockNumber.Int64())} + var rawTriggerConfig []byte + rawTriggerConfig, err = keeperRegistry21.GetUpkeepTriggerConfig(triggerCallOpts, upkeepID) + if err != nil { + failUnknown("failed to fetch trigger config for upkeep", err) + } + var triggerConfig automation_utils_2_1.LogTriggerConfig + triggerConfig, err = packer.UnpackLogTriggerConfig(rawTriggerConfig) + if err != nil { + failUnknown("failed to unpack trigger config", err) + } + if triggerConfig.FilterSelector > 7 { + resolveIneligible(fmt.Sprintf("invalid filter selector %d", triggerConfig.FilterSelector)) + } + if !logMatchesTriggerConfig(triggeringEvent, triggerConfig) { + resolveIneligible("log does not match trigger config") + } + var header *types.Header + header, err = k.client.HeaderByHash(ctx, receipt.BlockHash) + if err != nil { + failUnknown("failed to find block", err) + } + var triggerData []byte + triggerData, err = packTriggerData(triggeringEvent, header.Time) + if err != nil { + failUnknown("failed to pack trigger data", err) + } + checkResult, err = keeperRegistry21.CheckUpkeep(triggerCallOpts, upkeepID, triggerData) + if err != nil { + failUnknown("failed to check upkeep", err) + } + // do tenderly simulations + var rawCall []byte + rawCall, err = core.RegistryABI.Pack("checkUpkeep", upkeepID, triggerData) + if err != nil { + failUnknown("failed to pack raw checkUpkeep call", err) + } + addLink("checkUpkeep simulation", tenderlySimLink(ctx, k.cfg, chainID, blockNum, rawCall, registryAddress)) + rawCall = append(core.ILogAutomationABI.Methods["checkLog"].ID, triggerData...) 
+ addLink("checkLog (direct) simulation", tenderlySimLink(ctx, k.cfg, chainID, blockNum, rawCall, upkeepInfo.Target)) + } else { + resolveIneligible(fmt.Sprintf("invalid trigger type: %d", triggerType)) + } + upkeepNeeded, performData = checkResult.UpkeepNeeded, checkResult.PerformData + + if checkResult.UpkeepFailureReason != 0 { + message(fmt.Sprintf("checkUpkeep failed with UpkeepFailureReason %s", getCheckUpkeepFailureReason(checkResult.UpkeepFailureReason))) + } + + // handle data streams lookup + if checkResult.UpkeepFailureReason == uint8(encoding.UpkeepFailureReasonTargetCheckReverted) { + mc := &types2.MercuryCredentials{LegacyURL: k.cfg.DataStreamsLegacyURL, URL: k.cfg.DataStreamsURL, Username: k.cfg.DataStreamsID, Password: k.cfg.DataStreamsKey} + mercuryConfig := evm21.NewMercuryConfig(mc, core.StreamsCompatibleABI) + lggr, _ := logger.NewLogger() + blockSub := &blockSubscriber{k.client} + streams := streams.NewStreamsLookup(mercuryConfig, blockSub, k.rpcClient, keeperRegistry21, lggr) + + var streamsLookupErr *mercury.StreamsLookupError + streamsLookupErr, err = mercuryPacker.DecodeStreamsLookupRequest(checkResult.PerformData) + if err == nil { + message("upkeep reverted with StreamsLookup") + message(fmt.Sprintf("StreamsLookup data: {FeedParamKey: %s, Feeds: %v, TimeParamKey: %s, Time: %d, ExtraData: %s}", streamsLookupErr.FeedParamKey, streamsLookupErr.Feeds, streamsLookupErr.TimeParamKey, streamsLookupErr.Time.Uint64(), hexutil.Encode(streamsLookupErr.ExtraData))) + + streamsLookup := &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: streamsLookupErr.FeedParamKey, + Feeds: streamsLookupErr.Feeds, + TimeParamKey: streamsLookupErr.TimeParamKey, + Time: streamsLookupErr.Time, + ExtraData: streamsLookupErr.ExtraData, + }, + UpkeepId: upkeepID, + Block: blockNum, + } + + if streamsLookup.IsMercuryV02() { + message("using data streams lookup v0.2") + // check if upkeep is allowed to use mercury v0.2 + var allowed 
bool + _, _, _, allowed, err = streams.AllowedToUseMercury(triggerCallOpts, upkeepID) + if err != nil { + failUnknown("failed to check if upkeep is allowed to use data streams", err) + } + if !allowed { + resolveIneligible("upkeep reverted with StreamsLookup but is not allowed to access streams") + } + } else if streamsLookup.IsMercuryV03() { + // handle v0.3 + message("using data streams lookup v0.3") + } else { + resolveIneligible("upkeep reverted with StreamsLookup but the configuration is invalid") + } + + if k.cfg.DataStreamsLegacyURL == "" || k.cfg.DataStreamsURL == "" || k.cfg.DataStreamsID == "" || k.cfg.DataStreamsKey == "" { + failCheckConfig("Data streams configs not set properly, check your DATA_STREAMS_LEGACY_URL, DATA_STREAMS_URL, DATA_STREAMS_ID and DATA_STREAMS_KEY", nil) + } + + // do mercury request + automationCheckResult := mustAutomationCheckResult(upkeepID, checkResult, trigger) + checkResults := []ocr2keepers.CheckResult{automationCheckResult} + + var values [][]byte + values, err = streams.DoMercuryRequest(ctx, streamsLookup, checkResults, 0) + + if checkResults[0].IneligibilityReason == uint8(encoding.UpkeepFailureReasonInvalidRevertDataInput) { + resolveIneligible("upkeep used invalid revert data") + } + if checkResults[0].PipelineExecutionState == uint8(encoding.InvalidMercuryRequest) { + resolveIneligible("the data streams request data is invalid") + } + if err != nil { + failCheckConfig("failed to do data streams request ", err) + } + + // do checkCallback + err = streams.CheckCallback(ctx, values, streamsLookup, checkResults, 0) + if err != nil { + failUnknown("failed to execute data streams callback ", err) + } + if checkResults[0].IneligibilityReason != 0 { + message(fmt.Sprintf("checkCallback failed with UpkeepFailureReason %d", checkResults[0].IneligibilityReason)) + } + upkeepNeeded, performData = checkResults[0].Eligible, checkResults[0].PerformData + // do tenderly simulations for checkCallback + var rawCall []byte + rawCall, 
err = core.RegistryABI.Pack("checkCallback", upkeepID, values, streamsLookup.ExtraData) + if err != nil { + failUnknown("failed to pack raw checkCallback call", err) + } + addLink("checkCallback simulation", tenderlySimLink(ctx, k.cfg, chainID, blockNum, rawCall, registryAddress)) + rawCall, err = core.StreamsCompatibleABI.Pack("checkCallback", values, streamsLookup.ExtraData) + if err != nil { + failUnknown("failed to pack raw checkCallback (direct) call", err) + } + addLink("checkCallback (direct) simulation", tenderlySimLink(ctx, k.cfg, chainID, blockNum, rawCall, upkeepInfo.Target)) + } else { + message("did not revert with StreamsLookup error") + } + } + if !upkeepNeeded { + resolveIneligible("upkeep is not needed") + } + // simulate perform upkeep + simulateResult, err := keeperRegistry21.SimulatePerformUpkeep(triggerCallOpts, upkeepID, performData) + if err != nil { + failUnknown("failed to simulate perform upkeep: ", err) + } + + // do tenderly simulation + rawCall, err := core.RegistryABI.Pack("simulatePerformUpkeep", upkeepID, performData) + if err != nil { + failUnknown("failed to pack raw simulatePerformUpkeep call", err) + } + addLink("simulatePerformUpkeep simulation", tenderlySimLink(ctx, k.cfg, chainID, blockNum, rawCall, registryAddress)) + + if simulateResult.Success { + resolveEligible() + } else { + // Convert performGas to *big.Int for comparison + performGasBigInt := new(big.Int).SetUint64(uint64(upkeepInfo.PerformGas)) + // Compare PerformGas and GasUsed + result := performGasBigInt.Cmp(simulateResult.GasUsed) + + if result < 0 { + // PerformGas is smaller than GasUsed + resolveIneligible(fmt.Sprintf("simulate perform upkeep unsuccessful, PerformGas (%d) is lower than GasUsed (%s)", upkeepInfo.PerformGas, simulateResult.GasUsed.String())) + } else { + resolveIneligible("simulate perform upkeep unsuccessful") + } + } +} + +func getCheckUpkeepFailureReason(reasonIndex uint8) string { + // Copied from KeeperRegistryBase2_1.sol + reasonStrings := 
[]string{ + "NONE", + "UPKEEP_CANCELLED", + "UPKEEP_PAUSED", + "TARGET_CHECK_REVERTED", + "UPKEEP_NOT_NEEDED", + "PERFORM_DATA_EXCEEDS_LIMIT", + "INSUFFICIENT_BALANCE", + "CALLBACK_REVERTED", + "REVERT_DATA_EXCEEDS_LIMIT", + "REGISTRY_PAUSED", + } + + if int(reasonIndex) < len(reasonStrings) { + return reasonStrings[reasonIndex] + } + + return fmt.Sprintf("Unknown : %d", reasonIndex) +} + +func mustAutomationCheckResult(upkeepID *big.Int, checkResult iregistry21.CheckUpkeep, trigger ocr2keepers.Trigger) ocr2keepers.CheckResult { + upkeepIdentifier := mustUpkeepIdentifier(upkeepID) + checkResult2 := ocr2keepers.CheckResult{ + Eligible: checkResult.UpkeepNeeded, + IneligibilityReason: checkResult.UpkeepFailureReason, + UpkeepID: upkeepIdentifier, + Trigger: trigger, + WorkID: core.UpkeepWorkID(upkeepIdentifier, trigger), + GasAllocated: 0, + PerformData: checkResult.PerformData, + FastGasWei: checkResult.FastGasWei, + LinkNative: checkResult.LinkNative, + } + + return checkResult2 +} + +type blockSubscriber struct { + ethClient *ethclient.Client +} + +func (bs *blockSubscriber) LatestBlock() *ocr2keepers.BlockKey { + header, err := bs.ethClient.HeaderByNumber(context.Background(), nil) + if err != nil { + return nil + } + + return &ocr2keepers.BlockKey{ + Number: ocr2keepers.BlockNumber(header.Number.Uint64()), + Hash: header.Hash(), + } +} + +func logMatchesTriggerConfig(log *types.Log, config automation_utils_2_1.LogTriggerConfig) bool { + if log.Topics[0] != config.Topic0 { + return false + } + if config.FilterSelector&1 > 0 && (len(log.Topics) < 1 || log.Topics[1] != config.Topic1) { + return false + } + if config.FilterSelector&2 > 0 && (len(log.Topics) < 2 || log.Topics[2] != config.Topic2) { + return false + } + if config.FilterSelector&4 > 0 && (len(log.Topics) < 3 || log.Topics[3] != config.Topic3) { + return false + } + return true +} + +func packTriggerData(log *types.Log, blockTime uint64) ([]byte, error) { + var topics [][32]byte + for _, topic := range 
log.Topics { + topics = append(topics, topic) + } + b, err := core.UtilsABI.Methods["_log"].Inputs.Pack(&automation_utils_2_1.Log{ + Index: big.NewInt(int64(log.Index)), + Timestamp: big.NewInt(int64(blockTime)), + TxHash: log.TxHash, + BlockNumber: big.NewInt(int64(log.BlockNumber)), + BlockHash: log.BlockHash, + Source: log.Address, + Topics: topics, + Data: log.Data, + }) + if err != nil { + return nil, err + } + return b, nil +} + +func mustUpkeepWorkID(upkeepID *big.Int, trigger ocr2keepers.Trigger) [32]byte { + upkeepIdentifier := mustUpkeepIdentifier(upkeepID) + + workID := core.UpkeepWorkID(upkeepIdentifier, trigger) + workIDBytes, err := hex.DecodeString(workID) + if err != nil { + failUnknown("failed to decode workID", err) + } + + var result [32]byte + copy(result[:], workIDBytes[:]) + return result +} + +func mustUpkeepIdentifier(upkeepID *big.Int) ocr2keepers.UpkeepIdentifier { + upkeepIdentifier := &ocr2keepers.UpkeepIdentifier{} + upkeepIdentifier.FromBigInt(upkeepID) + return *upkeepIdentifier +} + +func mustAutomationTrigger(txHash [32]byte, logIndex int64, blockNum uint64, blockHash [32]byte) ocr2keepers.Trigger { + trigger := ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + TxHash: txHash, + Index: uint32(logIndex), + BlockNumber: ocr2keepers.BlockNumber(blockNum), + BlockHash: blockHash, + }, + } + return trigger +} + +func message(msg string) { + log.Printf("☑️ %s", msg) +} + +func warning(msg string) { + log.Printf("⚠️ %s", msg) +} + +func resolveIneligible(msg string) { + exit(fmt.Sprintf("❌ this upkeep is not eligible: %s", msg), nil, 0) +} + +func resolveEligible() { + exit("✅ this upkeep is eligible", nil, 0) +} + +func rerun(msg string, err error) { + exit(fmt.Sprintf("🔁 %s: rerun this command", msg), err, 1) +} + +func failUnknown(msg string, err error) { + exit(fmt.Sprintf("🤷 %s: this should not happen - this script may be broken or your RPC may be experiencing issues", msg), err, 1) +} + +func 
failCheckConfig(msg string, err error) { + rerun(fmt.Sprintf("%s: check your config", msg), err) +} + +func failCheckArgs(msg string, err error) { + rerun(fmt.Sprintf("%s: check your command arguments", msg), err) +} + +func addLink(identifier string, link string) { + links = append(links, fmt.Sprintf("🔗 %s: %s", identifier, link)) +} + +func printLinks() { + for i := 0; i < len(links); i++ { + log.Println(links[i]) + } +} + +func exit(msg string, err error, code int) { + if err != nil { + log.Printf("⚠️ %v", err) + } + log.Println(msg) + printLinks() + os.Exit(code) +} + +type TenderlyAPIResponse struct { + Simulation struct { + Id string + } +} + +func tenderlySimLink(ctx context.Context, cfg *config.Config, chainID int64, blockNumber uint64, input []byte, contractAddress gethcommon.Address) string { + errResult := "" + if cfg.TenderlyAccountName == "" || cfg.TenderlyKey == "" || cfg.TenderlyProjectName == "" { + warning("tenderly credentials not properly configured - this is optional but helpful") + return errResult + } + values := map[string]interface{}{ + "network_id": fmt.Sprintf("%d", chainID), + "from": "0x0000000000000000000000000000000000000000", + "input": hexutil.Encode(input), + "to": contractAddress.Hex(), + "gas": 50_000_000, + "save": true, + } + if blockNumber > 0 { + values["block_number"] = blockNumber + } + jsonData, err := json.Marshal(values) + if err != nil { + warning(fmt.Sprintf("unable to marshal tenderly request data: %v", err)) + return errResult + } + request, err := http.NewRequestWithContext( + ctx, + "POST", + fmt.Sprintf("https://api.tenderly.co/api/v1/account/%s/project/%s/simulate", cfg.TenderlyAccountName, cfg.TenderlyProjectName), + bytes.NewBuffer(jsonData), + ) + if err != nil { + warning(fmt.Sprintf("unable to create tenderly request: %v", err)) + return errResult + } + request.Header.Set("X-Access-Key", cfg.TenderlyKey) + request.Header.Set("Content-Type", "application/json") + client := &http.Client{} + response, err := 
client.Do(request) + if err != nil { + warning(fmt.Sprintf("could not run tenderly simulation: %v", err)) + return errResult + } + defer response.Body.Close() + body, err := io.ReadAll(response.Body) + if err != nil { + warning(fmt.Sprintf("unable to read response body from tenderly response: %v", err)) + return errResult + } + var responseJSON = &TenderlyAPIResponse{} + err = json.Unmarshal(body, responseJSON) + if err != nil { + warning(fmt.Sprintf("unable to unmarshal tenderly response: %v", err)) + return errResult + } + if responseJSON.Simulation.Id == "" { + warning("unable to simulate tenderly tx") + return errResult + } + return common.TenderlySimLink(responseJSON.Simulation.Id) +} diff --git a/core/scripts/chaincli/handler/handler.go b/core/scripts/chaincli/handler/handler.go new file mode 100644 index 00000000..51423821 --- /dev/null +++ b/core/scripts/chaincli/handler/handler.go @@ -0,0 +1,642 @@ +package handler + +import ( + "context" + "crypto/ecdsa" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "math/big" + "net/http" + "net/url" + "os" + "regexp" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/go-connections/nat" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + "github.com/manyminds/api2go/jsonapi" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + link 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + bigmath "github.com/goplugin/pluginv3.0/v2/core/utils/big_math" +) + +const ( + defaultPluginNodeLogin = "notreal@fakeemail.ch" + defaultPluginNodePassword = "fj293fbBnlQ!f9vNs~#" + ethKeysEndpoint = "/v2/keys/eth" + ocr2KeysEndpoint = "/v2/keys/ocr2" + p2pKeysEndpoint = "/v2/keys/p2p" + csaKeysEndpoint = "/v2/keys/csa" +) + +const ( + nodeTOML = `[Log] +JSONConsole = true +Level = 'debug' +[WebServer] +AllowOrigins = '*' +SecureCookies = false +SessionTimeout = '999h0m0s' +[WebServer.TLS] +HTTPSPort = 0 +[Feature] +LogPoller = true +[OCR2] +Enabled = true + +[Keeper] +TurnLookBack = 0 +[[EVM]] +ChainID = '%d' +[[EVM.Nodes]] +Name = 'node-0' +WSURL = '%s' +HTTPURL = '%s' +` + secretTOML = ` +[Mercury.Credentials.cred1] +URL = '%s' +Username = '%s' +Password = '%s' +` +) + +// baseHandler is the common handler with a common logic +type baseHandler struct { + cfg *config.Config + + rpcClient *rpc.Client + client *ethclient.Client + privateKey *ecdsa.PrivateKey + linkToken *link.LinkToken + fromAddr common.Address + approveAmount *big.Int +} + +// NewBaseHandler is the constructor of baseHandler +func NewBaseHandler(cfg *config.Config) *baseHandler { + // Created a client by the given node address + rpcClient, err := rpc.Dial(cfg.NodeURL) + if err != nil { + log.Fatal("failed to deal with ETH node: ", err) + } + nodeClient := ethclient.NewClient(rpcClient) + + // Parse private key + var fromAddr common.Address + var privateKey *ecdsa.PrivateKey + if cfg.PrivateKey != "" { + d := new(big.Int).SetBytes(common.FromHex(cfg.PrivateKey)) + pkX, pkY := crypto.S256().ScalarBaseMult(d.Bytes()) + privateKey = &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: crypto.S256(), + X: pkX, + Y: pkY, + }, + D: d, + } + + 
// Init from address + publicKey := privateKey.Public() + publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + log.Fatal("error casting public key to ECDSA") + } + fromAddr = crypto.PubkeyToAddress(*publicKeyECDSA) + } else { + log.Println("WARNING: no PRIVATE_KEY set: cannot use commands that deploy contracts or send transactions") + } + + // Create link token wrapper + linkToken, err := link.NewLinkToken(common.HexToAddress(cfg.LinkTokenAddr), nodeClient) + if err != nil { + log.Fatal(err) + } + + approveAmount := big.NewInt(0) + approveAmount.SetString(cfg.ApproveAmount, 10) + + return &baseHandler{ + cfg: cfg, + client: nodeClient, + rpcClient: rpcClient, + privateKey: privateKey, + linkToken: linkToken, + fromAddr: fromAddr, + approveAmount: approveAmount, + } +} + +func (h *baseHandler) buildTxOpts(ctx context.Context) *bind.TransactOpts { + nonce, err := h.client.PendingNonceAt(ctx, h.fromAddr) + if err != nil { + log.Fatal("PendingNonceAt failed: ", err) + } + + gasPrice, err := h.client.SuggestGasPrice(ctx) + if err != nil { + log.Fatal("SuggestGasPrice failed: ", err) + } + + gasPrice = bigmath.Add(gasPrice, bigmath.Div(gasPrice, big.NewInt(5))) // add 20% + + auth, err := bind.NewKeyedTransactorWithChainID(h.privateKey, big.NewInt(h.cfg.ChainID)) + if err != nil { + log.Fatal("NewKeyedTransactorWithChainID failed: ", err) + } + + auth.Nonce = big.NewInt(int64(nonce)) + auth.Value = big.NewInt(0) // in wei + auth.GasLimit = h.cfg.GasLimit // in units + auth.GasPrice = gasPrice + + return auth +} + +// Send eth from prefunded account. +// Amount is number of wei. 
+func (k *Keeper) sendEth(ctx context.Context, to common.Address, amount *big.Int) error { + txOpts := k.buildTxOpts(ctx) + + tx := ethtypes.NewTx(&ethtypes.LegacyTx{ + Nonce: txOpts.Nonce.Uint64(), + To: &to, + Value: amount, + Gas: txOpts.GasLimit, + GasPrice: txOpts.GasPrice, + Data: nil, + }) + signedTx, err := ethtypes.SignTx(tx, ethtypes.NewEIP155Signer(big.NewInt(k.cfg.ChainID)), k.privateKey) + if err != nil { + return fmt.Errorf("failed to sign tx: %w", err) + } + + if err = k.client.SendTransaction(ctx, signedTx); err != nil { + return fmt.Errorf("failed to send tx: %w", err) + } + + if err := k.waitTx(ctx, signedTx); err != nil { + log.Fatalf("Send ETH failed, error is %s", err.Error()) + } + log.Println("Send ETH successfully") + + return nil +} + +func (h *baseHandler) waitDeployment(ctx context.Context, tx *ethtypes.Transaction) { + if _, err := bind.WaitDeployed(ctx, h.client, tx); err != nil { + log.Fatal("WaitDeployed failed: ", err, " ", helpers.ExplorerLink(h.cfg.ChainID, tx.Hash())) + } +} + +func (h *baseHandler) waitTx(ctx context.Context, tx *ethtypes.Transaction) error { + receipt, err := bind.WaitMined(ctx, h.client, tx) + if err != nil { + log.Println("WaitTx failed: ", err) + return err + } + + if receipt.Status == ethtypes.ReceiptStatusFailed { + log.Println("Transaction failed: ", helpers.ExplorerLink(h.cfg.ChainID, tx.Hash())) + return errors.New("Transaction failed") + } + + return nil +} + +func (h *baseHandler) launchPluginNode(ctx context.Context, port int, containerName string, extraTOML string, force bool) (string, func(bool), error) { + // Create docker client to launch nodes + dockerClient, err := client.NewClientWithOpts(client.WithAPIVersionNegotiation()) + if err != nil { + return "", nil, fmt.Errorf("failed to create docker client from env: %w", err) + } + + // Make sure everything works well + if _, err = dockerClient.Ping(ctx); err != nil { + return "", nil, fmt.Errorf("failed to ping docker server: %w", err) + } + + // Pull 
DB image if needed + var out io.ReadCloser + if _, _, err = dockerClient.ImageInspectWithRaw(ctx, h.cfg.PostgresDockerImage); err != nil { + log.Println("Pulling Postgres docker image...") + if out, err = dockerClient.ImagePull(ctx, h.cfg.PostgresDockerImage, types.ImagePullOptions{}); err != nil { + return "", nil, fmt.Errorf("failed to pull Postgres image: %w", err) + } + out.Close() + log.Println("Postgres docker image successfully pulled!") + } + + // Create network config + const networkName = "chaincli-local" + existingNetworks, err := dockerClient.NetworkList(ctx, types.NetworkListOptions{}) + if err != nil { + return "", nil, fmt.Errorf("failed to list networks: %w", err) + } + + var found bool + for _, ntwrk := range existingNetworks { + if ntwrk.Name == networkName { + found = true + break + } + } + + if !found { + if _, err = dockerClient.NetworkCreate(ctx, networkName, types.NetworkCreate{}); err != nil { + return "", nil, fmt.Errorf("failed to create network: %w", err) + } + } + + postgresContainerName := fmt.Sprintf("%s-postgres", containerName) + + // If force flag is on, we check and remove containers with the same name before creating new ones + if force { + if err = checkAndRemoveContainer(ctx, dockerClient, postgresContainerName); err != nil { + return "", nil, fmt.Errorf("failed to remove container: %w", err) + } + } + + // Create DB container + dbContainerResp, err := dockerClient.ContainerCreate(ctx, &container.Config{ + Image: h.cfg.PostgresDockerImage, + Cmd: []string{"postgres", "-c", `max_connections=1000`}, + Env: []string{ + "POSTGRES_USER=postgres", + "POSTGRES_PASSWORD=verylongdatabasepassword", + }, + ExposedPorts: nat.PortSet{"5432": struct{}{}}, + }, nil, &network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + networkName: {Aliases: []string{postgresContainerName}}, + }, + }, nil, postgresContainerName) + if err != nil { + return "", nil, fmt.Errorf("failed to create Postgres container, use --force=true 
to force removing existing containers: %w", err) + } + + // Start container + if err = dockerClient.ContainerStart(ctx, dbContainerResp.ID, types.ContainerStartOptions{}); err != nil { + return "", nil, fmt.Errorf("failed to start DB container: %w", err) + } + log.Println("Postgres docker container successfully created and started: ", dbContainerResp.ID) + + time.Sleep(time.Second * 10) + + // If force flag is on, we check and remove containers with the same name before creating new ones + if force { + if err = checkAndRemoveContainer(ctx, dockerClient, containerName); err != nil { + return "", nil, fmt.Errorf("failed to remove container: %w", err) + } + } + + // Pull node image if needed + if _, _, err = dockerClient.ImageInspectWithRaw(ctx, h.cfg.PluginDockerImage); err != nil { + log.Println("Pulling node docker image...") + if out, err = dockerClient.ImagePull(ctx, h.cfg.PluginDockerImage, types.ImagePullOptions{}); err != nil { + return "", nil, fmt.Errorf("failed to pull node image: %w", err) + } + out.Close() + log.Println("Node docker image successfully pulled!") + } + + // Create temporary file with plugin node login creds + apiFile, passwordFile, fileCleanup, err := createCredsFiles() + if err != nil { + return "", nil, fmt.Errorf("failed to create creds files: %w", err) + } + + var baseTOML = fmt.Sprintf(nodeTOML, h.cfg.ChainID, h.cfg.NodeURL, h.cfg.NodeHttpURL) + tomlFile, tomlFileCleanup, err := createTomlFile(baseTOML) + if err != nil { + return "", nil, fmt.Errorf("failed to create toml file: %w", err) + } + var secretTOMLStr = fmt.Sprintf(secretTOML, h.cfg.DataStreamsURL, h.cfg.DataStreamsID, h.cfg.DataStreamsKey) + secretFile, secretTOMLFileCleanup, err := createTomlFile(secretTOMLStr) + if err != nil { + return "", nil, fmt.Errorf("failed to create secret toml file: %w", err) + } + // Create container with mounted files + portStr := fmt.Sprintf("%d", port) + nodeContainerResp, err := dockerClient.ContainerCreate(ctx, &container.Config{ + Image: 
h.cfg.PluginDockerImage, + Cmd: []string{"-s", "/run/secrets/01-secret.toml", "-c", "/run/secrets/01-config.toml", "local", "n", "-a", "/run/secrets/plugin-node-api"}, + Env: []string{ + "CL_CONFIG=" + extraTOML, + "CL_PASSWORD_KEYSTORE=" + defaultPluginNodePassword, + "CL_DATABASE_URL=postgresql://postgres:verylongdatabasepassword@" + postgresContainerName + ":5432/postgres?sslmode=disable", + }, + ExposedPorts: map[nat.Port]struct{}{ + nat.Port(portStr): {}, + }, + }, &container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: mount.TypeBind, + Source: apiFile, + Target: "/run/secrets/plugin-node-api", + }, + { + Type: mount.TypeBind, + Source: passwordFile, + Target: "/run/secrets/plugin-node-password", + }, + { + Type: mount.TypeBind, + Source: tomlFile, + Target: "/run/secrets/01-config.toml", + }, + { + Type: mount.TypeBind, + Source: secretFile, + Target: "/run/secrets/01-secret.toml", + }, + }, + PortBindings: nat.PortMap{ + "6688/tcp": []nat.PortBinding{ + { + HostIP: "0.0.0.0", + HostPort: portStr, + }, + }, + }, + }, &network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + networkName: {Aliases: []string{containerName}}, + }, + }, nil, containerName) + if err != nil { + return "", nil, fmt.Errorf("failed to create node container, use --force=true to force removing existing containers: %w", err) + } + + // Start container + if err = dockerClient.ContainerStart(ctx, nodeContainerResp.ID, types.ContainerStartOptions{}); err != nil { + return "", nil, fmt.Errorf("failed to start node container: %w", err) + } + + addr := fmt.Sprintf("http://localhost:%s", portStr) + log.Println("Node docker container successfully created and started: ", nodeContainerResp.ID, addr) + + if err = waitForNodeReady(ctx, addr); err != nil { + log.Fatal(err, nodeContainerResp.ID) + } + log.Println("Node ready: ", nodeContainerResp.ID) + + return addr, func(writeLogs bool) { + fileCleanup() + tomlFileCleanup() + secretTOMLFileCleanup() + + if writeLogs { 
+ var rdr io.ReadCloser + rdr, err2 := dockerClient.ContainerLogs(ctx, nodeContainerResp.ID, types.ContainerLogsOptions{ + ShowStderr: true, + Timestamps: true, + }) + if err2 != nil { + rdr.Close() + log.Fatal("Failed to collect logs from container: ", err2) + } + + stdErr, err2 := os.OpenFile(fmt.Sprintf("./%s-stderr.log", nodeContainerResp.ID), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) + if err2 != nil { + rdr.Close() + stdErr.Close() + log.Fatal("Failed to open file: ", err2) + } + + if _, err2 := stdcopy.StdCopy(io.Discard, stdErr, rdr); err2 != nil { + rdr.Close() + stdErr.Close() + log.Fatal("Failed to write logs to file: ", err2) + } + + rdr.Close() + stdErr.Close() + } + + if err2 := dockerClient.ContainerStop(ctx, nodeContainerResp.ID, container.StopOptions{}); err2 != nil { + log.Fatal("Failed to stop node container: ", err2) + } + if err2 := dockerClient.ContainerRemove(ctx, nodeContainerResp.ID, types.ContainerRemoveOptions{}); err2 != nil { + log.Fatal("Failed to remove node container: ", err2) + } + + if err2 := dockerClient.ContainerStop(ctx, dbContainerResp.ID, container.StopOptions{}); err2 != nil { + log.Fatal("Failed to stop DB container: ", err2) + } + if err2 := dockerClient.ContainerRemove(ctx, dbContainerResp.ID, types.ContainerRemoveOptions{}); err2 != nil { + log.Fatal("Failed to remove DB container: ", err2) + } + }, nil +} + +func checkAndRemoveContainer(ctx context.Context, dockerClient *client.Client, containerName string) error { + opts := types.ContainerListOptions{ + Filters: filters.NewArgs(filters.Arg("name", "^/"+regexp.QuoteMeta(containerName)+"$")), + } + + containers, err := dockerClient.ContainerList(ctx, opts) + if err != nil { + return fmt.Errorf("failed to list containers: %w", err) + } + + if len(containers) > 1 { + log.Fatal("more than two containers with the same name should not happen") + } else if len(containers) > 0 { + if err := dockerClient.ContainerRemove(ctx, containers[0].ID, types.ContainerRemoveOptions{ + Force: 
true, + }); err != nil { + return fmt.Errorf("failed to remove existing container: %w", err) + } + log.Println("successfully removed an existing container with name: ", containerName) + } + + return nil +} + +func waitForNodeReady(ctx context.Context, addr string) error { + client := &http.Client{} + defer client.CloseIdleConnections() + const timeout = 120 + startTime := time.Now().Unix() + for { + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/health", addr), nil) + if err != nil { + return err + } + req.Close = true + resp, err := client.Do(req) + if err == nil { + resp.Body.Close() + if resp.StatusCode == http.StatusOK { + return nil + } + } + if time.Now().Unix()-startTime > int64(timeout*time.Second) { + return fmt.Errorf("timed out waiting for node to start, waited %d seconds", timeout) + } + time.Sleep(time.Second * 5) + } +} + +// authenticate creates a http client with URL, email and password +func authenticate(ctx context.Context, urlStr, email, password string, lggr logger.Logger) (cmd.HTTPClient, error) { + remoteNodeURL, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + c := cmd.ClientOpts{RemoteNodeURL: *remoteNodeURL} + sr := sessions.SessionRequest{Email: email, Password: password} + store := &cmd.MemoryCookieStore{} + + tca := cmd.NewSessionCookieAuthenticator(c, store, lggr) + if _, err = tca.Authenticate(ctx, sr); err != nil { + log.Println("failed to authenticate: ", err) + return nil, err + } + + return cmd.NewAuthenticatedHTTPClient(lggr, c, tca, sr), nil +} + +func nodeRequest(ctx context.Context, client cmd.HTTPClient, path string) ([]byte, error) { + resp, err := client.Get(ctx, path) + if err != nil { + return []byte{}, fmt.Errorf("GET error from client: %w", err) + } + defer resp.Body.Close() + + raw, err := io.ReadAll(resp.Body) + if err != nil { + return []byte{}, fmt.Errorf("failed to read response body: %w", err) + } + + type errorDetail struct { + Detail string `json:"detail"` + } + + type 
errorResp struct { + Errors []errorDetail `json:"errors"` + } + + var errs errorResp + if err := json.Unmarshal(raw, &errs); err == nil && len(errs.Errors) > 0 { + return []byte{}, fmt.Errorf("error returned from api: %s", errs.Errors[0].Detail) + } + + return raw, nil +} + +// getNodeAddress returns plugin node's wallet address +func getNodeAddress(ctx context.Context, client cmd.HTTPClient) (string, error) { + resp, err := nodeRequest(ctx, client, ethKeysEndpoint) + if err != nil { + return "", fmt.Errorf("failed to get ETH keys: %w", err) + } + + var keys cmd.EthKeyPresenters + if err = jsonapi.Unmarshal(resp, &keys); err != nil { + return "", fmt.Errorf("failed to unmarshal response body: %w", err) + } + + return keys[0].Address, nil +} + +// getNodeOCR2Config returns plugin node's OCR2 bundle key ID +func getNodeOCR2Config(ctx context.Context, client cmd.HTTPClient) (*cmd.OCR2KeyBundlePresenter, error) { + resp, err := nodeRequest(ctx, client, ocr2KeysEndpoint) + if err != nil { + return nil, fmt.Errorf("failed to get OCR2 keys: %w", err) + } + + var keys cmd.OCR2KeyBundlePresenters + if err = jsonapi.Unmarshal(resp, &keys); err != nil { + return nil, fmt.Errorf("failed to unmarshal response body: %w", err) + } + + var evmKey cmd.OCR2KeyBundlePresenter + for _, key := range keys { + if key.ChainType == string(chaintype.EVM) { + evmKey = key + break + } + } + + return &evmKey, nil +} + +// getP2PKeyID returns plugin node's P2P key ID +func getP2PKeyID(ctx context.Context, client cmd.HTTPClient) (string, error) { + resp, err := nodeRequest(ctx, client, p2pKeysEndpoint) + if err != nil { + return "", fmt.Errorf("failed to get P2P keys: %w", err) + } + + var keys cmd.P2PKeyPresenters + if err = jsonapi.Unmarshal(resp, &keys); err != nil { + return "", fmt.Errorf("failed to unmarshal response body: %w", err) + } + + return keys[0].ID, nil +} + +// createCredsFiles creates two temporary files with node creds: api and password. 
+func createCredsFiles() (string, string, func(), error) { + // Create temporary file with plugin node login creds + apiFile, err := os.CreateTemp("", "plugin-node-api") + if err != nil { + return "", "", nil, fmt.Errorf("failed to create api file: %w", err) + } + _, _ = apiFile.WriteString(defaultPluginNodeLogin) + _, _ = apiFile.WriteString("\n") + _, _ = apiFile.WriteString(defaultPluginNodePassword) + + // Create temporary file with plugin node password + passwordFile, err := os.CreateTemp("", "plugin-node-password") + if err != nil { + return "", "", nil, fmt.Errorf("failed to create password file: %w", err) + } + _, _ = passwordFile.WriteString(defaultPluginNodePassword) + + return apiFile.Name(), passwordFile.Name(), func() { + os.RemoveAll(apiFile.Name()) + os.RemoveAll(passwordFile.Name()) + }, nil +} + +// createTomlFile creates temporary file with TOML config +func createTomlFile(tomlString string) (string, func(), error) { + // Create temporary file with plugin node TOML config + tomlFile, err := os.CreateTemp("", "plugin-toml-config") + if err != nil { + return "", nil, fmt.Errorf("failed to create toml file: %w", err) + } + _, _ = tomlFile.WriteString(tomlString) + + return tomlFile.Name(), func() { + os.RemoveAll(tomlFile.Name()) + }, nil +} diff --git a/core/scripts/chaincli/handler/jobs.go b/core/scripts/chaincli/handler/jobs.go new file mode 100644 index 00000000..7f6b3c8f --- /dev/null +++ b/core/scripts/chaincli/handler/jobs.go @@ -0,0 +1,39 @@ +package handler + +import ( + "context" + "log" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func (k *Keeper) CreateJob(ctx context.Context) { + k.createJobs(ctx) +} + +func (k *Keeper) createJobs(ctx context.Context) { + lggr, closeLggr := logger.NewLogger() + logger.Sugared(lggr).ErrorIfFn(closeLggr, "Failed to close logger") + + // Create Keeper Jobs on Nodes for Registry + for i, keeperAddr := range k.cfg.Keepers { + url := k.cfg.KeeperURLs[i] + email := k.cfg.KeeperEmails[i] + if 
len(email) == 0 { + email = defaultPluginNodeLogin + } + pwd := k.cfg.KeeperPasswords[i] + if len(pwd) == 0 { + pwd = defaultPluginNodePassword + } + + cl, err := authenticate(ctx, url, email, pwd, lggr) + if err != nil { + log.Fatal(err) + } + + if err = k.createKeeperJob(ctx, cl, k.cfg.RegistryAddress, keeperAddr); err != nil { + log.Fatal(err) + } + } +} diff --git a/core/scripts/chaincli/handler/keeper.go b/core/scripts/chaincli/handler/keeper.go new file mode 100644 index 00000000..59453258 --- /dev/null +++ b/core/scripts/chaincli/handler/keeper.go @@ -0,0 +1,813 @@ +package handler + +import ( + "context" + "encoding/json" + "fmt" + "log" + "math/big" + "os" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/umbracle/ethgo/abi" + + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + automationForwarderLogic "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_forwarder_logic" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + registrylogic20 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_logic2_0" + registrylogica21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_logic_a_wrapper_2_1" + registrylogicb21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_logic_b_wrapper_2_1" + registry11 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + registry12 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + registry20 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" + registry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_upkeep_counter_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/streams_lookup_upkeep_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/upkeep_counter_wrapper" + upkeep "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/upkeep_perform_counter_restrictive_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/verifiable_load_streams_lookup_upkeep_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/verifiable_load_upkeep_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" +) + +// Keeper is the keepers commands handler +type Keeper struct { + *baseHandler + + addFundsAmount *big.Int +} + +// NewKeeper creates new instance of Keeper +func NewKeeper(cfg *config.Config) *Keeper { + addFundsAmount := big.NewInt(0) + addFundsAmount.SetString(cfg.AddFundsAmount, 10) + + return &Keeper{ + baseHandler: NewBaseHandler(cfg), + addFundsAmount: addFundsAmount, + } +} + +// DeployKeepers contains a logic to deploy keepers. 
+func (k *Keeper) DeployKeepers(ctx context.Context) { + lggr, closeLggr := logger.NewLogger() + logger.Sugared(lggr).ErrorIfFn(closeLggr, "Failed to close logger") + + keepers, owners := k.keepers() + upkeepCount, registryAddr, deployer := k.prepareRegistry(ctx) + + // Create Keeper Jobs on Nodes for Registry + cls := make([]cmd.HTTPClient, len(k.cfg.Keepers)) + for i, keeperAddr := range k.cfg.Keepers { + url := k.cfg.KeeperURLs[i] + email := k.cfg.KeeperEmails[i] + if len(email) == 0 { + email = defaultPluginNodeLogin + } + pwd := k.cfg.KeeperPasswords[i] + if len(pwd) == 0 { + pwd = defaultPluginNodePassword + } + + cl, err := authenticate(ctx, url, email, pwd, lggr) + if err != nil { + log.Fatal(err) + } + cls[i] = cl + + if err = k.createKeeperJob(ctx, cl, k.cfg.RegistryAddress, keeperAddr); err != nil { + log.Fatal(err) + } + } + + // Approve keeper registry + k.approveFunds(ctx, registryAddr) + + // Deploy Upkeeps + k.deployUpkeeps(ctx, registryAddr, deployer, upkeepCount) + + // Set Keepers on the registry + k.setKeepers(ctx, cls, deployer, keepers, owners) +} + +// DeployRegistry deploys a new keeper registry. 
+func (k *Keeper) DeployRegistry(ctx context.Context, verify bool) { + if verify { + if k.cfg.RegistryVersion != keeper.RegistryVersion_2_1 && k.cfg.RegistryVersion != keeper.RegistryVersion_2_0 { + log.Fatal("keeper registry verification is only supported for version 2.0 and 2.1") + } + if k.cfg.ExplorerAPIKey == "" || k.cfg.ExplorerAPIKey == "" || k.cfg.NetworkName == "" || k.cfg.NetworkName == "" { + log.Fatal("please set your explore API key and network name in the .env file to verify the registry contract") + } + + // Get the current working directory + currentDir, err := os.Getwd() + if err != nil { + log.Fatal("failed to get current working directory: %w", err) + } + + // Check if it is the root directory of chaincli + if !strings.HasSuffix(currentDir, "core/scripts/chaincli") { + log.Fatal("please run the command from the root directory of chaincli to verify the registry") + } + } + + switch k.cfg.RegistryVersion { + case keeper.RegistryVersion_1_1: + k.deployRegistry11(ctx) + case keeper.RegistryVersion_1_2: + k.deployRegistry12(ctx) + case keeper.RegistryVersion_2_0: + k.deployRegistry20(ctx, verify) + case keeper.RegistryVersion_2_1: + k.deployRegistry21(ctx, verify) + default: + panic("unsupported registry version") + } +} + +func (k *Keeper) prepareRegistry(ctx context.Context) (int64, common.Address, keepersDeployer) { + var upkeepCount int64 + var registryAddr common.Address + var deployer keepersDeployer + var keeperRegistry11 *registry11.KeeperRegistry + var keeperRegistry12 *registry12.KeeperRegistry + var keeperRegistry20 *registry20.KeeperRegistry + var keeperRegistry21 *iregistry21.IKeeperRegistryMaster + if k.cfg.RegistryAddress != "" { + callOpts := bind.CallOpts{ + From: k.fromAddr, + Context: ctx, + } + + // Get existing keeper registry + switch k.cfg.RegistryVersion { + case keeper.RegistryVersion_1_1: + registryAddr, keeperRegistry11 = k.getRegistry11(ctx) + count, err := keeperRegistry11.GetUpkeepCount(&callOpts) + if err != nil { + 
log.Fatal(registryAddr.Hex(), ": UpkeepCount failed - ", err) + } + upkeepCount = count.Int64() + deployer = &v11KeeperDeployer{keeperRegistry11} + case keeper.RegistryVersion_1_2: + registryAddr, keeperRegistry12 = k.getRegistry12(ctx) + state, err := keeperRegistry12.GetState(&callOpts) + if err != nil { + log.Fatal(registryAddr.Hex(), ": failed to getState - ", err) + } + upkeepCount = state.State.NumUpkeeps.Int64() + deployer = &v12KeeperDeployer{keeperRegistry12} + case keeper.RegistryVersion_2_0: + registryAddr, keeperRegistry20 = k.getRegistry20(ctx) + state, err := keeperRegistry20.GetState(&callOpts) + if err != nil { + log.Fatal(registryAddr.Hex(), ": failed to getState - ", err) + } + upkeepCount = state.State.NumUpkeeps.Int64() + deployer = &v20KeeperDeployer{KeeperRegistryInterface: keeperRegistry20, cfg: k.cfg} + case keeper.RegistryVersion_2_1: + registryAddr, keeperRegistry21 = k.getRegistry21(ctx) + state, err := keeperRegistry21.GetState(&callOpts) + if err != nil { + log.Fatal(registryAddr.Hex(), ": failed to getState - ", err) + } + upkeepCount = state.State.NumUpkeeps.Int64() + deployer = &v21KeeperDeployer{IKeeperRegistryMasterInterface: keeperRegistry21, cfg: k.cfg} + default: + panic(fmt.Errorf("version %s is not supported", k.cfg.RegistryVersion)) + } + } else { + // Deploy keeper registry + switch k.cfg.RegistryVersion { + case keeper.RegistryVersion_1_1: + registryAddr, keeperRegistry11 = k.deployRegistry11(ctx) + deployer = &v11KeeperDeployer{keeperRegistry11} + case keeper.RegistryVersion_1_2: + registryAddr, keeperRegistry12 = k.deployRegistry12(ctx) + deployer = &v12KeeperDeployer{keeperRegistry12} + case keeper.RegistryVersion_2_0: + registryAddr, keeperRegistry20 = k.deployRegistry20(ctx, true) + deployer = &v20KeeperDeployer{KeeperRegistryInterface: keeperRegistry20, cfg: k.cfg} + case keeper.RegistryVersion_2_1: + registryAddr, keeperRegistry21 = k.deployRegistry21(ctx, false) + deployer = 
&v21KeeperDeployer{IKeeperRegistryMasterInterface: keeperRegistry21, cfg: k.cfg} + default: + panic(fmt.Errorf("version %s is not supported", k.cfg.RegistryVersion)) + } + } + + return upkeepCount, registryAddr, deployer +} + +func (k *Keeper) approveFunds(ctx context.Context, registryAddr common.Address) { + if k.approveAmount.Cmp(big.NewInt(0)) == 0 { + return + } + // Approve keeper registry + approveRegistryTx, err := k.linkToken.Approve(k.buildTxOpts(ctx), registryAddr, k.approveAmount) + if err != nil { + log.Fatal(registryAddr.Hex(), ": Approve failed - ", err) + } + + if err := k.waitTx(ctx, approveRegistryTx); err != nil { + log.Fatalf("KeeperRegistry ApproveFunds failed for registryAddr: %s, and approveAmount: %s, error is: %s", k.cfg.RegistryAddress, k.approveAmount, err.Error()) + } + + log.Println(registryAddr.Hex(), ": KeeperRegistry approved - ", helpers.ExplorerLink(k.cfg.ChainID, approveRegistryTx.Hash())) +} + +func (k *Keeper) VerifyContract(params ...string) { + // Change to the contracts directory where the hardhat.config.ts file is located + if err := k.changeToContractsDirectory(); err != nil { + log.Fatalf("failed to change to directory where the hardhat.config.ts file is located: %v", err) + } + + // Append the address and params to the commandArgs slice + commandArgs := append([]string{}, params...) 
+ + // Format the command string with the commandArgs + command := fmt.Sprintf( + "NODE_HTTP_URL='%s' EXPLORER_API_KEY='%s' NETWORK_NAME='%s' pnpm hardhat verify --network env %s", + k.cfg.NodeHttpURL, + k.cfg.ExplorerAPIKey, + k.cfg.NetworkName, + strings.Join(commandArgs, " "), + ) + + fmt.Println("Running command to verify contract: ", command) + if err := k.runCommand(command); err != nil { + log.Println("Contract verification on Explorer failed: ", err) + + } +} + +// deployRegistry21 deploys a version 2.1 keeper registry +func (k *Keeper) deployRegistry21(ctx context.Context, verify bool) (common.Address, *iregistry21.IKeeperRegistryMaster) { + automationForwarderLogicAddr, tx, _, err := automationForwarderLogic.DeployAutomationForwarderLogic(k.buildTxOpts(ctx), k.client) + if err != nil { + log.Fatal("Deploy AutomationForwarderLogic failed: ", err) + } + k.waitDeployment(ctx, tx) + log.Println("AutomationForwarderLogic deployed:", automationForwarderLogicAddr.Hex(), "-", helpers.ExplorerLink(k.cfg.ChainID, tx.Hash())) + + registryLogicBAddr, tx, _, err := registrylogicb21.DeployKeeperRegistryLogicB( + k.buildTxOpts(ctx), + k.client, + k.cfg.Mode, + common.HexToAddress(k.cfg.LinkTokenAddr), + common.HexToAddress(k.cfg.LinkETHFeedAddr), + common.HexToAddress(k.cfg.FastGasFeedAddr), + automationForwarderLogicAddr, + ) + if err != nil { + log.Fatal("DeployAbi failed: ", err) + } + k.waitDeployment(ctx, tx) + log.Println("KeeperRegistry LogicB 2.1 deployed:", registryLogicBAddr.Hex(), "-", helpers.ExplorerLink(k.cfg.ChainID, tx.Hash())) + + // verify KeeperRegistryLogicB + if verify { + k.VerifyContract(registryLogicBAddr.String(), "0", k.cfg.LinkTokenAddr, k.cfg.LinkETHFeedAddr, k.cfg.FastGasFeedAddr) + log.Println("KeeperRegistry LogicB 2.1 verified successfully") + } + + registryLogicAAddr, tx, _, err := registrylogica21.DeployKeeperRegistryLogicA( + k.buildTxOpts(ctx), + k.client, + registryLogicBAddr, + ) + if err != nil { + log.Fatal("DeployAbi failed: ", 
err) + } + k.waitDeployment(ctx, tx) + log.Println("KeeperRegistry LogicA 2.1 deployed:", registryLogicAAddr.Hex(), "-", helpers.ExplorerLink(k.cfg.ChainID, tx.Hash())) + + // verify KeeperRegistryLogicA + if verify { + k.VerifyContract(registryLogicAAddr.String(), registryLogicBAddr.String()) + log.Println("KeeperRegistry LogicA 2.1 verified successfully") + } + + registryAddr, deployKeeperRegistryTx, _, err := registry21.DeployKeeperRegistry( + k.buildTxOpts(ctx), + k.client, + registryLogicAAddr, + ) + if err != nil { + log.Fatal("DeployAbi failed: ", err) + } + k.waitDeployment(ctx, deployKeeperRegistryTx) + log.Println("KeeperRegistry 2.1 deployed:", registryAddr.Hex(), "-", helpers.ExplorerLink(k.cfg.ChainID, deployKeeperRegistryTx.Hash())) + + registryInstance, err := iregistry21.NewIKeeperRegistryMaster(registryAddr, k.client) + if err != nil { + log.Fatal("Failed to attach to deployed contract: ", err) + } + + // verify KeeperRegistry + if verify { + k.VerifyContract(registryAddr.String(), registryLogicAAddr.String()) + log.Println("KeeperRegistry 2.1 verified successfully") + } + + return registryAddr, registryInstance +} + +// deployRegistry20 deploys a version 2.0 keeper registry +func (k *Keeper) deployRegistry20(ctx context.Context, verify bool) (common.Address, *registry20.KeeperRegistry) { + registryLogicAddr, deployKeeperRegistryLogicTx, _, err := registrylogic20.DeployKeeperRegistryLogic( + k.buildTxOpts(ctx), + k.client, + k.cfg.Mode, + common.HexToAddress(k.cfg.LinkTokenAddr), + common.HexToAddress(k.cfg.LinkETHFeedAddr), + common.HexToAddress(k.cfg.FastGasFeedAddr), + ) + if err != nil { + log.Fatal("DeployAbi failed: ", err) + } + k.waitDeployment(ctx, deployKeeperRegistryLogicTx) + log.Println("KeeperRegistry2.0 Logic deployed:", registryLogicAddr.Hex(), "-", helpers.ExplorerLink(k.cfg.ChainID, deployKeeperRegistryLogicTx.Hash())) + + // verify KeeperRegistryLogic + if verify { + k.VerifyContract(registryLogicAddr.String(), "0", 
k.cfg.LinkTokenAddr, k.cfg.LinkETHFeedAddr, k.cfg.FastGasFeedAddr) + log.Println("KeeperRegistry Logic 2.0 verified successfully") + } + + registryAddr, deployKeeperRegistryTx, registryInstance, err := registry20.DeployKeeperRegistry( + k.buildTxOpts(ctx), + k.client, + registryLogicAddr, + ) + if err != nil { + log.Fatal("DeployAbi failed: ", err) + } + k.waitDeployment(ctx, deployKeeperRegistryTx) + log.Println("KeeperRegistry2.0 deployed:", registryAddr.Hex(), "-", helpers.ExplorerLink(k.cfg.ChainID, deployKeeperRegistryTx.Hash())) + + // verify KeeperRegistry + if verify { + k.VerifyContract(registryAddr.String(), registryLogicAddr.String()) + log.Println("KeeperRegistry 2.0 verified successfully") + } + + return registryAddr, registryInstance +} + +// deployRegistry12 deploys a version 1.2 keeper registry +func (k *Keeper) deployRegistry12(ctx context.Context) (common.Address, *registry12.KeeperRegistry) { + registryAddr, deployKeeperRegistryTx, registryInstance, err := registry12.DeployKeeperRegistry( + k.buildTxOpts(ctx), + k.client, + common.HexToAddress(k.cfg.LinkTokenAddr), + common.HexToAddress(k.cfg.LinkETHFeedAddr), + common.HexToAddress(k.cfg.FastGasFeedAddr), + *k.getConfigForRegistry12(), + ) + if err != nil { + log.Fatal("DeployAbi failed: ", err) + } + k.waitDeployment(ctx, deployKeeperRegistryTx) + log.Println("KeeperRegistry1.2 deployed:", registryAddr.Hex(), "-", helpers.ExplorerLink(k.cfg.ChainID, deployKeeperRegistryTx.Hash())) + return registryAddr, registryInstance +} + +// deployRegistry11 deploys a version 1.1 keeper registry +func (k *Keeper) deployRegistry11(ctx context.Context) (common.Address, *registry11.KeeperRegistry) { + registryAddr, deployKeeperRegistryTx, registryInstance, err := registry11.DeployKeeperRegistry(k.buildTxOpts(ctx), k.client, + common.HexToAddress(k.cfg.LinkTokenAddr), + common.HexToAddress(k.cfg.LinkETHFeedAddr), + common.HexToAddress(k.cfg.FastGasFeedAddr), + k.cfg.PaymentPremiumPBB, + k.cfg.FlatFeeMicroLink, + 
big.NewInt(k.cfg.BlockCountPerTurn), + k.cfg.CheckGasLimit, + big.NewInt(k.cfg.StalenessSeconds), + k.cfg.GasCeilingMultiplier, + big.NewInt(k.cfg.FallbackGasPrice), + big.NewInt(k.cfg.FallbackLinkPrice), + ) + if err != nil { + log.Fatal("DeployAbi failed: ", err) + } + k.waitDeployment(ctx, deployKeeperRegistryTx) + log.Println("KeeperRegistry1.1 deployed:", registryAddr.Hex(), "-", helpers.ExplorerLink(k.cfg.ChainID, deployKeeperRegistryTx.Hash())) + return registryAddr, registryInstance +} + +// UpdateRegistry attaches to an existing registry and possibly updates registry config +func (k *Keeper) UpdateRegistry(ctx context.Context) { + var registryAddr common.Address + switch k.cfg.RegistryVersion { + case keeper.RegistryVersion_1_1: + registryAddr, _ = k.getRegistry11(ctx) + case keeper.RegistryVersion_1_2: + registryAddr, _ = k.getRegistry12(ctx) + case keeper.RegistryVersion_2_0: + registryAddr, _ = k.getRegistry20(ctx) + case keeper.RegistryVersion_2_1: + registryAddr, _ = k.getRegistry21(ctx) + default: + panic("unexpected registry address") + } + log.Println("KeeperRegistry at:", registryAddr) +} + +// getRegistry20 attaches to an existing 2.0 registry and possibly updates registry config +func (k *Keeper) getRegistry20(ctx context.Context) (common.Address, *registry20.KeeperRegistry) { + registryAddr := common.HexToAddress(k.cfg.RegistryAddress) + keeperRegistry20, err := registry20.NewKeeperRegistry( + registryAddr, + k.client, + ) + if err != nil { + log.Fatal("Registry failed: ", err) + } + if k.cfg.RegistryConfigUpdate { + panic("KeeperRegistry2.0 could not be updated") + } + log.Println("KeeperRegistry2.0 config not updated: KEEPER_CONFIG_UPDATE=false") + return registryAddr, keeperRegistry20 +} + +// getRegistry21 attaches to an existing 2.1 registry and possibly updates registry config +func (k *Keeper) getRegistry21(ctx context.Context) (common.Address, *iregistry21.IKeeperRegistryMaster) { + registryAddr := 
common.HexToAddress(k.cfg.RegistryAddress) + keeperRegistry21, err := iregistry21.NewIKeeperRegistryMaster( + registryAddr, + k.client, + ) + if err != nil { + log.Fatal("Registry failed: ", err) + } + if k.cfg.RegistryConfigUpdate { + panic("KeeperRegistry2.1 could not be updated") + } + log.Println("KeeperRegistry2.1 config not updated: KEEPER_CONFIG_UPDATE=false") + return registryAddr, keeperRegistry21 +} + +// getRegistry12 attaches to an existing 1.2 registry and possibly updates registry config +func (k *Keeper) getRegistry12(ctx context.Context) (common.Address, *registry12.KeeperRegistry) { + registryAddr := common.HexToAddress(k.cfg.RegistryAddress) + keeperRegistry12, err := registry12.NewKeeperRegistry( + registryAddr, + k.client, + ) + if err != nil { + log.Fatal("Registry failed: ", err) + } + if k.cfg.RegistryConfigUpdate { + transaction, err := keeperRegistry12.SetConfig(k.buildTxOpts(ctx), *k.getConfigForRegistry12()) + if err != nil { + log.Fatal("Registry config update: ", err) + } + + if err := k.waitTx(ctx, transaction); err != nil { + log.Fatalf("KeeperRegistry config update failed on registry address: %s, error is: %s", k.cfg.RegistryAddress, err.Error()) + } + log.Println("KeeperRegistry config update:", k.cfg.RegistryAddress, "-", helpers.ExplorerLink(k.cfg.ChainID, transaction.Hash())) + } + log.Println("KeeperRegistry config not updated: KEEPER_CONFIG_UPDATE=false") + return registryAddr, keeperRegistry12 +} + +// getRegistry11 attaches to an existing 1.1 registry and possibly updates registry config +func (k *Keeper) getRegistry11(ctx context.Context) (common.Address, *registry11.KeeperRegistry) { + registryAddr := common.HexToAddress(k.cfg.RegistryAddress) + keeperRegistry11, err := registry11.NewKeeperRegistry( + registryAddr, + k.client, + ) + if err != nil { + log.Fatal("Registry failed: ", err) + } + if k.cfg.RegistryConfigUpdate { + transaction, err := keeperRegistry11.SetConfig(k.buildTxOpts(ctx), + k.cfg.PaymentPremiumPBB, + 
k.cfg.FlatFeeMicroLink, + big.NewInt(k.cfg.BlockCountPerTurn), + k.cfg.CheckGasLimit, + big.NewInt(k.cfg.StalenessSeconds), + k.cfg.GasCeilingMultiplier, + big.NewInt(k.cfg.FallbackGasPrice), + big.NewInt(k.cfg.FallbackLinkPrice)) + if err != nil { + log.Fatal("Registry config update: ", err) + } + + if err := k.waitTx(ctx, transaction); err != nil { + log.Fatalf("KeeperRegistry config update failed on registry address: %s, error is %s", k.cfg.RegistryAddress, err.Error()) + } + log.Println("KeeperRegistry config update:", k.cfg.RegistryAddress, "-", helpers.ExplorerLink(k.cfg.ChainID, transaction.Hash())) + } + log.Println("KeeperRegistry config not updated: KEEPER_CONFIG_UPDATE=false") + return registryAddr, keeperRegistry11 +} + +// deployUpkeeps deploys upkeeps and funds upkeeps +func (k *Keeper) deployUpkeeps(ctx context.Context, registryAddr common.Address, deployer upkeepDeployer, existingCount int64) { + fmt.Println() + log.Println("Deploying upkeeps...") + var upkeepAddrs []common.Address + for i := existingCount; i < k.cfg.UpkeepCount+existingCount; i++ { + fmt.Println() + // Deploy + var upkeepAddr common.Address + var deployUpkeepTx *types.Transaction + var registerUpkeepTx *types.Transaction + var logUpkeepCounter *log_upkeep_counter_wrapper.LogUpkeepCounter + var checkData []byte + + switch k.cfg.UpkeepType { + case config.Conditional: + checkData = []byte(k.cfg.UpkeepCheckData) + var err error + if k.cfg.UpkeepAverageEligibilityCadence > 0 { + upkeepAddr, deployUpkeepTx, _, err = upkeep.DeployUpkeepPerformCounterRestrictive( + k.buildTxOpts(ctx), + k.client, + big.NewInt(k.cfg.UpkeepTestRange), + big.NewInt(k.cfg.UpkeepAverageEligibilityCadence), + ) + } else if k.cfg.VerifiableLoadTest { + upkeepAddr, deployUpkeepTx, _, err = verifiable_load_upkeep_wrapper.DeployVerifiableLoadUpkeep( + k.buildTxOpts(ctx), + k.client, + common.HexToAddress(k.cfg.Registrar), + k.cfg.UseArbBlockNumber, + ) + } else { + upkeepAddr, deployUpkeepTx, _, err = 
upkeep_counter_wrapper.DeployUpkeepCounter( + k.buildTxOpts(ctx), + k.client, + big.NewInt(k.cfg.UpkeepTestRange), + big.NewInt(k.cfg.UpkeepInterval), + ) + } + if err != nil { + log.Fatal(i, ": Deploy Upkeep failed - ", err) + } + k.waitDeployment(ctx, deployUpkeepTx) + log.Println(i, upkeepAddr.Hex(), ": Upkeep deployed - ", helpers.ExplorerLink(k.cfg.ChainID, deployUpkeepTx.Hash())) + registerUpkeepTx, err = deployer.RegisterUpkeep(k.buildTxOpts(ctx), + upkeepAddr, k.cfg.UpkeepGasLimit, k.fromAddr, checkData, []byte{}, + ) + if err != nil { + log.Fatal(i, upkeepAddr.Hex(), ": RegisterUpkeep failed - ", err) + } + case config.Mercury: + checkData = []byte(k.cfg.UpkeepCheckData) + var err error + if k.cfg.VerifiableLoadTest { + upkeepAddr, deployUpkeepTx, _, err = verifiable_load_streams_lookup_upkeep_wrapper.DeployVerifiableLoadStreamsLookupUpkeep( + k.buildTxOpts(ctx), + k.client, + common.HexToAddress(k.cfg.Registrar), + k.cfg.UseArbBlockNumber, + ) + } else { + upkeepAddr, deployUpkeepTx, _, err = streams_lookup_upkeep_wrapper.DeployStreamsLookupUpkeep( + k.buildTxOpts(ctx), + k.client, + big.NewInt(k.cfg.UpkeepTestRange), + big.NewInt(k.cfg.UpkeepInterval), + true, /* useArbBlock */ + true, /* staging */ + false, /* verify mercury response */ + ) + } + if err != nil { + log.Fatal(i, ": Deploy Upkeep failed - ", err) + } + k.waitDeployment(ctx, deployUpkeepTx) + log.Println(i, upkeepAddr.Hex(), ": Upkeep deployed - ", helpers.ExplorerLink(k.cfg.ChainID, deployUpkeepTx.Hash())) + registerUpkeepTx, err = deployer.RegisterUpkeep(k.buildTxOpts(ctx), + upkeepAddr, k.cfg.UpkeepGasLimit, k.fromAddr, checkData, []byte{}, + ) + if err != nil { + log.Fatal(i, upkeepAddr.Hex(), ": RegisterUpkeep failed - ", err) + } + case config.LogTrigger: + var err error + upkeepAddr, deployUpkeepTx, logUpkeepCounter, err = log_upkeep_counter_wrapper.DeployLogUpkeepCounter( + k.buildTxOpts(ctx), + k.client, + big.NewInt(k.cfg.UpkeepTestRange), + ) + if err != nil { + log.Fatal(i, ": 
Deploy Upkeep failed - ", err) + } + logTriggerConfigType := abi.MustNewType("tuple(address contractAddress, uint8 filterSelector, bytes32 topic0, bytes32 topic1, bytes32 topic2, bytes32 topic3)") + logTriggerConfig, err := abi.Encode(map[string]interface{}{ + "contractAddress": upkeepAddr, + "filterSelector": 0, // no indexed topics filtered + "topic0": "0x3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d", // event sig for Trigger() + "topic1": "0x", + "topic2": "0x", + "topic3": "0x", + }, logTriggerConfigType) + if err != nil { + log.Fatal("failed to encode log trigger config", err) + } + k.waitDeployment(ctx, deployUpkeepTx) + log.Println(i, upkeepAddr.Hex(), ": Upkeep deployed - ", helpers.ExplorerLink(k.cfg.ChainID, deployUpkeepTx.Hash())) + registerUpkeepTx, err = deployer.RegisterUpkeepV2(k.buildTxOpts(ctx), + upkeepAddr, k.cfg.UpkeepGasLimit, k.fromAddr, 1, []byte{}, logTriggerConfig, []byte{}, + ) + if err != nil { + log.Fatal(i, upkeepAddr.Hex(), ": RegisterUpkeep failed - ", err) + } + + // Start up log trigger cycle + logUpkeepStartTx, err := logUpkeepCounter.Start(k.buildTxOpts(ctx)) + if err != nil { + log.Fatal("failed to start log upkeep counter", err) + } + if err = k.waitTx(ctx, logUpkeepStartTx); err != nil { + log.Fatalf("Log upkeep Start() failed for upkeepId: %s, error is %s", upkeepAddr.Hex(), err.Error()) + } + log.Println(i, upkeepAddr.Hex(), ": Log upkeep successfully started - ", helpers.ExplorerLink(k.cfg.ChainID, logUpkeepStartTx.Hash())) + default: + log.Fatal("unexpected upkeep type") + } + + if err := k.waitTx(ctx, registerUpkeepTx); err != nil { + log.Fatalf("RegisterUpkeep failed for upkeepId: %s, error is %s", upkeepAddr.Hex(), err.Error()) + } + log.Println(i, upkeepAddr.Hex(), ": Upkeep registered - ", helpers.ExplorerLink(k.cfg.ChainID, registerUpkeepTx.Hash())) + + upkeepAddrs = append(upkeepAddrs, upkeepAddr) + } + + var upkeepGetter activeUpkeepGetter + upkeepCount := big.NewInt(k.cfg.UpkeepCount) // second 
arg in GetActiveUpkeepIds (on registry) + { + var err error + switch k.cfg.RegistryVersion { + case keeper.RegistryVersion_1_1: + panic("not supported 1.1 registry") + case keeper.RegistryVersion_1_2: + upkeepGetter, err = registry12.NewKeeperRegistry( + registryAddr, + k.client, + ) + case keeper.RegistryVersion_2_0: + upkeepGetter, err = registry20.NewKeeperRegistry( + registryAddr, + k.client, + ) + case keeper.RegistryVersion_2_1: + upkeepGetter, err = iregistry21.NewIKeeperRegistryMaster( + registryAddr, + k.client, + ) + default: + panic("unexpected registry address") + } + if err != nil { + log.Fatal("Registry failed: ", err) + } + } + + activeUpkeepIds := k.getActiveUpkeepIds(ctx, upkeepGetter, big.NewInt(existingCount), upkeepCount) + + for index, upkeepAddr := range upkeepAddrs { + // Approve + k.approveFunds(ctx, registryAddr) + + upkeepId := activeUpkeepIds[index] + + // Fund + addFundsTx, err := deployer.AddFunds(k.buildTxOpts(ctx), upkeepId, k.addFundsAmount) + if err != nil { + log.Fatal(upkeepId, upkeepAddr.Hex(), ": AddFunds failed - ", err) + } + + // Onchain transaction + if err := k.waitTx(ctx, addFundsTx); err != nil { + log.Fatalf("AddFunds failed for upkeepId: %s, and upkeepAddr: %s, error is: %s", upkeepId, upkeepAddr.Hex(), err.Error()) + } + + log.Println(upkeepId, upkeepAddr.Hex(), ": Upkeep funded - ", helpers.ExplorerLink(k.cfg.ChainID, addFundsTx.Hash())) + } + + // set administrative offchain config for mercury upkeeps + if (k.cfg.UpkeepType == config.Mercury || k.cfg.UpkeepType == config.LogTriggeredFeedLookup) && k.cfg.RegistryVersion == keeper.RegistryVersion_2_1 { + reg21, err := iregistry21.NewIKeeperRegistryMaster(registryAddr, k.client) + if err != nil { + log.Fatalf("cannot create registry 2.1: %v", err) + } + v, err := reg21.TypeAndVersion(nil) + if err != nil { + log.Fatalf("failed to fetch type and version from registry 2.1: %v", err) + } + log.Printf("registry version is %s", v) + log.Printf("active upkeep ids: %v", 
activeUpkeepIds) + + adminBytes, err := json.Marshal(streams.UpkeepPrivilegeConfig{ + MercuryEnabled: true, + }) + if err != nil { + log.Fatalf("failed to marshal upkeep privilege config: %v", err) + } + + for _, id := range activeUpkeepIds { + tx, err2 := reg21.SetUpkeepPrivilegeConfig(k.buildTxOpts(ctx), id, adminBytes) + if err2 != nil { + log.Fatalf("failed to upkeep privilege config: %v", err2) + } + err2 = k.waitTx(ctx, tx) + if err2 != nil { + log.Fatalf("failed to wait for tx: %v", err2) + } + log.Printf("upkeep privilege config is set for %s", id.String()) + + info, err2 := reg21.GetUpkeep(nil, id) + if err2 != nil { + log.Fatalf("failed to fetch upkeep id %s from registry 2.1: %v", id, err2) + } + min, err2 := reg21.GetMinBalanceForUpkeep(nil, id) + if err2 != nil { + log.Fatalf("failed to fetch upkeep id %s from registry 2.1: %v", id, err2) + } + log.Printf(" Balance: %s", info.Balance) + log.Printf("Min Balance: %s", min.String()) + } + } + + fmt.Println() +} + +// setKeepers set the keeper list for a registry +func (k *Keeper) setKeepers(ctx context.Context, cls []cmd.HTTPClient, deployer keepersDeployer, keepers, owners []common.Address) { + if len(keepers) > 0 { + log.Println("Set keepers...") + opts := k.buildTxOpts(ctx) + setKeepersTx, err := deployer.SetKeepers(ctx, opts, cls, keepers, owners) + if err != nil { + log.Fatal("SetKeepers failed: ", err) + } + + if err = k.waitTx(ctx, setKeepersTx); err != nil { + log.Fatalf("SetKeepers failed, error is: %s", err.Error()) + } + + log.Println("Keepers registered:", helpers.ExplorerLink(k.cfg.ChainID, setKeepersTx.Hash())) + } else { + log.Println("No Keepers to register") + } +} + +func (k *Keeper) keepers() ([]common.Address, []common.Address) { + var addrs []common.Address + var fromAddrs []common.Address + for _, addr := range k.cfg.Keepers { + addrs = append(addrs, common.HexToAddress(addr)) + fromAddrs = append(fromAddrs, k.fromAddr) + } + return addrs, fromAddrs +} + +type activeUpkeepGetter 
interface { + Address() common.Address + GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) +} + +// getActiveUpkeepIds retrieves active upkeep ids from registry +func (k *Keeper) getActiveUpkeepIds(ctx context.Context, registry activeUpkeepGetter, from, to *big.Int) []*big.Int { + activeUpkeepIds, _ := registry.GetActiveUpkeepIDs(&bind.CallOpts{ + Pending: false, + From: k.fromAddr, + Context: ctx, + }, from, to) + return activeUpkeepIds +} + +// getConfigForRegistry12 returns a config object for registry 1.2 +func (k *Keeper) getConfigForRegistry12() *registry12.Config { + return &registry12.Config{ + PaymentPremiumPPB: k.cfg.PaymentPremiumPBB, + FlatFeeMicroLink: k.cfg.FlatFeeMicroLink, + BlockCountPerTurn: big.NewInt(k.cfg.BlockCountPerTurn), + CheckGasLimit: k.cfg.CheckGasLimit, + StalenessSeconds: big.NewInt(k.cfg.StalenessSeconds), + GasCeilingMultiplier: k.cfg.GasCeilingMultiplier, + MinUpkeepSpend: big.NewInt(k.cfg.MinUpkeepSpend), + MaxPerformGas: k.cfg.MaxPerformGas, + FallbackGasPrice: big.NewInt(k.cfg.FallbackGasPrice), + FallbackLinkPrice: big.NewInt(k.cfg.FallbackLinkPrice), + Transcoder: common.HexToAddress(k.cfg.Transcoder), + Registrar: common.HexToAddress(k.cfg.Registrar), + } +} diff --git a/core/scripts/chaincli/handler/keeper_deployer.go b/core/scripts/chaincli/handler/keeper_deployer.go new file mode 100644 index 00000000..48c0ceee --- /dev/null +++ b/core/scripts/chaincli/handler/keeper_deployer.go @@ -0,0 +1,371 @@ +package handler + +import ( + "context" + "crypto/ed25519" + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/umbracle/ethgo/abi" + + ocr2config "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + "github.com/goplugin/libocr/offchainreporting2plus/ocr3confighelper" + ocr2types
"github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/core/scripts/chaincli/config" + + offchain20config "github.com/goplugin/plugin-automation/pkg/v2/config" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + registry11 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + registry12 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + registry20 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" +) + +// canceller describes the behavior to cancel upkeeps +type canceller interface { + CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) + WithdrawFunds(opts *bind.TransactOpts, id *big.Int, to common.Address) (*types.Transaction, error) + RecoverFunds(opts *bind.TransactOpts) (*types.Transaction, error) +} + +// upkeepDeployer contains functions needed to deploy an upkeep +type upkeepDeployer interface { + RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) + RegisterUpkeepV2(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, pipelineData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) + AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) +} + +// keepersDeployer contains functions needed to deploy keepers +type keepersDeployer interface { + canceller + upkeepDeployer + SetKeepers(ctx context.Context, opts *bind.TransactOpts, _ []cmd.HTTPClient, keepers []common.Address, payees []common.Address) (*types.Transaction, error) +} + +type v11KeeperDeployer struct { + registry11.KeeperRegistryInterface +} + +func (d 
*v11KeeperDeployer) SetKeepers(ctx context.Context, opts *bind.TransactOpts, _ []cmd.HTTPClient, keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return d.KeeperRegistryInterface.SetKeepers(opts, keepers, payees) +} + +func (d *v11KeeperDeployer) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return d.KeeperRegistryInterface.RegisterUpkeep(opts, target, gasLimit, admin, checkData) +} + +func (d *v11KeeperDeployer) RegisterUpkeepV2(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, pipelineData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) { + panic("not implemented") +} + +type v12KeeperDeployer struct { + registry12.KeeperRegistryInterface +} + +func (d *v12KeeperDeployer) SetKeepers(ctx context.Context, opts *bind.TransactOpts, _ []cmd.HTTPClient, keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + return d.KeeperRegistryInterface.SetKeepers(opts, keepers, payees) +} + +func (d *v12KeeperDeployer) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return d.KeeperRegistryInterface.RegisterUpkeep(opts, target, gasLimit, admin, checkData) +} + +func (d *v12KeeperDeployer) RegisterUpkeepV2(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, pipelineData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) { + panic("not implemented") +} + +type v20KeeperDeployer struct { + registry20.KeeperRegistryInterface + cfg *config.Config +} + +func (d *v20KeeperDeployer) SetKeepers(ctx context.Context, opts *bind.TransactOpts, cls []cmd.HTTPClient, keepers []common.Address, _ []common.Address) 
(*types.Transaction, error) { + S := make([]int, len(cls)) + oracleIdentities := make([]ocr2config.OracleIdentityExtra, len(cls)) + sharedSecretEncryptionPublicKeys := make([]ocr2types.ConfigEncryptionPublicKey, len(cls)) + var wg sync.WaitGroup + for i, cl := range cls { + wg.Add(1) + go func(i int, cl cmd.HTTPClient) { + defer wg.Done() + + ocr2Config, err := getNodeOCR2Config(ctx, cl) + if err != nil { + panic(err) + } + + p2pKeyID, err := getP2PKeyID(ctx, cl) + if err != nil { + panic(err) + } + + offchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.OffChainPublicKey, "ocr2off_evm_")) + if err != nil { + panic(fmt.Errorf("failed to decode %s: %v", ocr2Config.OffChainPublicKey, err)) + } + + offchainPkBytesFixed := [ed25519.PublicKeySize]byte{} + n := copy(offchainPkBytesFixed[:], offchainPkBytes) + if n != ed25519.PublicKeySize { + panic(fmt.Errorf("wrong num elements copied")) + } + + configPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.ConfigPublicKey, "ocr2cfg_evm_")) + if err != nil { + panic(fmt.Errorf("failed to decode %s: %v", ocr2Config.ConfigPublicKey, err)) + } + + configPkBytesFixed := [ed25519.PublicKeySize]byte{} + n = copy(configPkBytesFixed[:], configPkBytes) + if n != ed25519.PublicKeySize { + panic(fmt.Errorf("wrong num elements copied")) + } + + onchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.OnchainPublicKey, "ocr2on_evm_")) + if err != nil { + panic(fmt.Errorf("failed to decode %s: %v", ocr2Config.OnchainPublicKey, err)) + } + + sharedSecretEncryptionPublicKeys[i] = configPkBytesFixed + oracleIdentities[i] = ocr2config.OracleIdentityExtra{ + OracleIdentity: ocr2config.OracleIdentity{ + OnchainPublicKey: onchainPkBytes, + OffchainPublicKey: offchainPkBytesFixed, + PeerID: p2pKeyID, + TransmitAccount: ocr2types.Account(keepers[i].String()), + }, + ConfigEncryptionPublicKey: configPkBytesFixed, + } + S[i] = 1 + }(i, cl) + } + wg.Wait() + + offC, err := 
json.Marshal(offchain20config.OffchainConfig{ + PerformLockoutWindow: 100 * 3 * 1000, // ~100 block lockout (on mumbai) + MinConfirmations: 1, + }) + if err != nil { + panic(err) + } + + signerOnchainPublicKeys, transmitterAccounts, f, _, offchainConfigVersion, offchainConfig, err := ocr2config.ContractSetConfigArgsForTests( + 5*time.Second, // deltaProgress time.Duration, + 10*time.Second, // deltaResend time.Duration, + 2500*time.Millisecond, // deltaRound time.Duration, + 40*time.Millisecond, // deltaGrace time.Duration, + 30*time.Second, // deltaStage time.Duration, + 50, // rMax uint8, + S, // s []int, + oracleIdentities, // oracles []OracleIdentityExtra, + offC, // reportingPluginConfig []byte, + 20*time.Millisecond, // maxDurationQuery time.Duration, + 1600*time.Millisecond, // maxDurationObservation time.Duration, + 800*time.Millisecond, // maxDurationReport time.Duration, sum of MaxDurationQuery/Observation/Report must be less than DeltaProgress + 20*time.Millisecond, // maxDurationShouldAcceptFinalizedReport time.Duration, + 20*time.Millisecond, // maxDurationShouldTransmitAcceptedReport time.Duration, + 1, // f int, + nil, // onchainConfig []byte, + ) + if err != nil { + return nil, err + } + + var signers []common.Address + for _, signer := range signerOnchainPublicKeys { + if len(signer) != 20 { + return nil, fmt.Errorf("OnChainPublicKey has wrong length for address") + } + signers = append(signers, common.BytesToAddress(signer)) + } + + var transmitters []common.Address + for _, transmitter := range transmitterAccounts { + if !common.IsHexAddress(string(transmitter)) { + return nil, fmt.Errorf("TransmitAccount is not a valid Ethereum address") + } + transmitters = append(transmitters, common.HexToAddress(string(transmitter))) + } + + configType := abi.MustNewType("tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 
maxCheckDataSize,uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,address registrar)") + onchainConfig, err := abi.Encode(map[string]interface{}{ + "paymentPremiumPPB": d.cfg.PaymentPremiumPBB, + "flatFeeMicroLink": d.cfg.FlatFeeMicroLink, + "checkGasLimit": d.cfg.CheckGasLimit, + "stalenessSeconds": d.cfg.StalenessSeconds, + "gasCeilingMultiplier": d.cfg.GasCeilingMultiplier, + "minUpkeepSpend": d.cfg.MinUpkeepSpend, + "maxPerformGas": d.cfg.MaxPerformGas, + "maxCheckDataSize": d.cfg.MaxCheckDataSize, + "maxPerformDataSize": d.cfg.MaxPerformDataSize, + "fallbackGasPrice": big.NewInt(d.cfg.FallbackGasPrice), + "fallbackLinkPrice": big.NewInt(d.cfg.FallbackLinkPrice), + "transcoder": common.HexToAddress(d.cfg.Transcoder), + "registrar": common.HexToAddress(d.cfg.Registrar), + }, configType) + if err != nil { + return nil, err + } + + return d.KeeperRegistryInterface.SetConfig(opts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +func (d *v20KeeperDeployer) RegisterUpkeepV2(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, pipelineData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) { + panic("not implemented") +} + +type v21KeeperDeployer struct { + iregistry21.IKeeperRegistryMasterInterface + cfg *config.Config +} + +func (d *v21KeeperDeployer) SetKeepers(ctx context.Context, opts *bind.TransactOpts, cls []cmd.HTTPClient, keepers []common.Address, _ []common.Address) (*types.Transaction, error) { + S := make([]int, len(cls)) + oracleIdentities := make([]ocr2config.OracleIdentityExtra, len(cls)) + sharedSecretEncryptionPublicKeys := make([]ocr2types.ConfigEncryptionPublicKey, len(cls)) + var wg sync.WaitGroup + for i, cl := range cls { + wg.Add(1) + go func(i int, cl cmd.HTTPClient) { + defer wg.Done() + + ocr2Config, err := getNodeOCR2Config(ctx, cl) + if err != nil { + panic(err) + } + + 
p2pKeyID, err := getP2PKeyID(ctx, cl) + if err != nil { + panic(err) + } + + offchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.OffChainPublicKey, "ocr2off_evm_")) + if err != nil { + panic(fmt.Errorf("failed to decode %s: %v", ocr2Config.OffChainPublicKey, err)) + } + + offchainPkBytesFixed := [ed25519.PublicKeySize]byte{} + n := copy(offchainPkBytesFixed[:], offchainPkBytes) + if n != ed25519.PublicKeySize { + panic(fmt.Errorf("wrong num elements copied")) + } + + configPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.ConfigPublicKey, "ocr2cfg_evm_")) + if err != nil { + panic(fmt.Errorf("failed to decode %s: %v", ocr2Config.ConfigPublicKey, err)) + } + + configPkBytesFixed := [ed25519.PublicKeySize]byte{} + n = copy(configPkBytesFixed[:], configPkBytes) + if n != ed25519.PublicKeySize { + panic(fmt.Errorf("wrong num elements copied")) + } + + onchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.OnchainPublicKey, "ocr2on_evm_")) + if err != nil { + panic(fmt.Errorf("failed to decode %s: %v", ocr2Config.OnchainPublicKey, err)) + } + + sharedSecretEncryptionPublicKeys[i] = configPkBytesFixed + oracleIdentities[i] = ocr2config.OracleIdentityExtra{ + OracleIdentity: ocr2config.OracleIdentity{ + OnchainPublicKey: onchainPkBytes, + OffchainPublicKey: offchainPkBytesFixed, + PeerID: p2pKeyID, + TransmitAccount: ocr2types.Account(keepers[i].String()), + }, + ConfigEncryptionPublicKey: configPkBytesFixed, + } + S[i] = 1 + }(i, cl) + } + wg.Wait() + + offC, err := json.Marshal(offchain20config.OffchainConfig{ + PerformLockoutWindow: 100 * 3 * 1000, // ~100 block lockout (on mumbai) + MinConfirmations: 1, + MercuryLookup: d.cfg.UpkeepType == config.Mercury || d.cfg.UpkeepType == config.LogTriggeredFeedLookup, + }) + if err != nil { + panic(err) + } + + signerOnchainPublicKeys, transmitterAccounts, f, _, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsForTests( + 5*time.Second, // 
deltaProgress time.Duration, + 10*time.Second, // deltaResend time.Duration, + 400*time.Millisecond, // deltaInitial time.Duration, + 2500*time.Millisecond, // deltaRound time.Duration, + 40*time.Millisecond, // deltaGrace time.Duration, + 300*time.Millisecond, // deltaCertifiedCommitRequest time.Duration, + 30*time.Second, // deltaStage time.Duration, + 50, // rMax uint8, + S, // s []int, + oracleIdentities, // oracles []OracleIdentityExtra, + offC, // reportingPluginConfig []byte, + 20*time.Millisecond, // maxDurationQuery time.Duration, + 1600*time.Millisecond, // maxDurationObservation time.Duration, + 20*time.Millisecond, // maxDurationShouldAcceptFinalizedReport time.Duration, + 20*time.Millisecond, // maxDurationShouldTransmitAcceptedReport time.Duration, + 1, // f int, + nil, // onchainConfig []byte, + ) + if err != nil { + return nil, err + } + + var signers []common.Address + for _, signer := range signerOnchainPublicKeys { + if len(signer) != 20 { + return nil, fmt.Errorf("OnChainPublicKey has wrong length for address") + } + signers = append(signers, common.BytesToAddress(signer)) + } + + var transmitters []common.Address + for _, transmitter := range transmitterAccounts { + if !common.IsHexAddress(string(transmitter)) { + return nil, fmt.Errorf("TransmitAccount is not a valid Ethereum address") + } + transmitters = append(transmitters, common.HexToAddress(string(transmitter))) + } + + onchainConfig := iregistry21.KeeperRegistryBase21OnchainConfig{ + PaymentPremiumPPB: d.cfg.PaymentPremiumPBB, + FlatFeeMicroLink: d.cfg.FlatFeeMicroLink, + CheckGasLimit: d.cfg.CheckGasLimit, + StalenessSeconds: big.NewInt(d.cfg.StalenessSeconds), + GasCeilingMultiplier: d.cfg.GasCeilingMultiplier, + MinUpkeepSpend: big.NewInt(d.cfg.MinUpkeepSpend), + MaxPerformGas: d.cfg.MaxPerformGas, + MaxCheckDataSize: d.cfg.MaxCheckDataSize, + MaxPerformDataSize: d.cfg.MaxPerformDataSize, + MaxRevertDataSize: d.cfg.MaxRevertDataSize, + FallbackGasPrice: 
big.NewInt(d.cfg.FallbackGasPrice), + FallbackLinkPrice: big.NewInt(d.cfg.FallbackLinkPrice), + Transcoder: common.HexToAddress(d.cfg.Transcoder), + Registrars: []common.Address{common.HexToAddress(d.cfg.Registrar)}, + UpkeepPrivilegeManager: common.HexToAddress(d.cfg.UpkeepPrivilegeManager), + } + + return d.IKeeperRegistryMasterInterface.SetConfigTypeSafe(opts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) +} + +// legacy support function +func (d *v21KeeperDeployer) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte, offchainConfig []byte) (*types.Transaction, error) { + return d.IKeeperRegistryMasterInterface.RegisterUpkeep0(opts, target, gasLimit, admin, checkData, offchainConfig) +} + +// the new registerUpkeep function only available on version 2.1 and above +func (d *v21KeeperDeployer) RegisterUpkeepV2(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, triggerType uint8, pipelineData []byte, triggerConfig []byte, offchainConfig []byte) (*types.Transaction, error) { + return d.IKeeperRegistryMasterInterface.RegisterUpkeep(opts, target, gasLimit, admin, triggerType, pipelineData, triggerConfig, offchainConfig) +} diff --git a/core/scripts/chaincli/handler/keeper_launch.go b/core/scripts/chaincli/handler/keeper_launch.go new file mode 100644 index 00000000..18836cbe --- /dev/null +++ b/core/scripts/chaincli/handler/keeper_launch.go @@ -0,0 +1,450 @@ +package handler + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "log" + "math/big" + "net/url" + "os" + "os/signal" + "sync" + "syscall" + + "github.com/ethereum/go-ethereum/common" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + + "github.com/goplugin/plugin-common/pkg/utils/hex" + + "github.com/goplugin/pluginv3.0/v2/core/cmd" + iregistry21 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + registry12 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + registry20 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web" +) + +type startedNodeData struct { + url string + cleanup func(bool) +} + +// LaunchAndTest launches keeper registry, plugin nodes, upkeeps and start performing. +// 1. launch plugin node using docker image +// 2. get keeper registry instance, deploy if needed +// 3. deploy upkeeps +// 4. create keeper jobs +// 5. fund nodes if needed +// 6. set keepers in the registry +// 7. 
withdraw funds after tests are done -> TODO: wait until tests are done instead of cancel manually +func (k *Keeper) LaunchAndTest(ctx context.Context, withdraw, printLogs, force, bootstrap bool) { + lggr, closeLggr := logger.NewLogger() + logger.Sugared(lggr).ErrorIfFn(closeLggr, "Failed to close logger") + + if bootstrap { + baseHandler := NewBaseHandler(k.cfg) + tcpAddr := baseHandler.StartBootstrapNode(ctx, k.cfg.RegistryAddress, 5688, 8000, force) + k.cfg.BootstrapNodeAddr = tcpAddr + } + + var extraTOML string + if k.cfg.OCR2Keepers { + extraTOML = "[P2P]\n[P2P.V2]\nListenAddresses = [\"0.0.0.0:8000\"]" + } + + // Run plugin nodes and create jobs + startedNodes := make([]startedNodeData, k.cfg.KeepersCount) + var wg sync.WaitGroup + for i := 0; i < k.cfg.KeepersCount; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + startedNodes[i] = startedNodeData{} + + // Run plugin node + var err error + if startedNodes[i].url, startedNodes[i].cleanup, err = k.launchPluginNode(ctx, 6688+i, fmt.Sprintf("keeper-%d", i), extraTOML, force); err != nil { + log.Fatal("Failed to start node: ", err) + } + }(i) + } + wg.Wait() + + // Deploy keeper registry or get an existing one + upkeepCount, registryAddr, deployer := k.prepareRegistry(ctx) + + // Approve keeper registry + k.approveFunds(ctx, registryAddr) + + // Prepare keeper addresses and owners + var keepers []common.Address + var owners []common.Address + var cls []cmd.HTTPClient + for i, startedNode := range startedNodes { + // Create authenticated client + var cl cmd.HTTPClient + var err error + cl, err = authenticate(ctx, startedNode.url, defaultPluginNodeLogin, defaultPluginNodePassword, lggr) + if err != nil { + log.Fatal("Authentication failed, ", err) + } + + var nodeAddrHex string + + if len(k.cfg.KeeperKeys) > 0 { + // import key if exists + nodeAddrHex, err = k.addKeyToKeeper(ctx, cl, k.cfg.KeeperKeys[i]) + if err != nil { + log.Fatal("could not add key to keeper", err) + } + } else { + // get node's 
default wallet address + nodeAddrHex, err = getNodeAddress(ctx, cl) + if err != nil { + log.Println("Failed to get node addr: ", err) + continue + } + } + + nodeAddr := common.HexToAddress(nodeAddrHex) + + // Create keepers + if err = k.createKeeperJob(ctx, cl, registryAddr.Hex(), nodeAddr.Hex()); err != nil { + log.Println("Failed to create keeper job: ", err) + continue + } + + // Fund node if needed + fundAmt, ok := (&big.Int{}).SetString(k.cfg.FundNodeAmount, 10) + if !ok { + log.Printf("failed to parse FUND_PLUGIN_NODE: %s", k.cfg.FundNodeAmount) + continue + } + if fundAmt.Cmp(big.NewInt(0)) != 0 { + if err = k.sendEth(ctx, nodeAddr, fundAmt); err != nil { + log.Println("Failed to fund plugin node: ", err) + continue + } + } + + cls = append(cls, cl) + keepers = append(keepers, nodeAddr) + owners = append(owners, k.fromAddr) + } + + if len(keepers) == 0 { + log.Fatal("no keepers available") + } + + // Set Keepers + k.setKeepers(ctx, cls, deployer, keepers, owners) + + // Deploy Upkeeps + k.deployUpkeeps(ctx, registryAddr, deployer, upkeepCount) + + log.Println("All nodes successfully launched, now running. Use Ctrl+C to terminate") + + termChan := make(chan os.Signal, 1) + signal.Notify(termChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + <-termChan // Blocks here until either SIGINT or SIGTERM is received. 
+ log.Println("Stopping...") + + // Cleanup resources + for _, startedNode := range startedNodes { + if startedNode.cleanup != nil { + startedNode.cleanup(printLogs) + } + } + + // Cancel upkeeps and withdraw funds + if withdraw { + log.Println("Canceling upkeeps...") + switch k.cfg.RegistryVersion { + case keeper.RegistryVersion_1_1: + if err := k.cancelAndWithdrawUpkeeps(ctx, big.NewInt(upkeepCount), deployer); err != nil { + log.Fatal("Failed to cancel upkeeps: ", err) + } + case keeper.RegistryVersion_1_2: + registry, err := registry12.NewKeeperRegistry( + registryAddr, + k.client, + ) + if err != nil { + log.Fatal("Registry failed: ", err) + } + + activeUpkeepIds := k.getActiveUpkeepIds(ctx, registry, big.NewInt(0), big.NewInt(0)) + if err := k.cancelAndWithdrawActiveUpkeeps(ctx, activeUpkeepIds, deployer); err != nil { + log.Fatal("Failed to cancel upkeeps: ", err) + } + case keeper.RegistryVersion_2_0: + registry, err := registry20.NewKeeperRegistry( + registryAddr, + k.client, + ) + if err != nil { + log.Fatal("Registry failed: ", err) + } + + activeUpkeepIds := k.getActiveUpkeepIds(ctx, registry, big.NewInt(0), big.NewInt(0)) + if err := k.cancelAndWithdrawActiveUpkeeps(ctx, activeUpkeepIds, deployer); err != nil { + log.Fatal("Failed to cancel upkeeps: ", err) + } + case keeper.RegistryVersion_2_1: + registry, err := iregistry21.NewIKeeperRegistryMaster( + registryAddr, + k.client, + ) + if err != nil { + log.Fatal("Registry failed: ", err) + } + activeUpkeepIds := k.getActiveUpkeepIds(ctx, registry, big.NewInt(0), big.NewInt(0)) + if err := k.cancelAndWithdrawActiveUpkeeps(ctx, activeUpkeepIds, deployer); err != nil { + log.Fatal("Failed to cancel upkeeps: ", err) + } + default: + panic("unexpected registry address") + } + log.Println("Upkeeps successfully canceled") + } +} + +// cancelAndWithdrawActiveUpkeeps cancels all active upkeeps and withdraws funds for registry 1.2 +func (k *Keeper) cancelAndWithdrawActiveUpkeeps(ctx context.Context, 
activeUpkeepIds []*big.Int, canceller canceller) error { + for i := 0; i < len(activeUpkeepIds); i++ { + upkeepId := activeUpkeepIds[i] + tx, err := canceller.CancelUpkeep(k.buildTxOpts(ctx), upkeepId) + if err != nil { + return fmt.Errorf("failed to cancel upkeep %s: %s", upkeepId.String(), err) + } + + if err = k.waitTx(ctx, tx); err != nil { + log.Fatalf("failed to cancel upkeep for upkeepId: %s, error is: %s", upkeepId.String(), err.Error()) + } + + tx, err = canceller.WithdrawFunds(k.buildTxOpts(ctx), upkeepId, k.fromAddr) + if err != nil { + return fmt.Errorf("failed to withdraw upkeep %s: %s", upkeepId.String(), err) + } + + if err = k.waitTx(ctx, tx); err != nil { + log.Fatalf("failed to withdraw upkeep for upkeepId: %s, error is: %s", upkeepId.String(), err.Error()) + } + + log.Printf("Upkeep %s successfully canceled and refunded: ", upkeepId.String()) + } + + tx, err := canceller.RecoverFunds(k.buildTxOpts(ctx)) + if err != nil { + return fmt.Errorf("failed to recover funds: %s", err) + } + + if err = k.waitTx(ctx, tx); err != nil { + log.Fatalf("failed to recover funds, error is: %s", err.Error()) + } + + return nil +} + +// cancelAndWithdrawUpkeeps cancels all upkeeps for 1.1 registry and withdraws funds +func (k *Keeper) cancelAndWithdrawUpkeeps(ctx context.Context, upkeepCount *big.Int, canceller canceller) error { + var err error + for i := int64(0); i < upkeepCount.Int64(); i++ { + var tx *ethtypes.Transaction + if tx, err = canceller.CancelUpkeep(k.buildTxOpts(ctx), big.NewInt(i)); err != nil { + return fmt.Errorf("failed to cancel upkeep %d: %s", i, err) + } + + if err = k.waitTx(ctx, tx); err != nil { + log.Fatalf("failed to cancel upkeep, error is: %s", err.Error()) + } + + if tx, err = canceller.WithdrawFunds(k.buildTxOpts(ctx), big.NewInt(i), k.fromAddr); err != nil { + return fmt.Errorf("failed to withdraw upkeep %d: %s", i, err) + } + + if err = k.waitTx(ctx, tx); err != nil { + log.Fatalf("failed to withdraw upkeep, error is: %s", 
err.Error()) + } + + log.Println("Upkeep successfully canceled and refunded: ", i) + } + + var tx *ethtypes.Transaction + if tx, err = canceller.RecoverFunds(k.buildTxOpts(ctx)); err != nil { + return fmt.Errorf("failed to recover funds: %s", err) + } + + if err = k.waitTx(ctx, tx); err != nil { + log.Fatalf("failed to recover funds, error is: %s", err.Error()) + } + + return nil +} + +// createKeeperJob creates a keeper job in the plugin node by the given address +func (k *Keeper) createKeeperJob(ctx context.Context, client cmd.HTTPClient, registryAddr, nodeAddr string) error { + var err error + if k.cfg.OCR2Keepers { + err = k.createOCR2KeeperJob(ctx, client, registryAddr, nodeAddr) + } else { + err = k.createLegacyKeeperJob(ctx, client, registryAddr, nodeAddr) + } + if err != nil { + return err + } + + log.Println("Keeper job has been successfully created in the Plugin node with address: ", nodeAddr) + + return nil +} + +// createLegacyKeeperJob creates a legacy keeper job in the plugin node by the given address +func (k *Keeper) createLegacyKeeperJob(ctx context.Context, client cmd.HTTPClient, registryAddr, nodeAddr string) error { + request, err := json.Marshal(web.CreateJobRequest{ + TOML: testspecs.GenerateKeeperSpec(testspecs.KeeperSpecParams{ + Name: fmt.Sprintf("keeper job - registry %s", registryAddr), + ContractAddress: registryAddr, + FromAddress: nodeAddr, + EvmChainID: int(k.cfg.ChainID), + }).Toml(), + }) + if err != nil { + return fmt.Errorf("failed to marshal request: %s", err) + } + + resp, err := client.Post(ctx, "/v2/jobs", bytes.NewReader(request)) + if err != nil { + return fmt.Errorf("failed to create keeper job: %s", err) + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + body, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("failed to read error response body: %s", err) + } + + return fmt.Errorf("unable to create keeper job: '%v' [%d]", string(body), resp.StatusCode) + } + + return nil +} + +const 
ocr2keeperJobTemplate = `type = "offchainreporting2" +pluginType = "ocr2automation" +relay = "evm" +name = "ocr2-automation" +forwardingAllowed = false +schemaVersion = 1 +contractID = "%s" +contractConfigTrackerPollInterval = "15s" +ocrKeyBundleID = "%s" +transmitterID = "%s" +p2pv2Bootstrappers = [ + "%s" +] + +[relayConfig] +chainID = %d + +[pluginConfig] +maxServiceWorkers = 100 +cacheEvictionInterval = "1s" +contractVersion = "%s" +mercuryCredentialName = "%s"` + +// createOCR2KeeperJob creates an ocr2keeper job in the plugin node by the given address +func (k *Keeper) createOCR2KeeperJob(ctx context.Context, client cmd.HTTPClient, contractAddr, nodeAddr string) error { + ocr2KeyConfig, err := getNodeOCR2Config(ctx, client) + if err != nil { + return fmt.Errorf("failed to get node OCR2 key bundle ID: %s", err) + } + + // Correctly assign contract version in OCR job spec. + contractVersion := "v2.0" + if k.cfg.RegistryVersion == keeper.RegistryVersion_2_1 { + contractVersion = "v2.1" + } + + request, err := json.Marshal(web.CreateJobRequest{ + TOML: fmt.Sprintf(ocr2keeperJobTemplate, + contractAddr, // contractID + ocr2KeyConfig.ID, // ocrKeyBundleID + nodeAddr, // transmitterID - node wallet address + k.cfg.BootstrapNodeAddr, // bootstrap node key and address + k.cfg.ChainID, // chainID + contractVersion, // contractVersion + k.cfg.DataStreamsCredName, // mercury credential name + ), + }) + if err != nil { + return fmt.Errorf("failed to marshal request: %s", err) + } + + resp, err := client.Post(ctx, "/v2/jobs", bytes.NewReader(request)) + if err != nil { + return fmt.Errorf("failed to create ocr2keeper job: %s", err) + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + body, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("failed to read error response body: %s", err) + } + + return fmt.Errorf("unable to create ocr2keeper job: '%v' [%d]", string(body), resp.StatusCode) + } + + return nil +} + +// addKeyToKeeper imports the 
provided ETH sending key to the keeper +func (k *Keeper) addKeyToKeeper(ctx context.Context, client cmd.HTTPClient, privKeyHex string) (string, error) { + privkey, err := crypto.HexToECDSA(hex.TrimPrefix(privKeyHex)) + if err != nil { + log.Fatalf("Failed to decode priv key %s: %v", privKeyHex, err) + } + address := crypto.PubkeyToAddress(privkey.PublicKey).Hex() + log.Printf("importing keeper key %s", address) + keyJSON, err := ethkey.FromPrivateKey(privkey).ToEncryptedJSON(defaultPluginNodePassword, utils.FastScryptParams) + if err != nil { + log.Fatalf("Failed to encrypt piv key %s: %v", privKeyHex, err) + } + importUrl := url.URL{ + Path: "/v2/keys/evm/import", + } + query := importUrl.Query() + + query.Set("oldpassword", defaultPluginNodePassword) + query.Set("evmChainID", fmt.Sprint(k.cfg.ChainID)) + + importUrl.RawQuery = query.Encode() + resp, err := client.Post(ctx, importUrl.String(), bytes.NewReader(keyJSON)) + if err != nil { + log.Fatalf("Failed to import priv key %s: %v", privKeyHex, err) + } + + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("failed to read error response body: %s", err) + } + + return "", fmt.Errorf("unable to create ocr2keeper job: '%v' [%d]", string(body), resp.StatusCode) + } + + return address, nil +} diff --git a/core/scripts/chaincli/handler/keeper_upkeep_events.go b/core/scripts/chaincli/handler/keeper_upkeep_events.go new file mode 100644 index 00000000..4473c473 --- /dev/null +++ b/core/scripts/chaincli/handler/keeper_upkeep_events.go @@ -0,0 +1,65 @@ +package handler + +import ( + "context" + "encoding/csv" + "fmt" + "log" + "os" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/upkeep_counter_wrapper" +) + +// UpkeepCounterEvents print out emitted events and write to csv file +func (k *Keeper) UpkeepCounterEvents(ctx 
context.Context, hexAddr string, fromBlock, toBlock uint64) { + contractAddress := common.HexToAddress(hexAddr) + upkeepCounter, err := upkeep_counter_wrapper.NewUpkeepCounter(contractAddress, k.client) + if err != nil { + log.Fatalln("Failed to create a new upkeep counter", err) + } + filterOpts := bind.FilterOpts{ + Start: fromBlock, + End: &toBlock, + Context: ctx, + } + upkeepIterator, err := upkeepCounter.FilterPerformingUpkeep(&filterOpts, nil) + if err != nil { + log.Fatalln("Failed to get upkeep iterator", err) + } + filename := fmt.Sprintf("%s.csv", hexAddr) + file, err := os.Create(filename) + if err != nil { + log.Fatalln("failed to open file", err) + } + defer file.Close() + + w := csv.NewWriter(file) + defer w.Flush() + + fmt.Println("From, InitialBlock, LastBlock, PreviousBlock, Counter") + row := []string{"From", "InitialBlock", "LastBlock", "PreviousBlock", "Counter"} + if err = w.Write(row); err != nil { + log.Fatalln("error writing record to file", err) + } + + for upkeepIterator.Next() { + fmt.Printf("%s,%s,%s,%s,%s\n", + upkeepIterator.Event.From, + upkeepIterator.Event.InitialBlock, + upkeepIterator.Event.LastBlock, + upkeepIterator.Event.PreviousBlock, + upkeepIterator.Event.Counter, + ) + row = []string{upkeepIterator.Event.From.String(), + upkeepIterator.Event.InitialBlock.String(), + upkeepIterator.Event.LastBlock.String(), + upkeepIterator.Event.PreviousBlock.String(), + upkeepIterator.Event.Counter.String()} + if err = w.Write(row); err != nil { + log.Fatalln("error writing record to file", err) + } + } +} diff --git a/core/scripts/chaincli/handler/keeper_upkeep_history.go b/core/scripts/chaincli/handler/keeper_upkeep_history.go new file mode 100644 index 00000000..8c1c644d --- /dev/null +++ b/core/scripts/chaincli/handler/keeper_upkeep_history.go @@ -0,0 +1,277 @@ +package handler + +import ( + "context" + "encoding/hex" + "fmt" + "log" + "math/big" + "os" + "strings" + "text/tabwriter" + + "github.com/ethereum/go-ethereum/accounts/abi" 
+ "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + + registry11 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + registry12 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" +) + +const ( + defaultMaxBlocksRange = 1000 + defaultLookBackRange = 1000 +) + +var ( + checkUpkeepArguments1 abi.Arguments + checkUpkeepArguments2 abi.Arguments + registry11ABI = keeper.Registry1_1ABI + registry12ABI = keeper.Registry1_2ABI +) + +type result struct { + block uint64 + checkUpkeep bool + keeperIndex uint64 + keeperAddress common.Address + reason string + performData string + maxLinkPayment *big.Int + gasLimit *big.Int + adjustedGasWei *big.Int + linkEth *big.Int +} + +func init() { + checkUpkeepArguments1 = registry11ABI.Methods["checkUpkeep"].Outputs + checkUpkeepArguments2 = registry12ABI.Methods["checkUpkeep"].Outputs +} + +// UpkeepHistory prints the checkUpkeep status and keeper responsibility for a given upkeep in a set block range +func (k *Keeper) UpkeepHistory(ctx context.Context, upkeepId *big.Int, from, to, gasPrice uint64) { + // There must not be a large difference between boundaries + if to-from > defaultMaxBlocksRange { + log.Fatalf("blocks range difference must not be more than %d", defaultMaxBlocksRange) + } + + var keeperRegistry11 *registry11.KeeperRegistry + var keeperRegistry12 *registry12.KeeperRegistry + // var keeperRegistry20 *registry20.KeeperRegistry + + switch k.cfg.RegistryVersion { + case keeper.RegistryVersion_1_1: + _, keeperRegistry11 = k.getRegistry11(ctx) + case keeper.RegistryVersion_1_2: + _, keeperRegistry12 = k.getRegistry12(ctx) + default: + panic("unsupported registry 
version") + } + + log.Println("Preparing a batch call request") + var reqs []rpc.BatchElem + var results []*string + var keeperPerBlockIndex []uint64 + var keeperPerBlockAddress []common.Address + for block := from; block <= to; block++ { + callOpts := &bind.CallOpts{ + Context: ctx, + BlockNumber: big.NewInt(0).SetUint64(block), + } + + var keepers []common.Address + var bcpt uint64 + var payload []byte + var keeperIndex uint64 + var lastKeeper common.Address + + switch k.cfg.RegistryVersion { + case keeper.RegistryVersion_1_1: + config, err2 := keeperRegistry11.GetConfig(callOpts) + if err2 != nil { + log.Fatal("failed to fetch registry config: ", err2) + } + + bcpt = config.BlockCountPerTurn.Uint64() + keepers, err2 = keeperRegistry11.GetKeeperList(callOpts) + if err2 != nil { + log.Fatal("failed to fetch keepers list: ", err2) + } + + upkeep, err2 := keeperRegistry11.GetUpkeep(callOpts, upkeepId) + if err2 != nil { + log.Fatal("failed to fetch the upkeep: ", err2) + } + lastKeeper = upkeep.LastKeeper + + case keeper.RegistryVersion_1_2: + state, err2 := keeperRegistry12.GetState(callOpts) + if err2 != nil { + log.Fatal("failed to fetch registry state: ", err2) + } + bcpt = state.Config.BlockCountPerTurn.Uint64() + keepers = state.Keepers + + upkeep, err2 := keeperRegistry12.GetUpkeep(callOpts, upkeepId) + if err2 != nil { + log.Fatal("failed to fetch the upkeep: ", err2) + } + lastKeeper = upkeep.LastKeeper + + default: + panic("unsupported registry version") + } + + turnBinary, err2 := turnBlockHashBinary(block, bcpt, defaultLookBackRange, k.client) + if err2 != nil { + log.Fatal("failed to calculate turn block hash: ", err2) + } + + // least significant 32 bits of upkeep id + lhs := keeper.LeastSignificant32(upkeepId) + + // least significant 32 bits of the turn block hash + turnBinaryPtr, ok := math.ParseBig256(string([]byte(turnBinary)[len(turnBinary)-32:])) + if !ok { + log.Fatal("failed to parse turn binary ", turnBinary) + } + rhs := 
keeper.LeastSignificant32(turnBinaryPtr) + + // bitwise XOR + turn := lhs ^ rhs + + keepersCnt := uint64(len(keepers)) + keeperIndex = turn % keepersCnt + if keepers[keeperIndex] == lastKeeper { + keeperIndex = (keeperIndex + keepersCnt - 1) % keepersCnt + } + + switch k.cfg.RegistryVersion { + case keeper.RegistryVersion_1_1: + payload, err2 = registry11ABI.Pack("checkUpkeep", upkeepId, keepers[keeperIndex]) + if err2 != nil { + log.Fatal("failed to pack checkUpkeep: ", err2) + } + case keeper.RegistryVersion_1_2: + payload, err2 = registry12ABI.Pack("checkUpkeep", upkeepId, keepers[keeperIndex]) + if err2 != nil { + log.Fatal("failed to pack checkUpkeep: ", err2) + } + default: + panic("unsupported registry version") + } + + args := map[string]interface{}{ + "to": k.cfg.RegistryAddress, + "data": hexutil.Bytes(payload), + } + if gasPrice > 0 { + args["gasPrice"] = hexutil.EncodeUint64(gasPrice) + } + + var res string + reqs = append(reqs, rpc.BatchElem{ + Method: "eth_call", + Args: []interface{}{ + args, + // The block at which we want to inspect the upkeep state + hexutil.EncodeUint64(block), + }, + Result: &res, + }) + + results = append(results, &res) + keeperPerBlockIndex = append(keeperPerBlockIndex, keeperIndex) + keeperPerBlockAddress = append(keeperPerBlockAddress, keepers[keeperIndex]) + } + + k.batchProcess(ctx, reqs, from, keeperPerBlockIndex, keeperPerBlockAddress, results) +} + +func (k *Keeper) batchProcess(ctx context.Context, reqs []rpc.BatchElem, from uint64, keeperPerBlockIndex []uint64, keeperPerBlockAddress []common.Address, results []*string) { + log.Println("Doing batch call to check upkeeps") + if err := k.rpcClient.BatchCallContext(ctx, reqs); err != nil { + log.Fatal("failed to batch call checkUpkeep: ", err) + } + + log.Println("Parsing batch call response") + var parsedResults []result + isVersion12 := k.cfg.RegistryVersion == keeper.RegistryVersion_1_2 + for i, req := range reqs { + if req.Error != nil { + parsedResults = 
append(parsedResults, result{ + block: uint64(i) + from, + checkUpkeep: false, + keeperIndex: keeperPerBlockIndex[i], + keeperAddress: keeperPerBlockAddress[i], + reason: strings.TrimPrefix(req.Error.Error(), "execution reverted: "), + }) + continue + } + + var returnValues []interface{} + var err error + if isVersion12 { + returnValues, err = checkUpkeepArguments2.UnpackValues(hexutil.MustDecode(*results[i])) + } else { + returnValues, err = checkUpkeepArguments1.UnpackValues(hexutil.MustDecode(*results[i])) + } + if err != nil { + log.Fatal("unpack checkUpkeep return: ", err, *results[i]) + } + + parsedResults = append(parsedResults, result{ + block: uint64(i) + from, + checkUpkeep: true, + keeperIndex: keeperPerBlockIndex[i], + keeperAddress: keeperPerBlockAddress[i], + performData: "0x" + hex.EncodeToString(*abi.ConvertType(returnValues[0], new([]byte)).(*[]byte)), + maxLinkPayment: *abi.ConvertType(returnValues[1], new(*big.Int)).(**big.Int), + gasLimit: *abi.ConvertType(returnValues[2], new(*big.Int)).(**big.Int), + adjustedGasWei: *abi.ConvertType(returnValues[3], new(*big.Int)).(**big.Int), + linkEth: *abi.ConvertType(returnValues[4], new(*big.Int)).(**big.Int), + }) + } + + printResultsToConsole(parsedResults) +} + +// printResultsToConsole writes parsed results to the console +func printResultsToConsole(parsedResults []result) { + writer := tabwriter.NewWriter(os.Stdout, 8, 8, 0, '\t', 0) + defer writer.Flush() + + fmt.Fprintf(writer, "\n %s\t\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t", "Block", "checkUpkeep", "Keeper Index", "Keeper Address", "Max PLI Payment", "Gas Limit", "Adjusted Gas", "PLI ETH", "Perform Data", "Reason") + fmt.Fprintf(writer, "\n %s\t\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t", "----", "----", "----", "----", "----", "----", "----", "----", "----", "----") + for _, res := range parsedResults { + fmt.Fprintf(writer, "\n %d\t\t%t\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t", + res.block, + res.checkUpkeep, + res.keeperIndex, + res.keeperAddress, + 
res.maxLinkPayment, + res.gasLimit, + res.adjustedGasWei, + res.linkEth, + res.performData, + res.reason, + ) + } + fmt.Fprintf(writer, "\n %s\t\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t\n", "----", "----", "----", "----", "----", "----", "----", "----", "----", "----") +} + +func turnBlockHashBinary(blockNum, bcpt, lookback uint64, ethClient *ethclient.Client) (string, error) { + turnBlock := blockNum - (blockNum % bcpt) - lookback + block, err := ethClient.BlockByNumber(context.Background(), big.NewInt(int64(turnBlock))) + if err != nil { + return "", err + } + hashAtHeight := block.Hash() + binaryString := fmt.Sprintf("%b", hashAtHeight.Big()) + return binaryString, nil +} diff --git a/core/scripts/chaincli/handler/keeper_verifiable_load.go b/core/scripts/chaincli/handler/keeper_verifiable_load.go new file mode 100644 index 00000000..53f4f6cc --- /dev/null +++ b/core/scripts/chaincli/handler/keeper_verifiable_load.go @@ -0,0 +1,219 @@ +package handler + +import ( + "context" + "fmt" + "log" + "math/big" + "sort" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/montanaflynn/stats" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/verifiable_load_upkeep_wrapper" +) + +const ( + // workerNum is the total number of workers calculating upkeeps' delay summary + workerNum = 5 + // retryDelay is the time the go routine will wait before calling the same contract function + retryDelay = 1 * time.Second + // retryNum defines how many times the go routine will attempt the same contract call + retryNum = 3 + // maxUpkeepNum defines the size of channels. Increase if there are lots of upkeeps. 
+ maxUpkeepNum = 100 +) + +type upkeepInfo struct { + mu sync.Mutex + ID *big.Int + Bucket uint16 + DelayBuckets map[uint16][]float64 + SortedAllDelays []float64 + TotalDelayBlock float64 + TotalPerforms uint64 +} + +type verifiableLoad interface { + GetAllActiveUpkeepIDsOnRegistry(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + Counters(opts *bind.CallOpts, upkeepId *big.Int) (*big.Int, error) + GetBucketedDelays(opts *bind.CallOpts, upkeepId *big.Int, bucket uint16) ([]*big.Int, error) + Buckets(opts *bind.CallOpts, arg0 *big.Int) (uint16, error) +} + +func (ui *upkeepInfo) AddBucket(bucketNum uint16, bucketDelays []float64) { + ui.mu.Lock() + defer ui.mu.Unlock() + ui.DelayBuckets[bucketNum] = bucketDelays +} + +type upkeepStats struct { + BlockNumber uint64 + AllInfos []*upkeepInfo + TotalDelayBlock float64 + TotalPerforms uint64 + SortedAllDelays []float64 +} + +func (k *Keeper) PrintVerifiableLoadStats(ctx context.Context, csv bool) { + var v verifiableLoad + var err error + addr := common.HexToAddress(k.cfg.VerifiableLoadContractAddress) + v, err = verifiable_load_upkeep_wrapper.NewVerifiableLoadUpkeep(addr, k.client) + if err != nil { + log.Fatalf("failed to create a new verifiable load upkeep from address %s: %v", k.cfg.VerifiableLoadContractAddress, err) + } + + // get all the stats from this block + blockNum, err := k.client.BlockNumber(ctx) + if err != nil { + log.Fatalf("failed to get block number: %v", err) + } + + opts := &bind.CallOpts{ + From: k.fromAddr, + Context: ctx, + BlockNumber: big.NewInt(int64(blockNum)), + } + + // get all active upkeep IDs on this verifiable load contract + upkeepIds, err := v.GetAllActiveUpkeepIDsOnRegistry(opts, big.NewInt(0), big.NewInt(0)) + if err != nil { + log.Fatalf("failed to get active upkeep IDs from %s: %v", k.cfg.VerifiableLoadContractAddress, err) + } + + if csv { + fmt.Println("upkeep ID,total performs,p50,p90,p95,p99,max delay,total delay blocks,average perform delay") + 
} + + us := &upkeepStats{BlockNumber: blockNum} + + resultsChan := make(chan *upkeepInfo, maxUpkeepNum) + idChan := make(chan *big.Int, maxUpkeepNum) + + var wg sync.WaitGroup + + // create a number of workers to process the upkeep ids in batch + for i := 0; i < workerNum; i++ { + wg.Add(1) + go k.fetchUpkeepInfo(idChan, resultsChan, v, opts, &wg, csv) + } + + for _, id := range upkeepIds { + idChan <- id + } + + close(idChan) + wg.Wait() + + close(resultsChan) + + for info := range resultsChan { + us.AllInfos = append(us.AllInfos, info) + us.TotalPerforms += info.TotalPerforms + us.TotalDelayBlock += info.TotalDelayBlock + us.SortedAllDelays = append(us.SortedAllDelays, info.SortedAllDelays...) + } + + sort.Float64s(us.SortedAllDelays) + + log.Println("\n\n================================== ALL UPKEEPS SUMMARY =======================================================") + p50, _ := stats.Percentile(us.SortedAllDelays, 50) + p90, _ := stats.Percentile(us.SortedAllDelays, 90) + p95, _ := stats.Percentile(us.SortedAllDelays, 95) + p99, _ := stats.Percentile(us.SortedAllDelays, 99) + + maxDelay := float64(0) + if len(us.SortedAllDelays) > 0 { + maxDelay = us.SortedAllDelays[len(us.SortedAllDelays)-1] + } + log.Printf("For total %d upkeeps: total performs: %d, p50: %f, p90: %f, p95: %f, p99: %f, max delay: %f, total delay blocks: %f, average perform delay: %f\n", len(upkeepIds), us.TotalPerforms, p50, p90, p95, p99, maxDelay, us.TotalDelayBlock, us.TotalDelayBlock/float64(us.TotalPerforms)) + log.Printf("All STATS ABOVE ARE CALCULATED AT BLOCK %d", blockNum) +} + +func (k *Keeper) fetchUpkeepInfo(idChan chan *big.Int, resultsChan chan *upkeepInfo, v verifiableLoad, opts *bind.CallOpts, wg *sync.WaitGroup, csv bool) { + defer wg.Done() + + for id := range idChan { + // fetch how many times this upkeep has been executed + c, err := v.Counters(opts, id) + if err != nil { + log.Fatalf("failed to get counter for %s: %v", id.String(), err) + } + + // get all the buckets of an 
upkeep. 100 performs is a bucket. + b, err := v.Buckets(opts, id) + if err != nil { + log.Fatalf("failed to get current bucket count for %s: %v", id.String(), err) + } + + info := &upkeepInfo{ + ID: id, + Bucket: b, + TotalPerforms: c.Uint64(), + DelayBuckets: map[uint16][]float64{}, + } + + var delays []float64 + var wg1 sync.WaitGroup + for i := uint16(0); i <= b; i++ { + wg1.Add(1) + go k.fetchBucketData(v, opts, id, i, &wg1, info) + } + wg1.Wait() + + for i := uint16(0); i <= b; i++ { + bucketDelays := info.DelayBuckets[i] + delays = append(delays, bucketDelays...) + for _, d := range bucketDelays { + info.TotalDelayBlock += d + } + } + sort.Float64s(delays) + info.SortedAllDelays = delays + info.TotalPerforms = uint64(len(info.SortedAllDelays)) + + p50, _ := stats.Percentile(info.SortedAllDelays, 50) + p90, _ := stats.Percentile(info.SortedAllDelays, 90) + p95, _ := stats.Percentile(info.SortedAllDelays, 95) + p99, _ := stats.Percentile(info.SortedAllDelays, 99) + + maxDelay := float64(0) + + if len(info.SortedAllDelays) > 0 { + maxDelay = info.SortedAllDelays[len(info.SortedAllDelays)-1] + } + + if csv { + fmt.Printf("%s,%d,%f,%f,%f,%f,%f,%d,%f\n", id, info.TotalPerforms, p50, p90, p95, p99, maxDelay, uint64(info.TotalDelayBlock), info.TotalDelayBlock/float64(info.TotalPerforms)) + } else { + log.Printf("upkeep ID %s has %d performs in total. 
p50: %f, p90: %f, p95: %f, p99: %f, max delay: %f, total delay blocks: %d, average perform delay: %f\n", id, info.TotalPerforms, p50, p90, p95, p99, maxDelay, uint64(info.TotalDelayBlock), info.TotalDelayBlock/float64(info.TotalPerforms)) + } + resultsChan <- info + } +} + +func (k *Keeper) fetchBucketData(v verifiableLoad, opts *bind.CallOpts, id *big.Int, bucketNum uint16, wg *sync.WaitGroup, info *upkeepInfo) { + defer wg.Done() + + var bucketDelays []*big.Int + var err error + for i := 0; i < retryNum; i++ { + bucketDelays, err = v.GetBucketedDelays(opts, id, bucketNum) + if err == nil { + break + } + log.Printf("failed to get bucketed delays for upkeep id %s bucket %d: %v, retrying...", id.String(), bucketNum, err) + time.Sleep(retryDelay) + } + + var floatBucketDelays []float64 + for _, d := range bucketDelays { + floatBucketDelays = append(floatBucketDelays, float64(d.Uint64())) + } + sort.Float64s(floatBucketDelays) + info.AddBucket(bucketNum, floatBucketDelays) +} diff --git a/core/scripts/chaincli/handler/keeper_withdraw.go b/core/scripts/chaincli/handler/keeper_withdraw.go new file mode 100644 index 00000000..e22bb01d --- /dev/null +++ b/core/scripts/chaincli/handler/keeper_withdraw.go @@ -0,0 +1,74 @@ +package handler + +import ( + "context" + "log" + "math/big" + + registry20 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + + registry11 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + registry12 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" +) + +// Withdraw takes a keeper registry address, cancels all upkeeps and withdraws the funds +func (k *Keeper) Withdraw(ctx context.Context, hexAddr string) { + registryAddr := common.HexToAddress(hexAddr) + switch 
k.cfg.RegistryVersion { + case keeper.RegistryVersion_1_1: + keeperRegistry11, err := registry11.NewKeeperRegistry( + registryAddr, + k.client, + ) + if err != nil { + log.Fatal("Registry failed: ", err) + } + + upkeepCount, err := keeperRegistry11.GetUpkeepCount(&bind.CallOpts{Context: ctx}) + if err != nil { + log.Fatal("failed to get upkeeps count: ", err) + } + + log.Println("Canceling upkeeps...") + if err = k.cancelAndWithdrawUpkeeps(ctx, upkeepCount, keeperRegistry11); err != nil { + log.Fatal("Failed to cancel upkeeps: ", err) + } + case keeper.RegistryVersion_1_2: + keeperRegistry12, err := registry12.NewKeeperRegistry( + registryAddr, + k.client, + ) + if err != nil { + log.Fatal("Registry failed: ", err) + } + + activeUpkeepIds := k.getActiveUpkeepIds(ctx, keeperRegistry12, big.NewInt(0), big.NewInt(0)) + + log.Println("Canceling upkeeps...") + if err = k.cancelAndWithdrawActiveUpkeeps(ctx, activeUpkeepIds, keeperRegistry12); err != nil { + log.Fatal("Failed to cancel upkeeps: ", err) + } + case keeper.RegistryVersion_2_0: + keeperRegistry20, err := registry20.NewKeeperRegistry( + registryAddr, + k.client, + ) + if err != nil { + log.Fatal("Registry failed: ", err) + } + + activeUpkeepIds := k.getActiveUpkeepIds(ctx, keeperRegistry20, big.NewInt(0), big.NewInt(0)) + + log.Println("Canceling upkeeps...") + if err = k.cancelAndWithdrawActiveUpkeeps(ctx, activeUpkeepIds, keeperRegistry20); err != nil { + log.Fatal("Failed to cancel upkeeps: ", err) + } + default: + panic("unexpected registry version") + } + log.Println("Upkeeps successfully canceled") +} diff --git a/core/scripts/chaincli/handler/logs.go b/core/scripts/chaincli/handler/logs.go new file mode 100644 index 00000000..7e435768 --- /dev/null +++ b/core/scripts/chaincli/handler/logs.go @@ -0,0 +1,159 @@ +package handler + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "log" + "os" + "os/signal" + "regexp" + "strings" + "sync" + "syscall" + + "github.com/docker/docker/api/types" 
+ "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" +) + +func (k *Keeper) PrintLogs(ctx context.Context, pattern string, grep, vgrep []string) { + k.streamLogs(ctx, pattern, grep, vgrep) +} + +func (k *Keeper) streamLogs(ctx context.Context, pattern string, grep, vgrep []string) { + dockerClient, err := client.NewClientWithOpts(client.WithAPIVersionNegotiation()) + if err != nil { + return + } + + // Make sure everything works well + if _, err = dockerClient.Ping(ctx); err != nil { + return + } + + allContainers, err := dockerClient.ContainerList(ctx, types.ContainerListOptions{ + All: true, + }) + if err != nil { + panic(err.Error()) + } + + re := regexp.MustCompile(pattern) + + var containerNames []string + for _, container := range allContainers { + for _, name := range container.Names { + if re.MatchString(name) { + containerNames = append(containerNames, name) + } + } + } + + if len(containerNames) == 0 { + panic(fmt.Sprintf("no container names matching regex: %s", pattern)) + } + + containerChannels := make([]chan string, len(containerNames)) + for i := range containerChannels { + containerChannels[i] = make(chan string) + } + + for i, containerName := range containerNames { + go k.containerLogs(ctx, dockerClient, containerName, containerChannels[i], grep, vgrep) + } + + mergedChannel := k.mergeChannels(containerChannels) + + go func() { + for logLine := range mergedChannel { + fmt.Println(logLine) + } + }() + + termChan := make(chan os.Signal, 1) + signal.Notify(termChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + <-termChan // Blocks here until either SIGINT or SIGTERM is received. 
+ log.Println("Stopping...") +} + +func (k *Keeper) containerLogs(ctx context.Context, cli *client.Client, containerID string, logsChan chan<- string, grep, vgrep []string) { + out, err := cli.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Follow: true, + Timestamps: false, + }) + if err != nil { + panic(err) + } + defer func() { + if err := out.Close(); err != nil { + panic(err) + } + }() + + reader, writer := io.Pipe() + go func() { + if _, err := stdcopy.StdCopy(writer, writer, out); err != nil { + panic(err) + } + if err := writer.Close(); err != nil { + panic(err) + } + }() + + scanner := bufio.NewScanner(reader) +Scan: + for scanner.Scan() { + rawLogLine := scanner.Text() + for _, exclude := range vgrep { + if strings.Contains(rawLogLine, exclude) { + continue Scan + } + } + for _, include := range grep { + if !strings.Contains(rawLogLine, include) { + continue Scan + } + } + var m map[string]interface{} + if err := json.Unmarshal([]byte(rawLogLine), &m); err != nil { + continue + } + m["containerID"] = containerID + decoratedLogLine, err := json.Marshal(m) + if err != nil { + continue + } + select { + case logsChan <- string(decoratedLogLine): + case <-ctx.Done(): + return + } + } +} + +func (k *Keeper) mergeChannels(containerChannels []chan string) <-chan string { + mergeChannel := make(chan string) + var wg sync.WaitGroup + wg.Add(len(containerChannels)) + + for _, containerChannel := range containerChannels { + go func(containerCh <-chan string) { + defer wg.Done() + for containerLogLine := range containerCh { + mergeChannel <- containerLogLine + } + }(containerChannel) + } + + go func() { + wg.Wait() + close(mergeChannel) + }() + + return mergeChannel +} diff --git a/core/scripts/chaincli/handler/ocr2_config.go b/core/scripts/chaincli/handler/ocr2_config.go new file mode 100644 index 00000000..51a42a74 --- /dev/null +++ b/core/scripts/chaincli/handler/ocr2_config.go @@ -0,0 +1,134 @@ +package handler 
+ +import ( + "bytes" + "context" + "fmt" + "log" + "math/big" + "os" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/olekukonko/tablewriter" + + "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + + ocr2keepers20config "github.com/goplugin/plugin-automation/pkg/v2/config" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" +) + +func OCR2GetConfig(hdlr *baseHandler, registry_addr string) error { + + b, err := common.ParseHexOrString(registry_addr) + if err != nil { + return fmt.Errorf("failed to parse address hash: %s", err) + } + + addr := common.BytesToAddress(b) + registry, err := keeper_registry_wrapper2_0.NewKeeperRegistry(addr, hdlr.client) + if err != nil { + return fmt.Errorf("failed to create caller for address and backend: %s", err) + } + + log.Printf("getting config details from contract: %s\n", addr.Hex()) + detail, err := registry.LatestConfigDetails(nil) + if err != nil { + return fmt.Errorf("failed to get latest config detail from contract: %s", err) + } + + block, err := hdlr.client.BlockByNumber(context.Background(), big.NewInt(int64(detail.BlockNumber))) + if err != nil { + return fmt.Errorf("failed to get block at number %d: %s", detail.BlockNumber, err) + } + + config, err := configFromBlock(block, addr, detail) + if err != nil { + return fmt.Errorf("failed to get config from block: %s", err) + } + + printConfigValues(config) + return nil +} + +func configFromBlock(bl *types.Block, addr common.Address, detail keeper_registry_wrapper2_0.LatestConfigDetails) (*confighelper.PublicConfig, error) { + for _, tx := range bl.Transactions() { + if tx.To() != nil && bytes.Equal(tx.To()[:], addr[:]) { + // this is our transaction + // txRes, txErr, err := getTransactionDetailForHashes(hdlr, []string{tx}) + ocr2Tx, err := NewBaseOCR2Tx(tx) + if err != nil { + log.Printf("failed to create set config transaction: %s", err) + continue + } + + 
method, err := ocr2Tx.Method() + if err != nil { + log.Printf("failed to parse method signature: %s", err) + continue + } + + if method.Name == "setConfig" { + log.Printf("found transaction for last config update: %s", ocr2Tx.Hash()) + confTx, err := NewOCR2SetConfigTx(tx) + if err != nil { + log.Printf("failed to create conf tx: %s", err) + continue + } + + conf, err := confTx.Config() + if err != nil { + log.Printf("failed to parse transaction config: %s", err) + } + conf.ConfigCount = uint64(detail.ConfigCount) + conf.ConfigDigest = detail.ConfigDigest + + pubConf, err := confighelper.PublicConfigFromContractConfig(true, conf) + if err != nil { + log.Printf("failed to parse public config: %s", err) + } + + return &pubConf, nil + } + } + } + + return nil, fmt.Errorf("public config not found") +} + +func printConfigValues(config *confighelper.PublicConfig) { + data := [][]string{} + + data = append(data, []string{"DeltaProgress", config.DeltaProgress.String()}) + data = append(data, []string{"DeltaResend", config.DeltaResend.String()}) + data = append(data, []string{"DeltaRound", config.DeltaRound.String()}) + data = append(data, []string{"DeltaGrace", config.DeltaGrace.String()}) + data = append(data, []string{"DeltaStage", config.DeltaStage.String()}) + data = append(data, []string{"RMax", fmt.Sprintf("%d", config.RMax)}) + data = append(data, []string{"S", fmt.Sprintf("%v", config.S)}) + data = append(data, []string{"MaxDurationQuery", config.MaxDurationQuery.String()}) + data = append(data, []string{"MaxDurationObservation", config.MaxDurationObservation.String()}) + data = append(data, []string{"MaxDurationReport", config.MaxDurationReport.String()}) + data = append(data, []string{"MaxDurationShouldAcceptFinalizedReport", config.MaxDurationShouldAcceptFinalizedReport.String()}) + data = append(data, []string{"MaxDurationShouldTransmitAcceptedReport", config.MaxDurationShouldTransmitAcceptedReport.String()}) + data = append(data, []string{"F", 
fmt.Sprintf("%v", config.F)}) + + if offConf, err := ocr2keepers20config.DecodeOffchainConfig(config.ReportingPluginConfig); err == nil { + data = append(data, []string{"", ""}) + data = append(data, []string{"TargetProbability", offConf.TargetProbability}) + data = append(data, []string{"GasLimitPerReport", fmt.Sprintf("%d", offConf.GasLimitPerReport)}) + data = append(data, []string{"GasOverheadPerUpkeep", fmt.Sprintf("%d", offConf.GasOverheadPerUpkeep)}) + data = append(data, []string{"MinConfirmations", fmt.Sprintf("%d", offConf.MinConfirmations)}) + data = append(data, []string{"PerformLockoutWindow", fmt.Sprintf("%d", offConf.PerformLockoutWindow)}) + data = append(data, []string{"SamplingJobDuration", fmt.Sprintf("%d", offConf.SamplingJobDuration)}) + data = append(data, []string{"TargetInRounds", fmt.Sprintf("%d", offConf.TargetInRounds)}) + } + + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Field", "Value"}) + // table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer + table.SetBorder(false) // Set Border to false + table.AppendBulk(data) // Add Bulk Data + table.Render() +} diff --git a/core/scripts/chaincli/handler/reason.go b/core/scripts/chaincli/handler/reason.go new file mode 100644 index 00000000..bfab29ef --- /dev/null +++ b/core/scripts/chaincli/handler/reason.go @@ -0,0 +1,62 @@ +package handler + +import ( + "context" + "log" + "math/big" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" +) + +// RevertReason attempts to fetch more info on failed TX +func (h *baseHandler) RevertReason(hash string) { + txHash := common.HexToHash(hash) + // Get transaction object + tx, isPending, err := h.client.TransactionByHash(context.Background(), txHash) + if err != nil { + log.Fatal("Transaction not found") + } + if isPending { + log.Fatal("Transaction is still pending") + } + // Get transaction 
receipt + receipt, err := h.client.TransactionReceipt(context.Background(), tx.Hash()) + if err != nil { + log.Fatal("Failed to retrieve receipt: " + err.Error()) + } + + if receipt.Status == 1 { + log.Println("Transaction was successful") + return + } + + // Get failure reason + reason := getFailureReason(h.client, h.fromAddr, tx, receipt.BlockNumber) + log.Println("Revert reason: " + reason) +} + +func getFailureReason(client *ethclient.Client, from common.Address, tx *types.Transaction, blockNumber *big.Int) string { + code, err := client.CallContract(context.Background(), createCallMsgFromTransaction(from, tx), blockNumber) + if err != nil { + log.Println("Cannot get revert reason: " + err.Error()) + return "not found" + } + if len(code) == 0 { + return "no error message or out of gas" + } + return string(code) +} + +func createCallMsgFromTransaction(from common.Address, tx *types.Transaction) ethereum.CallMsg { + return ethereum.CallMsg{ + From: from, + To: tx.To(), + Gas: tx.Gas(), + GasPrice: tx.GasPrice(), + Value: tx.Value(), + Data: tx.Data(), + } +} diff --git a/core/scripts/chaincli/handler/report.go b/core/scripts/chaincli/handler/report.go new file mode 100644 index 00000000..7e8a5bbb --- /dev/null +++ b/core/scripts/chaincli/handler/report.go @@ -0,0 +1,484 @@ +package handler + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "math/big" + "os" + "sort" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/olekukonko/tablewriter" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + ocr2keepers20 "github.com/goplugin/plugin-automation/pkg/v2" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" + evm 
"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20"
+)
+
+// OCR2ReportDataElem is one row of the transmit-report table: static data
+// decoded from the transaction plus any error encountered along the way.
+type OCR2ReportDataElem struct {
+	Err                string
+	From               string
+	To                 string
+	ChainID            string
+	BlockNumber        string
+	PerformKeys        string
+	PerformBlockChecks string
+}
+
+// JsonError is a rpc.jsonError interface
+type JsonError interface {
+	Error() string
+	// ErrorCode() int
+	ErrorData() interface{}
+}
+
+// OCR2AutomationReports fetches the given transmit transaction hashes in one
+// RPC batch, decodes each transaction's static report data, re-simulates each
+// transaction via eth_call at its parent block, and renders the outcome as a
+// table on stdout, newest block first.
+func OCR2AutomationReports(hdlr *baseHandler, txs []string) error {
+	latestBlock, err := hdlr.client.BlockByNumber(context.Background(), nil)
+	if err != nil {
+		return fmt.Errorf("failed to get latest block number: %s", err)
+	}
+
+	fmt.Println("")
+	fmt.Printf("latest block: %s\n", latestBlock.Number())
+	fmt.Println("")
+
+	txRes, txErr, err := getTransactionDetailForHashes(hdlr, txs)
+	if err != nil {
+		return fmt.Errorf("batch call error: %s", err)
+	}
+
+	ocr2Txs := make([]*OCR2TransmitTx, len(txRes))
+	elements := make([]OCR2ReportDataElem, len(txRes))
+	simBatch := make([]rpc.BatchElem, len(txRes))
+	for i := range txRes {
+		if txErr[i] != nil {
+			elements[i].Err = txErr[i].Error()
+			continue
+		}
+
+		if txRes[i] == nil {
+			elements[i].Err = "nil response"
+			continue
+		}
+
+		ocr2Txs[i], err = NewOCR2TransmitTx(*txRes[i])
+		if err != nil {
+			elements[i].Err = fmt.Sprintf("failed to create ocr2 transaction: %s", err)
+			continue
+		}
+
+		ocr2Txs[i].SetStaticValues(&elements[i])
+		simBatch[i], err = ocr2Txs[i].BatchElem()
+		if err != nil {
+			return err
+		}
+	}
+
+	txRes, txErr, err = getSimulationsForTxs(hdlr, simBatch)
+	if err != nil {
+		return err
+	}
+	for i := range txRes {
+		if txErr[i] == nil {
+			continue
+		}
+
+		// Entries that failed in the first loop were never populated: ocr2Txs[i]
+		// is nil there (and elements[i].Err is already set), so decoding must be
+		// skipped to avoid a nil dereference.
+		if ocr2Txs[i] == nil {
+			continue
+		}
+
+		err2, ok := txErr[i].(JsonError) //nolint:errorlint
+		if ok {
+			// ErrorData() is interface{}; an unchecked .(string) assertion here
+			// previously panicked when a node returned structured error data.
+			errData, isString := err2.ErrorData().(string)
+			if !isString {
+				elements[i].Err = err2.Error()
+				continue
+			}
+
+			decoded, err := hexutil.Decode(errData)
+			if err != nil {
+				elements[i].Err = err.Error()
+				continue
+			}
+
+			elements[i].Err = ocr2Txs[i].DecodeError(decoded)
+		} else if err2 != nil {
+			elements[i].Err = err2.Error()
+		}
+	}
+
+	data := make([][]string, len(elements))
+	for i, elem := range elements {
+		data[i] = []string{
+			txs[i],
+			elem.ChainID,
+			elem.BlockNumber,
+			elem.Err,
+			elem.From,
+			elem.To,
+			elem.PerformKeys,
+			elem.PerformBlockChecks,
+		}
+	}
+
+	// Sort rows by block number column, descending (lexicographic on the
+	// rendered string).
+	sort.Slice(data, func(i, j int) bool {
+		return data[i][2] > data[j][2]
+	})
+
+	table := tablewriter.NewWriter(os.Stdout)
+	table.SetHeader([]string{"Hash", "ChainID", "Block", "Error", "From", "To", "Keys", "CheckBlocks"})
+	// table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer
+	table.SetBorder(false) // Set Border to false
+	table.AppendBulk(data) // Add Bulk Data
+	table.Render()
+
+	return nil
+}
+
+// getTransactionDetailForHashes issues one eth_getTransactionByHash per hash
+// in a single JSON-RPC batch. Results and per-request errors are returned
+// index-aligned with txs; the final error is the batch transport error.
+func getTransactionDetailForHashes(hdlr *baseHandler, txs []string) ([]*map[string]interface{}, []error, error) {
+	var (
+		txReqs = make([]rpc.BatchElem, len(txs))
+		txRes  = make([]*map[string]interface{}, len(txs))
+		txErr  = make([]error, len(txs))
+	)
+
+	for i, txHash := range txs {
+		b, err := common.ParseHexOrString(txHash)
+		if err != nil {
+			return txRes, txErr, fmt.Errorf("failed to parse transaction hash: %s", txHash)
+		}
+
+		var result map[string]interface{}
+		txReqs[i] = rpc.BatchElem{
+			Method: "eth_getTransactionByHash",
+			Args: []interface{}{
+				common.BytesToHash(b),
+			},
+			Result: &result,
+		}
+
+		txRes[i] = &result
+	}
+
+	err := hdlr.rpcClient.BatchCallContext(context.Background(), txReqs)
+
+	for i := range txReqs {
+		txErr[i] = txReqs[i].Error
+	}
+
+	return txRes, txErr, err
+}
+
+// getSimulationsForTxs executes the prepared eth_call batch elements and
+// returns index-aligned results and per-request errors.
+func getSimulationsForTxs(hdlr *baseHandler, txReqs []rpc.BatchElem) ([]*map[string]interface{}, []error, error) {
+	var (
+		txRes = make([]*map[string]interface{}, len(txReqs))
+		txErr = make([]error, len(txReqs))
+	)
+
+	for i := range txReqs {
+		var result map[string]interface{}
+		txReqs[i].Result = &result
+		txRes[i] = &result
+	}
+
+	err := hdlr.rpcClient.BatchCallContext(context.Background(), txReqs)
+
+	for i := range txReqs {
+		txErr[i] = txReqs[i].Error
+	}
+
+	return txRes, txErr, err
+}
+
+func NewOCR2Transaction(raw map[string]interface{}) 
(*OCR2Transaction, error) { + contract, err := abi.JSON(strings.NewReader(keeper_registry_wrapper2_0.KeeperRegistryABI)) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + + txBytes, err := json.Marshal(raw) + if err != nil { + return nil, err + } + + var tx types.Transaction + if err := json.Unmarshal(txBytes, &tx); err != nil { + return nil, err + } + + return &OCR2Transaction{ + encoder: evm.EVMAutomationEncoder20{}, + abi: contract, + raw: raw, + tx: tx, + }, nil +} + +type OCR2Transaction struct { + encoder evm.EVMAutomationEncoder20 + abi abi.ABI + raw map[string]interface{} + tx types.Transaction +} + +func (t *OCR2Transaction) TransactionHash() common.Hash { + return t.tx.Hash() +} + +func (t *OCR2Transaction) ChainId() *big.Int { + return t.tx.ChainId() +} + +func (t *OCR2Transaction) BlockNumber() (uint64, error) { + if bl, ok := t.raw["blockNumber"]; ok { + var blStr string + blStr, ok = bl.(string) + if ok { + block, err := hexutil.DecodeUint64(blStr) + if err != nil { + return 0, fmt.Errorf("failed to parse block number: %s", err) + } + return block, nil + } + return 0, fmt.Errorf("not a string") + } + return 0, fmt.Errorf("not found") +} + +func (t *OCR2Transaction) To() *common.Address { + return t.tx.To() +} + +func (t *OCR2Transaction) From() (common.Address, error) { + + switch t.tx.Type() { + case 2: + from, err := types.Sender(types.NewLondonSigner(t.tx.ChainId()), &t.tx) + if err != nil { + return common.Address{}, fmt.Errorf("failed to get from addr: %s", err) + } else { + return from, nil + } + } + + return common.Address{}, fmt.Errorf("from address not found") +} + +func (t *OCR2Transaction) Method() (*abi.Method, error) { + return t.abi.MethodById(t.tx.Data()[0:4]) +} + +func (t *OCR2Transaction) DecodeError(b []byte) string { + j := common.Bytes2Hex(b) + + for _, e := range t.abi.Errors { + if bytes.Equal(e.ID[:4], b[:4]) { + return e.Name + } + } + + return j +} + +func NewOCR2TransmitTx(raw map[string]interface{}) 
(*OCR2TransmitTx, error) {
+	tx, err := NewOCR2Transaction(raw)
+	if err != nil {
+		return nil, err
+	}
+
+	return &OCR2TransmitTx{
+		OCR2Transaction: *tx,
+	}, nil
+}
+
+// OCR2TransmitTx is an OCR2Transaction known to be a registry transmit() call.
+type OCR2TransmitTx struct {
+	OCR2Transaction
+}
+
+// UpkeepsInTransmit unpacks the rawReport argument of the transmit() calldata
+// and decodes it into the upkeep results it carries.
+func (t *OCR2TransmitTx) UpkeepsInTransmit() ([]ocr2keepers20.UpkeepResult, error) {
+	txData := t.tx.Data()
+
+	// Guard: a method selector needs at least 4 bytes of calldata.
+	if len(txData) < 4 {
+		return nil, fmt.Errorf("calldata too short for a method selector: %d bytes", len(txData))
+	}
+
+	// recover Method from signature and ABI
+	method, err := t.abi.MethodById(txData[0:4])
+	if err != nil {
+		return nil, fmt.Errorf("failed to get method from sig: %s", err)
+	}
+
+	vals := make(map[string]interface{})
+	if err := t.abi.Methods[method.Name].Inputs.UnpackIntoMap(vals, txData[4:]); err != nil {
+		return nil, fmt.Errorf("unpacking error: %s", err)
+	}
+
+	reportData, ok := vals["rawReport"]
+	if !ok {
+		return nil, fmt.Errorf("raw report data missing from input")
+	}
+
+	reportBytes, ok := reportData.([]byte)
+	if !ok {
+		return nil, fmt.Errorf("report data not bytes: %T", reportData)
+	}
+
+	return t.encoder.DecodeReport(reportBytes)
+}
+
+// SetStaticValues fills elem with data that can be derived from the
+// transaction alone (addresses, chain ID, block, upkeep keys and check
+// blocks); any failure is recorded in elem.Err instead of aborting.
+func (t *OCR2TransmitTx) SetStaticValues(elem *OCR2ReportDataElem) {
+	if t.To() != nil {
+		elem.To = t.To().String()
+	}
+
+	elem.ChainID = t.ChainId().String()
+
+	from, err := t.From()
+	if err != nil {
+		elem.Err = err.Error()
+		return
+	}
+	elem.From = from.String()
+
+	block, err := t.BlockNumber()
+	if err != nil {
+		elem.Err = err.Error()
+		return
+	}
+	elem.BlockNumber = fmt.Sprintf("%d", block)
+
+	upkeeps, err := t.UpkeepsInTransmit()
+	if err != nil {
+		elem.Err = err.Error()
+	}
+
+	keys := []string{}
+	chkBlocks := []string{}
+
+	for _, u := range upkeeps {
+		val, ok := u.(evm.EVMAutomationUpkeepResult20)
+		if !ok {
+			// A CLI report row should surface the problem rather than crash the
+			// whole run (previously: panic).
+			elem.Err = fmt.Sprintf("unrecognized upkeep result type: %T", u)
+			return
+		}
+
+		keys = append(keys, val.ID.String())
+		chkBlocks = append(chkBlocks, fmt.Sprintf("%d", val.CheckBlockNumber))
+	}
+
+	elem.PerformKeys = strings.Join(keys, "\n")
+	elem.PerformBlockChecks = strings.Join(chkBlocks, "\n")
+}
+
+// BatchElem builds an eth_call batch element that re-simulates this transmit
+// at the parent of its inclusion block.
+func (t *OCR2TransmitTx) BatchElem() (rpc.BatchElem, error) {
+	bn, err := t.BlockNumber()
+	if err != nil {
+		return rpc.BatchElem{}, err
+	}
+
+	from, err := t.From()
+	if err != nil {
+		return rpc.BatchElem{}, err
+	}
+
+	// NOTE(review): assumes bn >= 1 (a mined transmit) — bn-1 would go
+	// negative at the genesis block; and To() is non-nil for transmit calls.
+	return rpc.BatchElem{
+		Method: "eth_call",
+		Args: []interface{}{
+			map[string]interface{}{
+				"from": from.Hex(),
+				"to":   t.To().Hex(),
+				"data": hexutil.Bytes(t.tx.Data()),
+			},
+			hexutil.EncodeBig(big.NewInt(int64(bn) - 1)),
+		},
+	}, nil
+}
+
+// NewBaseOCR2Tx wraps a decoded transaction with the keeper registry ABI.
+func NewBaseOCR2Tx(tx *types.Transaction) (*BaseOCR2Tx, error) {
+	contract, err := abi.JSON(strings.NewReader(keeper_registry_wrapper2_0.KeeperRegistryABI))
+	if err != nil {
+		return nil, fmt.Errorf("%w", err)
+	}
+
+	return &BaseOCR2Tx{
+		abi:         contract,
+		Transaction: *tx,
+	}, nil
+}
+
+// BaseOCR2Tx is a transaction paired with the registry ABI for decoding.
+type BaseOCR2Tx struct {
+	abi abi.ABI
+	types.Transaction
+}
+
+// Method resolves the calldata's 4-byte selector against the registry ABI.
+func (tx *BaseOCR2Tx) Method() (*abi.Method, error) {
+	data := tx.Data()
+	// Guard: data[0:4] previously panicked on calldata shorter than a selector.
+	if len(data) < 4 {
+		return nil, fmt.Errorf("calldata too short for a method selector: %d bytes", len(data))
+	}
+	return tx.abi.MethodById(data[0:4])
+}
+
+// DataMap unpacks the calldata's arguments into a name -> value map using the
+// method resolved from the selector.
+func (tx *BaseOCR2Tx) DataMap() (map[string]interface{}, error) {
+	txData := tx.Data()
+
+	// Guard: a method selector needs at least 4 bytes of calldata.
+	if len(txData) < 4 {
+		return nil, fmt.Errorf("calldata too short for a method selector: %d bytes", len(txData))
+	}
+
+	// recover Method from signature and ABI
+	method, err := tx.abi.MethodById(txData[0:4])
+	if err != nil {
+		return nil, fmt.Errorf("failed to get method from sig: %s", err)
+	}
+
+	vals := make(map[string]interface{})
+	if err := tx.abi.Methods[method.Name].Inputs.UnpackIntoMap(vals, txData[4:]); err != nil {
+		return nil, fmt.Errorf("unpacking error: %s", err)
+	}
+
+	return vals, nil
+}
+
+// NewOCR2SetConfigTx wraps a transaction assumed to be a setConfig() call.
+func NewOCR2SetConfigTx(tx *types.Transaction) (*OCR2SetConfigTx, error) {
+	base, err := NewBaseOCR2Tx(tx)
+	if err != nil {
+		return nil, err
+	}
+
+	return &OCR2SetConfigTx{
+		BaseOCR2Tx: *base,
+	}, nil
+}
+
+// OCR2SetConfigTx is a BaseOCR2Tx known to be a registry setConfig() call.
+type OCR2SetConfigTx struct {
+	BaseOCR2Tx
+}
+
+func (tx *OCR2SetConfigTx) Config() (ocrtypes.ContractConfig, error) {
+	conf := ocrtypes.ContractConfig{}
+
+	vals, err := tx.DataMap()
+	if err != nil {
+		return conf, err
+	}
+
+	if fVal, ok := vals["f"]; ok {
+		conf.F = fVal.(uint8)
+	}
+
+	if onVal, ok := vals["onchainConfig"]; ok {
+		conf.OnchainConfig = onVal.([]byte)
+	}
+
+	if vVal, ok := vals["offchainConfigVersion"]; ok {
+		
conf.OffchainConfigVersion = vVal.(uint64)
+	}
+
+	if onVal, ok := vals["offchainConfig"]; ok {
+		conf.OffchainConfig = onVal.([]byte)
+	}
+
+	if sVal, ok := vals["signers"]; ok {
+		for _, s := range sVal.([]common.Address) {
+			conf.Signers = append(conf.Signers, s.Bytes())
+		}
+	}
+
+	if tVal, ok := vals["transmitters"]; ok {
+		for _, t := range tVal.([]common.Address) {
+			conf.Transmitters = append(conf.Transmitters, ocrtypes.Account(t.Hex()))
+		}
+	}
+
+	return conf, nil
+}
diff --git a/core/scripts/chaincli/handler/scrape_node_config.go b/core/scripts/chaincli/handler/scrape_node_config.go
new file mode 100644
index 00000000..dc9d2bb2
--- /dev/null
+++ b/core/scripts/chaincli/handler/scrape_node_config.go
@@ -0,0 +1,283 @@
+package handler
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/google/go-cmp/cmp"
+	"github.com/manyminds/api2go/jsonapi"
+
+	"github.com/goplugin/pluginv3.0/v2/core/cmd"
+	"github.com/goplugin/pluginv3.0/v2/core/logger"
+)
+
+// CSAKeyInfo pairs a node's CSA public key with the node it belongs to.
+type CSAKeyInfo struct {
+	NodeName    string `json:"nodeName"`
+	NodeAddress string `json:"nodeAddress"`
+	PublicKey   string `json:"publicKey"`
+}
+
+// Equals reports whether two CSA key records agree on public key and node
+// address (NodeName is intentionally not compared).
+func (ci *CSAKeyInfo) Equals(ci2 *CSAKeyInfo) bool {
+	return ci.PublicKey == ci2.PublicKey && ci.NodeAddress == ci2.NodeAddress
+}
+
+// NodeInfo is the identity record for a node, as scraped from the node itself
+// or as published in the weiwatchers/RDD feed.
+type NodeInfo struct {
+	AdminAddress          common.Address `json:"adminAddress"`
+	CSAKeys               []*CSAKeyInfo  `json:"csaKeys"`
+	DisplayName           string         `json:"displayName"`
+	Ocr2ConfigPublicKey   []string       `json:"ocr2ConfigPublicKey"`
+	Ocr2Id                []string       `json:"ocr2Id"`
+	Ocr2OffchainPublicKey []string       `json:"ocr2OffchainPublicKey"`
+	Ocr2OnchainPublicKey  []string       `json:"ocr2OnchainPublicKey"`
+	NodeAddress           []string       `json:"ocrNodeAddress"`
+	OcrSigningAddress     []string       `json:"ocrSigningAddress"`
+	PayeeAddress          common.Address `json:"payeeAddress"`
+	PeerId                []string       `json:"peerId"`
+	Status                string         `json:"status"`
+}
+
+// Equals compares the node-reported identity data against the weiwatchers
+// record ni, logging every mismatch, and returns true only when no
+// differences were found.
+func (node NodeInfo) Equals(ni NodeInfo, log logger.Logger) bool {
+	diffs := 0
+
+	if len(node.CSAKeys) != len(ni.CSAKeys) {
+		// Count the length mismatch itself as a difference: previously it was
+		// only logged, so Equals could still report the records as identical,
+		// and the loop below could index ni.CSAKeys out of range.
+		diffs++
+		log.Errorf("CSA Keys length differs. The node returns %d but weiwatcher has %d", len(node.CSAKeys), len(ni.CSAKeys))
+	}
+	for i, ci := range node.CSAKeys {
+		if i >= len(ni.CSAKeys) {
+			break
+		}
+		if !ci.Equals(ni.CSAKeys[i]) {
+			diffs++
+			log.Errorf("CSA Info differs. The node returns %s but weiwatcher has %s", ci, ni.CSAKeys[i])
+		}
+	}
+
+	if !cmp.Equal(node.Ocr2Id, ni.Ocr2Id) {
+		diffs++
+		log.Errorf("OCR2 ID differs. The node returns %s but weiwatcher has %s", node.Ocr2Id, ni.Ocr2Id)
+	}
+
+	if !cmp.Equal(node.NodeAddress, ni.NodeAddress) {
+		diffs++
+		log.Errorf("Node address differs. The node returns %s but weiwatcher has %s", node.NodeAddress, ni.NodeAddress)
+	}
+
+	if !cmp.Equal(node.PeerId, ni.PeerId) {
+		diffs++
+		log.Errorf("Peer Id differs. The node returns %s but weiwatcher has %s", node.PeerId, ni.PeerId)
+	}
+
+	if !cmp.Equal(node.Ocr2OffchainPublicKey, ni.Ocr2OffchainPublicKey) {
+		diffs++
+		log.Errorf("OCR2 Offchain Public Key differs. The node returns %s but weiwatcher has %s", node.Ocr2OffchainPublicKey, ni.Ocr2OffchainPublicKey)
+	}
+
+	if !cmp.Equal(node.Ocr2OnchainPublicKey, ni.Ocr2OnchainPublicKey) {
+		diffs++
+		log.Errorf("OCR2 Onchain Public Key differs. The node returns %s but weiwatcher has %s", node.Ocr2OnchainPublicKey, ni.Ocr2OnchainPublicKey)
+	}
+
+	if !cmp.Equal(node.Ocr2ConfigPublicKey, ni.Ocr2ConfigPublicKey) {
+		diffs++
+		log.Errorf("OCR2 Config Public Key differs. The node returns %s but weiwatcher has %s", node.Ocr2ConfigPublicKey, ni.Ocr2ConfigPublicKey)
+	}
+
+	return diffs == 0
+}
+
+// ScrapeNodes is the CLI entry point: it builds a logger and delegates to
+// scrapeNodes.
+func (h *baseHandler) ScrapeNodes() {
+	log, closeLggr := logger.NewLogger()
+	logger.Sugared(log).ErrorIfFn(closeLggr, "Failed to close logger")
+
+	ctx := context.Background()
+	h.scrapeNodes(ctx, log)
+}
+
+func (h *baseHandler) scrapeNodes(ctx context.Context, log logger.Logger) {
+	log.Warn("This scrapes node address, peer ID, CSA node address, CSA public key, OCR2 ID, OCR2 config pub key, OCR2 onchain pub key, and OCR2 offchain pub key.")
+	log.Warn("This does NOT scrape for payee address, admin address etc. Please verify that manually.")
+	cls := make([]cmd.HTTPClient, len(h.cfg.KeeperURLs))
+	for i := range h.cfg.KeeperURLs {
+		url := h.cfg.KeeperURLs[i]
+		email := h.cfg.KeeperEmails[i]
+		if len(email) == 0 {
+			email = defaultPluginNodeLogin
+		}
+		pwd := h.cfg.KeeperPasswords[i]
+		if len(pwd) == 0 {
+			pwd = defaultPluginNodePassword
+		}
+
+		cl, err := authenticate(ctx, url, email, pwd, log)
+		if err != nil {
+			log.Fatal(err)
+		}
+		cls[i] = cl
+	}
+
+	nodes := map[string]*NodeInfo{}
+	var wg sync.WaitGroup
+	for i, cl := range cls {
+		wg.Add(1)
+		go h.scrapeNodeInfo(ctx, &wg, i, cl, nodes, log)
+	}
+	wg.Wait()
+
+	// if node info is not in RDD and weiwatchers, don't proceed further
+	if !h.cfg.VerifyNodes {
+		return
+	}
+	nodeInfos := h.fetchNodeInfosFromWeiwatchers(ctx, log)
+	cnt := 0
+	for _, ni := range nodeInfos {
+		if len(ni.NodeAddress) == 0 {
+			log.Fatalf("%s node is missing node address in RDD weiwatchers.", ni.DisplayName)
+		}
+		if len(ni.NodeAddress) > 1 {
+			log.Warnf("%s node has more than 1 node addresses. is this a multi-chain node? 
or this node used to serve another chain?", ni.DisplayName)
+		}
+		nodeAddr := ni.NodeAddress[0]
+		node := nodes[nodeAddr]
+		if node == nil {
+			continue
+		}
+		cnt++
+
+		log.Infof("start comparing data for node %s", nodeAddr)
+		if node.Equals(ni, log) {
+			log.Infof("node %s info is correct", nodeAddr)
+		} else {
+			log.Errorf("node %s info differs between the node instance and weiwatcher", nodeAddr)
+		}
+	}
+
+	if cnt != len(nodes) {
+		log.Infof("there are %d nodes provisioned , but .env is missing %d nodes", len(nodes), len(nodes)-cnt)
+	}
+}
+
+// fetchNodeInfosFromWeiwatchers downloads the published node identity list
+// from the configured weiwatchers URL and decodes it.
+// NOTE(review): the HTTP status code is not checked before decoding — an
+// error page would surface as a JSON decode failure.
+func (h *baseHandler) fetchNodeInfosFromWeiwatchers(ctx context.Context, log logger.Logger) []NodeInfo {
+	client := http.Client{Timeout: 1 * time.Minute}
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, h.cfg.NodeConfigURL, nil)
+	if err != nil {
+		log.Fatalf("failed to build a GET request: %s", err)
+	}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		log.Fatalf("failed to make a GET request: %s", err)
+	}
+	defer resp.Body.Close()
+
+	var nodeInfos []NodeInfo
+	if err := json.NewDecoder(resp.Body).Decode(&nodeInfos); err != nil {
+		log.Fatalf("failed to read response: %s", err)
+	}
+
+	return nodeInfos
+}
+
+// fetchNodeInfosFromNodes queries the i-th node's API for its ETH keys, OCR2
+// key bundle, P2P peer ID and CSA keys. Any failure is fatal.
+func (h *baseHandler) fetchNodeInfosFromNodes(ctx context.Context, i int, cl cmd.HTTPClient, log logger.Logger) ([]string, *cmd.OCR2KeyBundlePresenter, string, *cmd.CSAKeyPresenters) {
+	resp, err := nodeRequest(ctx, cl, ethKeysEndpoint)
+	if err != nil {
+		log.Fatalf("failed to get ETH keys: %s", err)
+	}
+	var ethKeys cmd.EthKeyPresenters
+	if err = jsonapi.Unmarshal(resp, &ethKeys); err != nil {
+		log.Fatalf("failed to unmarshal response body: %s", err)
+	}
+	var nodeAddresses []string
+	for index := range ethKeys {
+		nodeAddresses = append(nodeAddresses, common.HexToAddress(ethKeys[index].Address).Hex())
+	}
+	if len(nodeAddresses) == 0 {
+		log.Fatalf("%d th node is missing a node address. Has this node been properly configured by infra?", i)
+	}
+	if len(nodeAddresses) > 1 {
+		log.Warnf("%d th node has more than 1 node addresses. is this a multi-chain node? or this node used to serve another chain?", i)
+	}
+
+	ocr2Config, err := getNodeOCR2Config(ctx, cl)
+	if err != nil {
+		log.Fatalf("failed to get node OCR2 config: %s", err)
+	}
+
+	peerId, err := getP2PKeyID(ctx, cl)
+	if err != nil {
+		log.Fatalf("failed to get p2p keys: %s", err)
+	}
+
+	resp, err = nodeRequest(ctx, cl, csaKeysEndpoint)
+	if err != nil {
+		log.Fatalf("failed to get CSA keys: %s", err)
+	}
+	var csaKeys cmd.CSAKeyPresenters
+	if err = jsonapi.Unmarshal(resp, &csaKeys); err != nil {
+		log.Fatalf("failed to unmarshal response body: %s", err)
+	}
+	if len(csaKeys) == 0 {
+		log.Fatalf("%d th node does not have CSA keys configured", i)
+	}
+	if len(csaKeys) > 1 {
+		log.Warnf("%d th node has more than 1 CSA keys configured. Please verify with RTSP about which CSA key to use.", i)
+	}
+
+	return nodeAddresses, ocr2Config, peerId, &csaKeys
+}
+
+// nodesMu guards the shared nodes map: scrapeNodeInfo runs in one goroutine
+// per node, and unsynchronized concurrent map writes are a data race that
+// panics at runtime ("concurrent map writes").
+var nodesMu sync.Mutex
+
+// scrapeNodeInfo collects identity data from the i-th node, writes it out to
+// "<i>.json" and records it in the shared nodes map keyed by node address.
+func (h *baseHandler) scrapeNodeInfo(ctx context.Context, wg *sync.WaitGroup, i int, cl cmd.HTTPClient, nodes map[string]*NodeInfo, log logger.Logger) {
+	defer wg.Done()
+
+	nodeAddresses, ocr2Config, peerId, csaKeys := h.fetchNodeInfosFromNodes(ctx, i, cl, log)
+
+	// this assumes the nodes are not multichain nodes and have only 1 node address assigned.
+	// for a multichain node, we can pass in a chain id and filter `ethKeys` array based on the chain id
+	// in terms of CSA keys, we need to wait for RTSP to support multichain nodes, which may involve creating one
+	// CSA key for each chain. but this is still pending so assume only 1 CSA key on a node for now.
+	csaKey := &CSAKeyInfo{
+		NodeAddress: nodeAddresses[0],
+		PublicKey:   strings.TrimPrefix((*csaKeys)[0].PubKey, "csa_"),
+	}
+	ni := &NodeInfo{
+		CSAKeys:               []*CSAKeyInfo{csaKey},
+		NodeAddress:           nodeAddresses,
+		Ocr2ConfigPublicKey:   []string{ocr2Config.ConfigPublicKey},
+		Ocr2Id:                []string{ocr2Config.ID},
+		Ocr2OffchainPublicKey: []string{ocr2Config.OffChainPublicKey},
+		Ocr2OnchainPublicKey:  []string{ocr2Config.OnchainPublicKey},
+		OcrSigningAddress:     []string{common.HexToAddress(strings.TrimPrefix(ocr2Config.OnchainPublicKey, "ocr2on_evm_")).Hex()},
+		PeerId:                []string{peerId},
+	}
+
+	err := writeJSON(ni, strconv.Itoa(i)+".json")
+	if err != nil {
+		panic(fmt.Errorf("failed to write node info to JSON: %w", err))
+	}
+
+	nodesMu.Lock()
+	nodes[nodeAddresses[0]] = ni
+	nodesMu.Unlock()
+}
+
+// JSONMarshalWithoutEscape renders t as indented JSON without HTML-escaping
+// characters such as < and >.
+func JSONMarshalWithoutEscape(t interface{}) ([]byte, error) {
+	buffer := &bytes.Buffer{}
+	encoder := json.NewEncoder(buffer)
+	encoder.SetEscapeHTML(false)
+	encoder.SetIndent("", " ")
+	err := encoder.Encode(t)
+	return buffer.Bytes(), err
+}
+
+// writeJSON serializes data with JSONMarshalWithoutEscape and writes it to
+// path with 0644 permissions.
+func writeJSON(data interface{}, path string) error {
+	dataBytes, err := JSONMarshalWithoutEscape(data)
+	if err != nil {
+		return err
+	}
+
+	return os.WriteFile(path, dataBytes, 0644) //nolint:gosec
+}
diff --git a/core/scripts/chaincli/handler/verify.go b/core/scripts/chaincli/handler/verify.go
new file mode 100644
index 00000000..c583357f
--- /dev/null
+++ b/core/scripts/chaincli/handler/verify.go
@@ -0,0 +1,48 @@
+package handler
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+)
+
+func (k *Keeper) changeToContractsDirectory() error {
+	// Get the current working directory
+	currentDir, err := os.Getwd()
+	if err != nil {
+		return fmt.Errorf("failed to get current working directory: %w", err)
+	}
+
+	// Check if hardhat.config.ts exists in the current directory, return if it does
+	if _, err := os.Stat(filepath.Join(currentDir, "hardhat.config.ts")); err == nil {
+		return nil
+	}
+
+	// Command should run from core/scripts/chaincli, so we need to 
change directory to contracts
+	// Calculate the absolute path of the target directory
+	absPath := filepath.Join(currentDir, "../../../contracts")
+
+	// Change directory
+	if err := os.Chdir(absPath); err != nil {
+		return fmt.Errorf("failed to change directory: %w", err)
+	}
+
+	// Check if hardhat.config.ts exists in the current directory
+	if _, err := os.Stat(filepath.Join(absPath, "hardhat.config.ts")); err != nil {
+		return fmt.Errorf("hardhat.config.ts not found in the current directory")
+	}
+
+	log.Printf("Successfully changed to directory %s\n", absPath)
+
+	return nil
+}
+
+// runCommand executes command via `bash -c`, wiring the child's stdout and
+// stderr to this process, and returns whatever error the command exits with.
+func (k *Keeper) runCommand(command string) error {
+	sh := exec.Command("bash", "-c", command)
+	sh.Stdout = os.Stdout
+	sh.Stderr = os.Stderr
+
+	return sh.Run()
+}
diff --git a/core/scripts/chaincli/main.go b/core/scripts/chaincli/main.go
new file mode 100644
index 00000000..85650355
--- /dev/null
+++ b/core/scripts/chaincli/main.go
@@ -0,0 +1,9 @@
+package main
+
+import (
+	"github.com/goplugin/pluginv3.0/core/scripts/chaincli/command"
+)
+
+// main hands control to the chaincli root command.
+func main() {
+	command.Execute()
+}
diff --git a/core/scripts/common/arbitrum.go b/core/scripts/common/arbitrum.go
new file mode 100644
index 00000000..251f9b76
--- /dev/null
+++ b/core/scripts/common/arbitrum.go
@@ -0,0 +1,13 @@
+package common
+
+const (
+	ArbitrumGoerliChainID  int64 = 421613
+	ArbitrumOneChainID     int64 = 42161
+	ArbitrumSepoliaChainID int64 = 421614
+)
+
+// IsArbitrumChainID returns true if and only if the given chain ID corresponds
+// to an Arbitrum chain (testnet or mainnet).
+func IsArbitrumChainID(chainID int64) bool {
+	switch chainID {
+	case ArbitrumGoerliChainID, ArbitrumOneChainID, ArbitrumSepoliaChainID:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/core/scripts/common/avalanche.go b/core/scripts/common/avalanche.go
new file mode 100644
index 00000000..8699463c
--- /dev/null
+++ b/core/scripts/common/avalanche.go
@@ -0,0 +1,265 @@
+package common
+
+import (
+	"encoding/binary"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+const (
+	// BloomByteLength represents the number of bytes used in a header log bloom.
+	BloomByteLength = 256
+
+	// BloomBitLength represents the number of bits used in a header log bloom.
+	BloomBitLength = 8 * BloomByteLength
+)
+
+// AvaBloom represents a 2048 bit bloom filter.
+type AvaBloom [BloomByteLength]byte
+
+// SetBytes sets the content of b to the given bytes.
+// It panics if d is not of suitable size.
+func (b *AvaBloom) SetBytes(d []byte) {
+	n := len(d)
+	if n > len(b) {
+		panic(fmt.Sprintf("bloom bytes too big %d %d", len(b), n))
+	}
+	copy(b[BloomByteLength-n:], d)
+}
+
+// Add adds d to the filter. Future calls of Test(d) will return true.
+func (b *AvaBloom) Add(d []byte) {
+	b.add(d, make([]byte, 6))
+}
+
+// add is internal version of Add, which takes a scratch buffer for reuse (needs to be at least 6 bytes)
+func (b *AvaBloom) add(d []byte, buf []byte) {
+	idx1, bit1, idx2, bit2, idx3, bit3 := bloomValues(d, buf)
+	b[idx1] |= bit1
+	b[idx2] |= bit2
+	b[idx3] |= bit3
+}
+
+// Big converts b to a big integer. 
+// Note: Converting a bloom filter to a big.Int and then calling GetBytes
+// does not return the same bytes, since big.Int will trim leading zeroes
+func (b AvaBloom) Big() *big.Int {
+	return new(big.Int).SetBytes(b[:])
+}
+
+// Bytes returns the backing byte slice of the bloom
+func (b AvaBloom) Bytes() []byte {
+	return b[:]
+}
+
+// Test checks if the given topic is present in the bloom filter
+func (b AvaBloom) Test(topic []byte) bool {
+	i1, v1, i2, v2, i3, v3 := bloomValues(topic, make([]byte, 6))
+	// All three derived bits must be set for a (possible) match.
+	return v1 == v1&b[i1] &&
+		v2 == v2&b[i2] &&
+		v3 == v3&b[i3]
+}
+
+// MarshalText encodes b as a hex string with 0x prefix.
+func (b AvaBloom) MarshalText() ([]byte, error) {
+	return hexutil.Bytes(b[:]).MarshalText()
+}
+
+// UnmarshalText b as a hex string with 0x prefix.
+func (b *AvaBloom) UnmarshalText(input []byte) error {
+	return hexutil.UnmarshalFixedText("Bloom", input, b[:])
+}
+
+// bloomValues returns the bytes (index-value pairs) to set for the given data.
+// It keccak-hashes data and uses three 2-byte windows of the hash: the low 3
+// bits of each window pick a bit within a byte, and the next 8 bits (masked
+// to 0x7ff, shifted) pick the byte index from the end of the filter.
+func bloomValues(data []byte, hashbuf []byte) (uint, byte, uint, byte, uint, byte) {
+	sha := crypto.NewKeccakState()
+	sha.Write(data)   //nolint:errcheck
+	sha.Read(hashbuf) //nolint:errcheck
+	// The actual bits to flip
+	v1 := byte(1 << (hashbuf[1] & 0x7))
+	v2 := byte(1 << (hashbuf[3] & 0x7))
+	v3 := byte(1 << (hashbuf[5] & 0x7))
+	// The indices for the bytes to OR in
+	i1 := BloomByteLength - uint((binary.BigEndian.Uint16(hashbuf)&0x7ff)>>3) - 1
+	i2 := BloomByteLength - uint((binary.BigEndian.Uint16(hashbuf[2:])&0x7ff)>>3) - 1
+	i3 := BloomByteLength - uint((binary.BigEndian.Uint16(hashbuf[4:])&0x7ff)>>3) - 1
+
+	return i1, v1, i2, v2, i3, v3
+}
+
+// A AvaBlockNonce is a 64-bit hash which proves (combined with the
+// mix-hash) that a sufficient amount of computation has been carried
+// out on a block.
+type AvaBlockNonce [8]byte
+
+// EncodeNonce converts the given integer to a block nonce.
+func EncodeNonce(i uint64) AvaBlockNonce {
+	var n AvaBlockNonce
+	binary.BigEndian.PutUint64(n[:], i)
+	return n
+}
+
+// Uint64 returns the integer value of a block nonce.
+func (n AvaBlockNonce) Uint64() uint64 {
+	return binary.BigEndian.Uint64(n[:])
+}
+
+// MarshalText encodes n as a hex string with 0x prefix.
+func (n AvaBlockNonce) MarshalText() ([]byte, error) {
+	return hexutil.Bytes(n[:]).MarshalText()
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (n *AvaBlockNonce) UnmarshalText(input []byte) error {
+	return hexutil.UnmarshalFixedText("AvaBlockNonce", input, n[:])
+}
+
+// AvaHeader is a copy of [github.com/ava-labs/coreth/core/types.Header] to avoid importing the whole module.
+type AvaHeader struct {
+	ParentHash  common.Hash    `json:"parentHash" gencodec:"required"`
+	UncleHash   common.Hash    `json:"sha3Uncles" gencodec:"required"`
+	Coinbase    common.Address `json:"miner" gencodec:"required"`
+	Root        common.Hash    `json:"stateRoot" gencodec:"required"`
+	TxHash      common.Hash    `json:"transactionsRoot" gencodec:"required"`
+	ReceiptHash common.Hash    `json:"receiptsRoot" gencodec:"required"`
+	Bloom       AvaBloom       `json:"logsBloom" gencodec:"required"`
+	Difficulty  *big.Int       `json:"difficulty" gencodec:"required"`
+	Number      *big.Int       `json:"number" gencodec:"required"`
+	GasLimit    uint64         `json:"gasLimit" gencodec:"required"`
+	GasUsed     uint64         `json:"gasUsed" gencodec:"required"`
+	Time        uint64         `json:"timestamp" gencodec:"required"`
+	Extra       []byte         `json:"extraData" gencodec:"required"`
+	MixDigest   common.Hash    `json:"mixHash"`
+	Nonce       AvaBlockNonce  `json:"nonce"`
+	ExtDataHash common.Hash    `json:"extDataHash" gencodec:"required"`
+
+	// BaseFee was added by EIP-1559 and is ignored in legacy headers.
+	BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
+
+	// ExtDataGasUsed was added by Apricot Phase 4 and is ignored in legacy
+	// headers.
+	//
+	// It is not a uint64 like GasLimit or GasUsed because it is not possible to
+	// correctly encode this field optionally with uint64.
+	ExtDataGasUsed *big.Int `json:"extDataGasUsed" rlp:"optional"`
+
+	// BlockGasCost was added by Apricot Phase 4 and is ignored in legacy
+	// headers.
+	BlockGasCost *big.Int `json:"blockGasCost" rlp:"optional"`
+}
+
+// UnmarshalJSON decodes an Avalanche C-Chain header from its JSON-RPC form,
+// rejecting input that is missing any gencodec:"required" field and
+// converting hexutil wrapper types to their native counterparts.
+func (h *AvaHeader) UnmarshalJSON(input []byte) error {
+	// Shadow struct with pointer fields so that absent JSON keys are
+	// distinguishable (nil) from zero values.
+	type Header struct {
+		ParentHash     *common.Hash    `json:"parentHash" gencodec:"required"`
+		UncleHash      *common.Hash    `json:"sha3Uncles" gencodec:"required"`
+		Coinbase       *common.Address `json:"miner" gencodec:"required"`
+		Root           *common.Hash    `json:"stateRoot" gencodec:"required"`
+		TxHash         *common.Hash    `json:"transactionsRoot" gencodec:"required"`
+		ReceiptHash    *common.Hash    `json:"receiptsRoot" gencodec:"required"`
+		Bloom          *AvaBloom       `json:"logsBloom" gencodec:"required"`
+		Difficulty     *hexutil.Big    `json:"difficulty" gencodec:"required"`
+		Number         *hexutil.Big    `json:"number" gencodec:"required"`
+		GasLimit       *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
+		GasUsed        *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
+		Time           *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
+		Extra          *hexutil.Bytes  `json:"extraData" gencodec:"required"`
+		MixDigest      *common.Hash    `json:"mixHash"`
+		Nonce          *AvaBlockNonce  `json:"nonce"`
+		ExtDataHash    *common.Hash    `json:"extDataHash" gencodec:"required"`
+		BaseFee        *hexutil.Big    `json:"baseFeePerGas" rlp:"optional"`
+		ExtDataGasUsed *hexutil.Big    `json:"extDataGasUsed" rlp:"optional"`
+		BlockGasCost   *hexutil.Big    `json:"blockGasCost" rlp:"optional"`
+	}
+	var dec Header
+	if err := json.Unmarshal(input, &dec); err != nil {
+		return err
+	}
+	if dec.ParentHash == nil {
+		return errors.New("missing required field 'parentHash' for Header")
+	}
+	h.ParentHash = *dec.ParentHash
+	if dec.UncleHash == nil {
+		return errors.New("missing required field 'sha3Uncles' for Header")
+	}
+	h.UncleHash = *dec.UncleHash
+	if dec.Coinbase == nil {
+		return errors.New("missing required field 'miner' for Header")
+	}
+	h.Coinbase = *dec.Coinbase
+	if dec.Root == nil {
+		return errors.New("missing required field 'stateRoot' for Header")
+	}
+	h.Root = *dec.Root
+	if dec.TxHash == nil {
+		return errors.New("missing required field 'transactionsRoot' for Header")
+	}
+	h.TxHash = *dec.TxHash
+	if dec.ReceiptHash == nil {
+		return errors.New("missing required field 'receiptsRoot' for Header")
+	}
+	h.ReceiptHash = *dec.ReceiptHash
+	if dec.Bloom == nil {
+		return errors.New("missing required field 'logsBloom' for Header")
+	}
+	h.Bloom = *dec.Bloom
+	if dec.Difficulty == nil {
+		return errors.New("missing required field 'difficulty' for Header")
+	}
+	h.Difficulty = (*big.Int)(dec.Difficulty)
+	if dec.Number == nil {
+		return errors.New("missing required field 'number' for Header")
+	}
+	h.Number = (*big.Int)(dec.Number)
+	if dec.GasLimit == nil {
+		return errors.New("missing required field 'gasLimit' for Header")
+	}
+	h.GasLimit = uint64(*dec.GasLimit)
+	if dec.GasUsed == nil {
+		return errors.New("missing required field 'gasUsed' for Header")
+	}
+	h.GasUsed = uint64(*dec.GasUsed)
+	if dec.Time == nil {
+		return errors.New("missing required field 'timestamp' for Header")
+	}
+	h.Time = uint64(*dec.Time)
+	if dec.Extra == nil {
+		return errors.New("missing required field 'extraData' for Header")
+	}
+	h.Extra = *dec.Extra
+	// mixHash and nonce are optional: keep the zero value when absent.
+	if dec.MixDigest != nil {
+		h.MixDigest = *dec.MixDigest
+	}
+	if dec.Nonce != nil {
+		h.Nonce = *dec.Nonce
+	}
+	if dec.ExtDataHash == nil {
+		return errors.New("missing required field 'extDataHash' for Header")
+	}
+	h.ExtDataHash = *dec.ExtDataHash
+	// rlp:"optional" fields: keep nil when absent.
+	if dec.BaseFee != nil {
+		h.BaseFee = (*big.Int)(dec.BaseFee)
+	}
+	if dec.ExtDataGasUsed != nil {
+		h.ExtDataGasUsed = (*big.Int)(dec.ExtDataGasUsed)
+	}
+	if dec.BlockGasCost != nil {
+		h.BlockGasCost = (*big.Int)(dec.BlockGasCost)
+	}
+	return nil
+}
+
+// Hash returns the keccak256 hash of the header's RLP encoding (see rlpHash).
+func (h *AvaHeader) Hash() common.Hash {
+	return rlpHash(h)
+}
+func rlpHash(x interface{}) 
(h common.Hash) {
+	sha := crypto.NewKeccakState()
+	sha.Reset()
+	rlp.Encode(sha, x) //nolint:errcheck
+	sha.Read(h[:])     //nolint:errcheck
+	return h
+}
diff --git a/core/scripts/common/avalanche_subnet.go b/core/scripts/common/avalanche_subnet.go
new file mode 100644
index 00000000..238f193d
--- /dev/null
+++ b/core/scripts/common/avalanche_subnet.go
@@ -0,0 +1,126 @@
+package common
+
+import (
+	"encoding/json"
+	"errors"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+// AvaSubnetHeader is a copy of [github.com/ava-labs/subnet-evm/core/types.Header] to avoid importing the whole module.
+type AvaSubnetHeader struct {
+	ParentHash   common.Hash    `json:"parentHash" gencodec:"required"`
+	UncleHash    common.Hash    `json:"sha3Uncles" gencodec:"required"`
+	Coinbase     common.Address `json:"miner" gencodec:"required"`
+	Root         common.Hash    `json:"stateRoot" gencodec:"required"`
+	TxHash       common.Hash    `json:"transactionsRoot" gencodec:"required"`
+	ReceiptHash  common.Hash    `json:"receiptsRoot" gencodec:"required"`
+	Bloom        AvaBloom       `json:"logsBloom" gencodec:"required"`
+	Difficulty   *big.Int       `json:"difficulty" gencodec:"required"`
+	Number       *big.Int       `json:"number" gencodec:"required"`
+	GasLimit     uint64         `json:"gasLimit" gencodec:"required"`
+	GasUsed      uint64         `json:"gasUsed" gencodec:"required"`
+	Time         uint64         `json:"timestamp" gencodec:"required"`
+	Extra        []byte         `json:"extraData" gencodec:"required"`
+	MixDigest    common.Hash    `json:"mixHash"`
+	Nonce        AvaBlockNonce  `json:"nonce"`
+	BaseFee      *big.Int       `json:"baseFeePerGas" rlp:"optional"`
+	BlockGasCost *big.Int       `json:"blockGasCost" rlp:"optional"`
+}
+
+// UnmarshalJSON decodes an Avalanche subnet-evm header from its JSON-RPC
+// form, rejecting input that is missing any gencodec:"required" field and
+// converting hexutil wrapper types to their native counterparts.
+func (h *AvaSubnetHeader) UnmarshalJSON(input []byte) error {
+	// Shadow struct with pointer fields so that absent JSON keys are
+	// distinguishable (nil) from zero values.
+	type Header struct {
+		ParentHash   *common.Hash    `json:"parentHash" gencodec:"required"`
+		UncleHash    *common.Hash    `json:"sha3Uncles" gencodec:"required"`
+		Coinbase     *common.Address `json:"miner" gencodec:"required"`
+		Root         *common.Hash    `json:"stateRoot" gencodec:"required"`
+		TxHash       *common.Hash    `json:"transactionsRoot" gencodec:"required"`
+		ReceiptHash  *common.Hash    `json:"receiptsRoot" gencodec:"required"`
+		Bloom        *AvaBloom       `json:"logsBloom" gencodec:"required"`
+		Difficulty   *hexutil.Big    `json:"difficulty" gencodec:"required"`
+		Number       *hexutil.Big    `json:"number" gencodec:"required"`
+		GasLimit     *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
+		GasUsed      *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
+		Time         *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
+		Extra        *hexutil.Bytes  `json:"extraData" gencodec:"required"`
+		MixDigest    *common.Hash    `json:"mixHash"`
+		Nonce        *AvaBlockNonce  `json:"nonce"`
+		BaseFee      *hexutil.Big    `json:"baseFeePerGas" rlp:"optional"`
+		BlockGasCost *hexutil.Big    `json:"blockGasCost" rlp:"optional"`
+	}
+	var dec Header
+	if err := json.Unmarshal(input, &dec); err != nil {
+		return err
+	}
+	if dec.ParentHash == nil {
+		return errors.New("missing required field 'parentHash' for Header")
+	}
+	h.ParentHash = *dec.ParentHash
+	if dec.UncleHash == nil {
+		return errors.New("missing required field 'sha3Uncles' for Header")
+	}
+	h.UncleHash = *dec.UncleHash
+	if dec.Coinbase == nil {
+		return errors.New("missing required field 'miner' for Header")
+	}
+	h.Coinbase = *dec.Coinbase
+	if dec.Root == nil {
+		return errors.New("missing required field 'stateRoot' for Header")
+	}
+	h.Root = *dec.Root
+	if dec.TxHash == nil {
+		return errors.New("missing required field 'transactionsRoot' for Header")
+	}
+	h.TxHash = *dec.TxHash
+	if dec.ReceiptHash == nil {
+		return errors.New("missing required field 'receiptsRoot' for Header")
+	}
+	h.ReceiptHash = *dec.ReceiptHash
+	if dec.Bloom == nil {
+		return errors.New("missing required field 'logsBloom' for Header")
+	}
+	h.Bloom = *dec.Bloom
+	if dec.Difficulty == nil {
+		return errors.New("missing required field 'difficulty' for Header")
+	}
+	h.Difficulty = (*big.Int)(dec.Difficulty)
+	if dec.Number == nil {
+		return errors.New("missing required field 'number' for Header")
+	}
+	h.Number = (*big.Int)(dec.Number)
+	if dec.GasLimit == nil {
+		return errors.New("missing required field 'gasLimit' for Header")
+	}
+	h.GasLimit = uint64(*dec.GasLimit)
+	if dec.GasUsed == nil {
+		return errors.New("missing required field 'gasUsed' for Header")
+	}
+	h.GasUsed = uint64(*dec.GasUsed)
+	if dec.Time == nil {
+		return errors.New("missing required field 'timestamp' for Header")
+	}
+	h.Time = uint64(*dec.Time)
+	if dec.Extra == nil {
+		return errors.New("missing required field 'extraData' for Header")
+	}
+	h.Extra = *dec.Extra
+	// mixHash and nonce are optional: keep the zero value when absent.
+	if dec.MixDigest != nil {
+		h.MixDigest = *dec.MixDigest
+	}
+	if dec.Nonce != nil {
+		h.Nonce = *dec.Nonce
+	}
+	// rlp:"optional" fields: keep nil when absent.
+	if dec.BaseFee != nil {
+		h.BaseFee = (*big.Int)(dec.BaseFee)
+	}
+	if dec.BlockGasCost != nil {
+		h.BlockGasCost = (*big.Int)(dec.BlockGasCost)
+	}
+	return nil
+}
+
+// Hash returns the keccak256 hash of the header's RLP encoding (see rlpHash).
+func (h *AvaSubnetHeader) Hash() common.Hash {
+	return rlpHash(h)
+}
diff --git a/core/scripts/common/helpers.go b/core/scripts/common/helpers.go
new file mode 100644
index 00000000..3bdb67a6
--- /dev/null
+++ b/core/scripts/common/helpers.go
@@ -0,0 +1,604 @@
+package common
+
+import (
+	"context"
+	"crypto/ecdsa"
+	"encoding/hex"
+	"flag"
+	"fmt"
+	"math/big"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethclient"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/rpc"
+	"github.com/shopspring/decimal"
+
+	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface"
+	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_v3_aggregator_contract"
+)
+
+type Environment struct {
+	Owner *bind.TransactOpts
+	Ec    *ethclient.Client
+
+	Jc *rpc.Client
+ + ChainID int64 +} + +func DeployLinkToken(e Environment) common.Address { + _, tx, _, err := link_token_interface.DeployLinkToken(e.Owner, e.Ec) + PanicErr(err) + return ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func DeployLinkEthFeed(e Environment, linkAddress string, weiPerUnitLink *big.Int) common.Address { + _, tx, _, err := + mock_v3_aggregator_contract.DeployMockV3AggregatorContract( + e.Owner, e.Ec, 18, weiPerUnitLink) + PanicErr(err) + return ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +// SetupEnv returns an Environment object populated from environment variables. +// If overrideNonce is set to true, the nonce will be set to what is returned +// by NonceAt (rather than the typical PendingNonceAt). +func SetupEnv(overrideNonce bool) Environment { + ethURL, set := os.LookupEnv("ETH_URL") + if !set { + panic("need eth url") + } + + chainIDEnv, set := os.LookupEnv("ETH_CHAIN_ID") + if !set { + panic("need chain ID") + } + + accountKey, set := os.LookupEnv("ACCOUNT_KEY") + if !set { + panic("need account key") + } + + ec, err := ethclient.Dial(ethURL) + PanicErr(err) + + jsonRPCClient, err := rpc.Dial(ethURL) + PanicErr(err) + + chainID, err := strconv.ParseInt(chainIDEnv, 10, 64) + PanicErr(err) + + // Owner key. 
Make sure it has eth + b, err := hex.DecodeString(accountKey) + PanicErr(err) + d := new(big.Int).SetBytes(b) + + pkX, pkY := crypto.S256().ScalarBaseMult(d.Bytes()) + privateKey := ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: crypto.S256(), + X: pkX, + Y: pkY, + }, + D: d, + } + owner, err := bind.NewKeyedTransactorWithChainID(&privateKey, big.NewInt(chainID)) + PanicErr(err) + // Explicitly set gas price to ensure non-eip 1559 + gp, err := ec.SuggestGasPrice(context.Background()) + PanicErr(err) + fmt.Println("Suggested Gas Price:", gp, "wei") + owner.GasPrice = gp + gasLimit, set := os.LookupEnv("GAS_LIMIT") + if set { + parsedGasLimit, err := strconv.ParseUint(gasLimit, 10, 64) + if err != nil { + panic(fmt.Sprintf("Failure while parsing GAS_LIMIT: %s", gasLimit)) + } + owner.GasLimit = parsedGasLimit + } + + if overrideNonce { + block, err := ec.BlockNumber(context.Background()) + PanicErr(err) + + nonce, err := ec.NonceAt(context.Background(), owner.From, big.NewInt(int64(block))) + PanicErr(err) + + owner.Nonce = big.NewInt(int64(nonce)) + } + owner.GasPrice = gp.Mul(gp, big.NewInt(2)) + fmt.Println("Modified Gas Price that will be set:", owner.GasPrice, "wei") + // the execution environment for the scripts + return Environment{ + Owner: owner, + Ec: ec, + Jc: jsonRPCClient, + ChainID: chainID, + } +} + +// PanicErr panics if error the given error is non-nil. +func PanicErr(err error) { + if err != nil { + panic(err) + } +} + +// ParseArgs parses arguments and ensures required args are set. 
+func ParseArgs(flagSet *flag.FlagSet, args []string, requiredArgs ...string) { + PanicErr(flagSet.Parse(args)) + seen := map[string]bool{} + argValues := map[string]string{} + flagSet.Visit(func(f *flag.Flag) { + seen[f.Name] = true + argValues[f.Name] = f.Value.String() + }) + for _, req := range requiredArgs { + if !seen[req] { + panic(fmt.Errorf("missing required -%s argument/flag", req)) + } + } +} + +func explorerLinkPrefix(chainID int64) (prefix string) { + switch chainID { + case 1: // ETH mainnet + prefix = "https://etherscan.io" + case 4: // Rinkeby + prefix = "https://rinkeby.etherscan.io" + case 5: // Goerli + prefix = "https://goerli.etherscan.io" + case 42: // Kovan + prefix = "https://kovan.etherscan.io" + case 11155111: // Sepolia + prefix = "https://sepolia.etherscan.io" + + case 420: // Optimism Goerli + prefix = "https://goerli-optimism.etherscan.io" + + case ArbitrumGoerliChainID: // Arbitrum Goerli + prefix = "https://goerli.arbiscan.io" + case ArbitrumOneChainID: // Arbitrum mainnet + prefix = "https://arbiscan.io" + case ArbitrumSepoliaChainID: // Arbitrum Sepolia + prefix = "https://sepolia.arbiscan.io" + + case 56: // BSC mainnet + prefix = "https://bscscan.com" + case 97: // BSC testnet + prefix = "https://testnet.bscscan.com" + + case 137: // Polygon mainnet + prefix = "https://polygonscan.com" + case 80001: // Polygon Mumbai testnet + prefix = "https://mumbai.polygonscan.com" + + case 250: // Fantom mainnet + prefix = "https://ftmscan.com" + case 4002: // Fantom testnet + prefix = "https://testnet.ftmscan.com" + + case 43114: // Avalanche mainnet + prefix = "https://snowtrace.io" + case 43113: // Avalanche testnet + prefix = "https://testnet.snowtrace.io" + case 335: // Defi Kingdoms testnet + prefix = "https://subnets-test.avax.network/defi-kingdoms" + case 53935: // Defi Kingdoms mainnet + prefix = "https://subnets.avax.network/defi-kingdoms" + + case 1666600000, 1666600001, 1666600002, 1666600003: // Harmony mainnet + prefix = 
"https://explorer.harmony.one" + case 1666700000, 1666700001, 1666700002, 1666700003: // Harmony testnet + prefix = "https://explorer.testnet.harmony.one" + + case 84531: + prefix = "https://goerli.basescan.org" + case 8453: + prefix = "https://basescan.org" + + case 280: // zkSync Goerli testnet + prefix = "https://goerli.explorer.zksync.io" + case 324: // zkSync mainnet + prefix = "https://explorer.zksync.io" + + default: // Unknown chain, return prefix as-is + prefix = "" + } + return +} + +func automationExplorerNetworkName(chainID int64) (prefix string) { + switch chainID { + case 1: // ETH mainnet + prefix = "mainnet" + case 5: // Goerli + prefix = "goerli" + case 11155111: // Sepolia + prefix = "sepolia" + + case 420: // Optimism Goerli + prefix = "optimism-goerli" + + case ArbitrumGoerliChainID: // Arbitrum Goerli + prefix = "arbitrum-goerli" + case ArbitrumOneChainID: // Arbitrum mainnet + prefix = "arbitrum" + case ArbitrumSepoliaChainID: // Arbitrum Sepolia + prefix = "arbitrum-sepolia" + + case 56: // BSC mainnet + prefix = "bsc" + case 97: // BSC testnet + prefix = "bnb-chain-testnet" + + case 137: // Polygon mainnet + prefix = "polygon" + case 80001: // Polygon Mumbai testnet + prefix = "mumbai" + + case 250: // Fantom mainnet + prefix = "fantom" + case 4002: // Fantom testnet + prefix = "fantom-testnet" + + case 43114: // Avalanche mainnet + prefix = "avalanche" + case 43113: // Avalanche testnet + prefix = "fuji" + + default: // Unknown chain, return prefix as-is + prefix = "" + } + return +} + +// ExplorerLink creates a block explorer link for the given transaction hash. If the chain ID is +// unrecognized, the hash is returned as-is. +func ExplorerLink(chainID int64, txHash common.Hash) string { + prefix := explorerLinkPrefix(chainID) + if prefix != "" { + return fmt.Sprintf("%s/tx/%s", prefix, txHash.String()) + } + return txHash.String() +} + +// ContractExplorerLink creates a block explorer link for the given contract address. 
+// If the chain ID is unrecognized the address is returned as-is. +func ContractExplorerLink(chainID int64, contractAddress common.Address) string { + prefix := explorerLinkPrefix(chainID) + if prefix != "" { + return fmt.Sprintf("%s/address/%s", prefix, contractAddress.Hex()) + } + return contractAddress.Hex() +} + +func TenderlySimLink(simID string) string { + return fmt.Sprintf("https://dashboard.tenderly.co/simulator/%s", simID) +} + +// ConfirmTXMined confirms that the given transaction is mined and prints useful execution information. +func ConfirmTXMined(context context.Context, client *ethclient.Client, transaction *types.Transaction, chainID int64, txInfo ...string) (receipt *types.Receipt) { + fmt.Println("Executing TX", ExplorerLink(chainID, transaction.Hash()), txInfo) + receipt, err := bind.WaitMined(context, client, transaction) + PanicErr(err) + fmt.Println("TX", receipt.TxHash, "mined. \nBlock Number:", receipt.BlockNumber, + "\nGas Used: ", receipt.GasUsed, + "\nBlock hash: ", receipt.BlockHash.String()) + return +} + +// ConfirmContractDeployed confirms that the given contract deployment transaction completed and prints useful execution information. 
+func ConfirmContractDeployed(context context.Context, client *ethclient.Client, transaction *types.Transaction, chainID int64) (address common.Address) { + fmt.Println("Executing contract deployment, TX:", ExplorerLink(chainID, transaction.Hash())) + contractAddress, err := bind.WaitDeployed(context, client, transaction) + PanicErr(err) + fmt.Println("Contract Address:", contractAddress.String()) + fmt.Println("Contract explorer link:", ContractExplorerLink(chainID, contractAddress)) + return contractAddress +} + +func ConfirmCodeAt(ctx context.Context, client *ethclient.Client, addr common.Address, chainID int64) { + fmt.Println("Confirming contract deployment:", addr) + timeout := time.After(time.Minute) + for { + select { + case <-time.After(2 * time.Second): + fmt.Println("getting code at", addr) + code, err := client.CodeAt(ctx, addr, nil) + PanicErr(err) + if len(code) > 0 { + fmt.Println("contract deployment confirmed:", ContractExplorerLink(chainID, addr)) + return + } + case <-timeout: + fmt.Println("Could not confirm contract deployment:", addr) + return + } + } +} + +// ParseBigIntSlice parses the given comma-separated string of integers into a slice +// of *big.Int objects. +func ParseBigIntSlice(arg string) (ret []*big.Int) { + parts := strings.Split(arg, ",") + ret = []*big.Int{} + for _, part := range parts { + ret = append(ret, decimal.RequireFromString(part).BigInt()) + } + return ret +} + +// ParseIntSlice parses the given comma-separated string of integers into a slice +// of int. +func ParseIntSlice(arg string) (ret []int) { + parts := strings.Split(arg, ",") + for _, part := range parts { + num, err := strconv.Atoi(part) + PanicErr(err) + ret = append(ret, num) + } + return ret +} + +// ParseAddressSlice parses the given comma-separated string of addresses into a slice +// of common.Address objects. 
func ParseAddressSlice(arg string) (ret []common.Address) {
	parts := strings.Split(arg, ",")
	ret = []common.Address{}
	for _, part := range parts {
		ret = append(ret, common.HexToAddress(part))
	}
	return
}

// ParseHashSlice parses the given comma-separated string of hashes into a slice of
// common.Hash objects.
func ParseHashSlice(arg string) (ret []common.Hash) {
	parts := strings.Split(arg, ",")
	ret = []common.Hash{}
	for _, part := range parts {
		ret = append(ret, common.HexToHash(part))
	}
	return
}

// ParseHexSlice parses the given comma-separated string of 0x-prefixed hex strings
// into a slice of byte slices. Panics (via hexutil.MustDecode) on invalid hex.
func ParseHexSlice(arg string) (ret [][]byte) {
	parts := strings.Split(arg, ",")
	for _, part := range parts {
		ret = append(ret, hexutil.MustDecode(part))
	}
	return
}

// FundNodes sends fundingAmount (in wei) from the owner account to each of the
// given transmitter addresses, one transaction at a time.
func FundNodes(e Environment, transmitters []string, fundingAmount *big.Int) {
	for _, transmitter := range transmitters {
		FundNode(e, transmitter, fundingAmount)
	}
}

// FundNode sends fundingAmount wei from the owner account to address via a legacy
// transaction and waits for it to be mined. Panics on any error.
func FundNode(e Environment, address string, fundingAmount *big.Int) {
	block, err := e.Ec.BlockNumber(context.Background())
	PanicErr(err)

	// NOTE(review): nonce is read at the latest block rather than via PendingNonceAt,
	// so concurrent pending transactions from the owner could collide — confirm intended.
	nonce, err := e.Ec.NonceAt(context.Background(), e.Owner.From, big.NewInt(int64(block)))
	PanicErr(err)
	// Special case for Arbitrum since gas estimation there is different.

	var gasLimit uint64
	if IsArbitrumChainID(e.ChainID) {
		to := common.HexToAddress(address)
		estimated, err2 := e.Ec.EstimateGas(context.Background(), ethereum.CallMsg{
			From:  e.Owner.From,
			To:    &to,
			Value: fundingAmount,
		})
		PanicErr(err2)
		gasLimit = estimated
	} else {
		// Plain value transfer on non-Arbitrum chains: fixed 21k gas.
		gasLimit = uint64(21_000)
	}
	toAddress := common.HexToAddress(address)

	tx := types.NewTx(
		&types.LegacyTx{
			Nonce:    nonce,
			GasPrice: e.Owner.GasPrice,
			Gas:      gasLimit,
			To:       &toAddress,
			Value:    fundingAmount,
			Data:     nil,
		})

	signedTx, err := e.Owner.Signer(e.Owner.From, tx)
	PanicErr(err)
	err = e.Ec.SendTransaction(context.Background(), signedTx)
	PanicErr(err)
	fmt.Printf("Sending to %s: %s\n", address, ExplorerLink(e.ChainID, signedTx.Hash()))
	// NOTE(review): err was already checked two lines above; this PanicErr is redundant.
	PanicErr(err)
	_, err = bind.WaitMined(context.Background(), e.Ec, signedTx)
	PanicErr(err)
}

// BinarySearch finds the highest value within the range bottom-top at which the test function is
// true.
func BinarySearch(top, bottom *big.Int, test func(amount *big.Int) bool) *big.Int {
	var runs int
	// While the difference between top and bottom is > 1
	for new(big.Int).Sub(top, bottom).Cmp(big.NewInt(1)) > 0 {
		// Calculate midpoint between top and bottom
		midpoint := new(big.Int).Sub(top, bottom)
		midpoint.Div(midpoint, big.NewInt(2))
		midpoint.Add(midpoint, bottom)

		// Check if the midpoint amount is withdrawable
		if test(midpoint) {
			bottom = midpoint
		} else {
			top = midpoint
		}

		runs++
		// Progress indicator every 10 iterations.
		if runs%10 == 0 {
			fmt.Printf("Searching... current range %s-%s\n", bottom.String(), top.String())
		}
	}

	return bottom
}

// GetRlpHeaders gets RLP encoded headers of a list of block numbers
// Makes RPC network call eth_getBlockByNumber to blockchain RPC node
// to fetch header info.
// When getParentBlocks is true, the header fetched for each entry is that of
// blockNum+1 (the child block, which carries the parent hash in its header).
// Chain-specific header types are used for Avalanche C-chain, Avalanche
// subnets, and Polygon Edge networks; all other chains go through the regular
// go-ethereum ethclient.
func GetRlpHeaders(env Environment, blockNumbers []*big.Int, getParentBlocks bool) (headers [][]byte, hashes []string, err error) {

	hashes = make([]string, 0)

	offset := big.NewInt(0)
	if getParentBlocks {
		offset = big.NewInt(1)
	}

	headers = [][]byte{}
	var rlpHeader []byte
	for _, blockNum := range blockNumbers {
		// Avalanche block headers are special, handle them by using the avalanche rpc client
		// rather than the regular go-ethereum ethclient.
		if IsAvaxNetwork(env.ChainID) {
			var h AvaHeader
			// Get child block since it's the one that has the parent hash in its header.
			nextBlockNum := new(big.Int).Set(blockNum).Add(blockNum, offset)
			err2 := env.Jc.CallContext(context.Background(), &h, "eth_getBlockByNumber", hexutil.EncodeBig(nextBlockNum), false)
			if err2 != nil {
				return nil, hashes, fmt.Errorf("failed to get header: %+v", err2)
			}
			// We can still use vanilla go-ethereum rlp.EncodeToBytes, see e.g
			// https://github.com/ava-labs/coreth/blob/e3ca41bf5295a9a7ca1aeaf29d541fcbb94f79b1/core/types/hashing.go#L49-L57.
			rlpHeader, err2 = rlp.EncodeToBytes(h)
			if err2 != nil {
				return nil, hashes, fmt.Errorf("failed to encode rlp: %+v", err2)
			}

			hashes = append(hashes, h.Hash().String())

			// Sanity check - can be un-commented if storeVerifyHeader is failing due to unexpected
			// blockhash.
			//bh := crypto.Keccak256Hash(rlpHeader)
			//fmt.Println("Calculated BH:", bh.String(),
			//	"fetched BH:", h.Hash(),
			//	"block number:", new(big.Int).Set(blockNum).Add(blockNum, offset).String())
		} else if IsAvaxSubnet(env.ChainID) {
			var h AvaSubnetHeader
			// Get child block since it's the one that has the parent hash in its header.
			nextBlockNum := new(big.Int).Set(blockNum).Add(blockNum, offset)
			err2 := env.Jc.CallContext(context.Background(), &h, "eth_getBlockByNumber", hexutil.EncodeBig(nextBlockNum), false)
			if err2 != nil {
				return nil, hashes, fmt.Errorf("failed to get header: %+v", err2)
			}
			rlpHeader, err2 = rlp.EncodeToBytes(h)
			if err2 != nil {
				return nil, hashes, fmt.Errorf("failed to encode rlp: %+v", err2)
			}

			hashes = append(hashes, h.Hash().String())
		} else if IsPolygonEdgeNetwork(env.ChainID) {

			// Get child block since it's the one that has the parent hash in its header.
			nextBlockNum := new(big.Int).Set(blockNum).Add(blockNum, offset)
			var hash string
			// Polygon Edge headers need custom RLP marshalling; see polygonedge.go.
			rlpHeader, hash, err = GetPolygonEdgeRLPHeader(env.Jc, nextBlockNum)
			if err != nil {
				return nil, hashes, fmt.Errorf("failed to encode rlp: %+v", err)
			}

			hashes = append(hashes, hash)

		} else {
			// Get child block since it's the one that has the parent hash in its header.
			h, err2 := env.Ec.HeaderByNumber(
				context.Background(),
				new(big.Int).Set(blockNum).Add(blockNum, offset),
			)
			if err2 != nil {
				return nil, hashes, fmt.Errorf("failed to get header: %+v", err2)
			}
			rlpHeader, err2 = rlp.EncodeToBytes(h)
			if err2 != nil {
				return nil, hashes, fmt.Errorf("failed to encode rlp: %+v", err2)
			}

			hashes = append(hashes, h.Hash().String())
		}

		headers = append(headers, rlpHeader)
	}
	return
}

// IsPolygonEdgeNetwork returns true if the given chain ID corresponds to a Polygon Edge network.
+func IsPolygonEdgeNetwork(chainID int64) bool { + return chainID == 100 || // Nexon test supernet + chainID == 500 // Nexon test supernet +} + +func CalculateLatestBlockHeader(env Environment, blockNumberInput int) (err error) { + blockNumber := uint64(blockNumberInput) + if blockNumberInput == -1 { + blockNumber, err = env.Ec.BlockNumber(context.Background()) + if err != nil { + return fmt.Errorf("failed to fetch latest block: %+v", err) + } + } + + // GetRLPHeaders method increments the blockNum sent by 1 and then fetches + // block headers for the child block. + blockNumber = blockNumber - 1 + + blockNumberBigInts := []*big.Int{big.NewInt(int64(blockNumber))} + headers, hashes, err := GetRlpHeaders(env, blockNumberBigInts, true) + if err != nil { + fmt.Println(err) + return err + } + + rlpHeader := headers[0] + bh := crypto.Keccak256Hash(rlpHeader) + fmt.Println("Calculated BH:", bh.String(), + "\nfetched BH:", hashes[0], + "\nRLP encoding of header: ", hex.EncodeToString(rlpHeader), ", len: ", len(rlpHeader), + "\nblock number:", new(big.Int).Set(blockNumberBigInts[0]).Add(blockNumberBigInts[0], big.NewInt(1)).String(), + fmt.Sprintf("\nblock number hex: 0x%x\n", blockNumber+1)) + + return err +} + +// IsAvaxNetwork returns true if the given chain ID corresponds to an avalanche network. +func IsAvaxNetwork(chainID int64) bool { + return chainID == 43114 || // C-chain mainnet + chainID == 43113 // Fuji testnet +} + +// IsAvaxSubnet returns true if the given chain ID corresponds to an avalanche subnet. 
+func IsAvaxSubnet(chainID int64) bool { + return chainID == 335 || // DFK testnet + chainID == 53935 || // DFK mainnet + chainID == 955081 || // Nexon Dev + chainID == 595581 || // Nexon Test + chainID == 807424 || // Nexon QA + chainID == 847799 // Nexon Stage +} + +func UpkeepLink(chainID int64, upkeepID *big.Int) string { + return fmt.Sprintf("https://automation.chain.link/%s/%s", automationExplorerNetworkName(chainID), upkeepID.String()) +} diff --git a/core/scripts/common/helpers_test.go b/core/scripts/common/helpers_test.go new file mode 100644 index 00000000..4ca0823d --- /dev/null +++ b/core/scripts/common/helpers_test.go @@ -0,0 +1,65 @@ +package common + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBinarySearch(t *testing.T) { + tests := []struct { + name string + top, bottom int64 + result int64 + }{ + { + name: "zero 1", + bottom: 0, + top: 100, + result: 0, + }, + { + name: "zero 2", + bottom: 0, + top: 99, + result: 0, + }, + { + name: "one", + bottom: 0, + top: 100, + result: 1, + }, + { + name: "one2", + bottom: 0, + top: 99, + result: 1, + }, + { + name: "mid", + bottom: 0, + top: 159, + result: 80, + }, + { + name: "mid 2", + bottom: 0, + top: 159, + result: 81, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + + testFunc := func(val *big.Int) bool { + return val.Cmp(big.NewInt(test.result)) < 1 + } + + result := BinarySearch(big.NewInt(test.top), big.NewInt(test.bottom), testFunc) + assert.Equal(t, test.result, result.Int64()) + }) + } +} diff --git a/core/scripts/common/polygonedge.go b/core/scripts/common/polygonedge.go new file mode 100644 index 00000000..c91d76cc --- /dev/null +++ b/core/scripts/common/polygonedge.go @@ -0,0 +1,187 @@ +package common + +import ( + "encoding/hex" + "fmt" + "math/big" + "reflect" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" 
+ "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/rpc" + "github.com/umbracle/fastrlp" +) + +type Nonce [8]byte + +var ( + nonceT = reflect.TypeOf(Nonce{}) +) + +func (n Nonce) String() string { + return hexutil.Encode(n[:]) +} + +// MarshalText implements encoding.TextMarshaler +func (n Nonce) MarshalText() ([]byte, error) { + return []byte(n.String()), nil +} + +// UnmarshalJSON parses a nonce in hex syntax. +func (n *Nonce) UnmarshalJSON(input []byte) error { + return hexutil.UnmarshalFixedJSON(nonceT, input, n[:]) +} + +type ExtraData string + +func (e ExtraData) Decode() ([]byte, error) { + return hexutil.Decode(string(e)) +} + +// Header represents a block header in the Ethereum blockchain. +type PolygonEdgeHeader struct { + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + Sha3Uncles common.Hash `json:"sha3Uncles" gencodec:"required"` + Miner common.Address `json:"miner"` + StateRoot common.Hash `json:"stateRoot" gencodec:"required"` + TxRoot common.Hash `json:"transactionsRoot" gencodec:"required"` + ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom types.Bloom `json:"logsBloom" gencodec:"required"` + Difficulty hexutil.Uint64 `json:"difficulty" gencodec:"required"` + Number hexutil.Uint64 `json:"number" gencodec:"required"` + GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` + ExtraData ExtraData `json:"extraData" gencodec:"required"` + MixHash common.Hash `json:"mixHash"` + Nonce Nonce `json:"nonce"` + Hash common.Hash `json:"hash"` + + // baseFeePerGas is the response format from go-ethereum. 
Polygon-Edge + // seems to have fixed this in this commit: + // https://github.com/0xPolygon/polygon-edge/commit/e859acf7e7f0286ceeecce022b978c8fdb57d71b + // But node operators dont seem to have updated their polygon-edge client + // version and still send baseFee instead of baseFeePerGas. + // BaseFee was added by EIP-1559 and is ignored in legacy headers. + BaseFee hexutil.Uint64 `json:"baseFeePerGas"` + BaseFeeAlt hexutil.Uint64 `json:"baseFee,omitempty"` +} + +func GetPolygonEdgeRLPHeader(jsonRPCClient *rpc.Client, blockNum *big.Int) (rlpHeader []byte, hash string, err error) { + var h PolygonEdgeHeader + err = jsonRPCClient.Call(&h, "eth_getBlockByNumber", "0x"+blockNum.Text(16), true) + if err != nil { + return nil, "", fmt.Errorf("failed to get poloygon-edge header: %+v", err) + } + + ar := &fastrlp.Arena{} + val, err := MarshalRLPWith(ar, &h) + if err != nil { + return nil, "", err + } + + dst := make([]byte, 0) + dst = val.MarshalTo(dst) + + return dst, h.Hash.String(), err +} + +// MarshalRLPWith marshals the header to RLP with a specific fastrlp.Arena +// Adding polygon-edge as a dependency caused a lot of issues with conflicting +// dependency version with other libraries in this repo and some methods being +// referenced from older versions +// Reference: https://github.com/0xPolygon/polygon-edge/blob/develop/types/rlp_marshal.go#L73C50-L73C53 +func MarshalRLPWith(arena *fastrlp.Arena, h *PolygonEdgeHeader) (*fastrlp.Value, error) { + vv := arena.NewArray() + + vv.Set(arena.NewCopyBytes(h.ParentHash.Bytes())) + vv.Set(arena.NewCopyBytes(h.Sha3Uncles.Bytes())) + vv.Set(arena.NewCopyBytes(h.Miner[:])) + vv.Set(arena.NewCopyBytes(h.StateRoot.Bytes())) + vv.Set(arena.NewCopyBytes(h.TxRoot.Bytes())) + vv.Set(arena.NewCopyBytes(h.ReceiptsRoot.Bytes())) + vv.Set(arena.NewCopyBytes(h.LogsBloom[:])) + + vv.Set(arena.NewUint(uint64(h.Difficulty))) + vv.Set(arena.NewUint(uint64(h.Number))) + vv.Set(arena.NewUint(uint64(h.GasLimit))) + 
vv.Set(arena.NewUint(uint64(h.GasUsed))) + vv.Set(arena.NewUint(uint64(h.Timestamp))) + + extraDataBytes, err := h.ExtraData.Decode() + if err != nil { + return nil, fmt.Errorf("failed to hex decode polygon-edge ExtraData: %+v", err) + } + extraDataBytes, err = GetIbftExtraClean(extraDataBytes) + if err != nil { + return nil, fmt.Errorf("GetIbftExtraClean error : %+v", err) + } + vv.Set(arena.NewCopyBytes(extraDataBytes)) + vv.Set(arena.NewCopyBytes(h.MixHash.Bytes())) + + nonceHexString := h.Nonce.String() + nonceBytes, err := hexutil.Decode(nonceHexString) + if err != nil { + return nil, fmt.Errorf("failed to hex decode polygon-edge ExtraData: %+v", err) + } + vv.Set(arena.NewCopyBytes(nonceBytes)) + + baseFee := h.BaseFee + if h.BaseFeeAlt > 0 { + baseFee = h.BaseFeeAlt + } + vv.Set(arena.NewUint(uint64(baseFee))) + + return vv, nil +} + +// Remove blockHeader.ExtraData.Committed without unpacking ExtraData into +// its full fledged type, which needs the full import of the package +// github.com/0xPolygon/polygon-edge. polygon-edge is a node implementation, +// and not a client. Adding polygon-edge as a dependency caused a lot of +// issues with conflicting dependency version with other libraries in this +// repo and some methods being referenced from older versions. 
+func GetIbftExtraClean(extra []byte) (cleanedExtra []byte, err error) { + // Capture prefix 0's sent by nexon supernet + hexExtra := hex.EncodeToString(extra) + prefix := "" + for _, s := range hexExtra { + if s != '0' { + break + } + prefix = prefix + "0" + } + + hexExtra = strings.TrimLeft(hexExtra, "0") + extra, err = hex.DecodeString(hexExtra) + if err != nil { + return nil, fmt.Errorf("invalid extra data in polygon-edge chain: %+v", err) + } + + var extraData []interface{} + err = rlp.DecodeBytes(extra, &extraData) + if err != nil { + return nil, err + } + + // Remove Committed from blockHeader.ExtraData, because it holds signatures for + // the current block which is not finalized until the next block. So this gets + // ignored when calculating the hash + // Reference: https://github.com/0xPolygon/polygon-edge/blob/develop/consensus/polybft/hash.go#L20-L27. + if len(extraData) > 3 { + extraData[2] = []interface{}{[]byte{}, []byte{}} + } + + cleanedExtra, err = rlp.EncodeToBytes(extraData) + if err != nil { + return nil, err + } + + // Add prefix 0's sent by nexon supernet before sending output + hexExtra = prefix + hex.EncodeToString(cleanedExtra) + cleanedExtra, err = hex.DecodeString(hexExtra) + return cleanedExtra, err +} diff --git a/core/scripts/common/vrf/constants/constants.go b/core/scripts/common/vrf/constants/constants.go new file mode 100644 index 00000000..2a064593 --- /dev/null +++ b/core/scripts/common/vrf/constants/constants.go @@ -0,0 +1,36 @@ +package constants + +import ( + "math/big" +) + +var ( + SubscriptionBalanceJuels = "1e19" + SubscriptionBalanceNativeWei = "1e18" + + // optional flags + FallbackWeiPerUnitLink = big.NewInt(6e16) + BatchFulfillmentEnabled = true + MinConfs = 3 + NodeSendingKeyFundingAmount = "1e17" + MaxGasLimit = int64(2.5e6) + StalenessSeconds = int64(86400) + GasAfterPayment = int64(33285) + + //vrfv2 + FlatFeeTier1 = int64(500) + FlatFeeTier2 = int64(500) + FlatFeeTier3 = int64(500) + FlatFeeTier4 = int64(500) + 
FlatFeeTier5 = int64(500) + ReqsForTier2 = int64(0) + ReqsForTier3 = int64(0) + ReqsForTier4 = int64(0) + ReqsForTier5 = int64(0) + + //vrfv2plus + FlatFeeNativePPM = uint32(500) + FlatFeeLinkDiscountPPM = uint32(100) + NativePremiumPercentage = uint8(1) + LinkPremiumPercentage = uint8(1) +) diff --git a/core/scripts/common/vrf/docker/db/create-multiple-databases.sh b/core/scripts/common/vrf/docker/db/create-multiple-databases.sh new file mode 100644 index 00000000..9b0c7b0d --- /dev/null +++ b/core/scripts/common/vrf/docker/db/create-multiple-databases.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -e +set -u + +function create_user_and_database() { + local database=$1 + echo " Creating user and database '$database'" + psql -v ON_ERROR_STOP=1 --username postgres <<-EOSQL + CREATE DATABASE $database; +EOSQL +} + +if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then + echo "Multiple database creation requested: $POSTGRES_MULTIPLE_DATABASES" + for db in $(echo $POSTGRES_MULTIPLE_DATABASES | tr ',' ' '); do + create_user_and_database $db + done + echo "Multiple databases created" +fi diff --git a/core/scripts/common/vrf/docker/docker-compose.yml b/core/scripts/common/vrf/docker/docker-compose.yml new file mode 100644 index 00000000..aeae4595 --- /dev/null +++ b/core/scripts/common/vrf/docker/docker-compose.yml @@ -0,0 +1,140 @@ +version: '3.5' +services: + plugin-node-0: + container_name: vrf-primary-node + image: ${NODE_IMAGE_VERSION} + entrypoint: + - /opt/docker-wait-for-others.sh + - database:5432 + - -- + command: plugin -config /opt/config/base.toml -config /opt/config/rpc-nodes.toml -config /opt/config/vrf-primary.toml node start -p /run/secrets/node_password -a /run/secrets/apicredentials + environment: + - CL_DATABASE_URL=postgresql://postgres:$PLUGIN_PGPASSWORD@database:5432/plugin_0_test?sslmode=disable + platform: "linux/x86_64" + volumes: + - ./wait-for-others/docker-wait-for-it.sh:/opt/docker-wait-for-it.sh:ro + - 
./wait-for-others/docker-wait-for-others.sh:/opt/docker-wait-for-others.sh:ro + - ./toml-config:/opt/config:ro + ports: + - "6610:6688" + - "6059:6060" + secrets: + - apicredentials + - node_password + depends_on: + - database + + plugin-node-1: + container_name: vrf-backup-node + image: ${NODE_IMAGE_VERSION} + entrypoint: + - /opt/docker-wait-for-others.sh + - database:5432 + - -- + command: plugin node -config /opt/config/base.toml -config /opt/config/rpc-nodes.toml -config /opt/config/vrf-primary.toml -config /opt/config/vrf-backup.toml start -p /run/secrets/node_password -a /run/secrets/apicredentials + environment: + - CL_DATABASE_URL=postgresql://postgres:$PLUGIN_PGPASSWORD@database:5432/plugin_1_test?sslmode=disable + platform: "linux/x86_64" + volumes: + - ./wait-for-others/docker-wait-for-it.sh:/opt/docker-wait-for-it.sh:ro + - ./wait-for-others/docker-wait-for-others.sh:/opt/docker-wait-for-others.sh:ro + - ./toml-config:/opt/config:ro + ports: + - "6611:6688" + - "6060:6060" + secrets: + - apicredentials + - node_password + depends_on: + - database + + plugin-node-2: + container_name: bhs-node + image: ${NODE_IMAGE_VERSION} + entrypoint: + - /opt/docker-wait-for-others.sh + - database:5432 + - -- + command: plugin node -config /opt/config/base.toml -config /opt/config/rpc-nodes.toml -config /opt/config/bhs.toml start -p /run/secrets/node_password -a /run/secrets/apicredentials + volumes: + - ./wait-for-others/docker-wait-for-it.sh:/opt/docker-wait-for-it.sh:ro + - ./wait-for-others/docker-wait-for-others.sh:/opt/docker-wait-for-others.sh:ro + - ./toml-config:/opt/config:ro + environment: + - CL_DATABASE_URL=postgresql://postgres:$PLUGIN_PGPASSWORD@database:5432/plugin_2_test?sslmode=disable + platform: "linux/x86_64" + ports: + - "6612:6688" + - "6062:6060" + secrets: + - apicredentials + - node_password + depends_on: + - database + + plugin-node-3: + container_name: bhs-backup-node + image: ${NODE_IMAGE_VERSION} + entrypoint: + - 
/opt/docker-wait-for-others.sh + - database:5432 + - -- + command: plugin node -config /opt/config/base.toml -config /opt/config/rpc-nodes.toml -config /opt/config/bhs.toml start -p /run/secrets/node_password -a /run/secrets/apicredentials + volumes: + - ./wait-for-others/docker-wait-for-it.sh:/opt/docker-wait-for-it.sh:ro + - ./wait-for-others/docker-wait-for-others.sh:/opt/docker-wait-for-others.sh:ro + - ./toml-config:/opt/config:ro + environment: + - CL_DATABASE_URL=postgresql://postgres:$PLUGIN_PGPASSWORD@database:5432/plugin_3_test?sslmode=disable + platform: "linux/x86_64" + ports: + - "6613:6688" + - "6063:6060" + secrets: + - apicredentials + - node_password + depends_on: + - database + + plugin-node-4: + container_name: bhf-node + image: ${NODE_IMAGE_VERSION} + entrypoint: + - /opt/docker-wait-for-others.sh + - database:5432 + - -- + command: plugin node -config /opt/config/base.toml -config /opt/config/rpc-nodes.toml -config /opt/config/bhf.toml start -p /run/secrets/node_password -a /run/secrets/apicredentials + volumes: + - ./wait-for-others/docker-wait-for-it.sh:/opt/docker-wait-for-it.sh:ro + - ./wait-for-others/docker-wait-for-others.sh:/opt/docker-wait-for-others.sh:ro + - ./toml-config:/opt/config:ro + environment: + - CL_DATABASE_URL=postgresql://postgres:$PLUGIN_PGPASSWORD@database:5432/plugin_4_test?sslmode=disable + platform: "linux/x86_64" + ports: + - "6614:6688" + - "6064:6060" + secrets: + - apicredentials + - node_password + depends_on: + - database + + database: + image: postgres:11.6 + volumes: + - ./db/create-multiple-databases.sh:/docker-entrypoint-initdb.d/create-multiple-databases.sh + environment: + POSTGRES_PASSWORD: plugin + POSTGRES_MULTIPLE_DATABASES: plugin_0_test,plugin_1_test,plugin_2_test,plugin_3_test,plugin_4_test + ports: + - "5432:5432" + +secrets: + node_password: + file: secrets/password.txt + apicredentials: + file: secrets/apicredentials + +volumes: + docker-compose-db: diff --git 
a/core/scripts/common/vrf/docker/sample.env b/core/scripts/common/vrf/docker/sample.env
new file mode 100644
index 00000000..db8ac128
--- /dev/null
+++ b/core/scripts/common/vrf/docker/sample.env
@@ -0,0 +1,5 @@
+# Used for docker compose variables only; replace the sample values before use
+NODE_IMAGE_VERSION=public.ecr.aws/plugin/plugin:2.3.0
+
+PLUGIN_DB_NAME=plugin
+PLUGIN_PGPASSWORD=plugin
diff --git a/core/scripts/common/vrf/docker/secrets/apicredentials b/core/scripts/common/vrf/docker/secrets/apicredentials
new file mode 100644
index 00000000..fa32a892
--- /dev/null
+++ b/core/scripts/common/vrf/docker/secrets/apicredentials
@@ -0,0 +1,2 @@
+test@test.com
+1234567890password
diff --git a/core/scripts/common/vrf/docker/secrets/password.txt b/core/scripts/common/vrf/docker/secrets/password.txt
new file mode 100644
index 00000000..54714ef7
--- /dev/null
+++ b/core/scripts/common/vrf/docker/secrets/password.txt
@@ -0,0 +1 @@
+1234567890password
diff --git a/core/scripts/common/vrf/docker/toml-config/base.toml b/core/scripts/common/vrf/docker/toml-config/base.toml
new file mode 100644
index 00000000..ea6c9d12
--- /dev/null
+++ b/core/scripts/common/vrf/docker/toml-config/base.toml
@@ -0,0 +1,30 @@
+RootDir = '/home/plugin'
+
+[Database]
+MaxIdleConns = 20
+MaxOpenConns = 40
+MigrateOnStartup = true
+
+[Log]
+Level = 'debug'
+JSONConsole = true
+
+[Log.File]
+MaxSize = '0b'
+
+[WebServer]
+AllowOrigins = '*'
+HTTPPort = 6688
+SecureCookies = false
+
+[WebServer.RateLimit]
+Authenticated = 2000
+Unauthenticated = 100
+
+[WebServer.TLS]
+HTTPSPort = 0
+
+[P2P]
+[P2P.V2]
+AnnounceAddresses = ['0.0.0.0:6690']
+ListenAddresses = ['0.0.0.0:6690']
diff --git a/core/scripts/common/vrf/docker/toml-config/bhf.toml b/core/scripts/common/vrf/docker/toml-config/bhf.toml
new file mode 100644
index 00000000..9de5eb43
--- /dev/null
+++ b/core/scripts/common/vrf/docker/toml-config/bhf.toml
@@ -0,0 +1,7 @@
+
+[Feature]
+LogPoller = true
+
+[[EVM]]
+ChainID = '11155111'
+FinalityDepth = 10
diff --git 
a/core/scripts/common/vrf/docker/toml-config/bhs.toml b/core/scripts/common/vrf/docker/toml-config/bhs.toml new file mode 100644 index 00000000..9de5eb43 --- /dev/null +++ b/core/scripts/common/vrf/docker/toml-config/bhs.toml @@ -0,0 +1,7 @@ + +[Feature] +LogPoller = true + +[[EVM]] +ChainID = '11155111' +FinalityDepth = 10 diff --git a/core/scripts/common/vrf/docker/toml-config/rpc-nodes.toml b/core/scripts/common/vrf/docker/toml-config/rpc-nodes.toml new file mode 100644 index 00000000..c977ee4a --- /dev/null +++ b/core/scripts/common/vrf/docker/toml-config/rpc-nodes.toml @@ -0,0 +1,8 @@ +[[EVM]] +ChainID = "11155111" + +[[EVM.Nodes]] +HTTPURL = "" +Name = "RPC Node" +SendOnly = false +WSURL = "" diff --git a/core/scripts/common/vrf/docker/toml-config/secrets.toml b/core/scripts/common/vrf/docker/toml-config/secrets.toml new file mode 100644 index 00000000..00d12b06 --- /dev/null +++ b/core/scripts/common/vrf/docker/toml-config/secrets.toml @@ -0,0 +1,4 @@ +[Password] +Keystore = 'mysecretkeystorepassword' +[Database] +URL = 'postgresql://postgres:mysecretpassword@host.docker.internal:5432/postgres?sslmode=disable' diff --git a/core/scripts/common/vrf/docker/toml-config/vrf-backup-other-chains.toml b/core/scripts/common/vrf/docker/toml-config/vrf-backup-other-chains.toml new file mode 100644 index 00000000..ba71abe9 --- /dev/null +++ b/core/scripts/common/vrf/docker/toml-config/vrf-backup-other-chains.toml @@ -0,0 +1,20 @@ +[Feature] +LogPoller = false #VRF V2 uses Log Broadcaster instead of Log poller + +[[EVM]] +ChainID = '11155111' +BlockBackfillDepth = 500 +LogBackfillBatchSize = 1000 +MinIncomingConfirmations = 100 +RPCDefaultBatchSize = 25 + +[EVM.Transactions] +MaxInFlight = 128 +MaxQueued = 0 + +[EVM.GasEstimator] +LimitDefault = 3500000 +PriceMax = '30 gwei' + +[EVM.GasEstimator.BlockHistory] +BatchSize = 100 diff --git a/core/scripts/common/vrf/docker/toml-config/vrf-backup.toml b/core/scripts/common/vrf/docker/toml-config/vrf-backup.toml new file mode 
100644 index 00000000..92b9e717 --- /dev/null +++ b/core/scripts/common/vrf/docker/toml-config/vrf-backup.toml @@ -0,0 +1,3 @@ +[[EVM]] +ChainID = '11155111' +MinIncomingConfirmations = 100 diff --git a/core/scripts/common/vrf/docker/toml-config/vrf-primary.toml b/core/scripts/common/vrf/docker/toml-config/vrf-primary.toml new file mode 100644 index 00000000..6cb78789 --- /dev/null +++ b/core/scripts/common/vrf/docker/toml-config/vrf-primary.toml @@ -0,0 +1,15 @@ +[Feature] +LogPoller = false #VRF V2 uses Log Broadcaster instead of Log poller + +[[EVM]] +ChainID = '11155111' +BlockBackfillDepth = 500 +MinIncomingConfirmations = 3 + +[EVM.Transactions] +MaxQueued = 10000 + +[EVM.GasEstimator] +LimitDefault = 3500000 +PriceMax = '30 gwei' +FeeCapDefault = '20 gwei' diff --git a/core/scripts/common/vrf/docker/wait-for-others/docker-wait-for-it.sh b/core/scripts/common/vrf/docker/wait-for-others/docker-wait-for-it.sh new file mode 100644 index 00000000..7f9943c9 --- /dev/null +++ b/core/scripts/common/vrf/docker/wait-for-others/docker-wait-for-it.sh @@ -0,0 +1,178 @@ +#!/usr/bin/env bash +# From https://github.com/vishnubob/wait-for-it +# Use this script to test if a given TCP host/port are available + +cmdname=$(basename $0) + +echoerr() { if [[ $QUIET -ne 1 ]]; then echo "$@" 1>&2; fi } + +usage() +{ + cat << USAGE >&2 +Usage: + $cmdname host:port [-s] [-t timeout] [-- command args] + -h HOST | --host=HOST Host or IP under test + -p PORT | --port=PORT TCP port under test + Alternatively, you specify the host and port as host:port + -s | --strict Only execute subcommand if the test succeeds + -q | --quiet Don't output any status messages + -t TIMEOUT | --timeout=TIMEOUT + Timeout in seconds, zero for no timeout + -- COMMAND ARGS Execute command with args after the test finishes +USAGE + exit 1 +} + +wait_for() +{ + if [[ $TIMEOUT -gt 0 ]]; then + echoerr "$cmdname: waiting $TIMEOUT seconds for $HOST:$PORT" + else + echoerr "$cmdname: waiting for $HOST:$PORT without a 
timeout"
+    fi
+    start_ts=$(date +%s)
+    while :
+    do
+        if [[ $ISBUSY -eq 1 ]]; then
+            nc -z $HOST $PORT
+            result=$?
+        else
+            (echo > /dev/tcp/$HOST/$PORT) >/dev/null 2>&1
+            result=$?
+        fi
+        if [[ $result -eq 0 ]]; then
+            end_ts=$(date +%s)
+            echoerr "$cmdname: $HOST:$PORT is available after $((end_ts - start_ts)) seconds"
+            break
+        fi
+        sleep 1
+    done
+    return $result
+}
+
+wait_for_wrapper()
+{
+    # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
+    if [[ $QUIET -eq 1 ]]; then
+        timeout $BUSYTIMEFLAG $TIMEOUT $0 --quiet --child --host=$HOST --port=$PORT --timeout=$TIMEOUT &
+    else
+        timeout $BUSYTIMEFLAG $TIMEOUT $0 --child --host=$HOST --port=$PORT --timeout=$TIMEOUT &
+    fi
+    PID=$!
+    trap "kill -INT -$PID" INT
+    wait $PID
+    RESULT=$?
+    if [[ $RESULT -ne 0 ]]; then
+        echoerr "$cmdname: timeout occurred after waiting $TIMEOUT seconds for $HOST:$PORT"
+    fi
+    return $RESULT
+}
+
+# process arguments
+while [[ $# -gt 0 ]]
+do
+    case "$1" in
+        *:* )
+        hostport=(${1//:/ })
+        HOST=${hostport[0]}
+        PORT=${hostport[1]}
+        shift 1
+        ;;
+        --child)
+        CHILD=1
+        shift 1
+        ;;
+        -q | --quiet)
+        QUIET=1
+        shift 1
+        ;;
+        -s | --strict)
+        STRICT=1
+        shift 1
+        ;;
+        -h)
+        HOST="$2"
+        if [[ $HOST == "" ]]; then break; fi
+        shift 2
+        ;;
+        --host=*)
+        HOST="${1#*=}"
+        shift 1
+        ;;
+        -p)
+        PORT="$2"
+        if [[ $PORT == "" ]]; then break; fi
+        shift 2
+        ;;
+        --port=*)
+        PORT="${1#*=}"
+        shift 1
+        ;;
+        -t)
+        TIMEOUT="$2"
+        if [[ $TIMEOUT == "" ]]; then break; fi
+        shift 2
+        ;;
+        --timeout=*)
+        TIMEOUT="${1#*=}"
+        shift 1
+        ;;
+        --)
+        shift
+        CLI=("$@")
+        break
+        ;;
+        --help)
+        usage
+        ;;
+        *)
+        echoerr "Unknown argument: $1"
+        usage
+        ;;
+    esac
+done
+
+if [[ "$HOST" == "" || "$PORT" == "" ]]; then
+    echoerr "Error: you need to provide a host and port to test."
+    usage
+fi
+
+TIMEOUT=${TIMEOUT:-15}
+STRICT=${STRICT:-0}
+CHILD=${CHILD:-0}
+QUIET=${QUIET:-0}
+
+# check to see if timeout is from busybox?
+# busybox's timeout takes the duration via the -t flag rather than as a bare argument
+TIMEOUT_PATH=$(realpath $(which timeout)) +if [[ $TIMEOUT_PATH =~ "busybox" ]]; then + ISBUSY=1 + BUSYTIMEFLAG="-t" +else + ISBUSY=0 + BUSYTIMEFLAG="" +fi + +if [[ $CHILD -gt 0 ]]; then + wait_for + RESULT=$? + exit $RESULT +else + if [[ $TIMEOUT -gt 0 ]]; then + wait_for_wrapper + RESULT=$? + else + wait_for + RESULT=$? + fi +fi + +if [[ $CLI != "" ]]; then + if [[ $RESULT -ne 0 && $STRICT -eq 1 ]]; then + echoerr "$cmdname: strict mode, refusing to execute subprocess" + exit $RESULT + fi + exec "${CLI[@]}" +else + exit $RESULT +fi diff --git a/core/scripts/common/vrf/docker/wait-for-others/docker-wait-for-others.sh b/core/scripts/common/vrf/docker/wait-for-others/docker-wait-for-others.sh new file mode 100644 index 00000000..88684e5d --- /dev/null +++ b/core/scripts/common/vrf/docker/wait-for-others/docker-wait-for-others.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +while [ "$#" -gt 1 ] && [ "$1" != "--" ]; do + /opt/docker-wait-for-it.sh $1 + shift +done + +# Hand off to the CMD +exec "$@" diff --git a/core/scripts/common/vrf/jobs/jobs.go b/core/scripts/common/vrf/jobs/jobs.go new file mode 100644 index 00000000..66bdf712 --- /dev/null +++ b/core/scripts/common/vrf/jobs/jobs.go @@ -0,0 +1,146 @@ +package jobs + +var ( + VRFV2JobFormatted = `type = "vrf" +name = "vrf_v2" +schemaVersion = 1 +coordinatorAddress = "%s" +batchCoordinatorAddress = "%s" +batchFulfillmentEnabled = %t +batchFulfillmentGasMultiplier = %f +customRevertsPipelineEnabled = %t +publicKey = "%s" +minIncomingConfirmations = %d +evmChainID = "%d" +fromAddresses = ["%s"] +pollPeriod = "%s" +requestTimeout = "%s" +observationSource = """decode_log [type=ethabidecodelog + abi="RandomWordsRequested(bytes32 indexed keyHash,uint256 requestId,uint256 preSeed,uint64 indexed subId,uint16 minimumRequestConfirmations,uint32 callbackGasLimit,uint32 numWords,address indexed sender)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrfv2 + publicKey="$(jobSpec.publicKey)" + 
requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +estimate_gas [type=estimategaslimit + to="%s" + multiplier="%f" + data="$(vrf.output)" + block="%s"] +simulate [type=ethcall + from="%s" + to="%s" + gas="$(estimate_gas)" + gasPrice="$(jobSpec.maxGasPrice)" + extractRevertReason=true + contract="%s" + data="$(vrf.output)" + block="%s"] +decode_log->vrf->estimate_gas->simulate +"""` + + VRFV2PlusJobFormatted = ` +type = "vrf" +name = "vrf_v2_plus" +schemaVersion = 1 +coordinatorAddress = "%s" +batchCoordinatorAddress = "%s" +batchFulfillmentEnabled = %t +batchFulfillmentGasMultiplier = %f +publicKey = "%s" +minIncomingConfirmations = %d +evmChainID = "%d" +fromAddresses = ["%s"] +pollPeriod = "%s" +requestTimeout = "%s" +observationSource = """ +decode_log [type=ethabidecodelog + abi="RandomWordsRequested(bytes32 indexed keyHash,uint256 requestId,uint256 preSeed,uint256 indexed subId,uint16 minimumRequestConfirmations,uint32 callbackGasLimit,uint32 numWords,bytes extraArgs,address indexed sender)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +generate_proof [type=vrfv2plus + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +estimate_gas [type=estimategaslimit + to="%s" + multiplier="%f" + data="$(generate_proof.output)" + block="%s"] +simulate_fulfillment [type=ethcall + from="%s" + to="%s" + gas="$(estimate_gas)" + gasPrice="$(jobSpec.maxGasPrice)" + extractRevertReason=true + contract="%s" + data="$(generate_proof.output)" + block="%s"] +decode_log->generate_proof->estimate_gas->simulate_fulfillment +""" +` + + BHSJobFormatted = `type = "blockhashstore" +schemaVersion = 1 +name = "blockhashstore" +forwardingAllowed = false +coordinatorV2Address = "%s" +waitBlocks = %d +lookbackBlocks = %d +blockhashStoreAddress = "%s" +pollPeriod = "30s" +runTimeout = "1m0s" +evmChainID = 
"%d" +fromAddresses = ["%s"] +` + BHSPlusJobFormatted = `type = "blockhashstore" +schemaVersion = 1 +name = "blockhashstore" +forwardingAllowed = false +coordinatorV2PlusAddress = "%s" +waitBlocks = %d +lookbackBlocks = %d +blockhashStoreAddress = "%s" +pollPeriod = "30s" +runTimeout = "1m0s" +evmChainID = "%d" +fromAddresses = ["%s"] +` + + BHFJobFormatted = `type = "blockheaderfeeder" +schemaVersion = 1 +name = "blockheaderfeeder" +forwardingAllowed = false +coordinatorV2Address = "%s" +waitBlocks = 256 +lookbackBlocks = 1_000 +blockhashStoreAddress = "%s" +batchBlockhashStoreAddress = "%s" +pollPeriod = "10s" +runTimeout = "30s" +evmChainID = "%d" +fromAddresses = ["%s"] +getBlockhashesBatchSize = 50 +storeBlockhashesBatchSize = 10 +` + + BHFPlusJobFormatted = `type = "blockheaderfeeder" +schemaVersion = 1 +name = "blockheaderfeeder" +forwardingAllowed = false +coordinatorV2PlusAddress = "%s" +waitBlocks = 256 +lookbackBlocks = 1_000 +blockhashStoreAddress = "%s" +batchBlockhashStoreAddress = "%s" +pollPeriod = "10s" +runTimeout = "30s" +evmChainID = "%d" +fromAddresses = ["%s"] +getBlockhashesBatchSize = 50 +storeBlockhashesBatchSize = 10 +` +) diff --git a/core/scripts/common/vrf/model/model.go b/core/scripts/common/vrf/model/model.go new file mode 100644 index 00000000..ba919eb3 --- /dev/null +++ b/core/scripts/common/vrf/model/model.go @@ -0,0 +1,60 @@ +package model + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +var ( + VRFPrimaryNodeName = "vrf-primary-node" + VRFBackupNodeName = "vrf-backup-node" + BHSNodeName = "bhs-node" + BHSBackupNodeName = "bhs-backup-node" + BHFNodeName = "bhf-node" +) + +type Node struct { + URL string + CredsFile string + SendingKeys []SendingKey + NumberOfSendingKeysToCreate int + SendingKeyFundingAmount *big.Int + VrfKeys []string +} + +type SendingKey struct { + Address string + BalanceEth *big.Int +} + +type JobSpecs struct { + VRFPrimaryNode string + VRFBackupyNode string + BHSNode string + 
BHSBackupNode string + BHFNode string +} + +type ContractAddresses struct { + LinkAddress string + LinkEthAddress string + BhsContractAddress common.Address + BatchBHSAddress common.Address + CoordinatorAddress common.Address + BatchCoordinatorAddress common.Address +} + +type VRFKeyRegistrationConfig struct { + VRFKeyUncompressedPubKey string + RegisterAgainstAddress string +} + +type CoordinatorJobSpecConfig struct { + BatchFulfillmentEnabled bool + BatchFulfillmentGasMultiplier float64 + EstimateGasMultiplier float64 + PollPeriod string + RequestTimeout string + RevertsPipelineEnabled bool +} diff --git a/core/scripts/common/vrf/setup-envs/README.md b/core/scripts/common/vrf/setup-envs/README.md new file mode 100644 index 00000000..ba749d97 --- /dev/null +++ b/core/scripts/common/vrf/setup-envs/README.md @@ -0,0 +1,166 @@ +## NOTE: +* Script will delete all existing jobs on the node! +* Currently works only with 0 or 1 VRF Keys on the node! Otherwise, will stop execution! +* Currently possible to fund all nodes with one amount of native tokens +## Commands: +1. If using Docker Compose + 1. create `.env` file in `core/scripts/common/vrf/docker` (can use `sample.env` file as an example) + 2. go to `core/scripts/common/vrf/docker` folder and start containers - `docker compose up` +2. Update [rpc-nodes.toml](..%2Fdocker%2Ftoml-config%2Frpc-nodes.toml) with relevant RPC nodes +3. Create files with credentials desirably outside `plugin` repo (just not to push creds accidentally). Populate the files with relevant credentials for the nodes +4. Ensure that following env variables are set +``` +export ETH_URL= +export ETH_CHAIN_ID= +export ACCOUNT_KEY= +``` +5. execute from `core/scripts/common/vrf/setup-envs` folder + * `--vrf-version` - "v2" or "v2plus" + +#### VRF V2 +``` +go run . 
\ +--vrf-version="v2" \ +--vrf-primary-node-url=http://localhost:6610 \ +--vrf-primary-creds-file \ +--vrf-backup-node-url=http://localhost:6611 \ +--vrf-bk-creds-file \ +--bhs-node-url=http://localhost:6612 \ +--bhs-creds-file \ +--bhs-backup-node-url=http://localhost:6613 \ +--bhs-bk-creds-file \ +--bhf-node-url=http://localhost:6614 \ +--bhf-creds-file \ +--num-eth-keys=1 \ +--num-vrf-keys=1 \ +--sending-key-funding-amount="1e17" \ +--deploy-contracts-and-create-jobs="true" \ +--subscription-balance="1e19" \ +--subscription-balance-native="1e18" \ +--batch-fulfillment-enabled="true" \ +--batch-fulfillment-gas-multiplier=1.1 \ +--estimate-gas-multiplier=1.1 \ +--poll-period="5s" \ +--request-timeout="30m0s" \ +--reverts-pipeline-enabled="true" \ +--min-confs=3 \ +--register-vrf-key-against-address= \ +--deploy-vrfv2-owner="true" \ +--use-test-coordinator="true" +``` +#### VRF V2 Plus +* does not need to register VRF key against address +* does not need to deploy VRFV2Owner contract +* does not need to use test coordinator + +VRF V2 Plus example: +``` +go run . \ +--vrf-version="v2plus" \ +--vrf-primary-node-url=http://localhost:6610 \ +--vrf-primary-creds-file \ +--vrf-backup-node-url=http://localhost:6611 \ +--vrf-bk-creds-file \ +--bhs-node-url=http://localhost:6612 \ +--bhs-creds-file \ +--bhs-backup-node-url=http://localhost:6613 \ +--bhs-bk-creds-file \ +--bhf-node-url=http://localhost:6614 \ +--bhf-creds-file \ +--num-eth-keys=1 \ +--num-vrf-keys=1 \ +--sending-key-funding-amount="1e17" \ +--deploy-contracts-and-create-jobs="true" \ +--subscription-balance="1e19" \ +--subscription-balance-native="1e18" \ +--batch-fulfillment-enabled="true" \ +--batch-fulfillment-gas-multiplier=1.1 \ +--estimate-gas-multiplier=1.1 \ +--poll-period="5s" \ +--request-timeout="30m0s" \ +--min-confs=3 +``` + +Optional parameters - will not be deployed if specified +``` + --link-address
\ + --link-eth-feed
\ +``` + +WIP - Not working yet: +``` + --bhs-address
\ + --batch-bhs-address
\ + --coordinator-address
\ + --batch-coordinator-address
+``` + + +## Process Example + +1. If the CL nodes do not have needed amount of ETH and VRF keys, you need to create them first: +``` +go run . \ +--vrf-version="v2" \ +--vrf-primary-node-url= \ +--vrf-primary-creds-file \ +--bhs-node-url= \ +--bhs-creds-file \ +--num-eth-keys=3 \ +--num-vrf-keys=1 \ +--sending-key-funding-amount="1e17" \ +--deploy-contracts-and-create-jobs="false" +``` +Then update corresponding deployment scripts in infra-k8s repo with the new ETH addresses, specifying max gas price for each key + +e.g.: +``` +[[EVM.KeySpecific]] +Key = '' +GasEstimator.PriceMax = '30 gwei' +``` + +2. If the CL nodes already have needed amount of ETH and VRF keys, you can deploy contracts and create jobs with the following command: +NOTE - nodes will be funded at least to the amount specified in `--sending-key-funding-amount` parameter. +``` +go run . \ +--vrf-version="v2" \ +--vrf-primary-node-url= \ +--vrf-primary-creds-file \ +--bhs-node-url= \ +--bhs-creds-file \ +--num-eth-keys=3 \ +--num-vrf-keys=1 \ +--sending-key-funding-amount="1e17" \ +--deploy-contracts-and-create-jobs="true" \ +--subscription-balance="1e19" \ +--subscription-balance-native="1e18" \ +--batch-fulfillment-enabled="true" \ +--min-confs=3 \ +--register-vrf-key-against-address="" \ +--deploy-vrfv2-owner="true" \ +--link-address "" \ +--link-eth-feed "" +``` + + +3. We can run sample rand request to see if the setup works. + After previous script was done, we should see the command to run in the console: + + e.g. to trigger rand request: + 1. navigate to `core/scripts/vrfv2plus/testnet` or `core/scripts/vrfv2/testnet` folder + 2. set needed env variables + ``` + export ETH_URL= + export ETH_CHAIN_ID= + export ACCOUNT_KEY= + ``` + 3. Trigger rand request (get this command from the console after running `setup-envs` script ) + ```bash + go run . 
eoa-load-test-request-with-metrics --consumer-address= --sub-id=1 --key-hash= --request-confirmations <> --requests 1 --runs 1 --cb-gas-limit 1_000_000 + ``` + 4. Then to check that rand request was fulfilled (get this command from the console after running `setup-envs` script ) + ```bash + go run . eoa-load-test-read-metrics --consumer-address= + ``` \ No newline at end of file diff --git a/core/scripts/common/vrf/setup-envs/main.go b/core/scripts/common/vrf/setup-envs/main.go new file mode 100644 index 00000000..e597a2ed --- /dev/null +++ b/core/scripts/common/vrf/setup-envs/main.go @@ -0,0 +1,561 @@ +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "math/big" + "os" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/shopspring/decimal" + "github.com/urfave/cli" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + "github.com/goplugin/pluginv3.0/core/scripts/common/vrf/constants" + "github.com/goplugin/pluginv3.0/core/scripts/common/vrf/model" + "github.com/goplugin/pluginv3.0/core/scripts/vrfv2/testnet/v2scripts" + "github.com/goplugin/pluginv3.0/core/scripts/vrfv2plus/testnet/v2plusscripts" + clcmd "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func newApp(remoteNodeURL string, writer io.Writer) (*clcmd.Shell, *cli.App) { + prompter := clcmd.NewTerminalPrompter() + client := &clcmd.Shell{ + Renderer: clcmd.RendererJSON{Writer: writer}, + AppFactory: clcmd.PluginAppFactory{}, + KeyStoreAuthenticator: clcmd.TerminalKeyStoreAuthenticator{Prompter: prompter}, + FallbackAPIInitializer: clcmd.NewPromptingAPIInitializer(prompter), + Runner: clcmd.PluginRunner{}, + PromptingSessionRequestBuilder: clcmd.NewPromptingSessionRequestBuilder(prompter), + ChangePasswordPrompter: clcmd.NewChangePasswordPrompter(), + PasswordPrompter: clcmd.NewPasswordPrompter(), + } + 
app := clcmd.NewApp(client) + fs := flag.NewFlagSet("blah", flag.ContinueOnError) + fs.Bool("json", true, "") + fs.String("remote-node-url", remoteNodeURL, "") + helpers.PanicErr(app.Before(cli.NewContext(nil, fs, nil))) + // overwrite renderer since it's set to stdout after Before() is called + client.Renderer = clcmd.RendererJSON{Writer: writer} + return client, app +} + +var ( + checkMarkEmoji = "✅" + xEmoji = "❌" +) + +func main() { + + vrfPrimaryNodeURL := flag.String("vrf-primary-node-url", "", "remote node URL") + vrfBackupNodeURL := flag.String("vrf-backup-node-url", "", "remote node URL") + bhsNodeURL := flag.String("bhs-node-url", "", "remote node URL") + bhsBackupNodeURL := flag.String("bhs-backup-node-url", "", "remote node URL") + bhfNodeURL := flag.String("bhf-node-url", "", "remote node URL") + nodeSendingKeyFundingAmount := flag.String("sending-key-funding-amount", constants.NodeSendingKeyFundingAmount, "sending key funding amount") + + vrfPrimaryCredsFile := flag.String("vrf-primary-creds-file", "", "Creds to authenticate to the node") + vrfBackupCredsFile := flag.String("vrf-bk-creds-file", "", "Creds to authenticate to the node") + bhsCredsFile := flag.String("bhs-creds-file", "", "Creds to authenticate to the node") + bhsBackupCredsFile := flag.String("bhs-bk-creds-file", "", "Creds to authenticate to the node") + bhfCredsFile := flag.String("bhf-creds-file", "", "Creds to authenticate to the node") + + numEthKeys := flag.Int("num-eth-keys", 5, "Number of eth keys to create") + maxGasPriceGwei := flag.Int("max-gas-price-gwei", 1e12, "Max gas price gwei of the eth keys") + numVRFKeys := flag.Int("num-vrf-keys", 1, "Number of vrf keys to create") + batchFulfillmentEnabled := flag.Bool("batch-fulfillment-enabled", constants.BatchFulfillmentEnabled, "whether send randomness fulfillments in batches inside one tx from CL node") + batchFulfillmentGasMultiplier := flag.Float64("batch-fulfillment-gas-multiplier", 1.1, "") + estimateGasMultiplier := 
flag.Float64("estimate-gas-multiplier", 1.1, "") + pollPeriod := flag.String("poll-period", "300ms", "") + requestTimeout := flag.String("request-timeout", "30m0s", "") + revertsPipelineEnabled := flag.Bool("reverts-pipeline-enabled", true, "") + + vrfVersion := flag.String("vrf-version", "v2", "VRF version to use") + deployContractsAndCreateJobs := flag.Bool("deploy-contracts-and-create-jobs", false, "whether to deploy contracts and create jobs") + + subscriptionBalanceJuelsString := flag.String("subscription-balance", constants.SubscriptionBalanceJuels, "amount to fund subscription with Link token (Juels)") + subscriptionBalanceNativeWeiString := flag.String("subscription-balance-native", constants.SubscriptionBalanceNativeWei, "amount to fund subscription with native token (Wei)") + + minConfs := flag.Int("min-confs", constants.MinConfs, "minimum confirmations") + nativeOnly := flag.Bool("native-only", false, "if true, link and link feed are not set up. Only used in v2 plus") + linkAddress := flag.String("link-address", "", "address of link token") + linkEthAddress := flag.String("link-eth-feed", "", "address of link eth feed") + bhsContractAddressString := flag.String("bhs-address", "", "address of BHS contract") + batchBHSAddressString := flag.String("batch-bhs-address", "", "address of Batch BHS contract") + coordinatorAddressString := flag.String("coordinator-address", "", "address of VRF Coordinator contract") + batchCoordinatorAddressString := flag.String("batch-coordinator-address", "", "address Batch VRF Coordinator contract") + registerVRFKeyAgainstAddress := flag.String("register-vrf-key-against-address", "", "VRF Key registration against address - "+ + "from this address you can perform `coordinator.oracleWithdraw` to withdraw earned funds from rand request fulfilments") + deployVRFOwner := flag.Bool("deploy-vrfv2-owner", true, "whether to deploy VRF owner contracts") + useTestCoordinator := flag.Bool("use-test-coordinator", true, "whether to use test 
coordinator contract or use the normal one") + simulationBlock := flag.String("simulation-block", "pending", "simulation block can be 'pending' or 'latest'") + + e := helpers.SetupEnv(false) + flag.Parse() + nodesMap := make(map[string]model.Node) + + if *vrfVersion != "v2" && *vrfVersion != "v2plus" { + panic(fmt.Sprintf("Invalid VRF Version `%s`. Only `v2` and `v2plus` are supported", *vrfVersion)) + } + fmt.Println("Using VRF Version:", *vrfVersion) + + if *simulationBlock != "pending" && *simulationBlock != "latest" { + helpers.PanicErr(fmt.Errorf("simulation block must be 'pending' or 'latest'")) + } + + fundingAmount := decimal.RequireFromString(*nodeSendingKeyFundingAmount).BigInt() + subscriptionBalanceJuels := decimal.RequireFromString(*subscriptionBalanceJuelsString).BigInt() + subscriptionBalanceNativeWei := decimal.RequireFromString(*subscriptionBalanceNativeWeiString).BigInt() + + if *vrfPrimaryNodeURL != "" { + nodesMap[model.VRFPrimaryNodeName] = model.Node{ + URL: *vrfPrimaryNodeURL, + SendingKeyFundingAmount: fundingAmount, + CredsFile: *vrfPrimaryCredsFile, + } + } + if *vrfBackupNodeURL != "" { + nodesMap[model.VRFBackupNodeName] = model.Node{ + URL: *vrfBackupNodeURL, + SendingKeyFundingAmount: fundingAmount, + CredsFile: *vrfBackupCredsFile, + } + } + if *bhsNodeURL != "" { + nodesMap[model.BHSNodeName] = model.Node{ + URL: *bhsNodeURL, + SendingKeyFundingAmount: fundingAmount, + CredsFile: *bhsCredsFile, + } + } + if *bhsBackupNodeURL != "" { + nodesMap[model.BHSBackupNodeName] = model.Node{ + URL: *bhsBackupNodeURL, + SendingKeyFundingAmount: fundingAmount, + CredsFile: *bhsBackupCredsFile, + } + } + + if *bhfNodeURL != "" { + nodesMap[model.BHFNodeName] = model.Node{ + URL: *bhfNodeURL, + SendingKeyFundingAmount: fundingAmount, + CredsFile: *bhfCredsFile, + } + } + + output := &bytes.Buffer{} + for key, node := range nodesMap { + node := node + client, app := connectToNode(&node.URL, output, node.CredsFile) + ethKeys := 
createETHKeysIfNeeded(client, app, output, numEthKeys, &node.URL, maxGasPriceGwei) + if key == model.VRFPrimaryNodeName { + vrfKeys := createVRFKeyIfNeeded(client, app, output, numVRFKeys, &node.URL) + node.VrfKeys = mapVrfKeysToStringArr(vrfKeys) + printVRFKeyData(vrfKeys) + exportVRFKey(client, app, vrfKeys[0], output) + } + + if key == model.VRFBackupNodeName { + vrfKeys := getVRFKeys(client, app, output) + node.VrfKeys = mapVrfKeysToStringArr(vrfKeys) + } + + node.SendingKeys = mapEthKeysToSendingKeyArr(ethKeys) + printETHKeyData(ethKeys) + fundNodesIfNeeded(node, key, e) + nodesMap[key] = node + } + importVRFKeyToNodeIfSet(vrfBackupNodeURL, nodesMap, output, nodesMap[model.VRFBackupNodeName].CredsFile) + + if *deployContractsAndCreateJobs { + + contractAddresses := model.ContractAddresses{ + LinkAddress: *linkAddress, + LinkEthAddress: *linkEthAddress, + BhsContractAddress: common.HexToAddress(*bhsContractAddressString), + BatchBHSAddress: common.HexToAddress(*batchBHSAddressString), + CoordinatorAddress: common.HexToAddress(*coordinatorAddressString), + BatchCoordinatorAddress: common.HexToAddress(*batchCoordinatorAddressString), + } + + vrfKeyRegistrationConfig := model.VRFKeyRegistrationConfig{ + VRFKeyUncompressedPubKey: nodesMap[model.VRFPrimaryNodeName].VrfKeys[0], + RegisterAgainstAddress: *registerVRFKeyAgainstAddress, + } + + var jobSpecs model.JobSpecs + + switch *vrfVersion { + case "v2": + feeConfigV2 := vrf_coordinator_v2.VRFCoordinatorV2FeeConfig{ + FulfillmentFlatFeeLinkPPMTier1: uint32(constants.FlatFeeTier1), + FulfillmentFlatFeeLinkPPMTier2: uint32(constants.FlatFeeTier2), + FulfillmentFlatFeeLinkPPMTier3: uint32(constants.FlatFeeTier3), + FulfillmentFlatFeeLinkPPMTier4: uint32(constants.FlatFeeTier4), + FulfillmentFlatFeeLinkPPMTier5: uint32(constants.FlatFeeTier5), + ReqsForTier2: big.NewInt(constants.ReqsForTier2), + ReqsForTier3: big.NewInt(constants.ReqsForTier3), + ReqsForTier4: big.NewInt(constants.ReqsForTier4), + ReqsForTier5: 
big.NewInt(constants.ReqsForTier5), + } + + coordinatorConfigV2 := v2scripts.CoordinatorConfigV2{ + MinConfs: *minConfs, + MaxGasLimit: constants.MaxGasLimit, + StalenessSeconds: constants.StalenessSeconds, + GasAfterPayment: constants.GasAfterPayment, + FallbackWeiPerUnitLink: constants.FallbackWeiPerUnitLink, + FeeConfig: feeConfigV2, + } + + coordinatorJobSpecConfig := model.CoordinatorJobSpecConfig{ + BatchFulfillmentEnabled: *batchFulfillmentEnabled, + BatchFulfillmentGasMultiplier: *batchFulfillmentGasMultiplier, + EstimateGasMultiplier: *estimateGasMultiplier, + PollPeriod: *pollPeriod, + RequestTimeout: *requestTimeout, + RevertsPipelineEnabled: *revertsPipelineEnabled, + } + + jobSpecs = v2scripts.VRFV2DeployUniverse( + e, + subscriptionBalanceJuels, + vrfKeyRegistrationConfig, + contractAddresses, + coordinatorConfigV2, + nodesMap, + *deployVRFOwner, + coordinatorJobSpecConfig, + *useTestCoordinator, + *simulationBlock, + ) + case "v2plus": + coordinatorConfigV2Plus := v2plusscripts.CoordinatorConfigV2Plus{ + MinConfs: *minConfs, + MaxGasLimit: constants.MaxGasLimit, + StalenessSeconds: constants.StalenessSeconds, + GasAfterPayment: constants.GasAfterPayment, + FallbackWeiPerUnitLink: constants.FallbackWeiPerUnitLink, + FulfillmentFlatFeeNativePPM: constants.FlatFeeNativePPM, + FulfillmentFlatFeeLinkDiscountPPM: constants.FlatFeeLinkDiscountPPM, + NativePremiumPercentage: constants.NativePremiumPercentage, + LinkPremiumPercentage: constants.LinkPremiumPercentage, + } + + coordinatorJobSpecConfig := model.CoordinatorJobSpecConfig{ + BatchFulfillmentEnabled: *batchFulfillmentEnabled, + BatchFulfillmentGasMultiplier: *batchFulfillmentGasMultiplier, + EstimateGasMultiplier: *estimateGasMultiplier, + PollPeriod: *pollPeriod, + RequestTimeout: *requestTimeout, + } + + jobSpecs = v2plusscripts.VRFV2PlusDeployUniverse( + e, + subscriptionBalanceJuels, + subscriptionBalanceNativeWei, + vrfKeyRegistrationConfig, + contractAddresses, + coordinatorConfigV2Plus, + 
*batchFulfillmentEnabled, + *nativeOnly, + nodesMap, + uint64(*maxGasPriceGwei), + coordinatorJobSpecConfig, + *simulationBlock, + ) + } + + for key, node := range nodesMap { + node := node + client, app := connectToNode(&node.URL, output, node.CredsFile) + + //GET ALL JOBS + jobIDs := getAllJobIDs(client, app, output) + + //DELETE ALL EXISTING JOBS + for _, jobID := range jobIDs { + deleteJob(jobID, client, app, output) + } + //CREATE JOBS + + switch key { + case model.VRFPrimaryNodeName: + createJob(jobSpecs.VRFPrimaryNode, client, app, output) + case model.VRFBackupNodeName: + createJob(jobSpecs.VRFBackupyNode, client, app, output) + case model.BHSNodeName: + createJob(jobSpecs.BHSNode, client, app, output) + case model.BHSBackupNodeName: + createJob(jobSpecs.BHSBackupNode, client, app, output) + case model.BHFNodeName: + createJob(jobSpecs.BHFNode, client, app, output) + } + } + } +} + +func fundNodesIfNeeded(node model.Node, key string, e helpers.Environment) { + if node.SendingKeyFundingAmount.Cmp(big.NewInt(0)) == 1 { + fmt.Println("\nFunding", key, "Node's Sending Keys. 
Need to fund each key with", node.SendingKeyFundingAmount, "wei")
		for _, sendingKey := range node.SendingKeys {
			fundingToSendWei := new(big.Int).Sub(node.SendingKeyFundingAmount, sendingKey.BalanceEth)
			if fundingToSendWei.Cmp(big.NewInt(0)) == 1 {
				helpers.FundNode(e, sendingKey.Address, fundingToSendWei)
			} else {
				fmt.Println("\nSkipping Funding", sendingKey.Address, "since it has", sendingKey.BalanceEth.String(), "wei")
			}
		}
	} else {
		fmt.Println("\nSkipping Funding", key, "Node's Sending Keys since funding amount is 0 wei")
	}
}

// importVRFKeyToNodeIfSet copies the primary node's exported VRF key onto the
// backup node when a backup node URL is configured and the backup node does not
// already hold the same key. Panics if the import leaves the backup node with
// no VRF keys. Assumes the primary node has already created/exported its key
// earlier in the flow (see main) — TODO confirm for configs with only a backup URL.
func importVRFKeyToNodeIfSet(vrfBackupNodeURL *string, nodes map[string]model.Node, output *bytes.Buffer, file string) {
	if *vrfBackupNodeURL != "" {
		vrfBackupNode := nodes[model.VRFBackupNodeName]
		// BUGFIX: the primary node must be looked up with the primary key.
		// This previously read nodes[model.VRFBackupNodeName], so the
		// key-mismatch comparison below always compared the backup node with
		// itself and never detected a divergent primary key.
		vrfPrimaryNode := nodes[model.VRFPrimaryNodeName]

		if len(vrfBackupNode.VrfKeys) == 0 || vrfPrimaryNode.VrfKeys[0] != vrfBackupNode.VrfKeys[0] {
			client, app := connectToNode(&vrfBackupNode.URL, output, file)
			importVRFKey(client, app, output)

			vrfKeys := getVRFKeys(client, app, output)

			vrfBackupNode.VrfKeys = mapVrfKeysToStringArr(vrfKeys)
			if len(vrfBackupNode.VrfKeys) == 0 {
				panic("VRF Key was not imported to VRF Backup Node")
			}
			printVRFKeyData(vrfKeys)
		}
	}
}

// getVRFKeys lists the VRF keys on the node via the CLI shell and decodes the
// JSON that the shell renderer wrote into output. Resets output before returning.
func getVRFKeys(client *clcmd.Shell, app *cli.App, output *bytes.Buffer) []presenters.VRFKeyResource {
	var vrfKeys []presenters.VRFKeyResource

	err := client.ListVRFKeys(&cli.Context{
		App: app,
	})
	helpers.PanicErr(err)
	helpers.PanicErr(json.Unmarshal(output.Bytes(), &vrfKeys))
	output.Reset()
	return vrfKeys
}

// createJob writes jobSpec to ./job-spec.toml and submits it to the node.
func createJob(jobSpec string, client *clcmd.Shell, app *cli.App, output *bytes.Buffer) {
	if err := os.WriteFile("job-spec.toml", []byte(jobSpec), 0666); err != nil { //nolint:gosec
		helpers.PanicErr(err)
	}
	job := presenters.JobResource{}
	flagSet := flag.NewFlagSet("blah", flag.ExitOnError)
	err := flagSet.Parse([]string{"./job-spec.toml"})
	helpers.PanicErr(err)
	err = 
client.CreateJob(cli.NewContext(app, flagSet, nil)) + helpers.PanicErr(err) + helpers.PanicErr(json.Unmarshal(output.Bytes(), &job)) + output.Reset() +} + +func exportVRFKey(client *clcmd.Shell, app *cli.App, vrfKey presenters.VRFKeyResource, output *bytes.Buffer) { + if err := os.WriteFile("vrf-key-password.txt", []byte("twochains"), 0666); err != nil { //nolint:gosec + helpers.PanicErr(err) + } + flagSet := flag.NewFlagSet("blah", flag.ExitOnError) + flagSet.String("new-password", "./vrf-key-password.txt", "") + flagSet.String("output", "exportedvrf.json", "") + err := flagSet.Parse([]string{vrfKey.Compressed}) + helpers.PanicErr(err) + err = client.ExportVRFKey(cli.NewContext(app, flagSet, nil)) + helpers.PanicErr(err) + output.Reset() +} + +func importVRFKey(client *clcmd.Shell, app *cli.App, output *bytes.Buffer) { + if err := os.WriteFile("vrf-key-password.txt", []byte("twochains"), 0666); err != nil { //nolint:gosec + helpers.PanicErr(err) + } + flagSet := flag.NewFlagSet("blah", flag.ExitOnError) + flagSet.String("old-password", "./vrf-key-password.txt", "") + err := flagSet.Parse([]string{"exportedvrf.json"}) + helpers.PanicErr(err) + err = client.ImportVRFKey(cli.NewContext(app, flagSet, nil)) + helpers.PanicErr(err) + output.Reset() +} + +func deleteJob(jobID string, client *clcmd.Shell, app *cli.App, output *bytes.Buffer) { + flagSet := flag.NewFlagSet("blah", flag.ExitOnError) + err := flagSet.Parse([]string{jobID}) + helpers.PanicErr(err) + err = client.DeleteJob(cli.NewContext(app, flagSet, nil)) + helpers.PanicErr(err) + output.Reset() +} + +func getAllJobIDs(client *clcmd.Shell, app *cli.App, output *bytes.Buffer) []string { + flagSet := flag.NewFlagSet("blah", flag.ExitOnError) + err := client.ListJobs(cli.NewContext(app, flagSet, nil)) + helpers.PanicErr(err) + jobs := clcmd.JobPresenters{} + helpers.PanicErr(json.Unmarshal(output.Bytes(), &jobs)) + var jobIDs []string + for _, job := range jobs { + jobIDs = append(jobIDs, job.ID) + } + 
output.Reset() + return jobIDs +} + +func printETHKeyData(ethKeys []presenters.ETHKeyResource) { + fmt.Println("------------- NODE INFORMATION -------------") + for _, ethKey := range ethKeys { + fmt.Println("-----------ETH Key-----------") + fmt.Println("Address: ", ethKey.Address) + fmt.Println("MaxGasPriceWei: ", ethKey.MaxGasPriceWei) + fmt.Println("EthBalance: ", ethKey.EthBalance) + fmt.Println("-----------------------------") + } +} + +func mapEthKeysToSendingKeyArr(ethKeys []presenters.ETHKeyResource) []model.SendingKey { + var sendingKeys []model.SendingKey + for _, ethKey := range ethKeys { + sendingKey := model.SendingKey{Address: ethKey.Address, BalanceEth: ethKey.EthBalance.ToInt()} + sendingKeys = append(sendingKeys, sendingKey) + } + return sendingKeys +} + +func mapVrfKeysToStringArr(vrfKeys []presenters.VRFKeyResource) []string { + var vrfKeysString []string + for _, vrfKey := range vrfKeys { + vrfKeysString = append(vrfKeysString, vrfKey.Uncompressed) + } + return vrfKeysString +} + +func printVRFKeyData(vrfKeys []presenters.VRFKeyResource) { + fmt.Println("Number of VRF Keys on the node: ", len(vrfKeys)) + fmt.Println("------------- NODE INFORMATION -------------") + for _, vrfKey := range vrfKeys { + fmt.Println("-----------VRF Key-----------") + fmt.Println("Compressed: ", vrfKey.Compressed) + fmt.Println("Uncompressed: ", vrfKey.Uncompressed) + fmt.Println("Hash: ", vrfKey.Hash) + fmt.Println("-----------------------------") + } +} + +func connectToNode(nodeURL *string, output *bytes.Buffer, credFile string) (*clcmd.Shell, *cli.App) { + client, app := newApp(*nodeURL, output) + // login first to establish the session + fmt.Println("logging in to:", *nodeURL) + loginFs := flag.NewFlagSet("test", flag.ContinueOnError) + loginFs.String("file", credFile, "") + loginFs.Bool("bypass-version-check", true, "") + loginCtx := cli.NewContext(app, loginFs, nil) + err := client.RemoteLogin(loginCtx) + helpers.PanicErr(err) + output.Reset() + fmt.Println() 
+ return client, app +} + +func createVRFKeyIfNeeded(client *clcmd.Shell, app *cli.App, output *bytes.Buffer, numVRFKeys *int, nodeURL *string) []presenters.VRFKeyResource { + var allVRFKeys []presenters.VRFKeyResource + var newKeys []presenters.VRFKeyResource + + vrfKeys := getVRFKeys(client, app, output) + + switch { + case len(vrfKeys) == *numVRFKeys: + fmt.Println(checkMarkEmoji, "found", len(vrfKeys), "vrf keys on", *nodeURL) + case len(vrfKeys) > *numVRFKeys: + fmt.Println(xEmoji, "found", len(vrfKeys), "vrf keys on", nodeURL, " which is more than expected") + os.Exit(1) + default: + fmt.Println(xEmoji, "found only", len(vrfKeys), "vrf keys on", nodeURL, ", creating", + *numVRFKeys-len(vrfKeys), "more") + toCreate := *numVRFKeys - len(vrfKeys) + for i := 0; i < toCreate; i++ { + output.Reset() + newKey := createVRFKey(client, app, output) + newKeys = append(newKeys, newKey) + } + fmt.Println("NEW VRF KEYS:", strings.Join(func() (r []string) { + for _, k := range newKeys { + r = append(r, k.Uncompressed) + } + return + }(), ", ")) + } + fmt.Println() + allVRFKeys = append(allVRFKeys, vrfKeys...) + allVRFKeys = append(allVRFKeys, newKeys...) 
+ return allVRFKeys +} + +func createVRFKey(client *clcmd.Shell, app *cli.App, output *bytes.Buffer) presenters.VRFKeyResource { + var newKey presenters.VRFKeyResource + err := client.CreateVRFKey( + cli.NewContext(app, flag.NewFlagSet("blah", flag.ExitOnError), nil)) + helpers.PanicErr(err) + helpers.PanicErr(json.Unmarshal(output.Bytes(), &newKey)) + output.Reset() + return newKey +} + +func createETHKeysIfNeeded(client *clcmd.Shell, app *cli.App, output *bytes.Buffer, numEthKeys *int, nodeURL *string, maxGasPriceGwei *int) []presenters.ETHKeyResource { + var allETHKeysNode []presenters.ETHKeyResource + var ethKeys []presenters.ETHKeyResource + var newKeys []presenters.ETHKeyResource + // check for ETH keys + err := client.ListETHKeys(&cli.Context{ + App: app, + }) + helpers.PanicErr(err) + helpers.PanicErr(json.Unmarshal(output.Bytes(), ðKeys)) + switch { + case len(ethKeys) >= *numEthKeys: + fmt.Println(checkMarkEmoji, "found", len(ethKeys), "eth keys on", *nodeURL) + case len(ethKeys) < *numEthKeys: + fmt.Println(xEmoji, "found only", len(ethKeys), "eth keys on", *nodeURL, + "; creating", *numEthKeys-len(ethKeys), "more") + toCreate := *numEthKeys - len(ethKeys) + for i := 0; i < toCreate; i++ { + output.Reset() + var newKey presenters.ETHKeyResource + + flagSet := flag.NewFlagSet("blah", flag.ExitOnError) + flagSet.String("evm-chain-id", os.Getenv("ETH_CHAIN_ID"), "chain id") + if *maxGasPriceGwei > 0 { + helpers.PanicErr(flagSet.Set("max-gas-price-gwei", fmt.Sprintf("%d", *maxGasPriceGwei))) + } + err := flagSet.Parse([]string{"-evm-chain-id", os.Getenv("ETH_CHAIN_ID")}) + helpers.PanicErr(err) + err = client.CreateETHKey(cli.NewContext(app, flagSet, nil)) + helpers.PanicErr(err) + helpers.PanicErr(json.Unmarshal(output.Bytes(), &newKey)) + newKeys = append(newKeys, newKey) + } + fmt.Println("NEW ETH KEYS:", strings.Join(func() (r []string) { + for _, k := range newKeys { + r = append(r, k.Address) + } + return + }(), ", ")) + } + output.Reset() + 
fmt.Println() + allETHKeysNode = append(allETHKeysNode, ethKeys...) + allETHKeysNode = append(allETHKeysNode, newKeys...) + return allETHKeysNode +} diff --git a/core/scripts/common/vrf/util/util.go b/core/scripts/common/vrf/util/util.go new file mode 100644 index 00000000..0acad3b8 --- /dev/null +++ b/core/scripts/common/vrf/util/util.go @@ -0,0 +1,22 @@ +package util + +import ( + "github.com/goplugin/pluginv3.0/core/scripts/common/vrf/model" +) + +func MapToSendingKeyArr(nodeSendingKeys []string) []model.SendingKey { + var sendingKeys []model.SendingKey + + for _, key := range nodeSendingKeys { + sendingKeys = append(sendingKeys, model.SendingKey{Address: key}) + } + return sendingKeys +} + +func MapToAddressArr(sendingKeys []model.SendingKey) []string { + var sendingKeysString []string + for _, sendingKey := range sendingKeys { + sendingKeysString = append(sendingKeysString, sendingKey.Address) + } + return sendingKeysString +} diff --git a/core/scripts/functions/.gitignore b/core/scripts/functions/.gitignore new file mode 100644 index 00000000..6bdcf257 --- /dev/null +++ b/core/scripts/functions/.gitignore @@ -0,0 +1,4 @@ +.remotes* +cookie +artefacts/*.toml +artefacts/*.json \ No newline at end of file diff --git a/core/scripts/functions/USAGE.md b/core/scripts/functions/USAGE.md new file mode 100644 index 00000000..3462ef56 --- /dev/null +++ b/core/scripts/functions/USAGE.md @@ -0,0 +1,55 @@ +# Tools for configuring Functions DON + +Kudos to VRF team for inspiration. + +## Usage + +1. Create a hidden file containing nodes list: hosts, logins and passwords. + It is a simple text file, where each row has the following structure: + +``` +boot_node_url login password +node0_url login0 password0 +node1_url login1 password1 +... +``` + +Note: the single _bootstrap_ node must go first. To support multiple bootstrap nodes, alter the tool. + +Preparing such a file is the only "manual" step that is required. + +2. 
Review `/templates` + +This sub-folder contains two `toml` templates that will be processed with `generate-jobspecs` command. + +3. Run the tool: `go run .` to see available commands. + +Initial set of commands: + +- `generate-ocr2config` generates `FunctionsOracleConfig.json` that is consumed by contracts tooling. + - If manual configuration is preferred instead of automatically fetching the required parameters from the nodes, create a file that follows the same format as `src/sample_keys.json` and enter the required parameters which can be found on each node's Key Management page in the UI. Note that the bootstrap node is not included in this file. The path to this file should be entered for the `-keys` parameter instead of using the `-nodes` parameter. + - When using the `-nodes` parameter, a `DONPublicKeys.json` file will be generated. This can be used to simplify the creation of the `keyGenConfig.json` file when generating threshold keys with the [Functions admin tooling](https://github.com/goplugin/functions-admin-tooling/blob/main/threshold_key_manager/README.md). + - Use `src/sample_config.json` as a template for the `-config` file. +- `generate-jobspecs` generates Job Specs (toml) for each node based on its role. + - Jobspecs can also be created manually by swapping the relevant values in the `toml` files. +- `deploy-jobspecs` deploys Job Specs generated by `generate-jobspec` to all the nodes. + +All generated artefacts are saved in `artefacts` sub-directory. + +Each command has its own parameters. Simply run a command without parameters to see usage. + +4. Common caveats + +- The current implementation expects a single bootstrap node and a few oracle nodes. + Bootstrap node must come first in the nodes list file. +- You must provide the `http://` or `https://` prefix for hosts +- Any command would terminate immediately with the first issue detected. +- `deploy-jobspecs` command does NOT check for the existing jobs, be careful. 
+- `deploy-jobspecs` command does not deploy bridges, they should exist prior to execution. +- The tooling does not interact with chains/contracts. + +1. Future enhancements + +- Add `deploy-bridges` command. +- For NOPs: make commands to run against a single node with terminal authorization. +- For NOPs: make sure we can inject their node config from a file. diff --git a/core/scripts/functions/artefacts/README.md b/core/scripts/functions/artefacts/README.md new file mode 100644 index 00000000..68f06dbd --- /dev/null +++ b/core/scripts/functions/artefacts/README.md @@ -0,0 +1 @@ +All generated artefacts will be saved here. \ No newline at end of file diff --git a/core/scripts/functions/main.go b/core/scripts/functions/main.go new file mode 100644 index 00000000..ccf86827 --- /dev/null +++ b/core/scripts/functions/main.go @@ -0,0 +1,48 @@ +package main + +import ( + "fmt" + "os" + "strings" + + "github.com/goplugin/pluginv3.0/core/scripts/functions/src" +) + +type command interface { + Run([]string) + Name() string +} + +func main() { + commands := []command{ + src.NewGenerateOCR2ConfigCommand(), + src.NewGenerateJobSpecsCommand(), + src.NewDeployJobSpecsCommand(), + src.NewDeleteJobsCommand(), + } + + commandsList := func(commands []command) string { + var scs []string + for _, command := range commands { + scs = append(scs, command.Name()) + } + return strings.Join(scs, ", ") + }(commands) + + if len(os.Args) >= 2 { + requestedCommand := os.Args[1] + + for _, command := range commands { + if command.Name() == requestedCommand { + command.Run(os.Args[2:]) + return + } + } + fmt.Println("Unknown command:", requestedCommand) + } else { + fmt.Println("No command specified") + } + + fmt.Println("Supported commands:", commandsList) + os.Exit(1) +} diff --git a/core/scripts/functions/src/app.go b/core/scripts/functions/src/app.go new file mode 100644 index 00000000..1c8302d7 --- /dev/null +++ b/core/scripts/functions/src/app.go @@ -0,0 +1,31 @@ +package src + +import ( + 
"flag" + "io" + + "github.com/urfave/cli" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + clcmd "github.com/goplugin/pluginv3.0/v2/core/cmd" +) + +func newApp(n *node, writer io.Writer) (*clcmd.Shell, *cli.App) { + client := &clcmd.Shell{ + Renderer: clcmd.RendererJSON{Writer: writer}, + AppFactory: clcmd.PluginAppFactory{}, + KeyStoreAuthenticator: clcmd.TerminalKeyStoreAuthenticator{Prompter: n}, + FallbackAPIInitializer: clcmd.NewPromptingAPIInitializer(n), + Runner: clcmd.PluginRunner{}, + PromptingSessionRequestBuilder: clcmd.NewPromptingSessionRequestBuilder(n), + ChangePasswordPrompter: clcmd.NewChangePasswordPrompter(), + PasswordPrompter: clcmd.NewPasswordPrompter(), + } + app := clcmd.NewApp(client) + fs := flag.NewFlagSet("blah", flag.ContinueOnError) + fs.String("remote-node-url", n.url.String(), "") + helpers.PanicErr(app.Before(cli.NewContext(nil, fs, nil))) + // overwrite renderer since it's set to stdout after Before() is called + client.Renderer = clcmd.RendererJSON{Writer: writer} + return client, app +} diff --git a/core/scripts/functions/src/delete_jobs.go b/core/scripts/functions/src/delete_jobs.go new file mode 100644 index 00000000..3d93c662 --- /dev/null +++ b/core/scripts/functions/src/delete_jobs.go @@ -0,0 +1,85 @@ +package src + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "os" + + "github.com/urfave/cli" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" +) + +type deleteJobs struct { +} + +type OCRSpec struct { + ContractID string +} + +type BootSpec struct { + ContractID string +} + +type JobSpec struct { + Id string + Name string + BootstrapSpec BootSpec + OffChainReporting2OracleSpec OCRSpec +} + +func NewDeleteJobsCommand() *deleteJobs { + return &deleteJobs{} +} + +func (g *deleteJobs) Name() string { + return "delete-jobs" +} + +func (g *deleteJobs) Run(args []string) { + fs := flag.NewFlagSet(g.Name(), flag.ContinueOnError) + nodesFile := fs.String("nodes", "", "a file containing 
nodes urls, logins and passwords") + contractAddress := fs.String("contract", "", "oracle contract address") + err := fs.Parse(args) + if err != nil || *nodesFile == "" || *contractAddress == "" { + fs.Usage() + os.Exit(1) + } + + nodes := mustReadNodesList(*nodesFile) + for _, node := range nodes { + output := &bytes.Buffer{} + client, app := newApp(node, output) + + fmt.Println("Logging in:", node.url) + loginFs := flag.NewFlagSet("test", flag.ContinueOnError) + loginFs.Bool("bypass-version-check", true, "") + loginCtx := cli.NewContext(app, loginFs, nil) + err := client.RemoteLogin(loginCtx) + helpers.PanicErr(err) + output.Reset() + + fileFs := flag.NewFlagSet("test", flag.ExitOnError) + err = client.ListJobs(cli.NewContext(app, fileFs, nil)) + helpers.PanicErr(err) + + var parsed []JobSpec + err = json.Unmarshal(output.Bytes(), &parsed) + helpers.PanicErr(err) + + for _, jobSpec := range parsed { + if jobSpec.BootstrapSpec.ContractID == *contractAddress || jobSpec.OffChainReporting2OracleSpec.ContractID == *contractAddress { + fmt.Println("Deleting job ID:", jobSpec.Id, "name:", jobSpec.Name) + set := flag.NewFlagSet("test", flag.ExitOnError) + err = set.Parse([]string{jobSpec.Id}) + helpers.PanicErr(err) + err = client.DeleteJob(cli.NewContext(app, set, nil)) + helpers.PanicErr(err) + } + } + + output.Reset() + } +} diff --git a/core/scripts/functions/src/deploy_jobspecs_cmd.go b/core/scripts/functions/src/deploy_jobspecs_cmd.go new file mode 100644 index 00000000..4a58b80d --- /dev/null +++ b/core/scripts/functions/src/deploy_jobspecs_cmd.go @@ -0,0 +1,66 @@ +package src + +import ( + "bytes" + "errors" + "flag" + "fmt" + "os" + "path/filepath" + + "github.com/urfave/cli" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" +) + +type deployJobSpecs struct { +} + +func NewDeployJobSpecsCommand() *deployJobSpecs { + return &deployJobSpecs{} +} + +func (g *deployJobSpecs) Name() string { + return "deploy-jobspecs" +} + +func (g *deployJobSpecs) 
Run(args []string) { + fs := flag.NewFlagSet(g.Name(), flag.ContinueOnError) + nodesFile := fs.String("nodes", "", "a file containing nodes urls, logins and passwords") + + if err := fs.Parse(args); err != nil || nodesFile == nil || *nodesFile == "" { + fs.Usage() + os.Exit(1) + } + + nodes := mustReadNodesList(*nodesFile) + for _, n := range nodes { + output := &bytes.Buffer{} + client, app := newApp(n, output) + + fmt.Println("Logging in:", n.url) + loginFs := flag.NewFlagSet("test", flag.ContinueOnError) + loginFs.Bool("bypass-version-check", true, "") + loginCtx := cli.NewContext(app, loginFs, nil) + err := client.RemoteLogin(loginCtx) + helpers.PanicErr(err) + output.Reset() + + tomlPath := filepath.Join(artefactsDir, n.url.Host+".toml") + tomlPath, err = filepath.Abs(tomlPath) + if err != nil { + helpers.PanicErr(err) + } + fmt.Println("Deploying jobspec:", tomlPath) + if _, err = os.Stat(tomlPath); err != nil { + helpers.PanicErr(errors.New("toml file does not exist")) + } + + fileFs := flag.NewFlagSet("test", flag.ExitOnError) + err = fileFs.Parse([]string{tomlPath}) + helpers.PanicErr(err) + err = client.CreateJob(cli.NewContext(app, fileFs, nil)) + helpers.PanicErr(err) + output.Reset() + } +} diff --git a/core/scripts/functions/src/fetching.go b/core/scripts/functions/src/fetching.go new file mode 100644 index 00000000..020d8010 --- /dev/null +++ b/core/scripts/functions/src/fetching.go @@ -0,0 +1,125 @@ +package src + +import ( + "bytes" + "encoding/json" + "errors" + "flag" + "fmt" + "strings" + + "github.com/urfave/cli" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +type ocr2Bundle struct { + ID string `json:"id"` + ChainType string `json:"chainType"` + OnchainPublicKey string `json:"onchainPublicKey"` + OffchainPublicKey string `json:"offchainPublicKey"` + ConfigPublicKey string 
`json:"configPublicKey"` +} + +func mustFetchNodesKeys(chainID int64, nodes []*node) (nca []NodeKeys) { + for _, n := range nodes { + output := &bytes.Buffer{} + client, app := newApp(n, output) + + fmt.Println("Logging in:", n.url) + loginFs := flag.NewFlagSet("test", flag.ContinueOnError) + loginFs.Bool("bypass-version-check", true, "") + loginCtx := cli.NewContext(app, loginFs, nil) + err := client.RemoteLogin(loginCtx) + helpers.PanicErr(err) + output.Reset() + + err = client.ListETHKeys(&cli.Context{ + App: app, + }) + helpers.PanicErr(err) + var ethKeys []presenters.ETHKeyResource + helpers.PanicErr(json.Unmarshal(output.Bytes(), ðKeys)) + ethAddress, err := findFirstGoodEthKeyAddress(chainID, ethKeys) + helpers.PanicErr(err) + output.Reset() + + err = client.ListP2PKeys(&cli.Context{ + App: app, + }) + helpers.PanicErr(err) + var p2pKeys []presenters.P2PKeyResource + helpers.PanicErr(json.Unmarshal(output.Bytes(), &p2pKeys)) + if len(p2pKeys) != 1 { + helpers.PanicErr(errors.New("node must have single p2p key")) + } + peerID := strings.TrimPrefix(p2pKeys[0].PeerID, "p2p_") + output.Reset() + + var ocr2Bundles []ocr2Bundle + err = client.ListOCR2KeyBundles(&cli.Context{ + App: app, + }) + helpers.PanicErr(err) + helpers.PanicErr(json.Unmarshal(output.Bytes(), &ocr2Bundles)) + ocr2BundleIndex := findEvmOCR2Bundle(ocr2Bundles) + if ocr2BundleIndex == -1 { + helpers.PanicErr(errors.New("node must have EVM OCR2 bundle")) + } + ocr2Bndl := ocr2Bundles[ocr2BundleIndex] + output.Reset() + + err = client.ListCSAKeys(&cli.Context{ + App: app, + }) + helpers.PanicErr(err) + var csaKeys []presenters.CSAKeyResource + helpers.PanicErr(json.Unmarshal(output.Bytes(), &csaKeys)) + csaPubKey, err := findFirstCSAPublicKey(csaKeys) + helpers.PanicErr(err) + output.Reset() + + nc := NodeKeys{ + EthAddress: ethAddress, + P2PPeerID: peerID, + OCR2BundleID: ocr2Bndl.ID, + OCR2ConfigPublicKey: strings.TrimPrefix(ocr2Bndl.ConfigPublicKey, "ocr2cfg_evm_"), + OCR2OnchainPublicKey: 
strings.TrimPrefix(ocr2Bndl.OnchainPublicKey, "ocr2on_evm_"), + OCR2OffchainPublicKey: strings.TrimPrefix(ocr2Bndl.OffchainPublicKey, "ocr2off_evm_"), + CSAPublicKey: csaPubKey, + } + + nca = append(nca, nc) + } + return +} + +func findFirstCSAPublicKey(csaKeyResources []presenters.CSAKeyResource) (string, error) { + for _, r := range csaKeyResources { + return r.PubKey, nil + } + return "", errors.New("did not find any CSA Key Resources") +} + +func findEvmOCR2Bundle(ocr2Bundles []ocr2Bundle) int { + for i, b := range ocr2Bundles { + if b.ChainType == "evm" { + return i + } + } + return -1 +} + +func findFirstGoodEthKeyAddress(chainID int64, ethKeys []presenters.ETHKeyResource) (string, error) { + for _, ethKey := range ethKeys { + if ethKey.EVMChainID.Equal(ubig.NewI(chainID)) && !ethKey.Disabled { + if ethKey.EthBalance.IsZero() { + fmt.Println("WARN: selected ETH address has zero balance", ethKey.Address) + } + return ethKey.Address, nil + } + } + return "", errors.New("did not find an enabled ETH key for the given chain ID") +} diff --git a/core/scripts/functions/src/files.go b/core/scripts/functions/src/files.go new file mode 100644 index 00000000..82abcb59 --- /dev/null +++ b/core/scripts/functions/src/files.go @@ -0,0 +1,52 @@ +package src + +import ( + "bufio" + "fmt" + "os" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const ( + configFile = "config.yaml" + templatesDir = "templates" + artefactsDir = "artefacts" + ocr2ConfigJson = "FunctionsOracleConfig.json" + ocr2PublicKeysJSON = "OCR2PublicKeys.json" + bootstrapSpecTemplate = "bootstrap.toml" + oracleSpecTemplate = "oracle.toml" +) + +func writeLines(lines []string, path string) error { + file, err := os.Create(path) + if err != nil { + return err + } + wc := utils.NewDeferableWriteCloser(file) + defer wc.Close() + + w := bufio.NewWriter(file) + for _, line := range lines { + fmt.Fprintln(w, line) + } + if err := w.Flush(); err != nil { + return err + } + return wc.Close() +} + +func 
readLines(path string) ([]string, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + var lines []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + return lines, scanner.Err() +} diff --git a/core/scripts/functions/src/files_test.go b/core/scripts/functions/src/files_test.go new file mode 100644 index 00000000..4f6c5aeb --- /dev/null +++ b/core/scripts/functions/src/files_test.go @@ -0,0 +1,37 @@ +package src + +import ( + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_writeLines(t *testing.T) { + type args struct { + lines []string + } + tests := []struct { + name string + args args + }{ + { + name: "write read lines", + args: args{ + lines: []string{"a", "b"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pth := filepath.Join(t.TempDir(), strings.ReplaceAll(tt.name, " ", "_")) + err := writeLines(tt.args.lines, pth) + assert.NoError(t, err) + got, err := readLines(pth) + assert.NoError(t, err) + assert.Equal(t, tt.args.lines, got) + + }) + } +} diff --git a/core/scripts/functions/src/generate_jobspecs_cmd.go b/core/scripts/functions/src/generate_jobspecs_cmd.go new file mode 100644 index 00000000..74617f66 --- /dev/null +++ b/core/scripts/functions/src/generate_jobspecs_cmd.go @@ -0,0 +1,87 @@ +package src + +import ( + "flag" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" +) + +type generateJobSpecs struct { +} + +func NewGenerateJobSpecsCommand() *generateJobSpecs { + return &generateJobSpecs{} +} + +func (g *generateJobSpecs) Name() string { + return "generate-jobspecs" +} + +func (g *generateJobSpecs) Run(args []string) { + fs := flag.NewFlagSet(g.Name(), flag.ContinueOnError) + nodesFile := fs.String("nodes", "", "a file containing nodes urls, logins and passwords") + chainID := 
fs.Int64("chainid", 80001, "chain id") + p2pPort := fs.Int64("p2pport", 6690, "p2p port") + donID := fs.String("donid", "", "don id string") + routerAddress := fs.String("contract", "", "router contract address") + truncateHostname := fs.Bool("truncateboothostname", false, "truncate host name to first segment (needed for staging DONs)") + gatewayID := fs.String("gatewayid", "", "gateway id string") + gatewayURL := fs.String("gatewayurl", "", "gateway url string") + err := fs.Parse(args) + if err != nil || nodesFile == nil || *nodesFile == "" || routerAddress == nil || *routerAddress == "" { + fs.Usage() + os.Exit(1) + } + + nodes := mustReadNodesList(*nodesFile) + nca := mustFetchNodesKeys(*chainID, nodes) + bootstrapNode := nca[0] + + lines, err := readLines(filepath.Join(templatesDir, bootstrapSpecTemplate)) + helpers.PanicErr(err) + + bootHost := nodes[0].url.Host + lines = replacePlaceholders(lines, *donID, *chainID, *p2pPort, *routerAddress, bootHost, &bootstrapNode, &bootstrapNode, *truncateHostname, *gatewayID, *gatewayURL) + outputPath := filepath.Join(artefactsDir, bootHost+".toml") + err = writeLines(lines, outputPath) + helpers.PanicErr(err) + fmt.Println("Saved bootstrap node jobspec:", outputPath) + + lines, err = readLines(filepath.Join(templatesDir, oracleSpecTemplate)) + helpers.PanicErr(err) + for i := 1; i < len(nodes); i++ { + oracleLines := replacePlaceholders(lines, *donID, *chainID, *p2pPort, *routerAddress, bootHost, &bootstrapNode, &nca[i], *truncateHostname, *gatewayID, *gatewayURL) + outputPath := filepath.Join(artefactsDir, nodes[i].url.Host+".toml") + err = writeLines(oracleLines, outputPath) + helpers.PanicErr(err) + fmt.Println("Saved oracle node jobspec:", outputPath) + } +} + +func replacePlaceholders(lines []string, donID string, chainID, p2pPort int64, routerAddress, bootHost string, boot *NodeKeys, node *NodeKeys, truncateHostname bool, gatewayID string, gatewayURL string) (output []string) { + chainIDStr := 
strconv.FormatInt(chainID, 10) + if truncateHostname { + bootHost = bootHost[:strings.IndexByte(bootHost, '.')] + } + bootstrapper := fmt.Sprintf("%s@%s:%d", boot.P2PPeerID, bootHost, p2pPort) + ts := time.Now().UTC().Format("2006-01-02T15:04") + for _, l := range lines { + l = strings.Replace(l, "{{chain_id}}", chainIDStr, 1) + l = strings.Replace(l, "{{router_contract_address}}", routerAddress, 1) + l = strings.Replace(l, "{{node_eth_address}}", node.EthAddress, 1) + l = strings.Replace(l, "{{ocr2_key_bundle_id}}", node.OCR2BundleID, 1) + l = strings.Replace(l, "{{p2p_bootstrapper}}", bootstrapper, 1) + l = strings.Replace(l, "{{timestamp}}", ts, 1) + l = strings.Replace(l, "{{don_id}}", donID, 1) + l = strings.Replace(l, "{{gateway_id}}", gatewayID, 1) + l = strings.Replace(l, "{{gateway_url}}", gatewayURL, 1) + output = append(output, l) + } + return +} diff --git a/core/scripts/functions/src/generate_ocr2_config_cmd.go b/core/scripts/functions/src/generate_ocr2_config_cmd.go new file mode 100644 index 00000000..575f6f92 --- /dev/null +++ b/core/scripts/functions/src/generate_ocr2_config_cmd.go @@ -0,0 +1,288 @@ +package src + +import ( + "crypto/ed25519" + "encoding/hex" + "encoding/json" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + "github.com/goplugin/libocr/offchainreporting2plus/types" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" +) + +// NOTE: Field names need to match what's in the JSON input file (see sample_config.json) +type TopLevelConfigSource struct { + OracleConfig OracleConfigSource +} + +type ThresholdOffchainConfig struct { + MaxQueryLengthBytes uint32 + MaxObservationLengthBytes uint32 + MaxReportLengthBytes uint32 + RequestCountLimit uint32 + RequestTotalBytesLimit uint32 + RequireLocalRequestCheck bool + K uint32 
+} + +type S4ReportingPluginConfig struct { + MaxQueryLengthBytes uint32 + MaxObservationLengthBytes uint32 + MaxReportLengthBytes uint32 + NSnapshotShards uint32 + MaxObservationEntries uint32 + MaxReportEntries uint32 + MaxDeleteExpiredEntries uint32 +} + +type OracleConfigSource struct { + MaxQueryLengthBytes uint32 + MaxObservationLengthBytes uint32 + MaxReportLengthBytes uint32 + MaxRequestBatchSize uint32 + DefaultAggregationMethod int32 + UniqueReports bool + ThresholdOffchainConfig ThresholdOffchainConfig + S4ReportingPluginConfig S4ReportingPluginConfig + MaxReportTotalCallbackGas uint32 + + DeltaProgressMillis uint32 + DeltaResendMillis uint32 + DeltaRoundMillis uint32 + DeltaGraceMillis uint32 + DeltaStageMillis uint32 + MaxRoundsPerEpoch uint8 + TransmissionSchedule []int + + MaxDurationQueryMillis uint32 + MaxDurationObservationMillis uint32 + MaxDurationReportMillis uint32 + MaxDurationAcceptMillis uint32 + MaxDurationTransmitMillis uint32 + + MaxFaultyOracles int +} + +type NodeKeys struct { + EthAddress string + P2PPeerID string // p2p_ + OCR2BundleID string // used only in job spec + OCR2OnchainPublicKey string // ocr2on_evm_ + OCR2OffchainPublicKey string // ocr2off_evm_ + OCR2ConfigPublicKey string // ocr2cfg_evm_ + CSAPublicKey string +} + +type orc2drOracleConfig struct { + Signers []string `json:"signers"` + Transmitters []string `json:"transmitters"` + F uint8 `json:"f"` + OnchainConfig string `json:"onchainConfig"` + OffchainConfigVersion uint64 `json:"offchainConfigVersion"` + OffchainConfig string `json:"offchainConfig"` +} + +type generateOCR2Config struct { +} + +func NewGenerateOCR2ConfigCommand() *generateOCR2Config { + return &generateOCR2Config{} +} + +func (g *generateOCR2Config) Name() string { + return "generate-ocr2config" +} + +func mustParseJSONConfigFile(fileName string) (output TopLevelConfigSource) { + return mustParseJSON[TopLevelConfigSource](fileName) +} + +func mustParseKeysFile(fileName string) (output []NodeKeys) { + 
return mustParseJSON[[]NodeKeys](fileName) +} + +func mustParseJSON[T any](fileName string) (output T) { + jsonFile, err := os.Open(fileName) + if err != nil { + panic(err) + } + defer jsonFile.Close() + bytes, err := io.ReadAll(jsonFile) + if err != nil { + panic(err) + } + err = json.Unmarshal(bytes, &output) + if err != nil { + panic(err) + } + return +} + +func (g *generateOCR2Config) Run(args []string) { + fs := flag.NewFlagSet(g.Name(), flag.ContinueOnError) + nodesFile := fs.String("nodes", "", "a file containing nodes urls, logins and passwords") + keysFile := fs.String("keys", "", "a file containing nodes public keys") + configFile := fs.String("config", "", "a file containing JSON config") + chainID := fs.Int64("chainid", 80001, "chain id") + if err := fs.Parse(args); err != nil || (*nodesFile == "" && *keysFile == "") || *configFile == "" || chainID == nil { + fs.Usage() + os.Exit(1) + } + + topLevelCfg := mustParseJSONConfigFile(*configFile) + cfg := topLevelCfg.OracleConfig + var nca []NodeKeys + if *keysFile != "" { + nca = mustParseKeysFile(*keysFile) + } else { + nodes := mustReadNodesList(*nodesFile) + nca = mustFetchNodesKeys(*chainID, nodes)[1:] // ignore boot node + + nodePublicKeys, err := json.MarshalIndent(nca, "", " ") + if err != nil { + panic(err) + } + filepath := filepath.Join(artefactsDir, ocr2PublicKeysJSON) + err = os.WriteFile(filepath, nodePublicKeys, 0600) + if err != nil { + panic(err) + } + fmt.Println("Functions OCR2 public keys have been saved to:", filepath) + } + + onchainPubKeys := []common.Address{} + for _, n := range nca { + onchainPubKeys = append(onchainPubKeys, common.HexToAddress(n.OCR2OnchainPublicKey)) + } + + offchainPubKeysBytes := []types.OffchainPublicKey{} + for _, n := range nca { + pkBytes, err := hex.DecodeString(n.OCR2OffchainPublicKey) + if err != nil { + panic(err) + } + + pkBytesFixed := [ed25519.PublicKeySize]byte{} + nCopied := copy(pkBytesFixed[:], pkBytes) + if nCopied != ed25519.PublicKeySize { + 
panic("wrong num elements copied from ocr2 offchain public key") + } + + offchainPubKeysBytes = append(offchainPubKeysBytes, types.OffchainPublicKey(pkBytesFixed)) + } + + configPubKeysBytes := []types.ConfigEncryptionPublicKey{} + for _, n := range nca { + pkBytes, err := hex.DecodeString(n.OCR2ConfigPublicKey) + helpers.PanicErr(err) + + pkBytesFixed := [ed25519.PublicKeySize]byte{} + n := copy(pkBytesFixed[:], pkBytes) + if n != ed25519.PublicKeySize { + panic("wrong num elements copied") + } + + configPubKeysBytes = append(configPubKeysBytes, types.ConfigEncryptionPublicKey(pkBytesFixed)) + } + + identities := []confighelper.OracleIdentityExtra{} + for index := range nca { + identities = append(identities, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: onchainPubKeys[index][:], + OffchainPublicKey: offchainPubKeysBytes[index], + PeerID: nca[index].P2PPeerID, + TransmitAccount: types.Account(nca[index].EthAddress), + }, + ConfigEncryptionPublicKey: configPubKeysBytes[index], + }) + } + + reportingPluginConfigBytes, err := config.EncodeReportingPluginConfig(&config.ReportingPluginConfigWrapper{ + Config: &config.ReportingPluginConfig{ + MaxQueryLengthBytes: cfg.MaxQueryLengthBytes, + MaxObservationLengthBytes: cfg.MaxObservationLengthBytes, + MaxReportLengthBytes: cfg.MaxReportLengthBytes, + MaxRequestBatchSize: cfg.MaxRequestBatchSize, + DefaultAggregationMethod: config.AggregationMethod(cfg.DefaultAggregationMethod), + UniqueReports: cfg.UniqueReports, + ThresholdPluginConfig: &config.ThresholdReportingPluginConfig{ + MaxQueryLengthBytes: cfg.ThresholdOffchainConfig.MaxQueryLengthBytes, + MaxObservationLengthBytes: cfg.ThresholdOffchainConfig.MaxObservationLengthBytes, + MaxReportLengthBytes: cfg.ThresholdOffchainConfig.MaxReportLengthBytes, + RequestCountLimit: cfg.ThresholdOffchainConfig.RequestCountLimit, + RequestTotalBytesLimit: cfg.ThresholdOffchainConfig.RequestTotalBytesLimit, + 
RequireLocalRequestCheck: cfg.ThresholdOffchainConfig.RequireLocalRequestCheck, + K: cfg.ThresholdOffchainConfig.K, + }, + S4PluginConfig: &config.S4ReportingPluginConfig{ + MaxQueryLengthBytes: cfg.S4ReportingPluginConfig.MaxQueryLengthBytes, + MaxObservationLengthBytes: cfg.S4ReportingPluginConfig.MaxObservationLengthBytes, + MaxReportLengthBytes: cfg.S4ReportingPluginConfig.MaxReportLengthBytes, + NSnapshotShards: cfg.S4ReportingPluginConfig.NSnapshotShards, + MaxObservationEntries: cfg.S4ReportingPluginConfig.MaxObservationEntries, + MaxReportEntries: cfg.S4ReportingPluginConfig.MaxReportEntries, + MaxDeleteExpiredEntries: cfg.S4ReportingPluginConfig.MaxDeleteExpiredEntries, + }, + MaxReportTotalCallbackGas: cfg.MaxReportTotalCallbackGas, + }, + }) + if err != nil { + panic(err) + } + + signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests( + time.Duration(cfg.DeltaProgressMillis)*time.Millisecond, + time.Duration(cfg.DeltaResendMillis)*time.Millisecond, + time.Duration(cfg.DeltaRoundMillis)*time.Millisecond, + time.Duration(cfg.DeltaGraceMillis)*time.Millisecond, + time.Duration(cfg.DeltaStageMillis)*time.Millisecond, + cfg.MaxRoundsPerEpoch, + cfg.TransmissionSchedule, + identities, + reportingPluginConfigBytes, + time.Duration(cfg.MaxDurationQueryMillis)*time.Millisecond, + time.Duration(cfg.MaxDurationObservationMillis)*time.Millisecond, + time.Duration(cfg.MaxDurationReportMillis)*time.Millisecond, + time.Duration(cfg.MaxDurationAcceptMillis)*time.Millisecond, + time.Duration(cfg.MaxDurationTransmitMillis)*time.Millisecond, + cfg.MaxFaultyOracles, + nil, // empty onChain config + ) + helpers.PanicErr(err) + + var signersStr []string + var transmittersStr []string + for i := range transmitters { + signersStr = append(signersStr, "0x"+hex.EncodeToString(signers[i])) + transmittersStr = append(transmittersStr, string(transmitters[i])) + } + + config := orc2drOracleConfig{ + Signers: 
signersStr, + Transmitters: transmittersStr, + F: f, + OnchainConfig: "0x" + hex.EncodeToString(onchainConfig), + OffchainConfigVersion: offchainConfigVersion, + OffchainConfig: "0x" + hex.EncodeToString(offchainConfig), + } + + js, err := json.MarshalIndent(config, "", " ") + helpers.PanicErr(err) + + filepath := filepath.Join(artefactsDir, ocr2ConfigJson) + err = os.WriteFile(filepath, js, 0600) + helpers.PanicErr(err) + + fmt.Println("Functions OCR2 config has been saved to:", filepath) +} diff --git a/core/scripts/functions/src/nodes.go b/core/scripts/functions/src/nodes.go new file mode 100644 index 00000000..5a645502 --- /dev/null +++ b/core/scripts/functions/src/nodes.go @@ -0,0 +1,61 @@ +package src + +import ( + "errors" + "fmt" + "net/url" + "strings" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" +) + +type node struct { + url *url.URL + login string + password string +} + +func (n node) IsTerminal() bool { + return false +} + +func (n node) PasswordPrompt(p string) string { + return n.password +} + +func (n node) Prompt(p string) string { + return n.login +} + +func mustReadNodesList(path string) []*node { + fmt.Println("Reading nodes list from", path) + + nodesList, err := readLines(path) + if err != nil { + helpers.PanicErr(err) + } + var nodes []*node + var hasBoot bool + for _, r := range nodesList { + rr := strings.TrimSpace(r) + if len(rr) == 0 { + continue + } + s := strings.Split(rr, " ") + if len(s) != 3 { + helpers.PanicErr(errors.New("wrong nodes list format")) + } + if strings.Contains(s[0], "boot") && hasBoot { + helpers.PanicErr(errors.New("the single boot node must come first")) + } + hasBoot = true + url, err := url.Parse(s[0]) + helpers.PanicErr(err) + nodes = append(nodes, &node{ + url: url, + login: s[1], + password: s[2], + }) + } + return nodes +} diff --git a/core/scripts/functions/src/sample_config.json b/core/scripts/functions/src/sample_config.json new file mode 100644 index 00000000..a6f4f5b0 --- /dev/null +++ 
b/core/scripts/functions/src/sample_config.json @@ -0,0 +1,47 @@ +{ + "OracleConfig": { + "MaxQueryLengthBytes": 10000, + "MaxObservationLengthBytes": 10000, + "MaxReportLengthBytes": 10000, + "MaxRequestBatchSize": 10, + "DefaultAggregationMethod": 0, + "UniqueReports": true, + "MaxReportTotalCallbackGas": 2000000, + + "ThresholdOffchainConfig": { + "MaxQueryLengthBytes": 10000, + "MaxObservationLengthBytes": 10000, + "MaxReportLengthBytes": 10000, + "RequestCountLimit": 100, + "RequestTotalBytesLimit": 100000, + "RequireLocalRequestCheck": true, + "K": 3 + }, + + "S4ReportingPluginConfig": { + "MaxQueryLengthBytes": 50000, + "MaxObservationLengthBytes": 50000, + "MaxReportLengthBytes": 50000, + "NSnapshotShards": 1, + "MaxObservationEntries": 1000, + "MaxReportEntries": 1000, + "MaxDeleteExpiredEntries": 1000 + }, + + "DeltaProgressMillis": 30000, + "DeltaResendMillis": 10000, + "DeltaRoundMillis": 10000, + "DeltaGraceMillis": 2000, + "DeltaStageMillis": 30000, + "MaxRoundsPerEpoch": 5, + "TransmissionSchedule": [1, 1, 1, 1], + + "MaxDurationQueryMillis": 5000, + "MaxDurationObservationMillis": 5000, + "MaxDurationReportMillis": 5000, + "MaxDurationAcceptMillis": 5000, + "MaxDurationTransmitMillis": 5000, + + "MaxFaultyOracles": 1 + } +} diff --git a/core/scripts/functions/src/sample_keys.json b/core/scripts/functions/src/sample_keys.json new file mode 100644 index 00000000..ea6bd1f7 --- /dev/null +++ b/core/scripts/functions/src/sample_keys.json @@ -0,0 +1,38 @@ +[ + { + "EthAddress": "0x0000000000000000000000000000000000000000", + "P2PPeerID": "12D3KooW00000000000000000000000000000000000000000000", + "OCR2BundleID": "0000000000000000000000000000000000000000000000000000000000000000", + "OCR2OnchainPublicKey": "0000000000000000000000000000000000000000", + "OCR2OffchainPublicKey": "0000000000000000000000000000000000000000000000000000000000000000", + "OCR2ConfigPublicKey": "0000000000000000000000000000000000000000000000000000000000000000", + "CSAPublicKey": 
"0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "EthAddress": "0x0000000000000000000000000000000000000000", + "P2PPeerID": "12D3KooW00000000000000000000000000000000000000000000", + "OCR2BundleID": "0000000000000000000000000000000000000000000000000000000000000000", + "OCR2OnchainPublicKey": "0000000000000000000000000000000000000000", + "OCR2OffchainPublicKey": "0000000000000000000000000000000000000000000000000000000000000000", + "OCR2ConfigPublicKey": "0000000000000000000000000000000000000000000000000000000000000000", + "CSAPublicKey": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "EthAddress": "0x0000000000000000000000000000000000000000", + "P2PPeerID": "12D3KooW00000000000000000000000000000000000000000000", + "OCR2BundleID": "0000000000000000000000000000000000000000000000000000000000000000", + "OCR2OnchainPublicKey": "0000000000000000000000000000000000000000", + "OCR2OffchainPublicKey": "0000000000000000000000000000000000000000000000000000000000000000", + "OCR2ConfigPublicKey": "0000000000000000000000000000000000000000000000000000000000000000", + "CSAPublicKey": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "EthAddress": "0x0000000000000000000000000000000000000000", + "P2PPeerID": "12D3KooW00000000000000000000000000000000000000000000", + "OCR2BundleID": "0000000000000000000000000000000000000000000000000000000000000000", + "OCR2OnchainPublicKey": "0000000000000000000000000000000000000000", + "OCR2OffchainPublicKey": "0000000000000000000000000000000000000000000000000000000000000000", + "OCR2ConfigPublicKey": "0000000000000000000000000000000000000000000000000000000000000000", + "CSAPublicKey": "0000000000000000000000000000000000000000000000000000000000000000" + } +] diff --git a/core/scripts/functions/templates/bootstrap.toml b/core/scripts/functions/templates/bootstrap.toml new file mode 100644 index 00000000..a738a53e --- /dev/null +++ 
b/core/scripts/functions/templates/bootstrap.toml @@ -0,0 +1,12 @@ +type = "bootstrap" +schemaVersion = 1 +name = "Functions V1 bootstrap {{timestamp}}" +forwardingAllowed = false +contractID = "{{router_contract_address}}" +relay = "evm" + +[relayConfig] +chainID = {{chain_id}} +contractUpdateCheckFrequencySec = 60 +contractVersion = 1 +donID = "{{don_id}}" diff --git a/core/scripts/functions/templates/oracle.toml b/core/scripts/functions/templates/oracle.toml new file mode 100644 index 00000000..19d1df70 --- /dev/null +++ b/core/scripts/functions/templates/oracle.toml @@ -0,0 +1,85 @@ +type = "offchainreporting2" +schemaVersion = 1 +name = "Functions V1 {{timestamp}}" +forwardingAllowed = false +contractID = "{{router_contract_address}}" +ocrKeyBundleID = "{{ocr2_key_bundle_id}}" +p2pv2Bootstrappers = [ + "{{p2p_bootstrapper}}" +] +relay = "evm" +pluginType = "functions" +transmitterID = "{{node_eth_address}}" +observationSource = """ + run_computation [type="bridge" name="ea_bridge" requestData="{\\"note\\": \\"observationSource is unused but the bridge is required\\"}"] + run_computation +""" + +[relayConfig] +chainID = {{chain_id}} + +[pluginConfig] +contractUpdateCheckFrequencySec = 300 +contractVersion = 1 +donID = "{{don_id}}" +enableRequestSignatureCheck = false +listenerEventHandlerTimeoutSec = 210 +listenerEventsCheckFrequencyMillis = 500 +maxRequestSizeBytes = 30_720 +minIncomingConfirmations = 3 +pruneBatchSize = 500 +pruneCheckFrequencySec = 600 +pruneMaxStoredRequests = 20_000 +requestTimeoutBatchLookupSize = 200 +requestTimeoutCheckFrequencySec = 10 +requestTimeoutSec = 300 +maxRequestSizesList = [30_720, 51_200, 102_400, 204_800, 512_000, 1_048_576, 2_097_152, 3_145_728, 5_242_880, 10_485_760] +maxSecretsSizesList = [10_240, 20_480, 51_200, 102_400, 307_200, 512_000, 1_048_576, 2_097_152] +minimumSubscriptionBalance = "2 pli" +pastBlocksToPoll = 25 + + + [pluginConfig.OnchainAllowlist] + blockConfirmations = 1 + contractAddress = 
"{{router_contract_address}}" + contractVersion = 1 + updateFrequencySec = 30 + updateTimeoutSec = 10 + + [pluginConfig.OnchainSubscriptions] + blockConfirmations = 1 + contractAddress = "{{router_contract_address}}" + updateFrequencySec = 30 + updateTimeoutSec = 10 + updateRangeSize = 2000 + + [pluginConfig.RateLimiter] + globalBurst = 30 + globalRPS = 20 + perSenderBurst = 5 + perSenderRPS = 1 + + [pluginConfig.S4Constraints] + maxPayloadSizeBytes = 20_000 + maxSlotsPerUser = 5 + maxExpirationLengthSec = 259_200 + + [pluginConfig.decryptionQueueConfig] + completedCacheTimeoutSec = 300 + decryptRequestTimeoutSec = 180 + maxCiphertextBytes = 20_000 + maxCiphertextIdLength = 100 + maxQueueLength = 5_000 + + [pluginConfig.gatewayConnectorConfig] + AuthMinChallengeLen = 20 + AuthTimestampToleranceSec = 20 + DonID = "{{don_id}}" + NodeAddress = "{{node_eth_address}}" + + [pluginConfig.gatewayConnectorConfig.WsClientConfig] + HandshakeTimeoutMillis = 1_000 + + [[pluginConfig.gatewayConnectorConfig.Gateways]] + Id = "{{gateway_id}}" + URL = "{{gateway_url}}" \ No newline at end of file diff --git a/core/scripts/gateway/client/README.md b/core/scripts/gateway/client/README.md new file mode 100644 index 00000000..8c257753 --- /dev/null +++ b/core/scripts/gateway/client/README.md @@ -0,0 +1,20 @@ +# A gateway client script + +This script is used to connect to a gateway server and send commands to it. + +## Usage + +All requests have to be signed on behalf of a user, you need to provide your private key in .env file, e.g. + +``` +PRIVATE_KEY=1a2b3c... +``` + +The script will automatically sign the message using the provided private key. +Run the script without arguments to get the list of available commands. + +## Example + +``` +go run . 
-gateway_url https://01.functions-gateway.chain.link -don_id fun-avalanche-mainnet-2 -method secrets_list -message_id 123 +``` diff --git a/core/scripts/gateway/client/send_request.go b/core/scripts/gateway/client/send_request.go new file mode 100644 index 00000000..dc7e8ad5 --- /dev/null +++ b/core/scripts/gateway/client/send_request.go @@ -0,0 +1,155 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "os" + "time" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/joho/godotenv" + + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions" + "github.com/goplugin/pluginv3.0/v2/core/services/s4" +) + +func main() { + gatewayURL := flag.String("gateway_url", "", "Gateway URL") + privateKey := flag.String("private_key", "", "Private key to sign the message with") + messageId := flag.String("message_id", "", "Request ID") + methodName := flag.String("method", "", "Method name") + donId := flag.String("don_id", "", "DON ID") + s4SetSlotId := flag.Uint("s4_set_slot_id", 0, "S4 set slot ID") + s4SetVersion := flag.Uint64("s4_set_version", 0, "S4 set version") + s4SetExpirationPeriod := flag.Int64("s4_set_expiration_period", 60*60*1000, "S4 how long until the entry expires from now (in milliseconds)") + s4SetPayloadFile := flag.String("s4_set_payload_file", "", "S4 payload file to set secret") + repeat := flag.Bool("repeat", false, "Repeat sending the request every 10 seconds") + flag.Parse() + + if privateKey == nil || *privateKey == "" { + if err := godotenv.Load(); err != nil { + panic(err) + } + + privateKeyEnvVar := os.Getenv("PRIVATE_KEY") + privateKey = &privateKeyEnvVar + fmt.Println("Loaded private key from .env") + } + + // validate key and extract address + key, err := crypto.HexToECDSA(*privateKey) + if err != nil { + fmt.Println("error parsing private key", err) + return + } + address := 
crypto.PubkeyToAddress(key.PublicKey) + + var s4SetPayload []byte + if *methodName == functions.MethodSecretsSet { + s4SetPayload, err = os.ReadFile(*s4SetPayloadFile) + if err != nil { + fmt.Println("error reading S4 payload file", err) + return + } + } + + // build payload (if relevant) + var payloadJSON []byte + if *methodName == functions.MethodSecretsSet { + envelope := s4.Envelope{ + Address: address.Bytes(), + SlotID: *s4SetSlotId, + Version: *s4SetVersion, + Payload: s4SetPayload, + Expiration: time.Now().UnixMilli() + *s4SetExpirationPeriod, + } + signature, err2 := envelope.Sign(key) + if err2 != nil { + fmt.Println("error signing S4 envelope", err2) + return + } + + payloadJSON, err2 = json.Marshal(functions.SecretsSetRequest{ + SlotID: envelope.SlotID, + Version: envelope.Version, + Expiration: envelope.Expiration, + Payload: s4SetPayload, + Signature: signature, + }) + if err2 != nil { + fmt.Println("error marshaling S4 payload", err2) + return + } + } + + msg := &api.Message{ + Body: api.MessageBody{ + MessageId: *messageId, + Method: *methodName, + DonId: *donId, + Payload: json.RawMessage(payloadJSON), + }, + } + + if err = msg.Sign(key); err != nil { + fmt.Println("error signing message", err) + return + } + codec := api.JsonRPCCodec{} + rawMsg, err := codec.EncodeRequest(msg) + if err != nil { + fmt.Println("error JSON-RPC encoding", err) + return + } + + createRequest := func() (req *http.Request, err error) { + req, err = http.NewRequestWithContext(context.Background(), "POST", *gatewayURL, bytes.NewBuffer(rawMsg)) + if err == nil { + req.Header.Set("Content-Type", "application/json") + } + return + } + + client := &http.Client{} + + sendRequest := func() { + req, err2 := createRequest() + if err2 != nil { + fmt.Println("error creating a request", err2) + return + } + + resp, err2 := client.Do(req) + if err2 != nil { + fmt.Println("error sending a request", err2) + return + } + defer resp.Body.Close() + + body, err2 := io.ReadAll(resp.Body) + if 
err2 != nil { + fmt.Println("error sending a request", err2) + return + } + + var prettyJSON bytes.Buffer + if err2 = json.Indent(&prettyJSON, body, "", " "); err2 != nil { + fmt.Println(string(body)) + } else { + fmt.Println(prettyJSON.String()) + } + } + + sendRequest() + + for *repeat { + time.Sleep(10 * time.Second) + sendRequest() + } +} diff --git a/core/scripts/gateway/connector/run_connector.go b/core/scripts/gateway/connector/run_connector.go new file mode 100644 index 00000000..6408c0cf --- /dev/null +++ b/core/scripts/gateway/connector/run_connector.go @@ -0,0 +1,88 @@ +package main + +import ( + "context" + "crypto/ecdsa" + "flag" + "fmt" + "os" + "os/signal" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/jonboulle/clockwork" + "github.com/pelletier/go-toml/v2" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/common" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/connector" +) + +// Script to run Connector outside of the core node. +// +// Usage (without TLS): +// +// go run run_connector.go --config sample_config.toml +type client struct { + privateKey *ecdsa.PrivateKey + connector connector.GatewayConnector + lggr logger.Logger +} + +func (h *client) HandleGatewayMessage(ctx context.Context, gatewayId string, msg *api.Message) { + h.lggr.Infof("received message from gateway %s. Echoing back.", gatewayId) + err := h.connector.SendToGateway(ctx, gatewayId, msg) + if err != nil { + h.lggr.Errorw("failed to send to gateway", "id", gatewayId, "err", err) + } +} + +func (h *client) Sign(data ...[]byte) ([]byte, error) { + return common.SignData(h.privateKey, data...) 
+} + +func (h *client) Start(ctx context.Context) error { + return nil +} + +func (h *client) Close() error { + return nil +} + +func main() { + configFile := flag.String("config", "", "Path to TOML config file") + flag.Parse() + + rawConfig, err := os.ReadFile(*configFile) + if err != nil { + fmt.Println("error reading config:", err) + return + } + + var cfg connector.ConnectorConfig + err = toml.Unmarshal(rawConfig, &cfg) + if err != nil { + fmt.Println("error parsing config:", err) + return + } + + sampleKey, _ := crypto.HexToECDSA("cd47d3fafdbd652dd2b66c6104fa79b372c13cb01f4a4fbfc36107cce913ac1d") + lggr, _ := logger.NewLogger() + client := &client{privateKey: sampleKey, lggr: lggr} + connector, _ := connector.NewGatewayConnector(&cfg, client, client, clockwork.NewRealClock(), lggr) + client.connector = connector + + ctx, _ := signal.NotifyContext(context.Background(), os.Interrupt) + err = connector.Start(ctx) + if err != nil { + fmt.Println("error staring connector:", err) + return + } + + <-ctx.Done() + err = connector.Close() + if err != nil { + fmt.Println("error closing connector:", err) + return + } +} diff --git a/core/scripts/gateway/connector/sample_config.toml b/core/scripts/gateway/connector/sample_config.toml new file mode 100644 index 00000000..5d042998 --- /dev/null +++ b/core/scripts/gateway/connector/sample_config.toml @@ -0,0 +1,11 @@ +NodeAddress = "0x68902d681c28119f9b2531473a417088bf008e59" +DonId = "example_don" +AuthMinChallengeLen = 10 +AuthTimestampToleranceSec = 10 + +[WsClientConfig] +HandshakeTimeoutMillis = 1000 + +[[Gateways]] +Id = "example_gateway" +URL = "ws://localhost:8081/node" diff --git a/core/scripts/gateway/run_gateway.go b/core/scripts/gateway/run_gateway.go new file mode 100644 index 00000000..bdc75472 --- /dev/null +++ b/core/scripts/gateway/run_gateway.go @@ -0,0 +1,71 @@ +package main + +import ( + "context" + "flag" + "fmt" + "os" + "os/signal" + + "github.com/pelletier/go-toml/v2" + + 
"github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/config" +) + +// Script to run Gateway outside of the core node. It works only with simple handlers. +// Any handlers that depend on core services will fail in their factory methods. +// +// Usage without TLS: +// +// go run run_gateway.go --config sample_config.toml +// +// curl -X POST -d '{"jsonrpc":"2.0","method":"test","id":"abcd","params":{"body":{"don_id":"example_don"}}}' http://localhost:8080/user +// +// Usage with TLS: +// +// openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days 365 -out certificate.pem +// go run run_gateway.go --config sample_config_tls.toml +// +// curl -X POST -d '{"jsonrpc":"2.0","method":"test","id":"abcd","params":{"body":{"don_id":"example_don"}}}' https://localhost:8088/user -k +func main() { + configFile := flag.String("config", "", "Path to TOML config file") + flag.Parse() + + rawConfig, err := os.ReadFile(*configFile) + if err != nil { + fmt.Println("error reading config:", err) + return + } + + var cfg config.GatewayConfig + err = toml.Unmarshal(rawConfig, &cfg) + if err != nil { + fmt.Println("error parsing config:", err) + return + } + + lggr, _ := logger.NewLogger() + + handlerFactory := gateway.NewHandlerFactory(nil, nil, nil, lggr) + gw, err := gateway.NewGatewayFromConfig(&cfg, handlerFactory, lggr) + if err != nil { + fmt.Println("error creating Gateway object:", err) + return + } + + ctx, _ := signal.NotifyContext(context.Background(), os.Interrupt) + err = gw.Start(ctx) + if err != nil { + fmt.Println("error staring gateway:", err) + return + } + + <-ctx.Done() + err = gw.Close() + if err != nil { + fmt.Println("error closing gateway:", err) + return + } +} diff --git a/core/scripts/gateway/sample_config.toml b/core/scripts/gateway/sample_config.toml new file mode 100644 index 00000000..c58e2564 --- /dev/null +++ 
b/core/scripts/gateway/sample_config.toml @@ -0,0 +1,30 @@ +[UserServerConfig] +Port = 8080 +Path = "/user" +ContentTypeHeader = "application/jsonrpc" +ReadTimeoutMillis = 1000 +WriteTimeoutMillis = 1000 +RequestTimeoutMillis = 1000 +MaxRequestBytes = 10_000 + +[NodeServerConfig] +Port = 8081 +Path = "/node" +ReadTimeoutMillis = 1000 +WriteTimeoutMillis = 1000 +RequestTimeoutMillis = 1000 +MaxRequestBytes = 10_000 +HandshakeTimeoutMillis = 1000 + +[ConnectionManagerConfig] +AuthGatewayId = "example_gateway" +AuthTimestampToleranceSec = 60 +AuthChallengeLen = 32 + +[[Dons]] +DonId = "example_don" +HandlerName = "dummy" + +[[Dons.Members]] +Name = "example_node" +Address = "0x68902d681c28119f9b2531473a417088bf008e59" \ No newline at end of file diff --git a/core/scripts/gateway/sample_config_tls.toml b/core/scripts/gateway/sample_config_tls.toml new file mode 100644 index 00000000..a91d71a9 --- /dev/null +++ b/core/scripts/gateway/sample_config_tls.toml @@ -0,0 +1,36 @@ +[UserServerConfig] +Port = 8088 +TLSEnabled = true +TLSCertPath = "certificate.pem" +TLSKeyPath = "key.pem" +Path = "/user" +ContentTypeHeader = "application/jsonrpc" +ReadTimeoutMillis = 1000 +WriteTimeoutMillis = 1000 +RequestTimeoutMillis = 1000 +MaxRequestBytes = 10_000 + +[NodeServerConfig] +Port = 8089 +TLSEnabled = true +TLSCertPath = "certificate.pem" +TLSKeyPath = "key.pem" +Path = "/node" +ReadTimeoutMillis = 1000 +WriteTimeoutMillis = 1000 +RequestTimeoutMillis = 1000 +MaxRequestBytes = 10_000 +HandshakeTimeoutMillis = 1000 + +[ConnectionManagerConfig] +AuthGatewayId = "example_gateway" +AuthTimestampToleranceSec = 60 +AuthChallengeLen = 32 + +[[Dons]] +DonId = "example_don" +HandlerName = "dummy" + +[[Dons.Members]] +Name = "example_node" +Address = "0x68902d681c28119f9b2531473a417088bf008e59" \ No newline at end of file diff --git a/core/scripts/go.mod b/core/scripts/go.mod new file mode 100644 index 00000000..3a5767fa --- /dev/null +++ b/core/scripts/go.mod @@ -0,0 +1,340 @@ +module 
github.com/goplugin/pluginv3.0/core/scripts + +go 1.21.3 + +// Make sure we're working with the latest plugin libs +replace github.com/goplugin/pluginv3.0/v2 => ../../ + +require ( + github.com/docker/docker v24.0.7+incompatible + github.com/docker/go-connections v0.4.0 + github.com/ethereum/go-ethereum v1.13.8 + github.com/google/go-cmp v0.6.0 + github.com/google/uuid v1.4.0 + github.com/jmoiron/sqlx v1.3.5 + github.com/joho/godotenv v1.4.0 + github.com/jonboulle/clockwork v0.4.0 + github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f + github.com/montanaflynn/stats v0.7.1 + github.com/olekukonko/tablewriter v0.0.5 + github.com/pelletier/go-toml/v2 v2.1.1 + github.com/shopspring/decimal v1.3.1 + github.com/goplugin/plugin-automation v1.0.2-0.20240118014648-1ab6a88c9429 + github.com/goplugin/plugin-common v0.1.7-0.20240216174848-c7f1809138d6 + github.com/goplugin/plugin-vrf v0.0.0-20231120191722-fef03814f868 + github.com/goplugin/pluginv3.0/v2 v2.0.0-00010101000000-000000000000 + github.com/goplugin/libocr v0.0.0-20240215150045-fe2ba71b2f0a + github.com/spf13/cobra v1.6.1 + github.com/spf13/viper v1.15.0 + github.com/stretchr/testify v1.8.4 + github.com/umbracle/ethgo v0.1.3 + github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 + github.com/urfave/cli v1.22.14 + go.dedis.ch/kyber/v3 v3.1.0 +) + +require ( + contrib.go.opencensus.io/exporter/stackdriver v0.13.5 // indirect + cosmossdk.io/api v0.3.1 // indirect + cosmossdk.io/core v0.5.1 // indirect + cosmossdk.io/depinject v1.0.0-alpha.3 // indirect + cosmossdk.io/errors v1.0.0 // indirect + cosmossdk.io/math v1.0.1 // indirect + filippo.io/edwards25519 v1.0.0 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.1 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d // indirect + github.com/CosmWasm/wasmd v0.40.1 
// indirect + github.com/CosmWasm/wasmvm v1.2.4 // indirect + github.com/DataDog/zstd v1.5.2 // indirect + github.com/Depado/ginprom v1.8.0 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/VictoriaMetrics/fastcache v1.12.1 // indirect + github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/avast/retry-go/v4 v4.5.1 // indirect + github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect + github.com/bits-and-blooms/bitset v1.10.0 // indirect + github.com/blendle/zapdriver v1.3.1 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/btcsuite/btcd/btcutil v1.1.3 // indirect + github.com/bytedance/sonic v1.10.1 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect + github.com/chenzhuoyu/iasm v0.9.0 // indirect + github.com/cockroachdb/errors v1.9.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect + github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/cometbft/cometbft v0.37.2 // indirect + github.com/cometbft/cometbft-db v0.7.0 // indirect + github.com/confio/ics23/go v0.9.0 // indirect + github.com/consensys/bavard v0.1.13 // indirect + github.com/consensys/gnark-crypto v0.12.1 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.2 // 
indirect + github.com/cosmos/cosmos-sdk v0.47.4 // indirect + github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/gogoproto v1.4.11 // indirect + github.com/cosmos/iavl v0.20.0 // indirect + github.com/cosmos/ibc-go/v7 v7.0.1 // indirect + github.com/cosmos/ics23/go v0.9.1-0.20221207100636-b1abd8678aab // indirect + github.com/cosmos/ledger-cosmos-go v0.12.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect + github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect + github.com/danieljoos/wincred v1.1.2 // indirect + github.com/danielkov/gin-helmet v0.0.0-20171108135313-1387e224435e // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/deckarep/golang-set/v2 v2.3.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70 // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/esote/minmaxheap v1.0.0 // indirect + github.com/ethereum/c-kzg-4844 v0.4.0 // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fxamacker/cbor/v2 v2.5.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/gagliardetto/binary v0.7.1 // indirect + github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27 // indirect + github.com/gagliardetto/treeout v0.1.4 // indirect + github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect + github.com/gballet/go-verkle 
v0.1.1-0.20231031103413-a67434b50f46 // indirect + github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 // indirect + github.com/getsentry/sentry-go v0.19.0 // indirect + github.com/gin-contrib/cors v1.5.0 // indirect + github.com/gin-contrib/expvar v0.0.1 // indirect + github.com/gin-contrib/sessions v0.0.5 // indirect + github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/gin-gonic/gin v1.9.1 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect + github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-ldap/ldap/v3 v3.4.6 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.15.5 // indirect + github.com/go-webauthn/webauthn v0.9.4 // indirect + github.com/go-webauthn/x v0.1.5 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/gogo/protobuf v1.3.3 // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect + github.com/golang/glog v1.1.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/go-tpm v0.9.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b 
// indirect + github.com/gorilla/context v1.1.1 // indirect + github.com/gorilla/securecookie v1.1.2 // indirect + github.com/gorilla/sessions v1.2.2 // indirect + github.com/gorilla/websocket v1.5.1 // indirect + github.com/grafana/pyroscope-go v1.0.4 // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.4 // indirect + github.com/graph-gophers/dataloader v5.0.0+incompatible // indirect + github.com/graph-gophers/graphql-go v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/gtank/merlin v0.1.1 // indirect + github.com/gtank/ristretto255 v0.1.2 // indirect + github.com/hashicorp/consul/sdk v0.14.1 // indirect + github.com/hashicorp/go-envparse v0.1.0 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/golang-lru v0.6.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/hdevalence/ed25519consensus v0.1.0 // indirect + github.com/holiman/bloomfilter/v2 v2.0.3 // indirect + github.com/holiman/uint256 v1.2.4 // indirect + github.com/huandu/skiplist v1.2.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.14.1 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.2 // indirect + github.com/jackc/pgservicefile 
v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgtype v1.14.0 // indirect + github.com/jackc/pgx/v4 v4.18.1 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a // indirect + github.com/leodido/go-urn v1.2.4 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/logrusorgru/aurora v2.0.3+incompatible // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mmcloughlin/addchain v0.4.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/mwitkow/grpc-proxy v0.0.0-20230212185441-f345521cb9c9 // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + 
github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/pressly/goose/v3 v3.16.0 // indirect + github.com/prometheus/client_golang v1.17.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/prometheus v0.48.1 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/scylladb/go-reflectx v1.0.1 // indirect + github.com/sethvargo/go-retry v0.2.4 // indirect + github.com/shirou/gopsutil v3.21.11+incompatible // indirect + github.com/shirou/gopsutil/v3 v3.23.11 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/goplugin/caigo v0.0.0-20230621050857-b29a4ca8c704 // indirect + github.com/goplugin/chain-selectors v1.0.10 // indirect + github.com/goplugin/plugin-cosmos v0.4.1-0.20240213120401-01a23955f9f8 // indirect + github.com/goplugin/plugin-data-streams v0.0.0-20240214203158-47dae5de1336 // indirect + github.com/goplugin/plugin-feeds v0.0.0-20240119021347-3c541a78cdb8 // indirect + github.com/goplugin/plugin-solana v1.0.3-0.20240213161921-c4d342b761b0 // indirect + github.com/goplugin/plugin-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0 // indirect + 
github.com/goplugin/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 // indirect + github.com/goplugin/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 // indirect + github.com/goplugin/wsrpc v0.7.2 // indirect + github.com/spf13/afero v1.9.3 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/status-im/keycard-go v0.2.0 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + github.com/supranational/blst v0.3.11 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125 // indirect + github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a // indirect + github.com/tidwall/btree v1.6.0 // indirect + github.com/tidwall/gjson v1.17.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/tyler-smith/go-bip39 v1.1.0 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect + github.com/ulule/limiter/v3 v3.11.2 // indirect + github.com/unrolled/secure v1.13.0 // indirect + github.com/valyala/fastjson v1.4.1 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/zondax/hid v0.9.1 // indirect + github.com/zondax/ledger-go v0.14.1 // indirect + go.dedis.ch/fixbuf v1.0.3 // indirect + go.etcd.io/bbolt v1.3.7 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1 // indirect + 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/sdk v1.21.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/ratelimit v0.2.0 // indirect + go.uber.org/zap v1.26.0 // indirect + golang.org/x/arch v0.7.0 // indirect + golang.org/x/crypto v0.19.0 // indirect + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect + golang.org/x/mod v0.15.0 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/term v0.17.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.18.0 // indirect + gonum.org/v1/gonum v0.14.0 // indirect + google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.32.0 // indirect + gopkg.in/guregu/null.v2 v2.1.2 // indirect + gopkg.in/guregu/null.v4 v4.0.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + pgregory.net/rapid v0.5.5 // indirect + rsc.io/tmplfunc v0.0.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) + +replace ( + // replicating the replace directive on cosmos SDK + github.com/gogo/protobuf => github.com/regen-network/protobuf 
v1.3.3-alpha.regen.1 + + // until merged upstream: https://github.com/hashicorp/go-plugin/pull/257 + github.com/hashicorp/go-plugin => github.com/goplugin/go-plugin v0.0.0-20240208201424-b3b91517de16 + + // until merged upstream: https://github.com/mwitkow/grpc-proxy/pull/69 + github.com/mwitkow/grpc-proxy => github.com/goplugin/grpc-proxy v0.0.0-20230731113816-f1be6620749f + +) diff --git a/core/scripts/go.sum b/core/scripts/go.sum new file mode 100644 index 00000000..9b131d6b --- /dev/null +++ b/core/scripts/go.sum @@ -0,0 +1,1987 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go 
v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.110.9 h1:e7ITSqGFFk4rbz/JFIqZh3G4VEHguhAL4BQcFlWtU68= +cloud.google.com/go v0.110.9/go.mod h1:rpxevX/0Lqvlbc88b7Sc1SPNdyK1riNBTUU6JXhYNpM= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/iam v1.1.4 h1:K6n/GZHFTtEoKT5aUG3l9diPi0VduZNQ1PfdnpkkIFk= +cloud.google.com/go/iam v1.1.4/go.mod h1:l/rg8l1AaA+VFMho/HYx2Vv6xinPSLMF8qfhRPIZ0L8= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod 
h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= +cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +contrib.go.opencensus.io/exporter/stackdriver v0.12.6/go.mod h1:8x999/OcIPy5ivx/wDiV7Gx4D+VUPODf0mWRGRc5kSk= +contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +contrib.go.opencensus.io/exporter/stackdriver v0.13.5 h1:TNaexHK16gPUoc7uzELKOU7JULqccn1NDuqUxmxSqfo= +contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +cosmossdk.io/api v0.3.1 h1:NNiOclKRR0AOlO4KIqeaG6PS6kswOMhHD0ir0SscNXE= +cosmossdk.io/api v0.3.1/go.mod h1:DfHfMkiNA2Uhy8fj0JJlOCYOBp4eWUUJ1te5zBGNyIw= +cosmossdk.io/core v0.5.1 h1:vQVtFrIYOQJDV3f7rw4pjjVqc1id4+mE0L9hHP66pyI= +cosmossdk.io/core v0.5.1/go.mod h1:KZtwHCLjcFuo0nmDc24Xy6CRNEL9Vl/MeimQ2aC7NLE= +cosmossdk.io/depinject v1.0.0-alpha.3 h1:6evFIgj//Y3w09bqOUOzEpFj5tsxBqdc5CfkO7z+zfw= +cosmossdk.io/depinject v1.0.0-alpha.3/go.mod h1:eRbcdQ7MRpIPEM5YUJh8k97nxHpYbc3sMUnEtt8HPWU= +cosmossdk.io/errors v1.0.0 h1:nxF07lmlBbB8NKQhtJ+sJm6ef5uV1XkvPXG2bUntb04= +cosmossdk.io/errors v1.0.0/go.mod h1:+hJZLuhdDE0pYN8HkOrVNwrIOYvUGnn6+4fjnJs/oV0= +cosmossdk.io/log v1.1.1-0.20230704160919-88f2c830b0ca 
h1:msenprh2BLLRwNT7zN56TbBHOGk/7ARQckXHxXyvjoQ= +cosmossdk.io/log v1.1.1-0.20230704160919-88f2c830b0ca/go.mod h1:PkIAKXZvaxrTRc++z53XMRvFk8AcGGWYHcMIPzVYX9c= +cosmossdk.io/math v1.0.1 h1:Qx3ifyOPaMLNH/89WeZFH268yCvU4xEcnPLu3sJqPPg= +cosmossdk.io/math v1.0.1/go.mod h1:Ygz4wBHrgc7g0N+8+MrnTfS9LLn9aaTGa9hKopuym5k= +cosmossdk.io/tools/rosetta v0.2.1 h1:ddOMatOH+pbxWbrGJKRAawdBkPYLfKXutK9IETnjYxw= +cosmossdk.io/tools/rosetta v0.2.1/go.mod h1:Pqdc1FdvkNV3LcNIkYWt2RQY6IP1ge6YWZk8MhhO9Hw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= +filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.1 h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo87o= +github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= +github.com/AlekSi/pointer v1.1.0 h1:SSDMPcXD9jSl8FPy9cRzoRaMJtm9g9ggGTxecRUbQoI= +github.com/AlekSi/pointer v1.1.0/go.mod h1:y7BvfRI3wXPWKXEBhU71nbnIEEZX0QTSB2Bj48UJIZE= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod 
h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/ClickHouse/ch-go v0.58.2 h1:jSm2szHbT9MCAB1rJ3WuCJqmGLi5UTjlNu+f530UTS0= +github.com/ClickHouse/ch-go v0.58.2/go.mod h1:Ap/0bEmiLa14gYjCiRkYGbXvbe8vwdrfTYWhsuQ99aw= +github.com/ClickHouse/clickhouse-go/v2 v2.15.0 h1:G0hTKyO8fXXR1bGnZ0DY3vTG01xYfOGW76zgjg5tmC4= +github.com/ClickHouse/clickhouse-go/v2 v2.15.0/go.mod h1:kXt1SRq0PIRa6aKZD7TnFnY9PQKmc2b13sHtOYcK6cQ= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= +github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= +github.com/CosmWasm/wasmd v0.40.1 h1:LxbO78t/6S8TkeQlUrJ0m5O87HtAwLx4RGHq3rdrOEU= +github.com/CosmWasm/wasmd v0.40.1/go.mod h1:6EOwnv7MpuFaEqxcUOdFV9i4yvrdOciaY6VQ1o7A3yg= +github.com/CosmWasm/wasmvm v1.2.4 h1:6OfeZuEcEH/9iqwrg2pkeVtDCkMoj9U6PpKtcrCyVrQ= +github.com/CosmWasm/wasmvm v1.2.4/go.mod h1:vW/E3h8j9xBQs9bCoijDuawKo9kCtxOaS8N8J7KFtkc= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Depado/ginprom v1.8.0 h1:zaaibRLNI1dMiiuj1MKzatm8qrcHzikMlCc1anqOdyo= +github.com/Depado/ginprom v1.8.0/go.mod h1:XBaKzeNBqPF4vxJpNLincSQZeMDnZp1tIbU0FU0UKgg= 
+github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= +github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= +github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= +github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= 
+github.com/alecthomas/participle/v2 v2.0.0-alpha7 h1:cK4vjj0VSgb3lN1nuKA5F7dw+1s1pWBe5bx7nNCnN+c= +github.com/alecthomas/participle/v2 v2.0.0-alpha7/go.mod h1:NumScqsC42o9x+dGj8/YqsIfhrIQjFEOFovxotbBirA= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= +github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= +github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 h1:MzBOUgng9orim59UnfUTLRjMpd09C5uEVQ6RPGeCaVI= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg= +github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI= +github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= +github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod 
h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/avast/retry-go/v4 v4.5.1 h1:AxIx0HGi4VZ3I02jr78j5lZ3M6x1E0Ivxa6b0pUUh7o= +github.com/avast/retry-go/v4 v4.5.1/go.mod h1:/sipNsvNB3RRuT5iNcb6h73nw3IBmXJ/H3XrCQYSOpc= +github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= +github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc 
v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= +github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE= +github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= +github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= +github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= +github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= +github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= 
+github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ= +github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.3 h1:SDlJ7bAm4ewvrmZtR0DaiYbQGdKPeaaIm7bM+qRhFeU= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.3/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic 
v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= +github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc= +github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU= +github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= +github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo= +github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= 
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= +github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= +github.com/cockroachdb/apd/v3 v3.1.0 h1:MK3Ow7LH0W8zkd5GMKA1PvS9qG3bWFI95WaVNfyZJ/w= +github.com/cockroachdb/apd/v3 v3.1.0/go.mod h1:6qgPBMXjATAdD/VefbRP9NoSLKjbB4LCoA7gN4LpHs4= +github.com/cockroachdb/datadriven v1.0.2/go.mod 
h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= +github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= +github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= +github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= +github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/coinbase/rosetta-sdk-go/types v1.0.0 h1:jpVIwLcPoOeCR6o1tU+Xv7r5bMONNbHU7MuEHboiFuA= +github.com/coinbase/rosetta-sdk-go/types v1.0.0/go.mod h1:eq7W2TMRH22GTW0N0beDnN931DW0/WOI1R2sdHNHG4c= +github.com/cometbft/cometbft v0.37.2 h1:XB0yyHGT0lwmJlFmM4+rsRnczPlHoAKFX6K8Zgc2/Jc= +github.com/cometbft/cometbft v0.37.2/go.mod h1:Y2MMMN//O5K4YKd8ze4r9jmk4Y7h0ajqILXbH5JQFVs= 
+github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= +github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= +github.com/confio/ics23/go v0.9.0 h1:cWs+wdbS2KRPZezoaaj+qBleXgUk5WOQFMP3CQFGTr4= +github.com/confio/ics23/go v0.9.0/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= +github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= +github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= +github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= +github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= +github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod 
h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-proto v1.0.0-beta.2 h1:X3OKvWgK9Gsejo0F1qs5l8Qn6xJV/AzgIWR2wZ8Nua8= +github.com/cosmos/cosmos-proto v1.0.0-beta.2/go.mod h1:+XRCLJ14pr5HFEHIUcn51IKXD1Fy3rkEQqt4WqmN4V0= +github.com/cosmos/cosmos-sdk v0.47.4 h1:FVUpEprm58nMmBX4xkRdMDaIG5Nr4yy92HZAfGAw9bg= +github.com/cosmos/cosmos-sdk v0.47.4/go.mod h1:R5n+uM7vguVPFap4pgkdvQCT1nVo/OtPwrlAU40rvok= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.4.11 h1:LZcMHrx4FjUgrqQSWeaGC1v/TeuVFqSLa43CC6aWR2g= +github.com/cosmos/gogoproto v1.4.11/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= +github.com/cosmos/iavl v0.20.0 h1:fTVznVlepH0KK8NyKq8w+U7c2L6jofa27aFX6YGlm38= +github.com/cosmos/iavl v0.20.0/go.mod h1:WO7FyvaZJoH65+HFOsDir7xU9FWk2w9cHXNW1XHcl7A= +github.com/cosmos/ibc-go/v7 v7.0.1 h1:NIBNRWjlOoFvFQu1ZlgwkaSeHO5avf4C1YQiWegt8jw= +github.com/cosmos/ibc-go/v7 v7.0.1/go.mod h1:vEaapV6nuLPQlS+g8IKmxMo6auPi0i7HMv1PhViht/E= +github.com/cosmos/ics23/go v0.9.1-0.20221207100636-b1abd8678aab h1:I9ialKTQo7248V827Bba4OuKPmk+FPzmTVHsLXaIJWw= +github.com/cosmos/ics23/go v0.9.1-0.20221207100636-b1abd8678aab/go.mod h1:2CwqasX5dSD7Hbp/9b6lhK6BwoBDCBldx7gPKRukR60= +github.com/cosmos/ledger-cosmos-go v0.12.1 h1:sMBxza5p/rNK/06nBSNmsI/WDqI0pVJFVNihy1Y984w= +github.com/cosmos/ledger-cosmos-go v0.12.1/go.mod h1:dhO6kj+Y+AHIOgAe4L9HL/6NDdyyth4q238I9yFpD2g= +github.com/cosmos/rosetta-sdk-go v0.10.0 h1:E5RhTruuoA7KTIXUcMicL76cffyeoyvNybzUGSKFTcM= +github.com/cosmos/rosetta-sdk-go v0.10.0/go.mod 
h1:SImAZkb96YbwvoRkzSMQB6noNJXFgWl/ENIznEoYQI4= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= +github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= +github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= +github.com/creachadair/taskgroup v0.4.2 h1:jsBLdAJE42asreGss2xZGZ8fJra7WtwnHWeJFxv2Li8= +github.com/creachadair/taskgroup v0.4.2/go.mod h1:qiXUOSrbwAY3u0JPGTzObbE3yf9hcXHDKBZ2ZjpCbgM= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cucumber/common/gherkin/go/v22 v22.0.0 h1:4K8NqptbvdOrjL9DEea6HFjSpbdT9+Q5kgLpmmsHYl0= +github.com/cucumber/common/gherkin/go/v22 v22.0.0/go.mod h1:3mJT10B2GGn3MvVPd3FwR7m2u4tLhSRhWUqJU4KN4Fg= +github.com/cucumber/common/messages/go/v17 v17.1.1 h1:RNqopvIFyLWnKv0LfATh34SWBhXeoFTJnSrgm9cT/Ts= +github.com/cucumber/common/messages/go/v17 v17.1.1/go.mod h1:bpGxb57tDE385Rb2EohgUadLkAbhoC4IyCFi89u/JQI= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/danielkov/gin-helmet v0.0.0-20171108135313-1387e224435e 
h1:5jVSh2l/ho6ajWhSPNN84eHEdq3dp0T7+f6r3Tc6hsk= +github.com/danielkov/gin-helmet v0.0.0-20171108135313-1387e224435e/go.mod h1:IJgIiGUARc4aOr4bOQ85klmjsShkEEfiRc6q/yBSfo8= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set/v2 v2.3.0 h1:qs18EKUfHm2X9fA50Mr/M5hccg2tNnVqsiBImnyDs0g= +github.com/deckarep/golang-set/v2 v2.3.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dfuse-io/logging v0.0.0-20201110202154-26697de88c79/go.mod h1:V+ED4kT/t/lKtH99JQmKIb0v9WL3VaYkJ36CfHlVECI= +github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70 
h1:CuJS05R9jmNlUK8GOxrEELPbfXm0EuGh/30LjkjN5vo= +github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70/go.mod h1:EoK/8RFbMEteaCaz89uessDTnCWjbbcr+DXcBh4el5o= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= +github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 
h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= +github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T/Lao= +github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E= +github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= +github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate 
v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/esote/minmaxheap v1.0.0 h1:rgA7StnXXpZG6qlM0S7pUmEv1KpWe32rYT4x8J8ntaA= +github.com/esote/minmaxheap v1.0.0/go.mod h1:Ln8+i7fS1k3PLgZI2JAo0iA1as95QnIYiGCrqSJ5FZk= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= +github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/go-ethereum v1.13.8 h1:1od+thJel3tM52ZUNQwvpYOeRHlbkVFZ5S8fhi0Lgsg= +github.com/ethereum/go-ethereum v1.13.8/go.mod h1:sc48XYQxCzH3fG9BcrXCOOgQk2JfZzNAmIKnceogzsA= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= 
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= +github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gagliardetto/binary v0.6.1/go.mod h1:aOfYkc20U0deHaHn/LVZXiqlkDbFAX0FpTlDhsXa0S0= +github.com/gagliardetto/binary v0.7.1 
h1:6ggDQ26vR+4xEvl/S13NcdLK3MUCi4oSy73pS9aI1cI= +github.com/gagliardetto/binary v0.7.1/go.mod h1:aOfYkc20U0deHaHn/LVZXiqlkDbFAX0FpTlDhsXa0S0= +github.com/gagliardetto/gofuzz v1.2.2 h1:XL/8qDMzcgvR4+CyRQW9UGdwPRPMHVJfqQ/uMvSUuQw= +github.com/gagliardetto/gofuzz v1.2.2/go.mod h1:bkH/3hYLZrMLbfYWA0pWzXmi5TTRZnu4pMGZBkqMKvY= +github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27 h1:q2IztKyRQUxJ6abXRsawaBtvDFvM+szj4jDqV4od1gs= +github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27/go.mod h1:NFuoDwHPvw858ZMHUJr6bkhN8qHt4x6e+U3EYHxAwNY= +github.com/gagliardetto/treeout v0.1.4 h1:ozeYerrLCmCubo1TcIjFiOWTTGteOOHND1twdFpgwaw= +github.com/gagliardetto/treeout v0.1.4/go.mod h1:loUefvXTrlRG5rYmJmExNryyBRh8f89VZhmMOyCyqok= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= +github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 h1:Uc+IZ7gYqAf/rSGFplbWBSHaGolEQlNLgMgSE3ccnIQ= +github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813/go.mod h1:P+oSoE9yhSRvsmYyZsshflcR6ePWYLql6UU1amW13IM= +github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= +github.com/getsentry/sentry-go v0.19.0 h1:BcCH3CN5tXt5aML+gwmbFwVptLLQA+eT866fCO9wVOM= +github.com/getsentry/sentry-go v0.19.0/go.mod h1:y3+lGEFEFexZtpbG1GUE2WD/f9zGyKYwpEqryTOC/nE= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/cors v1.5.0 
h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk= +github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI= +github.com/gin-contrib/expvar v0.0.1 h1:IuU5ArEgihz50vG8Onrwz22kJr7Mcvgv9xSSpfU5g+w= +github.com/gin-contrib/expvar v0.0.1/go.mod h1:8o2CznfQi1JjktORdHr2/abg3wSV6OCnXh0yGypvvVw= +github.com/gin-contrib/sessions v0.0.5 h1:CATtfHmLMQrMNpJRgzjWXD7worTh7g7ritsQfmF+0jE= +github.com/gin-contrib/sessions v0.0.5/go.mod h1:vYAuaUPqie3WUSsft6HUlCjlwwoJQs97miaG2+7neKY= +github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4 h1:Z9J0PVIt1PuibOShaOw1jH8hUYz+Ak8NLsR/GI0Hv5I= +github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4/go.mod h1:CEPcgZiz8998l9E8fDm16h8UfHRL7b+5oG0j/0koeVw= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA= +github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-faster/city v1.0.1 
h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= +github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= +github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI= +github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A= +github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.0 
h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= +github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24= +github.com/go-playground/validator/v10 
v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-webauthn/webauthn v0.9.4 h1:YxvHSqgUyc5AK2pZbqkWWR55qKeDPhP8zLDr6lpIc2g= +github.com/go-webauthn/webauthn v0.9.4/go.mod h1:LqupCtzSef38FcxzaklmOn7AykGKhAhr9xlRbdbgnTw= +github.com/go-webauthn/x v0.1.5 h1:V2TCzDU2TGLd0kSZOXdrqDVV5JB9ILnKxA9S53CSBw0= +github.com/go-webauthn/x v0.1.5/go.mod h1:qbzWwcFcv4rTwtCLOZd+icnr6B7oSsAGZJqlt8cukqY= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.3.1+incompatible 
h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI= +github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/go-tpm v0.9.0 h1:sQF6YqWMi+SCXpsmS3fd21oPy/vSddwZry4JnmltHVk= +github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= 
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod 
h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= +github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= +github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= +github.com/gorilla/sessions v1.2.2 h1:lqzMYz6bOfvn2WriPUjNByzeXIlVzURcPmgMczkmTjY= +github.com/gorilla/sessions v1.2.2/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/grafana/pyroscope-go v1.0.4 h1:oyQX0BOkL+iARXzHuCdIF5TQ7/sRSel1YFViMHC7Bm0= +github.com/grafana/pyroscope-go v1.0.4/go.mod h1:0d7ftwSMBV/Awm7CCiYmHQEG8Y44Ma3YSjt+nWcWztY= +github.com/grafana/pyroscope-go/godeltaprof v0.1.4 h1:mDsJ3ngul7UfrHibGQpV66PbZ3q1T8glz/tK3bQKKEk= +github.com/grafana/pyroscope-go/godeltaprof v0.1.4/go.mod h1:1HSPtjU8vLG0jE9JrTdzjgFqdJ/VgN7fvxBNq3luJko= +github.com/graph-gophers/dataloader v5.0.0+incompatible h1:R+yjsbrNq1Mo3aPG+Z/EKYrXrXXUNJHOgbRt+U6jOug= +github.com/graph-gophers/dataloader v5.0.0+incompatible/go.mod h1:jk4jk0c5ZISbKaMe8WsVopGB5/15GvGHMdMdPtwlRp4= +github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= +github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 
h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 h1:f4tggROQKKcnh4eItay6z/HbHLqghBxS8g7pyMhmDio= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0/go.mod h1:hKAkSgNkL0FII46ZkJcpVEAai4KV+swlIWCKfekd1pA= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 h1:o95KDiV/b1xdkumY5YbLR0/n2+wBxUpgf3HgfKgTyLI= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3/go.mod h1:hTxjzRcX49ogbTGVJ1sM5mz5s+SSgiGIyL3jjPxl32E= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/hashicorp/consul/api v1.1.0/go.mod 
h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= +github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= +github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-envparse v0.1.0 h1:bE++6bhIsNCPLvgDZkYqo3nA+/PFI51pkrHdmPSDFPY= +github.com/hashicorp/go-envparse v0.1.0/go.mod h1:OHheN1GoygLlAkTlXLXvAdnXdZxy8JUweQ1rAXx1xnc= +github.com/hashicorp/go-getter v1.7.1 h1:SWiSWN/42qdpR0MdhaOc/bLR48PLuP1ZQtYLRlM69uY= +github.com/hashicorp/go-getter v1.7.1/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod 
h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= +github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= +github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= +github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= +github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= +github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod 
h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= +github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod 
h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= +github.com/jackc/pgconn v1.14.1 h1:smbxIaZA08n6YuxEX1sDyjV/qkbtUtkH20qLkR9MUR4= +github.com/jackc/pgconn v1.14.1/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 
v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0= +github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod 
h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0= +github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE= +github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw= +github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0 h1:eHK/5clGOatcjX3oWGBO/MpxpbHzSwud5EWTSCI+MX0= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod 
h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= +github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= +github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= +github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= +github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= +github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= 
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leanovate/gopter 
v0.2.10-0.20210127095200-9abe2343507a h1:dHCfT5W7gghzPtfsW488uPmEOm85wewI+ypUwibyTdU= +github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= +github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod 
h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f h1:tVvGiZQFjOXP+9YyGqSA6jE55x1XVxmoPYudncxrZ8U= +github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f/go.mod h1:Z60vy0EZVSu0bOugCHdcN5ZxFMKSpjRgsnh0XKPFqqk= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod 
h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= +github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= +github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/minio/highwayhash v1.0.2 
h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= +github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= +github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= +github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod 
h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1 h1:mPMvm6X6tf4w8y7j9YIt6V9jfWhL6QlbEc7CCmeQlWk= +github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1/go.mod h1:ye2e/VUEtE2BHE+G/QcKkcLQVAEJoYRFj5VUOQatCRE= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod 
h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249 h1:NHrXEjTNQY7P0Zfx1aMrNhpgxHmow66XQtm0aQLY0AE= +github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= 
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40= +github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod 
h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= +github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s= +github.com/paulmach/orb v0.10.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= +github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pingcap/errors v0.11.4 
h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pressly/goose/v3 v3.16.0 h1:xMJUsZdHLqSnCqESyKSqEfcYVYsUuup1nrOhaEFftQg= +github.com/pressly/goose/v3 v3.16.0/go.mod h1:JwdKVnmCRhnF6XLQs2mHEQtucFD49cQBdRM4UiwkxsM= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= 
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/prometheus v0.48.1 
h1:CTszphSNTXkuCG6O0IfpKdHcJkvvnAAE1GbELKS+NFk= +github.com/prometheus/prometheus v0.48.1/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/regen-network/gocuke v0.6.2 h1:pHviZ0kKAq2U2hN2q3smKNxct6hS0mGByFMHGnWA97M= +github.com/regen-network/gocuke v0.6.2/go.mod h1:zYaqIHZobHyd0xOrHGPQjbhGJsuZ1oElx150u2o1xuk= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo= +github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= +github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= 
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= +github.com/scylladb/go-reflectx v1.0.1 h1:b917wZM7189pZdlND9PbIJ6NQxfDPfBvUaQ7cjj1iZQ= +github.com/scylladb/go-reflectx v1.0.1/go.mod h1:rWnOfDIRWBGN0miMLIcoPt/Dhi2doCMZqwMCJ3KupFc= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= +github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= +github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= 
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/goplugin/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumvbfM1u/etVq42Afwq/jtNSBSOA8n5jntnNPo= +github.com/goplugin/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= +github.com/goplugin/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= +github.com/goplugin/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= +github.com/goplugin/plugin-automation v1.0.2-0.20240118014648-1ab6a88c9429 h1:xkejUBZhcBpBrTSfxc91Iwzadrb6SXw8ks69bHIQ9Ww= +github.com/goplugin/plugin-automation v1.0.2-0.20240118014648-1ab6a88c9429/go.mod h1:wJmVvDf4XSjsahWtfUq3wvIAYEAuhr7oxmxYnEL/LGQ= +github.com/goplugin/plugin-common v0.1.7-0.20240216174848-c7f1809138d6 h1:hpNkTpLtwWXKqguf7wYqetxpmxY/bSO+1PLpY8VBu2w= +github.com/goplugin/plugin-common v0.1.7-0.20240216174848-c7f1809138d6/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= +github.com/goplugin/plugin-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= +github.com/goplugin/plugin-cosmos v0.4.1-0.20240213120401-01a23955f9f8/go.mod h1:a65NtrK4xZb01mf0dDNghPkN2wXgcqFQ55ADthVBgMc= +github.com/goplugin/plugin-data-streams v0.0.0-20240214203158-47dae5de1336 h1:j00D0/EqE9HRu+63v7KwUOe4ZxLc4AN5SOJFiinkkH0= +github.com/goplugin/plugin-data-streams v0.0.0-20240214203158-47dae5de1336/go.mod h1:umLyYLRGqyIuFfGpEREZP3So6+O8iL35cCCqW+OxX5w= +github.com/goplugin/plugin-feeds v0.0.0-20240119021347-3c541a78cdb8 h1:1BcjXuviSAKttOX7BZoVHRZZGfxqoA2+AL8tykmkdoc= 
+github.com/goplugin/plugin-feeds v0.0.0-20240119021347-3c541a78cdb8/go.mod h1:vy1L7NybTy2F/Yv7BOh+oZBa1MACD6gzd1+DkcSkfp8= +github.com/goplugin/plugin-solana v1.0.3-0.20240213161921-c4d342b761b0 h1:9IxmR+1NH1WxaX44+t553fOrrZRfxwMVvnDuBIy0tgs= +github.com/goplugin/plugin-solana v1.0.3-0.20240213161921-c4d342b761b0/go.mod h1:JiykN+8W5TA4UD2ClrzQCVvcH3NcyLEVv7RwY0busrw= +github.com/goplugin/plugin-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0 h1:7m9PVtccb8/pvKTXMaGuyceFno1icRyC2SFH7KG7+70= +github.com/goplugin/plugin-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0/go.mod h1:SZ899lZYQ0maUulWbZg+SWqabHQ1wTbyk3jT8wJfyo8= +github.com/goplugin/plugin-vrf v0.0.0-20231120191722-fef03814f868 h1:FFdvEzlYwcuVHkdZ8YnZR/XomeMGbz5E2F2HZI3I3w8= +github.com/goplugin/plugin-vrf v0.0.0-20231120191722-fef03814f868/go.mod h1:Kn1Hape05UzFZ7bOUnm3GVsHzP0TNrVmpfXYNHdqGGs= +github.com/goplugin/go-plugin v0.0.0-20240208201424-b3b91517de16 h1:TFe+FvzxClblt6qRfqEhUfa4kFQx5UobuoFGO2W4mMo= +github.com/goplugin/go-plugin v0.0.0-20240208201424-b3b91517de16/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/goplugin/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU= +github.com/goplugin/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0= +github.com/goplugin/libocr v0.0.0-20240215150045-fe2ba71b2f0a h1:nGkZ9uXS8lPIJOi68rdftEo2c9Q8qbRAi5+XMnKobVc= +github.com/goplugin/libocr v0.0.0-20240215150045-fe2ba71b2f0a/go.mod h1:kC0qmVPUaVkFqGiZMNhmRmjdphuUmeyLEdlWFOQzFWI= +github.com/goplugin/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 h1:yiKnypAqP8l0OX0P3klzZ7SCcBUxy5KqTAKZmQOvSQE= +github.com/goplugin/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:q6f4fe39oZPdsh1i57WznEZgxd8siidMaSFq3wdPmVg= +github.com/goplugin/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 h1:Dai1bn+Q5cpeGMQwRdjOdVjG8mmFFROVkSKuUgBErRQ= 
+github.com/goplugin/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:G5Sd/yzHWf26rQ+X0nG9E0buKPqRGPMJAfk2gwCzOOw= +github.com/goplugin/wsrpc v0.7.2 h1:iBXzMeg7vc5YoezIQBq896y25BARw7OKbhrb6vPbtRQ= +github.com/goplugin/wsrpc v0.7.2/go.mod h1:sj7QX2NQibhkhxTfs3KOhAj/5xwgqMipTvJVSssT9i0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= +github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod 
h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= +github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= +github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/teris-io/shortid v0.0.0-20171029131806-771a37caa5cf/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0= +github.com/teris-io/shortid 
v0.0.0-20201117134242-e59966efd125 h1:3SNcvBmEPE1YlB1JpVZouslJpI3GBNoiqW7+wb0Rz7w= +github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0= +github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE= +github.com/test-go/testify v1.1.4/go.mod h1:rH7cfJo/47vWGdi4GPj16x3/t1xGOj2YxzmNQzk2ghU= +github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a h1:YuO+afVc3eqrjiCUizNCxI53bl/BnPiVwXqLzqYTqgU= +github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a/go.mod h1:/sfW47zCZp9FrtGcWyo1VjbgDaodxX9ovZvgLb/MxaA= +github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= +github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= +github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= 
+github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA= +github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI= +github.com/umbracle/ethgo v0.1.3 h1:s8D7Rmphnt71zuqrgsGTMS5gTNbueGO1zKLh7qsFzTM= +github.com/umbracle/ethgo v0.1.3/go.mod h1:g9zclCLixH8liBI27Py82klDkW7Oo33AxUOr+M9lzrU= +github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 h1:10Nbw6cACsnQm7r34zlpJky+IzxVLRk6MKTS2d3Vp0E= +github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722/go.mod h1:c8J0h9aULj2i3umrfyestM6jCq0LK0U6ly6bWy96nd4= +github.com/unrolled/secure v1.13.0 h1:sdr3Phw2+f8Px8HE5sd1EHdj1aV3yUwed/uZXChLFsk= +github.com/unrolled/secure v1.13.0/go.mod 
h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= +github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= +github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= +github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= +github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/fastjson v1.4.1 h1:hrltpHpIpkaxll8QltMU8c3QZ5+qIiCL8yKqPFJI/yE= +github.com/valyala/fastjson v1.4.1/go.mod h1:nV6MsjxL2IMJQUoHDIrjEI7oLyeqK6aBD7EFWPsvP8o= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw= +github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= 
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd h1:dzWP1Lu+A40W883dK/Mr3xyDSM/2MggS8GtHT0qgAnE= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= +github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA= +github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2/go.mod h1:fjBLQ2TdQNl4bMjuWl9adoTGBypwUTPoGC+EqYqiIcU= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +github.com/zondax/hid v0.9.1 h1:gQe66rtmyZ8VeGFcOpbuH3r7erYtNEAezCAYu8LdkJo= +github.com/zondax/hid v0.9.1/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.1 h1:Pip65OOl4iJ84WTpA4BKChvOufMhhbxED3BaihoZN4c= +github.com/zondax/ledger-go v0.14.1/go.mod h1:fZ3Dqg6qcdXWSOJFKMG8GCTnD7slO/RL2feOQv8K320= +go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= +go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= +go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= +go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= +go.dedis.ch/kyber/v3 v3.1.0 h1:ghu+kiRgM5JyD9TJ0hTIxTLQlJBR/ehjWvWwYW3XsC0= +go.dedis.ch/kyber/v3 v3.1.0/go.mod h1:kXy7p3STAurkADD+/aZcsznZGKVHEqbtmdIzvPfrs1U= +go.dedis.ch/protobuf v1.0.5/go.mod h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= +go.dedis.ch/protobuf v1.0.7/go.mod h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI4= +go.dedis.ch/protobuf v1.0.11 h1:FTYVIEzY/bfl37lu3pR4lIj+F9Vp1jE8oh91VmxKgLo= +go.dedis.ch/protobuf v1.0.11/go.mod h1:97QR256dnkimeNdfmURz0wAMNVbd1VmLXhG1CrTYrJ4= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1 h1:mMv2jG58h6ZI5t5S9QCVGdzCmAsTakMa3oxVgpSD44g= +go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1/go.mod h1:oqRuNKG0upTaDPbLVCG8AD0G2ETrfDtmh7jViy7ox6M= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= +go.opentelemetry.io/contrib/propagators/b3 v1.21.1 h1:WPYiUgmw3+b7b3sQ1bFBFAf0q+Di9dvNc3AtYfnT4RQ= +go.opentelemetry.io/contrib/propagators/b3 v1.21.1/go.mod h1:EmzokPoSqsYMBVK4nRnhsfm5mbn8J1eDuz/U1UaQaWg= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod 
h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/ratelimit v0.2.0 h1:UQE2Bgi7p2B85uP5dC2bbRtig0C+OeNRnNEafLjsLPA= +go.uber.org/ratelimit v0.2.0/go.mod h1:YYBV4e4naJvhpitQrWJu1vCpgB7CboMe0qhltKt6mUg= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod 
h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net 
v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod 
h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term 
v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= 
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= +gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod 
h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= +google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210401141331-865547bb08e2/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 h1:I6WNifs6pF9tNdSob2W24JtyxIYjzFB9qDlpUC76q+U= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc/examples v0.0.0-20210424002626-9572fd6faeae/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 
v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/guregu/null.v2 v2.1.2 h1:YOuepWdYqGnrenzPyMi+ybCjeDzjdazynbwsXXOk4i8= +gopkg.in/guregu/null.v2 v2.1.2/go.mod h1:XORrx8tyS5ZDcyUboCIxQtta/Aujk/6pfWrn9Xe33mU= +gopkg.in/guregu/null.v4 v4.0.0 h1:1Wm3S1WEA2I26Kq+6vcW+w0gcDo44YKYD7YIEJNHDjg= +gopkg.in/guregu/null.v4 v4.0.0/go.mod h1:YoQhUrADuG3i9WqesrCmpNRwm1ypAgSHYqoOcTu/JrI= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +howett.net/plist v1.0.0 
h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= +lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= +lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q= +modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y= +modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0= +modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI= +modernc.org/libc v1.32.0 h1:yXatHTrACp3WaKNRCoZwUK7qj5V8ep1XyY0ka4oYcNc= +modernc.org/libc v1.32.0/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= +modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= +modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.27.0 h1:MpKAHoyYB7xqcwnUwkuD+npwEa0fojF0B5QRbN+auJ8= +modernc.org/sqlite v1.27.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= +modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= +pgregory.net/rapid v0.5.5 
h1:jkgx1TjbQPD/feRoK+S/mXw9e1uj6WilpHrXJowi6oA= +pgregory.net/rapid v0.5.5/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= +rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/core/scripts/install-protoc.sh b/core/scripts/install-protoc.sh new file mode 100644 index 00000000..8d9dc408 --- /dev/null +++ b/core/scripts/install-protoc.sh @@ -0,0 +1,54 @@ +#!/bin/bash +set -x + + +VERSION=$1 + +if [ "$VERSION" == "" ]; then + echo "version required" + exit 1 +fi + +os=$(uname) +arch=$(uname -m) + +install_dir=$HOME/.local +$install_dir/bin/protoc --version | grep $VERSION +rc=$? +if [ $rc -eq 0 ]; then + # we have the current VERSION + echo "protoc up-to-date @ $VERSION" + exit 0 +fi + + +if [ "$os" == "Linux" ] ; then + os="linux" + if [$arch != "x86_64"]; then + echo "unsupported os $os-$arch update $0" + exit 1 + fi +elif [ "$os" == "Darwin" ] ; then + os="osx" + # make life simply and download the universal binary + arch="universal_binary" +else + echo "unsupported os $os. update $0" + exit 1 +fi + +workdir=$(mktemp -d) +pushd $workdir +pb_url="https://github.com/protocolbuffers/protobuf/releases" +artifact=protoc-$VERSION-$os-$arch.zip +curl -LO $pb_url/download/v${VERSION}/$artifact +if [[ ! 
-d $install_dir ]]; then + mkdir $install_dir +fi +unzip -uo $artifact -d $install_dir +rm $artifact + +echo "protoc $VERSION installed in $install_dir" +echo "Add $install_dir/bin to PATH" +export PATH=$install_dir/bin:$PATH +popd diff --git a/core/scripts/ocr2vrf/main.go b/core/scripts/ocr2vrf/main.go new file mode 100644 index 00000000..6bf496b7 --- /dev/null +++ b/core/scripts/ocr2vrf/main.go @@ -0,0 +1,455 @@ +package main + +import ( + "context" + "flag" + "fmt" + "math/big" + "os" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/shopspring/decimal" + + ocr2vrftypes "github.com/goplugin/plugin-vrf/types" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" +) + +type commonSetConfigArgs struct { + onchainPubKeys string + offchainPubKeys string + configPubKeys string + peerIDs string + transmitters string + schedule string + f uint + deltaProgress time.Duration + deltaResend time.Duration + deltaRound time.Duration + deltaGrace time.Duration + deltaStage time.Duration + maxRounds uint8 + maxDurationQuery time.Duration + maxDurationObservation time.Duration + maxDurationReport time.Duration + maxDurationAccept time.Duration + maxDurationTransmit time.Duration +} + +type dkgSetConfigArgs struct { + commonSetConfigArgs + dkgEncryptionPubKeys string + dkgSigningPubKeys string + keyID string +} + +type vrfBeaconSetConfigArgs struct { + commonSetConfigArgs + confDelays string + coordinatorConfig ocr2vrftypes.CoordinatorConfig +} + +func main() { + e := helpers.SetupEnv(false) + + switch os.Args[1] { + case "dkg-deploy": + deployDKG(e) + case "coordinator-deploy": + cmd := flag.NewFlagSet("coordinator-deploy", flag.ExitOnError) + beaconPeriodBlocks := cmd.Int64("beacon-period-blocks", 1, "beacon period in number of blocks") + 
linkAddress := cmd.String("link-address", "", "link contract address") + linkEthFeed := cmd.String("link-eth-feed", "", "link/eth feed address") + helpers.ParseArgs(cmd, os.Args[2:], "beacon-period-blocks", "link-address", "link-eth-feed") + deployVRFCoordinator(e, big.NewInt(*beaconPeriodBlocks), *linkAddress, *linkEthFeed) + case "beacon-deploy": + cmd := flag.NewFlagSet("beacon-deploy", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "coordinator contract address") + linkAddress := cmd.String("link-address", "", "link contract address") + dkgAddress := cmd.String("dkg-address", "", "dkg contract address") + keyID := cmd.String("key-id", "", "key ID") + helpers.ParseArgs(cmd, os.Args[2:], "beacon-deploy", "coordinator-address", "link-address", "dkg-address", "key-id") + deployVRFBeacon(e, *coordinatorAddress, *linkAddress, *dkgAddress, *keyID) + case "dkg-add-client": + cmd := flag.NewFlagSet("dkg-add-client", flag.ExitOnError) + dkgAddress := cmd.String("dkg-address", "", "DKG contract address") + keyID := cmd.String("key-id", "", "key ID") + clientAddress := cmd.String("client-address", "", "client address") + helpers.ParseArgs(cmd, os.Args[2:], "dkg-address", "key-id", "client-address") + addClientToDKG(e, *dkgAddress, *keyID, *clientAddress) + case "dkg-remove-client": + cmd := flag.NewFlagSet("dkg-add-client", flag.ExitOnError) + dkgAddress := cmd.String("dkg-address", "", "DKG contract address") + keyID := cmd.String("key-id", "", "key ID") + clientAddress := cmd.String("client-address", "", "client address") + helpers.ParseArgs(cmd, os.Args[2:], "dkg-address", "key-id", "client-address") + removeClientFromDKG(e, *dkgAddress, *keyID, *clientAddress) + case "dkg-set-config": + cmd := flag.NewFlagSet("dkg-set-config", flag.ExitOnError) + dkgAddress := cmd.String("dkg-address", "", "DKG contract address") + keyID := cmd.String("key-id", "", "key ID") + onchainPubKeys := cmd.String("onchain-pub-keys", "", "comma-separated list of 
OCR on-chain pubkeys") + offchainPubKeys := cmd.String("offchain-pub-keys", "", "comma-separated list of OCR off-chain pubkeys") + configPubKeys := cmd.String("config-pub-keys", "", "comma-separated list of OCR config pubkeys") + peerIDs := cmd.String("peer-ids", "", "comma-separated list of peer IDs") + transmitters := cmd.String("transmitters", "", "comma-separated list transmitters") + dkgEncryptionPubKeys := cmd.String("dkg-encryption-pub-keys", "", "comma-separated list of DKG encryption pubkeys") + dkgSigningPubKeys := cmd.String("dkg-signing-pub-keys", "", "comma-separated list of DKG signing pubkeys") + schedule := cmd.String("schedule", "", "comma-separted list of transmission schedule") + f := cmd.Uint("f", 1, "number of faulty oracles") + deltaProgress := cmd.Duration("delta-progress", 30*time.Second, "duration of delta progress") + deltaResend := cmd.Duration("delta-resend", 10*time.Second, "duration of delta resend") + deltaRound := cmd.Duration("delta-round", 10*time.Second, "duration of delta round") + deltaGrace := cmd.Duration("delta-grace", 20*time.Second, "duration of delta grace") + deltaStage := cmd.Duration("delta-stage", 20*time.Second, "duration of delta stage") + maxRounds := cmd.Uint("max-rounds", 3, "maximum number of rounds") + maxDurationQuery := cmd.Duration("max-duration-query", 10*time.Millisecond, "maximum duration of query") + maxDurationObservation := cmd.Duration("max-duration-observation", 10*time.Second, "maximum duration of observation method") + maxDurationReport := cmd.Duration("max-duration-report", 10*time.Second, "maximum duration of report method") + maxDurationAccept := cmd.Duration("max-duration-accept", 10*time.Millisecond, "maximum duration of shouldAcceptFinalizedReport method") + maxDurationTransmit := cmd.Duration("max-duration-transmit", 1*time.Second, "maximum duration of shouldTransmitAcceptedReport method") + + helpers.ParseArgs(cmd, + os.Args[2:], + "dkg-address", + "key-id", + "onchain-pub-keys", + 
"offchain-pub-keys", + "config-pub-keys", + "peer-ids", + "transmitters", + "dkg-encryption-pub-keys", + "dkg-signing-pub-keys", + "schedule") + + commands := dkgSetConfigArgs{ + commonSetConfigArgs: commonSetConfigArgs{ + onchainPubKeys: *onchainPubKeys, + offchainPubKeys: *offchainPubKeys, + configPubKeys: *configPubKeys, + peerIDs: *peerIDs, + transmitters: *transmitters, + schedule: *schedule, + f: *f, + deltaProgress: *deltaProgress, + deltaResend: *deltaResend, + deltaRound: *deltaRound, + deltaGrace: *deltaGrace, + deltaStage: *deltaStage, + maxRounds: uint8(*maxRounds), + maxDurationQuery: *maxDurationQuery, + maxDurationObservation: *maxDurationObservation, + maxDurationReport: *maxDurationReport, + maxDurationAccept: *maxDurationAccept, + maxDurationTransmit: *maxDurationTransmit, + }, + dkgEncryptionPubKeys: *dkgEncryptionPubKeys, + dkgSigningPubKeys: *dkgSigningPubKeys, + keyID: *keyID, + } + + setDKGConfig(e, *dkgAddress, commands) + case "beacon-set-config": + cmd := flag.NewFlagSet("beacon-set-config", flag.ExitOnError) + beaconAddress := cmd.String("beacon-address", "", "VRF beacon contract address") + confDelays := cmd.String("conf-delays", "1,2,3,4,5,6,7,8", "comma-separted list of 8 confirmation delays") + onchainPubKeys := cmd.String("onchain-pub-keys", "", "comma-separated list of OCR on-chain pubkeys") + offchainPubKeys := cmd.String("offchain-pub-keys", "", "comma-separated list of OCR off-chain pubkeys") + configPubKeys := cmd.String("config-pub-keys", "", "comma-separated list of OCR config pubkeys") + peerIDs := cmd.String("peer-ids", "", "comma-separated list of peer IDs") + transmitters := cmd.String("transmitters", "", "comma-separated list transmitters") + schedule := cmd.String("schedule", "", "comma-separted list of transmission schedule") + f := cmd.Uint("f", 1, "number of faulty oracles") + // TODO: Adjust default delta* and maxDuration* values below after benchmarking latency + deltaProgress := cmd.Duration("delta-progress", 
30*time.Second, "duration of delta progress") + deltaResend := cmd.Duration("delta-resend", 10*time.Second, "duration of delta resend") + deltaRound := cmd.Duration("delta-round", 10*time.Second, "duration of delta round") + deltaGrace := cmd.Duration("delta-grace", 20*time.Second, "duration of delta grace") + deltaStage := cmd.Duration("delta-stage", 20*time.Second, "duration of delta stage") + cacheEvictionWindowSeconds := cmd.Int64("cache-eviction-window", 60, "cache eviction window, in seconds") + batchGasLimit := cmd.Int64("batch-gas-limit", 5_000_000, "batch gas limit") + coordinatorOverhead := cmd.Int64("coordinator-overhead", 50_000, "coordinator overhead") + callbackOverhead := cmd.Int64("callback-overhead", 50_000, "callback overhead") + blockGasOverhead := cmd.Int64("block-gas-overhead", 50_000, "block gas overhead") + lookbackBlocks := cmd.Uint64("lookback-blocks", 1000, "lookback blocks") + maxRounds := cmd.Uint("max-rounds", 3, "maximum number of rounds") + maxDurationQuery := cmd.Duration("max-duration-query", 10*time.Millisecond, "maximum duration of query") + maxDurationObservation := cmd.Duration("max-duration-observation", 10*time.Second, "maximum duration of observation method") + maxDurationReport := cmd.Duration("max-duration-report", 10*time.Second, "maximum duration of report method") + maxDurationAccept := cmd.Duration("max-duration-accept", 5*time.Second, "maximum duration of shouldAcceptFinalizedReport method") + maxDurationTransmit := cmd.Duration("max-duration-transmit", 1*time.Second, "maximum duration of shouldTransmitAcceptedReport method") + + helpers.ParseArgs(cmd, + os.Args[2:], + "beacon-address", + "onchain-pub-keys", + "offchain-pub-keys", + "config-pub-keys", + "peer-ids", + "transmitters", + "schedule") + + commands := vrfBeaconSetConfigArgs{ + commonSetConfigArgs: commonSetConfigArgs{ + onchainPubKeys: *onchainPubKeys, + offchainPubKeys: *offchainPubKeys, + configPubKeys: *configPubKeys, + peerIDs: *peerIDs, + transmitters: 
*transmitters, + schedule: *schedule, + f: *f, + deltaProgress: *deltaProgress, + deltaResend: *deltaResend, + deltaRound: *deltaRound, + deltaGrace: *deltaGrace, + deltaStage: *deltaStage, + maxRounds: uint8(*maxRounds), + maxDurationQuery: *maxDurationQuery, + maxDurationObservation: *maxDurationObservation, + maxDurationReport: *maxDurationReport, + maxDurationAccept: *maxDurationAccept, + maxDurationTransmit: *maxDurationTransmit, + }, + confDelays: *confDelays, + coordinatorConfig: ocr2vrftypes.CoordinatorConfig{ + CacheEvictionWindowSeconds: *cacheEvictionWindowSeconds, + BatchGasLimit: *batchGasLimit, + CoordinatorOverhead: *coordinatorOverhead, + CallbackOverhead: *callbackOverhead, + BlockGasOverhead: *blockGasOverhead, + LookbackBlocks: *lookbackBlocks, + }, + } + + commands.setVRFBeaconConfig(e, *beaconAddress) + case "coordinator-set-producer": + cmd := flag.NewFlagSet("coordinator-set-producer", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "VRF coordinator contract address") + beaconAddress := cmd.String("beacon-address", "", "VRF beacon contract address") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "beacon-address") + setProducer(e, *coordinatorAddress, *beaconAddress) + case "coordinator-request-randomness": + cmd := flag.NewFlagSet("coordinator-request-randomness", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "VRF coordinator contract address") + numWords := cmd.Uint("num-words", 1, "number of words to request") + subID := cmd.String("sub-id", "", "subscription ID") + confDelay := cmd.Int64("conf-delay", 1, "confirmation delay") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "sub-id") + requestRandomness( + e, + *coordinatorAddress, + uint16(*numWords), + decimal.RequireFromString(*subID).BigInt(), + big.NewInt(*confDelay)) + case "coordinator-redeem-randomness": + cmd := flag.NewFlagSet("coordinator-redeem-randomness", flag.ExitOnError) + 
coordinatorAddress := cmd.String("coordinator-address", "", "VRF coordinator contract address") + subID := cmd.String("sub-id", "", "subscription ID") + requestID := cmd.Int64("request-id", 0, "request ID") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "sub-id", "request-id") + redeemRandomness( + e, + *coordinatorAddress, + decimal.RequireFromString(*subID).BigInt(), + big.NewInt(*requestID)) + case "beacon-info": + cmd := flag.NewFlagSet("beacon-info", flag.ExitOnError) + beaconAddress := cmd.String("beacon-address", "", "VRF beacon contract address") + helpers.ParseArgs(cmd, os.Args[2:], "beacon-address") + beacon := newVRFBeacon(common.HexToAddress(*beaconAddress), e.Ec) + keyID, err := beacon.SKeyID(nil) + helpers.PanicErr(err) + fmt.Println("beacon key id:", hexutil.Encode(keyID[:])) + keyHash, err := beacon.SProvingKeyHash(nil) + helpers.PanicErr(err) + fmt.Println("beacon proving key hash:", hexutil.Encode(keyHash[:])) + case "coordinator-create-sub": + cmd := flag.NewFlagSet("coordinator-create-sub", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "VRF coordinator contract address") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address") + createSubscription(e, *coordinatorAddress) + case "coordinator-add-consumer": + cmd := flag.NewFlagSet("coordinator-add-consumer", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "VRF coordinator contract address") + consumerAddress := cmd.String("consumer-address", "", "VRF consumer contract address") + subId := cmd.String("sub-id", "", "subscription ID") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "consumer-address") + addConsumer(e, *coordinatorAddress, *consumerAddress, decimal.RequireFromString(*subId).BigInt()) + case "coordinator-get-sub": + cmd := flag.NewFlagSet("coordinator-get-sub", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "VRF coordinator contract address") + subId := 
cmd.String("sub-id", "", "subscription ID") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address") + sub := getSubscription(e, *coordinatorAddress, decimal.RequireFromString(*subId).BigInt()) + fmt.Println("subscription ID:", *subId) + fmt.Println("balance:", sub.Balance) + fmt.Println("consumers:", sub.Consumers) + fmt.Println("owner:", sub.Owner) + case "link-balance": + cmd := flag.NewFlagSet("link-balance", flag.ExitOnError) + linkAddress := cmd.String("link-address", "", "link address") + helpers.ParseArgs(cmd, os.Args[2:], "link-address") + + l, err := link_token_interface.NewLinkToken(common.HexToAddress(*linkAddress), e.Ec) + helpers.PanicErr(err) + + bal, err := l.BalanceOf(nil, e.Owner.From) + helpers.PanicErr(err) + fmt.Println("link balance of", e.Owner.From, "is", assets.NewWei(bal).String()) + case "get-balances": + cmd := flag.NewFlagSet("get-balances", flag.ExitOnError) + addresses := cmd.String("addresses", "", "comma-separated list of addresses") + helpers.ParseArgs(cmd, os.Args[2:], "addresses") + + for _, account := range strings.Split(*addresses, ",") { + bal, err := e.Ec.BalanceAt(context.Background(), common.HexToAddress(account), nil) + helpers.PanicErr(err) + + fmt.Println("ETH balance of", account, "is", assets.NewWei(bal).String()) + } + case "coordinator-fund-sub": + cmd := flag.NewFlagSet("coordinator-fund-sub", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "VRF coordinator contract address") + linkAddress := cmd.String("link-address", "", "link-address") + fundingAmount := cmd.String("funding-amount", "5e18", "funding amount in juels. 
can use scientific notation, e.g 10e18 for 10 PLI") // 5 PLI + subID := cmd.String("sub-id", "", "subscription ID") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "link-address") + eoaFundSubscription(e, *coordinatorAddress, *linkAddress, decimal.RequireFromString(*fundingAmount).BigInt(), decimal.RequireFromString(*subID).BigInt()) + case "beacon-set-payees": + cmd := flag.NewFlagSet("beacon-set-payees", flag.ExitOnError) + beaconAddress := cmd.String("beacon-address", "", "VRF beacon contract address") + transmitters := cmd.String("transmitters", "", "comma-separated list of transmitters") + payees := cmd.String("payees", "", "comma-separated list of payees") + helpers.ParseArgs(cmd, os.Args[2:], "beacon-address", "transmitters", "payees") + setPayees(e, *beaconAddress, helpers.ParseAddressSlice(*transmitters), helpers.ParseAddressSlice(*payees)) + case "consumer-deploy": + cmd := flag.NewFlagSet("consumer-deploy", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "VRF coordinator address") + shouldFail := cmd.Bool("should-fail", false, "shouldFail flag") + beaconPeriodBlocks := cmd.Int64("beacon-period-blocks", 1, "beacon period in number of blocks") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "beacon-period-blocks") + deployVRFBeaconCoordinatorConsumer(e, *coordinatorAddress, *shouldFail, big.NewInt(*beaconPeriodBlocks)) + case "consumer-request-randomness": + cmd := flag.NewFlagSet("consumer-request-randomness", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "VRF coordinator consumer address") + numWords := cmd.Uint("num-words", 1, "number of words to request") + subID := cmd.String("sub-id", "", "subscription ID") + confDelay := cmd.Int64("conf-delay", 1, "confirmation delay") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address", "sub-id") + requestRandomnessFromConsumer(e, *consumerAddress, uint16(*numWords), decimal.RequireFromString(*subID).BigInt(), 
big.NewInt(*confDelay)) + case "consumer-redeem-randomness": + cmd := flag.NewFlagSet("consumer-redeem-randomness", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "VRF coordinator consumer address") + subID := cmd.String("sub-id", "", "subscription ID") + requestID := cmd.String("request-id", "0", "request ID") + numWords := cmd.Int64("num-words", 1, "number of words to print after redeeming") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address", "request-id") + reqIdInt := decimal.RequireFromString(*requestID).BigInt() + redeemRandomnessFromConsumer(e, *consumerAddress, decimal.RequireFromString(*subID).BigInt(), reqIdInt, *numWords) + case "consumer-request-callback": + cmd := flag.NewFlagSet("consumer-request-callback", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "VRF coordinator consumer address") + numWords := cmd.Uint("num-words", 1, "number of words to request") + subID := cmd.String("sub-id", "", "subscription ID") + confDelay := cmd.Int64("conf-delay", 1, "confirmation delay") + callbackGasLimit := cmd.Uint("cb-gas-limit", 100_000, "callback gas limit") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + requestRandomnessCallback( + e, + *consumerAddress, + uint16(*numWords), + decimal.RequireFromString(*subID).BigInt(), + big.NewInt(*confDelay), + uint32(*callbackGasLimit), + nil, // test consumer doesn't use any args + ) + case "consumer-read-randomness": + cmd := flag.NewFlagSet("consumer-read-randomness", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "VRF coordinator consumer address") + requestID := cmd.String("request-id", "", "VRF request ID") + numWords := cmd.Int("num-words", 1, "number of words to fetch") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + readRandomness(e, *consumerAddress, decimal.RequireFromString(*requestID).BigInt(), *numWords) + case "consumer-request-callback-batch": + cmd := flag.NewFlagSet("consumer-request-callback", 
flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "VRF beacon consumer address") + numWords := cmd.Uint("num-words", 1, "number of words to request") + subID := cmd.String("sub-id", "", "subscription ID") + confDelay := cmd.Int64("conf-delay", 1, "confirmation delay") + batchSize := cmd.Int64("batch-size", 1, "batch size") + callbackGasLimit := cmd.Uint("cb-gas-limit", 200_000, "callback gas limit") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + requestRandomnessCallbackBatch( + e, + *consumerAddress, + uint16(*numWords), + decimal.RequireFromString(*subID).BigInt(), + big.NewInt(*confDelay), + uint32(*callbackGasLimit), + nil, // test consumer doesn't use any args, + big.NewInt(*batchSize), + ) + case "consumer-request-callback-batch-load-test": + cmd := flag.NewFlagSet("consumer-request-callback-load-test", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "VRF beacon batch consumer address") + numWords := cmd.Uint("num-words", 1, "number of words to request") + subID := cmd.String("sub-id", "", "subscription ID") + confDelay := cmd.Int64("conf-delay", 1, "confirmation delay") + batchSize := cmd.Int64("batch-size", 1, "batch size") + batchCount := cmd.Int64("batch-count", 1, "number of batches to run") + callbackGasLimit := cmd.Uint("cb-gas-limit", 200_000, "callback gas limit") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + + for i := int64(0); i < *batchCount; i++ { + requestRandomnessCallbackBatch( + e, + *consumerAddress, + uint16(*numWords), + decimal.RequireFromString(*subID).BigInt(), + big.NewInt(*confDelay), + uint32(*callbackGasLimit), + nil, // test consumer doesn't use any args, + big.NewInt(*batchSize), + ) + } + case "deploy-load-test-consumer": + cmd := flag.NewFlagSet("deploy-load-test-consumer", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "coordinator address") + beaconPeriodBlocks := cmd.Int64("beacon-period-blocks", 1, "beacon period in 
number of blocks") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "beacon-period-blocks") + + deployLoadTestVRFBeaconCoordinatorConsumer(e, *coordinatorAddress, false, big.NewInt(*beaconPeriodBlocks)) + case "get-load-test-results": + cmd := flag.NewFlagSet("get-load-test-results", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "Load test contract address") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + + printLoadtestResults(e, *consumerAddress) + case "verify-beacon-randomness": + cmd := flag.NewFlagSet("verify-randomness", flag.ExitOnError) + dkgAddress := cmd.String("dkg-address", "", "DKG contract address") + beaconAddress := cmd.String("beacon-address", "", "VRF beacon contract address") + coordinatorAddress := cmd.String("coordinator-address", "", "VRF coordinator contract address") + height := cmd.Uint64("height", 0, "block height of VRF beacon output") + confDelay := cmd.Uint64("conf-delay", 1, "confirmation delay of VRF beacon output") + searchWindow := cmd.Uint64("search-window", 200, "search space size for beacon transmission. 
Number of blocks after beacon height") + helpers.ParseArgs(cmd, os.Args[2:], "dkg-address", "coordinator-address", "beacon-address", "height", "conf-delay") + + verifyBeaconRandomness(e, *dkgAddress, *beaconAddress, *coordinatorAddress, *height, *confDelay, *searchWindow) + case "dkg-setup": + setupDKGNodes(e) + case "ocr2vrf-setup": + setupOCR2VRFNodes(e) + case "ocr2vrf-setup-infra-forwarder": + setupOCR2VRFNodesForInfraWithForwarder(e) + case "ocr2vrf-fund-nodes": + fundOCR2VRFNodes(e) + default: + panic("unrecognized subcommand: " + os.Args[1]) + } +} diff --git a/core/scripts/ocr2vrf/readiness/main.go b/core/scripts/ocr2vrf/readiness/main.go new file mode 100644 index 00000000..cdcc6bbd --- /dev/null +++ b/core/scripts/ocr2vrf/readiness/main.go @@ -0,0 +1,214 @@ +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "os" + "strings" + + "github.com/urfave/cli" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + clcmd "github.com/goplugin/pluginv3.0/v2/core/cmd" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func newApp(remoteNodeURL string, writer io.Writer) (*clcmd.Shell, *cli.App) { + prompter := clcmd.NewTerminalPrompter() + client := &clcmd.Shell{ + Renderer: clcmd.RendererJSON{Writer: writer}, + AppFactory: clcmd.PluginAppFactory{}, + KeyStoreAuthenticator: clcmd.TerminalKeyStoreAuthenticator{Prompter: prompter}, + FallbackAPIInitializer: clcmd.NewPromptingAPIInitializer(prompter), + Runner: clcmd.PluginRunner{}, + PromptingSessionRequestBuilder: clcmd.NewPromptingSessionRequestBuilder(prompter), + ChangePasswordPrompter: clcmd.NewChangePasswordPrompter(), + PasswordPrompter: clcmd.NewPasswordPrompter(), + } + app := clcmd.NewApp(client) + fs := flag.NewFlagSet("blah", flag.ContinueOnError) + fs.Bool("json", true, "") + fs.String("remote-node-url", remoteNodeURL, "") + helpers.PanicErr(app.Before(cli.NewContext(nil, fs, nil))) + // overwrite renderer since it's set to stdout after Before() is called 
+ client.Renderer = clcmd.RendererJSON{Writer: writer} + return client, app +} + +var ( + remoteNodeURLs = flag.String("remote-node-urls", "", "remote node URL") + checkMarkEmoji = "✅" + xEmoji = "❌" + infoEmoji = "ℹ️" +) + +type ocr2Bundle struct { + ID string `json:"id"` + ChainType string `json:"chainType"` + OnchainPublicKey string `json:"onchainPublicKey"` + OffchainPublicKey string `json:"offchainPublicKey"` + ConfigPublicKey string `json:"configPublicKey"` +} + +func main() { + flag.Parse() + + if remoteNodeURLs == nil { + fmt.Println("flag -remote-node-urls required") + os.Exit(1) + } + + urls := strings.Split(*remoteNodeURLs, ",") + var ( + allDKGSignKeys []string + allDKGEncryptKeys []string + allOCR2KeyIDs []string + allOCR2OffchainPubkeys []string + allOCR2OnchainPubkeys []string + allOCR2ConfigPubkeys []string + allETHKeys []string + allPeerIDs []string + ) + for _, remoteNodeURL := range urls { + output := &bytes.Buffer{} + client, app := newApp(remoteNodeURL, output) + + // login first to establish the session + fmt.Println("logging in to:", remoteNodeURL) + loginFs := flag.NewFlagSet("test", flag.ContinueOnError) + loginFs.String("file", "", "") + loginFs.Bool("bypass-version-check", true, "") + loginCtx := cli.NewContext(app, loginFs, nil) + err := client.RemoteLogin(loginCtx) + helpers.PanicErr(err) + output.Reset() + fmt.Println() + + // check for DKG signing keys + err = clcmd.NewDKGSignKeysClient(client).ListKeys(&cli.Context{ + App: app, + }) + helpers.PanicErr(err) + var dkgSignKeys []presenters.DKGSignKeyResource + helpers.PanicErr(json.Unmarshal(output.Bytes(), &dkgSignKeys)) + switch len(dkgSignKeys) { + case 1: + fmt.Println(checkMarkEmoji, "found 1 DKG sign key on", remoteNodeURL) + case 0: + fmt.Println(xEmoji, "did not find any DKG sign keys on", remoteNodeURL, ", please create one") + default: + fmt.Println(infoEmoji, "found more than 1 DKG sign key on", remoteNodeURL, ", consider removing all but one") + } + output.Reset() + 
fmt.Println() + + // check for DKG encryption keys + err = clcmd.NewDKGEncryptKeysClient(client).ListKeys(&cli.Context{ + App: app, + }) + helpers.PanicErr(err) + var dkgEncryptKeys []presenters.DKGEncryptKeyResource + helpers.PanicErr(json.Unmarshal(output.Bytes(), &dkgEncryptKeys)) + switch len(dkgEncryptKeys) { + case 1: + fmt.Println(checkMarkEmoji, "found 1 DKG encrypt key on", remoteNodeURL) + case 0: + fmt.Println(xEmoji, "did not find any DKG encrypt keys on", remoteNodeURL, ", please create one") + default: + fmt.Println(infoEmoji, "found more than 1 DKG encrypt key on", remoteNodeURL, ", consider removing all but one") + } + output.Reset() + fmt.Println() + + // check for OCR2 keys + err = client.ListOCR2KeyBundles(&cli.Context{ + App: app, + }) + helpers.PanicErr(err) + var ocr2Keys []ocr2Bundle + helpers.PanicErr(json.Unmarshal(output.Bytes(), &ocr2Keys)) + ethBundle := func() *ocr2Bundle { + for _, b := range ocr2Keys { + if b.ChainType == "evm" { + return &b + } + } + return nil + }() + if ethBundle != nil { + fmt.Println(checkMarkEmoji, "found ocr evm key bundle on", remoteNodeURL) + } else { + fmt.Println(xEmoji, "did not find ocr evm key bundle on", remoteNodeURL, ", please create one") + } + output.Reset() + fmt.Println() + + // check for ETH keys + err = client.ListETHKeys(&cli.Context{ + App: app, + }) + helpers.PanicErr(err) + var ethKeys []presenters.ETHKeyResource + helpers.PanicErr(json.Unmarshal(output.Bytes(), ðKeys)) + switch { + case len(ethKeys) >= 5: + fmt.Println(checkMarkEmoji, "found", len(ethKeys), "eth keys on", remoteNodeURL) + case len(ethKeys) < 5: + fmt.Println(xEmoji, "found only", len(ethKeys), "eth keys on", remoteNodeURL, ", consider creating more") + } + output.Reset() + fmt.Println() + + // check for peer ids + err = client.ListP2PKeys(&cli.Context{ + App: app, + }) + helpers.PanicErr(err) + var p2pKeys []presenters.P2PKeyResource + helpers.PanicErr(json.Unmarshal(output.Bytes(), &p2pKeys)) + switch len(p2pKeys) { + case 
1: + fmt.Println(checkMarkEmoji, "found P2P key on", remoteNodeURL) + case 0: + fmt.Println(xEmoji, "no P2P keys found on", remoteNodeURL, ", please create one") + default: + fmt.Println(infoEmoji, "found", len(p2pKeys), "P2P keys on", remoteNodeURL, ", consider removing all but one") + } + output.Reset() + fmt.Println() + + for _, dkgSign := range dkgSignKeys { + allDKGSignKeys = append(allDKGSignKeys, dkgSign.PublicKey) + } + for _, dkgEncrypt := range dkgEncryptKeys { + allDKGEncryptKeys = append(allDKGEncryptKeys, dkgEncrypt.PublicKey) + } + for _, ocr2Bundle := range ocr2Keys { + if ocr2Bundle.ChainType == "evm" { + allOCR2KeyIDs = append(allOCR2KeyIDs, ocr2Bundle.ID) + allOCR2ConfigPubkeys = append(allOCR2ConfigPubkeys, strings.TrimPrefix(ocr2Bundle.ConfigPublicKey, "ocr2cfg_evm_")) + allOCR2OffchainPubkeys = append(allOCR2OffchainPubkeys, strings.TrimPrefix(ocr2Bundle.OffchainPublicKey, "ocr2off_evm_")) + allOCR2OnchainPubkeys = append(allOCR2OnchainPubkeys, strings.TrimPrefix(ocr2Bundle.OnchainPublicKey, "ocr2on_evm_")) + } + } + for _, ethKey := range ethKeys { + allETHKeys = append(allETHKeys, ethKey.Address) + } + for _, peerKey := range p2pKeys { + allPeerIDs = append(allPeerIDs, strings.TrimPrefix(peerKey.PeerID, "p2p_")) + } + } + + fmt.Println("------------- NODE INFORMATION -------------") + fmt.Println("DKG sign keys:", strings.Join(allDKGSignKeys, ",")) + fmt.Println("DKG encrypt keys:", strings.Join(allDKGEncryptKeys, ",")) + fmt.Println("OCR2 key IDs:", strings.Join(allOCR2KeyIDs, ",")) + fmt.Println("OCR2 config public keys:", strings.Join(allOCR2ConfigPubkeys, ",")) + fmt.Println("OCR2 onchain public keys:", strings.Join(allOCR2OnchainPubkeys, ",")) + fmt.Println("OCR2 offchain public keys:", strings.Join(allOCR2OffchainPubkeys, ",")) + fmt.Println("ETH addresses:", strings.Join(allETHKeys, ",")) + fmt.Println("Peer IDs:", strings.Join(allPeerIDs, ",")) +} diff --git a/core/scripts/ocr2vrf/setup_dkg.go b/core/scripts/ocr2vrf/setup_dkg.go new 
file mode 100644 index 00000000..3b9627d2 --- /dev/null +++ b/core/scripts/ocr2vrf/setup_dkg.go @@ -0,0 +1,101 @@ +package main + +import ( + "flag" + "fmt" + "math/big" + "os" + "strings" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" +) + +func setupDKGNodes(e helpers.Environment) { + // Websocket URL & HTTP url required. + wsUrl := os.Getenv("ETH_URL") + httpUrl := os.Getenv("ETH_HTTP_URL") + if len(wsUrl) == 0 || len(httpUrl) == 0 { + fmt.Println("ETH_URL & ETH_HTTP_URL are required for this script.") + os.Exit(1) + } + + cmd := flag.NewFlagSet("dkg-setup", flag.ExitOnError) + keyID := cmd.String("key-id", "aee00d81f822f882b6fe28489822f59ebb21ea95c0ae21d9f67c0239461148fc", "key ID") + apiFile := cmd.String("api", "../../../tools/secrets/apicredentials", "api credentials file") + passwordFile := cmd.String("password", "../../../tools/secrets/password.txt", "password file") + databasePrefix := cmd.String("database-prefix", "postgres://postgres:postgres_password_padded_for_security@localhost:5432/dkg-test", "database prefix") + databaseSuffixes := cmd.String("database-suffixes", "sslmode=disable", "database parameters to be added") + nodeCount := cmd.Int("node-count", 6, "number of nodes") + fundingAmount := cmd.Int64("funding-amount", 10000000000000000, "amount to fund nodes") // .1 ETH + helpers.ParseArgs(cmd, os.Args[2:]) + + if *nodeCount < 6 { + fmt.Println("Node count too low for DKG job, need at least 6.") + os.Exit(1) + } + + //Deploy DKG contract. + // uncomment for faster txs + // e.Owner.GasPrice = e.Owner.GasPrice.Mul(e.Owner.GasPrice, big.NewInt(2)) + dkgAddress := deployDKG(e).String() + + // Initialize dkg-set-config arguments. + onChainPublicKeys := []string{} + offChainPublicKeys := []string{} + configPublicKeys := []string{} + peerIDs := []string{} + transmitters := []string{} + dkgEncrypters := []string{} + dkgSigners := []string{} + + // Iterate through all nodes and create jobs. 
+ for i := 0; i < *nodeCount; i++ { + flagSet := flag.NewFlagSet("run-dkg-job-creation", flag.ExitOnError) + flagSet.String("api", *apiFile, "api file") + flagSet.String("password", *passwordFile, "password file") + flagSet.String("bootstrapPort", fmt.Sprintf("%d", 8000), "port of bootstrap") + flagSet.String("job-type", string(jobTypeDKG), "the job type") + flagSet.String("keyID", *keyID, "") + flagSet.String("contractID", dkgAddress, "the contract address of the DKG") + flagSet.Int64("chainID", e.ChainID, "the chain ID") + flagSet.Bool("dangerWillRobinson", true, "for resetting databases") + flagSet.Bool("isBootstrapper", i == 0, "is first node") + bootstrapperPeerID := "" + if len(peerIDs) != 0 { + bootstrapperPeerID = peerIDs[0] + } + flagSet.String("bootstrapperPeerID", bootstrapperPeerID, "peerID of first node") + + // Setup DKG node. + payload := SetupNode(e, flagSet, i, *databasePrefix, *databaseSuffixes, false, true, wsUrl, httpUrl) + + // Append arguments for dkg-set-config command. + onChainPublicKeys = append(onChainPublicKeys, payload.OnChainPublicKey) + offChainPublicKeys = append(offChainPublicKeys, payload.OffChainPublicKey) + configPublicKeys = append(configPublicKeys, payload.ConfigPublicKey) + peerIDs = append(peerIDs, payload.PeerID) + transmitters = append(transmitters, payload.Transmitter) + dkgEncrypters = append(dkgEncrypters, payload.DkgEncrypt) + dkgSigners = append(dkgSigners, payload.DkgSign) + } + + // Fund transmitters with funding amount. + helpers.FundNodes(e, transmitters, big.NewInt(*fundingAmount)) + + // Construct and print dkg-set-config command. + fmt.Println("Generated setConfig Command:") + command := fmt.Sprintf( + "go run . 
dkg-set-config --dkg-address %s -key-id %s -onchain-pub-keys %s -offchain-pub-keys %s -config-pub-keys %s -peer-ids %s -transmitters %s -dkg-encryption-pub-keys %s -dkg-signing-pub-keys %s -schedule 1,1,1,1,1", + dkgAddress, + *keyID, + strings.Join(onChainPublicKeys[1:], ","), + strings.Join(offChainPublicKeys[1:], ","), + strings.Join(configPublicKeys[1:], ","), + strings.Join(peerIDs[1:], ","), + strings.Join(transmitters[1:], ","), + strings.Join(dkgEncrypters[1:], ","), + strings.Join(dkgSigners[1:], ","), + ) + + fmt.Println(command) +} diff --git a/core/scripts/ocr2vrf/setup_ocr2vrf.go b/core/scripts/ocr2vrf/setup_ocr2vrf.go new file mode 100644 index 00000000..876dc40e --- /dev/null +++ b/core/scripts/ocr2vrf/setup_ocr2vrf.go @@ -0,0 +1,573 @@ +package main + +import ( + "flag" + "fmt" + "math/big" + "os" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/shopspring/decimal" + "github.com/urfave/cli" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + "github.com/goplugin/pluginv3.0/v2/core/cmd" +) + +type jobType string + +const ( + jobTypeDKG jobType = "DKG" + jobTypeOCR2VRF jobType = "OCR2VRF" +) + +func fundOCR2VRFNodes(e helpers.Environment) { + fs := flag.NewFlagSet("ocr2vrf-setup", flag.ExitOnError) + ethSendingKeysString := fs.String("eth-sending-keys", "", "eth sending keys") + fundingAmount := fs.String("funding-amount", "1e18", "funding amount in wei. can use scientific notation, e.g 10e18 for 10 ether") // 1 ETH + helpers.ParseArgs(fs, os.Args[2:], "eth-sending-keys") + + flatSendingKeys := strings.Split(*ethSendingKeysString, ",") + helpers.FundNodes(e, flatSendingKeys, decimal.RequireFromString(*fundingAmount).BigInt()) +} + +func setupOCR2VRFNodes(e helpers.Environment) { + // Websocket URL & HTTP url required. 
+ wsUrl := os.Getenv("ETH_URL") + httpUrl := os.Getenv("ETH_HTTP_URL") + if len(wsUrl) == 0 || len(httpUrl) == 0 { + fmt.Println("ETH_URL & ETH_HTTP_URL are required for this script.") + os.Exit(1) + } + + fs := flag.NewFlagSet("ocr2vrf-setup", flag.ExitOnError) + + keyID := fs.String("key-id", "aee00d81f822f882b6fe28489822f59ebb21ea95c0ae21d9f67c0239461148fc", "key ID") + linkAddress := fs.String("link-address", "", "PLI token address") + linkEthFeed := fs.String("link-eth-feed", "", "PLI-ETH feed address") + useForwarder := fs.Bool("use-forwarder", false, "boolean to use the forwarder") + confDelays := fs.String("conf-delays", "1,2,3,4,5,6,7,8", "8 confirmation delays") + weiPerUnitLink := fs.String("wei-per-unit-link", "6e16", "wei per unit link price for feed") + beaconPeriodBlocks := fs.Int64("beacon-period-blocks", 3, "beacon period in blocks") + subscriptionBalanceString := fs.String("subscription-balance", "1e19", "amount to fund subscription") + maxCallbackGasLimit := fs.Uint("max-cb-gas-limit", 2.5e6, "max callback gas limit") + maxCallbackArgumentsLength := fs.Uint("max-cb-args-length", 32*10 /* 10 EVM words */, "max callback arguments length") + + apiFile := fs.String("api", "../../../tools/secrets/apicredentials", "api credentials file") + passwordFile := fs.String("password", "../../../tools/secrets/password.txt", "password file") + databasePrefix := fs.String("database-prefix", "postgres://postgres:postgres_password_padded_for_security@localhost:5432/ocr2vrf-test", "database prefix") + databaseSuffixes := fs.String("database-suffixes", "sslmode=disable", "database parameters to be added") + nodeCount := fs.Int("node-count", 6, "number of nodes") + fundingAmount := fs.Int64("funding-amount", 1e17, "amount to fund nodes") // .1 ETH + resetDatabase := fs.Bool("reset-database", true, "boolean to reset database") + + helpers.ParseArgs(fs, os.Args[2:]) + + if *nodeCount < 6 { + fmt.Println("Node count too low for OCR2VRF job, need at least 6.") + 
os.Exit(1) + } + + delays := helpers.ParseIntSlice(*confDelays) + if len(delays) != 8 { + fmt.Println("confDelays must have a length of 8") + os.Exit(1) + } + + var link common.Address + if *linkAddress == "" { + link = helpers.DeployLinkToken(e) + } else { + link = common.HexToAddress(*linkAddress) + } + + // Deploy DKG and VRF contracts, and add VRF + // as a consumer of DKG events. + fmt.Println("Deploying DKG contract...") + dkgAddress := deployDKG(e) + + // Deploy a new feed if needed + var feedAddress common.Address + if *linkEthFeed == "" { + fmt.Println("Deploying PLI-ETH feed...") + feedAddress = helpers.DeployLinkEthFeed(e, *linkAddress, decimal.RequireFromString(*weiPerUnitLink).BigInt()) + } else { + feedAddress = common.HexToAddress(*linkEthFeed) + } + + fmt.Println("Deploying VRF coordinator...") + vrfCoordinatorAddress, vrfCoordinator := deployVRFCoordinator(e, big.NewInt(*beaconPeriodBlocks), link.String(), feedAddress.String()) + + fmt.Println("Configuring VRF coordinator...") + configureVRFCoordinator(e, vrfCoordinator, uint32(*maxCallbackGasLimit), uint32(*maxCallbackArgumentsLength)) + + fmt.Println("Deploying VRF beacon...") + vrfBeaconAddress := deployVRFBeacon(e, vrfCoordinatorAddress.String(), link.String(), dkgAddress.String(), *keyID) + + fmt.Println("Adding VRF Beacon as DKG client...") + addClientToDKG(e, dkgAddress.String(), *keyID, vrfBeaconAddress.String()) + + fmt.Println("Adding VRF Beacon as producer in VRF Coordinator") + setProducer(e, vrfCoordinatorAddress.String(), vrfBeaconAddress.String()) + + fmt.Println("Deploying beacon consumer...") + consumerAddress := deployVRFBeaconCoordinatorConsumer(e, vrfCoordinatorAddress.String(), false, big.NewInt(*beaconPeriodBlocks)) + + fmt.Println("Creating subscription...") + createSubscription(e, vrfCoordinatorAddress.String()) + subID := findSubscriptionID(e, vrfCoordinatorAddress.String()) + + fmt.Println("Adding consumer to subscription...") + addConsumer(e, 
vrfCoordinatorAddress.String(), consumerAddress.String(), subID) + + subscriptionBalance := decimal.RequireFromString(*subscriptionBalanceString).BigInt() + if subscriptionBalance.Cmp(big.NewInt(0)) > 0 { + fmt.Println("\nFunding subscription with", subscriptionBalance, "juels...") + eoaFundSubscription(e, vrfCoordinatorAddress.String(), link.String(), subscriptionBalance, subID) + } else { + fmt.Println("Subscription", subID, "NOT getting funded. You must fund the subscription in order to use it!") + } + + var forwarderAddresses []common.Address + var forwarderAddressesStrings []string + // If using the forwarder, set up a forwarder for each node. + if *useForwarder { + fmt.Println("Deploying transaction forwarders...") + for i := 0; i < *nodeCount-1; i++ { + // Deploy an authorized forwarder, and add it to the list of forwarders. + f := deployAuthorizedForwarder(e, link, e.Owner.From) + forwarderAddresses = append(forwarderAddresses, f) + forwarderAddressesStrings = append(forwarderAddressesStrings, f.String()) + } + fmt.Printf("ForwarderAddresses : %v", forwarderAddressesStrings) + } + + fmt.Println("Deploying batch beacon consumer...") + loadTestConsumerAddress := deployLoadTestVRFBeaconCoordinatorConsumer(e, vrfCoordinatorAddress.String(), false, big.NewInt(*beaconPeriodBlocks)) + addConsumer(e, vrfCoordinatorAddress.String(), loadTestConsumerAddress.String(), subID) + + fmt.Println("Configuring nodes with OCR2VRF jobs...") + var ( + onChainPublicKeys []string + offChainPublicKeys []string + configPublicKeys []string + peerIDs []string + transmitters []string + dkgEncrypters []string + dkgSigners []string + sendingKeys [][]string + ) + + for i := 0; i < *nodeCount; i++ { + flagSet := flag.NewFlagSet("run-ocr2vrf-job-creation", flag.ExitOnError) + flagSet.String("api", *apiFile, "api file") + flagSet.String("password", *passwordFile, "password file") + flagSet.String("vrfpassword", *passwordFile, "vrf password file") + flagSet.String("bootstrapPort", 
fmt.Sprintf("%d", 8000), "port of bootstrap") + flagSet.Int64("chainID", e.ChainID, "the chain ID") + flagSet.Bool("applyInitServerConfig", true, "override for using initServerConfig in App.Before") + + flagSet.String("job-type", string(jobTypeOCR2VRF), "the job type") + + // used by bootstrap template instantiation + flagSet.String("contractID", dkgAddress.String(), "the contract to get peers from") + + // DKG args + flagSet.String("keyID", *keyID, "") + flagSet.String("dkg-address", dkgAddress.String(), "the contract address of the DKG") + + // VRF args + flagSet.String("vrf-beacon-address", vrfBeaconAddress.String(), "the contract address of the VRF Beacon") + flagSet.String("vrf-coordinator-address", vrfCoordinatorAddress.String(), "the contract address of the VRF Coordinator") + flagSet.String("link-eth-feed-address", feedAddress.Hex(), "link eth feed address") + + // Apply forwarder args if using the forwarder. + if i > 0 && *useForwarder { + flagSet.Bool("use-forwarder", *useForwarder, "use a transaction forwarder") + flagSet.String("forwarder-address", forwarderAddressesStrings[i-1], "transaction forwarder address") + } + + flagSet.Bool("dangerWillRobinson", *resetDatabase, "for resetting databases") + flagSet.Bool("isBootstrapper", i == 0, "is first node") + bootstrapperPeerID := "" + if len(peerIDs) != 0 { + bootstrapperPeerID = peerIDs[0] + } + flagSet.String("bootstrapperPeerID", bootstrapperPeerID, "peerID of first node") + + payload := SetupNode(e, flagSet, i, *databasePrefix, *databaseSuffixes, *useForwarder, *resetDatabase, wsUrl, httpUrl) + + onChainPublicKeys = append(onChainPublicKeys, payload.OnChainPublicKey) + offChainPublicKeys = append(offChainPublicKeys, payload.OffChainPublicKey) + configPublicKeys = append(configPublicKeys, payload.ConfigPublicKey) + peerIDs = append(peerIDs, payload.PeerID) + transmitters = append(transmitters, payload.Transmitter) + dkgEncrypters = append(dkgEncrypters, payload.DkgEncrypt) + dkgSigners = 
append(dkgSigners, payload.DkgSign) + sendingKeys = append(sendingKeys, payload.SendingKeys) + } + + var nodesToFund []string + + // If using the forwarder, set up a forwarder for each node. + if *useForwarder { + fmt.Println("Setting authorized senders...") + for i, f := range forwarderAddresses { + + // Convert the sending strings for a transmitter to addresses. + var sendinKeysAddresses []common.Address + sendingKeysStrings := sendingKeys[i+1] + for _, s := range sendingKeysStrings { + sendinKeysAddresses = append(sendinKeysAddresses, common.HexToAddress(s)) + } + + // Set authorized senders for the corresponding forwarder. + setAuthorizedSenders(e, f, sendinKeysAddresses) + + // Fund the sending keys. + nodesToFund = append(nodesToFund, sendingKeysStrings...) + + // Set the authorized forwarder as the OCR transmitter. + transmitters[i+1] = f.String() + } + } else { + nodesToFund = append(nodesToFund, transmitters[1:]...) + } + + var payees []common.Address + var reportTransmitters []common.Address // all transmitters excluding bootstrap + for _, t := range transmitters[1:] { + payees = append(payees, e.Owner.From) + reportTransmitters = append(reportTransmitters, common.HexToAddress(t)) + } + + fmt.Printf("Setting EOA: %s as payee for transmitters: %v \n", e.Owner.From, reportTransmitters) + setPayees(e, vrfBeaconAddress.String(), reportTransmitters, payees) + + fmt.Println("Funding transmitters...") + helpers.FundNodes(e, nodesToFund, big.NewInt(*fundingAmount)) + + printStandardCommands( + dkgAddress, + vrfBeaconAddress, + consumerAddress, + loadTestConsumerAddress, + keyID, + confDelays, + onChainPublicKeys[1:], + offChainPublicKeys[1:], + configPublicKeys[1:], + peerIDs[1:], + transmitters[1:], + dkgEncrypters[1:], + dkgSigners[1:], + subID.String(), + ) +} + +func setupOCR2VRFNodesForInfraWithForwarder(e helpers.Environment) { + fs := flag.NewFlagSet("ocr2vrf-setup-infra", flag.ExitOnError) + + keyID := fs.String("key-id", 
"aee00d81f822f882b6fe28489822f59ebb21ea95c0ae21d9f67c0239461148fc", "key ID") + linkAddress := fs.String("link-address", "", "PLI token address") + linkEthFeed := fs.String("link-eth-feed", "", "PLI-ETH feed address") + confDelays := fs.String("conf-delays", "1,2,3,4,5,6,7,8", "8 confirmation delays") + beaconPeriodBlocks := fs.Int64("beacon-period-blocks", 3, "beacon period in blocks") + subscriptionBalanceString := fs.String("subscription-balance", "1e19", "amount to fund subscription") + + peerIDsString := fs.String("peer-ids", "", "peer ids") + dkgSignersString := fs.String("dkg-sign-keys", "", "dkg signing keys") + dkgEncryptersString := fs.String("dkg-encrypt-keys", "", "dkg encrypt keys") + ocr2KeyBundleIDsString := fs.String("key-bundle-ids", "", "ocr2 key bundle ids") + onChainPublicKeysString := fs.String("on-chain-public-keys", "", "ocr2 on-chain public keys") + offChainPublicKeysString := fs.String("off-chain-public-keys", "", "ocr2 off-chain public keys") + configPublicKeysString := fs.String("config-public-keys", "", "ocr2 config public keys") + ethSendingKeysString := fs.String("eth-sending-keys", "", "eth sending keys") + + nodeCount := fs.Int("node-count", 6, "number of nodes") + fundingAmount := fs.Int64("funding-amount", 1e17, "amount to fund nodes") // .1 ETH + + helpers.ParseArgs( + fs, + os.Args[2:], + "link-address", + "link-eth-feed", + "dkg-sign-keys", + "dkg-encrypt-keys", + "key-bundle-ids", + "on-chain-public-keys", + "off-chain-public-keys", + "config-public-keys", + "eth-sending-keys", + "peer-ids", + ) + + peerIDs := strings.Split(*peerIDsString, ",") + dkgSigners := strings.Split(*dkgSignersString, ",") + dkgEncrypters := strings.Split(*dkgEncryptersString, ",") + ocr2KeyBundleIDs := strings.Split(*ocr2KeyBundleIDsString, ",") + onChainPublicKeys := strings.Split(*onChainPublicKeysString, ",") + offChainPublicKeys := strings.Split(*offChainPublicKeysString, ",") + configPublicKeys := strings.Split(*configPublicKeysString, ",") + + 
var sendingKeys [][]string + flatSendingKeys := strings.Split(*ethSendingKeysString, ",") + sendingKeysPerNode := len(flatSendingKeys) / (*nodeCount - 1) + for i := 0; i < *nodeCount-1; i++ { + sendingKeys = append(sendingKeys, flatSendingKeys[i*sendingKeysPerNode:i*sendingKeysPerNode+sendingKeysPerNode]) + } + + if *nodeCount < 6 { + fmt.Println("Node count too low for OCR2VRF job, need at least 6.") + os.Exit(1) + } + + delays := helpers.ParseIntSlice(*confDelays) + if len(delays) != 8 { + fmt.Println("confDelays must have a length of 8") + os.Exit(1) + } + + link := common.HexToAddress(*linkAddress) + feedAddress := common.HexToAddress(*linkEthFeed) + + // Deploy DKG and VRF contracts, and add VRF + // as a consumer of DKG events. + fmt.Println("Deploying DKG contract...") + dkgAddress := deployDKG(e) + + fmt.Println("Deploying VRF coordinator...") + vrfCoordinatorAddress, _ := deployVRFCoordinator(e, big.NewInt(*beaconPeriodBlocks), link.String(), feedAddress.String()) + + fmt.Println("Deploying VRF beacon...") + vrfBeaconAddress := deployVRFBeacon(e, vrfCoordinatorAddress.String(), link.String(), dkgAddress.String(), *keyID) + + fmt.Println("Adding VRF Beacon as DKG client...") + addClientToDKG(e, dkgAddress.String(), *keyID, vrfBeaconAddress.String()) + + fmt.Println("Adding VRF Beacon as producer in VRF Coordinator") + setProducer(e, vrfCoordinatorAddress.String(), vrfBeaconAddress.String()) + + fmt.Println("Deploying beacon consumer...") + consumerAddress := deployVRFBeaconCoordinatorConsumer(e, vrfCoordinatorAddress.String(), false, big.NewInt(*beaconPeriodBlocks)) + + fmt.Println("Creating subscription...") + createSubscription(e, vrfCoordinatorAddress.String()) + + subID := findSubscriptionID(e, vrfCoordinatorAddress.String()) + + fmt.Println("Adding consumer to subscription...") + addConsumer(e, vrfCoordinatorAddress.String(), consumerAddress.String(), subID) + + subscriptionBalance := decimal.RequireFromString(*subscriptionBalanceString).BigInt() + if 
subscriptionBalance.Cmp(big.NewInt(0)) > 0 { + fmt.Println("\nFunding subscription with", subscriptionBalance, "juels...") + eoaFundSubscription(e, vrfCoordinatorAddress.String(), link.String(), subscriptionBalance, subID) + } else { + fmt.Println("Subscription", subID, "NOT getting funded. You must fund the subscription in order to use it!") + } + + var forwarderAddresses []common.Address + var forwarderAddressesStrings []string + var nodesToFund []string + var transmitters []string + + // Set up a forwarder for each node. + fmt.Println("Deploying transaction forwarders...") + for i := 0; i < *nodeCount-1; i++ { + // Deploy an authorized forwarder, and add it to the list of forwarders. + f := deployAuthorizedForwarder(e, link, e.Owner.From) + forwarderAddresses = append(forwarderAddresses, f) + forwarderAddressesStrings = append(forwarderAddressesStrings, f.String()) + } + + // Assign the sending keys to the deployed forwarders. + fmt.Printf("ForwarderAddresses : %v\n", forwarderAddressesStrings) + for i, f := range forwarderAddresses { + // Convert the sending strings for a transmitter to addresses. + var sendinKeysAddresses []common.Address + sendingKeysStrings := sendingKeys[i] + for _, s := range sendingKeysStrings { + sendinKeysAddresses = append(sendinKeysAddresses, common.HexToAddress(s)) + } + + // Set authorized senders for the corresponding forwarder. + setAuthorizedSenders(e, f, sendinKeysAddresses) + + // Fund the sending keys. + nodesToFund = append(nodesToFund, sendingKeysStrings...) + + // Set the authorized forwarder as the OCR transmitter. 
+ transmitters = append(transmitters, f.String()) + } + + var payees []common.Address + var reportTransmitters []common.Address + for _, t := range transmitters { + payees = append(payees, e.Owner.From) + reportTransmitters = append(reportTransmitters, common.HexToAddress(t)) + } + + fmt.Printf("Setting EOA: %s as payee for transmitters: %v \n", e.Owner.From, reportTransmitters) + setPayees(e, vrfBeaconAddress.String(), reportTransmitters, payees) + + fmt.Println("Funding transmitters...") + helpers.FundNodes(e, nodesToFund, big.NewInt(*fundingAmount)) + + fmt.Println("Deploying batch beacon consumer...") + loadTestConsumerAddress := deployLoadTestVRFBeaconCoordinatorConsumer(e, vrfCoordinatorAddress.String(), false, big.NewInt(*beaconPeriodBlocks)) + addConsumer(e, vrfCoordinatorAddress.String(), loadTestConsumerAddress.String(), subID) + + for i := 0; i < *nodeCount; i++ { + // Apply forwarder args if using the forwarder. + if i > 0 { + adjustedIndex := i - 1 + vrfJob := fmt.Sprintf( + cmd.OCR2VRFTemplate, + e.ChainID, + vrfBeaconAddress.String(), + ocr2KeyBundleIDs[adjustedIndex], + forwarderAddresses[adjustedIndex].String(), + true, // forwardingAllowed + "", // P2P Bootstrapper + e.ChainID, + sendingKeys[adjustedIndex], + dkgEncrypters[adjustedIndex], + dkgSigners[adjustedIndex], + *keyID, + dkgAddress.String(), + vrfCoordinatorAddress.String(), + *linkEthFeed, + ) + fmt.Printf("VRF JOB FOR NODE %d:\n%v\n", i-1, vrfJob) // zero-based index to match infra. 
+ } else { + bootstrapJob := fmt.Sprintf(cmd.BootstrapTemplate, e.ChainID, dkgAddress.String(), e.ChainID) + fmt.Printf("VRF BOOTSTRAP JOB:\n%v\n", bootstrapJob) + } + } + printStandardCommands( + dkgAddress, + vrfBeaconAddress, + consumerAddress, + loadTestConsumerAddress, + keyID, + confDelays, + onChainPublicKeys, + offChainPublicKeys, + configPublicKeys, + peerIDs, + transmitters, + dkgEncrypters, + dkgSigners, + subID.String(), + ) +} + +func printStandardCommands( + dkgAddress common.Address, + vrfBeaconAddress common.Address, + consumerAddress common.Address, + loadTestConsumerAddress common.Address, + keyID *string, + confDelays *string, + onChainPublicKeys []string, + offChainPublicKeys []string, + configPublicKeys []string, + peerIDs []string, + transmitters []string, + dkgEncrypters []string, + dkgSigners []string, + subID string, +) { + fmt.Println("Generated dkg setConfig command:") + dkgCommand := fmt.Sprintf( + "go run . dkg-set-config -dkg-address %s -key-id %s -onchain-pub-keys %s -offchain-pub-keys %s -config-pub-keys %s -peer-ids %s -transmitters %s -dkg-encryption-pub-keys %s -dkg-signing-pub-keys %s -schedule 1,1,1,1,1", + dkgAddress.String(), + *keyID, + strings.Join(onChainPublicKeys, ","), + strings.Join(offChainPublicKeys, ","), + strings.Join(configPublicKeys, ","), + strings.Join(peerIDs, ","), + strings.Join(transmitters, ","), + strings.Join(dkgEncrypters, ","), + strings.Join(dkgSigners, ","), + ) + fmt.Println(dkgCommand) + + fmt.Println() + fmt.Println("Generated vrf setConfig command:") + vrfCommand := fmt.Sprintf( + "go run . 
beacon-set-config -beacon-address %s -conf-delays %s -onchain-pub-keys %s -offchain-pub-keys %s -config-pub-keys %s -peer-ids %s -transmitters %s -schedule 1,1,1,1,1", + vrfBeaconAddress.String(), + *confDelays, + strings.Join(onChainPublicKeys, ","), + strings.Join(offChainPublicKeys, ","), + strings.Join(configPublicKeys, ","), + strings.Join(peerIDs, ","), + strings.Join(transmitters, ","), + ) + fmt.Println(vrfCommand) + + fmt.Println() + fmt.Println("Consumer address:", consumerAddress.String()) + fmt.Println("Consumer request command:") + requestCommand := fmt.Sprintf( + "go run . consumer-request-randomness -consumer-address %s -sub-id %s", + consumerAddress.Hex(), subID) + fmt.Println(requestCommand) + fmt.Println() + + fmt.Println("Consumer callback request command:") + callbackCommand := fmt.Sprintf( + "go run . consumer-request-callback -consumer-address %s -sub-id %s", + consumerAddress.Hex(), subID) + fmt.Println(callbackCommand) + fmt.Println() + + fmt.Println("Consumer callback batch request command:") + callbackCommand = fmt.Sprintf( + "go run . consumer-request-callback-batch -consumer-address %s -sub-id %s -batch-size ", + loadTestConsumerAddress.Hex(), subID) + fmt.Println(callbackCommand) + fmt.Println() + + fmt.Println("Consumer redeem randomness command:") + redeemCommand := fmt.Sprintf( + "go run . 
consumer-redeem-randomness -consumer-address %s -sub-id %s -request-id ", + consumerAddress.Hex(), subID) + fmt.Println(redeemCommand) + fmt.Println() +} + +func SetupNode( + e helpers.Environment, + flagSet *flag.FlagSet, + nodeIdx int, + databasePrefix, + databaseSuffixes string, + useForwarder bool, + resetDB bool, + wsUrl string, + httpUrl string, +) *cmd.SetupOCR2VRFNodePayload { + configureEnvironmentVariables((useForwarder) && (nodeIdx > 0), e.ChainID, wsUrl, httpUrl, nodeIdx, databasePrefix, databaseSuffixes) + + client := newSetupClient() + app := cmd.NewApp(client) + ctx := cli.NewContext(app, flagSet, nil) + + defer func() { + err := app.After(ctx) + helpers.PanicErr(err) + }() + + err := app.Before(ctx) + helpers.PanicErr(err) + + if resetDB { + resetDatabase(client, ctx) + } + + return setupOCR2VRFNodeFromClient(client, ctx, e) +} diff --git a/core/scripts/ocr2vrf/util.go b/core/scripts/ocr2vrf/util.go new file mode 100644 index 00000000..bccf22de --- /dev/null +++ b/core/scripts/ocr2vrf/util.go @@ -0,0 +1,619 @@ +package main + +import ( + "context" + "crypto/ed25519" + "encoding/hex" + "fmt" + "math/big" + "os" + "strconv" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + gethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/urfave/cli" + "go.dedis.ch/kyber/v3" + "go.dedis.ch/kyber/v3/group/edwards25519" + "go.dedis.ch/kyber/v3/pairing" + + "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-vrf/altbn_128" + "github.com/goplugin/plugin-vrf/dkg" + "github.com/goplugin/plugin-vrf/ocr2vrf" + ocr2vrftypes "github.com/goplugin/plugin-vrf/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/cmd" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_forwarder" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + dkgContract "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/dkg" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/load_test_beacon_consumer" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon_consumer" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_coordinator" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" +) + +var ( + suite pairing.Suite = &altbn_128.PairingSuite{} + g1 = suite.G1() + g2 = suite.G2() + tomlConfigTemplate = ` + [P2P.V2] + ListenAddresses = ["127.0.0.1:8000"] + + [Feature] + LogPoller = true + + [OCR2] + Enabled = true + + [[EVM]] + FinalityDepth = 1 + ChainID = '%d' + + [EVM.Transactions] + ForwardersEnabled = %t + + [EVM.HeadTracker] + HistoryDepth = 1 + + [[EVM.Nodes]] + Name = "chain1" + HTTPURL = "%s" + WSURL = "%s" + ` +) + +func deployDKG(e helpers.Environment) common.Address { + _, tx, _, err := dkgContract.DeployDKG(e.Owner, e.Ec) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func deployVRFCoordinator(e helpers.Environment, beaconPeriodBlocks *big.Int, linkAddress, linkEthFeed string) (common.Address, *vrf_coordinator.VRFCoordinator) { + _, tx, coordinator, err := vrf_coordinator.DeployVRFCoordinator( + e.Owner, + e.Ec, + beaconPeriodBlocks, + common.HexToAddress(linkAddress), + ) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID), coordinator +} + +func configureVRFCoordinator(e helpers.Environment, coordinator *vrf_coordinator.VRFCoordinator, maxCbGasLimit, maxCbArgsLen uint32) *gethtypes.Receipt { + tx, err := 
coordinator.SetCallbackConfig(e.Owner, vrf_coordinator.VRFCoordinatorCallbackConfig{ + MaxCallbackGasLimit: maxCbGasLimit, + MaxCallbackArgumentsLength: maxCbArgsLen, + }) + helpers.PanicErr(err) + return helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "vrf coordinator setConfig") +} + +func deployAuthorizedForwarder(e helpers.Environment, link common.Address, owner common.Address) common.Address { + _, tx, _, err := authorized_forwarder.DeployAuthorizedForwarder(e.Owner, e.Ec, link, owner, common.Address{}, []byte{}) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func setAuthorizedSenders(e helpers.Environment, forwarder common.Address, senders []common.Address) { + f, err := authorized_forwarder.NewAuthorizedForwarder(forwarder, e.Ec) + helpers.PanicErr(err) + tx, err := f.SetAuthorizedSenders(e.Owner, senders) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func deployVRFBeacon(e helpers.Environment, coordinatorAddress, linkAddress, dkgAddress, keyID string) common.Address { + keyIDBytes := decodeHexTo32ByteArray(keyID) + _, tx, _, err := vrf_beacon.DeployVRFBeacon(e.Owner, e.Ec, common.HexToAddress(linkAddress), common.HexToAddress(coordinatorAddress), common.HexToAddress(dkgAddress), keyIDBytes) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func deployVRFBeaconCoordinatorConsumer(e helpers.Environment, coordinatorAddress string, shouldFail bool, beaconPeriodBlocks *big.Int) common.Address { + _, tx, _, err := vrf_beacon_consumer.DeployBeaconVRFConsumer(e.Owner, e.Ec, common.HexToAddress(coordinatorAddress), shouldFail, beaconPeriodBlocks) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func deployLoadTestVRFBeaconCoordinatorConsumer(e helpers.Environment, coordinatorAddress string, shouldFail 
bool, beaconPeriodBlocks *big.Int) common.Address { + _, tx, _, err := load_test_beacon_consumer.DeployLoadTestBeaconVRFConsumer(e.Owner, e.Ec, common.HexToAddress(coordinatorAddress), shouldFail, beaconPeriodBlocks) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func addClientToDKG(e helpers.Environment, dkgAddress string, keyID string, clientAddress string) { + keyIDBytes := decodeHexTo32ByteArray(keyID) + + dkg, err := dkgContract.NewDKG(common.HexToAddress(dkgAddress), e.Ec) + helpers.PanicErr(err) + + tx, err := dkg.AddClient(e.Owner, keyIDBytes, common.HexToAddress(clientAddress)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func removeClientFromDKG(e helpers.Environment, dkgAddress string, keyID string, clientAddress string) { + keyIDBytes := decodeHexTo32ByteArray(keyID) + + dkg, err := dkgContract.NewDKG(common.HexToAddress(dkgAddress), e.Ec) + helpers.PanicErr(err) + + tx, err := dkg.RemoveClient(e.Owner, keyIDBytes, common.HexToAddress(clientAddress)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func setDKGConfig(e helpers.Environment, dkgAddress string, c dkgSetConfigArgs) { + oracleIdentities := toOraclesIdentityList( + helpers.ParseAddressSlice(c.onchainPubKeys), + strings.Split(c.offchainPubKeys, ","), + strings.Split(c.configPubKeys, ","), + strings.Split(c.peerIDs, ","), + strings.Split(c.transmitters, ",")) + + ed25519Suite := edwards25519.NewBlakeSHA256Ed25519() + var signingKeys []kyber.Point + for _, signingKey := range strings.Split(c.dkgSigningPubKeys, ",") { + signingKeyBytes, err := hex.DecodeString(signingKey) + helpers.PanicErr(err) + signingKeyPoint := ed25519Suite.Point() + helpers.PanicErr(signingKeyPoint.UnmarshalBinary(signingKeyBytes)) + signingKeys = append(signingKeys, signingKeyPoint) + } + + altbn128Suite := &altbn_128.PairingSuite{} + var encryptionKeys 
[]kyber.Point + for _, encryptionKey := range strings.Split(c.dkgEncryptionPubKeys, ",") { + encryptionKeyBytes, err := hex.DecodeString(encryptionKey) + helpers.PanicErr(err) + encryptionKeyPoint := altbn128Suite.G1().Point() + helpers.PanicErr(encryptionKeyPoint.UnmarshalBinary(encryptionKeyBytes)) + encryptionKeys = append(encryptionKeys, encryptionKeyPoint) + } + + keyIDBytes := decodeHexTo32ByteArray(c.keyID) + + offchainConfig, err := dkg.OffchainConfig(encryptionKeys, signingKeys, &altbn_128.G1{}, &ocr2vrftypes.PairingTranslation{ + Suite: &altbn_128.PairingSuite{}, + }) + helpers.PanicErr(err) + onchainConfig, err := dkg.OnchainConfig(dkg.KeyID(keyIDBytes)) + helpers.PanicErr(err) + + fmt.Println("dkg offchain config:", hex.EncodeToString(offchainConfig)) + fmt.Println("dkg onchain config:", hex.EncodeToString(onchainConfig)) + + _, _, f, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests( + c.deltaProgress, + c.deltaResend, + c.deltaRound, + c.deltaGrace, + c.deltaStage, + c.maxRounds, + helpers.ParseIntSlice(c.schedule), + oracleIdentities, + offchainConfig, + c.maxDurationQuery, + c.maxDurationObservation, + c.maxDurationReport, + c.maxDurationAccept, + c.maxDurationTransmit, + int(c.f), + onchainConfig) + + helpers.PanicErr(err) + + dkg := newDKG(common.HexToAddress(dkgAddress), e.Ec) + + tx, err := dkg.SetConfig(e.Owner, helpers.ParseAddressSlice(c.onchainPubKeys), helpers.ParseAddressSlice(c.transmitters), f, onchainConfig, offchainConfigVersion, offchainConfig) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func (c *vrfBeaconSetConfigArgs) setVRFBeaconConfig(e helpers.Environment, vrfBeaconAddr string) { + oracleIdentities := toOraclesIdentityList( + helpers.ParseAddressSlice(c.onchainPubKeys), + strings.Split(c.offchainPubKeys, ","), + strings.Split(c.configPubKeys, ","), + strings.Split(c.peerIDs, ","), + strings.Split(c.transmitters, ",")) + + 
confDelays := make(map[uint32]struct{}) + for _, c := range strings.Split(c.confDelays, ",") { + confDelay, err := strconv.ParseUint(c, 0, 32) + helpers.PanicErr(err) + confDelays[uint32(confDelay)] = struct{}{} + } + + onchainConfig := ocr2vrf.OnchainConfig(confDelays) + + _, _, f, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests( + c.deltaProgress, + c.deltaResend, + c.deltaRound, + c.deltaGrace, + c.deltaStage, + c.maxRounds, + helpers.ParseIntSlice(c.schedule), + oracleIdentities, + ocr2vrf.OffchainConfig(&c.coordinatorConfig), // off-chain config + c.maxDurationQuery, + c.maxDurationObservation, + c.maxDurationReport, + c.maxDurationAccept, + c.maxDurationTransmit, + int(c.f), + onchainConfig) + + helpers.PanicErr(err) + + beacon := newVRFBeacon(common.HexToAddress(vrfBeaconAddr), e.Ec) + + tx, err := beacon.SetConfig(e.Owner, helpers.ParseAddressSlice(c.onchainPubKeys), helpers.ParseAddressSlice(c.transmitters), f, onchainConfig, offchainConfigVersion, offchainConfig) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func setProducer(e helpers.Environment, vrfCoordinatorAddr, vrfBeaconAddr string) { + coordinator := newVRFCoordinator(common.HexToAddress(vrfCoordinatorAddr), e.Ec) + + tx, err := coordinator.SetProducer(e.Owner, common.HexToAddress(vrfBeaconAddr)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func createSubscription(e helpers.Environment, vrfCoordinatorAddr string) { + coordinator := newVRFCoordinator(common.HexToAddress(vrfCoordinatorAddr), e.Ec) + + tx, err := coordinator.CreateSubscription(e.Owner) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func getSubscription(e helpers.Environment, vrfCoordinatorAddr string, subId *big.Int) vrf_coordinator.GetSubscription { + coordinator := newVRFCoordinator(common.HexToAddress(vrfCoordinatorAddr), 
e.Ec) + + sub, err := coordinator.GetSubscription(nil, subId) + helpers.PanicErr(err) + return sub +} + +// returns subscription ID that belongs to the given owner. Returns result found first +func findSubscriptionID(e helpers.Environment, vrfCoordinatorAddr string) *big.Int { + // Use most recent 500 blocks as search window. + head, err := e.Ec.BlockNumber(context.Background()) + helpers.PanicErr(err) + fopts := &bind.FilterOpts{ + Start: head - 500, + } + + coordinator := newVRFCoordinator(common.HexToAddress(vrfCoordinatorAddr), e.Ec) + subscriptionIterator, err := coordinator.FilterSubscriptionCreated(fopts, nil, []common.Address{e.Owner.From}) + helpers.PanicErr(err) + + if !subscriptionIterator.Next() { + helpers.PanicErr(fmt.Errorf("expected at leats 1 subID for the given owner %s", e.Owner.From.Hex())) + } + return subscriptionIterator.Event.SubId +} + +func addConsumer(e helpers.Environment, vrfCoordinatorAddr, consumerAddr string, subId *big.Int) { + coordinator := newVRFCoordinator(common.HexToAddress(vrfCoordinatorAddr), e.Ec) + + tx, err := coordinator.AddConsumer(e.Owner, subId, common.HexToAddress(consumerAddr)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func setPayees(e helpers.Environment, vrfBeaconAddr string, transmitters, payees []common.Address) { + beacon := newVRFBeacon(common.HexToAddress(vrfBeaconAddr), e.Ec) + + tx, err := beacon.SetPayees(e.Owner, transmitters, payees) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func eoaFundSubscription(e helpers.Environment, coordinatorAddress, linkAddress string, amount, subID *big.Int) { + linkToken, err := link_token_interface.NewLinkToken(common.HexToAddress(linkAddress), e.Ec) + helpers.PanicErr(err) + bal, err := linkToken.BalanceOf(nil, e.Owner.From) + helpers.PanicErr(err) + fmt.Println("Initial account balance:", bal, e.Owner.From.String(), "Funding amount:", amount.String()) + b, err := 
utils.ABIEncode(`[{"type":"uint256"}]`, subID) + helpers.PanicErr(err) + tx, err := linkToken.TransferAndCall(e.Owner, common.HexToAddress(coordinatorAddress), amount, b) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, fmt.Sprintf("sub ID: %d", subID)) +} + +func toOraclesIdentityList(onchainPubKeys []common.Address, offchainPubKeys, configPubKeys, peerIDs, transmitters []string) []confighelper.OracleIdentityExtra { + offchainPubKeysBytes := []types.OffchainPublicKey{} + for _, pkHex := range offchainPubKeys { + pkBytes, err := hex.DecodeString(pkHex) + helpers.PanicErr(err) + pkBytesFixed := [ed25519.PublicKeySize]byte{} + n := copy(pkBytesFixed[:], pkBytes) + if n != ed25519.PublicKeySize { + panic("wrong num elements copied") + } + + offchainPubKeysBytes = append(offchainPubKeysBytes, types.OffchainPublicKey(pkBytesFixed)) + } + + configPubKeysBytes := []types.ConfigEncryptionPublicKey{} + for _, pkHex := range configPubKeys { + pkBytes, err := hex.DecodeString(pkHex) + helpers.PanicErr(err) + + pkBytesFixed := [ed25519.PublicKeySize]byte{} + n := copy(pkBytesFixed[:], pkBytes) + if n != ed25519.PublicKeySize { + panic("wrong num elements copied") + } + + configPubKeysBytes = append(configPubKeysBytes, types.ConfigEncryptionPublicKey(pkBytesFixed)) + } + + o := []confighelper.OracleIdentityExtra{} + for index := range configPubKeys { + o = append(o, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: onchainPubKeys[index][:], + OffchainPublicKey: offchainPubKeysBytes[index], + PeerID: peerIDs[index], + TransmitAccount: types.Account(transmitters[index]), + }, + ConfigEncryptionPublicKey: configPubKeysBytes[index], + }) + } + return o +} + +func requestRandomness(e helpers.Environment, coordinatorAddress string, numWords uint16, subID, confDelay *big.Int) { + coordinator := newVRFCoordinator(common.HexToAddress(coordinatorAddress), e.Ec) + + tx, err := 
coordinator.RequestRandomness(e.Owner, confDelay, numWords, confDelay, nil) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func redeemRandomness(e helpers.Environment, coordinatorAddress string, requestID, subID *big.Int) { + coordinator := newVRFCoordinator(common.HexToAddress(coordinatorAddress), e.Ec) + + tx, err := coordinator.RedeemRandomness(e.Owner, subID, requestID, nil) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func requestRandomnessFromConsumer(e helpers.Environment, consumerAddress string, numWords uint16, subID, confDelay *big.Int) *big.Int { + consumer := newVRFBeaconCoordinatorConsumer(common.HexToAddress(consumerAddress), e.Ec) + + tx, err := consumer.TestRequestRandomness(e.Owner, numWords, subID, confDelay) + helpers.PanicErr(err) + receipt := helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + + periodBlocks, err := consumer.IBeaconPeriodBlocks(nil) + helpers.PanicErr(err) + + blockNumber := receipt.BlockNumber + periodOffset := new(big.Int).Mod(blockNumber, periodBlocks) + nextBeaconOutputHeight := new(big.Int).Sub(new(big.Int).Add(blockNumber, periodBlocks), periodOffset) + + fmt.Println("nextBeaconOutputHeight: ", nextBeaconOutputHeight) + + requestID, err := consumer.SRequestsIDs(nil, nextBeaconOutputHeight, confDelay) + helpers.PanicErr(err) + fmt.Println("requestID: ", requestID) + + return requestID +} + +func readRandomness( + e helpers.Environment, + consumerAddress string, + requestID *big.Int, + numWords int) { + consumer := newVRFBeaconCoordinatorConsumer(common.HexToAddress(consumerAddress), e.Ec) + for i := 0; i < numWords; i++ { + r, err := consumer.SReceivedRandomnessByRequestID(nil, requestID, big.NewInt(int64(i))) + helpers.PanicErr(err) + fmt.Println("random word", i, ":", r.String()) + } +} + +func requestRandomnessCallback( + e helpers.Environment, + consumerAddress string, + numWords uint16, + subID, 
confDelay *big.Int, + callbackGasLimit uint32, + args []byte, +) (requestID *big.Int) { + consumer := newVRFBeaconCoordinatorConsumer(common.HexToAddress(consumerAddress), e.Ec) + + tx, err := consumer.TestRequestRandomnessFulfillment(e.Owner, subID, numWords, confDelay, callbackGasLimit, args) + helpers.PanicErr(err) + receipt := helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "TestRequestRandomnessFulfillment") + + periodBlocks, err := consumer.IBeaconPeriodBlocks(nil) + helpers.PanicErr(err) + + blockNumber := receipt.BlockNumber + periodOffset := new(big.Int).Mod(blockNumber, periodBlocks) + nextBeaconOutputHeight := new(big.Int).Sub(new(big.Int).Add(blockNumber, periodBlocks), periodOffset) + + fmt.Println("nextBeaconOutputHeight: ", nextBeaconOutputHeight) + + requestID, err = consumer.SRequestsIDs(nil, nextBeaconOutputHeight, confDelay) + helpers.PanicErr(err) + fmt.Println("requestID: ", requestID) + + return requestID +} + +func redeemRandomnessFromConsumer(e helpers.Environment, consumerAddress string, subID, requestID *big.Int, numWords int64) { + consumer := newVRFBeaconCoordinatorConsumer(common.HexToAddress(consumerAddress), e.Ec) + + tx, err := consumer.TestRedeemRandomness(e.Owner, subID, requestID) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + + printRandomnessFromConsumer(consumer, requestID, numWords) +} + +func printRandomnessFromConsumer(consumer *vrf_beacon_consumer.BeaconVRFConsumer, requestID *big.Int, numWords int64) { + for i := int64(0); i < numWords; i++ { + randomness, err := consumer.SReceivedRandomnessByRequestID(nil, requestID, big.NewInt(0)) + helpers.PanicErr(err) + fmt.Println("random words index", i, ":", randomness.String()) + } +} + +func newVRFCoordinator(addr common.Address, client *ethclient.Client) *vrf_coordinator.VRFCoordinator { + coordinator, err := vrf_coordinator.NewVRFCoordinator(addr, client) + helpers.PanicErr(err) + return coordinator +} + +func 
newDKG(addr common.Address, client *ethclient.Client) *dkgContract.DKG { + dkg, err := dkgContract.NewDKG(addr, client) + helpers.PanicErr(err) + return dkg +} + +func newVRFBeaconCoordinatorConsumer(addr common.Address, client *ethclient.Client) *vrf_beacon_consumer.BeaconVRFConsumer { + consumer, err := vrf_beacon_consumer.NewBeaconVRFConsumer(addr, client) + helpers.PanicErr(err) + return consumer +} + +func newLoadTestVRFBeaconCoordinatorConsumer(addr common.Address, client *ethclient.Client) *load_test_beacon_consumer.LoadTestBeaconVRFConsumer { + consumer, err := load_test_beacon_consumer.NewLoadTestBeaconVRFConsumer(addr, client) + helpers.PanicErr(err) + return consumer +} + +func newVRFBeacon(addr common.Address, client *ethclient.Client) *vrf_beacon.VRFBeacon { + beacon, err := vrf_beacon.NewVRFBeacon(addr, client) + helpers.PanicErr(err) + return beacon +} + +func decodeHexTo32ByteArray(val string) (byteArray [32]byte) { + decoded, err := hex.DecodeString(val) + helpers.PanicErr(err) + if len(decoded) != 32 { + panic(fmt.Sprintf("expected value to be 32 bytes but received %d bytes", len(decoded))) + } + copy(byteArray[:], decoded) + return +} + +func setupOCR2VRFNodeFromClient(client *cmd.Shell, context *cli.Context, e helpers.Environment) *cmd.SetupOCR2VRFNodePayload { + payload, err := client.ConfigureOCR2VRFNode(context, e.Owner, e.Ec) + helpers.PanicErr(err) + + return payload +} + +func configureEnvironmentVariables(useForwarder bool, chainID int64, wsUrl string, ethURL string, index int, databasePrefix string, databaseSuffixes string) { + // Set permitted envars for v2. + helpers.PanicErr(os.Setenv("CL_DATABASE_URL", fmt.Sprintf("%s-%d?%s", databasePrefix, index, databaseSuffixes))) + helpers.PanicErr(os.Setenv("CL_CONFIG", fmt.Sprintf(tomlConfigTemplate, chainID, useForwarder, ethURL, wsUrl))) + + // Unset prohibited envars for v2. 
+ helpers.PanicErr(os.Unsetenv("ETH_URL")) + helpers.PanicErr(os.Unsetenv("ETH_HTTP_URL")) + helpers.PanicErr(os.Unsetenv("ETH_CHAIN_ID")) +} + +func resetDatabase(client *cmd.Shell, context *cli.Context) { + helpers.PanicErr(client.ResetDatabase(context)) +} + +func newSetupClient() *cmd.Shell { + prompter := cmd.NewTerminalPrompter() + return &cmd.Shell{ + Renderer: cmd.RendererTable{Writer: os.Stdout}, + AppFactory: cmd.PluginAppFactory{}, + KeyStoreAuthenticator: cmd.TerminalKeyStoreAuthenticator{Prompter: prompter}, + FallbackAPIInitializer: cmd.NewPromptingAPIInitializer(prompter), + Runner: cmd.PluginRunner{}, + PromptingSessionRequestBuilder: cmd.NewPromptingSessionRequestBuilder(prompter), + ChangePasswordPrompter: cmd.NewChangePasswordPrompter(), + PasswordPrompter: cmd.NewPasswordPrompter(), + } +} + +func requestRandomnessCallbackBatch( + e helpers.Environment, + consumerAddress string, + numWords uint16, + subID, confDelay *big.Int, + callbackGasLimit uint32, + args []byte, + batchSize *big.Int, +) (requestID *big.Int) { + consumer := newLoadTestVRFBeaconCoordinatorConsumer(common.HexToAddress(consumerAddress), e.Ec) + + tx, err := consumer.TestRequestRandomnessFulfillmentBatch(e.Owner, subID, numWords, confDelay, callbackGasLimit, args, batchSize) + helpers.PanicErr(err) + receipt := helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "TestRequestRandomnessFulfillment") + + periodBlocks, err := consumer.IBeaconPeriodBlocks(nil) + helpers.PanicErr(err) + + blockNumber := receipt.BlockNumber + periodOffset := new(big.Int).Mod(blockNumber, periodBlocks) + nextBeaconOutputHeight := new(big.Int).Sub(new(big.Int).Add(blockNumber, periodBlocks), periodOffset) + + fmt.Println("nextBeaconOutputHeight: ", nextBeaconOutputHeight) + + requestID, err = consumer.SRequestsIDs(nil, nextBeaconOutputHeight, confDelay) + helpers.PanicErr(err) + fmt.Println("requestID: ", requestID) + + return requestID +} + +func printLoadtestResults(e helpers.Environment, 
consumerAddress string) { + consumer := newLoadTestVRFBeaconCoordinatorConsumer(common.HexToAddress(consumerAddress), e.Ec) + + totalRequests, err := consumer.STotalRequests(nil) + helpers.PanicErr(err) + + totalFulfilled, err := consumer.STotalFulfilled(nil) + helpers.PanicErr(err) + + avgBlocksInMil, err := consumer.SAverageFulfillmentInMillions(nil) + helpers.PanicErr(err) + + slowestBlocks, err := consumer.SSlowestFulfillment(nil) + helpers.PanicErr(err) + + fastestBlock, err := consumer.SFastestFulfillment(nil) + helpers.PanicErr(err) + + slowestRequest, err := consumer.SSlowestRequestID(nil) + helpers.PanicErr(err) + + pendingRequests, err := consumer.PendingRequests(nil) + helpers.PanicErr(err) + + fmt.Println("Total Requests: ", totalRequests.Uint64()) + fmt.Println("Total Fulfilled: ", totalFulfilled.Uint64()) + fmt.Println("Average Fulfillment Delay in Blocks: ", float64(avgBlocksInMil.Uint64())/1000000) + fmt.Println("Slowest Fulfillment Delay in Blocks: ", slowestBlocks.Uint64()) + fmt.Println("Slowest Request ID: ", slowestRequest.Uint64()) + fmt.Println("Fastest Fulfillment Delay in Blocks: ", fastestBlock.Uint64()) + fmt.Println("Pending Requests: ", pendingRequests) +} diff --git a/core/scripts/ocr2vrf/verify.go b/core/scripts/ocr2vrf/verify.go new file mode 100644 index 00000000..b020f93b --- /dev/null +++ b/core/scripts/ocr2vrf/verify.go @@ -0,0 +1,181 @@ +package main + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/vm" + bn256 "github.com/ethereum/go-ethereum/crypto/bn256/google" + "go.dedis.ch/kyber/v3" + "go.dedis.ch/kyber/v3/group/mod" + "go.dedis.ch/kyber/v3/pairing" + + "github.com/goplugin/plugin-vrf/altbn_128" + ocr2vrftypes "github.com/goplugin/plugin-vrf/types" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + dkgContract "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/dkg" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_coordinator" +) + +func getDKGLatestConfigDetails(e helpers.Environment, dkgAddress string) dkgContract.LatestConfigDetails { + dkg := newDKG(common.HexToAddress(dkgAddress), e.Ec) + dkgConfig, err := dkg.LatestConfigDetails(nil) + helpers.PanicErr(err) + + return dkgConfig +} + +func getVRFLatestConfigDetails(e helpers.Environment, beaconAddress string) vrf_beacon.LatestConfigDetails { + beacon := newVRFBeacon(common.HexToAddress(beaconAddress), e.Ec) + beaconConfig, err := beacon.LatestConfigDetails(nil) + helpers.PanicErr(err) + + return beaconConfig +} + +func getDKGKeyData(e helpers.Environment, dkgAddress string, keyID, configDigest [32]byte) dkgContract.KeyDataStructKeyData { + dkg := newDKG(common.HexToAddress(dkgAddress), e.Ec) + keyData, err := dkg.GetKey(nil, keyID, configDigest) + helpers.PanicErr(err) + + return keyData +} + +func getKeyID(e helpers.Environment, beaconAddress string) [32]byte { + beacon := newVRFBeacon(common.HexToAddress(beaconAddress), e.Ec) + keyID, err := beacon.SKeyID(nil) + helpers.PanicErr(err) + return keyID +} + +func getPublicKey(e helpers.Environment, dkgAddress string, keyID, configDigest [32]byte) kyber.Point { + keyData := getDKGKeyData(e, dkgAddress, keyID, configDigest) + kg := &altbn_128.G2{} + pk := kg.Point() + err := pk.UnmarshalBinary(keyData.PublicKey) + helpers.PanicErr(err) + return pk +} + +func getHashToCurveMessage(e helpers.Environment, height uint64, confDelay uint32, vrfConfigDigest [32]byte, pk kyber.Point) *altbn_128.HashProof { + blockNumber := big.NewInt(0).SetUint64(height) + block, err := e.Ec.BlockByNumber(context.Background(), blockNumber) + helpers.PanicErr(err) + b := ocr2vrftypes.Block{ + Height: height, + ConfirmationDelay: confDelay, + Hash: block.Hash(), + } + h := b.VRFHash(vrfConfigDigest, pk) + return altbn_128.NewHashProof(h) +} + 
+func getVRFSignature(e helpers.Environment, coordinatorAddress string, height, confDelay, searchWindow uint64) (proofG1X, proofG1Y *big.Int) { + // get transmission logs from requested block to requested block + search window blocks + // TODO: index transmission logs by height and confirmation delay to + // make the FilterQuery call more efficient + query := ethereum.FilterQuery{ + FromBlock: big.NewInt(0).SetUint64(height), + ToBlock: big.NewInt(0).SetUint64(height + searchWindow), + Addresses: []common.Address{ + common.HexToAddress(coordinatorAddress), + }, + Topics: [][]common.Hash{ + { + vrf_coordinator.VRFCoordinatorOutputsServed{}.Topic(), + }, + }, + } + logs, err := e.Ec.FilterLogs(context.Background(), query) + helpers.PanicErr(err) + + coordinator := newVRFCoordinator(common.HexToAddress(coordinatorAddress), e.Ec) + for _, log := range logs { + t, err := coordinator.ParseOutputsServed(log) + helpers.PanicErr(err) + for _, o := range t.OutputsServed { + if o.ConfirmationDelay.Uint64() == confDelay && o.Height == height { + proofG1X = o.ProofG1X + proofG1Y = o.ProofG1Y + } + } + } + return +} + +func verifyBeaconRandomness(e helpers.Environment, dkgAddress, beaconAddress string, coordinatorAddress string, height, confDelay, searchWindow uint64) bool { + dkgConfig := getDKGLatestConfigDetails(e, dkgAddress) + vrfConfig := getVRFLatestConfigDetails(e, beaconAddress) + keyID := getKeyID(e, beaconAddress) + pk := getPublicKey(e, dkgAddress, keyID, dkgConfig.ConfigDigest) + h := getHashToCurveMessage(e, height, uint32(confDelay), vrfConfig.ConfigDigest, pk) + hpoint := h.HashPoint + negHpoint := g1.Point() + negHpoint.Neg(hpoint) + g2Base := g2.Point().Base() + + // get BLS signature for the given height and confirmation delay + proofG1X, proofG1Y := getVRFSignature(e, coordinatorAddress, height, confDelay, searchWindow) + if proofG1X.Cmp(big.NewInt(0)) == 0 || proofG1Y.Cmp(big.NewInt(0)) == 0 { + panic("signature not found") + } + g1Proof, err := 
altbn_128.CoordinatesToG1(mod.NewInt(proofG1X, bn256.P), mod.NewInt(proofG1Y, bn256.P)) + helpers.PanicErr(err) + + // Perform verification of BLS signature is done using pairing function + isValid := validateSignature(suite, hpoint, pk, g1Proof) + fmt.Println("Verification Result: ", isValid) + + // Perform the same verification as above using precompiled contract 0x8 + // This should always result in same result as validateSignature() + // signature is valid iff contract0x8(-b_x, -b_y, pk_x, pk_y, p_x, p_y, g2_x, g2_y) == 1 + input := make([]byte, 384) + hb := altbn_128.LongMarshal(negHpoint) + if len(hb) != 64 { + panic("wrong length of hpoint") + } + copy(input[:64], hb[:]) + + pkb, err := pk.MarshalBinary() + helpers.PanicErr(err) + if len(pkb) != 128 { + panic("wrong length of public key") + } + copy(input[64:192], pkb) + + if len(proofG1X.Bytes()) != 32 { + panic("wrong length of VRF signature x-coordinator") + } + if len(proofG1Y.Bytes()) != 32 { + panic("wrong length of VRF signature y-coordinator") + } + copy(input[192:224], proofG1X.Bytes()) + copy(input[224:256], proofG1Y.Bytes()) + + g2b, err := g2Base.MarshalBinary() + helpers.PanicErr(err) + if len(g2b) != 128 { + panic("wrong length of altbn_128 base points") + } + copy(input[256:384], g2b) + + contract := vm.PrecompiledContractsByzantium[common.HexToAddress("0x8")] + res, err := contract.Run(input) + helpers.PanicErr(err) + isValidPrecompiledContract := big.NewInt(0).SetBytes(res).Uint64() == 1 + fmt.Println("Verification Result Using Precompiled Contract 0x8: ", isValidPrecompiledContract) + + if isValid && isValidPrecompiledContract { + return true + } + return false +} + +func validateSignature(p pairing.Suite, msg, publicKey, signature kyber.Point) bool { + return p.Pair(msg, publicKey).Equal(p.Pair(signature, p.G2().Point().Base())) +} diff --git a/core/scripts/vrfv1/README.md b/core/scripts/vrfv1/README.md new file mode 100644 index 00000000..c09a1d56 --- /dev/null +++ 
b/core/scripts/vrfv1/README.md @@ -0,0 +1,122 @@ +# Using the Ownerless Consumer Example + +The [ownerless consumer example contract](../../../contracts/src/v0.8/tests/VRFOwnerlessConsumerExample.sol) +allows anyone to request randomness from VRF V1 without needing to deploy their +own consuming contract. It does not hold any ETH or PLI; a caller must send it +PLI and spend that PLI on a randomness request within the same transaction. + +This guide covers requesting randomness and optionally deploying the contract. + +## Setup + +Before starting, you will need: + 1. An EVM chain endpoint URL + 2. The chain ID corresponding to your chain + 3. The private key of an account funded with PLI, and the chain's native token + (to pay transaction fees) + 4. [The PLI address, VRF coordinator address, and key hash](https://docs.chain.link/docs/vrf-contracts/) + for your chain + 5. [Go](https://go.dev/doc/install) + +The endpoint URL can be a locally running node, or an externally hosted one like +[alchemy](https://www.alchemy.com/). Your chain ID will be a number +corresponding to the chain you pick. For example the Rinkeby testnet has chain +ID 4. Your private key can be exported from [MetaMask](https://metamask.zendesk.com/hc/en-us/articles/360015289632-How-to-Export-an-Account-Private-Key). + +Note: Be careful with your key. When using testnets, it's best to use a separate +account that does not hold real funds. + +Run the following command to set up your environment: + +```shell +export ETH_URL= +export ETH_CHAIN_ID= +export ACCOUNT_KEY= +export PLI= +export COORDINATOR= +export KEY_HASH= +``` + +Now "cd" into the VRF V1 scripts directory: + +```shell +cd /core/scripts/vrfv1 +``` + +## Getting a Consumer + +Since this contract is ownerless, you can use an existing instance instead of +deploying your own. To use an existing instance, copy the command corresponding +to the chain you want to use below, otherwise go to the +[deployment](#deploying-a-new-consumer) section. 
+ +Once you have chosen or deployed a consumer, run: +```shell +export CONSUMER= +``` + +### Existing Consumers + +#### Testnets + +##### Ethereum Rinkeby Testnet + +```0x1b7D5F1bD3054474cC043207aA1e7f8C152d263F``` + +#### BSC Testnet + +```0x640F2D8fd734cb53a6938CeC4CfC0543BbcC0348``` + +#### Polygon Mumbai Testnet + +```0x640F2D8fd734cb53a6938CeC4CfC0543BbcC0348``` + +### Deploying a New Consumer + +To deploy the contract, run: +```shell +go run main.go ownerless-consumer-deploy --coordinator-address=$COORDINATOR --link-address=$PLI +``` + +You should see output like: +``` +Ownerless Consumer: TX Hash: +``` + +## Requesting Randomness + +Since the ownerless consumer does not hold PLI funds, it can only request +randomness through a transferAndCall from the +[PLI contract](../../../contracts/src/v0.4/LinkToken.sol). The transaction has +the following steps: +1. An externally owned account (controlled by your private key) initiates a + transferAndCall on the LinkToken contract. +2. The LinkToken contract transfers funds to the ownerless consumer. +3. The ownerless consumer requests randomness from the + [VRF Coordinator](../../../contracts/src/v0.6/VRFCoordinator.sol), using the + PLI from step 2 to pay for it. + +To request randomness for your chosen consumer, run: +```shell +go run main.go ownerless-consumer-request --link-address=$PLI --consumer-address=$CONSUMER --key-hash=$KEY_HASH +``` + +You should see the output: +``` +TX Hash: +``` + +You can put this transaction hash into a block explorer to check its progress. +Shortly after it's confirmed, usually only a few minutes, you should see a +second incoming transaction to your consumer containing the randomness +result. + +## Debugging Reverted Transactions + +A reverted transaction could have number of root causes, for example +insufficient funds / PLI, or incorrect contract addresses. + +[Tenderly](https://dashboard.tenderly.co/explorer) can be useful for debugging +why a transaction failed. 
For example [this Rinkeby transaction](https://dashboard.tenderly.co/tx/rinkeby/0x71a7279033b47472ca453f7a19ccb685d0f32cdb4854a45052f1aaccd80436e9) +failed because a non-owner tried to request random words from +[VRFExternalSubOwnerExample](../../../../contracts/src/v0.8/tests/VRFExternalSubOwnerExample.sol). diff --git a/core/scripts/vrfv1/main.go b/core/scripts/vrfv1/main.go new file mode 100644 index 00000000..e0473656 --- /dev/null +++ b/core/scripts/vrfv1/main.go @@ -0,0 +1,251 @@ +package main + +import ( + "context" + "encoding/hex" + "flag" + "fmt" + "math/big" + "os" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/google/uuid" + "github.com/shopspring/decimal" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + linktoken "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" + vrfltoc "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_load_test_ownerless_consumer" + vrfoc "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_ownerless_consumer_example" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func main() { + e := helpers.SetupEnv(false) + + switch os.Args[1] { + case "topics": + randomnessRequestTopic := solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest{}.Topic() + randomnessFulfilledTopic := solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequestFulfilled{}.Topic() + fmt.Println("RandomnessRequest:", randomnessRequestTopic.String(), + "RandomnessRequestFulfilled:", randomnessFulfilledTopic.String()) + case "request-report": + cmd := flag.NewFlagSet("request-report", 
flag.ExitOnError) + txHashes := cmd.String("tx-hashes", "", "comma separated transaction hashes") + requestIDs := cmd.String("request-ids", "", "comma separated request IDs in hex") + bhsAddress := cmd.String("bhs-address", "", "BHS contract address") + coordinatorAddress := cmd.String("coordinator-address", "", "VRF coordinator address") + + helpers.ParseArgs(cmd, os.Args[2:], "tx-hashes", "bhs-address", "request-ids", "coordinator-address") + + hashes := helpers.ParseHashSlice(*txHashes) + reqIDs := parseRequestIDs(*requestIDs) + bhs, err := blockhash_store.NewBlockhashStore( + common.HexToAddress(*bhsAddress), + e.Ec) + helpers.PanicErr(err) + coordinator, err := solidity_vrf_coordinator_interface.NewVRFCoordinator( + common.HexToAddress(*coordinatorAddress), + e.Ec) + helpers.PanicErr(err) + + if len(hashes) != len(reqIDs) { + panic(fmt.Errorf("len(hashes) [%d] != len(reqIDs) [%d]", len(hashes), len(reqIDs))) + } + + var bhsMissedBlocks []*big.Int + for i := range hashes { + receipt, err := e.Ec.TransactionReceipt(context.Background(), hashes[i]) + helpers.PanicErr(err) + + reqID := reqIDs[i] + callbacks, err := coordinator.Callbacks(nil, reqID) + helpers.PanicErr(err) + fulfilled := utils.IsEmpty(callbacks.SeedAndBlockNum[:]) + + _, err = bhs.GetBlockhash(nil, receipt.BlockNumber) + if err != nil { + fmt.Println("Blockhash for block", receipt.BlockNumber, "not stored (tx", hashes[i].String(), + ", request ID", hex.EncodeToString(reqID[:]), ", fulfilled:", fulfilled, ")") + if !fulfilled { + // not fulfilled and bh not stored means the feeder missed a store + bhsMissedBlocks = append(bhsMissedBlocks, receipt.BlockNumber) + } + } else { + fmt.Println("Blockhash for block", receipt.BlockNumber, "stored (tx", hashes[i].String(), + ", request ID", hex.EncodeToString(reqID[:]), ", fulfilled:", fulfilled, ")") + } + } + + if len(bhsMissedBlocks) == 0 { + fmt.Println("Didn't miss any bh stores!") + return + } + fmt.Println("Missed stores:") + for _, blockNumber := 
range bhsMissedBlocks { + fmt.Println("\t* ", blockNumber.String()) + } + case "get-receipt": + cmd := flag.NewFlagSet("get-tx", flag.ExitOnError) + txHashes := cmd.String("tx-hashes", "", "comma separated transaction hashes") + helpers.ParseArgs(cmd, os.Args[2:], "tx-hashes") + hashes := helpers.ParseHashSlice(*txHashes) + + for _, h := range hashes { + receipt, err := e.Ec.TransactionReceipt(context.Background(), h) + helpers.PanicErr(err) + fmt.Println("Tx", h.String(), "Included in block:", receipt.BlockNumber, + ", blockhash:", receipt.BlockHash.String()) + } + case "get-callback": + cmd := flag.NewFlagSet("get-callback", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "VRF coordinator address") + requestIDs := cmd.String("request-ids", "", "comma separated request IDs in hex") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "request-ids") + coordinator, err := solidity_vrf_coordinator_interface.NewVRFCoordinator( + common.HexToAddress(*coordinatorAddress), + e.Ec) + helpers.PanicErr(err) + reqIDs := parseRequestIDs(*requestIDs) + for _, reqID := range reqIDs { + callbacks, err := coordinator.Callbacks(nil, reqID) + helpers.PanicErr(err) + if utils.IsEmpty(callbacks.SeedAndBlockNum[:]) { + fmt.Println("request", hex.EncodeToString(reqID[:]), "fulfilled") + } else { + fmt.Println("request", hex.EncodeToString(reqID[:]), "not fulfilled") + } + } + case "coordinator-deploy": + cmd := flag.NewFlagSet("coordinator-deploy", flag.ExitOnError) + linkAddress := cmd.String("link-address", "", "PLI token contract address") + bhsAddress := cmd.String("bhs-address", "", "blockhash store contract address") + helpers.ParseArgs(cmd, os.Args[2:], "link-address", "bhs-address") + _, tx, _, err := solidity_vrf_coordinator_interface.DeployVRFCoordinator( + e.Owner, e.Ec, common.HexToAddress(*linkAddress), common.HexToAddress(*bhsAddress)) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, 
e.ChainID) + case "coordinator-register-key": + cmd := flag.NewFlagSet("coordinator-register-key", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "address of VRF coordinator") + pubKeyUncompressed := cmd.String("pubkey-uncompressed", "", "uncompressed VRF public key in hex") + oracleAddress := cmd.String("oracle-address", "", "oracle address") + fee := cmd.String("fee", "", "VRF fee in juels") + jobID := cmd.String("job-id", "", "Job UUID on the plugin node (UUID)") + helpers.ParseArgs(cmd, os.Args[2:], + "coordinator-address", "pubkey-uncompressed", "oracle-address", "fee", "job-id") + + coordinator, err := solidity_vrf_coordinator_interface.NewVRFCoordinator( + common.HexToAddress(*coordinatorAddress), + e.Ec) + helpers.PanicErr(err) + + // Put key in ECDSA format + if strings.HasPrefix(*pubKeyUncompressed, "0x") { + *pubKeyUncompressed = strings.Replace(*pubKeyUncompressed, "0x", "04", 1) + } + pubBytes, err := hex.DecodeString(*pubKeyUncompressed) + helpers.PanicErr(err) + pk, err := crypto.UnmarshalPubkey(pubBytes) + helpers.PanicErr(err) + + uid := uuid.MustParse(*jobID) + tx, err := coordinator.RegisterProvingKey( + e.Owner, + decimal.RequireFromString(*fee).BigInt(), + common.HexToAddress(*oracleAddress), + [2]*big.Int{pk.X, pk.Y}, + job.ExternalJobIDEncodeStringToTopic(uid), + ) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "ownerless-consumer-deploy": + cmd := flag.NewFlagSet("ownerless-consumer-deploy", flag.ExitOnError) + coordAddr := cmd.String("coordinator-address", "", "address of VRF coordinator") + linkAddr := cmd.String("link-address", "", "address of link token") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "link-address") + _, tx, _, err := vrfoc.DeployVRFOwnerlessConsumerExample( + e.Owner, + e.Ec, + common.HexToAddress(*coordAddr), + common.HexToAddress(*linkAddr)) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), 
e.Ec, tx, e.ChainID) + case "loadtest-ownerless-consumer-deploy": + cmd := flag.NewFlagSet("loadtest-ownerless-consumer-deploy", flag.ExitOnError) + coordAddr := cmd.String("coordinator-address", "", "address of VRF coordinator") + linkAddr := cmd.String("link-address", "", "address of link token") + priceStr := cmd.String("price", "", "the price of each VRF request in Juels") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "link-address") + price := decimal.RequireFromString(*priceStr).BigInt() + _, tx, _, err := vrfltoc.DeployVRFLoadTestOwnerlessConsumer( + e.Owner, + e.Ec, + common.HexToAddress(*coordAddr), + common.HexToAddress(*linkAddr), + price) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + case "ownerless-consumer-request": + cmd := flag.NewFlagSet("ownerless-consumer-request", flag.ExitOnError) + linkAddr := cmd.String("link-address", "", "address of link token") + consumerAddr := cmd.String("consumer-address", "", "address of the deployed ownerless consumer") + paymentStr := cmd.String("payment", "" /* 0.1 PLI */, "the payment amount in PLI") + keyHash := cmd.String("key-hash", "", "key hash") + helpers.ParseArgs(cmd, os.Args[2:], "link-address", "consumer-address", "payment", "key-hash") + payment, ok := big.NewInt(0).SetString(*paymentStr, 10) + if !ok { + panic(fmt.Sprintf("failed to parse payment amount: %s", *paymentStr)) + } + link, err := linktoken.NewLinkToken(common.HexToAddress(*linkAddr), e.Ec) + helpers.PanicErr(err) + data, err := evmutils.ABIEncode(`[{"type":"bytes32"}]`, common.HexToHash(*keyHash)) + helpers.PanicErr(err) + tx, err := link.TransferAndCall(e.Owner, common.HexToAddress(*consumerAddr), payment, data) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "load-test-read": + cmd := flag.NewFlagSet("load-test-read", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "load test consumer 
address") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + consumer, err := vrfltoc.NewVRFLoadTestOwnerlessConsumer(common.HexToAddress(*consumerAddress), e.Ec) + helpers.PanicErr(err) + count, err := consumer.SResponseCount(nil) + helpers.PanicErr(err) + fmt.Println("response count:", count.String(), "consumer:", *consumerAddress) + case "ownerless-consumer-read": + cmd := flag.NewFlagSet("ownerless-consumer-read", flag.ExitOnError) + consumerAddr := cmd.String("consumer-address", "", "address of the deployed ownerless consumer") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + consumer, err := vrfoc.NewVRFOwnerlessConsumerExample( + common.HexToAddress(*consumerAddr), + e.Ec) + helpers.PanicErr(err) + requestID, err := consumer.SRequestId(nil) + helpers.PanicErr(err) + fmt.Println("request ID:", requestID) + output, err := consumer.SRandomnessOutput(nil) + helpers.PanicErr(err) + fmt.Println("randomness:", output) + } +} + +func parseRequestIDs(arg string) (ret [][32]byte) { + split := strings.Split(arg, ",") + for _, rid := range split { + if strings.HasPrefix(rid, "0x") { + rid = strings.Replace(rid, "0x", "", 1) + } + reqID, err := hex.DecodeString(rid) + helpers.PanicErr(err) + var reqIDFixed [32]byte + copy(reqIDFixed[:], reqID) + ret = append(ret, reqIDFixed) + } + return +} diff --git a/core/scripts/vrfv2/genvrfnum/main.go b/core/scripts/vrfv2/genvrfnum/main.go new file mode 100644 index 00000000..fa8943f5 --- /dev/null +++ b/core/scripts/vrfv2/genvrfnum/main.go @@ -0,0 +1,255 @@ +package main + +import ( + "encoding/csv" + "encoding/hex" + "flag" + "fmt" + "os" + "runtime" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/shopspring/decimal" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func main() { + switch os.Args[1] { + case "gen-vrf-key": + cmd := flag.NewFlagSet("gen-vrf-key", flag.ExitOnError) + password := cmd.String("pw", "", "password to encrypt key with") + outfile := cmd.String("o", "key.json", "path to output file") + helpers.ParseArgs(cmd, os.Args[2:], "pw") + key, err := vrfkey.NewV2() + helpers.PanicErr(err) + exportJSON, err := key.ToEncryptedJSON(*password, utils.DefaultScryptParams) + helpers.PanicErr(err) + err = os.WriteFile(*outfile, exportJSON, 0600) + helpers.PanicErr(err) + fmt.Println("generated vrf key", key.PublicKey.String(), "and saved encrypted in", *outfile) + case "gen-vrf-numbers": + cmd := flag.NewFlagSet("gen-vrf-numbers", flag.ExitOnError) + keyPath := cmd.String("key", "key.json", "path to encrypted key contents") + numCount := cmd.Int("n", 100, "how many numbers to generate") + password := cmd.String("pw", "", "password to decrypt the key with") + outfile := cmd.String("o", "randomnumbers.csv", "path to output file") + // preseed information + senderAddr := cmd.String("sender", "", "sender of the requestRandomWords tx") + subID := cmd.Uint64("subid", 1, "sub id") + // seed information - can be fetched from a real chain's explorer + blockhashStr := cmd.String("blockhash", "", "blockhash the request is in") + blockNum := cmd.Uint64("blocknum", 10, "block number the request is in") + cbGasLimit := cmd.Uint("cb-gas-limit", 100_000, "callback gas limit") + numWords := cmd.Uint("num-words", 1, "num words") + numWorkers := cmd.Uint64("num-workers", uint64(runtime.NumCPU()), "num workers") + + helpers.ParseArgs(cmd, os.Args[2:], "pw", "sender", "blockhash") + + fileBytes, err := os.ReadFile(*keyPath) + helpers.PanicErr(err) + key, err := 
vrfkey.FromEncryptedJSON(fileBytes, *password) + helpers.PanicErr(err) + + keyHash := key.PublicKey.MustHash() + sender := common.HexToAddress(*senderAddr) + blockhash := common.HexToHash(*blockhashStr) + + // columns: + // (keyHashHex, senderAddrHex, subID, nonce) preseed info + // (preSeed, blockhash, blocknum, subID, cbGasLimit, numWords, senderAddrHex) + // pubKeyHex, keyHashHex, senderAddrHex, subID, nonce, preSeed, blockhash, blocknum, cbGasLimit, numWords, finalSeed, proof..., randomNumber + header := []string{ + "keyHashHex", "senderAddrHex", "subID", "nonce", "preSeed", "blockhash", + "blocknum", "cbGasLimit", "numWords", "finalSeed", + "proofPubKey", "proofGamma", "proofC", "proofS", "proofSeed", + "randomNumber", + } + + genProofs := func( + nonceRange []uint64, + outChan chan []string) { + numIters := 0 + for nonce := nonceRange[0]; nonce <= nonceRange[1]; nonce++ { + var record []string + + // construct preseed using typical preseed data + preSeed := preseed(keyHash, sender, *subID, nonce) + record = append(record, + keyHash.String(), sender.String(), // keyHash, sender addr + fmt.Sprintf("%d", *subID), fmt.Sprintf("%d", nonce), hexutil.Encode(preSeed[:]), // subId, nonce, preseed + *blockhashStr, fmt.Sprintf("%d", *blockNum), // blockhash, blocknum + fmt.Sprintf("%d", *cbGasLimit), fmt.Sprintf("%d", *numWords)) // cb gas limit, num words + + preseedData := proof.PreSeedDataV2{ + PreSeed: preSeed, + BlockHash: blockhash, + BlockNum: *blockNum, + SubId: *subID, + CallbackGasLimit: uint32(*cbGasLimit), + NumWords: uint32(*numWords), + Sender: sender, + } + finalSeed := proof.FinalSeedV2(preseedData) + + record = append(record, finalSeed.String()) + + // generate proof + pf, err2 := key.GenerateProof(finalSeed) + helpers.PanicErr(err2) + + record = append(record, + hex.EncodeToString(secp256k1.LongMarshal(pf.PublicKey)), // pub key + hex.EncodeToString(secp256k1.LongMarshal(pf.Gamma)), // gamma + pf.C.String(), pf.S.String(), // c, s + pf.Seed.String(), 
pf.Output.String()) // seed, output + + if len(record) != len(header) { + panic("record length doesn't match header length - update one of them?") + } + outChan <- record + numIters++ + } + fmt.Println("genProofs worker wrote", numIters, "records to channel") + } + + outFile, err := os.Create(*outfile) + wc := utils.NewDeferableWriteCloser(outFile) + defer wc.Close() + helpers.PanicErr(err) + + csvWriter := csv.NewWriter(outFile) + helpers.PanicErr(csvWriter.Write(header)) + gather := func(outChan chan []string) { + for { + select { + case row := <-outChan: + helpers.PanicErr(csvWriter.Write(row)) + case <-time.After(500 * time.Millisecond): + // if no work is produced in this much time, we're probably done + return + } + } + } + + ranges := nonceRanges(1, uint64(*numCount), *numWorkers) + + fmt.Println("nonce ranges:", ranges, "generating proofs...") + + outC := make(chan []string) + + for _, nonceRange := range ranges { + go genProofs( + nonceRange, + outC) + } + + gather(outC) + csvWriter.Flush() + if csvWriter.Error() != nil { + helpers.PanicErr(err) + } + if err := wc.Close(); err != nil { + helpers.PanicErr(err) + } + case "verify-vrf-proofs": + cmd := flag.NewFlagSet("verify-vrf-proofs", flag.ExitOnError) + csvPath := cmd.String("p", "randomnumbers.csv", "path to csv file generated by gen-vrf-numbers") + numWorkers := cmd.Int("num-workers", runtime.NumCPU()-1, "number of workers to run verification") + + helpers.ParseArgs(cmd, os.Args[2:]) + + numValid := &atomic.Int64{} + proofsChan := make(chan *vrfkey.Proof) + + verify := func(inC chan *vrfkey.Proof) { + for { + select { + case pf := <-inC: + valid, err := pf.VerifyVRFProof() + helpers.PanicErr(err) + if !valid { + fmt.Println("proof", pf.String(), "is not valid", "total valid proofs:", numValid) + panic("found invalid proof") + } + numValid.Add(1) + case <-time.After(250 * time.Millisecond): + return + } + } + } + + for i := 0; i < *numWorkers; i++ { + go verify(proofsChan) + } + + f, err := 
os.Open(*csvPath) + helpers.PanicErr(err) + defer f.Close() + reader := csv.NewReader(f) + _, err = reader.Read() // read the column titles + helpers.PanicErr(err) + for { + rec, err := reader.Read() + if err != nil { + break + } + proofPubKey, err := secp256k1.LongUnmarshal(hexutil.MustDecode("0x" + rec[len(rec)-6])) + helpers.PanicErr(err) + proofGamma, err := secp256k1.LongUnmarshal(hexutil.MustDecode("0x" + rec[len(rec)-5])) + helpers.PanicErr(err) + proofC := decimal.RequireFromString(rec[len(rec)-4]).BigInt() + proofS := decimal.RequireFromString(rec[len(rec)-3]).BigInt() + proofSeed := decimal.RequireFromString(rec[len(rec)-2]).BigInt() + proofOutput := decimal.RequireFromString(rec[len(rec)-1]).BigInt() + + pf := &vrfkey.Proof{ + PublicKey: proofPubKey, + Gamma: proofGamma, + C: proofC, + S: proofS, + Seed: proofSeed, + Output: proofOutput, + } + + proofsChan <- pf + } + fmt.Println("all proofs valid! num proofs:", numValid) + } +} + +func preseed(keyHash common.Hash, sender common.Address, subID, nonce uint64) [32]byte { + encoded, err := evmutils.ABIEncode( + `[{"type":"bytes32"}, {"type":"address"}, {"type":"uint64"}, {"type":"uint64"}]`, + keyHash, + sender, + subID, + nonce) + helpers.PanicErr(err) + preSeed := crypto.Keccak256(encoded) + var preSeedSized [32]byte + copy(preSeedSized[:], preSeed) + return preSeedSized +} + +func nonceRanges(start, end, numWorkers uint64) (ranges [][]uint64) { + rangeSize := (end - start) / numWorkers + for i := start; i <= end; i += rangeSize + 1 { + j := i + rangeSize + if j > end { + j = end + } + + ranges = append(ranges, []uint64{i, j}) + } + return +} diff --git a/core/scripts/vrfv2/revert-reason/main.go b/core/scripts/vrfv2/revert-reason/main.go new file mode 100644 index 00000000..abbc3049 --- /dev/null +++ b/core/scripts/vrfv2/revert-reason/main.go @@ -0,0 +1,42 @@ +package main + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum" + gethCommon "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/ethclient" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" +) + +func panicErr(err error) { + if err != nil { + panic(err) + } +} + +func main() { + ec, err := ethclient.Dial("TODO") + panicErr(err) + txHash := "0xedeeecf6bd763ecc82b5dff31e073af9cc4cf8a4b47708df526ba61cf0201d25" // non-custom on goerli + //txHash := "0x6ec8a69657600786f0b31726f36287e80196029e60f8365528d4d540a6f70763" // custom error on mainnet + tx, _, err := ec.TransactionByHash(context.Background(), gethCommon.HexToHash(txHash)) + panicErr(err) + re, err := ec.TransactionReceipt(context.Background(), gethCommon.HexToHash(txHash)) + panicErr(err) + fmt.Println(re.Status, re.GasUsed, re.CumulativeGasUsed) + requester := gethCommon.HexToAddress("0xffe4a8b862971611dce48f3ba295d4ebfeb5b2fe") + call := ethereum.CallMsg{ + From: requester, + To: tx.To(), + Data: tx.Data(), + Gas: tx.Gas(), + GasPrice: tx.GasPrice(), + } + r, err := ec.CallContract(context.Background(), call, re.BlockNumber) + fmt.Println("call contract", "r", r, "err", err) + rpcError, err := evmclient.ExtractRPCError(err) + fmt.Println("extracting rpc error", rpcError.String(), err) +} diff --git a/core/scripts/vrfv2/testnet/README.md b/core/scripts/vrfv2/testnet/README.md new file mode 100644 index 00000000..81d3519f --- /dev/null +++ b/core/scripts/vrfv2/testnet/README.md @@ -0,0 +1,259 @@ +# Using the External Subscription Owner Example + +The [external subscription owner example contract](../../../../contracts/src/v0.8/tests/VRFExternalSubOwnerExample.sol) +allows its owner to request random words from VRF V2 if it is added as a +consumer for a funded VRF subscription. + +This guide covers: + 1. Deploying the contract + 2. Creating, funding, checking balance, and adding a consumer to a VRF V2 + subscription + 3. Requesting randomness from the contract + +## Setup + +Before starting, you will need: +1. An EVM chain endpoint URL +2. The chain ID corresponding to your chain +3. 
The private key of an account funded with PLI, and the chain's native token + (to pay transaction fees) +4. [The PLI address, VRF coordinator address, and key hash](https://docs.chain.link/docs/vrf/v2/supported-networks/) + for your chain. +5. [Go](https://go.dev/doc/install) + +The endpoint URL can be a locally running node, or an externally hosted one like +[alchemy](https://www.alchemy.com/). Your chain ID will be a number +corresponding to the chain you pick. For example the Rinkeby testnet has chain +ID 4. Your private key can be exported from [MetaMask](https://metamask.zendesk.com/hc/en-us/articles/360015289632-How-to-Export-an-Account-Private-Key). + +Note: Be careful with your key. When using testnets, it's best to use a separate +account that does not hold real funds. + +Run the following command to set up your environment: + +```shell +export ETH_URL= +export ETH_CHAIN_ID= +export ACCOUNT_KEY= +export PLI= +export PLI_ETH_FEED=
+export COORDINATOR= +export KEY_HASH= +export ORACLE_ADDRESS= +export PUB_KEY= +``` + +By default, the script automatically estimates gas limits for operations. Optionally, `ETH_GAS_LIMIT_DEFAULT` environment variable can be set to override gas limit for operations. + +Now "cd" into the VRF V2 testnet scripts directory: + +```shell +cd /core/scripts/vrfv2/testnet +``` + +## Deploying a full VRF Universe (BHS, Registered + Funded Coordinator, Consumer) + +To deploy a full VRF environment on-chain, run: + +```shell +go run . deploy-universe \ +--subscription-balance=5000000000000000000 \ #5 PLI +--uncompressed-pub-key= \ +--vrf-primary-node-sending-keys="" \ #used to fund the keys and for sample VRF Job Spec generation +--sending-key-funding-amount 100000000000000000 \ #0.1 ETH, fund addresses specified in vrf-primary-node-sending-keys +--batch-fulfillment-enabled false \ #only used for sample VRF Job Spec generation +--register-vrf-key-against-address=<"from this address you can perform `coordinator.oracleWithdraw` to withdraw earned funds from rand request fulfilments> +``` +```shell +go run . deploy-universe \ +--subscription-balance=5000000000000000000 \ +--uncompressed-pub-key="0xf3706e247a7b205c8a8bd25a6e8c4650474da496151371085d45beeead27e568c1a5e8330c7fa718f8a31226efbff6632ed6f8ed470b637aa9be2b948e9dcef6" \ +--batch-fulfillment-enabled false \ +--register-vrf-key-against-address="0x23b5613fc04949F4A53d1cc8d6BCCD21ffc38C11" +``` + +## Deploying the Consumer Contract + +To deploy the VRFExternalSubOwnerExample contract, run: + +```shell +go run . eoa-consumer-deploy --coordinator-address=$COORDINATOR --link-address=$PLI +``` + +You should get the output: +``` +Consumer address hash +``` + +Run the command: +```shell +export CONSUMER= +``` + +## Setting up a VRF V2 Subscription + +In order for your newly deployed consumer to make VRF requests, it needs to be +authorized for a funded subscription. + +### Creating a Subscription + +```shell +go run . 
eoa-create-sub --coordinator-address=$COORDINATOR +``` + +You should get the output: +``` +Create sub TX hash +``` + +In order to get the subscription ID created by your transaction, you should use +an online block explorer and input your transaction hash. Once the transaction +is confirmed you should see a log (on Etherscan, this is in the "Logs" tab of +the transaction details screen) with the created subscription details including +the decimal representation of your subscription ID. + +Once you have found the ID, run: +```shell +export SUB_ID= +``` + +### Funding a Subscription + +In order to fund your subscription with 10 PLI, run: +```shell +go run . eoa-fund-sub --coordinator-address $COORDINATOR --link-address=$PLI --sub-id=$SUB_ID --amount=10000000000000000000 # 10e18 or 10 PLI +``` + +You should get the output: +``` +Initial account balance: Funding amount: 10000000000000000000 +Funding sub 61 hash +``` + +### (Optional) Checking Subscription Balance + +To check the PLI balance of your subscription, run: +```shell +go run . sub-balance --coordinator-address $COORDINATOR --sub-id=$SUB_ID +``` + +You should get the output: +``` +sub id balance: +``` + +### Adding a Consumer to Your Subscription + +In order to authorize the consumer contract to use the new subscription, run the +command: +```shell +go run . eoa-add-sub-consumer --coordinator-address $COORDINATOR --sub-id=$SUB_ID --consumer-address=$CONSUMER +``` + +### Requesting Randomness + +At this point, the consumer is authorized as a consumer of a funded +subscription, and is ready to request random words. + +To make a request, run: +```shell +go run . eoa-request --consumer-address=$CONSUMER --sub-id=$SUB_ID --key-hash=$KEY_HASH --num-words 1 +``` + +You should get the output: +``` +TX hash: 0x599022228ffca10b0192e0b13bea64ff74f6dab2f0a3002b0825cbe22bd98249 +``` + +You can put this transaction hash into a block explorer to check its progress. 
+Shortly after it's confirmed, usually only a few minutes, you should see a +second incoming transaction to your consumer containing the randomness +result. + +## Debugging Reverted Transactions + +A reverted transaction could have number of root causes, for example +insufficient funds / PLI, or incorrect contract addresses. + +[Tenderly](https://dashboard.tenderly.co/explorer) can be useful for debugging +why a transaction failed. For example [this Rinkeby transaction](https://dashboard.tenderly.co/tx/rinkeby/0x71a7279033b47472ca453f7a19ccb685d0f32cdb4854a45052f1aaccd80436e9) +failed because a non-owner tried to request random words from +[VRFExternalSubOwnerExample](../../../../contracts/src/v0.8/tests/VRFExternalSubOwnerExample.sol). + +## Using the `BatchBlockhashStore` Contract + +The `BatchBlockhashStore` contract acts as a proxy to the `BlockhashStore` contract, allowing callers to store +and fetch many blockhashes in a single transaction. + +### Deploy a `BatchBlockhashStore` instance + +``` +go run . batch-bhs-deploy -bhs-address $BHS_ADDRESS +``` + +where `$BHS_ADDRESS` is an environment variable that points to an existing `BlockhashStore` contract. If one is not available, +you can easily deploy one using this command: + +``` +go run . bhs-deploy +``` + +### Store many blockhashes + +``` +go run . batch-bhs-store -batch-bhs-address $BATCH_BHS_ADDRESS -block-numbers 10298742,10298741,10298740,10298739 +``` + +where `$BATCH_BHS_ADDRESS` points to the `BatchBlockhashStore` contract deployed above, and `-block-numbers` is a comma-separated +list of block numbers you want to store in a single transaction. + +Please note that these block numbers must not be further than 256 from the latest head, otherwise the store will fail. + +### Fetch many blockhashes + +``` +go run . 
batch-bhs-get -batch-bhs-address $BATCH_BHS_ADDRESS -block-numbers 10298742,10298741,10298740,10298739 +``` + +where `$BATCH_BHS_ADDRESS` points to the `BatchBlockhashStore` contract deployed above, and `-block-numbers` is a comma-separated +list of block numbers you want to get in a single transaction. + +### Store many blockhashes, possibly farther back than 256 blocks + +In order to store blockhashes farther back than 256 blocks we can make use of the `storeVerifyHeader` method on the `BatchBlockhashStore`. + +Here's how to use it: + +``` +go run . batch-bhs-storeVerify -batch-bhs-address $BATCH_BHS_ADDRESS -num-blocks 25 -start-block 10298739 +``` + +where `$BATCH_BHS_ADDRESS` points to the `BatchBlockhashStore` contract deployed above, `-num-blocks` is the number of blocks to store, and +`-start-block` is the block to start storing from, backwards. The block number specified by `-start-block` MUST be +in the blockhash store already, or this will not work. + +### Batch BHS "Backwards Mode" + +There may be a situation where you want to backfill a lot of blockhashes, down to a certain block number. + +This is where "Backwards Mode" comes in - you're going to need the following: + +* A block number that has already been stored in the BHS. The closer it is to the target block range you want to store, +the better. You can view the oldest "Store" transaction on the BHS contract that is still ahead of the block range you +are interested in. For example, if you want to store blocks 100 to 200, and 210 and 220 are available, specify `-start-block` +as `210`. +* A destination block number, where you want to stop storing after this one has been stored in the BHS. This number doesn't have +to be in the BHS already but must be less than the block specified for `-start-block`. +* A batch size to use. This is how many stores we will attempt to do in a single transaction. A good value for this is usually 50-75 +for big block ranges. 
+* The address of the batch BHS to use. + +Example: + +``` +go run . batch-bhs-backwards -batch-bhs-address $BATCH_BHS_ADDRESS -start-block 25814538 -end-block 25811350 -batch-size 50 +``` + +This script is simplistic on purpose, where we wait for the transaction to mine before proceeding with the next one. This +is to avoid issues where a transaction gets sent and not included on-chain, and subsequent calls to `storeVerifyHeader` will +fail. diff --git a/core/scripts/vrfv2/testnet/main.go b/core/scripts/vrfv2/testnet/main.go new file mode 100644 index 00000000..3b0a670a --- /dev/null +++ b/core/scripts/vrfv2/testnet/main.go @@ -0,0 +1,1448 @@ +package main + +import ( + "bytes" + "context" + "encoding/hex" + "flag" + "fmt" + "log" + "math/big" + "os" + "strings" + "sync" + + "github.com/goplugin/pluginv3.0/core/scripts/vrfv2/testnet/v2scripts" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_owner_test_consumer" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/shopspring/decimal" + + "github.com/jmoiron/sqlx" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keepers_vrf_consumer" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_external_sub_owner_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_load_test_external_sub_owner" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_load_test_with_metrics" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_owner" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_single_consumer_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_wrapper_consumer_example" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + batchCoordinatorV2ABI = evmtypes.MustGetABI(batch_vrf_coordinator_v2.BatchVRFCoordinatorV2ABI) +) + +func main() { + e := helpers.SetupEnv(false) + + switch os.Args[1] { + case "manual-fulfill": + cmd := flag.NewFlagSet("manual-fulfill", flag.ExitOnError) + // In order to get the tx data for a fulfillment transaction, you can grep the + // plugin node logs for the VRF v2 request ID in hex. You will find a log for + // the vrf task in the VRF pipeline, specifically the "output" log field. 
+ // Sample Loki query: + // {app="app-name"} | json | taskType="vrfv2" |~ "39f2d812c04e07cb9c71e93ce6547e48b7dd23ed4cc02616dfef5ef063a58bde" + txdatas := cmd.String("txdatas", "", "hex encoded tx data") + coordinatorAddress := cmd.String("coordinator-address", "", "coordinator address") + gasMultiplier := cmd.Float64("gas-multiplier", 1.1, "gas multiplier") + helpers.ParseArgs(cmd, os.Args[2:], "txdatas", "coordinator-address") + txdatasParsed := helpers.ParseHexSlice(*txdatas) + coordinatorAddr := common.HexToAddress(*coordinatorAddress) + for i, txdata := range txdatasParsed { + nonce, err := e.Ec.PendingNonceAt(context.Background(), e.Owner.From) + helpers.PanicErr(err) + estimate, err := e.Ec.EstimateGas(context.Background(), ethereum.CallMsg{ + From: common.HexToAddress("0x0"), + To: &coordinatorAddr, + Data: txdata, + }) + helpers.PanicErr(err) + finalEstimate := uint64(*gasMultiplier * float64(estimate)) + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce, + GasPrice: e.Owner.GasPrice, + Gas: finalEstimate, + To: &coordinatorAddr, + Data: txdata, + }) + signedTx, err := e.Owner.Signer(e.Owner.From, tx) + helpers.PanicErr(err) + err = e.Ec.SendTransaction(context.Background(), signedTx) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, signedTx, e.ChainID, fmt.Sprintf("manual fulfillment %d", i+1)) + } + case "topics": + randomWordsRequested := vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{}.Topic() + randomWordsFulfilled := vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled{}.Topic() + fmt.Println("RandomWordsRequested:", randomWordsRequested.String(), + "RandomWordsFulfilled:", randomWordsFulfilled.String()) + case "request-report": + cmd := flag.NewFlagSet("request-report", flag.ExitOnError) + txHashes := cmd.String("tx-hashes", "", "comma separated transaction hashes") + requestIDs := cmd.String("request-ids", "", "comma separated request IDs in decimal") + bhsAddress := cmd.String("bhs-address", "", "BHS 
contract address") + coordinatorAddress := cmd.String("coordinator-address", "", "VRF coordinator address") + + helpers.ParseArgs(cmd, os.Args[2:], "tx-hashes", "bhs-address", "request-ids", "coordinator-address") + + hashes := helpers.ParseHashSlice(*txHashes) + reqIDs := helpers.ParseBigIntSlice(*requestIDs) + bhs, err := blockhash_store.NewBlockhashStore( + common.HexToAddress(*bhsAddress), + e.Ec) + helpers.PanicErr(err) + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2( + common.HexToAddress(*coordinatorAddress), + e.Ec) + helpers.PanicErr(err) + + if len(hashes) != len(reqIDs) { + panic(fmt.Errorf("len(hashes) [%d] != len(reqIDs) [%d]", len(hashes), len(reqIDs))) + } + + var bhsMissedBlocks []*big.Int + for i := range hashes { + receipt, err := e.Ec.TransactionReceipt(context.Background(), hashes[i]) + helpers.PanicErr(err) + + reqID := reqIDs[i] + commitment, err := coordinator.GetCommitment(nil, reqID) + helpers.PanicErr(err) + fulfilled := utils.IsEmpty(commitment[:]) + + _, err = bhs.GetBlockhash(nil, receipt.BlockNumber) + if err != nil { + fmt.Println("Blockhash for block", receipt.BlockNumber, "not stored (tx", hashes[i].String(), + ", request ID", reqID, ", fulfilled:", fulfilled, ")") + if !fulfilled { + // not fulfilled and bh not stored means the feeder missed a store + bhsMissedBlocks = append(bhsMissedBlocks, receipt.BlockNumber) + } + } else { + fmt.Println("Blockhash for block", receipt.BlockNumber, "stored (tx", hashes[i].String(), + ", request ID", reqID, ", fulfilled:", fulfilled, ")") + } + } + + if len(bhsMissedBlocks) == 0 { + fmt.Println("Didn't miss any bh stores!") + return + } + fmt.Println("Missed stores:") + for _, blockNumber := range bhsMissedBlocks { + fmt.Println("\t* ", blockNumber.String()) + } + case "keepers-vrf-consumer-deploy": + cmd := flag.NewFlagSet("keepers-vrf-consumer-deploy", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "vrf coordinator v2 address") + subID := 
cmd.Uint64("sub-id", 0, "subscription id") + keyHash := cmd.String("key-hash", "", "vrf v2 key hash") + requestConfs := cmd.Uint("request-confs", 3, "request confirmations") + upkeepIntervalSeconds := cmd.Int64("upkeep-interval-seconds", 600, "upkeep interval in seconds") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "sub-id", "key-hash") + _, tx, _, err := keepers_vrf_consumer.DeployKeepersVRFConsumer( + e.Owner, e.Ec, + common.HexToAddress(*coordinatorAddress), // vrf coordinator address + *subID, // subscription id + common.HexToHash(*keyHash), // key hash + uint16(*requestConfs), // request confirmations + big.NewInt(*upkeepIntervalSeconds), // upkeep interval seconds + ) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + case "batch-coordinatorv2-deploy": + cmd := flag.NewFlagSet("batch-coordinatorv2-deploy", flag.ExitOnError) + coordinatorAddr := cmd.String("coordinator-address", "", "address of the vrf coordinator v2 contract") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address") + _, tx, _, err := batch_vrf_coordinator_v2.DeployBatchVRFCoordinatorV2(e.Owner, e.Ec, common.HexToAddress(*coordinatorAddr)) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + case "batch-coordinatorv2-fulfill": + cmd := flag.NewFlagSet("batch-coordinatorv2-fulfill", flag.ExitOnError) + batchCoordinatorAddr := cmd.String("batch-coordinator-address", "", "address of the batch vrf coordinator v2 contract") + pubKeyHex := cmd.String("pubkeyhex", "", "compressed pubkey hex") + dbURL := cmd.String("db-url", "", "postgres database url") + keystorePassword := cmd.String("keystore-pw", "", "password to the keystore") + submit := cmd.Bool("submit", false, "whether to submit the fulfillments or not") + estimateGas := cmd.Bool("estimate-gas", false, "whether to estimate gas or not") + + // NOTE: it is assumed all of these are of the same length and that + // elements 
correspond to each other index-wise. this property is not checked. + preSeeds := cmd.String("preseeds", "", "comma-separated request preSeeds") + blockHashes := cmd.String("blockhashes", "", "comma-separated request blockhashes") + blockNums := cmd.String("blocknums", "", "comma-separated request blocknumbers") + subIDs := cmd.String("subids", "", "comma-separated request subids") + cbGasLimits := cmd.String("cbgaslimits", "", "comma-separated request callback gas limits") + numWordses := cmd.String("numwordses", "", "comma-separated request num words") + senders := cmd.String("senders", "", "comma-separated request senders") + + helpers.ParseArgs(cmd, os.Args[2:], + "batch-coordinator-address", "pubkeyhex", "db-url", + "keystore-pw", "preseeds", "blockhashes", "blocknums", + "subids", "cbgaslimits", "numwordses", "senders", "submit", + ) + + preSeedSlice := helpers.ParseBigIntSlice(*preSeeds) + bhSlice := helpers.ParseHashSlice(*blockHashes) + blockNumSlice := helpers.ParseBigIntSlice(*blockNums) + subIDSlice := helpers.ParseBigIntSlice(*subIDs) + cbLimitsSlice := helpers.ParseBigIntSlice(*cbGasLimits) + numWordsSlice := helpers.ParseBigIntSlice(*numWordses) + senderSlice := helpers.ParseAddressSlice(*senders) + + batchCoordinator, err := batch_vrf_coordinator_v2.NewBatchVRFCoordinatorV2(common.HexToAddress(*batchCoordinatorAddr), e.Ec) + helpers.PanicErr(err) + + db := sqlx.MustOpen("postgres", *dbURL) + lggr, _ := logger.NewLogger() + + keyStore := keystore.New(db, utils.DefaultScryptParams, lggr, pg.NewQConfig(false)) + err = keyStore.Unlock(*keystorePassword) + helpers.PanicErr(err) + + k, err := keyStore.VRF().Get(*pubKeyHex) + helpers.PanicErr(err) + + fmt.Println("vrf key found:", k) + + proofs := []batch_vrf_coordinator_v2.VRFTypesProof{} + reqCommits := []batch_vrf_coordinator_v2.VRFTypesRequestCommitment{} + for i := range preSeedSlice { + ps, err := proof.BigToSeed(preSeedSlice[i]) + helpers.PanicErr(err) + preSeedData := proof.PreSeedDataV2{ + PreSeed: 
ps, + BlockHash: bhSlice[i], + BlockNum: blockNumSlice[i].Uint64(), + SubId: subIDSlice[i].Uint64(), + CallbackGasLimit: uint32(cbLimitsSlice[i].Uint64()), + NumWords: uint32(numWordsSlice[i].Uint64()), + Sender: senderSlice[i], + } + fmt.Printf("preseed data iteration %d: %+v\n", i, preSeedData) + finalSeed := proof.FinalSeedV2(preSeedData) + + p, err := keyStore.VRF().GenerateProof(*pubKeyHex, finalSeed) + helpers.PanicErr(err) + + onChainProof, rc, err := proof.GenerateProofResponseFromProofV2(p, preSeedData) + helpers.PanicErr(err) + + proofs = append(proofs, batch_vrf_coordinator_v2.VRFTypesProof(onChainProof)) + reqCommits = append(reqCommits, batch_vrf_coordinator_v2.VRFTypesRequestCommitment(rc)) + } + + fmt.Printf("proofs: %+v\n\n", proofs) + fmt.Printf("request commitments: %+v\n\n", reqCommits) + + if *submit { + fmt.Println("submitting fulfillments...") + tx, err := batchCoordinator.FulfillRandomWords(e.Owner, proofs, reqCommits) + helpers.PanicErr(err) + + fmt.Println("waiting for it to mine:", helpers.ExplorerLink(e.ChainID, tx.Hash())) + _, err = bind.WaitMined(context.Background(), e.Ec, tx) + helpers.PanicErr(err) + fmt.Println("done") + } + + if *estimateGas { + fmt.Println("estimating gas") + payload, err := batchCoordinatorV2ABI.Pack("fulfillRandomWords", proofs, reqCommits) + helpers.PanicErr(err) + + a := batchCoordinator.Address() + gasEstimate, err := e.Ec.EstimateGas(context.Background(), ethereum.CallMsg{ + From: e.Owner.From, + To: &a, + Data: payload, + }) + helpers.PanicErr(err) + + fmt.Println("gas estimate:", gasEstimate) + } + case "coordinatorv2-fulfill": + cmd := flag.NewFlagSet("coordinatorv2-fulfill", flag.ExitOnError) + coordinatorAddr := cmd.String("coordinator-address", "", "address of the vrf coordinator v2 contract") + pubKeyHex := cmd.String("pubkeyhex", "", "compressed pubkey hex") + dbURL := cmd.String("db-url", "", "postgres database url") + keystorePassword := cmd.String("keystore-pw", "", "password to the keystore") + 
+ preSeed := cmd.String("preseed", "", "request preSeed") + blockHash := cmd.String("blockhash", "", "request blockhash") + blockNum := cmd.Uint64("blocknum", 0, "request blocknumber") + subID := cmd.Uint64("subid", 0, "request subid") + cbGasLimit := cmd.Uint("cbgaslimit", 0, "request callback gas limit") + numWords := cmd.Uint("numwords", 0, "request num words") + sender := cmd.String("sender", "", "request sender") + + helpers.ParseArgs(cmd, os.Args[2:], + "coordinator-address", "pubkeyhex", "db-url", + "keystore-pw", "preseed", "blockhash", "blocknum", + "subid", "cbgaslimit", "numwords", "sender", + ) + + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddr), e.Ec) + helpers.PanicErr(err) + + db := sqlx.MustOpen("postgres", *dbURL) + lggr, _ := logger.NewLogger() + + keyStore := keystore.New(db, utils.DefaultScryptParams, lggr, pg.NewQConfig(false)) + err = keyStore.Unlock(*keystorePassword) + helpers.PanicErr(err) + + k, err := keyStore.VRF().Get(*pubKeyHex) + helpers.PanicErr(err) + + fmt.Println("vrf key found:", k) + + ps, err := proof.BigToSeed(decimal.RequireFromString(*preSeed).BigInt()) + helpers.PanicErr(err) + preSeedData := proof.PreSeedDataV2{ + PreSeed: ps, + BlockHash: common.HexToHash(*blockHash), + BlockNum: *blockNum, + SubId: *subID, + CallbackGasLimit: uint32(*cbGasLimit), + NumWords: uint32(*numWords), + Sender: common.HexToAddress(*sender), + } + fmt.Printf("preseed data: %+v\n", preSeedData) + finalSeed := proof.FinalSeedV2(preSeedData) + + p, err := keyStore.VRF().GenerateProof(*pubKeyHex, finalSeed) + helpers.PanicErr(err) + + onChainProof, rc, err := proof.GenerateProofResponseFromProofV2(p, preSeedData) + helpers.PanicErr(err) + + fmt.Printf("Proof: %+v, commitment: %+v\nSending fulfillment!", onChainProof, rc) + + tx, err := coordinator.FulfillRandomWords(e.Owner, onChainProof, rc) + helpers.PanicErr(err) + + fmt.Println("waiting for it to mine:", helpers.ExplorerLink(e.ChainID, tx.Hash())) 
+ _, err = bind.WaitMined(context.Background(), e.Ec, tx) + helpers.PanicErr(err) + fmt.Println("done") + case "batch-bhs-deploy": + cmd := flag.NewFlagSet("batch-bhs-deploy", flag.ExitOnError) + bhsAddr := cmd.String("bhs-address", "", "address of the blockhash store contract") + helpers.ParseArgs(cmd, os.Args[2:], "bhs-address") + v2scripts.DeployBatchBHS(e, common.HexToAddress(*bhsAddr)) + case "batch-bhs-store": + cmd := flag.NewFlagSet("batch-bhs-store", flag.ExitOnError) + batchAddr := cmd.String("batch-bhs-address", "", "address of the batch bhs contract") + blockNumbersArg := cmd.String("block-numbers", "", "block numbers to store in a single transaction") + helpers.ParseArgs(cmd, os.Args[2:], "batch-bhs-address", "block-numbers") + batchBHS, err := batch_blockhash_store.NewBatchBlockhashStore(common.HexToAddress(*batchAddr), e.Ec) + helpers.PanicErr(err) + blockNumbers := helpers.ParseBigIntSlice(*blockNumbersArg) + helpers.PanicErr(err) + tx, err := batchBHS.Store(e.Owner, blockNumbers) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "batch-bhs-get": + cmd := flag.NewFlagSet("batch-bhs-get", flag.ExitOnError) + batchAddr := cmd.String("batch-bhs-address", "", "address of the batch bhs contract") + blockNumbersArg := cmd.String("block-numbers", "", "block numbers to store in a single transaction") + helpers.ParseArgs(cmd, os.Args[2:], "batch-bhs-address", "block-numbers") + batchBHS, err := batch_blockhash_store.NewBatchBlockhashStore(common.HexToAddress(*batchAddr), e.Ec) + helpers.PanicErr(err) + blockNumbers := helpers.ParseBigIntSlice(*blockNumbersArg) + helpers.PanicErr(err) + blockhashes, err := batchBHS.GetBlockhashes(nil, blockNumbers) + helpers.PanicErr(err) + for i, bh := range blockhashes { + fmt.Println("blockhash(", blockNumbers[i], ") = ", common.Bytes2Hex(bh[:])) + } + case "batch-bhs-storeVerify": + cmd := flag.NewFlagSet("batch-bhs-storeVerify", flag.ExitOnError) + batchAddr := 
cmd.String("batch-bhs-address", "", "address of the batch bhs contract") + startBlock := cmd.Int64("start-block", -1, "block number to start from. Must be in the BHS already.") + numBlocks := cmd.Int64("num-blocks", -1, "number of blockhashes to store. will be stored in a single tx, can't be > 150") + helpers.ParseArgs(cmd, os.Args[2:], "batch-bhs-address", "start-block", "num-blocks") + batchBHS, err := batch_blockhash_store.NewBatchBlockhashStore(common.HexToAddress(*batchAddr), e.Ec) + helpers.PanicErr(err) + blockRange, err := blockhashstore.DecreasingBlockRange(big.NewInt(*startBlock-1), big.NewInt(*startBlock-*numBlocks-1)) + helpers.PanicErr(err) + rlpHeaders, _, err := helpers.GetRlpHeaders(e, blockRange, true) + helpers.PanicErr(err) + tx, err := batchBHS.StoreVerifyHeader(e.Owner, blockRange, rlpHeaders) + helpers.PanicErr(err) + fmt.Println("storeVerifyHeader(", blockRange, ", ...) tx:", helpers.ExplorerLink(e.ChainID, tx.Hash())) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, fmt.Sprintf("blockRange: %d", blockRange)) + case "batch-bhs-backwards": + cmd := flag.NewFlagSet("batch-bhs-backwards", flag.ExitOnError) + batchAddr := cmd.String("batch-bhs-address", "", "address of the batch bhs contract") + bhsAddr := cmd.String("bhs-address", "", "address of the bhs contract") + startBlock := cmd.Int64("start-block", -1, "block number to start from. Must be in the BHS already.") + endBlock := cmd.Int64("end-block", -1, "block number to end at. 
Must be less than startBlock") + batchSize := cmd.Int64("batch-size", -1, "batch size") + gasMultiplier := cmd.Int64("gas-price-multiplier", 1, "gas price multiplier to use, defaults to 1 (no multiplication)") + helpers.ParseArgs(cmd, os.Args[2:], "batch-bhs-address", "bhs-address", "end-block", "batch-size") + + batchBHS, err := batch_blockhash_store.NewBatchBlockhashStore(common.HexToAddress(*batchAddr), e.Ec) + helpers.PanicErr(err) + + bhs, err := blockhash_store.NewBlockhashStore(common.HexToAddress(*bhsAddr), e.Ec) + helpers.PanicErr(err) + + // Sanity check BHS address in the Batch BHS. + bhsAddressBatchBHS, err := batchBHS.BHS(nil) + helpers.PanicErr(err) + + if bhsAddressBatchBHS != common.HexToAddress(*bhsAddr) { + log.Panicf("Mismatch in bhs addresses: batch bhs has %s while given %s", bhsAddressBatchBHS.String(), *bhsAddr) + } + + if *startBlock == -1 { + closestBlock, err2 := v2scripts.ClosestBlock(e, common.HexToAddress(*batchAddr), uint64(*endBlock), uint64(*batchSize)) + // found a block with blockhash stored that's more recent that end block + if err2 == nil { + *startBlock = int64(closestBlock) + } else { + fmt.Println("encountered error while looking for closest block:", err2) + tx, err2 := bhs.StoreEarliest(e.Owner) + helpers.PanicErr(err2) + receipt := helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "Store Earliest") + // storeEarliest will store receipt block number minus 256 which is the earliest block + // the blockhash() instruction will work on. + *startBlock = receipt.BlockNumber.Int64() - 256 + } + } + + // Check if the provided start block is in the BHS. If it's not, print out an appropriate + // helpful error message. Otherwise users would get the cryptic "header has unknown blockhash" + // error which is a bit more difficult to diagnose. + // The Batch BHS returns a zero'd [32]byte array in the event the provided block number doesn't + // have it's blockhash in the BHS. 
+ var notFound [32]byte + hsh, err := batchBHS.GetBlockhashes(nil, []*big.Int{big.NewInt(*startBlock)}) + helpers.PanicErr(err) + + if len(hsh) != 1 { + helpers.PanicErr(fmt.Errorf("expected 1 item in returned array from BHS store, got: %d", len(hsh))) + } + + if bytes.Equal(hsh[0][:], notFound[:]) { + helpers.PanicErr(fmt.Errorf("expected block number %d (start-block argument) to be in the BHS already, did not find it there", *startBlock)) + } + + blockRange, err := blockhashstore.DecreasingBlockRange(big.NewInt(*startBlock-1), big.NewInt(*endBlock)) + helpers.PanicErr(err) + + for i := 0; i < len(blockRange); i += int(*batchSize) { + j := i + int(*batchSize) + if j > len(blockRange) { + j = len(blockRange) + } + + // Get suggested gas price and multiply by multiplier on every iteration + // so we don't have our transaction getting stuck. Need to be as fast as + // possible. + gp, err := e.Ec.SuggestGasPrice(context.Background()) + helpers.PanicErr(err) + e.Owner.GasPrice = new(big.Int).Mul(gp, big.NewInt(*gasMultiplier)) + + fmt.Println("using gas price", e.Owner.GasPrice, "wei") + + blockNumbers := blockRange[i:j] + blockHeaders, _, err := helpers.GetRlpHeaders(e, blockNumbers, true) + fmt.Println("storing blockNumbers:", blockNumbers) + helpers.PanicErr(err) + + tx, err := batchBHS.StoreVerifyHeader(e.Owner, blockNumbers, blockHeaders) + helpers.PanicErr(err) + + fmt.Println("sent tx:", helpers.ExplorerLink(e.ChainID, tx.Hash())) + + fmt.Println("waiting for it to mine...") + _, err = bind.WaitMined(context.Background(), e.Ec, tx) + helpers.PanicErr(err) + + fmt.Println("received receipt, continuing") + fmt.Println("there are", len(blockRange)-j, "blocks left to store") + } + fmt.Println("done") + case "latest-head": + h, err := e.Ec.HeaderByNumber(context.Background(), nil) + helpers.PanicErr(err) + fmt.Println("latest head number:", h.Number.String()) + case "bhs-deploy": + v2scripts.DeployBHS(e) + case "coordinator-deploy": + coordinatorDeployCmd := 
flag.NewFlagSet("coordinator-deploy", flag.ExitOnError) + coordinatorDeployLinkAddress := coordinatorDeployCmd.String("link-address", "", "address of link token") + coordinatorDeployBHSAddress := coordinatorDeployCmd.String("bhs-address", "", "address of bhs") + coordinatorDeployLinkEthFeedAddress := coordinatorDeployCmd.String("link-eth-feed", "", "address of link-eth-feed") + helpers.ParseArgs(coordinatorDeployCmd, os.Args[2:], "link-address", "bhs-address", "link-eth-feed") + v2scripts.DeployCoordinator(e, *coordinatorDeployLinkAddress, *coordinatorDeployBHSAddress, *coordinatorDeployLinkEthFeedAddress) + case "coordinator-get-config": + cmd := flag.NewFlagSet("coordinator-get-config", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "coordinator address") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address") + + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + + v2scripts.PrintCoordinatorConfig(coordinator) + case "coordinator-set-config": + cmd := flag.NewFlagSet("coordinator-set-config", flag.ExitOnError) + setConfigAddress := cmd.String("coordinator-address", "", "coordinator address") + minConfs := cmd.Int("min-confs", 3, "min confs") + maxGasLimit := cmd.Int64("max-gas-limit", 2.5e6, "max gas limit") + stalenessSeconds := cmd.Int64("staleness-seconds", 86400, "staleness in seconds") + gasAfterPayment := cmd.Int64("gas-after-payment", 33285, "gas after payment calculation") + fallbackWeiPerUnitLink := cmd.String("fallback-wei-per-unit-link", "", "fallback wei per unit link") + flatFeeTier1 := cmd.Int64("flat-fee-tier-1", 500, "flat fee tier 1") + flatFeeTier2 := cmd.Int64("flat-fee-tier-2", 500, "flat fee tier 2") + flatFeeTier3 := cmd.Int64("flat-fee-tier-3", 500, "flat fee tier 3") + flatFeeTier4 := cmd.Int64("flat-fee-tier-4", 500, "flat fee tier 4") + flatFeeTier5 := cmd.Int64("flat-fee-tier-5", 500, "flat fee tier 5") + reqsForTier2 
:= cmd.Int64("reqs-for-tier-2", 0, "requests for tier 2") + reqsForTier3 := cmd.Int64("reqs-for-tier-3", 0, "requests for tier 3") + reqsForTier4 := cmd.Int64("reqs-for-tier-4", 0, "requests for tier 4") + reqsForTier5 := cmd.Int64("reqs-for-tier-5", 0, "requests for tier 5") + + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "fallback-wei-per-unit-link") + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*setConfigAddress), e.Ec) + helpers.PanicErr(err) + + v2scripts.SetCoordinatorConfig( + e, + *coordinator, + uint16(*minConfs), + uint32(*maxGasLimit), + uint32(*stalenessSeconds), + uint32(*gasAfterPayment), + decimal.RequireFromString(*fallbackWeiPerUnitLink).BigInt(), + vrf_coordinator_v2.VRFCoordinatorV2FeeConfig{ + FulfillmentFlatFeeLinkPPMTier1: uint32(*flatFeeTier1), + FulfillmentFlatFeeLinkPPMTier2: uint32(*flatFeeTier2), + FulfillmentFlatFeeLinkPPMTier3: uint32(*flatFeeTier3), + FulfillmentFlatFeeLinkPPMTier4: uint32(*flatFeeTier4), + FulfillmentFlatFeeLinkPPMTier5: uint32(*flatFeeTier5), + ReqsForTier2: big.NewInt(*reqsForTier2), + ReqsForTier3: big.NewInt(*reqsForTier3), + ReqsForTier4: big.NewInt(*reqsForTier4), + ReqsForTier5: big.NewInt(*reqsForTier5), + }, + ) + case "coordinator-register-key": + coordinatorRegisterKey := flag.NewFlagSet("coordinator-register-key", flag.ExitOnError) + registerKeyAddress := coordinatorRegisterKey.String("address", "", "coordinator address") + registerKeyUncompressedPubKey := coordinatorRegisterKey.String("pubkey", "", "uncompressed pubkey") + registerKeyOracleAddress := coordinatorRegisterKey.String("oracle-address", "", "oracle address") + helpers.ParseArgs(coordinatorRegisterKey, os.Args[2:], "address", "pubkey", "oracle-address") + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*registerKeyAddress), e.Ec) + helpers.PanicErr(err) + + // Put key in ECDSA format + if strings.HasPrefix(*registerKeyUncompressedPubKey, "0x") { + 
*registerKeyUncompressedPubKey = strings.Replace(*registerKeyUncompressedPubKey, "0x", "04", 1) + } + + v2scripts.RegisterCoordinatorProvingKey(e, *coordinator, *registerKeyUncompressedPubKey, *registerKeyOracleAddress) + case "coordinator-deregister-key": + coordinatorDeregisterKey := flag.NewFlagSet("coordinator-deregister-key", flag.ExitOnError) + deregisterKeyAddress := coordinatorDeregisterKey.String("address", "", "coordinator address") + deregisterKeyUncompressedPubKey := coordinatorDeregisterKey.String("pubkey", "", "uncompressed pubkey") + helpers.ParseArgs(coordinatorDeregisterKey, os.Args[2:], "address", "pubkey") + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*deregisterKeyAddress), e.Ec) + helpers.PanicErr(err) + + // Put key in ECDSA format + if strings.HasPrefix(*deregisterKeyUncompressedPubKey, "0x") { + *deregisterKeyUncompressedPubKey = strings.Replace(*deregisterKeyUncompressedPubKey, "0x", "04", 1) + } + pubBytes, err := hex.DecodeString(*deregisterKeyUncompressedPubKey) + helpers.PanicErr(err) + pk, err := crypto.UnmarshalPubkey(pubBytes) + helpers.PanicErr(err) + tx, err := coordinator.DeregisterProvingKey(e.Owner, [2]*big.Int{pk.X, pk.Y}) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "coordinator-subscription": + coordinatorSub := flag.NewFlagSet("coordinator-subscription", flag.ExitOnError) + address := coordinatorSub.String("address", "", "coordinator address") + subID := coordinatorSub.Int64("sub-id", 0, "sub-id") + helpers.ParseArgs(coordinatorSub, os.Args[2:], "address", "sub-id") + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*address), e.Ec) + helpers.PanicErr(err) + fmt.Println("sub-id", *subID, "address", *address, coordinator.Address()) + s, err := coordinator.GetSubscription(nil, uint64(*subID)) + helpers.PanicErr(err) + fmt.Printf("Subscription %+v\n", s) + case "consumer-deploy": + consumerDeployCmd := 
flag.NewFlagSet("consumer-deploy", flag.ExitOnError) + consumerCoordinator := consumerDeployCmd.String("coordinator-address", "", "coordinator address") + keyHash := consumerDeployCmd.String("key-hash", "", "key hash") + consumerLinkAddress := consumerDeployCmd.String("link-address", "", "link-address") + // TODO: add other params + helpers.ParseArgs(consumerDeployCmd, os.Args[2:], "coordinator-address", "key-hash", "link-address") + keyHashBytes := common.HexToHash(*keyHash) + _, tx, _, err := vrf_single_consumer_example.DeployVRFSingleConsumerExample( + e.Owner, + e.Ec, + common.HexToAddress(*consumerCoordinator), + common.HexToAddress(*consumerLinkAddress), + uint32(1000000), // gas callback + uint16(5), // confs + uint32(1), // words + keyHashBytes) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + case "consumer-subscribe": + consumerSubscribeCmd := flag.NewFlagSet("consumer-subscribe", flag.ExitOnError) + consumerSubscribeAddress := consumerSubscribeCmd.String("address", "", "consumer address") + helpers.ParseArgs(consumerSubscribeCmd, os.Args[2:], "address") + consumer, err := vrf_single_consumer_example.NewVRFSingleConsumerExample(common.HexToAddress(*consumerSubscribeAddress), e.Ec) + helpers.PanicErr(err) + tx, err := consumer.Subscribe(e.Owner) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "link-balance": + linkBalanceCmd := flag.NewFlagSet("link-balance", flag.ExitOnError) + linkAddress := linkBalanceCmd.String("link-address", "", "link-address") + address := linkBalanceCmd.String("address", "", "address") + helpers.ParseArgs(linkBalanceCmd, os.Args[2:], "link-address", "address") + lt, err := link_token_interface.NewLinkToken(common.HexToAddress(*linkAddress), e.Ec) + helpers.PanicErr(err) + b, err := lt.BalanceOf(nil, common.HexToAddress(*address)) + helpers.PanicErr(err) + fmt.Println(b) + case "consumer-cancel": + consumerCancelCmd := 
flag.NewFlagSet("consumer-cancel", flag.ExitOnError) + consumerCancelAddress := consumerCancelCmd.String("address", "", "consumer address") + helpers.ParseArgs(consumerCancelCmd, os.Args[2:], "address") + consumer, err := vrf_single_consumer_example.NewVRFSingleConsumerExample(common.HexToAddress(*consumerCancelAddress), e.Ec) + helpers.PanicErr(err) + tx, err := consumer.Unsubscribe(e.Owner, e.Owner.From) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "consumer-topup": + // NOTE NEED TO FUND CONSUMER WITH PLI FIRST + consumerTopupCmd := flag.NewFlagSet("consumer-topup", flag.ExitOnError) + consumerTopupAmount := consumerTopupCmd.String("amount", "", "amount in juels") + consumerTopupAddress := consumerTopupCmd.String("address", "", "consumer address") + helpers.ParseArgs(consumerTopupCmd, os.Args[2:], "amount", "address") + consumer, err := vrf_single_consumer_example.NewVRFSingleConsumerExample(common.HexToAddress(*consumerTopupAddress), e.Ec) + helpers.PanicErr(err) + amount, s := big.NewInt(0).SetString(*consumerTopupAmount, 10) + if !s { + panic(fmt.Sprintf("failed to parse top up amount '%s'", *consumerTopupAmount)) + } + tx, err := consumer.TopUpSubscription(e.Owner, amount) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "consumer-request": + consumerRequestCmd := flag.NewFlagSet("consumer-request", flag.ExitOnError) + consumerRequestAddress := consumerRequestCmd.String("address", "", "consumer address") + helpers.ParseArgs(consumerRequestCmd, os.Args[2:], "address") + consumer, err := vrf_single_consumer_example.NewVRFSingleConsumerExample(common.HexToAddress(*consumerRequestAddress), e.Ec) + helpers.PanicErr(err) + tx, err := consumer.RequestRandomWords(e.Owner) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "consumer-fund-and-request": + consumerRequestCmd := flag.NewFlagSet("consumer-request", 
flag.ExitOnError) + consumerRequestAddress := consumerRequestCmd.String("address", "", "consumer address") + helpers.ParseArgs(consumerRequestCmd, os.Args[2:], "address") + consumer, err := vrf_single_consumer_example.NewVRFSingleConsumerExample(common.HexToAddress(*consumerRequestAddress), e.Ec) + helpers.PanicErr(err) + // Fund and request 3 pli + tx, err := consumer.FundAndRequestRandomWords(e.Owner, big.NewInt(3000000000000000000)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "consumer-print": + consumerPrint := flag.NewFlagSet("consumer-print", flag.ExitOnError) + address := consumerPrint.String("address", "", "consumer address") + helpers.ParseArgs(consumerPrint, os.Args[2:], "address") + consumer, err := vrf_single_consumer_example.NewVRFSingleConsumerExample(common.HexToAddress(*address), e.Ec) + helpers.PanicErr(err) + rc, err := consumer.SRequestConfig(nil) + helpers.PanicErr(err) + rw, err := consumer.SRandomWords(nil, big.NewInt(0)) + if err != nil { + fmt.Println("no words") + } + rid, err := consumer.SRequestId(nil) + helpers.PanicErr(err) + fmt.Printf("Request config %+v Rw %+v Rid %+v\n", rc, rw, rid) + case "deploy-universe": + v2scripts.DeployUniverseViaCLI(e) + case "eoa-consumer-deploy": + consumerDeployCmd := flag.NewFlagSet("eoa-consumer-deploy", flag.ExitOnError) + consumerCoordinator := consumerDeployCmd.String("coordinator-address", "", "coordinator address") + consumerLinkAddress := consumerDeployCmd.String("link-address", "", "link-address") + helpers.ParseArgs(consumerDeployCmd, os.Args[2:], "coordinator-address", "link-address") + + v2scripts.EoaDeployConsumer(e, *consumerCoordinator, *consumerLinkAddress) + case "eoa-load-test-consumer-deploy": + loadTestConsumerDeployCmd := flag.NewFlagSet("eoa-load-test-consumer-deploy", flag.ExitOnError) + consumerCoordinator := loadTestConsumerDeployCmd.String("coordinator-address", "", "coordinator address") + consumerLinkAddress := 
loadTestConsumerDeployCmd.String("link-address", "", "link-address") + helpers.ParseArgs(loadTestConsumerDeployCmd, os.Args[2:], "coordinator-address", "link-address") + _, tx, _, err := vrf_load_test_external_sub_owner.DeployVRFLoadTestExternalSubOwner( + e.Owner, + e.Ec, + common.HexToAddress(*consumerCoordinator), + common.HexToAddress(*consumerLinkAddress)) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + case "eoa-load-test-consumer-with-metrics-deploy": + loadTestConsumerDeployCmd := flag.NewFlagSet("eoa-load-test-consumer-with-metrics-deploy", flag.ExitOnError) + consumerCoordinator := loadTestConsumerDeployCmd.String("coordinator-address", "", "coordinator address") + helpers.ParseArgs(loadTestConsumerDeployCmd, os.Args[2:], "coordinator-address") + v2scripts.EoaLoadTestConsumerWithMetricsDeploy(e, *consumerCoordinator) + case "eoa-vrf-owner-test-consumer-deploy": + loadTestConsumerDeployCmd := flag.NewFlagSet("eoa-vrf-owner-test-consumer-deploy", flag.ExitOnError) + consumerCoordinator := loadTestConsumerDeployCmd.String("coordinator-address", "", "coordinator address") + consumerLinkAddress := loadTestConsumerDeployCmd.String("link-address", "", "link-address") + + helpers.ParseArgs(loadTestConsumerDeployCmd, os.Args[2:], "coordinator-address", "link-address") + + _, tx, _, err := vrf_owner_test_consumer.DeployVRFV2OwnerTestConsumer( + e.Owner, + e.Ec, + common.HexToAddress(*consumerCoordinator), + common.HexToAddress(*consumerLinkAddress), + ) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + + case "eoa-create-sub": + createSubCmd := flag.NewFlagSet("eoa-create-sub", flag.ExitOnError) + coordinatorAddress := createSubCmd.String("coordinator-address", "", "coordinator address") + helpers.ParseArgs(createSubCmd, os.Args[2:], "coordinator-address") + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), 
e.Ec) + helpers.PanicErr(err) + v2scripts.EoaCreateSub(e, *coordinator) + case "eoa-add-sub-consumer": + addSubConsCmd := flag.NewFlagSet("eoa-add-sub-consumer", flag.ExitOnError) + coordinatorAddress := addSubConsCmd.String("coordinator-address", "", "coordinator address") + subID := addSubConsCmd.Uint64("sub-id", 0, "sub-id") + consumerAddress := addSubConsCmd.String("consumer-address", "", "consumer address") + helpers.ParseArgs(addSubConsCmd, os.Args[2:], "coordinator-address", "sub-id", "consumer-address") + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + v2scripts.EoaAddConsumerToSub(e, *coordinator, *subID, *consumerAddress) + case "eoa-create-fund-authorize-sub": + // Lets just treat the owner key as the EOA controlling the sub + cfaSubCmd := flag.NewFlagSet("eoa-create-fund-authorize-sub", flag.ExitOnError) + coordinatorAddress := cfaSubCmd.String("coordinator-address", "", "coordinator address") + amountStr := cfaSubCmd.String("amount", "", "amount to fund in juels") + consumerAddress := cfaSubCmd.String("consumer-address", "", "consumer address") + consumerLinkAddress := cfaSubCmd.String("link-address", "", "link-address") + helpers.ParseArgs(cfaSubCmd, os.Args[2:], "coordinator-address", "amount", "consumer-address", "link-address") + amount, s := big.NewInt(0).SetString(*amountStr, 10) + if !s { + panic(fmt.Sprintf("failed to parse top up amount '%s'", *amountStr)) + } + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + fmt.Println(amount, consumerLinkAddress) + txcreate, err := coordinator.CreateSubscription(e.Owner) + helpers.PanicErr(err) + fmt.Println("Create sub", "TX", helpers.ExplorerLink(e.ChainID, txcreate.Hash())) + helpers.ConfirmTXMined(context.Background(), e.Ec, txcreate, e.ChainID) + sub := make(chan *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated) + subscription, 
err := coordinator.WatchSubscriptionCreated(nil, sub, nil) + helpers.PanicErr(err) + defer subscription.Unsubscribe() + created := <-sub + linkToken, err := link_token_interface.NewLinkToken(common.HexToAddress(*consumerLinkAddress), e.Ec) + helpers.PanicErr(err) + bal, err := linkToken.BalanceOf(nil, e.Owner.From) + helpers.PanicErr(err) + fmt.Println("OWNER BALANCE", bal, e.Owner.From.String(), amount.String()) + b, err := evmutils.ABIEncode(`[{"type":"uint64"}]`, created.SubId) + helpers.PanicErr(err) + e.Owner.GasLimit = 500000 + tx, err := linkToken.TransferAndCall(e.Owner, coordinator.Address(), amount, b) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, fmt.Sprintf("Sub id: %d", created.SubId)) + subFunded := make(chan *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded) + fundSub, err := coordinator.WatchSubscriptionFunded(nil, subFunded, []uint64{created.SubId}) + helpers.PanicErr(err) + defer fundSub.Unsubscribe() + <-subFunded // Add a consumer once its funded + txadd, err := coordinator.AddConsumer(e.Owner, created.SubId, common.HexToAddress(*consumerAddress)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, txadd, e.ChainID) + case "eoa-request": + request := flag.NewFlagSet("eoa-request", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + subID := request.Uint64("sub-id", 0, "subscription ID") + cbGasLimit := request.Uint("cb-gas-limit", 1_000_000, "callback gas limit") + requestConfirmations := request.Uint("request-confirmations", 3, "minimum request confirmations") + numWords := request.Uint("num-words", 3, "number of words to request") + keyHash := request.String("key-hash", "", "key hash") + helpers.ParseArgs(request, os.Args[2:], "consumer-address", "sub-id", "key-hash") + keyHashBytes := common.HexToHash(*keyHash) + consumer, err := vrf_external_sub_owner_example.NewVRFExternalSubOwnerExample( + 
common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + tx, err := consumer.RequestRandomWords(e.Owner, *subID, uint32(*cbGasLimit), uint16(*requestConfirmations), uint32(*numWords), keyHashBytes) + helpers.PanicErr(err) + fmt.Println("TX", helpers.ExplorerLink(e.ChainID, tx.Hash())) + r, err := bind.WaitMined(context.Background(), e.Ec, tx) + helpers.PanicErr(err) + fmt.Println("Receipt blocknumber:", r.BlockNumber) + case "eoa-load-test-read": + cmd := flag.NewFlagSet("eoa-load-test-read", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "consumer address") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + consumer, err := vrf_load_test_external_sub_owner.NewVRFLoadTestExternalSubOwner( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + rc, err := consumer.SResponseCount(nil) + helpers.PanicErr(err) + fmt.Println("load tester", *consumerAddress, "response count:", rc) + case "eoa-load-test-request": + request := flag.NewFlagSet("eoa-load-test-request", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + subID := request.Uint64("sub-id", 0, "subscription ID") + requestConfirmations := request.Uint("request-confirmations", 3, "minimum request confirmations") + keyHash := request.String("key-hash", "", "key hash") + requests := request.Uint("requests", 10, "number of randomness requests to make per run") + runs := request.Uint("runs", 1, "number of runs to do. 
total randomness requests will be (requests * runs).") + helpers.ParseArgs(request, os.Args[2:], "consumer-address", "sub-id", "key-hash") + keyHashBytes := common.HexToHash(*keyHash) + consumer, err := vrf_load_test_external_sub_owner.NewVRFLoadTestExternalSubOwner( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + var txes []*types.Transaction + for i := 0; i < int(*runs); i++ { + tx, err := consumer.RequestRandomWords(e.Owner, *subID, uint16(*requestConfirmations), + keyHashBytes, uint16(*requests)) + helpers.PanicErr(err) + fmt.Printf("TX %d: %s\n", i+1, helpers.ExplorerLink(e.ChainID, tx.Hash())) + txes = append(txes, tx) + } + fmt.Println("Total number of requests sent:", (*requests)*(*runs)) + fmt.Println("fetching receipts for all transactions") + for i, tx := range txes { + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, fmt.Sprintf("load test %d", i+1)) + } + case "eoa-vrf-owner-test-request": + request := flag.NewFlagSet("eoa-eoa-vrf-owner-test-request", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + requestConfirmations := request.Uint("request-confirmations", 3, "minimum request confirmations") + keyHash := request.String("key-hash", "", "key hash") + cbGasLimit := request.Uint("cb-gas-limit", 100_000, "request callback gas limit") + numWords := request.Uint("num-words", 1, "num words to request") + requests := request.Uint("requests", 1, "number of randomness requests to make per run") + runs := request.Uint("runs", 1, "number of runs to do. 
total randomness requests will be (requests * runs).") + subFundingAmountJuels := request.String("sub-funding-amount-juels", "0", "amount of Juels to fund subscription with") + vrfOwnerAddress := request.String("vrf-owner-address", "", "vrf owner address") + linkAddress := request.String("link-address", "", "link-address") + + helpers.ParseArgs(request, os.Args[2:], "consumer-address", "key-hash") + keyHashBytes := common.HexToHash(*keyHash) + + link, err := link_token_interface.NewLinkToken(common.HexToAddress(*linkAddress), e.Ec) + helpers.PanicErr(err) + + linkTransferTX, err := link.Transfer(e.Owner, common.HexToAddress(*consumerAddress), decimal.RequireFromString(*subFundingAmountJuels).BigInt()) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, linkTransferTX, e.ChainID, "transfer", *subFundingAmountJuels, "juels to", *consumerAddress) + + consumerBalanceJuels, err := link.BalanceOf(nil, common.HexToAddress(*consumerAddress)) + helpers.PanicErr(err) + fmt.Println("Consumer Balance:", consumerBalanceJuels.String(), "juels") + + consumer, err := vrf_owner_test_consumer.NewVRFV2OwnerTestConsumer( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + var txes []*types.Transaction + for i := 0; i < int(*runs); i++ { + requestRandTX, errRequestRandomWords := consumer.RequestRandomWords( + e.Owner, + uint16(*requestConfirmations), + keyHashBytes, + uint32(*cbGasLimit), + uint32(*numWords), + uint16(*requests), + decimal.RequireFromString(*subFundingAmountJuels).BigInt(), + ) + helpers.PanicErr(errRequestRandomWords) + fmt.Printf("TX %d: %s\n", i+1, helpers.ExplorerLink(e.ChainID, requestRandTX.Hash())) + txes = append(txes, requestRandTX) + } + + coordinatorAddress, err := consumer.COORDINATOR(nil) + helpers.PanicErr(err) + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(coordinatorAddress, e.Ec) + helpers.PanicErr(err) + + coordinatorOwnerAddress, err := coordinator.Owner(nil) + helpers.PanicErr(err) + + 
fmt.Println("Actual Coordinator Owner Address:", coordinatorOwnerAddress.String()) + fmt.Println("Provided VRF Owner Address:", *vrfOwnerAddress) + if coordinatorOwnerAddress.String() != *vrfOwnerAddress { + panic("Actual Coordinator Owner and provided Coordinator Owner Addresses does not match") + } + + var subId uint64 + var wg sync.WaitGroup + + wg.Add(1) + + go func() { + subCreatedChan := make(chan *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated) + subCreatedSubscription, errw := coordinator.WatchSubscriptionCreated(nil, subCreatedChan, nil) + helpers.PanicErr(errw) + defer subCreatedSubscription.Unsubscribe() + subscriptionCreatedEvent := <-subCreatedChan + subId = subscriptionCreatedEvent.SubId + fmt.Println("VRF Owner Test Consumer's Sub ID:", subId) + defer wg.Done() + }() + + wg.Wait() + + totalNumberOfRequests := (*requests) * (*runs) + fmt.Println("Total number of requests sent:", totalNumberOfRequests) + fmt.Println("fetching receipts for all transactions") + + var receipt *types.Receipt + for i, tx := range txes { + receipt = helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, fmt.Sprintf("load test %d", i+1)) + } + blockNumber := receipt.BlockNumber.Uint64() + fmt.Println("subId", subId) + subFundedIterator, err := coordinator.FilterSubscriptionFunded(&bind.FilterOpts{End: &blockNumber}, []uint64{subId}) + helpers.PanicErr(err) + + if !subFundedIterator.Next() { + panic("Sub Funded Event not found") + } + + fmt.Println("Sub Funded, sub ID:", subFundedIterator.Event.SubId, ", Old Balance:", subFundedIterator.Event.OldBalance, ", New Balance:", subFundedIterator.Event.NewBalance) + case "eoa-vrf-owner-test-read-metrics": + request := flag.NewFlagSet("eoa-load-test-read-metrics", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + helpers.ParseArgs(request, os.Args[2:], "consumer-address") + metrics := getVRFOwnerTestConsumerMetrics(*consumerAddress, e) + printLoadTestMetrics(metrics) 
+ + case "eoa-vrf-owner-test-reset-metrics": + request := flag.NewFlagSet("eoa-vrf-owner-test-reset-metrics", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + helpers.ParseArgs(request, os.Args[2:], "consumer-address") + consumer, err := vrf_owner_test_consumer.NewVRFV2OwnerTestConsumer( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + _, err = consumer.Reset(e.Owner) + helpers.PanicErr(err) + fmt.Println("Load Test Consumer With Metrics was reset ") + case "eoa-load-test-request-with-metrics": + request := flag.NewFlagSet("eoa-load-test-request-with-metrics", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + subID := request.Uint64("sub-id", 0, "subscription ID") + requestConfirmations := request.Uint("request-confirmations", 3, "minimum request confirmations") + keyHash := request.String("key-hash", "", "key hash") + cbGasLimit := request.Uint("cb-gas-limit", 100_000, "request callback gas limit") + numWords := request.Uint("num-words", 1, "num words to request") + requests := request.Uint("requests", 10, "number of randomness requests to make per run") + runs := request.Uint("runs", 1, "number of runs to do. 
total randomness requests will be (requests * runs).") + helpers.ParseArgs(request, os.Args[2:], "consumer-address", "sub-id", "key-hash") + keyHashBytes := common.HexToHash(*keyHash) + consumer, err := vrf_load_test_with_metrics.NewVRFV2LoadTestWithMetrics( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + var txes []*types.Transaction + for i := 0; i < int(*runs); i++ { + tx, err := consumer.RequestRandomWords( + e.Owner, + *subID, + uint16(*requestConfirmations), + keyHashBytes, + uint32(*cbGasLimit), + uint32(*numWords), + uint16(*requests), + ) + helpers.PanicErr(err) + fmt.Printf("TX %d: %s\n", i+1, helpers.ExplorerLink(e.ChainID, tx.Hash())) + txes = append(txes, tx) + } + fmt.Println("Total number of requests sent:", (*requests)*(*runs)) + fmt.Println("fetching receipts for all transactions") + for i, tx := range txes { + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, fmt.Sprintf("load test %d", i+1)) + } + case "eoa-load-test-read-metrics": + request := flag.NewFlagSet("eoa-load-test-read-metrics", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + helpers.ParseArgs(request, os.Args[2:], "consumer-address") + consumer, err := vrf_load_test_with_metrics.NewVRFV2LoadTestWithMetrics( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + responseCount, err := consumer.SResponseCount(nil) + helpers.PanicErr(err) + fmt.Println("Response Count: ", responseCount) + requestCount, err := consumer.SRequestCount(nil) + helpers.PanicErr(err) + fmt.Println("Request Count: ", requestCount) + averageFulfillmentInMillions, err := consumer.SAverageFulfillmentInMillions(nil) + helpers.PanicErr(err) + fmt.Println("Average Fulfillment In Millions: ", averageFulfillmentInMillions) + slowestFulfillment, err := consumer.SSlowestFulfillment(nil) + helpers.PanicErr(err) + fmt.Println("Slowest Fulfillment: ", slowestFulfillment) + fastestFulfillment, err := 
consumer.SFastestFulfillment(nil) + helpers.PanicErr(err) + fmt.Println("Fastest Fulfillment: ", fastestFulfillment) + case "eoa-load-test-reset-metrics": + request := flag.NewFlagSet("eoa-load-test-reset-metrics", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + helpers.ParseArgs(request, os.Args[2:], "consumer-address") + consumer, err := vrf_load_test_with_metrics.NewVRFV2LoadTestWithMetrics( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + _, err = consumer.Reset(e.Owner) + helpers.PanicErr(err) + fmt.Println("Load Test Consumer With Metrics was reset ") + case "eoa-transfer-sub": + trans := flag.NewFlagSet("eoa-transfer-sub", flag.ExitOnError) + coordinatorAddress := trans.String("coordinator-address", "", "coordinator address") + subID := trans.Int64("sub-id", 0, "sub-id") + to := trans.String("to", "", "to") + helpers.ParseArgs(trans, os.Args[2:], "coordinator-address", "sub-id", "to") + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + tx, err := coordinator.RequestSubscriptionOwnerTransfer(e.Owner, uint64(*subID), common.HexToAddress(*to)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "eoa-accept-sub": + accept := flag.NewFlagSet("eoa-accept-sub", flag.ExitOnError) + coordinatorAddress := accept.String("coordinator-address", "", "coordinator address") + subID := accept.Int64("sub-id", 0, "sub-id") + helpers.ParseArgs(accept, os.Args[2:], "coordinator-address", "sub-id") + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + tx, err := coordinator.AcceptSubscriptionOwnerTransfer(e.Owner, uint64(*subID)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "eoa-cancel-sub": + cancel := flag.NewFlagSet("eoa-cancel-sub", 
flag.ExitOnError) + coordinatorAddress := cancel.String("coordinator-address", "", "coordinator address") + subID := cancel.Int64("sub-id", 0, "sub-id") + helpers.ParseArgs(cancel, os.Args[2:], "coordinator-address", "sub-id") + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + tx, err := coordinator.CancelSubscription(e.Owner, uint64(*subID), e.Owner.From) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "eoa-fund-sub": + fund := flag.NewFlagSet("eoa-fund-sub", flag.ExitOnError) + coordinatorAddress := fund.String("coordinator-address", "", "coordinator address") + amountStr := fund.String("amount", "", "amount to fund in juels") + subID := fund.Int64("sub-id", 0, "sub-id") + consumerLinkAddress := fund.String("link-address", "", "link-address") + helpers.ParseArgs(fund, os.Args[2:], "coordinator-address", "amount", "sub-id", "link-address") + amount, s := big.NewInt(0).SetString(*amountStr, 10) + if !s { + panic(fmt.Sprintf("failed to parse top up amount '%s'", *amountStr)) + } + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + + v2scripts.EoaFundSubscription(e, *coordinator, *consumerLinkAddress, amount, uint64(*subID)) + case "eoa-read": + cmd := flag.NewFlagSet("eoa-read", flag.ExitOnError) + consumerAddress := cmd.String("consumer", "", "consumer address") + helpers.ParseArgs(cmd, os.Args[2:], "consumer") + consumer, err := vrf_external_sub_owner_example.NewVRFExternalSubOwnerExample(common.HexToAddress(*consumerAddress), e.Ec) + helpers.PanicErr(err) + word, err := consumer.SRandomWords(nil, big.NewInt(0)) + if err != nil { + fmt.Println("no words (yet?)") + } + reqID, err := consumer.SRequestId(nil) + helpers.PanicErr(err) + fmt.Println("request id:", reqID.String(), "1st random word:", word) + case "owner-cancel-sub": + cancel := 
flag.NewFlagSet("owner-cancel-sub", flag.ExitOnError) + coordinatorAddress := cancel.String("coordinator-address", "", "coordinator address") + subID := cancel.Int64("sub-id", 0, "sub-id") + helpers.ParseArgs(cancel, os.Args[2:], "coordinator-address", "sub-id") + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + tx, err := coordinator.OwnerCancelSubscription(e.Owner, uint64(*subID)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "sub-balance": + consumerBalanceCmd := flag.NewFlagSet("sub-balance", flag.ExitOnError) + coordinatorAddress := consumerBalanceCmd.String("coordinator-address", "", "coordinator address") + subID := consumerBalanceCmd.Uint64("sub-id", 0, "subscription id") + helpers.ParseArgs(consumerBalanceCmd, os.Args[2:], "coordinator-address", "sub-id") + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + resp, err := coordinator.GetSubscription(nil, *subID) + helpers.PanicErr(err) + fmt.Println("sub id", *subID, "balance:", resp.Balance) + case "coordinator-withdrawable-tokens": + withdrawableTokensCmd := flag.NewFlagSet("coordinator-withdrawable-tokens", flag.ExitOnError) + coordinator := withdrawableTokensCmd.String("coordinator-address", "", "coordinator address") + oracle := withdrawableTokensCmd.String("oracle-address", "", "oracle address") + start := withdrawableTokensCmd.Int("start-link", 10_000, "the starting amount of PLI to check") + helpers.ParseArgs(withdrawableTokensCmd, os.Args[2:], "coordinator-address", "oracle-address") + + coordinatorAddress := common.HexToAddress(*coordinator) + oracleAddress := common.HexToAddress(*oracle) + abi, err := vrf_coordinator_v2.VRFCoordinatorV2MetaData.GetAbi() + helpers.PanicErr(err) + + isWithdrawable := func(amount *big.Int) bool { + data, err := abi.Pack("oracleWithdraw", oracleAddress 
/* this can be any address */, amount) + helpers.PanicErr(err) + + _, err = e.Ec.CallContract(context.Background(), ethereum.CallMsg{ + From: oracleAddress, + To: &coordinatorAddress, + Data: data, + }, nil) + if err == nil { + return true + } else if strings.Contains(err.Error(), "execution reverted") { + return false + } + panic(err) + } + + result := helpers.BinarySearch(assets.Ether(int64(*start*2)).ToInt(), big.NewInt(0), isWithdrawable) + + fmt.Printf("Withdrawable amount for oracle %s is %s\n", oracleAddress.String(), result.String()) + case "coordinator-transfer-ownership": + cmd := flag.NewFlagSet("coordinator-transfer-ownership", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "v2 coordinator address") + newOwner := cmd.String("new-owner", "", "new owner address") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "new-owner") + + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + + tx, err := coordinator.TransferOwnership(e.Owner, common.HexToAddress(*newOwner)) + helpers.PanicErr(err) + + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "transfer ownership to", *newOwner) + case "vrf-owner-deploy": + cmd := flag.NewFlagSet("vrf-owner-deploy", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "v2 coordinator address") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address") + + _, tx, _, err := vrf_owner.DeployVRFOwner(e.Owner, e.Ec, common.HexToAddress(*coordinatorAddress)) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + case "vrf-owner-set-authorized-senders": + cmd := flag.NewFlagSet("vrf-owner-set-authorized-senders", flag.ExitOnError) + vrfOwnerAddress := cmd.String("vrf-owner-address", "", "vrf owner address") + authorizedSenders := cmd.String("authorized-senders", "", "comma separated list of authorized senders") + 
helpers.ParseArgs(cmd, os.Args[2:], "vrf-owner-address", "authorized-senders") + + vrfOwner, err := vrf_owner.NewVRFOwner(common.HexToAddress(*vrfOwnerAddress), e.Ec) + helpers.PanicErr(err) + + authorizedSendersSlice := helpers.ParseAddressSlice(*authorizedSenders) + + tx, err := vrfOwner.SetAuthorizedSenders(e.Owner, authorizedSendersSlice) + helpers.PanicErr(err) + + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "vrf owner set authorized senders") + case "vrf-owner-accept-vrf-ownership": + cmd := flag.NewFlagSet("vrf-owner-accept-vrf-ownership", flag.ExitOnError) + vrfOwnerAddress := cmd.String("vrf-owner-address", "", "vrf owner address") + helpers.ParseArgs(cmd, os.Args[2:], "vrf-owner-address") + + vrfOwner, err := vrf_owner.NewVRFOwner(common.HexToAddress(*vrfOwnerAddress), e.Ec) + helpers.PanicErr(err) + + tx, err := vrfOwner.AcceptVRFOwnership(e.Owner) + helpers.PanicErr(err) + + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "vrf owner accepting vrf ownership") + case "coordinator-reregister-proving-key": + coordinatorReregisterKey := flag.NewFlagSet("coordinator-register-key", flag.ExitOnError) + coordinatorAddress := coordinatorReregisterKey.String("coordinator-address", "", "coordinator address") + uncompressedPubKey := coordinatorReregisterKey.String("pubkey", "", "uncompressed pubkey") + newOracleAddress := coordinatorReregisterKey.String("new-oracle-address", "", "oracle address") + skipDeregister := coordinatorReregisterKey.Bool("skip-deregister", false, "if true, key will not be deregistered") + helpers.ParseArgs(coordinatorReregisterKey, os.Args[2:], "coordinator-address", "pubkey", "new-oracle-address") + + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + + // Put key in ECDSA format + if strings.HasPrefix(*uncompressedPubKey, "0x") { + *uncompressedPubKey = strings.Replace(*uncompressedPubKey, "0x", "04", 1) + } + pubBytes, 
err := hex.DecodeString(*uncompressedPubKey) + helpers.PanicErr(err) + pk, err := crypto.UnmarshalPubkey(pubBytes) + helpers.PanicErr(err) + + var deregisterTx *types.Transaction + if !*skipDeregister { + deregisterTx, err = coordinator.DeregisterProvingKey(e.Owner, [2]*big.Int{pk.X, pk.Y}) + helpers.PanicErr(err) + fmt.Println("Deregister transaction", helpers.ExplorerLink(e.ChainID, deregisterTx.Hash())) + } + + // Use a higher gas price for the register call + e.Owner.GasPrice.Mul(e.Owner.GasPrice, big.NewInt(2)) + registerTx, err := coordinator.RegisterProvingKey(e.Owner, + common.HexToAddress(*newOracleAddress), + [2]*big.Int{pk.X, pk.Y}) + helpers.PanicErr(err) + fmt.Println("Register transaction", helpers.ExplorerLink(e.ChainID, registerTx.Hash())) + + if !*skipDeregister { + fmt.Println("Waiting for deregister transaction to be mined...") + var deregisterReceipt *types.Receipt + deregisterReceipt, err = bind.WaitMined(context.Background(), e.Ec, deregisterTx) + helpers.PanicErr(err) + fmt.Printf("Deregister transaction included in block %s\n", deregisterReceipt.BlockNumber.String()) + } + + fmt.Println("Waiting for register transaction to be mined...") + registerReceipt, err := bind.WaitMined(context.Background(), e.Ec, registerTx) + helpers.PanicErr(err) + fmt.Printf("Register transaction included in block %s\n", registerReceipt.BlockNumber.String()) + case "wrapper-deploy": + cmd := flag.NewFlagSet("wrapper-deploy", flag.ExitOnError) + linkAddress := cmd.String("link-address", "", "address of link token") + linkETHFeedAddress := cmd.String("link-eth-feed", "", "address of link-eth-feed") + coordinatorAddress := cmd.String("coordinator-address", "", "address of the vrf coordinator v2 contract") + helpers.ParseArgs(cmd, os.Args[2:], "link-address", "link-eth-feed", "coordinator-address") + v2scripts.WrapperDeploy(e, + common.HexToAddress(*linkAddress), + common.HexToAddress(*linkETHFeedAddress), + common.HexToAddress(*coordinatorAddress)) + case 
"wrapper-withdraw": + cmd := flag.NewFlagSet("wrapper-withdraw", flag.ExitOnError) + wrapperAddress := cmd.String("wrapper-address", "", "address of the VRFV2Wrapper contract") + recipientAddress := cmd.String("recipient-address", "", "address to withdraw to") + linkAddress := cmd.String("link-address", "", "address of link token") + helpers.ParseArgs(cmd, os.Args[2:], "wrapper-address", "recipient-address", "link-address") + wrapper, err := vrfv2_wrapper.NewVRFV2Wrapper(common.HexToAddress(*wrapperAddress), e.Ec) + helpers.PanicErr(err) + link, err := link_token_interface.NewLinkToken(common.HexToAddress(*linkAddress), e.Ec) + helpers.PanicErr(err) + balance, err := link.BalanceOf(nil, common.HexToAddress(*wrapperAddress)) + helpers.PanicErr(err) + tx, err := wrapper.Withdraw(e.Owner, common.HexToAddress(*recipientAddress), balance) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "withdrawing", balance.String(), "Juels from", *wrapperAddress, "to", *recipientAddress) + case "wrapper-get-subscription-id": + cmd := flag.NewFlagSet("wrapper-get-subscription-id", flag.ExitOnError) + wrapperAddress := cmd.String("wrapper-address", "", "address of the VRFV2Wrapper contract") + helpers.ParseArgs(cmd, os.Args[2:], "wrapper-address") + wrapper, err := vrfv2_wrapper.NewVRFV2Wrapper(common.HexToAddress(*wrapperAddress), e.Ec) + helpers.PanicErr(err) + subID, err := wrapper.SUBSCRIPTIONID(nil) + helpers.PanicErr(err) + fmt.Println("subscription id of wrapper", *wrapperAddress, "is:", subID) + case "wrapper-configure": + cmd := flag.NewFlagSet("wrapper-configure", flag.ExitOnError) + wrapperAddress := cmd.String("wrapper-address", "", "address of the VRFV2Wrapper contract") + wrapperGasOverhead := cmd.Uint("wrapper-gas-overhead", 50_000, "amount of gas overhead in wrapper fulfillment") + coordinatorGasOverhead := cmd.Uint("coordinator-gas-overhead", 52_000, "amount of gas overhead in coordinator fulfillment") + 
wrapperPremiumPercentage := cmd.Uint("wrapper-premium-percentage", 25, "gas premium charged by wrapper") + keyHash := cmd.String("key-hash", "", "the keyhash that wrapper requests should use") + maxNumWords := cmd.Uint("max-num-words", 10, "the keyhash that wrapper requests should use") + helpers.ParseArgs(cmd, os.Args[2:], "wrapper-address", "key-hash") + + v2scripts.WrapperConfigure(e, + common.HexToAddress(*wrapperAddress), + *wrapperGasOverhead, + *coordinatorGasOverhead, + *wrapperPremiumPercentage, + *keyHash, + *maxNumWords) + case "wrapper-get-fulfillment-tx-size": + cmd := flag.NewFlagSet("wrapper-get-fulfillment-tx-size", flag.ExitOnError) + wrapperAddress := cmd.String("wrapper-address", "", "address of the VRFV2Wrapper contract") + helpers.ParseArgs(cmd, os.Args[2:], "wrapper-address") + wrapper, err := vrfv2_wrapper.NewVRFV2Wrapper(common.HexToAddress(*wrapperAddress), e.Ec) + helpers.PanicErr(err) + size, err := wrapper.SFulfillmentTxSizeBytes(nil) + helpers.PanicErr(err) + fmt.Println("fulfillment tx size of wrapper", *wrapperAddress, "is:", size) + case "wrapper-set-fulfillment-tx-size": + cmd := flag.NewFlagSet("wrapper-set-fulfillment-tx-size", flag.ExitOnError) + wrapperAddress := cmd.String("wrapper-address", "", "address of the VRFV2Wrapper contract") + size := cmd.Uint("size", 0, "size of the fulfillment transaction") + helpers.ParseArgs(cmd, os.Args[2:], "wrapper-address", "size") + wrapper, err := vrfv2_wrapper.NewVRFV2Wrapper(common.HexToAddress(*wrapperAddress), e.Ec) + helpers.PanicErr(err) + tx, err := wrapper.SetFulfillmentTxSize(e.Owner, uint32(*size)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "set fulfillment tx size") + case "wrapper-consumer-deploy": + cmd := flag.NewFlagSet("wrapper-consumer-deploy", flag.ExitOnError) + linkAddress := cmd.String("link-address", "", "address of link token") + wrapperAddress := cmd.String("wrapper-address", "", "address of the VRFV2Wrapper contract") 
+ helpers.ParseArgs(cmd, os.Args[2:], "link-address", "wrapper-address") + + v2scripts.WrapperConsumerDeploy(e, + common.HexToAddress(*linkAddress), + common.HexToAddress(*wrapperAddress)) + case "wrapper-consumer-request": + cmd := flag.NewFlagSet("wrapper-consumer-request", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "address of wrapper consumer") + cbGasLimit := cmd.Uint("cb-gas-limit", 100_000, "request callback gas limit") + confirmations := cmd.Uint("request-confirmations", 3, "request confirmations") + numWords := cmd.Uint("num-words", 1, "num words to request") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + + consumer, err := vrfv2_wrapper_consumer_example.NewVRFV2WrapperConsumerExample( + common.HexToAddress(*consumerAddress), e.Ec) + helpers.PanicErr(err) + + tx, err := consumer.MakeRequest(e.Owner, uint32(*cbGasLimit), uint16(*confirmations), uint32(*numWords)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "wrapper-consumer-request-status": + cmd := flag.NewFlagSet("wrapper-consumer-request-status", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "address of wrapper consumer") + requestID := cmd.String("request-id", "", "request id of vrf request") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address", "request-id") + + consumer, err := vrfv2_wrapper_consumer_example.NewVRFV2WrapperConsumerExample( + common.HexToAddress(*consumerAddress), e.Ec) + helpers.PanicErr(err) + + status, err := consumer.GetRequestStatus(nil, decimal.RequireFromString(*requestID).BigInt()) + helpers.PanicErr(err) + + statusStringer := func(status vrfv2_wrapper_consumer_example.GetRequestStatus) string { + return fmt.Sprint("paid (juels):", status.Paid.String(), + ", fulfilled?:", status.Fulfilled, + ", random words:", status.RandomWords) + } + + fmt.Println("status for request", *requestID, "is:") + fmt.Println(statusStringer(status)) + case 
"wrapper-consumer-withdraw-link": + cmd := flag.NewFlagSet("wrapper-consumer-withdraw-link", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "address of wrapper consumer") + linkAddress := cmd.String("link-address", "", "address of link token") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + consumer, err := vrfv2_wrapper_consumer_example.NewVRFV2WrapperConsumerExample( + common.HexToAddress(*consumerAddress), e.Ec) + helpers.PanicErr(err) + link, err := link_token_interface.NewLinkToken(common.HexToAddress(*linkAddress), e.Ec) + helpers.PanicErr(err) + balance, err := link.BalanceOf(nil, common.HexToAddress(*consumerAddress)) + helpers.PanicErr(err) + tx, err := consumer.WithdrawLink(e.Owner, balance) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, + "withdrawing", balance.String(), "juels from", *consumerAddress, "to", e.Owner.From.Hex()) + case "transfer-link": + cmd := flag.NewFlagSet("transfer-link", flag.ExitOnError) + linkAddress := cmd.String("link-address", "", "address of link token") + amountJuels := cmd.String("amount-juels", "0", "amount in juels to fund") + receiverAddress := cmd.String("receiver-address", "", "address of receiver (contract or eoa)") + helpers.ParseArgs(cmd, os.Args[2:], "amount-juels", "link-address", "receiver-address") + link, err := link_token_interface.NewLinkToken(common.HexToAddress(*linkAddress), e.Ec) + helpers.PanicErr(err) + tx, err := link.Transfer(e.Owner, common.HexToAddress(*receiverAddress), decimal.RequireFromString(*amountJuels).BigInt()) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "transfer", *amountJuels, "juels to", *receiverAddress) + case "latest-block-header": + cmd := flag.NewFlagSet("latest-block-header", flag.ExitOnError) + blockNumber := cmd.Int("block-number", -1, "block number") + helpers.ParseArgs(cmd, os.Args[2:]) + _ = helpers.CalculateLatestBlockHeader(e, *blockNumber) + 
case "closest-block": + cmd := flag.NewFlagSet("closest-block", flag.ExitOnError) + blockNumber := cmd.Uint64("block-number", 0, "block number") + batchBHSAddress := cmd.String("batch-bhs-address", "", "address of the batch blockhash store") + batchSize := cmd.Uint64("batch-size", 100, "batch size") + helpers.ParseArgs(cmd, os.Args[2:], "block-number", "batch-bhs-address") + _, err := v2scripts.ClosestBlock(e, common.HexToAddress(*batchBHSAddress), *blockNumber, *batchSize) + helpers.PanicErr(err) + case "wrapper-universe-deploy": + v2scripts.DeployWrapperUniverse(e) + default: + panic("unrecognized subcommand: " + os.Args[1]) + } +} + +func getVRFOwnerTestConsumerMetrics(consumerAddress string, e helpers.Environment) LoadTestMetrics { + consumer, err := vrf_owner_test_consumer.NewVRFV2OwnerTestConsumer( + common.HexToAddress(consumerAddress), + e.Ec) + helpers.PanicErr(err) + responseCount, err := consumer.SResponseCount(nil) + helpers.PanicErr(err) + requestCount, err := consumer.SRequestCount(nil) + helpers.PanicErr(err) + averageFulfillmentInMillions, err := consumer.SAverageFulfillmentInMillions(nil) + helpers.PanicErr(err) + slowestFulfillment, err := consumer.SSlowestFulfillment(nil) + helpers.PanicErr(err) + fastestFulfillment, err := consumer.SFastestFulfillment(nil) + helpers.PanicErr(err) + + metrics := LoadTestMetrics{ + ResponseCount: responseCount, + RequestCount: requestCount, + AverageFulfillmentInMillions: averageFulfillmentInMillions, + SlowestFulfillment: slowestFulfillment, + FastestFulfillment: fastestFulfillment, + } + return metrics +} + +func printLoadTestMetrics(metrics LoadTestMetrics) { + fmt.Println("Response Count: ", metrics.ResponseCount) + fmt.Println("Request Count: ", metrics.RequestCount) + fmt.Println("Average Fulfillment In Millions: ", metrics.AverageFulfillmentInMillions) + fmt.Println("Slowest Fulfillment: ", metrics.SlowestFulfillment) + fmt.Println("Fastest Fulfillment: ", metrics.FastestFulfillment) +} + +type 
LoadTestMetrics struct { + ResponseCount *big.Int + RequestCount *big.Int + AverageFulfillmentInMillions *big.Int + SlowestFulfillment *big.Int + FastestFulfillment *big.Int +} diff --git a/core/scripts/vrfv2/testnet/v2scripts/super_scripts.go b/core/scripts/vrfv2/testnet/v2scripts/super_scripts.go new file mode 100644 index 00000000..c1849564 --- /dev/null +++ b/core/scripts/vrfv2/testnet/v2scripts/super_scripts.go @@ -0,0 +1,521 @@ +package v2scripts + +import ( + "context" + "encoding/hex" + "flag" + "fmt" + "math/big" + "os" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/shopspring/decimal" + + "github.com/goplugin/pluginv3.0/core/scripts/common/vrf/constants" + "github.com/goplugin/pluginv3.0/core/scripts/common/vrf/jobs" + "github.com/goplugin/pluginv3.0/core/scripts/common/vrf/model" + "github.com/goplugin/pluginv3.0/core/scripts/common/vrf/util" + + evmtypes "github.com/ethereum/go-ethereum/core/types" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_owner" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" +) + +type CoordinatorConfigV2 struct { + MinConfs int + MaxGasLimit int64 + StalenessSeconds int64 + GasAfterPayment int64 + FallbackWeiPerUnitLink *big.Int + FeeConfig vrf_coordinator_v2.VRFCoordinatorV2FeeConfig +} + +func DeployUniverseViaCLI(e helpers.Environment) { + deployCmd := flag.NewFlagSet("deploy-universe", flag.ExitOnError) + + // required flags + linkAddress := deployCmd.String("link-address", "", "address of link token") + linkEthAddress := deployCmd.String("link-eth-feed", "", "address of link eth feed") + bhsContractAddressString := 
deployCmd.String("bhs-address", "", "address of BHS contract") + batchBHSAddressString := deployCmd.String("batch-bhs-address", "", "address of Batch BHS contract") + coordinatorAddressString := deployCmd.String("coordinator-address", "", "address of VRF Coordinator contract") + batchCoordinatorAddressString := deployCmd.String("batch-coordinator-address", "", "address Batch VRF Coordinator contract") + + subscriptionBalanceJuelsString := deployCmd.String("subscription-balance", constants.SubscriptionBalanceJuels, "amount to fund subscription") + nodeSendingKeyFundingAmount := deployCmd.String("sending-key-funding-amount", constants.NodeSendingKeyFundingAmount, "CL node sending key funding amount") + + batchFulfillmentEnabled := deployCmd.Bool("batch-fulfillment-enabled", constants.BatchFulfillmentEnabled, "whether send randomness fulfillments in batches inside one tx from CL node") + batchFulfillmentGasMultiplier := deployCmd.Float64("batch-fulfillment-gas-multiplier", 1.1, "") + estimateGasMultiplier := deployCmd.Float64("estimate-gas-multiplier", 1.1, "") + pollPeriod := deployCmd.String("poll-period", "300ms", "") + requestTimeout := deployCmd.String("request-timeout", "30m0s", "") + revertsPipelineEnabled := deployCmd.Bool("reverts-pipeline-enabled", true, "") + + deployVRFOwner := deployCmd.Bool("deploy-vrf-owner", true, "whether to deploy VRF owner contracts") + useTestCoordinator := deployCmd.Bool("use-test-coordinator", true, "whether to use test coordinator") + simulationBlock := deployCmd.String("simulation-block", "pending", "simulation block can be 'pending' or 'latest'") + + // optional flags + fallbackWeiPerUnitLinkString := deployCmd.String("fallback-wei-per-unit-link", constants.FallbackWeiPerUnitLink.String(), "fallback wei/link ratio") + registerVRFKeyUncompressedPubKey := deployCmd.String("uncompressed-pub-key", "", "uncompressed public key") + registerVRFKeyAgainstAddress := deployCmd.String("register-vrf-key-against-address", "", "VRF Key 
registration against address - "+ + "from this address you can perform `coordinator.oracleWithdraw` to withdraw earned funds from rand request fulfilments") + + vrfPrimaryNodeSendingKeysString := deployCmd.String("vrf-primary-node-sending-keys", "", "VRF Primary Node sending keys") + + minConfs := deployCmd.Int("min-confs", constants.MinConfs, "min confs") + maxGasLimit := deployCmd.Int64("max-gas-limit", constants.MaxGasLimit, "max gas limit") + stalenessSeconds := deployCmd.Int64("staleness-seconds", constants.StalenessSeconds, "staleness in seconds") + gasAfterPayment := deployCmd.Int64("gas-after-payment", constants.GasAfterPayment, "gas after payment calculation") + flatFeeTier1 := deployCmd.Int64("flat-fee-tier-1", constants.FlatFeeTier1, "flat fee tier 1") + flatFeeTier2 := deployCmd.Int64("flat-fee-tier-2", constants.FlatFeeTier2, "flat fee tier 2") + flatFeeTier3 := deployCmd.Int64("flat-fee-tier-3", constants.FlatFeeTier3, "flat fee tier 3") + flatFeeTier4 := deployCmd.Int64("flat-fee-tier-4", constants.FlatFeeTier4, "flat fee tier 4") + flatFeeTier5 := deployCmd.Int64("flat-fee-tier-5", constants.FlatFeeTier5, "flat fee tier 5") + reqsForTier2 := deployCmd.Int64("reqs-for-tier-2", constants.ReqsForTier2, "requests for tier 2") + reqsForTier3 := deployCmd.Int64("reqs-for-tier-3", constants.ReqsForTier3, "requests for tier 3") + reqsForTier4 := deployCmd.Int64("reqs-for-tier-4", constants.ReqsForTier4, "requests for tier 4") + reqsForTier5 := deployCmd.Int64("reqs-for-tier-5", constants.ReqsForTier5, "requests for tier 5") + + if *simulationBlock != "pending" && *simulationBlock != "latest" { + helpers.PanicErr(fmt.Errorf("simulation block must be 'pending' or 'latest'")) + } + + helpers.ParseArgs( + deployCmd, os.Args[2:], + ) + + fallbackWeiPerUnitLink := decimal.RequireFromString(*fallbackWeiPerUnitLinkString).BigInt() + subscriptionBalanceJuels := decimal.RequireFromString(*subscriptionBalanceJuelsString).BigInt() + + feeConfig := 
vrf_coordinator_v2.VRFCoordinatorV2FeeConfig{ + FulfillmentFlatFeeLinkPPMTier1: uint32(*flatFeeTier1), + FulfillmentFlatFeeLinkPPMTier2: uint32(*flatFeeTier2), + FulfillmentFlatFeeLinkPPMTier3: uint32(*flatFeeTier3), + FulfillmentFlatFeeLinkPPMTier4: uint32(*flatFeeTier4), + FulfillmentFlatFeeLinkPPMTier5: uint32(*flatFeeTier5), + ReqsForTier2: big.NewInt(*reqsForTier2), + ReqsForTier3: big.NewInt(*reqsForTier3), + ReqsForTier4: big.NewInt(*reqsForTier4), + ReqsForTier5: big.NewInt(*reqsForTier5), + } + + var vrfPrimaryNodeSendingKeys []string + if len(*vrfPrimaryNodeSendingKeysString) > 0 { + vrfPrimaryNodeSendingKeys = strings.Split(*vrfPrimaryNodeSendingKeysString, ",") + } + + nodesMap := make(map[string]model.Node) + + fundingAmount := decimal.RequireFromString(*nodeSendingKeyFundingAmount).BigInt() + nodesMap[model.VRFPrimaryNodeName] = model.Node{ + SendingKeys: util.MapToSendingKeyArr(vrfPrimaryNodeSendingKeys), + SendingKeyFundingAmount: fundingAmount, + } + + bhsContractAddress := common.HexToAddress(*bhsContractAddressString) + batchBHSAddress := common.HexToAddress(*batchBHSAddressString) + coordinatorAddress := common.HexToAddress(*coordinatorAddressString) + batchCoordinatorAddress := common.HexToAddress(*batchCoordinatorAddressString) + + contractAddresses := model.ContractAddresses{ + LinkAddress: *linkAddress, + LinkEthAddress: *linkEthAddress, + BhsContractAddress: bhsContractAddress, + BatchBHSAddress: batchBHSAddress, + CoordinatorAddress: coordinatorAddress, + BatchCoordinatorAddress: batchCoordinatorAddress, + } + + coordinatorConfig := CoordinatorConfigV2{ + MinConfs: *minConfs, + MaxGasLimit: *maxGasLimit, + StalenessSeconds: *stalenessSeconds, + GasAfterPayment: *gasAfterPayment, + FallbackWeiPerUnitLink: fallbackWeiPerUnitLink, + FeeConfig: feeConfig, + } + + vrfKeyRegistrationConfig := model.VRFKeyRegistrationConfig{ + VRFKeyUncompressedPubKey: *registerVRFKeyUncompressedPubKey, + RegisterAgainstAddress: *registerVRFKeyAgainstAddress, + } 
+ + coordinatorJobSpecConfig := model.CoordinatorJobSpecConfig{ + BatchFulfillmentEnabled: *batchFulfillmentEnabled, + BatchFulfillmentGasMultiplier: *batchFulfillmentGasMultiplier, + EstimateGasMultiplier: *estimateGasMultiplier, + PollPeriod: *pollPeriod, + RequestTimeout: *requestTimeout, + RevertsPipelineEnabled: *revertsPipelineEnabled, + } + + VRFV2DeployUniverse( + e, + subscriptionBalanceJuels, + vrfKeyRegistrationConfig, + contractAddresses, + coordinatorConfig, + nodesMap, + *deployVRFOwner, + coordinatorJobSpecConfig, + *useTestCoordinator, + *simulationBlock, + ) + + vrfPrimaryNode := nodesMap[model.VRFPrimaryNodeName] + fmt.Println("Funding node's sending keys...") + for _, sendingKey := range vrfPrimaryNode.SendingKeys { + helpers.FundNode(e, sendingKey.Address, vrfPrimaryNode.SendingKeyFundingAmount) + } +} + +func VRFV2DeployUniverse( + e helpers.Environment, + subscriptionBalanceJuels *big.Int, + vrfKeyRegistrationConfig model.VRFKeyRegistrationConfig, + contractAddresses model.ContractAddresses, + coordinatorConfig CoordinatorConfigV2, + nodesMap map[string]model.Node, + deployVRFOwner bool, + coordinatorJobSpecConfig model.CoordinatorJobSpecConfig, + useTestCoordinator bool, + simulationBlock string, +) model.JobSpecs { + var compressedPkHex string + var keyHash common.Hash + if len(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey) > 0 { + // Put key in ECDSA format + if strings.HasPrefix(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey, "0x") { + vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey = strings.Replace(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey, "0x", "04", 1) + } + + // Generate compressed public key and key hash + pubBytes, err := hex.DecodeString(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey) + helpers.PanicErr(err) + pk, err := crypto.UnmarshalPubkey(pubBytes) + helpers.PanicErr(err) + var pkBytes []byte + if big.NewInt(0).Mod(pk.Y, big.NewInt(2)).Uint64() != 0 { + pkBytes = append(pk.X.Bytes(), 1) + } else { + 
pkBytes = append(pk.X.Bytes(), 0) + } + var newPK secp256k1.PublicKey + copy(newPK[:], pkBytes) + + compressedPkHex = hexutil.Encode(pkBytes) + keyHash, err = newPK.Hash() + helpers.PanicErr(err) + } + + if len(contractAddresses.LinkAddress) == 0 { + fmt.Println("\nDeploying PLI Token...") + contractAddresses.LinkAddress = helpers.DeployLinkToken(e).String() + } + + if len(contractAddresses.LinkEthAddress) == 0 { + fmt.Println("\nDeploying PLI/ETH Feed...") + contractAddresses.LinkEthAddress = helpers.DeployLinkEthFeed(e, contractAddresses.LinkAddress, coordinatorConfig.FallbackWeiPerUnitLink).String() + } + + if contractAddresses.BhsContractAddress.String() == "0x0000000000000000000000000000000000000000" { + fmt.Println("\nDeploying BHS...") + contractAddresses.BhsContractAddress = DeployBHS(e) + } + + if contractAddresses.BatchBHSAddress.String() == "0x0000000000000000000000000000000000000000" { + fmt.Println("\nDeploying Batch BHS...") + contractAddresses.BatchBHSAddress = DeployBatchBHS(e, contractAddresses.BhsContractAddress) + } + + if useTestCoordinator { + if contractAddresses.CoordinatorAddress.String() == "0x0000000000000000000000000000000000000000" { + fmt.Println("\nDeploying Test Coordinator...") + contractAddresses.CoordinatorAddress = DeployTestCoordinator(e, contractAddresses.LinkAddress, contractAddresses.BhsContractAddress.String(), contractAddresses.LinkEthAddress) + } + } else { + if contractAddresses.CoordinatorAddress.String() == "0x0000000000000000000000000000000000000000" { + fmt.Println("\nDeploying Coordinator...") + contractAddresses.CoordinatorAddress = DeployCoordinator(e, contractAddresses.LinkAddress, contractAddresses.BhsContractAddress.String(), contractAddresses.LinkEthAddress) + } + } + + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(contractAddresses.CoordinatorAddress, e.Ec) + helpers.PanicErr(err) + + var vrfOwnerAddress common.Address + if deployVRFOwner { + var tx *evmtypes.Transaction + fmt.Printf("\nDeploying 
VRF Owner for coordinator %v\n", contractAddresses.CoordinatorAddress) + vrfOwnerAddress, tx, _, err = vrf_owner.DeployVRFOwner(e.Owner, e.Ec, contractAddresses.CoordinatorAddress) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + } + + if contractAddresses.BatchCoordinatorAddress.String() == "0x0000000000000000000000000000000000000000" { + fmt.Println("\nDeploying Batch Coordinator...") + contractAddresses.BatchCoordinatorAddress = DeployBatchCoordinatorV2(e, contractAddresses.CoordinatorAddress) + } + + fmt.Println("\nSetting Coordinator Config...") + SetCoordinatorConfig( + e, + *coordinator, + uint16(coordinatorConfig.MinConfs), + uint32(coordinatorConfig.MaxGasLimit), + uint32(coordinatorConfig.StalenessSeconds), + uint32(coordinatorConfig.GasAfterPayment), + coordinatorConfig.FallbackWeiPerUnitLink, + coordinatorConfig.FeeConfig, + ) + + fmt.Println("\nConfig set, getting current config from deployed contract...") + PrintCoordinatorConfig(coordinator) + + if len(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey) > 0 { + fmt.Println("\nRegistering proving key...") + + //NOTE - register proving key against EOA account, and not against Oracle's sending address in other to be able + // easily withdraw funds from Coordinator contract back to EOA account + RegisterCoordinatorProvingKey(e, *coordinator, vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey, vrfKeyRegistrationConfig.RegisterAgainstAddress) + + fmt.Println("\nProving key registered, getting proving key hashes from deployed contract...") + _, _, provingKeyHashes, configErr := coordinator.GetRequestConfig(nil) + helpers.PanicErr(configErr) + fmt.Println("Key hash registered:", hex.EncodeToString(provingKeyHashes[0][:])) + } else { + fmt.Println("NOT registering proving key - you must do this eventually in order to fully deploy VRF!") + } + + fmt.Println("\nDeploying consumer...") + consumerAddress := EoaLoadTestConsumerWithMetricsDeploy(e, 
contractAddresses.CoordinatorAddress.String()) + + fmt.Println("\nAdding subscription...") + EoaCreateSub(e, *coordinator) + subID := uint64(1) + + fmt.Println("\nAdding consumer to subscription...") + EoaAddConsumerToSub(e, *coordinator, subID, consumerAddress.String()) + + if subscriptionBalanceJuels.Cmp(big.NewInt(0)) > 0 { + fmt.Println("\nFunding subscription with", subscriptionBalanceJuels, "juels...") + EoaFundSubscription(e, *coordinator, contractAddresses.LinkAddress, subscriptionBalanceJuels, subID) + } else { + fmt.Println("Subscription", subID, "NOT getting funded. You must fund the subscription in order to use it!") + } + + fmt.Println("\nSubscribed and (possibly) funded, retrieving subscription from deployed contract...") + s, err := coordinator.GetSubscription(nil, subID) + helpers.PanicErr(err) + fmt.Printf("Subscription %+v\n", s) + + if deployVRFOwner { + // VRF Owner + vrfOwner, err := vrf_owner.NewVRFOwner(vrfOwnerAddress, e.Ec) + helpers.PanicErr(err) + var authorizedSendersSlice []common.Address + for _, s := range nodesMap[model.VRFPrimaryNodeName].SendingKeys { + authorizedSendersSlice = append(authorizedSendersSlice, common.HexToAddress(s.Address)) + } + fmt.Printf("\nSetting authorised senders for VRF Owner: %v, Authorised senders %v\n", vrfOwnerAddress.String(), authorizedSendersSlice) + tx, err := vrfOwner.SetAuthorizedSenders(e.Owner, authorizedSendersSlice) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "vrf owner set authorized senders") + fmt.Printf("\nTransferring ownership of coordinator: %v, VRF Owner %v\n", contractAddresses.CoordinatorAddress, vrfOwnerAddress.String()) + tx, err = coordinator.TransferOwnership(e.Owner, vrfOwnerAddress) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "transfer ownership to", vrfOwnerAddress.String()) + fmt.Printf("\nAccepting ownership of coordinator: %v, VRF Owner %v\n", contractAddresses.CoordinatorAddress, 
vrfOwnerAddress.String()) + tx, err = vrfOwner.AcceptVRFOwnership(e.Owner) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "vrf owner accepting vrf ownership") + } + + formattedVrfPrimaryJobSpec := fmt.Sprintf( + jobs.VRFV2JobFormatted, + contractAddresses.CoordinatorAddress, //coordinatorAddress + contractAddresses.BatchCoordinatorAddress, //batchCoordinatorAddress + coordinatorJobSpecConfig.BatchFulfillmentEnabled, //batchFulfillmentEnabled + coordinatorJobSpecConfig.BatchFulfillmentGasMultiplier, //batchFulfillmentGasMultiplier + coordinatorJobSpecConfig.RevertsPipelineEnabled, //revertsPipelineEnabled + compressedPkHex, //publicKey + coordinatorConfig.MinConfs, //minIncomingConfirmations + e.ChainID, //evmChainID + strings.Join(util.MapToAddressArr(nodesMap[model.VRFPrimaryNodeName].SendingKeys), "\",\""), //fromAddresses + coordinatorJobSpecConfig.PollPeriod, //pollPeriod + coordinatorJobSpecConfig.RequestTimeout, //requestTimeout + contractAddresses.CoordinatorAddress, + coordinatorJobSpecConfig.EstimateGasMultiplier, //estimateGasMultiplier + simulationBlock, + func() string { + if keys := nodesMap[model.VRFPrimaryNodeName].SendingKeys; len(keys) > 0 { + return keys[0].Address + } + return common.HexToAddress("0x0").String() + }(), + contractAddresses.CoordinatorAddress, + contractAddresses.CoordinatorAddress, + simulationBlock, + ) + if deployVRFOwner { + formattedVrfPrimaryJobSpec = strings.Replace(formattedVrfPrimaryJobSpec, + "minIncomingConfirmations", + fmt.Sprintf("vrfOwnerAddress = \"%s\"\nminIncomingConfirmations", vrfOwnerAddress.Hex()), + 1) + } + + formattedVrfBackupJobSpec := fmt.Sprintf( + jobs.VRFV2JobFormatted, + contractAddresses.CoordinatorAddress, //coordinatorAddress + contractAddresses.BatchCoordinatorAddress, //batchCoordinatorAddress + coordinatorJobSpecConfig.BatchFulfillmentEnabled, //batchFulfillmentEnabled + coordinatorJobSpecConfig.BatchFulfillmentGasMultiplier, 
//batchFulfillmentGasMultiplier + coordinatorJobSpecConfig.RevertsPipelineEnabled, //revertsPipelineEnabled + compressedPkHex, //publicKey + 100, //minIncomingConfirmations + e.ChainID, //evmChainID + strings.Join(util.MapToAddressArr(nodesMap[model.VRFBackupNodeName].SendingKeys), "\",\""), //fromAddresses + coordinatorJobSpecConfig.PollPeriod, //pollPeriod + coordinatorJobSpecConfig.RequestTimeout, //requestTimeout + contractAddresses.CoordinatorAddress, + coordinatorJobSpecConfig.EstimateGasMultiplier, //estimateGasMultiplier + simulationBlock, + func() string { + if keys := nodesMap[model.VRFPrimaryNodeName].SendingKeys; len(keys) > 0 { + return keys[0].Address + } + return common.HexToAddress("0x0").String() + }(), + contractAddresses.CoordinatorAddress, + contractAddresses.CoordinatorAddress, + simulationBlock, + ) + if deployVRFOwner { + formattedVrfBackupJobSpec = strings.Replace(formattedVrfBackupJobSpec, + "minIncomingConfirmations", + fmt.Sprintf("vrfOwnerAddress = \"%s\"\nminIncomingConfirmations", vrfOwnerAddress.Hex()), + 1) + } + + formattedBHSJobSpec := fmt.Sprintf( + jobs.BHSJobFormatted, + contractAddresses.CoordinatorAddress, //coordinatorAddress + 30, //waitBlocks + 200, //lookbackBlocks + contractAddresses.BhsContractAddress, //bhs address + e.ChainID, //chain id + strings.Join(util.MapToAddressArr(nodesMap[model.BHSNodeName].SendingKeys), "\",\""), //sending addresses + ) + + formattedBHSBackupJobSpec := fmt.Sprintf( + jobs.BHSJobFormatted, + contractAddresses.CoordinatorAddress, //coordinatorAddress + 100, //waitBlocks + 200, //lookbackBlocks + contractAddresses.BhsContractAddress, //bhs adreess + e.ChainID, //chain id + strings.Join(util.MapToAddressArr(nodesMap[model.BHSBackupNodeName].SendingKeys), "\",\""), //sending addresses + ) + + formattedBHFJobSpec := fmt.Sprintf( + jobs.BHFJobFormatted, + contractAddresses.CoordinatorAddress, //coordinatorAddress + contractAddresses.BhsContractAddress, //bhs adreess + 
contractAddresses.BatchBHSAddress, //batchBHS + e.ChainID, //chain id + strings.Join(util.MapToAddressArr(nodesMap[model.BHFNodeName].SendingKeys), "\",\""), //sending addresses + ) + + fmt.Println( + "\nDeployment complete.", + "\nPLI Token contract address:", contractAddresses.LinkAddress, + "\nPLI/ETH Feed contract address:", contractAddresses.LinkEthAddress, + "\nBlockhash Store contract address:", contractAddresses.BhsContractAddress, + "\nBatch Blockhash Store contract address:", contractAddresses.BatchBHSAddress, + "\nVRF Coordinator Address:", contractAddresses.CoordinatorAddress, + "\nBatch VRF Coordinator Address:", contractAddresses.BatchCoordinatorAddress, + "\nVRF Consumer Address:", consumerAddress, + "\nVRF Owner Address:", vrfOwnerAddress, + "\nVRF Subscription Id:", subID, + "\nVRF Subscription Balance:", *subscriptionBalanceJuels, + "\nPossible VRF Request command: ", + fmt.Sprintf("go run . eoa-load-test-request-with-metrics --consumer-address=%s --sub-id=%d --key-hash=%s --request-confirmations %d --requests 1 --runs 1 --cb-gas-limit 1_000_000", consumerAddress, subID, keyHash, coordinatorConfig.MinConfs), + "\nRetrieve Request Status: ", + fmt.Sprintf("go run . 
eoa-load-test-read-metrics --consumer-address=%s", consumerAddress), + "\nA node can now be configured to run a VRF job with the below job spec :\n", + formattedVrfPrimaryJobSpec, + ) + + return model.JobSpecs{ + VRFPrimaryNode: formattedVrfPrimaryJobSpec, + VRFBackupyNode: formattedVrfBackupJobSpec, + BHSNode: formattedBHSJobSpec, + BHSBackupNode: formattedBHSBackupJobSpec, + BHFNode: formattedBHFJobSpec, + } +} + +func DeployWrapperUniverse(e helpers.Environment) { + cmd := flag.NewFlagSet("wrapper-universe-deploy", flag.ExitOnError) + linkAddress := cmd.String("link-address", "", "address of link token") + linkETHFeedAddress := cmd.String("link-eth-feed", "", "address of link-eth-feed") + coordinatorAddress := cmd.String("coordinator-address", "", "address of the vrf coordinator v2 contract") + wrapperGasOverhead := cmd.Uint("wrapper-gas-overhead", 50_000, "amount of gas overhead in wrapper fulfillment") + coordinatorGasOverhead := cmd.Uint("coordinator-gas-overhead", 52_000, "amount of gas overhead in coordinator fulfillment") + wrapperPremiumPercentage := cmd.Uint("wrapper-premium-percentage", 25, "gas premium charged by wrapper") + keyHash := cmd.String("key-hash", "", "the keyhash that wrapper requests should use") + maxNumWords := cmd.Uint("max-num-words", 10, "the keyhash that wrapper requests should use") + subFunding := cmd.String("sub-funding", "10000000000000000000", "amount to fund the subscription with") + consumerFunding := cmd.String("consumer-funding", "10000000000000000000", "amount to fund the consumer with") + helpers.ParseArgs(cmd, os.Args[2:], "link-address", "link-eth-feed", "coordinator-address", "key-hash") + + amount, s := big.NewInt(0).SetString(*subFunding, 10) + if !s { + panic(fmt.Sprintf("failed to parse top up amount '%s'", *subFunding)) + } + + wrapper, subID := WrapperDeploy(e, + common.HexToAddress(*linkAddress), + common.HexToAddress(*linkETHFeedAddress), + common.HexToAddress(*coordinatorAddress)) + + WrapperConfigure(e, + 
wrapper, + *wrapperGasOverhead, + *coordinatorGasOverhead, + *wrapperPremiumPercentage, + *keyHash, + *maxNumWords) + + consumer := WrapperConsumerDeploy(e, + common.HexToAddress(*linkAddress), + wrapper) + + coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + + EoaFundSubscription(e, *coordinator, *linkAddress, amount, subID) + + link, err := link_token_interface.NewLinkToken(common.HexToAddress(*linkAddress), e.Ec) + helpers.PanicErr(err) + consumerAmount, s := big.NewInt(0).SetString(*consumerFunding, 10) + if !s { + panic(fmt.Sprintf("failed to parse top up amount '%s'", *consumerFunding)) + } + + tx, err := link.Transfer(e.Owner, consumer, consumerAmount) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "link transfer to consumer") + + fmt.Println("wrapper universe deployment complete") + fmt.Println("wrapper address:", wrapper.String()) + fmt.Println("wrapper consumer address:", consumer.String()) +} diff --git a/core/scripts/vrfv2/testnet/v2scripts/util.go b/core/scripts/vrfv2/testnet/v2scripts/util.go new file mode 100644 index 00000000..4a709fc8 --- /dev/null +++ b/core/scripts/vrfv2/testnet/v2scripts/util.go @@ -0,0 +1,269 @@ +package v2scripts + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "math/big" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_test_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_load_test_with_metrics" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_blockhash_store" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_external_sub_owner_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_wrapper_consumer_example" +) + +func DeployBHS(e helpers.Environment) (blockhashStoreAddress common.Address) { + _, tx, _, err := blockhash_store.DeployBlockhashStore(e.Owner, e.Ec) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func DeployBatchBHS(e helpers.Environment, bhsAddress common.Address) (batchBHSAddress common.Address) { + _, tx, _, err := batch_blockhash_store.DeployBatchBlockhashStore(e.Owner, e.Ec, bhsAddress) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func DeployCoordinator( + e helpers.Environment, + linkAddress string, + bhsAddress string, + linkEthAddress string, +) (coordinatorAddress common.Address) { + _, tx, _, err := vrf_coordinator_v2.DeployVRFCoordinatorV2( + e.Owner, + e.Ec, + common.HexToAddress(linkAddress), + common.HexToAddress(bhsAddress), + common.HexToAddress(linkEthAddress)) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func DeployTestCoordinator( + e helpers.Environment, + linkAddress string, + bhsAddress string, + linkEthAddress string, +) (coordinatorAddress common.Address) { + _, tx, _, err := vrf_coordinator_test_v2.DeployVRFCoordinatorTestV2( + e.Owner, + e.Ec, + common.HexToAddress(linkAddress), + common.HexToAddress(bhsAddress), + common.HexToAddress(linkEthAddress)) + helpers.PanicErr(err) + return 
helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func DeployBatchCoordinatorV2(e helpers.Environment, coordinatorAddress common.Address) (batchCoordinatorAddress common.Address) { + _, tx, _, err := batch_vrf_coordinator_v2.DeployBatchVRFCoordinatorV2(e.Owner, e.Ec, coordinatorAddress) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func EoaAddConsumerToSub(e helpers.Environment, coordinator vrf_coordinator_v2.VRFCoordinatorV2, subID uint64, consumerAddress string) { + txadd, err := coordinator.AddConsumer(e.Owner, subID, common.HexToAddress(consumerAddress)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, txadd, e.ChainID) +} + +func EoaCreateSub(e helpers.Environment, coordinator vrf_coordinator_v2.VRFCoordinatorV2) { + tx, err := coordinator.CreateSubscription(e.Owner) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func EoaDeployConsumer(e helpers.Environment, coordinatorAddress string, linkAddress string) (consumerAddress common.Address) { + _, tx, _, err := vrf_external_sub_owner_example.DeployVRFExternalSubOwnerExample( + e.Owner, + e.Ec, + common.HexToAddress(coordinatorAddress), + common.HexToAddress(linkAddress)) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func EoaFundSubscription(e helpers.Environment, coordinator vrf_coordinator_v2.VRFCoordinatorV2, linkAddress string, amount *big.Int, subID uint64) { + linkToken, err := link_token_interface.NewLinkToken(common.HexToAddress(linkAddress), e.Ec) + helpers.PanicErr(err) + bal, err := linkToken.BalanceOf(nil, e.Owner.From) + helpers.PanicErr(err) + fmt.Println("Initial account balance:", bal, e.Owner.From.String(), "Funding amount:", amount.String()) + b, err := utils.ABIEncode(`[{"type":"uint64"}]`, subID) + helpers.PanicErr(err) + tx, err := 
linkToken.TransferAndCall(e.Owner, coordinator.Address(), amount, b) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, fmt.Sprintf("sub ID: %d", subID)) +} + +func PrintCoordinatorConfig(coordinator *vrf_coordinator_v2.VRFCoordinatorV2) { + cfg, err := coordinator.GetConfig(nil) + helpers.PanicErr(err) + + feeConfig, err := coordinator.GetFeeConfig(nil) + helpers.PanicErr(err) + + fmt.Printf("Coordinator config: %+v\n", cfg) + fmt.Printf("Coordinator fee config: %+v\n", feeConfig) +} + +func SetCoordinatorConfig( + e helpers.Environment, + coordinator vrf_coordinator_v2.VRFCoordinatorV2, + minConfs uint16, + maxGasLimit uint32, + stalenessSeconds uint32, + gasAfterPayment uint32, + fallbackWeiPerUnitLink *big.Int, + feeConfig vrf_coordinator_v2.VRFCoordinatorV2FeeConfig, +) { + tx, err := coordinator.SetConfig( + e.Owner, + minConfs, // minRequestConfirmations + maxGasLimit, // max gas limit + stalenessSeconds, // stalenessSeconds + gasAfterPayment, // gasAfterPaymentCalculation + fallbackWeiPerUnitLink, // 0.01 eth per link fallbackLinkPrice + feeConfig, + ) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func RegisterCoordinatorProvingKey(e helpers.Environment, coordinator vrf_coordinator_v2.VRFCoordinatorV2, uncompressed string, oracleAddress string) { + pubBytes, err := hex.DecodeString(uncompressed) + helpers.PanicErr(err) + pk, err := crypto.UnmarshalPubkey(pubBytes) + helpers.PanicErr(err) + tx, err := coordinator.RegisterProvingKey(e.Owner, + common.HexToAddress(oracleAddress), + [2]*big.Int{pk.X, pk.Y}) + helpers.PanicErr(err) + helpers.ConfirmTXMined( + context.Background(), + e.Ec, + tx, + e.ChainID, + fmt.Sprintf("Uncompressed public key: %s,", uncompressed), + fmt.Sprintf("Oracle address: %s,", oracleAddress), + ) +} + +func WrapperDeploy( + e helpers.Environment, + link, linkEthFeed, coordinator common.Address, +) (common.Address, uint64) { + address, tx, _, 
err := vrfv2_wrapper.DeployVRFV2Wrapper(e.Owner, e.Ec, + link, + linkEthFeed, + coordinator) + helpers.PanicErr(err) + + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + fmt.Println("VRFV2Wrapper address:", address) + + wrapper, err := vrfv2_wrapper.NewVRFV2Wrapper(address, e.Ec) + helpers.PanicErr(err) + + subID, err := wrapper.SUBSCRIPTIONID(nil) + helpers.PanicErr(err) + fmt.Println("VRFV2Wrapper subscription id:", subID) + + return address, subID +} + +func WrapperConfigure( + e helpers.Environment, + wrapperAddress common.Address, + wrapperGasOverhead, coordinatorGasOverhead, premiumPercentage uint, + keyHash string, + maxNumWords uint, +) { + wrapper, err := vrfv2_wrapper.NewVRFV2Wrapper(wrapperAddress, e.Ec) + helpers.PanicErr(err) + + tx, err := wrapper.SetConfig( + e.Owner, + uint32(wrapperGasOverhead), + uint32(coordinatorGasOverhead), + uint8(premiumPercentage), + common.HexToHash(keyHash), + uint8(maxNumWords)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func WrapperConsumerDeploy( + e helpers.Environment, + link, wrapper common.Address, +) common.Address { + address, tx, _, err := vrfv2_wrapper_consumer_example.DeployVRFV2WrapperConsumerExample(e.Owner, e.Ec, + link, + wrapper) + helpers.PanicErr(err) + + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + fmt.Printf("VRFV2WrapperConsumerExample address: %s\n", address) + return address +} + +func EoaLoadTestConsumerWithMetricsDeploy(e helpers.Environment, consumerCoordinator string) (consumerAddress common.Address) { + _, tx, _, err := vrf_load_test_with_metrics.DeployVRFV2LoadTestWithMetrics( + e.Owner, + e.Ec, + common.HexToAddress(consumerCoordinator), + ) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func ClosestBlock(e helpers.Environment, batchBHSAddress common.Address, blockMissingBlockhash uint64, batchSize uint64) 
(uint64, error) { + batchBHS, err := batch_blockhash_store.NewBatchBlockhashStore(batchBHSAddress, e.Ec) + if err != nil { + return 0, err + } + startBlock := blockMissingBlockhash + 1 + endBlock := startBlock + batchSize + for { + latestBlock, err := e.Ec.HeaderByNumber(context.Background(), nil) + if err != nil { + return 0, err + } + if latestBlock.Number.Uint64() < endBlock { + return 0, errors.New("closest block with blockhash not found") + } + var blockRange []*big.Int + for i := startBlock; i <= endBlock; i++ { + blockRange = append(blockRange, big.NewInt(int64(i))) + } + fmt.Println("Searching range", startBlock, "-", endBlock, "inclusive") + hashes, err := batchBHS.GetBlockhashes(nil, blockRange) + if err != nil { + return 0, err + } + for i, hash := range hashes { + if hash != (common.Hash{}) { + fmt.Println("found closest block:", startBlock+uint64(i), "hash:", hexutil.Encode(hash[:])) + fmt.Println("distance from missing block:", startBlock+uint64(i)-blockMissingBlockhash) + return startBlock + uint64(i), nil + } + } + startBlock = endBlock + 1 + endBlock = startBlock + batchSize + } +} diff --git a/core/scripts/vrfv2plus/testnet/README.md b/core/scripts/vrfv2plus/testnet/README.md new file mode 100644 index 00000000..289cd2c4 --- /dev/null +++ b/core/scripts/vrfv2plus/testnet/README.md @@ -0,0 +1,257 @@ +# Using the External Subscription Owner Example + +The [external subscription owner example contract](../../../../contracts/src/v0.8/dev/vrf/testhelpers/VRFV2PlusExternalSubOwnerExample.sol) +allows its owner to request random words from VRF V2+ if it is added as a +consumer for a funded VRF subscription. + +This guide covers: + 1. Deploying the contract + 2. Creating, funding, checking balance, and adding a consumer to a VRF V2+ + subscription + 3. Requesting randomness from the contract + +## Setup + +Before starting, you will need: +1. An EVM chain endpoint URL +2. The chain ID corresponding to your chain +3. 
The private key of an account funded with PLI, and the chain's native token + (to pay transaction fees) +4. [The PLI address, VRF coordinator address, and key hash](https://docs.chain.link/docs/vrf/v2/supported-networks/) + for your chain. +5. [Go](https://go.dev/doc/install) + +The endpoint URL can be a locally running node, or an externally hosted one like +[alchemy](https://www.alchemy.com/). Your chain ID will be a number +corresponding to the chain you pick. For example the Rinkeby testnet has chain +ID 4. Your private key can be exported from [MetaMask](https://metamask.zendesk.com/hc/en-us/articles/360015289632-How-to-Export-an-Account-Private-Key). + +Note: Be careful with your key. When using testnets, it's best to use a separate +account that does not hold real funds. + +Run the following command to set up your environment: + +```shell +export ETH_URL= +export ETH_CHAIN_ID= +export ACCOUNT_KEY= +export PLI= +export PLI_ETH_FEED=
+export COORDINATOR= +export KEY_HASH= +export ORACLE_ADDRESS= +export PUB_KEY= +``` + +By default, the script automatically estimates gas limits for operations. Optionally, `ETH_GAS_LIMIT_DEFAULT` environment variable can be set to override gas limit for operations. + +Now "cd" into the VRF V2+ testnet scripts directory: + +```shell +cd /core/scripts/vrfv2plus/testnet +``` + +## Deploying a full VRF Universe (BHS, Registered + Funded Coordinator, Consumer) + +- To deploy a full VRF environment on-chain, run the command below +- Not specifying `--link-address` would make the super script deploy a new PLI token contract and use it to fund VRF V2+ subscription +- Not specifying `--link-eth-feed` would make the super script deploy a new PLI-ETH feed contract and use it for funding VRF V2+ subscription + +```shell +go run . deploy-universe \ +--link-address=$PLI \ +--link-eth-feed=$PLI_ETH_FEED \ +--subscription-balance=5000000000000000000 \ #5 PLI +--subscription-balance-native=1000000000000000000 \ #1 ETH +--uncompressed-pub-key= \ +--vrf-primary-node-sending-keys="" \ #used to fund the keys and for sample VRF Job Spec generation +--sending-key-funding-amount 100000000000000000 \ #0.1 ETH, fund addresses specified in vrf-primary-node-sending-keys +--batch-fulfillment-enabled false \ #only used for sample VRF Job Spec generation +--register-vrf-key-against-address="" # from this address you can perform `coordinator.oracleWithdraw` to withdraw earned funds from rand request fulfilments +``` + +## Deploying the Consumer Contract + +To deploy the VRFExternalSubOwnerExample contract, run: + +```shell +go run . eoa-consumer-deploy --coordinator-address=$COORDINATOR --link-address=$PLI +``` + +You should get the output: +``` +Consumer address hash +``` + +Run the command: +```shell +export CONSUMER= +``` + +## Setting up a VRF V2+ Subscription + +In order for your newly deployed consumer to make VRF requests, it needs to be +authorized for a funded subscription. 
+ +### Creating a Subscription + +```shell +go run . eoa-create-sub --coordinator-address=$COORDINATOR +``` + +You should get the output: +``` +Create sub TX hash +``` + +In order to get the subscription ID created by your transaction, you should use +an online block explorer and input your transaction hash. Once the transaction +is confirmed you should see a log (on Etherscan, this is in the "Logs" tab of +the transaction details screen) with the created subscription details including +the decimal representation of your subscription ID. + +Once you have found the ID, run: +```shell +export SUB_ID= +``` + +### Funding a Subscription + +In order to fund your subscription with 10 PLI, run: +```shell +go run . eoa-fund-sub --coordinator-address $COORDINATOR --link-address=$PLI --sub-id=$SUB_ID --amount=10000000000000000000 # 10e18 or 10 PLI +``` + +You should get the output: +``` +Initial account balance: Funding amount: 10000000000000000000 +Funding sub 61 hash +``` + +### (Optional) Checking Subscription Balance + +To check the PLI balance of your subscription, run: +```shell +go run . sub-balance --coordinator-address $COORDINATOR --sub-id=$SUB_ID +``` + +You should get the output: +``` +sub id balance: +``` + +### Adding a Consumer to Your Subscription + +In order to authorize the consumer contract to use the new subscription, run the +command: +```shell +go run . eoa-add-sub-consumer --coordinator-address $COORDINATOR --sub-id=$SUB_ID --consumer-address=$CONSUMER +``` + +### Requesting Randomness + +At this point, the consumer is authorized as a consumer of a funded +subscription, and is ready to request random words. + +To make a request, run: +```shell +go run . eoa-request --consumer-address=$CONSUMER --sub-id=$SUB_ID --key-hash=$KEY_HASH --num-words 1 +``` + +You should get the output: +``` +TX hash: 0x599022228ffca10b0192e0b13bea64ff74f6dab2f0a3002b0825cbe22bd98249 +``` + +You can put this transaction hash into a block explorer to check its progress. 
+Shortly after it's confirmed, usually only a few minutes, you should see a +second incoming transaction to your consumer containing the randomness +result. + +## Debugging Reverted Transactions + +A reverted transaction could have a number of root causes, for example +insufficient funds / PLI, or incorrect contract addresses. + +[Tenderly](https://dashboard.tenderly.co/explorer) can be useful for debugging +why a transaction failed. For example [this Rinkeby transaction](https://dashboard.tenderly.co/tx/rinkeby/0x71a7279033b47472ca453f7a19ccb685d0f32cdb4854a45052f1aaccd80436e9) +failed because a non-owner tried to request random words from +[VRFExternalSubOwnerExample](../../../../contracts/src/v0.8/tests/VRFExternalSubOwnerExample.sol). + +## Using the `BatchBlockhashStore` Contract + +The `BatchBlockhashStore` contract acts as a proxy to the `BlockhashStore` contract, allowing callers to store +and fetch many blockhashes in a single transaction. + +### Deploy a `BatchBlockhashStore` instance + +``` +go run . batch-bhs-deploy -bhs-address $BHS_ADDRESS +``` + +where `$BHS_ADDRESS` is an environment variable that points to an existing `BlockhashStore` contract. If one is not available, +you can easily deploy one using this command: + +``` +go run . bhs-deploy +``` + +### Store many blockhashes + +``` +go run . batch-bhs-store -batch-bhs-address $BATCH_BHS_ADDRESS -block-numbers 10298742,10298741,10298740,10298739 +``` + +where `$BATCH_BHS_ADDRESS` points to the `BatchBlockhashStore` contract deployed above, and `-block-numbers` is a comma-separated +list of block numbers you want to store in a single transaction. + +Please note that these block numbers must not be further than 256 from the latest head, otherwise the store will fail. + +### Fetch many blockhashes + +``` +go run . 
batch-bhs-get -batch-bhs-address $BATCH_BHS_ADDRESS -block-numbers 10298742,10298741,10298740,10298739 +``` + +where `$BATCH_BHS_ADDRESS` points to the `BatchBlockhashStore` contract deployed above, and `-block-numbers` is a comma-separated +list of block numbers you want to get in a single transaction. + +### Store many blockhashes, possibly farther back than 256 blocks + +In order to store blockhashes farther back than 256 blocks we can make use of the `storeVerifyHeader` method on the `BatchBlockhashStore`. + +Here's how to use it: + +``` +go run . batch-bhs-storeVerify -batch-bhs-address $BATCH_BHS_ADDRESS -num-blocks 25 -start-block 10298739 +``` + +where `$BATCH_BHS_ADDRESS` points to the `BatchBlockhashStore` contract deployed above, `-num-blocks` is the amount of blocks to store, and +`-start-block` is the block to start storing from, backwards. The block number specified by `-start-block` MUST be +in the blockhash store already, or this will not work. + +### Batch BHS "Backwards Mode" + +There may be a situation where you want to backfill a lot of blockhashes, down to a certain block number. + +This is where "Backwards Mode" comes in - you're going to need the following: + +* A block number that has already been stored in the BHS. The closer it is to the target block range you want to store, +the better. You can view the oldest "Store" transactions on the BHS contract that are still ahead of the block range you +are interested in. For example, if you want to store blocks 100 to 200, and 210 and 220 are available, specify `-start-block` +as `210`. +* A destination block number, where you want to stop storing after this one has been stored in the BHS. This number doesn't have +to be in the BHS already but must be less than the block specified for `--start-block` +* A batch size to use. This is how many stores we will attempt to do in a single transaction. A good value for this is usually 50-75 +for big block ranges. 
+* The address of the batch BHS to use. + +Example: + +``` +go run . batch-bhs-backwards -batch-bhs-address $BATCH_BHS_ADDRESS -start-block 25814538 -end-block 25811350 -batch-size 50 +``` + +This script is simplistic on purpose, where we wait for the transaction to mine before proceeding with the next one. This +is to avoid issues where a transaction gets sent and not included on-chain, and subsequent calls to `storeVerifyHeader` will +fail. diff --git a/core/scripts/vrfv2plus/testnet/main.go b/core/scripts/vrfv2plus/testnet/main.go new file mode 100644 index 00000000..11929354 --- /dev/null +++ b/core/scripts/vrfv2plus/testnet/main.go @@ -0,0 +1,1287 @@ +package main + +import ( + "bytes" + "context" + "encoding/hex" + "flag" + "fmt" + "log" + "math/big" + "os" + "strings" + + "github.com/goplugin/pluginv3.0/core/scripts/vrfv2plus/testnet/v2plusscripts" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/chain_specific_util_helper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2_5" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_load_test_with_metrics" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/shopspring/decimal" + + "github.com/jmoiron/sqlx" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_blockhash_store" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_vrf_coordinator_v2plus" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/trusted_blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_load_test_external_sub_owner" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_single_consumer" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_sub_owner" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2plus_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2plus_wrapper_consumer_example" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/extraargs" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + batchCoordinatorV2PlusABI = evmtypes.MustGetABI(batch_vrf_coordinator_v2plus.BatchVRFCoordinatorV2PlusABI) +) + +func main() { + e := helpers.SetupEnv(false) + + switch os.Args[1] { + case "csu-deploy": + addr, tx, _, err := chain_specific_util_helper.DeployChainSpecificUtilHelper(e.Owner, e.Ec) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "deploying chain specific util helper") + fmt.Println("deployed chain specific util helper at:", addr) + case "csu-block-number": + cmd := flag.NewFlagSet("csu-block-number", flag.ExitOnError) + csuAddress := cmd.String("csu-address", "", "address of the chain specific util helper contract") + helpers.ParseArgs(cmd, os.Args[2:], "csu-address") + csu, err := 
chain_specific_util_helper.NewChainSpecificUtilHelper(common.HexToAddress(*csuAddress), e.Ec) + helpers.PanicErr(err) + blockNumber, err := csu.GetBlockNumber(nil) + helpers.PanicErr(err) + fmt.Println("block number:", blockNumber) + case "csu-block-hash": + cmd := flag.NewFlagSet("csu-block-hash", flag.ExitOnError) + csuAddress := cmd.String("csu-address", "", "address of the chain specific util helper contract") + blockNumber := cmd.Uint64("block-number", 0, "block number to get the hash of") + helpers.ParseArgs(cmd, os.Args[2:], "csu-address") + csu, err := chain_specific_util_helper.NewChainSpecificUtilHelper(common.HexToAddress(*csuAddress), e.Ec) + helpers.PanicErr(err) + blockHash, err := csu.GetBlockhash(nil, *blockNumber) + helpers.PanicErr(err) + fmt.Println("block hash:", hexutil.Encode(blockHash[:])) + case "csu-current-tx-l1-gas-fees": + cmd := flag.NewFlagSet("csu-current-tx-l1-gas-fees", flag.ExitOnError) + csuAddress := cmd.String("csu-address", "", "address of the chain specific util helper contract") + calldata := cmd.String("calldata", "", "calldata to estimate gas fees for") + helpers.ParseArgs(cmd, os.Args[2:], "csu-address", "calldata") + csu, err := chain_specific_util_helper.NewChainSpecificUtilHelper(common.HexToAddress(*csuAddress), e.Ec) + helpers.PanicErr(err) + gasFees, err := csu.GetCurrentTxL1GasFees(nil, *calldata) + helpers.PanicErr(err) + fmt.Println("gas fees:", gasFees) + case "csu-l1-calldata-gas-cost": + cmd := flag.NewFlagSet("csu-l1-calldata-gas-cost", flag.ExitOnError) + csuAddress := cmd.String("csu-address", "", "address of the chain specific util helper contract") + calldataSize := cmd.String("calldata-size", "", "size of the calldata to estimate gas fees for") + helpers.ParseArgs(cmd, os.Args[2:], "csu-address", "calldata-size") + csu, err := chain_specific_util_helper.NewChainSpecificUtilHelper(common.HexToAddress(*csuAddress), e.Ec) + helpers.PanicErr(err) + gasCost, err := csu.GetL1CalldataGasCost(nil, 
decimal.RequireFromString(*calldataSize).BigInt()) + helpers.PanicErr(err) + fmt.Println("gas cost:", gasCost) + case "smoke": + v2plusscripts.SmokeTestVRF(e) + case "smoke-bhs": + v2plusscripts.SmokeTestBHS(e) + case "manual-fulfill": + cmd := flag.NewFlagSet("manual-fulfill", flag.ExitOnError) + // In order to get the tx data for a fulfillment transaction, you can grep the + // plugin node logs for the VRF v2 request ID in hex. You will find a log for + // the vrf task in the VRF pipeline, specifically the "output" log field. + // Sample Loki query: + // {app="app-name"} | json | taskType="vrfv2plus" |~ "39f2d812c04e07cb9c71e93ce6547e48b7dd23ed4cc02616dfef5ef063a58bde" + txdatas := cmd.String("txdatas", "", "hex encoded tx data") + coordinatorAddress := cmd.String("coordinator-address", "", "coordinator address") + gasMultiplier := cmd.Float64("gas-multiplier", 1.1, "gas multiplier") + helpers.ParseArgs(cmd, os.Args[2:], "txdatas", "coordinator-address") + txdatasParsed := helpers.ParseHexSlice(*txdatas) + coordinatorAddr := common.HexToAddress(*coordinatorAddress) + for i, txdata := range txdatasParsed { + nonce, err := e.Ec.PendingNonceAt(context.Background(), e.Owner.From) + helpers.PanicErr(err) + estimate, err := e.Ec.EstimateGas(context.Background(), ethereum.CallMsg{ + From: common.HexToAddress("0x0"), + To: &coordinatorAddr, + Data: txdata, + }) + helpers.PanicErr(err) + finalEstimate := uint64(*gasMultiplier * float64(estimate)) + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce, + GasPrice: e.Owner.GasPrice, + Gas: finalEstimate, + To: &coordinatorAddr, + Data: txdata, + }) + signedTx, err := e.Owner.Signer(e.Owner.From, tx) + helpers.PanicErr(err) + err = e.Ec.SendTransaction(context.Background(), signedTx) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, signedTx, e.ChainID, fmt.Sprintf("manual fulfillment %d", i+1)) + } + case "topics": + randomWordsRequested := 
vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested{}.Topic() + randomWordsFulfilled := vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled{}.Topic() + fmt.Println("RandomWordsRequested:", randomWordsRequested.String(), + "RandomWordsFulfilled:", randomWordsFulfilled.String()) + case "batch-coordinatorv2plus-deploy": + cmd := flag.NewFlagSet("batch-coordinatorv2plus-deploy", flag.ExitOnError) + coordinatorAddr := cmd.String("coordinator-address", "", "address of the vrf coordinator v2 contract") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address") + _, tx, _, err := batch_vrf_coordinator_v2plus.DeployBatchVRFCoordinatorV2Plus( + e.Owner, e.Ec, common.HexToAddress(*coordinatorAddr)) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + case "batch-coordinatorv2plus-fulfill": + cmd := flag.NewFlagSet("batch-coordinatorv2plus-fulfill", flag.ExitOnError) + batchCoordinatorAddr := cmd.String("batch-coordinator-address", "", "address of the batch vrf coordinator v2 contract") + pubKeyHex := cmd.String("pubkeyhex", "", "compressed pubkey hex") + dbURL := cmd.String("db-url", "", "postgres database url") + keystorePassword := cmd.String("keystore-pw", "", "password to the keystore") + submit := cmd.Bool("submit", false, "whether to submit the fulfillments or not") + estimateGas := cmd.Bool("estimate-gas", false, "whether to estimate gas or not") + nativePayment := cmd.Bool("native-payment", false, "whether to use native payment or not") + + // NOTE: it is assumed all of these are of the same length and that + // elements correspond to each other index-wise. this property is not checked. 
+ preSeeds := cmd.String("preseeds", "", "comma-separated request preSeeds") + blockHashes := cmd.String("blockhashes", "", "comma-separated request blockhashes") + blockNums := cmd.String("blocknums", "", "comma-separated request blocknumbers") + subIDs := cmd.String("subids", "", "comma-separated request subids") + cbGasLimits := cmd.String("cbgaslimits", "", "comma-separated request callback gas limits") + numWordses := cmd.String("numwordses", "", "comma-separated request num words") + senders := cmd.String("senders", "", "comma-separated request senders") + + helpers.ParseArgs(cmd, os.Args[2:], + "batch-coordinator-address", "pubkeyhex", "db-url", + "keystore-pw", "preseeds", "blockhashes", "blocknums", + "subids", "cbgaslimits", "numwordses", "senders", "submit", + ) + + preSeedSlice := helpers.ParseBigIntSlice(*preSeeds) + bhSlice := helpers.ParseHashSlice(*blockHashes) + blockNumSlice := helpers.ParseBigIntSlice(*blockNums) + subIDSlice := helpers.ParseBigIntSlice(*subIDs) + cbLimitsSlice := helpers.ParseBigIntSlice(*cbGasLimits) + numWordsSlice := helpers.ParseBigIntSlice(*numWordses) + senderSlice := helpers.ParseAddressSlice(*senders) + + batchCoordinator, err := batch_vrf_coordinator_v2plus.NewBatchVRFCoordinatorV2Plus(common.HexToAddress(*batchCoordinatorAddr), e.Ec) + helpers.PanicErr(err) + + db := sqlx.MustOpen("postgres", *dbURL) + lggr, _ := logger.NewLogger() + + keyStore := keystore.New(db, utils.DefaultScryptParams, lggr, pg.NewQConfig(false)) + err = keyStore.Unlock(*keystorePassword) + helpers.PanicErr(err) + + k, err := keyStore.VRF().Get(*pubKeyHex) + helpers.PanicErr(err) + + fmt.Println("vrf key found:", k) + + proofs := []batch_vrf_coordinator_v2plus.VRFTypesProof{} + reqCommits := []batch_vrf_coordinator_v2plus.VRFTypesRequestCommitmentV2Plus{} + for i := range preSeedSlice { + ps, err := proof.BigToSeed(preSeedSlice[i]) + helpers.PanicErr(err) + extraArgs, err := extraargs.ExtraArgsV1(*nativePayment) + helpers.PanicErr(err) + 
preSeedData := proof.PreSeedDataV2Plus{ + PreSeed: ps, + BlockHash: bhSlice[i], + BlockNum: blockNumSlice[i].Uint64(), + SubId: subIDSlice[i], + CallbackGasLimit: uint32(cbLimitsSlice[i].Uint64()), + NumWords: uint32(numWordsSlice[i].Uint64()), + Sender: senderSlice[i], + ExtraArgs: extraArgs, + } + fmt.Printf("preseed data iteration %d: %+v\n", i, preSeedData) + finalSeed := proof.FinalSeedV2Plus(preSeedData) + + p, err := keyStore.VRF().GenerateProof(*pubKeyHex, finalSeed) + helpers.PanicErr(err) + + onChainProof, rc, err := proof.GenerateProofResponseFromProofV2Plus(p, preSeedData) + helpers.PanicErr(err) + + proofs = append(proofs, batch_vrf_coordinator_v2plus.VRFTypesProof(onChainProof)) + reqCommits = append(reqCommits, batch_vrf_coordinator_v2plus.VRFTypesRequestCommitmentV2Plus(rc)) + } + + fmt.Printf("proofs: %+v\n\n", proofs) + fmt.Printf("request commitments: %+v\n\n", reqCommits) + + if *submit { + fmt.Println("submitting fulfillments...") + tx, err := batchCoordinator.FulfillRandomWords(e.Owner, proofs, reqCommits) + helpers.PanicErr(err) + + fmt.Println("waiting for it to mine:", helpers.ExplorerLink(e.ChainID, tx.Hash())) + _, err = bind.WaitMined(context.Background(), e.Ec, tx) + helpers.PanicErr(err) + fmt.Println("done") + } + + if *estimateGas { + fmt.Println("estimating gas") + payload, err := batchCoordinatorV2PlusABI.Pack("fulfillRandomWords", proofs, reqCommits) + helpers.PanicErr(err) + + a := batchCoordinator.Address() + gasEstimate, err := e.Ec.EstimateGas(context.Background(), ethereum.CallMsg{ + From: e.Owner.From, + To: &a, + Data: payload, + }) + helpers.PanicErr(err) + + fmt.Println("gas estimate:", gasEstimate) + } + case "coordinatorv2-fulfill": + cmd := flag.NewFlagSet("coordinatorv2-fulfill", flag.ExitOnError) + coordinatorAddr := cmd.String("coordinator-address", "", "address of the vrf coordinator v2 contract") + pubKeyHex := cmd.String("pubkeyhex", "", "compressed pubkey hex") + dbURL := cmd.String("db-url", "", "postgres 
database url") + keystorePassword := cmd.String("keystore-pw", "", "password to the keystore") + nativePayment := cmd.Bool("native-payment", false, "whether to use native payment or not") + onlyPremium := cmd.Bool("only-premium", false, "whether to bill only premium amount") + preSeed := cmd.String("preseed", "", "request preSeed") + blockHash := cmd.String("blockhash", "", "request blockhash") + blockNum := cmd.Uint64("blocknum", 0, "request blocknumber") + subID := cmd.String("subid", "", "request subid") + cbGasLimit := cmd.Uint("cbgaslimit", 0, "request callback gas limit") + numWords := cmd.Uint("numwords", 0, "request num words") + sender := cmd.String("sender", "", "request sender") + + helpers.ParseArgs(cmd, os.Args[2:], + "coordinator-address", "pubkeyhex", "db-url", + "keystore-pw", "preseed", "blockhash", "blocknum", + "subid", "cbgaslimit", "numwords", "sender", + ) + + coordinator, err := vrf_coordinator_v2plus_interface.NewIVRFCoordinatorV2PlusInternal(common.HexToAddress(*coordinatorAddr), e.Ec) + helpers.PanicErr(err) + + db := sqlx.MustOpen("postgres", *dbURL) + lggr, _ := logger.NewLogger() + + keyStore := keystore.New(db, utils.DefaultScryptParams, lggr, pg.NewQConfig(false)) + err = keyStore.Unlock(*keystorePassword) + helpers.PanicErr(err) + + k, err := keyStore.VRF().Get(*pubKeyHex) + helpers.PanicErr(err) + + fmt.Println("vrf key found:", k) + + ps, err := proof.BigToSeed(decimal.RequireFromString(*preSeed).BigInt()) + helpers.PanicErr(err) + + parsedSubID := parseSubID(*subID) + extraArgs, err := extraargs.ExtraArgsV1(*nativePayment) + helpers.PanicErr(err) + preSeedData := proof.PreSeedDataV2Plus{ + PreSeed: ps, + BlockHash: common.HexToHash(*blockHash), + BlockNum: *blockNum, + SubId: parsedSubID, + CallbackGasLimit: uint32(*cbGasLimit), + NumWords: uint32(*numWords), + Sender: common.HexToAddress(*sender), + ExtraArgs: extraArgs, + } + fmt.Printf("preseed data: %+v\n", preSeedData) + finalSeed := proof.FinalSeedV2Plus(preSeedData) + + p, 
err := keyStore.VRF().GenerateProof(*pubKeyHex, finalSeed) + helpers.PanicErr(err) + + onChainProof, rc, err := proof.GenerateProofResponseFromProofV2Plus(p, preSeedData) + helpers.PanicErr(err) + + fmt.Printf("Proof: %+v, commitment: %+v\nSending fulfillment!", onChainProof, rc) + + tx, err := coordinator.FulfillRandomWords(e.Owner, onChainProof, rc, *onlyPremium) + helpers.PanicErr(err) + + fmt.Println("waiting for it to mine:", helpers.ExplorerLink(e.ChainID, tx.Hash())) + _, err = bind.WaitMined(context.Background(), e.Ec, tx) + helpers.PanicErr(err) + fmt.Println("done") + case "batch-bhs-deploy": + cmd := flag.NewFlagSet("batch-bhs-deploy", flag.ExitOnError) + bhsAddr := cmd.String("bhs-address", "", "address of the blockhash store contract") + helpers.ParseArgs(cmd, os.Args[2:], "bhs-address") + v2plusscripts.DeployBatchBHS(e, common.HexToAddress(*bhsAddr)) + case "batch-bhs-store": + cmd := flag.NewFlagSet("batch-bhs-store", flag.ExitOnError) + batchAddr := cmd.String("batch-bhs-address", "", "address of the batch bhs contract") + blockNumbersArg := cmd.String("block-numbers", "", "block numbers to store in a single transaction") + helpers.ParseArgs(cmd, os.Args[2:], "batch-bhs-address", "block-numbers") + batchBHS, err := batch_blockhash_store.NewBatchBlockhashStore(common.HexToAddress(*batchAddr), e.Ec) + helpers.PanicErr(err) + blockNumbers := helpers.ParseBigIntSlice(*blockNumbersArg) + helpers.PanicErr(err) + tx, err := batchBHS.Store(e.Owner, blockNumbers) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "batch-bhs-get": + cmd := flag.NewFlagSet("batch-bhs-get", flag.ExitOnError) + batchAddr := cmd.String("batch-bhs-address", "", "address of the batch bhs contract") + blockNumbersArg := cmd.String("block-numbers", "", "block numbers to store in a single transaction") + helpers.ParseArgs(cmd, os.Args[2:], "batch-bhs-address", "block-numbers") + batchBHS, err := 
batch_blockhash_store.NewBatchBlockhashStore(common.HexToAddress(*batchAddr), e.Ec) + helpers.PanicErr(err) + blockNumbers := helpers.ParseBigIntSlice(*blockNumbersArg) + helpers.PanicErr(err) + blockhashes, err := batchBHS.GetBlockhashes(nil, blockNumbers) + helpers.PanicErr(err) + for i, bh := range blockhashes { + fmt.Println("blockhash(", blockNumbers[i], ") = ", common.Bytes2Hex(bh[:])) + } + case "batch-bhs-storeVerify": + cmd := flag.NewFlagSet("batch-bhs-storeVerify", flag.ExitOnError) + batchAddr := cmd.String("batch-bhs-address", "", "address of the batch bhs contract") + startBlock := cmd.Int64("start-block", -1, "block number to start from. Must be in the BHS already.") + numBlocks := cmd.Int64("num-blocks", -1, "number of blockhashes to store. will be stored in a single tx, can't be > 150") + helpers.ParseArgs(cmd, os.Args[2:], "batch-bhs-address", "start-block", "num-blocks") + batchBHS, err := batch_blockhash_store.NewBatchBlockhashStore(common.HexToAddress(*batchAddr), e.Ec) + helpers.PanicErr(err) + blockRange, err := blockhashstore.DecreasingBlockRange(big.NewInt(*startBlock-1), big.NewInt(*startBlock-*numBlocks-1)) + helpers.PanicErr(err) + rlpHeaders, _, err := helpers.GetRlpHeaders(e, blockRange, true) + helpers.PanicErr(err) + tx, err := batchBHS.StoreVerifyHeader(e.Owner, blockRange, rlpHeaders) + helpers.PanicErr(err) + fmt.Println("storeVerifyHeader(", blockRange, ", ...) tx:", helpers.ExplorerLink(e.ChainID, tx.Hash())) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, fmt.Sprintf("blockRange: %d", blockRange)) + case "batch-bhs-backwards": + cmd := flag.NewFlagSet("batch-bhs-backwards", flag.ExitOnError) + batchAddr := cmd.String("batch-bhs-address", "", "address of the batch bhs contract") + bhsAddr := cmd.String("bhs-address", "", "address of the bhs contract") + startBlock := cmd.Int64("start-block", -1, "block number to start from. 
Must be in the BHS already.") + endBlock := cmd.Int64("end-block", -1, "block number to end at. Must be less than startBlock") + batchSize := cmd.Int64("batch-size", -1, "batch size") + gasMultiplier := cmd.Int64("gas-price-multiplier", 1, "gas price multiplier to use, defaults to 1 (no multiplication)") + helpers.ParseArgs(cmd, os.Args[2:], "batch-bhs-address", "bhs-address", "end-block", "batch-size") + + batchBHS, err := batch_blockhash_store.NewBatchBlockhashStore(common.HexToAddress(*batchAddr), e.Ec) + helpers.PanicErr(err) + + bhs, err := blockhash_store.NewBlockhashStore(common.HexToAddress(*bhsAddr), e.Ec) + helpers.PanicErr(err) + + // Sanity check BHS address in the Batch BHS. + bhsAddressBatchBHS, err := batchBHS.BHS(nil) + helpers.PanicErr(err) + + if bhsAddressBatchBHS != common.HexToAddress(*bhsAddr) { + log.Panicf("Mismatch in bhs addresses: batch bhs has %s while given %s", bhsAddressBatchBHS.String(), *bhsAddr) + } + + if *startBlock == -1 { + tx, err2 := bhs.StoreEarliest(e.Owner) + helpers.PanicErr(err2) + receipt := helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "Store Earliest") + // storeEarliest will store receipt block number minus 256 which is the earliest block + // the blockhash() instruction will work on. + *startBlock = receipt.BlockNumber.Int64() - 256 + } + + // Check if the provided start block is in the BHS. If it's not, print out an appropriate + // helpful error message. Otherwise users would get the cryptic "header has unknown blockhash" + // error which is a bit more difficult to diagnose. + // The Batch BHS returns a zero'd [32]byte array in the event the provided block number doesn't + // have it's blockhash in the BHS. 
+ var notFound [32]byte + hsh, err := batchBHS.GetBlockhashes(nil, []*big.Int{big.NewInt(*startBlock)}) + helpers.PanicErr(err) + + if len(hsh) != 1 { + helpers.PanicErr(fmt.Errorf("expected 1 item in returned array from BHS store, got: %d", len(hsh))) + } + + if bytes.Equal(hsh[0][:], notFound[:]) { + helpers.PanicErr(fmt.Errorf("expected block number %d (start-block argument) to be in the BHS already, did not find it there", *startBlock)) + } + + blockRange, err := blockhashstore.DecreasingBlockRange(big.NewInt(*startBlock-1), big.NewInt(*endBlock)) + helpers.PanicErr(err) + + for i := 0; i < len(blockRange); i += int(*batchSize) { + j := i + int(*batchSize) + if j > len(blockRange) { + j = len(blockRange) + } + + // Get suggested gas price and multiply by multiplier on every iteration + // so we don't have our transaction getting stuck. Need to be as fast as + // possible. + gp, err := e.Ec.SuggestGasPrice(context.Background()) + helpers.PanicErr(err) + e.Owner.GasPrice = new(big.Int).Mul(gp, big.NewInt(*gasMultiplier)) + + fmt.Println("using gas price", e.Owner.GasPrice, "wei") + + blockNumbers := blockRange[i:j] + blockHeaders, _, err := helpers.GetRlpHeaders(e, blockNumbers, true) + fmt.Println("storing blockNumbers:", blockNumbers) + helpers.PanicErr(err) + + tx, err := batchBHS.StoreVerifyHeader(e.Owner, blockNumbers, blockHeaders) + helpers.PanicErr(err) + + fmt.Println("sent tx:", helpers.ExplorerLink(e.ChainID, tx.Hash())) + + fmt.Println("waiting for it to mine...") + _, err = bind.WaitMined(context.Background(), e.Ec, tx) + helpers.PanicErr(err) + + fmt.Println("received receipt, continuing") + } + fmt.Println("done") + case "trusted-bhs-store": + cmd := flag.NewFlagSet("trusted-bhs-backwards", flag.ExitOnError) + trustedBHSAddr := cmd.String("trusted-bhs-address", "", "address of the trusted bhs contract") + blockNumbersString := cmd.String("block-numbers", "", "comma-separated list of block numbers e.g 123,456 ") + batchSizePtr := 
cmd.Int64("batch-size", -1, "batch size") + helpers.ParseArgs(cmd, os.Args[2:], "trusted-bhs-address", "batch-size", "block-numbers") + + // Parse batch size. + batchSize := int(*batchSizePtr) + + // Parse block numbers. + blockNumbers := helpers.ParseBigIntSlice(*blockNumbersString) + + // Instantiate trusted bhs. + trustedBHS, err := trusted_blockhash_store.NewTrustedBlockhashStore(common.HexToAddress(*trustedBHSAddr), e.Ec) + helpers.PanicErr(err) + + for i := 0; i < len(blockNumbers); i += batchSize { + // Get recent blockhash and block number anew each iteration. We do this so they do not get stale. + recentBlockNumber, err := e.Ec.BlockNumber(context.Background()) + helpers.PanicErr(err) + recentBlock, err := e.Ec.HeaderByNumber(context.Background(), big.NewInt(int64(recentBlockNumber))) + helpers.PanicErr(err) + recentBlockhash := recentBlock.Hash() + + // Get blockhashes to store. + blockNumbersSlice := blockNumbers[i : i+batchSize] + _, blockhashesStrings, err := helpers.GetRlpHeaders(e, blockNumbersSlice, false) + helpers.PanicErr(err) + fmt.Println("storing blockNumbers:", blockNumbers) + var blockhashes [][32]byte + for _, h := range blockhashesStrings { + blockhashes = append(blockhashes, common.HexToHash(h)) + } + + // Execute storage tx. 
+ tx, err := trustedBHS.StoreTrusted(e.Owner, blockNumbersSlice, blockhashes, big.NewInt(int64(recentBlockNumber)), recentBlockhash) + helpers.PanicErr(err) + fmt.Println("waiting for it to mine...") + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + fmt.Println("received receipt, continuing") + } + fmt.Println("done") + case "latest-head": + h, err := e.Ec.HeaderByNumber(context.Background(), nil) + helpers.PanicErr(err) + fmt.Println("latest head number:", h.Number.String()) + case "bhs-deploy": + v2plusscripts.DeployBHS(e) + case "coordinator-deploy": + coordinatorDeployCmd := flag.NewFlagSet("coordinator-deploy", flag.ExitOnError) + coordinatorDeployLinkAddress := coordinatorDeployCmd.String("link-address", "", "address of link token") + coordinatorDeployBHSAddress := coordinatorDeployCmd.String("bhs-address", "", "address of bhs") + coordinatorDeployLinkEthFeedAddress := coordinatorDeployCmd.String("link-eth-feed", "", "address of link-eth-feed") + helpers.ParseArgs(coordinatorDeployCmd, os.Args[2:], "link-address", "bhs-address", "link-eth-feed") + v2plusscripts.DeployCoordinator(e, *coordinatorDeployLinkAddress, *coordinatorDeployBHSAddress, *coordinatorDeployLinkEthFeedAddress) + case "coordinator-get-config": + cmd := flag.NewFlagSet("coordinator-get-config", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "coordinator address") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address") + + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + + v2plusscripts.PrintCoordinatorConfig(coordinator) + case "coordinator-set-config": + cmd := flag.NewFlagSet("coordinator-set-config", flag.ExitOnError) + setConfigAddress := cmd.String("coordinator-address", "", "coordinator address") + minConfs := cmd.Int("min-confs", 3, "min confs") + maxGasLimit := cmd.Int64("max-gas-limit", 2.5e6, "max gas limit") + stalenessSeconds := 
cmd.Int64("staleness-seconds", 86400, "staleness in seconds") + gasAfterPayment := cmd.Int64("gas-after-payment", 33285, "gas after payment calculation") + fallbackWeiPerUnitLink := cmd.String("fallback-wei-per-unit-link", "", "fallback wei per unit link") + flatFeeEthPPM := cmd.Int64("flat-fee-eth-ppm", 500, "fulfillment flat fee ETH ppm") + flatFeeLinkDiscountPPM := cmd.Int64("flat-fee-link-discount-ppm", 100, "fulfillment flat fee discount for PLI payment denominated in native ppm") + nativePremiumPercentage := cmd.Int64("native-premium-percentage", 1, "premium percentage for native payment") + linkPremiumPercentage := cmd.Int64("link-premium-percentage", 1, "premium percentage for PLI payment") + + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "fallback-wei-per-unit-link") + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*setConfigAddress), e.Ec) + helpers.PanicErr(err) + + v2plusscripts.SetCoordinatorConfig( + e, + *coordinator, + uint16(*minConfs), + uint32(*maxGasLimit), + uint32(*stalenessSeconds), + uint32(*gasAfterPayment), + decimal.RequireFromString(*fallbackWeiPerUnitLink).BigInt(), + uint32(*flatFeeEthPPM), + uint32(*flatFeeLinkDiscountPPM), + uint8(*nativePremiumPercentage), + uint8(*linkPremiumPercentage), + ) + case "coordinator-register-key": + coordinatorRegisterKey := flag.NewFlagSet("coordinator-register-key", flag.ExitOnError) + registerKeyAddress := coordinatorRegisterKey.String("address", "", "coordinator address") + registerKeyUncompressedPubKey := coordinatorRegisterKey.String("pubkey", "", "uncompressed pubkey") + gasLaneMaxGas := coordinatorRegisterKey.Uint64("gas-lane-max-gas", 1e12, "gas lane max gas price") + helpers.ParseArgs(coordinatorRegisterKey, os.Args[2:], "address", "pubkey", "oracle-address") + + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*registerKeyAddress), e.Ec) + helpers.PanicErr(err) + + // Put key in ECDSA format + if 
strings.HasPrefix(*registerKeyUncompressedPubKey, "0x") { + *registerKeyUncompressedPubKey = strings.Replace(*registerKeyUncompressedPubKey, "0x", "04", 1) + } + + v2plusscripts.RegisterCoordinatorProvingKey(e, *coordinator, *registerKeyUncompressedPubKey, *gasLaneMaxGas) + case "coordinator-deregister-key": + coordinatorDeregisterKey := flag.NewFlagSet("coordinator-deregister-key", flag.ExitOnError) + deregisterKeyAddress := coordinatorDeregisterKey.String("address", "", "coordinator address") + deregisterKeyUncompressedPubKey := coordinatorDeregisterKey.String("pubkey", "", "uncompressed pubkey") + helpers.ParseArgs(coordinatorDeregisterKey, os.Args[2:], "address", "pubkey") + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*deregisterKeyAddress), e.Ec) + helpers.PanicErr(err) + + // Put key in ECDSA format + if strings.HasPrefix(*deregisterKeyUncompressedPubKey, "0x") { + *deregisterKeyUncompressedPubKey = strings.Replace(*deregisterKeyUncompressedPubKey, "0x", "04", 1) + } + pubBytes, err := hex.DecodeString(*deregisterKeyUncompressedPubKey) + helpers.PanicErr(err) + pk, err := crypto.UnmarshalPubkey(pubBytes) + helpers.PanicErr(err) + tx, err := coordinator.DeregisterProvingKey(e.Owner, [2]*big.Int{pk.X, pk.Y}) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "coordinator-subscription": + coordinatorSub := flag.NewFlagSet("coordinator-subscription", flag.ExitOnError) + address := coordinatorSub.String("address", "", "coordinator address") + subID := coordinatorSub.String("sub-id", "", "sub-id") + helpers.ParseArgs(coordinatorSub, os.Args[2:], "address", "sub-id") + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*address), e.Ec) + helpers.PanicErr(err) + fmt.Println("sub-id", *subID, "address", *address, coordinator.Address()) + parsedSubID := parseSubID(*subID) + s, err := coordinator.GetSubscription(nil, parsedSubID) + helpers.PanicErr(err) + 
fmt.Printf("Subscription %+v\n", s) + case "consumer-deploy": + consumerDeployCmd := flag.NewFlagSet("consumer-deploy", flag.ExitOnError) + consumerCoordinator := consumerDeployCmd.String("coordinator-address", "", "coordinator address") + keyHash := consumerDeployCmd.String("key-hash", "", "key hash") + consumerLinkAddress := consumerDeployCmd.String("link-address", "", "link-address") + nativePayment := consumerDeployCmd.Bool("native-payment", false, "whether to use native payment or not") + + // TODO: add other params + helpers.ParseArgs(consumerDeployCmd, os.Args[2:], "coordinator-address", "key-hash", "link-address") + keyHashBytes := common.HexToHash(*keyHash) + _, tx, _, err := vrf_v2plus_single_consumer.DeployVRFV2PlusSingleConsumerExample( + e.Owner, + e.Ec, + common.HexToAddress(*consumerCoordinator), + common.HexToAddress(*consumerLinkAddress), + uint32(1000000), // gas callback + uint16(5), // confs + uint32(1), // words + keyHashBytes, + *nativePayment) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + case "consumer-subscribe": + consumerSubscribeCmd := flag.NewFlagSet("consumer-subscribe", flag.ExitOnError) + consumerSubscribeAddress := consumerSubscribeCmd.String("address", "", "consumer address") + helpers.ParseArgs(consumerSubscribeCmd, os.Args[2:], "address") + consumer, err := vrf_v2plus_single_consumer.NewVRFV2PlusSingleConsumerExample(common.HexToAddress(*consumerSubscribeAddress), e.Ec) + helpers.PanicErr(err) + tx, err := consumer.Subscribe(e.Owner) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "link-balance": + linkBalanceCmd := flag.NewFlagSet("link-balance", flag.ExitOnError) + linkAddress := linkBalanceCmd.String("link-address", "", "link-address") + address := linkBalanceCmd.String("address", "", "address") + helpers.ParseArgs(linkBalanceCmd, os.Args[2:], "link-address", "address") + lt, err := 
link_token_interface.NewLinkToken(common.HexToAddress(*linkAddress), e.Ec) + helpers.PanicErr(err) + b, err := lt.BalanceOf(nil, common.HexToAddress(*address)) + helpers.PanicErr(err) + fmt.Println(b) + case "consumer-cancel": + consumerCancelCmd := flag.NewFlagSet("consumer-cancel", flag.ExitOnError) + consumerCancelAddress := consumerCancelCmd.String("address", "", "consumer address") + helpers.ParseArgs(consumerCancelCmd, os.Args[2:], "address") + consumer, err := vrf_v2plus_single_consumer.NewVRFV2PlusSingleConsumerExample(common.HexToAddress(*consumerCancelAddress), e.Ec) + helpers.PanicErr(err) + tx, err := consumer.Unsubscribe(e.Owner, e.Owner.From) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "consumer-topup": + // NOTE NEED TO FUND CONSUMER WITH PLI FIRST + consumerTopupCmd := flag.NewFlagSet("consumer-topup", flag.ExitOnError) + consumerTopupAmount := consumerTopupCmd.String("amount", "", "amount in juels") + consumerTopupAddress := consumerTopupCmd.String("address", "", "consumer address") + helpers.ParseArgs(consumerTopupCmd, os.Args[2:], "amount", "address") + consumer, err := vrf_v2plus_single_consumer.NewVRFV2PlusSingleConsumerExample(common.HexToAddress(*consumerTopupAddress), e.Ec) + helpers.PanicErr(err) + amount, s := big.NewInt(0).SetString(*consumerTopupAmount, 10) + if !s { + panic(fmt.Sprintf("failed to parse top up amount '%s'", *consumerTopupAmount)) + } + tx, err := consumer.TopUpSubscription(e.Owner, amount) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "consumer-request": + consumerRequestCmd := flag.NewFlagSet("consumer-request", flag.ExitOnError) + consumerRequestAddress := consumerRequestCmd.String("address", "", "consumer address") + helpers.ParseArgs(consumerRequestCmd, os.Args[2:], "address") + consumer, err := vrf_v2plus_single_consumer.NewVRFV2PlusSingleConsumerExample(common.HexToAddress(*consumerRequestAddress), e.Ec) + 
helpers.PanicErr(err) + tx, err := consumer.RequestRandomWords(e.Owner) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "consumer-fund-and-request": + consumerRequestCmd := flag.NewFlagSet("consumer-request", flag.ExitOnError) + consumerRequestAddress := consumerRequestCmd.String("address", "", "consumer address") + helpers.ParseArgs(consumerRequestCmd, os.Args[2:], "address") + consumer, err := vrf_v2plus_single_consumer.NewVRFV2PlusSingleConsumerExample(common.HexToAddress(*consumerRequestAddress), e.Ec) + helpers.PanicErr(err) + // Fund and request 3 pli + tx, err := consumer.FundAndRequestRandomWords(e.Owner, big.NewInt(3000000000000000000)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "consumer-print": + consumerPrint := flag.NewFlagSet("consumer-print", flag.ExitOnError) + address := consumerPrint.String("address", "", "consumer address") + helpers.ParseArgs(consumerPrint, os.Args[2:], "address") + consumer, err := vrf_v2plus_single_consumer.NewVRFV2PlusSingleConsumerExample(common.HexToAddress(*address), e.Ec) + helpers.PanicErr(err) + rc, err := consumer.SRequestConfig(nil) + helpers.PanicErr(err) + rw, err := consumer.SRandomWords(nil, big.NewInt(0)) + if err != nil { + fmt.Println("no words") + } + rid, err := consumer.SRequestId(nil) + helpers.PanicErr(err) + fmt.Printf("Request config %+v Rw %+v Rid %+v\n", rc, rw, rid) + case "deploy-universe": + v2plusscripts.DeployUniverseViaCLI(e) + case "generate-proof-v2-plus": + generateProofForV2Plus(e) + case "eoa-consumer-deploy": + consumerDeployCmd := flag.NewFlagSet("eoa-consumer-deploy", flag.ExitOnError) + consumerCoordinator := consumerDeployCmd.String("coordinator-address", "", "coordinator address") + consumerLinkAddress := consumerDeployCmd.String("link-address", "", "link-address") + helpers.ParseArgs(consumerDeployCmd, os.Args[2:], "coordinator-address", "link-address", "key-hash") + + 
v2plusscripts.EoaDeployConsumer(e, *consumerCoordinator, *consumerLinkAddress) + case "eoa-load-test-consumer-deploy": + loadTestConsumerDeployCmd := flag.NewFlagSet("eoa-load-test-consumer-deploy", flag.ExitOnError) + consumerCoordinator := loadTestConsumerDeployCmd.String("coordinator-address", "", "coordinator address") + consumerLinkAddress := loadTestConsumerDeployCmd.String("link-address", "", "link-address") + helpers.ParseArgs(loadTestConsumerDeployCmd, os.Args[2:], "coordinator-address", "link-address") + _, tx, _, err := vrf_load_test_external_sub_owner.DeployVRFLoadTestExternalSubOwner( + e.Owner, + e.Ec, + common.HexToAddress(*consumerCoordinator), + common.HexToAddress(*consumerLinkAddress)) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + case "eoa-load-test-consumer-with-metrics-deploy": + loadTestConsumerDeployCmd := flag.NewFlagSet("eoa-load-test-consumer-with-metrics-deploy", flag.ExitOnError) + consumerCoordinator := loadTestConsumerDeployCmd.String("coordinator-address", "", "coordinator address") + helpers.ParseArgs(loadTestConsumerDeployCmd, os.Args[2:], "coordinator-address") + _, tx, _, err := vrf_v2plus_load_test_with_metrics.DeployVRFV2PlusLoadTestWithMetrics( + e.Owner, + e.Ec, + common.HexToAddress(*consumerCoordinator), + ) + helpers.PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + case "eoa-create-sub": + createSubCmd := flag.NewFlagSet("eoa-create-sub", flag.ExitOnError) + coordinatorAddress := createSubCmd.String("coordinator-address", "", "coordinator address") + helpers.ParseArgs(createSubCmd, os.Args[2:], "coordinator-address") + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + v2plusscripts.EoaCreateSub(e, *coordinator) + case "eoa-add-sub-consumer": + addSubConsCmd := flag.NewFlagSet("eoa-add-sub-consumer", flag.ExitOnError) + coordinatorAddress 
:= addSubConsCmd.String("coordinator-address", "", "coordinator address") + subID := addSubConsCmd.String("sub-id", "", "sub-id") + consumerAddress := addSubConsCmd.String("consumer-address", "", "consumer address") + helpers.ParseArgs(addSubConsCmd, os.Args[2:], "coordinator-address", "sub-id", "consumer-address") + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + parsedSubID := parseSubID(*subID) + v2plusscripts.EoaAddConsumerToSub(e, *coordinator, parsedSubID, *consumerAddress) + case "eoa-create-fund-authorize-sub": + // Lets just treat the owner key as the EOA controlling the sub + cfaSubCmd := flag.NewFlagSet("eoa-create-fund-authorize-sub", flag.ExitOnError) + coordinatorAddress := cfaSubCmd.String("coordinator-address", "", "coordinator address") + amountStr := cfaSubCmd.String("amount", "", "amount to fund in juels") + consumerAddress := cfaSubCmd.String("consumer-address", "", "consumer address") + consumerLinkAddress := cfaSubCmd.String("link-address", "", "link-address") + helpers.ParseArgs(cfaSubCmd, os.Args[2:], "coordinator-address", "amount", "consumer-address", "link-address") + amount, s := big.NewInt(0).SetString(*amountStr, 10) + if !s { + panic(fmt.Sprintf("failed to parse top up amount '%s'", *amountStr)) + } + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + fmt.Println(amount, consumerLinkAddress) + txcreate, err := coordinator.CreateSubscription(e.Owner) + helpers.PanicErr(err) + fmt.Println("Create sub", "TX", helpers.ExplorerLink(e.ChainID, txcreate.Hash())) + helpers.ConfirmTXMined(context.Background(), e.Ec, txcreate, e.ChainID) + sub := make(chan *vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCreated) + subscription, err := coordinator.WatchSubscriptionCreated(nil, sub, nil) + helpers.PanicErr(err) + defer subscription.Unsubscribe() + created := <-sub + 
linkToken, err := link_token_interface.NewLinkToken(common.HexToAddress(*consumerLinkAddress), e.Ec) + helpers.PanicErr(err) + bal, err := linkToken.BalanceOf(nil, e.Owner.From) + helpers.PanicErr(err) + fmt.Println("OWNER BALANCE", bal, e.Owner.From.String(), amount.String()) + b, err := evmutils.ABIEncode(`[{"type":"uint64"}]`, created.SubId) + helpers.PanicErr(err) + e.Owner.GasLimit = 500000 + tx, err := linkToken.TransferAndCall(e.Owner, coordinator.Address(), amount, b) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, fmt.Sprintf("Sub id: %d", created.SubId)) + subFunded := make(chan *vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionFunded) + fundSub, err := coordinator.WatchSubscriptionFunded(nil, subFunded, []*big.Int{created.SubId}) + helpers.PanicErr(err) + defer fundSub.Unsubscribe() + <-subFunded // Add a consumer once its funded + txadd, err := coordinator.AddConsumer(e.Owner, created.SubId, common.HexToAddress(*consumerAddress)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, txadd, e.ChainID) + case "eoa-request": + request := flag.NewFlagSet("eoa-request", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + subID := request.String("sub-id", "", "subscription ID") + cbGasLimit := request.Uint("cb-gas-limit", 1_000_000, "callback gas limit") + requestConfirmations := request.Uint("request-confirmations", 3, "minimum request confirmations") + numWords := request.Uint("num-words", 3, "number of words to request") + keyHash := request.String("key-hash", "", "key hash") + nativePayment := request.Bool("native-payment", false, "whether to use native payment or not") + helpers.ParseArgs(request, os.Args[2:], "consumer-address") + keyHashBytes := common.HexToHash(*keyHash) + consumer, err := vrf_v2plus_sub_owner.NewVRFV2PlusExternalSubOwnerExample( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + tx, err := 
consumer.RequestRandomWords(e.Owner, parseSubID(*subID), uint32(*cbGasLimit), uint16(*requestConfirmations), uint32(*numWords), keyHashBytes, *nativePayment) + helpers.PanicErr(err) + fmt.Println("TX", helpers.ExplorerLink(e.ChainID, tx.Hash())) + r, err := bind.WaitMined(context.Background(), e.Ec, tx) + helpers.PanicErr(err) + fmt.Println("Receipt blocknumber:", r.BlockNumber) + case "eoa-load-test-read": + cmd := flag.NewFlagSet("eoa-load-test-read", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "consumer address") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + consumer, err := vrf_load_test_external_sub_owner.NewVRFLoadTestExternalSubOwner( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + rc, err := consumer.SResponseCount(nil) + helpers.PanicErr(err) + fmt.Println("load tester", *consumerAddress, "response count:", rc) + case "eoa-load-test-request": + request := flag.NewFlagSet("eoa-load-test-request", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + subID := request.Uint64("sub-id", 0, "subscription ID") + requestConfirmations := request.Uint("request-confirmations", 3, "minimum request confirmations") + keyHash := request.String("key-hash", "", "key hash") + requests := request.Uint("requests", 10, "number of randomness requests to make per run") + runs := request.Uint("runs", 1, "number of runs to do. 
total randomness requests will be (requests * runs).") + helpers.ParseArgs(request, os.Args[2:], "consumer-address", "sub-id", "key-hash") + keyHashBytes := common.HexToHash(*keyHash) + consumer, err := vrf_load_test_external_sub_owner.NewVRFLoadTestExternalSubOwner( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + var txes []*types.Transaction + for i := 0; i < int(*runs); i++ { + tx, err := consumer.RequestRandomWords(e.Owner, *subID, uint16(*requestConfirmations), + keyHashBytes, uint16(*requests)) + helpers.PanicErr(err) + fmt.Printf("TX %d: %s\n", i+1, helpers.ExplorerLink(e.ChainID, tx.Hash())) + txes = append(txes, tx) + } + fmt.Println("Total number of requests sent:", (*requests)*(*runs)) + fmt.Println("fetching receipts for all transactions") + for i, tx := range txes { + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, fmt.Sprintf("load test %d", i+1)) + } + case "eoa-load-test-request-with-metrics": + request := flag.NewFlagSet("eoa-load-test-request-with-metrics", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + subID := request.String("sub-id", "", "subscription ID") + requestConfirmations := request.Uint("request-confirmations", 3, "minimum request confirmations") + keyHash := request.String("key-hash", "", "key hash") + cbGasLimit := request.Uint("cb-gas-limit", 100_000, "request callback gas limit") + nativePaymentEnabled := request.Bool("native-payment-enabled", false, "native payment enabled") + numWords := request.Uint("num-words", 1, "num words to request") + requests := request.Uint("requests", 10, "number of randomness requests to make per run") + runs := request.Uint("runs", 1, "number of runs to do. 
total randomness requests will be (requests * runs).") + helpers.ParseArgs(request, os.Args[2:], "consumer-address", "sub-id", "key-hash") + keyHashBytes := common.HexToHash(*keyHash) + consumer, err := vrf_v2plus_load_test_with_metrics.NewVRFV2PlusLoadTestWithMetrics( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + var txes []*types.Transaction + for i := 0; i < int(*runs); i++ { + tx, err := consumer.RequestRandomWords( + e.Owner, + decimal.RequireFromString(*subID).BigInt(), + uint16(*requestConfirmations), + keyHashBytes, + uint32(*cbGasLimit), + *nativePaymentEnabled, + uint32(*numWords), + uint16(*requests), + ) + helpers.PanicErr(err) + fmt.Printf("TX %d: %s\n", i+1, helpers.ExplorerLink(e.ChainID, tx.Hash())) + txes = append(txes, tx) + } + fmt.Println("Total number of requests sent:", (*requests)*(*runs)) + fmt.Println("fetching receipts for all transactions") + for i, tx := range txes { + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, fmt.Sprintf("load test %d", i+1)) + } + case "eoa-load-test-read-metrics": + request := flag.NewFlagSet("eoa-load-test-read-metrics", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + helpers.ParseArgs(request, os.Args[2:], "consumer-address") + consumer, err := vrf_v2plus_load_test_with_metrics.NewVRFV2PlusLoadTestWithMetrics( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + responseCount, err := consumer.SResponseCount(nil) + helpers.PanicErr(err) + fmt.Println("Response Count: ", responseCount) + requestCount, err := consumer.SRequestCount(nil) + helpers.PanicErr(err) + fmt.Println("Request Count: ", requestCount) + averageFulfillmentInMillions, err := consumer.SAverageFulfillmentInMillions(nil) + helpers.PanicErr(err) + fmt.Println("Average Fulfillment In Millions: ", averageFulfillmentInMillions) + slowestFulfillment, err := consumer.SSlowestFulfillment(nil) + helpers.PanicErr(err) + 
fmt.Println("Slowest Fulfillment: ", slowestFulfillment) + fastestFulfillment, err := consumer.SFastestFulfillment(nil) + helpers.PanicErr(err) + fmt.Println("Fastest Fulfillment: ", fastestFulfillment) + case "eoa-load-test-reset-metrics": + request := flag.NewFlagSet("eoa-load-test-reset-metrics", flag.ExitOnError) + consumerAddress := request.String("consumer-address", "", "consumer address") + helpers.ParseArgs(request, os.Args[2:], "consumer-address") + consumer, err := vrf_v2plus_load_test_with_metrics.NewVRFV2PlusLoadTestWithMetrics( + common.HexToAddress(*consumerAddress), + e.Ec) + helpers.PanicErr(err) + _, err = consumer.Reset(e.Owner) + helpers.PanicErr(err) + fmt.Println("Load Test Consumer With Metrics was reset ") + case "eoa-transfer-sub": + trans := flag.NewFlagSet("eoa-transfer-sub", flag.ExitOnError) + coordinatorAddress := trans.String("coordinator-address", "", "coordinator address") + subID := trans.String("sub-id", "", "sub-id") + to := trans.String("to", "", "to") + helpers.ParseArgs(trans, os.Args[2:], "coordinator-address", "sub-id", "to") + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + tx, err := coordinator.RequestSubscriptionOwnerTransfer(e.Owner, parseSubID(*subID), common.HexToAddress(*to)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "eoa-accept-sub": + accept := flag.NewFlagSet("eoa-accept-sub", flag.ExitOnError) + coordinatorAddress := accept.String("coordinator-address", "", "coordinator address") + subID := accept.String("sub-id", "", "sub-id") + helpers.ParseArgs(accept, os.Args[2:], "coordinator-address", "sub-id") + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + tx, err := coordinator.AcceptSubscriptionOwnerTransfer(e.Owner, parseSubID(*subID)) + helpers.PanicErr(err) + 
helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "eoa-cancel-sub": + cancel := flag.NewFlagSet("eoa-cancel-sub", flag.ExitOnError) + coordinatorAddress := cancel.String("coordinator-address", "", "coordinator address") + subID := cancel.String("sub-id", "", "sub-id") + helpers.ParseArgs(cancel, os.Args[2:], "coordinator-address", "sub-id") + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + tx, err := coordinator.CancelSubscription(e.Owner, parseSubID(*subID), e.Owner.From) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "eoa-fund-sub-with-native-token": + fund := flag.NewFlagSet("eoa-fund-sub-with-native-token", flag.ExitOnError) + coordinatorAddress := fund.String("coordinator-address", "", "coordinator address") + amountStr := fund.String("amount", "", "amount to fund in wei") + subID := fund.String("sub-id", "", "sub-id") + helpers.ParseArgs(fund, os.Args[2:], "coordinator-address", "amount", "sub-id") + amount, s := big.NewInt(0).SetString(*amountStr, 10) + if !s { + panic(fmt.Sprintf("failed to parse top up amount '%s'", *amountStr)) + } + parsedSubID := parseSubID(*subID) + + v2plusscripts.EoaFundSubWithNative(e, common.HexToAddress(*coordinatorAddress), parsedSubID, amount) + case "eoa-fund-sub": + fund := flag.NewFlagSet("eoa-fund-sub", flag.ExitOnError) + coordinatorAddress := fund.String("coordinator-address", "", "coordinator address") + amountStr := fund.String("amount", "", "amount to fund in juels") + subID := fund.String("sub-id", "", "sub-id") + consumerLinkAddress := fund.String("link-address", "", "link-address") + helpers.ParseArgs(fund, os.Args[2:], "coordinator-address", "amount", "sub-id", "link-address") + amount, s := big.NewInt(0).SetString(*amountStr, 10) + if !s { + panic(fmt.Sprintf("failed to parse top up amount '%s'", *amountStr)) + } + coordinator, err := 
vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + + v2plusscripts.EoaFundSubWithLink(e, *coordinator, *consumerLinkAddress, amount, parseSubID(*subID)) + case "eoa-read": + cmd := flag.NewFlagSet("eoa-read", flag.ExitOnError) + consumerAddress := cmd.String("consumer", "", "consumer address") + helpers.ParseArgs(cmd, os.Args[2:], "consumer") + consumer, err := vrf_v2plus_single_consumer.NewVRFV2PlusSingleConsumerExample(common.HexToAddress(*consumerAddress), e.Ec) + helpers.PanicErr(err) + word, err := consumer.SRandomWords(nil, big.NewInt(0)) + if err != nil { + fmt.Println("no words (yet?)") + } + reqID, err := consumer.SRequestId(nil) + helpers.PanicErr(err) + fmt.Println("request id:", reqID.String(), "1st random word:", word) + case "owner-cancel-sub": + cancel := flag.NewFlagSet("owner-cancel-sub", flag.ExitOnError) + coordinatorAddress := cancel.String("coordinator-address", "", "coordinator address") + subID := cancel.String("sub-id", "", "sub-id") + helpers.ParseArgs(cancel, os.Args[2:], "coordinator-address", "sub-id") + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + tx, err := coordinator.OwnerCancelSubscription(e.Owner, parseSubID(*subID)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "sub-balance": + consumerBalanceCmd := flag.NewFlagSet("sub-balance", flag.ExitOnError) + coordinatorAddress := consumerBalanceCmd.String("coordinator-address", "", "coordinator address") + subID := consumerBalanceCmd.String("sub-id", "", "subscription id") + helpers.ParseArgs(consumerBalanceCmd, os.Args[2:], "coordinator-address", "sub-id") + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + resp, err := coordinator.GetSubscription(nil, parseSubID(*subID)) + helpers.PanicErr(err) + 
fmt.Println("sub id", *subID, "balance:", resp.Balance) + case "coordinator-withdrawable-tokens": + withdrawableTokensCmd := flag.NewFlagSet("coordinator-withdrawable-tokens", flag.ExitOnError) + coordinator := withdrawableTokensCmd.String("coordinator-address", "", "coordinator address") + oracle := withdrawableTokensCmd.String("oracle-address", "", "oracle address") + start := withdrawableTokensCmd.Int("start-link", 10_000, "the starting amount of PLI to check") + helpers.ParseArgs(withdrawableTokensCmd, os.Args[2:], "coordinator-address", "oracle-address") + + coordinatorAddress := common.HexToAddress(*coordinator) + oracleAddress := common.HexToAddress(*oracle) + abi, err := vrf_coordinator_v2_5.VRFCoordinatorV25MetaData.GetAbi() + helpers.PanicErr(err) + + isWithdrawable := func(amount *big.Int) bool { + data, err := abi.Pack("oracleWithdraw", oracleAddress /* this can be any address */, amount) + helpers.PanicErr(err) + + _, err = e.Ec.CallContract(context.Background(), ethereum.CallMsg{ + From: oracleAddress, + To: &coordinatorAddress, + Data: data, + }, nil) + if err == nil { + return true + } else if strings.Contains(err.Error(), "execution reverted") { + return false + } + panic(err) + } + + result := helpers.BinarySearch(assets.Ether(int64(*start*2)).ToInt(), big.NewInt(0), isWithdrawable) + + fmt.Printf("Withdrawable amount for oracle %s is %s\n", oracleAddress.String(), result.String()) + case "coordinator-transfer-ownership": + cmd := flag.NewFlagSet("coordinator-transfer-ownership", flag.ExitOnError) + coordinatorAddress := cmd.String("coordinator-address", "", "v2 coordinator address") + newOwner := cmd.String("new-owner", "", "new owner address") + helpers.ParseArgs(cmd, os.Args[2:], "coordinator-address", "new-owner") + + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + + tx, err := coordinator.TransferOwnership(e.Owner, common.HexToAddress(*newOwner)) + 
helpers.PanicErr(err) + + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "transfer ownership to", *newOwner) + case "coordinator-reregister-proving-key": + coordinatorReregisterKey := flag.NewFlagSet("coordinator-register-key", flag.ExitOnError) + coordinatorAddress := coordinatorReregisterKey.String("coordinator-address", "", "coordinator address") + uncompressedPubKey := coordinatorReregisterKey.String("pubkey", "", "uncompressed pubkey") + skipDeregister := coordinatorReregisterKey.Bool("skip-deregister", false, "if true, key will not be deregistered") + gasLaneMaxGas := coordinatorReregisterKey.Uint64("gas-lane-max-gas", 1e12, "gas lane max gas") + helpers.ParseArgs(coordinatorReregisterKey, os.Args[2:], "coordinator-address", "pubkey", "new-oracle-address") + + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + + // Put key in ECDSA format + if strings.HasPrefix(*uncompressedPubKey, "0x") { + *uncompressedPubKey = strings.Replace(*uncompressedPubKey, "0x", "04", 1) + } + pubBytes, err := hex.DecodeString(*uncompressedPubKey) + helpers.PanicErr(err) + pk, err := crypto.UnmarshalPubkey(pubBytes) + helpers.PanicErr(err) + + var deregisterTx *types.Transaction + if !*skipDeregister { + deregisterTx, err = coordinator.DeregisterProvingKey(e.Owner, [2]*big.Int{pk.X, pk.Y}) + helpers.PanicErr(err) + fmt.Println("Deregister transaction", helpers.ExplorerLink(e.ChainID, deregisterTx.Hash())) + } + + // Use a higher gas price for the register call + e.Owner.GasPrice.Mul(e.Owner.GasPrice, big.NewInt(2)) + registerTx, err := coordinator.RegisterProvingKey(e.Owner, + [2]*big.Int{pk.X, pk.Y}, *gasLaneMaxGas) + helpers.PanicErr(err) + fmt.Println("Register transaction", helpers.ExplorerLink(e.ChainID, registerTx.Hash())) + + if !*skipDeregister { + fmt.Println("Waiting for deregister transaction to be mined...") + var deregisterReceipt *types.Receipt + deregisterReceipt, 
err = bind.WaitMined(context.Background(), e.Ec, deregisterTx) + helpers.PanicErr(err) + fmt.Printf("Deregister transaction included in block %s\n", deregisterReceipt.BlockNumber.String()) + } + + fmt.Println("Waiting for register transaction to be mined...") + registerReceipt, err := bind.WaitMined(context.Background(), e.Ec, registerTx) + helpers.PanicErr(err) + fmt.Printf("Register transaction included in block %s\n", registerReceipt.BlockNumber.String()) + case "wrapper-deploy": + cmd := flag.NewFlagSet("wrapper-deploy", flag.ExitOnError) + linkAddress := cmd.String("link-address", "", "address of link token") + linkETHFeedAddress := cmd.String("link-eth-feed", "", "address of link-eth-feed") + coordinatorAddress := cmd.String("coordinator-address", "", "address of the vrf coordinator v2 contract") + helpers.ParseArgs(cmd, os.Args[2:], "link-address", "link-eth-feed", "coordinator-address") + v2plusscripts.WrapperDeploy(e, + common.HexToAddress(*linkAddress), + common.HexToAddress(*linkETHFeedAddress), + common.HexToAddress(*coordinatorAddress)) + case "wrapper-withdraw": + cmd := flag.NewFlagSet("wrapper-withdraw", flag.ExitOnError) + wrapperAddress := cmd.String("wrapper-address", "", "address of the VRFV2Wrapper contract") + recipientAddress := cmd.String("recipient-address", "", "address to withdraw to") + linkAddress := cmd.String("link-address", "", "address of link token") + helpers.ParseArgs(cmd, os.Args[2:], "wrapper-address", "recipient-address", "link-address") + wrapper, err := vrfv2plus_wrapper.NewVRFV2PlusWrapper(common.HexToAddress(*wrapperAddress), e.Ec) + helpers.PanicErr(err) + link, err := link_token_interface.NewLinkToken(common.HexToAddress(*linkAddress), e.Ec) + helpers.PanicErr(err) + balance, err := link.BalanceOf(nil, common.HexToAddress(*wrapperAddress)) + helpers.PanicErr(err) + tx, err := wrapper.Withdraw(e.Owner, common.HexToAddress(*recipientAddress), balance) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), 
e.Ec, tx, e.ChainID, "withdrawing", balance.String(), "Juels from", *wrapperAddress, "to", *recipientAddress) + case "wrapper-get-subscription-id": + cmd := flag.NewFlagSet("wrapper-get-subscription-id", flag.ExitOnError) + wrapperAddress := cmd.String("wrapper-address", "", "address of the VRFV2Wrapper contract") + helpers.ParseArgs(cmd, os.Args[2:], "wrapper-address") + wrapper, err := vrfv2plus_wrapper.NewVRFV2PlusWrapper(common.HexToAddress(*wrapperAddress), e.Ec) + helpers.PanicErr(err) + subID, err := wrapper.SUBSCRIPTIONID(nil) + helpers.PanicErr(err) + fmt.Println("subscription id of wrapper", *wrapperAddress, "is:", subID) + case "wrapper-configure": + cmd := flag.NewFlagSet("wrapper-configure", flag.ExitOnError) + wrapperAddress := cmd.String("wrapper-address", "", "address of the VRFV2Wrapper contract") + wrapperGasOverhead := cmd.Uint("wrapper-gas-overhead", 50_000, "amount of gas overhead in wrapper fulfillment") + coordinatorGasOverhead := cmd.Uint("coordinator-gas-overhead", 52_000, "amount of gas overhead in coordinator fulfillment") + wrapperPremiumPercentage := cmd.Uint("wrapper-premium-percentage", 25, "gas premium charged by wrapper") + keyHash := cmd.String("key-hash", "", "the keyhash that wrapper requests should use") + maxNumWords := cmd.Uint("max-num-words", 10, "the keyhash that wrapper requests should use") + fallbackWeiPerUnitLink := cmd.String("fallback-wei-per-unit-link", "", "the fallback wei per unit link") + stalenessSeconds := cmd.Uint("staleness-seconds", 86400, "the number of seconds of staleness to allow") + fulfillmentFlatFeeLinkPPM := cmd.Uint("fulfillment-flat-fee-link-ppm", 500, "the link flat fee in ppm to charge for fulfillment") + fulfillmentFlatFeeNativePPM := cmd.Uint("fulfillment-flat-fee-native-ppm", 500, "the native flat fee in ppm to charge for fulfillment") + helpers.ParseArgs(cmd, os.Args[2:], "wrapper-address", "key-hash", "fallback-wei-per-unit-link") + + v2plusscripts.WrapperConfigure(e, + 
common.HexToAddress(*wrapperAddress), + *wrapperGasOverhead, + *coordinatorGasOverhead, + *wrapperPremiumPercentage, + *keyHash, + *maxNumWords, + decimal.RequireFromString(*fallbackWeiPerUnitLink).BigInt(), + uint32(*stalenessSeconds), + uint32(*fulfillmentFlatFeeLinkPPM), + uint32(*fulfillmentFlatFeeNativePPM)) + case "wrapper-get-fulfillment-tx-size": + cmd := flag.NewFlagSet("wrapper-get-fulfillment-tx-size", flag.ExitOnError) + wrapperAddress := cmd.String("wrapper-address", "", "address of the VRFV2Wrapper contract") + helpers.ParseArgs(cmd, os.Args[2:], "wrapper-address") + wrapper, err := vrfv2plus_wrapper.NewVRFV2PlusWrapper(common.HexToAddress(*wrapperAddress), e.Ec) + helpers.PanicErr(err) + size, err := wrapper.SFulfillmentTxSizeBytes(nil) + helpers.PanicErr(err) + fmt.Println("fulfillment tx size of wrapper", *wrapperAddress, "is:", size) + case "wrapper-set-fulfillment-tx-size": + cmd := flag.NewFlagSet("wrapper-set-fulfillment-tx-size", flag.ExitOnError) + wrapperAddress := cmd.String("wrapper-address", "", "address of the VRFV2Wrapper contract") + size := cmd.Uint("size", 0, "size of the fulfillment transaction") + helpers.ParseArgs(cmd, os.Args[2:], "wrapper-address", "size") + wrapper, err := vrfv2plus_wrapper.NewVRFV2PlusWrapper(common.HexToAddress(*wrapperAddress), e.Ec) + helpers.PanicErr(err) + tx, err := wrapper.SetFulfillmentTxSize(e.Owner, uint32(*size)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "set fulfillment tx size") + case "wrapper-consumer-deploy": + cmd := flag.NewFlagSet("wrapper-consumer-deploy", flag.ExitOnError) + linkAddress := cmd.String("link-address", "", "address of link token") + wrapperAddress := cmd.String("wrapper-address", "", "address of the VRFV2Wrapper contract") + helpers.ParseArgs(cmd, os.Args[2:], "link-address", "wrapper-address") + + v2plusscripts.WrapperConsumerDeploy(e, + common.HexToAddress(*linkAddress), + common.HexToAddress(*wrapperAddress)) + case 
"wrapper-consumer-request": + cmd := flag.NewFlagSet("wrapper-consumer-request", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "address of wrapper consumer") + cbGasLimit := cmd.Uint("cb-gas-limit", 100_000, "request callback gas limit") + confirmations := cmd.Uint("request-confirmations", 3, "request confirmations") + numWords := cmd.Uint("num-words", 1, "num words to request") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + + consumer, err := vrfv2plus_wrapper_consumer_example.NewVRFV2PlusWrapperConsumerExample( + common.HexToAddress(*consumerAddress), e.Ec) + helpers.PanicErr(err) + + tx, err := consumer.MakeRequest(e.Owner, uint32(*cbGasLimit), uint16(*confirmations), uint32(*numWords)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) + case "wrapper-consumer-request-status": + cmd := flag.NewFlagSet("wrapper-consumer-request-status", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "address of wrapper consumer") + requestID := cmd.String("request-id", "", "request id of vrf request") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address", "request-id") + + consumer, err := vrfv2plus_wrapper_consumer_example.NewVRFV2PlusWrapperConsumerExample( + common.HexToAddress(*consumerAddress), e.Ec) + helpers.PanicErr(err) + + status, err := consumer.GetRequestStatus(nil, decimal.RequireFromString(*requestID).BigInt()) + helpers.PanicErr(err) + + statusStringer := func(status vrfv2plus_wrapper_consumer_example.GetRequestStatus) string { + return fmt.Sprint("paid (juels):", status.Paid.String(), + ", fulfilled?:", status.Fulfilled, + ", random words:", status.RandomWords) + } + + fmt.Println("status for request", *requestID, "is:") + fmt.Println(statusStringer(status)) + case "wrapper-consumer-withdraw-link": + cmd := flag.NewFlagSet("wrapper-consumer-withdraw-link", flag.ExitOnError) + consumerAddress := cmd.String("consumer-address", "", "address of wrapper 
consumer") + linkAddress := cmd.String("link-address", "", "address of link token") + helpers.ParseArgs(cmd, os.Args[2:], "consumer-address") + consumer, err := vrfv2plus_wrapper_consumer_example.NewVRFV2PlusWrapperConsumerExample( + common.HexToAddress(*consumerAddress), e.Ec) + helpers.PanicErr(err) + link, err := link_token_interface.NewLinkToken(common.HexToAddress(*linkAddress), e.Ec) + helpers.PanicErr(err) + balance, err := link.BalanceOf(nil, common.HexToAddress(*consumerAddress)) + helpers.PanicErr(err) + tx, err := consumer.WithdrawLink(e.Owner, balance) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, + "withdrawing", balance.String(), "juels from", *consumerAddress, "to", e.Owner.From.Hex()) + case "transfer-link": + cmd := flag.NewFlagSet("transfer-link", flag.ExitOnError) + linkAddress := cmd.String("link-address", "", "address of link token") + amountJuels := cmd.String("amount-juels", "0", "amount in juels to fund") + receiverAddress := cmd.String("receiver-address", "", "address of receiver (contract or eoa)") + helpers.ParseArgs(cmd, os.Args[2:], "amount-juels", "link-address", "receiver-address") + link, err := link_token_interface.NewLinkToken(common.HexToAddress(*linkAddress), e.Ec) + helpers.PanicErr(err) + tx, err := link.Transfer(e.Owner, common.HexToAddress(*receiverAddress), decimal.RequireFromString(*amountJuels).BigInt()) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "transfer", *amountJuels, "juels to", *receiverAddress) + case "latest-block-header": + cmd := flag.NewFlagSet("latest-block-header", flag.ExitOnError) + blockNumber := cmd.Int("block-number", -1, "block number") + helpers.ParseArgs(cmd, os.Args[2:]) + _ = helpers.CalculateLatestBlockHeader(e, *blockNumber) + case "wrapper-universe-deploy": + v2plusscripts.DeployWrapperUniverse(e) + default: + panic("unrecognized subcommand: " + os.Args[1]) + } +} + +func parseSubID(subID string) 
*big.Int {
+	parsedSubID, ok := new(big.Int).SetString(subID, 10)
+	if !ok {
+		helpers.PanicErr(fmt.Errorf("sub ID %s cannot be parsed", subID))
+	}
+	return parsedSubID
+}
diff --git a/core/scripts/vrfv2plus/testnet/proofs.go b/core/scripts/vrfv2plus/testnet/proofs.go
new file mode 100644
index 00000000..53589188
--- /dev/null
+++ b/core/scripts/vrfv2plus/testnet/proofs.go
@@ -0,0 +1,161 @@
+package main
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"math/big"
+	"os"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/shopspring/decimal"
+
+	helpers "github.com/goplugin/pluginv3.0/core/scripts/common"
+	"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey"
+	"github.com/goplugin/pluginv3.0/v2/core/services/vrf/extraargs"
+	"github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof"
+)
+
+var vrfProofTemplate = `{
+	pk: [
+		%s,
+		%s
+	],
+	gamma: [
+		%s,
+		%s
+	],
+	c: %s,
+	s: %s,
+	seed: %s,
+	uWitness: %s,
+	cGammaWitness: [
+		%s,
+		%s
+	],
+	sHashWitness: [
+		%s,
+		%s
+	],
+	zInv: %s
+}
+`
+
+var rcTemplate = `{
+	blockNum: %d,
+	subId: %d,
+	callbackGasLimit: %d,
+	numWords: %d,
+	sender: %s,
+	nativePayment: %t
+}
+`
+
+func generateProofForV2Plus(e helpers.Environment) {
+
+	deployCmd := flag.NewFlagSet("generate-proof", flag.ExitOnError)
+
+	keyHashString := deployCmd.String("key-hash", "", "key hash for VRF request")
+	preSeedString := deployCmd.String("pre-seed", "", "pre-seed for VRF request")
+	blockhashString := deployCmd.String("block-hash", "", "blockhash of VRF request")
+	blockNum := deployCmd.Uint64("block-num", 0, "block number of VRF request")
+	senderString := deployCmd.String("sender", "", "requestor of VRF request")
+	secretKeyString := deployCmd.String("secret-key", "10", "secret key for VRF V2Key")
+	subId := deployCmd.String("sub-id", "1", "subscription Id for VRF request")
+	callbackGasLimit := deployCmd.Uint64("gas-limit", 1_000_000, "callback gas limit for VRF request")
+	numWords := deployCmd.Uint64("num-words", 1, "number of words for VRF request")
+	nativePayment := deployCmd.Bool("native-payment", false, "whether the request is paid in native tokens instead of PLI")
+
+	helpers.ParseArgs(
+		deployCmd, os.Args[2:], "key-hash", "pre-seed", "block-hash", "block-num", "sender",
+	)
+
+	// Generate V2Key from secret key.
+	secretKey := decimal.RequireFromString(*secretKeyString).BigInt()
+	key := vrfkey.MustNewV2XXXTestingOnly(secretKey)
+	uncompressed, err := key.PublicKey.StringUncompressed()
+	if err != nil {
+		panic(err)
+	}
+	pk := key.PublicKey
+	pkh := pk.MustHash()
+	fmt.Println("Compressed: ", pk.String())
+	fmt.Println("Uncompressed: ", uncompressed)
+	fmt.Println("Hash: ", pkh.String())
+
+	// Parse big ints and hexes.
+	requestKeyHash := common.HexToHash(*keyHashString)
+	requestPreSeed := decimal.RequireFromString(*preSeedString).BigInt()
+	sender := common.HexToAddress(*senderString)
+	blockHash := common.HexToHash(*blockhashString)
+
+	// Ensure that the provided keyhash of the request matches the keyhash of the secret key.
+	if !bytes.Equal(requestKeyHash[:], pkh[:]) {
+		helpers.PanicErr(errors.New("invalid key hash"))
+	}
+
+	// Generate proof.
+	preSeed, err := proof.BigToSeed(requestPreSeed)
+	if err != nil {
+		helpers.PanicErr(fmt.Errorf("unable to parse preseed: %w", err))
+	}
+
+	parsedSubId, ok := new(big.Int).SetString(*subId, 10)
+	if !ok {
+		// Note: err is guaranteed nil here (BigToSeed failures panic above), so
+		// there is no error to wrap with %w; report the unparseable input only.
+		helpers.PanicErr(fmt.Errorf("unable to parse subID: %s", *subId))
+	}
+	extraArgs, err := extraargs.ExtraArgsV1(*nativePayment)
+	helpers.PanicErr(err)
+	preSeedData := proof.PreSeedDataV2Plus{
+		PreSeed:          preSeed,
+		BlockHash:        blockHash,
+		BlockNum:         *blockNum,
+		SubId:            parsedSubId,
+		CallbackGasLimit: uint32(*callbackGasLimit),
+		NumWords:         uint32(*numWords),
+		Sender:           sender,
+		ExtraArgs:        extraArgs,
+	}
+	finalSeed := proof.FinalSeedV2Plus(preSeedData)
+	p, err := key.GenerateProof(finalSeed)
+	if err != nil {
+		helpers.PanicErr(fmt.Errorf("unable to generate proof: %w", err))
+	}
+	onChainProof, rc, err := proof.GenerateProofResponseFromProofV2Plus(p, preSeedData)
+	if err != nil {
+		helpers.PanicErr(fmt.Errorf("unable to generate proof response: %w", err))
+	}
+
+	// Print formatted VRF proof.
+	fmt.Println("ON-CHAIN PROOF:")
+	fmt.Printf(
+		vrfProofTemplate,
+		onChainProof.Pk[0],
+		onChainProof.Pk[1],
+		onChainProof.Gamma[0],
+		onChainProof.Gamma[1],
+		onChainProof.C,
+		onChainProof.S,
+		onChainProof.Seed,
+		onChainProof.UWitness,
+		onChainProof.CGammaWitness[0],
+		onChainProof.CGammaWitness[1],
+		onChainProof.SHashWitness[0],
+		onChainProof.SHashWitness[1],
+		onChainProof.ZInv,
+	)
+
+	// Print formatted request commitment.
+	fmt.Println("\nREQUEST COMMITMENT:")
+	fmt.Printf(
+		rcTemplate,
+		rc.BlockNum,
+		rc.SubId,
+		rc.CallbackGasLimit,
+		rc.NumWords,
+		rc.Sender,
+		hexutil.Encode(rc.ExtraArgs),
+	)
+}
diff --git a/core/scripts/vrfv2plus/testnet/v2plusscripts/super_scripts.go b/core/scripts/vrfv2plus/testnet/v2plusscripts/super_scripts.go
new file mode 100644
index 00000000..4f271b4a
--- /dev/null
+++ b/core/scripts/vrfv2plus/testnet/v2plusscripts/super_scripts.go
@@ -0,0 +1,911 @@
+package v2plusscripts
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"flag"
+	"fmt"
+	"math/big"
+	"os"
+	"strings"
+
+	"github.com/goplugin/pluginv3.0/core/scripts/common/vrf/constants"
+	"github.com/goplugin/pluginv3.0/core/scripts/common/vrf/jobs"
+	"github.com/goplugin/pluginv3.0/core/scripts/common/vrf/model"
+	"github.com/goplugin/pluginv3.0/core/scripts/common/vrf/util"
+	evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types"
+	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface"
+
+	"github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/shopspring/decimal"
+
+	helpers "github.com/goplugin/pluginv3.0/core/scripts/common"
+	evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client"
+	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_blockhash_store"
+	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store"
+	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface"
+	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2_5"
+	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_sub_owner"
+	"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey"
+	"github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1"
+	
"github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" +) + +var coordinatorV2PlusABI = evmtypes.MustGetABI(vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalABI) + +type CoordinatorConfigV2Plus struct { + MinConfs int + MaxGasLimit int64 + StalenessSeconds int64 + GasAfterPayment int64 + FallbackWeiPerUnitLink *big.Int + FulfillmentFlatFeeNativePPM uint32 + FulfillmentFlatFeeLinkDiscountPPM uint32 + NativePremiumPercentage uint8 + LinkPremiumPercentage uint8 +} + +func SmokeTestVRF(e helpers.Environment) { + smokeCmd := flag.NewFlagSet("smoke", flag.ExitOnError) + + // required flags + linkAddress := smokeCmd.String("link-address", "", "address of link token") + linkEthAddress := smokeCmd.String("link-eth-feed", "", "address of link eth feed") + bhsAddressStr := smokeCmd.String("bhs-address", "", "address of blockhash store") + batchBHSAddressStr := smokeCmd.String("batch-bhs-address", "", "address of batch blockhash store") + coordinatorAddressStr := smokeCmd.String("coordinator-address", "", "address of the vrf coordinator v2 contract") + batchCoordinatorAddressStr := smokeCmd.String("batch-coordinator-address", "", "address of the batch vrf coordinator v2 contract") + subscriptionBalanceString := smokeCmd.String("subscription-balance", "1e19", "amount to fund subscription") + skipConfig := smokeCmd.Bool("skip-config", false, "skip setting coordinator config") + + // optional flags + fallbackWeiPerUnitLinkString := smokeCmd.String("fallback-wei-per-unit-link", "6e16", "fallback wei/link ratio") + minConfs := smokeCmd.Int("min-confs", 3, "min confs") + maxGasLimit := smokeCmd.Int64("max-gas-limit", 2.5e6, "max gas limit") + stalenessSeconds := smokeCmd.Int64("staleness-seconds", 86400, "staleness in seconds") + gasAfterPayment := smokeCmd.Int64("gas-after-payment", 33285, "gas after payment calculation") + flatFeeEthPPM := smokeCmd.Int64("flat-fee-eth-ppm", 500, "fulfillment flat fee ETH ppm") + flatFeeLinkDiscountPPM := 
smokeCmd.Int64("flat-fee-link-discount-ppm", 100, "fulfillment flat fee discount for PLI payment denominated in native ppm") + nativePremiumPercentage := smokeCmd.Int64("native-premium-percentage", 1, "premium percentage for native payment") + linkPremiumPercentage := smokeCmd.Int64("link-premium-percentage", 1, "premium percentage for PLI payment") + gasLaneMaxGas := smokeCmd.Int64("gas-lane-max-gas", 1e12, "gas lane max gas price") + + helpers.ParseArgs( + smokeCmd, os.Args[2:], + ) + + fallbackWeiPerUnitLink := decimal.RequireFromString(*fallbackWeiPerUnitLinkString).BigInt() + subscriptionBalance := decimal.RequireFromString(*subscriptionBalanceString).BigInt() + + // generate VRF key + key, err := vrfkey.NewV2() + helpers.PanicErr(err) + fmt.Println("vrf private key:", hexutil.Encode(key.Raw())) + fmt.Println("vrf public key:", key.PublicKey.String()) + fmt.Println("vrf key hash:", key.PublicKey.MustHash()) + + if len(*linkAddress) == 0 { + fmt.Println("\nDeploying PLI Token...") + address := helpers.DeployLinkToken(e).String() + linkAddress = &address + } + + if len(*linkEthAddress) == 0 { + fmt.Println("\nDeploying PLI/ETH Feed...") + address := helpers.DeployLinkEthFeed(e, *linkAddress, fallbackWeiPerUnitLink).String() + linkEthAddress = &address + } + + var bhsContractAddress common.Address + if len(*bhsAddressStr) == 0 { + fmt.Println("\nDeploying BHS...") + bhsContractAddress = DeployBHS(e) + } else { + bhsContractAddress = common.HexToAddress(*bhsAddressStr) + } + + var batchBHSAddress common.Address + if len(*batchBHSAddressStr) == 0 { + fmt.Println("\nDeploying Batch BHS...") + batchBHSAddress = DeployBatchBHS(e, bhsContractAddress) + } else { + batchBHSAddress = common.HexToAddress(*batchBHSAddressStr) + } + + var coordinatorAddress common.Address + if len(*coordinatorAddressStr) == 0 { + fmt.Println("\nDeploying Coordinator...") + coordinatorAddress = DeployCoordinator(e, *linkAddress, bhsContractAddress.String(), *linkEthAddress) + } else { + 
coordinatorAddress = common.HexToAddress(*coordinatorAddressStr) + } + + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(coordinatorAddress, e.Ec) + helpers.PanicErr(err) + + var batchCoordinatorAddress common.Address + if len(*batchCoordinatorAddressStr) == 0 { + fmt.Println("\nDeploying Batch Coordinator...") + batchCoordinatorAddress = DeployBatchCoordinatorV2(e, coordinatorAddress) + } else { + batchCoordinatorAddress = common.HexToAddress(*batchCoordinatorAddressStr) + } + + if !*skipConfig { + fmt.Println("\nSetting Coordinator Config...") + SetCoordinatorConfig( + e, + *coordinator, + uint16(*minConfs), + uint32(*maxGasLimit), + uint32(*stalenessSeconds), + uint32(*gasAfterPayment), + fallbackWeiPerUnitLink, + uint32(*flatFeeEthPPM), + uint32(*flatFeeLinkDiscountPPM), + uint8(*nativePremiumPercentage), + uint8(*linkPremiumPercentage), + ) + } + + fmt.Println("\nConfig set, getting current config from deployed contract...") + PrintCoordinatorConfig(coordinator) + + // Generate compressed public key and key hash + uncompressed, err := key.PublicKey.StringUncompressed() + helpers.PanicErr(err) + if strings.HasPrefix(uncompressed, "0x") { + uncompressed = strings.Replace(uncompressed, "0x", "04", 1) + } + pubBytes, err := hex.DecodeString(uncompressed) + helpers.PanicErr(err) + pk, err := crypto.UnmarshalPubkey(pubBytes) + helpers.PanicErr(err) + var pkBytes []byte + if big.NewInt(0).Mod(pk.Y, big.NewInt(2)).Uint64() != 0 { + pkBytes = append(pk.X.Bytes(), 1) + } else { + pkBytes = append(pk.X.Bytes(), 0) + } + var newPK secp256k1.PublicKey + copy(newPK[:], pkBytes) + + compressedPkHex := hexutil.Encode(pkBytes) + keyHash, err := newPK.Hash() + helpers.PanicErr(err) + fmt.Println("vrf key hash from unmarshal:", hexutil.Encode(keyHash[:])) + fmt.Println("vrf key hash from key:", key.PublicKey.MustHash()) + if kh := key.PublicKey.MustHash(); !bytes.Equal(keyHash[:], kh[:]) { + panic(fmt.Sprintf("unexpected key hash %s, expected %s", 
hexutil.Encode(keyHash[:]), key.PublicKey.MustHash().String())) + } + fmt.Println("compressed public key from unmarshal:", compressedPkHex) + fmt.Println("compressed public key from key:", key.PublicKey.String()) + if compressedPkHex != key.PublicKey.String() { + panic(fmt.Sprintf("unexpected compressed public key %s, expected %s", compressedPkHex, key.PublicKey.String())) + } + + kh1, err := coordinator.HashOfKey(nil, [2]*big.Int{pk.X, pk.Y}) + helpers.PanicErr(err) + fmt.Println("key hash from coordinator:", hexutil.Encode(kh1[:])) + if !bytes.Equal(kh1[:], keyHash[:]) { + panic(fmt.Sprintf("unexpected key hash %s, expected %s", hexutil.Encode(kh1[:]), hexutil.Encode(keyHash[:]))) + } + + fmt.Println("\nRegistering proving key...") + point, err := key.PublicKey.Point() + helpers.PanicErr(err) + x, y := secp256k1.Coordinates(point) + fmt.Println("proving key points x:", x, ", y:", y) + fmt.Println("proving key points from unmarshal:", pk.X, pk.Y) + tx, err := coordinator.RegisterProvingKey(e.Owner, [2]*big.Int{x, y}, uint64(*gasLaneMaxGas)) + helpers.PanicErr(err) + registerReceipt := helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "register proving key on", coordinatorAddress.String()) + var provingKeyRegisteredLog *vrf_coordinator_v2_5.VRFCoordinatorV25ProvingKeyRegistered + for _, log := range registerReceipt.Logs { + if log.Address == coordinatorAddress { + var err2 error + provingKeyRegisteredLog, err2 = coordinator.ParseProvingKeyRegistered(*log) + if err2 != nil { + continue + } + } + } + if provingKeyRegisteredLog == nil { + panic("no proving key registered log found") + } + if !bytes.Equal(provingKeyRegisteredLog.KeyHash[:], keyHash[:]) { + panic(fmt.Sprintf("unexpected key hash registered %s, expected %s", hexutil.Encode(provingKeyRegisteredLog.KeyHash[:]), hexutil.Encode(keyHash[:]))) + } + fmt.Println("key hash registered:", hexutil.Encode(provingKeyRegisteredLog.KeyHash[:])) + + fmt.Println("\nProving key registered, getting proving 
key hashes from deployed contract...") + registerdKeyHash, err := coordinator.SProvingKeyHashes(nil, big.NewInt(0)) + helpers.PanicErr(err) + fmt.Printf("Key hash registered: %x\n", registerdKeyHash) + ourKeyHash := key.PublicKey.MustHash() + if !bytes.Equal(registerdKeyHash[:], ourKeyHash[:]) { + panic(fmt.Sprintf("unexpected key hash %s, expected %s", hexutil.Encode(registerdKeyHash[:]), hexutil.Encode(ourKeyHash[:]))) + } + + fmt.Println("\nDeploying consumer...") + consumerAddress := EoaDeployConsumer(e, coordinatorAddress.String(), *linkAddress) + + fmt.Println("\nAdding subscription...") + EoaCreateSub(e, *coordinator) + + subID := FindSubscriptionID(e, coordinator) + helpers.PanicErr(err) + + fmt.Println("\nAdding consumer to subscription...") + EoaAddConsumerToSub(e, *coordinator, subID, consumerAddress.String()) + + if subscriptionBalance.Cmp(big.NewInt(0)) > 0 { + fmt.Println("\nFunding subscription with", subscriptionBalance, "juels...") + EoaFundSubWithLink(e, *coordinator, *linkAddress, subscriptionBalance, subID) + } else { + fmt.Println("Subscription", subID, "NOT getting funded. 
You must fund the subscription in order to use it!") + } + + fmt.Println("\nSubscribed and (possibly) funded, retrieving subscription from deployed contract...") + s, err := coordinator.GetSubscription(nil, subID) + helpers.PanicErr(err) + fmt.Printf("Subscription %+v\n", s) + + fmt.Println( + "\nDeployment complete.", + "\nPLI Token contract address:", *linkAddress, + "\nPLI/ETH Feed contract address:", *linkEthAddress, + "\nBlockhash Store contract address:", bhsContractAddress, + "\nBatch Blockhash Store contract address:", batchBHSAddress, + "\nVRF Coordinator Address:", coordinatorAddress, + "\nBatch VRF Coordinator Address:", batchCoordinatorAddress, + "\nVRF Consumer Address:", consumerAddress, + "\nVRF Subscription Id:", subID, + "\nVRF Subscription Balance:", *subscriptionBalanceString, + ) + + fmt.Println("making a request on consumer", consumerAddress) + consumer, err := vrf_v2plus_sub_owner.NewVRFV2PlusExternalSubOwnerExample(consumerAddress, e.Ec) + helpers.PanicErr(err) + tx, err = consumer.RequestRandomWords(e.Owner, subID, 100_000, 3, 3, provingKeyRegisteredLog.KeyHash, false) + helpers.PanicErr(err) + receipt := helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "request random words from", consumerAddress.String()) + fmt.Println("request blockhash:", receipt.BlockHash) + + // extract the RandomWordsRequested log from the receipt logs + var rwrLog *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested + for _, log := range receipt.Logs { + if log.Address == coordinatorAddress { + var err2 error + rwrLog, err2 = coordinator.ParseRandomWordsRequested(*log) + if err2 != nil { + continue + } + } + } + if rwrLog == nil { + panic("no RandomWordsRequested log found") + } + + fmt.Println("key hash:", hexutil.Encode(rwrLog.KeyHash[:])) + fmt.Println("request id:", rwrLog.RequestId) + fmt.Println("preseed:", rwrLog.PreSeed) + fmt.Println("num words:", rwrLog.NumWords) + fmt.Println("callback gas limit:", rwrLog.CallbackGasLimit) + 
fmt.Println("sender:", rwrLog.Sender) + fmt.Println("extra args:", hexutil.Encode(rwrLog.ExtraArgs)) + + // generate the VRF proof, follow the same process as the node + // we assume there is enough funds in the subscription to pay for the gas + preSeed, err := proof.BigToSeed(rwrLog.PreSeed) + helpers.PanicErr(err) + + preSeedData := proof.PreSeedDataV2Plus{ + PreSeed: preSeed, + BlockHash: rwrLog.Raw.BlockHash, + BlockNum: rwrLog.Raw.BlockNumber, + SubId: rwrLog.SubId, + CallbackGasLimit: rwrLog.CallbackGasLimit, + NumWords: rwrLog.NumWords, + Sender: rwrLog.Sender, + ExtraArgs: rwrLog.ExtraArgs, + } + finalSeed := proof.FinalSeedV2Plus(preSeedData) + pf, err := key.GenerateProof(finalSeed) + helpers.PanicErr(err) + onChainProof, rc, err := proof.GenerateProofResponseFromProofV2Plus(pf, preSeedData) + helpers.PanicErr(err) + b, err := coordinatorV2PlusABI.Pack("fulfillRandomWords", onChainProof, rc) + helpers.PanicErr(err) + fmt.Println("calldata for fulfillRandomWords:", hexutil.Encode(b)) + + // call fulfillRandomWords with onChainProof and rc appropriately + fmt.Println("proof c:", onChainProof.C) + fmt.Println("proof s:", onChainProof.S) + fmt.Println("proof gamma:", onChainProof.Gamma) + fmt.Println("proof seed:", onChainProof.Seed) + fmt.Println("proof pk:", onChainProof.Pk) + fmt.Println("proof c gamma witness:", onChainProof.CGammaWitness) + fmt.Println("proof u witness:", onChainProof.UWitness) + fmt.Println("proof s hash witness:", onChainProof.SHashWitness) + fmt.Println("proof z inv:", onChainProof.ZInv) + fmt.Println("request commitment sub id:", rc.SubId) + fmt.Println("request commitment callback gas limit:", rc.CallbackGasLimit) + fmt.Println("request commitment num words:", rc.NumWords) + fmt.Println("request commitment sender:", rc.Sender) + fmt.Println("request commitment extra args:", hexutil.Encode(rc.ExtraArgs)) + + receipt, txHash := sendTx(e, coordinatorAddress, b) + if receipt.Status != 1 { + fmt.Println("fulfillment tx failed, extracting 
revert reason") + tx, _, err := e.Ec.TransactionByHash(context.Background(), txHash) + helpers.PanicErr(err) + call := ethereum.CallMsg{ + From: e.Owner.From, + To: tx.To(), + Data: tx.Data(), + Gas: tx.Gas(), + GasPrice: tx.GasPrice(), + } + r, err := e.Ec.CallContract(context.Background(), call, receipt.BlockNumber) + fmt.Println("call contract", "r", r, "err", err) + rpcError, err := evmclient.ExtractRPCError(err) + fmt.Println("extracting rpc error", rpcError.String(), err) + os.Exit(1) + } + + fmt.Println("\nfulfillment successful") +} + +func SmokeTestBHS(e helpers.Environment) { + smokeCmd := flag.NewFlagSet("smoke-bhs", flag.ExitOnError) + + // optional args + bhsAddress := smokeCmd.String("bhs-address", "", "address of blockhash store") + batchBHSAddress := smokeCmd.String("batch-bhs-address", "", "address of batch blockhash store") + + helpers.ParseArgs(smokeCmd, os.Args[2:]) + + var bhsContractAddress common.Address + if len(*bhsAddress) == 0 { + fmt.Println("\nDeploying BHS...") + bhsContractAddress = DeployBHS(e) + } else { + bhsContractAddress = common.HexToAddress(*bhsAddress) + } + + var batchBHSContractAddress common.Address + if len(*batchBHSAddress) == 0 { + fmt.Println("\nDeploying Batch BHS...") + batchBHSContractAddress = DeployBatchBHS(e, bhsContractAddress) + } else { + batchBHSContractAddress = common.HexToAddress(*batchBHSAddress) + } + + bhs, err := blockhash_store.NewBlockhashStore(bhsContractAddress, e.Ec) + helpers.PanicErr(err) + + batchBHS, err := batch_blockhash_store.NewBatchBlockhashStore(batchBHSContractAddress, e.Ec) + helpers.PanicErr(err) + batchBHS.Address() + + fmt.Println("\nexecuting storeEarliest") + tx, err := bhs.StoreEarliest(e.Owner) + helpers.PanicErr(err) + seReceipt := helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "storeEarliest on", bhsContractAddress.String()) + var anchorBlockNumber *big.Int + if seReceipt.Status != 1 { + fmt.Println("storeEarliest failed") + os.Exit(1) + } + 
fmt.Println("storeEarliest succeeded, checking BH is there") + bh, err := bhs.GetBlockhash(nil, seReceipt.BlockNumber.Sub(seReceipt.BlockNumber, big.NewInt(256))) + helpers.PanicErr(err) + fmt.Println("blockhash stored by storeEarliest:", hexutil.Encode(bh[:])) + anchorBlockNumber = seReceipt.BlockNumber + + if anchorBlockNumber == nil { + panic("no anchor block number") + } + + fmt.Println("\nexecuting store(n)") + latestHead, err := e.Ec.HeaderByNumber(context.Background(), nil) + helpers.PanicErr(err) + toStore := latestHead.Number.Sub(latestHead.Number, big.NewInt(1)) + tx, err = bhs.Store(e.Owner, toStore) + helpers.PanicErr(err) + sReceipt := helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "store on", bhsContractAddress.String()) + if sReceipt.Status != 1 { + fmt.Println("store failed") + os.Exit(1) + } + fmt.Println("store succeeded, checking BH is there") + bh, err = bhs.GetBlockhash(nil, toStore) + helpers.PanicErr(err) + fmt.Println("blockhash stored by store:", hexutil.Encode(bh[:])) + + fmt.Println("\nexecuting storeVerifyHeader") + headers, _, err := helpers.GetRlpHeaders(e, []*big.Int{anchorBlockNumber}, false) + helpers.PanicErr(err) + + toStore = anchorBlockNumber.Sub(anchorBlockNumber, big.NewInt(1)) + tx, err = bhs.StoreVerifyHeader(e.Owner, toStore, headers[0]) + helpers.PanicErr(err) + svhReceipt := helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "storeVerifyHeader on", bhsContractAddress.String()) + if svhReceipt.Status != 1 { + fmt.Println("storeVerifyHeader failed") + os.Exit(1) + } + fmt.Println("storeVerifyHeader succeeded, checking BH is there") + bh, err = bhs.GetBlockhash(nil, toStore) + helpers.PanicErr(err) + fmt.Println("blockhash stored by storeVerifyHeader:", hexutil.Encode(bh[:])) +} + +func sendTx(e helpers.Environment, to common.Address, data []byte) (*types.Receipt, common.Hash) { + nonce, err := e.Ec.PendingNonceAt(context.Background(), e.Owner.From) + helpers.PanicErr(err) + gasPrice, err := 
e.Ec.SuggestGasPrice(context.Background()) + helpers.PanicErr(err) + rawTx := types.NewTx(&types.LegacyTx{ + Nonce: nonce, + To: &to, + Data: data, + Value: big.NewInt(0), + Gas: 1_000_000, + GasPrice: gasPrice, + }) + signedTx, err := e.Owner.Signer(e.Owner.From, rawTx) + helpers.PanicErr(err) + err = e.Ec.SendTransaction(context.Background(), signedTx) + helpers.PanicErr(err) + return helpers.ConfirmTXMined(context.Background(), e.Ec, signedTx, + e.ChainID, "send tx", signedTx.Hash().String(), "to", to.String()), signedTx.Hash() +} + +func DeployUniverseViaCLI(e helpers.Environment) { + deployCmd := flag.NewFlagSet("deploy-universe", flag.ExitOnError) + + // required flags + nativeOnly := deployCmd.Bool("native-only", false, "if true, link and link feed are not set up") + linkAddress := deployCmd.String("link-address", "", "address of link token") + linkEthAddress := deployCmd.String("link-eth-feed", "", "address of link eth feed") + bhsContractAddressString := deployCmd.String("bhs-address", "", "address of BHS contract") + batchBHSAddressString := deployCmd.String("batch-bhs-address", "", "address of Batch BHS contract") + coordinatorAddressString := deployCmd.String("coordinator-address", "", "address of VRF Coordinator contract") + batchCoordinatorAddressString := deployCmd.String("batch-coordinator-address", "", "address Batch VRF Coordinator contract") + subscriptionBalanceJuelsString := deployCmd.String("subscription-balance", "1e19", "amount to fund subscription with Link token (Juels)") + subscriptionBalanceNativeWeiString := deployCmd.String("subscription-balance-native", "1e18", "amount to fund subscription with native token (Wei)") + + batchFulfillmentEnabled := deployCmd.Bool("batch-fulfillment-enabled", constants.BatchFulfillmentEnabled, "whether send randomness fulfillments in batches inside one tx from CL node") + batchFulfillmentGasMultiplier := deployCmd.Float64("batch-fulfillment-gas-multiplier", 1.1, "") + estimateGasMultiplier := 
deployCmd.Float64("estimate-gas-multiplier", 1.1, "") + pollPeriod := deployCmd.String("poll-period", "300ms", "") + requestTimeout := deployCmd.String("request-timeout", "30m0s", "") + simulationBlock := deployCmd.String("simulation-block", "pending", "simulation block can be 'pending' or 'latest'") + + // optional flags + fallbackWeiPerUnitLinkString := deployCmd.String("fallback-wei-per-unit-link", "6e16", "fallback wei/link ratio") + registerVRFKeyUncompressedPubKey := deployCmd.String("uncompressed-pub-key", "", "uncompressed public key") + + vrfPrimaryNodeSendingKeysString := deployCmd.String("vrf-primary-node-sending-keys", "", "VRF Primary Node sending keys") + minConfs := deployCmd.Int("min-confs", constants.MinConfs, "min confs") + nodeSendingKeyFundingAmount := deployCmd.String("sending-key-funding-amount", constants.NodeSendingKeyFundingAmount, "CL node sending key funding amount") + maxGasLimit := deployCmd.Int64("max-gas-limit", constants.MaxGasLimit, "max gas limit") + stalenessSeconds := deployCmd.Int64("staleness-seconds", constants.StalenessSeconds, "staleness in seconds") + gasAfterPayment := deployCmd.Int64("gas-after-payment", constants.GasAfterPayment, "gas after payment calculation") + flatFeeEthPPM := deployCmd.Int64("flat-fee-eth-ppm", 500, "fulfillment flat fee ETH ppm") + flatFeeLinkDiscountPPM := deployCmd.Int64("flat-fee-link-discount-ppm", 100, "fulfillment flat fee discount for PLI payment denominated in native ppm") + nativePremiumPercentage := deployCmd.Int64("native-premium-percentage", 1, "premium percentage for native payment") + linkPremiumPercentage := deployCmd.Int64("link-premium-percentage", 1, "premium percentage for PLI payment") + gasLaneMaxGas := deployCmd.Int64("gas-lane-max-gas", 1e12, "gas lane max gas price") + + helpers.ParseArgs( + deployCmd, os.Args[2:], + ) + + if *nativeOnly { + if *linkAddress != "" || *linkEthAddress != "" { + panic("native-only flag is set, but link address or link eth address is provided") + 
} + if *subscriptionBalanceJuelsString != "0" { + panic("native-only flag is set, but link subscription balance is provided") + } + } + + if *simulationBlock != "pending" && *simulationBlock != "latest" { + helpers.PanicErr(fmt.Errorf("simulation block must be 'pending' or 'latest'")) + } + + fallbackWeiPerUnitLink := decimal.RequireFromString(*fallbackWeiPerUnitLinkString).BigInt() + subscriptionBalanceJuels := decimal.RequireFromString(*subscriptionBalanceJuelsString).BigInt() + subscriptionBalanceNativeWei := decimal.RequireFromString(*subscriptionBalanceNativeWeiString).BigInt() + fundingAmount := decimal.RequireFromString(*nodeSendingKeyFundingAmount).BigInt() + + var vrfPrimaryNodeSendingKeys []string + if len(*vrfPrimaryNodeSendingKeysString) > 0 { + vrfPrimaryNodeSendingKeys = strings.Split(*vrfPrimaryNodeSendingKeysString, ",") + } + + nodesMap := make(map[string]model.Node) + + nodesMap[model.VRFPrimaryNodeName] = model.Node{ + SendingKeys: util.MapToSendingKeyArr(vrfPrimaryNodeSendingKeys), + SendingKeyFundingAmount: fundingAmount, + } + + bhsContractAddress := common.HexToAddress(*bhsContractAddressString) + batchBHSAddress := common.HexToAddress(*batchBHSAddressString) + coordinatorAddress := common.HexToAddress(*coordinatorAddressString) + batchCoordinatorAddress := common.HexToAddress(*batchCoordinatorAddressString) + + contractAddresses := model.ContractAddresses{ + LinkAddress: *linkAddress, + LinkEthAddress: *linkEthAddress, + BhsContractAddress: bhsContractAddress, + BatchBHSAddress: batchBHSAddress, + CoordinatorAddress: coordinatorAddress, + BatchCoordinatorAddress: batchCoordinatorAddress, + } + + coordinatorConfig := CoordinatorConfigV2Plus{ + MinConfs: *minConfs, + MaxGasLimit: *maxGasLimit, + StalenessSeconds: *stalenessSeconds, + GasAfterPayment: *gasAfterPayment, + FallbackWeiPerUnitLink: fallbackWeiPerUnitLink, + FulfillmentFlatFeeNativePPM: uint32(*flatFeeEthPPM), + FulfillmentFlatFeeLinkDiscountPPM: uint32(*flatFeeLinkDiscountPPM), + 
NativePremiumPercentage: uint8(*nativePremiumPercentage), + LinkPremiumPercentage: uint8(*linkPremiumPercentage), + } + + vrfKeyRegistrationConfig := model.VRFKeyRegistrationConfig{ + VRFKeyUncompressedPubKey: *registerVRFKeyUncompressedPubKey, + } + + coordinatorJobSpecConfig := model.CoordinatorJobSpecConfig{ + BatchFulfillmentEnabled: *batchFulfillmentEnabled, + BatchFulfillmentGasMultiplier: *batchFulfillmentGasMultiplier, + EstimateGasMultiplier: *estimateGasMultiplier, + PollPeriod: *pollPeriod, + RequestTimeout: *requestTimeout, + } + + VRFV2PlusDeployUniverse( + e, + subscriptionBalanceJuels, + subscriptionBalanceNativeWei, + vrfKeyRegistrationConfig, + contractAddresses, + coordinatorConfig, + *batchFulfillmentEnabled, + *nativeOnly, + nodesMap, + uint64(*gasLaneMaxGas), + coordinatorJobSpecConfig, + *simulationBlock, + ) + + vrfPrimaryNode := nodesMap[model.VRFPrimaryNodeName] + fmt.Println("Funding node's sending keys...") + for _, sendingKey := range vrfPrimaryNode.SendingKeys { + helpers.FundNode(e, sendingKey.Address, vrfPrimaryNode.SendingKeyFundingAmount) + } +} + +func VRFV2PlusDeployUniverse(e helpers.Environment, + subscriptionBalanceJuels *big.Int, + subscriptionBalanceNativeWei *big.Int, + vrfKeyRegistrationConfig model.VRFKeyRegistrationConfig, + contractAddresses model.ContractAddresses, + coordinatorConfig CoordinatorConfigV2Plus, + batchFulfillmentEnabled bool, + nativeOnly bool, + nodesMap map[string]model.Node, + gasLaneMaxGas uint64, + coordinatorJobSpecConfig model.CoordinatorJobSpecConfig, + simulationBlock string, +) model.JobSpecs { + var compressedPkHex string + var keyHash common.Hash + if len(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey) > 0 { + // Put key in ECDSA format + if strings.HasPrefix(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey, "0x") { + vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey = strings.Replace(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey, "0x", "04", 1) + } + + // Generate compressed public 
key and key hash + pubBytes, err := hex.DecodeString(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey) + helpers.PanicErr(err) + pk, err := crypto.UnmarshalPubkey(pubBytes) + helpers.PanicErr(err) + var pkBytes []byte + if big.NewInt(0).Mod(pk.Y, big.NewInt(2)).Uint64() != 0 { + pkBytes = append(pk.X.Bytes(), 1) + } else { + pkBytes = append(pk.X.Bytes(), 0) + } + var newPK secp256k1.PublicKey + copy(newPK[:], pkBytes) + + compressedPkHex = hexutil.Encode(pkBytes) + keyHash, err = newPK.Hash() + helpers.PanicErr(err) + } + + if !nativeOnly && len(contractAddresses.LinkAddress) == 0 { + fmt.Println("\nDeploying PLI Token...") + contractAddresses.LinkAddress = helpers.DeployLinkToken(e).String() + } + + if !nativeOnly && len(contractAddresses.LinkEthAddress) == 0 { + fmt.Println("\nDeploying PLI/ETH Feed...") + contractAddresses.LinkEthAddress = helpers.DeployLinkEthFeed(e, contractAddresses.LinkAddress, coordinatorConfig.FallbackWeiPerUnitLink).String() + } + + if contractAddresses.BhsContractAddress.String() == "0x0000000000000000000000000000000000000000" { + fmt.Println("\nDeploying BHS...") + contractAddresses.BhsContractAddress = DeployBHS(e) + } + + if contractAddresses.BatchBHSAddress.String() == "0x0000000000000000000000000000000000000000" { + fmt.Println("\nDeploying Batch BHS...") + contractAddresses.BatchBHSAddress = DeployBatchBHS(e, contractAddresses.BhsContractAddress) + } + + if contractAddresses.CoordinatorAddress.String() == "0x0000000000000000000000000000000000000000" { + fmt.Println("\nDeploying Coordinator...") + contractAddresses.CoordinatorAddress = DeployCoordinator(e, contractAddresses.LinkAddress, contractAddresses.BhsContractAddress.String(), contractAddresses.LinkEthAddress) + } + + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(contractAddresses.CoordinatorAddress, e.Ec) + helpers.PanicErr(err) + + if contractAddresses.BatchCoordinatorAddress.String() == "0x0000000000000000000000000000000000000000" { + 
fmt.Println("\nDeploying Batch Coordinator...") + contractAddresses.BatchCoordinatorAddress = DeployBatchCoordinatorV2(e, contractAddresses.CoordinatorAddress) + } + + fmt.Println("\nSetting Coordinator Config...") + SetCoordinatorConfig( + e, + *coordinator, + uint16(coordinatorConfig.MinConfs), + uint32(coordinatorConfig.MaxGasLimit), + uint32(coordinatorConfig.StalenessSeconds), + uint32(coordinatorConfig.GasAfterPayment), + coordinatorConfig.FallbackWeiPerUnitLink, + coordinatorConfig.FulfillmentFlatFeeNativePPM, + coordinatorConfig.FulfillmentFlatFeeLinkDiscountPPM, + coordinatorConfig.NativePremiumPercentage, + coordinatorConfig.LinkPremiumPercentage, + ) + + fmt.Println("\nConfig set, getting current config from deployed contract...") + PrintCoordinatorConfig(coordinator) + + if len(vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey) > 0 { + fmt.Println("\nRegistering proving key...") + + //NOTE - register proving key against EOA account, and not against Oracle's sending address in order to be able + // easily withdraw funds from Coordinator contract back to EOA account + RegisterCoordinatorProvingKey(e, *coordinator, vrfKeyRegistrationConfig.VRFKeyUncompressedPubKey, gasLaneMaxGas) + + fmt.Println("\nProving key registered, getting proving key hashes from deployed contract...") + registerdKeyHash, err2 := coordinator.SProvingKeyHashes(nil, big.NewInt(0)) + helpers.PanicErr(err2) + fmt.Println("Key hash registered:", hex.EncodeToString(registerdKeyHash[:])) + } else { + fmt.Println("NOT registering proving key - you must do this eventually in order to fully deploy VRF!") + } + + fmt.Println("\nDeploying consumer...") + consumerAddress := EoaV2PlusLoadTestConsumerWithMetricsDeploy(e, contractAddresses.CoordinatorAddress.String()) + + fmt.Println("\nAdding subscription...") + EoaCreateSub(e, *coordinator) + + subID := FindSubscriptionID(e, coordinator) + + fmt.Println("\nAdding consumer to subscription...") + EoaAddConsumerToSub(e, *coordinator, subID, 
consumerAddress.String()) + + if subscriptionBalanceJuels.Cmp(big.NewInt(0)) > 0 { + fmt.Println("\nFunding subscription with Link Token.", subscriptionBalanceJuels, "juels...") + EoaFundSubWithLink(e, *coordinator, contractAddresses.LinkAddress, subscriptionBalanceJuels, subID) + } else { + fmt.Println("Subscription", subID, "NOT getting funded with Link Token. You must fund the subscription in order to use it!") + } + if subscriptionBalanceNativeWei.Cmp(big.NewInt(0)) > 0 { + fmt.Println("\nFunding subscription with Native Token.", subscriptionBalanceNativeWei, "wei...") + EoaFundSubWithNative(e, coordinator.Address(), subID, subscriptionBalanceNativeWei) + } else { + fmt.Println("Subscription", subID, "NOT getting funded with Native Token. You must fund the subscription in order to use it!") + } + + fmt.Println("\nSubscribed and (possibly) funded, retrieving subscription from deployed contract...") + s, err := coordinator.GetSubscription(nil, subID) + helpers.PanicErr(err) + fmt.Printf("Subscription %+v\n", s) + + formattedVrfV2PlusPrimaryJobSpec := fmt.Sprintf( + jobs.VRFV2PlusJobFormatted, + contractAddresses.CoordinatorAddress, //coordinatorAddress + contractAddresses.BatchCoordinatorAddress, //batchCoordinatorAddress + coordinatorJobSpecConfig.BatchFulfillmentEnabled, //batchFulfillmentEnabled + coordinatorJobSpecConfig.BatchFulfillmentGasMultiplier, //batchFulfillmentGasMultiplier + compressedPkHex, //publicKey + coordinatorConfig.MinConfs, //minIncomingConfirmations + e.ChainID, //evmChainID + strings.Join(util.MapToAddressArr(nodesMap[model.VRFPrimaryNodeName].SendingKeys), "\",\""), //fromAddresses + coordinatorJobSpecConfig.PollPeriod, //pollPeriod + coordinatorJobSpecConfig.RequestTimeout, //requestTimeout + contractAddresses.CoordinatorAddress, + coordinatorJobSpecConfig.EstimateGasMultiplier, //estimateGasMultiplier + simulationBlock, + func() string { + if keys := nodesMap[model.VRFPrimaryNodeName].SendingKeys; len(keys) > 0 { + return 
keys[0].Address + } + return common.HexToAddress("0x0").String() + }(), + contractAddresses.CoordinatorAddress, + contractAddresses.CoordinatorAddress, + simulationBlock, + ) + + formattedVrfV2PlusBackupJobSpec := fmt.Sprintf( + jobs.VRFV2PlusJobFormatted, + contractAddresses.CoordinatorAddress, //coordinatorAddress + contractAddresses.BatchCoordinatorAddress, //batchCoordinatorAddress + coordinatorJobSpecConfig.BatchFulfillmentEnabled, //batchFulfillmentEnabled + coordinatorJobSpecConfig.BatchFulfillmentGasMultiplier, //batchFulfillmentGasMultiplier + compressedPkHex, //publicKey + 100, //minIncomingConfirmations + e.ChainID, //evmChainID + strings.Join(util.MapToAddressArr(nodesMap[model.VRFBackupNodeName].SendingKeys), "\",\""), //fromAddresses + coordinatorJobSpecConfig.PollPeriod, //pollPeriod + coordinatorJobSpecConfig.RequestTimeout, //requestTimeout + contractAddresses.CoordinatorAddress, + coordinatorJobSpecConfig.EstimateGasMultiplier, //estimateGasMultiplier + simulationBlock, + func() string { + if keys := nodesMap[model.VRFPrimaryNodeName].SendingKeys; len(keys) > 0 { + return keys[0].Address + } + return common.HexToAddress("0x0").String() + }(), + contractAddresses.CoordinatorAddress, + contractAddresses.CoordinatorAddress, + simulationBlock, + ) + + formattedBHSJobSpec := fmt.Sprintf( + jobs.BHSPlusJobFormatted, + contractAddresses.CoordinatorAddress, //coordinatorAddress + 30, //waitBlocks + 200, //lookbackBlocks + contractAddresses.BhsContractAddress, //bhs address + e.ChainID, //chain id + strings.Join(util.MapToAddressArr(nodesMap[model.BHSNodeName].SendingKeys), "\",\""), //sending addresses + ) + + formattedBHSBackupJobSpec := fmt.Sprintf( + jobs.BHSPlusJobFormatted, + contractAddresses.CoordinatorAddress, //coordinatorAddress + 100, //waitBlocks + 200, //lookbackBlocks + contractAddresses.BhsContractAddress, //bhs address + e.ChainID, //chain id + strings.Join(util.MapToAddressArr(nodesMap[model.BHSBackupNodeName].SendingKeys), "\",\""), 
//sending addresses + ) + + formattedBHFJobSpec := fmt.Sprintf( + jobs.BHFPlusJobFormatted, + contractAddresses.CoordinatorAddress, //coordinatorAddress + contractAddresses.BhsContractAddress, //bhs adreess + contractAddresses.BatchBHSAddress, //batchBHS + e.ChainID, //chain id + strings.Join(util.MapToAddressArr(nodesMap[model.BHFNodeName].SendingKeys), "\",\""), //sending addresses + ) + + fmt.Println( + "\nDeployment complete.", + "\nPLI Token contract address:", contractAddresses.LinkAddress, + "\nPLI/ETH Feed contract address:", contractAddresses.LinkEthAddress, + "\nBlockhash Store contract address:", contractAddresses.BhsContractAddress, + "\nBatch Blockhash Store contract address:", contractAddresses.BatchBHSAddress, + "\nVRF Coordinator Address:", contractAddresses.CoordinatorAddress, + "\nBatch VRF Coordinator Address:", contractAddresses.BatchCoordinatorAddress, + "\nVRF Consumer Address:", consumerAddress, + "\nVRF Subscription Id:", subID, + "\nVRF Subscription PLI Balance:", *subscriptionBalanceJuels, + "\nVRF Subscription Native Balance:", *subscriptionBalanceNativeWei, + "\nPossible VRF Request command: ", + fmt.Sprintf("go run . eoa-load-test-request-with-metrics --consumer-address=%s --sub-id=%d --key-hash=%s --request-confirmations %d --requests 1 --runs 1 --cb-gas-limit 1_000_000", consumerAddress, subID, keyHash, coordinatorConfig.MinConfs), + "\nRetrieve Request Status: ", + fmt.Sprintf("go run . 
eoa-load-test-read-metrics --consumer-address=%s", consumerAddress), + "\nA node can now be configured to run a VRF job with the below job spec :\n", + formattedVrfV2PlusPrimaryJobSpec, + ) + + return model.JobSpecs{ + VRFPrimaryNode: formattedVrfV2PlusPrimaryJobSpec, + VRFBackupyNode: formattedVrfV2PlusBackupJobSpec, + BHSNode: formattedBHSJobSpec, + BHSBackupNode: formattedBHSBackupJobSpec, + BHFNode: formattedBHFJobSpec, + } +} + +func DeployWrapperUniverse(e helpers.Environment) { + cmd := flag.NewFlagSet("wrapper-universe-deploy", flag.ExitOnError) + linkAddress := cmd.String("link-address", "", "address of link token") + linkETHFeedAddress := cmd.String("link-eth-feed", "", "address of link-eth-feed") + coordinatorAddress := cmd.String("coordinator-address", "", "address of the vrf coordinator v2 contract") + wrapperGasOverhead := cmd.Uint("wrapper-gas-overhead", 50_000, "amount of gas overhead in wrapper fulfillment") + coordinatorGasOverhead := cmd.Uint("coordinator-gas-overhead", 52_000, "amount of gas overhead in coordinator fulfillment") + wrapperPremiumPercentage := cmd.Uint("wrapper-premium-percentage", 25, "gas premium charged by wrapper") + keyHash := cmd.String("key-hash", "", "the keyhash that wrapper requests should use") + maxNumWords := cmd.Uint("max-num-words", 10, "the keyhash that wrapper requests should use") + subFunding := cmd.String("sub-funding", "10000000000000000000", "amount to fund the subscription with") + consumerFunding := cmd.String("consumer-funding", "10000000000000000000", "amount to fund the consumer with") + fallbackWeiPerUnitLink := cmd.String("fallback-wei-per-unit-link", "", "the fallback wei per unit link") + stalenessSeconds := cmd.Uint("staleness-seconds", 86400, "the number of seconds of staleness to allow") + fulfillmentFlatFeeLinkPPM := cmd.Uint("fulfillment-flat-fee-link-ppm", 500, "the link flat fee in ppm to charge for fulfillment") + fulfillmentFlatFeeNativePPM := cmd.Uint("fulfillment-flat-fee-native-ppm", 500, 
"the native flat fee in ppm to charge for fulfillment") + helpers.ParseArgs(cmd, os.Args[2:], "link-address", "link-eth-feed", "coordinator-address", "key-hash", "fallback-wei-per-unit-link") + + amount, s := big.NewInt(0).SetString(*subFunding, 10) + if !s { + panic(fmt.Sprintf("failed to parse top up amount '%s'", *subFunding)) + } + + wrapper, subID := WrapperDeploy(e, + common.HexToAddress(*linkAddress), + common.HexToAddress(*linkETHFeedAddress), + common.HexToAddress(*coordinatorAddress)) + + WrapperConfigure(e, + wrapper, + *wrapperGasOverhead, + *coordinatorGasOverhead, + *wrapperPremiumPercentage, + *keyHash, + *maxNumWords, + decimal.RequireFromString(*fallbackWeiPerUnitLink).BigInt(), + uint32(*stalenessSeconds), + uint32(*fulfillmentFlatFeeLinkPPM), + uint32(*fulfillmentFlatFeeNativePPM), + ) + + consumer := WrapperConsumerDeploy(e, + common.HexToAddress(*linkAddress), + wrapper) + + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(common.HexToAddress(*coordinatorAddress), e.Ec) + helpers.PanicErr(err) + + EoaFundSubWithLink(e, *coordinator, *linkAddress, amount, subID) + + link, err := link_token_interface.NewLinkToken(common.HexToAddress(*linkAddress), e.Ec) + helpers.PanicErr(err) + consumerAmount, s := big.NewInt(0).SetString(*consumerFunding, 10) + if !s { + panic(fmt.Sprintf("failed to parse top up amount '%s'", *consumerFunding)) + } + + tx, err := link.Transfer(e.Owner, consumer, consumerAmount) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, "link transfer to consumer") + + fmt.Println("wrapper universe deployment complete") + fmt.Println("wrapper address:", wrapper.String()) + fmt.Println("wrapper consumer address:", consumer.String()) +} diff --git a/core/scripts/vrfv2plus/testnet/v2plusscripts/util.go b/core/scripts/vrfv2plus/testnet/v2plusscripts/util.go new file mode 100644 index 00000000..e632397a --- /dev/null +++ b/core/scripts/vrfv2plus/testnet/v2plusscripts/util.go @@ -0,0 
+1,277 @@ +package v2plusscripts + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_load_test_with_metrics" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + + helpers "github.com/goplugin/pluginv3.0/core/scripts/common" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_vrf_coordinator_v2plus" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2_5" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_sub_owner" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2plus_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2plus_wrapper_consumer_example" +) + +func DeployBHS(e helpers.Environment) (blockhashStoreAddress common.Address) { + _, tx, _, err := blockhash_store.DeployBlockhashStore(e.Owner, e.Ec) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func DeployBatchBHS(e helpers.Environment, bhsAddress common.Address) (batchBHSAddress common.Address) { + _, tx, _, err := batch_blockhash_store.DeployBatchBlockhashStore(e.Owner, e.Ec, bhsAddress) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func DeployCoordinator( + e helpers.Environment, + linkAddress string, + bhsAddress string, + linkEthAddress string, +) (coordinatorAddress common.Address) { + _, tx, _, err := vrf_coordinator_v2_5.DeployVRFCoordinatorV25( + e.Owner, + e.Ec, 
+ common.HexToAddress(bhsAddress)) + helpers.PanicErr(err) + coordinatorAddress = helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + + // Set PLI and PLI ETH + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(coordinatorAddress, e.Ec) + helpers.PanicErr(err) + + if linkAddress != "" && linkEthAddress != "" { + linkTx, err := coordinator.SetPLIAndPLINativeFeed(e.Owner, + common.HexToAddress(linkAddress), common.HexToAddress(linkEthAddress)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, linkTx, e.ChainID) + } + return coordinatorAddress +} + +func DeployBatchCoordinatorV2(e helpers.Environment, coordinatorAddress common.Address) (batchCoordinatorAddress common.Address) { + _, tx, _, err := batch_vrf_coordinator_v2plus.DeployBatchVRFCoordinatorV2Plus(e.Owner, e.Ec, coordinatorAddress) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func EoaAddConsumerToSub( + e helpers.Environment, + coordinator vrf_coordinator_v2_5.VRFCoordinatorV25, + subID *big.Int, + consumerAddress string, +) { + txadd, err := coordinator.AddConsumer(e.Owner, subID, common.HexToAddress(consumerAddress)) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, txadd, e.ChainID) +} + +func EoaCreateSub(e helpers.Environment, coordinator vrf_coordinator_v2_5.VRFCoordinatorV25) { + tx, err := coordinator.CreateSubscription(e.Owner) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +// returns subscription ID that belongs to the given owner. Returns result found first +func FindSubscriptionID(e helpers.Environment, coordinator *vrf_coordinator_v2_5.VRFCoordinatorV25) *big.Int { + // Use most recent 500 blocks as search window. 
+ head, err := e.Ec.BlockNumber(context.Background()) + helpers.PanicErr(err) + fopts := &bind.FilterOpts{ + Start: head - 500, + } + + subscriptionIterator, err := coordinator.FilterSubscriptionCreated(fopts, nil) + helpers.PanicErr(err) + + if !subscriptionIterator.Next() { + helpers.PanicErr(fmt.Errorf("expected at least 1 subID for the given owner %s", e.Owner.From.Hex())) + } + return subscriptionIterator.Event.SubId +} + +func EoaDeployConsumer(e helpers.Environment, + coordinatorAddress string, + linkAddress string) ( + consumerAddress common.Address) { + _, tx, _, err := vrf_v2plus_sub_owner.DeployVRFV2PlusExternalSubOwnerExample( + e.Owner, + e.Ec, + common.HexToAddress(coordinatorAddress), + common.HexToAddress(linkAddress)) + helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} + +func EoaFundSubWithLink( + e helpers.Environment, + coordinator vrf_coordinator_v2_5.VRFCoordinatorV25, + linkAddress string, amount, + subID *big.Int, +) { + linkToken, err := link_token_interface.NewLinkToken(common.HexToAddress(linkAddress), e.Ec) + helpers.PanicErr(err) + bal, err := linkToken.BalanceOf(nil, e.Owner.From) + helpers.PanicErr(err) + fmt.Println("Initial account balance (Juels):", bal, e.Owner.From.String(), "Funding amount:", amount.String()) + b, err := utils.ABIEncode(`[{"type":"uint256"}]`, subID) + helpers.PanicErr(err) + tx, err := linkToken.TransferAndCall(e.Owner, coordinator.Address(), amount, b) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID, fmt.Sprintf("sub ID: %d", subID)) +} + +func EoaFundSubWithNative(e helpers.Environment, coordinatorAddress common.Address, subID *big.Int, amount *big.Int) { + coordinator, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(coordinatorAddress, e.Ec) + helpers.PanicErr(err) + e.Owner.Value = amount + tx, err := coordinator.FundSubscriptionWithNative(e.Owner, subID) + helpers.PanicErr(err) + 
helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func PrintCoordinatorConfig(coordinator *vrf_coordinator_v2_5.VRFCoordinatorV25) { + cfg, err := coordinator.SConfig(nil) + helpers.PanicErr(err) + + fmt.Printf("Coordinator config: %+v\n", cfg) +} + +func SetCoordinatorConfig( + e helpers.Environment, + coordinator vrf_coordinator_v2_5.VRFCoordinatorV25, + minConfs uint16, + maxGasLimit uint32, + stalenessSeconds uint32, + gasAfterPayment uint32, + fallbackWeiPerUnitLink *big.Int, + fulfillmentFlatFeeNativePPM uint32, + fulfillmentFlatFeeLinkDiscountPPM uint32, + nativePremiumPercentage uint8, + linkPremiumPercentage uint8, +) { + tx, err := coordinator.SetConfig( + e.Owner, + minConfs, // minRequestConfirmations + maxGasLimit, // max gas limit + stalenessSeconds, // stalenessSeconds + gasAfterPayment, // gasAfterPaymentCalculation + fallbackWeiPerUnitLink, // 0.01 eth per link fallbackLinkPrice + fulfillmentFlatFeeNativePPM, + fulfillmentFlatFeeLinkDiscountPPM, + nativePremiumPercentage, + linkPremiumPercentage, + ) + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func RegisterCoordinatorProvingKey(e helpers.Environment, + coordinator vrf_coordinator_v2_5.VRFCoordinatorV25, uncompressed string, gasLaneMaxGas uint64) { + pubBytes, err := hex.DecodeString(uncompressed) + helpers.PanicErr(err) + pk, err := crypto.UnmarshalPubkey(pubBytes) + helpers.PanicErr(err) + tx, err := coordinator.RegisterProvingKey(e.Owner, + [2]*big.Int{pk.X, pk.Y}, gasLaneMaxGas) + helpers.PanicErr(err) + helpers.ConfirmTXMined( + context.Background(), + e.Ec, + tx, + e.ChainID, + fmt.Sprintf("Uncompressed public key: %s,", uncompressed), + fmt.Sprintf("Gas Lane Max Gas: %d,", gasLaneMaxGas), + ) +} + +func WrapperDeploy( + e helpers.Environment, + link, linkEthFeed, coordinator common.Address, +) (common.Address, *big.Int) { + address, tx, _, err := vrfv2plus_wrapper.DeployVRFV2PlusWrapper(e.Owner, e.Ec, + link, + 
linkEthFeed, + coordinator) + helpers.PanicErr(err) + + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + fmt.Println("VRFV2Wrapper address:", address) + + wrapper, err := vrfv2plus_wrapper.NewVRFV2PlusWrapper(address, e.Ec) + helpers.PanicErr(err) + + subID, err := wrapper.SUBSCRIPTIONID(nil) + helpers.PanicErr(err) + fmt.Println("VRFV2Wrapper subscription id:", subID) + + return address, subID +} + +func WrapperConfigure( + e helpers.Environment, + wrapperAddress common.Address, + wrapperGasOverhead, coordinatorGasOverhead, premiumPercentage uint, + keyHash string, + maxNumWords uint, + fallbackWeiPerUnitLink *big.Int, + stalenessSeconds uint32, + fulfillmentFlatFeeLinkPPM uint32, + fulfillmentFlatFeeNativePPM uint32, +) { + wrapper, err := vrfv2plus_wrapper.NewVRFV2PlusWrapper(wrapperAddress, e.Ec) + helpers.PanicErr(err) + + tx, err := wrapper.SetConfig( + e.Owner, + uint32(wrapperGasOverhead), + uint32(coordinatorGasOverhead), + uint8(premiumPercentage), + common.HexToHash(keyHash), + uint8(maxNumWords), + stalenessSeconds, + fallbackWeiPerUnitLink, + fulfillmentFlatFeeLinkPPM, + fulfillmentFlatFeeNativePPM, + ) + + helpers.PanicErr(err) + helpers.ConfirmTXMined(context.Background(), e.Ec, tx, e.ChainID) +} + +func WrapperConsumerDeploy( + e helpers.Environment, + link, wrapper common.Address, +) common.Address { + address, tx, _, err := vrfv2plus_wrapper_consumer_example.DeployVRFV2PlusWrapperConsumerExample(e.Owner, e.Ec, + link, + wrapper) + helpers.PanicErr(err) + + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + fmt.Printf("VRFV2WrapperConsumerExample address: %s\n", address) + return address +} + +func EoaV2PlusLoadTestConsumerWithMetricsDeploy(e helpers.Environment, consumerCoordinator string) (consumerAddress common.Address) { + _, tx, _, err := vrf_v2plus_load_test_with_metrics.DeployVRFV2PlusLoadTestWithMetrics( + e.Owner, + e.Ec, + common.HexToAddress(consumerCoordinator), + ) + 
helpers.PanicErr(err) + return helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) +} diff --git a/core/services/blockhashstore/batch_bhs.go b/core/services/blockhashstore/batch_bhs.go new file mode 100644 index 00000000..7e238a33 --- /dev/null +++ b/core/services/blockhashstore/batch_bhs.go @@ -0,0 +1,81 @@ +package blockhashstore + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +type batchBHSConfig interface { + LimitDefault() uint32 +} + +type BatchBlockhashStore struct { + config batchBHSConfig + txm txmgr.TxManager + abi *abi.ABI + batchbhs batch_blockhash_store.BatchBlockhashStoreInterface + lggr logger.Logger +} + +func NewBatchBHS( + config batchBHSConfig, + fromAddresses []ethkey.EIP55Address, + txm txmgr.TxManager, + batchbhs batch_blockhash_store.BatchBlockhashStoreInterface, + chainID *big.Int, + gethks keystore.Eth, + lggr logger.Logger, +) (*BatchBlockhashStore, error) { + abi, err := batch_blockhash_store.BatchBlockhashStoreMetaData.GetAbi() + if err != nil { + return nil, errors.Wrap(err, "building ABI") + } + return &BatchBlockhashStore{ + config: config, + txm: txm, + abi: abi, + batchbhs: batchbhs, + lggr: lggr, + }, nil +} + +func (b *BatchBlockhashStore) GetBlockhashes(ctx context.Context, blockNumbers []*big.Int) ([][32]byte, error) { + blockhashes, err := b.batchbhs.GetBlockhashes(&bind.CallOpts{Context: ctx}, blockNumbers) + if err != nil { + return nil, 
errors.Wrap(err, "getting blockhashes") + } + return blockhashes, nil +} + +func (b *BatchBlockhashStore) StoreVerifyHeader(ctx context.Context, blockNumbers []*big.Int, blockHeaders [][]byte, fromAddress common.Address) error { + payload, err := b.abi.Pack("storeVerifyHeader", blockNumbers, blockHeaders) + if err != nil { + return errors.Wrap(err, "packing args") + } + + _, err = b.txm.CreateTransaction(ctx, txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: b.batchbhs.Address(), + EncodedPayload: payload, + FeeLimit: b.config.LimitDefault(), + Strategy: txmgrcommon.NewSendEveryStrategy(), + }) + + if err != nil { + return errors.Wrap(err, "creating transaction") + } + + return nil +} diff --git a/core/services/blockhashstore/bhs.go b/core/services/blockhashstore/bhs.go new file mode 100644 index 00000000..c92d3ff9 --- /dev/null +++ b/core/services/blockhashstore/bhs.go @@ -0,0 +1,206 @@ +// The blockhash store package provides a service that stores blockhashes such that they are available +// for on-chain proofs beyond the EVM 256 block limit. 
+package blockhashstore + +import ( + "context" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/pkg/errors" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/trusted_blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +var _ BHS = &BulletproofBHS{} + +type bpBHSConfig interface { + LimitDefault() uint32 +} + +type bpBHSDatabaseConfig interface { + DefaultQueryTimeout() time.Duration +} + +// BulletproofBHS is an implementation of BHS that writes "store" transactions to a bulletproof +// transaction manager, and reads BlockhashStore state from the contract. +type BulletproofBHS struct { + config bpBHSConfig + dbConfig bpBHSDatabaseConfig + jobID uuid.UUID + fromAddresses []ethkey.EIP55Address + txm txmgr.TxManager + abi *abi.ABI + trustedAbi *abi.ABI + bhs blockhash_store.BlockhashStoreInterface + trustedBHS *trusted_blockhash_store.TrustedBlockhashStore + chainID *big.Int + gethks keystore.Eth +} + +// NewBulletproofBHS creates a new instance with the given transaction manager and blockhash store. 
+func NewBulletproofBHS( + config bpBHSConfig, + dbConfig bpBHSDatabaseConfig, + fromAddresses []ethkey.EIP55Address, + txm txmgr.TxManager, + bhs blockhash_store.BlockhashStoreInterface, + trustedBHS *trusted_blockhash_store.TrustedBlockhashStore, + chainID *big.Int, + gethks keystore.Eth, +) (*BulletproofBHS, error) { + bhsABI, err := blockhash_store.BlockhashStoreMetaData.GetAbi() + if err != nil { + // blockhash_store.BlockhashStoreABI is generated code, this should never happen + return nil, errors.Wrap(err, "building ABI") + } + + trustedBHSAbi, err := trusted_blockhash_store.TrustedBlockhashStoreMetaData.GetAbi() + if err != nil { + return nil, errors.Wrap(err, "building trusted BHS ABI") + } + + return &BulletproofBHS{ + config: config, + dbConfig: dbConfig, + fromAddresses: fromAddresses, + txm: txm, + abi: bhsABI, + trustedAbi: trustedBHSAbi, + bhs: bhs, + trustedBHS: trustedBHS, + chainID: chainID, + gethks: gethks, + }, nil +} + +// Store satisfies the BHS interface. +func (c *BulletproofBHS) Store(ctx context.Context, blockNum uint64) error { + payload, err := c.abi.Pack("store", new(big.Int).SetUint64(blockNum)) + if err != nil { + return errors.Wrap(err, "packing args") + } + + fromAddress, err := c.gethks.GetRoundRobinAddress(c.chainID, SendingKeys(c.fromAddresses)...) + if err != nil { + return errors.Wrap(err, "getting next from address") + } + + _, err = c.txm.CreateTransaction(ctx, txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: c.bhs.Address(), + EncodedPayload: payload, + FeeLimit: c.config.LimitDefault(), + + // Set a queue size of 256. At most we store the blockhash of every block, and only the + // latest 256 can possibly be stored. 
+ Strategy: txmgrcommon.NewQueueingTxStrategy(c.jobID, 256, c.dbConfig.DefaultQueryTimeout()), + }) + if err != nil { + return errors.Wrap(err, "creating transaction") + } + + return nil +} + +func (c *BulletproofBHS) StoreTrusted( + ctx context.Context, + blockNums []uint64, + blockhashes []common.Hash, + recentBlock uint64, + recentBlockhash common.Hash, +) error { + // Convert and pack arguments for a "storeTrusted" function call to the trusted BHS. + var blockNumsBig []*big.Int + for _, b := range blockNums { + blockNumsBig = append(blockNumsBig, new(big.Int).SetUint64(b)) + } + recentBlockBig := new(big.Int).SetUint64(recentBlock) + payload, err := c.trustedAbi.Pack("storeTrusted", blockNumsBig, blockhashes, recentBlockBig, recentBlockhash) + if err != nil { + return errors.Wrap(err, "packing args") + } + + // Create a transaction from the given batch and send it to the TXM. + fromAddress, err := c.gethks.GetRoundRobinAddress(c.chainID, SendingKeys(c.fromAddresses)...) + if err != nil { + return errors.Wrap(err, "getting next from address") + } + _, err = c.txm.CreateTransaction(ctx, txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: c.trustedBHS.Address(), + EncodedPayload: payload, + FeeLimit: c.config.LimitDefault(), + + Strategy: txmgrcommon.NewSendEveryStrategy(), + }) + if err != nil { + return errors.Wrap(err, "creating transaction") + } + + return nil +} + +func (c *BulletproofBHS) IsTrusted() bool { + return c.trustedBHS != nil +} + +// IsStored satisfies the BHS interface. 
+func (c *BulletproofBHS) IsStored(ctx context.Context, blockNum uint64) (bool, error) { + var err error + if c.IsTrusted() { + _, err = c.trustedBHS.GetBlockhash(&bind.CallOpts{Context: ctx}, big.NewInt(int64(blockNum))) + } else { + _, err = c.bhs.GetBlockhash(&bind.CallOpts{Context: ctx}, big.NewInt(int64(blockNum))) + } + if err != nil && strings.Contains(err.Error(), "reverted") { + // Transaction reverted because the blockhash is not stored + return false, nil + } else if err != nil { + return false, errors.Wrap(err, "getting blockhash") + } + return true, nil +} + +func (c *BulletproofBHS) sendingKeys() []common.Address { + var keys []common.Address + for _, a := range c.fromAddresses { + keys = append(keys, a.Address()) + } + return keys +} + +func (c *BulletproofBHS) StoreEarliest(ctx context.Context) error { + payload, err := c.abi.Pack("storeEarliest") + if err != nil { + return errors.Wrap(err, "packing args") + } + + fromAddress, err := c.gethks.GetRoundRobinAddress(c.chainID, c.sendingKeys()...) 
+ if err != nil { + return errors.Wrap(err, "getting next from address") + } + + _, err = c.txm.CreateTransaction(ctx, txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: c.bhs.Address(), + EncodedPayload: payload, + FeeLimit: c.config.LimitDefault(), + Strategy: txmgrcommon.NewSendEveryStrategy(), + }) + if err != nil { + return errors.Wrap(err, "creating transaction") + } + + return nil +} diff --git a/core/services/blockhashstore/bhs_test.go b/core/services/blockhashstore/bhs_test.go new file mode 100644 index 00000000..28417f47 --- /dev/null +++ b/core/services/blockhashstore/bhs_test.go @@ -0,0 +1,76 @@ +package blockhashstore_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + txmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr/mocks" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestStoreRotatesFromAddresses(t *testing.T) { + db := pgtest.NewSqlxDB(t) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := configtest.NewTestGeneralConfig(t) + kst := cltest.NewKeyStore(t, db, cfg.Database()) + require.NoError(t, 
kst.Unlock(cltest.Password)) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, KeyStore: kst.Eth(), GeneralConfig: cfg, Client: ethClient}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + chain, err := legacyChains.Get(cltest.FixtureChainID.String()) + require.NoError(t, err) + lggr := logger.TestLogger(t) + ks := keystore.New(db, utils.FastScryptParams, lggr, cfg.Database()) + require.NoError(t, ks.Unlock("blah")) + k1, err := ks.Eth().Create(&cltest.FixtureChainID) + require.NoError(t, err) + k2, err := ks.Eth().Create(&cltest.FixtureChainID) + require.NoError(t, err) + fromAddresses := []ethkey.EIP55Address{k1.EIP55Address, k2.EIP55Address} + txm := new(txmmocks.MockEvmTxManager) + bhsAddress := common.HexToAddress("0x31Ca8bf590360B3198749f852D5c516c642846F6") + + store, err := blockhash_store.NewBlockhashStore(bhsAddress, chain.Client()) + require.NoError(t, err) + bhs, err := blockhashstore.NewBulletproofBHS( + chain.Config().EVM().GasEstimator(), + chain.Config().Database(), + fromAddresses, + txm, + store, + nil, + &cltest.FixtureChainID, + ks.Eth(), + ) + require.NoError(t, err) + + txm.On("CreateTransaction", mock.Anything, mock.MatchedBy(func(tx txmgr.TxRequest) bool { + return tx.FromAddress.String() == k1.Address.String() + })).Once().Return(txmgr.Tx{}, nil) + + txm.On("CreateTransaction", mock.Anything, mock.MatchedBy(func(tx txmgr.TxRequest) bool { + return tx.FromAddress.String() == k2.Address.String() + })).Once().Return(txmgr.Tx{}, nil) + + ctx := testutils.Context(t) + + // store 2 blocks + err = bhs.Store(ctx, 1) + require.NoError(t, err) + err = bhs.Store(ctx, 2) + require.NoError(t, err) +} diff --git a/core/services/blockhashstore/common.go b/core/services/blockhashstore/common.go new file mode 100644 index 00000000..f3459972 --- /dev/null +++ b/core/services/blockhashstore/common.go @@ -0,0 +1,142 @@ +package blockhashstore + +import ( + "context" + "fmt" + "math/big" + + 
"github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +// Coordinator defines an interface for fetching request and fulfillment metadata from a VRF +// coordinator. +type Coordinator interface { + // Requests fetches VRF requests that occurred within the specified blocks. + Requests(ctx context.Context, fromBlock uint64, toBlock uint64) ([]Event, error) + + // Fulfillments fetches VRF fulfillments that occurred since the specified block. + Fulfillments(ctx context.Context, fromBlock uint64) ([]Event, error) +} + +// Event contains metadata about a VRF randomness request or fulfillment. +type Event struct { + // ID of the relevant VRF request. For a VRF V1 request, this will an encoded 32 byte array. + // For VRF V2, it will be an integer in string form. + ID string + + // Block that the request or fulfillment was included in. + Block uint64 +} + +// BHS defines an interface for interacting with a BlockhashStore contract. +// +//go:generate mockery --quiet --name BHS --output ./mocks/ --case=underscore +type BHS interface { + // Store the hash associated with blockNum. + Store(ctx context.Context, blockNum uint64) error + + // IsStored checks whether the hash associated with blockNum is already stored. + IsStored(ctx context.Context, blockNum uint64) (bool, error) + + // StoreEarliest stores the earliest possible blockhash (i.e. 
block.number - 256) + StoreEarliest(ctx context.Context) error + + IsTrusted() bool + + StoreTrusted(ctx context.Context, blockNums []uint64, blockhashes []common.Hash, recentBlock uint64, recentBlockhash common.Hash) error +} + +func GetUnfulfilledBlocksAndRequests( + ctx context.Context, + lggr logger.Logger, + coordinator Coordinator, + fromBlock, toBlock uint64, +) (map[uint64]map[string]struct{}, error) { + blockToRequests := make(map[uint64]map[string]struct{}) + requestIDToBlock := make(map[string]uint64) + + reqs, err := coordinator.Requests(ctx, fromBlock, toBlock) + if err != nil { + lggr.Errorw("Failed to fetch VRF requests", + "err", err) + return nil, errors.Wrap(err, "fetching VRF requests") + } + for _, req := range reqs { + if _, ok := blockToRequests[req.Block]; !ok { + blockToRequests[req.Block] = make(map[string]struct{}) + } + blockToRequests[req.Block][req.ID] = struct{}{} + requestIDToBlock[req.ID] = req.Block + } + + fuls, err := coordinator.Fulfillments(ctx, fromBlock) + if err != nil { + lggr.Errorw("Failed to fetch VRF fulfillments", + "err", err) + return nil, errors.Wrap(err, "fetching VRF fulfillments") + } + for _, ful := range fuls { + requestBlock, ok := requestIDToBlock[ful.ID] + if !ok { + continue + } + delete(blockToRequests[requestBlock], ful.ID) + } + + return blockToRequests, nil +} + +// LimitReqIDs converts a set of request IDs to a slice limited to maxLength. +func LimitReqIDs(reqs map[string]struct{}, maxLength int) []string { + var reqIDs []string + for id := range reqs { + reqIDs = append(reqIDs, id) + if len(reqIDs) >= maxLength { + break + } + } + return reqIDs +} + +// DecreasingBlockRange creates a contiguous block range starting with +// block `start` (inclusive) and ending at block `end` (inclusive). 
+func DecreasingBlockRange(start, end *big.Int) (ret []*big.Int, err error) { + if start.Cmp(end) == -1 { + return nil, fmt.Errorf("start (%s) must be greater than end (%s)", start.String(), end.String()) + } + ret = []*big.Int{} + for i := new(big.Int).Set(start); i.Cmp(end) >= 0; i.Sub(i, big.NewInt(1)) { + ret = append(ret, new(big.Int).Set(i)) + } + return +} + +// GetSearchWindow returns the search window (fromBlock, toBlock) given the latest block number, wait blocks and lookback blocks +func GetSearchWindow(latestBlock, waitBlocks, lookbackBlocks int) (uint64, uint64) { + var ( + fromBlock = latestBlock - lookbackBlocks + toBlock = latestBlock - waitBlocks + ) + + if fromBlock < 0 { + fromBlock = 0 + } + if toBlock < 0 { + toBlock = 0 + } + + return uint64(fromBlock), uint64(toBlock) +} + +// SendingKeys returns a list of sending keys (common.Address) given EIP55 addresses +func SendingKeys(fromAddresses []ethkey.EIP55Address) []common.Address { + var keys []common.Address + for _, a := range fromAddresses { + keys = append(keys, a.Address()) + } + return keys +} diff --git a/core/services/blockhashstore/coordinators.go b/core/services/blockhashstore/coordinators.go new file mode 100644 index 00000000..276f130a --- /dev/null +++ b/core/services/blockhashstore/coordinators.go @@ -0,0 +1,336 @@ +package blockhashstore + +import ( + "context" + "encoding/hex" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + v1 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" + v2 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + v2plus "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +var ( + _ Coordinator = MultiCoordinator{} + _ Coordinator = &V1Coordinator{} + _ Coordinator = 
&V2Coordinator{} + _ Coordinator = &V2PlusCoordinator{} +) + +// MultiCoordinator combines the data from multiple coordinators. +type MultiCoordinator []Coordinator + +// NewMultiCoordinator creates a new Coordinator that combines the results of the given +// coordinators. +func NewMultiCoordinator(coordinators ...Coordinator) Coordinator { + if len(coordinators) == 1 { + return coordinators[0] + } + return MultiCoordinator(coordinators) +} + +// Requests satisfies the Coordinator interface. +func (m MultiCoordinator) Requests( + ctx context.Context, + fromBlock uint64, + toBlock uint64, +) ([]Event, error) { + var reqs []Event + for _, c := range m { + r, err := c.Requests(ctx, fromBlock, toBlock) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + reqs = append(reqs, r...) + } + return reqs, nil +} + +// Fulfillments satisfies the Coordinator interface. +func (m MultiCoordinator) Fulfillments(ctx context.Context, fromBlock uint64) ([]Event, error) { + var fuls []Event + for _, c := range m { + f, err := c.Fulfillments(ctx, fromBlock) + if err != nil { + return nil, fmt.Errorf("%w", err) + } + fuls = append(fuls, f...) + } + return fuls, nil +} + +// V1Coordinator fetches request and fulfillment logs from a VRF V1 coordinator contract. +type V1Coordinator struct { + c v1.VRFCoordinatorInterface + lp logpoller.LogPoller +} + +// NewV1Coordinator creates a new V1Coordinator from the given contract. +func NewV1Coordinator(c v1.VRFCoordinatorInterface, lp logpoller.LogPoller) (*V1Coordinator, error) { + err := lp.RegisterFilter(logpoller.Filter{ + Name: logpoller.FilterName("VRFv1CoordinatorFeeder", c.Address()), + EventSigs: []common.Hash{ + v1.VRFCoordinatorRandomnessRequest{}.Topic(), + v1.VRFCoordinatorRandomnessRequestFulfilled{}.Topic(), + }, Addresses: []common.Address{c.Address()}, + }) + if err != nil { + return nil, err + } + return &V1Coordinator{c, lp}, nil +} + +// Requests satisfies the Coordinator interface. 
+func (v *V1Coordinator) Requests( + ctx context.Context, + fromBlock uint64, + toBlock uint64, +) ([]Event, error) { + logs, err := v.lp.LogsWithSigs( + int64(fromBlock), + int64(toBlock), + []common.Hash{ + v1.VRFCoordinatorRandomnessRequest{}.Topic(), + }, + v.c.Address(), + pg.WithParentCtx(ctx)) + if err != nil { + return nil, errors.Wrap(err, "filter v1 requests") + } + + var reqs []Event + for _, l := range logs { + requestLog, err := v.c.ParseLog(l.ToGethLog()) + if err != nil { + continue // malformed log should not break flow + } + request, ok := requestLog.(*v1.VRFCoordinatorRandomnessRequest) + if !ok { + continue // malformed log should not break flow + } + reqs = append(reqs, Event{ID: hex.EncodeToString(request.RequestID[:]), Block: request.Raw.BlockNumber}) + } + + return reqs, nil +} + +// Fulfillments satisfies the Coordinator interface. +func (v *V1Coordinator) Fulfillments(ctx context.Context, fromBlock uint64) ([]Event, error) { + toBlock, err := v.lp.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return nil, errors.Wrap(err, "fetching latest block") + } + + logs, err := v.lp.LogsWithSigs( + int64(fromBlock), + toBlock.BlockNumber, + []common.Hash{ + v1.VRFCoordinatorRandomnessRequestFulfilled{}.Topic(), + }, + v.c.Address(), + pg.WithParentCtx(ctx)) + if err != nil { + return nil, errors.Wrap(err, "filter v1 fulfillments") + } + + var fuls []Event + for _, l := range logs { + requestLog, err := v.c.ParseLog(l.ToGethLog()) + if err != nil { + continue // malformed log should not break flow + } + request, ok := requestLog.(*v1.VRFCoordinatorRandomnessRequestFulfilled) + if !ok { + continue // malformed log should not break flow + } + fuls = append(fuls, Event{ID: hex.EncodeToString(request.RequestId[:]), Block: request.Raw.BlockNumber}) + } + return fuls, nil +} + +// V2Coordinator fetches request and fulfillment logs from a VRF V2 coordinator contract. 
+type V2Coordinator struct { + c v2.VRFCoordinatorV2Interface + lp logpoller.LogPoller +} + +// NewV2Coordinator creates a new V2Coordinator from the given contract. +func NewV2Coordinator(c v2.VRFCoordinatorV2Interface, lp logpoller.LogPoller) (*V2Coordinator, error) { + err := lp.RegisterFilter(logpoller.Filter{ + Name: logpoller.FilterName("VRFv2CoordinatorFeeder", c.Address()), + EventSigs: []common.Hash{ + v2.VRFCoordinatorV2RandomWordsRequested{}.Topic(), + v2.VRFCoordinatorV2RandomWordsFulfilled{}.Topic(), + }, Addresses: []common.Address{c.Address()}, + }) + + if err != nil { + return nil, err + } + + return &V2Coordinator{c, lp}, err +} + +// Requests satisfies the Coordinator interface. +func (v *V2Coordinator) Requests( + ctx context.Context, + fromBlock uint64, + toBlock uint64, +) ([]Event, error) { + logs, err := v.lp.LogsWithSigs( + int64(fromBlock), + int64(toBlock), + []common.Hash{ + v2.VRFCoordinatorV2RandomWordsRequested{}.Topic(), + }, + v.c.Address(), + pg.WithParentCtx(ctx)) + if err != nil { + return nil, errors.Wrap(err, "filter v2 requests") + } + + var reqs []Event + for _, l := range logs { + requestLog, err := v.c.ParseLog(l.ToGethLog()) + if err != nil { + continue // malformed log should not break flow + } + request, ok := requestLog.(*v2.VRFCoordinatorV2RandomWordsRequested) + if !ok { + continue // malformed log should not break flow + } + reqs = append(reqs, Event{ID: request.RequestId.String(), Block: request.Raw.BlockNumber}) + } + + return reqs, nil +} + +// Fulfillments satisfies the Coordinator interface. 
+func (v *V2Coordinator) Fulfillments(ctx context.Context, fromBlock uint64) ([]Event, error) { + toBlock, err := v.lp.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return nil, errors.Wrap(err, "fetching latest block") + } + + logs, err := v.lp.LogsWithSigs( + int64(fromBlock), + toBlock.BlockNumber, + []common.Hash{ + v2.VRFCoordinatorV2RandomWordsFulfilled{}.Topic(), + }, + v.c.Address(), + pg.WithParentCtx(ctx)) + if err != nil { + return nil, errors.Wrap(err, "filter v2 fulfillments") + } + + var fuls []Event + for _, l := range logs { + requestLog, err := v.c.ParseLog(l.ToGethLog()) + if err != nil { + continue // malformed log should not break flow + } + request, ok := requestLog.(*v2.VRFCoordinatorV2RandomWordsFulfilled) + if !ok { + continue // malformed log should not break flow + } + fuls = append(fuls, Event{ID: request.RequestId.String(), Block: request.Raw.BlockNumber}) + } + return fuls, nil +} + +// V2PlusCoordinator fetches request and fulfillment logs from a VRF V2Plus coordinator contract. +type V2PlusCoordinator struct { + c v2plus.IVRFCoordinatorV2PlusInternalInterface + lp logpoller.LogPoller +} + +// NewV2Coordinator creates a new V2Coordinator from the given contract. +func NewV2PlusCoordinator(c v2plus.IVRFCoordinatorV2PlusInternalInterface, lp logpoller.LogPoller) (*V2PlusCoordinator, error) { + err := lp.RegisterFilter(logpoller.Filter{ + Name: logpoller.FilterName("VRFv2PlusCoordinatorFeeder", c.Address()), + EventSigs: []common.Hash{ + v2plus.IVRFCoordinatorV2PlusInternalRandomWordsRequested{}.Topic(), + v2plus.IVRFCoordinatorV2PlusInternalRandomWordsFulfilled{}.Topic(), + }, Addresses: []common.Address{c.Address()}, + }) + + if err != nil { + return nil, err + } + + return &V2PlusCoordinator{c, lp}, err +} + +// Requests satisfies the Coordinator interface. 
+func (v *V2PlusCoordinator) Requests( + ctx context.Context, + fromBlock uint64, + toBlock uint64, +) ([]Event, error) { + logs, err := v.lp.LogsWithSigs( + int64(fromBlock), + int64(toBlock), + []common.Hash{ + v2plus.IVRFCoordinatorV2PlusInternalRandomWordsRequested{}.Topic(), + }, + v.c.Address(), + pg.WithParentCtx(ctx)) + if err != nil { + return nil, errors.Wrap(err, "filter v2 requests") + } + + var reqs []Event + for _, l := range logs { + requestLog, err := v.c.ParseLog(l.ToGethLog()) + if err != nil { + continue // malformed log should not break flow + } + request, ok := requestLog.(*v2plus.IVRFCoordinatorV2PlusInternalRandomWordsRequested) + if !ok { + continue // malformed log should not break flow + } + reqs = append(reqs, Event{ID: request.RequestId.String(), Block: request.Raw.BlockNumber}) + } + + return reqs, nil +} + +// Fulfillments satisfies the Coordinator interface. +func (v *V2PlusCoordinator) Fulfillments(ctx context.Context, fromBlock uint64) ([]Event, error) { + toBlock, err := v.lp.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return nil, errors.Wrap(err, "fetching latest block") + } + + logs, err := v.lp.LogsWithSigs( + int64(fromBlock), + toBlock.BlockNumber, + []common.Hash{ + v2plus.IVRFCoordinatorV2PlusInternalRandomWordsFulfilled{}.Topic(), + }, + v.c.Address(), + pg.WithParentCtx(ctx)) + if err != nil { + return nil, errors.Wrap(err, "filter v2 fulfillments") + } + + var fuls []Event + for _, l := range logs { + requestLog, err := v.c.ParseLog(l.ToGethLog()) + if err != nil { + continue // malformed log should not break flow + } + request, ok := requestLog.(*v2plus.IVRFCoordinatorV2PlusInternalRandomWordsFulfilled) + if !ok { + continue // malformed log should not break flow + } + fuls = append(fuls, Event{ID: request.RequestId.String(), Block: request.Raw.BlockNumber}) + } + return fuls, nil +} diff --git a/core/services/blockhashstore/delegate.go b/core/services/blockhashstore/delegate.go new file mode 100644 index 
00000000..95466dc6 --- /dev/null +++ b/core/services/blockhashstore/delegate.go @@ -0,0 +1,259 @@ +package blockhashstore + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + v1 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/trusted_blockhash_store" + v2 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + v2plus "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var _ job.ServiceCtx = &service{} + +// Delegate creates BlockhashStore feeder jobs. +type Delegate struct { + logger logger.Logger + legacyChains legacyevm.LegacyChainContainer + ks keystore.Eth +} + +// NewDelegate creates a new Delegate. +func NewDelegate( + logger logger.Logger, + legacyChains legacyevm.LegacyChainContainer, + ks keystore.Eth, +) *Delegate { + return &Delegate{ + logger: logger, + legacyChains: legacyChains, + ks: ks, + } +} + +// JobType satisfies the job.Delegate interface. +func (d *Delegate) JobType() job.Type { + return job.BlockhashStore +} + +// ServicesForSpec satisfies the job.Delegate interface. 
+func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { + if jb.BlockhashStoreSpec == nil { + return nil, errors.Errorf( + "blockhashstore.Delegate expects a BlockhashStoreSpec to be present, got %+v", jb) + } + + chain, err := d.legacyChains.Get(jb.BlockhashStoreSpec.EVMChainID.String()) + if err != nil { + return nil, fmt.Errorf( + "getting chain ID %d: %w", jb.BlockhashStoreSpec.EVMChainID.ToInt(), err) + } + + if !chain.Config().Feature().LogPoller() { + return nil, errors.New("log poller must be enabled to run blockhashstore") + } + + keys, err := d.ks.EnabledKeysForChain(chain.ID()) + if err != nil { + return nil, errors.Wrap(err, "getting sending keys") + } + if len(keys) == 0 { + return nil, fmt.Errorf("missing sending keys for chain ID: %v", chain.ID()) + } + fromAddresses := []ethkey.EIP55Address{keys[0].EIP55Address} + if jb.BlockhashStoreSpec.FromAddresses != nil { + fromAddresses = jb.BlockhashStoreSpec.FromAddresses + } + + bhs, err := blockhash_store.NewBlockhashStore( + jb.BlockhashStoreSpec.BlockhashStoreAddress.Address(), chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "building BHS") + } + + var trustedBHS *trusted_blockhash_store.TrustedBlockhashStore + if jb.BlockhashStoreSpec.TrustedBlockhashStoreAddress != nil && jb.BlockhashStoreSpec.TrustedBlockhashStoreAddress.Hex() != EmptyAddress { + trustedBHS, err = trusted_blockhash_store.NewTrustedBlockhashStore( + jb.BlockhashStoreSpec.TrustedBlockhashStoreAddress.Address(), + chain.Client(), + ) + if err != nil { + return nil, errors.Wrap(err, "building trusted BHS") + } + } + + lp := chain.LogPoller() + var coordinators []Coordinator + if jb.BlockhashStoreSpec.CoordinatorV1Address != nil { + var c *v1.VRFCoordinator + if c, err = v1.NewVRFCoordinator( + jb.BlockhashStoreSpec.CoordinatorV1Address.Address(), chain.Client()); err != nil { + + return nil, errors.Wrap(err, "building V1 coordinator") + } + + var coord *V1Coordinator + coord, err = NewV1Coordinator(c, 
lp) + if err != nil { + return nil, errors.Wrap(err, "building V1 coordinator") + } + coordinators = append(coordinators, coord) + } + if jb.BlockhashStoreSpec.CoordinatorV2Address != nil { + var c *v2.VRFCoordinatorV2 + if c, err = v2.NewVRFCoordinatorV2( + jb.BlockhashStoreSpec.CoordinatorV2Address.Address(), chain.Client()); err != nil { + + return nil, errors.Wrap(err, "building V2 coordinator") + } + + var coord *V2Coordinator + coord, err = NewV2Coordinator(c, lp) + if err != nil { + return nil, errors.Wrap(err, "building V2 coordinator") + } + coordinators = append(coordinators, coord) + } + if jb.BlockhashStoreSpec.CoordinatorV2PlusAddress != nil { + var c v2plus.IVRFCoordinatorV2PlusInternalInterface + if c, err = v2plus.NewIVRFCoordinatorV2PlusInternal( + jb.BlockhashStoreSpec.CoordinatorV2PlusAddress.Address(), chain.Client()); err != nil { + + return nil, errors.Wrap(err, "building V2Plus coordinator") + } + + var coord *V2PlusCoordinator + coord, err = NewV2PlusCoordinator(c, lp) + if err != nil { + return nil, errors.Wrap(err, "building V2Plus coordinator") + } + coordinators = append(coordinators, coord) + } + + bpBHS, err := NewBulletproofBHS( + chain.Config().EVM().GasEstimator(), + chain.Config().Database(), + fromAddresses, + chain.TxManager(), + bhs, + trustedBHS, + chain.ID(), + d.ks, + ) + if err != nil { + return nil, errors.Wrap(err, "building bulletproof bhs") + } + + log := d.logger.Named("BHSFeeder").With("jobID", jb.ID, "externalJobID", jb.ExternalJobID) + feeder := NewFeeder( + log, + NewMultiCoordinator(coordinators...), + bpBHS, + lp, + jb.BlockhashStoreSpec.TrustedBlockhashStoreBatchSize, + int(jb.BlockhashStoreSpec.WaitBlocks), + int(jb.BlockhashStoreSpec.LookbackBlocks), + jb.BlockhashStoreSpec.HeartbeatPeriod, + func(ctx context.Context) (uint64, error) { + head, err := lp.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return 0, errors.Wrap(err, "getting chain head") + } + return uint64(head.BlockNumber), nil + }) + + 
return []job.ServiceCtx{&service{ + feeder: feeder, + pollPeriod: jb.BlockhashStoreSpec.PollPeriod, + runTimeout: jb.BlockhashStoreSpec.RunTimeout, + logger: log, + }}, nil +} + +// AfterJobCreated satisfies the job.Delegate interface. +func (d *Delegate) AfterJobCreated(spec job.Job) {} + +// AfterJobCreated satisfies the job.Delegate interface. +func (d *Delegate) BeforeJobCreated(spec job.Job) {} + +// AfterJobCreated satisfies the job.Delegate interface. +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} + +// OnDeleteJob satisfies the job.Delegate interface. +func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } + +// service is a job.Service that runs the BHS feeder every pollPeriod. +type service struct { + services.StateMachine + feeder *Feeder + wg sync.WaitGroup + pollPeriod time.Duration + runTimeout time.Duration + logger logger.Logger + parentCtx context.Context + cancel context.CancelFunc +} + +// Start the BHS feeder service, satisfying the job.Service interface. +func (s *service) Start(context.Context) error { + return s.StartOnce("BHS Feeder Service", func() error { + s.logger.Infow("Starting BHS feeder") + ticker := time.NewTicker(utils.WithJitter(s.pollPeriod)) + s.parentCtx, s.cancel = context.WithCancel(context.Background()) + s.wg.Add(2) + go func() { + defer s.wg.Done() + s.feeder.StartHeartbeats(s.parentCtx, &realTimer{}) + }() + go func() { + defer s.wg.Done() + defer ticker.Stop() + for { + select { + case <-ticker.C: + s.runFeeder() + case <-s.parentCtx.Done(): + return + } + } + }() + return nil + }) +} + +// Close the BHS feeder service, satisfying the job.Service interface. 
+func (s *service) Close() error { + return s.StopOnce("BHS Feeder Service", func() error { + s.logger.Infow("Stopping BHS feeder") + s.cancel() + s.wg.Wait() + return nil + }) +} + +func (s *service) runFeeder() { + s.logger.Debugw("Running BHS feeder") + ctx, cancel := context.WithTimeout(s.parentCtx, s.runTimeout) + defer cancel() + err := s.feeder.Run(ctx) + if err == nil { + s.logger.Debugw("BHS feeder run completed successfully") + } else { + s.logger.Errorw("BHS feeder run was at least partially unsuccessful", + "err", err) + } +} diff --git a/core/services/blockhashstore/delegate_test.go b/core/services/blockhashstore/delegate_test.go new file mode 100644 index 00000000..6ef49d35 --- /dev/null +++ b/core/services/blockhashstore/delegate_test.go @@ -0,0 +1,175 @@ +package blockhashstore_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + mocklp "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +func TestDelegate_JobType(t *testing.T) { + t.Parallel() + + lggr := logger.TestLogger(t) + delegate := blockhashstore.NewDelegate(lggr, nil, nil) + + assert.Equal(t, job.BlockhashStore, delegate.JobType()) +} + +type testData struct { + ethClient *mocks.Client + ethKeyStore keystore.Eth + legacyChains legacyevm.LegacyChainContainer + sendingKey ethkey.KeyV2 + logs *observer.ObservedLogs +} + +func createTestDelegate(t *testing.T) (*blockhashstore.Delegate, *testData) { + t.Helper() + + lggr, logs := logger.TestLoggerObserved(t, zapcore.DebugLevel) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Feature.LogPoller = func(b bool) *bool { return &b }(true) + }) + db := pgtest.NewSqlxDB(t) + kst := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + sendingKey, _ := cltest.MustInsertRandomKey(t, kst) + lp := &mocklp.LogPoller{} + lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("LatestBlock", mock.Anything, mock.Anything).Return(logpoller.LogPollerBlock{}, nil) + + relayExtenders := evmtest.NewChainRelayExtenders( + t, + evmtest.TestChainOpts{ + DB: db, + KeyStore: kst, + GeneralConfig: cfg, + Client: ethClient, + LogPoller: lp, + }, + ) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + return blockhashstore.NewDelegate(lggr, legacyChains, kst), &testData{ + ethClient: ethClient, + ethKeyStore: kst, + legacyChains: legacyChains, + sendingKey: sendingKey, + logs: logs, + } +} + +func TestDelegate_ServicesForSpec(t *testing.T) { + t.Parallel() + + delegate, testData := createTestDelegate(t) + + require.NotEmpty(t, testData.legacyChains.Slice()) + defaultWaitBlocks := (int32)(testData.legacyChains.Slice()[0].Config().EVM().FinalityDepth()) + + t.Run("happy", func(t *testing.T) { + spec := 
job.Job{BlockhashStoreSpec: &job.BlockhashStoreSpec{WaitBlocks: defaultWaitBlocks, EVMChainID: (*big.Big)(testutils.FixtureChainID)}} + services, err := delegate.ServicesForSpec(spec) + + require.NoError(t, err) + require.Len(t, services, 1) + }) + + t.Run("happy with coordinators", func(t *testing.T) { + coordinatorV1 := cltest.NewEIP55Address() + coordinatorV2 := cltest.NewEIP55Address() + coordinatorV2Plus := cltest.NewEIP55Address() + + spec := job.Job{BlockhashStoreSpec: &job.BlockhashStoreSpec{ + WaitBlocks: defaultWaitBlocks, + CoordinatorV1Address: &coordinatorV1, + CoordinatorV2Address: &coordinatorV2, + CoordinatorV2PlusAddress: &coordinatorV2Plus, + EVMChainID: (*big.Big)(testutils.FixtureChainID), + }} + services, err := delegate.ServicesForSpec(spec) + + require.NoError(t, err) + require.Len(t, services, 1) + }) + + t.Run("missing BlockhashStoreSpec", func(t *testing.T) { + spec := job.Job{BlockhashStoreSpec: nil} + _, err := delegate.ServicesForSpec(spec) + assert.Error(t, err) + }) + + t.Run("wrong EVMChainID", func(t *testing.T) { + spec := job.Job{BlockhashStoreSpec: &job.BlockhashStoreSpec{ + EVMChainID: big.NewI(123), + }} + _, err := delegate.ServicesForSpec(spec) + assert.Error(t, err) + }) + + t.Run("missing EnabledKeysForChain", func(t *testing.T) { + _, err := testData.ethKeyStore.Delete(testData.sendingKey.ID()) + require.NoError(t, err) + + spec := job.Job{BlockhashStoreSpec: &job.BlockhashStoreSpec{ + WaitBlocks: defaultWaitBlocks, + }} + _, err = delegate.ServicesForSpec(spec) + assert.Error(t, err) + }) +} + +func TestDelegate_StartStop(t *testing.T) { + t.Parallel() + + delegate, testData := createTestDelegate(t) + + require.NotEmpty(t, testData.legacyChains.Slice()) + defaultWaitBlocks := (int32)(testData.legacyChains.Slice()[0].Config().EVM().FinalityDepth()) + spec := job.Job{BlockhashStoreSpec: &job.BlockhashStoreSpec{ + WaitBlocks: defaultWaitBlocks, + PollPeriod: time.Second, + RunTimeout: testutils.WaitTimeout(t), + EVMChainID: 
(*big.Big)(testutils.FixtureChainID), + }} + services, err := delegate.ServicesForSpec(spec) + + require.NoError(t, err) + require.Len(t, services, 1) + + err = services[0].Start(testutils.Context(t)) + require.NoError(t, err) + + assert.Eventually(t, func() bool { + return testData.logs.FilterMessage("Starting BHS feeder").Len() > 0 && + testData.logs.FilterMessage("Running BHS feeder").Len() > 0 && + testData.logs.FilterMessage("BHS feeder run completed successfully").Len() > 0 + }, testutils.WaitTimeout(t), testutils.TestInterval) + + err = services[0].Close() + require.NoError(t, err) + + assert.NotZero(t, testData.logs.FilterMessage("Stopping BHS feeder").Len()) +} diff --git a/core/services/blockhashstore/feeder.go b/core/services/blockhashstore/feeder.go new file mode 100644 index 00000000..c829c082 --- /dev/null +++ b/core/services/blockhashstore/feeder.go @@ -0,0 +1,304 @@ +package blockhashstore + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "go.uber.org/multierr" + "golang.org/x/exp/maps" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +const trustedTimeout = 1 * time.Second + +// NewFeeder creates a new Feeder instance. 
+func NewFeeder( + logger logger.Logger, + coordinator Coordinator, + bhs BHS, + lp logpoller.LogPoller, + trustedBHSBatchSize int32, + waitBlocks int, + lookbackBlocks int, + heartbeatPeriod time.Duration, + latestBlock func(ctx context.Context) (uint64, error), +) *Feeder { + return &Feeder{ + lggr: logger, + coordinator: coordinator, + bhs: bhs, + lp: lp, + trustedBHSBatchSize: trustedBHSBatchSize, + waitBlocks: waitBlocks, + lookbackBlocks: lookbackBlocks, + latestBlock: latestBlock, + stored: make(map[uint64]struct{}), + storedTrusted: make(map[uint64]common.Hash), + lastRunBlock: 0, + wgStored: sync.WaitGroup{}, + heartbeatPeriod: heartbeatPeriod, + } +} + +// Feeder checks recent VRF coordinator events and stores any blockhashes for blocks within +// waitBlocks and lookbackBlocks that have unfulfilled requests. +type Feeder struct { + lggr logger.Logger + coordinator Coordinator + bhs BHS + lp logpoller.LogPoller + trustedBHSBatchSize int32 + waitBlocks int + lookbackBlocks int + latestBlock func(ctx context.Context) (uint64, error) + + // heartbeatPeriodTime is a heartbeat period in seconds by which + // the feeder will always store a blockhash, even if there are no + // unfulfilled requests. This is to ensure that there are blockhashes + // in the store to start from if we ever need to run backwards mode. 
+ heartbeatPeriod time.Duration + + stored map[uint64]struct{} // used for trustless feeder + storedTrusted map[uint64]common.Hash // used for trusted feeder + lastRunBlock uint64 + wgStored sync.WaitGroup + batchLock sync.Mutex + errsLock sync.Mutex +} + +//go:generate mockery --quiet --name Timer --output ./mocks/ --case=underscore +type Timer interface { + After(d time.Duration) <-chan time.Time +} + +type realTimer struct{} + +func (r *realTimer) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +func (f *Feeder) StartHeartbeats(ctx context.Context, timer Timer) { + if f.heartbeatPeriod == 0 { + f.lggr.Infow("Not starting heartbeat blockhash using storeEarliest") + return + } + f.lggr.Infow(fmt.Sprintf("Starting heartbeat blockhash using storeEarliest every %s", f.heartbeatPeriod.String())) + for { + after := timer.After(f.heartbeatPeriod) + select { + case <-after: + f.lggr.Infow("storing heartbeat blockhash using storeEarliest", + "heartbeatPeriodSeconds", f.heartbeatPeriod.Seconds()) + if err := f.bhs.StoreEarliest(ctx); err != nil { + f.lggr.Infow("failed to store heartbeat blockhash using storeEarliest", + "heartbeatPeriodSeconds", f.heartbeatPeriod.Seconds(), + "err", err) + } + case <-ctx.Done(): + return + } + } +} + +// Run the feeder. +func (f *Feeder) Run(ctx context.Context) error { + latestBlock, err := f.latestBlock(ctx) + if err != nil { + f.lggr.Errorw("Failed to fetch current block number", "err", err) + return errors.Wrap(err, "fetching block number") + } + + fromBlock, toBlock := GetSearchWindow(int(latestBlock), f.waitBlocks, f.lookbackBlocks) + if toBlock == 0 { + // Nothing to process, no blocks are in range. + return nil + } + + lggr := f.lggr.With("latestBlock", latestBlock, "fromBlock", fromBlock, "toBlock", toBlock) + blockToRequests, err := GetUnfulfilledBlocksAndRequests(ctx, lggr, f.coordinator, fromBlock, toBlock) + if err != nil { + return err + } + + // For a trusted BHS, run our trusted logic. 
+ if f.bhs.IsTrusted() { + return f.runTrusted(ctx, latestBlock, fromBlock, blockToRequests) + } + + var errs error + for block, unfulfilledReqs := range blockToRequests { + if len(unfulfilledReqs) == 0 { + continue + } + if _, ok := f.stored[block]; ok { + // Already stored + continue + } + stored, err := f.bhs.IsStored(ctx, block) + if err != nil { + f.lggr.Errorw("Failed to check if block is already stored, attempting to store anyway", + "err", err, + "block", block) + errs = multierr.Append(errs, errors.Wrap(err, "checking if stored")) + } else if stored { + // IsStored() can be based on unfinalized blocks. Therefore, f.stored mapping is not updated + f.lggr.Infow("Blockhash already stored", + "block", block, "latestBlock", latestBlock, + "unfulfilledReqIDs", LimitReqIDs(unfulfilledReqs, 50)) + continue + } + + // Block needs to be stored + err = f.bhs.Store(ctx, block) + if err != nil { + f.lggr.Errorw("Failed to store block", "err", err, "block", block) + errs = multierr.Append(errs, errors.Wrap(err, "storing block")) + continue + } + + f.lggr.Infow("Stored blockhash", + "block", block, "latestBlock", latestBlock, + "unfulfilledReqIDs", LimitReqIDs(unfulfilledReqs, 50)) + f.stored[block] = struct{}{} + } + + if f.lastRunBlock != 0 { + // Prune stored, anything older than fromBlock can be discarded + for block := f.lastRunBlock - uint64(f.lookbackBlocks); block < fromBlock; block++ { + if _, ok := f.stored[block]; ok { + delete(f.stored, block) + f.lggr.Debugw("Pruned block from stored cache", + "block", block, "latestBlock", latestBlock) + } + } + } + f.lastRunBlock = latestBlock + return errs +} + +func (f *Feeder) runTrusted( + ctx context.Context, + latestBlock uint64, + fromBlock uint64, + blockToRequests map[uint64]map[string]struct{}, +) error { + var errs error + + // Iterate through each request block via waitGroup. + // For blocks with pending requests, add them to the batch to be stored. 
+ // Note: Golang maps sort items in a range randomly, so although the batch size is used + // to limit blocks-per-batch, every block has an equal chance of getting picked up + // on each run. + var batch = make(map[uint64]struct{}) + for blockKey, unfulfilledReqs := range blockToRequests { + f.wgStored.Add(1) + var unfulfilled = unfulfilledReqs + var block = blockKey + go func() { + defer f.wgStored.Done() + if len(unfulfilled) == 0 { + return + } + + // Do not store a block if it has been marked as stored; otherwise, store it even + // if the RPC call errors, as to be conservative. + timeoutCtx, cancel := context.WithTimeout(ctx, trustedTimeout) + defer cancel() + stored, err := f.bhs.IsStored(timeoutCtx, block) + if err != nil { + f.lggr.Errorw("Failed to check if block is already stored, attempting to store anyway", + "err", err, + "block", block) + f.errsLock.Lock() + errs = multierr.Append(errs, errors.Wrap(err, "checking if stored")) + f.errsLock.Unlock() + } else if stored { + f.lggr.Infow("Blockhash already stored", + "block", block, "latestBlock", latestBlock, + "unfulfilledReqIDs", LimitReqIDs(unfulfilled, 50)) + return + } + + // If there's room, store the block in the batch. Threadsafe. + f.batchLock.Lock() + if len(batch) < int(f.trustedBHSBatchSize) { + batch[block] = struct{}{} + } + f.batchLock.Unlock() + }() + } + + // Ensure all blocks are checked before storing the batch. + f.wgStored.Wait() + + // For a non-empty batch, store all blocks. + if len(batch) != 0 { + var blocksToStore []uint64 + var blockhashesToStore []common.Hash + var latestBlockhash common.Hash + + // Get all logpoller blocks for the range including the batch and the latest block, + // as to include the recent blockhash. 
+ lpBlocks, err := f.lp.GetBlocksRange(ctx, append(maps.Keys(batch), latestBlock)) + if err != nil { + f.lggr.Errorw("Failed to get blocks range", + "err", err, + "blocks", batch) + errs = multierr.Append(errs, errors.Wrap(err, "log poller get blocks range")) + return errs + } + + // If the log poller block's blocknumber is included in the desired batch, + // append its blockhash to our blockhashes we want to store. + // If it is the log poller block pertaining to our recent block number, assig it. + for _, b := range lpBlocks { + if b.BlockNumber == int64(latestBlock) { + latestBlockhash = b.BlockHash + } + if f.storedTrusted[uint64(b.BlockNumber)] == b.BlockHash { + // blockhash is already stored. skip to save gas + continue + } + if _, ok := batch[uint64(b.BlockNumber)]; ok { + blocksToStore = append(blocksToStore, uint64(b.BlockNumber)) + blockhashesToStore = append(blockhashesToStore, b.BlockHash) + } + } + + if len(blocksToStore) == 0 { + f.lggr.Debugw("no blocks to store", "latestBlock", latestBlock) + return errs + } + // Store the batch of blocks and their blockhashes. + err = f.bhs.StoreTrusted(ctx, blocksToStore, blockhashesToStore, latestBlock, latestBlockhash) + if err != nil { + f.lggr.Errorw("Failed to store trusted", + "err", err, + "blocks", blocksToStore, + "blockhashesToStore", blockhashesToStore, + "latestBlock", latestBlock, + "latestBlockhash", latestBlockhash, + ) + errs = multierr.Append(errs, errors.Wrap(err, "checking if stored")) + return errs + } + for i, block := range blocksToStore { + f.storedTrusted[block] = blockhashesToStore[i] + } + } + + // Prune storedTrusted, anything older than fromBlock can be discarded. 
+ for b := range f.storedTrusted { + if b < fromBlock { + delete(f.storedTrusted, b) + } + } + + return errs +} diff --git a/core/services/blockhashstore/feeder_test.go b/core/services/blockhashstore/feeder_test.go new file mode 100644 index 00000000..3fcf67be --- /dev/null +++ b/core/services/blockhashstore/feeder_test.go @@ -0,0 +1,1119 @@ +package blockhashstore + +import ( + "context" + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + + "github.com/goplugin/plugin-common/pkg/utils/mathutil" + + mocklp "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + bhsmocks "github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore/mocks" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + loggermocks "github.com/goplugin/pluginv3.0/v2/core/logger/mocks" +) + +const ( + // VRF-only events. 
+ randomWordsRequestedV2Plus string = "RandomWordsRequested" + randomWordsFulfilledV2Plus string = "RandomWordsFulfilled" + randomWordsRequestedV2 string = "RandomWordsRequested" + randomWordsFulfilledV2 string = "RandomWordsFulfilled" + randomWordsRequestedV1 string = "RandomnessRequest" + randomWordsFulfilledV1 string = "RandomnessRequestFulfilled" +) + +var ( + vrfCoordinatorV2PlusABI = evmtypes.MustGetABI(vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalMetaData.ABI) + vrfCoordinatorV2ABI = evmtypes.MustGetABI(vrf_coordinator_v2.VRFCoordinatorV2MetaData.ABI) + vrfCoordinatorV1ABI = evmtypes.MustGetABI(solidity_vrf_coordinator_interface.VRFCoordinatorMetaData.ABI) + + _ Coordinator = &TestCoordinator{} + _ BHS = &TestBHS{} + tests = []testCase{ + { + name: "single unfulfilled request", + requests: []Event{{Block: 150, ID: "1000"}}, + wait: 25, + lookback: 100, + latest: 200, + expectedStored: []uint64{150}, + expectedStoredMapBlocks: []uint64{150}, + }, + { + name: "single fulfilled request", + requests: []Event{{Block: 150, ID: "1000"}}, + fulfillments: []Event{{Block: 155, ID: "1000"}}, + wait: 25, + lookback: 100, + latest: 200, + expectedStored: []uint64{}, + expectedStoredMapBlocks: []uint64{}, + }, + { + name: "single already fulfilled", + requests: []Event{{Block: 150, ID: "1000"}}, + wait: 25, + lookback: 100, + latest: 200, + bhs: TestBHS{Stored: []uint64{150}}, + expectedStored: []uint64{150}, + expectedStoredMapBlocks: []uint64{}, + }, + { + name: "error checking if stored, store anyway", + requests: []Event{{Block: 150, ID: "1000"}}, + wait: 25, + lookback: 100, + latest: 200, + bhs: TestBHS{ErrorsIsStored: []uint64{150}}, + expectedStored: []uint64{150}, + expectedStoredMapBlocks: []uint64{150}, + expectedErrMsg: "checking if stored: error checking if stored", + }, + { + name: "error storing, continue to next block anyway", + requests: []Event{{Block: 150, ID: "1000"}, {Block: 151, ID: "1000"}}, + wait: 25, + lookback: 100, + latest: 
200, + bhs: TestBHS{ErrorsStore: []uint64{150}}, + expectedStored: []uint64{151}, + expectedStoredMapBlocks: []uint64{151}, + expectedErrMsg: "storing block: error storing", + }, + { + name: "multiple requests same block, some fulfilled", + requests: []Event{ + {Block: 150, ID: "10001"}, + {Block: 150, ID: "10002"}, + {Block: 150, ID: "10003"}}, + fulfillments: []Event{ + {Block: 150, ID: "10001"}, + {Block: 150, ID: "10003"}}, + wait: 25, + lookback: 100, + latest: 200, + expectedStored: []uint64{150}, + expectedStoredMapBlocks: []uint64{150}, + }, + { + name: "multiple requests same block, all fulfilled", + requests: []Event{ + {Block: 150, ID: "10001"}, + {Block: 150, ID: "10002"}, + {Block: 150, ID: "10003"}}, + fulfillments: []Event{ + {Block: 150, ID: "10001"}, + {Block: 150, ID: "10002"}, + {Block: 150, ID: "10003"}}, + wait: 25, + lookback: 100, + latest: 200, + expectedStored: []uint64{}, + expectedStoredMapBlocks: []uint64{}, + }, + { + name: "fulfillment no matching request no error", + requests: []Event{{Block: 150, ID: "1000"}}, + fulfillments: []Event{{Block: 199, ID: "10002"}}, + wait: 25, + lookback: 100, + latest: 200, + expectedStored: []uint64{150}, + expectedStoredMapBlocks: []uint64{150}, + }, + { + name: "multiple unfulfilled requests", + requests: []Event{{Block: 150, ID: "10001"}, {Block: 151, ID: "10002"}}, + wait: 25, + lookback: 100, + latest: 200, + expectedStored: []uint64{150, 151}, + expectedStoredMapBlocks: []uint64{150, 151}, + }, + { + name: "multiple fulfilled requests", + requests: []Event{{Block: 150, ID: "10001"}, {Block: 151, ID: "10002"}}, + fulfillments: []Event{{Block: 150, ID: "10001"}, {Block: 151, ID: "10002"}}, + wait: 25, + lookback: 100, + latest: 200, + expectedStored: []uint64{}, + expectedStoredMapBlocks: []uint64{}, + }, + { + name: "recent unfulfilled request do not store", + requests: []Event{{Block: 185, ID: "1000"}}, + wait: 25, + lookback: 100, + latest: 200, + expectedStored: []uint64{}, + 
expectedStoredMapBlocks: []uint64{}, + }, + { + name: "old unfulfilled request do not store", + requests: []Event{{Block: 99, ID: "1000"}, {Block: 57, ID: "1000"}}, + wait: 25, + lookback: 100, + latest: 200, + expectedStored: []uint64{}, + expectedStoredMapBlocks: []uint64{}, + }, + { + name: "mixed", + requests: []Event{ + // Block 150 + {Block: 150, ID: "10001"}, + {Block: 150, ID: "10002"}, + {Block: 150, ID: "10003"}, + + // Block 151 + {Block: 151, ID: "10004"}, + {Block: 151, ID: "10005"}, + + // Block 153 + {Block: 153, ID: "10006"}, + + // Block 154 + {Block: 154, ID: "10007"}}, + fulfillments: []Event{ + // Block 150 + {Block: 150, ID: "10001"}, + {Block: 150, ID: "10002"}, + // request3 no fulfillment + + // Block 151 + {Block: 151, ID: "10004"}, + {Block: 151, ID: "10005"}, + + // Block 153 - no fulfillment + + // Block 154 + {Block: 154, ID: "10007"}}, + wait: 25, + lookback: 100, + latest: 200, + expectedStored: []uint64{150, 153}, + expectedStoredMapBlocks: []uint64{150, 153}, + }, + { + name: "lookback before 0th block", + requests: []Event{{Block: 20, ID: "1000"}}, + wait: 25, + lookback: 100, + latest: 50, + expectedStored: []uint64{20}, + expectedStoredMapBlocks: []uint64{20}, + }, + } +) + +func TestStartHeartbeats(t *testing.T) { + t.Run("bhs_heartbeat_happy_path", func(t *testing.T) { + expectedDuration := 600 * time.Second + mockBHS := bhsmocks.NewBHS(t) + mockLogger := loggermocks.NewLogger(t) + feeder := NewFeeder( + mockLogger, + &TestCoordinator{}, // Not used for this test + mockBHS, + &mocklp.LogPoller{}, // Not used for this test + 0, + 25, // Not used for this test + 100, // Not used for this test + expectedDuration, + func(ctx context.Context) (uint64, error) { + return tests[0].latest, nil + }) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + mockTimer := bhsmocks.NewTimer(t) + + mockBHS.On("StoreEarliest", ctx).Return(nil).Once() + mockTimer.On("After", expectedDuration).Return(func() <-chan time.Time { + c := 
make(chan time.Time) + close(c) + return c + }()).Once() + mockTimer.On("After", expectedDuration).Return(func() <-chan time.Time { + c := make(chan time.Time) + return c + }()).Run(func(args mock.Arguments) { + cancel() + }).Once() + mockLogger.On("Infow", "Starting heartbeat blockhash using storeEarliest every 10m0s").Once() + mockLogger.On("Infow", "storing heartbeat blockhash using storeEarliest", + "heartbeatPeriodSeconds", expectedDuration.Seconds()).Once() + require.Len(t, mockLogger.ExpectedCalls, 2) + require.Len(t, mockTimer.ExpectedCalls, 2) + defer mockTimer.AssertExpectations(t) + defer mockBHS.AssertExpectations(t) + defer mockLogger.AssertExpectations(t) + + feeder.StartHeartbeats(ctx, mockTimer) + }) + + t.Run("bhs_heartbeat_sad_path_store_earliest_err", func(t *testing.T) { + expectedDuration := 600 * time.Second + expectedError := fmt.Errorf("insufficient gas") + mockBHS := bhsmocks.NewBHS(t) + mockLogger := loggermocks.NewLogger(t) + feeder := NewFeeder( + mockLogger, + &TestCoordinator{}, // Not used for this test + mockBHS, + &mocklp.LogPoller{}, // Not used for this test + 0, + 25, // Not used for this test + 100, // Not used for this test + expectedDuration, + func(ctx context.Context) (uint64, error) { + return tests[0].latest, nil + }) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + mockTimer := bhsmocks.NewTimer(t) + + mockBHS.On("StoreEarliest", ctx).Return(expectedError).Once() + mockTimer.On("After", expectedDuration).Return(func() <-chan time.Time { + c := make(chan time.Time) + close(c) + return c + }()).Once() + mockTimer.On("After", expectedDuration).Return(func() <-chan time.Time { + c := make(chan time.Time) + return c + }()).Run(func(args mock.Arguments) { + cancel() + }).Once() + mockLogger.On("Infow", "Starting heartbeat blockhash using storeEarliest every 10m0s").Once() + mockLogger.On("Infow", "storing heartbeat blockhash using storeEarliest", + "heartbeatPeriodSeconds", expectedDuration.Seconds()).Once() + 
mockLogger.On("Infow", "failed to store heartbeat blockhash using storeEarliest", + "heartbeatPeriodSeconds", expectedDuration.Seconds(), + "err", expectedError).Once() + require.Len(t, mockLogger.ExpectedCalls, 3) + require.Len(t, mockTimer.ExpectedCalls, 2) + defer mockTimer.AssertExpectations(t) + defer mockBHS.AssertExpectations(t) + defer mockLogger.AssertExpectations(t) + + feeder.StartHeartbeats(ctx, mockTimer) + }) + + t.Run("bhs_heartbeat_sad_path_heartbeat_0", func(t *testing.T) { + expectedDuration := 0 * time.Second + mockBHS := bhsmocks.NewBHS(t) + mockLogger := loggermocks.NewLogger(t) + feeder := NewFeeder( + mockLogger, + &TestCoordinator{}, // Not used for this test + mockBHS, + &mocklp.LogPoller{}, // Not used for this test + 0, + 25, // Not used for this test + 100, // Not used for this test + expectedDuration, + func(ctx context.Context) (uint64, error) { + return tests[0].latest, nil + }) + + mockTimer := bhsmocks.NewTimer(t) + mockLogger.On("Infow", "Not starting heartbeat blockhash using storeEarliest").Once() + require.Len(t, mockLogger.ExpectedCalls, 1) + require.Len(t, mockBHS.ExpectedCalls, 0) + require.Len(t, mockTimer.ExpectedCalls, 0) + defer mockTimer.AssertExpectations(t) + defer mockBHS.AssertExpectations(t) + defer mockLogger.AssertExpectations(t) + + feeder.StartHeartbeats(testutils.Context(t), mockTimer) + }) +} + +type testCase struct { + name string + requests []Event + fulfillments []Event + wait int + lookback int + latest uint64 + bhs TestBHS + expectedStored []uint64 + expectedStoredMapBlocks []uint64 // expected state of stored map in Feeder struct + expectedErrMsg string +} + +func TestFeeder(t *testing.T) { + for _, test := range tests { + t.Run(test.name, test.testFeeder) + } +} + +func (test testCase) testFeeder(t *testing.T) { + coordinator := &TestCoordinator{ + RequestEvents: test.requests, + FulfillmentEvents: test.fulfillments, + } + + lp := &mocklp.LogPoller{} + feeder := NewFeeder( + logger.TestLogger(t), + 
coordinator, + &test.bhs, + lp, + 0, + test.wait, + test.lookback, + 600*time.Second, + func(ctx context.Context) (uint64, error) { + return test.latest, nil + }) + + err := feeder.Run(testutils.Context(t)) + if test.expectedErrMsg == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, test.expectedErrMsg) + } + + require.ElementsMatch(t, test.expectedStored, test.bhs.Stored) + require.ElementsMatch(t, test.expectedStoredMapBlocks, maps.Keys(feeder.stored)) +} + +func TestFeederWithLogPollerVRFv1(t *testing.T) { + for _, test := range tests { + t.Run(test.name, test.testFeederWithLogPollerVRFv1) + } +} + +func (test testCase) testFeederWithLogPollerVRFv1(t *testing.T) { + var coordinatorAddress = common.HexToAddress("0x514910771AF9Ca656af840dff83E8264EcF986CA") + + // Instantiate log poller & coordinator. + lp := &mocklp.LogPoller{} + lp.On("RegisterFilter", mock.Anything).Return(nil) + c, err := solidity_vrf_coordinator_interface.NewVRFCoordinator(coordinatorAddress, nil) + require.NoError(t, err) + coordinator := &V1Coordinator{ + c: c, + lp: lp, + } + + // Assert search window. + latest := int64(test.latest) + fromBlock := mathutil.Max(latest-int64(test.lookback), 0) + toBlock := mathutil.Max(latest-int64(test.wait), 0) + + // Construct request logs. + var requestLogs []logpoller.Log + for _, r := range test.requests { + if r.Block < uint64(fromBlock) || r.Block > uint64(toBlock) { + continue // do not include blocks outside our search window + } + requestLogs = append( + requestLogs, + newRandomnessRequestedLogV1(t, r.Block, r.ID, coordinatorAddress), + ) + } + + // Construct fulfillment logs. + var fulfillmentLogs []logpoller.Log + for _, r := range test.fulfillments { + fulfillmentLogs = append( + fulfillmentLogs, + newRandomnessFulfilledLogV1(t, r.Block, r.ID, coordinatorAddress), + ) + } + + // Mock log poller. + lp.On("LatestBlock", mock.Anything). 
+ Return(logpoller.LogPollerBlock{BlockNumber: latest}, nil) + lp.On( + "LogsWithSigs", + fromBlock, + toBlock, + []common.Hash{ + solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest{}.Topic(), + }, + coordinatorAddress, + mock.Anything, + ).Return(requestLogs, nil) + lp.On( + "LogsWithSigs", + fromBlock, + latest, + []common.Hash{ + solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequestFulfilled{}.Topic(), + }, + coordinatorAddress, + mock.Anything, + ).Return(fulfillmentLogs, nil) + + // Instantiate feeder. + feeder := NewFeeder( + logger.TestLogger(t), + coordinator, + &test.bhs, + lp, + 0, + test.wait, + test.lookback, + 600*time.Second, + func(ctx context.Context) (uint64, error) { + return test.latest, nil + }) + + // Run feeder and assert correct results. + err = feeder.Run(testutils.Context(t)) + if test.expectedErrMsg == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, test.expectedErrMsg) + } + require.ElementsMatch(t, test.expectedStored, test.bhs.Stored) + require.ElementsMatch(t, test.expectedStoredMapBlocks, maps.Keys(feeder.stored)) +} + +func TestFeederWithLogPollerVRFv2(t *testing.T) { + for _, test := range tests { + t.Run(test.name, test.testFeederWithLogPollerVRFv2) + } +} + +func (test testCase) testFeederWithLogPollerVRFv2(t *testing.T) { + var coordinatorAddress = common.HexToAddress("0x514910771AF9Ca656af840dff83E8264EcF986CA") + + // Instantiate log poller & coordinator. + lp := &mocklp.LogPoller{} + lp.On("RegisterFilter", mock.Anything).Return(nil) + c, err := vrf_coordinator_v2.NewVRFCoordinatorV2(coordinatorAddress, nil) + require.NoError(t, err) + coordinator := &V2Coordinator{ + c: c, + lp: lp, + } + + // Assert search window. + latest := int64(test.latest) + fromBlock := mathutil.Max(latest-int64(test.lookback), 0) + toBlock := mathutil.Max(latest-int64(test.wait), 0) + + // Construct request logs. 
+ var requestLogs []logpoller.Log + for _, r := range test.requests { + if r.Block < uint64(fromBlock) || r.Block > uint64(toBlock) { + continue // do not include blocks outside our search window + } + reqId, ok := big.NewInt(0).SetString(r.ID, 10) + require.True(t, ok) + requestLogs = append( + requestLogs, + newRandomnessRequestedLogV2(t, r.Block, reqId, coordinatorAddress), + ) + } + + // Construct fulfillment logs. + var fulfillmentLogs []logpoller.Log + for _, r := range test.fulfillments { + reqId, ok := big.NewInt(0).SetString(r.ID, 10) + require.True(t, ok) + fulfillmentLogs = append( + fulfillmentLogs, + newRandomnessFulfilledLogV2(t, r.Block, reqId, coordinatorAddress), + ) + } + + // Mock log poller. + lp.On("LatestBlock", mock.Anything). + Return(logpoller.LogPollerBlock{BlockNumber: latest}, nil) + lp.On( + "LogsWithSigs", + fromBlock, + toBlock, + []common.Hash{ + vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{}.Topic(), + }, + coordinatorAddress, + mock.Anything, + ).Return(requestLogs, nil) + lp.On( + "LogsWithSigs", + fromBlock, + latest, + []common.Hash{ + vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled{}.Topic(), + }, + coordinatorAddress, + mock.Anything, + ).Return(fulfillmentLogs, nil) + + // Instantiate feeder. + feeder := NewFeeder( + logger.TestLogger(t), + coordinator, + &test.bhs, + lp, + 0, + test.wait, + test.lookback, + 600*time.Second, + func(ctx context.Context) (uint64, error) { + return test.latest, nil + }) + + // Run feeder and assert correct results. 
+ err = feeder.Run(testutils.Context(t)) + if test.expectedErrMsg == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, test.expectedErrMsg) + } + require.ElementsMatch(t, test.expectedStored, test.bhs.Stored) + require.ElementsMatch(t, test.expectedStoredMapBlocks, maps.Keys(feeder.stored)) +} + +func TestFeederWithLogPollerVRFv2Plus(t *testing.T) { + for _, test := range tests { + t.Run(test.name, test.testFeederWithLogPollerVRFv2Plus) + } +} + +func (test testCase) testFeederWithLogPollerVRFv2Plus(t *testing.T) { + var coordinatorAddress = common.HexToAddress("0x514910771AF9Ca656af840dff83E8264EcF986CA") + + // Instantiate log poller & coordinator. + lp := &mocklp.LogPoller{} + lp.On("RegisterFilter", mock.Anything).Return(nil) + c, err := vrf_coordinator_v2plus_interface.NewIVRFCoordinatorV2PlusInternal(coordinatorAddress, nil) + require.NoError(t, err) + coordinator := &V2PlusCoordinator{ + c: c, + lp: lp, + } + + // Assert search window. + latest := int64(test.latest) + fromBlock := mathutil.Max(latest-int64(test.lookback), 0) + toBlock := mathutil.Max(latest-int64(test.wait), 0) + + // Construct request logs. + var requestLogs []logpoller.Log + for _, r := range test.requests { + if r.Block < uint64(fromBlock) || r.Block > uint64(toBlock) { + continue // do not include blocks outside our search window + } + reqId, ok := big.NewInt(0).SetString(r.ID, 10) + require.True(t, ok) + requestLogs = append( + requestLogs, + newRandomnessRequestedLogV2Plus(t, r.Block, reqId, coordinatorAddress), + ) + } + + // Construct fulfillment logs. + var fulfillmentLogs []logpoller.Log + for _, r := range test.fulfillments { + reqId, ok := big.NewInt(0).SetString(r.ID, 10) + require.True(t, ok) + fulfillmentLogs = append( + fulfillmentLogs, + newRandomnessFulfilledLogV2Plus(t, r.Block, reqId, coordinatorAddress), + ) + } + + // Mock log poller. + lp.On("LatestBlock", mock.Anything). 
+ Return(logpoller.LogPollerBlock{BlockNumber: latest}, nil) + lp.On( + "LogsWithSigs", + fromBlock, + toBlock, + []common.Hash{ + vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRandomWordsRequested{}.Topic(), + }, + coordinatorAddress, + mock.Anything, + ).Return(requestLogs, nil) + lp.On( + "LogsWithSigs", + fromBlock, + latest, + []common.Hash{ + vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRandomWordsFulfilled{}.Topic(), + }, + coordinatorAddress, + mock.Anything, + ).Return(fulfillmentLogs, nil) + + // Instantiate feeder. + feeder := NewFeeder( + logger.TestLogger(t), + coordinator, + &test.bhs, + lp, + 0, + test.wait, + test.lookback, + 600*time.Second, + func(ctx context.Context) (uint64, error) { + return test.latest, nil + }) + + // Run feeder and assert correct results. + err = feeder.Run(testutils.Context(t)) + if test.expectedErrMsg == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, test.expectedErrMsg) + } + require.ElementsMatch(t, test.expectedStored, test.bhs.Stored) + require.ElementsMatch(t, test.expectedStoredMapBlocks, maps.Keys(feeder.stored)) +} + +func TestFeeder_CachesStoredBlocks(t *testing.T) { + coordinator := &TestCoordinator{ + RequestEvents: []Event{{Block: 100, ID: "1000"}}, + } + + bhs := &TestBHS{} + + lp := &mocklp.LogPoller{} + feeder := NewFeeder( + logger.TestLogger(t), + coordinator, + bhs, + lp, + 0, + 100, + 200, + 600*time.Second, + func(ctx context.Context) (uint64, error) { + return 250, nil + }) + + // Should store block 100 + require.NoError(t, feeder.Run(testutils.Context(t))) + require.ElementsMatch(t, []uint64{100}, bhs.Stored) + + // Remove 100 from the BHS and try again, it should not be stored since it's cached in the + // feeder + bhs.Stored = nil + require.NoError(t, feeder.Run(testutils.Context(t))) + require.Empty(t, bhs.Stored) + + // Run the feeder on a later block and make sure the cache is pruned + feeder.latestBlock = func(ctx context.Context) (uint64, 
error) { + return 500, nil + } + require.NoError(t, feeder.Run(testutils.Context(t))) + require.Empty(t, feeder.stored) +} + +func newRandomnessRequestedLogV1( + t *testing.T, + requestBlock uint64, + requestID string, + coordinatorAddress common.Address, +) logpoller.Log { + e := solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest{ + KeyHash: common.HexToHash("keyhash"), + Seed: big.NewInt(0), + Sender: common.Address{}, + JobID: common.HexToHash("job"), + Fee: big.NewInt(0), + RequestID: common.HexToHash(requestID), + } + var unindexed abi.Arguments + for _, a := range vrfCoordinatorV1ABI.Events[randomWordsRequestedV1].Inputs { + if !a.Indexed { + unindexed = append(unindexed, a) + } + } + nonIndexedData, err := unindexed.Pack( + e.KeyHash, + e.Seed, + e.Sender, + e.Fee, + e.RequestID, + ) + require.NoError(t, err) + + jobIDType, err := abi.NewType("bytes32", "", nil) + require.NoError(t, err) + + jobIDArg := abi.Arguments{abi.Argument{ + Name: "jobID", + Type: jobIDType, + Indexed: true, + }} + + topic1, err := jobIDArg.Pack(e.JobID) + require.NoError(t, err) + + topic0 := vrfCoordinatorV1ABI.Events[randomWordsRequestedV1].ID + lg := logpoller.Log{ + Address: coordinatorAddress, + Data: nonIndexedData, + Topics: [][]byte{ + // first topic is the event signature + topic0.Bytes(), + // second topic is JobID since it's indexed + topic1, + }, + BlockNumber: int64(requestBlock), + EventSig: topic0, + } + return lg +} + +func newRandomnessFulfilledLogV1( + t *testing.T, + requestBlock uint64, + requestID string, + coordinatorAddress common.Address, +) logpoller.Log { + e := solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequestFulfilled{ + RequestId: common.HexToHash(requestID), + Output: big.NewInt(0), + } + var unindexed abi.Arguments + for _, a := range vrfCoordinatorV1ABI.Events[randomWordsFulfilledV1].Inputs { + if !a.Indexed { + unindexed = append(unindexed, a) + } + } + nonIndexedData, err := unindexed.Pack( + e.RequestId, + e.Output, 
+ ) + require.NoError(t, err) + + topic0 := vrfCoordinatorV1ABI.Events[randomWordsFulfilledV1].ID + lg := logpoller.Log{ + Address: coordinatorAddress, + Data: nonIndexedData, + Topics: [][]byte{ + // first topic is the event signature + topic0.Bytes(), + }, + BlockNumber: int64(requestBlock), + EventSig: topic0, + } + return lg +} + +func newRandomnessRequestedLogV2( + t *testing.T, + requestBlock uint64, + requestID *big.Int, + coordinatorAddress common.Address, +) logpoller.Log { + e := vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{ + RequestId: requestID, + PreSeed: big.NewInt(0), + MinimumRequestConfirmations: 0, + CallbackGasLimit: 0, + NumWords: 0, + Sender: common.HexToAddress("0xeFF41C8725be95e66F6B10489B6bF34b08055853"), + Raw: types.Log{ + BlockNumber: requestBlock, + }, + } + var unindexed abi.Arguments + for _, a := range vrfCoordinatorV2ABI.Events[randomWordsRequestedV2].Inputs { + if !a.Indexed { + unindexed = append(unindexed, a) + } + } + nonIndexedData, err := unindexed.Pack( + e.RequestId, + e.PreSeed, + e.MinimumRequestConfirmations, + e.CallbackGasLimit, + e.NumWords, + ) + require.NoError(t, err) + + keyHashType, err := abi.NewType("bytes32", "", nil) + require.NoError(t, err) + + subIdType, err := abi.NewType("uint64", "", nil) + require.NoError(t, err) + + senderType, err := abi.NewType("address", "", nil) + require.NoError(t, err) + + keyHashArg := abi.Arguments{abi.Argument{ + Name: "keyHash", + Type: keyHashType, + Indexed: true, + }} + subIdArg := abi.Arguments{abi.Argument{ + Name: "subId", + Type: subIdType, + Indexed: true, + }} + + senderArg := abi.Arguments{abi.Argument{ + Name: "sender", + Type: senderType, + Indexed: true, + }} + + topic1, err := keyHashArg.Pack(e.KeyHash) + require.NoError(t, err) + topic2, err := subIdArg.Pack(e.SubId) + require.NoError(t, err) + topic3, err := senderArg.Pack(e.Sender) + require.NoError(t, err) + + topic0 := vrfCoordinatorV2ABI.Events[randomWordsRequestedV2].ID + lg := logpoller.Log{ + 
Address: coordinatorAddress, + Data: nonIndexedData, + Topics: [][]byte{ + // first topic is the event signature + topic0.Bytes(), + // second topic is keyHash since it's indexed + topic1, + // third topic is subId since it's indexed + topic2, + // third topic is sender since it's indexed + topic3, + }, + BlockNumber: int64(requestBlock), + EventSig: topic0, + } + return lg +} + +func newRandomnessFulfilledLogV2( + t *testing.T, + requestBlock uint64, + requestID *big.Int, + coordinatorAddress common.Address, +) logpoller.Log { + e := vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled{ + RequestId: requestID, + OutputSeed: big.NewInt(0), + Payment: big.NewInt(0), + Success: true, + Raw: types.Log{ + BlockNumber: requestBlock, + }, + } + var unindexed abi.Arguments + for _, a := range vrfCoordinatorV2ABI.Events[randomWordsFulfilledV2].Inputs { + if !a.Indexed { + unindexed = append(unindexed, a) + } + } + nonIndexedData, err := unindexed.Pack( + e.OutputSeed, + e.Payment, + e.Success, + ) + require.NoError(t, err) + + requestIdType, err := abi.NewType("uint256", "", nil) + require.NoError(t, err) + + requestIdArg := abi.Arguments{abi.Argument{ + Name: "requestId", + Type: requestIdType, + Indexed: true, + }} + + topic1, err := requestIdArg.Pack(e.RequestId) + require.NoError(t, err) + + topic0 := vrfCoordinatorV2ABI.Events[randomWordsFulfilledV2].ID + lg := logpoller.Log{ + Address: coordinatorAddress, + Data: nonIndexedData, + Topics: [][]byte{ + // first topic is the event signature + topic0.Bytes(), + // second topic is requestId since it's indexed + topic1, + }, + BlockNumber: int64(requestBlock), + EventSig: topic0, + } + return lg +} + +func newRandomnessRequestedLogV2Plus( + t *testing.T, + requestBlock uint64, + requestID *big.Int, + coordinatorAddress common.Address, +) logpoller.Log { + e := vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRandomWordsRequested{ + RequestId: requestID, + PreSeed: big.NewInt(0), + 
MinimumRequestConfirmations: 0, + CallbackGasLimit: 0, + NumWords: 0, + Sender: common.HexToAddress("0xeFF41C8725be95e66F6B10489B6bF34b08055853"), + ExtraArgs: []byte{}, + SubId: big.NewInt(0), + Raw: types.Log{ + BlockNumber: requestBlock, + }, + } + var unindexed abi.Arguments + for _, a := range vrfCoordinatorV2PlusABI.Events[randomWordsRequestedV2Plus].Inputs { + if !a.Indexed { + unindexed = append(unindexed, a) + } + } + nonIndexedData, err := unindexed.Pack( + e.RequestId, + e.PreSeed, + e.MinimumRequestConfirmations, + e.CallbackGasLimit, + e.NumWords, + e.ExtraArgs, + ) + require.NoError(t, err) + + keyHashType, err := abi.NewType("bytes32", "", nil) + require.NoError(t, err) + + subIdType, err := abi.NewType("uint256", "", nil) + require.NoError(t, err) + + senderType, err := abi.NewType("address", "", nil) + require.NoError(t, err) + + keyHashArg := abi.Arguments{abi.Argument{ + Name: "keyHash", + Type: keyHashType, + Indexed: true, + }} + subIdArg := abi.Arguments{abi.Argument{ + Name: "subId", + Type: subIdType, + Indexed: true, + }} + + senderArg := abi.Arguments{abi.Argument{ + Name: "sender", + Type: senderType, + Indexed: true, + }} + + topic1, err := keyHashArg.Pack(e.KeyHash) + require.NoError(t, err) + topic2, err := subIdArg.Pack(e.SubId) + require.NoError(t, err) + topic3, err := senderArg.Pack(e.Sender) + require.NoError(t, err) + + topic0 := vrfCoordinatorV2PlusABI.Events[randomWordsRequestedV2Plus].ID + lg := logpoller.Log{ + Address: coordinatorAddress, + Data: nonIndexedData, + Topics: [][]byte{ + // first topic is the event signature + topic0.Bytes(), + // second topic is keyHash since it's indexed + topic1, + // third topic is subId since it's indexed + topic2, + // third topic is sender since it's indexed + topic3, + }, + BlockNumber: int64(requestBlock), + EventSig: topic0, + } + return lg +} + +func newRandomnessFulfilledLogV2Plus( + t *testing.T, + requestBlock uint64, + requestID *big.Int, + coordinatorAddress common.Address, +) 
logpoller.Log { + e := vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRandomWordsFulfilled{ + RequestId: requestID, + OutputSeed: big.NewInt(0), + Payment: big.NewInt(0), + Success: true, + Raw: types.Log{ + BlockNumber: requestBlock, + }, + SubId: big.NewInt(0), + } + var unindexed abi.Arguments + for _, a := range vrfCoordinatorV2PlusABI.Events[randomWordsFulfilledV2Plus].Inputs { + if !a.Indexed { + unindexed = append(unindexed, a) + } + } + nonIndexedData, err := unindexed.Pack( + e.OutputSeed, + e.Payment, + e.Success, + e.OnlyPremium, + ) + require.NoError(t, err) + + requestIdType, err := abi.NewType("uint256", "", nil) + require.NoError(t, err) + subIdType, err := abi.NewType("uint256", "", nil) + require.NoError(t, err) + + requestIdArg := abi.Arguments{abi.Argument{ + Name: "requestId", + Type: requestIdType, + Indexed: true, + }} + subIdArg := abi.Arguments{abi.Argument{ + Name: "subID", + Type: subIdType, + Indexed: true, + }} + + topic1, err := requestIdArg.Pack(e.RequestId) + require.NoError(t, err) + topic2, err := subIdArg.Pack(e.SubId) + require.NoError(t, err) + + topic0 := vrfCoordinatorV2PlusABI.Events[randomWordsFulfilledV2Plus].ID + lg := logpoller.Log{ + Address: coordinatorAddress, + Data: nonIndexedData, + Topics: [][]byte{ + // first topic is the event signature + topic0.Bytes(), + // second topic is requestId since it's indexed + topic1, + topic2, + }, + BlockNumber: int64(requestBlock), + EventSig: topic0, + } + return lg +} diff --git a/core/services/blockhashstore/mocks/bhs.go b/core/services/blockhashstore/mocks/bhs.go new file mode 100644 index 00000000..a69016c8 --- /dev/null +++ b/core/services/blockhashstore/mocks/bhs.go @@ -0,0 +1,130 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" +) + +// BHS is an autogenerated mock type for the BHS type +type BHS struct { + mock.Mock +} + +// IsStored provides a mock function with given fields: ctx, blockNum +func (_m *BHS) IsStored(ctx context.Context, blockNum uint64) (bool, error) { + ret := _m.Called(ctx, blockNum) + + if len(ret) == 0 { + panic("no return value specified for IsStored") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (bool, error)); ok { + return rf(ctx, blockNum) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) bool); ok { + r0 = rf(ctx, blockNum) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, blockNum) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsTrusted provides a mock function with given fields: +func (_m *BHS) IsTrusted() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsTrusted") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Store provides a mock function with given fields: ctx, blockNum +func (_m *BHS) Store(ctx context.Context, blockNum uint64) error { + ret := _m.Called(ctx, blockNum) + + if len(ret) == 0 { + panic("no return value specified for Store") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok { + r0 = rf(ctx, blockNum) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StoreEarliest provides a mock function with given fields: ctx +func (_m *BHS) StoreEarliest(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for StoreEarliest") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else 
{ + r0 = ret.Error(0) + } + + return r0 +} + +// StoreTrusted provides a mock function with given fields: ctx, blockNums, blockhashes, recentBlock, recentBlockhash +func (_m *BHS) StoreTrusted(ctx context.Context, blockNums []uint64, blockhashes []common.Hash, recentBlock uint64, recentBlockhash common.Hash) error { + ret := _m.Called(ctx, blockNums, blockhashes, recentBlock, recentBlockhash) + + if len(ret) == 0 { + panic("no return value specified for StoreTrusted") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []uint64, []common.Hash, uint64, common.Hash) error); ok { + r0 = rf(ctx, blockNums, blockhashes, recentBlock, recentBlockhash) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewBHS creates a new instance of BHS. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBHS(t interface { + mock.TestingT + Cleanup(func()) +}) *BHS { + mock := &BHS{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/blockhashstore/mocks/timer.go b/core/services/blockhashstore/mocks/timer.go new file mode 100644 index 00000000..4236bdf8 --- /dev/null +++ b/core/services/blockhashstore/mocks/timer.go @@ -0,0 +1,48 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" +) + +// Timer is an autogenerated mock type for the Timer type +type Timer struct { + mock.Mock +} + +// After provides a mock function with given fields: d +func (_m *Timer) After(d time.Duration) <-chan time.Time { + ret := _m.Called(d) + + if len(ret) == 0 { + panic("no return value specified for After") + } + + var r0 <-chan time.Time + if rf, ok := ret.Get(0).(func(time.Duration) <-chan time.Time); ok { + r0 = rf(d) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan time.Time) + } + } + + return r0 +} + +// NewTimer creates a new instance of Timer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimer(t interface { + mock.TestingT + Cleanup(func()) +}) *Timer { + mock := &Timer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/blockhashstore/test_util.go b/core/services/blockhashstore/test_util.go new file mode 100644 index 00000000..0ab07931 --- /dev/null +++ b/core/services/blockhashstore/test_util.go @@ -0,0 +1,151 @@ +package blockhashstore + +import ( + "context" + "crypto/rand" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" +) + +type TestCoordinator struct { + RequestEvents []Event + FulfillmentEvents []Event +} + +func (t *TestCoordinator) Addresses() []common.Address { + return []common.Address{} +} + +func (t *TestCoordinator) Requests(_ context.Context, fromBlock uint64, toBlock uint64) ([]Event, error) { + var result []Event + for _, req := range t.RequestEvents { + if req.Block >= fromBlock && req.Block <= toBlock { + result = append(result, req) + } + } + return result, nil +} + +func (t *TestCoordinator) Fulfillments(_ context.Context, fromBlock uint64) ([]Event, error) { + var result []Event + for _, ful := range 
t.FulfillmentEvents {
+		if ful.Block >= fromBlock {
+			result = append(result, ful)
+		}
+	}
+	return result, nil
+}
+
+type TestBHS struct {
+	Stored []uint64
+
+	StoredEarliest bool
+
+	// errorsStore defines which block numbers should return errors on Store.
+	ErrorsStore []uint64
+
+	// errorsIsStored defines which block numbers should return errors on IsStored.
+	ErrorsIsStored []uint64
+}
+
+func (t *TestBHS) Store(_ context.Context, blockNum uint64) error {
+	for _, e := range t.ErrorsStore {
+		if e == blockNum {
+			return errors.New("error storing")
+		}
+	}
+
+	t.Stored = append(t.Stored, blockNum)
+	return nil
+}
+
+func (t *TestBHS) IsTrusted() bool {
+	return false
+}
+
+func (t *TestBHS) StoreTrusted(
+	ctx context.Context, blockNums []uint64, blockhashes []common.Hash, recentBlock uint64, recentBlockhash common.Hash,
+) error {
+	return errors.New("not implemented")
+}
+
+func (t *TestBHS) IsStored(_ context.Context, blockNum uint64) (bool, error) {
+	for _, e := range t.ErrorsIsStored {
+		if e == blockNum {
+			return false, errors.New("error checking if stored")
+		}
+	}
+
+	for _, s := range t.Stored {
+		if s == blockNum {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+func (t *TestBHS) StoreEarliest(ctx context.Context) error {
+	t.StoredEarliest = true
+	return nil
+}
+
+type TestBatchBHS struct {
+	Stored                       []uint64
+	GetBlockhashesCallCounter    uint16
+	StoreVerifyHeaderCallCounter uint16
+	GetBlockhashesError          error
+	StoreVerifyHeadersError      error
+}
+
+func (t *TestBatchBHS) GetBlockhashes(_ context.Context, blockNumbers []*big.Int) ([][32]byte, error) {
+	t.GetBlockhashesCallCounter++
+	if t.GetBlockhashesError != nil {
+		return nil, t.GetBlockhashesError
+	}
+	var blockhashes [][32]byte
+	for _, b := range blockNumbers {
+		var randomBlockhash [32]byte
+		for _, stored := range t.Stored {
+			if stored == b.Uint64() {
+				_, err := rand.Read(randomBlockhash[:])
+				if err != nil {
+					return nil, err
+				}
+			}
+		}
+		blockhashes = append(blockhashes, randomBlockhash)
+	}
+	return blockhashes, nil
+}
+
+func (t *TestBatchBHS) StoreVerifyHeader(ctx context.Context, blockNumbers []*big.Int, blockHeaders [][]byte, fromAddress common.Address) error {
+	t.StoreVerifyHeaderCallCounter++
+	if t.StoreVerifyHeadersError != nil {
+		return t.StoreVerifyHeadersError
+	}
+	if len(blockNumbers) != len(blockHeaders) {
+		return errors.Errorf("input length did not match. blockNumbers length: %d, blockHeaders length: %d", len(blockNumbers), len(blockHeaders))
+	}
+	for _, blockNumber := range blockNumbers {
+		t.Stored = append(t.Stored, blockNumber.Uint64())
+	}
+	return nil
+}
+
+type TestBlockHeaderProvider struct {
+}
+
+func (p *TestBlockHeaderProvider) RlpHeadersBatch(ctx context.Context, blockRange []*big.Int) ([][]byte, error) {
+	var headers [][]byte
+	for range blockRange {
+		var randomBytes [30]byte //random length
+		_, err := rand.Read(randomBytes[:])
+		if err != nil {
+			return nil, err
+		}
+		headers = append(headers, randomBytes[:])
+	}
+	return headers, nil
+}
diff --git a/core/services/blockhashstore/validate.go b/core/services/blockhashstore/validate.go
new file mode 100644
index 00000000..082927d8
--- /dev/null
+++ b/core/services/blockhashstore/validate.go
@@ -0,0 +1,94 @@
+package blockhashstore
+
+import (
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/pelletier/go-toml"
+	"github.com/pkg/errors"
+
+	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils"
+	"github.com/goplugin/pluginv3.0/v2/core/services/job"
+)
+
+var EmptyAddress = utils.ZeroAddress.Hex()
+
+// ValidatedSpec validates and converts the given toml string to a job.Job.
+func ValidatedSpec(tomlString string) (job.Job, error) {
+	jb := job.Job{
+		// Default to generating a UUID, can be overwritten by the specified one in tomlString.
+ ExternalJobID: uuid.New(), + } + + tree, err := toml.Load(tomlString) + if err != nil { + return jb, errors.Wrap(err, "loading toml") + } + + err = tree.Unmarshal(&jb) + if err != nil { + return jb, errors.Wrap(err, "unmarshalling toml spec") + } + + if jb.Type != job.BlockhashStore { + return jb, errors.Errorf("unsupported type %s", jb.Type) + } + + var spec job.BlockhashStoreSpec + err = tree.Unmarshal(&spec) + if err != nil { + return jb, errors.Wrap(err, "unmarshalling toml job") + } + + // Required fields + if spec.CoordinatorV1Address == nil && spec.CoordinatorV2Address == nil && spec.CoordinatorV2PlusAddress == nil { + return jb, errors.New( + `at least one of "coordinatorV1Address", "coordinatorV2Address" and "coordinatorV2PlusAddress" must be set`) + } + if spec.BlockhashStoreAddress == "" { + return jb, notSet("blockhashStoreAddress") + } + if spec.EVMChainID == nil { + return jb, notSet("evmChainID") + } + if spec.TrustedBlockhashStoreAddress != nil && spec.TrustedBlockhashStoreAddress.Hex() != EmptyAddress && spec.TrustedBlockhashStoreBatchSize == 0 { + return jb, notSet("trustedBlockhashStoreBatchSize") + } + + // Defaults + if spec.WaitBlocks == 0 { + spec.WaitBlocks = 100 + } + if spec.LookbackBlocks == 0 { + spec.LookbackBlocks = 200 + } + if spec.PollPeriod == 0 { + spec.PollPeriod = 30 * time.Second + } + if spec.RunTimeout == 0 { + spec.RunTimeout = 30 * time.Second + } + if spec.HeartbeatPeriod < 0 { + return jb, errors.New(`"heartbeatPeriod" must be greater than 0`) + } + // spec.HeartbeatPeriodTime == 0, default is heartbeat disabled + + // Validation + if spec.WaitBlocks >= spec.LookbackBlocks { + return jb, errors.New(`"waitBlocks" must be less than "lookbackBlocks"`) + } + if (spec.TrustedBlockhashStoreAddress == nil || spec.TrustedBlockhashStoreAddress.Hex() == EmptyAddress) && spec.WaitBlocks >= 256 { + return jb, errors.New(`"waitBlocks" must be less than 256`) + } + if (spec.TrustedBlockhashStoreAddress == nil || 
spec.TrustedBlockhashStoreAddress.Hex() == EmptyAddress) && spec.LookbackBlocks >= 256 { + return jb, errors.New(`"lookbackBlocks" must be less than 256`) + } + + jb.BlockhashStoreSpec = &spec + + return jb, nil +} + +func notSet(field string) error { + return errors.Errorf("%q must be set", field) +} diff --git a/core/services/blockhashstore/validate_test.go b/core/services/blockhashstore/validate_test.go new file mode 100644 index 00000000..8c92399f --- /dev/null +++ b/core/services/blockhashstore/validate_test.go @@ -0,0 +1,259 @@ +package blockhashstore + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +func TestValidate(t *testing.T) { + v1Coordinator := ethkey.EIP55Address("0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139") + v2Coordinator := ethkey.EIP55Address("0x2be990eE17832b59E0086534c5ea2459Aa75E38F") + fromAddresses := []ethkey.EIP55Address{("0x469aA2CD13e037DC5236320783dCfd0e641c0559")} + + var tests = []struct { + name string + toml string + assertion func(t *testing.T, os job.Job, err error) + }{ + { + name: "valid", + toml: ` +type = "blockhashstore" +name = "valid-test" +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +coordinatorV2Address = "0x2be990eE17832b59E0086534c5ea2459Aa75E38F" +waitBlocks = 59 +lookbackBlocks = 159 +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +pollPeriod = "23s" +runTimeout = "7s" +evmChainID = "4" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"]`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + require.Equal(t, job.BlockhashStore, os.Type) + require.Equal(t, "valid-test", os.Name.String) + require.Equal(t, &v1Coordinator, + os.BlockhashStoreSpec.CoordinatorV1Address) + require.Equal(t, &v2Coordinator, + 
os.BlockhashStoreSpec.CoordinatorV2Address) + require.Equal(t, int32(59), os.BlockhashStoreSpec.WaitBlocks) + require.Equal(t, int32(159), os.BlockhashStoreSpec.LookbackBlocks) + require.Equal(t, ethkey.EIP55Address("0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17"), + os.BlockhashStoreSpec.BlockhashStoreAddress) + require.Equal(t, 23*time.Second, os.BlockhashStoreSpec.PollPeriod) + require.Equal(t, 7*time.Second, os.BlockhashStoreSpec.RunTimeout) + require.Equal(t, big.NewI(4), os.BlockhashStoreSpec.EVMChainID) + require.Equal(t, fromAddresses, + os.BlockhashStoreSpec.FromAddresses) + }, + }, + { + name: "defaults", + toml: ` +type = "blockhashstore" +name = "defaults-test" +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +coordinatorV2Address = "0x2be990eE17832b59E0086534c5ea2459Aa75E38F" +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +evmChainID = "4"`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + require.Equal(t, int32(100), os.BlockhashStoreSpec.WaitBlocks) + require.Equal(t, int32(200), os.BlockhashStoreSpec.LookbackBlocks) + require.Equal(t, time.Duration(0), os.BlockhashStoreSpec.HeartbeatPeriod) + require.Nil(t, os.BlockhashStoreSpec.FromAddresses) + require.Equal(t, 30*time.Second, os.BlockhashStoreSpec.PollPeriod) + require.Equal(t, 30*time.Second, os.BlockhashStoreSpec.RunTimeout) + }, + }, + { + name: "heartbeattimeset", + toml: ` +type = "blockhashstore" +name = "heartbeat-blocks-test" +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +coordinatorV2Address = "0x2be990eE17832b59E0086534c5ea2459Aa75E38F" +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +heartbeatPeriod = "650s" +evmChainID = "4"`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + require.Equal(t, int32(100), os.BlockhashStoreSpec.WaitBlocks) + require.Equal(t, int32(200), os.BlockhashStoreSpec.LookbackBlocks) + require.Equal(t, 
time.Duration(650)*time.Second, os.BlockhashStoreSpec.HeartbeatPeriod) + require.Nil(t, os.BlockhashStoreSpec.FromAddresses) + require.Equal(t, 30*time.Second, os.BlockhashStoreSpec.PollPeriod) + require.Equal(t, 30*time.Second, os.BlockhashStoreSpec.RunTimeout) + }, + }, + { + name: "v1 only", + toml: ` +type = "blockhashstore" +name = "defaults-test" +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +evmChainID = "4" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"]`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + require.Equal(t, &v1Coordinator, + os.BlockhashStoreSpec.CoordinatorV1Address) + require.Nil(t, os.BlockhashStoreSpec.CoordinatorV2Address) + }, + }, + { + name: "v2 only", + toml: ` +type = "blockhashstore" +name = "defaults-test" +coordinatorV2Address = "0x2be990eE17832b59E0086534c5ea2459Aa75E38F" +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +evmChainID = "4" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"]`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + require.Nil(t, os.BlockhashStoreSpec.CoordinatorV1Address) + require.Equal(t, &v2Coordinator, os.BlockhashStoreSpec.CoordinatorV2Address) + }, + }, + { + name: "invalid no coordinators", + toml: ` +type = "blockhashstore" +name = "defaults-test" +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +evmChainID = "4" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"]`, + assertion: func(t *testing.T, os job.Job, err error) { + require.EqualError(t, err, `at least one of "coordinatorV1Address", "coordinatorV2Address" and "coordinatorV2PlusAddress" must be set`) + }, + }, + { + name: "invalid no blockhashstore", + toml: ` +type = "blockhashstore" +name = "defaults-test" +coordinatorV2Address = "0x2be990eE17832b59E0086534c5ea2459Aa75E38F" +evmChainID = "4" 
+fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"]`, + assertion: func(t *testing.T, os job.Job, err error) { + require.EqualError(t, err, `"blockhashStoreAddress" must be set`) + }, + }, + { + name: "invalid no chain ID", + toml: ` +type = "blockhashstore" +name = "defaults-test" +coordinatorV2Address = "0x2be990eE17832b59E0086534c5ea2459Aa75E38F" +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"]`, + assertion: func(t *testing.T, os job.Job, err error) { + require.EqualError(t, err, `"evmChainID" must be set`) + }, + }, + { + name: "invalid waitBlocks too high", + toml: ` +type = "blockhashstore" +name = "valid-test" +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +coordinatorV2Address = "0x2be990eE17832b59E0086534c5ea2459Aa75E38F" +waitBlocks = 257 +lookbackBlocks = 258 +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +evmChainID = "4"`, + assertion: func(t *testing.T, os job.Job, err error) { + require.EqualError(t, err, `"waitBlocks" must be less than 256`) + }, + }, + { + name: "invalid lookbackBlocks too high", + toml: ` +type = "blockhashstore" +name = "valid-test" +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +coordinatorV2Address = "0x2be990eE17832b59E0086534c5ea2459Aa75E38F" +lookbackBlocks = 257 +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +evmChainID = "4"`, + assertion: func(t *testing.T, os job.Job, err error) { + require.EqualError(t, err, `"lookbackBlocks" must be less than 256`) + }, + }, + { + name: "invalid waitBlocks higher than lookbackBlocks", + toml: ` +type = "blockhashstore" +name = "valid-test" +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +coordinatorV2Address = "0x2be990eE17832b59E0086534c5ea2459Aa75E38F" +waitBlocks = 200 +lookbackBlocks = 100 +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +evmChainID = 
"4"`, + assertion: func(t *testing.T, os job.Job, err error) { + require.EqualError(t, err, `"waitBlocks" must be less than "lookbackBlocks"`) + }, + }, + { + name: "invalid waitBlocks higher than lookbackBlocks", + toml: ` +type = "blockhashstore" +name = "valid-test" +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +coordinatorV2Address = "0x2be990eE17832b59E0086534c5ea2459Aa75E38F" +waitBlocks = 10 +lookbackBlocks = 100 +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +trustedBlockhashStoreAddress = "0x469aA2CD13e037DC5236320783dCfd0e641c0559" +evmChainID = "4"`, + assertion: func(t *testing.T, os job.Job, err error) { + require.EqualError(t, err, `"trustedBlockhashStoreBatchSize" must be set`) + }, + }, + { + name: "invalid toml", + toml: ` +type = invalid`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "loading toml") + }, + }, + { + name: "toml wrong type for spec", + toml: ` +type = 123`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "unmarshalling toml spec") + }, + }, + { + name: "toml wrong type for job", + toml: ` +type = "blockhashstore" +waitBlocks = "shouldBeInt"`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "unmarshalling toml job") + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + s, err := ValidatedSpec(test.toml) + test.assertion(t, s, err) + }) + } +} diff --git a/core/services/blockheaderfeeder/block_header.go b/core/services/blockheaderfeeder/block_header.go new file mode 100644 index 00000000..518a4628 --- /dev/null +++ b/core/services/blockheaderfeeder/block_header.go @@ -0,0 +1,67 @@ +package blockheaderfeeder + +import ( + "bytes" + "context" + "math/big" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" 
+ "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" +) + +type Client interface { + BatchCallContext(ctx context.Context, b []rpc.BatchElem) error +} + +type GethBlockHeaderProvider struct { + client Client +} + +func NewGethBlockHeaderProvider(client Client) *GethBlockHeaderProvider { + return &GethBlockHeaderProvider{ + client: client, + } +} + +// RlpHeadersBatch retrieves RLP-encoded block headers +// this function is not supported for Avax because Avalanche +// block header format is different from go-ethereum types.Header. +// validation for invalid chain ID is done upstream in blockheaderfeeder.validate.go +func (p *GethBlockHeaderProvider) RlpHeadersBatch(ctx context.Context, blockRange []*big.Int) ([][]byte, error) { + var reqs []rpc.BatchElem + for _, num := range blockRange { + parentBlockNum := big.NewInt(num.Int64() + 1) + req := rpc.BatchElem{ + Method: "eth_getHeaderByNumber", + // Get child block since it's the one that has the parent hash in its header. + Args: []interface{}{hexutil.EncodeBig(parentBlockNum)}, + Result: &types.Header{}, + } + reqs = append(reqs, req) + } + err := p.client.BatchCallContext(ctx, reqs) + if err != nil { + return nil, err + } + + var headers [][]byte + for _, req := range reqs { + header, ok := req.Result.(*types.Header) + if !ok { + return nil, errors.Errorf("received invalid type: %T", req.Result) + } + if header == nil { + return nil, errors.New("invariant violation: got nil header") + } + headerBuffer := new(bytes.Buffer) + err := header.EncodeRLP(headerBuffer) + if err != nil { + return nil, err + } + headers = append(headers, headerBuffer.Bytes()) + } + + return headers, nil +} diff --git a/core/services/blockheaderfeeder/block_header_feeder.go b/core/services/blockheaderfeeder/block_header_feeder.go new file mode 100644 index 00000000..89ec9731 --- /dev/null +++ b/core/services/blockheaderfeeder/block_header_feeder.go @@ -0,0 +1,257 @@ +// The block header feeder package enables automated lookback 
and blockhash filling beyond the +// EVM 256 block lookback window to catch missed block hashes. +package blockheaderfeeder + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +var ( + zeroHash [32]byte +) + +type BlockHeaderProvider interface { + RlpHeadersBatch(ctx context.Context, blockRange []*big.Int) ([][]byte, error) +} + +// BatchBHS defines an interface for interacting with a BatchBlockhashStore contract. +type BatchBHS interface { + // GetBlockhashes returns blockhashes for given blockNumbers + GetBlockhashes(ctx context.Context, blockNumbers []*big.Int) ([][32]byte, error) + + // StoreVerifyHeader stores blockhashes on-chain by using block headers + StoreVerifyHeader(ctx context.Context, blockNumbers []*big.Int, blockHeaders [][]byte, fromAddress common.Address) error +} + +// NewBlockHeaderFeeder creates a new BlockHeaderFeeder instance. 
+func NewBlockHeaderFeeder( + logger logger.Logger, + coordinator blockhashstore.Coordinator, + bhs blockhashstore.BHS, + batchBHS BatchBHS, + blockHeaderProvider BlockHeaderProvider, + waitBlocks int, + lookbackBlocks int, + latestBlock func(ctx context.Context) (uint64, error), + gethks keystore.Eth, + getBlockhashesBatchSize uint16, + storeBlockhashesBatchSize uint16, + fromAddresses []ethkey.EIP55Address, + chainID *big.Int, +) *BlockHeaderFeeder { + return &BlockHeaderFeeder{ + lggr: logger, + coordinator: coordinator, + bhs: bhs, + batchBHS: batchBHS, + waitBlocks: waitBlocks, + lookbackBlocks: lookbackBlocks, + latestBlock: latestBlock, + stored: make(map[uint64]struct{}), + lastRunBlock: 0, + getBlockhashesBatchSize: getBlockhashesBatchSize, + storeBlockhashesBatchSize: storeBlockhashesBatchSize, + blockHeaderProvider: blockHeaderProvider, + gethks: gethks, + fromAddresses: fromAddresses, + chainID: chainID, + } +} + +// BlockHeaderFeeder checks recent VRF coordinator events and stores any blockhashes for blocks within +// waitBlocks and lookbackBlocks that have unfulfilled requests. +type BlockHeaderFeeder struct { + lggr logger.Logger + coordinator blockhashstore.Coordinator + bhs blockhashstore.BHS + batchBHS BatchBHS + waitBlocks int + lookbackBlocks int + latestBlock func(ctx context.Context) (uint64, error) + stored map[uint64]struct{} + blockHeaderProvider BlockHeaderProvider + lastRunBlock uint64 + getBlockhashesBatchSize uint16 + storeBlockhashesBatchSize uint16 + gethks keystore.Eth + fromAddresses []ethkey.EIP55Address + chainID *big.Int +} + +// Run the feeder. 
+func (f *BlockHeaderFeeder) Run(ctx context.Context) error { + latestBlockNumber, err := f.latestBlock(ctx) + if err != nil { + f.lggr.Errorw("Failed to fetch current block number", "err", err) + return errors.Wrap(err, "fetching block number") + } + + fromBlock, toBlock := blockhashstore.GetSearchWindow(int(latestBlockNumber), f.waitBlocks, f.lookbackBlocks) + if toBlock == 0 { + // Nothing to process, no blocks are in range. + return nil + } + + lggr := f.lggr.With("latestBlock", latestBlockNumber, "fromBlock", fromBlock, "toBlock", toBlock) + lggr.Debug("searching for unfulfilled blocks") + + blockToRequests, err := blockhashstore.GetUnfulfilledBlocksAndRequests(ctx, lggr, f.coordinator, fromBlock, toBlock) + if err != nil { + return err + } + + minBlockNumber := f.findLowestBlockNumberWithoutBlockhash(ctx, lggr, blockToRequests) + if minBlockNumber == nil { + lggr.Debug("no blocks to store") + return nil + } + + lggr.Debugw("found lowest block number without blockhash", "minBlockNumber", minBlockNumber) + + earliestStoredBlockNumber, err := f.findEarliestBlockNumberWithBlockhash(ctx, lggr, minBlockNumber.Uint64()+1, toBlock) + if err != nil { + return errors.Wrap(err, "finding earliest blocknumber with blockhash") + } + + lggr.Debugw("found earliest block number with blockhash", "earliestStoredBlockNumber", earliestStoredBlockNumber) + + if earliestStoredBlockNumber == nil { + // store earliest blockhash and return + // on next iteration, earliestStoredBlockNumber will be found and + // will make progress in storing blockhashes using blockheader. 
+ // In this scenario, f.stored is not updated until the next iteration + // because we do not know which block number will be stored in the current iteration + err = f.bhs.StoreEarliest(ctx) + if err != nil { + return errors.Wrap(err, "storing earliest") + } + lggr.Info("Stored earliest block number") + return nil + } + + // get the block range from (earliestStoredBlockNumber - 1) (inclusive) to minBlockNumber (inclusive) in descending order + blocks, err := blockhashstore.DecreasingBlockRange(earliestStoredBlockNumber.Sub(earliestStoredBlockNumber, big.NewInt(1)), minBlockNumber) + if err != nil { + return err + } + + // use 1 sending key for all batches because ordering matters for StoreVerifyHeader + fromAddress, err := f.gethks.GetRoundRobinAddress(f.chainID, blockhashstore.SendingKeys(f.fromAddresses)...) + if err != nil { + return errors.Wrap(err, "getting round robin address") + } + + for i := 0; i < len(blocks); i += int(f.storeBlockhashesBatchSize) { + j := i + int(f.storeBlockhashesBatchSize) + if j > len(blocks) { + j = len(blocks) + } + blockRange := blocks[i:j] + blockHeaders, err := f.blockHeaderProvider.RlpHeadersBatch(ctx, blockRange) + if err != nil { + return errors.Wrap(err, "fetching block headers") + } + + lggr.Debugw("storing block headers", "blockRange", blockRange) + err = f.batchBHS.StoreVerifyHeader(ctx, blockRange, blockHeaders, fromAddress) + if err != nil { + return errors.Wrap(err, "store block headers") + } + for _, blockNumber := range blockRange { + f.stored[blockNumber.Uint64()] = struct{}{} + } + } + + if f.lastRunBlock != 0 { + // Prune stored, anything older than fromBlock can be discarded + for block := f.lastRunBlock - uint64(f.lookbackBlocks); block < fromBlock; block++ { + if _, ok := f.stored[block]; ok { + delete(f.stored, block) + lggr.Debugw("Pruned block from stored cache", + "block", block) + } + } + } + // lastRunBlock is only used for pruning + // only time we update lastRunBlock is when the run reaches completion, 
indicating + // that new block has been stored + f.lastRunBlock = latestBlockNumber + return nil +} + +func (f *BlockHeaderFeeder) findLowestBlockNumberWithoutBlockhash(ctx context.Context, lggr logger.Logger, blockToRequests map[uint64]map[string]struct{}) *big.Int { + var min *big.Int + for block, unfulfilledReqs := range blockToRequests { + if len(unfulfilledReqs) == 0 { + continue + } + if _, ok := f.stored[block]; ok { + // Already stored + continue + } + stored, err := f.bhs.IsStored(ctx, block) + if err != nil { + lggr.Warnw("Failed to check if block is already stored", + "err", err, + "block", block) + continue + } else if stored { + lggr.Infow("Blockhash already stored", + "block", block, "unfulfilledReqIDs", blockhashstore.LimitReqIDs(unfulfilledReqs, 50)) + f.stored[block] = struct{}{} + continue + } + blockNumber := big.NewInt(0).SetUint64(block) + if min == nil || min.Cmp(blockNumber) >= 0 { + min = blockNumber + } + } + return min +} + +// findEarliestBlockNumberWithBlockhash searches [startBlock, toBlock) where startBlock is inclusive and toBlock is exclusive +// and returns the first block that has blockhash already stored. 
Returns nil if no blockhashes are found
+func (f *BlockHeaderFeeder) findEarliestBlockNumberWithBlockhash(ctx context.Context, lggr logger.Logger, startBlock, toBlock uint64) (*big.Int, error) {
+	for i := startBlock; i < toBlock; i += uint64(f.getBlockhashesBatchSize) {
+		j := i + uint64(f.getBlockhashesBatchSize)
+		if j > toBlock {
+			j = toBlock
+		}
+
+		lggr.Debug(fmt.Sprintf("Looking for earliest block number with blockhash %v thru %v", i, j))
+
+		blockNumber := i
+		var blocks []*big.Int
+		for blockNumber < j {
+			blocks = append(blocks, big.NewInt(0).SetUint64(blockNumber))
+			blockNumber++
+		}
+
+		blockhashes, err := f.batchBHS.GetBlockhashes(ctx, blocks)
+		if err != nil {
+			return nil, errors.Wrap(err, "fetching blockhashes")
+		}
+
+		for idx, bh := range blockhashes {
+			if !bytes.Equal(bh[:], zeroHash[:]) {
+				earliestBlockNumber := i + uint64(idx)
+				lggr.Infow("found earliest block number with blockhash", "earliestBlockNumber", earliestBlockNumber, "blockhash", hex.EncodeToString(bh[:]))
+				f.stored[earliestBlockNumber] = struct{}{}
+				return big.NewInt(0).SetUint64(earliestBlockNumber), nil
+			}
+		}
+	}
+	return nil, nil
+}
diff --git a/core/services/blockheaderfeeder/block_header_feeder_test.go b/core/services/blockheaderfeeder/block_header_feeder_test.go
new file mode 100644
index 00000000..52970e1a
--- /dev/null
+++ b/core/services/blockheaderfeeder/block_header_feeder_test.go
@@ -0,0 +1,320 @@
+package blockheaderfeeder
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
+	"github.com/goplugin/pluginv3.0/v2/core/logger"
+	"github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore"
+	"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey"
+	keystoremocks "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks"
+)
+
+type testCase struct {
+	name                string
+	requests            
[]blockhashstore.Event + fulfillments []blockhashstore.Event + wait int + lookback int + latest uint64 + alreadyStored []uint64 + expectedStored []uint64 + expectedErrMsg string + getBatchSize uint16 + storeBatchSize uint16 + getBatchCallCount uint16 + storeBatchCallCount uint16 + storedEarliest bool + bhs blockhashstore.TestBHS + batchBHS blockhashstore.TestBatchBHS +} + +func TestFeeder(t *testing.T) { + tests := []testCase{ + { + name: "single missing block", + requests: []blockhashstore.Event{{Block: 150, ID: "request"}}, + wait: 256, + lookback: 500, + latest: 450, + alreadyStored: []uint64{155}, + expectedStored: []uint64{150, 151, 152, 153, 154, 155}, + getBatchSize: 1, + storeBatchSize: 1, + getBatchCallCount: 5, + storeBatchCallCount: 5, + storedEarliest: false, + }, + { + name: "multiple missing blocks", + requests: []blockhashstore.Event{{Block: 150, ID: "request"}, {Block: 149, ID: "request"}, {Block: 148, ID: "request"}}, + wait: 256, + lookback: 500, + latest: 450, + alreadyStored: []uint64{155}, + expectedStored: []uint64{148, 149, 150, 151, 152, 153, 154, 155}, + getBatchSize: 1, + storeBatchSize: 1, + getBatchCallCount: 7, + storeBatchCallCount: 7, + storedEarliest: false, + }, + { + name: "single missing get batch size = 2", + requests: []blockhashstore.Event{{Block: 150, ID: "request"}}, + wait: 256, + lookback: 500, + latest: 450, + alreadyStored: []uint64{155}, + expectedStored: []uint64{150, 151, 152, 153, 154, 155}, + getBatchSize: 2, + storeBatchSize: 1, + getBatchCallCount: 3, + storeBatchCallCount: 5, + storedEarliest: false, + }, + { + name: "single missing get and store batch size = 3", + requests: []blockhashstore.Event{{Block: 150, ID: "request"}}, + wait: 256, + lookback: 500, + latest: 450, + alreadyStored: []uint64{155}, + expectedStored: []uint64{150, 151, 152, 153, 154, 155}, + getBatchSize: 3, + storeBatchSize: 3, + getBatchCallCount: 2, + storeBatchCallCount: 2, + storedEarliest: false, + }, + { + name: "single missing block 
store earliest", + requests: []blockhashstore.Event{{Block: 150, ID: "request"}}, + wait: 256, + lookback: 500, + latest: 450, + getBatchSize: 10, + getBatchCallCount: 5, + storedEarliest: true, + }, + { + name: "request already fulfilled", + requests: []blockhashstore.Event{{Block: 150, ID: "request"}}, + fulfillments: []blockhashstore.Event{{Block: 150, ID: "request"}}, + wait: 256, + lookback: 500, + latest: 450, + }, + { + name: "fulfillment no matching request no error", + requests: []blockhashstore.Event{{Block: 150, ID: "request1"}}, + fulfillments: []blockhashstore.Event{{Block: 153, ID: "request2"}}, + wait: 256, + lookback: 500, + latest: 450, + alreadyStored: []uint64{155}, + expectedStored: []uint64{150, 151, 152, 153, 154, 155}, + getBatchSize: 1, + storeBatchSize: 1, + getBatchCallCount: 5, + storeBatchCallCount: 5, + }, + { + name: "error checking if stored, store subsequent blocks", + requests: []blockhashstore.Event{{Block: 150, ID: "request1"}, {Block: 151, ID: "request2"}}, + wait: 256, + lookback: 500, + latest: 450, + bhs: blockhashstore.TestBHS{ErrorsIsStored: []uint64{150}}, + alreadyStored: []uint64{155}, + expectedStored: []uint64{151, 152, 153, 154, 155}, + getBatchSize: 1, + storeBatchSize: 1, + getBatchCallCount: 4, + storeBatchCallCount: 4, + }, + { + name: "another error checking if stored, store subsequent blocks", + requests: []blockhashstore.Event{{Block: 150, ID: "request1"}, {Block: 151, ID: "request2"}}, + wait: 256, + lookback: 500, + latest: 450, + bhs: blockhashstore.TestBHS{ErrorsIsStored: []uint64{151}}, + alreadyStored: []uint64{155}, + expectedStored: []uint64{150, 151, 152, 153, 154, 155}, + getBatchSize: 1, + storeBatchSize: 1, + getBatchCallCount: 5, + storeBatchCallCount: 5, + }, + { + name: "error checking getBlockhashes, return with error", + requests: []blockhashstore.Event{{Block: 150, ID: "request"}}, + wait: 256, + lookback: 500, + latest: 450, + alreadyStored: []uint64{155}, + expectedStored: []uint64{155}, + 
getBatchSize: 1, + getBatchCallCount: 1, + batchBHS: blockhashstore.TestBatchBHS{GetBlockhashesError: errors.New("internal failure")}, + expectedErrMsg: "finding earliest blocknumber with blockhash: fetching blockhashes: internal failure", + }, + { + name: "error while storing block headers, return with error", + requests: []blockhashstore.Event{{Block: 150, ID: "request"}}, + wait: 256, + lookback: 500, + latest: 450, + alreadyStored: []uint64{155}, + expectedStored: []uint64{155}, + getBatchSize: 1, + storeBatchSize: 1, + getBatchCallCount: 5, + storeBatchCallCount: 1, + batchBHS: blockhashstore.TestBatchBHS{StoreVerifyHeadersError: errors.New("invalid header")}, + expectedErrMsg: "store block headers: invalid header", + }, + } + + for _, test := range tests { + t.Run(test.name, test.testFeeder) + } +} + +func (test testCase) testFeeder(t *testing.T) { + lggr := logger.TestLogger(t) + lggr.Debugf("running test case: %s", test.name) + coordinator := &blockhashstore.TestCoordinator{ + RequestEvents: test.requests, + FulfillmentEvents: test.fulfillments, + } + + test.batchBHS.Stored = append(test.batchBHS.Stored, test.alreadyStored...) 
+ + blockHeaderProvider := &blockhashstore.TestBlockHeaderProvider{} + fromAddress := "0x469aA2CD13e037DC5236320783dCfd0e641c0559" + fromAddresses := []ethkey.EIP55Address{ethkey.EIP55Address(fromAddress)} + ks := keystoremocks.NewEth(t) + ks.On("GetRoundRobinAddress", testutils.FixtureChainID, mock.Anything).Maybe().Return(common.HexToAddress(fromAddress), nil) + + feeder := NewBlockHeaderFeeder( + lggr, + coordinator, + &test.bhs, + &test.batchBHS, + blockHeaderProvider, + test.wait, + test.lookback, + func(ctx context.Context) (uint64, error) { + return test.latest, nil + }, + ks, + test.getBatchSize, + test.storeBatchSize, + fromAddresses, + testutils.FixtureChainID, + ) + + err := feeder.Run(testutils.Context(t)) + if test.expectedErrMsg == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, test.expectedErrMsg) + } + + require.ElementsMatch(t, test.expectedStored, test.batchBHS.Stored) + require.Equal(t, test.storedEarliest, test.bhs.StoredEarliest) + require.Equal(t, test.getBatchCallCount, test.batchBHS.GetBlockhashesCallCounter) + require.Equal(t, test.storeBatchCallCount, test.batchBHS.StoreVerifyHeaderCallCounter) +} + +func TestFeeder_CachesStoredBlocks(t *testing.T) { + coordinator := &blockhashstore.TestCoordinator{ + RequestEvents: []blockhashstore.Event{{Block: 74, ID: "request"}}, + } + + bhs := &blockhashstore.TestBHS{} + batchBHS := &blockhashstore.TestBatchBHS{Stored: []uint64{75}} + blockHeaderProvider := &blockhashstore.TestBlockHeaderProvider{} + fromAddress := "0x469aA2CD13e037DC5236320783dCfd0e641c0559" + fromAddresses := []ethkey.EIP55Address{ethkey.EIP55Address(fromAddress)} + ks := keystoremocks.NewEth(t) + ks.On("GetRoundRobinAddress", testutils.FixtureChainID, mock.Anything).Maybe().Return(common.HexToAddress(fromAddress), nil) + + feeder := NewBlockHeaderFeeder( + logger.TestLogger(t), + coordinator, + bhs, + batchBHS, + blockHeaderProvider, + 20, + 30, + func(ctx context.Context) (uint64, error) { + return 100, nil 
+ }, + ks, + 1, + 1, + fromAddresses, + testutils.FixtureChainID, + ) + + // Should store block 74. block 75 was already stored from above + require.NoError(t, feeder.Run(testutils.Context(t))) + require.ElementsMatch(t, []uint64{74, 75}, batchBHS.Stored) + + // Run the feeder at a later block + // cache should not be pruned yet because from block is lower than the stored blocks + // latest block = 101 + // lookback block = 30 + // stored blocks = [74, 75] + // from block = 71 + feeder.latestBlock = func(ctx context.Context) (uint64, error) { + return 101, nil + } + // remove stored blocks + batchBHS.Stored = nil + require.NoError(t, feeder.Run(testutils.Context(t))) + // nothing should be stored because of the feeder cache + require.Empty(t, batchBHS.Stored) + + // Remove stored blocks from batchBHS and try again + // for blocks 74, 75, nothing should be stored + // because nothing was pruned above + feeder.coordinator = &blockhashstore.TestCoordinator{ + RequestEvents: []blockhashstore.Event{ + {Block: 74, ID: "request1"}, + {Block: 75, ID: "request2"}, + }, + } + batchBHS.Stored = nil + require.NoError(t, feeder.Run(testutils.Context(t))) + require.Empty(t, batchBHS.Stored) + + // Run the feeder at a later block. this time, the feeder cache will be pruned + feeder.latestBlock = func(ctx context.Context) (uint64, error) { + return 200, nil + } + batchBHS.Stored = []uint64{175} + feeder.coordinator = &blockhashstore.TestCoordinator{RequestEvents: []blockhashstore.Event{{Block: 174, ID: "request"}}} + require.NoError(t, feeder.Run(testutils.Context(t))) + // nothing should be stored in this run because the cache will be pruned at the end of the current iteration. 
+ // in the next run, cache should be empty + require.ElementsMatch(t, []uint64{174, 175}, batchBHS.Stored) + + // Rewind latest block + feeder.coordinator = &blockhashstore.TestCoordinator{RequestEvents: []blockhashstore.Event{{Block: 74, ID: "request"}}} + feeder.latestBlock = func(ctx context.Context) (uint64, error) { + return 100, nil + } + batchBHS.Stored = []uint64{75} + require.NoError(t, feeder.Run(testutils.Context(t))) + require.ElementsMatch(t, []uint64{74, 75}, batchBHS.Stored) +} diff --git a/core/services/blockheaderfeeder/delegate.go b/core/services/blockheaderfeeder/delegate.go new file mode 100644 index 00000000..8221c2b7 --- /dev/null +++ b/core/services/blockheaderfeeder/delegate.go @@ -0,0 +1,278 @@ +package blockheaderfeeder + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + v1 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" + v2 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + v2plus "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var _ job.ServiceCtx = &service{} + +type Delegate struct { + logger logger.Logger + legacyChains legacyevm.LegacyChainContainer + ks keystore.Eth +} + +func NewDelegate( + logger logger.Logger, + legacyChains 
legacyevm.LegacyChainContainer, + ks keystore.Eth, +) *Delegate { + return &Delegate{ + logger: logger, + legacyChains: legacyChains, + ks: ks, + } +} + +// JobType satisfies the job.Delegate interface. +func (d *Delegate) JobType() job.Type { + return job.BlockHeaderFeeder +} + +// ServicesForSpec satisfies the job.Delegate interface. +func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { + if jb.BlockHeaderFeederSpec == nil { + return nil, errors.Errorf("Delegate expects a BlockHeaderFeederSpec to be present, got %+v", jb) + } + + chain, err := d.legacyChains.Get(jb.BlockHeaderFeederSpec.EVMChainID.String()) + if err != nil { + return nil, fmt.Errorf( + "getting chain ID %d: %w", jb.BlockHeaderFeederSpec.EVMChainID.ToInt(), err) + } + + if !chain.Config().Feature().LogPoller() { + return nil, errors.New("log poller must be enabled to run blockheaderfeeder") + } + + if jb.BlockHeaderFeederSpec.LookbackBlocks < int32(chain.Config().EVM().FinalityDepth()) { + return nil, fmt.Errorf( + "lookbackBlocks must be greater than or equal to chain's finality depth (%d), currently %d", + chain.Config().EVM().FinalityDepth(), jb.BlockHeaderFeederSpec.LookbackBlocks) + } + + keys, err := d.ks.EnabledKeysForChain(chain.ID()) + if err != nil { + return nil, errors.Wrap(err, "getting sending keys") + } + if len(keys) == 0 { + return nil, fmt.Errorf("missing sending keys for chain ID: %v", chain.ID()) + } + if err = CheckFromAddressesExist(jb, d.ks); err != nil { + return nil, err + } + fromAddresses := jb.BlockHeaderFeederSpec.FromAddresses + + bhs, err := blockhash_store.NewBlockhashStore( + jb.BlockHeaderFeederSpec.BlockhashStoreAddress.Address(), chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "building BHS") + } + + batchBlockhashStore, err := batch_blockhash_store.NewBatchBlockhashStore( + jb.BlockHeaderFeederSpec.BatchBlockhashStoreAddress.Address(), chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "building batch BHS") + 
} + + lp := chain.LogPoller() + var coordinators []blockhashstore.Coordinator + if jb.BlockHeaderFeederSpec.CoordinatorV1Address != nil { + var c *v1.VRFCoordinator + if c, err = v1.NewVRFCoordinator( + jb.BlockHeaderFeederSpec.CoordinatorV1Address.Address(), chain.Client()); err != nil { + + return nil, errors.Wrap(err, "building V1 coordinator") + } + var coord *blockhashstore.V1Coordinator + coord, err = blockhashstore.NewV1Coordinator(c, lp) + if err != nil { + return nil, errors.Wrap(err, "building V1 coordinator") + } + coordinators = append(coordinators, coord) + } + if jb.BlockHeaderFeederSpec.CoordinatorV2Address != nil { + var c *v2.VRFCoordinatorV2 + if c, err = v2.NewVRFCoordinatorV2( + jb.BlockHeaderFeederSpec.CoordinatorV2Address.Address(), chain.Client()); err != nil { + + return nil, errors.Wrap(err, "building V2 coordinator") + } + var coord *blockhashstore.V2Coordinator + coord, err = blockhashstore.NewV2Coordinator(c, lp) + if err != nil { + return nil, errors.Wrap(err, "building V2 coordinator") + } + coordinators = append(coordinators, coord) + } + if jb.BlockHeaderFeederSpec.CoordinatorV2PlusAddress != nil { + var c v2plus.IVRFCoordinatorV2PlusInternalInterface + if c, err = v2plus.NewIVRFCoordinatorV2PlusInternal( + jb.BlockHeaderFeederSpec.CoordinatorV2PlusAddress.Address(), chain.Client()); err != nil { + + return nil, errors.Wrap(err, "building V2 plus coordinator") + } + var coord *blockhashstore.V2PlusCoordinator + coord, err = blockhashstore.NewV2PlusCoordinator(c, lp) + if err != nil { + return nil, errors.Wrap(err, "building V2 plus coordinator") + } + coordinators = append(coordinators, coord) + } + + bpBHS, err := blockhashstore.NewBulletproofBHS(chain.Config().EVM().GasEstimator(), chain.Config().Database(), fromAddresses, chain.TxManager(), bhs, nil, chain.ID(), d.ks) + if err != nil { + return nil, errors.Wrap(err, "building bulletproof bhs") + } + + batchBHS, err := blockhashstore.NewBatchBHS( + 
chain.Config().EVM().GasEstimator(), + fromAddresses, + chain.TxManager(), + batchBlockhashStore, + chain.ID(), + d.ks, + d.logger, + ) + if err != nil { + return nil, errors.Wrap(err, "building batchBHS") + } + + log := d.logger.Named("BlockHeaderFeeder").With( + "jobID", jb.ID, + "externalJobID", jb.ExternalJobID, + "bhsAddress", bhs.Address(), + "batchBHSAddress", batchBlockhashStore.Address(), + ) + + blockHeaderProvider := NewGethBlockHeaderProvider(chain.Client()) + + feeder := NewBlockHeaderFeeder( + log, + blockhashstore.NewMultiCoordinator(coordinators...), + bpBHS, + batchBHS, + blockHeaderProvider, + int(jb.BlockHeaderFeederSpec.WaitBlocks), + int(jb.BlockHeaderFeederSpec.LookbackBlocks), + func(ctx context.Context) (uint64, error) { + head, err := chain.Client().HeadByNumber(ctx, nil) + if err != nil { + return 0, errors.Wrap(err, "getting chain head") + } + return uint64(head.Number), nil + }, + d.ks, + jb.BlockHeaderFeederSpec.GetBlockhashesBatchSize, + jb.BlockHeaderFeederSpec.StoreBlockhashesBatchSize, + fromAddresses, + chain.ID(), + ) + + services := []job.ServiceCtx{&service{ + feeder: feeder, + pollPeriod: jb.BlockHeaderFeederSpec.PollPeriod, + runTimeout: jb.BlockHeaderFeederSpec.RunTimeout, + logger: log, + done: make(chan struct{}), + }} + + return services, nil +} + +// AfterJobCreated satisfies the job.Delegate interface. +func (d *Delegate) AfterJobCreated(spec job.Job) {} + +func (d *Delegate) BeforeJobCreated(spec job.Job) {} + +// BeforeJobDeleted satisfies the job.Delegate interface. +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} + +// OnDeleteJob satisfies the job.Delegate interface. +func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } + +// service is a job.Service that runs the BHS feeder every pollPeriod. 
+type service struct { + services.StateMachine + feeder *BlockHeaderFeeder + done chan struct{} + pollPeriod time.Duration + runTimeout time.Duration + logger logger.Logger + parentCtx context.Context + cancel context.CancelFunc +} + +// Start the BHS feeder service, satisfying the job.Service interface. +func (s *service) Start(context.Context) error { + return s.StartOnce("Block Header Feeder Service", func() error { + s.logger.Infow("Starting BlockHeaderFeeder") + ticker := time.NewTicker(utils.WithJitter(s.pollPeriod)) + s.parentCtx, s.cancel = context.WithCancel(context.Background()) + go func() { + defer close(s.done) + defer ticker.Stop() + for { + select { + case <-ticker.C: + s.runFeeder() + case <-s.parentCtx.Done(): + return + } + } + }() + return nil + }) +} + +// Close the BHS feeder service, satisfying the job.Service interface. +func (s *service) Close() error { + return s.StopOnce("Block Header Feeder Service", func() error { + s.logger.Infow("Stopping BlockHeaderFeeder") + s.cancel() + <-s.done + return nil + }) +} + +func (s *service) runFeeder() { + s.logger.Debugw("Running BlockHeaderFeeder") + ctx, cancel := context.WithTimeout(s.parentCtx, s.runTimeout) + defer cancel() + err := s.feeder.Run(ctx) + if err == nil { + s.logger.Debugw("BlockHeaderFeeder run completed successfully") + } else { + s.logger.Errorw("BlockHeaderFeeder run was at least partially unsuccessful", + "err", err) + } +} + +// CheckFromAddressesExist returns an error if and only if one of the addresses +// in the BlockHeaderFeeder spec's fromAddresses field does not exist in the keystore. 
+func CheckFromAddressesExist(jb job.Job, gethks keystore.Eth) (err error) { + for _, a := range jb.BlockHeaderFeederSpec.FromAddresses { + _, err2 := gethks.Get(a.Hex()) + err = multierr.Append(err, err2) + } + return +} diff --git a/core/services/blockheaderfeeder/validate.go b/core/services/blockheaderfeeder/validate.go new file mode 100644 index 00000000..abd5f580 --- /dev/null +++ b/core/services/blockheaderfeeder/validate.go @@ -0,0 +1,109 @@ +package blockheaderfeeder + +import ( + "time" + + "github.com/google/uuid" + "github.com/pelletier/go-toml" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +// ValidatedSpec validates and converts the given toml string to a job.Job. +func ValidatedSpec(tomlString string) (job.Job, error) { + jb := job.Job{ + // Default to generating a UUID, can be overwritten by the specified one in tomlString. + ExternalJobID: uuid.New(), + } + + tree, err := toml.Load(tomlString) + if err != nil { + return jb, errors.Wrap(err, "loading toml") + } + + err = tree.Unmarshal(&jb) + if err != nil { + return jb, errors.Wrap(err, "unmarshalling toml spec") + } + + if jb.Type != job.BlockHeaderFeeder { + return jb, errors.Errorf("unsupported type %s", jb.Type) + } + + var spec job.BlockHeaderFeederSpec + err = tree.Unmarshal(&spec) + if err != nil { + return jb, errors.Wrap(err, "unmarshalling toml job") + } + + // Required fields + if spec.CoordinatorV1Address == nil && spec.CoordinatorV2Address == nil && spec.CoordinatorV2PlusAddress == nil { + return jb, errors.New( + `at least one of "coordinatorV1Address", "coordinatorV2Address" and "coordinatorV2PlusAddress" must be set`) + } + if spec.BlockhashStoreAddress == "" { + return jb, notSet("blockhashStoreAddress") + } + if spec.BatchBlockhashStoreAddress == "" { + return jb, notSet("batchBlockhashStoreAddress") + } + if spec.EVMChainID == nil { + return jb, notSet("evmChainID") + } + + err = validateChainID(spec.EVMChainID.Int64()) + if err != nil { 
+ return jb, err + } + + // Defaults + if spec.WaitBlocks == 0 { + spec.WaitBlocks = 256 + } + if spec.LookbackBlocks == 0 { + spec.LookbackBlocks = 1000 + } + if spec.PollPeriod == 0 { + spec.PollPeriod = 15 * time.Second + } + if spec.RunTimeout == 0 { + spec.RunTimeout = 30 * time.Second + } + if spec.StoreBlockhashesBatchSize == 0 { + spec.StoreBlockhashesBatchSize = 10 + } + if spec.GetBlockhashesBatchSize == 0 { + spec.GetBlockhashesBatchSize = 100 + } + + if spec.WaitBlocks < 256 { + return jb, errors.New(`"waitBlocks" must be greater than or equal to 256`) + } + if spec.LookbackBlocks <= 256 { + return jb, errors.New(`"lookbackBlocks" must be greater than 256`) + } + if spec.WaitBlocks >= spec.LookbackBlocks { + return jb, errors.New(`"lookbackBlocks" must be greater than "waitBlocks"`) + } + + jb.BlockHeaderFeederSpec = &spec + + return jb, nil +} + +func notSet(field string) error { + return errors.Errorf("%q must be set", field) +} + +// validateChainID validates whether the given chain is supported +// Avax chain is not supported because block header format +// is different from go-ethereum types.Header. 
+// Special handling for Avax chains is not yet supported +func validateChainID(evmChainID int64) error { + if evmChainID == 43114 || // C-chain mainnet + evmChainID == 43113 { // Fuji testnet + return errors.Errorf("unsupported chain") + } + return nil +} diff --git a/core/services/blockheaderfeeder/validate_test.go b/core/services/blockheaderfeeder/validate_test.go new file mode 100644 index 00000000..d58e8cce --- /dev/null +++ b/core/services/blockheaderfeeder/validate_test.go @@ -0,0 +1,269 @@ +package blockheaderfeeder + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +func TestValidate(t *testing.T) { + v1Coordinator := ethkey.EIP55Address("0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139") + v2Coordinator := ethkey.EIP55Address("0x2be990eE17832b59E0086534c5ea2459Aa75E38F") + v2PlusCoordinator := ethkey.EIP55Address("0x92B5e28Ac583812874e4271380c7d070C5FB6E6b") + fromAddresses := []ethkey.EIP55Address{("0x469aA2CD13e037DC5236320783dCfd0e641c0559")} + + var tests = []struct { + name string + toml string + assertion func(t *testing.T, os job.Job, err error) + }{ + { + name: "valid", + toml: ` +type = "blockheaderfeeder" +name = "valid-test" +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +coordinatorV2Address = "0x2be990eE17832b59E0086534c5ea2459Aa75E38F" +coordinatorV2PlusAddress = "0x92B5e28Ac583812874e4271380c7d070C5FB6E6b" +lookbackBlocks = 2000 +waitBlocks = 500 +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +batchBlockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +pollPeriod = "23s" +runTimeout = "7s" +evmChainID = "4" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"] +getBlockhashesBatchSize = 20 +storeBlockhashesBatchSize = 10 +`, + assertion: func(t *testing.T, 
os job.Job, err error) { + require.NoError(t, err) + require.Equal(t, job.BlockHeaderFeeder, os.Type) + require.Equal(t, "valid-test", os.Name.String) + require.Equal(t, &v1Coordinator, + os.BlockHeaderFeederSpec.CoordinatorV1Address) + require.Equal(t, &v2Coordinator, + os.BlockHeaderFeederSpec.CoordinatorV2Address) + require.Equal(t, &v2PlusCoordinator, + os.BlockHeaderFeederSpec.CoordinatorV2PlusAddress) + require.Equal(t, int32(2000), os.BlockHeaderFeederSpec.LookbackBlocks) + require.Equal(t, int32(500), os.BlockHeaderFeederSpec.WaitBlocks) + require.Equal(t, ethkey.EIP55Address("0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17"), + os.BlockHeaderFeederSpec.BlockhashStoreAddress) + require.Equal(t, ethkey.EIP55Address("0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63"), + os.BlockHeaderFeederSpec.BatchBlockhashStoreAddress) + require.Equal(t, 23*time.Second, os.BlockHeaderFeederSpec.PollPeriod) + require.Equal(t, 7*time.Second, os.BlockHeaderFeederSpec.RunTimeout) + require.Equal(t, big.NewI(4), os.BlockHeaderFeederSpec.EVMChainID) + require.Equal(t, fromAddresses, + os.BlockHeaderFeederSpec.FromAddresses) + require.Equal(t, uint16(20), + os.BlockHeaderFeederSpec.GetBlockhashesBatchSize) + require.Equal(t, uint16(10), + os.BlockHeaderFeederSpec.StoreBlockhashesBatchSize) + }, + }, + { + name: "defaults-test", + toml: ` +type = "blockheaderfeeder" +name = "defaults-test" +evmChainID = "4" +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +coordinatorV2Address = "0x2be990eE17832b59E0086534c5ea2459Aa75E38F" +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +batchBlockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"] +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + require.Equal(t, int32(1000), os.BlockHeaderFeederSpec.LookbackBlocks) + require.Equal(t, int32(256), os.BlockHeaderFeederSpec.WaitBlocks) + require.Equal(t, 
15*time.Second, os.BlockHeaderFeederSpec.PollPeriod) + require.Equal(t, 30*time.Second, os.BlockHeaderFeederSpec.RunTimeout) + require.Equal(t, big.NewI(4), os.BlockHeaderFeederSpec.EVMChainID) + require.Equal(t, fromAddresses, + os.BlockHeaderFeederSpec.FromAddresses) + require.Equal(t, uint16(100), + os.BlockHeaderFeederSpec.GetBlockhashesBatchSize) + require.Equal(t, uint16(10), + os.BlockHeaderFeederSpec.StoreBlockhashesBatchSize) + }, + }, + { + name: "invalid-job-type", + toml: ` +type = "invalidjob" +name = "invalid-job-type" +lookbackBlocks = 2000 +waitBlocks = 500 +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +batchBlockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +pollPeriod = "23s" +runTimeout = "7s" +evmChainID = "4" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"] +getBlockhashesBatchSize = 20 +storeBlockhashesBatchSize = 10 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.EqualError(t, err, "unsupported type invalidjob") + }, + }, + { + name: "missing-coordinators", + toml: ` +type = "blockheaderfeeder" +name = "missing-coordinators" +lookbackBlocks = 2000 +waitBlocks = 500 +blockhashStoreAddress = "0x3e20Cef636EdA7ba135bCbA4fe6177Bd3cE0aB17" +batchBlockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +pollPeriod = "23s" +runTimeout = "7s" +evmChainID = "4" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"] +getBlockhashesBatchSize = 20 +storeBlockhashesBatchSize = 10 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Equal(t, err.Error(), `at least one of "coordinatorV1Address", "coordinatorV2Address" and "coordinatorV2PlusAddress" must be set`) + }, + }, + { + name: "missing blockhash store address", + toml: ` +type = "blockheaderfeeder" +name = "missing blockhash store address" +lookbackBlocks = 2000 +waitBlocks = 500 +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +batchBlockhashStoreAddress = 
"0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +pollPeriod = "23s" +runTimeout = "7s" +evmChainID = "4" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"] +getBlockhashesBatchSize = 20 +storeBlockhashesBatchSize = 10 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Equal(t, err.Error(), `"blockhashStoreAddress" must be set`) + }, + }, + { + name: "missing batch blockhash store address", + toml: ` +type = "blockheaderfeeder" +name = "missing batch blockhash store address" +lookbackBlocks = 2000 +waitBlocks = 500 +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +blockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +pollPeriod = "23s" +runTimeout = "7s" +evmChainID = "4" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"] +getBlockhashesBatchSize = 20 +storeBlockhashesBatchSize = 10 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Equal(t, err.Error(), `"batchBlockhashStoreAddress" must be set`) + }, + }, + { + name: "missing evmChainID", + toml: ` +type = "blockheaderfeeder" +name = "missing evmChainID" +lookbackBlocks = 2000 +waitBlocks = 500 +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +blockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +batchBlockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +pollPeriod = "23s" +runTimeout = "7s" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"] +getBlockhashesBatchSize = 20 +storeBlockhashesBatchSize = 10 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Equal(t, err.Error(), `"evmChainID" must be set`) + }, + }, + { + name: "wait block lower than 256 blocks", + toml: ` +type = "blockheaderfeeder" +name = "wait block lower than 256 blocks" +lookbackBlocks = 2000 +waitBlocks = 255 +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +blockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" 
+batchBlockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +pollPeriod = "23s" +runTimeout = "7s" +evmChainID = "4" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"] +getBlockhashesBatchSize = 20 +storeBlockhashesBatchSize = 10 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Equal(t, err.Error(), `"waitBlocks" must be greater than or equal to 256`) + }, + }, + { + name: "lookback block lower than 256 blocks", + toml: ` +type = "blockheaderfeeder" +name = "lookback block lower than 256 blocks" +lookbackBlocks = 255 +waitBlocks = 256 +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +blockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +batchBlockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +pollPeriod = "23s" +runTimeout = "7s" +evmChainID = "4" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"] +getBlockhashesBatchSize = 20 +storeBlockhashesBatchSize = 10 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Equal(t, err.Error(), `"lookbackBlocks" must be greater than 256`) + }, + }, + { + name: "lookback blocks lower than wait blocks", + toml: ` +type = "blockheaderfeeder" +name = "lookback blocks lower than wait blocks" +lookbackBlocks = 300 +waitBlocks = 500 +coordinatorV1Address = "0x1F72B4A5DCf7CC6d2E38423bF2f4BFA7db97d139" +blockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +batchBlockhashStoreAddress = "0xD04E5b2ea4e55AEbe6f7522bc2A69Ec6639bfc63" +pollPeriod = "23s" +runTimeout = "7s" +evmChainID = "4" +fromAddresses = ["0x469aA2CD13e037DC5236320783dCfd0e641c0559"] +getBlockhashesBatchSize = 20 +storeBlockhashesBatchSize = 10 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Equal(t, err.Error(), `"lookbackBlocks" must be greater than "waitBlocks"`) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + s, err := ValidatedSpec(test.toml) + 
test.assertion(t, s, err) + }) + } +} diff --git a/core/services/chainlink/CONFIG.md b/core/services/chainlink/CONFIG.md new file mode 100644 index 00000000..38917fa7 --- /dev/null +++ b/core/services/chainlink/CONFIG.md @@ -0,0 +1,114 @@ +# Configuration Transition + +- subgraph names are packages +- thick lines indicate control flow +- dotted lines indicate implicit interface implementation +- regular w/ dot indicate implementation types + +```mermaid +flowchart LR + + subgraph cmd + + subgraph cmd/app + NewApp([func NewApp]) + end + + cli>$ plugin node start] + + RunNode([func Client.RunNode]) + + NewApplication([func NewApplication]) + + cli == 1. Before ==> NewApp + cli == 2. Action ==> RunNode + RunNode ==> NewApplication + + end + + toml{{TOML?}} + + subgraph services/plugin + + Config[[Config]] + + NewTOMLGeneralConfig([func NewTOMLGeneralConfig]) + + generalConfig --o Config + + NewTOMLGeneralConfig --> generalConfig + + end + + subgraph config + + BasicConfig(BasicConfig) + + NewGeneralConfig([func NewGeneralConfig]) + + generalConfig2[generalConfig] + + NewGeneralConfig --> generalConfig2 + + subgraph config/v2 + + Core[[Core]] + + end + + end + + Config --o Core + + NewApp ==> toml + toml == yes ==> NewTOMLGeneralConfig + toml == no ==> NewGeneralConfig + generalConfig -.-> BasicConfig + generalConfig2 -.-> BasicConfig + + + subgraph chains/evm + + LoadChainSet([func LoadChainSet]) + tomlChain{{TOML?}} + LoadChainSet ==> tomlChain + + subgraph chains/evm/config + + NewChainScopedConfig([func NewChainScopedConfig]) + + ChainScopedOnlyConfig(ChainScopedOnlyConfig) + + chainScopedConfig + + NewChainScopedConfig --> chainScopedConfig + + chainScopedConfig -.-> ChainScopedOnlyConfig + + subgraph chains/evm/config/v2 + + NewTOMLChainScopedConfig([func NewTOMLChainScopedConfig]) + + ChainScoped + + NewTOMLChainScopedConfig --> ChainScoped + + ChainScoped -.-> ChainScopedOnlyConfig + + EVMConfig[[EVMConfig]] + + end + + end + + tomlChain == no 
==>NewChainScopedConfig + tomlChain == yes ==>NewTOMLChainScopedConfig + Config --o EVMConfig + end + + chainScopedConfig --o generalConfig + ChainScoped --o generalConfig2 + + NewApplication ==> LoadChainSet + +``` diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go new file mode 100644 index 00000000..3a6afb2f --- /dev/null +++ b/core/services/chainlink/application.go @@ -0,0 +1,849 @@ +package plugin + +import ( + "bytes" + "context" + "fmt" + "math/big" + "net/http" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/google/uuid" + "github.com/grafana/pyroscope-go" + "github.com/pkg/errors" + "go.uber.org/multierr" + "go.uber.org/zap/zapcore" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/loop" + commonservices "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + "github.com/goplugin/pluginv3.0/v2/core/capabilities" + "github.com/goplugin/pluginv3.0/v2/core/static" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/build" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services" + "github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore" + "github.com/goplugin/pluginv3.0/v2/core/services/blockheaderfeeder" + "github.com/goplugin/pluginv3.0/v2/core/services/cron" + "github.com/goplugin/pluginv3.0/v2/core/services/directrequest" + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + 
"github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrbootstrap" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/periodicbackup" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/promreporter" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc" + "github.com/goplugin/pluginv3.0/v2/core/services/streams" + "github.com/goplugin/pluginv3.0/v2/core/services/telemetry" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + "github.com/goplugin/pluginv3.0/v2/core/services/workflows" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/sessions/ldapauth" + "github.com/goplugin/pluginv3.0/v2/core/sessions/localauth" + "github.com/goplugin/pluginv3.0/v2/plugins" +) + +// Application implements the common functions used in the core node. 
+// +//go:generate mockery --quiet --name Application --output ../../internal/mocks/ --case=underscore +type Application interface { + Start(ctx context.Context) error + Stop() error + GetLogger() logger.SugaredLogger + GetAuditLogger() audit.AuditLogger + GetHealthChecker() services.Checker + GetSqlxDB() *sqlx.DB + GetConfig() GeneralConfig + SetLogLevel(lvl zapcore.Level) error + GetKeyStore() keystore.Master + WakeSessionReaper() + GetWebAuthnConfiguration() sessions.WebAuthnConfiguration + + GetExternalInitiatorManager() webhook.ExternalInitiatorManager + GetRelayers() RelayerChainInteroperators + GetLoopRegistry() *plugins.LoopRegistry + + // V2 Jobs (TOML specified) + JobSpawner() job.Spawner + JobORM() job.ORM + EVMORM() evmtypes.Configs + PipelineORM() pipeline.ORM + BridgeORM() bridges.ORM + BasicAdminUsersORM() sessions.BasicAdminUsersORM + AuthenticationProvider() sessions.AuthenticationProvider + TxmStorageService() txmgr.EvmTxStore + AddJobV2(ctx context.Context, job *job.Job) error + DeleteJob(ctx context.Context, jobID int32) error + RunWebhookJobV2(ctx context.Context, jobUUID uuid.UUID, requestBody string, meta pipeline.JSONSerializable) (int64, error) + ResumeJobV2(ctx context.Context, taskID uuid.UUID, result pipeline.Result) error + // Testing only + RunJobV2(ctx context.Context, jobID int32, meta map[string]interface{}) (int64, error) + + // Feeds + GetFeedsService() feeds.Service + + // ReplayFromBlock replays logs from on or after the given block number. If forceBroadcast is + // set to true, consumers will reprocess data even if it has already been processed. + ReplayFromBlock(chainID *big.Int, number uint64, forceBroadcast bool) error + + // ID is unique to this particular application instance + ID() uuid.UUID + + SecretGenerator() SecretGenerator +} + +// PluginApplication contains fields for the JobSubscriber, Scheduler, +// and Store. 
The JobSubscriber and Scheduler are also available +// in the services package, but the Store has its own package. +type PluginApplication struct { + relayers *CoreRelayerChainInteroperators + jobORM job.ORM + jobSpawner job.Spawner + pipelineORM pipeline.ORM + pipelineRunner pipeline.Runner + bridgeORM bridges.ORM + localAdminUsersORM sessions.BasicAdminUsersORM + authenticationProvider sessions.AuthenticationProvider + txmStorageService txmgr.EvmTxStore + FeedsService feeds.Service + webhookJobRunner webhook.JobRunner + Config GeneralConfig + KeyStore keystore.Master + ExternalInitiatorManager webhook.ExternalInitiatorManager + SessionReaper *utils.SleeperTask + shutdownOnce sync.Once + srvcs []services.ServiceCtx + HealthChecker services.Checker + Nurse *services.Nurse + logger logger.SugaredLogger + AuditLogger audit.AuditLogger + closeLogger func() error + sqlxDB *sqlx.DB + secretGenerator SecretGenerator + profiler *pyroscope.Profiler + loopRegistry *plugins.LoopRegistry + + started bool + startStopMu sync.Mutex +} + +type ApplicationOpts struct { + Config GeneralConfig + Logger logger.Logger + MailMon *mailbox.Monitor + SqlxDB *sqlx.DB + KeyStore keystore.Master + RelayerChainInteroperators *CoreRelayerChainInteroperators + AuditLogger audit.AuditLogger + CloseLogger func() error + ExternalInitiatorManager webhook.ExternalInitiatorManager + Version string + RestrictedHTTPClient *http.Client + UnrestrictedHTTPClient *http.Client + SecretGenerator SecretGenerator + LoopRegistry *plugins.LoopRegistry + GRPCOpts loop.GRPCOpts + MercuryPool wsrpc.Pool +} + +// NewApplication initializes a new store if one is not already +// present at the configured root directory (default: ~/.plugin), +// the logger at the same directory and returns the Application to +// be used by the node. 
+// TODO: Inject more dependencies here to save booting up useless stuff in tests +func NewApplication(opts ApplicationOpts) (Application, error) { + var srvcs []services.ServiceCtx + auditLogger := opts.AuditLogger + db := opts.SqlxDB + cfg := opts.Config + relayerChainInterops := opts.RelayerChainInteroperators + mailMon := opts.MailMon + externalInitiatorManager := opts.ExternalInitiatorManager + globalLogger := logger.Sugared(opts.Logger) + keyStore := opts.KeyStore + restrictedHTTPClient := opts.RestrictedHTTPClient + unrestrictedHTTPClient := opts.UnrestrictedHTTPClient + registry := capabilities.NewRegistry() + + // LOOPs can be created as options, in the case of LOOP relayers, or + // as OCR2 job implementations, in the case of Median today. + // We will have a non-nil registry here in LOOP relayers are being used, otherwise + // we need to initialize in case we serve OCR2 LOOPs + loopRegistry := opts.LoopRegistry + if loopRegistry == nil { + loopRegistry = plugins.NewLoopRegistry(globalLogger, opts.Config.Tracing()) + } + + // If the audit logger is enabled + if auditLogger.Ready() == nil { + srvcs = append(srvcs, auditLogger) + } + + var profiler *pyroscope.Profiler + if cfg.Pyroscope().ServerAddress() != "" { + globalLogger.Debug("Pyroscope (automatic pprof profiling) is enabled") + var err error + profiler, err = logger.StartPyroscope(cfg.Pyroscope(), cfg.AutoPprof()) + if err != nil { + return nil, errors.Wrap(err, "starting pyroscope (automatic pprof profiling) failed") + } + } else { + globalLogger.Debug("Pyroscope (automatic pprof profiling) is disabled") + } + + ap := cfg.AutoPprof() + var nurse *services.Nurse + if ap.Enabled() { + globalLogger.Info("Nurse service (automatic pprof profiling) is enabled") + nurse = services.NewNurse(ap, globalLogger) + err := nurse.Start() + if err != nil { + return nil, err + } + } else { + globalLogger.Info("Nurse service (automatic pprof profiling) is disabled") + } + + telemetryManager := 
telemetry.NewManager(cfg.TelemetryIngress(), keyStore.CSA(), globalLogger) + srvcs = append(srvcs, telemetryManager) + + backupCfg := cfg.Database().Backup() + if backupCfg.Mode() != config.DatabaseBackupModeNone && backupCfg.Frequency() > 0 { + globalLogger.Infow("DatabaseBackup: periodic database backups are enabled", "frequency", backupCfg.Frequency()) + + databaseBackup, err := periodicbackup.NewDatabaseBackup(cfg.Database().URL(), cfg.RootDir(), backupCfg, globalLogger) + if err != nil { + return nil, errors.Wrap(err, "NewApplication: failed to initialize database backup") + } + srvcs = append(srvcs, databaseBackup) + } else { + globalLogger.Info("DatabaseBackup: periodic database backups are disabled. To enable automatic backups, set Database.Backup.Mode=lite or Database.Backup.Mode=full") + } + + // pool must be started before all relayers and stopped after them + if opts.MercuryPool != nil { + srvcs = append(srvcs, opts.MercuryPool) + } + + // EVM chains are used all over the place. This will need to change for fully EVM extraction + // TODO: BCF-2510, BCF-2511 + + legacyEVMChains := relayerChainInterops.LegacyEVMChains() + if legacyEVMChains == nil { + return nil, fmt.Errorf("no evm chains found") + } + + srvcs = append(srvcs, mailMon) + srvcs = append(srvcs, relayerChainInterops.Services()...) 
+ promReporter := promreporter.NewPromReporter(db.DB, legacyEVMChains, globalLogger) + srvcs = append(srvcs, promReporter) + + // Initialize Local Users ORM and Authentication Provider specified in config + // BasicAdminUsersORM is initialized and required regardless of separate Authentication Provider + localAdminUsersORM := localauth.NewORM(db, cfg.WebServer().SessionTimeout().Duration(), globalLogger, cfg.Database(), auditLogger) + + // Initialize Sessions ORM based on environment configured authenticator + // localDB auth or remote LDAP auth + authMethod := cfg.WebServer().AuthenticationMethod() + var authenticationProvider sessions.AuthenticationProvider + var sessionReaper *utils.SleeperTask + + switch sessions.AuthenticationProviderName(authMethod) { + case sessions.LDAPAuth: + var err error + authenticationProvider, err = ldapauth.NewLDAPAuthenticator( + db, cfg.Database(), cfg.WebServer().LDAP(), cfg.Insecure().DevWebServer(), globalLogger, auditLogger, + ) + if err != nil { + return nil, errors.Wrap(err, "NewApplication: failed to initialize LDAP Authentication module") + } + sessionReaper = ldapauth.NewLDAPServerStateSync(db, cfg.Database(), cfg.WebServer().LDAP(), globalLogger) + case sessions.LocalAuth: + authenticationProvider = localauth.NewORM(db, cfg.WebServer().SessionTimeout().Duration(), globalLogger, cfg.Database(), auditLogger) + sessionReaper = localauth.NewSessionReaper(db.DB, cfg.WebServer(), globalLogger) + default: + return nil, errors.Errorf("NewApplication: Unexpected 'AuthenticationMethod': %s supported values: %s, %s", authMethod, sessions.LocalAuth, sessions.LDAPAuth) + } + + var ( + pipelineORM = pipeline.NewORM(db, globalLogger, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + bridgeORM = bridges.NewORM(db, globalLogger, cfg.Database()) + mercuryORM = mercury.NewORM(db, globalLogger, cfg.Database()) + pipelineRunner = pipeline.NewRunner(pipelineORM, bridgeORM, cfg.JobPipeline(), cfg.WebServer(), legacyEVMChains, 
keyStore.Eth(), keyStore.VRF(), globalLogger, restrictedHTTPClient, unrestrictedHTTPClient) + jobORM = job.NewORM(db, pipelineORM, bridgeORM, keyStore, globalLogger, cfg.Database()) + txmORM = txmgr.NewTxStore(db, globalLogger, cfg.Database()) + streamRegistry = streams.NewRegistry(globalLogger, pipelineRunner) + ) + + for _, chain := range legacyEVMChains.Slice() { + chain.HeadBroadcaster().Subscribe(promReporter) + chain.TxManager().RegisterResumeCallback(pipelineRunner.ResumeRun) + } + + srvcs = append(srvcs, pipelineORM) + + var ( + delegates = map[job.Type]job.Delegate{ + job.DirectRequest: directrequest.NewDelegate( + globalLogger, + pipelineRunner, + pipelineORM, + legacyEVMChains, + mailMon), + job.Keeper: keeper.NewDelegate( + db, + jobORM, + pipelineRunner, + globalLogger, + legacyEVMChains, + mailMon), + job.VRF: vrf.NewDelegate( + db, + keyStore, + pipelineRunner, + pipelineORM, + legacyEVMChains, + globalLogger, + cfg.Database(), + mailMon), + job.Webhook: webhook.NewDelegate( + pipelineRunner, + externalInitiatorManager, + globalLogger), + job.Cron: cron.NewDelegate( + pipelineRunner, + globalLogger), + job.BlockhashStore: blockhashstore.NewDelegate( + globalLogger, + legacyEVMChains, + keyStore.Eth()), + job.BlockHeaderFeeder: blockheaderfeeder.NewDelegate( + globalLogger, + legacyEVMChains, + keyStore.Eth()), + job.Gateway: gateway.NewDelegate( + legacyEVMChains, + keyStore.Eth(), + db, + cfg.Database(), + globalLogger), + job.Stream: streams.NewDelegate( + globalLogger, + streamRegistry, + pipelineRunner, + cfg.JobPipeline()), + job.Workflow: workflows.NewDelegate( + globalLogger, + registry, + legacyEVMChains, + ), + } + webhookJobRunner = delegates[job.Webhook].(*webhook.Delegate).WebhookJobRunner() + ) + + // Flux monitor requires ethereum just to boot, silence errors with a null delegate + if !cfg.EVMRPCEnabled() { + delegates[job.FluxMonitor] = &job.NullDelegate{Type: job.FluxMonitor} + } else { + delegates[job.FluxMonitor] = 
fluxmonitorv2.NewDelegate( + keyStore.Eth(), + jobORM, + pipelineORM, + pipelineRunner, + db, + legacyEVMChains, + globalLogger, + ) + } + + var peerWrapper *ocrcommon.SingletonPeerWrapper + if !cfg.OCR().Enabled() && !cfg.OCR2().Enabled() { + globalLogger.Debug("P2P stack not needed") + } else if cfg.P2P().Enabled() { + if err := ocrcommon.ValidatePeerWrapperConfig(cfg.P2P()); err != nil { + return nil, err + } + peerWrapper = ocrcommon.NewSingletonPeerWrapper(keyStore, cfg.P2P(), cfg.OCR(), cfg.Database(), db, globalLogger) + srvcs = append(srvcs, peerWrapper) + } else { + globalLogger.Debug("P2P stack disabled") + } + + if cfg.OCR().Enabled() { + delegates[job.OffchainReporting] = ocr.NewDelegate( + db, + jobORM, + keyStore, + pipelineRunner, + peerWrapper, + telemetryManager, + legacyEVMChains, + globalLogger, + cfg.Database(), + mailMon, + ) + } else { + globalLogger.Debug("Off-chain reporting disabled") + } + if cfg.OCR2().Enabled() { + globalLogger.Debug("Off-chain reporting v2 enabled") + registrarConfig := plugins.NewRegistrarConfig(opts.GRPCOpts, opts.LoopRegistry.Register) + ocr2DelegateConfig := ocr2.NewDelegateConfig(cfg.OCR2(), cfg.Mercury(), cfg.Threshold(), cfg.Insecure(), cfg.JobPipeline(), cfg.Database(), registrarConfig) + delegates[job.OffchainReporting2] = ocr2.NewDelegate( + db, + jobORM, + bridgeORM, + mercuryORM, + pipelineRunner, + peerWrapper, + telemetryManager, + legacyEVMChains, + globalLogger, + ocr2DelegateConfig, + keyStore.OCR2(), + keyStore.DKGSign(), + keyStore.DKGEncrypt(), + keyStore.Eth(), + opts.RelayerChainInteroperators, + mailMon, + registry, + ) + delegates[job.Bootstrap] = ocrbootstrap.NewDelegateBootstrap( + db, + jobORM, + peerWrapper, + globalLogger, + cfg.OCR2(), + cfg.Insecure(), + opts.RelayerChainInteroperators, + ) + } else { + globalLogger.Debug("Off-chain reporting v2 disabled") + } + + healthChecker := commonservices.NewChecker(static.Version, static.Sha) + + var lbs []utils.DependentAwaiter + for _, c := range 
legacyEVMChains.Slice() { + lbs = append(lbs, c.LogBroadcaster()) + } + jobSpawner := job.NewSpawner(jobORM, cfg.Database(), healthChecker, delegates, db, globalLogger, lbs) + srvcs = append(srvcs, jobSpawner, pipelineRunner) + + // We start the log poller after the job spawner + // so jobs have a chance to apply their initial log filters. + if cfg.Feature().LogPoller() { + for _, c := range legacyEVMChains.Slice() { + srvcs = append(srvcs, c.LogPoller()) + } + } + + var feedsService feeds.Service + if cfg.Feature().FeedsManager() { + feedsORM := feeds.NewORM(db, opts.Logger, cfg.Database()) + feedsService = feeds.NewService( + feedsORM, + jobORM, + db, + jobSpawner, + keyStore, + cfg.Insecure(), + cfg.JobPipeline(), + cfg.OCR(), + cfg.OCR2(), + cfg.Database(), + legacyEVMChains, + globalLogger, + opts.Version, + ) + } else { + feedsService = &feeds.NullService{} + } + + for _, s := range srvcs { + if s == nil { + panic("service unexpectedly nil") + } + if err := healthChecker.Register(s); err != nil { + return nil, err + } + } + + return &PluginApplication{ + relayers: opts.RelayerChainInteroperators, + jobORM: jobORM, + jobSpawner: jobSpawner, + pipelineRunner: pipelineRunner, + pipelineORM: pipelineORM, + bridgeORM: bridgeORM, + localAdminUsersORM: localAdminUsersORM, + authenticationProvider: authenticationProvider, + txmStorageService: txmORM, + FeedsService: feedsService, + Config: cfg, + webhookJobRunner: webhookJobRunner, + KeyStore: keyStore, + SessionReaper: sessionReaper, + ExternalInitiatorManager: externalInitiatorManager, + HealthChecker: healthChecker, + Nurse: nurse, + logger: globalLogger, + AuditLogger: auditLogger, + closeLogger: opts.CloseLogger, + secretGenerator: opts.SecretGenerator, + profiler: profiler, + loopRegistry: loopRegistry, + + sqlxDB: opts.SqlxDB, + + // NOTE: Can keep things clean by putting more things in srvcs instead of manually start/closing + srvcs: srvcs, + }, nil +} + +func (app *PluginApplication) SetLogLevel(lvl 
zapcore.Level) error { + if err := app.Config.SetLogLevel(lvl); err != nil { + return err + } + app.logger.SetLogLevel(lvl) + return nil +} + +// Start all necessary services. If successful, nil will be returned. +// Start sequence is aborted if the context gets cancelled. +func (app *PluginApplication) Start(ctx context.Context) error { + app.startStopMu.Lock() + defer app.startStopMu.Unlock() + if app.started { + panic("application is already started") + } + + if app.FeedsService != nil { + if err := app.FeedsService.Start(ctx); err != nil { + app.logger.Errorf("[Feeds Service] Failed to start %v", err) + app.FeedsService = &feeds.NullService{} // so we don't try to Close() later + } + } + + var ms services.MultiStart + for _, service := range app.srvcs { + if ctx.Err() != nil { + err := errors.Wrap(ctx.Err(), "aborting start") + return multierr.Combine(err, ms.Close()) + } + + app.logger.Debugw("Starting service...", "name", service.Name()) + + if err := ms.Start(ctx, service); err != nil { + return err + } + } + + // Start HealthChecker last, so that the other services had the chance to + // start enough to immediately pass the readiness check. + if err := app.HealthChecker.Start(); err != nil { + return err + } + + app.started = true + + return nil +} + +func (app *PluginApplication) StopIfStarted() error { + app.startStopMu.Lock() + defer app.startStopMu.Unlock() + if app.started { + return app.stop() + } + return nil +} + +func (app *PluginApplication) GetLoopRegistry() *plugins.LoopRegistry { + return app.loopRegistry +} + +// Stop allows the application to exit by halting schedules, closing +// logs, and closing the DB connection. 
+func (app *PluginApplication) Stop() error { + app.startStopMu.Lock() + defer app.startStopMu.Unlock() + return app.stop() +} + +func (app *PluginApplication) stop() (err error) { + if !app.started { + panic("application is already stopped") + } + app.shutdownOnce.Do(func() { + defer func() { + if app.closeLogger == nil { + return + } + if lerr := app.closeLogger(); lerr != nil { + err = multierr.Append(err, lerr) + } + }() + app.logger.Info("Gracefully exiting...") + + // Stop services in the reverse order from which they were started + for i := len(app.srvcs) - 1; i >= 0; i-- { + service := app.srvcs[i] + app.logger.Debugw("Closing service...", "name", service.Name()) + err = multierr.Append(err, service.Close()) + } + + app.logger.Debug("Stopping SessionReaper...") + err = multierr.Append(err, app.SessionReaper.Stop()) + app.logger.Debug("Closing HealthChecker...") + err = multierr.Append(err, app.HealthChecker.Close()) + if app.FeedsService != nil { + app.logger.Debug("Closing Feeds Service...") + err = multierr.Append(err, app.FeedsService.Close()) + } + + if app.Nurse != nil { + err = multierr.Append(err, app.Nurse.Close()) + } + + if app.profiler != nil { + err = multierr.Append(err, app.profiler.Stop()) + } + + app.logger.Info("Exited all services") + + app.started = false + }) + return err +} + +func (app *PluginApplication) GetConfig() GeneralConfig { + return app.Config +} + +func (app *PluginApplication) GetKeyStore() keystore.Master { + return app.KeyStore +} + +func (app *PluginApplication) GetLogger() logger.SugaredLogger { + return app.logger +} + +func (app *PluginApplication) GetAuditLogger() audit.AuditLogger { + return app.AuditLogger +} + +func (app *PluginApplication) GetHealthChecker() services.Checker { + return app.HealthChecker +} + +func (app *PluginApplication) JobSpawner() job.Spawner { + return app.jobSpawner +} + +func (app *PluginApplication) JobORM() job.ORM { + return app.jobORM +} + +func (app *PluginApplication) BridgeORM() 
bridges.ORM { + return app.bridgeORM +} + +func (app *PluginApplication) BasicAdminUsersORM() sessions.BasicAdminUsersORM { + return app.localAdminUsersORM +} + +func (app *PluginApplication) AuthenticationProvider() sessions.AuthenticationProvider { + return app.authenticationProvider +} + +// TODO BCF-2516 remove this all together remove EVM specifics +func (app *PluginApplication) EVMORM() evmtypes.Configs { + return app.GetRelayers().LegacyEVMChains().ChainNodeConfigs() +} + +func (app *PluginApplication) PipelineORM() pipeline.ORM { + return app.pipelineORM +} + +func (app *PluginApplication) TxmStorageService() txmgr.EvmTxStore { + return app.txmStorageService +} + +func (app *PluginApplication) GetExternalInitiatorManager() webhook.ExternalInitiatorManager { + return app.ExternalInitiatorManager +} + +func (app *PluginApplication) SecretGenerator() SecretGenerator { + return app.secretGenerator +} + +// WakeSessionReaper wakes up the reaper to do its reaping. +func (app *PluginApplication) WakeSessionReaper() { + app.SessionReaper.WakeUp() +} + +func (app *PluginApplication) AddJobV2(ctx context.Context, j *job.Job) error { + return app.jobSpawner.CreateJob(j, pg.WithParentCtx(ctx)) +} + +func (app *PluginApplication) DeleteJob(ctx context.Context, jobID int32) error { + // Do not allow the job to be deleted if it is managed by the Feeds Manager + isManaged, err := app.FeedsService.IsJobManaged(ctx, int64(jobID)) + if err != nil { + return err + } + + if isManaged { + return errors.New("job must be deleted in the feeds manager") + } + + return app.jobSpawner.DeleteJob(jobID, pg.WithParentCtx(ctx)) +} + +func (app *PluginApplication) RunWebhookJobV2(ctx context.Context, jobUUID uuid.UUID, requestBody string, meta pipeline.JSONSerializable) (int64, error) { + return app.webhookJobRunner.RunJob(ctx, jobUUID, requestBody, meta) +} + +// Only used for local testing, not supported by the UI. 
+func (app *PluginApplication) RunJobV2( + ctx context.Context, + jobID int32, + meta map[string]interface{}, +) (int64, error) { + if build.IsProd() { + return 0, errors.New("manual job runs not supported on secure builds") + } + jb, err := app.jobORM.FindJob(ctx, jobID) + if err != nil { + return 0, errors.Wrapf(err, "job ID %v", jobID) + } + var runID int64 + + // Some jobs are special in that they do not have a task graph. + isBootstrap := jb.Type == job.OffchainReporting && jb.OCROracleSpec != nil && jb.OCROracleSpec.IsBootstrapPeer + if jb.Type.RequiresPipelineSpec() || !isBootstrap { + var vars map[string]interface{} + var saveTasks bool + if jb.Type == job.VRF { + saveTasks = true + // Create a dummy log to trigger a run + testLog := types.Log{ + Data: bytes.Join([][]byte{ + jb.VRFSpec.PublicKey.MustHash().Bytes(), // key hash + common.BigToHash(big.NewInt(42)).Bytes(), // seed + evmutils.NewHash().Bytes(), // sender + evmutils.NewHash().Bytes(), // fee + evmutils.NewHash().Bytes()}, // requestID + []byte{}), + Topics: []common.Hash{{}, jb.ExternalIDEncodeBytesToTopic()}, // jobID BYTES + TxHash: evmutils.NewHash(), + BlockNumber: 10, + BlockHash: evmutils.NewHash(), + } + vars = map[string]interface{}{ + "jobSpec": map[string]interface{}{ + "databaseID": jb.ID, + "externalJobID": jb.ExternalJobID, + "name": jb.Name.ValueOrZero(), + "publicKey": jb.VRFSpec.PublicKey[:], + "evmChainID": jb.VRFSpec.EVMChainID.String(), + }, + "jobRun": map[string]interface{}{ + "meta": meta, + "logBlockHash": testLog.BlockHash[:], + "logBlockNumber": testLog.BlockNumber, + "logTxHash": testLog.TxHash, + "logTopics": testLog.Topics, + "logData": testLog.Data, + }, + } + } else { + vars = map[string]interface{}{ + "jobRun": map[string]interface{}{ + "meta": meta, + }, + } + } + runID, _, err = app.pipelineRunner.ExecuteAndInsertFinishedRun(ctx, *jb.PipelineSpec, pipeline.NewVarsFrom(vars), app.logger, saveTasks) + } + return runID, err +} + +func (app *PluginApplication) 
ResumeJobV2( + ctx context.Context, + taskID uuid.UUID, + result pipeline.Result, +) error { + return app.pipelineRunner.ResumeRun(taskID, result.Value, result.Error) +} + +func (app *PluginApplication) GetFeedsService() feeds.Service { + return app.FeedsService +} + +// ReplayFromBlock implements the Application interface. +func (app *PluginApplication) ReplayFromBlock(chainID *big.Int, number uint64, forceBroadcast bool) error { + chain, err := app.GetRelayers().LegacyEVMChains().Get(chainID.String()) + if err != nil { + return err + } + chain.LogBroadcaster().ReplayFromBlock(int64(number), forceBroadcast) + if app.Config.Feature().LogPoller() { + chain.LogPoller().ReplayAsync(int64(number)) + } + return nil +} + +func (app *PluginApplication) GetRelayers() RelayerChainInteroperators { + return app.relayers +} + +func (app *PluginApplication) GetSqlxDB() *sqlx.DB { + return app.sqlxDB +} + +// Returns the configuration to use for creating and authenticating +// new WebAuthn credentials +func (app *PluginApplication) GetWebAuthnConfiguration() sessions.WebAuthnConfiguration { + rpid := app.Config.WebServer().MFA().RPID() + rporigin := app.Config.WebServer().MFA().RPOrigin() + if rpid == "" { + app.GetLogger().Errorf("RPID is not set, WebAuthn will likely not work as intended") + } + + if rporigin == "" { + app.GetLogger().Errorf("RPOrigin is not set, WebAuthn will likely not work as intended") + } + + return sessions.WebAuthnConfiguration{ + RPID: rpid, + RPOrigin: rporigin, + } +} + +func (app *PluginApplication) ID() uuid.UUID { + return app.Config.AppID() +} diff --git a/core/services/chainlink/cfgtest/cfgtest.go b/core/services/chainlink/cfgtest/cfgtest.go new file mode 100644 index 00000000..106fa929 --- /dev/null +++ b/core/services/chainlink/cfgtest/cfgtest.go @@ -0,0 +1,106 @@ +package cfgtest + +import ( + "encoding" + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/multierr" + 
+ "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func AssertFieldsNotNil(t *testing.T, s interface{}) { + err := assertValNotNil(t, "", reflect.ValueOf(s)) + _, err = utils.MultiErrorList(err) + assert.NoError(t, err) +} + +// assertFieldsNotNil recursively checks the struct s for nil fields. +func assertFieldsNotNil(t *testing.T, prefix string, s reflect.Value) (err error) { + t.Helper() + require.Equal(t, reflect.Struct, s.Kind()) + + typ := s.Type() + for i := 0; i < s.NumField(); i++ { + f := s.Field(i) + key := prefix + if tf := typ.Field(i); !tf.Anonymous { + if key != "" { + key += "." + } + key += tf.Name + } + err = multierr.Combine(err, assertValNotNil(t, key, f)) + } + return +} + +// assertValuesNotNil recursively checks the map m for nil values. +func assertValuesNotNil(t *testing.T, prefix string, m reflect.Value) (err error) { + t.Helper() + require.Equal(t, reflect.Map, m.Kind()) + if prefix != "" { + prefix += "." + } + + mi := m.MapRange() + for mi.Next() { + key := prefix + mi.Key().String() + err = multierr.Combine(err, assertValNotNil(t, key, mi.Value())) + } + return +} + +// assertElementsNotNil recursively checks the slice s for nil values. +func assertElementsNotNil(t *testing.T, prefix string, s reflect.Value) (err error) { + t.Helper() + require.Equal(t, reflect.Slice, s.Kind()) + + for i := 0; i < s.Len(); i++ { + err = multierr.Combine(err, assertValNotNil(t, prefix, s.Index(i))) + } + return +} + +var ( + textUnmarshaler encoding.TextUnmarshaler + textUnmarshalerType = reflect.TypeOf(&textUnmarshaler).Elem() +) + +// assertValNotNil recursively checks that val is not nil. val must be a struct, map, slice, or point to one. 
+func assertValNotNil(t *testing.T, key string, val reflect.Value) error { + t.Helper() + k := val.Kind() + switch k { //nolint:exhaustive + case reflect.Ptr, reflect.Map: + if val.IsNil() { + return fmt.Errorf("%s: nil", key) + } + } + if k == reflect.Ptr { + if val.Type().Implements(textUnmarshalerType) { + return nil // skip values unmarshaled from strings + } + val = val.Elem() + } + switch val.Kind() { + case reflect.Struct: + if val.Type().Implements(textUnmarshalerType) { + return nil // skip values unmarshaled from strings + } + return assertFieldsNotNil(t, key, val) + case reflect.Map: + return assertValuesNotNil(t, key, val) + case reflect.Slice: + if val.IsNil() { + return nil // not actually a problem + } + return assertElementsNotNil(t, key, val) + default: + return nil + } +} diff --git a/core/services/chainlink/cfgtest/defaults.go b/core/services/chainlink/cfgtest/defaults.go new file mode 100644 index 00000000..01573f9b --- /dev/null +++ b/core/services/chainlink/cfgtest/defaults.go @@ -0,0 +1,87 @@ +package cfgtest + +import ( + "bufio" + "fmt" + "io" + "reflect" + "strings" + + "github.com/pkg/errors" +) + +// DocDefaultsOnly reads only the default values from a docs TOML file and decodes in to cfg. +// Fields without defaults will set to zero values. +func DocDefaultsOnly(r io.Reader, cfg any, decode func(io.Reader, any) error) error { + pr, pw := io.Pipe() + defer pr.Close() + go writeDefaults(r, pw) + if err := decode(pr, cfg); err != nil { + return errors.Wrapf(err, "failed to decode default core configuration") + } + // replace niled examples with zero values. + nilToZero(reflect.ValueOf(cfg)) + return nil +} + +// writeDefaults writes default lines from defaultsTOML to w. 
+func writeDefaults(r io.Reader, w *io.PipeWriter) { + defer w.Close() + s := bufio.NewScanner(r) + for s.Scan() { + t := s.Text() + // Skip comments and examples (which become zero values) + if strings.HasPrefix(t, "#") || strings.HasSuffix(t, "# Example") { + continue + } + if _, err := io.WriteString(w, t); err != nil { + w.CloseWithError(err) + } + if _, err := w.Write([]byte{'\n'}); err != nil { + w.CloseWithError(err) + } + } + if err := s.Err(); err != nil { + w.CloseWithError(fmt.Errorf("failed to scan core defaults: %v", err)) + } +} + +func nilToZero(val reflect.Value) { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + t := val.Type().Elem() + val.Set(reflect.New(t)) + } + if val.Type().Implements(textUnmarshalerType) { + return // don't descend inside - leave whole zero value + } + val = val.Elem() + } + switch val.Kind() { + case reflect.Struct: + if val.Type().Implements(textUnmarshalerType) { + return // skip values unmarshaled from strings + } + for i := 0; i < val.NumField(); i++ { + f := val.Field(i) + nilToZero(f) + } + return + case reflect.Map: + if !val.IsNil() { + for _, k := range val.MapKeys() { + nilToZero(val.MapIndex(k)) + } + } + return + case reflect.Slice: + if !val.IsNil() { + for i := 0; i < val.Len(); i++ { + nilToZero(val.Index(i)) + } + } + return + default: + return + } +} diff --git a/core/services/chainlink/config.go b/core/services/chainlink/config.go new file mode 100644 index 00000000..618e5d60 --- /dev/null +++ b/core/services/chainlink/config.go @@ -0,0 +1,271 @@ +package plugin + +import ( + "errors" + "fmt" + + "go.uber.org/multierr" + + gotoml "github.com/pelletier/go-toml/v2" + + coscfg "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + "github.com/goplugin/plugin-solana/pkg/solana" + stkcfg "github.com/goplugin/plugin-starknet/relayer/pkg/plugin/config" + + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/config/docs" + 
"github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/utils/config" +) + +// Config is the root type used for TOML configuration. +// +// See docs at /docs/CONFIG.md generated via config.GenerateDocs from /internal/config/docs.toml +// +// When adding a new field: +// - consider including a unit suffix with the field name +// - TOML is limited to int64/float64, so fields requiring greater range/precision must use non-standard types +// implementing encoding.TextMarshaler/TextUnmarshaler, like big.Big and decimal.Decimal +// - std lib types that don't implement encoding.TextMarshaler/TextUnmarshaler (time.Duration, url.URL, big.Int) won't +// work as expected, and require wrapper types. See commonconfig.Duration, commonconfig.URL, big.Big. +type Config struct { + toml.Core + + EVM evmcfg.EVMConfigs `toml:",omitempty"` + + Cosmos coscfg.TOMLConfigs `toml:",omitempty"` + + Solana solana.TOMLConfigs `toml:",omitempty"` + + Starknet stkcfg.TOMLConfigs `toml:",omitempty"` +} + +// TOMLString returns a TOML encoded string. +func (c *Config) TOMLString() (string, error) { + b, err := gotoml.Marshal(c) + if err != nil { + return "", err + } + return string(b), nil +} + +// warnings aggregates warnings from valueWarnings and deprecationWarnings +func (c *Config) warnings() (err error) { + deprecationErr := c.deprecationWarnings() + warningErr := c.valueWarnings() + err = multierr.Append(deprecationErr, warningErr) + _, list := utils.MultiErrorList(err) + return list +} + +// valueWarnings returns an error if the Config contains values that hint at misconfiguration before defaults are applied. 
+func (c *Config) valueWarnings() (err error) { + if c.Tracing.Enabled != nil && *c.Tracing.Enabled { + if c.Tracing.Mode != nil && *c.Tracing.Mode == "unencrypted" { + if c.Tracing.TLSCertPath != nil { + err = multierr.Append(err, config.ErrInvalid{Name: "Tracing.TLSCertPath", Value: *c.Tracing.TLSCertPath, Msg: "must be empty when Tracing.Mode is 'unencrypted'"}) + } + } + } + return +} + +// deprecationWarnings returns an error if the Config contains deprecated fields. +// This is typically used before defaults have been applied, with input from the user. +func (c *Config) deprecationWarnings() (err error) { + // none + return +} + +// Validate returns an error if the Config is not valid for use, as-is. +// This is typically used after defaults have been applied. +func (c *Config) Validate() error { + if err := config.Validate(c); err != nil { + return fmt.Errorf("invalid configuration: %w", err) + } + return nil +} + +// setDefaults initializes unset fields with default values. +func (c *Config) setDefaults() { + core := docs.CoreDefaults() + core.SetFrom(&c.Core) + c.Core = core + + for i := range c.EVM { + if input := c.EVM[i]; input == nil { + c.EVM[i] = &evmcfg.EVMConfig{Chain: evmcfg.Defaults(nil)} + } else { + input.Chain = evmcfg.Defaults(input.ChainID, &input.Chain) + } + } + + for i := range c.Cosmos { + if c.Cosmos[i] == nil { + c.Cosmos[i] = new(coscfg.TOMLConfig) + } + c.Cosmos[i].Chain.SetDefaults() + } + + for i := range c.Solana { + if c.Solana[i] == nil { + c.Solana[i] = new(solana.TOMLConfig) + } + c.Solana[i].Chain.SetDefaults() + } + + for i := range c.Starknet { + if c.Starknet[i] == nil { + c.Starknet[i] = new(stkcfg.TOMLConfig) + } + c.Starknet[i].Chain.SetDefaults() + } +} + +func (c *Config) SetFrom(f *Config) (err error) { + c.Core.SetFrom(&f.Core) + + if err1 := c.EVM.SetFrom(&f.EVM); err1 != nil { + err = multierr.Append(err, config.NamedMultiErrorList(err1, "EVM")) + } + + if err2 := c.Cosmos.SetFrom(&f.Cosmos); err2 != nil { + err = 
multierr.Append(err, config.NamedMultiErrorList(err2, "Cosmos")) + } + + if err3 := c.Solana.SetFrom(&f.Solana); err3 != nil { + err = multierr.Append(err, config.NamedMultiErrorList(err3, "Solana")) + } + + if err4 := c.Starknet.SetFrom(&f.Starknet); err4 != nil { + err = multierr.Append(err, config.NamedMultiErrorList(err4, "Starknet")) + } + + _, err = utils.MultiErrorList(err) + + return err +} + +type Secrets struct { + toml.Secrets +} + +func (s *Secrets) SetFrom(f *Secrets) (err error) { + if err2 := s.Database.SetFrom(&f.Database); err2 != nil { + err = multierr.Append(err, config.NamedMultiErrorList(err2, "Database")) + } + + if err2 := s.Password.SetFrom(&f.Password); err2 != nil { + err = multierr.Append(err, config.NamedMultiErrorList(err2, "Password")) + } + + if err2 := s.WebServer.SetFrom(&f.WebServer); err2 != nil { + err = multierr.Append(err, config.NamedMultiErrorList(err2, "WebServer")) + } + + if err2 := s.Pyroscope.SetFrom(&f.Pyroscope); err2 != nil { + err = multierr.Append(err, config.NamedMultiErrorList(err2, "Pyroscope")) + } + + if err2 := s.Prometheus.SetFrom(&f.Prometheus); err2 != nil { + err = multierr.Append(err, config.NamedMultiErrorList(err2, "Prometheus")) + } + + if err2 := s.Mercury.SetFrom(&f.Mercury); err2 != nil { + err = multierr.Append(err, config.NamedMultiErrorList(err2, "Mercury")) + } + + if err2 := s.Threshold.SetFrom(&f.Threshold); err2 != nil { + err = multierr.Append(err, config.NamedMultiErrorList(err2, "Threshold")) + } + + _, err = utils.MultiErrorList(err) + + return err +} + +func (s *Secrets) setDefaults() { + if nil == s.Database.AllowSimplePasswords { + s.Database.AllowSimplePasswords = new(bool) + } +} + +// TOMLString returns a TOML encoded string with secret values redacted. 
+func (s *Secrets) TOMLString() (string, error) {
+	b, err := gotoml.Marshal(s)
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
+
+// ErrInvalidSecrets is wrapped by the errors returned from Validate and ValidateDB.
+var ErrInvalidSecrets = errors.New("invalid secrets")
+
+// Validate validates every constituent secret and returns an accumulated error.
+func (s *Secrets) Validate() error {
+	if err := config.Validate(s); err != nil {
+		return fmt.Errorf("%w: %s", ErrInvalidSecrets, err)
+	}
+	return nil
+}
+
+// ValidateDB only validates the encompassed DatabaseSecrets.
+func (s *Secrets) ValidateDB() error {
+	// This implementation was chosen so that error reporting is uniform
+	// when validating all the secrets or only the db secrets,
+	// and so we could reuse config.Validate, which contains fearsome reflection logic.
+	// This meets the current needs, but if we ever wanted to compose secret
+	// validation we may need to rethink this approach and instead find a way to
+	// toggle on/off the validation of the embedded secrets.
+
+	type dbValidationType struct {
+		// choose field name to match that of Secrets.Database so we have
+		// consistent error messages.
+		Database toml.DatabaseSecrets
+	}
+	s.setDefaults()
+	v := &dbValidationType{s.Database}
+	if err := config.Validate(v); err != nil {
+		return fmt.Errorf("%w: %s", ErrInvalidSecrets, err)
+	}
+	return nil
+}
+
+// setEnv overrides fields from ENV vars, if present.
+func (s *Secrets) setEnv() error { + if dbURL := env.DatabaseURL.Get(); dbURL != "" { + s.Database.URL = new(models.SecretURL) + if err := s.Database.URL.UnmarshalText([]byte(dbURL)); err != nil { + return err + } + } + if dbBackupUrl := env.DatabaseBackupURL.Get(); dbBackupUrl != "" { + s.Database.BackupURL = new(models.SecretURL) + if err := s.Database.BackupURL.UnmarshalText([]byte(dbBackupUrl)); err != nil { + return err + } + } + if env.DatabaseAllowSimplePasswords.IsTrue() { + s.Database.AllowSimplePasswords = new(bool) + *s.Database.AllowSimplePasswords = true + } + if keystorePassword := env.PasswordKeystore.Get(); keystorePassword != "" { + s.Password.Keystore = &keystorePassword + } + if vrfPassword := env.PasswordVRF.Get(); vrfPassword != "" { + s.Password.VRF = &vrfPassword + } + if pyroscopeAuthToken := env.PyroscopeAuthToken.Get(); pyroscopeAuthToken != "" { + s.Pyroscope.AuthToken = &pyroscopeAuthToken + } + if prometheusAuthToken := env.PrometheusAuthToken.Get(); prometheusAuthToken != "" { + s.Prometheus.AuthToken = &prometheusAuthToken + } + if thresholdKeyShare := env.ThresholdKeyShare.Get(); thresholdKeyShare != "" { + s.Threshold.ThresholdKeyShare = &thresholdKeyShare + } + return nil +} diff --git a/core/services/chainlink/config_audit_logger.go b/core/services/chainlink/config_audit_logger.go new file mode 100644 index 00000000..40e303c2 --- /dev/null +++ b/core/services/chainlink/config_audit_logger.go @@ -0,0 +1,35 @@ +package plugin + +import ( + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/build" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +type auditLoggerConfig struct { + c toml.AuditLogger +} + +func (a auditLoggerConfig) Enabled() bool { + return *a.c.Enabled +} + +func (a auditLoggerConfig) ForwardToUrl() (commonconfig.URL, error) { + return *a.c.ForwardToUrl, nil +} + +func (a auditLoggerConfig) 
Environment() string { + if !build.IsProd() { + return "develop" + } + return "production" +} + +func (a auditLoggerConfig) JsonWrapperKey() string { + return *a.c.JsonWrapperKey +} + +func (a auditLoggerConfig) Headers() (models.ServiceHeaders, error) { + return *a.c.Headers, nil +} diff --git a/core/services/chainlink/config_audit_logger_test.go b/core/services/chainlink/config_audit_logger_test.go new file mode 100644 index 00000000..19609e64 --- /dev/null +++ b/core/services/chainlink/config_audit_logger_test.go @@ -0,0 +1,33 @@ +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAuditLoggerConfig(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + auditConfig := cfg.AuditLogger() + + require.Equal(t, true, auditConfig.Enabled()) + require.Equal(t, "event", auditConfig.JsonWrapperKey()) + + fUrl, err := auditConfig.ForwardToUrl() + require.NoError(t, err) + require.Equal(t, "http", fUrl.Scheme) + require.Equal(t, "localhost:9898", fUrl.Host) + + headers, err := auditConfig.Headers() + require.NoError(t, err) + require.Len(t, headers, 2) + require.Equal(t, "Authorization", headers[0].Header) + require.Equal(t, "token", headers[0].Value) + require.Equal(t, "X-SomeOther-Header", headers[1].Header) + require.Equal(t, "value with spaces | and a bar+*", headers[1].Value) +} diff --git a/core/services/chainlink/config_auto_pprof.go b/core/services/chainlink/config_auto_pprof.go new file mode 100644 index 00000000..53495fb6 --- /dev/null +++ b/core/services/chainlink/config_auto_pprof.go @@ -0,0 +1,69 @@ +package plugin + +import ( + "path/filepath" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var _ config.AutoPprof = (*autoPprofConfig)(nil) + +type 
autoPprofConfig struct { + c toml.AutoPprof + rootDir func() string +} + +func (a *autoPprofConfig) Enabled() bool { + return *a.c.Enabled +} + +func (a *autoPprofConfig) BlockProfileRate() int { + return int(*a.c.BlockProfileRate) +} + +func (a *autoPprofConfig) CPUProfileRate() int { + return int(*a.c.CPUProfileRate) +} + +func (a *autoPprofConfig) GatherDuration() commonconfig.Duration { + return *commonconfig.MustNewDuration(a.c.GatherDuration.Duration()) +} + +func (a *autoPprofConfig) GatherTraceDuration() commonconfig.Duration { + return *commonconfig.MustNewDuration(a.c.GatherTraceDuration.Duration()) +} + +func (a *autoPprofConfig) GoroutineThreshold() int { + return int(*a.c.GoroutineThreshold) +} + +func (a *autoPprofConfig) MaxProfileSize() utils.FileSize { + return *a.c.MaxProfileSize +} + +func (a *autoPprofConfig) MemProfileRate() int { + return int(*a.c.MemProfileRate) +} + +func (a *autoPprofConfig) MemThreshold() utils.FileSize { + return *a.c.MemThreshold +} + +func (a *autoPprofConfig) MutexProfileFraction() int { + return int(*a.c.MutexProfileFraction) +} + +func (a *autoPprofConfig) PollInterval() commonconfig.Duration { + return *a.c.PollInterval +} + +func (a *autoPprofConfig) ProfileRoot() string { + s := *a.c.ProfileRoot + if s == "" { + s = filepath.Join(a.rootDir(), "pprof") + } + return s +} diff --git a/core/services/chainlink/config_auto_pprof_test.go b/core/services/chainlink/config_auto_pprof_test.go new file mode 100644 index 00000000..1ad57b71 --- /dev/null +++ b/core/services/chainlink/config_auto_pprof_test.go @@ -0,0 +1,33 @@ +package plugin + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestAutoPprofTest(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + ap := cfg.AutoPprof() + assert.True(t, ap.Enabled()) + 
assert.Equal(t, "prof/root", ap.ProfileRoot()) + assert.Equal(t, 1*time.Minute, ap.PollInterval().Duration()) + assert.Equal(t, 12*time.Second, ap.GatherDuration().Duration()) + assert.Equal(t, 13*time.Second, ap.GatherTraceDuration().Duration()) + assert.Equal(t, utils.FileSize(1*utils.GB), ap.MaxProfileSize()) + assert.Equal(t, 7, ap.CPUProfileRate()) + assert.Equal(t, 9, ap.MemProfileRate()) + assert.Equal(t, 5, ap.BlockProfileRate()) + assert.Equal(t, 2, ap.MutexProfileFraction()) + assert.Equal(t, utils.FileSize(1*utils.GB), ap.MemThreshold()) + assert.Equal(t, 999, ap.GoroutineThreshold()) +} diff --git a/core/services/chainlink/config_database.go b/core/services/chainlink/config_database.go new file mode 100644 index 00000000..b830a511 --- /dev/null +++ b/core/services/chainlink/config_database.go @@ -0,0 +1,130 @@ +package plugin + +import ( + "net/url" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" +) + +type backupConfig struct { + c toml.DatabaseBackup + s toml.DatabaseSecrets +} + +func (b *backupConfig) Dir() string { + return *b.c.Dir +} + +func (b *backupConfig) Frequency() time.Duration { + return b.c.Frequency.Duration() +} + +func (b *backupConfig) Mode() config.DatabaseBackupMode { + return *b.c.Mode +} + +func (b *backupConfig) OnVersionUpgrade() bool { + return *b.c.OnVersionUpgrade +} + +func (b *backupConfig) URL() *url.URL { + return b.s.BackupURL.URL() +} + +type lockConfig struct { + c toml.DatabaseLock +} + +func (l *lockConfig) LockingMode() string { + return l.c.Mode() +} + +func (l *lockConfig) LeaseDuration() time.Duration { + return l.c.LeaseDuration.Duration() +} + +func (l *lockConfig) LeaseRefreshInterval() time.Duration { + return l.c.LeaseRefreshInterval.Duration() +} + +type listenerConfig struct { + c toml.DatabaseListener +} + +func (l *listenerConfig) MaxReconnectDuration() time.Duration { + return 
l.c.MaxReconnectDuration.Duration() +} + +func (l *listenerConfig) MinReconnectInterval() time.Duration { + return l.c.MinReconnectInterval.Duration() +} + +func (l *listenerConfig) FallbackPollInterval() time.Duration { + return l.c.FallbackPollInterval.Duration() +} + +var _ config.Database = (*databaseConfig)(nil) + +type databaseConfig struct { + c toml.Database + s toml.DatabaseSecrets + logSQL func() bool +} + +func (d *databaseConfig) Backup() config.Backup { + return &backupConfig{ + c: d.c.Backup, + s: d.s, + } +} + +func (d *databaseConfig) Lock() config.Lock { + return &lockConfig{ + d.c.Lock, + } +} + +func (d *databaseConfig) Listener() config.Listener { + return &listenerConfig{ + c: d.c.Listener, + } +} + +func (d *databaseConfig) DefaultIdleInTxSessionTimeout() time.Duration { + return d.c.DefaultIdleInTxSessionTimeout.Duration() +} + +func (d *databaseConfig) DefaultLockTimeout() time.Duration { + return d.c.DefaultLockTimeout.Duration() +} + +func (d *databaseConfig) DefaultQueryTimeout() time.Duration { + return d.c.DefaultQueryTimeout.Duration() +} + +func (d *databaseConfig) URL() url.URL { + return *d.s.URL.URL() +} + +func (d *databaseConfig) Dialect() dialects.DialectName { + return d.c.Dialect +} + +func (d *databaseConfig) MigrateDatabase() bool { + return *d.c.MigrateOnStartup +} + +func (d *databaseConfig) MaxIdleConns() int { + return int(*d.c.MaxIdleConns) +} + +func (d *databaseConfig) MaxOpenConns() int { + return int(*d.c.MaxOpenConns) +} + +func (d *databaseConfig) LogSQL() (sql bool) { + return d.logSQL() +} diff --git a/core/services/chainlink/config_database_test.go b/core/services/chainlink/config_database_test.go new file mode 100644 index 00000000..ec90284e --- /dev/null +++ b/core/services/chainlink/config_database_test.go @@ -0,0 +1,49 @@ +package plugin + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/config" + 
"github.com/goplugin/pluginv3.0/v2/core/store/dialects" +) + +func TestDatabaseConfig(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + backup := cfg.Database().Backup() + assert.Equal(t, backup.Dir(), "test/backup/dir") + assert.Equal(t, backup.Frequency(), 1*time.Hour) + assert.Equal(t, backup.Mode(), config.DatabaseBackupModeFull) + assert.Equal(t, backup.OnVersionUpgrade(), true) + assert.Nil(t, backup.URL()) + + db := cfg.Database() + assert.Equal(t, db.DefaultIdleInTxSessionTimeout(), 1*time.Minute) + assert.Equal(t, db.DefaultLockTimeout(), 1*time.Hour) + assert.Equal(t, db.DefaultQueryTimeout(), 1*time.Second) + assert.Equal(t, db.LogSQL(), true) + assert.Equal(t, db.MaxIdleConns(), 7) + assert.Equal(t, db.MaxOpenConns(), 13) + assert.Equal(t, db.MigrateDatabase(), true) + assert.Equal(t, db.Dialect(), dialects.Postgres) + url := db.URL() + assert.NotEqual(t, url.String(), "") + + lock := db.Lock() + assert.Equal(t, lock.LockingMode(), "none") + assert.Equal(t, lock.LeaseDuration(), 1*time.Minute) + assert.Equal(t, lock.LeaseRefreshInterval(), 1*time.Second) + + l := db.Listener() + assert.Equal(t, l.MaxReconnectDuration(), 1*time.Minute) + assert.Equal(t, l.MinReconnectInterval(), 5*time.Minute) + assert.Equal(t, l.FallbackPollInterval(), 2*time.Minute) +} diff --git a/core/services/chainlink/config_feature.go b/core/services/chainlink/config_feature.go new file mode 100644 index 00000000..6f2c9388 --- /dev/null +++ b/core/services/chainlink/config_feature.go @@ -0,0 +1,19 @@ +package plugin + +import "github.com/goplugin/pluginv3.0/v2/core/config/toml" + +type featureConfig struct { + c toml.Feature +} + +func (f *featureConfig) FeedsManager() bool { + return *f.c.FeedsManager +} + +func (f *featureConfig) LogPoller() bool { + return *f.c.LogPoller +} + +func (f *featureConfig) UICSAKeys() bool { + return *f.c.UICSAKeys +} diff --git 
a/core/services/chainlink/config_feature_test.go b/core/services/chainlink/config_feature_test.go new file mode 100644 index 00000000..4cd61adf --- /dev/null +++ b/core/services/chainlink/config_feature_test.go @@ -0,0 +1,21 @@ +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFeatureConfig(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + f := cfg.Feature() + assert.True(t, f.LogPoller()) + assert.True(t, f.FeedsManager()) + assert.True(t, f.UICSAKeys()) +} diff --git a/core/services/chainlink/config_flux_monitor.go b/core/services/chainlink/config_flux_monitor.go new file mode 100644 index 00000000..8311c7d8 --- /dev/null +++ b/core/services/chainlink/config_flux_monitor.go @@ -0,0 +1,15 @@ +package plugin + +import "github.com/goplugin/pluginv3.0/v2/core/config/toml" + +type fluxMonitorConfig struct { + c toml.FluxMonitor +} + +func (f *fluxMonitorConfig) DefaultTransactionQueueDepth() uint32 { + return *f.c.DefaultTransactionQueueDepth +} + +func (f *fluxMonitorConfig) SimulateTransactions() bool { + return *f.c.SimulateTransactions +} diff --git a/core/services/chainlink/config_flux_monitor_test.go b/core/services/chainlink/config_flux_monitor_test.go new file mode 100644 index 00000000..7ca5f8f2 --- /dev/null +++ b/core/services/chainlink/config_flux_monitor_test.go @@ -0,0 +1,21 @@ +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFluxMonitorConfig(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + fm := cfg.FluxMonitor() + + assert.Equal(t, uint32(100), fm.DefaultTransactionQueueDepth()) + assert.Equal(t, true, fm.SimulateTransactions()) +} diff --git a/core/services/chainlink/config_general.go 
b/core/services/chainlink/config_general.go new file mode 100644 index 00000000..e00bd666 --- /dev/null +++ b/core/services/chainlink/config_general.go @@ -0,0 +1,511 @@ +package plugin + +import ( + _ "embed" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "go.uber.org/multierr" + "go.uber.org/zap/zapcore" + + coscfg "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + "github.com/goplugin/plugin-solana/pkg/solana" + starknet "github.com/goplugin/plugin-starknet/relayer/pkg/plugin/config" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/config" + coreconfig "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/config/parse" + v2 "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// generalConfig is a wrapper to adapt Config to the config.GeneralConfig interface. +type generalConfig struct { + inputTOML string // user input, normalized via de/re-serialization + effectiveTOML string // with default values included + secretsTOML string // with env overrides includes, redacted + + c *Config // all fields non-nil (unless the legacy method signature return a pointer) + secrets *Secrets + + warning error // warnings about inputTOML, e.g. deprecated fields + + logLevelDefault zapcore.Level + + appIDOnce sync.Once + + logMu sync.RWMutex // for the mutable fields Log.Level & Log.SQL + + passwordMu sync.RWMutex // passwords are set after initialization +} + +// GeneralConfigOpts holds configuration options for creating a coreconfig.GeneralConfig via New(). +// +// See ParseTOML to initilialize Config and Secrets from TOML. 
+type GeneralConfigOpts struct { + ConfigStrings []string + SecretsStrings []string + + Config + Secrets + + // OverrideFn is a *test-only* hook to override effective values. + OverrideFn func(*Config, *Secrets) + + SkipEnv bool +} + +func (o *GeneralConfigOpts) Setup(configFiles []string, secretsFiles []string) error { + configs := []string{} + for _, fileName := range configFiles { + b, err := os.ReadFile(fileName) + if err != nil { + return errors.Wrapf(err, "failed to read config file: %s", fileName) + } + configs = append(configs, string(b)) + } + + if configTOML := env.Config.Get(); configTOML != "" { + configs = append(configs, configTOML) + } + + o.ConfigStrings = configs + + secrets := []string{} + for _, fileName := range secretsFiles { + b, err := os.ReadFile(fileName) + if err != nil { + return errors.Wrapf(err, "failed to read secrets file: %s", fileName) + } + secrets = append(secrets, string(b)) + } + + o.SecretsStrings = secrets + return nil +} + +// parseConfig sets Config from the given TOML string, overriding any existing duplicate Config fields. +func (o *GeneralConfigOpts) parseConfig(config string) error { + var c Config + if err2 := commonconfig.DecodeTOML(strings.NewReader(config), &c); err2 != nil { + return fmt.Errorf("failed to decode config TOML: %w", err2) + } + + // Overrides duplicate fields + if err4 := o.Config.SetFrom(&c); err4 != nil { + return fmt.Errorf("invalid configuration: %w", err4) + } + return nil +} + +// parseSecrets sets Secrets from the given TOML string. Errors on overrides +func (o *GeneralConfigOpts) parseSecrets(secrets string) error { + var s Secrets + if err2 := commonconfig.DecodeTOML(strings.NewReader(secrets), &s); err2 != nil { + return fmt.Errorf("failed to decode secrets TOML: %w", err2) + } + + // merge fields and err on overrides + if err4 := o.Secrets.SetFrom(&s); err4 != nil { + return fmt.Errorf("invalid secrets: %w", err4) + } + + return nil +} + +// New returns a GeneralConfig for the given options. 
+func (o GeneralConfigOpts) New() (GeneralConfig, error) { + err := o.parse() + if err != nil { + return nil, err + } + + input, err := o.Config.TOMLString() + if err != nil { + return nil, err + } + + _, warning := utils.MultiErrorList(o.Config.warnings()) + + o.Config.setDefaults() + if !o.SkipEnv { + err = o.Secrets.setEnv() + if err != nil { + return nil, err + } + } + + if fn := o.OverrideFn; fn != nil { + fn(&o.Config, &o.Secrets) + } + + effective, err := o.Config.TOMLString() + if err != nil { + return nil, err + } + + secrets, err := o.Secrets.TOMLString() + if err != nil { + return nil, err + } + + cfg := &generalConfig{ + inputTOML: input, + effectiveTOML: effective, + secretsTOML: secrets, + c: &o.Config, + secrets: &o.Secrets, + warning: warning, + } + if lvl := o.Config.Log.Level; lvl != nil { + cfg.logLevelDefault = zapcore.Level(*lvl) + } + + return cfg, nil +} + +func (o *GeneralConfigOpts) parse() (err error) { + for _, c := range o.ConfigStrings { + err := o.parseConfig(c) + if err != nil { + return err + } + } + + for _, s := range o.SecretsStrings { + err := o.parseSecrets(s) + if err != nil { + return err + } + } + + o.Secrets.setDefaults() + return +} + +func (g *generalConfig) EVMConfigs() evmcfg.EVMConfigs { + return g.c.EVM +} + +func (g *generalConfig) CosmosConfigs() coscfg.TOMLConfigs { + return g.c.Cosmos +} + +func (g *generalConfig) SolanaConfigs() solana.TOMLConfigs { + return g.c.Solana +} + +func (g *generalConfig) StarknetConfigs() starknet.TOMLConfigs { + return g.c.Starknet +} + +func (g *generalConfig) Validate() error { + return g.validate(g.secrets.Validate) +} + +func (g *generalConfig) validate(secretsValidationFn func() error) error { + err := multierr.Combine( + validateEnv(), + g.c.Validate(), + secretsValidationFn(), + ) + + _, errList := utils.MultiErrorList(err) + return errList +} + +func (g *generalConfig) ValidateDB() error { + return g.validate(g.secrets.ValidateDB) +} + +//go:embed legacy.env +var 
emptyStringsEnv string + +// validateEnv returns an error if any legacy environment variables are set, unless a v2 equivalent exists with the same value. +func validateEnv() (err error) { + defer func() { + if err != nil { + _, err = utils.MultiErrorList(err) + err = fmt.Errorf("invalid environment: %w", err) + } + }() + for _, kv := range strings.Split(emptyStringsEnv, "\n") { + if strings.TrimSpace(kv) == "" { + continue + } + i := strings.Index(kv, "=") + if i == -1 { + return errors.Errorf("malformed .env file line: %s", kv) + } + k := kv[:i] + _, ok := os.LookupEnv(k) + if ok { + err = multierr.Append(err, fmt.Errorf("environment variable %s must not be set: %v", k, v2.ErrUnsupported)) + } + } + return +} + +func (g *generalConfig) LogConfiguration(log, warn coreconfig.LogfFn) { + log("# Secrets:\n%s\n", g.secretsTOML) + log("# Input Configuration:\n%s\n", g.inputTOML) + log("# Effective Configuration, with defaults applied:\n%s\n", g.effectiveTOML) + if g.warning != nil { + warn("# Configuration warning:\n%s\n", g.warning) + } +} + +// ConfigTOML implements plugin.ConfigV2 +func (g *generalConfig) ConfigTOML() (user, effective string) { + return g.inputTOML, g.effectiveTOML +} + +func (g *generalConfig) Feature() coreconfig.Feature { + return &featureConfig{c: g.c.Feature} +} + +func (g *generalConfig) FeatureFeedsManager() bool { + return *g.c.Feature.FeedsManager +} + +func (g *generalConfig) OCR() config.OCR { + return &ocrConfig{c: g.c.OCR} +} + +func (g *generalConfig) OCR2Enabled() bool { + return *g.c.OCR2.Enabled +} + +func (g *generalConfig) FeatureLogPoller() bool { + return *g.c.Feature.LogPoller +} + +func (g *generalConfig) FeatureUICSAKeys() bool { + return *g.c.Feature.UICSAKeys +} + +func (g *generalConfig) AutoPprof() config.AutoPprof { + return &autoPprofConfig{c: g.c.AutoPprof, rootDir: g.RootDir} +} + +func (g *generalConfig) EVMEnabled() bool { + for _, c := range g.c.EVM { + if c.IsEnabled() { + return true + } + } + return false +} + 
+func (g *generalConfig) EVMRPCEnabled() bool { + for _, c := range g.c.EVM { + if c.IsEnabled() { + if len(c.Nodes) > 0 { + return true + } + } + } + return false +} + +func (g *generalConfig) SolanaEnabled() bool { + for _, c := range g.c.Solana { + if c.IsEnabled() { + return true + } + } + return false +} + +func (g *generalConfig) CosmosEnabled() bool { + for _, c := range g.c.Cosmos { + if c.IsEnabled() { + return true + } + } + return false +} + +func (g *generalConfig) StarkNetEnabled() bool { + for _, c := range g.c.Starknet { + if c.IsEnabled() { + return true + } + } + return false +} + +func (g *generalConfig) WebServer() config.WebServer { + return &webServerConfig{c: g.c.WebServer, s: g.secrets.WebServer, rootDir: g.RootDir} +} + +func (g *generalConfig) AutoPprofBlockProfileRate() int { + return int(*g.c.AutoPprof.BlockProfileRate) +} + +func (g *generalConfig) AutoPprofCPUProfileRate() int { + return int(*g.c.AutoPprof.CPUProfileRate) +} + +func (g *generalConfig) AutoPprofGatherDuration() commonconfig.Duration { + return *commonconfig.MustNewDuration(g.c.AutoPprof.GatherDuration.Duration()) +} + +func (g *generalConfig) AutoPprofGatherTraceDuration() commonconfig.Duration { + return *commonconfig.MustNewDuration(g.c.AutoPprof.GatherTraceDuration.Duration()) +} + +func (g *generalConfig) AutoPprofGoroutineThreshold() int { + return int(*g.c.AutoPprof.GoroutineThreshold) +} + +func (g *generalConfig) AutoPprofMaxProfileSize() utils.FileSize { + return *g.c.AutoPprof.MaxProfileSize +} + +func (g *generalConfig) AutoPprofMemProfileRate() int { + return int(*g.c.AutoPprof.MemProfileRate) +} + +func (g *generalConfig) AutoPprofMemThreshold() utils.FileSize { + return *g.c.AutoPprof.MemThreshold +} + +func (g *generalConfig) AutoPprofMutexProfileFraction() int { + return int(*g.c.AutoPprof.MutexProfileFraction) +} + +func (g *generalConfig) AutoPprofPollInterval() commonconfig.Duration { + return *g.c.AutoPprof.PollInterval +} + +func (g *generalConfig) 
AutoPprofProfileRoot() string { + s := *g.c.AutoPprof.ProfileRoot + if s == "" { + s = filepath.Join(g.RootDir(), "pprof") + } + return s +} + +func (g *generalConfig) Database() coreconfig.Database { + return &databaseConfig{c: g.c.Database, s: g.secrets.Secrets.Database, logSQL: g.logSQL} +} + +func (g *generalConfig) ShutdownGracePeriod() time.Duration { + return g.c.ShutdownGracePeriod.Duration() +} + +func (g *generalConfig) FluxMonitor() config.FluxMonitor { + return &fluxMonitorConfig{c: g.c.FluxMonitor} +} + +func (g *generalConfig) InsecureFastScrypt() bool { + return *g.c.InsecureFastScrypt +} + +func (g *generalConfig) JobPipelineReaperInterval() time.Duration { + return g.c.JobPipeline.ReaperInterval.Duration() +} + +func (g *generalConfig) JobPipelineResultWriteQueueDepth() uint64 { + return uint64(*g.c.JobPipeline.ResultWriteQueueDepth) +} + +func (g *generalConfig) JobPipeline() coreconfig.JobPipeline { + return &jobPipelineConfig{c: g.c.JobPipeline} +} + +func (g *generalConfig) Keeper() config.Keeper { + return &keeperConfig{c: g.c.Keeper} +} + +func (g *generalConfig) Log() config.Log { + return &logConfig{c: g.c.Log, rootDir: g.RootDir, level: g.logLevel, defaultLevel: g.logLevelDefault} +} + +func (g *generalConfig) OCR2() config.OCR2 { + return &ocr2Config{c: g.c.OCR2} +} + +func (g *generalConfig) P2P() config.P2P { + return &p2p{c: g.c.P2P} +} + +func (g *generalConfig) P2PPeerID() p2pkey.PeerID { + return *g.c.P2P.PeerID +} + +func (g *generalConfig) P2PPeerIDRaw() string { + return g.c.P2P.PeerID.String() +} + +func (g *generalConfig) P2PIncomingMessageBufferSize() int { + return int(*g.c.P2P.IncomingMessageBufferSize) +} + +func (g *generalConfig) P2POutgoingMessageBufferSize() int { + return int(*g.c.P2P.OutgoingMessageBufferSize) +} + +func (g *generalConfig) Pyroscope() config.Pyroscope { + return &pyroscopeConfig{c: g.c.Pyroscope, s: g.secrets.Pyroscope} +} + +func (g *generalConfig) RootDir() string { + d := *g.c.RootDir + h, err := 
parse.HomeDir(d) + if err != nil { + panic(err) // never happens since we validate that the RootDir is expandable in config.Core.ValidateConfig(). + } + return h +} + +func (g *generalConfig) TelemetryIngress() coreconfig.TelemetryIngress { + return &telemetryIngressConfig{ + c: g.c.TelemetryIngress, + } +} + +func (g *generalConfig) AuditLogger() coreconfig.AuditLogger { + return auditLoggerConfig{c: g.c.AuditLogger} +} + +func (g *generalConfig) Insecure() config.Insecure { + return &insecureConfig{c: g.c.Insecure} +} + +func (g *generalConfig) Sentry() coreconfig.Sentry { + return sentryConfig{g.c.Sentry} +} + +func (g *generalConfig) Password() coreconfig.Password { + return &passwordConfig{keystore: g.keystorePassword, vrf: g.vrfPassword} +} + +func (g *generalConfig) Prometheus() coreconfig.Prometheus { + return &prometheusConfig{s: g.secrets.Prometheus} +} + +func (g *generalConfig) Mercury() coreconfig.Mercury { + return &mercuryConfig{c: g.c.Mercury, s: g.secrets.Mercury} +} + +func (g *generalConfig) Threshold() coreconfig.Threshold { + return &thresholdConfig{s: g.secrets.Threshold} +} + +func (g *generalConfig) Tracing() coreconfig.Tracing { + return &tracingConfig{s: g.c.Tracing} +} + +var zeroSha256Hash = models.Sha256Hash{} diff --git a/core/services/chainlink/config_general_dev_test.go b/core/services/chainlink/config_general_dev_test.go new file mode 100644 index 00000000..0bcfcfbf --- /dev/null +++ b/core/services/chainlink/config_general_dev_test.go @@ -0,0 +1,57 @@ +//go:build dev + +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Tests in this file only run in dev mode +// /usr/bin/go test --tags=dev -timeout 360s -run ^TestTOMLGeneralConfig_DevModeInsecureConfig github.com/goplugin/pluginv3.0/v2/core/services/plugin + +func TestTOMLGeneralConfig_DevModeInsecureConfig(t *testing.T) { + t.Parallel() + + t.Run("all 
insecure configs are false by default", func(t *testing.T) { + config, err := GeneralConfigOpts{}.New(logger.TestLogger(t)) + require.NoError(t, err) + + assert.False(t, config.Insecure().DevWebServer()) + assert.False(t, config.Insecure().DisableRateLimiting()) + assert.False(t, config.Insecure().InfiniteDepthQueries()) + assert.False(t, config.Insecure().OCRDevelopmentMode()) + }) + + t.Run("insecure config ignore override on non-dev builds", func(t *testing.T) { + config, err := GeneralConfigOpts{ + OverrideFn: func(c *Config, s *Secrets) { + *c.Insecure.DevWebServer = true + *c.Insecure.DisableRateLimiting = true + *c.Insecure.InfiniteDepthQueries = true + *c.Insecure.OCRDevelopmentMode = true + }}.New(logger.TestLogger(t)) + require.NoError(t, err) + + assert.True(t, config.Insecure().DevWebServer()) + assert.True(t, config.Insecure().DisableRateLimiting()) + assert.True(t, config.Insecure().InfiniteDepthQueries()) + assert.True(t, config.OCRDevelopmentMode()) + }) + + t.Run("ParseConfig accepts insecure values on dev builds", func(t *testing.T) { + opts := GeneralConfigOpts{} + err := opts.ParseConfig(` + [insecure] + DevWebServer = true + `) + cfg, err := opts.init() + require.NoError(t, err) + err = cfg.c.Validate() + require.NoError(t, err) + }) +} diff --git a/core/services/chainlink/config_general_secrets.go b/core/services/chainlink/config_general_secrets.go new file mode 100644 index 00000000..28e261f4 --- /dev/null +++ b/core/services/chainlink/config_general_secrets.go @@ -0,0 +1,16 @@ +package plugin + +import ( + "net/url" +) + +func (g *generalConfig) DatabaseURL() url.URL { + if g.secrets.Database.URL == nil { + return url.URL{} + } + return *g.secrets.Database.URL.URL() +} + +func (g *generalConfig) DatabaseBackupURL() *url.URL { + return g.secrets.Database.BackupURL.URL() +} diff --git a/core/services/chainlink/config_general_state.go b/core/services/chainlink/config_general_state.go new file mode 100644 index 00000000..06f269b5 --- /dev/null 
+++ b/core/services/chainlink/config_general_state.go @@ -0,0 +1,75 @@ +package plugin + +import ( + "github.com/google/uuid" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +func (g *generalConfig) AppID() uuid.UUID { + g.appIDOnce.Do(func() { + if g.c.AppID != (uuid.UUID{}) { + return // already set (e.g. test override) + } + g.c.AppID = uuid.New() // randomize + }) + return g.c.AppID +} + +func (g *generalConfig) logLevel() (ll zapcore.Level) { + g.logMu.RLock() + ll = zapcore.Level(*g.c.Log.Level) + g.logMu.RUnlock() + return +} + +func (g *generalConfig) SetLogLevel(lvl zapcore.Level) error { + g.logMu.Lock() + g.c.Log.Level = (*toml.LogLevel)(&lvl) + g.logMu.Unlock() + return nil +} + +func (g *generalConfig) logSQL() (sql bool) { + g.logMu.RLock() + sql = *g.c.Database.LogQueries + g.logMu.RUnlock() + return +} + +func (g *generalConfig) SetLogSQL(logSQL bool) { + g.logMu.Lock() + g.c.Database.LogQueries = &logSQL + g.logMu.Unlock() +} + +func (g *generalConfig) SetPasswords(keystore, vrf *string) { + g.passwordMu.Lock() + defer g.passwordMu.Unlock() + if keystore != nil { + g.secrets.Password.Keystore = (*models.Secret)(keystore) + } + if vrf != nil { + g.secrets.Password.VRF = (*models.Secret)(vrf) + } +} + +func (g *generalConfig) keystorePassword() string { + g.passwordMu.RLock() + defer g.passwordMu.RUnlock() + if g.secrets.Password.Keystore == nil { + return "" + } + return string(*g.secrets.Password.Keystore) +} + +func (g *generalConfig) vrfPassword() string { + g.passwordMu.RLock() + defer g.passwordMu.RUnlock() + if g.secrets.Password.VRF == nil { + return "" + } + return string(*g.secrets.Password.VRF) +} diff --git a/core/services/chainlink/config_general_test.go b/core/services/chainlink/config_general_test.go new file mode 100644 index 00000000..ce45f901 --- /dev/null +++ b/core/services/chainlink/config_general_test.go @@ -0,0 +1,252 @@ 
+//go:build !dev + +package plugin + +import ( + _ "embed" + "fmt" + "maps" + "net/url" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" +) + +func TestTOMLGeneralConfig_Defaults(t *testing.T) { + config, err := GeneralConfigOpts{}.New() + require.NoError(t, err) + assert.Equal(t, (*url.URL)(nil), config.WebServer().BridgeResponseURL()) + assert.False(t, config.EVMRPCEnabled()) + assert.False(t, config.EVMEnabled()) + assert.False(t, config.CosmosEnabled()) + assert.False(t, config.SolanaEnabled()) + assert.False(t, config.StarkNetEnabled()) + assert.Equal(t, false, config.JobPipeline().ExternalInitiatorsEnabled()) + assert.Equal(t, 15*time.Minute, config.WebServer().SessionTimeout().Duration()) +} + +func TestTOMLGeneralConfig_InsecureConfig(t *testing.T) { + t.Parallel() + + t.Run("all insecure configs are false by default", func(t *testing.T) { + config, err := GeneralConfigOpts{}.New() + require.NoError(t, err) + + assert.False(t, config.Insecure().DevWebServer()) + assert.False(t, config.Insecure().DisableRateLimiting()) + assert.False(t, config.Insecure().InfiniteDepthQueries()) + assert.False(t, config.Insecure().OCRDevelopmentMode()) + }) + + t.Run("insecure config ignore override on non-dev builds", func(t *testing.T) { + config, err := GeneralConfigOpts{ + OverrideFn: func(c *Config, s *Secrets) { + *c.Insecure.DevWebServer = true + *c.Insecure.DisableRateLimiting = true + *c.Insecure.InfiniteDepthQueries = true + *c.AuditLogger.Enabled = true + }}.New() + require.NoError(t, err) + + // Just asserting that override logic work on a safe config + assert.True(t, config.AuditLogger().Enabled()) + + assert.False(t, config.Insecure().DevWebServer()) + assert.False(t, config.Insecure().DisableRateLimiting()) + assert.False(t, 
config.Insecure().InfiniteDepthQueries()) + }) + + t.Run("ValidateConfig fails if insecure config is set on non-dev builds", func(t *testing.T) { + config := ` + [insecure] + DevWebServer = true + DisableRateLimiting = false + InfiniteDepthQueries = false + OCRDevelopmentMode = false + ` + opts := GeneralConfigOpts{ + ConfigStrings: []string{config}, + } + cfg, err := opts.New() + require.NoError(t, err) + err = cfg.Validate() + require.Contains(t, err.Error(), "invalid configuration: Insecure.DevWebServer: invalid value (true): insecure configs are not allowed on secure builds") + }) +} + +func TestValidateDB(t *testing.T) { + t.Setenv(string(env.Config), "") + + t.Run("unset db url", func(t *testing.T) { + t.Setenv(string(env.DatabaseURL), "") + + config, err := GeneralConfigOpts{}.New() + require.NoError(t, err) + + err = config.ValidateDB() + require.Error(t, err) + require.ErrorIs(t, err, ErrInvalidSecrets) + }) + + t.Run("dev url", func(t *testing.T) { + t.Setenv(string(env.DatabaseURL), "postgres://postgres:admin@localhost:5432/plugin_dev_test?sslmode=disable") + + config, err := GeneralConfigOpts{}.New() + require.NoError(t, err) + err = config.ValidateDB() + require.NoError(t, err) + }) + + t.Run("bad password url", func(t *testing.T) { + t.Setenv(string(env.DatabaseURL), "postgres://postgres:pwdTooShort@localhost:5432/plugin_dev_prod?sslmode=disable") + t.Setenv(string(env.DatabaseAllowSimplePasswords), "false") + + config, err := GeneralConfigOpts{}.New() + require.NoError(t, err) + err = config.ValidateDB() + require.Error(t, err) + require.ErrorIs(t, err, ErrInvalidSecrets) + }) + +} + +func TestConfig_LogSQL(t *testing.T) { + config, err := GeneralConfigOpts{}.New() + require.NoError(t, err) + + config.SetLogSQL(true) + assert.Equal(t, config.Database().LogSQL(), true) + + config.SetLogSQL(false) + assert.Equal(t, config.Database().LogSQL(), false) +} + +//go:embed testdata/mergingsecretsdata/secrets-database.toml +var databaseSecretsTOML string + 
+//go:embed testdata/mergingsecretsdata/secrets-password.toml +var passwordSecretsTOML string + +//go:embed testdata/mergingsecretsdata/secrets-pyroscope.toml +var pyroscopeSecretsTOML string + +//go:embed testdata/mergingsecretsdata/secrets-prometheus.toml +var prometheusSecretsTOML string + +//go:embed testdata/mergingsecretsdata/secrets-mercury-split-one.toml +var mercurySecretsTOMLSplitOne string + +//go:embed testdata/mergingsecretsdata/secrets-mercury-split-two.toml +var mercurySecretsTOMLSplitTwo string + +//go:embed testdata/mergingsecretsdata/secrets-threshold.toml +var thresholdSecretsTOML string + +//go:embed testdata/mergingsecretsdata/secrets-webserver-ldap.toml +var WebServerLDAPSecretsTOML string + +func TestConfig_SecretsMerging(t *testing.T) { + t.Run("verify secrets merging in GeneralConfigOpts.New()", func(t *testing.T) { + databaseSecrets, err := parseSecrets(databaseSecretsTOML) + require.NoErrorf(t, err, "error: %s", err) + passwordSecrets, err2 := parseSecrets(passwordSecretsTOML) + require.NoErrorf(t, err2, "error: %s", err2) + pyroscopeSecrets, err3 := parseSecrets(pyroscopeSecretsTOML) + require.NoErrorf(t, err3, "error: %s", err3) + prometheusSecrets, err4 := parseSecrets(prometheusSecretsTOML) + require.NoErrorf(t, err4, "error: %s", err4) + mercurySecrets_a, err5 := parseSecrets(mercurySecretsTOMLSplitOne) + require.NoErrorf(t, err5, "error: %s", err5) + mercurySecrets_b, err6 := parseSecrets(mercurySecretsTOMLSplitTwo) + require.NoErrorf(t, err6, "error: %s", err6) + thresholdSecrets, err7 := parseSecrets(thresholdSecretsTOML) + require.NoErrorf(t, err7, "error: %s", err7) + webserverLDAPSecrets, err8 := parseSecrets(WebServerLDAPSecretsTOML) + require.NoErrorf(t, err8, "error: %s", err8) + + opts := new(GeneralConfigOpts) + configFiles := []string{ + "testdata/mergingsecretsdata/config.toml", + } + secretsFiles := []string{ + "testdata/mergingsecretsdata/secrets-database.toml", + "testdata/mergingsecretsdata/secrets-password.toml", + 
"testdata/mergingsecretsdata/secrets-pyroscope.toml", + "testdata/mergingsecretsdata/secrets-prometheus.toml", + "testdata/mergingsecretsdata/secrets-mercury-split-one.toml", + "testdata/mergingsecretsdata/secrets-mercury-split-two.toml", + "testdata/mergingsecretsdata/secrets-threshold.toml", + "testdata/mergingsecretsdata/secrets-webserver-ldap.toml", + } + err = opts.Setup(configFiles, secretsFiles) + require.NoErrorf(t, err, "error: %s", err) + + err = opts.parse() + require.NoErrorf(t, err, "error testing: %s, %s", configFiles, secretsFiles) + + assert.Equal(t, databaseSecrets.Database.URL.URL().String(), opts.Secrets.Database.URL.URL().String()) + assert.Equal(t, databaseSecrets.Database.BackupURL.URL().String(), opts.Secrets.Database.BackupURL.URL().String()) + + assert.Equal(t, (string)(*passwordSecrets.Password.Keystore), (string)(*opts.Secrets.Password.Keystore)) + assert.Equal(t, (string)(*passwordSecrets.Password.VRF), (string)(*opts.Secrets.Password.VRF)) + assert.Equal(t, (string)(*pyroscopeSecrets.Pyroscope.AuthToken), (string)(*opts.Secrets.Pyroscope.AuthToken)) + assert.Equal(t, (string)(*prometheusSecrets.Prometheus.AuthToken), (string)(*opts.Secrets.Prometheus.AuthToken)) + assert.Equal(t, (string)(*thresholdSecrets.Threshold.ThresholdKeyShare), (string)(*opts.Secrets.Threshold.ThresholdKeyShare)) + + assert.Equal(t, webserverLDAPSecrets.WebServer.LDAP.ServerAddress.URL().String(), opts.Secrets.WebServer.LDAP.ServerAddress.URL().String()) + assert.Equal(t, webserverLDAPSecrets.WebServer.LDAP.ReadOnlyUserLogin, opts.Secrets.WebServer.LDAP.ReadOnlyUserLogin) + assert.Equal(t, webserverLDAPSecrets.WebServer.LDAP.ReadOnlyUserPass, opts.Secrets.WebServer.LDAP.ReadOnlyUserPass) + + err = assertDeepEqualityMercurySecrets(*merge(mercurySecrets_a.Mercury, mercurySecrets_b.Mercury), opts.Secrets.Mercury) + require.NoErrorf(t, err, "merged mercury secrets unequal") + }) +} + +func parseSecrets(secrets string) (*Secrets, error) { + var s Secrets + if err := 
config.DecodeTOML(strings.NewReader(secrets), &s); err != nil { + return nil, fmt.Errorf("failed to decode secrets TOML: %w", err) + } + + return &s, nil +} + +func assertDeepEqualityMercurySecrets(expected toml.MercurySecrets, actual toml.MercurySecrets) error { + if len(expected.Credentials) != len(actual.Credentials) { + return fmt.Errorf("maps are not equal in length: len(expected): %d, len(actual): %d", len(expected.Credentials), len(actual.Credentials)) + } + + for key, value := range expected.Credentials { + equal := true + actualValue := actual.Credentials[key] + if (string)(*value.Username) != (string)(*actualValue.Username) { + equal = false + } + if (string)(*value.Password) != (string)(*actualValue.Password) { + equal = false + } + if value.URL.URL().String() != actualValue.URL.URL().String() { + equal = false + } + if !equal { + return fmt.Errorf("maps are not equal: expected[%s] = {%s, %s, %s}, actual[%s] = {%s, %s, %s}", + key, (string)(*value.Username), (string)(*value.Password), value.URL.URL().String(), + key, (string)(*actualValue.Username), (string)(*actualValue.Password), actualValue.URL.URL().String()) + } + } + return nil +} + +func merge(map1 toml.MercurySecrets, map2 toml.MercurySecrets) *toml.MercurySecrets { + combinedMap := make(map[string]toml.MercuryCredentials) + maps.Copy(combinedMap, map1.Credentials) + maps.Copy(combinedMap, map2.Credentials) + return &toml.MercurySecrets{Credentials: combinedMap} +} diff --git a/core/services/chainlink/config_insecure.go b/core/services/chainlink/config_insecure.go new file mode 100644 index 00000000..d8086ef6 --- /dev/null +++ b/core/services/chainlink/config_insecure.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "github.com/goplugin/pluginv3.0/v2/core/build" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" +) + +type insecureConfig struct { + c toml.Insecure +} + +func (i *insecureConfig) DevWebServer() bool { + return build.IsDev() && i.c.DevWebServer != nil && + *i.c.DevWebServer +} 
+ +func (i *insecureConfig) DisableRateLimiting() bool { + return build.IsDev() && i.c.DisableRateLimiting != nil && + *i.c.DisableRateLimiting +} + +func (i *insecureConfig) OCRDevelopmentMode() bool { + // OCRDevelopmentMode is allowed in TestBuilds as well + return (build.IsDev() || build.IsTest()) && i.c.OCRDevelopmentMode != nil && + *i.c.OCRDevelopmentMode +} + +func (i *insecureConfig) InfiniteDepthQueries() bool { + return build.IsDev() && i.c.InfiniteDepthQueries != nil && + *i.c.InfiniteDepthQueries +} diff --git a/core/services/chainlink/config_insecure_test.go b/core/services/chainlink/config_insecure_test.go new file mode 100644 index 00000000..15af98aa --- /dev/null +++ b/core/services/chainlink/config_insecure_test.go @@ -0,0 +1,22 @@ +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInsecureConfig(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + ins := cfg.Insecure() + assert.False(t, ins.DevWebServer()) + assert.False(t, ins.DisableRateLimiting()) + assert.False(t, ins.OCRDevelopmentMode()) + assert.False(t, ins.InfiniteDepthQueries()) +} diff --git a/core/services/chainlink/config_job_pipeline.go b/core/services/chainlink/config_job_pipeline.go new file mode 100644 index 00000000..69ef31ab --- /dev/null +++ b/core/services/chainlink/config_job_pipeline.go @@ -0,0 +1,47 @@ +package plugin + +import ( + "time" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" +) + +var _ config.JobPipeline = (*jobPipelineConfig)(nil) + +type jobPipelineConfig struct { + c toml.JobPipeline +} + +func (j *jobPipelineConfig) DefaultHTTPLimit() int64 { + return int64(*j.c.HTTPRequest.MaxSize) +} + +func (j *jobPipelineConfig) DefaultHTTPTimeout() commonconfig.Duration { + return 
*j.c.HTTPRequest.DefaultTimeout +} + +func (j *jobPipelineConfig) MaxRunDuration() time.Duration { + return j.c.MaxRunDuration.Duration() +} + +func (j *jobPipelineConfig) MaxSuccessfulRuns() uint64 { + return *j.c.MaxSuccessfulRuns +} + +func (j *jobPipelineConfig) ReaperInterval() time.Duration { + return j.c.ReaperInterval.Duration() +} + +func (j *jobPipelineConfig) ReaperThreshold() time.Duration { + return j.c.ReaperThreshold.Duration() +} + +func (j *jobPipelineConfig) ResultWriteQueueDepth() uint64 { + return uint64(*j.c.ResultWriteQueueDepth) +} + +func (j *jobPipelineConfig) ExternalInitiatorsEnabled() bool { + return *j.c.ExternalInitiatorsEnabled +} diff --git a/core/services/chainlink/config_job_pipeline_test.go b/core/services/chainlink/config_job_pipeline_test.go new file mode 100644 index 00000000..78978bef --- /dev/null +++ b/core/services/chainlink/config_job_pipeline_test.go @@ -0,0 +1,33 @@ +package plugin + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestJobPipelineConfigTest(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + jp := cfg.JobPipeline() + + assert.Equal(t, int64(100*utils.MB), jp.DefaultHTTPLimit()) + d, err := commonconfig.NewDuration(1 * time.Minute) + require.NoError(t, err) + assert.Equal(t, d, jp.DefaultHTTPTimeout()) + assert.Equal(t, 1*time.Hour, jp.MaxRunDuration()) + assert.Equal(t, uint64(123456), jp.MaxSuccessfulRuns()) + assert.Equal(t, 4*time.Hour, jp.ReaperInterval()) + assert.Equal(t, 168*time.Hour, jp.ReaperThreshold()) + assert.Equal(t, uint64(10), jp.ResultWriteQueueDepth()) + assert.True(t, jp.ExternalInitiatorsEnabled()) +} diff --git a/core/services/chainlink/config_keeper.go b/core/services/chainlink/config_keeper.go new file mode 
100644 index 00000000..8176aa61 --- /dev/null +++ b/core/services/chainlink/config_keeper.go @@ -0,0 +1,66 @@ +package plugin + +import ( + "time" + + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" +) + +var _ config.Keeper = (*keeperConfig)(nil) + +type registryConfig struct { + c toml.KeeperRegistry +} + +func (r *registryConfig) CheckGasOverhead() uint32 { + return *r.c.CheckGasOverhead +} + +func (r *registryConfig) PerformGasOverhead() uint32 { + return *r.c.PerformGasOverhead +} + +func (r *registryConfig) MaxPerformDataSize() uint32 { + return *r.c.MaxPerformDataSize +} + +func (r *registryConfig) SyncInterval() time.Duration { + return r.c.SyncInterval.Duration() +} + +func (r *registryConfig) SyncUpkeepQueueSize() uint32 { + return *r.c.SyncUpkeepQueueSize +} + +type keeperConfig struct { + c toml.Keeper +} + +func (k *keeperConfig) Registry() config.Registry { + return ®istryConfig{c: k.c.Registry} +} + +func (k *keeperConfig) DefaultTransactionQueueDepth() uint32 { + return *k.c.DefaultTransactionQueueDepth +} + +func (k *keeperConfig) GasPriceBufferPercent() uint16 { + return *k.c.GasPriceBufferPercent +} + +func (k *keeperConfig) GasTipCapBufferPercent() uint16 { + return *k.c.GasTipCapBufferPercent +} + +func (k *keeperConfig) BaseFeeBufferPercent() uint16 { + return *k.c.BaseFeeBufferPercent +} + +func (k *keeperConfig) MaxGracePeriod() int64 { + return *k.c.MaxGracePeriod +} + +func (k *keeperConfig) TurnLookBack() int64 { + return *k.c.TurnLookBack +} diff --git a/core/services/chainlink/config_keeper_test.go b/core/services/chainlink/config_keeper_test.go new file mode 100644 index 00000000..59beb363 --- /dev/null +++ b/core/services/chainlink/config_keeper_test.go @@ -0,0 +1,33 @@ +package plugin + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestKeeperConfig(t *testing.T) { + opts := GeneralConfigOpts{ + 
ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + keeper := cfg.Keeper() + + assert.Equal(t, uint32(17), keeper.DefaultTransactionQueueDepth()) + assert.Equal(t, uint16(12), keeper.GasPriceBufferPercent()) + assert.Equal(t, uint16(43), keeper.GasTipCapBufferPercent()) + assert.Equal(t, uint16(89), keeper.BaseFeeBufferPercent()) + assert.Equal(t, int64(91), keeper.TurnLookBack()) + assert.Equal(t, int64(31), keeper.MaxGracePeriod()) + + registry := keeper.Registry() + assert.Equal(t, uint32(90), registry.CheckGasOverhead()) + assert.Equal(t, uint32(4294967295), registry.PerformGasOverhead()) + assert.Equal(t, uint32(5000), registry.MaxPerformDataSize()) + assert.Equal(t, 1*time.Hour, registry.SyncInterval()) + assert.Equal(t, uint32(31), registry.SyncUpkeepQueueSize()) +} diff --git a/core/services/chainlink/config_log.go b/core/services/chainlink/config_log.go new file mode 100644 index 00000000..d3a1ce8b --- /dev/null +++ b/core/services/chainlink/config_log.go @@ -0,0 +1,63 @@ +package plugin + +import ( + "go.uber.org/zap/zapcore" + + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var _ config.Log = (*logConfig)(nil) + +type logConfig struct { + c toml.Log + rootDir func() string + defaultLevel zapcore.Level + level func() zapcore.Level +} + +type fileConfig struct { + c toml.LogFile + rootDir func() string +} + +func (f *fileConfig) Dir() string { + s := *f.c.Dir + if s == "" { + s = f.rootDir() + } + return s +} + +func (f *fileConfig) MaxSize() utils.FileSize { + return *f.c.MaxSize +} + +func (f *fileConfig) MaxAgeDays() int64 { + return *f.c.MaxAgeDays +} + +func (f *fileConfig) MaxBackups() int64 { + return *f.c.MaxBackups +} + +func (l *logConfig) File() config.File { + return &fileConfig{c: l.c.File, rootDir: l.rootDir} +} + +func (l *logConfig) UnixTimestamps() bool { + return *l.c.UnixTS +} + +func 
(l *logConfig) JSONConsole() bool { + return *l.c.JSONConsole +} + +func (l *logConfig) DefaultLevel() zapcore.Level { + return l.defaultLevel +} + +func (l *logConfig) Level() zapcore.Level { + return l.level() +} diff --git a/core/services/chainlink/config_log_test.go b/core/services/chainlink/config_log_test.go new file mode 100644 index 00000000..484968c2 --- /dev/null +++ b/core/services/chainlink/config_log_test.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestLogConfig(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + log := cfg.Log() + file := log.File() + + assert.Equal(t, "log/file/dir", file.Dir()) + assert.Equal(t, uint64(100*utils.GB), uint64(file.MaxSize())) + assert.Equal(t, int64(17), file.MaxAgeDays()) + assert.Equal(t, int64(9), file.MaxBackups()) + assert.Equal(t, true, log.UnixTimestamps()) + assert.Equal(t, true, log.JSONConsole()) + assert.Equal(t, zapcore.Level(3), log.DefaultLevel()) + assert.Equal(t, zapcore.Level(3), log.Level()) +} diff --git a/core/services/chainlink/config_mercury.go b/core/services/chainlink/config_mercury.go new file mode 100644 index 00000000..0b6a9e56 --- /dev/null +++ b/core/services/chainlink/config_mercury.go @@ -0,0 +1,62 @@ +package plugin + +import ( + "time" + + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" +) + +var _ config.MercuryCache = (*mercuryCacheConfig)(nil) + +type mercuryCacheConfig struct { + c toml.MercuryCache +} + +func (m *mercuryCacheConfig) LatestReportTTL() time.Duration { + return m.c.LatestReportTTL.Duration() +} +func (m *mercuryCacheConfig) MaxStaleAge() time.Duration { + return m.c.MaxStaleAge.Duration() +} 
+func (m *mercuryCacheConfig) LatestReportDeadline() time.Duration { + return m.c.LatestReportDeadline.Duration() +} + +type mercuryTLSConfig struct { + c toml.MercuryTLS +} + +func (m *mercuryTLSConfig) CertFile() string { + return *m.c.CertFile +} + +type mercuryConfig struct { + c toml.Mercury + s toml.MercurySecrets +} + +func (m *mercuryConfig) Credentials(credName string) *types.MercuryCredentials { + if mc, ok := m.s.Credentials[credName]; ok { + c := &types.MercuryCredentials{ + URL: mc.URL.URL().String(), + Password: string(*mc.Password), + Username: string(*mc.Username), + } + if mc.LegacyURL != nil && mc.LegacyURL.URL() != nil { + c.LegacyURL = mc.LegacyURL.URL().String() + } + return c + } + return nil +} + +func (m *mercuryConfig) Cache() config.MercuryCache { + return &mercuryCacheConfig{c: m.c.Cache} +} + +func (m *mercuryConfig) TLS() config.MercuryTLS { + return &mercuryTLSConfig{c: m.c.TLS} +} diff --git a/core/services/chainlink/config_mercury_test.go b/core/services/chainlink/config_mercury_test.go new file mode 100644 index 00000000..5fa5e63e --- /dev/null +++ b/core/services/chainlink/config_mercury_test.go @@ -0,0 +1,50 @@ +package plugin + +import ( + "testing" + + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/config/toml" +) + +const ( + secretsMercury = ` +[Mercury.Credentials.cred1] +URL = "https://chain1.link" +Username = "username1" +Password = "password1" + +[Mercury.Credentials.cred2] +URL = "https://chain2.link" +Username = "username2" +Password = "password2" +` +) + +func TestMercuryConfig(t *testing.T) { + opts := GeneralConfigOpts{ + SecretsStrings: []string{secretsMercury}, + } + cfg, err := opts.New() + require.NoError(t, err) + + m := cfg.Mercury() + assert.Equal(t, &types.MercuryCredentials{URL: "https://chain1.link", Username: "username1", Password: "password1"}, m.Credentials("cred1")) + 
assert.Equal(t, &types.MercuryCredentials{URL: "https://chain2.link", Username: "username2", Password: "password2"}, m.Credentials("cred2")) +} + +func TestMercuryTLS(t *testing.T) { + certPath := "/path/to/cert.pem" + transmission := toml.Mercury{ + TLS: toml.MercuryTLS{ + CertFile: &certPath, + }, + } + cfg := mercuryConfig{c: transmission} + + assert.Equal(t, certPath, cfg.TLS().CertFile()) +} diff --git a/core/services/chainlink/config_ocr.go b/core/services/chainlink/config_ocr.go new file mode 100644 index 00000000..618f828c --- /dev/null +++ b/core/services/chainlink/config_ocr.go @@ -0,0 +1,69 @@ +package plugin + +import ( + "time" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +var _ config.OCR = (*ocrConfig)(nil) + +type ocrConfig struct { + c toml.OCR +} + +func (o *ocrConfig) Enabled() bool { + return *o.c.Enabled +} + +func (o *ocrConfig) BlockchainTimeout() time.Duration { + return o.c.BlockchainTimeout.Duration() +} + +func (o *ocrConfig) ContractPollInterval() time.Duration { + return o.c.ContractPollInterval.Duration() +} + +func (o *ocrConfig) ContractSubscribeInterval() time.Duration { + return o.c.ContractSubscribeInterval.Duration() +} + +func (o *ocrConfig) KeyBundleID() (string, error) { + b := o.c.KeyBundleID + if *b == zeroSha256Hash { + return "", nil + } + return b.String(), nil +} + +func (o *ocrConfig) ObservationTimeout() time.Duration { + return o.c.ObservationTimeout.Duration() +} + +func (o *ocrConfig) SimulateTransactions() bool { + return *o.c.SimulateTransactions +} + +func (o *ocrConfig) TransmitterAddress() (ethkey.EIP55Address, error) { + a := *o.c.TransmitterAddress + if a.IsZero() { + return a, errors.Wrap(config.ErrEnvUnset, "OCR.TransmitterAddress is not set") + } + return a, nil +} + +func (o *ocrConfig) TraceLogging() bool { + return *o.c.TraceLogging +} + 
+func (o *ocrConfig) DefaultTransactionQueueDepth() uint32 { + return *o.c.DefaultTransactionQueueDepth +} + +func (o *ocrConfig) CaptureEATelemetry() bool { + return *o.c.CaptureEATelemetry +} diff --git a/core/services/chainlink/config_ocr2.go b/core/services/chainlink/config_ocr2.go new file mode 100644 index 00000000..d844678b --- /dev/null +++ b/core/services/chainlink/config_ocr2.go @@ -0,0 +1,70 @@ +package plugin + +import ( + "time" + + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" +) + +var _ config.OCR2 = (*ocr2Config)(nil) + +type ocr2Config struct { + c toml.OCR2 +} + +func (o *ocr2Config) Enabled() bool { + return *o.c.Enabled +} + +func (o *ocr2Config) ContractConfirmations() uint16 { + return uint16(*o.c.ContractConfirmations) +} + +func (o *ocr2Config) ContractTransmitterTransmitTimeout() time.Duration { + return o.c.ContractTransmitterTransmitTimeout.Duration() +} + +func (o *ocr2Config) BlockchainTimeout() time.Duration { + return o.c.BlockchainTimeout.Duration() +} + +func (o *ocr2Config) DatabaseTimeout() time.Duration { + return o.c.DatabaseTimeout.Duration() +} + +func (o *ocr2Config) ContractPollInterval() time.Duration { + return o.c.ContractPollInterval.Duration() +} + +func (o *ocr2Config) ContractSubscribeInterval() time.Duration { + return o.c.ContractSubscribeInterval.Duration() +} + +func (o *ocr2Config) KeyBundleID() (string, error) { + b := o.c.KeyBundleID + if *b == zeroSha256Hash { + return "", nil + } + return b.String(), nil +} + +func (o *ocr2Config) TraceLogging() bool { + return *o.c.TraceLogging +} + +func (o *ocr2Config) CaptureEATelemetry() bool { + return *o.c.CaptureEATelemetry +} + +func (o *ocr2Config) CaptureAutomationCustomTelemetry() bool { + return *o.c.CaptureAutomationCustomTelemetry +} + +func (o *ocr2Config) DefaultTransactionQueueDepth() uint32 { + return *o.c.DefaultTransactionQueueDepth +} + +func (o *ocr2Config) SimulateTransactions() bool { + return 
*o.c.SimulateTransactions +} diff --git a/core/services/chainlink/config_ocr2_test.go b/core/services/chainlink/config_ocr2_test.go new file mode 100644 index 00000000..8a91106b --- /dev/null +++ b/core/services/chainlink/config_ocr2_test.go @@ -0,0 +1,46 @@ +package plugin + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestOCR2Config(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + ocr2Cfg := cfg.OCR2() + + expectedContractTransmitterTransmitTimeout, err := time.ParseDuration("1m0s") + require.NoError(t, err) + expectedBlockchainTimeout, err := time.ParseDuration("3s") + require.NoError(t, err) + expectedDatabaseTimeout, err := time.ParseDuration("8s") + require.NoError(t, err) + expectedContractPollInterval, err := time.ParseDuration("1h0m0s") + require.NoError(t, err) + expectedContractSubscribeInterval, err := time.ParseDuration("1m0s") + require.NoError(t, err) + + require.Equal(t, true, ocr2Cfg.Enabled()) + require.Equal(t, uint16(11), ocr2Cfg.ContractConfirmations()) + require.Equal(t, expectedContractTransmitterTransmitTimeout, ocr2Cfg.ContractTransmitterTransmitTimeout()) + require.Equal(t, expectedBlockchainTimeout, ocr2Cfg.BlockchainTimeout()) + require.Equal(t, expectedDatabaseTimeout, ocr2Cfg.DatabaseTimeout()) + require.Equal(t, expectedContractPollInterval, ocr2Cfg.ContractPollInterval()) + require.Equal(t, expectedContractSubscribeInterval, ocr2Cfg.ContractSubscribeInterval()) + require.Equal(t, false, ocr2Cfg.SimulateTransactions()) + require.Equal(t, false, ocr2Cfg.TraceLogging()) + require.Equal(t, uint32(1), ocr2Cfg.DefaultTransactionQueueDepth()) + require.Equal(t, false, ocr2Cfg.CaptureEATelemetry()) + require.Equal(t, true, ocr2Cfg.CaptureAutomationCustomTelemetry()) + + keyBundleID, err := ocr2Cfg.KeyBundleID() + require.NoError(t, err) + require.Equal(t, 
"7a5f66bbe6594259325bf2b4f5b1a9c900000000000000000000000000000000", keyBundleID) +} diff --git a/core/services/chainlink/config_ocr_test.go b/core/services/chainlink/config_ocr_test.go new file mode 100644 index 00000000..332a295f --- /dev/null +++ b/core/services/chainlink/config_ocr_test.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestOCRConfig(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + ocrCfg := cfg.OCR() + + expectedObservationTimeout, err := time.ParseDuration("11s") + require.NoError(t, err) + expectedBlockchainTimeout, err := time.ParseDuration("3s") + require.NoError(t, err) + expectedContractPollInterval, err := time.ParseDuration("1h0m0s") + require.NoError(t, err) + expectedContractSubscribeInterval, err := time.ParseDuration("1m0s") + require.NoError(t, err) + + require.Equal(t, true, ocrCfg.Enabled()) + require.Equal(t, expectedObservationTimeout, ocrCfg.ObservationTimeout()) + require.Equal(t, expectedBlockchainTimeout, ocrCfg.BlockchainTimeout()) + require.Equal(t, expectedContractPollInterval, ocrCfg.ContractPollInterval()) + require.Equal(t, expectedContractSubscribeInterval, ocrCfg.ContractSubscribeInterval()) + require.Equal(t, true, ocrCfg.SimulateTransactions()) + require.Equal(t, false, ocrCfg.TraceLogging()) + require.Equal(t, uint32(12), ocrCfg.DefaultTransactionQueueDepth()) + require.Equal(t, false, ocrCfg.CaptureEATelemetry()) + + keyBundleID, err := ocrCfg.KeyBundleID() + require.NoError(t, err) + require.Equal(t, "acdd42797a8b921b2910497badc5000600000000000000000000000000000000", keyBundleID) + + transmitterAddress, err := ocrCfg.TransmitterAddress() + require.NoError(t, err) + require.Equal(t, "0xa0788FC17B1dEe36f057c42B6F373A34B014687e", transmitterAddress.String()) +} diff --git a/core/services/chainlink/config_p2p.go b/core/services/chainlink/config_p2p.go new 
file mode 100644 index 00000000..05adbcab --- /dev/null +++ b/core/services/chainlink/config_p2p.go @@ -0,0 +1,82 @@ +package plugin + +import ( + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + + "github.com/goplugin/libocr/commontypes" +) + +type p2p struct { + c toml.P2P +} + +func (p *p2p) Enabled() bool { + return p.V2().Enabled() +} + +func (p *p2p) PeerID() p2pkey.PeerID { + return *p.c.PeerID +} + +func (p *p2p) TraceLogging() bool { + return *p.c.TraceLogging +} + +func (p *p2p) IncomingMessageBufferSize() int { + return int(*p.c.IncomingMessageBufferSize) +} + +func (p *p2p) OutgoingMessageBufferSize() int { + return int(*p.c.OutgoingMessageBufferSize) +} + +func (p *p2p) V2() config.V2 { + return &p2pv2{p.c.V2} +} + +type p2pv2 struct { + c toml.P2PV2 +} + +func (v *p2pv2) Enabled() bool { + return *v.c.Enabled +} + +func (v *p2pv2) AnnounceAddresses() []string { + if a := v.c.AnnounceAddresses; a != nil { + return *a + } + return nil +} + +func (v *p2pv2) DefaultBootstrappers() (locators []commontypes.BootstrapperLocator) { + if d := v.c.DefaultBootstrappers; d != nil { + return *d + } + return nil +} + +func (v *p2pv2) DeltaDial() commonconfig.Duration { + if d := v.c.DeltaDial; d != nil { + return *d + } + return commonconfig.Duration{} +} + +func (v *p2pv2) DeltaReconcile() commonconfig.Duration { + if d := v.c.DeltaReconcile; d != nil { + return *d + + } + return commonconfig.Duration{} +} + +func (v *p2pv2) ListenAddresses() []string { + if l := v.c.ListenAddresses; l != nil { + return *l + } + return nil +} diff --git a/core/services/chainlink/config_p2p_test.go b/core/services/chainlink/config_p2p_test.go new file mode 100644 index 00000000..de037501 --- /dev/null +++ b/core/services/chainlink/config_p2p_test.go @@ -0,0 +1,46 @@ +package plugin + +import ( + 
"testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/commontypes" +) + +func TestP2PConfig(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + p2p := cfg.P2P() + assert.Equal(t, "p2p_12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw", p2p.PeerID().String()) + assert.Equal(t, 13, p2p.IncomingMessageBufferSize()) + assert.Equal(t, 17, p2p.OutgoingMessageBufferSize()) + assert.True(t, p2p.TraceLogging()) + + v2 := p2p.V2() + assert.False(t, v2.Enabled()) + assert.Equal(t, []string{"a", "b", "c"}, v2.AnnounceAddresses()) + assert.ElementsMatch( + t, + []commontypes.BootstrapperLocator{ + { + PeerID: "12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw", + Addrs: []string{"test:99"}, + }, + { + PeerID: "12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw", + Addrs: []string{"foo:42", "bar:10"}, + }, + }, + v2.DefaultBootstrappers(), + ) + assert.Equal(t, time.Minute, v2.DeltaDial().Duration()) + assert.Equal(t, time.Second, v2.DeltaReconcile().Duration()) + assert.Equal(t, []string{"foo", "bar"}, v2.ListenAddresses()) +} diff --git a/core/services/chainlink/config_password.go b/core/services/chainlink/config_password.go new file mode 100644 index 00000000..b8a89d29 --- /dev/null +++ b/core/services/chainlink/config_password.go @@ -0,0 +1,10 @@ +package plugin + +type passwordConfig struct { + keystore func() string + vrf func() string +} + +func (p *passwordConfig) Keystore() string { return p.keystore() } + +func (p *passwordConfig) VRF() string { return p.vrf() } diff --git a/core/services/chainlink/config_password_test.go b/core/services/chainlink/config_password_test.go new file mode 100644 index 00000000..356009b1 --- /dev/null +++ b/core/services/chainlink/config_password_test.go @@ -0,0 +1,20 @@ +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +func TestPasswordConfig(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + p := cfg.Password() + assert.Equal(t, "", p.VRF()) + assert.Equal(t, "", p.Keystore()) +} diff --git a/core/services/chainlink/config_prometheus.go b/core/services/chainlink/config_prometheus.go new file mode 100644 index 00000000..e616b113 --- /dev/null +++ b/core/services/chainlink/config_prometheus.go @@ -0,0 +1,16 @@ +package plugin + +import ( + "github.com/goplugin/pluginv3.0/v2/core/config/toml" +) + +type prometheusConfig struct { + s toml.PrometheusSecrets +} + +func (p *prometheusConfig) AuthToken() string { + if p.s.AuthToken == nil { + return "" + } + return string(*p.s.AuthToken) +} diff --git a/core/services/chainlink/config_prometheus_test.go b/core/services/chainlink/config_prometheus_test.go new file mode 100644 index 00000000..29c06a16 --- /dev/null +++ b/core/services/chainlink/config_prometheus_test.go @@ -0,0 +1,19 @@ +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPrometheusConfig(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + p := cfg.Prometheus() + assert.Equal(t, "", p.AuthToken()) +} diff --git a/core/services/chainlink/config_pyroscope.go b/core/services/chainlink/config_pyroscope.go new file mode 100644 index 00000000..3b433a13 --- /dev/null +++ b/core/services/chainlink/config_pyroscope.go @@ -0,0 +1,23 @@ +package plugin + +import "github.com/goplugin/pluginv3.0/v2/core/config/toml" + +type pyroscopeConfig struct { + c toml.Pyroscope + s toml.PyroscopeSecrets +} + +func (p *pyroscopeConfig) AuthToken() string { + if p.s.AuthToken == nil { + return "" + } + return string(*p.s.AuthToken) +} + +func (p *pyroscopeConfig) ServerAddress() string { + return 
*p.c.ServerAddress +} + +func (p *pyroscopeConfig) Environment() string { + return *p.c.Environment +} diff --git a/core/services/chainlink/config_pyroscope_test.go b/core/services/chainlink/config_pyroscope_test.go new file mode 100644 index 00000000..76274202 --- /dev/null +++ b/core/services/chainlink/config_pyroscope_test.go @@ -0,0 +1,23 @@ +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPyroscopeConfigTest(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + SecretsStrings: []string{secretsFullTOML}, + } + cfg, err := opts.New() + require.NoError(t, err) + + pcfg := cfg.Pyroscope() + + require.Equal(t, "pyroscope-token", pcfg.AuthToken()) + require.Equal(t, "http://localhost:4040", pcfg.ServerAddress()) + require.Equal(t, "tests", pcfg.Environment()) + +} diff --git a/core/services/chainlink/config_sentry.go b/core/services/chainlink/config_sentry.go new file mode 100644 index 00000000..d360b746 --- /dev/null +++ b/core/services/chainlink/config_sentry.go @@ -0,0 +1,25 @@ +package plugin + +import ( + "github.com/goplugin/pluginv3.0/v2/core/config/toml" +) + +type sentryConfig struct { + c toml.Sentry +} + +func (s sentryConfig) DSN() string { + return *s.c.DSN +} + +func (s sentryConfig) Debug() bool { + return *s.c.Debug +} + +func (s sentryConfig) Environment() string { + return *s.c.Environment +} + +func (s sentryConfig) Release() string { + return *s.c.Release +} diff --git a/core/services/chainlink/config_telemetry_ingress.go b/core/services/chainlink/config_telemetry_ingress.go new file mode 100644 index 00000000..8e1e1e39 --- /dev/null +++ b/core/services/chainlink/config_telemetry_ingress.go @@ -0,0 +1,76 @@ +package plugin + +import ( + "net/url" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" +) + +var _ config.TelemetryIngress = (*telemetryIngressConfig)(nil) + +type telemetryIngressConfig struct { + 
c toml.TelemetryIngress +} + +type telemetryIngressEndpointConfig struct { + c toml.TelemetryIngressEndpoint +} + +func (t *telemetryIngressConfig) Logging() bool { + return *t.c.Logging +} + +func (t *telemetryIngressConfig) UniConn() bool { + return *t.c.UniConn +} + +func (t *telemetryIngressConfig) BufferSize() uint { + return uint(*t.c.BufferSize) +} + +func (t *telemetryIngressConfig) MaxBatchSize() uint { + return uint(*t.c.MaxBatchSize) +} + +func (t *telemetryIngressConfig) SendInterval() time.Duration { + return t.c.SendInterval.Duration() +} + +func (t *telemetryIngressConfig) SendTimeout() time.Duration { + return t.c.SendTimeout.Duration() +} + +func (t *telemetryIngressConfig) UseBatchSend() bool { + return *t.c.UseBatchSend +} + +func (t *telemetryIngressConfig) Endpoints() []config.TelemetryIngressEndpoint { + var endpoints []config.TelemetryIngressEndpoint + for _, e := range t.c.Endpoints { + endpoints = append(endpoints, &telemetryIngressEndpointConfig{ + c: e, + }) + } + return endpoints +} + +func (t *telemetryIngressEndpointConfig) Network() string { + return *t.c.Network +} + +func (t *telemetryIngressEndpointConfig) ChainID() string { + return *t.c.ChainID +} + +func (t *telemetryIngressEndpointConfig) URL() *url.URL { + if t.c.URL.IsZero() { + return nil + } + return t.c.URL.URL() +} + +func (t *telemetryIngressEndpointConfig) ServerPubKey() string { + return *t.c.ServerPubKey +} diff --git a/core/services/chainlink/config_telemetry_ingress_test.go b/core/services/chainlink/config_telemetry_ingress_test.go new file mode 100644 index 00000000..cd8b6d48 --- /dev/null +++ b/core/services/chainlink/config_telemetry_ingress_test.go @@ -0,0 +1,34 @@ +package plugin + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTelemetryIngressConfig(t *testing.T) { + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + } + cfg, err := opts.New() + require.NoError(t, 
err) + + ticfg := cfg.TelemetryIngress() + assert.True(t, ticfg.Logging()) + assert.True(t, ticfg.UniConn()) + assert.Equal(t, uint(1234), ticfg.BufferSize()) + assert.Equal(t, uint(4321), ticfg.MaxBatchSize()) + assert.Equal(t, time.Minute, ticfg.SendInterval()) + assert.Equal(t, 5*time.Second, ticfg.SendTimeout()) + assert.True(t, ticfg.UseBatchSend()) + + tec := cfg.TelemetryIngress().Endpoints() + + assert.Equal(t, 1, len(tec)) + assert.Equal(t, "EVM", tec[0].Network()) + assert.Equal(t, "1", tec[0].ChainID()) + assert.Equal(t, "prom.test", tec[0].URL().String()) + assert.Equal(t, "test-pub-key", tec[0].ServerPubKey()) +} diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go new file mode 100644 index 00000000..1cb6b59d --- /dev/null +++ b/core/services/chainlink/config_test.go @@ -0,0 +1,1572 @@ +package plugin + +import ( + _ "embed" + "math" + "math/big" + "net" + "strings" + "testing" + "time" + + "github.com/kylelemons/godebug/diff" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + ocrcommontypes "github.com/goplugin/libocr/commontypes" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/config" + commoncfg "github.com/goplugin/plugin-common/pkg/config" + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/utils/hex" + coscfg "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + "github.com/goplugin/plugin-solana/pkg/solana" + solcfg "github.com/goplugin/plugin-solana/pkg/solana/config" + stkcfg "github.com/goplugin/plugin-starknet/relayer/pkg/plugin/config" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + 
legacy "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin/cfgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + //go:embed testdata/config-full.toml + fullTOML string + //go:embed testdata/config-multi-chain.toml + multiChainTOML string + + multiChain = Config{ + Core: toml.Core{ + RootDir: ptr("my/root/dir"), + AuditLogger: toml.AuditLogger{ + Enabled: ptr(true), + ForwardToUrl: mustURL("http://localhost:9898"), + Headers: ptr([]models.ServiceHeader{ + { + Header: "Authorization", + Value: "token", + }, + { + Header: "X-SomeOther-Header", + Value: "value with spaces | and a bar+*", + }, + }), + JsonWrapperKey: ptr("event"), + }, + Database: toml.Database{ + Listener: toml.DatabaseListener{ + FallbackPollInterval: commonconfig.MustNewDuration(2 * time.Minute), + }, + }, + Log: toml.Log{ + Level: ptr(toml.LogLevel(zapcore.PanicLevel)), + JSONConsole: ptr(true), + }, + JobPipeline: toml.JobPipeline{ + HTTPRequest: toml.JobPipelineHTTPRequest{ + DefaultTimeout: commonconfig.MustNewDuration(30 * time.Second), + }, + }, + OCR2: toml.OCR2{ + Enabled: ptr(true), + DatabaseTimeout: commonconfig.MustNewDuration(20 * time.Second), + }, + OCR: toml.OCR{ + Enabled: ptr(true), + BlockchainTimeout: commonconfig.MustNewDuration(5 * time.Second), + }, + P2P: toml.P2P{ + IncomingMessageBufferSize: ptr[int64](999), + }, + Keeper: toml.Keeper{ + GasPriceBufferPercent: ptr[uint16](10), + }, + AutoPprof: toml.AutoPprof{ + CPUProfileRate: ptr[int64](7), + }, + }, + EVM: []*evmcfg.EVMConfig{ + { + ChainID: ubig.NewI(1), + Chain: evmcfg.Chain{ + FinalityDepth: ptr[uint32](26), + FinalityTagEnabled: ptr[bool](false), 
+ }, + Nodes: []*evmcfg.Node{ + { + Name: ptr("primary"), + WSURL: mustURL("wss://web.socket/mainnet"), + }, + { + Name: ptr("secondary"), + HTTPURL: mustURL("http://broadcast.mirror"), + SendOnly: ptr(true), + }, + }}, + { + ChainID: ubig.NewI(42), + Chain: evmcfg.Chain{ + GasEstimator: evmcfg.GasEstimator{ + PriceDefault: assets.NewWeiI(math.MaxInt64), + }, + }, + Nodes: []*evmcfg.Node{ + { + Name: ptr("foo"), + WSURL: mustURL("wss://web.socket/test/foo"), + }, + }}, + { + ChainID: ubig.NewI(137), + Chain: evmcfg.Chain{ + GasEstimator: evmcfg.GasEstimator{ + Mode: ptr("FixedPrice"), + }, + }, + Nodes: []*evmcfg.Node{ + { + Name: ptr("bar"), + WSURL: mustURL("wss://web.socket/test/bar"), + }, + }}, + }, + Cosmos: []*coscfg.TOMLConfig{ + { + ChainID: ptr("Ibiza-808"), + Chain: coscfg.Chain{ + MaxMsgsPerBatch: ptr[int64](13), + }, + Nodes: []*coscfg.Node{ + {Name: ptr("primary"), TendermintURL: commoncfg.MustParseURL("http://columbus.cosmos.com")}, + }}, + { + ChainID: ptr("Malaga-420"), + Chain: coscfg.Chain{ + BlocksUntilTxTimeout: ptr[int64](20), + }, + Nodes: []*coscfg.Node{ + {Name: ptr("secondary"), TendermintURL: commoncfg.MustParseURL("http://bombay.cosmos.com")}, + }}, + }, + Solana: []*solana.TOMLConfig{ + { + ChainID: ptr("mainnet"), + Chain: solcfg.Chain{ + MaxRetries: ptr[int64](12), + }, + Nodes: []*solcfg.Node{ + {Name: ptr("primary"), URL: commoncfg.MustParseURL("http://mainnet.solana.com")}, + }, + }, + { + ChainID: ptr("testnet"), + Chain: solcfg.Chain{ + OCR2CachePollPeriod: commoncfg.MustNewDuration(time.Minute), + }, + Nodes: []*solcfg.Node{ + {Name: ptr("secondary"), URL: commoncfg.MustParseURL("http://testnet.solana.com")}, + }, + }, + }, + Starknet: []*stkcfg.TOMLConfig{ + { + ChainID: ptr("foobar"), + Chain: stkcfg.Chain{ + ConfirmationPoll: commoncfg.MustNewDuration(time.Hour), + }, + Nodes: []*stkcfg.Node{ + {Name: ptr("primary"), URL: commoncfg.MustParseURL("http://stark.node")}, + }, + }, + }, + } +) + +func TestConfig_Marshal(t 
*testing.T) { + zeroSeconds := *commonconfig.MustNewDuration(time.Second * 0) + second := *commonconfig.MustNewDuration(time.Second) + minute := *commonconfig.MustNewDuration(time.Minute) + hour := *commonconfig.MustNewDuration(time.Hour) + mustPeerID := func(s string) *p2pkey.PeerID { + id, err := p2pkey.MakePeerID(s) + require.NoError(t, err) + return &id + } + mustDecimal := func(s string) *decimal.Decimal { + d, err := decimal.NewFromString(s) + require.NoError(t, err) + return &d + } + mustAddress := func(s string) *ethkey.EIP55Address { + a, err := ethkey.NewEIP55Address(s) + require.NoError(t, err) + return &a + } + selectionMode := client.NodeSelectionMode_HighestHead + + global := Config{ + Core: toml.Core{ + InsecureFastScrypt: ptr(true), + RootDir: ptr("test/root/dir"), + ShutdownGracePeriod: commonconfig.MustNewDuration(10 * time.Second), + Insecure: toml.Insecure{ + DevWebServer: ptr(false), + OCRDevelopmentMode: ptr(false), + InfiniteDepthQueries: ptr(false), + DisableRateLimiting: ptr(false), + }, + Tracing: toml.Tracing{ + Enabled: ptr(true), + CollectorTarget: ptr("localhost:4317"), + NodeID: ptr("clc-ocr-sol-devnet-node-1"), + SamplingRatio: ptr(1.0), + Mode: ptr("tls"), + TLSCertPath: ptr("/path/to/cert.pem"), + Attributes: map[string]string{ + "test": "load", + "env": "dev", + }, + }, + }, + } + + full := global + + serviceHeaders := []models.ServiceHeader{ + {Header: "Authorization", Value: "token"}, + {Header: "X-SomeOther-Header", Value: "value with spaces | and a bar+*"}, + } + full.AuditLogger = toml.AuditLogger{ + Enabled: ptr(true), + ForwardToUrl: mustURL("http://localhost:9898"), + Headers: ptr(serviceHeaders), + JsonWrapperKey: ptr("event"), + } + + full.Feature = toml.Feature{ + FeedsManager: ptr(true), + LogPoller: ptr(true), + UICSAKeys: ptr(true), + } + full.Database = toml.Database{ + DefaultIdleInTxSessionTimeout: commonconfig.MustNewDuration(time.Minute), + DefaultLockTimeout: commonconfig.MustNewDuration(time.Hour), + 
DefaultQueryTimeout: commonconfig.MustNewDuration(time.Second), + LogQueries: ptr(true), + MigrateOnStartup: ptr(true), + MaxIdleConns: ptr[int64](7), + MaxOpenConns: ptr[int64](13), + Listener: toml.DatabaseListener{ + MaxReconnectDuration: commonconfig.MustNewDuration(time.Minute), + MinReconnectInterval: commonconfig.MustNewDuration(5 * time.Minute), + FallbackPollInterval: commonconfig.MustNewDuration(2 * time.Minute), + }, + Lock: toml.DatabaseLock{ + Enabled: ptr(false), + LeaseDuration: &minute, + LeaseRefreshInterval: &second, + }, + Backup: toml.DatabaseBackup{ + Dir: ptr("test/backup/dir"), + Frequency: &hour, + Mode: &legacy.DatabaseBackupModeFull, + OnVersionUpgrade: ptr(true), + }, + } + full.TelemetryIngress = toml.TelemetryIngress{ + UniConn: ptr(true), + Logging: ptr(true), + BufferSize: ptr[uint16](1234), + MaxBatchSize: ptr[uint16](4321), + SendInterval: commonconfig.MustNewDuration(time.Minute), + SendTimeout: commonconfig.MustNewDuration(5 * time.Second), + UseBatchSend: ptr(true), + Endpoints: []toml.TelemetryIngressEndpoint{{ + Network: ptr("EVM"), + ChainID: ptr("1"), + ServerPubKey: ptr("test-pub-key"), + URL: mustURL("prom.test")}, + }, + } + + full.Log = toml.Log{ + Level: ptr(toml.LogLevel(zapcore.DPanicLevel)), + JSONConsole: ptr(true), + UnixTS: ptr(true), + File: toml.LogFile{ + Dir: ptr("log/file/dir"), + MaxSize: ptr[utils.FileSize](100 * utils.GB), + MaxAgeDays: ptr[int64](17), + MaxBackups: ptr[int64](9), + }, + } + full.WebServer = toml.WebServer{ + AuthenticationMethod: ptr("local"), + AllowOrigins: ptr("*"), + BridgeResponseURL: mustURL("https://bridge.response"), + BridgeCacheTTL: commonconfig.MustNewDuration(10 * time.Second), + HTTPWriteTimeout: commonconfig.MustNewDuration(time.Minute), + HTTPPort: ptr[uint16](56), + SecureCookies: ptr(true), + SessionTimeout: commonconfig.MustNewDuration(time.Hour), + SessionReaperExpiration: commonconfig.MustNewDuration(7 * 24 * time.Hour), + HTTPMaxSize: 
ptr(utils.FileSize(uint64(32770))), + StartTimeout: commonconfig.MustNewDuration(15 * time.Second), + ListenIP: mustIP("192.158.1.37"), + MFA: toml.WebServerMFA{ + RPID: ptr("test-rpid"), + RPOrigin: ptr("test-rp-origin"), + }, + LDAP: toml.WebServerLDAP{ + ServerTLS: ptr(true), + SessionTimeout: commonconfig.MustNewDuration(15 * time.Minute), + QueryTimeout: commonconfig.MustNewDuration(2 * time.Minute), + BaseUserAttr: ptr("uid"), + BaseDN: ptr("dc=custom,dc=example,dc=com"), + UsersDN: ptr("ou=users"), + GroupsDN: ptr("ou=groups"), + ActiveAttribute: ptr("organizationalStatus"), + ActiveAttributeAllowedValue: ptr("ACTIVE"), + AdminUserGroupCN: ptr("NodeAdmins"), + EditUserGroupCN: ptr("NodeEditors"), + RunUserGroupCN: ptr("NodeRunners"), + ReadUserGroupCN: ptr("NodeReadOnly"), + UserApiTokenEnabled: ptr(false), + UserAPITokenDuration: commonconfig.MustNewDuration(240 * time.Hour), + UpstreamSyncInterval: commonconfig.MustNewDuration(0 * time.Second), + UpstreamSyncRateLimit: commonconfig.MustNewDuration(2 * time.Minute), + }, + RateLimit: toml.WebServerRateLimit{ + Authenticated: ptr[int64](42), + AuthenticatedPeriod: commonconfig.MustNewDuration(time.Second), + Unauthenticated: ptr[int64](7), + UnauthenticatedPeriod: commonconfig.MustNewDuration(time.Minute), + }, + TLS: toml.WebServerTLS{ + CertPath: ptr("tls/cert/path"), + Host: ptr("tls-host"), + KeyPath: ptr("tls/key/path"), + HTTPSPort: ptr[uint16](6789), + ForceRedirect: ptr(true), + ListenIP: mustIP("192.158.1.38"), + }, + } + full.JobPipeline = toml.JobPipeline{ + ExternalInitiatorsEnabled: ptr(true), + MaxRunDuration: commonconfig.MustNewDuration(time.Hour), + MaxSuccessfulRuns: ptr[uint64](123456), + ReaperInterval: commonconfig.MustNewDuration(4 * time.Hour), + ReaperThreshold: commonconfig.MustNewDuration(7 * 24 * time.Hour), + ResultWriteQueueDepth: ptr[uint32](10), + HTTPRequest: toml.JobPipelineHTTPRequest{ + MaxSize: ptr[utils.FileSize](100 * utils.MB), + DefaultTimeout: 
commonconfig.MustNewDuration(time.Minute), + }, + } + full.FluxMonitor = toml.FluxMonitor{ + DefaultTransactionQueueDepth: ptr[uint32](100), + SimulateTransactions: ptr(true), + } + full.OCR2 = toml.OCR2{ + Enabled: ptr(true), + ContractConfirmations: ptr[uint32](11), + BlockchainTimeout: commonconfig.MustNewDuration(3 * time.Second), + ContractPollInterval: commonconfig.MustNewDuration(time.Hour), + ContractSubscribeInterval: commonconfig.MustNewDuration(time.Minute), + ContractTransmitterTransmitTimeout: commonconfig.MustNewDuration(time.Minute), + DatabaseTimeout: commonconfig.MustNewDuration(8 * time.Second), + KeyBundleID: ptr(models.MustSha256HashFromHex("7a5f66bbe6594259325bf2b4f5b1a9c9")), + CaptureEATelemetry: ptr(false), + CaptureAutomationCustomTelemetry: ptr(true), + DefaultTransactionQueueDepth: ptr[uint32](1), + SimulateTransactions: ptr(false), + TraceLogging: ptr(false), + } + full.OCR = toml.OCR{ + Enabled: ptr(true), + ObservationTimeout: commonconfig.MustNewDuration(11 * time.Second), + BlockchainTimeout: commonconfig.MustNewDuration(3 * time.Second), + ContractPollInterval: commonconfig.MustNewDuration(time.Hour), + ContractSubscribeInterval: commonconfig.MustNewDuration(time.Minute), + DefaultTransactionQueueDepth: ptr[uint32](12), + KeyBundleID: ptr(models.MustSha256HashFromHex("acdd42797a8b921b2910497badc50006")), + SimulateTransactions: ptr(true), + TransmitterAddress: ptr(ethkey.MustEIP55Address("0xa0788FC17B1dEe36f057c42B6F373A34B014687e")), + CaptureEATelemetry: ptr(false), + TraceLogging: ptr(false), + } + full.P2P = toml.P2P{ + IncomingMessageBufferSize: ptr[int64](13), + OutgoingMessageBufferSize: ptr[int64](17), + PeerID: mustPeerID("12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw"), + TraceLogging: ptr(true), + V2: toml.P2PV2{ + Enabled: ptr(false), + AnnounceAddresses: &[]string{"a", "b", "c"}, + DefaultBootstrappers: &[]ocrcommontypes.BootstrapperLocator{ + {PeerID: "12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw", 
Addrs: []string{"foo:42", "bar:10"}}, + {PeerID: "12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw", Addrs: []string{"test:99"}}, + }, + DeltaDial: commonconfig.MustNewDuration(time.Minute), + DeltaReconcile: commonconfig.MustNewDuration(time.Second), + ListenAddresses: &[]string{"foo", "bar"}, + }, + } + full.Keeper = toml.Keeper{ + DefaultTransactionQueueDepth: ptr[uint32](17), + GasPriceBufferPercent: ptr[uint16](12), + GasTipCapBufferPercent: ptr[uint16](43), + BaseFeeBufferPercent: ptr[uint16](89), + MaxGracePeriod: ptr[int64](31), + TurnLookBack: ptr[int64](91), + Registry: toml.KeeperRegistry{ + CheckGasOverhead: ptr[uint32](90), + PerformGasOverhead: ptr[uint32](math.MaxUint32), + SyncInterval: commonconfig.MustNewDuration(time.Hour), + SyncUpkeepQueueSize: ptr[uint32](31), + MaxPerformDataSize: ptr[uint32](5000), + }, + } + full.AutoPprof = toml.AutoPprof{ + Enabled: ptr(true), + ProfileRoot: ptr("prof/root"), + PollInterval: commonconfig.MustNewDuration(time.Minute), + GatherDuration: commonconfig.MustNewDuration(12 * time.Second), + GatherTraceDuration: commonconfig.MustNewDuration(13 * time.Second), + MaxProfileSize: ptr[utils.FileSize](utils.GB), + CPUProfileRate: ptr[int64](7), + MemProfileRate: ptr[int64](9), + BlockProfileRate: ptr[int64](5), + MutexProfileFraction: ptr[int64](2), + MemThreshold: ptr[utils.FileSize](utils.GB), + GoroutineThreshold: ptr[int64](999), + } + full.Pyroscope = toml.Pyroscope{ + ServerAddress: ptr("http://localhost:4040"), + Environment: ptr("tests"), + } + full.Sentry = toml.Sentry{ + Debug: ptr(true), + DSN: ptr("sentry-dsn"), + Environment: ptr("dev"), + Release: ptr("v1.2.3"), + } + full.EVM = []*evmcfg.EVMConfig{ + { + ChainID: ubig.NewI(1), + Enabled: ptr(false), + Chain: evmcfg.Chain{ + AutoCreateKey: ptr(false), + BalanceMonitor: evmcfg.BalanceMonitor{ + Enabled: ptr(true), + }, + BlockBackfillDepth: ptr[uint32](100), + BlockBackfillSkip: ptr(true), + ChainType: ptr("Optimism"), + FinalityDepth: 
ptr[uint32](42), + FinalityTagEnabled: ptr[bool](false), + FlagsContractAddress: mustAddress("0xae4E781a6218A8031764928E88d457937A954fC3"), + + GasEstimator: evmcfg.GasEstimator{ + Mode: ptr("SuggestedPrice"), + EIP1559DynamicFees: ptr(true), + BumpPercent: ptr[uint16](10), + BumpThreshold: ptr[uint32](6), + BumpTxDepth: ptr[uint32](6), + BumpMin: assets.NewWeiI(100), + FeeCapDefault: assets.NewWeiI(math.MaxInt64), + LimitDefault: ptr[uint32](12), + LimitMax: ptr[uint32](17), + LimitMultiplier: mustDecimal("1.234"), + LimitTransfer: ptr[uint32](100), + TipCapDefault: assets.NewWeiI(2), + TipCapMin: assets.NewWeiI(1), + PriceDefault: assets.NewWeiI(math.MaxInt64), + PriceMax: assets.NewWei(mustHexToBig(t, "FFFFFFFFFFFF")), + PriceMin: assets.NewWeiI(13), + + LimitJobType: evmcfg.GasLimitJobType{ + OCR: ptr[uint32](1001), + DR: ptr[uint32](1002), + VRF: ptr[uint32](1003), + FM: ptr[uint32](1004), + Keeper: ptr[uint32](1005), + OCR2: ptr[uint32](1006), + }, + + BlockHistory: evmcfg.BlockHistoryEstimator{ + BatchSize: ptr[uint32](17), + BlockHistorySize: ptr[uint16](12), + CheckInclusionBlocks: ptr[uint16](18), + CheckInclusionPercentile: ptr[uint16](19), + EIP1559FeeCapBufferBlocks: ptr[uint16](13), + TransactionPercentile: ptr[uint16](15), + }, + }, + + KeySpecific: []evmcfg.KeySpecific{ + { + Key: mustAddress("0x2a3e23c6f242F5345320814aC8a1b4E58707D292"), + GasEstimator: evmcfg.KeySpecificGasEstimator{ + PriceMax: assets.NewWei(mustHexToBig(t, "FFFFFFFFFFFFFFFFFFFFFFFF")), + }, + }, + }, + + LinkContractAddress: mustAddress("0x538aAaB4ea120b2bC2fe5D296852D948F07D849e"), + LogBackfillBatchSize: ptr[uint32](17), + LogPollInterval: &minute, + LogKeepBlocksDepth: ptr[uint32](100000), + MinContractPayment: commonassets.NewLinkFromJuels(math.MaxInt64), + MinIncomingConfirmations: ptr[uint32](13), + NonceAutoSync: ptr(true), + NoNewHeadsThreshold: &minute, + OperatorFactoryAddress: mustAddress("0xa5B85635Be42F21f94F28034B7DA440EeFF0F418"), + RPCDefaultBatchSize: 
ptr[uint32](17), + RPCBlockQueryDelay: ptr[uint16](10), + + Transactions: evmcfg.Transactions{ + MaxInFlight: ptr[uint32](19), + MaxQueued: ptr[uint32](99), + ReaperInterval: &minute, + ReaperThreshold: &minute, + ResendAfterThreshold: &hour, + ForwardersEnabled: ptr(true), + }, + + HeadTracker: evmcfg.HeadTracker{ + HistoryDepth: ptr[uint32](15), + MaxBufferSize: ptr[uint32](17), + SamplingInterval: &hour, + }, + + NodePool: evmcfg.NodePool{ + PollFailureThreshold: ptr[uint32](5), + PollInterval: &minute, + SelectionMode: &selectionMode, + SyncThreshold: ptr[uint32](13), + LeaseDuration: &zeroSeconds, + }, + OCR: evmcfg.OCR{ + ContractConfirmations: ptr[uint16](11), + ContractTransmitterTransmitTimeout: &minute, + DatabaseTimeout: &second, + DeltaCOverride: commonconfig.MustNewDuration(time.Hour), + DeltaCJitterOverride: commonconfig.MustNewDuration(time.Second), + ObservationGracePeriod: &second, + }, + OCR2: evmcfg.OCR2{ + Automation: evmcfg.Automation{ + GasLimit: ptr[uint32](540), + }, + }, + }, + Nodes: []*evmcfg.Node{ + { + Name: ptr("foo"), + HTTPURL: mustURL("https://foo.web"), + WSURL: mustURL("wss://web.socket/test/foo"), + }, + { + Name: ptr("bar"), + HTTPURL: mustURL("https://bar.com"), + WSURL: mustURL("wss://web.socket/test/bar"), + }, + { + Name: ptr("broadcast"), + HTTPURL: mustURL("http://broadcast.mirror"), + SendOnly: ptr(true), + }, + }}, + } + full.Solana = []*solana.TOMLConfig{ + { + ChainID: ptr("mainnet"), + Enabled: ptr(false), + Chain: solcfg.Chain{ + BalancePollPeriod: commoncfg.MustNewDuration(time.Minute), + ConfirmPollPeriod: commoncfg.MustNewDuration(time.Second), + OCR2CachePollPeriod: commoncfg.MustNewDuration(time.Minute), + OCR2CacheTTL: commoncfg.MustNewDuration(time.Hour), + TxTimeout: commoncfg.MustNewDuration(time.Hour), + TxRetryTimeout: commoncfg.MustNewDuration(time.Minute), + TxConfirmTimeout: commoncfg.MustNewDuration(time.Second), + SkipPreflight: ptr(true), + Commitment: ptr("banana"), + MaxRetries: ptr[int64](7), + 
FeeEstimatorMode: ptr("fixed"), + ComputeUnitPriceMax: ptr[uint64](1000), + ComputeUnitPriceMin: ptr[uint64](10), + ComputeUnitPriceDefault: ptr[uint64](100), + FeeBumpPeriod: commoncfg.MustNewDuration(time.Minute), + }, + Nodes: []*solcfg.Node{ + {Name: ptr("primary"), URL: commoncfg.MustParseURL("http://solana.web")}, + {Name: ptr("foo"), URL: commoncfg.MustParseURL("http://solana.foo")}, + {Name: ptr("bar"), URL: commoncfg.MustParseURL("http://solana.bar")}, + }, + }, + } + full.Starknet = []*stkcfg.TOMLConfig{ + { + ChainID: ptr("foobar"), + Enabled: ptr(true), + Chain: stkcfg.Chain{ + OCR2CachePollPeriod: commoncfg.MustNewDuration(6 * time.Hour), + OCR2CacheTTL: commoncfg.MustNewDuration(3 * time.Minute), + RequestTimeout: commoncfg.MustNewDuration(time.Minute + 3*time.Second), + TxTimeout: commoncfg.MustNewDuration(13 * time.Second), + ConfirmationPoll: commoncfg.MustNewDuration(42 * time.Second), + }, + Nodes: []*stkcfg.Node{ + {Name: ptr("primary"), URL: commoncfg.MustParseURL("http://stark.node")}, + }, + }, + } + full.Cosmos = []*coscfg.TOMLConfig{ + { + ChainID: ptr("Malaga-420"), + Enabled: ptr(true), + Chain: coscfg.Chain{ + Bech32Prefix: ptr("wasm"), + BlockRate: commoncfg.MustNewDuration(time.Minute), + BlocksUntilTxTimeout: ptr[int64](12), + ConfirmPollPeriod: commoncfg.MustNewDuration(time.Second), + FallbackGasPrice: mustDecimal("0.001"), + GasToken: ptr("ucosm"), + GasLimitMultiplier: mustDecimal("1.2"), + MaxMsgsPerBatch: ptr[int64](17), + OCR2CachePollPeriod: commoncfg.MustNewDuration(time.Minute), + OCR2CacheTTL: commoncfg.MustNewDuration(time.Hour), + TxMsgTimeout: commoncfg.MustNewDuration(time.Second), + }, + Nodes: []*coscfg.Node{ + {Name: ptr("primary"), TendermintURL: commoncfg.MustParseURL("http://tender.mint")}, + {Name: ptr("foo"), TendermintURL: commoncfg.MustParseURL("http://foo.url")}, + {Name: ptr("bar"), TendermintURL: commoncfg.MustParseURL("http://bar.web")}, + }, + }, + } + full.Mercury = toml.Mercury{ + Cache: 
toml.MercuryCache{ + LatestReportTTL: commonconfig.MustNewDuration(100 * time.Second), + MaxStaleAge: commonconfig.MustNewDuration(101 * time.Second), + LatestReportDeadline: commonconfig.MustNewDuration(102 * time.Second), + }, + TLS: toml.MercuryTLS{ + CertFile: ptr("/path/to/cert.pem"), + }, + } + + for _, tt := range []struct { + name string + config Config + exp string + }{ + {"empty", Config{}, ``}, + {"global", global, `InsecureFastScrypt = true +RootDir = 'test/root/dir' +ShutdownGracePeriod = '10s' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = true +CollectorTarget = 'localhost:4317' +NodeID = 'clc-ocr-sol-devnet-node-1' +SamplingRatio = 1.0 +Mode = 'tls' +TLSCertPath = '/path/to/cert.pem' + +[Tracing.Attributes] +env = 'dev' +test = 'load' +`}, + {"AuditLogger", Config{Core: toml.Core{AuditLogger: full.AuditLogger}}, `[AuditLogger] +Enabled = true +ForwardToUrl = 'http://localhost:9898' +JsonWrapperKey = 'event' +Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and a bar+*'] +`}, + {"Feature", Config{Core: toml.Core{Feature: full.Feature}}, `[Feature] +FeedsManager = true +LogPoller = true +UICSAKeys = true +`}, + {"Database", Config{Core: toml.Core{Database: full.Database}}, `[Database] +DefaultIdleInTxSessionTimeout = '1m0s' +DefaultLockTimeout = '1h0m0s' +DefaultQueryTimeout = '1s' +LogQueries = true +MaxIdleConns = 7 +MaxOpenConns = 13 +MigrateOnStartup = true + +[Database.Backup] +Dir = 'test/backup/dir' +Frequency = '1h0m0s' +Mode = 'full' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '1m0s' +MinReconnectInterval = '5m0s' +FallbackPollInterval = '2m0s' + +[Database.Lock] +Enabled = false +LeaseDuration = '1m0s' +LeaseRefreshInterval = '1s' +`}, + {"TelemetryIngress", Config{Core: toml.Core{TelemetryIngress: full.TelemetryIngress}}, `[TelemetryIngress] +UniConn = true +Logging = true +BufferSize = 
1234 +MaxBatchSize = 4321 +SendInterval = '1m0s' +SendTimeout = '5s' +UseBatchSend = true + +[[TelemetryIngress.Endpoints]] +Network = 'EVM' +ChainID = '1' +URL = 'prom.test' +ServerPubKey = 'test-pub-key' +`}, + + {"Log", Config{Core: toml.Core{Log: full.Log}}, `[Log] +Level = 'crit' +JSONConsole = true +UnixTS = true + +[Log.File] +Dir = 'log/file/dir' +MaxSize = '100.00gb' +MaxAgeDays = 17 +MaxBackups = 9 +`}, + {"WebServer", Config{Core: toml.Core{WebServer: full.WebServer}}, `[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = '*' +BridgeResponseURL = 'https://bridge.response' +BridgeCacheTTL = '10s' +HTTPWriteTimeout = '1m0s' +HTTPPort = 56 +SecureCookies = true +SessionTimeout = '1h0m0s' +SessionReaperExpiration = '168h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '192.158.1.37' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = 'dc=custom,dc=example,dc=com' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = 'organizationalStatus' +ActiveAttributeAllowedValue = 'ACTIVE' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = 'test-rpid' +RPOrigin = 'test-rp-origin' + +[WebServer.RateLimit] +Authenticated = 42 +AuthenticatedPeriod = '1s' +Unauthenticated = 7 +UnauthenticatedPeriod = '1m0s' + +[WebServer.TLS] +CertPath = 'tls/cert/path' +ForceRedirect = true +Host = 'tls-host' +HTTPSPort = 6789 +KeyPath = 'tls/key/path' +ListenIP = '192.158.1.38' +`}, + {"FluxMonitor", Config{Core: toml.Core{FluxMonitor: full.FluxMonitor}}, `[FluxMonitor] +DefaultTransactionQueueDepth = 100 +SimulateTransactions = true +`}, + {"JobPipeline", Config{Core: toml.Core{JobPipeline: full.JobPipeline}}, `[JobPipeline] +ExternalInitiatorsEnabled = true 
+MaxRunDuration = '1h0m0s' +MaxSuccessfulRuns = 123456 +ReaperInterval = '4h0m0s' +ReaperThreshold = '168h0m0s' +ResultWriteQueueDepth = 10 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '1m0s' +MaxSize = '100.00mb' +`}, + {"OCR", Config{Core: toml.Core{OCR: full.OCR}}, `[OCR] +Enabled = true +ObservationTimeout = '11s' +BlockchainTimeout = '3s' +ContractPollInterval = '1h0m0s' +ContractSubscribeInterval = '1m0s' +DefaultTransactionQueueDepth = 12 +KeyBundleID = 'acdd42797a8b921b2910497badc5000600000000000000000000000000000000' +SimulateTransactions = true +TransmitterAddress = '0xa0788FC17B1dEe36f057c42B6F373A34B014687e' +CaptureEATelemetry = false +TraceLogging = false +`}, + {"OCR2", Config{Core: toml.Core{OCR2: full.OCR2}}, `[OCR2] +Enabled = true +ContractConfirmations = 11 +BlockchainTimeout = '3s' +ContractPollInterval = '1h0m0s' +ContractSubscribeInterval = '1m0s' +ContractTransmitterTransmitTimeout = '1m0s' +DatabaseTimeout = '8s' +KeyBundleID = '7a5f66bbe6594259325bf2b4f5b1a9c900000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false +`}, + {"P2P", Config{Core: toml.Core{P2P: full.P2P}}, `[P2P] +IncomingMessageBufferSize = 13 +OutgoingMessageBufferSize = 17 +PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw' +TraceLogging = true + +[P2P.V2] +Enabled = false +AnnounceAddresses = ['a', 'b', 'c'] +DefaultBootstrappers = ['12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw@foo:42/bar:10', '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw@test:99'] +DeltaDial = '1m0s' +DeltaReconcile = '1s' +ListenAddresses = ['foo', 'bar'] +`}, + {"Keeper", Config{Core: toml.Core{Keeper: full.Keeper}}, `[Keeper] +DefaultTransactionQueueDepth = 17 +GasPriceBufferPercent = 12 +GasTipCapBufferPercent = 43 +BaseFeeBufferPercent = 89 +MaxGracePeriod = 31 +TurnLookBack = 91 + +[Keeper.Registry] +CheckGasOverhead = 90 
+PerformGasOverhead = 4294967295 +MaxPerformDataSize = 5000 +SyncInterval = '1h0m0s' +SyncUpkeepQueueSize = 31 +`}, + {"AutoPprof", Config{Core: toml.Core{AutoPprof: full.AutoPprof}}, `[AutoPprof] +Enabled = true +ProfileRoot = 'prof/root' +PollInterval = '1m0s' +GatherDuration = '12s' +GatherTraceDuration = '13s' +MaxProfileSize = '1.00gb' +CPUProfileRate = 7 +MemProfileRate = 9 +BlockProfileRate = 5 +MutexProfileFraction = 2 +MemThreshold = '1.00gb' +GoroutineThreshold = 999 +`}, + {"Pyroscope", Config{Core: toml.Core{Pyroscope: full.Pyroscope}}, `[Pyroscope] +ServerAddress = 'http://localhost:4040' +Environment = 'tests' +`}, + {"Sentry", Config{Core: toml.Core{Sentry: full.Sentry}}, `[Sentry] +Debug = true +DSN = 'sentry-dsn' +Environment = 'dev' +Release = 'v1.2.3' +`}, + {"EVM", Config{EVM: full.EVM}, `[[EVM]] +ChainID = '1' +Enabled = false +AutoCreateKey = false +BlockBackfillDepth = 100 +BlockBackfillSkip = true +ChainType = 'Optimism' +FinalityDepth = 42 +FinalityTagEnabled = false +FlagsContractAddress = '0xae4E781a6218A8031764928E88d457937A954fC3' +LinkContractAddress = '0x538aAaB4ea120b2bC2fe5D296852D948F07D849e' +LogBackfillBatchSize = 17 +LogPollInterval = '1m0s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 13 +MinContractPayment = '9.223372036854775807 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '1m0s' +OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' +RPCDefaultBatchSize = 17 +RPCBlockQueryDelay = 10 + +[EVM.Transactions] +ForwardersEnabled = true +MaxInFlight = 19 +MaxQueued = 99 +ReaperInterval = '1m0s' +ReaperThreshold = '1m0s' +ResendAfterThreshold = '1h0m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '9.223372036854775807 ether' +PriceMax = '281.474976710655 micro' +PriceMin = '13 wei' +LimitDefault = 12 +LimitMax = 17 +LimitMultiplier = '1.234' +LimitTransfer = 100 +BumpMin = '100 wei' +BumpPercent = 10 +BumpThreshold = 6 +BumpTxDepth = 6 
+EIP1559DynamicFees = true +FeeCapDefault = '9.223372036854775807 ether' +TipCapDefault = '2 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.LimitJobType] +OCR = 1001 +OCR2 = 1006 +DR = 1002 +VRF = 1003 +FM = 1004 +Keeper = 1005 + +[EVM.GasEstimator.BlockHistory] +BatchSize = 17 +BlockHistorySize = 12 +CheckInclusionBlocks = 18 +CheckInclusionPercentile = 19 +EIP1559FeeCapBufferBlocks = 13 +TransactionPercentile = 15 + +[EVM.HeadTracker] +HistoryDepth = 15 +MaxBufferSize = 17 +SamplingInterval = '1h0m0s' + +[[EVM.KeySpecific]] +Key = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' + +[EVM.KeySpecific.GasEstimator] +PriceMax = '79.228162514264337593543950335 gether' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '1m0s' +SelectionMode = 'HighestHead' +SyncThreshold = 13 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 11 +ContractTransmitterTransmitTimeout = '1m0s' +DatabaseTimeout = '1s' +DeltaCOverride = '1h0m0s' +DeltaCJitterOverride = '1s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 540 + +[[EVM.Nodes]] +Name = 'foo' +WSURL = 'wss://web.socket/test/foo' +HTTPURL = 'https://foo.web' + +[[EVM.Nodes]] +Name = 'bar' +WSURL = 'wss://web.socket/test/bar' +HTTPURL = 'https://bar.com' + +[[EVM.Nodes]] +Name = 'broadcast' +HTTPURL = 'http://broadcast.mirror' +SendOnly = true +`}, + {"Cosmos", Config{Cosmos: full.Cosmos}, `[[Cosmos]] +ChainID = 'Malaga-420' +Enabled = true +Bech32Prefix = 'wasm' +BlockRate = '1m0s' +BlocksUntilTxTimeout = 12 +ConfirmPollPeriod = '1s' +FallbackGasPrice = '0.001' +GasToken = 'ucosm' +GasLimitMultiplier = '1.2' +MaxMsgsPerBatch = 17 +OCR2CachePollPeriod = '1m0s' +OCR2CacheTTL = '1h0m0s' +TxMsgTimeout = '1s' + +[[Cosmos.Nodes]] +Name = 'primary' +TendermintURL = 'http://tender.mint' + +[[Cosmos.Nodes]] +Name = 'foo' +TendermintURL = 'http://foo.url' + +[[Cosmos.Nodes]] +Name = 'bar' +TendermintURL = 'http://bar.web' +`}, + {"Solana", Config{Solana: full.Solana}, `[[Solana]] +ChainID = 
'mainnet' +Enabled = false +BalancePollPeriod = '1m0s' +ConfirmPollPeriod = '1s' +OCR2CachePollPeriod = '1m0s' +OCR2CacheTTL = '1h0m0s' +TxTimeout = '1h0m0s' +TxRetryTimeout = '1m0s' +TxConfirmTimeout = '1s' +SkipPreflight = true +Commitment = 'banana' +MaxRetries = 7 +FeeEstimatorMode = 'fixed' +ComputeUnitPriceMax = 1000 +ComputeUnitPriceMin = 10 +ComputeUnitPriceDefault = 100 +FeeBumpPeriod = '1m0s' + +[[Solana.Nodes]] +Name = 'primary' +URL = 'http://solana.web' + +[[Solana.Nodes]] +Name = 'foo' +URL = 'http://solana.foo' + +[[Solana.Nodes]] +Name = 'bar' +URL = 'http://solana.bar' +`}, + {"Starknet", Config{Starknet: full.Starknet}, `[[Starknet]] +ChainID = 'foobar' +Enabled = true +OCR2CachePollPeriod = '6h0m0s' +OCR2CacheTTL = '3m0s' +RequestTimeout = '1m3s' +TxTimeout = '13s' +ConfirmationPoll = '42s' + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://stark.node' +`}, + {"Mercury", Config{Core: toml.Core{Mercury: full.Mercury}}, `[Mercury] +[Mercury.Cache] +LatestReportTTL = '1m40s' +MaxStaleAge = '1m41s' +LatestReportDeadline = '1m42s' + +[Mercury.TLS] +CertFile = '/path/to/cert.pem' +`}, + {"full", full, fullTOML}, + {"multi-chain", multiChain, multiChainTOML}, + } { + t.Run(tt.name, func(t *testing.T) { + s, err := tt.config.TOMLString() + require.NoError(t, err) + assert.Equal(t, tt.exp, s, diff.Diff(tt.exp, s)) + + var got Config + + require.NoError(t, config.DecodeTOML(strings.NewReader(s), &got)) + ts, err := got.TOMLString() + require.NoError(t, err) + assert.Equal(t, tt.config, got, diff.Diff(s, ts)) + }) + } +} + +func TestConfig_full(t *testing.T) { + var got Config + require.NoError(t, config.DecodeTOML(strings.NewReader(fullTOML), &got)) + // Except for some EVM node fields. 
+ for c := range got.EVM { + addr, err := ethkey.NewEIP55Address("0x2a3e23c6f242F5345320814aC8a1b4E58707D292") + require.NoError(t, err) + if got.EVM[c].ChainWriter.FromAddress == nil { + got.EVM[c].ChainWriter.FromAddress = &addr + } + if got.EVM[c].ChainWriter.ForwarderAddress == nil { + got.EVM[c].ChainWriter.ForwarderAddress = &addr + } + for n := range got.EVM[c].Nodes { + if got.EVM[c].Nodes[n].WSURL == nil { + got.EVM[c].Nodes[n].WSURL = new(commonconfig.URL) + } + if got.EVM[c].Nodes[n].SendOnly == nil { + got.EVM[c].Nodes[n].SendOnly = ptr(true) + } + if got.EVM[c].Nodes[n].Order == nil { + got.EVM[c].Nodes[n].Order = ptr(int32(100)) + } + } + } + + cfgtest.AssertFieldsNotNil(t, got) +} + +//go:embed testdata/config-invalid.toml +var invalidTOML string + +func TestConfig_Validate(t *testing.T) { + for _, tt := range []struct { + name string + toml string + exp string + }{ + {name: "invalid", toml: invalidTOML, exp: `invalid configuration: 6 errors: + - Database.Lock.LeaseRefreshInterval: invalid value (6s): must be less than or equal to half of LeaseDuration (10s) + - WebServer: 8 errors: + - LDAP.BaseDN: invalid value (): LDAP BaseDN can not be empty + - LDAP.BaseUserAttr: invalid value (): LDAP BaseUserAttr can not be empty + - LDAP.UsersDN: invalid value (): LDAP UsersDN can not be empty + - LDAP.GroupsDN: invalid value (): LDAP GroupsDN can not be empty + - LDAP.AdminUserGroupCN: invalid value (): LDAP AdminUserGroupCN can not be empty + - LDAP.RunUserGroupCN: invalid value (): LDAP ReadUserGroupCN can not be empty + - LDAP.RunUserGroupCN: invalid value (): LDAP RunUserGroupCN can not be empty + - LDAP.ReadUserGroupCN: invalid value (): LDAP ReadUserGroupCN can not be empty + - EVM: 8 errors: + - 1.ChainID: invalid value (1): duplicate - must be unique + - 0.Nodes.1.Name: invalid value (foo): duplicate - must be unique + - 3.Nodes.4.WSURL: invalid value (ws://dupe.com): duplicate - must be unique + - 0: 3 errors: + - GasEstimator.BumpTxDepth: invalid 
value (11): must be less than or equal to Transactions.MaxInFlight + - GasEstimator: 6 errors: + - BumpPercent: invalid value (1): may not be less than Geth's default of 10 + - TipCapDefault: invalid value (3 wei): must be greater than or equal to TipCapMinimum + - FeeCapDefault: invalid value (3 wei): must be greater than or equal to TipCapDefault + - PriceMin: invalid value (10 gwei): must be less than or equal to PriceDefault + - PriceMax: invalid value (10 gwei): must be greater than or equal to PriceDefault + - BlockHistory.BlockHistorySize: invalid value (0): must be greater than or equal to 1 with BlockHistory Mode + - Nodes: 2 errors: + - 0: 2 errors: + - WSURL: missing: required for primary nodes + - HTTPURL: missing: required for all nodes + - 1.HTTPURL: missing: required for all nodes + - 1: 6 errors: + - ChainType: invalid value (Foo): must not be set with this chain id + - Nodes: missing: must have at least one node + - ChainType: invalid value (Foo): must be one of arbitrum, metis, xdai, optimismBedrock, celo, kroma, wemix, zksync, scroll or omitted + - HeadTracker.HistoryDepth: invalid value (30): must be equal to or greater than FinalityDepth + - GasEstimator: 2 errors: + - FeeCapDefault: invalid value (101 wei): must be equal to PriceMax (99 wei) since you are using FixedPrice estimation with gas bumping disabled in EIP1559 mode - PriceMax will be used as the FeeCap for transactions instead of FeeCapDefault + - PriceMax: invalid value (1 gwei): must be greater than or equal to PriceDefault + - KeySpecific.Key: invalid value (0xde709f2102306220921060314715629080e2fb77): duplicate - must be unique + - 2: 5 errors: + - ChainType: invalid value (Arbitrum): only "optimismBedrock" can be used with this chain id + - Nodes: missing: must have at least one node + - ChainType: invalid value (Arbitrum): must be one of arbitrum, metis, xdai, optimismBedrock, celo, kroma, wemix, zksync, scroll or omitted + - FinalityDepth: invalid value (0): must be greater 
than or equal to 1 + - MinIncomingConfirmations: invalid value (0): must be greater than or equal to 1 + - 3.Nodes: 5 errors: + - 0: 3 errors: + - Name: missing: required for all nodes + - WSURL: missing: required for primary nodes + - HTTPURL: empty: required for all nodes + - 1: 3 errors: + - Name: missing: required for all nodes + - WSURL: invalid value (http): must be ws or wss + - HTTPURL: missing: required for all nodes + - 2: 3 errors: + - Name: empty: required for all nodes + - WSURL: missing: required for primary nodes + - HTTPURL: invalid value (ws): must be http or https + - 3.HTTPURL: missing: required for all nodes + - 4.HTTPURL: missing: required for all nodes + - 4: 2 errors: + - ChainID: missing: required for all chains + - Nodes: missing: must have at least one node + - Cosmos: 5 errors: + - 1.ChainID: invalid value (Malaga-420): duplicate - must be unique + - 0.Nodes.1.Name: invalid value (test): duplicate - must be unique + - 0.Nodes: 2 errors: + - 0.TendermintURL: missing: required for all nodes + - 1.TendermintURL: missing: required for all nodes + - 1.Nodes: missing: must have at least one node + - 2: 2 errors: + - ChainID: missing: required for all chains + - Nodes: missing: must have at least one node + - Solana: 5 errors: + - 1.ChainID: invalid value (mainnet): duplicate - must be unique + - 1.Nodes.1.Name: invalid value (bar): duplicate - must be unique + - 0.Nodes: missing: must have at least one node + - 1.Nodes: 2 errors: + - 0.URL: missing: required for all nodes + - 1.URL: missing: required for all nodes + - 2: 2 errors: + - ChainID: missing: required for all chains + - Nodes: missing: must have at least one node + - Starknet: 3 errors: + - 0.Nodes.1.Name: invalid value (primary): duplicate - must be unique + - 0.ChainID: missing: required for all chains + - 1: 2 errors: + - ChainID: missing: required for all chains + - Nodes: missing: must have at least one node`}, + } { + t.Run(tt.name, func(t *testing.T) { + var c Config + 
require.NoError(t, config.DecodeTOML(strings.NewReader(tt.toml), &c)) + c.setDefaults() + assertValidationError(t, &c, tt.exp) + }) + } +} + +func mustURL(s string) *commonconfig.URL { + var u commonconfig.URL + if err := u.UnmarshalText([]byte(s)); err != nil { + panic(err) + } + return &u +} + +func mustIP(s string) *net.IP { + var ip net.IP + if err := ip.UnmarshalText([]byte(s)); err != nil { + panic(err) + } + return &ip +} + +var ( + //go:embed testdata/secrets-empty-effective.toml + emptyEffectiveSecretsTOML string + //go:embed testdata/config-empty-effective.toml + emptyEffectiveTOML string + //go:embed testdata/config-multi-chain-effective.toml + multiChainEffectiveTOML string + + //go:embed testdata/secrets-full.toml + secretsFullTOML string + //go:embed testdata/secrets-full-redacted.toml + secretsFullRedactedTOML string + + //go:embed testdata/secrets-multi.toml + secretsMultiTOML string + //go:embed testdata/secrets-multi-redacted.toml + secretsMultiRedactedTOML string +) + +func Test_generalConfig_LogConfiguration(t *testing.T) { + const ( + secrets = "# Secrets:\n" + input = "# Input Configuration:\n" + effective = "# Effective Configuration, with defaults applied:\n" + warning = "# Configuration warning:\n" + + deprecated = `` // none + ) + tests := []struct { + name string + inputConfig string + inputSecrets string + + wantConfig string + wantEffective string + wantSecrets string + wantWarning string + }{ + {name: "empty", wantEffective: emptyEffectiveTOML, wantSecrets: emptyEffectiveSecretsTOML}, + {name: "full", inputSecrets: secretsFullTOML, inputConfig: fullTOML, + wantConfig: fullTOML, wantEffective: fullTOML, wantSecrets: secretsFullRedactedTOML, wantWarning: deprecated}, + {name: "multi-chain", inputSecrets: secretsMultiTOML, inputConfig: multiChainTOML, + wantConfig: multiChainTOML, wantEffective: multiChainEffectiveTOML, wantSecrets: secretsMultiRedactedTOML}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lggr, 
observed := logger.TestLoggerObserved(t, zapcore.InfoLevel) + opts := GeneralConfigOpts{ + SkipEnv: true, + ConfigStrings: []string{tt.inputConfig}, + SecretsStrings: []string{tt.inputSecrets}, + } + c, err := opts.New() + require.NoError(t, err) + c.LogConfiguration(lggr.Infof, lggr.Warnf) + + inputLogs := observed.FilterMessageSnippet(secrets).All() + if assert.Len(t, inputLogs, 1) { + assert.Equal(t, zapcore.InfoLevel, inputLogs[0].Level) + got := strings.TrimPrefix(inputLogs[0].Message, secrets) + got = strings.TrimSuffix(got, "\n") + assert.Equal(t, tt.wantSecrets, got) + } + + inputLogs = observed.FilterMessageSnippet(input).All() + if assert.Len(t, inputLogs, 1) { + assert.Equal(t, zapcore.InfoLevel, inputLogs[0].Level) + got := strings.TrimPrefix(inputLogs[0].Message, input) + got = strings.TrimSuffix(got, "\n") + assert.Equal(t, tt.wantConfig, got) + } + + inputLogs = observed.FilterMessageSnippet(effective).All() + if assert.Len(t, inputLogs, 1) { + assert.Equal(t, zapcore.InfoLevel, inputLogs[0].Level) + got := strings.TrimPrefix(inputLogs[0].Message, effective) + got = strings.TrimSuffix(got, "\n") + assert.Equal(t, tt.wantEffective, got) + } + + inputLogs = observed.FilterMessageSnippet(warning).All() + if tt.wantWarning != "" && assert.Len(t, inputLogs, 1) { + assert.Equal(t, zapcore.WarnLevel, inputLogs[0].Level) + got := strings.TrimPrefix(inputLogs[0].Message, warning) + got = strings.TrimSuffix(got, "\n") + assert.Equal(t, tt.wantWarning, got) + } + }) + } +} + +func TestNewGeneralConfig_ParsingError_InvalidSyntax(t *testing.T) { + invalidTOML := "{ bad syntax {" + opts := GeneralConfigOpts{ + ConfigStrings: []string{invalidTOML}, + SecretsStrings: []string{secretsFullTOML}, + } + _, err := opts.New() + assert.EqualError(t, err, "failed to decode config TOML: toml: invalid character at start of key: {") +} + +func TestNewGeneralConfig_ParsingError_DuplicateField(t *testing.T) { + invalidTOML := `Dev = false +Dev = true` + opts := 
GeneralConfigOpts{ + ConfigStrings: []string{invalidTOML}, + SecretsStrings: []string{secretsFullTOML}, + } + _, err := opts.New() + assert.EqualError(t, err, "failed to decode config TOML: toml: key Dev is already defined") +} + +func TestNewGeneralConfig_SecretsOverrides(t *testing.T) { + // Provide a keystore password file and an env var with DB URL + const PWD_OVERRIDE = "great_password" + const DBURL_OVERRIDE = "http://user@db" + + t.Setenv("CL_DATABASE_URL", DBURL_OVERRIDE) + + // Check for two overrides + opts := GeneralConfigOpts{ + ConfigStrings: []string{fullTOML}, + SecretsStrings: []string{secretsFullTOML}, + } + c, err := opts.New() + assert.NoError(t, err) + c.SetPasswords(ptr(PWD_OVERRIDE), nil) + assert.Equal(t, PWD_OVERRIDE, c.Password().Keystore()) + dbURL := c.Database().URL() + assert.Equal(t, DBURL_OVERRIDE, (&dbURL).String()) +} + +func TestSecrets_Validate(t *testing.T) { + for _, tt := range []struct { + name string + toml string + exp string + }{ + {name: "partial", + toml: ` +Database.AllowSimplePasswords = true`, + exp: `invalid secrets: 2 errors: + - Database.URL: empty: must be provided and non-empty + - Password.Keystore: empty: must be provided and non-empty`}, + + {name: "invalid-urls", + toml: `[Database] +URL = "postgresql://user:passlocalhost:5432/asdf" +BackupURL = "foo-bar?password=asdf" +AllowSimplePasswords = false`, + exp: `invalid secrets: 2 errors: + - Database: 2 errors: + - URL: invalid value (*****): missing or insufficiently complex password: DB URL must be authenticated; plaintext URLs are not allowed. 
Database should be secured by a password matching the following complexity requirements: + Must have a length of 16-50 characters + Must not comprise: + Leading or trailing whitespace (note that a trailing newline in the password file, if present, will be ignored) + + - BackupURL: invalid value (*****): missing or insufficiently complex password: + Expected password complexity: + Must be at least 16 characters long + Must not comprise: + Leading or trailing whitespace + A user's API email + + Faults: + password is less than 16 characters long + . Database should be secured by a password matching the following complexity requirements: + Must have a length of 16-50 characters + Must not comprise: + Leading or trailing whitespace (note that a trailing newline in the password file, if present, will be ignored) + + - Password.Keystore: empty: must be provided and non-empty`}, + + {name: "invalid-urls-allowed", + toml: `[Database] +URL = "postgresql://user:passlocalhost:5432/asdf" +BackupURL = "foo-bar?password=asdf" +AllowSimplePasswords = true`, + exp: `invalid secrets: Password.Keystore: empty: must be provided and non-empty`}, + } { + t.Run(tt.name, func(t *testing.T) { + var s Secrets + require.NoError(t, config.DecodeTOML(strings.NewReader(tt.toml), &s)) + assertValidationError(t, &s, tt.exp) + }) + } +} + +func assertValidationError(t *testing.T, invalid interface{ Validate() error }, expMsg string) { + t.Helper() + if err := invalid.Validate(); assert.Error(t, err) { + got := err.Error() + assert.Equal(t, expMsg, got, diff.Diff(expMsg, got)) + } +} + +func TestConfig_setDefaults(t *testing.T) { + var c Config + c.EVM = evmcfg.EVMConfigs{{ChainID: ubig.NewI(99999133712345)}} + c.Cosmos = coscfg.TOMLConfigs{{ChainID: ptr("unknown cosmos chain")}} + c.Solana = solana.TOMLConfigs{{ChainID: ptr("unknown solana chain")}} + c.Starknet = stkcfg.TOMLConfigs{{ChainID: ptr("unknown starknet chain")}} + c.setDefaults() + if s, err := c.TOMLString(); assert.NoError(t, err) { 
+ t.Log(s, err) + } + cfgtest.AssertFieldsNotNil(t, c.Core) +} + +func Test_validateEnv(t *testing.T) { + t.Setenv("LOG_LEVEL", "warn") + t.Setenv("DATABASE_URL", "foo") + assert.ErrorContains(t, validateEnv(), `invalid environment: 2 errors: + - environment variable DATABASE_URL must not be set: unsupported with config v2 + - environment variable LOG_LEVEL must not be set: unsupported with config v2`) + + t.Setenv("GAS_UPDATER_ENABLED", "true") + t.Setenv("ETH_GAS_BUMP_TX_DEPTH", "7") + assert.ErrorContains(t, validateEnv(), `invalid environment: 4 errors: + - environment variable DATABASE_URL must not be set: unsupported with config v2 + - environment variable LOG_LEVEL must not be set: unsupported with config v2 + - environment variable ETH_GAS_BUMP_TX_DEPTH must not be set: unsupported with config v2 + - environment variable GAS_UPDATER_ENABLED must not be set: unsupported with config v2`) +} + +func TestConfig_SetFrom(t *testing.T) { + t.Parallel() + for _, tt := range []struct { + name string + exp string + from []string + }{ + {"empty", "", []string{""}}, + {"empty-full", fullTOML, []string{"", fullTOML}}, + {"empty-multi", multiChainTOML, []string{"", multiChainTOML}}, + {"full-empty", fullTOML, []string{fullTOML, ""}}, + {"multi-empty", multiChainTOML, []string{multiChainTOML, ""}}, + } { + t.Run(tt.name, func(t *testing.T) { + var c Config + for _, fs := range tt.from { + var f Config + require.NoError(t, config.DecodeTOML(strings.NewReader(fs), &f)) + require.NoError(t, c.SetFrom(&f)) + } + ts, err := c.TOMLString() + require.NoError(t, err) + assert.Equal(t, tt.exp, ts) + }) + } +} + +func TestConfig_warnings(t *testing.T) { + tests := []struct { + name string + config Config + expectedErrors []string + }{ + { + name: "No warnings", + config: Config{}, + expectedErrors: nil, + }, + { + name: "Value warning - unencrypted mode with TLS path set", + config: Config{ + Core: toml.Core{ + Tracing: toml.Tracing{ + Enabled: ptr(true), + Mode: 
ptr("unencrypted"), + TLSCertPath: ptr("/path/to/cert.pem"), + }, + }, + }, + expectedErrors: []string{"Tracing.TLSCertPath: invalid value (/path/to/cert.pem): must be empty when Tracing.Mode is 'unencrypted'"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.config.warnings() + if len(tt.expectedErrors) == 0 { + assert.NoError(t, err) + } else { + for _, expectedErr := range tt.expectedErrors { + assert.Contains(t, err.Error(), expectedErr) + } + } + }) + } +} + +func ptr[T any](t T) *T { return &t } + +func mustHexToBig(t *testing.T, hx string) *big.Int { + n, err := hex.ParseBig(hx) + require.NoError(t, err) + return n +} diff --git a/core/services/chainlink/config_threshold.go b/core/services/chainlink/config_threshold.go new file mode 100644 index 00000000..c37d040e --- /dev/null +++ b/core/services/chainlink/config_threshold.go @@ -0,0 +1,14 @@ +package plugin + +import "github.com/goplugin/pluginv3.0/v2/core/config/toml" + +type thresholdConfig struct { + s toml.ThresholdKeyShareSecrets +} + +func (t *thresholdConfig) ThresholdKeyShare() string { + if t.s.ThresholdKeyShare == nil { + return "" + } + return string(*t.s.ThresholdKeyShare) +} diff --git a/core/services/chainlink/config_threshold_test.go b/core/services/chainlink/config_threshold_test.go new file mode 100644 index 00000000..d0841375 --- /dev/null +++ b/core/services/chainlink/config_threshold_test.go @@ -0,0 +1,26 @@ +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + thresholdSecrets = ` +[Threshold] +ThresholdKeyShare = "something" +` +) + +func TestThresholdConfig(t *testing.T) { + opts := GeneralConfigOpts{ + SecretsStrings: []string{thresholdSecrets}, + } + cfg, err := opts.New() + require.NoError(t, err) + + th := cfg.Threshold() + assert.Equal(t, "something", th.ThresholdKeyShare()) +} diff --git a/core/services/chainlink/config_tracing.go 
b/core/services/chainlink/config_tracing.go new file mode 100644 index 00000000..776de1bb --- /dev/null +++ b/core/services/chainlink/config_tracing.go @@ -0,0 +1,35 @@ +package plugin + +import "github.com/goplugin/pluginv3.0/v2/core/config/toml" + +type tracingConfig struct { + s toml.Tracing +} + +func (t tracingConfig) Enabled() bool { + return *t.s.Enabled +} + +func (t tracingConfig) CollectorTarget() string { + return *t.s.CollectorTarget +} + +func (t tracingConfig) NodeID() string { + return *t.s.NodeID +} + +func (t tracingConfig) SamplingRatio() float64 { + return *t.s.SamplingRatio +} + +func (t tracingConfig) Mode() string { + return *t.s.Mode +} + +func (t tracingConfig) TLSCertPath() string { + return *t.s.TLSCertPath +} + +func (t tracingConfig) Attributes() map[string]string { + return t.s.Attributes +} diff --git a/core/services/chainlink/config_tracing_test.go b/core/services/chainlink/config_tracing_test.go new file mode 100644 index 00000000..4e1d49ba --- /dev/null +++ b/core/services/chainlink/config_tracing_test.go @@ -0,0 +1,50 @@ +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/config/toml" +) + +func TestTracing_Config(t *testing.T) { + // Test when all fields are non-nil + enabled := true + collectorTarget := "http://localhost:9000" + nodeID := "Node1" + samplingRatio := 0.5 + mode := "tls" + tlsCertPath := "/path/to/cert.pem" + attributes := map[string]string{"key": "value"} + tracing := toml.Tracing{ + Enabled: &enabled, + CollectorTarget: &collectorTarget, + NodeID: &nodeID, + SamplingRatio: &samplingRatio, + Mode: &mode, + TLSCertPath: &tlsCertPath, + Attributes: attributes, + } + tConfig := tracingConfig{s: tracing} + + assert.True(t, tConfig.Enabled()) + assert.Equal(t, "http://localhost:9000", tConfig.CollectorTarget()) + assert.Equal(t, "Node1", tConfig.NodeID()) + assert.Equal(t, 0.5, tConfig.SamplingRatio()) + assert.Equal(t, "tls", tConfig.Mode()) + 
assert.Equal(t, "/path/to/cert.pem", tConfig.TLSCertPath()) + assert.Equal(t, map[string]string{"key": "value"}, tConfig.Attributes()) + + // Test when all fields are nil + nilTracing := toml.Tracing{} + nilConfig := tracingConfig{s: nilTracing} + + assert.Panics(t, func() { nilConfig.Enabled() }) + assert.Panics(t, func() { nilConfig.CollectorTarget() }) + assert.Panics(t, func() { nilConfig.NodeID() }) + assert.Panics(t, func() { nilConfig.SamplingRatio() }) + assert.Panics(t, func() { nilConfig.Mode() }) + assert.Panics(t, func() { nilConfig.TLSCertPath() }) + assert.Nil(t, nilConfig.Attributes()) +} diff --git a/core/services/chainlink/config_web_server.go b/core/services/chainlink/config_web_server.go new file mode 100644 index 00000000..aca6a338 --- /dev/null +++ b/core/services/chainlink/config_web_server.go @@ -0,0 +1,315 @@ +package plugin + +import ( + "net" + "net/http" + "net/url" + "path/filepath" + "time" + + "github.com/gin-contrib/sessions" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" +) + +var _ config.WebServer = (*webServerConfig)(nil) + +type tlsConfig struct { + c toml.WebServerTLS + rootDir func() string +} + +func (t *tlsConfig) Dir() string { + return filepath.Join(t.rootDir(), "tls") +} + +func (t *tlsConfig) Host() string { + return *t.c.Host +} + +func (t *tlsConfig) HTTPSPort() uint16 { + return *t.c.HTTPSPort +} + +func (t *tlsConfig) ForceRedirect() bool { + return *t.c.ForceRedirect +} + +func (t *tlsConfig) certPath() string { + return *t.c.CertPath +} + +func (t *tlsConfig) CertFile() string { + s := t.certPath() + if s == "" { + s = filepath.Join(t.Dir(), "server.crt") + } + return s +} + +func (t *tlsConfig) keyPath() string { + return *t.c.KeyPath +} + +func (t *tlsConfig) KeyFile() string { + if t.keyPath() == "" { + return filepath.Join(t.Dir(), "server.key") + } + return t.keyPath() +} + +func (t 
// ListenIP returns the IP address the TLS server binds to.
// NOTE(review): the leading "func (t " of this method falls outside the visible
// chunk and was reconstructed — confirm against the full file.
func (t *tlsConfig) ListenIP() net.IP {
	return *t.c.ListenIP
}

// rateLimitConfig adapts toml.WebServerRateLimit to the config.RateLimit interface.
type rateLimitConfig struct {
	c toml.WebServerRateLimit
}

// Authenticated returns the request limit applied to authenticated clients.
// Dereferences without a nil check — presumably TOML defaults guarantee the
// field is set; verify against toml.WebServerRateLimit's defaulting.
func (r *rateLimitConfig) Authenticated() int64 {
	return *r.c.Authenticated
}

// AuthenticatedPeriod returns the window over which Authenticated applies.
func (r *rateLimitConfig) AuthenticatedPeriod() time.Duration {
	return r.c.AuthenticatedPeriod.Duration()
}

// Unauthenticated returns the request limit applied to unauthenticated clients.
func (r *rateLimitConfig) Unauthenticated() int64 {
	return *r.c.Unauthenticated
}

// UnauthenticatedPeriod returns the window over which Unauthenticated applies.
func (r *rateLimitConfig) UnauthenticatedPeriod() time.Duration {
	return r.c.UnauthenticatedPeriod.Duration()
}

// mfaConfig adapts toml.WebServerMFA to the config.MFA interface.
type mfaConfig struct {
	c toml.WebServerMFA
}

// RPID returns the configured MFA relying-party ID.
func (m *mfaConfig) RPID() string {
	return *m.c.RPID
}

// RPOrigin returns the configured MFA relying-party origin.
func (m *mfaConfig) RPOrigin() string {
	return *m.c.RPOrigin
}

// webServerConfig adapts the [WebServer] TOML section (plus its secrets) to the
// config.WebServer interface. rootDir is injected so TLS() can resolve its
// directory relative to the node's root directory.
type webServerConfig struct {
	c       toml.WebServer
	s       toml.WebServerSecrets
	rootDir func() string
}

// TLS returns the TLS sub-config, sharing this config's rootDir resolver.
func (w *webServerConfig) TLS() config.TLS {
	return &tlsConfig{c: w.c.TLS, rootDir: w.rootDir}
}

// RateLimit returns the rate-limiting sub-config.
func (w *webServerConfig) RateLimit() config.RateLimit {
	return &rateLimitConfig{c: w.c.RateLimit}
}

// MFA returns the multi-factor-auth sub-config.
func (w *webServerConfig) MFA() config.MFA {
	return &mfaConfig{c: w.c.MFA}
}

// LDAP returns the LDAP sub-config, pairing TOML settings with their secrets.
func (w *webServerConfig) LDAP() config.LDAP {
	return &ldapConfig{c: w.c.LDAP, s: w.s.LDAP}
}

// AuthenticationMethod returns the configured session authentication method.
func (w *webServerConfig) AuthenticationMethod() string {
	return *w.c.AuthenticationMethod
}

// AllowOrigins returns the configured CORS allowed-origins value.
func (w *webServerConfig) AllowOrigins() string {
	return *w.c.AllowOrigins
}

// BridgeResponseURL returns the configured bridge response URL, or nil when
// the setting is unset (zero).
func (w *webServerConfig) BridgeResponseURL() *url.URL {
	if w.c.BridgeResponseURL.IsZero() {
		return nil
	}
	return w.c.BridgeResponseURL.URL()
}

// BridgeCacheTTL returns how long bridge responses may be served from cache.
func (w *webServerConfig) BridgeCacheTTL() time.Duration {
	return w.c.BridgeCacheTTL.Duration()
}

// HTTPMaxSize returns the maximum accepted HTTP request size in bytes.
func (w *webServerConfig) HTTPMaxSize() int64 {
	return int64(*w.c.HTTPMaxSize)
}

// StartTimeout returns how long to wait for the web server to start.
func (w *webServerConfig) StartTimeout() time.Duration {
	return w.c.StartTimeout.Duration()
}

// HTTPWriteTimeout returns the HTTP server's write timeout.
func (w *webServerConfig) HTTPWriteTimeout() time.Duration {
	return w.c.HTTPWriteTimeout.Duration()
}

// HTTPPort returns the port the HTTP server listens on.
func (w *webServerConfig) HTTPPort() uint16 {
	return *w.c.HTTPPort
}

// SessionReaperExpiration returns how old a session must be before the reaper
// removes it.
func (w *webServerConfig) SessionReaperExpiration() commonconfig.Duration {
	return *w.c.SessionReaperExpiration
}

// SecureCookies reports whether session cookies are marked Secure (HTTPS-only).
func (w *webServerConfig) SecureCookies() bool {
	return *w.c.SecureCookies
}

// SessionOptions returns the cookie session options: Secure from config,
// HttpOnly always on, a hard-coded 30-day MaxAge, and strict SameSite.
func (w *webServerConfig) SessionOptions() sessions.Options {
	return sessions.Options{
		Secure:   w.SecureCookies(),
		HttpOnly: true,
		MaxAge:   86400 * 30,
		SameSite: http.SameSiteStrictMode,
	}
}

// SessionTimeout returns the configured session timeout.
// NOTE(review): this round-trips through MustNewDuration rather than returning
// *w.c.SessionTimeout like SessionReaperExpiration does — presumably the field
// types differ; confirm before simplifying.
func (w *webServerConfig) SessionTimeout() commonconfig.Duration {
	return *commonconfig.MustNewDuration(w.c.SessionTimeout.Duration())
}

// ListenIP returns the IP address the HTTP server binds to.
func (w *webServerConfig) ListenIP() net.IP {
	return *w.c.ListenIP
}

// ldapConfig adapts the [WebServer.LDAP] TOML section and its secrets to the
// config.LDAP interface. String getters return "" when the underlying setting
// is unset.
type ldapConfig struct {
	c toml.WebServerLDAP
	s toml.WebServerLDAPSecrets
}

// ServerAddress returns the LDAP server URL as a string, or "" when unset.
func (l *ldapConfig) ServerAddress() string {
	if l.s.ServerAddress == nil {
		return ""
	}
	return l.s.ServerAddress.URL().String()
}

// ReadOnlyUserLogin returns the read-only bind user login, or "" when unset.
func (l *ldapConfig) ReadOnlyUserLogin() string {
	if l.s.ReadOnlyUserLogin == nil {
		return ""
	}
	return string(*l.s.ReadOnlyUserLogin)
}

// ReadOnlyUserPass returns the read-only bind user password, or "" when unset.
func (l *ldapConfig) ReadOnlyUserPass() string {
	if l.s.ReadOnlyUserPass == nil {
		return ""
	}
	return string(*l.s.ReadOnlyUserPass)
}

// ServerTLS reports whether to use TLS for the LDAP connection; false when unset.
func (l *ldapConfig) ServerTLS() bool {
	if l.c.ServerTLS == nil {
		return false
	}
	return *l.c.ServerTLS
}

// SessionTimeout returns the LDAP session timeout.
// NOTE(review): unlike the string getters this dereferences without a nil
// check — presumably TOML defaulting guarantees it is set; confirm.
func (l *ldapConfig) SessionTimeout() commonconfig.Duration {
	return *l.c.SessionTimeout
}

// QueryTimeout returns the timeout applied to individual LDAP queries.
func (l *ldapConfig) QueryTimeout() time.Duration {
	return l.c.QueryTimeout.Duration()
}

// UserAPITokenDuration returns the lifetime of user API tokens.
// NOTE(review): also dereferenced without a nil check — see SessionTimeout.
func (l *ldapConfig) UserAPITokenDuration() commonconfig.Duration {
	return *l.c.UserAPITokenDuration
}

// BaseUserAttr returns the base user attribute name, or "" when unset.
func (l *ldapConfig) BaseUserAttr() string {
	if l.c.BaseUserAttr == nil {
		return ""
	}
	return *l.c.BaseUserAttr
}

// BaseDN returns the base distinguished name, or "" when unset.
func (l *ldapConfig) BaseDN() string {
	if l.c.BaseDN == nil {
		return ""
	}
	return *l.c.BaseDN
}

// UsersDN returns the users DN, or "" when unset.
func (l *ldapConfig) UsersDN() string {
	if l.c.UsersDN == nil {
		return ""
	}
	return *l.c.UsersDN
}

// GroupsDN returns the groups DN, or "" when unset.
func (l *ldapConfig) GroupsDN() string {
	if l.c.GroupsDN == nil {
		return ""
	}
	return *l.c.GroupsDN
}

// ActiveAttribute returns the attribute marking active users, or "" when unset.
func (l *ldapConfig) ActiveAttribute() string {
	if l.c.ActiveAttribute == nil {
		return ""
	}
	return *l.c.ActiveAttribute
}

// ActiveAttributeAllowedValue returns the value of ActiveAttribute that marks
// a user as active, or "" when unset.
func (l *ldapConfig) ActiveAttributeAllowedValue() string {
	if l.c.ActiveAttributeAllowedValue == nil {
		return ""
	}
	return *l.c.ActiveAttributeAllowedValue
}

// AdminUserGroupCN returns the CN of the admin user group, or "" when unset.
func (l *ldapConfig) AdminUserGroupCN() string {
	if l.c.AdminUserGroupCN == nil {
		return ""
	}
	return *l.c.AdminUserGroupCN
}

// EditUserGroupCN returns the CN of the edit user group, or "" when unset.
func (l *ldapConfig) EditUserGroupCN() string {
	if l.c.EditUserGroupCN == nil {
		return ""
	}
	return *l.c.EditUserGroupCN
}

// RunUserGroupCN returns the CN of the run user group, or "" when unset.
func (l *ldapConfig) RunUserGroupCN() string {
	if l.c.RunUserGroupCN == nil {
		return ""
	}
	return *l.c.RunUserGroupCN
}

// ReadUserGroupCN returns the CN of the read-only user group, or "" when unset.
func (l *ldapConfig) ReadUserGroupCN() string {
	if l.c.ReadUserGroupCN == nil {
		return ""
	}
	return *l.c.ReadUserGroupCN
}

// UserApiTokenEnabled reports whether user API tokens are enabled; false when unset.
func (l *ldapConfig) UserApiTokenEnabled() bool {
	if l.c.UserApiTokenEnabled == nil {
		return false
	}
	return *l.c.UserApiTokenEnabled
}

// UpstreamSyncInterval returns the upstream sync interval, or the zero Duration
// when unset.
func (l *ldapConfig) UpstreamSyncInterval() commonconfig.Duration {
	if l.c.UpstreamSyncInterval == nil {
		return commonconfig.Duration{}
	}
	return *l.c.UpstreamSyncInterval
}

// UpstreamSyncRateLimit returns the upstream sync rate limit, or the zero
// Duration when unset.
func (l *ldapConfig) UpstreamSyncRateLimit() commonconfig.Duration {
	if l.c.UpstreamSyncRateLimit == nil {
		return commonconfig.Duration{}
	}
	return *l.c.UpstreamSyncRateLimit
}

// ---- file: core/services/chainlink/config_web_server_test.go (new file in this patch) ----

package plugin

import (
	"testing"
	"time"

	commonconfig "github.com/goplugin/plugin-common/pkg/config"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestWebServerConfig checks that every WebServer getter reflects the values
// set in the shared fullTOML fixture, including the TLS, RateLimit, and MFA
// sub-configs.
func TestWebServerConfig(t *testing.T) {
	opts := GeneralConfigOpts{
		ConfigStrings: []string{fullTOML},
	}
	cfg, err := opts.New()
	require.NoError(t, err)

	ws := cfg.WebServer()
	assert.Equal(t, "*", ws.AllowOrigins())
	assert.Equal(t, "https://bridge.response", ws.BridgeResponseURL().String())
	assert.Equal(t, 10*time.Second, ws.BridgeCacheTTL())
	assert.Equal(t, 1*time.Minute, ws.HTTPWriteTimeout())
	assert.Equal(t, uint16(56), ws.HTTPPort())
	assert.True(t, ws.SecureCookies())
	assert.Equal(t, *commonconfig.MustNewDuration(1*time.Hour), ws.SessionTimeout())
	assert.Equal(t, *commonconfig.MustNewDuration(168*time.Hour), ws.SessionReaperExpiration())
	assert.Equal(t, int64(32770), ws.HTTPMaxSize())
	assert.Equal(t, 15*time.Second, ws.StartTimeout())

	tls := ws.TLS()
	assert.Equal(t, "test/root/dir/tls", tls.Dir())
	// certPath/keyPath are unexported, so the concrete type is asserted here.
	assert.Equal(t, "tls/cert/path", tls.(*tlsConfig).certPath())
	assert.True(t, tls.ForceRedirect())
	assert.Equal(t, "tls-host", tls.Host())
	assert.Equal(t, uint16(6789), tls.HTTPSPort())
	assert.Equal(t, "tls/key/path", tls.(*tlsConfig).keyPath())

	rl := ws.RateLimit()
	assert.Equal(t, int64(42), rl.Authenticated())
	assert.Equal(t, 1*time.Second, rl.AuthenticatedPeriod())
	assert.Equal(t, int64(7), rl.Unauthenticated())
	assert.Equal(t, 1*time.Minute, rl.UnauthenticatedPeriod())

	mf := ws.MFA()
	assert.Equal(t, "test-rpid", mf.RPID())
	assert.Equal(t, "test-rp-origin", mf.RPOrigin())
}
+TELEMETRY_INGRESS_SEND_TIMEOUT= +TELEMETRY_INGRESS_USE_BATCH_SEND= +SHUTDOWN_GRACE_PERIOD= + +DATABASE_LISTENER_MAX_RECONNECT_DURATION= +DATABASE_LISTENER_MIN_RECONNECT_INTERVAL= +MIGRATE_DATABASE= +ORM_MAX_IDLE_CONNS= +ORM_MAX_OPEN_CONNS= +TRIGGER_FALLBACK_DB_POLL_INTERVAL= + +ADVISORY_LOCK_CHECK_INTERVAL= +ADVISORY_LOCK_ID= +DATABASE_LOCKING_MODE= +LEASE_LOCK_DURATION= +LEASE_LOCK_REFRESH_INTERVAL= + +DATABASE_BACKUP_DIR= +DATABASE_BACKUP_FREQUENCY= +DATABASE_BACKUP_MODE= +DATABASE_BACKUP_ON_VERSION_UPGRADE= +DATABASE_BACKUP_URL= + +JSON_CONSOLE= +LOG_FILE_DIR= +LOG_LEVEL= +LOG_SQL= +LOG_FILE_MAX_SIZE= +LOG_FILE_MAX_AGE= +LOG_FILE_MAX_BACKUPS= +LOG_UNIX_TS= + +ALLOW_ORIGINS= +AUTHENTICATED_RATE_LIMIT= +AUTHENTICATED_RATE_LIMIT_PERIOD= +BRIDGE_CACHE_TTL= +BRIDGE_RESPONSE_URL= +HTTP_SERVER_WRITE_TIMEOUT= +PLUGIN_PORT= +SECURE_COOKIES= +SESSION_TIMEOUT= +UNAUTHENTICATED_RATE_LIMIT= +UNAUTHENTICATED_RATE_LIMIT_PERIOD= + +MFA_RPID= +MFA_RPORIGIN= + +TLS_CERT_PATH= +PLUGIN_TLS_HOST= +TLS_KEY_PATH= +PLUGIN_TLS_PORT= +PLUGIN_TLS_REDIRECT= + +FEATURE_FEEDS_MANAGER= +FEATURE_UI_CSA_KEYS= + +FEATURE_LOG_POLLER= + +EVM_ENABLED= +EVM_RPC_ENABLED= +SOLANA_ENABLED= +SOLANA_NODES= +STARKNET_ENABLED= +STARKNET_NODES= +TERRA_ENABLED= +TERRA_NODES= + +ETH_HTTP_URL= +EVM_NODES= +ETH_SECONDARY_URL= +ETH_SECONDARY_URLS= +ETH_URL= +ETH_CHAIN_ID= + +BALANCE_MONITOR_ENABLED= +BLOCK_BACKFILL_DEPTH= +BLOCK_BACKFILL_SKIP= +BLOCK_EMISSION_IDLE_WARNING_THRESHOLD= +ETH_TX_REAPER_INTERVAL= +ETH_TX_REAPER_THRESHOLD= +ETH_TX_RESEND_AFTER_THRESHOLD= +ETH_FINALITY_DEPTH= +ETH_HEAD_TRACKER_HISTORY_DEPTH= +ETH_HEAD_TRACKER_MAX_BUFFER_SIZE= +ETH_HEAD_TRACKER_SAMPLING_INTERVAL= +ETH_LOG_BACKFILL_BATCH_SIZE= +ETH_LOG_POLL_INTERVAL= +ETH_LOG_KEEP_BLOCKS_DEPTH= +ETH_RPC_DEFAULT_BATCH_SIZE= +PLI_CONTRACT_ADDRESS= +OPERATOR_FACTORY_ADDRESS= +MIN_INCOMING_CONFIRMATIONS= +MINIMUM_CONTRACT_PAYMENT_PLI_JUELS= + +NODE_NO_NEW_HEADS_THRESHOLD= +NODE_POLL_FAILURE_THRESHOLD= +NODE_POLL_INTERVAL= 
+NODE_SELECTION_MODE= +NODE_SYNC_THRESHOLD= + +EVM_EIP1559_DYNAMIC_FEES= +ETH_GAS_BUMP_PERCENT= +ETH_GAS_BUMP_THRESHOLD= +ETH_GAS_BUMP_WEI= +EVM_GAS_FEE_CAP_DEFAULT= +ETH_GAS_LIMIT_DEFAULT= +ETH_GAS_LIMIT_MAX= +ETH_GAS_LIMIT_MULTIPLIER= +ETH_GAS_LIMIT_TRANSFER= +ETH_GAS_PRICE_DEFAULT= +ETH_MAX_GAS_PRICE_WEI= +ETH_MIN_GAS_PRICE_WEI= +EVM_GAS_TIP_CAP_DEFAULT= +EVM_GAS_TIP_CAP_MINIMUM= + +ETH_GAS_LIMIT_OCR_JOB_TYPE= +ETH_GAS_LIMIT_DR_JOB_TYPE= +ETH_GAS_LIMIT_VRF_JOB_TYPE= +ETH_GAS_LIMIT_FM_JOB_TYPE= +ETH_GAS_LIMIT_KEEPER_JOB_TYPE= + +GAS_ESTIMATOR_MODE= +BLOCK_HISTORY_ESTIMATOR_BATCH_SIZE= +BLOCK_HISTORY_ESTIMATOR_BLOCK_DELAY= +BLOCK_HISTORY_ESTIMATOR_BLOCK_HISTORY_SIZE= +BLOCK_HISTORY_ESTIMATOR_CHECK_INCLUSION_BLOCKS= +BLOCK_HISTORY_ESTIMATOR_CHECK_INCLUSION_PERCENTILE= +BLOCK_HISTORY_ESTIMATOR_EIP1559_FEE_CAP_BUFFER_BLOCKS= +BLOCK_HISTORY_ESTIMATOR_TRANSACTION_PERCENTILE= + +ETH_GAS_BUMP_TX_DEPTH= +ETH_MAX_IN_FLIGHT_TRANSACTIONS= +ETH_MAX_QUEUED_TRANSACTIONS= +ETH_NONCE_AUTO_SYNC= +ETH_USE_FORWARDERS= + +DEFAULT_HTTP_LIMIT= +DEFAULT_HTTP_TIMEOUT= +FEATURE_EXTERNAL_INITIATORS= +JOB_PIPELINE_MAX_RUN_DURATION= +JOB_PIPELINE_MAX_SUCCESSFUL_RUNS= +JOB_PIPELINE_REAPER_INTERVAL= +JOB_PIPELINE_REAPER_THRESHOLD= +JOB_PIPELINE_RESULT_WRITE_QUEUE_DEPTH= + +FM_DEFAULT_TRANSACTION_QUEUE_DEPTH= +FM_SIMULATE_TRANSACTIONS= + +FEATURE_OFFCHAIN_REPORTING2= + +OCR2_AUTOMATION_GAS_LIMIT= +OCR2_CAPTURE_EA_TELEMETRY= +OCR2_CONTRACT_CONFIRMATIONS= +OCR2_BLOCKCHAIN_TIMEOUT= +OCR2_CONTRACT_POLL_INTERVAL= +OCR2_CONTRACT_SUBSCRIBE_INTERVAL= +OCR2_CONTRACT_TRANSMITTER_TRANSMIT_TIMEOUT= +OCR2_DATABASE_TIMEOUT= +OCR2_KEY_BUNDLE_ID= + +FEATURE_OFFCHAIN_REPORTING= + +OCR_CAPTURE_EA_TELEMETRY= +OCR_CONTRACT_CONFIRMATIONS= +OCR_CONTRACT_TRANSMITTER_TRANSMIT_TIMEOUT= +OCR_DATABASE_TIMEOUT= +OCR_OBSERVATION_GRACE_PERIOD= + +OCR_OBSERVATION_TIMEOUT= +OCR_BLOCKCHAIN_TIMEOUT= +OCR_CONTRACT_POLL_INTERVAL= +OCR_CONTRACT_SUBSCRIBE_INTERVAL= +OCR_DEFAULT_TRANSACTION_QUEUE_DEPTH= + +OCR_KEY_BUNDLE_ID= 
+OCR_SIMULATE_TRANSACTIONS= +OCR_TRACE_LOGGING= +OCR_TRANSMITTER_ADDRESS= + +P2P_NETWORKING_STACK= +P2P_INCOMING_MESSAGE_BUFFER_SIZE= +P2P_OUTGOING_MESSAGE_BUFFER_SIZE= + +P2P_ANNOUNCE_IP= +P2P_ANNOUNCE_PORT= +P2P_BOOTSTRAP_CHECK_INTERVAL= +P2P_BOOTSTRAP_PEERS= +P2P_DHT_ANNOUNCEMENT_COUNTER_USER_PREFIX= +P2P_DHT_LOOKUP_INTERVAL= +P2P_LISTEN_IP= +P2P_LISTEN_PORT= +P2P_NEW_STREAM_TIMEOUT= +P2P_PEER_ID= +P2P_PEERSTORE_WRITE_INTERVAL= + +P2PV2_ANNOUNCE_ADDRESSES= +P2PV2_BOOTSTRAPPERS= +P2PV2_DELTA_DIAL= +P2PV2_DELTA_RECONCILE= +P2PV2_LISTEN_ADDRESSES= + +OCR_OUTGOING_MESSAGE_BUFFER_SIZE= +OCR_INCOMING_MESSAGE_BUFFER_SIZE= +OCR_DHT_LOOKUP_INTERVAL= +OCR_BOOTSTRAP_CHECK_INTERVAL= +OCR_NEW_STREAM_TIMEOUT= + +KEEPER_DEFAULT_TRANSACTION_QUEUE_DEPTH= +KEEPER_GAS_PRICE_BUFFER_PERCENT= +KEEPER_GAS_TIP_CAP_BUFFER_PERCENT= +KEEPER_BASE_FEE_BUFFER_PERCENT= +KEEPER_MAXIMUM_GRACE_PERIOD= +KEEPER_REGISTRY_CHECK_GAS_OVERHEAD= +KEEPER_REGISTRY_PERFORM_GAS_OVERHEAD= +KEEPER_REGISTRY_MAX_PERFORM_DATA_SIZE= +KEEPER_REGISTRY_SYNC_INTERVAL= +KEEPER_REGISTRY_SYNC_UPKEEP_QUEUE_SIZE= +KEEPER_TURN_LOOK_BACK= + +AUTO_PPROF_ENABLED= +AUTO_PPROF_PROFILE_ROOT= +AUTO_PPROF_POLL_INTERVAL= +AUTO_PPROF_GATHER_DURATION= +AUTO_PPROF_GATHER_TRACE_DURATION= +AUTO_PPROF_MAX_PROFILE_SIZE= +AUTO_PPROF_CPU_PROFILE_RATE= +AUTO_PPROF_MEM_PROFILE_RATE= +AUTO_PPROF_BLOCK_PROFILE_RATE= +AUTO_PPROF_MUTEX_PROFILE_FRACTION= +AUTO_PPROF_MEM_THRESHOLD= +AUTO_PPROF_GOROUTINE_THRESHOLD= + +PYROSCOPE_AUTH_TOKEN= +PYROSCOPE_SERVER_ADDRESS= +PYROSCOPE_ENVIRONMENT= + +DATABASE_DEFAULT_IDLE_IN_TX_SESSION_TIMEOUT= +DATABASE_DEFAULT_LOCK_TIMEOUT= +DATABASE_DEFAULT_QUERY_TIMEOUT= + +SENTRY_DSN= +SENTRY_DEBUG= +SENTRY_ENVIRONMENT= +SENTRY_RELEASE= + +GAS_UPDATER_ENABLED= +GAS_UPDATER_BATCH_SIZE= +GAS_UPDATER_BLOCK_DELAY= +GAS_UPDATER_BLOCK_HISTORY_SIZE= +GAS_UPDATER_TRANSACTION_PERCENTILE= + +PROMETHEUS_AUTH_TOKEN= diff --git a/core/services/chainlink/mocks/general_config.go b/core/services/chainlink/mocks/general_config.go new 
file mode 100644 index 00000000..74cfd0ab --- /dev/null +++ b/core/services/chainlink/mocks/general_config.go @@ -0,0 +1,802 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + pluginconfig "github.com/goplugin/plugin-starknet/relayer/pkg/plugin/config" + config "github.com/goplugin/pluginv3.0/v2/core/config" + + cosmosconfig "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + + mock "github.com/stretchr/testify/mock" + + solana "github.com/goplugin/plugin-solana/pkg/solana" + + time "time" + + toml "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + + uuid "github.com/google/uuid" + + zapcore "go.uber.org/zap/zapcore" +) + +// GeneralConfig is an autogenerated mock type for the GeneralConfig type +type GeneralConfig struct { + mock.Mock +} + +// AppID provides a mock function with given fields: +func (_m *GeneralConfig) AppID() uuid.UUID { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AppID") + } + + var r0 uuid.UUID + if rf, ok := ret.Get(0).(func() uuid.UUID); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(uuid.UUID) + } + } + + return r0 +} + +// AuditLogger provides a mock function with given fields: +func (_m *GeneralConfig) AuditLogger() config.AuditLogger { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AuditLogger") + } + + var r0 config.AuditLogger + if rf, ok := ret.Get(0).(func() config.AuditLogger); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.AuditLogger) + } + } + + return r0 +} + +// AutoPprof provides a mock function with given fields: +func (_m *GeneralConfig) AutoPprof() config.AutoPprof { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for AutoPprof") + } + + var r0 config.AutoPprof + if rf, ok := ret.Get(0).(func() config.AutoPprof); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.AutoPprof) + } + } + + 
return r0 +} + +// ConfigTOML provides a mock function with given fields: +func (_m *GeneralConfig) ConfigTOML() (string, string) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ConfigTOML") + } + + var r0 string + var r1 string + if rf, ok := ret.Get(0).(func() (string, string)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func() string); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(string) + } + + return r0, r1 +} + +// CosmosConfigs provides a mock function with given fields: +func (_m *GeneralConfig) CosmosConfigs() cosmosconfig.TOMLConfigs { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CosmosConfigs") + } + + var r0 cosmosconfig.TOMLConfigs + if rf, ok := ret.Get(0).(func() cosmosconfig.TOMLConfigs); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(cosmosconfig.TOMLConfigs) + } + } + + return r0 +} + +// CosmosEnabled provides a mock function with given fields: +func (_m *GeneralConfig) CosmosEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CosmosEnabled") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Database provides a mock function with given fields: +func (_m *GeneralConfig) Database() config.Database { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Database") + } + + var r0 config.Database + if rf, ok := ret.Get(0).(func() config.Database); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Database) + } + } + + return r0 +} + +// EVMConfigs provides a mock function with given fields: +func (_m *GeneralConfig) EVMConfigs() toml.EVMConfigs { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EVMConfigs") + } + + 
var r0 toml.EVMConfigs + if rf, ok := ret.Get(0).(func() toml.EVMConfigs); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(toml.EVMConfigs) + } + } + + return r0 +} + +// EVMEnabled provides a mock function with given fields: +func (_m *GeneralConfig) EVMEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EVMEnabled") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// EVMRPCEnabled provides a mock function with given fields: +func (_m *GeneralConfig) EVMRPCEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EVMRPCEnabled") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Feature provides a mock function with given fields: +func (_m *GeneralConfig) Feature() config.Feature { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Feature") + } + + var r0 config.Feature + if rf, ok := ret.Get(0).(func() config.Feature); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Feature) + } + } + + return r0 +} + +// FluxMonitor provides a mock function with given fields: +func (_m *GeneralConfig) FluxMonitor() config.FluxMonitor { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FluxMonitor") + } + + var r0 config.FluxMonitor + if rf, ok := ret.Get(0).(func() config.FluxMonitor); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.FluxMonitor) + } + } + + return r0 +} + +// Insecure provides a mock function with given fields: +func (_m *GeneralConfig) Insecure() config.Insecure { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Insecure") + } + + var r0 config.Insecure + if rf, ok := ret.Get(0).(func() config.Insecure); ok { + r0 
= rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Insecure) + } + } + + return r0 +} + +// InsecureFastScrypt provides a mock function with given fields: +func (_m *GeneralConfig) InsecureFastScrypt() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for InsecureFastScrypt") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// JobPipeline provides a mock function with given fields: +func (_m *GeneralConfig) JobPipeline() config.JobPipeline { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for JobPipeline") + } + + var r0 config.JobPipeline + if rf, ok := ret.Get(0).(func() config.JobPipeline); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.JobPipeline) + } + } + + return r0 +} + +// Keeper provides a mock function with given fields: +func (_m *GeneralConfig) Keeper() config.Keeper { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Keeper") + } + + var r0 config.Keeper + if rf, ok := ret.Get(0).(func() config.Keeper); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Keeper) + } + } + + return r0 +} + +// Log provides a mock function with given fields: +func (_m *GeneralConfig) Log() config.Log { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Log") + } + + var r0 config.Log + if rf, ok := ret.Get(0).(func() config.Log); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Log) + } + } + + return r0 +} + +// LogConfiguration provides a mock function with given fields: log, warn +func (_m *GeneralConfig) LogConfiguration(log config.LogfFn, warn config.LogfFn) { + _m.Called(log, warn) +} + +// Mercury provides a mock function with given fields: +func (_m *GeneralConfig) Mercury() config.Mercury { + ret := _m.Called() + + if len(ret) == 0 { + 
panic("no return value specified for Mercury") + } + + var r0 config.Mercury + if rf, ok := ret.Get(0).(func() config.Mercury); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Mercury) + } + } + + return r0 +} + +// OCR provides a mock function with given fields: +func (_m *GeneralConfig) OCR() config.OCR { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OCR") + } + + var r0 config.OCR + if rf, ok := ret.Get(0).(func() config.OCR); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.OCR) + } + } + + return r0 +} + +// OCR2 provides a mock function with given fields: +func (_m *GeneralConfig) OCR2() config.OCR2 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OCR2") + } + + var r0 config.OCR2 + if rf, ok := ret.Get(0).(func() config.OCR2); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.OCR2) + } + } + + return r0 +} + +// P2P provides a mock function with given fields: +func (_m *GeneralConfig) P2P() config.P2P { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for P2P") + } + + var r0 config.P2P + if rf, ok := ret.Get(0).(func() config.P2P); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.P2P) + } + } + + return r0 +} + +// Password provides a mock function with given fields: +func (_m *GeneralConfig) Password() config.Password { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Password") + } + + var r0 config.Password + if rf, ok := ret.Get(0).(func() config.Password); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Password) + } + } + + return r0 +} + +// Prometheus provides a mock function with given fields: +func (_m *GeneralConfig) Prometheus() config.Prometheus { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Prometheus") + } + + var r0 
config.Prometheus + if rf, ok := ret.Get(0).(func() config.Prometheus); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Prometheus) + } + } + + return r0 +} + +// Pyroscope provides a mock function with given fields: +func (_m *GeneralConfig) Pyroscope() config.Pyroscope { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Pyroscope") + } + + var r0 config.Pyroscope + if rf, ok := ret.Get(0).(func() config.Pyroscope); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Pyroscope) + } + } + + return r0 +} + +// RootDir provides a mock function with given fields: +func (_m *GeneralConfig) RootDir() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RootDir") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Sentry provides a mock function with given fields: +func (_m *GeneralConfig) Sentry() config.Sentry { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Sentry") + } + + var r0 config.Sentry + if rf, ok := ret.Get(0).(func() config.Sentry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Sentry) + } + } + + return r0 +} + +// SetLogLevel provides a mock function with given fields: lvl +func (_m *GeneralConfig) SetLogLevel(lvl zapcore.Level) error { + ret := _m.Called(lvl) + + if len(ret) == 0 { + panic("no return value specified for SetLogLevel") + } + + var r0 error + if rf, ok := ret.Get(0).(func(zapcore.Level) error); ok { + r0 = rf(lvl) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetLogSQL provides a mock function with given fields: logSQL +func (_m *GeneralConfig) SetLogSQL(logSQL bool) { + _m.Called(logSQL) +} + +// SetPasswords provides a mock function with given fields: keystore, vrf +func (_m *GeneralConfig) SetPasswords(keystore *string, vrf *string) { + 
_m.Called(keystore, vrf) +} + +// ShutdownGracePeriod provides a mock function with given fields: +func (_m *GeneralConfig) ShutdownGracePeriod() time.Duration { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ShutdownGracePeriod") + } + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + +// SolanaConfigs provides a mock function with given fields: +func (_m *GeneralConfig) SolanaConfigs() solana.TOMLConfigs { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SolanaConfigs") + } + + var r0 solana.TOMLConfigs + if rf, ok := ret.Get(0).(func() solana.TOMLConfigs); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(solana.TOMLConfigs) + } + } + + return r0 +} + +// SolanaEnabled provides a mock function with given fields: +func (_m *GeneralConfig) SolanaEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SolanaEnabled") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// StarkNetEnabled provides a mock function with given fields: +func (_m *GeneralConfig) StarkNetEnabled() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for StarkNetEnabled") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// StarknetConfigs provides a mock function with given fields: +func (_m *GeneralConfig) StarknetConfigs() pluginconfig.TOMLConfigs { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for StarknetConfigs") + } + + var r0 pluginconfig.TOMLConfigs + if rf, ok := ret.Get(0).(func() pluginconfig.TOMLConfigs); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pluginconfig.TOMLConfigs) + 
} + } + + return r0 +} + +// TelemetryIngress provides a mock function with given fields: +func (_m *GeneralConfig) TelemetryIngress() config.TelemetryIngress { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TelemetryIngress") + } + + var r0 config.TelemetryIngress + if rf, ok := ret.Get(0).(func() config.TelemetryIngress); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.TelemetryIngress) + } + } + + return r0 +} + +// Threshold provides a mock function with given fields: +func (_m *GeneralConfig) Threshold() config.Threshold { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Threshold") + } + + var r0 config.Threshold + if rf, ok := ret.Get(0).(func() config.Threshold); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Threshold) + } + } + + return r0 +} + +// Tracing provides a mock function with given fields: +func (_m *GeneralConfig) Tracing() config.Tracing { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Tracing") + } + + var r0 config.Tracing + if rf, ok := ret.Get(0).(func() config.Tracing); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.Tracing) + } + } + + return r0 +} + +// Validate provides a mock function with given fields: +func (_m *GeneralConfig) Validate() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Validate") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ValidateDB provides a mock function with given fields: +func (_m *GeneralConfig) ValidateDB() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ValidateDB") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// WebServer provides a mock 
function with given fields: +func (_m *GeneralConfig) WebServer() config.WebServer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for WebServer") + } + + var r0 config.WebServer + if rf, ok := ret.Get(0).(func() config.WebServer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.WebServer) + } + } + + return r0 +} + +// NewGeneralConfig creates a new instance of GeneralConfig. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGeneralConfig(t interface { + mock.TestingT + Cleanup(func()) +}) *GeneralConfig { + mock := &GeneralConfig{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/chainlink/mocks/relayer_chain_interoperators.go b/core/services/chainlink/mocks/relayer_chain_interoperators.go new file mode 100644 index 00000000..7225eac7 --- /dev/null +++ b/core/services/chainlink/mocks/relayer_chain_interoperators.go @@ -0,0 +1,62 @@ +package mocks + +import ( + "context" + "slices" + + services2 "github.com/goplugin/pluginv3.0/v2/core/services" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + + "github.com/goplugin/plugin-common/pkg/loop" + + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + + "github.com/goplugin/plugin-common/pkg/types" +) + +// FakeRelayerChainInteroperators is a fake plugin.RelayerChainInteroperators. +// This exists because mockery generation doesn't understand how to produce an alias instead of the underlying type (which is not exported in this case). 
+type FakeRelayerChainInteroperators struct { + Relayers []loop.Relayer + EVMChains legacyevm.LegacyChainContainer + Nodes []types.NodeStatus + NodesErr error +} + +func (f *FakeRelayerChainInteroperators) LegacyEVMChains() legacyevm.LegacyChainContainer { + return f.EVMChains +} + +func (f *FakeRelayerChainInteroperators) NodeStatuses(ctx context.Context, offset, limit int, relayIDs ...relay.ID) (nodes []types.NodeStatus, count int, err error) { + return slices.Clone(f.Nodes), len(f.Nodes), f.NodesErr +} + +func (f *FakeRelayerChainInteroperators) Services() []services2.ServiceCtx { + panic("unimplemented") +} + +func (f *FakeRelayerChainInteroperators) List(filter plugin.FilterFn) plugin.RelayerChainInteroperators { + panic("unimplemented") +} + +func (f *FakeRelayerChainInteroperators) Get(id relay.ID) (loop.Relayer, error) { + panic("unimplemented") +} + +func (f *FakeRelayerChainInteroperators) Slice() []loop.Relayer { + return f.Relayers +} + +func (f *FakeRelayerChainInteroperators) LegacyCosmosChains() plugin.LegacyCosmosContainer { + panic("unimplemented") +} + +func (f *FakeRelayerChainInteroperators) ChainStatus(ctx context.Context, id relay.ID) (types.ChainStatus, error) { + panic("unimplemented") +} + +func (f *FakeRelayerChainInteroperators) ChainStatuses(ctx context.Context, offset, limit int) ([]types.ChainStatus, int, error) { + panic("unimplemented") +} diff --git a/core/services/chainlink/relayer_chain_interoperators.go b/core/services/chainlink/relayer_chain_interoperators.go new file mode 100644 index 00000000..e027f018 --- /dev/null +++ b/core/services/chainlink/relayer_chain_interoperators.go @@ -0,0 +1,395 @@ +package plugin + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/plugin-cosmos/pkg/cosmos" + "github.com/goplugin/plugin-cosmos/pkg/cosmos/adapters" + + "github.com/goplugin/pluginv3.0/v2/core/chains" + 
	"github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm"
	"github.com/goplugin/pluginv3.0/v2/core/services"
	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2"
	"github.com/goplugin/pluginv3.0/v2/core/services/relay"
)

// ErrNoSuchRelayer is returned by Get when the requested relay.ID has no
// registered relayer; callers can test for it with errors.Is.
var ErrNoSuchRelayer = errors.New("relayer does not exist")

// RelayerChainInteroperators
// encapsulates relayers and chains and is the primary entry point for
// the node to access relayers, get legacy chains associated to a relayer
// and get status about the chains and nodes
type RelayerChainInteroperators interface {
	// Services returns all background services owned by the relayers.
	Services() []services.ServiceCtx

	// List returns a sub-view containing only the relayers accepted by filter.
	List(filter FilterFn) RelayerChainInteroperators

	LoopRelayerStorer
	LegacyChainer
	ChainsNodesStatuser
}

// LoopRelayerStorer is key-value like interface for storing and
// retrieving [loop.Relayer]
type LoopRelayerStorer interface {
	ocr2.RelayGetter
	Slice() []loop.Relayer
}

// LegacyChainer is an interface for getting legacy chains
// This will be deprecated/removed when products depend only
// on the relayer interface.
type LegacyChainer interface {
	LegacyEVMChains() legacyevm.LegacyChainContainer
	LegacyCosmosChains() LegacyCosmosContainer
}

// ChainStatuser reports status for a single chain or a paginated set of all chains.
type ChainStatuser interface {
	ChainStatus(ctx context.Context, id relay.ID) (types.ChainStatus, error)
	ChainStatuses(ctx context.Context, offset, limit int) ([]types.ChainStatus, int, error)
}

// NodesStatuser is an interface for node configuration and state.
// TODO BCF-2440, BCF-2511 may need Node(ctx,name) to get a node status by name
type NodesStatuser interface {
	NodeStatuses(ctx context.Context, offset, limit int, relayIDs ...relay.ID) (nodes []types.NodeStatus, count int, err error)
}

// ChainsNodesStatuser report statuses about chains and nodes
type ChainsNodesStatuser interface {
	ChainStatuser
	NodesStatuser
}

// Compile-time check that the core implementation satisfies the interface.
var _ RelayerChainInteroperators = &CoreRelayerChainInteroperators{}

// CoreRelayerChainInteroperators implements [RelayerChainInteroperators]
// as needed for the core [plugin.Application]
type CoreRelayerChainInteroperators struct {
	mu           sync.Mutex                 // guards loopRelayers and legacyChains
	loopRelayers map[relay.ID]loop.Relayer  // one relayer per (network, chain id)
	legacyChains legacyChains               // chain-specific containers; see legacyChains

	// we keep an explicit list of services because the legacy implementations have more than
	// just the relayer service
	srvs []services.ServiceCtx
}

// NewCoreRelayerChainInteroperators constructs the container and applies each
// init hook in order; the first hook error aborts construction.
func NewCoreRelayerChainInteroperators(initFuncs ...CoreRelayerChainInitFunc) (*CoreRelayerChainInteroperators, error) {
	cr := &CoreRelayerChainInteroperators{
		loopRelayers: make(map[relay.ID]loop.Relayer),
		srvs:         make([]services.ServiceCtx, 0),
	}
	for _, initFn := range initFuncs {
		err := initFn(cr)
		if err != nil {
			return nil, err
		}
	}
	return cr, nil
}

// CoreRelayerChainInitFunc is a hook in the constructor to create relayers from a factory.
type CoreRelayerChainInitFunc func(op *CoreRelayerChainInteroperators) error

// InitEVM is a option for instantiating evm relayers
func InitEVM(ctx context.Context, factory RelayerFactory, config EVMFactoryConfig) CoreRelayerChainInitFunc {
	return func(op *CoreRelayerChainInteroperators) (err error) {
		adapters, err2 := factory.NewEVM(ctx, config)
		if err2 != nil {
			return fmt.Errorf("failed to setup EVM relayer: %w", err2)
		}

		legacyMap := make(map[string]legacyevm.Chain)
		for id, a := range adapters {
			// adapter is a service
			op.srvs = append(op.srvs, a)
			op.loopRelayers[id] = a
			legacyMap[id.ChainID] = a.Chain()
		}
		op.legacyChains.EVMChains = legacyevm.NewLegacyChains(legacyMap, config.AppConfig.EVMConfigs())
		return nil
	}
}

// InitCosmos is a option for instantiating Cosmos relayers
// NOTE(review): ctx is accepted for signature symmetry with InitEVM but is not
// used here — factory.NewCosmos does not take one; confirm intended.
func InitCosmos(ctx context.Context, factory RelayerFactory, config CosmosFactoryConfig) CoreRelayerChainInitFunc {
	return func(op *CoreRelayerChainInteroperators) (err error) {
		adapters, err2 := factory.NewCosmos(config)
		if err2 != nil {
			return fmt.Errorf("failed to setup Cosmos relayer: %w", err2)
		}
		legacyMap := make(map[string]cosmos.Chain)

		for id, a := range adapters {
			op.srvs = append(op.srvs, a)
			op.loopRelayers[id] = a
			legacyMap[id.ChainID] = a.Chain()
		}
		op.legacyChains.CosmosChains = NewLegacyCosmos(legacyMap)

		return nil
	}
}

// InitSolana is a option for instantiating Solana relayers
// NOTE(review): ctx is unused here as well; see InitCosmos.
func InitSolana(ctx context.Context, factory RelayerFactory, config SolanaFactoryConfig) CoreRelayerChainInitFunc {
	return func(op *CoreRelayerChainInteroperators) error {
		solRelayers, err := factory.NewSolana(config.Keystore, config.TOMLConfigs)
		if err != nil {
			return fmt.Errorf("failed to setup Solana relayer: %w", err)
		}

		for id, relayer := range solRelayers {
			op.srvs = append(op.srvs, relayer)
			op.loopRelayers[id] = relayer
		}

		return nil
	}
}

// InitStarknet is a option for instantiating Starknet relayers
func InitStarknet(ctx context.Context, factory RelayerFactory, config StarkNetFactoryConfig) CoreRelayerChainInitFunc {
	return func(op *CoreRelayerChainInteroperators) (err error) {
		starkRelayers, err := factory.NewStarkNet(config.Keystore, config.TOMLConfigs)
		if err != nil {
			return fmt.Errorf("failed to setup StarkNet relayer: %w", err)
		}

		for id, relayer := range starkRelayers {
			op.srvs = append(op.srvs, relayer)
			op.loopRelayers[id] = relayer
		}

		return nil
	}
}

// Get a [loop.Relayer] by id
func (rs *CoreRelayerChainInteroperators) Get(id relay.ID) (loop.Relayer, error) {
	rs.mu.Lock()
	defer rs.mu.Unlock()
	lr, exist := rs.loopRelayers[id]
	if !exist {
		// wrap the sentinel so callers can errors.Is(err, ErrNoSuchRelayer)
		return nil, fmt.Errorf("%w: %s", ErrNoSuchRelayer, id)
	}
	return lr, nil
}

// LegacyEVMChains returns a container with all the evm chains
// TODO BCF-2511
func (rs *CoreRelayerChainInteroperators) LegacyEVMChains() legacyevm.LegacyChainContainer {
	rs.mu.Lock()
	defer rs.mu.Unlock()
	return rs.legacyChains.EVMChains
}

// LegacyCosmosChains returns a container with all the cosmos chains
// TODO BCF-2511
func (rs *CoreRelayerChainInteroperators) LegacyCosmosChains() LegacyCosmosContainer {
	rs.mu.Lock()
	defer rs.mu.Unlock()
	return rs.legacyChains.CosmosChains
}

// ChainStatus gets [types.ChainStatus]
func (rs *CoreRelayerChainInteroperators) ChainStatus(ctx context.Context, id relay.ID) (types.ChainStatus, error) {

	lr, err := rs.Get(id)
	if err != nil {
		return types.ChainStatus{}, fmt.Errorf("%w: error getting chain status: %w", chains.ErrNotFound, err)
	}

	return lr.GetChainStatus(ctx)
}

// ChainStatuses returns a deterministic, paginated view of all chain statuses
// plus the total count before paging; any per-relayer error fails the whole call.
func (rs *CoreRelayerChainInteroperators) ChainStatuses(ctx context.Context, offset, limit int) ([]types.ChainStatus, int, error) {

	var (
		stats    []types.ChainStatus
		totalErr error
	)
	rs.mu.Lock()
	defer rs.mu.Unlock()

	relayerIds := make([]relay.ID, 0)
	for rid := range rs.loopRelayers {
		relayerIds = append(relayerIds, rid)
	}
sort.Slice(relayerIds, func(i, j int) bool { + return relayerIds[i].String() < relayerIds[j].String() + }) + for _, rid := range relayerIds { + lr := rs.loopRelayers[rid] + stat, err := lr.GetChainStatus(ctx) + if err != nil { + totalErr = errors.Join(totalErr, err) + continue + } + stats = append(stats, stat) + } + + if totalErr != nil { + return nil, 0, totalErr + } + cnt := len(stats) + if len(stats) > limit+offset && limit > 0 { + return stats[offset : offset+limit], cnt, nil + } + return stats[offset:], cnt, nil +} + +func (rs *CoreRelayerChainInteroperators) Node(ctx context.Context, name string) (types.NodeStatus, error) { + // This implementation is round-about + // TODO BFC-2511, may be better in the loop.Relayer interface itself + stats, _, err := rs.NodeStatuses(ctx, 0, -1) + if err != nil { + return types.NodeStatus{}, err + } + for _, stat := range stats { + if stat.Name == name { + return stat, nil + } + } + return types.NodeStatus{}, fmt.Errorf("node %s: %w", name, chains.ErrNotFound) +} + +// ids must be a string representation of relay.Identifier +// ids are a filter; if none are specified, all are returned. +func (rs *CoreRelayerChainInteroperators) NodeStatuses(ctx context.Context, offset, limit int, relayerIDs ...relay.ID) (nodes []types.NodeStatus, count int, err error) { + var ( + totalErr error + result []types.NodeStatus + ) + if len(relayerIDs) == 0 { + for _, lr := range rs.loopRelayers { + stats, _, total, err := lr.ListNodeStatuses(ctx, int32(limit), "") + if err != nil { + totalErr = errors.Join(totalErr, err) + continue + } + result = append(result, stats...) 
+ count += total + } + } else { + for _, rid := range relayerIDs { + lr, exist := rs.loopRelayers[rid] + if !exist { + totalErr = errors.Join(totalErr, fmt.Errorf("relayer %s does not exist", rid.Name())) + continue + } + nodeStats, _, total, err := lr.ListNodeStatuses(ctx, int32(limit), "") + + if err != nil { + totalErr = errors.Join(totalErr, err) + continue + } + result = append(result, nodeStats...) + count += total + } + } + if totalErr != nil { + return nil, 0, totalErr + } + if len(result) > limit && limit > 0 { + return result[offset : offset+limit], count, nil + } + return result[offset:], count, nil +} + +type FilterFn func(id relay.ID) bool + +var AllRelayers = func(id relay.ID) bool { + return true +} + +// Returns true if the given network matches id.Network +func FilterRelayersByType(network relay.Network) func(id relay.ID) bool { + return func(id relay.ID) bool { + return id.Network == network + } +} + +// List returns all the [RelayerChainInteroperators] that match the [FilterFn]. +// A typical usage pattern to use [List] with [FilterByType] to obtain a set of [RelayerChainInteroperators] +// for a given chain +func (rs *CoreRelayerChainInteroperators) List(filter FilterFn) RelayerChainInteroperators { + + matches := make(map[relay.ID]loop.Relayer) + rs.mu.Lock() + for id, relayer := range rs.loopRelayers { + if filter(id) { + matches[id] = relayer + } + } + rs.mu.Unlock() + return &CoreRelayerChainInteroperators{ + loopRelayers: matches, + } +} + +// Returns a slice of [loop.Relayer]. A typically usage pattern to is +// use [List(criteria)].Slice() for range based operations +func (rs *CoreRelayerChainInteroperators) Slice() []loop.Relayer { + var result []loop.Relayer + for _, r := range rs.loopRelayers { + result = append(result, r) + } + return result +} +func (rs *CoreRelayerChainInteroperators) Services() (s []services.ServiceCtx) { + return rs.srvs +} + +// legacyChains encapsulates the chain-specific dependencies. 
Will be +// deprecated when chain-specific logic is removed from products. +type legacyChains struct { + EVMChains legacyevm.LegacyChainContainer + CosmosChains LegacyCosmosContainer +} + +// LegacyCosmosContainer is container interface for Cosmos chains +type LegacyCosmosContainer interface { + Get(id string) (adapters.Chain, error) + Len() int + List(ids ...string) ([]adapters.Chain, error) + Slice() []adapters.Chain +} + +type LegacyCosmos = chains.ChainsKV[adapters.Chain] + +var _ LegacyCosmosContainer = &LegacyCosmos{} + +func NewLegacyCosmos(m map[string]adapters.Chain) *LegacyCosmos { + return chains.NewChainsKV[adapters.Chain](m) +} + +type CosmosLoopRelayerChainer interface { + loop.Relayer + Chain() adapters.Chain +} + +type CosmosLoopRelayerChain struct { + loop.Relayer + chain adapters.Chain +} + +func NewCosmosLoopRelayerChain(r *cosmos.Relayer, s adapters.Chain) *CosmosLoopRelayerChain { + ra := relay.NewServerAdapter(r, s) + return &CosmosLoopRelayerChain{ + Relayer: ra, + chain: s, + } +} +func (r *CosmosLoopRelayerChain) Chain() adapters.Chain { + return r.chain +} + +var _ CosmosLoopRelayerChainer = &CosmosLoopRelayerChain{} diff --git a/core/services/chainlink/relayer_chain_interoperators_test.go b/core/services/chainlink/relayer_chain_interoperators_test.go new file mode 100644 index 00000000..f93eb24a --- /dev/null +++ b/core/services/chainlink/relayer_chain_interoperators_test.go @@ -0,0 +1,459 @@ +package plugin_test + +import ( + "errors" + "math/big" + "testing" + + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + coscfg "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + solcfg "github.com/goplugin/plugin-solana/pkg/solana/config" + stkcfg "github.com/goplugin/plugin-starknet/relayer/pkg/plugin/config" + + 
"github.com/goplugin/plugin-solana/pkg/solana" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + "github.com/goplugin/pluginv3.0/v2/plugins" + + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" +) + +func TestCoreRelayerChainInteroperators(t *testing.T) { + + evmChainID1, evmChainID2 := ubig.New(big.NewInt(1)), ubig.New(big.NewInt(2)) + solanaChainID1, solanaChainID2 := "solana-id-1", "solana-id-2" + starknetChainID1, starknetChainID2 := "starknet-id-1", "starknet-id-2" + cosmosChainID1, cosmosChainID2 := "cosmos-id-1", "cosmos-id-2" + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + + cfg := evmcfg.Defaults(evmChainID1) + node1_1 := evmcfg.Node{ + Name: ptr("Test node chain1:1"), + WSURL: commonconfig.MustParseURL("ws://localhost:8546"), + HTTPURL: commonconfig.MustParseURL("http://localhost:8546"), + SendOnly: ptr(false), + Order: ptr(int32(15)), + } + node1_2 := evmcfg.Node{ + Name: ptr("Test node chain1:2"), + WSURL: commonconfig.MustParseURL("ws://localhost:8547"), + HTTPURL: commonconfig.MustParseURL("http://localhost:8547"), + SendOnly: ptr(false), + Order: ptr(int32(36)), + } + node2_1 := evmcfg.Node{ + Name: ptr("Test node chain2:1"), + WSURL: commonconfig.MustParseURL("ws://localhost:8547"), + HTTPURL: commonconfig.MustParseURL("http://localhost:8547"), + SendOnly: ptr(false), + Order: ptr(int32(11)), + } + c.EVM[0] = &evmcfg.EVMConfig{ + ChainID: evmChainID1, + Enabled: ptr(true), + Chain: cfg, 
+ Nodes: evmcfg.EVMNodes{&node1_1, &node1_2}, + } + id2 := ubig.New(big.NewInt(2)) + c.EVM = append(c.EVM, &evmcfg.EVMConfig{ + ChainID: evmChainID2, + Chain: evmcfg.Defaults(id2), + Enabled: ptr(true), + Nodes: evmcfg.EVMNodes{&node2_1}, + }) + + c.Solana = solana.TOMLConfigs{ + &solana.TOMLConfig{ + ChainID: &solanaChainID1, + Enabled: ptr(true), + Chain: solcfg.Chain{}, + Nodes: []*solcfg.Node{{ + Name: ptr("solana chain 1 node 1"), + URL: ((*commonconfig.URL)(commonconfig.MustParseURL("http://localhost:8547").URL())), + }}, + }, + &solana.TOMLConfig{ + ChainID: &solanaChainID2, + Enabled: ptr(true), + Chain: solcfg.Chain{}, + Nodes: []*solcfg.Node{{ + Name: ptr("solana chain 2 node 1"), + URL: ((*commonconfig.URL)(commonconfig.MustParseURL("http://localhost:8527").URL())), + }}, + }, + } + + c.Starknet = stkcfg.TOMLConfigs{ + &stkcfg.TOMLConfig{ + ChainID: &starknetChainID1, + Enabled: ptr(true), + Chain: stkcfg.Chain{}, + Nodes: []*stkcfg.Node{ + { + Name: ptr("starknet chain 1 node 1"), + URL: ((*commonconfig.URL)(commonconfig.MustParseURL("http://localhost:8547").URL())), + }, + { + Name: ptr("starknet chain 1 node 2"), + URL: ((*commonconfig.URL)(commonconfig.MustParseURL("http://localhost:8548").URL())), + }, + { + Name: ptr("starknet chain 1 node 3"), + URL: ((*commonconfig.URL)(commonconfig.MustParseURL("http://localhost:8549").URL())), + }, + }, + }, + &stkcfg.TOMLConfig{ + ChainID: &starknetChainID2, + Enabled: ptr(true), + Chain: stkcfg.Chain{}, + Nodes: []*stkcfg.Node{ + { + Name: ptr("starknet chain 2 node 1"), + URL: ((*commonconfig.URL)(commonconfig.MustParseURL("http://localhost:3547").URL())), + }, + }, + }, + } + + c.Cosmos = coscfg.TOMLConfigs{ + &coscfg.TOMLConfig{ + ChainID: &cosmosChainID1, + Enabled: ptr(true), + Chain: coscfg.Chain{ + GasLimitMultiplier: ptr(decimal.RequireFromString("1.55555")), + Bech32Prefix: ptr("wasm"), + GasToken: ptr("cosm"), + }, + Nodes: coscfg.Nodes{ + &coscfg.Node{ + Name: ptr("cosmos chain 1 node 1"), + 
TendermintURL: (*commonconfig.URL)(commonconfig.MustParseURL("http://localhost:9548").URL()), + }, + }, + }, + &coscfg.TOMLConfig{ + ChainID: &cosmosChainID2, + Enabled: ptr(true), + Chain: coscfg.Chain{ + GasLimitMultiplier: ptr(decimal.RequireFromString("0.777")), + Bech32Prefix: ptr("wasm"), + GasToken: ptr("cosm"), + }, + Nodes: coscfg.Nodes{ + &coscfg.Node{ + Name: ptr("cosmos chain 2 node 1"), + TendermintURL: (*commonconfig.URL)(commonconfig.MustParseURL("http://localhost:9598").URL()), + }, + }, + }, + } + }) + + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + + lggr := logger.TestLogger(t) + + factory := plugin.RelayerFactory{ + Logger: lggr, + LoopRegistry: plugins.NewLoopRegistry(lggr, nil), + GRPCOpts: loop.GRPCOpts{}, + } + + testctx := testutils.Context(t) + + tests := []struct { + name string + initFuncs []plugin.CoreRelayerChainInitFunc + expectedRelayerNetworks map[relay.Network]struct{} + + expectedEVMChainCnt int + expectedEVMNodeCnt int + expectedEVMRelayerIds []relay.ID + + expectedSolanaChainCnt int + expectedSolanaNodeCnt int + expectedSolanaRelayerIds []relay.ID + + expectedStarknetChainCnt int + expectedStarknetNodeCnt int + expectedStarknetRelayerIds []relay.ID + + expectedCosmosChainCnt int + expectedCosmosNodeCnt int + expectedCosmosRelayerIds []relay.ID + }{ + + {name: "2 evm chains with 3 nodes", + initFuncs: []plugin.CoreRelayerChainInitFunc{ + plugin.InitEVM(testctx, factory, plugin.EVMFactoryConfig{ + ChainOpts: legacyevm.ChainOpts{ + AppConfig: cfg, + MailMon: &mailbox.Monitor{}, + DB: db, + }, + CSAETHKeystore: keyStore, + }), + }, + expectedEVMChainCnt: 2, + expectedEVMNodeCnt: 3, + expectedEVMRelayerIds: []relay.ID{ + {Network: relay.EVM, ChainID: evmChainID1.String()}, + {Network: relay.EVM, ChainID: evmChainID2.String()}, + }, + expectedRelayerNetworks: map[relay.Network]struct{}{relay.EVM: {}}, + }, + + {name: "2 solana chain with 2 node", + + initFuncs: []plugin.CoreRelayerChainInitFunc{ + 
plugin.InitSolana(testctx, factory, plugin.SolanaFactoryConfig{ + Keystore: keyStore.Solana(), + TOMLConfigs: cfg.SolanaConfigs()}), + }, + expectedSolanaChainCnt: 2, + expectedSolanaNodeCnt: 2, + expectedSolanaRelayerIds: []relay.ID{ + {Network: relay.Solana, ChainID: solanaChainID1}, + {Network: relay.Solana, ChainID: solanaChainID2}, + }, + expectedRelayerNetworks: map[relay.Network]struct{}{relay.Solana: {}}, + }, + + {name: "2 starknet chain with 4 nodes", + + initFuncs: []plugin.CoreRelayerChainInitFunc{ + plugin.InitStarknet(testctx, factory, plugin.StarkNetFactoryConfig{ + Keystore: keyStore.StarkNet(), + TOMLConfigs: cfg.StarknetConfigs()}), + }, + expectedStarknetChainCnt: 2, + expectedStarknetNodeCnt: 4, + expectedStarknetRelayerIds: []relay.ID{ + {Network: relay.StarkNet, ChainID: starknetChainID1}, + {Network: relay.StarkNet, ChainID: starknetChainID2}, + }, + expectedRelayerNetworks: map[relay.Network]struct{}{relay.StarkNet: {}}, + }, + + { + name: "2 cosmos chains with 2 nodes", + initFuncs: []plugin.CoreRelayerChainInitFunc{ + plugin.InitCosmos(testctx, factory, plugin.CosmosFactoryConfig{ + Keystore: keyStore.Cosmos(), + TOMLConfigs: cfg.CosmosConfigs(), + DB: db, + QConfig: cfg.Database()}), + }, + expectedCosmosChainCnt: 2, + expectedCosmosNodeCnt: 2, + expectedCosmosRelayerIds: []relay.ID{ + {Network: relay.Cosmos, ChainID: cosmosChainID1}, + {Network: relay.Cosmos, ChainID: cosmosChainID2}, + }, + expectedRelayerNetworks: map[relay.Network]struct{}{relay.Cosmos: {}}, + }, + + {name: "all chains", + + initFuncs: []plugin.CoreRelayerChainInitFunc{plugin.InitSolana(testctx, factory, plugin.SolanaFactoryConfig{ + Keystore: keyStore.Solana(), + TOMLConfigs: cfg.SolanaConfigs()}), + plugin.InitEVM(testctx, factory, plugin.EVMFactoryConfig{ + ChainOpts: legacyevm.ChainOpts{ + AppConfig: cfg, + + MailMon: &mailbox.Monitor{}, + DB: db, + }, + CSAETHKeystore: keyStore, + }), + plugin.InitStarknet(testctx, factory, plugin.StarkNetFactoryConfig{ + 
Keystore: keyStore.StarkNet(), + TOMLConfigs: cfg.StarknetConfigs()}), + plugin.InitCosmos(testctx, factory, plugin.CosmosFactoryConfig{ + Keystore: keyStore.Cosmos(), + TOMLConfigs: cfg.CosmosConfigs(), + DB: db, + QConfig: cfg.Database(), + }), + }, + expectedEVMChainCnt: 2, + expectedEVMNodeCnt: 3, + expectedEVMRelayerIds: []relay.ID{ + {Network: relay.EVM, ChainID: evmChainID1.String()}, + {Network: relay.EVM, ChainID: evmChainID2.String()}, + }, + + expectedSolanaChainCnt: 2, + expectedSolanaNodeCnt: 2, + expectedSolanaRelayerIds: []relay.ID{ + {Network: relay.Solana, ChainID: solanaChainID1}, + {Network: relay.Solana, ChainID: solanaChainID2}, + }, + + expectedStarknetChainCnt: 2, + expectedStarknetNodeCnt: 4, + expectedStarknetRelayerIds: []relay.ID{ + {Network: relay.StarkNet, ChainID: starknetChainID1}, + {Network: relay.StarkNet, ChainID: starknetChainID2}, + }, + + expectedCosmosChainCnt: 2, + expectedCosmosNodeCnt: 2, + expectedCosmosRelayerIds: []relay.ID{ + {Network: relay.Cosmos, ChainID: cosmosChainID1}, + {Network: relay.Cosmos, ChainID: cosmosChainID2}, + }, + + expectedRelayerNetworks: map[relay.Network]struct{}{relay.EVM: {}, relay.Cosmos: {}, relay.Solana: {}, relay.StarkNet: {}}, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + var cr *plugin.CoreRelayerChainInteroperators + { + var err error + cr, err = plugin.NewCoreRelayerChainInteroperators(tt.initFuncs...) 
+ require.NoError(t, err) + + expectedChainCnt := tt.expectedEVMChainCnt + tt.expectedCosmosChainCnt + tt.expectedSolanaChainCnt + tt.expectedStarknetChainCnt + allChainsStats, cnt, err := cr.ChainStatuses(testctx, 0, 0) + assert.NoError(t, err) + assert.Len(t, allChainsStats, expectedChainCnt) + assert.Equal(t, cnt, len(allChainsStats)) + assert.Len(t, cr.Slice(), expectedChainCnt) + + // should be one relayer per chain and one service per relayer + assert.Len(t, cr.Slice(), expectedChainCnt) + assert.Len(t, cr.Services(), expectedChainCnt) + + expectedNodeCnt := tt.expectedEVMNodeCnt + tt.expectedCosmosNodeCnt + tt.expectedSolanaNodeCnt + tt.expectedStarknetNodeCnt + allNodeStats, cnt, err := cr.NodeStatuses(testctx, 0, 0) + assert.NoError(t, err) + assert.Len(t, allNodeStats, expectedNodeCnt) + assert.Equal(t, cnt, len(allNodeStats)) + } + + gotRelayerNetworks := make(map[relay.Network]struct{}) + for relayNetwork := range relay.SupportedRelays { + var expectedChainCnt, expectedNodeCnt int + switch relayNetwork { + case relay.EVM: + expectedChainCnt, expectedNodeCnt = tt.expectedEVMChainCnt, tt.expectedEVMNodeCnt + case relay.Cosmos: + expectedChainCnt, expectedNodeCnt = tt.expectedCosmosChainCnt, tt.expectedCosmosNodeCnt + case relay.Solana: + expectedChainCnt, expectedNodeCnt = tt.expectedSolanaChainCnt, tt.expectedSolanaNodeCnt + case relay.StarkNet: + expectedChainCnt, expectedNodeCnt = tt.expectedStarknetChainCnt, tt.expectedStarknetNodeCnt + default: + require.Fail(t, "untested relay network", relayNetwork) + } + + interops := cr.List(plugin.FilterRelayersByType(relayNetwork)) + assert.Len(t, cr.List(plugin.FilterRelayersByType(relayNetwork)).Slice(), expectedChainCnt) + if len(interops.Slice()) > 0 { + gotRelayerNetworks[relayNetwork] = struct{}{} + } + + // check legacy chains for those that haven't migrated fully to the loop relayer interface + if relayNetwork == relay.EVM { + _, wantEVM := tt.expectedRelayerNetworks[relay.EVM] + if wantEVM { + 
assert.Len(t, cr.LegacyEVMChains().Slice(), expectedChainCnt) + } else { + assert.Nil(t, cr.LegacyEVMChains()) + } + } + if relayNetwork == relay.Cosmos { + _, wantCosmos := tt.expectedRelayerNetworks[relay.Cosmos] + if wantCosmos { + assert.Len(t, cr.LegacyCosmosChains().Slice(), expectedChainCnt) + } else { + assert.Nil(t, cr.LegacyCosmosChains()) + } + } + + nodesStats, cnt, err := interops.NodeStatuses(testctx, 0, 0) + assert.NoError(t, err) + assert.Len(t, nodesStats, expectedNodeCnt) + assert.Equal(t, cnt, len(nodesStats)) + + } + assert.EqualValues(t, gotRelayerNetworks, tt.expectedRelayerNetworks) + + allRelayerIds := [][]relay.ID{ + tt.expectedEVMRelayerIds, + tt.expectedCosmosRelayerIds, + tt.expectedSolanaRelayerIds, + tt.expectedStarknetRelayerIds, + } + + for _, chainSpecificRelayerIds := range allRelayerIds { + for _, wantId := range chainSpecificRelayerIds { + lr, err := cr.Get(wantId) + assert.NotNil(t, lr) + assert.NoError(t, err) + stat, err := cr.ChainStatus(testctx, wantId) + assert.NoError(t, err) + assert.Equal(t, wantId.ChainID, stat.ID) + // check legacy chains for evm and cosmos + if wantId.Network == relay.EVM { + c, err := cr.LegacyEVMChains().Get(wantId.ChainID) + assert.NoError(t, err) + assert.NotNil(t, c) + assert.Equal(t, wantId.ChainID, c.ID().String()) + } + if wantId.Network == relay.Cosmos { + c, err := cr.LegacyCosmosChains().Get(wantId.ChainID) + assert.NoError(t, err) + assert.NotNil(t, c) + assert.Equal(t, wantId.ChainID, c.ID()) + } + } + } + + expectedMissing := relay.ID{Network: relay.Cosmos, ChainID: "not a chain id"} + unwanted, err := cr.Get(expectedMissing) + assert.Nil(t, unwanted) + assert.ErrorIs(t, err, plugin.ErrNoSuchRelayer) + + }) + + } + + t.Run("bad init func", func(t *testing.T) { + t.Parallel() + errBadFunc := errors.New("this is a bad func") + badFunc := func() plugin.CoreRelayerChainInitFunc { + return func(op *plugin.CoreRelayerChainInteroperators) error { + return errBadFunc + } + } + cr, err := 
plugin.NewCoreRelayerChainInteroperators(badFunc()) + assert.Nil(t, cr) + assert.ErrorIs(t, err, errBadFunc) + }) +} + +func ptr[T any](t T) *T { return &t } diff --git a/core/services/chainlink/relayer_factory.go b/core/services/chainlink/relayer_factory.go new file mode 100644 index 00000000..ae9e090b --- /dev/null +++ b/core/services/chainlink/relayer_factory.go @@ -0,0 +1,299 @@ +package plugin + +import ( + "context" + "errors" + "fmt" + + "github.com/jmoiron/sqlx" + "github.com/pelletier/go-toml/v2" + + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-cosmos/pkg/cosmos" + coscfg "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + "github.com/goplugin/plugin-solana/pkg/solana" + pkgsolana "github.com/goplugin/plugin-solana/pkg/solana" + pkgstarknet "github.com/goplugin/plugin-starknet/relayer/pkg/plugin" + starkchain "github.com/goplugin/plugin-starknet/relayer/pkg/plugin/chain" + "github.com/goplugin/plugin-starknet/relayer/pkg/plugin/config" + + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc" + "github.com/goplugin/pluginv3.0/v2/plugins" +) + +type RelayerFactory struct { + logger.Logger + *plugins.LoopRegistry + loop.GRPCOpts + MercuryPool wsrpc.Pool +} + +type EVMFactoryConfig struct { + legacyevm.ChainOpts + evmrelay.CSAETHKeystore +} + +func (r *RelayerFactory) NewEVM(ctx context.Context, config EVMFactoryConfig) (map[relay.ID]evmrelay.LoopRelayAdapter, error) { + // TODO impl EVM loop. 
	// For now always 'fallback' to an adapter and embedded chain

	relayers := make(map[relay.ID]evmrelay.LoopRelayAdapter)

	lggr := r.Logger.Named("EVM")

	// override some common opts with the factory values. this seems weird... maybe other signatures should change, or this should take a different type...
	ccOpts := legacyevm.ChainRelayExtenderConfig{
		Logger:    lggr,
		KeyStore:  config.CSAETHKeystore.Eth(),
		ChainOpts: config.ChainOpts,
	}

	evmRelayExtenders, err := evmrelay.NewChainRelayerExtenders(ctx, ccOpts)
	if err != nil {
		return nil, err
	}
	legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(evmRelayExtenders)
	for _, ext := range evmRelayExtenders.Slice() {
		relayID := relay.ID{Network: relay.EVM, ChainID: ext.Chain().ID().String()}
		chain, err2 := legacyChains.Get(relayID.ChainID)
		if err2 != nil {
			return nil, err2
		}

		relayerOpts := evmrelay.RelayerOpts{
			DB:             ccOpts.DB,
			QConfig:        ccOpts.AppConfig.Database(),
			CSAETHKeystore: config.CSAETHKeystore,
			MercuryPool:    r.MercuryPool,
		}
		relayer, err2 := evmrelay.NewRelayer(lggr.Named(relayID.ChainID), chain, relayerOpts)
		if err2 != nil {
			// accumulate instead of aborting so one bad chain does not hide the rest
			err = errors.Join(err, err2)
			continue
		}

		relayers[relayID] = evmrelay.NewLoopRelayServerAdapter(relayer, ext)
	}

	// always return err because it is accumulating individual errors
	return relayers, err
}

// SolanaFactoryConfig carries the keystore and per-chain TOML configs needed
// to build Solana relayers.
type SolanaFactoryConfig struct {
	Keystore keystore.Solana
	solana.TOMLConfigs
}

// NewSolana builds one relayer per enabled Solana chain config. When the
// Solana plugin command is configured in the environment the relayer runs as a
// LOOP subprocess; otherwise it falls back to an embedded chain.
// Duplicate chain definitions are rejected.
func (r *RelayerFactory) NewSolana(ks keystore.Solana, chainCfgs solana.TOMLConfigs) (map[relay.ID]loop.Relayer, error) {
	solanaRelayers := make(map[relay.ID]loop.Relayer)
	var (
		solLggr = r.Logger.Named("Solana")
		signer  = &keystore.SolanaSigner{Solana: ks}
	)

	unique := make(map[string]struct{})
	// create one relayer per chain id
	for _, chainCfg := range chainCfgs {

		relayID := relay.ID{Network: relay.Solana, ChainID: *chainCfg.ChainID}
		_, alreadyExists := unique[relayID.Name()]
		if alreadyExists {
			return nil, fmt.Errorf("duplicate chain definitions for %s", relayID.Name())
		}
		unique[relayID.Name()] = struct{}{}

		// skip disabled chains from further processing
		if !chainCfg.IsEnabled() {
			solLggr.Warnw("Skipping disabled chain", "id", chainCfg.ChainID)
			continue
		}

		lggr := solLggr.Named(relayID.ChainID)

		if cmdName := env.SolanaPlugin.Cmd.Get(); cmdName != "" {

			// setup the solana relayer to be a LOOP
			cfgTOML, err := toml.Marshal(struct {
				Solana solana.TOMLConfig
			}{Solana: *chainCfg})

			if err != nil {
				return nil, fmt.Errorf("failed to marshal Solana configs: %w", err)
			}
			envVars, err := plugins.ParseEnvFile(env.SolanaPlugin.Env.Get())
			if err != nil {
				return nil, fmt.Errorf("failed to parse Solana env file: %w", err)
			}
			solCmdFn, err := plugins.NewCmdFactory(r.Register, plugins.CmdConfig{
				ID:  relayID.Name(),
				Cmd: cmdName,
				Env: envVars,
			})
			if err != nil {
				return nil, fmt.Errorf("failed to create Solana LOOP command: %w", err)
			}

			solanaRelayers[relayID] = loop.NewRelayerService(lggr, r.GRPCOpts, solCmdFn, string(cfgTOML), signer)

		} else {
			// fallback to embedded chain
			opts := solana.ChainOpts{
				Logger:   lggr,
				KeyStore: signer,
			}

			chain, err := solana.NewChain(chainCfg, opts)
			if err != nil {
				return nil, err
			}
			solanaRelayers[relayID] = relay.NewServerAdapter(pkgsolana.NewRelayer(lggr, chain), chain)
		}
	}
	return solanaRelayers, nil
}

// StarkNetFactoryConfig carries the keystore and per-chain TOML configs needed
// to build StarkNet relayers.
type StarkNetFactoryConfig struct {
	Keystore keystore.StarkNet
	config.TOMLConfigs
}

// TODO BCF-2606 consider consolidating the driving logic with that of NewSolana above via generics
// perhaps when we implement a Cosmos LOOP
func (r *RelayerFactory) NewStarkNet(ks keystore.StarkNet, chainCfgs config.TOMLConfigs) (map[relay.ID]loop.Relayer, error) {
	starknetRelayers := make(map[relay.ID]loop.Relayer)

	var (
		starkLggr = r.Logger.Named("StarkNet")
		loopKs    = &keystore.StarknetLooppSigner{StarkNet: ks}
	)

	unique :=
make(map[string]struct{}) + // create one relayer per chain id + for _, chainCfg := range chainCfgs { + relayID := relay.ID{Network: relay.StarkNet, ChainID: *chainCfg.ChainID} + _, alreadyExists := unique[relayID.Name()] + if alreadyExists { + return nil, fmt.Errorf("duplicate chain definitions for %s", relayID.Name()) + } + unique[relayID.Name()] = struct{}{} + + // skip disabled chains from further processing + if !chainCfg.IsEnabled() { + starkLggr.Warnw("Skipping disabled chain", "id", chainCfg.ChainID) + continue + } + + lggr := starkLggr.Named(relayID.ChainID) + + if cmdName := env.StarknetPlugin.Cmd.Get(); cmdName != "" { + // setup the starknet relayer to be a LOOP + cfgTOML, err := toml.Marshal(struct { + Starknet config.TOMLConfig + }{Starknet: *chainCfg}) + if err != nil { + return nil, fmt.Errorf("failed to marshal StarkNet configs: %w", err) + } + + envVars, err := plugins.ParseEnvFile(env.StarknetPlugin.Env.Get()) + if err != nil { + return nil, fmt.Errorf("failed to parse Starknet env file: %w", err) + } + starknetCmdFn, err := plugins.NewCmdFactory(r.Register, plugins.CmdConfig{ + ID: relayID.Name(), + Cmd: cmdName, + Env: envVars, + }) + if err != nil { + return nil, fmt.Errorf("failed to create StarkNet LOOP command: %w", err) + } + // the starknet relayer service has a delicate keystore dependency. the value that is passed to NewRelayerService must + // be compatible with instantiating a starknet transaction manager KeystoreAdapter within the LOOPp executable. 
+ starknetRelayers[relayID] = loop.NewRelayerService(lggr, r.GRPCOpts, starknetCmdFn, string(cfgTOML), loopKs) + } else { + // fallback to embedded chain + opts := starkchain.ChainOpts{ + Logger: lggr, + KeyStore: loopKs, + } + + chain, err := starkchain.NewChain(chainCfg, opts) + if err != nil { + return nil, err + } + + starknetRelayers[relayID] = relay.NewServerAdapter(pkgstarknet.NewRelayer(lggr, chain), chain) + } + } + return starknetRelayers, nil + +} + +type CosmosFactoryConfig struct { + Keystore keystore.Cosmos + coscfg.TOMLConfigs + *sqlx.DB + pg.QConfig +} + +func (c CosmosFactoryConfig) Validate() error { + var err error + if c.Keystore == nil { + err = errors.Join(err, fmt.Errorf("nil Keystore")) + } + if len(c.TOMLConfigs) == 0 { + err = errors.Join(err, fmt.Errorf("no CosmosConfigs provided")) + } + if c.DB == nil { + err = errors.Join(err, fmt.Errorf("nil DB")) + } + if c.QConfig == nil { + err = errors.Join(err, fmt.Errorf("nil QConfig")) + } + + if err != nil { + err = fmt.Errorf("invalid CosmosFactoryConfig: %w", err) + } + return err +} + +func (r *RelayerFactory) NewCosmos(config CosmosFactoryConfig) (map[relay.ID]CosmosLoopRelayerChainer, error) { + err := config.Validate() + if err != nil { + return nil, fmt.Errorf("cannot create Cosmos relayer: %w", err) + } + relayers := make(map[relay.ID]CosmosLoopRelayerChainer) + + var ( + cosmosLggr = r.Logger.Named("Cosmos") + loopKs = &keystore.CosmosLoopKeystore{Cosmos: config.Keystore} + ) + + // create one relayer per chain id + for _, chainCfg := range config.TOMLConfigs { + relayID := relay.ID{Network: relay.Cosmos, ChainID: *chainCfg.ChainID} + + lggr := cosmosLggr.Named(relayID.ChainID) + + opts := cosmos.ChainOpts{ + Logger: lggr, + DB: config.DB, + KeyStore: loopKs, + } + + chain, err := cosmos.NewChain(chainCfg, opts) + if err != nil { + return nil, fmt.Errorf("failed to load Cosmos chain %q: %w", relayID, err) + } + + relayers[relayID] = NewCosmosLoopRelayerChain(cosmos.NewRelayer(lggr, 
chain), chain) + + } + return relayers, nil + +} diff --git a/core/services/chainlink/secret_generator.go b/core/services/chainlink/secret_generator.go new file mode 100644 index 00000000..ceeff222 --- /dev/null +++ b/core/services/chainlink/secret_generator.go @@ -0,0 +1,39 @@ +package plugin + +import ( + "encoding/base64" + "os" + "path/filepath" + + "github.com/gorilla/securecookie" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// this permission grants read / write access to file owners only +const readWritePerms = os.FileMode(0600) + +// SecretGenerator is the interface for objects that generate a secret +// used to sign or encrypt. +type SecretGenerator interface { + Generate(string) ([]byte, error) +} + +type FilePersistedSecretGenerator struct{} + +func (f FilePersistedSecretGenerator) Generate(rootDir string) ([]byte, error) { + sessionPath := filepath.Join(rootDir, "secret") + if exists, err := utils.FileExists(sessionPath); err != nil { + return nil, err + } else if exists { + data, err := os.ReadFile(sessionPath) + if err != nil { + return data, err + } + return base64.StdEncoding.DecodeString(string(data)) + } + key := securecookie.GenerateRandomKey(32) + str := base64.StdEncoding.EncodeToString(key) + err := utils.WriteFileWithMaxPerms(sessionPath, []byte(str), readWritePerms) + return key, err +} diff --git a/core/services/chainlink/secret_generator_test.go b/core/services/chainlink/secret_generator_test.go new file mode 100644 index 00000000..00da079e --- /dev/null +++ b/core/services/chainlink/secret_generator_test.go @@ -0,0 +1,22 @@ +package plugin + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFilePersistedSecretGenerator(t *testing.T) { + t.Parallel() + rootDir := t.TempDir() + var secretGenerator FilePersistedSecretGenerator + + initial, err := secretGenerator.Generate(rootDir) + require.NoError(t, err) + require.NotEqual(t, "", initial) + require.NotEqual(t, "clsession_test_secret", initial) + + 
second, err := secretGenerator.Generate(rootDir) + require.NoError(t, err) + require.Equal(t, initial, second) +} diff --git a/core/services/chainlink/testdata/config-empty-effective.toml b/core/services/chainlink/testdata/config-empty-effective.toml new file mode 100644 index 00000000..2cac38a1 --- /dev/null +++ b/core/services/chainlink/testdata/config-empty-effective.toml @@ -0,0 +1,230 @@ +InsecureFastScrypt = false +RootDir = '~/.plugin' +ShutdownGracePeriod = '5s' + +[Feature] +FeedsManager = true +LogPoller = false +UICSAKeys = false + +[Database] +DefaultIdleInTxSessionTimeout = '1h0m0s' +DefaultLockTimeout = '15s' +DefaultQueryTimeout = '10s' +LogQueries = false +MaxIdleConns = 10 +MaxOpenConns = 20 +MigrateOnStartup = true + +[Database.Backup] +Dir = '' +Frequency = '1h0m0s' +Mode = 'none' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '10m0s' +MinReconnectInterval = '1m0s' +FallbackPollInterval = '30s' + +[Database.Lock] +Enabled = true +LeaseDuration = '10s' +LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = false +BufferSize = 100 +MaxBatchSize = 50 +SendInterval = '500ms' +SendTimeout = '10s' +UseBatchSend = true + +[AuditLogger] +Enabled = false +ForwardToUrl = '' +JsonWrapperKey = '' +Headers = [] + +[Log] +Level = 'info' +JSONConsole = false +UnixTS = false + +[Log.File] +Dir = '' +MaxSize = '5.12gb' +MaxAgeDays = 0 +MaxBackups = 1 + +[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = 'http://localhost:3000,http://localhost:6688' +BridgeResponseURL = '' +BridgeCacheTTL = '0s' +HTTPWriteTimeout = '10s' +HTTPPort = 6688 +SecureCookies = true +SessionTimeout = '15m0s' +SessionReaperExpiration = '240h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '0.0.0.0' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = '' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = '' +ActiveAttributeAllowedValue = 
'' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = '' +RPOrigin = '' + +[WebServer.RateLimit] +Authenticated = 1000 +AuthenticatedPeriod = '1m0s' +Unauthenticated = 5 +UnauthenticatedPeriod = '20s' + +[WebServer.TLS] +CertPath = '' +ForceRedirect = false +Host = '' +HTTPSPort = 6689 +KeyPath = '' +ListenIP = '0.0.0.0' + +[JobPipeline] +ExternalInitiatorsEnabled = false +MaxRunDuration = '10m0s' +MaxSuccessfulRuns = 10000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '24h0m0s' +ResultWriteQueueDepth = 100 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '15s' +MaxSize = '32.77kb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false + +[OCR2] +Enabled = false +ContractConfirmations = 3 +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = false +ObservationTimeout = '5s' +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +DefaultTransactionQueueDepth = 1 +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +SimulateTransactions = false +TransmitterAddress = '' +CaptureEATelemetry = false +TraceLogging = false + +[P2P] +IncomingMessageBufferSize = 10 +OutgoingMessageBufferSize = 10 +PeerID = '' +TraceLogging = false + +[P2P.V2] +Enabled = true +AnnounceAddresses = [] +DefaultBootstrappers = [] +DeltaDial = '15s' +DeltaReconcile = '1m0s' +ListenAddresses = [] + +[Keeper] 
+DefaultTransactionQueueDepth = 1 +GasPriceBufferPercent = 20 +GasTipCapBufferPercent = 20 +BaseFeeBufferPercent = 20 +MaxGracePeriod = 100 +TurnLookBack = 1000 + +[Keeper.Registry] +CheckGasOverhead = 200000 +PerformGasOverhead = 300000 +MaxPerformDataSize = 5000 +SyncInterval = '30m0s' +SyncUpkeepQueueSize = 10 + +[AutoPprof] +Enabled = false +ProfileRoot = '' +PollInterval = '10s' +GatherDuration = '10s' +GatherTraceDuration = '5s' +MaxProfileSize = '100.00mb' +CPUProfileRate = 1 +MemProfileRate = 1 +BlockProfileRate = 1 +MutexProfileFraction = 1 +MemThreshold = '4.00gb' +GoroutineThreshold = 5000 + +[Pyroscope] +ServerAddress = '' +Environment = 'mainnet' + +[Sentry] +Debug = false +DSN = '' +Environment = '' +Release = '' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = false +CollectorTarget = '' +NodeID = '' +SamplingRatio = 0.0 +Mode = 'tls' +TLSCertPath = '' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1s' +MaxStaleAge = '1h0m0s' +LatestReportDeadline = '5s' + +[Mercury.TLS] +CertFile = '' diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml new file mode 100644 index 00000000..ab5deb42 --- /dev/null +++ b/core/services/chainlink/testdata/config-full.toml @@ -0,0 +1,423 @@ +InsecureFastScrypt = true +RootDir = 'test/root/dir' +ShutdownGracePeriod = '10s' + +[Feature] +FeedsManager = true +LogPoller = true +UICSAKeys = true + +[Database] +DefaultIdleInTxSessionTimeout = '1m0s' +DefaultLockTimeout = '1h0m0s' +DefaultQueryTimeout = '1s' +LogQueries = true +MaxIdleConns = 7 +MaxOpenConns = 13 +MigrateOnStartup = true + +[Database.Backup] +Dir = 'test/backup/dir' +Frequency = '1h0m0s' +Mode = 'full' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '1m0s' +MinReconnectInterval = '5m0s' +FallbackPollInterval = '2m0s' + +[Database.Lock] +Enabled = false +LeaseDuration = '1m0s' 
+LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = true +BufferSize = 1234 +MaxBatchSize = 4321 +SendInterval = '1m0s' +SendTimeout = '5s' +UseBatchSend = true + +[[TelemetryIngress.Endpoints]] +Network = 'EVM' +ChainID = '1' +URL = 'prom.test' +ServerPubKey = 'test-pub-key' + +[AuditLogger] +Enabled = true +ForwardToUrl = 'http://localhost:9898' +JsonWrapperKey = 'event' +Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and a bar+*'] + +[Log] +Level = 'crit' +JSONConsole = true +UnixTS = true + +[Log.File] +Dir = 'log/file/dir' +MaxSize = '100.00gb' +MaxAgeDays = 17 +MaxBackups = 9 + +[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = '*' +BridgeResponseURL = 'https://bridge.response' +BridgeCacheTTL = '10s' +HTTPWriteTimeout = '1m0s' +HTTPPort = 56 +SecureCookies = true +SessionTimeout = '1h0m0s' +SessionReaperExpiration = '168h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '192.158.1.37' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = 'dc=custom,dc=example,dc=com' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = 'organizationalStatus' +ActiveAttributeAllowedValue = 'ACTIVE' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = 'test-rpid' +RPOrigin = 'test-rp-origin' + +[WebServer.RateLimit] +Authenticated = 42 +AuthenticatedPeriod = '1s' +Unauthenticated = 7 +UnauthenticatedPeriod = '1m0s' + +[WebServer.TLS] +CertPath = 'tls/cert/path' +ForceRedirect = true +Host = 'tls-host' +HTTPSPort = 6789 +KeyPath = 'tls/key/path' +ListenIP = '192.158.1.38' + +[JobPipeline] +ExternalInitiatorsEnabled = true +MaxRunDuration = '1h0m0s' +MaxSuccessfulRuns = 123456 +ReaperInterval = 
'4h0m0s' +ReaperThreshold = '168h0m0s' +ResultWriteQueueDepth = 10 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '1m0s' +MaxSize = '100.00mb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 100 +SimulateTransactions = true + +[OCR2] +Enabled = true +ContractConfirmations = 11 +BlockchainTimeout = '3s' +ContractPollInterval = '1h0m0s' +ContractSubscribeInterval = '1m0s' +ContractTransmitterTransmitTimeout = '1m0s' +DatabaseTimeout = '8s' +KeyBundleID = '7a5f66bbe6594259325bf2b4f5b1a9c900000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = true +ObservationTimeout = '11s' +BlockchainTimeout = '3s' +ContractPollInterval = '1h0m0s' +ContractSubscribeInterval = '1m0s' +DefaultTransactionQueueDepth = 12 +KeyBundleID = 'acdd42797a8b921b2910497badc5000600000000000000000000000000000000' +SimulateTransactions = true +TransmitterAddress = '0xa0788FC17B1dEe36f057c42B6F373A34B014687e' +CaptureEATelemetry = false +TraceLogging = false + +[P2P] +IncomingMessageBufferSize = 13 +OutgoingMessageBufferSize = 17 +PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw' +TraceLogging = true + +[P2P.V2] +Enabled = false +AnnounceAddresses = ['a', 'b', 'c'] +DefaultBootstrappers = ['12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw@foo:42/bar:10', '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw@test:99'] +DeltaDial = '1m0s' +DeltaReconcile = '1s' +ListenAddresses = ['foo', 'bar'] + +[Keeper] +DefaultTransactionQueueDepth = 17 +GasPriceBufferPercent = 12 +GasTipCapBufferPercent = 43 +BaseFeeBufferPercent = 89 +MaxGracePeriod = 31 +TurnLookBack = 91 + +[Keeper.Registry] +CheckGasOverhead = 90 +PerformGasOverhead = 4294967295 +MaxPerformDataSize = 5000 +SyncInterval = '1h0m0s' +SyncUpkeepQueueSize = 31 + +[AutoPprof] +Enabled = true +ProfileRoot = 'prof/root' +PollInterval = '1m0s' +GatherDuration = '12s' 
+GatherTraceDuration = '13s' +MaxProfileSize = '1.00gb' +CPUProfileRate = 7 +MemProfileRate = 9 +BlockProfileRate = 5 +MutexProfileFraction = 2 +MemThreshold = '1.00gb' +GoroutineThreshold = 999 + +[Pyroscope] +ServerAddress = 'http://localhost:4040' +Environment = 'tests' + +[Sentry] +Debug = true +DSN = 'sentry-dsn' +Environment = 'dev' +Release = 'v1.2.3' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = true +CollectorTarget = 'localhost:4317' +NodeID = 'clc-ocr-sol-devnet-node-1' +SamplingRatio = 1.0 +Mode = 'tls' +TLSCertPath = '/path/to/cert.pem' + +[Tracing.Attributes] +env = 'dev' +test = 'load' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1m40s' +MaxStaleAge = '1m41s' +LatestReportDeadline = '1m42s' + +[Mercury.TLS] +CertFile = '/path/to/cert.pem' + +[[EVM]] +ChainID = '1' +Enabled = false +AutoCreateKey = false +BlockBackfillDepth = 100 +BlockBackfillSkip = true +ChainType = 'Optimism' +FinalityDepth = 42 +FinalityTagEnabled = false +FlagsContractAddress = '0xae4E781a6218A8031764928E88d457937A954fC3' +LinkContractAddress = '0x538aAaB4ea120b2bC2fe5D296852D948F07D849e' +LogBackfillBatchSize = 17 +LogPollInterval = '1m0s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 13 +MinContractPayment = '9.223372036854775807 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '1m0s' +OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' +RPCDefaultBatchSize = 17 +RPCBlockQueryDelay = 10 + +[EVM.Transactions] +ForwardersEnabled = true +MaxInFlight = 19 +MaxQueued = 99 +ReaperInterval = '1m0s' +ReaperThreshold = '1m0s' +ResendAfterThreshold = '1h0m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '9.223372036854775807 ether' +PriceMax = '281.474976710655 micro' +PriceMin = '13 wei' +LimitDefault = 12 +LimitMax = 17 +LimitMultiplier = '1.234' +LimitTransfer = 100 +BumpMin = '100 wei' 
+BumpPercent = 10 +BumpThreshold = 6 +BumpTxDepth = 6 +EIP1559DynamicFees = true +FeeCapDefault = '9.223372036854775807 ether' +TipCapDefault = '2 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.LimitJobType] +OCR = 1001 +OCR2 = 1006 +DR = 1002 +VRF = 1003 +FM = 1004 +Keeper = 1005 + +[EVM.GasEstimator.BlockHistory] +BatchSize = 17 +BlockHistorySize = 12 +CheckInclusionBlocks = 18 +CheckInclusionPercentile = 19 +EIP1559FeeCapBufferBlocks = 13 +TransactionPercentile = 15 + +[EVM.HeadTracker] +HistoryDepth = 15 +MaxBufferSize = 17 +SamplingInterval = '1h0m0s' + +[[EVM.KeySpecific]] +Key = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' + +[EVM.KeySpecific.GasEstimator] +PriceMax = '79.228162514264337593543950335 gether' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '1m0s' +SelectionMode = 'HighestHead' +SyncThreshold = 13 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 11 +ContractTransmitterTransmitTimeout = '1m0s' +DatabaseTimeout = '1s' +DeltaCOverride = '1h0m0s' +DeltaCJitterOverride = '1s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 540 + +[[EVM.Nodes]] +Name = 'foo' +WSURL = 'wss://web.socket/test/foo' +HTTPURL = 'https://foo.web' + +[[EVM.Nodes]] +Name = 'bar' +WSURL = 'wss://web.socket/test/bar' +HTTPURL = 'https://bar.com' + +[[EVM.Nodes]] +Name = 'broadcast' +HTTPURL = 'http://broadcast.mirror' +SendOnly = true + +[[Cosmos]] +ChainID = 'Malaga-420' +Enabled = true +Bech32Prefix = 'wasm' +BlockRate = '1m0s' +BlocksUntilTxTimeout = 12 +ConfirmPollPeriod = '1s' +FallbackGasPrice = '0.001' +GasToken = 'ucosm' +GasLimitMultiplier = '1.2' +MaxMsgsPerBatch = 17 +OCR2CachePollPeriod = '1m0s' +OCR2CacheTTL = '1h0m0s' +TxMsgTimeout = '1s' + +[[Cosmos.Nodes]] +Name = 'primary' +TendermintURL = 'http://tender.mint' + +[[Cosmos.Nodes]] +Name = 'foo' +TendermintURL = 'http://foo.url' + +[[Cosmos.Nodes]] +Name = 'bar' +TendermintURL = 'http://bar.web' + +[[Solana]] +ChainID = 'mainnet' +Enabled = false 
+BalancePollPeriod = '1m0s' +ConfirmPollPeriod = '1s' +OCR2CachePollPeriod = '1m0s' +OCR2CacheTTL = '1h0m0s' +TxTimeout = '1h0m0s' +TxRetryTimeout = '1m0s' +TxConfirmTimeout = '1s' +SkipPreflight = true +Commitment = 'banana' +MaxRetries = 7 +FeeEstimatorMode = 'fixed' +ComputeUnitPriceMax = 1000 +ComputeUnitPriceMin = 10 +ComputeUnitPriceDefault = 100 +FeeBumpPeriod = '1m0s' + +[[Solana.Nodes]] +Name = 'primary' +URL = 'http://solana.web' + +[[Solana.Nodes]] +Name = 'foo' +URL = 'http://solana.foo' + +[[Solana.Nodes]] +Name = 'bar' +URL = 'http://solana.bar' + +[[Starknet]] +ChainID = 'foobar' +Enabled = true +OCR2CachePollPeriod = '6h0m0s' +OCR2CacheTTL = '3m0s' +RequestTimeout = '1m3s' +TxTimeout = '13s' +ConfirmationPoll = '42s' + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://stark.node' diff --git a/core/services/chainlink/testdata/config-invalid.toml b/core/services/chainlink/testdata/config-invalid.toml new file mode 100644 index 00000000..4d8c9bc2 --- /dev/null +++ b/core/services/chainlink/testdata/config-invalid.toml @@ -0,0 +1,140 @@ +[Database.Lock] +LeaseRefreshInterval='6s' +LeaseDuration='10s' + +[WebServer] +AuthenticationMethod = 'ldap' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = '' +BaseDN = '' +UsersDN = '' +GroupsDN = '' +ActiveAttribute = '' +ActiveAttributeAllowedValue = '' +AdminUserGroupCN = '' +EditUserGroupCN = '' +RunUserGroupCN = '' +ReadUserGroupCN = '' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[[EVM]] +ChainID = '1' +Transactions.MaxInFlight= 10 + +[EVM.GasEstimator] +Mode = 'BlockHistory' +BumpTxDepth = 11 +BumpPercent = 1 +TipCapDefault = 3 +TipCapMin = 4 +FeeCapDefault = 2 +PriceMin = '10 gwei' +PriceDefault = '9 gwei' +PriceMax = '5 gwei' + +[EVM.GasEstimator.BlockHistory] +BlockHistorySize = 0 + +[[EVM.Nodes]] +Name = 'foo' + +[[EVM.Nodes]] +Name = 'foo' +SendOnly = true + +[[EVM]] 
+ChainID = '1' +ChainType = 'Foo' +FinalityDepth = 32 + +[EVM.GasEstimator] +Mode = 'FixedPrice' +BumpThreshold = 0 +EIP1559DynamicFees = true +FeeCapDefault = 101 +PriceMax = 99 + +[EVM.HeadTracker] +HistoryDepth = 30 + +[[EVM.KeySpecific]] +Key = '0xde709f2102306220921060314715629080e2fb77' + +[[EVM.KeySpecific]] +Key = '0xde709f2102306220921060314715629080e2fb77' + +[[EVM]] +ChainID = '10' +ChainType = 'Arbitrum' +FinalityDepth = 0 +MinIncomingConfirmations = 0 + +[[EVM]] +ChainID = '99' + +[[EVM.Nodes]] +HTTPURl = '' + +[[EVM.Nodes]] +WSURL = 'http://asdf.test' + +[[EVM.Nodes]] +Name = '' +HTTPURl = 'ws://foo.bar' + +[[EVM.Nodes]] +Name = 'dupe' +WSURL = 'ws://dupe.com' + +[[EVM.Nodes]] +Name = 'dupe2' +WSURL = 'ws://dupe.com' + +[[EVM]] + +[[Cosmos]] +ChainID = 'Malaga-420' + +[[Cosmos.Nodes]] +Name = 'test' + +[[Cosmos.Nodes]] +Name = 'test' + +[[Cosmos]] +ChainID = 'Malaga-420' + +[[Cosmos]] + +[[Solana]] +ChainID = 'mainnet' + +[[Solana]] +ChainID = 'mainnet' + +[[Solana.Nodes]] +Name = 'bar' + +[[Solana.Nodes]] +Name = 'bar' + +[[Solana]] + +[[Starknet]] + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://stark.node' + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://second.stark.node' + +[[Starknet]] diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml new file mode 100644 index 00000000..8c83b3d8 --- /dev/null +++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml @@ -0,0 +1,572 @@ +InsecureFastScrypt = false +RootDir = 'my/root/dir' +ShutdownGracePeriod = '5s' + +[Feature] +FeedsManager = true +LogPoller = false +UICSAKeys = false + +[Database] +DefaultIdleInTxSessionTimeout = '1h0m0s' +DefaultLockTimeout = '15s' +DefaultQueryTimeout = '10s' +LogQueries = false +MaxIdleConns = 10 +MaxOpenConns = 20 +MigrateOnStartup = true + +[Database.Backup] +Dir = '' +Frequency = '1h0m0s' +Mode = 'none' +OnVersionUpgrade = true + 
+[Database.Listener] +MaxReconnectDuration = '10m0s' +MinReconnectInterval = '1m0s' +FallbackPollInterval = '2m0s' + +[Database.Lock] +Enabled = true +LeaseDuration = '10s' +LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = false +BufferSize = 100 +MaxBatchSize = 50 +SendInterval = '500ms' +SendTimeout = '10s' +UseBatchSend = true + +[AuditLogger] +Enabled = true +ForwardToUrl = 'http://localhost:9898' +JsonWrapperKey = 'event' +Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and a bar+*'] + +[Log] +Level = 'panic' +JSONConsole = true +UnixTS = false + +[Log.File] +Dir = '' +MaxSize = '5.12gb' +MaxAgeDays = 0 +MaxBackups = 1 + +[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = 'http://localhost:3000,http://localhost:6688' +BridgeResponseURL = '' +BridgeCacheTTL = '0s' +HTTPWriteTimeout = '10s' +HTTPPort = 6688 +SecureCookies = true +SessionTimeout = '15m0s' +SessionReaperExpiration = '240h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '0.0.0.0' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = '' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = '' +ActiveAttributeAllowedValue = '' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = '' +RPOrigin = '' + +[WebServer.RateLimit] +Authenticated = 1000 +AuthenticatedPeriod = '1m0s' +Unauthenticated = 5 +UnauthenticatedPeriod = '20s' + +[WebServer.TLS] +CertPath = '' +ForceRedirect = false +Host = '' +HTTPSPort = 6689 +KeyPath = '' +ListenIP = '0.0.0.0' + +[JobPipeline] +ExternalInitiatorsEnabled = false +MaxRunDuration = '10m0s' +MaxSuccessfulRuns = 10000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '24h0m0s' 
+ResultWriteQueueDepth = 100 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '30s' +MaxSize = '32.77kb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false + +[OCR2] +Enabled = true +ContractConfirmations = 3 +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '20s' +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = true +ObservationTimeout = '5s' +BlockchainTimeout = '5s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +DefaultTransactionQueueDepth = 1 +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +SimulateTransactions = false +TransmitterAddress = '' +CaptureEATelemetry = false +TraceLogging = false + +[P2P] +IncomingMessageBufferSize = 999 +OutgoingMessageBufferSize = 10 +PeerID = '' +TraceLogging = false + +[P2P.V2] +Enabled = true +AnnounceAddresses = [] +DefaultBootstrappers = [] +DeltaDial = '15s' +DeltaReconcile = '1m0s' +ListenAddresses = [] + +[Keeper] +DefaultTransactionQueueDepth = 1 +GasPriceBufferPercent = 10 +GasTipCapBufferPercent = 20 +BaseFeeBufferPercent = 20 +MaxGracePeriod = 100 +TurnLookBack = 1000 + +[Keeper.Registry] +CheckGasOverhead = 200000 +PerformGasOverhead = 300000 +MaxPerformDataSize = 5000 +SyncInterval = '30m0s' +SyncUpkeepQueueSize = 10 + +[AutoPprof] +Enabled = false +ProfileRoot = '' +PollInterval = '10s' +GatherDuration = '10s' +GatherTraceDuration = '5s' +MaxProfileSize = '100.00mb' +CPUProfileRate = 7 +MemProfileRate = 1 +BlockProfileRate = 1 +MutexProfileFraction = 1 +MemThreshold = '4.00gb' +GoroutineThreshold = 5000 + +[Pyroscope] +ServerAddress = '' +Environment = 'mainnet' + +[Sentry] +Debug = false +DSN = '' +Environment = '' 
+Release = '' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = false +CollectorTarget = '' +NodeID = '' +SamplingRatio = 0.0 +Mode = 'tls' +TLSCertPath = '' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1s' +MaxStaleAge = '1h0m0s' +LatestReportDeadline = '5s' + +[Mercury.TLS] +CertFile = '' + +[[EVM]] +ChainID = '1' +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 26 +FinalityTagEnabled = false +LinkContractAddress = '0x514910771AF9Ca656af840dff83E8264EcF986CA' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[EVM.Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[EVM.HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 
4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 5400000 + +[[EVM.Nodes]] +Name = 'primary' +WSURL = 'wss://web.socket/mainnet' + +[[EVM.Nodes]] +Name = 'secondary' +HTTPURL = 'http://broadcast.mirror' +SendOnly = true + +[[EVM]] +ChainID = '42' +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0xa36085F69e2889c224210F603D836748e7dC0088' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[EVM.Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '9.223372036854775807 ether' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[EVM.HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[EVM.OCR] 
+ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 5400000 + +[[EVM.Nodes]] +Name = 'foo' +WSURL = 'wss://web.socket/test/foo' + +[[EVM]] +ChainID = '137' +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 500 +FinalityTagEnabled = false +LinkContractAddress = '0xb0897686c545045aFc77CF20eC7A532E3120E0F1' +LogBackfillBatchSize = 1000 +LogPollInterval = '1s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 5 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 100 +RPCBlockQueryDelay = 10 + +[EVM.Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 5000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'FixedPrice' +PriceDefault = '30 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '30 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '20 gwei' +BumpPercent = 20 +BumpThreshold = 5 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[EVM.HeadTracker] +HistoryDepth = 2000 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' 
+ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 5400000 + +[[EVM.Nodes]] +Name = 'bar' +WSURL = 'wss://web.socket/test/bar' + +[[Cosmos]] +ChainID = 'Ibiza-808' +Bech32Prefix = 'wasm' +BlockRate = '6s' +BlocksUntilTxTimeout = 30 +ConfirmPollPeriod = '1s' +FallbackGasPrice = '0.015' +GasToken = 'ucosm' +GasLimitMultiplier = '1.5' +MaxMsgsPerBatch = 13 +OCR2CachePollPeriod = '4s' +OCR2CacheTTL = '1m0s' +TxMsgTimeout = '10m0s' + +[[Cosmos.Nodes]] +Name = 'primary' +TendermintURL = 'http://columbus.cosmos.com' + +[[Cosmos]] +ChainID = 'Malaga-420' +Bech32Prefix = 'wasm' +BlockRate = '6s' +BlocksUntilTxTimeout = 20 +ConfirmPollPeriod = '1s' +FallbackGasPrice = '0.015' +GasToken = 'ucosm' +GasLimitMultiplier = '1.5' +MaxMsgsPerBatch = 100 +OCR2CachePollPeriod = '4s' +OCR2CacheTTL = '1m0s' +TxMsgTimeout = '10m0s' + +[[Cosmos.Nodes]] +Name = 'secondary' +TendermintURL = 'http://bombay.cosmos.com' + +[[Solana]] +ChainID = 'mainnet' +BalancePollPeriod = '5s' +ConfirmPollPeriod = '500ms' +OCR2CachePollPeriod = '1s' +OCR2CacheTTL = '1m0s' +TxTimeout = '1m0s' +TxRetryTimeout = '10s' +TxConfirmTimeout = '30s' +SkipPreflight = true +Commitment = 'confirmed' +MaxRetries = 12 +FeeEstimatorMode = 'fixed' +ComputeUnitPriceMax = 1000 +ComputeUnitPriceMin = 0 +ComputeUnitPriceDefault = 0 +FeeBumpPeriod = '3s' + +[[Solana.Nodes]] +Name = 'primary' +URL = 'http://mainnet.solana.com' + +[[Solana]] +ChainID = 'testnet' +BalancePollPeriod = '5s' +ConfirmPollPeriod = '500ms' +OCR2CachePollPeriod = '1m0s' +OCR2CacheTTL = '1m0s' +TxTimeout = '1m0s' +TxRetryTimeout = '10s' +TxConfirmTimeout = '30s' +SkipPreflight = true +Commitment = 'confirmed' +MaxRetries = 0 +FeeEstimatorMode = 'fixed' +ComputeUnitPriceMax = 1000 +ComputeUnitPriceMin = 0 +ComputeUnitPriceDefault = 0 +FeeBumpPeriod = '3s' + +[[Solana.Nodes]] +Name = 'secondary' +URL = 'http://testnet.solana.com' + +[[Starknet]] +ChainID = 'foobar' +OCR2CachePollPeriod = '5s' +OCR2CacheTTL = '1m0s' 
+RequestTimeout = '10s' +TxTimeout = '10s' +ConfirmationPoll = '1h0m0s' + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://stark.node' diff --git a/core/services/chainlink/testdata/config-multi-chain.toml b/core/services/chainlink/testdata/config-multi-chain.toml new file mode 100644 index 00000000..53425dd1 --- /dev/null +++ b/core/services/chainlink/testdata/config-multi-chain.toml @@ -0,0 +1,110 @@ +RootDir = 'my/root/dir' + +[Database] +[Database.Listener] +FallbackPollInterval = '2m0s' + +[AuditLogger] +Enabled = true +ForwardToUrl = 'http://localhost:9898' +JsonWrapperKey = 'event' +Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and a bar+*'] + +[Log] +Level = 'panic' +JSONConsole = true + +[JobPipeline] +[JobPipeline.HTTPRequest] +DefaultTimeout = '30s' + +[OCR2] +Enabled = true +DatabaseTimeout = '20s' + +[OCR] +Enabled = true +BlockchainTimeout = '5s' + +[P2P] +IncomingMessageBufferSize = 999 + +[Keeper] +GasPriceBufferPercent = 10 + +[AutoPprof] +CPUProfileRate = 7 + +[[EVM]] +ChainID = '1' +FinalityDepth = 26 +FinalityTagEnabled = false + +[[EVM.Nodes]] +Name = 'primary' +WSURL = 'wss://web.socket/mainnet' + +[[EVM.Nodes]] +Name = 'secondary' +HTTPURL = 'http://broadcast.mirror' +SendOnly = true + +[[EVM]] +ChainID = '42' + +[EVM.GasEstimator] +PriceDefault = '9.223372036854775807 ether' + +[[EVM.Nodes]] +Name = 'foo' +WSURL = 'wss://web.socket/test/foo' + +[[EVM]] +ChainID = '137' + +[EVM.GasEstimator] +Mode = 'FixedPrice' + +[[EVM.Nodes]] +Name = 'bar' +WSURL = 'wss://web.socket/test/bar' + +[[Cosmos]] +ChainID = 'Ibiza-808' +MaxMsgsPerBatch = 13 + +[[Cosmos.Nodes]] +Name = 'primary' +TendermintURL = 'http://columbus.cosmos.com' + +[[Cosmos]] +ChainID = 'Malaga-420' +BlocksUntilTxTimeout = 20 + +[[Cosmos.Nodes]] +Name = 'secondary' +TendermintURL = 'http://bombay.cosmos.com' + +[[Solana]] +ChainID = 'mainnet' +MaxRetries = 12 + +[[Solana.Nodes]] +Name = 'primary' +URL = 'http://mainnet.solana.com' + +[[Solana]] +ChainID 
= 'testnet' +OCR2CachePollPeriod = '1m0s' + +[[Solana.Nodes]] +Name = 'secondary' +URL = 'http://testnet.solana.com' + +[[Starknet]] +ChainID = 'foobar' +ConfirmationPoll = '1h0m0s' + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://stark.node' diff --git a/core/services/chainlink/testdata/mergingsecretsdata/config.toml b/core/services/chainlink/testdata/mergingsecretsdata/config.toml new file mode 100644 index 00000000..6118f7e1 --- /dev/null +++ b/core/services/chainlink/testdata/mergingsecretsdata/config.toml @@ -0,0 +1,6 @@ +RootDir = 'set in config file' + +[P2P] +[P2P.V2] +AnnounceAddresses = ['set in config file'] +ListenAddresses = ['set in config file'] \ No newline at end of file diff --git a/core/services/chainlink/testdata/mergingsecretsdata/secrets-database.toml b/core/services/chainlink/testdata/mergingsecretsdata/secrets-database.toml new file mode 100644 index 00000000..db9d683a --- /dev/null +++ b/core/services/chainlink/testdata/mergingsecretsdata/secrets-database.toml @@ -0,0 +1,4 @@ +[Database] +URL = "postgres://172.17.0.1:5432/primary" +BackupURL = "postgres://172.17.0.1:5433/replica" +AllowSimplePasswords = false diff --git a/core/services/chainlink/testdata/mergingsecretsdata/secrets-mercury-split-one.toml b/core/services/chainlink/testdata/mergingsecretsdata/secrets-mercury-split-one.toml new file mode 100644 index 00000000..7f0820b1 --- /dev/null +++ b/core/services/chainlink/testdata/mergingsecretsdata/secrets-mercury-split-one.toml @@ -0,0 +1,9 @@ +[Mercury.Credentials.key1] +Username = "user" +Password = "user_pass" +URL = "https://mercury.stage.link" + +[Mercury.Credentials.key2] +Username = "user" +Password = "user_pass" +URL = "https://mercury.stage.link" \ No newline at end of file diff --git a/core/services/chainlink/testdata/mergingsecretsdata/secrets-mercury-split-two.toml b/core/services/chainlink/testdata/mergingsecretsdata/secrets-mercury-split-two.toml new file mode 100644 index 00000000..dbd63e6f --- /dev/null +++ 
b/core/services/chainlink/testdata/mergingsecretsdata/secrets-mercury-split-two.toml @@ -0,0 +1,9 @@ +[Mercury.Credentials.key3] +Username = "user" +Password = "user_pass" +URL = "https://mercury.stage.link" + +[Mercury.Credentials.key4] +Username = "user" +Password = "user_pass" +URL = "https://mercury.stage.link" \ No newline at end of file diff --git a/core/services/chainlink/testdata/mergingsecretsdata/secrets-password.toml b/core/services/chainlink/testdata/mergingsecretsdata/secrets-password.toml new file mode 100644 index 00000000..b18e36fb --- /dev/null +++ b/core/services/chainlink/testdata/mergingsecretsdata/secrets-password.toml @@ -0,0 +1,3 @@ +[Password] +Keystore = "mysecretpassword" +VRF = "mysecretvrfpassword" \ No newline at end of file diff --git a/core/services/chainlink/testdata/mergingsecretsdata/secrets-prometheus.toml b/core/services/chainlink/testdata/mergingsecretsdata/secrets-prometheus.toml new file mode 100644 index 00000000..4f653dd3 --- /dev/null +++ b/core/services/chainlink/testdata/mergingsecretsdata/secrets-prometheus.toml @@ -0,0 +1,2 @@ +[Prometheus] +AuthToken = "PROM_TOKEN" \ No newline at end of file diff --git a/core/services/chainlink/testdata/mergingsecretsdata/secrets-pyroscope.toml b/core/services/chainlink/testdata/mergingsecretsdata/secrets-pyroscope.toml new file mode 100644 index 00000000..d2fc51db --- /dev/null +++ b/core/services/chainlink/testdata/mergingsecretsdata/secrets-pyroscope.toml @@ -0,0 +1,2 @@ +[Pyroscope] +AuthToken = "PYROSCOPE_TOKEN" \ No newline at end of file diff --git a/core/services/chainlink/testdata/mergingsecretsdata/secrets-threshold.toml b/core/services/chainlink/testdata/mergingsecretsdata/secrets-threshold.toml new file mode 100644 index 00000000..2f7457fc --- /dev/null +++ b/core/services/chainlink/testdata/mergingsecretsdata/secrets-threshold.toml @@ -0,0 +1,2 @@ +[Threshold] +ThresholdKeyShare = "THRESHOLD_SECRET" \ No newline at end of file diff --git 
a/core/services/chainlink/testdata/mergingsecretsdata/secrets-webserver-ldap.toml b/core/services/chainlink/testdata/mergingsecretsdata/secrets-webserver-ldap.toml new file mode 100644 index 00000000..f73efcff --- /dev/null +++ b/core/services/chainlink/testdata/mergingsecretsdata/secrets-webserver-ldap.toml @@ -0,0 +1,4 @@ +[WebServer.LDAP] +ServerAddress = 'ldaps://127.0.0.1' +ReadOnlyUserLogin = 'viewer@example.com' +ReadOnlyUserPass = 'password' \ No newline at end of file diff --git a/core/services/chainlink/testdata/secrets-empty-effective.toml b/core/services/chainlink/testdata/secrets-empty-effective.toml new file mode 100644 index 00000000..bcb5e53b --- /dev/null +++ b/core/services/chainlink/testdata/secrets-empty-effective.toml @@ -0,0 +1,2 @@ +[Database] +AllowSimplePasswords = false diff --git a/core/services/chainlink/testdata/secrets-full-redacted.toml b/core/services/chainlink/testdata/secrets-full-redacted.toml new file mode 100644 index 00000000..9d91d79c --- /dev/null +++ b/core/services/chainlink/testdata/secrets-full-redacted.toml @@ -0,0 +1,38 @@ +[Database] +URL = 'xxxxx' +BackupURL = 'xxxxx' +AllowSimplePasswords = false + +[Password] +Keystore = 'xxxxx' +VRF = 'xxxxx' + +[WebServer] +[WebServer.LDAP] +ServerAddress = 'xxxxx' +ReadOnlyUserLogin = 'xxxxx' +ReadOnlyUserPass = 'xxxxx' + +[Pyroscope] +AuthToken = 'xxxxx' + +[Prometheus] +AuthToken = 'xxxxx' + +[Mercury] +[Mercury.Credentials] +[Mercury.Credentials.cred1] +URL = 'xxxxx' +Username = 'xxxxx' +Password = 'xxxxx' + +[Mercury.Credentials.cred2] +URL = 'xxxxx' +Username = 'xxxxx' +Password = 'xxxxx' + +[Mercury.Credentials.cred3] +LegacyURL = 'xxxxx' +URL = 'xxxxx' +Username = 'xxxxx' +Password = 'xxxxx' diff --git a/core/services/chainlink/testdata/secrets-full.toml b/core/services/chainlink/testdata/secrets-full.toml new file mode 100644 index 00000000..37a3e2e7 --- /dev/null +++ b/core/services/chainlink/testdata/secrets-full.toml @@ -0,0 +1,35 @@ +[Database] +URL = 
"postgresql://user:pass@localhost:5432/dbname?sslmode=disable" +BackupURL = "postgresql://user:pass@localhost:5432/backupdbname?sslmode=disable" + +[Password] +Keystore = "keystore_pass" +VRF = "VRF_pass" + +[WebServer] +[WebServer.LDAP] +ServerAddress = 'ldaps://127.0.0.1' +ReadOnlyUserLogin = 'viewer@example.com' +ReadOnlyUserPass = 'password' + +[Pyroscope] +AuthToken = "pyroscope-token" + +[Prometheus] +AuthToken = "prometheus-token" + +[Mercury.Credentials.cred1] +URL = "https://chain1.link" +Username = "username1" +Password = "password1" + +[Mercury.Credentials.cred2] +URL = "https://chain2.link" +Username = "username2" +Password = "password2" + +[Mercury.Credentials.cred3] +LegacyURL = "https://chain2.old.link" +URL = "https://chain2.link" +Username = "username2" +Password = "password2" diff --git a/core/services/chainlink/testdata/secrets-multi-redacted.toml b/core/services/chainlink/testdata/secrets-multi-redacted.toml new file mode 100644 index 00000000..27a1eb9f --- /dev/null +++ b/core/services/chainlink/testdata/secrets-multi-redacted.toml @@ -0,0 +1,6 @@ +[Database] +URL = 'xxxxx' +AllowSimplePasswords = false + +[Password] +Keystore = 'xxxxx' diff --git a/core/services/chainlink/testdata/secrets-multi.toml b/core/services/chainlink/testdata/secrets-multi.toml new file mode 100644 index 00000000..23438f9e --- /dev/null +++ b/core/services/chainlink/testdata/secrets-multi.toml @@ -0,0 +1,5 @@ +[Database] +URL = "postgresql://user:pass@localhost:5432/dbname?sslmode=disable" + +[Password] +Keystore = "keystore_pass" diff --git a/core/services/chainlink/types.go b/core/services/chainlink/types.go new file mode 100644 index 00000000..a6f9201e --- /dev/null +++ b/core/services/chainlink/types.go @@ -0,0 +1,22 @@ +package plugin + +import ( + coscfg "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + "github.com/goplugin/plugin-solana/pkg/solana" + stkcfg "github.com/goplugin/plugin-starknet/relayer/pkg/plugin/config" + + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/config" +) + +//go:generate mockery --quiet --name GeneralConfig --output ./mocks/ --case=underscore + +type GeneralConfig interface { + config.AppConfig + toml.HasEVMConfigs + CosmosConfigs() coscfg.TOMLConfigs + SolanaConfigs() solana.TOMLConfigs + StarknetConfigs() stkcfg.TOMLConfigs + // ConfigTOML returns both the user provided and effective configuration as TOML. + ConfigTOML() (user, effective string) +} diff --git a/core/services/cron/cron.go b/core/services/cron/cron.go new file mode 100644 index 00000000..576c24e3 --- /dev/null +++ b/core/services/cron/cron.go @@ -0,0 +1,91 @@ +package cron + +import ( + "context" + "fmt" + + "github.com/robfig/cron/v3" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +// Cron runs a cron jobSpec from a CronSpec +type Cron struct { + cronRunner *cron.Cron + logger logger.Logger + jobSpec job.Job + pipelineRunner pipeline.Runner + chStop services.StopChan +} + +// NewCronFromJobSpec instantiates a job that executes on a predefined schedule. +func NewCronFromJobSpec( + jobSpec job.Job, + pipelineRunner pipeline.Runner, + logger logger.Logger, +) (*Cron, error) { + cronLogger := logger.Named("Cron").With( + "jobID", jobSpec.ID, + "schedule", jobSpec.CronSpec.CronSchedule, + ) + + return &Cron{ + cronRunner: cronRunner(), + logger: cronLogger, + jobSpec: jobSpec, + pipelineRunner: pipelineRunner, + chStop: make(chan struct{}), + }, nil +} + +// Start implements the job.Service interface. 
+func (cr *Cron) Start(context.Context) error { + cr.logger.Debug("Starting") + + _, err := cr.cronRunner.AddFunc(cr.jobSpec.CronSpec.CronSchedule, cr.runPipeline) + if err != nil { + cr.logger.Errorw(fmt.Sprintf("Error running cron job %d", cr.jobSpec.ID), "err", err, "schedule", cr.jobSpec.CronSpec.CronSchedule, "jobID", cr.jobSpec.ID) + return err + } + cr.cronRunner.Start() + return nil +} + +// Close implements the job.Service interface. It stops this job from +// running and cleans up resources. +func (cr *Cron) Close() error { + cr.logger.Debug("Closing") + cr.cronRunner.Stop() + return nil +} + +func (cr *Cron) runPipeline() { + ctx, cancel := cr.chStop.NewCtx() + defer cancel() + + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "jobSpec": map[string]interface{}{ + "databaseID": cr.jobSpec.ID, + "externalJobID": cr.jobSpec.ExternalJobID, + "name": cr.jobSpec.Name.ValueOrZero(), + }, + "jobRun": map[string]interface{}{ + "meta": map[string]interface{}{}, + }, + }) + + run := pipeline.NewRun(*cr.jobSpec.PipelineSpec, vars) + + _, err := cr.pipelineRunner.Run(ctx, run, cr.logger, false, nil) + if err != nil { + cr.logger.Errorf("Error executing new run for jobSpec ID %v", cr.jobSpec.ID) + } +} + +func cronRunner() *cron.Cron { + return cron.New(cron.WithSeconds()) +} diff --git a/core/services/cron/cron_test.go b/core/services/cron/cron_test.go new file mode 100644 index 00000000..17d20d29 --- /dev/null +++ b/core/services/cron/cron_test.go @@ -0,0 +1,77 @@ +package cron_test + +import ( + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" 
+ "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/cron" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + pipelinemocks "github.com/goplugin/pluginv3.0/v2/core/services/pipeline/mocks" +) + +func TestCronV2Pipeline(t *testing.T) { + runner := pipelinemocks.NewRunner(t) + cfg := configtest.NewTestGeneralConfig(t) + db := pgtest.NewSqlxDB(t) + + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + lggr := logger.TestLogger(t) + orm := pipeline.NewORM(db, lggr, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + btORM := bridges.NewORM(db, lggr, cfg.Database()) + jobORM := job.NewORM(db, orm, btORM, keyStore, lggr, cfg.Database()) + + jb := &job.Job{ + Type: job.Cron, + SchemaVersion: 1, + CronSpec: &job.CronSpec{CronSchedule: "@every 1s"}, + PipelineSpec: &pipeline.Spec{}, + ExternalJobID: uuid.New(), + } + delegate := cron.NewDelegate(runner, lggr) + + require.NoError(t, jobORM.CreateJob(jb)) + serviceArray, err := delegate.ServicesForSpec(*jb) + require.NoError(t, err) + assert.Len(t, serviceArray, 1) + service := serviceArray[0] + + err = service.Start(testutils.Context(t)) + require.NoError(t, err) + defer func() { assert.NoError(t, service.Close()) }() +} + +func TestCronV2Schedule(t *testing.T) { + t.Parallel() + + spec := job.Job{ + Type: job.Cron, + SchemaVersion: 1, + CronSpec: &job.CronSpec{CronSchedule: "@every 1s"}, + PipelineSpec: &pipeline.Spec{}, + } + runner := pipelinemocks.NewRunner(t) + awaiter := cltest.NewAwaiter() + runner.On("Run", mock.Anything, mock.AnythingOfType("*pipeline.Run"), mock.Anything, mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { awaiter.ItHappened() }). + Return(false, nil). 
+ Once() + + service, err := cron.NewCronFromJobSpec(spec, runner, logger.TestLogger(t)) + require.NoError(t, err) + err = service.Start(testutils.Context(t)) + require.NoError(t, err) + defer func() { assert.NoError(t, service.Close()) }() + + awaiter.AwaitOrFail(t) +} diff --git a/core/services/cron/delegate.go b/core/services/cron/delegate.go new file mode 100644 index 00000000..a80ba5e0 --- /dev/null +++ b/core/services/cron/delegate.go @@ -0,0 +1,47 @@ +package cron + +import ( + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +type Delegate struct { + pipelineRunner pipeline.Runner + lggr logger.Logger +} + +var _ job.Delegate = (*Delegate)(nil) + +func NewDelegate(pipelineRunner pipeline.Runner, lggr logger.Logger) *Delegate { + return &Delegate{ + pipelineRunner: pipelineRunner, + lggr: lggr, + } +} + +func (d *Delegate) JobType() job.Type { + return job.Cron +} + +func (d *Delegate) BeforeJobCreated(spec job.Job) {} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } + +// ServicesForSpec returns the scheduler to be used for running cron jobs +func (d *Delegate) ServicesForSpec(spec job.Job) (services []job.ServiceCtx, err error) { + if spec.CronSpec == nil { + return nil, errors.Errorf("services.Delegate expects a *jobSpec.CronSpec to be present, got %v", spec) + } + + cron, err := NewCronFromJobSpec(spec, d.pipelineRunner, d.lggr) + if err != nil { + return nil, err + } + + return []job.ServiceCtx{cron}, nil +} diff --git a/core/services/cron/validate.go b/core/services/cron/validate.go new file mode 100644 index 00000000..eb4c8053 --- /dev/null +++ b/core/services/cron/validate.go @@ -0,0 +1,42 @@ +package cron + 
+import ( + "github.com/google/uuid" + "github.com/pelletier/go-toml" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func ValidatedCronSpec(tomlString string) (job.Job, error) { + var jb = job.Job{ + ExternalJobID: uuid.New(), // Default to generating a uuid, can be overwritten by the specified one in tomlString. + } + + tree, err := toml.Load(tomlString) + if err != nil { + return jb, errors.Wrap(err, "toml error on load") + } + + err = tree.Unmarshal(&jb) + if err != nil { + return jb, errors.Wrap(err, "toml unmarshal error on spec") + } + + var spec job.CronSpec + err = tree.Unmarshal(&spec) + if err != nil { + return jb, errors.Wrap(err, "toml unmarshal error on job") + } + + jb.CronSpec = &spec + if jb.Type != job.Cron { + return jb, errors.Errorf("unsupported type %s", jb.Type) + } + if err := utils.ValidateCronSchedule(spec.CronSchedule); err != nil { + return jb, errors.Wrapf(err, "while validating cron schedule '%v'", spec.CronSchedule) + } + + return jb, nil +} diff --git a/core/services/cron/validate_test.go b/core/services/cron/validate_test.go new file mode 100644 index 00000000..871ef847 --- /dev/null +++ b/core/services/cron/validate_test.go @@ -0,0 +1,87 @@ +package cron_test + +import ( + "strings" + "testing" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/cron" + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +func TestValidatedCronJobSpec(t *testing.T) { + var tt = []struct { + name string + toml string + assertion func(t *testing.T, os job.Job, err error) + }{ + { + name: "valid spec", + toml: ` +type = "cron" +schemaVersion = 1 +schedule = "CRON_TZ=UTC 0 0 1 1 * *" +observationSource = """ +ds [type=http method=GET url="https://chain.link/ETH-USD"]; +ds_parse [type=jsonparse path="data,price"]; +ds_multiply 
[type=multiply times=100]; +ds -> ds_parse -> ds_multiply; +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.NoError(t, err) + require.NotNil(t, s.CronSpec) + b, err := jsonapi.Marshal(s.CronSpec) + require.NoError(t, err) + var r job.CronSpec + err = jsonapi.Unmarshal(b, &r) + require.NoError(t, err) + }, + }, + { + name: "no timezone", + toml: ` +type = "cron" +schemaVersion = 1 +schedule = "0 0 1 1 * *" +observationSource = """ +ds [type=http method=GET url="https://chain.link/ETH-USD"]; +ds_parse [type=jsonparse path="data,price"]; +ds_multiply [type=multiply times=100]; +ds -> ds_parse -> ds_multiply; +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "cron schedule must specify a time zone using CRON_TZ")) + }, + }, + { + name: "invalid cron schedule", + toml: ` +type = "cron" +schemaVersion = 1 +schedule = "CRON_TZ=UTC x x" +observationSource = """ +ds [type=http method=GET url="https://chain.link/ETH-USD"]; +ds_parse [type=jsonparse path="data,price"]; +ds_multiply [type=multiply times=100]; +ds -> ds_parse -> ds_multiply; +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "invalid cron schedule")) + }, + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + s, err := cron.ValidatedCronSpec(tc.toml) + tc.assertion(t, s, err) + }) + } +} diff --git a/core/services/directrequest/delegate.go b/core/services/directrequest/delegate.go new file mode 100644 index 00000000..b116d42d --- /dev/null +++ b/core/services/directrequest/delegate.go @@ -0,0 +1,419 @@ +package directrequest + +import ( + "context" + "fmt" + "reflect" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/services" + 
"github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/operator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +type ( + Delegate struct { + logger logger.Logger + pipelineRunner pipeline.Runner + pipelineORM pipeline.ORM + chHeads chan *evmtypes.Head + legacyChains legacyevm.LegacyChainContainer + mailMon *mailbox.Monitor + } + + Config interface { + MinIncomingConfirmations() uint32 + MinContractPayment() *assets.Link + } +) + +var _ job.Delegate = (*Delegate)(nil) + +func NewDelegate( + logger logger.Logger, + pipelineRunner pipeline.Runner, + pipelineORM pipeline.ORM, + legacyChains legacyevm.LegacyChainContainer, + mailMon *mailbox.Monitor, +) *Delegate { + return &Delegate{ + logger: logger.Named("DirectRequest"), + pipelineRunner: pipelineRunner, + pipelineORM: pipelineORM, + chHeads: make(chan *evmtypes.Head, 1), + legacyChains: legacyChains, + mailMon: mailMon, + } +} + +func (d *Delegate) JobType() job.Type { + return job.DirectRequest +} + +func (d *Delegate) BeforeJobCreated(spec job.Job) {} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } + +// ServicesForSpec returns the log listener service for a direct request job +func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { + if jb.DirectRequestSpec == nil { + return nil, errors.Errorf("DirectRequest: directrequest.Delegate expects a *job.DirectRequestSpec to 
be present, got %v", jb) + } + chain, err := d.legacyChains.Get(jb.DirectRequestSpec.EVMChainID.String()) + if err != nil { + return nil, err + } + concreteSpec := job.SetDRMinIncomingConfirmations(chain.Config().EVM().MinIncomingConfirmations(), *jb.DirectRequestSpec) + + oracle, err := operator_wrapper.NewOperator(concreteSpec.ContractAddress.Address(), chain.Client()) + if err != nil { + return nil, errors.Wrapf(err, "DirectRequest: failed to create an operator wrapper for address: %v", concreteSpec.ContractAddress.Address().String()) + } + + svcLogger := d.logger.Named(jb.ExternalJobID.String()). + With( + "contract", concreteSpec.ContractAddress.Address().String(), + "jobName", jb.PipelineSpec.JobName, + "jobID", jb.PipelineSpec.JobID, + "externalJobID", jb.ExternalJobID, + ) + + logListener := &listener{ + logger: svcLogger.Named("Listener"), + config: chain.Config().EVM(), + logBroadcaster: chain.LogBroadcaster(), + oracle: oracle, + pipelineRunner: d.pipelineRunner, + pipelineORM: d.pipelineORM, + mailMon: d.mailMon, + job: jb, + mbOracleRequests: mailbox.NewHighCapacity[log.Broadcast](), + mbOracleCancelRequests: mailbox.NewHighCapacity[log.Broadcast](), + minIncomingConfirmations: concreteSpec.MinIncomingConfirmations.Uint32, + requesters: concreteSpec.Requesters, + minContractPayment: concreteSpec.MinContractPayment, + chStop: make(chan struct{}), + } + var services []job.ServiceCtx + services = append(services, logListener) + + return services, nil +} + +var ( + _ log.Listener = &listener{} + _ job.ServiceCtx = &listener{} +) + +type listener struct { + services.StateMachine + logger logger.Logger + config Config + logBroadcaster log.Broadcaster + oracle operator_wrapper.OperatorInterface + pipelineRunner pipeline.Runner + pipelineORM pipeline.ORM + mailMon *mailbox.Monitor + job job.Job + runs sync.Map // map[string]services.StopChan + shutdownWaitGroup sync.WaitGroup + mbOracleRequests *mailbox.Mailbox[log.Broadcast] + mbOracleCancelRequests 
*mailbox.Mailbox[log.Broadcast] + minIncomingConfirmations uint32 + requesters models.AddressCollection + minContractPayment *assets.Link + chStop chan struct{} +} + +func (l *listener) HealthReport() map[string]error { + return map[string]error{l.Name(): l.Healthy()} +} + +func (l *listener) Name() string { return l.logger.Name() } + +// Start complies with job.Service +func (l *listener) Start(context.Context) error { + return l.StartOnce("DirectRequestListener", func() error { + unsubscribeLogs := l.logBroadcaster.Register(l, log.ListenerOpts{ + Contract: l.oracle.Address(), + ParseLog: l.oracle.ParseLog, + LogsWithTopics: map[common.Hash][][]log.Topic{ + operator_wrapper.OperatorOracleRequest{}.Topic(): {{log.Topic(l.job.ExternalIDEncodeBytesToTopic()), log.Topic(l.job.ExternalIDEncodeStringToTopic())}}, + operator_wrapper.OperatorCancelOracleRequest{}.Topic(): {{log.Topic(l.job.ExternalIDEncodeBytesToTopic()), log.Topic(l.job.ExternalIDEncodeStringToTopic())}}, + }, + MinIncomingConfirmations: l.minIncomingConfirmations, + }) + l.shutdownWaitGroup.Add(3) + go l.processOracleRequests() + go l.processCancelOracleRequests() + + go func() { + <-l.chStop + unsubscribeLogs() + l.shutdownWaitGroup.Done() + }() + + l.mailMon.Monitor(l.mbOracleRequests, "DirectRequest", "Requests", fmt.Sprint(l.job.PipelineSpec.JobID)) + l.mailMon.Monitor(l.mbOracleCancelRequests, "DirectRequest", "Cancel", fmt.Sprint(l.job.PipelineSpec.JobID)) + + return nil + }) +} + +// Close complies with job.Service +func (l *listener) Close() error { + return l.StopOnce("DirectRequestListener", func() error { + l.runs.Range(func(key, runCloserChannelIf interface{}) bool { + runCloserChannel := runCloserChannelIf.(services.StopChan) + close(runCloserChannel) + return true + }) + l.runs = sync.Map{} + + close(l.chStop) + l.shutdownWaitGroup.Wait() + + return services.CloseAll(l.mbOracleRequests, l.mbOracleCancelRequests) + }) +} + +func (l *listener) HandleLog(lb log.Broadcast) { + log := 
lb.DecodedLog() + if log == nil || reflect.ValueOf(log).IsNil() { + l.logger.Error("HandleLog: ignoring nil value") + return + } + + switch log := log.(type) { + case *operator_wrapper.OperatorOracleRequest: + wasOverCapacity := l.mbOracleRequests.Deliver(lb) + if wasOverCapacity { + l.logger.Error("OracleRequest log mailbox is over capacity - dropped the oldest log") + } + case *operator_wrapper.OperatorCancelOracleRequest: + wasOverCapacity := l.mbOracleCancelRequests.Deliver(lb) + if wasOverCapacity { + l.logger.Error("CancelOracleRequest log mailbox is over capacity - dropped the oldest log") + } + default: + l.logger.Warnf("Unexpected log type %T", log) + } +} + +func (l *listener) processOracleRequests() { + for { + select { + case <-l.chStop: + l.shutdownWaitGroup.Done() + return + case <-l.mbOracleRequests.Notify(): + l.handleReceivedLogs(l.mbOracleRequests) + } + } +} + +func (l *listener) processCancelOracleRequests() { + for { + select { + case <-l.chStop: + l.shutdownWaitGroup.Done() + return + case <-l.mbOracleCancelRequests.Notify(): + l.handleReceivedLogs(l.mbOracleCancelRequests) + } + } +} + +func (l *listener) handleReceivedLogs(mailbox *mailbox.Mailbox[log.Broadcast]) { + for { + select { + case <-l.chStop: + return + default: + } + lb, exists := mailbox.Retrieve() + if !exists { + return + } + was, err := l.logBroadcaster.WasAlreadyConsumed(lb) + if err != nil { + l.logger.Errorw("Could not determine if log was already consumed", "err", err) + continue + } else if was { + continue + } + + logJobSpecID := lb.RawLog().Topics[1] + if logJobSpecID == (common.Hash{}) || (logJobSpecID != l.job.ExternalIDEncodeStringToTopic() && logJobSpecID != l.job.ExternalIDEncodeBytesToTopic()) { + l.logger.Debugw("Skipping Run for Log with wrong Job ID", "logJobSpecID", logJobSpecID) + l.markLogConsumed(lb) + continue + } + + log := lb.DecodedLog() + if log == nil || reflect.ValueOf(log).IsNil() { + l.logger.Error("HandleLog: ignoring nil value") + continue + } + 
+ switch log := log.(type) { + case *operator_wrapper.OperatorOracleRequest: + l.handleOracleRequest(log, lb) + case *operator_wrapper.OperatorCancelOracleRequest: + l.handleCancelOracleRequest(log, lb) + default: + l.logger.Warnf("Unexpected log type %T", log) + } + } +} + +func oracleRequestToMap(request *operator_wrapper.OperatorOracleRequest) map[string]interface{} { + result := make(map[string]interface{}) + result["specId"] = fmt.Sprintf("0x%x", request.SpecId) + result["requester"] = request.Requester.Hex() + result["requestId"] = formatRequestId(request.RequestId) + result["payment"] = fmt.Sprintf("%v", request.Payment) + result["callbackAddr"] = request.CallbackAddr.Hex() + result["callbackFunctionId"] = fmt.Sprintf("0x%x", request.CallbackFunctionId) + result["cancelExpiration"] = fmt.Sprintf("%v", request.CancelExpiration) + result["dataVersion"] = fmt.Sprintf("%v", request.DataVersion) + result["data"] = fmt.Sprintf("0x%x", request.Data) + return result +} + +func (l *listener) handleOracleRequest(request *operator_wrapper.OperatorOracleRequest, lb log.Broadcast) { + l.logger.Infow("Oracle request received", + "specId", fmt.Sprintf("%0x", request.SpecId), + "requester", request.Requester, + "requestId", fmt.Sprintf("%0x", request.RequestId), + "payment", request.Payment, + "callbackAddr", request.CallbackAddr, + "callbackFunctionId", fmt.Sprintf("%0x", request.CallbackFunctionId), + "cancelExpiration", request.CancelExpiration, + "dataVersion", request.DataVersion, + "data", fmt.Sprintf("%0x", request.Data), + ) + + if !l.allowRequester(request.Requester) { + l.logger.Infow("Rejected run for invalid requester", + "requester", request.Requester, + "allowedRequesters", l.requesters.ToStrings(), + ) + l.markLogConsumed(lb) + return + } + + var minContractPayment *assets.Link + if l.minContractPayment != nil { + minContractPayment = l.minContractPayment + } else { + minContractPayment = l.config.MinContractPayment() + } + if minContractPayment != nil && 
request.Payment != nil { + requestPayment := assets.Link(*request.Payment) + if minContractPayment.Cmp(&requestPayment) > 0 { + l.logger.Warnw("Rejected run for insufficient payment", + "minContractPayment", minContractPayment.String(), + "requestPayment", requestPayment.String(), + ) + l.markLogConsumed(lb) + return + } + } + + meta := make(map[string]interface{}) + meta["oracleRequest"] = oracleRequestToMap(request) + + runCloserChannel := make(services.StopChan) + runCloserChannelIf, loaded := l.runs.LoadOrStore(formatRequestId(request.RequestId), runCloserChannel) + if loaded { + runCloserChannel = runCloserChannelIf.(services.StopChan) + } + ctx, cancel := runCloserChannel.NewCtx() + defer cancel() + + evmChainID := lb.EVMChainID() + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "jobSpec": map[string]interface{}{ + "databaseID": l.job.ID, + "externalJobID": l.job.ExternalJobID, + "name": l.job.Name.ValueOrZero(), + "pipelineSpec": &pipeline.Spec{ + ForwardingAllowed: l.job.ForwardingAllowed, + }, + "evmChainID": evmChainID.String(), + }, + "jobRun": map[string]interface{}{ + "meta": meta, + "logBlockHash": request.Raw.BlockHash, + "logBlockNumber": request.Raw.BlockNumber, + "logTxHash": request.Raw.TxHash, + "logAddress": request.Raw.Address, + "logTopics": request.Raw.Topics, + "logData": request.Raw.Data, + "blockReceiptsRoot": lb.ReceiptsRoot(), + "blockTransactionsRoot": lb.TransactionsRoot(), + "blockStateRoot": lb.StateRoot(), + }, + }) + run := pipeline.NewRun(*l.job.PipelineSpec, vars) + _, err := l.pipelineRunner.Run(ctx, run, l.logger, true, func(tx pg.Queryer) error { + l.markLogConsumed(lb, pg.WithQueryer(tx)) + return nil + }) + if ctx.Err() != nil { + return + } else if err != nil { + l.logger.Errorw("Failed executing run", "err", err) + } +} + +func (l *listener) allowRequester(requester common.Address) bool { + if len(l.requesters) == 0 { + return true + } + for _, addr := range l.requesters { + if addr == requester { + return true + 
} + } + return false +} + +// Cancels runs that haven't been started yet, with the given request ID +func (l *listener) handleCancelOracleRequest(request *operator_wrapper.OperatorCancelOracleRequest, lb log.Broadcast) { + runCloserChannelIf, loaded := l.runs.LoadAndDelete(formatRequestId(request.RequestId)) + if loaded { + close(runCloserChannelIf.(services.StopChan)) + } + l.markLogConsumed(lb) +} + +func (l *listener) markLogConsumed(lb log.Broadcast, qopts ...pg.QOpt) { + if err := l.logBroadcaster.MarkConsumed(lb, qopts...); err != nil { + l.logger.Errorw("Unable to mark log consumed", "err", err, "log", lb.String()) + } +} + +// JobID - Job complies with log.Listener +func (l *listener) JobID() int32 { + return l.job.ID +} + +func formatRequestId(requestId [32]byte) string { + return fmt.Sprintf("0x%x", requestId) +} diff --git a/core/services/directrequest/delegate_test.go b/core/services/directrequest/delegate_test.go new file mode 100644 index 00000000..b83e1509 --- /dev/null +++ b/core/services/directrequest/delegate_test.go @@ -0,0 +1,552 @@ +package directrequest_test + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/plugin-common/pkg/utils/mailbox/mailboxtest" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + log_mocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log/mocks" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/operator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/directrequest" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + pipeline_mocks "github.com/goplugin/pluginv3.0/v2/core/services/pipeline/mocks" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +func TestDelegate_ServicesForSpec(t *testing.T) { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + runner := pipeline_mocks.NewRunner(t) + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].MinIncomingConfirmations = ptr[uint32](1) + }) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + mailMon := servicetest.Run(t, mailboxtest.NewMonitor(t)) + relayerExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: cfg, Client: ethClient, MailMon: mailMon, KeyStore: keyStore.Eth()}) + + lggr := logger.TestLogger(t) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayerExtenders) + delegate := directrequest.NewDelegate(lggr, runner, nil, legacyChains, mailMon) + + t.Run("Spec without DirectRequestSpec", func(t *testing.T) { + spec := job.Job{} + _, err := delegate.ServicesForSpec(spec) + assert.Error(t, err, "expects a *job.DirectRequestSpec to be present") + }) + + t.Run("Spec with DirectRequestSpec", func(t *testing.T) { + spec := job.Job{DirectRequestSpec: &job.DirectRequestSpec{EVMChainID: (*ubig.Big)(testutils.FixtureChainID)}, PipelineSpec: &pipeline.Spec{}} + 
services, err := delegate.ServicesForSpec(spec) + require.NoError(t, err) + assert.Len(t, services, 1) + }) +} + +type DirectRequestUniverse struct { + spec *job.Job + runner *pipeline_mocks.Runner + service job.ServiceCtx + jobORM job.ORM + listener log.Listener + logBroadcaster *log_mocks.Broadcaster + cleanup func() +} + +func NewDirectRequestUniverseWithConfig(t *testing.T, cfg plugin.GeneralConfig, specF func(spec *job.Job)) *DirectRequestUniverse { + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + broadcaster := log_mocks.NewBroadcaster(t) + runner := pipeline_mocks.NewRunner(t) + broadcaster.On("AddDependents", 1) + + mailMon := servicetest.Run(t, mailboxtest.NewMonitor(t)) + + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: cfg, Client: ethClient, LogBroadcaster: broadcaster, MailMon: mailMon, KeyStore: keyStore.Eth()}) + lggr := logger.TestLogger(t) + orm := pipeline.NewORM(db, lggr, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + btORM := bridges.NewORM(db, lggr, cfg.Database()) + jobORM := job.NewORM(db, orm, btORM, keyStore, lggr, cfg.Database()) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + delegate := directrequest.NewDelegate(lggr, runner, orm, legacyChains, mailMon) + + jb := cltest.MakeDirectRequestJobSpec(t) + jb.ExternalJobID = uuid.New() + if specF != nil { + specF(jb) + } + require.NoError(t, jobORM.CreateJob(jb)) + serviceArray, err := delegate.ServicesForSpec(*jb) + require.NoError(t, err) + assert.Len(t, serviceArray, 1) + service := serviceArray[0] + + uni := &DirectRequestUniverse{ + spec: jb, + runner: runner, + service: service, + jobORM: jobORM, + listener: nil, + logBroadcaster: broadcaster, + cleanup: func() { jobORM.Close() }, + } + + broadcaster.On("Register", mock.Anything, mock.Anything).Return(func() {}).Run(func(args mock.Arguments) { + 
uni.listener = args.Get(0).(log.Listener) + }) + + return uni +} + +func NewDirectRequestUniverse(t *testing.T) *DirectRequestUniverse { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].MinIncomingConfirmations = ptr[uint32](1) + }) + return NewDirectRequestUniverseWithConfig(t, cfg, nil) +} + +func (uni *DirectRequestUniverse) Cleanup() { + uni.cleanup() +} + +func TestDelegate_ServicesListenerHandleLog(t *testing.T) { + testutils.SkipShortDB(t) + t.Parallel() + + t.Run("Log is an OracleRequest", func(t *testing.T) { + uni := NewDirectRequestUniverse(t) + defer uni.Cleanup() + + log := log_mocks.NewBroadcast(t) + log.On("ReceiptsRoot").Return(common.Hash{}) + log.On("TransactionsRoot").Return(common.Hash{}) + log.On("StateRoot").Return(common.Hash{}) + log.On("EVMChainID").Return(*big.NewInt(0)) + + uni.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + logOracleRequest := operator_wrapper.OperatorOracleRequest{ + CancelExpiration: big.NewInt(0), + } + log.On("RawLog").Return(types.Log{ + Topics: []common.Hash{ + {}, + uni.spec.ExternalIDEncodeStringToTopic(), + }, + }) + log.On("DecodedLog").Return(&logOracleRequest) + log.On("String").Return("") + uni.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + + runBeganAwaiter := cltest.NewAwaiter() + uni.runner.On("Run", mock.Anything, mock.AnythingOfType("*pipeline.Run"), mock.Anything, mock.Anything, mock.Anything). + Return(false, nil). 
+ Run(func(args mock.Arguments) { + runBeganAwaiter.ItHappened() + fn := args.Get(4).(func(pg.Queryer) error) + require.NoError(t, fn(nil)) + }).Once() + + err := uni.service.Start(testutils.Context(t)) + require.NoError(t, err) + + require.NotNil(t, uni.listener, "listener was nil; expected broadcaster.Register to have been called") + // check if the job exists under the correct ID + drJob, jErr := uni.jobORM.FindJob(testutils.Context(t), uni.listener.JobID()) + require.NoError(t, jErr) + require.Equal(t, drJob.ID, uni.listener.JobID()) + require.NotNil(t, drJob.DirectRequestSpec) + + uni.listener.HandleLog(log) + + runBeganAwaiter.AwaitOrFail(t, 5*time.Second) + + uni.service.Close() + }) + + t.Run("Log is not consumed, as it's too young", func(t *testing.T) { + uni := NewDirectRequestUniverse(t) + defer uni.Cleanup() + + log := log_mocks.NewBroadcast(t) + + uni.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil).Maybe() + logOracleRequest := operator_wrapper.OperatorOracleRequest{ + CancelExpiration: big.NewInt(0), + } + log.On("RawLog").Return(types.Log{ + Topics: []common.Hash{ + {}, + uni.spec.ExternalIDEncodeStringToTopic(), + }, + BlockNumber: 0, + }).Maybe() + log.On("DecodedLog").Return(&logOracleRequest).Maybe() + log.On("String").Return("") + log.On("EVMChainID").Return(*big.NewInt(0)) + uni.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil).Maybe() + + err := uni.service.Start(testutils.Context(t)) + require.NoError(t, err) + + uni.listener.HandleLog(log) + + uni.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + log.On("ReceiptsRoot").Return(common.Hash{}) + log.On("TransactionsRoot").Return(common.Hash{}) + log.On("StateRoot").Return(common.Hash{}) + + runBeganAwaiter := cltest.NewAwaiter() + uni.runner.On("Run", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Run(func(args mock.Arguments) { + runBeganAwaiter.ItHappened() + fn := args.Get(4).(func(pg.Queryer) error) + require.NoError(t, fn(nil)) + }).Once().Return(false, nil) + + // but should after this one, as the head Number is larger + runBeganAwaiter.AwaitOrFail(t, 5*time.Second) + + uni.service.Close() + }) + + t.Run("Log has wrong jobID", func(t *testing.T) { + uni := NewDirectRequestUniverse(t) + defer uni.Cleanup() + + log := log_mocks.NewBroadcast(t) + lbAwaiter := cltest.NewAwaiter() + uni.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + uni.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { lbAwaiter.ItHappened() }).Return(nil) + + logCancelOracleRequest := operator_wrapper.OperatorCancelOracleRequest{RequestId: uni.spec.ExternalIDEncodeStringToTopic()} + logAwaiter := cltest.NewAwaiter() + log.On("DecodedLog").Run(func(args mock.Arguments) { logAwaiter.ItHappened() }).Return(&logCancelOracleRequest) + log.On("RawLog").Return(types.Log{ + Topics: []common.Hash{{}, {}}, + }) + log.On("String").Return("") + + err := uni.service.Start(testutils.Context(t)) + require.NoError(t, err) + + uni.listener.HandleLog(log) + + logAwaiter.AwaitOrFail(t) + lbAwaiter.AwaitOrFail(t) + + uni.service.Close() + }) + + t.Run("Log is a CancelOracleRequest with no matching run", func(t *testing.T) { + uni := NewDirectRequestUniverse(t) + defer uni.Cleanup() + + log := log_mocks.NewBroadcast(t) + + uni.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + logCancelOracleRequest := operator_wrapper.OperatorCancelOracleRequest{RequestId: uni.spec.ExternalIDEncodeStringToTopic()} + log.On("RawLog").Return(types.Log{ + Topics: []common.Hash{ + {}, + uni.spec.ExternalIDEncodeStringToTopic(), + }, + }) + log.On("String").Return("") + log.On("DecodedLog").Return(&logCancelOracleRequest) + lbAwaiter := cltest.NewAwaiter() + uni.logBroadcaster.On("MarkConsumed", 
mock.Anything, mock.Anything).Run(func(args mock.Arguments) { lbAwaiter.ItHappened() }).Return(nil) + + err := uni.service.Start(testutils.Context(t)) + require.NoError(t, err) + + uni.listener.HandleLog(log) + + lbAwaiter.AwaitOrFail(t) + + uni.service.Close() + }) + + t.Run("Log is a CancelOracleRequest with a matching run", func(t *testing.T) { + uni := NewDirectRequestUniverse(t) + defer uni.Cleanup() + + runLog := log_mocks.NewBroadcast(t) + runLog.On("ReceiptsRoot").Return(common.Hash{}) + runLog.On("TransactionsRoot").Return(common.Hash{}) + runLog.On("StateRoot").Return(common.Hash{}) + runLog.On("EVMChainID").Return(*big.NewInt(0)) + + uni.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + logOracleRequest := operator_wrapper.OperatorOracleRequest{ + CancelExpiration: big.NewInt(0), + RequestId: uni.spec.ExternalIDEncodeStringToTopic(), + } + runLog.On("RawLog").Return(types.Log{ + Topics: []common.Hash{ + {}, + uni.spec.ExternalIDEncodeStringToTopic(), + }, + }) + runLog.On("DecodedLog").Return(&logOracleRequest) + runLog.On("String").Return("") + uni.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + + cancelLog := log_mocks.NewBroadcast(t) + + uni.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + logCancelOracleRequest := operator_wrapper.OperatorCancelOracleRequest{RequestId: uni.spec.ExternalIDEncodeStringToTopic()} + cancelLog.On("RawLog").Return(types.Log{ + Topics: []common.Hash{ + {}, + uni.spec.ExternalIDEncodeStringToTopic(), + }, + }) + cancelLog.On("DecodedLog").Return(&logCancelOracleRequest) + cancelLog.On("String").Return("") + uni.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + + err := uni.service.Start(testutils.Context(t)) + require.NoError(t, err) + + timeout := 5 * time.Second + runBeganAwaiter := cltest.NewAwaiter() + runCancelledAwaiter := cltest.NewAwaiter() + uni.runner.On("Run", mock.Anything, 
mock.Anything, mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + runBeganAwaiter.ItHappened() + ctx := args[0].(context.Context) + select { + case <-time.After(timeout): + t.Fatalf("Timed out waiting for Run to be canceled (%v)", timeout) + case <-ctx.Done(): + runCancelledAwaiter.ItHappened() + } + }).Once().Return(false, nil) + uni.listener.HandleLog(runLog) + + runBeganAwaiter.AwaitOrFail(t, timeout) + + uni.listener.HandleLog(cancelLog) + + runCancelledAwaiter.AwaitOrFail(t, timeout) + + uni.service.Close() + }) + + t.Run("Log has sufficient funds", func(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].MinIncomingConfirmations = ptr[uint32](1) + c.EVM[0].MinContractPayment = assets.NewLinkFromJuels(100) + }) + uni := NewDirectRequestUniverseWithConfig(t, cfg, nil) + defer uni.Cleanup() + + log := log_mocks.NewBroadcast(t) + log.On("ReceiptsRoot").Return(common.Hash{}) + log.On("TransactionsRoot").Return(common.Hash{}) + log.On("StateRoot").Return(common.Hash{}) + log.On("EVMChainID").Return(*big.NewInt(0)) + + uni.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + logOracleRequest := operator_wrapper.OperatorOracleRequest{ + CancelExpiration: big.NewInt(0), + Payment: big.NewInt(100), + } + log.On("RawLog").Return(types.Log{ + Topics: []common.Hash{ + {}, + uni.spec.ExternalIDEncodeStringToTopic(), + }, + }) + log.On("DecodedLog").Return(&logOracleRequest) + log.On("String").Return("") + uni.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + + runBeganAwaiter := cltest.NewAwaiter() + uni.runner.On("Run", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + runBeganAwaiter.ItHappened() + fn := args.Get(4).(func(pg.Queryer) error) + require.NoError(t, fn(nil)) + }).Once().Return(false, nil) + + err := uni.service.Start(testutils.Context(t)) + 
require.NoError(t, err) + + // check if the job exists under the correct ID + drJob, jErr := uni.jobORM.FindJob(testutils.Context(t), uni.listener.JobID()) + require.NoError(t, jErr) + require.Equal(t, drJob.ID, uni.listener.JobID()) + require.NotNil(t, drJob.DirectRequestSpec) + + uni.listener.HandleLog(log) + + runBeganAwaiter.AwaitOrFail(t, 5*time.Second) + + uni.service.Close() + }) + + t.Run("Log has insufficient funds", func(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].MinIncomingConfirmations = ptr[uint32](1) + c.EVM[0].MinContractPayment = assets.NewLinkFromJuels(100) + }) + uni := NewDirectRequestUniverseWithConfig(t, cfg, nil) + defer uni.Cleanup() + + log := log_mocks.NewBroadcast(t) + + uni.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + logOracleRequest := operator_wrapper.OperatorOracleRequest{ + CancelExpiration: big.NewInt(0), + Payment: big.NewInt(99), + } + log.On("RawLog").Return(types.Log{ + Topics: []common.Hash{ + {}, + uni.spec.ExternalIDEncodeStringToTopic(), + }, + }) + log.On("DecodedLog").Return(&logOracleRequest) + log.On("String").Return("") + markConsumedLogAwaiter := cltest.NewAwaiter() + uni.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + markConsumedLogAwaiter.ItHappened() + }).Return(nil) + + err := uni.service.Start(testutils.Context(t)) + require.NoError(t, err) + + uni.listener.HandleLog(log) + + markConsumedLogAwaiter.AwaitOrFail(t, 5*time.Second) + + uni.service.Close() + }) + + t.Run("requesters is specified and log is requested by a whitelisted address", func(t *testing.T) { + requester := testutils.NewAddress() + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].MinIncomingConfirmations = ptr[uint32](1) + c.EVM[0].MinContractPayment = assets.NewLinkFromJuels(100) + }) + uni := NewDirectRequestUniverseWithConfig(t, cfg, 
func(jb *job.Job) { + jb.DirectRequestSpec.Requesters = []common.Address{testutils.NewAddress(), requester} + }) + defer uni.Cleanup() + + log := log_mocks.NewBroadcast(t) + log.On("ReceiptsRoot").Return(common.Hash{}) + log.On("TransactionsRoot").Return(common.Hash{}) + log.On("StateRoot").Return(common.Hash{}) + log.On("EVMChainID").Return(*big.NewInt(0)) + + uni.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + logOracleRequest := operator_wrapper.OperatorOracleRequest{ + CancelExpiration: big.NewInt(0), + Payment: big.NewInt(100), + Requester: requester, + } + log.On("RawLog").Return(types.Log{ + Topics: []common.Hash{ + {}, + uni.spec.ExternalIDEncodeStringToTopic(), + }, + }) + log.On("DecodedLog").Return(&logOracleRequest) + log.On("String").Return("") + markConsumedLogAwaiter := cltest.NewAwaiter() + uni.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + markConsumedLogAwaiter.ItHappened() + }).Return(nil) + + runBeganAwaiter := cltest.NewAwaiter() + uni.runner.On("Run", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + runBeganAwaiter.ItHappened() + fn := args.Get(4).(func(pg.Queryer) error) + require.NoError(t, fn(nil)) + }).Once().Return(false, nil) + + err := uni.service.Start(testutils.Context(t)) + require.NoError(t, err) + + // check if the job exists under the correct ID + drJob, jErr := uni.jobORM.FindJob(testutils.Context(t), uni.listener.JobID()) + require.NoError(t, jErr) + require.Equal(t, drJob.ID, uni.listener.JobID()) + require.NotNil(t, drJob.DirectRequestSpec) + + uni.listener.HandleLog(log) + + runBeganAwaiter.AwaitOrFail(t, 5*time.Second) + + uni.service.Close() + }) + + t.Run("requesters is specified and log is requested by a non-whitelisted address", func(t *testing.T) { + requester := testutils.NewAddress() + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { 
+ c.EVM[0].MinIncomingConfirmations = ptr[uint32](1) + c.EVM[0].MinContractPayment = assets.NewLinkFromJuels(100) + }) + uni := NewDirectRequestUniverseWithConfig(t, cfg, func(jb *job.Job) { + jb.DirectRequestSpec.Requesters = []common.Address{testutils.NewAddress(), testutils.NewAddress()} + }) + defer uni.Cleanup() + + log := log_mocks.NewBroadcast(t) + + uni.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + logOracleRequest := operator_wrapper.OperatorOracleRequest{ + CancelExpiration: big.NewInt(0), + Payment: big.NewInt(100), + Requester: requester, + } + log.On("RawLog").Return(types.Log{ + Topics: []common.Hash{ + {}, + uni.spec.ExternalIDEncodeStringToTopic(), + }, + }) + log.On("DecodedLog").Return(&logOracleRequest) + log.On("String").Return("") + markConsumedLogAwaiter := cltest.NewAwaiter() + uni.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + markConsumedLogAwaiter.ItHappened() + }).Return(nil) + + err := uni.service.Start(testutils.Context(t)) + require.NoError(t, err) + + uni.listener.HandleLog(log) + + markConsumedLogAwaiter.AwaitOrFail(t, 5*time.Second) + + uni.service.Close() + }) +} + +func ptr[T any](t T) *T { return &t } diff --git a/core/services/directrequest/validate.go b/core/services/directrequest/validate.go new file mode 100644 index 00000000..660861a7 --- /dev/null +++ b/core/services/directrequest/validate.go @@ -0,0 +1,50 @@ +package directrequest + +import ( + "github.com/pelletier/go-toml" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/null" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +type DirectRequestToml struct { + ContractAddress ethkey.EIP55Address 
`toml:"contractAddress"` + Requesters models.AddressCollection `toml:"requesters"` + MinContractPayment *assets.Link `toml:"minContractPaymentLinkJuels"` + EVMChainID *big.Big `toml:"evmChainID"` + MinIncomingConfirmations null.Uint32 `toml:"minIncomingConfirmations"` +} + +func ValidatedDirectRequestSpec(tomlString string) (job.Job, error) { + var jb = job.Job{} + tree, err := toml.Load(tomlString) + if err != nil { + return jb, err + } + err = tree.Unmarshal(&jb) + if err != nil { + return jb, err + } + var spec DirectRequestToml + err = tree.Unmarshal(&spec) + if err != nil { + return jb, err + } + jb.DirectRequestSpec = &job.DirectRequestSpec{ + ContractAddress: spec.ContractAddress, + Requesters: spec.Requesters, + MinContractPayment: spec.MinContractPayment, + EVMChainID: spec.EVMChainID, + MinIncomingConfirmations: spec.MinIncomingConfirmations, + } + + if jb.Type != job.DirectRequest { + return jb, errors.Errorf("unsupported type %s", jb.Type) + } + return jb, nil +} diff --git a/core/services/directrequest/validate_test.go b/core/services/directrequest/validate_test.go new file mode 100644 index 00000000..91cd7ece --- /dev/null +++ b/core/services/directrequest/validate_test.go @@ -0,0 +1,74 @@ +package directrequest + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidatedDirectRequestSpec(t *testing.T) { + t.Parallel() + + toml := ` +type = "directrequest" +schemaVersion = 1 +name = "example eth request event spec" +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +externalJobID = "A5AC14E8-7629-4726-B1F1-1AE053FC829E" +observationSource = """ + ds1 [type=http method=GET url="example.com" allowunrestrictednetworkaccess="true"]; + ds1_parse [type=jsonparse path="USD"]; + ds1_multiply [type=multiply times=100]; + ds1 -> ds1_parse -> ds1_multiply; +""" +` + + s, err := ValidatedDirectRequestSpec(toml) + require.NoError(t, err) + + assert.Equal(t, int32(0), s.ID) + 
assert.Equal(t, "0x613a38AC1659769640aaE063C651F48E0250454C", s.DirectRequestSpec.ContractAddress.Hex()) + assert.Equal(t, "0x6135616331346538373632393437323662316631316165303533666338323965", s.ExternalIDEncodeStringToTopic().String()) + assert.Equal(t, "0xa5ac14e876294726b1f11ae053fc829e00000000000000000000000000000000", s.ExternalIDEncodeBytesToTopic().String()) + assert.NotZero(t, s.ExternalJobID[:]) + assert.Equal(t, time.Time{}, s.DirectRequestSpec.CreatedAt) + assert.Equal(t, time.Time{}, s.DirectRequestSpec.UpdatedAt) +} + +func TestValidatedDirectRequestSpec_MinIncomingConfirmations(t *testing.T) { + t.Parallel() + + t.Run("no minIncomingConfirmations specified", func(t *testing.T) { + t.Parallel() + + toml := ` + type = "directrequest" + schemaVersion = 1 + name = "example eth request event spec" + ` + + s, err := ValidatedDirectRequestSpec(toml) + require.NoError(t, err) + + assert.False(t, s.DirectRequestSpec.MinIncomingConfirmations.Valid) + }) + + t.Run("minIncomingConfirmations set to 100", func(t *testing.T) { + t.Parallel() + + toml := ` + type = "directrequest" + schemaVersion = 1 + name = "example eth request event spec" + minIncomingConfirmations = 100 + ` + + s, err := ValidatedDirectRequestSpec(toml) + require.NoError(t, err) + + assert.True(t, s.DirectRequestSpec.MinIncomingConfirmations.Valid) + assert.Equal(t, uint32(100), s.DirectRequestSpec.MinIncomingConfirmations.Uint32) + }) +} diff --git a/core/services/doc.go b/core/services/doc.go new file mode 100644 index 00000000..44ab77fb --- /dev/null +++ b/core/services/doc.go @@ -0,0 +1,27 @@ +// Package services contain the key components of the Plugin +// node. This includes the Application, JobRunner, LogListener, +// and Scheduler. +// +// # Application +// +// The Application is the main component used for starting and +// stopping the Plugin node. +// +// # JobRunner +// +// The JobRunner keeps track of Runs within a Job and ensures +// that they're executed in order. 
Within each Run, the tasks +// are also executed from the JobRunner. +// +// # JobSubscriber +// +// The JobSubscriber coordinates running job events with +// the EventLog in the Store, and also subscribes to the given +// address on the Ethereum blockchain. +// +// # Scheduler +// +// The Scheduler ensures that recurring events are executed +// according to their schedule, and one-time events occur only +// when the specified time has passed. +package services diff --git a/core/services/feeds/config.go b/core/services/feeds/config.go new file mode 100644 index 00000000..f2a1c8fe --- /dev/null +++ b/core/services/feeds/config.go @@ -0,0 +1,29 @@ +package feeds + +import ( + "time" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" +) + +type JobConfig interface { + DefaultHTTPTimeout() commonconfig.Duration +} + +type InsecureConfig interface { + OCRDevelopmentMode() bool +} + +type OCRConfig interface { + Enabled() bool +} + +type OCR2Config interface { + Enabled() bool + BlockchainTimeout() time.Duration + ContractConfirmations() uint16 + ContractPollInterval() time.Duration + ContractTransmitterTransmitTimeout() time.Duration + DatabaseTimeout() time.Duration + TraceLogging() bool +} diff --git a/core/services/feeds/connection_manager.go b/core/services/feeds/connection_manager.go new file mode 100644 index 00000000..b2351afc --- /dev/null +++ b/core/services/feeds/connection_manager.go @@ -0,0 +1,214 @@ +package feeds + +import ( + "context" + "crypto/ed25519" + "sync" + + "github.com/pkg/errors" + + "github.com/goplugin/wsrpc" + "github.com/goplugin/wsrpc/connectivity" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/recovery" + pb "github.com/goplugin/pluginv3.0/v2/core/services/feeds/proto" +) + +//go:generate mockery --quiet --name ConnectionsManager --output ./mocks/ --case=underscore + +type ConnectionsManager interface { + Connect(opts ConnectOpts) + Disconnect(id int64) error + Close() + 
GetClient(id int64) (pb.FeedsManagerClient, error) + IsConnected(id int64) bool +} + +// connectionsManager manages the rpc connections to Feeds Manager services +type connectionsManager struct { + mu sync.Mutex + wgClosed sync.WaitGroup + + connections map[int64]*connection + lggr logger.Logger +} + +type connection struct { + // ctx allows us to cancel any connections which are currently blocking + // while waiting to establish a connection to FMS. + ctx context.Context + cancel context.CancelFunc + + connected bool + client pb.FeedsManagerClient +} + +func newConnectionsManager(lggr logger.Logger) *connectionsManager { + return &connectionsManager{ + mu: sync.Mutex{}, + connections: map[int64]*connection{}, + lggr: lggr, + } +} + +// ConnectOpts defines the required options to connect to an FMS server +type ConnectOpts struct { + FeedsManagerID int64 + + // URI is the URI of the feeds manager + URI string + + // Privkey defines the local CSA private key + Privkey []byte + + // Pubkey defines the Feeds Manager Service's public key + Pubkey []byte + + // Handlers defines the wsrpc Handlers + Handlers pb.NodeServiceServer + + // OnConnect defines a callback for when the dial succeeds + OnConnect func(pb.FeedsManagerClient) +} + +// Connects to a feeds manager +// +// Connection to FMS is handled in a goroutine because the Dial will block +// until it can establish a connection. This is important during startup because +// we do not want to block other services from starting. +// +// Eventually when FMS does come back up, wsrpc will establish the connection +// without any interaction on behalf of the node operator. 
+func (mgr *connectionsManager) Connect(opts ConnectOpts) { + ctx, cancel := context.WithCancel(context.Background()) + + conn := &connection{ + ctx: ctx, + cancel: cancel, + connected: false, + } + + mgr.wgClosed.Add(1) + + mgr.mu.Lock() + mgr.connections[opts.FeedsManagerID] = conn + mgr.mu.Unlock() + + go recovery.WrapRecover(mgr.lggr, func() { + defer mgr.wgClosed.Done() + + mgr.lggr.Infow("Connecting to Feeds Manager...", "feedsManagerID", opts.FeedsManagerID) + + clientConn, err := wsrpc.DialWithContext(conn.ctx, opts.URI, + wsrpc.WithTransportCreds(opts.Privkey, ed25519.PublicKey(opts.Pubkey)), + wsrpc.WithBlock(), + wsrpc.WithLogger(mgr.lggr), + ) + if err != nil { + // We only want to log if there was an error that did not occur + // from a context cancel. + if conn.ctx.Err() == nil { + mgr.lggr.Warnf("Error connecting to Feeds Manager server: %v", err) + } else { + mgr.lggr.Infof("Closing wsrpc websocket connection: %v", err) + } + + return + } + defer clientConn.Close() + + mgr.lggr.Infow("Connected to Feeds Manager", "feedsManagerID", opts.FeedsManagerID) + + // Initialize a new wsrpc client to make RPC calls + mgr.mu.Lock() + conn.connected = true + conn.client = pb.NewFeedsManagerClient(clientConn) + mgr.connections[opts.FeedsManagerID] = conn + mgr.mu.Unlock() + + // Initialize RPC call handlers on the client connection + pb.RegisterNodeServiceServer(clientConn, opts.Handlers) + + if opts.OnConnect != nil { + opts.OnConnect(conn.client) + } + + // Detect changes in connection status + go func() { + for { + s := clientConn.GetState() + + clientConn.WaitForStateChange(conn.ctx, s) + + s = clientConn.GetState() + + // Exit the goroutine if we shutdown the connection + if s == connectivity.Shutdown { + break + } + + mgr.mu.Lock() + conn.connected = s == connectivity.Ready + mgr.mu.Unlock() + } + }() + + // Wait for close + <-conn.ctx.Done() + }) +} + +// Disconnect closes a single connection +func (mgr *connectionsManager) Disconnect(id int64) error { + 
mgr.mu.Lock() + defer mgr.mu.Unlock() + + conn, ok := mgr.connections[id] + if !ok { + return errors.New("feeds manager is not connected") + } + + conn.cancel() + delete(mgr.connections, id) + + mgr.lggr.Infow("Disconnected Feeds Manager", "feedsManagerID", id) + + return nil +} + +// Close closes all connections +func (mgr *connectionsManager) Close() { + mgr.mu.Lock() + for _, conn := range mgr.connections { + conn.cancel() + } + + mgr.mu.Unlock() + + mgr.wgClosed.Wait() +} + +// GetClient returns a single client by id +func (mgr *connectionsManager) GetClient(id int64) (pb.FeedsManagerClient, error) { + mgr.mu.Lock() + conn, ok := mgr.connections[id] + mgr.mu.Unlock() + if !ok || !conn.connected { + return nil, errors.New("feeds manager is not connected") + } + + return conn.client, nil +} + +// IsConnected returns true if the connection to a feeds manager is active +func (mgr *connectionsManager) IsConnected(id int64) bool { + mgr.mu.Lock() + conn, ok := mgr.connections[id] + mgr.mu.Unlock() + if !ok { + return false + } + + return conn.connected +} diff --git a/core/services/feeds/connection_manager_test.go b/core/services/feeds/connection_manager_test.go new file mode 100644 index 00000000..b2d59298 --- /dev/null +++ b/core/services/feeds/connection_manager_test.go @@ -0,0 +1,73 @@ +package feeds + +import ( + "testing" +) + +func Test_connectionsManager_IsConnected(t *testing.T) { + type fields struct { + connections map[int64]*connection + } + type args struct { + id int64 + } + tests := []struct { + name string + fields fields + args args + want bool + }{ + { + name: "inactive connection exists", + fields: fields{ + connections: map[int64]*connection{ + 1: { + connected: false, + }, + }, + }, + args: args{ + id: 1, + }, + want: false, + }, + { + name: "active connection exists", + fields: fields{ + connections: map[int64]*connection{ + 1: { + connected: true, + }, + }, + }, + args: args{ + id: 1, + }, + want: true, + }, + { + name: "connection does not 
exist", + fields: fields{ + connections: map[int64]*connection{ + 1: { + connected: true, + }, + }, + }, + args: args{ + id: 2, + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mgr := &connectionsManager{ + connections: tt.fields.connections, + } + if got := mgr.IsConnected(tt.args.id); got != tt.want { + t.Errorf("IsConnected() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/core/services/feeds/helpers_test.go b/core/services/feeds/helpers_test.go new file mode 100644 index 00000000..b34495cd --- /dev/null +++ b/core/services/feeds/helpers_test.go @@ -0,0 +1,7 @@ +package feeds + +// SetConnectionsManager allows us to manually set the connections manager. +// Only used for testing. +func (s *service) SetConnectionsManager(cm ConnectionsManager) { + s.connMgr = cm +} diff --git a/core/services/feeds/mocks/connections_manager.go b/core/services/feeds/mocks/connections_manager.go new file mode 100644 index 00000000..4dbfdf48 --- /dev/null +++ b/core/services/feeds/mocks/connections_manager.go @@ -0,0 +1,105 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + feeds "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + mock "github.com/stretchr/testify/mock" + + proto "github.com/goplugin/pluginv3.0/v2/core/services/feeds/proto" +) + +// ConnectionsManager is an autogenerated mock type for the ConnectionsManager type +type ConnectionsManager struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *ConnectionsManager) Close() { + _m.Called() +} + +// Connect provides a mock function with given fields: opts +func (_m *ConnectionsManager) Connect(opts feeds.ConnectOpts) { + _m.Called(opts) +} + +// Disconnect provides a mock function with given fields: id +func (_m *ConnectionsManager) Disconnect(id int64) error { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Disconnect") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int64) error); ok { + r0 = rf(id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetClient provides a mock function with given fields: id +func (_m *ConnectionsManager) GetClient(id int64) (proto.FeedsManagerClient, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for GetClient") + } + + var r0 proto.FeedsManagerClient + var r1 error + if rf, ok := ret.Get(0).(func(int64) (proto.FeedsManagerClient, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(int64) proto.FeedsManagerClient); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(proto.FeedsManagerClient) + } + } + + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsConnected provides a mock function with given fields: id +func (_m *ConnectionsManager) IsConnected(id int64) bool { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for IsConnected") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(int64) bool); ok { + r0 = rf(id) + } else { + r0 = 
ret.Get(0).(bool) + } + + return r0 +} + +// NewConnectionsManager creates a new instance of ConnectionsManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConnectionsManager(t interface { + mock.TestingT + Cleanup(func()) +}) *ConnectionsManager { + mock := &ConnectionsManager{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/feeds/mocks/feeds_manager_client.go b/core/services/feeds/mocks/feeds_manager_client.go new file mode 100644 index 00000000..1d439548 --- /dev/null +++ b/core/services/feeds/mocks/feeds_manager_client.go @@ -0,0 +1,179 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + proto "github.com/goplugin/pluginv3.0/v2/core/services/feeds/proto" + mock "github.com/stretchr/testify/mock" +) + +// FeedsManagerClient is an autogenerated mock type for the FeedsManagerClient type +type FeedsManagerClient struct { + mock.Mock +} + +// ApprovedJob provides a mock function with given fields: ctx, in +func (_m *FeedsManagerClient) ApprovedJob(ctx context.Context, in *proto.ApprovedJobRequest) (*proto.ApprovedJobResponse, error) { + ret := _m.Called(ctx, in) + + if len(ret) == 0 { + panic("no return value specified for ApprovedJob") + } + + var r0 *proto.ApprovedJobResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proto.ApprovedJobRequest) (*proto.ApprovedJobResponse, error)); ok { + return rf(ctx, in) + } + if rf, ok := ret.Get(0).(func(context.Context, *proto.ApprovedJobRequest) *proto.ApprovedJobResponse); ok { + r0 = rf(ctx, in) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proto.ApprovedJobResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proto.ApprovedJobRequest) error); ok { + r1 = rf(ctx, in) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + 
+// CancelledJob provides a mock function with given fields: ctx, in +func (_m *FeedsManagerClient) CancelledJob(ctx context.Context, in *proto.CancelledJobRequest) (*proto.CancelledJobResponse, error) { + ret := _m.Called(ctx, in) + + if len(ret) == 0 { + panic("no return value specified for CancelledJob") + } + + var r0 *proto.CancelledJobResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proto.CancelledJobRequest) (*proto.CancelledJobResponse, error)); ok { + return rf(ctx, in) + } + if rf, ok := ret.Get(0).(func(context.Context, *proto.CancelledJobRequest) *proto.CancelledJobResponse); ok { + r0 = rf(ctx, in) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proto.CancelledJobResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proto.CancelledJobRequest) error); ok { + r1 = rf(ctx, in) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Healthcheck provides a mock function with given fields: ctx, in +func (_m *FeedsManagerClient) Healthcheck(ctx context.Context, in *proto.HealthcheckRequest) (*proto.HealthcheckResponse, error) { + ret := _m.Called(ctx, in) + + if len(ret) == 0 { + panic("no return value specified for Healthcheck") + } + + var r0 *proto.HealthcheckResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proto.HealthcheckRequest) (*proto.HealthcheckResponse, error)); ok { + return rf(ctx, in) + } + if rf, ok := ret.Get(0).(func(context.Context, *proto.HealthcheckRequest) *proto.HealthcheckResponse); ok { + r0 = rf(ctx, in) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proto.HealthcheckResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proto.HealthcheckRequest) error); ok { + r1 = rf(ctx, in) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RejectedJob provides a mock function with given fields: ctx, in +func (_m *FeedsManagerClient) RejectedJob(ctx context.Context, in *proto.RejectedJobRequest) (*proto.RejectedJobResponse, error) { + 
ret := _m.Called(ctx, in) + + if len(ret) == 0 { + panic("no return value specified for RejectedJob") + } + + var r0 *proto.RejectedJobResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proto.RejectedJobRequest) (*proto.RejectedJobResponse, error)); ok { + return rf(ctx, in) + } + if rf, ok := ret.Get(0).(func(context.Context, *proto.RejectedJobRequest) *proto.RejectedJobResponse); ok { + r0 = rf(ctx, in) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proto.RejectedJobResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proto.RejectedJobRequest) error); ok { + r1 = rf(ctx, in) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateNode provides a mock function with given fields: ctx, in +func (_m *FeedsManagerClient) UpdateNode(ctx context.Context, in *proto.UpdateNodeRequest) (*proto.UpdateNodeResponse, error) { + ret := _m.Called(ctx, in) + + if len(ret) == 0 { + panic("no return value specified for UpdateNode") + } + + var r0 *proto.UpdateNodeResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *proto.UpdateNodeRequest) (*proto.UpdateNodeResponse, error)); ok { + return rf(ctx, in) + } + if rf, ok := ret.Get(0).(func(context.Context, *proto.UpdateNodeRequest) *proto.UpdateNodeResponse); ok { + r0 = rf(ctx, in) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*proto.UpdateNodeResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *proto.UpdateNodeRequest) error); ok { + r1 = rf(ctx, in) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewFeedsManagerClient creates a new instance of FeedsManagerClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewFeedsManagerClient(t interface { + mock.TestingT + Cleanup(func()) +}) *FeedsManagerClient { + mock := &FeedsManagerClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/feeds/mocks/orm.go b/core/services/feeds/mocks/orm.go new file mode 100644 index 00000000..6f711c8c --- /dev/null +++ b/core/services/feeds/mocks/orm.go @@ -0,0 +1,2192 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + feeds "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + uuid "github.com/google/uuid" +) + +// ORM is an autogenerated mock type for the ORM type +type ORM struct { + mock.Mock +} + +type ORM_Expecter struct { + mock *mock.Mock +} + +func (_m *ORM) EXPECT() *ORM_Expecter { + return &ORM_Expecter{mock: &_m.Mock} +} + +// ApproveSpec provides a mock function with given fields: id, externalJobID, qopts +func (_m *ORM) ApproveSpec(id int64, externalJobID uuid.UUID, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, id, externalJobID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ApproveSpec") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int64, uuid.UUID, ...pg.QOpt) error); ok { + r0 = rf(id, externalJobID, qopts...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ORM_ApproveSpec_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ApproveSpec' +type ORM_ApproveSpec_Call struct { + *mock.Call +} + +// ApproveSpec is a helper method to define mock.On call +// - id int64 +// - externalJobID uuid.UUID +// - qopts ...pg.QOpt +func (_e *ORM_Expecter) ApproveSpec(id interface{}, externalJobID interface{}, qopts ...interface{}) *ORM_ApproveSpec_Call { + return &ORM_ApproveSpec_Call{Call: _e.mock.On("ApproveSpec", + append([]interface{}{id, externalJobID}, qopts...)...)} +} + +func (_c *ORM_ApproveSpec_Call) Run(run func(id int64, externalJobID uuid.UUID, qopts ...pg.QOpt)) *ORM_ApproveSpec_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]pg.QOpt, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(pg.QOpt) + } + } + run(args[0].(int64), args[1].(uuid.UUID), variadicArgs...) + }) + return _c +} + +func (_c *ORM_ApproveSpec_Call) Return(_a0 error) *ORM_ApproveSpec_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ORM_ApproveSpec_Call) RunAndReturn(run func(int64, uuid.UUID, ...pg.QOpt) error) *ORM_ApproveSpec_Call { + _c.Call.Return(run) + return _c +} + +// CancelSpec provides a mock function with given fields: id, qopts +func (_m *ORM) CancelSpec(id int64, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, id) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CancelSpec") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) error); ok { + r0 = rf(id, qopts...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ORM_CancelSpec_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CancelSpec' +type ORM_CancelSpec_Call struct { + *mock.Call +} + +// CancelSpec is a helper method to define mock.On call +// - id int64 +// - qopts ...pg.QOpt +func (_e *ORM_Expecter) CancelSpec(id interface{}, qopts ...interface{}) *ORM_CancelSpec_Call { + return &ORM_CancelSpec_Call{Call: _e.mock.On("CancelSpec", + append([]interface{}{id}, qopts...)...)} +} + +func (_c *ORM_CancelSpec_Call) Run(run func(id int64, qopts ...pg.QOpt)) *ORM_CancelSpec_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]pg.QOpt, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(pg.QOpt) + } + } + run(args[0].(int64), variadicArgs...) + }) + return _c +} + +func (_c *ORM_CancelSpec_Call) Return(_a0 error) *ORM_CancelSpec_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ORM_CancelSpec_Call) RunAndReturn(run func(int64, ...pg.QOpt) error) *ORM_CancelSpec_Call { + _c.Call.Return(run) + return _c +} + +// CountJobProposals provides a mock function with given fields: +func (_m *ORM) CountJobProposals() (int64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CountJobProposals") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func() (int64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ORM_CountJobProposals_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CountJobProposals' +type ORM_CountJobProposals_Call struct { + *mock.Call +} + +// CountJobProposals is a helper method to define mock.On call +func (_e *ORM_Expecter) CountJobProposals() 
*ORM_CountJobProposals_Call { + return &ORM_CountJobProposals_Call{Call: _e.mock.On("CountJobProposals")} +} + +func (_c *ORM_CountJobProposals_Call) Run(run func()) *ORM_CountJobProposals_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ORM_CountJobProposals_Call) Return(_a0 int64, _a1 error) *ORM_CountJobProposals_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ORM_CountJobProposals_Call) RunAndReturn(run func() (int64, error)) *ORM_CountJobProposals_Call { + _c.Call.Return(run) + return _c +} + +// CountJobProposalsByStatus provides a mock function with given fields: +func (_m *ORM) CountJobProposalsByStatus() (*feeds.JobProposalCounts, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CountJobProposalsByStatus") + } + + var r0 *feeds.JobProposalCounts + var r1 error + if rf, ok := ret.Get(0).(func() (*feeds.JobProposalCounts, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *feeds.JobProposalCounts); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*feeds.JobProposalCounts) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ORM_CountJobProposalsByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CountJobProposalsByStatus' +type ORM_CountJobProposalsByStatus_Call struct { + *mock.Call +} + +// CountJobProposalsByStatus is a helper method to define mock.On call +func (_e *ORM_Expecter) CountJobProposalsByStatus() *ORM_CountJobProposalsByStatus_Call { + return &ORM_CountJobProposalsByStatus_Call{Call: _e.mock.On("CountJobProposalsByStatus")} +} + +func (_c *ORM_CountJobProposalsByStatus_Call) Run(run func()) *ORM_CountJobProposalsByStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ORM_CountJobProposalsByStatus_Call) Return(counts *feeds.JobProposalCounts, 
err error) *ORM_CountJobProposalsByStatus_Call { + _c.Call.Return(counts, err) + return _c +} + +func (_c *ORM_CountJobProposalsByStatus_Call) RunAndReturn(run func() (*feeds.JobProposalCounts, error)) *ORM_CountJobProposalsByStatus_Call { + _c.Call.Return(run) + return _c +} + +// CountManagers provides a mock function with given fields: +func (_m *ORM) CountManagers() (int64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CountManagers") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func() (int64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ORM_CountManagers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CountManagers' +type ORM_CountManagers_Call struct { + *mock.Call +} + +// CountManagers is a helper method to define mock.On call +func (_e *ORM_Expecter) CountManagers() *ORM_CountManagers_Call { + return &ORM_CountManagers_Call{Call: _e.mock.On("CountManagers")} +} + +func (_c *ORM_CountManagers_Call) Run(run func()) *ORM_CountManagers_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ORM_CountManagers_Call) Return(_a0 int64, _a1 error) *ORM_CountManagers_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ORM_CountManagers_Call) RunAndReturn(run func() (int64, error)) *ORM_CountManagers_Call { + _c.Call.Return(run) + return _c +} + +// CreateBatchChainConfig provides a mock function with given fields: cfgs, qopts +func (_m *ORM) CreateBatchChainConfig(cfgs []feeds.ChainConfig, qopts ...pg.QOpt) ([]int64, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, cfgs) + _ca = append(_ca, _va...) 
+ ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateBatchChainConfig") + } + + var r0 []int64 + var r1 error + if rf, ok := ret.Get(0).(func([]feeds.ChainConfig, ...pg.QOpt) ([]int64, error)); ok { + return rf(cfgs, qopts...) + } + if rf, ok := ret.Get(0).(func([]feeds.ChainConfig, ...pg.QOpt) []int64); ok { + r0 = rf(cfgs, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int64) + } + } + + if rf, ok := ret.Get(1).(func([]feeds.ChainConfig, ...pg.QOpt) error); ok { + r1 = rf(cfgs, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ORM_CreateBatchChainConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateBatchChainConfig' +type ORM_CreateBatchChainConfig_Call struct { + *mock.Call +} + +// CreateBatchChainConfig is a helper method to define mock.On call +// - cfgs []feeds.ChainConfig +// - qopts ...pg.QOpt +func (_e *ORM_Expecter) CreateBatchChainConfig(cfgs interface{}, qopts ...interface{}) *ORM_CreateBatchChainConfig_Call { + return &ORM_CreateBatchChainConfig_Call{Call: _e.mock.On("CreateBatchChainConfig", + append([]interface{}{cfgs}, qopts...)...)} +} + +func (_c *ORM_CreateBatchChainConfig_Call) Run(run func(cfgs []feeds.ChainConfig, qopts ...pg.QOpt)) *ORM_CreateBatchChainConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]pg.QOpt, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(pg.QOpt) + } + } + run(args[0].([]feeds.ChainConfig), variadicArgs...) 
+ }) + return _c +} + +func (_c *ORM_CreateBatchChainConfig_Call) Return(_a0 []int64, _a1 error) *ORM_CreateBatchChainConfig_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ORM_CreateBatchChainConfig_Call) RunAndReturn(run func([]feeds.ChainConfig, ...pg.QOpt) ([]int64, error)) *ORM_CreateBatchChainConfig_Call { + _c.Call.Return(run) + return _c +} + +// CreateChainConfig provides a mock function with given fields: cfg, qopts +func (_m *ORM) CreateChainConfig(cfg feeds.ChainConfig, qopts ...pg.QOpt) (int64, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, cfg) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateChainConfig") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(feeds.ChainConfig, ...pg.QOpt) (int64, error)); ok { + return rf(cfg, qopts...) + } + if rf, ok := ret.Get(0).(func(feeds.ChainConfig, ...pg.QOpt) int64); ok { + r0 = rf(cfg, qopts...) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(feeds.ChainConfig, ...pg.QOpt) error); ok { + r1 = rf(cfg, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ORM_CreateChainConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateChainConfig' +type ORM_CreateChainConfig_Call struct { + *mock.Call +} + +// CreateChainConfig is a helper method to define mock.On call +// - cfg feeds.ChainConfig +// - qopts ...pg.QOpt +func (_e *ORM_Expecter) CreateChainConfig(cfg interface{}, qopts ...interface{}) *ORM_CreateChainConfig_Call { + return &ORM_CreateChainConfig_Call{Call: _e.mock.On("CreateChainConfig", + append([]interface{}{cfg}, qopts...)...)} +} + +func (_c *ORM_CreateChainConfig_Call) Run(run func(cfg feeds.ChainConfig, qopts ...pg.QOpt)) *ORM_CreateChainConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]pg.QOpt, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(pg.QOpt) + } + } + run(args[0].(feeds.ChainConfig), variadicArgs...) + }) + return _c +} + +func (_c *ORM_CreateChainConfig_Call) Return(_a0 int64, _a1 error) *ORM_CreateChainConfig_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ORM_CreateChainConfig_Call) RunAndReturn(run func(feeds.ChainConfig, ...pg.QOpt) (int64, error)) *ORM_CreateChainConfig_Call { + _c.Call.Return(run) + return _c +} + +// CreateJobProposal provides a mock function with given fields: jp +func (_m *ORM) CreateJobProposal(jp *feeds.JobProposal) (int64, error) { + ret := _m.Called(jp) + + if len(ret) == 0 { + panic("no return value specified for CreateJobProposal") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(*feeds.JobProposal) (int64, error)); ok { + return rf(jp) + } + if rf, ok := ret.Get(0).(func(*feeds.JobProposal) int64); ok { + r0 = rf(jp) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(*feeds.JobProposal) error); ok { + r1 = rf(jp) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ORM_CreateJobProposal_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'CreateJobProposal' +type ORM_CreateJobProposal_Call struct { + *mock.Call +} + +// CreateJobProposal is a helper method to define mock.On call +// - jp *feeds.JobProposal +func (_e *ORM_Expecter) CreateJobProposal(jp interface{}) *ORM_CreateJobProposal_Call { + return &ORM_CreateJobProposal_Call{Call: _e.mock.On("CreateJobProposal", jp)} +} + +func (_c *ORM_CreateJobProposal_Call) Run(run func(jp *feeds.JobProposal)) *ORM_CreateJobProposal_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*feeds.JobProposal)) + }) + return _c +} + +func (_c *ORM_CreateJobProposal_Call) Return(_a0 int64, _a1 error) *ORM_CreateJobProposal_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ORM_CreateJobProposal_Call) RunAndReturn(run func(*feeds.JobProposal) (int64, error)) *ORM_CreateJobProposal_Call { + _c.Call.Return(run) + return _c +} + +// CreateManager provides a mock function with given fields: ms, qopts +func (_m *ORM) CreateManager(ms *feeds.FeedsManager, qopts ...pg.QOpt) (int64, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, ms) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateManager") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(*feeds.FeedsManager, ...pg.QOpt) (int64, error)); ok { + return rf(ms, qopts...) + } + if rf, ok := ret.Get(0).(func(*feeds.FeedsManager, ...pg.QOpt) int64); ok { + r0 = rf(ms, qopts...) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(*feeds.FeedsManager, ...pg.QOpt) error); ok { + r1 = rf(ms, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ORM_CreateManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateManager' +type ORM_CreateManager_Call struct { + *mock.Call +} + +// CreateManager is a helper method to define mock.On call +// - ms *feeds.FeedsManager +// - qopts ...pg.QOpt +func (_e *ORM_Expecter) CreateManager(ms interface{}, qopts ...interface{}) *ORM_CreateManager_Call { + return &ORM_CreateManager_Call{Call: _e.mock.On("CreateManager", + append([]interface{}{ms}, qopts...)...)} +} + +func (_c *ORM_CreateManager_Call) Run(run func(ms *feeds.FeedsManager, qopts ...pg.QOpt)) *ORM_CreateManager_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]pg.QOpt, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(pg.QOpt) + } + } + run(args[0].(*feeds.FeedsManager), variadicArgs...) + }) + return _c +} + +func (_c *ORM_CreateManager_Call) Return(_a0 int64, _a1 error) *ORM_CreateManager_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ORM_CreateManager_Call) RunAndReturn(run func(*feeds.FeedsManager, ...pg.QOpt) (int64, error)) *ORM_CreateManager_Call { + _c.Call.Return(run) + return _c +} + +// CreateSpec provides a mock function with given fields: spec, qopts +func (_m *ORM) CreateSpec(spec feeds.JobProposalSpec, qopts ...pg.QOpt) (int64, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, spec) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateSpec") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(feeds.JobProposalSpec, ...pg.QOpt) (int64, error)); ok { + return rf(spec, qopts...) + } + if rf, ok := ret.Get(0).(func(feeds.JobProposalSpec, ...pg.QOpt) int64); ok { + r0 = rf(spec, qopts...) 
+ } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(feeds.JobProposalSpec, ...pg.QOpt) error); ok { + r1 = rf(spec, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ORM_CreateSpec_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateSpec' +type ORM_CreateSpec_Call struct { + *mock.Call +} + +// CreateSpec is a helper method to define mock.On call +// - spec feeds.JobProposalSpec +// - qopts ...pg.QOpt +func (_e *ORM_Expecter) CreateSpec(spec interface{}, qopts ...interface{}) *ORM_CreateSpec_Call { + return &ORM_CreateSpec_Call{Call: _e.mock.On("CreateSpec", + append([]interface{}{spec}, qopts...)...)} +} + +func (_c *ORM_CreateSpec_Call) Run(run func(spec feeds.JobProposalSpec, qopts ...pg.QOpt)) *ORM_CreateSpec_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]pg.QOpt, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(pg.QOpt) + } + } + run(args[0].(feeds.JobProposalSpec), variadicArgs...) 
	})
	return _c
}

// Return sets the values the mock returns for CreateSpec.
func (_c *ORM_CreateSpec_Call) Return(_a0 int64, _a1 error) *ORM_CreateSpec_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer CreateSpec calls.
func (_c *ORM_CreateSpec_Call) RunAndReturn(run func(feeds.JobProposalSpec, ...pg.QOpt) (int64, error)) *ORM_CreateSpec_Call {
	_c.Call.Return(run)
	return _c
}

// DeleteChainConfig provides a mock function with given fields: id
func (_m *ORM) DeleteChainConfig(id int64) (int64, error) {
	ret := _m.Called(id)

	if len(ret) == 0 {
		panic("no return value specified for DeleteChainConfig")
	}

	var r0 int64
	var r1 error
	if rf, ok := ret.Get(0).(func(int64) (int64, error)); ok {
		return rf(id)
	}
	if rf, ok := ret.Get(0).(func(int64) int64); ok {
		r0 = rf(id)
	} else {
		r0 = ret.Get(0).(int64)
	}

	if rf, ok := ret.Get(1).(func(int64) error); ok {
		r1 = rf(id)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_DeleteChainConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteChainConfig'
type ORM_DeleteChainConfig_Call struct {
	*mock.Call
}

// DeleteChainConfig is a helper method to define mock.On call
//   - id int64
func (_e *ORM_Expecter) DeleteChainConfig(id interface{}) *ORM_DeleteChainConfig_Call {
	return &ORM_DeleteChainConfig_Call{Call: _e.mock.On("DeleteChainConfig", id)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_DeleteChainConfig_Call) Run(run func(id int64)) *ORM_DeleteChainConfig_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(int64))
	})
	return _c
}

// Return sets the values the mock returns for DeleteChainConfig.
func (_c *ORM_DeleteChainConfig_Call) Return(_a0 int64, _a1 error) *ORM_DeleteChainConfig_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer DeleteChainConfig calls.
func (_c *ORM_DeleteChainConfig_Call) RunAndReturn(run func(int64) (int64, error)) *ORM_DeleteChainConfig_Call {
	_c.Call.Return(run)
	return _c
}

// DeleteProposal provides a mock function with given fields: id, qopts
func (_m *ORM) DeleteProposal(id int64, qopts ...pg.QOpt) error {
	_va := make([]interface{}, len(qopts))
	for _i := range qopts {
		_va[_i] = qopts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, id)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for DeleteProposal")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) error); ok {
		r0 = rf(id, qopts...)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// ORM_DeleteProposal_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteProposal'
type ORM_DeleteProposal_Call struct {
	*mock.Call
}

// DeleteProposal is a helper method to define mock.On call
//   - id int64
//   - qopts ...pg.QOpt
func (_e *ORM_Expecter) DeleteProposal(id interface{}, qopts ...interface{}) *ORM_DeleteProposal_Call {
	return &ORM_DeleteProposal_Call{Call: _e.mock.On("DeleteProposal",
		append([]interface{}{id}, qopts...)...)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_DeleteProposal_Call) Run(run func(id int64, qopts ...pg.QOpt)) *ORM_DeleteProposal_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Recover the typed variadic qopts from the recorded arguments.
		variadicArgs := make([]pg.QOpt, len(args)-1)
		for i, a := range args[1:] {
			if a != nil {
				variadicArgs[i] = a.(pg.QOpt)
			}
		}
		run(args[0].(int64), variadicArgs...)
	})
	return _c
}

// Return sets the value the mock returns for DeleteProposal.
func (_c *ORM_DeleteProposal_Call) Return(_a0 error) *ORM_DeleteProposal_Call {
	_c.Call.Return(_a0)
	return _c
}

// RunAndReturn registers a handler whose result answers DeleteProposal calls.
func (_c *ORM_DeleteProposal_Call) RunAndReturn(run func(int64, ...pg.QOpt) error) *ORM_DeleteProposal_Call {
	_c.Call.Return(run)
	return _c
}

// ExistsSpecByJobProposalIDAndVersion provides a mock function with given fields: jpID, version, qopts
func (_m *ORM) ExistsSpecByJobProposalIDAndVersion(jpID int64, version int32, qopts ...pg.QOpt) (bool, error) {
	_va := make([]interface{}, len(qopts))
	for _i := range qopts {
		_va[_i] = qopts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, jpID, version)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for ExistsSpecByJobProposalIDAndVersion")
	}

	var r0 bool
	var r1 error
	if rf, ok := ret.Get(0).(func(int64, int32, ...pg.QOpt) (bool, error)); ok {
		return rf(jpID, version, qopts...)
	}
	if rf, ok := ret.Get(0).(func(int64, int32, ...pg.QOpt) bool); ok {
		r0 = rf(jpID, version, qopts...)
	} else {
		r0 = ret.Get(0).(bool)
	}

	if rf, ok := ret.Get(1).(func(int64, int32, ...pg.QOpt) error); ok {
		r1 = rf(jpID, version, qopts...)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_ExistsSpecByJobProposalIDAndVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExistsSpecByJobProposalIDAndVersion'
type ORM_ExistsSpecByJobProposalIDAndVersion_Call struct {
	*mock.Call
}

// ExistsSpecByJobProposalIDAndVersion is a helper method to define mock.On call
//   - jpID int64
//   - version int32
//   - qopts ...pg.QOpt
func (_e *ORM_Expecter) ExistsSpecByJobProposalIDAndVersion(jpID interface{}, version interface{}, qopts ...interface{}) *ORM_ExistsSpecByJobProposalIDAndVersion_Call {
	return &ORM_ExistsSpecByJobProposalIDAndVersion_Call{Call: _e.mock.On("ExistsSpecByJobProposalIDAndVersion",
		append([]interface{}{jpID, version}, qopts...)...)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_ExistsSpecByJobProposalIDAndVersion_Call) Run(run func(jpID int64, version int32, qopts ...pg.QOpt)) *ORM_ExistsSpecByJobProposalIDAndVersion_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Recover the typed variadic qopts from the recorded arguments.
		variadicArgs := make([]pg.QOpt, len(args)-2)
		for i, a := range args[2:] {
			if a != nil {
				variadicArgs[i] = a.(pg.QOpt)
			}
		}
		run(args[0].(int64), args[1].(int32), variadicArgs...)
	})
	return _c
}

// Return sets the values the mock returns for ExistsSpecByJobProposalIDAndVersion.
func (_c *ORM_ExistsSpecByJobProposalIDAndVersion_Call) Return(exists bool, err error) *ORM_ExistsSpecByJobProposalIDAndVersion_Call {
	_c.Call.Return(exists, err)
	return _c
}

// RunAndReturn registers a handler whose results answer ExistsSpecByJobProposalIDAndVersion calls.
func (_c *ORM_ExistsSpecByJobProposalIDAndVersion_Call) RunAndReturn(run func(int64, int32, ...pg.QOpt) (bool, error)) *ORM_ExistsSpecByJobProposalIDAndVersion_Call {
	_c.Call.Return(run)
	return _c
}

// GetApprovedSpec provides a mock function with given fields: jpID, qopts
func (_m *ORM) GetApprovedSpec(jpID int64, qopts ...pg.QOpt) (*feeds.JobProposalSpec, error) {
	_va := make([]interface{}, len(qopts))
	for _i := range qopts {
		_va[_i] = qopts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, jpID)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for GetApprovedSpec")
	}

	var r0 *feeds.JobProposalSpec
	var r1 error
	if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) (*feeds.JobProposalSpec, error)); ok {
		return rf(jpID, qopts...)
	}
	if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) *feeds.JobProposalSpec); ok {
		r0 = rf(jpID, qopts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*feeds.JobProposalSpec)
		}
	}

	if rf, ok := ret.Get(1).(func(int64, ...pg.QOpt) error); ok {
		r1 = rf(jpID, qopts...)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_GetApprovedSpec_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetApprovedSpec'
type ORM_GetApprovedSpec_Call struct {
	*mock.Call
}

// GetApprovedSpec is a helper method to define mock.On call
//   - jpID int64
//   - qopts ...pg.QOpt
func (_e *ORM_Expecter) GetApprovedSpec(jpID interface{}, qopts ...interface{}) *ORM_GetApprovedSpec_Call {
	return &ORM_GetApprovedSpec_Call{Call: _e.mock.On("GetApprovedSpec",
		append([]interface{}{jpID}, qopts...)...)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_GetApprovedSpec_Call) Run(run func(jpID int64, qopts ...pg.QOpt)) *ORM_GetApprovedSpec_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Recover the typed variadic qopts from the recorded arguments.
		variadicArgs := make([]pg.QOpt, len(args)-1)
		for i, a := range args[1:] {
			if a != nil {
				variadicArgs[i] = a.(pg.QOpt)
			}
		}
		run(args[0].(int64), variadicArgs...)
	})
	return _c
}

// Return sets the values the mock returns for GetApprovedSpec.
func (_c *ORM_GetApprovedSpec_Call) Return(_a0 *feeds.JobProposalSpec, _a1 error) *ORM_GetApprovedSpec_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer GetApprovedSpec calls.
func (_c *ORM_GetApprovedSpec_Call) RunAndReturn(run func(int64, ...pg.QOpt) (*feeds.JobProposalSpec, error)) *ORM_GetApprovedSpec_Call {
	_c.Call.Return(run)
	return _c
}

// GetChainConfig provides a mock function with given fields: id
func (_m *ORM) GetChainConfig(id int64) (*feeds.ChainConfig, error) {
	ret := _m.Called(id)

	if len(ret) == 0 {
		panic("no return value specified for GetChainConfig")
	}

	var r0 *feeds.ChainConfig
	var r1 error
	if rf, ok := ret.Get(0).(func(int64) (*feeds.ChainConfig, error)); ok {
		return rf(id)
	}
	if rf, ok := ret.Get(0).(func(int64) *feeds.ChainConfig); ok {
		r0 = rf(id)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*feeds.ChainConfig)
		}
	}

	if rf, ok := ret.Get(1).(func(int64) error); ok {
		r1 = rf(id)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_GetChainConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChainConfig'
type ORM_GetChainConfig_Call struct {
	*mock.Call
}

// GetChainConfig is a helper method to define mock.On call
//   - id int64
func (_e *ORM_Expecter) GetChainConfig(id interface{}) *ORM_GetChainConfig_Call {
	return &ORM_GetChainConfig_Call{Call: _e.mock.On("GetChainConfig", id)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_GetChainConfig_Call) Run(run func(id int64)) *ORM_GetChainConfig_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(int64))
	})
	return _c
}

// Return sets the values the mock returns for GetChainConfig.
func (_c *ORM_GetChainConfig_Call) Return(_a0 *feeds.ChainConfig, _a1 error) *ORM_GetChainConfig_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer GetChainConfig calls.
func (_c *ORM_GetChainConfig_Call) RunAndReturn(run func(int64) (*feeds.ChainConfig, error)) *ORM_GetChainConfig_Call {
	_c.Call.Return(run)
	return _c
}

// GetJobProposal provides a mock function with given fields: id, qopts
func (_m *ORM) GetJobProposal(id int64, qopts ...pg.QOpt) (*feeds.JobProposal, error) {
	_va := make([]interface{}, len(qopts))
	for _i := range qopts {
		_va[_i] = qopts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, id)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for GetJobProposal")
	}

	var r0 *feeds.JobProposal
	var r1 error
	if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) (*feeds.JobProposal, error)); ok {
		return rf(id, qopts...)
	}
	if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) *feeds.JobProposal); ok {
		r0 = rf(id, qopts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*feeds.JobProposal)
		}
	}

	if rf, ok := ret.Get(1).(func(int64, ...pg.QOpt) error); ok {
		r1 = rf(id, qopts...)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_GetJobProposal_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetJobProposal'
type ORM_GetJobProposal_Call struct {
	*mock.Call
}

// GetJobProposal is a helper method to define mock.On call
//   - id int64
//   - qopts ...pg.QOpt
func (_e *ORM_Expecter) GetJobProposal(id interface{}, qopts ...interface{}) *ORM_GetJobProposal_Call {
	return &ORM_GetJobProposal_Call{Call: _e.mock.On("GetJobProposal",
		append([]interface{}{id}, qopts...)...)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_GetJobProposal_Call) Run(run func(id int64, qopts ...pg.QOpt)) *ORM_GetJobProposal_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Recover the typed variadic qopts from the recorded arguments.
		variadicArgs := make([]pg.QOpt, len(args)-1)
		for i, a := range args[1:] {
			if a != nil {
				variadicArgs[i] = a.(pg.QOpt)
			}
		}
		run(args[0].(int64), variadicArgs...)
	})
	return _c
}

// Return sets the values the mock returns for GetJobProposal.
func (_c *ORM_GetJobProposal_Call) Return(_a0 *feeds.JobProposal, _a1 error) *ORM_GetJobProposal_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer GetJobProposal calls.
func (_c *ORM_GetJobProposal_Call) RunAndReturn(run func(int64, ...pg.QOpt) (*feeds.JobProposal, error)) *ORM_GetJobProposal_Call {
	_c.Call.Return(run)
	return _c
}

// GetJobProposalByRemoteUUID provides a mock function with given fields: _a0
func (_m *ORM) GetJobProposalByRemoteUUID(_a0 uuid.UUID) (*feeds.JobProposal, error) {
	ret := _m.Called(_a0)

	if len(ret) == 0 {
		panic("no return value specified for GetJobProposalByRemoteUUID")
	}

	var r0 *feeds.JobProposal
	var r1 error
	if rf, ok := ret.Get(0).(func(uuid.UUID) (*feeds.JobProposal, error)); ok {
		return rf(_a0)
	}
	if rf, ok := ret.Get(0).(func(uuid.UUID) *feeds.JobProposal); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*feeds.JobProposal)
		}
	}

	if rf, ok := ret.Get(1).(func(uuid.UUID) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_GetJobProposalByRemoteUUID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetJobProposalByRemoteUUID'
type ORM_GetJobProposalByRemoteUUID_Call struct {
	*mock.Call
}

// GetJobProposalByRemoteUUID is a helper method to define mock.On call
//   - _a0 uuid.UUID
func (_e *ORM_Expecter) GetJobProposalByRemoteUUID(_a0 interface{}) *ORM_GetJobProposalByRemoteUUID_Call {
	return &ORM_GetJobProposalByRemoteUUID_Call{Call: _e.mock.On("GetJobProposalByRemoteUUID", _a0)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_GetJobProposalByRemoteUUID_Call) Run(run func(_a0 uuid.UUID)) *ORM_GetJobProposalByRemoteUUID_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(uuid.UUID))
	})
	return _c
}

// Return sets the values the mock returns for GetJobProposalByRemoteUUID.
func (_c *ORM_GetJobProposalByRemoteUUID_Call) Return(_a0 *feeds.JobProposal, _a1 error) *ORM_GetJobProposalByRemoteUUID_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer GetJobProposalByRemoteUUID calls.
func (_c *ORM_GetJobProposalByRemoteUUID_Call) RunAndReturn(run func(uuid.UUID) (*feeds.JobProposal, error)) *ORM_GetJobProposalByRemoteUUID_Call {
	_c.Call.Return(run)
	return _c
}

// GetLatestSpec provides a mock function with given fields: jpID
func (_m *ORM) GetLatestSpec(jpID int64) (*feeds.JobProposalSpec, error) {
	ret := _m.Called(jpID)

	if len(ret) == 0 {
		panic("no return value specified for GetLatestSpec")
	}

	var r0 *feeds.JobProposalSpec
	var r1 error
	if rf, ok := ret.Get(0).(func(int64) (*feeds.JobProposalSpec, error)); ok {
		return rf(jpID)
	}
	if rf, ok := ret.Get(0).(func(int64) *feeds.JobProposalSpec); ok {
		r0 = rf(jpID)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*feeds.JobProposalSpec)
		}
	}

	if rf, ok := ret.Get(1).(func(int64) error); ok {
		r1 = rf(jpID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_GetLatestSpec_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestSpec'
type ORM_GetLatestSpec_Call struct {
	*mock.Call
}

// GetLatestSpec is a helper method to define mock.On call
//   - jpID int64
func (_e *ORM_Expecter) GetLatestSpec(jpID interface{}) *ORM_GetLatestSpec_Call {
	return &ORM_GetLatestSpec_Call{Call: _e.mock.On("GetLatestSpec", jpID)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_GetLatestSpec_Call) Run(run func(jpID int64)) *ORM_GetLatestSpec_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(int64))
	})
	return _c
}

// Return sets the values the mock returns for GetLatestSpec.
func (_c *ORM_GetLatestSpec_Call) Return(_a0 *feeds.JobProposalSpec, _a1 error) *ORM_GetLatestSpec_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer GetLatestSpec calls.
func (_c *ORM_GetLatestSpec_Call) RunAndReturn(run func(int64) (*feeds.JobProposalSpec, error)) *ORM_GetLatestSpec_Call {
	_c.Call.Return(run)
	return _c
}

// GetManager provides a mock function with given fields: id
func (_m *ORM) GetManager(id int64) (*feeds.FeedsManager, error) {
	ret := _m.Called(id)

	if len(ret) == 0 {
		panic("no return value specified for GetManager")
	}

	var r0 *feeds.FeedsManager
	var r1 error
	if rf, ok := ret.Get(0).(func(int64) (*feeds.FeedsManager, error)); ok {
		return rf(id)
	}
	if rf, ok := ret.Get(0).(func(int64) *feeds.FeedsManager); ok {
		r0 = rf(id)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*feeds.FeedsManager)
		}
	}

	if rf, ok := ret.Get(1).(func(int64) error); ok {
		r1 = rf(id)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_GetManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetManager'
type ORM_GetManager_Call struct {
	*mock.Call
}

// GetManager is a helper method to define mock.On call
//   - id int64
func (_e *ORM_Expecter) GetManager(id interface{}) *ORM_GetManager_Call {
	return &ORM_GetManager_Call{Call: _e.mock.On("GetManager", id)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_GetManager_Call) Run(run func(id int64)) *ORM_GetManager_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(int64))
	})
	return _c
}

// Return sets the values the mock returns for GetManager.
func (_c *ORM_GetManager_Call) Return(_a0 *feeds.FeedsManager, _a1 error) *ORM_GetManager_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer GetManager calls.
func (_c *ORM_GetManager_Call) RunAndReturn(run func(int64) (*feeds.FeedsManager, error)) *ORM_GetManager_Call {
	_c.Call.Return(run)
	return _c
}

// GetSpec provides a mock function with given fields: id, qopts
func (_m *ORM) GetSpec(id int64, qopts ...pg.QOpt) (*feeds.JobProposalSpec, error) {
	_va := make([]interface{}, len(qopts))
	for _i := range qopts {
		_va[_i] = qopts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, id)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for GetSpec")
	}

	var r0 *feeds.JobProposalSpec
	var r1 error
	if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) (*feeds.JobProposalSpec, error)); ok {
		return rf(id, qopts...)
	}
	if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) *feeds.JobProposalSpec); ok {
		r0 = rf(id, qopts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*feeds.JobProposalSpec)
		}
	}

	if rf, ok := ret.Get(1).(func(int64, ...pg.QOpt) error); ok {
		r1 = rf(id, qopts...)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_GetSpec_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSpec'
type ORM_GetSpec_Call struct {
	*mock.Call
}

// GetSpec is a helper method to define mock.On call
//   - id int64
//   - qopts ...pg.QOpt
func (_e *ORM_Expecter) GetSpec(id interface{}, qopts ...interface{}) *ORM_GetSpec_Call {
	return &ORM_GetSpec_Call{Call: _e.mock.On("GetSpec",
		append([]interface{}{id}, qopts...)...)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_GetSpec_Call) Run(run func(id int64, qopts ...pg.QOpt)) *ORM_GetSpec_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Recover the typed variadic qopts from the recorded arguments.
		variadicArgs := make([]pg.QOpt, len(args)-1)
		for i, a := range args[1:] {
			if a != nil {
				variadicArgs[i] = a.(pg.QOpt)
			}
		}
		run(args[0].(int64), variadicArgs...)
	})
	return _c
}

// Return sets the values the mock returns for GetSpec.
func (_c *ORM_GetSpec_Call) Return(_a0 *feeds.JobProposalSpec, _a1 error) *ORM_GetSpec_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer GetSpec calls.
func (_c *ORM_GetSpec_Call) RunAndReturn(run func(int64, ...pg.QOpt) (*feeds.JobProposalSpec, error)) *ORM_GetSpec_Call {
	_c.Call.Return(run)
	return _c
}

// IsJobManaged provides a mock function with given fields: jobID, qopts
func (_m *ORM) IsJobManaged(jobID int64, qopts ...pg.QOpt) (bool, error) {
	_va := make([]interface{}, len(qopts))
	for _i := range qopts {
		_va[_i] = qopts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, jobID)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for IsJobManaged")
	}

	var r0 bool
	var r1 error
	if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) (bool, error)); ok {
		return rf(jobID, qopts...)
	}
	if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) bool); ok {
		r0 = rf(jobID, qopts...)
	} else {
		r0 = ret.Get(0).(bool)
	}

	if rf, ok := ret.Get(1).(func(int64, ...pg.QOpt) error); ok {
		r1 = rf(jobID, qopts...)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_IsJobManaged_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsJobManaged'
type ORM_IsJobManaged_Call struct {
	*mock.Call
}

// IsJobManaged is a helper method to define mock.On call
//   - jobID int64
//   - qopts ...pg.QOpt
func (_e *ORM_Expecter) IsJobManaged(jobID interface{}, qopts ...interface{}) *ORM_IsJobManaged_Call {
	return &ORM_IsJobManaged_Call{Call: _e.mock.On("IsJobManaged",
		append([]interface{}{jobID}, qopts...)...)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_IsJobManaged_Call) Run(run func(jobID int64, qopts ...pg.QOpt)) *ORM_IsJobManaged_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Recover the typed variadic qopts from the recorded arguments.
		variadicArgs := make([]pg.QOpt, len(args)-1)
		for i, a := range args[1:] {
			if a != nil {
				variadicArgs[i] = a.(pg.QOpt)
			}
		}
		run(args[0].(int64), variadicArgs...)
	})
	return _c
}

// Return sets the values the mock returns for IsJobManaged.
func (_c *ORM_IsJobManaged_Call) Return(_a0 bool, _a1 error) *ORM_IsJobManaged_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer IsJobManaged calls.
func (_c *ORM_IsJobManaged_Call) RunAndReturn(run func(int64, ...pg.QOpt) (bool, error)) *ORM_IsJobManaged_Call {
	_c.Call.Return(run)
	return _c
}

// ListChainConfigsByManagerIDs provides a mock function with given fields: mgrIDs
func (_m *ORM) ListChainConfigsByManagerIDs(mgrIDs []int64) ([]feeds.ChainConfig, error) {
	ret := _m.Called(mgrIDs)

	if len(ret) == 0 {
		panic("no return value specified for ListChainConfigsByManagerIDs")
	}

	var r0 []feeds.ChainConfig
	var r1 error
	if rf, ok := ret.Get(0).(func([]int64) ([]feeds.ChainConfig, error)); ok {
		return rf(mgrIDs)
	}
	if rf, ok := ret.Get(0).(func([]int64) []feeds.ChainConfig); ok {
		r0 = rf(mgrIDs)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]feeds.ChainConfig)
		}
	}

	if rf, ok := ret.Get(1).(func([]int64) error); ok {
		r1 = rf(mgrIDs)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_ListChainConfigsByManagerIDs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListChainConfigsByManagerIDs'
type ORM_ListChainConfigsByManagerIDs_Call struct {
	*mock.Call
}

// ListChainConfigsByManagerIDs is a helper method to define mock.On call
//   - mgrIDs []int64
func (_e *ORM_Expecter) ListChainConfigsByManagerIDs(mgrIDs interface{}) *ORM_ListChainConfigsByManagerIDs_Call {
	return &ORM_ListChainConfigsByManagerIDs_Call{Call: _e.mock.On("ListChainConfigsByManagerIDs", mgrIDs)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_ListChainConfigsByManagerIDs_Call) Run(run func(mgrIDs []int64)) *ORM_ListChainConfigsByManagerIDs_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].([]int64))
	})
	return _c
}

// Return sets the values the mock returns for ListChainConfigsByManagerIDs.
func (_c *ORM_ListChainConfigsByManagerIDs_Call) Return(_a0 []feeds.ChainConfig, _a1 error) *ORM_ListChainConfigsByManagerIDs_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer ListChainConfigsByManagerIDs calls.
func (_c *ORM_ListChainConfigsByManagerIDs_Call) RunAndReturn(run func([]int64) ([]feeds.ChainConfig, error)) *ORM_ListChainConfigsByManagerIDs_Call {
	_c.Call.Return(run)
	return _c
}

// ListJobProposals provides a mock function with given fields:
func (_m *ORM) ListJobProposals() ([]feeds.JobProposal, error) {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for ListJobProposals")
	}

	var r0 []feeds.JobProposal
	var r1 error
	if rf, ok := ret.Get(0).(func() ([]feeds.JobProposal, error)); ok {
		return rf()
	}
	if rf, ok := ret.Get(0).(func() []feeds.JobProposal); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]feeds.JobProposal)
		}
	}

	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_ListJobProposals_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListJobProposals'
type ORM_ListJobProposals_Call struct {
	*mock.Call
}

// ListJobProposals is a helper method to define mock.On call
func (_e *ORM_Expecter) ListJobProposals() *ORM_ListJobProposals_Call {
	return &ORM_ListJobProposals_Call{Call: _e.mock.On("ListJobProposals")}
}

// Run registers a callback invoked when ListJobProposals is called.
func (_c *ORM_ListJobProposals_Call) Run(run func()) *ORM_ListJobProposals_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run()
	})
	return _c
}

// Return sets the values the mock returns for ListJobProposals.
func (_c *ORM_ListJobProposals_Call) Return(jps []feeds.JobProposal, err error) *ORM_ListJobProposals_Call {
	_c.Call.Return(jps, err)
	return _c
}

// RunAndReturn registers a handler whose results answer ListJobProposals calls.
func (_c *ORM_ListJobProposals_Call) RunAndReturn(run func() ([]feeds.JobProposal, error)) *ORM_ListJobProposals_Call {
	_c.Call.Return(run)
	return _c
}

// ListJobProposalsByManagersIDs provides a mock function with given fields: ids, qopts
func (_m *ORM) ListJobProposalsByManagersIDs(ids []int64, qopts ...pg.QOpt) ([]feeds.JobProposal, error) {
	_va := make([]interface{}, len(qopts))
	for _i := range qopts {
		_va[_i] = qopts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ids)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for ListJobProposalsByManagersIDs")
	}

	var r0 []feeds.JobProposal
	var r1 error
	if rf, ok := ret.Get(0).(func([]int64, ...pg.QOpt) ([]feeds.JobProposal, error)); ok {
		return rf(ids, qopts...)
	}
	if rf, ok := ret.Get(0).(func([]int64, ...pg.QOpt) []feeds.JobProposal); ok {
		r0 = rf(ids, qopts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]feeds.JobProposal)
		}
	}

	if rf, ok := ret.Get(1).(func([]int64, ...pg.QOpt) error); ok {
		r1 = rf(ids, qopts...)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_ListJobProposalsByManagersIDs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListJobProposalsByManagersIDs'
type ORM_ListJobProposalsByManagersIDs_Call struct {
	*mock.Call
}

// ListJobProposalsByManagersIDs is a helper method to define mock.On call
//   - ids []int64
//   - qopts ...pg.QOpt
func (_e *ORM_Expecter) ListJobProposalsByManagersIDs(ids interface{}, qopts ...interface{}) *ORM_ListJobProposalsByManagersIDs_Call {
	return &ORM_ListJobProposalsByManagersIDs_Call{Call: _e.mock.On("ListJobProposalsByManagersIDs",
		append([]interface{}{ids}, qopts...)...)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_ListJobProposalsByManagersIDs_Call) Run(run func(ids []int64, qopts ...pg.QOpt)) *ORM_ListJobProposalsByManagersIDs_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Recover the typed variadic qopts from the recorded arguments.
		variadicArgs := make([]pg.QOpt, len(args)-1)
		for i, a := range args[1:] {
			if a != nil {
				variadicArgs[i] = a.(pg.QOpt)
			}
		}
		run(args[0].([]int64), variadicArgs...)
	})
	return _c
}

// Return sets the values the mock returns for ListJobProposalsByManagersIDs.
func (_c *ORM_ListJobProposalsByManagersIDs_Call) Return(_a0 []feeds.JobProposal, _a1 error) *ORM_ListJobProposalsByManagersIDs_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer ListJobProposalsByManagersIDs calls.
func (_c *ORM_ListJobProposalsByManagersIDs_Call) RunAndReturn(run func([]int64, ...pg.QOpt) ([]feeds.JobProposal, error)) *ORM_ListJobProposalsByManagersIDs_Call {
	_c.Call.Return(run)
	return _c
}

// ListManagers provides a mock function with given fields:
func (_m *ORM) ListManagers() ([]feeds.FeedsManager, error) {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for ListManagers")
	}

	var r0 []feeds.FeedsManager
	var r1 error
	if rf, ok := ret.Get(0).(func() ([]feeds.FeedsManager, error)); ok {
		return rf()
	}
	if rf, ok := ret.Get(0).(func() []feeds.FeedsManager); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]feeds.FeedsManager)
		}
	}

	if rf, ok := ret.Get(1).(func() error); ok {
		r1 = rf()
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_ListManagers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListManagers'
type ORM_ListManagers_Call struct {
	*mock.Call
}

// ListManagers is a helper method to define mock.On call
func (_e *ORM_Expecter) ListManagers() *ORM_ListManagers_Call {
	return &ORM_ListManagers_Call{Call: _e.mock.On("ListManagers")}
}

// Run registers a callback invoked when ListManagers is called.
func (_c *ORM_ListManagers_Call) Run(run func()) *ORM_ListManagers_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run()
	})
	return _c
}

// Return sets the values the mock returns for ListManagers.
func (_c *ORM_ListManagers_Call) Return(mgrs []feeds.FeedsManager, err error) *ORM_ListManagers_Call {
	_c.Call.Return(mgrs, err)
	return _c
}

// RunAndReturn registers a handler whose results answer ListManagers calls.
func (_c *ORM_ListManagers_Call) RunAndReturn(run func() ([]feeds.FeedsManager, error)) *ORM_ListManagers_Call {
	_c.Call.Return(run)
	return _c
}

// ListManagersByIDs provides a mock function with given fields: ids
func (_m *ORM) ListManagersByIDs(ids []int64) ([]feeds.FeedsManager, error) {
	ret := _m.Called(ids)

	if len(ret) == 0 {
		panic("no return value specified for ListManagersByIDs")
	}

	var r0 []feeds.FeedsManager
	var r1 error
	if rf, ok := ret.Get(0).(func([]int64) ([]feeds.FeedsManager, error)); ok {
		return rf(ids)
	}
	if rf, ok := ret.Get(0).(func([]int64) []feeds.FeedsManager); ok {
		r0 = rf(ids)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]feeds.FeedsManager)
		}
	}

	if rf, ok := ret.Get(1).(func([]int64) error); ok {
		r1 = rf(ids)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_ListManagersByIDs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListManagersByIDs'
type ORM_ListManagersByIDs_Call struct {
	*mock.Call
}

// ListManagersByIDs is a helper method to define mock.On call
//   - ids []int64
func (_e *ORM_Expecter) ListManagersByIDs(ids interface{}) *ORM_ListManagersByIDs_Call {
	return &ORM_ListManagersByIDs_Call{Call: _e.mock.On("ListManagersByIDs", ids)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_ListManagersByIDs_Call) Run(run func(ids []int64)) *ORM_ListManagersByIDs_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].([]int64))
	})
	return _c
}

// Return sets the values the mock returns for ListManagersByIDs.
func (_c *ORM_ListManagersByIDs_Call) Return(_a0 []feeds.FeedsManager, _a1 error) *ORM_ListManagersByIDs_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer ListManagersByIDs calls.
func (_c *ORM_ListManagersByIDs_Call) RunAndReturn(run func([]int64) ([]feeds.FeedsManager, error)) *ORM_ListManagersByIDs_Call {
	_c.Call.Return(run)
	return _c
}

// ListSpecsByJobProposalIDs provides a mock function with given fields: ids, qopts
func (_m *ORM) ListSpecsByJobProposalIDs(ids []int64, qopts ...pg.QOpt) ([]feeds.JobProposalSpec, error) {
	_va := make([]interface{}, len(qopts))
	for _i := range qopts {
		_va[_i] = qopts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ids)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for ListSpecsByJobProposalIDs")
	}

	var r0 []feeds.JobProposalSpec
	var r1 error
	if rf, ok := ret.Get(0).(func([]int64, ...pg.QOpt) ([]feeds.JobProposalSpec, error)); ok {
		return rf(ids, qopts...)
	}
	if rf, ok := ret.Get(0).(func([]int64, ...pg.QOpt) []feeds.JobProposalSpec); ok {
		r0 = rf(ids, qopts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]feeds.JobProposalSpec)
		}
	}

	if rf, ok := ret.Get(1).(func([]int64, ...pg.QOpt) error); ok {
		r1 = rf(ids, qopts...)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_ListSpecsByJobProposalIDs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListSpecsByJobProposalIDs'
type ORM_ListSpecsByJobProposalIDs_Call struct {
	*mock.Call
}

// ListSpecsByJobProposalIDs is a helper method to define mock.On call
//   - ids []int64
//   - qopts ...pg.QOpt
func (_e *ORM_Expecter) ListSpecsByJobProposalIDs(ids interface{}, qopts ...interface{}) *ORM_ListSpecsByJobProposalIDs_Call {
	return &ORM_ListSpecsByJobProposalIDs_Call{Call: _e.mock.On("ListSpecsByJobProposalIDs",
		append([]interface{}{ids}, qopts...)...)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_ListSpecsByJobProposalIDs_Call) Run(run func(ids []int64, qopts ...pg.QOpt)) *ORM_ListSpecsByJobProposalIDs_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Recover the typed variadic qopts from the recorded arguments.
		variadicArgs := make([]pg.QOpt, len(args)-1)
		for i, a := range args[1:] {
			if a != nil {
				variadicArgs[i] = a.(pg.QOpt)
			}
		}
		run(args[0].([]int64), variadicArgs...)
	})
	return _c
}

// Return sets the values the mock returns for ListSpecsByJobProposalIDs.
func (_c *ORM_ListSpecsByJobProposalIDs_Call) Return(_a0 []feeds.JobProposalSpec, _a1 error) *ORM_ListSpecsByJobProposalIDs_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer ListSpecsByJobProposalIDs calls.
func (_c *ORM_ListSpecsByJobProposalIDs_Call) RunAndReturn(run func([]int64, ...pg.QOpt) ([]feeds.JobProposalSpec, error)) *ORM_ListSpecsByJobProposalIDs_Call {
	_c.Call.Return(run)
	return _c
}

// RejectSpec provides a mock function with given fields: id, qopts
func (_m *ORM) RejectSpec(id int64, qopts ...pg.QOpt) error {
	_va := make([]interface{}, len(qopts))
	for _i := range qopts {
		_va[_i] = qopts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, id)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for RejectSpec")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) error); ok {
		r0 = rf(id, qopts...)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// ORM_RejectSpec_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RejectSpec'
type ORM_RejectSpec_Call struct {
	*mock.Call
}

// RejectSpec is a helper method to define mock.On call
//   - id int64
//   - qopts ...pg.QOpt
func (_e *ORM_Expecter) RejectSpec(id interface{}, qopts ...interface{}) *ORM_RejectSpec_Call {
	return &ORM_RejectSpec_Call{Call: _e.mock.On("RejectSpec",
		append([]interface{}{id}, qopts...)...)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_RejectSpec_Call) Run(run func(id int64, qopts ...pg.QOpt)) *ORM_RejectSpec_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Recover the typed variadic qopts from the recorded arguments.
		variadicArgs := make([]pg.QOpt, len(args)-1)
		for i, a := range args[1:] {
			if a != nil {
				variadicArgs[i] = a.(pg.QOpt)
			}
		}
		run(args[0].(int64), variadicArgs...)
	})
	return _c
}

// Return sets the value the mock returns for RejectSpec.
func (_c *ORM_RejectSpec_Call) Return(_a0 error) *ORM_RejectSpec_Call {
	_c.Call.Return(_a0)
	return _c
}

// RunAndReturn registers a handler whose result answers RejectSpec calls.
func (_c *ORM_RejectSpec_Call) RunAndReturn(run func(int64, ...pg.QOpt) error) *ORM_RejectSpec_Call {
	_c.Call.Return(run)
	return _c
}

// RevokeSpec provides a mock function with given fields: id, qopts
func (_m *ORM) RevokeSpec(id int64, qopts ...pg.QOpt) error {
	_va := make([]interface{}, len(qopts))
	for _i := range qopts {
		_va[_i] = qopts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, id)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for RevokeSpec")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) error); ok {
		r0 = rf(id, qopts...)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// ORM_RevokeSpec_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RevokeSpec'
type ORM_RevokeSpec_Call struct {
	*mock.Call
}

// RevokeSpec is a helper method to define mock.On call
//   - id int64
//   - qopts ...pg.QOpt
func (_e *ORM_Expecter) RevokeSpec(id interface{}, qopts ...interface{}) *ORM_RevokeSpec_Call {
	return &ORM_RevokeSpec_Call{Call: _e.mock.On("RevokeSpec",
		append([]interface{}{id}, qopts...)...)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_RevokeSpec_Call) Run(run func(id int64, qopts ...pg.QOpt)) *ORM_RevokeSpec_Call {
	_c.Call.Run(func(args mock.Arguments) {
		// Recover the typed variadic qopts from the recorded arguments.
		variadicArgs := make([]pg.QOpt, len(args)-1)
		for i, a := range args[1:] {
			if a != nil {
				variadicArgs[i] = a.(pg.QOpt)
			}
		}
		run(args[0].(int64), variadicArgs...)
	})
	return _c
}

// Return sets the value the mock returns for RevokeSpec.
func (_c *ORM_RevokeSpec_Call) Return(_a0 error) *ORM_RevokeSpec_Call {
	_c.Call.Return(_a0)
	return _c
}

// RunAndReturn registers a handler whose result answers RevokeSpec calls.
func (_c *ORM_RevokeSpec_Call) RunAndReturn(run func(int64, ...pg.QOpt) error) *ORM_RevokeSpec_Call {
	_c.Call.Return(run)
	return _c
}

// UpdateChainConfig provides a mock function with given fields: cfg
func (_m *ORM) UpdateChainConfig(cfg feeds.ChainConfig) (int64, error) {
	ret := _m.Called(cfg)

	if len(ret) == 0 {
		panic("no return value specified for UpdateChainConfig")
	}

	var r0 int64
	var r1 error
	if rf, ok := ret.Get(0).(func(feeds.ChainConfig) (int64, error)); ok {
		return rf(cfg)
	}
	if rf, ok := ret.Get(0).(func(feeds.ChainConfig) int64); ok {
		r0 = rf(cfg)
	} else {
		r0 = ret.Get(0).(int64)
	}

	if rf, ok := ret.Get(1).(func(feeds.ChainConfig) error); ok {
		r1 = rf(cfg)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ORM_UpdateChainConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateChainConfig'
type ORM_UpdateChainConfig_Call struct {
	*mock.Call
}

// UpdateChainConfig is a helper method to define mock.On call
//   - cfg feeds.ChainConfig
func (_e *ORM_Expecter) UpdateChainConfig(cfg interface{}) *ORM_UpdateChainConfig_Call {
	return &ORM_UpdateChainConfig_Call{Call: _e.mock.On("UpdateChainConfig", cfg)}
}

// Run registers a callback invoked with the call's typed arguments.
func (_c *ORM_UpdateChainConfig_Call) Run(run func(cfg feeds.ChainConfig)) *ORM_UpdateChainConfig_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(feeds.ChainConfig))
	})
	return _c
}

// Return sets the values the mock returns for UpdateChainConfig.
func (_c *ORM_UpdateChainConfig_Call) Return(_a0 int64, _a1 error) *ORM_UpdateChainConfig_Call {
	_c.Call.Return(_a0, _a1)
	return _c
}

// RunAndReturn registers a handler whose results answer UpdateChainConfig calls.
func (_c *ORM_UpdateChainConfig_Call) RunAndReturn(run func(feeds.ChainConfig) (int64, error)) *ORM_UpdateChainConfig_Call {
	_c.Call.Return(run)
	return _c
}

// UpdateJobProposalStatus provides a mock function with given fields: id, status, qopts
func (_m *ORM)
UpdateJobProposalStatus(id int64, status feeds.JobProposalStatus, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, id, status) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for UpdateJobProposalStatus") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int64, feeds.JobProposalStatus, ...pg.QOpt) error); ok { + r0 = rf(id, status, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ORM_UpdateJobProposalStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateJobProposalStatus' +type ORM_UpdateJobProposalStatus_Call struct { + *mock.Call +} + +// UpdateJobProposalStatus is a helper method to define mock.On call +// - id int64 +// - status feeds.JobProposalStatus +// - qopts ...pg.QOpt +func (_e *ORM_Expecter) UpdateJobProposalStatus(id interface{}, status interface{}, qopts ...interface{}) *ORM_UpdateJobProposalStatus_Call { + return &ORM_UpdateJobProposalStatus_Call{Call: _e.mock.On("UpdateJobProposalStatus", + append([]interface{}{id, status}, qopts...)...)} +} + +func (_c *ORM_UpdateJobProposalStatus_Call) Run(run func(id int64, status feeds.JobProposalStatus, qopts ...pg.QOpt)) *ORM_UpdateJobProposalStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]pg.QOpt, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(pg.QOpt) + } + } + run(args[0].(int64), args[1].(feeds.JobProposalStatus), variadicArgs...) 
+ }) + return _c +} + +func (_c *ORM_UpdateJobProposalStatus_Call) Return(_a0 error) *ORM_UpdateJobProposalStatus_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ORM_UpdateJobProposalStatus_Call) RunAndReturn(run func(int64, feeds.JobProposalStatus, ...pg.QOpt) error) *ORM_UpdateJobProposalStatus_Call { + _c.Call.Return(run) + return _c +} + +// UpdateManager provides a mock function with given fields: mgr, qopts +func (_m *ORM) UpdateManager(mgr feeds.FeedsManager, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, mgr) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for UpdateManager") + } + + var r0 error + if rf, ok := ret.Get(0).(func(feeds.FeedsManager, ...pg.QOpt) error); ok { + r0 = rf(mgr, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ORM_UpdateManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateManager' +type ORM_UpdateManager_Call struct { + *mock.Call +} + +// UpdateManager is a helper method to define mock.On call +// - mgr feeds.FeedsManager +// - qopts ...pg.QOpt +func (_e *ORM_Expecter) UpdateManager(mgr interface{}, qopts ...interface{}) *ORM_UpdateManager_Call { + return &ORM_UpdateManager_Call{Call: _e.mock.On("UpdateManager", + append([]interface{}{mgr}, qopts...)...)} +} + +func (_c *ORM_UpdateManager_Call) Run(run func(mgr feeds.FeedsManager, qopts ...pg.QOpt)) *ORM_UpdateManager_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]pg.QOpt, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(pg.QOpt) + } + } + run(args[0].(feeds.FeedsManager), variadicArgs...) 
+ }) + return _c +} + +func (_c *ORM_UpdateManager_Call) Return(_a0 error) *ORM_UpdateManager_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ORM_UpdateManager_Call) RunAndReturn(run func(feeds.FeedsManager, ...pg.QOpt) error) *ORM_UpdateManager_Call { + _c.Call.Return(run) + return _c +} + +// UpdateSpecDefinition provides a mock function with given fields: id, spec, qopts +func (_m *ORM) UpdateSpecDefinition(id int64, spec string, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, id, spec) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for UpdateSpecDefinition") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int64, string, ...pg.QOpt) error); ok { + r0 = rf(id, spec, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ORM_UpdateSpecDefinition_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateSpecDefinition' +type ORM_UpdateSpecDefinition_Call struct { + *mock.Call +} + +// UpdateSpecDefinition is a helper method to define mock.On call +// - id int64 +// - spec string +// - qopts ...pg.QOpt +func (_e *ORM_Expecter) UpdateSpecDefinition(id interface{}, spec interface{}, qopts ...interface{}) *ORM_UpdateSpecDefinition_Call { + return &ORM_UpdateSpecDefinition_Call{Call: _e.mock.On("UpdateSpecDefinition", + append([]interface{}{id, spec}, qopts...)...)} +} + +func (_c *ORM_UpdateSpecDefinition_Call) Run(run func(id int64, spec string, qopts ...pg.QOpt)) *ORM_UpdateSpecDefinition_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]pg.QOpt, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(pg.QOpt) + } + } + run(args[0].(int64), args[1].(string), variadicArgs...) 
+ }) + return _c +} + +func (_c *ORM_UpdateSpecDefinition_Call) Return(_a0 error) *ORM_UpdateSpecDefinition_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ORM_UpdateSpecDefinition_Call) RunAndReturn(run func(int64, string, ...pg.QOpt) error) *ORM_UpdateSpecDefinition_Call { + _c.Call.Return(run) + return _c +} + +// UpsertJobProposal provides a mock function with given fields: jp, qopts +func (_m *ORM) UpsertJobProposal(jp *feeds.JobProposal, qopts ...pg.QOpt) (int64, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, jp) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for UpsertJobProposal") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(*feeds.JobProposal, ...pg.QOpt) (int64, error)); ok { + return rf(jp, qopts...) + } + if rf, ok := ret.Get(0).(func(*feeds.JobProposal, ...pg.QOpt) int64); ok { + r0 = rf(jp, qopts...) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(*feeds.JobProposal, ...pg.QOpt) error); ok { + r1 = rf(jp, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ORM_UpsertJobProposal_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpsertJobProposal' +type ORM_UpsertJobProposal_Call struct { + *mock.Call +} + +// UpsertJobProposal is a helper method to define mock.On call +// - jp *feeds.JobProposal +// - qopts ...pg.QOpt +func (_e *ORM_Expecter) UpsertJobProposal(jp interface{}, qopts ...interface{}) *ORM_UpsertJobProposal_Call { + return &ORM_UpsertJobProposal_Call{Call: _e.mock.On("UpsertJobProposal", + append([]interface{}{jp}, qopts...)...)} +} + +func (_c *ORM_UpsertJobProposal_Call) Run(run func(jp *feeds.JobProposal, qopts ...pg.QOpt)) *ORM_UpsertJobProposal_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]pg.QOpt, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(pg.QOpt) + } + } + run(args[0].(*feeds.JobProposal), variadicArgs...) + }) + return _c +} + +func (_c *ORM_UpsertJobProposal_Call) Return(_a0 int64, _a1 error) *ORM_UpsertJobProposal_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ORM_UpsertJobProposal_Call) RunAndReturn(run func(*feeds.JobProposal, ...pg.QOpt) (int64, error)) *ORM_UpsertJobProposal_Call { + _c.Call.Return(run) + return _c +} + +// NewORM creates a new instance of ORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewORM(t interface { + mock.TestingT + Cleanup(func()) +}) *ORM { + mock := &ORM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/feeds/mocks/service.go b/core/services/feeds/mocks/service.go new file mode 100644 index 00000000..c2c69275 --- /dev/null +++ b/core/services/feeds/mocks/service.go @@ -0,0 +1,760 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + feeds "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + mock "github.com/stretchr/testify/mock" +) + +// Service is an autogenerated mock type for the Service type +type Service struct { + mock.Mock +} + +// ApproveSpec provides a mock function with given fields: ctx, id, force +func (_m *Service) ApproveSpec(ctx context.Context, id int64, force bool) error { + ret := _m.Called(ctx, id, force) + + if len(ret) == 0 { + panic("no return value specified for ApproveSpec") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64, bool) error); ok { + r0 = rf(ctx, id, force) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CancelSpec provides a mock function with given fields: ctx, id +func (_m *Service) CancelSpec(ctx context.Context, id int64) error { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for CancelSpec") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Close provides a mock function with given fields: +func (_m *Service) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CountJobProposalsByStatus provides a mock function with given fields: +func (_m *Service) CountJobProposalsByStatus() (*feeds.JobProposalCounts, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CountJobProposalsByStatus") + } + + var r0 *feeds.JobProposalCounts + var r1 error + if rf, ok := ret.Get(0).(func() (*feeds.JobProposalCounts, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *feeds.JobProposalCounts); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*feeds.JobProposalCounts) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CountManagers provides a mock function with given fields: +func (_m *Service) CountManagers() (int64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CountManagers") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func() (int64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateChainConfig provides a mock function with given fields: ctx, cfg +func (_m *Service) CreateChainConfig(ctx context.Context, cfg feeds.ChainConfig) (int64, error) { + ret := _m.Called(ctx, cfg) + + if len(ret) == 0 { + panic("no return value specified for CreateChainConfig") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, feeds.ChainConfig) (int64, error)); ok { + return rf(ctx, cfg) + } + if rf, ok := ret.Get(0).(func(context.Context, feeds.ChainConfig) int64); ok { + r0 = rf(ctx, cfg) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, feeds.ChainConfig) error); ok { + r1 = rf(ctx, cfg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteChainConfig provides a mock function with given fields: ctx, id +func (_m *Service) DeleteChainConfig(ctx context.Context, id int64) (int64, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for DeleteChainConfig") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int64) (int64, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, int64) int64); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok 
:= ret.Get(1).(func(context.Context, int64) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteJob provides a mock function with given fields: ctx, args +func (_m *Service) DeleteJob(ctx context.Context, args *feeds.DeleteJobArgs) (int64, error) { + ret := _m.Called(ctx, args) + + if len(ret) == 0 { + panic("no return value specified for DeleteJob") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *feeds.DeleteJobArgs) (int64, error)); ok { + return rf(ctx, args) + } + if rf, ok := ret.Get(0).(func(context.Context, *feeds.DeleteJobArgs) int64); ok { + r0 = rf(ctx, args) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, *feeds.DeleteJobArgs) error); ok { + r1 = rf(ctx, args) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetChainConfig provides a mock function with given fields: id +func (_m *Service) GetChainConfig(id int64) (*feeds.ChainConfig, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for GetChainConfig") + } + + var r0 *feeds.ChainConfig + var r1 error + if rf, ok := ret.Get(0).(func(int64) (*feeds.ChainConfig, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(int64) *feeds.ChainConfig); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*feeds.ChainConfig) + } + } + + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetJobProposal provides a mock function with given fields: id +func (_m *Service) GetJobProposal(id int64) (*feeds.JobProposal, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for GetJobProposal") + } + + var r0 *feeds.JobProposal + var r1 error + if rf, ok := ret.Get(0).(func(int64) (*feeds.JobProposal, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(int64) *feeds.JobProposal); ok { + r0 = 
rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*feeds.JobProposal) + } + } + + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetManager provides a mock function with given fields: id +func (_m *Service) GetManager(id int64) (*feeds.FeedsManager, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for GetManager") + } + + var r0 *feeds.FeedsManager + var r1 error + if rf, ok := ret.Get(0).(func(int64) (*feeds.FeedsManager, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(int64) *feeds.FeedsManager); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*feeds.FeedsManager) + } + } + + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSpec provides a mock function with given fields: id +func (_m *Service) GetSpec(id int64) (*feeds.JobProposalSpec, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for GetSpec") + } + + var r0 *feeds.JobProposalSpec + var r1 error + if rf, ok := ret.Get(0).(func(int64) (*feeds.JobProposalSpec, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(int64) *feeds.JobProposalSpec); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*feeds.JobProposalSpec) + } + } + + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsJobManaged provides a mock function with given fields: ctx, jobID +func (_m *Service) IsJobManaged(ctx context.Context, jobID int64) (bool, error) { + ret := _m.Called(ctx, jobID) + + if len(ret) == 0 { + panic("no return value specified for IsJobManaged") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int64) (bool, error)); ok { + return rf(ctx, jobID) + } + if rf, ok := 
ret.Get(0).(func(context.Context, int64) bool); ok { + r0 = rf(ctx, jobID) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok { + r1 = rf(ctx, jobID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListChainConfigsByManagerIDs provides a mock function with given fields: mgrIDs +func (_m *Service) ListChainConfigsByManagerIDs(mgrIDs []int64) ([]feeds.ChainConfig, error) { + ret := _m.Called(mgrIDs) + + if len(ret) == 0 { + panic("no return value specified for ListChainConfigsByManagerIDs") + } + + var r0 []feeds.ChainConfig + var r1 error + if rf, ok := ret.Get(0).(func([]int64) ([]feeds.ChainConfig, error)); ok { + return rf(mgrIDs) + } + if rf, ok := ret.Get(0).(func([]int64) []feeds.ChainConfig); ok { + r0 = rf(mgrIDs) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]feeds.ChainConfig) + } + } + + if rf, ok := ret.Get(1).(func([]int64) error); ok { + r1 = rf(mgrIDs) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListJobProposals provides a mock function with given fields: +func (_m *Service) ListJobProposals() ([]feeds.JobProposal, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ListJobProposals") + } + + var r0 []feeds.JobProposal + var r1 error + if rf, ok := ret.Get(0).(func() ([]feeds.JobProposal, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []feeds.JobProposal); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]feeds.JobProposal) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListJobProposalsByManagersIDs provides a mock function with given fields: ids +func (_m *Service) ListJobProposalsByManagersIDs(ids []int64) ([]feeds.JobProposal, error) { + ret := _m.Called(ids) + + if len(ret) == 0 { + panic("no return value specified for ListJobProposalsByManagersIDs") + } + + var r0 []feeds.JobProposal 
+ var r1 error + if rf, ok := ret.Get(0).(func([]int64) ([]feeds.JobProposal, error)); ok { + return rf(ids) + } + if rf, ok := ret.Get(0).(func([]int64) []feeds.JobProposal); ok { + r0 = rf(ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]feeds.JobProposal) + } + } + + if rf, ok := ret.Get(1).(func([]int64) error); ok { + r1 = rf(ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListManagers provides a mock function with given fields: +func (_m *Service) ListManagers() ([]feeds.FeedsManager, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ListManagers") + } + + var r0 []feeds.FeedsManager + var r1 error + if rf, ok := ret.Get(0).(func() ([]feeds.FeedsManager, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []feeds.FeedsManager); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]feeds.FeedsManager) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListManagersByIDs provides a mock function with given fields: ids +func (_m *Service) ListManagersByIDs(ids []int64) ([]feeds.FeedsManager, error) { + ret := _m.Called(ids) + + if len(ret) == 0 { + panic("no return value specified for ListManagersByIDs") + } + + var r0 []feeds.FeedsManager + var r1 error + if rf, ok := ret.Get(0).(func([]int64) ([]feeds.FeedsManager, error)); ok { + return rf(ids) + } + if rf, ok := ret.Get(0).(func([]int64) []feeds.FeedsManager); ok { + r0 = rf(ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]feeds.FeedsManager) + } + } + + if rf, ok := ret.Get(1).(func([]int64) error); ok { + r1 = rf(ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListSpecsByJobProposalIDs provides a mock function with given fields: ids +func (_m *Service) ListSpecsByJobProposalIDs(ids []int64) ([]feeds.JobProposalSpec, error) { + ret := _m.Called(ids) + + if len(ret) == 0 { + panic("no return 
value specified for ListSpecsByJobProposalIDs") + } + + var r0 []feeds.JobProposalSpec + var r1 error + if rf, ok := ret.Get(0).(func([]int64) ([]feeds.JobProposalSpec, error)); ok { + return rf(ids) + } + if rf, ok := ret.Get(0).(func([]int64) []feeds.JobProposalSpec); ok { + r0 = rf(ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]feeds.JobProposalSpec) + } + } + + if rf, ok := ret.Get(1).(func([]int64) error); ok { + r1 = rf(ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProposeJob provides a mock function with given fields: ctx, args +func (_m *Service) ProposeJob(ctx context.Context, args *feeds.ProposeJobArgs) (int64, error) { + ret := _m.Called(ctx, args) + + if len(ret) == 0 { + panic("no return value specified for ProposeJob") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *feeds.ProposeJobArgs) (int64, error)); ok { + return rf(ctx, args) + } + if rf, ok := ret.Get(0).(func(context.Context, *feeds.ProposeJobArgs) int64); ok { + r0 = rf(ctx, args) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, *feeds.ProposeJobArgs) error); ok { + r1 = rf(ctx, args) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RegisterManager provides a mock function with given fields: ctx, params +func (_m *Service) RegisterManager(ctx context.Context, params feeds.RegisterManagerParams) (int64, error) { + ret := _m.Called(ctx, params) + + if len(ret) == 0 { + panic("no return value specified for RegisterManager") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, feeds.RegisterManagerParams) (int64, error)); ok { + return rf(ctx, params) + } + if rf, ok := ret.Get(0).(func(context.Context, feeds.RegisterManagerParams) int64); ok { + r0 = rf(ctx, params) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, feeds.RegisterManagerParams) error); ok { + r1 = rf(ctx, params) + } else { + r1 
= ret.Error(1) + } + + return r0, r1 +} + +// RejectSpec provides a mock function with given fields: ctx, id +func (_m *Service) RejectSpec(ctx context.Context, id int64) error { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for RejectSpec") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RevokeJob provides a mock function with given fields: ctx, args +func (_m *Service) RevokeJob(ctx context.Context, args *feeds.RevokeJobArgs) (int64, error) { + ret := _m.Called(ctx, args) + + if len(ret) == 0 { + panic("no return value specified for RevokeJob") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *feeds.RevokeJobArgs) (int64, error)); ok { + return rf(ctx, args) + } + if rf, ok := ret.Get(0).(func(context.Context, *feeds.RevokeJobArgs) int64); ok { + r0 = rf(ctx, args) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, *feeds.RevokeJobArgs) error); ok { + r1 = rf(ctx, args) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Start provides a mock function with given fields: ctx +func (_m *Service) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SyncNodeInfo provides a mock function with given fields: ctx, id +func (_m *Service) SyncNodeInfo(ctx context.Context, id int64) error { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for SyncNodeInfo") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Unsafe_SetConnectionsManager provides a mock 
function with given fields: _a0 +func (_m *Service) Unsafe_SetConnectionsManager(_a0 feeds.ConnectionsManager) { + _m.Called(_a0) +} + +// UpdateChainConfig provides a mock function with given fields: ctx, cfg +func (_m *Service) UpdateChainConfig(ctx context.Context, cfg feeds.ChainConfig) (int64, error) { + ret := _m.Called(ctx, cfg) + + if len(ret) == 0 { + panic("no return value specified for UpdateChainConfig") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, feeds.ChainConfig) (int64, error)); ok { + return rf(ctx, cfg) + } + if rf, ok := ret.Get(0).(func(context.Context, feeds.ChainConfig) int64); ok { + r0 = rf(ctx, cfg) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, feeds.ChainConfig) error); ok { + r1 = rf(ctx, cfg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateManager provides a mock function with given fields: ctx, mgr +func (_m *Service) UpdateManager(ctx context.Context, mgr feeds.FeedsManager) error { + ret := _m.Called(ctx, mgr) + + if len(ret) == 0 { + panic("no return value specified for UpdateManager") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, feeds.FeedsManager) error); ok { + r0 = rf(ctx, mgr) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateSpecDefinition provides a mock function with given fields: ctx, id, spec +func (_m *Service) UpdateSpecDefinition(ctx context.Context, id int64, spec string) error { + ret := _m.Called(ctx, id, spec) + + if len(ret) == 0 { + panic("no return value specified for UpdateSpecDefinition") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64, string) error); ok { + r0 = rf(ctx, id, spec) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewService creates a new instance of Service. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewService(t interface { + mock.TestingT + Cleanup(func()) +}) *Service { + mock := &Service{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/feeds/models.go b/core/services/feeds/models.go new file mode 100644 index 00000000..02a96483 --- /dev/null +++ b/core/services/feeds/models.go @@ -0,0 +1,269 @@ +package feeds + +import ( + "database/sql/driver" + "encoding/json" + "strings" + "time" + + "github.com/google/uuid" + "github.com/lib/pq" + "github.com/pkg/errors" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/utils/crypto" +) + +const ( + JobTypeFluxMonitor = "fluxmonitor" + JobTypeOffchainReporting = "ocr" + JobTypeOffchainReporting2 = "ocr2" +) + +type PluginType string + +const ( + PluginTypeCommit PluginType = "COMMIT" + PluginTypeExecute PluginType = "EXECUTE" + PluginTypeMedian PluginType = "MEDIAN" + PluginTypeMercury PluginType = "MERCURY" + PluginTypeUnknown PluginType = "UNKNOWN" +) + +func FromPluginTypeInput(pt PluginType) string { + return strings.ToLower(string(pt)) +} + +func ToPluginType(s string) (PluginType, error) { + switch s { + case "commit": + return PluginTypeCommit, nil + case "execute": + return PluginTypeExecute, nil + case "median": + return PluginTypeMedian, nil + case "mercury": + return PluginTypeMercury, nil + default: + return PluginTypeUnknown, errors.New("unknown plugin type") + } +} + +type Plugins struct { + Commit bool `json:"commit"` + Execute bool `json:"execute"` + Median bool `json:"median"` + Mercury bool `json:"mercury"` +} + +func (p Plugins) Value() (driver.Value, error) { + return json.Marshal(p) +} + +func (p *Plugins) Scan(value interface{}) error { + b, ok := value.(string) + if !ok { + return errors.New("type assertion to []byte failed") + } + + return json.Unmarshal([]byte(b), &p) +} + +type ChainType string + +const ( + ChainTypeUnknown ChainType = 
"UNKNOWN" + ChainTypeEVM ChainType = "EVM" +) + +func NewChainType(s string) (ChainType, error) { + switch s { + case "EVM": + return ChainTypeEVM, nil + default: + return ChainTypeUnknown, errors.New("invalid chain type") + } +} + +// FeedsManager defines a registered Feeds Manager Service and the connection +// information. +type FeedsManager struct { + ID int64 + Name string + URI string + PublicKey crypto.PublicKey + IsConnectionActive bool + CreatedAt time.Time + UpdatedAt time.Time +} + +// ChainConfig defines the chain configuration for a Feeds Manager. +type ChainConfig struct { + ID int64 + FeedsManagerID int64 + ChainID string + ChainType ChainType + AccountAddress string + AdminAddress string + FluxMonitorConfig FluxMonitorConfig + OCR1Config OCR1Config + OCR2Config OCR2ConfigModel + CreatedAt time.Time + UpdatedAt time.Time +} + +// FluxMonitorConfig defines configuration for FluxMonitorJobs. +type FluxMonitorConfig struct { + Enabled bool `json:"enabled"` +} + +func (c FluxMonitorConfig) Value() (driver.Value, error) { + return json.Marshal(c) +} + +func (c *FluxMonitorConfig) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return errors.New("type assertion to []byte failed") + } + + return json.Unmarshal(b, &c) +} + +// OCR1Config defines configuration for OCR1 Jobs. +type OCR1Config struct { + Enabled bool `json:"enabled"` + IsBootstrap bool `json:"is_bootstrap"` + Multiaddr null.String `json:"multiaddr"` + P2PPeerID null.String `json:"p2p_peer_id"` + KeyBundleID null.String `json:"key_bundle_id"` +} + +func (c OCR1Config) Value() (driver.Value, error) { + return json.Marshal(c) +} + +func (c *OCR1Config) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return errors.New("type assertion to []byte failed") + } + + return json.Unmarshal(b, &c) +} + +// OCR2ConfigModel defines configuration for OCR2 Jobs. 
+type OCR2ConfigModel struct { + Enabled bool `json:"enabled"` + IsBootstrap bool `json:"is_bootstrap"` + Multiaddr null.String `json:"multiaddr"` + ForwarderAddress null.String `json:"forwarder_address"` + P2PPeerID null.String `json:"p2p_peer_id"` + KeyBundleID null.String `json:"key_bundle_id"` + Plugins Plugins `json:"plugins"` +} + +func (c OCR2ConfigModel) Value() (driver.Value, error) { + return json.Marshal(c) +} + +func (c *OCR2ConfigModel) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return errors.New("type assertion to []byte failed") + } + + return json.Unmarshal(b, &c) +} + +// JobProposalStatus are the status codes that define the stage of a proposal +type JobProposalStatus string + +const ( + JobProposalStatusPending JobProposalStatus = "pending" + JobProposalStatusApproved JobProposalStatus = "approved" + JobProposalStatusRejected JobProposalStatus = "rejected" + JobProposalStatusCancelled JobProposalStatus = "cancelled" + JobProposalStatusDeleted JobProposalStatus = "deleted" + JobProposalStatusRevoked JobProposalStatus = "revoked" +) + +// JobProposal represents a proposal which has been sent by a Feeds Manager. +// +// A job proposal has multiple spec versions which are created each time +// the Feeds Manager sends a new proposal version. +type JobProposal struct { + ID int64 + Name null.String + RemoteUUID uuid.UUID // RemoteUUID is the uuid of the proposal in FMS. + Status JobProposalStatus + ExternalJobID uuid.NullUUID // ExternalJobID is the external job id in the job spec. + FeedsManagerID int64 + Multiaddrs pq.StringArray + PendingUpdate bool + CreatedAt time.Time + UpdatedAt time.Time +} + +// SpecStatus is the status of each proposed spec. +type SpecStatus string + +const ( + // SpecStatusPending defines a spec status which has been proposed by the + // FMS. + SpecStatusPending SpecStatus = "pending" + // SpecStatusApproved defines a spec status which the node op has approved. 
+ // An approved spec is currently being run by the node. + SpecStatusApproved SpecStatus = "approved" + // SpecStatusRejected defines a spec status which was proposed, but was + // rejected by the node op. + SpecStatusRejected SpecStatus = "rejected" + // SpecStatusCancelled defines a spec status which was previously approved, + // but cancelled by the node op. A cancelled spec is not being run by the + // node. + SpecStatusCancelled SpecStatus = "cancelled" + // SpecStatusRevoked defines a spec status which was revoked. A revoked spec cannot be + // approved. + SpecStatusRevoked SpecStatus = "revoked" +) + +// JobProposalSpec defines a versioned proposed spec for a JobProposal. +type JobProposalSpec struct { + ID int64 + Definition string + Status SpecStatus + Version int32 + JobProposalID int64 + StatusUpdatedAt time.Time + CreatedAt time.Time + UpdatedAt time.Time +} + +// CanEditDefinition checks if the spec definition can be edited. +func (s *JobProposalSpec) CanEditDefinition() bool { + return s.Status == SpecStatusPending || + s.Status == SpecStatusCancelled +} + +// JobProposalCounts defines the counts for job proposals of each status. +type JobProposalCounts struct { + Pending int64 + Cancelled int64 + Approved int64 + Rejected int64 + Deleted int64 + Revoked int64 +} + +// toMetrics transforms JobProposalCounts into a map with float64 values for setting metrics +// in prometheus. 
+func (jpc *JobProposalCounts) toMetrics() map[JobProposalStatus]float64 { + metrics := make(map[JobProposalStatus]float64, 6) + metrics[JobProposalStatusPending] = float64(jpc.Pending) + metrics[JobProposalStatusApproved] = float64(jpc.Approved) + metrics[JobProposalStatusCancelled] = float64(jpc.Cancelled) + metrics[JobProposalStatusRejected] = float64(jpc.Rejected) + metrics[JobProposalStatusRevoked] = float64(jpc.Revoked) + metrics[JobProposalStatusDeleted] = float64(jpc.Deleted) + return metrics +} diff --git a/core/services/feeds/models_test.go b/core/services/feeds/models_test.go new file mode 100644 index 00000000..8a26d77e --- /dev/null +++ b/core/services/feeds/models_test.go @@ -0,0 +1,403 @@ +package feeds + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" +) + +func Test_NewChainType(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + give string + want ChainType + wantErr error + }{ + { + name: "EVM Chain Type", + give: "EVM", + want: ChainTypeEVM, + }, + { + name: "Invalid Chain Type", + give: "", + want: ChainTypeUnknown, + wantErr: errors.New("invalid chain type"), + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + ct, err := NewChainType(tt.give) + + assert.Equal(t, tt.want, ct) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } + }) + } +} + +func Test_ToPluginType(t *testing.T) { + t.Parallel() + + pt, err := ToPluginType("commit") + require.NoError(t, err) + assert.Equal(t, pt, PluginTypeCommit) + + pt, err = ToPluginType("execute") + require.NoError(t, err) + assert.Equal(t, pt, PluginTypeExecute) + + pt, err = ToPluginType("median") + require.NoError(t, err) + assert.Equal(t, pt, PluginTypeMedian) + + pt, err = ToPluginType("mercury") + require.NoError(t, err) + assert.Equal(t, pt, PluginTypeMercury) + + pt, err = ToPluginType("xxx") + 
require.Error(t, err) + assert.Equal(t, pt, PluginTypeUnknown) + assert.EqualError(t, err, "unknown plugin type") +} + +func Test_FromPluginType(t *testing.T) { + t.Parallel() + + assert.Equal(t, "commit", FromPluginTypeInput(PluginTypeCommit)) + assert.Equal(t, "execute", FromPluginTypeInput(PluginTypeExecute)) + assert.Equal(t, "median", FromPluginTypeInput(PluginTypeMedian)) + assert.Equal(t, "mercury", FromPluginTypeInput(PluginTypeMercury)) + assert.Equal(t, "unknown", FromPluginTypeInput(PluginTypeUnknown)) +} + +func Test_FluxMonitorConfig_Value(t *testing.T) { + t.Parallel() + + cfg := FluxMonitorConfig{Enabled: true} + want := `{"enabled":true}` + + val, err := cfg.Value() + require.NoError(t, err) + + actual, ok := val.([]byte) + require.True(t, ok) + + assert.Equal(t, want, string(actual)) +} + +func Test_FluxMonitorConfig_Scan(t *testing.T) { + t.Parallel() + + var ( + give = `{"enabled":true}` + want = FluxMonitorConfig{Enabled: true} + ) + + var actual FluxMonitorConfig + err := actual.Scan([]byte(give)) + require.NoError(t, err) + + assert.Equal(t, want, actual) +} + +func Test_OCR1Config_Value(t *testing.T) { + t.Parallel() + + var ( + multiaddr = "multiaddr" + p2pPeerID = "peerid" + keyBundleID = "ocrkeyid" + ) + + tests := []struct { + name string + give OCR1Config + want string + }{ + { + name: "all fields populated", + give: OCR1Config{ + Enabled: true, + IsBootstrap: false, + Multiaddr: null.StringFrom(multiaddr), + P2PPeerID: null.StringFrom(p2pPeerID), + KeyBundleID: null.StringFrom(keyBundleID), + }, + want: `{"enabled":true,"is_bootstrap":false,"multiaddr":"multiaddr","p2p_peer_id":"peerid","key_bundle_id":"ocrkeyid"}`, + }, + { + name: "bootstrap fields populated", + give: OCR1Config{ + Enabled: true, + IsBootstrap: true, + Multiaddr: null.StringFrom(multiaddr), + P2PPeerID: null.StringFromPtr(nil), + KeyBundleID: null.StringFromPtr(nil), + }, + want: 
`{"enabled":true,"is_bootstrap":true,"multiaddr":"multiaddr","p2p_peer_id":null,"key_bundle_id":null}`, + }, + { + name: "multiaddr field populated", + give: OCR1Config{ + Enabled: true, + IsBootstrap: false, + Multiaddr: null.StringFromPtr(nil), + P2PPeerID: null.StringFrom(p2pPeerID), + KeyBundleID: null.StringFrom(keyBundleID), + }, + want: `{"enabled":true,"is_bootstrap":false,"multiaddr":null,"p2p_peer_id":"peerid","key_bundle_id":"ocrkeyid"}`, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + val, err := tt.give.Value() + require.NoError(t, err) + + actual, ok := val.([]byte) + require.True(t, ok) + + assert.Equal(t, tt.want, string(actual)) + }) + } +} + +func Test_OCR1Config_Scan(t *testing.T) { + t.Parallel() + + var ( + multiaddr = "multiaddr" + p2pPeerID = "peerid" + keyBundleID = "ocrkeyid" + ) + + tests := []struct { + name string + give string + want OCR1Config + }{ + { + name: "all fields populated", + give: `{"enabled":true,"is_bootstrap":false,"multiaddr":"multiaddr","p2p_peer_id":"peerid","key_bundle_id":"ocrkeyid"}`, + want: OCR1Config{ + Enabled: true, + IsBootstrap: false, + Multiaddr: null.StringFrom(multiaddr), + P2PPeerID: null.StringFrom(p2pPeerID), + KeyBundleID: null.StringFrom(keyBundleID), + }, + }, + { + name: "bootstrap fields populated", + give: `{"enabled":true,"is_bootstrap":true,"multiaddr":"multiaddr","p2p_peer_id":null,"key_bundle_id":null}`, + want: OCR1Config{ + Enabled: true, + IsBootstrap: true, + Multiaddr: null.StringFrom(multiaddr), + P2PPeerID: null.StringFromPtr(nil), + KeyBundleID: null.StringFromPtr(nil), + }, + }, + { + name: "multiaddr field populated", + give: `{"enabled":true,"is_bootstrap":false,"multiaddr":null,"p2p_peer_id":"peerid","key_bundle_id":"ocrkeyid"}`, + want: OCR1Config{ + Enabled: true, + IsBootstrap: false, + Multiaddr: null.StringFromPtr(nil), + P2PPeerID: null.StringFrom(p2pPeerID), + KeyBundleID: null.StringFrom(keyBundleID), + }, + }, + } + + for _, tt 
:= range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + var actual OCR1Config + err := actual.Scan([]byte(tt.give)) + require.NoError(t, err) + + assert.Equal(t, tt.want, actual) + }) + } +} + +func Test_Plugins_Value(t *testing.T) { + t.Parallel() + + var ( + give = Plugins{ + Commit: true, + Execute: true, + Median: false, + Mercury: true, + } + want = `{"commit":true,"execute":true,"median":false,"mercury":true}` + ) + + val, err := give.Value() + require.NoError(t, err) + + actual, ok := val.([]byte) + require.True(t, ok) + + assert.Equal(t, want, string(actual)) +} + +func Test_Plugins_Scan(t *testing.T) { + t.Parallel() + + var ( + give = `{"commit":true,"execute":true,"median":false,"mercury":true}` + want = Plugins{ + Commit: true, + Execute: true, + Median: false, + Mercury: true, + } + ) + + var actual Plugins + err := actual.Scan(give) + require.NoError(t, err) + + assert.Equal(t, want, actual) +} + +func Test_OCR2Config_Value(t *testing.T) { + t.Parallel() + + var ( + give = OCR2ConfigModel{ + Enabled: true, + IsBootstrap: false, + Multiaddr: null.StringFrom("multiaddr"), + ForwarderAddress: null.StringFrom("forwarderaddress"), + P2PPeerID: null.StringFrom("peerid"), + KeyBundleID: null.StringFrom("ocrkeyid"), + Plugins: Plugins{ + Commit: true, + Execute: true, + Median: false, + Mercury: true, + }, + } + want = `{"enabled":true,"is_bootstrap":false,"multiaddr":"multiaddr","forwarder_address":"forwarderaddress","p2p_peer_id":"peerid","key_bundle_id":"ocrkeyid","plugins":{"commit":true,"execute":true,"median":false,"mercury":true}}` + ) + + val, err := give.Value() + require.NoError(t, err) + + actual, ok := val.([]byte) + require.True(t, ok) + + assert.Equal(t, want, string(actual)) +} + +func Test_OCR2Config_Scan(t *testing.T) { + t.Parallel() + + var ( + give = 
`{"enabled":true,"is_bootstrap":false,"multiaddr":"multiaddr","forwarder_address":"forwarderaddress","p2p_peer_id":"peerid","key_bundle_id":"ocrkeyid","plugins":{"commit":true,"execute":true,"median":false,"mercury":true}}` + want = OCR2ConfigModel{ + Enabled: true, + IsBootstrap: false, + Multiaddr: null.StringFrom("multiaddr"), + ForwarderAddress: null.StringFrom("forwarderaddress"), + P2PPeerID: null.StringFrom("peerid"), + KeyBundleID: null.StringFrom("ocrkeyid"), + Plugins: Plugins{ + Commit: true, + Execute: true, + Median: false, + Mercury: true, + }, + } + ) + + var actual OCR2ConfigModel + err := actual.Scan([]byte(give)) + require.NoError(t, err) + + assert.Equal(t, want, actual) +} + +func Test_JobProposal_CanEditDefinition(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + status SpecStatus + want bool + }{ + { + name: "pending", + status: SpecStatusPending, + want: true, + }, + { + name: "cancelled", + status: SpecStatusCancelled, + want: true, + }, + { + name: "approved", + status: SpecStatusApproved, + want: false, + }, + { + name: "rejected", + status: SpecStatusRejected, + want: false, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + jp := &JobProposalSpec{Status: tc.status} + assert.Equal(t, tc.want, jp.CanEditDefinition()) + }) + } +} + +// Test_toMetrics tests the toMetrics method +func Test_toMetrics(t *testing.T) { + t.Parallel() + + jpCounts := JobProposalCounts{ + Cancelled: 0, + Pending: 1, + Approved: 2, + Rejected: 3, + Deleted: 4, + Revoked: 5, + } + + metrics := jpCounts.toMetrics() + + assert.Equal(t, metrics, map[JobProposalStatus]float64{ + JobProposalStatusCancelled: 0, + JobProposalStatusPending: 1, + JobProposalStatusApproved: 2, + JobProposalStatusRejected: 3, + JobProposalStatusDeleted: 4, + JobProposalStatusRevoked: 5, + }) +} diff --git a/core/services/feeds/orm.go b/core/services/feeds/orm.go new file mode 100644 index 00000000..988b1ba0 --- 
/dev/null +++ b/core/services/feeds/orm.go @@ -0,0 +1,818 @@ +package feeds + +import ( + "database/sql" + "fmt" + "strings" + + "github.com/google/uuid" + "github.com/lib/pq" + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +//go:generate mockery --with-expecter=true --quiet --name ORM --output ./mocks/ --case=underscore + +type ORM interface { + CountManagers() (int64, error) + CreateManager(ms *FeedsManager, qopts ...pg.QOpt) (int64, error) + GetManager(id int64) (*FeedsManager, error) + ListManagers() (mgrs []FeedsManager, err error) + ListManagersByIDs(ids []int64) ([]FeedsManager, error) + UpdateManager(mgr FeedsManager, qopts ...pg.QOpt) error + + CreateBatchChainConfig(cfgs []ChainConfig, qopts ...pg.QOpt) ([]int64, error) + CreateChainConfig(cfg ChainConfig, qopts ...pg.QOpt) (int64, error) + DeleteChainConfig(id int64) (int64, error) + GetChainConfig(id int64) (*ChainConfig, error) + ListChainConfigsByManagerIDs(mgrIDs []int64) ([]ChainConfig, error) + UpdateChainConfig(cfg ChainConfig) (int64, error) + + CountJobProposals() (int64, error) + CountJobProposalsByStatus() (counts *JobProposalCounts, err error) + CreateJobProposal(jp *JobProposal) (int64, error) + DeleteProposal(id int64, qopts ...pg.QOpt) error + GetJobProposal(id int64, qopts ...pg.QOpt) (*JobProposal, error) + GetJobProposalByRemoteUUID(uuid uuid.UUID) (*JobProposal, error) + ListJobProposals() (jps []JobProposal, err error) + ListJobProposalsByManagersIDs(ids []int64, qopts ...pg.QOpt) ([]JobProposal, error) + UpdateJobProposalStatus(id int64, status JobProposalStatus, qopts ...pg.QOpt) error // NEEDED? 
+ UpsertJobProposal(jp *JobProposal, qopts ...pg.QOpt) (int64, error) + + ApproveSpec(id int64, externalJobID uuid.UUID, qopts ...pg.QOpt) error + CancelSpec(id int64, qopts ...pg.QOpt) error + CreateSpec(spec JobProposalSpec, qopts ...pg.QOpt) (int64, error) + ExistsSpecByJobProposalIDAndVersion(jpID int64, version int32, qopts ...pg.QOpt) (exists bool, err error) + GetApprovedSpec(jpID int64, qopts ...pg.QOpt) (*JobProposalSpec, error) + GetLatestSpec(jpID int64) (*JobProposalSpec, error) + GetSpec(id int64, qopts ...pg.QOpt) (*JobProposalSpec, error) + ListSpecsByJobProposalIDs(ids []int64, qopts ...pg.QOpt) ([]JobProposalSpec, error) + RejectSpec(id int64, qopts ...pg.QOpt) error + RevokeSpec(id int64, qopts ...pg.QOpt) error + UpdateSpecDefinition(id int64, spec string, qopts ...pg.QOpt) error + + IsJobManaged(jobID int64, qopts ...pg.QOpt) (bool, error) +} + +var _ ORM = &orm{} + +type orm struct { + q pg.Q +} + +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) *orm { + return &orm{ + q: pg.NewQ(db, lggr, cfg), + } +} + +// Count counts the number of feeds manager records. +func (o *orm) CountManagers() (count int64, err error) { + stmt := ` +SELECT COUNT(*) +FROM feeds_managers + ` + + err = o.q.Get(&count, stmt) + return count, errors.Wrap(err, "CountManagers failed") +} + +// CreateManager creates a feeds manager. +func (o *orm) CreateManager(ms *FeedsManager, qopts ...pg.QOpt) (id int64, err error) { + stmt := ` +INSERT INTO feeds_managers (name, uri, public_key, created_at, updated_at) +VALUES ($1,$2,$3,NOW(),NOW()) +RETURNING id; +` + err = o.q.WithOpts(qopts...).Get(&id, stmt, ms.Name, ms.URI, ms.PublicKey) + + return id, errors.Wrap(err, "CreateManager failed") +} + +// CreateChainConfig creates a new chain config. 
+func (o *orm) CreateChainConfig(cfg ChainConfig, qopts ...pg.QOpt) (id int64, err error) { + stmt := ` +INSERT INTO feeds_manager_chain_configs (feeds_manager_id, chain_id, chain_type, account_address, admin_address, flux_monitor_config, ocr1_config, ocr2_config, created_at, updated_at) +VALUES ($1,$2,$3,$4,$5,$6,$7,$8,NOW(),NOW()) +RETURNING id; +` + + err = o.q.WithOpts(qopts...).Get(&id, + stmt, + cfg.FeedsManagerID, + cfg.ChainID, + cfg.ChainType, + cfg.AccountAddress, + cfg.AdminAddress, + cfg.FluxMonitorConfig, + cfg.OCR1Config, + cfg.OCR2Config, + ) + + return id, errors.Wrap(err, "CreateChainConfig failed") +} + +// CreateBatchChainConfig creates multiple chain configs. +func (o *orm) CreateBatchChainConfig(cfgs []ChainConfig, qopts ...pg.QOpt) (ids []int64, err error) { + if len(cfgs) == 0 { + return + } + + stmt := ` +INSERT INTO feeds_manager_chain_configs (feeds_manager_id, chain_id, chain_type, account_address, admin_address, flux_monitor_config, ocr1_config, ocr2_config, created_at, updated_at) +VALUES %s +RETURNING id; + ` + + var ( + vStrs = make([]string, 0, len(cfgs)) + vArgs = make([]interface{}, 0) + ) + + for i, cfg := range cfgs { + // Generate the placeholders + pnumidx := i * 8 + + lo, hi := pnumidx+1, pnumidx+8 + pnums := make([]any, hi-lo+1) + for i := range pnums { + pnums[i] = i + lo + } + + vStrs = append(vStrs, fmt.Sprintf( + "($%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d, NOW(), NOW())", pnums..., + )) + + // Append the values + vArgs = append(vArgs, + cfg.FeedsManagerID, + cfg.ChainID, + cfg.ChainType, + cfg.AccountAddress, + cfg.AdminAddress, + cfg.FluxMonitorConfig, + cfg.OCR1Config, + cfg.OCR2Config, + ) + } + + err = o.q.WithOpts(qopts...).Select(&ids, + fmt.Sprintf(stmt, strings.Join(vStrs, ",")), + vArgs..., + ) + + return ids, errors.Wrap(err, "CreateBatchChainConfig failed") +} + +// DeleteChainConfig deletes a chain config. 
+func (o *orm) DeleteChainConfig(id int64) (int64, error) { + stmt := ` +DELETE FROM feeds_manager_chain_configs +WHERE id = $1 +RETURNING id; +` + + var ccid int64 + err := o.q.Get(&ccid, stmt, id) + + return ccid, errors.Wrap(err, "DeleteChainConfig failed") +} + +// GetChainConfig fetches a chain config. +func (o *orm) GetChainConfig(id int64) (*ChainConfig, error) { + stmt := ` +SELECT id, feeds_manager_id, chain_id, chain_type, account_address, admin_address, flux_monitor_config, ocr1_config, ocr2_config, created_at, updated_at +FROM feeds_manager_chain_configs +WHERE id = $1; +` + + var cfg ChainConfig + err := o.q.Get(&cfg, stmt, id) + + return &cfg, errors.Wrap(err, "GetChainConfig failed") +} + +// ListChainConfigsByManagerIDs fetches the chain configs matching all manager +// ids. +func (o *orm) ListChainConfigsByManagerIDs(mgrIDs []int64) ([]ChainConfig, error) { + stmt := ` +SELECT id, feeds_manager_id, chain_id, chain_type, account_address, admin_address, flux_monitor_config, ocr1_config, ocr2_config, created_at, updated_at +FROM feeds_manager_chain_configs +WHERE feeds_manager_id = ANY($1) + ` + + var cfgs []ChainConfig + err := o.q.Select(&cfgs, stmt, mgrIDs) + + return cfgs, errors.Wrap(err, "ListJobProposalsByManagersIDs failed") +} + +// UpdateChainConfig updates a chain config. +func (o *orm) UpdateChainConfig(cfg ChainConfig) (int64, error) { + stmt := ` +UPDATE feeds_manager_chain_configs +SET account_address = $1, + admin_address = $2, + flux_monitor_config = $3, + ocr1_config = $4, + ocr2_config = $5, + updated_at = NOW() +WHERE id = $6 +RETURNING id; +` + + var cfgID int64 + err := o.q.Get(&cfgID, stmt, + cfg.AccountAddress, + cfg.AdminAddress, + cfg.FluxMonitorConfig, + cfg.OCR1Config, + cfg.OCR2Config, + cfg.ID, + ) + + return cfgID, errors.Wrap(err, "UpdateChainConfig failed") +} + +// GetManager gets a feeds manager by id. 
+func (o *orm) GetManager(id int64) (mgr *FeedsManager, err error) { + stmt := ` +SELECT id, name, uri, public_key, created_at, updated_at +FROM feeds_managers +WHERE id = $1 +` + + mgr = new(FeedsManager) + err = o.q.Get(mgr, stmt, id) + return mgr, errors.Wrap(err, "GetManager failed") +} + +// ListManager lists all feeds managers. +func (o *orm) ListManagers() (mgrs []FeedsManager, err error) { + stmt := ` +SELECT id, name, uri, public_key, created_at, updated_at +FROM feeds_managers; +` + + err = o.q.Select(&mgrs, stmt) + return mgrs, errors.Wrap(err, "ListManagers failed") +} + +// ListManagersByIDs gets feeds managers by ids. +func (o *orm) ListManagersByIDs(ids []int64) (managers []FeedsManager, err error) { + stmt := ` +SELECT id, name, uri, public_key, created_at, updated_at +FROM feeds_managers +WHERE id = ANY($1) +ORDER BY created_at, id;` + + mgrIds := pq.Array(ids) + err = o.q.Select(&managers, stmt, mgrIds) + + return managers, errors.Wrap(err, "GetManagers failed") +} + +// UpdateManager updates the manager details. +func (o *orm) UpdateManager(mgr FeedsManager, qopts ...pg.QOpt) (err error) { + stmt := ` +UPDATE feeds_managers +SET name = $1, uri = $2, public_key = $3, updated_at = NOW() +WHERE id = $4; +` + + res, err := o.q.WithOpts(qopts...).Exec(stmt, mgr.Name, mgr.URI, mgr.PublicKey, mgr.ID) + if err != nil { + return errors.Wrap(err, "UpdateManager failed to update feeds_managers") + } + rowsAffected, err := res.RowsAffected() + if err != nil { + return errors.Wrap(err, "UpdateManager failed to get RowsAffected") + } + if rowsAffected == 0 { + return sql.ErrNoRows + } + return nil +} + +// CreateJobProposal creates a job proposal. 
+func (o *orm) CreateJobProposal(jp *JobProposal) (id int64, err error) { + stmt := ` +INSERT INTO job_proposals (name, remote_uuid, status, feeds_manager_id, multiaddrs, created_at, updated_at) +VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) +RETURNING id; +` + + err = o.q.Get(&id, stmt, jp.Name, jp.RemoteUUID, jp.Status, jp.FeedsManagerID, jp.Multiaddrs) + return id, errors.Wrap(err, "CreateJobProposal failed") +} + +// CountJobProposals counts the number of job proposal records. +func (o *orm) CountJobProposals() (count int64, err error) { + stmt := `SELECT COUNT(*) FROM job_proposals` + + err = o.q.Get(&count, stmt) + return count, errors.Wrap(err, "CountJobProposals failed") +} + +// CountJobProposals counts the number of job proposal records. +func (o *orm) CountJobProposalsByStatus() (counts *JobProposalCounts, err error) { + stmt := ` +SELECT + COUNT(*) filter (where job_proposals.status = 'pending' OR job_proposals.pending_update = TRUE) as pending, + COUNT(*) filter (where job_proposals.status = 'approved' AND job_proposals.pending_update = FALSE) as approved, + COUNT(*) filter (where job_proposals.status = 'rejected' AND job_proposals.pending_update = FALSE) as rejected, + COUNT(*) filter (where job_proposals.status = 'revoked' AND job_proposals.pending_update = FALSE) as revoked, + COUNT(*) filter (where job_proposals.status = 'deleted' AND job_proposals.pending_update = FALSE) as deleted, + COUNT(*) filter (where job_proposals.status = 'cancelled' AND job_proposals.pending_update = FALSE) as cancelled +FROM job_proposals; + ` + + counts = new(JobProposalCounts) + err = o.q.Get(counts, stmt) + return counts, errors.Wrap(err, "CountJobProposalsByStatus failed") +} + +// GetJobProposal gets a job proposal by id. 
+func (o *orm) GetJobProposal(id int64, qopts ...pg.QOpt) (jp *JobProposal, err error) { + stmt := ` +SELECT * +FROM job_proposals +WHERE id = $1 +` + jp = new(JobProposal) + err = o.q.WithOpts(qopts...).Get(jp, stmt, id) + return jp, errors.Wrap(err, "GetJobProposal failed") +} + +// GetJobProposalByRemoteUUID gets a job proposal by the remote FMS uuid. This +// method will filter out the deleted job proposals. To get all job proposals, +// use the GetJobProposal get by id method. +func (o *orm) GetJobProposalByRemoteUUID(id uuid.UUID) (jp *JobProposal, err error) { + stmt := ` +SELECT * +FROM job_proposals +WHERE remote_uuid = $1 +AND status <> $2; +` + + jp = new(JobProposal) + err = o.q.Get(jp, stmt, id, JobProposalStatusDeleted) + return jp, errors.Wrap(err, "GetJobProposalByRemoteUUID failed") +} + +// ListJobProposals lists all job proposals. +func (o *orm) ListJobProposals() (jps []JobProposal, err error) { + stmt := ` +SELECT * +FROM job_proposals; +` + + err = o.q.Select(&jps, stmt) + return jps, errors.Wrap(err, "ListJobProposals failed") +} + +// ListJobProposalsByManagersIDs gets job proposals by feeds managers IDs. +func (o *orm) ListJobProposalsByManagersIDs(ids []int64, qopts ...pg.QOpt) ([]JobProposal, error) { + stmt := ` +SELECT * +FROM job_proposals +WHERE feeds_manager_id = ANY($1) +` + var jps []JobProposal + err := o.q.WithOpts(qopts...).Select(&jps, stmt, ids) + return jps, errors.Wrap(err, "ListJobProposalsByManagersIDs failed") +} + +// UpdateJobProposalStatus updates the status of a job proposal by id. 
+func (o *orm) UpdateJobProposalStatus(id int64, status JobProposalStatus, qopts ...pg.QOpt) error { + stmt := ` +UPDATE job_proposals +SET status = $1, + updated_at = NOW() +WHERE id = $2; +` + + result, err := o.q.WithOpts(qopts...).Exec(stmt, status, id) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + + if rowsAffected == 0 { + return sql.ErrNoRows + } + + return nil +} + +// UpsertJobProposal creates a job proposal if it does not exist. If it does exist, +// then we update the details of the existing job proposal only if the provided +// feeds manager id exists. +func (o *orm) UpsertJobProposal(jp *JobProposal, qopts ...pg.QOpt) (id int64, err error) { + stmt := ` +INSERT INTO job_proposals (name, remote_uuid, status, feeds_manager_id, multiaddrs, created_at, updated_at) +VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) +ON CONFLICT (remote_uuid) +DO + UPDATE SET + pending_update = TRUE, + name = EXCLUDED.name, + status = ( + CASE + WHEN job_proposals.status = 'deleted' THEN 'deleted'::job_proposal_status + WHEN job_proposals.status = 'approved' THEN 'approved'::job_proposal_status + ELSE EXCLUDED.status + END + ), + multiaddrs = EXCLUDED.multiaddrs, + updated_at = EXCLUDED.updated_at +RETURNING id; +` + + err = o.q.WithOpts(qopts...).Get(&id, stmt, jp.Name, jp.RemoteUUID, jp.Status, jp.FeedsManagerID, jp.Multiaddrs) + return id, errors.Wrap(err, "UpsertJobProposal") +} + +// ApproveSpec approves the spec and sets the external job ID on the associated +// job proposal. 
+func (o *orm) ApproveSpec(id int64, externalJobID uuid.UUID, qopts ...pg.QOpt) error { + // Update the status of the approval + stmt := ` +UPDATE job_proposal_specs +SET status = $1, + status_updated_at = NOW(), + updated_at = NOW() +WHERE id = $2 +RETURNING job_proposal_id; +` + + var jpID int64 + if err := o.q.WithOpts(qopts...).Get(&jpID, stmt, JobProposalStatusApproved, id); err != nil { + return err + } + + // Update the job proposal external id + stmt = ` +UPDATE job_proposals +SET status = $1, + external_job_id = $2, + pending_update = FALSE, + updated_at = NOW() +WHERE id = $3; +` + + result, err := o.q.WithOpts(qopts...).Exec(stmt, JobProposalStatusApproved, externalJobID, jpID) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + + if rowsAffected == 0 { + return sql.ErrNoRows + } + + return nil +} + +// CancelSpec cancels the spec and removes the external job id from the associated job proposal. It +// sets the status of the spec and the proposal to cancelled, except in the case of deleted +// proposals. 
+func (o *orm) CancelSpec(id int64, qopts ...pg.QOpt) error { + // Update the status of the approval + stmt := ` +UPDATE job_proposal_specs +SET status = $1, + status_updated_at = NOW(), + updated_at = NOW() +WHERE id = $2 +RETURNING job_proposal_id; +` + + var jpID int64 + if err := o.q.WithOpts(qopts...).Get(&jpID, stmt, SpecStatusCancelled, id); err != nil { + return err + } + + stmt = ` +UPDATE job_proposals +SET status = ( + CASE + WHEN status = 'deleted' THEN 'deleted'::job_proposal_status + ELSE 'cancelled'::job_proposal_status + END + ), + pending_update = FALSE, + external_job_id = $2, + updated_at = NOW() +WHERE id = $1; +` + result, err := o.q.WithOpts(qopts...).Exec(stmt, jpID, nil) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + + if rowsAffected == 0 { + return sql.ErrNoRows + } + + return nil +} + +// CreateSpec creates a new job proposal spec +func (o *orm) CreateSpec(spec JobProposalSpec, qopts ...pg.QOpt) (int64, error) { + stmt := ` +INSERT INTO job_proposal_specs (definition, version, status, job_proposal_id, status_updated_at, created_at, updated_at) +VALUES ($1, $2, $3, $4, NOW(), NOW(), NOW()) +RETURNING id; +` + + var id int64 + err := o.q.WithOpts(qopts...).Get(&id, stmt, spec.Definition, spec.Version, spec.Status, spec.JobProposalID) + + return id, errors.Wrap(err, "CreateJobProposalSpec failed") +} + +// ExistsSpecByJobProposalIDAndVersion checks if a job proposal spec exists for a specific job +// proposal and version. 
+func (o *orm) ExistsSpecByJobProposalIDAndVersion(jpID int64, version int32, qopts ...pg.QOpt) (exists bool, err error) { + stmt := ` +SELECT exists ( + SELECT 1 + FROM job_proposal_specs + WHERE job_proposal_id = $1 AND version = $2 +); +` + + err = o.q.WithOpts(qopts...).Get(&exists, stmt, jpID, version) + return exists, errors.Wrap(err, "JobProposalSpecVersionExists failed") +} + +// DeleteProposal performs a soft delete of the job proposal by setting the status to deleted +func (o *orm) DeleteProposal(id int64, qopts ...pg.QOpt) error { + // Get the latest spec for the proposal. + stmt := ` + SELECT id, definition, version, status, job_proposal_id, status_updated_at, created_at, updated_at +FROM job_proposal_specs +WHERE (job_proposal_id, version) IN +( + SELECT job_proposal_id, MAX(version) + FROM job_proposal_specs + GROUP BY job_proposal_id +) +AND job_proposal_id = $1 +` + + var spec JobProposalSpec + err := o.q.WithOpts(qopts...).Get(&spec, stmt, id) + if err != nil { + return err + } + + // Set pending update to true only if the latest proposal is approved so that any running jobs + // are reminded to be cancelled. 
+ pendingUpdate := spec.Status == SpecStatusApproved + stmt = ` +UPDATE job_proposals +SET status = $1, + pending_update = $3, + updated_at = NOW() +WHERE id = $2; +` + + result, err := o.q.WithOpts(qopts...).Exec(stmt, JobProposalStatusDeleted, id, pendingUpdate) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + + if rowsAffected == 0 { + return sql.ErrNoRows + } + + return nil +} + +// GetSpec fetches the job proposal spec by id +func (o *orm) GetSpec(id int64, qopts ...pg.QOpt) (*JobProposalSpec, error) { + stmt := ` +SELECT id, definition, version, status, job_proposal_id, status_updated_at, created_at, updated_at +FROM job_proposal_specs +WHERE id = $1; +` + var spec JobProposalSpec + err := o.q.WithOpts(qopts...).Get(&spec, stmt, id) + + return &spec, errors.Wrap(err, "CreateJobProposalSpec failed") +} + +// GetApprovedSpec gets the approved spec for a job proposal +func (o *orm) GetApprovedSpec(jpID int64, qopts ...pg.QOpt) (*JobProposalSpec, error) { + stmt := ` +SELECT id, definition, version, status, job_proposal_id, status_updated_at, created_at, updated_at +FROM job_proposal_specs +WHERE status = $1 +AND job_proposal_id = $2 +` + + var spec JobProposalSpec + err := o.q.WithOpts(qopts...).Get(&spec, stmt, SpecStatusApproved, jpID) + + return &spec, errors.Wrap(err, "GetApprovedSpec failed") +} + +// GetLatestSpec gets the latest spec for a job proposal. 
+func (o *orm) GetLatestSpec(jpID int64) (*JobProposalSpec, error) { + stmt := ` + SELECT id, definition, version, status, job_proposal_id, status_updated_at, created_at, updated_at +FROM job_proposal_specs +WHERE (job_proposal_id, version) IN +( + SELECT job_proposal_id, MAX(version) + FROM job_proposal_specs + GROUP BY job_proposal_id +) +AND job_proposal_id = $1 +` + + var spec JobProposalSpec + err := o.q.Get(&spec, stmt, jpID) + + return &spec, errors.Wrap(err, "GetLatestSpec failed") +} + +// ListSpecsByJobProposalIDs lists the specs which belong to any of job proposal +// ids. +func (o *orm) ListSpecsByJobProposalIDs(ids []int64, qopts ...pg.QOpt) ([]JobProposalSpec, error) { + stmt := ` +SELECT id, definition, version, status, job_proposal_id, status_updated_at, created_at, updated_at +FROM job_proposal_specs +WHERE job_proposal_id = ANY($1) +` + var specs []JobProposalSpec + err := o.q.WithOpts(qopts...).Select(&specs, stmt, ids) + return specs, errors.Wrap(err, "GetJobProposalsByManagersIDs failed") +} + +// RejectSpec rejects the spec and updates the job proposal +func (o *orm) RejectSpec(id int64, qopts ...pg.QOpt) error { + stmt := ` +UPDATE job_proposal_specs +SET status = $1, + status_updated_at = NOW(), + updated_at = NOW() +WHERE id = $2 +RETURNING job_proposal_id; +` + + var jpID int64 + if err := o.q.WithOpts(qopts...).Get(&jpID, stmt, SpecStatusRejected, id); err != nil { + return err + } + + stmt = ` +UPDATE job_proposals +SET status = ( + CASE + WHEN status = 'approved' THEN 'approved'::job_proposal_status + WHEN status = 'deleted' THEN 'deleted'::job_proposal_status + ELSE 'rejected'::job_proposal_status + END + ), + pending_update = FALSE, + updated_at = NOW() +WHERE id = $1 +` + + result, err := o.q.WithOpts(qopts...).Exec(stmt, jpID) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + + if rowsAffected == 0 { + return sql.ErrNoRows + } + + return nil +} + +// RevokeSpec 
revokes a job proposal with a pending job spec. An approved +// proposal cannot be revoked. A revoked proposal's job spec cannot be approved +// or edited, but the job can be reproposed by FMS. +func (o *orm) RevokeSpec(id int64, qopts ...pg.QOpt) error { + // Update the status of the spec + stmt := ` +UPDATE job_proposal_specs +SET status = ( + CASE + WHEN status = 'approved' THEN 'approved'::job_proposal_spec_status + ELSE $2 + END + ), + status_updated_at = NOW(), + updated_at = NOW() +WHERE id = $1 +RETURNING job_proposal_id; +` + + var jpID int64 + if err := o.q.WithOpts(qopts...).Get(&jpID, stmt, id, SpecStatusRevoked); err != nil { + return err + } + + stmt = ` +UPDATE job_proposals +SET status = ( + CASE + WHEN status = 'deleted' THEN 'deleted'::job_proposal_status + WHEN status = 'approved' THEN 'approved'::job_proposal_status + ELSE $3 + END + ), + pending_update = FALSE, + external_job_id = ( + CASE + WHEN status <> 'approved' THEN $2 + ELSE job_proposals.external_job_id + END + ), + updated_at = NOW() +WHERE id = $1 + ` + + result, err := o.q.WithOpts(qopts...).Exec(stmt, jpID, nil, JobProposalStatusRevoked) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + + if rowsAffected == 0 { + return sql.ErrNoRows + } + + return nil +} + +// UpdateSpecDefinition updates the definition of a job proposal spec by id. 
+func (o *orm) UpdateSpecDefinition(id int64, spec string, qopts ...pg.QOpt) error { + stmt := ` +UPDATE job_proposal_specs +SET definition = $1, + updated_at = NOW() +WHERE id = $2; +` + + res, err := o.q.WithOpts(qopts...).Exec(stmt, spec, id) + if err != nil { + return errors.Wrap(err, "UpdateSpecDefinition failed to update definition") + } + + rowsAffected, err := res.RowsAffected() + if err != nil { + return errors.Wrap(err, "UpdateSpecDefinition failed to get RowsAffected") + } + + if rowsAffected == 0 { + return sql.ErrNoRows + } + + return nil +} + +// IsJobManaged determines if a job is managed by the feeds manager. +func (o *orm) IsJobManaged(jobID int64, qopts ...pg.QOpt) (exists bool, err error) { + stmt := ` +SELECT exists ( + SELECT 1 + FROM job_proposals + INNER JOIN jobs ON job_proposals.external_job_id = jobs.external_job_id + WHERE jobs.id = $1 +); +` + + err = o.q.WithOpts(qopts...).Get(&exists, stmt, jobID) + return exists, errors.Wrap(err, "IsJobManaged failed") +} diff --git a/core/services/feeds/orm_test.go b/core/services/feeds/orm_test.go new file mode 100644 index 00000000..1d5bcac6 --- /dev/null +++ b/core/services/feeds/orm_test.go @@ -0,0 +1,1712 @@ +package feeds_test + +import ( + "database/sql" + "testing" + + "github.com/google/uuid" + "github.com/lib/pq" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + 
"github.com/goplugin/pluginv3.0/v2/core/services/ocr" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" + "github.com/goplugin/pluginv3.0/v2/core/utils/crypto" +) + +var ( + uri = "http://192.168.0.1" + name = "Plugin FMS" + publicKey = crypto.PublicKey([]byte("11111111111111111111111111111111")) +) + +type TestORM struct { + feeds.ORM + + db *sqlx.DB +} + +func setupORM(t *testing.T) *TestORM { + t.Helper() + + var ( + db = pgtest.NewSqlxDB(t) + lggr = logger.TestLogger(t) + orm = feeds.NewORM(db, lggr, pgtest.NewQConfig(true)) + ) + + return &TestORM{ORM: orm, db: db} +} + +// Managers + +func Test_ORM_CreateManager(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + mgr = &feeds.FeedsManager{ + URI: uri, + Name: name, + PublicKey: publicKey, + } + ) + + count, err := orm.CountManagers() + require.NoError(t, err) + require.Equal(t, int64(0), count) + + id, err := orm.CreateManager(mgr) + require.NoError(t, err) + + count, err = orm.CountManagers() + require.NoError(t, err) + require.Equal(t, int64(1), count) + + assert.NotZero(t, id) +} + +func Test_ORM_GetManager(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + mgr = &feeds.FeedsManager{ + URI: uri, + Name: name, + PublicKey: publicKey, + } + ) + + id, err := orm.CreateManager(mgr) + require.NoError(t, err) + + actual, err := orm.GetManager(id) + require.NoError(t, err) + + assert.Equal(t, id, actual.ID) + assert.Equal(t, uri, actual.URI) + assert.Equal(t, name, actual.Name) + assert.Equal(t, publicKey, actual.PublicKey) + + _, err = orm.GetManager(-1) + require.Error(t, err) +} + +func Test_ORM_ListManagers(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + mgr = &feeds.FeedsManager{ + URI: uri, + Name: name, + PublicKey: publicKey, + } + ) + + id, err := orm.CreateManager(mgr) + require.NoError(t, err) + + mgrs, err := 
orm.ListManagers() + require.NoError(t, err) + require.Len(t, mgrs, 1) + + actual := mgrs[0] + assert.Equal(t, id, actual.ID) + assert.Equal(t, uri, actual.URI) + assert.Equal(t, name, actual.Name) + assert.Equal(t, publicKey, actual.PublicKey) +} + +func Test_ORM_ListManagersByIDs(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + mgr = &feeds.FeedsManager{ + URI: uri, + Name: name, + PublicKey: publicKey, + } + ) + + id, err := orm.CreateManager(mgr) + require.NoError(t, err) + + mgrs, err := orm.ListManagersByIDs([]int64{id}) + require.NoError(t, err) + require.Equal(t, 1, len(mgrs)) + + actual := mgrs[0] + assert.Equal(t, id, actual.ID) + assert.Equal(t, uri, actual.URI) + assert.Equal(t, name, actual.Name) + assert.Equal(t, publicKey, actual.PublicKey) +} + +func Test_ORM_UpdateManager(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + mgr = &feeds.FeedsManager{ + URI: uri, + Name: name, + PublicKey: publicKey, + } + ) + + id, err := orm.CreateManager(mgr) + require.NoError(t, err) + + updatedMgr := feeds.FeedsManager{ + ID: id, + URI: "127.0.0.1", + Name: "New Name", + PublicKey: crypto.PublicKey([]byte("22222222222222222222222222222222")), + } + + err = orm.UpdateManager(updatedMgr) + require.NoError(t, err) + + actual, err := orm.GetManager(id) + require.NoError(t, err) + + assert.Equal(t, updatedMgr.URI, actual.URI) + assert.Equal(t, updatedMgr.Name, actual.Name) + assert.Equal(t, updatedMgr.PublicKey, actual.PublicKey) +} + +// Chain Config + +func Test_ORM_CreateChainConfig(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + cfg1 = feeds.ChainConfig{ + FeedsManagerID: fmID, + ChainID: "1", + ChainType: feeds.ChainTypeEVM, + AccountAddress: "0x0001", + AdminAddress: "0x1001", + FluxMonitorConfig: feeds.FluxMonitorConfig{ + Enabled: true, + }, + OCR1Config: feeds.OCR1Config{ + Enabled: true, + IsBootstrap: false, + P2PPeerID: null.StringFrom("p2pkey"), + KeyBundleID: 
null.StringFrom("ocrkey"), + }, + OCR2Config: feeds.OCR2ConfigModel{ + Enabled: true, + IsBootstrap: true, + Multiaddr: null.StringFrom("dns/4"), + }, + } + ) + + id, err := orm.CreateChainConfig(cfg1) + require.NoError(t, err) + + actual, err := orm.GetChainConfig(id) + require.NoError(t, err) + + assertChainConfigEqual(t, map[string]interface{}{ + "feedsManagerID": cfg1.FeedsManagerID, + "chainID": cfg1.ChainID, + "chainType": cfg1.ChainType, + "accountAddress": cfg1.AccountAddress, + "adminAddress": cfg1.AdminAddress, + "fluxMonitorConfig": cfg1.FluxMonitorConfig, + "ocrConfig": cfg1.OCR1Config, + "ocr2Config": cfg1.OCR2Config, + }, *actual) +} + +func Test_ORM_CreateBatchChainConfig(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + cfg1 = feeds.ChainConfig{ + FeedsManagerID: fmID, + ChainID: "1", + ChainType: feeds.ChainTypeEVM, + AccountAddress: "0x0001", + AdminAddress: "0x1001", + } + cfg2 = feeds.ChainConfig{ + FeedsManagerID: fmID, + ChainID: "42", + ChainType: "EVM", + AccountAddress: "0x0002", + AdminAddress: "0x2002", + } + ) + + ids, err := orm.CreateBatchChainConfig([]feeds.ChainConfig{cfg1, cfg2}) + require.NoError(t, err) + + assert.Len(t, ids, 2) + + actual, err := orm.GetChainConfig(ids[0]) + require.NoError(t, err) + + assertChainConfigEqual(t, map[string]interface{}{ + "feedsManagerID": cfg1.FeedsManagerID, + "chainID": cfg1.ChainID, + "chainType": cfg1.ChainType, + "accountAddress": cfg1.AccountAddress, + "adminAddress": cfg1.AdminAddress, + "fluxMonitorConfig": cfg1.FluxMonitorConfig, + "ocrConfig": cfg1.OCR1Config, + "ocr2Config": cfg1.OCR2Config, + }, *actual) + + actual, err = orm.GetChainConfig(ids[1]) + require.NoError(t, err) + + assertChainConfigEqual(t, map[string]interface{}{ + "feedsManagerID": cfg2.FeedsManagerID, + "chainID": cfg2.ChainID, + "chainType": cfg2.ChainType, + "accountAddress": cfg2.AccountAddress, + "adminAddress": cfg2.AdminAddress, + "fluxMonitorConfig": 
cfg1.FluxMonitorConfig, + "ocrConfig": cfg1.OCR1Config, + "ocr2Config": cfg1.OCR2Config, + }, *actual) + + // Test empty configs + ids, err = orm.CreateBatchChainConfig([]feeds.ChainConfig{}) + require.NoError(t, err) + require.Empty(t, ids) +} + +func Test_ORM_DeleteChainConfig(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + cfg1 = feeds.ChainConfig{ + FeedsManagerID: fmID, + ChainID: "1", + ChainType: feeds.ChainTypeEVM, + AccountAddress: "0x0001", + AdminAddress: "0x1001", + } + ) + + id, err := orm.CreateChainConfig(cfg1) + require.NoError(t, err) + + _, err = orm.GetChainConfig(id) + require.NoError(t, err) + + actual, err := orm.DeleteChainConfig(id) + require.NoError(t, err) + require.Equal(t, id, actual) + + _, err = orm.GetChainConfig(id) + require.Error(t, err) +} + +func Test_ORM_ListChainConfigsByManagerIDs(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + cfg1 = feeds.ChainConfig{ + FeedsManagerID: fmID, + ChainID: "1", + ChainType: feeds.ChainTypeEVM, + AccountAddress: "0x0001", + AdminAddress: "0x1001", + FluxMonitorConfig: feeds.FluxMonitorConfig{ + Enabled: true, + }, + OCR1Config: feeds.OCR1Config{ + Enabled: true, + IsBootstrap: false, + P2PPeerID: null.StringFrom("p2pkey"), + KeyBundleID: null.StringFrom("ocrkey"), + }, + OCR2Config: feeds.OCR2ConfigModel{ + Enabled: true, + IsBootstrap: true, + Multiaddr: null.StringFrom("dns/4"), + }, + } + ) + + _, err := orm.CreateChainConfig(cfg1) + require.NoError(t, err) + + actual, err := orm.ListChainConfigsByManagerIDs([]int64{fmID}) + require.NoError(t, err) + require.Len(t, actual, 1) + + assertChainConfigEqual(t, map[string]interface{}{ + "feedsManagerID": cfg1.FeedsManagerID, + "chainID": cfg1.ChainID, + "chainType": cfg1.ChainType, + "accountAddress": cfg1.AccountAddress, + "adminAddress": cfg1.AdminAddress, + "fluxMonitorConfig": cfg1.FluxMonitorConfig, + "ocrConfig": cfg1.OCR1Config, + 
"ocr2Config": cfg1.OCR2Config, + }, actual[0]) +} + +func Test_ORM_UpdateChainConfig(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + cfg1 = feeds.ChainConfig{ + FeedsManagerID: fmID, + ChainID: "1", + ChainType: feeds.ChainTypeEVM, + AccountAddress: "0x0001", + AdminAddress: "0x1001", + FluxMonitorConfig: feeds.FluxMonitorConfig{Enabled: false}, + OCR1Config: feeds.OCR1Config{Enabled: false}, + OCR2Config: feeds.OCR2ConfigModel{Enabled: false}, + } + updateCfg = feeds.ChainConfig{ + AccountAddress: "0x0002", + AdminAddress: "0x1002", + FluxMonitorConfig: feeds.FluxMonitorConfig{Enabled: true}, + OCR1Config: feeds.OCR1Config{ + Enabled: true, + IsBootstrap: false, + P2PPeerID: null.StringFrom("p2pkey"), + KeyBundleID: null.StringFrom("ocrkey"), + }, + OCR2Config: feeds.OCR2ConfigModel{ + Enabled: true, + IsBootstrap: true, + Multiaddr: null.StringFrom("dns/4"), + }, + } + ) + + id, err := orm.CreateChainConfig(cfg1) + require.NoError(t, err) + + updateCfg.ID = id + + id, err = orm.UpdateChainConfig(updateCfg) + require.NoError(t, err) + + actual, err := orm.GetChainConfig(id) + require.NoError(t, err) + + assertChainConfigEqual(t, map[string]interface{}{ + "feedsManagerID": cfg1.FeedsManagerID, + "chainID": cfg1.ChainID, + "chainType": cfg1.ChainType, + "accountAddress": updateCfg.AccountAddress, + "adminAddress": updateCfg.AdminAddress, + "fluxMonitorConfig": updateCfg.FluxMonitorConfig, + "ocrConfig": updateCfg.OCR1Config, + "ocr2Config": updateCfg.OCR2Config, + }, *actual) +} + +// Job Proposals + +func Test_ORM_CreateJobProposal(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + fmID := createFeedsManager(t, orm) + + jp := &feeds.JobProposal{ + Name: null.StringFrom("jp1"), + RemoteUUID: uuid.New(), + Status: feeds.JobProposalStatusPending, + FeedsManagerID: fmID, + } + + count, err := orm.CountJobProposals() + require.NoError(t, err) + require.Equal(t, int64(0), count) + + id, err := 
orm.CreateJobProposal(jp) + require.NoError(t, err) + + actual, err := orm.GetJobProposal(id) + require.NoError(t, err) + require.Equal(t, jp.Name, actual.Name) + require.Equal(t, jp.RemoteUUID, actual.RemoteUUID) + require.Equal(t, jp.Status, actual.Status) + require.Equal(t, jp.FeedsManagerID, actual.FeedsManagerID) + require.False(t, actual.PendingUpdate) + require.NotEmpty(t, actual.CreatedAt) + require.Equal(t, actual.CreatedAt.String(), actual.UpdatedAt.String()) + + assert.NotZero(t, id) +} + +func Test_ORM_GetJobProposal(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + fmID := createFeedsManager(t, orm) + remoteUUID := uuid.New() + deletedUUID := uuid.New() + name := null.StringFrom("jp1") + + jp := &feeds.JobProposal{ + Name: name, + RemoteUUID: remoteUUID, + Status: feeds.JobProposalStatusPending, + FeedsManagerID: fmID, + } + + deletedJp := &feeds.JobProposal{ + Name: name, + RemoteUUID: deletedUUID, + Status: feeds.JobProposalStatusDeleted, + FeedsManagerID: fmID, + } + + id, err := orm.CreateJobProposal(jp) + require.NoError(t, err) + + _, err = orm.CreateJobProposal(deletedJp) + require.NoError(t, err) + + assertJobEquals := func(actual *feeds.JobProposal) { + assert.Equal(t, id, actual.ID) + assert.Equal(t, name, actual.Name) + assert.Equal(t, remoteUUID, actual.RemoteUUID) + assert.Equal(t, jp.Status, actual.Status) + assert.False(t, actual.ExternalJobID.Valid) + assert.False(t, actual.PendingUpdate) + assert.Equal(t, jp.FeedsManagerID, actual.FeedsManagerID) + } + + t.Run("by id", func(t *testing.T) { + actual, err := orm.GetJobProposal(id) + require.NoError(t, err) + + assert.Equal(t, id, actual.ID) + assertJobEquals(actual) + + _, err = orm.GetJobProposal(int64(0)) + require.Error(t, err) + }) + + t.Run("by remote uuid", func(t *testing.T) { + actual, err := orm.GetJobProposalByRemoteUUID(remoteUUID) + require.NoError(t, err) + + assertJobEquals(actual) + + _, err = orm.GetJobProposalByRemoteUUID(deletedUUID) + require.Error(t, err) + + _, 
err = orm.GetJobProposalByRemoteUUID(uuid.New()) + require.Error(t, err) + }) +} + +func Test_ORM_ListJobProposals(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + fmID := createFeedsManager(t, orm) + uuid := uuid.New() + name := null.StringFrom("jp1") + + jp := &feeds.JobProposal{ + Name: name, + RemoteUUID: uuid, + Status: feeds.JobProposalStatusPending, + FeedsManagerID: fmID, + } + + id, err := orm.CreateJobProposal(jp) + require.NoError(t, err) + + jps, err := orm.ListJobProposals() + require.NoError(t, err) + require.Len(t, jps, 1) + + actual := jps[0] + assert.Equal(t, id, actual.ID) + assert.Equal(t, name, actual.Name) + assert.Equal(t, uuid, actual.RemoteUUID) + assert.Equal(t, jp.Status, actual.Status) + assert.False(t, actual.ExternalJobID.Valid) + assert.False(t, actual.PendingUpdate) + assert.Equal(t, jp.FeedsManagerID, actual.FeedsManagerID) +} + +func Test_ORM_CountJobProposalsByStatus(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + before func(orm *TestORM) *feeds.JobProposalCounts + wantApproved, wantRejected, wantDeleted, wantRevoked, wantPending, wantCancelled int64 + }{ + { + name: "correctly counts when there are no job proposals", + before: func(orm *TestORM) *feeds.JobProposalCounts { + counts, err := orm.CountJobProposalsByStatus() + require.NoError(t, err) + + return counts + }, + }, + { + name: "correctly counts a pending and cancelled job proposal by status", + before: func(orm *TestORM) *feeds.JobProposalCounts { + fmID := createFeedsManager(t, orm) + createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + createJobProposal(t, orm, feeds.JobProposalStatusCancelled, fmID) + + counts, err := orm.CountJobProposalsByStatus() + require.NoError(t, err) + + return counts + }, + wantPending: 1, + wantCancelled: 1, + }, + { + // Verify that the counts are correct even if the proposal status is not pending. 
A + // spec is considered pending if its status is pending OR pending_update is TRUE + name: "correctly counts the pending specs when pending_update is true but the status itself is not pending", + before: func(orm *TestORM) *feeds.JobProposalCounts { + fmID := createFeedsManager(t, orm) + + // Create a pending job proposal. + jUUID := uuid.New() + jpID, err := orm.CreateJobProposal(&feeds.JobProposal{ + RemoteUUID: jUUID, + Status: feeds.JobProposalStatusPending, + FeedsManagerID: fmID, + }) + require.NoError(t, err) + + // Upsert the proposal and change its status to rejected + _, err = orm.UpsertJobProposal(&feeds.JobProposal{ + RemoteUUID: jUUID, + Status: feeds.JobProposalStatusRejected, + FeedsManagerID: fmID, + }) + require.NoError(t, err) + + // Assert that the upserted job proposal is now pending update. + jp, err := orm.GetJobProposal(jpID) + require.NoError(t, err) + assert.Equal(t, true, jp.PendingUpdate) + + counts, err := orm.CountJobProposalsByStatus() + require.NoError(t, err) + + return counts + }, + wantPending: 1, + }, + { + name: "correctly counts when approving a job proposal", + before: func(orm *TestORM) *feeds.JobProposalCounts { + fmID := createFeedsManager(t, orm) + + // Create a pending job proposal. + jUUID := uuid.New() + jpID := createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + + // Create a spec for the pending job proposal + specID := createJobSpec(t, orm, jpID) + + // Defer the FK requirement of an existing job for a job proposal to be approved + require.NoError(t, utils.JustError(orm.db.Exec( + `SET CONSTRAINTS job_proposals_job_id_fkey DEFERRED`, + ))) + + // Approve the pending job proposal. 
+ err := orm.ApproveSpec(specID, jUUID) + require.NoError(t, err) + + counts, err := orm.CountJobProposalsByStatus() + require.NoError(t, err) + + return counts + }, + wantApproved: 1, + }, + { + name: "correctly counts when revoking a job proposal", + before: func(orm *TestORM) *feeds.JobProposalCounts { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + specID := createJobSpec(t, orm, jpID) + + // Revoke the pending job proposal. + err := orm.RevokeSpec(specID) + require.NoError(t, err) + + counts, err := orm.CountJobProposalsByStatus() + require.NoError(t, err) + + return counts + }, + wantRevoked: 1, + }, + { + name: "correctly counts when deleting a job proposal", + before: func(orm *TestORM) *feeds.JobProposalCounts { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + createJobSpec(t, orm, jpID) + + // Delete the pending job proposal. + err := orm.DeleteProposal(jpID) + require.NoError(t, err) + + counts, err := orm.CountJobProposalsByStatus() + require.NoError(t, err) + + return counts + }, + wantDeleted: 1, + }, + { + name: "correctly counts when deleting a job proposal with an approved spec", + before: func(orm *TestORM) *feeds.JobProposalCounts { + fmID := createFeedsManager(t, orm) + + // Create a pending job proposal. + jUUID := uuid.New() + jpID, err := orm.CreateJobProposal(&feeds.JobProposal{ + RemoteUUID: jUUID, + Status: feeds.JobProposalStatusPending, + FeedsManagerID: fmID, + PendingUpdate: true, + }) + require.NoError(t, err) + + // Create a spec for the pending job proposal + specID := createJobSpec(t, orm, jpID) + + // Defer the FK requirement of an existing job for a job proposal to be approved + require.NoError(t, utils.JustError(orm.db.Exec( + `SET CONSTRAINTS job_proposals_job_id_fkey DEFERRED`, + ))) + + err = orm.ApproveSpec(specID, jUUID) + require.NoError(t, err) + + // Delete the pending job proposal. 
+ err = orm.DeleteProposal(jpID) + require.NoError(t, err) + + counts, err := orm.CountJobProposalsByStatus() + require.NoError(t, err) + + return counts + }, + wantPending: 1, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + orm := setupORM(t) + + counts := tc.before(orm) + + assert.Equal(t, tc.wantPending, counts.Pending) + assert.Equal(t, tc.wantApproved, counts.Approved) + assert.Equal(t, tc.wantRejected, counts.Rejected) + assert.Equal(t, tc.wantCancelled, counts.Cancelled) + assert.Equal(t, tc.wantDeleted, counts.Deleted) + assert.Equal(t, tc.wantRevoked, counts.Revoked) + }) + } +} + +func Test_ORM_ListJobProposalByManagersIDs(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + fmID := createFeedsManager(t, orm) + uuid := uuid.New() + name := null.StringFrom("jp1") + + jp := &feeds.JobProposal{ + Name: name, + RemoteUUID: uuid, + Status: feeds.JobProposalStatusPending, + FeedsManagerID: fmID, + } + + id, err := orm.CreateJobProposal(jp) + require.NoError(t, err) + + jps, err := orm.ListJobProposalsByManagersIDs([]int64{fmID}) + require.NoError(t, err) + require.Len(t, jps, 1) + + actual := jps[0] + assert.Equal(t, id, actual.ID) + assert.Equal(t, name, actual.Name) + assert.Equal(t, uuid, actual.RemoteUUID) + assert.Equal(t, jp.Status, actual.Status) + assert.False(t, actual.ExternalJobID.Valid) + assert.False(t, actual.PendingUpdate) + assert.Equal(t, jp.FeedsManagerID, actual.FeedsManagerID) +} + +func Test_ORM_UpdateJobProposalStatus(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + + actualCreated, err := orm.GetJobProposal(jpID) + require.NoError(t, err) + + err = orm.UpdateJobProposalStatus(jpID, feeds.JobProposalStatusRejected) + require.NoError(t, err) + + actual, err := orm.GetJobProposal(jpID) + require.NoError(t, err) + + assert.Equal(t, jpID, actual.ID) + assert.Equal(t, 
feeds.JobProposalStatusRejected, actual.Status) + assert.Equal(t, actualCreated.CreatedAt, actual.CreatedAt) +} + +func Test_ORM_UpsertJobProposal(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + name = null.StringFrom("jp1") + externalJobID = uuid.NullUUID{UUID: uuid.New(), Valid: true} + ) + + jp := &feeds.JobProposal{ + Name: name, + RemoteUUID: uuid.New(), + Status: feeds.JobProposalStatusPending, + FeedsManagerID: fmID, + } + + // The constraint chk_job_proposals_status_fsm ensures that approved job proposals must have an + // externalJobID, deleted job proposals are ignored from the check, and all other statuses + // should have a null externalJobID. We should test the transition between the statuses, moving + // from pending to approved, and then approved to pending, and pending to deleted and so forth. + + // Create + count, err := orm.CountJobProposals() + require.NoError(t, err) + require.Equal(t, int64(0), count) + + jpID, err := orm.UpsertJobProposal(jp) + require.NoError(t, err) + + createdActual, err := orm.GetJobProposal(jpID) + require.NoError(t, err) + + assert.False(t, createdActual.PendingUpdate) + + count, err = orm.CountJobProposals() + require.NoError(t, err) + require.Equal(t, int64(1), count) + + assert.NotZero(t, jpID) + + // Update + jp.Multiaddrs = pq.StringArray{"dns/example.com"} + jp.Name = null.StringFrom("jp1_updated") + + jpID, err = orm.UpsertJobProposal(jp) + require.NoError(t, err) + + actual, err := orm.GetJobProposal(jpID) + require.NoError(t, err) + assert.Equal(t, jp.Name, actual.Name) + assert.Equal(t, jp.Status, actual.Status) + assert.Equal(t, jp.Multiaddrs, actual.Multiaddrs) + + // Ensure there is a difference in the created proposal and the upserted + // proposal + assert.NotEqual(t, createdActual.Multiaddrs, actual.Multiaddrs) + assert.Equal(t, createdActual.CreatedAt, actual.CreatedAt) // CreatedAt does not change + assert.True(t, actual.PendingUpdate) + + // Approve + 
specID := createJobSpec(t, orm, jpID) + + // Defer the FK requirement of an existing job for a job proposal. + require.NoError(t, utils.JustError(orm.db.Exec( + `SET CONSTRAINTS job_proposals_job_id_fkey DEFERRED`, + ))) + + err = orm.ApproveSpec(specID, externalJobID.UUID) + require.NoError(t, err) + + actual, err = orm.GetJobProposal(jpID) + require.NoError(t, err) + + // Assert that the job proposal is now approved. + assert.Equal(t, feeds.JobProposalStatusApproved, actual.Status) + assert.Equal(t, externalJobID, actual.ExternalJobID) + + // Update the proposal again + jp.Multiaddrs = pq.StringArray{"dns/example1.com"} + jp.Name = null.StringFrom("jp1_updated_again") + jp.Status = feeds.JobProposalStatusPending + + _, err = orm.UpsertJobProposal(jp) + require.NoError(t, err) + + actual, err = orm.GetJobProposal(jpID) + require.NoError(t, err) + + assert.Equal(t, feeds.JobProposalStatusApproved, actual.Status) + assert.Equal(t, externalJobID, actual.ExternalJobID) + assert.True(t, actual.PendingUpdate) + + // Delete the proposal + err = orm.DeleteProposal(jpID) + require.NoError(t, err) + + actual, err = orm.GetJobProposal(jpID) + require.NoError(t, err) + + assert.Equal(t, feeds.JobProposalStatusDeleted, actual.Status) + + // Update deleted proposal + jp.Status = feeds.JobProposalStatusRejected + + jpID, err = orm.UpsertJobProposal(jp) + require.NoError(t, err) + + // Ensure the deleted proposal does not get updated + actual, err = orm.GetJobProposal(jpID) + require.NoError(t, err) + assert.NotEqual(t, jp.Status, actual.Status) + assert.Equal(t, feeds.JobProposalStatusDeleted, actual.Status) +} + +// Job Proposal Specs + +func Test_ORM_ApproveSpec(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + externalJobID = uuid.NullUUID{UUID: uuid.New(), Valid: true} + ) + + // Manually create the job proposal to set pending update + jpID, err := orm.CreateJobProposal(&feeds.JobProposal{ + RemoteUUID: uuid.New(), + Status: 
feeds.JobProposalStatusPending, + FeedsManagerID: fmID, + PendingUpdate: true, + }) + require.NoError(t, err) + specID := createJobSpec(t, orm, jpID) + + // Defer the FK requirement of an existing job for a job proposal. + require.NoError(t, utils.JustError(orm.db.Exec( + `SET CONSTRAINTS job_proposals_job_id_fkey DEFERRED`, + ))) + + err = orm.ApproveSpec(specID, externalJobID.UUID) + require.NoError(t, err) + + actual, err := orm.GetSpec(specID) + require.NoError(t, err) + + assert.Equal(t, specID, actual.ID) + assert.Equal(t, feeds.SpecStatusApproved, actual.Status) + + actualJP, err := orm.GetJobProposal(jpID) + require.NoError(t, err) + + assert.Equal(t, externalJobID, actualJP.ExternalJobID) + assert.Equal(t, feeds.JobProposalStatusApproved, actualJP.Status) + assert.False(t, actualJP.PendingUpdate) +} + +func Test_ORM_CancelSpec(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + before func(orm *TestORM) (int64, int64) + wantSpecStatus feeds.SpecStatus + wantProposalStatus feeds.JobProposalStatus + wantErr string + }{ + { + name: "pending proposal", + before: func(orm *TestORM) (int64, int64) { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + specID := createJobSpec(t, orm, jpID) + + return jpID, specID + }, + wantSpecStatus: feeds.SpecStatusCancelled, + wantProposalStatus: feeds.JobProposalStatusCancelled, + }, + { + name: "deleted proposal", + before: func(orm *TestORM) (int64, int64) { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusDeleted, fmID) + specID := createJobSpec(t, orm, jpID) + + return jpID, specID + }, + wantSpecStatus: feeds.SpecStatusCancelled, + wantProposalStatus: feeds.JobProposalStatusDeleted, + }, + { + name: "not found", + before: func(orm *TestORM) (int64, int64) { + return 0, 0 + }, + wantErr: "sql: no rows in result set", + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, 
func(t *testing.T) { + orm := setupORM(t) + + jpID, specID := tc.before(orm) + + err := orm.CancelSpec(specID) + + if tc.wantErr != "" { + require.EqualError(t, err, tc.wantErr) + } else { + require.NoError(t, err) + + actual, err := orm.GetSpec(specID) + require.NoError(t, err) + + assert.Equal(t, specID, actual.ID) + assert.Equal(t, tc.wantSpecStatus, actual.Status) + + actualJP, err := orm.GetJobProposal(jpID) + require.NoError(t, err) + + assert.Equal(t, tc.wantProposalStatus, actualJP.Status) + assert.False(t, actualJP.PendingUpdate) + } + }) + } +} + +func Test_ORM_DeleteProposal(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + before func(orm *TestORM) int64 + wantProposalStatus feeds.JobProposalStatus + wantProposalPendingUpdate bool + wantErr string + }{ + { + name: "pending proposal", + before: func(orm *TestORM) int64 { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + createJobSpec(t, orm, jpID) + + return jpID + }, + wantProposalPendingUpdate: false, + wantProposalStatus: feeds.JobProposalStatusDeleted, + }, + { + name: "approved proposal with approved spec", + before: func(orm *TestORM) int64 { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + specID := createJobSpec(t, orm, jpID) + + externalJobID := uuid.NullUUID{UUID: uuid.New(), Valid: true} + + // Defer the FK requirement of an existing job for a job proposal. 
+ require.NoError(t, utils.JustError(orm.db.Exec( + `SET CONSTRAINTS job_proposals_job_id_fkey DEFERRED`, + ))) + + err := orm.ApproveSpec(specID, externalJobID.UUID) + require.NoError(t, err) + + return jpID + }, + wantProposalPendingUpdate: true, + wantProposalStatus: feeds.JobProposalStatusDeleted, + }, + { + name: "approved proposal with pending spec", + before: func(orm *TestORM) int64 { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + specID := createJobSpec(t, orm, jpID) + + externalJobID := uuid.NullUUID{UUID: uuid.New(), Valid: true} + + // Defer the FK requirement of an existing job for a job proposal. + require.NoError(t, utils.JustError(orm.db.Exec( + `SET CONSTRAINTS job_proposals_job_id_fkey DEFERRED`, + ))) + + err := orm.ApproveSpec(specID, externalJobID.UUID) + require.NoError(t, err) + + jp, err := orm.GetJobProposal(jpID) + require.NoError(t, err) + + // Update the proposal to pending and create a new pending spec + _, err = orm.UpsertJobProposal(&feeds.JobProposal{ + RemoteUUID: jp.RemoteUUID, + Status: feeds.JobProposalStatusPending, + FeedsManagerID: fmID, + }) + require.NoError(t, err) + + _, err = orm.CreateSpec(feeds.JobProposalSpec{ + Definition: "spec data", + Version: 2, + Status: feeds.SpecStatusPending, + JobProposalID: jpID, + }) + require.NoError(t, err) + + return jpID + }, + wantProposalPendingUpdate: false, + wantProposalStatus: feeds.JobProposalStatusDeleted, + }, + { + name: "cancelled proposal", + before: func(orm *TestORM) int64 { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusCancelled, fmID) + createJobSpec(t, orm, jpID) + + return jpID + }, + wantProposalPendingUpdate: false, + wantProposalStatus: feeds.JobProposalStatusDeleted, + }, + { + name: "rejected proposal", + before: func(orm *TestORM) int64 { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusRejected, fmID) 
+ createJobSpec(t, orm, jpID) + + return jpID + }, + wantProposalPendingUpdate: false, + wantProposalStatus: feeds.JobProposalStatusDeleted, + }, + { + name: "not found spec", + before: func(orm *TestORM) int64 { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusRejected, fmID) + + return jpID + }, + wantErr: "sql: no rows in result set", + }, + { + name: "not found proposal", + before: func(orm *TestORM) int64 { + return 0 + }, + wantErr: "sql: no rows in result set", + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + orm := setupORM(t) + + jpID := tc.before(orm) + + err := orm.DeleteProposal(jpID) + + if tc.wantErr != "" { + require.EqualError(t, err, tc.wantErr) + } else { + require.NoError(t, err) + + actual, err := orm.GetJobProposal(jpID) + require.NoError(t, err) + + assert.Equal(t, jpID, actual.ID) + assert.Equal(t, tc.wantProposalStatus, actual.Status) + assert.Equal(t, tc.wantProposalPendingUpdate, actual.PendingUpdate) + } + }) + } +} + +func Test_ORM_RevokeSpec(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + before func(orm *TestORM) (int64, int64) + wantProposalStatus feeds.JobProposalStatus + wantSpecStatus feeds.SpecStatus + wantErr string + wantPendingUpdate bool + }{ + { + name: "pending proposal", + before: func(orm *TestORM) (int64, int64) { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + specID := createJobSpec(t, orm, jpID) + + return jpID, specID + }, + wantProposalStatus: feeds.JobProposalStatusRevoked, + wantSpecStatus: feeds.SpecStatusRevoked, + wantPendingUpdate: false, + }, + { + name: "approved proposal", + before: func(orm *TestORM) (int64, int64) { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + specID := createJobSpec(t, orm, jpID) + + externalJobID := uuid.NullUUID{UUID: uuid.New(), Valid: 
true} + + // Defer the FK requirement of an existing job for a job proposal. + require.NoError(t, utils.JustError(orm.db.Exec( + `SET CONSTRAINTS job_proposals_job_id_fkey DEFERRED`, + ))) + + err := orm.ApproveSpec(specID, externalJobID.UUID) + require.NoError(t, err) + + return jpID, specID + }, + wantProposalStatus: feeds.JobProposalStatusApproved, + wantSpecStatus: feeds.SpecStatusApproved, + }, + { + name: "cancelled proposal", + before: func(orm *TestORM) (int64, int64) { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusCancelled, fmID) + specID := createJobSpec(t, orm, jpID) + + return jpID, specID + }, + wantProposalStatus: feeds.JobProposalStatusRevoked, + wantSpecStatus: feeds.SpecStatusRevoked, + }, + { + name: "rejected proposal", + before: func(orm *TestORM) (int64, int64) { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusRejected, fmID) + specID := createJobSpec(t, orm, jpID) + + return jpID, specID + }, + wantProposalStatus: feeds.JobProposalStatusRevoked, + wantSpecStatus: feeds.SpecStatusRevoked, + }, + { + name: "deleted proposal", + before: func(orm *TestORM) (int64, int64) { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusDeleted, fmID) + specID := createJobSpec(t, orm, jpID) + + return jpID, specID + }, + wantProposalStatus: feeds.JobProposalStatusDeleted, + wantSpecStatus: feeds.SpecStatusRevoked, + }, + { + name: "not found", + before: func(orm *TestORM) (int64, int64) { + return 0, 0 + }, + wantErr: "sql: no rows in result set", + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + orm := setupORM(t) + + jpID, specID := tc.before(orm) + + err := orm.RevokeSpec(specID) + + if tc.wantErr != "" { + require.EqualError(t, err, tc.wantErr) + } else { + require.NoError(t, err) + + actualJP, err := orm.GetJobProposal(jpID) + require.NoError(t, err) + + 
assert.Equal(t, tc.wantProposalStatus, actualJP.Status) + assert.False(t, actualJP.PendingUpdate) + + assert.Equal(t, jpID, actualJP.ID) + assert.Equal(t, tc.wantProposalStatus, actualJP.Status) + assert.Equal(t, tc.wantPendingUpdate, actualJP.PendingUpdate) + } + }) + } +} + +func Test_ORM_ExistsSpecByJobProposalIDAndVersion(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + jpID = createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + ) + + createJobSpec(t, orm, jpID) + + exists, err := orm.ExistsSpecByJobProposalIDAndVersion(jpID, 1) + require.NoError(t, err) + require.True(t, exists) + + exists, err = orm.ExistsSpecByJobProposalIDAndVersion(jpID, 2) + require.NoError(t, err) + require.False(t, exists) +} + +func Test_ORM_GetSpec(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + jpID = createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + specID = createJobSpec(t, orm, jpID) + ) + + actual, err := orm.GetSpec(specID) + require.NoError(t, err) + + assert.Equal(t, "spec data", actual.Definition) + assert.Equal(t, int32(1), actual.Version) + assert.Equal(t, feeds.SpecStatusPending, actual.Status) + assert.Equal(t, jpID, actual.JobProposalID) +} + +func Test_ORM_GetApprovedSpec(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + jpID = createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + specID = createJobSpec(t, orm, jpID) + externalJobID = uuid.NullUUID{UUID: uuid.New(), Valid: true} + ) + + // Defer the FK requirement of a job proposal so we don't have to setup a + // real job. 
+ require.NoError(t, utils.JustError(orm.db.Exec( + `SET CONSTRAINTS job_proposals_job_id_fkey DEFERRED`, + ))) + + err := orm.ApproveSpec(specID, externalJobID.UUID) + require.NoError(t, err) + + actual, err := orm.GetApprovedSpec(jpID) + require.NoError(t, err) + + assert.Equal(t, specID, actual.ID) + assert.Equal(t, feeds.SpecStatusApproved, actual.Status) + + err = orm.CancelSpec(specID) + require.NoError(t, err) + + _, err = orm.GetApprovedSpec(jpID) + require.Error(t, err) + + assert.ErrorIs(t, err, sql.ErrNoRows) +} + +func Test_ORM_GetLatestSpec(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + jpID = createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + ) + + _ = createJobSpec(t, orm, jpID) + spec2ID, err := orm.CreateSpec(feeds.JobProposalSpec{ + Definition: "spec data", + Version: 2, + Status: feeds.SpecStatusPending, + JobProposalID: jpID, + }) + require.NoError(t, err) + + actual, err := orm.GetSpec(spec2ID) + require.NoError(t, err) + + assert.Equal(t, spec2ID, actual.ID) + assert.Equal(t, "spec data", actual.Definition) + assert.Equal(t, int32(2), actual.Version) + assert.Equal(t, feeds.SpecStatusPending, actual.Status) + assert.Equal(t, jpID, actual.JobProposalID) +} + +func Test_ORM_ListSpecsByJobProposalIDs(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + + jp1ID = createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + jp2ID = createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + ) + + // Create the specs for the proposals + createJobSpec(t, orm, jp1ID) + createJobSpec(t, orm, jp2ID) + + specs, err := orm.ListSpecsByJobProposalIDs([]int64{jp1ID, jp2ID}) + require.NoError(t, err) + require.Len(t, specs, 2) + + actual := specs[0] + + assert.Equal(t, "spec data", actual.Definition) + assert.Equal(t, int32(1), actual.Version) + assert.Equal(t, feeds.SpecStatusPending, actual.Status) + assert.Equal(t, jp1ID, 
actual.JobProposalID) + + actual = specs[1] + + assert.Equal(t, "spec data", actual.Definition) + assert.Equal(t, int32(1), actual.Version) + assert.Equal(t, feeds.SpecStatusPending, actual.Status) + assert.Equal(t, jp2ID, actual.JobProposalID) +} + +func Test_ORM_RejectSpec(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + before func(orm *TestORM) (int64, int64) + wantSpecStatus feeds.SpecStatus + wantProposalStatus feeds.JobProposalStatus + wantErr string + }{ + { + name: "pending proposal", + before: func(orm *TestORM) (int64, int64) { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + specID := createJobSpec(t, orm, jpID) + + return jpID, specID + }, + wantSpecStatus: feeds.SpecStatusRejected, + wantProposalStatus: feeds.JobProposalStatusRejected, + }, + { + name: "approved proposal", + before: func(orm *TestORM) (int64, int64) { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + specID := createJobSpec(t, orm, jpID) + + externalJobID := uuid.NullUUID{UUID: uuid.New(), Valid: true} + + // Defer the FK requirement of an existing job for a job proposal. 
+ require.NoError(t, utils.JustError(orm.db.Exec( + `SET CONSTRAINTS job_proposals_job_id_fkey DEFERRED`, + ))) + + err := orm.ApproveSpec(specID, externalJobID.UUID) + require.NoError(t, err) + + return jpID, specID + }, + wantSpecStatus: feeds.SpecStatusRejected, + wantProposalStatus: feeds.JobProposalStatusApproved, + }, + { + name: "cancelled proposal", + before: func(orm *TestORM) (int64, int64) { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusCancelled, fmID) + specID := createJobSpec(t, orm, jpID) + + return jpID, specID + }, + wantSpecStatus: feeds.SpecStatusRejected, + wantProposalStatus: feeds.JobProposalStatusRejected, + }, + { + name: "deleted proposal", + before: func(orm *TestORM) (int64, int64) { + fmID := createFeedsManager(t, orm) + jpID := createJobProposal(t, orm, feeds.JobProposalStatusDeleted, fmID) + specID := createJobSpec(t, orm, jpID) + + return jpID, specID + }, + wantSpecStatus: feeds.SpecStatusRejected, + wantProposalStatus: feeds.JobProposalStatusDeleted, + }, + { + name: "not found", + before: func(orm *TestORM) (int64, int64) { + return 0, 0 + }, + wantErr: "sql: no rows in result set", + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + orm := setupORM(t) + + jpID, specID := tc.before(orm) + + err := orm.RejectSpec(specID) + + if tc.wantErr != "" { + require.EqualError(t, err, tc.wantErr) + } else { + require.NoError(t, err) + + actual, err := orm.GetSpec(specID) + require.NoError(t, err) + + assert.Equal(t, specID, actual.ID) + assert.Equal(t, tc.wantSpecStatus, actual.Status) + + actualJP, err := orm.GetJobProposal(jpID) + require.NoError(t, err) + + assert.Equal(t, tc.wantProposalStatus, actualJP.Status) + assert.False(t, actualJP.PendingUpdate) + } + }) + } +} + +func Test_ORM_UpdateSpecDefinition(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + jpID = createJobProposal(t, orm, 
feeds.JobProposalStatusPending, fmID) + specID = createJobSpec(t, orm, jpID) + ) + + prev, err := orm.GetSpec(specID) + require.NoError(t, err) + + err = orm.UpdateSpecDefinition(specID, "updated spec") + require.NoError(t, err) + + actual, err := orm.GetSpec(specID) + require.NoError(t, err) + + assert.Equal(t, specID, actual.ID) + require.NotEqual(t, prev.Definition, actual.Definition) + require.Equal(t, "updated spec", actual.Definition) + + // Not found + err = orm.UpdateSpecDefinition(-1, "updated spec") + require.Error(t, err) +} + +// Other + +func Test_ORM_IsJobManaged(t *testing.T) { + t.Parallel() + + var ( + orm = setupORM(t) + fmID = createFeedsManager(t, orm) + jpID = createJobProposal(t, orm, feeds.JobProposalStatusPending, fmID) + specID = createJobSpec(t, orm, jpID) + externalJobID = uuid.NullUUID{UUID: uuid.New(), Valid: true} + ) + + j := createJob(t, orm.db, externalJobID.UUID) + + isManaged, err := orm.IsJobManaged(int64(j.ID)) + require.NoError(t, err) + assert.False(t, isManaged) + + err = orm.ApproveSpec(specID, externalJobID.UUID) + require.NoError(t, err) + + isManaged, err = orm.IsJobManaged(int64(j.ID)) + require.NoError(t, err) + assert.True(t, isManaged) +} + +// Helpers + +func assertChainConfigEqual(t *testing.T, want map[string]interface{}, actual feeds.ChainConfig) { + t.Helper() + + assert.Equal(t, want["feedsManagerID"], actual.FeedsManagerID) + assert.Equal(t, want["chainID"], actual.ChainID) + assert.Equal(t, want["chainType"], actual.ChainType) + assert.Equal(t, want["accountAddress"], actual.AccountAddress) + assert.Equal(t, want["adminAddress"], actual.AdminAddress) + assert.Equal(t, want["fluxMonitorConfig"], actual.FluxMonitorConfig) + assert.Equal(t, want["ocrConfig"], actual.OCR1Config) + assert.Equal(t, want["ocr2Config"], actual.OCR2Config) +} + +// createFeedsManager is a test helper to create a feeds manager +func createFeedsManager(t *testing.T, orm feeds.ORM) int64 { + t.Helper() + + mgr := &feeds.FeedsManager{ + 
URI: uri, + Name: name, + PublicKey: publicKey, + } + + id, err := orm.CreateManager(mgr) + require.NoError(t, err) + + return id +} + +func createJob(t *testing.T, db *sqlx.DB, externalJobID uuid.UUID) *job.Job { + t.Helper() + + var ( + config = configtest.NewGeneralConfig(t, nil) + keyStore = cltest.NewKeyStore(t, db, config.Database()) + lggr = logger.TestLogger(t) + pipelineORM = pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgeORM = bridges.NewORM(db, lggr, config.Database()) + relayExtenders = evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()}) + ) + orm := job.NewORM(db, pipelineORM, bridgeORM, keyStore, lggr, config.Database()) + require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, keyStore.P2P().Add(cltest.DefaultP2PKey)) + + defer func() { assert.NoError(t, orm.Close()) }() + + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + _, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + + _, address := cltest.MustInsertRandomKey(t, keyStore.Eth()) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + jb, err := ocr.ValidatedOracleSpecToml(legacyChains, + testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + JobID: externalJobID.String(), + TransmitterAddress: address.Hex(), + DS1BridgeName: bridge.Name.String(), + DS2BridgeName: bridge2.Name.String(), + }).Toml(), + ) + require.NoError(t, err) + + err = orm.CreateJob(&jb) + require.NoError(t, err) + + return &jb +} + +func createJobProposal(t *testing.T, orm feeds.ORM, status feeds.JobProposalStatus, fmID int64) int64 { + t.Helper() + + id, err := orm.CreateJobProposal(&feeds.JobProposal{ + RemoteUUID: uuid.New(), + Status: status, + FeedsManagerID: fmID, + PendingUpdate: true, + }) + require.NoError(t, err) + + return id +} + +func createJobSpec(t *testing.T, orm feeds.ORM, jpID 
int64) int64 { + t.Helper() + + id, err := orm.CreateSpec(feeds.JobProposalSpec{ + Definition: "spec data", + Version: 1, + Status: feeds.SpecStatusPending, + JobProposalID: jpID, + }) + require.NoError(t, err) + + return id +} diff --git a/core/services/feeds/proto/feeds_manager.pb.go b/core/services/feeds/proto/feeds_manager.pb.go new file mode 100644 index 00000000..880cb8ed --- /dev/null +++ b/core/services/feeds/proto/feeds_manager.pb.go @@ -0,0 +1,2367 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v3.21.7 +// source: pkg/noderpc/proto/feeds_manager.proto + +package proto + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Defines the allowed job types +type JobType int32 + +const ( + JobType_JOB_TYPE_UNSPECIFIED JobType = 0 + JobType_JOB_TYPE_FLUX_MONITOR JobType = 1 + JobType_JOB_TYPE_OCR JobType = 2 + JobType_JOB_TYPE_OCR2 JobType = 3 +) + +// Enum value maps for JobType. 
+var ( + JobType_name = map[int32]string{ + 0: "JOB_TYPE_UNSPECIFIED", + 1: "JOB_TYPE_FLUX_MONITOR", + 2: "JOB_TYPE_OCR", + 3: "JOB_TYPE_OCR2", + } + JobType_value = map[string]int32{ + "JOB_TYPE_UNSPECIFIED": 0, + "JOB_TYPE_FLUX_MONITOR": 1, + "JOB_TYPE_OCR": 2, + "JOB_TYPE_OCR2": 3, + } +) + +func (x JobType) Enum() *JobType { + p := new(JobType) + *p = x + return p +} + +func (x JobType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (JobType) Descriptor() protoreflect.EnumDescriptor { + return file_pkg_noderpc_proto_feeds_manager_proto_enumTypes[0].Descriptor() +} + +func (JobType) Type() protoreflect.EnumType { + return &file_pkg_noderpc_proto_feeds_manager_proto_enumTypes[0] +} + +func (x JobType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use JobType.Descriptor instead. +func (JobType) EnumDescriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{0} +} + +type ChainType int32 + +const ( + ChainType_CHAIN_TYPE_UNSPECIFIED ChainType = 0 + ChainType_CHAIN_TYPE_EVM ChainType = 1 + ChainType_CHAIN_TYPE_SOLANA ChainType = 2 +) + +// Enum value maps for ChainType. 
+var ( + ChainType_name = map[int32]string{ + 0: "CHAIN_TYPE_UNSPECIFIED", + 1: "CHAIN_TYPE_EVM", + 2: "CHAIN_TYPE_SOLANA", + } + ChainType_value = map[string]int32{ + "CHAIN_TYPE_UNSPECIFIED": 0, + "CHAIN_TYPE_EVM": 1, + "CHAIN_TYPE_SOLANA": 2, + } +) + +func (x ChainType) Enum() *ChainType { + p := new(ChainType) + *p = x + return p +} + +func (x ChainType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ChainType) Descriptor() protoreflect.EnumDescriptor { + return file_pkg_noderpc_proto_feeds_manager_proto_enumTypes[1].Descriptor() +} + +func (ChainType) Type() protoreflect.EnumType { + return &file_pkg_noderpc_proto_feeds_manager_proto_enumTypes[1] +} + +func (x ChainType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ChainType.Descriptor instead. +func (ChainType) EnumDescriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{1} +} + +type Chain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Type ChainType `protobuf:"varint,2,opt,name=type,proto3,enum=cfm.ChainType" json:"type,omitempty"` +} + +func (x *Chain) Reset() { + *x = Chain{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Chain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Chain) ProtoMessage() {} + +func (x *Chain) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use Chain.ProtoReflect.Descriptor instead. +func (*Chain) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{0} +} + +func (x *Chain) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Chain) GetType() ChainType { + if x != nil { + return x.Type + } + return ChainType_CHAIN_TYPE_UNSPECIFIED +} + +// An account on a specific blockchain +type Account struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ChainType ChainType `protobuf:"varint,1,opt,name=chain_type,json=chainType,proto3,enum=cfm.ChainType" json:"chain_type,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` +} + +func (x *Account) Reset() { + *x = Account{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Account) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Account) ProtoMessage() {} + +func (x *Account) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Account.ProtoReflect.Descriptor instead. 
+func (*Account) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{1} +} + +func (x *Account) GetChainType() ChainType { + if x != nil { + return x.ChainType + } + return ChainType_CHAIN_TYPE_UNSPECIFIED +} + +func (x *Account) GetChainId() string { + if x != nil { + return x.ChainId + } + return "" +} + +func (x *Account) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +// The config for Flux Monitor on a specific chain +type FluxMonitorConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` +} + +func (x *FluxMonitorConfig) Reset() { + *x = FluxMonitorConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FluxMonitorConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FluxMonitorConfig) ProtoMessage() {} + +func (x *FluxMonitorConfig) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FluxMonitorConfig.ProtoReflect.Descriptor instead. 
+func (*FluxMonitorConfig) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{2} +} + +func (x *FluxMonitorConfig) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +// The config for OCR1 on a specific chain +type OCR1Config struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + IsBootstrap bool `protobuf:"varint,2,opt,name=is_bootstrap,json=isBootstrap,proto3" json:"is_bootstrap,omitempty"` + P2PKeyBundle *OCR1Config_P2PKeyBundle `protobuf:"bytes,3,opt,name=p2p_key_bundle,json=p2pKeyBundle,proto3" json:"p2p_key_bundle,omitempty"` + OcrKeyBundle *OCR1Config_OCRKeyBundle `protobuf:"bytes,4,opt,name=ocr_key_bundle,json=ocrKeyBundle,proto3" json:"ocr_key_bundle,omitempty"` + Multiaddr string `protobuf:"bytes,5,opt,name=multiaddr,proto3" json:"multiaddr,omitempty"` +} + +func (x *OCR1Config) Reset() { + *x = OCR1Config{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OCR1Config) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OCR1Config) ProtoMessage() {} + +func (x *OCR1Config) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OCR1Config.ProtoReflect.Descriptor instead. 
+func (*OCR1Config) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{3} +} + +func (x *OCR1Config) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *OCR1Config) GetIsBootstrap() bool { + if x != nil { + return x.IsBootstrap + } + return false +} + +func (x *OCR1Config) GetP2PKeyBundle() *OCR1Config_P2PKeyBundle { + if x != nil { + return x.P2PKeyBundle + } + return nil +} + +func (x *OCR1Config) GetOcrKeyBundle() *OCR1Config_OCRKeyBundle { + if x != nil { + return x.OcrKeyBundle + } + return nil +} + +func (x *OCR1Config) GetMultiaddr() string { + if x != nil { + return x.Multiaddr + } + return "" +} + +// The config for OCR2 on a specific chain +type OCR2Config struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + IsBootstrap bool `protobuf:"varint,2,opt,name=is_bootstrap,json=isBootstrap,proto3" json:"is_bootstrap,omitempty"` + P2PKeyBundle *OCR2Config_P2PKeyBundle `protobuf:"bytes,3,opt,name=p2p_key_bundle,json=p2pKeyBundle,proto3" json:"p2p_key_bundle,omitempty"` + OcrKeyBundle *OCR2Config_OCRKeyBundle `protobuf:"bytes,4,opt,name=ocr_key_bundle,json=ocrKeyBundle,proto3" json:"ocr_key_bundle,omitempty"` + Multiaddr string `protobuf:"bytes,5,opt,name=multiaddr,proto3" json:"multiaddr,omitempty"` + Plugins *OCR2Config_Plugins `protobuf:"bytes,6,opt,name=plugins,proto3" json:"plugins,omitempty"` + ForwarderAddress *string `protobuf:"bytes,7,opt,name=forwarder_address,json=forwarderAddress,proto3,oneof" json:"forwarder_address,omitempty"` +} + +func (x *OCR2Config) Reset() { + *x = OCR2Config{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OCR2Config) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*OCR2Config) ProtoMessage() {} + +func (x *OCR2Config) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OCR2Config.ProtoReflect.Descriptor instead. +func (*OCR2Config) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{4} +} + +func (x *OCR2Config) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *OCR2Config) GetIsBootstrap() bool { + if x != nil { + return x.IsBootstrap + } + return false +} + +func (x *OCR2Config) GetP2PKeyBundle() *OCR2Config_P2PKeyBundle { + if x != nil { + return x.P2PKeyBundle + } + return nil +} + +func (x *OCR2Config) GetOcrKeyBundle() *OCR2Config_OCRKeyBundle { + if x != nil { + return x.OcrKeyBundle + } + return nil +} + +func (x *OCR2Config) GetMultiaddr() string { + if x != nil { + return x.Multiaddr + } + return "" +} + +func (x *OCR2Config) GetPlugins() *OCR2Config_Plugins { + if x != nil { + return x.Plugins + } + return nil +} + +func (x *OCR2Config) GetForwarderAddress() string { + if x != nil && x.ForwarderAddress != nil { + return *x.ForwarderAddress + } + return "" +} + +type ChainConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Chain *Chain `protobuf:"bytes,1,opt,name=chain,proto3" json:"chain,omitempty"` + AccountAddress string `protobuf:"bytes,2,opt,name=account_address,json=accountAddress,proto3" json:"account_address,omitempty"` + AdminAddress string `protobuf:"bytes,3,opt,name=admin_address,json=adminAddress,proto3" json:"admin_address,omitempty"` + FluxMonitorConfig *FluxMonitorConfig 
`protobuf:"bytes,4,opt,name=flux_monitor_config,json=fluxMonitorConfig,proto3" json:"flux_monitor_config,omitempty"` + Ocr1Config *OCR1Config `protobuf:"bytes,5,opt,name=ocr1_config,json=ocr1Config,proto3" json:"ocr1_config,omitempty"` + Ocr2Config *OCR2Config `protobuf:"bytes,6,opt,name=ocr2_config,json=ocr2Config,proto3" json:"ocr2_config,omitempty"` +} + +func (x *ChainConfig) Reset() { + *x = ChainConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChainConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChainConfig) ProtoMessage() {} + +func (x *ChainConfig) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChainConfig.ProtoReflect.Descriptor instead. 
+func (*ChainConfig) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{5} +} + +func (x *ChainConfig) GetChain() *Chain { + if x != nil { + return x.Chain + } + return nil +} + +func (x *ChainConfig) GetAccountAddress() string { + if x != nil { + return x.AccountAddress + } + return "" +} + +func (x *ChainConfig) GetAdminAddress() string { + if x != nil { + return x.AdminAddress + } + return "" +} + +func (x *ChainConfig) GetFluxMonitorConfig() *FluxMonitorConfig { + if x != nil { + return x.FluxMonitorConfig + } + return nil +} + +func (x *ChainConfig) GetOcr1Config() *OCR1Config { + if x != nil { + return x.Ocr1Config + } + return nil +} + +func (x *ChainConfig) GetOcr2Config() *OCR2Config { + if x != nil { + return x.Ocr2Config + } + return nil +} + +type UpdateNodeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + JobTypes []JobType `protobuf:"varint,1,rep,packed,name=job_types,json=jobTypes,proto3,enum=cfm.JobType" json:"job_types,omitempty"` + ChainId int64 `protobuf:"varint,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` // To be removed when all nodes are upgraded to 1.2 + AccountAddresses []string `protobuf:"bytes,3,rep,name=account_addresses,json=accountAddresses,proto3" json:"account_addresses,omitempty"` + IsBootstrapPeer bool `protobuf:"varint,4,opt,name=is_bootstrap_peer,json=isBootstrapPeer,proto3" json:"is_bootstrap_peer,omitempty"` + BootstrapMultiaddr string `protobuf:"bytes,5,opt,name=bootstrap_multiaddr,json=bootstrapMultiaddr,proto3" json:"bootstrap_multiaddr,omitempty"` + Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` + ChainIds []int64 `protobuf:"varint,7,rep,packed,name=chain_ids,json=chainIds,proto3" json:"chain_ids,omitempty"` + Accounts []*Account `protobuf:"bytes,8,rep,name=accounts,proto3" json:"accounts,omitempty"` + Chains []*Chain 
`protobuf:"bytes,9,rep,name=chains,proto3" json:"chains,omitempty"` + ChainConfigs []*ChainConfig `protobuf:"bytes,10,rep,name=chain_configs,json=chainConfigs,proto3" json:"chain_configs,omitempty"` +} + +func (x *UpdateNodeRequest) Reset() { + *x = UpdateNodeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateNodeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateNodeRequest) ProtoMessage() {} + +func (x *UpdateNodeRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateNodeRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateNodeRequest) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{6} +} + +func (x *UpdateNodeRequest) GetJobTypes() []JobType { + if x != nil { + return x.JobTypes + } + return nil +} + +func (x *UpdateNodeRequest) GetChainId() int64 { + if x != nil { + return x.ChainId + } + return 0 +} + +func (x *UpdateNodeRequest) GetAccountAddresses() []string { + if x != nil { + return x.AccountAddresses + } + return nil +} + +func (x *UpdateNodeRequest) GetIsBootstrapPeer() bool { + if x != nil { + return x.IsBootstrapPeer + } + return false +} + +func (x *UpdateNodeRequest) GetBootstrapMultiaddr() string { + if x != nil { + return x.BootstrapMultiaddr + } + return "" +} + +func (x *UpdateNodeRequest) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *UpdateNodeRequest) GetChainIds() []int64 { + if x != nil { + return x.ChainIds + } + return nil +} + +func (x *UpdateNodeRequest) GetAccounts() []*Account { + if x != nil { + return x.Accounts + } + return nil +} + +func (x *UpdateNodeRequest) GetChains() []*Chain { + if x != nil { + return x.Chains + } + return nil +} + +func (x *UpdateNodeRequest) GetChainConfigs() []*ChainConfig { + if x != nil { + return x.ChainConfigs + } + return nil +} + +type UpdateNodeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpdateNodeResponse) Reset() { + *x = UpdateNodeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateNodeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateNodeResponse) ProtoMessage() {} + +func (x *UpdateNodeResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[7] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateNodeResponse.ProtoReflect.Descriptor instead. +func (*UpdateNodeResponse) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{7} +} + +type ApprovedJobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *ApprovedJobRequest) Reset() { + *x = ApprovedJobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApprovedJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApprovedJobRequest) ProtoMessage() {} + +func (x *ApprovedJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApprovedJobRequest.ProtoReflect.Descriptor instead. 
+func (*ApprovedJobRequest) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{8} +} + +func (x *ApprovedJobRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *ApprovedJobRequest) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +type ApprovedJobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ApprovedJobResponse) Reset() { + *x = ApprovedJobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApprovedJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApprovedJobResponse) ProtoMessage() {} + +func (x *ApprovedJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApprovedJobResponse.ProtoReflect.Descriptor instead. 
+func (*ApprovedJobResponse) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{9} +} + +type HealthcheckRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HealthcheckRequest) Reset() { + *x = HealthcheckRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthcheckRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthcheckRequest) ProtoMessage() {} + +func (x *HealthcheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthcheckRequest.ProtoReflect.Descriptor instead. 
+func (*HealthcheckRequest) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{10} +} + +type HealthcheckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HealthcheckResponse) Reset() { + *x = HealthcheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthcheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthcheckResponse) ProtoMessage() {} + +func (x *HealthcheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthcheckResponse.ProtoReflect.Descriptor instead. 
+func (*HealthcheckResponse) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{11} +} + +type RejectedJobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *RejectedJobRequest) Reset() { + *x = RejectedJobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RejectedJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RejectedJobRequest) ProtoMessage() {} + +func (x *RejectedJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RejectedJobRequest.ProtoReflect.Descriptor instead. 
+func (*RejectedJobRequest) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{12} +} + +func (x *RejectedJobRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *RejectedJobRequest) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +type RejectedJobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RejectedJobResponse) Reset() { + *x = RejectedJobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RejectedJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RejectedJobResponse) ProtoMessage() {} + +func (x *RejectedJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RejectedJobResponse.ProtoReflect.Descriptor instead. 
+func (*RejectedJobResponse) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{13} +} + +type CancelledJobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *CancelledJobRequest) Reset() { + *x = CancelledJobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CancelledJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelledJobRequest) ProtoMessage() {} + +func (x *CancelledJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelledJobRequest.ProtoReflect.Descriptor instead. 
+func (*CancelledJobRequest) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{14} +} + +func (x *CancelledJobRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *CancelledJobRequest) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +type CancelledJobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CancelledJobResponse) Reset() { + *x = CancelledJobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CancelledJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelledJobResponse) ProtoMessage() {} + +func (x *CancelledJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelledJobResponse.ProtoReflect.Descriptor instead. 
+func (*CancelledJobResponse) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{15} +} + +type ProposeJobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Spec string `protobuf:"bytes,2,opt,name=spec,proto3" json:"spec,omitempty"` + Multiaddrs []string `protobuf:"bytes,3,rep,name=multiaddrs,proto3" json:"multiaddrs,omitempty"` + Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *ProposeJobRequest) Reset() { + *x = ProposeJobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProposeJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProposeJobRequest) ProtoMessage() {} + +func (x *ProposeJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProposeJobRequest.ProtoReflect.Descriptor instead. 
+func (*ProposeJobRequest) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{16} +} + +func (x *ProposeJobRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ProposeJobRequest) GetSpec() string { + if x != nil { + return x.Spec + } + return "" +} + +func (x *ProposeJobRequest) GetMultiaddrs() []string { + if x != nil { + return x.Multiaddrs + } + return nil +} + +func (x *ProposeJobRequest) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +type ProposeJobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ProposeJobResponse) Reset() { + *x = ProposeJobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProposeJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProposeJobResponse) ProtoMessage() {} + +func (x *ProposeJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProposeJobResponse.ProtoReflect.Descriptor instead. 
+func (*ProposeJobResponse) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{17} +} + +func (x *ProposeJobResponse) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type DeleteJobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *DeleteJobRequest) Reset() { + *x = DeleteJobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteJobRequest) ProtoMessage() {} + +func (x *DeleteJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteJobRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteJobRequest) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{18} +} + +func (x *DeleteJobRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type DeleteJobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *DeleteJobResponse) Reset() { + *x = DeleteJobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteJobResponse) ProtoMessage() {} + +func (x *DeleteJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteJobResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteJobResponse) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{19} +} + +func (x *DeleteJobResponse) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type RevokeJobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *RevokeJobRequest) Reset() { + *x = RevokeJobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RevokeJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RevokeJobRequest) ProtoMessage() {} + +func (x *RevokeJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RevokeJobRequest.ProtoReflect.Descriptor instead. 
+func (*RevokeJobRequest) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{20} +} + +func (x *RevokeJobRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type RevokeJobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *RevokeJobResponse) Reset() { + *x = RevokeJobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RevokeJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RevokeJobResponse) ProtoMessage() {} + +func (x *RevokeJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RevokeJobResponse.ProtoReflect.Descriptor instead. 
+func (*RevokeJobResponse) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{21} +} + +func (x *RevokeJobResponse) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type OCR1Config_P2PKeyBundle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerId string `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` + PublicKey string `protobuf:"bytes,2,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` +} + +func (x *OCR1Config_P2PKeyBundle) Reset() { + *x = OCR1Config_P2PKeyBundle{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OCR1Config_P2PKeyBundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OCR1Config_P2PKeyBundle) ProtoMessage() {} + +func (x *OCR1Config_P2PKeyBundle) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OCR1Config_P2PKeyBundle.ProtoReflect.Descriptor instead. 
+func (*OCR1Config_P2PKeyBundle) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *OCR1Config_P2PKeyBundle) GetPeerId() string { + if x != nil { + return x.PeerId + } + return "" +} + +func (x *OCR1Config_P2PKeyBundle) GetPublicKey() string { + if x != nil { + return x.PublicKey + } + return "" +} + +type OCR1Config_OCRKeyBundle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BundleId string `protobuf:"bytes,1,opt,name=bundle_id,json=bundleId,proto3" json:"bundle_id,omitempty"` + ConfigPublicKey string `protobuf:"bytes,2,opt,name=config_public_key,json=configPublicKey,proto3" json:"config_public_key,omitempty"` + OffchainPublicKey string `protobuf:"bytes,3,opt,name=offchain_public_key,json=offchainPublicKey,proto3" json:"offchain_public_key,omitempty"` + OnchainSigningAddress string `protobuf:"bytes,4,opt,name=onchain_signing_address,json=onchainSigningAddress,proto3" json:"onchain_signing_address,omitempty"` +} + +func (x *OCR1Config_OCRKeyBundle) Reset() { + *x = OCR1Config_OCRKeyBundle{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OCR1Config_OCRKeyBundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OCR1Config_OCRKeyBundle) ProtoMessage() {} + +func (x *OCR1Config_OCRKeyBundle) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OCR1Config_OCRKeyBundle.ProtoReflect.Descriptor instead. 
+func (*OCR1Config_OCRKeyBundle) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *OCR1Config_OCRKeyBundle) GetBundleId() string { + if x != nil { + return x.BundleId + } + return "" +} + +func (x *OCR1Config_OCRKeyBundle) GetConfigPublicKey() string { + if x != nil { + return x.ConfigPublicKey + } + return "" +} + +func (x *OCR1Config_OCRKeyBundle) GetOffchainPublicKey() string { + if x != nil { + return x.OffchainPublicKey + } + return "" +} + +func (x *OCR1Config_OCRKeyBundle) GetOnchainSigningAddress() string { + if x != nil { + return x.OnchainSigningAddress + } + return "" +} + +type OCR2Config_P2PKeyBundle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerId string `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` + PublicKey string `protobuf:"bytes,2,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` +} + +func (x *OCR2Config_P2PKeyBundle) Reset() { + *x = OCR2Config_P2PKeyBundle{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OCR2Config_P2PKeyBundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OCR2Config_P2PKeyBundle) ProtoMessage() {} + +func (x *OCR2Config_P2PKeyBundle) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OCR2Config_P2PKeyBundle.ProtoReflect.Descriptor instead. 
+func (*OCR2Config_P2PKeyBundle) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *OCR2Config_P2PKeyBundle) GetPeerId() string { + if x != nil { + return x.PeerId + } + return "" +} + +func (x *OCR2Config_P2PKeyBundle) GetPublicKey() string { + if x != nil { + return x.PublicKey + } + return "" +} + +type OCR2Config_OCRKeyBundle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BundleId string `protobuf:"bytes,1,opt,name=bundle_id,json=bundleId,proto3" json:"bundle_id,omitempty"` + ConfigPublicKey string `protobuf:"bytes,2,opt,name=config_public_key,json=configPublicKey,proto3" json:"config_public_key,omitempty"` + OffchainPublicKey string `protobuf:"bytes,3,opt,name=offchain_public_key,json=offchainPublicKey,proto3" json:"offchain_public_key,omitempty"` + OnchainSigningAddress string `protobuf:"bytes,4,opt,name=onchain_signing_address,json=onchainSigningAddress,proto3" json:"onchain_signing_address,omitempty"` +} + +func (x *OCR2Config_OCRKeyBundle) Reset() { + *x = OCR2Config_OCRKeyBundle{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OCR2Config_OCRKeyBundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OCR2Config_OCRKeyBundle) ProtoMessage() {} + +func (x *OCR2Config_OCRKeyBundle) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OCR2Config_OCRKeyBundle.ProtoReflect.Descriptor instead. 
+func (*OCR2Config_OCRKeyBundle) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *OCR2Config_OCRKeyBundle) GetBundleId() string { + if x != nil { + return x.BundleId + } + return "" +} + +func (x *OCR2Config_OCRKeyBundle) GetConfigPublicKey() string { + if x != nil { + return x.ConfigPublicKey + } + return "" +} + +func (x *OCR2Config_OCRKeyBundle) GetOffchainPublicKey() string { + if x != nil { + return x.OffchainPublicKey + } + return "" +} + +func (x *OCR2Config_OCRKeyBundle) GetOnchainSigningAddress() string { + if x != nil { + return x.OnchainSigningAddress + } + return "" +} + +type OCR2Config_Plugins struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Commit bool `protobuf:"varint,1,opt,name=commit,proto3" json:"commit,omitempty"` + Execute bool `protobuf:"varint,2,opt,name=execute,proto3" json:"execute,omitempty"` + Median bool `protobuf:"varint,3,opt,name=median,proto3" json:"median,omitempty"` + Mercury bool `protobuf:"varint,4,opt,name=mercury,proto3" json:"mercury,omitempty"` +} + +func (x *OCR2Config_Plugins) Reset() { + *x = OCR2Config_Plugins{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OCR2Config_Plugins) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OCR2Config_Plugins) ProtoMessage() {} + +func (x *OCR2Config_Plugins) ProtoReflect() protoreflect.Message { + mi := &file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OCR2Config_Plugins.ProtoReflect.Descriptor instead. 
+func (*OCR2Config_Plugins) Descriptor() ([]byte, []int) { + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP(), []int{4, 2} +} + +func (x *OCR2Config_Plugins) GetCommit() bool { + if x != nil { + return x.Commit + } + return false +} + +func (x *OCR2Config_Plugins) GetExecute() bool { + if x != nil { + return x.Execute + } + return false +} + +func (x *OCR2Config_Plugins) GetMedian() bool { + if x != nil { + return x.Median + } + return false +} + +func (x *OCR2Config_Plugins) GetMercury() bool { + if x != nil { + return x.Mercury + } + return false +} + +var File_pkg_noderpc_proto_feeds_manager_proto protoreflect.FileDescriptor + +var file_pkg_noderpc_proto_feeds_manager_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x70, 0x6b, 0x67, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x66, 0x65, 0x65, 0x64, 0x73, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x03, 0x63, 0x66, 0x6d, 0x22, 0x3b, 0x0a, 0x05, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x6d, 0x0a, 0x07, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x0a, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x18, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 
0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x2d, 0x0a, 0x11, 0x46, 0x6c, 0x75, 0x78, + 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0xf9, 0x03, 0x0a, 0x0a, 0x4f, 0x43, 0x52, 0x31, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, + 0x72, 0x61, 0x70, 0x12, 0x42, 0x0a, 0x0e, 0x70, 0x32, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x66, + 0x6d, 0x2e, 0x4f, 0x43, 0x52, 0x31, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x32, 0x50, + 0x4b, 0x65, 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x0c, 0x70, 0x32, 0x70, 0x4b, 0x65, + 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x6f, 0x63, 0x72, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x4f, 0x43, 0x52, 0x31, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x4f, 0x43, 0x52, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x0c, 0x6f, + 0x63, 0x72, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x1a, 0x46, 0x0a, 0x0c, 0x50, 0x32, 0x50, + 0x4b, 0x65, 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x79, 0x1a, 0xbf, 0x01, 0x0a, 0x0c, 0x4f, 0x43, 0x52, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x6e, 0x64, + 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, + 0x2a, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x13, 0x6f, + 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x6f, + 0x6e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x6f, 0x6e, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x22, 0xe3, 0x05, 0x0a, 0x0a, 0x4f, 0x43, 0x52, 0x32, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, + 0x69, 0x73, 0x5f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, + 0x42, 0x0a, 0x0e, 0x70, 0x32, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 
0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x4f, 0x43, + 0x52, 0x32, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x32, 0x50, 0x4b, 0x65, 0x79, 0x42, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x0c, 0x70, 0x32, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x6e, + 0x64, 0x6c, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x6f, 0x63, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x66, + 0x6d, 0x2e, 0x4f, 0x43, 0x52, 0x32, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x43, 0x52, + 0x4b, 0x65, 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x0c, 0x6f, 0x63, 0x72, 0x4b, 0x65, + 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x61, 0x64, 0x64, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x61, 0x64, 0x64, 0x72, 0x12, 0x31, 0x0a, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x4f, 0x43, 0x52, + 0x32, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, + 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x11, 0x66, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x88, 0x01, 0x01, 0x1a, 0x46, 0x0a, 0x0c, 0x50, 0x32, + 0x50, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, + 0x65, 0x79, 
0x1a, 0xbf, 0x01, 0x0a, 0x0c, 0x4f, 0x43, 0x52, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x6e, + 0x64, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x49, 0x64, + 0x12, 0x2a, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x13, + 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x66, 0x66, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x17, + 0x6f, 0x6e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x6f, + 0x6e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x1a, 0x6d, 0x0a, 0x07, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x12, + 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x06, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x72, + 0x63, 0x75, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6d, 0x65, 0x72, 0x63, + 0x75, 0x72, 0x79, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, + 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 
0x73, 0x22, 0xa9, 0x02, 0x0a, 0x0b, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x05, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x52, 0x05, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x46, 0x0a, 0x13, 0x66, 0x6c, 0x75, + 0x78, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x46, 0x6c, 0x75, + 0x78, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, + 0x66, 0x6c, 0x75, 0x78, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x30, 0x0a, 0x0b, 0x6f, 0x63, 0x72, 0x31, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x4f, 0x43, 0x52, + 0x31, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x6f, 0x63, 0x72, 0x31, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x0b, 0x6f, 0x63, 0x72, 0x32, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x4f, + 0x43, 0x52, 0x32, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x6f, 0x63, 0x72, 0x32, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x9f, 0x03, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 
0x09, 0x6a, + 0x6f, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0c, + 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x4a, 0x6f, 0x62, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x6a, 0x6f, + 0x62, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x2a, + 0x0a, 0x11, 0x69, 0x73, 0x5f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x69, 0x73, 0x42, 0x6f, 0x6f, + 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x13, 0x62, 0x6f, + 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, + 0x61, 0x70, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x61, 0x64, 0x64, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, + 0x64, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x03, 0x52, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x08, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x08, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x06, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x63, + 0x66, 0x6d, 0x2e, 0x43, 
0x68, 0x61, 0x69, 0x6e, 0x52, 0x06, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, + 0x12, 0x35, 0x0a, 0x0d, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x42, 0x0a, + 0x12, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x64, 0x4a, 0x6f, 0x62, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x15, + 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x42, 0x0a, 0x12, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, + 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6a, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x43, 0x0a, 0x13, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 
0x6c, 0x65, 0x64, 0x4a, 0x6f, 0x62, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x16, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, + 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x71, 0x0a, + 0x11, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x61, + 0x64, 0x64, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x61, 0x64, 0x64, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x24, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x22, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x23, 0x0a, 0x11, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, + 0x22, 0x0a, 0x10, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, + 
0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x22, 0x23, 0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x4a, 0x6f, 0x62, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x2a, 0x63, 0x0a, 0x07, 0x4a, 0x6f, 0x62, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x4a, 0x4f, 0x42, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, + 0x15, 0x4a, 0x4f, 0x42, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x55, 0x58, 0x5f, 0x4d, + 0x4f, 0x4e, 0x49, 0x54, 0x4f, 0x52, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x4a, 0x4f, 0x42, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x43, 0x52, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x4a, 0x4f, + 0x42, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x43, 0x52, 0x32, 0x10, 0x03, 0x2a, 0x52, 0x0a, + 0x09, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x48, + 0x41, 0x49, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x56, 0x4d, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x48, + 0x41, 0x49, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4f, 0x4c, 0x41, 0x4e, 0x41, 0x10, + 0x02, 0x32, 0xd8, 0x02, 0x0a, 0x0c, 0x46, 0x65, 0x65, 0x64, 0x73, 0x4d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x12, 0x40, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x64, 0x4a, 0x6f, + 0x62, 0x12, 0x17, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x64, + 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x63, 0x66, 0x6d, + 0x2e, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 
0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x12, 0x17, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x63, + 0x66, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, + 0x66, 0x6d, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x4a, 0x6f, 0x62, 0x12, 0x17, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x52, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, + 0x63, 0x66, 0x6d, 0x2e, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x0c, 0x43, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x6c, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x12, 0x18, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x43, 0x61, + 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x19, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, + 0x64, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xc4, 0x01, 0x0a, + 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x0a, + 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x16, 0x2e, 0x63, 0x66, 0x6d, + 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x50, 0x72, 0x6f, 
0x70, 0x6f, 0x73, 0x65, + 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x09, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x15, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x09, 0x52, 0x65, 0x76, 0x6f, 0x6b, + 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x15, 0x2e, 0x63, 0x66, 0x6d, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, + 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x63, 0x66, + 0x6d, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x42, 0x3d, 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6b, + 0x69, 0x74, 0x2f, 0x66, 0x65, 0x65, 0x64, 0x73, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_noderpc_proto_feeds_manager_proto_rawDescOnce sync.Once + file_pkg_noderpc_proto_feeds_manager_proto_rawDescData = file_pkg_noderpc_proto_feeds_manager_proto_rawDesc +) + +func file_pkg_noderpc_proto_feeds_manager_proto_rawDescGZIP() []byte { + file_pkg_noderpc_proto_feeds_manager_proto_rawDescOnce.Do(func() { + file_pkg_noderpc_proto_feeds_manager_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_noderpc_proto_feeds_manager_proto_rawDescData) + }) + return file_pkg_noderpc_proto_feeds_manager_proto_rawDescData +} + +var file_pkg_noderpc_proto_feeds_manager_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_pkg_noderpc_proto_feeds_manager_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var 
file_pkg_noderpc_proto_feeds_manager_proto_goTypes = []interface{}{ + (JobType)(0), // 0: cfm.JobType + (ChainType)(0), // 1: cfm.ChainType + (*Chain)(nil), // 2: cfm.Chain + (*Account)(nil), // 3: cfm.Account + (*FluxMonitorConfig)(nil), // 4: cfm.FluxMonitorConfig + (*OCR1Config)(nil), // 5: cfm.OCR1Config + (*OCR2Config)(nil), // 6: cfm.OCR2Config + (*ChainConfig)(nil), // 7: cfm.ChainConfig + (*UpdateNodeRequest)(nil), // 8: cfm.UpdateNodeRequest + (*UpdateNodeResponse)(nil), // 9: cfm.UpdateNodeResponse + (*ApprovedJobRequest)(nil), // 10: cfm.ApprovedJobRequest + (*ApprovedJobResponse)(nil), // 11: cfm.ApprovedJobResponse + (*HealthcheckRequest)(nil), // 12: cfm.HealthcheckRequest + (*HealthcheckResponse)(nil), // 13: cfm.HealthcheckResponse + (*RejectedJobRequest)(nil), // 14: cfm.RejectedJobRequest + (*RejectedJobResponse)(nil), // 15: cfm.RejectedJobResponse + (*CancelledJobRequest)(nil), // 16: cfm.CancelledJobRequest + (*CancelledJobResponse)(nil), // 17: cfm.CancelledJobResponse + (*ProposeJobRequest)(nil), // 18: cfm.ProposeJobRequest + (*ProposeJobResponse)(nil), // 19: cfm.ProposeJobResponse + (*DeleteJobRequest)(nil), // 20: cfm.DeleteJobRequest + (*DeleteJobResponse)(nil), // 21: cfm.DeleteJobResponse + (*RevokeJobRequest)(nil), // 22: cfm.RevokeJobRequest + (*RevokeJobResponse)(nil), // 23: cfm.RevokeJobResponse + (*OCR1Config_P2PKeyBundle)(nil), // 24: cfm.OCR1Config.P2PKeyBundle + (*OCR1Config_OCRKeyBundle)(nil), // 25: cfm.OCR1Config.OCRKeyBundle + (*OCR2Config_P2PKeyBundle)(nil), // 26: cfm.OCR2Config.P2PKeyBundle + (*OCR2Config_OCRKeyBundle)(nil), // 27: cfm.OCR2Config.OCRKeyBundle + (*OCR2Config_Plugins)(nil), // 28: cfm.OCR2Config.Plugins +} +var file_pkg_noderpc_proto_feeds_manager_proto_depIdxs = []int32{ + 1, // 0: cfm.Chain.type:type_name -> cfm.ChainType + 1, // 1: cfm.Account.chain_type:type_name -> cfm.ChainType + 24, // 2: cfm.OCR1Config.p2p_key_bundle:type_name -> cfm.OCR1Config.P2PKeyBundle + 25, // 3: 
cfm.OCR1Config.ocr_key_bundle:type_name -> cfm.OCR1Config.OCRKeyBundle + 26, // 4: cfm.OCR2Config.p2p_key_bundle:type_name -> cfm.OCR2Config.P2PKeyBundle + 27, // 5: cfm.OCR2Config.ocr_key_bundle:type_name -> cfm.OCR2Config.OCRKeyBundle + 28, // 6: cfm.OCR2Config.plugins:type_name -> cfm.OCR2Config.Plugins + 2, // 7: cfm.ChainConfig.chain:type_name -> cfm.Chain + 4, // 8: cfm.ChainConfig.flux_monitor_config:type_name -> cfm.FluxMonitorConfig + 5, // 9: cfm.ChainConfig.ocr1_config:type_name -> cfm.OCR1Config + 6, // 10: cfm.ChainConfig.ocr2_config:type_name -> cfm.OCR2Config + 0, // 11: cfm.UpdateNodeRequest.job_types:type_name -> cfm.JobType + 3, // 12: cfm.UpdateNodeRequest.accounts:type_name -> cfm.Account + 2, // 13: cfm.UpdateNodeRequest.chains:type_name -> cfm.Chain + 7, // 14: cfm.UpdateNodeRequest.chain_configs:type_name -> cfm.ChainConfig + 10, // 15: cfm.FeedsManager.ApprovedJob:input_type -> cfm.ApprovedJobRequest + 12, // 16: cfm.FeedsManager.Healthcheck:input_type -> cfm.HealthcheckRequest + 8, // 17: cfm.FeedsManager.UpdateNode:input_type -> cfm.UpdateNodeRequest + 14, // 18: cfm.FeedsManager.RejectedJob:input_type -> cfm.RejectedJobRequest + 16, // 19: cfm.FeedsManager.CancelledJob:input_type -> cfm.CancelledJobRequest + 18, // 20: cfm.NodeService.ProposeJob:input_type -> cfm.ProposeJobRequest + 20, // 21: cfm.NodeService.DeleteJob:input_type -> cfm.DeleteJobRequest + 22, // 22: cfm.NodeService.RevokeJob:input_type -> cfm.RevokeJobRequest + 11, // 23: cfm.FeedsManager.ApprovedJob:output_type -> cfm.ApprovedJobResponse + 13, // 24: cfm.FeedsManager.Healthcheck:output_type -> cfm.HealthcheckResponse + 9, // 25: cfm.FeedsManager.UpdateNode:output_type -> cfm.UpdateNodeResponse + 15, // 26: cfm.FeedsManager.RejectedJob:output_type -> cfm.RejectedJobResponse + 17, // 27: cfm.FeedsManager.CancelledJob:output_type -> cfm.CancelledJobResponse + 19, // 28: cfm.NodeService.ProposeJob:output_type -> cfm.ProposeJobResponse + 21, // 29: 
cfm.NodeService.DeleteJob:output_type -> cfm.DeleteJobResponse + 23, // 30: cfm.NodeService.RevokeJob:output_type -> cfm.RevokeJobResponse + 23, // [23:31] is the sub-list for method output_type + 15, // [15:23] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name +} + +func init() { file_pkg_noderpc_proto_feeds_manager_proto_init() } +func file_pkg_noderpc_proto_feeds_manager_proto_init() { + if File_pkg_noderpc_proto_feeds_manager_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Chain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Account); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FluxMonitorConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OCR1Config); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OCR2Config); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + 
} + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChainConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateNodeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateNodeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApprovedJobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApprovedJobResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthcheckRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthcheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RejectedJobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RejectedJobResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelledJobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelledJobResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProposeJobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProposeJobResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteJobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteJobResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RevokeJobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RevokeJobResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OCR1Config_P2PKeyBundle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OCR1Config_OCRKeyBundle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OCR2Config_P2PKeyBundle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OCR2Config_OCRKeyBundle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OCR2Config_Plugins); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_pkg_noderpc_proto_feeds_manager_proto_msgTypes[4].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_noderpc_proto_feeds_manager_proto_rawDesc, + NumEnums: 2, + NumMessages: 27, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_pkg_noderpc_proto_feeds_manager_proto_goTypes, + DependencyIndexes: file_pkg_noderpc_proto_feeds_manager_proto_depIdxs, + EnumInfos: file_pkg_noderpc_proto_feeds_manager_proto_enumTypes, + MessageInfos: file_pkg_noderpc_proto_feeds_manager_proto_msgTypes, + }.Build() + File_pkg_noderpc_proto_feeds_manager_proto = out.File + file_pkg_noderpc_proto_feeds_manager_proto_rawDesc = nil + file_pkg_noderpc_proto_feeds_manager_proto_goTypes = nil + file_pkg_noderpc_proto_feeds_manager_proto_depIdxs = nil +} diff --git a/core/services/feeds/proto/feeds_manager_wsrpc.pb.go b/core/services/feeds/proto/feeds_manager_wsrpc.pb.go new file mode 100644 index 00000000..2706ee8b --- /dev/null +++ b/core/services/feeds/proto/feeds_manager_wsrpc.pb.go @@ -0,0 +1,256 @@ +// Code generated by protoc-gen-go-wsrpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-wsrpc v0.0.1 +// - protoc v3.21.7 + +package proto + +import ( + context "context" + + wsrpc "github.com/goplugin/wsrpc" +) + +// FeedsManagerClient is the client API for FeedsManager service. 
+type FeedsManagerClient interface { + ApprovedJob(ctx context.Context, in *ApprovedJobRequest) (*ApprovedJobResponse, error) + Healthcheck(ctx context.Context, in *HealthcheckRequest) (*HealthcheckResponse, error) + UpdateNode(ctx context.Context, in *UpdateNodeRequest) (*UpdateNodeResponse, error) + RejectedJob(ctx context.Context, in *RejectedJobRequest) (*RejectedJobResponse, error) + CancelledJob(ctx context.Context, in *CancelledJobRequest) (*CancelledJobResponse, error) +} + +type feedsManagerClient struct { + cc wsrpc.ClientInterface +} + +func NewFeedsManagerClient(cc wsrpc.ClientInterface) FeedsManagerClient { + return &feedsManagerClient{cc} +} + +func (c *feedsManagerClient) ApprovedJob(ctx context.Context, in *ApprovedJobRequest) (*ApprovedJobResponse, error) { + out := new(ApprovedJobResponse) + err := c.cc.Invoke(ctx, "ApprovedJob", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *feedsManagerClient) Healthcheck(ctx context.Context, in *HealthcheckRequest) (*HealthcheckResponse, error) { + out := new(HealthcheckResponse) + err := c.cc.Invoke(ctx, "Healthcheck", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *feedsManagerClient) UpdateNode(ctx context.Context, in *UpdateNodeRequest) (*UpdateNodeResponse, error) { + out := new(UpdateNodeResponse) + err := c.cc.Invoke(ctx, "UpdateNode", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *feedsManagerClient) RejectedJob(ctx context.Context, in *RejectedJobRequest) (*RejectedJobResponse, error) { + out := new(RejectedJobResponse) + err := c.cc.Invoke(ctx, "RejectedJob", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *feedsManagerClient) CancelledJob(ctx context.Context, in *CancelledJobRequest) (*CancelledJobResponse, error) { + out := new(CancelledJobResponse) + err := c.cc.Invoke(ctx, "CancelledJob", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +// 
FeedsManagerServer is the server API for FeedsManager service. +type FeedsManagerServer interface { + ApprovedJob(context.Context, *ApprovedJobRequest) (*ApprovedJobResponse, error) + Healthcheck(context.Context, *HealthcheckRequest) (*HealthcheckResponse, error) + UpdateNode(context.Context, *UpdateNodeRequest) (*UpdateNodeResponse, error) + RejectedJob(context.Context, *RejectedJobRequest) (*RejectedJobResponse, error) + CancelledJob(context.Context, *CancelledJobRequest) (*CancelledJobResponse, error) +} + +func RegisterFeedsManagerServer(s wsrpc.ServiceRegistrar, srv FeedsManagerServer) { + s.RegisterService(&FeedsManager_ServiceDesc, srv) +} + +func _FeedsManager_ApprovedJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(ApprovedJobRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(FeedsManagerServer).ApprovedJob(ctx, in) +} + +func _FeedsManager_Healthcheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(HealthcheckRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(FeedsManagerServer).Healthcheck(ctx, in) +} + +func _FeedsManager_UpdateNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(UpdateNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(FeedsManagerServer).UpdateNode(ctx, in) +} + +func _FeedsManager_RejectedJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(RejectedJobRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(FeedsManagerServer).RejectedJob(ctx, in) +} + +func _FeedsManager_CancelledJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(CancelledJobRequest) + if err := dec(in); err != nil { + return nil, err + } + return 
srv.(FeedsManagerServer).CancelledJob(ctx, in) +} + +// FeedsManager_ServiceDesc is the wsrpc.ServiceDesc for FeedsManager service. +// It's only intended for direct use with wsrpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var FeedsManager_ServiceDesc = wsrpc.ServiceDesc{ + ServiceName: "cfm.FeedsManager", + HandlerType: (*FeedsManagerServer)(nil), + Methods: []wsrpc.MethodDesc{ + { + MethodName: "ApprovedJob", + Handler: _FeedsManager_ApprovedJob_Handler, + }, + { + MethodName: "Healthcheck", + Handler: _FeedsManager_Healthcheck_Handler, + }, + { + MethodName: "UpdateNode", + Handler: _FeedsManager_UpdateNode_Handler, + }, + { + MethodName: "RejectedJob", + Handler: _FeedsManager_RejectedJob_Handler, + }, + { + MethodName: "CancelledJob", + Handler: _FeedsManager_CancelledJob_Handler, + }, + }, +} + +// NodeServiceClient is the client API for NodeService service. +type NodeServiceClient interface { + ProposeJob(ctx context.Context, in *ProposeJobRequest) (*ProposeJobResponse, error) + DeleteJob(ctx context.Context, in *DeleteJobRequest) (*DeleteJobResponse, error) + RevokeJob(ctx context.Context, in *RevokeJobRequest) (*RevokeJobResponse, error) +} + +type nodeServiceClient struct { + cc wsrpc.ClientInterface +} + +func NewNodeServiceClient(cc wsrpc.ClientInterface) NodeServiceClient { + return &nodeServiceClient{cc} +} + +func (c *nodeServiceClient) ProposeJob(ctx context.Context, in *ProposeJobRequest) (*ProposeJobResponse, error) { + out := new(ProposeJobResponse) + err := c.cc.Invoke(ctx, "ProposeJob", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeServiceClient) DeleteJob(ctx context.Context, in *DeleteJobRequest) (*DeleteJobResponse, error) { + out := new(DeleteJobResponse) + err := c.cc.Invoke(ctx, "DeleteJob", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeServiceClient) RevokeJob(ctx context.Context, in *RevokeJobRequest) (*RevokeJobResponse, 
error) { + out := new(RevokeJobResponse) + err := c.cc.Invoke(ctx, "RevokeJob", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +// NodeServiceServer is the server API for NodeService service. +type NodeServiceServer interface { + ProposeJob(context.Context, *ProposeJobRequest) (*ProposeJobResponse, error) + DeleteJob(context.Context, *DeleteJobRequest) (*DeleteJobResponse, error) + RevokeJob(context.Context, *RevokeJobRequest) (*RevokeJobResponse, error) +} + +func RegisterNodeServiceServer(s wsrpc.ServiceRegistrar, srv NodeServiceServer) { + s.RegisterService(&NodeService_ServiceDesc, srv) +} + +func _NodeService_ProposeJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(ProposeJobRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(NodeServiceServer).ProposeJob(ctx, in) +} + +func _NodeService_DeleteJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(DeleteJobRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(NodeServiceServer).DeleteJob(ctx, in) +} + +func _NodeService_RevokeJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(RevokeJobRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(NodeServiceServer).RevokeJob(ctx, in) +} + +// NodeService_ServiceDesc is the wsrpc.ServiceDesc for NodeService service. 
+// It's only intended for direct use with wsrpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var NodeService_ServiceDesc = wsrpc.ServiceDesc{ + ServiceName: "cfm.NodeService", + HandlerType: (*NodeServiceServer)(nil), + Methods: []wsrpc.MethodDesc{ + { + MethodName: "ProposeJob", + Handler: _NodeService_ProposeJob_Handler, + }, + { + MethodName: "DeleteJob", + Handler: _NodeService_DeleteJob_Handler, + }, + { + MethodName: "RevokeJob", + Handler: _NodeService_RevokeJob_Handler, + }, + }, +} diff --git a/core/services/feeds/rpc_handlers.go b/core/services/feeds/rpc_handlers.go new file mode 100644 index 00000000..39711bea --- /dev/null +++ b/core/services/feeds/rpc_handlers.go @@ -0,0 +1,79 @@ +package feeds + +import ( + "context" + + "github.com/google/uuid" + + pb "github.com/goplugin/pluginv3.0/v2/core/services/feeds/proto" +) + +// RPCHandlers define handlers for RPC method calls from the Feeds Manager +type RPCHandlers struct { + svc Service + feedsManagerID int64 +} + +func NewRPCHandlers(svc Service, feedsManagerID int64) *RPCHandlers { + return &RPCHandlers{ + svc: svc, + feedsManagerID: feedsManagerID, + } +} + +// ProposeJob creates a new job proposal record for the feeds manager +func (h *RPCHandlers) ProposeJob(ctx context.Context, req *pb.ProposeJobRequest) (*pb.ProposeJobResponse, error) { + remoteUUID, err := uuid.Parse(req.Id) + if err != nil { + return nil, err + } + + _, err = h.svc.ProposeJob(ctx, &ProposeJobArgs{ + Spec: req.GetSpec(), + FeedsManagerID: h.feedsManagerID, + RemoteUUID: remoteUUID, + Version: int32(req.GetVersion()), + Multiaddrs: req.GetMultiaddrs(), + }) + if err != nil { + return nil, err + } + + return &pb.ProposeJobResponse{}, nil +} + +// DeleteJob deletes a job proposal record. 
+func (h *RPCHandlers) DeleteJob(ctx context.Context, req *pb.DeleteJobRequest) (*pb.DeleteJobResponse, error) { + remoteUUID, err := uuid.Parse(req.Id) + if err != nil { + return nil, err + } + + _, err = h.svc.DeleteJob(ctx, &DeleteJobArgs{ + FeedsManagerID: h.feedsManagerID, + RemoteUUID: remoteUUID, + }) + if err != nil { + return nil, err + } + + return &pb.DeleteJobResponse{}, nil +} + +// RevokeJob revokes a pending job proposal record. +func (h *RPCHandlers) RevokeJob(ctx context.Context, req *pb.RevokeJobRequest) (*pb.RevokeJobResponse, error) { + remoteUUID, err := uuid.Parse(req.Id) + if err != nil { + return nil, err + } + + _, err = h.svc.RevokeJob(ctx, &RevokeJobArgs{ + FeedsManagerID: h.feedsManagerID, + RemoteUUID: remoteUUID, + }) + if err != nil { + return nil, err + } + + return &pb.RevokeJobResponse{}, nil +} diff --git a/core/services/feeds/rpc_handlers_test.go b/core/services/feeds/rpc_handlers_test.go new file mode 100644 index 00000000..02136294 --- /dev/null +++ b/core/services/feeds/rpc_handlers_test.go @@ -0,0 +1,101 @@ +package feeds_test + +import ( + "fmt" + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + "github.com/goplugin/pluginv3.0/v2/core/services/feeds/mocks" + pb "github.com/goplugin/pluginv3.0/v2/core/services/feeds/proto" +) + +type TestRPCHandlers struct { + *feeds.RPCHandlers + + svc *mocks.Service + feedsManagerID int64 +} + +func setupTestHandlers(t *testing.T) *TestRPCHandlers { + var ( + svc = mocks.NewService(t) + feedsManagerID = int64(1) + ) + + return &TestRPCHandlers{ + RPCHandlers: feeds.NewRPCHandlers(svc, feedsManagerID), + svc: svc, + feedsManagerID: feedsManagerID, + } +} + +func Test_RPCHandlers_ProposeJob(t *testing.T) { + var ( + ctx = testutils.Context(t) + jobID = uuid.New() + nameAndExternalJobID = uuid.New() + spec = 
fmt.Sprintf(FluxMonitorTestSpecTemplate, nameAndExternalJobID, nameAndExternalJobID) + version = int64(1) + ) + h := setupTestHandlers(t) + + h.svc. + On("ProposeJob", ctx, &feeds.ProposeJobArgs{ + FeedsManagerID: h.feedsManagerID, + RemoteUUID: jobID, + Spec: spec, + Version: int32(version), + }). + Return(int64(1), nil) + + _, err := h.ProposeJob(ctx, &pb.ProposeJobRequest{ + Id: jobID.String(), + Spec: spec, + Version: version, + }) + require.NoError(t, err) +} + +func Test_RPCHandlers_DeleteJob(t *testing.T) { + var ( + ctx = testutils.Context(t) + jobID = uuid.New() + ) + h := setupTestHandlers(t) + + h.svc. + On("DeleteJob", ctx, &feeds.DeleteJobArgs{ + FeedsManagerID: h.feedsManagerID, + RemoteUUID: jobID, + }). + Return(int64(1), nil) + + _, err := h.DeleteJob(ctx, &pb.DeleteJobRequest{ + Id: jobID.String(), + }) + require.NoError(t, err) +} + +func Test_RPCHandlers_RevokeJob(t *testing.T) { + var ( + ctx = testutils.Context(t) + jobID = uuid.New() + ) + h := setupTestHandlers(t) + + h.svc. + On("RevokeJob", ctx, &feeds.RevokeJobArgs{ + FeedsManagerID: h.feedsManagerID, + RemoteUUID: jobID, + }). 
+ Return(int64(1), nil) + + _, err := h.RevokeJob(ctx, &pb.RevokeJobRequest{ + Id: jobID.String(), + }) + require.NoError(t, err) +} diff --git a/core/services/feeds/service.go b/core/services/feeds/service.go new file mode 100644 index 00000000..34d02339 --- /dev/null +++ b/core/services/feeds/service.go @@ -0,0 +1,1469 @@ +package feeds + +import ( + "context" + "database/sql" + "encoding/hex" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/lib/pq" + "github.com/pelletier/go-toml/v2" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "gopkg.in/guregu/null.v4" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + pb "github.com/goplugin/pluginv3.0/v2/core/services/feeds/proto" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" + ocr2 "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrbootstrap" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils/crypto" +) + +//go:generate mockery --quiet --name Service --output ./mocks/ --case=underscore +//go:generate mockery --quiet --dir ./proto --name FeedsManagerClient --output ./mocks/ --case=underscore + +var ( + ErrOCR2Disabled = errors.New("ocr2 is disabled") + 
ErrOCRDisabled = errors.New("ocr is disabled") + ErrSingleFeedsManager = errors.New("only a single feeds manager is supported") + ErrJobAlreadyExists = errors.New("a job for this contract address already exists - please use the 'force' option to replace it") + ErrFeedsManagerDisabled = errors.New("feeds manager is disabled") + + promJobProposalRequest = promauto.NewCounter(prometheus.CounterOpts{ + Name: "feeds_job_proposal_requests", + Help: "Metric to track job proposal requests", + }) + + promJobProposalCounts = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "feeds_job_proposal_count", + Help: "Number of job proposals for the node partitioned by status.", + }, []string{ + // Job Proposal status + "status", + }) +) + +// Service represents a behavior of the feeds service +type Service interface { + Start(ctx context.Context) error + Close() error + + CountManagers() (int64, error) + GetManager(id int64) (*FeedsManager, error) + ListManagers() ([]FeedsManager, error) + ListManagersByIDs(ids []int64) ([]FeedsManager, error) + RegisterManager(ctx context.Context, params RegisterManagerParams) (int64, error) + UpdateManager(ctx context.Context, mgr FeedsManager) error + + CreateChainConfig(ctx context.Context, cfg ChainConfig) (int64, error) + DeleteChainConfig(ctx context.Context, id int64) (int64, error) + GetChainConfig(id int64) (*ChainConfig, error) + ListChainConfigsByManagerIDs(mgrIDs []int64) ([]ChainConfig, error) + UpdateChainConfig(ctx context.Context, cfg ChainConfig) (int64, error) + + DeleteJob(ctx context.Context, args *DeleteJobArgs) (int64, error) + IsJobManaged(ctx context.Context, jobID int64) (bool, error) + ProposeJob(ctx context.Context, args *ProposeJobArgs) (int64, error) + RevokeJob(ctx context.Context, args *RevokeJobArgs) (int64, error) + SyncNodeInfo(ctx context.Context, id int64) error + + CountJobProposalsByStatus() (*JobProposalCounts, error) + GetJobProposal(id int64) (*JobProposal, error) + ListJobProposals() ([]JobProposal, 
error) + ListJobProposalsByManagersIDs(ids []int64) ([]JobProposal, error) + + ApproveSpec(ctx context.Context, id int64, force bool) error + CancelSpec(ctx context.Context, id int64) error + GetSpec(id int64) (*JobProposalSpec, error) + ListSpecsByJobProposalIDs(ids []int64) ([]JobProposalSpec, error) + RejectSpec(ctx context.Context, id int64) error + UpdateSpecDefinition(ctx context.Context, id int64, spec string) error + + Unsafe_SetConnectionsManager(ConnectionsManager) +} + +type service struct { + services.StateMachine + + orm ORM + jobORM job.ORM + q pg.Q + csaKeyStore keystore.CSA + p2pKeyStore keystore.P2P + ocr1KeyStore keystore.OCR + ocr2KeyStore keystore.OCR2 + jobSpawner job.Spawner + insecureCfg InsecureConfig + jobCfg JobConfig + ocrCfg OCRConfig + ocr2cfg OCR2Config + connMgr ConnectionsManager + legacyChains legacyevm.LegacyChainContainer + lggr logger.Logger + version string +} + +// NewService constructs a new feeds service +func NewService( + orm ORM, + jobORM job.ORM, + db *sqlx.DB, + jobSpawner job.Spawner, + keyStore keystore.Master, + insecureCfg InsecureConfig, + jobCfg JobConfig, + ocrCfg OCRConfig, + ocr2Cfg OCR2Config, + dbCfg pg.QConfig, + legacyChains legacyevm.LegacyChainContainer, + lggr logger.Logger, + version string, +) *service { + lggr = lggr.Named("Feeds") + svc := &service{ + orm: orm, + jobORM: jobORM, + q: pg.NewQ(db, lggr, dbCfg), + jobSpawner: jobSpawner, + p2pKeyStore: keyStore.P2P(), + csaKeyStore: keyStore.CSA(), + ocr1KeyStore: keyStore.OCR(), + ocr2KeyStore: keyStore.OCR2(), + insecureCfg: insecureCfg, + jobCfg: jobCfg, + ocrCfg: ocrCfg, + ocr2cfg: ocr2Cfg, + connMgr: newConnectionsManager(lggr), + legacyChains: legacyChains, + lggr: lggr, + version: version, + } + + return svc +} + +type RegisterManagerParams struct { + Name string + URI string + PublicKey crypto.PublicKey + ChainConfigs []ChainConfig +} + +// RegisterManager registers a new ManagerService and attempts to establish a +// connection. 
+// +// Only a single feeds manager is currently supported. +func (s *service) RegisterManager(ctx context.Context, params RegisterManagerParams) (int64, error) { + count, err := s.CountManagers() + if err != nil { + return 0, err + } + if count >= 1 { + return 0, ErrSingleFeedsManager + } + + mgr := FeedsManager{ + Name: params.Name, + URI: params.URI, + PublicKey: params.PublicKey, + } + + var id int64 + q := s.q.WithOpts(pg.WithParentCtx(ctx)) + err = q.Transaction(func(tx pg.Queryer) error { + var txerr error + + id, txerr = s.orm.CreateManager(&mgr, pg.WithQueryer(tx)) + if err != nil { + return txerr + } + + if _, txerr = s.orm.CreateBatchChainConfig(params.ChainConfigs, pg.WithQueryer(tx)); txerr != nil { + return txerr + } + + return nil + }) + + privkey, err := s.getCSAPrivateKey() + if err != nil { + return 0, err + } + + // Establish a connection + mgr.ID = id + s.connectFeedManager(ctx, mgr, privkey) + + return id, nil +} + +// SyncNodeInfo syncs the node's information with FMS +func (s *service) SyncNodeInfo(ctx context.Context, id int64) error { + // Get the FMS RPC client + fmsClient, err := s.connMgr.GetClient(id) + if err != nil { + return errors.Wrap(err, "could not fetch client") + } + + cfgs, err := s.orm.ListChainConfigsByManagerIDs([]int64{id}) + if err != nil { + return errors.Wrap(err, "could not fetch chain configs") + } + + cfgMsgs := make([]*pb.ChainConfig, 0, len(cfgs)) + for _, cfg := range cfgs { + cfgMsg, msgErr := s.newChainConfigMsg(cfg) + if msgErr != nil { + s.lggr.Errorf("SyncNodeInfo: %v", msgErr) + + continue + } + + cfgMsgs = append(cfgMsgs, cfgMsg) + } + + if _, err = fmsClient.UpdateNode(ctx, &pb.UpdateNodeRequest{ + Version: s.version, + ChainConfigs: cfgMsgs, + }); err != nil { + return err + } + + return nil +} + +// UpdateManager updates the feed manager details, takes down the +// connection and reestablishes a new connection with the updated public key. 
+func (s *service) UpdateManager(ctx context.Context, mgr FeedsManager) error { + q := s.q.WithOpts(pg.WithParentCtx(ctx)) + err := q.Transaction(func(tx pg.Queryer) error { + txerr := s.orm.UpdateManager(mgr, pg.WithQueryer(tx)) + if txerr != nil { + return errors.Wrap(txerr, "could not update manager") + } + + return nil + }) + if err != nil { + return err + } + + if err := s.restartConnection(ctx, mgr); err != nil { + s.lggr.Errorf("could not restart FMS connection: %w", err) + } + + return nil +} + +// ListManagerServices lists all the manager services. +func (s *service) ListManagers() ([]FeedsManager, error) { + managers, err := s.orm.ListManagers() + if err != nil { + return nil, errors.Wrap(err, "failed to get a list of managers") + } + + for i := range managers { + managers[i].IsConnectionActive = s.connMgr.IsConnected(managers[i].ID) + } + + return managers, nil +} + +// GetManager gets a manager service by id. +func (s *service) GetManager(id int64) (*FeedsManager, error) { + manager, err := s.orm.GetManager(id) + if err != nil { + return nil, errors.Wrap(err, "failed to get manager by ID") + } + + manager.IsConnectionActive = s.connMgr.IsConnected(manager.ID) + return manager, nil +} + +// ListManagersByIDs get managers services by ids. +func (s *service) ListManagersByIDs(ids []int64) ([]FeedsManager, error) { + managers, err := s.orm.ListManagersByIDs(ids) + if err != nil { + return nil, errors.Wrap(err, "failed to list managers by IDs") + } + + for _, manager := range managers { + manager.IsConnectionActive = s.connMgr.IsConnected(manager.ID) + } + + return managers, nil +} + +// CountManagers gets the total number of manager services +func (s *service) CountManagers() (int64, error) { + return s.orm.CountManagers() +} + +// CreateChainConfig creates a chain config. 
+func (s *service) CreateChainConfig(ctx context.Context, cfg ChainConfig) (int64, error) { + var err error + if cfg.AdminAddress != "" { + _, err = common.NewMixedcaseAddressFromString(cfg.AdminAddress) + if err != nil { + return 0, fmt.Errorf("invalid admin address: %v", cfg.AdminAddress) + } + } + + id, err := s.orm.CreateChainConfig(cfg) + if err != nil { + return 0, errors.Wrap(err, "CreateChainConfig failed") + } + + mgr, err := s.orm.GetManager(cfg.FeedsManagerID) + if err != nil { + return 0, errors.Wrap(err, "CreateChainConfig: failed to fetch manager") + } + + if err := s.SyncNodeInfo(ctx, mgr.ID); err != nil { + s.lggr.Infof("FMS: Unable to sync node info: %w", err) + } + + return id, nil +} + +// DeleteChainConfig deletes the chain config by id. +func (s *service) DeleteChainConfig(ctx context.Context, id int64) (int64, error) { + cfg, err := s.orm.GetChainConfig(id) + if err != nil { + return 0, errors.Wrap(err, "DeleteChainConfig failed: could not get chain config") + } + + _, err = s.orm.DeleteChainConfig(id) + if err != nil { + return 0, errors.Wrap(err, "DeleteChainConfig failed") + } + + mgr, err := s.orm.GetManager(cfg.FeedsManagerID) + if err != nil { + return 0, errors.Wrap(err, "DeleteChainConfig: failed to fetch manager") + } + + if err := s.SyncNodeInfo(ctx, mgr.ID); err != nil { + s.lggr.Infof("FMS: Unable to sync node info: %w", err) + } + + return id, nil +} + +func (s *service) GetChainConfig(id int64) (*ChainConfig, error) { + cfg, err := s.orm.GetChainConfig(id) + if err != nil { + return nil, errors.Wrap(err, "GetChainConfig failed") + } + + return cfg, nil +} + +func (s *service) ListChainConfigsByManagerIDs(mgrIDs []int64) ([]ChainConfig, error) { + cfgs, err := s.orm.ListChainConfigsByManagerIDs(mgrIDs) + + return cfgs, errors.Wrap(err, "ListChainConfigsByManagerIDs failed") +} + +func (s *service) UpdateChainConfig(ctx context.Context, cfg ChainConfig) (int64, error) { + var err error + if cfg.AdminAddress != "" { + _, err = 
common.NewMixedcaseAddressFromString(cfg.AdminAddress) + if err != nil { + return 0, fmt.Errorf("invalid admin address: %v", cfg.AdminAddress) + } + } + + id, err := s.orm.UpdateChainConfig(cfg) + if err != nil { + return 0, errors.Wrap(err, "UpdateChainConfig failed") + } + + ccfg, err := s.orm.GetChainConfig(cfg.ID) + if err != nil { + return 0, errors.Wrap(err, "UpdateChainConfig failed: could not get chain config") + } + + if err := s.SyncNodeInfo(ctx, ccfg.FeedsManagerID); err != nil { + s.lggr.Infof("FMS: Unable to sync node info: %w", err) + } + + return id, nil +} + +// Lists all JobProposals +// +// When we support multiple feed managers, we will need to change this to filter +// by feeds manager +func (s *service) ListJobProposals() ([]JobProposal, error) { + return s.orm.ListJobProposals() +} + +// ListJobProposalsByManagersIDs gets job proposals by feeds managers IDs +func (s *service) ListJobProposalsByManagersIDs(ids []int64) ([]JobProposal, error) { + return s.orm.ListJobProposalsByManagersIDs(ids) +} + +// DeleteJobArgs are the arguments to provide to the DeleteJob method. +type DeleteJobArgs struct { + FeedsManagerID int64 + RemoteUUID uuid.UUID +} + +// DeleteJob deletes a job proposal if it exist. The feeds manager id check +// ensures that only the intended feed manager can make this request. +func (s *service) DeleteJob(ctx context.Context, args *DeleteJobArgs) (int64, error) { + proposal, err := s.orm.GetJobProposalByRemoteUUID(args.RemoteUUID) + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + return 0, errors.Wrap(err, "GetJobProposalByRemoteUUID failed to check existence of job proposal") + } + + return 0, errors.Wrap(err, "GetJobProposalByRemoteUUID did not find any proposals to delete") + } + + logger := s.lggr.With( + "job_proposal_id", proposal.ID, + ) + + // Ensure that if the job proposal exists, that it belongs to the feeds + // manager which previously proposed a job using the remote UUID. 
+ if args.FeedsManagerID != proposal.FeedsManagerID { + return 0, errors.New("cannot delete a job proposal belonging to another feeds manager") + } + + pctx := pg.WithParentCtx(ctx) + if err = s.orm.DeleteProposal(proposal.ID, pctx); err != nil { + s.lggr.Errorw("Failed to delete the proposal", "err", err) + + return 0, errors.Wrap(err, "DeleteProposal failed") + } + + if err = s.observeJobProposalCounts(); err != nil { + logger.Errorw("Failed to push metrics for job proposal deletion", err) + } + + return proposal.ID, nil +} + +// RevokeJobArgs are the arguments to provide the RevokeJob method +type RevokeJobArgs struct { + FeedsManagerID int64 + RemoteUUID uuid.UUID +} + +// RevokeJob revokes a pending job proposal if it exist. The feeds manager +// id check ensures that only the intended feed manager can make this request. +func (s *service) RevokeJob(ctx context.Context, args *RevokeJobArgs) (int64, error) { + proposal, err := s.orm.GetJobProposalByRemoteUUID(args.RemoteUUID) + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + return 0, errors.Wrap(err, "GetJobProposalByRemoteUUID failed to check existence of job proposal") + } + + return 0, errors.Wrap(err, "GetJobProposalByRemoteUUID did not find any proposals to revoke") + } + + // Ensure that if the job proposal exists, that it belongs to the feeds + // manager which previously proposed a job using the remote UUID. 
+ if args.FeedsManagerID != proposal.FeedsManagerID { + return 0, errors.New("cannot revoke a job proposal belonging to another feeds manager") + } + + // get the latest spec for the proposal + latest, err := s.orm.GetLatestSpec(proposal.ID) + if err != nil { + return 0, errors.Wrap(err, "GetLatestSpec failed to get latest spec") + } + + if canRevoke := s.isRevokable(proposal.Status, latest.Status); !canRevoke { + return 0, errors.New("only pending job specs can be revoked") + } + + pctx := pg.WithParentCtx(ctx) + if err = s.orm.RevokeSpec(latest.ID, pctx); err != nil { + s.lggr.Errorw("Failed to revoke the proposal", "err", err) + + return 0, errors.Wrap(err, "RevokeSpec failed") + } + + logger := s.lggr.With( + "job_proposal_id", proposal.ID, + "job_proposal_spec_id", latest.ID, + ) + + if err = s.observeJobProposalCounts(); err != nil { + logger.Errorw("Failed to push metrics for revoke job", err) + } + + return proposal.ID, nil +} + +// ProposeJobArgs are the arguments to provide to the ProposeJob method. +type ProposeJobArgs struct { + FeedsManagerID int64 + RemoteUUID uuid.UUID + Multiaddrs pq.StringArray + Version int32 + Spec string +} + +// ProposeJob creates a job proposal if it does not exist. If it already exists +// and a new version is provided, a new spec is created. +// +// The feeds manager id check exists for support of multiple feeds managers in +// the future so that in the (very slim) off chance that the same uuid is +// generated by another feeds manager or they maliciously send an existing uuid +// belonging to another feeds manager, we do not update it. 
+func (s *service) ProposeJob(ctx context.Context, args *ProposeJobArgs) (int64, error) { + // Validate the args + if err := s.validateProposeJobArgs(*args); err != nil { + return 0, err + } + + existing, err := s.orm.GetJobProposalByRemoteUUID(args.RemoteUUID) + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + return 0, errors.Wrap(err, "failed to check existence of job proposal") + } + } + + // Validation for existing job proposals + if err == nil { + // Ensure that if the job proposal exists, that it belongs to the feeds + // manager which previously proposed a job using the remote UUID. + if args.FeedsManagerID != existing.FeedsManagerID { + return 0, errors.New("cannot update a job proposal belonging to another feeds manager") + } + + // Check the version being proposed has not been previously proposed. + var exists bool + exists, err = s.orm.ExistsSpecByJobProposalIDAndVersion(existing.ID, args.Version) + if err != nil { + return 0, errors.Wrap(err, "failed to check existence of spec") + } + + if exists { + return 0, errors.New("proposed job spec version already exists") + } + } + + logger := s.lggr.With( + "job_proposal_remote_uuid", args.RemoteUUID, + ) + + var id int64 + q := s.q.WithOpts(pg.WithParentCtx(ctx)) + err = q.Transaction(func(tx pg.Queryer) error { + var txerr error + + // Parse the Job Spec TOML to extract the name + name := extractName(args.Spec) + + // Upsert job proposal + id, txerr = s.orm.UpsertJobProposal(&JobProposal{ + Name: name, + RemoteUUID: args.RemoteUUID, + Status: JobProposalStatusPending, + FeedsManagerID: args.FeedsManagerID, + Multiaddrs: args.Multiaddrs, + }, pg.WithQueryer(tx)) + if txerr != nil { + return errors.Wrap(txerr, "failed to upsert job proposal") + } + + // Create the spec version + _, txerr = s.orm.CreateSpec(JobProposalSpec{ + Definition: args.Spec, + Status: SpecStatusPending, + Version: args.Version, + JobProposalID: id, + }, pg.WithQueryer(tx)) + if txerr != nil { + return errors.Wrap(txerr, "failed 
to create spec") + } + + return nil + }) + if err != nil { + return 0, err + } + + // Track the given job proposal request + promJobProposalRequest.Inc() + + if err = s.observeJobProposalCounts(); err != nil { + logger.Errorw("Failed to push metrics for propose job", err) + } + + return id, nil +} + +// GetJobProposal gets a job proposal by id. +func (s *service) GetJobProposal(id int64) (*JobProposal, error) { + return s.orm.GetJobProposal(id) +} + +// CountJobProposalsByStatus returns the count of job proposals with a given status. +func (s *service) CountJobProposalsByStatus() (*JobProposalCounts, error) { + return s.orm.CountJobProposalsByStatus() +} + +// RejectSpec rejects a spec. +func (s *service) RejectSpec(ctx context.Context, id int64) error { + pctx := pg.WithParentCtx(ctx) + + spec, err := s.orm.GetSpec(id, pctx) + if err != nil { + return errors.Wrap(err, "orm: job proposal spec") + } + + // Validate + if spec.Status != SpecStatusPending { + return errors.New("must be a pending job proposal spec") + } + + proposal, err := s.orm.GetJobProposal(spec.JobProposalID, pctx) + if err != nil { + return errors.Wrap(err, "orm: job proposal") + } + + fmsClient, err := s.connMgr.GetClient(proposal.FeedsManagerID) + if err != nil { + return errors.Wrap(err, "fms rpc client is not connected") + } + + logger := s.lggr.With( + "job_proposal_id", proposal.ID, + "job_proposal_spec_id", id, + ) + + q := s.q.WithOpts(pctx) + err = q.Transaction(func(tx pg.Queryer) error { + if err = s.orm.RejectSpec(id, pg.WithQueryer(tx)); err != nil { + return err + } + + if _, err = fmsClient.RejectedJob(ctx, &pb.RejectedJobRequest{ + Uuid: proposal.RemoteUUID.String(), + Version: int64(spec.Version), + }); err != nil { + return err + } + + return nil + }) + if err != nil { + return errors.Wrap(err, "could not reject job proposal") + } + + if err = s.observeJobProposalCounts(); err != nil { + logger.Errorw("Failed to push metrics for job rejection", err) + } + + return nil +} + +// 
IsJobManaged determines is a job is managed by the Feeds Manager. +func (s *service) IsJobManaged(ctx context.Context, jobID int64) (bool, error) { + return s.orm.IsJobManaged(jobID, pg.WithParentCtx(ctx)) +} + +// ApproveSpec approves a spec for a job proposal and creates a job with the +// spec. +func (s *service) ApproveSpec(ctx context.Context, id int64, force bool) error { + pctx := pg.WithParentCtx(ctx) + + spec, err := s.orm.GetSpec(id, pctx) + if err != nil { + return errors.Wrap(err, "orm: job proposal spec") + } + + proposal, err := s.orm.GetJobProposal(spec.JobProposalID, pctx) + if err != nil { + return errors.Wrap(err, "orm: job proposal") + } + + if err = s.isApprovable(proposal.Status, proposal.ID, spec.Status, spec.ID); err != nil { + return err + } + + logger := s.lggr.With( + "job_proposal_id", proposal.ID, + "job_proposal_spec_id", id, + ) + + fmsClient, err := s.connMgr.GetClient(proposal.FeedsManagerID) + if err != nil { + logger.Errorw("Failed to get FMS Client", "err", err) + + return errors.Wrap(err, "fms rpc client") + } + + j, err := s.generateJob(spec.Definition) + if err != nil { + return errors.Wrap(err, "could not generate job from spec") + } + + // All job specs should have external_job_ids + if j.ExternalJobID == uuid.Nil { + return errors.New("failed to approve job spec due to missing ExternalJobID in spec") + } + + // Check that the bridges exist + if err = s.jobORM.AssertBridgesExist(j.Pipeline); err != nil { + logger.Errorw("Failed to approve job spec due to bridge check", "err", err.Error()) + + return errors.Wrap(err, "failed to approve job spec due to bridge check") + } + + q := s.q.WithOpts(pctx) + err = q.Transaction(func(tx pg.Queryer) error { + var ( + txerr error + existingJobID int32 + + pgOpts = pg.WithQueryer(tx) + ) + + // Use the external job id to check if a job already exists + foundJob, txerr := s.jobORM.FindJobByExternalJobID(j.ExternalJobID, pgOpts) + if txerr != nil { + // Return an error if the repository 
errors. If there is a not found + // error we want to continue with approving the job. + if !errors.Is(txerr, sql.ErrNoRows) { + return errors.Wrap(txerr, "FindJobByExternalJobID failed") + } + } + + if txerr == nil { + existingJobID = foundJob.ID + } + + // If no job was found by external job id, check if a job exists by address + if existingJobID == 0 { + switch j.Type { + case job.OffchainReporting, job.FluxMonitor: + existingJobID, txerr = s.findExistingJobForOCRFlux(j, pgOpts) + if txerr != nil { + // Return an error if the repository errors. If there is a not found + // error we want to continue with approving the job. + if !errors.Is(txerr, sql.ErrNoRows) { + return errors.Wrap(txerr, "FindJobIDByAddress failed") + } + } + case job.OffchainReporting2, job.Bootstrap: + existingJobID, txerr = s.findExistingJobForOCR2(j, pgOpts) + if txerr != nil { + // Return an error if the repository errors. If there is a not found + // error we want to continue with approving the job. + if !errors.Is(txerr, sql.ErrNoRows) { + return errors.Wrap(txerr, "FindOCR2JobIDByAddress failed") + } + } + default: + return errors.Errorf("unsupported job type when approving job proposal specs: %s", j.Type) + } + } + + // Remove the existing job since a job was found + if existingJobID != 0 { + // Do not proceed to remove the running job unless the force flag is true + if !force { + return ErrJobAlreadyExists + } + + // Check if the job is managed by FMS + approvedSpec, serr := s.orm.GetApprovedSpec(proposal.ID, pgOpts) + if serr != nil { + if !errors.Is(serr, sql.ErrNoRows) { + logger.Errorw("Failed to get approved spec", "err", serr) + + // Return an error for any other errors fetching the + // approved spec + return errors.Wrap(serr, "GetApprovedSpec failed") + } + } + + // If a spec is found, cancel the existing job spec + if serr == nil { + if cerr := s.orm.CancelSpec(approvedSpec.ID, pgOpts); cerr != nil { + logger.Errorw("Failed to delete the cancel the spec", "err", cerr) + + 
return cerr + } + } + + // Delete the job + if serr = s.jobSpawner.DeleteJob(existingJobID, pgOpts); serr != nil { + logger.Errorw("Failed to delete the job", "err", serr) + + return errors.Wrap(serr, "DeleteJob failed") + } + } + + // Create the job + if txerr = s.jobSpawner.CreateJob(j, pgOpts); txerr != nil { + logger.Errorw("Failed to create job", "err", txerr) + + return txerr + } + + // Approve the job proposal spec + if txerr = s.orm.ApproveSpec(id, j.ExternalJobID, pgOpts); txerr != nil { + logger.Errorw("Failed to approve spec", "err", txerr) + + return txerr + } + + // Send to FMS Client + if _, txerr = fmsClient.ApprovedJob(ctx, &pb.ApprovedJobRequest{ + Uuid: proposal.RemoteUUID.String(), + Version: int64(spec.Version), + }); txerr != nil { + logger.Errorw("Failed to approve job to FMS", "err", txerr) + + return txerr + } + + return nil + }) + if err != nil { + return errors.Wrap(err, "could not approve job proposal") + } + + if err = s.observeJobProposalCounts(); err != nil { + logger.Errorw("Failed to push metrics for job approval", err) + } + + return nil +} + +// CancelSpec cancels a spec for a job proposal. 
+func (s *service) CancelSpec(ctx context.Context, id int64) error { + pctx := pg.WithParentCtx(ctx) + + spec, err := s.orm.GetSpec(id, pctx) + if err != nil { + return errors.Wrap(err, "orm: job proposal spec") + } + + if spec.Status != SpecStatusApproved { + return errors.New("must be an approved job proposal spec") + } + + jp, err := s.orm.GetJobProposal(spec.JobProposalID, pg.WithParentCtx(ctx)) + if err != nil { + return errors.Wrap(err, "orm: job proposal") + } + + fmsClient, err := s.connMgr.GetClient(jp.FeedsManagerID) + if err != nil { + return errors.Wrap(err, "fms rpc client") + } + + logger := s.lggr.With( + "job_proposal_id", jp.ID, + "job_proposal_spec_id", id, + ) + + q := s.q.WithOpts(pctx) + err = q.Transaction(func(tx pg.Queryer) error { + var ( + txerr error + pgOpts = pg.WithQueryer(tx) + ) + + if txerr = s.orm.CancelSpec(id, pgOpts); txerr != nil { + return txerr + } + + // Delete the job + if jp.ExternalJobID.Valid { + j, txerr := s.jobORM.FindJobByExternalJobID(jp.ExternalJobID.UUID, pgOpts) + if txerr != nil { + // Return an error if the repository errors. If there is a not found error we want + // to continue with cancelling the spec but we won't have to cancel any jobs. + if !errors.Is(txerr, sql.ErrNoRows) { + return errors.Wrap(txerr, "FindJobByExternalJobID failed") + } + } + + if txerr == nil { + if serr := s.jobSpawner.DeleteJob(j.ID, pgOpts); serr != nil { + return errors.Wrap(serr, "DeleteJob failed") + } + } + } + + // Send to FMS Client + if _, err = fmsClient.CancelledJob(ctx, &pb.CancelledJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }); err != nil { + return err + } + + return nil + }) + if err != nil { + return err + } + + if err = s.observeJobProposalCounts(); err != nil { + logger.Errorw("Failed to push metrics for job cancellation", err) + } + + return nil +} + +// ListSpecsByJobProposalIDs gets the specs which belong to the job proposal ids. 
+func (s *service) ListSpecsByJobProposalIDs(ids []int64) ([]JobProposalSpec, error) { + return s.orm.ListSpecsByJobProposalIDs(ids) +} + +// GetSpec gets the spec details by id. +func (s *service) GetSpec(id int64) (*JobProposalSpec, error) { + return s.orm.GetSpec(id) +} + +// UpdateSpecDefinition updates the spec's TOML definition. +func (s *service) UpdateSpecDefinition(ctx context.Context, id int64, defn string) error { + pctx := pg.WithParentCtx(ctx) + + spec, err := s.orm.GetSpec(id, pctx) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return errors.Wrap(err, "job proposal spec does not exist") + } + + return errors.Wrap(err, "database error") + } + + if !spec.CanEditDefinition() { + return errors.New("must be a pending or cancelled spec") + } + + // Update the spec definition + if err = s.orm.UpdateSpecDefinition(id, defn, pctx); err != nil { + return errors.Wrap(err, "could not update job proposal") + } + + return nil +} + +// Start starts the service. +func (s *service) Start(ctx context.Context) error { + return s.StartOnce("FeedsService", func() error { + privkey, err := s.getCSAPrivateKey() + if err != nil { + return err + } + + // We only support a single feeds manager right now + mgrs, err := s.ListManagers() + if err != nil { + return err + } + if len(mgrs) < 1 { + s.lggr.Info("no feeds managers registered") + + return nil + } + + mgr := mgrs[0] + s.connectFeedManager(ctx, mgr, privkey) + + if err = s.observeJobProposalCounts(); err != nil { + s.lggr.Error("failed to observe job proposal count when starting service", err) + } + + return nil + }) +} + +// Close shuts down the service +func (s *service) Close() error { + return s.StopOnce("FeedsService", func() error { + // This blocks until it finishes + s.connMgr.Close() + + return nil + }) +} + +// connectFeedManager connects to a feeds manager +func (s *service) connectFeedManager(ctx context.Context, mgr FeedsManager, privkey []byte) { + s.connMgr.Connect(ConnectOpts{ + FeedsManagerID: 
mgr.ID, + URI: mgr.URI, + Privkey: privkey, + Pubkey: mgr.PublicKey, + Handlers: &RPCHandlers{ + feedsManagerID: mgr.ID, + svc: s, + }, + OnConnect: func(pb.FeedsManagerClient) { + // Sync the node's information with FMS once connected + err := s.SyncNodeInfo(ctx, mgr.ID) + if err != nil { + s.lggr.Infof("Error syncing node info: %v", err) + } + }, + }) +} + +// getCSAPrivateKey gets the server's CSA private key +func (s *service) getCSAPrivateKey() (privkey []byte, err error) { + // Fetch the server's public key + keys, err := s.csaKeyStore.GetAll() + if err != nil { + return privkey, err + } + if len(keys) < 1 { + return privkey, errors.New("CSA key does not exist") + } + return keys[0].Raw(), nil +} + +// observeJobProposalCounts is a helper method that queries the repository for the count of +// job proposals by status and then updates prometheus gauges. +func (s *service) observeJobProposalCounts() error { + counts, err := s.CountJobProposalsByStatus() + if err != nil { + return errors.Wrap(err, "failed to fetch counts of job proposals") + } + + // Transform counts into prometheus metrics. + metrics := counts.toMetrics() + + // Set the prometheus gauge metrics. + for _, status := range []JobProposalStatus{JobProposalStatusPending, JobProposalStatusApproved, + JobProposalStatusCancelled, JobProposalStatusRejected, JobProposalStatusDeleted, JobProposalStatusRevoked} { + + status := status + + promJobProposalCounts.With(prometheus.Labels{"status": string(status)}).Set(metrics[status]) + } + + return nil +} + +// Unsafe_SetConnectionsManager sets the ConnectionsManager on the service. +// +// We need to be able to inject a mock for the client to facilitate integration +// tests. +// +// ONLY TO BE USED FOR TESTING. 
+func (s *service) Unsafe_SetConnectionsManager(connMgr ConnectionsManager) { + s.connMgr = connMgr +} + +// findExistingJobForOCR2 looks for existing job for OCR2 +func (s *service) findExistingJobForOCR2(j *job.Job, qopts pg.QOpt) (int32, error) { + var contractID string + var feedID *common.Hash + + switch j.Type { + case job.OffchainReporting2: + contractID = j.OCR2OracleSpec.ContractID + feedID = j.OCR2OracleSpec.FeedID + case job.Bootstrap: + contractID = j.BootstrapSpec.ContractID + if j.BootstrapSpec.FeedID != nil { + feedID = j.BootstrapSpec.FeedID + } + case job.FluxMonitor, job.OffchainReporting: + return 0, errors.Errorf("contradID and feedID not applicable for job type: %s", j.Type) + default: + return 0, errors.Errorf("unsupported job type: %s", j.Type) + } + + return s.jobORM.FindOCR2JobIDByAddress(contractID, feedID, qopts) +} + +// findExistingJobForOCRFlux looks for existing job for OCR or flux +func (s *service) findExistingJobForOCRFlux(j *job.Job, qopts pg.QOpt) (int32, error) { + var address ethkey.EIP55Address + var evmChainID *big.Big + + switch j.Type { + case job.OffchainReporting: + address = j.OCROracleSpec.ContractAddress + evmChainID = j.OCROracleSpec.EVMChainID + case job.FluxMonitor: + address = j.FluxMonitorSpec.ContractAddress + evmChainID = j.FluxMonitorSpec.EVMChainID + case job.OffchainReporting2, job.Bootstrap: + return 0, errors.Errorf("epi55address and evmchainID not applicable for job type: %s", j.Type) + default: + return 0, errors.Errorf("unsupported job type: %s", j.Type) + } + + return s.jobORM.FindJobIDByAddress(address, evmChainID, qopts) +} + +// generateJob validates and generates a job from a spec. 
+func (s *service) generateJob(spec string) (*job.Job, error) { + jobType, err := job.ValidateSpec(spec) + if err != nil { + return nil, errors.Wrap(err, "failed to parse job spec TOML") + } + + var js job.Job + switch jobType { + case job.OffchainReporting: + if !s.ocrCfg.Enabled() { + return nil, ErrOCRDisabled + } + js, err = ocr.ValidatedOracleSpecToml(s.legacyChains, spec) + case job.OffchainReporting2: + if !s.ocr2cfg.Enabled() { + return nil, ErrOCR2Disabled + } + js, err = ocr2.ValidatedOracleSpecToml(s.ocr2cfg, s.insecureCfg, spec) + case job.Bootstrap: + if !s.ocr2cfg.Enabled() { + return nil, ErrOCR2Disabled + } + js, err = ocrbootstrap.ValidatedBootstrapSpecToml(spec) + case job.FluxMonitor: + js, err = fluxmonitorv2.ValidatedFluxMonitorSpec(s.jobCfg, spec) + default: + return nil, errors.Errorf("unknown job type: %s", jobType) + + } + if err != nil { + return nil, err + } + + return &js, nil +} + +// newChainConfigMsg generates a chain config protobuf message. +func (s *service) newChainConfigMsg(cfg ChainConfig) (*pb.ChainConfig, error) { + // Only supports EVM Chains + if cfg.ChainType != "EVM" { + return nil, errors.New("unsupported chain type") + } + + ocr1Cfg, err := s.newOCR1ConfigMsg(cfg.OCR1Config) + if err != nil { + return nil, err + } + + ocr2Cfg, err := s.newOCR2ConfigMsg(cfg.OCR2Config) + if err != nil { + return nil, err + } + + return &pb.ChainConfig{ + Chain: &pb.Chain{ + Id: cfg.ChainID, + Type: pb.ChainType_CHAIN_TYPE_EVM, + }, + AccountAddress: cfg.AccountAddress, + AdminAddress: cfg.AdminAddress, + FluxMonitorConfig: s.newFluxMonitorConfigMsg(cfg.FluxMonitorConfig), + Ocr1Config: ocr1Cfg, + Ocr2Config: ocr2Cfg, + }, nil +} + +// newFMConfigMsg generates a FMConfig protobuf message. Flux Monitor does not +// have any configuration but this is here for consistency. 
+func (*service) newFluxMonitorConfigMsg(cfg FluxMonitorConfig) *pb.FluxMonitorConfig { + return &pb.FluxMonitorConfig{Enabled: cfg.Enabled} +} + +// newOCR1ConfigMsg generates a OCR1Config protobuf message. +func (s *service) newOCR1ConfigMsg(cfg OCR1Config) (*pb.OCR1Config, error) { + if !cfg.Enabled { + return &pb.OCR1Config{Enabled: false}, nil + } + + msg := &pb.OCR1Config{ + Enabled: true, + IsBootstrap: cfg.IsBootstrap, + Multiaddr: cfg.Multiaddr.ValueOrZero(), + } + + // Fetch the P2P key bundle + if cfg.P2PPeerID.Valid { + peerID, err := p2pkey.MakePeerID(cfg.P2PPeerID.String) + if err != nil { + return nil, errors.Wrapf(err, "invalid peer id: %s", cfg.P2PPeerID.String) + } + p2pKey, err := s.p2pKeyStore.Get(peerID) + if err != nil { + return nil, errors.Wrapf(err, "p2p key not found: %s", cfg.P2PPeerID.String) + } + + msg.P2PKeyBundle = &pb.OCR1Config_P2PKeyBundle{ + PeerId: p2pKey.PeerID().String(), + PublicKey: p2pKey.PublicKeyHex(), + } + } + + if cfg.KeyBundleID.Valid { + ocrKey, err := s.ocr1KeyStore.Get(cfg.KeyBundleID.String) + if err != nil { + return nil, errors.Wrapf(err, "ocr key not found: %s", cfg.KeyBundleID.String) + } + + msg.OcrKeyBundle = &pb.OCR1Config_OCRKeyBundle{ + BundleId: ocrKey.GetID(), + ConfigPublicKey: ocrkey.ConfigPublicKey(ocrKey.PublicKeyConfig()).String(), + OffchainPublicKey: ocrKey.OffChainSigning.PublicKey().String(), + OnchainSigningAddress: ocrKey.OnChainSigning.Address().String(), + } + } + + return msg, nil +} + +// newOCR2ConfigMsg generates a OCR2ConfigModel protobuf message. 
+func (s *service) newOCR2ConfigMsg(cfg OCR2ConfigModel) (*pb.OCR2Config, error) { + if !cfg.Enabled { + return &pb.OCR2Config{Enabled: false}, nil + } + + msg := &pb.OCR2Config{ + Enabled: true, + IsBootstrap: cfg.IsBootstrap, + Multiaddr: cfg.Multiaddr.ValueOrZero(), + ForwarderAddress: cfg.ForwarderAddress.Ptr(), + Plugins: &pb.OCR2Config_Plugins{ + Commit: cfg.Plugins.Commit, + Execute: cfg.Plugins.Execute, + Median: cfg.Plugins.Median, + Mercury: cfg.Plugins.Mercury, + }, + } + + // Fetch the P2P key bundle + if cfg.P2PPeerID.Valid { + peerID, err := p2pkey.MakePeerID(cfg.P2PPeerID.String) + if err != nil { + return nil, errors.Wrapf(err, "invalid peer id: %s", cfg.P2PPeerID.String) + } + p2pKey, err := s.p2pKeyStore.Get(peerID) + if err != nil { + return nil, errors.Wrapf(err, "p2p key not found: %s", cfg.P2PPeerID.String) + } + + msg.P2PKeyBundle = &pb.OCR2Config_P2PKeyBundle{ + PeerId: p2pKey.PeerID().String(), + PublicKey: p2pKey.PublicKeyHex(), + } + } + + // Fetch the OCR Key Bundle + if cfg.KeyBundleID.Valid { + ocrKey, err := s.ocr2KeyStore.Get(cfg.KeyBundleID.String) + if err != nil { + return nil, errors.Wrapf(err, "ocr key not found: %s", cfg.KeyBundleID.String) + } + + ocrConfigPublicKey := ocrKey.ConfigEncryptionPublicKey() + ocrOffChainPublicKey := ocrKey.OffchainPublicKey() + + msg.OcrKeyBundle = &pb.OCR2Config_OCRKeyBundle{ + BundleId: ocrKey.ID(), + ConfigPublicKey: hex.EncodeToString(ocrConfigPublicKey[:]), + OffchainPublicKey: hex.EncodeToString(ocrOffChainPublicKey[:]), + OnchainSigningAddress: ocrKey.OnChainPublicKey(), + } + } + + return msg, nil +} + +func (s *service) validateProposeJobArgs(args ProposeJobArgs) error { + // Validate the job spec + j, err := s.generateJob(args.Spec) + if err != nil { + return errors.Wrap(err, "failed to generate a job based on spec") + } + + // Validate bootstrap multiaddrs which are only allowed for OCR jobs + if len(args.Multiaddrs) > 0 && j.Type != job.OffchainReporting && j.Type != 
job.OffchainReporting2 { + return errors.New("only OCR job type supports multiaddr") + } + + return nil +} + +func (s *service) restartConnection(ctx context.Context, mgr FeedsManager) error { + s.lggr.Infof("Restarting connection") + + if err := s.connMgr.Disconnect(mgr.ID); err != nil { + s.lggr.Info("Feeds Manager not connected, attempting to connect") + } + + // Establish a new connection + privkey, err := s.getCSAPrivateKey() + if err != nil { + return err + } + + s.connectFeedManager(ctx, mgr, privkey) + + return nil +} + +// extractName extracts the name from the TOML returning an null string if +// there is an error. +func extractName(defn string) null.String { + spec := struct { + Name null.String + }{} + + if err := toml.Unmarshal([]byte(defn), &spec); err != nil { + return null.StringFromPtr(nil) + } + + return spec.Name +} + +// isApprovable returns nil if a spec can be approved based on the current +// proposal and spec status, and if it can't be approved, the reason as an +// error. 
+func (s *service) isApprovable(propStatus JobProposalStatus, proposalID int64, specStatus SpecStatus, specID int64) error { + if propStatus == JobProposalStatusDeleted { + return errors.New("cannot approve spec for a deleted job proposal") + } + + if propStatus == JobProposalStatusRevoked { + return errors.New("cannot approve spec for a revoked job proposal") + } + + switch specStatus { + case SpecStatusApproved: + return errors.New("cannot approve an approved spec") + case SpecStatusRejected: + return errors.New("cannot approve a rejected spec") + case SpecStatusRevoked: + return errors.New("cannot approve a revoked spec") + case SpecStatusCancelled: + // Allowed to approve a cancelled job if it is the latest job + latest, serr := s.orm.GetLatestSpec(proposalID) + if serr != nil { + return errors.Wrap(serr, "failed to get latest spec") + } + + if latest.ID != specID { + return errors.New("cannot approve a cancelled spec") + } + + return nil + case SpecStatusPending: + return nil + default: + return errors.New("invalid job spec status") + } +} + +func (s *service) isRevokable(propStatus JobProposalStatus, specStatus SpecStatus) bool { + return propStatus != JobProposalStatusDeleted && (specStatus == SpecStatusPending || specStatus == SpecStatusCancelled) +} + +var _ Service = &NullService{} + +// NullService defines an implementation of the Feeds Service that is used +// when the Feeds Service is disabled. 
+type NullService struct{} + +//revive:disable +func (ns NullService) Start(ctx context.Context) error { return nil } +func (ns NullService) Close() error { return nil } +func (ns NullService) ApproveSpec(ctx context.Context, id int64, force bool) error { + return ErrFeedsManagerDisabled +} +func (ns NullService) CountManagers() (int64, error) { return 0, nil } +func (ns NullService) CountJobProposalsByStatus() (*JobProposalCounts, error) { + return nil, ErrFeedsManagerDisabled +} +func (ns NullService) CancelSpec(ctx context.Context, id int64) error { + return ErrFeedsManagerDisabled +} +func (ns NullService) GetJobProposal(id int64) (*JobProposal, error) { + return nil, ErrFeedsManagerDisabled +} +func (ns NullService) ListSpecsByJobProposalIDs(ids []int64) ([]JobProposalSpec, error) { + return nil, ErrFeedsManagerDisabled +} +func (ns NullService) GetManager(id int64) (*FeedsManager, error) { + return nil, ErrFeedsManagerDisabled +} +func (ns NullService) ListManagersByIDs(ids []int64) ([]FeedsManager, error) { + return nil, ErrFeedsManagerDisabled +} +func (ns NullService) GetSpec(id int64) (*JobProposalSpec, error) { + return nil, ErrFeedsManagerDisabled +} +func (ns NullService) ListManagers() ([]FeedsManager, error) { return nil, nil } +func (ns NullService) CreateChainConfig(ctx context.Context, cfg ChainConfig) (int64, error) { + return 0, ErrFeedsManagerDisabled +} +func (ns NullService) GetChainConfig(id int64) (*ChainConfig, error) { + return nil, ErrFeedsManagerDisabled +} +func (ns NullService) DeleteChainConfig(ctx context.Context, id int64) (int64, error) { + return 0, ErrFeedsManagerDisabled +} +func (ns NullService) ListChainConfigsByManagerIDs(mgrIDs []int64) ([]ChainConfig, error) { + return nil, ErrFeedsManagerDisabled +} +func (ns NullService) UpdateChainConfig(ctx context.Context, cfg ChainConfig) (int64, error) { + return 0, ErrFeedsManagerDisabled +} +func (ns NullService) ListJobProposals() ([]JobProposal, error) { return nil, nil } +func 
(ns NullService) ListJobProposalsByManagersIDs(ids []int64) ([]JobProposal, error) { + return nil, ErrFeedsManagerDisabled +} +func (ns NullService) ProposeJob(ctx context.Context, args *ProposeJobArgs) (int64, error) { + return 0, ErrFeedsManagerDisabled +} +func (ns NullService) DeleteJob(ctx context.Context, args *DeleteJobArgs) (int64, error) { + return 0, ErrFeedsManagerDisabled +} +func (ns NullService) RevokeJob(ctx context.Context, args *RevokeJobArgs) (int64, error) { + return 0, ErrFeedsManagerDisabled +} +func (ns NullService) RegisterManager(ctx context.Context, params RegisterManagerParams) (int64, error) { + return 0, ErrFeedsManagerDisabled +} +func (ns NullService) RejectSpec(ctx context.Context, id int64) error { + return ErrFeedsManagerDisabled +} +func (ns NullService) SyncNodeInfo(ctx context.Context, id int64) error { return nil } +func (ns NullService) UpdateManager(ctx context.Context, mgr FeedsManager) error { + return ErrFeedsManagerDisabled +} +func (ns NullService) IsJobManaged(ctx context.Context, jobID int64) (bool, error) { + return false, nil +} +func (ns NullService) UpdateSpecDefinition(ctx context.Context, id int64, spec string) error { + return ErrFeedsManagerDisabled +} +func (ns NullService) Unsafe_SetConnectionsManager(_ ConnectionsManager) {} + +//revive:enable diff --git a/core/services/feeds/service_test.go b/core/services/feeds/service_test.go new file mode 100644 index 00000000..2415b139 --- /dev/null +++ b/core/services/feeds/service_test.go @@ -0,0 +1,3471 @@ +package feeds_test + +import ( + "context" + "database/sql" + "encoding/hex" + "fmt" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/lib/pq" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + 
"github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + "github.com/goplugin/pluginv3.0/v2/core/services/feeds/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/feeds/proto" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + jobmocks "github.com/goplugin/pluginv3.0/v2/core/services/job/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/keystest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" + ksmocks "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/services/versioning" + "github.com/goplugin/pluginv3.0/v2/core/utils/crypto" +) + +const FluxMonitorTestSpecTemplate = ` +type = "fluxmonitor" +schemaVersion = 1 +name = "%s" +contractAddress = "0x3cCad4715152693fE3BC4460591e3D3Fbd071b42" +externalJobID = "%s" +threshold = 0.5 +absoluteThreshold = 0.0 # optional + +idleTimerPeriod = "1s" +idleTimerDisabled = false + +pollTimerPeriod = "1m" +pollTimerDisabled = false + +observationSource = """ 
+ds1 [type=http method=GET url="https://api.coindesk.com/v1/bpi/currentprice.json"]; +jp1 [type=jsonparse path="bpi,USD,rate_float"]; +ds1 -> jp1 -> answer1; +answer1 [type=median index=0]; +""" +` + +const OCR1TestSpecTemplate = ` +type = "offchainreporting" +schemaVersion = 1 +name = "%s" +externalJobID = "%s" +evmChainID = 0 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +keyBundleID = "f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5" +transmitterAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +isBootstrapPeer = false +observationSource = """ + // data source 1 + ds1 [type=memo value=<"{\\"USD\\": 1}">]; + ds1_parse [type=jsonparse path="USD"]; + ds1_multiply [type=multiply times=3]; + + ds2 [type=memo value=<"{\\"USD\\": 1}">]; + ds2_parse [type=jsonparse path="USD"]; + ds2_multiply [type=multiply times=3]; + + ds3 [type=fail msg="uh oh"]; + + ds1 -> ds1_parse -> ds1_multiply -> answer; + ds2 -> ds2_parse -> ds2_multiply -> answer; + ds3 -> answer; + + answer [type=median index=0]; +""" +` + +const OCR2TestSpecTemplate = ` +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +name = "%s" +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +externalJobID = "%s" +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +[relayConfig] +chainID = 1337 +[pluginConfig] +juelsPerFeeCoinSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +` +const BootstrapTestSpecTemplate = ` +type = "bootstrap" +schemaVersion = 1 +name = "%s" +contractID = 
"0x613a38AC1659769640aaE063C651F48E0250454C" +relay = "evm" +[relayConfig] +chainID = 1337 +` + +type TestService struct { + feeds.Service + orm *mocks.ORM + jobORM *jobmocks.ORM + connMgr *mocks.ConnectionsManager + spawner *jobmocks.Spawner + fmsClient *mocks.FeedsManagerClient + csaKeystore *ksmocks.CSA + p2pKeystore *ksmocks.P2P + ocr1Keystore *ksmocks.OCR + ocr2Keystore *ksmocks.OCR2 + legacyChains legacyevm.LegacyChainContainer +} + +func setupTestService(t *testing.T) *TestService { + t.Helper() + + return setupTestServiceCfg(t, nil) +} + +func setupTestServiceCfg(t *testing.T, overrideCfg func(c *plugin.Config, s *plugin.Secrets)) *TestService { + t.Helper() + + var ( + orm = mocks.NewORM(t) + jobORM = jobmocks.NewORM(t) + connMgr = mocks.NewConnectionsManager(t) + spawner = jobmocks.NewSpawner(t) + fmsClient = mocks.NewFeedsManagerClient(t) + csaKeystore = ksmocks.NewCSA(t) + p2pKeystore = ksmocks.NewP2P(t) + ocr1Keystore = ksmocks.NewOCR(t) + ocr2Keystore = ksmocks.NewOCR2(t) + ) + + lggr := logger.TestLogger(t) + + db := pgtest.NewSqlxDB(t) + gcfg := configtest.NewGeneralConfig(t, overrideCfg) + keyStore := new(ksmocks.Master) + scopedConfig := evmtest.NewChainScopedConfig(t, gcfg) + ethKeyStore := cltest.NewKeyStore(t, db, gcfg.Database()).Eth() + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: gcfg, + HeadTracker: headtracker.NullTracker, KeyStore: ethKeyStore}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + keyStore.On("Eth").Return(ethKeyStore) + keyStore.On("CSA").Return(csaKeystore) + keyStore.On("P2P").Return(p2pKeystore) + keyStore.On("OCR").Return(ocr1Keystore) + keyStore.On("OCR2").Return(ocr2Keystore) + svc := feeds.NewService(orm, jobORM, db, spawner, keyStore, scopedConfig.Insecure(), scopedConfig.JobPipeline(), scopedConfig.OCR(), scopedConfig.OCR2(), scopedConfig.Database(), legacyChains, lggr, "1.0.0") + svc.SetConnectionsManager(connMgr) + + return 
&TestService{ + Service: svc, + orm: orm, + jobORM: jobORM, + connMgr: connMgr, + spawner: spawner, + fmsClient: fmsClient, + csaKeystore: csaKeystore, + p2pKeystore: p2pKeystore, + ocr1Keystore: ocr1Keystore, + ocr2Keystore: ocr2Keystore, + legacyChains: legacyChains, + } +} + +func Test_Service_RegisterManager(t *testing.T) { + t.Parallel() + + key := cltest.DefaultCSAKey + + var ( + id = int64(1) + pubKeyHex = "0f17c3bf72de8beef6e2d17a14c0a972f5d7e0e66e70722373f12b88382d40f9" + ) + + // Decode the hex-encoded manager public key. Note: hex.Decode(dst, src) with a + // nil dst decodes nothing and reports no error, so DecodeString is used to both + // validate the input and obtain the decoded bytes. + pubKeyBytes, err := hex.DecodeString(pubKeyHex) + require.NoError(t, err) + pubKey := crypto.PublicKey(pubKeyBytes) + + var ( + mgr = feeds.FeedsManager{ + Name: "FMS", + URI: "localhost:8080", + PublicKey: pubKey, + } + params = feeds.RegisterManagerParams{ + Name: "FMS", + URI: "localhost:8080", + PublicKey: pubKey, + } + ) + + svc := setupTestService(t) + + svc.orm.On("CountManagers").Return(int64(0), nil) + svc.orm.On("CreateManager", &mgr, mock.Anything). + Return(id, nil) + svc.orm.On("CreateBatchChainConfig", params.ChainConfigs, mock.Anything). + Return([]int64{}, nil) + svc.csaKeystore.On("GetAll").Return([]csakey.KeyV2{key}, nil) + // ListManagers runs in a goroutine so it might be called. 
+ svc.orm.On("ListManagers", testutils.Context(t)).Return([]feeds.FeedsManager{mgr}, nil).Maybe() + svc.connMgr.On("Connect", mock.IsType(feeds.ConnectOpts{})) + + actual, err := svc.RegisterManager(testutils.Context(t), params) + // We need to stop the service because the manager will attempt to make a + // connection + svc.Close() + require.NoError(t, err) + + assert.Equal(t, actual, id) +} + +func Test_Service_ListManagers(t *testing.T) { + t.Parallel() + + var ( + mgr = feeds.FeedsManager{} + mgrs = []feeds.FeedsManager{mgr} + ) + svc := setupTestService(t) + + svc.orm.On("ListManagers").Return(mgrs, nil) + svc.connMgr.On("IsConnected", mgr.ID).Return(false) + + actual, err := svc.ListManagers() + require.NoError(t, err) + + assert.Equal(t, mgrs, actual) +} + +func Test_Service_GetManager(t *testing.T) { + t.Parallel() + + var ( + id = int64(1) + mgr = feeds.FeedsManager{ID: id} + ) + svc := setupTestService(t) + + svc.orm.On("GetManager", id). + Return(&mgr, nil) + svc.connMgr.On("IsConnected", mgr.ID).Return(false) + + actual, err := svc.GetManager(id) + require.NoError(t, err) + + assert.Equal(t, actual, &mgr) +} + +func Test_Service_UpdateFeedsManager(t *testing.T) { + key := cltest.DefaultCSAKey + + var ( + mgr = feeds.FeedsManager{ID: 1} + ) + + svc := setupTestService(t) + + svc.orm.On("UpdateManager", mgr, mock.Anything).Return(nil) + svc.csaKeystore.On("GetAll").Return([]csakey.KeyV2{key}, nil) + svc.connMgr.On("Disconnect", mgr.ID).Return(nil) + svc.connMgr.On("Connect", mock.IsType(feeds.ConnectOpts{})).Return(nil) + + err := svc.UpdateManager(testutils.Context(t), mgr) + require.NoError(t, err) +} + +func Test_Service_ListManagersByIDs(t *testing.T) { + t.Parallel() + + var ( + mgr = feeds.FeedsManager{} + mgrs = []feeds.FeedsManager{mgr} + ) + svc := setupTestService(t) + + svc.orm.On("ListManagersByIDs", []int64{mgr.ID}). 
+ Return(mgrs, nil) + svc.connMgr.On("IsConnected", mgr.ID).Return(false) + + actual, err := svc.ListManagersByIDs([]int64{mgr.ID}) + require.NoError(t, err) + + assert.Equal(t, mgrs, actual) +} + +func Test_Service_CountManagers(t *testing.T) { + t.Parallel() + + var ( + count = int64(1) + ) + svc := setupTestService(t) + + svc.orm.On("CountManagers"). + Return(count, nil) + + actual, err := svc.CountManagers() + require.NoError(t, err) + + assert.Equal(t, count, actual) +} + +func Test_Service_CreateChainConfig(t *testing.T) { + var ( + mgr = feeds.FeedsManager{ID: 1} + nodeVersion = &versioning.NodeVersion{ + Version: "1.0.0", + } + cfg = feeds.ChainConfig{ + FeedsManagerID: mgr.ID, + ChainID: "42", + ChainType: feeds.ChainTypeEVM, + AccountAddress: "0x0000000000000000000000000000000000000000", + AdminAddress: "0x0000000000000000000000000000000000000001", + FluxMonitorConfig: feeds.FluxMonitorConfig{ + Enabled: true, + }, + OCR1Config: feeds.OCR1Config{ + Enabled: false, + }, + OCR2Config: feeds.OCR2ConfigModel{ + Enabled: false, + }, + } + + svc = setupTestService(t) + ) + + svc.orm.On("CreateChainConfig", cfg).Return(int64(1), nil) + svc.orm.On("GetManager", mgr.ID).Return(&mgr, nil) + svc.connMgr.On("GetClient", mgr.ID).Return(svc.fmsClient, nil) + svc.orm.On("ListChainConfigsByManagerIDs", []int64{mgr.ID}).Return([]feeds.ChainConfig{cfg}, nil) + svc.fmsClient.On("UpdateNode", mock.Anything, &proto.UpdateNodeRequest{ + Version: nodeVersion.Version, + ChainConfigs: []*proto.ChainConfig{ + { + Chain: &proto.Chain{ + Id: cfg.ChainID, + Type: proto.ChainType_CHAIN_TYPE_EVM, + }, + AccountAddress: cfg.AccountAddress, + AdminAddress: cfg.AdminAddress, + FluxMonitorConfig: &proto.FluxMonitorConfig{Enabled: true}, + Ocr1Config: &proto.OCR1Config{Enabled: false}, + Ocr2Config: &proto.OCR2Config{Enabled: false}, + }, + }, + }).Return(&proto.UpdateNodeResponse{}, nil) + + actual, err := svc.CreateChainConfig(testutils.Context(t), cfg) + require.NoError(t, err) + 
assert.Equal(t, int64(1), actual) +} + +func Test_Service_CreateChainConfig_InvalidAdminAddress(t *testing.T) { + var ( + mgr = feeds.FeedsManager{ID: 1} + cfg = feeds.ChainConfig{ + FeedsManagerID: mgr.ID, + ChainID: "42", + ChainType: feeds.ChainTypeEVM, + AccountAddress: "0x0000000000000000000000000000000000000000", + AdminAddress: "0x00000000000", + FluxMonitorConfig: feeds.FluxMonitorConfig{Enabled: false}, + OCR1Config: feeds.OCR1Config{Enabled: false}, + OCR2Config: feeds.OCR2ConfigModel{Enabled: false}, + } + + svc = setupTestService(t) + ) + _, err := svc.CreateChainConfig(testutils.Context(t), cfg) + require.Error(t, err) + assert.Equal(t, "invalid admin address: 0x00000000000", err.Error()) +} + +func Test_Service_DeleteChainConfig(t *testing.T) { + var ( + mgr = feeds.FeedsManager{ID: 1} + nodeVersion = &versioning.NodeVersion{ + Version: "1.0.0", + } + cfg = feeds.ChainConfig{ + ID: 1, + FeedsManagerID: mgr.ID, + } + + svc = setupTestService(t) + ) + + svc.orm.On("GetChainConfig", cfg.ID).Return(&cfg, nil) + svc.orm.On("DeleteChainConfig", cfg.ID).Return(cfg.ID, nil) + svc.orm.On("GetManager", mgr.ID).Return(&mgr, nil) + svc.connMgr.On("GetClient", mgr.ID).Return(svc.fmsClient, nil) + svc.orm.On("ListChainConfigsByManagerIDs", []int64{mgr.ID}).Return([]feeds.ChainConfig{}, nil) + svc.fmsClient.On("UpdateNode", mock.Anything, &proto.UpdateNodeRequest{ + Version: nodeVersion.Version, + ChainConfigs: []*proto.ChainConfig{}, + }).Return(&proto.UpdateNodeResponse{}, nil) + + actual, err := svc.DeleteChainConfig(testutils.Context(t), cfg.ID) + require.NoError(t, err) + assert.Equal(t, int64(1), actual) +} + +func Test_Service_ListChainConfigsByManagerIDs(t *testing.T) { + var ( + mgr = feeds.FeedsManager{ID: 1} + cfg = feeds.ChainConfig{ + ID: 1, + FeedsManagerID: mgr.ID, + } + ids = []int64{cfg.ID} + + svc = setupTestService(t) + ) + + svc.orm.On("ListChainConfigsByManagerIDs", ids).Return([]feeds.ChainConfig{cfg}, nil) + + actual, err := 
svc.ListChainConfigsByManagerIDs(ids) + require.NoError(t, err) + assert.Equal(t, []feeds.ChainConfig{cfg}, actual) +} + +func Test_Service_UpdateChainConfig(t *testing.T) { + var ( + mgr = feeds.FeedsManager{ID: 1} + nodeVersion = &versioning.NodeVersion{ + Version: "1.0.0", + } + cfg = feeds.ChainConfig{ + FeedsManagerID: mgr.ID, + ChainID: "42", + ChainType: feeds.ChainTypeEVM, + AccountAddress: "0x0000000000000000000000000000000000000000", + AdminAddress: "0x0000000000000000000000000000000000000001", + FluxMonitorConfig: feeds.FluxMonitorConfig{Enabled: false}, + OCR1Config: feeds.OCR1Config{Enabled: false}, + OCR2Config: feeds.OCR2ConfigModel{Enabled: false}, + } + + svc = setupTestService(t) + ) + + svc.orm.On("UpdateChainConfig", cfg).Return(int64(1), nil) + svc.orm.On("GetChainConfig", cfg.ID).Return(&cfg, nil) + svc.connMgr.On("GetClient", mgr.ID).Return(svc.fmsClient, nil) + svc.orm.On("ListChainConfigsByManagerIDs", []int64{mgr.ID}).Return([]feeds.ChainConfig{cfg}, nil) + svc.fmsClient.On("UpdateNode", mock.Anything, &proto.UpdateNodeRequest{ + Version: nodeVersion.Version, + ChainConfigs: []*proto.ChainConfig{ + { + Chain: &proto.Chain{ + Id: cfg.ChainID, + Type: proto.ChainType_CHAIN_TYPE_EVM, + }, + AccountAddress: cfg.AccountAddress, + AdminAddress: cfg.AdminAddress, + FluxMonitorConfig: &proto.FluxMonitorConfig{Enabled: false}, + Ocr1Config: &proto.OCR1Config{Enabled: false}, + Ocr2Config: &proto.OCR2Config{Enabled: false}, + }, + }, + }).Return(&proto.UpdateNodeResponse{}, nil) + + actual, err := svc.UpdateChainConfig(testutils.Context(t), cfg) + require.NoError(t, err) + assert.Equal(t, int64(1), actual) +} + +func Test_Service_UpdateChainConfig_InvalidAdminAddress(t *testing.T) { + var ( + mgr = feeds.FeedsManager{ID: 1} + cfg = feeds.ChainConfig{ + FeedsManagerID: mgr.ID, + ChainID: "42", + ChainType: feeds.ChainTypeEVM, + AccountAddress: "0x0000000000000000000000000000000000000000", + AdminAddress: "0x00000000000", + FluxMonitorConfig: 
feeds.FluxMonitorConfig{Enabled: false}, + OCR1Config: feeds.OCR1Config{Enabled: false}, + OCR2Config: feeds.OCR2ConfigModel{Enabled: false}, + } + + svc = setupTestService(t) + ) + _, err := svc.UpdateChainConfig(testutils.Context(t), cfg) + require.Error(t, err) + assert.Equal(t, "invalid admin address: 0x00000000000", err.Error()) +} + +func Test_Service_ProposeJob(t *testing.T) { + t.Parallel() + + var ( + idFluxMonitor = int64(1) + remoteUUIDFluxMonitor = uuid.New() + nameAndExternalJobID = uuid.New() + spec = fmt.Sprintf(FluxMonitorTestSpecTemplate, nameAndExternalJobID, nameAndExternalJobID) + argsFluxMonitor = &feeds.ProposeJobArgs{ + FeedsManagerID: 1, + RemoteUUID: remoteUUIDFluxMonitor, + Spec: spec, + Version: 1, + } + jpFluxMonitor = feeds.JobProposal{ + FeedsManagerID: 1, + Name: null.StringFrom(nameAndExternalJobID.String()), + RemoteUUID: remoteUUIDFluxMonitor, + Status: feeds.JobProposalStatusPending, + } + specFluxMonitor = feeds.JobProposalSpec{ + Definition: spec, + Status: feeds.SpecStatusPending, + Version: argsFluxMonitor.Version, + JobProposalID: idFluxMonitor, + } + + idOCR1 = int64(2) + remoteUUIDOCR1 = uuid.New() + ocr1NameAndExternalJobID = uuid.New() + ocr1Spec = fmt.Sprintf(OCR1TestSpecTemplate, ocr1NameAndExternalJobID, ocr1NameAndExternalJobID) + argsOCR1 = &feeds.ProposeJobArgs{ + FeedsManagerID: 1, + RemoteUUID: remoteUUIDOCR1, + Spec: ocr1Spec, + Version: 1, + } + jpOCR1 = feeds.JobProposal{ + FeedsManagerID: 1, + Name: null.StringFrom(ocr1NameAndExternalJobID.String()), + RemoteUUID: remoteUUIDOCR1, + Status: feeds.JobProposalStatusPending, + } + specOCR1 = feeds.JobProposalSpec{ + Definition: ocr1Spec, + Status: feeds.SpecStatusPending, + Version: argsOCR1.Version, + JobProposalID: idOCR1, + } + + idOCR2 = int64(3) + remoteUUIDOCR2 = uuid.New() + ocr2NameAndExternalJobID = uuid.New() + ocr2Spec = fmt.Sprintf(OCR2TestSpecTemplate, ocr2NameAndExternalJobID, ocr2NameAndExternalJobID) + argsOCR2 = &feeds.ProposeJobArgs{ + 
FeedsManagerID: 1, + RemoteUUID: remoteUUIDOCR2, + Spec: ocr2Spec, + Version: 1, + } + jpOCR2 = feeds.JobProposal{ + FeedsManagerID: 1, + Name: null.StringFrom(ocr2NameAndExternalJobID.String()), + RemoteUUID: remoteUUIDOCR2, + Status: feeds.JobProposalStatusPending, + } + specOCR2 = feeds.JobProposalSpec{ + Definition: ocr2Spec, + Status: feeds.SpecStatusPending, + Version: argsOCR2.Version, + JobProposalID: idOCR2, + } + + idBootstrap = int64(4) + remoteUUIDBootstrap = uuid.New() + bootstrapName = uuid.New() + bootstrapSpec = fmt.Sprintf(BootstrapTestSpecTemplate, bootstrapName) + argsBootstrap = &feeds.ProposeJobArgs{ + FeedsManagerID: 1, + RemoteUUID: remoteUUIDBootstrap, + Spec: bootstrapSpec, + Version: 1, + } + jpBootstrap = feeds.JobProposal{ + FeedsManagerID: 1, + Name: null.StringFrom(bootstrapName.String()), + RemoteUUID: remoteUUIDBootstrap, + Status: feeds.JobProposalStatusPending, + } + specBootstrap = feeds.JobProposalSpec{ + Definition: bootstrapSpec, + Status: feeds.SpecStatusPending, + Version: argsBootstrap.Version, + JobProposalID: idBootstrap, + } + + httpTimeout = *commonconfig.MustNewDuration(1 * time.Second) + ) + + testCases := []struct { + name string + args *feeds.ProposeJobArgs + before func(svc *TestService) + wantID int64 + wantErr string + }{ + { + name: "Create success (Flux Monitor)", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", jpFluxMonitor.RemoteUUID).Return(new(feeds.JobProposal), sql.ErrNoRows) + svc.orm.On("UpsertJobProposal", &jpFluxMonitor, mock.Anything).Return(idFluxMonitor, nil) + svc.orm.On("CreateSpec", specFluxMonitor, mock.Anything).Return(int64(100), nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + args: argsFluxMonitor, + wantID: idFluxMonitor, + }, + { + name: "Create success (OCR1)", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", jpOCR1.RemoteUUID).Return(new(feeds.JobProposal), sql.ErrNoRows) + 
svc.orm.On("UpsertJobProposal", &jpOCR1, mock.Anything).Return(idOCR1, nil) + svc.orm.On("CreateSpec", specOCR1, mock.Anything).Return(int64(100), nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + args: argsOCR1, + wantID: idOCR1, + }, + { + name: "Create success (OCR2)", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", jpOCR2.RemoteUUID).Return(new(feeds.JobProposal), sql.ErrNoRows) + svc.orm.On("UpsertJobProposal", &jpOCR2, mock.Anything).Return(idOCR2, nil) + svc.orm.On("CreateSpec", specOCR2, mock.Anything).Return(int64(100), nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + args: argsOCR2, + wantID: idOCR2, + }, + { + name: "Create success (Bootstrap)", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", jpBootstrap.RemoteUUID).Return(new(feeds.JobProposal), sql.ErrNoRows) + svc.orm.On("UpsertJobProposal", &jpBootstrap, mock.Anything).Return(idBootstrap, nil) + svc.orm.On("CreateSpec", specBootstrap, mock.Anything).Return(int64(102), nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + args: argsBootstrap, + wantID: idBootstrap, + }, + { + name: "Update success", + before: func(svc *TestService) { + svc.orm. + On("GetJobProposalByRemoteUUID", jpFluxMonitor.RemoteUUID). 
+ Return(&feeds.JobProposal{ + FeedsManagerID: jpFluxMonitor.FeedsManagerID, + RemoteUUID: jpFluxMonitor.RemoteUUID, + Status: feeds.JobProposalStatusPending, + }, nil) + svc.orm.On("ExistsSpecByJobProposalIDAndVersion", jpFluxMonitor.ID, argsFluxMonitor.Version).Return(false, nil) + svc.orm.On("UpsertJobProposal", &jpFluxMonitor, mock.Anything).Return(idFluxMonitor, nil) + svc.orm.On("CreateSpec", specFluxMonitor, mock.Anything).Return(int64(100), nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + args: argsFluxMonitor, + wantID: idFluxMonitor, + }, + { + name: "contains invalid job spec", + args: &feeds.ProposeJobArgs{}, + wantErr: "invalid job type", + }, + { + name: "must be an ocr job to include bootstraps", + before: func(svc *TestService) {}, + args: &feeds.ProposeJobArgs{ + Spec: spec, + Multiaddrs: pq.StringArray{"/dns4/example.com"}, + }, + wantErr: "only OCR job type supports multiaddr", + }, + { + name: "ensure an upsert validates the job proposal belongs to the feeds manager", + before: func(svc *TestService) { + svc.orm. + On("GetJobProposalByRemoteUUID", jpFluxMonitor.RemoteUUID). + Return(&feeds.JobProposal{ + FeedsManagerID: 2, + RemoteUUID: jpFluxMonitor.RemoteUUID, + }, nil) + }, + args: argsFluxMonitor, + wantErr: "cannot update a job proposal belonging to another feeds manager", + }, + { + name: "spec version already exists", + before: func(svc *TestService) { + svc.orm. + On("GetJobProposalByRemoteUUID", jpFluxMonitor.RemoteUUID). 
+ Return(&feeds.JobProposal{ + FeedsManagerID: jpFluxMonitor.FeedsManagerID, + RemoteUUID: jpFluxMonitor.RemoteUUID, + Status: feeds.JobProposalStatusPending, + }, nil) + svc.orm.On("ExistsSpecByJobProposalIDAndVersion", jpFluxMonitor.ID, argsFluxMonitor.Version).Return(true, nil) + }, + args: argsFluxMonitor, + wantErr: "proposed job spec version already exists", + }, + { + name: "upsert error", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", jpFluxMonitor.RemoteUUID).Return(new(feeds.JobProposal), sql.ErrNoRows) + svc.orm.On("UpsertJobProposal", &jpFluxMonitor, mock.Anything).Return(int64(0), errors.New("orm error")) + }, + args: argsFluxMonitor, + wantErr: "failed to upsert job proposal", + }, + { + name: "Create spec error", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", jpFluxMonitor.RemoteUUID).Return(new(feeds.JobProposal), sql.ErrNoRows) + svc.orm.On("UpsertJobProposal", &jpFluxMonitor, mock.Anything).Return(idFluxMonitor, nil) + svc.orm.On("CreateSpec", specFluxMonitor, mock.Anything).Return(int64(0), errors.New("orm error")) + }, + args: argsFluxMonitor, + wantErr: "failed to create spec", + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + svc := setupTestServiceCfg(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.HTTPRequest.DefaultTimeout = &httpTimeout + c.OCR.Enabled = testutils.Ptr(true) + c.OCR2.Enabled = testutils.Ptr(true) + }) + if tc.before != nil { + tc.before(svc) + } + + actual, err := svc.ProposeJob(testutils.Context(t), tc.args) + + if tc.wantErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.wantErr) + } else { + require.NoError(t, err) + assert.Equal(t, tc.wantID, actual) + } + }) + } +} + +func Test_Service_DeleteJob(t *testing.T) { + t.Parallel() + + var ( + remoteUUID = uuid.New() + args = &feeds.DeleteJobArgs{ + FeedsManagerID: 1, + RemoteUUID: remoteUUID, + } + + approved = 
feeds.JobProposal{ + ID: 1, + FeedsManagerID: 1, + RemoteUUID: remoteUUID, + Status: feeds.JobProposalStatusApproved, + } + + httpTimeout = *commonconfig.MustNewDuration(1 * time.Second) + ) + + testCases := []struct { + name string + args *feeds.DeleteJobArgs + before func(svc *TestService) + wantID int64 + wantErr string + }{ + { + name: "Delete success", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", approved.RemoteUUID).Return(&approved, nil) + svc.orm.On("DeleteProposal", approved.ID, mock.Anything).Return(nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + args: args, + wantID: approved.ID, + }, + { + name: "Job proposal being deleted belongs to the feeds manager", + before: func(svc *TestService) { + svc.orm. + On("GetJobProposalByRemoteUUID", approved.RemoteUUID). + Return(&feeds.JobProposal{ + FeedsManagerID: 2, + RemoteUUID: approved.RemoteUUID, + Status: feeds.JobProposalStatusApproved, + }, nil) + }, + args: args, + wantErr: "cannot delete a job proposal belonging to another feeds manager", + }, + { + name: "Get proposal error", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", approved.RemoteUUID).Return(nil, errors.New("orm error")) + }, + args: args, + wantErr: "GetJobProposalByRemoteUUID failed", + }, + { + name: "No proposal error", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", approved.RemoteUUID).Return(nil, sql.ErrNoRows) + }, + args: args, + wantErr: "GetJobProposalByRemoteUUID did not find any proposals to delete", + }, + { + name: "Delete proposal error", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", approved.RemoteUUID).Return(&approved, nil) + svc.orm.On("DeleteProposal", approved.ID, mock.Anything).Return(errors.New("orm error")) + }, + args: args, + wantErr: "DeleteProposal failed", + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { 
+ t.Parallel() + + svc := setupTestServiceCfg(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.HTTPRequest.DefaultTimeout = &httpTimeout + }) + if tc.before != nil { + tc.before(svc) + } + + _, err := svc.DeleteJob(testutils.Context(t), tc.args) + + if tc.wantErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.wantErr) + } else { + require.NoError(t, err) + } + }) + } +} + +func Test_Service_RevokeJob(t *testing.T) { + t.Parallel() + + var ( + remoteUUID = uuid.New() + args = &feeds.RevokeJobArgs{ + FeedsManagerID: 1, + RemoteUUID: remoteUUID, + } + + defn = ` +name = 'PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000' +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +externalJobID = '00000000-0000-0000-0000-000000000001' +observationSource = """ +// data source 1 +ds1 [type=bridge name=\"bridge-api0\" requestData="{\\\"data\\": {\\\"from\\\":\\\"PLI\\\",\\\"to\\\":\\\"ETH\\\"}}"]; +ds1_parse [type=jsonparse path="result"]; +ds1_multiply [type=multiply times=1000000000000000000]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; + +answer1 [type=median index=0]; +""" +[relayConfig] +chainID = 0 +[pluginConfig] +juelsPerFeeCoinSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +` + + pendingProposal = &feeds.JobProposal{ + ID: 1, + FeedsManagerID: 1, + RemoteUUID: remoteUUID, + Status: feeds.JobProposalStatusPending, + } + + pendingSpec = &feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusPending, + JobProposalID: pendingProposal.ID, + Version: 1, + Definition: defn, + } + + httpTimeout = *commonconfig.MustNewDuration(1 * time.Second) + ) + + testCases := []struct { + name string + args *feeds.RevokeJobArgs + before func(svc *TestService) + 
wantID int64 + wantErr string + }{ + { + name: "Revoke success when latest spec status is pending", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", pendingProposal.RemoteUUID).Return(pendingProposal, nil) + svc.orm.On("GetLatestSpec", pendingSpec.JobProposalID).Return(pendingSpec, nil) + svc.orm.On("RevokeSpec", pendingSpec.ID, mock.Anything).Return(nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + args: args, + wantID: pendingProposal.ID, + }, + { + name: "Revoke success when latest spec status is cancelled", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", pendingProposal.RemoteUUID).Return(pendingProposal, nil) + svc.orm.On("GetLatestSpec", pendingSpec.JobProposalID).Return(&feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusCancelled, + JobProposalID: pendingProposal.ID, + Version: 1, + Definition: defn, + }, nil) + svc.orm.On("RevokeSpec", pendingSpec.ID, mock.Anything).Return(nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + args: args, + wantID: pendingProposal.ID, + }, + { + name: "Job proposal being revoked belongs to the feeds manager", + before: func(svc *TestService) { + svc.orm. + On("GetJobProposalByRemoteUUID", pendingProposal.RemoteUUID). 
+ Return(&feeds.JobProposal{ + FeedsManagerID: 2, + RemoteUUID: pendingProposal.RemoteUUID, + Status: feeds.JobProposalStatusApproved, + }, nil) + }, + args: args, + wantErr: "cannot revoke a job proposal belonging to another feeds manager", + }, + { + name: "Get proposal error", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", pendingProposal.RemoteUUID).Return(nil, errors.New("orm error")) + }, + args: args, + wantErr: "GetJobProposalByRemoteUUID failed", + }, + { + name: "No proposal error", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", pendingProposal.RemoteUUID).Return(nil, sql.ErrNoRows) + }, + args: args, + wantErr: "GetJobProposalByRemoteUUID did not find any proposals to revoke", + }, + { + name: "Get latest spec error", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", pendingProposal.RemoteUUID).Return(pendingProposal, nil) + svc.orm.On("GetLatestSpec", pendingSpec.JobProposalID).Return(nil, sql.ErrNoRows) + }, + args: args, + wantErr: "GetLatestSpec failed to get latest spec", + }, + { + name: "Not revokable due to spec status approved", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", pendingProposal.RemoteUUID).Return(pendingProposal, nil) + svc.orm.On("GetLatestSpec", pendingSpec.JobProposalID).Return(&feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusApproved, + JobProposalID: pendingProposal.ID, + Version: 1, + Definition: defn, + }, nil) + }, + args: args, + wantErr: "only pending job specs can be revoked", + }, + { + name: "Not revokable due to spec status rejected", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", pendingProposal.RemoteUUID).Return(pendingProposal, nil) + svc.orm.On("GetLatestSpec", pendingSpec.JobProposalID).Return(&feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusRejected, + JobProposalID: pendingProposal.ID, + Version: 1, + Definition: defn, + }, nil) + }, + 
args: args, + wantErr: "only pending job specs can be revoked", + }, + { + name: "Not revokable due to spec status already revoked", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", pendingProposal.RemoteUUID).Return(pendingProposal, nil) + svc.orm.On("GetLatestSpec", pendingSpec.JobProposalID).Return(&feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusRevoked, + JobProposalID: pendingProposal.ID, + Version: 1, + Definition: defn, + }, nil) + }, + args: args, + wantErr: "only pending job specs can be revoked", + }, + { + name: "Not revokable due to proposal status deleted", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", pendingProposal.RemoteUUID).Return(&feeds.JobProposal{ + ID: 1, + FeedsManagerID: 1, + RemoteUUID: remoteUUID, + Status: feeds.JobProposalStatusDeleted, + }, nil) + svc.orm.On("GetLatestSpec", pendingSpec.JobProposalID).Return(pendingSpec, nil) + }, + args: args, + wantErr: "only pending job specs can be revoked", + }, + { + name: "Revoke proposal error", + before: func(svc *TestService) { + svc.orm.On("GetJobProposalByRemoteUUID", pendingProposal.RemoteUUID).Return(pendingProposal, nil) + svc.orm.On("GetLatestSpec", pendingSpec.JobProposalID).Return(pendingSpec, nil) + svc.orm.On("RevokeSpec", pendingSpec.ID, mock.Anything).Return(errors.New("orm error")) + }, + args: args, + wantErr: "RevokeSpec failed", + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + svc := setupTestServiceCfg(t, func(c *plugin.Config, s *plugin.Secrets) { + c.OCR2.Enabled = testutils.Ptr(true) + c.JobPipeline.HTTPRequest.DefaultTimeout = &httpTimeout + }) + if tc.before != nil { + tc.before(svc) + } + + _, err := svc.RevokeJob(testutils.Context(t), tc.args) + + if tc.wantErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.wantErr) + } else { + require.NoError(t, err) + } + }) + } +} + +func Test_Service_SyncNodeInfo(t 
*testing.T) { + p2pKey := keystest.NewP2PKeyV2(t) + + ocrKey, err := ocrkey.NewV2() + require.NoError(t, err) + + var ( + multiaddr = "/dns4/chain.link/tcp/1234/p2p/16Uiu2HAm58SP7UL8zsnpeuwHfytLocaqgnyaYKP8wu7qRdrixLju" + mgr = &feeds.FeedsManager{ID: 1} + forwarderAddr = "0x0002" + ccfg = feeds.ChainConfig{ + ID: 100, + FeedsManagerID: mgr.ID, + ChainID: "42", + ChainType: feeds.ChainTypeEVM, + AccountAddress: "0x0000", + AdminAddress: "0x0001", + FluxMonitorConfig: feeds.FluxMonitorConfig{ + Enabled: true, + }, + OCR1Config: feeds.OCR1Config{ + Enabled: true, + IsBootstrap: false, + P2PPeerID: null.StringFrom(p2pKey.PeerID().String()), + KeyBundleID: null.StringFrom(ocrKey.GetID()), + }, + OCR2Config: feeds.OCR2ConfigModel{ + Enabled: true, + IsBootstrap: true, + Multiaddr: null.StringFrom(multiaddr), + ForwarderAddress: null.StringFrom(forwarderAddr), + Plugins: feeds.Plugins{ + Commit: true, + Execute: true, + Median: false, + Mercury: true, + }, + }, + } + chainConfigs = []feeds.ChainConfig{ccfg} + nodeVersion = &versioning.NodeVersion{Version: "1.0.0"} + ) + + svc := setupTestService(t) + + svc.connMgr.On("GetClient", mgr.ID).Return(svc.fmsClient, nil) + svc.orm.On("ListChainConfigsByManagerIDs", []int64{mgr.ID}).Return(chainConfigs, nil) + + // OCR1 key fetching + svc.p2pKeystore.On("Get", p2pKey.PeerID()).Return(p2pKey, nil) + svc.ocr1Keystore.On("Get", ocrKey.GetID()).Return(ocrKey, nil) + + svc.fmsClient.On("UpdateNode", mock.Anything, &proto.UpdateNodeRequest{ + Version: nodeVersion.Version, + ChainConfigs: []*proto.ChainConfig{ + { + Chain: &proto.Chain{ + Id: ccfg.ChainID, + Type: proto.ChainType_CHAIN_TYPE_EVM, + }, + AccountAddress: ccfg.AccountAddress, + AdminAddress: ccfg.AdminAddress, + FluxMonitorConfig: &proto.FluxMonitorConfig{Enabled: true}, + Ocr1Config: &proto.OCR1Config{ + Enabled: true, + IsBootstrap: ccfg.OCR1Config.IsBootstrap, + P2PKeyBundle: &proto.OCR1Config_P2PKeyBundle{ + PeerId: p2pKey.PeerID().String(), + PublicKey: 
p2pKey.PublicKeyHex(),
					},
					OcrKeyBundle: &proto.OCR1Config_OCRKeyBundle{
						BundleId:              ocrKey.GetID(),
						ConfigPublicKey:       ocrkey.ConfigPublicKey(ocrKey.PublicKeyConfig()).String(),
						OffchainPublicKey:     ocrKey.OffChainSigning.PublicKey().String(),
						OnchainSigningAddress: ocrKey.OnChainSigning.Address().String(),
					},
				},
				Ocr2Config: &proto.OCR2Config{
					Enabled:          true,
					IsBootstrap:      ccfg.OCR2Config.IsBootstrap,
					Multiaddr:        multiaddr,
					ForwarderAddress: &forwarderAddr,
					Plugins: &proto.OCR2Config_Plugins{
						Commit:  ccfg.OCR2Config.Plugins.Commit,
						Execute: ccfg.OCR2Config.Plugins.Execute,
						Median:  ccfg.OCR2Config.Plugins.Median,
						Mercury: ccfg.OCR2Config.Plugins.Mercury,
					},
				},
			},
		},
	}).Return(&proto.UpdateNodeResponse{}, nil)

	err = svc.SyncNodeInfo(testutils.Context(t), mgr.ID)
	require.NoError(t, err)
}

// Test_Service_IsJobManaged verifies that IsJobManaged delegates to the ORM
// and returns the managed flag unchanged.
func Test_Service_IsJobManaged(t *testing.T) {
	t.Parallel()

	svc := setupTestService(t)
	ctx := testutils.Context(t)
	jobID := int64(1)

	svc.orm.On("IsJobManaged", jobID, mock.Anything).Return(true, nil)

	isManaged, err := svc.IsJobManaged(ctx, jobID)
	require.NoError(t, err)
	assert.True(t, isManaged)
}

// Test_Service_ListJobProposals verifies that ListJobProposals returns the
// proposals provided by the ORM.
func Test_Service_ListJobProposals(t *testing.T) {
	t.Parallel()

	var (
		jp  = feeds.JobProposal{}
		jps = []feeds.JobProposal{jp}
	)
	svc := setupTestService(t)

	svc.orm.On("ListJobProposals").
		Return(jps, nil)

	actual, err := svc.ListJobProposals()
	require.NoError(t, err)

	// NOTE: testify's Equal takes (t, expected, actual); arguments were
	// previously reversed, which produced misleading failure diffs.
	assert.Equal(t, jps, actual)
}

// Test_Service_ListJobProposalsByManagersIDs verifies that proposals are
// listed for the given feeds manager IDs via the ORM.
func Test_Service_ListJobProposalsByManagersIDs(t *testing.T) {
	t.Parallel()

	var (
		jp    = feeds.JobProposal{}
		jps   = []feeds.JobProposal{jp}
		fmIDs = []int64{1}
	)
	svc := setupTestService(t)

	svc.orm.On("ListJobProposalsByManagersIDs", fmIDs).
		Return(jps, nil)

	actual, err := svc.ListJobProposalsByManagersIDs(fmIDs)
	require.NoError(t, err)

	assert.Equal(t, jps, actual)
}

// Test_Service_GetJobProposal verifies that a single proposal is fetched by
// ID through the ORM.
func Test_Service_GetJobProposal(t *testing.T) {
	t.Parallel()

	var (
		id = int64(1)
		ms = feeds.JobProposal{ID: id}
	)
	svc := setupTestService(t)

	svc.orm.On("GetJobProposal", id).
		Return(&ms, nil)

	actual, err := svc.GetJobProposal(id)
	require.NoError(t, err)

	assert.Equal(t, &ms, actual)
}

// Test_Service_CancelSpec is a table test covering the cancellation flow of
// an approved job proposal spec: ORM updates, local job deletion, and the
// CancelledJob RPC back to the feeds manager.
func Test_Service_CancelSpec(t *testing.T) {
	var (
		externalJobID = uuid.New()
		jp            = &feeds.JobProposal{
			ID:             1,
			ExternalJobID:  uuid.NullUUID{UUID: externalJobID, Valid: true},
			RemoteUUID:     externalJobID,
			FeedsManagerID: 100,
		}
		spec = &feeds.JobProposalSpec{
			ID:            20,
			Status:        feeds.SpecStatusApproved,
			JobProposalID: jp.ID,
			Version:       1,
		}
		j = job.Job{
			ID:            1,
			ExternalJobID: externalJobID,
		}
	)

	testCases := []struct {
		name    string
		before  func(svc *TestService)
		specID  int64
		wantErr string
	}{
		{
			name: "success",
			before: func(svc *TestService) {
				svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil)
				svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil)
				svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil)

				svc.orm.On("CancelSpec", spec.ID, mock.Anything).Return(nil)
				svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(j, nil)
				svc.spawner.On("DeleteJob", j.ID, mock.Anything).Return(nil)

				svc.fmsClient.On("CancelledJob",
					mock.MatchedBy(func(ctx context.Context) bool { return true }),
					&proto.CancelledJobRequest{
						Uuid:    jp.RemoteUUID.String(),
						Version: int64(spec.Version),
					},
				).Return(&proto.CancelledJobResponse{}, nil)
				svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil)
			},
			specID: spec.ID,
		},
		{
			name: "success without external job id",
			before: func(svc *TestService) {
				svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient,
nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(&feeds.JobProposal{ + ID: 1, + RemoteUUID: externalJobID, + FeedsManagerID: 100, + }, nil) + + svc.orm.On("CancelSpec", spec.ID, mock.Anything).Return(nil) + svc.fmsClient.On("CancelledJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.CancelledJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.CancelledJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + specID: spec.ID, + }, + { + name: "success without jobs", + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + + svc.orm.On("CancelSpec", spec.ID, mock.Anything).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.fmsClient.On("CancelledJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.CancelledJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.CancelledJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + specID: spec.ID, + }, + { + name: "spec does not exist", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(nil, errors.New("Not Found")) + }, + specID: spec.ID, + wantErr: "orm: job proposal spec: Not Found", + }, + { + name: "must be an approved job proposal spec", + before: func(svc *TestService) { + pspec := &feeds.JobProposalSpec{ + ID: spec.ID, + Status: feeds.SpecStatusPending, + } + svc.orm.On("GetSpec", pspec.ID, mock.Anything).Return(pspec, nil) + }, + specID: spec.ID, + wantErr: "must be an approved job 
proposal spec", + }, + { + name: "job proposal does not exist", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(nil, errors.New("Not Found")) + }, + specID: spec.ID, + wantErr: "orm: job proposal: Not Found", + }, + { + name: "rpc client not connected", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(nil, errors.New("Not Connected")) + }, + specID: spec.ID, + wantErr: "fms rpc client: Not Connected", + }, + { + name: "cancel spec orm fails", + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.orm.On("CancelSpec", spec.ID, mock.Anything).Return(errors.New("failure")) + }, + specID: spec.ID, + wantErr: "failure", + }, + { + name: "find by external uuid orm fails", + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + + svc.orm.On("CancelSpec", spec.ID, mock.Anything).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, errors.New("failure")) + }, + specID: spec.ID, + wantErr: "FindJobByExternalJobID failed: failure", + }, + { + name: "delete job fails", + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + + svc.orm.On("CancelSpec", spec.ID, 
mock.Anything).Return(nil)
				svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(j, nil)
				svc.spawner.On("DeleteJob", j.ID, mock.Anything).Return(errors.New("failure"))
			},
			specID:  spec.ID,
			wantErr: "DeleteJob failed: failure",
		},
		{
			name: "cancelled job rpc call fails",
			before: func(svc *TestService) {
				svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil)
				svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil)
				svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil)

				svc.orm.On("CancelSpec", spec.ID, mock.Anything).Return(nil)
				svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(j, nil)
				svc.spawner.On("DeleteJob", j.ID, mock.Anything).Return(nil)

				svc.fmsClient.On("CancelledJob",
					mock.MatchedBy(func(ctx context.Context) bool { return true }),
					&proto.CancelledJobRequest{
						Uuid:    jp.RemoteUUID.String(),
						Version: int64(spec.Version),
					},
				).Return(nil, errors.New("failure"))
			},
			specID:  spec.ID,
			wantErr: "failure",
		},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			svc := setupTestService(t)

			if tc.before != nil {
				tc.before(svc)
			}

			err := svc.CancelSpec(testutils.Context(t), tc.specID)

			if tc.wantErr != "" {
				require.Error(t, err)
				assert.EqualError(t, err, tc.wantErr)

				return
			}

			require.NoError(t, err)
		})
	}
}

// Test_Service_GetSpec verifies a single job proposal spec is fetched by ID
// through the ORM.
func Test_Service_GetSpec(t *testing.T) {
	t.Parallel()

	var (
		id   = int64(1)
		spec = feeds.JobProposalSpec{ID: id}
	)
	svc := setupTestService(t)

	svc.orm.On("GetSpec", id).
		Return(&spec, nil)

	actual, err := svc.GetSpec(id)
	require.NoError(t, err)

	assert.Equal(t, &spec, actual)
}

// Test_Service_ListSpecsByJobProposalIDs verifies specs are listed for the
// given job proposal IDs through the ORM.
func Test_Service_ListSpecsByJobProposalIDs(t *testing.T) {
	t.Parallel()

	var (
		id    = int64(1)
		jpID  = int64(200)
		spec  = feeds.JobProposalSpec{ID: id, JobProposalID: jpID}
		specs = []feeds.JobProposalSpec{spec}
	)
	svc := setupTestService(t)

	svc.orm.On("ListSpecsByJobProposalIDs", []int64{jpID}).
		Return(specs, nil)

	actual, err := svc.ListSpecsByJobProposalIDs([]int64{jpID})
	require.NoError(t, err)

	assert.Equal(t, specs, actual)
}

// Test_Service_ApproveSpec is a table test covering approval of a flux
// monitor job proposal spec: spec/proposal fetch, bridge assertion, job
// creation (with optional forced replacement), and the ApprovedJob RPC.
func Test_Service_ApproveSpec(t *testing.T) {
	var evmChainID *big.Big
	address := ethkey.EIP55AddressFromAddress(common.Address{})
	externalJobID := uuid.New()

	var (
		ctx  = testutils.Context(t)
		defn = `
name = 'PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000'
schemaVersion = 1
contractAddress = '0x0000000000000000000000000000000000000000'
externalJobID = '%s'
type = 'fluxmonitor'
threshold = 1.0
idleTimerPeriod = '4h'
idleTimerDisabled = false
pollingTimerPeriod = '1m'
pollingTimerDisabled = false
observationSource = """
// data source 1
ds1 [type=bridge name=\"bridge-api0\" requestData="{\\\"data\\": {\\\"from\\\":\\\"PLI\\\",\\\"to\\\":\\\"ETH\\\"}}"];
ds1_parse [type=jsonparse path="result"];
ds1_multiply [type=multiply times=1000000000000000000];
ds1 -> ds1_parse -> ds1_multiply -> answer1;

answer1 [type=median index=0];
"""
`
		jp = &feeds.JobProposal{
			ID:             1,
			FeedsManagerID: 100,
		}
		spec = &feeds.JobProposalSpec{
			ID:            20,
			Status:        feeds.SpecStatusPending,
			JobProposalID: jp.ID,
			Version:       1,
			Definition:    fmt.Sprintf(defn, externalJobID),
		}
		spec2 = &feeds.JobProposalSpec{
			ID:            20,
			Status:        feeds.SpecStatusPending,
			JobProposalID: jp.ID,
			Version:       1,
			Definition:    fmt.Sprintf(defn, uuid.Nil),
		}
		rejectedSpec = &feeds.JobProposalSpec{
			ID:            20,
			Status:        feeds.SpecStatusRejected,
			JobProposalID: jp.ID,
Version: 1, + Definition: fmt.Sprintf(defn, externalJobID), + } + cancelledSpec = &feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusCancelled, + JobProposalID: jp.ID, + Version: 1, + Definition: fmt.Sprintf(defn, externalJobID), + } + j = job.Job{ + ID: 1, + ExternalJobID: externalJobID, + } + ) + + testCases := []struct { + name string + httpTimeout *commonconfig.Duration + before func(svc *TestService) + id int64 + force bool + wantErr string + }{ + { + name: "pending job success for new proposals", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.EXPECT().GetSpec(spec.ID, mock.Anything).Return(spec, nil) + svc.orm.EXPECT().GetJobProposal(jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindJobIDByAddress", address, evmChainID, mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + mock.IsType(uuid.UUID{}), + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: spec.ID, + force: false, + }, + { + name: "cancelled spec success when it is the latest spec", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", cancelledSpec.ID, mock.Anything).Return(cancelledSpec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.orm.On("GetLatestSpec", cancelledSpec.JobProposalID).Return(cancelledSpec, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindJobIDByAddress", address, evmChainID, mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + cancelledSpec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: cancelledSpec.ID, + force: false, + }, + { + name: "pending job fail due to spec missing external job id", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.EXPECT().GetSpec(spec.ID, mock.Anything).Return(spec2, nil) + svc.orm.EXPECT().GetJobProposal(jp.ID, mock.Anything).Return(jp, nil) + }, + id: spec.ID, + force: false, + wantErr: "failed to approve job spec due to missing ExternalJobID in spec", + }, + { + name: "failed due to proposal being revoked", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(&feeds.JobProposal{ + ID: 1, + Status: feeds.JobProposalStatusRevoked, + }, nil) + }, + id: spec.ID, + force: false, + wantErr: "cannot approve spec for a revoked job proposal", + }, + { + name: "failed due to proposal being deleted", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(&feeds.JobProposal{ + ID: jp.ID, + Status: feeds.JobProposalStatusDeleted, + }, nil) + }, + id: spec.ID, + force: false, + wantErr: "cannot approve spec for a deleted job proposal", + }, + { + name: "failed due to spec already approved", + before: func(svc *TestService) { + aspec := &feeds.JobProposalSpec{ + ID: spec.ID, + Status: feeds.SpecStatusApproved, + JobProposalID: jp.ID, + } + 
svc.orm.On("GetSpec", aspec.ID, mock.Anything).Return(aspec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + }, + id: spec.ID, + force: false, + wantErr: "cannot approve an approved spec", + }, + { + name: "rejected spec fail", + before: func(svc *TestService) { + svc.orm.On("GetSpec", cancelledSpec.ID, mock.Anything).Return(rejectedSpec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + }, + id: rejectedSpec.ID, + force: false, + wantErr: "cannot approve a rejected spec", + }, + { + name: "cancelled spec failed not latest spec", + before: func(svc *TestService) { + svc.orm.On("GetSpec", cancelledSpec.ID, mock.Anything).Return(cancelledSpec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.orm.On("GetLatestSpec", cancelledSpec.JobProposalID).Return(&feeds.JobProposalSpec{ + ID: 21, + Status: feeds.SpecStatusPending, + JobProposalID: jp.ID, + Version: 2, + Definition: defn, + }, nil) + }, + id: cancelledSpec.ID, + force: false, + wantErr: "cannot approve a cancelled spec", + }, + { + name: "already existing job replacement (found via external job id) error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(j, nil) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: a job for this contract address already exists - please use the 'force' option to replace it", + }, + { + name: "already existing job replacement error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + 
svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindJobIDByAddress", address, evmChainID, mock.Anything).Return(j.ID, nil) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: a job for this contract address already exists - please use the 'force' option to replace it", + }, + { + name: "already existing self managed job replacement success if forced (via external job id)", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.EXPECT().GetSpec(spec.ID, mock.Anything).Return(spec, nil) + svc.orm.EXPECT().GetJobProposal(jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(j, nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(nil, sql.ErrNoRows) + + svc.spawner.On("DeleteJob", j.ID, mock.Anything).Return(nil) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: spec.ID, + force: true, + }, + { + name: "already existing self managed job replacement success if forced", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.EXPECT().GetSpec(spec.ID, mock.Anything).Return(spec, nil) + svc.orm.EXPECT().GetJobProposal(jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindJobIDByAddress", address, evmChainID, mock.Anything).Return(j.ID, nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(nil, sql.ErrNoRows) + + svc.spawner.On("DeleteJob", j.ID, mock.Anything).Return(nil) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: spec.ID, + force: true, + }, + { + name: "already existing FMS managed job replacement success if forced", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.EXPECT().GetSpec(spec.ID, mock.Anything).Return(spec, nil) + svc.orm.EXPECT().GetJobProposal(jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindJobIDByAddress", address, evmChainID, mock.Anything).Return(j.ID, nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(&feeds.JobProposalSpec{ID: 100}, nil) + svc.orm.EXPECT().CancelSpec(int64(100), mock.Anything).Return(nil) + + svc.spawner.On("DeleteJob", j.ID, mock.Anything).Return(nil) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: spec.ID, + force: true, + }, + { + name: "spec does not exist", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(nil, errors.New("Not Found")) + }, + id: spec.ID, + force: false, + wantErr: "orm: job proposal spec: Not Found", + }, + { + name: "job proposal does not exist", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(nil, errors.New("Not Found")) + }, + id: spec.ID, + wantErr: "orm: job proposal: Not Found", + }, + { + name: "bridges do not exist", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(errors.New("bridges do not exist")) + }, + id: spec.ID, + wantErr: "failed to approve job spec due to bridge check: bridges do not exist", + }, + { + name: "rpc client not connected", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(nil, errors.New("Not Connected")) + }, + id: spec.ID, + force: false, + wantErr: "fms rpc client: Not Connected", + }, + { + name: "Fetching the 
approved spec fails (via external job id)", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.EXPECT().GetSpec(spec.ID, mock.Anything).Return(spec, nil) + svc.orm.EXPECT().GetJobProposal(jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(j, nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(nil, errors.New("failure")) + }, + id: spec.ID, + force: true, + wantErr: "could not approve job proposal: GetApprovedSpec failed: failure", + }, + { + name: "Fetching the approved spec fails", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.EXPECT().GetSpec(spec.ID, mock.Anything).Return(spec, nil) + svc.orm.EXPECT().GetJobProposal(jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindJobIDByAddress", address, evmChainID, mock.Anything).Return(j.ID, nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(nil, errors.New("failure")) + }, + id: spec.ID, + force: true, + wantErr: "could not approve job proposal: GetApprovedSpec failed: failure", + }, + { + name: "spec cancellation fails (via external job id)", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.EXPECT().GetSpec(spec.ID, mock.Anything).Return(spec, nil) + svc.orm.EXPECT().GetJobProposal(jp.ID, mock.Anything).Return(jp, nil) + 
svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(j, nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(&feeds.JobProposalSpec{ID: 100}, nil) + svc.orm.EXPECT().CancelSpec(int64(100), mock.Anything).Return(errors.New("failure")) + }, + id: spec.ID, + force: true, + wantErr: "could not approve job proposal: failure", + }, + { + name: "spec cancellation fails", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.EXPECT().GetSpec(spec.ID, mock.Anything).Return(spec, nil) + svc.orm.EXPECT().GetJobProposal(jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindJobIDByAddress", address, evmChainID, mock.Anything).Return(j.ID, nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(&feeds.JobProposalSpec{ID: 100}, nil) + svc.orm.EXPECT().CancelSpec(int64(100), mock.Anything).Return(errors.New("failure")) + }, + id: spec.ID, + force: true, + wantErr: "could not approve job proposal: failure", + }, + { + name: "create job error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindJobIDByAddress", address, evmChainID, mock.Anything).Return(int32(0), 
sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Return(errors.New("could not save")) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: could not save", + }, + { + name: "approve spec orm error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindJobIDByAddress", address, evmChainID, mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(errors.New("failure")) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: failure", + }, + { + name: "fms call error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindJobIDByAddress", address, evmChainID, mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(nil, errors.New("failure")) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: failure", + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + svc := setupTestServiceCfg(t, func(c *plugin.Config, s *plugin.Secrets) { + c.OCR2.Enabled = testutils.Ptr(true) + if tc.httpTimeout != nil { + c.JobPipeline.HTTPRequest.DefaultTimeout = tc.httpTimeout + } + }) + + if tc.before != nil { + tc.before(svc) + } + + err := svc.ApproveSpec(ctx, tc.id, tc.force) + + if tc.wantErr != "" { + require.Error(t, err) + assert.EqualError(t, err, tc.wantErr) + } else { + require.NoError(t, err) + } + }) + } +} + +func Test_Service_ApproveSpec_OCR2(t *testing.T) { + address := "0x613a38AC1659769640aaE063C651F48E0250454C" + feedIDHex := "0x0000000000000000000000000000000000000000000000000000000000000001" + feedID := common.HexToHash(feedIDHex) + externalJobID := uuid.New() + + var ( + ctx = testutils.Context(t) + defn = ` +name = 'PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000' +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +externalJobID = '%s' +observationSource = """ +// data source 1 +ds1 [type=bridge name=\"bridge-api0\" requestData="{\\\"data\\": {\\\"from\\\":\\\"PLI\\\",\\\"to\\\":\\\"ETH\\\"}}"]; +ds1_parse [type=jsonparse path="result"]; +ds1_multiply [type=multiply times=1000000000000000000]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; + +answer1 [type=median index=0]; +""" +[relayConfig] +chainID = 0 +[pluginConfig] +juelsPerFeeCoinSource = """ +ds1 [type=bridge 
name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +` + defn2 = ` +name = 'PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000' +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +externalJobID = '%s' +feedID = '%s' +observationSource = """ +// data source 1 +ds1 [type=bridge name=\"bridge-api0\" requestData="{\\\"data\\": {\\\"from\\\":\\\"PLI\\\",\\\"to\\\":\\\"ETH\\\"}}"]; +ds1_parse [type=jsonparse path="result"]; +ds1_multiply [type=multiply times=1000000000000000000]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; + +answer1 [type=median index=0]; +""" +[relayConfig] +chainID = 0 +[pluginConfig] +juelsPerFeeCoinSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +` + + jp = &feeds.JobProposal{ + ID: 1, + FeedsManagerID: 100, + } + spec = &feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusPending, + JobProposalID: jp.ID, + Version: 1, + Definition: fmt.Sprintf(defn, externalJobID.String()), + } + rejectedSpec = &feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusRejected, + JobProposalID: jp.ID, + Version: 1, + Definition: fmt.Sprintf(defn, externalJobID.String()), + } + cancelledSpec = &feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusCancelled, + JobProposalID: jp.ID, + Version: 1, + Definition: fmt.Sprintf(defn, externalJobID.String()), + } + j = job.Job{ + ID: 1, + ExternalJobID: externalJobID, + } + ) + + testCases := []struct { + name string + httpTimeout *commonconfig.Duration + before func(svc *TestService) + id int64 + force bool + wantErr string + }{ + { + name: "pending job success", + httpTimeout: 
commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). + Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: spec.ID, + force: false, + }, + { + name: "cancelled spec success when it is the latest spec", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", cancelledSpec.ID, mock.Anything).Return(cancelledSpec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.orm.On("GetLatestSpec", cancelledSpec.JobProposalID).Return(cancelledSpec, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, 
mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). + Return(nil) + svc.orm.On("ApproveSpec", + cancelledSpec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: cancelledSpec.ID, + force: false, + }, + { + name: "cancelled spec failed not latest spec", + before: func(svc *TestService) { + svc.orm.On("GetSpec", cancelledSpec.ID, mock.Anything).Return(cancelledSpec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.orm.On("GetLatestSpec", cancelledSpec.JobProposalID).Return(&feeds.JobProposalSpec{ + ID: 21, + Status: feeds.SpecStatusPending, + JobProposalID: jp.ID, + Version: 2, + Definition: defn, + }, nil) + }, + id: cancelledSpec.ID, + force: false, + wantErr: "cannot approve a cancelled spec", + }, + { + name: "rejected spec failed cannot be approved", + before: func(svc *TestService) { + svc.orm.On("GetSpec", cancelledSpec.ID, mock.Anything).Return(rejectedSpec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + }, + id: rejectedSpec.ID, + force: false, + wantErr: "cannot approve a rejected spec", + }, + { + name: "already existing job replacement error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", 
jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(j.ID, nil) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: a job for this contract address already exists - please use the 'force' option to replace it", + }, + { + name: "already existing self managed job replacement success if forced without feedID", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(nil, sql.ErrNoRows) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(j.ID, nil) + svc.spawner.On("DeleteJob", j.ID, mock.Anything).Return(nil) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: spec.ID, + force: true, + }, + { + name: "already existing self managed job replacement success if forced with feedID", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(&feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusPending, + JobProposalID: jp.ID, + Version: 1, + Definition: fmt.Sprintf(defn2, externalJobID.String(), &feedID), + }, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(nil, sql.ErrNoRows) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, &feedID, mock.Anything).Return(j.ID, nil) + svc.spawner.On("DeleteJob", j.ID, mock.Anything).Return(nil) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: spec.ID, + force: true, + }, + { + name: "already existing FMS managed job replacement success if forced", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(&feeds.JobProposalSpec{ID: 100}, nil) + svc.orm.EXPECT().CancelSpec(int64(100), mock.Anything).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(j.ID, nil) + svc.spawner.On("DeleteJob", j.ID, mock.Anything).Return(nil) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil)
+ svc.orm.On("ApproveSpec",
+ spec.ID,
+ externalJobID,
+ mock.Anything,
+ ).Return(nil)
+ svc.fmsClient.On("ApprovedJob",
+ mock.MatchedBy(func(ctx context.Context) bool { return true }),
+ &proto.ApprovedJobRequest{
+ Uuid: jp.RemoteUUID.String(),
+ Version: int64(spec.Version),
+ },
+ ).Return(&proto.ApprovedJobResponse{}, nil)
+ svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil)
+ },
+ id: spec.ID,
+ force: true,
+ },
+ {
+ name: "spec does not exist",
+ before: func(svc *TestService) {
+ svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(nil, errors.New("Not Found"))
+ },
+ id: spec.ID,
+ force: false,
+ wantErr: "orm: job proposal spec: Not Found",
+ },
+ {
+ name: "cannot approve an approved spec",
+ before: func(svc *TestService) {
+ aspec := &feeds.JobProposalSpec{
+ ID: spec.ID,
+ JobProposalID: jp.ID,
+ Status: feeds.SpecStatusApproved,
+ }
+ svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(aspec, nil)
+ svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil)
+ },
+ id: spec.ID,
+ force: false,
+ wantErr: "cannot approve an approved spec",
+ },
+ {
+ name: "cannot approve a rejected spec",
+ before: func(svc *TestService) {
+ rspec := &feeds.JobProposalSpec{
+ ID: spec.ID,
+ JobProposalID: jp.ID,
+ Status: feeds.SpecStatusRejected,
+ }
+ svc.orm.On("GetSpec", rspec.ID, mock.Anything).Return(rspec, nil)
+ svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil)
+ },
+ id: spec.ID,
+ force: false,
+ wantErr: "cannot approve a rejected spec",
+ },
+ {
+ name: "job proposal does not exist",
+ before: func(svc *TestService) {
+ svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil)
+ svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(nil, errors.New("Not Found"))
+ },
+ id: spec.ID,
+ wantErr: "orm: job proposal: Not Found",
+ },
+ {
+ name: "bridges do not exist",
+ httpTimeout: commonconfig.MustNewDuration(1 * time.Minute),
+ before: func(svc *TestService) {
+ 
svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(errors.New("bridges do not exist")) + }, + id: spec.ID, + wantErr: "failed to approve job spec due to bridge check: bridges do not exist", + }, + { + name: "rpc client not connected", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(nil, errors.New("Not Connected")) + }, + id: spec.ID, + force: false, + wantErr: "fms rpc client: Not Connected", + }, + { + name: "create job error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). 
+ Return(errors.New("could not save")) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: could not save", + }, + { + name: "approve spec orm error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). + Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(errors.New("failure")) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: failure", + }, + { + name: "fms call error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. 
+ On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). + Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(nil, errors.New("failure")) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: failure", + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + svc := setupTestServiceCfg(t, func(c *plugin.Config, s *plugin.Secrets) { + c.OCR2.Enabled = testutils.Ptr(true) + if tc.httpTimeout != nil { + c.JobPipeline.HTTPRequest.DefaultTimeout = tc.httpTimeout + } + }) + + if tc.before != nil { + tc.before(svc) + } + + err := svc.ApproveSpec(ctx, tc.id, tc.force) + + if tc.wantErr != "" { + require.Error(t, err) + assert.EqualError(t, err, tc.wantErr) + } else { + require.NoError(t, err) + } + }) + } +} + +func Test_Service_ApproveSpec_Bootstrap(t *testing.T) { + address := "0x613a38AC1659769640aaE063C651F48E0250454C" + feedIDHex := "0x0000000000000000000000000000000000000000000000000000000000000001" + feedID := common.HexToHash(feedIDHex) + externalJobID := uuid.New() + + var ( + ctx = testutils.Context(t) + defn = ` +name = 'PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000' +type = 'bootstrap' +schemaVersion = 1 +contractID = '0x613a38AC1659769640aaE063C651F48E0250454C' +externalJobID = '%s' +relay = 'evm' + +[relayConfig] +chainID = 0 +` + defn2 = ` +name = 'PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000' +type = 'bootstrap' +schemaVersion = 1 +contractID = 
'0x613a38AC1659769640aaE063C651F48E0250454C' +externalJobID = '%s' +feedID = '%s' +relay = 'evm' + +[relayConfig] +chainID = 0 +` + + jp = &feeds.JobProposal{ + ID: 1, + FeedsManagerID: 100, + } + spec = &feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusPending, + JobProposalID: jp.ID, + Version: 1, + Definition: fmt.Sprintf(defn, externalJobID.String()), + } + rejectedSpec = &feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusRejected, + JobProposalID: jp.ID, + Version: 1, + Definition: fmt.Sprintf(defn, externalJobID.String()), + } + cancelledSpec = &feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusCancelled, + JobProposalID: jp.ID, + Version: 1, + Definition: fmt.Sprintf(defn, externalJobID.String()), + } + j = job.Job{ + ID: 1, + ExternalJobID: externalJobID, + } + ) + + testCases := []struct { + name string + httpTimeout *commonconfig.Duration + before func(svc *TestService) + id int64 + force bool + wantErr string + }{ + { + name: "pending job success", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: spec.ID, + force: false, + }, + { + name: "cancelled spec success when it is the latest spec", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", cancelledSpec.ID, mock.Anything).Return(cancelledSpec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.orm.On("GetLatestSpec", cancelledSpec.JobProposalID).Return(cancelledSpec, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + cancelledSpec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: cancelledSpec.ID, + force: false, + }, + { + name: "cancelled spec failed not latest spec", + before: func(svc *TestService) { + svc.orm.On("GetSpec", cancelledSpec.ID, mock.Anything).Return(cancelledSpec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.orm.On("GetLatestSpec", cancelledSpec.JobProposalID).Return(&feeds.JobProposalSpec{ + ID: 21, + Status: feeds.SpecStatusPending, + JobProposalID: jp.ID, + Version: 2, + Definition: defn, + }, nil) + }, + id: cancelledSpec.ID, + force: false, + wantErr: "cannot approve a cancelled spec", + }, + { + name: "rejected spec failed cannot be approved", + before: func(svc *TestService) { + svc.orm.On("GetSpec", cancelledSpec.ID, mock.Anything).Return(rejectedSpec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + }, + id: rejectedSpec.ID, + force: false, + wantErr: "cannot approve a rejected spec", + }, + { + name: "already existing job replacement error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), 
mock.Anything).Return(j.ID, nil) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: a job for this contract address already exists - please use the 'force' option to replace it", + }, + { + name: "already existing self managed job replacement success if forced without feedID", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(nil, sql.ErrNoRows) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(j.ID, nil) + svc.spawner.On("DeleteJob", j.ID, mock.Anything).Return(nil) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: spec.ID, + force: true, + }, + { + name: "already existing self managed job replacement success if forced with feedID", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(&feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusPending, + JobProposalID: jp.ID, + Version: 1, + Definition: fmt.Sprintf(defn2, externalJobID.String(), feedID), + }, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(nil, sql.ErrNoRows) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, &feedID, mock.Anything).Return(j.ID, nil) + svc.spawner.On("DeleteJob", j.ID, mock.Anything).Return(nil) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.ApprovedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + id: spec.ID, + force: true, + }, + { + name: "already existing FMS managed job replacement success if forced", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + svc.orm.EXPECT().GetApprovedSpec(jp.ID, mock.Anything).Return(&feeds.JobProposalSpec{ID: 100}, nil) + svc.orm.EXPECT().CancelSpec(int64(100), mock.Anything).Return(nil) + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(j.ID, nil) + svc.spawner.On("DeleteJob", j.ID, mock.Anything).Return(nil) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). 
+ Return(nil)
+ svc.orm.On("ApproveSpec",
+ spec.ID,
+ externalJobID,
+ mock.Anything,
+ ).Return(nil)
+ svc.fmsClient.On("ApprovedJob",
+ mock.MatchedBy(func(ctx context.Context) bool { return true }),
+ &proto.ApprovedJobRequest{
+ Uuid: jp.RemoteUUID.String(),
+ Version: int64(spec.Version),
+ },
+ ).Return(&proto.ApprovedJobResponse{}, nil)
+ svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil)
+ },
+ id: spec.ID,
+ force: true,
+ },
+ {
+ name: "spec does not exist",
+ before: func(svc *TestService) {
+ svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(nil, errors.New("Not Found"))
+ },
+ id: spec.ID,
+ force: false,
+ wantErr: "orm: job proposal spec: Not Found",
+ },
+ {
+ name: "cannot approve an approved spec",
+ before: func(svc *TestService) {
+ aspec := &feeds.JobProposalSpec{
+ ID: spec.ID,
+ JobProposalID: jp.ID,
+ Status: feeds.SpecStatusApproved,
+ }
+ svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(aspec, nil)
+ svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil)
+ },
+ id: spec.ID,
+ force: false,
+ wantErr: "cannot approve an approved spec",
+ },
+ {
+ name: "cannot approve a rejected spec",
+ before: func(svc *TestService) {
+ rspec := &feeds.JobProposalSpec{
+ ID: spec.ID,
+ JobProposalID: jp.ID,
+ Status: feeds.SpecStatusRejected,
+ }
+ svc.orm.On("GetSpec", rspec.ID, mock.Anything).Return(rspec, nil)
+ svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil)
+ },
+ id: spec.ID,
+ force: false,
+ wantErr: "cannot approve a rejected spec",
+ },
+ {
+ name: "job proposal does not exist",
+ before: func(svc *TestService) {
+ svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil)
+ svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(nil, errors.New("Not Found"))
+ },
+ id: spec.ID,
+ wantErr: "orm: job proposal: Not Found",
+ },
+ {
+ name: "bridges do not exist",
+ httpTimeout: commonconfig.MustNewDuration(1 * time.Minute),
+ before: func(svc *TestService) {
+ 
svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(errors.New("bridges do not exist")) + }, + id: spec.ID, + wantErr: "failed to approve job spec due to bridge check: bridges do not exist", + }, + { + name: "rpc client not connected", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(nil, errors.New("Not Connected")) + }, + id: spec.ID, + force: false, + wantErr: "fms rpc client: Not Connected", + }, + { + name: "create job error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). 
+ Return(errors.New("could not save")) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: could not save", + }, + { + name: "approve spec orm error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. + On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). + Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(errors.New("failure")) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: failure", + }, + { + name: "fms call error", + httpTimeout: commonconfig.MustNewDuration(1 * time.Minute), + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.jobORM.On("AssertBridgesExist", mock.IsType(pipeline.Pipeline{})).Return(nil) + + svc.jobORM.On("FindJobByExternalJobID", externalJobID, mock.Anything).Return(job.Job{}, sql.ErrNoRows) + svc.jobORM.On("FindOCR2JobIDByAddress", address, (*common.Hash)(nil), mock.Anything).Return(int32(0), sql.ErrNoRows) + + svc.spawner. 
+ On("CreateJob", + mock.MatchedBy(func(j *job.Job) bool { + return j.Name.String == "PLI / ETH | version 3 | contract 0x0000000000000000000000000000000000000000" + }), + mock.Anything, + ). + Run(func(args mock.Arguments) { (args.Get(0).(*job.Job)).ID = 1 }). + Return(nil) + svc.orm.On("ApproveSpec", + spec.ID, + externalJobID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("ApprovedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.ApprovedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(nil, errors.New("failure")) + }, + id: spec.ID, + force: false, + wantErr: "could not approve job proposal: failure", + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + svc := setupTestServiceCfg(t, func(c *plugin.Config, s *plugin.Secrets) { + c.OCR2.Enabled = testutils.Ptr(true) + if tc.httpTimeout != nil { + c.JobPipeline.HTTPRequest.DefaultTimeout = tc.httpTimeout + } + }) + + if tc.before != nil { + tc.before(svc) + } + + err := svc.ApproveSpec(ctx, tc.id, tc.force) + + if tc.wantErr != "" { + require.Error(t, err) + assert.EqualError(t, err, tc.wantErr) + } else { + require.NoError(t, err) + } + }) + } +} + +func Test_Service_RejectSpec(t *testing.T) { + var ( + ctx = testutils.Context(t) + jp = &feeds.JobProposal{ + ID: 1, + FeedsManagerID: 100, + } + spec = &feeds.JobProposalSpec{ + ID: 20, + Status: feeds.SpecStatusPending, + JobProposalID: jp.ID, + Version: 1, + } + ) + + testCases := []struct { + name string + before func(svc *TestService) + wantErr string + }{ + { + name: "Success", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("RejectSpec", + spec.ID, + mock.Anything, + ).Return(nil) + svc.fmsClient.On("RejectedJob", + 
mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.RejectedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }, + ).Return(&proto.RejectedJobResponse{}, nil) + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + }, + { + name: "Fails to get spec", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(nil, errors.New("failure")) + }, + wantErr: "failure", + }, + { + name: "Cannot be a rejected proposal", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(&feeds.JobProposalSpec{ + Status: feeds.SpecStatusRejected, + }, nil) + }, + wantErr: "must be a pending job proposal spec", + }, + { + name: "Fails to get proposal", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(nil, errors.New("failure")) + }, + wantErr: "failure", + }, + { + name: "FMS not connected", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(nil, errors.New("disconnected")) + }, + wantErr: "disconnected", + }, + { + name: "Fails to update spec", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("RejectSpec", mock.Anything, mock.Anything).Return(errors.New("failure")) + }, + wantErr: "failure", + }, + { + name: "Fails to update spec", + before: func(svc *TestService) { + svc.orm.On("GetSpec", spec.ID, mock.Anything).Return(spec, nil) + svc.orm.On("GetJobProposal", jp.ID, mock.Anything).Return(jp, nil) + svc.connMgr.On("GetClient", 
jp.FeedsManagerID).Return(svc.fmsClient, nil) + svc.orm.On("RejectSpec", mock.Anything, mock.Anything).Return(nil) + svc.fmsClient. + On("RejectedJob", + mock.MatchedBy(func(ctx context.Context) bool { return true }), + &proto.RejectedJobRequest{ + Uuid: jp.RemoteUUID.String(), + Version: int64(spec.Version), + }). + Return(nil, errors.New("rpc failure")) + }, + wantErr: "rpc failure", + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + svc := setupTestService(t) + if tc.before != nil { + tc.before(svc) + } + + err := svc.RejectSpec(ctx, spec.ID) + + if tc.wantErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.wantErr) + } else { + require.NoError(t, err) + } + }) + } +} + +func Test_Service_UpdateSpecDefinition(t *testing.T) { + var ( + ctx = testutils.Context(t) + specID = int64(1) + updatedSpec = "updated spec" + spec = &feeds.JobProposalSpec{ + ID: specID, + Status: feeds.SpecStatusPending, + Definition: "spec", + } + ) + + testCases := []struct { + name string + before func(svc *TestService) + specID int64 + wantErr string + }{ + { + name: "success", + before: func(svc *TestService) { + svc.orm. + On("GetSpec", specID, mock.Anything). + Return(spec, nil) + svc.orm.On("UpdateSpecDefinition", + specID, + updatedSpec, + mock.Anything, + ).Return(nil) + }, + specID: specID, + }, + { + name: "does not exist", + before: func(svc *TestService) { + svc.orm. + On("GetSpec", specID, mock.Anything). + Return(nil, sql.ErrNoRows) + }, + specID: specID, + wantErr: "job proposal spec does not exist: sql: no rows in result set", + }, + { + name: "other get errors", + before: func(svc *TestService) { + svc.orm. + On("GetSpec", specID, mock.Anything). 
+ Return(nil, errors.New("other db error")) + }, + specID: specID, + wantErr: "database error: other db error", + }, + { + name: "cannot edit", + before: func(svc *TestService) { + spec := &feeds.JobProposalSpec{ + ID: 1, + Status: feeds.SpecStatusApproved, + } + + svc.orm. + On("GetSpec", specID, mock.Anything). + Return(spec, nil) + }, + specID: specID, + wantErr: "must be a pending or cancelled spec", + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + svc := setupTestService(t) + + if tc.before != nil { + tc.before(svc) + } + + err := svc.UpdateSpecDefinition(ctx, tc.specID, updatedSpec) + if tc.wantErr != "" { + assert.EqualError(t, err, tc.wantErr) + } else { + require.NoError(t, err) + } + }) + } +} + +func Test_Service_StartStop(t *testing.T) { + key := cltest.DefaultCSAKey + + var ( + mgr = feeds.FeedsManager{ + ID: 1, + URI: "localhost:2000", + } + pubKeyHex = "0f17c3bf72de8beef6e2d17a14c0a972f5d7e0e66e70722373f12b88382d40f9" + ) + + var pubKey crypto.PublicKey + _, err := hex.Decode([]byte(pubKeyHex), pubKey) + require.NoError(t, err) + + tests := []struct { + name string + beforeFunc func(svc *TestService) + }{ + { + name: "success with a feeds manager connection", + beforeFunc: func(svc *TestService) { + svc.csaKeystore.On("GetAll").Return([]csakey.KeyV2{key}, nil) + svc.orm.On("ListManagers").Return([]feeds.FeedsManager{mgr}, nil) + svc.connMgr.On("IsConnected", mgr.ID).Return(false) + svc.connMgr.On("Connect", mock.IsType(feeds.ConnectOpts{})) + svc.connMgr.On("Close") + svc.orm.On("CountJobProposalsByStatus").Return(&feeds.JobProposalCounts{}, nil) + }, + }, + { + name: "success with no registered managers", + beforeFunc: func(svc *TestService) { + svc.csaKeystore.On("GetAll").Return([]csakey.KeyV2{key}, nil) + svc.orm.On("ListManagers").Return([]feeds.FeedsManager{}, nil) + svc.connMgr.On("Close") + }, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) 
{ + t.Parallel() + + svc := setupTestService(t) + + if tt.beforeFunc != nil { + tt.beforeFunc(svc) + } + + servicetest.Run(t, svc) + }) + } +} diff --git a/core/services/fluxmonitorv2/config.go b/core/services/fluxmonitorv2/config.go new file mode 100644 index 00000000..f09184cc --- /dev/null +++ b/core/services/fluxmonitorv2/config.go @@ -0,0 +1,37 @@ +package fluxmonitorv2 + +import ( + "time" + + "github.com/goplugin/plugin-common/pkg/assets" + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" +) + +// Config defines the Flux Monitor configuration. +type Config interface { + FlagsContractAddress() string // Evm + MinContractPayment() *assets.Link // Evm +} + +type EvmFeeConfig interface { + LimitDefault() uint32 // Evm + LimitJobType() config.LimitJobType +} + +type EvmTransactionsConfig interface { + MaxQueued() uint64 // Evm +} + +type FluxMonitorConfig interface { + DefaultTransactionQueueDepth() uint32 +} + +type JobPipelineConfig interface { + DefaultHTTPTimeout() commonconfig.Duration +} + +// MinimumPollingInterval returns the minimum duration between polling ticks +func MinimumPollingInterval(c JobPipelineConfig) time.Duration { + return c.DefaultHTTPTimeout().Duration() +} diff --git a/core/services/fluxmonitorv2/contract_submitter.go b/core/services/fluxmonitorv2/contract_submitter.go new file mode 100644 index 00000000..63856b8c --- /dev/null +++ b/core/services/fluxmonitorv2/contract_submitter.go @@ -0,0 +1,69 @@ +package fluxmonitorv2 + +import ( + "context" + "math/big" + + "github.com/pkg/errors" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" +) + +//go:generate mockery --quiet --name ContractSubmitter --output ./mocks/ --case=underscore + +// FluxAggregatorABI initializes the Flux Aggregator ABI +var FluxAggregatorABI = 
evmtypes.MustGetABI(flux_aggregator_wrapper.FluxAggregatorABI) + +// ContractSubmitter defines an interface to submit an eth tx. +type ContractSubmitter interface { + Submit(ctx context.Context, roundID *big.Int, submission *big.Int, idempotencyKey *string) error +} + +// FluxAggregatorContractSubmitter submits the polled answer in an eth tx. +type FluxAggregatorContractSubmitter struct { + flux_aggregator_wrapper.FluxAggregatorInterface + orm ORM + keyStore KeyStoreInterface + gasLimit uint32 + forwardingAllowed bool + chainID *big.Int +} + +// NewFluxAggregatorContractSubmitter constructs a new NewFluxAggregatorContractSubmitter +func NewFluxAggregatorContractSubmitter( + contract flux_aggregator_wrapper.FluxAggregatorInterface, + orm ORM, + keyStore KeyStoreInterface, + gasLimit uint32, + forwardingAllowed bool, + chainID *big.Int, +) *FluxAggregatorContractSubmitter { + return &FluxAggregatorContractSubmitter{ + FluxAggregatorInterface: contract, + orm: orm, + keyStore: keyStore, + gasLimit: gasLimit, + forwardingAllowed: forwardingAllowed, + chainID: chainID, + } +} + +// Submit submits the answer by writing a EthTx for the txmgr to +// pick up +func (c *FluxAggregatorContractSubmitter) Submit(ctx context.Context, roundID *big.Int, submission *big.Int, idempotencyKey *string) error { + fromAddress, err := c.keyStore.GetRoundRobinAddress(c.chainID) + if err != nil { + return err + } + + payload, err := FluxAggregatorABI.Pack("submit", roundID, submission) + if err != nil { + return errors.Wrap(err, "abi.Pack failed") + } + + return errors.Wrap( + c.orm.CreateEthTransaction(ctx, fromAddress, c.Address(), payload, c.gasLimit, idempotencyKey), + "failed to send Eth transaction", + ) +} diff --git a/core/services/fluxmonitorv2/contract_submitter_test.go b/core/services/fluxmonitorv2/contract_submitter_test.go new file mode 100644 index 00000000..7094b5d8 --- /dev/null +++ b/core/services/fluxmonitorv2/contract_submitter_test.go @@ -0,0 +1,43 @@ +package 
fluxmonitorv2_test + +import ( + "math/big" + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/goplugin/pluginv3.0/v2/core/internal/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" + fmmocks "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2/mocks" +) + +func TestFluxAggregatorContractSubmitter_Submit(t *testing.T) { + var ( + fluxAggregator = mocks.NewFluxAggregator(t) + orm = fmmocks.NewORM(t) + keyStore = fmmocks.NewKeyStoreInterface(t) + gasLimit = uint32(2100) + forwardingAllowed = false + submitter = fluxmonitorv2.NewFluxAggregatorContractSubmitter(fluxAggregator, orm, keyStore, gasLimit, forwardingAllowed, testutils.FixtureChainID) + + toAddress = testutils.NewAddress() + fromAddress = testutils.NewAddress() + roundID = big.NewInt(1) + submission = big.NewInt(2) + ) + + payload, err := fluxmonitorv2.FluxAggregatorABI.Pack("submit", roundID, submission) + assert.NoError(t, err) + + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID).Return(fromAddress, nil) + fluxAggregator.On("Address").Return(toAddress) + + idempotencyKey := uuid.New().String() + orm.On("CreateEthTransaction", mock.Anything, fromAddress, toAddress, payload, gasLimit, &idempotencyKey).Return(nil) + + err = submitter.Submit(testutils.Context(t), roundID, submission, &idempotencyKey) + assert.NoError(t, err) +} diff --git a/core/services/fluxmonitorv2/delegate.go b/core/services/fluxmonitorv2/delegate.go new file mode 100644 index 00000000..0f2c84f3 --- /dev/null +++ b/core/services/fluxmonitorv2/delegate.go @@ -0,0 +1,101 @@ +package fluxmonitorv2 + +import ( + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + 
"github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +// Delegate represents a Flux Monitor delegate +type Delegate struct { + db *sqlx.DB + ethKeyStore keystore.Eth + jobORM job.ORM + pipelineORM pipeline.ORM + pipelineRunner pipeline.Runner + legacyChains legacyevm.LegacyChainContainer + lggr logger.Logger +} + +var _ job.Delegate = (*Delegate)(nil) + +// NewDelegate constructs a new delegate +func NewDelegate( + ethKeyStore keystore.Eth, + jobORM job.ORM, + pipelineORM pipeline.ORM, + pipelineRunner pipeline.Runner, + db *sqlx.DB, + legacyChains legacyevm.LegacyChainContainer, + lggr logger.Logger, +) *Delegate { + return &Delegate{ + db: db, + ethKeyStore: ethKeyStore, + jobORM: jobORM, + pipelineORM: pipelineORM, + pipelineRunner: pipelineRunner, + legacyChains: legacyChains, + lggr: lggr.Named("FluxMonitor"), + } +} + +// JobType implements the job.Delegate interface +func (d *Delegate) JobType() job.Type { + return job.FluxMonitor +} + +func (d *Delegate) BeforeJobCreated(spec job.Job) {} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } + +// ServicesForSpec returns the flux monitor service for the job spec +func (d *Delegate) ServicesForSpec(jb job.Job) (services []job.ServiceCtx, err error) { + if jb.FluxMonitorSpec == nil { + return nil, errors.Errorf("Delegate expects a *job.FluxMonitorSpec to be present, got %v", jb) + } + chain, err := d.legacyChains.Get(jb.FluxMonitorSpec.EVMChainID.String()) + if err != nil { + return nil, err + } + cfg := chain.Config() + strategy := txmgrcommon.NewQueueingTxStrategy(jb.ExternalJobID, cfg.FluxMonitor().DefaultTransactionQueueDepth(), 
cfg.Database().DefaultQueryTimeout()) + var checker txmgr.TransmitCheckerSpec + if chain.Config().FluxMonitor().SimulateTransactions() { + checker.CheckerType = txmgr.TransmitCheckerTypeSimulate + } + + fm, err := NewFromJobSpec( + jb, + d.db, + NewORM(d.db, d.lggr, chain.Config().Database(), chain.TxManager(), strategy, checker), + d.jobORM, + d.pipelineORM, + NewKeyStore(d.ethKeyStore), + chain.Client(), + chain.LogBroadcaster(), + d.pipelineRunner, + chain.Config().EVM(), + chain.Config().EVM().GasEstimator(), + chain.Config().EVM().Transactions(), + chain.Config().FluxMonitor(), + chain.Config().JobPipeline(), + chain.Config().Database(), + d.lggr, + ) + if err != nil { + return nil, err + } + + return []job.ServiceCtx{fm}, nil +} diff --git a/core/services/fluxmonitorv2/deviation_checker.go b/core/services/fluxmonitorv2/deviation_checker.go new file mode 100644 index 00000000..a0afc65c --- /dev/null +++ b/core/services/fluxmonitorv2/deviation_checker.go @@ -0,0 +1,80 @@ +package fluxmonitorv2 + +import ( + "github.com/shopspring/decimal" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// DeviationThresholds carries parameters used by the threshold-trigger logic +type DeviationThresholds struct { + Rel float64 // Relative change required, i.e. |new-old|/|old| >= Rel + Abs float64 // Absolute change required, i.e. |new-old| >= Abs +} + +// DeviationChecker checks the deviation of the next answer against the current +// answer. +type DeviationChecker struct { + Thresholds DeviationThresholds + lggr logger.Logger +} + +// NewDeviationChecker constructs a new deviation checker with thresholds. +func NewDeviationChecker(rel, abs float64, lggr logger.Logger) *DeviationChecker { + return &DeviationChecker{ + Thresholds: DeviationThresholds{ + Rel: rel, + Abs: abs, + }, + lggr: lggr.Named("DeviationChecker").With("threshold", rel, "absoluteThreshold", abs), + } +} + +// NewZeroDeviationChecker constructs a new deviation checker with 0 as thresholds. 
+func NewZeroDeviationChecker(lggr logger.Logger) *DeviationChecker { + return NewDeviationChecker(0, 0, lggr) +} + +// OutsideDeviation checks whether the next price is outside the threshold. +// If both thresholds are zero (default value), always returns true. +func (c *DeviationChecker) OutsideDeviation(curAnswer, nextAnswer decimal.Decimal) bool { + loggerFields := []interface{}{ + "currentAnswer", curAnswer, + "nextAnswer", nextAnswer, + } + + if c.Thresholds.Rel == 0 && c.Thresholds.Abs == 0 { + c.lggr.Debugw( + "Deviation thresholds both zero; short-circuiting deviation checker to "+ + "true, regardless of feed values", loggerFields...) + return true + } + diff := curAnswer.Sub(nextAnswer).Abs() + loggerFields = append(loggerFields, "absoluteDeviation", diff) + + if !diff.GreaterThan(decimal.NewFromFloat(c.Thresholds.Abs)) { + c.lggr.Debugw("Absolute deviation threshold not met", loggerFields...) + return false + } + + if curAnswer.IsZero() { + if nextAnswer.IsZero() { + c.lggr.Debugw("Relative deviation is undefined; can't satisfy threshold", loggerFields...) + return false + } + c.lggr.Infow("Threshold met: relative deviation is ∞", loggerFields...) + return true + } + + // 100*|new-old|/|old|: Deviation (relative to curAnswer) as a percentage + percentage := diff.Div(curAnswer.Abs()).Mul(decimal.NewFromInt(100)) + + loggerFields = append(loggerFields, "percentage", percentage) + + if percentage.LessThan(decimal.NewFromFloat(c.Thresholds.Rel)) { + c.lggr.Debugw("Relative deviation threshold not met", loggerFields...) + return false + } + c.lggr.Infow("Relative and absolute deviation thresholds both met", loggerFields...) 
+ return true +} diff --git a/core/services/fluxmonitorv2/deviation_checker_test.go b/core/services/fluxmonitorv2/deviation_checker_test.go new file mode 100644 index 00000000..a1da29fe --- /dev/null +++ b/core/services/fluxmonitorv2/deviation_checker_test.go @@ -0,0 +1,83 @@ +package fluxmonitorv2_test + +import ( + "fmt" + "testing" + + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" +) + +type outsideDeviationRow struct { + name string + curPrice, nextPrice decimal.Decimal + threshold float64 // in percentage + absoluteThreshold float64 + expectation bool +} + +func (o outsideDeviationRow) String() string { + return fmt.Sprintf( + `{name: "%s", curPrice: %s, nextPrice: %s, threshold: %.2f, `+ + "absoluteThreshold: %f, expectation: %v}", o.name, o.curPrice, o.nextPrice, + o.threshold, o.absoluteThreshold, o.expectation) +} + +func TestDeviationChecker_OutsideDeviation(t *testing.T) { + t.Parallel() + + f, i := decimal.NewFromFloat, decimal.NewFromInt + testCases := []outsideDeviationRow{ + // Start with a huge absoluteThreshold, to test relative threshold behavior + {"0 current price, outside deviation", i(0), i(100), 2, 0, true}, + {"0 current and next price", i(0), i(0), 2, 0, false}, + + {"inside deviation", i(100), i(101), 2, 0, false}, + {"equal to deviation", i(100), i(102), 2, 0, true}, + {"outside deviation", i(100), i(103), 2, 0, true}, + {"outside deviation zero", i(100), i(0), 2, 0, true}, + + {"inside deviation, crosses 0 backwards", f(0.1), f(-0.1), 201, 0, false}, + {"equal to deviation, crosses 0 backwards", f(0.1), f(-0.1), 200, 0, true}, + {"outside deviation, crosses 0 backwards", f(0.1), f(-0.1), 199, 0, true}, + + {"inside deviation, crosses 0 forwards", f(-0.1), f(0.1), 201, 0, false}, + {"equal to deviation, crosses 0 forwards", f(-0.1), f(0.1), 200, 0, true}, + {"outside deviation, crosses 0 forwards", 
f(-0.1), f(0.1), 199, 0, true}, + + {"thresholds=0, deviation", i(0), i(100), 0, 0, true}, + {"thresholds=0, no deviation", i(100), i(100), 0, 0, true}, + {"thresholds=0, all zeros", i(0), i(0), 0, 0, true}, + } + + c := func(tc outsideDeviationRow) { + checker := fluxmonitorv2.NewDeviationChecker(tc.threshold, tc.absoluteThreshold, logger.TestLogger(t)) + + assert.Equal(t, tc.expectation, + checker.OutsideDeviation(tc.curPrice, tc.nextPrice), + "check on OutsideDeviation failed for %s", tc, + ) + } + + for _, tc := range testCases { + tc := tc + // Checks on relative threshold + t.Run(tc.name, func(t *testing.T) { c(tc) }) + // Check corresponding absolute threshold tests; make relative threshold + // always pass (as long as curPrice and nextPrice aren't both 0.) + test2 := tc + test2.threshold = 0 + // absoluteThreshold is initially zero, so any change will trigger + test2.expectation = test2.curPrice.Sub(tc.nextPrice).Abs().GreaterThan(i(0)) || + test2.absoluteThreshold == 0 + t.Run(tc.name+" threshold zeroed", func(t *testing.T) { c(test2) }) + // Huge absoluteThreshold means trigger always fails + test3 := tc + test3.absoluteThreshold = 1e307 + test3.expectation = false + t.Run(tc.name+" max absolute threshold", func(t *testing.T) { c(test3) }) + } +} diff --git a/core/services/fluxmonitorv2/flags.go b/core/services/fluxmonitorv2/flags.go new file mode 100644 index 00000000..ed6d885e --- /dev/null +++ b/core/services/fluxmonitorv2/flags.go @@ -0,0 +1,79 @@ +package fluxmonitorv2 + +import ( + "reflect" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flags_wrapper" +) + +//go:generate mockery --quiet --name Flags --output ./mocks/ --case=underscore 
--structname Flags --filename flags.go + +type Flags interface { + ContractExists() bool + IsLowered(contractAddr common.Address) (bool, error) + Address() common.Address + ParseLog(log types.Log) (generated.AbigenLog, error) +} + +// ContractFlags wraps the a contract +type ContractFlags struct { + flags_wrapper.FlagsInterface +} + +// NewFlags constructs a new Flags from a flags contract address +func NewFlags(addrHex string, ethClient evmclient.Client) (Flags, error) { + flags := &ContractFlags{} + + if addrHex == "" { + return flags, nil + } + + contractAddr := common.HexToAddress(addrHex) + contract, err := flags_wrapper.NewFlags(contractAddr, ethClient) + if err != nil { + return flags, err + } + + // This is necessary due to the unfortunate fact that assigning `nil` to an + // interface variable causes `x == nil` checks to always return false. If we + // do this here, in the constructor, we can avoid using reflection when we + // check `p.flags == nil` later in the code. + if contract != nil && !reflect.ValueOf(contract).IsNil() { + flags.FlagsInterface = contract + } + + return flags, nil +} + +// Contract returns the flags contract +func (f *ContractFlags) Contract() flags_wrapper.FlagsInterface { + return f.FlagsInterface +} + +// ContractExists returns whether a flag contract exists +func (f *ContractFlags) ContractExists() bool { + return f.FlagsInterface != nil +} + +// IsLowered determines whether the flag is lowered for a given contract. 
+// If a contract does not exist, it is considered to be lowered +func (f *ContractFlags) IsLowered(contractAddr common.Address) (bool, error) { + if !f.ContractExists() { + return true, nil + } + + flags, err := f.GetFlags(nil, + []common.Address{utils.ZeroAddress, contractAddr}, + ) + if err != nil { + return true, err + } + + return !flags[0] || !flags[1], nil +} diff --git a/core/services/fluxmonitorv2/flags_test.go b/core/services/fluxmonitorv2/flags_test.go new file mode 100644 index 00000000..2eb59542 --- /dev/null +++ b/core/services/fluxmonitorv2/flags_test.go @@ -0,0 +1,56 @@ +package fluxmonitorv2_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" +) + +func TestFlags_IsLowered(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + getFlagsResult []bool + expected bool + }{ + {"both lowered", []bool{false, false}, true}, + {"global lowered", []bool{false, true}, true}, + {"contract lowered", []bool{true, false}, true}, + {"both raised", []bool{true, true}, false}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + var ( + flagsContract = mocks.NewFlags(t) + address = testutils.NewAddress() + ) + + flags := fluxmonitorv2.ContractFlags{FlagsInterface: flagsContract} + + flagsContract.On("GetFlags", mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + require.Equal(t, []common.Address{ + utils.ZeroAddress, + address, + }, args.Get(1).([]common.Address)) + }). 
+ Return(tc.getFlagsResult, nil) + + result, err := flags.IsLowered(address) + require.NoError(t, err) + require.Equal(t, tc.expected, result) + }) + } +} diff --git a/core/services/fluxmonitorv2/flux_monitor.go b/core/services/fluxmonitorv2/flux_monitor.go new file mode 100644 index 00000000..90c12f43 --- /dev/null +++ b/core/services/fluxmonitorv2/flux_monitor.go @@ -0,0 +1,1136 @@ +package fluxmonitorv2 + +import ( + "context" + "database/sql" + "fmt" + "math/big" + mrand "math/rand" + "reflect" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/shopspring/decimal" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flags_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/recovery" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2/promfm" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// PollRequest defines a request to initiate a poll +type PollRequest struct { + Type PollRequestType + Timestamp time.Time +} + +// PollRequestType defines which method was used to request a poll +type PollRequestType int + +const ( + PollRequestTypeUnknown PollRequestType = iota + PollRequestTypeInitial + PollRequestTypePoll + PollRequestTypeIdle + PollRequestTypeRound + PollRequestTypeHibernation + PollRequestTypeRetry + PollRequestTypeAwaken + PollRequestTypeDrumbeat 
+) + +// DefaultHibernationPollPeriod defines the hibernation polling period +const DefaultHibernationPollPeriod = 24 * time.Hour + +// FluxMonitor polls external price adapters via HTTP to check for price swings. +type FluxMonitor struct { + services.StateMachine + contractAddress common.Address + oracleAddress common.Address + jobSpec job.Job + spec pipeline.Spec + runner pipeline.Runner + q pg.Q + orm ORM + jobORM job.ORM + pipelineORM pipeline.ORM + keyStore KeyStoreInterface + pollManager *PollManager + paymentChecker *PaymentChecker + contractSubmitter ContractSubmitter + deviationChecker *DeviationChecker + submissionChecker *SubmissionChecker + flags Flags + fluxAggregator flux_aggregator_wrapper.FluxAggregatorInterface + logBroadcaster log.Broadcaster + chainID *big.Int + + logger logger.SugaredLogger + + backlog *utils.BoundedPriorityQueue[log.Broadcast] + chProcessLogs chan struct{} + + chStop services.StopChan + waitOnStop chan struct{} +} + +// NewFluxMonitor returns a new instance of PollingDeviationChecker. 
+func NewFluxMonitor( + pipelineRunner pipeline.Runner, + jobSpec job.Job, + spec pipeline.Spec, + q pg.Q, + orm ORM, + jobORM job.ORM, + pipelineORM pipeline.ORM, + keyStore KeyStoreInterface, + pollManager *PollManager, + paymentChecker *PaymentChecker, + contractAddress common.Address, + contractSubmitter ContractSubmitter, + deviationChecker *DeviationChecker, + submissionChecker *SubmissionChecker, + flags Flags, + fluxAggregator flux_aggregator_wrapper.FluxAggregatorInterface, + logBroadcaster log.Broadcaster, + fmLogger logger.Logger, + chainID *big.Int, +) (*FluxMonitor, error) { + fm := &FluxMonitor{ + q: q, + runner: pipelineRunner, + jobSpec: jobSpec, + spec: spec, + orm: orm, + jobORM: jobORM, + pipelineORM: pipelineORM, + keyStore: keyStore, + pollManager: pollManager, + paymentChecker: paymentChecker, + contractAddress: contractAddress, + contractSubmitter: contractSubmitter, + deviationChecker: deviationChecker, + submissionChecker: submissionChecker, + flags: flags, + logBroadcaster: logBroadcaster, + fluxAggregator: fluxAggregator, + logger: logger.Sugared(fmLogger), + chainID: chainID, + backlog: utils.NewBoundedPriorityQueue[log.Broadcast](map[uint]int{ + // We want reconnecting nodes to be able to submit to a round + // that hasn't hit maxAnswers yet, as well as the newest round. + PriorityNewRoundLog: 2, + PriorityAnswerUpdatedLog: 1, + PriorityFlagChangedLog: 2, + }), + chProcessLogs: make(chan struct{}, 1), + chStop: make(services.StopChan), + waitOnStop: make(chan struct{}), + } + + return fm, nil +} + +// NewFromJobSpec constructs an instance of FluxMonitor with sane defaults and +// validation. 
+func NewFromJobSpec( + jobSpec job.Job, + db *sqlx.DB, + orm ORM, + jobORM job.ORM, + pipelineORM pipeline.ORM, + keyStore KeyStoreInterface, + ethClient evmclient.Client, + logBroadcaster log.Broadcaster, + pipelineRunner pipeline.Runner, + cfg Config, + fcfg EvmFeeConfig, + ecfg EvmTransactionsConfig, + fmcfg FluxMonitorConfig, + jcfg JobPipelineConfig, + dbCfg pg.QConfig, + lggr logger.Logger, +) (*FluxMonitor, error) { + fmSpec := jobSpec.FluxMonitorSpec + chainId := ethClient.ConfiguredChainID() + + if !validatePollTimer(fmSpec.PollTimerDisabled, MinimumPollingInterval(jcfg), fmSpec.PollTimerPeriod) { + return nil, fmt.Errorf( + "PollTimerPeriod (%s), must be equal or greater than JobPipeline.HTTPRequest.DefaultTimeout (%s) ", + fmSpec.PollTimerPeriod, + MinimumPollingInterval(jcfg), + ) + } + + // Set up the flux aggregator + fluxAggregator, err := flux_aggregator_wrapper.NewFluxAggregator( + fmSpec.ContractAddress.Address(), + ethClient, + ) + if err != nil { + return nil, err + } + + gasLimit := fcfg.LimitDefault() + fmLimit := fcfg.LimitJobType().FM() + if jobSpec.GasLimit.Valid { + gasLimit = jobSpec.GasLimit.Uint32 + } else if fmLimit != nil { + gasLimit = *fmLimit + } + + contractSubmitter := NewFluxAggregatorContractSubmitter( + fluxAggregator, + orm, + keyStore, + gasLimit, + jobSpec.ForwardingAllowed, + chainId, + ) + + flags, err := NewFlags(cfg.FlagsContractAddress(), ethClient) + logger.Sugared(lggr).ErrorIf(err, + fmt.Sprintf( + "Error creating Flags contract instance, check address: %s", + cfg.FlagsContractAddress(), + ), + ) + + paymentChecker := &PaymentChecker{ + MinContractPayment: cfg.MinContractPayment(), + MinJobPayment: fmSpec.MinPayment, + } + + min, err := fluxAggregator.MinSubmissionValue(nil) + if err != nil { + return nil, err + } + + max, err := fluxAggregator.MaxSubmissionValue(nil) + if err != nil { + return nil, err + } + + fmLogger := lggr.With( + "jobID", jobSpec.ID, + "contract", fmSpec.ContractAddress.Hex(), + ) + + 
pollManager, err := NewPollManager( + PollManagerConfig{ + PollTickerInterval: fmSpec.PollTimerPeriod, + PollTickerDisabled: fmSpec.PollTimerDisabled, + IdleTimerPeriod: fmSpec.IdleTimerPeriod, + IdleTimerDisabled: fmSpec.IdleTimerDisabled, + DrumbeatSchedule: fmSpec.DrumbeatSchedule, + DrumbeatEnabled: fmSpec.DrumbeatEnabled, + DrumbeatRandomDelay: fmSpec.DrumbeatRandomDelay, + HibernationPollPeriod: DefaultHibernationPollPeriod, // Not currently configurable + MinRetryBackoffDuration: 1 * time.Minute, + MaxRetryBackoffDuration: 1 * time.Hour, + }, + fmLogger, + ) + if err != nil { + return nil, err + } + + return NewFluxMonitor( + pipelineRunner, + jobSpec, + *jobSpec.PipelineSpec, + pg.NewQ(db, lggr, dbCfg), + orm, + jobORM, + pipelineORM, + keyStore, + pollManager, + paymentChecker, + fmSpec.ContractAddress.Address(), + contractSubmitter, + NewDeviationChecker( + float64(fmSpec.Threshold), + float64(fmSpec.AbsoluteThreshold), + fmLogger, + ), + NewSubmissionChecker(min, max), + flags, + fluxAggregator, + logBroadcaster, + fmLogger, + chainId, + ) +} + +const ( + PriorityFlagChangedLog uint = 0 + PriorityNewRoundLog uint = 1 + PriorityAnswerUpdatedLog uint = 2 +) + +// Start implements the job.Service interface. It begins the CSP consumer in a +// single goroutine to poll the price adapters and listen to NewRound events. +func (fm *FluxMonitor) Start(context.Context) error { + return fm.StartOnce("FluxMonitor", func() error { + fm.logger.Debug("Starting Flux Monitor for job") + + go fm.consume() + + return nil + }) +} + +func (fm *FluxMonitor) IsHibernating() bool { + if !fm.flags.ContractExists() { + return false + } + + isFlagLowered, err := fm.flags.IsLowered(fm.contractAddress) + if err != nil { + fm.logger.Errorf("unable to determine hibernation status: %v", err) + + return false + } + + return !isFlagLowered +} + +// Close implements the job.Service interface. It stops this instance from +// polling, cleaning up resources. 
+func (fm *FluxMonitor) Close() error { + return fm.StopOnce("FluxMonitor", func() error { + fm.pollManager.Stop() + close(fm.chStop) + <-fm.waitOnStop + + return nil + }) +} + +// JobID implements the listener.Listener interface. +func (fm *FluxMonitor) JobID() int32 { return fm.spec.JobID } + +// HandleLog processes the contract logs +func (fm *FluxMonitor) HandleLog(broadcast log.Broadcast) { + log := broadcast.DecodedLog() + if log == nil || reflect.ValueOf(log).IsNil() { + fm.logger.Panic("HandleLog: failed to handle log of type nil") + } + + switch log := log.(type) { + case *flux_aggregator_wrapper.FluxAggregatorNewRound: + fm.backlog.Add(PriorityNewRoundLog, broadcast) + + case *flux_aggregator_wrapper.FluxAggregatorAnswerUpdated: + fm.backlog.Add(PriorityAnswerUpdatedLog, broadcast) + + case *flags_wrapper.FlagsFlagRaised: + if log.Subject == evmutils.ZeroAddress || log.Subject == fm.contractAddress { + fm.backlog.Add(PriorityFlagChangedLog, broadcast) + } + + case *flags_wrapper.FlagsFlagLowered: + if log.Subject == evmutils.ZeroAddress || log.Subject == fm.contractAddress { + fm.backlog.Add(PriorityFlagChangedLog, broadcast) + } + + default: + fm.logger.Warnf("unexpected log type %T", log) + return + } + + select { + case fm.chProcessLogs <- struct{}{}: + default: + } +} + +func (fm *FluxMonitor) consume() { + defer close(fm.waitOnStop) + + if err := fm.SetOracleAddress(); err != nil { + fm.logger.Warnw( + "unable to set oracle address, this flux monitor job may not work correctly", + "err", err, + ) + } + + // Subscribe to contract logs + unsubscribe := fm.logBroadcaster.Register(fm, log.ListenerOpts{ + Contract: fm.fluxAggregator.Address(), + ParseLog: fm.fluxAggregator.ParseLog, + LogsWithTopics: map[common.Hash][][]log.Topic{ + flux_aggregator_wrapper.FluxAggregatorNewRound{}.Topic(): nil, + flux_aggregator_wrapper.FluxAggregatorAnswerUpdated{}.Topic(): nil, + }, + MinIncomingConfirmations: 0, + }) + defer unsubscribe() + + if 
fm.flags.ContractExists() { + unsubscribe := fm.logBroadcaster.Register(fm, log.ListenerOpts{ + Contract: fm.flags.Address(), + ParseLog: fm.flags.ParseLog, + LogsWithTopics: map[common.Hash][][]log.Topic{ + flags_wrapper.FlagsFlagLowered{}.Topic(): nil, + flags_wrapper.FlagsFlagRaised{}.Topic(): nil, + }, + MinIncomingConfirmations: 0, + }) + defer unsubscribe() + } + + fm.pollManager.Start(fm.IsHibernating(), fm.initialRoundState()) + + tickLogger := fm.logger.With( + "pollInterval", fm.pollManager.cfg.PollTickerInterval, + "idlePeriod", fm.pollManager.cfg.IdleTimerPeriod, + ) + + for { + select { + case <-fm.chStop: + return + + case <-fm.chProcessLogs: + recovery.WrapRecover(fm.logger, fm.processLogs) + + case at := <-fm.pollManager.PollTickerTicks(): + tickLogger.Debugf("Poll ticker fired on %v", formatTime(at)) + recovery.WrapRecover(fm.logger, func() { + fm.pollIfEligible(PollRequestTypePoll, fm.deviationChecker, nil) + }) + + case at := <-fm.pollManager.IdleTimerTicks(): + tickLogger.Debugf("Idle timer fired on %v", formatTime(at)) + recovery.WrapRecover(fm.logger, func() { + fm.pollIfEligible(PollRequestTypeIdle, NewZeroDeviationChecker(fm.logger), nil) + }) + + case at := <-fm.pollManager.RoundTimerTicks(): + tickLogger.Debugf("Round timer fired on %v", formatTime(at)) + recovery.WrapRecover(fm.logger, func() { + fm.pollIfEligible(PollRequestTypeRound, fm.deviationChecker, nil) + }) + + case at := <-fm.pollManager.HibernationTimerTicks(): + tickLogger.Debugf("Hibernation timer fired on %v", formatTime(at)) + recovery.WrapRecover(fm.logger, func() { + fm.pollIfEligible(PollRequestTypeHibernation, NewZeroDeviationChecker(fm.logger), nil) + }) + + case at := <-fm.pollManager.RetryTickerTicks(): + tickLogger.Debugf("Retry ticker fired on %v", formatTime(at)) + recovery.WrapRecover(fm.logger, func() { + fm.pollIfEligible(PollRequestTypeRetry, NewZeroDeviationChecker(fm.logger), nil) + }) + + case at := <-fm.pollManager.DrumbeatTicks(): + 
tickLogger.Debugf("Drumbeat ticker fired on %v", formatTime(at)) + recovery.WrapRecover(fm.logger, func() { + fm.pollIfEligible(PollRequestTypeDrumbeat, NewZeroDeviationChecker(fm.logger), nil) + }) + + case request := <-fm.pollManager.Poll(): + switch request.Type { + case PollRequestTypeUnknown: + break + default: + recovery.WrapRecover(fm.logger, func() { + fm.pollIfEligible(request.Type, fm.deviationChecker, nil) + }) + } + } + } +} + +func formatTime(at time.Time) string { + ago := time.Since(at) + return fmt.Sprintf("%v (%v ago)", at.UTC().Format(time.RFC3339), ago) +} + +// SetOracleAddress sets the oracle address which matches the node's keys. +// If none match, it uses the first available key +func (fm *FluxMonitor) SetOracleAddress() error { + oracleAddrs, err := fm.fluxAggregator.GetOracles(nil) + if err != nil { + fm.logger.Error("failed to get list of oracles from FluxAggregator contract") + return errors.Wrap(err, "failed to get list of oracles from FluxAggregator contract") + } + keys, err := fm.keyStore.EnabledKeysForChain(fm.chainID) + if err != nil { + return errors.Wrap(err, "failed to load keys") + } + for _, k := range keys { + for _, oracleAddr := range oracleAddrs { + if k.Address == oracleAddr { + fm.oracleAddress = oracleAddr + return nil + } + } + } + + log := fm.logger.With( + "keys", keys, + "oracleAddresses", oracleAddrs, + ) + + if len(keys) > 0 { + addr := keys[0].Address + log.Warnw("None of the node's keys matched any oracle addresses, using first available key. This flux monitor job may not work correctly", + "address", addr.Hex(), + ) + fm.oracleAddress = addr + + return nil + } + + log.Error("No keys found. 
This flux monitor job may not work correctly") + return errors.New("No keys found") +} + +func (fm *FluxMonitor) processLogs() { + for !fm.backlog.Empty() { + broadcast := fm.backlog.Take() + fm.processBroadcast(broadcast) + } +} + +func (fm *FluxMonitor) processBroadcast(broadcast log.Broadcast) { + // If the log is a duplicate of one we've seen before, ignore it (this + // happens because of the LogBroadcaster's backfilling behavior). + consumed, err := fm.logBroadcaster.WasAlreadyConsumed(broadcast) + + if err != nil { + fm.logger.Errorf("Error determining if log was already consumed: %v", err) + return + } else if consumed { + fm.logger.Debug("Log was already consumed by Flux Monitor, skipping") + return + } + + started := time.Now() + decodedLog := broadcast.DecodedLog() + switch log := decodedLog.(type) { + case *flux_aggregator_wrapper.FluxAggregatorNewRound: + fm.respondToNewRoundLog(*log, broadcast) + case *flux_aggregator_wrapper.FluxAggregatorAnswerUpdated: + fm.respondToAnswerUpdatedLog(*log) + fm.markLogAsConsumed(broadcast, decodedLog, started) + case *flags_wrapper.FlagsFlagRaised: + fm.respondToFlagsRaisedLog() + fm.markLogAsConsumed(broadcast, decodedLog, started) + case *flags_wrapper.FlagsFlagLowered: + // Only reactivate if it is hibernating + if fm.pollManager.isHibernating.Load() { + fm.pollManager.Awaken(fm.initialRoundState()) + fm.pollIfEligible(PollRequestTypeAwaken, NewZeroDeviationChecker(fm.logger), broadcast) + } + default: + fm.logger.Errorf("unknown log %v of type %T", log, log) + } +} + +func (fm *FluxMonitor) markLogAsConsumed(broadcast log.Broadcast, decodedLog interface{}, started time.Time) { + if err := fm.logBroadcaster.MarkConsumed(broadcast); err != nil { + fm.logger.Errorw("Failed to mark log as consumed", + "err", err, "logType", fmt.Sprintf("%T", decodedLog), "log", broadcast.String(), "elapsed", time.Since(started)) + } +} + +func (fm *FluxMonitor) respondToFlagsRaisedLog() { + fm.logger.Debug("FlagsFlagRaised log") + // 
check the contract before hibernating, because one flag could be lowered + // while the other flag remains raised + isFlagLowered, err := fm.flags.IsLowered(fm.contractAddress) + fm.logger.ErrorIf(err, "Error determining if flag is still raised") + if !isFlagLowered { + fm.pollManager.Hibernate() + } +} + +// The AnswerUpdated log tells us that round has successfully closed with a new +// answer. We update our view of the oracleRoundState in case this log was +// generated by a chain reorg. +func (fm *FluxMonitor) respondToAnswerUpdatedLog(log flux_aggregator_wrapper.FluxAggregatorAnswerUpdated) { + answerUpdatedLogger := fm.logger.With( + "round", log.RoundId, + "answer", log.Current.String(), + "timestamp", log.UpdatedAt.String(), + ) + + answerUpdatedLogger.Debug("AnswerUpdated log") + + roundState, err := fm.roundState(0) + if err != nil { + answerUpdatedLogger.Errorf("could not fetch oracleRoundState: %v", err) + + return + } + + fm.pollManager.Reset(roundState) +} + +// The NewRound log tells us that an oracle has initiated a new round. This tells us that we +// need to poll and submit an answer to the contract regardless of the deviation. 
+func (fm *FluxMonitor) respondToNewRoundLog(log flux_aggregator_wrapper.FluxAggregatorNewRound, lb log.Broadcast) { + started := time.Now() + ctx, cancel := fm.chStop.NewCtx() + defer cancel() + + newRoundLogger := fm.logger.With( + "round", log.RoundId, + "startedBy", log.StartedBy.Hex(), + "startedAt", log.StartedAt.String(), + "startedAtUtc", time.Unix(log.StartedAt.Int64(), 0).UTC().Format(time.RFC3339), + ) + var markConsumed = true + defer func() { + if markConsumed { + if err := fm.logBroadcaster.MarkConsumed(lb); err != nil { + fm.logger.Errorw("Failed to mark log consumed", "err", err, "log", lb.String()) + } + } + }() + + newRoundLogger.Debug("NewRound log") + promfm.SetBigInt(promfm.SeenRound.WithLabelValues(fmt.Sprintf("%d", fm.spec.JobID)), log.RoundId) + + // + // NewRound answer submission logic: + // - Any log that reaches this point, regardless of chain reorgs or log backfilling, is one that we have + // not seen before. Therefore, we should consider acting upon it. + // - We always take the round ID from the log, rather than the round ID suggested by `.RoundState`. The + // reason is that if two NewRound logs come in in rapid succession, and we submit a tx for the first, + // the `.ReportableRoundID` field in the roundState() response for the 2nd log will not reflect the + // fact that we've submitted for the first round (assuming it hasn't been mined yet). + // - In the event of a reorg that pushes our previous submissions back into the mempool, we can rely on the + // TxManager to ensure they end up being mined into blocks, but this may cause them to revert if they + // are mined in an order that violates certain conditions in the FluxAggregator (restartDelay, etc.). + // Therefore, the cleanest solution at present is to resubmit for the reorged rounds. The drawback + // of this approach is that one or the other submission tx for a given round will revert, costing the + // node operator some gas. 
The benefit is that those submissions are guaranteed to be made, ensuring + // that we have high data availability (and also ensuring that node operators get paid). + // - There are a few straightforward cases where we don't want to submit: + // - When we're not eligible + // - When the aggregator is underfunded + // - When we were the initiator of the round (i.e. we've received our own NewRound log) + // - There are a few more nuanced cases as well: + // - When our node polls at the same time as another node, and both attempt to start a round. In that + // case, it's possible that the other node will start the round, and our node will see the NewRound + // log and try to submit again. + // - When the poll ticker fires very soon after we've responded to a NewRound log. + // + // To handle these more nuanced cases, we record round IDs and whether we've submitted for those rounds + // in the DB. If we see we've already submitted for a given round, we simply bail out. + // + // However, in the case of a chain reorganization, we might see logs with round IDs that we've already + // seen. As mentioned above, we want to re-respond to these rounds to ensure high data availability. + // Therefore, if a log arrives with a round ID that is < the most recent that we submitted to, we delete + // all of the round IDs in the DB back to (and including) the incoming round ID. This essentially + // rewinds the system back to a state wherein those reorg'ed rounds never occurred, allowing it to move + // forward normally. + // + // There is one small exception: if the reorg is fairly shallow, and only un-starts a single round, we + // do not need to resubmit, because the TxManager will ensure that our existing submission gets back + // into the chain. 
There is a very small risk that one of the nodes in the quorum (namely, whichever + // one started the previous round) will have its existing submission mined first, thereby violating + // the restartDelay, but as this risk is isolated to a single node, the round will not time out and + // go stale. We consider this acceptable. + // + + logRoundID := uint32(log.RoundId.Uint64()) + + // We always want to reset the idle timer upon receiving a NewRound log, so we do it before any `return` statements. + fm.pollManager.ResetIdleTimer(log.StartedAt.Uint64()) + + mostRecentRoundID, err := fm.orm.MostRecentFluxMonitorRoundID(fm.contractAddress) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + newRoundLogger.Errorf("error fetching Flux Monitor most recent round ID from DB: %v", err) + return + } + + roundStats, jobRunStatus, err := fm.statsAndStatusForRound(logRoundID, 1) + if err != nil { + newRoundLogger.Errorf("error determining round stats / run status for round: %v", err) + return + } + + if logRoundID < mostRecentRoundID && roundStats.NumNewRoundLogs > 0 { + newRoundLogger.Debugf("Received an older round log (and number of previously received NewRound logs is: %v) - "+ + "a possible reorg, hence deleting round ids from %v to %v", roundStats.NumNewRoundLogs, logRoundID, mostRecentRoundID) + err = fm.orm.DeleteFluxMonitorRoundsBackThrough(fm.contractAddress, logRoundID) + if err != nil { + newRoundLogger.Errorf("error deleting reorged Flux Monitor rounds from DB: %v", err) + return + } + + // as all newer stats were deleted, at this point a new round stats entry will be created + roundStats, err = fm.orm.FindOrCreateFluxMonitorRoundStats(fm.contractAddress, logRoundID, 1) + if err != nil { + newRoundLogger.Errorf("error determining subsequent round stats for round: %v", err) + return + } + } + + if roundStats.NumSubmissions > 0 { + // This indicates either that: + // - We tried to start a round at the same time as another node, and their transaction was mined 
first, or + // - The chain experienced a shallow reorg that unstarted the current round. + // If our previous attempt is still pending, return early and don't re-submit + // If our previous attempt is already over (completed or errored), we should retry + newRoundLogger.Debugf("There are already %v existing submissions to this round, while job run status is: %v", roundStats.NumSubmissions, jobRunStatus) + if !jobRunStatus.Finished() { + newRoundLogger.Debug("Ignoring new round request: started round simultaneously with another node") + return + } + } + + // Ignore rounds we started + if fm.oracleAddress == log.StartedBy { + newRoundLogger.Info("Ignoring new round request: we started this round") + return + } + + // Ignore rounds we're not eligible for, or for which we won't be paid + roundState, err := fm.roundState(logRoundID) + if err != nil { + newRoundLogger.Errorf("Ignoring new round request: error fetching eligibility from contract: %v", err) + return + } + + fm.pollManager.Reset(roundState) + err = fm.checkEligibilityAndAggregatorFunding(roundState) + if err != nil { + newRoundLogger.Infof("Ignoring new round request: %v", err) + return + } + + newRoundLogger.Info("Responding to new round request") + + // Best effort to attach metadata. 
+ var metaDataForBridge map[string]interface{} + lrd, err := fm.fluxAggregator.LatestRoundData(nil) + if err != nil { + newRoundLogger.Warnw("Couldn't read latest round data for request meta", "err", err) + } else { + metaDataForBridge, err = bridges.MarshalBridgeMetaData(lrd.Answer, lrd.UpdatedAt) + if err != nil { + newRoundLogger.Warnw("Error marshalling roundState for request meta", "err", err) + } + } + + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "jobSpec": map[string]interface{}{ + "databaseID": fm.jobSpec.ID, + "externalJobID": fm.jobSpec.ExternalJobID, + "name": fm.jobSpec.Name.ValueOrZero(), + "evmChainID": fm.chainID.String(), + }, + "jobRun": map[string]interface{}{ + "meta": metaDataForBridge, + }, + }) + + // Call the v2 pipeline to execute a new job run + run, results, err := fm.runner.ExecuteRun(ctx, fm.spec, vars, fm.logger) + if err != nil { + newRoundLogger.Errorw(fmt.Sprintf("error executing new run for job ID %v name %v", fm.spec.JobID, fm.spec.JobName), "err", err) + return + } + result, err := results.FinalResult(newRoundLogger).SingularResult() + if err != nil || result.Error != nil { + newRoundLogger.Errorw("can't fetch answer", "err", err, "result", result) + fm.jobORM.TryRecordError(fm.spec.JobID, "Error polling") + return + } + answer, err := utils.ToDecimal(result.Value) + if err != nil { + newRoundLogger.Errorw(fmt.Sprintf("error executing new run for job ID %v name %v", fm.spec.JobID, fm.spec.JobName), "err", err) + return + } + + if !fm.isValidSubmission(newRoundLogger, answer, started) { + return + } + + if roundState.PaymentAmount == nil { + newRoundLogger.Error("roundState.PaymentAmount shouldn't be nil") + } + + err = fm.q.Transaction(func(tx pg.Queryer) error { + if err2 := fm.runner.InsertFinishedRun(run, false, pg.WithQueryer(tx)); err2 != nil { + return err2 + } + if err2 := fm.queueTransactionForTxm(ctx, tx, run.ID, answer, roundState.RoundId, &log); err2 != nil { + return err2 + } + return 
fm.logBroadcaster.MarkConsumed(lb, pg.WithQueryer(tx)) + }) + // Either the tx failed and we want to reprocess the log, or it succeeded and already marked it consumed + markConsumed = false + if err != nil { + newRoundLogger.Errorf("unable to create job run: %v", err) + return + } +} + +var ( + // ErrNotEligible defines when the round is not eligible for submission + ErrNotEligible = errors.New("not eligible to submit") + // ErrUnderfunded defines when the aggregator does not have sufficient funds + ErrUnderfunded = errors.New("aggregator is underfunded") + // ErrPaymentTooLow defines when the round payment is too low + ErrPaymentTooLow = errors.New("round payment amount < minimum contract payment") +) + +func (fm *FluxMonitor) checkEligibilityAndAggregatorFunding(roundState flux_aggregator_wrapper.OracleRoundState) error { + if !roundState.EligibleToSubmit { + return ErrNotEligible + } else if !fm.paymentChecker.SufficientFunds( + roundState.AvailableFunds, + roundState.PaymentAmount, + roundState.OracleCount, + ) { + return ErrUnderfunded + } else if !fm.paymentChecker.SufficientPayment(roundState.PaymentAmount) { + return ErrPaymentTooLow + } + return nil +} + +func (fm *FluxMonitor) pollIfEligible(pollReq PollRequestType, deviationChecker *DeviationChecker, broadcast log.Broadcast) { + started := time.Now() + ctx, cancel := fm.chStop.NewCtx() + defer cancel() + + l := fm.logger.With( + "threshold", deviationChecker.Thresholds.Rel, + "absoluteThreshold", deviationChecker.Thresholds.Abs, + ) + var markConsumed = true + defer func() { + if markConsumed && broadcast != nil { + if err := fm.logBroadcaster.MarkConsumed(broadcast); err != nil { + l.Errorw("Failed to mark log consumed", "err", err, "log", broadcast.String()) + } + } + }() + + if pollReq != PollRequestTypeHibernation && fm.pollManager.isHibernating.Load() { + l.Warnw("Skipping poll because a ticker fired while hibernating") + return + } + + if !fm.logBroadcaster.IsConnected() { + l.Warnw("LogBroadcaster 
is not connected to Ethereum node, skipping poll") + return + } + + // + // Poll ticker submission logic: + // - We avoid saving on-chain state wherever possible. Therefore, we do not know which round we should be + // submitting for when the pollTicker fires. + // - We pass 0 into `roundState()`, and the FluxAggregator returns a suggested roundID for us to + // submit to, as well as our eligibility to submit to that round. + // - If the poll ticker fires very soon after we've responded to a NewRound log, and our tx has not been + // mined, we risk double-submitting for a round. To detect this, we check the DB to see whether + // we've responded to this round already, and bail out if so. + // + + // Ask the FluxAggregator which round we should be submitting to, and what the state of that round is. + roundState, err := fm.roundState(0) + if err != nil { + l.Errorw("unable to determine eligibility to submit from FluxAggregator contract", "err", err) + fm.jobORM.TryRecordError( + fm.spec.JobID, + "Unable to call roundState method on provided contract. 
Check contract address.", + ) + + return + } + + l = l.With("reportableRound", roundState.RoundId) + + // Because drumbeat ticker may fire at the same time on multiple nodes, we wait a short random duration + // after getting a recommended round id, to avoid starting multiple rounds in case of chains with instant tx confirmation + if pollReq == PollRequestTypeDrumbeat && fm.pollManager.cfg.DrumbeatEnabled && fm.pollManager.cfg.DrumbeatRandomDelay > 0 { + // #nosec + delay := time.Duration(mrand.Int63n(int64(fm.pollManager.cfg.DrumbeatRandomDelay))) + l.Infof("waiting %v (of max: %v) before continuing...", delay, fm.pollManager.cfg.DrumbeatRandomDelay) + time.Sleep(delay) + + roundStateNew, err2 := fm.roundState(roundState.RoundId) + if err2 != nil { + l.Errorw("unable to determine eligibility to submit from FluxAggregator contract", "err", err2) + fm.jobORM.TryRecordError( + fm.spec.JobID, + "Unable to call roundState method on provided contract. Check contract address.", + ) + + return + } + roundState = roundStateNew + } + + fm.pollManager.Reset(roundState) + // Retry if a idle timer fails + defer func() { + if pollReq == PollRequestTypeIdle { + if err != nil { + if fm.pollManager.StartRetryTicker() { + min, max := fm.pollManager.retryTicker.Bounds() + l.Debugw(fmt.Sprintf("started retry ticker (frequency between: %v - %v) because of error: '%v'", min, max, err.Error())) + } + return + } + fm.pollManager.StopRetryTicker() + } + }() + + roundStats, jobRunStatus, err := fm.statsAndStatusForRound(roundState.RoundId, 0) + if err != nil { + l.Errorw("error determining round stats / run status for round", "err", err) + + return + } + + // If we've already successfully submitted to this round (ie through a NewRound log) + // and the associated JobRun hasn't errored, skip polling + if roundStats.NumSubmissions > 0 && !jobRunStatus.Errored() { + l.Infow("skipping poll: round already answered, tx unconfirmed", "jobRunStatus", jobRunStatus) + + return + } + + // Don't 
submit if we're not eligible, or won't get paid + err = fm.checkEligibilityAndAggregatorFunding(roundState) + if err != nil { + l.Infof("skipping poll: %v", err) + + return + } + + var metaDataForBridge map[string]interface{} + lrd, err := fm.fluxAggregator.LatestRoundData(nil) + if err != nil { + l.Warnw("Couldn't read latest round data for request meta", "err", err) + } else { + metaDataForBridge, err = bridges.MarshalBridgeMetaData(lrd.Answer, lrd.UpdatedAt) + if err != nil { + l.Warnw("Error marshalling roundState for request meta", "err", err) + } + } + + // Call the v2 pipeline to execute a new pipeline run + // Note: we expect the FM pipeline to scale the fetched answer by the same + // amount as "decimals" in the FM contract. + + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "jobSpec": map[string]interface{}{ + "databaseID": fm.jobSpec.ID, + "externalJobID": fm.jobSpec.ExternalJobID, + "name": fm.jobSpec.Name.ValueOrZero(), + "evmChainID": fm.chainID.String(), + }, + "jobRun": map[string]interface{}{ + "meta": metaDataForBridge, + }, + }) + + run, results, err := fm.runner.ExecuteRun(ctx, fm.spec, vars, fm.logger) + if err != nil { + l.Errorw("can't fetch answer", "err", err) + fm.jobORM.TryRecordError(fm.spec.JobID, "Error polling") + return + } + result, err := results.FinalResult(l).SingularResult() + if err != nil || result.Error != nil { + l.Errorw("can't fetch answer", "err", err, "result", result) + fm.jobORM.TryRecordError(fm.spec.JobID, "Error polling") + return + } + answer, err := utils.ToDecimal(result.Value) + if err != nil { + l.Errorw(fmt.Sprintf("error executing new run for job ID %v name %v", fm.spec.JobID, fm.spec.JobName), "err", err) + return + } + + if !fm.isValidSubmission(l, answer, started) { + return + } + + jobID := fmt.Sprintf("%d", fm.spec.JobID) + latestAnswer := decimal.NewFromBigInt(roundState.LatestSubmission, 0) + promfm.SetDecimal(promfm.SeenValue.WithLabelValues(jobID), answer) + + l = l.With( + "latestAnswer", 
latestAnswer, + "answer", answer, + ) + + if roundState.RoundId > 1 && !deviationChecker.OutsideDeviation(latestAnswer, answer) { + l.Debugw("deviation < threshold, not submitting") + return + } + + if roundState.RoundId > 1 { + l.Infow("deviation > threshold, submitting") + } else { + l.Infow("starting first round") + } + + if roundState.PaymentAmount == nil { + l.Error("roundState.PaymentAmount shouldn't be nil") + } + + err = fm.q.Transaction(func(tx pg.Queryer) error { + if err2 := fm.runner.InsertFinishedRun(run, true, pg.WithQueryer(tx)); err2 != nil { + return err2 + } + if err2 := fm.queueTransactionForTxm(ctx, tx, run.ID, answer, roundState.RoundId, nil); err2 != nil { + return err2 + } + if broadcast != nil { + // In the case of a flag lowered, the pollEligible call is triggered by a log. + return fm.logBroadcaster.MarkConsumed(broadcast, pg.WithQueryer(tx)) + } + return nil + }) + // Either the tx failed and we want to reprocess the log, or it succeeded and already marked it consumed + markConsumed = false + if err != nil { + l.Errorw("can't create job run", "err", err) + return + } + + promfm.SetDecimal(promfm.ReportedValue.WithLabelValues(jobID), answer) + promfm.SetUint32(promfm.ReportedRound.WithLabelValues(jobID), roundState.RoundId) +} + +// If the answer is outside the allowable range, log an error and don't submit. +// to avoid an onchain reversion. 
+func (fm *FluxMonitor) isValidSubmission(l logger.Logger, answer decimal.Decimal, started time.Time) bool { + if fm.submissionChecker.IsValid(answer) { + return true + } + + l.Errorw("answer is outside acceptable range", + "min", fm.submissionChecker.Min, + "max", fm.submissionChecker.Max, + "answer", answer, + ) + fm.jobORM.TryRecordError(fm.spec.JobID, "Answer is outside acceptable range") + + jobId := fm.spec.JobID + jobName := fm.spec.JobName + elapsed := time.Since(started) + pipeline.PromPipelineTaskExecutionTime.WithLabelValues(fmt.Sprintf("%d", jobId), jobName, "", job.FluxMonitor.String()).Set(float64(elapsed)) + pipeline.PromPipelineRunErrors.WithLabelValues(fmt.Sprintf("%d", jobId), jobName).Inc() + pipeline.PromPipelineRunTotalTimeToCompletion.WithLabelValues(fmt.Sprintf("%d", jobId), jobName).Set(float64(elapsed)) + pipeline.PromPipelineTasksTotalFinished.WithLabelValues(fmt.Sprintf("%d", jobId), jobName, "", job.FluxMonitor.String(), "", "error").Inc() + return false +} + +func (fm *FluxMonitor) roundState(roundID uint32) (flux_aggregator_wrapper.OracleRoundState, error) { + return fm.fluxAggregator.OracleRoundState(nil, fm.oracleAddress, roundID) +} + +// initialRoundState fetches the round information that the fluxmonitor should use when starting +// new jobs. Choosing the correct round on startup is key to setting timers correctly. 
+func (fm *FluxMonitor) initialRoundState() flux_aggregator_wrapper.OracleRoundState { + defaultRoundState := flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + } + latestRoundData, err := fm.fluxAggregator.LatestRoundData(nil) + if err != nil { + fm.logger.Warnf( + "unable to retrieve latestRoundData for FluxAggregator contract - defaulting "+ + "to current time for tickers: %v", + err, + ) + return defaultRoundState + } + roundID := uint32(latestRoundData.RoundId.Uint64()) + latestRoundState, err := fm.fluxAggregator.OracleRoundState(nil, fm.oracleAddress, roundID) + if err != nil { + fm.logger.Warnf( + "unable to call roundState for latest round, round: %d, err: %v", + latestRoundData.RoundId, + err, + ) + return defaultRoundState + } + return latestRoundState +} + +func (fm *FluxMonitor) queueTransactionForTxm(ctx context.Context, tx pg.Queryer, runID int64, answer decimal.Decimal, roundID uint32, log *flux_aggregator_wrapper.FluxAggregatorNewRound) error { + // Use pipeline run ID to generate globally unique key that can correlate this run to a Tx + idempotencyKey := fmt.Sprintf("fluxmonitor-%d", runID) + // Submit the Eth Tx + err := fm.contractSubmitter.Submit( + ctx, + new(big.Int).SetInt64(int64(roundID)), + answer.BigInt(), + &idempotencyKey, + ) + if err != nil { + fm.logger.Errorw("failed to submit Tx to TXM", "err", err) + return err + } + + numLogs := uint(0) + if log != nil { + numLogs = 1 + } + // Update the flux monitor round stats + err = fm.orm.UpdateFluxMonitorRoundStats( + fm.contractAddress, + roundID, + runID, + numLogs, + pg.WithQueryer(tx), + ) + if err != nil { + fm.logger.Errorw( + fmt.Sprintf("error updating FM round submission count: %v", err), + "roundID", roundID, + ) + + return err + } + + return nil +} + +func (fm *FluxMonitor) statsAndStatusForRound(roundID uint32, newRoundLogs uint) (FluxMonitorRoundStatsV2, pipeline.RunStatus, error) { + roundStats, err := 
fm.orm.FindOrCreateFluxMonitorRoundStats(fm.contractAddress, roundID, newRoundLogs) + if err != nil { + return FluxMonitorRoundStatsV2{}, pipeline.RunStatusUnknown, err + } + + // JobRun will not exist if this is the first time responding to this round + var run pipeline.Run + if roundStats.PipelineRunID.Valid { + run, err = fm.pipelineORM.FindRun(roundStats.PipelineRunID.Int64) + if err != nil { + return FluxMonitorRoundStatsV2{}, pipeline.RunStatusUnknown, err + } + } + + return roundStats, run.Status(), nil +} diff --git a/core/services/fluxmonitorv2/flux_monitor_test.go b/core/services/fluxmonitorv2/flux_monitor_test.go new file mode 100644 index 00000000..2f4a0ca9 --- /dev/null +++ b/core/services/fluxmonitorv2/flux_monitor_test.go @@ -0,0 +1,1931 @@ +package fluxmonitorv2_test + +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/google/uuid" + "github.com/onsi/gomega" + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + logmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + corenull "github.com/goplugin/pluginv3.0/v2/core/null" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" + fmmocks "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + jobmocks "github.com/goplugin/pluginv3.0/v2/core/services/job/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + pipelinemocks "github.com/goplugin/pluginv3.0/v2/core/services/pipeline/mocks" +) + +const oracleCount uint8 = 17 + +var ( + defaultMinimumContractPayment = assets.NewLinkFromJuels(10_000_000_000_000) // 0.00001 PLI +) + +type answerSet struct{ latestAnswer, polledAnswer int64 } + +func newORM(t *testing.T, db *sqlx.DB, cfg pg.QConfig, txm txmgr.TxManager) fluxmonitorv2.ORM { + return fluxmonitorv2.NewORM(db, logger.TestLogger(t), cfg, txm, txmgrcommon.NewSendEveryStrategy(), txmgr.TransmitCheckerSpec{}) +} + +var ( + now = func() uint64 { return uint64(time.Now().UTC().Unix()) } + nilOpts *bind.CallOpts + + makeRoundDataForRoundID = func(roundID uint32) flux_aggregator_wrapper.LatestRoundData { + return flux_aggregator_wrapper.LatestRoundData{ + RoundId: big.NewInt(int64(roundID)), + } + } + freshContractRoundDataResponse = func() (flux_aggregator_wrapper.LatestRoundData, error) { + return flux_aggregator_wrapper.LatestRoundData{}, errors.New("No data present") + } + + contractAddress = testutils.NewAddress() + threshold = float64(0.5) + absoluteThreshold = float64(0.01) + idleTimerPeriod = time.Minute + pipelineSpec = pipeline.Spec{ + ID: 1, + DotDagSource: ` +// data source 1 +ds1 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": 
\\"ETH\\", \\"market\\": \\"USD\\"}"]; +ds1_parse [type=jsonparse path="latest"]; + +// data source 2 +ds2 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": \\"ETH\\", \\"market\\": \\"USD\\"}"]; +ds2_parse [type=jsonparse path="latest"]; + +ds1 -> ds1_parse -> answer1; +ds2 -> ds2_parse -> answer1; + +answer1 [type=median index=0]; +`, + JobID: 1, + } +) + +// testMocks defines all the mock interfaces used by the Flux Monitor +type testMocks struct { + fluxAggregator *mocks.FluxAggregator + logBroadcast *logmocks.Broadcast + logBroadcaster *logmocks.Broadcaster + orm *fmmocks.ORM + jobORM *jobmocks.ORM + pipelineORM *pipelinemocks.ORM + pipelineRunner *pipelinemocks.Runner + keyStore *fmmocks.KeyStoreInterface + contractSubmitter *fmmocks.ContractSubmitter + flags *fmmocks.Flags +} + +func setupMocks(t *testing.T) *testMocks { + t.Helper() + + tm := &testMocks{ + fluxAggregator: mocks.NewFluxAggregator(t), + logBroadcast: logmocks.NewBroadcast(t), + logBroadcaster: logmocks.NewBroadcaster(t), + orm: fmmocks.NewORM(t), + jobORM: jobmocks.NewORM(t), + pipelineORM: pipelinemocks.NewORM(t), + pipelineRunner: pipelinemocks.NewRunner(t), + keyStore: fmmocks.NewKeyStoreInterface(t), + contractSubmitter: fmmocks.NewContractSubmitter(t), + flags: fmmocks.NewFlags(t), + } + + tm.flags.On("ContractExists").Maybe().Return(false) + tm.logBroadcast.On("String").Maybe().Return("") + + return tm +} + +func buildIdempotencyKey(ID int64) *string { + key := fmt.Sprintf("fluxmonitor-%d", ID) + return &key +} + +type setupOptions struct { + pollTickerDisabled bool + idleTimerDisabled bool + idleTimerPeriod time.Duration + drumbeatEnabled bool + drumbeatSchedule string + drumbeatRandomDelay time.Duration + hibernationPollPeriod time.Duration + flags *fmmocks.Flags + orm fluxmonitorv2.ORM +} + +// setup sets up a Flux Monitor for testing, allowing the test to provide +// functional options to configure the setup +func setup(t *testing.T, db *sqlx.DB, 
optionFns ...func(*setupOptions)) (*fluxmonitorv2.FluxMonitor, *testMocks) { + t.Helper() + testutils.SkipShort(t, "long test") + + tm := setupMocks(t) + options := setupOptions{ + idleTimerPeriod: time.Minute, + hibernationPollPeriod: fluxmonitorv2.DefaultHibernationPollPeriod, + flags: tm.flags, + orm: tm.orm, + } + + for _, optionFn := range optionFns { + optionFn(&options) + } + + tm.flags = options.flags + + lggr := logger.TestLogger(t) + + pollManager, err := fluxmonitorv2.NewPollManager( + fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: time.Minute, + PollTickerDisabled: options.pollTickerDisabled, + IdleTimerPeriod: options.idleTimerPeriod, + IdleTimerDisabled: options.idleTimerDisabled, + DrumbeatEnabled: options.drumbeatEnabled, + DrumbeatSchedule: options.drumbeatSchedule, + DrumbeatRandomDelay: options.drumbeatRandomDelay, + HibernationPollPeriod: options.hibernationPollPeriod, + MinRetryBackoffDuration: 1 * time.Minute, + MaxRetryBackoffDuration: 1 * time.Hour, + }, + lggr, + ) + require.NoError(t, err) + + fm, err := fluxmonitorv2.NewFluxMonitor( + tm.pipelineRunner, + job.Job{}, + pipelineSpec, + pg.NewQ(db, lggr, pgtest.NewQConfig(true)), + options.orm, + tm.jobORM, + tm.pipelineORM, + tm.keyStore, + pollManager, + fluxmonitorv2.NewPaymentChecker(assets.NewLinkFromJuels(1), nil), + contractAddress, + tm.contractSubmitter, + fluxmonitorv2.NewDeviationChecker(threshold, absoluteThreshold, lggr), + fluxmonitorv2.NewSubmissionChecker(big.NewInt(0), big.NewInt(100000000000)), + options.flags, + tm.fluxAggregator, + tm.logBroadcaster, + lggr, + testutils.FixtureChainID, + ) + require.NoError(t, err) + + return fm, tm +} + +// disablePollTicker is an option to disable the poll ticker during setup +func disablePollTicker(disabled bool) func(*setupOptions) { + return func(opts *setupOptions) { + opts.pollTickerDisabled = disabled + } +} + +// disableIdleTimer is an option to disable the idle timer during setup +func disableIdleTimer(disabled bool) 
func(*setupOptions) {
+	return func(opts *setupOptions) {
+		opts.idleTimerDisabled = disabled
+	}
+}
+
+// enableDrumbeatTicker is an option to enable the drumbeat ticker during setup
+func enableDrumbeatTicker(schedule string, randomDelay time.Duration) func(*setupOptions) {
+	return func(opts *setupOptions) {
+		opts.drumbeatEnabled = true
+		opts.drumbeatSchedule = schedule
+		opts.drumbeatRandomDelay = randomDelay
+	}
+}
+
+// setIdleTimerPeriod is an option to set the idle timer period during setup
+func setIdleTimerPeriod(period time.Duration) func(*setupOptions) {
+	return func(opts *setupOptions) {
+		opts.idleTimerPeriod = period
+	}
+}
+
+// setHibernationTickerPeriod is an option to set the hibernation ticker period during setup
+func setHibernationTickerPeriod(period time.Duration) func(*setupOptions) {
+	return func(opts *setupOptions) {
+		opts.hibernationPollPeriod = period
+	}
+}
+
+// setHibernationState is an option to set the initial hibernation state during setup
+func setHibernationState(t *testing.T, hibernating bool) func(*setupOptions) {
+	return func(opts *setupOptions) {
+		opts.flags = fmmocks.NewFlags(t)
+		opts.flags.On("ContractExists").Return(true)
+		opts.flags.On("Address").Return(common.Address{})
+		opts.flags.On("IsLowered", mock.Anything).Return(!hibernating, nil)
+	}
+}
+
+func setFlags(flags *fmmocks.Flags) func(*setupOptions) {
+	return func(opts *setupOptions) {
+		opts.flags = flags
+	}
+}
+
+// withORM is an option to switch out the ORM during set up. 
Useful when you
+// want to use a database backed ORM
+func withORM(orm fluxmonitorv2.ORM) func(*setupOptions) {
+	return func(opts *setupOptions) {
+		opts.orm = orm
+	}
+}
+
+// setupStoreWithKey sets up a new store and adds a key to the keystore
+func setupStoreWithKey(t *testing.T) (*sqlx.DB, common.Address) {
+	db := pgtest.NewSqlxDB(t)
+	ethKeyStore := cltest.NewKeyStore(t, db, pgtest.NewQConfig(true)).Eth()
+	_, nodeAddr := cltest.MustInsertRandomKey(t, ethKeyStore)
+
+	return db, nodeAddr
+}
+
+// setupFullDBWithKey sets up a new heavyweight test database and adds a key to the keystore
+func setupFullDBWithKey(t *testing.T) (*sqlx.DB, common.Address) {
+	cfg, db := heavyweight.FullTestDBV2(t, nil)
+	ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
+	_, nodeAddr := cltest.MustInsertRandomKey(t, ethKeyStore)
+
+	return db, nodeAddr
+}
+
+func TestFluxMonitor_PollIfEligible(t *testing.T) {
+	testCases := []struct {
+		name              string
+		eligible          bool
+		connected         bool
+		funded            bool
+		answersDeviate    bool
+		hasPreviousRun    bool
+		previousRunStatus pipeline.RunStatus
+		expectedToPoll    bool
+		expectedToSubmit  bool
+	}{
+		{
+			name:     "eligible",
+			eligible: true, connected: true, funded: true, answersDeviate: true,
+			expectedToPoll: true, expectedToSubmit: true,
+		},
+		{
+			name:     "ineligible",
+			eligible: false, connected: true, funded: true, answersDeviate: true,
+			expectedToPoll: false, expectedToSubmit: false,
+		}, {
+			name:     "disconnected",
+			eligible: true, connected: false, funded: true, answersDeviate: true,
+			expectedToPoll: false, expectedToSubmit: false,
+		}, {
+			name:     "under funded",
+			eligible: true, connected: true, funded: false, answersDeviate: true,
+			expectedToPoll: false, expectedToSubmit: false,
+		}, {
+			name:     "answer undeviated",
+			eligible: true, connected: true, funded: true, answersDeviate: false,
+			expectedToPoll: true, expectedToSubmit: false,
+		}, {
+			name:     "previous job run completed",
+			eligible: true, connected: true, funded: true, answersDeviate: true,
+			
hasPreviousRun: true, previousRunStatus: pipeline.RunStatusCompleted, + expectedToPoll: false, expectedToSubmit: false, + }, { + name: "previous job run in progress", + eligible: true, connected: true, funded: true, answersDeviate: true, + hasPreviousRun: true, previousRunStatus: pipeline.RunStatusRunning, + expectedToPoll: false, expectedToSubmit: false, + }, { + name: "previous job run errored", + eligible: true, connected: true, funded: true, answersDeviate: true, + hasPreviousRun: true, previousRunStatus: pipeline.RunStatusErrored, + expectedToPoll: true, expectedToSubmit: true, + }, + } + + db, nodeAddr := setupStoreWithKey(t) + + const reportableRoundID = 2 + var ( + thresholds = struct{ abs, rel float64 }{0.1, 200} + deviatedAnswers = answerSet{1, 100} + undeviatedAnswers = answerSet{100, 101} + ) + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + fm, tm := setup(t, db) + + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + tm.logBroadcaster.On("IsConnected").Return(tc.connected).Once() + + // Setup Answers + answers := undeviatedAnswers + if tc.answersDeviate { + answers = deviatedAnswers + } + latestAnswer := answers.latestAnswer + + // Setup Run + run := pipeline.Run{ + ID: 1, + PipelineSpecID: 1, + } + if tc.hasPreviousRun { + switch tc.previousRunStatus { + case pipeline.RunStatusCompleted: + now := time.Now() + run.FinishedAt = null.TimeFrom(now) + case pipeline.RunStatusErrored: + run.FatalErrors = []null.String{ + null.StringFrom("Random: String, foo"), + } + default: + } + + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(reportableRoundID), mock.Anything). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: reportableRoundID, + PipelineRunID: corenull.Int64From(run.ID), + NumSubmissions: 1, + }, nil) + + tm.pipelineORM. + On("FindRun", run.ID). 
+ Return(run, nil) + } else { + if tc.connected { + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(reportableRoundID), mock.Anything). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: reportableRoundID, + }, nil) + } + } + + // Set up funds + var availableFunds *big.Int + var paymentAmount *big.Int + minPayment := defaultMinimumContractPayment.ToInt() + if tc.funded { + availableFunds = big.NewInt(1).Mul(big.NewInt(10000), minPayment) + paymentAmount = minPayment + } else { + availableFunds = big.NewInt(1) + paymentAmount = minPayment + } + + roundState := flux_aggregator_wrapper.OracleRoundState{ + RoundId: reportableRoundID, + EligibleToSubmit: tc.eligible, + LatestSubmission: big.NewInt(latestAnswer), + AvailableFunds: availableFunds, + PaymentAmount: paymentAmount, + OracleCount: oracleCount, + } + tm.fluxAggregator. + On("OracleRoundState", nilOpts, nodeAddr, uint32(0)). + Return(roundState, nil).Maybe() + + if tc.expectedToPoll { + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(flux_aggregator_wrapper.LatestRoundData{ + Answer: big.NewInt(10), + UpdatedAt: big.NewInt(100), + }, nil) + tm.pipelineRunner. + On("ExecuteRun", mock.Anything, pipelineSpec, pipeline.NewVarsFrom( + map[string]interface{}{ + "jobRun": map[string]interface{}{ + "meta": map[string]interface{}{ + "latestAnswer": float64(10), + "updatedAt": float64(100), + }, + }, + "jobSpec": map[string]interface{}{ + "databaseID": int32(0), + "externalJobID": uuid.UUID{}, + "name": "", + "evmChainID": testutils.FixtureChainID.String(), + }, + }, + ), mock.Anything). + Return(&run, pipeline.TaskRunResults{ + { + Result: pipeline.Result{ + Value: decimal.NewFromInt(answers.polledAnswer), + Error: nil, + }, + Task: &pipeline.HTTPTask{}, + }, + }, nil) + } + + if tc.expectedToSubmit { + tm.pipelineRunner.On("InsertFinishedRun", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil). 
+ Run(func(args mock.Arguments) { + args.Get(0).(*pipeline.Run).ID = 1 + }). + Once() + tm.contractSubmitter. + On("Submit", mock.Anything, big.NewInt(reportableRoundID), big.NewInt(answers.polledAnswer), buildIdempotencyKey(run.ID)). + Return(nil). + Once() + + tm.orm. + On("UpdateFluxMonitorRoundStats", + contractAddress, + uint32(reportableRoundID), + int64(1), + mock.Anything, + mock.Anything, + ). + Return(nil) + } + + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + require.NoError(t, fm.SetOracleAddress()) + fm.ExportedPollIfEligible(thresholds.rel, thresholds.abs) + }) + } +} + +// If the roundState method is unable to communicate with the contract (possibly due to +// incorrect address) then the pollIfEligible method should create a JobErr record +func TestFluxMonitor_PollIfEligible_Creates_JobErr(t *testing.T) { + db, nodeAddr := setupStoreWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + + var ( + roundState = flux_aggregator_wrapper.OracleRoundState{} + ) + + fm, tm := setup(t, db) + + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + tm.logBroadcaster.On("IsConnected").Return(true).Once() + + tm.jobORM. + On("TryRecordError", + pipelineSpec.JobID, + "Unable to call roundState method on provided contract. Check contract address.", + ).Once() + + tm.fluxAggregator. + On("OracleRoundState", nilOpts, nodeAddr, mock.Anything). + Return(roundState, errors.New("err")). 
+ Once() + + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + require.NoError(t, fm.SetOracleAddress()) + + fm.ExportedPollIfEligible(1, 1) +} + +func TestPollingDeviationChecker_BuffersLogs(t *testing.T) { + db, nodeAddr := setupStoreWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + + fm, tm := setup(t, + db, + disableIdleTimer(true), + disablePollTicker(true), + ) + + const ( + fetchedValue = 100 + ) + + // Test helpers + var ( + makeRoundStateForRoundID = func(roundID uint32) flux_aggregator_wrapper.OracleRoundState { + return flux_aggregator_wrapper.OracleRoundState{ + RoundId: roundID, + EligibleToSubmit: true, + LatestSubmission: big.NewInt(100), + AvailableFunds: defaultMinimumContractPayment.ToInt(), + PaymentAmount: defaultMinimumContractPayment.ToInt(), + } + } + ) + + readyToAssert := cltest.NewAwaiter() + readyToFillQueue := cltest.NewAwaiter() + logsAwaiter := cltest.NewAwaiter() + + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + + tm.fluxAggregator.On("Address").Return(common.Address{}) + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Maybe() + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(1)). + Return(makeRoundStateForRoundID(1), nil). + Run(func(mock.Arguments) { + readyToFillQueue.ItHappened() + logsAwaiter.AwaitOrFail(t) + }). 
+ Once() + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(3)).Return(makeRoundStateForRoundID(3), nil).Once() + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(4)).Return(makeRoundStateForRoundID(4), nil).Once() + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + // tm.fluxAggregator.On("Address").Return(contractAddress, nil) + + tm.logBroadcaster.On("Register", fm, mock.Anything).Return(func() {}) + tm.logBroadcaster.On("IsConnected").Return(true).Maybe() + + tm.orm.On("MostRecentFluxMonitorRoundID", contractAddress).Return(uint32(1), nil) + tm.orm.On("MostRecentFluxMonitorRoundID", contractAddress).Return(uint32(3), nil) + tm.orm.On("MostRecentFluxMonitorRoundID", contractAddress).Return(uint32(4), nil) + + // Round 1 + run := &pipeline.Run{ID: 1} + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(1), mock.Anything). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: 1, + }, nil) + tm.pipelineRunner. + On("ExecuteRun", mock.Anything, pipelineSpec, mock.Anything, mock.Anything). + Return(run, pipeline.TaskRunResults{ + { + Result: pipeline.Result{ + Value: decimal.NewFromInt(fetchedValue), + Error: nil, + }, + Task: &pipeline.HTTPTask{}, + }, + }, nil).Once() + tm.pipelineRunner. + On("InsertFinishedRun", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + args.Get(0).(*pipeline.Run).ID = 1 + }).Once() + tm.contractSubmitter. + On("Submit", mock.Anything, big.NewInt(1), big.NewInt(fetchedValue), buildIdempotencyKey(run.ID)). + Return(nil). + Once() + + tm.orm. + On("UpdateFluxMonitorRoundStats", + contractAddress, + uint32(1), + mock.AnythingOfType("int64"), //int64(1), + mock.Anything, + mock.Anything, + ). + Return(nil).Once() + + // Round 3 + run = &pipeline.Run{ID: 2} + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(3), mock.Anything). 
+ Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: 3, + }, nil) + tm.pipelineRunner. + On("ExecuteRun", mock.Anything, pipelineSpec, mock.Anything, mock.Anything). + Return(run, pipeline.TaskRunResults{ + { + Result: pipeline.Result{ + Value: decimal.NewFromInt(fetchedValue), + Error: nil, + }, + Task: &pipeline.HTTPTask{}, + }, + }, nil).Once() + tm.pipelineRunner. + On("InsertFinishedRun", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + args.Get(0).(*pipeline.Run).ID = 2 + }).Once() + tm.contractSubmitter. + On("Submit", mock.Anything, big.NewInt(3), big.NewInt(fetchedValue), buildIdempotencyKey(run.ID)). + Return(nil). + Once() + tm.orm. + On("UpdateFluxMonitorRoundStats", + contractAddress, + uint32(3), + mock.AnythingOfType("int64"), //int64(2), + mock.Anything, + mock.Anything, + ). + Return(nil).Once() + + // Round 4 + run = &pipeline.Run{ID: 3} + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(4), mock.Anything). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: 3, + }, nil) + tm.pipelineRunner. + On("ExecuteRun", mock.Anything, pipelineSpec, mock.Anything, mock.Anything). + Return(run, pipeline.TaskRunResults{ + { + Result: pipeline.Result{ + Value: decimal.NewFromInt(fetchedValue), + Error: nil, + }, + Task: &pipeline.HTTPTask{}, + }, + }, nil).Once() + tm.pipelineRunner. + On("InsertFinishedRun", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + args.Get(0).(*pipeline.Run).ID = 3 + }).Once() + tm.contractSubmitter. + On("Submit", mock.Anything, big.NewInt(4), big.NewInt(fetchedValue), buildIdempotencyKey(run.ID)). + Return(nil). + Once() + tm.orm. + On("UpdateFluxMonitorRoundStats", + contractAddress, + uint32(4), + mock.AnythingOfType("int64"), //int64(3), + mock.Anything, + mock.Anything, + ). + Return(nil). + Once(). 
+ Run(func(mock.Arguments) { readyToAssert.ItHappened() }) + + servicetest.Run(t, fm) + + var logBroadcasts []*logmocks.Broadcast + + for i := 1; i <= 4; i++ { + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&flux_aggregator_wrapper.FluxAggregatorNewRound{RoundId: big.NewInt(int64(i)), StartedAt: big.NewInt(0)}) + logBroadcast.On("String").Maybe().Return("") + tm.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + tm.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + logBroadcasts = append(logBroadcasts, logBroadcast) + } + + fm.HandleLog(logBroadcasts[0]) // Get the checker to start processing a log so we can freeze it + + readyToFillQueue.AwaitOrFail(t) + + fm.HandleLog(logBroadcasts[1]) // This log is evicted from the priority queue + fm.HandleLog(logBroadcasts[2]) + fm.HandleLog(logBroadcasts[3]) + + logsAwaiter.ItHappened() + readyToAssert.AwaitOrFail(t) +} + +func TestFluxMonitor_TriggerIdleTimeThreshold(t *testing.T) { + g := gomega.NewWithT(t) + + testCases := []struct { + name string + idleTimerDisabled bool + idleDuration time.Duration + expectedToSubmit bool + }{ + {"no idleDuration", true, 0, false}, + {"idleDuration > 0", false, 2 * time.Second, true}, + } + + db, nodeAddr := setupStoreWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + var ( + orm = newORM(t, db, pgtest.NewQConfig(true), nil) + ) + + fm, tm := setup(t, db, disablePollTicker(true), disableIdleTimer(tc.idleTimerDisabled), setIdleTimerPeriod(tc.idleDuration), withORM(orm)) + + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + + const fetchedAnswer = 100 + answerBigInt := big.NewInt(fetchedAnswer) + + tm.fluxAggregator.On("Address").Return(common.Address{}) + tm.fluxAggregator.On("GetOracles", 
nilOpts).Return(oracles, nil) + tm.logBroadcaster.On("Register", mock.Anything, mock.Anything).Return(func() {}) + tm.logBroadcaster.On("IsConnected").Return(true).Maybe() + + idleDurationOccured := make(chan struct{}, 3) + + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Once() + if tc.expectedToSubmit { + // performInitialPoll() + roundState1 := flux_aggregator_wrapper.OracleRoundState{RoundId: 1, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now()} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(roundState1, nil).Once() + // idleDuration 1 + roundState2 := flux_aggregator_wrapper.OracleRoundState{RoundId: 1, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now()} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(roundState2, nil).Once().Run(func(args mock.Arguments) { + idleDurationOccured <- struct{}{} + }) + } + + require.NoError(t, fm.Start(testutils.Context(t))) + require.Len(t, idleDurationOccured, 0, "no Job Runs created") + + if tc.expectedToSubmit { + g.Eventually(func() int { return len(idleDurationOccured) }, testutils.WaitTimeout(t)).Should(gomega.Equal(1)) + + chBlock := make(chan struct{}) + // NewRound resets the idle timer + roundState2 := flux_aggregator_wrapper.OracleRoundState{RoundId: 2, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now()} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(2)).Return(roundState2, nil).Once().Run(func(args mock.Arguments) { + close(chBlock) + }) + + decodedLog := flux_aggregator_wrapper.FluxAggregatorNewRound{RoundId: big.NewInt(2), StartedAt: big.NewInt(0)} + tm.logBroadcast.On("DecodedLog").Return(&decodedLog) + tm.logBroadcast.On("String").Maybe().Return("") + tm.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + tm.logBroadcaster.On("MarkConsumed", mock.Anything, 
mock.Anything).Return(nil) + fm.HandleLog(tm.logBroadcast) + + g.Eventually(chBlock).Should(gomega.BeClosed()) + + // idleDuration 2 + roundState3 := flux_aggregator_wrapper.OracleRoundState{RoundId: 3, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now()} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(roundState3, nil).Once().Run(func(args mock.Arguments) { + idleDurationOccured <- struct{}{} + }) + + g.Eventually(func() int { return len(idleDurationOccured) }, testutils.WaitTimeout(t)).Should(gomega.Equal(2)) + } + + fm.Close() + + if !tc.expectedToSubmit { + require.Len(t, idleDurationOccured, 0) + } + }) + } +} + +func TestFluxMonitor_HibernationTickerFiresMultipleTimes(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) + db, nodeAddr := setupStoreWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + + fm, tm := setup(t, + db, + disablePollTicker(true), + disableIdleTimer(true), + setHibernationTickerPeriod(time.Second), + setHibernationState(t, true), + ) + + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + + const fetchedAnswer = 100 + answerBigInt := big.NewInt(fetchedAnswer) + + tm.fluxAggregator.On("Address").Return(contractAddress) + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + tm.logBroadcaster.On("Register", mock.Anything, mock.Anything).Return(func() {}) + tm.logBroadcaster.On("IsConnected").Return(true) + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Once() + + pollOccured := make(chan struct{}, 4) + + err := fm.Start(testutils.Context(t)) + require.NoError(t, err) + + t.Cleanup(func() { fm.Close() }) + + roundState1 := flux_aggregator_wrapper.OracleRoundState{RoundId: 1, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now()} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, 
uint32(0)).Return(roundState1, nil).Once().Run(func(args mock.Arguments) {
+		pollOccured <- struct{}{}
+	})
+	tm.orm.
+		On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(1), mock.Anything).
+		Return(fluxmonitorv2.FluxMonitorRoundStatsV2{
+			Aggregator:     contractAddress,
+			RoundID:        1,
+			NumSubmissions: 0,
+		}, nil).Once()
+
+	g.Eventually(func() int { return len(pollOccured) }, testutils.WaitTimeout(t)).Should(gomega.Equal(1))
+
+	// hibernation tick 1 triggers using the same round id as the initial poll. This resets the idle timer
+	roundState1Responded := flux_aggregator_wrapper.OracleRoundState{RoundId: 1, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now() + 1}
+	tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(roundState1Responded, nil).Once().Run(func(args mock.Arguments) {
+		pollOccured <- struct{}{}
+	})
+
+	// Finds an existing run created by the initial poll
+	tm.orm.
+		On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(1), mock.Anything).
+		Return(fluxmonitorv2.FluxMonitorRoundStatsV2{
+			PipelineRunID:  corenull.NewInt64(int64(1), true),
+			Aggregator:     contractAddress,
+			RoundID:        1,
+			NumSubmissions: 1,
+		}, nil).Once()
+	finishedAt := time.Now()
+	tm.pipelineORM.On("FindRun", int64(1)).Return(pipeline.Run{
+		FinishedAt: null.TimeFrom(finishedAt),
+	}, nil)
+
+	g.Eventually(func() int { return len(pollOccured) }, testutils.WaitTimeout(t)).Should(gomega.Equal(2))
+
+	// hibernation tick 2 triggers a new round. Started at is 0
+	roundState2 := flux_aggregator_wrapper.OracleRoundState{RoundId: 2, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: 0}
+	tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(roundState2, nil).Once().Run(func(args mock.Arguments) {
+		pollOccured <- struct{}{}
+	})
+	tm.orm.
+		On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(2), mock.Anything). 
+ Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: 2, + NumSubmissions: 0, + }, nil).Once() + + g.Eventually(func() int { return len(pollOccured) }, testutils.WaitTimeout(t)).Should(gomega.Equal(3)) +} + +func TestFluxMonitor_HibernationIsEnteredAndRetryTickerStopped(t *testing.T) { + db, nodeAddr := setupFullDBWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + + const ( + roundZero = uint32(0) + roundOne = uint32(1) + roundTwo = uint32(2) + ) + + flags := fmmocks.NewFlags(t) + flags.On("ContractExists").Return(true) + flags.On("Address").Return(common.Address{}) + flags.On("IsLowered", mock.Anything).Return(true, nil).Once() + + fm, tm := setup(t, + db, + setIdleTimerPeriod(time.Second), + disablePollTicker(true), + setHibernationTickerPeriod(4*time.Second), + setFlags(flags), + ) + + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + + const fetchedAnswer = 100 + answerBigInt := big.NewInt(fetchedAnswer) + + tm.fluxAggregator.On("Address").Return(contractAddress) + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + tm.logBroadcaster.On("Register", mock.Anything, mock.Anything).Return(func() {}) + tm.logBroadcaster.On("IsConnected").Return(true) + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Once() + + pollOccured := make(chan struct{}, 4) + + err := fm.Start(testutils.Context(t)) + require.NoError(t, err) + + t.Cleanup(func() { fm.Close() }) + + // idle ticker + roundState1 := flux_aggregator_wrapper.OracleRoundState{RoundId: 1, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now(), AvailableFunds: big.NewInt(0)} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, roundZero).Return(roundState1, nil).Once().Run(func(args mock.Arguments) { + pollOccured <- struct{}{} + }) + tm.orm. 
+ On("FindOrCreateFluxMonitorRoundStats", contractAddress, roundOne, mock.Anything). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: 1, + NumSubmissions: 0, + }, nil).Once() + + select { + case <-pollOccured: + case <-time.After(testutils.WaitTimeout(t)): + t.Fatal("Poll did not occur!") + } + + roundState1Responded := flux_aggregator_wrapper.OracleRoundState{RoundId: 1, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now() + 1} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, roundZero).Return(roundState1Responded, nil).Once().Run(func(args mock.Arguments) { + pollOccured <- struct{}{} + }) + + // Finds an error run, so that retry ticker will be kicked off + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, roundOne, mock.Anything). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + PipelineRunID: corenull.NewInt64(int64(1), true), + Aggregator: contractAddress, + RoundID: 1, + NumSubmissions: 1, + }, nil).Once() + finishedAt := time.Now() + tm.pipelineORM.On("FindRun", int64(1)).Return(pipeline.Run{ + FinishedAt: null.TimeFrom(finishedAt), + FatalErrors: []null.String{null.StringFrom("an error to start retry ticker")}, + }, nil) + + select { + case <-pollOccured: + case <-time.After(testutils.WaitTimeout(t)): + t.Fatal("Poll did not occur!") + } + + // ---------- Begin hibernation mode ------------ + flags.On("IsLowered", mock.Anything).Return(false, nil) + fm.ExportedRespondToFlagsRaisedLog() + + // hibernation ticker + roundState2 := flux_aggregator_wrapper.OracleRoundState{RoundId: 2, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: 0} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, roundZero).Return(roundState2, nil).Once() + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, roundTwo, mock.Anything). 
+ Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: 2, + NumSubmissions: 0, + }, nil). + Run(func(args mock.Arguments) { + pollOccured <- struct{}{} + }). + Once() + + select { + case <-pollOccured: + t.Fatal("Poll should not occur for next few seconds because we are in hibernation mode and all other tickers should be stopped") + case <-time.After(2 * time.Second): + } + + select { + case <-pollOccured: + case <-time.After(testutils.WaitTimeout(t)): + t.Fatal("Poll did not occur, though it should have via hibernation ticker") + } +} + +func TestFluxMonitor_IdleTimerResetsOnNewRound(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) + db, nodeAddr := setupStoreWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + + fm, tm := setup(t, + db, + disablePollTicker(true), + setIdleTimerPeriod(2*time.Second), + ) + + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + + const fetchedAnswer = 100 + answerBigInt := big.NewInt(fetchedAnswer) + + tm.fluxAggregator.On("Address").Return(contractAddress) + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + tm.logBroadcaster.On("Register", mock.Anything, mock.Anything).Return(func() {}) + tm.logBroadcaster.On("IsConnected").Return(true) + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Once() + + idleDurationOccured := make(chan struct{}, 4) + initialPollOccurred := make(chan struct{}, 1) + + servicetest.Run(t, fm) + + // Initial Poll + roundState1 := flux_aggregator_wrapper.OracleRoundState{RoundId: 1, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now()} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(roundState1, nil).Once() + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(1), mock.Anything). 
+ Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: 1, + NumSubmissions: 0, + }, nil).Once().Run(func(args mock.Arguments) { + initialPollOccurred <- struct{}{} + }) + require.Len(t, idleDurationOccured, 0, "no Job Runs created") + g.Eventually(func() int { return len(initialPollOccurred) }, testutils.WaitTimeout(t)).Should(gomega.Equal(1)) + + // idleDuration 1 triggers using the same round id as the initial poll. This resets the idle timer + roundState1Responded := flux_aggregator_wrapper.OracleRoundState{RoundId: 1, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now() + 1} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(roundState1Responded, nil).Once().Run(func(args mock.Arguments) { + idleDurationOccured <- struct{}{} + }) + // Finds an existing run created by the initial poll + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(1), mock.Anything). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + PipelineRunID: corenull.NewInt64(int64(1), true), + Aggregator: contractAddress, + RoundID: 1, + NumSubmissions: 1, + }, nil).Once() + finishedAt := time.Now() + tm.pipelineORM.On("FindRun", int64(1)).Return(pipeline.Run{ + FinishedAt: null.TimeFrom(finishedAt), + }, nil) + + g.Eventually(func() int { return len(idleDurationOccured) }, testutils.WaitTimeout(t)).Should(gomega.Equal(1)) + + // idleDuration 2 triggers a new round. Started at is 0 + roundState2 := flux_aggregator_wrapper.OracleRoundState{RoundId: 2, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: 0} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(roundState2, nil).Once().Run(func(args mock.Arguments) { + idleDurationOccured <- struct{}{} + }) + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(2), mock.Anything). 
+ Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: 2, + NumSubmissions: 0, + }, nil).Once() + + g.Eventually(func() int { return len(idleDurationOccured) }, testutils.WaitTimeout(t)).Should(gomega.Equal(2)) + + // idleDuration 3 triggers from the previous new round + roundState3 := flux_aggregator_wrapper.OracleRoundState{RoundId: 3, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now() - 1000000} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(roundState3, nil).Twice().Run(func(args mock.Arguments) { + idleDurationOccured <- struct{}{} + }) + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(3), mock.Anything). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: 3, + NumSubmissions: 0, + }, nil).Once() + + // AnswerUpdated comes in, which attempts to reset the timers + tm.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil).Once() + tm.logBroadcast.On("DecodedLog").Return(&flux_aggregator_wrapper.FluxAggregatorAnswerUpdated{}) + tm.logBroadcast.On("String").Maybe().Return("") + tm.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil).Once() + fm.ExportedBacklog().Add(fluxmonitorv2.PriorityNewRoundLog, tm.logBroadcast) + fm.ExportedProcessLogs() + + g.Eventually(func() int { return len(idleDurationOccured) }, testutils.WaitTimeout(t)).Should(gomega.Equal(4)) +} + +func TestFluxMonitor_RoundTimeoutCausesPoll_timesOutAtZero(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) + db, nodeAddr := setupStoreWithKey(t) + + var ( + oracles = []common.Address{nodeAddr, testutils.NewAddress()} + orm = newORM(t, db, pgtest.NewQConfig(true), nil) + ) + + fm, tm := setup(t, db, disablePollTicker(true), disableIdleTimer(true), withORM(orm)) + + tm.keyStore. + On("EnabledKeysForChain", testutils.FixtureChainID). + Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil). 
+ Twice() // Once called from the test, once during start + + ch := make(chan struct{}) + + const fetchedAnswer = 100 + answerBigInt := big.NewInt(fetchedAnswer) + tm.logBroadcaster.On("Register", mock.Anything, mock.Anything).Return(func() {}) + + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(makeRoundDataForRoundID(1), nil).Once() + roundState0 := flux_aggregator_wrapper.OracleRoundState{RoundId: 1, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now()} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(1)).Return(roundState0, nil).Once() // initialRoundState() + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(flux_aggregator_wrapper.OracleRoundState{ + RoundId: 1, + EligibleToSubmit: false, + LatestSubmission: answerBigInt, + StartedAt: 0, + Timeout: 0, + }, nil). + Run(func(mock.Arguments) { close(ch) }). + Once() + + tm.fluxAggregator.On("Address").Return(common.Address{}) + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + + require.NoError(t, fm.SetOracleAddress()) + fm.ExportedRoundState(t) + servicetest.Run(t, fm) + + g.Eventually(ch).Should(gomega.BeClosed()) +} + +func TestFluxMonitor_UsesPreviousRoundStateOnStartup_RoundTimeout(t *testing.T) { + g := gomega.NewWithT(t) + + db, nodeAddr := setupStoreWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + + tests := []struct { + name string + timeout uint64 + expectedToSubmit bool + }{ + {"active round exists - round will time out", 2, true}, + {"active round exists - round will not time out", 100, false}, + {"no active round", 0, false}, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + cfg := configtest.NewTestGeneralConfig(t) + var ( + orm = newORM(t, db, cfg.Database(), nil) + ) + + fm, tm := setup(t, db, disablePollTicker(true), disableIdleTimer(true), withORM(orm)) + + tm.keyStore.On("EnabledKeysForChain", 
testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + + tm.logBroadcaster.On("Register", mock.Anything, mock.Anything).Return(func() {}) + tm.logBroadcaster.On("IsConnected").Return(true).Maybe() + + tm.fluxAggregator.On("Address").Return(common.Address{}) + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(makeRoundDataForRoundID(1), nil).Once() + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(1)).Return(flux_aggregator_wrapper.OracleRoundState{ + RoundId: 1, + EligibleToSubmit: false, + StartedAt: now(), + Timeout: test.timeout, + }, nil).Once() + + // 2nd roundstate call means round timer triggered + chRoundState := make(chan struct{}) + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(flux_aggregator_wrapper.OracleRoundState{ + RoundId: 1, + EligibleToSubmit: false, + }, nil). + Run(func(mock.Arguments) { close(chRoundState) }). + Maybe() + + servicetest.Run(t, fm) + + if test.expectedToSubmit { + g.Eventually(chRoundState).Should(gomega.BeClosed()) + } else { + g.Consistently(chRoundState).ShouldNot(gomega.BeClosed()) + } + }) + } +} + +func TestFluxMonitor_UsesPreviousRoundStateOnStartup_IdleTimer(t *testing.T) { + g := gomega.NewWithT(t) + + db, nodeAddr := setupStoreWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + + almostExpired := time.Now(). + Add(idleTimerPeriod * -1). + Add(2 * time.Second). 
+ Unix() + + testCases := []struct { + name string + startedAt uint64 + expectedToSubmit bool + }{ + {"active round exists - idleTimer about to expired", uint64(almostExpired), true}, + {"active round exists - idleTimer will not expire", 100, false}, + {"no active round", 0, false}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + cfg := configtest.NewTestGeneralConfig(t) + + var ( + orm = newORM(t, db, cfg.Database(), nil) + ) + + fm, tm := setup(t, + db, + disablePollTicker(true), + withORM(orm), + ) + initialPollOccurred := make(chan struct{}, 1) + + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + tm.logBroadcaster.On("Register", mock.Anything, mock.Anything).Return(func() {}) + tm.logBroadcaster.On("IsConnected").Return(true).Maybe() + tm.fluxAggregator.On("Address").Return(common.Address{}) + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(makeRoundDataForRoundID(1), nil).Once() + + // first roundstate calling initialRoundState on fm.Start() + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(1)).Return(flux_aggregator_wrapper.OracleRoundState{ + RoundId: 1, + EligibleToSubmit: false, + StartedAt: tc.startedAt, + Timeout: 10000, // round won't time out + }, nil) + + // 2nd roundstate in initial poll + roundState := flux_aggregator_wrapper.OracleRoundState{RoundId: 1, EligibleToSubmit: false} + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(roundState, nil). + Once(). + Run(func(args mock.Arguments) { + initialPollOccurred <- struct{}{} + }) + + // 3rd roundState call means idleTimer triggered + chRoundState := make(chan struct{}) + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(roundState, nil). + Run(func(mock.Arguments) { + close(chRoundState) + }). 
+ Maybe() + + servicetest.Run(t, fm) + + assert.Eventually(t, func() bool { return len(initialPollOccurred) == 1 }, 3*time.Second, 10*time.Millisecond) + + if tc.expectedToSubmit { + g.Eventually(chRoundState).Should(gomega.BeClosed()) + } else { + g.Consistently(chRoundState).ShouldNot(gomega.BeClosed()) + } + }) + } +} + +func TestFluxMonitor_RoundTimeoutCausesPoll_timesOutNotZero(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) + db, nodeAddr := setupStoreWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + cfg := configtest.NewTestGeneralConfig(t) + + var ( + orm = newORM(t, db, cfg.Database(), nil) + ) + + fm, tm := setup(t, db, disablePollTicker(true), disableIdleTimer(true), withORM(orm)) + + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + + const fetchedAnswer = 100 + answerBigInt := big.NewInt(fetchedAnswer) + + chRoundState1 := make(chan struct{}) + chRoundState2 := make(chan struct{}) + + tm.logBroadcaster.On("Register", mock.Anything, mock.Anything).Return(func() {}) + tm.logBroadcaster.On("IsConnected").Return(true).Maybe() + + tm.fluxAggregator.On("Address").Return(common.Address{}) + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(makeRoundDataForRoundID(1), nil).Once() + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(1)).Return(flux_aggregator_wrapper.OracleRoundState{ + RoundId: 1, + EligibleToSubmit: false, + LatestSubmission: answerBigInt, + StartedAt: now(), + Timeout: uint64(1000000), + }, nil).Once() + + startedAt := uint64(time.Now().Unix()) + timeout := uint64(3) + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(flux_aggregator_wrapper.OracleRoundState{ + RoundId: 1, + EligibleToSubmit: false, + LatestSubmission: answerBigInt, + StartedAt: startedAt, + PaymentAmount: big.NewInt(10), + AvailableFunds: 
big.NewInt(100), + Timeout: timeout, + }, nil).Once(). + Run(func(mock.Arguments) { close(chRoundState1) }). + Once() + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(flux_aggregator_wrapper.OracleRoundState{ + RoundId: 1, + EligibleToSubmit: false, + LatestSubmission: answerBigInt, + PaymentAmount: big.NewInt(10), + AvailableFunds: big.NewInt(100), + StartedAt: startedAt, + Timeout: timeout, + }, nil).Once(). + Run(func(mock.Arguments) { close(chRoundState2) }). + Once() + + servicetest.Run(t, fm) + + tm.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + tm.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + tm.logBroadcast.On("DecodedLog").Return(&flux_aggregator_wrapper.FluxAggregatorNewRound{ + RoundId: big.NewInt(0), + StartedAt: big.NewInt(time.Now().UTC().Unix()), + }) + tm.logBroadcast.On("String").Maybe().Return("") + // To mark it consumed, we need to be eligible to submit. + fm.HandleLog(tm.logBroadcast) + + g.Eventually(chRoundState1).Should(gomega.BeClosed()) + g.Eventually(chRoundState2).Should(gomega.BeClosed()) + + time.Sleep(time.Duration(2*timeout) * time.Second) +} + +func TestFluxMonitor_ConsumeLogBroadcast(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + fm, tm := setup(t, db) + + tm.fluxAggregator. + On("OracleRoundState", nilOpts, mock.Anything, mock.Anything). 
+ Return(flux_aggregator_wrapper.OracleRoundState{RoundId: 123}, nil) + + tm.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil).Once() + tm.logBroadcast.On("DecodedLog").Return(&flux_aggregator_wrapper.FluxAggregatorAnswerUpdated{}) + tm.logBroadcast.On("String").Maybe().Return("") + tm.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil).Once() + + fm.ExportedBacklog().Add(fluxmonitorv2.PriorityNewRoundLog, tm.logBroadcast) + fm.ExportedProcessLogs() +} + +func TestFluxMonitor_ConsumeLogBroadcast_Error(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + consumed bool + err error + }{ + {"already consumed", true, nil}, + {"error determining already consumed", false, errors.New("err")}, + } + + db := pgtest.NewSqlxDB(t) + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + fm, tm := setup(t, db) + + tm.logBroadcaster.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(tc.consumed, tc.err).Once() + + fm.ExportedBacklog().Add(fluxmonitorv2.PriorityNewRoundLog, tm.logBroadcast) + fm.ExportedProcessLogs() + }) + } +} + +func TestFluxMonitor_DoesNotDoubleSubmit(t *testing.T) { + t.Run("when NewRound log arrives, then poll ticker fires", func(t *testing.T) { + db, nodeAddr := setupStoreWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + + fm, tm := setup(t, + db, + disableIdleTimer(true), + disablePollTicker(true), + ) + + var ( + paymentAmount = defaultMinimumContractPayment.ToInt() + availableFunds = big.NewInt(1).Mul(paymentAmount, big.NewInt(1000)) + ) + + const ( + roundID = 3 + answer = 100 + ) + + run := &pipeline.Run{ID: 1} + + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + tm.logBroadcaster.On("IsConnected").Return(true).Maybe() + + // Mocks initiated by the New Round log + tm.orm.On("MostRecentFluxMonitorRoundID", 
contractAddress).Return(uint32(roundID), nil).Once() + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(roundID), mock.Anything). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: roundID, + }, nil).Once() + tm.pipelineRunner. + On("ExecuteRun", mock.Anything, pipelineSpec, mock.Anything, mock.Anything). + Return(run, pipeline.TaskRunResults{ + { + Result: pipeline.Result{ + Value: decimal.NewFromInt(answer), + Error: nil, + }, + Task: &pipeline.HTTPTask{}, + }, + }, nil) + tm.pipelineRunner. + On("InsertFinishedRun", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + args.Get(0).(*pipeline.Run).ID = 1 + }) + tm.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil).Once() + tm.contractSubmitter.On("Submit", mock.Anything, big.NewInt(roundID), big.NewInt(answer), buildIdempotencyKey(run.ID)).Return(nil).Once() + tm.orm. + On("UpdateFluxMonitorRoundStats", + contractAddress, + uint32(roundID), + int64(1), + uint(1), + mock.Anything, + ). + Return(nil) + + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + require.NoError(t, fm.SetOracleAddress()) + + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(flux_aggregator_wrapper.LatestRoundData{ + Answer: big.NewInt(10), + UpdatedAt: big.NewInt(100), + }, nil) + + // Fire off the NewRound log, which the node should respond to + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(roundID)). + Return(flux_aggregator_wrapper.OracleRoundState{ + RoundId: roundID, + LatestSubmission: big.NewInt(answer), + EligibleToSubmit: true, + AvailableFunds: availableFunds, + PaymentAmount: paymentAmount, + OracleCount: 1, + }, nil). 
+ Once() + + fm.ExportedRespondToNewRoundLog(&flux_aggregator_wrapper.FluxAggregatorNewRound{ + RoundId: big.NewInt(roundID), + StartedAt: big.NewInt(0), + }, log.NewLogBroadcast(types.Log{}, cltest.FixtureChainID, nil)) + + // Mocks initiated by polling + // Now force the node to try to poll and ensure it does not respond this time + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)). + Return(flux_aggregator_wrapper.OracleRoundState{ + RoundId: roundID, + LatestSubmission: big.NewInt(answer), + EligibleToSubmit: true, + AvailableFunds: availableFunds, + PaymentAmount: paymentAmount, + OracleCount: 1, + }, nil). + Once() + + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(roundID), mock.Anything). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + PipelineRunID: corenull.NewInt64(int64(1), true), + Aggregator: contractAddress, + RoundID: roundID, + NumSubmissions: 1, + }, nil).Once() + + now := time.Now() + tm.pipelineORM.On("FindRun", int64(1)).Return(pipeline.Run{ + FinishedAt: null.TimeFrom(now), + }, nil) + + fm.ExportedPollIfEligible(0, 0) + }) + + t.Run("when poll ticker fires, then NewRound log arrives", func(t *testing.T) { + db, nodeAddr := setupStoreWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + fm, tm := setup(t, + db, + disableIdleTimer(true), + disablePollTicker(true), + ) + + var ( + paymentAmount = defaultMinimumContractPayment.ToInt() + availableFunds = big.NewInt(1).Mul(paymentAmount, big.NewInt(1000)) + ) + + const ( + roundID = 3 + answer = 100 + ) + + run := &pipeline.Run{ID: 1} + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + tm.logBroadcaster.On("IsConnected").Return(true).Maybe() + + // First, force the node to try to poll, which should result in a submission + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(flux_aggregator_wrapper.LatestRoundData{ + Answer: big.NewInt(10), + UpdatedAt: 
big.NewInt(100), + }, nil) + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)). + Return(flux_aggregator_wrapper.OracleRoundState{ + RoundId: roundID, + LatestSubmission: big.NewInt(answer), + EligibleToSubmit: true, + AvailableFunds: availableFunds, + PaymentAmount: paymentAmount, + OracleCount: 1, + }, nil). + Once() + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(roundID), mock.Anything). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: roundID, + }, nil).Once() + tm.pipelineRunner. + On("ExecuteRun", mock.Anything, pipelineSpec, mock.Anything, mock.Anything). + Return(run, pipeline.TaskRunResults{ + { + Result: pipeline.Result{ + Value: decimal.NewFromInt(answer), + Error: nil, + }, + Task: &pipeline.HTTPTask{}, + }, + }, nil) + tm.pipelineRunner. + On("InsertFinishedRun", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + args.Get(0).(*pipeline.Run).ID = 1 + }) + tm.contractSubmitter.On("Submit", mock.Anything, big.NewInt(roundID), big.NewInt(answer), buildIdempotencyKey(run.ID)).Return(nil).Once() + tm.orm. + On("UpdateFluxMonitorRoundStats", + contractAddress, + uint32(roundID), + int64(1), + uint(0), + mock.Anything, + ). + Return(nil). + Once() + + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + require.NoError(t, fm.SetOracleAddress()) + fm.ExportedPollIfEligible(0, 0) + + // Now fire off the NewRound log and ensure it does not respond this time + tm.orm.On("MostRecentFluxMonitorRoundID", contractAddress).Return(uint32(roundID), nil) + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(roundID), mock.Anything). 
+ Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + PipelineRunID: corenull.NewInt64(int64(1), true), + Aggregator: contractAddress, + RoundID: roundID, + NumSubmissions: 1, + }, nil).Once() + tm.pipelineORM.On("FindRun", int64(1)).Return(pipeline.Run{}, nil) + + tm.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + fm.ExportedRespondToNewRoundLog(&flux_aggregator_wrapper.FluxAggregatorNewRound{ + RoundId: big.NewInt(roundID), + StartedAt: big.NewInt(0), + }, log.NewLogBroadcast(types.Log{}, cltest.FixtureChainID, nil)) + }) + + t.Run("when poll ticker fires, then an older NewRound log arrives, but does submit on a log arrival after a reorg", func(t *testing.T) { + db, nodeAddr := setupStoreWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + fm, tm := setup(t, + db, + disableIdleTimer(true), + disablePollTicker(true), + ) + + var ( + paymentAmount = defaultMinimumContractPayment.ToInt() + availableFunds = big.NewInt(1).Mul(paymentAmount, big.NewInt(1000)) + ) + + const ( + olderRoundID = 2 + roundID = 3 + answer = 100 + ) + run := &pipeline.Run{ID: 1} + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil).Once() + tm.logBroadcaster.On("IsConnected").Return(true).Maybe() + + // First, force the node to try to poll, which should result in a submission + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(flux_aggregator_wrapper.LatestRoundData{ + Answer: big.NewInt(10), + UpdatedAt: big.NewInt(100), + }, nil) + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)). + Return(flux_aggregator_wrapper.OracleRoundState{ + RoundId: roundID, + LatestSubmission: big.NewInt(answer), + EligibleToSubmit: true, + AvailableFunds: availableFunds, + PaymentAmount: paymentAmount, + OracleCount: 1, + }, nil). + Once() + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(roundID), mock.Anything). 
+ Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + Aggregator: contractAddress, + RoundID: roundID, + }, nil).Once() + tm.pipelineRunner. + On("ExecuteRun", mock.Anything, pipelineSpec, mock.Anything, mock.Anything). + Return(run, pipeline.TaskRunResults{ + { + Result: pipeline.Result{ + Value: decimal.NewFromInt(answer), + Error: nil, + }, + Task: &pipeline.HTTPTask{}, + }, + }, nil) + tm.pipelineRunner. + On("InsertFinishedRun", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + args.Get(0).(*pipeline.Run).ID = 1 + }) + tm.contractSubmitter.On("Submit", mock.Anything, big.NewInt(roundID), big.NewInt(answer), buildIdempotencyKey(run.ID)).Return(nil).Once() + tm.orm. + On("UpdateFluxMonitorRoundStats", + contractAddress, + uint32(roundID), + int64(1), + uint(0), + mock.Anything, + ). + Return(nil). + Once() + + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + require.NoError(t, fm.SetOracleAddress()) + fm.ExportedPollIfEligible(0, 0) + + // Now fire off the NewRound log and ensure it does not respond this time + tm.orm.On("MostRecentFluxMonitorRoundID", contractAddress).Return(uint32(roundID), nil) + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(olderRoundID), mock.Anything). 
+ Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + PipelineRunID: corenull.NewInt64(int64(1), true), + Aggregator: contractAddress, + RoundID: olderRoundID, + NumSubmissions: 1, + }, nil).Once() + tm.pipelineORM.On("FindRun", int64(1)).Return(pipeline.Run{}, nil) + + tm.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + fm.ExportedRespondToNewRoundLog(&flux_aggregator_wrapper.FluxAggregatorNewRound{ + RoundId: big.NewInt(olderRoundID), + StartedAt: big.NewInt(0), + }, log.NewLogBroadcast(types.Log{}, cltest.FixtureChainID, nil)) + + // Simulate a reorg - fire the same NewRound log again, which should result in a submission this time + tm.orm.On("MostRecentFluxMonitorRoundID", contractAddress).Return(uint32(roundID), nil) + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(olderRoundID), uint(1)). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + PipelineRunID: corenull.NewInt64(int64(1), true), + Aggregator: contractAddress, + RoundID: olderRoundID, + NumSubmissions: 1, + NumNewRoundLogs: 1, + }, nil).Once() + tm.pipelineORM.On("FindRun", int64(1)).Return(pipeline.Run{}, nil) + + // all newer round stats should be deleted + tm.orm.On("DeleteFluxMonitorRoundsBackThrough", contractAddress, uint32(olderRoundID)).Return(nil) + + // then we are returning a fresh round stat, with NumSubmissions: 0 + tm.orm. + On("FindOrCreateFluxMonitorRoundStats", contractAddress, uint32(olderRoundID), uint(1)). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{ + PipelineRunID: corenull.NewInt64(int64(1), true), + Aggregator: contractAddress, + RoundID: olderRoundID, + NumSubmissions: 0, + NumNewRoundLogs: 1, + }, nil).Once() + + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(olderRoundID)). 
+ Return(flux_aggregator_wrapper.OracleRoundState{ + RoundId: olderRoundID, + LatestSubmission: big.NewInt(answer), + EligibleToSubmit: true, + AvailableFunds: availableFunds, + PaymentAmount: paymentAmount, + OracleCount: 1, + }, nil). + Once() + + // and that should result in a new submission + tm.contractSubmitter.On("Submit", mock.Anything, big.NewInt(olderRoundID), big.NewInt(answer), buildIdempotencyKey(run.ID)).Return(nil).Once() + + tm.orm. + On("UpdateFluxMonitorRoundStats", + contractAddress, + uint32(olderRoundID), + int64(1), + uint(1), + mock.Anything, + ). + Return(nil). + Once() + + tm.logBroadcaster.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + fm.ExportedRespondToNewRoundLog(&flux_aggregator_wrapper.FluxAggregatorNewRound{ + RoundId: big.NewInt(olderRoundID), + StartedAt: big.NewInt(0), + }, log.NewLogBroadcast(types.Log{}, cltest.FixtureChainID, nil)) + }) +} + +func TestFluxMonitor_DrumbeatTicker(t *testing.T) { + t.Parallel() + + db, nodeAddr := setupStoreWithKey(t) + oracles := []common.Address{nodeAddr, testutils.NewAddress()} + + // a setup with a random delay being zero + _, _ = setup(t, db, enableDrumbeatTicker("@every 10s", 0)) + + fm, tm := setup(t, db, disablePollTicker(true), disableIdleTimer(true), enableDrumbeatTicker("@every 3s", 2*time.Second)) + + tm.keyStore.On("EnabledKeysForChain", testutils.FixtureChainID).Return([]ethkey.KeyV2{{Address: nodeAddr}}, nil) + + const fetchedAnswer = 100 + answerBigInt := big.NewInt(fetchedAnswer) + + tm.fluxAggregator.On("Address").Return(common.Address{}) + tm.fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) + tm.logBroadcaster.On("Register", mock.Anything, mock.Anything).Return(func() {}) + tm.logBroadcaster.On("IsConnected").Return(true).Maybe() + + tm.fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Once() + + expectSubmission := func(roundID uint32, runID int64) { + roundState := flux_aggregator_wrapper.OracleRoundState{ + 
RoundId: roundID, + EligibleToSubmit: true, + LatestSubmission: answerBigInt, + AvailableFunds: big.NewInt(1).Mul(big.NewInt(10000), defaultMinimumContractPayment.ToInt()), + PaymentAmount: defaultMinimumContractPayment.ToInt(), + StartedAt: now(), + } + + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)). + Return(roundState, nil). + Once() + + tm.fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, roundID). + Return(roundState, nil). + Once() + + tm.orm.On("FindOrCreateFluxMonitorRoundStats", contractAddress, roundID, mock.Anything). + Return(fluxmonitorv2.FluxMonitorRoundStatsV2{Aggregator: contractAddress, RoundID: roundID}, nil). + Once() + + tm.fluxAggregator.On("LatestRoundData", nilOpts). + Return(flux_aggregator_wrapper.LatestRoundData{ + Answer: answerBigInt, + UpdatedAt: big.NewInt(100), + }, nil). + Once() + + tm.pipelineRunner. + On("ExecuteRun", mock.Anything, pipelineSpec, pipeline.NewVarsFrom( + map[string]interface{}{ + "jobRun": map[string]interface{}{ + "meta": map[string]interface{}{ + "latestAnswer": float64(fetchedAnswer), + "updatedAt": float64(100), + }, + }, + "jobSpec": map[string]interface{}{ + "databaseID": int32(0), + "externalJobID": uuid.UUID{}, + "name": "", + "evmChainID": testutils.FixtureChainID.String(), + }, + }, + ), mock.Anything). + Return(&pipeline.Run{ID: runID}, pipeline.TaskRunResults{ + { + Result: pipeline.Result{ + Value: decimal.NewFromInt(fetchedAnswer), + Error: nil, + }, + Task: &pipeline.HTTPTask{}, + }, + }, nil). + Once() + + tm.pipelineRunner.On("InsertFinishedRun", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + args.Get(0).(*pipeline.Run).ID = runID + }). + Once() + tm.contractSubmitter. + On("Submit", mock.Anything, big.NewInt(int64(roundID)), answerBigInt, buildIdempotencyKey(runID)). + Return(nil). + Once() + + tm.orm. + On("UpdateFluxMonitorRoundStats", contractAddress, roundID, runID, mock.Anything, mock.Anything). 
+ Return(nil). + Once() + } + + expectSubmission(2, 1) + expectSubmission(3, 2) + expectSubmission(4, 3) + + // catch remaining drumbeats + tm.fluxAggregator. + On("OracleRoundState", nilOpts, nodeAddr, uint32(0)). + Return(flux_aggregator_wrapper.OracleRoundState{RoundId: 4, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now()}, nil). + Maybe() + + servicetest.Run(t, fm) + + waitTime := 15 * time.Second + interval := 50 * time.Millisecond + cltest.EventuallyExpectationsMet(t, tm.logBroadcaster, waitTime, interval) + cltest.EventuallyExpectationsMet(t, tm.fluxAggregator, waitTime, interval) + cltest.EventuallyExpectationsMet(t, tm.orm, waitTime, interval) + cltest.EventuallyExpectationsMet(t, tm.pipelineORM, waitTime, interval) + cltest.EventuallyExpectationsMet(t, tm.contractSubmitter, waitTime, interval) +} diff --git a/core/services/fluxmonitorv2/helpers_test.go b/core/services/fluxmonitorv2/helpers_test.go new file mode 100644 index 00000000..7e3f7022 --- /dev/null +++ b/core/services/fluxmonitorv2/helpers_test.go @@ -0,0 +1,50 @@ +package fluxmonitorv2 + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// Format implements fmt.Formatter to always print just the pointer address. +// This is a hack to work around a race in github.com/stretchr/testify which +// prints internal fields, including the state of nested, embedded mutexes. 
+func (fm *FluxMonitor) Format(f fmt.State, verb rune) { + fmt.Fprintf(f, "%[1]T<%[1]p>", fm) +} + +func (fm *FluxMonitor) ExportedPollIfEligible(threshold, absoluteThreshold float64) { + fm.pollIfEligible(PollRequestTypePoll, NewDeviationChecker(threshold, absoluteThreshold, fm.logger), nil) +} + +func (fm *FluxMonitor) ExportedProcessLogs() { + fm.processLogs() +} + +func (fm *FluxMonitor) ExportedBacklog() *utils.BoundedPriorityQueue[log.Broadcast] { + return fm.backlog +} + +func (fm *FluxMonitor) ExportedRoundState(t *testing.T) { + _, err := fm.roundState(0) + require.NoError(t, err) +} + +func (fm *FluxMonitor) ExportedRespondToNewRoundLog(log *flux_aggregator_wrapper.FluxAggregatorNewRound, broadcast log.Broadcast) { + fm.respondToNewRoundLog(*log, broadcast) +} + +func (fm *FluxMonitor) ExportedRespondToFlagsRaisedLog() { + fm.respondToFlagsRaisedLog() + fm.rotateSelectLoop() +} + +func (fm *FluxMonitor) rotateSelectLoop() { + // the PollRequest is sent to 'rotate' the main select loop, so that new timers will be evaluated + fm.pollManager.chPoll <- PollRequest{Type: PollRequestTypeUnknown} +} diff --git a/core/services/fluxmonitorv2/integrations_test.go b/core/services/fluxmonitorv2/integrations_test.go new file mode 100644 index 00000000..1b614e35 --- /dev/null +++ b/core/services/fluxmonitorv2/integrations_test.go @@ -0,0 +1,1094 @@ +package fluxmonitorv2_test + +import ( + "encoding/json" + "fmt" + "io" + "math/big" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + 
commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flags_wrapper" + faw "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web" +) + +const description = "exactly thirty-three characters!!" 
+ +const decimals = 8 +const fee = int64(100) // Amount paid by FA contract, in PLI-wei +const faTimeout = uint32(1) + +var pollTimerPeriod = 200 * time.Millisecond // if failing due to timeouts, increase this +var oneEth = big.NewInt(1000000000000000000) +var emptyList = []common.Address{} + +// fluxAggregatorUniverse represents the universe with which the aggregator +// contract interacts +type fluxAggregatorUniverse struct { + key ethkey.KeyV2 + aggregatorContract *faw.FluxAggregator + aggregatorContractAddress common.Address + linkContract *link_token_interface.LinkToken + flagsContract *flags_wrapper.Flags + flagsContractAddress common.Address + evmChainID big.Int + // Abstraction representation of the ethereum blockchain + backend *backends.SimulatedBackend + aggregatorABI abi.ABI + // Cast of participants + sergey *bind.TransactOpts // Owns all the PLI initially + neil *bind.TransactOpts // Node operator Flux Monitor Oracle + ned *bind.TransactOpts // Node operator Flux Monitor Oracle + nallory *bind.TransactOpts // Node operator Flux Monitor Oracle running this node +} + +type fluxAggregatorUniverseConfig struct { + MinSubmission *big.Int + MaxSubmission *big.Int +} + +func WithMinMaxSubmission(min, max *big.Int) func(cfg *fluxAggregatorUniverseConfig) { + return func(cfg *fluxAggregatorUniverseConfig) { + cfg.MinSubmission = min + cfg.MaxSubmission = max + } +} + +// setupFluxAggregatorUniverse returns a fully initialized fluxAggregator universe. The +// arguments match the arguments of the same name in the FluxAggregator +// constructor. 
+func setupFluxAggregatorUniverse(t *testing.T, configOptions ...func(cfg *fluxAggregatorUniverseConfig)) fluxAggregatorUniverse { + testutils.SkipShort(t, "VRFCoordinatorV2Universe") + cfg := &fluxAggregatorUniverseConfig{ + MinSubmission: big.NewInt(0), + MaxSubmission: big.NewInt(100000000000), + } + + for _, optFn := range configOptions { + optFn(cfg) + } + + key, err := ethkey.NewV2() + require.NoError(t, err) + oracleTransactor, err := bind.NewKeyedTransactorWithChainID(key.ToEcdsaPrivKey(), testutils.SimulatedChainID) + require.NoError(t, err) + + var f fluxAggregatorUniverse + f.evmChainID = *testutils.SimulatedChainID + f.key = key + f.sergey = testutils.MustNewSimTransactor(t) + f.neil = testutils.MustNewSimTransactor(t) + f.ned = testutils.MustNewSimTransactor(t) + f.nallory = oracleTransactor + genesisData := core.GenesisAlloc{ + f.sergey.From: {Balance: assets.Ether(1000).ToInt()}, + f.neil.From: {Balance: assets.Ether(1000).ToInt()}, + f.ned.From: {Balance: assets.Ether(1000).ToInt()}, + f.nallory.From: {Balance: assets.Ether(1000).ToInt()}, + } + gasLimit := uint32(ethconfig.Defaults.Miner.GasCeil * 2) + f.backend = cltest.NewSimulatedBackend(t, genesisData, gasLimit) + + f.aggregatorABI, err = abi.JSON(strings.NewReader(faw.FluxAggregatorABI)) + require.NoError(t, err, "could not parse FluxAggregator ABI") + + var linkAddress common.Address + linkAddress, _, f.linkContract, err = link_token_interface.DeployLinkToken(f.sergey, f.backend) + require.NoError(t, err, "failed to deploy link contract to simulated ethereum blockchain") + + f.flagsContractAddress, _, f.flagsContract, err = flags_wrapper.DeployFlags(f.sergey, f.backend, f.sergey.From) + require.NoError(t, err, "failed to deploy flags contract to simulated ethereum blockchain") + + f.backend.Commit() + + // FluxAggregator contract subtracts timeout from block timestamp, which will + // be less than the timeout, leading to a SafeMath error. Wait for longer than + // the timeout... 
Golang is unpleasant about mixing int64 and time.Duration in + // arithmetic operations, so do everything as int64 and then convert. + waitTimeMs := int64(faTimeout * 5000) + time.Sleep(time.Duration((waitTimeMs + waitTimeMs/20) * int64(time.Millisecond))) + oldGasLimit := f.sergey.GasLimit + f.sergey.GasLimit = uint64(gasLimit) + f.aggregatorContractAddress, _, f.aggregatorContract, err = faw.DeployFluxAggregator( + f.sergey, + f.backend, + linkAddress, + big.NewInt(fee), + faTimeout, + common.Address{}, + cfg.MinSubmission, + cfg.MaxSubmission, + decimals, + description, + ) + f.backend.Commit() // Must commit contract to chain before we can fund with PLI + require.NoError(t, err, "failed to deploy FluxAggregator contract to simulated ethereum blockchain") + + f.sergey.GasLimit = oldGasLimit + + _, err = f.linkContract.Transfer(f.sergey, f.aggregatorContractAddress, oneEth) // Actually, PLI + require.NoError(t, err, "failed to fund FluxAggregator contract with PLI") + + _, err = f.aggregatorContract.UpdateAvailableFunds(f.sergey) + require.NoError(t, err, "failed to update aggregator's availableFunds field") + + f.backend.Commit() + availableFunds, err := f.aggregatorContract.AvailableFunds(nil) + require.NoError(t, err, "failed to retrieve AvailableFunds") + require.Equal(t, availableFunds, oneEth) + + ilogs, err := f.aggregatorContract.FilterAvailableFundsUpdated(nil, []*big.Int{oneEth}) + require.NoError(t, err, "failed to gather AvailableFundsUpdated logs") + + logs := cltest.GetLogs(t, nil, ilogs) + require.Len(t, logs, 1, "a single AvailableFundsUpdated log should be emitted") + + return f +} + +// watchSubmissionReceived creates a channel which sends the log when a +// submission is received. When event appears on submissionReceived, +// it indicates that flux monitor job run is complete. 
+// +// It will only watch for logs from addresses that are provided +func (fau fluxAggregatorUniverse) WatchSubmissionReceived(t *testing.T, addresses []common.Address) chan *faw.FluxAggregatorSubmissionReceived { + submissionReceived := make(chan *faw.FluxAggregatorSubmissionReceived) + subscription, err := fau.aggregatorContract.WatchSubmissionReceived( + nil, + submissionReceived, + []*big.Int{}, + []uint32{}, + addresses, + ) + require.NoError(t, err, "failed to subscribe to SubmissionReceived events") + t.Cleanup(subscription.Unsubscribe) + + return submissionReceived +} + +func startApplication( + t *testing.T, + fa fluxAggregatorUniverse, + overrides func(c *plugin.Config, s *plugin.Secrets), +) *cltest.TestApplication { + config, _ := heavyweight.FullTestDBV2(t, overrides) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, fa.backend, fa.key) + require.NoError(t, app.Start(testutils.Context(t))) + return app +} + +// checkOraclesAdded asserts that the correct logs were emitted for each oracle added +func checkOraclesAdded(t *testing.T, f fluxAggregatorUniverse, oracleList []common.Address) { + iaddedLogs, err := f.aggregatorContract.FilterOraclePermissionsUpdated(nil, oracleList, []bool{true}) + require.NoError(t, err, "failed to gather OraclePermissionsUpdated logs") + + addedLogs := cltest.GetLogs(t, nil, iaddedLogs) + require.Len(t, addedLogs, len(oracleList), "should have log for each oracle") + + iadminLogs, err := f.aggregatorContract.FilterOracleAdminUpdated(nil, oracleList, oracleList) + require.NoError(t, err, "failed to gather OracleAdminUpdated logs") + + adminLogs := cltest.GetLogs(t, nil, iadminLogs) + require.Len(t, adminLogs, len(oracleList), "should have log for each oracle") + + for oracleIdx, oracle := range oracleList { + require.Equal(t, oracle, addedLogs[oracleIdx].(*faw.FluxAggregatorOraclePermissionsUpdated).Oracle, "log for wrong oracle emitted") + require.Equal(t, oracle, 
adminLogs[oracleIdx].(*faw.FluxAggregatorOracleAdminUpdated).Oracle, "log for wrong oracle emitted") + } +} + +func generatePriceResponseFn(price func() int64) func() string { + return func() string { + return fmt.Sprintf(`{"data":{"result": %d}}`, price()) + } +} + +type answerParams struct { + fa *fluxAggregatorUniverse + roundId, answer int64 + from *bind.TransactOpts + isNewRound, completesAnswer bool +} + +// checkSubmission verifies all the logs emitted by fa's FluxAggregator +// contract after an updateAnswer with the given values. +func checkSubmission(t *testing.T, p answerParams, currentBalance int64, receiptBlock uint64) { + t.Helper() + if receiptBlock == 0 { + receiptBlock = p.fa.backend.Blockchain().CurrentBlock().Number.Uint64() + } + blockRange := &bind.FilterOpts{Start: 0, End: &receiptBlock} + + // Could filter for the known values here, but while that would be more + // succinct it leads to less informative error messages... Did the log not + // appear at all, or did it just have a wrong value? 
+ ilogs, err := p.fa.aggregatorContract.FilterSubmissionReceived( + blockRange, + []*big.Int{big.NewInt(p.answer)}, + []uint32{uint32(p.roundId)}, + []common.Address{p.from.From}, + ) + require.NoError(t, err, "failed to get SubmissionReceived logs") + + var srlogs []*faw.FluxAggregatorSubmissionReceived + _ = cltest.GetLogs(t, &srlogs, ilogs) + require.Len(t, srlogs, 1, "FluxAggregator did not emit correct "+ + "SubmissionReceived log") + + inrlogs, err := p.fa.aggregatorContract.FilterNewRound( + blockRange, []*big.Int{big.NewInt(p.roundId)}, []common.Address{p.from.From}, + ) + require.NoError(t, err, "failed to get NewRound logs") + + if p.isNewRound { + var nrlogs []*faw.FluxAggregatorNewRound + cltest.GetLogs(t, &nrlogs, inrlogs) + require.Len(t, nrlogs, 1, "FluxAggregator did not emit correct NewRound "+ + "log") + } else { + assert.Len(t, cltest.GetLogs(t, nil, inrlogs), 0, "FluxAggregator emitted "+ + "unexpected NewRound log") + } + + iaflogs, err := p.fa.aggregatorContract.FilterAvailableFundsUpdated( + blockRange, []*big.Int{big.NewInt(currentBalance - fee)}, + ) + require.NoError(t, err, "failed to get AvailableFundsUpdated logs") + var aflogs []*faw.FluxAggregatorAvailableFundsUpdated + _ = cltest.GetLogs(t, &aflogs, iaflogs) + assert.Len(t, aflogs, 1, "FluxAggregator did not emit correct "+ + "AvailableFundsUpdated log") + + iaulogs, err := p.fa.aggregatorContract.FilterAnswerUpdated(blockRange, + []*big.Int{big.NewInt(p.answer)}, []*big.Int{big.NewInt(p.roundId)}, + ) + require.NoError(t, err, "failed to get AnswerUpdated logs") + if p.completesAnswer { + var aulogs []*faw.FluxAggregatorAnswerUpdated + _ = cltest.GetLogs(t, &aulogs, iaulogs) + // XXX: sometimes this log is repeated; don't know why... 
+ assert.NotEmpty(t, aulogs, "FluxAggregator did not emit correct "+ + "AnswerUpdated log") + } +} + +// currentbalance returns the current balance of fa's FluxAggregator +func currentBalance(t *testing.T, fa *fluxAggregatorUniverse) *big.Int { + currentBalance, err := fa.aggregatorContract.AvailableFunds(nil) + require.NoError(t, err, "failed to get current FA balance") + return currentBalance +} + +// submitAnswer simulates a call to fa's FluxAggregator contract from a fake +// node (neil or ned), with the given roundId and answer, and checks that all +// the logs emitted by the contract are correct +func submitAnswer(t *testing.T, p answerParams) { + cb := currentBalance(t, p.fa) + + // used to ensure that the simulated backend has processed the submission, + // before we search for the log and check it. + srCh := make(chan *faw.FluxAggregatorSubmissionReceived) + fromBlock := uint64(0) + srSubscription, err := p.fa.aggregatorContract.WatchSubmissionReceived( + &bind.WatchOpts{Start: &fromBlock}, + srCh, + []*big.Int{big.NewInt(p.answer)}, + []uint32{uint32(p.roundId)}, + []common.Address{p.from.From}, + ) + defer func() { + srSubscription.Unsubscribe() + err = <-srSubscription.Err() + require.NoError(t, err, "failed to unsubscribe from AvailableFundsUpdated logs") + }() + + _, err = p.fa.aggregatorContract.Submit( + p.from, big.NewInt(p.roundId), big.NewInt(p.answer), + ) + require.NoError(t, err, "failed to submit answer to flux aggregator") + + p.fa.backend.Commit() + + select { + case <-srCh: + case <-time.After(5 * time.Second): + t.Fatal("failed to complete submission to flux aggregator") + } + checkSubmission(t, p, cb.Int64(), 0) +} + +func awaitSubmission(t *testing.T, backend *backends.SimulatedBackend, submissionReceived chan *faw.FluxAggregatorSubmissionReceived) ( + receiptBlock uint64, answer int64, +) { + t.Helper() + + // Send blocks until we get a response + stopBlocks := utils.FiniteTicker(time.Second, func() { backend.Commit() }) + defer 
stopBlocks() + select { // block until FluxAggregator contract acknowledges plugin message + case log := <-submissionReceived: + return log.Raw.BlockNumber, log.Submission.Int64() + case <-time.After(20 * pollTimerPeriod): + t.Fatal("plugin failed to submit answer to FluxAggregator contract") + return 0, 0 // unreachable + } +} + +// assertNoSubmission asserts that no submission was sent for a given duration +func assertNoSubmission(t *testing.T, + submissionReceived chan *faw.FluxAggregatorSubmissionReceived, + duration time.Duration, + msg string, +) { + + // drain the channel + for len(submissionReceived) > 0 { + <-submissionReceived + } + + select { + case <-submissionReceived: + assert.Fail(t, "flags are up, but submission was sent", msg) + case <-time.After(duration): + } +} + +// assertPipelineRunCreated checks that a pipeline exists for a given round and +// verifies the answer +func assertPipelineRunCreated(t *testing.T, db *sqlx.DB, roundID int64, result int64) pipeline.Run { + // Fetch the stats to extract the run id + stats := fluxmonitorv2.FluxMonitorRoundStatsV2{} + require.NoError(t, db.Get(&stats, "SELECT * FROM flux_monitor_round_stats_v2 WHERE round_id = $1", roundID)) + if stats.ID == 0 { + t.Fatalf("Stats for round id: %v not found!", roundID) + } + require.True(t, stats.PipelineRunID.Valid) + // Verify the pipeline run data + run := pipeline.Run{} + require.NoError(t, db.Get(&run, `SELECT * FROM pipeline_runs WHERE id = $1`, stats.PipelineRunID.Int64), "runID %v", stats.PipelineRunID) + assert.Equal(t, []interface{}{result}, run.Outputs.Val) + return run +} + +func checkLogWasConsumed(t *testing.T, fa fluxAggregatorUniverse, db *sqlx.DB, pipelineSpecID int32, blockNumber uint64, cfg pg.QConfig) { + t.Helper() + lggr := logger.TestLogger(t) + lggr.Infof("Waiting for log on block: %v, job id: %v", blockNumber, pipelineSpecID) + + g := gomega.NewWithT(t) + g.Eventually(func() bool { + block := fa.backend.Blockchain().GetBlockByNumber(blockNumber) 
+ require.NotNil(t, block) + orm := log.NewORM(db, lggr, cfg, fa.evmChainID) + consumed, err := orm.WasBroadcastConsumed(block.Hash(), 0, pipelineSpecID) + require.NoError(t, err) + fa.backend.Commit() + return consumed + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) +} + +func TestFluxMonitor_Deviation(t *testing.T) { + tests := []struct { + name string + eip1559 bool + }{ + {"legacy", false}, + {"eip1559", true}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := gomega.NewWithT(t) + fa := setupFluxAggregatorUniverse(t) + + // - add oracles + oracleList := []common.Address{fa.nallory.From} + _, err := fa.aggregatorContract.ChangeOracles(fa.sergey, emptyList, oracleList, oracleList, 1, 1, 0) + assert.NoError(t, err, "failed to add oracles to aggregator") + fa.backend.Commit() + checkOraclesAdded(t, fa, oracleList) + + // Set up plugin app + app := startApplication(t, fa, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.HTTPRequest.DefaultTimeout = commonconfig.MustNewDuration(100 * time.Millisecond) + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(1 * time.Second) + c.EVM[0].GasEstimator.EIP1559DynamicFees = &test.eip1559 + }) + + type v struct { + count int + updatedAt int64 + } + expectedMeta := map[string]v{} + var expMetaMu sync.Mutex + + var reportPrice atomic.Int64 + reportPrice.Store(100) + mockServer := cltest.NewHTTPMockServerWithAlterableResponseAndRequest(t, + generatePriceResponseFn(reportPrice.Load), + func(r *http.Request) { + b, err1 := io.ReadAll(r.Body) + require.NoError(t, err1) + var m bridges.BridgeMetaDataJSON + require.NoError(t, json.Unmarshal(b, &m)) + if m.Meta.LatestAnswer != nil && m.Meta.UpdatedAt != nil { + k := m.Meta.LatestAnswer.String() + expMetaMu.Lock() + curr := expectedMeta[k] + assert.True(t, m.Meta.UpdatedAt.IsInt64()) // sanity check unix ts + expectedMeta[k] = v{curr.count + 1, m.Meta.UpdatedAt.Int64()} + 
expMetaMu.Unlock() + } + }, + ) + t.Cleanup(mockServer.Close) + u, _ := url.Parse(mockServer.URL) + require.NoError(t, app.BridgeORM().CreateBridgeType(&bridges.BridgeType{ + Name: "bridge", + URL: models.WebURL(*u), + })) + + // When event appears on submissionReceived, flux monitor job run is complete + submissionReceived := fa.WatchSubmissionReceived(t, + []common.Address{fa.nallory.From}, + ) + + // Create the job + s := ` + type = "fluxmonitor" + schemaVersion = 1 + name = "integration test" + contractAddress = "%s" + threshold = 2.0 + absoluteThreshold = 0.0 + evmChainID = 1337 + + idleTimerPeriod = "10s" + idleTimerDisabled = false + + pollTimerPeriod = "%s" + pollTimerDisabled = false + + observationSource = """ + ds1 [type=bridge name=bridge]; + ds1_parse [type=jsonparse path="data,result"]; + + ds1 -> ds1_parse + """ + ` + + s = fmt.Sprintf(s, fa.aggregatorContractAddress, 2*time.Second) + + requestBody, err := json.Marshal(web.CreateJobRequest{ + TOML: s, + }) + assert.NoError(t, err) + + initialBalance := currentBalance(t, &fa).Int64() + + jobResponse := cltest.CreateJobViaWeb2(t, app, string(requestBody)) + i, err := strconv.ParseInt(jobResponse.ID, 10, 32) + require.NoError(t, err) + jobID := int32(i) + + // Waiting for flux monitor to finish Register process in log broadcaster + // and then to have log broadcaster backfill logs after the debounceResubscribe period of ~ 1 sec + g.Eventually(func() uint32 { + lb := evmtest.MustGetDefaultChain(t, app.GetRelayers().LegacyEVMChains()).LogBroadcaster() + return lb.(log.BroadcasterInTest).TrackedAddressesCount() + }, testutils.WaitTimeout(t), 200*time.Millisecond).Should(gomega.BeNumerically(">=", 1)) + + // Initial Poll + receiptBlock, answer := awaitSubmission(t, fa.backend, submissionReceived) + + lggr := logger.TestLogger(t) + lggr.Infof("Detected submission: %v in block %v", answer, receiptBlock) + + assert.Equal(t, reportPrice.Load(), answer, + "failed to report correct price to contract") + + 
checkSubmission(t, + answerParams{ + fa: &fa, + roundId: 1, + answer: int64(100), + from: fa.nallory, + isNewRound: true, + completesAnswer: true, + }, + initialBalance, + receiptBlock, + ) + assertPipelineRunCreated(t, app.GetSqlxDB(), 1, int64(100)) + + // Need to wait until NewRound log is consumed - otherwise there is a chance + // it will arrive after the next answer is submitted, and cause + // DeleteFluxMonitorRoundsBackThrough to delete previous stats + checkLogWasConsumed(t, fa, app.GetSqlxDB(), jobID, receiptBlock, app.GetConfig().Database()) + + lggr.Info("Updating price to 103") + // Change reported price to a value outside the deviation + reportPrice.Store(103) + receiptBlock, answer = awaitSubmission(t, fa.backend, submissionReceived) + + lggr.Infof("Detected submission: %v in block %v", answer, receiptBlock) + + assert.Equal(t, reportPrice.Load(), answer, + "failed to report correct price to contract") + + checkSubmission(t, + answerParams{ + fa: &fa, + roundId: 2, + answer: int64(103), + from: fa.nallory, + isNewRound: true, + completesAnswer: true, + }, + initialBalance-fee, + receiptBlock, + ) + assertPipelineRunCreated(t, app.GetSqlxDB(), 2, int64(103)) + + // Need to wait until NewRound log is consumed - otherwise there is a chance + // it will arrive after the next answer is submitted, and cause + // DeleteFluxMonitorRoundsBackThrough to delete previous stats + checkLogWasConsumed(t, fa, app.GetSqlxDB(), jobID, receiptBlock, app.GetConfig().Database()) + + // Should not received a submission as it is inside the deviation + reportPrice.Store(104) + assertNoSubmission(t, submissionReceived, 2*time.Second, "Should not receive a submission") + + expMetaMu.Lock() + defer expMetaMu.Unlock() + assert.Len(t, expectedMeta, 2, "expected metadata %v", expectedMeta) + assert.Greater(t, expectedMeta["100"].count, 0, "Stored answer metadata does not contain 100 but contains: %v", expectedMeta) + assert.Greater(t, expectedMeta["103"].count, 0, "Stored answer 
metadata does not contain 103 but contains: %v", expectedMeta) + assert.Greater(t, expectedMeta["103"].updatedAt, expectedMeta["100"].updatedAt) + }) + } +} + +func TestFluxMonitor_NewRound(t *testing.T) { + g := gomega.NewWithT(t) + fa := setupFluxAggregatorUniverse(t) + + // - add oracles + oracleList := []common.Address{fa.neil.From, fa.nallory.From} + _, err := fa.aggregatorContract.ChangeOracles(fa.sergey, emptyList, oracleList, oracleList, 1, 2, 1) + assert.NoError(t, err, "failed to add oracles to aggregator") + fa.backend.Commit() + checkOraclesAdded(t, fa, oracleList) + + // Set up plugin app + app := startApplication(t, fa, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.HTTPRequest.DefaultTimeout = commonconfig.MustNewDuration(100 * time.Millisecond) + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(1 * time.Second) + flags := ethkey.EIP55AddressFromAddress(fa.flagsContractAddress) + c.EVM[0].FlagsContractAddress = &flags + }) + + initialBalance := currentBalance(t, &fa).Int64() + + // Create mock server + var reportPrice atomic.Int64 + reportPrice.Store(1) + mockServer := cltest.NewHTTPMockServerWithAlterableResponse(t, + generatePriceResponseFn(reportPrice.Load), + ) + t.Cleanup(mockServer.Close) + + // When event appears on submissionReceived, flux monitor job run is complete + submissionReceived := fa.WatchSubmissionReceived(t, + []common.Address{fa.nallory.From}, + ) + + // Create the job + s := ` +type = "fluxmonitor" +schemaVersion = 1 +name = "example flux monitor spec" +contractAddress = "%s" +evmChainID = "%s" +threshold = 0.5 +absoluteThreshold = 0.0 + +idleTimerPeriod = "1s" +idleTimerDisabled = false + +pollTimerPeriod = "%s" +pollTimerDisabled = false + +observationSource = """ +ds1 [type=http method=GET url="%s"]; +ds1_parse [type=jsonparse path="data,result"]; + +ds1 -> ds1_parse +""" + ` + + s = fmt.Sprintf(s, fa.aggregatorContractAddress, testutils.SimulatedChainID.String(), pollTimerPeriod, 
mockServer.URL) + + // raise flags to disable polling + _, err = fa.flagsContract.RaiseFlag(fa.sergey, evmutils.ZeroAddress) // global kill switch + require.NoError(t, err) + _, err = fa.flagsContract.RaiseFlag(fa.sergey, fa.aggregatorContractAddress) + require.NoError(t, err) + fa.backend.Commit() + + requestBody, err := json.Marshal(web.CreateJobRequest{ + TOML: s, + }) + assert.NoError(t, err) + + cltest.CreateJobViaWeb2(t, app, string(requestBody)) + + // Waiting for flux monitor to finish Register process in log broadcaster + // and then to have log broadcaster backfill logs after the debounceResubscribe period of ~ 1 sec + g.Eventually(func() uint32 { + lb := evmtest.MustGetDefaultChain(t, app.GetRelayers().LegacyEVMChains()).LogBroadcaster() + return lb.(log.BroadcasterInTest).TrackedAddressesCount() + }, testutils.WaitTimeout(t), 200*time.Millisecond).Should(gomega.BeNumerically(">=", 2)) + + // Have the fake node start a new round + submitAnswer(t, answerParams{ + fa: &fa, + roundId: 1, + answer: 2, + from: fa.neil, + isNewRound: true, + completesAnswer: false, + }) + + // Finally, the logs from log broadcaster are sent only after a next block is received. 
+ fa.backend.Commit() + + // Wait for the node's submission, and ensure it submits to the round + // started by the fake node + receiptBlock, _ := awaitSubmission(t, fa.backend, submissionReceived) + checkSubmission(t, + answerParams{ + fa: &fa, + roundId: 1, + answer: int64(1), + from: fa.nallory, + isNewRound: false, + completesAnswer: true, + }, + initialBalance-fee, + receiptBlock, + ) +} + +func TestFluxMonitor_HibernationMode(t *testing.T) { + g := gomega.NewWithT(t) + fa := setupFluxAggregatorUniverse(t) + + // - add oracles + oracleList := []common.Address{fa.nallory.From} + _, err := fa.aggregatorContract.ChangeOracles(fa.sergey, emptyList, oracleList, oracleList, 1, 1, 0) + assert.NoError(t, err, "failed to add oracles to aggregator") + fa.backend.Commit() + checkOraclesAdded(t, fa, oracleList) + + // Start plugin app + app := startApplication(t, fa, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.HTTPRequest.DefaultTimeout = commonconfig.MustNewDuration(100 * time.Millisecond) + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(1 * time.Second) + flags := ethkey.EIP55AddressFromAddress(fa.flagsContractAddress) + c.EVM[0].FlagsContractAddress = &flags + }) + + // Create mock server + var reportPrice atomic.Int64 + reportPrice.Store(1) + mockServer := cltest.NewHTTPMockServerWithAlterableResponse(t, + generatePriceResponseFn(reportPrice.Load), + ) + t.Cleanup(mockServer.Close) + + // When event appears on submissionReceived, flux monitor job run is complete + submissionReceived := fa.WatchSubmissionReceived(t, + []common.Address{fa.nallory.From}, + ) + + // Create the job + s := ` +type = "fluxmonitor" +schemaVersion = 1 +name = "example flux monitor spec" +contractAddress = "%s" +evmChainID = "%s" +threshold = 0.5 +absoluteThreshold = 0.0 + +idleTimerPeriod = "1s" +idleTimerDisabled = false + +pollTimerPeriod = "%s" +pollTimerDisabled = false + +observationSource = """ +ds1 [type=http method=GET url="%s"]; +ds1_parse 
[type=jsonparse path="data,result"]; + +ds1 -> ds1_parse +""" + ` + + s = fmt.Sprintf(s, fa.aggregatorContractAddress, testutils.SimulatedChainID.String(), "1000ms", mockServer.URL) + + // raise flags + _, err = fa.flagsContract.RaiseFlag(fa.sergey, evmutils.ZeroAddress) // global kill switch + require.NoError(t, err) + + _, err = fa.flagsContract.RaiseFlag(fa.sergey, fa.aggregatorContractAddress) + require.NoError(t, err) + fa.backend.Commit() + + requestBody, err := json.Marshal(web.CreateJobRequest{ + TOML: s, + }) + assert.NoError(t, err) + + j := cltest.CreateJobViaWeb2(t, app, string(requestBody)) + + // node doesn't submit initial response, because flag is up + // Wait here so the next lower flags doesn't trigger immediately + cltest.AssertPipelineRunsStays(t, j.PipelineSpec.ID, app.GetSqlxDB(), 0) + + // lower global kill switch flag - should trigger job run + _, err = fa.flagsContract.LowerFlags(fa.sergey, []common.Address{evmutils.ZeroAddress}) + require.NoError(t, err) + fa.backend.Commit() + awaitSubmission(t, fa.backend, submissionReceived) + + reportPrice.Store(2) // change in price should trigger run + awaitSubmission(t, fa.backend, submissionReceived) + + // lower contract's flag - should have no effect + _, err = fa.flagsContract.LowerFlags(fa.sergey, []common.Address{fa.aggregatorContractAddress}) + require.NoError(t, err) + fa.backend.Commit() + assertNoSubmission(t, submissionReceived, 5*pollTimerPeriod, "should not trigger a new run because FM is already hibernating") + + // change in price should trigger run + reportPrice.Store(4) + awaitSubmission(t, fa.backend, submissionReceived) + + // raise both flags + _, err = fa.flagsContract.RaiseFlag(fa.sergey, fa.aggregatorContractAddress) + require.NoError(t, err) + _, err = fa.flagsContract.RaiseFlag(fa.sergey, evmutils.ZeroAddress) + require.NoError(t, err) + fa.backend.Commit() + + // wait for FM to receive flags raised logs + g.Eventually(func() int { + ilogs, err := 
fa.flagsContract.FilterFlagRaised(nil, []common.Address{}) + require.NoError(t, err) + logs := cltest.GetLogs(t, nil, ilogs) + return len(logs) + }, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.Equal(4)) + + // change in price should not trigger run + reportPrice.Store(8) + assertNoSubmission(t, submissionReceived, 5*pollTimerPeriod, "should not trigger a new run, while flag is raised") +} + +func TestFluxMonitor_InvalidSubmission(t *testing.T) { + // 8 decimals places used for prices. + fa := setupFluxAggregatorUniverse(t, WithMinMaxSubmission( + big.NewInt(100000000), // 1 * 10^8 + big.NewInt(1000000000000), // 10000 * 10^8 + )) + + oracleList := []common.Address{fa.neil.From, fa.ned.From, fa.nallory.From} + _, err := fa.aggregatorContract.ChangeOracles(fa.sergey, emptyList, oracleList, oracleList, 1, 3, 2) + assert.NoError(t, err, "failed to add oracles to aggregator") + fa.backend.Commit() + + // Set up plugin app + app := startApplication(t, fa, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.HTTPRequest.DefaultTimeout = commonconfig.MustNewDuration(100 * time.Millisecond) + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + + // Report a price that is above the maximum allowed value, + // causing it to revert. + var reportPrice atomic.Int64 + reportPrice.Store(10001) // 10001 ETH/USD price is outside the range. 
+ mockServer := cltest.NewHTTPMockServerWithAlterableResponse(t, + generatePriceResponseFn(reportPrice.Load), + ) + t.Cleanup(mockServer.Close) + + // Generate custom TOML for this test due to precision change + toml := ` +type = "fluxmonitor" +schemaVersion = 1 +name = "example flux monitor spec" +contractAddress = "%s" +evmChainID = "%s" +threshold = 0.5 +absoluteThreshold = 0.01 + +idleTimerPeriod = "1h" +idleTimerDisabled = false + +pollTimerPeriod = "%s" +pollTimerDisabled = false + +observationSource = """ +ds1 [type=http method=GET url="%s"]; +ds1_parse [type=jsonparse path="data,result"]; + +ds1 -> ds1_parse +""" +` + + s := fmt.Sprintf(toml, fa.aggregatorContractAddress, testutils.SimulatedChainID.String(), "100ms", mockServer.URL) + + // raise flags + _, err = fa.flagsContract.RaiseFlag(fa.sergey, evmutils.ZeroAddress) // global kill switch + require.NoError(t, err) + _, err = fa.flagsContract.RaiseFlag(fa.sergey, fa.aggregatorContractAddress) + require.NoError(t, err) + fa.backend.Commit() + + requestBody, err := json.Marshal(web.CreateJobRequest{ + TOML: s, + }) + assert.NoError(t, err) + + j := cltest.CreateJobViaWeb2(t, app, string(requestBody)) + + closer := cltest.Mine(fa.backend, 500*time.Millisecond) + defer closer() + + // We should see a spec error because the value is too large to submit on-chain. 
+ jobID, err := strconv.ParseInt(j.ID, 10, 32) + require.NoError(t, err) + + jse := cltest.WaitForSpecErrorV2(t, app.GetSqlxDB(), int32(jobID), 1) + assert.Contains(t, jse[0].Description, "Answer is outside acceptable range") +} + +func TestFluxMonitorAntiSpamLogic(t *testing.T) { + // - deploy a brand new FM contract + fa := setupFluxAggregatorUniverse(t) + + // - add oracles + oracleList := []common.Address{fa.neil.From, fa.ned.From, fa.nallory.From} + _, err := fa.aggregatorContract.ChangeOracles(fa.sergey, emptyList, oracleList, oracleList, 2, 3, 2) + assert.NoError(t, err, "failed to add oracles to aggregator") + fa.backend.Commit() + checkOraclesAdded(t, fa, oracleList) + + // Set up plugin app + app := startApplication(t, fa, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.HTTPRequest.DefaultTimeout = commonconfig.MustNewDuration(100 * time.Millisecond) + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + + answer := int64(1) // Answer the nodes give on the first round + + //- have one of the fake nodes start a round. + roundId := int64(1) + processedAnswer := answer * 100 /* job has multiply times 100 */ + submitAnswer(t, answerParams{ + fa: &fa, + roundId: roundId, + answer: processedAnswer, + from: fa.neil, + isNewRound: true, + completesAnswer: false, + }) + + // - successfully close the round through the submissions of the other nodes + // Response by spammy plugin node, nallory + // + // The initial balance is the PLI balance of flux aggregator contract. We + // use it to check that the fee for submitting an answer has been paid out. 
+ initialBalance := currentBalance(t, &fa).Int64() + var reportPrice atomic.Int64 + reportPrice.Store(answer) + priceResponse := func() string { + return fmt.Sprintf(`{"data":{"result": %d}}`, reportPrice.Load()) + } + mockServer := cltest.NewHTTPMockServerWithAlterableResponse(t, priceResponse) + t.Cleanup(mockServer.Close) + + // When event appears on submissionReceived, flux monitor job run is complete + submissionReceived := fa.WatchSubmissionReceived(t, + []common.Address{fa.nallory.From}, + ) + + // Create FM Job, and wait for job run to start (the above submitAnswr call + // to FluxAggregator contract initiates a run.) + s := ` +type = "fluxmonitor" +schemaVersion = 1 +name = "example flux monitor spec" +contractAddress = "%s" +evmChainID = "%s" +threshold = 0.5 +absoluteThreshold = 0.0 + +idleTimerPeriod = "1s" +idleTimerDisabled = false + +pollTimerPeriod = "%s" +pollTimerDisabled = false + +observationSource = """ +ds1 [type=http method=GET url="%s"]; +ds1_parse [type=jsonparse path="data,result"]; +ds1_multiply [type=multiply times=100] + +ds1 -> ds1_parse -> ds1_multiply +""" + ` + + s = fmt.Sprintf(s, fa.aggregatorContractAddress, testutils.SimulatedChainID.String(), "200ms", mockServer.URL) + requestBody, err := json.Marshal(web.CreateJobRequest{ + TOML: s, + }) + assert.NoError(t, err) + + cltest.CreateJobViaWeb2(t, app, string(requestBody)) + + receiptBlock, answer := awaitSubmission(t, fa.backend, submissionReceived) + + assert.Equal(t, 100*reportPrice.Load(), answer, + "failed to report correct price to contract") + checkSubmission(t, + answerParams{ + fa: &fa, + roundId: roundId, + answer: processedAnswer, + from: fa.nallory, + isNewRound: false, + completesAnswer: true}, + initialBalance, + receiptBlock, + ) + + //- have the malicious node start the next round. 
+ nextRoundBalance := initialBalance - fee + // Triggers a new round, since price deviation exceeds threshold + reportPrice.Store(answer + 1) + + receiptBlock, _ = awaitSubmission(t, fa.backend, submissionReceived) + newRound := roundId + 1 + processedAnswer = 100 * reportPrice.Load() + checkSubmission(t, + answerParams{ + fa: &fa, + roundId: newRound, + answer: processedAnswer, + from: fa.nallory, + isNewRound: true, + completesAnswer: false}, + nextRoundBalance, + receiptBlock, + ) + + // Successfully close the round through the submissions of the other nodes + submitAnswer(t, + answerParams{ + fa: &fa, + roundId: newRound, + answer: processedAnswer, + from: fa.neil, + isNewRound: false, + completesAnswer: true}, + ) + + // Have the malicious node try to start another round. It should not pass as + // restartDelay has not been reached. + newRound = newRound + 1 + processedAnswer = 100 * reportPrice.Load() + + submitMaliciousAnswer(t, + answerParams{ + fa: &fa, + roundId: newRound, + answer: processedAnswer, + from: fa.nallory, + isNewRound: true, + completesAnswer: false}, + ) + + assertNoSubmission(t, submissionReceived, 5*pollTimerPeriod, "FA allowed plugin node to start a new round early") + + // Try to start a new round directly, should fail because of delay + _, err = fa.aggregatorContract.RequestNewRound(fa.nallory) + assert.Error(t, err, "FA allowed plugin node to start a new round early") + + //- finally, ensure it can start a legitimate round after restartDelay is + //reached start an intervening round + submitAnswer(t, answerParams{fa: &fa, roundId: newRound, + answer: processedAnswer, from: fa.ned, isNewRound: true, + completesAnswer: false}) + submitAnswer(t, answerParams{fa: &fa, roundId: newRound, + answer: processedAnswer, from: fa.neil, isNewRound: false, + completesAnswer: true}) + + // start a legitimate new round + reportPrice.Add(3) + + // Wait for the node's submission, and ensure it submits to the round + // started by the fake node + 
awaitSubmission(t, fa.backend, submissionReceived) +} + +// submitMaliciousAnswer simulates a call to fa's FluxAggregator contract from +// nallory, with the given roundId and answer and errors +func submitMaliciousAnswer(t *testing.T, p answerParams) { + _, err := p.fa.aggregatorContract.Submit( + p.from, big.NewInt(p.roundId), big.NewInt(p.answer), + ) + require.Error(t, err) + + p.fa.backend.Commit() +} diff --git a/core/services/fluxmonitorv2/key_store.go b/core/services/fluxmonitorv2/key_store.go new file mode 100644 index 00000000..dc22a2a1 --- /dev/null +++ b/core/services/fluxmonitorv2/key_store.go @@ -0,0 +1,28 @@ +package fluxmonitorv2 + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +//go:generate mockery --quiet --name KeyStoreInterface --output ./mocks/ --case=underscore + +// KeyStoreInterface defines an interface to interact with the keystore +type KeyStoreInterface interface { + EnabledKeysForChain(chainID *big.Int) ([]ethkey.KeyV2, error) + GetRoundRobinAddress(chainID *big.Int, addrs ...common.Address) (common.Address, error) +} + +// KeyStore implements KeyStoreInterface +type KeyStore struct { + keystore.Eth +} + +// NewKeyStore initializes a new keystore +func NewKeyStore(ks keystore.Eth) *KeyStore { + return &KeyStore{ks} +} diff --git a/core/services/fluxmonitorv2/key_store_test.go b/core/services/fluxmonitorv2/key_store_test.go new file mode 100644 index 00000000..8dc9b4f1 --- /dev/null +++ b/core/services/fluxmonitorv2/key_store_test.go @@ -0,0 +1,54 @@ +package fluxmonitorv2_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + 
"github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" +) + +func TestKeyStore_EnabledKeysForChain(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := pgtest.NewQConfig(true) + ethKeyStore := cltest.NewKeyStore(t, db, cfg).Eth() + + ks := fluxmonitorv2.NewKeyStore(ethKeyStore) + + key, err := ethKeyStore.Create(testutils.FixtureChainID) + require.NoError(t, err) + key2, err := ethKeyStore.Create(testutils.SimulatedChainID) + require.NoError(t, err) + + keys, err := ks.EnabledKeysForChain(testutils.FixtureChainID) + require.NoError(t, err) + require.Len(t, keys, 1) + require.Equal(t, key, keys[0]) + + keys, err = ks.EnabledKeysForChain(testutils.SimulatedChainID) + require.NoError(t, err) + require.Len(t, keys, 1) + require.Equal(t, key2, keys[0]) +} + +func TestKeyStore_GetRoundRobinAddress(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := pgtest.NewQConfig(true) + ethKeyStore := cltest.NewKeyStore(t, db, cfg).Eth() + + _, k0Address := cltest.MustInsertRandomKey(t, ethKeyStore) + + ks := fluxmonitorv2.NewKeyStore(ethKeyStore) + + // Gets the only address in the keystore + addr, err := ks.GetRoundRobinAddress(testutils.FixtureChainID) + require.NoError(t, err) + require.Equal(t, k0Address, addr) +} diff --git a/core/services/fluxmonitorv2/mocks/contract_submitter.go b/core/services/fluxmonitorv2/mocks/contract_submitter.go new file mode 100644 index 00000000..3154b4c8 --- /dev/null +++ b/core/services/fluxmonitorv2/mocks/contract_submitter.go @@ -0,0 +1,47 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + big "math/big" + + mock "github.com/stretchr/testify/mock" +) + +// ContractSubmitter is an autogenerated mock type for the ContractSubmitter type +type ContractSubmitter struct { + mock.Mock +} + +// Submit provides a mock function with given fields: ctx, roundID, submission, idempotencyKey +func (_m *ContractSubmitter) Submit(ctx context.Context, roundID *big.Int, submission *big.Int, idempotencyKey *string) error { + ret := _m.Called(ctx, roundID, submission, idempotencyKey) + + if len(ret) == 0 { + panic("no return value specified for Submit") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int, *big.Int, *string) error); ok { + r0 = rf(ctx, roundID, submission, idempotencyKey) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewContractSubmitter creates a new instance of ContractSubmitter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewContractSubmitter(t interface { + mock.TestingT + Cleanup(func()) +}) *ContractSubmitter { + mock := &ContractSubmitter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/fluxmonitorv2/mocks/flags.go b/core/services/fluxmonitorv2/mocks/flags.go new file mode 100644 index 00000000..2da3a30e --- /dev/null +++ b/core/services/fluxmonitorv2/mocks/flags.go @@ -0,0 +1,128 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + common "github.com/ethereum/go-ethereum/common" + + generated "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// Flags is an autogenerated mock type for the Flags type +type Flags struct { + mock.Mock +} + +// Address provides a mock function with given fields: +func (_m *Flags) Address() common.Address { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 common.Address + if rf, ok := ret.Get(0).(func() common.Address); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + return r0 +} + +// ContractExists provides a mock function with given fields: +func (_m *Flags) ContractExists() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ContractExists") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// IsLowered provides a mock function with given fields: contractAddr +func (_m *Flags) IsLowered(contractAddr common.Address) (bool, error) { + ret := _m.Called(contractAddr) + + if len(ret) == 0 { + panic("no return value specified for IsLowered") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(common.Address) (bool, error)); ok { + return rf(contractAddr) + } + if rf, ok := ret.Get(0).(func(common.Address) bool); ok { + r0 = rf(contractAddr) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(common.Address) error); ok { + r1 = rf(contractAddr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseLog provides a mock function with given fields: log +func (_m *Flags) ParseLog(log types.Log) (generated.AbigenLog, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseLog") + } + + var r0 
generated.AbigenLog + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (generated.AbigenLog, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) generated.AbigenLog); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(generated.AbigenLog) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewFlags creates a new instance of Flags. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFlags(t interface { + mock.TestingT + Cleanup(func()) +}) *Flags { + mock := &Flags{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/fluxmonitorv2/mocks/key_store_interface.go b/core/services/fluxmonitorv2/mocks/key_store_interface.go new file mode 100644 index 00000000..cf20df59 --- /dev/null +++ b/core/services/fluxmonitorv2/mocks/key_store_interface.go @@ -0,0 +1,98 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + ethkey "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + + mock "github.com/stretchr/testify/mock" +) + +// KeyStoreInterface is an autogenerated mock type for the KeyStoreInterface type +type KeyStoreInterface struct { + mock.Mock +} + +// EnabledKeysForChain provides a mock function with given fields: chainID +func (_m *KeyStoreInterface) EnabledKeysForChain(chainID *big.Int) ([]ethkey.KeyV2, error) { + ret := _m.Called(chainID) + + if len(ret) == 0 { + panic("no return value specified for EnabledKeysForChain") + } + + var r0 []ethkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int) ([]ethkey.KeyV2, error)); ok { + return rf(chainID) + } + if rf, ok := ret.Get(0).(func(*big.Int) []ethkey.KeyV2); ok { + r0 = rf(chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ethkey.KeyV2) + } + } + + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRoundRobinAddress provides a mock function with given fields: chainID, addrs +func (_m *KeyStoreInterface) GetRoundRobinAddress(chainID *big.Int, addrs ...common.Address) (common.Address, error) { + _va := make([]interface{}, len(addrs)) + for _i := range addrs { + _va[_i] = addrs[_i] + } + var _ca []interface{} + _ca = append(_ca, chainID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetRoundRobinAddress") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int, ...common.Address) (common.Address, error)); ok { + return rf(chainID, addrs...) + } + if rf, ok := ret.Get(0).(func(*big.Int, ...common.Address) common.Address); ok { + r0 = rf(chainID, addrs...) 
+ } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*big.Int, ...common.Address) error); ok { + r1 = rf(chainID, addrs...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewKeyStoreInterface creates a new instance of KeyStoreInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewKeyStoreInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *KeyStoreInterface { + mock := &KeyStoreInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/fluxmonitorv2/mocks/orm.go b/core/services/fluxmonitorv2/mocks/orm.go new file mode 100644 index 00000000..f237bcf4 --- /dev/null +++ b/core/services/fluxmonitorv2/mocks/orm.go @@ -0,0 +1,179 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + fluxmonitorv2 "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" + + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// ORM is an autogenerated mock type for the ORM type +type ORM struct { + mock.Mock +} + +// CountFluxMonitorRoundStats provides a mock function with given fields: +func (_m *ORM) CountFluxMonitorRoundStats() (int, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CountFluxMonitorRoundStats") + } + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func() (int, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateEthTransaction provides a mock function with given fields: ctx, 
fromAddress, toAddress, payload, gasLimit, idempotencyKey +func (_m *ORM) CreateEthTransaction(ctx context.Context, fromAddress common.Address, toAddress common.Address, payload []byte, gasLimit uint32, idempotencyKey *string) error { + ret := _m.Called(ctx, fromAddress, toAddress, payload, gasLimit, idempotencyKey) + + if len(ret) == 0 { + panic("no return value specified for CreateEthTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, common.Address, []byte, uint32, *string) error); ok { + r0 = rf(ctx, fromAddress, toAddress, payload, gasLimit, idempotencyKey) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteFluxMonitorRoundsBackThrough provides a mock function with given fields: aggregator, roundID +func (_m *ORM) DeleteFluxMonitorRoundsBackThrough(aggregator common.Address, roundID uint32) error { + ret := _m.Called(aggregator, roundID) + + if len(ret) == 0 { + panic("no return value specified for DeleteFluxMonitorRoundsBackThrough") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.Address, uint32) error); ok { + r0 = rf(aggregator, roundID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FindOrCreateFluxMonitorRoundStats provides a mock function with given fields: aggregator, roundID, newRoundLogs +func (_m *ORM) FindOrCreateFluxMonitorRoundStats(aggregator common.Address, roundID uint32, newRoundLogs uint) (fluxmonitorv2.FluxMonitorRoundStatsV2, error) { + ret := _m.Called(aggregator, roundID, newRoundLogs) + + if len(ret) == 0 { + panic("no return value specified for FindOrCreateFluxMonitorRoundStats") + } + + var r0 fluxmonitorv2.FluxMonitorRoundStatsV2 + var r1 error + if rf, ok := ret.Get(0).(func(common.Address, uint32, uint) (fluxmonitorv2.FluxMonitorRoundStatsV2, error)); ok { + return rf(aggregator, roundID, newRoundLogs) + } + if rf, ok := ret.Get(0).(func(common.Address, uint32, uint) fluxmonitorv2.FluxMonitorRoundStatsV2); ok { + r0 = rf(aggregator, roundID, 
newRoundLogs) + } else { + r0 = ret.Get(0).(fluxmonitorv2.FluxMonitorRoundStatsV2) + } + + if rf, ok := ret.Get(1).(func(common.Address, uint32, uint) error); ok { + r1 = rf(aggregator, roundID, newRoundLogs) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MostRecentFluxMonitorRoundID provides a mock function with given fields: aggregator +func (_m *ORM) MostRecentFluxMonitorRoundID(aggregator common.Address) (uint32, error) { + ret := _m.Called(aggregator) + + if len(ret) == 0 { + panic("no return value specified for MostRecentFluxMonitorRoundID") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(common.Address) (uint32, error)); ok { + return rf(aggregator) + } + if rf, ok := ret.Get(0).(func(common.Address) uint32); ok { + r0 = rf(aggregator) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(common.Address) error); ok { + r1 = rf(aggregator) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateFluxMonitorRoundStats provides a mock function with given fields: aggregator, roundID, runID, newRoundLogsAddition, qopts +func (_m *ORM) UpdateFluxMonitorRoundStats(aggregator common.Address, roundID uint32, runID int64, newRoundLogsAddition uint, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, aggregator, roundID, runID, newRoundLogsAddition) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for UpdateFluxMonitorRoundStats") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.Address, uint32, int64, uint, ...pg.QOpt) error); ok { + r0 = rf(aggregator, roundID, runID, newRoundLogsAddition, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewORM creates a new instance of ORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewORM(t interface { + mock.TestingT + Cleanup(func()) +}) *ORM { + mock := &ORM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/fluxmonitorv2/models.go b/core/services/fluxmonitorv2/models.go new file mode 100644 index 00000000..8f2b9333 --- /dev/null +++ b/core/services/fluxmonitorv2/models.go @@ -0,0 +1,17 @@ +package fluxmonitorv2 + +import ( + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/null" +) + +// FluxMonitorRoundStatsV2 defines the stats for a round +type FluxMonitorRoundStatsV2 struct { + ID uint64 + PipelineRunID null.Int64 + Aggregator common.Address + RoundID uint32 + NumNewRoundLogs uint64 + NumSubmissions uint64 +} diff --git a/core/services/fluxmonitorv2/orm.go b/core/services/fluxmonitorv2/orm.go new file mode 100644 index 00000000..f62b3a79 --- /dev/null +++ b/core/services/fluxmonitorv2/orm.go @@ -0,0 +1,136 @@ +package fluxmonitorv2 + +import ( + "context" + "database/sql" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type transmitter interface { + CreateTransaction(ctx context.Context, txRequest txmgr.TxRequest) (tx txmgr.Tx, err error) +} + +//go:generate mockery --quiet --name ORM --output ./mocks/ --case=underscore + +// ORM defines an interface for database commands related to Flux Monitor v2 +type ORM interface { + MostRecentFluxMonitorRoundID(aggregator common.Address) (uint32, error) + DeleteFluxMonitorRoundsBackThrough(aggregator common.Address, roundID uint32) error + FindOrCreateFluxMonitorRoundStats(aggregator common.Address, roundID uint32, newRoundLogs uint) 
(FluxMonitorRoundStatsV2, error) + UpdateFluxMonitorRoundStats(aggregator common.Address, roundID uint32, runID int64, newRoundLogsAddition uint, qopts ...pg.QOpt) error + CreateEthTransaction(ctx context.Context, fromAddress, toAddress common.Address, payload []byte, gasLimit uint32, idempotencyKey *string) error + CountFluxMonitorRoundStats() (count int, err error) +} + +type orm struct { + q pg.Q + txm transmitter + strategy types.TxStrategy + checker txmgr.TransmitCheckerSpec + logger logger.Logger +} + +// NewORM initializes a new ORM +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig, txm transmitter, strategy types.TxStrategy, checker txmgr.TransmitCheckerSpec) ORM { + namedLogger := lggr.Named("FluxMonitorORM") + q := pg.NewQ(db, namedLogger, cfg) + return &orm{ + q, + txm, + strategy, + checker, + namedLogger, + } +} + +// MostRecentFluxMonitorRoundID finds roundID of the most recent round that the +// provided oracle address submitted to +func (o *orm) MostRecentFluxMonitorRoundID(aggregator common.Address) (uint32, error) { + var stats FluxMonitorRoundStatsV2 + err := o.q.Get(&stats, `SELECT * FROM flux_monitor_round_stats_v2 WHERE aggregator = $1 ORDER BY round_id DESC LIMIT 1`, aggregator) + return stats.RoundID, errors.Wrap(err, "MostRecentFluxMonitorRoundID failed") +} + +// DeleteFluxMonitorRoundsBackThrough deletes all the RoundStat records for a +// given oracle address starting from the most recent round back through the +// given round +func (o *orm) DeleteFluxMonitorRoundsBackThrough(aggregator common.Address, roundID uint32) error { + _, err := o.q.Exec(` + DELETE FROM flux_monitor_round_stats_v2 + WHERE aggregator = $1 + AND round_id >= $2 + `, aggregator, roundID) + return errors.Wrap(err, "DeleteFluxMonitorRoundsBackThrough failed") +} + +// FindOrCreateFluxMonitorRoundStats find the round stats record for a given +// oracle on a given round, or creates it if no record exists +func (o *orm) 
FindOrCreateFluxMonitorRoundStats(aggregator common.Address, roundID uint32, newRoundLogs uint) (stats FluxMonitorRoundStatsV2, err error) { + err = o.q.Transaction(func(tx pg.Queryer) error { + err = tx.Get(&stats, + `INSERT INTO flux_monitor_round_stats_v2 (aggregator, round_id, num_new_round_logs, num_submissions) VALUES ($1, $2, $3, 0) + ON CONFLICT (aggregator, round_id) DO NOTHING`, + aggregator, roundID, newRoundLogs) + if errors.Is(err, sql.ErrNoRows) { + err = tx.Get(&stats, `SELECT * FROM flux_monitor_round_stats_v2 WHERE aggregator=$1 AND round_id=$2`, aggregator, roundID) + } + return err + }) + + return stats, errors.Wrap(err, "FindOrCreateFluxMonitorRoundStats failed") +} + +// UpdateFluxMonitorRoundStats tries to create a RoundStat record for the given oracle +// at the given round. If one already exists, it increments the num_submissions column. +func (o *orm) UpdateFluxMonitorRoundStats(aggregator common.Address, roundID uint32, runID int64, newRoundLogsAddition uint, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) 
+ err := q.ExecQ(` + INSERT INTO flux_monitor_round_stats_v2 ( + aggregator, round_id, pipeline_run_id, num_new_round_logs, num_submissions + ) VALUES ( + $1, $2, $3, $4, 1 + ) ON CONFLICT (aggregator, round_id) + DO UPDATE SET + num_new_round_logs = flux_monitor_round_stats_v2.num_new_round_logs + $5, + num_submissions = flux_monitor_round_stats_v2.num_submissions + 1, + pipeline_run_id = EXCLUDED.pipeline_run_id + `, aggregator, roundID, runID, newRoundLogsAddition, newRoundLogsAddition) + return errors.Wrapf(err, "Failed to insert round stats for roundID=%v, runID=%v, newRoundLogsAddition=%v", roundID, runID, newRoundLogsAddition) +} + +// CountFluxMonitorRoundStats counts the total number of records +func (o *orm) CountFluxMonitorRoundStats() (count int, err error) { + err = o.q.Get(&count, `SELECT count(*) FROM flux_monitor_round_stats_v2`) + return count, errors.Wrap(err, "CountFluxMonitorRoundStats failed") +} + +// CreateEthTransaction creates an ethereum transaction for the Txm to pick up +func (o *orm) CreateEthTransaction( + ctx context.Context, + fromAddress common.Address, + toAddress common.Address, + payload []byte, + gasLimit uint32, + idempotencyKey *string, +) (err error) { + + _, err = o.txm.CreateTransaction(ctx, txmgr.TxRequest{ + IdempotencyKey: idempotencyKey, + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + Strategy: o.strategy, + Checker: o.checker, + }) + return errors.Wrap(err, "Skipped Flux Monitor submission") +} diff --git a/core/services/fluxmonitorv2/orm_test.go b/core/services/fluxmonitorv2/orm_test.go new file mode 100644 index 00000000..25dde231 --- /dev/null +++ b/core/services/fluxmonitorv2/orm_test.go @@ -0,0 +1,197 @@ +package fluxmonitorv2_test + +import ( + "testing" + "time" + + "github.com/google/uuid" + "gopkg.in/guregu/null.v4" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + commontxmmocks 
"github.com/goplugin/pluginv3.0/v2/common/txmgr/types/mocks" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + txmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestORM_MostRecentFluxMonitorRoundID(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := pgtest.NewQConfig(true) + orm := newORM(t, db, cfg, nil) + + address := testutils.NewAddress() + + // Setup the rounds + for round := uint32(0); round < 10; round++ { + _, err := orm.FindOrCreateFluxMonitorRoundStats(address, round, 1) + require.NoError(t, err) + } + + count, err := orm.CountFluxMonitorRoundStats() + require.NoError(t, err) + require.Equal(t, 10, count) + + // Ensure round stats are not created again for the same address/roundID + stats, err := orm.FindOrCreateFluxMonitorRoundStats(address, uint32(0), 1) + require.NoError(t, err) + require.Equal(t, uint32(0), stats.RoundID) + require.Equal(t, address, stats.Aggregator) + require.Equal(t, uint64(1), stats.NumNewRoundLogs) + + count, err = orm.CountFluxMonitorRoundStats() + require.NoError(t, err) + require.Equal(t, 10, count) + + roundID, err := orm.MostRecentFluxMonitorRoundID(testutils.NewAddress()) + require.Error(t, err) + require.Equal(t, uint32(0), roundID) + + roundID, err = orm.MostRecentFluxMonitorRoundID(address) + require.NoError(t, err) + require.Equal(t, uint32(9), roundID) + + 
// Deleting rounds against a new address should incur no changes + err = orm.DeleteFluxMonitorRoundsBackThrough(testutils.NewAddress(), 5) + require.NoError(t, err) + + count, err = orm.CountFluxMonitorRoundStats() + require.NoError(t, err) + require.Equal(t, 10, count) + + // Deleting rounds against the address + err = orm.DeleteFluxMonitorRoundsBackThrough(address, 5) + require.NoError(t, err) + + count, err = orm.CountFluxMonitorRoundStats() + require.NoError(t, err) + require.Equal(t, 5, count) +} + +func TestORM_UpdateFluxMonitorRoundStats(t *testing.T) { + t.Parallel() + + cfg := configtest.NewGeneralConfig(t, nil) + db := pgtest.NewSqlxDB(t) + + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + lggr := logger.TestLogger(t) + + // Instantiate a real pipeline ORM because we need to create a pipeline run + // for the foreign key constraint of the stats record + pipelineORM := pipeline.NewORM(db, lggr, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + bridgeORM := bridges.NewORM(db, lggr, cfg.Database()) + + // Instantiate a real job ORM because we need to create a job to satisfy + // a check in pipeline.CreateRun + jobORM := job.NewORM(db, pipelineORM, bridgeORM, keyStore, lggr, cfg.Database()) + orm := newORM(t, db, cfg.Database(), nil) + + address := testutils.NewAddress() + var roundID uint32 = 1 + + jb := makeJob(t) + require.NoError(t, jobORM.CreateJob(jb)) + + for expectedCount := uint64(1); expectedCount < 4; expectedCount++ { + f := time.Now() + run := + &pipeline.Run{ + State: pipeline.RunStatusCompleted, + PipelineSpecID: jb.PipelineSpec.ID, + PipelineSpec: *jb.PipelineSpec, + CreatedAt: time.Now(), + FinishedAt: null.TimeFrom(f), + AllErrors: pipeline.RunErrors{null.String{}}, + FatalErrors: pipeline.RunErrors{null.String{}}, + Outputs: pipeline.JSONSerializable{Val: []interface{}{10}, Valid: true}, + PipelineTaskRuns: []pipeline.TaskRun{ + { + ID: uuid.New(), + Type: pipeline.TaskTypeHTTP, + Output: pipeline.JSONSerializable{Val: 10, 
Valid: true}, + CreatedAt: f, + FinishedAt: null.TimeFrom(f), + }, + }, + } + err := pipelineORM.InsertFinishedRun(run, true) + require.NoError(t, err) + + err = orm.UpdateFluxMonitorRoundStats(address, roundID, run.ID, 0) + require.NoError(t, err) + + stats, err := orm.FindOrCreateFluxMonitorRoundStats(address, roundID, 0) + require.NoError(t, err) + require.Equal(t, expectedCount, stats.NumSubmissions) + require.True(t, stats.PipelineRunID.Valid) + require.Equal(t, run.ID, stats.PipelineRunID.Int64) + } +} + +func makeJob(t *testing.T) *job.Job { + t.Helper() + + return &job.Job{ + ID: 1, + Type: "fluxmonitor", + SchemaVersion: 1, + ExternalJobID: uuid.New(), + FluxMonitorSpec: &job.FluxMonitorSpec{ + ID: 2, + ContractAddress: cltest.NewEIP55Address(), + Threshold: 0.5, + PollTimerPeriod: 1 * time.Second, + PollTimerDisabled: false, + IdleTimerPeriod: 1 * time.Minute, + IdleTimerDisabled: false, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + EVMChainID: (*big.Big)(testutils.FixtureChainID), + }, + } +} + +func TestORM_CreateEthTransaction(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := pgtest.NewQConfig(true) + ethKeyStore := cltest.NewKeyStore(t, db, cfg).Eth() + + strategy := commontxmmocks.NewTxStrategy(t) + + var ( + txm = txmmocks.NewMockEvmTxManager(t) + orm = fluxmonitorv2.NewORM(db, logger.TestLogger(t), cfg, txm, strategy, txmgr.TransmitCheckerSpec{}) + + _, from = cltest.MustInsertRandomKey(t, ethKeyStore) + to = testutils.NewAddress() + payload = []byte{1, 0, 0} + gasLimit = uint32(21000) + ) + idempotencyKey := uuid.New().String() + txm.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + IdempotencyKey: &idempotencyKey, + FromAddress: from, + ToAddress: to, + EncodedPayload: payload, + FeeLimit: gasLimit, + Meta: nil, + Strategy: strategy, + }).Return(txmgr.Tx{}, nil).Once() + + require.NoError(t, orm.CreateEthTransaction(testutils.Context(t), from, to, payload, gasLimit, &idempotencyKey)) +} diff --git 
a/core/services/fluxmonitorv2/payment_checker.go b/core/services/fluxmonitorv2/payment_checker.go new file mode 100644 index 00000000..4ee8a48c --- /dev/null +++ b/core/services/fluxmonitorv2/payment_checker.go @@ -0,0 +1,50 @@ +package fluxmonitorv2 + +import ( + "math/big" + + "github.com/goplugin/plugin-common/pkg/assets" +) + +// MinFundedRounds defines the minimum number of rounds that needs to be paid +// to oracles on a contract +const MinFundedRounds int64 = 3 + +// PaymentChecker provides helper functions to check whether payments are valid +type PaymentChecker struct { + // The minimum amount for a payment set in the ENV Var + MinContractPayment *assets.Link + // The minimum amount for a payment set in the job + MinJobPayment *assets.Link +} + +// NewPaymentChecker constructs a new payment checker +func NewPaymentChecker(minContractPayment, minJobPayment *assets.Link) *PaymentChecker { + return &PaymentChecker{ + MinContractPayment: minContractPayment, + MinJobPayment: minJobPayment, + } +} + +// SufficientFunds checks if the contract has sufficient funding to pay all the +// oracles on a contract for a minimum number of rounds, based on the payment +// amount in the contract +func (c *PaymentChecker) SufficientFunds(availableFunds *big.Int, paymentAmount *big.Int, oracleCount uint8) bool { + min := big.NewInt(int64(oracleCount)) + min = min.Mul(min, big.NewInt(MinFundedRounds)) + min = min.Mul(min, paymentAmount) + + return availableFunds.Cmp(min) >= 0 +} + +// SufficientPayment checks if the available payment is enough to submit an +// answer. It compares the payment amount on chain with the min payment amount +// listed in the job / ENV var. 
+func (c *PaymentChecker) SufficientPayment(payment *big.Int) bool { + aboveOrEqMinGlobalPayment := payment.Cmp(c.MinContractPayment.ToInt()) >= 0 + aboveOrEqMinJobPayment := true + if c.MinJobPayment != nil { + aboveOrEqMinJobPayment = payment.Cmp(c.MinJobPayment.ToInt()) >= 0 + } + return aboveOrEqMinGlobalPayment && aboveOrEqMinJobPayment +} diff --git a/core/services/fluxmonitorv2/payment_checker_test.go b/core/services/fluxmonitorv2/payment_checker_test.go new file mode 100644 index 00000000..2dc4923e --- /dev/null +++ b/core/services/fluxmonitorv2/payment_checker_test.go @@ -0,0 +1,93 @@ +package fluxmonitorv2_test + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" +) + +func TestPaymentChecker_SufficientFunds(t *testing.T) { + var ( + checker = fluxmonitorv2.NewPaymentChecker(nil, nil) + payment = 100 + rounds = 3 + oracleCount = 21 + min = payment * rounds * oracleCount + ) + + testCases := []struct { + name string + funds int + want bool + }{ + {"above minimum", min + 1, true}, + {"equal to minimum", min, true}, + {"below minimum", min - 1, false}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + assert.Equal(t, tc.want, checker.SufficientFunds( + big.NewInt(int64(tc.funds)), + big.NewInt(int64(payment)), + uint8(oracleCount), + )) + }) + } +} + +func TestPaymentChecker_SufficientPayment(t *testing.T) { + var ( + payment int64 = 10 + eq = payment + gt = payment + 1 + lt = payment - 1 + ) + + testCases := []struct { + name string + minContractPayment int64 + minJobPayment interface{} // nil or int64 + want bool + }{ + {"payment above min contract payment, no min job payment", lt, nil, true}, + {"payment equal to min contract payment, no min job payment", eq, nil, true}, + {"payment below min contract payment, no min job payment", gt, nil, false}, + + 
{"payment above min contract payment, above min job payment", lt, lt, true}, + {"payment equal to min contract payment, above min job payment", eq, lt, true}, + {"payment below min contract payment, above min job payment", gt, lt, false}, + + {"payment above min contract payment, equal to min job payment", lt, eq, true}, + {"payment equal to min contract payment, equal to min job payment", eq, eq, true}, + {"payment below min contract payment, equal to min job payment", gt, eq, false}, + + {"payment above minimum contract payment, below min job payment", lt, gt, false}, + {"payment equal to minimum contract payment, below min job payment", eq, gt, false}, + {"payment below minimum contract payment, below min job payment", gt, gt, false}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + var minJobPayment *assets.Link + if tc.minJobPayment != nil { + mjb := assets.Link(*big.NewInt(tc.minJobPayment.(int64))) + minJobPayment = &mjb + } + + checker := fluxmonitorv2.NewPaymentChecker(assets.NewLinkFromJuels(tc.minContractPayment), minJobPayment) + + assert.Equal(t, tc.want, checker.SufficientPayment(big.NewInt(payment))) + }) + } +} diff --git a/core/services/fluxmonitorv2/poll_manager.go b/core/services/fluxmonitorv2/poll_manager.go new file mode 100644 index 00000000..a10440e3 --- /dev/null +++ b/core/services/fluxmonitorv2/poll_manager.go @@ -0,0 +1,355 @@ +package fluxmonitorv2 + +import ( + "fmt" + "sync/atomic" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type PollManagerConfig struct { + IsHibernating bool + PollTickerInterval time.Duration + PollTickerDisabled bool + IdleTimerPeriod time.Duration + IdleTimerDisabled bool + DrumbeatSchedule string + DrumbeatEnabled bool + DrumbeatRandomDelay time.Duration + HibernationPollPeriod time.Duration + 
MinRetryBackoffDuration time.Duration + MaxRetryBackoffDuration time.Duration +} + +// PollManager manages the tickers/timers which cause the Flux Monitor to start +// a poll. It contains 4 types of tickers and timers which determine when to +// initiate a poll +// +// HibernationTimer - The PollManager can be set to hibernate, which disables all +// other ticker/timers, and enables the hibernation timer. Upon expiry of the +// hibernation timer, a poll is requested. When the PollManager is awakened, the +// other tickers and timers are enabled with the current round state, and the +// hibernation timer is disabled. +// +// PollTicker - The poll ticker requests a poll at a given interval defined in +// PollManagerConfig. Disabling this through config will permanently disable +// the ticker, even through resets. +// +// IdleTimer - The idle timer requests a poll after no poll has taken place +// since the last round was started and the IdleTimerPeriod has elapsed. This can +// also be known as a heartbeat. +// +// RoundTimer - The round timer requests a poll when the round state provided by +// the contract has timed out. +// +// RetryTicker - The retry ticker requests a poll with a backoff duration.
This +// is started when the idle timer fails, and will poll with a maximum backoff +// of either 1 hour or the idle timer period if it is lower +type PollManager struct { + cfg PollManagerConfig + + isHibernating atomic.Bool + hibernationTimer utils.ResettableTimer + pollTicker utils.PausableTicker + idleTimer utils.ResettableTimer + roundTimer utils.ResettableTimer + retryTicker utils.BackoffTicker + drumbeat utils.CronTicker + chPoll chan PollRequest + + logger logger.Logger +} + +// NewPollManager initializes a new PollManager +func NewPollManager(cfg PollManagerConfig, logger logger.Logger) (*PollManager, error) { + minBackoffDuration := cfg.MinRetryBackoffDuration + if cfg.IdleTimerPeriod < minBackoffDuration { + minBackoffDuration = cfg.IdleTimerPeriod + } + maxBackoffDuration := cfg.MaxRetryBackoffDuration + if cfg.IdleTimerPeriod < maxBackoffDuration { + maxBackoffDuration = cfg.IdleTimerPeriod + } + // Always initialize the idle timer so that no matter what it has a ticker + // and won't get starved by an old startedAt timestamp from the oracle state on boot. 
+ var idleTimer = utils.NewResettableTimer() + if !cfg.IdleTimerDisabled { + idleTimer.Reset(cfg.IdleTimerPeriod) + } + + p := &PollManager{ + cfg: cfg, + logger: logger.Named("PollManager"), + + hibernationTimer: utils.NewResettableTimer(), + pollTicker: utils.NewPausableTicker(cfg.PollTickerInterval), + idleTimer: idleTimer, + roundTimer: utils.NewResettableTimer(), + retryTicker: utils.NewBackoffTicker(minBackoffDuration, maxBackoffDuration), + chPoll: make(chan PollRequest), + } + var err error + if cfg.DrumbeatEnabled { + p.drumbeat, err = utils.NewCronTicker(cfg.DrumbeatSchedule) + if err != nil { + return nil, err + } + } + p.isHibernating.Store(cfg.IsHibernating) + return p, nil +} + +// PollTickerTicks ticks on a given interval +func (pm *PollManager) PollTickerTicks() <-chan time.Time { + return pm.pollTicker.Ticks() +} + +// IdleTimerTicks ticks after a given period +func (pm *PollManager) IdleTimerTicks() <-chan time.Time { + return pm.idleTimer.Ticks() +} + +// HibernationTimerTicks ticks after a given period +func (pm *PollManager) HibernationTimerTicks() <-chan time.Time { + return pm.hibernationTimer.Ticks() +} + +// RoundTimerTicks ticks after a given period +func (pm *PollManager) RoundTimerTicks() <-chan time.Time { + return pm.roundTimer.Ticks() +} + +// RetryTickerTicks ticks with a backoff when the retry ticker is activated +func (pm *PollManager) RetryTickerTicks() <-chan time.Time { + return pm.retryTicker.Ticks() +} + +// DrumbeatTicks ticks on a cron schedule when the drumbeat ticker is activated +func (pm *PollManager) DrumbeatTicks() <-chan time.Time { + return pm.drumbeat.Ticks() +} + +// Poll returns a channel which the manager will use to send polling requests +// +// Note: In the future, we should change the tickers above to send their request +// through this channel to simplify the listener. 
+func (pm *PollManager) Poll() <-chan PollRequest { + return pm.chPoll +} + +// Start initializes all the timers and determines whether to go into immediate +// hibernation. +func (pm *PollManager) Start(hibernate bool, roundState flux_aggregator_wrapper.OracleRoundState) { + pm.isHibernating.Store(hibernate) + + if pm.ShouldPerformInitialPoll() { + // We want this to be non blocking but if there is no receiver for the + // polling channel, this go routine would hang around forever. Since we + // should always have a receiver for the polling channel, set a timeout + // of 5 seconds to kill the goroutine. + go func() { + select { + case pm.chPoll <- PollRequest{PollRequestTypeInitial, time.Now()}: + case <-time.After(5 * time.Second): + pm.logger.Warn("Start up poll was not consumed") + } + }() + } + + pm.maybeWarnAboutIdleAndPollIntervals() + + if hibernate { + pm.Hibernate() + } else { + pm.Awaken(roundState) + } +} + +// ShouldPerformInitialPoll determines whether to perform an initial poll +func (pm *PollManager) ShouldPerformInitialPoll() bool { + return (!pm.cfg.PollTickerDisabled || !pm.cfg.IdleTimerDisabled) && !pm.isHibernating.Load() +} + +// Reset restarts the poll, idle, round and drumbeat tickers from the given +// round state. When hibernating, only the hibernation timer is reset.
+func (pm *PollManager) Reset(roundState flux_aggregator_wrapper.OracleRoundState) { + if pm.isHibernating.Load() { + pm.hibernationTimer.Reset(pm.cfg.HibernationPollPeriod) + } else { + pm.startPollTicker() + pm.startIdleTimer(roundState.StartedAt) + pm.startRoundTimer(roundStateTimesOutAt(roundState)) + pm.startDrumbeat() + } +} + +// ResetIdleTimer resets the idle timer unless hibernating +func (pm *PollManager) ResetIdleTimer(roundStartedAtUTC uint64) { + if !pm.isHibernating.Load() { + pm.startIdleTimer(roundStartedAtUTC) + } +} + +// StartRetryTicker starts the retry ticker +func (pm *PollManager) StartRetryTicker() bool { + return pm.retryTicker.Start() +} + +// StopRetryTicker stops the retry ticker +func (pm *PollManager) StopRetryTicker() { + if pm.retryTicker.Stop() { + pm.logger.Debug("stopped retry ticker") + } +} + +// Stop stops all timers/tickers +func (pm *PollManager) Stop() { + pm.hibernationTimer.Stop() + pm.pollTicker.Destroy() + pm.idleTimer.Stop() + pm.roundTimer.Stop() + pm.drumbeat.Stop() +} + +// Hibernate sets hibernation to true, starts the hibernation timer and stops +// all other ticker/timers +func (pm *PollManager) Hibernate() { + pm.logger.Infof("entering hibernation mode (period: %v)", pm.cfg.HibernationPollPeriod) + + // Start the hibernation timer + pm.isHibernating.Store(true) + pm.hibernationTimer.Reset(pm.cfg.HibernationPollPeriod) + + // Stop the other tickers + pm.pollTicker.Pause() + pm.idleTimer.Stop() + pm.roundTimer.Stop() + pm.drumbeat.Stop() + pm.StopRetryTicker() +} + +// Awaken sets hibernation to false, stops the hibernation timer and starts all +// other tickers +func (pm *PollManager) Awaken(roundState flux_aggregator_wrapper.OracleRoundState) { + pm.logger.Info("exiting hibernation mode, reactivating contract") + + // Stop the hibernation timer + pm.isHibernating.Store(false) + pm.hibernationTimer.Stop() + + // Start the other tickers + pm.startPollTicker() + pm.startIdleTimer(roundState.StartedAt) + 
pm.startRoundTimer(roundStateTimesOutAt(roundState)) + pm.startDrumbeat() +} + +// startPollTicker starts the poll ticker if it is enabled +func (pm *PollManager) startPollTicker() { + if pm.cfg.PollTickerDisabled { + pm.pollTicker.Pause() + + return + } + + pm.pollTicker.Resume() +} + +// startIdleTimer starts the idle timer if it is enabled +func (pm *PollManager) startIdleTimer(roundStartedAtUTC uint64) { + + if pm.cfg.IdleTimerDisabled { + pm.idleTimer.Stop() + + return + } + + // Keep using the idleTimer we already have + if roundStartedAtUTC == 0 { + pm.logger.Debugw("not resetting idleTimer, no active round") + + return + } + + startedAt := time.Unix(int64(roundStartedAtUTC), 0) + deadline := startedAt.Add(pm.cfg.IdleTimerPeriod) + deadlineDuration := time.Until(deadline) + + log := pm.logger.With( + "pollFrequency", pm.cfg.PollTickerInterval, + "idleDuration", pm.cfg.IdleTimerPeriod, + "startedAt", roundStartedAtUTC, + "timeUntilIdleDeadline", deadlineDuration, + ) + + if deadlineDuration <= 0 { + log.Debugw("not resetting idleTimer, round was started further in the past than idle timer period") + return + } + + // Stop the retry timer when the idle timer is started + if pm.retryTicker.Stop() { + pm.logger.Debugw("stopped the retryTicker") + } + + pm.idleTimer.Reset(deadlineDuration) + log.Debugw("resetting idleTimer") +} + +// startRoundTimer starts the round timer +func (pm *PollManager) startRoundTimer(roundTimesOutAt uint64) { + log := pm.logger.With( + "pollFrequency", pm.cfg.PollTickerInterval, + "idleDuration", pm.cfg.IdleTimerPeriod, + "timesOutAt", roundTimesOutAt, + ) + + if roundTimesOutAt == 0 { + log.Debugw("disabling roundTimer, no active round") + pm.roundTimer.Stop() + + return + } + + timesOutAt := time.Unix(int64(roundTimesOutAt), 0) + timeoutDuration := time.Until(timesOutAt) + + if timeoutDuration <= 0 { + log.Debugw(fmt.Sprintf("disabling roundTimer, as the round is already past its timeout by %v", -timeoutDuration)) + 
pm.roundTimer.Stop() + + return + } + + pm.roundTimer.Reset(timeoutDuration) + log.Debugw("updating roundState.TimesOutAt", "value", roundTimesOutAt) +} + +// startDrumbeat starts the drumbeat ticker if it is enabled +func (pm *PollManager) startDrumbeat() { + if !pm.cfg.DrumbeatEnabled { + if pm.drumbeat.Stop() { + pm.logger.Debug("disabled drumbeat ticker") + } + return + } + + if pm.drumbeat.Start() { + pm.logger.Debugw("started drumbeat ticker", "schedule", pm.cfg.DrumbeatSchedule) + } +} + +func roundStateTimesOutAt(rs flux_aggregator_wrapper.OracleRoundState) uint64 { + return rs.StartedAt + rs.Timeout +} + +// maybeWarnAboutIdleAndPollIntervals warns when IdleTimerPeriod is shorter than PollTickerInterval +func (pm *PollManager) maybeWarnAboutIdleAndPollIntervals() { + if !pm.cfg.IdleTimerDisabled && !pm.cfg.PollTickerDisabled && pm.cfg.IdleTimerPeriod < pm.cfg.PollTickerInterval { + pm.logger.Warnw("The value of IdleTimerPeriod is lower than PollTickerInterval. The idle timer should usually be less frequent that poll", + "IdleTimerPeriod", pm.cfg.IdleTimerPeriod, "PollTickerInterval", pm.cfg.PollTickerInterval) + } +} diff --git a/core/services/fluxmonitorv2/poll_manager_test.go b/core/services/fluxmonitorv2/poll_manager_test.go new file mode 100644 index 00000000..d7446888 --- /dev/null +++ b/core/services/fluxmonitorv2/poll_manager_test.go @@ -0,0 +1,456 @@ +package fluxmonitorv2_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" +) + +var ( + pollTickerDefaultDuration = 200 * time.Millisecond + idleTickerDefaultDuration = 1 * time.Second // Setting this too low will cause the idle timer to fire before the assert +) + +func newPollManager(t *testing.T) *fluxmonitorv2.PollManager { + pm, err :=
fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + IsHibernating: false, + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: false, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: false, + HibernationPollPeriod: 24 * time.Hour, + }, logger.TestLogger(t)) + require.NoError(t, err) + return pm +} + +type tickChecker struct { + pollTicked bool + idleTicked bool + roundTicked bool + hibernationTicked bool + retryTicked bool + initialPoll bool +} + +// watchTicks watches the PollManager for ticks for the waitDuration +func watchTicks(t *testing.T, pm *fluxmonitorv2.PollManager, waitDuration time.Duration) tickChecker { + ticks := tickChecker{ + pollTicked: false, + idleTicked: false, + roundTicked: false, + hibernationTicked: false, + retryTicked: false, + initialPoll: false, + } + + waitCh := time.After(waitDuration) + for { + select { + case <-pm.PollTickerTicks(): + ticks.pollTicked = true + case <-pm.IdleTimerTicks(): + ticks.idleTicked = true + case <-pm.RoundTimerTicks(): + ticks.roundTicked = true + case <-pm.HibernationTimerTicks(): + ticks.hibernationTicked = true + case <-pm.RetryTickerTicks(): + ticks.retryTicked = true + case request := <-pm.Poll(): + switch request.Type { + case fluxmonitorv2.PollRequestTypeInitial: + ticks.initialPoll = true + // Don't do anything with the other types for now + default: + } + + case <-waitCh: + waitCh = nil + } + + if waitCh == nil { + break + } + } + + return ticks +} + +func TestPollManager_PollTicker(t *testing.T) { + pm, err := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: false, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: true, + HibernationPollPeriod: 24 * time.Hour, + }, logger.TestLogger(t)) + require.NoError(t, err) + + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{}) + t.Cleanup(pm.Stop) + + ticks := watchTicks(t, pm, 2*time.Second) + + 
assert.True(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) +} + +func TestPollManager_IdleTimer(t *testing.T) { + pm, err := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: 100 * time.Millisecond, + PollTickerDisabled: true, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: false, + HibernationPollPeriod: 24 * time.Hour, + }, logger.TestLogger(t)) + require.NoError(t, err) + + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()) - 10, // Even 10 seconds old the idle timer should tick + }) + t.Cleanup(pm.Stop) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.False(t, ticks.pollTicked) + assert.True(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) +} + +func TestPollManager_RoundTimer(t *testing.T) { + pm, err := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: true, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: true, + HibernationPollPeriod: 24 * time.Hour, + }, logger.TestLogger(t)) + require.NoError(t, err) + + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, // in seconds + }) + t.Cleanup(pm.Stop) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.False(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.True(t, ticks.roundTicked) +} + +func TestPollManager_RetryTimer(t *testing.T) { + pm, err := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: true, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: true, + HibernationPollPeriod: 24 * time.Hour, + MinRetryBackoffDuration: 200 * time.Microsecond, + MaxRetryBackoffDuration: 1 * time.Minute, + }, logger.TestLogger(t)) + require.NoError(t, err) + + pm.Start(false, 
flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 10000, // in seconds. Don't timeout the round + }) + t.Cleanup(pm.Stop) + + pm.StartRetryTicker() + + // Retry ticker fires + ticks := watchTicks(t, pm, 2*time.Second) + assert.False(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) + assert.True(t, ticks.retryTicked) + + pm.StopRetryTicker() + + ticks = watchTicks(t, pm, 2*time.Second) + assert.False(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) + assert.False(t, ticks.retryTicked) +} + +func TestPollManager_InitialPoll(t *testing.T) { + pm := newPollManager(t) + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{}) + + ticks := watchTicks(t, pm, 1*time.Second) + assert.True(t, ticks.initialPoll) +} + +func TestPollManager_HibernationTimer(t *testing.T) { + pm, err := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: true, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: true, + HibernationPollPeriod: 1 * time.Second, + }, logger.TestLogger(t)) + require.NoError(t, err) + + pm.Start(true, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, // in seconds + }) + t.Cleanup(pm.Stop) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.True(t, ticks.hibernationTicked) +} + +func TestPollManager_HibernationOnStartThenAwaken(t *testing.T) { + pm, err := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: false, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: false, + HibernationPollPeriod: 24 * time.Hour, + }, logger.TestLogger(t)) + require.NoError(t, err) + t.Cleanup(pm.Stop) + + pm.Start(true, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 
1, // in seconds + }) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.False(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) + + pm.Awaken(flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, + }) + + ticks = watchTicks(t, pm, 2*time.Second) + + assert.True(t, ticks.pollTicked) + assert.True(t, ticks.idleTicked) + assert.True(t, ticks.roundTicked) +} + +func TestPollManager_AwakeOnStartThenHibernate(t *testing.T) { + pm := newPollManager(t) + + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, + }) + t.Cleanup(pm.Stop) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.True(t, ticks.pollTicked) + assert.True(t, ticks.idleTicked) + assert.True(t, ticks.roundTicked) + + pm.Hibernate() + + ticks = watchTicks(t, pm, 2*time.Second) + + assert.False(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) +} + +func TestPollManager_ShouldPerformInitialPoll(t *testing.T) { + testCases := []struct { + name string + pollTickerDisabled bool + idleTimerDisabled bool + isHibernating bool + want bool + }{ + { + name: "perform poll - all enabled", + pollTickerDisabled: false, + idleTimerDisabled: false, + isHibernating: false, + want: true, + }, + { + name: "don't perform poll - hibernating", + pollTickerDisabled: false, + idleTimerDisabled: false, + isHibernating: true, + want: false, + }, + { + name: "perform poll - only pollTickerDisabled", + pollTickerDisabled: true, + idleTimerDisabled: false, + isHibernating: false, + want: true, + }, + { + name: "perform poll - only idleTimerDisabled", + pollTickerDisabled: false, + idleTimerDisabled: true, + isHibernating: false, + want: true, + }, + { + name: "don't perform poll - idleTimerDisabled and pollTimerDisabled", + pollTickerDisabled: true, + idleTimerDisabled: true, + isHibernating: false, + want: false, + }, + } + + for _, tc 
:= range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + pm, err := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + IsHibernating: tc.isHibernating, + HibernationPollPeriod: 24 * time.Hour, + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: tc.pollTickerDisabled, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: tc.idleTimerDisabled, + }, logger.TestLogger(t)) + require.NoError(t, err) + + assert.Equal(t, tc.want, pm.ShouldPerformInitialPoll()) + }) + + } +} + +func TestPollManager_Stop(t *testing.T) { + pm := newPollManager(t) + + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, + }) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.True(t, ticks.pollTicked) + assert.True(t, ticks.idleTicked) + assert.True(t, ticks.roundTicked) + + pm.Stop() + + ticks = watchTicks(t, pm, 2*time.Second) + + assert.False(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) +} + +func TestPollManager_ResetIdleTimer(t *testing.T) { + pm := newPollManager(t) + + // Start again in awake mode + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, + }) + t.Cleanup(pm.Stop) + + // Idle timer fires when not hibernating + ticks := watchTicks(t, pm, 2*time.Second) + assert.True(t, ticks.idleTicked) + + // Idle timer fires again after reset + pm.ResetIdleTimer(uint64(time.Now().Unix()) + 1) // 1 second after now + ticks = watchTicks(t, pm, 2*time.Second) + assert.True(t, ticks.idleTicked) +} + +func TestPollManager_ResetIdleTimerWhenHibernating(t *testing.T) { + pm := newPollManager(t) + + // Start in hibernation + pm.Start(true, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, // in seconds + }) + t.Cleanup(pm.Stop) + + // Idle timer does not fire when hibernating + ticks := watchTicks(t, pm, 
2*time.Second) + assert.False(t, ticks.idleTicked) + + // Idle timer does not reset because in hibernation, so it does not fire + pm.ResetIdleTimer(uint64(time.Now().Unix())) + ticks = watchTicks(t, pm, 2*time.Second) + assert.False(t, ticks.idleTicked) +} + +func TestPollManager_Reset(t *testing.T) { + pm := newPollManager(t) + + // Start again in awake mode + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, + }) + t.Cleanup(pm.Stop) + + // Ticker/timers fires when not hibernating + ticks := watchTicks(t, pm, 2*time.Second) + assert.True(t, ticks.pollTicked) + assert.True(t, ticks.idleTicked) + assert.True(t, ticks.roundTicked) + + // Idle timer fires again after reset + pm.Reset(flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, + }) + ticks = watchTicks(t, pm, 2*time.Second) + assert.True(t, ticks.pollTicked) + assert.True(t, ticks.idleTicked) + assert.True(t, ticks.roundTicked) +} + +func TestPollManager_ResetWhenHibernating(t *testing.T) { + pm := newPollManager(t) + + // Start in hibernation + pm.Start(true, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, // in seconds + }) + t.Cleanup(pm.Stop) + + // Ticker/timers do not fire when hibernating + ticks := watchTicks(t, pm, 2*time.Second) + assert.False(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) + + // Ticker/timers does not reset because in hibernation, so they do not fire + pm.Reset(flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, // in seconds + }) + ticks = watchTicks(t, pm, 2*time.Second) + assert.False(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) +} diff --git a/core/services/fluxmonitorv2/promfm/prometheus.go b/core/services/fluxmonitorv2/promfm/prometheus.go new file mode 100644 index 00000000..d7d9db83 
--- /dev/null +++ b/core/services/fluxmonitorv2/promfm/prometheus.go @@ -0,0 +1,59 @@ +package promfm + +import ( + "math/big" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/shopspring/decimal" +) + +var ( + ReportedValue = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "flux_monitor_reported_value", + Help: "Flux monitor's last reported price", + }, + []string{"job_spec_id"}, + ) + + SeenValue = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "flux_monitor_seen_value", + Help: "Flux monitor's last observed value from target", + }, + []string{"job_spec_id"}, + ) + + ReportedRound = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "flux_monitor_reported_round", + Help: "Flux monitor's last reported round", + }, + []string{"job_spec_id"}, + ) + + SeenRound = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "flux_monitor_seen_round", + Help: "Last seen round by other node operators", + }, + []string{"job_spec_id"}, + ) +) + +// SetDecimal sets a decimal metric +func SetDecimal(gauge prometheus.Gauge, arg decimal.Decimal) { + val, _ := arg.Float64() + gauge.Set(val) +} + +// SetBigInt sets a big.Int metric +func SetBigInt(gauge prometheus.Gauge, arg *big.Int) { + gauge.Set(float64(arg.Int64())) +} + +// SetUint32 sets a uint32 metric +func SetUint32(gauge prometheus.Gauge, arg uint32) { + gauge.Set(float64(arg)) +} diff --git a/core/services/fluxmonitorv2/submission_checker.go b/core/services/fluxmonitorv2/submission_checker.go new file mode 100644 index 00000000..b26e70ac --- /dev/null +++ b/core/services/fluxmonitorv2/submission_checker.go @@ -0,0 +1,26 @@ +package fluxmonitorv2 + +import ( + "math/big" + + "github.com/shopspring/decimal" +) + +// SubmissionChecker checks whether an answer is inside the allowable range. 
+type SubmissionChecker struct { + Min decimal.Decimal + Max decimal.Decimal +} + +// NewSubmissionChecker initializes a new SubmissionChecker +func NewSubmissionChecker(min *big.Int, max *big.Int) *SubmissionChecker { + return &SubmissionChecker{ + Min: decimal.NewFromBigInt(min, 0), + Max: decimal.NewFromBigInt(max, 0), + } +} + +// IsValid checks if the submission is between the min and max +func (c *SubmissionChecker) IsValid(answer decimal.Decimal) bool { + return answer.GreaterThanOrEqual(c.Min) && answer.LessThanOrEqual(c.Max) +} diff --git a/core/services/fluxmonitorv2/submission_checker_test.go b/core/services/fluxmonitorv2/submission_checker_test.go new file mode 100644 index 00000000..5be8f6a5 --- /dev/null +++ b/core/services/fluxmonitorv2/submission_checker_test.go @@ -0,0 +1,59 @@ +package fluxmonitorv2_test + +import ( + "math/big" + "testing" + + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" +) + +func TestSubmissionChecker_IsValid(t *testing.T) { + testCases := []struct { + name string + answer decimal.Decimal + want bool + }{ + { + name: "equal to min", + answer: decimal.NewFromFloat(1), + want: true, + }, + { + name: "in between", + answer: decimal.NewFromFloat(2), + want: true, + }, + { + name: "equal to max", + answer: decimal.NewFromFloat(3), + want: true, + }, + { + name: "below min", + answer: decimal.NewFromFloat(0), + want: false, + }, + { + name: "over max", + answer: decimal.NewFromFloat(4), + want: false, + }, + } + + checker := fluxmonitorv2.NewSubmissionChecker( + big.NewInt(1), + big.NewInt(3), + ) + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + assert.Equal(t, tc.want, checker.IsValid(tc.answer)) + }) + } +} diff --git a/core/services/fluxmonitorv2/validate.go b/core/services/fluxmonitorv2/validate.go new file mode 100644 index 00000000..febfb747 --- /dev/null +++ 
b/core/services/fluxmonitorv2/validate.go @@ -0,0 +1,90 @@ +package fluxmonitorv2 + +import ( + "time" + + "github.com/google/uuid" + + "github.com/pelletier/go-toml" + "github.com/pkg/errors" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type ValidationConfig interface { + DefaultHTTPTimeout() commonconfig.Duration +} + +func ValidatedFluxMonitorSpec(config ValidationConfig, ts string) (job.Job, error) { + var jb = job.Job{ + ExternalJobID: uuid.New(), // Default to generating a uuid, can be overwritten by the specified one in tomlString. + } + var spec job.FluxMonitorSpec + tree, err := toml.Load(ts) + if err != nil { + return jb, err + } + err = tree.Unmarshal(&jb) + if err != nil { + return jb, err + } + err = tree.Unmarshal(&spec) + if err != nil { + return jb, err + } + jb.FluxMonitorSpec = &spec + + if jb.Type != job.FluxMonitor { + return jb, errors.Errorf("unsupported type %s", jb.Type) + } + + // Find the smallest of all the timeouts + // and ensure the polling period is greater than that. + minTaskTimeout, aTimeoutSet, err := jb.Pipeline.MinTimeout() + if err != nil { + return jb, err + } + timeouts := []time.Duration{ + config.DefaultHTTPTimeout().Duration(), + time.Duration(jb.MaxTaskDuration), + } + if aTimeoutSet { + timeouts = append(timeouts, minTaskTimeout) + } + var minTimeout time.Duration = 1<<63 - 1 + for _, timeout := range timeouts { + if timeout < minTimeout { + minTimeout = timeout + } + } + + if jb.FluxMonitorSpec.DrumbeatEnabled { + err := utils.ValidateCronSchedule(jb.FluxMonitorSpec.DrumbeatSchedule) + if err != nil { + return jb, errors.Wrap(err, "while validating drumbeat schedule") + } + + if !spec.IdleTimerDisabled { + return jb, errors.Errorf("When the drumbeat ticker is enabled, the idle timer must be disabled. 
Please set IdleTimerDisabled to true") + } + } + + if !validatePollTimer(jb.FluxMonitorSpec.PollTimerDisabled, minTimeout, jb.FluxMonitorSpec.PollTimerPeriod) { + return jb, errors.Errorf("PollTimerPeriod (%v) must be equal or greater than the smallest value of MaxTaskDuration param, JobPipeline.HTTPRequest.DefaultTimeout config var, or MinTimeout of all tasks (%v)", jb.FluxMonitorSpec.PollTimerPeriod, minTimeout) + } + + return jb, nil +} + +// validatePollTime validates the period is greater than the min timeout for an +// enabled poll timer. +func validatePollTimer(disabled bool, minTimeout time.Duration, period time.Duration) bool { + // Disabled timers do not need to validate the period + if disabled { + return true + } + + return period >= minTimeout +} diff --git a/core/services/fluxmonitorv2/validate_test.go b/core/services/fluxmonitorv2/validate_test.go new file mode 100644 index 00000000..14760cec --- /dev/null +++ b/core/services/fluxmonitorv2/validate_test.go @@ -0,0 +1,220 @@ +package fluxmonitorv2 + +import ( + "regexp" + "testing" + "time" + + "github.com/goplugin/plugin-common/pkg/assets" + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/utils/tomlutils" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type testcfg struct{} + +func (testcfg) DefaultHTTPTimeout() commonconfig.Duration { + return *commonconfig.MustNewDuration(2 * time.Second) +} + +func TestValidate(t *testing.T) { + var tt = []struct { + name string + toml string + config ValidationConfig + assertion func(t *testing.T, os job.Job, err error) + }{ + { + name: "valid spec", + toml: ` +type = "fluxmonitor" +schemaVersion = 1 +name = "example flux monitor spec" +contractAddress = "0x3cCad4715152693fE3BC4460591e3D3Fbd071b42" +threshold = 0.5 +absoluteThreshold = 0.0 + +idleTimerDisabled = true + +pollTimerPeriod = "1m" +pollTimerDisabled = 
false + +drumbeatEnabled = true +drumbeatSchedule = "@every 1m" +drumbeatRandomDelay = "10s" + +minPayment = 1000000000000000000 + +observationSource = """ +// data source 1 +ds1 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": \\"ETH\\", \\"market\\": \\"USD\\"}"]; +ds1_parse [type=jsonparse path="latest"]; + +// data source 2 +ds2 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": \\"ETH\\", \\"market\\": \\"USD\\"}"]; +ds2_parse [type=jsonparse path="latest"]; + +ds1 -> ds1_parse -> answer1; +ds2 -> ds2_parse -> answer1; + +answer1 [type=median index=0]; +""" +`, + assertion: func(t *testing.T, j job.Job, err error) { + require.NoError(t, err) + require.NotNil(t, j.FluxMonitorSpec) + spec := j.FluxMonitorSpec + assert.Equal(t, "example flux monitor spec", j.Name.String) + assert.Equal(t, "fluxmonitor", j.Type.String()) + assert.Equal(t, uint32(1), j.SchemaVersion) + assert.Equal(t, "0x3cCad4715152693fE3BC4460591e3D3Fbd071b42", j.FluxMonitorSpec.ContractAddress.String()) + assert.Equal(t, tomlutils.Float32(0.5), spec.Threshold) + assert.Equal(t, tomlutils.Float32(0), spec.AbsoluteThreshold) + assert.Equal(t, true, spec.IdleTimerDisabled) + assert.Equal(t, 1*time.Minute, spec.PollTimerPeriod) + assert.Equal(t, false, spec.PollTimerDisabled) + assert.Equal(t, true, spec.DrumbeatEnabled) + assert.Equal(t, "@every 1m", spec.DrumbeatSchedule) + assert.Equal(t, 10*time.Second, spec.DrumbeatRandomDelay) + assert.Equal(t, false, spec.PollTimerDisabled) + assert.Equal(t, assets.NewLinkFromJuels(1000000000000000000), spec.MinPayment) + assert.NotZero(t, j.Pipeline) + }, + }, + { + name: "invalid contract addr", + toml: ` +type = "fluxmonitor" +schemaVersion = 1 +name = "example flux monitor spec" +contractAddress = "0x3CCad4715152693fE3BC4460591e3D3Fbd071b42" +threshold = 0.5 +absoluteThreshold = 0.0 + +idleTimerPeriod = "1s" +idleTimerDisabled = false + +pollTimerPeriod = "1m" +pollTimerDisabled = false + 
+observationSource = """ +ds1 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": \\"ETH\\", \\"market\\": \\"USD\\"}"]; +ds1_parse [type=jsonparse path="latest"]; +ds1 -> ds1_parse; +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.Nil(t, s.FluxMonitorSpec) + require.Error(t, err) + assert.Regexp(t, regexp.MustCompile("^.*is not a valid EIP55 formatted address$"), err.Error()) + }, + }, + { + name: "invalid poll interval", + toml: ` +type = "fluxmonitor" +schemaVersion = 1 +name = "example flux monitor spec" +contractAddress = "0x3cCad4715152693fE3BC4460591e3D3Fbd071b42" +maxTaskDuration = "1s" +threshold = 0.5 +absoluteThreshold = 0.0 + +idleTimerPeriod = "1s" +idleTimerDisabled = false + +pollTimerPeriod = "400ms" +pollTimerDisabled = false + +observationSource = """ +ds1 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": \\"ETH\\", \\"market\\": \\"USD\\"}" timeout="500ms"]; +ds1_parse [type=jsonparse path="latest"]; +ds1 -> ds1_parse; +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.Error(t, err) + assert.EqualError(t, err, "PollTimerPeriod (400ms) must be equal or greater than the smallest value of MaxTaskDuration param, JobPipeline.HTTPRequest.DefaultTimeout config var, or MinTimeout of all tasks (500ms)") + }, + }, + { + name: "drumbeat and idle both active", + toml: ` +type = "fluxmonitor" +schemaVersion = 1 +name = "example flux monitor spec" +contractAddress = "0x3cCad4715152693fE3BC4460591e3D3Fbd071b42" +maxTaskDuration = "1s" +threshold = 0.5 +absoluteThreshold = 0.0 + +idleTimerDisabled = false +idleTimerPeriod = "1s" + +drumbeatEnabled = true +drumbeatSchedule = "@every 1m" + +pollTimerPeriod = "800ms" +pollTimerDisabled = false + +observationSource = """ +ds1 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": \\"ETH\\", \\"market\\": \\"USD\\"}" timeout="500ms"]; +ds1_parse [type=jsonparse path="latest"]; +ds1 -> 
ds1_parse; +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.Error(t, err) + assert.EqualError(t, err, "When the drumbeat ticker is enabled, the idle timer must be disabled. Please set IdleTimerDisabled to true") + }, + }, + { + name: "integer thresholds", + toml: ` +type = "fluxmonitor" +schemaVersion = 1 +name = "ADA / USD version 3 contract 0x3e4a23dB81D1F1268983f0CE78F1a9dC329A5b36 1624906849640" +contractAddress = "0x3e4a23dB81D1F1268983f0CE78F1a9dC329A5b36" +precision = 8 +threshold = 2 +idleTimerPeriod = "1m0s" +idleTimerDisabled = false +pollTimerPeriod = "1m0s" +pollTimerDisabled = false +maxTaskDuration = "0s" +observationSource = """ + // Node definitions. + feed0 [method=POST name="bridge-coinmarketcap" requestData="{\\"data\\":{\\"from\\":\\"ADA\\",\\"to\\":\\"USD\\"}}" type=bridge]; + jsonparse0 [ path="data,result" type=jsonparse ]; + feed0 -> jsonparse0; + jsonparse0 -> median; + feed1 [method=POST name="bridge-kaiko" requestData="{\\"data\\":{\\"from\\":\\"ADA\\",\\"to\\":\\"USD\\"}}" type=bridge]; + feed1 -> jsonparse1; + jsonparse1 -> median; + jsonparse1 [path="data,result" type=jsonparse]; + feed2 [method=POST name="bridge-nomics" requestData="{\\"data\\":{\\"from\\":\\"ADA\\",\\"to\\":\\"USD\\"}}" type=bridge]; + jsonparse2 [path="data,result" type=jsonparse]; + feed2 -> jsonparse2; + jsonparse2 -> median; + // Edge definitions. 
+ median [type=median]; + multiply0 [times=100000000 type=multiply]; + median -> multiply0; +""" +externalJobID = "cfa3fa6b-2850-446b-b973-8f4c3b29d519" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.NoError(t, err) + }, + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + s, err := ValidatedFluxMonitorSpec(testcfg{}, tc.toml) + tc.assertion(t, s, err) + }) + } +} diff --git a/core/services/functions/connector_handler.go b/core/services/functions/connector_handler.go new file mode 100644 index 00000000..16b5e47d --- /dev/null +++ b/core/services/functions/connector_handler.go @@ -0,0 +1,358 @@ +package functions + +import ( + "bytes" + "context" + "crypto/ecdsa" + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "go.uber.org/multierr" + + ethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/common" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/connector" + hc "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/common" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions" + fallow "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/allowlist" + fsub "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/subscriptions" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + "github.com/goplugin/pluginv3.0/v2/core/services/s4" +) + +type functionsConnectorHandler struct { + services.StateMachine + + connector connector.GatewayConnector + signerKey *ecdsa.PrivateKey + 
nodeAddress string + storage s4.Storage + allowlist fallow.OnchainAllowlist + rateLimiter *hc.RateLimiter + subscriptions fsub.OnchainSubscriptions + minimumBalance assets.Link + listener FunctionsListener + offchainTransmitter OffchainTransmitter + allowedHeartbeatInitiators map[string]struct{} + heartbeatRequests map[RequestID]*HeartbeatResponse + requestTimeoutSec uint32 + orderedRequests []RequestID + mu sync.Mutex + chStop services.StopChan + shutdownWaitGroup sync.WaitGroup + lggr logger.Logger +} + +const HeartbeatCacheSize = 1000 + +var ( + _ connector.Signer = &functionsConnectorHandler{} + _ connector.GatewayConnectorHandler = &functionsConnectorHandler{} +) + +var ( + promStorageUserUpdatesCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "storage_user_updates", + Help: "Number of storage updates performed by users", + }, []string{}) +) + +// internal request ID is a hash of (sender, requestID) +func InternalId(sender []byte, requestId []byte) RequestID { + return RequestID(crypto.Keccak256Hash(append(sender, requestId...)).Bytes()) +} + +func NewFunctionsConnectorHandler(pluginConfig *config.PluginConfig, signerKey *ecdsa.PrivateKey, storage s4.Storage, allowlist fallow.OnchainAllowlist, rateLimiter *hc.RateLimiter, subscriptions fsub.OnchainSubscriptions, listener FunctionsListener, offchainTransmitter OffchainTransmitter, lggr logger.Logger) (*functionsConnectorHandler, error) { + if signerKey == nil || storage == nil || allowlist == nil || rateLimiter == nil || subscriptions == nil || listener == nil || offchainTransmitter == nil { + return nil, fmt.Errorf("all dependencies must be non-nil") + } + allowedHeartbeatInitiators := make(map[string]struct{}) + for _, initiator := range pluginConfig.AllowedHeartbeatInitiators { + allowedHeartbeatInitiators[strings.ToLower(initiator)] = struct{}{} + } + return &functionsConnectorHandler{ + nodeAddress: pluginConfig.GatewayConnectorConfig.NodeAddress, + signerKey: signerKey, + storage: storage, + 
allowlist: allowlist, + rateLimiter: rateLimiter, + subscriptions: subscriptions, + minimumBalance: pluginConfig.MinimumSubscriptionBalance, + listener: listener, + offchainTransmitter: offchainTransmitter, + allowedHeartbeatInitiators: allowedHeartbeatInitiators, + heartbeatRequests: make(map[RequestID]*HeartbeatResponse), + requestTimeoutSec: pluginConfig.RequestTimeoutSec, + chStop: make(services.StopChan), + lggr: lggr.Named("FunctionsConnectorHandler"), + }, nil +} + +func (h *functionsConnectorHandler) SetConnector(connector connector.GatewayConnector) { + h.connector = connector +} + +func (h *functionsConnectorHandler) Sign(data ...[]byte) ([]byte, error) { + return common.SignData(h.signerKey, data...) +} + +func (h *functionsConnectorHandler) HandleGatewayMessage(ctx context.Context, gatewayId string, msg *api.Message) { + body := &msg.Body + fromAddr := ethCommon.HexToAddress(body.Sender) + if !h.allowlist.Allow(fromAddr) { + h.lggr.Errorw("allowlist prevented the request from this address", "id", gatewayId, "address", fromAddr) + return + } + if !h.rateLimiter.Allow(body.Sender) { + h.lggr.Errorw("request rate-limited", "id", gatewayId, "address", fromAddr) + return + } + h.lggr.Debugw("handling gateway request", "id", gatewayId, "method", body.Method) + + switch body.Method { + case functions.MethodSecretsList: + h.handleSecretsList(ctx, gatewayId, body, fromAddr) + case functions.MethodSecretsSet: + if balance, err := h.subscriptions.GetMaxUserBalance(fromAddr); err != nil || balance.Cmp(h.minimumBalance.ToInt()) < 0 { + h.lggr.Errorw("user subscription has insufficient balance", "id", gatewayId, "address", fromAddr, "balance", balance, "minBalance", h.minimumBalance) + response := functions.ResponseBase{ + Success: false, + ErrorMessage: "user subscription has insufficient balance", + } + h.sendResponseAndLog(ctx, gatewayId, body, response) + return + } + h.handleSecretsSet(ctx, gatewayId, body, fromAddr) + case functions.MethodHeartbeat: + 
h.handleHeartbeat(ctx, gatewayId, body, fromAddr) + default: + h.lggr.Errorw("unsupported method", "id", gatewayId, "method", body.Method) + } +} + +func (h *functionsConnectorHandler) Start(ctx context.Context) error { + return h.StartOnce("FunctionsConnectorHandler", func() error { + if err := h.allowlist.Start(ctx); err != nil { + return err + } + if err := h.subscriptions.Start(ctx); err != nil { + return err + } + h.shutdownWaitGroup.Add(1) + go h.reportLoop() + return nil + }) +} + +func (h *functionsConnectorHandler) Close() error { + return h.StopOnce("FunctionsConnectorHandler", func() (err error) { + close(h.chStop) + err = multierr.Combine(err, h.allowlist.Close()) + err = multierr.Combine(err, h.subscriptions.Close()) + h.shutdownWaitGroup.Wait() + return + }) +} + +func (h *functionsConnectorHandler) handleSecretsList(ctx context.Context, gatewayId string, body *api.MessageBody, fromAddr ethCommon.Address) { + var response functions.SecretsListResponse + snapshot, err := h.storage.List(ctx, fromAddr) + if err == nil { + response.Success = true + response.Rows = make([]functions.SecretsListRow, len(snapshot)) + for i, row := range snapshot { + response.Rows[i] = functions.SecretsListRow{ + SlotID: row.SlotId, + Version: row.Version, + Expiration: row.Expiration, + } + } + } else { + response.ErrorMessage = fmt.Sprintf("Failed to list secrets: %v", err) + } + h.sendResponseAndLog(ctx, gatewayId, body, response) +} + +func (h *functionsConnectorHandler) handleSecretsSet(ctx context.Context, gatewayId string, body *api.MessageBody, fromAddr ethCommon.Address) { + var request functions.SecretsSetRequest + var response functions.SecretsSetResponse + err := json.Unmarshal(body.Payload, &request) + if err == nil { + key := s4.Key{ + Address: fromAddr, + SlotId: request.SlotID, + Version: request.Version, + } + record := s4.Record{ + Expiration: request.Expiration, + Payload: request.Payload, + } + h.lggr.Debugw("handling a secrets_set request", "address", 
fromAddr, "slotId", request.SlotID, "payloadVersion", request.Version, "expiration", request.Expiration) + err = h.storage.Put(ctx, &key, &record, request.Signature) + if err == nil { + response.Success = true + promStorageUserUpdatesCount.WithLabelValues().Inc() + } else { + response.ErrorMessage = fmt.Sprintf("Failed to set secret: %v", err) + } + } else { + response.ErrorMessage = fmt.Sprintf("Bad request to set secret: %v", err) + } + h.sendResponseAndLog(ctx, gatewayId, body, response) +} + +func (h *functionsConnectorHandler) handleHeartbeat(ctx context.Context, gatewayId string, requestBody *api.MessageBody, fromAddr ethCommon.Address) { + var request *OffchainRequest + err := json.Unmarshal(requestBody.Payload, &request) + if err != nil { + h.sendResponseAndLog(ctx, gatewayId, requestBody, internalErrorResponse(fmt.Sprintf("failed to unmarshal request: %v", err))) + return + } + if _, ok := h.allowedHeartbeatInitiators[requestBody.Sender]; !ok { + h.sendResponseAndLog(ctx, gatewayId, requestBody, internalErrorResponse("sender not allowed to send heartbeat requests")) + return + } + if !bytes.Equal(request.RequestInitiator, fromAddr.Bytes()) { + h.sendResponseAndLog(ctx, gatewayId, requestBody, internalErrorResponse("RequestInitiator doesn't match sender")) + return + } + if !bytes.Equal(request.SubscriptionOwner, fromAddr.Bytes()) { + h.sendResponseAndLog(ctx, gatewayId, requestBody, internalErrorResponse("SubscriptionOwner doesn't match sender")) + return + } + if request.Timestamp < uint64(time.Now().Unix())-uint64(h.requestTimeoutSec) { + h.sendResponseAndLog(ctx, gatewayId, requestBody, internalErrorResponse("Request is too old")) + return + } + + internalId := InternalId(fromAddr.Bytes(), request.RequestId) + request.RequestId = internalId[:] + h.lggr.Infow("handling offchain heartbeat", "messageId", requestBody.MessageId, "internalId", internalId, "sender", requestBody.Sender) + h.mu.Lock() + response, ok := h.heartbeatRequests[internalId] + if !ok { 
// new request + response = &HeartbeatResponse{ + Status: RequestStatePending, + ReceivedTs: uint64(time.Now().Unix()), + } + h.cacheNewRequestLocked(internalId, response) + h.shutdownWaitGroup.Add(1) + go h.handleOffchainRequest(request) + } + responseToSend := *response + h.mu.Unlock() + requestBody.Receiver = requestBody.Sender + h.sendResponseAndLog(ctx, gatewayId, requestBody, responseToSend) +} + +func internalErrorResponse(internalError string) HeartbeatResponse { + return HeartbeatResponse{ + Status: RequestStateInternalError, + InternalError: internalError, + } +} + +func (h *functionsConnectorHandler) handleOffchainRequest(request *OffchainRequest) { + defer h.shutdownWaitGroup.Done() + stopCtx, _ := h.chStop.NewCtx() + ctx, cancel := context.WithTimeout(stopCtx, time.Duration(h.requestTimeoutSec)*time.Second) + defer cancel() + err := h.listener.HandleOffchainRequest(ctx, request) + if err != nil { + h.lggr.Errorw("internal error while processing", "id", request.RequestId, "err", err) + h.mu.Lock() + defer h.mu.Unlock() + state, ok := h.heartbeatRequests[RequestID(request.RequestId)] + if !ok { + h.lggr.Errorw("request unexpectedly disappeared from local cache", "id", request.RequestId) + return + } + state.CompletedTs = uint64(time.Now().Unix()) + state.Status = RequestStateInternalError + state.InternalError = err.Error() + } else { + // no error - results will be sent to OCR aggregation and returned via reportLoop() + h.lggr.Infow("request processed successfully, waiting for aggregation ...", "id", request.RequestId) + } +} + +// Listen to OCR reports passed from the plugin and process them against a local cache of requests. 
+func (h *functionsConnectorHandler) reportLoop() { + defer h.shutdownWaitGroup.Done() + for { + select { + case report := <-h.offchainTransmitter.ReportChannel(): + h.lggr.Infow("received report", "requestId", report.RequestId, "resultLen", len(report.Result), "errorLen", len(report.Error)) + if len(report.RequestId) != RequestIDLength { + h.lggr.Errorw("report has invalid requestId", "requestId", report.RequestId) + continue + } + h.mu.Lock() + cachedResponse, ok := h.heartbeatRequests[RequestID(report.RequestId)] + if !ok { + h.lggr.Infow("received report for unknown request, caching it", "id", report.RequestId) + cachedResponse = &HeartbeatResponse{} + h.cacheNewRequestLocked(RequestID(report.RequestId), cachedResponse) + } + cachedResponse.CompletedTs = uint64(time.Now().Unix()) + cachedResponse.Status = RequestStateComplete + cachedResponse.Response = report + h.mu.Unlock() + case <-h.chStop: + h.lggr.Info("exiting reportLoop") + return + } + } +} + +func (h *functionsConnectorHandler) cacheNewRequestLocked(requestId RequestID, response *HeartbeatResponse) { + // remove oldest requests + for len(h.orderedRequests) >= HeartbeatCacheSize { + delete(h.heartbeatRequests, h.orderedRequests[0]) + h.orderedRequests = h.orderedRequests[1:] + } + h.heartbeatRequests[requestId] = response + h.orderedRequests = append(h.orderedRequests, requestId) +} + +func (h *functionsConnectorHandler) sendResponseAndLog(ctx context.Context, gatewayId string, requestBody *api.MessageBody, payload any) { + err := h.sendResponse(ctx, gatewayId, requestBody, payload) + if err != nil { + h.lggr.Errorw("failed to send response to gateway", "id", gatewayId, "err", err) + } else { + h.lggr.Debugw("sent to gateway", "id", gatewayId, "messageId", requestBody.MessageId, "donId", requestBody.DonId, "method", requestBody.Method) + } +} + +func (h *functionsConnectorHandler) sendResponse(ctx context.Context, gatewayId string, requestBody *api.MessageBody, payload any) error { + payloadJson, err 
:= json.Marshal(payload) + if err != nil { + return err + } + + msg := &api.Message{ + Body: api.MessageBody{ + MessageId: requestBody.MessageId, + DonId: requestBody.DonId, + Method: requestBody.Method, + Receiver: requestBody.Sender, + Payload: payloadJson, + }, + } + if err = msg.Sign(h.signerKey); err != nil { + return err + } + return h.connector.SendToGateway(ctx, gatewayId, msg) +} diff --git a/core/services/functions/connector_handler_test.go b/core/services/functions/connector_handler_test.go new file mode 100644 index 00000000..8691f801 --- /dev/null +++ b/core/services/functions/connector_handler_test.go @@ -0,0 +1,364 @@ +package functions_test + +import ( + "crypto/rand" + "encoding/base64" + "encoding/json" + "errors" + "math/big" + "testing" + "time" + + geth_common "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/onsi/gomega" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/functions" + sfmocks "github.com/goplugin/pluginv3.0/v2/core/services/functions/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/common" + gwconnector "github.com/goplugin/pluginv3.0/v2/core/services/gateway/connector" + gcmocks "github.com/goplugin/pluginv3.0/v2/core/services/gateway/connector/mocks" + hc "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/common" + fallowMocks "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/allowlist/mocks" + fsubMocks "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/subscriptions/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + "github.com/goplugin/pluginv3.0/v2/core/services/s4" + s4mocks 
"github.com/goplugin/pluginv3.0/v2/core/services/s4/mocks"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// newOffchainRequest builds a signed-payload-ready heartbeat message for the
// given sender, with a timestamp ageSec seconds in the past, and returns both
// the gateway message and the internal request ID derived from
// (initiator, requestId) — the same key the handler uses to match responses.
func newOffchainRequest(t *testing.T, sender []byte, ageSec uint64) (*api.Message, functions.RequestID) {
	// 32 random bytes for the request ID, matching RequestIDLength.
	requestId := make([]byte, 32)
	_, err := rand.Read(requestId)
	require.NoError(t, err)
	request := &functions.OffchainRequest{
		RequestId:         requestId,
		RequestInitiator:  sender,
		SubscriptionId:    1,
		SubscriptionOwner: sender,
		Timestamp:         uint64(time.Now().Unix()) - ageSec,
	}

	internalId := functions.InternalId(request.RequestInitiator, request.RequestId)
	req, err := json.Marshal(request)
	require.NoError(t, err)
	msg := &api.Message{
		Body: api.MessageBody{
			DonId:     "fun4",
			MessageId: "1",
			Method:    "heartbeat",
			Payload:   req,
		},
	}
	return msg, internalId
}

// TestFunctionsConnectorHandler exercises the gateway connector handler end to
// end against mocks: message signing, the secrets_list / secrets_set gateway
// methods (success, ORM errors, signature and balance failures), rejection of
// unsupported methods, and the async heartbeat request/response flow.
func TestFunctionsConnectorHandler(t *testing.T) {
	t.Parallel()

	logger := logger.TestLogger(t)
	privateKey, addr := testutils.NewPrivateKeyAndAddress(t)
	storage := s4mocks.NewStorage(t)
	connector := gcmocks.NewGatewayConnector(t)
	allowlist := fallowMocks.NewOnchainAllowlist(t)
	rateLimiter, err := hc.NewRateLimiter(hc.RateLimiterConfig{GlobalRPS: 100.0, GlobalBurst: 100, PerSenderRPS: 100.0, PerSenderBurst: 100})
	subscriptions := fsubMocks.NewOnchainSubscriptions(t)
	// Unbuffered channel: sends in the heartbeat tests block until the handler
	// has consumed the previous item, which the tests rely on for ordering.
	reportCh := make(chan *functions.OffchainResponse)
	offchainTransmitter := sfmocks.NewOffchainTransmitter(t)
	offchainTransmitter.On("ReportChannel", mock.Anything).Return(reportCh)
	listener := sfmocks.NewFunctionsListener(t)
	require.NoError(t, err)
	allowlist.On("Start", mock.Anything).Return(nil)
	allowlist.On("Close", mock.Anything).Return(nil)
	subscriptions.On("Start", mock.Anything).Return(nil)
	subscriptions.On("Close", mock.Anything).Return(nil)
	config := &config.PluginConfig{
		GatewayConnectorConfig: &gwconnector.ConnectorConfig{
			NodeAddress: addr.Hex(),
		},
		MinimumSubscriptionBalance: *assets.NewLinkFromJuels(100),
		RequestTimeoutSec:          1_000,
		// Only the test key's address may initiate heartbeats.
		AllowedHeartbeatInitiators: []string{crypto.PubkeyToAddress(privateKey.PublicKey).Hex()},
	}
	handler, err := functions.NewFunctionsConnectorHandler(config, privateKey, storage, allowlist, rateLimiter, subscriptions, listener, offchainTransmitter, logger)
	require.NoError(t, err)

	handler.SetConnector(connector)

	err = handler.Start(testutils.Context(t))
	require.NoError(t, err)
	t.Cleanup(func() {
		assert.NoError(t, handler.Close())
	})

	t.Run("Sign", func(t *testing.T) {
		signature, err := handler.Sign([]byte("test"))
		require.NoError(t, err)

		// The recovered signer must be the handler's configured node address.
		signer, err := common.ExtractSigner(signature, []byte("test"))
		require.NoError(t, err)
		require.Equal(t, addr.Bytes(), signer)
	})

	t.Run("HandleGatewayMessage", func(t *testing.T) {
		t.Run("secrets_list", func(t *testing.T) {
			msg := api.Message{
				Body: api.MessageBody{
					DonId:     "fun4",
					MessageId: "1",
					Method:    "secrets_list",
					Sender:    addr.Hex(),
				},
			}
			require.NoError(t, msg.Sign(privateKey))

			ctx := testutils.Context(t)
			snapshot := []*s4.SnapshotRow{
				{SlotId: 1, Version: 1, Expiration: 1},
				{SlotId: 2, Version: 2, Expiration: 2},
			}
			storage.On("List", ctx, addr).Return(snapshot, nil).Once()
			allowlist.On("Allow", addr).Return(true).Once()
			connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
				msg, ok := args[2].(*api.Message)
				require.True(t, ok)
				require.Equal(t, `{"success":true,"rows":[{"slot_id":1,"version":1,"expiration":1},{"slot_id":2,"version":2,"expiration":2}]}`, string(msg.Body.Payload))

			}).Return(nil).Once()

			handler.HandleGatewayMessage(ctx, "gw1", &msg)

			t.Run("orm error", func(t *testing.T) {
				// Storage failure must surface as an error payload to the gateway.
				storage.On("List", ctx, addr).Return(nil, errors.New("boom")).Once()
				allowlist.On("Allow", addr).Return(true).Once()
				connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
					msg, ok := args[2].(*api.Message)
					require.True(t, ok)
					require.Equal(t, `{"success":false,"error_message":"Failed to list secrets: boom"}`, string(msg.Body.Payload))

				}).Return(nil).Once()

				handler.HandleGatewayMessage(ctx, "gw1", &msg)
			})

			t.Run("not allowed", func(t *testing.T) {
				// Disallowed sender: no SendToGateway expectation is set, so the
				// mock would fail the test if any response were sent.
				allowlist.On("Allow", addr).Return(false).Once()
				handler.HandleGatewayMessage(ctx, "gw1", &msg)
			})
		})

		t.Run("secrets_set", func(t *testing.T) {
			ctx := testutils.Context(t)
			key := s4.Key{
				Address: addr,
				SlotId:  3,
				Version: 4,
			}
			record := s4.Record{
				Expiration: 5,
				Payload:    []byte("test"),
			}
			// Sign the S4 envelope with the user's key; the handler passes the
			// signature through to storage.Put for verification.
			signature, err := s4.NewEnvelopeFromRecord(&key, &record).Sign(privateKey)
			signatureB64 := base64.StdEncoding.EncodeToString(signature)
			require.NoError(t, err)

			msg := api.Message{
				Body: api.MessageBody{
					DonId:     "fun4",
					MessageId: "1",
					Method:    "secrets_set",
					Sender:    addr.Hex(),
					Payload:   json.RawMessage(`{"slot_id":3,"version":4,"expiration":5,"payload":"dGVzdA==","signature":"` + signatureB64 + `"}`),
				},
			}
			require.NoError(t, msg.Sign(privateKey))

			storage.On("Put", ctx, &key, &record, signature).Return(nil).Once()
			allowlist.On("Allow", addr).Return(true).Once()
			subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil).Once()
			connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
				msg, ok := args[2].(*api.Message)
				require.True(t, ok)
				require.Equal(t, `{"success":true}`, string(msg.Body.Payload))

			}).Return(nil).Once()

			handler.HandleGatewayMessage(ctx, "gw1", &msg)

			t.Run("orm error", func(t *testing.T) {
				storage.On("Put", ctx, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("boom")).Once()
				allowlist.On("Allow", addr).Return(true).Once()
				subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil).Once()
				connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
					msg, ok := args[2].(*api.Message)
					require.True(t, ok)
					require.Equal(t, `{"success":false,"error_message":"Failed to set secret: boom"}`, string(msg.Body.Payload))

				}).Return(nil).Once()

				handler.HandleGatewayMessage(ctx, "gw1", &msg)
			})

			t.Run("missing signature", func(t *testing.T) {
				// Payload without the "signature" field; storage rejects it.
				msg.Body.Payload = json.RawMessage(`{"slot_id":3,"version":4,"expiration":5,"payload":"dGVzdA=="}`)
				require.NoError(t, msg.Sign(privateKey))
				storage.On("Put", ctx, mock.Anything, mock.Anything, mock.Anything).Return(s4.ErrWrongSignature).Once()
				allowlist.On("Allow", addr).Return(true).Once()
				subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil).Once()
				connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
					msg, ok := args[2].(*api.Message)
					require.True(t, ok)
					require.Equal(t, `{"success":false,"error_message":"Failed to set secret: wrong signature"}`, string(msg.Body.Payload))

				}).Return(nil).Once()

				handler.HandleGatewayMessage(ctx, "gw1", &msg)
			})

			t.Run("malformed request", func(t *testing.T) {
				// Invalid JSON is rejected before storage is consulted (no Put expectation).
				msg.Body.Payload = json.RawMessage(`{sdfgdfgoscsicosd:sdf:::sdf ::; xx}`)
				require.NoError(t, msg.Sign(privateKey))
				allowlist.On("Allow", addr).Return(true).Once()
				subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(100), nil).Once()
				connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
					msg, ok := args[2].(*api.Message)
					require.True(t, ok)
					require.Equal(t, `{"success":false,"error_message":"Bad request to set secret: invalid character 's' looking for beginning of object key string"}`, string(msg.Body.Payload))

				}).Return(nil).Once()

				handler.HandleGatewayMessage(ctx, "gw1", &msg)
			})

			t.Run("insufficient balance", func(t *testing.T) {
				// Balance 0 is below MinimumSubscriptionBalance (100 juels) set above.
				allowlist.On("Allow", addr).Return(true).Once()
				subscriptions.On("GetMaxUserBalance", mock.Anything).Return(big.NewInt(0), nil).Once()
				connector.On("SendToGateway", ctx, "gw1", mock.Anything).Run(func(args mock.Arguments) {
					msg, ok := args[2].(*api.Message)
					require.True(t, ok)
					require.Equal(t, `{"success":false,"error_message":"user subscription has insufficient balance"}`, string(msg.Body.Payload))

				}).Return(nil).Once()

				handler.HandleGatewayMessage(ctx, "gw1", &msg)
			})
		})

		t.Run("unsupported method", func(t *testing.T) {
			msg := api.Message{
				Body: api.MessageBody{
					DonId:     "fun4",
					MessageId: "1",
					Method:    "foobar",
					Sender:    addr.Hex(),
					Payload:   []byte("whatever"),
				},
			}
			require.NoError(t, msg.Sign(privateKey))

			// Unknown method: allowed sender but no response is expected.
			allowlist.On("Allow", addr).Return(true).Once()
			handler.HandleGatewayMessage(testutils.Context(t), "gw1", &msg)
		})
	})

	t.Run("heartbeat success", func(t *testing.T) {
		ctx := testutils.Context(t)
		msg, internalId := newOffchainRequest(t, addr.Bytes(), 0)
		require.NoError(t, msg.Sign(privateKey))

		// first call to trigger the request
		var response functions.HeartbeatResponse
		allowlist.On("Allow", addr).Return(true).Once()
		listener.On("HandleOffchainRequest", mock.Anything, mock.Anything).Return(nil).Once()
		connector.On("SendToGateway", mock.Anything, "gw1", mock.Anything).Run(func(args mock.Arguments) {
			respMsg, ok := args[2].(*api.Message)
			require.True(t, ok)
			require.NoError(t, json.Unmarshal(respMsg.Body.Payload, &response))
			require.Equal(t, functions.RequestStatePending, response.Status)
		}).Return(nil).Once()
		handler.HandleGatewayMessage(ctx, "gw1", msg)

		// async response computation
		reportCh <- &functions.OffchainResponse{
			RequestId: internalId[:],
			Result:    []byte("ok!"),
		}
		reportCh <- &functions.OffchainResponse{} // sending second item to make sure the first one got processed

		// second call to collect the response
		allowlist.On("Allow", addr).Return(true).Once()
		connector.On("SendToGateway", mock.Anything, "gw1", mock.Anything).Run(func(args mock.Arguments) {
			respMsg, ok := args[2].(*api.Message)
			require.True(t, ok)
			require.NoError(t, json.Unmarshal(respMsg.Body.Payload, &response))
			require.Equal(t, functions.RequestStateComplete, response.Status)
		}).Return(nil).Once()
		handler.HandleGatewayMessage(ctx, "gw1", msg)
	})

	t.Run("heartbeat internal error", func(t *testing.T) {
		ctx := testutils.Context(t)
		msg, _ := newOffchainRequest(t, addr.Bytes(), 0)
		require.NoError(t, msg.Sign(privateKey))

		// first call to trigger the request
		var response functions.HeartbeatResponse
		allowlist.On("Allow", addr).Return(true).Once()
		listener.On("HandleOffchainRequest", mock.Anything, mock.Anything).Return(errors.New("boom")).Once()
		connector.On("SendToGateway", mock.Anything, "gw1", mock.Anything).Return(nil).Once()
		handler.HandleGatewayMessage(ctx, "gw1", msg)

		// collect the response - should eventually result in an internal error
		gomega.NewGomegaWithT(t).Eventually(func() bool {
			returnedState := 0
			allowlist.On("Allow", addr).Return(true).Once()
			connector.On("SendToGateway", mock.Anything, "gw1", mock.Anything).Run(func(args mock.Arguments) {
				respMsg, ok := args[2].(*api.Message)
				require.True(t, ok)
				require.NoError(t, json.Unmarshal(respMsg.Body.Payload, &response))
				returnedState = response.Status
			}).Return(nil).Once()
			handler.HandleGatewayMessage(ctx, "gw1", msg)
			return returnedState == functions.RequestStateInternalError
		}, testutils.WaitTimeout(t), 50*time.Millisecond).Should(gomega.BeTrue())
	})

	t.Run("heartbeat sender address doesn't match", func(t *testing.T) {
		ctx := testutils.Context(t)
		// Initiator inside the payload differs from the signing address.
		msg, _ := newOffchainRequest(t, geth_common.BytesToAddress([]byte("0x1234")).Bytes(), 0)
		require.NoError(t, msg.Sign(privateKey))

		var response functions.HeartbeatResponse
		allowlist.On("Allow", addr).Return(true).Once()
		connector.On("SendToGateway", mock.Anything, "gw1", mock.Anything).Run(func(args mock.Arguments) {
			respMsg, ok := args[2].(*api.Message)
			require.True(t, ok)
			require.NoError(t, json.Unmarshal(respMsg.Body.Payload, &response))
			require.Equal(t, functions.RequestStateInternalError, response.Status)
		}).Return(nil).Once()
		handler.HandleGatewayMessage(ctx, "gw1", msg)
	})

	t.Run("heartbeat request too old", func(t *testing.T) {
		ctx := testutils.Context(t)
		// 10_000s old exceeds RequestTimeoutSec (1_000) from the config above.
		msg, _ := newOffchainRequest(t, addr.Bytes(), 10_000)
		require.NoError(t, msg.Sign(privateKey))

		var response functions.HeartbeatResponse
		allowlist.On("Allow", addr).Return(true).Once()
		connector.On("SendToGateway", mock.Anything, "gw1", mock.Anything).Run(func(args mock.Arguments) {
			respMsg, ok := args[2].(*api.Message)
			require.True(t, ok)
			require.NoError(t, json.Unmarshal(respMsg.Body.Payload, &response))
			require.Equal(t, functions.RequestStateInternalError, response.Status)
		}).Return(nil).Once()
		handler.HandleGatewayMessage(ctx, "gw1", msg)
	})
}
diff --git a/core/services/functions/external_adapter_client.go b/core/services/functions/external_adapter_client.go
new file mode 100644
index 00000000..2e7f426e
--- /dev/null
+++ b/core/services/functions/external_adapter_client.go
@@ -0,0 +1,261 @@
package functions

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"time"

	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"

	"github.com/goplugin/plugin-common/pkg/utils/hex"
	"github.com/goplugin/pluginv3.0/v2/core/bridges"
)

// ExternalAdapterClient supports two endpoints:
// 1. Request (aka "lambda") for executing Functions requests via RunComputation()
// 2. Secrets (aka "fetcher") for fetching offchain secrets via FetchEncryptedSecrets()
//
// Both endpoints share the same response format.
// All methods are thread-safe.
+// +//go:generate mockery --quiet --name ExternalAdapterClient --output ./mocks/ --case=underscore +type ExternalAdapterClient interface { + RunComputation( + ctx context.Context, + requestId string, + jobName string, + subscriptionOwner string, + subscriptionId uint64, + flags RequestFlags, + nodeProvidedSecrets string, + requestData *RequestData, + ) (userResult, userError []byte, domains []string, err error) + + FetchEncryptedSecrets(ctx context.Context, encryptedSecretsUrls []byte, requestId string, jobName string) (encryptedSecrets, userError []byte, err error) +} + +type externalAdapterClient struct { + adapterURL url.URL + maxResponseBytes int64 +} + +var _ ExternalAdapterClient = (*externalAdapterClient)(nil) + +//go:generate mockery --quiet --name BridgeAccessor --output ./mocks/ --case=underscore +type BridgeAccessor interface { + NewExternalAdapterClient() (ExternalAdapterClient, error) +} + +type bridgeAccessor struct { + bridgeORM bridges.ORM + bridgeName string + maxResponseBytes int64 +} + +var _ BridgeAccessor = (*bridgeAccessor)(nil) + +type requestPayload struct { + Endpoint string `json:"endpoint"` + RequestId string `json:"requestId"` + JobName string `json:"jobName"` + SubscriptionOwner string `json:"subscriptionOwner"` + SubscriptionId uint64 `json:"subscriptionId"` + Flags RequestFlags `json:"flags"` // marshalled as an array of numbers + NodeProvidedSecrets string `json:"nodeProvidedSecrets"` + Data *RequestData `json:"data"` +} + +type secretsPayload struct { + Endpoint string `json:"endpoint"` + RequestId string `json:"requestId"` + JobName string `json:"jobName"` + Data secretsData `json:"data"` +} + +type secretsData struct { + RequestType string `json:"requestType"` + EncryptedSecretsUrls []byte `json:"encryptedSecretsUrls"` +} + +type response struct { + Result string `json:"result"` + Data *responseData `json:"data"` + StatusCode int `json:"statusCode"` +} + +type responseData struct { + Result string `json:"result"` + Error string 
`json:"error"` + ErrorString string `json:"errorString"` + Domains []string `json:"domains"` +} + +var ( + promEAClientLatency = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "functions_external_adapter_client_latency", + Help: "Functions EA client latency in seconds scoped by endpoint", + }, + []string{"name"}, + ) + promEAClientErrors = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "functions_external_adapter_client_errors_total", + Help: "Functions EA client error count scoped by endpoint", + }, + []string{"name"}, + ) +) + +func NewExternalAdapterClient(adapterURL url.URL, maxResponseBytes int64) ExternalAdapterClient { + return &externalAdapterClient{ + adapterURL: adapterURL, + maxResponseBytes: maxResponseBytes, + } +} + +func (ea *externalAdapterClient) RunComputation( + ctx context.Context, + requestId string, + jobName string, + subscriptionOwner string, + subscriptionId uint64, + flags RequestFlags, + nodeProvidedSecrets string, + requestData *RequestData, +) (userResult, userError []byte, domains []string, err error) { + requestData.Secrets = nil // secrets are passed in nodeProvidedSecrets + + payload := requestPayload{ + Endpoint: "lambda", + RequestId: requestId, + JobName: jobName, + SubscriptionOwner: subscriptionOwner, + SubscriptionId: subscriptionId, + Flags: flags, + NodeProvidedSecrets: nodeProvidedSecrets, + Data: requestData, + } + + userResult, userError, domains, err = ea.request(ctx, payload, requestId, jobName, "run_computation") + if err != nil { + return nil, nil, nil, errors.Wrap(err, "error running computation") + } + + return userResult, userError, domains, nil +} + +func (ea *externalAdapterClient) FetchEncryptedSecrets(ctx context.Context, encryptedSecretsUrls []byte, requestId string, jobName string) (encryptedSecrets, userError []byte, err error) { + data := secretsData{ + RequestType: "fetchThresholdEncryptedSecrets", + EncryptedSecretsUrls: encryptedSecretsUrls, + } + + payload := secretsPayload{ + Endpoint: 
"fetcher", + RequestId: requestId, + JobName: jobName, + Data: data, + } + + encryptedSecrets, userError, _, err = ea.request(ctx, payload, requestId, jobName, "fetch_secrets") + if err != nil { + return nil, nil, errors.Wrap(err, "error fetching encrypted secrets") + } + + return encryptedSecrets, userError, nil +} + +func (ea *externalAdapterClient) request( + ctx context.Context, + payload interface{}, + requestId string, + jobName string, + label string, +) (userResult, userError []byte, domains []string, err error) { + jsonPayload, err := json.Marshal(payload) + if err != nil { + return nil, nil, nil, errors.Wrap(err, "error constructing external adapter request payload") + } + + req, err := http.NewRequestWithContext(ctx, "POST", ea.adapterURL.String(), bytes.NewBuffer(jsonPayload)) + if err != nil { + return nil, nil, nil, errors.Wrap(err, "error constructing external adapter request") + } + req.Header.Set("Content-Type", "application/json") + + start := time.Now() + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + promEAClientErrors.WithLabelValues(label).Inc() + return nil, nil, nil, errors.Wrap(err, "error during external adapter request") + } + defer resp.Body.Close() + + source := http.MaxBytesReader(nil, resp.Body, ea.maxResponseBytes) + body, err := io.ReadAll(source) + elapsed := time.Since(start) + promEAClientLatency.WithLabelValues(label).Set(elapsed.Seconds()) + if err != nil { + promEAClientErrors.WithLabelValues(label).Inc() + return nil, nil, nil, errors.Wrap(err, "error reading external adapter response") + } + + if resp.StatusCode != http.StatusOK { + promEAClientErrors.WithLabelValues(label).Inc() + return nil, nil, nil, fmt.Errorf("external adapter responded with HTTP %d, body: %s", resp.StatusCode, body) + } + + var eaResp response + err = json.Unmarshal(body, &eaResp) + if err != nil { + return nil, nil, nil, errors.Wrap(err, fmt.Sprintf("error parsing external adapter response %s", body)) + } + + if 
eaResp.StatusCode != http.StatusOK { + return nil, nil, nil, fmt.Errorf("external adapter invalid StatusCode %d", eaResp.StatusCode) + } + + if eaResp.Data == nil { + return nil, nil, nil, errors.New("external adapter response data was empty") + } + + switch eaResp.Result { + case "error": + userError, err = hex.DecodeString(eaResp.Data.Error) + if err != nil { + return nil, nil, nil, errors.Wrap(err, "error decoding userError hex string") + } + return nil, userError, eaResp.Data.Domains, nil + case "success": + userResult, err = hex.DecodeString(eaResp.Data.Result) + if err != nil { + return nil, nil, nil, errors.Wrap(err, "error decoding result hex string") + } + return userResult, nil, eaResp.Data.Domains, nil + default: + return nil, nil, nil, fmt.Errorf("unexpected result in response: '%+v'", eaResp.Result) + } +} + +func NewBridgeAccessor(bridgeORM bridges.ORM, bridgeName string, maxResponseBytes int64) BridgeAccessor { + return &bridgeAccessor{ + bridgeORM: bridgeORM, + bridgeName: bridgeName, + maxResponseBytes: maxResponseBytes, + } +} + +func (b *bridgeAccessor) NewExternalAdapterClient() (ExternalAdapterClient, error) { + bridge, err := b.bridgeORM.FindBridge(bridges.BridgeName(b.bridgeName)) + if err != nil { + return nil, err + } + return NewExternalAdapterClient(url.URL(bridge.URL), b.maxResponseBytes), nil +} diff --git a/core/services/functions/external_adapter_client_test.go b/core/services/functions/external_adapter_client_test.go new file mode 100644 index 00000000..dcda2eff --- /dev/null +++ b/core/services/functions/external_adapter_client_test.go @@ -0,0 +1,223 @@ +package functions_test + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/functions" +) + +func runFetcherTest(t *testing.T, adapterJSONResponse, expectedSecrets, 
expectedUserError string, expectedError error) {
	t.Helper()

	// Serve a fixed JSON body regardless of the request, then run
	// FetchEncryptedSecrets against it and compare all three outputs.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, adapterJSONResponse)
	}))
	defer ts.Close()

	adapterUrl, err := url.Parse(ts.URL)
	assert.NoError(t, err, "Unexpected error")

	ea := functions.NewExternalAdapterClient(*adapterUrl, 100_000)
	encryptedSecrets, userError, err := ea.FetchEncryptedSecrets(testutils.Context(t), []byte("urls to secrets"), "requestID1234", "TestJob")

	if expectedError != nil {
		assert.Equal(t, expectedError.Error(), err.Error(), "Unexpected error")
	} else {
		assert.Nil(t, err)
	}
	assert.Equal(t, expectedUserError, string(userError), "Unexpected userError")
	assert.Equal(t, expectedSecrets, string(encryptedSecrets), "Unexpected secrets")
}

// runRequestTest is the RunComputation analogue of runFetcherTest: it serves a
// canned adapter response and asserts the returned result/error/domains.
func runRequestTest(t *testing.T, adapterJSONResponse, expectedUserResult, expectedUserError string, expectedDomains []string, expectedError error) {
	t.Helper()

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, adapterJSONResponse)
	}))
	defer ts.Close()

	adapterUrl, err := url.Parse(ts.URL)
	assert.NoError(t, err, "Unexpected error")

	ea := functions.NewExternalAdapterClient(*adapterUrl, 100_000)
	userResult, userError, domains, err := ea.RunComputation(testutils.Context(t), "requestID1234", "TestJob", "SubOwner", 1, functions.RequestFlags{}, "", &functions.RequestData{})

	if expectedError != nil {
		assert.Equal(t, expectedError.Error(), err.Error(), "Unexpected error")
	} else {
		assert.Nil(t, err)
	}
	assert.Equal(t, expectedUserResult, string(userResult), "Unexpected user result")
	assert.Equal(t, expectedUserError, string(userError), "Unexpected user error")
	assert.Equal(t, expectedDomains, domains, "Unexpected domains")
}

// 0x616263646566 is hex for "abcdef".
func TestFetchEncryptedSecrets_Success(t *testing.T) {
	runFetcherTest(t, `{
		"result": "success",
		"data": {
			"result": "0x616263646566",
			"error": ""
		},
		"statusCode": 200
	}`, "abcdef", "", nil)
}

func TestFetchEncryptedSecrets_UserError(t *testing.T) {
	runFetcherTest(t, `{
		"result": "error",
		"data": {
			"result": "",
			"error": "0x616263646566"
		},
		"statusCode": 200
	}`, "", "abcdef", nil)
}

func TestFetchEncryptedSecrets_UnexpectedResponse(t *testing.T) {
	runFetcherTest(t, `{
		"invalid": "invalid",
		"statusCode": 200
	}`, "", "", fmt.Errorf("error fetching encrypted secrets: external adapter response data was empty"))
}

func TestFetchEncryptedSecrets_FailedStatusCode(t *testing.T) {
	// Envelope-level statusCode (not HTTP status) is rejected when != 200.
	runFetcherTest(t, `{
		"result": "success",
		"data": {
			"result": "",
			"error": "0x616263646566"
		},
		"statusCode": 400
	}`, "", "", fmt.Errorf("error fetching encrypted secrets: external adapter invalid StatusCode 400"))
}

func TestFetchEncryptedSecrets_MissingData(t *testing.T) {
	runFetcherTest(t, `{
		"result": "success",
		"statusCode": 200
	}`, "", "", fmt.Errorf("error fetching encrypted secrets: external adapter response data was empty"))
}

func TestFetchEncryptedSecrets_InvalidResponse(t *testing.T) {
	runFetcherTest(t, `{
		"result": "success",
		"data": {
			"result": "invalidHexstring",
			"error": ""
		},
		"statusCode": 200
	}`, "", "", fmt.Errorf("error fetching encrypted secrets: error decoding result hex string: hex string must have 0x prefix"))
}

func TestFetchEncryptedSecrets_InvalidUserError(t *testing.T) {
	runFetcherTest(t, `{
		"result": "error",
		"data": {
			"error": "invalidHexstring",
			"result": ""
		},
		"statusCode": 200
	}`, "", "", fmt.Errorf("error fetching encrypted secrets: error decoding userError hex string: hex string must have 0x prefix"))
}

func TestFetchEncryptedSecrets_UnexpectedResult(t *testing.T) {
	runFetcherTest(t, `{
		"result": "unexpected",
		"data": {
			"result": "0x01",
			"error": ""
		},
		"statusCode": 200
	}`, "", "", fmt.Errorf("error fetching encrypted secrets: unexpected result in response: 'unexpected'"))
}

func TestRunComputation_Success(t *testing.T) {
	runRequestTest(t, `{
		"result": "success",
		"data": {
			"result": "0x616263646566",
			"error": "",
			"domains": ["domain1", "domain2"]
		},
		"statusCode": 200
	}`, "abcdef", "", []string{"domain1", "domain2"}, nil)
}

func TestRunComputation_MissingData(t *testing.T) {
	runRequestTest(t, `{
		"result": "success",
		"statusCode": 200
	}`, "", "", nil, fmt.Errorf("error running computation: external adapter response data was empty"))
}

// Verifies the exact adapter request body, including that Secrets is dropped
// from the payload (it travels via nodeProvidedSecrets instead).
func TestRunComputation_CorrectAdapterRequest(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, err := io.ReadAll(r.Body)
		assert.NoError(t, err)
		expectedData := `{"source":"abcd","language":7,"codeLocation":42,"secretsLocation":88,"args":["arg1","arg2"]}`
		expectedBody := fmt.Sprintf(`{"endpoint":"lambda","requestId":"requestID1234","jobName":"TestJob","subscriptionOwner":"SubOwner","subscriptionId":1,"flags":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"nodeProvidedSecrets":"secRETS","data":%s}`, expectedData)
		assert.Equal(t, expectedBody, string(body))

		// Deliberately malformed response; the call is expected to error.
		fmt.Fprintln(w, "}}invalidJSON")
	}))
	defer ts.Close()

	adapterUrl, err := url.Parse(ts.URL)
	assert.NoError(t, err)

	ea := functions.NewExternalAdapterClient(*adapterUrl, 100_000)
	reqData := &functions.RequestData{
		Source:          "abcd",
		Language:        7,
		CodeLocation:    42,
		Secrets:         []byte{0xaa, 0xbb, 0xcc}, // "qrvM" base64 encoded
		SecretsLocation: 88,
		Args:            []string{"arg1", "arg2"},
	}
	_, _, _, err = ea.RunComputation(testutils.Context(t), "requestID1234", "TestJob", "SubOwner", 1, functions.RequestFlags{}, "secRETS", reqData)
	assert.Error(t, err)
}

func TestRunComputation_HTTP500(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
	}))
	defer ts.Close()

	adapterUrl, err := url.Parse(ts.URL)
	assert.NoError(t, err)

	ea := functions.NewExternalAdapterClient(*adapterUrl, 100_000)
	_, _, _, err = ea.RunComputation(testutils.Context(t), "requestID1234", "TestJob", "SubOwner", 1, functions.RequestFlags{}, "secRETS", &functions.RequestData{})
	assert.Error(t, err)
}

// The server blocks until the test finishes; a 10ms context deadline must
// abort the in-flight request with an error.
func TestRunComputation_ContextRespected(t *testing.T) {
	done := make(chan struct{})
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		<-done
	}))
	defer ts.Close()

	adapterUrl, err := url.Parse(ts.URL)
	assert.NoError(t, err)

	ea := functions.NewExternalAdapterClient(*adapterUrl, 100_000)
	ctx, cancel := context.WithTimeout(testutils.Context(t), 10*time.Millisecond)
	defer cancel()
	_, _, _, err = ea.RunComputation(ctx, "requestID1234", "TestJob", "SubOwner", 1, functions.RequestFlags{}, "secRETS", &functions.RequestData{})
	assert.Error(t, err)
	close(done)
}
diff --git a/core/services/functions/listener.go b/core/services/functions/listener.go
new file mode 100644
index 00000000..b8d4e021
--- /dev/null
+++ b/core/services/functions/listener.go
@@ -0,0 +1,624 @@
package functions

import (
	"context"
	"fmt"
	"math"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"google.golang.org/protobuf/proto"

	"github.com/goplugin/libocr/commontypes"

	"github.com/goplugin/plugin-common/pkg/services"

	"github.com/goplugin/pluginv3.0/v2/core/cbor"
	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/client"
	"github.com/goplugin/pluginv3.0/v2/core/logger"
	"github.com/goplugin/pluginv3.0/v2/core/services/job"
	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config"
	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/threshold"
	"github.com/goplugin/pluginv3.0/v2/core/services/pg"
	evmrelayTypes
"github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types"
	"github.com/goplugin/pluginv3.0/v2/core/services/s4"
	"github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem"
)

var (
	// sizeBuckets are histogram buckets (in bytes) for request data sizes.
	sizeBuckets = []float64{
		1024,
		1024 * 4,
		1024 * 8,
		1024 * 16,
		1024 * 64,
		1024 * 256,
	}

	promRequestReceived = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "functions_request_received",
		Help: "Metric to track received request events",
	}, []string{"router"})

	promRequestInternalError = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "functions_request_internal_error",
		Help: "Metric to track internal errors",
	}, []string{"router"})

	promRequestComputationError = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "functions_request_computation_error",
		Help: "Metric to track computation errors",
	}, []string{"router"})

	promRequestComputationSuccess = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "functions_request_computation_success",
		Help: "Metric to track number of computed requests",
	}, []string{"router"})

	promRequestTimeout = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "functions_request_timeout",
		Help: "Metric to track number of timed out requests",
	}, []string{"router"})

	promRequestConfirmed = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "functions_request_confirmed",
		Help: "Metric to track number of confirmed requests",
	}, []string{"router"})

	promRequestDataSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "functions_request_data_size",
		Help:    "Metric to track request data size",
		Buckets: sizeBuckets,
	}, []string{"router"})

	promComputationResultSize = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "functions_request_computation_result_size",
		Help: "Metric to track computation result size in bytes",
	}, []string{"router"})

	promComputationErrorSize = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "functions_request_computation_error_size",
		Help: "Metric to track computation error size in bytes",
	}, []string{"router"})

	promComputationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
		Name: "functions_request_computation_duration",
		Help: "Metric to track computation duration in ms",
		Buckets: []float64{
			float64(10 * time.Millisecond),
			float64(100 * time.Millisecond),
			float64(500 * time.Millisecond),
			float64(time.Second),
			float64(10 * time.Second),
			float64(30 * time.Second),
			float64(60 * time.Second),
		},
	}, []string{"router"})

	promPrunedRequests = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "functions_request_pruned",
		Help: "Metric to track number of requests pruned from the DB",
	}, []string{"router"})
)

const (
	DefaultPruneMaxStoredRequests uint32 = 20_000
	DefaultPruneCheckFrequencySec uint32 = 60 * 10
	DefaultPruneBatchSize         uint32 = 500

	// Used in place of OnchainMetadata for all offchain requests.
	OffchainRequestMarker string = "OFFCHAIN_REQUEST"

	// Indices into RequestFlags selecting the configured size tier.
	FlagCBORMaxSize    uint32 = 1
	FlagSecretsMaxSize uint32 = 2
)

//go:generate mockery --quiet --name FunctionsListener --output ./mocks/ --case=underscore
type FunctionsListener interface {
	job.ServiceCtx

	HandleOffchainRequest(ctx context.Context, request *OffchainRequest) error
}

// functionsListener watches for oracle events (via logPollerWrapper), persists
// requests through pluginORM, and runs background timeout/prune loops.
type functionsListener struct {
	services.StateMachine
	client             client.Client
	contractAddressHex string
	job                job.Job
	bridgeAccessor     BridgeAccessor
	shutdownWaitGroup  sync.WaitGroup
	serviceContext     context.Context
	serviceCancel      context.CancelFunc
	chStop             chan struct{}
	pluginORM          ORM
	pluginConfig       config.PluginConfig
	s4Storage          s4.Storage
	logger             logger.Logger
	urlsMonEndpoint    commontypes.MonitoringEndpoint
	decryptor          threshold.Decryptor
	logPollerWrapper   evmrelayTypes.LogPollerWrapper
}

var _ FunctionsListener = &functionsListener{}

// HealthReport reports this service's health under its logger name.
func (l *functionsListener) HealthReport() map[string]error {
	return map[string]error{l.Name(): l.Healthy()}
}

func (l *functionsListener) Name() string { return l.logger.Name() }

// formatRequestId renders a 32-byte request ID as a 0x-prefixed hex string.
func formatRequestId(requestId [32]byte) string {
	return fmt.Sprintf("0x%x", requestId)
}

// NewFunctionsListener wires the listener's dependencies; no goroutines are
// started until Start is called.
func NewFunctionsListener(
	job job.Job,
	client client.Client,
	contractAddressHex string,
	bridgeAccessor BridgeAccessor,
	pluginORM ORM,
	pluginConfig config.PluginConfig,
	s4Storage s4.Storage,
	lggr logger.Logger,
	urlsMonEndpoint commontypes.MonitoringEndpoint,
	decryptor threshold.Decryptor,
	logPollerWrapper evmrelayTypes.LogPollerWrapper,
) *functionsListener {
	return &functionsListener{
		client:             client,
		contractAddressHex: contractAddressHex,
		job:                job,
		bridgeAccessor:     bridgeAccessor,
		chStop:             make(chan struct{}),
		pluginORM:          pluginORM,
		pluginConfig:       pluginConfig,
		s4Storage:          s4Storage,
		logger:             lggr,
		urlsMonEndpoint:    urlsMonEndpoint,
		decryptor:          decryptor,
		logPollerWrapper:   logPollerWrapper,
	}
}

// Start complies with job.Service
func (l *functionsListener) Start(context.Context) error {
	return l.StartOnce("FunctionsListener", func() error {
		// serviceContext outlives the Start context; cancelled in Close.
		l.serviceContext, l.serviceCancel = context.WithCancel(context.Background())

		switch l.pluginConfig.ContractVersion {
		case 1:
			l.shutdownWaitGroup.Add(1)
			go l.processOracleEventsV1()
		default:
			return fmt.Errorf("unsupported contract version: %d", l.pluginConfig.ContractVersion)
		}

		if l.pluginConfig.ListenerEventHandlerTimeoutSec == 0 {
			l.logger.Warn("listenerEventHandlerTimeoutSec set to zero! ORM calls will never time out.")
		}
		l.shutdownWaitGroup.Add(3)
		go l.timeoutRequests()
		go l.pruneRequests()
		go func() {
			<-l.chStop
			l.shutdownWaitGroup.Done()
		}()
		return nil
	})
}

// Close complies with job.Service
func (l *functionsListener) Close() error {
	return l.StopOnce("FunctionsListener", func() error {
		l.serviceCancel()
		close(l.chStop)
		l.shutdownWaitGroup.Wait()
		return nil
	})
}

// processOracleEventsV1 polls logPollerWrapper on a fixed cadence and fans
// each request/response out to its own handler goroutine.
func (l *functionsListener) processOracleEventsV1() {
	defer l.shutdownWaitGroup.Done()
	freqMillis := l.pluginConfig.ListenerEventsCheckFrequencyMillis
	if freqMillis == 0 {
		l.logger.Errorw("ListenerEventsCheckFrequencyMillis must set to more than 0 in PluginConfig")
		return
	}
	ticker := time.NewTicker(time.Duration(freqMillis) * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-l.chStop:
			return
		case <-ticker.C:
			requests, responses, err := l.logPollerWrapper.LatestEvents()
			if err != nil {
				l.logger.Errorw("error when calling LatestEvents()", "err", err)
				break
			}
			l.logger.Debugw("processOracleEventsV1: processing v1 events", "nRequests", len(requests), "nResponses", len(responses))
			for _, request := range requests {
				request := request // per-iteration copy for the goroutine closure
				l.shutdownWaitGroup.Add(1)
				go l.handleOracleRequestV1(&request)
			}
			for _, response := range responses {
				response := response
				l.shutdownWaitGroup.Add(1)
				go l.handleOracleResponseV1(&response)
			}
		}
	}
}

// getNewHandlerContext derives a per-event context from the service context,
// with the configured timeout (or no timeout when it is 0).
func (l *functionsListener) getNewHandlerContext() (context.Context, context.CancelFunc) {
	timeoutSec := l.pluginConfig.ListenerEventHandlerTimeoutSec
	if timeoutSec == 0 {
		return context.WithCancel(l.serviceContext)
	}
	return context.WithTimeout(l.serviceContext, time.Duration(timeoutSec)*time.Second)
}

// setError records an error result for a request; internal errors are not
// marked ready for processing (they are not reported onchain).
func (l *functionsListener) setError(ctx context.Context, requestId RequestID, errType ErrType, errBytes []byte) {
	if errType == INTERNAL_ERROR {
		promRequestInternalError.WithLabelValues(l.contractAddressHex).Inc()
	} else {
		promRequestComputationError.WithLabelValues(l.contractAddressHex).Inc()
	}
	readyForProcessing := errType != INTERNAL_ERROR
	if err := l.pluginORM.SetError(requestId, errType, errBytes, time.Now(), readyForProcessing, pg.WithParentCtx(ctx)); err != nil {
		l.logger.Errorw("call to SetError failed", "requestID", formatRequestId(requestId), "err", err)
	}
}

// getMaxCBORsize maps the request's size-tier flag to a configured CBOR limit,
// falling back to the deprecated single MaxRequestSizeBytes value.
func (l *functionsListener) getMaxCBORsize(flags RequestFlags) uint32 {
	idx := flags[FlagCBORMaxSize]
	if int(idx) >= len(l.pluginConfig.MaxRequestSizesList) {
		return l.pluginConfig.MaxRequestSizeBytes // deprecated
	}
	return l.pluginConfig.MaxRequestSizesList[idx]
}

// getMaxSecretsSize maps the request's secrets size-tier flag to a configured
// limit; unlimited when the tier is not configured.
func (l *functionsListener) getMaxSecretsSize(flags RequestFlags) uint32 {
	idx := flags[FlagSecretsMaxSize]
	if int(idx) >= len(l.pluginConfig.MaxSecretsSizesList) {
		return math.MaxUint32 // not enforced if not configured
	}
	return l.pluginConfig.MaxSecretsSizesList[idx]
}

// HandleOffchainRequest validates an offchain (heartbeat) request, persists it
// with the OffchainRequestMarker metadata, and runs the shared request path.
func (l *functionsListener) HandleOffchainRequest(ctx context.Context, request *OffchainRequest) error {
	if request == nil {
		return errors.New("HandleOffchainRequest: received nil request")
	}
	if len(request.RequestId) != RequestIDLength {
		return fmt.Errorf("HandleOffchainRequest: invalid request ID length %d", len(request.RequestId))
	}
	if len(request.SubscriptionOwner) != common.AddressLength || len(request.RequestInitiator) != common.AddressLength {
		return fmt.Errorf("HandleOffchainRequest: SubscriptionOwner and RequestInitiator must be set to valid addresses")
	}
	// Reject stale requests per the configured RequestTimeoutSec window.
	if request.Timestamp < uint64(time.Now().Unix()-int64(l.pluginConfig.RequestTimeoutSec)) {
		return fmt.Errorf("HandleOffchainRequest: request timestamp is too old")
	}

	var requestId RequestID
	copy(requestId[:], request.RequestId[:32])
	subscriptionOwner := common.BytesToAddress(request.SubscriptionOwner)
	senderAddr := common.BytesToAddress(request.RequestInitiator)
	emptyTxHash := common.Hash{}
	zeroCallbackGasLimit := uint32(0)
	newReq := &Request{
		RequestID:        requestId,
		RequestTxHash:    &emptyTxHash,
		ReceivedAt:       time.Now(),
		Flags:            []byte{},
		CallbackGasLimit: &zeroCallbackGasLimit,
		// use sender address in place of coordinator contract to keep batches uniform
		CoordinatorContractAddress: &senderAddr,
		OnchainMetadata:            []byte(OffchainRequestMarker),
	}
	if err := l.pluginORM.CreateRequest(newReq, pg.WithParentCtx(ctx)); err != nil {
		if errors.Is(err, ErrDuplicateRequestID) {
			l.logger.Warnw("HandleOffchainRequest: received duplicate request ID", "requestID", formatRequestId(requestId), "err", err)
		} else {
			l.logger.Errorw("HandleOffchainRequest: failed to create a DB entry for new request", "requestID", formatRequestId(requestId), "err", err)
		}
		return err
	}
	return l.handleRequest(ctx, requestId, request.SubscriptionId, subscriptionOwner, RequestFlags{}, &request.Data)
}

// handleOracleRequestV1 persists an onchain v1 oracle request. NOTE: the
// remainder of this function continues beyond this chunk.
func (l *functionsListener) handleOracleRequestV1(request *evmrelayTypes.OracleRequest) {
	defer l.shutdownWaitGroup.Done()
	l.logger.Infow("handleOracleRequestV1: oracle request v1 received", "requestID", formatRequestId(request.RequestId))
	ctx, cancel := l.getNewHandlerContext()
	defer cancel()

	callbackGasLimit := uint32(request.CallbackGasLimit)
	newReq := &Request{
		RequestID:                  request.RequestId,
		RequestTxHash:              &request.TxHash,
		ReceivedAt:                 time.Now(),
		Flags:                      request.Flags[:],
		CallbackGasLimit:           &callbackGasLimit,
		CoordinatorContractAddress: &request.CoordinatorContract,
		OnchainMetadata:            request.OnchainMetadata,
	}
	if err := l.pluginORM.CreateRequest(newReq, pg.WithParentCtx(ctx)); err != nil {
		if errors.Is(err, ErrDuplicateRequestID) {
			l.logger.Warnw("handleOracleRequestV1: received a log with duplicate request ID", "requestID", formatRequestId(request.RequestId), "err", err)
		} else {
			l.logger.Errorw("handleOracleRequestV1: failed to create a DB entry for new request", "requestID", formatRequestId(request.RequestId), "err", err)
		}
		return
	}

promRequestReceived.WithLabelValues(l.contractAddressHex).Inc() + promRequestDataSize.WithLabelValues(l.contractAddressHex).Observe(float64(len(request.Data))) + requestData, err := l.parseCBOR(request.RequestId, request.Data, l.getMaxCBORsize(request.Flags)) + if err != nil { + l.setError(ctx, request.RequestId, USER_ERROR, []byte(err.Error())) + return + } + err = l.handleRequest(ctx, request.RequestId, request.SubscriptionId, request.SubscriptionOwner, request.Flags, requestData) + if err != nil { + l.logger.Errorw("handleOracleRequestV1: error in handleRequest()", "requestID", formatRequestId(request.RequestId), "err", err) + } +} + +func (l *functionsListener) parseCBOR(requestId RequestID, cborData []byte, maxSizeBytes uint32) (*RequestData, error) { + if maxSizeBytes > 0 && uint32(len(cborData)) > maxSizeBytes { + l.logger.Errorw("request too big", "requestID", formatRequestId(requestId), "requestSize", len(cborData), "maxRequestSize", maxSizeBytes) + return nil, fmt.Errorf("request too big (max %d bytes)", maxSizeBytes) + } + + var requestData RequestData + if err := cbor.ParseDietCBORToStruct(cborData, &requestData); err != nil { + l.logger.Errorw("failed to parse CBOR", "requestID", formatRequestId(requestId), "err", err) + return nil, errors.New("CBOR parsing error") + } + + return &requestData, nil +} + +// Handle secret fetching/decryption and functions computation. Return error only for internal errors. 
+func (l *functionsListener) handleRequest(ctx context.Context, requestID RequestID, subscriptionId uint64, subscriptionOwner common.Address, flags RequestFlags, requestData *RequestData) error { + startTime := time.Now() + defer func() { + duration := time.Since(startTime) + promComputationDuration.WithLabelValues(l.contractAddressHex).Observe(float64(duration.Milliseconds())) + }() + requestIDStr := formatRequestId(requestID) + l.logger.Infow("processing request", "requestID", requestIDStr) + + eaClient, err := l.bridgeAccessor.NewExternalAdapterClient() + if err != nil { + l.logger.Errorw("failed to create ExternalAdapterClient", "requestID", requestIDStr, "err", err) + l.setError(ctx, requestID, INTERNAL_ERROR, []byte(err.Error())) + return err + } + + nodeProvidedSecrets, userErr, internalErr := l.getSecrets(ctx, eaClient, requestID, subscriptionOwner, requestData) + if internalErr != nil { + l.logger.Errorw("internal error during getSecrets", "requestID", requestIDStr, "err", internalErr) + l.setError(ctx, requestID, INTERNAL_ERROR, []byte(internalErr.Error())) + return internalErr + } + if userErr != nil { + l.logger.Debugw("user error during getSecrets", "requestID", requestIDStr, "err", userErr) + l.setError(ctx, requestID, USER_ERROR, []byte(userErr.Error())) + return nil // user error + } + + maxSecretsSize := l.getMaxSecretsSize(flags) + if uint32(len(nodeProvidedSecrets)) > maxSecretsSize { + l.logger.Errorw("secrets size too big", "requestID", requestIDStr, "secretsSize", len(nodeProvidedSecrets), "maxSecretsSize", maxSecretsSize) + l.setError(ctx, requestID, USER_ERROR, []byte("secrets size too big")) + return nil // user error + } + + computationResult, computationError, domains, err := eaClient.RunComputation(ctx, requestIDStr, l.job.Name.ValueOrZero(), subscriptionOwner.Hex(), subscriptionId, flags, nodeProvidedSecrets, requestData) + + if err != nil { + l.logger.Errorw("internal adapter error", "requestID", requestIDStr, "err", err) + 
l.setError(ctx, requestID, INTERNAL_ERROR, []byte(err.Error())) + return err + } + + if len(computationError) == 0 && len(computationResult) == 0 { + l.logger.Errorw("both result and error are empty - saving result", "requestID", requestIDStr) + computationResult = []byte{} + computationError = []byte{} + } + + if len(domains) > 0 { + l.reportSourceCodeDomains(requestID, domains) + } + + if len(computationError) != 0 { + if len(computationResult) != 0 { + l.logger.Warnw("both result and error are non-empty - using error", "requestID", requestIDStr) + } + l.logger.Debugw("saving computation error", "requestID", requestIDStr) + l.setError(ctx, requestID, USER_ERROR, computationError) + promComputationErrorSize.WithLabelValues(l.contractAddressHex).Set(float64(len(computationError))) + } else { + promRequestComputationSuccess.WithLabelValues(l.contractAddressHex).Inc() + promComputationResultSize.WithLabelValues(l.contractAddressHex).Set(float64(len(computationResult))) + l.logger.Debugw("saving computation result", "requestID", requestIDStr) + if err2 := l.pluginORM.SetResult(requestID, computationResult, time.Now(), pg.WithParentCtx(ctx)); err2 != nil { + l.logger.Errorw("call to SetResult failed", "requestID", requestIDStr, "err", err2) + return err2 + } + } + return nil +} + +func (l *functionsListener) handleOracleResponseV1(response *evmrelayTypes.OracleResponse) { + defer l.shutdownWaitGroup.Done() + l.logger.Infow("oracle response v1 received", "requestID", formatRequestId(response.RequestId)) + + ctx, cancel := l.getNewHandlerContext() + defer cancel() + if err := l.pluginORM.SetConfirmed(response.RequestId, pg.WithParentCtx(ctx)); err != nil { + l.logger.Errorw("setting CONFIRMED state failed", "requestID", formatRequestId(response.RequestId), "err", err) + } + promRequestConfirmed.WithLabelValues(l.contractAddressHex).Inc() +} + +func (l *functionsListener) timeoutRequests() { + defer l.shutdownWaitGroup.Done() + timeoutSec, freqSec, batchSize := 
l.pluginConfig.RequestTimeoutSec, l.pluginConfig.RequestTimeoutCheckFrequencySec, l.pluginConfig.RequestTimeoutBatchLookupSize + if timeoutSec == 0 || freqSec == 0 || batchSize == 0 { + l.logger.Warn("request timeout checker not configured - disabling it") + return + } + ticker := time.NewTicker(time.Duration(freqSec) * time.Second) + defer ticker.Stop() + for { + select { + case <-l.chStop: + return + case <-ticker.C: + cutoff := time.Now().Add(-(time.Duration(timeoutSec) * time.Second)) + ctx, cancel := l.getNewHandlerContext() + ids, err := l.pluginORM.TimeoutExpiredResults(cutoff, batchSize, pg.WithParentCtx(ctx)) + cancel() + if err != nil { + l.logger.Errorw("error when calling FindExpiredResults", "err", err) + break + } + if len(ids) > 0 { + promRequestTimeout.WithLabelValues(l.contractAddressHex).Add(float64(len(ids))) + var idStrs []string + for _, id := range ids { + idStrs = append(idStrs, formatRequestId(id)) + } + l.logger.Debugw("timed out requests", "requestIDs", idStrs) + } else { + l.logger.Debug("no requests to time out") + } + } + } +} + +func (l *functionsListener) pruneRequests() { + defer l.shutdownWaitGroup.Done() + maxStoredRequests, freqSec, batchSize := l.pluginConfig.PruneMaxStoredRequests, l.pluginConfig.PruneCheckFrequencySec, l.pluginConfig.PruneBatchSize + if maxStoredRequests == 0 { + l.logger.Warnw("pruneMaxStoredRequests not configured - using default", "DefaultPruneMaxStoredRequests", DefaultPruneMaxStoredRequests) + maxStoredRequests = DefaultPruneMaxStoredRequests + } + if freqSec == 0 { + l.logger.Warnw("pruneCheckFrequencySec not configured - using default", "DefaultPruneCheckFrequencySec", DefaultPruneCheckFrequencySec) + freqSec = DefaultPruneCheckFrequencySec + } + if batchSize == 0 { + l.logger.Warnw("pruneBatchSize not configured - using default", "DefaultPruneBatchSize", DefaultPruneBatchSize) + batchSize = DefaultPruneBatchSize + } + + ticker := time.NewTicker(time.Duration(freqSec) * time.Second) + defer ticker.Stop() 
+ for { + select { + case <-l.chStop: + return + case <-ticker.C: + ctx, cancel := l.getNewHandlerContext() + startTime := time.Now() + nTotal, nPruned, err := l.pluginORM.PruneOldestRequests(maxStoredRequests, batchSize, pg.WithParentCtx(ctx)) + cancel() + elapsedMillis := time.Since(startTime).Milliseconds() + if err != nil { + l.logger.Errorw("error when calling PruneOldestRequests", "err", err, "elapsedMillis", elapsedMillis) + break + } + if nPruned > 0 { + promPrunedRequests.WithLabelValues(l.contractAddressHex).Add(float64(nPruned)) + l.logger.Debugw("pruned requests from the DB", "nTotal", nTotal, "nPruned", nPruned, "elapsedMillis", elapsedMillis) + } else { + l.logger.Debugw("no pruned requests at this time", "nTotal", nTotal, "elapsedMillis", elapsedMillis) + } + } + } +} + +func (l *functionsListener) reportSourceCodeDomains(requestId RequestID, domains []string) { + r := &telem.FunctionsRequest{ + RequestId: formatRequestId(requestId), + NodeAddress: l.job.OCR2OracleSpec.TransmitterID.ValueOrZero(), + Domains: domains, + } + + bytes, err := proto.Marshal(r) + if err != nil { + l.logger.Warnw("telem.FunctionsRequest marshal error", "err", err) + } else { + l.urlsMonEndpoint.SendLog(bytes) + } +} + +func (l *functionsListener) getSecrets(ctx context.Context, eaClient ExternalAdapterClient, requestID RequestID, subscriptionOwner common.Address, requestData *RequestData) (decryptedSecrets string, userError, internalError error) { + if l.decryptor == nil { + l.logger.Warn("Decryptor not configured") + return "", nil, nil + } + + var secrets []byte + requestIDStr := formatRequestId(requestID) + + switch requestData.SecretsLocation { + case LocationInline: + if len(requestData.Secrets) > 0 { + l.logger.Warnw("request used Inline secrets location, processing with no secrets", "requestID", requestIDStr) + } else { + l.logger.Debugw("request does not use any secrets", "requestID", requestIDStr) + } + return "", nil, nil + case LocationRemote: + 
thresholdEncSecrets, userError, err := eaClient.FetchEncryptedSecrets(ctx, requestData.Secrets, requestIDStr, l.job.Name.ValueOrZero()) + if err != nil { + return "", nil, errors.Wrap(err, "failed to fetch encrypted secrets") + } + if len(userError) != 0 { + return "", errors.New(string(userError)), nil + } + secrets = thresholdEncSecrets + case LocationDONHosted: + if l.s4Storage == nil { + return "", nil, errors.New("S4 storage not configured") + } + var donSecrets DONHostedSecrets + if err := cbor.ParseDietCBORToStruct(requestData.Secrets, &donSecrets); err != nil { + return "", errors.Wrap(err, "failed to parse DONHosted secrets CBOR"), nil + } + record, _, err := l.s4Storage.Get(ctx, &s4.Key{ + Address: subscriptionOwner, + SlotId: donSecrets.SlotID, + Version: donSecrets.Version, + }) + if err != nil { + return "", errors.Wrap(err, "failed to fetch DONHosted secrets"), nil + } + secrets = record.Payload + } + + if len(secrets) == 0 { + return "", nil, nil + } + + decryptCtx, cancel := context.WithTimeout(ctx, time.Duration(l.pluginConfig.DecryptionQueueConfig.DecryptRequestTimeoutSec)*time.Second) + defer cancel() + + decryptedSecretsBytes, err := l.decryptor.Decrypt(decryptCtx, requestID[:], secrets) + if err != nil { + l.logger.Debugw("threshold decryption of secrets failed", "requestID", requestIDStr, "err", err) + return "", errors.New("threshold decryption of secrets failed"), nil + } + return string(decryptedSecretsBytes), nil, nil +} diff --git a/core/services/functions/listener_test.go b/core/services/functions/listener_test.go new file mode 100644 index 00000000..ebee6d17 --- /dev/null +++ b/core/services/functions/listener_test.go @@ -0,0 +1,434 @@ +package functions_test + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/fxamacker/cbor/v2" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + 
"github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + decryptionPlugin "github.com/goplugin/tdh2/go/ocr2/decryptionplugin" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/plugin-common/pkg/utils/mailbox/mailboxtest" + + log_mocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + functions_service "github.com/goplugin/pluginv3.0/v2/core/services/functions" + functions_mocks "github.com/goplugin/pluginv3.0/v2/core/services/functions/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + threshold_mocks "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/threshold/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" + evmrelay_mocks "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types/mocks" + s4_mocks "github.com/goplugin/pluginv3.0/v2/core/services/s4/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" + sync_mocks "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem" + "github.com/goplugin/pluginv3.0/v2/core/services/telemetry" +) + +type FunctionsListenerUniverse struct { + service functions_service.FunctionsListener + 
bridgeAccessor *functions_mocks.BridgeAccessor + eaClient *functions_mocks.ExternalAdapterClient + pluginORM *functions_mocks.ORM + ingressClient *sync_mocks.TelemetryService + decryptor *threshold_mocks.Decryptor + logPollerWrapper *evmrelay_mocks.LogPollerWrapper +} + +func ptr[T any](t T) *T { return &t } + +var ( + RequestID = newRequestID() + RequestIDStr = fmt.Sprintf("0x%x", [32]byte(RequestID)) + SubscriptionOwner = common.BigToAddress(big.NewInt(42069)) + SubscriptionID = uint64(5) + ResultBytes = []byte{0xab, 0xcd} + ErrorBytes = []byte{0xff, 0x11} + Domains = []string{"github.com", "google.com"} + EncryptedSecretsUrls = []byte{0x11, 0x22} + EncryptedSecrets = []byte(`{"TDH2Ctxt":"eyJHcm","SymCtxt":"+yHR","Nonce":"kgjHyT3Jar0M155E"}`) + DecryptedSecrets = []byte(`{"0x0":"lhcK"}`) + SignedCBORRequestHex = "a666736f75726365782172657475726e2046756e6374696f6e732e656e636f646555696e743235362831296773656372657473421234686c616e6775616765006c636f64654c6f636174696f6e006f736563726574734c6f636174696f6e0170726571756573745369676e617475726558416fb6d10871aa3865b6620dc5f4594d2a9ad9166ba6b1dbc3f508362fd27aa0461babada48979092a11ecadec9c663a2ea99da4e368408b36a3fb414acfefdd2a1c" + SubOwnerAddr = common.HexToAddress("0x2334dE553AB93c69b0ccbe278B6f5E8350Db6204") + NonSubOwnerAddr = common.HexToAddress("0x60C9CF55b9de9A956d921A97575108149b758131") +) + +func NewFunctionsListenerUniverse(t *testing.T, timeoutSec int, pruneFrequencySec int) *FunctionsListenerUniverse { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].MinIncomingConfirmations = ptr[uint32](1) + }) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + broadcaster := log_mocks.NewBroadcaster(t) + broadcaster.On("AddDependents", 1) + mailMon := servicetest.Run(t, mailboxtest.NewMonitor(t)) + + db := pgtest.NewSqlxDB(t) + kst := cltest.NewKeyStore(t, db, cfg.Database()) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: 
cfg, Client: ethClient, KeyStore: kst.Eth(), LogBroadcaster: broadcaster, MailMon: mailMon}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + + chain := legacyChains.Slice()[0] + lggr := logger.TestLogger(t) + + pluginORM := functions_mocks.NewORM(t) + jsonConfig := job.JSONConfig{ + "requestTimeoutSec": timeoutSec, + "requestTimeoutCheckFrequencySec": 1, + "requestTimeoutBatchLookupSize": 1, + "listenerEventHandlerTimeoutSec": 1, + "pruneCheckFrequencySec": pruneFrequencySec, + "decryptionQueueConfig": map[string]interface{}{ + "decryptRequestTimeoutSec": 100, + }, + "contractVersion": 1, + "listenerEventsCheckFrequencyMillis": 100, + } + jsonConfig["maxRequestSizesList"] = []uint32{10, 100, 1_000} + jsonConfig["maxSecretsSizesList"] = []uint32{10, 100, 200} + jb := job.Job{ + Type: job.OffchainReporting2, + SchemaVersion: 1, + ExternalJobID: uuid.New(), + PipelineSpec: &pipeline.Spec{}, + OCR2OracleSpec: &job.OCR2OracleSpec{ + PluginConfig: jsonConfig, + }, + } + eaClient := functions_mocks.NewExternalAdapterClient(t) + bridgeAccessor := functions_mocks.NewBridgeAccessor(t) + decryptor := threshold_mocks.NewDecryptor(t) + + var pluginConfig config.PluginConfig + require.NoError(t, json.Unmarshal(jsonConfig.Bytes(), &pluginConfig)) + + contractAddress := "0xa" + + ingressClient := sync_mocks.NewTelemetryService(t) + ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient) + monEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", contractAddress, synchronization.FunctionsRequests) + + s4Storage := s4_mocks.NewStorage(t) + client := chain.Client() + logPollerWrapper := evmrelay_mocks.NewLogPollerWrapper(t) + functionsListener := functions_service.NewFunctionsListener(jb, client, contractAddress, bridgeAccessor, pluginORM, pluginConfig, s4Storage, lggr, monEndpoint, decryptor, logPollerWrapper) + + return &FunctionsListenerUniverse{ + service: functionsListener, + bridgeAccessor: bridgeAccessor, + 
eaClient: eaClient, + pluginORM: pluginORM, + ingressClient: ingressClient, + decryptor: decryptor, + logPollerWrapper: logPollerWrapper, + } +} + +func packFlags(requestSizeTier int, secretSizeTier int) [32]byte { + var flags [32]byte + flags[1] = byte(requestSizeTier) + flags[2] = byte(secretSizeTier) + return flags +} + +func TestFunctionsListener_HandleOracleRequestV1_Success(t *testing.T) { + testutils.SkipShortDB(t) + t.Parallel() + + uni := NewFunctionsListenerUniverse(t, 0, 1_000_000) + doneCh := make(chan struct{}) + + request := types.OracleRequest{ + RequestId: RequestID, + SubscriptionId: SubscriptionID, + SubscriptionOwner: SubscriptionOwner, + Flags: packFlags(1, 0), // tier no 1 of request size, allows up to 100 bytes + Data: make([]byte, 12), + } + + uni.logPollerWrapper.On("LatestEvents").Return([]types.OracleRequest{request}, nil, nil).Once() + uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Return(nil) + uni.bridgeAccessor.On("NewExternalAdapterClient").Return(uni.eaClient, nil) + uni.eaClient.On("RunComputation", mock.Anything, RequestIDStr, mock.Anything, SubscriptionOwner.Hex(), SubscriptionID, mock.Anything, mock.Anything, mock.Anything).Return(ResultBytes, nil, nil, nil) + uni.pluginORM.On("SetResult", RequestID, ResultBytes, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + close(doneCh) + }).Return(nil) + + servicetest.Run(t, uni.service) + <-doneCh +} + +func TestFunctionsListener_HandleOffchainRequest_Success(t *testing.T) { + testutils.SkipShortDB(t) + t.Parallel() + + uni := NewFunctionsListenerUniverse(t, 0, 1_000_000) + + uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Return(nil) + uni.bridgeAccessor.On("NewExternalAdapterClient").Return(uni.eaClient, nil) + uni.eaClient.On("RunComputation", mock.Anything, RequestIDStr, mock.Anything, SubscriptionOwner.Hex(), SubscriptionID, mock.Anything, mock.Anything, 
mock.Anything).Return(ResultBytes, nil, nil, nil) + uni.pluginORM.On("SetResult", RequestID, ResultBytes, mock.Anything, mock.Anything).Return(nil) + + request := &functions_service.OffchainRequest{ + RequestId: RequestID[:], + RequestInitiator: SubscriptionOwner.Bytes(), + SubscriptionId: SubscriptionID, + SubscriptionOwner: SubscriptionOwner.Bytes(), + Timestamp: uint64(time.Now().Unix()), + Data: functions_service.RequestData{}, + } + require.NoError(t, uni.service.HandleOffchainRequest(testutils.Context(t), request)) +} + +func TestFunctionsListener_HandleOffchainRequest_Invalid(t *testing.T) { + testutils.SkipShortDB(t) + t.Parallel() + uni := NewFunctionsListenerUniverse(t, 0, 1_000_000) + + request := &functions_service.OffchainRequest{ + RequestId: RequestID[:], + RequestInitiator: []byte("invalid_address"), + SubscriptionId: SubscriptionID, + SubscriptionOwner: SubscriptionOwner.Bytes(), + Timestamp: uint64(time.Now().Unix()), + Data: functions_service.RequestData{}, + } + require.Error(t, uni.service.HandleOffchainRequest(testutils.Context(t), request)) + + request.RequestInitiator = SubscriptionOwner.Bytes() + request.SubscriptionOwner = []byte("invalid_address") + require.Error(t, uni.service.HandleOffchainRequest(testutils.Context(t), request)) + + request.SubscriptionOwner = SubscriptionOwner.Bytes() + request.Timestamp = 1 + require.Error(t, uni.service.HandleOffchainRequest(testutils.Context(t), request)) +} + +func TestFunctionsListener_HandleOffchainRequest_InternalError(t *testing.T) { + testutils.SkipShortDB(t) + t.Parallel() + uni := NewFunctionsListenerUniverse(t, 0, 1_000_000) + uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Return(nil) + uni.bridgeAccessor.On("NewExternalAdapterClient").Return(uni.eaClient, nil) + uni.eaClient.On("RunComputation", mock.Anything, RequestIDStr, mock.Anything, SubscriptionOwner.Hex(), SubscriptionID, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, nil, errors.New("error")) + 
uni.pluginORM.On("SetError", RequestID, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + request := &functions_service.OffchainRequest{ + RequestId: RequestID[:], + RequestInitiator: SubscriptionOwner.Bytes(), + SubscriptionId: SubscriptionID, + SubscriptionOwner: SubscriptionOwner.Bytes(), + Timestamp: uint64(time.Now().Unix()), + Data: functions_service.RequestData{}, + } + require.Error(t, uni.service.HandleOffchainRequest(testutils.Context(t), request)) +} + +func TestFunctionsListener_HandleOracleRequestV1_ComputationError(t *testing.T) { + testutils.SkipShortDB(t) + t.Parallel() + + uni := NewFunctionsListenerUniverse(t, 0, 1_000_000) + doneCh := make(chan struct{}) + + request := types.OracleRequest{ + RequestId: RequestID, + SubscriptionId: SubscriptionID, + SubscriptionOwner: SubscriptionOwner, + Flags: packFlags(1, 0), // tier no 1 of request size, allows up to 100 bytes + Data: make([]byte, 12), + } + + uni.logPollerWrapper.On("LatestEvents").Return([]types.OracleRequest{request}, nil, nil).Once() + uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Return(nil) + uni.bridgeAccessor.On("NewExternalAdapterClient").Return(uni.eaClient, nil) + uni.eaClient.On("RunComputation", mock.Anything, RequestIDStr, mock.Anything, SubscriptionOwner.Hex(), SubscriptionID, mock.Anything, mock.Anything, mock.Anything).Return(nil, ErrorBytes, nil, nil) + uni.pluginORM.On("SetError", RequestID, mock.Anything, ErrorBytes, mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + close(doneCh) + }).Return(nil) + + servicetest.Run(t, uni.service) + <-doneCh +} + +func TestFunctionsListener_HandleOracleRequestV1_ThresholdDecryptedSecrets(t *testing.T) { + testutils.SkipShortDB(t) + t.Parallel() + + reqData := &struct { + SecretsLocation int `cbor:"secretsLocation"` + Secrets []byte `cbor:"secrets"` + }{ + SecretsLocation: 1, + Secrets: 
EncryptedSecretsUrls, + } + cborBytes, err := cbor.Marshal(reqData) + require.NoError(t, err) + // Remove first byte (map header) to make it "diet" CBOR + cborBytes = cborBytes[1:] + request := types.OracleRequest{ + RequestId: RequestID, + SubscriptionId: SubscriptionID, + SubscriptionOwner: SubscriptionOwner, + Flags: packFlags(1, 1), // tiers no 1 of request size and secrets size, allow up to 100 bytes + Data: cborBytes, + } + + uni := NewFunctionsListenerUniverse(t, 0, 1_000_000) + doneCh := make(chan struct{}) + + uni.logPollerWrapper.On("LatestEvents").Return([]types.OracleRequest{request}, nil, nil).Once() + uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Return(nil) + uni.bridgeAccessor.On("NewExternalAdapterClient").Return(uni.eaClient, nil) + uni.eaClient.On("FetchEncryptedSecrets", mock.Anything, mock.Anything, RequestIDStr, mock.Anything, mock.Anything).Return(EncryptedSecrets, nil, nil) + uni.decryptor.On("Decrypt", mock.Anything, decryptionPlugin.CiphertextId(RequestID[:]), EncryptedSecrets).Return(DecryptedSecrets, nil) + uni.eaClient.On("RunComputation", mock.Anything, RequestIDStr, mock.Anything, SubscriptionOwner.Hex(), SubscriptionID, mock.Anything, mock.Anything, mock.Anything).Return(ResultBytes, nil, nil, nil) + uni.pluginORM.On("SetResult", RequestID, ResultBytes, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + close(doneCh) + }).Return(nil) + + servicetest.Run(t, uni.service) + <-doneCh +} + +func TestFunctionsListener_HandleOracleRequestV1_CBORTooBig(t *testing.T) { + testutils.SkipShortDB(t) + t.Parallel() + + uni := NewFunctionsListenerUniverse(t, 0, 1_000_000) + doneCh := make(chan struct{}) + + request := types.OracleRequest{ + RequestId: RequestID, + SubscriptionId: SubscriptionID, + SubscriptionOwner: SubscriptionOwner, + Flags: packFlags(0, 0), // tier no 0 of request size, allows only for max 10 bytes + Data: make([]byte, 20), + } + + 
uni.logPollerWrapper.On("LatestEvents").Return([]types.OracleRequest{request}, nil, nil).Once() + uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Return(nil) + uni.pluginORM.On("SetError", RequestID, functions_service.USER_ERROR, []byte("request too big (max 10 bytes)"), mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + close(doneCh) + }).Return(nil) + + servicetest.Run(t, uni.service) + <-doneCh +} + +func TestFunctionsListener_ReportSourceCodeDomains(t *testing.T) { + testutils.SkipShortDB(t) + t.Parallel() + + uni := NewFunctionsListenerUniverse(t, 0, 1_000_000) + doneCh := make(chan struct{}) + + request := types.OracleRequest{ + RequestId: RequestID, + SubscriptionId: SubscriptionID, + SubscriptionOwner: SubscriptionOwner, + Flags: packFlags(1, 0), // tier no 1 of request size, allows up to 100 bytes + Data: make([]byte, 12), + } + + uni.logPollerWrapper.On("LatestEvents").Return([]types.OracleRequest{request}, nil, nil).Once() + uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Return(nil) + uni.bridgeAccessor.On("NewExternalAdapterClient").Return(uni.eaClient, nil) + uni.eaClient.On("RunComputation", mock.Anything, RequestIDStr, mock.Anything, SubscriptionOwner.Hex(), SubscriptionID, mock.Anything, mock.Anything, mock.Anything).Return(ResultBytes, nil, Domains, nil) + uni.pluginORM.On("SetResult", RequestID, ResultBytes, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + close(doneCh) + }).Return(nil) + var sentMessage []byte + uni.ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) { + sentMessage = args[1].([]byte) + }) + + require.NoError(t, uni.service.Start(testutils.Context(t))) + <-doneCh + uni.service.Close() + + 
assert.NotEmpty(t, sentMessage) + var req telem.FunctionsRequest + err := proto.Unmarshal(sentMessage, &req) + assert.NoError(t, err) + assert.Equal(t, RequestIDStr, req.RequestId) + assert.EqualValues(t, Domains, req.Domains) +} + +func TestFunctionsListener_PruneRequests(t *testing.T) { + testutils.SkipShortDB(t) + t.Parallel() + + uni := NewFunctionsListenerUniverse(t, 0, 1) + doneCh := make(chan bool) + uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.pluginORM.On("PruneOldestRequests", functions_service.DefaultPruneMaxStoredRequests, functions_service.DefaultPruneBatchSize, mock.Anything).Return(uint32(0), uint32(0), nil).Run(func(args mock.Arguments) { + doneCh <- true + }) + + servicetest.Run(t, uni.service) + <-doneCh +} + +func TestFunctionsListener_TimeoutRequests(t *testing.T) { + testutils.SkipShortDB(t) + t.Parallel() + + uni := NewFunctionsListenerUniverse(t, 1, 0) + doneCh := make(chan bool) + uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.pluginORM.On("TimeoutExpiredResults", mock.Anything, uint32(1), mock.Anything).Return([]functions_service.RequestID{}, nil).Run(func(args mock.Arguments) { + doneCh <- true + }) + + servicetest.Run(t, uni.service) + <-doneCh +} + +func TestFunctionsListener_ORMDoesNotFreezeHandlersForever(t *testing.T) { + testutils.SkipShortDB(t) + t.Parallel() + + var ormCallExited sync.WaitGroup + ormCallExited.Add(1) + uni := NewFunctionsListenerUniverse(t, 0, 0) + request := types.OracleRequest{} + + uni.logPollerWrapper.On("LatestEvents").Return([]types.OracleRequest{request}, nil, nil).Once() + uni.logPollerWrapper.On("LatestEvents").Return(nil, nil, nil) + uni.pluginORM.On("CreateRequest", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + var queryerWrapper pg.Q + args.Get(1).(pg.QOpt)(&queryerWrapper) + <-queryerWrapper.ParentCtx.Done() + ormCallExited.Done() + }).Return(errors.New("timeout")) + + servicetest.Run(t, uni.service) + ormCallExited.Wait() // should not 
freeze +} diff --git a/core/services/functions/mocks/bridge_accessor.go b/core/services/functions/mocks/bridge_accessor.go new file mode 100644 index 00000000..f76b0bb2 --- /dev/null +++ b/core/services/functions/mocks/bridge_accessor.go @@ -0,0 +1,57 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + functions "github.com/goplugin/pluginv3.0/v2/core/services/functions" + mock "github.com/stretchr/testify/mock" +) + +// BridgeAccessor is an autogenerated mock type for the BridgeAccessor type +type BridgeAccessor struct { + mock.Mock +} + +// NewExternalAdapterClient provides a mock function with given fields: +func (_m *BridgeAccessor) NewExternalAdapterClient() (functions.ExternalAdapterClient, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for NewExternalAdapterClient") + } + + var r0 functions.ExternalAdapterClient + var r1 error + if rf, ok := ret.Get(0).(func() (functions.ExternalAdapterClient, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() functions.ExternalAdapterClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(functions.ExternalAdapterClient) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewBridgeAccessor creates a new instance of BridgeAccessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewBridgeAccessor(t interface { + mock.TestingT + Cleanup(func()) +}) *BridgeAccessor { + mock := &BridgeAccessor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/functions/mocks/external_adapter_client.go b/core/services/functions/mocks/external_adapter_client.go new file mode 100644 index 00000000..b5b5bd4b --- /dev/null +++ b/core/services/functions/mocks/external_adapter_client.go @@ -0,0 +1,116 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + functions "github.com/goplugin/pluginv3.0/v2/core/services/functions" + mock "github.com/stretchr/testify/mock" +) + +// ExternalAdapterClient is an autogenerated mock type for the ExternalAdapterClient type +type ExternalAdapterClient struct { + mock.Mock +} + +// FetchEncryptedSecrets provides a mock function with given fields: ctx, encryptedSecretsUrls, requestId, jobName +func (_m *ExternalAdapterClient) FetchEncryptedSecrets(ctx context.Context, encryptedSecretsUrls []byte, requestId string, jobName string) ([]byte, []byte, error) { + ret := _m.Called(ctx, encryptedSecretsUrls, requestId, jobName) + + if len(ret) == 0 { + panic("no return value specified for FetchEncryptedSecrets") + } + + var r0 []byte + var r1 []byte + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, string, string) ([]byte, []byte, error)); ok { + return rf(ctx, encryptedSecretsUrls, requestId, jobName) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, string, string) []byte); ok { + r0 = rf(ctx, encryptedSecretsUrls, requestId, jobName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, string, string) []byte); ok { + r1 = rf(ctx, encryptedSecretsUrls, requestId, jobName) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, []byte, 
string, string) error); ok { + r2 = rf(ctx, encryptedSecretsUrls, requestId, jobName) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// RunComputation provides a mock function with given fields: ctx, requestId, jobName, subscriptionOwner, subscriptionId, flags, nodeProvidedSecrets, requestData +func (_m *ExternalAdapterClient) RunComputation(ctx context.Context, requestId string, jobName string, subscriptionOwner string, subscriptionId uint64, flags functions.RequestFlags, nodeProvidedSecrets string, requestData *functions.RequestData) ([]byte, []byte, []string, error) { + ret := _m.Called(ctx, requestId, jobName, subscriptionOwner, subscriptionId, flags, nodeProvidedSecrets, requestData) + + if len(ret) == 0 { + panic("no return value specified for RunComputation") + } + + var r0 []byte + var r1 []byte + var r2 []string + var r3 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, uint64, functions.RequestFlags, string, *functions.RequestData) ([]byte, []byte, []string, error)); ok { + return rf(ctx, requestId, jobName, subscriptionOwner, subscriptionId, flags, nodeProvidedSecrets, requestData) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, uint64, functions.RequestFlags, string, *functions.RequestData) []byte); ok { + r0 = rf(ctx, requestId, jobName, subscriptionOwner, subscriptionId, flags, nodeProvidedSecrets, requestData) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, uint64, functions.RequestFlags, string, *functions.RequestData) []byte); ok { + r1 = rf(ctx, requestId, jobName, subscriptionOwner, subscriptionId, flags, nodeProvidedSecrets, requestData) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, string, string, string, uint64, functions.RequestFlags, string, *functions.RequestData) []string); ok { + r2 = rf(ctx, 
requestId, jobName, subscriptionOwner, subscriptionId, flags, nodeProvidedSecrets, requestData) + } else { + if ret.Get(2) != nil { + r2 = ret.Get(2).([]string) + } + } + + if rf, ok := ret.Get(3).(func(context.Context, string, string, string, uint64, functions.RequestFlags, string, *functions.RequestData) error); ok { + r3 = rf(ctx, requestId, jobName, subscriptionOwner, subscriptionId, flags, nodeProvidedSecrets, requestData) + } else { + r3 = ret.Error(3) + } + + return r0, r1, r2, r3 +} + +// NewExternalAdapterClient creates a new instance of ExternalAdapterClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExternalAdapterClient(t interface { + mock.TestingT + Cleanup(func()) +}) *ExternalAdapterClient { + mock := &ExternalAdapterClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/functions/mocks/functions_listener.go b/core/services/functions/mocks/functions_listener.go new file mode 100644 index 00000000..d67bfbd5 --- /dev/null +++ b/core/services/functions/mocks/functions_listener.go @@ -0,0 +1,83 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + functions "github.com/goplugin/pluginv3.0/v2/core/services/functions" + mock "github.com/stretchr/testify/mock" +) + +// FunctionsListener is an autogenerated mock type for the FunctionsListener type +type FunctionsListener struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *FunctionsListener) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// HandleOffchainRequest provides a mock function with given fields: ctx, request +func (_m *FunctionsListener) HandleOffchainRequest(ctx context.Context, request *functions.OffchainRequest) error { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for HandleOffchainRequest") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *functions.OffchainRequest) error); ok { + r0 = rf(ctx, request) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *FunctionsListener) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewFunctionsListener creates a new instance of FunctionsListener. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewFunctionsListener(t interface { + mock.TestingT + Cleanup(func()) +}) *FunctionsListener { + mock := &FunctionsListener{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/functions/mocks/offchain_transmitter.go b/core/services/functions/mocks/offchain_transmitter.go new file mode 100644 index 00000000..49dbe32c --- /dev/null +++ b/core/services/functions/mocks/offchain_transmitter.go @@ -0,0 +1,67 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + functions "github.com/goplugin/pluginv3.0/v2/core/services/functions" + mock "github.com/stretchr/testify/mock" +) + +// OffchainTransmitter is an autogenerated mock type for the OffchainTransmitter type +type OffchainTransmitter struct { + mock.Mock +} + +// ReportChannel provides a mock function with given fields: +func (_m *OffchainTransmitter) ReportChannel() chan *functions.OffchainResponse { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ReportChannel") + } + + var r0 chan *functions.OffchainResponse + if rf, ok := ret.Get(0).(func() chan *functions.OffchainResponse); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(chan *functions.OffchainResponse) + } + } + + return r0 +} + +// TransmitReport provides a mock function with given fields: ctx, report +func (_m *OffchainTransmitter) TransmitReport(ctx context.Context, report *functions.OffchainResponse) error { + ret := _m.Called(ctx, report) + + if len(ret) == 0 { + panic("no return value specified for TransmitReport") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *functions.OffchainResponse) error); ok { + r0 = rf(ctx, report) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewOffchainTransmitter creates a new instance of OffchainTransmitter. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewOffchainTransmitter(t interface { + mock.TestingT + Cleanup(func()) +}) *OffchainTransmitter { + mock := &OffchainTransmitter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/functions/mocks/orm.go b/core/services/functions/mocks/orm.go new file mode 100644 index 00000000..d38a350e --- /dev/null +++ b/core/services/functions/mocks/orm.go @@ -0,0 +1,309 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + functions "github.com/goplugin/pluginv3.0/v2/core/services/functions" + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + time "time" +) + +// ORM is an autogenerated mock type for the ORM type +type ORM struct { + mock.Mock +} + +// CreateRequest provides a mock function with given fields: request, qopts +func (_m *ORM) CreateRequest(request *functions.Request, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, request) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateRequest") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*functions.Request, ...pg.QOpt) error); ok { + r0 = rf(request, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FindById provides a mock function with given fields: requestID, qopts +func (_m *ORM) FindById(requestID functions.RequestID, qopts ...pg.QOpt) (*functions.Request, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, requestID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for FindById") + } + + var r0 *functions.Request + var r1 error + if rf, ok := ret.Get(0).(func(functions.RequestID, ...pg.QOpt) (*functions.Request, error)); ok { + return rf(requestID, qopts...) + } + if rf, ok := ret.Get(0).(func(functions.RequestID, ...pg.QOpt) *functions.Request); ok { + r0 = rf(requestID, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*functions.Request) + } + } + + if rf, ok := ret.Get(1).(func(functions.RequestID, ...pg.QOpt) error); ok { + r1 = rf(requestID, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindOldestEntriesByState provides a mock function with given fields: state, limit, qopts +func (_m *ORM) FindOldestEntriesByState(state functions.RequestState, limit uint32, qopts ...pg.QOpt) ([]functions.Request, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, state, limit) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for FindOldestEntriesByState") + } + + var r0 []functions.Request + var r1 error + if rf, ok := ret.Get(0).(func(functions.RequestState, uint32, ...pg.QOpt) ([]functions.Request, error)); ok { + return rf(state, limit, qopts...) + } + if rf, ok := ret.Get(0).(func(functions.RequestState, uint32, ...pg.QOpt) []functions.Request); ok { + r0 = rf(state, limit, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]functions.Request) + } + } + + if rf, ok := ret.Get(1).(func(functions.RequestState, uint32, ...pg.QOpt) error); ok { + r1 = rf(state, limit, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PruneOldestRequests provides a mock function with given fields: maxRequestsInDB, batchSize, qopts +func (_m *ORM) PruneOldestRequests(maxRequestsInDB uint32, batchSize uint32, qopts ...pg.QOpt) (uint32, uint32, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, maxRequestsInDB, batchSize) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for PruneOldestRequests") + } + + var r0 uint32 + var r1 uint32 + var r2 error + if rf, ok := ret.Get(0).(func(uint32, uint32, ...pg.QOpt) (uint32, uint32, error)); ok { + return rf(maxRequestsInDB, batchSize, qopts...) + } + if rf, ok := ret.Get(0).(func(uint32, uint32, ...pg.QOpt) uint32); ok { + r0 = rf(maxRequestsInDB, batchSize, qopts...) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(uint32, uint32, ...pg.QOpt) uint32); ok { + r1 = rf(maxRequestsInDB, batchSize, qopts...) + } else { + r1 = ret.Get(1).(uint32) + } + + if rf, ok := ret.Get(2).(func(uint32, uint32, ...pg.QOpt) error); ok { + r2 = rf(maxRequestsInDB, batchSize, qopts...) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SetConfirmed provides a mock function with given fields: requestID, qopts +func (_m *ORM) SetConfirmed(requestID functions.RequestID, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, requestID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SetConfirmed") + } + + var r0 error + if rf, ok := ret.Get(0).(func(functions.RequestID, ...pg.QOpt) error); ok { + r0 = rf(requestID, qopts...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetError provides a mock function with given fields: requestID, errorType, computationError, readyAt, readyForProcessing, qopts +func (_m *ORM) SetError(requestID functions.RequestID, errorType functions.ErrType, computationError []byte, readyAt time.Time, readyForProcessing bool, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, requestID, errorType, computationError, readyAt, readyForProcessing) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SetError") + } + + var r0 error + if rf, ok := ret.Get(0).(func(functions.RequestID, functions.ErrType, []byte, time.Time, bool, ...pg.QOpt) error); ok { + r0 = rf(requestID, errorType, computationError, readyAt, readyForProcessing, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetFinalized provides a mock function with given fields: requestID, reportedResult, reportedError, qopts +func (_m *ORM) SetFinalized(requestID functions.RequestID, reportedResult []byte, reportedError []byte, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, requestID, reportedResult, reportedError) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SetFinalized") + } + + var r0 error + if rf, ok := ret.Get(0).(func(functions.RequestID, []byte, []byte, ...pg.QOpt) error); ok { + r0 = rf(requestID, reportedResult, reportedError, qopts...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetResult provides a mock function with given fields: requestID, computationResult, readyAt, qopts +func (_m *ORM) SetResult(requestID functions.RequestID, computationResult []byte, readyAt time.Time, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, requestID, computationResult, readyAt) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SetResult") + } + + var r0 error + if rf, ok := ret.Get(0).(func(functions.RequestID, []byte, time.Time, ...pg.QOpt) error); ok { + r0 = rf(requestID, computationResult, readyAt, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TimeoutExpiredResults provides a mock function with given fields: cutoff, limit, qopts +func (_m *ORM) TimeoutExpiredResults(cutoff time.Time, limit uint32, qopts ...pg.QOpt) ([]functions.RequestID, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, cutoff, limit) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for TimeoutExpiredResults") + } + + var r0 []functions.RequestID + var r1 error + if rf, ok := ret.Get(0).(func(time.Time, uint32, ...pg.QOpt) ([]functions.RequestID, error)); ok { + return rf(cutoff, limit, qopts...) + } + if rf, ok := ret.Get(0).(func(time.Time, uint32, ...pg.QOpt) []functions.RequestID); ok { + r0 = rf(cutoff, limit, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]functions.RequestID) + } + } + + if rf, ok := ret.Get(1).(func(time.Time, uint32, ...pg.QOpt) error); ok { + r1 = rf(cutoff, limit, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewORM creates a new instance of ORM. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewORM(t interface { + mock.TestingT + Cleanup(func()) +}) *ORM { + mock := &ORM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/functions/models.go b/core/services/functions/models.go new file mode 100644 index 00000000..2dadf3da --- /dev/null +++ b/core/services/functions/models.go @@ -0,0 +1,187 @@ +package functions + +import ( + "database/sql/driver" + "encoding/hex" + "errors" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" +) + +const RequestIDLength int = 32 + +type RequestID [RequestIDLength]byte +type Request struct { + RequestID RequestID + ReceivedAt time.Time + RequestTxHash *common.Hash + State RequestState + ResultReadyAt *time.Time + Result []byte + ErrorType *ErrType + Error []byte + TransmittedResult []byte + TransmittedError []byte + Flags []byte + AggregationMethod *AggregationMethod + CallbackGasLimit *uint32 + CoordinatorContractAddress *common.Address + OnchainMetadata []byte + ProcessingMetadata []byte +} + +type RequestState int8 +type AggregationMethod int8 + +const ( + // IN_PROGRESS is the initial state of a request, set right after receiving it in an on-chain event. + IN_PROGRESS RequestState = iota + + // RESULT_READY means that computation has finished executing (with either success or user error). + // OCR2 reporting includes only requests in RESULT_READY state (for Query and Observation phases). + RESULT_READY + + // TIMED_OUT request has been waiting to get confirmed on chain for too long. + // It won't be included in OCR2 reporting rounds any more. + TIMED_OUT + + // FINALIZED request is a part of a report produced by OCR2 and has now entered the transmission protocol + // (i.e. passed through ShouldAcceptFinalizedReport()). 
+ FINALIZED + + // CONFIRMED state indicates that we received an on-chain confirmation event + // (with or without this node's participation in an earlier OCR round). + // We can transition here at any time (full fan-in) and cannot transition out (empty fan-out). + // This is a desired and expected final state for every request. + CONFIRMED +) + +/* + * +-----------+ + * +----+IN_PROGRESS+----------------+ + * | +-----+-----+ | + * | | | + * | v v + * | +------------+ +---------+ + * | |RESULT_READY+---------->|TIMED_OUT| + * | +------+-----+ +---------+ + * | | ^ + * | v | + * | +---------+ | + * +---->|FINALIZED|-----------------+ + * +---------+ + * + * \ / + * | + * v + * +---------+ + * |CONFIRMED| + * +---------+ + */ +func CheckStateTransition(prev RequestState, next RequestState) error { + sameStateError := errors.New("attempt to set the same state") + if prev == CONFIRMED { + return errors.New("cannot transition out of CONFIRMED state") + } + transitions := map[RequestState]map[RequestState]error{ + IN_PROGRESS: { + IN_PROGRESS: nil, // allowed for re-tries due to internal errors (request will stay IN_PROGRESS until processing succeeds) + RESULT_READY: nil, // computation completed (either successfully or not) + TIMED_OUT: nil, // timing out a request in progress - what happened to the computation? + FINALIZED: nil, // generated a report without this node's participation in OCR round + CONFIRMED: nil, // received an on-chain result confirmation + }, + RESULT_READY: { + IN_PROGRESS: errors.New("cannot go back from RESULT_READY to IN_PROGRESS"), + RESULT_READY: sameStateError, + TIMED_OUT: nil, // timing out a request - why was it never picked up by OCR reporting? 
+ FINALIZED: nil, // part of an OCR report as expected + CONFIRMED: nil, // received an on-chain result confirmation + }, + TIMED_OUT: { + IN_PROGRESS: errors.New("cannot go back from TIMED_OUT to IN_PROGRESS"), + RESULT_READY: errors.New("cannot go back from TIMED_OUT to RESULT_READY"), + TIMED_OUT: sameStateError, + FINALIZED: errors.New("result already timed out but we're trying to transmit it (maybe a harmless race with the timer?)"), + CONFIRMED: nil, // received an on-chain result confirmation + }, + FINALIZED: { + IN_PROGRESS: errors.New("cannot go back from FINALIZED to IN_PROGRESS"), + RESULT_READY: errors.New("cannot go back from FINALIZED to RESULT_READY, result was already finalized by DON before this request was picked up"), + TIMED_OUT: nil, // timed out while in transmission - no reason to attempt sending it any more + FINALIZED: sameStateError, + CONFIRMED: nil, // received an on-chain result confirmation + }, + // CONFIRMED handled earlier + } + + nextMap, exists := transitions[prev] + if !exists { + return fmt.Errorf("unaccounted for state transition attempt, this should never happen (prev: %v, next: %v)", prev, next) + } + retErr, exists := nextMap[next] + if !exists { + return fmt.Errorf("unaccounted for state transition attempt, this should never happen (prev: %v, next: %v)", prev, next) + } + return retErr +} + +type ErrType int8 + +const ( + NONE ErrType = iota + // caused by internal infra problems, potentially retryable + INTERNAL_ERROR + // caused by user's code (exception, crash, timeout, ...) 
+	USER_ERROR
+)
+
+func (r *RequestID) Scan(value interface{}) error {
+	bytes, ok := value.([]byte)
+	if !ok {
+		return fmt.Errorf("can't scan %T into RequestID", value)
+	}
+	if len(bytes) != RequestIDLength {
+		return fmt.Errorf("can't scan []byte of len %d into RequestID, want %d", len(bytes), RequestIDLength)
+	}
+	copy(r[:], bytes)
+	return nil
+}
+
+func (r RequestID) Value() (driver.Value, error) {
+	return r[:], nil
+}
+
+func (s RequestState) String() string {
+	switch s {
+	case IN_PROGRESS:
+		return "InProgress"
+	case RESULT_READY:
+		return "ResultReady"
+	case TIMED_OUT:
+		return "TimedOut"
+	case FINALIZED:
+		return "Finalized"
+	case CONFIRMED:
+		return "Confirmed"
+	}
+	return "unknown"
+}
+
+func (e ErrType) String() string {
+	switch e {
+	case NONE:
+		return "None"
+	case INTERNAL_ERROR:
+		return "InternalError"
+	case USER_ERROR:
+		return "UserError"
+	}
+	return "unknown"
+}
+
+func (r RequestID) String() string {
+	return hex.EncodeToString(r[:])
+}
diff --git a/core/services/functions/offchain_transmitter.go b/core/services/functions/offchain_transmitter.go
new file mode 100644
index 00000000..63527937
--- /dev/null
+++ b/core/services/functions/offchain_transmitter.go
@@ -0,0 +1,40 @@
+package functions
+
+import (
+	"context"
+)
+
+// Simple wrapper around a channel to transmit offchain reports between
+// OCR plugin and Gateway connector
+//
+//go:generate mockery --quiet --name OffchainTransmitter --output ./mocks/ --case=underscore
+type OffchainTransmitter interface {
+	TransmitReport(ctx context.Context, report *OffchainResponse) error
+	ReportChannel() chan *OffchainResponse
+}
+
+type offchainTransmitter struct {
+	reportCh chan *OffchainResponse
+}
+
+func NewOffchainTransmitter(chanSize uint32) OffchainTransmitter {
+	return &offchainTransmitter{
+		reportCh: make(chan *OffchainResponse, chanSize),
+	}
+}
+
+// TransmitReport delivers the report to the channel, or gives up when ctx is
+// done. Returning ctx.Err() (rather than a fresh error) preserves the actual
+// cancellation cause (context.Canceled vs. context.DeadlineExceeded) for callers.
+func (t *offchainTransmitter) TransmitReport(ctx context.Context, report *OffchainResponse) error {
+	select {
+	case t.reportCh <- report:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+func (t *offchainTransmitter) ReportChannel() chan *OffchainResponse {
+	return t.reportCh
+}
diff --git a/core/services/functions/offchain_transmitter_test.go b/core/services/functions/offchain_transmitter_test.go
new file mode 100644
index 00000000..e2448b48
--- /dev/null
+++ b/core/services/functions/offchain_transmitter_test.go
@@ -0,0 +1,31 @@
+package functions_test
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
+	"github.com/goplugin/pluginv3.0/v2/core/services/functions"
+)
+
+func TestOffchainTransmitter(t *testing.T) {
+	t.Parallel()
+
+	transmitter := functions.NewOffchainTransmitter(1)
+	ch := transmitter.ReportChannel()
+	report := &functions.OffchainResponse{RequestId: []byte("testID")}
+	ctx := testutils.Context(t)
+
+	require.NoError(t, transmitter.TransmitReport(ctx, report))
+	require.Equal(t, report, <-ch)
+
+	require.NoError(t, transmitter.TransmitReport(ctx, report))
+
+	ctxTimeout, cancel := context.WithTimeout(ctx, time.Millisecond*20)
+	defer cancel()
+	// should not freeze
+	require.Error(t, transmitter.TransmitReport(ctxTimeout, report))
+}
diff --git a/core/services/functions/orm.go b/core/services/functions/orm.go
new file mode 100644
index 00000000..76e368d9
--- /dev/null
+++ b/core/services/functions/orm.go
@@ -0,0 +1,260 @@
+package functions
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/pkg/errors"
+
+	"github.com/jmoiron/sqlx"
+
+	"github.com/goplugin/pluginv3.0/v2/core/logger"
+	"github.com/goplugin/pluginv3.0/v2/core/services/pg"
+)
+
+//go:generate mockery --quiet --name ORM --output ./mocks/ --case=underscore
+
+type ORM interface {
+	CreateRequest(request *Request, qopts ...pg.QOpt) error
+
+	SetResult(requestID RequestID, computationResult []byte, readyAt
time.Time, qopts ...pg.QOpt) error + SetError(requestID RequestID, errorType ErrType, computationError []byte, readyAt time.Time, readyForProcessing bool, qopts ...pg.QOpt) error + SetFinalized(requestID RequestID, reportedResult []byte, reportedError []byte, qopts ...pg.QOpt) error + SetConfirmed(requestID RequestID, qopts ...pg.QOpt) error + + TimeoutExpiredResults(cutoff time.Time, limit uint32, qopts ...pg.QOpt) ([]RequestID, error) + + FindOldestEntriesByState(state RequestState, limit uint32, qopts ...pg.QOpt) ([]Request, error) + FindById(requestID RequestID, qopts ...pg.QOpt) (*Request, error) + + PruneOldestRequests(maxRequestsInDB uint32, batchSize uint32, qopts ...pg.QOpt) (total uint32, pruned uint32, err error) +} + +type orm struct { + q pg.Q + contractAddress common.Address +} + +var _ ORM = (*orm)(nil) + +var ErrDuplicateRequestID = errors.New("Functions ORM: duplicate request ID") + +const ( + tableName = "functions_requests" + defaultInitialState = IN_PROGRESS + requestFields = "request_id, received_at, request_tx_hash, " + + "state, result_ready_at, result, error_type, error, " + + "transmitted_result, transmitted_error, flags, aggregation_method, " + + "callback_gas_limit, coordinator_contract_address, onchain_metadata, processing_metadata" +) + +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig, contractAddress common.Address) ORM { + return &orm{ + q: pg.NewQ(db, lggr, cfg), + contractAddress: contractAddress, + } +} + +func (o *orm) CreateRequest(request *Request, qopts ...pg.QOpt) error { + stmt := fmt.Sprintf(` + INSERT INTO %s (request_id, contract_address, received_at, request_tx_hash, state, flags, aggregation_method, callback_gas_limit, coordinator_contract_address, onchain_metadata) + VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10) ON CONFLICT (request_id) DO NOTHING; + `, tableName) + result, err := o.q.WithOpts(qopts...).Exec( + stmt, + request.RequestID, + o.contractAddress, + request.ReceivedAt, + request.RequestTxHash, + 
defaultInitialState, + request.Flags, + request.AggregationMethod, + request.CallbackGasLimit, + request.CoordinatorContractAddress, + request.OnchainMetadata) + if err != nil { + return err + } + nrows, err := result.RowsAffected() + if err != nil { + return err + } + if nrows == 0 { + return ErrDuplicateRequestID + } + return nil +} + +func (o *orm) setWithStateTransitionCheck(requestID RequestID, newState RequestState, setter func(pg.Queryer) error, qopts ...pg.QOpt) error { + err := o.q.WithOpts(qopts...).Transaction(func(tx pg.Queryer) error { + prevState := defaultInitialState + stmt := fmt.Sprintf(`SELECT state FROM %s WHERE request_id=$1 AND contract_address=$2;`, tableName) + if err2 := tx.Get(&prevState, stmt, requestID, o.contractAddress); err2 != nil { + return err2 + } + if err2 := CheckStateTransition(prevState, newState); err2 != nil { + return err2 + } + return setter(tx) + }) + + return err +} + +func (o *orm) SetResult(requestID RequestID, computationResult []byte, readyAt time.Time, qopts ...pg.QOpt) error { + newState := RESULT_READY + err := o.setWithStateTransitionCheck(requestID, newState, func(tx pg.Queryer) error { + stmt := fmt.Sprintf(` + UPDATE %s + SET result=$3, result_ready_at=$4, state=$5 + WHERE request_id=$1 AND contract_address=$2; + `, tableName) + _, err2 := tx.Exec(stmt, requestID, o.contractAddress, computationResult, readyAt, newState) + return err2 + }, qopts...) 
+ return err +} + +func (o *orm) SetError(requestID RequestID, errorType ErrType, computationError []byte, readyAt time.Time, readyForProcessing bool, qopts ...pg.QOpt) error { + var newState RequestState + if readyForProcessing { + newState = RESULT_READY + } else { + newState = IN_PROGRESS + } + err := o.setWithStateTransitionCheck(requestID, newState, func(tx pg.Queryer) error { + stmt := fmt.Sprintf(` + UPDATE %s + SET error=$3, error_type=$4, result_ready_at=$5, state=$6 + WHERE request_id=$1 AND contract_address=$2; + `, tableName) + _, err2 := tx.Exec(stmt, requestID, o.contractAddress, computationError, errorType, readyAt, newState) + return err2 + }, qopts...) + return err +} + +func (o *orm) SetFinalized(requestID RequestID, reportedResult []byte, reportedError []byte, qopts ...pg.QOpt) error { + newState := FINALIZED + err := o.setWithStateTransitionCheck(requestID, newState, func(tx pg.Queryer) error { + stmt := fmt.Sprintf(` + UPDATE %s + SET transmitted_result=$3, transmitted_error=$4, state=$5 + WHERE request_id=$1 AND contract_address=$2; + `, tableName) + _, err2 := tx.Exec(stmt, requestID, o.contractAddress, reportedResult, reportedError, newState) + return err2 + }, qopts...) + return err +} + +func (o *orm) SetConfirmed(requestID RequestID, qopts ...pg.QOpt) error { + newState := CONFIRMED + err := o.setWithStateTransitionCheck(requestID, newState, func(tx pg.Queryer) error { + stmt := fmt.Sprintf(`UPDATE %s SET state=$3 WHERE request_id=$1 AND contract_address=$2;`, tableName) + _, err2 := tx.Exec(stmt, requestID, o.contractAddress, newState) + return err2 + }, qopts...) 
+ return err +} + +func (o *orm) TimeoutExpiredResults(cutoff time.Time, limit uint32, qopts ...pg.QOpt) ([]RequestID, error) { + var ids []RequestID + allowedPrevStates := []RequestState{IN_PROGRESS, RESULT_READY, FINALIZED} + nextState := TIMED_OUT + for _, state := range allowedPrevStates { + // sanity checks + if err := CheckStateTransition(state, nextState); err != nil { + return ids, err + } + } + err := o.q.WithOpts(qopts...).Transaction(func(tx pg.Queryer) error { + selectStmt := fmt.Sprintf(` + SELECT request_id + FROM %s + WHERE (state=$1 OR state=$2 OR state=$3) AND contract_address=$4 AND received_at < ($5) + ORDER BY received_at + LIMIT $6;`, tableName) + if err2 := tx.Select(&ids, selectStmt, allowedPrevStates[0], allowedPrevStates[1], allowedPrevStates[2], o.contractAddress, cutoff, limit); err2 != nil { + return err2 + } + if len(ids) == 0 { + return nil + } + + a := map[string]any{ + "nextState": nextState, + "contractAddr": o.contractAddress, + "ids": ids, + } + updateStmt, args, err2 := sqlx.Named(fmt.Sprintf(` + UPDATE %s + SET state = :nextState + WHERE contract_address = :contractAddr AND request_id IN (:ids);`, tableName), a) + if err2 != nil { + return err2 + } + updateStmt, args, err2 = sqlx.In(updateStmt, args...) 
+ if err2 != nil { + return err2 + } + updateStmt = tx.Rebind(updateStmt) + if _, err2 := tx.Exec(updateStmt, args...); err2 != nil { + return err2 + } + return nil + }) + + return ids, err +} + +func (o *orm) FindOldestEntriesByState(state RequestState, limit uint32, qopts ...pg.QOpt) ([]Request, error) { + var requests []Request + stmt := fmt.Sprintf(`SELECT %s FROM %s WHERE state=$1 AND contract_address=$2 ORDER BY received_at LIMIT $3;`, requestFields, tableName) + if err := o.q.WithOpts(qopts...).Select(&requests, stmt, state, o.contractAddress, limit); err != nil { + return nil, err + } + return requests, nil +} + +func (o *orm) FindById(requestID RequestID, qopts ...pg.QOpt) (*Request, error) { + var request Request + stmt := fmt.Sprintf(`SELECT %s FROM %s WHERE request_id=$1 AND contract_address=$2;`, requestFields, tableName) + if err := o.q.WithOpts(qopts...).Get(&request, stmt, requestID, o.contractAddress); err != nil { + return nil, err + } + return &request, nil +} + +func (o *orm) PruneOldestRequests(maxStoredRequests uint32, batchSize uint32, qopts ...pg.QOpt) (total uint32, pruned uint32, err error) { + err = o.q.WithOpts(qopts...).Transaction(func(tx pg.Queryer) error { + stmt := fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE contract_address=$1`, tableName) + if err2 := tx.Get(&total, stmt, o.contractAddress); err2 != nil { + return errors.Wrap(err2, "failed to get request count") + } + + if total <= maxStoredRequests { + pruned = 0 + return nil + } + + pruneLimit := total - maxStoredRequests + if pruneLimit > batchSize { + pruneLimit = batchSize + } + + with := fmt.Sprintf(`WITH ids AS (SELECT request_id FROM %s WHERE contract_address = $1 ORDER BY received_at LIMIT $2)`, tableName) + deleteStmt := fmt.Sprintf(`%s DELETE FROM %s WHERE contract_address = $1 AND request_id IN (SELECT request_id FROM ids);`, with, tableName) + res, err2 := tx.Exec(deleteStmt, o.contractAddress, pruneLimit) + if err2 != nil { + return err2 + } + prunedInt64, err2 := 
res.RowsAffected() + if err2 == nil { + pruned = uint32(prunedInt64) + } + return err2 + }) + return +} diff --git a/core/services/functions/orm_test.go b/core/services/functions/orm_test.go new file mode 100644 index 00000000..f5ea4cb2 --- /dev/null +++ b/core/services/functions/orm_test.go @@ -0,0 +1,381 @@ +package functions_test + +import ( + "errors" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/functions" +) + +var ( + defaultFlags = []byte{0x1, 0x2, 0x3} + defaultAggregationMethod = functions.AggregationMethod(65) + defaultGasLimit = uint32(100_000) + defaultCoordinatorContract = common.HexToAddress("0x0000000000000000000000000000000000000000") + defaultMetadata = []byte{0xbb} +) + +func setupORM(t *testing.T) functions.ORM { + t.Helper() + + var ( + db = pgtest.NewSqlxDB(t) + lggr = logger.TestLogger(t) + contract = testutils.NewAddress() + orm = functions.NewORM(db, lggr, pgtest.NewQConfig(true), contract) + ) + + return orm +} + +func newRequestID() functions.RequestID { + return testutils.Random32Byte() +} + +func createRequest(t *testing.T, orm functions.ORM) (functions.RequestID, common.Hash, time.Time) { + ts := time.Now().Round(time.Second) + id, hash := createRequestWithTimestamp(t, orm, ts) + return id, hash, ts +} + +func createRequestWithTimestamp(t *testing.T, orm functions.ORM, ts time.Time) (functions.RequestID, common.Hash) { + id := newRequestID() + txHash := utils.RandomHash() + newReq := &functions.Request{ + RequestID: id, + RequestTxHash: &txHash, + ReceivedAt: ts, + Flags: defaultFlags, + AggregationMethod: &defaultAggregationMethod, + CallbackGasLimit: &defaultGasLimit, + 
CoordinatorContractAddress: &defaultCoordinatorContract, + OnchainMetadata: defaultMetadata, + } + err := orm.CreateRequest(newReq) + require.NoError(t, err) + return id, txHash +} + +func TestORM_CreateRequestsAndFindByID(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + id1, txHash1, ts1 := createRequest(t, orm) + id2, txHash2, ts2 := createRequest(t, orm) + + req1, err := orm.FindById(id1) + require.NoError(t, err) + require.Equal(t, id1, req1.RequestID) + require.Equal(t, &txHash1, req1.RequestTxHash) + require.Equal(t, ts1, req1.ReceivedAt) + require.Equal(t, functions.IN_PROGRESS, req1.State) + require.Equal(t, defaultFlags, req1.Flags) + require.Equal(t, defaultAggregationMethod, *req1.AggregationMethod) + require.Equal(t, defaultGasLimit, *req1.CallbackGasLimit) + require.Equal(t, defaultCoordinatorContract, *req1.CoordinatorContractAddress) + require.Equal(t, defaultMetadata, req1.OnchainMetadata) + + req2, err := orm.FindById(id2) + require.NoError(t, err) + require.Equal(t, id2, req2.RequestID) + require.Equal(t, &txHash2, req2.RequestTxHash) + require.Equal(t, ts2, req2.ReceivedAt) + require.Equal(t, functions.IN_PROGRESS, req2.State) + + t.Run("missing ID", func(t *testing.T) { + req, err := orm.FindById(newRequestID()) + require.Error(t, err) + require.Nil(t, req) + }) + + t.Run("duplicated", func(t *testing.T) { + newReq := &functions.Request{RequestID: id1, RequestTxHash: &txHash1, ReceivedAt: ts1} + err := orm.CreateRequest(newReq) + require.Error(t, err) + require.True(t, errors.Is(err, functions.ErrDuplicateRequestID)) + }) +} + +func TestORM_SetResult(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + id, _, ts := createRequest(t, orm) + + rdts := time.Now().Round(time.Second) + err := orm.SetResult(id, []byte("result"), rdts) + require.NoError(t, err) + + req, err := orm.FindById(id) + require.NoError(t, err) + require.Equal(t, id, req.RequestID) + require.Equal(t, ts, req.ReceivedAt) + require.NotNil(t, req.ResultReadyAt) + 
require.Equal(t, rdts, *req.ResultReadyAt) + require.Equal(t, functions.RESULT_READY, req.State) + require.Equal(t, []byte("result"), req.Result) +} + +func TestORM_SetError(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + id, _, ts := createRequest(t, orm) + + rdts := time.Now().Round(time.Second) + err := orm.SetError(id, functions.USER_ERROR, []byte("error"), rdts, true) + require.NoError(t, err) + + req, err := orm.FindById(id) + require.NoError(t, err) + require.Equal(t, id, req.RequestID) + require.Equal(t, ts, req.ReceivedAt) + require.NotNil(t, req.ResultReadyAt) + require.Equal(t, rdts, *req.ResultReadyAt) + require.NotNil(t, req.ErrorType) + require.Equal(t, functions.USER_ERROR, *req.ErrorType) + require.Equal(t, functions.RESULT_READY, req.State) + require.Equal(t, []byte("error"), req.Error) +} + +func TestORM_SetError_Internal(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + id, _, ts := createRequest(t, orm) + + rdts := time.Now().Round(time.Second) + err := orm.SetError(id, functions.INTERNAL_ERROR, []byte("error"), rdts, false) + require.NoError(t, err) + + req, err := orm.FindById(id) + require.NoError(t, err) + require.Equal(t, id, req.RequestID) + require.Equal(t, ts, req.ReceivedAt) + require.Equal(t, functions.INTERNAL_ERROR, *req.ErrorType) + require.Equal(t, functions.IN_PROGRESS, req.State) + require.Equal(t, []byte("error"), req.Error) +} + +func TestORM_SetFinalized(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + id, _, _ := createRequest(t, orm) + + err := orm.SetFinalized(id, []byte("result"), []byte("error")) + require.NoError(t, err) + + req, err := orm.FindById(id) + require.NoError(t, err) + require.Equal(t, []byte("result"), req.TransmittedResult) + require.Equal(t, []byte("error"), req.TransmittedError) + require.Equal(t, functions.FINALIZED, req.State) +} + +func TestORM_SetConfirmed(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + id, _, _ := createRequest(t, orm) + + err := orm.SetConfirmed(id) + 
require.NoError(t, err) + + req, err := orm.FindById(id) + require.NoError(t, err) + require.Equal(t, functions.CONFIRMED, req.State) +} + +func TestORM_StateTransitions(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + now := time.Now() + id, _ := createRequestWithTimestamp(t, orm, now) + req, err := orm.FindById(id) + require.NoError(t, err) + require.Equal(t, functions.IN_PROGRESS, req.State) + + err = orm.SetResult(id, []byte{}, now) + require.NoError(t, err) + req, err = orm.FindById(id) + require.NoError(t, err) + require.Equal(t, functions.RESULT_READY, req.State) + + _, err = orm.TimeoutExpiredResults(now.Add(time.Minute), 1) + require.NoError(t, err) + req, err = orm.FindById(id) + require.NoError(t, err) + require.Equal(t, functions.TIMED_OUT, req.State) + + err = orm.SetFinalized(id, nil, nil) + require.Error(t, err) + req, err = orm.FindById(id) + require.NoError(t, err) + require.Equal(t, functions.TIMED_OUT, req.State) + + err = orm.SetConfirmed(id) + require.NoError(t, err) + req, err = orm.FindById(id) + require.NoError(t, err) + require.Equal(t, functions.CONFIRMED, req.State) +} + +func TestORM_FindOldestEntriesByState(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + now := time.Now() + id2, _ := createRequestWithTimestamp(t, orm, now.Add(2*time.Minute)) + createRequestWithTimestamp(t, orm, now.Add(3*time.Minute)) + id1, _ := createRequestWithTimestamp(t, orm, now.Add(1*time.Minute)) + + t.Run("with limit", func(t *testing.T) { + result, err := orm.FindOldestEntriesByState(functions.IN_PROGRESS, 2) + require.NoError(t, err) + require.Equal(t, 2, len(result), "incorrect results length") + require.Equal(t, id1, result[0].RequestID, "incorrect results order") + require.Equal(t, id2, result[1].RequestID, "incorrect results order") + + require.Equal(t, defaultFlags, result[0].Flags) + require.Equal(t, defaultAggregationMethod, *result[0].AggregationMethod) + require.Equal(t, defaultGasLimit, *result[0].CallbackGasLimit) + require.Equal(t, 
defaultCoordinatorContract, *result[0].CoordinatorContractAddress) + require.Equal(t, defaultMetadata, result[0].OnchainMetadata) + + }) + + t.Run("with no limit", func(t *testing.T) { + result, err := orm.FindOldestEntriesByState(functions.IN_PROGRESS, 20) + require.NoError(t, err) + require.Equal(t, 3, len(result), "incorrect results length") + }) + + t.Run("no matching entries", func(t *testing.T) { + result, err := orm.FindOldestEntriesByState(functions.RESULT_READY, 10) + require.NoError(t, err) + require.Equal(t, 0, len(result), "incorrect results length") + }) +} + +func TestORM_TimeoutExpiredResults(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + now := time.Now() + var ids []functions.RequestID + for offset := -50; offset <= -10; offset += 10 { + id, _ := createRequestWithTimestamp(t, orm, now.Add(time.Duration(offset)*time.Minute)) + ids = append(ids, id) + } + // can time out IN_PROGRESS, RESULT_READY or FINALIZED + err := orm.SetResult(ids[0], []byte("result"), now) + require.NoError(t, err) + err = orm.SetFinalized(ids[1], []byte("result"), []byte("")) + require.NoError(t, err) + // can't time out CONFIRMED + err = orm.SetConfirmed(ids[2]) + require.NoError(t, err) + + results, err := orm.TimeoutExpiredResults(now.Add(-35*time.Minute), 1) + require.NoError(t, err) + require.Equal(t, 1, len(results), "not respecting limit") + require.Equal(t, ids[0], results[0], "incorrect results order") + + results, err = orm.TimeoutExpiredResults(now.Add(-15*time.Minute), 10) + require.NoError(t, err) + require.Equal(t, 2, len(results), "incorrect results length") + require.Equal(t, ids[1], results[0], "incorrect results order") + require.Equal(t, ids[3], results[1], "incorrect results order") + + results, err = orm.TimeoutExpiredResults(now.Add(-15*time.Minute), 10) + require.NoError(t, err) + require.Equal(t, 0, len(results), "not idempotent") + + expectedFinalStates := []functions.RequestState{ + functions.TIMED_OUT, + functions.TIMED_OUT, + 
functions.CONFIRMED, + functions.TIMED_OUT, + functions.IN_PROGRESS, + } + for i, expectedState := range expectedFinalStates { + req, err := orm.FindById(ids[i]) + require.NoError(t, err) + require.Equal(t, req.State, expectedState, "incorrect state") + } +} + +func TestORM_PruneOldestRequests(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + now := time.Now() + var ids []functions.RequestID + // store 5 requests + for offset := -50; offset <= -10; offset += 10 { + id, _ := createRequestWithTimestamp(t, orm, now.Add(time.Duration(offset)*time.Minute)) + ids = append(ids, id) + } + + // don't prune if max not hit + total, pruned, err := orm.PruneOldestRequests(6, 3) + require.NoError(t, err) + require.Equal(t, uint32(5), total) + require.Equal(t, uint32(0), pruned) + + // prune up to max batch size + total, pruned, err = orm.PruneOldestRequests(1, 2) + require.NoError(t, err) + require.Equal(t, uint32(5), total) + require.Equal(t, uint32(2), pruned) + + // prune all above the limit + total, pruned, err = orm.PruneOldestRequests(1, 20) + require.NoError(t, err) + require.Equal(t, uint32(3), total) + require.Equal(t, uint32(2), pruned) + + // no pruning needed any more + total, pruned, err = orm.PruneOldestRequests(1, 20) + require.NoError(t, err) + require.Equal(t, uint32(1), total) + require.Equal(t, uint32(0), pruned) + + // verify only the newest one is left after pruning + result, err := orm.FindOldestEntriesByState(functions.IN_PROGRESS, 20) + require.NoError(t, err) + require.Equal(t, 1, len(result), "incorrect results length") + require.Equal(t, ids[4], result[0].RequestID, "incorrect results order") +} + +func TestORM_PruneOldestRequests_Large(t *testing.T) { + t.Parallel() + + orm := setupORM(t) + now := time.Now() + // store 1000 requests + for offset := -1000; offset <= -1; offset++ { + _, _ = createRequestWithTimestamp(t, orm, now.Add(time.Duration(offset)*time.Minute)) + } + + // prune 900/1000 + total, pruned, err := orm.PruneOldestRequests(100, 
1000) + require.NoError(t, err) + require.Equal(t, uint32(1000), total) + require.Equal(t, uint32(900), pruned) + + // verify there's 100 left + result, err := orm.FindOldestEntriesByState(functions.IN_PROGRESS, 200) + require.NoError(t, err) + require.Equal(t, 100, len(result), "incorrect results length") +} diff --git a/core/services/functions/request.go b/core/services/functions/request.go new file mode 100644 index 00000000..eaa92fc8 --- /dev/null +++ b/core/services/functions/request.go @@ -0,0 +1,61 @@ +package functions + +const ( + LocationInline = 0 + LocationRemote = 1 + LocationDONHosted = 2 + LanguageJavaScript = 0 + + RequestStatePending = 1 + RequestStateComplete = 2 + RequestStateInternalError = 3 +) + +type RequestFlags [32]byte + +type OffchainRequest struct { + RequestId []byte `json:"requestId"` + RequestInitiator []byte `json:"requestInitiator"` + SubscriptionId uint64 `json:"subscriptionId"` + SubscriptionOwner []byte `json:"subscriptionOwner"` + Timestamp uint64 `json:"timestamp"` + Data RequestData `json:"data"` +} + +type RequestData struct { + Source string `json:"source" cbor:"source"` + Language int `json:"language" cbor:"language"` + CodeLocation int `json:"codeLocation" cbor:"codeLocation"` + Secrets []byte `json:"secrets,omitempty" cbor:"secrets"` + SecretsLocation int `json:"secretsLocation" cbor:"secretsLocation"` + Args []string `json:"args,omitempty" cbor:"args"` + BytesArgs [][]byte `json:"bytesArgs,omitempty" cbor:"bytesArgs"` +} + +// NOTE: to be extended with raw report and signatures when needed +type OffchainResponse struct { + RequestId []byte `json:"requestId"` + Result []byte `json:"result,omitempty"` + Error []byte `json:"error,omitempty"` +} + +type HeartbeatResponse struct { + Status int `json:"status"` + InternalError string `json:"internalError,omitempty"` + ReceivedTs uint64 `json:"receivedTs"` + CompletedTs uint64 `json:"completedTs"` + Response *OffchainResponse `json:"response,omitempty"` +} + +type 
DONHostedSecrets struct { + SlotID uint `json:"slotId" cbor:"slotId"` + Version uint64 `json:"version" cbor:"version"` +} + +type SignedRequestData struct { + CodeLocation int `json:"codeLocation" cbor:"codeLocation"` + Language int `json:"language" cbor:"language"` + Secrets []byte `json:"secrets" cbor:"secrets"` + SecretsLocation int `json:"secretsLocation" cbor:"secretsLocation"` + Source string `json:"source" cbor:"source"` +} diff --git a/core/services/gateway/api/codec.go b/core/services/gateway/api/codec.go new file mode 100644 index 00000000..01d07e9c --- /dev/null +++ b/core/services/gateway/api/codec.go @@ -0,0 +1,14 @@ +package api + +// Codec implements (de)serialization of Message objects. +type Codec interface { + DecodeRequest(msgBytes []byte) (*Message, error) + + EncodeRequest(msg *Message) ([]byte, error) + + DecodeResponse(msgBytes []byte) (*Message, error) + + EncodeResponse(msg *Message) ([]byte, error) + + EncodeNewErrorResponse(id string, code int, message string, data []byte) ([]byte, error) +} diff --git a/core/services/gateway/api/constants.go b/core/services/gateway/api/constants.go new file mode 100644 index 00000000..c028e259 --- /dev/null +++ b/core/services/gateway/api/constants.go @@ -0,0 +1,72 @@ +package api + +type ErrorCode int + +const ( + NoError ErrorCode = iota + UserMessageParseError + UnsupportedDONIdError + HandlerError + RequestTimeoutError + NodeReponseEncodingError + FatalError +) + +func (e ErrorCode) String() string { + switch e { + case NoError: + return "NoError" + case UserMessageParseError: + return "UserMessageParseError" + case UnsupportedDONIdError: + return "UnsupportedDONIdError" + case HandlerError: + return "HandlerError" + case RequestTimeoutError: + return "RequestTimeoutError" + case NodeReponseEncodingError: + return "NodeReponseEncodingError" + case FatalError: + return "FatalError" + default: + return "UnknownError" + } +} + +// See https://www.jsonrpc.org/specification#error_object +func 
ToJsonRPCErrorCode(errorCode ErrorCode) int { + gatewayErrorToJsonRPCError := map[ErrorCode]int{ + NoError: 0, + UserMessageParseError: -32700, // Parse Error + UnsupportedDONIdError: -32602, // Invalid Params + HandlerError: -32600, // Invalid Request + RequestTimeoutError: -32000, // Server Error + NodeReponseEncodingError: -32603, // Internal Error + FatalError: -32000, // Server Error + } + + code, ok := gatewayErrorToJsonRPCError[errorCode] + if !ok { + return -32000 + } + return code +} + +// See https://go.dev/src/net/http/status.go +func ToHttpErrorCode(errorCode ErrorCode) int { + gatewayErrorToHttpError := map[ErrorCode]int{ + NoError: 200, // OK + UserMessageParseError: 400, // Bad Request + UnsupportedDONIdError: 400, // Bad Request + HandlerError: 400, // Bad Request + RequestTimeoutError: 504, // Gateway Timeout + NodeReponseEncodingError: 500, // Internal Server Error + FatalError: 500, // Internal Server Error + } + + code, ok := gatewayErrorToHttpError[errorCode] + if !ok { + return 500 + } + return code +} diff --git a/core/services/gateway/api/jsonrpccodec.go b/core/services/gateway/api/jsonrpccodec.go new file mode 100644 index 00000000..e570941a --- /dev/null +++ b/core/services/gateway/api/jsonrpccodec.go @@ -0,0 +1,101 @@ +package api + +import ( + "encoding/json" + "errors" + "fmt" +) + +// Wrapping/unwrapping Message objects into JSON RPC ones folllowing https://www.jsonrpc.org/specification +type JsonRPCRequest struct { + Version string `json:"jsonrpc"` + Id string `json:"id"` + Method string `json:"method"` + Params *Message `json:"params,omitempty"` +} + +type JsonRPCResponse struct { + Version string `json:"jsonrpc"` + Id string `json:"id"` + Result *Message `json:"result,omitempty"` + Error *JsonRPCError `json:"error,omitempty"` +} + +// JSON-RPC error can only be sent to users. It is not used for messages between Gateways and Nodes. 
+type JsonRPCError struct { + Code int `json:"code"` + Message string `json:"message"` + Data json.RawMessage `json:"data,omitempty"` +} + +type JsonRPCCodec struct { +} + +var _ Codec = (*JsonRPCCodec)(nil) + +func (*JsonRPCCodec) DecodeRequest(msgBytes []byte) (*Message, error) { + var request JsonRPCRequest + err := json.Unmarshal(msgBytes, &request) + if err != nil { + return nil, err + } + if request.Version != "2.0" { + return nil, errors.New("incorrect jsonrpc version") + } + if request.Method == "" { + return nil, errors.New("empty method field") + } + if request.Params == nil { + return nil, errors.New("missing params attribute") + } + request.Params.Body.MessageId = request.Id + request.Params.Body.Method = request.Method + return request.Params, nil +} + +func (*JsonRPCCodec) EncodeRequest(msg *Message) ([]byte, error) { + request := JsonRPCRequest{ + Version: "2.0", + Id: msg.Body.MessageId, + Method: msg.Body.Method, + Params: msg, + } + return json.Marshal(request) +} + +func (*JsonRPCCodec) DecodeResponse(msgBytes []byte) (*Message, error) { + var response JsonRPCResponse + err := json.Unmarshal(msgBytes, &response) + if err != nil { + return nil, err + } + if response.Error != nil { + return nil, fmt.Errorf("received non-empty error field: %v", response.Error) + } + if response.Result != nil { + response.Result.Body.MessageId = response.Id + } + return response.Result, nil +} + +func (*JsonRPCCodec) EncodeResponse(msg *Message) ([]byte, error) { + response := JsonRPCResponse{ + Version: "2.0", + Id: msg.Body.MessageId, + Result: msg, + } + return json.Marshal(response) +} + +func (*JsonRPCCodec) EncodeNewErrorResponse(id string, code int, message string, data []byte) ([]byte, error) { + response := JsonRPCResponse{ + Version: "2.0", + Id: id, + Error: &JsonRPCError{ + Code: code, + Message: message, + Data: data, + }, + } + return json.Marshal(response) +} diff --git a/core/services/gateway/api/jsonrpccodec_test.go 
b/core/services/gateway/api/jsonrpccodec_test.go new file mode 100644 index 00000000..ae4342ea --- /dev/null +++ b/core/services/gateway/api/jsonrpccodec_test.go @@ -0,0 +1,91 @@ +package api_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" +) + +func TestJsonRPCRequest_Decode_Correct(t *testing.T) { + t.Parallel() + + input := []byte(`{"jsonrpc": "2.0", "id": "aa-bb", "method": "upload", "params": {"body":{"don_id": "functions_local", "payload": {"field": 123}}}}`) + codec := api.JsonRPCCodec{} + msg, err := codec.DecodeRequest(input) + require.NoError(t, err) + require.Equal(t, "functions_local", msg.Body.DonId) + require.Equal(t, "aa-bb", msg.Body.MessageId) + require.Equal(t, "upload", msg.Body.Method) + require.NotEmpty(t, msg.Body.Payload) +} + +func TestJsonRPCRequest_Decode_Incorrect(t *testing.T) { + t.Parallel() + + testCases := map[string]string{ + "missing params": `{"jsonrpc": "2.0", "id": "abc", "method": "upload"}`, + "numeric id": `{"jsonrpc": "2.0", "id": 123, "method": "upload", "params": {}}`, + "empty method": `{"jsonrpc": "2.0", "id": "abc", "method": "", "params": {}}`, + "incorrect rpc version": `{"jsonrpc": "5.1", "id": "abc", "method": "upload", "params": {}}`, + } + + codec := api.JsonRPCCodec{} + for _, input := range testCases { + _, err := codec.DecodeRequest([]byte(input)) + require.Error(t, err) + } +} + +func TestJsonRPCRequest_Encode(t *testing.T) { + t.Parallel() + + var msg api.Message + msg.Body = api.MessageBody{ + MessageId: "aA-bB", + Receiver: "0x1234", + Method: "upload", + } + codec := api.JsonRPCCodec{} + bytes, err := codec.EncodeRequest(&msg) + require.NoError(t, err) + + decoded, err := codec.DecodeRequest(bytes) + require.NoError(t, err) + require.Equal(t, "aA-bB", decoded.Body.MessageId) + require.Equal(t, "0x1234", decoded.Body.Receiver) + require.Equal(t, "upload", decoded.Body.Method) +} + +func TestJsonRPCResponse_Decode(t 
*testing.T) { + t.Parallel() + + input := []byte(`{"jsonrpc": "2.0", "id": "aa-bb", "result": {"body": {"don_id": "functions_local", "payload": {"field": 123}}}}`) + codec := api.JsonRPCCodec{} + msg, err := codec.DecodeResponse(input) + require.NoError(t, err) + require.Equal(t, "functions_local", msg.Body.DonId) + require.Equal(t, "aa-bb", msg.Body.MessageId) + require.NotEmpty(t, msg.Body.Payload) +} + +func TestJsonRPCResponse_Encode(t *testing.T) { + t.Parallel() + + var msg api.Message + msg.Body = api.MessageBody{ + MessageId: "aA-bB", + Receiver: "0x1234", + Method: "upload", + } + codec := api.JsonRPCCodec{} + bytes, err := codec.EncodeResponse(&msg) + require.NoError(t, err) + + decoded, err := codec.DecodeResponse(bytes) + require.NoError(t, err) + require.Equal(t, "aA-bB", decoded.Body.MessageId) + require.Equal(t, "0x1234", decoded.Body.Receiver) + require.Equal(t, "upload", decoded.Body.Method) +} diff --git a/core/services/gateway/api/message.go b/core/services/gateway/api/message.go new file mode 100644 index 00000000..d185b53a --- /dev/null +++ b/core/services/gateway/api/message.go @@ -0,0 +1,128 @@ +package api + +import ( + "crypto/ecdsa" + "encoding/json" + "errors" + "strings" + + "github.com/ethereum/go-ethereum/crypto" + + "github.com/goplugin/plugin-common/pkg/utils/hex" + gw_common "github.com/goplugin/pluginv3.0/v2/core/services/gateway/common" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const ( + MessageSignatureLen = 65 + MessageSignatureHexEncodedLen = 2 + 2*MessageSignatureLen + MessageIdMaxLen = 128 + MessageMethodMaxLen = 64 + MessageDonIdMaxLen = 64 + MessageReceiverLen = 2 + 2*20 + NullChar = "\x00" +) + +/* + * Top-level Message structure containing: + * - universal fields identifying the request, the sender and the target DON/service + * - product-specific payload + * + * Signature, Receiver and Sender are hex-encoded with a "0x" prefix. 
+ */ +type Message struct { + Signature string `json:"signature"` + Body MessageBody `json:"body"` +} + +type MessageBody struct { + MessageId string `json:"message_id"` + Method string `json:"method"` + DonId string `json:"don_id"` + Receiver string `json:"receiver"` + // Service-specific payload, decoded inside the Handler. + Payload json.RawMessage `json:"payload,omitempty"` + + // Fields only used locally for convenience. Not serialized. + Sender string `json:"-"` +} + +func (m *Message) Validate() error { + if m == nil { + return errors.New("nil message") + } + if len(m.Signature) != MessageSignatureHexEncodedLen { + return errors.New("invalid hex-encoded signature length") + } + if len(m.Body.MessageId) == 0 || len(m.Body.MessageId) > MessageIdMaxLen { + return errors.New("invalid message ID length") + } + if strings.HasSuffix(m.Body.MessageId, NullChar) { + return errors.New("message ID ending with null bytes") + } + if len(m.Body.Method) == 0 || len(m.Body.Method) > MessageMethodMaxLen { + return errors.New("invalid method name length") + } + if strings.HasSuffix(m.Body.Method, NullChar) { + return errors.New("method name ending with null bytes") + } + if len(m.Body.DonId) == 0 || len(m.Body.DonId) > MessageDonIdMaxLen { + return errors.New("invalid DON ID length") + } + if strings.HasSuffix(m.Body.DonId, NullChar) { + return errors.New("DON ID ending with null bytes") + } + if len(m.Body.Receiver) != 0 && len(m.Body.Receiver) != MessageReceiverLen { + return errors.New("invalid Receiver length") + } + signerBytes, err := m.ExtractSigner() + if err != nil { + return err + } + m.Body.Sender = utils.StringToHex(string(signerBytes)) + return nil +} + +// Message signatures are over the following data: +// 1. MessageId aligned to 128 bytes +// 2. Method aligned to 64 bytes +// 3. DonId aligned to 64 bytes +// 4. Receiver (in hex) aligned to 42 bytes +// 5. 
Payload (raw bytes before parsing) +func (m *Message) Sign(privateKey *ecdsa.PrivateKey) error { + if m == nil { + return errors.New("nil message") + } + rawData := getRawMessageBody(&m.Body) + signature, err := gw_common.SignData(privateKey, rawData...) + if err != nil { + return err + } + m.Signature = utils.StringToHex(string(signature)) + m.Body.Sender = strings.ToLower(crypto.PubkeyToAddress(privateKey.PublicKey).Hex()) + return nil +} + +func (m *Message) ExtractSigner() (signerAddress []byte, err error) { + if m == nil { + return nil, errors.New("nil message") + } + rawData := getRawMessageBody(&m.Body) + signatureBytes, err := hex.DecodeString(m.Signature) + if err != nil { + return nil, err + } + return gw_common.ExtractSigner(signatureBytes, rawData...) +} + +func getRawMessageBody(msgBody *MessageBody) [][]byte { + alignedMessageId := make([]byte, MessageIdMaxLen) + copy(alignedMessageId, msgBody.MessageId) + alignedMethod := make([]byte, MessageMethodMaxLen) + copy(alignedMethod, msgBody.Method) + alignedDonId := make([]byte, MessageDonIdMaxLen) + copy(alignedDonId, msgBody.DonId) + alignedReceiver := make([]byte, MessageReceiverLen) + copy(alignedReceiver, msgBody.Receiver) + return [][]byte{alignedMessageId, alignedMethod, alignedDonId, alignedReceiver, msgBody.Payload} +} diff --git a/core/services/gateway/api/message_test.go b/core/services/gateway/api/message_test.go new file mode 100644 index 00000000..9a4c23f4 --- /dev/null +++ b/core/services/gateway/api/message_test.go @@ -0,0 +1,103 @@ +package api_test + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" +) + +func TestMessage_Validate(t *testing.T) { + msg := &api.Message{ + Body: api.MessageBody{ + MessageId: "abcd", + Method: "request", + DonId: "donA", + Receiver: "0x0000000000000000000000000000000000000000", + Payload: []byte("datadata"), + }, + } + 
privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + err = msg.Sign(privateKey) + require.NoError(t, err) + + // valid + require.NoError(t, msg.Validate()) + + // missing message ID + msg.Body.MessageId = "" + require.Error(t, msg.Validate()) + // message ID ending with null bytes + msg.Body.MessageId = "myid\x00\x00" + require.Error(t, msg.Validate()) + msg.Body.MessageId = "abcd" + require.NoError(t, msg.Validate()) + + // missing DON ID + msg.Body.DonId = "" + require.Error(t, msg.Validate()) + // DON ID ending with null bytes + msg.Body.DonId = "mydon\x00\x00" + require.Error(t, msg.Validate()) + msg.Body.DonId = "donA" + require.NoError(t, msg.Validate()) + + // method name too long + msg.Body.Method = string(bytes.Repeat([]byte("a"), api.MessageMethodMaxLen+1)) + require.Error(t, msg.Validate()) + // empty method name + msg.Body.Method = "" + require.Error(t, msg.Validate()) + // method name ending with null bytes + msg.Body.Method = "method\x00" + require.Error(t, msg.Validate()) + msg.Body.Method = "request" + require.NoError(t, msg.Validate()) + + // incorrect receiver + msg.Body.Receiver = "blah" + require.Error(t, msg.Validate()) + msg.Body.Receiver = "0x0000000000000000000000000000000000000000" + require.NoError(t, msg.Validate()) + + // invalid signature + msg.Signature = "0x00" + require.Error(t, msg.Validate()) +} + +func TestMessage_MessageSignAndValidateSignature(t *testing.T) { + t.Parallel() + + msg := &api.Message{ + Body: api.MessageBody{ + MessageId: "abcd", + Method: "request", + DonId: "donA", + Receiver: "0x33", + Payload: []byte("datadata"), + }, + } + + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + address := crypto.PubkeyToAddress(privateKey.PublicKey).Bytes() + + err = msg.Sign(privateKey) + require.NoError(t, err) + require.Equal(t, api.MessageSignatureHexEncodedLen, len(msg.Signature)) + + // valid + signer, err := msg.ExtractSigner() + require.NoError(t, err) + require.True(t, bytes.Equal(address, 
signer)) + + // invalid + msg.Body.MessageId = "dbca" + signer, err = msg.ExtractSigner() + require.NoError(t, err) + require.False(t, bytes.Equal(address, signer)) +} diff --git a/core/services/gateway/common/testutils.go b/core/services/gateway/common/testutils.go new file mode 100644 index 00000000..b903eeef --- /dev/null +++ b/core/services/gateway/common/testutils.go @@ -0,0 +1,26 @@ +package common + +import ( + "crypto/ecdsa" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +type TestNode struct { + Address string + PrivateKey *ecdsa.PrivateKey +} + +func NewTestNodes(t *testing.T, n int) []TestNode { + nodes := make([]TestNode, n) + for i := 0; i < n; i++ { + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + address := strings.ToLower(crypto.PubkeyToAddress(privateKey.PublicKey).Hex()) + nodes[i] = TestNode{Address: address, PrivateKey: privateKey} + } + return nodes +} diff --git a/core/services/gateway/common/utils.go b/core/services/gateway/common/utils.go new file mode 100644 index 00000000..74e2583c --- /dev/null +++ b/core/services/gateway/common/utils.go @@ -0,0 +1,54 @@ +package common + +import ( + "crypto/ecdsa" + "encoding/binary" + "slices" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func Uint32ToBytes(val uint32) []byte { + result := make([]byte, 4) + binary.BigEndian.PutUint32(result, val) + return result +} + +func BytesToUint32(data []byte) uint32 { + return binary.BigEndian.Uint32(data) +} + +// input string can't have any 0x0 characters +func StringToAlignedBytes(input string, size int) []byte { + aligned := make([]byte, size) + copy(aligned, input) + return aligned +} + +func AlignedBytesToString(data []byte) string { + idx := slices.IndexFunc(data, func(b byte) bool { return b == 0 }) + if idx == -1 { + return string(data) + } + return string(data[:idx]) +} + +func flatten(data ...[]byte) []byte { + var result []byte + for _, d := range data { 
+ result = append(result, d...) + } + return result +} + +func SignData(privateKey *ecdsa.PrivateKey, data ...[]byte) ([]byte, error) { + return utils.GenerateEthSignature(privateKey, flatten(data...)) +} + +func ExtractSigner(signature []byte, data ...[]byte) (signerAddress []byte, err error) { + addr, err := utils.GetSignersEthAddress(flatten(data...), signature) + if err != nil { + return nil, err + } + return addr.Bytes(), nil +} diff --git a/core/services/gateway/common/utils_test.go b/core/services/gateway/common/utils_test.go new file mode 100644 index 00000000..677d6310 --- /dev/null +++ b/core/services/gateway/common/utils_test.go @@ -0,0 +1,65 @@ +package common_test + +import ( + "bytes" + "testing" + "time" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/common" +) + +func TestUtils_BytesUint32Conversions(t *testing.T) { + t.Parallel() + + val := uint32(time.Now().Unix()) + data := common.Uint32ToBytes(val) + require.Equal(t, val, common.BytesToUint32(data)) +} + +func TestUtils_StringAlignedBytesConversions(t *testing.T) { + t.Parallel() + + val := "my_string" + data := common.StringToAlignedBytes(val, 40) + require.Equal(t, val, common.AlignedBytesToString(data)) + + val = "0123456789" + data = common.StringToAlignedBytes(val, 10) + require.Equal(t, val, common.AlignedBytesToString(data)) + + val = "世界" + data = common.StringToAlignedBytes(val, 40) + require.Equal(t, val, common.AlignedBytesToString(data)) +} + +func TestUtils_BytesSignAndValidate(t *testing.T) { + t.Parallel() + + data := []byte("data_data") + incorrectData := []byte("some_other_data") + + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + address := crypto.PubkeyToAddress(privateKey.PublicKey).Bytes() + + signature, err := common.SignData(privateKey, data) + require.NoError(t, err) + require.Equal(t, 65, len(signature)) + + // valid + signer, err := 
common.ExtractSigner(signature, data) + require.NoError(t, err) + require.True(t, bytes.Equal(signer, address)) + + // invalid + signer, err = common.ExtractSigner(signature, incorrectData) + require.NoError(t, err) + require.False(t, bytes.Equal(signer, address)) + + // invalid format + _, err = common.ExtractSigner([]byte{0xaa, 0xbb}, data) + require.Error(t, err) +} diff --git a/core/services/gateway/config/config.go b/core/services/gateway/config/config.go new file mode 100644 index 00000000..ed2f1839 --- /dev/null +++ b/core/services/gateway/config/config.go @@ -0,0 +1,34 @@ +package config + +import ( + "encoding/json" + + gw_net "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network" +) + +type GatewayConfig struct { + UserServerConfig gw_net.HTTPServerConfig + NodeServerConfig gw_net.WebSocketServerConfig + ConnectionManagerConfig ConnectionManagerConfig + Dons []DONConfig +} + +type ConnectionManagerConfig struct { + AuthGatewayId string + AuthTimestampToleranceSec uint32 + AuthChallengeLen uint32 + HeartbeatIntervalSec uint32 +} + +type DONConfig struct { + DonId string + HandlerName string + HandlerConfig json.RawMessage + Members []NodeConfig + F int +} + +type NodeConfig struct { + Name string + Address string +} diff --git a/core/services/gateway/connectionmanager.go b/core/services/gateway/connectionmanager.go new file mode 100644 index 00000000..13ab0b90 --- /dev/null +++ b/core/services/gateway/connectionmanager.go @@ -0,0 +1,337 @@ +package gateway + +import ( + "context" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/gorilla/websocket" + "github.com/jonboulle/clockwork" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/multierr" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + 
"github.com/goplugin/pluginv3.0/v2/core/services/gateway/common" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/config" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network" + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +var promKeepalivesSent = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "gateway_keepalives_sent", + Help: "Metric to track the number of successful keepalive ping messages per DON", +}, []string{"don_id"}) + +// ConnectionManager holds all connections between Gateway and Nodes. +type ConnectionManager interface { + job.ServiceCtx + network.ConnectionAcceptor + + DONConnectionManager(donId string) *donConnectionManager + GetPort() int +} + +type connectionManager struct { + services.StateMachine + + config *config.ConnectionManagerConfig + dons map[string]*donConnectionManager + wsServer network.WebSocketServer + clock clockwork.Clock + connAttempts map[string]*connAttempt + connAttemptCounter uint64 + connAttemptsMu sync.Mutex + lggr logger.Logger +} + +func (m *connectionManager) HealthReport() map[string]error { + hr := map[string]error{m.Name(): m.Healthy()} + for _, d := range m.dons { + for _, n := range d.nodes { + services.CopyHealth(hr, n.conn.HealthReport()) + } + } + return hr +} + +func (m *connectionManager) Name() string { return m.lggr.Name() } + +type donConnectionManager struct { + donConfig *config.DONConfig + nodes map[string]*nodeState + handler handlers.Handler + codec api.Codec + closeWait sync.WaitGroup + shutdownCh services.StopChan + lggr logger.Logger +} + +type nodeState struct { + name string + conn network.WSConnectionWrapper +} + +// immutable +type connAttempt struct { + nodeState *nodeState + nodeAddress string + challenge network.ChallengeElems + timestamp uint32 +} + +func NewConnectionManager(gwConfig *config.GatewayConfig, clock clockwork.Clock, lggr logger.Logger) (ConnectionManager, error) { + codec 
:= &api.JsonRPCCodec{} + dons := make(map[string]*donConnectionManager) + for _, donConfig := range gwConfig.Dons { + donConfig := donConfig + if donConfig.DonId == "" { + return nil, errors.New("empty DON ID") + } + _, ok := dons[donConfig.DonId] + if ok { + return nil, fmt.Errorf("duplicate DON ID %s", donConfig.DonId) + } + nodes := make(map[string]*nodeState) + for _, nodeConfig := range donConfig.Members { + nodeAddress := strings.ToLower(nodeConfig.Address) + _, ok := nodes[nodeAddress] + if ok { + return nil, fmt.Errorf("duplicate node address %s in DON %s", nodeAddress, donConfig.DonId) + } + connWrapper := network.NewWSConnectionWrapper(lggr) + if connWrapper == nil { + return nil, fmt.Errorf("error creating WSConnectionWrapper for node %s", nodeAddress) + } + nodes[nodeAddress] = &nodeState{ + name: nodeConfig.Name, + conn: connWrapper, + } + } + dons[donConfig.DonId] = &donConnectionManager{ + donConfig: &donConfig, + codec: codec, + nodes: nodes, + shutdownCh: make(chan struct{}), + lggr: lggr.Named("DONConnectionManager." 
+ donConfig.DonId), + } + } + connMgr := &connectionManager{ + config: &gwConfig.ConnectionManagerConfig, + dons: dons, + connAttempts: make(map[string]*connAttempt), + clock: clock, + lggr: lggr.Named("ConnectionManager"), + } + wsServer := network.NewWebSocketServer(&gwConfig.NodeServerConfig, connMgr, lggr) + connMgr.wsServer = wsServer + return connMgr, nil +} + +func (m *connectionManager) DONConnectionManager(donId string) *donConnectionManager { + return m.dons[donId] +} + +func (m *connectionManager) Start(ctx context.Context) error { + return m.StartOnce("ConnectionManager", func() error { + m.lggr.Info("starting connection manager") + for _, donConnMgr := range m.dons { + donConnMgr.closeWait.Add(len(donConnMgr.nodes)) + for nodeAddress, nodeState := range donConnMgr.nodes { + if err := nodeState.conn.Start(ctx); err != nil { + return err + } + go donConnMgr.readLoop(nodeAddress, nodeState) + } + donConnMgr.closeWait.Add(1) + go donConnMgr.keepaliveLoop(m.config.HeartbeatIntervalSec) + } + return m.wsServer.Start(ctx) + }) +} + +func (m *connectionManager) Close() error { + return m.StopOnce("ConnectionManager", func() (err error) { + m.lggr.Info("closing connection manager") + err = multierr.Combine(err, m.wsServer.Close()) + for _, donConnMgr := range m.dons { + close(donConnMgr.shutdownCh) + for _, nodeState := range donConnMgr.nodes { + nodeState.conn.Close() + } + } + for _, donConnMgr := range m.dons { + donConnMgr.closeWait.Wait() + } + return + }) +} + +func (m *connectionManager) StartHandshake(authHeader []byte) (attemptId string, challenge []byte, err error) { + m.lggr.Debug("StartHandshake") + authHeaderElems, signer, err := network.UnpackSignedAuthHeader(authHeader) + if err != nil { + return "", nil, multierr.Append(network.ErrAuthHeaderParse, err) + } + nodeAddress := "0x" + hex.EncodeToString(signer) + donConnMgr, ok := m.dons[authHeaderElems.DonId] + if !ok { + return "", nil, network.ErrAuthInvalidDonId + } + nodeState, ok := 
donConnMgr.nodes[nodeAddress] + if !ok { + return "", nil, network.ErrAuthInvalidNode + } + if authHeaderElems.GatewayId != m.config.AuthGatewayId { + return "", nil, network.ErrAuthInvalidGateway + } + nowTs := uint32(m.clock.Now().Unix()) + ts := authHeaderElems.Timestamp + if ts < nowTs-m.config.AuthTimestampToleranceSec || nowTs+m.config.AuthTimestampToleranceSec < ts { + return "", nil, network.ErrAuthInvalidTimestamp + } + attemptId, challenge, err = m.newAttempt(nodeState, nodeAddress, ts) + if err != nil { + return "", nil, err + } + return attemptId, challenge, nil +} + +func (m *connectionManager) newAttempt(nodeSt *nodeState, nodeAddress string, timestamp uint32) (string, []byte, error) { + challengeBytes := make([]byte, m.config.AuthChallengeLen) + _, err := rand.Read(challengeBytes) + if err != nil { + return "", nil, err + } + challenge := network.ChallengeElems{Timestamp: timestamp, GatewayId: m.config.AuthGatewayId, ChallengeBytes: challengeBytes} + m.connAttemptsMu.Lock() + defer m.connAttemptsMu.Unlock() + m.connAttemptCounter++ + newId := fmt.Sprintf("%s_%d", nodeAddress, m.connAttemptCounter) + m.connAttempts[newId] = &connAttempt{nodeState: nodeSt, nodeAddress: nodeAddress, challenge: challenge, timestamp: timestamp} + return newId, network.PackChallenge(&challenge), nil +} + +func (m *connectionManager) FinalizeHandshake(attemptId string, response []byte, conn *websocket.Conn) error { + m.lggr.Debugw("FinalizeHandshake", "attemptId", attemptId) + m.connAttemptsMu.Lock() + attempt, ok := m.connAttempts[attemptId] + delete(m.connAttempts, attemptId) + m.connAttemptsMu.Unlock() + if !ok { + return network.ErrChallengeAttemptNotFound + } + signer, err := common.ExtractSigner(response, network.PackChallenge(&attempt.challenge)) + if err != nil || attempt.nodeAddress != "0x"+hex.EncodeToString(signer) { + return network.ErrChallengeInvalidSignature + } + if conn != nil { + conn.SetPongHandler(func(data string) error { + m.lggr.Debugw("received 
keepalive pong from node", "nodeAddress", attempt.nodeAddress) + return nil + }) + } + attempt.nodeState.conn.Reset(conn) + m.lggr.Infof("node %s connected", attempt.nodeAddress) + return nil +} + +func (m *connectionManager) AbortHandshake(attemptId string) { + m.lggr.Debugw("AbortHandshake", "attemptId", attemptId) + m.connAttemptsMu.Lock() + defer m.connAttemptsMu.Unlock() + delete(m.connAttempts, attemptId) +} + +func (m *connectionManager) GetPort() int { + return m.wsServer.GetPort() +} + +func (m *donConnectionManager) SetHandler(handler handlers.Handler) { + m.handler = handler +} + +func (m *donConnectionManager) SendToNode(ctx context.Context, nodeAddress string, msg *api.Message) error { + if msg == nil { + return errors.New("nil message") + } + data, err := m.codec.EncodeRequest(msg) + if err != nil { + return fmt.Errorf("error encoding request for node %s: %v", nodeAddress, err) + } + nodeState := m.nodes[nodeAddress] + if nodeState == nil { + return fmt.Errorf("node %s not found", nodeAddress) + } + return nodeState.conn.Write(ctx, websocket.BinaryMessage, data) +} + +func (m *donConnectionManager) readLoop(nodeAddress string, nodeState *nodeState) { + ctx, _ := m.shutdownCh.NewCtx() + for { + select { + case <-m.shutdownCh: + m.closeWait.Done() + return + case item := <-nodeState.conn.ReadChannel(): + msg, err := m.codec.DecodeResponse(item.Data) + if err != nil { + m.lggr.Errorw("parse error when reading from node", "nodeAddress", nodeAddress, "err", err) + break + } + if err = msg.Validate(); err != nil { + m.lggr.Errorw("message validation error when reading from node", "nodeAddress", nodeAddress, "err", err) + break + } + if msg.Body.Sender != nodeAddress { + m.lggr.Errorw("message sender mismatch when reading from node", "nodeAddress", nodeAddress, "sender", msg.Body.Sender) + break + } + err = m.handler.HandleNodeMessage(ctx, msg, nodeAddress) + if err != nil { + m.lggr.Error("error when calling HandleNodeMessage ", err) + } + } + } +} + +func 
(m *donConnectionManager) keepaliveLoop(intervalSec uint32) { + ctx, _ := m.shutdownCh.NewCtx() + defer m.closeWait.Done() + + if intervalSec == 0 { + m.lggr.Errorw("keepalive interval is 0, keepalive disabled", "donID", m.donConfig.DonId) + return + } + m.lggr.Infow("starting keepalive loop", "donID", m.donConfig.DonId) + + keepaliveTicker := time.NewTicker(time.Duration(intervalSec) * time.Second) + defer keepaliveTicker.Stop() + + for { + select { + case <-m.shutdownCh: + return + case <-keepaliveTicker.C: + errorCount := 0 + for nodeAddress, nodeState := range m.nodes { + err := nodeState.conn.Write(ctx, websocket.PingMessage, []byte{}) + if err != nil { + m.lggr.Debugw("unable to send keepalive ping to node", "nodeAddress", nodeAddress, "name", nodeState.name, "donID", m.donConfig.DonId, "err", err) + errorCount++ + } + } + promKeepalivesSent.WithLabelValues(m.donConfig.DonId).Set(float64(len(m.nodes) - errorCount)) + m.lggr.Infow("sent keepalive pings to nodes", "donID", m.donConfig.DonId, "errCount", errorCount) + } + } +} diff --git a/core/services/gateway/connectionmanager_test.go b/core/services/gateway/connectionmanager_test.go new file mode 100644 index 00000000..d4e5d2c5 --- /dev/null +++ b/core/services/gateway/connectionmanager_test.go @@ -0,0 +1,244 @@ +package gateway_test + +import ( + "crypto/ecdsa" + "fmt" + "testing" + + "github.com/jonboulle/clockwork" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + gc "github.com/goplugin/pluginv3.0/v2/core/services/gateway/common" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/config" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network" +) + +const defaultConfig = ` +[nodeServerConfig] +Path = "/node" + +[[dons]] +DonId = "my_don_1" +HandlerName = 
"dummy" + +[[dons.members]] +Name = "example_node" +Address = "0x68902D681C28119F9B2531473A417088BF008E59" + +[[dons]] +DonId = "my_don_2" +HandlerName = "dummy" + +[[dons.members]] +Name = "example_node" +Address = "0x68902d681c28119f9b2531473a417088bf008e59" +` + +func TestConnectionManager_NewConnectionManager_ValidConfig(t *testing.T) { + t.Parallel() + + tomlConfig := parseTOMLConfig(t, defaultConfig) + + _, err := gateway.NewConnectionManager(tomlConfig, clockwork.NewFakeClock(), logger.TestLogger(t)) + require.NoError(t, err) +} + +func TestConnectionManager_NewConnectionManager_InvalidConfig(t *testing.T) { + t.Parallel() + + invalidCases := map[string]string{ + "duplicate DON ID": ` +[[dons]] +DonId = "my_don" +[[dons]] +DonId = "my_don" +`, + "duplicate node address": ` +[[dons]] +DonId = "my_don" +[[dons.members]] +Name = "node_1" +Address = "0x68902d681c28119f9b2531473a417088bf008e59" +[[dons.members]] +Name = "node_2" +Address = "0x68902d681c28119f9b2531473a417088bf008e59" +`, + "duplicate node address with different casing": ` +[[dons]] +DonId = "my_don" +[[dons.members]] +Name = "node_1" +Address = "0x68902d681c28119f9b2531473a417088bf008e59" +[[dons.members]] +Name = "node_2" +Address = "0x68902D681c28119f9b2531473a417088bf008E59" +`, + } + + for name, config := range invalidCases { + config := config + t.Run(name, func(t *testing.T) { + fullConfig := ` +[nodeServerConfig] +Path = "/node"` + config + _, err := gateway.NewConnectionManager(parseTOMLConfig(t, fullConfig), clockwork.NewFakeClock(), logger.TestLogger(t)) + require.Error(t, err) + }) + } +} + +func newTestConfig(t *testing.T, nNodes int) (*config.GatewayConfig, []gc.TestNode) { + nodes := gc.NewTestNodes(t, nNodes) + + config := ` +[nodeServerConfig] +Path = "/node" +[connectionManagerConfig] +AuthGatewayId = "my_gateway_no_3" +AuthTimestampToleranceSec = 5 +AuthChallengeLen = 100 +[[dons]] +DonId = "my_don_1" +HandlerName = "dummy" +` + + for i := 0; i < nNodes; i++ { + config += 
`[[dons.members]]` + "\n" + config += fmt.Sprintf(`Name = "node_%d"`, i) + "\n" + config += fmt.Sprintf(`Address = "%s"`, nodes[i].Address) + "\n" + } + + return parseTOMLConfig(t, config), nodes +} + +func signAndPackAuthHeader(t *testing.T, authHeaderElems *network.AuthHeaderElems, signerKey *ecdsa.PrivateKey) []byte { + packedElems := network.PackAuthHeader(authHeaderElems) + signature, err := gc.SignData(signerKey, packedElems) + require.NoError(t, err) + return append(packedElems, signature...) +} + +func TestConnectionManager_StartHandshake(t *testing.T) { + t.Parallel() + + config, nodes := newTestConfig(t, 4) + unrelatedNode := gc.NewTestNodes(t, 1)[0] + clock := clockwork.NewFakeClock() + mgr, err := gateway.NewConnectionManager(config, clock, logger.TestLogger(t)) + require.NoError(t, err) + + authHeaderElems := network.AuthHeaderElems{ + Timestamp: uint32(clock.Now().Unix()), + DonId: "my_don_1", + GatewayId: "my_gateway_no_3", + } + + // valid + _, _, err = mgr.StartHandshake(signAndPackAuthHeader(t, &authHeaderElems, nodes[0].PrivateKey)) + require.NoError(t, err) + + // header too short + _, _, err = mgr.StartHandshake([]byte("ab")) + require.ErrorIs(t, err, network.ErrAuthHeaderParse) + + // invalid DON ID + badAuthHeaderElems := authHeaderElems + badAuthHeaderElems.DonId = "my_don_2" + _, _, err = mgr.StartHandshake(signAndPackAuthHeader(t, &badAuthHeaderElems, nodes[0].PrivateKey)) + require.ErrorIs(t, err, network.ErrAuthInvalidDonId) + + // invalid Gateway URL + badAuthHeaderElems = authHeaderElems + badAuthHeaderElems.GatewayId = "www.example.com" + _, _, err = mgr.StartHandshake(signAndPackAuthHeader(t, &badAuthHeaderElems, nodes[0].PrivateKey)) + require.ErrorIs(t, err, network.ErrAuthInvalidGateway) + + // invalid Signer Address + badAuthHeaderElems = authHeaderElems + _, _, err = mgr.StartHandshake(signAndPackAuthHeader(t, &badAuthHeaderElems, unrelatedNode.PrivateKey)) + require.ErrorIs(t, err, network.ErrAuthInvalidNode) + + // invalid 
signature + badAuthHeaderElems = authHeaderElems + rawHeader := signAndPackAuthHeader(t, &badAuthHeaderElems, nodes[0].PrivateKey) + copy(rawHeader[len(rawHeader)-65:], make([]byte, 65)) + _, _, err = mgr.StartHandshake(rawHeader) + require.ErrorIs(t, err, network.ErrAuthHeaderParse) + + // invalid timestamp + badAuthHeaderElems = authHeaderElems + badAuthHeaderElems.Timestamp -= 10 + _, _, err = mgr.StartHandshake(signAndPackAuthHeader(t, &badAuthHeaderElems, nodes[0].PrivateKey)) + require.ErrorIs(t, err, network.ErrAuthInvalidTimestamp) +} + +func TestConnectionManager_FinalizeHandshake(t *testing.T) { + t.Parallel() + + config, nodes := newTestConfig(t, 4) + clock := clockwork.NewFakeClock() + mgr, err := gateway.NewConnectionManager(config, clock, logger.TestLogger(t)) + require.NoError(t, err) + + authHeaderElems := network.AuthHeaderElems{ + Timestamp: uint32(clock.Now().Unix()), + DonId: "my_don_1", + GatewayId: "my_gateway_no_3", + } + + // correct + attemptId, challenge, err := mgr.StartHandshake(signAndPackAuthHeader(t, &authHeaderElems, nodes[0].PrivateKey)) + require.NoError(t, err) + response, err := gc.SignData(nodes[0].PrivateKey, challenge) + require.NoError(t, err) + require.NoError(t, mgr.FinalizeHandshake(attemptId, response, nil)) + + // invalid attempt + err = mgr.FinalizeHandshake("fake_attempt", response, nil) + require.ErrorIs(t, err, network.ErrChallengeAttemptNotFound) + + // invalid signature + attemptId, challenge, err = mgr.StartHandshake(signAndPackAuthHeader(t, &authHeaderElems, nodes[0].PrivateKey)) + require.NoError(t, err) + response, err = gc.SignData(nodes[1].PrivateKey, challenge) + require.NoError(t, err) + err = mgr.FinalizeHandshake(attemptId, response, nil) + require.ErrorIs(t, err, network.ErrChallengeInvalidSignature) +} + +func TestConnectionManager_SendToNode_Failures(t *testing.T) { + t.Parallel() + + config, nodes := newTestConfig(t, 2) + clock := clockwork.NewFakeClock() + mgr, err := 
gateway.NewConnectionManager(config, clock, logger.TestLogger(t)) + require.NoError(t, err) + + donMgr := mgr.DONConnectionManager("my_don_1") + err = donMgr.SendToNode(testutils.Context(t), nodes[0].Address, nil) + require.Error(t, err) + + message := &api.Message{} + err = donMgr.SendToNode(testutils.Context(t), "some_other_node", message) + require.Error(t, err) +} + +func TestConnectionManager_CleanStartClose(t *testing.T) { + t.Parallel() + + config, _ := newTestConfig(t, 2) + config.ConnectionManagerConfig.HeartbeatIntervalSec = 1 + clock := clockwork.NewFakeClock() + mgr, err := gateway.NewConnectionManager(config, clock, logger.TestLogger(t)) + require.NoError(t, err) + + err = mgr.Start(testutils.Context(t)) + require.NoError(t, err) + + err = mgr.Close() + require.NoError(t, err) +} diff --git a/core/services/gateway/connector/config.go b/core/services/gateway/connector/config.go new file mode 100644 index 00000000..f8e68775 --- /dev/null +++ b/core/services/gateway/connector/config.go @@ -0,0 +1,19 @@ +package connector + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network" +) + +type ConnectorConfig struct { + NodeAddress string + DonId string + Gateways []ConnectorGatewayConfig + WsClientConfig network.WebSocketClientConfig + AuthMinChallengeLen int + AuthTimestampToleranceSec uint32 +} + +type ConnectorGatewayConfig struct { + Id string + URL string +} diff --git a/core/services/gateway/connector/connector.go b/core/services/gateway/connector/connector.go new file mode 100644 index 00000000..ab4d40d3 --- /dev/null +++ b/core/services/gateway/connector/connector.go @@ -0,0 +1,265 @@ +package connector + +import ( + "context" + "errors" + "fmt" + "net/url" + "sync" + "time" + + "github.com/gorilla/websocket" + "github.com/jonboulle/clockwork" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils/hex" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + 
"github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +//go:generate mockery --quiet --name GatewayConnector --output ./mocks/ --case=underscore +//go:generate mockery --quiet --name Signer --output ./mocks/ --case=underscore +//go:generate mockery --quiet --name GatewayConnectorHandler --output ./mocks/ --case=underscore + +// GatewayConnector is a component run by Nodes to connect to a set of Gateways. +type GatewayConnector interface { + job.ServiceCtx + network.ConnectionInitiator + + SendToGateway(ctx context.Context, gatewayId string, msg *api.Message) error +} + +// Signer implementation needs to be provided by a GatewayConnector user (node) +// in order to sign handshake messages with node's private key. +type Signer interface { + // Sign keccak256 hash of data. + Sign(data ...[]byte) ([]byte, error) +} + +// GatewayConnector user (node) implements application logic in the Handler interface. 
+type GatewayConnectorHandler interface { + job.ServiceCtx + + HandleGatewayMessage(ctx context.Context, gatewayId string, msg *api.Message) +} + +type gatewayConnector struct { + services.StateMachine + + config *ConnectorConfig + codec api.Codec + clock clockwork.Clock + nodeAddress []byte + signer Signer + handler GatewayConnectorHandler + gateways map[string]*gatewayState + urlToId map[string]string + closeWait sync.WaitGroup + shutdownCh services.StopChan + lggr logger.Logger +} + +func (c *gatewayConnector) HealthReport() map[string]error { + m := map[string]error{c.Name(): c.Healthy()} + for _, g := range c.gateways { + services.CopyHealth(m, g.conn.HealthReport()) + } + return m +} + +func (c *gatewayConnector) Name() string { return c.lggr.Name() } + +type gatewayState struct { + conn network.WSConnectionWrapper + config ConnectorGatewayConfig + url *url.URL + wsClient network.WebSocketClient +} + +func NewGatewayConnector(config *ConnectorConfig, signer Signer, handler GatewayConnectorHandler, clock clockwork.Clock, lggr logger.Logger) (GatewayConnector, error) { + if config == nil || signer == nil || handler == nil || clock == nil || lggr == nil { + return nil, errors.New("nil dependency") + } + if len(config.DonId) == 0 || len(config.DonId) > network.HandshakeDonIdLen { + return nil, errors.New("invalid DON ID") + } + addressBytes, err := hex.DecodeString(config.NodeAddress) + if err != nil { + return nil, err + } + connector := &gatewayConnector{ + config: config, + codec: &api.JsonRPCCodec{}, + clock: clock, + nodeAddress: addressBytes, + signer: signer, + handler: handler, + shutdownCh: make(chan struct{}), + lggr: lggr.Named("GatewayConnector"), + } + gateways := make(map[string]*gatewayState) + urlToId := make(map[string]string) + for _, gw := range config.Gateways { + gw := gw + if _, exists := gateways[gw.Id]; exists { + return nil, fmt.Errorf("duplicate Gateway ID %s", gw.Id) + } + if _, exists := urlToId[gw.URL]; exists { + return nil, 
fmt.Errorf("duplicate Gateway URL %s", gw.URL) + } + parsedURL, err := url.Parse(gw.URL) + if err != nil { + return nil, err + } + gateway := &gatewayState{ + conn: network.NewWSConnectionWrapper(lggr), + config: gw, + url: parsedURL, + wsClient: network.NewWebSocketClient(config.WsClientConfig, connector, lggr), + } + gateways[gw.Id] = gateway + urlToId[gw.URL] = gw.Id + } + connector.gateways = gateways + connector.urlToId = urlToId + return connector, nil +} + +func (c *gatewayConnector) SendToGateway(ctx context.Context, gatewayId string, msg *api.Message) error { + data, err := c.codec.EncodeResponse(msg) + if err != nil { + return fmt.Errorf("error encoding response for gateway %s: %v", gatewayId, err) + } + gateway, ok := c.gateways[gatewayId] + if !ok { + return fmt.Errorf("invalid Gateway ID %s", gatewayId) + } + if gateway.conn == nil { + return fmt.Errorf("connector not started") + } + return gateway.conn.Write(ctx, websocket.BinaryMessage, data) +} + +func (c *gatewayConnector) readLoop(gatewayState *gatewayState) { + ctx, cancel := c.shutdownCh.NewCtx() + defer cancel() + + for { + select { + case <-c.shutdownCh: + c.closeWait.Done() + return + case item := <-gatewayState.conn.ReadChannel(): + msg, err := c.codec.DecodeRequest(item.Data) + if err != nil { + c.lggr.Errorw("parse error when reading from Gateway", "id", gatewayState.config.Id, "err", err) + break + } + if err = msg.Validate(); err != nil { + c.lggr.Errorw("failed to validate message signature", "id", gatewayState.config.Id, "err", err) + break + } + c.handler.HandleGatewayMessage(ctx, gatewayState.config.Id, msg) + } + } +} + +func (c *gatewayConnector) reconnectLoop(gatewayState *gatewayState) { + redialBackoff := utils.NewRedialBackoff() + ctx, cancel := c.shutdownCh.NewCtx() + defer cancel() + + for { + conn, err := gatewayState.wsClient.Connect(ctx, gatewayState.url) + if err != nil { + c.lggr.Errorw("connection error", "url", gatewayState.url, "err", err) + } else { + 
c.lggr.Infow("connected successfully", "url", gatewayState.url) + closeCh := gatewayState.conn.Reset(conn) + <-closeCh + c.lggr.Infow("connection closed", "url", gatewayState.url) + // reset backoff + redialBackoff = utils.NewRedialBackoff() + } + select { + case <-c.shutdownCh: + c.closeWait.Done() + return + case <-time.After(redialBackoff.Duration()): + c.lggr.Info("reconnecting ...") + } + } +} + +func (c *gatewayConnector) Start(ctx context.Context) error { + return c.StartOnce("GatewayConnector", func() error { + c.lggr.Info("starting gateway connector") + if err := c.handler.Start(ctx); err != nil { + return err + } + for _, gatewayState := range c.gateways { + gatewayState := gatewayState + if err := gatewayState.conn.Start(ctx); err != nil { + return err + } + c.closeWait.Add(2) + go c.readLoop(gatewayState) + go c.reconnectLoop(gatewayState) + } + return nil + }) +} + +func (c *gatewayConnector) Close() error { + return c.StopOnce("GatewayConnector", func() (err error) { + c.lggr.Info("closing gateway connector") + close(c.shutdownCh) + for _, gatewayState := range c.gateways { + gatewayState.conn.Close() + } + c.closeWait.Wait() + return c.handler.Close() + }) +} + +func (c *gatewayConnector) NewAuthHeader(url *url.URL) ([]byte, error) { + gatewayId, found := c.urlToId[url.String()] + if !found { + return nil, network.ErrAuthInvalidGateway + } + authHeaderElems := &network.AuthHeaderElems{ + Timestamp: uint32(c.clock.Now().Unix()), + DonId: c.config.DonId, + GatewayId: gatewayId, + } + packedElems := network.PackAuthHeader(authHeaderElems) + signature, err := c.signer.Sign(packedElems) + if err != nil { + return nil, err + } + return append(packedElems, signature...), nil +} + +func (c *gatewayConnector) ChallengeResponse(url *url.URL, challenge []byte) ([]byte, error) { + challengeElems, err := network.UnpackChallenge(challenge) + if err != nil { + return nil, err + } + if len(challengeElems.ChallengeBytes) < c.config.AuthMinChallengeLen { + return nil, 
network.ErrChallengeTooShort + } + gatewayId, found := c.urlToId[url.String()] + if !found || challengeElems.GatewayId != gatewayId { + return nil, network.ErrAuthInvalidGateway + } + nowTs := uint32(c.clock.Now().Unix()) + ts := challengeElems.Timestamp + if ts < nowTs-c.config.AuthTimestampToleranceSec || nowTs+c.config.AuthTimestampToleranceSec < ts { + return nil, network.ErrAuthInvalidTimestamp + } + return c.signer.Sign(challenge) +} diff --git a/core/services/gateway/connector/connector_test.go b/core/services/gateway/connector/connector_test.go new file mode 100644 index 00000000..a965ee42 --- /dev/null +++ b/core/services/gateway/connector/connector_test.go @@ -0,0 +1,193 @@ +package connector_test + +import ( + "errors" + "net/url" + "testing" + "time" + + "github.com/jonboulle/clockwork" + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/connector" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/connector/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network" +) + +const defaultConfig = ` +NodeAddress = "0x68902d681c28119f9b2531473a417088bf008e59" +DonId = "example_don" +AuthMinChallengeLen = 10 +AuthTimestampToleranceSec = 5 + +[[Gateways]] +Id = "example_gateway" +URL = "ws://localhost:8081/node" + +[[Gateways]] +Id = "another_one" +URL = "wss://example.com:8090/node_endpoint" +` + +func parseTOMLConfig(t *testing.T, tomlConfig string) *connector.ConnectorConfig { + var cfg connector.ConnectorConfig + err := toml.Unmarshal([]byte(tomlConfig), &cfg) + require.NoError(t, err) + return &cfg +} + +func newTestConnector(t *testing.T, config *connector.ConnectorConfig, now time.Time) (connector.GatewayConnector, *mocks.Signer, *mocks.GatewayConnectorHandler) { + signer := mocks.NewSigner(t) + 
handler := mocks.NewGatewayConnectorHandler(t) + clock := clockwork.NewFakeClock() + connector, err := connector.NewGatewayConnector(config, signer, handler, clock, logger.TestLogger(t)) + require.NoError(t, err) + return connector, signer, handler +} + +func TestGatewayConnector_NewGatewayConnector_ValidConfig(t *testing.T) { + t.Parallel() + + tomlConfig := parseTOMLConfig(t, ` +NodeAddress = "0x68902d681c28119f9b2531473a417088bf008e59" +DonId = "example_don" + +[[Gateways]] +Id = "example_gateway" +URL = "ws://localhost:8081/node" +`) + + newTestConnector(t, tomlConfig, time.Now()) +} + +func TestGatewayConnector_NewGatewayConnector_InvalidConfig(t *testing.T) { + t.Parallel() + + invalidCases := map[string]string{ + "invalid DON ID": ` +NodeAddress = "0x68902d681c28119f9b2531473a417088bf008e59" +DonId = "" +`, + "invalid node address": ` +NodeAddress = "2531473a417088bf008e59" +DonId = "example_don" +`, + "duplicate gateway ID": ` +NodeAddress = "0x68902d681c28119f9b2531473a417088bf008e59" +DonId = "example_don" + +[[Gateways]] +Id = "example_gateway" +URL = "ws://localhost:8081/a" + +[[Gateways]] +Id = "example_gateway" +URL = "ws://localhost:8081/b" +`, + "duplicate gateway URL": ` +NodeAddress = "0x68902d681c28119f9b2531473a417088bf008e59" +DonId = "example_don" + +[[Gateways]] +Id = "gateway_A" +URL = "ws://localhost:8081/node" + +[[Gateways]] +Id = "gateway_B" +URL = "ws://localhost:8081/node" +`, + } + + signer := mocks.NewSigner(t) + handler := mocks.NewGatewayConnectorHandler(t) + clock := clockwork.NewFakeClock() + for name, config := range invalidCases { + config := config + t.Run(name, func(t *testing.T) { + _, err := connector.NewGatewayConnector(parseTOMLConfig(t, config), signer, handler, clock, logger.TestLogger(t)) + require.Error(t, err) + }) + } +} + +func TestGatewayConnector_CleanStartAndClose(t *testing.T) { + t.Parallel() + + connector, signer, handler := newTestConnector(t, parseTOMLConfig(t, defaultConfig), time.Now()) + 
handler.On("Start", mock.Anything).Return(nil) + handler.On("Close").Return(nil) + signer.On("Sign", mock.Anything).Return(nil, errors.New("cannot sign")) + servicetest.Run(t, connector) +} + +func TestGatewayConnector_NewAuthHeader_SignerError(t *testing.T) { + t.Parallel() + + connector, signer, _ := newTestConnector(t, parseTOMLConfig(t, defaultConfig), time.Now()) + signer.On("Sign", mock.Anything).Return(nil, errors.New("cannot sign")) + + url, err := url.Parse("ws://localhost:8081/node") + require.NoError(t, err) + _, err = connector.NewAuthHeader(url) + require.Error(t, err) +} + +func TestGatewayConnector_NewAuthHeader_Success(t *testing.T) { + t.Parallel() + + testSignature := make([]byte, network.HandshakeSignatureLen) + testSignature[1] = 0xfa + connector, signer, _ := newTestConnector(t, parseTOMLConfig(t, defaultConfig), time.Now()) + signer.On("Sign", mock.Anything).Return(testSignature, nil) + url, err := url.Parse("ws://localhost:8081/node") + require.NoError(t, err) + + header, err := connector.NewAuthHeader(url) + require.NoError(t, err) + require.Equal(t, testSignature, header[len(header)-65:]) +} + +func TestGatewayConnector_ChallengeResponse(t *testing.T) { + t.Parallel() + + testSignature := make([]byte, network.HandshakeSignatureLen) + testSignature[1] = 0xfa + now := time.Now() + connector, signer, _ := newTestConnector(t, parseTOMLConfig(t, defaultConfig), now) + signer.On("Sign", mock.Anything).Return(testSignature, nil) + url, err := url.Parse("ws://localhost:8081/node") + require.NoError(t, err) + + challenge := network.ChallengeElems{ + Timestamp: uint32(now.Unix()), + GatewayId: "example_gateway", + ChallengeBytes: []byte("1234567890"), + } + + // valid + signature, err := connector.ChallengeResponse(url, network.PackChallenge(&challenge)) + require.NoError(t, err) + require.Equal(t, testSignature, signature) + + // invalid timestamp + badChallenge := challenge + badChallenge.Timestamp += 100 + _, err = connector.ChallengeResponse(url, 
network.PackChallenge(&badChallenge)) + require.Equal(t, network.ErrAuthInvalidTimestamp, err) + + // too short + badChallenge = challenge + badChallenge.ChallengeBytes = []byte("aabb") + _, err = connector.ChallengeResponse(url, network.PackChallenge(&badChallenge)) + require.Equal(t, network.ErrChallengeTooShort, err) + + // invalid GatewayId + badChallenge = challenge + badChallenge.GatewayId = "wrong" + _, err = connector.ChallengeResponse(url, network.PackChallenge(&badChallenge)) + require.Equal(t, network.ErrAuthInvalidGateway, err) +} diff --git a/core/services/gateway/connector/mocks/gateway_connector.go b/core/services/gateway/connector/mocks/gateway_connector.go new file mode 100644 index 00000000..1b39f2e6 --- /dev/null +++ b/core/services/gateway/connector/mocks/gateway_connector.go @@ -0,0 +1,146 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + api "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + + context "context" + + mock "github.com/stretchr/testify/mock" + + url "net/url" +) + +// GatewayConnector is an autogenerated mock type for the GatewayConnector type +type GatewayConnector struct { + mock.Mock +} + +// ChallengeResponse provides a mock function with given fields: _a0, challenge +func (_m *GatewayConnector) ChallengeResponse(_a0 *url.URL, challenge []byte) ([]byte, error) { + ret := _m.Called(_a0, challenge) + + if len(ret) == 0 { + panic("no return value specified for ChallengeResponse") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(*url.URL, []byte) ([]byte, error)); ok { + return rf(_a0, challenge) + } + if rf, ok := ret.Get(0).(func(*url.URL, []byte) []byte); ok { + r0 = rf(_a0, challenge) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(*url.URL, []byte) error); ok { + r1 = rf(_a0, challenge) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Close provides a mock function with given fields: +func 
(_m *GatewayConnector) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewAuthHeader provides a mock function with given fields: _a0 +func (_m *GatewayConnector) NewAuthHeader(_a0 *url.URL) ([]byte, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for NewAuthHeader") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(*url.URL) ([]byte, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(*url.URL) []byte); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(*url.URL) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendToGateway provides a mock function with given fields: ctx, gatewayId, msg +func (_m *GatewayConnector) SendToGateway(ctx context.Context, gatewayId string, msg *api.Message) error { + ret := _m.Called(ctx, gatewayId, msg) + + if len(ret) == 0 { + panic("no return value specified for SendToGateway") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *api.Message) error); ok { + r0 = rf(ctx, gatewayId, msg) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *GatewayConnector) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewGatewayConnector creates a new instance of GatewayConnector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewGatewayConnector(t interface { + mock.TestingT + Cleanup(func()) +}) *GatewayConnector { + mock := &GatewayConnector{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/connector/mocks/gateway_connector_handler.go b/core/services/gateway/connector/mocks/gateway_connector_handler.go new file mode 100644 index 00000000..b7d85197 --- /dev/null +++ b/core/services/gateway/connector/mocks/gateway_connector_handler.go @@ -0,0 +1,71 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + api "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// GatewayConnectorHandler is an autogenerated mock type for the GatewayConnectorHandler type +type GatewayConnectorHandler struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *GatewayConnectorHandler) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// HandleGatewayMessage provides a mock function with given fields: ctx, gatewayId, msg +func (_m *GatewayConnectorHandler) HandleGatewayMessage(ctx context.Context, gatewayId string, msg *api.Message) { + _m.Called(ctx, gatewayId, msg) +} + +// Start provides a mock function with given fields: _a0 +func (_m *GatewayConnectorHandler) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewGatewayConnectorHandler creates a new instance of GatewayConnectorHandler. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGatewayConnectorHandler(t interface { + mock.TestingT + Cleanup(func()) +}) *GatewayConnectorHandler { + mock := &GatewayConnectorHandler{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/connector/mocks/signer.go b/core/services/gateway/connector/mocks/signer.go new file mode 100644 index 00000000..18c7186f --- /dev/null +++ b/core/services/gateway/connector/mocks/signer.go @@ -0,0 +1,60 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Signer is an autogenerated mock type for the Signer type +type Signer struct { + mock.Mock +} + +// Sign provides a mock function with given fields: data +func (_m *Signer) Sign(data ...[]byte) ([]byte, error) { + _va := make([]interface{}, len(data)) + for _i := range data { + _va[_i] = data[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Sign") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(...[]byte) ([]byte, error)); ok { + return rf(data...) + } + if rf, ok := ret.Get(0).(func(...[]byte) []byte); ok { + r0 = rf(data...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(...[]byte) error); ok { + r1 = rf(data...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewSigner creates a new instance of Signer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewSigner(t interface { + mock.TestingT + Cleanup(func()) +}) *Signer { + mock := &Signer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/delegate.go b/core/services/gateway/delegate.go new file mode 100644 index 00000000..37472118 --- /dev/null +++ b/core/services/gateway/delegate.go @@ -0,0 +1,93 @@ +package gateway + +import ( + "encoding/json" + + "github.com/google/uuid" + "github.com/jmoiron/sqlx" + "github.com/pelletier/go-toml" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/config" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type Delegate struct { + legacyChains legacyevm.LegacyChainContainer + ks keystore.Eth + db *sqlx.DB + cfg pg.QConfig + lggr logger.Logger +} + +var _ job.Delegate = (*Delegate)(nil) + +func NewDelegate(legacyChains legacyevm.LegacyChainContainer, ks keystore.Eth, db *sqlx.DB, cfg pg.QConfig, lggr logger.Logger) *Delegate { + return &Delegate{ + legacyChains: legacyChains, + ks: ks, + db: db, + cfg: cfg, + lggr: lggr, + } +} + +func (d *Delegate) JobType() job.Type { + return job.Gateway +} + +func (d *Delegate) BeforeJobCreated(spec job.Job) {} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } + +// ServicesForSpec returns the scheduler to be used for running observer jobs +func (d *Delegate) ServicesForSpec(spec job.Job) (services []job.ServiceCtx, err error) { + if spec.GatewaySpec == nil { + return nil, errors.Errorf("services.Delegate expects a *jobSpec.GatewaySpec to be present, got %v", spec) + } + + var gatewayConfig 
config.GatewayConfig + err2 := json.Unmarshal(spec.GatewaySpec.GatewayConfig.Bytes(), &gatewayConfig) + if err2 != nil { + return nil, errors.Wrap(err2, "unmarshal gateway config") + } + handlerFactory := NewHandlerFactory(d.legacyChains, d.db, d.cfg, d.lggr) + gateway, err := NewGatewayFromConfig(&gatewayConfig, handlerFactory, d.lggr) + if err != nil { + return nil, err + } + + return []job.ServiceCtx{gateway}, nil +} + +func ValidatedGatewaySpec(tomlString string) (job.Job, error) { + var jb = job.Job{ExternalJobID: uuid.New()} + + tree, err := toml.Load(tomlString) + if err != nil { + return jb, errors.Wrap(err, "toml error on load") + } + + err = tree.Unmarshal(&jb) + if err != nil { + return jb, errors.Wrap(err, "toml unmarshal error on spec") + } + + var spec job.GatewaySpec + err = tree.Unmarshal(&spec) + if err != nil { + return jb, errors.Wrap(err, "toml unmarshal error on job") + } + + jb.GatewaySpec = &spec + if jb.Type != job.Gateway { + return jb, errors.Errorf("unsupported type %s", jb.Type) + } + + return jb, nil +} diff --git a/core/services/gateway/delegate_test.go b/core/services/gateway/delegate_test.go new file mode 100644 index 00000000..132fc4d6 --- /dev/null +++ b/core/services/gateway/delegate_test.go @@ -0,0 +1,57 @@ +package gateway_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/gateway" +) + +func TestDelegate_JobSpecValidator(t *testing.T) { + t.Parallel() + + var tt = []struct { + name string + toml string + valid bool + }{ + { + "valid spec", + ` +type = "gateway" +schemaVersion = 1 +name = "The Best Gateway Job Ever!" 
+[gatewayConfig.NodeServerConfig] +Port = 666 +`, + true, + }, + { + "parse error", + ` +cantparsethis{{{{ +`, + false, + }, + { + "invalid job type", + ` +type = "gatez wayz" +schemaVersion = 1 +`, + false, + }, + } + for _, tc := range tt { + tc := tc + t.Run(tc.name, func(t *testing.T) { + _, err := gateway.ValidatedGatewaySpec(tc.toml) + if tc.valid { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} diff --git a/core/services/gateway/gateway.go b/core/services/gateway/gateway.go new file mode 100644 index 00000000..7a58deb8 --- /dev/null +++ b/core/services/gateway/gateway.go @@ -0,0 +1,190 @@ +package gateway + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "go.uber.org/multierr" + + "github.com/ethereum/go-ethereum/common" + "github.com/jonboulle/clockwork" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/config" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers" + gw_net "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network" + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +var promRequest = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "gateway_request", + Help: "Metric to track received requests and response codes", +}, []string{"response_code"}) + +type Gateway interface { + job.ServiceCtx + gw_net.HTTPRequestHandler + + GetUserPort() int + GetNodePort() int +} + +type HandlerType = string + +type HandlerFactory interface { + NewHandler(handlerType HandlerType, handlerConfig json.RawMessage, donConfig *config.DONConfig, don handlers.DON) (handlers.Handler, error) +} + +type gateway struct { + services.StateMachine + + codec api.Codec + httpServer gw_net.HttpServer + handlers 
map[string]handlers.Handler + connMgr ConnectionManager + lggr logger.Logger +} + +func NewGatewayFromConfig(config *config.GatewayConfig, handlerFactory HandlerFactory, lggr logger.Logger) (Gateway, error) { + codec := &api.JsonRPCCodec{} + httpServer := gw_net.NewHttpServer(&config.UserServerConfig, lggr) + connMgr, err := NewConnectionManager(config, clockwork.NewRealClock(), lggr) + if err != nil { + return nil, err + } + + handlerMap := make(map[string]handlers.Handler) + for _, donConfig := range config.Dons { + donConfig := donConfig + _, ok := handlerMap[donConfig.DonId] + if ok { + return nil, fmt.Errorf("duplicate DON ID %s", donConfig.DonId) + } + donConnMgr := connMgr.DONConnectionManager(donConfig.DonId) + if donConnMgr == nil { + return nil, fmt.Errorf("connection manager ID %s not found", donConfig.DonId) + } + for idx, nodeConfig := range donConfig.Members { + donConfig.Members[idx].Address = strings.ToLower(nodeConfig.Address) + if !common.IsHexAddress(nodeConfig.Address) { + return nil, fmt.Errorf("invalid node address %s", nodeConfig.Address) + } + } + handler, err := handlerFactory.NewHandler(donConfig.HandlerName, donConfig.HandlerConfig, &donConfig, donConnMgr) + if err != nil { + return nil, err + } + handlerMap[donConfig.DonId] = handler + donConnMgr.SetHandler(handler) + } + return NewGateway(codec, httpServer, handlerMap, connMgr, lggr), nil +} + +func NewGateway(codec api.Codec, httpServer gw_net.HttpServer, handlers map[string]handlers.Handler, connMgr ConnectionManager, lggr logger.Logger) Gateway { + gw := &gateway{ + codec: codec, + httpServer: httpServer, + handlers: handlers, + connMgr: connMgr, + lggr: lggr.Named("Gateway"), + } + httpServer.SetHTTPRequestHandler(gw) + return gw +} + +func (g *gateway) Start(ctx context.Context) error { + return g.StartOnce("Gateway", func() error { + g.lggr.Info("starting gateway") + for _, handler := range g.handlers { + if err := handler.Start(ctx); err != nil { + return err + } + } + if err := 
g.connMgr.Start(ctx); err != nil { + return err + } + return g.httpServer.Start(ctx) + }) +} + +func (g *gateway) Close() error { + return g.StopOnce("Gateway", func() (err error) { + g.lggr.Info("closing gateway") + err = multierr.Combine(err, g.httpServer.Close()) + err = multierr.Combine(err, g.connMgr.Close()) + for _, handler := range g.handlers { + err = multierr.Combine(err, handler.Close()) + } + return + }) +} + +// Called by the server +func (g *gateway) ProcessRequest(ctx context.Context, rawRequest []byte) (rawResponse []byte, httpStatusCode int) { + // decode + msg, err := g.codec.DecodeRequest(rawRequest) + if err != nil { + return newError(g.codec, "", api.UserMessageParseError, err.Error()) + } + if msg == nil { + return newError(g.codec, "", api.UserMessageParseError, "nil message") + } + if err = msg.Validate(); err != nil { + return newError(g.codec, msg.Body.MessageId, api.UserMessageParseError, err.Error()) + } + // find correct handler + handler, ok := g.handlers[msg.Body.DonId] + if !ok { + return newError(g.codec, msg.Body.MessageId, api.UnsupportedDONIdError, "unsupported DON ID") + } + // send to the handler + responseCh := make(chan handlers.UserCallbackPayload, 1) + err = handler.HandleUserMessage(ctx, msg, responseCh) + if err != nil { + return newError(g.codec, msg.Body.MessageId, api.HandlerError, err.Error()) + } + // await response + var response handlers.UserCallbackPayload + select { + case <-ctx.Done(): + return newError(g.codec, msg.Body.MessageId, api.RequestTimeoutError, "handler timeout") + case response = <-responseCh: + break + } + if response.ErrCode != api.NoError { + return newError(g.codec, msg.Body.MessageId, response.ErrCode, response.ErrMsg) + } + // encode + rawResponse, err = g.codec.EncodeResponse(response.Msg) + if err != nil { + return newError(g.codec, msg.Body.MessageId, api.NodeReponseEncodingError, "") + } + promRequest.WithLabelValues(api.NoError.String()).Inc() + return rawResponse, 
api.ToHttpErrorCode(api.NoError) +} + +func newError(codec api.Codec, id string, errCode api.ErrorCode, errMsg string) ([]byte, int) { + rawResponse, err := codec.EncodeNewErrorResponse(id, api.ToJsonRPCErrorCode(errCode), errMsg, nil) + if err != nil { + // we're not even able to encode a valid JSON response + promRequest.WithLabelValues(api.FatalError.String()).Inc() + return []byte("fatal error"), api.ToHttpErrorCode(api.FatalError) + } + promRequest.WithLabelValues(errCode.String()).Inc() + return rawResponse, api.ToHttpErrorCode(errCode) +} + +func (g *gateway) GetUserPort() int { + return g.httpServer.GetPort() +} + +func (g *gateway) GetNodePort() int { + return g.connMgr.GetPort() +} diff --git a/core/services/gateway/gateway_test.go b/core/services/gateway/gateway_test.go new file mode 100644 index 00000000..59d362f3 --- /dev/null +++ b/core/services/gateway/gateway_test.go @@ -0,0 +1,247 @@ +package gateway_test + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/config" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers" + handler_mocks "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/mocks" + net_mocks "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network/mocks" +) + +func parseTOMLConfig(t *testing.T, tomlConfig string) *config.GatewayConfig { + var cfg config.GatewayConfig + err := toml.Unmarshal([]byte(tomlConfig), &cfg) + require.NoError(t, err) + return &cfg +} + +func 
buildConfig(toAppend string) string { + return ` + [userServerConfig] + Path = "/user" + [nodeServerConfig] + Path = "/node" + ` + toAppend +} + +func TestGateway_NewGatewayFromConfig_ValidConfig(t *testing.T) { + t.Parallel() + + tomlConfig := buildConfig(` +[[dons]] +DonId = "my_don_1" +HandlerName = "dummy" + +[[dons]] +DonId = "my_don_2" +HandlerName = "dummy" + +[[dons.Members]] +Name = "node one" +Address = "0x0001020304050607080900010203040506070809" +`) + + lggr := logger.TestLogger(t) + _, err := gateway.NewGatewayFromConfig(parseTOMLConfig(t, tomlConfig), gateway.NewHandlerFactory(nil, nil, nil, lggr), lggr) + require.NoError(t, err) +} + +func TestGateway_NewGatewayFromConfig_DuplicateID(t *testing.T) { + t.Parallel() + + tomlConfig := buildConfig(` +[[dons]] +DonId = "my_don" +HandlerName = "dummy" + +[[dons]] +DonId = "my_don" +HandlerName = "dummy" +`) + + lggr := logger.TestLogger(t) + _, err := gateway.NewGatewayFromConfig(parseTOMLConfig(t, tomlConfig), gateway.NewHandlerFactory(nil, nil, nil, lggr), lggr) + require.Error(t, err) +} + +func TestGateway_NewGatewayFromConfig_InvalidHandler(t *testing.T) { + t.Parallel() + + tomlConfig := buildConfig(` +[[dons]] +DonId = "my_don" +HandlerName = "no_such_handler" +`) + + lggr := logger.TestLogger(t) + _, err := gateway.NewGatewayFromConfig(parseTOMLConfig(t, tomlConfig), gateway.NewHandlerFactory(nil, nil, nil, lggr), lggr) + require.Error(t, err) +} + +func TestGateway_NewGatewayFromConfig_MissingID(t *testing.T) { + t.Parallel() + + tomlConfig := buildConfig(` +[[dons]] +HandlerName = "dummy" +SomeOtherField = "abcd" +`) + + lggr := logger.TestLogger(t) + _, err := gateway.NewGatewayFromConfig(parseTOMLConfig(t, tomlConfig), gateway.NewHandlerFactory(nil, nil, nil, lggr), lggr) + require.Error(t, err) +} + +func TestGateway_NewGatewayFromConfig_InvalidNodeAddress(t *testing.T) { + t.Parallel() + + tomlConfig := buildConfig(` +[[dons]] +HandlerName = "dummy" +DonId = "my_don" + +[[dons.Members]] +Name 
= "node one" +Address = "0xnot_an_address" +`) + + lggr := logger.TestLogger(t) + _, err := gateway.NewGatewayFromConfig(parseTOMLConfig(t, tomlConfig), gateway.NewHandlerFactory(nil, nil, nil, lggr), lggr) + require.Error(t, err) +} + +func TestGateway_CleanStartAndClose(t *testing.T) { + t.Parallel() + + lggr := logger.TestLogger(t) + gateway, err := gateway.NewGatewayFromConfig(parseTOMLConfig(t, buildConfig("")), gateway.NewHandlerFactory(nil, nil, nil, lggr), lggr) + require.NoError(t, err) + servicetest.Run(t, gateway) +} + +func requireJsonRPCResult(t *testing.T, response []byte, expectedId string, expectedResult string) { + require.Equal(t, fmt.Sprintf(`{"jsonrpc":"2.0","id":"%s","result":%s}`, expectedId, expectedResult), string(response)) +} + +func requireJsonRPCError(t *testing.T, response []byte, expectedId string, expectedCode int, expectedMsg string) { + require.Equal(t, fmt.Sprintf(`{"jsonrpc":"2.0","id":"%s","error":{"code":%d,"message":"%s"}}`, expectedId, expectedCode, expectedMsg), string(response)) +} + +func newGatewayWithMockHandler(t *testing.T) (gateway.Gateway, *handler_mocks.Handler) { + httpServer := net_mocks.NewHttpServer(t) + httpServer.On("SetHTTPRequestHandler", mock.Anything).Return(nil) + handler := handler_mocks.NewHandler(t) + handlers := map[string]handlers.Handler{ + "testDON": handler, + } + gw := gateway.NewGateway(&api.JsonRPCCodec{}, httpServer, handlers, nil, logger.TestLogger(t)) + return gw, handler +} + +func newSignedRequest(t *testing.T, messageId string, method string, donID string, payload []byte) []byte { + msg := &api.Message{ + Body: api.MessageBody{ + MessageId: messageId, + Method: method, + DonId: donID, + Payload: payload, + }, + } + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + require.NoError(t, msg.Sign(privateKey)) + codec := api.JsonRPCCodec{} + rawRequest, err := codec.EncodeRequest(msg) + require.NoError(t, err) + return rawRequest +} + +func 
TestGateway_ProcessRequest_ParseError(t *testing.T) { + t.Parallel() + + gw, _ := newGatewayWithMockHandler(t) + response, statusCode := gw.ProcessRequest(testutils.Context(t), []byte("{{}")) + requireJsonRPCError(t, response, "", -32700, "invalid character '{' looking for beginning of object key string") + require.Equal(t, 400, statusCode) +} + +func TestGateway_ProcessRequest_MessageValidationError(t *testing.T) { + t.Parallel() + + gw, _ := newGatewayWithMockHandler(t) + req := newSignedRequest(t, "abc", "request", "", []byte{}) + response, statusCode := gw.ProcessRequest(testutils.Context(t), req) + requireJsonRPCError(t, response, "abc", -32700, "invalid DON ID length") + require.Equal(t, 400, statusCode) +} + +func TestGateway_ProcessRequest_IncorrectDonId(t *testing.T) { + t.Parallel() + + gw, _ := newGatewayWithMockHandler(t) + req := newSignedRequest(t, "abc", "request", "unknownDON", []byte{}) + response, statusCode := gw.ProcessRequest(testutils.Context(t), req) + requireJsonRPCError(t, response, "abc", -32602, "unsupported DON ID") + require.Equal(t, 400, statusCode) +} + +func TestGateway_ProcessRequest_HandlerResponse(t *testing.T) { + t.Parallel() + + gw, handler := newGatewayWithMockHandler(t) + handler.On("HandleUserMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) { + msg := args.Get(1).(*api.Message) + callbackCh := args.Get(2).(chan<- handlers.UserCallbackPayload) + // echo back to sender with attached payload + msg.Body.Payload = []byte(`{"result":"OK"}`) + msg.Signature = "" + callbackCh <- handlers.UserCallbackPayload{Msg: msg, ErrCode: api.NoError, ErrMsg: ""} + }) + + req := newSignedRequest(t, "abcd", "request", "testDON", []byte{}) + response, statusCode := gw.ProcessRequest(testutils.Context(t), req) + requireJsonRPCResult(t, response, "abcd", + `{"signature":"","body":{"message_id":"abcd","method":"request","don_id":"testDON","receiver":"","payload":{"result":"OK"}}}`) + require.Equal(t, 
200, statusCode) +} + +func TestGateway_ProcessRequest_HandlerTimeout(t *testing.T) { + t.Parallel() + + gw, handler := newGatewayWithMockHandler(t) + handler.On("HandleUserMessage", mock.Anything, mock.Anything, mock.Anything).Return(nil) + timeoutCtx, cancel := context.WithTimeout(testutils.Context(t), time.Millisecond*10) + defer cancel() + + req := newSignedRequest(t, "abcd", "request", "testDON", []byte{}) + response, statusCode := gw.ProcessRequest(timeoutCtx, req) + requireJsonRPCError(t, response, "abcd", -32000, "handler timeout") + require.Equal(t, 504, statusCode) +} + +func TestGateway_ProcessRequest_HandlerError(t *testing.T) { + t.Parallel() + + gw, handler := newGatewayWithMockHandler(t) + handler.On("HandleUserMessage", mock.Anything, mock.Anything, mock.Anything).Return(errors.New("failure")) + + req := newSignedRequest(t, "abcd", "request", "testDON", []byte{}) + response, statusCode := gw.ProcessRequest(testutils.Context(t), req) + requireJsonRPCError(t, response, "abcd", -32600, "failure") + require.Equal(t, 400, statusCode) +} diff --git a/core/services/gateway/handler_factory.go b/core/services/gateway/handler_factory.go new file mode 100644 index 00000000..0f58d6db --- /dev/null +++ b/core/services/gateway/handler_factory.go @@ -0,0 +1,44 @@ +package gateway + +import ( + "encoding/json" + "fmt" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/config" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +const ( + FunctionsHandlerType HandlerType = "functions" + DummyHandlerType HandlerType = "dummy" +) + +type handlerFactory struct { + legacyChains legacyevm.LegacyChainContainer + db *sqlx.DB + cfg pg.QConfig + lggr logger.Logger +} + +var 
_ HandlerFactory = (*handlerFactory)(nil) + +func NewHandlerFactory(legacyChains legacyevm.LegacyChainContainer, db *sqlx.DB, cfg pg.QConfig, lggr logger.Logger) HandlerFactory { + return &handlerFactory{legacyChains, db, cfg, lggr} +} + +func (hf *handlerFactory) NewHandler(handlerType HandlerType, handlerConfig json.RawMessage, donConfig *config.DONConfig, don handlers.DON) (handlers.Handler, error) { + switch handlerType { + case FunctionsHandlerType: + return functions.NewFunctionsHandlerFromConfig(handlerConfig, donConfig, don, hf.legacyChains, hf.db, hf.cfg, hf.lggr) + case DummyHandlerType: + return handlers.NewDummyHandler(donConfig, don, hf.lggr) + default: + return nil, fmt.Errorf("unsupported handler type %s", handlerType) + } +} diff --git a/core/services/gateway/handlers/common/ratelimiter.go b/core/services/gateway/handlers/common/ratelimiter.go new file mode 100644 index 00000000..8329cb15 --- /dev/null +++ b/core/services/gateway/handlers/common/ratelimiter.go @@ -0,0 +1,49 @@ +package common + +import ( + "errors" + "sync" + + "golang.org/x/time/rate" +) + +// Wrapper around Go's rate.Limiter that supports both global and a per-sender rate limiting. 
+// RateLimiter wraps two token buckets: one global limiter shared by all
+// senders and one lazily-created limiter per sender. A call is admitted
+// only when both buckets have a token available.
+type RateLimiter struct {
+	global    *rate.Limiter
+	perSender map[string]*rate.Limiter // lazily populated on first Allow() per sender
+	config    RateLimiterConfig
+	mu        sync.Mutex // guards perSender map access only; limiters are internally thread-safe
+}
+
+// RateLimiterConfig holds the token-bucket parameters. All four values must
+// be strictly positive (enforced by NewRateLimiter).
+type RateLimiterConfig struct {
+	GlobalRPS      float64 `json:"globalRPS"`
+	GlobalBurst    int     `json:"globalBurst"`
+	PerSenderRPS   float64 `json:"perSenderRPS"`
+	PerSenderBurst int     `json:"perSenderBurst"`
+}
+
+// NewRateLimiter validates config and returns a ready-to-use RateLimiter.
+// It errors if any RPS or burst value is not strictly positive.
+func NewRateLimiter(config RateLimiterConfig) (*RateLimiter, error) {
+	if config.GlobalRPS <= 0.0 || config.PerSenderRPS <= 0.0 {
+		return nil, errors.New("RPS values must be positive")
+	}
+	if config.GlobalBurst <= 0 || config.PerSenderBurst <= 0 {
+		return nil, errors.New("burst values must be positive")
+	}
+	return &RateLimiter{
+		global:    rate.NewLimiter(rate.Limit(config.GlobalRPS), config.GlobalBurst),
+		perSender: make(map[string]*rate.Limiter),
+		config:    config,
+	}, nil
+}
+
+// Allow reports whether a request from sender may proceed, consuming tokens
+// from the limiters as it checks them.
+// NOTE(review): because of &&'s short-circuit, a per-sender denial consumes
+// no global token, but a per-sender pass followed by a global denial does
+// consume the sender's token — confirm this asymmetry is intended.
+func (rl *RateLimiter) Allow(sender string) bool {
+	rl.mu.Lock()
+	senderLimiter, ok := rl.perSender[sender]
+	if !ok {
+		senderLimiter = rate.NewLimiter(rate.Limit(rl.config.PerSenderRPS), rl.config.PerSenderBurst)
+		rl.perSender[sender] = senderLimiter
+	}
+	rl.mu.Unlock()
+
+	return senderLimiter.Allow() && rl.global.Allow()
+}
diff --git a/core/services/gateway/handlers/common/ratelimiter_test.go b/core/services/gateway/handlers/common/ratelimiter_test.go
new file mode 100644
index 00000000..c06286c3
--- /dev/null
+++ b/core/services/gateway/handlers/common/ratelimiter_test.go
@@ -0,0 +1,27 @@
+package common_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/common"
+)
+
+// TestRateLimiter_Simple exercises both buckets: user1 exhausts its
+// per-sender burst of 2, and the third distinct sender is refused because
+// the global burst of 3 is already spent.
+func TestRateLimiter_Simple(t *testing.T) {
+	t.Parallel()
+
+	config := common.RateLimiterConfig{
+		GlobalRPS:      3.0,
+		GlobalBurst:    3,
+		PerSenderRPS:   1.0,
+		PerSenderBurst: 2,
+	}
+	rl, err := common.NewRateLimiter(config)
+	require.NoError(t, err)
+	require.True(t, rl.Allow("user1"))
+	require.True(t, rl.Allow("user2"))
+	require.True(t, rl.Allow("user1"))
+	require.False(t,
rl.Allow("user1"))
+	require.False(t, rl.Allow("user3"))
+}
diff --git a/core/services/gateway/handlers/common/requestcache.go b/core/services/gateway/handlers/common/requestcache.go
new file mode 100644
index 00000000..7d3dadcb
--- /dev/null
+++ b/core/services/gateway/handlers/common/requestcache.go
@@ -0,0 +1,116 @@
+package common
+
+import (
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/goplugin/pluginv3.0/v2/core/services/gateway/api"
+	"github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers"
+)
+
+// RequestCache is used to store pending requests and collect incoming responses as they arrive.
+// It is parameterized by responseData, which is a service-specific type storing all data needed to aggregate responses.
+// Client needs to implement a ResponseProcessor, which is called for every response (see below).
+// Additionally, each request has a timeout, after which the entry will be removed from the cache and an error sent to the callback channel.
+// All methods are thread-safe.
+type RequestCache[T any] interface {
+	NewRequest(request *api.Message, callbackCh chan<- handlers.UserCallbackPayload, responseData *T) error
+	ProcessResponse(response *api.Message, process ResponseProcessor[T]) error
+}
+
+// If aggregated != nil then the aggregated response is ready and the entry will be deleted from RequestCache.
+// Otherwise, state will be updated to newState and the entry will remain in cache, awaiting more responses from nodes.
+type ResponseProcessor[T any] func(response *api.Message, state *T) (aggregated *handlers.UserCallbackPayload, newState *T, err error)
+
+// requestCache is the default RequestCache implementation: a bounded,
+// mutex-guarded map of pending requests keyed by (sender, message id).
+type requestCache[T any] struct {
+	cache        map[globalId]*pendingRequest[T]
+	maxCacheSize uint32        // hard cap on the number of simultaneously pending requests
+	timeout      time.Duration // per-request deadline, armed in NewRequest
+	mu           sync.Mutex    // guards cache
+}
+
+// globalId disambiguates requests across senders: message ids only need to
+// be unique per sender, not globally.
+type globalId struct {
+	sender string
+	id     string
+}
+
+// pendingRequest tracks a single in-flight request awaiting node responses.
+type pendingRequest[T any] struct {
+	callbackCh   chan<- handlers.UserCallbackPayload // receives the final (or timeout) payload exactly once
+	responseData *T
+	timeoutTimer *time.Timer
+	mu           sync.Mutex // guards responseData while a ResponseProcessor runs
+}
+
+// NewRequestCache creates a RequestCache in which every request times out
+// after the given duration and at most maxCacheSize requests may be pending
+// at once.
+func NewRequestCache[T any](timeout time.Duration, maxCacheSize uint32) RequestCache[T] {
+	return &requestCache[T]{cache: make(map[globalId]*pendingRequest[T]), timeout: timeout, maxCacheSize: maxCacheSize}
+}
+
+// NewRequest registers a pending request keyed by (sender, message id) and
+// arms its timeout timer. On timeout the entry is removed and a
+// RequestTimeoutError payload is delivered on callbackCh.
+// Returns an error if request or responseData is nil, if the key already
+// exists, or if the cache is full.
+func (c *requestCache[T]) NewRequest(request *api.Message, callbackCh chan<- handlers.UserCallbackPayload, responseData *T) error {
+	if request == nil {
+		return errors.New("request is nil")
+	}
+	if responseData == nil {
+		return errors.New("responseData is nil")
+	}
+	key := globalId{request.Body.Sender, request.Body.MessageId}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	_, ok := c.cache[key]
+	if ok {
+		return errors.New("request already exists")
+	}
+	if len(c.cache) >= int(c.maxCacheSize) {
+		return errors.New("request cache is full")
+	}
+	// AfterFunc runs on its own goroutine; deleteAndSendOnce deletes the
+	// entry under c.mu, so a race between timeout and a completed
+	// ProcessResponse resolves to exactly one delivery.
+	timer := time.AfterFunc(c.timeout, func() {
+		c.deleteAndSendOnce(key, handlers.UserCallbackPayload{Msg: request, ErrMsg: "timeout", ErrCode: api.RequestTimeoutError})
+	})
+	c.cache[key] = &pendingRequest[T]{callbackCh: callbackCh, responseData: responseData, timeoutTimer: timer}
+	return nil
+}
+
+// Call ResponseProcessor on the response.
+// There are two possible outcomes:
+//
+//	(a) remove request from cache and send aggregated response to the user
+//	(b) update request's responseData and keep it in cache, awaiting more responses from nodes
+func (c *requestCache[T]) ProcessResponse(response *api.Message, process ResponseProcessor[T]) error {
+	if response == nil {
+		return errors.New("response is nil")
+	}
+	// Responses are matched by the receiver field: nodes echo the original
+	// sender back as Receiver.
+	key := globalId{response.Body.Receiver, response.Body.MessageId}
+	// retrieve entry
+	c.mu.Lock()
+	entry, ok := c.cache[key]
+	c.mu.Unlock()
+	if !ok {
+		return errors.New("request not found")
+	}
+	// process under per-entry lock
+	// (c.mu is deliberately released first so slow processors don't block
+	// the whole cache; only this entry is serialized)
+	entry.mu.Lock()
+	aggregated, newResponseData, err := process(response, entry.responseData)
+	if newResponseData != nil {
+		entry.responseData = newResponseData
+	}
+	entry.mu.Unlock()
+	if err != nil {
+		return err
+	}
+	if aggregated != nil {
+		c.deleteAndSendOnce(key, *aggregated)
+	}
+	return nil
+}
+
+// deleteAndSendOnce removes the entry (if still present), stops its timeout
+// timer, delivers callbackResponse on the callback channel and closes it.
+// The delete-under-lock check makes this idempotent: whichever of the
+// timeout goroutine or ProcessResponse gets here first wins, and the loser
+// is a no-op, so the channel is written and closed exactly once.
+func (c *requestCache[T]) deleteAndSendOnce(key globalId, callbackResponse handlers.UserCallbackPayload) {
+	c.mu.Lock()
+	entry, deleted := c.cache[key]
+	delete(c.cache, key)
+	c.mu.Unlock()
+	if deleted {
+		entry.timeoutTimer.Stop()
+		entry.callbackCh <- callbackResponse
+		close(entry.callbackCh)
+	}
+}
diff --git a/core/services/gateway/handlers/common/requestcache_test.go b/core/services/gateway/handlers/common/requestcache_test.go
new file mode 100644
index 00000000..3cf7a83f
--- /dev/null
+++ b/core/services/gateway/handlers/common/requestcache_test.go
@@ -0,0 +1,113 @@
+package common_test
+
+import (
+	"fmt"
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/goplugin/pluginv3.0/v2/core/services/gateway/api"
+	"github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers"
+	"github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/common"
+)
+
+// requestState is the per-request aggregation state used by these tests.
+type requestState struct {
+	counter int
+}
+
+func TestRequestCache_Simple(t *testing.T) {
+	t.Parallel()
+
+	cache :=
common.NewRequestCache[requestState](time.Hour, 1000) + callbackCh := make(chan handlers.UserCallbackPayload) + + req := &api.Message{Body: api.MessageBody{MessageId: "aa", Sender: "0x1234"}} + initialState := &requestState{} + require.NoError(t, cache.NewRequest(req, callbackCh, initialState)) + + nodeResp := &api.Message{Body: api.MessageBody{MessageId: "aa", Receiver: "0x1234"}} + go func() { + require.NoError(t, cache.ProcessResponse(nodeResp, func(response *api.Message, responseData *requestState) (aggregated *handlers.UserCallbackPayload, newResponseData *requestState, err error) { + // ready after first response + return &handlers.UserCallbackPayload{Msg: response}, nil, nil + })) + }() + finalResp := <-callbackCh + require.Equal(t, "aa", finalResp.Msg.Body.MessageId) +} + +func TestRequestCache_MultiResponse(t *testing.T) { + t.Parallel() + + nRequests := 10 + nResponsesPerRequest := 100 + maxDelayMillis := 100 + + cache := common.NewRequestCache[requestState](time.Hour, 1000) + chans := make([]chan handlers.UserCallbackPayload, nRequests) + reqs := make([]*api.Message, nRequests) + for i := 0; i < nRequests; i++ { + chans[i] = make(chan handlers.UserCallbackPayload) + reqs[i] = &api.Message{Body: api.MessageBody{MessageId: "abcd", Sender: fmt.Sprintf("sender_%d", i)}} + initialState := &requestState{counter: 0} + require.NoError(t, cache.NewRequest(reqs[i], chans[i], initialState)) + } + + for i := 0; i < nRequests; i++ { + resp := &api.Message{Body: api.MessageBody{MessageId: "abcd"}} + resp.Body.Receiver = reqs[i].Body.Sender + for j := 0; j < nResponsesPerRequest; j++ { + go func() { + n := rand.Intn(maxDelayMillis) + 1 + time.Sleep(time.Duration(n) * time.Millisecond) + require.NoError(t, cache.ProcessResponse(resp, func(response *api.Message, responseData *requestState) (aggregated *handlers.UserCallbackPayload, newResponseData *requestState, err error) { + responseData.counter++ + if responseData.counter == nResponsesPerRequest { + return 
&handlers.UserCallbackPayload{Msg: response}, nil, nil + } + return nil, responseData, nil + })) + }() + } + } + + for i := 0; i < nRequests; i++ { + resp := <-chans[i] + require.Equal(t, "abcd", resp.Msg.Body.MessageId) + require.Equal(t, reqs[i].Body.Sender, resp.Msg.Body.Receiver) + } +} + +func TestRequestCache_Timeout(t *testing.T) { + t.Parallel() + + cache := common.NewRequestCache[requestState](time.Millisecond*10, 1000) + callbackCh := make(chan handlers.UserCallbackPayload) + + req := &api.Message{Body: api.MessageBody{MessageId: "aa", Sender: "0x1234"}} + initialState := &requestState{} + require.NoError(t, cache.NewRequest(req, callbackCh, initialState)) + + finalResp := <-callbackCh + require.Equal(t, "aa", finalResp.Msg.Body.MessageId) + require.Equal(t, api.RequestTimeoutError, finalResp.ErrCode) +} + +func TestRequestCache_MaxSize(t *testing.T) { + t.Parallel() + + cache := common.NewRequestCache[requestState](time.Hour, 2) + callbackCh := make(chan handlers.UserCallbackPayload) + initialState := &requestState{} + + req := &api.Message{Body: api.MessageBody{MessageId: "aa", Sender: "0x1234"}} + require.NoError(t, cache.NewRequest(req, callbackCh, initialState)) + + req.Body.MessageId = "bb" + require.NoError(t, cache.NewRequest(req, callbackCh, initialState)) + + req.Body.MessageId = "cc" + require.Error(t, cache.NewRequest(req, callbackCh, initialState)) +} diff --git a/core/services/gateway/handlers/functions/allowlist/allowlist.go b/core/services/gateway/handlers/functions/allowlist/allowlist.go new file mode 100644 index 00000000..c89245f3 --- /dev/null +++ b/core/services/gateway/handlers/functions/allowlist/allowlist.go @@ -0,0 +1,346 @@ +package allowlist + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/services" + evmclient 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_allow_list" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_router" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const ( + defaultStoredAllowlistBatchSize = 1000 + defaultOnchainAllowlistBatchSize = 100 + defaultFetchingDelayInRangeSec = 1 +) + +type OnchainAllowlistConfig struct { + // ContractAddress is required + ContractAddress common.Address `json:"contractAddress"` + ContractVersion uint32 `json:"contractVersion"` + BlockConfirmations uint `json:"blockConfirmations"` + // UpdateFrequencySec can be zero to disable periodic updates + UpdateFrequencySec uint `json:"updateFrequencySec"` + UpdateTimeoutSec uint `json:"updateTimeoutSec"` + StoredAllowlistBatchSize uint `json:"storedAllowlistBatchSize"` + OnchainAllowlistBatchSize uint `json:"onchainAllowlistBatchSize"` + // StoreAllowedSendersEnabled is a feature flag that enables storing in db a copy of the allowlist. + StoreAllowedSendersEnabled bool `json:"storeAllowedSendersEnabled"` + // FetchingDelayInRangeSec prevents RPC client being rate limited when fetching the allowlist in ranges. + FetchingDelayInRangeSec uint `json:"fetchingDelayInRangeSec"` +} + +// OnchainAllowlist maintains an allowlist of addresses fetched from the blockchain (EVM-only). +// Use UpdateFromContract() for a one-time update or set OnchainAllowlistConfig.UpdateFrequencySec +// for repeated updates. +// All methods are thread-safe. 
+// +//go:generate mockery --quiet --name OnchainAllowlist --output ./mocks/ --case=underscore +type OnchainAllowlist interface { + job.ServiceCtx + + Allow(common.Address) bool + UpdateFromContract(ctx context.Context) error +} + +type onchainAllowlist struct { + services.StateMachine + + config OnchainAllowlistConfig + allowlist atomic.Pointer[map[common.Address]struct{}] + orm ORM + client evmclient.Client + contractV1 *functions_router.FunctionsRouter + blockConfirmations *big.Int + lggr logger.Logger + closeWait sync.WaitGroup + stopCh services.StopChan +} + +func NewOnchainAllowlist(client evmclient.Client, config OnchainAllowlistConfig, orm ORM, lggr logger.Logger) (OnchainAllowlist, error) { + if client == nil { + return nil, errors.New("client is nil") + } + if lggr == nil { + return nil, errors.New("logger is nil") + } + if config.ContractVersion != 1 { + return nil, fmt.Errorf("unsupported contract version %d", config.ContractVersion) + } + + if config.StoredAllowlistBatchSize == 0 { + lggr.Info("StoredAllowlistBatchSize not specified, using default size: ", defaultStoredAllowlistBatchSize) + config.StoredAllowlistBatchSize = defaultStoredAllowlistBatchSize + } + + if config.OnchainAllowlistBatchSize == 0 { + lggr.Info("OnchainAllowlistBatchSize not specified, using default size: ", defaultOnchainAllowlistBatchSize) + config.OnchainAllowlistBatchSize = defaultOnchainAllowlistBatchSize + } + + if config.FetchingDelayInRangeSec == 0 { + lggr.Info("FetchingDelayInRangeSec not specified, using default delay: ", defaultFetchingDelayInRangeSec) + config.FetchingDelayInRangeSec = defaultFetchingDelayInRangeSec + } + + if config.UpdateFrequencySec != 0 && config.FetchingDelayInRangeSec >= config.UpdateFrequencySec { + return nil, fmt.Errorf("to avoid updates overlapping FetchingDelayInRangeSec:%d should be less than UpdateFrequencySec:%d", config.FetchingDelayInRangeSec, config.UpdateFrequencySec) + } + + contractV1, err := 
functions_router.NewFunctionsRouter(config.ContractAddress, client) + if err != nil { + return nil, fmt.Errorf("unexpected error during functions_router.NewFunctionsRouter: %s", err) + } + allowlist := &onchainAllowlist{ + config: config, + orm: orm, + client: client, + contractV1: contractV1, + blockConfirmations: big.NewInt(int64(config.BlockConfirmations)), + lggr: lggr.Named("OnchainAllowlist"), + stopCh: make(services.StopChan), + } + emptyMap := make(map[common.Address]struct{}) + allowlist.allowlist.Store(&emptyMap) + return allowlist, nil +} + +func (a *onchainAllowlist) Start(ctx context.Context) error { + return a.StartOnce("OnchainAllowlist", func() error { + a.lggr.Info("starting onchain allowlist") + if a.config.UpdateFrequencySec == 0 || a.config.UpdateTimeoutSec == 0 { + a.lggr.Info("OnchainAllowlist periodic updates are disabled") + return nil + } + + a.loadStoredAllowedSenderList() + + updateOnce := func() { + timeoutCtx, cancel := utils.ContextFromChanWithTimeout(a.stopCh, time.Duration(a.config.UpdateTimeoutSec)*time.Second) + if err := a.UpdateFromContract(timeoutCtx); err != nil { + a.lggr.Errorw("error calling UpdateFromContract", "err", err) + } + cancel() + } + + a.closeWait.Add(1) + go func() { + defer a.closeWait.Done() + // update immediately after start to populate the allowlist without waiting UpdateFrequencySec seconds + updateOnce() + ticker := time.NewTicker(time.Duration(a.config.UpdateFrequencySec) * time.Second) + defer ticker.Stop() + for { + select { + case <-a.stopCh: + return + case <-ticker.C: + updateOnce() + } + } + }() + return nil + }) +} + +func (a *onchainAllowlist) Close() error { + return a.StopOnce("OnchainAllowlist", func() (err error) { + a.lggr.Info("closing onchain allowlist") + close(a.stopCh) + a.closeWait.Wait() + return nil + }) +} + +func (a *onchainAllowlist) Allow(address common.Address) bool { + allowlist := *a.allowlist.Load() + _, ok := allowlist[address] + return ok +} + +func (a *onchainAllowlist) 
UpdateFromContract(ctx context.Context) error { + latestBlockHeight, err := a.client.LatestBlockHeight(ctx) + if err != nil { + return errors.Wrap(err, "error calling LatestBlockHeight") + } + if latestBlockHeight == nil { + return errors.New("LatestBlockHeight returned nil") + } + blockNum := big.NewInt(0).Sub(latestBlockHeight, a.blockConfirmations) + return a.updateFromContractV1(ctx, blockNum) +} + +func (a *onchainAllowlist) updateFromContractV1(ctx context.Context, blockNum *big.Int) error { + tosID, err := a.contractV1.GetAllowListId(&bind.CallOpts{ + Pending: false, + Context: ctx, + }) + if err != nil { + return errors.Wrap(err, "unexpected error during functions_router.GetAllowListId") + } + a.lggr.Debugw("successfully fetched allowlist route ID", "id", hex.EncodeToString(tosID[:])) + if tosID == [32]byte{} { + return errors.New("allowlist route ID has not been set") + } + tosAddress, err := a.contractV1.GetContractById(&bind.CallOpts{ + Pending: false, + Context: ctx, + }, tosID) + if err != nil { + return errors.Wrap(err, "unexpected error during functions_router.GetContractById") + } + a.lggr.Debugw("successfully fetched allowlist contract address", "address", tosAddress) + tosContract, err := functions_allow_list.NewTermsOfServiceAllowList(tosAddress, a.client) + if err != nil { + return errors.Wrap(err, "unexpected error during functions_allow_list.NewTermsOfServiceAllowList") + } + + var allowedSenderList []common.Address + if !a.config.StoreAllowedSendersEnabled { + allowedSenderList, err = tosContract.GetAllAllowedSenders(&bind.CallOpts{ + Pending: false, + BlockNumber: blockNum, + Context: ctx, + }) + if err != nil { + return errors.Wrap(err, "error calling GetAllAllowedSenders") + } + } else { + err = a.syncBlockedSenders(ctx, tosContract, blockNum) + if err != nil { + return errors.Wrap(err, "failed to sync the stored allowed and blocked senders") + } + + allowedSenderList, err = a.getAllowedSendersBatched(ctx, tosContract, blockNum) + if err 
!= nil {
			return errors.Wrap(err, "failed to get allowed senders in range")
		}
	}

	a.update(allowedSenderList)
	return nil
}

// getAllowedSendersBatched fetches the full allowed-sender list from the ToS
// contract in batches of OnchainAllowlistBatchSize, throttled by a ticker of
// FetchingDelayInRangeSec to avoid RPC rate limiting. Each fetched batch is
// also persisted via the ORM (best-effort: storage errors are logged, not
// returned, so an on-chain read still succeeds).
func (a *onchainAllowlist) getAllowedSendersBatched(ctx context.Context, tosContract *functions_allow_list.TermsOfServiceAllowList, blockNum *big.Int) ([]common.Address, error) {
	allowedSenderList := make([]common.Address, 0)
	count, err := tosContract.GetAllowedSendersCount(&bind.CallOpts{
		Pending:     false,
		BlockNumber: blockNum,
		Context:     ctx,
	})
	if err != nil {
		return nil, errors.Wrap(err, "unexpected error during functions_allow_list.GetAllowedSendersCount")
	}

	throttleTicker := time.NewTicker(time.Duration(a.config.FetchingDelayInRangeSec) * time.Second)
	// defer (instead of a trailing Stop call) so the ticker is also released
	// on the early error returns below.
	defer throttleTicker.Stop()
	for idxStart := uint64(0); idxStart < count; idxStart += uint64(a.config.OnchainAllowlistBatchSize) {
		<-throttleTicker.C

		// The contract range is inclusive, so a batch of size B covers
		// [idxStart, idxStart+B-1]. Without the -1 each boundary element
		// would be fetched twice by consecutive batches.
		idxEnd := idxStart + uint64(a.config.OnchainAllowlistBatchSize) - 1
		if idxEnd >= count {
			idxEnd = count - 1
		}

		allowedSendersBatch, err := tosContract.GetAllowedSendersInRange(&bind.CallOpts{
			Pending:     false,
			BlockNumber: blockNum,
			Context:     ctx,
		}, idxStart, idxEnd)
		if err != nil {
			return nil, errors.Wrap(err, "error calling GetAllowedSendersInRange")
		}

		allowedSenderList = append(allowedSenderList, allowedSendersBatch...)
		// %w is only valid in fmt.Errorf; use the structured Errorw form used
		// elsewhere in this file.
		if err := a.orm.CreateAllowedSenders(allowedSendersBatch); err != nil {
			a.lggr.Errorw("failed to update stored allowedSenderList", "err", err)
		}
	}

	return allowedSenderList, nil
}

// syncBlockedSenders fetches the list of blocked addresses from the contract in batches
// and removes the addresses from the functions_allowlist table if present
func (a *onchainAllowlist) syncBlockedSenders(ctx context.Context, tosContract *functions_allow_list.TermsOfServiceAllowList, blockNum *big.Int) error {
	count, err := tosContract.GetBlockedSendersCount(&bind.CallOpts{
		Pending:     false,
		BlockNumber: blockNum,
		Context:     ctx,
	})
	if err != nil {
		return errors.Wrap(err, "unexpected error during functions_allow_list.GetBlockedSendersCount")
	}

	throttleTicker := time.NewTicker(time.Duration(a.config.FetchingDelayInRangeSec) * time.Second)
	defer throttleTicker.Stop() // released on early error returns too
	for idxStart := uint64(0); idxStart < count; idxStart += uint64(a.config.OnchainAllowlistBatchSize) {
		<-throttleTicker.C

		// Inclusive range: a batch of size B covers [idxStart, idxStart+B-1].
		idxEnd := idxStart + uint64(a.config.OnchainAllowlistBatchSize) - 1
		if idxEnd >= count {
			idxEnd = count - 1
		}

		blockedSendersBatch, err := tosContract.GetBlockedSendersInRange(&bind.CallOpts{
			Pending:     false,
			BlockNumber: blockNum,
			Context:     ctx,
		}, idxStart, idxEnd)
		if err != nil {
			// was previously mislabeled as GetAllowedSendersInRange
			return errors.Wrap(err, "error calling GetBlockedSendersInRange")
		}

		if err := a.orm.DeleteAllowedSenders(blockedSendersBatch); err != nil {
			a.lggr.Errorw("failed to delete blocked address from allowed list in storage", "err", err)
		}
	}

	return nil
}

// update atomically swaps the in-memory allowlist for the given address set.
func (a *onchainAllowlist) update(addrList []common.Address) {
	newAllowlist := make(map[common.Address]struct{})
	for _, addr := range addrList {
		newAllowlist[addr] = struct{}{}
	}
	a.allowlist.Store(&newAllowlist)
	a.lggr.Infow("allowlist updated successfully", "len", len(addrList))
}

// loadStoredAllowedSenderList seeds the in-memory allowlist from the DB copy,
// paging through it in StoredAllowlistBatchSize chunks.
func (a *onchainAllowlist) loadStoredAllowedSenderList() {
	allowedList := make([]common.Address,
0) + offset := uint(0) + for { + asBatch, err := a.orm.GetAllowedSenders(offset, a.config.StoredAllowlistBatchSize) + if err != nil { + a.lggr.Errorf("failed to get stored allowed senders: %w", err) + break + } + + allowedList = append(allowedList, asBatch...) + + if len(asBatch) < int(a.config.StoredAllowlistBatchSize) { + break + } + offset += a.config.StoredAllowlistBatchSize + } + + a.update(allowedList) +} diff --git a/core/services/gateway/handlers/functions/allowlist/allowlist_test.go b/core/services/gateway/handlers/functions/allowlist/allowlist_test.go new file mode 100644 index 00000000..7c23931d --- /dev/null +++ b/core/services/gateway/handlers/functions/allowlist/allowlist_test.go @@ -0,0 +1,184 @@ +package allowlist_test + +import ( + "context" + "encoding/hex" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/allowlist" + amocks "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/allowlist/mocks" +) + +const ( + addr1 = "9ed925d8206a4f88a2f643b28b3035b315753cd6" + addr2 = "ea6721ac65bced841b8ec3fc5fedea6141a0ade4" + addr3 = "84689acc87ff22841b8ec378300da5e141a99911" +) + +func sampleEncodedAllowlist(t *testing.T) []byte { + abiEncodedAddresses := + "0000000000000000000000000000000000000000000000000000000000000020" + + "0000000000000000000000000000000000000000000000000000000000000002" + + "000000000000000000000000" + addr1 + + "000000000000000000000000" + addr2 + rawData, err := hex.DecodeString(abiEncodedAddresses) + require.NoError(t, err) + return rawData +} + +func TestAllowlist_UpdateAndCheck(t 
*testing.T) { + t.Parallel() + + client := mocks.NewClient(t) + client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) + client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(sampleEncodedAllowlist(t), nil) + config := allowlist.OnchainAllowlistConfig{ + ContractVersion: 1, + ContractAddress: common.Address{}, + BlockConfirmations: 1, + } + + orm := amocks.NewORM(t) + allowlist, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) + require.NoError(t, err) + + err = allowlist.Start(testutils.Context(t)) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, allowlist.Close()) + }) + + require.NoError(t, allowlist.UpdateFromContract(testutils.Context(t))) + require.False(t, allowlist.Allow(common.Address{})) + require.True(t, allowlist.Allow(common.HexToAddress(addr1))) + require.True(t, allowlist.Allow(common.HexToAddress(addr2))) + require.False(t, allowlist.Allow(common.HexToAddress(addr3))) +} + +func TestAllowlist_UnsupportedVersion(t *testing.T) { + t.Parallel() + + client := mocks.NewClient(t) + config := allowlist.OnchainAllowlistConfig{ + ContractVersion: 0, + ContractAddress: common.Address{}, + BlockConfirmations: 1, + } + + orm := amocks.NewORM(t) + _, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) + require.Error(t, err) +} + +func TestAllowlist_UpdatePeriodically(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(testutils.Context(t)) + client := mocks.NewClient(t) + client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) + client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + cancel() + }).Return(sampleEncodedAllowlist(t), nil) + config := allowlist.OnchainAllowlistConfig{ + ContractAddress: common.Address{}, + ContractVersion: 1, + BlockConfirmations: 1, + UpdateFrequencySec: 2, + UpdateTimeoutSec: 1, + } + + orm := amocks.NewORM(t) + 
orm.On("GetAllowedSenders", uint(0), uint(1000)).Return([]common.Address{}, nil) + + allowlist, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) + require.NoError(t, err) + + err = allowlist.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, allowlist.Close()) + }) + + gomega.NewGomegaWithT(t).Eventually(func() bool { + return allowlist.Allow(common.HexToAddress(addr1)) && !allowlist.Allow(common.HexToAddress(addr3)) + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) +} +func TestAllowlist_UpdateFromContract(t *testing.T) { + t.Parallel() + + t.Run("OK-iterate_over_list_of_allowed_senders", func(t *testing.T) { + ctx, cancel := context.WithCancel(testutils.Context(t)) + client := mocks.NewClient(t) + client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) + client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + cancel() + }).Return(sampleEncodedAllowlist(t), nil) + config := allowlist.OnchainAllowlistConfig{ + ContractAddress: common.HexToAddress(addr3), + ContractVersion: 1, + BlockConfirmations: 1, + UpdateFrequencySec: 2, + UpdateTimeoutSec: 1, + StoredAllowlistBatchSize: 2, + OnchainAllowlistBatchSize: 16, + StoreAllowedSendersEnabled: true, + FetchingDelayInRangeSec: 0, + } + + orm := amocks.NewORM(t) + orm.On("DeleteAllowedSenders", []common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(2).Return(nil) + orm.On("CreateAllowedSenders", []common.Address{common.HexToAddress(addr1), common.HexToAddress(addr2)}).Times(2).Return(nil) + + allowlist, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) + require.NoError(t, err) + + err = allowlist.UpdateFromContract(ctx) + require.NoError(t, err) + + gomega.NewGomegaWithT(t).Eventually(func() bool { + return allowlist.Allow(common.HexToAddress(addr1)) && !allowlist.Allow(common.HexToAddress(addr3)) + }, 
testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + }) + + t.Run("OK-fetch_complete_list_of_allowed_senders_without_storing", func(t *testing.T) { + ctx, cancel := context.WithCancel(testutils.Context(t)) + client := mocks.NewClient(t) + client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) + client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + cancel() + }).Return(sampleEncodedAllowlist(t), nil) + config := allowlist.OnchainAllowlistConfig{ + ContractAddress: common.HexToAddress(addr3), + ContractVersion: 1, + BlockConfirmations: 1, + UpdateFrequencySec: 2, + UpdateTimeoutSec: 1, + StoredAllowlistBatchSize: 2, + OnchainAllowlistBatchSize: 16, + StoreAllowedSendersEnabled: false, + FetchingDelayInRangeSec: 0, + } + + orm := amocks.NewORM(t) + allowlist, err := allowlist.NewOnchainAllowlist(client, config, orm, logger.TestLogger(t)) + require.NoError(t, err) + + err = allowlist.UpdateFromContract(ctx) + require.NoError(t, err) + + gomega.NewGomegaWithT(t).Eventually(func() bool { + return allowlist.Allow(common.HexToAddress(addr1)) && !allowlist.Allow(common.HexToAddress(addr3)) + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + }) +} diff --git a/core/services/gateway/handlers/functions/allowlist/mocks/onchain_allowlist.go b/core/services/gateway/handlers/functions/allowlist/mocks/onchain_allowlist.go new file mode 100644 index 00000000..6668a3c7 --- /dev/null +++ b/core/services/gateway/handlers/functions/allowlist/mocks/onchain_allowlist.go @@ -0,0 +1,102 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" +) + +// OnchainAllowlist is an autogenerated mock type for the OnchainAllowlist type +type OnchainAllowlist struct { + mock.Mock +} + +// Allow provides a mock function with given fields: _a0 +func (_m *OnchainAllowlist) Allow(_a0 common.Address) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Allow") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(common.Address) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Close provides a mock function with given fields: +func (_m *OnchainAllowlist) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *OnchainAllowlist) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateFromContract provides a mock function with given fields: ctx +func (_m *OnchainAllowlist) UpdateFromContract(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for UpdateFromContract") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewOnchainAllowlist creates a new instance of OnchainAllowlist. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewOnchainAllowlist(t interface { + mock.TestingT + Cleanup(func()) +}) *OnchainAllowlist { + mock := &OnchainAllowlist{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/handlers/functions/allowlist/mocks/orm.go b/core/services/gateway/handlers/functions/allowlist/mocks/orm.go new file mode 100644 index 00000000..c47db79f --- /dev/null +++ b/core/services/gateway/handlers/functions/allowlist/mocks/orm.go @@ -0,0 +1,116 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + common "github.com/ethereum/go-ethereum/common" + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// ORM is an autogenerated mock type for the ORM type +type ORM struct { + mock.Mock +} + +// CreateAllowedSenders provides a mock function with given fields: allowedSenders, qopts +func (_m *ORM) CreateAllowedSenders(allowedSenders []common.Address, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, allowedSenders) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateAllowedSenders") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]common.Address, ...pg.QOpt) error); ok { + r0 = rf(allowedSenders, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteAllowedSenders provides a mock function with given fields: blockedSenders, qopts +func (_m *ORM) DeleteAllowedSenders(blockedSenders []common.Address, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, blockedSenders) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for DeleteAllowedSenders") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]common.Address, ...pg.QOpt) error); ok { + r0 = rf(blockedSenders, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetAllowedSenders provides a mock function with given fields: offset, limit, qopts +func (_m *ORM) GetAllowedSenders(offset uint, limit uint, qopts ...pg.QOpt) ([]common.Address, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, offset, limit) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetAllowedSenders") + } + + var r0 []common.Address + var r1 error + if rf, ok := ret.Get(0).(func(uint, uint, ...pg.QOpt) ([]common.Address, error)); ok { + return rf(offset, limit, qopts...) + } + if rf, ok := ret.Get(0).(func(uint, uint, ...pg.QOpt) []common.Address); ok { + r0 = rf(offset, limit, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]common.Address) + } + } + + if rf, ok := ret.Get(1).(func(uint, uint, ...pg.QOpt) error); ok { + r1 = rf(offset, limit, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewORM creates a new instance of ORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewORM(t interface { + mock.TestingT + Cleanup(func()) +}) *ORM { + mock := &ORM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/handlers/functions/allowlist/orm.go b/core/services/gateway/handlers/functions/allowlist/orm.go new file mode 100644 index 00000000..591def3f --- /dev/null +++ b/core/services/gateway/handlers/functions/allowlist/orm.go @@ -0,0 +1,123 @@ +package allowlist + +import ( + "fmt" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +//go:generate mockery --quiet --name ORM --output ./mocks/ --case=underscore +type ORM interface { + GetAllowedSenders(offset, limit uint, qopts ...pg.QOpt) ([]common.Address, error) + CreateAllowedSenders(allowedSenders []common.Address, qopts ...pg.QOpt) error + DeleteAllowedSenders(blockedSenders []common.Address, qopts ...pg.QOpt) error +} + +type orm struct { + q pg.Q + lggr logger.Logger + routerContractAddress common.Address +} + +var _ ORM = (*orm)(nil) +var ( + ErrInvalidParameters = errors.New("invalid parameters provided to create a functions contract cache ORM") +) + +const ( + tableName = "functions_allowlist" +) + +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig, routerContractAddress common.Address) (ORM, error) { + if db == nil || cfg == nil || lggr == nil || routerContractAddress == (common.Address{}) { + return nil, ErrInvalidParameters + } + + return &orm{ + q: pg.NewQ(db, lggr, cfg), + lggr: lggr, + routerContractAddress: routerContractAddress, + }, nil +} + +func (o *orm) GetAllowedSenders(offset, limit uint, qopts ...pg.QOpt) ([]common.Address, error) { + var addresses []common.Address + stmt := fmt.Sprintf(` + SELECT allowed_address + FROM %s + WHERE router_contract_address = $1 + ORDER BY id ASC + OFFSET $2 + LIMIT $3; + `, 
tableName) + err := o.q.WithOpts(qopts...).Select(&addresses, stmt, o.routerContractAddress, offset, limit) + if err != nil { + return addresses, err + } + o.lggr.Debugf("Successfully fetched allowed sender list from DB. offset: %d, limit: %d, length: %d", offset, limit, len(addresses)) + + return addresses, nil +} + +func (o *orm) CreateAllowedSenders(allowedSenders []common.Address, qopts ...pg.QOpt) error { + var valuesPlaceholder []string + for i := 1; i <= len(allowedSenders)*2; i += 2 { + valuesPlaceholder = append(valuesPlaceholder, fmt.Sprintf("($%d, $%d)", i, i+1)) + } + + stmt := fmt.Sprintf(` + INSERT INTO %s (allowed_address, router_contract_address) + VALUES %s ON CONFLICT (allowed_address, router_contract_address) DO NOTHING;`, tableName, strings.Join(valuesPlaceholder, ", ")) + + var args []interface{} + for _, as := range allowedSenders { + args = append(args, as, o.routerContractAddress) + } + + _, err := o.q.WithOpts(qopts...).Exec(stmt, args...) + if err != nil { + return err + } + + o.lggr.Debugf("Successfully stored allowed senders: %v for routerContractAddress: %s", allowedSenders, o.routerContractAddress) + + return nil +} + +func (o *orm) DeleteAllowedSenders(blockedSenders []common.Address, qopts ...pg.QOpt) error { + var valuesPlaceholder []string + for i := 1; i <= len(blockedSenders); i++ { + valuesPlaceholder = append(valuesPlaceholder, fmt.Sprintf("$%d", i+1)) + } + + stmt := fmt.Sprintf(` + DELETE FROM %s + WHERE router_contract_address = $1 + AND allowed_address IN (%s);`, tableName, strings.Join(valuesPlaceholder, ", ")) + + args := []interface{}{o.routerContractAddress} + for _, bs := range blockedSenders { + args = append(args, bs) + } + + res, err := o.q.WithOpts(qopts...).Exec(stmt, args...) + if err != nil { + return err + } + + rowsAffected, err := res.RowsAffected() + if err != nil { + return err + } + + o.lggr.Debugf("Successfully removed blocked senders from the allowed list: %v for routerContractAddress: %s. 
rowsAffected: %d", blockedSenders, o.routerContractAddress, rowsAffected) + + return nil +} diff --git a/core/services/gateway/handlers/functions/allowlist/orm_test.go b/core/services/gateway/handlers/functions/allowlist/orm_test.go new file mode 100644 index 00000000..70df66fc --- /dev/null +++ b/core/services/gateway/handlers/functions/allowlist/orm_test.go @@ -0,0 +1,190 @@ +package allowlist_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/allowlist" +) + +func setupORM(t *testing.T) (allowlist.ORM, error) { + t.Helper() + + var ( + db = pgtest.NewSqlxDB(t) + lggr = logger.TestLogger(t) + ) + + return allowlist.NewORM(db, lggr, pgtest.NewQConfig(true), testutils.NewAddress()) +} + +func seedAllowedSenders(t *testing.T, orm allowlist.ORM, amount int) []common.Address { + storedAllowedSenders := make([]common.Address, amount) + for i := 0; i < amount; i++ { + address := testutils.NewAddress() + storedAllowedSenders[i] = address + } + + err := orm.CreateAllowedSenders(storedAllowedSenders) + require.NoError(t, err) + + return storedAllowedSenders +} +func TestORM_GetAllowedSenders(t *testing.T) { + t.Parallel() + t.Run("fetch first page", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + storedAllowedSenders := seedAllowedSenders(t, orm, 2) + results, err := orm.GetAllowedSenders(0, 1) + require.NoError(t, err) + require.Equal(t, 1, len(results), "incorrect results length") + require.Equal(t, storedAllowedSenders[0], results[0]) + }) + + t.Run("fetch second page", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + storedAllowedSenders := seedAllowedSenders(t, orm, 2) + results, err := 
orm.GetAllowedSenders(1, 5) + require.NoError(t, err) + require.Equal(t, 1, len(results), "incorrect results length") + require.Equal(t, storedAllowedSenders[1], results[0]) + }) +} + +func TestORM_CreateAllowedSenders(t *testing.T) { + t.Parallel() + + t.Run("OK-create_an_allowed_sender", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + expected := testutils.NewAddress() + err = orm.CreateAllowedSenders([]common.Address{expected}) + require.NoError(t, err) + + results, err := orm.GetAllowedSenders(0, 1) + require.NoError(t, err) + require.Equal(t, 1, len(results), "incorrect results length") + require.Equal(t, expected, results[0]) + }) + + t.Run("OK-create_an_existing_allowed_sender", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + expected := testutils.NewAddress() + err = orm.CreateAllowedSenders([]common.Address{expected}) + require.NoError(t, err) + + err = orm.CreateAllowedSenders([]common.Address{expected}) + require.NoError(t, err) + + results, err := orm.GetAllowedSenders(0, 5) + require.NoError(t, err) + require.Equal(t, 1, len(results), "incorrect results length") + require.Equal(t, expected, results[0]) + }) + + t.Run("OK-create_multiple_allowed_senders_in_one_query", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + expected := []common.Address{testutils.NewAddress(), testutils.NewAddress()} + err = orm.CreateAllowedSenders(expected) + require.NoError(t, err) + + results, err := orm.GetAllowedSenders(0, 2) + require.NoError(t, err) + require.Equal(t, 2, len(results), "incorrect results length") + require.Equal(t, expected[0], results[0]) + require.Equal(t, expected[1], results[1]) + }) + + t.Run("OK-create_multiple_allowed_senders_with_duplicates", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + addr1 := testutils.NewAddress() + addr2 := testutils.NewAddress() + expected := []common.Address{addr1, addr2} + + duplicatedAddressInput := 
[]common.Address{addr1, addr1, addr1, addr2} + err = orm.CreateAllowedSenders(duplicatedAddressInput) + require.NoError(t, err) + + results, err := orm.GetAllowedSenders(0, 10) + require.NoError(t, err) + require.Equal(t, 2, len(results), "incorrect results length") + require.Equal(t, expected[0], results[0]) + require.Equal(t, expected[1], results[1]) + }) +} + +func TestORM_DeleteAllowedSenders(t *testing.T) { + t.Parallel() + + t.Run("OK-delete_blocked_sender_from_allowed_list", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + add1 := testutils.NewAddress() + add2 := testutils.NewAddress() + add3 := testutils.NewAddress() + err = orm.CreateAllowedSenders([]common.Address{add1, add2, add3}) + require.NoError(t, err) + + results, err := orm.GetAllowedSenders(0, 10) + require.NoError(t, err) + require.Equal(t, 3, len(results), "incorrect results length") + require.Equal(t, add1, results[0]) + + err = orm.DeleteAllowedSenders([]common.Address{add1, add3}) + require.NoError(t, err) + + results, err = orm.GetAllowedSenders(0, 10) + require.NoError(t, err) + require.Equal(t, 1, len(results), "incorrect results length") + require.Equal(t, add2, results[0]) + }) + + t.Run("OK-delete_non_existing_blocked_sender_from_allowed_list", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + add1 := testutils.NewAddress() + add2 := testutils.NewAddress() + err = orm.CreateAllowedSenders([]common.Address{add1, add2}) + require.NoError(t, err) + + results, err := orm.GetAllowedSenders(0, 10) + require.NoError(t, err) + require.Equal(t, 2, len(results), "incorrect results length") + require.Equal(t, add1, results[0]) + + add3 := testutils.NewAddress() + err = orm.DeleteAllowedSenders([]common.Address{add3}) + require.NoError(t, err) + + results, err = orm.GetAllowedSenders(0, 10) + require.NoError(t, err) + require.Equal(t, 2, len(results), "incorrect results length") + require.Equal(t, add1, results[0]) + require.Equal(t, add2, 
results[1]) + }) +} + +func Test_NewORM(t *testing.T) { + t.Run("OK-create_ORM", func(t *testing.T) { + _, err := allowlist.NewORM(pgtest.NewSqlxDB(t), logger.TestLogger(t), pgtest.NewQConfig(true), testutils.NewAddress()) + require.NoError(t, err) + }) + t.Run("NOK-create_ORM_with_nil_fields", func(t *testing.T) { + _, err := allowlist.NewORM(nil, nil, nil, common.Address{}) + require.Error(t, err) + }) + t.Run("NOK-create_ORM_with_empty_address", func(t *testing.T) { + _, err := allowlist.NewORM(pgtest.NewSqlxDB(t), logger.TestLogger(t), pgtest.NewQConfig(true), common.Address{}) + require.Error(t, err) + }) +} diff --git a/core/services/gateway/handlers/functions/api.go b/core/services/gateway/handlers/functions/api.go new file mode 100644 index 00000000..211d2354 --- /dev/null +++ b/core/services/gateway/handlers/functions/api.go @@ -0,0 +1,45 @@ +package functions + +import "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + +const ( + MethodSecretsSet = "secrets_set" + MethodSecretsList = "secrets_list" + MethodHeartbeat = "heartbeat" +) + +type SecretsSetRequest struct { + SlotID uint `json:"slot_id"` + Version uint64 `json:"version"` + Expiration int64 `json:"expiration"` + Payload []byte `json:"payload"` + Signature []byte `json:"signature"` +} + +// SecretsListRequest has empty payload + +type ResponseBase struct { + Success bool `json:"success"` + ErrorMessage string `json:"error_message,omitempty"` +} + +type SecretsSetResponse struct { + ResponseBase +} + +type SecretsListResponse struct { + ResponseBase + Rows []SecretsListRow `json:"rows,omitempty"` +} + +type SecretsListRow struct { + SlotID uint `json:"slot_id"` + Version uint64 `json:"version"` + Expiration int64 `json:"expiration"` +} + +// Gateway -> User response, which combines responses from several nodes +type CombinedResponse struct { + ResponseBase + NodeResponses []*api.Message `json:"node_responses"` +} diff --git 
a/core/services/gateway/handlers/functions/handler.functions.go b/core/services/gateway/handlers/functions/handler.functions.go new file mode 100644 index 00000000..3a73a2c6 --- /dev/null +++ b/core/services/gateway/handlers/functions/handler.functions.go @@ -0,0 +1,391 @@ +package functions + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/jmoiron/sqlx" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/multierr" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/config" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers" + hc "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/common" + fallow "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/allowlist" + fsub "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/subscriptions" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +var ( + ErrNotAllowlisted = errors.New("sender not allowlisted") + ErrRateLimited = errors.New("rate-limited") + ErrUnsupportedMethod = errors.New("unsupported method") + + promHandlerError = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "gateway_functions_handler_error", + Help: "Metric to track functions handler errors", + }, []string{"don_id", "error"}) + + promSecretsSetSuccess = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "gateway_functions_secrets_set_success", + Help: "Metric to track successful secrets_set calls", + }, []string{"don_id"}) + + promSecretsSetFailure = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: 
"gateway_functions_secrets_set_failure", + Help: "Metric to track failed secrets_set calls", + }, []string{"don_id"}) + + promSecretsListSuccess = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "gateway_functions_secrets_list_success", + Help: "Metric to track successful secrets_list calls", + }, []string{"don_id"}) + + promSecretsListFailure = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "gateway_functions_secrets_list_failure", + Help: "Metric to track failed secrets_list calls", + }, []string{"don_id"}) +) + +type FunctionsHandlerConfig struct { + ChainID string `json:"chainId"` + // Not specifying OnchainAllowlist config disables allowlist checks + OnchainAllowlist *fallow.OnchainAllowlistConfig `json:"onchainAllowlist"` + // Not specifying OnchainSubscriptions config disables minimum balance checks + OnchainSubscriptions *fsub.OnchainSubscriptionsConfig `json:"onchainSubscriptions"` + MinimumSubscriptionBalance *assets.Link `json:"minimumSubscriptionBalance"` + // Not specifying RateLimiter config disables rate limiting + UserRateLimiter *hc.RateLimiterConfig `json:"userRateLimiter"` + NodeRateLimiter *hc.RateLimiterConfig `json:"nodeRateLimiter"` + MaxPendingRequests uint32 `json:"maxPendingRequests"` + RequestTimeoutMillis int64 `json:"requestTimeoutMillis"` + AllowedHeartbeatInitiators []string `json:"allowedHeartbeatInitiators"` +} + +type functionsHandler struct { + services.StateMachine + + handlerConfig FunctionsHandlerConfig + donConfig *config.DONConfig + don handlers.DON + pendingRequests hc.RequestCache[PendingRequest] + allowlist fallow.OnchainAllowlist + subscriptions fsub.OnchainSubscriptions + minimumBalance *assets.Link + userRateLimiter *hc.RateLimiter + nodeRateLimiter *hc.RateLimiter + allowedHeartbeatInitiators map[string]struct{} + chStop services.StopChan + lggr logger.Logger +} + +type PendingRequest struct { + request *api.Message + responses map[string]*api.Message + successful []*api.Message + errors []*api.Message 
+} + +var _ handlers.Handler = (*functionsHandler)(nil) + +func NewFunctionsHandlerFromConfig(handlerConfig json.RawMessage, donConfig *config.DONConfig, don handlers.DON, legacyChains legacyevm.LegacyChainContainer, db *sqlx.DB, qcfg pg.QConfig, lggr logger.Logger) (handlers.Handler, error) { + var cfg FunctionsHandlerConfig + err := json.Unmarshal(handlerConfig, &cfg) + if err != nil { + return nil, err + } + lggr = lggr.Named("FunctionsHandler:" + donConfig.DonId) + var allowlist fallow.OnchainAllowlist + if cfg.OnchainAllowlist != nil { + chain, err2 := legacyChains.Get(cfg.ChainID) + if err2 != nil { + return nil, err2 + } + + orm, err2 := fallow.NewORM(db, lggr, qcfg, cfg.OnchainAllowlist.ContractAddress) + if err2 != nil { + return nil, err2 + } + allowlist, err2 = fallow.NewOnchainAllowlist(chain.Client(), *cfg.OnchainAllowlist, orm, lggr) + if err2 != nil { + return nil, err2 + } + } + var userRateLimiter, nodeRateLimiter *hc.RateLimiter + if cfg.UserRateLimiter != nil { + userRateLimiter, err = hc.NewRateLimiter(*cfg.UserRateLimiter) + if err != nil { + return nil, err + } + } + if cfg.NodeRateLimiter != nil { + nodeRateLimiter, err = hc.NewRateLimiter(*cfg.NodeRateLimiter) + if err != nil { + return nil, err + } + } + var subscriptions fsub.OnchainSubscriptions + if cfg.OnchainSubscriptions != nil { + chain, err2 := legacyChains.Get(cfg.ChainID) + if err2 != nil { + return nil, err2 + } + + orm, err2 := fsub.NewORM(db, lggr, qcfg, cfg.OnchainSubscriptions.ContractAddress) + if err2 != nil { + return nil, err2 + } + + subscriptions, err2 = fsub.NewOnchainSubscriptions(chain.Client(), *cfg.OnchainSubscriptions, orm, lggr) + if err2 != nil { + return nil, err2 + } + } + allowedHeartbeatInitiators := make(map[string]struct{}) + for _, initiator := range cfg.AllowedHeartbeatInitiators { + allowedHeartbeatInitiators[strings.ToLower(initiator)] = struct{}{} + } + pendingRequestsCache := 
hc.NewRequestCache[PendingRequest](time.Millisecond*time.Duration(cfg.RequestTimeoutMillis), cfg.MaxPendingRequests) + return NewFunctionsHandler(cfg, donConfig, don, pendingRequestsCache, allowlist, subscriptions, cfg.MinimumSubscriptionBalance, userRateLimiter, nodeRateLimiter, allowedHeartbeatInitiators, lggr), nil +} + +func NewFunctionsHandler( + cfg FunctionsHandlerConfig, + donConfig *config.DONConfig, + don handlers.DON, + pendingRequestsCache hc.RequestCache[PendingRequest], + allowlist fallow.OnchainAllowlist, + subscriptions fsub.OnchainSubscriptions, + minimumBalance *assets.Link, + userRateLimiter *hc.RateLimiter, + nodeRateLimiter *hc.RateLimiter, + allowedHeartbeatInitiators map[string]struct{}, + lggr logger.Logger) handlers.Handler { + return &functionsHandler{ + handlerConfig: cfg, + donConfig: donConfig, + don: don, + pendingRequests: pendingRequestsCache, + allowlist: allowlist, + subscriptions: subscriptions, + minimumBalance: minimumBalance, + userRateLimiter: userRateLimiter, + nodeRateLimiter: nodeRateLimiter, + allowedHeartbeatInitiators: allowedHeartbeatInitiators, + chStop: make(services.StopChan), + lggr: lggr, + } +} + +func (h *functionsHandler) HandleUserMessage(ctx context.Context, msg *api.Message, callbackCh chan<- handlers.UserCallbackPayload) error { + sender := common.HexToAddress(msg.Body.Sender) + if h.allowlist != nil && !h.allowlist.Allow(sender) { + h.lggr.Debugw("received a message from a non-allowlisted address", "sender", msg.Body.Sender) + promHandlerError.WithLabelValues(h.donConfig.DonId, ErrNotAllowlisted.Error()).Inc() + return ErrNotAllowlisted + } + if h.userRateLimiter != nil && !h.userRateLimiter.Allow(msg.Body.Sender) { + h.lggr.Debugw("rate-limited", "sender", msg.Body.Sender) + promHandlerError.WithLabelValues(h.donConfig.DonId, ErrRateLimited.Error()).Inc() + return ErrRateLimited + } + if msg.Body.Method == MethodSecretsSet && h.subscriptions != nil && h.minimumBalance != nil { + balance, err := 
h.subscriptions.GetMaxUserBalance(sender) + if err != nil { + h.lggr.Debugw("error getting max user balance", "sender", msg.Body.Sender, "err", err) + } + if balance == nil { + balance = big.NewInt(0) + } + if err != nil || balance.Cmp(h.minimumBalance.ToInt()) < 0 { + h.lggr.Debugw("received a message from a user having insufficient balance", "sender", msg.Body.Sender, "balance", balance.String()) + return fmt.Errorf("sender has insufficient balance: %v juels", balance.String()) + } + } + switch msg.Body.Method { + case MethodSecretsSet, MethodSecretsList: + return h.handleRequest(ctx, msg, callbackCh) + case MethodHeartbeat: + if _, ok := h.allowedHeartbeatInitiators[msg.Body.Sender]; !ok { + h.lggr.Debugw("received heartbeat request from a non-allowed sender", "sender", msg.Body.Sender) + promHandlerError.WithLabelValues(h.donConfig.DonId, ErrNotAllowlisted.Error()).Inc() + return ErrUnsupportedMethod + } + return h.handleRequest(ctx, msg, callbackCh) + default: + h.lggr.Debugw("unsupported method", "method", msg.Body.Method) + promHandlerError.WithLabelValues(h.donConfig.DonId, ErrUnsupportedMethod.Error()).Inc() + return ErrUnsupportedMethod + } +} + +func (h *functionsHandler) handleRequest(ctx context.Context, msg *api.Message, callbackCh chan<- handlers.UserCallbackPayload) error { + h.lggr.Debugw("handleRequest: processing message", "sender", msg.Body.Sender, "messageId", msg.Body.MessageId) + err := h.pendingRequests.NewRequest(msg, callbackCh, &PendingRequest{request: msg, responses: make(map[string]*api.Message)}) + if err != nil { + h.lggr.Warnw("handleRequest: error adding new request", "sender", msg.Body.Sender, "err", err) + promHandlerError.WithLabelValues(h.donConfig.DonId, err.Error()).Inc() + return err + } + // Send to all nodes. 
+ for _, member := range h.donConfig.Members { + err := h.don.SendToNode(ctx, member.Address, msg) + if err != nil { + h.lggr.Debugw("handleRequest: failed to send to a node", "node", member.Address, "err", err) + } + } + return nil +} + +func (h *functionsHandler) HandleNodeMessage(ctx context.Context, msg *api.Message, nodeAddr string) error { + h.lggr.Debugw("HandleNodeMessage: processing message", "nodeAddr", nodeAddr, "receiver", msg.Body.Receiver, "id", msg.Body.MessageId) + if h.nodeRateLimiter != nil && !h.nodeRateLimiter.Allow(nodeAddr) { + h.lggr.Debugw("rate-limited", "sender", nodeAddr) + return errors.New("rate-limited") + } + switch msg.Body.Method { + case MethodSecretsSet, MethodSecretsList: + return h.pendingRequests.ProcessResponse(msg, h.processSecretsResponse) + case MethodHeartbeat: + return h.pendingRequests.ProcessResponse(msg, h.processHeartbeatResponse) + default: + h.lggr.Debugw("unsupported method", "method", msg.Body.Method) + return ErrUnsupportedMethod + } +} + +// Conforms to ResponseProcessor[*PendingRequest] +func (h *functionsHandler) processSecretsResponse(response *api.Message, responseData *PendingRequest) (*handlers.UserCallbackPayload, *PendingRequest, error) { + if _, exists := responseData.responses[response.Body.Sender]; exists { + return nil, nil, errors.New("duplicate response") + } + if response.Body.Method != responseData.request.Body.Method { + return nil, responseData, errors.New("invalid method") + } + responseData.responses[response.Body.Sender] = response + var responsePayload ResponseBase + err := json.Unmarshal(response.Body.Payload, &responsePayload) + if err != nil { + responseData.errors = append(responseData.errors, response) + return nil, responseData, err + } + // user response is ready with either F+1 successes or N-F failures + if responsePayload.Success { + responseData.successful = append(responseData.successful, response) + if len(responseData.successful) >= h.donConfig.F+1 { + // return success to the 
user + callbackPayload, err := newSecretsResponse(responseData.request, true, responseData.successful) + return callbackPayload, responseData, err + } + } else { + responseData.errors = append(responseData.errors, response) + if len(responseData.errors) >= len(h.donConfig.Members)-h.donConfig.F { + // return error to the user + callbackPayload, err := newSecretsResponse(responseData.request, false, responseData.errors) + return callbackPayload, responseData, err + } + } + // not ready to be processed yet + return nil, responseData, nil +} + +func newSecretsResponse(request *api.Message, success bool, responses []*api.Message) (*handlers.UserCallbackPayload, error) { + payload := CombinedResponse{ResponseBase: ResponseBase{Success: success}, NodeResponses: responses} + payloadJson, err := json.Marshal(payload) + if err != nil { + return nil, err + } + + if request.Body.Method == MethodSecretsSet { + if success { + promSecretsSetSuccess.WithLabelValues(request.Body.DonId).Inc() + } else { + promSecretsSetFailure.WithLabelValues(request.Body.DonId).Inc() + } + } else if request.Body.Method == MethodSecretsList { + if success { + promSecretsListSuccess.WithLabelValues(request.Body.DonId).Inc() + } else { + promSecretsListFailure.WithLabelValues(request.Body.DonId).Inc() + } + } + + userResponse := *request + userResponse.Body.Receiver = request.Body.Sender + userResponse.Body.Payload = payloadJson + return &handlers.UserCallbackPayload{Msg: &userResponse, ErrCode: api.NoError, ErrMsg: ""}, nil +} + +// Conforms to ResponseProcessor[*PendingRequest] +func (h *functionsHandler) processHeartbeatResponse(response *api.Message, responseData *PendingRequest) (*handlers.UserCallbackPayload, *PendingRequest, error) { + if _, exists := responseData.responses[response.Body.Sender]; exists { + return nil, nil, errors.New("duplicate response") + } + if response.Body.Method != responseData.request.Body.Method { + return nil, responseData, errors.New("invalid method") + } + 
responseData.responses[response.Body.Sender] = response + + // user response is ready with F+1 node responses + if len(responseData.responses) >= h.donConfig.F+1 { + var responseList []*api.Message + for _, response := range responseData.responses { + responseList = append(responseList, response) + } + userResponse := *responseData.request + userResponse.Body.Receiver = responseData.request.Body.Sender + // success = true only means that we got F+1 responses + // it's up to the heartbeat sender to validate computation results + payload := CombinedResponse{ResponseBase: ResponseBase{Success: true}, NodeResponses: responseList} + payloadJson, err := json.Marshal(payload) + if err != nil { + return &handlers.UserCallbackPayload{Msg: &userResponse, ErrCode: api.NodeReponseEncodingError, ErrMsg: ""}, nil, nil + } + userResponse.Body.Payload = payloadJson + return &handlers.UserCallbackPayload{Msg: &userResponse, ErrCode: api.NoError, ErrMsg: ""}, nil, nil + } + // not ready to be processed yet + return nil, responseData, nil +} + +func (h *functionsHandler) Start(ctx context.Context) error { + return h.StartOnce("FunctionsHandler", func() error { + h.lggr.Info("starting FunctionsHandler") + if h.allowlist != nil { + if err := h.allowlist.Start(ctx); err != nil { + return err + } + } + if h.subscriptions != nil { + if err := h.subscriptions.Start(ctx); err != nil { + return err + } + } + return nil + }) +} + +func (h *functionsHandler) Close() error { + return h.StopOnce("FunctionsHandler", func() (err error) { + close(h.chStop) + if h.allowlist != nil { + err = multierr.Combine(err, h.allowlist.Close()) + } + if h.subscriptions != nil { + err = multierr.Combine(err, h.subscriptions.Close()) + } + return + }) +} diff --git a/core/services/gateway/handlers/functions/handler.functions_test.go b/core/services/gateway/handlers/functions/handler.functions_test.go new file mode 100644 index 00000000..5a2c5a97 --- /dev/null +++ 
b/core/services/gateway/handlers/functions/handler.functions_test.go @@ -0,0 +1,226 @@ +package functions_test + +import ( + "crypto/ecdsa" + "encoding/json" + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + gc "github.com/goplugin/pluginv3.0/v2/core/services/gateway/common" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/config" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers" + hc "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/common" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions" + allowlist_mocks "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/allowlist/mocks" + subscriptions_mocks "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/subscriptions/mocks" + handlers_mocks "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/mocks" +) + +func newFunctionsHandlerForATestDON(t *testing.T, nodes []gc.TestNode, requestTimeout time.Duration, heartbeatSender string) (handlers.Handler, *handlers_mocks.DON, *allowlist_mocks.OnchainAllowlist, *subscriptions_mocks.OnchainSubscriptions) { + cfg := functions.FunctionsHandlerConfig{} + donConfig := &config.DONConfig{ + Members: []config.NodeConfig{}, + F: 1, + } + + for id, n := range nodes { + donConfig.Members = append(donConfig.Members, config.NodeConfig{ + Name: fmt.Sprintf("node_%d", id), + Address: n.Address, + }) + } + + don := handlers_mocks.NewDON(t) + allowlist := allowlist_mocks.NewOnchainAllowlist(t) + subscriptions := subscriptions_mocks.NewOnchainSubscriptions(t) + 
minBalance := assets.NewLinkFromJuels(100) + userRateLimiter, err := hc.NewRateLimiter(hc.RateLimiterConfig{GlobalRPS: 100.0, GlobalBurst: 100, PerSenderRPS: 100.0, PerSenderBurst: 100}) + require.NoError(t, err) + nodeRateLimiter, err := hc.NewRateLimiter(hc.RateLimiterConfig{GlobalRPS: 100.0, GlobalBurst: 100, PerSenderRPS: 100.0, PerSenderBurst: 100}) + require.NoError(t, err) + pendingRequestsCache := hc.NewRequestCache[functions.PendingRequest](requestTimeout, 1000) + allowedHeartbeatInititors := map[string]struct{}{heartbeatSender: {}} + handler := functions.NewFunctionsHandler(cfg, donConfig, don, pendingRequestsCache, allowlist, subscriptions, minBalance, userRateLimiter, nodeRateLimiter, allowedHeartbeatInititors, logger.TestLogger(t)) + return handler, don, allowlist, subscriptions +} + +func newSignedMessage(t *testing.T, id string, method string, donId string, privateKey *ecdsa.PrivateKey) api.Message { + msg := api.Message{ + Body: api.MessageBody{ + MessageId: id, + Method: method, + DonId: donId, + }, + } + require.NoError(t, msg.Sign(privateKey)) + return msg +} + +func sendNodeReponses(t *testing.T, handler handlers.Handler, userRequestMsg api.Message, nodes []gc.TestNode, responses []bool) { + for id, resp := range responses { + nodeResponseMsg := userRequestMsg + nodeResponseMsg.Body.Receiver = userRequestMsg.Body.Sender + if resp { + nodeResponseMsg.Body.Payload = []byte(`{"success":true}`) + } else { + nodeResponseMsg.Body.Payload = []byte(`{"success":false}`) + } + require.NoError(t, nodeResponseMsg.Sign(nodes[id].PrivateKey)) + _ = handler.HandleNodeMessage(testutils.Context(t), &nodeResponseMsg, nodes[id].Address) + } +} + +func TestFunctionsHandler_Minimal(t *testing.T) { + t.Parallel() + + handler, err := functions.NewFunctionsHandlerFromConfig(json.RawMessage("{}"), &config.DONConfig{}, nil, nil, nil, nil, logger.TestLogger(t)) + require.NoError(t, err) + + // empty message should always error out + msg := &api.Message{} + err = 
handler.HandleUserMessage(testutils.Context(t), msg, nil) + require.Error(t, err) +} + +func TestFunctionsHandler_CleanStartAndClose(t *testing.T) { + t.Parallel() + + handler, err := functions.NewFunctionsHandlerFromConfig(json.RawMessage("{}"), &config.DONConfig{}, nil, nil, nil, nil, logger.TestLogger(t)) + require.NoError(t, err) + + servicetest.Run(t, handler) +} + +func TestFunctionsHandler_HandleUserMessage_SecretsSet(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + nodeResults []bool + expectedGatewayResult bool + expectedNodeMessageCount int + }{ + {"three successful", []bool{true, true, true, false}, true, 2}, + {"two successful", []bool{false, true, false, true}, true, 2}, + {"one successful", []bool{false, true, false, false}, false, 3}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + nodes, user := gc.NewTestNodes(t, 4), gc.NewTestNodes(t, 1)[0] + handler, don, allowlist, subscriptions := newFunctionsHandlerForATestDON(t, nodes, time.Hour*24, user.Address) + userRequestMsg := newSignedMessage(t, "1234", "secrets_set", "don_id", user.PrivateKey) + + callbachCh := make(chan handlers.UserCallbackPayload) + done := make(chan struct{}) + go func() { + defer close(done) + // wait on a response from Gateway to the user + response := <-callbachCh + require.Equal(t, api.NoError, response.ErrCode) + require.Equal(t, userRequestMsg.Body.MessageId, response.Msg.Body.MessageId) + var payload functions.CombinedResponse + require.NoError(t, json.Unmarshal(response.Msg.Body.Payload, &payload)) + require.Equal(t, test.expectedGatewayResult, payload.Success) + require.Equal(t, test.expectedNodeMessageCount, len(payload.NodeResponses)) + }() + + allowlist.On("Allow", common.HexToAddress(user.Address)).Return(true, nil) + subscriptions.On("GetMaxUserBalance", common.HexToAddress(user.Address)).Return(big.NewInt(1000), nil) + don.On("SendToNode", mock.Anything, mock.Anything, mock.Anything).Return(nil) + 
require.NoError(t, handler.HandleUserMessage(testutils.Context(t), &userRequestMsg, callbachCh)) + sendNodeReponses(t, handler, userRequestMsg, nodes, test.nodeResults) + <-done + }) + } +} + +func TestFunctionsHandler_HandleUserMessage_Heartbeat(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + nodeResults []bool + expectedGatewayResult bool + expectedNodeMessageCount int + }{ + {"three successful", []bool{true, true, true, false}, true, 2}, + {"two successful", []bool{false, true, false, true}, true, 2}, + {"one successful", []bool{false, true, false, false}, true, 2}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + nodes, user := gc.NewTestNodes(t, 4), gc.NewTestNodes(t, 1)[0] + handler, don, allowlist, _ := newFunctionsHandlerForATestDON(t, nodes, time.Hour*24, user.Address) + userRequestMsg := newSignedMessage(t, "1234", "heartbeat", "don_id", user.PrivateKey) + + callbachCh := make(chan handlers.UserCallbackPayload) + done := make(chan struct{}) + go func() { + defer close(done) + // wait on a response from Gateway to the user + response := <-callbachCh + require.Equal(t, api.NoError, response.ErrCode) + require.Equal(t, userRequestMsg.Body.MessageId, response.Msg.Body.MessageId) + var payload functions.CombinedResponse + require.NoError(t, json.Unmarshal(response.Msg.Body.Payload, &payload)) + require.Equal(t, test.expectedGatewayResult, payload.Success) + require.Equal(t, test.expectedNodeMessageCount, len(payload.NodeResponses)) + }() + + allowlist.On("Allow", common.HexToAddress(user.Address)).Return(true, nil) + don.On("SendToNode", mock.Anything, mock.Anything, mock.Anything).Return(nil) + require.NoError(t, handler.HandleUserMessage(testutils.Context(t), &userRequestMsg, callbachCh)) + sendNodeReponses(t, handler, userRequestMsg, nodes, test.nodeResults) + <-done + }) + } +} + +func TestFunctionsHandler_HandleUserMessage_InvalidMethod(t *testing.T) { + t.Parallel() + + nodes, user := 
gc.NewTestNodes(t, 4), gc.NewTestNodes(t, 1)[0] + handler, _, allowlist, _ := newFunctionsHandlerForATestDON(t, nodes, time.Hour*24, user.Address) + userRequestMsg := newSignedMessage(t, "1234", "secrets_reveal_all_please", "don_id", user.PrivateKey) + + allowlist.On("Allow", common.HexToAddress(user.Address)).Return(true, nil) + err := handler.HandleUserMessage(testutils.Context(t), &userRequestMsg, make(chan handlers.UserCallbackPayload)) + require.Error(t, err) +} + +func TestFunctionsHandler_HandleUserMessage_Timeout(t *testing.T) { + t.Parallel() + + nodes, user := gc.NewTestNodes(t, 4), gc.NewTestNodes(t, 1)[0] + handler, don, allowlist, subscriptions := newFunctionsHandlerForATestDON(t, nodes, time.Millisecond*10, user.Address) + userRequestMsg := newSignedMessage(t, "1234", "secrets_set", "don_id", user.PrivateKey) + + callbachCh := make(chan handlers.UserCallbackPayload) + done := make(chan struct{}) + go func() { + defer close(done) + // wait on a response from Gateway to the user + response := <-callbachCh + require.Equal(t, api.RequestTimeoutError, response.ErrCode) + require.Equal(t, userRequestMsg.Body.MessageId, response.Msg.Body.MessageId) + }() + + allowlist.On("Allow", common.HexToAddress(user.Address)).Return(true, nil) + subscriptions.On("GetMaxUserBalance", common.HexToAddress(user.Address)).Return(big.NewInt(1000), nil) + don.On("SendToNode", mock.Anything, mock.Anything, mock.Anything).Return(nil) + require.NoError(t, handler.HandleUserMessage(testutils.Context(t), &userRequestMsg, callbachCh)) + <-done +} diff --git a/core/services/gateway/handlers/functions/subscriptions/mocks/onchain_subscriptions.go b/core/services/gateway/handlers/functions/subscriptions/mocks/onchain_subscriptions.go new file mode 100644 index 00000000..5f2054c4 --- /dev/null +++ b/core/services/gateway/handlers/functions/subscriptions/mocks/onchain_subscriptions.go @@ -0,0 +1,97 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" +) + +// OnchainSubscriptions is an autogenerated mock type for the OnchainSubscriptions type +type OnchainSubscriptions struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *OnchainSubscriptions) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetMaxUserBalance provides a mock function with given fields: _a0 +func (_m *OnchainSubscriptions) GetMaxUserBalance(_a0 common.Address) (*big.Int, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetMaxUserBalance") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(common.Address) (*big.Int, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(common.Address) *big.Int); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(common.Address) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Start provides a mock function with given fields: _a0 +func (_m *OnchainSubscriptions) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewOnchainSubscriptions creates a new instance of OnchainSubscriptions. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewOnchainSubscriptions(t interface { + mock.TestingT + Cleanup(func()) +}) *OnchainSubscriptions { + mock := &OnchainSubscriptions{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/handlers/functions/subscriptions/mocks/orm.go b/core/services/gateway/handlers/functions/subscriptions/mocks/orm.go new file mode 100644 index 00000000..d62dddb5 --- /dev/null +++ b/core/services/gateway/handlers/functions/subscriptions/mocks/orm.go @@ -0,0 +1,90 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + subscriptions "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/subscriptions" + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" + mock "github.com/stretchr/testify/mock" +) + +// ORM is an autogenerated mock type for the ORM type +type ORM struct { + mock.Mock +} + +// GetSubscriptions provides a mock function with given fields: offset, limit, qopts +func (_m *ORM) GetSubscriptions(offset uint, limit uint, qopts ...pg.QOpt) ([]subscriptions.StoredSubscription, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, offset, limit) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetSubscriptions") + } + + var r0 []subscriptions.StoredSubscription + var r1 error + if rf, ok := ret.Get(0).(func(uint, uint, ...pg.QOpt) ([]subscriptions.StoredSubscription, error)); ok { + return rf(offset, limit, qopts...) + } + if rf, ok := ret.Get(0).(func(uint, uint, ...pg.QOpt) []subscriptions.StoredSubscription); ok { + r0 = rf(offset, limit, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]subscriptions.StoredSubscription) + } + } + + if rf, ok := ret.Get(1).(func(uint, uint, ...pg.QOpt) error); ok { + r1 = rf(offset, limit, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpsertSubscription provides a mock function with given fields: subscription, qopts +func (_m *ORM) UpsertSubscription(subscription subscriptions.StoredSubscription, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, subscription) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for UpsertSubscription") + } + + var r0 error + if rf, ok := ret.Get(0).(func(subscriptions.StoredSubscription, ...pg.QOpt) error); ok { + r0 = rf(subscription, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewORM creates a new instance of ORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewORM(t interface { + mock.TestingT + Cleanup(func()) +}) *ORM { + mock := &ORM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/handlers/functions/subscriptions/orm.go b/core/services/gateway/handlers/functions/subscriptions/orm.go new file mode 100644 index 00000000..80ca1db3 --- /dev/null +++ b/core/services/gateway/handlers/functions/subscriptions/orm.go @@ -0,0 +1,143 @@ +package subscriptions + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_router" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +//go:generate mockery --quiet --name ORM --output ./mocks/ --case=underscore +type ORM interface { + GetSubscriptions(offset, limit uint, qopts ...pg.QOpt) ([]StoredSubscription, error) + 
UpsertSubscription(subscription StoredSubscription, qopts ...pg.QOpt) error +} + +type orm struct { + q pg.Q + lggr logger.Logger + routerContractAddress common.Address +} + +var _ ORM = (*orm)(nil) +var ( + ErrInvalidParameters = errors.New("invalid parameters provided to create a subscription contract ORM") +) + +const ( + tableName = "functions_subscriptions" +) + +type storedSubscriptionRow struct { + SubscriptionID uint64 + Owner common.Address + Balance int64 + BlockedBalance int64 + ProposedOwner common.Address + Consumers pq.ByteaArray + Flags []uint8 + RouterContractAddress common.Address +} + +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig, routerContractAddress common.Address) (ORM, error) { + if db == nil || cfg == nil || lggr == nil || routerContractAddress == (common.Address{}) { + return nil, ErrInvalidParameters + } + + return &orm{ + q: pg.NewQ(db, lggr, cfg), + lggr: lggr, + routerContractAddress: routerContractAddress, + }, nil +} + +func (o *orm) GetSubscriptions(offset, limit uint, qopts ...pg.QOpt) ([]StoredSubscription, error) { + var storedSubscriptions []StoredSubscription + var storedSubscriptionRows []storedSubscriptionRow + stmt := fmt.Sprintf(` + SELECT subscription_id, owner, balance, blocked_balance, proposed_owner, consumers, flags, router_contract_address + FROM %s + WHERE router_contract_address = $1 + ORDER BY subscription_id ASC + OFFSET $2 + LIMIT $3; + `, tableName) + err := o.q.WithOpts(qopts...).Select(&storedSubscriptionRows, stmt, o.routerContractAddress, offset, limit) + if err != nil { + return storedSubscriptions, err + } + + for _, cs := range storedSubscriptionRows { + storedSubscriptions = append(storedSubscriptions, cs.encode()) + } + + return storedSubscriptions, nil +} + +// UpsertSubscription will update if a subscription exists or create if it does not. +// In case a subscription gets deleted we will update it with an owner address equal to 0x0. 
+func (o *orm) UpsertSubscription(subscription StoredSubscription, qopts ...pg.QOpt) error { + stmt := fmt.Sprintf(` + INSERT INTO %s (subscription_id, owner, balance, blocked_balance, proposed_owner, consumers, flags, router_contract_address) + VALUES ($1,$2,$3,$4,$5,$6,$7,$8) ON CONFLICT (subscription_id, router_contract_address) DO UPDATE + SET owner=$2, balance=$3, blocked_balance=$4, proposed_owner=$5, consumers=$6, flags=$7, router_contract_address=$8;`, tableName) + + if subscription.Balance == nil { + subscription.Balance = big.NewInt(0) + } + + if subscription.BlockedBalance == nil { + subscription.BlockedBalance = big.NewInt(0) + } + + var consumers [][]byte + for _, c := range subscription.Consumers { + consumers = append(consumers, c.Bytes()) + } + + _, err := o.q.WithOpts(qopts...).Exec( + stmt, + subscription.SubscriptionID, + subscription.Owner, + subscription.Balance.Int64(), + subscription.BlockedBalance.Int64(), + subscription.ProposedOwner, + consumers, + subscription.Flags[:], + o.routerContractAddress, + ) + if err != nil { + return err + } + + o.lggr.Debugf("Successfully updated subscription: %d for routerContractAddress: %s", subscription.SubscriptionID, o.routerContractAddress) + + return nil +} + +func (cs *storedSubscriptionRow) encode() StoredSubscription { + consumers := make([]common.Address, 0) + for _, csc := range cs.Consumers { + consumers = append(consumers, common.BytesToAddress(csc)) + } + + return StoredSubscription{ + SubscriptionID: cs.SubscriptionID, + IFunctionsSubscriptionsSubscription: functions_router.IFunctionsSubscriptionsSubscription{ + Balance: big.NewInt(cs.Balance), + Owner: cs.Owner, + BlockedBalance: big.NewInt(cs.BlockedBalance), + ProposedOwner: cs.ProposedOwner, + Consumers: consumers, + Flags: [32]byte(cs.Flags), + }, + } +} diff --git a/core/services/gateway/handlers/functions/subscriptions/orm_test.go b/core/services/gateway/handlers/functions/subscriptions/orm_test.go new file mode 100644 index 
00000000..2c7e4cc1 --- /dev/null +++ b/core/services/gateway/handlers/functions/subscriptions/orm_test.go @@ -0,0 +1,245 @@ +package subscriptions_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_router" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/subscriptions" +) + +var ( + defaultFlags = [32]byte{0x1, 0x2, 0x3} +) + +func setupORM(t *testing.T) (subscriptions.ORM, error) { + t.Helper() + + var ( + db = pgtest.NewSqlxDB(t) + lggr = logger.TestLogger(t) + ) + + return subscriptions.NewORM(db, lggr, pgtest.NewQConfig(true), testutils.NewAddress()) +} + +func seedSubscriptions(t *testing.T, orm subscriptions.ORM, amount int) []subscriptions.StoredSubscription { + storedSubscriptions := make([]subscriptions.StoredSubscription, 0) + for i := amount; i > 0; i-- { + cs := subscriptions.StoredSubscription{ + SubscriptionID: uint64(i), + IFunctionsSubscriptionsSubscription: functions_router.IFunctionsSubscriptionsSubscription{ + Balance: big.NewInt(10), + Owner: testutils.NewAddress(), + BlockedBalance: big.NewInt(20), + ProposedOwner: common.Address{}, + Consumers: []common.Address{}, + Flags: defaultFlags, + }, + } + storedSubscriptions = append(storedSubscriptions, cs) + err := orm.UpsertSubscription(cs) + require.NoError(t, err) + } + return storedSubscriptions +} + +func TestORM_GetSubscriptions(t *testing.T) { + t.Parallel() + t.Run("fetch first page", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + storedSubscriptions := seedSubscriptions(t, orm, 2) + results, err := orm.GetSubscriptions(0, 1) + 
require.NoError(t, err) + require.Equal(t, 1, len(results), "incorrect results length") + require.Equal(t, storedSubscriptions[1], results[0]) + }) + + t.Run("fetch second page", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + storedSubscriptions := seedSubscriptions(t, orm, 2) + results, err := orm.GetSubscriptions(1, 5) + require.NoError(t, err) + require.Equal(t, 1, len(results), "incorrect results length") + require.Equal(t, storedSubscriptions[0], results[0]) + }) +} + +func TestORM_UpsertSubscription(t *testing.T) { + t.Parallel() + + t.Run("create a subscription", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + expected := subscriptions.StoredSubscription{ + SubscriptionID: uint64(1), + IFunctionsSubscriptionsSubscription: functions_router.IFunctionsSubscriptionsSubscription{ + Balance: big.NewInt(10), + Owner: testutils.NewAddress(), + BlockedBalance: big.NewInt(20), + ProposedOwner: common.Address{}, + Consumers: []common.Address{testutils.NewAddress()}, + Flags: defaultFlags, + }, + } + err = orm.UpsertSubscription(expected) + require.NoError(t, err) + + results, err := orm.GetSubscriptions(0, 1) + require.NoError(t, err) + require.Equal(t, 1, len(results), "incorrect results length") + require.Equal(t, expected, results[0]) + }) + + t.Run("update a subscription", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + + expectedUpdated := subscriptions.StoredSubscription{ + SubscriptionID: uint64(1), + IFunctionsSubscriptionsSubscription: functions_router.IFunctionsSubscriptionsSubscription{ + Balance: big.NewInt(10), + Owner: testutils.NewAddress(), + BlockedBalance: big.NewInt(20), + ProposedOwner: common.Address{}, + Consumers: []common.Address{}, + Flags: defaultFlags, + }, + } + err = orm.UpsertSubscription(expectedUpdated) + require.NoError(t, err) + + expectedNotUpdated := subscriptions.StoredSubscription{ + SubscriptionID: uint64(2), + IFunctionsSubscriptionsSubscription: 
functions_router.IFunctionsSubscriptionsSubscription{ + Balance: big.NewInt(10), + Owner: testutils.NewAddress(), + BlockedBalance: big.NewInt(20), + ProposedOwner: common.Address{}, + Consumers: []common.Address{}, + Flags: defaultFlags, + }, + } + err = orm.UpsertSubscription(expectedNotUpdated) + require.NoError(t, err) + + // update the balance value + expectedUpdated.Balance = big.NewInt(20) + err = orm.UpsertSubscription(expectedUpdated) + require.NoError(t, err) + + results, err := orm.GetSubscriptions(0, 5) + require.NoError(t, err) + require.Equal(t, 2, len(results), "incorrect results length") + require.Equal(t, expectedNotUpdated, results[1]) + require.Equal(t, expectedUpdated, results[0]) + }) + + t.Run("update a deleted subscription", func(t *testing.T) { + orm, err := setupORM(t) + require.NoError(t, err) + + subscription := subscriptions.StoredSubscription{ + SubscriptionID: uint64(1), + IFunctionsSubscriptionsSubscription: functions_router.IFunctionsSubscriptionsSubscription{ + Balance: big.NewInt(10), + Owner: testutils.NewAddress(), + BlockedBalance: big.NewInt(20), + ProposedOwner: common.Address{}, + Consumers: []common.Address{}, + Flags: defaultFlags, + }, + } + err = orm.UpsertSubscription(subscription) + require.NoError(t, err) + + // empty subscription + subscription.IFunctionsSubscriptionsSubscription = functions_router.IFunctionsSubscriptionsSubscription{ + Balance: big.NewInt(0), + Owner: common.Address{}, + BlockedBalance: big.NewInt(0), + ProposedOwner: common.Address{}, + Consumers: []common.Address{}, + Flags: [32]byte{}, + } + + err = orm.UpsertSubscription(subscription) + require.NoError(t, err) + + results, err := orm.GetSubscriptions(0, 5) + require.NoError(t, err) + require.Equal(t, 1, len(results), "incorrect results length") + require.Equal(t, subscription, results[0]) + }) + + t.Run("create a subscription with same id but different router address", func(t *testing.T) { + var ( + db = pgtest.NewSqlxDB(t) + lggr = 
logger.TestLogger(t) + ) + + orm1, err := subscriptions.NewORM(db, lggr, pgtest.NewQConfig(true), testutils.NewAddress()) + require.NoError(t, err) + orm2, err := subscriptions.NewORM(db, lggr, pgtest.NewQConfig(true), testutils.NewAddress()) + require.NoError(t, err) + + subscription := subscriptions.StoredSubscription{ + SubscriptionID: uint64(1), + IFunctionsSubscriptionsSubscription: functions_router.IFunctionsSubscriptionsSubscription{ + Balance: assets.Ether(10).ToInt(), + Owner: testutils.NewAddress(), + BlockedBalance: assets.Ether(20).ToInt(), + ProposedOwner: common.Address{}, + Consumers: []common.Address{}, + Flags: defaultFlags, + }, + } + + err = orm1.UpsertSubscription(subscription) + require.NoError(t, err) + + // should update the existing subscription + subscription.Balance = assets.Ether(12).ToInt() + err = orm1.UpsertSubscription(subscription) + require.NoError(t, err) + + results, err := orm1.GetSubscriptions(0, 10) + require.NoError(t, err) + require.Equal(t, 1, len(results), "incorrect results length") + + // should create a new subscription because it comes from a different router contract + err = orm2.UpsertSubscription(subscription) + require.NoError(t, err) + + results, err = orm1.GetSubscriptions(0, 10) + require.NoError(t, err) + require.Equal(t, 1, len(results), "incorrect results length") + + results, err = orm2.GetSubscriptions(0, 10) + require.NoError(t, err) + require.Equal(t, 1, len(results), "incorrect results length") + }) +} +func Test_NewORM(t *testing.T) { + t.Run("OK-create_ORM", func(t *testing.T) { + _, err := subscriptions.NewORM(pgtest.NewSqlxDB(t), logger.TestLogger(t), pgtest.NewQConfig(true), testutils.NewAddress()) + require.NoError(t, err) + }) + t.Run("NOK-create_ORM_with_nil_fields", func(t *testing.T) { + _, err := subscriptions.NewORM(nil, nil, nil, common.Address{}) + require.Error(t, err) + }) + t.Run("NOK-create_ORM_with_empty_address", func(t *testing.T) { + _, err := 
subscriptions.NewORM(pgtest.NewSqlxDB(t), logger.TestLogger(t), pgtest.NewQConfig(true), common.Address{}) + require.Error(t, err) + }) +} diff --git a/core/services/gateway/handlers/functions/subscriptions/subscriptions.go b/core/services/gateway/handlers/functions/subscriptions/subscriptions.go new file mode 100644 index 00000000..6655a613 --- /dev/null +++ b/core/services/gateway/handlers/functions/subscriptions/subscriptions.go @@ -0,0 +1,254 @@ +package subscriptions + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/services" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_router" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const defaultStoreBatchSize = 100 + +type OnchainSubscriptionsConfig struct { + ContractAddress common.Address `json:"contractAddress"` + BlockConfirmations uint `json:"blockConfirmations"` + UpdateFrequencySec uint `json:"updateFrequencySec"` + UpdateTimeoutSec uint `json:"updateTimeoutSec"` + UpdateRangeSize uint `json:"updateRangeSize"` + StoreBatchSize uint `json:"storeBatchSize"` +} + +// OnchainSubscriptions maintains a mirror of all subscriptions fetched from the blockchain (EVM-only). +// All methods are thread-safe. +// +//go:generate mockery --quiet --name OnchainSubscriptions --output ./mocks/ --case=underscore +type OnchainSubscriptions interface { + job.ServiceCtx + + // GetMaxUserBalance returns a maximum subscription balance (juels), or error if user has no subscriptions. 
+ GetMaxUserBalance(common.Address) (*big.Int, error) +} + +type onchainSubscriptions struct { + services.StateMachine + + config OnchainSubscriptionsConfig + subscriptions UserSubscriptions + orm ORM + client evmclient.Client + router *functions_router.FunctionsRouter + blockConfirmations *big.Int + lggr logger.Logger + closeWait sync.WaitGroup + rwMutex sync.RWMutex + stopCh services.StopChan +} + +func NewOnchainSubscriptions(client evmclient.Client, config OnchainSubscriptionsConfig, orm ORM, lggr logger.Logger) (OnchainSubscriptions, error) { + if client == nil { + return nil, errors.New("client is nil") + } + if lggr == nil { + return nil, errors.New("logger is nil") + } + router, err := functions_router.NewFunctionsRouter(config.ContractAddress, client) + if err != nil { + return nil, fmt.Errorf("unexpected error during functions_router.NewFunctionsRouter: %s", err) + } + + // if StoreBatchSize is not specified use the default value + if config.StoreBatchSize == 0 { + lggr.Info("StoreBatchSize not specified, using default size: ", defaultStoreBatchSize) + config.StoreBatchSize = defaultStoreBatchSize + } + + return &onchainSubscriptions{ + config: config, + subscriptions: NewUserSubscriptions(), + orm: orm, + client: client, + router: router, + blockConfirmations: big.NewInt(int64(config.BlockConfirmations)), + lggr: lggr.Named("OnchainSubscriptions"), + stopCh: make(services.StopChan), + }, nil +} + +func (s *onchainSubscriptions) Start(ctx context.Context) error { + return s.StartOnce("OnchainSubscriptions", func() error { + s.lggr.Info("starting onchain subscriptions") + if s.config.UpdateFrequencySec == 0 { + return errors.New("OnchainSubscriptionsConfig.UpdateFrequencySec must be greater than 0") + } + if s.config.UpdateTimeoutSec == 0 { + return errors.New("OnchainSubscriptionsConfig.UpdateTimeoutSec must be greater than 0") + } + if s.config.UpdateRangeSize == 0 { + return errors.New("OnchainSubscriptionsConfig.UpdateRangeSize must be greater than 0") 
+ } + + s.loadStoredSubscriptions() + + s.closeWait.Add(1) + go s.queryLoop() + + return nil + }) +} + +func (s *onchainSubscriptions) Close() error { + return s.StopOnce("OnchainSubscriptions", func() (err error) { + s.lggr.Info("closing onchain subscriptions") + close(s.stopCh) + s.closeWait.Wait() + return nil + }) +} + +func (s *onchainSubscriptions) GetMaxUserBalance(user common.Address) (*big.Int, error) { + s.rwMutex.RLock() + defer s.rwMutex.RUnlock() + return s.subscriptions.GetMaxUserBalance(user) +} + +func (s *onchainSubscriptions) queryLoop() { + defer s.closeWait.Done() + + ticker := time.NewTicker(time.Duration(s.config.UpdateFrequencySec) * time.Second) + defer ticker.Stop() + + start := uint64(1) + lastKnownCount := uint64(0) + + queryFunc := func() { + ctx, cancel := utils.ContextFromChanWithTimeout(s.stopCh, time.Duration(s.config.UpdateTimeoutSec)*time.Second) + defer cancel() + + latestBlockHeight, err := s.client.LatestBlockHeight(ctx) + if err != nil || latestBlockHeight == nil { + s.lggr.Errorw("Error calling LatestBlockHeight", "err", err, "latestBlockHeight", latestBlockHeight.Int64()) + return + } + + blockNumber := big.NewInt(0).Sub(latestBlockHeight, s.blockConfirmations) + + if lastKnownCount == 0 || start > lastKnownCount { + count, err := s.getSubscriptionsCount(ctx, blockNumber) + if err != nil { + s.lggr.Errorw("Error getting new subscriptions count", "err", err) + } else { + s.lggr.Infow("Updated subscriptions count", "count", count, "blockNumber", blockNumber.Int64()) + lastKnownCount = count + } + } + + if lastKnownCount == 0 { + s.lggr.Info("Router has no subscriptions yet") + return + } + + if start > lastKnownCount { + start = 1 + } + + end := start + uint64(s.config.UpdateRangeSize) - 1 + if end > lastKnownCount { + end = lastKnownCount + } + if err := s.querySubscriptionsRange(ctx, blockNumber, start, end); err != nil { + s.lggr.Errorw("Error querying subscriptions", "err", err, "start", start, "end", end) + return + } + + 
start = end + 1 + } + + queryFunc() + + for { + select { + case <-s.stopCh: + return + case <-ticker.C: + queryFunc() + } + } +} + +func (s *onchainSubscriptions) querySubscriptionsRange(ctx context.Context, blockNumber *big.Int, start, end uint64) error { + s.lggr.Debugw("Querying subscriptions", "blockNumber", blockNumber, "start", start, "end", end) + + subscriptions, err := s.router.GetSubscriptionsInRange(&bind.CallOpts{ + Pending: false, + BlockNumber: blockNumber, + Context: ctx, + }, start, end) + if err != nil { + return errors.Wrap(err, "unexpected error during functions_router.GetSubscriptionsInRange") + } + + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + for i, subscription := range subscriptions { + subscriptionId := start + uint64(i) + subscription := subscription + updated := s.subscriptions.UpdateSubscription(subscriptionId, &subscription) + if updated { + if err = s.orm.UpsertSubscription(StoredSubscription{ + SubscriptionID: subscriptionId, + IFunctionsSubscriptionsSubscription: subscription, + }); err != nil { + s.lggr.Errorf("unexpected error updating subscription in the db: %w", err) + } + } + } + + return nil +} + +func (s *onchainSubscriptions) getSubscriptionsCount(ctx context.Context, blockNumber *big.Int) (uint64, error) { + return s.router.GetSubscriptionCount(&bind.CallOpts{ + Pending: false, + BlockNumber: blockNumber, + Context: ctx, + }) +} + +func (s *onchainSubscriptions) loadStoredSubscriptions() { + offset := uint(0) + for { + csBatch, err := s.orm.GetSubscriptions(offset, s.config.StoreBatchSize) + if err != nil { + break + } + + for _, cs := range csBatch { + _ = s.subscriptions.UpdateSubscription(cs.SubscriptionID, &functions_router.IFunctionsSubscriptionsSubscription{ + Balance: cs.Balance, + Owner: cs.Owner, + BlockedBalance: cs.BlockedBalance, + ProposedOwner: cs.ProposedOwner, + Consumers: cs.Consumers, + Flags: cs.Flags, + }) + } + s.lggr.Debugw("Loading stored subscriptions", "offset", offset, "batch_length", 
len(csBatch)) + + if len(csBatch) != int(s.config.StoreBatchSize) { + break + } + offset += s.config.StoreBatchSize + } +} diff --git a/core/services/gateway/handlers/functions/subscriptions/subscriptions_test.go b/core/services/gateway/handlers/functions/subscriptions/subscriptions_test.go new file mode 100644 index 00000000..3d51103f --- /dev/null +++ b/core/services/gateway/handlers/functions/subscriptions/subscriptions_test.go @@ -0,0 +1,173 @@ +package subscriptions_test + +import ( + "math/big" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_router" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/subscriptions" + smocks "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/subscriptions/mocks" +) + +const ( + validUser = "0x9ED925d8206a4f88a2f643b28B3035B315753Cd6" + invalidUser = "0x6E2dc0F9DB014aE19888F539E59285D2Ea04244C" + storedUser = "0x3E2dc0F9DB014aE19888F539E59285D2Ea04233G" +) + +func TestSubscriptions_OnePass(t *testing.T) { + getSubscriptionCount := hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000003") + getSubscriptionsInRange := 
hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000109e6e1b12098cc8f3a1e9719a817ec53ab9b35c000000000000000000000000000000000000000000000000000034e23f515cb0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f5340f0968ee8b7dfd97e3327a6139273cc2c4fa000000000000000000000000000000000000000000000001158e460913d000000000000000000000000000009ed925d8206a4f88a2f643b28b3035b315753cd60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001bc14b92364c75e20000000000000000000000009ed925d8206a4f88a2f643b28b3035b315753cd60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000005439e5881a529f3ccbffc0e82d49f9db3950aefe") + + ctx := testutils.Context(t) + client := mocks.NewClient(t) + client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) + client.On("CallContract", mock.Anything, 
ethereum.CallMsg{ // getSubscriptionCount + To: &common.Address{}, + Data: hexutil.MustDecode("0x66419970"), + }, mock.Anything).Return(getSubscriptionCount, nil) + client.On("CallContract", mock.Anything, ethereum.CallMsg{ // GetSubscriptionsInRange + To: &common.Address{}, + Data: hexutil.MustDecode("0xec2454e500000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000003"), + }, mock.Anything).Return(getSubscriptionsInRange, nil) + config := subscriptions.OnchainSubscriptionsConfig{ + ContractAddress: common.Address{}, + BlockConfirmations: 1, + UpdateFrequencySec: 1, + UpdateTimeoutSec: 1, + UpdateRangeSize: 3, + } + orm := smocks.NewORM(t) + orm.On("GetSubscriptions", uint(0), uint(100)).Return([]subscriptions.StoredSubscription{}, nil) + orm.On("UpsertSubscription", mock.Anything).Return(nil) + subscriptions, err := subscriptions.NewOnchainSubscriptions(client, config, orm, logger.TestLogger(t)) + require.NoError(t, err) + + err = subscriptions.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, subscriptions.Close()) + }) + + // initially we have 3 subs and range is 3, which needs one pass + gomega.NewGomegaWithT(t).Eventually(func() bool { + expectedBalance := big.NewInt(0).SetBytes(hexutil.MustDecode("0x01158e460913d00000")) + balance, err1 := subscriptions.GetMaxUserBalance(common.HexToAddress(validUser)) + _, err2 := subscriptions.GetMaxUserBalance(common.HexToAddress(invalidUser)) + return err1 == nil && err2 != nil && balance.Cmp(expectedBalance) == 0 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) +} + +func TestSubscriptions_MultiPass(t *testing.T) { + const ncycles int32 = 5 + var currentCycle atomic.Int32 + getSubscriptionCount := hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000006") + getSubscriptionsInRange := 
hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000109e6e1b12098cc8f3a1e9719a817ec53ab9b35c000000000000000000000000000000000000000000000000000034e23f515cb0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f5340f0968ee8b7dfd97e3327a6139273cc2c4fa000000000000000000000000000000000000000000000001158e460913d000000000000000000000000000009ed925d8206a4f88a2f643b28b3035b315753cd60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001bc14b92364c75e20000000000000000000000009ed925d8206a4f88a2f643b28b3035b315753cd60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000005439e5881a529f3ccbffc0e82d49f9db3950aefe") + + ctx := testutils.Context(t) + client := mocks.NewClient(t) + client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) + client.On("CallContract", mock.Anything, 
ethereum.CallMsg{ // getSubscriptionCount + To: &common.Address{}, + Data: hexutil.MustDecode("0x66419970"), + }, mock.Anything).Run(func(args mock.Arguments) { + currentCycle.Add(1) + }).Return(getSubscriptionCount, nil) + client.On("CallContract", mock.Anything, ethereum.CallMsg{ // GetSubscriptionsInRange(1,3) + To: &common.Address{}, + Data: hexutil.MustDecode("0xec2454e500000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000003"), + }, mock.Anything).Return(getSubscriptionsInRange, nil) + client.On("CallContract", mock.Anything, ethereum.CallMsg{ // GetSubscriptionsInRange(4,6) + To: &common.Address{}, + Data: hexutil.MustDecode("0xec2454e500000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000006"), + }, mock.Anything).Return(getSubscriptionsInRange, nil) + config := subscriptions.OnchainSubscriptionsConfig{ + ContractAddress: common.Address{}, + BlockConfirmations: 1, + UpdateFrequencySec: 1, + UpdateTimeoutSec: 1, + UpdateRangeSize: 3, + } + orm := smocks.NewORM(t) + orm.On("GetSubscriptions", uint(0), uint(100)).Return([]subscriptions.StoredSubscription{}, nil) + orm.On("UpsertSubscription", mock.Anything).Return(nil) + subscriptions, err := subscriptions.NewOnchainSubscriptions(client, config, orm, logger.TestLogger(t)) + require.NoError(t, err) + + err = subscriptions.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, subscriptions.Close()) + }) + + gomega.NewGomegaWithT(t).Eventually(func() bool { + return currentCycle.Load() == ncycles + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) +} + +func TestSubscriptions_Stored(t *testing.T) { + getSubscriptionCount := hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000003") + getSubscriptionsInRange := 
hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000240000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000109e6e1b12098cc8f3a1e9719a817ec53ab9b35c000000000000000000000000000000000000000000000000000034e23f515cb0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f5340f0968ee8b7dfd97e3327a6139273cc2c4fa000000000000000000000000000000000000000000000001158e460913d000000000000000000000000000009ed925d8206a4f88a2f643b28b3035b315753cd60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001bc14b92364c75e20000000000000000000000009ed925d8206a4f88a2f643b28b3035b315753cd60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000005439e5881a529f3ccbffc0e82d49f9db3950aefe") + + ctx := testutils.Context(t) + client := mocks.NewClient(t) + client.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(42), nil) + client.On("CallContract", mock.Anything, 
ethereum.CallMsg{ // getSubscriptionCount + To: &common.Address{}, + Data: hexutil.MustDecode("0x66419970"), + }, mock.Anything).Return(getSubscriptionCount, nil) + client.On("CallContract", mock.Anything, ethereum.CallMsg{ // GetSubscriptionsInRange + To: &common.Address{}, + Data: hexutil.MustDecode("0xec2454e500000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000003"), + }, mock.Anything).Return(getSubscriptionsInRange, nil) + config := subscriptions.OnchainSubscriptionsConfig{ + ContractAddress: common.Address{}, + BlockConfirmations: 1, + UpdateFrequencySec: 1, + UpdateTimeoutSec: 1, + UpdateRangeSize: 3, + StoreBatchSize: 1, + } + + expectedBalance := big.NewInt(5) + orm := smocks.NewORM(t) + orm.On("GetSubscriptions", uint(0), uint(1)).Return([]subscriptions.StoredSubscription{ + { + SubscriptionID: 1, + IFunctionsSubscriptionsSubscription: functions_router.IFunctionsSubscriptionsSubscription{ + Balance: expectedBalance, + Owner: common.HexToAddress(storedUser), + BlockedBalance: big.NewInt(10), + }, + }, + }, nil) + orm.On("GetSubscriptions", uint(1), uint(1)).Return([]subscriptions.StoredSubscription{}, nil) + orm.On("UpsertSubscription", mock.Anything).Return(nil) + + subscriptions, err := subscriptions.NewOnchainSubscriptions(client, config, orm, logger.TestLogger(t)) + require.NoError(t, err) + + err = subscriptions.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + assert.NoError(t, subscriptions.Close()) + }) + + gomega.NewGomegaWithT(t).Eventually(func() bool { + actualBalance, err := subscriptions.GetMaxUserBalance(common.HexToAddress(storedUser)) + return err == nil && assert.Equal(t, expectedBalance, actualBalance) + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) +} diff --git a/core/services/gateway/handlers/functions/subscriptions/user_subscriptions.go b/core/services/gateway/handlers/functions/subscriptions/user_subscriptions.go new file mode 
100644 index 00000000..ed37f137 --- /dev/null +++ b/core/services/gateway/handlers/functions/subscriptions/user_subscriptions.go @@ -0,0 +1,82 @@ +package subscriptions + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_router" +) + +// Methods are NOT thread-safe. + +var ErrUserHasNoSubscription = errors.New("user has no subscriptions") + +type UserSubscriptions interface { + UpdateSubscription(subscriptionId uint64, subscription *functions_router.IFunctionsSubscriptionsSubscription) bool + GetMaxUserBalance(user common.Address) (*big.Int, error) +} + +type userSubscriptions struct { + userSubscriptionsMap map[common.Address]map[uint64]*functions_router.IFunctionsSubscriptionsSubscription + subscriptionIdsMap map[uint64]common.Address +} + +func NewUserSubscriptions() UserSubscriptions { + return &userSubscriptions{ + userSubscriptionsMap: make(map[common.Address]map[uint64]*functions_router.IFunctionsSubscriptionsSubscription), + subscriptionIdsMap: make(map[uint64]common.Address), + } +} + +// StoredSubscription is used to populate the user subscription maps from a persistent layer like postgres. +type StoredSubscription struct { + SubscriptionID uint64 + functions_router.IFunctionsSubscriptionsSubscription +} + +// UpdateSubscription updates a subscription returning false in case there was no variation to the current state. 
+func (us *userSubscriptions) UpdateSubscription(subscriptionId uint64, subscription *functions_router.IFunctionsSubscriptionsSubscription) bool { + if subscription == nil || subscription.Owner == utils.ZeroAddress { + user, ok := us.subscriptionIdsMap[subscriptionId] + if !ok { + return false + } + + delete(us.userSubscriptionsMap[user], subscriptionId) + delete(us.subscriptionIdsMap, subscriptionId) + if len(us.userSubscriptionsMap[user]) == 0 { + delete(us.userSubscriptionsMap, user) + } + return true + } + + // there is no change to the subscription + if us.userSubscriptionsMap[subscription.Owner][subscriptionId] == subscription { + return false + } + + us.subscriptionIdsMap[subscriptionId] = subscription.Owner + if _, ok := us.userSubscriptionsMap[subscription.Owner]; !ok { + us.userSubscriptionsMap[subscription.Owner] = make(map[uint64]*functions_router.IFunctionsSubscriptionsSubscription) + } + us.userSubscriptionsMap[subscription.Owner][subscriptionId] = subscription + return true +} + +func (us *userSubscriptions) GetMaxUserBalance(user common.Address) (*big.Int, error) { + subs, exists := us.userSubscriptionsMap[user] + if !exists { + return nil, ErrUserHasNoSubscription + } + + maxBalance := big.NewInt(0) + for _, sub := range subs { + if sub.Balance.Cmp(maxBalance) > 0 { + maxBalance = sub.Balance + } + } + return maxBalance, nil +} diff --git a/core/services/gateway/handlers/functions/subscriptions/user_subscriptions_test.go b/core/services/gateway/handlers/functions/subscriptions/user_subscriptions_test.go new file mode 100644 index 00000000..d78c5241 --- /dev/null +++ b/core/services/gateway/handlers/functions/subscriptions/user_subscriptions_test.go @@ -0,0 +1,149 @@ +package subscriptions_test + +import ( + "math/big" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_router" + 
"github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/subscriptions" + + "github.com/stretchr/testify/assert" +) + +func TestUserSubscriptions(t *testing.T) { + t.Parallel() + + us := subscriptions.NewUserSubscriptions() + + t.Run("GetMaxUserBalance for unknown user", func(t *testing.T) { + _, err := us.GetMaxUserBalance(utils.RandomAddress()) + assert.Error(t, err) + }) + + t.Run("UpdateSubscription then GetMaxUserBalance", func(t *testing.T) { + user1 := utils.RandomAddress() + user1Balance := big.NewInt(10) + user2 := utils.RandomAddress() + user2Balance1 := big.NewInt(50) + user2Balance2 := big.NewInt(70) + + updated := us.UpdateSubscription(5, &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: user1, + Balance: user1Balance, + }) + assert.True(t, updated) + + updated = us.UpdateSubscription(3, &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: user2, + Balance: user2Balance1, + }) + assert.True(t, updated) + + updated = us.UpdateSubscription(10, &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: user2, + Balance: user2Balance2, + }) + assert.True(t, updated) + + balance, err := us.GetMaxUserBalance(user1) + assert.NoError(t, err) + assert.Zero(t, balance.Cmp(user1Balance)) + + balance, err = us.GetMaxUserBalance(user2) + assert.NoError(t, err) + assert.Zero(t, balance.Cmp(user2Balance2)) + }) +} + +func TestUserSubscriptions_UpdateSubscription(t *testing.T) { + t.Parallel() + + t.Run("update balance", func(t *testing.T) { + us := subscriptions.NewUserSubscriptions() + owner := utils.RandomAddress() + + updated := us.UpdateSubscription(1, &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: owner, + Balance: big.NewInt(10), + }) + assert.True(t, updated) + + updated = us.UpdateSubscription(1, &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: owner, + Balance: big.NewInt(100), + }) + assert.True(t, updated) + }) + + t.Run("updated proposed owner", func(t *testing.T) { + us := 
subscriptions.NewUserSubscriptions() + owner := utils.RandomAddress() + + updated := us.UpdateSubscription(1, &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: owner, + Balance: big.NewInt(10), + }) + assert.True(t, updated) + + updated = us.UpdateSubscription(1, &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: owner, + Balance: big.NewInt(10), + ProposedOwner: utils.RandomAddress(), + }) + assert.True(t, updated) + }) + t.Run("remove subscriptions", func(t *testing.T) { + us := subscriptions.NewUserSubscriptions() + user2 := utils.RandomAddress() + user2Balance1 := big.NewInt(50) + user2Balance2 := big.NewInt(70) + + updated := us.UpdateSubscription(3, &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: user2, + Balance: user2Balance1, + }) + assert.True(t, updated) + + updated = us.UpdateSubscription(10, &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: user2, + Balance: user2Balance2, + }) + assert.True(t, updated) + + updated = us.UpdateSubscription(3, &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: utils.ZeroAddress, + }) + assert.True(t, updated) + + updated = us.UpdateSubscription(10, &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: utils.ZeroAddress, + }) + assert.True(t, updated) + + _, err := us.GetMaxUserBalance(user2) + assert.Error(t, err) + }) + + t.Run("remove a non existing subscription", func(t *testing.T) { + us := subscriptions.NewUserSubscriptions() + updated := us.UpdateSubscription(3, &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: utils.ZeroAddress, + }) + assert.False(t, updated) + }) + + t.Run("no actual changes", func(t *testing.T) { + us := subscriptions.NewUserSubscriptions() + subscription := &functions_router.IFunctionsSubscriptionsSubscription{ + Owner: utils.RandomAddress(), + Balance: big.NewInt(25), + BlockedBalance: big.NewInt(25), + } + updated := us.UpdateSubscription(5, subscription) + assert.True(t, updated) + + updated = 
us.UpdateSubscription(5, subscription) + assert.False(t, updated) + }) +} diff --git a/core/services/gateway/handlers/handler.dummy.go b/core/services/gateway/handlers/handler.dummy.go new file mode 100644 index 00000000..7f5b751c --- /dev/null +++ b/core/services/gateway/handlers/handler.dummy.go @@ -0,0 +1,73 @@ +package handlers + +import ( + "context" + "sync" + + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/config" +) + +// DummyHandler forwards each request/response without doing any checks. +type dummyHandler struct { + donConfig *config.DONConfig + don DON + savedCallbacks map[string]*savedCallback + mu sync.Mutex + lggr logger.Logger +} + +type savedCallback struct { + id string + callbackCh chan<- UserCallbackPayload +} + +var _ Handler = (*dummyHandler)(nil) + +func NewDummyHandler(donConfig *config.DONConfig, don DON, lggr logger.Logger) (Handler, error) { + return &dummyHandler{ + donConfig: donConfig, + don: don, + savedCallbacks: make(map[string]*savedCallback), + lggr: lggr.Named("DummyHandler." + donConfig.DonId), + }, nil +} + +func (d *dummyHandler) HandleUserMessage(ctx context.Context, msg *api.Message, callbackCh chan<- UserCallbackPayload) error { + d.mu.Lock() + d.savedCallbacks[msg.Body.MessageId] = &savedCallback{msg.Body.MessageId, callbackCh} + don := d.don + d.mu.Unlock() + + var err error + // Send to all nodes. + for _, member := range d.donConfig.Members { + err = multierr.Combine(err, don.SendToNode(ctx, member.Address, msg)) + } + return err +} + +func (d *dummyHandler) HandleNodeMessage(ctx context.Context, msg *api.Message, nodeAddr string) error { + d.mu.Lock() + savedCb, found := d.savedCallbacks[msg.Body.MessageId] + delete(d.savedCallbacks, msg.Body.MessageId) + d.mu.Unlock() + + if found { + // Send first response from a node back to the user, ignore any other ones. 
+ savedCb.callbackCh <- UserCallbackPayload{Msg: msg, ErrCode: api.NoError, ErrMsg: ""} + close(savedCb.callbackCh) + } + return nil +} + +func (d *dummyHandler) Start(context.Context) error { + return nil +} + +func (d *dummyHandler) Close() error { + return nil +} diff --git a/core/services/gateway/handlers/handler.dummy_test.go b/core/services/gateway/handlers/handler.dummy_test.go new file mode 100644 index 00000000..596f891f --- /dev/null +++ b/core/services/gateway/handlers/handler.dummy_test.go @@ -0,0 +1,58 @@ +package handlers_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/config" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers" +) + +type testConnManager struct { + handler handlers.Handler + sendCounter int +} + +func (m *testConnManager) SetHandler(handler handlers.Handler) { + m.handler = handler +} + +func (m *testConnManager) SendToNode(ctx context.Context, nodeAddress string, msg *api.Message) error { + m.sendCounter++ + return nil +} + +func TestDummyHandler_BasicFlow(t *testing.T) { + t.Parallel() + + config := config.DONConfig{ + Members: []config.NodeConfig{ + {Name: "node one", Address: "addr_1"}, + {Name: "node two", Address: "addr_2"}, + }, + } + + connMgr := testConnManager{} + handler, err := handlers.NewDummyHandler(&config, &connMgr, logger.TestLogger(t)) + require.NoError(t, err) + connMgr.SetHandler(handler) + + ctx := testutils.Context(t) + + // User request + msg := api.Message{Body: api.MessageBody{MessageId: "1234"}} + callbackCh := make(chan handlers.UserCallbackPayload, 1) + require.NoError(t, handler.HandleUserMessage(ctx, &msg, callbackCh)) + require.Equal(t, 2, connMgr.sendCounter) + + // Responses from both nodes + require.NoError(t, 
handler.HandleNodeMessage(ctx, &msg, "addr_1")) + require.NoError(t, handler.HandleNodeMessage(ctx, &msg, "addr_2")) + response := <-callbackCh + require.Equal(t, "1234", response.Msg.Body.MessageId) +} diff --git a/core/services/gateway/handlers/handler.go b/core/services/gateway/handlers/handler.go new file mode 100644 index 00000000..c74634e6 --- /dev/null +++ b/core/services/gateway/handlers/handler.go @@ -0,0 +1,45 @@ +package handlers + +import ( + "context" + + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +//go:generate mockery --quiet --name Handler --output ./mocks/ --case=underscore +//go:generate mockery --quiet --name DON --output ./mocks/ --case=underscore + +// UserCallbackPayload is a response to user request sent to HandleUserMessage(). +// Each message needs to receive at most one response on the provided channel. +type UserCallbackPayload struct { + Msg *api.Message + ErrCode api.ErrorCode + ErrMsg string +} + +// Handler implements service-specific logic for managing messages from users and nodes. +// There is one Handler object created for each DON. +// +// The lifecycle of a Handler object is as follows: +// - Start() call +// - a series of HandleUserMessage/HandleNodeMessage calls, executed in parallel +// (Handler needs to guarantee thread safety) +// - Close() call +type Handler interface { + job.ServiceCtx + + // Each user request is processed by a separate goroutine, which: + // 1. calls HandleUserMessage + // 2. waits on callbackCh with a timeout + HandleUserMessage(ctx context.Context, msg *api.Message, callbackCh chan<- UserCallbackPayload) error + + // Handlers should not make any assumptions about goroutines calling HandleNodeMessage + HandleNodeMessage(ctx context.Context, msg *api.Message, nodeAddr string) error +} + +// Representation of a DON from a Handler's perspective. 
+type DON interface { + // Thread-safe + SendToNode(ctx context.Context, nodeAddress string, msg *api.Message) error +} diff --git a/core/services/gateway/handlers/mocks/don.go b/core/services/gateway/handlers/mocks/don.go new file mode 100644 index 00000000..4fcc0ce4 --- /dev/null +++ b/core/services/gateway/handlers/mocks/don.go @@ -0,0 +1,48 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + api "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + + mock "github.com/stretchr/testify/mock" +) + +// DON is an autogenerated mock type for the DON type +type DON struct { + mock.Mock +} + +// SendToNode provides a mock function with given fields: ctx, nodeAddress, msg +func (_m *DON) SendToNode(ctx context.Context, nodeAddress string, msg *api.Message) error { + ret := _m.Called(ctx, nodeAddress, msg) + + if len(ret) == 0 { + panic("no return value specified for SendToNode") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, *api.Message) error); ok { + r0 = rf(ctx, nodeAddress, msg) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewDON creates a new instance of DON. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDON(t interface { + mock.TestingT + Cleanup(func()) +}) *DON { + mock := &DON{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/handlers/mocks/handler.go b/core/services/gateway/handlers/mocks/handler.go new file mode 100644 index 00000000..a3217037 --- /dev/null +++ b/core/services/gateway/handlers/mocks/handler.go @@ -0,0 +1,104 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + api "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + + handlers "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers" + + mock "github.com/stretchr/testify/mock" +) + +// Handler is an autogenerated mock type for the Handler type +type Handler struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *Handler) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// HandleNodeMessage provides a mock function with given fields: ctx, msg, nodeAddr +func (_m *Handler) HandleNodeMessage(ctx context.Context, msg *api.Message, nodeAddr string) error { + ret := _m.Called(ctx, msg, nodeAddr) + + if len(ret) == 0 { + panic("no return value specified for HandleNodeMessage") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *api.Message, string) error); ok { + r0 = rf(ctx, msg, nodeAddr) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// HandleUserMessage provides a mock function with given fields: ctx, msg, callbackCh +func (_m *Handler) HandleUserMessage(ctx context.Context, msg *api.Message, callbackCh chan<- handlers.UserCallbackPayload) error { + ret := _m.Called(ctx, msg, callbackCh) + + if len(ret) == 0 { + panic("no return value specified for HandleUserMessage") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *api.Message, chan<- handlers.UserCallbackPayload) error); ok { + r0 = rf(ctx, msg, callbackCh) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *Handler) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewHandler creates a new instance of Handler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHandler(t interface { + mock.TestingT + Cleanup(func()) +}) *Handler { + mock := &Handler{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/integration_tests/gateway_integration_test.go b/core/services/gateway/integration_tests/gateway_integration_test.go new file mode 100644 index 00000000..7b265383 --- /dev/null +++ b/core/services/gateway/integration_tests/gateway_integration_test.go @@ -0,0 +1,195 @@ +package integration_tests + +import ( + "bytes" + "context" + "crypto/ecdsa" + "fmt" + "io" + "net/http" + "strings" + "sync/atomic" + "testing" + + "github.com/jonboulle/clockwork" + "github.com/onsi/gomega" + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/common" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/config" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/connector" +) + +const gatewayConfigTemplate = ` +[ConnectionManagerConfig] +AuthChallengeLen = 32 +AuthGatewayId = "test_gateway" +AuthTimestampToleranceSec = 30 + +[NodeServerConfig] +Path = "/node" +Port = 0 +HandshakeTimeoutMillis = 2_000 +MaxRequestBytes = 20_000 +ReadTimeoutMillis = 1000 +RequestTimeoutMillis = 1000 +WriteTimeoutMillis = 1000 + +[UserServerConfig] +Path = "/user" +Port 
= 0 +ContentTypeHeader = "application/jsonrpc" +MaxRequestBytes = 20_000 +ReadTimeoutMillis = 1000 +RequestTimeoutMillis = 1000 +WriteTimeoutMillis = 1000 + +[[Dons]] +DonId = "test_don" +HandlerName = "dummy" + +[[Dons.Members]] +Address = "%s" +Name = "test_node_1" +` + +const nodeConfigTemplate = ` +DonID = "test_don" +AuthMinChallengeLen = 32 +AuthTimestampToleranceSec = 30 +NodeAddress = "%s" + +[WsClientConfig] +HandshakeTimeoutMillis = 2_000 + +[[Gateways]] +Id = "test_gateway" +URL = "%s" +` + +const ( + messageId1 = "123" + messageId2 = "456" + + nodeResponsePayload = `{"response":"correct response"}` +) + +func parseGatewayConfig(t *testing.T, tomlConfig string) *config.GatewayConfig { + var cfg config.GatewayConfig + err := toml.Unmarshal([]byte(tomlConfig), &cfg) + require.NoError(t, err) + return &cfg +} + +func parseConnectorConfig(t *testing.T, tomlConfig string, nodeAddress string, nodeURL string) *connector.ConnectorConfig { + nodeConfig := fmt.Sprintf(tomlConfig, nodeAddress, nodeURL) + var cfg connector.ConnectorConfig + require.NoError(t, toml.Unmarshal([]byte(nodeConfig), &cfg)) + return &cfg +} + +type client struct { + privateKey *ecdsa.PrivateKey + connector connector.GatewayConnector + done atomic.Bool +} + +func (c *client) HandleGatewayMessage(ctx context.Context, gatewayId string, msg *api.Message) { + c.done.Store(true) + // send back user's message without re-signing - should be ignored by the Gateway + _ = c.connector.SendToGateway(ctx, gatewayId, msg) + // send back a correct response + responseMsg := &api.Message{Body: api.MessageBody{ + MessageId: msg.Body.MessageId, + Method: "test", + DonId: "test_don", + Receiver: msg.Body.Sender, + Payload: []byte(nodeResponsePayload), + }} + err := responseMsg.Sign(c.privateKey) + if err != nil { + panic(err) + } + _ = c.connector.SendToGateway(ctx, gatewayId, responseMsg) +} + +func (c *client) Sign(data ...[]byte) ([]byte, error) { + return common.SignData(c.privateKey, data...) 
+} + +func (*client) Start(ctx context.Context) error { + return nil +} + +func (*client) Close() error { + return nil +} + +func TestIntegration_Gateway_NoFullNodes_BasicConnectionAndMessage(t *testing.T) { + t.Parallel() + + testWallets := common.NewTestNodes(t, 2) + nodeKeys := testWallets[0] + userKeys := testWallets[1] + // Verify that addresses in config are case-insensitive + nodeKeys.Address = strings.ToUpper(nodeKeys.Address) + + // Launch Gateway + lggr := logger.TestLogger(t) + gatewayConfig := fmt.Sprintf(gatewayConfigTemplate, nodeKeys.Address) + gateway, err := gateway.NewGatewayFromConfig(parseGatewayConfig(t, gatewayConfig), gateway.NewHandlerFactory(nil, nil, nil, lggr), lggr) + require.NoError(t, err) + servicetest.Run(t, gateway) + userPort, nodePort := gateway.GetUserPort(), gateway.GetNodePort() + userUrl := fmt.Sprintf("http://localhost:%d/user", userPort) + nodeUrl := fmt.Sprintf("ws://localhost:%d/node", nodePort) + + // Launch Connector + client := &client{privateKey: nodeKeys.PrivateKey} + connector, err := connector.NewGatewayConnector(parseConnectorConfig(t, nodeConfigTemplate, nodeKeys.Address, nodeUrl), client, client, clockwork.NewRealClock(), lggr) + require.NoError(t, err) + client.connector = connector + servicetest.Run(t, connector) + + // Send requests until one of them reaches Connector (i.e. 
the node) + gomega.NewGomegaWithT(t).Eventually(func() bool { + req := newHttpRequestObject(t, messageId1, userUrl, userKeys.PrivateKey) + httpClient := &http.Client{} + _, _ = httpClient.Do(req) // could initially return error if Gateway is not fully initialized yet + return client.done.Load() + }, testutils.WaitTimeout(t), testutils.TestInterval).Should(gomega.Equal(true)) + + // Send another request and validate that response has correct content and sender + req := newHttpRequestObject(t, messageId2, userUrl, userKeys.PrivateKey) + httpClient := &http.Client{} + resp, err := httpClient.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + + rawResp, err := io.ReadAll(resp.Body) + require.NoError(t, err) + codec := api.JsonRPCCodec{} + respMsg, err := codec.DecodeResponse(rawResp) + require.NoError(t, err) + require.NoError(t, respMsg.Validate()) + require.Equal(t, strings.ToLower(nodeKeys.Address), respMsg.Body.Sender) + require.Equal(t, messageId2, respMsg.Body.MessageId) + require.Equal(t, nodeResponsePayload, string(respMsg.Body.Payload)) +} + +func newHttpRequestObject(t *testing.T, messageId string, userUrl string, signerKey *ecdsa.PrivateKey) *http.Request { + msg := &api.Message{Body: api.MessageBody{MessageId: messageId, Method: "test", DonId: "test_don"}} + require.NoError(t, msg.Sign(signerKey)) + codec := api.JsonRPCCodec{} + rawMsg, err := codec.EncodeRequest(msg) + require.NoError(t, err) + req, err := http.NewRequestWithContext(testutils.Context(t), "POST", userUrl, bytes.NewBuffer(rawMsg)) + require.NoError(t, err) + return req +} diff --git a/core/services/gateway/network/constants.go b/core/services/gateway/network/constants.go new file mode 100644 index 00000000..67dc1a97 --- /dev/null +++ b/core/services/gateway/network/constants.go @@ -0,0 +1,14 @@ +package network + +const ( + WsServerHandshakeAuthHeaderName string = "Authorization" + WsServerHandshakeChallengeHeaderName string = "Challenge" + + 
HandshakeTimestampLen int = 4 + HandshakeDonIdLen int = 64 + HandshakeGatewayURLLen int = 128 + HandshakeSignatureLen int = 65 + HandshakeAuthHeaderLen int = HandshakeTimestampLen + HandshakeDonIdLen + HandshakeGatewayURLLen + HandshakeSignatureLen + HandshakeEncodedAuthHeaderMaxLen int = 512 + HandshakeChallengeMinLen int = HandshakeTimestampLen + HandshakeGatewayURLLen + 1 +) diff --git a/core/services/gateway/network/handshake.go b/core/services/gateway/network/handshake.go new file mode 100644 index 00000000..a9a186c2 --- /dev/null +++ b/core/services/gateway/network/handshake.go @@ -0,0 +1,110 @@ +package network + +import ( + "errors" + "fmt" + "net/url" + + "github.com/gorilla/websocket" + + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/common" +) + +// The handshake works as follows: +// +// Client (Initiator) Server (Acceptor) +// +// NewAuthHeader() +// -------auth header--------> +// StartHandshake() +// <-------challenge---------- +// ChallengeResponse() +// ---------response---------> +// FinalizeHandshake() +// +//go:generate mockery --quiet --name ConnectionInitiator --output ./mocks/ --case=underscore +type ConnectionInitiator interface { + // Generate authentication header value specific to node and gateway + NewAuthHeader(url *url.URL) ([]byte, error) + + // Sign challenge to prove identity. + ChallengeResponse(url *url.URL, challenge []byte) ([]byte, error) +} + +//go:generate mockery --quiet --name ConnectionAcceptor --output ./mocks/ --case=underscore +type ConnectionAcceptor interface { + // Verify auth header, save state of the attempt and generate a challenge for the node. + StartHandshake(authHeader []byte) (attemptId string, challenge []byte, err error) + + // Verify signed challenge and update connection, if successful. + FinalizeHandshake(attemptId string, response []byte, conn *websocket.Conn) error + + // Clear attempt's state. 
+ AbortHandshake(attemptId string) +} + +// Components going into the auth header, excluding the signature. +type AuthHeaderElems struct { + Timestamp uint32 + DonId string + GatewayId string +} + +type ChallengeElems struct { + Timestamp uint32 + GatewayId string + ChallengeBytes []byte +} + +var ( + ErrAuthHeaderParse = errors.New("unable to parse auth header") + ErrAuthInvalidDonId = errors.New("invalid DON ID") + ErrAuthInvalidNode = errors.New("unexpected node address") + ErrAuthInvalidGateway = errors.New("invalid gateway ID") + ErrAuthInvalidTimestamp = errors.New("timestamp outside of tolerance range") + ErrChallengeTooShort = errors.New("challenge too short") + ErrChallengeAttemptNotFound = errors.New("attempt not found") + ErrChallengeInvalidSignature = errors.New("invalid challenge signature") +) + +func PackAuthHeader(elems *AuthHeaderElems) []byte { + packed := common.Uint32ToBytes(elems.Timestamp) + packed = append(packed, common.StringToAlignedBytes(elems.DonId, HandshakeDonIdLen)...) + packed = append(packed, common.StringToAlignedBytes(elems.GatewayId, HandshakeGatewayURLLen)...) 
+ return packed +} + +func UnpackSignedAuthHeader(data []byte) (elems *AuthHeaderElems, signer []byte, err error) { + if len(data) != HandshakeAuthHeaderLen { + return nil, nil, fmt.Errorf("auth header length is invalid (expected: %d, got: %d)", HandshakeAuthHeaderLen, len(data)) + } + elems = &AuthHeaderElems{} + offset := 0 + elems.Timestamp = common.BytesToUint32(data[offset : offset+HandshakeTimestampLen]) + offset += HandshakeTimestampLen + elems.DonId = common.AlignedBytesToString(data[offset : offset+HandshakeDonIdLen]) + offset += HandshakeDonIdLen + elems.GatewayId = common.AlignedBytesToString(data[offset : offset+HandshakeGatewayURLLen]) + offset += HandshakeGatewayURLLen + signature := data[offset:] + signer, err = common.ExtractSigner(signature, data[:len(data)-HandshakeSignatureLen]) + return +} + +func PackChallenge(elems *ChallengeElems) []byte { + packed := common.Uint32ToBytes(elems.Timestamp) + packed = append(packed, common.StringToAlignedBytes(elems.GatewayId, HandshakeGatewayURLLen)...) + packed = append(packed, elems.ChallengeBytes...) 
+ return packed +} + +func UnpackChallenge(data []byte) (*ChallengeElems, error) { + if len(data) < HandshakeChallengeMinLen { + return nil, fmt.Errorf("challenge length is too small (expected at least: %d, got: %d)", HandshakeChallengeMinLen, len(data)) + } + unpacked := &ChallengeElems{} + unpacked.Timestamp = common.BytesToUint32(data[0:HandshakeTimestampLen]) + unpacked.GatewayId = common.AlignedBytesToString(data[HandshakeTimestampLen : HandshakeTimestampLen+HandshakeGatewayURLLen]) + unpacked.ChallengeBytes = data[HandshakeTimestampLen+HandshakeGatewayURLLen:] + return unpacked, nil +} diff --git a/core/services/gateway/network/httpserver.go b/core/services/gateway/network/httpserver.go new file mode 100644 index 00000000..85d08659 --- /dev/null +++ b/core/services/gateway/network/httpserver.go @@ -0,0 +1,164 @@ +package network + +import ( + "context" + "fmt" + "io" + "net" + "net/http" + "time" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +//go:generate mockery --quiet --name HttpServer --output ./mocks/ --case=underscore +type HttpServer interface { + job.ServiceCtx + + // Not thread-safe. Should be called once, before Start() is called. + SetHTTPRequestHandler(handler HTTPRequestHandler) + + // Not thread-safe. Can be called after Start() returns. 
+ GetPort() int +} + +//go:generate mockery --quiet --name HTTPRequestHandler --output ./mocks/ --case=underscore +type HTTPRequestHandler interface { + ProcessRequest(ctx context.Context, rawRequest []byte) (rawResponse []byte, httpStatusCode int) +} + +type HTTPServerConfig struct { + Host string + Port uint16 + TLSEnabled bool + TLSCertPath string + TLSKeyPath string + Path string + ContentTypeHeader string + ReadTimeoutMillis uint32 + WriteTimeoutMillis uint32 + RequestTimeoutMillis uint32 + MaxRequestBytes int64 +} + +type httpServer struct { + services.StateMachine + config *HTTPServerConfig + listener net.Listener + server *http.Server + handler HTTPRequestHandler + doneCh chan struct{} + cancelBaseContext context.CancelFunc + lggr logger.Logger +} + +const ( + HealthCheckPath = "/health" + HealthCheckResponse = "OK" +) + +func NewHttpServer(config *HTTPServerConfig, lggr logger.Logger) HttpServer { + baseCtx, cancelBaseCtx := context.WithCancel(context.Background()) + server := &httpServer{ + config: config, + doneCh: make(chan struct{}), + cancelBaseContext: cancelBaseCtx, + lggr: lggr.Named("WebSocketServer"), + } + mux := http.NewServeMux() + mux.Handle(config.Path, http.HandlerFunc(server.handleRequest)) + mux.Handle(HealthCheckPath, http.HandlerFunc(server.handleHealthCheck)) + server.server = &http.Server{ + Addr: fmt.Sprintf("%s:%d", config.Host, config.Port), + Handler: mux, + BaseContext: func(net.Listener) context.Context { return baseCtx }, + ReadTimeout: time.Duration(config.ReadTimeoutMillis) * time.Millisecond, + ReadHeaderTimeout: time.Duration(config.ReadTimeoutMillis) * time.Millisecond, + WriteTimeout: time.Duration(config.WriteTimeoutMillis) * time.Millisecond, + } + return server +} + +func (s *httpServer) handleHealthCheck(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte(HealthCheckResponse)) + if err != nil { + s.lggr.Debug("error when writing response for healthcheck", err) + } +} + 
+func (s *httpServer) handleRequest(w http.ResponseWriter, r *http.Request) { + source := http.MaxBytesReader(nil, r.Body, s.config.MaxRequestBytes) + rawMessage, err := io.ReadAll(source) + if err != nil { + s.lggr.Error("error reading request", err) + w.WriteHeader(http.StatusBadRequest) + return + } + + requestCtx := r.Context() + if s.config.RequestTimeoutMillis > 0 { + var cancel context.CancelFunc + requestCtx, cancel = context.WithTimeout(requestCtx, time.Duration(s.config.RequestTimeoutMillis)*time.Millisecond) + defer cancel() + } + rawResponse, httpStatusCode := s.handler.ProcessRequest(requestCtx, rawMessage) + + w.Header().Set("Content-Type", s.config.ContentTypeHeader) + w.WriteHeader(httpStatusCode) + _, err = w.Write(rawResponse) + if err != nil { + s.lggr.Error("error when writing response", err) + } +} + +func (s *httpServer) SetHTTPRequestHandler(handler HTTPRequestHandler) { + s.handler = handler +} + +func (s *httpServer) GetPort() int { + return s.listener.Addr().(*net.TCPAddr).Port +} + +func (s *httpServer) Start(ctx context.Context) error { + return s.StartOnce("GatewayHTTPServer", func() error { + s.lggr.Info("starting gateway HTTP server") + return s.runServer() + }) +} + +func (s *httpServer) Close() error { + return s.StopOnce("GatewayHTTPServer", func() (err error) { + s.lggr.Info("closing gateway HTTP server") + s.cancelBaseContext() + err = s.server.Shutdown(context.Background()) + <-s.doneCh + return + }) +} + +func (s *httpServer) runServer() (err error) { + s.listener, err = net.Listen("tcp", s.server.Addr) + if err != nil { + return + } + tlsEnabled := s.config.TLSEnabled + + go func() { + if tlsEnabled { + err := s.server.ServeTLS(s.listener, s.config.TLSCertPath, s.config.TLSKeyPath) + if err != http.ErrServerClosed { + s.lggr.Error("gateway server closed with error:", err) + } + } else { + err := s.server.Serve(s.listener) + if err != http.ErrServerClosed { + s.lggr.Error("gateway server closed with error:", err) + } + } + 
s.doneCh <- struct{}{} + }() + return +} diff --git a/core/services/gateway/network/httpserver_test.go b/core/services/gateway/network/httpserver_test.go new file mode 100644 index 00000000..c95568e7 --- /dev/null +++ b/core/services/gateway/network/httpserver_test.go @@ -0,0 +1,89 @@ +package network_test + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network/mocks" +) + +const ( + HTTPTestHost = "localhost" + HTTPTestPath = "/test_path" +) + +func startNewServer(t *testing.T, maxRequestBytes int64, readTimeoutMillis uint32) (server network.HttpServer, handler *mocks.HTTPRequestHandler, url string) { + config := &network.HTTPServerConfig{ + Host: HTTPTestHost, + Port: 0, + Path: HTTPTestPath, + TLSEnabled: false, + ContentTypeHeader: "application/jsonrpc", + ReadTimeoutMillis: readTimeoutMillis, + WriteTimeoutMillis: 10_000, + RequestTimeoutMillis: 10_000, + MaxRequestBytes: maxRequestBytes, + } + + handler = mocks.NewHTTPRequestHandler(t) + server = network.NewHttpServer(config, logger.TestLogger(t)) + server.SetHTTPRequestHandler(handler) + err := server.Start(testutils.Context(t)) + require.NoError(t, err) + + port := server.GetPort() + url = fmt.Sprintf("http://%s:%d%s", HTTPTestHost, port, HTTPTestPath) + return +} + +func sendRequest(t *testing.T, url string, body []byte) *http.Response { + req, err := http.NewRequestWithContext(testutils.Context(t), "POST", url, bytes.NewBuffer(body)) + require.NoError(t, err) + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err) + return resp +} + +func TestHTTPServer_HandleRequest_Correct(t *testing.T) { + server, handler, url := startNewServer(t, 
100_000, 100_000) + defer server.Close() + + handler.On("ProcessRequest", mock.Anything, mock.Anything).Return([]byte("response"), 200) + + resp := sendRequest(t, url, []byte("0123456789")) + respBytes, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + require.Equal(t, []byte("response"), respBytes) +} + +func TestHTTPServer_HandleRequest_RequestBodyTooBig(t *testing.T) { + server, _, url := startNewServer(t, 5, 100_000) + defer server.Close() + + resp := sendRequest(t, url, []byte("0123456789")) + require.Equal(t, http.StatusBadRequest, resp.StatusCode) +} + +func TestHTTPServer_HandleHealthCheck(t *testing.T) { + server, _, url := startNewServer(t, 100_000, 100_000) + defer server.Close() + + url = strings.Replace(url, HTTPTestPath, network.HealthCheckPath, 1) + resp := sendRequest(t, url, []byte{}) + respBytes, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + require.Equal(t, []byte(network.HealthCheckResponse), respBytes) +} diff --git a/core/services/gateway/network/mocks/connection_acceptor.go b/core/services/gateway/network/mocks/connection_acceptor.go new file mode 100644 index 00000000..c45cc7fb --- /dev/null +++ b/core/services/gateway/network/mocks/connection_acceptor.go @@ -0,0 +1,88 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + websocket "github.com/gorilla/websocket" +) + +// ConnectionAcceptor is an autogenerated mock type for the ConnectionAcceptor type +type ConnectionAcceptor struct { + mock.Mock +} + +// AbortHandshake provides a mock function with given fields: attemptId +func (_m *ConnectionAcceptor) AbortHandshake(attemptId string) { + _m.Called(attemptId) +} + +// FinalizeHandshake provides a mock function with given fields: attemptId, response, conn +func (_m *ConnectionAcceptor) FinalizeHandshake(attemptId string, response []byte, conn *websocket.Conn) error { + ret := _m.Called(attemptId, response, conn) + + if len(ret) == 0 { + panic("no return value specified for FinalizeHandshake") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, []byte, *websocket.Conn) error); ok { + r0 = rf(attemptId, response, conn) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StartHandshake provides a mock function with given fields: authHeader +func (_m *ConnectionAcceptor) StartHandshake(authHeader []byte) (string, []byte, error) { + ret := _m.Called(authHeader) + + if len(ret) == 0 { + panic("no return value specified for StartHandshake") + } + + var r0 string + var r1 []byte + var r2 error + if rf, ok := ret.Get(0).(func([]byte) (string, []byte, error)); ok { + return rf(authHeader) + } + if rf, ok := ret.Get(0).(func([]byte) string); ok { + r0 = rf(authHeader) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func([]byte) []byte); ok { + r1 = rf(authHeader) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func([]byte) error); ok { + r2 = rf(authHeader) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewConnectionAcceptor creates a new instance of ConnectionAcceptor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewConnectionAcceptor(t interface { + mock.TestingT + Cleanup(func()) +}) *ConnectionAcceptor { + mock := &ConnectionAcceptor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/network/mocks/connection_initiator.go b/core/services/gateway/network/mocks/connection_initiator.go new file mode 100644 index 00000000..87e4f407 --- /dev/null +++ b/core/services/gateway/network/mocks/connection_initiator.go @@ -0,0 +1,88 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + url "net/url" +) + +// ConnectionInitiator is an autogenerated mock type for the ConnectionInitiator type +type ConnectionInitiator struct { + mock.Mock +} + +// ChallengeResponse provides a mock function with given fields: _a0, challenge +func (_m *ConnectionInitiator) ChallengeResponse(_a0 *url.URL, challenge []byte) ([]byte, error) { + ret := _m.Called(_a0, challenge) + + if len(ret) == 0 { + panic("no return value specified for ChallengeResponse") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(*url.URL, []byte) ([]byte, error)); ok { + return rf(_a0, challenge) + } + if rf, ok := ret.Get(0).(func(*url.URL, []byte) []byte); ok { + r0 = rf(_a0, challenge) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(*url.URL, []byte) error); ok { + r1 = rf(_a0, challenge) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewAuthHeader provides a mock function with given fields: _a0 +func (_m *ConnectionInitiator) NewAuthHeader(_a0 *url.URL) ([]byte, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for NewAuthHeader") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(*url.URL) ([]byte, error)); ok { + return rf(_a0) + } + if rf, ok := 
ret.Get(0).(func(*url.URL) []byte); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(*url.URL) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewConnectionInitiator creates a new instance of ConnectionInitiator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConnectionInitiator(t interface { + mock.TestingT + Cleanup(func()) +}) *ConnectionInitiator { + mock := &ConnectionInitiator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/network/mocks/http_request_handler.go b/core/services/gateway/network/mocks/http_request_handler.go new file mode 100644 index 00000000..7c5ff402 --- /dev/null +++ b/core/services/gateway/network/mocks/http_request_handler.go @@ -0,0 +1,58 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// HTTPRequestHandler is an autogenerated mock type for the HTTPRequestHandler type +type HTTPRequestHandler struct { + mock.Mock +} + +// ProcessRequest provides a mock function with given fields: ctx, rawRequest +func (_m *HTTPRequestHandler) ProcessRequest(ctx context.Context, rawRequest []byte) ([]byte, int) { + ret := _m.Called(ctx, rawRequest) + + if len(ret) == 0 { + panic("no return value specified for ProcessRequest") + } + + var r0 []byte + var r1 int + if rf, ok := ret.Get(0).(func(context.Context, []byte) ([]byte, int)); ok { + return rf(ctx, rawRequest) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte) []byte); ok { + r0 = rf(ctx, rawRequest) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte) int); ok { + r1 = rf(ctx, rawRequest) + } else { + r1 = ret.Get(1).(int) + } + + return r0, r1 +} + +// NewHTTPRequestHandler creates a new instance of HTTPRequestHandler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHTTPRequestHandler(t interface { + mock.TestingT + Cleanup(func()) +}) *HTTPRequestHandler { + mock := &HTTPRequestHandler{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/network/mocks/http_server.go b/core/services/gateway/network/mocks/http_server.go new file mode 100644 index 00000000..ba7725c9 --- /dev/null +++ b/core/services/gateway/network/mocks/http_server.go @@ -0,0 +1,88 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + network "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network" + mock "github.com/stretchr/testify/mock" +) + +// HttpServer is an autogenerated mock type for the HttpServer type +type HttpServer struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *HttpServer) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetPort provides a mock function with given fields: +func (_m *HttpServer) GetPort() int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetPort") + } + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// SetHTTPRequestHandler provides a mock function with given fields: handler +func (_m *HttpServer) SetHTTPRequestHandler(handler network.HTTPRequestHandler) { + _m.Called(handler) +} + +// Start provides a mock function with given fields: _a0 +func (_m *HttpServer) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewHttpServer creates a new instance of HttpServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewHttpServer(t interface { + mock.TestingT + Cleanup(func()) +}) *HttpServer { + mock := &HttpServer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/network/mocks/web_socket_server.go b/core/services/gateway/network/mocks/web_socket_server.go new file mode 100644 index 00000000..4f75f3b7 --- /dev/null +++ b/core/services/gateway/network/mocks/web_socket_server.go @@ -0,0 +1,82 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// WebSocketServer is an autogenerated mock type for the WebSocketServer type +type WebSocketServer struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *WebSocketServer) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetPort provides a mock function with given fields: +func (_m *WebSocketServer) GetPort() int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetPort") + } + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *WebSocketServer) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewWebSocketServer creates a new instance of WebSocketServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewWebSocketServer(t interface { + mock.TestingT + Cleanup(func()) +}) *WebSocketServer { + mock := &WebSocketServer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/gateway/network/wsclient.go b/core/services/gateway/network/wsclient.go new file mode 100644 index 00000000..86bf3c01 --- /dev/null +++ b/core/services/gateway/network/wsclient.go @@ -0,0 +1,94 @@ +package network + +import ( + "context" + "encoding/base64" + "net/http" + "net/url" + "time" + + "github.com/gorilla/websocket" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +type WebSocketClient interface { + Connect(ctx context.Context, url *url.URL) (*websocket.Conn, error) +} + +type WebSocketClientConfig struct { + HandshakeTimeoutMillis uint32 +} + +type webSocketClient struct { + initiator ConnectionInitiator + dialer *websocket.Dialer + lggr logger.Logger +} + +func NewWebSocketClient(config WebSocketClientConfig, initiator ConnectionInitiator, lggr logger.Logger) WebSocketClient { + dialer := &websocket.Dialer{ + HandshakeTimeout: time.Duration(config.HandshakeTimeoutMillis) * time.Millisecond, + } + client := &webSocketClient{ + initiator: initiator, + dialer: dialer, + lggr: lggr.Named("WebSocketClient"), + } + return client +} + +func (c *webSocketClient) Connect(ctx context.Context, url *url.URL) (*websocket.Conn, error) { + authHeader, err := c.initiator.NewAuthHeader(url) + if err != nil { + return nil, err + } + authHeaderStr := base64.StdEncoding.EncodeToString(authHeader) + + hdr := make(http.Header) + hdr.Add(WsServerHandshakeAuthHeaderName, authHeaderStr) + + conn, resp, err := c.dialer.DialContext(ctx, url.String(), hdr) + + if err != nil { + c.lggr.Errorf("WebSocketClient: couldn't connect to %s: %w", url.String(), err) + c.tryCloseConn(conn) + return nil, err + } + + challengeStr := resp.Header.Get(WsServerHandshakeChallengeHeaderName) + if challengeStr == "" { + c.lggr.Error("WebSocketClient: 
empty challenge") + c.tryCloseConn(conn) + return nil, err + } + challenge, err := base64.StdEncoding.DecodeString(challengeStr) + if err != nil { + c.lggr.Errorf("WebSocketClient: couldn't decode challenge: %s: %v", challengeStr, err) + c.tryCloseConn(conn) + return nil, err + } + + response, err := c.initiator.ChallengeResponse(url, challenge) + if err != nil { + c.lggr.Errorf("WebSocketClient: couldn't generate challenge response", err) + c.tryCloseConn(conn) + return nil, err + } + + if err = conn.WriteMessage(websocket.BinaryMessage, response); err != nil { + c.lggr.Errorf("WebSocketClient: couldn't send challenge response", err) + c.tryCloseConn(conn) + return nil, err + } + return conn, nil +} + +func (c *webSocketClient) tryCloseConn(conn *websocket.Conn) { + if conn != nil { + err := conn.Close() + if err != nil { + c.lggr.Errorf("WebSocketClient: error closing connection %w", err) + } + } +} diff --git a/core/services/gateway/network/wsconnection.go b/core/services/gateway/network/wsconnection.go new file mode 100644 index 00000000..5076ca1d --- /dev/null +++ b/core/services/gateway/network/wsconnection.go @@ -0,0 +1,184 @@ +package network + +import ( + "context" + "errors" + "sync/atomic" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/gorilla/websocket" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +// WSConnectionWrapper is a websocket connection abstraction that supports re-connects. 
+// I/O is separated from connection management: +// - component doing writes can use the thread-safe Write() method +// - component doing reads can listen on the ReadChannel() +// - component managing connections can listen to connection-closed channels and call Reset() +// to swap the underlying connection object +// +// The Wrapper can be used by a server expecting long-lived connections from a given client, +// as well as a client maintaining such long-lived connection with a given server. +// This fits the Gateway very well as servers accept connections only from a fixed set of nodes +// and conversely, nodes only connect to a fixed set of servers (Gateways). +// +// The concept of "pumps" is borrowed from https://github.com/goplugin/wsrpc +// All methods are thread-safe. +type WSConnectionWrapper interface { + job.ServiceCtx + services.HealthReporter + + // Update underlying connection object. Return a channel that gets an error on connection close. + // Cannot be called after Close(). 
+ Reset(newConn *websocket.Conn) <-chan error + + Write(ctx context.Context, msgType int, data []byte) error + + ReadChannel() <-chan ReadItem +} + +type wsConnectionWrapper struct { + services.StateMachine + lggr logger.Logger + + conn atomic.Pointer[websocket.Conn] + + writeCh chan writeItem + readCh chan ReadItem + shutdownCh chan struct{} +} + +func (c *wsConnectionWrapper) HealthReport() map[string]error { + return map[string]error{c.Name(): c.Healthy()} +} + +func (c *wsConnectionWrapper) Name() string { return c.lggr.Name() } + +type ReadItem struct { + MsgType int + Data []byte +} + +type writeItem struct { + MsgType int + Data []byte + ErrCh chan error +} + +var _ WSConnectionWrapper = (*wsConnectionWrapper)(nil) + +var ( + ErrNoActiveConnection = errors.New("no active connection") + ErrWrapperShutdown = errors.New("wrapper shutting down") +) + +func NewWSConnectionWrapper(lggr logger.Logger) WSConnectionWrapper { + cw := &wsConnectionWrapper{ + lggr: lggr.Named("WSConnectionWrapper"), + writeCh: make(chan writeItem), + readCh: make(chan ReadItem), + shutdownCh: make(chan struct{}), + } + return cw +} + +func (c *wsConnectionWrapper) Start(_ context.Context) error { + return c.StartOnce("WSConnectionWrapper", func() error { + // write pump runs until Shutdown() is called + go c.writePump() + return nil + }) +} + +// Reset: +// 1. replaces the underlying connection and shuts the old one down +// 2. starts a new read goroutine that pushes received messages to readCh +// 3. 
returns channel that closes when connection closes +func (c *wsConnectionWrapper) Reset(newConn *websocket.Conn) <-chan error { + oldConn := c.conn.Swap(newConn) + + if oldConn != nil { + oldConn.Close() + } + if newConn == nil { + return nil + } + closeCh := make(chan error, 1) + // readPump goroutine is tied to the lifecycle of the underlying conn object + go c.readPump(newConn, closeCh) + return closeCh +} + +func (c *wsConnectionWrapper) Write(ctx context.Context, msgType int, data []byte) error { + errCh := make(chan error, 1) + // push to write channel + select { + case c.writeCh <- writeItem{msgType, data, errCh}: + break + case <-c.shutdownCh: + return ErrWrapperShutdown + case <-ctx.Done(): + return ctx.Err() + } + // wait for write result + select { + case err := <-errCh: + return err + case <-c.shutdownCh: + return ErrWrapperShutdown + case <-ctx.Done(): + return ctx.Err() + } +} + +func (c *wsConnectionWrapper) ReadChannel() <-chan ReadItem { + return c.readCh +} + +func (c *wsConnectionWrapper) Close() error { + return c.StopOnce("WSConnectionWrapper", func() error { + close(c.shutdownCh) + c.Reset(nil) + return nil + }) +} + +func (c *wsConnectionWrapper) writePump() { + for { + select { + case wsMsg := <-c.writeCh: + // synchronization is a tradeoff for the ability to use a single write channel + conn := c.conn.Load() + if conn == nil { + wsMsg.ErrCh <- ErrNoActiveConnection + close(wsMsg.ErrCh) + break + } + wsMsg.ErrCh <- conn.WriteMessage(wsMsg.MsgType, wsMsg.Data) + close(wsMsg.ErrCh) + case <-c.shutdownCh: + return + } + } +} + +func (c *wsConnectionWrapper) readPump(conn *websocket.Conn, closeCh chan<- error) { + for { + msgType, data, err := conn.ReadMessage() + if err != nil { + closeCh <- conn.Close() + close(closeCh) + return + } + select { + case c.readCh <- ReadItem{msgType, data}: + case <-c.shutdownCh: + closeCh <- conn.Close() + close(closeCh) + return + } + } +} diff --git a/core/services/gateway/network/wsconnection_test.go 
b/core/services/gateway/network/wsconnection_test.go new file mode 100644 index 00000000..cf521b81 --- /dev/null +++ b/core/services/gateway/network/wsconnection_test.go @@ -0,0 +1,67 @@ +package network_test + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/gorilla/websocket" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network" +) + +var upgrader = websocket.Upgrader{} + +type serverSideLogic struct { + connWrapper network.WSConnectionWrapper +} + +func (ssl *serverSideLogic) wsHandler(w http.ResponseWriter, r *http.Request) { + c, err := upgrader.Upgrade(w, r, nil) + if err != nil { + return + } + // one wsConnWrapper per client + ssl.connWrapper.Reset(c) +} + +func TestWSConnectionWrapper_ClientReconnect(t *testing.T) { + lggr := logger.TestLogger(t) + // server + ssl := &serverSideLogic{connWrapper: network.NewWSConnectionWrapper(lggr)} + servicetest.Run(t, ssl.connWrapper) + s := httptest.NewServer(http.HandlerFunc(ssl.wsHandler)) + serverURL := "ws" + strings.TrimPrefix(s.URL, "http") + defer s.Close() + + // client + clientConnWrapper := network.NewWSConnectionWrapper(lggr) + servicetest.Run(t, clientConnWrapper) + + // connect, write a message, disconnect + conn, _, err := websocket.DefaultDialer.Dial(serverURL, nil) + require.NoError(t, err) + clientConnWrapper.Reset(conn) + writeErr := clientConnWrapper.Write(testutils.Context(t), websocket.TextMessage, []byte("hello")) + require.NoError(t, writeErr) + <-ssl.connWrapper.ReadChannel() // consumed by server + conn.Close() + + // try to write without a connection + writeErr = clientConnWrapper.Write(testutils.Context(t), websocket.TextMessage, []byte("failed send")) + require.Error(t, writeErr) + + // re-connect, write another message, 
disconnect + conn, _, err = websocket.DefaultDialer.Dial(serverURL, nil) + require.NoError(t, err) + clientConnWrapper.Reset(conn) + writeErr = clientConnWrapper.Write(testutils.Context(t), websocket.TextMessage, []byte("hello again")) + require.NoError(t, writeErr) + <-ssl.connWrapper.ReadChannel() // consumed by server + conn.Close() +} diff --git a/core/services/gateway/network/wsserver.go b/core/services/gateway/network/wsserver.go new file mode 100644 index 00000000..55c9fe4a --- /dev/null +++ b/core/services/gateway/network/wsserver.go @@ -0,0 +1,158 @@ +package network + +import ( + "context" + "encoding/base64" + "fmt" + "net" + "net/http" + "time" + + "github.com/gorilla/websocket" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +//go:generate mockery --quiet --name WebSocketServer --output ./mocks/ --case=underscore +type WebSocketServer interface { + job.ServiceCtx + + // Not thread-safe. Can be called after Start() returns. 
+ GetPort() int +} + +type WebSocketServerConfig struct { + HTTPServerConfig + HandshakeTimeoutMillis uint32 +} + +type webSocketServer struct { + services.StateMachine + config *WebSocketServerConfig + listener net.Listener + server *http.Server + acceptor ConnectionAcceptor + upgrader *websocket.Upgrader + doneCh chan struct{} + cancelBaseContext context.CancelFunc + lggr logger.Logger +} + +func NewWebSocketServer(config *WebSocketServerConfig, acceptor ConnectionAcceptor, lggr logger.Logger) WebSocketServer { + baseCtx, cancelBaseCtx := context.WithCancel(context.Background()) + upgrader := &websocket.Upgrader{ + HandshakeTimeout: time.Duration(config.HandshakeTimeoutMillis) * time.Millisecond, + } + server := &webSocketServer{ + config: config, + acceptor: acceptor, + upgrader: upgrader, + doneCh: make(chan struct{}), + cancelBaseContext: cancelBaseCtx, + lggr: lggr.Named("WebSocketServer"), + } + mux := http.NewServeMux() + mux.Handle(config.Path, http.HandlerFunc(server.handleRequest)) + server.server = &http.Server{ + Addr: fmt.Sprintf("%s:%d", config.Host, config.Port), + Handler: mux, + BaseContext: func(net.Listener) context.Context { return baseCtx }, + ReadTimeout: time.Duration(config.ReadTimeoutMillis) * time.Millisecond, + ReadHeaderTimeout: time.Duration(config.ReadTimeoutMillis) * time.Millisecond, + WriteTimeout: time.Duration(config.WriteTimeoutMillis) * time.Millisecond, + } + return server +} + +func (s *webSocketServer) GetPort() int { + return s.listener.Addr().(*net.TCPAddr).Port +} + +func (s *webSocketServer) handleRequest(w http.ResponseWriter, r *http.Request) { + authHeader := r.Header.Get(WsServerHandshakeAuthHeaderName) + if len(authHeader) > HandshakeEncodedAuthHeaderMaxLen { + s.lggr.Errorw("received auth header is too large", "len", len(authHeader)) + w.WriteHeader(http.StatusBadRequest) + return + } + authBytes, err := base64.StdEncoding.DecodeString(authHeader) + if err != nil { + s.lggr.Error("received auth header can't be 
base64-decoded", err) + w.WriteHeader(http.StatusBadRequest) + return + } + attemptId, challenge, err := s.acceptor.StartHandshake(authBytes) + if err != nil { + s.lggr.Error("received invalid auth header", err) + w.WriteHeader(http.StatusUnauthorized) + return + } + + challengeStr := base64.StdEncoding.EncodeToString(challenge) + hdr := make(http.Header) + hdr.Add(WsServerHandshakeChallengeHeaderName, challengeStr) + conn, err := s.upgrader.Upgrade(w, r, hdr) + if err != nil { + s.lggr.Error("failed websocket upgrade", err) + conn.Close() + s.acceptor.AbortHandshake(attemptId) + return + } + + msgType, response, err := conn.ReadMessage() + if err != nil || msgType != websocket.BinaryMessage { + s.lggr.Error("invalid handshake message", msgType, err) + conn.Close() + s.acceptor.AbortHandshake(attemptId) + return + } + + if err = s.acceptor.FinalizeHandshake(attemptId, response, conn); err != nil { + s.lggr.Error("unable to finalize handshake", err) + conn.Close() + s.acceptor.AbortHandshake(attemptId) + return + } +} + +func (s *webSocketServer) Start(ctx context.Context) error { + return s.StartOnce("GatewayWebSocketServer", func() error { + s.lggr.Info("starting gateway WebSocket server") + return s.runServer() + }) +} + +func (s *webSocketServer) Close() error { + return s.StopOnce("GatewayWebSocketServer", func() (err error) { + s.lggr.Info("closing gateway WebSocket server") + s.cancelBaseContext() + err = s.server.Shutdown(context.Background()) + <-s.doneCh + return + }) +} + +func (s *webSocketServer) runServer() (err error) { + s.listener, err = net.Listen("tcp", s.server.Addr) + if err != nil { + return + } + tlsEnabled := s.config.TLSEnabled + go func() { + if tlsEnabled { + err := s.server.ServeTLS(s.listener, s.config.TLSCertPath, s.config.TLSKeyPath) + if err != http.ErrServerClosed { + s.lggr.Error("gateway WS server closed with error:", err) + } + } else { + err := s.server.Serve(s.listener) + if err != http.ErrServerClosed { + s.lggr.Error("gateway 
WS server closed with error:", err) + } + } + s.doneCh <- struct{}{} + }() + return +} diff --git a/core/services/gateway/network/wsserver_test.go b/core/services/gateway/network/wsserver_test.go new file mode 100644 index 00000000..7751727f --- /dev/null +++ b/core/services/gateway/network/wsserver_test.go @@ -0,0 +1,92 @@ +package network_test + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/network/mocks" +) + +const ( + WSTestHost = "localhost" + WSTestPath = "/ws_test_path" +) + +func startNewWSServer(t *testing.T, readTimeoutMillis uint32) (server network.WebSocketServer, acceptor *mocks.ConnectionAcceptor, url string) { + config := &network.WebSocketServerConfig{ + HTTPServerConfig: network.HTTPServerConfig{ + Host: WSTestHost, + Port: 0, + Path: "/ws_test_path", + TLSEnabled: false, + ContentTypeHeader: "application/jsonrpc", + ReadTimeoutMillis: readTimeoutMillis, + WriteTimeoutMillis: 10_000, + RequestTimeoutMillis: 10_000, + MaxRequestBytes: 100_000, + }, + HandshakeTimeoutMillis: 10_000, + } + + acceptor = mocks.NewConnectionAcceptor(t) + server = network.NewWebSocketServer(config, acceptor, logger.TestLogger(t)) + err := server.Start(testutils.Context(t)) + require.NoError(t, err) + + port := server.GetPort() + url = fmt.Sprintf("http://%s:%d%s", WSTestHost, port, WSTestPath) + return +} + +func sendRequestWithHeader(t *testing.T, url string, headerName string, headerValue string) *http.Response { + req, err := http.NewRequestWithContext(testutils.Context(t), "POST", url, bytes.NewBuffer([]byte{})) + require.NoError(t, err) + req.Header.Set(headerName, headerValue) + + client := &http.Client{} + resp, 
err := client.Do(req) + require.NoError(t, err) + return resp +} + +func TestWSServer_HandleRequest_AuthHeaderTooBig(t *testing.T) { + server, _, url := startNewWSServer(t, 100_000) + defer server.Close() + + longString := "abcdefgh" + for i := 0; i < 6; i++ { + longString += longString + } + authHeader := base64.StdEncoding.EncodeToString([]byte(longString)) + resp := sendRequestWithHeader(t, url, network.WsServerHandshakeAuthHeaderName, authHeader) + require.Equal(t, http.StatusBadRequest, resp.StatusCode) +} + +func TestWSServer_HandleRequest_AuthHeaderIncorrectlyBase64Encoded(t *testing.T) { + server, _, url := startNewWSServer(t, 100_000) + defer server.Close() + + resp := sendRequestWithHeader(t, url, network.WsServerHandshakeAuthHeaderName, "}}}") + require.Equal(t, http.StatusBadRequest, resp.StatusCode) +} + +func TestWSServer_HandleRequest_AuthHeaderInvalid(t *testing.T) { + server, acceptor, url := startNewWSServer(t, 100_000) + defer server.Close() + + acceptor.On("StartHandshake", mock.Anything).Return("", []byte{}, errors.New("invalid auth header")) + + authHeader := base64.StdEncoding.EncodeToString([]byte("abcd")) + resp := sendRequestWithHeader(t, url, network.WsServerHandshakeAuthHeaderName, authHeader) + require.Equal(t, http.StatusUnauthorized, resp.StatusCode) +} diff --git a/core/services/health.go b/core/services/health.go new file mode 100644 index 00000000..29a46e62 --- /dev/null +++ b/core/services/health.go @@ -0,0 +1,70 @@ +package services + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +var _ Checker = (*services.HealthChecker)(nil) + +// Checker provides a service which can be probed for system health. +// +//go:generate mockery --quiet --name Checker --output ./mocks/ --case=underscore +type Checker interface { + // Register a service for health checks. 
+ Register(service services.HealthReporter) error + // Unregister a service. + Unregister(name string) error + // IsReady returns the current readiness of the system. + // A system is considered ready if all checks are passing (no errors) + IsReady() (ready bool, errors map[string]error) + // IsHealthy returns the current health of the system. + // A system is considered healthy if all checks are passing (no errors) + IsHealthy() (healthy bool, errors map[string]error) + + Start() error + Close() error +} + +type InBackupHealthReport struct { + server http.Server + lggr logger.Logger +} + +func (i *InBackupHealthReport) Stop() { + shutdownCtx, shutdownRelease := context.WithTimeout(context.Background(), time.Second) + defer shutdownRelease() + if err := i.server.Shutdown(shutdownCtx); err != nil { + i.lggr.Errorf("InBackupHealthReport shutdown error: %v", err) + } + i.lggr.Info("InBackupHealthReport shutdown complete") +} + +func (i *InBackupHealthReport) Start() { + go func() { + http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + }) + i.lggr.Info("Starting InBackupHealthReport") + if err := i.server.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { + i.lggr.Errorf("InBackupHealthReport server error: %v", err) + } + }() +} + +// NewInBackupHealthReport creates a new InBackupHealthReport that will serve the /health endpoint, useful for +// preventing shutdowns due to health-checks when running long backup tasks +func NewInBackupHealthReport(port uint16, lggr logger.Logger) *InBackupHealthReport { + return &InBackupHealthReport{ + server: http.Server{Addr: fmt.Sprintf(":%d", port), ReadHeaderTimeout: time.Second * 5}, + lggr: lggr, + } +} diff --git a/core/services/health_test.go b/core/services/health_test.go new file mode 100644 index 00000000..92cf581f --- /dev/null +++ b/core/services/health_test.go @@ -0,0 +1,33 @@ +package services_test + +import ( + "net/http" + "testing" + "time" + + 
"github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/plugin-common/pkg/utils/tests" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services" +) + +func TestNewInBackupHealthReport(t *testing.T) { + lggr, observed := logger.TestLoggerObserved(t, zapcore.InfoLevel) + ibhr := services.NewInBackupHealthReport(1234, lggr) + + ibhr.Start() + require.Eventually(t, func() bool { return observed.Len() >= 1 }, time.Second*5, time.Millisecond*100) + require.Equal(t, "Starting InBackupHealthReport", observed.TakeAll()[0].Message) + + req, err := http.NewRequestWithContext(tests.Context(t), "GET", "http://localhost:1234/health", nil) + require.NoError(t, err) + res, err := http.DefaultClient.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusNoContent, res.StatusCode) + + ibhr.Stop() + require.Eventually(t, func() bool { return observed.Len() >= 1 }, time.Second*5, time.Millisecond*100) + require.Equal(t, "InBackupHealthReport shutdown complete", observed.TakeAll()[0].Message) +} diff --git a/core/services/job/common.go b/core/services/job/common.go new file mode 100644 index 00000000..c200c584 --- /dev/null +++ b/core/services/job/common.go @@ -0,0 +1,53 @@ +package job + +import ( + "context" + "net/url" + + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +//go:generate mockery --quiet --name ServiceCtx --output ./mocks/ --case=underscore + +type Service interface { + Start() error + Close() error +} + +// ServiceCtx is the same as Service, but Start method receives a context. +type ServiceCtx interface { + Start(context.Context) error + Close() error +} + +type Config interface { + URL() url.URL + pg.QConfig +} + +// ServiceAdapter is a helper introduced for transitioning from Service to ServiceCtx. +type ServiceAdapter interface { + ServiceCtx +} + +type adapter struct { + service Service +} + +// NewServiceAdapter creates an adapter instance for the given Service. 
+func NewServiceAdapter(service Service) ServiceCtx { + return &adapter{ + service, + } +} + +// Start forwards the call to the underlying service.Start(). +// Context is not used in this case. +func (a adapter) Start(context.Context) error { + return a.service.Start() +} + +// Close forwards the call to the underlying service.Close(). +func (a adapter) Close() error { + return a.service.Close() +} diff --git a/core/services/job/helpers_test.go b/core/services/job/helpers_test.go new file mode 100644 index 00000000..65d46173 --- /dev/null +++ b/core/services/job/helpers_test.go @@ -0,0 +1,327 @@ +package job_test + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/lib/pq" + "github.com/pelletier/go-toml" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +const ( + ocrJobSpecTemplate = ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "%s" +evmChainID = "0" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +keyBundleID = "%s" +monitoringEndpoint = "chain.link:4321" +transmitterAddress = "%s" +observationTimeout = "10s" 
+blockchainTimeout = "20s" +contractConfigTrackerSubscribeInterval = "2m" +contractConfigTrackerPollInterval = "1m" +contractConfigConfirmations = 3 +observationSource = """ + %s +""" +` + ocr2vrfJobSpecTemplate = ` +type = "offchainreporting2" +schemaVersion = 1 +name = "ocr2 vrf spec" +maxTaskDuration = "10s" +contractID = "%s" +ocrKeyBundleID = "%s" +relay = "evm" +pluginType = "ocr2vrf" +transmitterID = "%s" +forwardingAllowed = %t + +[relayConfig] +chainID = %d +fromBlock = %d +sendingKeys = [%s] + +[pluginConfig] +dkgEncryptionPublicKey = "%s" +dkgSigningPublicKey = "%s" +dkgKeyID = "%s" +dkgContractAddress = "%s" + +vrfCoordinatorAddress = "%s" +linkEthFeedAddress = "%s" +` + voterTurnoutDataSourceTemplate = ` +// data source 1 +ds1 [type=bridge name="%s"]; +ds1_parse [type=jsonparse path="data,result"]; +ds1_multiply [type=multiply times=100]; + +// data source 2 +ds2 [type=http method=POST url="%s" requestData="{\\"hi\\": \\"hello\\"}"]; +ds2_parse [type=jsonparse path="turnout"]; +ds2_multiply [type=multiply times=100]; + +ds1 -> ds1_parse -> ds1_multiply -> answer1; +ds2 -> ds2_parse -> ds2_multiply -> answer1; + +answer1 [type=median index=0]; +answer2 [type=bridge name="%s" index=1]; +` + + simpleFetchDataSourceTemplate = ` +// data source 1 +ds1 [type=http method=GET url="%s" allowunrestrictednetworkaccess="true"]; +ds1_parse [type=jsonparse path="USD" lax=%t]; +ds1_multiply [type=multiply times=100]; +ds1 -> ds1_parse -> ds1_multiply; +` + minimalNonBootstrapTemplate = ` + type = "offchainreporting" + schemaVersion = 1 + contractAddress = "%s" + p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] + isBootstrapPeer = false + transmitterAddress = "%s" + keyBundleID = "%s" + observationTimeout = "10s" + evmChainID = "0" + observationSource = """ +ds1 [type=http method=GET url="%s" allowunrestrictednetworkaccess="true" %s]; +ds1_parse [type=jsonparse path="USD" lax=true]; +ds1 -> ds1_parse; +""" +` + 
minimalBootstrapTemplate = ` + type = "offchainreporting" + schemaVersion = 1 + contractAddress = "%s" + evmChainID = "0" + isBootstrapPeer = true +` + ocrJobSpecText = ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "%s" +evmChainID = "0" +p2pPeerID = "%s" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +keyBundleID = "%s" +monitoringEndpoint = "chain.link:4321" +transmitterAddress = "%s" +observationTimeout = "10s" +blockchainTimeout = "20s" +contractConfigTrackerSubscribeInterval = "2m" +contractConfigTrackerPollInterval = "1m" +contractConfigConfirmations = 3 +observationSource = """ + // data source 1 + ds1 [type=bridge name="%s"]; + ds1_parse [type=jsonparse path="one,two"]; + ds1_multiply [type=multiply times=1.23]; + + // data source 2 + ds2 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData="{\\"hi\\": \\"hello\\"}"]; + ds2_parse [type=jsonparse path="three,four"]; + ds2_multiply [type=multiply times=4.56]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0]; + answer2 [type=bridge name="%s" index=1]; +""" +` +) + +func makeOCRJobSpec(t *testing.T, transmitterAddress common.Address, b1, b2 string) *job.Job { + t.Helper() + + peerID := cltest.DefaultP2PPeerID + ocrKeyID := cltest.DefaultOCRKeyBundleID + jobSpecText := fmt.Sprintf(ocrJobSpecText, testutils.NewAddress().Hex(), peerID, ocrKeyID, transmitterAddress.Hex(), b1, b2) + + dbSpec := job.Job{ + ExternalJobID: uuid.New(), + } + err := toml.Unmarshal([]byte(jobSpecText), &dbSpec) + require.NoError(t, err) + var ocrspec job.OCROracleSpec + err = toml.Unmarshal([]byte(jobSpecText), &ocrspec) + require.NoError(t, err) + dbSpec.OCROracleSpec = &ocrspec + + return &dbSpec +} + +// `require.Equal` currently has broken handling of `time.Time` values, so we have +// to do equality comparisons of these structs 
manually. +// +// https://github.com/stretchr/testify/issues/984 +func compareOCRJobSpecs(t *testing.T, expected, actual job.Job) { + require.NotNil(t, expected.OCROracleSpec) + require.Equal(t, expected.OCROracleSpec.ContractAddress, actual.OCROracleSpec.ContractAddress) + require.Equal(t, expected.OCROracleSpec.P2PV2Bootstrappers, actual.OCROracleSpec.P2PV2Bootstrappers) + require.Equal(t, expected.OCROracleSpec.IsBootstrapPeer, actual.OCROracleSpec.IsBootstrapPeer) + require.Equal(t, expected.OCROracleSpec.EncryptedOCRKeyBundleID, actual.OCROracleSpec.EncryptedOCRKeyBundleID) + require.Equal(t, expected.OCROracleSpec.TransmitterAddress, actual.OCROracleSpec.TransmitterAddress) + require.Equal(t, expected.OCROracleSpec.ObservationTimeout, actual.OCROracleSpec.ObservationTimeout) + require.Equal(t, expected.OCROracleSpec.BlockchainTimeout, actual.OCROracleSpec.BlockchainTimeout) + require.Equal(t, expected.OCROracleSpec.ContractConfigTrackerSubscribeInterval, actual.OCROracleSpec.ContractConfigTrackerSubscribeInterval) + require.Equal(t, expected.OCROracleSpec.ContractConfigTrackerPollInterval, actual.OCROracleSpec.ContractConfigTrackerPollInterval) + require.Equal(t, expected.OCROracleSpec.ContractConfigConfirmations, actual.OCROracleSpec.ContractConfigConfirmations) +} + +func makeMinimalHTTPOracleSpec(t *testing.T, db *sqlx.DB, cfg plugin.GeneralConfig, contractAddress, transmitterAddress, keyBundle, fetchUrl, timeout string) *job.Job { + var ocrSpec = job.OCROracleSpec{ + P2PV2Bootstrappers: pq.StringArray{}, + ObservationTimeout: models.Interval(10 * time.Second), + BlockchainTimeout: models.Interval(20 * time.Second), + ContractConfigTrackerSubscribeInterval: models.Interval(2 * time.Minute), + ContractConfigTrackerPollInterval: models.Interval(1 * time.Minute), + ContractConfigConfirmations: uint16(3), + } + var os = job.Job{ + Name: null.NewString("a job", true), + Type: job.OffchainReporting, + SchemaVersion: 1, + ExternalJobID: uuid.New(), + } + s := 
fmt.Sprintf(minimalNonBootstrapTemplate, contractAddress, transmitterAddress, keyBundle, fetchUrl, timeout) + keyStore := cltest.NewKeyStore(t, db, pgtest.NewQConfig(true)) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, Client: evmtest.NewEthClientMockWithDefaultChain(t), GeneralConfig: cfg, KeyStore: keyStore.Eth()}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + _, err := ocr.ValidatedOracleSpecToml(legacyChains, s) + require.NoError(t, err) + err = toml.Unmarshal([]byte(s), &os) + require.NoError(t, err) + err = toml.Unmarshal([]byte(s), &ocrSpec) + require.NoError(t, err) + os.OCROracleSpec = &ocrSpec + return &os +} + +func makeVoterTurnoutOCRJobSpec(t *testing.T, transmitterAddress common.Address, b1, b2 string) *job.Job { + t.Helper() + return MakeVoterTurnoutOCRJobSpecWithHTTPURL(t, transmitterAddress, "https://example.com/foo/bar", b1, b2) +} + +func MakeVoterTurnoutOCRJobSpecWithHTTPURL(t *testing.T, transmitterAddress common.Address, httpURL, b1, b2 string) *job.Job { + t.Helper() + ocrKeyID := cltest.DefaultOCRKeyBundleID + ds := fmt.Sprintf(voterTurnoutDataSourceTemplate, b1, httpURL, b2) + voterTurnoutJobSpec := fmt.Sprintf(ocrJobSpecTemplate, testutils.NewAddress().Hex(), ocrKeyID, transmitterAddress.Hex(), ds) + return makeOCRJobSpecFromToml(t, voterTurnoutJobSpec) +} + +func makeSimpleFetchOCRJobSpecWithHTTPURL(t *testing.T, transmitterAddress common.Address, httpURL string, lax bool) *job.Job { + t.Helper() + ocrKeyID := cltest.DefaultOCRKeyBundleID + ds := fmt.Sprintf(simpleFetchDataSourceTemplate, httpURL, lax) + simpleFetchJobSpec := fmt.Sprintf(ocrJobSpecTemplate, testutils.NewAddress().Hex(), ocrKeyID, transmitterAddress.Hex(), ds) + return makeOCRJobSpecFromToml(t, simpleFetchJobSpec) +} + +func makeOCRJobSpecFromToml(t *testing.T, jobSpecToml string) *job.Job { + t.Helper() + + id := uuid.New() + var jb = job.Job{ + Name: null.StringFrom(id.String()), + ExternalJobID: 
id, + } + err := toml.Unmarshal([]byte(jobSpecToml), &jb) + require.NoError(t, err) + var ocrspec job.OCROracleSpec + err = toml.Unmarshal([]byte(jobSpecToml), &ocrspec) + require.NoError(t, err) + if ocrspec.P2PV2Bootstrappers == nil { + ocrspec.P2PV2Bootstrappers = pq.StringArray{} + } + jb.OCROracleSpec = &ocrspec + + return &jb +} + +func makeOCR2VRFJobSpec(t testing.TB, ks keystore.Master, cfg plugin.GeneralConfig, + transmitter common.Address, chainID *big.Int, fromBlock uint64) *job.Job { + t.Helper() + + useForwarders := false + _, beacon := cltest.MustInsertRandomKey(t, ks.Eth()) + _, coordinator := cltest.MustInsertRandomKey(t, ks.Eth()) + _, feed := cltest.MustInsertRandomKey(t, ks.Eth()) + _, dkg := cltest.MustInsertRandomKey(t, ks.Eth()) + sendingKeys := fmt.Sprintf(`"%s"`, transmitter) + kb, _ := ks.OCR2().Create(chaintype.EVM) + + vrfKey := make([]byte, 32) + _, err := rand.Read(vrfKey) + require.NoError(t, err) + + ocr2vrfJob := fmt.Sprintf(ocr2vrfJobSpecTemplate, + beacon.String(), + kb.ID(), + "", + useForwarders, + chainID, + fromBlock, + sendingKeys, + ks.DKGEncrypt(), + ks.DKGSign(), + hex.EncodeToString(vrfKey[:]), + dkg.String(), + coordinator.String(), + feed.String(), + ) + jobSpec := makeOCR2JobSpecFromToml(t, ocr2vrfJob) + + return jobSpec +} + +func makeOCR2JobSpecFromToml(t testing.TB, jobSpecToml string) *job.Job { + t.Helper() + + id := uuid.New() + var jb = job.Job{ + Name: null.StringFrom(id.String()), + ExternalJobID: id, + } + err := toml.Unmarshal([]byte(jobSpecToml), &jb) + require.NoError(t, err, jobSpecToml) + var ocr2spec job.OCR2OracleSpec + err = toml.Unmarshal([]byte(jobSpecToml), &ocr2spec) + require.NoError(t, err) + if ocr2spec.P2PV2Bootstrappers == nil { + ocr2spec.P2PV2Bootstrappers = pq.StringArray{} + } + jb.OCR2OracleSpec = &ocr2spec + + return &jb +} diff --git a/core/services/job/job_orm_test.go b/core/services/job/job_orm_test.go new file mode 100644 index 00000000..04159bbc --- /dev/null +++ 
b/core/services/job/job_orm_test.go @@ -0,0 +1,1730 @@ +package job_test + +import ( + "context" + "database/sql" + "fmt" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/lib/pq" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore" + "github.com/goplugin/pluginv3.0/v2/core/services/blockheaderfeeder" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/directrequest" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" + ocr2validate "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrbootstrap" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + 
"github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" +) + +const mercuryOracleTOML = `name = 'PLI / ETH | 0x0000000000000000000000000000000000000000000000000000000000000001 | verifier_proxy 0x0000000000000000000000000000000000000001' +type = 'offchainreporting2' +schemaVersion = 1 +externalJobID = '00000000-0000-0000-0000-000000000001' +contractID = '0x0000000000000000000000000000000000000006' +transmitterID = '%s' +feedID = '%s' +relay = 'evm' +pluginType = 'mercury' +observationSource = """ + ds [type=http method=GET url="https://chain.link/ETH-USD"]; + ds_parse [type=jsonparse path="data.price" separator="."]; + ds_multiply [type=multiply times=100]; + ds -> ds_parse -> ds_multiply; +""" + +[relayConfig] +chainID = 1 +fromBlock = 1000 + +[pluginConfig] +serverURL = 'wss://localhost:8080' +serverPubKey = '8fa807463ad73f9ee855cfd60ba406dcf98a2855b3dd8af613107b0f6890a707' +` + +func TestORM(t *testing.T) { + t.Parallel() + config := configtest.NewTestGeneralConfig(t) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + ethKeyStore := keyStore.Eth() + + require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, keyStore.P2P().Add(cltest.DefaultP2PKey)) + + pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database()) + + orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database()) + borm := bridges.NewORM(db, logger.TestLogger(t), config.Database()) + + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + _, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + _, address := cltest.MustInsertRandomKey(t, ethKeyStore) + jb := makeOCRJobSpec(t, address, 
bridge.Name.String(), bridge2.Name.String()) + + t.Run("it creates job specs", func(t *testing.T) { + err := orm.CreateJob(jb) + require.NoError(t, err) + + var returnedSpec job.Job + var OCROracleSpec job.OCROracleSpec + + err = db.Get(&returnedSpec, "SELECT * FROM jobs WHERE jobs.id = $1", jb.ID) + require.NoError(t, err) + err = db.Get(&OCROracleSpec, "SELECT * FROM ocr_oracle_specs WHERE ocr_oracle_specs.id = $1", jb.OCROracleSpecID) + require.NoError(t, err) + returnedSpec.OCROracleSpec = &OCROracleSpec + compareOCRJobSpecs(t, *jb, returnedSpec) + }) + + t.Run("autogenerates external job ID if missing", func(t *testing.T) { + jb2 := makeOCRJobSpec(t, address, bridge.Name.String(), bridge2.Name.String()) + jb2.ExternalJobID = uuid.UUID{} + err := orm.CreateJob(jb2) + require.NoError(t, err) + + var returnedSpec job.Job + err = db.Get(&returnedSpec, "SELECT * FROM jobs WHERE jobs.id = $1", jb.ID) + require.NoError(t, err) + + assert.NotEqual(t, uuid.UUID{}, returnedSpec.ExternalJobID) + }) + + t.Run("it deletes jobs from the DB", func(t *testing.T) { + var dbSpecs []job.Job + + err := db.Select(&dbSpecs, "SELECT * FROM jobs") + require.NoError(t, err) + require.Len(t, dbSpecs, 2) + + err = orm.DeleteJob(jb.ID) + require.NoError(t, err) + + dbSpecs = []job.Job{} + err = db.Select(&dbSpecs, "SELECT * FROM jobs") + require.NoError(t, err) + require.Len(t, dbSpecs, 1) + }) + + t.Run("increase job spec error occurrence", func(t *testing.T) { + jb3 := makeOCRJobSpec(t, address, bridge.Name.String(), bridge2.Name.String()) + err := orm.CreateJob(jb3) + require.NoError(t, err) + var jobSpec job.Job + err = db.Get(&jobSpec, "SELECT * FROM jobs") + require.NoError(t, err) + + ocrSpecError1 := "ocr spec 1 errored" + ocrSpecError2 := "ocr spec 2 errored" + require.NoError(t, orm.RecordError(jobSpec.ID, ocrSpecError1)) + require.NoError(t, orm.RecordError(jobSpec.ID, ocrSpecError1)) + require.NoError(t, orm.RecordError(jobSpec.ID, ocrSpecError2)) + + var specErrors 
[]job.SpecError + err = db.Select(&specErrors, "SELECT * FROM job_spec_errors") + require.NoError(t, err) + require.Len(t, specErrors, 2) + + assert.Equal(t, specErrors[0].Occurrences, uint(2)) + assert.Equal(t, specErrors[1].Occurrences, uint(1)) + assert.True(t, specErrors[0].CreatedAt.Before(specErrors[0].UpdatedAt), "expected created_at (%s) to be before updated_at (%s)", specErrors[0].CreatedAt, specErrors[0].UpdatedAt) + assert.Equal(t, specErrors[0].Description, ocrSpecError1) + assert.Equal(t, specErrors[1].Description, ocrSpecError2) + assert.True(t, specErrors[1].CreatedAt.After(specErrors[0].UpdatedAt)) + var j2 job.Job + var OCROracleSpec job.OCROracleSpec + var jobSpecErrors []job.SpecError + + err = db.Get(&j2, "SELECT * FROM jobs WHERE jobs.id = $1", jobSpec.ID) + require.NoError(t, err) + err = db.Get(&OCROracleSpec, "SELECT * FROM ocr_oracle_specs WHERE ocr_oracle_specs.id = $1", j2.OCROracleSpecID) + require.NoError(t, err) + err = db.Select(&jobSpecErrors, "SELECT * FROM job_spec_errors WHERE job_spec_errors.job_id = $1", j2.ID) + require.NoError(t, err) + require.Len(t, jobSpecErrors, 2) + }) + + t.Run("finds job spec error by ID", func(t *testing.T) { + jb3 := makeOCRJobSpec(t, address, bridge.Name.String(), bridge2.Name.String()) + err := orm.CreateJob(jb3) + require.NoError(t, err) + var jobSpec job.Job + err = db.Get(&jobSpec, "SELECT * FROM jobs") + require.NoError(t, err) + + var specErrors []job.SpecError + err = db.Select(&specErrors, "SELECT * FROM job_spec_errors") + require.NoError(t, err) + require.Len(t, specErrors, 2) + + ocrSpecError1 := "ocr spec 3 errored" + ocrSpecError2 := "ocr spec 4 errored" + require.NoError(t, orm.RecordError(jobSpec.ID, ocrSpecError1)) + require.NoError(t, orm.RecordError(jobSpec.ID, ocrSpecError2)) + + var updatedSpecError []job.SpecError + + err = db.Select(&updatedSpecError, "SELECT * FROM job_spec_errors ORDER BY id ASC") + require.NoError(t, err) + require.Len(t, updatedSpecError, 4) + + 
assert.Equal(t, uint(1), updatedSpecError[2].Occurrences) + assert.Equal(t, uint(1), updatedSpecError[3].Occurrences) + assert.Equal(t, ocrSpecError1, updatedSpecError[2].Description) + assert.Equal(t, ocrSpecError2, updatedSpecError[3].Description) + + dbSpecErr1, err := orm.FindSpecError(updatedSpecError[2].ID) + require.NoError(t, err) + dbSpecErr2, err := orm.FindSpecError(updatedSpecError[3].ID) + require.NoError(t, err) + + assert.Equal(t, uint(1), dbSpecErr1.Occurrences) + assert.Equal(t, uint(1), dbSpecErr2.Occurrences) + assert.Equal(t, ocrSpecError1, dbSpecErr1.Description) + assert.Equal(t, ocrSpecError2, dbSpecErr2.Description) + }) + + t.Run("creates a job with a direct request spec", func(t *testing.T) { + drSpec := fmt.Sprintf(` + type = "directrequest" + schemaVersion = 1 + evmChainID = "0" + name = "example eth request event spec" + contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" + externalJobID = "%s" + observationSource = """ + ds1 [type=http method=GET url="http://example.com" allowunrestrictednetworkaccess="true"]; + ds1_merge [type=merge left="{}"] + ds1_parse [type=jsonparse path="USD"]; + ds1_multiply [type=multiply times=100]; + ds1 -> ds1_parse -> ds1_multiply; + """ + `, uuid.New()) + + drJob, err := directrequest.ValidatedDirectRequestSpec(drSpec) + require.NoError(t, err) + err = orm.CreateJob(&drJob) + require.NoError(t, err) + }) + + t.Run("creates webhook specs along with external_initiator_webhook_specs", func(t *testing.T) { + eiFoo := cltest.MustInsertExternalInitiator(t, borm) + eiBar := cltest.MustInsertExternalInitiator(t, borm) + + eiWS := []webhook.TOMLWebhookSpecExternalInitiator{ + {Name: eiFoo.Name, Spec: cltest.JSONFromString(t, `{}`)}, + {Name: eiBar.Name, Spec: cltest.JSONFromString(t, `{"bar": 1}`)}, + } + eim := webhook.NewExternalInitiatorManager(db, nil, logger.TestLogger(t), config.Database()) + jb, err := 
webhook.ValidatedWebhookSpec(testspecs.GenerateWebhookSpec(testspecs.WebhookSpecParams{ExternalInitiators: eiWS}).Toml(), eim) + require.NoError(t, err) + + err = orm.CreateJob(&jb) + require.NoError(t, err) + + cltest.AssertCount(t, db, "external_initiator_webhook_specs", 2) + }) + + t.Run("it creates and deletes records for blockhash store jobs", func(t *testing.T) { + bhsJob, err := blockhashstore.ValidatedSpec( + testspecs.GenerateBlockhashStoreSpec(testspecs.BlockhashStoreSpecParams{}).Toml()) + require.NoError(t, err) + + err = orm.CreateJob(&bhsJob) + require.NoError(t, err) + savedJob, err := orm.FindJob(testutils.Context(t), bhsJob.ID) + require.NoError(t, err) + require.Equal(t, bhsJob.ID, savedJob.ID) + require.Equal(t, bhsJob.Type, savedJob.Type) + require.Equal(t, bhsJob.BlockhashStoreSpec.ID, savedJob.BlockhashStoreSpec.ID) + require.Equal(t, bhsJob.BlockhashStoreSpec.CoordinatorV1Address, savedJob.BlockhashStoreSpec.CoordinatorV1Address) + require.Equal(t, bhsJob.BlockhashStoreSpec.CoordinatorV2Address, savedJob.BlockhashStoreSpec.CoordinatorV2Address) + require.Equal(t, bhsJob.BlockhashStoreSpec.CoordinatorV2PlusAddress, savedJob.BlockhashStoreSpec.CoordinatorV2PlusAddress) + require.Equal(t, bhsJob.BlockhashStoreSpec.WaitBlocks, savedJob.BlockhashStoreSpec.WaitBlocks) + require.Equal(t, bhsJob.BlockhashStoreSpec.LookbackBlocks, savedJob.BlockhashStoreSpec.LookbackBlocks) + require.Equal(t, bhsJob.BlockhashStoreSpec.HeartbeatPeriod, savedJob.BlockhashStoreSpec.HeartbeatPeriod) + require.Equal(t, bhsJob.BlockhashStoreSpec.BlockhashStoreAddress, savedJob.BlockhashStoreSpec.BlockhashStoreAddress) + require.Equal(t, bhsJob.BlockhashStoreSpec.TrustedBlockhashStoreAddress, savedJob.BlockhashStoreSpec.TrustedBlockhashStoreAddress) + require.Equal(t, bhsJob.BlockhashStoreSpec.TrustedBlockhashStoreBatchSize, savedJob.BlockhashStoreSpec.TrustedBlockhashStoreBatchSize) + require.Equal(t, bhsJob.BlockhashStoreSpec.PollPeriod, 
savedJob.BlockhashStoreSpec.PollPeriod) + require.Equal(t, bhsJob.BlockhashStoreSpec.RunTimeout, savedJob.BlockhashStoreSpec.RunTimeout) + require.Equal(t, bhsJob.BlockhashStoreSpec.EVMChainID, savedJob.BlockhashStoreSpec.EVMChainID) + require.Equal(t, bhsJob.BlockhashStoreSpec.FromAddresses, savedJob.BlockhashStoreSpec.FromAddresses) + err = orm.DeleteJob(bhsJob.ID) + require.NoError(t, err) + _, err = orm.FindJob(testutils.Context(t), bhsJob.ID) + require.Error(t, err) + }) + + t.Run("it creates and deletes records for blockheaderfeeder jobs", func(t *testing.T) { + bhsJob, err := blockheaderfeeder.ValidatedSpec( + testspecs.GenerateBlockHeaderFeederSpec(testspecs.BlockHeaderFeederSpecParams{}).Toml()) + require.NoError(t, err) + + err = orm.CreateJob(&bhsJob) + require.NoError(t, err) + savedJob, err := orm.FindJob(testutils.Context(t), bhsJob.ID) + require.NoError(t, err) + require.Equal(t, bhsJob.ID, savedJob.ID) + require.Equal(t, bhsJob.Type, savedJob.Type) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.ID, savedJob.BlockHeaderFeederSpec.ID) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.CoordinatorV1Address, savedJob.BlockHeaderFeederSpec.CoordinatorV1Address) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.CoordinatorV2Address, savedJob.BlockHeaderFeederSpec.CoordinatorV2Address) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.CoordinatorV2PlusAddress, savedJob.BlockHeaderFeederSpec.CoordinatorV2PlusAddress) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.WaitBlocks, savedJob.BlockHeaderFeederSpec.WaitBlocks) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.LookbackBlocks, savedJob.BlockHeaderFeederSpec.LookbackBlocks) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.BlockhashStoreAddress, savedJob.BlockHeaderFeederSpec.BlockhashStoreAddress) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.BatchBlockhashStoreAddress, savedJob.BlockHeaderFeederSpec.BatchBlockhashStoreAddress) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.PollPeriod, 
savedJob.BlockHeaderFeederSpec.PollPeriod) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.RunTimeout, savedJob.BlockHeaderFeederSpec.RunTimeout) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.EVMChainID, savedJob.BlockHeaderFeederSpec.EVMChainID) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.FromAddresses, savedJob.BlockHeaderFeederSpec.FromAddresses) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.GetBlockhashesBatchSize, savedJob.BlockHeaderFeederSpec.GetBlockhashesBatchSize) + require.Equal(t, bhsJob.BlockHeaderFeederSpec.StoreBlockhashesBatchSize, savedJob.BlockHeaderFeederSpec.StoreBlockhashesBatchSize) + err = orm.DeleteJob(bhsJob.ID) + require.NoError(t, err) + _, err = orm.FindJob(testutils.Context(t), bhsJob.ID) + require.Error(t, err) + }) +} + +func TestORM_DeleteJob_DeletesAssociatedRecords(t *testing.T) { + t.Parallel() + config := configtest.NewGeneralConfig(t, nil) + + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, keyStore.P2P().Add(cltest.DefaultP2PKey)) + + lggr := logger.TestLogger(t) + pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, lggr, config.Database()) + jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database()) + scopedConfig := evmtest.NewChainScopedConfig(t, config) + korm := keeper.NewORM(db, logger.TestLogger(t), scopedConfig.Database()) + + t.Run("it deletes records for offchainreporting jobs", func(t *testing.T) { + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + _, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + + _, address := cltest.MustInsertRandomKey(t, keyStore.Eth()) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()}) + legacyChains := 
evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + jb, err := ocr.ValidatedOracleSpecToml(legacyChains, testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + TransmitterAddress: address.Hex(), + DS1BridgeName: bridge.Name.String(), + DS2BridgeName: bridge2.Name.String(), + }).Toml()) + require.NoError(t, err) + + err = jobORM.CreateJob(&jb) + require.NoError(t, err) + + cltest.AssertCount(t, db, "ocr_oracle_specs", 1) + cltest.AssertCount(t, db, "pipeline_specs", 1) + + err = jobORM.DeleteJob(jb.ID) + require.NoError(t, err) + cltest.AssertCount(t, db, "ocr_oracle_specs", 0) + cltest.AssertCount(t, db, "pipeline_specs", 0) + cltest.AssertCount(t, db, "jobs", 0) + }) + + t.Run("it deletes records for keeper jobs", func(t *testing.T) { + registry, keeperJob := cltest.MustInsertKeeperRegistry(t, db, korm, keyStore.Eth(), 0, 1, 20) + scoped := evmtest.NewChainScopedConfig(t, config) + cltest.MustInsertUpkeepForRegistry(t, db, scoped.Database(), registry) + + cltest.AssertCount(t, db, "keeper_specs", 1) + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 1) + + err := jobORM.DeleteJob(keeperJob.ID) + require.NoError(t, err) + cltest.AssertCount(t, db, "keeper_specs", 0) + cltest.AssertCount(t, db, "keeper_registries", 0) + cltest.AssertCount(t, db, "upkeep_registrations", 0) + cltest.AssertCount(t, db, "jobs", 0) + }) + + t.Run("it creates and deletes records for vrf jobs", func(t *testing.T) { + key, err := keyStore.VRF().Create() + require.NoError(t, err) + pk := key.PublicKey + jb, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{PublicKey: pk.String()}).Toml()) + require.NoError(t, err) + + err = jobORM.CreateJob(&jb) + require.NoError(t, err) + cltest.AssertCount(t, db, "vrf_specs", 1) + cltest.AssertCount(t, db, "jobs", 1) + err = jobORM.DeleteJob(jb.ID) + require.NoError(t, err) + cltest.AssertCount(t, db, "vrf_specs", 0) + cltest.AssertCount(t, db, "jobs", 0) + }) + 
+ t.Run("it deletes records for webhook jobs", func(t *testing.T) { + ei := cltest.MustInsertExternalInitiator(t, bridges.NewORM(db, logger.TestLogger(t), config.Database())) + jb, webhookSpec := cltest.MustInsertWebhookSpec(t, db) + _, err := db.Exec(`INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) VALUES ($1,$2,$3)`, ei.ID, webhookSpec.ID, `{"ei": "foo", "name": "webhookSpecTwoEIs"}`) + require.NoError(t, err) + + err = jobORM.DeleteJob(jb.ID) + require.NoError(t, err) + cltest.AssertCount(t, db, "webhook_specs", 0) + cltest.AssertCount(t, db, "external_initiator_webhook_specs", 0) + cltest.AssertCount(t, db, "jobs", 0) + }) + + t.Run("does not allow to delete external initiators if they have referencing external_initiator_webhook_specs", func(t *testing.T) { + // create new db because this will rollback transaction and poison it + db := pgtest.NewSqlxDB(t) + ei := cltest.MustInsertExternalInitiator(t, bridges.NewORM(db, logger.TestLogger(t), config.Database())) + _, webhookSpec := cltest.MustInsertWebhookSpec(t, db) + _, err := db.Exec(`INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) VALUES ($1,$2,$3)`, ei.ID, webhookSpec.ID, `{"ei": "foo", "name": "webhookSpecTwoEIs"}`) + require.NoError(t, err) + + _, err = db.Exec(`DELETE FROM external_initiators`) + require.EqualError(t, err, "ERROR: update or delete on table \"external_initiators\" violates foreign key constraint \"external_initiator_webhook_specs_external_initiator_id_fkey\" on table \"external_initiator_webhook_specs\" (SQLSTATE 23503)") + }) +} + +func TestORM_CreateJob_VRFV2(t *testing.T) { + config := configtest.NewTestGeneralConfig(t) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey)) + + lggr := logger.TestLogger(t) + pipelineORM := pipeline.NewORM(db, lggr, config.Database(), 
config.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, lggr, config.Database()) + + jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database()) + + fromAddresses := []string{cltest.NewEIP55Address().String(), cltest.NewEIP55Address().String()} + jb, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec( + testspecs.VRFSpecParams{ + RequestedConfsDelay: 10, + FromAddresses: fromAddresses, + ChunkSize: 25, + BackoffInitialDelay: time.Minute, + BackoffMaxDelay: time.Hour, + GasLanePrice: assets.GWei(100), + VRFOwnerAddress: "0x32891BD79647DC9136Fc0a59AAB48c7825eb624c", + }). + Toml()) + require.NoError(t, err) + + require.NoError(t, jobORM.CreateJob(&jb)) + cltest.AssertCount(t, db, "vrf_specs", 1) + cltest.AssertCount(t, db, "jobs", 1) + var requestedConfsDelay int64 + require.NoError(t, db.Get(&requestedConfsDelay, `SELECT requested_confs_delay FROM vrf_specs LIMIT 1`)) + require.Equal(t, int64(10), requestedConfsDelay) + var batchFulfillmentEnabled bool + require.NoError(t, db.Get(&batchFulfillmentEnabled, `SELECT batch_fulfillment_enabled FROM vrf_specs LIMIT 1`)) + require.False(t, batchFulfillmentEnabled) + var customRevertsPipelineEnabled bool + require.NoError(t, db.Get(&customRevertsPipelineEnabled, `SELECT custom_reverts_pipeline_enabled FROM vrf_specs LIMIT 1`)) + require.False(t, customRevertsPipelineEnabled) + var batchFulfillmentGasMultiplier float64 + require.NoError(t, db.Get(&batchFulfillmentGasMultiplier, `SELECT batch_fulfillment_gas_multiplier FROM vrf_specs LIMIT 1`)) + require.Equal(t, float64(1.0), batchFulfillmentGasMultiplier) + var requestTimeout time.Duration + require.NoError(t, db.Get(&requestTimeout, `SELECT request_timeout FROM vrf_specs LIMIT 1`)) + require.Equal(t, 24*time.Hour, requestTimeout) + var backoffInitialDelay time.Duration + require.NoError(t, db.Get(&backoffInitialDelay, `SELECT backoff_initial_delay FROM vrf_specs LIMIT 1`)) + require.Equal(t, time.Minute, 
backoffInitialDelay) + var backoffMaxDelay time.Duration + require.NoError(t, db.Get(&backoffMaxDelay, `SELECT backoff_max_delay FROM vrf_specs LIMIT 1`)) + require.Equal(t, time.Hour, backoffMaxDelay) + var chunkSize int + require.NoError(t, db.Get(&chunkSize, `SELECT chunk_size FROM vrf_specs LIMIT 1`)) + require.Equal(t, 25, chunkSize) + var gasLanePrice assets.Wei + require.NoError(t, db.Get(&gasLanePrice, `SELECT gas_lane_price FROM vrf_specs LIMIT 1`)) + require.Equal(t, jb.VRFSpec.GasLanePrice, &gasLanePrice) + var fa pq.ByteaArray + require.NoError(t, db.Get(&fa, `SELECT from_addresses FROM vrf_specs LIMIT 1`)) + var actual []string + for _, b := range fa { + actual = append(actual, common.BytesToAddress(b).String()) + } + require.ElementsMatch(t, fromAddresses, actual) + var vrfOwnerAddress ethkey.EIP55Address + require.NoError(t, db.Get(&vrfOwnerAddress, `SELECT vrf_owner_address FROM vrf_specs LIMIT 1`)) + require.Equal(t, "0x32891BD79647DC9136Fc0a59AAB48c7825eb624c", vrfOwnerAddress.Address().String()) + require.NoError(t, jobORM.DeleteJob(jb.ID)) + cltest.AssertCount(t, db, "vrf_specs", 0) + cltest.AssertCount(t, db, "jobs", 0) + + jb, err = vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{RequestTimeout: 1 * time.Hour}).Toml()) + require.NoError(t, err) + require.NoError(t, jobORM.CreateJob(&jb)) + cltest.AssertCount(t, db, "vrf_specs", 1) + cltest.AssertCount(t, db, "jobs", 1) + require.NoError(t, db.Get(&requestedConfsDelay, `SELECT requested_confs_delay FROM vrf_specs LIMIT 1`)) + require.Equal(t, int64(0), requestedConfsDelay) + require.NoError(t, db.Get(&requestTimeout, `SELECT request_timeout FROM vrf_specs LIMIT 1`)) + require.Equal(t, 1*time.Hour, requestTimeout) + require.NoError(t, jobORM.DeleteJob(jb.ID)) + cltest.AssertCount(t, db, "vrf_specs", 0) + cltest.AssertCount(t, db, "jobs", 0) +} + +func TestORM_CreateJob_VRFV2Plus(t *testing.T) { + config := configtest.NewTestGeneralConfig(t) + db := 
pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey)) + + lggr := logger.TestLogger(t) + pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, lggr, config.Database()) + jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database()) + + fromAddresses := []string{cltest.NewEIP55Address().String(), cltest.NewEIP55Address().String()} + jb, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec( + testspecs.VRFSpecParams{ + VRFVersion: vrfcommon.V2Plus, + RequestedConfsDelay: 10, + FromAddresses: fromAddresses, + ChunkSize: 25, + BackoffInitialDelay: time.Minute, + BackoffMaxDelay: time.Hour, + GasLanePrice: assets.GWei(100), + CustomRevertsPipelineEnabled: true, + }). + Toml()) + require.NoError(t, err) + + require.NoError(t, jobORM.CreateJob(&jb)) + cltest.AssertCount(t, db, "vrf_specs", 1) + cltest.AssertCount(t, db, "jobs", 1) + var requestedConfsDelay int64 + require.NoError(t, db.Get(&requestedConfsDelay, `SELECT requested_confs_delay FROM vrf_specs LIMIT 1`)) + require.Equal(t, int64(10), requestedConfsDelay) + var batchFulfillmentEnabled bool + require.NoError(t, db.Get(&batchFulfillmentEnabled, `SELECT batch_fulfillment_enabled FROM vrf_specs LIMIT 1`)) + require.False(t, batchFulfillmentEnabled) + var customRevertsPipelineEnabled bool + require.NoError(t, db.Get(&customRevertsPipelineEnabled, `SELECT custom_reverts_pipeline_enabled FROM vrf_specs LIMIT 1`)) + require.True(t, customRevertsPipelineEnabled) + var batchFulfillmentGasMultiplier float64 + require.NoError(t, db.Get(&batchFulfillmentGasMultiplier, `SELECT batch_fulfillment_gas_multiplier FROM vrf_specs LIMIT 1`)) + require.Equal(t, float64(1.0), batchFulfillmentGasMultiplier) + var requestTimeout time.Duration + require.NoError(t, db.Get(&requestTimeout, `SELECT request_timeout FROM vrf_specs LIMIT 1`)) + 
require.Equal(t, 24*time.Hour, requestTimeout) + var backoffInitialDelay time.Duration + require.NoError(t, db.Get(&backoffInitialDelay, `SELECT backoff_initial_delay FROM vrf_specs LIMIT 1`)) + require.Equal(t, time.Minute, backoffInitialDelay) + var backoffMaxDelay time.Duration + require.NoError(t, db.Get(&backoffMaxDelay, `SELECT backoff_max_delay FROM vrf_specs LIMIT 1`)) + require.Equal(t, time.Hour, backoffMaxDelay) + var chunkSize int + require.NoError(t, db.Get(&chunkSize, `SELECT chunk_size FROM vrf_specs LIMIT 1`)) + require.Equal(t, 25, chunkSize) + var gasLanePrice assets.Wei + require.NoError(t, db.Get(&gasLanePrice, `SELECT gas_lane_price FROM vrf_specs LIMIT 1`)) + require.Equal(t, jb.VRFSpec.GasLanePrice, &gasLanePrice) + var fa pq.ByteaArray + require.NoError(t, db.Get(&fa, `SELECT from_addresses FROM vrf_specs LIMIT 1`)) + var actual []string + for _, b := range fa { + actual = append(actual, common.BytesToAddress(b).String()) + } + require.ElementsMatch(t, fromAddresses, actual) + var vrfOwnerAddress ethkey.EIP55Address + require.Error(t, db.Get(&vrfOwnerAddress, `SELECT vrf_owner_address FROM vrf_specs LIMIT 1`)) + require.NoError(t, jobORM.DeleteJob(jb.ID)) + cltest.AssertCount(t, db, "vrf_specs", 0) + cltest.AssertCount(t, db, "jobs", 0) + + jb, err = vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + VRFVersion: vrfcommon.V2Plus, + RequestTimeout: 1 * time.Hour, + FromAddresses: fromAddresses, + }).Toml()) + require.NoError(t, err) + require.NoError(t, jobORM.CreateJob(&jb)) + cltest.AssertCount(t, db, "vrf_specs", 1) + cltest.AssertCount(t, db, "jobs", 1) + require.NoError(t, db.Get(&requestedConfsDelay, `SELECT requested_confs_delay FROM vrf_specs LIMIT 1`)) + require.Equal(t, int64(0), requestedConfsDelay) + require.NoError(t, db.Get(&requestTimeout, `SELECT request_timeout FROM vrf_specs LIMIT 1`)) + require.Equal(t, 1*time.Hour, requestTimeout) + require.NoError(t, jobORM.DeleteJob(jb.ID)) + 
cltest.AssertCount(t, db, "vrf_specs", 0) + cltest.AssertCount(t, db, "jobs", 0) +} + +func TestORM_CreateJob_OCRBootstrap(t *testing.T) { + config := configtest.NewTestGeneralConfig(t) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey)) + + lggr := logger.TestLogger(t) + pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, lggr, config.Database()) + jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database()) + + jb, err := ocrbootstrap.ValidatedBootstrapSpecToml(testspecs.GetOCRBootstrapSpec()) + require.NoError(t, err) + + err = jobORM.CreateJob(&jb) + require.NoError(t, err) + cltest.AssertCount(t, db, "bootstrap_specs", 1) + cltest.AssertCount(t, db, "jobs", 1) + var relay string + require.NoError(t, db.Get(&relay, `SELECT relay FROM bootstrap_specs LIMIT 1`)) + require.Equal(t, "evm", relay) + + require.NoError(t, jobORM.DeleteJob(jb.ID)) + cltest.AssertCount(t, db, "bootstrap_specs", 0) + cltest.AssertCount(t, db, "jobs", 0) +} + +func TestORM_CreateJob_EVMChainID_Validation(t *testing.T) { + config := configtest.NewGeneralConfig(t, nil) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + + lggr := logger.TestLogger(t) + pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, lggr, config.Database()) + + jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database()) + + t.Run("evm chain id validation for ocr works", func(t *testing.T) { + jb := job.Job{ + Type: job.OffchainReporting, + OCROracleSpec: &job.OCROracleSpec{}, + } + assert.Equal(t, "CreateJobFailed: evm chain id must be defined", jobORM.CreateJob(&jb).Error()) + }) + + t.Run("evm chain id validation for direct request works", func(t *testing.T) { + jb := job.Job{ 
+ Type: job.DirectRequest, + DirectRequestSpec: &job.DirectRequestSpec{}, + } + assert.Equal(t, "CreateJobFailed: evm chain id must be defined", jobORM.CreateJob(&jb).Error()) + }) + + t.Run("evm chain id validation for flux monitor works", func(t *testing.T) { + jb := job.Job{ + Type: job.FluxMonitor, + FluxMonitorSpec: &job.FluxMonitorSpec{}, + } + assert.Equal(t, "CreateJobFailed: evm chain id must be defined", jobORM.CreateJob(&jb).Error()) + }) + + t.Run("evm chain id validation for keepers works", func(t *testing.T) { + jb := job.Job{ + Type: job.Keeper, + KeeperSpec: &job.KeeperSpec{}, + } + assert.Equal(t, "CreateJobFailed: evm chain id must be defined", jobORM.CreateJob(&jb).Error()) + }) + + t.Run("evm chain id validation for vrf works", func(t *testing.T) { + jb := job.Job{ + Type: job.VRF, + VRFSpec: &job.VRFSpec{}, + } + assert.Equal(t, "CreateJobFailed: evm chain id must be defined", jobORM.CreateJob(&jb).Error()) + }) + + t.Run("evm chain id validation for block hash store works", func(t *testing.T) { + jb := job.Job{ + Type: job.BlockhashStore, + BlockhashStoreSpec: &job.BlockhashStoreSpec{}, + } + assert.Equal(t, "CreateJobFailed: evm chain id must be defined", jobORM.CreateJob(&jb).Error()) + }) + + t.Run("evm chain id validation for block header feeder works", func(t *testing.T) { + jb := job.Job{ + Type: job.BlockHeaderFeeder, + BlockHeaderFeederSpec: &job.BlockHeaderFeederSpec{}, + } + assert.Equal(t, "CreateJobFailed: evm chain id must be defined", jobORM.CreateJob(&jb).Error()) + }) + + t.Run("evm chain id validation for legacy gas station server spec works", func(t *testing.T) { + jb := job.Job{ + Type: job.LegacyGasStationServer, + LegacyGasStationServerSpec: &job.LegacyGasStationServerSpec{}, + } + assert.Equal(t, "CreateJobFailed: evm chain id must be defined", jobORM.CreateJob(&jb).Error()) + }) + + t.Run("evm chain id validation for legacy gas station sidecar spec works", func(t *testing.T) { + jb := job.Job{ + Type: 
job.LegacyGasStationSidecar, + LegacyGasStationSidecarSpec: &job.LegacyGasStationSidecarSpec{}, + } + assert.Equal(t, "CreateJobFailed: evm chain id must be defined", jobORM.CreateJob(&jb).Error()) + }) +} + +func TestORM_CreateJob_OCR_DuplicatedContractAddress(t *testing.T) { + customChainID := big.New(testutils.NewRandomEVMChainID()) + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + enabled := true + c.EVM = append(c.EVM, &evmcfg.EVMConfig{ + ChainID: customChainID, + Chain: evmcfg.Defaults(customChainID), + Enabled: &enabled, + Nodes: evmcfg.EVMNodes{{}}, + }) + }) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey)) + + lggr := logger.TestLogger(t) + pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, lggr, config.Database()) + + jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database()) + + // defaultChainID is deprecated + defaultChainID := customChainID + _, address := cltest.MustInsertRandomKey(t, keyStore.Eth()) + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + _, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + + // Custom Chain Job + externalJobID := uuid.NullUUID{UUID: uuid.New(), Valid: true} + spec := testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + Name: "job3", + EVMChainID: customChainID.String(), + DS1BridgeName: bridge.Name.String(), + DS2BridgeName: bridge2.Name.String(), + TransmitterAddress: address.Hex(), + JobID: externalJobID.UUID.String(), + }) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + jb, err := ocr.ValidatedOracleSpecToml(legacyChains, spec.Toml()) + 
require.NoError(t, err) + + t.Run("with a set chain id", func(t *testing.T) { + err = jobORM.CreateJob(&jb) // Add job with custom chain id + require.NoError(t, err) + + cltest.AssertCount(t, db, "ocr_oracle_specs", 1) + cltest.AssertCount(t, db, "jobs", 1) + + externalJobID = uuid.NullUUID{UUID: uuid.New(), Valid: true} + spec.JobID = externalJobID.UUID.String() + jba, err := ocr.ValidatedOracleSpecToml(legacyChains, spec.Toml()) + require.NoError(t, err) + err = jobORM.CreateJob(&jba) // Try to add duplicate job with default id + require.Error(t, err) + assert.Equal(t, fmt.Sprintf("CreateJobFailed: a job with contract address %s already exists for chain ID %s", jb.OCROracleSpec.ContractAddress, defaultChainID.String()), err.Error()) + + externalJobID = uuid.NullUUID{UUID: uuid.New(), Valid: true} + spec.JobID = externalJobID.UUID.String() + jb2, err := ocr.ValidatedOracleSpecToml(legacyChains, spec.Toml()) + require.NoError(t, err) + + err = jobORM.CreateJob(&jb2) // Try to add duplicate job with custom id + require.Error(t, err) + assert.Equal(t, fmt.Sprintf("CreateJobFailed: a job with contract address %s already exists for chain ID %s", jb2.OCROracleSpec.ContractAddress, customChainID), err.Error()) + }) +} + +func TestORM_CreateJob_OCR2_DuplicatedContractAddress(t *testing.T) { + customChainID := big.New(testutils.NewRandomEVMChainID()) + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + enabled := true + c.EVM = append(c.EVM, &evmcfg.EVMConfig{ + ChainID: customChainID, + Chain: evmcfg.Defaults(customChainID), + Enabled: &enabled, + Nodes: evmcfg.EVMNodes{{}}, + }) + }) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + require.NoError(t, keyStore.OCR2().Add(cltest.DefaultOCR2Key)) + + lggr := logger.TestLogger(t) + pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, lggr, config.Database()) + + 
jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database()) + + _, address := cltest.MustInsertRandomKey(t, keyStore.Eth()) + + jb, err := ocr2validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), testspecs.GetOCR2EVMSpecMinimal()) + require.NoError(t, err) + + const juelsPerFeeCoinSource = ` + ds [type=http method=GET url="https://chain.link/ETH-USD"]; + ds_parse [type=jsonparse path="data.price" separator="."]; + ds_multiply [type=multiply times=100]; + ds -> ds_parse -> ds_multiply;` + + jb.Name = null.StringFrom("Job 1") + jb.OCR2OracleSpec.TransmitterID = null.StringFrom(address.String()) + jb.OCR2OracleSpec.PluginConfig["juelsPerFeeCoinSource"] = juelsPerFeeCoinSource + + err = jobORM.CreateJob(&jb) + require.NoError(t, err) + + jb2, err := ocr2validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), testspecs.GetOCR2EVMSpecMinimal()) + require.NoError(t, err) + + jb2.Name = null.StringFrom("Job with same chain id & contract address") + jb2.OCR2OracleSpec.TransmitterID = null.StringFrom(address.String()) + jb.OCR2OracleSpec.PluginConfig["juelsPerFeeCoinSource"] = juelsPerFeeCoinSource + + err = jobORM.CreateJob(&jb2) + require.Error(t, err) + + jb3, err := ocr2validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), testspecs.GetOCR2EVMSpecMinimal()) + require.NoError(t, err) + jb3.Name = null.StringFrom("Job with different chain id & same contract address") + jb3.OCR2OracleSpec.TransmitterID = null.StringFrom(address.String()) + jb3.OCR2OracleSpec.RelayConfig["chainID"] = customChainID.Int64() + jb.OCR2OracleSpec.PluginConfig["juelsPerFeeCoinSource"] = juelsPerFeeCoinSource + + err = jobORM.CreateJob(&jb3) + require.Error(t, err) +} + +func TestORM_CreateJob_OCR2_Sending_Keys_Transmitter_Keys_Validations(t *testing.T) { + customChainID := big.New(testutils.NewRandomEVMChainID()) + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + enabled := true + c.EVM = 
append(c.EVM, &evmcfg.EVMConfig{ + ChainID: customChainID, + Chain: evmcfg.Defaults(customChainID), + Enabled: &enabled, + Nodes: evmcfg.EVMNodes{{}}, + }) + }) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + require.NoError(t, keyStore.OCR2().Add(cltest.DefaultOCR2Key)) + + lggr := logger.TestLogger(t) + pipelineORM := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, lggr, config.Database()) + + jobORM := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database()) + + jb, err := ocr2validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), testspecs.GetOCR2EVMSpecMinimal()) + require.NoError(t, err) + + t.Run("sending keys or transmitterID must be defined", func(t *testing.T) { + jb.OCR2OracleSpec.TransmitterID = null.String{} + assert.Equal(t, "CreateJobFailed: neither sending keys nor transmitter ID is defined", jobORM.CreateJob(&jb).Error()) + }) + + _, address := cltest.MustInsertRandomKey(t, keyStore.Eth()) + t.Run("sending keys validation works properly", func(t *testing.T) { + jb.OCR2OracleSpec.TransmitterID = null.String{} + _, address2 := cltest.MustInsertRandomKey(t, keyStore.Eth()) + jb.OCR2OracleSpec.RelayConfig["sendingKeys"] = interface{}([]any{address.String(), address2.String(), common.HexToAddress("0X0").String()}) + assert.Equal(t, "CreateJobFailed: no EVM key matching: \"0x0000000000000000000000000000000000000000\": no such sending key exists", jobORM.CreateJob(&jb).Error()) + + jb.OCR2OracleSpec.RelayConfig["sendingKeys"] = interface{}([]any{1, 2, 3}) + assert.Equal(t, "CreateJobFailed: sending keys are of wrong type", jobORM.CreateJob(&jb).Error()) + }) + + t.Run("sending keys and transmitter ID can't both be defined", func(t *testing.T) { + jb.OCR2OracleSpec.TransmitterID = null.StringFrom(address.String()) + jb.OCR2OracleSpec.RelayConfig["sendingKeys"] = interface{}([]any{address.String()}) + assert.Equal(t, 
"CreateJobFailed: sending keys and transmitter ID can't both be defined", jobORM.CreateJob(&jb).Error()) + }) + + t.Run("transmitter validation works", func(t *testing.T) { + jb.OCR2OracleSpec.TransmitterID = null.StringFrom("transmitterID that doesn't have a match in key store") + jb.OCR2OracleSpec.RelayConfig["sendingKeys"] = nil + assert.Equal(t, "CreateJobFailed: no EVM key matching: \"transmitterID that doesn't have a match in key store\": no such transmitter key exists", jobORM.CreateJob(&jb).Error()) + }) +} + +func TestORM_ValidateKeyStoreMatch(t *testing.T) { + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) {}) + + keyStore := cltest.NewKeyStore(t, pgtest.NewSqlxDB(t), config.Database()) + require.NoError(t, keyStore.OCR2().Add(cltest.DefaultOCR2Key)) + + var jb job.Job + { + var err error + jb, err = ocr2validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), testspecs.GetOCR2EVMSpecMinimal()) + require.NoError(t, err) + } + + t.Run("test ETH key validation", func(t *testing.T) { + jb.OCR2OracleSpec.Relay = relay.EVM + err := job.ValidateKeyStoreMatch(jb.OCR2OracleSpec, keyStore, "bad key") + require.EqualError(t, err, "no EVM key matching: \"bad key\"") + + _, evmKey := cltest.MustInsertRandomKey(t, keyStore.Eth()) + err = job.ValidateKeyStoreMatch(jb.OCR2OracleSpec, keyStore, evmKey.String()) + require.NoError(t, err) + }) + + t.Run("test Cosmos key validation", func(t *testing.T) { + jb.OCR2OracleSpec.Relay = relay.Cosmos + err := job.ValidateKeyStoreMatch(jb.OCR2OracleSpec, keyStore, "bad key") + require.EqualError(t, err, "no Cosmos key matching: \"bad key\"") + + cosmosKey, err := keyStore.Cosmos().Create() + require.NoError(t, err) + err = job.ValidateKeyStoreMatch(jb.OCR2OracleSpec, keyStore, cosmosKey.ID()) + require.NoError(t, err) + }) + + t.Run("test Solana key validation", func(t *testing.T) { + jb.OCR2OracleSpec.Relay = relay.Solana + + err := job.ValidateKeyStoreMatch(jb.OCR2OracleSpec, 
keyStore, "bad key") + require.EqualError(t, err, "no Solana key matching: \"bad key\"") + + solanaKey, err := keyStore.Solana().Create() + require.NoError(t, err) + err = job.ValidateKeyStoreMatch(jb.OCR2OracleSpec, keyStore, solanaKey.ID()) + require.NoError(t, err) + }) + + t.Run("test Starknet key validation", func(t *testing.T) { + jb.OCR2OracleSpec.Relay = relay.StarkNet + err := job.ValidateKeyStoreMatch(jb.OCR2OracleSpec, keyStore, "bad key") + require.EqualError(t, err, "no Starknet key matching: \"bad key\"") + + starkNetKey, err := keyStore.StarkNet().Create() + require.NoError(t, err) + err = job.ValidateKeyStoreMatch(jb.OCR2OracleSpec, keyStore, starkNetKey.ID()) + require.NoError(t, err) + }) + + t.Run("test Mercury ETH key validation", func(t *testing.T) { + jb.OCR2OracleSpec.PluginType = types.Mercury + err := job.ValidateKeyStoreMatch(jb.OCR2OracleSpec, keyStore, "bad key") + require.EqualError(t, err, "no CSA key matching: \"bad key\"") + + csaKey, err := keyStore.CSA().Create() + require.NoError(t, err) + err = job.ValidateKeyStoreMatch(jb.OCR2OracleSpec, keyStore, csaKey.ID()) + require.NoError(t, err) + }) +} + +func Test_FindJobs(t *testing.T) { + t.Parallel() + + config := configtest.NewTestGeneralConfig(t) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, keyStore.P2P().Add(cltest.DefaultP2PKey)) + + pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database()) + + orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database()) + + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + _, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + + _, address := cltest.MustInsertRandomKey(t, keyStore.Eth()) + relayExtenders 
// Test_FindJob covers the various job lookup paths: by internal ID, by
// external job ID, by (contract address, chain ID) for OCR, and by contract
// ID with and without a Mercury feed ID for OCR2.
func Test_FindJob(t *testing.T) {
	t.Parallel()

	// Create a config with multiple EVM chains. The test fixtures already load 1337
	// Additional chains will need additional fixture statements to add a chain to evm_chains.
	config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) {
		chainID := big.NewI(1337)
		enabled := true
		c.EVM = append(c.EVM, &evmcfg.EVMConfig{
			ChainID: chainID,
			Chain:   evmcfg.Defaults(chainID),
			Enabled: &enabled,
			Nodes:   evmcfg.EVMNodes{{}},
		})
	})

	db := pgtest.NewSqlxDB(t)
	keyStore := cltest.NewKeyStore(t, db, config.Database())
	require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey))
	require.NoError(t, keyStore.P2P().Add(cltest.DefaultP2PKey))
	require.NoError(t, keyStore.CSA().Add(cltest.DefaultCSAKey))

	pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
	bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())

	orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())

	_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
	_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())

	// Create two jobs. Each job has the same Transmitter Address but on a different chain.
	// Must uniquely name the OCR Specs to properly insert a new job in the job table.
	externalJobID := uuid.New()
	_, address := cltest.MustInsertRandomKey(t, keyStore.Eth())
	relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
	legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
	// NOTE(review): this local shadows the imported `job` package for the rest
	// of the function body.
	job, err := ocr.ValidatedOracleSpecToml(legacyChains,
		testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{
			JobID:              externalJobID.String(),
			Name:               "orig ocr spec",
			TransmitterAddress: address.Hex(),
			DS1BridgeName:      bridge.Name.String(),
			DS2BridgeName:      bridge2.Name.String(),
		}).Toml(),
	)
	require.NoError(t, err)

	jobSameAddress, err := ocr.ValidatedOracleSpecToml(legacyChains,
		testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{
			JobID:              uuid.New().String(),
			TransmitterAddress: address.Hex(),
			Name:               "ocr spec dup addr",
			EVMChainID:         "1337",
			DS1BridgeName:      bridge.Name.String(),
			DS2BridgeName:      bridge2.Name.String(),
		}).Toml(),
	)
	require.NoError(t, err)

	jobOCR2, err := ocr2validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), testspecs.GetOCR2EVMSpecMinimal())
	require.NoError(t, err)
	jobOCR2.OCR2OracleSpec.TransmitterID = null.StringFrom(address.String())

	const juelsPerFeeCoinSource = `
	ds          [type=http method=GET url="https://chain.link/ETH-USD"];
	ds_parse    [type=jsonparse path="data.price" separator="."];
	ds_multiply [type=multiply times=100];
	ds -> ds_parse -> ds_multiply;`

	jobOCR2.OCR2OracleSpec.PluginConfig["juelsPerFeeCoinSource"] = juelsPerFeeCoinSource

	// Two Mercury specs sharing a contract ID but with distinct feed IDs.
	ocr2WithFeedID1 := "0x0001000000000000000000000000000000000000000000000000000000000001"
	ocr2WithFeedID2 := "0x0001000000000000000000000000000000000000000000000000000000000002"
	jobOCR2WithFeedID1, err := ocr2validate.ValidatedOracleSpecToml(
		config.OCR2(),
		config.Insecure(),
		fmt.Sprintf(mercuryOracleTOML, cltest.DefaultCSAKey.PublicKeyString(), ocr2WithFeedID1),
	)
	require.NoError(t, err)

	jobOCR2WithFeedID2, err := ocr2validate.ValidatedOracleSpecToml(
		config.OCR2(),
		config.Insecure(),
		fmt.Sprintf(mercuryOracleTOML, cltest.DefaultCSAKey.PublicKeyString(), ocr2WithFeedID2),
	)
	jobOCR2WithFeedID2.ExternalJobID = uuid.New()
	jobOCR2WithFeedID2.Name = null.StringFrom("new name")
	require.NoError(t, err)

	err = orm.CreateJob(&job)
	require.NoError(t, err)

	err = orm.CreateJob(&jobSameAddress)
	require.NoError(t, err)

	err = orm.CreateJob(&jobOCR2)
	require.NoError(t, err)

	err = orm.CreateJob(&jobOCR2WithFeedID1)
	require.NoError(t, err)

	// second ocr2 job with same contract id but different feed id
	err = orm.CreateJob(&jobOCR2WithFeedID2)
	require.NoError(t, err)

	t.Run("by id", func(t *testing.T) {
		ctx, cancel := context.WithTimeout(testutils.Context(t), 5*time.Second)
		defer cancel()
		jb, err2 := orm.FindJob(ctx, job.ID)
		require.NoError(t, err2)

		assert.Equal(t, jb.ID, job.ID)
		assert.Equal(t, jb.Name, job.Name)

		require.Greater(t, jb.PipelineSpecID, int32(0))
		require.NotNil(t, jb.PipelineSpec)
		require.NotNil(t, jb.OCROracleSpecID)
		require.NotNil(t, jb.OCROracleSpec)
	})

	t.Run("by external job id", func(t *testing.T) {
		jb, err2 := orm.FindJobByExternalJobID(externalJobID)
		require.NoError(t, err2)

		assert.Equal(t, jb.ID, job.ID)
		assert.Equal(t, jb.Name, job.Name)

		require.Greater(t, jb.PipelineSpecID, int32(0))
		require.NotNil(t, jb.PipelineSpec)
		require.NotNil(t, jb.OCROracleSpecID)
		require.NotNil(t, jb.OCROracleSpec)
	})

	t.Run("by address", func(t *testing.T) {
		jbID, err2 := orm.FindJobIDByAddress(job.OCROracleSpec.ContractAddress, job.OCROracleSpec.EVMChainID)
		require.NoError(t, err2)

		assert.Equal(t, job.ID, jbID)

		_, err2 = orm.FindJobIDByAddress("not-existing", big.NewI(0))
		require.Error(t, err2)
		require.ErrorIs(t, err2, sql.ErrNoRows)
	})

	t.Run("by address yet chain scoped", func(t *testing.T) {
		commonAddr := jobSameAddress.OCROracleSpec.ContractAddress

		// Find job ID for job on chain 1337 with common address.
		jbID, err2 := orm.FindJobIDByAddress(commonAddr, jobSameAddress.OCROracleSpec.EVMChainID)
		require.NoError(t, err2)

		assert.Equal(t, jobSameAddress.ID, jbID)

		// Find job ID for job on default evm chain with common address.
		jbID, err2 = orm.FindJobIDByAddress(commonAddr, job.OCROracleSpec.EVMChainID)
		require.NoError(t, err2)

		assert.Equal(t, job.ID, jbID)
	})

	t.Run("by contract id without feed id", func(t *testing.T) {
		contractID := "0x613a38AC1659769640aaE063C651F48E0250454C"

		// Find job ID for ocr2 job without feedID.
		jbID, err2 := orm.FindOCR2JobIDByAddress(contractID, nil)
		require.NoError(t, err2)

		assert.Equal(t, jobOCR2.ID, jbID)
	})

	t.Run("by contract id with valid feed id", func(t *testing.T) {
		contractID := "0x0000000000000000000000000000000000000006"
		feedID := common.HexToHash(ocr2WithFeedID1)

		// Find job ID for ocr2 job with feed ID
		jbID, err2 := orm.FindOCR2JobIDByAddress(contractID, &feedID)
		require.NoError(t, err2)

		assert.Equal(t, jobOCR2WithFeedID1.ID, jbID)
	})

	t.Run("with duplicate contract id but different feed id", func(t *testing.T) {
		contractID := "0x0000000000000000000000000000000000000006"
		feedID := common.HexToHash(ocr2WithFeedID2)

		// Find job ID for ocr2 job with feed ID
		jbID, err2 := orm.FindOCR2JobIDByAddress(contractID, &feedID)
		require.NoError(t, err2)

		assert.Equal(t, jobOCR2WithFeedID2.ID, jbID)
	})
}
// Test_FindJobsByPipelineSpecIDs checks lookup of jobs by their pipeline spec
// IDs, including the empty-result case and a second ORM instance.
func Test_FindJobsByPipelineSpecIDs(t *testing.T) {
	t.Parallel()

	config := configtest.NewTestGeneralConfig(t)
	db := pgtest.NewSqlxDB(t)
	keyStore := cltest.NewKeyStore(t, db, config.Database())
	require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey))

	pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
	bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
	orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())

	jb, err := directrequest.ValidatedDirectRequestSpec(testspecs.GetDirectRequestSpec())
	require.NoError(t, err)
	jb.DirectRequestSpec.EVMChainID = big.NewI(0)

	err = orm.CreateJob(&jb)
	require.NoError(t, err)

	t.Run("with jobs", func(t *testing.T) {
		jbs, err2 := orm.FindJobsByPipelineSpecIDs([]int32{jb.PipelineSpecID})
		require.NoError(t, err2)
		assert.Len(t, jbs, 1)

		assert.Equal(t, jb.ID, jbs[0].ID)
		assert.Equal(t, jb.Name, jbs[0].Name)

		require.Greater(t, jbs[0].PipelineSpecID, int32(0))
		require.Equal(t, jb.PipelineSpecID, jbs[0].PipelineSpecID)
		require.NotNil(t, jbs[0].PipelineSpec)
	})

	t.Run("without jobs", func(t *testing.T) {
		// -1 is never a valid pipeline spec ID, so no rows should match.
		jbs, err2 := orm.FindJobsByPipelineSpecIDs([]int32{-1})
		require.NoError(t, err2)
		assert.Len(t, jbs, 0)
	})

	t.Run("with chainID disabled", func(t *testing.T) {
		// A fresh ORM over the same DB still finds the job.
		orm2 := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())

		jbs, err2 := orm2.FindJobsByPipelineSpecIDs([]int32{jb.PipelineSpecID})
		require.NoError(t, err2)
		assert.Len(t, jbs, 1)
	})
}
// Test_FindPipelineRuns checks PipelineRuns without a job filter: empty
// result before any run exists, then run fields and the preloaded pipeline
// spec after inserting one run.
func Test_FindPipelineRuns(t *testing.T) {
	t.Parallel()

	config := configtest.NewTestGeneralConfig(t)
	db := pgtest.NewSqlxDB(t)
	keyStore := cltest.NewKeyStore(t, db, config.Database())
	require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey))
	require.NoError(t, keyStore.P2P().Add(cltest.DefaultP2PKey))

	pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
	bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
	relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
	legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
	orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())

	_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
	_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())

	externalJobID := uuid.New()
	_, address := cltest.MustInsertRandomKey(t, keyStore.Eth())
	jb, err := ocr.ValidatedOracleSpecToml(legacyChains,
		testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{
			JobID:              externalJobID.String(),
			TransmitterAddress: address.Hex(),
			DS1BridgeName:      bridge.Name.String(),
			DS2BridgeName:      bridge2.Name.String(),
		}).Toml(),
	)
	require.NoError(t, err)

	err = orm.CreateJob(&jb)
	require.NoError(t, err)

	t.Run("with no pipeline runs", func(t *testing.T) {
		runs, count, err2 := orm.PipelineRuns(nil, 0, 10)
		require.NoError(t, err2)
		assert.Equal(t, count, 0)
		assert.Empty(t, runs)
	})

	t.Run("with a pipeline run", func(t *testing.T) {
		run := mustInsertPipelineRun(t, pipelineORM, jb)

		runs, count, err2 := orm.PipelineRuns(nil, 0, 10)
		require.NoError(t, err2)

		assert.Equal(t, count, 1)
		actual := runs[0]

		// Test pipeline run fields
		assert.Equal(t, run.State, actual.State)
		assert.Equal(t, run.PipelineSpecID, actual.PipelineSpecID)

		// Test preloaded pipeline spec
		require.NotNil(t, jb.PipelineSpec)
		assert.Equal(t, jb.PipelineSpec.ID, actual.PipelineSpec.ID)
		assert.Equal(t, jb.ID, actual.PipelineSpec.JobID)
	})
}
// Test_PipelineRunsByJobID mirrors Test_FindPipelineRuns but filters runs by
// a specific job ID.
func Test_PipelineRunsByJobID(t *testing.T) {
	t.Parallel()

	config := configtest.NewTestGeneralConfig(t)
	db := pgtest.NewSqlxDB(t)

	keyStore := cltest.NewKeyStore(t, db, config.Database())
	require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey))
	require.NoError(t, keyStore.P2P().Add(cltest.DefaultP2PKey))

	pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
	bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
	relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
	legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
	orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())

	_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
	_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())

	externalJobID := uuid.New()
	_, address := cltest.MustInsertRandomKey(t, keyStore.Eth())
	jb, err := ocr.ValidatedOracleSpecToml(legacyChains,
		testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{
			JobID:              externalJobID.String(),
			TransmitterAddress: address.Hex(),
			DS1BridgeName:      bridge.Name.String(),
			DS2BridgeName:      bridge2.Name.String(),
		}).Toml(),
	)
	require.NoError(t, err)

	err = orm.CreateJob(&jb)
	require.NoError(t, err)

	t.Run("with no pipeline runs", func(t *testing.T) {
		runs, count, err2 := orm.PipelineRuns(&jb.ID, 0, 10)
		require.NoError(t, err2)
		assert.Equal(t, count, 0)
		assert.Empty(t, runs)
	})

	t.Run("with a pipeline run", func(t *testing.T) {
		run := mustInsertPipelineRun(t, pipelineORM, jb)

		runs, count, err2 := orm.PipelineRuns(&jb.ID, 0, 10)
		require.NoError(t, err2)

		assert.Equal(t, 1, count)
		actual := runs[0]

		// Test pipeline run fields
		assert.Equal(t, run.State, actual.State)
		assert.Equal(t, run.PipelineSpecID, actual.PipelineSpecID)

		// Test preloaded pipeline spec
		assert.Equal(t, jb.PipelineSpec.ID, actual.PipelineSpec.ID)
		assert.Equal(t, jb.ID, actual.PipelineSpec.JobID)
	})
}
// Test_FindPipelineRunIDsByJobID stresses FindPipelineRunIDsByJobID against
// thousands of runs spread across many jobs, to exercise the internal
// 1000-row query batching (including the fallback COUNT path when the most
// recent batch contains no rows for the requested job).
func Test_FindPipelineRunIDsByJobID(t *testing.T) {
	var jb job.Job

	config := configtest.NewTestGeneralConfig(t)
	// A heavyweight DB is used because this test inserts thousands of rows.
	_, db := heavyweight.FullTestDBV2(t, nil)

	keyStore := cltest.NewKeyStore(t, db, config.Database())
	require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey))
	require.NoError(t, keyStore.P2P().Add(cltest.DefaultP2PKey))

	lggr := logger.TestLogger(t)
	pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
	bridgesORM := bridges.NewORM(db, lggr, config.Database())
	relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
	legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
	orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())

	_, address := cltest.MustInsertRandomKey(t, keyStore.Eth())

	// Create 11 jobs, each with a unique contract address.
	jobs := make([]job.Job, 11)
	for j := 0; j < len(jobs); j++ {
		_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
		_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
		jobID := uuid.New().String()
		key, err := ethkey.NewV2()

		require.NoError(t, err)
		jb, err = ocr.ValidatedOracleSpecToml(legacyChains,
			testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{
				JobID:              jobID,
				Name:               fmt.Sprintf("Job #%v", jobID),
				DS1BridgeName:      bridge.Name.String(),
				DS2BridgeName:      bridge2.Name.String(),
				TransmitterAddress: address.Hex(),
				ContractAddress:    key.Address.String(),
			}).Toml())

		require.NoError(t, err)

		err = orm.CreateJob(&jb)
		require.NoError(t, err)
		jobs[j] = jb
	}

	// Round-robin 2500 runs over jobs[0..9]; jobs[len-1] (== jb) deliberately
	// receives none so the "no pipeline runs" case below holds.
	for i, j := 0, 0; i < 2500; i++ {
		mustInsertPipelineRun(t, pipelineORM, jobs[j])
		j++
		if j == len(jobs)-1 {
			j = 0
		}
	}

	t.Run("with no pipeline runs", func(t *testing.T) {
		runIDs, err := orm.FindPipelineRunIDsByJobID(jb.ID, 0, 10)
		require.NoError(t, err)
		assert.Empty(t, runIDs)
	})

	t.Run("with a pipeline run", func(t *testing.T) {
		run := mustInsertPipelineRun(t, pipelineORM, jb)

		runIDs, err := orm.FindPipelineRunIDsByJobID(jb.ID, 0, 10)
		require.NoError(t, err)
		require.Len(t, runIDs, 1)

		assert.Equal(t, run.ID, runIDs[0])
	})

	// Internally these queries are batched by 1000, this tests case requiring concatenation
	// of more than 1 batch
	t.Run("with batch concatenation limit 10", func(t *testing.T) {
		runIDs, err := orm.FindPipelineRunIDsByJobID(jobs[3].ID, 95, 10)
		require.NoError(t, err)
		require.Len(t, runIDs, 10)
		// Consecutive runs of the same job are len(jobs)-1 IDs apart.
		assert.Equal(t, int64(4*(len(jobs)-1)), runIDs[3]-runIDs[7])
	})

	// Internally these queries are batched by 1000, this tests case requiring concatenation
	// of more than 1 batch
	t.Run("with batch concatenation limit 100", func(t *testing.T) {
		runIDs, err := orm.FindPipelineRunIDsByJobID(jobs[3].ID, 95, 100)
		require.NoError(t, err)
		require.Len(t, runIDs, 100)
		assert.Equal(t, int64(67*(len(jobs)-1)), runIDs[12]-runIDs[79])
	})

	// Push jobs[3]'s runs out of the most recent 1000 rows.
	for i := 0; i < 2100; i++ {
		mustInsertPipelineRun(t, pipelineORM, jb)
	}

	// There is a COUNT query which doesn't run unless the query for the most recent 1000 rows
	// returns empty. This can happen if the job id being requested hasn't run in a while,
	// but many other jobs have run since.
	t.Run("with first batch empty, over limit", func(t *testing.T) {
		runIDs, err := orm.FindPipelineRunIDsByJobID(jobs[3].ID, 0, 25)
		require.NoError(t, err)
		require.Len(t, runIDs, 25)
		assert.Equal(t, int64(16*(len(jobs)-1)), runIDs[7]-runIDs[23])
	})

	// Same as previous, but where there are fewer matching jobs than the limit
	t.Run("with first batch empty, under limit", func(t *testing.T) {
		runIDs, err := orm.FindPipelineRunIDsByJobID(jobs[3].ID, 143, 190)
		require.NoError(t, err)
		require.Len(t, runIDs, 107)
		assert.Equal(t, int64(16*(len(jobs)-1)), runIDs[7]-runIDs[23])
	})
}
// Test_FindPipelineRunsByIDs checks lookup of pipeline runs by their run IDs,
// including the not-found case.
func Test_FindPipelineRunsByIDs(t *testing.T) {
	t.Parallel()

	config := configtest.NewTestGeneralConfig(t)
	db := pgtest.NewSqlxDB(t)

	keyStore := cltest.NewKeyStore(t, db, config.Database())
	require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey))
	require.NoError(t, keyStore.P2P().Add(cltest.DefaultP2PKey))

	pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
	bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
	relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()})
	legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders)
	orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())

	_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())
	_, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database())

	externalJobID := uuid.New()
	_, address := cltest.MustInsertRandomKey(t, keyStore.Eth())
	jb, err := ocr.ValidatedOracleSpecToml(legacyChains,
		testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{
			JobID:              externalJobID.String(),
			TransmitterAddress: address.Hex(),
			DS1BridgeName:      bridge.Name.String(),
			DS2BridgeName:      bridge2.Name.String(),
		}).Toml(),
	)
	require.NoError(t, err)

	err = orm.CreateJob(&jb)
	require.NoError(t, err)

	t.Run("with no pipeline runs", func(t *testing.T) {
		runs, err2 := orm.FindPipelineRunsByIDs([]int64{-1})
		require.NoError(t, err2)
		assert.Empty(t, runs)
	})

	t.Run("with a pipeline run", func(t *testing.T) {
		run := mustInsertPipelineRun(t, pipelineORM, jb)

		actual, err2 := orm.FindPipelineRunsByIDs([]int64{run.ID})
		require.NoError(t, err2)
		require.Len(t, actual, 1)

		actualRun := actual[0]
		// Test pipeline run fields
		assert.Equal(t, run.State, actualRun.State)
		assert.Equal(t, run.PipelineSpecID, actualRun.PipelineSpecID)

		// Test preloaded pipeline spec
		assert.Equal(t, jb.PipelineSpec.ID, actualRun.PipelineSpec.ID)
		assert.Equal(t, jb.ID, actualRun.PipelineSpec.JobID)
	})
}
// Test_FindPipelineRunByID checks single-run lookup: sql.ErrNoRows (with a
// zero-value Run) for a missing ID, and full fields plus preloaded spec for
// an existing run.
func Test_FindPipelineRunByID(t *testing.T) {
	t.Parallel()

	config := configtest.NewTestGeneralConfig(t)
	db := pgtest.NewSqlxDB(t)

	keyStore := cltest.NewKeyStore(t, db, config.Database())
	err := keyStore.OCR().Add(cltest.DefaultOCRKey)
	require.NoError(t, err)

	pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
	bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
	orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())

	jb, err := directrequest.ValidatedDirectRequestSpec(testspecs.GetDirectRequestSpec())
	require.NoError(t, err)

	err = orm.CreateJob(&jb)
	require.NoError(t, err)

	t.Run("with no pipeline run", func(t *testing.T) {
		run, err2 := orm.FindPipelineRunByID(-1)
		assert.Equal(t, run, pipeline.Run{})
		require.ErrorIs(t, err2, sql.ErrNoRows)
	})

	t.Run("with a pipeline run", func(t *testing.T) {
		run := mustInsertPipelineRun(t, pipelineORM, jb)

		actual, err2 := orm.FindPipelineRunByID(run.ID)
		require.NoError(t, err2)

		actualRun := actual
		// Test pipeline run fields
		assert.Equal(t, run.State, actualRun.State)
		assert.Equal(t, run.PipelineSpecID, actualRun.PipelineSpecID)

		// Test preloaded pipeline spec
		assert.Equal(t, jb.PipelineSpec.ID, actualRun.PipelineSpec.ID)
		assert.Equal(t, jb.ID, actualRun.PipelineSpec.JobID)
	})
}
// Test_FindJobWithoutSpecErrors verifies that FindJobWithoutSpecErrors omits
// recorded spec errors while FindJobTx includes them.
func Test_FindJobWithoutSpecErrors(t *testing.T) {
	t.Parallel()

	config := configtest.NewTestGeneralConfig(t)
	db := pgtest.NewSqlxDB(t)

	keyStore := cltest.NewKeyStore(t, db, config.Database())
	err := keyStore.OCR().Add(cltest.DefaultOCRKey)
	require.NoError(t, err)

	pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns())
	bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database())
	orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database())

	jb, err := directrequest.ValidatedDirectRequestSpec(testspecs.GetDirectRequestSpec())
	require.NoError(t, err)

	err = orm.CreateJob(&jb)
	require.NoError(t, err)
	// Only one job exists, so a bare SELECT fetches it.
	var jobSpec job.Job
	err = db.Get(&jobSpec, "SELECT * FROM jobs")
	require.NoError(t, err)

	ocrSpecError1 := "ocr spec 1 errored"
	ocrSpecError2 := "ocr spec 2 errored"
	require.NoError(t, orm.RecordError(jobSpec.ID, ocrSpecError1))
	require.NoError(t, orm.RecordError(jobSpec.ID, ocrSpecError2))

	jb, err = orm.FindJobWithoutSpecErrors(jobSpec.ID)
	require.NoError(t, err)
	jbWithErrors, err := orm.FindJobTx(testutils.Context(t), jobSpec.ID)
	require.NoError(t, err)

	assert.Equal(t, len(jb.JobSpecErrors), 0)
	assert.Equal(t, len(jbWithErrors.JobSpecErrors), 2)
}
orm.RecordError(jobSpec.ID, ocrSpecError1)) + require.NoError(t, orm.RecordError(jobSpec.ID, ocrSpecError2)) + + jb, err = orm.FindJobWithoutSpecErrors(jobSpec.ID) + require.NoError(t, err) + jbWithErrors, err := orm.FindJobTx(testutils.Context(t), jobSpec.ID) + require.NoError(t, err) + + assert.Equal(t, len(jb.JobSpecErrors), 0) + assert.Equal(t, len(jbWithErrors.JobSpecErrors), 2) +} + +func Test_FindSpecErrorsByJobIDs(t *testing.T) { + t.Parallel() + + config := configtest.NewTestGeneralConfig(t) + db := pgtest.NewSqlxDB(t) + + keyStore := cltest.NewKeyStore(t, db, config.Database()) + err := keyStore.OCR().Add(cltest.DefaultOCRKey) + require.NoError(t, err) + + pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database()) + orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database()) + + jb, err := directrequest.ValidatedDirectRequestSpec(testspecs.GetDirectRequestSpec()) + require.NoError(t, err) + + err = orm.CreateJob(&jb) + require.NoError(t, err) + var jobSpec job.Job + err = db.Get(&jobSpec, "SELECT * FROM jobs") + require.NoError(t, err) + + ocrSpecError1 := "ocr spec 1 errored" + ocrSpecError2 := "ocr spec 2 errored" + require.NoError(t, orm.RecordError(jobSpec.ID, ocrSpecError1)) + require.NoError(t, orm.RecordError(jobSpec.ID, ocrSpecError2)) + + specErrs, err := orm.FindSpecErrorsByJobIDs([]int32{jobSpec.ID}) + require.NoError(t, err) + + assert.Equal(t, len(specErrs), 2) +} + +func Test_CountPipelineRunsByJobID(t *testing.T) { + t.Parallel() + + config := configtest.NewTestGeneralConfig(t) + db := pgtest.NewSqlxDB(t) + + keyStore := cltest.NewKeyStore(t, db, config.Database()) + require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, keyStore.P2P().Add(cltest.DefaultP2PKey)) + + pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), 
config.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, logger.TestLogger(t), config.Database()) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth()}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + orm := NewTestORM(t, db, pipelineORM, bridgesORM, keyStore, config.Database()) + + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + _, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + + externalJobID := uuid.New() + _, address := cltest.MustInsertRandomKey(t, keyStore.Eth()) + jb, err := ocr.ValidatedOracleSpecToml(legacyChains, + testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + JobID: externalJobID.String(), + TransmitterAddress: address.Hex(), + DS1BridgeName: bridge.Name.String(), + DS2BridgeName: bridge2.Name.String(), + }).Toml(), + ) + require.NoError(t, err) + + err = orm.CreateJob(&jb) + require.NoError(t, err) + + t.Run("with no pipeline runs", func(t *testing.T) { + count, err2 := orm.CountPipelineRunsByJobID(jb.ID) + require.NoError(t, err2) + assert.Equal(t, int32(0), count) + }) + + t.Run("with a pipeline run", func(t *testing.T) { + mustInsertPipelineRun(t, pipelineORM, jb) + + count, err2 := orm.CountPipelineRunsByJobID(jb.ID) + require.NoError(t, err2) + require.Equal(t, int32(1), count) + }) +} + +func mustInsertPipelineRun(t *testing.T, orm pipeline.ORM, j job.Job) pipeline.Run { + t.Helper() + + run := pipeline.Run{ + PipelineSpecID: j.PipelineSpecID, + State: pipeline.RunStatusRunning, + Outputs: pipeline.JSONSerializable{Valid: false}, + AllErrors: pipeline.RunErrors{}, + CreatedAt: time.Now(), + FinishedAt: null.Time{}, + } + err := orm.CreateRun(&run) + require.NoError(t, err) + return run +} diff --git a/core/services/job/job_pipeline_orm_integration_test.go b/core/services/job/job_pipeline_orm_integration_test.go new file mode 100644 index 
00000000..5a5a3e71 --- /dev/null +++ b/core/services/job/job_pipeline_orm_integration_test.go @@ -0,0 +1,200 @@ +package job_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +func clearJobsDb(t *testing.T, db *sqlx.DB) { + cltest.ClearDBTables(t, db, "flux_monitor_round_stats_v2", "jobs", "pipeline_runs", "pipeline_specs", "pipeline_task_runs") +} + +func TestPipelineORM_Integration(t *testing.T) { + const DotStr = ` + // data source 1 + ds1 [type=bridge name=voter_turnout]; + ds1_parse [type=jsonparse path="one,two"]; + ds1_multiply [type=multiply times=1.23]; + + // data source 2 + ds2 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}>]; + ds2_parse [type=jsonparse path="three.four" separator="."]; + ds2_multiply [type=multiply times=4.56]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0]; + answer2 [type=bridge name=election_winner index=1]; + ` + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.HTTPRequest.DefaultTimeout = 
commonconfig.MustNewDuration(30 * time.Millisecond) + }) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + ethKeyStore := keyStore.Eth() + + _, transmitterAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey)) + + var specID int32 + + answer1 := &pipeline.MedianTask{ + AllowedFaults: "", + } + answer2 := &pipeline.BridgeTask{ + Name: "election_winner", + } + ds1_multiply := &pipeline.MultiplyTask{ + Times: "1.23", + } + ds1_parse := &pipeline.JSONParseTask{ + Path: "one,two", + } + ds1 := &pipeline.BridgeTask{ + Name: "voter_turnout", + } + ds2_multiply := &pipeline.MultiplyTask{ + Times: "4.56", + } + ds2_parse := &pipeline.JSONParseTask{ + Path: "three,four", + } + ds2 := &pipeline.HTTPTask{ + URL: "https://chain.link/voter_turnout/USA-2020", + Method: "GET", + RequestData: `{"hi": "hello"}`, + } + + answer1.BaseTask = pipeline.NewBaseTask( + 6, + "answer1", + []pipeline.TaskDependency{ + {PropagateResult: true, InputTask: pipeline.Task(ds1_multiply)}, + {PropagateResult: true, InputTask: pipeline.Task(ds2_multiply)}}, + nil, + 0) + answer2.BaseTask = pipeline.NewBaseTask(7, "answer2", nil, nil, 1) + ds1_multiply.BaseTask = pipeline.NewBaseTask( + 2, + "ds1_multiply", + []pipeline.TaskDependency{{PropagateResult: true, InputTask: pipeline.Task(ds1_parse)}}, + []pipeline.Task{answer1}, + 0) + ds2_multiply.BaseTask = pipeline.NewBaseTask( + 5, + "ds2_multiply", + []pipeline.TaskDependency{{PropagateResult: true, InputTask: pipeline.Task(ds2_parse)}}, + []pipeline.Task{answer1}, + 0) + ds1_parse.BaseTask = pipeline.NewBaseTask( + 1, + "ds1_parse", + []pipeline.TaskDependency{{PropagateResult: true, InputTask: pipeline.Task(ds1)}}, + []pipeline.Task{ds1_multiply}, + 0) + ds2_parse.BaseTask = pipeline.NewBaseTask( + 4, + "ds2_parse", + []pipeline.TaskDependency{{PropagateResult: true, InputTask: pipeline.Task(ds2)}}, + []pipeline.Task{ds2_multiply}, + 0) + ds1.BaseTask 
= pipeline.NewBaseTask(0, "ds1", nil, []pipeline.Task{ds1_parse}, 0) + ds2.BaseTask = pipeline.NewBaseTask(3, "ds2", nil, []pipeline.Task{ds2_parse}, 0) + expectedTasks := []pipeline.Task{ds1, ds1_parse, ds1_multiply, ds2, ds2_parse, ds2_multiply, answer1, answer2} + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + _, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + + t.Run("creates task DAGs", func(t *testing.T) { + clearJobsDb(t, db) + + orm := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + + p, err := pipeline.Parse(DotStr) + require.NoError(t, err) + + specID, err = orm.CreateSpec(*p, models.Interval(0)) + require.NoError(t, err) + + var pipelineSpecs []pipeline.Spec + sql := `SELECT * FROM pipeline_specs;` + err = db.Select(&pipelineSpecs, sql) + require.NoError(t, err) + require.Len(t, pipelineSpecs, 1) + require.Equal(t, specID, pipelineSpecs[0].ID) + require.Equal(t, DotStr, pipelineSpecs[0].DotDagSource) + + _, err = db.Exec(`DELETE FROM pipeline_specs`) + require.NoError(t, err) + }) + + t.Run("creates runs", func(t *testing.T) { + lggr := logger.TestLogger(t) + cfg := configtest.NewTestGeneralConfig(t) + clearJobsDb(t, db) + orm := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + btORM := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{Client: evmtest.NewEthClientMockWithDefaultChain(t), DB: db, GeneralConfig: config, KeyStore: ethKeyStore}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + runner := pipeline.NewRunner(orm, btORM, config.JobPipeline(), cfg.WebServer(), legacyChains, nil, nil, lggr, nil, nil) + + jobORM := NewTestORM(t, db, orm, btORM, keyStore, cfg.Database()) + + dbSpec := makeVoterTurnoutOCRJobSpec(t, transmitterAddress, bridge.Name.String(), 
bridge2.Name.String()) + + // Need a job in order to create a run + require.NoError(t, jobORM.CreateJob(dbSpec)) + + var pipelineSpecs []pipeline.Spec + sql := `SELECT * FROM pipeline_specs;` + require.NoError(t, db.Select(&pipelineSpecs, sql)) + require.Len(t, pipelineSpecs, 1) + require.Equal(t, dbSpec.PipelineSpecID, pipelineSpecs[0].ID) + pipelineSpecID := pipelineSpecs[0].ID + + // Create the run + runID, _, err := runner.ExecuteAndInsertFinishedRun(testutils.Context(t), pipelineSpecs[0], pipeline.NewVarsFrom(nil), lggr, true) + require.NoError(t, err) + + // Check the DB for the pipeline.Run + var pipelineRuns []pipeline.Run + sql = `SELECT * FROM pipeline_runs WHERE id = $1;` + err = db.Select(&pipelineRuns, sql, runID) + require.NoError(t, err) + require.Len(t, pipelineRuns, 1) + require.Equal(t, pipelineSpecID, pipelineRuns[0].PipelineSpecID) + require.Equal(t, runID, pipelineRuns[0].ID) + + // Check the DB for the pipeline.TaskRuns + var taskRuns []pipeline.TaskRun + sql = `SELECT * FROM pipeline_task_runs WHERE pipeline_run_id = $1;` + err = db.Select(&taskRuns, sql, runID) + require.NoError(t, err) + require.Len(t, taskRuns, len(expectedTasks)) + + for _, taskRun := range taskRuns { + assert.Equal(t, runID, taskRun.PipelineRunID) + assert.False(t, taskRun.Output.Valid) + assert.False(t, taskRun.Error.IsZero()) + } + }) +} diff --git a/core/services/job/mocks/orm.go b/core/services/job/mocks/orm.go new file mode 100644 index 00000000..984c34e8 --- /dev/null +++ b/core/services/job/mocks/orm.go @@ -0,0 +1,780 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + common "github.com/ethereum/go-ethereum/common" + big "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + + context "context" + + ethkey "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + + job "github.com/goplugin/pluginv3.0/v2/core/services/job" + + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + pipeline "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + + uuid "github.com/google/uuid" +) + +// ORM is an autogenerated mock type for the ORM type +type ORM struct { + mock.Mock +} + +// AssertBridgesExist provides a mock function with given fields: p +func (_m *ORM) AssertBridgesExist(p pipeline.Pipeline) error { + ret := _m.Called(p) + + if len(ret) == 0 { + panic("no return value specified for AssertBridgesExist") + } + + var r0 error + if rf, ok := ret.Get(0).(func(pipeline.Pipeline) error); ok { + r0 = rf(p) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Close provides a mock function with given fields: +func (_m *ORM) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CountPipelineRunsByJobID provides a mock function with given fields: jobID +func (_m *ORM) CountPipelineRunsByJobID(jobID int32) (int32, error) { + ret := _m.Called(jobID) + + if len(ret) == 0 { + panic("no return value specified for CountPipelineRunsByJobID") + } + + var r0 int32 + var r1 error + if rf, ok := ret.Get(0).(func(int32) (int32, error)); ok { + return rf(jobID) + } + if rf, ok := ret.Get(0).(func(int32) int32); ok { + r0 = rf(jobID) + } else { + r0 = ret.Get(0).(int32) + } + + if rf, ok := ret.Get(1).(func(int32) error); ok { + r1 = rf(jobID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateJob provides a mock function with given 
fields: jb, qopts +func (_m *ORM) CreateJob(jb *job.Job, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, jb) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateJob") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*job.Job, ...pg.QOpt) error); ok { + r0 = rf(jb, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteJob provides a mock function with given fields: id, qopts +func (_m *ORM) DeleteJob(id int32, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, id) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for DeleteJob") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int32, ...pg.QOpt) error); ok { + r0 = rf(id, qopts...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DismissError provides a mock function with given fields: ctx, errorID +func (_m *ORM) DismissError(ctx context.Context, errorID int64) error { + ret := _m.Called(ctx, errorID) + + if len(ret) == 0 { + panic("no return value specified for DismissError") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok { + r0 = rf(ctx, errorID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FindJob provides a mock function with given fields: ctx, id +func (_m *ORM) FindJob(ctx context.Context, id int32) (job.Job, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for FindJob") + } + + var r0 job.Job + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int32) (job.Job, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, int32) job.Job); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(job.Job) + } + + if rf, ok := ret.Get(1).(func(context.Context, int32) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindJobByExternalJobID provides a mock function with given fields: _a0, qopts +func (_m *ORM) FindJobByExternalJobID(_a0 uuid.UUID, qopts ...pg.QOpt) (job.Job, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for FindJobByExternalJobID") + } + + var r0 job.Job + var r1 error + if rf, ok := ret.Get(0).(func(uuid.UUID, ...pg.QOpt) (job.Job, error)); ok { + return rf(_a0, qopts...) + } + if rf, ok := ret.Get(0).(func(uuid.UUID, ...pg.QOpt) job.Job); ok { + r0 = rf(_a0, qopts...) + } else { + r0 = ret.Get(0).(job.Job) + } + + if rf, ok := ret.Get(1).(func(uuid.UUID, ...pg.QOpt) error); ok { + r1 = rf(_a0, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindJobIDByAddress provides a mock function with given fields: address, evmChainID, qopts +func (_m *ORM) FindJobIDByAddress(address ethkey.EIP55Address, evmChainID *big.Big, qopts ...pg.QOpt) (int32, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, address, evmChainID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for FindJobIDByAddress") + } + + var r0 int32 + var r1 error + if rf, ok := ret.Get(0).(func(ethkey.EIP55Address, *big.Big, ...pg.QOpt) (int32, error)); ok { + return rf(address, evmChainID, qopts...) + } + if rf, ok := ret.Get(0).(func(ethkey.EIP55Address, *big.Big, ...pg.QOpt) int32); ok { + r0 = rf(address, evmChainID, qopts...) + } else { + r0 = ret.Get(0).(int32) + } + + if rf, ok := ret.Get(1).(func(ethkey.EIP55Address, *big.Big, ...pg.QOpt) error); ok { + r1 = rf(address, evmChainID, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindJobIDsWithBridge provides a mock function with given fields: name +func (_m *ORM) FindJobIDsWithBridge(name string) ([]int32, error) { + ret := _m.Called(name) + + if len(ret) == 0 { + panic("no return value specified for FindJobIDsWithBridge") + } + + var r0 []int32 + var r1 error + if rf, ok := ret.Get(0).(func(string) ([]int32, error)); ok { + return rf(name) + } + if rf, ok := ret.Get(0).(func(string) []int32); ok { + r0 = rf(name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int32) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindJobTx provides a mock function with given fields: ctx, id +func (_m *ORM) FindJobTx(ctx context.Context, id int32) (job.Job, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for FindJobTx") + } + + var r0 job.Job + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int32) (job.Job, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, int32) job.Job); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(job.Job) + } + + if rf, ok := ret.Get(1).(func(context.Context, int32) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindJobWithoutSpecErrors provides a mock function with given fields: id +func (_m *ORM) FindJobWithoutSpecErrors(id int32) (job.Job, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for FindJobWithoutSpecErrors") + } + + var r0 job.Job + var r1 error + if rf, ok := ret.Get(0).(func(int32) (job.Job, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(int32) job.Job); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(job.Job) + } + + if rf, ok := ret.Get(1).(func(int32) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
FindJobs provides a mock function with given fields: offset, limit +func (_m *ORM) FindJobs(offset int, limit int) ([]job.Job, int, error) { + ret := _m.Called(offset, limit) + + if len(ret) == 0 { + panic("no return value specified for FindJobs") + } + + var r0 []job.Job + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(int, int) ([]job.Job, int, error)); ok { + return rf(offset, limit) + } + if rf, ok := ret.Get(0).(func(int, int) []job.Job); ok { + r0 = rf(offset, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]job.Job) + } + } + + if rf, ok := ret.Get(1).(func(int, int) int); ok { + r1 = rf(offset, limit) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(int, int) error); ok { + r2 = rf(offset, limit) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// FindJobsByPipelineSpecIDs provides a mock function with given fields: ids +func (_m *ORM) FindJobsByPipelineSpecIDs(ids []int32) ([]job.Job, error) { + ret := _m.Called(ids) + + if len(ret) == 0 { + panic("no return value specified for FindJobsByPipelineSpecIDs") + } + + var r0 []job.Job + var r1 error + if rf, ok := ret.Get(0).(func([]int32) ([]job.Job, error)); ok { + return rf(ids) + } + if rf, ok := ret.Get(0).(func([]int32) []job.Job); ok { + r0 = rf(ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]job.Job) + } + } + + if rf, ok := ret.Get(1).(func([]int32) error); ok { + r1 = rf(ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindOCR2JobIDByAddress provides a mock function with given fields: contractID, feedID, qopts +func (_m *ORM) FindOCR2JobIDByAddress(contractID string, feedID *common.Hash, qopts ...pg.QOpt) (int32, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, contractID, feedID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for FindOCR2JobIDByAddress") + } + + var r0 int32 + var r1 error + if rf, ok := ret.Get(0).(func(string, *common.Hash, ...pg.QOpt) (int32, error)); ok { + return rf(contractID, feedID, qopts...) + } + if rf, ok := ret.Get(0).(func(string, *common.Hash, ...pg.QOpt) int32); ok { + r0 = rf(contractID, feedID, qopts...) + } else { + r0 = ret.Get(0).(int32) + } + + if rf, ok := ret.Get(1).(func(string, *common.Hash, ...pg.QOpt) error); ok { + r1 = rf(contractID, feedID, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindPipelineRunByID provides a mock function with given fields: id +func (_m *ORM) FindPipelineRunByID(id int64) (pipeline.Run, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for FindPipelineRunByID") + } + + var r0 pipeline.Run + var r1 error + if rf, ok := ret.Get(0).(func(int64) (pipeline.Run, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(int64) pipeline.Run); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(pipeline.Run) + } + + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindPipelineRunIDsByJobID provides a mock function with given fields: jobID, offset, limit +func (_m *ORM) FindPipelineRunIDsByJobID(jobID int32, offset int, limit int) ([]int64, error) { + ret := _m.Called(jobID, offset, limit) + + if len(ret) == 0 { + panic("no return value specified for FindPipelineRunIDsByJobID") + } + + var r0 []int64 + var r1 error + if rf, ok := ret.Get(0).(func(int32, int, int) ([]int64, error)); ok { + return rf(jobID, offset, limit) + } + if rf, ok := ret.Get(0).(func(int32, int, int) []int64); ok { + r0 = rf(jobID, offset, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int64) + } + } + + if rf, ok := ret.Get(1).(func(int32, int, int) error); ok { + r1 = rf(jobID, offset, limit) + } else { + r1 = ret.Error(1) + } + + 
return r0, r1 +} + +// FindPipelineRunsByIDs provides a mock function with given fields: ids +func (_m *ORM) FindPipelineRunsByIDs(ids []int64) ([]pipeline.Run, error) { + ret := _m.Called(ids) + + if len(ret) == 0 { + panic("no return value specified for FindPipelineRunsByIDs") + } + + var r0 []pipeline.Run + var r1 error + if rf, ok := ret.Get(0).(func([]int64) ([]pipeline.Run, error)); ok { + return rf(ids) + } + if rf, ok := ret.Get(0).(func([]int64) []pipeline.Run); ok { + r0 = rf(ids) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]pipeline.Run) + } + } + + if rf, ok := ret.Get(1).(func([]int64) error); ok { + r1 = rf(ids) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindSpecError provides a mock function with given fields: id, qopts +func (_m *ORM) FindSpecError(id int64, qopts ...pg.QOpt) (job.SpecError, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, id) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for FindSpecError") + } + + var r0 job.SpecError + var r1 error + if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) (job.SpecError, error)); ok { + return rf(id, qopts...) + } + if rf, ok := ret.Get(0).(func(int64, ...pg.QOpt) job.SpecError); ok { + r0 = rf(id, qopts...) + } else { + r0 = ret.Get(0).(job.SpecError) + } + + if rf, ok := ret.Get(1).(func(int64, ...pg.QOpt) error); ok { + r1 = rf(id, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindSpecErrorsByJobIDs provides a mock function with given fields: ids, qopts +func (_m *ORM) FindSpecErrorsByJobIDs(ids []int32, qopts ...pg.QOpt) ([]job.SpecError, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, ids) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for FindSpecErrorsByJobIDs") + } + + var r0 []job.SpecError + var r1 error + if rf, ok := ret.Get(0).(func([]int32, ...pg.QOpt) ([]job.SpecError, error)); ok { + return rf(ids, qopts...) + } + if rf, ok := ret.Get(0).(func([]int32, ...pg.QOpt) []job.SpecError); ok { + r0 = rf(ids, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]job.SpecError) + } + } + + if rf, ok := ret.Get(1).(func([]int32, ...pg.QOpt) error); ok { + r1 = rf(ids, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindTaskResultByRunIDAndTaskName provides a mock function with given fields: runID, taskName, qopts +func (_m *ORM) FindTaskResultByRunIDAndTaskName(runID int64, taskName string, qopts ...pg.QOpt) ([]byte, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, runID, taskName) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for FindTaskResultByRunIDAndTaskName") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(int64, string, ...pg.QOpt) ([]byte, error)); ok { + return rf(runID, taskName, qopts...) + } + if rf, ok := ret.Get(0).(func(int64, string, ...pg.QOpt) []byte); ok { + r0 = rf(runID, taskName, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(int64, string, ...pg.QOpt) error); ok { + r1 = rf(runID, taskName, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// InsertJob provides a mock function with given fields: _a0, qopts +func (_m *ORM) InsertJob(_a0 *job.Job, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for InsertJob") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*job.Job, ...pg.QOpt) error); ok { + r0 = rf(_a0, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertWebhookSpec provides a mock function with given fields: webhookSpec, qopts +func (_m *ORM) InsertWebhookSpec(webhookSpec *job.WebhookSpec, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, webhookSpec) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for InsertWebhookSpec") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*job.WebhookSpec, ...pg.QOpt) error); ok { + r0 = rf(webhookSpec, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PipelineRuns provides a mock function with given fields: jobID, offset, size +func (_m *ORM) PipelineRuns(jobID *int32, offset int, size int) ([]pipeline.Run, int, error) { + ret := _m.Called(jobID, offset, size) + + if len(ret) == 0 { + panic("no return value specified for PipelineRuns") + } + + var r0 []pipeline.Run + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(*int32, int, int) ([]pipeline.Run, int, error)); ok { + return rf(jobID, offset, size) + } + if rf, ok := ret.Get(0).(func(*int32, int, int) []pipeline.Run); ok { + r0 = rf(jobID, offset, size) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]pipeline.Run) + } + } + + if rf, ok := ret.Get(1).(func(*int32, int, int) int); ok { + r1 = rf(jobID, offset, size) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(*int32, int, int) error); ok { + r2 = rf(jobID, offset, size) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// RecordError provides a mock function with given fields: jobID, description, qopts +func (_m *ORM) RecordError(jobID int32, description string, qopts ...pg.QOpt) 
error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, jobID, description) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for RecordError") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int32, string, ...pg.QOpt) error); ok { + r0 = rf(jobID, description, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TryRecordError provides a mock function with given fields: jobID, description, qopts +func (_m *ORM) TryRecordError(jobID int32, description string, qopts ...pg.QOpt) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, jobID, description) + _ca = append(_ca, _va...) + _m.Called(_ca...) +} + +// NewORM creates a new instance of ORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewORM(t interface { + mock.TestingT + Cleanup(func()) +}) *ORM { + mock := &ORM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/job/mocks/service_ctx.go b/core/services/job/mocks/service_ctx.go new file mode 100644 index 00000000..43c28632 --- /dev/null +++ b/core/services/job/mocks/service_ctx.go @@ -0,0 +1,64 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// ServiceCtx is an autogenerated mock type for the ServiceCtx type +type ServiceCtx struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *ServiceCtx) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *ServiceCtx) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewServiceCtx creates a new instance of ServiceCtx. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewServiceCtx(t interface { + mock.TestingT + Cleanup(func()) +}) *ServiceCtx { + mock := &ServiceCtx{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/job/mocks/spawner.go b/core/services/job/mocks/spawner.go new file mode 100644 index 00000000..c66247f7 --- /dev/null +++ b/core/services/job/mocks/spawner.go @@ -0,0 +1,218 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + job "github.com/goplugin/pluginv3.0/v2/core/services/job" + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// Spawner is an autogenerated mock type for the Spawner type +type Spawner struct { + mock.Mock +} + +// ActiveJobs provides a mock function with given fields: +func (_m *Spawner) ActiveJobs() map[int32]job.Job { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ActiveJobs") + } + + var r0 map[int32]job.Job + if rf, ok := ret.Get(0).(func() map[int32]job.Job); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[int32]job.Job) + } + } + + return r0 +} + +// Close provides a mock function with given fields: +func (_m *Spawner) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CreateJob provides a mock function with given fields: jb, qopts +func (_m *Spawner) CreateJob(jb *job.Job, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, jb) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateJob") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*job.Job, ...pg.QOpt) error); ok { + r0 = rf(jb, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteJob provides a mock function with given fields: jobID, qopts +func (_m *Spawner) DeleteJob(jobID int32, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, jobID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for DeleteJob") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int32, ...pg.QOpt) error); ok { + r0 = rf(jobID, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// HealthReport provides a mock function with given fields: +func (_m *Spawner) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *Spawner) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Ready provides a mock function with given fields: +func (_m *Spawner) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *Spawner) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StartService provides a mock function with given fields: ctx, spec, qopts +func (_m *Spawner) StartService(ctx context.Context, spec job.Job, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, spec) + _ca = append(_ca, _va...) 
+ ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for StartService") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, job.Job, ...pg.QOpt) error); ok { + r0 = rf(ctx, spec, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewSpawner creates a new instance of Spawner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSpawner(t interface { + mock.TestingT + Cleanup(func()) +}) *Spawner { + mock := &Spawner{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/job/models.go b/core/services/job/models.go new file mode 100644 index 00000000..d051855c --- /dev/null +++ b/core/services/job/models.go @@ -0,0 +1,813 @@ +package job + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/lib/pq" + "github.com/pkg/errors" + "gopkg.in/guregu/null.v4" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + clnull "github.com/goplugin/pluginv3.0/v2/core/null" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" + 
"github.com/goplugin/pluginv3.0/v2/core/utils/tomlutils" +) + +const ( + BlockHeaderFeeder Type = (Type)(pipeline.BlockHeaderFeederJobType) + BlockhashStore Type = (Type)(pipeline.BlockhashStoreJobType) + Bootstrap Type = (Type)(pipeline.BootstrapJobType) + Cron Type = (Type)(pipeline.CronJobType) + DirectRequest Type = (Type)(pipeline.DirectRequestJobType) + FluxMonitor Type = (Type)(pipeline.FluxMonitorJobType) + Gateway Type = (Type)(pipeline.GatewayJobType) + Keeper Type = (Type)(pipeline.KeeperJobType) + LegacyGasStationServer Type = (Type)(pipeline.LegacyGasStationServerJobType) + LegacyGasStationSidecar Type = (Type)(pipeline.LegacyGasStationSidecarJobType) + OffchainReporting Type = (Type)(pipeline.OffchainReportingJobType) + OffchainReporting2 Type = (Type)(pipeline.OffchainReporting2JobType) + Stream Type = (Type)(pipeline.StreamJobType) + VRF Type = (Type)(pipeline.VRFJobType) + Webhook Type = (Type)(pipeline.WebhookJobType) + Workflow Type = (Type)(pipeline.WorkflowJobType) +) + +//revive:disable:redefines-builtin-id +type Type string + +func (t Type) String() string { + return string(t) +} + +func (t Type) RequiresPipelineSpec() bool { + return requiresPipelineSpec[t] +} + +func (t Type) SupportsAsync() bool { + return supportsAsync[t] +} + +func (t Type) SchemaVersion() uint32 { + return schemaVersions[t] +} + +var ( + requiresPipelineSpec = map[Type]bool{ + BlockHeaderFeeder: false, + BlockhashStore: false, + Bootstrap: false, + Cron: true, + DirectRequest: true, + FluxMonitor: true, + Gateway: false, + Keeper: false, // observationSource is injected in the upkeep executor + LegacyGasStationServer: false, + LegacyGasStationSidecar: false, + OffchainReporting2: false, // bootstrap jobs do not require it + OffchainReporting: false, // bootstrap jobs do not require it + Stream: true, + VRF: true, + Webhook: true, + } + supportsAsync = map[Type]bool{ + BlockHeaderFeeder: false, + BlockhashStore: false, + Bootstrap: false, + Cron: true, + DirectRequest: 
true, + FluxMonitor: false, + Gateway: false, + Keeper: true, + LegacyGasStationServer: false, + LegacyGasStationSidecar: false, + OffchainReporting2: false, + OffchainReporting: false, + Stream: true, + VRF: true, + Webhook: true, + } + schemaVersions = map[Type]uint32{ + BlockHeaderFeeder: 1, + BlockhashStore: 1, + Bootstrap: 1, + Cron: 1, + DirectRequest: 1, + FluxMonitor: 1, + Gateway: 1, + Keeper: 1, + LegacyGasStationServer: 1, + LegacyGasStationSidecar: 1, + OffchainReporting2: 1, + OffchainReporting: 1, + Stream: 1, + VRF: 1, + Webhook: 1, + } +) + +type Job struct { + ID int32 `toml:"-"` + ExternalJobID uuid.UUID `toml:"externalJobID"` + StreamID *uint32 `toml:"streamID"` + OCROracleSpecID *int32 + OCROracleSpec *OCROracleSpec + OCR2OracleSpecID *int32 + OCR2OracleSpec *OCR2OracleSpec + CronSpecID *int32 + CronSpec *CronSpec + DirectRequestSpecID *int32 + DirectRequestSpec *DirectRequestSpec + FluxMonitorSpecID *int32 + FluxMonitorSpec *FluxMonitorSpec + KeeperSpecID *int32 + KeeperSpec *KeeperSpec + VRFSpecID *int32 + VRFSpec *VRFSpec + WebhookSpecID *int32 + WebhookSpec *WebhookSpec + BlockhashStoreSpecID *int32 + BlockhashStoreSpec *BlockhashStoreSpec + BlockHeaderFeederSpecID *int32 + BlockHeaderFeederSpec *BlockHeaderFeederSpec + LegacyGasStationServerSpecID *int32 + LegacyGasStationServerSpec *LegacyGasStationServerSpec + LegacyGasStationSidecarSpecID *int32 + LegacyGasStationSidecarSpec *LegacyGasStationSidecarSpec + BootstrapSpec *BootstrapSpec + BootstrapSpecID *int32 + GatewaySpec *GatewaySpec + GatewaySpecID *int32 + EALSpec *EALSpec + EALSpecID *int32 + LiquidityBalancerSpec *LiquidityBalancerSpec + LiquidityBalancerSpecID *int32 + PipelineSpecID int32 + PipelineSpec *pipeline.Spec + JobSpecErrors []SpecError + Type Type `toml:"type"` + SchemaVersion uint32 `toml:"schemaVersion"` + GasLimit clnull.Uint32 `toml:"gasLimit"` + ForwardingAllowed bool `toml:"forwardingAllowed"` + Name null.String `toml:"name"` + MaxTaskDuration models.Interval + 
Pipeline pipeline.Pipeline `toml:"observationSource"` + CreatedAt time.Time +} + +func ExternalJobIDEncodeStringToTopic(id uuid.UUID) common.Hash { + return common.BytesToHash([]byte(strings.Replace(id.String(), "-", "", 4))) +} + +func ExternalJobIDEncodeBytesToTopic(id uuid.UUID) common.Hash { + return common.BytesToHash(common.RightPadBytes(id[:], utils.EVMWordByteLen)) +} + +// ExternalIDEncodeStringToTopic encodes the external job ID (UUID) into a log topic (32 bytes) +// by taking the string representation of the UUID, removing the dashes +// so that its 32 characters long and then encoding those characters to bytes. +func (j Job) ExternalIDEncodeStringToTopic() common.Hash { + return ExternalJobIDEncodeStringToTopic(j.ExternalJobID) +} + +// ExternalIDEncodeBytesToTopic encodes the external job ID (UUID) into a log topic (32 bytes) +// by taking the 16 bytes underlying the UUID and right padding it. +func (j Job) ExternalIDEncodeBytesToTopic() common.Hash { + return ExternalJobIDEncodeBytesToTopic(j.ExternalJobID) +} + +// SetID takes the id as a string and attempts to convert it to an int32. If +// it succeeds, it will set it as the id on the job +func (j *Job) SetID(value string) error { + id, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + j.ID = int32(id) + return nil +} + +type SpecError struct { + ID int64 + JobID int32 + Description string + Occurrences uint + CreatedAt time.Time + UpdatedAt time.Time +} + +// SetID takes the id as a string and attempts to convert it to an int32. 
If +// it succeeds, it will set it as the id on the job +func (j *SpecError) SetID(value string) error { + id, err := stringutils.ToInt64(value) + if err != nil { + return err + } + j.ID = id + return nil +} + +type PipelineRun struct { + ID int64 `json:"-"` +} + +func (pr PipelineRun) GetID() string { + return fmt.Sprintf("%v", pr.ID) +} + +func (pr *PipelineRun) SetID(value string) error { + ID, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + pr.ID = ID + return nil +} + +// OCROracleSpec defines the job spec for OCR jobs. +type OCROracleSpec struct { + ID int32 `toml:"-"` + ContractAddress ethkey.EIP55Address `toml:"contractAddress"` + P2PV2Bootstrappers pq.StringArray `toml:"p2pv2Bootstrappers" db:"p2pv2_bootstrappers"` + IsBootstrapPeer bool `toml:"isBootstrapPeer"` + EncryptedOCRKeyBundleID *models.Sha256Hash `toml:"keyBundleID"` + TransmitterAddress *ethkey.EIP55Address `toml:"transmitterAddress"` + ObservationTimeout models.Interval `toml:"observationTimeout"` + BlockchainTimeout models.Interval `toml:"blockchainTimeout"` + ContractConfigTrackerSubscribeInterval models.Interval `toml:"contractConfigTrackerSubscribeInterval"` + ContractConfigTrackerPollInterval models.Interval `toml:"contractConfigTrackerPollInterval"` + ContractConfigConfirmations uint16 `toml:"contractConfigConfirmations"` + EVMChainID *big.Big `toml:"evmChainID" db:"evm_chain_id"` + DatabaseTimeout *models.Interval `toml:"databaseTimeout"` + ObservationGracePeriod *models.Interval `toml:"observationGracePeriod"` + ContractTransmitterTransmitTimeout *models.Interval `toml:"contractTransmitterTransmitTimeout"` + CaptureEATelemetry bool `toml:"captureEATelemetry"` + CreatedAt time.Time `toml:"-"` + UpdatedAt time.Time `toml:"-"` +} + +// GetID is a getter function that returns the ID of the spec. +func (s OCROracleSpec) GetID() string { + return fmt.Sprintf("%v", s.ID) +} + +// SetID is a setter function that sets the ID of the spec. 
+func (s *OCROracleSpec) SetID(value string) error { + ID, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + s.ID = int32(ID) + return nil +} + +// JSONConfig is a map for config properties which are encoded as JSON in the database by implementing +// sql.Scanner and driver.Valuer. +type JSONConfig map[string]interface{} + +// Bytes returns the raw bytes +func (r JSONConfig) Bytes() []byte { + b, _ := json.Marshal(r) + return b +} + +// Value returns this instance serialized for database storage. +func (r JSONConfig) Value() (driver.Value, error) { + return json.Marshal(r) +} + +// Scan reads the database value and returns an instance. +func (r *JSONConfig) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return errors.Errorf("expected bytes got %T", b) + } + return json.Unmarshal(b, &r) +} + +func (r JSONConfig) MercuryCredentialName() (string, error) { + url, ok := r["mercuryCredentialName"] + if !ok { + return "", nil + } + name, ok := url.(string) + if !ok { + return "", fmt.Errorf("expected string mercuryCredentialName but got: %T", url) + } + return name, nil +} + +var ForwardersSupportedPlugins = []types.OCR2PluginType{types.Median, types.DKG, types.OCR2VRF, types.OCR2Keeper, types.Functions} + +// OCR2OracleSpec defines the job spec for OCR2 jobs. +// Relay config is chain specific config for a relay (chain adapter). +type OCR2OracleSpec struct { + ID int32 `toml:"-"` + ContractID string `toml:"contractID"` + FeedID *common.Hash `toml:"feedID"` + Relay relay.Network `toml:"relay"` + // TODO BCF-2442 implement ChainID as top level parameter rathe than buried in RelayConfig. 
+ ChainID string `toml:"chainID"` + RelayConfig JSONConfig `toml:"relayConfig"` + P2PV2Bootstrappers pq.StringArray `toml:"p2pv2Bootstrappers"` + OCRKeyBundleID null.String `toml:"ocrKeyBundleID"` + MonitoringEndpoint null.String `toml:"monitoringEndpoint"` + TransmitterID null.String `toml:"transmitterID"` + BlockchainTimeout models.Interval `toml:"blockchainTimeout"` + ContractConfigTrackerPollInterval models.Interval `toml:"contractConfigTrackerPollInterval"` + ContractConfigConfirmations uint16 `toml:"contractConfigConfirmations"` + PluginConfig JSONConfig `toml:"pluginConfig"` + PluginType types.OCR2PluginType `toml:"pluginType"` + CreatedAt time.Time `toml:"-"` + UpdatedAt time.Time `toml:"-"` + CaptureEATelemetry bool `toml:"captureEATelemetry"` + CaptureAutomationCustomTelemetry bool `toml:"captureAutomationCustomTelemetry"` +} + +func validateRelayID(id relay.ID) error { + // only the EVM has specific requirements + if id.Network == relay.EVM { + _, err := toml.ChainIDInt64(id.ChainID) + if err != nil { + return fmt.Errorf("invalid EVM chain id %s: %w", id.ChainID, err) + } + } + return nil +} + +func (s *OCR2OracleSpec) RelayID() (relay.ID, error) { + cid, err := s.getChainID() + if err != nil { + return relay.ID{}, err + } + rid := relay.NewID(s.Relay, cid) + err = validateRelayID(rid) + if err != nil { + return relay.ID{}, err + } + return rid, nil +} + +func (s *OCR2OracleSpec) getChainID() (relay.ChainID, error) { + if s.ChainID != "" { + return s.ChainID, nil + } + // backward compatible job spec + return s.getChainIdFromRelayConfig() +} + +func (s *OCR2OracleSpec) getChainIdFromRelayConfig() (relay.ChainID, error) { + + v, exists := s.RelayConfig["chainID"] + if !exists { + return "", fmt.Errorf("chainID does not exist") + } + switch t := v.(type) { + case string: + return t, nil + case int, int64, int32: + return fmt.Sprintf("%d", v), nil + case float64: + // backward compatibility with JSONConfig.EVMChainID + i := int64(t) + return 
strconv.FormatInt(i, 10), nil + + default: + return "", fmt.Errorf("unable to parse chainID: unexpected type %T", t) + } +} + +// GetID is a getter function that returns the ID of the spec. +func (s OCR2OracleSpec) GetID() string { + return fmt.Sprintf("%v", s.ID) +} + +// SetID is a setter function that sets the ID of the spec. +func (s *OCR2OracleSpec) SetID(value string) error { + ID, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + s.ID = int32(ID) + return nil +} + +type ExternalInitiatorWebhookSpec struct { + ExternalInitiatorID int64 + ExternalInitiator bridges.ExternalInitiator + WebhookSpecID int32 + WebhookSpec WebhookSpec + Spec models.JSON +} + +type WebhookSpec struct { + ID int32 `toml:"-"` + ExternalInitiatorWebhookSpecs []ExternalInitiatorWebhookSpec + CreatedAt time.Time `json:"createdAt" toml:"-"` + UpdatedAt time.Time `json:"updatedAt" toml:"-"` +} + +func (w WebhookSpec) GetID() string { + return fmt.Sprintf("%v", w.ID) +} + +func (w *WebhookSpec) SetID(value string) error { + ID, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + w.ID = int32(ID) + return nil +} + +type DirectRequestSpec struct { + ID int32 `toml:"-"` + ContractAddress ethkey.EIP55Address `toml:"contractAddress"` + MinIncomingConfirmations clnull.Uint32 `toml:"minIncomingConfirmations"` + Requesters models.AddressCollection `toml:"requesters"` + MinContractPayment *commonassets.Link `toml:"minContractPaymentLinkJuels"` + EVMChainID *big.Big `toml:"evmChainID"` + CreatedAt time.Time `toml:"-"` + UpdatedAt time.Time `toml:"-"` +} + +type CronSpec struct { + ID int32 `toml:"-"` + CronSchedule string `toml:"schedule"` + CreatedAt time.Time `toml:"-"` + UpdatedAt time.Time `toml:"-"` +} + +func (s CronSpec) GetID() string { + return fmt.Sprintf("%v", s.ID) +} + +func (s *CronSpec) SetID(value string) error { + ID, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + s.ID = int32(ID) + return nil +} + +type 
FluxMonitorSpec struct { + ID int32 `toml:"-"` + ContractAddress ethkey.EIP55Address `toml:"contractAddress"` + Threshold tomlutils.Float32 `toml:"threshold,float"` + // AbsoluteThreshold is the maximum absolute change allowed in a fluxmonitored + // value before a new round should be kicked off, so that the current value + // can be reported on-chain. + AbsoluteThreshold tomlutils.Float32 `toml:"absoluteThreshold,float"` + PollTimerPeriod time.Duration + PollTimerDisabled bool + IdleTimerPeriod time.Duration + IdleTimerDisabled bool + DrumbeatSchedule string + DrumbeatRandomDelay time.Duration + DrumbeatEnabled bool + MinPayment *commonassets.Link + EVMChainID *big.Big `toml:"evmChainID"` + CreatedAt time.Time `toml:"-"` + UpdatedAt time.Time `toml:"-"` +} + +type KeeperSpec struct { + ID int32 `toml:"-"` + ContractAddress ethkey.EIP55Address `toml:"contractAddress"` + MinIncomingConfirmations *uint32 `toml:"minIncomingConfirmations"` + FromAddress ethkey.EIP55Address `toml:"fromAddress"` + EVMChainID *big.Big `toml:"evmChainID"` + CreatedAt time.Time `toml:"-"` + UpdatedAt time.Time `toml:"-"` +} + +type VRFSpec struct { + ID int32 + + // BatchCoordinatorAddress is the address of the batch vrf coordinator to use. + // This is required if batchFulfillmentEnabled is set to true in the job spec. + BatchCoordinatorAddress *ethkey.EIP55Address `toml:"batchCoordinatorAddress"` + // BatchFulfillmentEnabled indicates to the vrf job to use the batch vrf coordinator + // for fulfilling requests. If set to true, batchCoordinatorAddress must be set in + // the job spec. + BatchFulfillmentEnabled bool `toml:"batchFulfillmentEnabled"` + // CustomRevertsPipelineEnabled indicates to the vrf job to run the + // custom reverted txns pipeline along with VRF listener + CustomRevertsPipelineEnabled bool `toml:"customRevertsPipelineEnabled"` + // BatchFulfillmentGasMultiplier is used to determine the final gas estimate for the batch + // fulfillment. 
+ BatchFulfillmentGasMultiplier tomlutils.Float64 `toml:"batchFulfillmentGasMultiplier"` + + // VRFOwnerAddress is the address of the VRFOwner address to use. + // + // V2 only. + VRFOwnerAddress *ethkey.EIP55Address `toml:"vrfOwnerAddress"` + + CoordinatorAddress ethkey.EIP55Address `toml:"coordinatorAddress"` + PublicKey secp256k1.PublicKey `toml:"publicKey"` + MinIncomingConfirmations uint32 `toml:"minIncomingConfirmations"` + EVMChainID *big.Big `toml:"evmChainID"` + FromAddresses []ethkey.EIP55Address `toml:"fromAddresses"` + PollPeriod time.Duration `toml:"pollPeriod"` // For v2 jobs + RequestedConfsDelay int64 `toml:"requestedConfsDelay"` // For v2 jobs. Optional, defaults to 0 if not provided. + RequestTimeout time.Duration `toml:"requestTimeout"` // Optional, defaults to 24hr if not provided. + + // GasLanePrice specifies the gas lane price for this VRF job. + // If the specified keys in FromAddresses do not have the provided gas price the job + // will not start. + // + // Optional, for v2 jobs only. + GasLanePrice *assets.Wei `toml:"gasLanePrice" db:"gas_lane_price"` + + // ChunkSize is the number of pending VRF V2 requests to process in parallel. Optional, defaults + // to 20 if not provided. + ChunkSize uint32 `toml:"chunkSize"` + + // BackoffInitialDelay is the amount of time to wait before retrying a failed request after the + // first failure. V2 only. + BackoffInitialDelay time.Duration `toml:"backoffInitialDelay"` + + // BackoffMaxDelay is the maximum amount of time to wait before retrying a failed request. V2 + // only. + BackoffMaxDelay time.Duration `toml:"backoffMaxDelay"` + + CreatedAt time.Time `toml:"-"` + UpdatedAt time.Time `toml:"-"` +} + +// BlockhashStoreSpec defines the job spec for the blockhash store feeder. +type BlockhashStoreSpec struct { + ID int32 + + // CoordinatorV1Address is the VRF V1 coordinator to watch for unfulfilled requests. If empty, + // no V1 coordinator will be watched. 
+ CoordinatorV1Address *ethkey.EIP55Address `toml:"coordinatorV1Address"` + + // CoordinatorV2Address is the VRF V2 coordinator to watch for unfulfilled requests. If empty, + // no V2 coordinator will be watched. + CoordinatorV2Address *ethkey.EIP55Address `toml:"coordinatorV2Address"` + + // CoordinatorV2PlusAddress is the VRF V2Plus coordinator to watch for unfulfilled requests. If empty, + // no V2Plus coordinator will be watched. + CoordinatorV2PlusAddress *ethkey.EIP55Address `toml:"coordinatorV2PlusAddress"` + + // LookbackBlocks defines the maximum age of blocks whose hashes should be stored. + LookbackBlocks int32 `toml:"lookbackBlocks"` + + // WaitBlocks defines the minimum age of blocks whose hashes should be stored. + WaitBlocks int32 `toml:"waitBlocks"` + + // HeartbeatPeriodTime defines the number of seconds by which we "heartbeat store" + // a blockhash into the blockhash store contract. + // This is so that we always have a blockhash to anchor to in the event we need to do a + // backwards mode on the contract. + HeartbeatPeriod time.Duration `toml:"heartbeatPeriod"` + + // BlockhashStoreAddress is the address of the BlockhashStore contract to store blockhashes + // into. + BlockhashStoreAddress ethkey.EIP55Address `toml:"blockhashStoreAddress"` + + // BatchBlockhashStoreAddress is the address of the trusted BlockhashStore contract to store blockhashes + TrustedBlockhashStoreAddress *ethkey.EIP55Address `toml:"trustedBlockhashStoreAddress"` + + // BatchBlockhashStoreBatchSize is the number of blockhashes to store in a single batch + TrustedBlockhashStoreBatchSize int32 `toml:"trustedBlockhashStoreBatchSize"` + + // PollPeriod defines how often recent blocks should be scanned for blockhash storage. + PollPeriod time.Duration `toml:"pollPeriod"` + + // RunTimeout defines the timeout for a single run of the blockhash store feeder. 
+ RunTimeout time.Duration `toml:"runTimeout"` + + // EVMChainID defines the chain ID for monitoring and storing of blockhashes. + EVMChainID *big.Big `toml:"evmChainID"` + + // FromAddress is the sender address that should be used to store blockhashes. + FromAddresses []ethkey.EIP55Address `toml:"fromAddresses"` + + // CreatedAt is the time this job was created. + CreatedAt time.Time `toml:"-"` + + // UpdatedAt is the time this job was last updated. + UpdatedAt time.Time `toml:"-"` +} + +// BlockHeaderFeederSpec defines the job spec for the blockhash store feeder. +type BlockHeaderFeederSpec struct { + ID int32 + + // CoordinatorV1Address is the VRF V1 coordinator to watch for unfulfilled requests. If empty, + // no V1 coordinator will be watched. + CoordinatorV1Address *ethkey.EIP55Address `toml:"coordinatorV1Address"` + + // CoordinatorV2Address is the VRF V2 coordinator to watch for unfulfilled requests. If empty, + // no V2 coordinator will be watched. + CoordinatorV2Address *ethkey.EIP55Address `toml:"coordinatorV2Address"` + + // CoordinatorV2PlusAddress is the VRF V2Plus coordinator to watch for unfulfilled requests. If empty, + // no V2Plus coordinator will be watched. + CoordinatorV2PlusAddress *ethkey.EIP55Address `toml:"coordinatorV2PlusAddress"` + + // LookbackBlocks defines the maximum age of blocks whose hashes should be stored. + LookbackBlocks int32 `toml:"lookbackBlocks"` + + // WaitBlocks defines the minimum age of blocks whose hashes should be stored. + WaitBlocks int32 `toml:"waitBlocks"` + + // BlockhashStoreAddress is the address of the BlockhashStore contract to store blockhashes + // into. + BlockhashStoreAddress ethkey.EIP55Address `toml:"blockhashStoreAddress"` + + // BatchBlockhashStoreAddress is the address of the BatchBlockhashStore contract to store blockhashes + // into. 
+ BatchBlockhashStoreAddress ethkey.EIP55Address `toml:"batchBlockhashStoreAddress"` + + // PollPeriod defines how often recent blocks should be scanned for blockhash storage. + PollPeriod time.Duration `toml:"pollPeriod"` + + // RunTimeout defines the timeout for a single run of the blockhash store feeder. + RunTimeout time.Duration `toml:"runTimeout"` + + // EVMChainID defines the chain ID for monitoring and storing of blockhashes. + EVMChainID *big.Big `toml:"evmChainID"` + + // FromAddress is the sender address that should be used to store blockhashes. + FromAddresses []ethkey.EIP55Address `toml:"fromAddresses"` + + // GetBlockHashesBatchSize is the RPC call batch size for retrieving blockhashes + GetBlockhashesBatchSize uint16 `toml:"getBlockhashesBatchSize"` + + // StoreBlockhashesBatchSize is the RPC call batch size for storing blockhashes + StoreBlockhashesBatchSize uint16 `toml:"storeBlockhashesBatchSize"` + + // CreatedAt is the time this job was created. + CreatedAt time.Time `toml:"-"` + + // UpdatedAt is the time this job was last updated. + UpdatedAt time.Time `toml:"-"` +} + +// LegacyGasStationServerSpec defines the job spec for the legacy gas station server. +type LegacyGasStationServerSpec struct { + ID int32 + + // ForwarderAddress is the address of EIP2771 forwarder that verifies signature + // and forwards requests to target contracts + ForwarderAddress ethkey.EIP55Address `toml:"forwarderAddress"` + + // EVMChainID defines the chain ID from which the meta-transaction request originates. + EVMChainID *big.Big `toml:"evmChainID"` + + // CCIPChainSelector is the CCIP chain selector that corresponds to EVMChainID param. 
+ // This selector is equivalent to (source) chainID specified in SendTransaction request + CCIPChainSelector *big.Big `toml:"ccipChainSelector"` + + // FromAddress is the sender address that should be used to send meta-transactions + FromAddresses []ethkey.EIP55Address `toml:"fromAddresses"` + + // CreatedAt is the time this job was created. + CreatedAt time.Time `toml:"-"` + + // UpdatedAt is the time this job was last updated. + UpdatedAt time.Time `toml:"-"` +} + +// LegacyGasStationSidecarSpec defines the job spec for the legacy gas station sidecar. +type LegacyGasStationSidecarSpec struct { + ID int32 + + // ForwarderAddress is the address of EIP2771 forwarder that verifies signature + // and forwards requests to target contracts + ForwarderAddress ethkey.EIP55Address `toml:"forwarderAddress"` + + // OffRampAddress is the address of CCIP OffRamp for the given chainID + OffRampAddress ethkey.EIP55Address `toml:"offRampAddress"` + + // LookbackBlocks defines the maximum number of blocks to search for on-chain events. + LookbackBlocks int32 `toml:"lookbackBlocks"` + + // PollPeriod defines how frequently legacy gas station sidecar runs. + PollPeriod time.Duration `toml:"pollPeriod"` + + // RunTimeout defines the timeout for a single run of the legacy gas station sidecar. + RunTimeout time.Duration `toml:"runTimeout"` + + // EVMChainID defines the chain ID for the on-chain events tracked by sidecar + EVMChainID *big.Big `toml:"evmChainID"` + + // CCIPChainSelector is the CCIP chain selector that corresponds to EVMChainID param + CCIPChainSelector *big.Big `toml:"ccipChainSelector"` + + // CreatedAt is the time this job was created. + CreatedAt time.Time `toml:"-"` + + // UpdatedAt is the time this job was last updated. + UpdatedAt time.Time `toml:"-"` +} + +// BootstrapSpec defines the spec to handles the node communication setup process. 
+type BootstrapSpec struct { + ID int32 `toml:"-"` + ContractID string `toml:"contractID"` + FeedID *common.Hash `toml:"feedID"` + Relay relay.Network `toml:"relay"` + RelayConfig JSONConfig + MonitoringEndpoint null.String `toml:"monitoringEndpoint"` + BlockchainTimeout models.Interval `toml:"blockchainTimeout"` + ContractConfigTrackerPollInterval models.Interval `toml:"contractConfigTrackerPollInterval"` + ContractConfigConfirmations uint16 `toml:"contractConfigConfirmations"` + CreatedAt time.Time `toml:"-"` + UpdatedAt time.Time `toml:"-"` +} + +// AsOCR2Spec transforms the bootstrap spec into a generic OCR2 format to enable code sharing between specs. +func (s BootstrapSpec) AsOCR2Spec() OCR2OracleSpec { + return OCR2OracleSpec{ + ID: s.ID, + ContractID: s.ContractID, + Relay: s.Relay, + RelayConfig: s.RelayConfig, + MonitoringEndpoint: s.MonitoringEndpoint, + BlockchainTimeout: s.BlockchainTimeout, + ContractConfigTrackerPollInterval: s.ContractConfigTrackerPollInterval, + ContractConfigConfirmations: s.ContractConfigConfirmations, + CreatedAt: s.CreatedAt, + UpdatedAt: s.UpdatedAt, + P2PV2Bootstrappers: pq.StringArray{}, + } +} + +type GatewaySpec struct { + ID int32 `toml:"-"` + GatewayConfig JSONConfig `toml:"gatewayConfig"` + CreatedAt time.Time `toml:"-"` + UpdatedAt time.Time `toml:"-"` +} + +func (s GatewaySpec) GetID() string { + return fmt.Sprintf("%v", s.ID) +} + +func (s *GatewaySpec) SetID(value string) error { + ID, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + s.ID = int32(ID) + return nil +} + +// EALSpec defines the job spec for the gas station. +type EALSpec struct { + ID int32 + + // ForwarderAddress is the address of EIP2771 forwarder that verifies signature + // and forwards requests to target contracts + ForwarderAddress ethkey.EIP55Address `toml:"forwarderAddress"` + + // EVMChainID defines the chain ID from which the meta-transaction request originates. 
+ EVMChainID *big.Big `toml:"evmChainID"` + + // FromAddress is the sender address that should be used to send meta-transactions + FromAddresses []ethkey.EIP55Address `toml:"fromAddresses"` + + // LookbackBlocks defines the maximum age of blocks to lookback in status tracker + LookbackBlocks int32 `toml:"lookbackBlocks"` + + // PollPeriod defines how frequently EAL status tracker runs + PollPeriod time.Duration `toml:"pollPeriod"` + + // RunTimeout defines the timeout for a single run of EAL status tracker + RunTimeout time.Duration `toml:"runTimeout"` + + // CreatedAt is the time this job was created. + CreatedAt time.Time `toml:"-"` + + // UpdatedAt is the time this job was last updated. + UpdatedAt time.Time `toml:"-"` +} + +type LiquidityBalancerSpec struct { + ID int32 + + LiquidityBalancerConfig string `toml:"liquidityBalancerConfig" db:"liquidity_balancer_config"` +} diff --git a/core/services/job/models_test.go b/core/services/job/models_test.go new file mode 100644 index 00000000..4ebf028e --- /dev/null +++ b/core/services/job/models_test.go @@ -0,0 +1,263 @@ +package job + +import ( + _ "embed" + "reflect" + "testing" + "time" + + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/codec" + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +func TestOCR2OracleSpec_RelayIdentifier(t *testing.T) { + type fields struct { + Relay relay.Network + ChainID string + RelayConfig JSONConfig + } + tests := []struct { + name string + fields fields + want relay.ID + wantErr bool + }{ + {name: "err no chain id", + fields: fields{}, + want: relay.ID{}, + wantErr: true, + }, + { + name: "evm explicitly configured", + fields: fields{ + Relay: relay.EVM, + ChainID: "1", + }, + want: 
relay.ID{Network: relay.EVM, ChainID: "1"}, + }, + { + name: "evm implicitly configured", + fields: fields{ + Relay: relay.EVM, + RelayConfig: map[string]any{"chainID": 1}, + }, + want: relay.ID{Network: relay.EVM, ChainID: "1"}, + }, + { + name: "evm implicitly configured with bad value", + fields: fields{ + Relay: relay.EVM, + RelayConfig: map[string]any{"chainID": float32(1)}, + }, + want: relay.ID{}, + wantErr: true, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + s := &OCR2OracleSpec{ + Relay: tt.fields.Relay, + ChainID: tt.fields.ChainID, + RelayConfig: tt.fields.RelayConfig, + } + got, err := s.RelayID() + if (err != nil) != tt.wantErr { + t.Errorf("OCR2OracleSpec.RelayIdentifier() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("OCR2OracleSpec.RelayIdentifier() = %v, want %v", got, tt.want) + } + }) + } +} + +var ( + //go:embed testdata/compact.toml + compact string + //go:embed testdata/pretty.toml + pretty string +) + +func TestOCR2OracleSpec(t *testing.T) { + val := OCR2OracleSpec{ + Relay: relay.EVM, + PluginType: types.Median, + ContractID: "foo", + OCRKeyBundleID: null.StringFrom("bar"), + TransmitterID: null.StringFrom("baz"), + ContractConfigConfirmations: 1, + ContractConfigTrackerPollInterval: *models.NewInterval(time.Second), + RelayConfig: map[string]interface{}{ + "chainID": 1337, + "fromBlock": 42, + "chainReader": evmtypes.ChainReaderConfig{ + Contracts: map[string]evmtypes.ChainContractReader{ + "median": { + ContractABI: `[ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "requester", + "type": "address" + }, + { + "indexed": false, + "internalType": "bytes32", + "name": "configDigest", + "type": "bytes32" + }, + { + "indexed": false, + "internalType": "uint32", + "name": "epoch", + "type": "uint32" + }, + { + "indexed": false, + "internalType": "uint8", + "name": "round", + 
"type": "uint8" + } + ], + "name": "RoundRequested", + "type": "event" + }, + { + "inputs": [], + "name": "latestTransmissionDetails", + "outputs": [ + { + "internalType": "bytes32", + "name": "configDigest", + "type": "bytes32" + }, + { + "internalType": "uint32", + "name": "epoch", + "type": "uint32" + }, + { + "internalType": "uint8", + "name": "round", + "type": "uint8" + }, + { + "internalType": "int192", + "name": "latestAnswer_", + "type": "int192" + }, + { + "internalType": "uint64", + "name": "latestTimestamp_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + } +] +`, + Configs: map[string]*evmtypes.ChainReaderDefinition{ + "LatestTransmissionDetails": { + ChainSpecificName: "latestTransmissionDetails", + OutputModifications: codec.ModifiersConfig{ + &codec.EpochToTimeModifierConfig{ + Fields: []string{"LatestTimestamp_"}, + }, + &codec.RenameModifierConfig{ + Fields: map[string]string{ + "LatestAnswer_": "LatestAnswer", + "LatestTimestamp_": "LatestTimestamp", + }, + }, + }, + }, + "LatestRoundRequested": { + ChainSpecificName: "RoundRequested", + ReadType: evmtypes.Event, + }, + }, + }, + }, + }, + "codec": evmtypes.CodecConfig{ + Configs: map[string]evmtypes.ChainCodecConfig{ + "MedianReport": { + TypeABI: `[ + { + "Name": "Timestamp", + "Type": "uint32" + }, + { + "Name": "Observers", + "Type": "bytes32" + }, + { + "Name": "Observations", + "Type": "int192[]" + }, + { + "Name": "JuelsPerFeeCoin", + "Type": "int192" + } +] +`, + }, + }, + }, + }, + PluginConfig: map[string]interface{}{"juelsPerFeeCoinSource": ` // data source 1 + ds1 [type=bridge name="%s"]; + ds1_parse [type=jsonparse path="data"]; + ds1_multiply [type=multiply times=2]; + + // data source 2 + ds2 [type=http method=GET url="%s"]; + ds2_parse [type=jsonparse path="data"]; + ds2_multiply [type=multiply times=2]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0]; +`, + }, + } + 
+ t.Run("marshal", func(t *testing.T) { + gotB, err := toml.Marshal(val) + require.NoError(t, err) + t.Log("marshaled:", string(gotB)) + require.Equal(t, compact, string(gotB)) + }) + + t.Run("round-trip", func(t *testing.T) { + var gotVal OCR2OracleSpec + require.NoError(t, toml.Unmarshal([]byte(compact), &gotVal)) + gotB, err := toml.Marshal(gotVal) + require.NoError(t, err) + require.Equal(t, compact, string(gotB)) + t.Run("pretty", func(t *testing.T) { + var gotVal OCR2OracleSpec + require.NoError(t, toml.Unmarshal([]byte(pretty), &gotVal)) + gotB, err := toml.Marshal(gotVal) + require.NoError(t, err) + t.Log("marshaled compact:", string(gotB)) + require.Equal(t, compact, string(gotB)) + }) + }) +} diff --git a/core/services/job/orm.go b/core/services/job/orm.go new file mode 100644 index 00000000..77ed3b67 --- /dev/null +++ b/core/services/job/orm.go @@ -0,0 +1,1442 @@ +package job + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "reflect" + "slices" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/jackc/pgconn" + "github.com/lib/pq" + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/null" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + medianconfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/median/config" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + 
"github.com/goplugin/pluginv3.0/v2/core/store/models"
+)
+
+// Sentinel errors for job specs that reference keystore entries which do not
+// exist; CreateJob wraps these when key validation fails.
+var (
+	ErrNoSuchKeyBundle = errors.New("no such key bundle exists")
+	ErrNoSuchTransmitterKey = errors.New("no such transmitter key exists")
+	ErrNoSuchSendingKey = errors.New("no such sending key exists")
+	ErrNoSuchPublicKey = errors.New("no such public key exists")
+)
+
+//go:generate mockery --quiet --name ORM --output ./mocks/ --case=underscore
+
+// ORM is the data-access layer for jobs, their type-specific specs, spec
+// errors, and associated pipeline runs.
+type ORM interface {
+	InsertWebhookSpec(webhookSpec *WebhookSpec, qopts ...pg.QOpt) error
+	InsertJob(job *Job, qopts ...pg.QOpt) error
+	CreateJob(jb *Job, qopts ...pg.QOpt) error
+	FindJobs(offset, limit int) ([]Job, int, error)
+	FindJobTx(ctx context.Context, id int32) (Job, error)
+	FindJob(ctx context.Context, id int32) (Job, error)
+	FindJobByExternalJobID(uuid uuid.UUID, qopts ...pg.QOpt) (Job, error)
+	FindJobIDByAddress(address ethkey.EIP55Address, evmChainID *big.Big, qopts ...pg.QOpt) (int32, error)
+	FindOCR2JobIDByAddress(contractID string, feedID *common.Hash, qopts ...pg.QOpt) (int32, error)
+	FindJobIDsWithBridge(name string) ([]int32, error)
+	DeleteJob(id int32, qopts ...pg.QOpt) error
+	RecordError(jobID int32, description string, qopts ...pg.QOpt) error
+	// TryRecordError is a helper which calls RecordError and logs the returned error if present.
+ TryRecordError(jobID int32, description string, qopts ...pg.QOpt) + DismissError(ctx context.Context, errorID int64) error + FindSpecError(id int64, qopts ...pg.QOpt) (SpecError, error) + Close() error + PipelineRuns(jobID *int32, offset, size int) ([]pipeline.Run, int, error) + + FindPipelineRunIDsByJobID(jobID int32, offset, limit int) (ids []int64, err error) + FindPipelineRunsByIDs(ids []int64) (runs []pipeline.Run, err error) + CountPipelineRunsByJobID(jobID int32) (count int32, err error) + + FindJobsByPipelineSpecIDs(ids []int32) ([]Job, error) + FindPipelineRunByID(id int64) (pipeline.Run, error) + + FindSpecErrorsByJobIDs(ids []int32, qopts ...pg.QOpt) ([]SpecError, error) + FindJobWithoutSpecErrors(id int32) (jb Job, err error) + + FindTaskResultByRunIDAndTaskName(runID int64, taskName string, qopts ...pg.QOpt) ([]byte, error) + AssertBridgesExist(p pipeline.Pipeline) error +} + +type ORMConfig interface { + DatabaseDefaultQueryTimeout() time.Duration +} + +type orm struct { + q pg.Q + keyStore keystore.Master + pipelineORM pipeline.ORM + lggr logger.SugaredLogger + cfg pg.QConfig + bridgeORM bridges.ORM +} + +var _ ORM = (*orm)(nil) + +func NewORM(db *sqlx.DB, pipelineORM pipeline.ORM, bridgeORM bridges.ORM, keyStore keystore.Master, lggr logger.Logger, cfg pg.QConfig) *orm { + namedLogger := logger.Sugared(lggr.Named("JobORM")) + return &orm{ + q: pg.NewQ(db, namedLogger, cfg), + keyStore: keyStore, + pipelineORM: pipelineORM, + bridgeORM: bridgeORM, + lggr: namedLogger, + cfg: cfg, + } +} +func (o *orm) Close() error { + return nil +} + +func (o *orm) AssertBridgesExist(p pipeline.Pipeline) error { + var bridgeNames = make(map[bridges.BridgeName]struct{}) + var uniqueBridges []bridges.BridgeName + for _, task := range p.Tasks { + if task.Type() == pipeline.TaskTypeBridge { + // Bridge must exist + name := task.(*pipeline.BridgeTask).Name + bridge, err := bridges.ParseBridgeName(name) + if err != nil { + return err + } + if _, have := 
bridgeNames[bridge]; have { + continue + } + bridgeNames[bridge] = struct{}{} + uniqueBridges = append(uniqueBridges, bridge) + } + } + if len(uniqueBridges) != 0 { + _, err := o.bridgeORM.FindBridges(uniqueBridges) + if err != nil { + return err + } + } + return nil +} + +// CreateJob creates the job, and it's associated spec record. +// Expects an unmarshalled job spec as the jb argument i.e. output from ValidatedXX. +// Scans all persisted records back into jb +func (o *orm) CreateJob(jb *Job, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + p := jb.Pipeline + if err := o.AssertBridgesExist(p); err != nil { + return err + } + + var jobID int32 + err := q.Transaction(func(tx pg.Queryer) error { + // Autogenerate a job ID if not specified + if jb.ExternalJobID == (uuid.UUID{}) { + jb.ExternalJobID = uuid.New() + } + + switch jb.Type { + case DirectRequest: + if jb.DirectRequestSpec.EVMChainID == nil { + return errors.New("evm chain id must be defined") + } + var specID int32 + sql := `INSERT INTO direct_request_specs (contract_address, min_incoming_confirmations, requesters, min_contract_payment, evm_chain_id, created_at, updated_at) + VALUES (:contract_address, :min_incoming_confirmations, :requesters, :min_contract_payment, :evm_chain_id, now(), now()) + RETURNING id;` + if err := pg.PrepareQueryRowx(tx, sql, &specID, jb.DirectRequestSpec); err != nil { + return errors.Wrap(err, "failed to create DirectRequestSpec") + } + jb.DirectRequestSpecID = &specID + case FluxMonitor: + if jb.FluxMonitorSpec.EVMChainID == nil { + return errors.New("evm chain id must be defined") + } + var specID int32 + sql := `INSERT INTO flux_monitor_specs (contract_address, threshold, absolute_threshold, poll_timer_period, poll_timer_disabled, idle_timer_period, idle_timer_disabled, + drumbeat_schedule, drumbeat_random_delay, drumbeat_enabled, min_payment, evm_chain_id, created_at, updated_at) + VALUES (:contract_address, :threshold, :absolute_threshold, :poll_timer_period, 
:poll_timer_disabled, :idle_timer_period, :idle_timer_disabled, + :drumbeat_schedule, :drumbeat_random_delay, :drumbeat_enabled, :min_payment, :evm_chain_id, NOW(), NOW()) + RETURNING id;` + if err := pg.PrepareQueryRowx(tx, sql, &specID, jb.FluxMonitorSpec); err != nil { + return errors.Wrap(err, "failed to create FluxMonitorSpec") + } + jb.FluxMonitorSpecID = &specID + case OffchainReporting: + if jb.OCROracleSpec.EVMChainID == nil { + return errors.New("evm chain id must be defined") + } + + var specID int32 + if jb.OCROracleSpec.EncryptedOCRKeyBundleID != nil { + _, err := o.keyStore.OCR().Get(jb.OCROracleSpec.EncryptedOCRKeyBundleID.String()) + if err != nil { + return errors.Wrapf(ErrNoSuchKeyBundle, "no key bundle with id: %x", jb.OCROracleSpec.EncryptedOCRKeyBundleID) + } + } + if jb.OCROracleSpec.TransmitterAddress != nil { + _, err := o.keyStore.Eth().Get(jb.OCROracleSpec.TransmitterAddress.Hex()) + if err != nil { + return errors.Wrapf(ErrNoSuchTransmitterKey, "no key matching transmitter address: %s", jb.OCROracleSpec.TransmitterAddress.Hex()) + } + } + + newChainID := jb.OCROracleSpec.EVMChainID + existingSpec := new(OCROracleSpec) + err := tx.Get(existingSpec, `SELECT * FROM ocr_oracle_specs WHERE contract_address = $1 and (evm_chain_id = $2 or evm_chain_id IS NULL) LIMIT 1;`, + jb.OCROracleSpec.ContractAddress, newChainID, + ) + + if !errors.Is(err, sql.ErrNoRows) { + if err != nil { + return errors.Wrap(err, "failed to validate OffchainreportingOracleSpec on creation") + } + + return errors.Errorf("a job with contract address %s already exists for chain ID %s", jb.OCROracleSpec.ContractAddress, newChainID) + } + + sql := `INSERT INTO ocr_oracle_specs (contract_address, p2pv2_bootstrappers, is_bootstrap_peer, encrypted_ocr_key_bundle_id, transmitter_address, + observation_timeout, blockchain_timeout, contract_config_tracker_subscribe_interval, contract_config_tracker_poll_interval, contract_config_confirmations, evm_chain_id, + created_at, 
updated_at, database_timeout, observation_grace_period, contract_transmitter_transmit_timeout) + VALUES (:contract_address, :p2pv2_bootstrappers, :is_bootstrap_peer, :encrypted_ocr_key_bundle_id, :transmitter_address, + :observation_timeout, :blockchain_timeout, :contract_config_tracker_subscribe_interval, :contract_config_tracker_poll_interval, :contract_config_confirmations, :evm_chain_id, + NOW(), NOW(), :database_timeout, :observation_grace_period, :contract_transmitter_transmit_timeout) + RETURNING id;` + err = pg.PrepareQueryRowx(tx, sql, &specID, jb.OCROracleSpec) + if err != nil { + return errors.Wrap(err, "failed to create OffchainreportingOracleSpec") + } + jb.OCROracleSpecID = &specID + case OffchainReporting2: + var specID int32 + + if jb.OCR2OracleSpec.OCRKeyBundleID.Valid { + _, err := o.keyStore.OCR2().Get(jb.OCR2OracleSpec.OCRKeyBundleID.String) + if err != nil { + return errors.Wrapf(ErrNoSuchKeyBundle, "no key bundle with id: %q", jb.OCR2OracleSpec.OCRKeyBundleID.ValueOrZero()) + } + } + + if jb.OCR2OracleSpec.RelayConfig["sendingKeys"] != nil && jb.OCR2OracleSpec.TransmitterID.Valid { + return errors.New("sending keys and transmitter ID can't both be defined") + } + + // checks if they are present and if they are valid + sendingKeysDefined, err := areSendingKeysDefined(jb, o.keyStore) + if err != nil { + return err + } + + if !sendingKeysDefined && !jb.OCR2OracleSpec.TransmitterID.Valid { + return errors.New("neither sending keys nor transmitter ID is defined") + } + + if !sendingKeysDefined { + if err = ValidateKeyStoreMatch(jb.OCR2OracleSpec, o.keyStore, jb.OCR2OracleSpec.TransmitterID.String); err != nil { + return errors.Wrap(ErrNoSuchTransmitterKey, err.Error()) + } + } + + if jb.ForwardingAllowed && !slices.Contains(ForwardersSupportedPlugins, jb.OCR2OracleSpec.PluginType) { + return errors.Errorf("forwarding is not currently supported for %s jobs", jb.OCR2OracleSpec.PluginType) + } + + if jb.OCR2OracleSpec.PluginType == types.Mercury { + 
if jb.OCR2OracleSpec.FeedID == nil { + return errors.New("feed ID is required for mercury plugin type") + } + } else { + if jb.OCR2OracleSpec.FeedID != nil { + return errors.New("feed ID is not currently supported for non-mercury jobs") + } + } + + if jb.OCR2OracleSpec.PluginType == types.Median { + var cfg medianconfig.PluginConfig + err2 := json.Unmarshal(jb.OCR2OracleSpec.PluginConfig.Bytes(), &cfg) + if err2 != nil { + return errors.Wrap(err2, "failed to parse plugin config") + } + feePipeline, err2 := pipeline.Parse(cfg.JuelsPerFeeCoinPipeline) + if err2 != nil { + return err2 + } + if err2 = o.AssertBridgesExist(*feePipeline); err2 != nil { + return err2 + } + } + + sql := `INSERT INTO ocr2_oracle_specs (contract_id, feed_id, relay, relay_config, plugin_type, plugin_config, p2pv2_bootstrappers, ocr_key_bundle_id, transmitter_id, + blockchain_timeout, contract_config_tracker_poll_interval, contract_config_confirmations, + created_at, updated_at) + VALUES (:contract_id, :feed_id, :relay, :relay_config, :plugin_type, :plugin_config, :p2pv2_bootstrappers, :ocr_key_bundle_id, :transmitter_id, + :blockchain_timeout, :contract_config_tracker_poll_interval, :contract_config_confirmations, + NOW(), NOW()) + RETURNING id;` + err = pg.PrepareQueryRowx(tx, sql, &specID, jb.OCR2OracleSpec) + if err != nil { + return errors.Wrap(err, "failed to create Offchainreporting2OracleSpec") + } + jb.OCR2OracleSpecID = &specID + case Keeper: + if jb.KeeperSpec.EVMChainID == nil { + return errors.New("evm chain id must be defined") + } + var specID int32 + sql := `INSERT INTO keeper_specs (contract_address, from_address, evm_chain_id, created_at, updated_at) + VALUES (:contract_address, :from_address, :evm_chain_id, NOW(), NOW()) + RETURNING id;` + if err := pg.PrepareQueryRowx(tx, sql, &specID, jb.KeeperSpec); err != nil { + return errors.Wrap(err, "failed to create KeeperSpec") + } + jb.KeeperSpecID = &specID + case Cron: + var specID int32 + sql := `INSERT INTO cron_specs 
(cron_schedule, created_at, updated_at) + VALUES (:cron_schedule, NOW(), NOW()) + RETURNING id;` + if err := pg.PrepareQueryRowx(tx, sql, &specID, jb.CronSpec); err != nil { + return errors.Wrap(err, "failed to create CronSpec") + } + jb.CronSpecID = &specID + case VRF: + if jb.VRFSpec.EVMChainID == nil { + return errors.New("evm chain id must be defined") + } + var specID int32 + sql := `INSERT INTO vrf_specs ( + coordinator_address, public_key, min_incoming_confirmations, + evm_chain_id, from_addresses, poll_period, requested_confs_delay, + request_timeout, chunk_size, batch_coordinator_address, batch_fulfillment_enabled, + batch_fulfillment_gas_multiplier, backoff_initial_delay, backoff_max_delay, gas_lane_price, + vrf_owner_address, custom_reverts_pipeline_enabled, + created_at, updated_at) + VALUES ( + :coordinator_address, :public_key, :min_incoming_confirmations, + :evm_chain_id, :from_addresses, :poll_period, :requested_confs_delay, + :request_timeout, :chunk_size, :batch_coordinator_address, :batch_fulfillment_enabled, + :batch_fulfillment_gas_multiplier, :backoff_initial_delay, :backoff_max_delay, :gas_lane_price, + :vrf_owner_address, :custom_reverts_pipeline_enabled, + NOW(), NOW()) + RETURNING id;` + + err := pg.PrepareQueryRowx(tx, sql, &specID, toVRFSpecRow(jb.VRFSpec)) + var pqErr *pgconn.PgError + ok := errors.As(err, &pqErr) + if err != nil && ok && pqErr.Code == "23503" { + if pqErr.ConstraintName == "vrf_specs_public_key_fkey" { + return errors.Wrapf(ErrNoSuchPublicKey, "%s", jb.VRFSpec.PublicKey.String()) + } + } + if err != nil { + return errors.Wrap(err, "failed to create VRFSpec") + } + jb.VRFSpecID = &specID + case Webhook: + err := o.InsertWebhookSpec(jb.WebhookSpec, pg.WithQueryer(tx)) + if err != nil { + return errors.Wrap(err, "failed to create WebhookSpec") + } + jb.WebhookSpecID = &jb.WebhookSpec.ID + + if len(jb.WebhookSpec.ExternalInitiatorWebhookSpecs) > 0 { + for i := range jb.WebhookSpec.ExternalInitiatorWebhookSpecs { + 
jb.WebhookSpec.ExternalInitiatorWebhookSpecs[i].WebhookSpecID = jb.WebhookSpec.ID + } + sql := `INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) + VALUES (:external_initiator_id, :webhook_spec_id, :spec);` + query, args, err := tx.BindNamed(sql, jb.WebhookSpec.ExternalInitiatorWebhookSpecs) + if err != nil { + return errors.Wrap(err, "failed to bindquery for ExternalInitiatorWebhookSpecs") + } + if _, err = tx.Exec(query, args...); err != nil { + return errors.Wrap(err, "failed to create ExternalInitiatorWebhookSpecs") + } + } + case BlockhashStore: + if jb.BlockhashStoreSpec.EVMChainID == nil { + return errors.New("evm chain id must be defined") + } + var specID int32 + sql := `INSERT INTO blockhash_store_specs (coordinator_v1_address, coordinator_v2_address, coordinator_v2_plus_address, trusted_blockhash_store_address, trusted_blockhash_store_batch_size, wait_blocks, lookback_blocks, heartbeat_period, blockhash_store_address, poll_period, run_timeout, evm_chain_id, from_addresses, created_at, updated_at) + VALUES (:coordinator_v1_address, :coordinator_v2_address, :coordinator_v2_plus_address, :trusted_blockhash_store_address, :trusted_blockhash_store_batch_size, :wait_blocks, :lookback_blocks, :heartbeat_period, :blockhash_store_address, :poll_period, :run_timeout, :evm_chain_id, :from_addresses, NOW(), NOW()) + RETURNING id;` + if err := pg.PrepareQueryRowx(tx, sql, &specID, toBlockhashStoreSpecRow(jb.BlockhashStoreSpec)); err != nil { + return errors.Wrap(err, "failed to create BlockhashStore spec") + } + jb.BlockhashStoreSpecID = &specID + case BlockHeaderFeeder: + if jb.BlockHeaderFeederSpec.EVMChainID == nil { + return errors.New("evm chain id must be defined") + } + var specID int32 + sql := `INSERT INTO block_header_feeder_specs (coordinator_v1_address, coordinator_v2_address, coordinator_v2_plus_address, wait_blocks, lookback_blocks, blockhash_store_address, batch_blockhash_store_address, poll_period, 
run_timeout, evm_chain_id, from_addresses, get_blockhashes_batch_size, store_blockhashes_batch_size, created_at, updated_at) + VALUES (:coordinator_v1_address, :coordinator_v2_address, :coordinator_v2_plus_address, :wait_blocks, :lookback_blocks, :blockhash_store_address, :batch_blockhash_store_address, :poll_period, :run_timeout, :evm_chain_id, :from_addresses, :get_blockhashes_batch_size, :store_blockhashes_batch_size, NOW(), NOW()) + RETURNING id;` + if err := pg.PrepareQueryRowx(tx, sql, &specID, toBlockHeaderFeederSpecRow(jb.BlockHeaderFeederSpec)); err != nil { + return errors.Wrap(err, "failed to create BlockHeaderFeeder spec") + } + jb.BlockHeaderFeederSpecID = &specID + case LegacyGasStationServer: + if jb.LegacyGasStationServerSpec.EVMChainID == nil { + return errors.New("evm chain id must be defined") + } + var specID int32 + sql := `INSERT INTO legacy_gas_station_server_specs (forwarder_address, evm_chain_id, ccip_chain_selector, from_addresses, created_at, updated_at) + VALUES (:forwarder_address, :evm_chain_id, :ccip_chain_selector, :from_addresses, NOW(), NOW()) + RETURNING id;` + if err := pg.PrepareQueryRowx(tx, sql, &specID, toLegacyGasStationServerSpecRow(jb.LegacyGasStationServerSpec)); err != nil { + return errors.Wrap(err, "failed to create LegacyGasStationServer spec") + } + jb.LegacyGasStationServerSpecID = &specID + case LegacyGasStationSidecar: + if jb.LegacyGasStationSidecarSpec.EVMChainID == nil { + return errors.New("evm chain id must be defined") + } + var specID int32 + sql := `INSERT INTO legacy_gas_station_sidecar_specs (forwarder_address, off_ramp_address, lookback_blocks, poll_period, run_timeout, evm_chain_id, ccip_chain_selector, created_at, updated_at) + VALUES (:forwarder_address, :off_ramp_address, :lookback_blocks, :poll_period, :run_timeout, :evm_chain_id, :ccip_chain_selector, NOW(), NOW()) + RETURNING id;` + if err := pg.PrepareQueryRowx(tx, sql, &specID, jb.LegacyGasStationSidecarSpec); err != nil { + return 
errors.Wrap(err, "failed to create LegacyGasStationSidecar spec") + } + jb.LegacyGasStationSidecarSpecID = &specID + case Bootstrap: + var specID int32 + sql := `INSERT INTO bootstrap_specs (contract_id, feed_id, relay, relay_config, monitoring_endpoint, + blockchain_timeout, contract_config_tracker_poll_interval, + contract_config_confirmations, created_at, updated_at) + VALUES (:contract_id, :feed_id, :relay, :relay_config, :monitoring_endpoint, + :blockchain_timeout, :contract_config_tracker_poll_interval, + :contract_config_confirmations, NOW(), NOW()) + RETURNING id;` + if err := pg.PrepareQueryRowx(tx, sql, &specID, jb.BootstrapSpec); err != nil { + return errors.Wrap(err, "failed to create BootstrapSpec for jobSpec") + } + jb.BootstrapSpecID = &specID + case Gateway: + var specID int32 + sql := `INSERT INTO gateway_specs (gateway_config, created_at, updated_at) + VALUES (:gateway_config, NOW(), NOW()) + RETURNING id;` + if err := pg.PrepareQueryRowx(tx, sql, &specID, jb.GatewaySpec); err != nil { + return errors.Wrap(err, "failed to create GatewaySpec for jobSpec") + } + jb.GatewaySpecID = &specID + case Stream: + // 'stream' type has no associated spec, nothing to do here + case Workflow: + // 'workflow' type has no associated spec, nothing to do here + default: + o.lggr.Panicf("Unsupported jb.Type: %v", jb.Type) + } + + pipelineSpecID, err := o.pipelineORM.CreateSpec(p, jb.MaxTaskDuration, pg.WithQueryer(tx)) + if err != nil { + return errors.Wrap(err, "failed to create pipeline spec") + } + + jb.PipelineSpecID = pipelineSpecID + + err = o.InsertJob(jb, pg.WithQueryer(tx)) + jobID = jb.ID + return errors.Wrap(err, "failed to insert job") + }) + if err != nil { + return errors.Wrap(err, "CreateJobFailed") + } + + return o.findJob(jb, "id", jobID, qopts...) 
+}
+
+// ValidateKeyStoreMatch confirms that the key has a valid match in the keystore.
+// For Mercury plugins the key is looked up in the CSA keystore; for all other
+// plugin types it is looked up in the keystore for the spec's relay network.
+// NOTE(review): relay networks without a case in the switch below fall through
+// and pass validation silently — confirm this is intended.
+func ValidateKeyStoreMatch(spec *OCR2OracleSpec, keyStore keystore.Master, key string) error {
+	if spec.PluginType == types.Mercury {
+		_, err := keyStore.CSA().Get(key)
+		if err != nil {
+			return errors.Errorf("no CSA key matching: %q", key)
+		}
+	} else {
+		switch spec.Relay {
+		case relay.EVM:
+			_, err := keyStore.Eth().Get(key)
+			if err != nil {
+				return errors.Errorf("no EVM key matching: %q", key)
+			}
+		case relay.Cosmos:
+			_, err := keyStore.Cosmos().Get(key)
+			if err != nil {
+				return errors.Errorf("no Cosmos key matching: %q", key)
+			}
+		case relay.Solana:
+			_, err := keyStore.Solana().Get(key)
+			if err != nil {
+				return errors.Errorf("no Solana key matching: %q", key)
+			}
+		case relay.StarkNet:
+			_, err := keyStore.StarkNet().Get(key)
+			if err != nil {
+				return errors.Errorf("no Starknet key matching: %q", key)
+			}
+		}
+	}
+	return nil
+}
+
+// areSendingKeysDefined reports whether the spec's relay config declares
+// "sendingKeys". When present, every listed key is also validated against the
+// keystore; an invalid key yields an error wrapping ErrNoSuchSendingKey.
+func areSendingKeysDefined(jb *Job, keystore keystore.Master) (bool, error) {
+	if jb.OCR2OracleSpec.RelayConfig["sendingKeys"] != nil {
+		sendingKeys, err := SendingKeysForJob(jb)
+		if err != nil {
+			return false, err
+		}
+
+		for _, sendingKey := range sendingKeys {
+			if err = ValidateKeyStoreMatch(jb.OCR2OracleSpec, keystore, sendingKey); err != nil {
+				return false, errors.Wrap(ErrNoSuchSendingKey, err.Error())
+			}
+		}
+
+		return true, nil
+	}
+	return false, nil
+}
+
+// InsertWebhookSpec inserts a new webhook_specs row (timestamps only) and
+// scans the persisted row, including its generated ID, back into webhookSpec.
+func (o *orm) InsertWebhookSpec(webhookSpec *WebhookSpec, qopts ...pg.QOpt) error {
+	q := o.q.WithOpts(qopts...)
+	query := `INSERT INTO webhook_specs (created_at, updated_at)
+	VALUES (NOW(), NOW())
+	RETURNING *;`
+	return q.GetNamed(query, webhookSpec, webhookSpec)
+}
+
+// InsertJob inserts the jobs row and scans the persisted record (including
+// any generated ID and timestamps) back into job.
+func (o *orm) InsertJob(job *Job, qopts ...pg.QOpt) error {
+	q := o.q.WithOpts(qopts...)
+	var query string
+
+	// If the job already carries an ID, insert it explicitly; otherwise let
+	// the database assign a new one.
+ if job.ID == 0 { + query = `INSERT INTO jobs (pipeline_spec_id, name, stream_id, schema_version, type, max_task_duration, ocr_oracle_spec_id, ocr2_oracle_spec_id, direct_request_spec_id, flux_monitor_spec_id, + keeper_spec_id, cron_spec_id, vrf_spec_id, webhook_spec_id, blockhash_store_spec_id, bootstrap_spec_id, block_header_feeder_spec_id, gateway_spec_id, + legacy_gas_station_server_spec_id, legacy_gas_station_sidecar_spec_id, external_job_id, gas_limit, forwarding_allowed, created_at) + VALUES (:pipeline_spec_id, :name, :stream_id, :schema_version, :type, :max_task_duration, :ocr_oracle_spec_id, :ocr2_oracle_spec_id, :direct_request_spec_id, :flux_monitor_spec_id, + :keeper_spec_id, :cron_spec_id, :vrf_spec_id, :webhook_spec_id, :blockhash_store_spec_id, :bootstrap_spec_id, :block_header_feeder_spec_id, :gateway_spec_id, + :legacy_gas_station_server_spec_id, :legacy_gas_station_sidecar_spec_id, :external_job_id, :gas_limit, :forwarding_allowed, NOW()) + RETURNING *;` + } else { + query = `INSERT INTO jobs (id, pipeline_spec_id, name, stream_id, schema_version, type, max_task_duration, ocr_oracle_spec_id, ocr2_oracle_spec_id, direct_request_spec_id, flux_monitor_spec_id, + keeper_spec_id, cron_spec_id, vrf_spec_id, webhook_spec_id, blockhash_store_spec_id, bootstrap_spec_id, block_header_feeder_spec_id, gateway_spec_id, + legacy_gas_station_server_spec_id, legacy_gas_station_sidecar_spec_id, external_job_id, gas_limit, forwarding_allowed, created_at) + VALUES (:id, :pipeline_spec_id, :name, :stream_id, :schema_version, :type, :max_task_duration, :ocr_oracle_spec_id, :ocr2_oracle_spec_id, :direct_request_spec_id, :flux_monitor_spec_id, + :keeper_spec_id, :cron_spec_id, :vrf_spec_id, :webhook_spec_id, :blockhash_store_spec_id, :bootstrap_spec_id, :block_header_feeder_spec_id, :gateway_spec_id, + :legacy_gas_station_server_spec_id, :legacy_gas_station_sidecar_spec_id, :external_job_id, :gas_limit, :forwarding_allowed, NOW()) + RETURNING *;` + } + return 
q.GetNamed(query, job, job)
}

// DeleteJob removes a job
func (o *orm) DeleteJob(id int32, qopts ...pg.QOpt) error {
	o.lggr.Debugw("Deleting job", "jobID", id)
	// Added a 1 minute timeout to this query since this can take a long time as data increases.
	// This was added specifically due to an issue with a database that had a millions of pipeline_runs and pipeline_task_runs
	// and this query was taking ~40secs.
	qopts = append(qopts, pg.WithLongQueryTimeout())
	q := o.q.WithOpts(qopts...)
	// Single statement: the first CTE deletes the jobs row and RETURNING exposes
	// its per-type spec foreign keys; each following CTE deletes the matching
	// row from one spec table; the outer DELETE removes the pipeline spec.
	query := `
		WITH deleted_jobs AS (
			DELETE FROM jobs WHERE id = $1 RETURNING
				pipeline_spec_id,
				ocr_oracle_spec_id,
				ocr2_oracle_spec_id,
				keeper_spec_id,
				cron_spec_id,
				flux_monitor_spec_id,
				vrf_spec_id,
				webhook_spec_id,
				direct_request_spec_id,
				blockhash_store_spec_id,
				bootstrap_spec_id,
				block_header_feeder_spec_id,
				gateway_spec_id
		),
		deleted_oracle_specs AS (
			DELETE FROM ocr_oracle_specs WHERE id IN (SELECT ocr_oracle_spec_id FROM deleted_jobs)
		),
		deleted_oracle2_specs AS (
			DELETE FROM ocr2_oracle_specs WHERE id IN (SELECT ocr2_oracle_spec_id FROM deleted_jobs)
		),
		deleted_keeper_specs AS (
			DELETE FROM keeper_specs WHERE id IN (SELECT keeper_spec_id FROM deleted_jobs)
		),
		deleted_cron_specs AS (
			DELETE FROM cron_specs WHERE id IN (SELECT cron_spec_id FROM deleted_jobs)
		),
		deleted_fm_specs AS (
			DELETE FROM flux_monitor_specs WHERE id IN (SELECT flux_monitor_spec_id FROM deleted_jobs)
		),
		deleted_vrf_specs AS (
			DELETE FROM vrf_specs WHERE id IN (SELECT vrf_spec_id FROM deleted_jobs)
		),
		deleted_webhook_specs AS (
			DELETE FROM webhook_specs WHERE id IN (SELECT webhook_spec_id FROM deleted_jobs)
		),
		deleted_dr_specs AS (
			DELETE FROM direct_request_specs WHERE id IN (SELECT direct_request_spec_id FROM deleted_jobs)
		),
		deleted_blockhash_store_specs AS (
			DELETE FROM blockhash_store_specs WHERE id IN (SELECT blockhash_store_spec_id FROM deleted_jobs)
		),
		deleted_bootstrap_specs AS (
			DELETE FROM bootstrap_specs WHERE id IN (SELECT bootstrap_spec_id FROM deleted_jobs)
		),
		deleted_block_header_feeder_specs AS (
			DELETE FROM block_header_feeder_specs WHERE id IN (SELECT block_header_feeder_spec_id FROM deleted_jobs)
		),
		deleted_gateway_specs AS (
			DELETE FROM gateway_specs WHERE id IN (SELECT gateway_spec_id FROM deleted_jobs)
		)
		DELETE FROM pipeline_specs WHERE id IN (SELECT pipeline_spec_id FROM deleted_jobs)`
	res, cancel, err := q.ExecQIter(query, id)
	defer cancel()
	if err != nil {
		return errors.Wrap(err, "DeleteJob failed to delete job")
	}
	rowsAffected, err := res.RowsAffected()
	if err != nil {
		return errors.Wrap(err, "DeleteJob failed getting RowsAffected")
	}
	// Zero rows affected means no job matched id.
	if rowsAffected == 0 {
		return sql.ErrNoRows
	}
	o.lggr.Debugw("Deleted job", "jobID", id)
	return nil
}

// RecordError upserts a job_spec_errors row: first occurrence inserts a new
// row, repeats bump occurrences and refresh updated_at.
func (o *orm) RecordError(jobID int32, description string, qopts ...pg.QOpt) error {
	q := o.q.WithOpts(qopts...)
	sql := `INSERT INTO job_spec_errors (job_id, description, occurrences, created_at, updated_at)
	VALUES ($1, $2, 1, $3, $3)
	ON CONFLICT (job_id, description) DO UPDATE SET
	occurrences = job_spec_errors.occurrences + 1,
	updated_at = excluded.updated_at`
	err := q.ExecQ(sql, jobID, description, time.Now())
	// Noop if the job has been deleted.
	// 23503 is Postgres foreign_key_violation; only the job-id FK is swallowed.
	var pqErr *pgconn.PgError
	ok := errors.As(err, &pqErr)
	if err != nil && ok && pqErr.Code == "23503" {
		if pqErr.ConstraintName == "job_spec_errors_v2_job_id_fkey" {
			return nil
		}
	}
	return err
}

// TryRecordError is a best-effort RecordError: any failure is logged rather
// than returned.
func (o *orm) TryRecordError(jobID int32, description string, qopts ...pg.QOpt) {
	err := o.RecordError(jobID, description, qopts...)
	o.lggr.ErrorIf(err, fmt.Sprintf("Error creating SpecError %v", description))
}

// DismissError deletes the job_spec_errors row with the given ID, returning
// sql.ErrNoRows if it did not exist.
func (o *orm) DismissError(ctx context.Context, ID int64) error {
	q := o.q.WithOpts(pg.WithParentCtx(ctx))
	res, cancel, err := q.ExecQIter("DELETE FROM job_spec_errors WHERE id = $1", ID)
	defer cancel()
	if err != nil {
		return errors.Wrap(err, "failed to dismiss error")
	}
	n, err := res.RowsAffected()
	if err != nil {
		return errors.Wrap(err, "failed to dismiss error")
	}
	if n == 0 {
		return sql.ErrNoRows
	}
	return nil
}

// FindSpecError fetches a single job_spec_errors row by ID.
func (o *orm) FindSpecError(id int64, qopts ...pg.QOpt) (SpecError, error) {
	stmt := `SELECT * FROM job_spec_errors WHERE id = $1;`

	specErr := new(SpecError)
	err := o.q.WithOpts(qopts...).Get(specErr, stmt, id)

	return *specErr, errors.Wrap(err, "FindSpecError failed")
}

// FindJobs returns a page of jobs (newest first) plus the total job count,
// with all job-type relations preloaded.
func (o *orm) FindJobs(offset, limit int) (jobs []Job, count int, err error) {
	err = o.q.Transaction(func(tx pg.Queryer) error {
		sql := `SELECT count(*) FROM jobs;`
		err = tx.QueryRowx(sql).Scan(&count)
		if err != nil {
			return err
		}

		sql = `SELECT * FROM jobs ORDER BY created_at DESC, id DESC OFFSET $1 LIMIT $2;`
		err = tx.Select(&jobs, sql, offset, limit)
		if err != nil {
			return err
		}

		err = LoadAllJobsTypes(tx, jobs)
		if err != nil {
			return err
		}

		return nil
	})
	return jobs, count, err
}

// LoadDefaultVRFPollPeriod fills in a 5s default when the spec's PollPeriod
// is unset (zero).
func LoadDefaultVRFPollPeriod(vrfs VRFSpec) *VRFSpec {
	if vrfs.PollPeriod == 0 {
		vrfs.PollPeriod = 5 * time.Second
	}

	return &vrfs
}

// SetDRMinIncomingConfirmations takes the largest of the global vs specific.
+func SetDRMinIncomingConfirmations(defaultMinIncomingConfirmations uint32, drs DirectRequestSpec) *DirectRequestSpec { + if !drs.MinIncomingConfirmations.Valid || drs.MinIncomingConfirmations.Uint32 < defaultMinIncomingConfirmations { + drs.MinIncomingConfirmations = null.Uint32From(defaultMinIncomingConfirmations) + } + return &drs +} + +type OCRConfig interface { + BlockchainTimeout() time.Duration + CaptureEATelemetry() bool + ContractPollInterval() time.Duration + ContractSubscribeInterval() time.Duration + KeyBundleID() (string, error) + ObservationTimeout() time.Duration + TransmitterAddress() (ethkey.EIP55Address, error) +} + +// LoadConfigVarsLocalOCR loads local OCR vars into the OCROracleSpec. +func LoadConfigVarsLocalOCR(evmOcrCfg evmconfig.OCR, os OCROracleSpec, ocrCfg OCRConfig) *OCROracleSpec { + if os.ObservationTimeout == 0 { + os.ObservationTimeout = models.Interval(ocrCfg.ObservationTimeout()) + } + if os.BlockchainTimeout == 0 { + os.BlockchainTimeout = models.Interval(ocrCfg.BlockchainTimeout()) + } + if os.ContractConfigTrackerSubscribeInterval == 0 { + os.ContractConfigTrackerSubscribeInterval = models.Interval(ocrCfg.ContractSubscribeInterval()) + } + if os.ContractConfigTrackerPollInterval == 0 { + os.ContractConfigTrackerPollInterval = models.Interval(ocrCfg.ContractPollInterval()) + } + if os.ContractConfigConfirmations == 0 { + os.ContractConfigConfirmations = evmOcrCfg.ContractConfirmations() + } + if os.DatabaseTimeout == nil { + os.DatabaseTimeout = models.NewInterval(evmOcrCfg.DatabaseTimeout()) + } + if os.ObservationGracePeriod == nil { + os.ObservationGracePeriod = models.NewInterval(evmOcrCfg.ObservationGracePeriod()) + } + if os.ContractTransmitterTransmitTimeout == nil { + os.ContractTransmitterTransmitTimeout = models.NewInterval(evmOcrCfg.ContractTransmitterTransmitTimeout()) + } + os.CaptureEATelemetry = ocrCfg.CaptureEATelemetry() + + return &os +} + +// LoadConfigVarsOCR loads OCR config vars into the OCROracleSpec. 
+func LoadConfigVarsOCR(evmOcrCfg evmconfig.OCR, ocrCfg OCRConfig, os OCROracleSpec) (*OCROracleSpec, error) { + if os.TransmitterAddress == nil { + ta, err := ocrCfg.TransmitterAddress() + if !errors.Is(errors.Cause(err), config.ErrEnvUnset) { + if err != nil { + return nil, err + } + os.TransmitterAddress = &ta + } + } + + if os.EncryptedOCRKeyBundleID == nil { + kb, err := ocrCfg.KeyBundleID() + if err != nil { + return nil, err + } + encryptedOCRKeyBundleID, err := models.Sha256HashFromHex(kb) + if err != nil { + return nil, err + } + os.EncryptedOCRKeyBundleID = &encryptedOCRKeyBundleID + } + + return LoadConfigVarsLocalOCR(evmOcrCfg, os, ocrCfg), nil +} + +func (o *orm) FindJobTx(ctx context.Context, id int32) (Job, error) { + ctx, cancel := context.WithTimeout(ctx, o.cfg.DefaultQueryTimeout()) + defer cancel() + return o.FindJob(ctx, id) +} + +// FindJob returns job by ID, with all relations preloaded +func (o *orm) FindJob(ctx context.Context, id int32) (jb Job, err error) { + err = o.findJob(&jb, "id", id, pg.WithParentCtx(ctx)) + return +} + +// FindJobWithoutSpecErrors returns a job by ID, without loading Spec Errors preloaded +func (o *orm) FindJobWithoutSpecErrors(id int32) (jb Job, err error) { + err = o.q.Transaction(func(tx pg.Queryer) error { + stmt := "SELECT * FROM jobs WHERE id = $1 LIMIT 1" + err = tx.Get(&jb, stmt, id) + if err != nil { + return errors.Wrap(err, "failed to load job") + } + + if err = LoadAllJobTypes(tx, &jb); err != nil { + return errors.Wrap(err, "failed to load job types") + } + + return nil + }, pg.OptReadOnlyTx()) + if err != nil { + return jb, errors.Wrap(err, "FindJobWithoutSpecErrors failed") + } + + return jb, nil +} + +// FindSpecErrorsByJobIDs returns all jobs spec errors by jobs IDs +func (o *orm) FindSpecErrorsByJobIDs(ids []int32, qopts ...pg.QOpt) ([]SpecError, error) { + stmt := `SELECT * FROM job_spec_errors WHERE job_id = ANY($1);` + + var specErrs []SpecError + err := o.q.WithOpts(qopts...).Select(&specErrs, 
stmt, ids) + + return specErrs, errors.Wrap(err, "FindSpecErrorsByJobIDs failed") +} + +func (o *orm) FindJobByExternalJobID(externalJobID uuid.UUID, qopts ...pg.QOpt) (jb Job, err error) { + err = o.findJob(&jb, "external_job_id", externalJobID, qopts...) + return +} + +// FindJobIDByAddress - finds a job id by contract address. Currently only OCR and FM jobs are supported +func (o *orm) FindJobIDByAddress(address ethkey.EIP55Address, evmChainID *big.Big, qopts ...pg.QOpt) (jobID int32, err error) { + q := o.q.WithOpts(qopts...) + err = q.Transaction(func(tx pg.Queryer) error { + stmt := ` +SELECT jobs.id +FROM jobs +LEFT JOIN ocr_oracle_specs ocrspec on ocrspec.contract_address = $1 AND (ocrspec.evm_chain_id = $2 OR ocrspec.evm_chain_id IS NULL) AND ocrspec.id = jobs.ocr_oracle_spec_id +LEFT JOIN flux_monitor_specs fmspec on fmspec.contract_address = $1 AND (fmspec.evm_chain_id = $2 OR fmspec.evm_chain_id IS NULL) AND fmspec.id = jobs.flux_monitor_spec_id +WHERE ocrspec.id IS NOT NULL OR fmspec.id IS NOT NULL +` + err = tx.Get(&jobID, stmt, address, evmChainID) + + if !errors.Is(err, sql.ErrNoRows) { + if err != nil { + return errors.Wrap(err, "error searching for job by contract address") + } + return nil + } + + return err + }) + + return jobID, errors.Wrap(err, "FindJobIDByAddress failed") +} + +func (o *orm) FindOCR2JobIDByAddress(contractID string, feedID *common.Hash, qopts ...pg.QOpt) (jobID int32, err error) { + q := o.q.WithOpts(qopts...) 
+ err = q.Transaction(func(tx pg.Queryer) error { + // NOTE: We want to explicitly match on NULL feed_id hence usage of `IS + // NOT DISTINCT FROM` instead of `=` + stmt := ` +SELECT jobs.id +FROM jobs +LEFT JOIN ocr2_oracle_specs ocr2spec on ocr2spec.contract_id = $1 AND ocr2spec.feed_id IS NOT DISTINCT FROM $2 AND ocr2spec.id = jobs.ocr2_oracle_spec_id +LEFT JOIN bootstrap_specs bs on bs.contract_id = $1 AND bs.feed_id IS NOT DISTINCT FROM $2 AND bs.id = jobs.bootstrap_spec_id +WHERE ocr2spec.id IS NOT NULL OR bs.id IS NOT NULL +` + err = tx.Get(&jobID, stmt, contractID, feedID) + + if !errors.Is(err, sql.ErrNoRows) { + if err != nil { + return errors.Wrapf(err, "error searching for job by contract id=%s and feed id=%s", contractID, feedID) + } + return nil + } + + return err + }) + + return jobID, errors.Wrap(err, "FindOCR2JobIDByAddress failed") +} + +func (o *orm) findJob(jb *Job, col string, arg interface{}, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + err := q.Transaction(func(tx pg.Queryer) error { + sql := fmt.Sprintf(`SELECT * FROM jobs WHERE %s = $1 LIMIT 1`, col) + err := tx.Get(jb, sql, arg) + if err != nil { + return errors.Wrap(err, "failed to load job") + } + + if err = LoadAllJobTypes(tx, jb); err != nil { + return err + } + + return loadJobSpecErrors(tx, jb) + }) + if err != nil { + return errors.Wrap(err, "findJob failed") + } + return nil +} + +func (o *orm) FindJobIDsWithBridge(name string) (jids []int32, err error) { + err = o.q.Transaction(func(tx pg.Queryer) error { + query := `SELECT jobs.id, dot_dag_source FROM jobs JOIN pipeline_specs ON pipeline_specs.id = jobs.pipeline_spec_id WHERE dot_dag_source ILIKE '%' || $1 || '%' ORDER BY id` + var rows *sqlx.Rows + rows, err = tx.Queryx(query, name) + if err != nil { + return err + } + defer rows.Close() + var ids []int32 + var sources []string + for rows.Next() { + var id int32 + var source string + if err = rows.Scan(&id, &source); err != nil { + return err + } + ids = 
append(jids, id) + sources = append(sources, source) + } + + for i, id := range ids { + var p *pipeline.Pipeline + p, err = pipeline.Parse(sources[i]) + if err != nil { + return errors.Wrapf(err, "could not parse dag for job %d", id) + } + for _, task := range p.Tasks { + if task.Type() == pipeline.TaskTypeBridge { + if task.(*pipeline.BridgeTask).Name == name { + jids = append(jids, id) + } + } + } + } + return nil + }) + return jids, errors.Wrap(err, "FindJobIDsWithBridge failed") +} + +// PipelineRunsByJobsIDs returns pipeline runs for multiple jobs, not preloading data +func (o *orm) PipelineRunsByJobsIDs(ids []int32) (runs []pipeline.Run, err error) { + err = o.q.Transaction(func(tx pg.Queryer) error { + stmt := `SELECT pipeline_runs.* FROM pipeline_runs INNER JOIN jobs ON pipeline_runs.pipeline_spec_id = jobs.pipeline_spec_id WHERE jobs.id = ANY($1) + ORDER BY pipeline_runs.created_at DESC, pipeline_runs.id DESC;` + if err = tx.Select(&runs, stmt, ids); err != nil { + return errors.Wrap(err, "error loading runs") + } + + runs, err = o.loadPipelineRunsRelations(runs, tx) + + return err + }) + + return runs, errors.Wrap(err, "PipelineRunsByJobsIDs failed") +} + +func (o *orm) loadPipelineRunIDs(jobID *int32, offset, limit int, tx pg.Queryer) (ids []int64, err error) { + lggr := logger.Sugared(o.lggr) + + var res sql.NullInt64 + if err = tx.Get(&res, "SELECT MAX(id) FROM pipeline_runs"); err != nil { + err = errors.Wrap(err, "error while loading runs") + return + } else if !res.Valid { + // MAX() will return NULL if there are no rows in table. 
This is not an error + return + } + maxID := res.Int64 + + var filter string + if jobID != nil { + filter = fmt.Sprintf("JOIN jobs USING(pipeline_spec_id) WHERE jobs.id = %d AND ", *jobID) + } else { + filter = "WHERE " + } + + stmt := fmt.Sprintf(`SELECT p.id FROM pipeline_runs AS p %s p.id >= $3 AND p.id <= $4 + ORDER BY p.id DESC OFFSET $1 LIMIT $2`, filter) + + // Only search the most recent n pipeline runs (whether deleted or not), starting with n = 1000 and + // doubling only if we still need more. Without this, large tables can result in the UI + // becoming unusably slow, continuously flashing, or timing out. The ORDER BY in + // this query requires a sort of all runs matching jobID, so we restrict it to the + // range minID <-> maxID. + + for n := int64(1000); maxID > 0 && len(ids) < limit; n *= 2 { + var batch []int64 + minID := maxID - n + if err = tx.Select(&batch, stmt, offset, limit-len(ids), minID, maxID); err != nil { + err = errors.Wrap(err, "error loading runs") + return + } + ids = append(ids, batch...) 
+ if offset > 0 { + if len(ids) > 0 { + // If we're already receiving rows back, then we no longer need an offset + offset = 0 + } else { + var skipped int + // If no rows were returned, we need to know whether there were any ids skipped + // in this batch due to the offset, and reduce it for the next batch + err = tx.Get(&skipped, + fmt.Sprintf( + `SELECT COUNT(p.id) FROM pipeline_runs AS p %s p.id >= $1 AND p.id <= $2`, filter, + ), minID, maxID, + ) + if err != nil { + err = errors.Wrap(err, "error loading from pipeline_runs") + return + } + offset -= skipped + if offset < 0 { // sanity assertion, if this ever happened it would probably mean db corruption or pg bug + lggr.AssumptionViolationw("offset < 0 while reading pipeline_runs") + err = errors.Wrap(err, "internal db error while reading pipeline_runs") + return + } + lggr.Debugw("loadPipelineRunIDs empty batch", "minId", minID, "maxID", maxID, "n", n, "len(ids)", len(ids), "limit", limit, "offset", offset, "skipped", skipped) + + } + } + maxID = minID - 1 + } + return +} + +func (o *orm) FindTaskResultByRunIDAndTaskName(runID int64, taskName string, qopts ...pg.QOpt) (result []byte, err error) { + q := o.q.WithOpts(qopts...) + err = q.Transaction(func(tx pg.Queryer) error { + stmt := fmt.Sprintf("SELECT * FROM pipeline_task_runs WHERE pipeline_run_id = $1 AND dot_id = '%s';", taskName) + + var taskRuns []pipeline.TaskRun + if errB := tx.Select(&taskRuns, stmt, runID); errB != nil { + return errB + } + if len(taskRuns) == 0 { + return fmt.Errorf("can't find task run with id: %v, taskName: %v", runID, taskName) + } + if len(taskRuns) > 1 { + o.lggr.Errorf("found multiple task runs with id: %v, taskName: %v. 
Using the first one.", runID, taskName) + } + taskRun := taskRuns[0] + if !taskRun.Error.IsZero() { + return errors.New(taskRun.Error.ValueOrZero()) + } + resBytes, errB := taskRun.Output.MarshalJSON() + if errB != nil { + return errB + } + result = resBytes + return nil + }) + return result, errors.Wrap(err, "failed") +} + +// FindPipelineRunIDsByJobID fetches the ids of pipeline runs for a job. +func (o *orm) FindPipelineRunIDsByJobID(jobID int32, offset, limit int) (ids []int64, err error) { + err = o.q.Transaction(func(tx pg.Queryer) error { + ids, err = o.loadPipelineRunIDs(&jobID, offset, limit, tx) + return err + }) + return ids, errors.Wrap(err, "FindPipelineRunIDsByJobID failed") +} + +func (o *orm) loadPipelineRunsByID(ids []int64, tx pg.Queryer) (runs []pipeline.Run, err error) { + stmt := ` + SELECT pipeline_runs.* + FROM pipeline_runs + WHERE id = ANY($1) + ORDER BY created_at DESC, id DESC + ` + if err = tx.Select(&runs, stmt, ids); err != nil { + err = errors.Wrap(err, "error loading runs") + return + } + + return o.loadPipelineRunsRelations(runs, tx) +} + +// FindPipelineRunsByIDs returns pipeline runs with the ids. +func (o *orm) FindPipelineRunsByIDs(ids []int64) (runs []pipeline.Run, err error) { + err = o.q.Transaction(func(tx pg.Queryer) error { + runs, err = o.loadPipelineRunsByID(ids, tx) + return err + }) + + return runs, errors.Wrap(err, "FindPipelineRunsByIDs failed") +} + +// FindPipelineRunByID returns pipeline run with the id. 
+func (o *orm) FindPipelineRunByID(id int64) (pipeline.Run, error) { + var run pipeline.Run + + err := o.q.Transaction(func(tx pg.Queryer) error { + stmt := ` +SELECT pipeline_runs.* +FROM pipeline_runs +WHERE id = $1 +` + + if err := tx.Get(&run, stmt, id); err != nil { + return errors.Wrap(err, "error loading run") + } + + runs, err := o.loadPipelineRunsRelations([]pipeline.Run{run}, tx) + + run = runs[0] + + return err + }) + + return run, errors.Wrap(err, "FindPipelineRunByID failed") +} + +// CountPipelineRunsByJobID returns the total number of pipeline runs for a job. +func (o *orm) CountPipelineRunsByJobID(jobID int32) (count int32, err error) { + err = o.q.Transaction(func(tx pg.Queryer) error { + stmt := "SELECT COUNT(*) FROM pipeline_runs JOIN jobs USING (pipeline_spec_id) WHERE jobs.id = $1" + if err = tx.Get(&count, stmt, jobID); err != nil { + return errors.Wrap(err, "error counting runs") + } + + return err + }) + + return count, errors.Wrap(err, "CountPipelineRunsByJobID failed") +} + +func (o *orm) FindJobsByPipelineSpecIDs(ids []int32) ([]Job, error) { + var jbs []Job + + err := o.q.Transaction(func(tx pg.Queryer) error { + stmt := `SELECT * FROM jobs WHERE jobs.pipeline_spec_id = ANY($1) ORDER BY id ASC +` + if err := tx.Select(&jbs, stmt, ids); err != nil { + return errors.Wrap(err, "error fetching jobs by pipeline spec IDs") + } + + err := LoadAllJobsTypes(tx, jbs) + if err != nil { + return err + } + + return nil + }) + + return jbs, errors.Wrap(err, "FindJobsByPipelineSpecIDs failed") +} + +// PipelineRuns returns pipeline runs for a job, with spec and taskruns loaded, latest first +// If jobID is nil, returns all pipeline runs +func (o *orm) PipelineRuns(jobID *int32, offset, size int) (runs []pipeline.Run, count int, err error) { + var filter string + if jobID != nil { + filter = fmt.Sprintf("JOIN jobs USING(pipeline_spec_id) WHERE jobs.id = %d", *jobID) + } + err = o.q.Transaction(func(tx pg.Queryer) error { + sql := fmt.Sprintf(`SELECT 
count(*) FROM pipeline_runs %s`, filter) + if err = tx.QueryRowx(sql).Scan(&count); err != nil { + return errors.Wrap(err, "error counting runs") + } + + var ids []int64 + ids, err = o.loadPipelineRunIDs(jobID, offset, size, tx) + runs, err = o.loadPipelineRunsByID(ids, tx) + + return err + }) + + return runs, count, errors.Wrap(err, "PipelineRuns failed") +} + +func (o *orm) loadPipelineRunsRelations(runs []pipeline.Run, tx pg.Queryer) ([]pipeline.Run, error) { + // Postload PipelineSpecs + // TODO: We should pull this out into a generic preload function once go has generics + specM := make(map[int32]pipeline.Spec) + for _, run := range runs { + if _, exists := specM[run.PipelineSpecID]; !exists { + specM[run.PipelineSpecID] = pipeline.Spec{} + } + } + specIDs := make([]int32, len(specM)) + for specID := range specM { + specIDs = append(specIDs, specID) + } + stmt := `SELECT pipeline_specs.*, jobs.id AS job_id FROM pipeline_specs JOIN jobs ON pipeline_specs.id = jobs.pipeline_spec_id WHERE pipeline_specs.id = ANY($1);` + var specs []pipeline.Spec + if err := o.q.Select(&specs, stmt, specIDs); err != nil { + return nil, errors.Wrap(err, "error loading specs") + } + for _, spec := range specs { + specM[spec.ID] = spec + } + runM := make(map[int64]*pipeline.Run, len(runs)) + for i, run := range runs { + runs[i].PipelineSpec = specM[run.PipelineSpecID] + runM[run.ID] = &runs[i] + } + + // Postload PipelineTaskRuns + runIDs := make([]int64, len(runs)) + for i, run := range runs { + runIDs[i] = run.ID + } + var taskRuns []pipeline.TaskRun + stmt = `SELECT * FROM pipeline_task_runs WHERE pipeline_run_id = ANY($1) ORDER BY pipeline_run_id, created_at, id;` + if err := tx.Select(&taskRuns, stmt, runIDs); err != nil { + return nil, errors.Wrap(err, "error loading pipeline_task_runs") + } + for _, taskRun := range taskRuns { + run := runM[taskRun.PipelineRunID] + run.PipelineTaskRuns = append(run.PipelineTaskRuns, taskRun) + } + + return runs, nil +} + +// NOTE: N+1 query, 
// be careful of performance
// This is not easily fixable without complicating the logic a lot, since we
// only use it in the GUI it's probably acceptable
func LoadAllJobsTypes(tx pg.Queryer, jobs []Job) error {
	for i := range jobs {
		err := LoadAllJobTypes(tx, &jobs[i])
		if err != nil {
			return err
		}
	}
	return nil
}

// LoadAllJobTypes preloads every job-type-specific spec relation that is set
// on the job; spec ids that are nil are skipped.
func LoadAllJobTypes(tx pg.Queryer, job *Job) error {
	return multierr.Combine(
		loadJobType(tx, job, "PipelineSpec", "pipeline_specs", &job.PipelineSpecID),
		loadJobType(tx, job, "FluxMonitorSpec", "flux_monitor_specs", job.FluxMonitorSpecID),
		loadJobType(tx, job, "DirectRequestSpec", "direct_request_specs", job.DirectRequestSpecID),
		loadJobType(tx, job, "OCROracleSpec", "ocr_oracle_specs", job.OCROracleSpecID),
		loadJobType(tx, job, "OCR2OracleSpec", "ocr2_oracle_specs", job.OCR2OracleSpecID),
		loadJobType(tx, job, "KeeperSpec", "keeper_specs", job.KeeperSpecID),
		loadJobType(tx, job, "CronSpec", "cron_specs", job.CronSpecID),
		loadJobType(tx, job, "WebhookSpec", "webhook_specs", job.WebhookSpecID),
		// VRF/BHS/BHF/legacy-gas-station specs need bespoke loaders because of
		// their pq.ByteaArray FromAddresses columns (see the *SpecRow helpers).
		loadVRFJob(tx, job, job.VRFSpecID),
		loadBlockhashStoreJob(tx, job, job.BlockhashStoreSpecID),
		loadBlockHeaderFeederJob(tx, job, job.BlockHeaderFeederSpecID),
		loadLegacyGasStationServerJob(tx, job, job.LegacyGasStationServerSpecID),
		loadJobType(tx, job, "LegacyGasStationSidecarSpec", "legacy_gas_station_sidecar_specs", job.LegacyGasStationSidecarSpecID),
		loadJobType(tx, job, "BootstrapSpec", "bootstrap_specs", job.BootstrapSpecID),
		loadJobType(tx, job, "GatewaySpec", "gateway_specs", job.GatewaySpecID),
	)
}

// loadJobType loads one spec row from table by id and stores it (via
// reflection) into the job struct field named field. A nil id is a no-op.
func loadJobType(tx pg.Queryer, job *Job, field, table string, id *int32) error {
	if id == nil {
		return nil
	}

	// The abomination below allows us to initialise and then scan into the
	// type of the field without hardcoding for each individual field
	// My LIFE for generics...
	r := reflect.ValueOf(job)
	t := reflect.Indirect(r).FieldByName(field).Type().Elem()
	destVal := reflect.New(t)
	dest := destVal.Interface()

	err := tx.Get(dest, fmt.Sprintf(`SELECT * FROM %s WHERE id = $1`, table), *id)

	if err != nil {
		return errors.Wrapf(err, "failed to load job type %s with id %d", table, *id)
	}
	reflect.ValueOf(job).Elem().FieldByName(field).Set(destVal)
	return nil
}

// loadVRFJob loads the VRFSpec relation via its vrfSpecRow helper, which
// converts the bytea[] FromAddresses column. A nil id is a no-op.
func loadVRFJob(tx pg.Queryer, job *Job, id *int32) error {
	if id == nil {
		return nil
	}

	var row vrfSpecRow
	err := tx.Get(&row, `SELECT * FROM vrf_specs WHERE id = $1`, *id)
	if err != nil {
		return errors.Wrapf(err, `failed to load job type VRFSpec with id %d`, *id)
	}

	job.VRFSpec = row.toVRFSpec()
	return nil
}

// vrfSpecRow is a helper type for reading and writing VRF specs to the database. This is necessary
// because the bytea[] in the DB is not automatically convertible to or from the spec's
// FromAddresses field. pq.ByteaArray must be used instead.
+type vrfSpecRow struct { + *VRFSpec + FromAddresses pq.ByteaArray +} + +func toVRFSpecRow(spec *VRFSpec) vrfSpecRow { + addresses := make(pq.ByteaArray, len(spec.FromAddresses)) + for i, a := range spec.FromAddresses { + addresses[i] = a.Bytes() + } + return vrfSpecRow{VRFSpec: spec, FromAddresses: addresses} +} + +func (r vrfSpecRow) toVRFSpec() *VRFSpec { + for _, a := range r.FromAddresses { + r.VRFSpec.FromAddresses = append(r.VRFSpec.FromAddresses, + ethkey.EIP55AddressFromAddress(common.BytesToAddress(a))) + } + return r.VRFSpec +} + +func loadBlockhashStoreJob(tx pg.Queryer, job *Job, id *int32) error { + if id == nil { + return nil + } + + var row blockhashStoreSpecRow + err := tx.Get(&row, `SELECT * FROM blockhash_store_specs WHERE id = $1`, *id) + if err != nil { + return errors.Wrapf(err, `failed to load job type BlockhashStoreSpec with id %d`, *id) + } + + job.BlockhashStoreSpec = row.toBlockhashStoreSpec() + return nil +} + +// blockhashStoreSpecRow is a helper type for reading and writing blockhashStore specs to the database. This is necessary +// because the bytea[] in the DB is not automatically convertible to or from the spec's +// FromAddresses field. pq.ByteaArray must be used instead. 
+type blockhashStoreSpecRow struct { + *BlockhashStoreSpec + FromAddresses pq.ByteaArray +} + +func toBlockhashStoreSpecRow(spec *BlockhashStoreSpec) blockhashStoreSpecRow { + addresses := make(pq.ByteaArray, len(spec.FromAddresses)) + for i, a := range spec.FromAddresses { + addresses[i] = a.Bytes() + } + return blockhashStoreSpecRow{BlockhashStoreSpec: spec, FromAddresses: addresses} +} + +func (r blockhashStoreSpecRow) toBlockhashStoreSpec() *BlockhashStoreSpec { + for _, a := range r.FromAddresses { + r.BlockhashStoreSpec.FromAddresses = append(r.BlockhashStoreSpec.FromAddresses, + ethkey.EIP55AddressFromAddress(common.BytesToAddress(a))) + } + return r.BlockhashStoreSpec +} + +func loadBlockHeaderFeederJob(tx pg.Queryer, job *Job, id *int32) error { + if id == nil { + return nil + } + + var row blockHeaderFeederSpecRow + err := tx.Get(&row, `SELECT * FROM block_header_feeder_specs WHERE id = $1`, *id) + if err != nil { + return errors.Wrapf(err, `failed to load job type BlockHeaderFeederSpec with id %d`, *id) + } + + job.BlockHeaderFeederSpec = row.toBlockHeaderFeederSpec() + return nil +} + +// blockHeaderFeederSpecRow is a helper type for reading and writing blockHeaderFeederSpec specs to the database. This is necessary +// because the bytea[] in the DB is not automatically convertible to or from the spec's +// FromAddresses field. pq.ByteaArray must be used instead. 
+type blockHeaderFeederSpecRow struct { + *BlockHeaderFeederSpec + FromAddresses pq.ByteaArray +} + +func toBlockHeaderFeederSpecRow(spec *BlockHeaderFeederSpec) blockHeaderFeederSpecRow { + addresses := make(pq.ByteaArray, len(spec.FromAddresses)) + for i, a := range spec.FromAddresses { + addresses[i] = a.Bytes() + } + return blockHeaderFeederSpecRow{BlockHeaderFeederSpec: spec, FromAddresses: addresses} +} + +func (r blockHeaderFeederSpecRow) toBlockHeaderFeederSpec() *BlockHeaderFeederSpec { + for _, a := range r.FromAddresses { + r.BlockHeaderFeederSpec.FromAddresses = append(r.BlockHeaderFeederSpec.FromAddresses, + ethkey.EIP55AddressFromAddress(common.BytesToAddress(a))) + } + return r.BlockHeaderFeederSpec +} + +func loadLegacyGasStationServerJob(tx pg.Queryer, job *Job, id *int32) error { + if id == nil { + return nil + } + + var row legacyGasStationServerSpecRow + err := tx.Get(&row, `SELECT * FROM legacy_gas_station_server_specs WHERE id = $1`, *id) + if err != nil { + return errors.Wrapf(err, `failed to load job type LegacyGasStationServerSpec with id %d`, *id) + } + + job.LegacyGasStationServerSpec = row.toLegacyGasStationServerSpec() + return nil +} + +// legacyGasStationServerSpecRow is a helper type for reading and writing legacyGasStationServerSpec specs to the database. This is necessary +// because the bytea[] in the DB is not automatically convertible to or from the spec's +// FromAddresses field. pq.ByteaArray must be used instead. 
+type legacyGasStationServerSpecRow struct { + *LegacyGasStationServerSpec + FromAddresses pq.ByteaArray +} + +func toLegacyGasStationServerSpecRow(spec *LegacyGasStationServerSpec) legacyGasStationServerSpecRow { + addresses := make(pq.ByteaArray, len(spec.FromAddresses)) + for i, a := range spec.FromAddresses { + addresses[i] = a.Bytes() + } + return legacyGasStationServerSpecRow{LegacyGasStationServerSpec: spec, FromAddresses: addresses} +} + +func (r legacyGasStationServerSpecRow) toLegacyGasStationServerSpec() *LegacyGasStationServerSpec { + for _, a := range r.FromAddresses { + r.LegacyGasStationServerSpec.FromAddresses = append(r.LegacyGasStationServerSpec.FromAddresses, + ethkey.EIP55AddressFromAddress(common.BytesToAddress(a))) + } + return r.LegacyGasStationServerSpec +} + +func loadJobSpecErrors(tx pg.Queryer, jb *Job) error { + return errors.Wrapf(tx.Select(&jb.JobSpecErrors, `SELECT * FROM job_spec_errors WHERE job_id = $1`, jb.ID), "failed to load job spec errors for job %d", jb.ID) +} diff --git a/core/services/job/orm_test.go b/core/services/job/orm_test.go new file mode 100644 index 00000000..f577d7e0 --- /dev/null +++ b/core/services/job/orm_test.go @@ -0,0 +1,74 @@ +package job_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + clnull "github.com/goplugin/pluginv3.0/v2/core/null" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + 
	"github.com/goplugin/pluginv3.0/v2/core/store/models"
)

// NewTestORM builds a job.ORM against the given test DB and registers its
// Close on test cleanup.
func NewTestORM(t *testing.T, db *sqlx.DB, pipelineORM pipeline.ORM, bridgeORM bridges.ORM, keyStore keystore.Master, cfg pg.QConfig) job.ORM {
	o := job.NewORM(db, pipelineORM, bridgeORM, keyStore, logger.TestLogger(t), cfg)
	t.Cleanup(func() { assert.NoError(t, o.Close()) })
	return o
}

// TestLoadConfigVarsLocalOCR checks that an empty OCROracleSpec has every
// field filled in from the chain-scoped config.
func TestLoadConfigVarsLocalOCR(t *testing.T) {
	t.Parallel()

	config := configtest.NewTestGeneralConfig(t)
	chainConfig := evmtest.NewChainScopedConfig(t, config)
	jobSpec := &job.OCROracleSpec{}

	jobSpec = job.LoadConfigVarsLocalOCR(chainConfig.EVM().OCR(), *jobSpec, chainConfig.OCR())

	require.Equal(t, models.Interval(chainConfig.OCR().ObservationTimeout()), jobSpec.ObservationTimeout)
	require.Equal(t, models.Interval(chainConfig.OCR().BlockchainTimeout()), jobSpec.BlockchainTimeout)
	require.Equal(t, models.Interval(chainConfig.OCR().ContractSubscribeInterval()), jobSpec.ContractConfigTrackerSubscribeInterval)
	require.Equal(t, models.Interval(chainConfig.OCR().ContractPollInterval()), jobSpec.ContractConfigTrackerPollInterval)
	require.Equal(t, chainConfig.OCR().CaptureEATelemetry(), jobSpec.CaptureEATelemetry)

	require.Equal(t, chainConfig.EVM().OCR().ContractConfirmations(), jobSpec.ContractConfigConfirmations)
	require.Equal(t, models.Interval(chainConfig.EVM().OCR().DatabaseTimeout()), *jobSpec.DatabaseTimeout)
	require.Equal(t, models.Interval(chainConfig.EVM().OCR().ObservationGracePeriod()), *jobSpec.ObservationGracePeriod)
	require.Equal(t, models.Interval(chainConfig.EVM().OCR().ContractTransmitterTransmitTimeout()), *jobSpec.ContractTransmitterTransmitTimeout)
}

// TestSetDRMinIncomingConfirmations checks that the larger of the global
// default (100) and the spec value wins: 10 is raised to 100, 200 is kept.
func TestSetDRMinIncomingConfirmations(t *testing.T) {
	t.Parallel()

	config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) {
		hundred := uint32(100)
		c.EVM[0].MinIncomingConfirmations = &hundred
	})
	chainConfig := evmtest.NewChainScopedConfig(t, config)

	jobSpec10 := job.DirectRequestSpec{
		MinIncomingConfirmations: clnull.Uint32From(10),
	}

	drs10 := job.SetDRMinIncomingConfirmations(chainConfig.EVM().MinIncomingConfirmations(), jobSpec10)
	assert.Equal(t, uint32(100), drs10.MinIncomingConfirmations.Uint32)

	jobSpec200 := job.DirectRequestSpec{
		MinIncomingConfirmations: clnull.Uint32From(200),
	}

	drs200 := job.SetDRMinIncomingConfirmations(chainConfig.EVM().MinIncomingConfirmations(), jobSpec200)
	assert.True(t, drs200.MinIncomingConfirmations.Valid)
	assert.Equal(t, uint32(200), drs200.MinIncomingConfirmations.Uint32)
}
diff --git a/core/services/job/runner_integration_test.go b/core/services/job/runner_integration_test.go
new file mode 100644
index 00000000..fc2c63e1
--- /dev/null
+++ b/core/services/job/runner_integration_test.go
@@ -0,0 +1,1099 @@
package job_test

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"io"
	"math/big"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strings"
	"testing"
	"time"

	"github.com/google/uuid"
	"github.com/hashicorp/consul/sdk/freeport"
	"github.com/pelletier/go-toml"
	"github.com/pkg/errors"
	"github.com/shopspring/decimal"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"gopkg.in/guregu/null.v4"

	commonconfig "github.com/goplugin/plugin-common/pkg/config"
	"github.com/goplugin/plugin-common/pkg/services/servicetest"
	"github.com/goplugin/plugin-common/pkg/utils/mailbox/mailboxtest"

	"github.com/goplugin/pluginv3.0/v2/core/auth"
	"github.com/goplugin/pluginv3.0/v2/core/bridges"
	"github.com/goplugin/pluginv3.0/v2/core/internal/cltest"
	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest"
	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest"
	clhttptest "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/httptest"
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/services/telemetry" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/web" +) + +var monitoringEndpoint = telemetry.MonitoringEndpointGenerator(&telemetry.NoopAgent{}) + +func TestRunner(t *testing.T) { + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, pgtest.NewQConfig(true)) + + ethKeyStore := keyStore.Eth() + _, transmitterAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey)) + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + kb, err := keyStore.OCR().Create() + require.NoError(t, err) + kbid := models.MustSha256HashFromHex(kb.ID()) + c.OCR.KeyBundleID = &kbid + taddress := ethkey.EIP55AddressFromAddress(transmitterAddress) + c.OCR.TransmitterAddress = &taddress + c.OCR2.DatabaseTimeout = commonconfig.MustNewDuration(time.Second) + c.OCR2.ContractTransmitterTransmitTimeout = commonconfig.MustNewDuration(time.Second) + c.Insecure.OCRDevelopmentMode = ptr(true) + }) + + ethClient := cltest.NewEthMocksWithDefaultChain(t) + ethClient.On("HeadByNumber", mock.Anything, 
(*big.Int)(nil)).Return(cltest.Head(10), nil) + ethClient.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(nil, nil) + + ctx := testutils.Context(t) + pipelineORM := pipeline.NewORM(db, logger.TestLogger(t), config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + require.NoError(t, pipelineORM.Start(ctx)) + t.Cleanup(func() { assert.NoError(t, pipelineORM.Close()) }) + btORM := bridges.NewORM(db, logger.TestLogger(t), config.Database()) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, Client: ethClient, GeneralConfig: config, KeyStore: ethKeyStore}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + c := clhttptest.NewTestLocalOnlyHTTPClient() + + runner := pipeline.NewRunner(pipelineORM, btORM, config.JobPipeline(), config.WebServer(), legacyChains, nil, nil, logger.TestLogger(t), c, c) + jobORM := NewTestORM(t, db, pipelineORM, btORM, keyStore, config.Database()) + t.Cleanup(func() { assert.NoError(t, jobORM.Close()) }) + + _, placeHolderAddress := cltest.MustInsertRandomKey(t, keyStore.Eth()) + + servicetest.Run(t, runner) + + t.Run("gets the election result winner", func(t *testing.T) { + var httpURL string + mockElectionWinner := cltest.NewHTTPMockServer(t, http.StatusOK, "POST", `Hal Finney`, + func(header http.Header, s string) { + var md bridges.BridgeMetaDataJSON + require.NoError(t, json.Unmarshal([]byte(s), &md)) + assert.Equal(t, big.NewInt(10), md.Meta.LatestAnswer) + assert.Equal(t, big.NewInt(100), md.Meta.UpdatedAt) + }) + mockVoterTurnout := cltest.NewHTTPMockServer(t, http.StatusOK, "POST", `{"data": {"result": 62.57}}`, + func(header http.Header, s string) { + var md bridges.BridgeMetaDataJSON + require.NoError(t, json.Unmarshal([]byte(s), &md)) + assert.Equal(t, big.NewInt(10), md.Meta.LatestAnswer) + assert.Equal(t, big.NewInt(100), md.Meta.UpdatedAt) + }, + ) + mockHTTP := cltest.NewHTTPMockServer(t, http.StatusOK, "POST", `{"turnout": 
61.942}`) + + httpURL = mockHTTP.URL + _, bridgeER := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: mockElectionWinner.URL}, config.Database()) + _, bridgeVT := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: mockVoterTurnout.URL}, config.Database()) + + // Need a job in order to create a run + jb := MakeVoterTurnoutOCRJobSpecWithHTTPURL(t, transmitterAddress, httpURL, bridgeVT.Name.String(), bridgeER.Name.String()) + require.NoError(t, jobORM.CreateJob(jb)) + require.NotNil(t, jb.PipelineSpec) + + m, err := bridges.MarshalBridgeMetaData(big.NewInt(10), big.NewInt(100)) + require.NoError(t, err) + runID, results, err := runner.ExecuteAndInsertFinishedRun(testutils.Context(t), *jb.PipelineSpec, pipeline.NewVarsFrom(map[string]interface{}{"jobRun": map[string]interface{}{"meta": m}}), logger.TestLogger(t), true) + require.NoError(t, err) + + require.Len(t, results.Values, 2) + require.GreaterOrEqual(t, len(results.FatalErrors), 2) + assert.Nil(t, results.FatalErrors[0]) + assert.Nil(t, results.FatalErrors[1]) + require.GreaterOrEqual(t, len(results.AllErrors), 2) + assert.Equal(t, "6225.6", results.Values[0].(decimal.Decimal).String()) + assert.Equal(t, "Hal Finney", results.Values[1].(string)) + + // Verify individual task results + var runs []pipeline.TaskRun + sql := `SELECT * FROM pipeline_task_runs WHERE pipeline_run_id = $1` + err = db.Select(&runs, sql, runID) + assert.NoError(t, err) + assert.Len(t, runs, 8) + + for _, run := range runs { + if run.GetDotID() == "answer2" { + assert.Equal(t, "Hal Finney", run.Output.Val) + } else if run.GetDotID() == "ds2" { + assert.Equal(t, `{"turnout": 61.942}`, run.Output.Val) + } else if run.GetDotID() == "ds2_parse" { + assert.Equal(t, float64(61.942), run.Output.Val) + } else if run.GetDotID() == "ds2_multiply" { + assert.Equal(t, "6194.2", run.Output.Val) + } else if run.GetDotID() == "ds1" { + assert.Equal(t, `{"data": {"result": 62.57}}`, run.Output.Val) + } else if run.GetDotID() == "ds1_parse" { + 
assert.Equal(t, float64(62.57), run.Output.Val) + } else if run.GetDotID() == "ds1_multiply" { + assert.Equal(t, "6257", run.Output.Val) + } else if run.GetDotID() == "answer1" { + assert.Equal(t, "6225.6", run.Output.Val) + } else { + t.Fatalf("unknown task '%v'", run.GetDotID()) + } + } + }) + + t.Run("must delete job before deleting bridge", func(t *testing.T) { + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + jb := makeOCRJobSpecFromToml(t, fmt.Sprintf(` + type = "offchainreporting" + schemaVersion = 1 + evmChainID = "0" + observationSource = """ + ds1 [type=bridge name="%s"]; + """ + `, bridge.Name.String())) + require.NoError(t, jobORM.CreateJob(jb)) + // Should not be able to delete a bridge in use. + jids, err := jobORM.FindJobIDsWithBridge(bridge.Name.String()) + require.NoError(t, err) + require.Equal(t, 1, len(jids)) + + // But if we delete the job, then we can. + require.NoError(t, jobORM.DeleteJob(jb.ID)) + jids, err = jobORM.FindJobIDsWithBridge(bridge.Name.String()) + require.NoError(t, err) + require.Equal(t, 0, len(jids)) + }) + + t.Run("referencing a non-existent bridge should error", func(t *testing.T) { + // Create a random bridge name + _, b := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + + // Reference a different one + legacyChains := cltest.NewLegacyChainsWithMockChain(t, nil, config) + + jb, err := ocr.ValidatedOracleSpecToml(legacyChains, fmt.Sprintf(` + type = "offchainreporting" + schemaVersion = 1 + evmChainID = 0 + transmitterID = "%s" + contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" + isBootstrapPeer = false + blockchainTimeout = "1s" + observationTimeout = "10s" + databaseTimeout = "2s" + contractConfigTrackerPollInterval="15s" + contractConfigConfirmations=1 + observationGracePeriod = "2s" + contractTransmitterTransmitTimeout = "5s" + contractConfigTrackerSubscribeInterval="1m" + observationSource = """ + ds1 [type=bridge name=blah]; + ds1_parse 
[type=jsonparse path="one,two"]; + ds1_multiply [type=multiply times=1.23]; + ds1 -> ds1_parse -> ds1_multiply -> answer1; + answer1 [type=median index=0]; + """ + `, placeHolderAddress.String())) + require.NoError(t, err) + // Should error creating it + err = jobORM.CreateJob(&jb) + require.Error(t, err) + assert.Contains(t, err.Error(), "not all bridges exist") + + // Same for ocr2 + jb2, err := validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), fmt.Sprintf(` +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +transmitterID = "%s" +blockchainTimeout = "1s" +contractConfigTrackerPollInterval = "15s" +contractConfigConfirmations = 1 +observationSource = """ +ds1 [type=bridge name="%s"]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +[relayConfig] +chainID = 1337 +[pluginConfig] +juelsPerFeeCoinSource = """ +ds1 [type=bridge name=blah]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +`, placeHolderAddress.String(), b.Name.String())) + require.NoError(t, err) + // Should error creating it because of the juels per fee coin non-existent bridge + err = jobORM.CreateJob(&jb2) + require.Error(t, err) + assert.Contains(t, err.Error(), "not all bridges exist") + + // Duplicate bridge names that exist is ok + jb3, err := validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), fmt.Sprintf(` +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +transmitterID = "%s" +blockchainTimeout = "1s" +contractConfigTrackerPollInterval = "15s" +contractConfigConfirmations = 1 +observationSource = """ +ds1 [type=bridge name="%s"]; 
+ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +[relayConfig] +chainID = 1337 +[pluginConfig] +juelsPerFeeCoinSource = """ +ds1 [type=bridge name="%s"]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +ds2 [type=bridge name="%s"]; +ds2_parse [type=jsonparse path="one,two"]; +ds2_multiply [type=multiply times=1.23]; +ds2 -> ds2_parse -> ds2_multiply -> answer1; +answer1 [type=median index=0]; +""" +`, placeHolderAddress, b.Name.String(), b.Name.String(), b.Name.String())) + require.NoError(t, err) + // Should not error with duplicate bridges + err = jobORM.CreateJob(&jb3) + require.NoError(t, err) + }) + + t.Run("handles the case where the parsed value is literally null", func(t *testing.T) { + var httpURL string + resp := `{"USD": null}` + { + mockHTTP := cltest.NewHTTPMockServer(t, http.StatusOK, "GET", resp) + httpURL = mockHTTP.URL + } + + // Need a job in order to create a run + jb := makeSimpleFetchOCRJobSpecWithHTTPURL(t, transmitterAddress, httpURL, false) + err := jobORM.CreateJob(jb) + require.NoError(t, err) + + runID, results, err := runner.ExecuteAndInsertFinishedRun(testutils.Context(t), *jb.PipelineSpec, pipeline.NewVarsFrom(nil), logger.TestLogger(t), true) + require.NoError(t, err) + + assert.Len(t, results.FatalErrors, 1) + assert.Len(t, results.Values, 1) + assert.Contains(t, results.FatalErrors[0].Error(), "type cannot be converted to decimal.Decimal") + assert.Nil(t, results.Values[0]) + + // Verify individual task results + var runs []pipeline.TaskRun + sql := `SELECT * FROM pipeline_task_runs WHERE pipeline_run_id = $1` + err = db.Select(&runs, sql, runID) + assert.NoError(t, err) + require.Len(t, runs, 3) + + for _, run := range runs { + if run.GetDotID() == "ds1" { + assert.True(t, run.Error.IsZero()) + require.NotNil(t, resp, run.Output) 
+ assert.Equal(t, resp, run.Output.Val) + } else if run.GetDotID() == "ds1_parse" { + assert.True(t, run.Error.IsZero()) + assert.False(t, run.Output.Valid) + } else if run.GetDotID() == "ds1_multiply" { + assert.Contains(t, run.Error.ValueOrZero(), "type cannot be converted to decimal.Decimal") + assert.False(t, run.Output.Valid) + } else { + t.Fatalf("unknown task '%v'", run.GetDotID()) + } + } + }) + + t.Run("handles the case where the jsonparse lookup path is missing from the http response", func(t *testing.T) { + var httpURL string + resp := "{\"Response\":\"Error\",\"Message\":\"You are over your rate limit please upgrade your account!\",\"HasWarning\":false,\"Type\":99,\"RateLimit\":{\"calls_made\":{\"second\":5,\"minute\":5,\"hour\":955,\"day\":10004,\"month\":15146,\"total_calls\":15152},\"max_calls\":{\"second\":20,\"minute\":300,\"hour\":3000,\"day\":10000,\"month\":75000}},\"Data\":{}}" + { + mockHTTP := cltest.NewHTTPMockServer(t, http.StatusOK, "GET", resp) + httpURL = mockHTTP.URL + } + + // Need a job in order to create a run + jb := makeSimpleFetchOCRJobSpecWithHTTPURL(t, transmitterAddress, httpURL, false) + err := jobORM.CreateJob(jb) + require.NoError(t, err) + + runID, results, err := runner.ExecuteAndInsertFinishedRun(testutils.Context(t), *jb.PipelineSpec, pipeline.NewVarsFrom(nil), logger.TestLogger(t), true) + require.NoError(t, err) + + assert.Len(t, results.Values, 1) + assert.Len(t, results.FatalErrors, 1) + assert.Contains(t, results.FatalErrors[0].Error(), pipeline.ErrTooManyErrors.Error()) + assert.Nil(t, results.Values[0]) + + // Verify individual task results + var runs []pipeline.TaskRun + sql := `SELECT * FROM pipeline_task_runs WHERE pipeline_run_id = $1` + err = db.Select(&runs, sql, runID) + assert.NoError(t, err) + require.Len(t, runs, 3) + + for _, run := range runs { + if run.GetDotID() == "ds1" { + assert.True(t, run.Error.IsZero()) + assert.Equal(t, resp, run.Output.Val) + } else if run.GetDotID() == "ds1_parse" { + 
assert.Contains(t, run.Error.ValueOrZero(), "could not resolve path [\"USD\"] in {\"Response\":\"Error\",\"Message\":\"You are over your rate limit please upgrade your account!\",\"HasWarning\":false,\"Type\":99,\"RateLimit\":{\"calls_made\":{\"second\":5,\"minute\":5,\"hour\":955,\"day\":10004,\"month\":15146,\"total_calls\":15152},\"max_calls\":{\"second\":20,\"minute\":300,\"hour\":3000,\"day\":10000,\"month\":75000}},\"Data\":{}}") + assert.False(t, run.Output.Valid) + } else if run.GetDotID() == "ds1_multiply" { + assert.Contains(t, run.Error.ValueOrZero(), pipeline.ErrTooManyErrors.Error()) + assert.False(t, run.Output.Valid) + } else { + t.Fatalf("unknown task '%v'", run.GetDotID()) + } + } + }) + + t.Run("handles the case where the jsonparse lookup path is missing from the http response and lax is enabled", func(t *testing.T) { + var httpURL string + resp := "{\"Response\":\"Error\",\"Message\":\"You are over your rate limit please upgrade your account!\",\"HasWarning\":false,\"Type\":99,\"RateLimit\":{\"calls_made\":{\"second\":5,\"minute\":5,\"hour\":955,\"day\":10004,\"month\":15146,\"total_calls\":15152},\"max_calls\":{\"second\":20,\"minute\":300,\"hour\":3000,\"day\":10000,\"month\":75000}},\"Data\":{}}" + { + mockHTTP := cltest.NewHTTPMockServer(t, http.StatusOK, "GET", resp) + httpURL = mockHTTP.URL + } + + // Need a job in order to create a run + jb := makeSimpleFetchOCRJobSpecWithHTTPURL(t, transmitterAddress, httpURL, true) + err := jobORM.CreateJob(jb) + require.NoError(t, err) + + runID, results, err := runner.ExecuteAndInsertFinishedRun(testutils.Context(t), *jb.PipelineSpec, pipeline.NewVarsFrom(nil), logger.TestLogger(t), true) + require.NoError(t, err) + + assert.Len(t, results.Values, 1) + assert.Contains(t, results.FatalErrors[0].Error(), "type cannot be converted to decimal.Decimal") + assert.Nil(t, results.Values[0]) + + // Verify individual task results + var runs []pipeline.TaskRun + sql := `SELECT * FROM pipeline_task_runs WHERE 
pipeline_run_id = $1` + err = db.Select(&runs, sql, runID) + assert.NoError(t, err) + require.Len(t, runs, 3) + + for _, run := range runs { + if run.GetDotID() == "ds1" { + assert.True(t, run.Error.IsZero()) + assert.Equal(t, resp, run.Output.Val) + } else if run.GetDotID() == "ds1_parse" { + assert.True(t, run.Error.IsZero()) + assert.False(t, run.Output.Valid) + } else if run.GetDotID() == "ds1_multiply" { + assert.Contains(t, run.Error.ValueOrZero(), "type cannot be converted to decimal.Decimal") + assert.False(t, run.Output.Valid) + } else { + t.Fatalf("unknown task '%v'", run.GetDotID()) + } + } + }) + + t.Run("minimal bootstrap", func(t *testing.T) { + s := ` + type = "offchainreporting" + schemaVersion = 1 + contractAddress = "%s" + isBootstrapPeer = true + evmChainID = "0" +` + s = fmt.Sprintf(s, cltest.NewEIP55Address()) + jb, err := ocr.ValidatedOracleSpecToml(legacyChains, s) + require.NoError(t, err) + err = toml.Unmarshal([]byte(s), &jb) + require.NoError(t, err) + jb.MaxTaskDuration = models.Interval(cltest.MustParseDuration(t, "1s")) + err = jobORM.CreateJob(&jb) + require.NoError(t, err) + + lggr := logger.TestLogger(t) + _, err = keyStore.P2P().Create() + assert.NoError(t, err) + pw := ocrcommon.NewSingletonPeerWrapper(keyStore, config.P2P(), config.OCR(), config.Database(), db, lggr) + servicetest.Run(t, pw) + sd := ocr.NewDelegate( + db, + jobORM, + keyStore, + nil, + pw, + monitoringEndpoint, + legacyChains, + lggr, + config.Database(), + servicetest.Run(t, mailboxtest.NewMonitor(t)), + ) + _, err = sd.ServicesForSpec(jb) + require.NoError(t, err) + }) + + t.Run("test min non-bootstrap", func(t *testing.T) { + kb, err := keyStore.OCR().Create() + require.NoError(t, err) + + s := fmt.Sprintf(minimalNonBootstrapTemplate, cltest.NewEIP55Address(), transmitterAddress.Hex(), kb.ID(), "http://blah.com", "") + jb, err := ocr.ValidatedOracleSpecToml(legacyChains, s) + require.NoError(t, err) + err = toml.Unmarshal([]byte(s), &jb) + require.NoError(t, 
err) + + jb.MaxTaskDuration = models.Interval(cltest.MustParseDuration(t, "1s")) + err = jobORM.CreateJob(&jb) + require.NoError(t, err) + assert.Equal(t, jb.MaxTaskDuration, models.Interval(cltest.MustParseDuration(t, "1s"))) + + lggr := logger.TestLogger(t) + pw := ocrcommon.NewSingletonPeerWrapper(keyStore, config.P2P(), config.OCR(), config.Database(), db, lggr) + servicetest.Run(t, pw) + sd := ocr.NewDelegate( + db, + jobORM, + keyStore, + nil, + pw, + monitoringEndpoint, + legacyChains, + lggr, + config.Database(), + servicetest.Run(t, mailboxtest.NewMonitor(t)), + ) + _, err = sd.ServicesForSpec(jb) + require.NoError(t, err) + }) + + t.Run("test min bootstrap", func(t *testing.T) { + s := fmt.Sprintf(minimalBootstrapTemplate, cltest.NewEIP55Address()) + jb, err := ocr.ValidatedOracleSpecToml(legacyChains, s) + require.NoError(t, err) + err = toml.Unmarshal([]byte(s), &jb) + require.NoError(t, err) + err = jobORM.CreateJob(&jb) + require.NoError(t, err) + + lggr := logger.TestLogger(t) + pw := ocrcommon.NewSingletonPeerWrapper(keyStore, config.P2P(), config.OCR(), config.Database(), db, lggr) + servicetest.Run(t, pw) + sd := ocr.NewDelegate( + db, + jobORM, + keyStore, + nil, + pw, + monitoringEndpoint, + legacyChains, + lggr, + config.Database(), + servicetest.Run(t, mailboxtest.NewMonitor(t)), + ) + _, err = sd.ServicesForSpec(jb) + require.NoError(t, err) + }) + + t.Run("test enhanced telemetry service creation", func(t *testing.T) { + testCases := []struct { + jbCaptureEATelemetry bool + specCaptureEATelemetry bool + expected bool + }{{false, false, false}, + {true, false, false}, + {false, true, true}, + {true, true, true}, + } + + for _, tc := range testCases { + + config = configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + c.OCR.CaptureEATelemetry = ptr(tc.specCaptureEATelemetry) + }) + + relayExtenders = 
evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, Client: ethClient, GeneralConfig: config, KeyStore: ethKeyStore}) + legacyChains = evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + + kb, err := keyStore.OCR().Create() + require.NoError(t, err) + + s := fmt.Sprintf(minimalNonBootstrapTemplate, cltest.NewEIP55Address(), transmitterAddress.Hex(), kb.ID(), "http://blah.com", "") + jb, err := ocr.ValidatedOracleSpecToml(legacyChains, s) + require.NoError(t, err) + err = toml.Unmarshal([]byte(s), &jb) + require.NoError(t, err) + + jb.MaxTaskDuration = models.Interval(cltest.MustParseDuration(t, "1s")) + err = jobORM.CreateJob(&jb) + require.NoError(t, err) + assert.Equal(t, jb.MaxTaskDuration, models.Interval(cltest.MustParseDuration(t, "1s"))) + + lggr := logger.TestLogger(t) + pw := ocrcommon.NewSingletonPeerWrapper(keyStore, config.P2P(), config.OCR(), config.Database(), db, lggr) + servicetest.Run(t, pw) + sd := ocr.NewDelegate( + db, + jobORM, + keyStore, + nil, + pw, + monitoringEndpoint, + legacyChains, + lggr, + config.Database(), + servicetest.Run(t, mailboxtest.NewMonitor(t)), + ) + + jb.OCROracleSpec.CaptureEATelemetry = tc.jbCaptureEATelemetry + services, err := sd.ServicesForSpec(jb) + require.NoError(t, err) + + enhancedTelemetryServiceCreated := false + for _, service := range services { + _, ok := service.(*ocrcommon.EnhancedTelemetryService[ocrcommon.EnhancedTelemetryData]) + enhancedTelemetryServiceCreated = ok + if enhancedTelemetryServiceCreated { + break + } + } + + require.Equal(t, tc.expected, enhancedTelemetryServiceCreated) + } + }) + + t.Run("test job spec error is created", func(t *testing.T) { + // Create a keystore with an ocr key bundle and p2p key. 
+ kb, err := keyStore.OCR().Create() + require.NoError(t, err) + spec := fmt.Sprintf(ocrJobSpecTemplate, testutils.NewAddress().Hex(), kb.ID(), transmitterAddress.Hex(), fmt.Sprintf(simpleFetchDataSourceTemplate, "blah", true)) + jb := makeOCRJobSpecFromToml(t, spec) + + // Create an OCR job + err = jobORM.CreateJob(jb) + require.NoError(t, err) + + lggr := logger.TestLogger(t) + pw := ocrcommon.NewSingletonPeerWrapper(keyStore, config.P2P(), config.OCR(), config.Database(), db, lggr) + servicetest.Run(t, pw) + sd := ocr.NewDelegate( + db, + jobORM, + keyStore, + nil, + pw, + monitoringEndpoint, + legacyChains, + lggr, + config.Database(), + servicetest.Run(t, mailboxtest.NewMonitor(t)), + ) + services, err := sd.ServicesForSpec(*jb) + require.NoError(t, err) + + // Return an error getting the contract code. + ethClient.On("CodeAt", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("no such code")) + ctx := testutils.Context(t) + for _, s := range services { + err = s.Start(ctx) + require.NoError(t, err) + } + var se []job.SpecError + require.Eventually(t, func() bool { + err = db.Select(&se, `SELECT * FROM job_spec_errors`) + require.NoError(t, err) + return len(se) == 1 + }, time.Second, 100*time.Millisecond) + require.Len(t, se, 1) + assert.Equal(t, uint(1), se[0].Occurrences) + + for _, s := range services { + err = s.Close() + require.NoError(t, err) + } + + // Ensure we can delete an errored + err = jobORM.DeleteJob(jb.ID) + require.NoError(t, err) + se = []job.SpecError{} + err = db.Select(&se, `SELECT * FROM job_spec_errors`) + require.NoError(t, err) + require.Len(t, se, 0) + + // TODO: This breaks the txdb connection, failing subsequent tests. Resolve in the future + // Noop once the job is gone. 
+ // jobORM.RecordError(testutils.Context(t), jb.ID, "test") + // err = db.Find(&se).Error + // require.NoError(t, err) + // require.Len(t, se, 0) + }) + + t.Run("timeouts", func(t *testing.T) { + // There are 4 timeouts: + // - ObservationTimeout = how long the whole OCR time needs to run, or it fails (default 10 seconds) + // - config.JobPipelineMaxTaskDuration() = node level maximum time for a pipeline task (default 10 minutes) + // - config.transmitterAddress, http specific timeouts (default 15s * 5 retries = 75s) + // - "d1 [.... timeout="2s"]" = per task level timeout (should override the global config) + serv := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + time.Sleep(1 * time.Millisecond) + res.WriteHeader(http.StatusOK) + _, err := res.Write([]byte(`{"USD":10.1}`)) + require.NoError(t, err) + })) + defer serv.Close() + + jb := makeMinimalHTTPOracleSpec(t, db, config, cltest.NewEIP55Address().String(), transmitterAddress.Hex(), cltest.DefaultOCRKeyBundleID, serv.URL, `timeout="1ns"`) + err := jobORM.CreateJob(jb) + require.NoError(t, err) + + _, results, err := runner.ExecuteAndInsertFinishedRun(testutils.Context(t), *jb.PipelineSpec, pipeline.NewVarsFrom(nil), logger.TestLogger(t), true) + require.NoError(t, err) + assert.Nil(t, results.Values[0]) + + // No task timeout should succeed. + jb = makeMinimalHTTPOracleSpec(t, db, config, cltest.NewEIP55Address().String(), transmitterAddress.Hex(), cltest.DefaultOCRKeyBundleID, serv.URL, "") + jb.Name = null.NewString("a job 2", true) + err = jobORM.CreateJob(jb) + require.NoError(t, err) + _, results, err = runner.ExecuteAndInsertFinishedRun(testutils.Context(t), *jb.PipelineSpec, pipeline.NewVarsFrom(nil), logger.TestLogger(t), true) + require.NoError(t, err) + assert.Equal(t, 10.1, results.Values[0]) + assert.Nil(t, results.FatalErrors[0]) + + // Job specified task timeout should fail. 
+ jb = makeMinimalHTTPOracleSpec(t, db, config, cltest.NewEIP55Address().String(), transmitterAddress.Hex(), cltest.DefaultOCRKeyBundleID, serv.URL, "") + jb.MaxTaskDuration = models.Interval(time.Duration(1)) + jb.Name = null.NewString("a job 3", true) + err = jobORM.CreateJob(jb) + require.NoError(t, err) + + _, results, err = runner.ExecuteAndInsertFinishedRun(testutils.Context(t), *jb.PipelineSpec, pipeline.NewVarsFrom(nil), logger.TestLogger(t), true) + require.NoError(t, err) + assert.NotNil(t, results.FatalErrors[0]) + }) + + t.Run("deleting jobs", func(t *testing.T) { + var httpURL string + { + resp := `{"USD": 42.42}` + mockHTTP := cltest.NewHTTPMockServer(t, http.StatusOK, "GET", resp) + httpURL = mockHTTP.URL + } + + // Need a job in order to create a run + jb := makeSimpleFetchOCRJobSpecWithHTTPURL(t, transmitterAddress, httpURL, false) + err := jobORM.CreateJob(jb) + require.NoError(t, err) + + _, results, err := runner.ExecuteAndInsertFinishedRun(testutils.Context(t), *jb.PipelineSpec, pipeline.NewVarsFrom(nil), logger.TestLogger(t), true) + require.NoError(t, err) + assert.Len(t, results.Values, 1) + assert.Nil(t, results.FatalErrors[0]) + assert.Equal(t, "4242", results.Values[0].(decimal.Decimal).String()) + + // Delete the job + err = jobORM.DeleteJob(jb.ID) + require.NoError(t, err) + + // Create another run, it should fail + _, _, err = runner.ExecuteAndInsertFinishedRun(testutils.Context(t), *jb.PipelineSpec, pipeline.NewVarsFrom(nil), logger.TestLogger(t), true) + require.Error(t, err) + }) +} + +func TestRunner_Success_Callback_AsyncJob(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + t := true + c.JobPipeline.ExternalInitiatorsEnabled = &t + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(10 * time.Millisecond) + }) + + app := cltest.NewApplicationWithConfig(t, cfg, ethClient, 
cltest.UseRealExternalInitiatorManager) + require.NoError(t, app.Start(testutils.Context(t))) + + var ( + eiName = "substrate-ei" + eiSpec = map[string]interface{}{"foo": "bar"} + eiRequest = map[string]interface{}{"result": 42} + + jobUUID = uuid.MustParse("0EEC7E1D-D0D2-476C-A1A8-72DFB6633F46") + + expectedCreateJobRequest = map[string]interface{}{ + "jobId": jobUUID.String(), + "type": eiName, + "params": eiSpec, + } + ) + + // Setup EI + var eiURL string + var eiNotifiedOfCreate bool + var eiNotifiedOfDelete bool + { + mockEI := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !eiNotifiedOfCreate { + require.Equal(t, http.MethodPost, r.Method) + + eiNotifiedOfCreate = true + defer r.Body.Close() + + var gotCreateJobRequest map[string]interface{} + err := json.NewDecoder(r.Body).Decode(&gotCreateJobRequest) + require.NoError(t, err) + + require.Equal(t, expectedCreateJobRequest, gotCreateJobRequest) + w.WriteHeader(http.StatusOK) + } else { + require.Equal(t, http.MethodDelete, r.Method) + + eiNotifiedOfDelete = true + defer r.Body.Close() + + require.Equal(t, fmt.Sprintf("/%v", jobUUID.String()), r.URL.Path) + } + })) + defer mockEI.Close() + eiURL = mockEI.URL + } + + // Create the EI record on the Core node + var eia *auth.Token + { + eiCreate := map[string]string{ + "name": eiName, + "url": eiURL, + } + eiCreateJSON, err := json.Marshal(eiCreate) + require.NoError(t, err) + eip := cltest.CreateExternalInitiatorViaWeb(t, app, string(eiCreateJSON)) + eia = &auth.Token{ + AccessKey: eip.AccessKey, + Secret: eip.Secret, + } + } + + var responseURL string + + // Create the bridge on the Core node + bridgeCalled := make(chan struct{}, 1) + var bridgeName string + { + bridgeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + var bridgeRequest map[string]interface{} + err := json.NewDecoder(r.Body).Decode(&bridgeRequest) + require.NoError(t, err) + + 
require.Equal(t, float64(42), bridgeRequest["value"]) + + responseURL = bridgeRequest["responseURL"].(string) + + w.WriteHeader(http.StatusOK) + require.NoError(t, err) + _, err = io.WriteString(w, `{"pending": true}`) + require.NoError(t, err) + bridgeCalled <- struct{}{} + })) + _, bridge := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{URL: bridgeServer.URL}, app.GetConfig().Database()) + bridgeName = bridge.Name.String() + defer bridgeServer.Close() + } + + // Create the job spec on the Core node + var jobID int32 + { + tomlSpec := fmt.Sprintf(` + type = "webhook" + schemaVersion = 1 + externalJobID = "%v" + externalInitiators = [ + { + name = "%s", + spec = """ + %s + """ + } + ] + observationSource = """ + parse [type=jsonparse path="result" data="$(jobRun.requestBody)"] + ds1 [type=bridge async=true name="%s" timeout=0 requestData=<{"value": $(parse)}>] + ds1_parse [type=jsonparse lax=false path="data,result"] + ds1_multiply [type=multiply times=1000000000000000000 index=0] + + parse->ds1->ds1_parse->ds1_multiply; + """ + `, jobUUID, eiName, cltest.MustJSONMarshal(t, eiSpec), bridgeName) + + _, err := webhook.ValidatedWebhookSpec(tomlSpec, app.GetExternalInitiatorManager()) + require.NoError(t, err) + job := cltest.CreateJobViaWeb(t, app, []byte(cltest.MustJSONMarshal(t, web.CreateJobRequest{TOML: tomlSpec}))) + jobID = job.ID + + require.Eventually(t, func() bool { return eiNotifiedOfCreate }, 5*time.Second, 10*time.Millisecond, "expected external initiator to be notified of new job") + } + t.Run("simulate request from EI -> Core node with successful callback", func(t *testing.T) { + cltest.AwaitJobActive(t, app.JobSpawner(), jobID, 3*time.Second) + + _ = cltest.CreateJobRunViaExternalInitiatorV2(t, app, jobUUID, *eia, cltest.MustJSONMarshal(t, eiRequest)) + + pipelineORM := pipeline.NewORM(app.GetSqlxDB(), logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(app.GetSqlxDB(), 
logger.TestLogger(t), cfg.Database()) + jobORM := NewTestORM(t, app.GetSqlxDB(), pipelineORM, bridgesORM, app.KeyStore, cfg.Database()) + + // Trigger v2/resume + select { + case <-bridgeCalled: + case <-time.After(time.Second): + t.Fatal("expected bridge server to be called") + } + // Make the request + { + url, err := url.Parse(responseURL) + require.NoError(t, err) + client := app.NewHTTPClient(nil) + body := strings.NewReader(`{"value": {"data":{"result":"123.45"}}}`) + response, cleanup := client.Patch(url.Path, body) + defer cleanup() + cltest.AssertServerResponse(t, response, http.StatusOK) + } + + runs := cltest.WaitForPipelineComplete(t, 0, jobID, 1, 4, jobORM, 5*time.Second, 300*time.Millisecond) + require.Len(t, runs, 1) + run := runs[0] + require.Len(t, run.PipelineTaskRuns, 4) + require.Empty(t, run.PipelineTaskRuns[0].Error) + require.Empty(t, run.PipelineTaskRuns[1].Error) + require.Empty(t, run.PipelineTaskRuns[2].Error) + require.Empty(t, run.PipelineTaskRuns[3].Error) + require.Equal(t, pipeline.JSONSerializable{Val: []interface{}{"123450000000000000000"}, Valid: true}, run.Outputs) + require.Equal(t, pipeline.RunErrors{null.String{NullString: sql.NullString{String: "", Valid: false}}}, run.FatalErrors) + }) + // Delete the job + { + cltest.DeleteJobViaWeb(t, app, jobID) + require.Eventually(t, func() bool { return eiNotifiedOfDelete }, 5*time.Second, 10*time.Millisecond, "expected external initiator to be notified of deleted job") + } +} + +func TestRunner_Error_Callback_AsyncJob(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + t := true + c.JobPipeline.ExternalInitiatorsEnabled = &t + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(10 * time.Millisecond) + }) + + app := cltest.NewApplicationWithConfig(t, cfg, ethClient, cltest.UseRealExternalInitiatorManager) + require.NoError(t, 
app.Start(testutils.Context(t))) + + var ( + eiName = "substrate-ei" + eiSpec = map[string]interface{}{"foo": "bar"} + eiRequest = map[string]interface{}{"result": 42} + + jobUUID = uuid.MustParse("0EEC7E1D-D0D2-476C-A1A8-72DFB6633F47") + + expectedCreateJobRequest = map[string]interface{}{ + "jobId": jobUUID.String(), + "type": eiName, + "params": eiSpec, + } + ) + + // Setup EI + var eiURL string + var eiNotifiedOfCreate bool + var eiNotifiedOfDelete bool + { + mockEI := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !eiNotifiedOfCreate { + require.Equal(t, http.MethodPost, r.Method) + + eiNotifiedOfCreate = true + defer r.Body.Close() + + var gotCreateJobRequest map[string]interface{} + err := json.NewDecoder(r.Body).Decode(&gotCreateJobRequest) + require.NoError(t, err) + + require.Equal(t, expectedCreateJobRequest, gotCreateJobRequest) + w.WriteHeader(http.StatusOK) + } else { + require.Equal(t, http.MethodDelete, r.Method) + + eiNotifiedOfDelete = true + defer r.Body.Close() + + require.Equal(t, fmt.Sprintf("/%v", jobUUID.String()), r.URL.Path) + } + })) + defer mockEI.Close() + eiURL = mockEI.URL + } + + // Create the EI record on the Core node + var eia *auth.Token + { + eiCreate := map[string]string{ + "name": eiName, + "url": eiURL, + } + eiCreateJSON, err := json.Marshal(eiCreate) + require.NoError(t, err) + eip := cltest.CreateExternalInitiatorViaWeb(t, app, string(eiCreateJSON)) + eia = &auth.Token{ + AccessKey: eip.AccessKey, + Secret: eip.Secret, + } + } + + var responseURL string + + // Create the bridge on the Core node + bridgeCalled := make(chan struct{}, 1) + var bridgeName string + { + bridgeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + var bridgeRequest map[string]interface{} + err := json.NewDecoder(r.Body).Decode(&bridgeRequest) + require.NoError(t, err) + + require.Equal(t, float64(42), bridgeRequest["value"]) + + responseURL = 
bridgeRequest["responseURL"].(string) + + w.WriteHeader(http.StatusOK) + require.NoError(t, err) + _, err = io.WriteString(w, `{"pending": true}`) + require.NoError(t, err) + bridgeCalled <- struct{}{} + })) + _, bridge := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{URL: bridgeServer.URL}, app.GetConfig().Database()) + bridgeName = bridge.Name.String() + defer bridgeServer.Close() + } + + // Create the job spec on the Core node + var jobID int32 + { + tomlSpec := fmt.Sprintf(` + type = "webhook" + schemaVersion = 1 + externalJobID = "%v" + externalInitiators = [ + { + name = "%s", + spec = """ + %s + """ + } + ] + observationSource = """ + parse [type=jsonparse path="result" data="$(jobRun.requestBody)"] + ds1 [type=bridge async=true name="%s" timeout=0 requestData=<{"value": $(parse)}>] + ds1_parse [type=jsonparse lax=false path="data,result"] + ds1_multiply [type=multiply times=1000000000000000000 index=0] + + parse->ds1->ds1_parse->ds1_multiply; + """ + `, jobUUID, eiName, cltest.MustJSONMarshal(t, eiSpec), bridgeName) + + _, err := webhook.ValidatedWebhookSpec(tomlSpec, app.GetExternalInitiatorManager()) + require.NoError(t, err) + job := cltest.CreateJobViaWeb(t, app, []byte(cltest.MustJSONMarshal(t, web.CreateJobRequest{TOML: tomlSpec}))) + jobID = job.ID + + require.Eventually(t, func() bool { return eiNotifiedOfCreate }, 5*time.Second, 10*time.Millisecond, "expected external initiator to be notified of new job") + } + t.Run("simulate request from EI -> Core node with erroring callback", func(t *testing.T) { + _ = cltest.CreateJobRunViaExternalInitiatorV2(t, app, jobUUID, *eia, cltest.MustJSONMarshal(t, eiRequest)) + + pipelineORM := pipeline.NewORM(app.GetSqlxDB(), logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(app.GetSqlxDB(), logger.TestLogger(t), cfg.Database()) + jobORM := NewTestORM(t, app.GetSqlxDB(), pipelineORM, bridgesORM, app.KeyStore, cfg.Database()) + + // Trigger 
v2/resume + select { + case <-bridgeCalled: + case <-time.After(time.Second): + t.Fatal("expected bridge server to be called") + } + // Make the request + { + url, err := url.Parse(responseURL) + require.NoError(t, err) + client := app.NewHTTPClient(nil) + body := strings.NewReader(`{"error": "something exploded in EA"}`) + response, cleanup := client.Patch(url.Path, body) + defer cleanup() + cltest.AssertServerResponse(t, response, http.StatusOK) + } + + runs := cltest.WaitForPipelineError(t, 0, jobID, 1, 4, jobORM, 5*time.Second, 300*time.Millisecond) + require.Len(t, runs, 1) + run := runs[0] + require.Len(t, run.PipelineTaskRuns, 4) + require.Empty(t, run.PipelineTaskRuns[0].Error) + assert.True(t, run.PipelineTaskRuns[1].Error.Valid) + assert.Equal(t, "something exploded in EA", run.PipelineTaskRuns[1].Error.String) + assert.True(t, run.PipelineTaskRuns[2].Error.Valid) + assert.True(t, run.PipelineTaskRuns[3].Error.Valid) + require.Equal(t, pipeline.JSONSerializable{Val: []interface{}{interface{}(nil)}, Valid: true}, run.Outputs) + require.Equal(t, pipeline.RunErrors{null.String{NullString: sql.NullString{String: "task inputs: too many errors", Valid: true}}}, run.FatalErrors) + }) + // Delete the job + { + cltest.DeleteJobViaWeb(t, app, jobID) + require.Eventually(t, func() bool { return eiNotifiedOfDelete }, 5*time.Second, 10*time.Millisecond, "expected external initiator to be notified of deleted job") + } +} + +func ptr[T any](t T) *T { return &t } diff --git a/core/services/job/spawner.go b/core/services/job/spawner.go new file mode 100644 index 00000000..05b11687 --- /dev/null +++ b/core/services/job/spawner.go @@ -0,0 +1,401 @@ +package job + +import ( + "context" + "fmt" + "math" + "reflect" + "sync" + + pkgerrors "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + 
"github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +//go:generate mockery --quiet --name Spawner --output ./mocks/ --case=underscore + +type ( + // Spawner manages the spinning up and down of the long-running + // services that perform the work described by job specs. Each active job spec + // has 1 or more of these services associated with it. + Spawner interface { + services.Service + + // CreateJob creates a new job and starts services. + // All services must start without errors for the job to be active. + CreateJob(jb *Job, qopts ...pg.QOpt) (err error) + // DeleteJob deletes a job and stops any active services. + DeleteJob(jobID int32, qopts ...pg.QOpt) error + // ActiveJobs returns a map of jobs with active services (started without error). + ActiveJobs() map[int32]Job + + // StartService starts services for the given job spec. + // NOTE: Prefer to use CreateJob, this is only publicly exposed for use in tests + // to start a job that was previously manually inserted into DB + StartService(ctx context.Context, spec Job, qopts ...pg.QOpt) error + } + + Checker interface { + Register(service services.HealthReporter) error + Unregister(name string) error + } + + spawner struct { + services.StateMachine + orm ORM + config Config + checker Checker + jobTypeDelegates map[Type]Delegate + activeJobs map[int32]activeJob + activeJobsMu sync.RWMutex + q pg.Q + lggr logger.Logger + + chStop services.StopChan + lbDependentAwaiters []utils.DependentAwaiter + } + + // TODO(spook): I can't wait for Go generics + Delegate interface { + JobType() Type + // BeforeJobCreated is only called once on first time job create. + BeforeJobCreated(Job) + // ServicesForSpec returns services to be started and stopped for this + // job. In case a given job type relies upon well-defined startup/shutdown + // ordering for services, they are started in the order they are given + // and stopped in reverse order. 
+ ServicesForSpec(Job) ([]ServiceCtx, error) + AfterJobCreated(Job) + BeforeJobDeleted(Job) + // OnDeleteJob will be called from within DELETE db transaction. Any db + // commands issued within OnDeleteJob() should be performed first, before any + // non-db side effects. This is required in order to guarantee mutual atomicity between + // all tasks intended to happen during job deletion. For the same reason, the job will + // not show up in the db within OnDeleteJob(), even though it is still actively running. + OnDeleteJob(jb Job, q pg.Queryer) error + } + + activeJob struct { + delegate Delegate + spec Job + services []ServiceCtx + } +) + +var _ Spawner = (*spawner)(nil) + +func NewSpawner(orm ORM, config Config, checker Checker, jobTypeDelegates map[Type]Delegate, db *sqlx.DB, lggr logger.Logger, lbDependentAwaiters []utils.DependentAwaiter) *spawner { + namedLogger := lggr.Named("JobSpawner") + s := &spawner{ + orm: orm, + config: config, + checker: checker, + jobTypeDelegates: jobTypeDelegates, + q: pg.NewQ(db, namedLogger, config), + lggr: namedLogger, + activeJobs: make(map[int32]activeJob), + chStop: make(services.StopChan), + lbDependentAwaiters: lbDependentAwaiters, + } + return s +} + +// Start starts Spawner. 
+func (js *spawner) Start(ctx context.Context) error { + return js.StartOnce("JobSpawner", func() error { + js.startAllServices(ctx) + return nil + + }) +} + +func (js *spawner) Close() error { + return js.StopOnce("JobSpawner", func() error { + close(js.chStop) + js.stopAllServices() + return nil + + }) +} + +func (js *spawner) Name() string { + return js.lggr.Name() +} + +func (js *spawner) HealthReport() map[string]error { + return map[string]error{js.Name(): js.Healthy()} +} + +func (js *spawner) startAllServices(ctx context.Context) { + // TODO: rename to find AllJobs + specs, _, err := js.orm.FindJobs(0, math.MaxUint32) + if err != nil { + werr := fmt.Errorf("couldn't fetch unclaimed jobs: %v", err) + js.lggr.Critical(werr.Error()) + js.SvcErrBuffer.Append(werr) + return + } + + for _, spec := range specs { + if err = js.StartService(ctx, spec); err != nil { + js.lggr.Errorf("Couldn't start service %q: %v", spec.Name.ValueOrZero(), err) + } + } + // Log Broadcaster fully starts after all initial Register calls are done from other starting services + // to make sure the initial backfill covers those subscribers. + for _, lbd := range js.lbDependentAwaiters { + lbd.DependentReady() + } +} + +func (js *spawner) stopAllServices() { + jobIDs := js.activeJobIDs() + for _, jobID := range jobIDs { + js.stopService(jobID) + } +} + +// stopService removes the job from memory and stop the services. +// It will always delete the job from memory even if closing the services fail. 
+func (js *spawner) stopService(jobID int32) { + lggr := js.lggr.With("jobID", jobID) + lggr.Debug("Stopping services for job") + js.activeJobsMu.Lock() + defer js.activeJobsMu.Unlock() + + aj := js.activeJobs[jobID] + + for i := len(aj.services) - 1; i >= 0; i-- { + service := aj.services[i] + sLggr := lggr.With("subservice", i, "serviceType", reflect.TypeOf(service)) + if c, ok := service.(services.HealthReporter); ok { + if err := js.checker.Unregister(c.Name()); err != nil { + sLggr.Warnw("Failed to unregister service from health checker", "err", err) + } + } + if err := service.Close(); err != nil { + sLggr.Criticalw("Error stopping job service", "err", err) + js.SvcErrBuffer.Append(pkgerrors.Wrap(err, "error stopping job service")) + } else { + sLggr.Debug("Stopped job service") + } + } + lggr.Debug("Stopped all services for job") + + delete(js.activeJobs, jobID) +} + +func (js *spawner) StartService(ctx context.Context, jb Job, qopts ...pg.QOpt) error { + lggr := js.lggr.With("jobID", jb.ID) + js.activeJobsMu.Lock() + defer js.activeJobsMu.Unlock() + + delegate, exists := js.jobTypeDelegates[jb.Type] + if !exists { + lggr.Errorw("Job type has not been registered with job.Spawner", "type", jb.Type) + return pkgerrors.Errorf("unregistered type %q for job: %d", jb.Type, jb.ID) + } + // We always add the active job in the activeJob map, even in the case + // that it fails to start. That way we have access to the delegate to call + // OnJobDeleted before deleting. However, the activeJob will only have services + // that it was able to start without an error. 
+ aj := activeJob{delegate: delegate, spec: jb} + + jb.PipelineSpec.JobName = jb.Name.ValueOrZero() + jb.PipelineSpec.JobID = jb.ID + jb.PipelineSpec.JobType = string(jb.Type) + jb.PipelineSpec.ForwardingAllowed = jb.ForwardingAllowed + if jb.GasLimit.Valid { + jb.PipelineSpec.GasLimit = &jb.GasLimit.Uint32 + } + + srvs, err := delegate.ServicesForSpec(jb) + if err != nil { + lggr.Errorw("Error creating services for job", "err", err) + cctx, cancel := js.chStop.NewCtx() + defer cancel() + js.orm.TryRecordError(jb.ID, err.Error(), pg.WithParentCtx(cctx)) + js.activeJobs[jb.ID] = aj + return pkgerrors.Wrapf(err, "failed to create services for job: %d", jb.ID) + } + + lggr.Debugw("JobSpawner: Starting services for job", "count", len(srvs)) + + var ms services.MultiStart + for _, srv := range srvs { + err = ms.Start(ctx, srv) + if err != nil { + lggr.Criticalw("Error starting service for job", "err", err) + return err + } + if c, ok := srv.(services.HealthReporter); ok { + err = js.checker.Register(c) + if err != nil { + lggr.Errorw("Error registering service with health checker", "err", err) + return err + } + } + aj.services = append(aj.services, srv) + } + lggr.Debugw("JobSpawner: Finished starting services for job", "count", len(srvs)) + js.activeJobs[jb.ID] = aj + return nil +} + +// Should not get called before Start() +func (js *spawner) CreateJob(jb *Job, qopts ...pg.QOpt) (err error) { + delegate, exists := js.jobTypeDelegates[jb.Type] + if !exists { + js.lggr.Errorf("job type '%s' has not been registered with the job.Spawner", jb.Type) + err = pkgerrors.Errorf("job type '%s' has not been registered with the job.Spawner", jb.Type) + return + } + + q := js.q.WithOpts(qopts...) 
+ pctx, cancel := js.chStop.Ctx(q.ParentCtx) + defer cancel() + q.ParentCtx = pctx + + ctx, cancel := q.Context() + defer cancel() + + err = js.orm.CreateJob(jb, pg.WithQueryer(q.Queryer), pg.WithParentCtx(ctx)) + if err != nil { + js.lggr.Errorw("Error creating job", "type", jb.Type, "err", err) + return + } + js.lggr.Infow("Created job", "type", jb.Type, "jobID", jb.ID) + + delegate.BeforeJobCreated(*jb) + err = js.StartService(pctx, *jb, pg.WithQueryer(q.Queryer)) + if err != nil { + js.lggr.Errorw("Error starting job services", "type", jb.Type, "jobID", jb.ID, "err", err) + } else { + js.lggr.Infow("Started job services", "type", jb.Type, "jobID", jb.ID) + } + + delegate.AfterJobCreated(*jb) + + return err +} + +// Should not get called before Start() +func (js *spawner) DeleteJob(jobID int32, qopts ...pg.QOpt) error { + if jobID == 0 { + return pkgerrors.New("will not delete job with 0 ID") + } + + lggr := js.lggr.With("jobID", jobID) + lggr.Debugw("Deleting job") + + var aj activeJob + var exists bool + func() { + js.activeJobsMu.RLock() + defer js.activeJobsMu.RUnlock() + aj, exists = js.activeJobs[jobID] + }() + + q := js.q.WithOpts(qopts...) 
+ pctx, cancel := js.chStop.Ctx(q.ParentCtx) + defer cancel() + q.ParentCtx = pctx + ctx, cancel := q.Context() + defer cancel() + + if !exists { // inactive, so look up the spec and delegate + jb, err := js.orm.FindJob(ctx, jobID) + if err != nil { + return pkgerrors.Wrapf(err, "job %d not found", jobID) + } + aj.spec = jb + if !func() (ok bool) { + js.activeJobsMu.RLock() + defer js.activeJobsMu.RUnlock() + aj.delegate, ok = js.jobTypeDelegates[jb.Type] + return ok + }() { + js.lggr.Errorw("Job type has not been registered with job.Spawner", "type", jb.Type, "jobID", jb.ID) + return pkgerrors.Errorf("unregistered type %q for job: %d", jb.Type, jb.ID) + } + } + + lggr.Debugw("Callback: BeforeDeleteJob") + aj.delegate.BeforeJobDeleted(aj.spec) + lggr.Debugw("Callback: BeforeDeleteJob done") + + err := q.Transaction(func(tx pg.Queryer) error { + err := js.orm.DeleteJob(jobID, pg.WithQueryer(tx)) + if err != nil { + js.lggr.Errorw("Error deleting job", "jobID", jobID, "err", err) + return err + } + // This comes after calling orm.DeleteJob(), so that any non-db side effects inside it only get executed if + // we know the DELETE will succeed. The DELETE will be finalized only if all db transactions in OnDeleteJob() + // succeed. If either of those fails, the job will not be stopped and everything will be rolled back. + lggr.Debugw("Callback: OnDeleteJob") + err = aj.delegate.OnDeleteJob(aj.spec, tx) + if err != nil { + return err + } + + lggr.Debugw("Callback: OnDeleteJob done") + return nil + }) + + if exists { + // Stop the service and remove the job from memory, which will always happen even if closing the services fail. 
+ js.stopService(jobID) + } + lggr.Infow("Stopped and deleted job") + + return err +} + +func (js *spawner) ActiveJobs() map[int32]Job { + js.activeJobsMu.RLock() + defer js.activeJobsMu.RUnlock() + + m := make(map[int32]Job, len(js.activeJobs)) + for jobID := range js.activeJobs { + m[jobID] = js.activeJobs[jobID].spec + } + return m +} + +func (js *spawner) activeJobIDs() []int32 { + js.activeJobsMu.RLock() + defer js.activeJobsMu.RUnlock() + + ids := make([]int32, 0, len(js.activeJobs)) + for jobID := range js.activeJobs { + ids = append(ids, jobID) + } + return ids +} + +var _ Delegate = &NullDelegate{} + +type NullDelegate struct { + Type Type +} + +func (n *NullDelegate) JobType() Type { + return n.Type +} + +// ServicesForSpec does no-op. +func (n *NullDelegate) ServicesForSpec(spec Job) (s []ServiceCtx, err error) { + return +} + +func (n *NullDelegate) BeforeJobCreated(spec Job) {} +func (n *NullDelegate) AfterJobCreated(spec Job) {} +func (n *NullDelegate) BeforeJobDeleted(spec Job) {} +func (n *NullDelegate) OnDeleteJob(spec Job, q pg.Queryer) error { return nil } diff --git a/core/services/job/spawner_test.go b/core/services/job/spawner_test.go new file mode 100644 index 00000000..ac09f48a --- /dev/null +++ b/core/services/job/spawner_test.go @@ -0,0 +1,346 @@ +package job_test + +import ( + "testing" + "time" + + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/plugin-common/pkg/utils/mailbox/mailboxtest" + "github.com/goplugin/pluginv3.0/v2/core/capabilities" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + mocklp "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + evmtypes 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/job/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + evmrelayer "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/plugins" +) + +type delegate struct { + jobType job.Type + services []job.ServiceCtx + jobID int32 + chContinueCreatingServices chan struct{} + job.Delegate +} + +func (d delegate) JobType() job.Type { + return d.jobType +} + +// ServicesForSpec satisfies the job.Delegate interface. 
+func (d delegate) ServicesForSpec(js job.Job) ([]job.ServiceCtx, error) { + if js.Type != d.jobType { + return nil, nil + } + return d.services, nil +} + +func clearDB(t *testing.T, db *sqlx.DB) { + cltest.ClearDBTables(t, db, "jobs", "pipeline_runs", "pipeline_specs", "pipeline_task_runs") +} + +type relayGetter struct { + e evmrelay.EVMChainRelayerExtender + r *evmrelayer.Relayer +} + +func (g *relayGetter) Get(id relay.ID) (loop.Relayer, error) { + return evmrelayer.NewLoopRelayServerAdapter(g.r, g.e), nil +} + +func TestSpawner_CreateJobDeleteJob(t *testing.T) { + t.Parallel() + + config := configtest.NewTestGeneralConfig(t) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + ethKeyStore := keyStore.Eth() + require.NoError(t, keyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, keyStore.P2P().Add(cltest.DefaultP2PKey)) + require.NoError(t, keyStore.OCR2().Add(cltest.DefaultOCR2Key)) + + _, address := cltest.MustInsertRandomKey(t, ethKeyStore) + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + _, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, config.Database()) + + ethClient := cltest.NewEthMocksWithDefaultChain(t) + ethClient.On("CallContext", mock.Anything, mock.Anything, "eth_getBlockByNumber", mock.Anything, false). + Run(func(args mock.Arguments) { + head := args.Get(1).(**evmtypes.Head) + *head = cltest.Head(10) + }). 
+ Return(nil).Maybe() + + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, Client: ethClient, GeneralConfig: config, KeyStore: ethKeyStore}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + t.Run("should respect its dependents", func(t *testing.T) { + lggr := logger.TestLogger(t) + orm := NewTestORM(t, db, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database()) + a := utils.NewDependentAwaiter() + a.AddDependents(1) + spawner := job.NewSpawner(orm, config.Database(), noopChecker{}, map[job.Type]job.Delegate{}, db, lggr, []utils.DependentAwaiter{a}) + // Starting the spawner should signal to the dependents + result := make(chan bool) + go func() { + select { + case <-a.AwaitDependents(): + result <- true + case <-time.After(2 * time.Second): + result <- false + } + }() + require.NoError(t, spawner.Start(testutils.Context(t))) + assert.True(t, <-result, "failed to signal to dependents") + }) + + t.Run("starts and stops job services when jobs are added and removed", func(t *testing.T) { + jobA := cltest.MakeDirectRequestJobSpec(t) + jobB := makeOCRJobSpec(t, address, bridge.Name.String(), bridge2.Name.String()) + + lggr := logger.TestLogger(t) + orm := NewTestORM(t, db, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database()) + + eventuallyA := cltest.NewAwaiter() + serviceA1 := mocks.NewServiceCtx(t) + serviceA2 := mocks.NewServiceCtx(t) + serviceA1.On("Start", mock.Anything).Return(nil).Once() + serviceA2.On("Start", mock.Anything).Return(nil).Once().Run(func(mock.Arguments) { eventuallyA.ItHappened() }) + mailMon := servicetest.Run(t, mailboxtest.NewMonitor(t)) + dA := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, legacyChains, logger.TestLogger(t), config.Database(), mailMon) + 
delegateA := &delegate{jobA.Type, []job.ServiceCtx{serviceA1, serviceA2}, 0, make(chan struct{}), dA} + + eventuallyB := cltest.NewAwaiter() + serviceB1 := mocks.NewServiceCtx(t) + serviceB2 := mocks.NewServiceCtx(t) + serviceB1.On("Start", mock.Anything).Return(nil).Once() + serviceB2.On("Start", mock.Anything).Return(nil).Once().Run(func(mock.Arguments) { eventuallyB.ItHappened() }) + dB := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, legacyChains, logger.TestLogger(t), config.Database(), mailMon) + delegateB := &delegate{jobB.Type, []job.ServiceCtx{serviceB1, serviceB2}, 0, make(chan struct{}), dB} + + spawner := job.NewSpawner(orm, config.Database(), noopChecker{}, map[job.Type]job.Delegate{ + jobA.Type: delegateA, + jobB.Type: delegateB, + }, db, lggr, nil) + require.NoError(t, spawner.Start(testutils.Context(t))) + err := spawner.CreateJob(jobA) + require.NoError(t, err) + jobSpecIDA := jobA.ID + delegateA.jobID = jobSpecIDA + close(delegateA.chContinueCreatingServices) + + eventuallyA.AwaitOrFail(t, 20*time.Second) + + err = spawner.CreateJob(jobB) + require.NoError(t, err) + jobSpecIDB := jobB.ID + delegateB.jobID = jobSpecIDB + close(delegateB.chContinueCreatingServices) + + eventuallyB.AwaitOrFail(t, 20*time.Second) + + serviceA1.On("Close").Return(nil).Once() + serviceA2.On("Close").Return(nil).Once() + err = spawner.DeleteJob(jobSpecIDA) + require.NoError(t, err) + + serviceB1.On("Close").Return(nil).Once() + serviceB2.On("Close").Return(nil).Once() + err = spawner.DeleteJob(jobSpecIDB) + require.NoError(t, err) + + require.NoError(t, spawner.Close()) + }) + + clearDB(t, db) + + t.Run("starts and stops job services from the DB when .Start()/.Stop() is called", func(t *testing.T) { + jobA := makeOCRJobSpec(t, address, bridge.Name.String(), bridge2.Name.String()) + + eventually := cltest.NewAwaiter() + serviceA1 := mocks.NewServiceCtx(t) + serviceA2 := mocks.NewServiceCtx(t) + serviceA1.On("Start", mock.Anything).Return(nil).Once() + 
serviceA2.On("Start", mock.Anything).Return(nil).Once().Run(func(mock.Arguments) { eventually.ItHappened() }) + + lggr := logger.TestLogger(t) + orm := NewTestORM(t, db, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database()) + mailMon := servicetest.Run(t, mailboxtest.NewMonitor(t)) + d := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, legacyChains, logger.TestLogger(t), config.Database(), mailMon) + delegateA := &delegate{jobA.Type, []job.ServiceCtx{serviceA1, serviceA2}, 0, nil, d} + spawner := job.NewSpawner(orm, config.Database(), noopChecker{}, map[job.Type]job.Delegate{ + jobA.Type: delegateA, + }, db, lggr, nil) + + err := orm.CreateJob(jobA) + require.NoError(t, err) + delegateA.jobID = jobA.ID + + require.NoError(t, spawner.Start(testutils.Context(t))) + + eventually.AwaitOrFail(t) + + serviceA1.On("Close").Return(nil).Once() + serviceA2.On("Close").Return(nil).Once() + + require.NoError(t, spawner.Close()) + }) + + clearDB(t, db) + + t.Run("closes job services on 'DeleteJob()'", func(t *testing.T) { + jobA := makeOCRJobSpec(t, address, bridge.Name.String(), bridge2.Name.String()) + + eventuallyStart := cltest.NewAwaiter() + serviceA1 := mocks.NewServiceCtx(t) + serviceA2 := mocks.NewServiceCtx(t) + serviceA1.On("Start", mock.Anything).Return(nil).Once() + serviceA2.On("Start", mock.Anything).Return(nil).Once().Run(func(mock.Arguments) { eventuallyStart.ItHappened() }) + + lggr := logger.TestLogger(t) + orm := NewTestORM(t, db, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database()) + mailMon := servicetest.Run(t, mailboxtest.NewMonitor(t)) + d := ocr.NewDelegate(nil, orm, nil, nil, nil, monitoringEndpoint, legacyChains, logger.TestLogger(t), config.Database(), mailMon) + delegateA := &delegate{jobA.Type, []job.ServiceCtx{serviceA1, 
serviceA2}, 0, nil, d} + spawner := job.NewSpawner(orm, config.Database(), noopChecker{}, map[job.Type]job.Delegate{ + jobA.Type: delegateA, + }, db, lggr, nil) + + err := orm.CreateJob(jobA) + require.NoError(t, err) + jobSpecIDA := jobA.ID + delegateA.jobID = jobSpecIDA + + require.NoError(t, spawner.Start(testutils.Context(t))) + defer func() { assert.NoError(t, spawner.Close()) }() + + eventuallyStart.AwaitOrFail(t) + + // Wait for the claim lock to be taken + gomega.NewWithT(t).Eventually(func() bool { + jobs := spawner.ActiveJobs() + _, exists := jobs[jobSpecIDA] + return exists + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(true)) + + eventuallyClose := cltest.NewAwaiter() + serviceA1.On("Close").Return(nil).Once() + serviceA2.On("Close").Return(nil).Once().Run(func(mock.Arguments) { eventuallyClose.ItHappened() }) + + err = spawner.DeleteJob(jobSpecIDA) + require.NoError(t, err) + + eventuallyClose.AwaitOrFail(t) + + // Wait for the claim lock to be released + gomega.NewWithT(t).Eventually(func() bool { + jobs := spawner.ActiveJobs() + _, exists := jobs[jobSpecIDA] + return exists + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(false)) + + clearDB(t, db) + }) + + t.Run("Unregisters filters on 'DeleteJob()'", func(t *testing.T) { + config = configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Feature.LogPoller = func(b bool) *bool { return &b }(true) + }) + lp := &mocklp.LogPoller{} + testopts := evmtest.TestChainOpts{ + DB: db, + Client: ethClient, + GeneralConfig: config, + LogPoller: lp, + KeyStore: ethKeyStore, + } + + lggr := logger.TestLogger(t) + relayExtenders := evmtest.NewChainRelayExtenders(t, testopts) + assert.Equal(t, relayExtenders.Len(), 1) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + chain := evmtest.MustGetDefaultChain(t, legacyChains) + + evmRelayer, err := evmrelayer.NewRelayer(lggr, chain, evmrelayer.RelayerOpts{ + DB: 
db, + QConfig: testopts.GeneralConfig.Database(), + CSAETHKeystore: keyStore, + }) + assert.NoError(t, err) + + testRelayGetter := &relayGetter{ + e: relayExtenders.Slice()[0], + r: evmRelayer, + } + + jobOCR2VRF := makeOCR2VRFJobSpec(t, keyStore, config, address, chain.ID(), 2) + + orm := NewTestORM(t, db, pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()), bridges.NewORM(db, lggr, config.Database()), keyStore, config.Database()) + mailMon := servicetest.Run(t, mailboxtest.NewMonitor(t)) + + processConfig := plugins.NewRegistrarConfig(loop.GRPCOpts{}, func(name string) (*plugins.RegisteredLoop, error) { return nil, nil }) + ocr2DelegateConfig := ocr2.NewDelegateConfig(config.OCR2(), config.Mercury(), config.Threshold(), config.Insecure(), config.JobPipeline(), config.Database(), processConfig) + + d := ocr2.NewDelegate(nil, orm, nil, nil, nil, nil, monitoringEndpoint, legacyChains, lggr, ocr2DelegateConfig, + keyStore.OCR2(), keyStore.DKGSign(), keyStore.DKGEncrypt(), ethKeyStore, testRelayGetter, mailMon, capabilities.NewRegistry()) + delegateOCR2 := &delegate{jobOCR2VRF.Type, []job.ServiceCtx{}, 0, nil, d} + + spawner := job.NewSpawner(orm, config.Database(), noopChecker{}, map[job.Type]job.Delegate{ + jobOCR2VRF.Type: delegateOCR2, + }, db, lggr, nil) + + err = spawner.CreateJob(jobOCR2VRF) + require.NoError(t, err) + jobSpecID := jobOCR2VRF.ID + delegateOCR2.jobID = jobOCR2VRF.ID + + lp.On("UnregisterFilter", mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) { + lggr.Debugf("Got here, with args %v", args) + }) + + err = spawner.DeleteJob(jobSpecID) + require.NoError(t, err) + + lp.AssertNumberOfCalls(t, "UnregisterFilter", 3) + + lp.On("Close").Return(nil).Once() + spawner.Close() + }) +} + +type noopChecker struct{} + +func (n noopChecker) Register(service services.HealthReporter) error { return nil } + +func (n noopChecker) Unregister(name string) error { return nil } + +func (n noopChecker) IsReady() 
(ready bool, errors map[string]error) { return true, nil } + +func (n noopChecker) IsHealthy() (healthy bool, errors map[string]error) { return true, nil } + +func (n noopChecker) Start() error { return nil } + +func (n noopChecker) Close() error { return nil } diff --git a/core/services/job/testdata/compact.toml b/core/services/job/testdata/compact.toml new file mode 100644 index 00000000..9f0f5402 --- /dev/null +++ b/core/services/job/testdata/compact.toml @@ -0,0 +1,34 @@ +contractID = 'foo' +relay = 'evm' +chainID = '' +p2pv2Bootstrappers = [] +ocrKeyBundleID = 'bar' +monitoringEndpoint = '' +transmitterID = 'baz' +blockchainTimeout = '0s' +contractConfigTrackerPollInterval = '1s' +contractConfigConfirmations = 1 +pluginType = 'median' +captureEATelemetry = false +captureAutomationCustomTelemetry = false + +[relayConfig] +chainID = 1337 +fromBlock = 42 + +[relayConfig.chainReader] +[relayConfig.chainReader.contracts] +[relayConfig.chainReader.contracts.median] +contractABI = "[\n {\n \"anonymous\": false,\n \"inputs\": [\n {\n \"indexed\": true,\n \"internalType\": \"address\",\n \"name\": \"requester\",\n \"type\": \"address\"\n },\n {\n \"indexed\": false,\n \"internalType\": \"bytes32\",\n \"name\": \"configDigest\",\n \"type\": \"bytes32\"\n },\n {\n \"indexed\": false,\n \"internalType\": \"uint32\",\n \"name\": \"epoch\",\n \"type\": \"uint32\"\n },\n {\n \"indexed\": false,\n \"internalType\": \"uint8\",\n \"name\": \"round\",\n \"type\": \"uint8\"\n }\n ],\n \"name\": \"RoundRequested\",\n \"type\": \"event\"\n },\n {\n \"inputs\": [],\n \"name\": \"latestTransmissionDetails\",\n \"outputs\": [\n {\n \"internalType\": \"bytes32\",\n \"name\": \"configDigest\",\n \"type\": \"bytes32\"\n },\n {\n \"internalType\": \"uint32\",\n \"name\": \"epoch\",\n \"type\": \"uint32\"\n },\n {\n \"internalType\": \"uint8\",\n \"name\": \"round\",\n \"type\": \"uint8\"\n },\n {\n \"internalType\": \"int192\",\n \"name\": \"latestAnswer_\",\n \"type\": \"int192\"\n },\n 
{\n \"internalType\": \"uint64\",\n \"name\": \"latestTimestamp_\",\n \"type\": \"uint64\"\n }\n ],\n \"stateMutability\": \"view\",\n \"type\": \"function\"\n }\n]\n" + +[relayConfig.chainReader.contracts.median.configs] +LatestRoundRequested = "{\n \"chainSpecificName\": \"RoundRequested\",\n \"readType\": \"event\"\n}\n" +LatestTransmissionDetails = "{\n \"chainSpecificName\": \"latestTransmissionDetails\",\n \"outputModifications\": [\n {\n \"Fields\": [\n \"LatestTimestamp_\"\n ],\n \"Type\": \"epoch to time\"\n },\n {\n \"Fields\": {\n \"LatestAnswer_\": \"LatestAnswer\",\n \"LatestTimestamp_\": \"LatestTimestamp\"\n },\n \"Type\": \"rename\"\n }\n ]\n}\n" + +[relayConfig.codec] +[relayConfig.codec.configs] +[relayConfig.codec.configs.MedianReport] +typeABI = "[\n {\n \"Name\": \"Timestamp\",\n \"Type\": \"uint32\"\n },\n {\n \"Name\": \"Observers\",\n \"Type\": \"bytes32\"\n },\n {\n \"Name\": \"Observations\",\n \"Type\": \"int192[]\"\n },\n {\n \"Name\": \"JuelsPerFeeCoin\",\n \"Type\": \"int192\"\n }\n]\n" + +[pluginConfig] +juelsPerFeeCoinSource = " // data source 1\n ds1 [type=bridge name=\"%s\"];\n ds1_parse [type=jsonparse path=\"data\"];\n ds1_multiply [type=multiply times=2];\n\n // data source 2\n ds2 [type=http method=GET url=\"%s\"];\n ds2_parse [type=jsonparse path=\"data\"];\n ds2_multiply [type=multiply times=2];\n\n ds1 -> ds1_parse -> ds1_multiply -> answer1;\n ds2 -> ds2_parse -> ds2_multiply -> answer1;\n\n answer1 [type=median index=0];\n" diff --git a/core/services/job/testdata/pretty.toml b/core/services/job/testdata/pretty.toml new file mode 100644 index 00000000..88bacff7 --- /dev/null +++ b/core/services/job/testdata/pretty.toml @@ -0,0 +1,149 @@ +relay = "evm" +pluginType = "median" +contractID = "foo" +ocrKeyBundleID = "bar" +transmitterID = "baz" +contractConfigConfirmations = 1 +contractConfigTrackerPollInterval = "1s" + +[relayConfig] +chainID = 1337 +fromBlock = 42 + +[relayConfig.chainReader.contracts.median] +contractABI = 
''' +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "requester", + "type": "address" + }, + { + "indexed": false, + "internalType": "bytes32", + "name": "configDigest", + "type": "bytes32" + }, + { + "indexed": false, + "internalType": "uint32", + "name": "epoch", + "type": "uint32" + }, + { + "indexed": false, + "internalType": "uint8", + "name": "round", + "type": "uint8" + } + ], + "name": "RoundRequested", + "type": "event" + }, + { + "inputs": [], + "name": "latestTransmissionDetails", + "outputs": [ + { + "internalType": "bytes32", + "name": "configDigest", + "type": "bytes32" + }, + { + "internalType": "uint32", + "name": "epoch", + "type": "uint32" + }, + { + "internalType": "uint8", + "name": "round", + "type": "uint8" + }, + { + "internalType": "int192", + "name": "latestAnswer_", + "type": "int192" + }, + { + "internalType": "uint64", + "name": "latestTimestamp_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + } +] +''' + +[relayConfig.chainReader.contracts.median.configs] +LatestRoundRequested = ''' +{ + "chainSpecificName": "RoundRequested", + "readType": "event" +} +''' +LatestTransmissionDetails = ''' +{ + "chainSpecificName": "latestTransmissionDetails", + "outputModifications": [ + { + "Fields": [ + "LatestTimestamp_" + ], + "Type": "epoch to time" + }, + { + "Fields": { + "LatestAnswer_": "LatestAnswer", + "LatestTimestamp_": "LatestTimestamp" + }, + "Type": "rename" + } + ] +} +''' + +[relayConfig.codec.configs.MedianReport] +typeABI = ''' +[ + { + "Name": "Timestamp", + "Type": "uint32" + }, + { + "Name": "Observers", + "Type": "bytes32" + }, + { + "Name": "Observations", + "Type": "int192[]" + }, + { + "Name": "JuelsPerFeeCoin", + "Type": "int192" + } +] +''' + +[pluginConfig] +juelsPerFeeCoinSource = """ + // data source 1 + ds1 [type=bridge name="%s"]; + ds1_parse [type=jsonparse path="data"]; + ds1_multiply [type=multiply times=2]; + + // data source 2 
+ ds2 [type=http method=GET url="%s"]; + ds2_parse [type=jsonparse path="data"]; + ds2_multiply [type=multiply times=2]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0]; +""" \ No newline at end of file diff --git a/core/services/job/util.go b/core/services/job/util.go new file mode 100644 index 00000000..143dfed7 --- /dev/null +++ b/core/services/job/util.go @@ -0,0 +1,40 @@ +package job + +import ( + "fmt" + + "github.com/pkg/errors" +) + +var ( + ErrNoChainFromSpec = fmt.Errorf("could not get chain from spec") + ErrNoSendingKeysFromSpec = fmt.Errorf("could not get sending keys from spec") +) + +// SendingKeysForJob parses the job spec and retrieves the sending keys found. +func SendingKeysForJob(job *Job) ([]string, error) { + sendingKeysInterface, ok := job.OCR2OracleSpec.RelayConfig["sendingKeys"] + if !ok { + return nil, fmt.Errorf("%w: sendingKeys must be provided in relay config", ErrNoSendingKeysFromSpec) + } + + sendingKeysInterfaceSlice, ok := sendingKeysInterface.([]interface{}) + if !ok { + return nil, errors.New("sending keys should be an array") + } + + var sendingKeys []string + for _, sendingKeyInterface := range sendingKeysInterfaceSlice { + sendingKey, ok := sendingKeyInterface.(string) + if !ok { + return nil, errors.New("sending keys are of wrong type") + } + sendingKeys = append(sendingKeys, sendingKey) + } + + if len(sendingKeys) == 0 { + return nil, errors.New("sending keys are empty") + } + + return sendingKeys, nil +} diff --git a/core/services/job/validate.go b/core/services/job/validate.go new file mode 100644 index 00000000..47c9bb5a --- /dev/null +++ b/core/services/job/validate.go @@ -0,0 +1,75 @@ +package job + +import ( + "strings" + + "github.com/pelletier/go-toml" + "github.com/pkg/errors" +) + +var ( + ErrNoPipelineSpec = errors.New("pipeline spec not specified") + ErrInvalidJobType = errors.New("invalid job type") + ErrInvalidSchemaVersion = 
errors.New("invalid schema version") + jobTypes = map[Type]struct{}{ + BlockHeaderFeeder: {}, + BlockhashStore: {}, + Bootstrap: {}, + Cron: {}, + DirectRequest: {}, + FluxMonitor: {}, + Gateway: {}, + Keeper: {}, + LegacyGasStationServer: {}, + LegacyGasStationSidecar: {}, + OffchainReporting2: {}, + OffchainReporting: {}, + Stream: {}, + VRF: {}, + Webhook: {}, + Workflow: {}, + } +) + +// ValidateSpec is the common spec validation +func ValidateSpec(ts string) (Type, error) { + var jb Job + // Note we can't use: + // toml.NewDecoder(bytes.NewReader([]byte(ts))).Strict(true).Decode(&jb) + // to error in the case of unrecognized keys because all the keys in the toml are at + // the top level and so decoding for the job will have undecodable keys meant for the job + // type specific struct and vice versa. Should we upgrade the schema, + // we put the type specific config in its own subtree e.g. + // schemaVersion=1 + // name="test" + // [vrf_spec] + // publicKey="0x..." + // and then we could use it. + tree, err := toml.Load(ts) + if err != nil { + return "", err + } + err = tree.Unmarshal(&jb) + if err != nil { + return "", err + } + if _, ok := jobTypes[jb.Type]; !ok { + return "", ErrInvalidJobType + } + if jb.Type.SchemaVersion() != jb.SchemaVersion { + return "", ErrInvalidSchemaVersion + } + if jb.Type.RequiresPipelineSpec() && (jb.Pipeline.Source == "") { + return "", ErrNoPipelineSpec + } + if jb.Pipeline.RequiresPreInsert() && !jb.Type.SupportsAsync() { + return "", errors.Errorf("async=true tasks are not supported for %v", jb.Type) + } + // spec.CustomRevertsPipelineEnabled == false, default is custom reverted txns pipeline disabled + + if strings.Contains(ts, "<{}>") { + return "", errors.Errorf("'<{}>' syntax is not supported. 
Please use \"{}\" instead") + } + + return jb.Type, nil +} diff --git a/core/services/job/validate_test.go b/core/services/job/validate_test.go new file mode 100644 index 00000000..f3ca2057 --- /dev/null +++ b/core/services/job/validate_test.go @@ -0,0 +1,114 @@ +package job + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +func TestValidate(t *testing.T) { + var tt = []struct { + name string + spec string + assertion func(t *testing.T, err error) + }{ + { + name: "invalid job type", + spec: ` +type="blah" +schemaVersion=1 +`, + assertion: func(t *testing.T, err error) { + require.True(t, errors.Is(errors.Cause(err), ErrInvalidJobType)) + }, + }, + { + name: "invalid schema version", + spec: ` +type="vrf" +schemaVersion=2 +`, + assertion: func(t *testing.T, err error) { + require.True(t, errors.Is(errors.Cause(err), ErrInvalidSchemaVersion)) + }, + }, + { + name: "missing schema version", + spec: ` +type="vrf" +`, + assertion: func(t *testing.T, err error) { + require.True(t, errors.Is(errors.Cause(err), ErrInvalidSchemaVersion)) + }, + }, + { + name: "missing pipeline spec key", + spec: ` +type="vrf" +schemaVersion=1 +`, + assertion: func(t *testing.T, err error) { + require.True(t, errors.Is(errors.Cause(err), ErrNoPipelineSpec)) + }, + }, + { + name: "missing pipeline spec value", + spec: ` +type="vrf" +schemaVersion=1 +`, + assertion: func(t *testing.T, err error) { + require.True(t, errors.Is(errors.Cause(err), ErrNoPipelineSpec)) + }, + }, + { + name: "invalid dot", + spec: ` +type="vrf" +schemaVersion=1 +observationSource=""" +sldkfjalskdjf +""" +`, + assertion: func(t *testing.T, err error) { + t.Log(err) + require.Error(t, err) + }, + }, + { + name: "async check", + spec: ` +type="offchainreporting" +schemaVersion=1 +observationSource=""" +ds [type=bridge async=true] +""" +`, + assertion: func(t *testing.T, err error) { + t.Log(err) + require.Error(t, err) + }, + }, + { + name: "happy path", + spec: ` 
+type="vrf" +schemaVersion=1 +observationSource=""" +ds [type=http] +""" +`, + assertion: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + } + for _, tc := range tt { + tc := tc + t.Run(tc.name, func(t *testing.T) { + _, err := ValidateSpec(tc.spec) + tc.assertion(t, err) + }) + } +} diff --git a/core/services/keeper/common.go b/core/services/keeper/common.go new file mode 100644 index 00000000..775dd882 --- /dev/null +++ b/core/services/keeper/common.go @@ -0,0 +1,18 @@ +package keeper + +import ( + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_3" +) + +var Registry1_1ABI = types.MustGetABI(keeper_registry_wrapper1_1.KeeperRegistryABI) +var Registry1_2ABI = types.MustGetABI(keeper_registry_wrapper1_2.KeeperRegistryABI) +var Registry1_3ABI = types.MustGetABI(keeper_registry_wrapper1_3.KeeperRegistryABI) + +type RegistryGasChecker interface { + CheckGasOverhead() uint32 + PerformGasOverhead() uint32 + MaxPerformDataSize() uint32 +} diff --git a/core/services/keeper/delegate.go b/core/services/keeper/delegate.go new file mode 100644 index 00000000..2c6d6fbe --- /dev/null +++ b/core/services/keeper/delegate.go @@ -0,0 +1,127 @@ +package keeper + +import ( + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +// To make sure Delegate struct implements job.Delegate interface +var _ job.Delegate = (*Delegate)(nil) + +type 
Delegate struct { + logger logger.Logger + db *sqlx.DB + jrm job.ORM + pr pipeline.Runner + legacyChains legacyevm.LegacyChainContainer + mailMon *mailbox.Monitor +} + +// NewDelegate is the constructor of Delegate +func NewDelegate( + db *sqlx.DB, + jrm job.ORM, + pr pipeline.Runner, + logger logger.Logger, + legacyChains legacyevm.LegacyChainContainer, + mailMon *mailbox.Monitor, +) *Delegate { + return &Delegate{ + logger: logger, + db: db, + jrm: jrm, + pr: pr, + legacyChains: legacyChains, + mailMon: mailMon, + } +} + +// JobType returns job type +func (d *Delegate) JobType() job.Type { + return job.Keeper +} + +func (d *Delegate) BeforeJobCreated(spec job.Job) {} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } + +// ServicesForSpec satisfies the job.Delegate interface. +func (d *Delegate) ServicesForSpec(spec job.Job) (services []job.ServiceCtx, err error) { + if spec.KeeperSpec == nil { + return nil, errors.Errorf("Delegate expects a *job.KeeperSpec to be present, got %v", spec) + } + chain, err := d.legacyChains.Get(spec.KeeperSpec.EVMChainID.String()) + if err != nil { + return nil, err + } + registryAddress := spec.KeeperSpec.ContractAddress + orm := NewORM(d.db, d.logger, chain.Config().Database()) + svcLogger := d.logger.With( + "jobID", spec.ID, + "registryAddress", registryAddress.Hex(), + ) + + registryWrapper, err := NewRegistryWrapper(registryAddress, chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "unable to create keeper registry wrapper") + } + svcLogger.Info("Registry version is: ", registryWrapper.Version) + + minIncomingConfirmations := chain.Config().EVM().MinIncomingConfirmations() + if spec.KeeperSpec.MinIncomingConfirmations != nil { + minIncomingConfirmations = *spec.KeeperSpec.MinIncomingConfirmations + } + + // effectiveKeeperAddress is the keeper address registered on the 
registry. This is by default the EOA account on the node. + // In the case of forwarding, the keeper address is the forwarder contract deployed onchain between EOA and Registry. + effectiveKeeperAddress := spec.KeeperSpec.FromAddress.Address() + if spec.ForwardingAllowed { + fwdrAddress, fwderr := chain.TxManager().GetForwarderForEOA(spec.KeeperSpec.FromAddress.Address()) + if fwderr == nil { + effectiveKeeperAddress = fwdrAddress + } else { + svcLogger.Warnw("Skipping forwarding for job, will fallback to default behavior", "job", spec.Name, "err", fwderr) + } + } + + keeper := chain.Config().Keeper() + registry := keeper.Registry() + registrySynchronizer := NewRegistrySynchronizer(RegistrySynchronizerOptions{ + Job: spec, + RegistryWrapper: *registryWrapper, + ORM: orm, + JRM: d.jrm, + LogBroadcaster: chain.LogBroadcaster(), + MailMon: d.mailMon, + SyncInterval: registry.SyncInterval(), + MinIncomingConfirmations: minIncomingConfirmations, + Logger: svcLogger, + SyncUpkeepQueueSize: registry.SyncUpkeepQueueSize(), + EffectiveKeeperAddress: effectiveKeeperAddress, + }) + upkeepExecuter := NewUpkeepExecuter( + spec, + orm, + d.pr, + chain.Client(), + chain.HeadBroadcaster(), + chain.GasEstimator(), + svcLogger, + chain.Config().Keeper(), + effectiveKeeperAddress, + ) + + return []job.ServiceCtx{ + registrySynchronizer, + upkeepExecuter, + }, nil +} diff --git a/core/services/keeper/errors.go b/core/services/keeper/errors.go new file mode 100644 index 00000000..d003fcc9 --- /dev/null +++ b/core/services/keeper/errors.go @@ -0,0 +1,7 @@ +package keeper + +import "github.com/pkg/errors" + +var ( + ErrContractCallFailure = errors.New("failure in calling contract") +) diff --git a/core/services/keeper/helpers_test.go b/core/services/keeper/helpers_test.go new file mode 100644 index 00000000..fcdc009f --- /dev/null +++ b/core/services/keeper/helpers_test.go @@ -0,0 +1,41 @@ +package keeper + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/core/types" + 
"github.com/pkg/errors" +) + +func (rs *RegistrySynchronizer) ExportedFullSync() { + rs.fullSync() +} + +func (rs *RegistrySynchronizer) ExportedProcessLogs() { + rs.processLogs() +} + +func (rw *RegistryWrapper) GetUpkeepIdFromRawRegistrationLog(rawLog types.Log) (*big.Int, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + parsedLog, err := rw.contract1_1.ParseUpkeepRegistered(rawLog) + if err != nil { + return nil, errors.Wrap(err, "failed to get parse UpkeepRegistered log") + } + return parsedLog.Id, nil + case RegistryVersion_1_2: + parsedLog, err := rw.contract1_2.ParseUpkeepRegistered(rawLog) + if err != nil { + return nil, errors.Wrap(err, "failed to get parse UpkeepRegistered log") + } + return parsedLog.Id, nil + case RegistryVersion_1_3: + parsedLog, err := rw.contract1_3.ParseUpkeepRegistered(rawLog) + if err != nil { + return nil, errors.Wrap(err, "failed to get parse UpkeepRegistered log") + } + return parsedLog.Id, nil + default: + return nil, newUnsupportedVersionError("GetUpkeepIdFromRawRegistrationLog", rw.Version) + } +} diff --git a/core/services/keeper/integration_test.go b/core/services/keeper/integration_test.go new file mode 100644 index 00000000..8d9e596f --- /dev/null +++ b/core/services/keeper/integration_test.go @@ -0,0 +1,588 @@ +package keeper_test + +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/onsi/gomega" + "github.com/pkg/errors" + "github.com/goplugin/libocr/gethwrappers/link_token_interface" + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_forwarder" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/basic_upkeep_contract" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_logic1_3" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_3" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_v3_aggregator_contract" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + webpresenters "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +var ( + oneEth = big.NewInt(1000000000000000000) + tenEth = big.NewInt(0).Mul(oneEth, big.NewInt(10)) + oneHunEth = big.NewInt(0).Mul(oneEth, big.NewInt(100)) + + payload1 = common.Hex2Bytes("1234") + payload2 = common.Hex2Bytes("ABCD") + payload3 = common.Hex2Bytes("6789") +) + +func deployKeeperRegistry( + t *testing.T, + version keeper.RegistryVersion, + auth *bind.TransactOpts, + backend *client.SimulatedBackendClient, + linkAddr, linkFeedAddr, gasFeedAddr common.Address, +) (common.Address, *keeper.RegistryWrapper) { + switch version { + case keeper.RegistryVersion_1_1: + 
regAddr, _, _, err := keeper_registry_wrapper1_1.DeployKeeperRegistry( + auth, + backend, + linkAddr, + linkFeedAddr, + gasFeedAddr, + 250_000_000, + 0, + big.NewInt(1), + 20_000_000, + big.NewInt(3600), + 1, + big.NewInt(60000000000), + big.NewInt(20000000000000000), + ) + require.NoError(t, err) + backend.Commit() + + wrapper, err := keeper.NewRegistryWrapper(ethkey.EIP55AddressFromAddress(regAddr), backend) + require.NoError(t, err) + return regAddr, wrapper + case keeper.RegistryVersion_1_2: + regAddr, _, _, err := keeper_registry_wrapper1_2.DeployKeeperRegistry( + auth, + backend, + linkAddr, + linkFeedAddr, + gasFeedAddr, + keeper_registry_wrapper1_2.Config{ + PaymentPremiumPPB: 250_000_000, + FlatFeeMicroLink: 0, + BlockCountPerTurn: big.NewInt(1), + CheckGasLimit: 20_000_000, + StalenessSeconds: big.NewInt(3600), + GasCeilingMultiplier: 1, + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: 5_000_000, + FallbackGasPrice: big.NewInt(60000000000), + FallbackLinkPrice: big.NewInt(20000000000000000), + Transcoder: testutils.NewAddress(), + Registrar: testutils.NewAddress(), + }, + ) + require.NoError(t, err) + backend.Commit() + wrapper, err := keeper.NewRegistryWrapper(ethkey.EIP55AddressFromAddress(regAddr), backend) + require.NoError(t, err) + return regAddr, wrapper + case keeper.RegistryVersion_1_3: + logicAddr, _, _, err := keeper_registry_logic1_3.DeployKeeperRegistryLogic( + auth, + backend, + 0, + big.NewInt(80000), + linkAddr, + linkFeedAddr, + gasFeedAddr) + require.NoError(t, err) + backend.Commit() + + regAddr, _, _, err := keeper_registry_wrapper1_3.DeployKeeperRegistry( + auth, + backend, + logicAddr, + keeper_registry_wrapper1_3.Config{ + PaymentPremiumPPB: 250_000_000, + FlatFeeMicroLink: 0, + BlockCountPerTurn: big.NewInt(1), + CheckGasLimit: 20_000_000, + StalenessSeconds: big.NewInt(3600), + GasCeilingMultiplier: 1, + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: 5_000_000, + FallbackGasPrice: big.NewInt(60000000000), + FallbackLinkPrice: 
big.NewInt(20000000000000000), + Transcoder: testutils.NewAddress(), + Registrar: testutils.NewAddress(), + }, + ) + require.NoError(t, err) + backend.Commit() + wrapper, err := keeper.NewRegistryWrapper(ethkey.EIP55AddressFromAddress(regAddr), backend) + require.NoError(t, err) + return regAddr, wrapper + default: + panic(errors.Errorf("Deployment of registry verdion %d not defined", version)) + } +} + +func getUpkeepIdFromTx(t *testing.T, registryWrapper *keeper.RegistryWrapper, registrationTx *types.Transaction, backend *client.SimulatedBackendClient) *big.Int { + receipt, err := backend.TransactionReceipt(testutils.Context(t), registrationTx.Hash()) + require.NoError(t, err) + upkeepId, err := registryWrapper.GetUpkeepIdFromRawRegistrationLog(*receipt.Logs[0]) + require.NoError(t, err) + return upkeepId +} + +func TestKeeperEthIntegration(t *testing.T) { + t.Parallel() + tests := []struct { + name string + eip1559 bool + registryVersion keeper.RegistryVersion + }{ + // name should be a valid ORM name, only containing alphanumeric/underscore + {"legacy_registry1_1", false, keeper.RegistryVersion_1_1}, + {"eip1559_registry1_1", true, keeper.RegistryVersion_1_1}, + {"legacy_registry1_2", false, keeper.RegistryVersion_1_2}, + {"eip1559_registry1_2", true, keeper.RegistryVersion_1_2}, + {"legacy_registry1_3", false, keeper.RegistryVersion_1_3}, + {"eip1559_registry1_3", true, keeper.RegistryVersion_1_3}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + t.Parallel() + g := gomega.NewWithT(t) + + // setup node key + nodeKey := cltest.MustGenerateRandomKey(t) + nodeAddress := nodeKey.Address + nodeAddressEIP55 := ethkey.EIP55AddressFromAddress(nodeAddress) + + // setup blockchain + sergey := testutils.MustNewSimTransactor(t) // owns all the link + steve := testutils.MustNewSimTransactor(t) // registry owner + carrol := testutils.MustNewSimTransactor(t) // client + nelly := testutils.MustNewSimTransactor(t) // other keeper 
operator 1 + nick := testutils.MustNewSimTransactor(t) // other keeper operator 2 + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(1000).ToInt()}, + steve.From: {Balance: assets.Ether(1000).ToInt()}, + carrol.From: {Balance: assets.Ether(1000).ToInt()}, + nelly.From: {Balance: assets.Ether(1000).ToInt()}, + nick.From: {Balance: assets.Ether(1000).ToInt()}, + nodeAddress: {Balance: assets.Ether(1000).ToInt()}, + } + + gasLimit := uint32(ethconfig.Defaults.Miner.GasCeil * 2) + b := cltest.NewSimulatedBackend(t, genesisData, gasLimit) + backend := client.NewSimulatedBackendClient(t, b, testutils.SimulatedChainID) + + stopMining := cltest.Mine(backend.Backend(), 1*time.Second) // >> 2 seconds and the test gets slow, << 1 second and the app may miss heads + defer stopMining() + + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) + require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(20000000000000000)) + require.NoError(t, err) + + regAddr, registryWrapper := deployKeeperRegistry(t, test.registryVersion, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) + + upkeepAddr, _, upkeepContract, err := basic_upkeep_contract.DeployBasicUpkeepContract(carrol, backend) + require.NoError(t, err) + _, err = linkToken.Transfer(sergey, carrol.From, oneHunEth) + require.NoError(t, err) + _, err = linkToken.Approve(carrol, regAddr, oneHunEth) + require.NoError(t, err) + _, err = registryWrapper.SetKeepers(steve, []common.Address{nodeAddress, nelly.From}, []common.Address{nodeAddress, nelly.From}) + require.NoError(t, err) + registrationTx, err := registryWrapper.RegisterUpkeep(steve, upkeepAddr, 2_500_000, carrol.From, []byte{}) + require.NoError(t, err) + backend.Commit() + 
upkeepID := getUpkeepIdFromTx(t, registryWrapper, registrationTx, backend) + + _, err = upkeepContract.SetBytesToSend(carrol, payload1) + require.NoError(t, err) + _, err = upkeepContract.SetShouldPerformUpkeep(carrol, true) + require.NoError(t, err) + _, err = registryWrapper.AddFunds(carrol, upkeepID, tenEth) + require.NoError(t, err) + backend.Commit() + + // setup app + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.EIP1559DynamicFees = &test.eip1559 + c.Keeper.MaxGracePeriod = ptr[int64](0) // avoid waiting to re-submit for upkeeps + c.Keeper.Registry.SyncInterval = commonconfig.MustNewDuration(24 * time.Hour) // disable full sync ticker for test + + c.Keeper.TurnLookBack = ptr[int64](0) // testing doesn't need to do far look back + + c.EVM[0].BlockBackfillDepth = ptr[uint32](0) // backfill will trigger sync on startup + c.EVM[0].MinIncomingConfirmations = ptr[uint32](1) // disable reorg protection for this test + c.EVM[0].HeadTracker.MaxBufferSize = ptr[uint32](100) // helps prevent missed heads + }) + scopedConfig := evmtest.NewChainScopedConfig(t, config) + korm := keeper.NewORM(db, logger.TestLogger(t), scopedConfig.Database()) + + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, backend.Backend(), nodeKey) + require.NoError(t, app.Start(testutils.Context(t))) + + // create job + regAddrEIP55 := ethkey.EIP55AddressFromAddress(regAddr) + job := cltest.MustInsertKeeperJob(t, db, korm, nodeAddressEIP55, regAddrEIP55) + err = app.JobSpawner().StartService(testutils.Context(t), job) + require.NoError(t, err) + + // keeper job is triggered and payload is received + receivedBytes := func() []byte { + received, err2 := upkeepContract.ReceivedBytes(nil) + require.NoError(t, err2) + return received + } + g.Eventually(receivedBytes, 20*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(payload1)) + + // submit from other keeper (because keepers must alternate) + _, 
err = registryWrapper.PerformUpkeep(nelly, upkeepID, []byte{}) + require.NoError(t, err) + + // change payload + _, err = upkeepContract.SetBytesToSend(carrol, payload2) + require.NoError(t, err) + _, err = upkeepContract.SetShouldPerformUpkeep(carrol, true) + require.NoError(t, err) + + // observe 2nd job run and received payload changes + g.Eventually(receivedBytes, 20*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(payload2)) + + // cancel upkeep + _, err = registryWrapper.CancelUpkeep(carrol, upkeepID) + require.NoError(t, err) + backend.Commit() + + cltest.WaitForCount(t, app.GetSqlxDB(), "upkeep_registrations", 0) + + // add new upkeep (same target contract) + registrationTx, err = registryWrapper.RegisterUpkeep(steve, upkeepAddr, 2_500_000, carrol.From, []byte{}) + require.NoError(t, err) + backend.Commit() + + upkeepID = getUpkeepIdFromTx(t, registryWrapper, registrationTx, backend) + _, err = upkeepContract.SetBytesToSend(carrol, payload3) + require.NoError(t, err) + _, err = upkeepContract.SetShouldPerformUpkeep(carrol, true) + require.NoError(t, err) + _, err = registryWrapper.AddFunds(carrol, upkeepID, tenEth) + require.NoError(t, err) + backend.Commit() + + // observe update + g.Eventually(receivedBytes, 20*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(payload3)) + + // remove this node from keeper list + _, err = registryWrapper.SetKeepers(steve, []common.Address{nick.From, nelly.From}, []common.Address{nick.From, nelly.From}) + require.NoError(t, err) + + var registry keeper.Registry + require.NoError(t, app.GetSqlxDB().Get(®istry, `SELECT * FROM keeper_registries`)) + cltest.AssertRecordEventually(t, app.GetSqlxDB(), ®istry, fmt.Sprintf("SELECT * FROM keeper_registries WHERE id = %d", registry.ID), func() bool { + return registry.KeeperIndex == -1 + }) + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + // Since we set grace period to 0, we can have more than 1 pipeline run per perform + // This happens 
in case we start a pipeline run before previous perform tx is committed to chain + require.GreaterOrEqual(t, len(runs), 3) + prr := webpresenters.NewPipelineRunResource(runs[0], logger.TestLogger(t)) + require.Equal(t, 1, len(prr.Outputs)) + require.Nil(t, prr.Outputs[0]) + }) + } +} + +func TestKeeperForwarderEthIntegration(t *testing.T) { + t.Parallel() + t.Run("keeper_forwarder_flow", func(t *testing.T) { + g := gomega.NewWithT(t) + + // setup node key + nodeKey := cltest.MustGenerateRandomKey(t) + nodeAddress := nodeKey.Address + nodeAddressEIP55 := ethkey.EIP55AddressFromAddress(nodeAddress) + + // setup blockchain + sergey := testutils.MustNewSimTransactor(t) // owns all the link + steve := testutils.MustNewSimTransactor(t) // registry owner + carrol := testutils.MustNewSimTransactor(t) // client + nelly := testutils.MustNewSimTransactor(t) // other keeper operator 1 + nick := testutils.MustNewSimTransactor(t) // other keeper operator 2 + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(1000).ToInt()}, + steve.From: {Balance: assets.Ether(1000).ToInt()}, + carrol.From: {Balance: assets.Ether(1000).ToInt()}, + nelly.From: {Balance: assets.Ether(1000).ToInt()}, + nick.From: {Balance: assets.Ether(1000).ToInt()}, + nodeAddress: {Balance: assets.Ether(1000).ToInt()}, + } + + gasLimit := uint32(ethconfig.Defaults.Miner.GasCeil * 2) + b := cltest.NewSimulatedBackend(t, genesisData, gasLimit) + backend := client.NewSimulatedBackendClient(t, b, testutils.SimulatedChainID) + + stopMining := cltest.Mine(backend.Backend(), 1*time.Second) // >> 2 seconds and the test gets slow, << 1 second and the app may miss heads + defer stopMining() + + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) + require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) + linkFeedAddr, _, _, err := 
mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(20000000000000000)) + require.NoError(t, err) + + regAddr, registryWrapper := deployKeeperRegistry(t, keeper.RegistryVersion_1_3, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) + + fwdrAddress, _, authorizedForwarder, err := authorized_forwarder.DeployAuthorizedForwarder(sergey, backend, linkAddr, sergey.From, steve.From, []byte{}) + require.NoError(t, err) + _, err = authorizedForwarder.SetAuthorizedSenders(sergey, []common.Address{nodeAddress}) + require.NoError(t, err) + + upkeepAddr, _, upkeepContract, err := basic_upkeep_contract.DeployBasicUpkeepContract(carrol, backend) + require.NoError(t, err) + _, err = linkToken.Transfer(sergey, carrol.From, oneHunEth) + require.NoError(t, err) + _, err = linkToken.Approve(carrol, regAddr, oneHunEth) + require.NoError(t, err) + _, err = registryWrapper.SetKeepers(steve, []common.Address{fwdrAddress, nelly.From}, []common.Address{nodeAddress, nelly.From}) + require.NoError(t, err) + registrationTx, err := registryWrapper.RegisterUpkeep(steve, upkeepAddr, 2_500_000, carrol.From, []byte{}) + require.NoError(t, err) + backend.Commit() + upkeepID := getUpkeepIdFromTx(t, registryWrapper, registrationTx, backend) + + _, err = upkeepContract.SetBytesToSend(carrol, payload1) + require.NoError(t, err) + _, err = upkeepContract.SetShouldPerformUpkeep(carrol, true) + require.NoError(t, err) + _, err = registryWrapper.AddFunds(carrol, upkeepID, tenEth) + require.NoError(t, err) + backend.Commit() + + // setup app + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Feature.LogPoller = ptr(true) + c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true) + c.Keeper.MaxGracePeriod = ptr[int64](0) // avoid waiting to re-submit for upkeeps + c.Keeper.Registry.SyncInterval = commonconfig.MustNewDuration(24 * time.Hour) // disable full sync ticker for test + + c.Keeper.TurnLookBack = ptr[int64](0) // testing 
doesn't need to do far look back + + c.EVM[0].BlockBackfillDepth = ptr[uint32](0) // backfill will trigger sync on startup + c.EVM[0].MinIncomingConfirmations = ptr[uint32](1) // disable reorg protection for this test + c.EVM[0].HeadTracker.MaxBufferSize = ptr[uint32](100) // helps prevent missed heads + c.EVM[0].Transactions.ForwardersEnabled = ptr(true) // Enable Operator Forwarder flow + c.EVM[0].ChainID = (*ubig.Big)(testutils.SimulatedChainID) + }) + scopedConfig := evmtest.NewChainScopedConfig(t, config) + korm := keeper.NewORM(db, logger.TestLogger(t), scopedConfig.Database()) + + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, backend.Backend(), nodeKey) + require.NoError(t, app.Start(testutils.Context(t))) + + forwarderORM := forwarders.NewORM(db, logger.TestLogger(t), config.Database()) + chainID := ubig.Big(*backend.ConfiguredChainID()) + _, err = forwarderORM.CreateForwarder(fwdrAddress, chainID) + require.NoError(t, err) + + addr, err := app.GetRelayers().LegacyEVMChains().Slice()[0].TxManager().GetForwarderForEOA(nodeAddress) + require.NoError(t, err) + require.Equal(t, addr, fwdrAddress) + + // create job + regAddrEIP55 := ethkey.EIP55AddressFromAddress(regAddr) + + jb := job.Job{ + ID: 1, + Type: job.Keeper, + KeeperSpec: &job.KeeperSpec{ + FromAddress: nodeAddressEIP55, + ContractAddress: regAddrEIP55, + EVMChainID: (*ubig.Big)(testutils.SimulatedChainID), + }, + SchemaVersion: 1, + ForwardingAllowed: true, + } + err = app.JobORM().CreateJob(&jb) + require.NoError(t, err) + + registry := keeper.Registry{ + ContractAddress: regAddrEIP55, + BlockCountPerTurn: 1, + CheckGas: 150_000, + FromAddress: nodeAddressEIP55, + JobID: jb.ID, + KeeperIndex: 0, + NumKeepers: 2, + KeeperIndexMap: map[ethkey.EIP55Address]int32{ + nodeAddressEIP55: 0, + ethkey.EIP55AddressFromAddress(nelly.From): 1, + }, + } + err = korm.UpsertRegistry(®istry) + require.NoError(t, err) + + callOpts := bind.CallOpts{From: nodeAddress} + // Read last 
keeper on the upkeep contract + lastKeeper := func() common.Address { + upkeepCfg, err2 := registryWrapper.GetUpkeep(&callOpts, upkeepID) + require.NoError(t, err2) + return upkeepCfg.LastKeeper + } + require.Equal(t, lastKeeper(), common.Address{}) + + err = app.JobSpawner().StartService(testutils.Context(t), jb) + require.NoError(t, err) + + // keeper job is triggered and payload is received + receivedBytes := func() []byte { + received, err2 := upkeepContract.ReceivedBytes(nil) + require.NoError(t, err2) + return received + } + g.Eventually(receivedBytes, 20*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(payload1)) + + // Upkeep performed by the node through the forwarder + g.Eventually(lastKeeper, 20*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(fwdrAddress)) + }) +} + +func TestMaxPerformDataSize(t *testing.T) { + t.Parallel() + t.Run("max_perform_data_size_test", func(t *testing.T) { + maxPerformDataSize := 1000 // Will be set as config override + g := gomega.NewWithT(t) + + // setup node key + nodeKey := cltest.MustGenerateRandomKey(t) + nodeAddress := nodeKey.Address + nodeAddressEIP55 := ethkey.EIP55AddressFromAddress(nodeAddress) + + // setup blockchain + sergey := testutils.MustNewSimTransactor(t) // owns all the link + steve := testutils.MustNewSimTransactor(t) // registry owner + carrol := testutils.MustNewSimTransactor(t) // client + nelly := testutils.MustNewSimTransactor(t) // other keeper operator 1 + nick := testutils.MustNewSimTransactor(t) // other keeper operator 2 + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(1000).ToInt()}, + steve.From: {Balance: assets.Ether(1000).ToInt()}, + carrol.From: {Balance: assets.Ether(1000).ToInt()}, + nelly.From: {Balance: assets.Ether(1000).ToInt()}, + nick.From: {Balance: assets.Ether(1000).ToInt()}, + nodeAddress: {Balance: assets.Ether(1000).ToInt()}, + } + + gasLimit := uint32(ethconfig.Defaults.Miner.GasCeil * 2) + b := cltest.NewSimulatedBackend(t, 
genesisData, gasLimit) + backend := client.NewSimulatedBackendClient(t, b, testutils.SimulatedChainID) + + stopMining := cltest.Mine(backend.Backend(), 1*time.Second) // >> 2 seconds and the test gets slow, << 1 second and the app may miss heads + defer stopMining() + + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) + require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(20000000000000000)) + require.NoError(t, err) + + regAddr, registryWrapper := deployKeeperRegistry(t, keeper.RegistryVersion_1_3, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) + + upkeepAddr, _, upkeepContract, err := basic_upkeep_contract.DeployBasicUpkeepContract(carrol, backend) + require.NoError(t, err) + _, err = linkToken.Transfer(sergey, carrol.From, oneHunEth) + require.NoError(t, err) + _, err = linkToken.Approve(carrol, regAddr, oneHunEth) + require.NoError(t, err) + _, err = registryWrapper.SetKeepers(steve, []common.Address{nodeAddress, nelly.From}, []common.Address{nodeAddress, nelly.From}) + require.NoError(t, err) + registrationTx, err := registryWrapper.RegisterUpkeep(steve, upkeepAddr, 2_500_000, carrol.From, []byte{}) + require.NoError(t, err) + backend.Commit() + upkeepID := getUpkeepIdFromTx(t, registryWrapper, registrationTx, backend) + + _, err = registryWrapper.AddFunds(carrol, upkeepID, tenEth) + require.NoError(t, err) + backend.Commit() + + // setup app + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Keeper.MaxGracePeriod = ptr[int64](0) // avoid waiting to re-submit for upkeeps + c.Keeper.Registry.SyncInterval = commonconfig.MustNewDuration(24 * time.Hour) // disable full sync ticker for test + c.Keeper.Registry.MaxPerformDataSize = 
ptr(uint32(maxPerformDataSize)) // set the max perform data size + + c.Keeper.TurnLookBack = ptr[int64](0) // testing doesn't need to do far look back + + c.EVM[0].BlockBackfillDepth = ptr[uint32](0) // backfill will trigger sync on startup + c.EVM[0].MinIncomingConfirmations = ptr[uint32](1) // disable reorg protection for this test + c.EVM[0].HeadTracker.MaxBufferSize = ptr[uint32](100) // helps prevent missed heads + }) + scopedConfig := evmtest.NewChainScopedConfig(t, config) + korm := keeper.NewORM(db, logger.TestLogger(t), scopedConfig.Database()) + + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, backend.Backend(), nodeKey) + require.NoError(t, app.Start(testutils.Context(t))) + + // create job + regAddrEIP55 := ethkey.EIP55AddressFromAddress(regAddr) + job := cltest.MustInsertKeeperJob(t, db, korm, nodeAddressEIP55, regAddrEIP55) + err = app.JobSpawner().StartService(testutils.Context(t), job) + require.NoError(t, err) + + // keeper job is triggered + receivedBytes := func() []byte { + received, err2 := upkeepContract.ReceivedBytes(nil) + require.NoError(t, err2) + return received + } + + hugePayload := make([]byte, maxPerformDataSize) + _, err = upkeepContract.SetBytesToSend(carrol, hugePayload) + require.NoError(t, err) + _, err = upkeepContract.SetShouldPerformUpkeep(carrol, true) + require.NoError(t, err) + + // Huge payload should not result in a perform + g.Consistently(receivedBytes, 20*time.Second, cltest.DBPollingInterval).Should(gomega.Equal([]byte{})) + + // Set payload to be small and it should get received + smallPayload := make([]byte, maxPerformDataSize-1) + _, err = upkeepContract.SetBytesToSend(carrol, smallPayload) + require.NoError(t, err) + g.Eventually(receivedBytes, 20*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(smallPayload)) + }) +} diff --git a/core/services/keeper/models.go b/core/services/keeper/models.go new file mode 100644 index 00000000..2b8cea9f --- /dev/null +++ 
b/core/services/keeper/models.go @@ -0,0 +1,79 @@ +package keeper + +import ( + "database/sql/driver" + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/null" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +type KeeperIndexMap map[ethkey.EIP55Address]int32 + +type Registry struct { + ID int64 + BlockCountPerTurn int32 + CheckGas uint32 + ContractAddress ethkey.EIP55Address + FromAddress ethkey.EIP55Address + JobID int32 + KeeperIndex int32 + NumKeepers int32 + KeeperIndexMap KeeperIndexMap +} + +type UpkeepRegistration struct { + ID int32 + CheckData []byte + ExecuteGas uint32 + LastRunBlockHeight int64 + RegistryID int64 + Registry Registry + UpkeepID *big.Big + LastKeeperIndex null.Int64 + PositioningConstant int32 +} + +func (k *KeeperIndexMap) Scan(val interface{}) error { + switch v := val.(type) { + case []byte: + err := json.Unmarshal(v, &k) + return err + case string: + err := json.Unmarshal([]byte(v), &k) + return err + default: + return fmt.Errorf("unsupported type: %T", v) + } +} + +func (k *KeeperIndexMap) Value() (driver.Value, error) { + return json.Marshal(&k) +} + +func (upkeep UpkeepRegistration) PrettyID() string { + return NewUpkeepIdentifier(upkeep.UpkeepID).String() +} + +func NewUpkeepIdentifier(i *big.Big) *UpkeepIdentifier { + val := UpkeepIdentifier(*i) + return &val +} + +type UpkeepIdentifier big.Big + +// String produces a hex encoded value, zero padded, prefixed with UpkeepPrefix +func (ui UpkeepIdentifier) String() string { + val := big.Big(ui) + result, err := utils.Uint256ToBytes(val.ToInt()) + if err != nil { + panic(errors.Wrap(err, "invariant, invalid upkeepID")) + } + return fmt.Sprintf("%s%s", UpkeepPrefix, hex.EncodeToString(result)) +} diff --git a/core/services/keeper/models_test.go 
b/core/services/keeper/models_test.go new file mode 100644 index 00000000..ecd5bd43 --- /dev/null +++ b/core/services/keeper/models_test.go @@ -0,0 +1,33 @@ +package keeper + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +func TestUpkeepIdentifer_String(t *testing.T) { + for _, test := range []struct { + name string + id string + hex string + }{ + {"small", "10", "UPx000000000000000000000000000000000000000000000000000000000000000a"}, + {"large", "1000000000", "UPx000000000000000000000000000000000000000000000000000000003b9aca00"}, + {"big", "5032485723458348569331745", "UPx0000000000000000000000000000000000000000000429ab990419450db80821"}, + } { + t.Run(test.name, func(t *testing.T) { + o, ok := new(big.Int).SetString(test.id, 10) + if !ok { + t.Errorf("%s failed to parse test integer", test.name) + return + } + + result := NewUpkeepIdentifier(ubig.New(o)).String() + require.Equal(t, test.hex, result) + }) + } +} diff --git a/core/services/keeper/orm.go b/core/services/keeper/orm.go new file mode 100644 index 00000000..a4f467d8 --- /dev/null +++ b/core/services/keeper/orm.go @@ -0,0 +1,233 @@ +package keeper + +import ( + "math/rand" + + "github.com/jmoiron/sqlx" + "github.com/lib/pq" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// ORM implements ORM layer using PostgreSQL +type ORM struct { + q pg.Q + logger logger.Logger +} + +// NewORM is the constructor of postgresORM +func NewORM(db *sqlx.DB, lggr logger.Logger, config pg.QConfig) ORM { + lggr = lggr.Named("KeeperORM") + return ORM{ + q: pg.NewQ(db, lggr, config), + logger: lggr, + } +} + +func (korm ORM) Q() pg.Q { + return korm.q +} + +// Registries returns all registries +func 
(korm ORM) Registries() ([]Registry, error) { + var registries []Registry + err := korm.q.Select(®istries, `SELECT * FROM keeper_registries ORDER BY id ASC`) + return registries, errors.Wrap(err, "failed to get registries") +} + +// RegistryByContractAddress returns a single registry based on provided address +func (korm ORM) RegistryByContractAddress(registryAddress ethkey.EIP55Address) (Registry, error) { + var registry Registry + err := korm.q.Get(®istry, `SELECT * FROM keeper_registries WHERE keeper_registries.contract_address = $1`, registryAddress) + return registry, errors.Wrap(err, "failed to get registry") +} + +// RegistryForJob returns a specific registry for a job with the given ID +func (korm ORM) RegistryForJob(jobID int32) (Registry, error) { + var registry Registry + err := korm.q.Get(®istry, `SELECT * FROM keeper_registries WHERE job_id = $1 LIMIT 1`, jobID) + return registry, errors.Wrapf(err, "failed to get registry with job_id %d", jobID) +} + +// UpsertRegistry upserts registry by the given input +func (korm ORM) UpsertRegistry(registry *Registry) error { + stmt := ` +INSERT INTO keeper_registries (job_id, keeper_index, contract_address, from_address, check_gas, block_count_per_turn, num_keepers, keeper_index_map) VALUES ( +:job_id, :keeper_index, :contract_address, :from_address, :check_gas, :block_count_per_turn, :num_keepers, :keeper_index_map +) ON CONFLICT (job_id) DO UPDATE SET + keeper_index = :keeper_index, + check_gas = :check_gas, + block_count_per_turn = :block_count_per_turn, + num_keepers = :num_keepers, + keeper_index_map = :keeper_index_map +RETURNING * +` + err := korm.q.GetNamed(stmt, registry, registry) + return errors.Wrap(err, "failed to upsert registry") +} + +// UpsertUpkeep upserts upkeep by the given input +func (korm ORM) UpsertUpkeep(registration *UpkeepRegistration) error { + stmt := ` +INSERT INTO upkeep_registrations (registry_id, execute_gas, check_data, upkeep_id, positioning_constant, last_run_block_height) 
VALUES ( +:registry_id, :execute_gas, :check_data, :upkeep_id, :positioning_constant, :last_run_block_height +) ON CONFLICT (registry_id, upkeep_id) DO UPDATE SET + execute_gas = :execute_gas, + check_data = :check_data, + positioning_constant = :positioning_constant +RETURNING * +` + err := korm.q.GetNamed(stmt, registration, registration) + return errors.Wrap(err, "failed to upsert upkeep") +} + +// UpdateUpkeepLastKeeperIndex updates the last keeper index for an upkeep +func (korm ORM) UpdateUpkeepLastKeeperIndex(jobID int32, upkeepID *big.Big, fromAddress ethkey.EIP55Address) error { + _, err := korm.q.Exec(` + UPDATE upkeep_registrations + SET + last_keeper_index = CAST((SELECT keeper_index_map -> $3 FROM keeper_registries WHERE job_id = $1) AS int) + WHERE upkeep_id = $2 AND + registry_id = (SELECT id FROM keeper_registries WHERE job_id = $1)`, + jobID, upkeepID, fromAddress.Hex()) + return errors.Wrap(err, "UpdateUpkeepLastKeeperIndex failed") +} + +// BatchDeleteUpkeepsForJob deletes all upkeeps by the given IDs for the job with the given ID +func (korm ORM) BatchDeleteUpkeepsForJob(jobID int32, upkeepIDs []big.Big) (int64, error) { + strIds := []string{} + for _, upkeepID := range upkeepIDs { + strIds = append(strIds, upkeepID.String()) + } + res, err := korm.q.Exec(` +DELETE FROM upkeep_registrations WHERE registry_id IN ( + SELECT id FROM keeper_registries WHERE job_id = $1 +) AND upkeep_id = ANY($2) +`, jobID, strIds) + if err != nil { + return 0, errors.Wrap(err, "BatchDeleteUpkeepsForJob failed to delete") + } + rowsAffected, err := res.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "BatchDeleteUpkeepsForJob failed to get RowsAffected") + } + return rowsAffected, nil +} + +// EligibleUpkeepsForRegistry fetches eligible upkeeps for processing +// The query checks the following conditions +// - checks the registry address is correct and the registry has some keepers associated +// -- is it my turn AND my keeper was not the last perform 
for this upkeep OR my keeper was the last before BUT it is past the grace period +// -- OR is it my buddy's turn AND they were the last keeper to do the perform for this upkeep +// DEV: note we cast upkeep_id and binaryHash as 32 bits, even though both are 256 bit numbers when performing XOR. This is enough information +// to distribute the upkeeps over the keepers so long as num keepers < 4294967296 +func (korm ORM) EligibleUpkeepsForRegistry(registryAddress ethkey.EIP55Address, blockNumber int64, gracePeriod int64, binaryHash string) (upkeeps []UpkeepRegistration, err error) { + stmt := ` +SELECT upkeep_registrations.* +FROM upkeep_registrations + INNER JOIN keeper_registries ON keeper_registries.id = upkeep_registrations.registry_id, + LATERAL ABS( + (least_significant(uint256_to_bit(upkeep_registrations.upkeep_id), 32) # least_significant($4, 32))::bigint + ) AS turn +WHERE keeper_registries.contract_address = $1 + AND keeper_registries.num_keepers > 0 + AND + ( + ( + -- my turn + keeper_registries.keeper_index = turn % keeper_registries.num_keepers + AND + ( + -- last keeper != me + upkeep_registrations.last_keeper_index IS DISTINCT FROM keeper_registries.keeper_index + OR + -- last keeper == me AND its past the grace period + (upkeep_registrations.last_keeper_index IS NOT DISTINCT FROM + keeper_registries.keeper_index AND + upkeep_registrations.last_run_block_height + $2 < $3) + ) + ) + OR + ( + -- my buddy's turn + (keeper_registries.keeper_index + 1) % keeper_registries.num_keepers = + turn % keeper_registries.num_keepers + AND + -- last keeper == my buddy + upkeep_registrations.last_keeper_index IS NOT DISTINCT FROM + (keeper_registries.keeper_index + 1) % keeper_registries.num_keepers + -- buddy system only active if we have multiple keeper nodes + AND keeper_registries.num_keepers > 1 + ) + ) +` + if err = korm.q.Select(&upkeeps, stmt, registryAddress, gracePeriod, blockNumber, binaryHash); err != nil { + return upkeeps, errors.Wrap(err, 
"EligibleUpkeepsForRegistry failed to get upkeep_registrations") + } + if err = loadUpkeepsRegistry(korm.q, upkeeps); err != nil { + return upkeeps, errors.Wrap(err, "EligibleUpkeepsForRegistry failed to load Registry on upkeeps") + } + + rand.Shuffle(len(upkeeps), func(i, j int) { + upkeeps[i], upkeeps[j] = upkeeps[j], upkeeps[i] + }) + + return upkeeps, err +} + +func loadUpkeepsRegistry(q pg.Queryer, upkeeps []UpkeepRegistration) error { + registryIDM := make(map[int64]*Registry) + var registryIDs []int64 + for _, upkeep := range upkeeps { + if _, exists := registryIDM[upkeep.RegistryID]; !exists { + registryIDM[upkeep.RegistryID] = new(Registry) + registryIDs = append(registryIDs, upkeep.RegistryID) + } + } + var registries []*Registry + err := q.Select(®istries, `SELECT * FROM keeper_registries WHERE id = ANY($1)`, pq.Array(registryIDs)) + if err != nil { + return errors.Wrap(err, "loadUpkeepsRegistry failed") + } + for _, reg := range registries { + registryIDM[reg.ID] = reg + } + for i, upkeep := range upkeeps { + upkeeps[i].Registry = *registryIDM[upkeep.RegistryID] + } + return nil +} + +func (korm ORM) AllUpkeepIDsForRegistry(regID int64) (upkeeps []big.Big, err error) { + err = korm.q.Select(&upkeeps, ` +SELECT upkeep_id +FROM upkeep_registrations +WHERE registry_id = $1 +`, regID) + return upkeeps, errors.Wrap(err, "allUpkeepIDs failed") +} + +// SetLastRunInfoForUpkeepOnJob sets the last run block height and the associated keeper index only if the new block height is greater than the previous. 
+func (korm ORM) SetLastRunInfoForUpkeepOnJob(jobID int32, upkeepID *big.Big, height int64, fromAddress ethkey.EIP55Address, qopts ...pg.QOpt) (int64, error) { + res, err := korm.q.WithOpts(qopts...).Exec(` + UPDATE upkeep_registrations + SET last_run_block_height = $1, + last_keeper_index = CAST((SELECT keeper_index_map -> $4 FROM keeper_registries WHERE job_id = $3) AS int) + WHERE upkeep_id = $2 AND + registry_id = (SELECT id FROM keeper_registries WHERE job_id = $3) AND + last_run_block_height <= $1`, height, upkeepID, jobID, fromAddress.Hex()) + + if err != nil { + return 0, errors.Wrap(err, "SetLastRunInfoForUpkeepOnJob failed") + } + + rowsAffected, err := res.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "SetLastRunInfoForUpkeepOnJob failed to get RowsAffected") + } + return rowsAffected, nil +} diff --git a/core/services/keeper/orm_test.go b/core/services/keeper/orm_test.go new file mode 100644 index 00000000..53043d2e --- /dev/null +++ b/core/services/keeper/orm_test.go @@ -0,0 +1,522 @@ +package keeper_test + +import ( + "fmt" + "math/big" + "sort" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + 
"github.com/goplugin/pluginv3.0/v2/core/utils" + bigmath "github.com/goplugin/pluginv3.0/v2/core/utils/big_math" +) + +var ( + checkData = common.Hex2Bytes("ABC123") + executeGas = uint32(10_000) +) + +func setupKeeperDB(t *testing.T) ( + *sqlx.DB, + evmconfig.ChainScopedConfig, + keeper.ORM, +) { + gcfg := configtest.NewGeneralConfig(t, nil) + db := pgtest.NewSqlxDB(t) + cfg := evmtest.NewChainScopedConfig(t, gcfg) + orm := keeper.NewORM(db, logger.TestLogger(t), cfg.Database()) + return db, cfg, orm +} + +func newUpkeep(registry keeper.Registry, upkeepID int64) keeper.UpkeepRegistration { + return keeper.UpkeepRegistration{ + UpkeepID: ubig.NewI(upkeepID), + ExecuteGas: executeGas, + Registry: registry, + RegistryID: registry.ID, + CheckData: checkData, + } +} + +func waitLastRunHeight(t *testing.T, db *sqlx.DB, upkeep keeper.UpkeepRegistration, height int64) { + t.Helper() + + gomega.NewWithT(t).Eventually(func() int64 { + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations WHERE id = $1`, upkeep.ID) + require.NoError(t, err) + return upkeep.LastRunBlockHeight + }, time.Second*2, time.Millisecond*100).Should(gomega.Equal(height)) +} + +func assertLastRunHeight(t *testing.T, db *sqlx.DB, upkeep keeper.UpkeepRegistration, lastRunBlockHeight int64, lastKeeperIndex int64) { + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations WHERE id = $1`, upkeep.ID) + require.NoError(t, err) + require.Equal(t, lastRunBlockHeight, upkeep.LastRunBlockHeight) + require.Equal(t, lastKeeperIndex, upkeep.LastKeeperIndex.Int64) +} + +func TestKeeperDB_Registries(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + + existingRegistries, err := orm.Registries() + require.NoError(t, err) + require.Equal(t, 2, len(existingRegistries)) +} + +func 
TestKeeperDB_RegistryByContractAddress(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + + registryByContractAddress, err := orm.RegistryByContractAddress(registry.ContractAddress) + require.NoError(t, err) + require.Equal(t, registry, registryByContractAddress) +} + +func TestKeeperDB_UpsertUpkeep(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + upkeep := keeper.UpkeepRegistration{ + UpkeepID: ubig.NewI(0), + ExecuteGas: executeGas, + Registry: registry, + RegistryID: registry.ID, + CheckData: checkData, + LastRunBlockHeight: 1, + PositioningConstant: 1, + } + require.NoError(t, orm.UpsertUpkeep(&upkeep)) + cltest.AssertCount(t, db, "upkeep_registrations", 1) + + // update upkeep + upkeep.ExecuteGas = 20_000 + upkeep.CheckData = common.Hex2Bytes("8888") + upkeep.LastRunBlockHeight = 2 + + err := orm.UpsertUpkeep(&upkeep) + require.NoError(t, err) + cltest.AssertCount(t, db, "upkeep_registrations", 1) + + var upkeepFromDB keeper.UpkeepRegistration + err = db.Get(&upkeepFromDB, `SELECT * FROM upkeep_registrations ORDER BY id LIMIT 1`) + require.NoError(t, err) + require.Equal(t, uint32(20_000), upkeepFromDB.ExecuteGas) + require.Equal(t, "8888", common.Bytes2Hex(upkeepFromDB.CheckData)) + require.Equal(t, int64(1), upkeepFromDB.LastRunBlockHeight) // shouldn't change on upsert +} + +func TestKeeperDB_BatchDeleteUpkeepsForJob(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + registry, job := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + + 
expectedUpkeepID := cltest.MustInsertUpkeepForRegistry(t, db, config.Database(), registry).UpkeepID + var upkeepIDs []ubig.Big + for i := 0; i < 2; i++ { + upkeep := cltest.MustInsertUpkeepForRegistry(t, db, config.Database(), registry) + upkeepIDs = append(upkeepIDs, *upkeep.UpkeepID) + } + + cltest.AssertCount(t, db, "upkeep_registrations", 3) + + _, err := orm.BatchDeleteUpkeepsForJob(job.ID, upkeepIDs) + require.NoError(t, err) + cltest.AssertCount(t, db, "upkeep_registrations", 1) + + var remainingUpkeep keeper.UpkeepRegistration + err = db.Get(&remainingUpkeep, `SELECT * FROM upkeep_registrations ORDER BY id LIMIT 1`) + require.NoError(t, err) + require.Equal(t, expectedUpkeepID, remainingUpkeep.UpkeepID) +} + +func TestKeeperDB_EligibleUpkeeps_Shuffle(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + blockheight := int64(63) + gracePeriod := int64(10) + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + + ordered := [100]int64{} + for i := 0; i < 100; i++ { + k := newUpkeep(registry, int64(i)) + ordered[i] = int64(i) + err := orm.UpsertUpkeep(&k) + require.NoError(t, err) + } + cltest.AssertCount(t, db, "upkeep_registrations", 100) + + eligibleUpkeeps, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, blockheight, gracePeriod, fmt.Sprintf("%b", evmutils.NewHash().Big())) + assert.NoError(t, err) + + require.Len(t, eligibleUpkeeps, 100) + shuffled := [100]*ubig.Big{} + for i := 0; i < 100; i++ { + shuffled[i] = eligibleUpkeeps[i].UpkeepID + } + assert.NotEqualValues(t, ordered, shuffled) +} + +func TestKeeperDB_NewEligibleUpkeeps_GracePeriod(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 2, 20) + + for i := 0; i < 100; i++ { + 
cltest.MustInsertUpkeepForRegistry(t, db, config.Database(), registry) + } + + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 100) + + // if current keeper index = 0 and all upkeeps last perform was done by index = 0 and still within grace period + upkeep := keeper.UpkeepRegistration{} + require.NoError(t, db.Get(&upkeep, `UPDATE upkeep_registrations SET last_keeper_index = 0, last_run_block_height = 10 RETURNING *`)) + list0, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 21, 100, fmt.Sprintf("%b", evmutils.NewHash().Big())) // none eligible + require.NoError(t, err) + require.Equal(t, 0, len(list0), "should be 0 as all last perform was done by current node") + + // once passed grace period + list1, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 121, 100, fmt.Sprintf("%b", evmutils.NewHash().Big())) // none eligible + require.NoError(t, err) + require.NotEqual(t, 0, len(list1), "should get some eligible upkeeps now that they are outside grace period") +} + +func TestKeeperDB_EligibleUpkeeps_TurnsRandom(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 3, 10) + + for i := 0; i < 1000; i++ { + cltest.MustInsertUpkeepForRegistry(t, db, config.Database(), registry) + } + + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 1000) + + // 3 keepers 10 block turns should be different every turn + list1, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 20, 100, fmt.Sprintf("%b", evmutils.NewHash().Big())) + require.NoError(t, err) + list2, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 31, 100, fmt.Sprintf("%b", evmutils.NewHash().Big())) + require.NoError(t, err) + list3, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 42, 
100, fmt.Sprintf("%b", evmutils.NewHash().Big())) + require.NoError(t, err) + list4, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 53, 100, fmt.Sprintf("%b", evmutils.NewHash().Big())) + require.NoError(t, err) + + // sort before compare + sort.Slice(list1, func(i, j int) bool { + return list1[i].UpkeepID.Cmp(list1[j].UpkeepID) == -1 + }) + sort.Slice(list2, func(i, j int) bool { + return list2[i].UpkeepID.Cmp(list2[j].UpkeepID) == -1 + }) + sort.Slice(list3, func(i, j int) bool { + return list3[i].UpkeepID.Cmp(list3[j].UpkeepID) == -1 + }) + sort.Slice(list4, func(i, j int) bool { + return list4[i].UpkeepID.Cmp(list4[j].UpkeepID) == -1 + }) + + assert.NotEqual(t, list1, list2, "list1 vs list2") + assert.NotEqual(t, list1, list3, "list1 vs list3") + assert.NotEqual(t, list1, list4, "list1 vs list4") +} + +func TestKeeperDB_NewEligibleUpkeeps_SkipIfLastPerformedByCurrentKeeper(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 2, 20) + + for i := 0; i < 100; i++ { + cltest.MustInsertUpkeepForRegistry(t, db, config.Database(), registry) + } + + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 100) + + // if current keeper index = 0 and all upkeeps last perform was done by index = 0 then skip as it would not pass required turn taking + upkeep := keeper.UpkeepRegistration{} + require.NoError(t, db.Get(&upkeep, `UPDATE upkeep_registrations SET last_keeper_index = 0 RETURNING *`)) + list0, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 21, 100, fmt.Sprintf("%b", evmutils.NewHash().Big())) // none eligible + require.NoError(t, err) + require.Equal(t, 0, len(list0), "should be 0 as all last perform was done by current node") +} + +func TestKeeperDB_NewEligibleUpkeeps_CoverBuddy(t *testing.T) { + t.Parallel() + db, config, 
orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 1, 2, 20) + + for i := 0; i < 100; i++ { + cltest.MustInsertUpkeepForRegistry(t, db, config.Database(), registry) + } + + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 100) + + upkeep := keeper.UpkeepRegistration{} + binaryHash := fmt.Sprintf("%b", evmutils.NewHash().Big()) + listBefore, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 21, 100, binaryHash) // normal + require.NoError(t, err) + require.NoError(t, db.Get(&upkeep, `UPDATE upkeep_registrations SET last_keeper_index = 0 RETURNING *`)) + listAfter, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 21, 100, binaryHash) // covering buddy + require.NoError(t, err) + require.Greater(t, len(listAfter), len(listBefore), "after our buddy runs all the performs we should have more eligible then a normal turn") +} + +func TestKeeperDB_NewEligibleUpkeeps_FirstTurn(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 2, 20) + + for i := 0; i < 100; i++ { + cltest.MustInsertUpkeepForRegistry(t, db, config.Database(), registry) + } + + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 100) + + binaryHash := fmt.Sprintf("%b", evmutils.NewHash().Big()) + // last keeper index is null to simulate a normal first run + listKpr0, err := orm.EligibleUpkeepsForRegistry(registry.ContractAddress, 21, 100, binaryHash) // someone eligible only kpr0 turn + require.NoError(t, err) + require.NotEqual(t, 0, len(listKpr0), "kpr0 should have some eligible as a normal turn") +} + +func TestKeeperDB_NewEligibleUpkeeps_FiltersByRegistry(t *testing.T) { + t.Parallel() + db, config, 
orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + registry1, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + registry2, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + + cltest.MustInsertUpkeepForRegistry(t, db, config.Database(), registry1) + cltest.MustInsertUpkeepForRegistry(t, db, config.Database(), registry2) + + cltest.AssertCount(t, db, "keeper_registries", 2) + cltest.AssertCount(t, db, "upkeep_registrations", 2) + + binaryHash := fmt.Sprintf("%b", evmutils.NewHash().Big()) + list1, err := orm.EligibleUpkeepsForRegistry(registry1.ContractAddress, 20, 100, binaryHash) + require.NoError(t, err) + list2, err := orm.EligibleUpkeepsForRegistry(registry2.ContractAddress, 20, 100, binaryHash) + require.NoError(t, err) + + assert.Equal(t, 1, len(list1)) + assert.Equal(t, 1, len(list2)) +} + +func TestKeeperDB_AllUpkeepIDsForRegistry(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + registry, _ := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + + upkeepIDs, err := orm.AllUpkeepIDsForRegistry(registry.ID) + require.NoError(t, err) + // No upkeeps returned + require.Len(t, upkeepIDs, 0) + + upkeep := newUpkeep(registry, 3) + err = orm.UpsertUpkeep(&upkeep) + require.NoError(t, err) + + upkeep = newUpkeep(registry, 8) + err = orm.UpsertUpkeep(&upkeep) + require.NoError(t, err) + + // We should get two upkeeps IDs, 3 & 8 + upkeepIDs, err = orm.AllUpkeepIDsForRegistry(registry.ID) + require.NoError(t, err) + // No upkeeps returned + require.Len(t, upkeepIDs, 2) + require.Contains(t, upkeepIDs, *ubig.New(big.NewInt(3))) + require.Contains(t, upkeepIDs, *ubig.New(big.NewInt(8))) +} + +func TestKeeperDB_UpdateUpkeepLastKeeperIndex(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + 
registry, j := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + upkeep := cltest.MustInsertUpkeepForRegistry(t, db, config.Database(), registry) + + require.NoError(t, orm.UpdateUpkeepLastKeeperIndex(j.ID, upkeep.UpkeepID, registry.FromAddress)) + + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations WHERE upkeep_id = $1`, upkeep.UpkeepID) + require.NoError(t, err) + require.Equal(t, int64(0), upkeep.LastKeeperIndex.Int64) +} + +func TestKeeperDB_NewSetLastRunInfoForUpkeepOnJob(t *testing.T) { + t.Parallel() + db, config, orm := setupKeeperDB(t) + ethKeyStore := cltest.NewKeyStore(t, db, config.Database()).Eth() + + registry, j := cltest.MustInsertKeeperRegistry(t, db, orm, ethKeyStore, 0, 1, 20) + upkeep := cltest.MustInsertUpkeepForRegistry(t, db, config.Database(), registry) + registry.NumKeepers = 2 + registry.KeeperIndexMap = map[ethkey.EIP55Address]int32{ + registry.FromAddress: 0, + ethkey.EIP55AddressFromAddress(evmutils.ZeroAddress): 1, + } + err := orm.UpsertRegistry(®istry) + require.NoError(t, err, "UPDATE keeper_registries") + + // update + rowsAffected, err := orm.SetLastRunInfoForUpkeepOnJob(j.ID, upkeep.UpkeepID, 100, registry.FromAddress) + require.NoError(t, err) + require.Equal(t, rowsAffected, int64(1)) + assertLastRunHeight(t, db, upkeep, 100, 0) + // update to lower block height not allowed + rowsAffected, err = orm.SetLastRunInfoForUpkeepOnJob(j.ID, upkeep.UpkeepID, 0, registry.FromAddress) + require.NoError(t, err) + require.Equal(t, rowsAffected, int64(0)) + assertLastRunHeight(t, db, upkeep, 100, 0) + // update to same block height allowed + rowsAffected, err = orm.SetLastRunInfoForUpkeepOnJob(j.ID, upkeep.UpkeepID, 100, ethkey.EIP55AddressFromAddress(evmutils.ZeroAddress)) + require.NoError(t, err) + require.Equal(t, rowsAffected, int64(1)) + assertLastRunHeight(t, db, upkeep, 100, 1) + // update to higher block height allowed + rowsAffected, err = orm.SetLastRunInfoForUpkeepOnJob(j.ID, upkeep.UpkeepID, 101, 
registry.FromAddress) + require.NoError(t, err) + require.Equal(t, rowsAffected, int64(1)) + assertLastRunHeight(t, db, upkeep, 101, 0) +} + +func TestKeeperDB_LeastSignificant(t *testing.T) { + t.Parallel() + db, _, _ := setupKeeperDB(t) + sql := `SELECT least_significant($1, $2)` + inputBytes := "10001000101010101101" + for _, test := range []struct { + name string + inputLength int + expectedOutput string + expectedError bool + }{ + { + name: "half slice", + inputLength: 10, + expectedOutput: "1010101101", + }, + { + name: "full slice", + inputLength: 20, + expectedOutput: "10001000101010101101", + }, + { + name: "no slice", + inputLength: 0, + expectedOutput: "", + }, + { + name: "slice too large", + inputLength: 21, + expectedError: true, + }, + } { + t.Run(test.name, func(tt *testing.T) { + var test = test + var result string + err := db.Get(&result, sql, inputBytes, test.inputLength) + if test.expectedError { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, test.expectedOutput, result) + }) + } +} + +func TestKeeperDB_Uint256ToBit(t *testing.T) { + t.Parallel() + db, _, _ := setupKeeperDB(t) + sql := `SELECT uint256_to_bit($1)` + for _, test := range []struct { + name string + input *big.Int + errorExpected bool + }{ + { + name: "min", + input: big.NewInt(0), + }, + { + name: "max", + input: evmutils.MaxUint256, + }, + { + name: "rand", + input: evmutils.RandUint256(), + }, + { + name: "needs pading", + input: big.NewInt(1), + }, + { + name: "overflow", + input: bigmath.Add(evmutils.MaxUint256, big.NewInt(1)), + errorExpected: true, + }, + } { + t.Run(test.name, func(tt *testing.T) { + var test = test + var result string + err := db.Get(&result, sql, test.input.String()) + if test.errorExpected { + require.Error(t, err) + return + } + require.NoError(t, err) + expected := utils.LeftPadBitString(fmt.Sprintf("%b", test.input), 256) + require.Equal(t, expected, result) + }) + } +} diff --git 
a/core/services/keeper/registry1_1_synchronizer_test.go b/core/services/keeper/registry1_1_synchronizer_test.go new file mode 100644 index 00000000..ee5aaf1e --- /dev/null +++ b/core/services/keeper/registry1_1_synchronizer_test.go @@ -0,0 +1,429 @@ +package keeper_test + +import ( + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + logmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log/mocks" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + registry1_1 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" +) + +var registryConfig1_1 = registry1_1.GetConfig{ + PaymentPremiumPPB: 100, + BlockCountPerTurn: big.NewInt(20), + CheckGasLimit: 2_000_000, + StalenessSeconds: big.NewInt(3600), + FallbackGasPrice: big.NewInt(1000000), + FallbackLinkPrice: big.NewInt(1000000), +} + +var upkeepConfig1_1 = registry1_1.GetUpkeep{ + Target: testutils.NewAddress(), + ExecuteGas: 2_000_000, + CheckData: common.Hex2Bytes("1234"), + Balance: big.NewInt(1000000000000000000), + LastKeeper: testutils.NewAddress(), + Admin: testutils.NewAddress(), + 
MaxValidBlocknumber: 1_000_000_000, +} + +func mockRegistry1_1( + t *testing.T, + ethMock *evmclimocks.Client, + contractAddress common.Address, + config registry1_1.GetConfig, + keeperList []common.Address, + cancelledUpkeeps []*big.Int, + upkeepCount *big.Int, + upkeepConfig registry1_1.GetUpkeep, + timesGetUpkeepMock int, +) { + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_1ABI, contractAddress) + + ethMock.On("HeadByNumber", mock.Anything, (*big.Int)(nil)). + Return(&evmtypes.Head{Number: 10}, nil) + registryMock.MockResponse("getConfig", config).Once() + registryMock.MockResponse("getKeeperList", keeperList).Once() + registryMock.MockResponse("getCanceledUpkeepList", cancelledUpkeeps).Once() + registryMock.MockResponse("getUpkeepCount", upkeepCount).Once() + if timesGetUpkeepMock > 0 { + registryMock.MockResponse("getUpkeep", upkeepConfig).Times(timesGetUpkeepMock) + } +} + +func Test_LogListenerOpts1_1(t *testing.T) { + db := pgtest.NewSqlxDB(t) + scopedConfig := evmtest.NewChainScopedConfig(t, configtest.NewGeneralConfig(t, nil)) + korm := keeper.NewORM(db, logger.TestLogger(t), scopedConfig.Database()) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + j := cltest.MustInsertKeeperJob(t, db, korm, cltest.NewEIP55Address(), cltest.NewEIP55Address()) + + contractAddress := j.KeeperSpec.ContractAddress.Address() + registryMock := cltest.NewContractMockReceiver(t, ethClient, keeper.Registry1_1ABI, contractAddress) + registryMock.MockResponse("typeAndVersion", "KeeperRegistry 1.1.0").Once() + + registryWrapper, err := keeper.NewRegistryWrapper(j.KeeperSpec.ContractAddress, ethClient) + require.NoError(t, err) + + logListenerOpts, err := registryWrapper.GetLogListenerOpts(1, nil) + require.NoError(t, err) + + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_1.KeeperRegistryKeepersUpdated{}.Topic(), "Registry should listen to KeeperRegistryKeepersUpdated log") + require.Contains(t, 
logListenerOpts.LogsWithTopics, registry1_1.KeeperRegistryConfigSet{}.Topic(), "Registry should listen to KeeperRegistryConfigSet log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_1.KeeperRegistryUpkeepCanceled{}.Topic(), "Registry should listen to KeeperRegistryUpkeepCanceled log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_1.KeeperRegistryUpkeepRegistered{}.Topic(), "Registry should listen to KeeperRegistryUpkeepRegistered log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_1.KeeperRegistryUpkeepPerformed{}.Topic(), "Registry should listen to KeeperRegistryUpkeepPerformed log") +} + +func Test_RegistrySynchronizer1_1_Start(t *testing.T) { + db, synchronizer, ethMock, _, job := setupRegistrySync(t, keeper.RegistryVersion_1_1) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + mockRegistry1_1( + t, + ethMock, + contractAddress, + registryConfig1_1, + []common.Address{fromAddress}, + []*big.Int{}, + big.NewInt(0), + upkeepConfig1_1, + 0) + + err := synchronizer.Start(testutils.Context(t)) + require.NoError(t, err) + defer func() { assert.NoError(t, synchronizer.Close()) }() + + cltest.WaitForCount(t, db, "keeper_registries", 1) + + err = synchronizer.Start(testutils.Context(t)) + require.Error(t, err) +} + +func Test_RegistrySynchronizer_CalcPositioningConstant(t *testing.T) { + t.Parallel() + for _, upkeepID := range []int64{0, 1, 100, 10_000} { + _, err := keeper.CalcPositioningConstant(ubig.NewI(upkeepID), cltest.NewEIP55Address()) + require.NoError(t, err) + } +} + +func Test_RegistrySynchronizer1_1_FullSync(t *testing.T) { + g := gomega.NewWithT(t) + db, synchronizer, ethMock, _, job := setupRegistrySync(t, keeper.RegistryVersion_1_1) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + canceledUpkeeps := []*big.Int{big.NewInt(1)} + + upkeepConfig := upkeepConfig1_1 + 
upkeepConfig.LastKeeper = fromAddress + mockRegistry1_1( + t, + ethMock, + contractAddress, + registryConfig1_1, + []common.Address{fromAddress}, + canceledUpkeeps, + big.NewInt(3), + upkeepConfig, + 2) // sync only 2 (#0,#2) + + synchronizer.ExportedFullSync() + + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 2) + + // Last keeper index should be set correctly on upkeep + g.Eventually(func() bool { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.LastKeeperIndex.Valid + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(true)) + g.Eventually(func() int64 { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.LastKeeperIndex.Int64 + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(int64(0))) + + var registry keeper.Registry + var upkeepRegistration keeper.UpkeepRegistration + require.NoError(t, db.Get(®istry, `SELECT * FROM keeper_registries`)) + require.NoError(t, db.Get(&upkeepRegistration, `SELECT * FROM upkeep_registrations`)) + require.Equal(t, job.KeeperSpec.ContractAddress, registry.ContractAddress) + require.Equal(t, job.KeeperSpec.FromAddress, registry.FromAddress) + require.Equal(t, int32(20), registry.BlockCountPerTurn) + require.Equal(t, int32(0), registry.KeeperIndex) + require.Equal(t, int32(1), registry.NumKeepers) + require.Equal(t, upkeepConfig1_1.CheckData, upkeepRegistration.CheckData) + require.Equal(t, upkeepConfig1_1.ExecuteGas, upkeepRegistration.ExecuteGas) + + assertUpkeepIDs(t, db, []int64{0, 2}) + + // 2nd sync + canceledUpkeeps = []*big.Int{big.NewInt(0), big.NewInt(1), big.NewInt(3)} + mockRegistry1_1( + t, + ethMock, + contractAddress, + registryConfig1_1, + []common.Address{fromAddress}, + canceledUpkeeps, + big.NewInt(5), + upkeepConfig1_1, + 2) 
// sync all 2 upkeeps (#2, #4) + synchronizer.ExportedFullSync() + + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 2) + assertUpkeepIDs(t, db, []int64{2, 4}) +} + +func Test_RegistrySynchronizer1_1_ConfigSetLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_1) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_1( + t, + ethMock, + contractAddress, + registryConfig1_1, + []common.Address{fromAddress}, + []*big.Int{}, + big.NewInt(0), + upkeepConfig1_1, + 0) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + var registry keeper.Registry + require.NoError(t, db.Get(®istry, `SELECT * FROM keeper_registries`)) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_1ABI, contractAddress) + newConfig := registryConfig1_1 + newConfig.BlockCountPerTurn = big.NewInt(40) // change from default + registryMock.MockResponse("getKeeperList", []common.Address{fromAddress}).Once() + registryMock.MockResponse("getConfig", newConfig).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_1.KeeperRegistryConfigSet{} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.AssertRecordEventually(t, db, ®istry, fmt.Sprintf(`SELECT * FROM keeper_registries WHERE id = %d`, registry.ID), func() bool { + return registry.BlockCountPerTurn == 40 + }) + cltest.AssertCount(t, db, 
"keeper_registries", 1) +} + +func Test_RegistrySynchronizer1_1_KeepersUpdatedLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_1) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_1( + t, + ethMock, + contractAddress, + registryConfig1_1, + []common.Address{fromAddress}, + []*big.Int{}, + big.NewInt(0), + upkeepConfig1_1, + 0) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + var registry keeper.Registry + require.NoError(t, db.Get(®istry, `SELECT * FROM keeper_registries`)) + + addresses := []common.Address{fromAddress, testutils.NewAddress()} // change from default + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_1ABI, contractAddress) + registryMock.MockResponse("getConfig", registryConfig1_1).Once() + registryMock.MockResponse("getKeeperList", addresses).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_1.KeeperRegistryKeepersUpdated{} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.AssertRecordEventually(t, db, ®istry, fmt.Sprintf(`SELECT * FROM keeper_registries WHERE id = %d`, registry.ID), func() bool { + return registry.NumKeepers == 2 + }) + cltest.AssertCount(t, db, "keeper_registries", 1) +} +func Test_RegistrySynchronizer1_1_UpkeepCanceledLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_1) + + contractAddress := 
job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_1( + t, + ethMock, + contractAddress, + registryConfig1_1, + []common.Address{fromAddress}, + []*big.Int{}, + big.NewInt(3), + upkeepConfig1_1, + 3) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 3) + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_1.KeeperRegistryUpkeepCanceled{Id: big.NewInt(1)} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.WaitForCount(t, db, "upkeep_registrations", 2) +} + +func Test_RegistrySynchronizer1_1_UpkeepRegisteredLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_1) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_1( + t, + ethMock, + contractAddress, + registryConfig1_1, + []common.Address{fromAddress}, + []*big.Int{}, + big.NewInt(1), + upkeepConfig1_1, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 1) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_1ABI, contractAddress) + registryMock.MockResponse("getUpkeep", upkeepConfig1_1).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := 
registry1_1.KeeperRegistryUpkeepRegistered{Id: big.NewInt(1)} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.WaitForCount(t, db, "upkeep_registrations", 2) +} + +func Test_RegistrySynchronizer1_1_UpkeepPerformedLog(t *testing.T) { + g := gomega.NewWithT(t) + + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_1) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_1( + t, + ethMock, + contractAddress, + registryConfig1_1, + []common.Address{fromAddress}, + []*big.Int{}, + big.NewInt(1), + upkeepConfig1_1, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 1) + + pgtest.MustExec(t, db, `UPDATE upkeep_registrations SET last_run_block_height = 100`) + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash, BlockNumber: 200} + log := registry1_1.KeeperRegistryUpkeepPerformed{Id: big.NewInt(0), From: fromAddress} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + g.Eventually(func() int64 { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + 
require.NoError(t, err) + return upkeep.LastRunBlockHeight + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(int64(200))) + + g.Eventually(func() int64 { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.LastKeeperIndex.Int64 + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(int64(0))) +} diff --git a/core/services/keeper/registry1_2_synchronizer_test.go b/core/services/keeper/registry1_2_synchronizer_test.go new file mode 100644 index 00000000..f443ca4f --- /dev/null +++ b/core/services/keeper/registry1_2_synchronizer_test.go @@ -0,0 +1,594 @@ +package keeper_test + +import ( + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + logmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log/mocks" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + registry1_2 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" +) + +var registryConfig1_2 = registry1_2.Config{ + PaymentPremiumPPB: 100, + FlatFeeMicroLink: uint32(0), + BlockCountPerTurn: big.NewInt(20), + CheckGasLimit: 
2_000_000, + StalenessSeconds: big.NewInt(3600), + GasCeilingMultiplier: uint16(2), + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: uint32(5000000), + FallbackGasPrice: big.NewInt(1000000), + FallbackLinkPrice: big.NewInt(1000000), + Transcoder: cltest.NewEIP55Address().Address(), + Registrar: cltest.NewEIP55Address().Address(), +} + +var registryState1_2 = registry1_2.State{ + Nonce: uint32(0), + OwnerLinkBalance: big.NewInt(1000000000000000000), + ExpectedLinkBalance: big.NewInt(1000000000000000000), + NumUpkeeps: big.NewInt(0), +} + +var upkeepConfig1_2 = registry1_2.GetUpkeep{ + Target: testutils.NewAddress(), + ExecuteGas: 2_000_000, + CheckData: common.Hex2Bytes("1234"), + Balance: big.NewInt(1000000000000000000), + LastKeeper: testutils.NewAddress(), + Admin: testutils.NewAddress(), + MaxValidBlocknumber: 1_000_000_000, + AmountSpent: big.NewInt(0), +} + +func mockRegistry1_2( + t *testing.T, + ethMock *evmclimocks.Client, + contractAddress common.Address, + config registry1_2.Config, + activeUpkeepIDs []*big.Int, + keeperList []common.Address, + upkeepConfig registry1_2.GetUpkeep, + timesGetUpkeepMock int, + getStateTime int, + getActiveUpkeepIDsTime int, +) { + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_2ABI, contractAddress) + + state := registryState1_2 + state.NumUpkeeps = big.NewInt(int64(len(activeUpkeepIDs))) + var getState = registry1_2.GetState{ + State: state, + Config: config, + Keepers: keeperList, + } + ethMock.On("HeadByNumber", mock.Anything, (*big.Int)(nil)). 
+ Return(&evmtypes.Head{Number: 10}, nil) + if getStateTime > 0 { + registryMock.MockResponse("getState", getState).Times(getStateTime) + } + if getActiveUpkeepIDsTime > 0 { + registryMock.MockResponse("getActiveUpkeepIDs", activeUpkeepIDs).Times(getActiveUpkeepIDsTime) + } + if timesGetUpkeepMock > 0 { + registryMock.MockResponse("getUpkeep", upkeepConfig).Times(timesGetUpkeepMock) + } +} + +func Test_LogListenerOpts1_2(t *testing.T) { + db := pgtest.NewSqlxDB(t) + scopedConfig := evmtest.NewChainScopedConfig(t, configtest.NewGeneralConfig(t, nil)) + korm := keeper.NewORM(db, logger.TestLogger(t), scopedConfig.Database()) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + j := cltest.MustInsertKeeperJob(t, db, korm, cltest.NewEIP55Address(), cltest.NewEIP55Address()) + + contractAddress := j.KeeperSpec.ContractAddress.Address() + registryMock := cltest.NewContractMockReceiver(t, ethClient, keeper.Registry1_1ABI, contractAddress) + registryMock.MockResponse("typeAndVersion", "KeeperRegistry 1.2.0").Once() + + registryWrapper, err := keeper.NewRegistryWrapper(j.KeeperSpec.ContractAddress, ethClient) + require.NoError(t, err) + + logListenerOpts, err := registryWrapper.GetLogListenerOpts(1, nil) + require.NoError(t, err) + + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_2.KeeperRegistryKeepersUpdated{}.Topic(), "Registry should listen to KeeperRegistryKeepersUpdated log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_2.KeeperRegistryConfigSet{}.Topic(), "Registry should listen to KeeperRegistryConfigSet log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_2.KeeperRegistryUpkeepCanceled{}.Topic(), "Registry should listen to KeeperRegistryUpkeepCanceled log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_2.KeeperRegistryUpkeepRegistered{}.Topic(), "Registry should listen to KeeperRegistryUpkeepRegistered log") + require.Contains(t, logListenerOpts.LogsWithTopics, 
registry1_2.KeeperRegistryUpkeepPerformed{}.Topic(), "Registry should listen to KeeperRegistryUpkeepPerformed log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_2.KeeperRegistryUpkeepGasLimitSet{}.Topic(), "Registry should listen to KeeperRegistryUpkeepGasLimitSet log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_2.KeeperRegistryUpkeepMigrated{}.Topic(), "Registry should listen to KeeperRegistryUpkeepMigrated log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_2.KeeperRegistryUpkeepReceived{}.Topic(), "Registry should listen to KeeperRegistryUpkeepReceived log") +} + +func Test_RegistrySynchronizer1_2_Start(t *testing.T) { + db, synchronizer, ethMock, _, job := setupRegistrySync(t, keeper.RegistryVersion_1_2) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + mockRegistry1_2( + t, + ethMock, + contractAddress, + registryConfig1_2, + []*big.Int{}, + []common.Address{fromAddress}, + upkeepConfig1_2, + 0, + 2, + 0) + + err := synchronizer.Start(testutils.Context(t)) + require.NoError(t, err) + defer func() { assert.NoError(t, synchronizer.Close()) }() + + cltest.WaitForCount(t, db, "keeper_registries", 1) + + err = synchronizer.Start(testutils.Context(t)) + require.Error(t, err) +} + +func Test_RegistrySynchronizer1_2_FullSync(t *testing.T) { + g := gomega.NewWithT(t) + db, synchronizer, ethMock, _, job := setupRegistrySync(t, keeper.RegistryVersion_1_2) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + upkeepConfig := upkeepConfig1_2 + upkeepConfig.LastKeeper = fromAddress + mockRegistry1_2( + t, + ethMock, + contractAddress, + registryConfig1_2, + []*big.Int{big.NewInt(3), big.NewInt(69), big.NewInt(420)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig, + 3, // sync all 3 + 2, + 1) + synchronizer.ExportedFullSync() + + cltest.AssertCount(t, db, 
"keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 3) + + // Last keeper index should be set correctly on upkeep + g.Eventually(func() bool { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.LastKeeperIndex.Valid + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(true)) + g.Eventually(func() int64 { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.LastKeeperIndex.Int64 + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(int64(0))) + + var registry keeper.Registry + var upkeepRegistration keeper.UpkeepRegistration + require.NoError(t, db.Get(®istry, `SELECT * FROM keeper_registries`)) + require.Equal(t, job.KeeperSpec.ContractAddress, registry.ContractAddress) + require.Equal(t, job.KeeperSpec.FromAddress, registry.FromAddress) + require.Equal(t, int32(20), registry.BlockCountPerTurn) + require.Equal(t, int32(0), registry.KeeperIndex) + require.Equal(t, int32(1), registry.NumKeepers) + + require.NoError(t, db.Get(&upkeepRegistration, `SELECT * FROM upkeep_registrations`)) + require.Equal(t, upkeepConfig1_2.CheckData, upkeepRegistration.CheckData) + require.Equal(t, upkeepConfig1_2.ExecuteGas, upkeepRegistration.ExecuteGas) + + assertUpkeepIDs(t, db, []int64{3, 69, 420}) + + // 2nd sync. 
Cancel upkeep (id 3) and add a new upkeep (id 2022) + mockRegistry1_2( + t, + ethMock, + contractAddress, + registryConfig1_2, + []*big.Int{big.NewInt(69), big.NewInt(420), big.NewInt(2022)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_2, + 3, // sync all 3 active upkeeps + 2, + 1) + synchronizer.ExportedFullSync() + + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 3) + assertUpkeepIDs(t, db, []int64{69, 420, 2022}) +} + +func Test_RegistrySynchronizer1_2_ConfigSetLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_2) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_2( + t, + ethMock, + contractAddress, + registryConfig1_2, + []*big.Int{}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_2, + 0, + 2, + 0) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + var registry keeper.Registry + require.NoError(t, db.Get(®istry, `SELECT * FROM keeper_registries`)) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_2ABI, contractAddress) + newConfig := registryConfig1_2 + newConfig.BlockCountPerTurn = big.NewInt(40) // change from default + registryMock.MockResponse("getState", registry1_2.GetState{ + State: registryState1_2, + Config: newConfig, + Keepers: []common.Address{fromAddress}, + }).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_2.KeeperRegistryConfigSet{} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, 
mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.AssertRecordEventually(t, db, ®istry, fmt.Sprintf(`SELECT * FROM keeper_registries WHERE id = %d`, registry.ID), func() bool { + return registry.BlockCountPerTurn == 40 + }) + cltest.AssertCount(t, db, "keeper_registries", 1) +} + +func Test_RegistrySynchronizer1_2_KeepersUpdatedLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_2) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_2( + t, + ethMock, + contractAddress, + registryConfig1_2, + []*big.Int{}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_2, + 0, + 2, + 0) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + var registry keeper.Registry + require.NoError(t, db.Get(®istry, `SELECT * FROM keeper_registries`)) + + addresses := []common.Address{fromAddress, testutils.NewAddress()} // change from default + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_2ABI, contractAddress) + registryMock.MockResponse("getState", registry1_2.GetState{ + State: registryState1_2, + Config: registryConfig1_2, + Keepers: addresses, + }).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_2.KeeperRegistryKeepersUpdated{} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.AssertRecordEventually(t, db, ®istry, fmt.Sprintf(`SELECT * FROM 
keeper_registries WHERE id = %d`, registry.ID), func() bool { + return registry.NumKeepers == 2 + }) + cltest.AssertCount(t, db, "keeper_registries", 1) +} + +func Test_RegistrySynchronizer1_2_UpkeepCanceledLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_2) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_2( + t, + ethMock, + contractAddress, + registryConfig1_2, + []*big.Int{big.NewInt(3), big.NewInt(69), big.NewInt(420)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_2, + 3, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 3) + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_2.KeeperRegistryUpkeepCanceled{Id: big.NewInt(3)} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.WaitForCount(t, db, "upkeep_registrations", 2) +} + +func Test_RegistrySynchronizer1_2_UpkeepRegisteredLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_2) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_2( + t, + ethMock, + contractAddress, + registryConfig1_2, + []*big.Int{big.NewInt(3)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_2, + 1, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, 
"keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 1) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_2ABI, contractAddress) + registryMock.MockResponse("getUpkeep", upkeepConfig1_2).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_2.KeeperRegistryUpkeepRegistered{Id: big.NewInt(420)} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.WaitForCount(t, db, "upkeep_registrations", 2) +} + +func Test_RegistrySynchronizer1_2_UpkeepPerformedLog(t *testing.T) { + g := gomega.NewWithT(t) + + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_2) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_2( + t, + ethMock, + contractAddress, + registryConfig1_2, + []*big.Int{big.NewInt(3)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_2, + 1, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 1) + + pgtest.MustExec(t, db, `UPDATE upkeep_registrations SET last_run_block_height = 100`) + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash, BlockNumber: 200} + log := registry1_2.KeeperRegistryUpkeepPerformed{Id: big.NewInt(3), From: fromAddress} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + 
logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + g.Eventually(func() int64 { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.LastRunBlockHeight + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(int64(200))) + + g.Eventually(func() int64 { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.LastKeeperIndex.Int64 + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(int64(0))) +} + +func Test_RegistrySynchronizer1_2_UpkeepGasLimitSetLog(t *testing.T) { + g := gomega.NewWithT(t) + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_2) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_2( + t, + ethMock, + contractAddress, + registryConfig1_2, + []*big.Int{big.NewInt(3)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_2, + 1, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 1) + + getExecuteGas := func() uint32 { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.ExecuteGas + } + g.Eventually(getExecuteGas, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(uint32(2_000_000))) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_2ABI, contractAddress) + newConfig := upkeepConfig1_2 + newConfig.ExecuteGas = 4_000_000 // 
change from default + registryMock.MockResponse("getUpkeep", newConfig).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_2.KeeperRegistryUpkeepGasLimitSet{Id: big.NewInt(3), GasLimit: big.NewInt(4_000_000)} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + g.Eventually(getExecuteGas, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(uint32(4_000_000))) +} + +func Test_RegistrySynchronizer1_2_UpkeepReceivedLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_2) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_2( + t, + ethMock, + contractAddress, + registryConfig1_2, + []*big.Int{big.NewInt(3)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_2, + 1, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 1) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_2ABI, contractAddress) + registryMock.MockResponse("getUpkeep", upkeepConfig1_2).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_2.KeeperRegistryUpkeepReceived{Id: big.NewInt(420)} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + 
logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.WaitForCount(t, db, "upkeep_registrations", 2) +} + +func Test_RegistrySynchronizer1_2_UpkeepMigratedLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_2) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_2( + t, + ethMock, + contractAddress, + registryConfig1_2, + []*big.Int{big.NewInt(3), big.NewInt(69), big.NewInt(420)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_2, + 3, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 3) + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_2.KeeperRegistryUpkeepMigrated{Id: big.NewInt(3)} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.WaitForCount(t, db, "upkeep_registrations", 2) +} diff --git a/core/services/keeper/registry1_3_synchronizer_test.go b/core/services/keeper/registry1_3_synchronizer_test.go new file mode 100644 index 00000000..0510d5a4 --- /dev/null +++ b/core/services/keeper/registry1_3_synchronizer_test.go @@ -0,0 +1,721 @@ +package keeper_test + +import ( + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/core/types" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + logmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log/mocks" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + registry1_3 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_3" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" +) + +var registryConfig1_3 = registry1_3.Config{ + PaymentPremiumPPB: 100, + FlatFeeMicroLink: uint32(0), + BlockCountPerTurn: big.NewInt(20), + CheckGasLimit: 2_000_000, + StalenessSeconds: big.NewInt(3600), + GasCeilingMultiplier: uint16(2), + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: uint32(5000000), + FallbackGasPrice: big.NewInt(1000000), + FallbackLinkPrice: big.NewInt(1000000), + Transcoder: cltest.NewEIP55Address().Address(), + Registrar: cltest.NewEIP55Address().Address(), +} + +var registryState1_3 = registry1_3.State{ + Nonce: uint32(0), + OwnerLinkBalance: big.NewInt(1000000000000000000), + ExpectedLinkBalance: big.NewInt(1000000000000000000), + NumUpkeeps: big.NewInt(0), +} + +var upkeepConfig1_3 = registry1_3.GetUpkeep{ + Target: testutils.NewAddress(), + ExecuteGas: 2_000_000, + CheckData: common.Hex2Bytes("1234"), + Balance: big.NewInt(1000000000000000000), + LastKeeper: 
testutils.NewAddress(), + Admin: testutils.NewAddress(), + MaxValidBlocknumber: 1_000_000_000, + AmountSpent: big.NewInt(0), +} + +func mockRegistry1_3( + t *testing.T, + ethMock *evmclimocks.Client, + contractAddress common.Address, + config registry1_3.Config, + activeUpkeepIDs []*big.Int, + keeperList []common.Address, + upkeepConfig registry1_3.GetUpkeep, + timesGetUpkeepMock int, + getStateTime int, + getActiveUpkeepIDsTime int, +) { + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_3ABI, contractAddress) + + state := registryState1_3 + state.NumUpkeeps = big.NewInt(int64(len(activeUpkeepIDs))) + var getState = registry1_3.GetState{ + State: state, + Config: config, + Keepers: keeperList, + } + ethMock.On("HeadByNumber", mock.Anything, (*big.Int)(nil)). + Return(&evmtypes.Head{Number: 10}, nil) + if getStateTime > 0 { + registryMock.MockResponse("getState", getState).Times(getStateTime) + } + if getActiveUpkeepIDsTime > 0 { + registryMock.MockResponse("getActiveUpkeepIDs", activeUpkeepIDs).Times(getActiveUpkeepIDsTime) + } + if timesGetUpkeepMock > 0 { + registryMock.MockResponse("getUpkeep", upkeepConfig).Times(timesGetUpkeepMock) + } +} + +func Test_LogListenerOpts1_3(t *testing.T) { + db := pgtest.NewSqlxDB(t) + scopedConfig := evmtest.NewChainScopedConfig(t, configtest.NewGeneralConfig(t, nil)) + korm := keeper.NewORM(db, logger.TestLogger(t), scopedConfig.Database()) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + j := cltest.MustInsertKeeperJob(t, db, korm, cltest.NewEIP55Address(), cltest.NewEIP55Address()) + + contractAddress := j.KeeperSpec.ContractAddress.Address() + registryMock := cltest.NewContractMockReceiver(t, ethClient, keeper.Registry1_1ABI, contractAddress) + registryMock.MockResponse("typeAndVersion", "KeeperRegistry 1.3.0").Once() + + registryWrapper, err := keeper.NewRegistryWrapper(j.KeeperSpec.ContractAddress, ethClient) + require.NoError(t, err) + + logListenerOpts, err := 
registryWrapper.GetLogListenerOpts(1, nil) + require.NoError(t, err) + + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_3.KeeperRegistryKeepersUpdated{}.Topic(), "Registry should listen to KeeperRegistryKeepersUpdated log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_3.KeeperRegistryConfigSet{}.Topic(), "Registry should listen to KeeperRegistryConfigSet log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_3.KeeperRegistryUpkeepCanceled{}.Topic(), "Registry should listen to KeeperRegistryUpkeepCanceled log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_3.KeeperRegistryUpkeepRegistered{}.Topic(), "Registry should listen to KeeperRegistryUpkeepRegistered log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_3.KeeperRegistryUpkeepPerformed{}.Topic(), "Registry should listen to KeeperRegistryUpkeepPerformed log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_3.KeeperRegistryUpkeepGasLimitSet{}.Topic(), "Registry should listen to KeeperRegistryUpkeepGasLimitSet log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_3.KeeperRegistryUpkeepMigrated{}.Topic(), "Registry should listen to KeeperRegistryUpkeepMigrated log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_3.KeeperRegistryUpkeepReceived{}.Topic(), "Registry should listen to KeeperRegistryUpkeepReceived log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_3.KeeperRegistryUpkeepPaused{}.Topic(), "Registry should listen to KeeperRegistryUpkeepPaused log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_3.KeeperRegistryUpkeepUnpaused{}.Topic(), "Registry should listen to KeeperRegistryUpkeepUnpaused log") + require.Contains(t, logListenerOpts.LogsWithTopics, registry1_3.KeeperRegistryUpkeepCheckDataUpdated{}.Topic(), "Registry should listen to KeeperRegistryUpkeepCheckDataUpdated log") +} + +func Test_RegistrySynchronizer1_3_Start(t *testing.T) { + 
db, synchronizer, ethMock, _, job := setupRegistrySync(t, keeper.RegistryVersion_1_3) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + mockRegistry1_3( + t, + ethMock, + contractAddress, + registryConfig1_3, + []*big.Int{}, + []common.Address{fromAddress}, + upkeepConfig1_3, + 0, + 2, + 0) + + err := synchronizer.Start(testutils.Context(t)) + require.NoError(t, err) + defer func() { assert.NoError(t, synchronizer.Close()) }() + + cltest.WaitForCount(t, db, "keeper_registries", 1) + + err = synchronizer.Start(testutils.Context(t)) + require.Error(t, err) +} + +func Test_RegistrySynchronizer1_3_FullSync(t *testing.T) { + g := gomega.NewWithT(t) + db, synchronizer, ethMock, _, job := setupRegistrySync(t, keeper.RegistryVersion_1_3) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + upkeepConfig := upkeepConfig1_3 + upkeepConfig.LastKeeper = fromAddress + mockRegistry1_3( + t, + ethMock, + contractAddress, + registryConfig1_3, + []*big.Int{big.NewInt(3), big.NewInt(69), big.NewInt(420)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig, + 3, // sync all 3 + 2, + 1) + synchronizer.ExportedFullSync() + + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 3) + + // Last keeper index should be set correctly on upkeep + g.Eventually(func() bool { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.LastKeeperIndex.Valid + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(true)) + g.Eventually(func() int64 { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.LastKeeperIndex.Int64 + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(int64(0))) + 
+ var registry keeper.Registry + var upkeepRegistration keeper.UpkeepRegistration + require.NoError(t, db.Get(®istry, `SELECT * FROM keeper_registries`)) + require.Equal(t, job.KeeperSpec.ContractAddress, registry.ContractAddress) + require.Equal(t, job.KeeperSpec.FromAddress, registry.FromAddress) + require.Equal(t, int32(20), registry.BlockCountPerTurn) + require.Equal(t, int32(0), registry.KeeperIndex) + require.Equal(t, int32(1), registry.NumKeepers) + + require.NoError(t, db.Get(&upkeepRegistration, `SELECT * FROM upkeep_registrations`)) + require.Equal(t, upkeepConfig1_3.CheckData, upkeepRegistration.CheckData) + require.Equal(t, upkeepConfig1_3.ExecuteGas, upkeepRegistration.ExecuteGas) + + assertUpkeepIDs(t, db, []int64{3, 69, 420}) + + // 2nd sync. Cancel upkeep (id 3) and add a new upkeep (id 2022) + mockRegistry1_3( + t, + ethMock, + contractAddress, + registryConfig1_3, + []*big.Int{big.NewInt(69), big.NewInt(420), big.NewInt(2022)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_3, + 3, // sync all 3 upkeeps + 2, + 1) + synchronizer.ExportedFullSync() + + cltest.AssertCount(t, db, "keeper_registries", 1) + cltest.AssertCount(t, db, "upkeep_registrations", 3) + assertUpkeepIDs(t, db, []int64{69, 420, 2022}) +} + +func Test_RegistrySynchronizer1_3_ConfigSetLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_3) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_3( + t, + ethMock, + contractAddress, + registryConfig1_3, + []*big.Int{}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_3, + 0, + 2, + 0) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + var registry keeper.Registry + require.NoError(t, db.Get(®istry, `SELECT * FROM keeper_registries`)) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_3ABI, 
contractAddress) + newConfig := registryConfig1_3 + newConfig.BlockCountPerTurn = big.NewInt(40) // change from default + registryMock.MockResponse("getState", registry1_3.GetState{ + State: registryState1_3, + Config: newConfig, + Keepers: []common.Address{fromAddress}, + }).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_3.KeeperRegistryConfigSet{} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.AssertRecordEventually(t, db, ®istry, fmt.Sprintf(`SELECT * FROM keeper_registries WHERE id = %d`, registry.ID), func() bool { + return registry.BlockCountPerTurn == 40 + }) + cltest.AssertCount(t, db, "keeper_registries", 1) +} + +func Test_RegistrySynchronizer1_3_KeepersUpdatedLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_3) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_3( + t, + ethMock, + contractAddress, + registryConfig1_3, + []*big.Int{}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_3, + 0, + 2, + 0) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + var registry keeper.Registry + require.NoError(t, db.Get(®istry, `SELECT * FROM keeper_registries`)) + + addresses := []common.Address{fromAddress, testutils.NewAddress()} // change from default + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_3ABI, contractAddress) + registryMock.MockResponse("getState", 
registry1_3.GetState{ + State: registryState1_3, + Config: registryConfig1_3, + Keepers: addresses, + }).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_3.KeeperRegistryKeepersUpdated{} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.AssertRecordEventually(t, db, ®istry, fmt.Sprintf(`SELECT * FROM keeper_registries WHERE id = %d`, registry.ID), func() bool { + return registry.NumKeepers == 2 + }) + cltest.AssertCount(t, db, "keeper_registries", 1) +} + +func Test_RegistrySynchronizer1_3_UpkeepCanceledLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_3) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_3( + t, + ethMock, + contractAddress, + registryConfig1_3, + []*big.Int{big.NewInt(3), big.NewInt(69), big.NewInt(420)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_3, + 3, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 3) + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_3.KeeperRegistryUpkeepCanceled{Id: big.NewInt(3)} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", 
mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.WaitForCount(t, db, "upkeep_registrations", 2) +} + +func Test_RegistrySynchronizer1_3_UpkeepRegisteredLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_3) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_3( + t, + ethMock, + contractAddress, + registryConfig1_3, + []*big.Int{big.NewInt(3)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_3, + 1, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 1) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_3ABI, contractAddress) + registryMock.MockResponse("getUpkeep", upkeepConfig1_3).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_3.KeeperRegistryUpkeepRegistered{Id: big.NewInt(420)} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.WaitForCount(t, db, "upkeep_registrations", 2) +} + +func Test_RegistrySynchronizer1_3_UpkeepPerformedLog(t *testing.T) { + g := gomega.NewWithT(t) + + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_3) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + 
mockRegistry1_3( + t, + ethMock, + contractAddress, + registryConfig1_3, + []*big.Int{big.NewInt(3)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_3, + 1, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 1) + + pgtest.MustExec(t, db, `UPDATE upkeep_registrations SET last_run_block_height = 100`) + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash, BlockNumber: 200} + log := registry1_3.KeeperRegistryUpkeepPerformed{Id: big.NewInt(3), From: fromAddress} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + g.Eventually(func() int64 { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.LastRunBlockHeight + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(int64(200))) + + g.Eventually(func() int64 { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.LastKeeperIndex.Int64 + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(int64(0))) +} + +func Test_RegistrySynchronizer1_3_UpkeepGasLimitSetLog(t *testing.T) { + g := gomega.NewWithT(t) + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_3) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_3( + t, + ethMock, + contractAddress, + 
registryConfig1_3, + []*big.Int{big.NewInt(3)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_3, + 1, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 1) + + getExecuteGas := func() uint32 { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations`) + require.NoError(t, err) + return upkeep.ExecuteGas + } + g.Eventually(getExecuteGas, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(uint32(2_000_000))) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_3ABI, contractAddress) + newConfig := upkeepConfig1_3 + newConfig.ExecuteGas = 4_000_000 // change from default + registryMock.MockResponse("getUpkeep", newConfig).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_3.KeeperRegistryUpkeepGasLimitSet{Id: big.NewInt(3), GasLimit: big.NewInt(4_000_000)} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + g.Eventually(getExecuteGas, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(uint32(4_000_000))) +} + +func Test_RegistrySynchronizer1_3_UpkeepReceivedLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_3) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_3( + t, + ethMock, + contractAddress, + registryConfig1_3, + []*big.Int{big.NewInt(3)}, // 
Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_3, + 1, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 1) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_3ABI, contractAddress) + registryMock.MockResponse("getUpkeep", upkeepConfig1_3).Once() + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_3.KeeperRegistryUpkeepReceived{Id: big.NewInt(420)} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.WaitForCount(t, db, "upkeep_registrations", 2) +} + +func Test_RegistrySynchronizer1_3_UpkeepMigratedLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_3) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + mockRegistry1_3( + t, + ethMock, + contractAddress, + registryConfig1_3, + []*big.Int{big.NewInt(3), big.NewInt(69), big.NewInt(420)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_3, + 3, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 3) + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_3.KeeperRegistryUpkeepMigrated{Id: big.NewInt(3)} + logBroadcast := logmocks.NewBroadcast(t) + 
logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + // race condition: "wait for count" + cltest.WaitForCount(t, db, "upkeep_registrations", 2) +} + +func Test_RegistrySynchronizer1_3_UpkeepPausedLog_UpkeepUnpausedLog(t *testing.T) { + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_3) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + upkeepId := big.NewInt(3) + + mockRegistry1_3( + t, + ethMock, + contractAddress, + registryConfig1_3, + []*big.Int{big.NewInt(3), big.NewInt(69), big.NewInt(420)}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_3, + 4, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 3) + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + log := registry1_3.KeeperRegistryUpkeepPaused{Id: upkeepId} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.WaitForCount(t, db, "upkeep_registrations", 2) + + cfg = configtest.NewGeneralConfig(t, nil) + head = cltest.MustInsertHead(t, db, cfg.Database(), 2) + rawLog = types.Log{BlockHash: head.Hash} + unpausedlog := registry1_3.KeeperRegistryUpkeepUnpaused{Id: upkeepId} + 
logBroadcast = logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&unpausedlog) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + cltest.WaitForCount(t, db, "upkeep_registrations", 3) + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations WHERE upkeep_id = $1`, ubig.New(upkeepId)) + require.NoError(t, err) + + require.Equal(t, upkeepId.String(), upkeep.UpkeepID.String()) + require.Equal(t, upkeepConfig1_3.CheckData, upkeep.CheckData) + require.Equal(t, upkeepConfig1_3.ExecuteGas, upkeep.ExecuteGas) + + var registryId int64 + err = db.Get(®istryId, `SELECT id from keeper_registries WHERE job_id = $1`, job.ID) + require.NoError(t, err) + require.Equal(t, registryId, upkeep.RegistryID) +} + +func Test_RegistrySynchronizer1_3_UpkeepCheckDataUpdatedLog(t *testing.T) { + g := gomega.NewWithT(t) + db, synchronizer, ethMock, lb, job := setupRegistrySync(t, keeper.RegistryVersion_1_3) + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + upkeepId := big.NewInt(3) + + mockRegistry1_3( + t, + ethMock, + contractAddress, + registryConfig1_3, + []*big.Int{upkeepId}, // Upkeep IDs + []common.Address{fromAddress}, + upkeepConfig1_3, + 1, + 2, + 1) + + servicetest.Run(t, synchronizer) + cltest.WaitForCount(t, db, "keeper_registries", 1) + cltest.WaitForCount(t, db, "upkeep_registrations", 1) + + cfg := configtest.NewGeneralConfig(t, nil) + head := cltest.MustInsertHead(t, db, cfg.Database(), 1) + rawLog := types.Log{BlockHash: head.Hash} + _ = logmocks.NewBroadcast(t) + newCheckData := []byte("Plugin") + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_3ABI, contractAddress) + newConfig := 
upkeepConfig1_3 + newConfig.CheckData = newCheckData // changed from default + registryMock.MockResponse("getUpkeep", newConfig).Once() + + updatedLog := registry1_3.KeeperRegistryUpkeepCheckDataUpdated{Id: upkeepId, NewCheckData: newCheckData} + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("DecodedLog").Return(&updatedLog) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Maybe().Return("") + lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + + g.Eventually(func() []byte { + var upkeep keeper.UpkeepRegistration + err := db.Get(&upkeep, `SELECT * FROM upkeep_registrations WHERE upkeep_id = $1`, ubig.New(upkeepId)) + require.NoError(t, err) + return upkeep.CheckData + }, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(newCheckData)) +} diff --git a/core/services/keeper/registry_interface.go b/core/services/keeper/registry_interface.go new file mode 100644 index 00000000..73c77b9a --- /dev/null +++ b/core/services/keeper/registry_interface.go @@ -0,0 +1,388 @@ +package keeper + +import ( + "context" + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + registry1_1 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + registry1_2 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + registry1_3 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_3" + type_and_version 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/type_and_version_interface_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +type RegistryVersion int32 + +const ( + RegistryVersion_1_0 RegistryVersion = iota + RegistryVersion_1_1 + RegistryVersion_1_2 + RegistryVersion_1_3 + RegistryVersion_2_0 + RegistryVersion_2_1 +) + +func (rv RegistryVersion) String() string { + switch rv { + case RegistryVersion_1_0, RegistryVersion_1_1, RegistryVersion_1_2, RegistryVersion_1_3: + return fmt.Sprintf("v1.%d", rv) + case RegistryVersion_2_0: + return "v2.0" + default: + return "unknown registry version" + } +} + +const ActiveUpkeepIDBatchSize int64 = 10000 + +// upkeepGetter is declared as a private interface as it is only needed +// internally to the keeper package for now +type upkeepGetter interface { + GetUpkeep(*bind.CallOpts, *big.Int) (*UpkeepConfig, error) +} + +// RegistryWrapper implements a layer on top of different versions of registry wrappers +// to provide a unified layer to rest of the codebase +type RegistryWrapper struct { + Address ethkey.EIP55Address + Version RegistryVersion + contract1_1 *registry1_1.KeeperRegistry + contract1_2 *registry1_2.KeeperRegistry + contract1_3 *registry1_3.KeeperRegistry + evmClient evmclient.Client +} + +func NewRegistryWrapper(address ethkey.EIP55Address, evmClient evmclient.Client) (*RegistryWrapper, error) { + interface_wrapper, err := type_and_version.NewTypeAndVersionInterface( + address.Address(), + evmClient, + ) + if err != nil { + return nil, errors.Wrap(err, "unable to create type and interface wrapper") + } + version, err := getRegistryVersion(interface_wrapper) + if err != nil { + return nil, errors.Wrap(err, "unable to determine version of keeper registry contract") + } + + contract1_1, err := registry1_1.NewKeeperRegistry( + address.Address(), + evmClient, + ) + if err != nil { + return nil, errors.Wrap(err, "unable to create keeper registry 1_1 contract 
wrapper") + } + contract1_2, err := registry1_2.NewKeeperRegistry( + address.Address(), + evmClient, + ) + if err != nil { + return nil, errors.Wrap(err, "unable to create keeper registry 1_2 contract wrapper") + } + contract1_3, err := registry1_3.NewKeeperRegistry( + address.Address(), + evmClient, + ) + if err != nil { + return nil, errors.Wrap(err, "unable to create keeper registry 1_3 contract wrapper") + } + + return &RegistryWrapper{ + Address: address, + Version: *version, + contract1_1: contract1_1, + contract1_2: contract1_2, + contract1_3: contract1_3, + evmClient: evmClient, + }, nil +} + +func getRegistryVersion(contract *type_and_version.TypeAndVersionInterface) (*RegistryVersion, error) { + typeAndVersion, err := contract.TypeAndVersion(nil) + if err != nil { + jsonErr := evmclient.ExtractRPCErrorOrNil(err) + if jsonErr != nil { + // Version 1.0 does not support typeAndVersion interface, hence gives a json error on this call + version := RegistryVersion_1_0 + return &version, nil + } + return nil, errors.Wrap(err, "unable to fetch version of registry") + } + switch { + case strings.HasPrefix(typeAndVersion, "KeeperRegistry 1.1"): + version := RegistryVersion_1_1 + return &version, nil + case strings.HasPrefix(typeAndVersion, "KeeperRegistry 1.2"): + version := RegistryVersion_1_2 + return &version, nil + case strings.HasPrefix(typeAndVersion, "KeeperRegistry 1.3"): + version := RegistryVersion_1_3 + return &version, nil + default: + return nil, errors.Errorf("Registry type and version %s not supported", typeAndVersion) + } +} + +func newUnsupportedVersionError(functionName string, version RegistryVersion) error { + return errors.Errorf("Registry version %d does not support %s", version, functionName) +} + +// getUpkeepCount retrieves the number of upkeeps +func (rw *RegistryWrapper) getUpkeepCount(opts *bind.CallOpts) (*big.Int, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + upkeepCount, err := 
rw.contract1_1.GetUpkeepCount(opts) + if err != nil { + return nil, errors.Wrap(err, "failed to get upkeep count") + } + return upkeepCount, nil + case RegistryVersion_1_2: + state, err := rw.contract1_2.GetState(opts) + if err != nil { + return nil, errors.Wrapf(err, "failed to get contract state at block number %d", opts.BlockNumber.Int64()) + } + return state.State.NumUpkeeps, nil + case RegistryVersion_1_3: + state, err := rw.contract1_3.GetState(opts) + if err != nil { + return nil, errors.Wrapf(err, "failed to get contract state at block number %d", opts.BlockNumber.Int64()) + } + return state.State.NumUpkeeps, nil + default: + return nil, newUnsupportedVersionError("getUpkeepCount", rw.Version) + } +} + +func (rw *RegistryWrapper) GetActiveUpkeepIDs(opts *bind.CallOpts) ([]*big.Int, error) { + if opts == nil || opts.BlockNumber.Int64() == 0 { + var head *evmtypes.Head + // fetch the current block number so batched GetActiveUpkeepIDs calls can be performed on the same block + head, err := rw.evmClient.HeadByNumber(context.Background(), nil) + if err != nil { + return nil, errors.Wrap(err, "failed to fetch EVM block header") + } + if opts != nil { + opts.BlockNumber = big.NewInt(head.Number) + } else { + opts = &bind.CallOpts{ + BlockNumber: big.NewInt(head.Number), + } + } + } + + upkeepCount, err := rw.getUpkeepCount(opts) + if err != nil { + return nil, errors.Wrap(err, "failed to get upkeep count") + } + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + cancelledUpkeeps, err2 := rw.contract1_1.GetCanceledUpkeepList(opts) + if err2 != nil { + return nil, errors.Wrap(err2, "failed to get cancelled upkeeps") + } + cancelledSet := make(map[int64]bool) + for _, upkeepID := range cancelledUpkeeps { + cancelledSet[upkeepID.Int64()] = true + } + // Active upkeep IDs are 0,1 ... 
upkeepCount-1, removing the cancelled ones + activeUpkeeps := make([]*big.Int, 0) + for i := int64(0); i < upkeepCount.Int64(); i++ { + if _, found := cancelledSet[i]; !found { + activeUpkeeps = append(activeUpkeeps, big.NewInt(i)) + } + } + return activeUpkeeps, nil + case RegistryVersion_1_2, RegistryVersion_1_3: + activeUpkeepIDs := make([]*big.Int, 0) + var activeUpkeepIDBatch []*big.Int + for int64(len(activeUpkeepIDs)) < upkeepCount.Int64() { + startIndex := int64(len(activeUpkeepIDs)) + maxCount := upkeepCount.Int64() - int64(len(activeUpkeepIDs)) + if maxCount > ActiveUpkeepIDBatchSize { + maxCount = ActiveUpkeepIDBatchSize + } + if rw.Version == RegistryVersion_1_2 { + activeUpkeepIDBatch, err = rw.contract1_2.GetActiveUpkeepIDs(opts, big.NewInt(startIndex), big.NewInt(maxCount)) + } else { + activeUpkeepIDBatch, err = rw.contract1_3.GetActiveUpkeepIDs(opts, big.NewInt(startIndex), big.NewInt(maxCount)) + } + if err != nil { + return nil, errors.Wrapf(err, "failed to get active upkeep IDs from index %d to %d (both inclusive)", startIndex, startIndex+maxCount-1) + } + activeUpkeepIDs = append(activeUpkeepIDs, activeUpkeepIDBatch...) 
+ } + + return activeUpkeepIDs, nil + default: + return nil, newUnsupportedVersionError("GetActiveUpkeepIDs", rw.Version) + } +} + +type UpkeepConfig struct { + ExecuteGas uint32 + CheckData []byte + LastKeeper common.Address +} + +func (rw *RegistryWrapper) GetUpkeep(opts *bind.CallOpts, id *big.Int) (*UpkeepConfig, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + upkeep, err := rw.contract1_1.GetUpkeep(opts, id) + if err != nil { + return nil, errors.Wrap(err, "failed to get upkeep config") + } + return &UpkeepConfig{ + ExecuteGas: upkeep.ExecuteGas, + CheckData: upkeep.CheckData, + LastKeeper: upkeep.LastKeeper, + }, nil + case RegistryVersion_1_2: + upkeep, err := rw.contract1_2.GetUpkeep(opts, id) + if err != nil { + return nil, errors.Wrap(err, "failed to get upkeep config") + } + return &UpkeepConfig{ + ExecuteGas: upkeep.ExecuteGas, + CheckData: upkeep.CheckData, + LastKeeper: upkeep.LastKeeper, + }, nil + case RegistryVersion_1_3: + upkeep, err := rw.contract1_3.GetUpkeep(opts, id) + if err != nil { + return nil, errors.Wrap(err, "failed to get upkeep config") + } + return &UpkeepConfig{ + ExecuteGas: upkeep.ExecuteGas, + CheckData: upkeep.CheckData, + LastKeeper: upkeep.LastKeeper, + }, nil + default: + return nil, newUnsupportedVersionError("GetUpkeep", rw.Version) + } +} + +type RegistryConfig struct { + BlockCountPerTurn int32 + CheckGas uint32 + KeeperAddresses []common.Address +} + +func (rw *RegistryWrapper) GetConfig(opts *bind.CallOpts) (*RegistryConfig, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + config, err := rw.contract1_1.GetConfig(opts) + if err != nil { + // TODO: error wrapping with %w should be done here to preserve the error type as it bubbles up + // pkg/errors doesn't support the native errors.Is/As capabilities + // using pkg/errors produces a stack trace in the logs and this behavior is too valuable to let go + return nil, errors.Errorf("%s [%s]: getConfig %s", 
ErrContractCallFailure, err, rw.Version) + } + keeperAddresses, err := rw.contract1_1.GetKeeperList(opts) + if err != nil { + return nil, errors.Errorf("%s [%s]: getKeeperList %s", ErrContractCallFailure, err, rw.Version) + } + return &RegistryConfig{ + BlockCountPerTurn: int32(config.BlockCountPerTurn.Int64()), + CheckGas: config.CheckGasLimit, + KeeperAddresses: keeperAddresses, + }, nil + case RegistryVersion_1_2: + state, err := rw.contract1_2.GetState(opts) + if err != nil { + return nil, errors.Errorf("%s [%s]: getState %s", ErrContractCallFailure, err, rw.Version) + } + + return &RegistryConfig{ + BlockCountPerTurn: int32(state.Config.BlockCountPerTurn.Int64()), + CheckGas: state.Config.CheckGasLimit, + KeeperAddresses: state.Keepers, + }, nil + case RegistryVersion_1_3: + state, err := rw.contract1_3.GetState(opts) + if err != nil { + return nil, errors.Errorf("%s [%s]: getState %s", ErrContractCallFailure, err, rw.Version) + } + + return &RegistryConfig{ + BlockCountPerTurn: int32(state.Config.BlockCountPerTurn.Int64()), + CheckGas: state.Config.CheckGasLimit, + KeeperAddresses: state.Keepers, + }, nil + default: + return nil, newUnsupportedVersionError("GetConfig", rw.Version) + } +} + +func (rw *RegistryWrapper) SetKeepers(opts *bind.TransactOpts, keepers []common.Address, payees []common.Address) (*types.Transaction, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + return rw.contract1_1.SetKeepers(opts, keepers, payees) + case RegistryVersion_1_2: + return rw.contract1_2.SetKeepers(opts, keepers, payees) + case RegistryVersion_1_3: + return rw.contract1_3.SetKeepers(opts, keepers, payees) + default: + return nil, newUnsupportedVersionError("SetKeepers", rw.Version) + } +} + +func (rw *RegistryWrapper) RegisterUpkeep(opts *bind.TransactOpts, target common.Address, gasLimit uint32, admin common.Address, checkData []byte) (*types.Transaction, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + 
return rw.contract1_1.RegisterUpkeep(opts, target, gasLimit, admin, checkData) + case RegistryVersion_1_2: + return rw.contract1_2.RegisterUpkeep(opts, target, gasLimit, admin, checkData) + case RegistryVersion_1_3: + return rw.contract1_3.RegisterUpkeep(opts, target, gasLimit, admin, checkData) + default: + return nil, newUnsupportedVersionError("RegisterUpkeep", rw.Version) + } +} + +func (rw *RegistryWrapper) AddFunds(opts *bind.TransactOpts, id *big.Int, amount *big.Int) (*types.Transaction, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + return rw.contract1_1.AddFunds(opts, id, amount) + case RegistryVersion_1_2: + return rw.contract1_2.AddFunds(opts, id, amount) + case RegistryVersion_1_3: + return rw.contract1_3.AddFunds(opts, id, amount) + default: + return nil, newUnsupportedVersionError("AddFunds", rw.Version) + } +} + +func (rw *RegistryWrapper) PerformUpkeep(opts *bind.TransactOpts, id *big.Int, performData []byte) (*types.Transaction, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + return rw.contract1_1.PerformUpkeep(opts, id, performData) + case RegistryVersion_1_2: + return rw.contract1_2.PerformUpkeep(opts, id, performData) + case RegistryVersion_1_3: + return rw.contract1_3.PerformUpkeep(opts, id, performData) + default: + return nil, newUnsupportedVersionError("PerformUpkeep", rw.Version) + } +} + +func (rw *RegistryWrapper) CancelUpkeep(opts *bind.TransactOpts, id *big.Int) (*types.Transaction, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + return rw.contract1_1.CancelUpkeep(opts, id) + case RegistryVersion_1_2: + return rw.contract1_2.CancelUpkeep(opts, id) + case RegistryVersion_1_3: + return rw.contract1_3.CancelUpkeep(opts, id) + default: + return nil, newUnsupportedVersionError("CancelUpkeep", rw.Version) + } +} diff --git a/core/services/keeper/registry_interface_logs.go b/core/services/keeper/registry_interface_logs.go new file mode 
100644 index 00000000..ed2489f7 --- /dev/null +++ b/core/services/keeper/registry_interface_logs.go @@ -0,0 +1,267 @@ +package keeper + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + registry1_1 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + registry1_2 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + registry1_3 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_3" +) + +func (rw *RegistryWrapper) GetLogListenerOpts(minIncomingConfirmations uint32, upkeepPerformedFilter [][]log.Topic) (*log.ListenerOpts, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + return &log.ListenerOpts{ + Contract: rw.contract1_1.Address(), + ParseLog: rw.contract1_1.ParseLog, + LogsWithTopics: map[common.Hash][][]log.Topic{ + registry1_1.KeeperRegistryKeepersUpdated{}.Topic(): nil, + registry1_1.KeeperRegistryConfigSet{}.Topic(): nil, + registry1_1.KeeperRegistryUpkeepCanceled{}.Topic(): nil, + registry1_1.KeeperRegistryUpkeepRegistered{}.Topic(): nil, + registry1_1.KeeperRegistryUpkeepPerformed{}.Topic(): upkeepPerformedFilter, + }, + MinIncomingConfirmations: minIncomingConfirmations, + }, nil + case RegistryVersion_1_2: + return &log.ListenerOpts{ + Contract: rw.contract1_2.Address(), + ParseLog: rw.contract1_2.ParseLog, + LogsWithTopics: map[common.Hash][][]log.Topic{ + registry1_2.KeeperRegistryKeepersUpdated{}.Topic(): nil, + registry1_2.KeeperRegistryConfigSet{}.Topic(): nil, + registry1_2.KeeperRegistryUpkeepCanceled{}.Topic(): nil, + registry1_2.KeeperRegistryUpkeepRegistered{}.Topic(): nil, + registry1_2.KeeperRegistryUpkeepPerformed{}.Topic(): upkeepPerformedFilter, + registry1_2.KeeperRegistryUpkeepGasLimitSet{}.Topic(): nil, + registry1_2.KeeperRegistryUpkeepMigrated{}.Topic(): nil, + 
registry1_2.KeeperRegistryUpkeepReceived{}.Topic(): nil, + }, + MinIncomingConfirmations: minIncomingConfirmations, + }, nil + case RegistryVersion_1_3: + return &log.ListenerOpts{ + Contract: rw.contract1_3.Address(), + ParseLog: rw.contract1_3.ParseLog, + LogsWithTopics: map[common.Hash][][]log.Topic{ + registry1_3.KeeperRegistryKeepersUpdated{}.Topic(): nil, + registry1_3.KeeperRegistryConfigSet{}.Topic(): nil, + registry1_3.KeeperRegistryUpkeepCanceled{}.Topic(): nil, + registry1_3.KeeperRegistryUpkeepRegistered{}.Topic(): nil, + registry1_3.KeeperRegistryUpkeepPerformed{}.Topic(): upkeepPerformedFilter, + registry1_3.KeeperRegistryUpkeepGasLimitSet{}.Topic(): nil, + registry1_3.KeeperRegistryUpkeepMigrated{}.Topic(): nil, + registry1_3.KeeperRegistryUpkeepReceived{}.Topic(): nil, + registry1_3.KeeperRegistryUpkeepPaused{}.Topic(): nil, + registry1_3.KeeperRegistryUpkeepUnpaused{}.Topic(): nil, + registry1_3.KeeperRegistryUpkeepCheckDataUpdated{}.Topic(): nil, + }, + MinIncomingConfirmations: minIncomingConfirmations, + }, nil + default: + return nil, newUnsupportedVersionError("GetLogListenerOpts", rw.Version) + } +} + +func (rw *RegistryWrapper) GetCancelledUpkeepIDFromLog(broadcast log.Broadcast) (*big.Int, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_1.KeeperRegistryUpkeepCanceled) + if !ok { + return nil, errors.Errorf("expected UpkeepCanceled log but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + case RegistryVersion_1_2: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_2.KeeperRegistryUpkeepCanceled) + if !ok { + return nil, errors.Errorf("expected UpkeepCanceled log but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + case RegistryVersion_1_3: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_3.KeeperRegistryUpkeepCanceled) + if !ok { + return nil, errors.Errorf("expected UpkeepCanceled log but got %T", 
broadcastedLog) + } + return broadcastedLog.Id, nil + default: + return nil, newUnsupportedVersionError("GetCancelledUpkeepIDFromLog", rw.Version) + } +} + +func (rw *RegistryWrapper) GetUpkeepIdFromRegistrationLog(broadcast log.Broadcast) (*big.Int, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_1.KeeperRegistryUpkeepRegistered) + if !ok { + return nil, errors.Errorf("expected UpkeepRegistered log but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + case RegistryVersion_1_2: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_2.KeeperRegistryUpkeepRegistered) + if !ok { + return nil, errors.Errorf("expected UpkeepRegistered log but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + case RegistryVersion_1_3: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_3.KeeperRegistryUpkeepRegistered) + if !ok { + return nil, errors.Errorf("expected UpkeepRegistered log but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + default: + return nil, newUnsupportedVersionError("GetUpkeepIdFromRegistrationLog", rw.Version) + } +} + +type UpkeepPerformedLog struct { + UpkeepID *big.Int + FromKeeper common.Address +} + +func (rw *RegistryWrapper) ParseUpkeepPerformedLog(broadcast log.Broadcast) (*UpkeepPerformedLog, error) { + switch rw.Version { + case RegistryVersion_1_0, RegistryVersion_1_1: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_1.KeeperRegistryUpkeepPerformed) + if !ok { + return nil, errors.Errorf("expected UpkeepPerformed log but got %T", broadcastedLog) + } + return &UpkeepPerformedLog{ + UpkeepID: broadcastedLog.Id, + FromKeeper: broadcastedLog.From, + }, nil + case RegistryVersion_1_2: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_2.KeeperRegistryUpkeepPerformed) + if !ok { + return nil, errors.Errorf("expected UpkeepPerformed log but got %T", broadcastedLog) + } + return &UpkeepPerformedLog{ + 
UpkeepID: broadcastedLog.Id, + FromKeeper: broadcastedLog.From, + }, nil + case RegistryVersion_1_3: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_3.KeeperRegistryUpkeepPerformed) + if !ok { + return nil, errors.Errorf("expected UpkeepPerformed log but got %T", broadcastedLog) + } + return &UpkeepPerformedLog{ + UpkeepID: broadcastedLog.Id, + FromKeeper: broadcastedLog.From, + }, nil + default: + return nil, newUnsupportedVersionError("ParseUpkeepPerformedLog", rw.Version) + } +} + +func (rw *RegistryWrapper) GetIDFromGasLimitSetLog(broadcast log.Broadcast) (*big.Int, error) { + // Only supported on 1.2 and 1.3 + switch rw.Version { + case RegistryVersion_1_2: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_2.KeeperRegistryUpkeepGasLimitSet) + if !ok { + return nil, errors.Errorf("expected UpkeepGasLimitSetlog but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + case RegistryVersion_1_3: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_3.KeeperRegistryUpkeepGasLimitSet) + if !ok { + return nil, errors.Errorf("expected UpkeepGasLimitSetlog but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + default: + return nil, newUnsupportedVersionError("GetIDFromGasLimitSetLog", rw.Version) + } +} + +func (rw *RegistryWrapper) GetUpkeepIdFromReceivedLog(broadcast log.Broadcast) (*big.Int, error) { + // Only supported on 1.2 and 1.3 + switch rw.Version { + case RegistryVersion_1_2: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_2.KeeperRegistryUpkeepReceived) + if !ok { + return nil, errors.Errorf("expected UpkeepReceived log but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + case RegistryVersion_1_3: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_3.KeeperRegistryUpkeepReceived) + if !ok { + return nil, errors.Errorf("expected UpkeepReceived log but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + default: + return nil, 
newUnsupportedVersionError("GetUpkeepIdFromReceivedLog", rw.Version) + } +} + +func (rw *RegistryWrapper) GetUpkeepIdFromMigratedLog(broadcast log.Broadcast) (*big.Int, error) { + // Only supported on 1.2 and 1.3 + switch rw.Version { + case RegistryVersion_1_2: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_2.KeeperRegistryUpkeepMigrated) + if !ok { + return nil, errors.Errorf("expected UpkeepMigrated log but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + case RegistryVersion_1_3: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_3.KeeperRegistryUpkeepMigrated) + if !ok { + return nil, errors.Errorf("expected UpkeepMigrated log but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + default: + return nil, newUnsupportedVersionError("GetUpkeepIdFromMigratedLog", rw.Version) + } +} + +func (rw *RegistryWrapper) GetUpkeepIdFromUpkeepPausedLog(broadcast log.Broadcast) (*big.Int, error) { + // Only supported on 1.3 + switch rw.Version { + case RegistryVersion_1_3: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_3.KeeperRegistryUpkeepPaused) + if !ok { + return nil, errors.Errorf("expected UpkeepPaused log but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + default: + return nil, newUnsupportedVersionError("GetUpkeepIdFromUpkeepPausedLog", rw.Version) + } +} + +func (rw *RegistryWrapper) GetUpkeepIdFromUpkeepUnpausedLog(broadcast log.Broadcast) (*big.Int, error) { + // Only supported on 1.3 + switch rw.Version { + case RegistryVersion_1_3: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_3.KeeperRegistryUpkeepUnpaused) + if !ok { + return nil, errors.Errorf("expected UpkeepUnpaused log but got %T", broadcastedLog) + } + return broadcastedLog.Id, nil + default: + return nil, newUnsupportedVersionError("GetUpkeepIdFromUpkeepUnpausedLog", rw.Version) + } +} + +type UpkeepCheckDataUpdatedLog struct { + UpkeepID *big.Int + NewCheckData []byte +} + +func (rw *RegistryWrapper) 
ParseUpkeepCheckDataUpdatedLog(broadcast log.Broadcast) (*UpkeepCheckDataUpdatedLog, error) { + // Only supported on 1.3 + switch rw.Version { + case RegistryVersion_1_3: + broadcastedLog, ok := broadcast.DecodedLog().(*registry1_3.KeeperRegistryUpkeepCheckDataUpdated) + if !ok { + return nil, errors.Errorf("expected UpkeepCheckDataUpdated log but got %T", broadcastedLog) + } + return &UpkeepCheckDataUpdatedLog{ + UpkeepID: broadcastedLog.Id, + NewCheckData: broadcastedLog.NewCheckData, + }, nil + default: + return nil, newUnsupportedVersionError("GetUpkeepIdFromUpkeepPausedLog", rw.Version) + } +} diff --git a/core/services/keeper/registry_synchronizer_core.go b/core/services/keeper/registry_synchronizer_core.go new file mode 100644 index 00000000..b1411151 --- /dev/null +++ b/core/services/keeper/registry_synchronizer_core.go @@ -0,0 +1,130 @@ +package keeper + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// RegistrySynchronizer conforms to the Service and Listener interfaces +var ( + _ job.ServiceCtx = (*RegistrySynchronizer)(nil) + _ log.Listener = (*RegistrySynchronizer)(nil) +) + +type RegistrySynchronizerOptions struct { + Job job.Job + RegistryWrapper RegistryWrapper + ORM ORM + JRM job.ORM + LogBroadcaster log.Broadcaster + MailMon *mailbox.Monitor + SyncInterval time.Duration + MinIncomingConfirmations uint32 + Logger logger.Logger + SyncUpkeepQueueSize uint32 + EffectiveKeeperAddress common.Address +} + +type RegistrySynchronizer struct { + services.StateMachine + chStop chan struct{} + registryWrapper RegistryWrapper + interval time.Duration + job job.Job + 
jrm job.ORM + logBroadcaster log.Broadcaster + mbLogs *mailbox.Mailbox[log.Broadcast] + minIncomingConfirmations uint32 + effectiveKeeperAddress common.Address + orm ORM + logger logger.SugaredLogger + wgDone sync.WaitGroup + syncUpkeepQueueSize uint32 //Represents the max number of upkeeps that can be synced in parallel + mailMon *mailbox.Monitor +} + +// NewRegistrySynchronizer is the constructor of RegistrySynchronizer +func NewRegistrySynchronizer(opts RegistrySynchronizerOptions) *RegistrySynchronizer { + return &RegistrySynchronizer{ + chStop: make(chan struct{}), + registryWrapper: opts.RegistryWrapper, + interval: opts.SyncInterval, + job: opts.Job, + jrm: opts.JRM, + logBroadcaster: opts.LogBroadcaster, + mbLogs: mailbox.New[log.Broadcast](5_000), // Arbitrary limit, better to have excess capacity + minIncomingConfirmations: opts.MinIncomingConfirmations, + orm: opts.ORM, + effectiveKeeperAddress: opts.EffectiveKeeperAddress, + logger: logger.Sugared(opts.Logger.Named("RegistrySynchronizer")), + syncUpkeepQueueSize: opts.SyncUpkeepQueueSize, + mailMon: opts.MailMon, + } +} + +// Start starts RegistrySynchronizer. 
+func (rs *RegistrySynchronizer) Start(context.Context) error { + return rs.StartOnce("RegistrySynchronizer", func() error { + rs.wgDone.Add(2) + go rs.run() + + var upkeepPerformedFilter [][]log.Topic + + logListenerOpts, err := rs.registryWrapper.GetLogListenerOpts(rs.minIncomingConfirmations, upkeepPerformedFilter) + if err != nil { + return errors.Wrap(err, "Unable to fetch log listener opts from wrapper") + } + lbUnsubscribe := rs.logBroadcaster.Register(rs, *logListenerOpts) + + go func() { + defer rs.wgDone.Done() + defer lbUnsubscribe() + <-rs.chStop + }() + + rs.mailMon.Monitor(rs.mbLogs, "RegistrySynchronizer", "Logs", fmt.Sprint(rs.job.ID)) + + return nil + }) +} + +func (rs *RegistrySynchronizer) Close() error { + return rs.StopOnce("RegistrySynchronizer", func() error { + close(rs.chStop) + rs.wgDone.Wait() + return rs.mbLogs.Close() + }) +} + +func (rs *RegistrySynchronizer) run() { + syncTicker := utils.NewResettableTimer() + defer rs.wgDone.Done() + defer syncTicker.Stop() + + rs.fullSync() + + for { + select { + case <-rs.chStop: + return + case <-syncTicker.Ticks(): + rs.fullSync() + syncTicker.Reset(rs.interval) + case <-rs.mbLogs.Notify(): + rs.processLogs() + } + } +} diff --git a/core/services/keeper/registry_synchronizer_helper_test.go b/core/services/keeper/registry_synchronizer_helper_test.go new file mode 100644 index 00000000..eea22720 --- /dev/null +++ b/core/services/keeper/registry_synchronizer_helper_test.go @@ -0,0 +1,102 @@ +package keeper_test + +import ( + "testing" + "time" + + "github.com/onsi/gomega" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/plugin-common/pkg/utils/mailbox/mailboxtest" + + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + logmocks 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/log/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +const syncInterval = 1000 * time.Hour // prevents sync timer from triggering during test +const syncUpkeepQueueSize = 10 + +func setupRegistrySync(t *testing.T, version keeper.RegistryVersion) ( + *sqlx.DB, + *keeper.RegistrySynchronizer, + *evmclimocks.Client, + *logmocks.Broadcaster, + job.Job, +) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + scopedConfig := evmtest.NewChainScopedConfig(t, cfg) + korm := keeper.NewORM(db, logger.TestLogger(t), scopedConfig.Database()) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + lbMock := logmocks.NewBroadcaster(t) + lbMock.On("AddDependents", 1).Maybe() + j := cltest.MustInsertKeeperJob(t, db, korm, cltest.NewEIP55Address(), cltest.NewEIP55Address()) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, Client: ethClient, LogBroadcaster: lbMock, GeneralConfig: cfg, KeyStore: keyStore.Eth()}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + ch := evmtest.MustGetDefaultChain(t, legacyChains) + jpv2 := cltest.NewJobPipelineV2(t, cfg.WebServer(), cfg.JobPipeline(), cfg.Database(), legacyChains, db, keyStore, nil, nil) + contractAddress := j.KeeperSpec.ContractAddress.Address() + + switch version { + case keeper.RegistryVersion_1_0, keeper.RegistryVersion_1_1: + registryMock := 
cltest.NewContractMockReceiver(t, ethClient, keeper.Registry1_1ABI, contractAddress) + registryMock.MockResponse("typeAndVersion", "KeeperRegistry 1.1.1").Once() + case keeper.RegistryVersion_1_2: + registryMock := cltest.NewContractMockReceiver(t, ethClient, keeper.Registry1_2ABI, contractAddress) + registryMock.MockResponse("typeAndVersion", "KeeperRegistry 1.2.0").Once() + case keeper.RegistryVersion_1_3: + registryMock := cltest.NewContractMockReceiver(t, ethClient, keeper.Registry1_3ABI, contractAddress) + registryMock.MockResponse("typeAndVersion", "KeeperRegistry 1.3.0").Once() + case keeper.RegistryVersion_2_0, keeper.RegistryVersion_2_1: + t.Fatalf("Unsupported version: %s", version) + } + + registryWrapper, err := keeper.NewRegistryWrapper(j.KeeperSpec.ContractAddress, ethClient) + require.NoError(t, err) + + lbMock.On("Register", mock.Anything, mock.MatchedBy(func(opts log.ListenerOpts) bool { + return opts.Contract == contractAddress + })).Maybe().Return(func() {}) + lbMock.On("IsConnected").Return(true).Maybe() + + mailMon := servicetest.Run(t, mailboxtest.NewMonitor(t)) + + orm := keeper.NewORM(db, logger.TestLogger(t), ch.Config().Database()) + synchronizer := keeper.NewRegistrySynchronizer(keeper.RegistrySynchronizerOptions{ + Job: j, + RegistryWrapper: *registryWrapper, + ORM: orm, + JRM: jpv2.Jrm, + LogBroadcaster: lbMock, + MailMon: mailMon, + SyncInterval: syncInterval, + MinIncomingConfirmations: 1, + Logger: logger.TestLogger(t), + SyncUpkeepQueueSize: syncUpkeepQueueSize, + EffectiveKeeperAddress: j.KeeperSpec.FromAddress.Address(), + }) + return db, synchronizer, ethClient, lbMock, j +} + +func assertUpkeepIDs(t *testing.T, db *sqlx.DB, expected []int64) { + g := gomega.NewWithT(t) + var upkeepIDs []int64 + err := db.Select(&upkeepIDs, `SELECT upkeep_id FROM upkeep_registrations`) + require.NoError(t, err) + require.Equal(t, len(expected), len(upkeepIDs)) + g.Expect(upkeepIDs).To(gomega.ContainElements(expected)) +} diff --git 
a/core/services/keeper/registry_synchronizer_log_listener.go b/core/services/keeper/registry_synchronizer_log_listener.go new file mode 100644 index 00000000..d6a5dc22 --- /dev/null +++ b/core/services/keeper/registry_synchronizer_log_listener.go @@ -0,0 +1,31 @@ +package keeper + +import ( + "reflect" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" +) + +func (rs *RegistrySynchronizer) JobID() int32 { + return rs.job.ID +} + +func (rs *RegistrySynchronizer) HandleLog(broadcast log.Broadcast) { + eventLog := broadcast.DecodedLog() + if eventLog == nil || reflect.ValueOf(eventLog).IsNil() { + rs.logger.Panicf("HandleLog: ignoring nil value, type: %T", broadcast) + return + } + + svcLogger := rs.logger.With( + "logType", reflect.TypeOf(eventLog), + "txHash", broadcast.RawLog().TxHash.Hex(), + ) + + svcLogger.Debug("received log, waiting for confirmations") + + wasOverCapacity := rs.mbLogs.Deliver(broadcast) + if wasOverCapacity { + svcLogger.Errorf("mailbox is over capacity - dropped the oldest unprocessed item") + } +} diff --git a/core/services/keeper/registry_synchronizer_process_logs.go b/core/services/keeper/registry_synchronizer_process_logs.go new file mode 100644 index 00000000..cbac7320 --- /dev/null +++ b/core/services/keeper/registry_synchronizer_process_logs.go @@ -0,0 +1,274 @@ +package keeper + +import ( + "fmt" + "reflect" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + registry1_1 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + registry1_2 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + registry1_3 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_3" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +func (rs *RegistrySynchronizer) processLogs() { + for _, broadcast := range 
rs.mbLogs.RetrieveAll() { + eventLog := broadcast.DecodedLog() + if eventLog == nil || reflect.ValueOf(eventLog).IsNil() { + rs.logger.Panicf("processLogs: ignoring nil value, type: %T", eventLog) + continue + } + + was, err := rs.logBroadcaster.WasAlreadyConsumed(broadcast) + if err != nil { + rs.logger.Warn(errors.Wrap(err, "unable to check if log was consumed")) + continue + } else if was { + continue + } + + switch eventLog.(type) { + case *registry1_1.KeeperRegistryKeepersUpdated, + *registry1_1.KeeperRegistryConfigSet, + *registry1_2.KeeperRegistryKeepersUpdated, + *registry1_2.KeeperRegistryConfigSet, + *registry1_3.KeeperRegistryKeepersUpdated, + *registry1_3.KeeperRegistryConfigSet: + err = rs.handleSyncRegistryLog(broadcast) + + case *registry1_1.KeeperRegistryUpkeepCanceled, + *registry1_2.KeeperRegistryUpkeepCanceled, + *registry1_3.KeeperRegistryUpkeepCanceled: + err = rs.handleUpkeepCancelled(broadcast) + + case *registry1_1.KeeperRegistryUpkeepRegistered, + *registry1_2.KeeperRegistryUpkeepRegistered, + *registry1_3.KeeperRegistryUpkeepRegistered: + err = rs.handleUpkeepRegistered(broadcast) + + case *registry1_1.KeeperRegistryUpkeepPerformed, + *registry1_2.KeeperRegistryUpkeepPerformed, + *registry1_3.KeeperRegistryUpkeepPerformed: + err = rs.handleUpkeepPerformed(broadcast) + + case *registry1_2.KeeperRegistryUpkeepGasLimitSet, + *registry1_3.KeeperRegistryUpkeepGasLimitSet: + err = rs.handleUpkeepGasLimitSet(broadcast) + + case *registry1_2.KeeperRegistryUpkeepReceived, + *registry1_3.KeeperRegistryUpkeepReceived: + err = rs.handleUpkeepReceived(broadcast) + + case *registry1_2.KeeperRegistryUpkeepMigrated, + *registry1_3.KeeperRegistryUpkeepMigrated: + err = rs.handleUpkeepMigrated(broadcast) + + case *registry1_3.KeeperRegistryUpkeepPaused: + err = rs.handleUpkeepPaused(broadcast) + + case *registry1_3.KeeperRegistryUpkeepUnpaused: + err = rs.handleUpkeepUnpaused(broadcast) + + case *registry1_3.KeeperRegistryUpkeepCheckDataUpdated: + err = 
rs.handleUpkeepCheckDataUpdated(broadcast) + + default: + rs.logger.Warn("unexpected log type") + // Don't `continue` -- we still want to mark this log as consumed + } + + if err != nil { + rs.logger.Error(err) + } + + err = rs.logBroadcaster.MarkConsumed(broadcast) + if err != nil { + rs.logger.Error(errors.Wrapf(err, "unable to mark %T log as consumed, log: %v", broadcast.RawLog(), broadcast.String())) + } + } +} + +func (rs *RegistrySynchronizer) handleSyncRegistryLog(broadcast log.Broadcast) error { + rs.logger.Debugw("processing SyncRegistry log", "txHash", broadcast.RawLog().TxHash.Hex()) + + _, err := rs.syncRegistry() + if err != nil { + return errors.Wrap(err, "unable to sync registry") + } + return nil +} + +func (rs *RegistrySynchronizer) handleUpkeepCancelled(broadcast log.Broadcast) error { + rs.logger.Debugw("processing UpkeepCanceled log", "txHash", broadcast.RawLog().TxHash.Hex()) + + cancelledID, err := rs.registryWrapper.GetCancelledUpkeepIDFromLog(broadcast) + if err != nil { + return errors.Wrap(err, "Unable to fetch cancelled upkeep ID from log") + } + + affected, err := rs.orm.BatchDeleteUpkeepsForJob(rs.job.ID, []big.Big{*big.New(cancelledID)}) + if err != nil { + return errors.Wrap(err, "unable to batch delete upkeeps") + } + rs.logger.Debugw(fmt.Sprintf("deleted %v upkeep registrations", affected), "txHash", broadcast.RawLog().TxHash.Hex()) + return nil +} + +func (rs *RegistrySynchronizer) handleUpkeepRegistered(broadcast log.Broadcast) error { + rs.logger.Debugw("processing UpkeepRegistered log", "txHash", broadcast.RawLog().TxHash.Hex()) + + registry, err := rs.orm.RegistryForJob(rs.job.ID) + if err != nil { + return errors.Wrap(err, "unable to find registry for job") + } + + upkeepID, err := rs.registryWrapper.GetUpkeepIdFromRegistrationLog(broadcast) + if err != nil { + return errors.Wrap(err, "Unable to fetch upkeep ID from registration log") + } + + err = rs.syncUpkeep(&rs.registryWrapper, registry, big.New(upkeepID)) + if err != nil 
{ + return errors.Wrapf(err, "failed to sync upkeep, log: %v", broadcast.String()) + } + return nil +} + +func (rs *RegistrySynchronizer) handleUpkeepPerformed(broadcast log.Broadcast) error { + rs.logger.Debugw("processing UpkeepPerformed log", "jobID", rs.job.ID, "txHash", broadcast.RawLog().TxHash.Hex()) + + log, err := rs.registryWrapper.ParseUpkeepPerformedLog(broadcast) + if err != nil { + return errors.Wrap(err, "Unable to fetch upkeep ID from performed log") + } + rowsAffected, err := rs.orm.SetLastRunInfoForUpkeepOnJob(rs.job.ID, big.New(log.UpkeepID), int64(broadcast.RawLog().BlockNumber), ethkey.EIP55AddressFromAddress(log.FromKeeper)) + if err != nil { + return errors.Wrap(err, "failed to set last run to 0") + } + rs.logger.Debugw("updated db for UpkeepPerformed log", + "jobID", rs.job.ID, + "upkeepID", log.UpkeepID.String(), + "blockNumber", int64(broadcast.RawLog().BlockNumber), + "fromAddr", ethkey.EIP55AddressFromAddress(log.FromKeeper), + "rowsAffected", rowsAffected, + ) + return nil +} + +func (rs *RegistrySynchronizer) handleUpkeepGasLimitSet(broadcast log.Broadcast) error { + rs.logger.Debugw("processing UpkeepGasLimitSet log", "jobID", rs.job.ID, "txHash", broadcast.RawLog().TxHash.Hex()) + + registry, err := rs.orm.RegistryForJob(rs.job.ID) + if err != nil { + return errors.Wrap(err, "unable to find registry for job") + } + + upkeepID, err := rs.registryWrapper.GetIDFromGasLimitSetLog(broadcast) + if err != nil { + return errors.Wrap(err, "Unable to fetch upkeep ID from gas limit set log") + } + + err = rs.syncUpkeep(&rs.registryWrapper, registry, big.New(upkeepID)) + if err != nil { + return errors.Wrapf(err, "failed to sync upkeep, log: %v", broadcast.String()) + } + return nil +} + +func (rs *RegistrySynchronizer) handleUpkeepReceived(broadcast log.Broadcast) error { + rs.logger.Debugw("processing UpkeepReceived log", "txHash", broadcast.RawLog().TxHash.Hex()) + + registry, err := rs.orm.RegistryForJob(rs.job.ID) + if err != nil { + return 
errors.Wrap(err, "unable to find registry for job") + } + + upkeepID, err := rs.registryWrapper.GetUpkeepIdFromReceivedLog(broadcast) + if err != nil { + return errors.Wrap(err, "Unable to fetch upkeep ID from received log") + } + + err = rs.syncUpkeep(&rs.registryWrapper, registry, big.New(upkeepID)) + if err != nil { + return errors.Wrapf(err, "failed to sync upkeep, log: %v", broadcast.String()) + } + return nil +} + +func (rs *RegistrySynchronizer) handleUpkeepMigrated(broadcast log.Broadcast) error { + rs.logger.Debugw("processing UpkeepMigrated log", "txHash", broadcast.RawLog().TxHash.Hex()) + + migratedID, err := rs.registryWrapper.GetUpkeepIdFromMigratedLog(broadcast) + if err != nil { + return errors.Wrap(err, "Unable to fetch migrated upkeep ID from log") + } + + affected, err := rs.orm.BatchDeleteUpkeepsForJob(rs.job.ID, []big.Big{*big.New(migratedID)}) + if err != nil { + return errors.Wrap(err, "unable to batch delete upkeeps") + } + rs.logger.Debugw(fmt.Sprintf("deleted %v upkeep registrations", affected), "txHash", broadcast.RawLog().TxHash.Hex()) + return nil +} + +func (rs *RegistrySynchronizer) handleUpkeepPaused(broadcast log.Broadcast) error { + rs.logger.Debugw("processing UpkeepPaused log", "txHash", broadcast.RawLog().TxHash.Hex()) + + pausedUpkeepId, err := rs.registryWrapper.GetUpkeepIdFromUpkeepPausedLog(broadcast) + if err != nil { + return errors.Wrap(err, "Unable to fetch upkeep ID from upkeep paused log") + } + + _, err = rs.orm.BatchDeleteUpkeepsForJob(rs.job.ID, []big.Big{*big.New(pausedUpkeepId)}) + if err != nil { + return errors.Wrap(err, "unable to batch delete upkeeps") + } + rs.logger.Debugw(fmt.Sprintf("paused upkeep %s", pausedUpkeepId.String()), "txHash", broadcast.RawLog().TxHash.Hex()) + return nil +} + +func (rs *RegistrySynchronizer) handleUpkeepUnpaused(broadcast log.Broadcast) error { + rs.logger.Debugw("processing UpkeepUnpaused log", "txHash", broadcast.RawLog().TxHash.Hex()) + + registry, err := 
rs.orm.RegistryForJob(rs.job.ID) + if err != nil { + return errors.Wrap(err, "unable to find registry for job") + } + + unpausedUpkeepId, err := rs.registryWrapper.GetUpkeepIdFromUpkeepUnpausedLog(broadcast) + if err != nil { + return errors.Wrap(err, "Unable to fetch upkeep ID from upkeep unpaused log") + } + + err = rs.syncUpkeep(&rs.registryWrapper, registry, big.New(unpausedUpkeepId)) + if err != nil { + return errors.Wrapf(err, "failed to sync upkeep, log: %s", broadcast.String()) + } + rs.logger.Debugw(fmt.Sprintf("unpaused upkeep %s", unpausedUpkeepId.String()), "txHash", broadcast.RawLog().TxHash.Hex()) + return nil +} + +func (rs *RegistrySynchronizer) handleUpkeepCheckDataUpdated(broadcast log.Broadcast) error { + rs.logger.Debugw("processing Upkeep check data updated log", "txHash", broadcast.RawLog().TxHash.Hex()) + + registry, err := rs.orm.RegistryForJob(rs.job.ID) + if err != nil { + return errors.Wrap(err, "unable to find registry for job") + } + + updateLog, err := rs.registryWrapper.ParseUpkeepCheckDataUpdatedLog(broadcast) + if err != nil { + return errors.Wrap(err, "Unable to parse update log from upkeep check data updated log") + } + + err = rs.syncUpkeep(&rs.registryWrapper, registry, big.New(updateLog.UpkeepID)) + if err != nil { + return errors.Wrapf(err, "unable to update check data for upkeep %s", updateLog.UpkeepID.String()) + } + + rs.logger.Debugw(fmt.Sprintf("updated check data for upkeep %s", updateLog.UpkeepID.String()), "txHash", broadcast.RawLog().TxHash.Hex()) + return nil +} diff --git a/core/services/keeper/registry_synchronizer_sync.go b/core/services/keeper/registry_synchronizer_sync.go new file mode 100644 index 00000000..7765b242 --- /dev/null +++ b/core/services/keeper/registry_synchronizer_sync.go @@ -0,0 +1,187 @@ +package keeper + +import ( + "encoding/binary" + "math" + "sync" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +func (rs *RegistrySynchronizer) fullSync() { + rs.logger.Debugf("fullSyncing registry %s", rs.job.KeeperSpec.ContractAddress.Hex()) + + registry, err := rs.syncRegistry() + if err != nil { + rs.logger.Error(errors.Wrap(err, "failed to sync registry during fullSyncing registry")) + return + } + + if err := rs.fullSyncUpkeeps(registry); err != nil { + rs.logger.Error(errors.Wrap(err, "failed to sync upkeeps during fullSyncing registry")) + return + } + rs.logger.Debugf("fullSyncing registry successful %s", rs.job.KeeperSpec.ContractAddress.Hex()) +} + +func (rs *RegistrySynchronizer) syncRegistry() (Registry, error) { + registry, err := rs.newRegistryFromChain() + if err != nil { + return Registry{}, errors.Wrap(err, "failed to get new registry from chain") + } + + err = rs.orm.UpsertRegistry(®istry) + if err != nil { + return Registry{}, errors.Wrap(err, "failed to upsert registry") + } + + return registry, nil +} + +func (rs *RegistrySynchronizer) fullSyncUpkeeps(reg Registry) error { + activeUpkeepIDs, err := rs.registryWrapper.GetActiveUpkeepIDs(nil) + if err != nil { + return errors.Wrap(err, "unable to get active upkeep IDs") + } + + existingUpkeepIDs, err := rs.orm.AllUpkeepIDsForRegistry(reg.ID) + if err != nil { + return errors.Wrap(err, "unable to fetch existing upkeep IDs from DB") + } + + activeSet := make(map[string]bool) + allActiveUpkeeps := make([]big.Big, 0) + for _, upkeepID := range activeUpkeepIDs { + activeSet[upkeepID.String()] = true + allActiveUpkeeps = append(allActiveUpkeeps, *big.New(upkeepID)) + } + rs.batchSyncUpkeepsOnRegistry(reg, allActiveUpkeeps) + + // All upkeeps in existingUpkeepIDs, not in activeUpkeepIDs should be deleted + canceled := make([]big.Big, 0) + for _, upkeepID := range existingUpkeepIDs { + if _, found := activeSet[upkeepID.ToInt().String()]; !found { + canceled = 
append(canceled, upkeepID) + } + } + if _, err := rs.orm.BatchDeleteUpkeepsForJob(rs.job.ID, canceled); err != nil { + return errors.Wrap(err, "failed to batch delete upkeeps from job") + } + return nil +} + +// batchSyncUpkeepsOnRegistry syncs upkeeps at a time in parallel +// for all the IDs within newUpkeeps slice +func (rs *RegistrySynchronizer) batchSyncUpkeepsOnRegistry(reg Registry, newUpkeeps []big.Big) { + wg := sync.WaitGroup{} + wg.Add(len(newUpkeeps)) + chSyncUpkeepQueue := make(chan struct{}, rs.syncUpkeepQueueSize) + + done := func() { <-chSyncUpkeepQueue; wg.Done() } + for i := range newUpkeeps { + select { + case <-rs.chStop: + return + case chSyncUpkeepQueue <- struct{}{}: + go rs.syncUpkeepWithCallback(&rs.registryWrapper, reg, &newUpkeeps[i], done) + } + } + + wg.Wait() +} + +func (rs *RegistrySynchronizer) syncUpkeepWithCallback(getter upkeepGetter, registry Registry, upkeepID *big.Big, doneCallback func()) { + defer doneCallback() + + if err := rs.syncUpkeep(getter, registry, upkeepID); err != nil { + rs.logger.With("err", err.Error()).With( + "upkeepID", NewUpkeepIdentifier(upkeepID).String(), + "registryContract", registry.ContractAddress.Hex(), + ).Error("unable to sync upkeep on registry") + } +} + +func (rs *RegistrySynchronizer) syncUpkeep(getter upkeepGetter, registry Registry, upkeepID *big.Big) error { + upkeep, err := getter.GetUpkeep(nil, upkeepID.ToInt()) + if err != nil { + return errors.Wrap(err, "failed to get upkeep config") + } + + if upkeep.ExecuteGas <= uint32(0) { + return errors.Errorf("execute gas is zero for upkeep %s", NewUpkeepIdentifier(upkeepID).String()) + } + + positioningConstant, err := CalcPositioningConstant(upkeepID, registry.ContractAddress) + if err != nil { + return errors.Wrap(err, "failed to calc positioning constant") + } + newUpkeep := UpkeepRegistration{ + CheckData: upkeep.CheckData, + ExecuteGas: upkeep.ExecuteGas, + RegistryID: registry.ID, + PositioningConstant: positioningConstant, + UpkeepID: 
upkeepID, + } + if err := rs.orm.UpsertUpkeep(&newUpkeep); err != nil { + return errors.Wrap(err, "failed to upsert upkeep") + } + + if err := rs.orm.UpdateUpkeepLastKeeperIndex(rs.job.ID, upkeepID, ethkey.EIP55AddressFromAddress(upkeep.LastKeeper)); err != nil { + return errors.Wrap(err, "failed to update upkeep last keeper index") + } + + return nil +} + +// newRegistryFromChain returns a Registry struct with fields synched from those on chain +func (rs *RegistrySynchronizer) newRegistryFromChain() (Registry, error) { + fromAddress := rs.effectiveKeeperAddress + contractAddress := rs.job.KeeperSpec.ContractAddress + + registryConfig, err := rs.registryWrapper.GetConfig(nil) + if err != nil { + rs.jrm.TryRecordError(rs.job.ID, err.Error()) + return Registry{}, errors.Wrap(err, "failed to get contract config") + } + + keeperIndex := int32(-1) + keeperMap := map[ethkey.EIP55Address]int32{} + for idx, address := range registryConfig.KeeperAddresses { + keeperMap[ethkey.EIP55AddressFromAddress(address)] = int32(idx) + if address == fromAddress { + keeperIndex = int32(idx) + } + } + if keeperIndex == -1 { + rs.logger.Warnf("unable to find %s in keeper list on registry %s", fromAddress.Hex(), contractAddress.Hex()) + } + + return Registry{ + BlockCountPerTurn: registryConfig.BlockCountPerTurn, + CheckGas: registryConfig.CheckGas, + ContractAddress: contractAddress, + FromAddress: rs.job.KeeperSpec.FromAddress, + JobID: rs.job.ID, + KeeperIndex: keeperIndex, + NumKeepers: int32(len(registryConfig.KeeperAddresses)), + KeeperIndexMap: keeperMap, + }, nil +} + +// CalcPositioningConstant calculates a positioning constant. 
+// The positioning constant is fixed because upkeepID and registryAddress are immutable +func CalcPositioningConstant(upkeepID *big.Big, registryAddress ethkey.EIP55Address) (int32, error) { + upkeepBytes := make([]byte, binary.MaxVarintLen64) + binary.PutVarint(upkeepBytes, upkeepID.Mod(big.NewI(math.MaxInt64)).Int64()) + bytesToHash := utils.ConcatBytes(upkeepBytes, registryAddress.Bytes()) + checksum, err := utils.Keccak256(bytesToHash) + if err != nil { + return 0, err + } + constant := binary.BigEndian.Uint16(checksum[:2]) + return int32(constant), nil +} diff --git a/core/services/keeper/registry_synchronizer_sync_test.go b/core/services/keeper/registry_synchronizer_sync_test.go new file mode 100644 index 00000000..2cd401f0 --- /dev/null +++ b/core/services/keeper/registry_synchronizer_sync_test.go @@ -0,0 +1,76 @@ +package keeper + +import ( + "errors" + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +// GetUpkeepFailure implements the upkeepGetter interface with an induced error and nil +// config response. 
+type GetUpkeepFailure struct{} + +var errGetUpkeep = errors.New("chain connection error example") + +func (g *GetUpkeepFailure) GetUpkeep(opts *bind.CallOpts, id *big.Int) (*UpkeepConfig, error) { + return nil, fmt.Errorf("%w [%s]: getConfig v1.%d", ErrContractCallFailure, errGetUpkeep, RegistryVersion_1_2) +} + +func TestSyncUpkeepWithCallback_UpkeepNotFound(t *testing.T) { + log, logObserver := logger.TestLoggerObserved(t, zapcore.ErrorLevel) + synchronizer := &RegistrySynchronizer{ + logger: log.(logger.SugaredLogger), + } + + addr := ethkey.EIP55Address(testutils.NewAddress().Hex()) + registry := Registry{ + ContractAddress: addr, + } + + o, ok := new(big.Int).SetString("5032485723458348569331745", 10) + if !ok { + t.FailNow() + } + + id := ubig.New(o) + count := 0 + doneFunc := func() { + count++ + } + + getter := &GetUpkeepFailure{} + synchronizer.syncUpkeepWithCallback(getter, registry, id, doneFunc) + + // logs should have the upkeep identifier included in the error context properly formatted + require.Equal(t, 1, logObserver.Len()) + + keys := map[string]bool{} + for _, entry := range logObserver.All() { + for _, field := range entry.Context { + switch field.Key { + case "err": + require.Equal(t, "failed to get upkeep config: failure in calling contract [chain connection error example]: getConfig v1.2", field.String) + case "upkeepID": + require.Equal(t, fmt.Sprintf("UPx%064s", "429ab990419450db80821"), field.String) + case "registryContract": + require.Equal(t, addr.Hex(), field.String) + default: + continue + } + keys[field.Key] = true + } + } + + require.Equal(t, map[string]bool{"upkeepID": true, "err": true, "registryContract": true}, keys) + require.Equal(t, 1, count, "callback function should run") +} diff --git a/core/services/keeper/upkeep_executer.go b/core/services/keeper/upkeep_executer.go new file mode 100644 index 00000000..0c934952 --- /dev/null +++ b/core/services/keeper/upkeep_executer.go @@ -0,0 +1,288 @@ +package keeper + +import ( + 
"context" + "fmt" + "math/big" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +const ( + executionQueueSize = 10 + maxUpkeepPerformGas = 5_000_000 // Max perform gas for upkeep is 5M on all chains for v1.x +) + +// UpkeepExecuter fulfills Service and HeadTrackable interfaces +var ( + _ job.ServiceCtx = (*UpkeepExecuter)(nil) + _ httypes.HeadTrackable = (*UpkeepExecuter)(nil) +) + +var ( + promCheckUpkeepExecutionTime = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "keeper_check_upkeep_execution_time", + Help: "Time taken to fully execute the check upkeep logic", + }, + []string{"upkeepID"}, + ) +) + +type UpkeepExecuterConfig interface { + MaxGracePeriod() int64 + TurnLookBack() int64 + Registry() config.Registry +} + +// UpkeepExecuter implements the logic to communicate with KeeperRegistry +type UpkeepExecuter struct { + services.StateMachine + chStop services.StopChan + ethClient evmclient.Client + config UpkeepExecuterConfig + executionQueue chan struct{} + headBroadcaster httypes.HeadBroadcasterRegistry + gasEstimator gas.EvmFeeEstimator + job job.Job + mailbox 
*mailbox.Mailbox[*evmtypes.Head] + orm ORM + pr pipeline.Runner + logger logger.Logger + wgDone sync.WaitGroup + effectiveKeeperAddress common.Address +} + +// NewUpkeepExecuter is the constructor of UpkeepExecuter +func NewUpkeepExecuter( + job job.Job, + orm ORM, + pr pipeline.Runner, + ethClient evmclient.Client, + headBroadcaster httypes.HeadBroadcaster, + gasEstimator gas.EvmFeeEstimator, + logger logger.Logger, + config UpkeepExecuterConfig, + effectiveKeeperAddress common.Address, +) *UpkeepExecuter { + return &UpkeepExecuter{ + chStop: make(services.StopChan), + ethClient: ethClient, + executionQueue: make(chan struct{}, executionQueueSize), + headBroadcaster: headBroadcaster, + gasEstimator: gasEstimator, + job: job, + mailbox: mailbox.NewSingle[*evmtypes.Head](), + config: config, + orm: orm, + pr: pr, + effectiveKeeperAddress: effectiveKeeperAddress, + logger: logger.Named("UpkeepExecuter"), + } +} + +// Start starts the upkeep executer logic +func (ex *UpkeepExecuter) Start(context.Context) error { + return ex.StartOnce("UpkeepExecuter", func() error { + ex.wgDone.Add(2) + go ex.run() + latestHead, unsubscribeHeads := ex.headBroadcaster.Subscribe(ex) + if latestHead != nil { + ex.mailbox.Deliver(latestHead) + } + go func() { + defer unsubscribeHeads() + defer ex.wgDone.Done() + <-ex.chStop + }() + return nil + }) +} + +// Close stops and closes upkeep executer +func (ex *UpkeepExecuter) Close() error { + return ex.StopOnce("UpkeepExecuter", func() error { + close(ex.chStop) + ex.wgDone.Wait() + return nil + }) +} + +// OnNewLongestChain handles the given head of a new longest chain +func (ex *UpkeepExecuter) OnNewLongestChain(_ context.Context, head *evmtypes.Head) { + ex.mailbox.Deliver(head) +} + +func (ex *UpkeepExecuter) run() { + defer ex.wgDone.Done() + for { + select { + case <-ex.chStop: + return + case <-ex.mailbox.Notify(): + ex.processActiveUpkeeps() + } + } +} + +func (ex *UpkeepExecuter) processActiveUpkeeps() { + // Keepers could miss 
their turn in the turn taking algo if they are too overloaded + // with work because processActiveUpkeeps() blocks + head, exists := ex.mailbox.Retrieve() + if !exists { + ex.logger.Info("no head to retrieve. It might have been skipped") + return + } + + ex.logger.Debugw("checking active upkeeps", "blockheight", head.Number) + + registry, err := ex.orm.RegistryByContractAddress(ex.job.KeeperSpec.ContractAddress) + if err != nil { + ex.logger.Error(errors.Wrap(err, "unable to load registry")) + return + } + + var activeUpkeeps []UpkeepRegistration + turnBinary, err2 := ex.turnBlockHashBinary(registry, head, ex.config.TurnLookBack()) + if err2 != nil { + ex.logger.Error(errors.Wrap(err2, "unable to get turn block number hash")) + return + } + activeUpkeeps, err2 = ex.orm.EligibleUpkeepsForRegistry( + ex.job.KeeperSpec.ContractAddress, + head.Number, + ex.config.MaxGracePeriod(), + turnBinary) + if err2 != nil { + ex.logger.Error(errors.Wrap(err2, "unable to load active registrations")) + return + } + + if head.Number%10 == 0 { + // Log this once every 10 blocks + fetchedUpkeepIDs := make([]string, len(activeUpkeeps)) + for i, activeUpkeep := range activeUpkeeps { + fetchedUpkeepIDs[i] = NewUpkeepIdentifier(activeUpkeep.UpkeepID).String() + } + ex.logger.Debugw("Fetched list of active upkeeps", "blockNum", head.Number, "active upkeeps list", fetchedUpkeepIDs) + } + + wg := sync.WaitGroup{} + wg.Add(len(activeUpkeeps)) + done := func() { + <-ex.executionQueue + wg.Done() + } + for _, reg := range activeUpkeeps { + ex.executionQueue <- struct{}{} + go ex.execute(reg, head, done) + } + + wg.Wait() + ex.logger.Debugw("Finished checking upkeeps", "blockNum", head.Number) +} + +// execute triggers the pipeline run +func (ex *UpkeepExecuter) execute(upkeep UpkeepRegistration, head *evmtypes.Head, done func()) { + defer done() + + start := time.Now() + svcLogger := ex.logger.With("jobID", ex.job.ID, "blockNum", head.Number, "upkeepID", upkeep.UpkeepID) + 
svcLogger.Debugw("checking upkeep", "lastRunBlockHeight", upkeep.LastRunBlockHeight, "lastKeeperIndex", upkeep.LastKeeperIndex) + + ctxService, cancel := ex.chStop.CtxCancel(context.WithTimeout(context.Background(), time.Minute)) + defer cancel() + + evmChainID := "" + if ex.job.KeeperSpec.EVMChainID != nil { + evmChainID = ex.job.KeeperSpec.EVMChainID.String() + } + + var gasPrice, gasTipCap, gasFeeCap *assets.Wei + // effectiveKeeperAddress is always fromAddress when forwarding is not enabled. + // when forwarding is enabled, effectiveKeeperAddress is on-chain forwarder. + vars := pipeline.NewVarsFrom(buildJobSpec(ex.job, ex.effectiveKeeperAddress, upkeep, ex.config.Registry(), gasPrice, gasTipCap, gasFeeCap, evmChainID)) + + // DotDagSource in database is empty because all the Keeper pipeline runs make use of the same observation source + ex.job.PipelineSpec.DotDagSource = pipeline.KeepersObservationSource + run := pipeline.NewRun(*ex.job.PipelineSpec, vars) + + if _, err := ex.pr.Run(ctxService, run, svcLogger, true, nil); err != nil { + svcLogger.Error(errors.Wrap(err, "failed executing run")) + return + } + + // Only after task runs where a tx was broadcast + if run.State == pipeline.RunStatusCompleted { + rowsAffected, err := ex.orm.SetLastRunInfoForUpkeepOnJob(ex.job.ID, upkeep.UpkeepID, head.Number, upkeep.Registry.FromAddress, pg.WithParentCtx(ctxService)) + if err != nil { + svcLogger.Error(errors.Wrap(err, "failed to set last run height for upkeep")) + } + svcLogger.Debugw("execute pipeline status completed", "fromAddr", upkeep.Registry.FromAddress, "rowsAffected", rowsAffected) + + elapsed := time.Since(start) + promCheckUpkeepExecutionTime. + WithLabelValues(upkeep.PrettyID()). 
+ Set(float64(elapsed)) + } +} + +func (ex *UpkeepExecuter) turnBlockHashBinary(registry Registry, head *evmtypes.Head, lookback int64) (string, error) { + turnBlock := head.Number - (head.Number % int64(registry.BlockCountPerTurn)) - lookback + block, err := ex.ethClient.HeadByNumber(context.Background(), big.NewInt(turnBlock)) + if err != nil { + return "", err + } + hashAtHeight := block.Hash + binaryString := fmt.Sprintf("%b", hashAtHeight.Big()) + return binaryString, nil +} + +func buildJobSpec( + jb job.Job, + effectiveKeeperAddress common.Address, + upkeep UpkeepRegistration, + ormConfig RegistryGasChecker, + gasPrice *assets.Wei, + gasTipCap *assets.Wei, + gasFeeCap *assets.Wei, + chainID string, +) map[string]interface{} { + return map[string]interface{}{ + "jobSpec": map[string]interface{}{ + "jobID": jb.ID, + "fromAddress": upkeep.Registry.FromAddress.String(), + "effectiveKeeperAddress": effectiveKeeperAddress.String(), + "contractAddress": upkeep.Registry.ContractAddress.String(), + "upkeepID": upkeep.UpkeepID.String(), + "prettyID": upkeep.PrettyID(), + "pipelineSpec": &pipeline.Spec{ + ForwardingAllowed: jb.ForwardingAllowed, + }, + "performUpkeepGasLimit": maxUpkeepPerformGas + ormConfig.PerformGasOverhead(), + "maxPerformDataSize": ormConfig.MaxPerformDataSize(), + "gasPrice": gasPrice.ToInt(), + "gasTipCap": gasTipCap.ToInt(), + "gasFeeCap": gasFeeCap.ToInt(), + "evmChainID": chainID, + }, + } +} diff --git a/core/services/keeper/upkeep_executer_test.go b/core/services/keeper/upkeep_executer_test.go new file mode 100644 index 00000000..63228c4b --- /dev/null +++ b/core/services/keeper/upkeep_executer_test.go @@ -0,0 +1,346 @@ +package keeper_test + +import ( + "math/big" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + 
"github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + gasmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + txmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr/mocks" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +func newHead() evmtypes.Head { + return evmtypes.NewHead(big.NewInt(20), utils.NewHash(), utils.NewHash(), 1000, ubig.NewI(0)) +} + +func mockEstimator(t *testing.T) gas.EvmFeeEstimator { + // note: estimator will only return 1 of legacy or dynamic fees (not both) + // assumed to call legacy estimator only + estimator := gasmocks.NewEvmFeeEstimator(t) + estimator.On("GetFee", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Maybe().Return(gas.EvmFee{ + Legacy: assets.GWei(60), + }, uint32(60), nil) + return estimator +} + +func 
setup(t *testing.T, estimator gas.EvmFeeEstimator, overrideFn func(c *plugin.Config, s *plugin.Secrets)) ( + *sqlx.DB, + plugin.GeneralConfig, + *evmclimocks.Client, + *keeper.UpkeepExecuter, + keeper.Registry, + keeper.UpkeepRegistration, + job.Job, + cltest.JobPipelineV2TestHelper, + *txmmocks.MockEvmTxManager, + keystore.Master, + legacyevm.Chain, + keeper.ORM, +) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Keeper.TurnLookBack = ptr[int64](0) + if fn := overrideFn; fn != nil { + fn(c, s) + } + }) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + ethClient := evmtest.NewEthClientMock(t) + ethClient.On("ConfiguredChainID").Return(cfg.EVMConfigs()[0].ChainID.ToInt()).Maybe() + ethClient.On("IsL2").Return(false).Maybe() + ethClient.On("HeadByNumber", mock.Anything, mock.Anything).Maybe().Return(&evmtypes.Head{Number: 1, Hash: utils.NewHash()}, nil) + txm := txmmocks.NewMockEvmTxManager(t) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{TxManager: txm, DB: db, Client: ethClient, KeyStore: keyStore.Eth(), GeneralConfig: cfg, GasEstimator: estimator}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + jpv2 := cltest.NewJobPipelineV2(t, cfg.WebServer(), cfg.JobPipeline(), cfg.Database(), legacyChains, db, keyStore, nil, nil) + ch := evmtest.MustGetDefaultChain(t, legacyChains) + orm := keeper.NewORM(db, logger.TestLogger(t), ch.Config().Database()) + registry, job := cltest.MustInsertKeeperRegistry(t, db, orm, keyStore.Eth(), 0, 1, 20) + + lggr := logger.TestLogger(t) + executer := keeper.NewUpkeepExecuter(job, orm, jpv2.Pr, ethClient, ch.HeadBroadcaster(), ch.GasEstimator(), lggr, ch.Config().Keeper(), job.KeeperSpec.FromAddress.Address()) + upkeep := cltest.MustInsertUpkeepForRegistry(t, db, ch.Config().Database(), registry) + servicetest.Run(t, executer) + return db, cfg, ethClient, executer, registry, upkeep, job, jpv2, txm, 
keyStore, ch, orm +} + +var checkUpkeepResponse = struct { + PerformData []byte + MaxLinkPayment *big.Int + GasLimit *big.Int + GasWei *big.Int + LinkEth *big.Int +}{ + PerformData: common.Hex2Bytes("1234"), + MaxLinkPayment: big.NewInt(0), // doesn't matter + GasLimit: big.NewInt(2_000_000), + GasWei: big.NewInt(0), // doesn't matter + LinkEth: big.NewInt(0), // doesn't matter +} + +var checkPerformResponse = struct { + Success bool +}{ + Success: true, +} + +func Test_UpkeepExecuter_ErrorsIfStartedTwice(t *testing.T) { + t.Parallel() + _, _, _, executer, _, _, _, _, _, _, _, _ := setup(t, mockEstimator(t), nil) + err := executer.Start(testutils.Context(t)) // already started in setup() + require.Error(t, err) +} + +func Test_UpkeepExecuter_PerformsUpkeep_Happy(t *testing.T) { + taskRuns := 11 + + t.Parallel() + + t.Run("runs upkeep on triggering block number", func(t *testing.T) { + db, config, ethMock, executer, registry, upkeep, job, jpv2, txm, _, _, _ := setup(t, mockEstimator(t), + func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].ChainID = (*ubig.Big)(testutils.SimulatedChainID) + }) + + gasLimit := 5_000_000 + config.Keeper().Registry().PerformGasOverhead() + + ethTxCreated := cltest.NewAwaiter() + txm.On("CreateTransaction", + mock.Anything, + mock.MatchedBy(func(txRequest txmgr.TxRequest) bool { return txRequest.FeeLimit == gasLimit }), + ). + Once(). + Return(txmgr.Tx{ + ID: 1, + }, nil). 
+ Run(func(mock.Arguments) { ethTxCreated.ItHappened() }) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_1ABI, registry.ContractAddress.Address()) + registryMock.MockMatchedResponse( + "checkUpkeep", + func(callArgs ethereum.CallMsg) bool { + return callArgs.GasPrice == nil && + callArgs.Gas == 0 + }, + checkUpkeepResponse, + ) + registryMock.MockMatchedResponse( + "performUpkeep", + func(callArgs ethereum.CallMsg) bool { return true }, + checkPerformResponse, + ) + + head := newHead() + executer.OnNewLongestChain(testutils.Context(t), &head) + ethTxCreated.AwaitOrFail(t) + runs := cltest.WaitForPipelineComplete(t, 0, job.ID, 1, taskRuns, jpv2.Jrm, time.Second, 100*time.Millisecond) + require.Len(t, runs, 1) + assert.False(t, runs[0].HasErrors()) + assert.False(t, runs[0].HasFatalErrors()) + waitLastRunHeight(t, db, upkeep, 20) + }) + + t.Run("runs upkeep on triggering block number on EIP1559 and non-EIP1559 chains", func(t *testing.T) { + runTest := func(t *testing.T, eip1559 bool) { + db, config, ethMock, executer, registry, upkeep, job, jpv2, txm, _, _, _ := setup(t, mockEstimator(t), func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.EIP1559DynamicFees = &eip1559 + c.EVM[0].ChainID = (*ubig.Big)(testutils.SimulatedChainID) + }) + + gasLimit := 5_000_000 + config.Keeper().Registry().PerformGasOverhead() + + ethTxCreated := cltest.NewAwaiter() + txm.On("CreateTransaction", + mock.Anything, + mock.MatchedBy(func(txRequest txmgr.TxRequest) bool { return txRequest.FeeLimit == gasLimit }), + ). + Once(). + Return(txmgr.Tx{ + ID: 1, + }, nil). 
+ Run(func(mock.Arguments) { ethTxCreated.ItHappened() }) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_1ABI, registry.ContractAddress.Address()) + registryMock.MockMatchedResponse( + "checkUpkeep", + func(callArgs ethereum.CallMsg) bool { + return callArgs.GasPrice == nil && + callArgs.Gas == 0 + }, + checkUpkeepResponse, + ) + registryMock.MockMatchedResponse( + "performUpkeep", + func(callArgs ethereum.CallMsg) bool { return true }, + checkPerformResponse, + ) + + head := newHead() + + executer.OnNewLongestChain(testutils.Context(t), &head) + ethTxCreated.AwaitOrFail(t) + runs := cltest.WaitForPipelineComplete(t, 0, job.ID, 1, taskRuns, jpv2.Jrm, time.Second, 100*time.Millisecond) + require.Len(t, runs, 1) + assert.False(t, runs[0].HasErrors()) + assert.False(t, runs[0].HasFatalErrors()) + waitLastRunHeight(t, db, upkeep, 20) + } + + t.Run("EIP1559", func(t *testing.T) { + runTest(t, true) + }) + + t.Run("non-EIP1559", func(t *testing.T) { + runTest(t, false) + }) + }) + + t.Run("errors if submission key not found", func(t *testing.T) { + _, _, ethMock, executer, registry, _, job, jpv2, _, keyStore, _, _ := setup(t, mockEstimator(t), func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].ChainID = (*ubig.Big)(testutils.SimulatedChainID) + }) + + // replace expected key with random one + _, err := keyStore.Eth().Create(testutils.SimulatedChainID) + require.NoError(t, err) + _, err = keyStore.Eth().Delete(job.KeeperSpec.FromAddress.Hex()) + require.NoError(t, err) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_1ABI, registry.ContractAddress.Address()) + registryMock.MockMatchedResponse( + "checkUpkeep", + func(callArgs ethereum.CallMsg) bool { + return callArgs.GasPrice == nil && + callArgs.Gas == 0 + }, + checkUpkeepResponse, + ) + registryMock.MockMatchedResponse( + "performUpkeep", + func(callArgs ethereum.CallMsg) bool { return true }, + checkPerformResponse, + ) + + head := newHead() + 
executer.OnNewLongestChain(testutils.Context(t), &head) + runs := cltest.WaitForPipelineError(t, 0, job.ID, 1, taskRuns, jpv2.Jrm, time.Second, 100*time.Millisecond) + require.Len(t, runs, 1) + assert.True(t, runs[0].HasErrors()) + assert.True(t, runs[0].HasFatalErrors()) + }) + + t.Run("errors if submission chain not found", func(t *testing.T) { + db, _, ethMock, _, _, _, _, jpv2, _, keyStore, ch, orm := setup(t, mockEstimator(t), nil) + + registry, jb := cltest.MustInsertKeeperRegistry(t, db, orm, keyStore.Eth(), 0, 1, 20) + // change chain ID to non-configured chain + jb.KeeperSpec.EVMChainID = (*ubig.Big)(big.NewInt(999)) + cltest.MustInsertUpkeepForRegistry(t, db, ch.Config().Database(), registry) + lggr := logger.TestLogger(t) + executer := keeper.NewUpkeepExecuter(jb, orm, jpv2.Pr, ethMock, ch.HeadBroadcaster(), ch.GasEstimator(), lggr, ch.Config().Keeper(), jb.KeeperSpec.FromAddress.Address()) + err := executer.Start(testutils.Context(t)) + require.NoError(t, err) + head := newHead() + executer.OnNewLongestChain(testutils.Context(t), &head) + // TODO we want to see an errored run result once this is completed + // https://app.shortcut.com/pluginlabs/story/25397/remove-failearly-flag-from-eth-call-task + cltest.AssertPipelineRunsStays(t, jb.PipelineSpecID, db, 0) + }) + + t.Run("triggers if heads are skipped but later heads arrive within range", func(t *testing.T) { + db, config, ethMock, executer, registry, upkeep, job, jpv2, txm, _, _, _ := setup(t, mockEstimator(t), func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].ChainID = (*ubig.Big)(testutils.SimulatedChainID) + }) + + etxs := []cltest.Awaiter{ + cltest.NewAwaiter(), + cltest.NewAwaiter(), + } + gasLimit := 5_000_000 + config.Keeper().Registry().PerformGasOverhead() + txm.On("CreateTransaction", + mock.Anything, + mock.MatchedBy(func(txRequest txmgr.TxRequest) bool { return txRequest.FeeLimit == gasLimit }), + ). + Once(). + Return(txmgr.Tx{}, nil). 
+ Run(func(mock.Arguments) { etxs[0].ItHappened() }) + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_1ABI, registry.ContractAddress.Address()) + registryMock.MockResponse("checkUpkeep", checkUpkeepResponse) + registryMock.MockMatchedResponse( + "performUpkeep", + func(callArgs ethereum.CallMsg) bool { return true }, + checkPerformResponse, + ) + // turn falls somewhere between 20-39 (blockCountPerTurn=20) + // heads 20 thru 35 were skipped (e.g. due to node reboot) + head := cltest.Head(36) + + executer.OnNewLongestChain(testutils.Context(t), head) + runs := cltest.WaitForPipelineComplete(t, 0, job.ID, 1, taskRuns, jpv2.Jrm, time.Second, 100*time.Millisecond) + require.Len(t, runs, 1) + assert.False(t, runs[0].HasErrors()) + etxs[0].AwaitOrFail(t) + waitLastRunHeight(t, db, upkeep, 36) + }) +} + +func Test_UpkeepExecuter_PerformsUpkeep_Error(t *testing.T) { + t.Parallel() + + g := gomega.NewWithT(t) + + db, _, ethMock, executer, registry, _, _, _, _, _, _, _ := setup(t, mockEstimator(t), + func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].ChainID = (*ubig.Big)(testutils.SimulatedChainID) + }) + + var wasCalled atomic.Bool + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.Registry1_1ABI, registry.ContractAddress.Address()) + registryMock.MockRevertResponse("checkUpkeep").Run(func(args mock.Arguments) { + wasCalled.Store(true) + }) + + head := newHead() + executer.OnNewLongestChain(testutils.Context(t), &head) + + g.Eventually(wasCalled.Load).Should(gomega.Equal(true)) + + cfg := pgtest.NewQConfig(false) + txStore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg) + txes, err := txStore.GetAllTxes(testutils.Context(t)) + require.NoError(t, err) + require.Len(t, txes, 0) +} + +func ptr[T any](t T) *T { return &t } diff --git a/core/services/keeper/upkeep_executer_unit_test.go b/core/services/keeper/upkeep_executer_unit_test.go new file mode 100644 index 00000000..4df51c5f --- /dev/null +++ 
b/core/services/keeper/upkeep_executer_unit_test.go @@ -0,0 +1,79 @@ +package keeper + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +type registry struct { + pgo uint32 + mpds uint32 +} + +func (r *registry) CheckGasOverhead() uint32 { return uint32(0) } +func (r *registry) PerformGasOverhead() uint32 { return r.pgo } +func (r *registry) MaxPerformDataSize() uint32 { return r.mpds } + +func TestBuildJobSpec(t *testing.T) { + from := ethkey.EIP55Address(testutils.NewAddress().Hex()) + contract := ethkey.EIP55Address(testutils.NewAddress().Hex()) + chainID := "250" + jb := job.Job{ + ID: 10, + KeeperSpec: &job.KeeperSpec{ + FromAddress: from, + ContractAddress: contract, + }} + + upkeepID := big.NewI(4) + upkeep := UpkeepRegistration{ + Registry: Registry{ + FromAddress: from, + ContractAddress: contract, + CheckGas: 11, + }, + UpkeepID: upkeepID, + ExecuteGas: 12, + } + gasPrice := assets.NewWeiI(24) + gasTipCap := assets.NewWeiI(48) + gasFeeCap := assets.NewWeiI(72) + + r := ®istry{ + pgo: uint32(9), + mpds: uint32(1000), + } + + spec := buildJobSpec(jb, jb.KeeperSpec.FromAddress.Address(), upkeep, r, gasPrice, gasTipCap, gasFeeCap, chainID) + + expected := map[string]interface{}{ + "jobSpec": map[string]interface{}{ + "jobID": int32(10), + "fromAddress": from.String(), + "effectiveKeeperAddress": jb.KeeperSpec.FromAddress.String(), + "contractAddress": contract.String(), + "upkeepID": "4", + "prettyID": fmt.Sprintf("UPx%064d", 4), + "pipelineSpec": &pipeline.Spec{ + ForwardingAllowed: false, + }, + "performUpkeepGasLimit": uint32(5_000_000 + 9), + 
"maxPerformDataSize": uint32(1000), + "gasPrice": gasPrice.ToInt(), + "gasTipCap": gasTipCap.ToInt(), + "gasFeeCap": gasFeeCap.ToInt(), + "evmChainID": "250", + }, + } + + require.Equal(t, expected, spec) +} diff --git a/core/services/keeper/utils.go b/core/services/keeper/utils.go new file mode 100644 index 00000000..e1b137ae --- /dev/null +++ b/core/services/keeper/utils.go @@ -0,0 +1,39 @@ +package keeper + +import ( + "math" + "math/big" + "strings" + + ethmath "github.com/ethereum/go-ethereum/common/math" +) + +const ( + ZeroPrefix = "0x" + UpkeepPrefix = "UPx" +) + +// LeastSignificant32 returns the least significant 32 bits of the input as a big int +func LeastSignificant32(num *big.Int) uint64 { + max32 := big.NewInt(math.MaxUint32) + return big.NewInt(0).And(num, max32).Uint64() +} + +// ParseUpkeepId parses the upkeep id input string to a big int pointer. It can handle the following 4 formats: +// 1. decimal format like 123471239047239047243709... +// 2. hex format like AbC13D354eFF... +// 3. 0x-prefixed hex like 0xAbC13D354eFF... +// 4. Upkeep-prefixed hex like UPxAbC13D354eFF... 
+func ParseUpkeepId(upkeepIdStr string) (*big.Int, bool) { + if strings.HasPrefix(upkeepIdStr, UpkeepPrefix) { + upkeepIdStr = ZeroPrefix + upkeepIdStr[len(UpkeepPrefix):] + } + + // this handles cases 1, 3, 4 + upkeepId, ok := ethmath.ParseBig256(upkeepIdStr) + if !ok { + // this handles case 2 or returns (nil, false) + return ethmath.ParseBig256(ZeroPrefix + upkeepIdStr) + } + return upkeepId, ok +} diff --git a/core/services/keeper/validate.go b/core/services/keeper/validate.go new file mode 100644 index 00000000..7904f4d6 --- /dev/null +++ b/core/services/keeper/validate.go @@ -0,0 +1,47 @@ +package keeper + +import ( + "strings" + + "github.com/google/uuid" + "github.com/pelletier/go-toml" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +// ValidatedKeeperSpec analyses the tomlString passed as parameter and +// returns a newly-created Job if there are no validation errors inside the toml. +func ValidatedKeeperSpec(tomlString string) (job.Job, error) { + // Create a new job with a randomly generated uuid, which can be replaced with the one from tomlString. 
+ var j = job.Job{ + ExternalJobID: uuid.New(), + } + + tree, err := toml.Load(tomlString) + if err != nil { + return j, err + } + + if err := tree.Unmarshal(&j); err != nil { + return j, err + } + + var spec job.KeeperSpec + if err := tree.Unmarshal(&spec); err != nil { + return j, err + } + + j.KeeperSpec = &spec + + if j.Type != job.Keeper { + return j, errors.Errorf("unsupported type %s", j.Type) + } + + if strings.Contains(tomlString, "observationSource") || + strings.Contains(tomlString, "ObservationSource") { + return j, errors.New("There should be no 'observationSource' parameter included in the toml") + } + + return j, nil +} diff --git a/core/services/keeper/validate_test.go b/core/services/keeper/validate_test.go new file mode 100644 index 00000000..598bc3da --- /dev/null +++ b/core/services/keeper/validate_test.go @@ -0,0 +1,196 @@ +package keeper + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestValidatedKeeperSpec(t *testing.T) { + t.Parallel() + + type args struct { + tomlString string + } + + type want struct { + id int32 + contractAddr string + fromAddr string + createdAt time.Time + updatedAt time.Time + } + + tests := []struct { + name string + args args + want want + wantErr bool + }{ + { + name: "valid job spec", + args: args{ + tomlString: ` + type = "keeper" + name = "example keeper spec" + contractAddress = "0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba" + fromAddress = "0xa8037A20989AFcBC51798de9762b351D63ff462e" + externalJobID = "123e4567-e89b-12d3-a456-426655440002" + `, + }, + want: want{ + id: 0, + contractAddr: "0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba", + fromAddr: "0xa8037A20989AFcBC51798de9762b351D63ff462e", + createdAt: time.Time{}, + updatedAt: time.Time{}, + }, + wantErr: false, + }, + + { + name: "valid job spec with reordered fields", + args: args{ + tomlString: ` + type = "keeper" + name = "example keeper spec" + contractAddress = "0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba" + 
fromAddress = "0xa8037A20989AFcBC51798de9762b351D63ff462e" + evmChainID = 4 + externalJobID = "123e4567-e89b-12d3-a456-426655440002" + `, + }, + want: want{ + id: 0, + contractAddr: "0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba", + fromAddr: "0xa8037A20989AFcBC51798de9762b351D63ff462e", + createdAt: time.Time{}, + updatedAt: time.Time{}, + }, + wantErr: false, + }, + + { + name: "invalid job spec because of type", + args: args{ + tomlString: ` + type = "vrf" + name = "invalid keeper spec example 1" + contractAddress = "0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba" + fromAddress = "0xa8037A20989AFcBC51798de9762b351D63ff462e" + evmChainID = 4 + externalJobID = "123e4567-e89b-12d3-a456-426655440002" + `, + }, + want: want{}, + wantErr: true, + }, + + { + name: "invalid job spec because observation source is passed as parameter (lowercase)", + args: args{ + tomlString: ` + type = "keeper" + name = "invalid keeper spec example 2" + contractAddress = "0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba" + fromAddress = "0xa8037A20989AFcBC51798de9762b351D63ff462e" + evmChainID = 4 + externalJobID = "123e4567-e89b-12d3-a456-426655440002" + observationSource = " + encode_check_upkeep_tx [type=ethabiencode abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] + check_upkeep_tx [type=ethcall + failEarly=false + extractRevertReason=false + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + gasPrice="$(jobSpec.gasPrice)" + gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] + decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, + uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] + encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": 
$(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] + perform_upkeep_tx [type=ethtx + minConfirmations=2 + to="$(jobSpec.contractAddress)" + from="[$(jobSpec.fromAddress)]" + evmChainID="$(jobSpec.evmChainID)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID),\"upkeepID\":$(jobSpec.prettyID)}"] + encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx" + `, + }, + want: want{}, + wantErr: true, + }, + + { + name: "invalid job spec because observation source is passed as parameter (uppercase)", + args: args{ + tomlString: ` + type = "keeper" + name = "invalid keeper spec example 2" + contractAddress = "0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba" + fromAddress = "0xa8037A20989AFcBC51798de9762b351D63ff462e" + evmChainID = 4 + externalJobID = "123e4567-e89b-12d3-a456-426655440002" + ObservationSource = " + encode_check_upkeep_tx [type=ethabiencode abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] + check_upkeep_tx [type=ethcall + failEarly=false + extractRevertReason=false + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + gasPrice="$(jobSpec.gasPrice)" + gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] + decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, + uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] + encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] + perform_upkeep_tx [type=ethtx + minConfirmations=2 + to="$(jobSpec.contractAddress)" + from="[$(jobSpec.fromAddress)]" + evmChainID="$(jobSpec.evmChainID)" + 
data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID),\"upkeepID\":$(jobSpec.prettyID)}"] + encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx" + `, + }, + want: want{}, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ValidatedKeeperSpec(tt.args.tomlString) + if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + + require.Equal(t, tt.want.id, got.ID) + require.Equal(t, tt.want.contractAddr, got.KeeperSpec.ContractAddress.Hex()) + require.Equal(t, tt.want.fromAddr, got.KeeperSpec.FromAddress.Hex()) + require.Equal(t, tt.want.createdAt, got.KeeperSpec.CreatedAt) + require.Equal(t, tt.want.updatedAt, got.KeeperSpec.UpdatedAt) + }) + } + +} diff --git a/core/services/keystore/chaintype/chaintype.go b/core/services/keystore/chaintype/chaintype.go new file mode 100644 index 00000000..cd149e39 --- /dev/null +++ b/core/services/keystore/chaintype/chaintype.go @@ -0,0 +1,64 @@ +package chaintype + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" +) + +// ChainType denotes the chain or network to work with +type ChainType string + +const ( + // EVM for Ethereum or other chains supporting the EVM + EVM ChainType = "evm" + // Cosmos for the Cosmos chain + Cosmos ChainType = "cosmos" + // Solana for the Solana chain + Solana ChainType = "solana" + // StarkNet for the StarkNet chain + StarkNet ChainType = "starknet" +) + +type ChainTypes []ChainType + +func (c ChainTypes) String() (out string) { + var sb strings.Builder + for i, chain := range c { + if i != 0 { + sb.WriteString(", ") + } + sb.WriteString(string(chain)) + } + return sb.String() +} + +// SupportedChainTypes contain all chains that are supported +var SupportedChainTypes = ChainTypes{EVM, Cosmos, Solana, StarkNet} + +// ErrInvalidChainType is an error to indicate an unsupported chain type 
+var ErrInvalidChainType error + +func init() { + supported := make([]string, 0, len(SupportedChainTypes)) + for _, chainType := range SupportedChainTypes { + supported = append(supported, fmt.Sprintf(`"%s"`, chainType)) + } + ErrInvalidChainType = fmt.Errorf("valid types include: [%s]", strings.Join(supported, ", ")) +} + +// IsSupportedChainType checks to see if the chain is supported +func IsSupportedChainType(chainType ChainType) bool { + for _, v := range SupportedChainTypes { + if v == chainType { + return true + } + } + return false +} + +// NewErrInvalidChainType returns an error wrapping ErrInvalidChainType for an unsupported chain +func NewErrInvalidChainType(chainType ChainType) error { + return errors.Wrapf(ErrInvalidChainType, `unknown chain type "%s"`, chainType) +} diff --git a/core/services/keystore/cosmos.go b/core/services/keystore/cosmos.go new file mode 100644 index 00000000..c4ab66d6 --- /dev/null +++ b/core/services/keystore/cosmos.go @@ -0,0 +1,183 @@ +package keystore + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/cosmoskey" +) + +//go:generate mockery --quiet --name Cosmos --output ./mocks/ --case=underscore --filename cosmos.go + +type Cosmos interface { + Get(id string) (cosmoskey.Key, error) + GetAll() ([]cosmoskey.Key, error) + Create() (cosmoskey.Key, error) + Add(key cosmoskey.Key) error + Delete(id string) (cosmoskey.Key, error) + Import(keyJSON []byte, password string) (cosmoskey.Key, error) + Export(id string, password string) ([]byte, error) + EnsureKey() error +} + +type cosmos struct { + *keyManager +} + +var _ Cosmos = &cosmos{} + +func newCosmosKeyStore(km *keyManager) *cosmos { + return &cosmos{ + km, + } +} + +func (ks *cosmos) Get(id string) (cosmoskey.Key, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return cosmoskey.Key{}, ErrLocked + } + return ks.getByID(id) +} + 
+func (ks *cosmos) GetAll() (keys []cosmoskey.Key, _ error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + for _, key := range ks.keyRing.Cosmos { + keys = append(keys, key) + } + return keys, nil +} + +func (ks *cosmos) Create() (cosmoskey.Key, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return cosmoskey.Key{}, ErrLocked + } + key := cosmoskey.New() + return key, ks.safeAddKey(key) +} + +func (ks *cosmos) Add(key cosmoskey.Key) error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + if _, found := ks.keyRing.Cosmos[key.ID()]; found { + return fmt.Errorf("key with ID %s already exists", key.ID()) + } + return ks.safeAddKey(key) +} + +func (ks *cosmos) Delete(id string) (cosmoskey.Key, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return cosmoskey.Key{}, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return cosmoskey.Key{}, err + } + err = ks.safeRemoveKey(key) + return key, err +} + +func (ks *cosmos) Import(keyJSON []byte, password string) (cosmoskey.Key, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return cosmoskey.Key{}, ErrLocked + } + key, err := cosmoskey.FromEncryptedJSON(keyJSON, password) + if err != nil { + return cosmoskey.Key{}, errors.Wrap(err, "CosmosKeyStore#ImportKey failed to decrypt key") + } + if _, found := ks.keyRing.Cosmos[key.ID()]; found { + return cosmoskey.Key{}, fmt.Errorf("key with ID %s already exists", key.ID()) + } + return key, ks.keyManager.safeAddKey(key) +} + +func (ks *cosmos) Export(id string, password string) ([]byte, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return nil, err + } + return key.ToEncryptedJSON(password, ks.scryptParams) +} + +func (ks *cosmos) EnsureKey() error { + ks.lock.Lock() + defer ks.lock.Unlock() + if 
ks.isLocked() { + return ErrLocked + } + + if len(ks.keyRing.Cosmos) > 0 { + return nil + } + + key := cosmoskey.New() + + ks.logger.Infof("Created Cosmos key with ID %s", key.ID()) + + return ks.safeAddKey(key) +} + +func (ks *cosmos) getByID(id string) (cosmoskey.Key, error) { + key, found := ks.keyRing.Cosmos[id] + if !found { + return cosmoskey.Key{}, KeyNotFoundError{ID: id, KeyType: "Cosmos"} + } + return key, nil +} + +// CosmosLoopKeystore implements the [github.com/goplugin/plugin-common/pkg/loop.Keystore] interface and +// handles signing for Cosmos messages. +type CosmosLoopKeystore struct { + Cosmos +} + +var _ loop.Keystore = &CosmosLoopKeystore{} + +func (lk *CosmosLoopKeystore) Sign(ctx context.Context, id string, hash []byte) ([]byte, error) { + k, err := lk.Get(id) + if err != nil { + return nil, err + } + // loopp spec requires passing nil hash to check existence of id + if hash == nil { + return nil, nil + } + + return k.ToPrivKey().Sign(hash) +} + +func (lk *CosmosLoopKeystore) Accounts(ctx context.Context) ([]string, error) { + keys, err := lk.GetAll() + if err != nil { + return nil, err + } + + accounts := []string{} + for _, k := range keys { + accounts = append(accounts, k.PublicKeyStr()) + } + + return accounts, nil +} diff --git a/core/services/keystore/cosmos_test.go b/core/services/keystore/cosmos_test.go new file mode 100644 index 00000000..c8eca750 --- /dev/null +++ b/core/services/keystore/cosmos_test.go @@ -0,0 +1,109 @@ +package keystore_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/cosmoskey" +) + 
+func Test_CosmosKeyStore_E2E(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + require.NoError(t, keyStore.Unlock(cltest.Password)) + ks := keyStore.Cosmos() + reset := func() { + require.NoError(t, utils.JustError(db.Exec("DELETE FROM encrypted_key_rings"))) + keyStore.ResetXXXTestOnly() + require.NoError(t, keyStore.Unlock(cltest.Password)) + } + + t.Run("initializes with an empty state", func(t *testing.T) { + defer reset() + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + }) + + t.Run("errors when getting non-existent ID", func(t *testing.T) { + defer reset() + _, err := ks.Get("non-existent-id") + require.Error(t, err) + }) + + t.Run("creates a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, key, retrievedKey) + }) + + t.Run("imports and exports a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + exportJSON, err := ks.Export(key.ID(), cltest.Password) + require.NoError(t, err) + _, err = ks.Export("non-existent", cltest.Password) + assert.Error(t, err) + _, err = ks.Delete(key.ID()) + require.NoError(t, err) + _, err = ks.Get(key.ID()) + require.Error(t, err) + importedKey, err := ks.Import(exportJSON, cltest.Password) + require.NoError(t, err) + _, err = ks.Import(exportJSON, cltest.Password) + assert.Error(t, err) + _, err = ks.Import([]byte(""), cltest.Password) + assert.Error(t, err) + require.Equal(t, key.ID(), importedKey.ID()) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, importedKey, retrievedKey) + }) + + t.Run("adds an externally created key / deletes a key", func(t *testing.T) { + defer reset() + newKey := cosmoskey.New() + err := ks.Add(newKey) + require.NoError(t, err) + err = ks.Add(newKey) 
+ assert.Error(t, err) + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + _, err = ks.Delete(newKey.ID()) + require.NoError(t, err) + _, err = ks.Delete(newKey.ID()) + assert.Error(t, err) + keys, err = ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + _, err = ks.Get(newKey.ID()) + require.Error(t, err) + }) + + t.Run("ensures key", func(t *testing.T) { + defer reset() + err := ks.EnsureKey() + assert.NoError(t, err) + + err = ks.EnsureKey() + assert.NoError(t, err) + + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + }) +} diff --git a/core/services/keystore/csa.go b/core/services/keystore/csa.go new file mode 100644 index 00000000..eb65ddee --- /dev/null +++ b/core/services/keystore/csa.go @@ -0,0 +1,165 @@ +package keystore + +import ( + "fmt" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" +) + +//go:generate mockery --quiet --name CSA --output mocks/ --case=underscore + +// ErrCSAKeyExists describes the error when the CSA key already exists +var ErrCSAKeyExists = errors.New("can only have 1 CSA key") + +// type CSAKeystoreInterface interface { +type CSA interface { + Get(id string) (csakey.KeyV2, error) + GetAll() ([]csakey.KeyV2, error) + Create() (csakey.KeyV2, error) + Add(key csakey.KeyV2) error + Delete(id string) (csakey.KeyV2, error) + Import(keyJSON []byte, password string) (csakey.KeyV2, error) + Export(id string, password string) ([]byte, error) + EnsureKey() error +} + +type csa struct { + *keyManager +} + +var _ CSA = &csa{} + +func newCSAKeyStore(km *keyManager) *csa { + return &csa{ + km, + } +} + +func (ks *csa) Get(id string) (csakey.KeyV2, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return csakey.KeyV2{}, ErrLocked + } + return ks.getByID(id) +} + +func (ks *csa) GetAll() (keys []csakey.KeyV2, _ error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if 
ks.isLocked() { + return nil, ErrLocked + } + for _, key := range ks.keyRing.CSA { + keys = append(keys, key) + } + return keys, nil +} + +func (ks *csa) Create() (csakey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return csakey.KeyV2{}, ErrLocked + } + // Ensure you can only have one CSA at a time. This is a temporary + // restriction until we are able to handle multiple CSA keys in the + // communication channel + if len(ks.keyRing.CSA) > 0 { + return csakey.KeyV2{}, ErrCSAKeyExists + } + key, err := csakey.NewV2() + if err != nil { + return csakey.KeyV2{}, err + } + return key, ks.safeAddKey(key) +} + +func (ks *csa) Add(key csakey.KeyV2) error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + if len(ks.keyRing.CSA) > 0 { + return ErrCSAKeyExists + } + return ks.safeAddKey(key) +} + +func (ks *csa) Delete(id string) (csakey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return csakey.KeyV2{}, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return csakey.KeyV2{}, err + } + + err = ks.safeRemoveKey(key) + + return key, err +} + +func (ks *csa) Import(keyJSON []byte, password string) (csakey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return csakey.KeyV2{}, ErrLocked + } + key, err := csakey.FromEncryptedJSON(keyJSON, password) + if err != nil { + return csakey.KeyV2{}, errors.Wrap(err, "CSAKeyStore#ImportKey failed to decrypt key") + } + if _, found := ks.keyRing.CSA[key.ID()]; found { + return csakey.KeyV2{}, fmt.Errorf("key with ID %s already exists", key.ID()) + } + return key, ks.keyManager.safeAddKey(key) +} + +func (ks *csa) Export(id string, password string) ([]byte, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return nil, err + } + return key.ToEncryptedJSON(password, ks.scryptParams) +} + 
+// EnsureKey verifies whether the CSA key has been seeded, if not, it creates it. +func (ks *csa) EnsureKey() error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + + if len(ks.keyRing.CSA) > 0 { + return nil + } + + key, err := csakey.NewV2() + if err != nil { + return err + } + + ks.logger.Infof("Created CSA key with ID %s", key.ID()) + + return ks.safeAddKey(key) +} + +func (ks *csa) getByID(id string) (csakey.KeyV2, error) { + key, found := ks.keyRing.CSA[id] + if !found { + return csakey.KeyV2{}, KeyNotFoundError{ID: id, KeyType: "CSA"} + } + return key, nil +} diff --git a/core/services/keystore/csa_test.go b/core/services/keystore/csa_test.go new file mode 100644 index 00000000..55a32769 --- /dev/null +++ b/core/services/keystore/csa_test.go @@ -0,0 +1,171 @@ +package keystore_test + +import ( + "fmt" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" +) + +func Test_CSAKeyStore_E2E(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + require.NoError(t, keyStore.Unlock(cltest.Password)) + ks := keyStore.CSA() + reset := func() { + _, err := db.Exec("DELETE FROM encrypted_key_rings") + require.NoError(t, err) + keyStore.ResetXXXTestOnly() + require.NoError(t, keyStore.Unlock(cltest.Password)) + } + + t.Run("initializes with an empty state", func(t *testing.T) { + defer reset() + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + }) + + t.Run("errors when getting non-existent ID", func(t 
*testing.T) { + defer reset() + _, err := ks.Get("non-existent-id") + require.Error(t, err) + }) + + t.Run("creates a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, key, retrievedKey) + + t.Run("prevents creating more than one key", func(t *testing.T) { + k, err2 := ks.Create() + + assert.Zero(t, k) + assert.Error(t, err2) + assert.True(t, errors.Is(err2, keystore.ErrCSAKeyExists)) + }) + }) + + t.Run("imports and exports a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + exportJSON, err := ks.Export(key.ID(), cltest.Password) + require.NoError(t, err) + _, err = ks.Delete(key.ID()) + require.NoError(t, err) + _, err = ks.Get(key.ID()) + require.Error(t, err) + importedKey, err := ks.Import(exportJSON, cltest.Password) + require.NoError(t, err) + require.Equal(t, key.ID(), importedKey.ID()) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, importedKey, retrievedKey) + + t.Run("prevents importing more than one key", func(t *testing.T) { + k, err2 := ks.Import(exportJSON, cltest.Password) + + assert.Zero(t, k) + assert.Error(t, err2) + assert.Equal(t, fmt.Sprintf("key with ID %s already exists", key.ID()), err2.Error()) + }) + + t.Run("fails to import malformed key", func(t *testing.T) { + k, err2 := ks.Import([]byte(""), cltest.Password) + + assert.Zero(t, k) + assert.Error(t, err2) + }) + + t.Run("fails to export non-existent key", func(t *testing.T) { + exportJSON, err = ks.Export("non-existent", cltest.Password) + + assert.Error(t, err) + assert.Empty(t, exportJSON) + }) + }) + + t.Run("adds an externally created key / deletes a key", func(t *testing.T) { + defer reset() + newKey, err := csakey.NewV2() + require.NoError(t, err) + err = ks.Add(newKey) + require.NoError(t, err) + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, 
len(keys)) + _, err = ks.Delete(newKey.ID()) + require.NoError(t, err) + keys, err = ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + _, err = ks.Get(newKey.ID()) + require.Error(t, err) + + t.Run("prevents adding more than one key", func(t *testing.T) { + err = ks.Add(newKey) + require.NoError(t, err) + + err = ks.Add(newKey) + + assert.Error(t, err) + assert.True(t, errors.Is(err, keystore.ErrCSAKeyExists)) + }) + + t.Run("fails to delete non-existent key", func(t *testing.T) { + k, err2 := ks.Delete("non-existent") + + assert.Zero(t, k) + assert.Error(t, err2) + }) + }) + + t.Run("adds an externally created key/ensures it already exists", func(t *testing.T) { + defer reset() + + newKey, err := csakey.NewV2() + assert.NoError(t, err) + err = ks.Add(newKey) + assert.NoError(t, err) + + err = keyStore.CSA().EnsureKey() + assert.NoError(t, err) + keys, err2 := ks.GetAll() + assert.NoError(t, err2) + + require.Equal(t, 1, len(keys)) + require.Equal(t, newKey.ID(), keys[0].ID()) + require.Equal(t, newKey.Version, keys[0].Version) + require.Equal(t, newKey.PublicKey, keys[0].PublicKey) + }) + + t.Run("auto creates a key if it doesn't exists when trying to ensure it already exists", func(t *testing.T) { + defer reset() + + keys, err := ks.GetAll() + assert.NoError(t, err) + assert.Equal(t, 0, len(keys)) + + err = keyStore.CSA().EnsureKey() + assert.NoError(t, err) + + keys, err = ks.GetAll() + assert.NoError(t, err) + + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + }) +} diff --git a/core/services/keystore/dkgencrypt.go b/core/services/keystore/dkgencrypt.go new file mode 100644 index 00000000..bf32bd49 --- /dev/null +++ b/core/services/keystore/dkgencrypt.go @@ -0,0 +1,163 @@ +package keystore + +import ( + "fmt" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgencryptkey" +) + +//go:generate mockery --quiet --name DKGEncrypt --output mocks/ --case=underscore + +// DKGEncrypt provides 
encryption keys for the DKG. +type DKGEncrypt interface { + Get(id string) (dkgencryptkey.Key, error) + GetAll() ([]dkgencryptkey.Key, error) + Create() (dkgencryptkey.Key, error) + Add(key dkgencryptkey.Key) error + Delete(id string) (dkgencryptkey.Key, error) + Import(keyJSON []byte, password string) (dkgencryptkey.Key, error) + Export(id string, password string) ([]byte, error) + EnsureKey() error +} + +type dkgEncrypt struct { + *keyManager +} + +func newDKGEncryptKeyStore(km *keyManager) *dkgEncrypt { + return &dkgEncrypt{ + keyManager: km, + } +} + +var _ DKGEncrypt = &dkgEncrypt{} + +// Add implements DKGEncrypt +func (d *dkgEncrypt) Add(key dkgencryptkey.Key) error { + d.lock.Lock() + defer d.lock.Unlock() + if d.isLocked() { + return ErrLocked + } + return d.safeAddKey(key) +} + +// Create implements DKGEncrypt +func (d *dkgEncrypt) Create() (dkgencryptkey.Key, error) { + d.lock.Lock() + defer d.lock.Unlock() + if d.isLocked() { + return dkgencryptkey.Key{}, ErrLocked + } + key, err := dkgencryptkey.New() + if err != nil { + return dkgencryptkey.Key{}, errors.Wrap(err, "dkgencryptkey.New()") + } + return key, d.safeAddKey(key) +} + +// Delete implements DKGEncrypt +func (d *dkgEncrypt) Delete(id string) (dkgencryptkey.Key, error) { + d.lock.Lock() + defer d.lock.Unlock() + if d.isLocked() { + return dkgencryptkey.Key{}, ErrLocked + } + key, err := d.getByID(id) + if err != nil { + return dkgencryptkey.Key{}, err + } + + err = d.safeRemoveKey(key) + return key, errors.Wrap(err, "safe remove key") +} + +// EnsureKey implements DKGEncrypt +func (d *dkgEncrypt) EnsureKey() error { + d.lock.Lock() + defer d.lock.Unlock() + if d.isLocked() { + return ErrLocked + } + if len(d.keyRing.DKGEncrypt) > 0 { + return nil + } + + key, err := dkgencryptkey.New() + if err != nil { + return errors.Wrap(err, "dkgencryptkey. 
New()") + } + + d.logger.Infof("Created DKGEncrypt key with ID %s", key.ID()) + + return d.safeAddKey(key) +} + +// Export implements DKGEncrypt +func (d *dkgEncrypt) Export(id string, password string) ([]byte, error) { + d.lock.RLock() + defer d.lock.RUnlock() + if d.isLocked() { + return nil, ErrLocked + } + key, err := d.getByID(id) + if err != nil { + return nil, err + } + return key.ToEncryptedJSON(password, d.scryptParams) +} + +// Get implements DKGEncrypt +func (d *dkgEncrypt) Get(id string) (keys dkgencryptkey.Key, err error) { + d.lock.RLock() + defer d.lock.RUnlock() + if d.isLocked() { + return dkgencryptkey.Key{}, ErrLocked + } + return d.getByID(id) +} + +// GetAll implements DKGEncrypt +func (d *dkgEncrypt) GetAll() (keys []dkgencryptkey.Key, err error) { + d.lock.RLock() + defer d.lock.RUnlock() + if d.isLocked() { + return nil, ErrLocked + } + for _, key := range d.keyRing.DKGEncrypt { + keys = append(keys, key) + } + return keys, nil +} + +// Import implements DKGEncrypt +func (d *dkgEncrypt) Import(keyJSON []byte, password string) (dkgencryptkey.Key, error) { + d.lock.Lock() + defer d.lock.Unlock() + if d.isLocked() { + return dkgencryptkey.Key{}, ErrLocked + } + key, err := dkgencryptkey.FromEncryptedJSON(keyJSON, password) + if err != nil { + return dkgencryptkey.Key{}, errors.Wrap(err, "from encrypted json") + } + _, err = d.getByID(key.ID()) + if err == nil { + return dkgencryptkey.Key{}, fmt.Errorf("key with ID %s already exists", key.ID()) + } + return key, d.keyManager.safeAddKey(key) +} + +// caller must hold lock +func (d *dkgEncrypt) getByID(id string) (dkgencryptkey.Key, error) { + key, found := d.keyRing.DKGEncrypt[id] + if !found { + return dkgencryptkey.Key{}, KeyNotFoundError{ + ID: id, + KeyType: "DKGEncrypt", + } + } + return key, nil +} diff --git a/core/services/keystore/dkgencrypt_test.go b/core/services/keystore/dkgencrypt_test.go new file mode 100644 index 00000000..afd30a1b --- /dev/null +++ 
b/core/services/keystore/dkgencrypt_test.go @@ -0,0 +1,124 @@ +package keystore_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgencryptkey" +) + +func Test_DKGEncryptKeyStore_E2E(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + require.NoError(t, keyStore.Unlock(cltest.Password)) + ks := keyStore.DKGEncrypt() + + assert.NotNil(t, ks) + + reset := func() { + _, err := db.Exec("DELETE FROM encrypted_key_rings") + require.NoError(t, err) + keyStore.ResetXXXTestOnly() + require.NoError(t, keyStore.Unlock(cltest.Password)) + } + + t.Run("initializes with an empty state", func(t *testing.T) { + defer reset() + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + }) + + t.Run("errors when getting non-existent ID", func(t *testing.T) { + defer reset() + _, err := ks.Get("non-existent-id") + require.Error(t, err) + }) + + t.Run("creates a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, key, retrievedKey) + }) + + t.Run("imports and exports a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + exportJSON, err := ks.Export(key.ID(), cltest.Password) + require.NoError(t, err) + _, err = ks.Delete(key.ID()) + require.NoError(t, err) + _, err = ks.Get(key.ID()) + require.Error(t, err) + importedKey, err := ks.Import(exportJSON, cltest.Password) + require.NoError(t, err) + 
require.Equal(t, key.ID(), importedKey.ID()) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, importedKey, retrievedKey) + }) + + t.Run("adds an externally created key / deletes a key", func(t *testing.T) { + defer reset() + newKey, err := dkgencryptkey.New() + require.NoError(t, err) + err = ks.Add(newKey) + require.NoError(t, err) + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + _, err = ks.Delete(newKey.ID()) + require.NoError(t, err) + keys, err = ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + _, err = ks.Get(newKey.ID()) + require.Error(t, err) + }) + + t.Run("adds an externally created key/ensures it already exists", func(t *testing.T) { + defer reset() + + newKey, err := dkgencryptkey.New() + assert.NoError(t, err) + err = ks.Add(newKey) + assert.NoError(t, err) + + err = keyStore.DKGEncrypt().EnsureKey() + assert.NoError(t, err) + keys, err2 := ks.GetAll() + assert.NoError(t, err2) + + require.Equal(t, 1, len(keys)) + require.Equal(t, newKey.ID(), keys[0].ID()) + require.Equal(t, newKey.PublicKey, keys[0].PublicKey) + }) + + t.Run("auto creates a key if it doesn't exists when trying to ensure it already exists", func(t *testing.T) { + defer reset() + + keys, err := ks.GetAll() + assert.NoError(t, err) + assert.Equal(t, 0, len(keys)) + + err = keyStore.DKGEncrypt().EnsureKey() + assert.NoError(t, err) + + keys, err = ks.GetAll() + assert.NoError(t, err) + + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + }) +} diff --git a/core/services/keystore/dkgsign.go b/core/services/keystore/dkgsign.go new file mode 100644 index 00000000..998de95d --- /dev/null +++ b/core/services/keystore/dkgsign.go @@ -0,0 +1,163 @@ +package keystore + +import ( + "fmt" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgsignkey" +) + +//go:generate mockery --quiet --name DKGSign --output mocks/ --case=underscore + +// DKGSign 
provides signing keys for the DKG. +type DKGSign interface { + Get(id string) (dkgsignkey.Key, error) + GetAll() ([]dkgsignkey.Key, error) + Create() (dkgsignkey.Key, error) + Add(key dkgsignkey.Key) error + Delete(id string) (dkgsignkey.Key, error) + Import(keyJSON []byte, password string) (dkgsignkey.Key, error) + Export(id string, password string) ([]byte, error) + EnsureKey() error +} + +type dkgSign struct { + *keyManager +} + +func newDKGSignKeyStore(km *keyManager) *dkgSign { + return &dkgSign{ + keyManager: km, + } +} + +var _ DKGSign = &dkgSign{} + +// Add implements DKGSign +func (d *dkgSign) Add(key dkgsignkey.Key) error { + d.lock.Lock() + defer d.lock.Unlock() + if d.isLocked() { + return ErrLocked + } + return d.safeAddKey(key) +} + +// Create implements DKGSign +func (d *dkgSign) Create() (dkgsignkey.Key, error) { + d.lock.Lock() + defer d.lock.Unlock() + if d.isLocked() { + return dkgsignkey.Key{}, ErrLocked + } + key, err := dkgsignkey.New() + if err != nil { + return dkgsignkey.Key{}, errors.Wrap(err, "dkgsignkey New()") + } + return key, d.safeAddKey(key) +} + +// Delete implements DKGSign +func (d *dkgSign) Delete(id string) (dkgsignkey.Key, error) { + d.lock.Lock() + defer d.lock.Unlock() + if d.isLocked() { + return dkgsignkey.Key{}, ErrLocked + } + key, err := d.getByID(id) + if err != nil { + return dkgsignkey.Key{}, err + } + + err = d.safeRemoveKey(key) + return key, errors.Wrap(err, "safe remove key") +} + +// EnsureKey implements DKGSign +func (d *dkgSign) EnsureKey() error { + d.lock.Lock() + defer d.lock.Unlock() + if d.isLocked() { + return ErrLocked + } + if len(d.keyRing.DKGSign) > 0 { + return nil + } + + key, err := dkgsignkey.New() + if err != nil { + return errors.Wrap(err, "dkgsignkey New()") + } + + d.logger.Infof("Created DKGSign key with ID %s", key.ID()) + + return d.safeAddKey(key) +} + +// Export implements DKGSign +func (d *dkgSign) Export(id string, password string) ([]byte, error) { + d.lock.RLock() + defer 
d.lock.RUnlock() + if d.isLocked() { + return nil, ErrLocked + } + key, err := d.getByID(id) + if err != nil { + return nil, err + } + return key.ToEncryptedJSON(password, d.scryptParams) +} + +// Get implements DKGSign +func (d *dkgSign) Get(id string) (keys dkgsignkey.Key, err error) { + d.lock.RLock() + defer d.lock.RUnlock() + if d.isLocked() { + return dkgsignkey.Key{}, ErrLocked + } + return d.getByID(id) +} + +// GetAll implements DKGSign +func (d *dkgSign) GetAll() (keys []dkgsignkey.Key, err error) { + d.lock.RLock() + defer d.lock.RUnlock() + if d.isLocked() { + return nil, ErrLocked + } + for _, key := range d.keyRing.DKGSign { + keys = append(keys, key) + } + return keys, nil +} + +// Import implements DKGSign +func (d *dkgSign) Import(keyJSON []byte, password string) (dkgsignkey.Key, error) { + d.lock.Lock() + defer d.lock.Unlock() + if d.isLocked() { + return dkgsignkey.Key{}, ErrLocked + } + key, err := dkgsignkey.FromEncryptedJSON(keyJSON, password) + if err != nil { + return dkgsignkey.Key{}, errors.Wrap(err, "from encrypted json") + } + _, err = d.getByID(key.ID()) + if err == nil { + return dkgsignkey.Key{}, fmt.Errorf("key with ID %s already exists", key.ID()) + } + return key, d.keyManager.safeAddKey(key) +} + +// caller must hold lock +func (d *dkgSign) getByID(id string) (dkgsignkey.Key, error) { + key, found := d.keyRing.DKGSign[id] + if !found { + return dkgsignkey.Key{}, KeyNotFoundError{ + ID: id, + KeyType: "DKGSign", + } + } + return key, nil +} diff --git a/core/services/keystore/dkgsign_test.go b/core/services/keystore/dkgsign_test.go new file mode 100644 index 00000000..5d1b97f3 --- /dev/null +++ b/core/services/keystore/dkgsign_test.go @@ -0,0 +1,124 @@ +package keystore_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgsignkey" +) + +func Test_DKGSignKeyStore_E2E(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + require.NoError(t, keyStore.Unlock(cltest.Password)) + ks := keyStore.DKGSign() + + assert.NotNil(t, ks) + + reset := func() { + _, err := db.Exec("DELETE FROM encrypted_key_rings") + require.NoError(t, err) + keyStore.ResetXXXTestOnly() + require.NoError(t, keyStore.Unlock(cltest.Password)) + } + + t.Run("initializes with an empty state", func(t *testing.T) { + defer reset() + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + }) + + t.Run("errors when getting non-existent ID", func(t *testing.T) { + defer reset() + _, err := ks.Get("non-existent-id") + require.Error(t, err) + }) + + t.Run("creates a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, key, retrievedKey) + }) + + t.Run("imports and exports a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + exportJSON, err := ks.Export(key.ID(), cltest.Password) + require.NoError(t, err) + _, err = ks.Delete(key.ID()) + require.NoError(t, err) + _, err = ks.Get(key.ID()) + require.Error(t, err) + importedKey, err := ks.Import(exportJSON, cltest.Password) + require.NoError(t, err) + require.Equal(t, key.ID(), importedKey.ID()) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, importedKey, retrievedKey) + }) + + t.Run("adds an externally created key / deletes a key", func(t *testing.T) { + defer reset() + newKey, err := dkgsignkey.New() + require.NoError(t, err) + err = ks.Add(newKey) + 
require.NoError(t, err) + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + _, err = ks.Delete(newKey.ID()) + require.NoError(t, err) + keys, err = ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + _, err = ks.Get(newKey.ID()) + require.Error(t, err) + }) + + t.Run("adds an externally created key/ensures it already exists", func(t *testing.T) { + defer reset() + + newKey, err := dkgsignkey.New() + assert.NoError(t, err) + err = ks.Add(newKey) + assert.NoError(t, err) + + err = keyStore.DKGSign().EnsureKey() + assert.NoError(t, err) + keys, err2 := ks.GetAll() + assert.NoError(t, err2) + + require.Equal(t, 1, len(keys)) + require.Equal(t, newKey.ID(), keys[0].ID()) + require.Equal(t, newKey.PublicKey, keys[0].PublicKey) + }) + + t.Run("auto creates a key if it doesn't exists when trying to ensure it already exists", func(t *testing.T) { + defer reset() + + keys, err := ks.GetAll() + assert.NoError(t, err) + assert.Equal(t, 0, len(keys)) + + err = keyStore.DKGSign().EnsureKey() + assert.NoError(t, err) + + keys, err = ks.GetAll() + assert.NoError(t, err) + + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + }) +} diff --git a/core/services/keystore/eth.go b/core/services/keystore/eth.go new file mode 100644 index 00000000..5ffb8a34 --- /dev/null +++ b/core/services/keystore/eth.go @@ -0,0 +1,589 @@ +package keystore + +import ( + "fmt" + "math/big" + "sort" + "strings" + "sync" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// Eth is the external interface for EthKeyStore +// +//go:generate mockery --quiet --name Eth --output mocks/ --case=underscore +type Eth interface { + Get(id string) 
(ethkey.KeyV2, error) + GetAll() ([]ethkey.KeyV2, error) + Create(chainIDs ...*big.Int) (ethkey.KeyV2, error) + Delete(id string) (ethkey.KeyV2, error) + Import(keyJSON []byte, password string, chainIDs ...*big.Int) (ethkey.KeyV2, error) + Export(id string, password string) ([]byte, error) + + Enable(address common.Address, chainID *big.Int, qopts ...pg.QOpt) error + Disable(address common.Address, chainID *big.Int, qopts ...pg.QOpt) error + Add(address common.Address, chainID *big.Int, qopts ...pg.QOpt) error + + EnsureKeys(chainIDs ...*big.Int) error + SubscribeToKeyChanges() (ch chan struct{}, unsub func()) + + SignTx(fromAddress common.Address, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) + + EnabledKeysForChain(chainID *big.Int) (keys []ethkey.KeyV2, err error) + GetRoundRobinAddress(chainID *big.Int, addresses ...common.Address) (address common.Address, err error) + CheckEnabled(address common.Address, chainID *big.Int) error + + GetState(id string, chainID *big.Int) (ethkey.State, error) + GetStatesForKeys([]ethkey.KeyV2) ([]ethkey.State, error) + GetStateForKey(ethkey.KeyV2) (ethkey.State, error) + GetStatesForChain(chainID *big.Int) ([]ethkey.State, error) + EnabledAddressesForChain(chainID *big.Int) (addresses []common.Address, err error) + + XXXTestingOnlySetState(ethkey.State) + XXXTestingOnlyAdd(key ethkey.KeyV2) +} + +type eth struct { + *keyManager + keystateORM + q pg.Q + subscribers [](chan struct{}) + subscribersMu *sync.RWMutex +} + +var _ Eth = ð{} + +func newEthKeyStore(km *keyManager, orm keystateORM, q pg.Q) *eth { + return ð{ + keystateORM: orm, + keyManager: km, + q: q, + subscribers: make([](chan struct{}), 0), + subscribersMu: new(sync.RWMutex), + } +} + +func (ks *eth) Get(id string) (ethkey.KeyV2, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return ethkey.KeyV2{}, ErrLocked + } + return ks.getByID(id) +} + +func (ks *eth) GetAll() (keys []ethkey.KeyV2, _ error) { + ks.lock.RLock() + 
defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + return ks.getAll(), nil +} + +// caller must hold lock! +func (ks *eth) getAll() (keys []ethkey.KeyV2) { + for _, key := range ks.keyRing.Eth { + keys = append(keys, key) + } + sort.Slice(keys, func(i, j int) bool { return keys[i].Cmp(keys[j]) < 0 }) + return +} + +// Create generates a fresh new key and enables it for the given chain IDs +func (ks *eth) Create(chainIDs ...*big.Int) (ethkey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ethkey.KeyV2{}, ErrLocked + } + key, err := ethkey.NewV2() + if err != nil { + return ethkey.KeyV2{}, err + } + err = ks.add(key, chainIDs...) + if err == nil { + ks.notify() + } + ks.logger.Infow(fmt.Sprintf("Created EVM key with ID %s", key.Address.Hex()), "address", key.Address.Hex(), "evmChainIDs", chainIDs) + return key, err +} + +// EnsureKeys ensures that each chain has at least one key with a state +// linked to that chain. If a key and state exists for a chain but it is +// disabled, we do not enable it automatically here. 
+func (ks *eth) EnsureKeys(chainIDs ...*big.Int) (err error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + + for _, chainID := range chainIDs { + keys := ks.keysForChain(chainID, true) + if len(keys) > 0 { + continue + } + newKey, err := ethkey.NewV2() + if err != nil { + return err + } + err = ks.add(newKey, chainID) + if err != nil { + return err + } + ks.logger.Infow(fmt.Sprintf("Created EVM key with ID %s", newKey.Address.Hex()), "address", newKey.Address.Hex(), "evmChainID", chainID) + } + + return nil +} + +func (ks *eth) Import(keyJSON []byte, password string, chainIDs ...*big.Int) (ethkey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ethkey.KeyV2{}, ErrLocked + } + dKey, err := keystore.DecryptKey(keyJSON, password) + if err != nil { + return ethkey.KeyV2{}, errors.Wrap(err, "EthKeyStore#ImportKey failed to decrypt key") + } + key := ethkey.FromPrivateKey(dKey.PrivateKey) + if _, found := ks.keyRing.Eth[key.ID()]; found { + return ethkey.KeyV2{}, ErrKeyExists + } + err = ks.add(key, chainIDs...) + if err != nil { + return ethkey.KeyV2{}, errors.Wrap(err, "unable to add eth key") + } + ks.notify() + return key, nil +} + +func (ks *eth) Export(id string, password string) ([]byte, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return nil, err + } + return key.ToEncryptedJSON(password, ks.scryptParams) +} + +func (ks *eth) Add(address common.Address, chainID *big.Int, qopts ...pg.QOpt) error { + ks.lock.Lock() + defer ks.lock.Unlock() + _, found := ks.keyRing.Eth[address.Hex()] + if !found { + return ErrKeyNotFound + } + return ks.addKey(address, chainID, qopts...) +} + +// caller must hold lock! 
+func (ks *eth) addKey(address common.Address, chainID *big.Int, qopts ...pg.QOpt) error { + state := new(ethkey.State) + sql := `INSERT INTO evm.key_states (address, disabled, evm_chain_id, created_at, updated_at) + VALUES ($1, false, $2, NOW(), NOW()) + RETURNING *;` + q := ks.q.WithOpts(qopts...) + if err := q.Get(state, sql, address, chainID.String()); err != nil { + return errors.Wrap(err, "failed to insert evm_key_state") + } + // consider: do we really need a cache of the keystates? + ks.keyStates.add(state) + ks.notify() + return nil +} + +func (ks *eth) Enable(address common.Address, chainID *big.Int, qopts ...pg.QOpt) error { + ks.lock.Lock() + defer ks.lock.Unlock() + _, found := ks.keyRing.Eth[address.Hex()] + if !found { + return ErrKeyNotFound + } + return ks.enable(address, chainID, qopts...) +} + +// caller must hold lock! +func (ks *eth) enable(address common.Address, chainID *big.Int, qopts ...pg.QOpt) error { + state := new(ethkey.State) + q := ks.q.WithOpts(qopts...) + sql := `INSERT INTO evm.key_states as key_states ("address", "evm_chain_id", "disabled", "created_at", "updated_at") VALUES ($1, $2, false, NOW(), NOW()) + ON CONFLICT ("address", "evm_chain_id") DO UPDATE SET "disabled" = false, "updated_at" = NOW() WHERE key_states."address" = $1 AND key_states."evm_chain_id" = $2 + RETURNING *;` + if err := q.Get(state, sql, address, chainID.String()); err != nil { + return errors.Wrap(err, "failed to enable state") + } + + if state.CreatedAt.Equal(state.UpdatedAt) { + ks.keyStates.add(state) + } else { + ks.keyStates.enable(address, chainID, state.UpdatedAt) + } + ks.notify() + return nil +} + +func (ks *eth) Disable(address common.Address, chainID *big.Int, qopts ...pg.QOpt) error { + ks.lock.Lock() + defer ks.lock.Unlock() + _, found := ks.keyRing.Eth[address.Hex()] + if !found { + return errors.Errorf("no key exists with ID %s", address.Hex()) + } + return ks.disable(address, chainID, qopts...) 
+} + +func (ks *eth) disable(address common.Address, chainID *big.Int, qopts ...pg.QOpt) error { + state := new(ethkey.State) + q := ks.q.WithOpts(qopts...) + sql := `INSERT INTO evm.key_states as key_states ("address", "evm_chain_id", "disabled", "created_at", "updated_at") VALUES ($1, $2, true, NOW(), NOW()) + ON CONFLICT ("address", "evm_chain_id") DO UPDATE SET "disabled" = true, "updated_at" = NOW() WHERE key_states."address" = $1 AND key_states."evm_chain_id" = $2 + RETURNING *;` + if err := q.Get(state, sql, address, chainID.String()); err != nil { + return errors.Wrap(err, "failed to disable state") + } + + if state.CreatedAt.Equal(state.UpdatedAt) { + ks.keyStates.add(state) + } else { + ks.keyStates.disable(address, chainID, state.UpdatedAt) + } + ks.notify() + return nil +} + +func (ks *eth) Delete(id string) (ethkey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ethkey.KeyV2{}, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return ethkey.KeyV2{}, err + } + err = ks.safeRemoveKey(key, func(tx pg.Queryer) error { + _, err2 := tx.Exec(`DELETE FROM evm.key_states WHERE address = $1`, key.Address) + return err2 + }) + if err != nil { + return ethkey.KeyV2{}, errors.Wrap(err, "unable to remove eth key") + } + ks.keyStates.delete(key.Address) + ks.notify() + return key, nil +} + +func (ks *eth) SubscribeToKeyChanges() (ch chan struct{}, unsub func()) { + ch = make(chan struct{}, 1) + ks.subscribersMu.Lock() + defer ks.subscribersMu.Unlock() + ks.subscribers = append(ks.subscribers, ch) + return ch, func() { + ks.subscribersMu.Lock() + defer ks.subscribersMu.Unlock() + for i, sub := range ks.subscribers { + if sub == ch { + ks.subscribers = append(ks.subscribers[:i], ks.subscribers[i+1:]...) 
+ close(ch) + } + } + } +} + +func (ks *eth) SignTx(address common.Address, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + key, err := ks.getByID(address.String()) + if err != nil { + return nil, err + } + signer := types.LatestSignerForChainID(chainID) + return types.SignTx(tx, signer, key.ToEcdsaPrivKey()) +} + +// EnabledKeysForChain returns all keys that are enabled for the given chain +func (ks *eth) EnabledKeysForChain(chainID *big.Int) (sendingKeys []ethkey.KeyV2, err error) { + if chainID == nil { + return nil, errors.New("chainID must be non-nil") + } + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + return ks.enabledKeysForChain(chainID), nil +} + +func (ks *eth) GetRoundRobinAddress(chainID *big.Int, whitelist ...common.Address) (common.Address, error) { + if chainID == nil { + return common.Address{}, errors.New("chainID must be non-nil") + } + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return common.Address{}, ErrLocked + } + + var keys []ethkey.KeyV2 + if len(whitelist) == 0 { + keys = ks.enabledKeysForChain(chainID) + } else if len(whitelist) > 0 { + for _, k := range ks.enabledKeysForChain(chainID) { + for _, addr := range whitelist { + if addr == k.Address { + keys = append(keys, k) + } + } + } + } + + if len(keys) == 0 { + var err error + if chainID == nil && len(whitelist) == 0 { + err = errors.New("no sending keys available") + } else if chainID == nil { + err = errors.Errorf("no sending keys available that match whitelist: %v", whitelist) + } else if len(whitelist) == 0 { + err = errors.Errorf("no sending keys available for chain %s", chainID.String()) + } else { + err = errors.Errorf("no sending keys available for chain %s that match whitelist: %v", chainID, whitelist) + } + return common.Address{}, err + } + + states := 
ks.keyStates.ChainIDKeyID[chainID.String()] + sort.SliceStable(keys, func(i, j int) bool { + return states[keys[i].ID()].LastUsed().Before(states[keys[j].ID()].LastUsed()) + }) + + leastRecentlyUsed := keys[0] + states[leastRecentlyUsed.ID()].WasUsed() + return leastRecentlyUsed.Address, nil +} + +// CheckEnabled returns nil if state is present and enabled +// The complexity here comes because we want to return nice, useful error messages +func (ks *eth) CheckEnabled(address common.Address, chainID *big.Int) error { + if utils.IsZero(address) { + return errors.Errorf("empty address provided as input") + } + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return ErrLocked + } + var found bool + for _, k := range ks.keyRing.Eth { + if k.Address == address { + found = true + break + } + } + if !found { + return errors.Errorf("no eth key exists with address %s", address.String()) + } + states := ks.keyStates.KeyIDChainID[address.String()] + state, exists := states[chainID.String()] + if !exists { + var chainIDs []string + for cid, state := range states { + if !state.Disabled { + chainIDs = append(chainIDs, cid) + } + } + return errors.Errorf("eth key with address %s exists but is has not been enabled for chain %s (enabled only for chain IDs: %s)", address, chainID.String(), strings.Join(chainIDs, ",")) + } + if state.Disabled { + var chainIDs []string + for cid, state := range states { + if !state.Disabled { + chainIDs = append(chainIDs, cid) + } + } + return errors.Errorf("eth key with address %s exists but is disabled for chain %s (enabled only for chain IDs: %s)", address.String(), chainID.String(), strings.Join(chainIDs, ",")) + } + return nil +} + +func (ks *eth) GetState(id string, chainID *big.Int) (ethkey.State, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return ethkey.State{}, ErrLocked + } + state, exists := ks.keyStates.KeyIDChainID[id][chainID.String()] + if !exists { + return ethkey.State{}, 
errors.Errorf("state not found for eth key ID %s", id) + } + return *state, nil +} + +func (ks *eth) GetStatesForKeys(keys []ethkey.KeyV2) (states []ethkey.State, err error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + for _, state := range ks.keyStates.All { + for _, k := range keys { + if state.KeyID() == k.ID() { + states = append(states, *state) + } + } + } + sort.Slice(states, func(i, j int) bool { return states[i].KeyID() < states[j].KeyID() }) + return +} + +// Useful to fetch the ChainID for a given key +func (ks *eth) GetStateForKey(key ethkey.KeyV2) (state ethkey.State, err error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + for _, state := range ks.keyStates.All { + if state.KeyID() == key.ID() { + return *state, err + } + } + err = fmt.Errorf("no state found for key with id %s", key.ID()) + return +} + +func (ks *eth) GetStatesForChain(chainID *big.Int) (states []ethkey.State, err error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + for _, s := range ks.keyStates.ChainIDKeyID[chainID.String()] { + states = append(states, *s) + } + sort.Slice(states, func(i, j int) bool { return states[i].KeyID() < states[j].KeyID() }) + return +} + +func (ks *eth) EnabledAddressesForChain(chainID *big.Int) (addresses []common.Address, err error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if chainID == nil { + return nil, errors.New("chainID must be non-nil") + } + if ks.isLocked() { + return nil, ErrLocked + } + for _, s := range ks.keyStates.ChainIDKeyID[chainID.String()] { + if !s.Disabled { + evmAddress := s.Address.Address() + addresses = append(addresses, evmAddress) + } + } + return +} + +// XXXTestingOnlySetState is only used in tests to manually update a key's state +func (ks *eth) XXXTestingOnlySetState(state ethkey.State) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + panic(ErrLocked) + } + existingState, exists := 
ks.keyStates.ChainIDKeyID[state.EVMChainID.String()][state.KeyID()] + if !exists { + panic(fmt.Sprintf("key not found with ID %s", state.KeyID())) + } + *existingState = state + sql := `UPDATE evm.key_states SET address = :address, is_disabled = :is_disabled, evm_chain_id = :evm_chain_id, updated_at = NOW() + WHERE address = :address;` + _, err := ks.q.NamedExec(sql, state) + if err != nil { + panic(err.Error()) + } +} + +// XXXTestingOnlyAdd is only used in tests to manually add a key +func (ks *eth) XXXTestingOnlyAdd(key ethkey.KeyV2) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + panic(ErrLocked) + } + if _, found := ks.keyRing.Eth[key.ID()]; found { + panic(fmt.Sprintf("key with ID %s already exists", key.ID())) + } + err := ks.add(key) + if err != nil { + panic(err.Error()) + } +} + +// caller must hold lock! +func (ks *eth) getByID(id string) (ethkey.KeyV2, error) { + key, found := ks.keyRing.Eth[id] + if !found { + return ethkey.KeyV2{}, ErrKeyNotFound + } + return key, nil +} + +// caller must hold lock! +func (ks *eth) enabledKeysForChain(chainID *big.Int) (keys []ethkey.KeyV2) { + return ks.keysForChain(chainID, false) +} + +// caller must hold lock! +func (ks *eth) keysForChain(chainID *big.Int, includeDisabled bool) (keys []ethkey.KeyV2) { + states := ks.keyStates.ChainIDKeyID[chainID.String()] + if states == nil { + return + } + for keyID, state := range states { + if includeDisabled || !state.Disabled { + k := ks.keyRing.Eth[keyID] + keys = append(keys, k) + } + } + sort.Slice(keys, func(i, j int) bool { return keys[i].Cmp(keys[j]) < 0 }) + return keys +} + +// caller must hold lock! 
+func (ks *eth) add(key ethkey.KeyV2, chainIDs ...*big.Int) (err error) { + err = ks.safeAddKey(key, func(tx pg.Queryer) (serr error) { + for _, chainID := range chainIDs { + if serr = ks.addKey(key.Address, chainID, pg.WithQueryer(tx)); serr != nil { + return serr + } + } + return nil + }) + if len(chainIDs) > 0 { + ks.notify() + } + return err +} + +// notify notifies subscribers that eth keys have changed +func (ks *eth) notify() { + ks.subscribersMu.RLock() + defer ks.subscribersMu.RUnlock() + for _, ch := range ks.subscribers { + select { + case ch <- struct{}{}: + default: + } + } +} diff --git a/core/services/keystore/eth_test.go b/core/services/keystore/eth_test.go new file mode 100644 index 00000000..39e687c5 --- /dev/null +++ b/core/services/keystore/eth_test.go @@ -0,0 +1,826 @@ +package keystore_test + +import ( + "fmt" + "math/big" + "sort" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + commonutils "github.com/goplugin/plugin-common/pkg/utils" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +func Test_EthKeyStore(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + err := keyStore.Unlock(cltest.Password) + require.NoError(t, err) + ethKeyStore := keyStore.Eth() + reset := 
func() { + keyStore.ResetXXXTestOnly() + require.NoError(t, commonutils.JustError(db.Exec("DELETE FROM encrypted_key_rings"))) + require.NoError(t, commonutils.JustError(db.Exec("DELETE FROM evm.key_states"))) + require.NoError(t, keyStore.Unlock(cltest.Password)) + } + const statesTableName = "evm.key_states" + + t.Run("Create / GetAll / Get", func(t *testing.T) { + defer reset() + key, err := ethKeyStore.Create(&cltest.FixtureChainID) + require.NoError(t, err) + retrievedKeys, err := ethKeyStore.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(retrievedKeys)) + require.Equal(t, key.Address, retrievedKeys[0].Address) + foundKey, err := ethKeyStore.Get(key.Address.Hex()) + require.NoError(t, err) + require.Equal(t, key, foundKey) + // adds ethkey.State + cltest.AssertCount(t, db, statesTableName, 1) + var state ethkey.State + sql := fmt.Sprintf(`SELECT address, disabled, evm_chain_id, created_at, updated_at from %s LIMIT 1`, statesTableName) + require.NoError(t, db.Get(&state, sql)) + require.Equal(t, state.Address.Address(), retrievedKeys[0].Address) + // adds key to db + keyStore.ResetXXXTestOnly() + require.NoError(t, keyStore.Unlock(cltest.Password)) + retrievedKeys, err = ethKeyStore.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(retrievedKeys)) + require.Equal(t, key.Address, retrievedKeys[0].Address) + // adds 2nd key + _, err = ethKeyStore.Create(&cltest.FixtureChainID) + require.NoError(t, err) + retrievedKeys, err = ethKeyStore.GetAll() + require.NoError(t, err) + require.Equal(t, 2, len(retrievedKeys)) + }) + + t.Run("GetAll ordering", func(t *testing.T) { + defer reset() + var keys []ethkey.KeyV2 + for i := 0; i < 5; i++ { + key, err := ethKeyStore.Create(&cltest.FixtureChainID) + require.NoError(t, err) + keys = append(keys, key) + } + retrievedKeys, err := ethKeyStore.GetAll() + require.NoError(t, err) + require.Equal(t, 5, len(retrievedKeys)) + + sort.Slice(keys, func(i, j int) bool { return keys[i].Cmp(keys[j]) < 0 }) + + 
assert.Equal(t, keys, retrievedKeys) + }) + + t.Run("RemoveKey", func(t *testing.T) { + defer reset() + key, err := ethKeyStore.Create(&cltest.FixtureChainID) + require.NoError(t, err) + _, err = ethKeyStore.Delete(key.ID()) + require.NoError(t, err) + retrievedKeys, err := ethKeyStore.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(retrievedKeys)) + cltest.AssertCount(t, db, statesTableName, 0) + }) + + t.Run("Delete removes key even if evm.txes are present", func(t *testing.T) { + defer reset() + key, err := ethKeyStore.Create(&cltest.FixtureChainID) + require.NoError(t, err) + // ensure at least one state is present + cltest.AssertCount(t, db, statesTableName, 1) + + // add one eth_tx + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 0, 42, key.Address) + + _, err = ethKeyStore.Delete(key.ID()) + require.NoError(t, err) + retrievedKeys, err := ethKeyStore.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(retrievedKeys)) + cltest.AssertCount(t, db, statesTableName, 0) + }) + + t.Run("EnsureKeys / EnabledKeysForChain", func(t *testing.T) { + defer reset() + err := ethKeyStore.EnsureKeys(&cltest.FixtureChainID) + assert.NoError(t, err) + sendingKeys1, err := ethKeyStore.EnabledKeysForChain(testutils.FixtureChainID) + assert.NoError(t, err) + + require.Equal(t, 1, len(sendingKeys1)) + cltest.AssertCount(t, db, statesTableName, 1) + + err = ethKeyStore.EnsureKeys(&cltest.FixtureChainID) + assert.NoError(t, err) + sendingKeys2, err := ethKeyStore.EnabledKeysForChain(testutils.FixtureChainID) + assert.NoError(t, err) + + require.Equal(t, 1, len(sendingKeys2)) + require.Equal(t, sendingKeys1, sendingKeys2) + }) + + t.Run("EnabledKeysForChain with specified chain ID", func(t *testing.T) { + defer reset() + key, err := ethKeyStore.Create(testutils.FixtureChainID) + require.NoError(t, err) + key2, err := ethKeyStore.Create(big.NewInt(1337)) + require.NoError(t, err) + + keys, err 
:= ethKeyStore.EnabledKeysForChain(testutils.FixtureChainID) + require.NoError(t, err) + require.Len(t, keys, 1) + require.Equal(t, key, keys[0]) + + keys, err = ethKeyStore.EnabledKeysForChain(big.NewInt(1337)) + require.NoError(t, err) + require.Len(t, keys, 1) + require.Equal(t, key2, keys[0]) + + _, err = ethKeyStore.EnabledKeysForChain(nil) + assert.Error(t, err) + assert.EqualError(t, err, "chainID must be non-nil") + }) + + t.Run("EnabledAddressesForChain with specified chain ID", func(t *testing.T) { + defer reset() + key, err := ethKeyStore.Create(testutils.FixtureChainID) + require.NoError(t, err) + key2, err := ethKeyStore.Create(big.NewInt(1337)) + require.NoError(t, err) + testutils.AssertCount(t, db, "evm.key_states", 2) + keys, err := ethKeyStore.GetAll() + require.NoError(t, err) + assert.Len(t, keys, 2) + + //get enabled addresses for FixtureChainID + enabledAddresses, err := ethKeyStore.EnabledAddressesForChain(testutils.FixtureChainID) + require.NoError(t, err) + require.Len(t, enabledAddresses, 1) + require.Equal(t, key.Address, enabledAddresses[0]) + + //get enabled addresses for chain 1337 + enabledAddresses, err = ethKeyStore.EnabledAddressesForChain(big.NewInt(1337)) + require.NoError(t, err) + require.Len(t, enabledAddresses, 1) + require.Equal(t, key2.Address, enabledAddresses[0]) + + // /get enabled addresses for nil chain ID + _, err = ethKeyStore.EnabledAddressesForChain(nil) + assert.Error(t, err) + assert.EqualError(t, err, "chainID must be non-nil") + + // disable the key for chain FixtureChainID + err = ethKeyStore.Disable(key.Address, testutils.FixtureChainID) + require.NoError(t, err) + + enabledAddresses, err = ethKeyStore.EnabledAddressesForChain(testutils.FixtureChainID) + require.NoError(t, err) + assert.Len(t, enabledAddresses, 0) + enabledAddresses, err = ethKeyStore.EnabledAddressesForChain(big.NewInt(1337)) + require.NoError(t, err) + assert.Len(t, enabledAddresses, 1) + require.Equal(t, key2.Address, enabledAddresses[0]) 
+ }) +} + +func Test_EthKeyStore_GetRoundRobinAddress(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + ethKeyStore := keyStore.Eth() + + t.Run("should error when no addresses", func(t *testing.T) { + _, err := ethKeyStore.GetRoundRobinAddress(testutils.FixtureChainID) + require.Error(t, err) + }) + + // create keys + // - key 1 + // enabled - fixture + // enabled - simulated + // - key 2 + // enabled - fixture + // disabled - simulated + // - key 3 + // enabled - simulated + // - key 4 + // enabled - fixture + k1, _ := cltest.MustInsertRandomKeyNoChains(t, ethKeyStore) + require.NoError(t, ethKeyStore.Add(k1.Address, testutils.FixtureChainID)) + require.NoError(t, ethKeyStore.Add(k1.Address, testutils.SimulatedChainID)) + require.NoError(t, ethKeyStore.Enable(k1.Address, testutils.FixtureChainID)) + require.NoError(t, ethKeyStore.Enable(k1.Address, testutils.SimulatedChainID)) + + k2, _ := cltest.MustInsertRandomKeyNoChains(t, ethKeyStore) + require.NoError(t, ethKeyStore.Add(k2.Address, testutils.FixtureChainID)) + require.NoError(t, ethKeyStore.Add(k2.Address, testutils.SimulatedChainID)) + require.NoError(t, ethKeyStore.Enable(k2.Address, testutils.FixtureChainID)) + require.NoError(t, ethKeyStore.Enable(k2.Address, testutils.SimulatedChainID)) + require.NoError(t, ethKeyStore.Disable(k2.Address, testutils.SimulatedChainID)) + + k3, _ := cltest.MustInsertRandomKeyNoChains(t, ethKeyStore) + require.NoError(t, ethKeyStore.Add(k3.Address, testutils.SimulatedChainID)) + require.NoError(t, ethKeyStore.Enable(k3.Address, testutils.SimulatedChainID)) + + k4, _ := cltest.MustInsertRandomKeyNoChains(t, ethKeyStore) + require.NoError(t, ethKeyStore.Add(k4.Address, testutils.FixtureChainID)) + require.NoError(t, ethKeyStore.Enable(k4.Address, testutils.FixtureChainID)) + + t.Run("with no address filter, rotates between all enabled addresses", func(t 
*testing.T) { + address1, err := ethKeyStore.GetRoundRobinAddress(testutils.FixtureChainID) + require.NoError(t, err) + address2, err := ethKeyStore.GetRoundRobinAddress(testutils.FixtureChainID) + require.NoError(t, err) + address3, err := ethKeyStore.GetRoundRobinAddress(testutils.FixtureChainID) + require.NoError(t, err) + address4, err := ethKeyStore.GetRoundRobinAddress(testutils.FixtureChainID) + require.NoError(t, err) + address5, err := ethKeyStore.GetRoundRobinAddress(testutils.FixtureChainID) + require.NoError(t, err) + address6, err := ethKeyStore.GetRoundRobinAddress(testutils.FixtureChainID) + require.NoError(t, err) + + assert.NotEqual(t, address1, address2) + assert.NotEqual(t, address2, address3) + assert.NotEqual(t, address1, address3) + assert.Equal(t, address1, address4) + assert.Equal(t, address2, address5) + assert.Equal(t, address3, address6) + }) + + t.Run("with address filter, rotates between given addresses that match sending keys", func(t *testing.T) { + { + // k3 is a disabled address for FixtureChainID so even though it's whitelisted, it will be ignored + addresses := []common.Address{k4.Address, k3.Address, k1.Address, k2.Address, testutils.NewAddress()} + + address1, err := ethKeyStore.GetRoundRobinAddress(testutils.FixtureChainID, addresses...) + require.NoError(t, err) + address2, err := ethKeyStore.GetRoundRobinAddress(testutils.FixtureChainID, addresses...) + require.NoError(t, err) + address3, err := ethKeyStore.GetRoundRobinAddress(testutils.FixtureChainID, addresses...) + require.NoError(t, err) + address4, err := ethKeyStore.GetRoundRobinAddress(testutils.FixtureChainID, addresses...) 
+ require.NoError(t, err) + + assert.NotEqual(t, k3.Address, address1) + assert.NotEqual(t, k3.Address, address2) + assert.NotEqual(t, k3.Address, address3) + assert.NotEqual(t, address1, address2) + assert.NotEqual(t, address1, address3) + assert.NotEqual(t, address2, address3) + assert.Equal(t, address1, address4) + } + + { + + // k2 and k4 are disabled address for SimulatedChainID so even though it's whitelisted, it will be ignored + addresses := []common.Address{k4.Address, k3.Address, k1.Address, k2.Address, testutils.NewAddress()} + + address1, err := ethKeyStore.GetRoundRobinAddress(testutils.SimulatedChainID, addresses...) + require.NoError(t, err) + address2, err := ethKeyStore.GetRoundRobinAddress(testutils.SimulatedChainID, addresses...) + require.NoError(t, err) + address3, err := ethKeyStore.GetRoundRobinAddress(testutils.SimulatedChainID, addresses...) + require.NoError(t, err) + address4, err := ethKeyStore.GetRoundRobinAddress(testutils.SimulatedChainID, addresses...) + require.NoError(t, err) + + assert.True(t, address1 == k1.Address || address1 == k3.Address) + assert.True(t, address2 == k1.Address || address2 == k3.Address) + assert.NotEqual(t, address1, address2) + assert.Equal(t, address1, address3) + assert.Equal(t, address2, address4) + } + }) + + t.Run("with address filter when no address matches", func(t *testing.T) { + addr := testutils.NewAddress() + _, err := ethKeyStore.GetRoundRobinAddress(testutils.FixtureChainID, []common.Address{addr}...) 
+ require.Error(t, err) + require.Equal(t, fmt.Sprintf("no sending keys available for chain %s that match whitelist: [%s]", testutils.FixtureChainID.String(), addr.Hex()), err.Error()) + }) +} + +func Test_EthKeyStore_SignTx(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + config := configtest.NewTestGeneralConfig(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + ethKeyStore := keyStore.Eth() + + k, _ := cltest.MustInsertRandomKey(t, ethKeyStore) + + chainID := big.NewInt(evmclient.NullClientChainID) + tx := cltest.NewLegacyTransaction(0, testutils.NewAddress(), big.NewInt(53), 21000, big.NewInt(1000000000), []byte{1, 2, 3, 4}) + + randomAddress := testutils.NewAddress() + _, err := ethKeyStore.SignTx(randomAddress, tx, chainID) + require.EqualError(t, err, "Key not found") + + signed, err := ethKeyStore.SignTx(k.Address, tx, chainID) + require.NoError(t, err) + + require.NotEqual(t, tx, signed) +} + +func Test_EthKeyStore_E2E(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + err := keyStore.Unlock(cltest.Password) + require.NoError(t, err) + ks := keyStore.Eth() + reset := func() { + keyStore.ResetXXXTestOnly() + require.NoError(t, commonutils.JustError(db.Exec("DELETE FROM encrypted_key_rings"))) + require.NoError(t, commonutils.JustError(db.Exec("DELETE FROM evm.key_states"))) + require.NoError(t, keyStore.Unlock(cltest.Password)) + } + + t.Run("initializes with an empty state", func(t *testing.T) { + defer reset() + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + }) + + t.Run("errors when getting non-existent ID", func(t *testing.T) { + defer reset() + _, err := ks.Get("non-existent-id") + require.Error(t, err) + }) + + t.Run("creates a key", func(t *testing.T) { + defer reset() + key, err := ks.Create(&cltest.FixtureChainID) + require.NoError(t, err) + retrievedKey, err := 
ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, key, retrievedKey) + }) + + t.Run("imports and exports a key", func(t *testing.T) { + defer reset() + key, err := ks.Create(&cltest.FixtureChainID) + require.NoError(t, err) + exportJSON, err := ks.Export(key.ID(), cltest.Password) + require.NoError(t, err) + _, err = ks.Delete(key.ID()) + require.NoError(t, err) + _, err = ks.Get(key.ID()) + require.Error(t, err) + importedKey, err := ks.Import(exportJSON, cltest.Password, &cltest.FixtureChainID) + require.NoError(t, err) + require.Equal(t, key.ID(), importedKey.ID()) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, importedKey, retrievedKey) + }) + + t.Run("adds an externally created key / deletes a key", func(t *testing.T) { + defer reset() + newKey, err := ethkey.NewV2() + require.NoError(t, err) + ks.XXXTestingOnlyAdd(newKey) + keys, err := ks.GetAll() + require.NoError(t, err) + assert.Equal(t, 1, len(keys)) + _, err = ks.Delete(newKey.ID()) + require.NoError(t, err) + keys, err = ks.GetAll() + require.NoError(t, err) + assert.Equal(t, 0, len(keys)) + _, err = ks.Get(newKey.ID()) + assert.Error(t, err) + _, err = ks.Delete(newKey.ID()) + assert.Error(t, err) + }) + + t.Run("imports a key exported from a v1 keystore", func(t *testing.T) { + exportedKey := `{"address":"0dd359b4f22a30e44b2fd744b679971941865820","crypto":{"cipher":"aes-128-ctr","ciphertext":"b30af964a3b3f37894e599446b4cf2314bbfcd1062e6b35b620d3d20bd9965cc","cipherparams":{"iv":"58a8d75629cc1945da7cf8c24520d1dc"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"c352887e9d427d8a6a1869082619b73fac4566082a99f6e367d126f11b434f28"},"mac":"fd76a588210e0bf73d01332091e0e83a4584ee2df31eaec0e27f9a1b94f024b4"},"id":"a5ee0802-1d7b-45b6-aeb8-ea8a3351e715","version":3}` + importedKey, err := ks.Import([]byte(exportedKey), "p4SsW0rD1!@#_", &cltest.FixtureChainID) + require.NoError(t, err) + assert.Equal(t, 
"0x0dd359b4f22a30E44b2fD744B679971941865820", importedKey.ID()) + + k, err := ks.Import([]byte(exportedKey), cltest.Password, &cltest.FixtureChainID) + + assert.Empty(t, k) + assert.Error(t, err) + }) + + t.Run("fails to export a non-existent key", func(t *testing.T) { + k, err := ks.Export("non-existent", cltest.Password) + + assert.Empty(t, k) + assert.Error(t, err) + }) + + t.Run("getting keys states", func(t *testing.T) { + defer reset() + + t.Run("returns states for keys", func(t *testing.T) { + k1, err := ethkey.NewV2() + require.NoError(t, err) + k2, err := ethkey.NewV2() + require.NoError(t, err) + ks.XXXTestingOnlyAdd(k1) + ks.XXXTestingOnlyAdd(k2) + require.NoError(t, ks.Add(k1.Address, testutils.FixtureChainID)) + require.NoError(t, ks.Enable(k1.Address, testutils.FixtureChainID)) + + states, err := ks.GetStatesForKeys([]ethkey.KeyV2{k1, k2}) + require.NoError(t, err) + assert.Len(t, states, 1) + + chainStates, err := ks.GetStatesForChain(testutils.FixtureChainID) + require.NoError(t, err) + assert.Len(t, chainStates, 2) // one created here, one created above + + chainStates, err = ks.GetStatesForChain(testutils.SimulatedChainID) + require.NoError(t, err) + assert.Len(t, chainStates, 0) + }) + }) +} + +func Test_EthKeyStore_SubscribeToKeyChanges(t *testing.T) { + t.Parallel() + + chDone := make(chan struct{}) + defer func() { close(chDone) }() + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + ks := keyStore.Eth() + chSub, unsubscribe := ks.SubscribeToKeyChanges() + defer unsubscribe() + + var count atomic.Int32 + + assertCountAtLeast := func(expected int32) { + require.Eventually( + t, + func() bool { return count.Load() >= expected }, + 10*time.Second, + 100*time.Millisecond, + fmt.Sprintf("insufficient number of callbacks triggered. 
Expected %d, got %d", expected, count.Load()), + ) + } + + go func() { + for { + select { + case _, ok := <-chSub: + if !ok { + return + } + count.Add(1) + case <-chDone: + return + } + } + }() + + drainAndReset := func() { + for len(chSub) > 0 { + <-chSub + } + count.Store(0) + } + + err := ks.EnsureKeys(&cltest.FixtureChainID) + require.NoError(t, err) + assertCountAtLeast(1) + + drainAndReset() + + // Create the key includes a state, triggering notify + k1, err := ks.Create(testutils.FixtureChainID) + require.NoError(t, err) + assertCountAtLeast(1) + + drainAndReset() + + // Enabling the key for a new state triggers the notification callback again + require.NoError(t, ks.Add(k1.Address, testutils.SimulatedChainID)) + require.NoError(t, ks.Enable(k1.Address, testutils.SimulatedChainID)) + assertCountAtLeast(1) + + drainAndReset() + + // Disabling triggers a notify + require.NoError(t, ks.Disable(k1.Address, testutils.SimulatedChainID)) + assertCountAtLeast(1) +} + +func Test_EthKeyStore_Enable(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + ks := keyStore.Eth() + + t.Run("already existing disabled key gets enabled", func(t *testing.T) { + k, _ := cltest.MustInsertRandomKeyNoChains(t, ks) + require.NoError(t, ks.Add(k.Address, testutils.SimulatedChainID)) + require.NoError(t, ks.Disable(k.Address, testutils.SimulatedChainID)) + require.NoError(t, ks.Enable(k.Address, testutils.SimulatedChainID)) + key, err := ks.GetState(k.Address.Hex(), testutils.SimulatedChainID) + require.NoError(t, err) + require.Equal(t, key.Disabled, false) + }) + + t.Run("creates key, deletes it unsafely and then enable creates it again", func(t *testing.T) { + k, _ := cltest.MustInsertRandomKeyNoChains(t, ks) + require.NoError(t, ks.Add(k.Address, testutils.SimulatedChainID)) + _, err := db.Exec("DELETE FROM evm.key_states WHERE address = $1", k.Address) + require.NoError(t, 
err) + require.NoError(t, ks.Enable(k.Address, testutils.SimulatedChainID)) + key, err := ks.GetState(k.Address.Hex(), testutils.SimulatedChainID) + require.NoError(t, err) + require.Equal(t, key.Disabled, false) + }) + + t.Run("creates key and enables it if it exists in the keystore, but is missing from key states db table", func(t *testing.T) { + k, _ := cltest.MustInsertRandomKeyNoChains(t, ks) + require.NoError(t, ks.Enable(k.Address, testutils.SimulatedChainID)) + key, err := ks.GetState(k.Address.Hex(), testutils.SimulatedChainID) + require.NoError(t, err) + require.Equal(t, key.Disabled, false) + }) + + t.Run("errors if key is not present in keystore", func(t *testing.T) { + addrNotInKs := testutils.NewAddress() + require.Error(t, ks.Enable(addrNotInKs, testutils.SimulatedChainID)) + _, err := ks.GetState(addrNotInKs.Hex(), testutils.SimulatedChainID) + require.Error(t, err) + }) +} + +func Test_EthKeyStore_EnsureKeys(t *testing.T) { + t.Parallel() + + t.Run("creates one unique key per chain if none exist", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + ks := keyStore.Eth() + + testutils.AssertCount(t, db, "evm.key_states", 0) + err := ks.EnsureKeys(testutils.FixtureChainID, testutils.SimulatedChainID) + require.NoError(t, err) + testutils.AssertCount(t, db, "evm.key_states", 2) + keys, err := ks.GetAll() + require.NoError(t, err) + assert.Len(t, keys, 2) + }) + + t.Run("does nothing if a key exists for a chain", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + ks := keyStore.Eth() + + // Add one enabled key + _, err := ks.Create(testutils.FixtureChainID) + require.NoError(t, err) + testutils.AssertCount(t, db, "evm.key_states", 1) + keys, err := ks.GetAll() + require.NoError(t, err) + assert.Len(t, keys, 1) + + // this adds one more key for the additional 
chain + err = ks.EnsureKeys(testutils.FixtureChainID, testutils.SimulatedChainID) + require.NoError(t, err) + testutils.AssertCount(t, db, "evm.key_states", 2) + keys, err = ks.GetAll() + require.NoError(t, err) + assert.Len(t, keys, 2) + }) + + t.Run("does nothing if a key exists but is disabled for a chain", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + ks := keyStore.Eth() + + // Add one enabled key + k, err := ks.Create(testutils.FixtureChainID) + require.NoError(t, err) + testutils.AssertCount(t, db, "evm.key_states", 1) + keys, err := ks.GetAll() + require.NoError(t, err) + assert.Len(t, keys, 1) + + // disable the key + err = ks.Disable(k.Address, testutils.FixtureChainID) + require.NoError(t, err) + + // this does nothing + err = ks.EnsureKeys(testutils.FixtureChainID) + require.NoError(t, err) + testutils.AssertCount(t, db, "evm.key_states", 1) + keys, err = ks.GetAll() + require.NoError(t, err) + assert.Len(t, keys, 1) + state, err := ks.GetState(k.Address.Hex(), testutils.FixtureChainID) + require.NoError(t, err) + assert.True(t, state.Disabled) + }) +} + +func Test_EthKeyStore_Delete(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + ks := keyStore.Eth() + + randKeyID := utils.RandomAddress().Hex() + _, err := ks.Delete(randKeyID) + require.Error(t, err) + assert.Contains(t, err.Error(), "Key not found") + + _, addr1 := cltest.MustInsertRandomKey(t, ks) + _, addr2 := cltest.MustInsertRandomKey(t, ks) + cltest.MustInsertRandomKey(t, ks, *ubig.New(testutils.SimulatedChainID)) + require.NoError(t, ks.Add(addr1, testutils.SimulatedChainID)) + require.NoError(t, ks.Enable(addr1, testutils.SimulatedChainID)) + + testutils.AssertCount(t, db, "evm.key_states", 4) + keys, err := ks.GetAll() + require.NoError(t, err) + assert.Len(t, keys, 3) + _, err 
= ks.GetState(addr1.Hex(), testutils.FixtureChainID) + require.NoError(t, err) + _, err = ks.GetState(addr1.Hex(), testutils.SimulatedChainID) + require.NoError(t, err) + + deletedK, err := ks.Delete(addr1.String()) + require.NoError(t, err) + assert.Equal(t, addr1, deletedK.Address) + + testutils.AssertCount(t, db, "evm.key_states", 2) + keys, err = ks.GetAll() + require.NoError(t, err) + assert.Len(t, keys, 2) + _, err = ks.GetState(addr1.Hex(), testutils.FixtureChainID) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("state not found for eth key ID %s", addr1.Hex())) + _, err = ks.GetState(addr1.Hex(), testutils.SimulatedChainID) + require.Error(t, err) + assert.Contains(t, err.Error(), fmt.Sprintf("state not found for eth key ID %s", addr1.Hex())) + _, err = ks.GetState(addr2.Hex(), testutils.FixtureChainID) + require.NoError(t, err) +} + +func Test_EthKeyStore_CheckEnabled(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + ks := keyStore.Eth() + + // create keys + // - key 1 + // enabled - fixture + // enabled - simulated + // - key 2 + // enabled - fixture + // disabled - simulated + // - key 3 + // enabled - simulated + // - key 4 + // enabled - fixture + k1, addr1 := cltest.MustInsertRandomKeyNoChains(t, ks) + require.NoError(t, ks.Add(k1.Address, testutils.SimulatedChainID)) + require.NoError(t, ks.Add(k1.Address, testutils.FixtureChainID)) + require.NoError(t, ks.Enable(k1.Address, testutils.SimulatedChainID)) + require.NoError(t, ks.Enable(k1.Address, testutils.FixtureChainID)) + + k2, addr2 := cltest.MustInsertRandomKeyNoChains(t, ks) + require.NoError(t, ks.Add(k2.Address, testutils.FixtureChainID)) + require.NoError(t, ks.Add(k2.Address, testutils.SimulatedChainID)) + require.NoError(t, ks.Enable(k2.Address, testutils.FixtureChainID)) + require.NoError(t, ks.Enable(k2.Address, testutils.SimulatedChainID)) + 
require.NoError(t, ks.Disable(k2.Address, testutils.SimulatedChainID)) + + k3, addr3 := cltest.MustInsertRandomKeyNoChains(t, ks) + require.NoError(t, ks.Add(k3.Address, testutils.SimulatedChainID)) + require.NoError(t, ks.Enable(k3.Address, testutils.SimulatedChainID)) + + t.Run("enabling the same key multiple times does not create duplicate states", func(t *testing.T) { + require.NoError(t, ks.Enable(k1.Address, testutils.FixtureChainID)) + require.NoError(t, ks.Enable(k1.Address, testutils.FixtureChainID)) + require.NoError(t, ks.Enable(k1.Address, testutils.FixtureChainID)) + require.NoError(t, ks.Enable(k1.Address, testutils.FixtureChainID)) + + states, err := ks.GetStatesForKeys([]ethkey.KeyV2{k1}) + require.NoError(t, err) + assert.Len(t, states, 2) + var cids []*big.Int + for i := range states { + cid := states[i].EVMChainID.ToInt() + cids = append(cids, cid) + } + assert.Contains(t, cids, testutils.FixtureChainID) + assert.Contains(t, cids, testutils.SimulatedChainID) + + for _, s := range states { + assert.Equal(t, addr1, s.Address.Address()) + } + }) + + t.Run("returns nil when key is enabled for given chain", func(t *testing.T) { + err := ks.CheckEnabled(addr1, testutils.FixtureChainID) + assert.NoError(t, err) + err = ks.CheckEnabled(addr1, testutils.SimulatedChainID) + assert.NoError(t, err) + }) + + t.Run("returns error when key does not exist", func(t *testing.T) { + addr := utils.RandomAddress() + err := ks.CheckEnabled(addr, testutils.FixtureChainID) + assert.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("no eth key exists with address %s", addr.Hex())) + }) + + t.Run("returns error when key exists but has never been enabled (no state) for the given chain", func(t *testing.T) { + err := ks.CheckEnabled(addr3, testutils.FixtureChainID) + assert.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("eth key with address %s exists but is has not been enabled for chain 0 (enabled only for chain IDs: 1337)", addr3.Hex())) + }) + + 
t.Run("returns error when key exists but is disabled for the given chain", func(t *testing.T) { + err := ks.CheckEnabled(addr2, testutils.SimulatedChainID) + assert.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("eth key with address %s exists but is disabled for chain 1337 (enabled only for chain IDs: 0)", addr2.Hex())) + }) +} + +func Test_EthKeyStore_Disable(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + ks := keyStore.Eth() + + t.Run("creates key, deletes it unsafely and then enable creates it again", func(t *testing.T) { + k, _ := cltest.MustInsertRandomKeyNoChains(t, ks) + require.NoError(t, ks.Add(k.Address, testutils.SimulatedChainID)) + _, err := db.Exec("DELETE FROM evm.key_states WHERE address = $1", k.Address) + require.NoError(t, err) + require.NoError(t, ks.Disable(k.Address, testutils.SimulatedChainID)) + key, err := ks.GetState(k.Address.Hex(), testutils.SimulatedChainID) + require.NoError(t, err) + require.Equal(t, key.Disabled, true) + }) + + t.Run("creates key and enables it if it exists in the keystore, but is missing from key states db table", func(t *testing.T) { + k, _ := cltest.MustInsertRandomKeyNoChains(t, ks) + require.NoError(t, ks.Disable(k.Address, testutils.SimulatedChainID)) + key, err := ks.GetState(k.Address.Hex(), testutils.SimulatedChainID) + require.NoError(t, err) + require.Equal(t, key.Disabled, true) + }) + + t.Run("errors if key is not present in keystore", func(t *testing.T) { + addrNotInKs := testutils.NewAddress() + require.Error(t, ks.Disable(addrNotInKs, testutils.SimulatedChainID)) + _, err := ks.GetState(addrNotInKs.Hex(), testutils.SimulatedChainID) + require.Error(t, err) + }) +} diff --git a/core/services/keystore/helpers_test.go b/core/services/keystore/helpers_test.go new file mode 100644 index 00000000..b4f1a21d --- /dev/null +++ b/core/services/keystore/helpers_test.go @@ -0,0 +1,40 
@@ +package keystore + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func mustNewEthKey(t *testing.T) *ethkey.KeyV2 { + key, err := ethkey.NewV2() + require.NoError(t, err) + return &key +} + +func ExposedNewMaster(t *testing.T, db *sqlx.DB, cfg pg.QConfig) *master { + return newMaster(db, utils.FastScryptParams, logger.TestLogger(t), cfg) +} + +func (m *master) ExportedSave() error { + m.lock.Lock() + defer m.lock.Unlock() + return m.save() +} + +func (m *master) ResetXXXTestOnly() { + m.keyRing = newKeyRing() + m.keyStates = newKeyStates() + m.password = "" +} + +func (m *master) SetPassword(pw string) { + m.password = pw +} diff --git a/core/services/keystore/keys/cosmoskey/export.go b/core/services/keystore/keys/cosmoskey/export.go new file mode 100644 index 00000000..de2358da --- /dev/null +++ b/core/services/keystore/keys/cosmoskey/export.go @@ -0,0 +1,48 @@ +package cosmoskey + +import ( + "encoding/hex" + + "github.com/ethereum/go-ethereum/accounts/keystore" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const keyTypeIdentifier = "Cosmos" + +// FromEncryptedJSON gets key from json and password +func FromEncryptedJSON(keyJSON []byte, password string) (Key, error) { + return keys.FromEncryptedJSON( + keyTypeIdentifier, + keyJSON, + password, + adulteratedPassword, + func(_ keys.EncryptedKeyExport, rawPrivKey []byte) (Key, error) { + return Raw(rawPrivKey).Key(), nil + }, + ) +} + +// ToEncryptedJSON returns encrypted JSON representing key +func (key Key) ToEncryptedJSON(password string, scryptParams utils.ScryptParams) (export []byte, err error) { + return keys.ToEncryptedJSON( + keyTypeIdentifier, + 
key.Raw(), + key, + password, + scryptParams, + adulteratedPassword, + func(id string, key Key, cryptoJSON keystore.CryptoJSON) keys.EncryptedKeyExport { + return keys.EncryptedKeyExport{ + KeyType: id, + PublicKey: hex.EncodeToString(key.PublicKey().Bytes()), + Crypto: cryptoJSON, + } + }, + ) +} + +func adulteratedPassword(password string) string { + return "cosmoskey" + password +} diff --git a/core/services/keystore/keys/cosmoskey/export_test.go b/core/services/keystore/keys/cosmoskey/export_test.go new file mode 100644 index 00000000..e79567bb --- /dev/null +++ b/core/services/keystore/keys/cosmoskey/export_test.go @@ -0,0 +1,19 @@ +package cosmoskey + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" +) + +func TestCosmosKeys_ExportImport(t *testing.T) { + keys.RunKeyExportImportTestcase(t, createKey, decryptKey) +} + +func createKey() (keys.KeyType, error) { + return New(), nil +} + +func decryptKey(keyJSON []byte, password string) (keys.KeyType, error) { + return FromEncryptedJSON(keyJSON, password) +} diff --git a/core/services/keystore/keys/cosmoskey/key.go b/core/services/keystore/keys/cosmoskey/key.go new file mode 100644 index 00000000..3e516a21 --- /dev/null +++ b/core/services/keystore/keys/cosmoskey/key.go @@ -0,0 +1,99 @@ +package cosmoskey + +import ( + "crypto/ecdsa" + cryptorand "crypto/rand" + "fmt" + "io" + "math/big" + + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/ethereum/go-ethereum/crypto" + + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" +) + +var secpSigningAlgo, _ = keyring.NewSigningAlgoFromString(string(hd.Secp256k1Type), []keyring.SignatureAlgo{hd.Secp256k1}) + +type Raw []byte + +func (raw Raw) Key() Key { + d := big.NewInt(0).SetBytes(raw) + privKey := secpSigningAlgo.Generate()(d.Bytes()) + return Key{ + d: d, + k: privKey, + } +} + +func (raw Raw) String() string { + return "" +} + +func (raw Raw) GoString() string { + 
return raw.String() +} + +var _ fmt.GoStringer = &Key{} + +// Key represents Cosmos key +type Key struct { + d *big.Int + k cryptotypes.PrivKey +} + +// New creates new Key +func New() Key { + return newFrom(cryptorand.Reader) +} + +// MustNewInsecure return Key +func MustNewInsecure(reader io.Reader) Key { + return newFrom(reader) +} + +func newFrom(reader io.Reader) Key { + rawKey, err := ecdsa.GenerateKey(crypto.S256(), reader) + if err != nil { + panic(err) + } + privKey := secpSigningAlgo.Generate()(rawKey.D.Bytes()) + if err != nil { + panic(err) + } + + return Key{ + d: rawKey.D, + k: privKey, + } +} + +func (key Key) ID() string { + return key.PublicKeyStr() +} + +func (key Key) PublicKey() (pubKey cryptotypes.PubKey) { + return key.k.PubKey() +} + +func (key Key) PublicKeyStr() string { + return fmt.Sprintf("%X", key.k.PubKey().Bytes()) +} + +func (key Key) Raw() Raw { + return key.d.Bytes() +} + +// ToPrivKey returns the key usable for signing. +func (key Key) ToPrivKey() cryptotypes.PrivKey { + return key.k +} + +func (key Key) String() string { + return fmt.Sprintf("CosmosKey{PrivateKey: , Public Key: %s}", key.PublicKeyStr()) +} + +func (key Key) GoString() string { + return key.String() +} diff --git a/core/services/keystore/keys/csakey/export.go b/core/services/keystore/keys/csakey/export.go new file mode 100644 index 00000000..2bb9b44e --- /dev/null +++ b/core/services/keystore/keys/csakey/export.go @@ -0,0 +1,44 @@ +package csakey + +import ( + "github.com/ethereum/go-ethereum/accounts/keystore" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const keyTypeIdentifier = "CSA" + +func FromEncryptedJSON(keyJSON []byte, password string) (KeyV2, error) { + return keys.FromEncryptedJSON( + keyTypeIdentifier, + keyJSON, + password, + adulteratedPassword, + func(_ keys.EncryptedKeyExport, rawPrivKey []byte) (KeyV2, error) { + return Raw(rawPrivKey).Key(), nil + }, + ) +} + +func (k 
KeyV2) ToEncryptedJSON(password string, scryptParams utils.ScryptParams) (export []byte, err error) { + return keys.ToEncryptedJSON( + keyTypeIdentifier, + k.Raw(), + k, + password, + scryptParams, + adulteratedPassword, + func(id string, key KeyV2, cryptoJSON keystore.CryptoJSON) keys.EncryptedKeyExport { + return keys.EncryptedKeyExport{ + KeyType: id, + PublicKey: key.PublicKeyString(), + Crypto: cryptoJSON, + } + }, + ) +} + +func adulteratedPassword(password string) string { + return "csakey" + password +} diff --git a/core/services/keystore/keys/csakey/export_test.go b/core/services/keystore/keys/csakey/export_test.go new file mode 100644 index 00000000..1981a312 --- /dev/null +++ b/core/services/keystore/keys/csakey/export_test.go @@ -0,0 +1,19 @@ +package csakey + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" +) + +func TestCSAKeys_ExportImport(t *testing.T) { + keys.RunKeyExportImportTestcase(t, createKey, decryptKey) +} + +func createKey() (keys.KeyType, error) { + return NewV2() +} + +func decryptKey(keyJSON []byte, password string) (keys.KeyType, error) { + return FromEncryptedJSON(keyJSON, password) +} diff --git a/core/services/keystore/keys/csakey/key.go b/core/services/keystore/keys/csakey/key.go new file mode 100644 index 00000000..b9635fc9 --- /dev/null +++ b/core/services/keystore/keys/csakey/key.go @@ -0,0 +1,65 @@ +package csakey + +import ( + "crypto/ed25519" + "errors" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/utils/crypto" +) + +type Key struct { + ID uint + PublicKey crypto.PublicKey + privateKey []byte + EncryptedPrivateKey crypto.EncryptedPrivateKey + CreatedAt time.Time + UpdatedAt time.Time +} + +// New creates a new CSA key consisting of an ed25519 key. It encrypts the +// Key with the passphrase. 
+func New(passphrase string, scryptParams utils.ScryptParams) (*Key, error) { + pubkey, privkey, err := ed25519.GenerateKey(nil) + if err != nil { + return nil, err + } + + encPrivkey, err := crypto.NewEncryptedPrivateKey(privkey, passphrase, scryptParams) + if err != nil { + return nil, err + } + + return &Key{ + PublicKey: crypto.PublicKey(pubkey), + privateKey: privkey, + EncryptedPrivateKey: *encPrivkey, + }, nil +} + +func (k *Key) Unlock(password string) error { + pk, err := k.EncryptedPrivateKey.Decrypt(password) + if err != nil { + return err + } + k.privateKey = pk + return nil +} + +func (k *Key) Unsafe_GetPrivateKey() ([]byte, error) { + if k.privateKey == nil { + return nil, errors.New("key has not been unlocked") + } + + return k.privateKey, nil +} + +func (k Key) ToV2() KeyV2 { + pk := ed25519.PrivateKey(k.privateKey) + return KeyV2{ + privateKey: &pk, + PublicKey: ed25519.PublicKey(k.PublicKey), + Version: 1, + } +} diff --git a/core/services/keystore/keys/csakey/key_test.go b/core/services/keystore/keys/csakey/key_test.go new file mode 100644 index 00000000..1a22f636 --- /dev/null +++ b/core/services/keystore/keys/csakey/key_test.go @@ -0,0 +1,58 @@ +package csakey + +import ( + "crypto/ed25519" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func Test_New(t *testing.T) { + passphrase := "passphrase" + key, err := New(passphrase, utils.FastScryptParams) + require.NoError(t, err) + + rawprivkey, err := key.EncryptedPrivateKey.Decrypt("passphrase") + require.NoError(t, err) + + privkey := ed25519.PrivateKey(rawprivkey) + assert.Equal(t, ed25519.PublicKey(key.PublicKey), privkey.Public()) +} + +func Test_Unlock(t *testing.T) { + passphrase := "passphrase" + key, err := New(passphrase, utils.FastScryptParams) + require.NoError(t, err) + + err = key.Unlock(passphrase) + require.NoError(t, err) + + expected, err := 
key.EncryptedPrivateKey.Decrypt(passphrase) + require.NoError(t, err) + + assert.Equal(t, expected, key.privateKey) +} + +func Test_GetPrivateKey(t *testing.T) { + passphrase := "passphrase" + key, err := New(passphrase, utils.FastScryptParams) + require.NoError(t, err) + + privkey, err := key.Unsafe_GetPrivateKey() + require.NoError(t, err) + assert.Equal(t, key.privateKey, privkey) +} + +func TestKey_ToV2(t *testing.T) { + passphrase := "passphrase" + key, err := New(passphrase, utils.FastScryptParams) + require.NoError(t, err) + + v2Key := key.ToV2() + + assert.Equal(t, key.PublicKey.String(), v2Key.PublicKeyString()) + assert.Equal(t, ed25519.PrivateKey(key.privateKey), *v2Key.privateKey) +} diff --git a/core/services/keystore/keys/csakey/key_v2.go b/core/services/keystore/keys/csakey/key_v2.go new file mode 100644 index 00000000..76822398 --- /dev/null +++ b/core/services/keystore/keys/csakey/key_v2.go @@ -0,0 +1,98 @@ +package csakey + +import ( + "crypto/ed25519" + cryptorand "crypto/rand" + "encoding/hex" + "fmt" + "math/big" + + "github.com/goplugin/wsrpc/credentials" +) + +type Raw []byte + +func (raw Raw) Key() KeyV2 { + privKey := ed25519.PrivateKey(raw) + return KeyV2{ + privateKey: &privKey, + PublicKey: ed25519PubKeyFromPrivKey(privKey), + } +} + +func (raw Raw) String() string { + return "" +} + +func (raw Raw) GoString() string { + return raw.String() +} + +func (raw Raw) Bytes() []byte { + return ([]byte)(raw) +} + +var _ fmt.GoStringer = &KeyV2{} + +type KeyV2 struct { + privateKey *ed25519.PrivateKey + PublicKey ed25519.PublicKey + Version int +} + +func (k KeyV2) StaticSizedPublicKey() (sspk credentials.StaticSizedPublicKey) { + if len(k.PublicKey) != ed25519.PublicKeySize { + panic(fmt.Sprintf("expected ed25519.PublicKey to have len %d but got len %d", ed25519.PublicKeySize, len(k.PublicKey))) + } + copy(sspk[:], k.PublicKey) + return sspk +} + +func NewV2() (KeyV2, error) { + pubKey, privKey, err := ed25519.GenerateKey(cryptorand.Reader) + if 
err != nil { + return KeyV2{}, err + } + return KeyV2{ + privateKey: &privKey, + PublicKey: pubKey, + Version: 2, + }, nil +} + +func MustNewV2XXXTestingOnly(k *big.Int) KeyV2 { + seed := make([]byte, ed25519.SeedSize) + copy(seed, k.Bytes()) + privKey := ed25519.NewKeyFromSeed(seed) + return KeyV2{ + privateKey: &privKey, + PublicKey: ed25519PubKeyFromPrivKey(privKey), + Version: 2, + } +} + +func (k KeyV2) ID() string { + return k.PublicKeyString() +} + +func (k KeyV2) PublicKeyString() string { + return hex.EncodeToString(k.PublicKey) +} + +func (k KeyV2) Raw() Raw { + return Raw(*k.privateKey) +} + +func (k KeyV2) String() string { + return fmt.Sprintf("CSAKeyV2{PrivateKey: , PublicKey: %s}", k.PublicKey) +} + +func (k KeyV2) GoString() string { + return k.String() +} + +func ed25519PubKeyFromPrivKey(privKey ed25519.PrivateKey) ed25519.PublicKey { + publicKey := make([]byte, ed25519.PublicKeySize) + copy(publicKey, privKey[32:]) + return publicKey +} diff --git a/core/services/keystore/keys/csakey/key_v2_test.go b/core/services/keystore/keys/csakey/key_v2_test.go new file mode 100644 index 00000000..67b2b2ad --- /dev/null +++ b/core/services/keystore/keys/csakey/key_v2_test.go @@ -0,0 +1,43 @@ +package csakey + +import ( + "crypto/ed25519" + "encoding/hex" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCSAKeyV2_RawPrivateKey(t *testing.T) { + _, privKey, err := ed25519.GenerateKey(nil) + require.NoError(t, err) + + privateKey := Raw(privKey) + + assert.Equal(t, "", privateKey.String()) + assert.Equal(t, privateKey.String(), privateKey.GoString()) +} + +func TestCSAKeyV2_FromRawPrivateKey(t *testing.T) { + pubKey, privKey, err := ed25519.GenerateKey(nil) + require.NoError(t, err) + + keyV2 := Raw(privKey).Key() + + assert.Equal(t, pubKey, keyV2.PublicKey) + assert.Equal(t, privKey, *keyV2.privateKey) + assert.Equal(t, keyV2.String(), keyV2.GoString()) + assert.Equal(t, 
hex.EncodeToString(pubKey), keyV2.PublicKeyString()) + assert.Equal(t, fmt.Sprintf("CSAKeyV2{PrivateKey: , PublicKey: %s}", pubKey), keyV2.String()) +} + +func TestCSAKeyV2_NewV2(t *testing.T) { + keyV2, err := NewV2() + require.NoError(t, err) + + assert.Equal(t, 2, keyV2.Version) + assert.NotNil(t, keyV2.PublicKey) + assert.NotNil(t, keyV2.privateKey) +} diff --git a/core/services/keystore/keys/dkgencryptkey/export.go b/core/services/keystore/keys/dkgencryptkey/export.go new file mode 100644 index 00000000..b5628aa8 --- /dev/null +++ b/core/services/keystore/keys/dkgencryptkey/export.go @@ -0,0 +1,44 @@ +package dkgencryptkey + +import ( + "github.com/ethereum/go-ethereum/accounts/keystore" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const keyTypeIdentifier = "DKGEncrypt" + +// FromEncryptedJSON returns a dkgencryptkey.KeyV2 from encrypted data in go-ethereum keystore format. +func FromEncryptedJSON(keyJSON []byte, password string) (Key, error) { + return keys.FromEncryptedJSON( + keyTypeIdentifier, + keyJSON, + password, + adulteratedPassword, + func(_ keys.EncryptedKeyExport, rawPrivKey []byte) (Key, error) { + return Raw(rawPrivKey).Key(), nil + }) +} + +// ToEncryptedJSON exports this key into a JSON object following the format of EncryptedDKGEncryptKeyExport +func (k Key) ToEncryptedJSON(password string, scryptParams utils.ScryptParams) (export []byte, err error) { + return keys.ToEncryptedJSON( + keyTypeIdentifier, + k.Raw(), + k, + password, + scryptParams, + adulteratedPassword, + func(id string, key Key, cryptoJSON keystore.CryptoJSON) keys.EncryptedKeyExport { + return keys.EncryptedKeyExport{ + KeyType: id, + PublicKey: key.PublicKeyString(), + Crypto: cryptoJSON, + } + }) +} + +func adulteratedPassword(password string) string { + return "dkgencryptkey" + password +} diff --git a/core/services/keystore/keys/dkgencryptkey/export_test.go 
b/core/services/keystore/keys/dkgencryptkey/export_test.go new file mode 100644 index 00000000..7561b7af --- /dev/null +++ b/core/services/keystore/keys/dkgencryptkey/export_test.go @@ -0,0 +1,19 @@ +package dkgencryptkey + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" +) + +func TestDKGEncryptKeys_ExportImport(t *testing.T) { + keys.RunKeyExportImportTestcase(t, createKey, decryptKey) +} + +func createKey() (keys.KeyType, error) { + return New() +} + +func decryptKey(keyJSON []byte, password string) (keys.KeyType, error) { + return FromEncryptedJSON(keyJSON, password) +} diff --git a/core/services/keystore/keys/dkgencryptkey/key.go b/core/services/keystore/keys/dkgencryptkey/key.go new file mode 100644 index 00000000..8f87c52e --- /dev/null +++ b/core/services/keystore/keys/dkgencryptkey/key.go @@ -0,0 +1,116 @@ +package dkgencryptkey + +import ( + "encoding/hex" + "fmt" + "math/big" + + "github.com/pkg/errors" + "go.dedis.ch/kyber/v3" + "go.dedis.ch/kyber/v3/pairing" + + "github.com/goplugin/plugin-vrf/altbn_128" +) + +var suite pairing.Suite = &altbn_128.PairingSuite{} +var g1 = suite.G1() + +type Raw []byte + +func (r Raw) Key() Key { + scalar := g1.Scalar() + err := scalar.UnmarshalBinary(r) + if err != nil { + panic(err) // should never happen6 + } + key, err := keyFromScalar(scalar) + if err != nil { + panic(err) // should never happen + } + return key +} + +func (r Raw) String() string { + return "" +} + +func (r Raw) GoString() string { + return r.String() +} + +type Key struct { + privateKey kyber.Scalar + publicKeyBytes []byte + PublicKey kyber.Point +} + +// New returns a new dkgencryptkey key +func New() (Key, error) { + return keyFromScalar(g1.Scalar().Pick(suite.RandomStream())) +} + +// MustNewXXXTestingOnly creates a new DKGEncrypt key from the given secret key. +// NOTE: for testing only. 
+func MustNewXXXTestingOnly(sk *big.Int) Key { + key, err := keyFromScalar(g1.Scalar().SetInt64(sk.Int64())) + if err != nil { + panic(err) + } + return key +} + +var _ fmt.GoStringer = &Key{} + +// GoString implements fmt.GoStringer +func (k Key) GoString() string { + return k.String() +} + +// String returns the string representation of this key +func (k Key) String() string { + return fmt.Sprintf("DKGEncryptKey{PrivateKey: , PublicKey: %s", k.PublicKeyString()) +} + +// ID returns the ID of this key +func (k Key) ID() string { + return k.PublicKeyString() +} + +// PublicKeyString returns the hex representation of this key's public key +func (k Key) PublicKeyString() string { + return hex.EncodeToString(k.publicKeyBytes) +} + +// Raw returns the key raw data +func (k Key) Raw() Raw { + raw, err := k.privateKey.MarshalBinary() + if err != nil { + panic(err) // should never happen + } + return Raw(raw) +} + +// KyberScalar returns the private key as a kyber.Scalar object +func (k Key) KyberScalar() kyber.Scalar { + return g1.Scalar().Set(k.privateKey) +} + +// KyberPoint returns the public key as a kyber.Point object +func (k Key) KyberPoint() kyber.Point { + return g1.Point().Base().Mul(k.privateKey, nil) +} + +// keyFromScalar creates a new dkgencryptkey key from the given scalar. +// the given scalar must be a scalar of the g1 group in the altbn_128 pairing. 
+func keyFromScalar(k kyber.Scalar) (Key, error) { + publicKey := g1.Point().Base().Mul(k, nil) + publicKeyBytes, err := publicKey.MarshalBinary() + if err != nil { + return Key{}, errors.Wrap(err, "kyber point MarshalBinary") + } + return Key{ + privateKey: k, + PublicKey: publicKey, + publicKeyBytes: publicKeyBytes, + }, nil +} diff --git a/core/services/keystore/keys/dkgencryptkey/key_test.go b/core/services/keystore/keys/dkgencryptkey/key_test.go new file mode 100644 index 00000000..fc7f9912 --- /dev/null +++ b/core/services/keystore/keys/dkgencryptkey/key_test.go @@ -0,0 +1,58 @@ +package dkgencryptkey + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestNew(t *testing.T) { + key, err := New() + assert.NoError(t, err) + assert.NotNil(t, key.privateKey) + assert.NotNil(t, key.PublicKey) + assert.NotNil(t, key.publicKeyBytes) +} + +func TestStringers(t *testing.T) { + key := MustNewXXXTestingOnly(big.NewInt(1337)) + assert.Equal(t, "26578c46722826d18dc5f5a954c65c5c78e0d215a465356502ff8f002aff36ef", key.PublicKeyString()) + assert.Equal(t, + "DKGEncryptKey{PrivateKey: , PublicKey: 26578c46722826d18dc5f5a954c65c5c78e0d215a465356502ff8f002aff36ef", + key.String()) + assert.Equal(t, + "DKGEncryptKey{PrivateKey: , PublicKey: 26578c46722826d18dc5f5a954c65c5c78e0d215a465356502ff8f002aff36ef", + key.GoString()) + assert.Equal(t, + "26578c46722826d18dc5f5a954c65c5c78e0d215a465356502ff8f002aff36ef", + key.ID()) +} + +func TestRaw(t *testing.T) { + key := MustNewXXXTestingOnly(big.NewInt(1337)) + rawFromKey := key.Raw() + scalar := g1.Scalar().SetBytes(rawFromKey) + assert.True(t, scalar.Equal(key.privateKey)) + + keyFromRaw := rawFromKey.Key() + assert.True(t, keyFromRaw.privateKey.Equal(key.privateKey)) + + assert.Equal(t, "", rawFromKey.GoString()) + assert.Equal(t, "", rawFromKey.String()) +} + +func TestExportImport(t *testing.T) { + password := "helloworld" + key := 
MustNewXXXTestingOnly(big.NewInt(1337)) + encryptedJSON, err := key.ToEncryptedJSON(password, utils.DefaultScryptParams) + assert.NoError(t, err) + + decryptedKey, err := FromEncryptedJSON(encryptedJSON, password) + assert.NoError(t, err) + assert.True(t, decryptedKey.privateKey.Equal(key.privateKey)) + assert.True(t, decryptedKey.PublicKey.Equal(key.PublicKey)) + assert.ElementsMatch(t, decryptedKey.publicKeyBytes, key.publicKeyBytes) +} diff --git a/core/services/keystore/keys/dkgsignkey/export.go b/core/services/keystore/keys/dkgsignkey/export.go new file mode 100644 index 00000000..ee30b7f8 --- /dev/null +++ b/core/services/keystore/keys/dkgsignkey/export.go @@ -0,0 +1,46 @@ +package dkgsignkey + +import ( + "github.com/ethereum/go-ethereum/accounts/keystore" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const keyTypeIdentifier = "DKGSign" + +// FromEncryptedJSON returns a dkgsignkey.Key from encrypted data in go-ethereum keystore format. 
+func FromEncryptedJSON(keyJSON []byte, password string) (Key, error) { + return keys.FromEncryptedJSON( + keyTypeIdentifier, + keyJSON, + password, + adulteratedPassword, + func(_ keys.EncryptedKeyExport, rawPrivKey []byte) (Key, error) { + return Raw(rawPrivKey).Key(), nil + }, + ) +} + +// ToEncryptedJSON exports this key into a JSON object following the format of EncryptedDKGSignKeyExport +func (key Key) ToEncryptedJSON(password string, scryptParams utils.ScryptParams) (export []byte, err error) { + return keys.ToEncryptedJSON( + keyTypeIdentifier, + key.Raw(), + key, + password, + scryptParams, + adulteratedPassword, + func(id string, key Key, cryptoJSON keystore.CryptoJSON) keys.EncryptedKeyExport { + return keys.EncryptedKeyExport{ + KeyType: id, + PublicKey: key.PublicKeyString(), + Crypto: cryptoJSON, + } + }, + ) +} + +func adulteratedPassword(password string) string { + return "dkgsignkey" + password +} diff --git a/core/services/keystore/keys/dkgsignkey/export_test.go b/core/services/keystore/keys/dkgsignkey/export_test.go new file mode 100644 index 00000000..c7081f95 --- /dev/null +++ b/core/services/keystore/keys/dkgsignkey/export_test.go @@ -0,0 +1,19 @@ +package dkgsignkey + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" +) + +func TestDKGSignKeys_ExportImport(t *testing.T) { + keys.RunKeyExportImportTestcase(t, createKey, decryptKey) +} + +func createKey() (keys.KeyType, error) { + return New() +} + +func decryptKey(keyJSON []byte, password string) (keys.KeyType, error) { + return FromEncryptedJSON(keyJSON, password) +} diff --git a/core/services/keystore/keys/dkgsignkey/key.go b/core/services/keystore/keys/dkgsignkey/key.go new file mode 100644 index 00000000..6ab18810 --- /dev/null +++ b/core/services/keystore/keys/dkgsignkey/key.go @@ -0,0 +1,92 @@ +package dkgsignkey + +import ( + "encoding/hex" + "fmt" + "math/big" + + "go.dedis.ch/kyber/v3" + "go.dedis.ch/kyber/v3/group/edwards25519" +) + +var suite 
= edwards25519.NewBlakeSHA256Ed25519() + +// Raw represents a raw dkgsign secret key in little-endian byte order. +type Raw []byte + +// Key returns a Key object from this raw data. +func (r Raw) Key() Key { + privKey := suite.Scalar().SetBytes(r) + key, err := keyFromScalar(privKey) + if err != nil { + panic(err) // should never happen + } + return key +} + +func (r Raw) String() string { + return "" +} + +func (r Raw) GoString() string { + return r.String() +} + +// Key is DKG signing key that conforms to the keystore.Key interface +type Key struct { + privateKey kyber.Scalar + publicKeyBytes []byte + PublicKey kyber.Point +} + +// New creates a new DKGSign key +func New() (Key, error) { + privateKey := suite.Scalar().Pick(suite.RandomStream()) + return keyFromScalar(privateKey) +} + +// MustNewXXXTestingOnly creates a new DKGSign key from the given secret key. +// NOTE: for testing only. +func MustNewXXXTestingOnly(sk *big.Int) Key { + key, err := keyFromScalar(scalarFromBig(sk)) + if err != nil { + panic(err) + } + return key +} + +var _ fmt.GoStringer = &Key{} + +// GoString implements fmt.GoStringer +func (k Key) GoString() string { + return k.String() +} + +// String returns the string representation of this key +func (k Key) String() string { + return fmt.Sprintf("DKGSignKey{PrivateKey: , PublicKey: %s", k.PublicKey) +} + +// ID returns the ID of this key +func (k Key) ID() string { + return k.PublicKeyString() +} + +// PublicKeyString returns the hex representation of this key's public key +func (k Key) PublicKeyString() string { + return hex.EncodeToString(k.publicKeyBytes) +} + +// Raw returns the key raw data +func (k Key) Raw() Raw { + raw, err := k.privateKey.MarshalBinary() + if err != nil { + panic(err) // should never happen + } + return Raw(raw) +} + +// KyberScalar returns the private key as a kyber.Scalar object +func (k Key) KyberScalar() kyber.Scalar { + return suite.Scalar().Set(k.privateKey) +} diff --git 
a/core/services/keystore/keys/dkgsignkey/key_test.go b/core/services/keystore/keys/dkgsignkey/key_test.go new file mode 100644 index 00000000..1fbf53b9 --- /dev/null +++ b/core/services/keystore/keys/dkgsignkey/key_test.go @@ -0,0 +1,58 @@ +package dkgsignkey + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestNew(t *testing.T) { + key, err := New() + assert.NoError(t, err) + assert.NotNil(t, key.privateKey) + assert.NotNil(t, key.PublicKey) + assert.NotNil(t, key.publicKeyBytes) +} + +func TestStringers(t *testing.T) { + key := MustNewXXXTestingOnly(big.NewInt(1337)) + assert.Equal(t, "becd7a86af89b2f3ffd11fabe897de820b74cd2956c6e047a14e35d090ade17d", key.PublicKeyString()) + assert.Equal(t, + "DKGSignKey{PrivateKey: , PublicKey: becd7a86af89b2f3ffd11fabe897de820b74cd2956c6e047a14e35d090ade17d", + key.String()) + assert.Equal(t, + "DKGSignKey{PrivateKey: , PublicKey: becd7a86af89b2f3ffd11fabe897de820b74cd2956c6e047a14e35d090ade17d", + key.GoString()) + assert.Equal(t, + "becd7a86af89b2f3ffd11fabe897de820b74cd2956c6e047a14e35d090ade17d", + key.ID()) +} + +func TestRaw(t *testing.T) { + key := MustNewXXXTestingOnly(big.NewInt(1337)) + rawFromKey := key.Raw() + scalar := suite.Scalar().SetBytes(rawFromKey) + assert.True(t, scalar.Equal(key.privateKey)) + + keyFromRaw := rawFromKey.Key() + assert.True(t, keyFromRaw.privateKey.Equal(key.privateKey)) + + assert.Equal(t, "", rawFromKey.GoString()) + assert.Equal(t, "", rawFromKey.String()) +} + +func TestExportImport(t *testing.T) { + password := "helloworld" + key := MustNewXXXTestingOnly(big.NewInt(1337)) + encryptedJSON, err := key.ToEncryptedJSON(password, utils.DefaultScryptParams) + assert.NoError(t, err) + + decryptedKey, err := FromEncryptedJSON(encryptedJSON, password) + assert.NoError(t, err) + assert.True(t, decryptedKey.privateKey.Equal(key.privateKey)) + assert.True(t, decryptedKey.PublicKey.Equal(key.PublicKey)) + 
assert.ElementsMatch(t, decryptedKey.publicKeyBytes, key.publicKeyBytes) +} diff --git a/core/services/keystore/keys/dkgsignkey/utils.go b/core/services/keystore/keys/dkgsignkey/utils.go new file mode 100644 index 00000000..8b4add1e --- /dev/null +++ b/core/services/keystore/keys/dkgsignkey/utils.go @@ -0,0 +1,39 @@ +package dkgsignkey + +import ( + "math/big" + + "github.com/pkg/errors" + "go.dedis.ch/kyber/v3" +) + +// scalarFromBig creates a kyber.Scalar belonging to the edwards25519 +// kyber suite from a big integer. This is useful for testing. +func scalarFromBig(i *big.Int) kyber.Scalar { + scalar := suite.Scalar() + // big.Int.Bytes() returns a byte slice in big-endian order, + // need to reverse the slice before we SetBytes since + // SetBytes interprets it in little-endian order. + b := i.Bytes() + reverseSliceInPlace(b) + return scalar.SetBytes(b) +} + +func keyFromScalar(k kyber.Scalar) (Key, error) { + publicKey := suite.Point().Base().Mul(k, nil) + publicKeyBytes, err := publicKey.MarshalBinary() + if err != nil { + return Key{}, errors.Wrap(err, "kyber point MarshalBinary") + } + return Key{ + privateKey: k, + PublicKey: publicKey, + publicKeyBytes: publicKeyBytes, + }, nil +} + +func reverseSliceInPlace[T any](elems []T) { + for i := 0; i < len(elems)/2; i++ { + elems[i], elems[len(elems)-i-1] = elems[len(elems)-i-1], elems[i] + } +} diff --git a/core/services/keystore/keys/ethkey/address.go b/core/services/keystore/keys/ethkey/address.go new file mode 100644 index 00000000..652a9578 --- /dev/null +++ b/core/services/keystore/keys/ethkey/address.go @@ -0,0 +1,145 @@ +package ethkey + +import ( + "database/sql/driver" + "encoding/hex" + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/plugin-common/pkg/utils/bytes" +) + +// EIP55Address is a new type for string which persists an ethereum address in +// its original string representation which includes a leading 0x, and EIP55 +// checksum which is 
represented by the case of digits A-F. +type EIP55Address string + +// NewEIP55Address creates an EIP55Address from a string, an error is returned if: +// +// 1) There is no leading 0x +// 2) The length is wrong +// 3) There are any non hexadecimal characters +// 4) The checksum fails +func NewEIP55Address(s string) (EIP55Address, error) { + address := common.HexToAddress(s) + if s != address.Hex() { + return EIP55Address(""), fmt.Errorf(`"%s" is not a valid EIP55 formatted address`, s) + } + return EIP55Address(s), nil +} + +func MustEIP55Address(s string) EIP55Address { + addr, err := NewEIP55Address(s) + if err != nil { + panic(err) + } + return addr +} + +// EIP55AddressFromAddress forces an address into EIP55Address format +// It is safe to panic on error since address.Hex() should ALWAYS generate EIP55Address-compatible hex strings +func EIP55AddressFromAddress(a common.Address) EIP55Address { + addr, err := NewEIP55Address(a.Hex()) + if err != nil { + panic(err) + } + return addr +} + +// Bytes returns the raw bytes +func (a EIP55Address) Bytes() []byte { return a.Address().Bytes() } + +// Big returns a big.Int representation +func (a EIP55Address) Big() *big.Int { return a.Address().Big() } + +// Hash returns the Hash +func (a EIP55Address) Hash() common.Hash { return common.BytesToHash(a.Bytes()) } + +// Address returns EIP55Address as a go-ethereum Address type +func (a EIP55Address) Address() common.Address { return common.HexToAddress(a.String()) } + +// String implements the stringer interface and is used also by the logger. 
+func (a EIP55Address) String() string { + return string(a) +} + +// Hex is identical to String but makes the API similar to common.Address +func (a EIP55Address) Hex() string { + return a.String() +} + +// Format implements fmt.Formatter +func (a EIP55Address) Format(s fmt.State, c rune) { + _, _ = fmt.Fprint(s, a.String()) +} + +// UnmarshalText parses a hash from plain text +func (a *EIP55Address) UnmarshalText(input []byte) error { + var err error + *a, err = NewEIP55Address(string(input)) + return err +} + +// UnmarshalJSON parses a hash from a JSON string +func (a *EIP55Address) UnmarshalJSON(input []byte) error { + input = bytes.TrimQuotes(input) + return a.UnmarshalText(input) +} + +// Value returns this instance serialized for database storage. +func (a EIP55Address) Value() (driver.Value, error) { + return a.Bytes(), nil + +} + +// Scan reads the database value and returns an instance. +func (a *EIP55Address) Scan(value interface{}) error { + switch v := value.(type) { + case string: + *a = EIP55Address(v) + case []byte: + address := common.HexToAddress("0x" + hex.EncodeToString(v)) + *a = EIP55Address(address.Hex()) + default: + return fmt.Errorf("unable to convert %v of %T to EIP55Address", value, value) + } + return nil +} + +// IsZeroAddress determines whether the address is 0x0000... or not +func (a EIP55Address) IsZero() bool { + return a.Address() == common.Address{} +} + +// EIP55AddressCollection is an array of EIP55Addresses. +type EIP55AddressCollection []EIP55Address + +// Value returns this instance serialized for database storage. +func (c EIP55AddressCollection) Value() (driver.Value, error) { + // Unable to convert copy-free without unsafe: + // https://stackoverflow.com/a/48554123/639773 + converted := make([]string, len(c)) + for i, e := range c { + converted[i] = string(e) + } + return strings.Join(converted, ","), nil +} + +// Scan reads the database value and returns an instance. 
+func (c *EIP55AddressCollection) Scan(value interface{}) error { + temp, ok := value.(string) + if !ok { + return fmt.Errorf("unable to convert %v of %T to EIP55AddressCollection", value, value) + } + + arr := strings.Split(temp, ",") + collection := make(EIP55AddressCollection, len(arr)) + for i, r := range arr { + collection[i] = EIP55Address(r) + } + *c = collection + return nil +} diff --git a/core/services/keystore/keys/ethkey/address_test.go b/core/services/keystore/keys/ethkey/address_test.go new file mode 100644 index 00000000..065f28aa --- /dev/null +++ b/core/services/keystore/keys/ethkey/address_test.go @@ -0,0 +1,124 @@ +package ethkey_test + +import ( + "encoding/json" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +func TestEIP55Address(t *testing.T) { + t.Parallel() + + address := ethkey.EIP55Address("0xa0788FC17B1dEe36f057c42B6F373A34B014687e") + + assert.Equal(t, []byte{ + 0xa0, 0x78, 0x8f, 0xc1, 0x7b, 0x1d, 0xee, 0x36, + 0xf0, 0x57, 0xc4, 0x2b, 0x6f, 0x37, 0x3a, 0x34, + 0xb0, 0x14, 0x68, 0x7e, + }, address.Bytes()) + + bi, _ := (new(big.Int)).SetString("a0788FC17B1dEe36f057c42B6F373A34B014687e", 16) + assert.Equal(t, bi, address.Big()) + + assert.Equal(t, "0xa0788FC17B1dEe36f057c42B6F373A34B014687e", address.String()) + + assert.Equal(t, common.BytesToHash([]byte{ + 0xa0, 0x78, 0x8f, 0xc1, 0x7b, 0x1d, 0xee, 0x36, + 0xf0, 0x57, 0xc4, 0x2b, 0x6f, 0x37, 0x3a, 0x34, + 0xb0, 0x14, 0x68, 0x7e, + }), address.Hash()) + + assert.Equal(t, "0xa0788FC17B1dEe36f057c42B6F373A34B014687e", address.String()) + + zeroAddress := ethkey.EIP55Address("") + err := json.Unmarshal([]byte(`"0xa0788FC17B1dEe36f057c42B6F373A34B014687e"`), &zeroAddress) + assert.NoError(t, err) + assert.Equal(t, "0xa0788FC17B1dEe36f057c42B6F373A34B014687e", zeroAddress.String()) + + zeroAddress = ethkey.EIP55Address("") + err = 
zeroAddress.UnmarshalText([]byte("0xa0788FC17B1dEe36f057c42B6F373A34B014687e")) + assert.NoError(t, err) + assert.Equal(t, "0xa0788FC17B1dEe36f057c42B6F373A34B014687e", zeroAddress.String()) +} + +func TestValidateEIP55Address(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + valid bool + }{ + {"valid address", "0xa0788FC17B1dEe36f057c42B6F373A34B014687e", true}, + {"lowercase address", "0xa0788fc17b1dee36f057c42b6f373a34b014687e", false}, + {"invalid checksum", "0xA0788FC17B1dEe36f057c42B6F373A34B014687e", false}, + {"no leading 0x", "A0788FC17B1dEe36f057c42B6F373A34B014687e", false}, + {"non hex character", "0xz0788FC17B1dEe36f057c42B6F373A34B014687e", false}, + {"wrong length", "0xa0788FC17B1dEe36f057c42B6F373A34B014687", false}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _, err := ethkey.NewEIP55Address(test.input) + valid := err == nil + assert.Equal(t, test.valid, valid) + }) + } +} + +func TestEIP55AddressFromAddress(t *testing.T) { + t.Parallel() + + addr := common.HexToAddress("0xa0788FC17B1dEe36f057c42B6F373A34B014687e") + eip55 := ethkey.EIP55AddressFromAddress(addr) + assert.Equal(t, addr, eip55.Address()) +} + +func TestEIP55Address_Scan_Value(t *testing.T) { + t.Parallel() + + eip55, err := ethkey.NewEIP55Address("0xa0788FC17B1dEe36f057c42B6F373A34B014687e") + assert.NoError(t, err) + + val, err := eip55.Value() + assert.NoError(t, err) + + var eip55New ethkey.EIP55Address + err = eip55New.Scan(val) + assert.NoError(t, err) + + assert.Equal(t, eip55, eip55New) +} + +func TestEIP55AddressCollection_Scan_Value(t *testing.T) { + t.Parallel() + + collection := ethkey.EIP55AddressCollection{ + ethkey.EIP55Address("0xa0788FC17B1dEe36f057c42B6F373A34B0146111"), + ethkey.EIP55Address("0xa0788FC17B1dEe36f057c42B6F373A34B0146222"), + } + + val, err := collection.Value() + assert.NoError(t, err) + + var collectionNew ethkey.EIP55AddressCollection + err = collectionNew.Scan(val) + 
assert.NoError(t, err) + + assert.Equal(t, collection, collectionNew) +} + +func TestEIP55Address_IsZero(t *testing.T) { + t.Parallel() + + eip55 := ethkey.EIP55AddressFromAddress(common.HexToAddress("0x0")) + assert.True(t, eip55.IsZero()) + + eip55 = ethkey.EIP55AddressFromAddress(common.HexToAddress("0x1")) + assert.False(t, eip55.IsZero()) +} diff --git a/core/services/keystore/keys/ethkey/export.go b/core/services/keystore/keys/ethkey/export.go new file mode 100644 index 00000000..52fe54af --- /dev/null +++ b/core/services/keystore/keys/ethkey/export.go @@ -0,0 +1,29 @@ +package ethkey + +import ( + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/google/uuid" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type EncryptedEthKeyExport struct { + KeyType string `json:"keyType"` + Address EIP55Address `json:"address"` + Crypto keystore.CryptoJSON `json:"crypto"` +} + +func (key KeyV2) ToEncryptedJSON(password string, scryptParams utils.ScryptParams) (export []byte, err error) { + // DEV: uuid is derived directly from the address, since it is not stored internally + id, err := uuid.FromBytes(key.Address.Bytes()[:16]) + if err != nil { + return nil, errors.Wrapf(err, "could not generate ethkey UUID") + } + dKey := &keystore.Key{ + Id: id, + Address: key.Address, + PrivateKey: key.privateKey, + } + return keystore.EncryptKey(dKey, password, scryptParams.N, scryptParams.P) +} diff --git a/core/services/keystore/keys/ethkey/export_test.go b/core/services/keystore/keys/ethkey/export_test.go new file mode 100644 index 00000000..13450d6b --- /dev/null +++ b/core/services/keystore/keys/ethkey/export_test.go @@ -0,0 +1,18 @@ +package ethkey + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" +) + +func TestEthKeys_ExportImport(t *testing.T) { + keys.RunKeyExportImportTestcase(t, createKey, func(keyJSON []byte, password string) (kt keys.KeyType, err error) { + t.SkipNow() + return 
kt, err + }) +} + +func createKey() (keys.KeyType, error) { + return NewV2() +} diff --git a/core/services/keystore/keys/ethkey/key.go b/core/services/keystore/keys/ethkey/key.go new file mode 100644 index 00000000..8ff16cda --- /dev/null +++ b/core/services/keystore/keys/ethkey/key.go @@ -0,0 +1,33 @@ +package ethkey + +import ( + "time" + + "github.com/goplugin/plugin-common/pkg/sqlutil" +) + +// NOTE: This model refers to the OLD key and is only used for migrations +// +// Key holds the private key metadata for a given address that is used to unlock +// said key when given a password. +// +// By default, a key is assumed to represent an ethereum account. +type Key struct { + ID int32 + Address EIP55Address + JSON sqlutil.JSON `json:"-"` + CreatedAt time.Time `json:"-"` + UpdatedAt time.Time `json:"-"` + DeletedAt *time.Time `json:"-"` + // IsFunding marks the address as being used for rescuing the node and the pending transactions + // Only one key can be IsFunding=true at a time. + IsFunding bool +} + +// Type returns type of key +func (k Key) Type() string { + if k.IsFunding { + return "funding" + } + return "sending" +} diff --git a/core/services/keystore/keys/ethkey/key_test.go b/core/services/keystore/keys/ethkey/key_test.go new file mode 100644 index 00000000..9255c0f8 --- /dev/null +++ b/core/services/keystore/keys/ethkey/key_test.go @@ -0,0 +1,19 @@ +package ethkey + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEthKey_Type(t *testing.T) { + k := Key{ + IsFunding: true, + } + k2 := Key{ + IsFunding: false, + } + + assert.Equal(t, k.Type(), "funding") + assert.Equal(t, k2.Type(), "sending") +} diff --git a/core/services/keystore/keys/ethkey/key_v2.go b/core/services/keystore/keys/ethkey/key_v2.go new file mode 100644 index 00000000..15dc15d3 --- /dev/null +++ b/core/services/keystore/keys/ethkey/key_v2.go @@ -0,0 +1,90 @@ +package ethkey + +import ( + "bytes" + "crypto/ecdsa" + "crypto/rand" + "fmt" + "math/big" + + 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +var curve = crypto.S256() + +type Raw []byte + +func (raw Raw) Key() KeyV2 { + var privateKey ecdsa.PrivateKey + d := big.NewInt(0).SetBytes(raw) + privateKey.PublicKey.Curve = curve + privateKey.D = d + privateKey.PublicKey.X, privateKey.PublicKey.Y = curve.ScalarBaseMult(d.Bytes()) + address := crypto.PubkeyToAddress(privateKey.PublicKey) + eip55 := EIP55AddressFromAddress(address) + return KeyV2{ + Address: address, + EIP55Address: eip55, + privateKey: &privateKey, + } +} + +func (raw Raw) String() string { + return "" +} + +func (raw Raw) GoString() string { + return raw.String() +} + +var _ fmt.GoStringer = &KeyV2{} + +type KeyV2 struct { + Address common.Address + EIP55Address EIP55Address + privateKey *ecdsa.PrivateKey +} + +func NewV2() (KeyV2, error) { + privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + if err != nil { + return KeyV2{}, err + } + return FromPrivateKey(privateKeyECDSA), nil +} + +func FromPrivateKey(privKey *ecdsa.PrivateKey) (key KeyV2) { + address := crypto.PubkeyToAddress(privKey.PublicKey) + eip55 := EIP55AddressFromAddress(address) + return KeyV2{ + Address: address, + EIP55Address: eip55, + privateKey: privKey, + } +} + +func (key KeyV2) ID() string { + return key.Address.Hex() +} + +func (key KeyV2) Raw() Raw { + return key.privateKey.D.Bytes() +} + +func (key KeyV2) ToEcdsaPrivKey() *ecdsa.PrivateKey { + return key.privateKey +} + +func (key KeyV2) String() string { + return fmt.Sprintf("EthKeyV2{PrivateKey: , Address: %s}", key.Address) +} + +func (key KeyV2) GoString() string { + return key.String() +} + +// Cmp uses byte-order address comparison to give a stable comparison between two keys +func (key KeyV2) Cmp(key2 KeyV2) int { + return bytes.Compare(key.Address.Bytes(), key2.Address.Bytes()) +} diff --git a/core/services/keystore/keys/ethkey/key_v2_test.go b/core/services/keystore/keys/ethkey/key_v2_test.go new file 
mode 100644 index 00000000..82b1084e --- /dev/null +++ b/core/services/keystore/keys/ethkey/key_v2_test.go @@ -0,0 +1,43 @@ +package ethkey + +import ( + "crypto/ecdsa" + "crypto/rand" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEthKeyV2_ToKey(t *testing.T) { + privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + require.NoError(t, err) + + k := Raw(privateKeyECDSA.D.Bytes()).Key() + + assert.Equal(t, k.String(), k.GoString()) + assert.Equal(t, k.privateKey, privateKeyECDSA) + assert.Equal(t, k.privateKey.PublicKey.X, privateKeyECDSA.PublicKey.X) + assert.Equal(t, k.privateKey.PublicKey.Y, privateKeyECDSA.PublicKey.Y) + assert.Equal(t, EIP55AddressFromAddress(crypto.PubkeyToAddress(privateKeyECDSA.PublicKey)).Hex(), k.ID()) +} + +func TestEthKeyV2_RawPrivateKey(t *testing.T) { + privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + require.NoError(t, err) + + k := Raw(privateKeyECDSA.D.Bytes()) + + assert.Equal(t, "", k.String()) + assert.Equal(t, k.String(), k.GoString()) +} + +func TestEthKeyV2_NewV2(t *testing.T) { + keyV2, err := NewV2() + require.NoError(t, err) + + assert.NotZero(t, keyV2.Address) + assert.NotNil(t, keyV2.privateKey) + assert.Equal(t, keyV2.Address.Hex(), keyV2.ID()) +} diff --git a/core/services/keystore/keys/ethkey/models.go b/core/services/keystore/keys/ethkey/models.go new file mode 100644 index 00000000..64e3d8f3 --- /dev/null +++ b/core/services/keystore/keys/ethkey/models.go @@ -0,0 +1,31 @@ +package ethkey + +import ( + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +type State struct { + ID int32 + Address EIP55Address + EVMChainID big.Big + Disabled bool + CreatedAt time.Time + UpdatedAt time.Time + lastUsed time.Time +} + +func (s State) KeyID() string { + return s.Address.Hex() +} + +// lastUsed is an internal field and ought not be persisted to the database 
or +// exposed outside of the application +func (s State) LastUsed() time.Time { + return s.lastUsed +} + +func (s *State) WasUsed() { + s.lastUsed = time.Now() +} diff --git a/core/services/keystore/keys/exporttestutils.go b/core/services/keystore/keys/exporttestutils.go new file mode 100644 index 00000000..c38cf585 --- /dev/null +++ b/core/services/keystore/keys/exporttestutils.go @@ -0,0 +1,38 @@ +package keys + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// KeyType represents a key type for keys testing +type KeyType interface { + ToEncryptedJSON(password string, scryptParams utils.ScryptParams) (export []byte, err error) + String() string +} + +// CreateKeyFunc represents a function to create a key +type CreateKeyFunc func() (KeyType, error) + +// DecryptFunc represents a function to decrypt a key +type DecryptFunc func(keyJSON []byte, password string) (KeyType, error) + +// RunKeyExportImportTestcase executes a testcase to validate keys import/export functionality +func RunKeyExportImportTestcase(t *testing.T, createKey CreateKeyFunc, decrypt DecryptFunc) { + key, err := createKey() + require.NoError(t, err) + + json, err := key.ToEncryptedJSON("password", utils.FastScryptParams) + require.NoError(t, err) + + assert.NotEmpty(t, json) + + imported, err := decrypt(json, "password") + require.NoError(t, err) + + assert.Equal(t, key.String(), imported.String()) +} diff --git a/core/services/keystore/keys/exportutils.go b/core/services/keystore/keys/exportutils.go new file mode 100644 index 00000000..5b74d21b --- /dev/null +++ b/core/services/keystore/keys/exportutils.go @@ -0,0 +1,83 @@ +package keys + +import ( + "encoding/json" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type Encrypted interface { + GetCrypto() keystore.CryptoJSON +} + +// 
EncryptedKeyExport represents a chain specific encrypted key +type EncryptedKeyExport struct { + KeyType string `json:"keyType"` + PublicKey string `json:"publicKey"` + Crypto keystore.CryptoJSON `json:"crypto"` +} + +func (x EncryptedKeyExport) GetCrypto() keystore.CryptoJSON { + return x.Crypto +} + +// FromEncryptedJSON gets key [K] from keyJSON [E] and password +func FromEncryptedJSON[E Encrypted, K any]( + identifier string, + keyJSON []byte, + password string, + passwordFunc func(string) string, + privKeyToKey func(export E, rawPrivKey []byte) (K, error), +) (K, error) { + + // unmarshal byte data to [E] Encrypted key export + var export E + if err := json.Unmarshal(keyJSON, &export); err != nil { + return *new(K), err + } + + // decrypt data using prefixed password + privKey, err := keystore.DecryptDataV3(export.GetCrypto(), passwordFunc(password)) + if err != nil { + return *new(K), errors.Wrapf(err, "failed to decrypt %s key", identifier) + } + + // convert unmarshalled data and decrypted key to [K] key format + key, err := privKeyToKey(export, privKey) + if err != nil { + return *new(K), errors.Wrapf(err, "failed to convert %s key to key bundle", identifier) + } + + return key, nil +} + +// ToEncryptedJSON returns encrypted JSON [E] representing key [K] +func ToEncryptedJSON[E Encrypted, K any]( + identifier string, + raw []byte, + key K, + password string, + scryptParams utils.ScryptParams, + passwordFunc func(string) string, + buildExport func(id string, key K, cryptoJSON keystore.CryptoJSON) E, +) (export []byte, err error) { + + // encrypt data using prefixed password + cryptoJSON, err := keystore.EncryptDataV3( + raw, + []byte(passwordFunc(password)), + scryptParams.N, + scryptParams.P, + ) + if err != nil { + return nil, errors.Wrapf(err, "could not encrypt %s key", identifier) + } + + // build [E] export struct using encrypted key, identifier, and original key [K] + encryptedKeyExport := buildExport(identifier, key, cryptoJSON) + + return 
json.Marshal(encryptedKeyExport) +} diff --git a/core/services/keystore/keys/keystest/keystest.go b/core/services/keystore/keys/keystest/keystest.go new file mode 100644 index 00000000..972caadb --- /dev/null +++ b/core/services/keystore/keys/keystest/keystest.go @@ -0,0 +1,15 @@ +package keystest + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" +) + +func NewP2PKeyV2(t *testing.T) p2pkey.KeyV2 { + k, err := p2pkey.NewV2() + require.NoError(t, err) + return k +} diff --git a/core/services/keystore/keys/ocr2key/cosmos_keyring.go b/core/services/keystore/keys/ocr2key/cosmos_keyring.go new file mode 100644 index 00000000..3fed40d8 --- /dev/null +++ b/core/services/keystore/keys/ocr2key/cosmos_keyring.go @@ -0,0 +1,113 @@ +package ocr2key + +import ( + "crypto/ed25519" + "encoding/binary" + "io" + + "github.com/hdevalence/ed25519consensus" + "github.com/pkg/errors" + "github.com/goplugin/libocr/offchainreporting2/types" + "github.com/goplugin/libocr/offchainreporting2plus/chains/evmutil" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + "golang.org/x/crypto/blake2s" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var _ ocrtypes.OnchainKeyring = &cosmosKeyring{} + +type cosmosKeyring struct { + privKey ed25519.PrivateKey + pubKey ed25519.PublicKey +} + +func newCosmosKeyring(material io.Reader) (*cosmosKeyring, error) { + pubKey, privKey, err := ed25519.GenerateKey(material) + if err != nil { + return nil, err + } + return &cosmosKeyring{pubKey: pubKey, privKey: privKey}, nil +} + +func (ckr *cosmosKeyring) PublicKey() ocrtypes.OnchainPublicKey { + return []byte(ckr.pubKey) +} + +func (ckr *cosmosKeyring) reportToSigData(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { + rawReportContext := evmutil.RawReportContext(reportCtx) + h, err := blake2s.New256(nil) + if err != nil { + return nil, err + } + reportLen := make([]byte, 
4) + binary.BigEndian.PutUint32(reportLen[0:], uint32(len(report))) + h.Write(reportLen[:]) + h.Write(report) + h.Write(rawReportContext[0][:]) + h.Write(rawReportContext[1][:]) + h.Write(rawReportContext[2][:]) + return h.Sum(nil), nil +} + +func (ckr *cosmosKeyring) Sign(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { + sigData, err := ckr.reportToSigData(reportCtx, report) + if err != nil { + return nil, err + } + return ckr.signBlob(sigData) +} + +func (ckr *cosmosKeyring) Sign3(digest types.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err error) { + return nil, errors.New("not implemented") +} + +func (ckr *cosmosKeyring) signBlob(b []byte) ([]byte, error) { + signedMsg := ed25519.Sign(ckr.privKey, b) + // match on-chain parsing (first 32 bytes are for pubkey, remaining are for signature) + return utils.ConcatBytes(ckr.PublicKey(), signedMsg), nil +} + +func (ckr *cosmosKeyring) Verify(publicKey ocrtypes.OnchainPublicKey, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signature []byte) bool { + hash, err := ckr.reportToSigData(reportCtx, report) + if err != nil { + return false + } + return ckr.verifyBlob(publicKey, hash, signature) +} + +func (ckr *cosmosKeyring) Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool { + return false +} + +func (ckr *cosmosKeyring) verifyBlob(pubkey ocrtypes.OnchainPublicKey, b, sig []byte) bool { + // Ed25519 signatures are always 64 bytes and the + // public key (always prefixed, see Sign above) is always, + // 32 bytes, so we always require the max signature length. 
+ if len(sig) != ckr.MaxSignatureLength() { + return false + } + if len(pubkey) != ed25519.PublicKeySize { + return false + } + return ed25519consensus.Verify(ed25519.PublicKey(pubkey), b, sig[32:]) +} + +func (ckr *cosmosKeyring) MaxSignatureLength() int { + // Reference: https://pkg.go.dev/crypto/ed25519 + return ed25519.PublicKeySize + ed25519.SignatureSize // 32 + 64 +} + +func (ckr *cosmosKeyring) Marshal() ([]byte, error) { + return ckr.privKey.Seed(), nil +} + +func (ckr *cosmosKeyring) Unmarshal(in []byte) error { + if len(in) != ed25519.SeedSize { + return errors.Errorf("unexpected seed size, got %d want %d", len(in), ed25519.SeedSize) + } + privKey := ed25519.NewKeyFromSeed(in) + ckr.privKey = privKey + ckr.pubKey = privKey.Public().(ed25519.PublicKey) + return nil +} diff --git a/core/services/keystore/keys/ocr2key/cosmos_keyring_test.go b/core/services/keystore/keys/ocr2key/cosmos_keyring_test.go new file mode 100644 index 00000000..d5e33252 --- /dev/null +++ b/core/services/keystore/keys/ocr2key/cosmos_keyring_test.go @@ -0,0 +1,58 @@ +package ocr2key + +import ( + "bytes" + cryptorand "crypto/rand" + "testing" + + "github.com/stretchr/testify/assert" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/require" +) + +func TestCosmosKeyRing_Sign_Verify(t *testing.T) { + kr1, err := newCosmosKeyring(cryptorand.Reader) + require.NoError(t, err) + kr2, err := newCosmosKeyring(cryptorand.Reader) + require.NoError(t, err) + ctx := ocrtypes.ReportContext{} + + t.Run("can verify", func(t *testing.T) { + report := ocrtypes.Report{} + sig, err := kr1.Sign(ctx, report) + require.NoError(t, err) + t.Log(len(sig)) + result := kr2.Verify(kr1.PublicKey(), ctx, report, sig) + require.True(t, result) + }) + + t.Run("invalid sig", func(t *testing.T) { + report := ocrtypes.Report{} + result := kr2.Verify(kr1.PublicKey(), ctx, report, []byte{0x01}) + require.False(t, result) + }) + + t.Run("invalid pubkey", func(t 
*testing.T) { + report := ocrtypes.Report{} + sig, err := kr1.Sign(ctx, report) + require.NoError(t, err) + result := kr2.Verify([]byte{0x01}, ctx, report, sig) + require.False(t, result) + }) +} + +func TestCosmosKeyRing_Marshalling(t *testing.T) { + kr1, err := newCosmosKeyring(cryptorand.Reader) + require.NoError(t, err) + m, err := kr1.Marshal() + require.NoError(t, err) + kr2 := cosmosKeyring{} + err = kr2.Unmarshal(m) + require.NoError(t, err) + assert.True(t, bytes.Equal(kr1.pubKey, kr2.pubKey)) + assert.True(t, bytes.Equal(kr1.privKey, kr2.privKey)) + + // Invalid seed size should error + require.Error(t, kr2.Unmarshal([]byte{0x01})) +} diff --git a/core/services/keystore/keys/ocr2key/evm_keyring.go b/core/services/keystore/keys/ocr2key/evm_keyring.go new file mode 100644 index 00000000..c431008c --- /dev/null +++ b/core/services/keystore/keys/ocr2key/evm_keyring.go @@ -0,0 +1,113 @@ +package ocr2key + +import ( + "bytes" + "crypto/ecdsa" + "encoding/binary" + "io" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/goplugin/libocr/offchainreporting2/types" + "github.com/goplugin/libocr/offchainreporting2plus/chains/evmutil" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +var _ ocrtypes.OnchainKeyring = &evmKeyring{} + +type evmKeyring struct { + privateKey ecdsa.PrivateKey +} + +func newEVMKeyring(material io.Reader) (*evmKeyring, error) { + ecdsaKey, err := ecdsa.GenerateKey(curve, material) + if err != nil { + return nil, err + } + return &evmKeyring{privateKey: *ecdsaKey}, nil +} + +// XXX: PublicKey returns the address of the public key not the public key itself +func (ekr *evmKeyring) PublicKey() ocrtypes.OnchainPublicKey { + address := ekr.signingAddress() + return address[:] +} + +func (ekr *evmKeyring) Sign(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { + return ekr.signBlob(ekr.reportToSigData(reportCtx, report)) +} + +func (ekr *evmKeyring) 
reportToSigData(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) []byte { + rawReportContext := evmutil.RawReportContext(reportCtx) + sigData := crypto.Keccak256(report) + sigData = append(sigData, rawReportContext[0][:]...) + sigData = append(sigData, rawReportContext[1][:]...) + sigData = append(sigData, rawReportContext[2][:]...) + return crypto.Keccak256(sigData) +} + +func (ekr *evmKeyring) Sign3(digest types.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err error) { + return ekr.signBlob(ekr.reportToSigData3(digest, seqNr, r)) +} + +func (ekr *evmKeyring) reportToSigData3(digest types.ConfigDigest, seqNr uint64, r ocrtypes.Report) []byte { + rawReportContext := RawReportContext3(digest, seqNr) + sigData := crypto.Keccak256(r) + sigData = append(sigData, rawReportContext[0][:]...) + sigData = append(sigData, rawReportContext[1][:]...) + return crypto.Keccak256(sigData) +} + +func RawReportContext3(digest types.ConfigDigest, seqNr uint64) [2][32]byte { + seqNrBytes := [32]byte{} + binary.BigEndian.PutUint64(seqNrBytes[:], seqNr) + return [2][32]byte{ + digest, + seqNrBytes, + } +} + +func (ekr *evmKeyring) signBlob(b []byte) (sig []byte, err error) { + return crypto.Sign(b, &ekr.privateKey) +} + +func (ekr *evmKeyring) Verify(publicKey ocrtypes.OnchainPublicKey, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signature []byte) bool { + hash := ekr.reportToSigData(reportCtx, report) + return ekr.verifyBlob(publicKey, hash, signature) +} + +func (ekr *evmKeyring) Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool { + hash := ekr.reportToSigData3(cd, seqNr, r) + return ekr.verifyBlob(publicKey, hash, signature) +} + +func (ekr *evmKeyring) verifyBlob(pubkey types.OnchainPublicKey, b, sig []byte) bool { + authorPubkey, err := crypto.SigToPub(b, sig) + if err != nil { + return false + } + authorAddress := crypto.PubkeyToAddress(*authorPubkey) + // no 
need for constant time compare since neither arg is sensitive + return bytes.Equal(pubkey[:], authorAddress[:]) +} + +func (ekr *evmKeyring) MaxSignatureLength() int { + return 65 +} + +func (ekr *evmKeyring) signingAddress() common.Address { + return crypto.PubkeyToAddress(*(&ekr.privateKey).Public().(*ecdsa.PublicKey)) +} + +func (ekr *evmKeyring) Marshal() ([]byte, error) { + return crypto.FromECDSA(&ekr.privateKey), nil +} + +func (ekr *evmKeyring) Unmarshal(in []byte) error { + privateKey, err := crypto.ToECDSA(in) + if err != nil { + return err + } + ekr.privateKey = *privateKey + return nil +} diff --git a/core/services/keystore/keys/ocr2key/evm_keyring_test.go b/core/services/keystore/keys/ocr2key/evm_keyring_test.go new file mode 100644 index 00000000..f4a0f82e --- /dev/null +++ b/core/services/keystore/keys/ocr2key/evm_keyring_test.go @@ -0,0 +1,98 @@ +package ocr2key + +import ( + "bytes" + cryptorand "crypto/rand" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/offchainreporting2/types" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestEVMKeyring_SignVerify(t *testing.T) { + kr1, err := newEVMKeyring(cryptorand.Reader) + require.NoError(t, err) + kr2, err := newEVMKeyring(cryptorand.Reader) + require.NoError(t, err) + + ctx := ocrtypes.ReportContext{} + + t.Run("can verify", func(t *testing.T) { + report := ocrtypes.Report{} + sig, err := kr1.Sign(ctx, report) + require.NoError(t, err) + t.Log(len(sig)) + result := kr2.Verify(kr1.PublicKey(), ctx, report, sig) + assert.True(t, result) + }) + + t.Run("invalid sig", func(t *testing.T) { + report := ocrtypes.Report{} + result := kr2.Verify(kr1.PublicKey(), ctx, report, []byte{0x01}) + assert.False(t, result) + }) + + t.Run("invalid pubkey", func(t *testing.T) { + report := ocrtypes.Report{} + sig, err := kr1.Sign(ctx, 
report) + require.NoError(t, err) + result := kr2.Verify([]byte{0x01}, ctx, report, sig) + assert.False(t, result) + }) +} + +func TestEVMKeyring_Sign3Verify3(t *testing.T) { + kr1, err := newEVMKeyring(cryptorand.Reader) + require.NoError(t, err) + kr2, err := newEVMKeyring(cryptorand.Reader) + require.NoError(t, err) + + digest, err := types.BytesToConfigDigest(testutils.MustRandBytes(32)) + require.NoError(t, err) + seqNr := rand.Uint64() + r := ocrtypes.Report(testutils.MustRandBytes(rand.Intn(1024))) + + t.Run("can verify", func(t *testing.T) { + sig, err := kr1.Sign3(digest, seqNr, r) + require.NoError(t, err) + t.Log(len(sig)) + result := kr2.Verify3(kr1.PublicKey(), digest, seqNr, r, sig) + assert.True(t, result) + }) + + t.Run("invalid sig", func(t *testing.T) { + result := kr2.Verify3(kr1.PublicKey(), digest, seqNr, r, []byte{0x01}) + assert.False(t, result) + }) + + t.Run("invalid pubkey", func(t *testing.T) { + sig, err := kr1.Sign3(digest, seqNr, r) + require.NoError(t, err) + result := kr2.Verify3([]byte{0x01}, digest, seqNr, r, sig) + assert.False(t, result) + }) +} + +func TestEVMKeyring_Marshalling(t *testing.T) { + kr1, err := newEVMKeyring(cryptorand.Reader) + require.NoError(t, err) + + m, err := kr1.Marshal() + require.NoError(t, err) + + kr2 := evmKeyring{} + err = kr2.Unmarshal(m) + require.NoError(t, err) + + assert.True(t, bytes.Equal(kr1.PublicKey(), kr2.PublicKey())) + assert.True(t, bytes.Equal(kr1.privateKey.D.Bytes(), kr2.privateKey.D.Bytes())) + + // Invalid seed size should error + assert.Error(t, kr2.Unmarshal([]byte{0x01})) +} diff --git a/core/services/keystore/keys/ocr2key/export.go b/core/services/keystore/keys/ocr2key/export.go new file mode 100644 index 00000000..8a06f221 --- /dev/null +++ b/core/services/keystore/keys/ocr2key/export.go @@ -0,0 +1,83 @@ +package ocr2key + +import ( + "encoding/hex" + + "github.com/ethereum/go-ethereum/accounts/keystore" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/starkkey" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const keyTypeIdentifier = "OCR2" + +// EncryptedOCRKeyExport represents encrypted OCR key export +type EncryptedOCRKeyExport struct { + KeyType string `json:"keyType"` + ChainType chaintype.ChainType `json:"chainType"` + ID string `json:"id"` + OnchainPublicKey string `json:"onchainPublicKey"` + OffChainPublicKey string `json:"offchainPublicKey"` + ConfigPublicKey string `json:"configPublicKey"` + Crypto keystore.CryptoJSON `json:"crypto"` +} + +func (x EncryptedOCRKeyExport) GetCrypto() keystore.CryptoJSON { + return x.Crypto +} + +// FromEncryptedJSON returns key from encrypted json +func FromEncryptedJSON(keyJSON []byte, password string) (KeyBundle, error) { + return keys.FromEncryptedJSON( + keyTypeIdentifier, + keyJSON, + password, + adulteratedPassword, + func(export EncryptedOCRKeyExport, rawPrivKey []byte) (KeyBundle, error) { + var kb KeyBundle + switch export.ChainType { + case chaintype.EVM: + kb = newKeyBundle(new(evmKeyring)) + case chaintype.Cosmos: + kb = newKeyBundle(new(cosmosKeyring)) + case chaintype.Solana: + kb = newKeyBundle(new(solanaKeyring)) + case chaintype.StarkNet: + kb = newKeyBundle(new(starkkey.OCR2Key)) + default: + return nil, chaintype.NewErrInvalidChainType(export.ChainType) + } + if err := kb.Unmarshal(rawPrivKey); err != nil { + return nil, err + } + return kb, nil + }, + ) +} + +// ToEncryptedJSON returns encrypted JSON representing key +func ToEncryptedJSON(key KeyBundle, password string, scryptParams utils.ScryptParams) (export []byte, err error) { + return keys.ToEncryptedJSON( + keyTypeIdentifier, + key.Raw(), + key, + password, + scryptParams, + adulteratedPassword, + func(id string, key KeyBundle, cryptoJSON keystore.CryptoJSON) EncryptedOCRKeyExport { + pubKeyConfig := key.ConfigEncryptionPublicKey() + pubKey := 
key.OffchainPublicKey() + return EncryptedOCRKeyExport{ + KeyType: id, + ChainType: key.ChainType(), + ID: key.ID(), + OnchainPublicKey: key.OnChainPublicKey(), + OffChainPublicKey: hex.EncodeToString(pubKey[:]), + ConfigPublicKey: hex.EncodeToString(pubKeyConfig[:]), + Crypto: cryptoJSON, + } + }, + ) +} diff --git a/core/services/keystore/keys/ocr2key/export_test.go b/core/services/keystore/keys/ocr2key/export_test.go new file mode 100644 index 00000000..256394d6 --- /dev/null +++ b/core/services/keystore/keys/ocr2key/export_test.go @@ -0,0 +1,40 @@ +package ocr2key + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestExport(t *testing.T) { + var tt = []struct { + chain chaintype.ChainType + }{ + {chain: chaintype.EVM}, + {chain: chaintype.Cosmos}, + {chain: chaintype.Solana}, + {chain: chaintype.StarkNet}, + } + for _, tc := range tt { + tc := tc + t.Run(string(tc.chain), func(t *testing.T) { + kb, err := New(tc.chain) + require.NoError(t, err) + ej, err := ToEncryptedJSON(kb, "blah", utils.FastScryptParams) + require.NoError(t, err) + kbAfter, err := FromEncryptedJSON(ej, "blah") + require.NoError(t, err) + assert.Equal(t, kbAfter.ID(), kb.ID()) + assert.Equal(t, kbAfter.PublicKey(), kb.PublicKey()) + assert.Equal(t, kbAfter.OffchainPublicKey(), kb.OffchainPublicKey()) + assert.Equal(t, kbAfter.MaxSignatureLength(), kb.MaxSignatureLength()) + assert.Equal(t, kbAfter.Raw(), kb.Raw()) + assert.Equal(t, kbAfter.ConfigEncryptionPublicKey(), kb.ConfigEncryptionPublicKey()) + assert.Equal(t, kbAfter.ChainType(), kb.ChainType()) + }) + } +} diff --git a/core/services/keystore/keys/ocr2key/generic_key_bundle.go b/core/services/keystore/keys/ocr2key/generic_key_bundle.go new file mode 100644 index 00000000..8f7d3397 --- /dev/null +++ 
b/core/services/keystore/keys/ocr2key/generic_key_bundle.go @@ -0,0 +1,182 @@ +package ocr2key + +import ( + "bytes" + cryptorand "crypto/rand" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "io" + + "github.com/pkg/errors" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +type ( + keyring interface { + ocrtypes.OnchainKeyring + OCR3SignerVerifier + Marshal() ([]byte, error) + Unmarshal(in []byte) error + } + + keyBundle[K keyring] struct { + keyBundleBase + keyring K + } + + keyBundleRawData struct { + ChainType chaintype.ChainType + OffchainKeyring []byte + Keyring []byte + ID models.Sha256Hash // tracked to preserve bundle ID in case of migrations + + // old chain specific format for migrating + EVMKeyring []byte `json:",omitempty"` + SolanaKeyring []byte `json:",omitempty"` + CosmosKeyring []byte `json:",omitempty"` + } +) + +func newKeyBundle[K keyring](key K) *keyBundle[K] { + return &keyBundle[K]{keyring: key} +} + +func newKeyBundleRand[K keyring](chain chaintype.ChainType, newKeyring func(material io.Reader) (K, error)) (*keyBundle[K], error) { + return newKeyBundleFrom(chain, newKeyring, cryptorand.Reader, cryptorand.Reader, cryptorand.Reader) +} + +func mustNewKeyBundleInsecure[K keyring](chain chaintype.ChainType, newKeyring func(material io.Reader) (K, error), reader io.Reader) *keyBundle[K] { + key, err := newKeyBundleFrom(chain, newKeyring, reader, reader, reader) + if err != nil { + panic(errors.Wrapf(err, "failed to generate new OCR2-%s Key", chain)) + } + return key +} + +func newKeyBundleFrom[K keyring](chain chaintype.ChainType, newKeyring func(material io.Reader) (K, error), onchainSigningKeyMaterial, onchainEncryptionKeyMaterial, offchainKeyMaterial io.Reader) (*keyBundle[K], error) { + offchainKeyring, err := newOffchainKeyring(onchainSigningKeyMaterial, onchainEncryptionKeyMaterial) 
+ if err != nil { + return nil, err + } + kr, err := newKeyring(onchainSigningKeyMaterial) + if err != nil { + return nil, err + } + k := keyBundle[K]{ + keyBundleBase: keyBundleBase{ + chainType: chain, + OffchainKeyring: *offchainKeyring, + }, + keyring: kr, + } + marshalledPrivK, err := k.Marshal() + if err != nil { + return nil, err + } + k.id = sha256.Sum256(marshalledPrivK) + return &k, nil +} + +func (kb *keyBundle[K]) MaxSignatureLength() int { + return kb.keyring.MaxSignatureLength() +} + +func (kb *keyBundle[K]) PublicKey() ocrtypes.OnchainPublicKey { + return kb.keyring.PublicKey() +} + +func (kb *keyBundle[K]) Sign(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { + return kb.keyring.Sign(reportCtx, report) +} + +func (kb *keyBundle[K]) Sign3(digest ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err error) { + return kb.keyring.Sign3(digest, seqNr, r) +} + +func (kb *keyBundle[K]) Verify(publicKey ocrtypes.OnchainPublicKey, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signature []byte) bool { + return kb.keyring.Verify(publicKey, reportCtx, report, signature) +} + +func (kb *keyBundle[K]) Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool { + return kb.keyring.Verify3(publicKey, cd, seqNr, r, signature) +} + +// OnChainPublicKey returns public component of the keypair used on chain +func (kb *keyBundle[K]) OnChainPublicKey() string { + return hex.EncodeToString(kb.keyring.PublicKey()) +} + +func (kb *keyBundle[K]) Marshal() ([]byte, error) { + offchainKeyringBytes, err := kb.OffchainKeyring.marshal() + if err != nil { + return nil, err + } + keyringBytes, err := kb.keyring.Marshal() + if err != nil { + return nil, err + } + rawKeyData := keyBundleRawData{ + ChainType: kb.chainType, + OffchainKeyring: offchainKeyringBytes, + Keyring: keyringBytes, + ID: kb.id, // preserve bundle ID + } + return 
json.Marshal(&rawKeyData) +} + +func (kb *keyBundle[K]) Unmarshal(b []byte) (err error) { + var rawKeyData keyBundleRawData + err = json.Unmarshal(b, &rawKeyData) + if err != nil { + return err + } + if err = rawKeyData.Migrate(b); err != nil { + return err + } + + err = kb.OffchainKeyring.unmarshal(rawKeyData.OffchainKeyring) + if err != nil { + return err + } + + err = kb.keyring.Unmarshal(rawKeyData.Keyring) + if err != nil { + return err + } + kb.chainType = rawKeyData.ChainType + kb.id = rawKeyData.ID + return nil +} + +func (kb *keyBundle[K]) Raw() Raw { + b, err := kb.Marshal() + if err != nil { + panic(err) + } + return b +} + +// migration code +func (kbraw *keyBundleRawData) Migrate(b []byte) error { + // if key is not stored in Keyring param, use EVM or Solana as Keyring + // for migrating, key will only be marshalled into Keyring + if len(kbraw.Keyring) == 0 { + if len(kbraw.EVMKeyring) != 0 { + kbraw.Keyring = kbraw.EVMKeyring + } else if len(kbraw.SolanaKeyring) != 0 { + kbraw.Keyring = kbraw.SolanaKeyring + } + } + + // if key does not have an ID associated with it (old formats), + // derive the key ID and preserve it + if bytes.Equal(kbraw.ID[:], models.EmptySha256Hash[:]) { + kbraw.ID = sha256.Sum256(b) + } + + return nil +} diff --git a/core/services/keystore/keys/ocr2key/generic_key_bundle_test.go b/core/services/keystore/keys/ocr2key/generic_key_bundle_test.go new file mode 100644 index 00000000..5a0eddde --- /dev/null +++ b/core/services/keystore/keys/ocr2key/generic_key_bundle_test.go @@ -0,0 +1,154 @@ +package ocr2key + +import ( + cryptorand "crypto/rand" + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" +) + +type ( + XXXOldEVMKeyBundleRawData struct { + ChainType chaintype.ChainType + OffchainKeyring []byte + EVMKeyring []byte + } + XXXOldSolanaKeyBundleRawData struct { + ChainType chaintype.ChainType + 
OffchainKeyring []byte + SolanaKeyring []byte + } + XXXOldV1GenericKeyBundleRawData struct { + ChainType chaintype.ChainType + OffchainKeyring []byte + Keyring []byte + // missing ID + } +) + +func TestGenericKeyBundle_Migrate_UnmarshalMarshal(t *testing.T) { + // offchain key + offKey, err := newOffchainKeyring(cryptorand.Reader, cryptorand.Reader) + require.NoError(t, err) + offBytes, err := offKey.marshal() + require.NoError(t, err) + + t.Run("EVM", func(t *testing.T) { + // onchain key + onKey, err := newEVMKeyring(cryptorand.Reader) + require.NoError(t, err) + onBytes, err := onKey.Marshal() + require.NoError(t, err) + + // marshal old key format + oldKey := XXXOldEVMKeyBundleRawData{ + ChainType: chaintype.EVM, + OffchainKeyring: offBytes, + EVMKeyring: onBytes, + } + bundleBytes, err := json.Marshal(oldKey) + require.NoError(t, err) + + // test Unmarshal with old raw bundle + bundle := newKeyBundle(&evmKeyring{}) + require.NoError(t, bundle.Unmarshal(bundleBytes)) + newBundleBytes, err := bundle.Marshal() // marshalling migrates to a generic struct + require.NoError(t, err) + + // new bundle == old bundle (only difference is Keyring == Keyring) + var newRawBundle keyBundleRawData + require.NoError(t, json.Unmarshal(newBundleBytes, &newRawBundle)) + assert.Equal(t, oldKey.ChainType, newRawBundle.ChainType) + assert.Equal(t, oldKey.OffchainKeyring, newRawBundle.OffchainKeyring) + assert.Equal(t, oldKey.EVMKeyring, newRawBundle.Keyring) + + // test unmarshalling again to ensure ID has not changed + // the underlying bytes have changed, but ID should be preserved + newBundle := newKeyBundle(&evmKeyring{}) + require.NoError(t, newBundle.Unmarshal(newBundleBytes)) + assert.Equal(t, bundle.ID(), newBundle.ID()) + }) + + t.Run("Solana", func(t *testing.T) { + // onchain key + onKey, err := newSolanaKeyring(cryptorand.Reader) + require.NoError(t, err) + onBytes, err := onKey.Marshal() + require.NoError(t, err) + + // marshal old key format + oldKey := 
XXXOldSolanaKeyBundleRawData{ + ChainType: chaintype.Solana, + OffchainKeyring: offBytes, + SolanaKeyring: onBytes, + } + bundleBytes, err := json.Marshal(oldKey) + require.NoError(t, err) + + // test Unmarshal with old raw bundle + bundle := newKeyBundle(&solanaKeyring{}) + require.NoError(t, bundle.Unmarshal(bundleBytes)) + newBundleBytes, err := bundle.Marshal() + require.NoError(t, err) + + // new bundle == old bundle (only difference is Keyring == Keyring) + var newRawBundle keyBundleRawData + require.NoError(t, json.Unmarshal(newBundleBytes, &newRawBundle)) + assert.Equal(t, oldKey.ChainType, newRawBundle.ChainType) + assert.Equal(t, oldKey.OffchainKeyring, newRawBundle.OffchainKeyring) + assert.Equal(t, oldKey.SolanaKeyring, newRawBundle.Keyring) + + // test unmarshalling again to ensure ID has not changed + // the underlying bytes have changed, but ID should be preserved + newBundle := newKeyBundle(&solanaKeyring{}) + require.NoError(t, newBundle.Unmarshal(newBundleBytes)) + assert.Equal(t, bundle.ID(), newBundle.ID()) + }) + + t.Run("Cosmos", func(t *testing.T) { + // onchain key + bundle, err := newKeyBundleRand(chaintype.Cosmos, newCosmosKeyring) + require.NoError(t, err) + bundleBytes, err := bundle.Marshal() + require.NoError(t, err) + + // test unmarshalling again to ensure ID has not changed + // the underlying bytes have changed, but ID should be preserved + otherBundle := newKeyBundle(&cosmosKeyring{}) + require.NoError(t, otherBundle.Unmarshal(bundleBytes)) + assert.Equal(t, bundle.ID(), otherBundle.ID()) + }) + + t.Run("MissingID", func(t *testing.T) { + // onchain key + onKey, err := newEVMKeyring(cryptorand.Reader) + require.NoError(t, err) + onBytes, err := onKey.Marshal() + require.NoError(t, err) + + // build key without ID parameter + oldKey := XXXOldV1GenericKeyBundleRawData{ + ChainType: chaintype.EVM, + OffchainKeyring: offBytes, + Keyring: onBytes, + } + bundleBytes, err := json.Marshal(oldKey) + require.NoError(t, err) + + // unmarshal 
first time to generate ID + bundle := newKeyBundle(&evmKeyring{}) + require.NoError(t, bundle.Unmarshal(bundleBytes)) + + // marshal and unmarshal again + // different bytes generated, ID should not change + newBundleBytes, err := bundle.Marshal() + require.NoError(t, err) + newBundle := newKeyBundle(&evmKeyring{}) + require.NoError(t, newBundle.Unmarshal(newBundleBytes)) + assert.Equal(t, bundle.ID(), newBundle.ID()) + }) +} diff --git a/core/services/keystore/keys/ocr2key/key_bundle.go b/core/services/keystore/keys/ocr2key/key_bundle.go new file mode 100644 index 00000000..9506b36c --- /dev/null +++ b/core/services/keystore/keys/ocr2key/key_bundle.go @@ -0,0 +1,138 @@ +package ocr2key + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "io" + + "github.com/ethereum/go-ethereum/crypto/secp256k1" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/starkkey" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +type OCR3SignerVerifier interface { + Sign3(digest ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err error) + Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool +} + +// nolint +type KeyBundle interface { + // OnchainKeyring is used for signing reports (groups of observations, verified onchain) + ocrtypes.OnchainKeyring + // OffchainKeyring is used for signing observations + ocrtypes.OffchainKeyring + + OCR3SignerVerifier + + ID() string + ChainType() chaintype.ChainType + Marshal() ([]byte, error) + Unmarshal(b []byte) (err error) + Raw() Raw + OnChainPublicKey() string + // Decrypts ciphertext using the encryptionKey from an OCR2 OffchainKeyring + NaclBoxOpenAnonymous(ciphertext []byte) (plaintext []byte, err error) +} + +// check generic keybundle for each chain conforms to KeyBundle 
interface +var _ KeyBundle = &keyBundle[*evmKeyring]{} +var _ KeyBundle = &keyBundle[*cosmosKeyring]{} +var _ KeyBundle = &keyBundle[*solanaKeyring]{} +var _ KeyBundle = &keyBundle[*starkkey.OCR2Key]{} + +var curve = secp256k1.S256() + +// New returns key bundle based on the chain type +func New(chainType chaintype.ChainType) (KeyBundle, error) { + switch chainType { + case chaintype.EVM: + return newKeyBundleRand(chaintype.EVM, newEVMKeyring) + case chaintype.Cosmos: + return newKeyBundleRand(chaintype.Cosmos, newCosmosKeyring) + case chaintype.Solana: + return newKeyBundleRand(chaintype.Solana, newSolanaKeyring) + case chaintype.StarkNet: + return newKeyBundleRand(chaintype.StarkNet, starkkey.NewOCR2Key) + } + return nil, chaintype.NewErrInvalidChainType(chainType) +} + +// MustNewInsecure returns key bundle based on the chain type or panics +func MustNewInsecure(reader io.Reader, chainType chaintype.ChainType) KeyBundle { + switch chainType { + case chaintype.EVM: + return mustNewKeyBundleInsecure(chaintype.EVM, newEVMKeyring, reader) + case chaintype.Cosmos: + return mustNewKeyBundleInsecure(chaintype.Cosmos, newCosmosKeyring, reader) + case chaintype.Solana: + return mustNewKeyBundleInsecure(chaintype.Solana, newSolanaKeyring, reader) + case chaintype.StarkNet: + return mustNewKeyBundleInsecure(chaintype.StarkNet, starkkey.NewOCR2Key, reader) + } + panic(chaintype.NewErrInvalidChainType(chainType)) +} + +var _ fmt.GoStringer = &keyBundleBase{} + +type keyBundleBase struct { + OffchainKeyring + id models.Sha256Hash + chainType chaintype.ChainType +} + +func (kb keyBundleBase) ID() string { + return hex.EncodeToString(kb.id[:]) +} + +// ChainType gets the chain type from the key bundle +func (kb keyBundleBase) ChainType() chaintype.ChainType { + return kb.chainType +} + +// String reduces the risk of accidentally logging the private key +func (kb keyBundleBase) String() string { + return fmt.Sprintf("KeyBundle{chainType: %s, id: %s}", kb.ChainType(), kb.ID()) +} 
+ +// GoString reduces the risk of accidentally logging the private key +func (kb keyBundleBase) GoString() string { + return kb.String() +} + +// nolint +type Raw []byte + +func (raw Raw) Key() (kb KeyBundle) { + var temp struct{ ChainType chaintype.ChainType } + err := json.Unmarshal(raw, &temp) + if err != nil { + panic(err) + } + switch temp.ChainType { + case chaintype.EVM: + kb = newKeyBundle(new(evmKeyring)) + case chaintype.Cosmos: + kb = newKeyBundle(new(cosmosKeyring)) + case chaintype.Solana: + kb = newKeyBundle(new(solanaKeyring)) + case chaintype.StarkNet: + kb = newKeyBundle(new(starkkey.OCR2Key)) + default: + return nil + } + if err := kb.Unmarshal(raw); err != nil { + panic(err) + } + return +} + +// type is added to the beginning of the passwords for OCR key bundles, +// so that the keys can't accidentally be mis-used in the wrong place +func adulteratedPassword(auth string) string { + s := "ocr2key" + auth + return s +} diff --git a/core/services/keystore/keys/ocr2key/key_bundle_test.go b/core/services/keystore/keys/ocr2key/key_bundle_test.go new file mode 100644 index 00000000..fd55dada --- /dev/null +++ b/core/services/keystore/keys/ocr2key/key_bundle_test.go @@ -0,0 +1,76 @@ +package ocr2key_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" +) + +func assertKeyBundlesNotEqual(t *testing.T, pk1 ocr2key.KeyBundle, pk2 ocr2key.KeyBundle) { + assert.NotEqual(t, pk1.ID(), pk2.ID()) + assert.NotEqualValues(t, pk1.OffchainPublicKey(), pk2.OffchainPublicKey()) + assert.NotEqualValues(t, pk1.OnChainPublicKey(), pk2.OnChainPublicKey()) +} + +func TestOCR2Keys_New(t *testing.T) { + t.Parallel() + var keys []ocr2key.KeyBundle + + // create two keys for each chain type + for _, chain := range chaintype.SupportedChainTypes { + pk0, err := 
ocr2key.New(chain) + require.NoError(t, err) + pk1, err := ocr2key.New(chain) + require.NoError(t, err) + + keys = append(keys, pk0) + keys = append(keys, pk1) + } + + // validate keys are unique + for i := 0; i < len(keys); i++ { + for j := i + 1; j < len(keys); j++ { + assertKeyBundlesNotEqual(t, keys[i], keys[j]) + } + } + + // validate chain types + for i := 0; i < len(keys); i += 2 { + // check key for same chain + require.Equal(t, keys[i].ChainType(), keys[i+1].ChainType()) + + // check 1 key for each chain + for j := i + 2; j < len(keys); j += 2 { + require.NotEqual(t, keys[i].ChainType(), keys[j].ChainType()) + } + } +} + +func TestOCR2KeyBundle_RawToKey(t *testing.T) { + t.Parallel() + + for _, chain := range chaintype.SupportedChainTypes { + pk, err := ocr2key.New(chain) + require.NoError(t, err) + + pkFromRaw := pk.Raw().Key() + assert.NotNil(t, pkFromRaw) + } +} + +func TestOCR2KeyBundle_BundleBase(t *testing.T) { + t.Parallel() + + for _, chain := range chaintype.SupportedChainTypes { + kb, err := ocr2key.New(chain) + require.NoError(t, err) + + assert.NotNil(t, kb.ID()) + assert.Equal(t, fmt.Sprintf(`bundle: KeyBundle{chainType: %s, id: %s}`, chain, kb.ID()), fmt.Sprintf(`bundle: %s`, kb)) + } +} diff --git a/core/services/keystore/keys/ocr2key/offchain_keyring.go b/core/services/keystore/keys/ocr2key/offchain_keyring.go new file mode 100644 index 00000000..fdddc93e --- /dev/null +++ b/core/services/keystore/keys/ocr2key/offchain_keyring.go @@ -0,0 +1,136 @@ +package ocr2key + +import ( + "bytes" + "crypto/ed25519" + "encoding/binary" + "errors" + "io" + + "golang.org/x/crypto/nacl/box" + + "golang.org/x/crypto/curve25519" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +var _ ocrtypes.OffchainKeyring = &OffchainKeyring{} + +// OffchainKeyring contains the secret keys needed for the OCR nodes to share secrets +// and perform aggregation. +// +// This is currently an ed25519 signing key and a separate encryption key. 
+//
+// All its functions should be thread-safe.
+type OffchainKeyring struct {
+	signingKey    ed25519.PrivateKey
+	encryptionKey [curve25519.ScalarSize]byte
+}
+
+func newOffchainKeyring(encryptionMaterial, signingMaterial io.Reader) (*OffchainKeyring, error) {
+	_, signingKey, err := ed25519.GenerateKey(signingMaterial)
+	if err != nil {
+		return nil, err
+	}
+
+	encryptionKey := [curve25519.ScalarSize]byte{}
+	_, err = encryptionMaterial.Read(encryptionKey[:])
+	if err != nil {
+		return nil, err
+	}
+
+	ok := &OffchainKeyring{
+		signingKey:    signingKey,
+		encryptionKey: encryptionKey,
+	}
+	_, err = ok.configEncryptionPublicKey()
+	if err != nil {
+		return nil, err
+	}
+	return ok, nil
+}
+
+// NaclBoxOpenAnonymous decrypts a message that was encrypted using the OCR2 Offchain public key
+func (ok *OffchainKeyring) NaclBoxOpenAnonymous(ciphertext []byte) (plaintext []byte, err error) {
+	if len(ciphertext) < box.Overhead {
+		return nil, errors.New("ciphertext too short")
+	}
+
+	publicKey := [curve25519.PointSize]byte(ok.ConfigEncryptionPublicKey())
+
+	decrypted, success := box.OpenAnonymous(nil, ciphertext, &publicKey, &ok.encryptionKey)
+	if !success {
+		return nil, errors.New("decryption failed")
+	}
+
+	return decrypted, nil
+}
+
+// OffchainSign signs message using private key
+func (ok *OffchainKeyring) OffchainSign(msg []byte) (signature []byte, err error) {
+	return ed25519.Sign(ok.signingKey, msg), nil
+}
+
+// ConfigDiffieHellman returns the shared point obtained by multiplying someone's
+// public key by a secret scalar (in this case, the offchain keyring's encryption key).
+func (ok *OffchainKeyring) ConfigDiffieHellman(point [curve25519.PointSize]byte) ([curve25519.PointSize]byte, error) { + p, err := curve25519.X25519(ok.encryptionKey[:], point[:]) + if err != nil { + return [curve25519.PointSize]byte{}, err + } + sharedPoint := [ed25519.PublicKeySize]byte{} + copy(sharedPoint[:], p) + return sharedPoint, nil +} + +// OffchainPublicKey returns the public component of this offchain keyring. +func (ok *OffchainKeyring) OffchainPublicKey() ocrtypes.OffchainPublicKey { + var offchainPubKey [ed25519.PublicKeySize]byte + copy(offchainPubKey[:], ok.signingKey.Public().(ed25519.PublicKey)[:]) + return offchainPubKey +} + +// ConfigEncryptionPublicKey returns config public key +func (ok *OffchainKeyring) ConfigEncryptionPublicKey() ocrtypes.ConfigEncryptionPublicKey { + cpk, _ := ok.configEncryptionPublicKey() + return cpk +} + +func (ok *OffchainKeyring) configEncryptionPublicKey() (ocrtypes.ConfigEncryptionPublicKey, error) { + rv, err := curve25519.X25519(ok.encryptionKey[:], curve25519.Basepoint) + if err != nil { + return [curve25519.PointSize]byte{}, err + } + var rvFixed [curve25519.PointSize]byte + copy(rvFixed[:], rv) + return rvFixed, nil +} + +func (ok *OffchainKeyring) marshal() ([]byte, error) { + buffer := new(bytes.Buffer) + err := binary.Write(buffer, binary.LittleEndian, ok.signingKey) + if err != nil { + return nil, err + } + err = binary.Write(buffer, binary.LittleEndian, ok.encryptionKey) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +func (ok *OffchainKeyring) unmarshal(in []byte) error { + buffer := bytes.NewReader(in) + ok.signingKey = make(ed25519.PrivateKey, ed25519.PrivateKeySize) + err := binary.Read(buffer, binary.LittleEndian, &ok.signingKey) + if err != nil { + return err + } + ok.encryptionKey = [curve25519.ScalarSize]byte{} + err = binary.Read(buffer, binary.LittleEndian, &ok.encryptionKey) + if err != nil { + return err + } + _, err = ok.configEncryptionPublicKey() + return err +} 
diff --git a/core/services/keystore/keys/ocr2key/offchain_keyring_test.go b/core/services/keystore/keys/ocr2key/offchain_keyring_test.go new file mode 100644 index 00000000..c64850fa --- /dev/null +++ b/core/services/keystore/keys/ocr2key/offchain_keyring_test.go @@ -0,0 +1,68 @@ +package ocr2key + +import ( + "bytes" + "crypto/ed25519" + cryptorand "crypto/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/curve25519" + "golang.org/x/crypto/nacl/box" +) + +func TestOffchainKeyring(t *testing.T) { + kr, err := newOffchainKeyring(cryptorand.Reader, cryptorand.Reader) + require.NoError(t, err) + pubKey := kr.OffchainPublicKey() + assert.True(t, bytes.Equal(kr.signingKey.Public().(ed25519.PublicKey), pubKey[:])) +} + +func TestOffchainKeyring_NaclBoxSealAnonymous(t *testing.T) { + kr, err := newOffchainKeyring(cryptorand.Reader, cryptorand.Reader) + require.NoError(t, err) + + originalMessage := []byte("test") + + encryptedMessage := naclBoxSealAnonymous(t, kr.ConfigEncryptionPublicKey(), originalMessage) + + decryptedMessage, err := kr.NaclBoxOpenAnonymous(encryptedMessage) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, originalMessage, decryptedMessage) +} + +func TestOffchainKeyring_NaclBoxSealAnonymous_ShortCiphertext(t *testing.T) { + kr, err := newOffchainKeyring(cryptorand.Reader, cryptorand.Reader) + require.NoError(t, err) + + shortMessage := []byte("short") + + _, err = kr.NaclBoxOpenAnonymous(shortMessage) + assert.Equal(t, err.Error(), "ciphertext too short") +} + +func TestOffchainKeyring_NaclBoxSealAnonymous_FailedDecryption(t *testing.T) { + kr, err := newOffchainKeyring(cryptorand.Reader, cryptorand.Reader) + require.NoError(t, err) + + invalid := []byte("invalidEncryptedMessage") + + _, err = kr.NaclBoxOpenAnonymous(invalid) + assert.Equal(t, err.Error(), "decryption failed") +} + +func naclBoxSealAnonymous(t *testing.T, peerPublicKey [curve25519.PointSize]byte, plaintext 
[]byte) []byte { + t.Helper() + + ciphertext, err := box.SealAnonymous(nil, plaintext, &peerPublicKey, cryptorand.Reader) + if err != nil { + t.Fatalf("encryption failed") + return nil + } + + return ciphertext +} diff --git a/core/services/keystore/keys/ocr2key/solana_keyring.go b/core/services/keystore/keys/ocr2key/solana_keyring.go new file mode 100644 index 00000000..69b45d36 --- /dev/null +++ b/core/services/keystore/keys/ocr2key/solana_keyring.go @@ -0,0 +1,90 @@ +package ocr2key + +import ( + "bytes" + "crypto/ecdsa" + "crypto/sha256" + "io" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/pkg/errors" + "github.com/goplugin/libocr/offchainreporting2/types" + "github.com/goplugin/libocr/offchainreporting2plus/chains/evmutil" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +var _ ocrtypes.OnchainKeyring = &solanaKeyring{} + +type solanaKeyring struct { + privateKey ecdsa.PrivateKey +} + +func newSolanaKeyring(material io.Reader) (*solanaKeyring, error) { + ecdsaKey, err := ecdsa.GenerateKey(curve, material) + if err != nil { + return nil, err + } + return &solanaKeyring{privateKey: *ecdsaKey}, nil +} + +// XXX: PublicKey returns the evm-style address of the public key not the public key itself +func (skr *solanaKeyring) PublicKey() ocrtypes.OnchainPublicKey { + address := crypto.PubkeyToAddress(*(&skr.privateKey).Public().(*ecdsa.PublicKey)) + return address[:] +} + +func (skr *solanaKeyring) reportToSigData(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) []byte { + rawReportContext := evmutil.RawReportContext(reportCtx) + h := sha256.New() + h.Write([]byte{uint8(len(report))}) + h.Write(report) + h.Write(rawReportContext[0][:]) + h.Write(rawReportContext[1][:]) + h.Write(rawReportContext[2][:]) + return h.Sum(nil) +} + +func (skr *solanaKeyring) Sign(reportCtx ocrtypes.ReportContext, report ocrtypes.Report) ([]byte, error) { + return skr.signBlob(skr.reportToSigData(reportCtx, report)) +} + +func (skr 
*solanaKeyring) Sign3(digest types.ConfigDigest, seqNr uint64, r ocrtypes.Report) (signature []byte, err error) { + return nil, errors.New("not implemented") +} + +func (skr *solanaKeyring) signBlob(b []byte) (sig []byte, err error) { + return crypto.Sign(b, &skr.privateKey) +} + +func (skr *solanaKeyring) Verify(publicKey ocrtypes.OnchainPublicKey, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signature []byte) bool { + hash := skr.reportToSigData(reportCtx, report) + return skr.verifyBlob(publicKey, hash, signature) +} + +func (skr *solanaKeyring) Verify3(publicKey ocrtypes.OnchainPublicKey, cd ocrtypes.ConfigDigest, seqNr uint64, r ocrtypes.Report, signature []byte) bool { + return false +} + +func (skr *solanaKeyring) verifyBlob(pubkey types.OnchainPublicKey, b, sig []byte) bool { + authorPubkey, err := crypto.SigToPub(b, sig) + if err != nil { + return false + } + authorAddress := crypto.PubkeyToAddress(*authorPubkey) + // no need for constant time compare since neither arg is sensitive + return bytes.Equal(pubkey[:], authorAddress[:]) +} + +func (skr *solanaKeyring) MaxSignatureLength() int { + return 65 +} + +func (skr *solanaKeyring) Marshal() ([]byte, error) { + return crypto.FromECDSA(&skr.privateKey), nil +} + +func (skr *solanaKeyring) Unmarshal(in []byte) error { + privateKey, err := crypto.ToECDSA(in) + skr.privateKey = *privateKey + return err +} diff --git a/core/services/keystore/keys/ocr2key/solana_keyring_test.go b/core/services/keystore/keys/ocr2key/solana_keyring_test.go new file mode 100644 index 00000000..15ac848c --- /dev/null +++ b/core/services/keystore/keys/ocr2key/solana_keyring_test.go @@ -0,0 +1,55 @@ +package ocr2key + +import ( + "bytes" + cryptorand "crypto/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +func TestSolanaKeyring_Sign_Verify(t *testing.T) { + kr1, err := 
newSolanaKeyring(cryptorand.Reader) + require.NoError(t, err) + kr2, err := newSolanaKeyring(cryptorand.Reader) + require.NoError(t, err) + ctx := ocrtypes.ReportContext{} + + t.Run("can verify", func(t *testing.T) { + report := ocrtypes.Report{} + sig, err := kr1.Sign(ctx, report) + require.NoError(t, err) + t.Log(len(sig)) + result := kr2.Verify(kr1.PublicKey(), ctx, report, sig) + assert.True(t, result) + }) + + t.Run("invalid sig", func(t *testing.T) { + report := ocrtypes.Report{} + result := kr2.Verify(kr1.PublicKey(), ctx, report, []byte{0x01}) + assert.False(t, result) + }) + + t.Run("invalid pubkey", func(t *testing.T) { + report := ocrtypes.Report{} + sig, err := kr1.Sign(ctx, report) + require.NoError(t, err) + result := kr2.Verify([]byte{0x01}, ctx, report, sig) + assert.False(t, result) + }) +} + +func TestSolanaKeyring_Marshalling(t *testing.T) { + kr1, err := newSolanaKeyring(cryptorand.Reader) + require.NoError(t, err) + m, err := kr1.Marshal() + require.NoError(t, err) + kr2 := solanaKeyring{} + err = kr2.Unmarshal(m) + require.NoError(t, err) + assert.True(t, bytes.Equal(kr1.PublicKey(), kr2.PublicKey())) + assert.True(t, bytes.Equal(kr1.privateKey.D.Bytes(), kr2.privateKey.D.Bytes())) +} diff --git a/core/services/keystore/keys/ocrkey/config_public_key.go b/core/services/keystore/keys/ocrkey/config_public_key.go new file mode 100644 index 00000000..2eb8f0cc --- /dev/null +++ b/core/services/keystore/keys/ocrkey/config_public_key.go @@ -0,0 +1,72 @@ +package ocrkey + +import ( + "database/sql/driver" + "encoding/hex" + "encoding/json" + "fmt" + "strings" + + "github.com/pkg/errors" + "golang.org/x/crypto/curve25519" +) + +const configPublicKeyPrefix = "ocrcfg_" + +// ConfigPublicKey represents the public key for the config decryption keypair +type ConfigPublicKey [curve25519.PointSize]byte + +func (cpk ConfigPublicKey) String() string { + return fmt.Sprintf("%s%s", configPublicKeyPrefix, cpk.Raw()) +} + +func (cpk ConfigPublicKey) Raw() string { + 
return hex.EncodeToString(cpk[:]) +} + +func (cpk ConfigPublicKey) MarshalJSON() ([]byte, error) { + return json.Marshal(cpk.String()) +} + +func (cpk *ConfigPublicKey) UnmarshalJSON(input []byte) error { + var hexString string + if err := json.Unmarshal(input, &hexString); err != nil { + return err + } + + return cpk.UnmarshalText([]byte(hexString)) +} + +// Scan reads the database value and returns an instance. +func (cpk *ConfigPublicKey) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return errors.Errorf("unable to convert %v of type %T to ConfigPublicKey", value, value) + } + if len(b) != curve25519.PointSize { + return errors.Errorf("unable to convert blob 0x%x of length %v to ConfigPublicKey", b, len(b)) + } + copy(cpk[:], b) + return nil +} + +// Value returns this instance serialized for database storage. +func (cpk ConfigPublicKey) Value() (driver.Value, error) { + return cpk[:], nil +} + +func (cpk *ConfigPublicKey) UnmarshalText(bs []byte) error { + input := string(bs) + if strings.HasPrefix(input, configPublicKeyPrefix) { + input = string(bs[len(configPublicKeyPrefix):]) + } + + decodedString, err := hex.DecodeString(input) + if err != nil { + return err + } + var result [curve25519.PointSize]byte + copy(result[:], decodedString[:curve25519.PointSize]) + *cpk = result + return nil +} diff --git a/core/services/keystore/keys/ocrkey/config_public_key_test.go b/core/services/keystore/keys/ocrkey/config_public_key_test.go new file mode 100644 index 00000000..f3d257bd --- /dev/null +++ b/core/services/keystore/keys/ocrkey/config_public_key_test.go @@ -0,0 +1,42 @@ +package ocrkey + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOCRKey_ConfigPublicKey(t *testing.T) { + k := MustNewV2XXXTestingOnly(big.NewInt(1)) + + t.Run("fails to unmarshal invalid JSON", func(t *testing.T) { + pk := ConfigPublicKey(k.PublicKeyConfig()) + + err := 
pk.UnmarshalJSON([]byte("")) + + assert.Error(t, err) + }) + + t.Run("returns serialized instance value", func(t *testing.T) { + pk := ConfigPublicKey(k.PublicKeyConfig()) + + v, err := pk.Value() + require.NoError(t, err) + + assert.NotEmpty(t, v) + }) + + t.Run("updates current instance by scanning another instance", func(t *testing.T) { + pk := ConfigPublicKey(k.PublicKeyConfig()) + + k2 := MustNewV2XXXTestingOnly(big.NewInt(1)) + pk2 := ConfigPublicKey(k2.PublicKeyConfig()) + + err := pk.Scan(pk2[:]) + require.NoError(t, err) + + assert.Equal(t, pk2.Raw(), pk.Raw()) + }) +} diff --git a/core/services/keystore/keys/ocrkey/export.go b/core/services/keystore/keys/ocrkey/export.go new file mode 100644 index 00000000..fb504663 --- /dev/null +++ b/core/services/keystore/keys/ocrkey/export.go @@ -0,0 +1,60 @@ +package ocrkey + +import ( + "github.com/ethereum/go-ethereum/accounts/keystore" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const keyTypeIdentifier = "OCR" + +func FromEncryptedJSON(keyJSON []byte, password string) (KeyV2, error) { + return keys.FromEncryptedJSON( + keyTypeIdentifier, + keyJSON, + password, + adulteratedPassword, + func(_ EncryptedOCRKeyExport, rawPrivKey []byte) (KeyV2, error) { + return Raw(rawPrivKey).Key(), nil + }, + ) +} + +type EncryptedOCRKeyExport struct { + KeyType string `json:"keyType"` + ID string `json:"id"` + OnChainSigningAddress OnChainSigningAddress `json:"onChainSigningAddress"` + OffChainPublicKey OffChainPublicKey `json:"offChainPublicKey"` + ConfigPublicKey ConfigPublicKey `json:"configPublicKey"` + Crypto keystore.CryptoJSON `json:"crypto"` +} + +func (x EncryptedOCRKeyExport) GetCrypto() keystore.CryptoJSON { + return x.Crypto +} + +func (key KeyV2) ToEncryptedJSON(password string, scryptParams utils.ScryptParams) (export []byte, err error) { + return keys.ToEncryptedJSON( + keyTypeIdentifier, + key.Raw(), + key, + password, + scryptParams, + 
adulteratedPassword, + func(id string, key KeyV2, cryptoJSON keystore.CryptoJSON) EncryptedOCRKeyExport { + return EncryptedOCRKeyExport{ + KeyType: id, + ID: key.ID(), + OnChainSigningAddress: key.OnChainSigning.Address(), + OffChainPublicKey: key.OffChainSigning.PublicKey(), + ConfigPublicKey: key.PublicKeyConfig(), + Crypto: cryptoJSON, + } + }, + ) +} + +func adulteratedPassword(password string) string { + return "ocrkey" + password +} diff --git a/core/services/keystore/keys/ocrkey/export_test.go b/core/services/keystore/keys/ocrkey/export_test.go new file mode 100644 index 00000000..3060a17a --- /dev/null +++ b/core/services/keystore/keys/ocrkey/export_test.go @@ -0,0 +1,19 @@ +package ocrkey + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" +) + +func TestOCRKeys_ExportImport(t *testing.T) { + keys.RunKeyExportImportTestcase(t, createKey, decryptKey) +} + +func createKey() (keys.KeyType, error) { + return NewV2() +} + +func decryptKey(keyJSON []byte, password string) (keys.KeyType, error) { + return FromEncryptedJSON(keyJSON, password) +} diff --git a/core/services/keystore/keys/ocrkey/helpers_test.go b/core/services/keystore/keys/ocrkey/helpers_test.go new file mode 100644 index 00000000..9634d721 --- /dev/null +++ b/core/services/keystore/keys/ocrkey/helpers_test.go @@ -0,0 +1,13 @@ +package ocrkey + +func (kb *KeyV2) ExportedOnChainSigning() *onChainPrivateKey { + return kb.OnChainSigning +} + +func (kb *KeyV2) ExportedOffChainSigning() *offChainPrivateKey { + return kb.OffChainSigning +} + +func (kb *KeyV2) ExportedOffChainEncryption() *[32]byte { + return kb.OffChainEncryption +} diff --git a/core/services/keystore/keys/ocrkey/key_bundle.go b/core/services/keystore/keys/ocrkey/key_bundle.go new file mode 100644 index 00000000..c7b5b98f --- /dev/null +++ b/core/services/keystore/keys/ocrkey/key_bundle.go @@ -0,0 +1,259 @@ +package ocrkey + +import ( + "crypto/ecdsa" + "crypto/ed25519" + cryptorand "crypto/rand" + 
"crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "log" + "time" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/pkg/errors" + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + "golang.org/x/crypto/curve25519" + + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type ( + // KeyBundle represents the bundle of keys needed for OCR + KeyBundle struct { + ID models.Sha256Hash + onChainSigning *onChainPrivateKey + offChainSigning *offChainPrivateKey + offChainEncryption *[curve25519.ScalarSize]byte + } + + // EncryptedKeyBundle holds an encrypted KeyBundle + EncryptedKeyBundle struct { + ID models.Sha256Hash + OnChainSigningAddress OnChainSigningAddress + OffChainPublicKey OffChainPublicKey + ConfigPublicKey ConfigPublicKey + EncryptedPrivateKeys []byte + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time + } +) + +func (ekb EncryptedKeyBundle) GetID() string { + return ekb.ID.String() +} + +func (ekb *EncryptedKeyBundle) SetID(value string) error { + var result models.Sha256Hash + decodedString, err := hex.DecodeString(value) + + if err != nil { + return err + } + + copy(result[:], decodedString[:32]) + ekb.ID = result + return nil +} + +// New makes a new set of OCR key bundles from cryptographically secure entropy +func New() (*KeyBundle, error) { + return NewFrom(cryptorand.Reader, cryptorand.Reader, cryptorand.Reader) +} + +// NewFrom makes a new set of OCR key bundles from cryptographically secure entropy +func NewFrom(onChainSigning io.Reader, offChainSigning io.Reader, offChainEncryption io.Reader) (*KeyBundle, error) { + ecdsaKey, err := ecdsa.GenerateKey(curve, onChainSigning) + if err != nil { + return nil, err + } + onChainPriv := (*onChainPrivateKey)(ecdsaKey) + + _, offChainPriv, err := ed25519.GenerateKey(offChainSigning) + if err != nil { + return nil, err + } + var encryptionPriv [curve25519.ScalarSize]byte + _, err = 
offChainEncryption.Read(encryptionPriv[:]) + if err != nil { + return nil, err + } + k := &KeyBundle{ + onChainSigning: onChainPriv, + offChainSigning: (*offChainPrivateKey)(&offChainPriv), + offChainEncryption: &encryptionPriv, + } + marshalledPrivK, err := json.Marshal(k) + if err != nil { + return nil, err + } + k.ID = sha256.Sum256(marshalledPrivK) + return k, nil +} + +// SignOnChain returns an ethereum-style ECDSA secp256k1 signature on msg. +func (pk *KeyBundle) SignOnChain(msg []byte) (signature []byte, err error) { + return pk.onChainSigning.Sign(msg) +} + +// SignOffChain returns an EdDSA-Ed25519 signature on msg. +func (pk *KeyBundle) SignOffChain(msg []byte) (signature []byte, err error) { + return pk.offChainSigning.Sign(msg) +} + +// ConfigDiffieHellman returns the shared point obtained by multiplying someone's +// public key by a secret scalar ( in this case, the offChainEncryption key.) +func (pk *KeyBundle) ConfigDiffieHellman(base *[curve25519.PointSize]byte) ( + sharedPoint *[curve25519.PointSize]byte, err error, +) { + p, err := curve25519.X25519(pk.offChainEncryption[:], base[:]) + if err != nil { + return nil, err + } + sharedPoint = new([ed25519.PublicKeySize]byte) + copy(sharedPoint[:], p) + return sharedPoint, nil +} + +// PublicKeyAddressOnChain returns public component of the keypair used in +// SignOnChain +func (pk *KeyBundle) PublicKeyAddressOnChain() ocrtypes.OnChainSigningAddress { + return ocrtypes.OnChainSigningAddress(pk.onChainSigning.Address()) +} + +// PublicKeyOffChain returns the public component of the keypair used in SignOffChain +func (pk *KeyBundle) PublicKeyOffChain() ocrtypes.OffchainPublicKey { + return ocrtypes.OffchainPublicKey(pk.offChainSigning.PublicKey()) +} + +// PublicKeyConfig returns the public component of the keypair used in ConfigKeyShare +func (pk *KeyBundle) PublicKeyConfig() [curve25519.PointSize]byte { + rv, err := curve25519.X25519(pk.offChainEncryption[:], curve25519.Basepoint) + if err != nil { + 
log.Println("failure while computing public key: " + err.Error()) + } + var rvFixed [curve25519.PointSize]byte + copy(rvFixed[:], rv) + return rvFixed +} + +// Encrypt combines the KeyBundle into a single json-serialized +// bytes array and then encrypts +func (pk *KeyBundle) Encrypt(auth string, scryptParams utils.ScryptParams) (*EncryptedKeyBundle, error) { + return pk.encrypt(auth, scryptParams) +} + +// encrypt combines the KeyBundle into a single json-serialized +// bytes array and then encrypts, using the provided scrypt params +// separated into a different function so that scryptParams can be +// weakened in tests +func (pk *KeyBundle) encrypt(auth string, scryptParams utils.ScryptParams) (*EncryptedKeyBundle, error) { + marshalledPrivK, err := json.Marshal(&pk) + if err != nil { + return nil, err + } + cryptoJSON, err := keystore.EncryptDataV3( + marshalledPrivK, + []byte(adulteratedPassword(auth)), + scryptParams.N, + scryptParams.P, + ) + if err != nil { + return nil, errors.Wrapf(err, "could not encrypt ocr key") + } + encryptedPrivKeys, err := json.Marshal(&cryptoJSON) + if err != nil { + return nil, errors.Wrapf(err, "could not encode cryptoJSON") + } + return &EncryptedKeyBundle{ + ID: pk.ID, + OnChainSigningAddress: pk.onChainSigning.Address(), + OffChainPublicKey: pk.offChainSigning.PublicKey(), + ConfigPublicKey: pk.PublicKeyConfig(), + EncryptedPrivateKeys: encryptedPrivKeys, + }, nil +} + +// Decrypt returns the PrivateKeys in e, decrypted via auth, or an error +func (ekb *EncryptedKeyBundle) Decrypt(auth string) (*KeyBundle, error) { + var cryptoJSON keystore.CryptoJSON + err := json.Unmarshal(ekb.EncryptedPrivateKeys, &cryptoJSON) + if err != nil { + return nil, errors.Wrapf(err, "invalid cryptoJSON for OCR key bundle") + } + marshalledPrivK, err := keystore.DecryptDataV3(cryptoJSON, adulteratedPassword(auth)) + if err != nil { + return nil, errors.Wrapf(err, "could not decrypt OCR key bundle") + } + var pk KeyBundle + err = 
json.Unmarshal(marshalledPrivK, &pk) + if err != nil { + return nil, errors.Wrapf(err, "could not unmarshal OCR key bundle") + } + return &pk, nil +} + +// MarshalJSON marshals the private keys into json +func (pk *KeyBundle) MarshalJSON() ([]byte, error) { + rawKeyData := keyBundleRawData{ + EcdsaD: *pk.onChainSigning.D, + Ed25519PrivKey: []byte(*pk.offChainSigning), + OffChainEncryption: *pk.offChainEncryption, + } + return json.Marshal(&rawKeyData) +} + +// UnmarshalJSON constructs KeyBundle from raw json +func (pk *KeyBundle) UnmarshalJSON(b []byte) (err error) { + var rawKeyData keyBundleRawData + err = json.Unmarshal(b, &rawKeyData) + if err != nil { + return err + } + ecdsaDSize := len(rawKeyData.EcdsaD.Bytes()) + if ecdsaDSize > curve25519.PointSize { + return errors.Wrapf(ErrScalarTooBig, "got %d byte ecdsa scalar", ecdsaDSize) + } + + publicKey := ecdsa.PublicKey{Curve: curve} + publicKey.X, publicKey.Y = curve.ScalarBaseMult(rawKeyData.EcdsaD.Bytes()) + privateKey := ecdsa.PrivateKey{ + PublicKey: publicKey, + D: &rawKeyData.EcdsaD, + } + onChainSigning := onChainPrivateKey(privateKey) + offChainSigning := offChainPrivateKey(rawKeyData.Ed25519PrivKey) + pk.onChainSigning = &onChainSigning + pk.offChainSigning = &offChainSigning + pk.offChainEncryption = &rawKeyData.OffChainEncryption + pk.ID = sha256.Sum256(b) + return nil +} + +// String reduces the risk of accidentally logging the private key +func (pk KeyBundle) String() string { + addressOnChain := pk.PublicKeyAddressOnChain() + return fmt.Sprintf( + "KeyBundle{PublicKeyAddressOnChain: %s, PublicKeyOffChain: %s}", + hex.EncodeToString(addressOnChain[:]), + hex.EncodeToString(pk.PublicKeyOffChain()), + ) +} + +// GoString reduces the risk of accidentally logging the private key +func (pk KeyBundle) GoString() string { + return pk.String() +} + +// GoString reduces the risk of accidentally logging the private key +func (pk KeyBundle) ToV2() KeyV2 { + return KeyV2{ + OnChainSigning: pk.onChainSigning, + 
OffChainSigning: pk.offChainSigning, + OffChainEncryption: pk.offChainEncryption, + } +} diff --git a/core/services/keystore/keys/ocrkey/key_bundle_test.go b/core/services/keystore/keys/ocrkey/key_bundle_test.go new file mode 100644 index 00000000..350d8d92 --- /dev/null +++ b/core/services/keystore/keys/ocrkey/key_bundle_test.go @@ -0,0 +1,102 @@ +package ocrkey_test + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func assertKeyBundlesNotEqual(t *testing.T, pk1 ocrkey.KeyV2, pk2 ocrkey.KeyV2) { + assert.NotEqual(t, pk1.ID(), pk2.ID()) + assert.NotEqual(t, pk1.ExportedOnChainSigning().X, pk2.ExportedOnChainSigning().X) + assert.NotEqual(t, pk1.ExportedOnChainSigning().Y, pk2.ExportedOnChainSigning().Y) + assert.NotEqual(t, pk1.ExportedOnChainSigning().D, pk2.ExportedOnChainSigning().D) + assert.NotEqual(t, pk1.ExportedOffChainSigning().PublicKey(), pk2.ExportedOffChainSigning().PublicKey()) + assert.NotEqual(t, pk1.ExportedOffChainEncryption(), pk2.ExportedOffChainEncryption()) +} + +func TestOCRKeys_New(t *testing.T) { + t.Parallel() + pk1, err := ocrkey.NewV2() + require.NoError(t, err) + pk2, err := ocrkey.NewV2() + require.NoError(t, err) + pk3, err := ocrkey.NewV2() + require.NoError(t, err) + assertKeyBundlesNotEqual(t, pk1, pk2) + assertKeyBundlesNotEqual(t, pk1, pk3) + assertKeyBundlesNotEqual(t, pk2, pk3) +} + +func TestOCRKeys_NewBundleIDMatchesOld(t *testing.T) { + t.Parallel() + oldKey, err := ocrkey.New() + require.NoError(t, err) + newKey := oldKey.ToV2() + require.Equal(t, oldKey.ID.String(), newKey.ID()) +} + +func TestOCRKeys_Raw_Key(t *testing.T) { + t.Parallel() + key := ocrkey.MustNewV2XXXTestingOnly(big.NewInt(1)) + require.Equal(t, key.ID(), key.Raw().Key().ID()) +} + +func TestOCRKeys_BundleSetID(t *testing.T) { + t.Parallel() + + k, err := 
ocrkey.New() + require.NoError(t, err) + ek, err := k.Encrypt("test", utils.FastScryptParams) + require.NoError(t, err) + + oldId := ek.GetID() + err = ek.SetID("48656c6c6f20476f7068657221") + require.NoError(t, err) + + assert.NotEqual(t, oldId, ek.GetID()) + + err = ek.SetID("invalid id") + assert.Error(t, err) +} + +func TestOCRKeys_BundleDecrypt(t *testing.T) { + t.Parallel() + + k, err := ocrkey.New() + require.NoError(t, err) + ek, err := k.Encrypt("test", utils.FastScryptParams) + require.NoError(t, err) + + _, err = ek.Decrypt("wrongpass") + assert.Error(t, err) + + dk, err := ek.Decrypt("test") + require.NoError(t, err) + + dk.GoString() + assert.Equal(t, k.GoString(), dk.GoString()) + assert.Equal(t, k.ID.String(), dk.ID.String()) +} + +func TestOCRKeys_BundleMarshalling(t *testing.T) { + t.Parallel() + + k, err := ocrkey.New() + require.NoError(t, err) + k2, err := ocrkey.New() + require.NoError(t, err) + + mk, err := k.MarshalJSON() + require.NoError(t, err) + + err = k2.UnmarshalJSON(mk) + require.NoError(t, err) + + assert.Equal(t, k.String(), k2.String()) +} diff --git a/core/services/keystore/keys/ocrkey/key_v2.go b/core/services/keystore/keys/ocrkey/key_v2.go new file mode 100644 index 00000000..1a85c0d6 --- /dev/null +++ b/core/services/keystore/keys/ocrkey/key_v2.go @@ -0,0 +1,201 @@ +package ocrkey + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "log" + "math/big" + + "github.com/ethereum/go-ethereum/crypto/secp256k1" + "github.com/pkg/errors" + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + "golang.org/x/crypto/curve25519" +) + +var ( + ErrScalarTooBig = errors.Errorf("can't handle scalars greater than %d", curve25519.PointSize) + curve = secp256k1.S256() +) + +type keyBundleRawData struct { + EcdsaD big.Int + Ed25519PrivKey []byte + OffChainEncryption [curve25519.ScalarSize]byte +} + +type Raw []byte + +func (raw Raw) Key() KeyV2 { + var key KeyV2 + 
err := json.Unmarshal(raw, &key) + if err != nil { + panic(errors.Wrap(err, "while unmarshalling OCR key")) + } + return key +} + +func (raw Raw) String() string { + return "" +} + +func (raw Raw) GoString() string { + return raw.String() +} + +var _ fmt.GoStringer = &KeyV2{} + +type KeyV2 struct { + OnChainSigning *onChainPrivateKey + OffChainSigning *offChainPrivateKey + OffChainEncryption *[curve25519.ScalarSize]byte +} + +func NewV2() (KeyV2, error) { + ecdsaKey, err := ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return KeyV2{}, err + } + onChainPriv := (*onChainPrivateKey)(ecdsaKey) + + _, offChainPriv, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + return KeyV2{}, err + } + var encryptionPriv [curve25519.ScalarSize]byte + _, err = rand.Reader.Read(encryptionPriv[:]) + if err != nil { + return KeyV2{}, err + } + return KeyV2{ + OnChainSigning: onChainPriv, + OffChainSigning: (*offChainPrivateKey)(&offChainPriv), + OffChainEncryption: &encryptionPriv, + }, nil +} + +func MustNewV2XXXTestingOnly(k *big.Int) KeyV2 { + ecdsaKey := new(ecdsa.PrivateKey) + ecdsaKey.PublicKey.Curve = curve + ecdsaKey.D = k + ecdsaKey.PublicKey.X, ecdsaKey.PublicKey.Y = curve.ScalarBaseMult(k.Bytes()) + onChainPriv := (*onChainPrivateKey)(ecdsaKey) + var seed [32]byte + copy(seed[:], k.Bytes()) + offChainPriv := ed25519.NewKeyFromSeed(seed[:]) + return KeyV2{ + OnChainSigning: onChainPriv, + OffChainSigning: (*offChainPrivateKey)(&offChainPriv), + OffChainEncryption: &seed, + } +} + +func (key KeyV2) ID() string { + sha := sha256.Sum256(key.Raw()) + return hex.EncodeToString(sha[:]) +} + +func (key KeyV2) Raw() Raw { + marshalledPrivK, err := json.Marshal(key) + if err != nil { + panic(errors.Wrap(err, "while calculating OCR key ID")) + } + return marshalledPrivK +} + +// SignOnChain returns an ethereum-style ECDSA secp256k1 signature on msg. 
+func (key KeyV2) SignOnChain(msg []byte) (signature []byte, err error) { + return key.OnChainSigning.Sign(msg) +} + +// SignOffChain returns an EdDSA-Ed25519 signature on msg. +func (key KeyV2) SignOffChain(msg []byte) (signature []byte, err error) { + return key.OffChainSigning.Sign(msg) +} + +// ConfigDiffieHellman returns the shared point obtained by multiplying someone's +// public key by a secret scalar ( in this case, the OffChainEncryption key.) +func (key KeyV2) ConfigDiffieHellman(base *[curve25519.PointSize]byte) ( + sharedPoint *[curve25519.PointSize]byte, err error, +) { + p, err := curve25519.X25519(key.OffChainEncryption[:], base[:]) + if err != nil { + return nil, err + } + sharedPoint = new([ed25519.PublicKeySize]byte) + copy(sharedPoint[:], p) + return sharedPoint, nil +} + +// PublicKeyAddressOnChain returns public component of the keypair used in +// SignOnChain +func (key KeyV2) PublicKeyAddressOnChain() ocrtypes.OnChainSigningAddress { + return ocrtypes.OnChainSigningAddress(key.OnChainSigning.Address()) +} + +// PublicKeyOffChain returns the public component of the keypair used in SignOffChain +func (key KeyV2) PublicKeyOffChain() ocrtypes.OffchainPublicKey { + return ocrtypes.OffchainPublicKey(key.OffChainSigning.PublicKey()) +} + +// PublicKeyConfig returns the public component of the keypair used in ConfigKeyShare +func (key KeyV2) PublicKeyConfig() [curve25519.PointSize]byte { + rv, err := curve25519.X25519(key.OffChainEncryption[:], curve25519.Basepoint) + if err != nil { + log.Println("failure while computing public key: " + err.Error()) + } + var rvFixed [curve25519.PointSize]byte + copy(rvFixed[:], rv) + return rvFixed +} + +func (key KeyV2) GetID() string { + return key.ID() +} + +func (key KeyV2) String() string { + return fmt.Sprintf("OCRKeyV2{ID: %s}", key.ID()) +} + +func (key KeyV2) GoString() string { + return key.String() +} + +// MarshalJSON marshals the private keys into json +func (key KeyV2) MarshalJSON() ([]byte, error) { 
+ rawKeyData := keyBundleRawData{ + EcdsaD: *key.OnChainSigning.D, + Ed25519PrivKey: []byte(*key.OffChainSigning), + OffChainEncryption: *key.OffChainEncryption, + } + return json.Marshal(&rawKeyData) +} + +func (key *KeyV2) UnmarshalJSON(b []byte) (err error) { + var rawKeyData keyBundleRawData + err = json.Unmarshal(b, &rawKeyData) + if err != nil { + return err + } + ecdsaDSize := len(rawKeyData.EcdsaD.Bytes()) + if ecdsaDSize > curve25519.PointSize { + return errors.Wrapf(ErrScalarTooBig, "got %d byte ecdsa scalar", ecdsaDSize) + } + + publicKey := ecdsa.PublicKey{Curve: curve} + publicKey.X, publicKey.Y = curve.ScalarBaseMult(rawKeyData.EcdsaD.Bytes()) + privateKey := ecdsa.PrivateKey{ + PublicKey: publicKey, + D: &rawKeyData.EcdsaD, + } + onChainSigning := onChainPrivateKey(privateKey) + offChainSigning := offChainPrivateKey(rawKeyData.Ed25519PrivKey) + key.OnChainSigning = &onChainSigning + key.OffChainSigning = &offChainSigning + key.OffChainEncryption = &rawKeyData.OffChainEncryption + return nil +} diff --git a/core/services/keystore/keys/ocrkey/off_chain_private_key.go b/core/services/keystore/keys/ocrkey/off_chain_private_key.go new file mode 100644 index 00000000..3addcf94 --- /dev/null +++ b/core/services/keystore/keys/ocrkey/off_chain_private_key.go @@ -0,0 +1,22 @@ +package ocrkey + +import ( + "crypto/ed25519" + + "github.com/pkg/errors" +) + +type offChainPrivateKey ed25519.PrivateKey + +// Sign returns the signature on msgHash with k +func (k *offChainPrivateKey) Sign(msg []byte) ([]byte, error) { + if k == nil { + return nil, errors.Errorf("attempt to sign with nil key") + } + return ed25519.Sign(ed25519.PrivateKey(*k), msg), nil +} + +// PublicKey returns the public key which commits to k +func (k *offChainPrivateKey) PublicKey() OffChainPublicKey { + return OffChainPublicKey(ed25519.PrivateKey(*k).Public().(ed25519.PublicKey)) +} diff --git a/core/services/keystore/keys/ocrkey/off_chain_public_key.go 
b/core/services/keystore/keys/ocrkey/off_chain_public_key.go new file mode 100644 index 00000000..cb499412 --- /dev/null +++ b/core/services/keystore/keys/ocrkey/off_chain_public_key.go @@ -0,0 +1,67 @@ +package ocrkey + +import ( + "crypto/ed25519" + "database/sql/driver" + "encoding/hex" + "encoding/json" + "fmt" + "strings" + + "github.com/pkg/errors" + + "github.com/ethereum/go-ethereum/common" +) + +const offChainPublicKeyPrefix = "ocroff_" + +type OffChainPublicKey ed25519.PublicKey + +func (ocpk OffChainPublicKey) String() string { + return fmt.Sprintf("%s%s", offChainPublicKeyPrefix, ocpk.Raw()) +} + +func (ocpk OffChainPublicKey) Raw() string { + return hex.EncodeToString(ocpk) +} + +func (ocpk OffChainPublicKey) MarshalJSON() ([]byte, error) { + return json.Marshal(ocpk.String()) +} + +func (ocpk *OffChainPublicKey) UnmarshalJSON(input []byte) error { + var hexString string + if err := json.Unmarshal(input, &hexString); err != nil { + return err + } + return ocpk.UnmarshalText([]byte(hexString)) +} + +func (ocpk *OffChainPublicKey) UnmarshalText(bs []byte) error { + input := string(bs) + if strings.HasPrefix(input, offChainPublicKeyPrefix) { + input = string(bs[len(offChainPublicKeyPrefix):]) + } + + result, err := hex.DecodeString(input) + if err != nil { + return err + } + copy(result[:], result[:common.AddressLength]) + *ocpk = result + return nil +} + +func (ocpk *OffChainPublicKey) Scan(value interface{}) error { + switch v := value.(type) { + case []byte: + *ocpk = v + return nil + default: + return errors.Errorf("invalid public key bytes got %T wanted []byte", v) + } +} + +func (ocpk OffChainPublicKey) Value() (driver.Value, error) { + return []byte(ocpk), nil +} diff --git a/core/services/keystore/keys/ocrkey/off_chain_public_key_test.go b/core/services/keystore/keys/ocrkey/off_chain_public_key_test.go new file mode 100644 index 00000000..16089e89 --- /dev/null +++ b/core/services/keystore/keys/ocrkey/off_chain_public_key_test.go @@ -0,0 +1,48 @@ 
+package ocrkey_test + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" +) + +func TestOCR_OffchainPublicKey_MarshalJSON(t *testing.T) { + t.Parallel() + rawBytes := make([]byte, 32) + rawBytes[31] = 1 + pubKey := ocrkey.OffChainPublicKey(rawBytes) + + pubKeyString := "ocroff_0000000000000000000000000000000000000000000000000000000000000001" + pubKeyJSON := fmt.Sprintf(`"%s"`, pubKeyString) + + result, err := json.Marshal(pubKey) + assert.NoError(t, err) + assert.Equal(t, pubKeyJSON, string(result)) +} + +func TestOCR_OffchainPublicKey_UnmarshalJSON_Happy(t *testing.T) { + t.Parallel() + + pubKeyString := "918a65a518c005d6367309bec4b26805f8afabef72cbf9940d9a0fd04ec80b38" + pubKeyJSON := fmt.Sprintf(`"%s"`, pubKeyString) + pubKey := ocrkey.OffChainPublicKey{} + + err := json.Unmarshal([]byte(pubKeyJSON), &pubKey) + assert.NoError(t, err) + assert.Equal(t, pubKeyString, pubKey.Raw()) +} + +func TestOCR_OffchainPublicKey_UnmarshalJSON_Error(t *testing.T) { + t.Parallel() + + pubKeyString := "hello world" + pubKeyJSON := fmt.Sprintf(`"%s"`, pubKeyString) + pubKey := ocrkey.OffChainPublicKey{} + + err := json.Unmarshal([]byte(pubKeyJSON), &pubKey) + assert.Error(t, err) +} diff --git a/core/services/keystore/keys/ocrkey/off_chan_private_key_test.go b/core/services/keystore/keys/ocrkey/off_chan_private_key_test.go new file mode 100644 index 00000000..0c0f6a96 --- /dev/null +++ b/core/services/keystore/keys/ocrkey/off_chan_private_key_test.go @@ -0,0 +1,20 @@ +package ocrkey + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOCRKeys_OffChainPrivateKey(t *testing.T) { + t.Parallel() + + k, err := New() + require.NoError(t, err) + + sig, err := k.offChainSigning.Sign([]byte("hello world")) + + assert.NoError(t, err) + assert.NotEmpty(t, sig) +} diff --git 
a/core/services/keystore/keys/ocrkey/on_chain_private_key.go b/core/services/keystore/keys/ocrkey/on_chain_private_key.go new file mode 100644 index 00000000..94a82cbf --- /dev/null +++ b/core/services/keystore/keys/ocrkey/on_chain_private_key.go @@ -0,0 +1,23 @@ +package ocrkey + +import ( + "crypto/ecdsa" + + "github.com/ethereum/go-ethereum/crypto" +) + +type onChainPrivateKey ecdsa.PrivateKey + +// Sign returns the signature on msgHash with k +func (k *onChainPrivateKey) Sign(msg []byte) (signature []byte, err error) { + sig, err := crypto.Sign(onChainHash(msg), (*ecdsa.PrivateKey)(k)) + return sig, err +} + +func (k onChainPrivateKey) Address() OnChainSigningAddress { + return OnChainSigningAddress(crypto.PubkeyToAddress(k.PublicKey)) +} + +func onChainHash(msg []byte) []byte { + return crypto.Keccak256(msg) +} diff --git a/core/services/keystore/keys/ocrkey/on_chain_private_key_test.go b/core/services/keystore/keys/ocrkey/on_chain_private_key_test.go new file mode 100644 index 00000000..0a005b26 --- /dev/null +++ b/core/services/keystore/keys/ocrkey/on_chain_private_key_test.go @@ -0,0 +1,25 @@ +package ocrkey + +import ( + "crypto/ecdsa" + "crypto/rand" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOCRKeys_OnChainPrivateKey(t *testing.T) { + t.Parallel() + + pk, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + require.NoError(t, err) + + k := onChainPrivateKey(*pk) + + sig, err := k.Sign([]byte("hello world")) + + assert.NoError(t, err) + assert.NotEmpty(t, sig) +} diff --git a/core/services/keystore/keys/ocrkey/on_chain_public_key.go b/core/services/keystore/keys/ocrkey/on_chain_public_key.go new file mode 100644 index 00000000..90f54e39 --- /dev/null +++ b/core/services/keystore/keys/ocrkey/on_chain_public_key.go @@ -0,0 +1,13 @@ +package ocrkey + +import ( + "crypto/ecdsa" + + "github.com/ethereum/go-ethereum/crypto" +) + +type OnChainPublicKey 
ecdsa.PublicKey + +func (k OnChainPublicKey) Address() OnChainSigningAddress { + return OnChainSigningAddress(crypto.PubkeyToAddress(ecdsa.PublicKey(k))) +} diff --git a/core/services/keystore/keys/ocrkey/on_chain_public_key_test.go b/core/services/keystore/keys/ocrkey/on_chain_public_key_test.go new file mode 100644 index 00000000..2828fd74 --- /dev/null +++ b/core/services/keystore/keys/ocrkey/on_chain_public_key_test.go @@ -0,0 +1,22 @@ +package ocrkey + +import ( + "crypto/ecdsa" + "crypto/rand" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOCRKeys_OnChainPublicKey(t *testing.T) { + t.Parallel() + + pk, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + require.NoError(t, err) + + publicKey := OnChainPublicKey(pk.PublicKey) + + assert.NotEmpty(t, publicKey.Address()) +} diff --git a/core/services/keystore/keys/ocrkey/on_chain_signing_address.go b/core/services/keystore/keys/ocrkey/on_chain_signing_address.go new file mode 100644 index 00000000..6f47b8ec --- /dev/null +++ b/core/services/keystore/keys/ocrkey/on_chain_signing_address.go @@ -0,0 +1,69 @@ +package ocrkey + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" +) + +const onChainSigningAddressPrefix = "ocrsad_" + +type OnChainSigningAddress ocrtypes.OnChainSigningAddress + +func (ocsa OnChainSigningAddress) String() string { + address := common.BytesToAddress(ocsa[:]) + return fmt.Sprintf("%s%s", onChainSigningAddressPrefix, address) +} + +func (ocsa OnChainSigningAddress) MarshalJSON() ([]byte, error) { + return json.Marshal(ocsa.String()) +} + +func (ocsa *OnChainSigningAddress) UnmarshalJSON(input []byte) error { + var hexString string + if err := json.Unmarshal(input, &hexString); err != 
nil { + return err + } + return ocsa.UnmarshalText([]byte(hexString)) +} + +func (ocsa *OnChainSigningAddress) UnmarshalText(bs []byte) error { + input := string(bs) + if strings.HasPrefix(input, onChainSigningAddressPrefix) { + input = string(bs[len(onChainSigningAddressPrefix):]) + } + + result, err := hexutil.Decode(input) + if err != nil { + return err + } + + var onChainSigningAddress common.Address + copy(onChainSigningAddress[:], result[:common.AddressLength]) + *ocsa = OnChainSigningAddress(onChainSigningAddress) + return nil +} + +func (ocsa OnChainSigningAddress) Value() (driver.Value, error) { + byteArray := [common.AddressLength]byte(ocsa) + return byteArray[:], nil +} + +func (ocsa *OnChainSigningAddress) Scan(value interface{}) error { + switch typed := value.(type) { + case []byte: + if len(typed) != common.AddressLength { + return errors.New("wrong number of bytes to scan into address") + } + copy(ocsa[:], typed) + return nil + default: + return errors.Errorf(`unable to convert %v of %T to OnChainSigningAddress`, value, value) + } +} diff --git a/core/services/keystore/keys/ocrkey/on_chain_signing_address_test.go b/core/services/keystore/keys/ocrkey/on_chain_signing_address_test.go new file mode 100644 index 00000000..ad8b79eb --- /dev/null +++ b/core/services/keystore/keys/ocrkey/on_chain_signing_address_test.go @@ -0,0 +1,25 @@ +package ocrkey_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" +) + +func TestOCR_OnChainSigningAddress_String(t *testing.T) { + t.Parallel() + + // should contain EIP55CapitalizedAddress + const ocrSigningKey = "ocrsad_0x30762A700F7d836528dfB14DD60Ec2A3aEaA7694" + var address ocrkey.OnChainSigningAddress + + err := address.UnmarshalText([]byte(ocrSigningKey)) + require.NoError(t, err) + _, err = address.Value() + + assert.NoError(t, err) + assert.Equal(t, ocrSigningKey, address.String()) 
+} diff --git a/core/services/keystore/keys/p2pkey/export.go b/core/services/keystore/keys/p2pkey/export.go new file mode 100644 index 00000000..6e7a808b --- /dev/null +++ b/core/services/keystore/keys/p2pkey/export.go @@ -0,0 +1,56 @@ +package p2pkey + +import ( + "github.com/ethereum/go-ethereum/accounts/keystore" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const keyTypeIdentifier = "P2P" + +func FromEncryptedJSON(keyJSON []byte, password string) (KeyV2, error) { + return keys.FromEncryptedJSON( + keyTypeIdentifier, + keyJSON, + password, + adulteratedPassword, + func(_ EncryptedP2PKeyExport, rawPrivKey []byte) (KeyV2, error) { + return Raw(rawPrivKey).Key(), nil + }, + ) +} + +type EncryptedP2PKeyExport struct { + KeyType string `json:"keyType"` + PublicKey string `json:"publicKey"` + PeerID PeerID `json:"peerID"` + Crypto keystore.CryptoJSON `json:"crypto"` +} + +func (x EncryptedP2PKeyExport) GetCrypto() keystore.CryptoJSON { + return x.Crypto +} + +func (key KeyV2) ToEncryptedJSON(password string, scryptParams utils.ScryptParams) (export []byte, err error) { + return keys.ToEncryptedJSON( + keyTypeIdentifier, + key.Raw(), + key, + password, + scryptParams, + adulteratedPassword, + func(id string, key KeyV2, cryptoJSON keystore.CryptoJSON) EncryptedP2PKeyExport { + return EncryptedP2PKeyExport{ + KeyType: id, + PublicKey: key.PublicKeyHex(), + PeerID: key.PeerID(), + Crypto: cryptoJSON, + } + }, + ) +} + +func adulteratedPassword(password string) string { + return "p2pkey" + password +} diff --git a/core/services/keystore/keys/p2pkey/export_test.go b/core/services/keystore/keys/p2pkey/export_test.go new file mode 100644 index 00000000..b16b0b79 --- /dev/null +++ b/core/services/keystore/keys/p2pkey/export_test.go @@ -0,0 +1,19 @@ +package p2pkey + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" +) + +func TestP2PKeys_ExportImport(t 
*testing.T) { + keys.RunKeyExportImportTestcase(t, createKey, decryptKey) +} + +func createKey() (keys.KeyType, error) { + return NewV2() +} + +func decryptKey(keyJSON []byte, password string) (keys.KeyType, error) { + return FromEncryptedJSON(keyJSON, password) +} diff --git a/core/services/keystore/keys/p2pkey/key.go b/core/services/keystore/keys/p2pkey/key.go new file mode 100644 index 00000000..74214396 --- /dev/null +++ b/core/services/keystore/keys/p2pkey/key.go @@ -0,0 +1,125 @@ +package p2pkey + +import ( + "crypto/ed25519" + "database/sql/driver" + "encoding/hex" + "encoding/json" + "strconv" + "time" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/pkg/errors" + + ragep2ptypes "github.com/goplugin/libocr/ragep2p/types" +) + +// Key represents a p2p private key +type Key struct { + PrivKey ed25519.PrivateKey +} + +func (k Key) ToV2() KeyV2 { + return KeyV2{ + PrivKey: k.PrivKey, + peerID: k.PeerID(), + } +} + +// PublicKeyBytes is a [ed25519.PublicKey] +type PublicKeyBytes []byte + +func (pkb PublicKeyBytes) String() string { + return hex.EncodeToString(pkb) +} + +func (pkb PublicKeyBytes) MarshalJSON() ([]byte, error) { + return json.Marshal(hex.EncodeToString(pkb)) +} + +func (pkb *PublicKeyBytes) UnmarshalJSON(input []byte) error { + var hexString string + if err := json.Unmarshal(input, &hexString); err != nil { + return err + } + + result, err := hex.DecodeString(hexString) + if err != nil { + return err + } + + *pkb = result + return nil +} + +func (pkb *PublicKeyBytes) Scan(value interface{}) error { + switch v := value.(type) { + case []byte: + *pkb = v + return nil + default: + return errors.Errorf("invalid public key bytes got %T wanted []byte", v) + } +} + +func (pkb PublicKeyBytes) Value() (driver.Value, error) { + return []byte(pkb), nil +} + +func (k Key) GetPeerID() (PeerID, error) { + peerID, err := ragep2ptypes.PeerIDFromPrivateKey(k.PrivKey) + if err != nil { + return PeerID{}, errors.WithStack(err) + } + return 
PeerID(peerID), err +} + +func (k Key) PeerID() PeerID { + peerID, err := k.GetPeerID() + if err != nil { + panic(err) + } + return peerID +} + +type EncryptedP2PKey struct { + ID int32 + PeerID PeerID + PubKey PublicKeyBytes + EncryptedPrivKey []byte + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time +} + +func (ep2pk *EncryptedP2PKey) SetID(value string) error { + result, err := strconv.ParseInt(value, 10, 32) + + if err != nil { + return err + } + + ep2pk.ID = int32(result) + return nil +} + +// Decrypt returns the PrivateKey in e, decrypted via auth, or an error +func (ep2pk EncryptedP2PKey) Decrypt(auth string) (k Key, err error) { + var cryptoJSON keystore.CryptoJSON + err = json.Unmarshal(ep2pk.EncryptedPrivKey, &cryptoJSON) + if err != nil { + return k, errors.Wrapf(err, "invalid JSON for P2P key %s (0x%x)", ep2pk.PeerID.String(), ep2pk.PubKey) + } + marshalledPrivK, err := keystore.DecryptDataV3(cryptoJSON, adulteratedPassword(auth)) + if err != nil { + return k, errors.Wrapf(err, "could not decrypt P2P key %s (0x%x)", ep2pk.PeerID.String(), ep2pk.PubKey) + } + + privK, err := UnmarshalPrivateKey(marshalledPrivK) + if err != nil { + return k, errors.Wrapf(err, "could not unmarshal P2P private key for %s (0x%x)", ep2pk.PeerID.String(), ep2pk.PubKey) + } + return Key{ + privK, + }, nil +} diff --git a/core/services/keystore/keys/p2pkey/key_test.go b/core/services/keystore/keys/p2pkey/key_test.go new file mode 100644 index 00000000..924f9d96 --- /dev/null +++ b/core/services/keystore/keys/p2pkey/key_test.go @@ -0,0 +1,109 @@ +package p2pkey + +import ( + "crypto/ed25519" + "crypto/rand" + "encoding/hex" + "encoding/json" + "testing" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestP2PKeys_KeyStruct(t *testing.T) { + _, pk, err := ed25519.GenerateKey(rand.Reader) + require.NoError(t, err) + + k 
:= Key{PrivKey: pk} + + t.Run("converts into V2 key", func(t *testing.T) { + k2 := k.ToV2() + + assert.Equal(t, k.PrivKey, k2.PrivKey) + assert.Equal(t, k.PeerID(), k2.peerID) + }) + + t.Run("returns PeerID", func(t *testing.T) { + pid, err := k.GetPeerID() + require.NoError(t, err) + pid2 := k.PeerID() + + assert.Equal(t, pid, pid2) + }) +} + +func TestP2PKeys_PublicKeyBytes(t *testing.T) { + pk, _, err := ed25519.GenerateKey(rand.Reader) + require.NoError(t, err) + + pkb := PublicKeyBytes(pk) + assert.Equal(t, hex.EncodeToString(pkb), pkb.String()) + + b, err := pkb.MarshalJSON() + require.NoError(t, err) + assert.NotEmpty(t, b) + + err = pkb.UnmarshalJSON(b) + assert.NoError(t, err) + + err = pkb.UnmarshalJSON([]byte("")) + assert.Error(t, err) + + err = pkb.Scan([]byte(pk)) + assert.NoError(t, err) + + err = pkb.Scan("invalid-type") + assert.Error(t, err) + + sv, err := pkb.Value() + assert.NoError(t, err) + assert.NotEmpty(t, sv) +} + +func TestP2PKeys_EncryptedP2PKey(t *testing.T) { + _, privk, err := ed25519.GenerateKey(rand.Reader) + require.NoError(t, err) + + k := Key{PrivKey: privk} + + pubkr := k.PrivKey.Public().(ed25519.PublicKey) + + var marshalledPrivK []byte + marshalledPrivK, err = MarshalPrivateKey(k.PrivKey) + require.NoError(t, err) + cryptoJSON, err := keystore.EncryptDataV3(marshalledPrivK, []byte(adulteratedPassword("password")), utils.FastScryptParams.N, utils.FastScryptParams.P) + require.NoError(t, err) + encryptedPrivKey, err := json.Marshal(&cryptoJSON) + require.NoError(t, err) + + p2pk := EncryptedP2PKey{ + ID: 1, + PeerID: k.PeerID(), + PubKey: []byte(pubkr), + EncryptedPrivKey: encryptedPrivKey, + } + + t.Run("sets a different ID", func(t *testing.T) { + err := p2pk.SetID("12") + require.NoError(t, err) + + assert.Equal(t, int32(12), p2pk.ID) + + err = p2pk.SetID("invalid") + assert.Error(t, err) + }) + + t.Run("decrypts key", func(t *testing.T) { + k, err := p2pk.Decrypt("invalid-pass") + assert.Empty(t, k) + assert.Error(t, err) + 
+ k, err = p2pk.Decrypt("password") + require.NoError(t, err) + assert.NotEmpty(t, k) + }) +} diff --git a/core/services/keystore/keys/p2pkey/key_v2.go b/core/services/keystore/keys/p2pkey/key_v2.go new file mode 100644 index 00000000..7e92f1d9 --- /dev/null +++ b/core/services/keystore/keys/p2pkey/key_v2.go @@ -0,0 +1,115 @@ +package p2pkey + +import ( + "bytes" + "crypto/ed25519" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "math/big" + + "github.com/goplugin/libocr/ragep2p/types" +) + +var libp2pPBPrefix = []byte{0x08, 0x01, 0x12, 0x40} + +// Raw is an encoded protocol buffer. +type Raw []byte + +func (raw Raw) Key() KeyV2 { + privKey, err := UnmarshalPrivateKey(raw) + if err != nil { + panic(err) + } + key, err := fromPrivkey(privKey) + if err != nil { + panic(err) + } + return key +} + +func UnmarshalPrivateKey(raw Raw) (ed25519.PrivateKey, error) { + if !bytes.HasPrefix(raw, libp2pPBPrefix) { + return nil, errors.New("invalid key: missing libp2p protobuf prefix") + } + return ed25519.PrivateKey(raw[len(libp2pPBPrefix):]), nil +} + +func MarshalPrivateKey(key ed25519.PrivateKey) ([]byte, error) { + return bytes.Join([][]byte{libp2pPBPrefix, key}, nil), nil +} + +func (raw Raw) String() string { + return "" +} + +func (raw Raw) GoString() string { + return raw.String() +} + +var _ fmt.GoStringer = &KeyV2{} + +type KeyV2 struct { + PrivKey ed25519.PrivateKey + peerID PeerID +} + +func NewV2() (KeyV2, error) { + _, privKey, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + return KeyV2{}, err + } + return fromPrivkey(privKey) +} + +func MustNewV2XXXTestingOnly(k *big.Int) KeyV2 { + seed := make([]byte, ed25519.SeedSize) + copy(seed, k.Bytes()) + pk := ed25519.NewKeyFromSeed(seed[:]) + key, err := fromPrivkey(pk) + if err != nil { + panic(err) + } + return key +} + +func (key KeyV2) ID() string { + return types.PeerID(key.peerID).String() +} + +func (key KeyV2) Raw() Raw { + marshalledPrivK, err := MarshalPrivateKey(key.PrivKey) + if err != nil 
{ + panic(err) + } + return marshalledPrivK +} + +func (key KeyV2) PeerID() PeerID { + return key.peerID +} + +func (key KeyV2) PublicKeyHex() string { + pubKeyBytes := key.PrivKey.Public().(ed25519.PublicKey) + return hex.EncodeToString(pubKeyBytes) +} + +func (key KeyV2) String() string { + return fmt.Sprintf("P2PKeyV2{PrivateKey: , PeerID: %s}", key.peerID.Raw()) +} + +func (key KeyV2) GoString() string { + return key.String() +} + +func fromPrivkey(privKey ed25519.PrivateKey) (KeyV2, error) { + peerID, err := types.PeerIDFromPrivateKey(privKey) + if err != nil { + return KeyV2{}, err + } + return KeyV2{ + PrivKey: privKey, + peerID: PeerID(peerID), + }, nil +} diff --git a/core/services/keystore/keys/p2pkey/key_v2_test.go b/core/services/keystore/keys/p2pkey/key_v2_test.go new file mode 100644 index 00000000..3e21355e --- /dev/null +++ b/core/services/keystore/keys/p2pkey/key_v2_test.go @@ -0,0 +1,36 @@ +package p2pkey + +import ( + "crypto/ed25519" + "crypto/rand" + "encoding/hex" + "testing" + + ragep2ptypes "github.com/goplugin/libocr/ragep2p/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestP2PKeys_Raw(t *testing.T) { + _, pk, err := ed25519.GenerateKey(rand.Reader) + require.NoError(t, err) + + r := Raw(pk) + + assert.Equal(t, r.String(), r.GoString()) + assert.Equal(t, "", r.String()) +} + +func TestP2PKeys_KeyV2(t *testing.T) { + _, pk, err := ed25519.GenerateKey(rand.Reader) + require.NoError(t, err) + + k := Key{PrivKey: pk} + kv2 := k.ToV2() + + pkv2 := kv2.PrivKey.Public().(ed25519.PublicKey) + + assert.Equal(t, kv2.String(), kv2.GoString()) + assert.Equal(t, ragep2ptypes.PeerID(k.PeerID()).String(), kv2.ID()) + assert.Equal(t, hex.EncodeToString(pkv2), kv2.PublicKeyHex()) +} diff --git a/core/services/keystore/keys/p2pkey/peer_id.go b/core/services/keystore/keys/p2pkey/peer_id.go new file mode 100644 index 00000000..5fae4611 --- /dev/null +++ b/core/services/keystore/keys/p2pkey/peer_id.go @@ -0,0 
+1,96 @@ +package p2pkey + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "strings" + + "github.com/pkg/errors" + + "github.com/goplugin/libocr/ragep2p/types" +) + +const peerIDPrefix = "p2p_" + +type PeerID types.PeerID + +func MakePeerID(s string) (PeerID, error) { + var peerID PeerID + return peerID, peerID.UnmarshalString(s) +} + +func (p PeerID) String() string { + // Handle a zero peerID more gracefully, i.e. print it as empty string rather + // than `p2p_` + if p == (PeerID{}) { + return "" + } + return fmt.Sprintf("%s%s", peerIDPrefix, p.Raw()) +} + +func (p PeerID) Raw() string { + return types.PeerID(p).String() +} + +func (p *PeerID) UnmarshalString(s string) error { + return p.UnmarshalText([]byte(s)) +} + +func (p *PeerID) MarshalText() ([]byte, error) { + if *p == (PeerID{}) { + return nil, nil + } + return []byte(p.Raw()), nil +} + +func (p *PeerID) UnmarshalText(bs []byte) error { + input := string(bs) + if strings.HasPrefix(input, peerIDPrefix) { + input = string(bs[len(peerIDPrefix):]) + } + + if input == "" { + return nil + } + + var peerID types.PeerID + err := peerID.UnmarshalText([]byte(input)) + if err != nil { + return errors.Wrapf(err, `PeerID#UnmarshalText("%v")`, input) + } + *p = PeerID(peerID) + return nil +} + +func (p *PeerID) Scan(value interface{}) error { + *p = PeerID{} + switch s := value.(type) { + case string: + if s != "" { + return p.UnmarshalText([]byte(s)) + } + case nil: + default: + return errors.Errorf("PeerID#Scan got %T, expected string", value) + } + return nil +} + +func (p PeerID) Value() (driver.Value, error) { + b, err := types.PeerID(p).MarshalText() + return string(b), err +} + +func (p PeerID) MarshalJSON() ([]byte, error) { + return json.Marshal(p.String()) +} + +func (p *PeerID) UnmarshalJSON(input []byte) error { + var result string + if err := json.Unmarshal(input, &result); err != nil { + return err + } + + return p.UnmarshalText([]byte(result)) +} diff --git 
a/core/services/keystore/keys/p2pkey/peer_id_test.go b/core/services/keystore/keys/p2pkey/peer_id_test.go new file mode 100644 index 00000000..c648fec8 --- /dev/null +++ b/core/services/keystore/keys/p2pkey/peer_id_test.go @@ -0,0 +1,72 @@ +package p2pkey + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestP2PKeys_PeerID(t *testing.T) { + t.Run("make peer ID", func(t *testing.T) { + id, err := MakePeerID("12D3KooWM1111111111111111111111111111111111111111111") + require.NoError(t, err) + _, err = MakePeerID("invalid") + assert.Error(t, err) + + assert.Equal(t, "p2p_12D3KooWM1111111111111111111111111111111111111111111", id.String()) + }) + + t.Run("unmarshals new ID", func(t *testing.T) { + id, err := MakePeerID("12D3KooWM1111111111111111111111111111111111111111111") + require.NoError(t, err) + fakeKey := MustNewV2XXXTestingOnly(big.NewInt(1)) + + err = id.UnmarshalString(fakeKey.ID()) + require.NoError(t, err) + + assert.Equal(t, "p2p_"+fakeKey.ID(), id.String()) + }) + + t.Run("scans new ID", func(t *testing.T) { + id, err := MakePeerID("12D3KooWM1111111111111111111111111111111111111111111") + require.NoError(t, err) + fakeKey := MustNewV2XXXTestingOnly(big.NewInt(1)) + + err = id.Scan(fakeKey.ID()) + require.NoError(t, err) + + assert.Equal(t, "p2p_"+fakeKey.ID(), id.String()) + + err = id.Scan(12) + assert.Error(t, err) + assert.Equal(t, "", id.String()) + }) +} + +func TestPeerID_marshal(t *testing.T) { + id, err := MakePeerID("p2p_12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw") + require.NoError(t, err) + t.Run("json", func(t *testing.T) { + b, err := id.MarshalJSON() + require.NoError(t, err) + var got PeerID + require.NoError(t, got.UnmarshalJSON(b)) + require.Equal(t, id, got) + }) + t.Run("db", func(t *testing.T) { + v, err := id.Value() + require.NoError(t, err) + var got PeerID + require.NoError(t, got.Scan(v)) + require.Equal(t, id, got) + }) + t.Run("text", func(t 
*testing.T) { + s, err := id.MarshalText() + require.NoError(t, err) + var got PeerID + require.NoError(t, got.UnmarshalText(s)) + require.Equal(t, id, got) + }) +} diff --git a/core/services/keystore/keys/solkey/export.go b/core/services/keystore/keys/solkey/export.go new file mode 100644 index 00000000..30b66352 --- /dev/null +++ b/core/services/keystore/keys/solkey/export.go @@ -0,0 +1,48 @@ +package solkey + +import ( + "encoding/hex" + + "github.com/ethereum/go-ethereum/accounts/keystore" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const keyTypeIdentifier = "Solana" + +// FromEncryptedJSON gets key from json and password +func FromEncryptedJSON(keyJSON []byte, password string) (Key, error) { + return keys.FromEncryptedJSON( + keyTypeIdentifier, + keyJSON, + password, + adulteratedPassword, + func(_ keys.EncryptedKeyExport, rawPrivKey []byte) (Key, error) { + return Raw(rawPrivKey).Key(), nil + }, + ) +} + +// ToEncryptedJSON returns encrypted JSON representing key +func (key Key) ToEncryptedJSON(password string, scryptParams utils.ScryptParams) (export []byte, err error) { + return keys.ToEncryptedJSON( + keyTypeIdentifier, + key.Raw(), + key, + password, + scryptParams, + adulteratedPassword, + func(id string, key Key, cryptoJSON keystore.CryptoJSON) keys.EncryptedKeyExport { + return keys.EncryptedKeyExport{ + KeyType: id, + PublicKey: hex.EncodeToString(key.pubKey), + Crypto: cryptoJSON, + } + }, + ) +} + +func adulteratedPassword(password string) string { + return "solkey" + password +} diff --git a/core/services/keystore/keys/solkey/export_test.go b/core/services/keystore/keys/solkey/export_test.go new file mode 100644 index 00000000..5b8f5edc --- /dev/null +++ b/core/services/keystore/keys/solkey/export_test.go @@ -0,0 +1,19 @@ +package solkey + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" +) + +func TestSolanaKeys_ExportImport(t 
*testing.T) { + keys.RunKeyExportImportTestcase(t, createKey, decryptKey) +} + +func createKey() (keys.KeyType, error) { + return New() +} + +func decryptKey(keyJSON []byte, password string) (keys.KeyType, error) { + return FromEncryptedJSON(keyJSON, password) +} diff --git a/core/services/keystore/keys/solkey/key.go b/core/services/keystore/keys/solkey/key.go new file mode 100644 index 00000000..bcec5e89 --- /dev/null +++ b/core/services/keystore/keys/solkey/key.go @@ -0,0 +1,110 @@ +package solkey + +import ( + "crypto" + "crypto/ed25519" + crypto_rand "crypto/rand" + "fmt" + "io" + + "github.com/gagliardetto/solana-go" + "github.com/mr-tron/base58" +) + +// Raw represents the ETH private key +type Raw []byte + +// Key gets the Key +func (raw Raw) Key() Key { + privKey := ed25519.NewKeyFromSeed(raw) + pubKey := make([]byte, ed25519.PublicKeySize) + copy(pubKey, privKey[ed25519.PublicKeySize:]) + return Key{ + privkey: privKey, + pubKey: pubKey, + } +} + +// String returns description +func (raw Raw) String() string { + return "" +} + +// GoString wraps String() +func (raw Raw) GoString() string { + return raw.String() +} + +var _ fmt.GoStringer = &Key{} + +// Key represents Solana key +type Key struct { + privkey ed25519.PrivateKey + pubKey ed25519.PublicKey +} + +// New creates new Key +func New() (Key, error) { + return newFrom(crypto_rand.Reader) +} + +// MustNewInsecure return Key if no error +func MustNewInsecure(reader io.Reader) Key { + key, err := newFrom(reader) + if err != nil { + panic(err) + } + return key +} + +func newFrom(reader io.Reader) (Key, error) { + pub, priv, err := ed25519.GenerateKey(reader) + if err != nil { + return Key{}, err + } + return Key{ + privkey: priv, + pubKey: pub, + }, nil +} + +// ID gets Key ID +func (key Key) ID() string { + return key.PublicKeyStr() +} + +// GetPublic get Key's public key +func (key Key) GetPublic() ed25519.PublicKey { + return key.pubKey +} + +// PublicKeyStr return base58 encoded public key +func (key 
Key) PublicKeyStr() string { + return base58.Encode(key.pubKey) +} + +// Raw from private key +func (key Key) Raw() Raw { + return key.privkey.Seed() +} + +// String is the print-friendly format of the Key +func (key Key) String() string { + return fmt.Sprintf("SolanaKey{PrivateKey: , Public Key: %s}", key.PublicKeyStr()) +} + +// GoString wraps String() +func (key Key) GoString() string { + return key.String() +} + +// Sign is used to sign a message +func (key Key) Sign(msg []byte) ([]byte, error) { + return key.privkey.Sign(crypto_rand.Reader, msg, crypto.Hash(0)) +} + +// PublicKey copies public key slice +func (key Key) PublicKey() (pubKey solana.PublicKey) { + copy(pubKey[:], key.pubKey) + return +} diff --git a/core/services/keystore/keys/starkkey/export.go b/core/services/keystore/keys/starkkey/export.go new file mode 100644 index 00000000..c0162726 --- /dev/null +++ b/core/services/keystore/keys/starkkey/export.go @@ -0,0 +1,46 @@ +package starkkey + +import ( + "github.com/ethereum/go-ethereum/accounts/keystore" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const keyTypeIdentifier = "StarkNet" + +// FromEncryptedJSON gets key from json and password +func FromEncryptedJSON(keyJSON []byte, password string) (Key, error) { + return keys.FromEncryptedJSON( + keyTypeIdentifier, + keyJSON, + password, + adulteratedPassword, + func(_ keys.EncryptedKeyExport, rawPrivKey []byte) (Key, error) { + return Raw(rawPrivKey).Key(), nil + }, + ) +} + +// ToEncryptedJSON returns encrypted JSON representing key +func ToEncryptedJSON(key Key, password string, scryptParams utils.ScryptParams) (export []byte, err error) { + return keys.ToEncryptedJSON( + keyTypeIdentifier, + key.Raw(), + key, + password, + scryptParams, + adulteratedPassword, + func(id string, key Key, cryptoJSON keystore.CryptoJSON) keys.EncryptedKeyExport { + return keys.EncryptedKeyExport{ + KeyType: id, + PublicKey: 
key.StarkKeyStr(), + Crypto: cryptoJSON, + } + }, + ) +} + +func adulteratedPassword(password string) string { + return "starkkey" + password +} diff --git a/core/services/keystore/keys/starkkey/export_test.go b/core/services/keystore/keys/starkkey/export_test.go new file mode 100644 index 00000000..32f2f241 --- /dev/null +++ b/core/services/keystore/keys/starkkey/export_test.go @@ -0,0 +1,31 @@ +package starkkey + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestStarkNetKeys_ExportImport(t *testing.T) { + keys.RunKeyExportImportTestcase(t, createKey, decryptKey) +} + +func createKey() (keys.KeyType, error) { + key, err := New() + return TestWrapped{key}, err +} + +func decryptKey(keyJSON []byte, password string) (keys.KeyType, error) { + key, err := FromEncryptedJSON(keyJSON, password) + return TestWrapped{key}, err +} + +// wrap key to conform to desired test interface +type TestWrapped struct { + Key +} + +func (w TestWrapped) ToEncryptedJSON(password string, scryptParams utils.ScryptParams) ([]byte, error) { + return ToEncryptedJSON(w.Key, password, scryptParams) +} diff --git a/core/services/keystore/keys/starkkey/key.go b/core/services/keystore/keys/starkkey/key.go new file mode 100644 index 00000000..28b23138 --- /dev/null +++ b/core/services/keystore/keys/starkkey/key.go @@ -0,0 +1,104 @@ +package starkkey + +import ( + crypto_rand "crypto/rand" + "fmt" + "io" + "math/big" + + "github.com/goplugin/caigo" + caigotypes "github.com/goplugin/caigo/types" +) + +// Raw represents the Stark private key +type Raw []byte + +// Key gets the Key +func (raw Raw) Key() Key { + k := Key{} + var err error + + k.priv = new(big.Int).SetBytes(raw) + k.pub.X, k.pub.Y, err = caigo.Curve.PrivateToPoint(k.priv) + if err != nil { + panic(err) // key not generated + } + return k +} + +// String returns description +func (raw Raw) String() string { + return "" +} + +// GoString 
wraps String() +func (raw Raw) GoString() string { + return raw.String() +} + +var _ fmt.GoStringer = &Key{} + +type PublicKey struct { + X, Y *big.Int +} + +// Key represents Starknet key +type Key struct { + priv *big.Int + pub PublicKey +} + +// New creates new Key +func New() (Key, error) { + return newFrom(crypto_rand.Reader) +} + +// MustNewInsecure return Key if no error +func MustNewInsecure(reader io.Reader) Key { + key, err := newFrom(reader) + if err != nil { + panic(err) + } + return key +} + +func newFrom(reader io.Reader) (Key, error) { + return GenerateKey(reader) +} + +// ID gets Key ID +func (key Key) ID() string { + return key.StarkKeyStr() +} + +// StarkKeyStr is the starknet public key associated to the private key +// it is the X component of the ECDSA pubkey and used in the deployment of the account contract +// this func is used in exporting it via CLI and API +func (key Key) StarkKeyStr() string { + return caigotypes.BigToFelt(key.pub.X).String() +} + +// Raw from private key +func (key Key) Raw() Raw { + return key.priv.Bytes() +} + +// String is the print-friendly format of the Key +func (key Key) String() string { + return fmt.Sprintf("StarknetKey{PrivateKey: , StarkKey: %s}", key.StarkKeyStr()) +} + +// GoString wraps String() +func (key Key) GoString() string { + return key.String() +} + +// ToPrivKey returns the key usable for signing. 
+func (key Key) ToPrivKey() *big.Int { + return key.priv +} + +// PublicKey copies public key object +func (key Key) PublicKey() PublicKey { + return key.pub +} diff --git a/core/services/keystore/keys/starkkey/ocr2key.go b/core/services/keystore/keys/starkkey/ocr2key.go new file mode 100644 index 00000000..c87a6b93 --- /dev/null +++ b/core/services/keystore/keys/starkkey/ocr2key.go @@ -0,0 +1,182 @@ +package starkkey + +import ( + "bytes" + "io" + "math/big" + + "github.com/pkg/errors" + + "github.com/goplugin/caigo" + caigotypes "github.com/goplugin/caigo/types" + "github.com/goplugin/libocr/offchainreporting2plus/chains/evmutil" + "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +var _ types.OnchainKeyring = &OCR2Key{} + +type OCR2Key struct { + Key +} + +func NewOCR2Key(material io.Reader) (*OCR2Key, error) { + k, err := GenerateKey(material) + + return &OCR2Key{k}, err +} + +func (sk *OCR2Key) PublicKey() types.OnchainPublicKey { + return caigotypes.BigToFelt(sk.pub.X).Bytes() +} + +func ReportToSigData(reportCtx types.ReportContext, report types.Report) (*big.Int, error) { + var dataArray []*big.Int + + rawReportContext := rawReportContext(reportCtx) + dataArray = append(dataArray, new(big.Int).SetBytes(rawReportContext[0][:])) + dataArray = append(dataArray, new(big.Int).SetBytes(rawReportContext[1][:])) + dataArray = append(dataArray, new(big.Int).SetBytes(rawReportContext[2][:])) + + // split report into separate felts for hashing + splitReport, err := splitReport(report) + if err != nil { + return &big.Int{}, err + } + for i := 0; i < len(splitReport); i++ { + dataArray = append(dataArray, new(big.Int).SetBytes(splitReport[i])) + } + + hash, err := caigo.Curve.ComputeHashOnElements(dataArray) + if err != nil { + return &big.Int{}, err + } + return hash, nil +} + +func (sk *OCR2Key) Sign(reportCtx types.ReportContext, report types.Report) ([]byte, error) { + hash, err := ReportToSigData(reportCtx, report) + if err != nil { + return []byte{}, 
err + } + r, s, err := caigo.Curve.Sign(hash, sk.priv) + if err != nil { + return []byte{}, err + } + + // enforce s <= N/2 to prevent signature malleability + if s.Cmp(new(big.Int).Rsh(caigo.Curve.N, 1)) > 0 { + s.Sub(caigo.Curve.N, s) + } + + // encoding: public key (32 bytes) + r (32 bytes) + s (32 bytes) + buff := bytes.NewBuffer([]byte(sk.PublicKey())) + if _, err := buff.Write(padBytes(r.Bytes(), byteLen)); err != nil { + return []byte{}, err + } + if _, err := buff.Write(padBytes(s.Bytes(), byteLen)); err != nil { + return []byte{}, err + } + + out := buff.Bytes() + if len(out) != sk.MaxSignatureLength() { + return []byte{}, errors.Errorf("unexpected signature size, got %d want %d", len(out), sk.MaxSignatureLength()) + } + return out, nil +} + +func (sk *OCR2Key) Sign3(digest types.ConfigDigest, seqNr uint64, r types.Report) (signature []byte, err error) { + return nil, errors.New("not implemented") +} + +func (sk *OCR2Key) Verify(publicKey types.OnchainPublicKey, reportCtx types.ReportContext, report types.Report, signature []byte) bool { + // check valid signature length + if len(signature) != sk.MaxSignatureLength() { + return false + } + + // convert OnchainPublicKey (starkkey) into ecdsa public keys (prepend 2 or 3 to indicate +/- Y coord) + var keys [2]PublicKey + keys[0].X = new(big.Int).SetBytes(publicKey) + keys[0].Y = caigo.Curve.GetYCoordinate(keys[0].X) + + // When there is no point with the provided x-coordinate, the GetYCoordinate function returns the nil value. + if keys[0].Y == nil { + return false + } + + keys[1].X = keys[0].X + keys[1].Y = new(big.Int).Mul(keys[0].Y, big.NewInt(-1)) + + hash, err := ReportToSigData(reportCtx, report) + if err != nil { + return false + } + + r := new(big.Int).SetBytes(signature[32:64]) + s := new(big.Int).SetBytes(signature[64:]) + + // Only allow canonical signatures to avoid signature malleability. 
Verify s <= N/2 + if s.Cmp(new(big.Int).Rsh(caigo.Curve.N, 1)) == 1 { + return false + } + + return caigo.Curve.Verify(hash, r, s, keys[0].X, keys[0].Y) || caigo.Curve.Verify(hash, r, s, keys[1].X, keys[1].Y) +} + +func (sk *OCR2Key) Verify3(publicKey types.OnchainPublicKey, cd types.ConfigDigest, seqNr uint64, r types.Report, signature []byte) bool { + return false +} + +func (sk *OCR2Key) MaxSignatureLength() int { + return 32 + 32 + 32 // publickey + r + s +} + +func (sk *OCR2Key) Marshal() ([]byte, error) { + return padBytes(sk.priv.Bytes(), sk.privateKeyLen()), nil +} + +func (sk *OCR2Key) privateKeyLen() int { + // https://github.com/NethermindEth/juno/blob/3e71279632d82689e5af03e26693ca5c58a2376e/pkg/crypto/weierstrass/weierstrass.go#L377 + return 32 +} + +func (sk *OCR2Key) Unmarshal(in []byte) error { + // enforce byte length + if len(in) != sk.privateKeyLen() { + return errors.Errorf("unexpected seed size, got %d want %d", len(in), sk.privateKeyLen()) + } + + sk.Key = Raw(in).Key() + return nil +} + +func splitReport(report types.Report) ([][]byte, error) { + chunkSize := 32 + if len(report)%chunkSize != 0 { + return [][]byte{}, errors.New("invalid report length") + } + + // order is guaranteed by buildReport: + // observation_timestamp + // observers + // observations_len + // observations + // juels_per_fee_coin + // gas_price + slices := [][]byte{} + for i := 0; i < len(report)/chunkSize; i++ { + idx := i * chunkSize + slices = append(slices, report[idx:(idx+chunkSize)]) + } + + return slices, nil +} + +// NOTE: this should sit in the ocr2 package but that causes import cycles +func rawReportContext(repctx types.ReportContext) [3][32]byte { + rawReportContext := evmutil.RawReportContext(repctx) + // NOTE: Ensure extra_hash is 31 bytes with first byte blanked out + // libocr generates a 32 byte extraHash but we need to fit it into a felt + rawReportContext[2][0] = 0 + return rawReportContext +} diff --git 
a/core/services/keystore/keys/starkkey/ocr2key_test.go b/core/services/keystore/keys/starkkey/ocr2key_test.go new file mode 100644 index 00000000..c1ba9a56 --- /dev/null +++ b/core/services/keystore/keys/starkkey/ocr2key_test.go @@ -0,0 +1,177 @@ +package starkkey + +import ( + cryptorand "crypto/rand" + "encoding/hex" + "math/big" + "testing" + + caigotypes "github.com/goplugin/caigo/types" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// msg to hash +// [ +// '0x4acf99cb25a4803916f086440c661295b105a485efdc649ac4de9536da25b', // digest +// 1, // epoch_and_round +// 1, // extra_hash +// 1, // timestamp +// '0x00010203000000000000000000000000000000000000000000000000000000', // observers +// 4, // len +// 99, // reports +// 99, +// 99, +// 99, +// 1 juels_per_fee_coin +// ] +// hash 0x1332a8dabaabef63b03438ca50760cb9f5c0292cbf015b2395e50e6157df4e3 +// --> privKey 2137244795266879235401249500471353867704187908407744160927664772020405449078 r 2898571078985034687500959842265381508927681132188252715370774777831313601543 s 1930849708769648077928186998643944706551011476358007177069185543644456022504 pubKey 1118148281956858477519852250235501663092798578871088714409528077622994994907 +// privKey 3571531812827697194985986636869245829152430835021673171507607525908246940354 r 3242770073040892094735101607173275538752888766491356946211654602282309624331 s 2150742645846855766116236144967953798077492822890095121354692808525999221887 pubKey 2445157821578193538289426656074203099996547227497157254541771705133209838679 + +func TestStarknetKeyring_TestVector(t *testing.T) { + var kr1 OCR2Key + bigKey, _ := new(big.Int).SetString("2137244795266879235401249500471353867704187908407744160927664772020405449078", 10) + feltKey := caigotypes.BigToFelt(bigKey) + err := kr1.Unmarshal(feltKey.Bytes()) + require.NoError(t, err) + // kr2, err := NewOCR2Key(cryptorand.Reader) + // 
require.NoError(t, err) + + bytes, err := caigotypes.HexToBytes("0x004acf99cb25a4803916f086440c661295b105a485efdc649ac4de9536da25b") + require.NoError(t, err) + configDigest, err := ocrtypes.BytesToConfigDigest(bytes) + require.NoError(t, err) + + ctx := ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: configDigest, + Epoch: 0, + Round: 1, + }, + ExtraHash: [32]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + }, + } + + var report []byte + report = append(report, caigotypes.BigToFelt(big.NewInt(1)).Bytes()...) + report = append(report, caigotypes.StrToFelt("0x00010203000000000000000000000000000000000000000000000000000000").Bytes()...) + report = append(report, caigotypes.BigToFelt(big.NewInt(4)).Bytes()...) + report = append(report, caigotypes.BigToFelt(big.NewInt(99)).Bytes()...) + report = append(report, caigotypes.BigToFelt(big.NewInt(99)).Bytes()...) + report = append(report, caigotypes.BigToFelt(big.NewInt(99)).Bytes()...) + report = append(report, caigotypes.BigToFelt(big.NewInt(99)).Bytes()...) + report = append(report, caigotypes.BigToFelt(big.NewInt(1)).Bytes()...) 
+ + // check that report hash matches expected + msg, err := ReportToSigData(ctx, report) + require.NoError(t, err) + + expected, err := caigotypes.HexToBytes("0x1332a8dabaabef63b03438ca50760cb9f5c0292cbf015b2395e50e6157df4e3") + require.NoError(t, err) + assert.Equal(t, expected, msg.Bytes()) + + // check that signature matches expected + sig, err := kr1.Sign(ctx, report) + require.NoError(t, err) + + pub := caigotypes.BytesToFelt(sig[0:32]) + r := caigotypes.BytesToFelt(sig[32:64]) + s := caigotypes.BytesToFelt(sig[64:]) + + bigPubExpected, _ := new(big.Int).SetString("1118148281956858477519852250235501663092798578871088714409528077622994994907", 10) + feltPubExpected := caigotypes.BigToFelt(bigPubExpected) + assert.Equal(t, feltPubExpected, pub) + + bigRExpected, _ := new(big.Int).SetString("2898571078985034687500959842265381508927681132188252715370774777831313601543", 10) + feltRExpected := caigotypes.BigToFelt(bigRExpected) + assert.Equal(t, feltRExpected, r) + + // test for malleability + otherS, _ := new(big.Int).SetString("1930849708769648077928186998643944706551011476358007177069185543644456022504", 10) + bigSExpected, _ := new(big.Int).SetString("1687653079896483135769135784451125398975732275358080312084893914240056843079", 10) + + feltSExpected := caigotypes.BigToFelt(bigSExpected) + assert.NotEqual(t, otherS, s, "signature not in canonical form") + assert.Equal(t, feltSExpected, s) +} + +func TestStarknetKeyring_Sign_Verify(t *testing.T) { + kr1, err := NewOCR2Key(cryptorand.Reader) + require.NoError(t, err) + kr2, err := NewOCR2Key(cryptorand.Reader) + require.NoError(t, err) + + digest := "00044e5d4f35325e464c87374b13c512f60e09d1236dd902f4bef4c9aedd7300" + bytes, err := hex.DecodeString(digest) + require.NoError(t, err) + configDigest, err := ocrtypes.BytesToConfigDigest(bytes) + require.NoError(t, err) + + ctx := ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: configDigest, + Epoch: 1, + Round: 1, + }, + 
ExtraHash: [32]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + }, + } + report := ocrtypes.Report{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 97, 91, 43, 83, // observations_timestamp + 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // observers + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, // len + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 73, 150, 2, 210, // observation 1 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 73, 150, 2, 211, // observation 2 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 224, 182, 179, 167, 100, 0, 0, // juels per fee coin (1 with 18 decimal places) + } + + t.Run("can verify", func(t *testing.T) { + sig, err := kr1.Sign(ctx, report) + require.NoError(t, err) + result := kr2.Verify(kr1.PublicKey(), ctx, report, sig) + require.True(t, result) + }) + + t.Run("invalid sig", func(t *testing.T) { + result := kr2.Verify(kr1.PublicKey(), ctx, report, []byte{0x01}) + require.False(t, result) + + longSig := [100]byte{} + result = kr2.Verify(kr1.PublicKey(), ctx, report, longSig[:]) + require.False(t, result) + }) + + t.Run("invalid pubkey", func(t *testing.T) { + sig, err := kr1.Sign(ctx, report) + require.NoError(t, err) + + pk := []byte{0x01} + result := kr2.Verify(pk, ctx, report, sig) + require.False(t, result) + + pk = big.NewInt(int64(31337)).Bytes() + result = kr2.Verify(pk, ctx, report, sig) + require.False(t, result) + }) +} + +func TestStarknetKeyring_Marshal(t *testing.T) { + kr1, err := NewOCR2Key(cryptorand.Reader) + require.NoError(t, err) + m, err := kr1.Marshal() + require.NoError(t, err) + kr2 := OCR2Key{} + err = kr2.Unmarshal(m) + require.NoError(t, err) 
+ assert.True(t, kr1.priv.Cmp(kr2.priv) == 0) + + // Invalid seed size should error + require.Error(t, kr2.Unmarshal([]byte{0x01})) +} diff --git a/core/services/keystore/keys/starkkey/utils.go b/core/services/keystore/keys/starkkey/utils.go new file mode 100644 index 00000000..a904eea1 --- /dev/null +++ b/core/services/keystore/keys/starkkey/utils.go @@ -0,0 +1,49 @@ +package starkkey + +import ( + "crypto/rand" + "fmt" + "io" + "math/big" + + "github.com/goplugin/caigo" +) + +// constants +var ( + byteLen = 32 +) + +// reimplements parts of https://github.com/goplugin/caigo/blob/main/utils.go#L85 +// generate the PK as a pseudo-random number in the interval [1, CurveOrder - 1] +// using io.Reader, and Key struct +func GenerateKey(material io.Reader) (k Key, err error) { + max := new(big.Int).Sub(caigo.Curve.N, big.NewInt(1)) + + k.priv, err = rand.Int(material, max) + if err != nil { + return k, err + } + + k.pub.X, k.pub.Y, err = caigo.Curve.PrivateToPoint(k.priv) + if err != nil { + return k, err + } + + if !caigo.Curve.IsOnCurve(k.pub.X, k.pub.Y) { + return k, fmt.Errorf("key gen is not on stark curve") + } + + return k, nil +} + +// pad bytes to specific length +func padBytes(a []byte, length int) []byte { + if len(a) < length { + pad := make([]byte, length-len(a)) + return append(pad, a...) 
+ } + + // return original if length is >= to specified length + return a +} diff --git a/core/services/keystore/keys/vrfkey/benchmark_vrf_validation_test.go b/core/services/keystore/keys/vrfkey/benchmark_vrf_validation_test.go new file mode 100644 index 00000000..f6e1da0b --- /dev/null +++ b/core/services/keystore/keys/vrfkey/benchmark_vrf_validation_test.go @@ -0,0 +1,26 @@ +package vrfkey + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/require" +) + +// Run with `go test -bench BenchmarkProofValidation` +func BenchmarkProofValidation(b *testing.B) { + key, err := NewV2() + require.NoError(b, err) + var proofs []Proof + for i := 0; i < b.N; i++ { + p, err := key.GenerateProof(big.NewInt(int64(i))) + require.NoError(b, err, "failed to generate proof number %d", i) + proofs = append(proofs, p) + } + b.ResetTimer() + for i, p := range proofs { + isValid, err := p.VerifyVRFProof() + require.NoError(b, err, "failed to check proof number %d", i) + require.True(b, isValid, "proof number %d is invalid", i) + } +} diff --git a/core/services/keystore/keys/vrfkey/crypto.go b/core/services/keystore/keys/vrfkey/crypto.go new file mode 100644 index 00000000..4a8f17f2 --- /dev/null +++ b/core/services/keystore/keys/vrfkey/crypto.go @@ -0,0 +1,187 @@ +package vrfkey + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "go.dedis.ch/kyber/v3" + + "github.com/goplugin/plugin-common/pkg/utils/hex" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + bm "github.com/goplugin/pluginv3.0/v2/core/utils/big_math" +) + +// This file contains golang re-implementations of functions on the VRF solidity +// contract. They are used to verify correct operation of those functions, and +// also to efficiently compute zInv off-chain, which makes computing the linear +// combination of c*gamma+s*hash onchain much more efficient. 
+ +var ( + // FieldSize is number of elements in secp256k1's base field, i.e. GF(FieldSize) + FieldSize = mustParseBig( + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", + ) + Secp256k1Curve = &secp256k1.Secp256k1{} + Generator = Secp256k1Curve.Point().Base() + eulersCriterionPower = bm.Div(bm.Sub(FieldSize, bm.One), bm.Two) + sqrtPower = bm.Div(bm.Add(FieldSize, bm.One), bm.Four) + ErrCGammaEqualsSHash = fmt.Errorf("pick a different nonce; c*gamma = s*hash, with this one") + // hashToCurveHashPrefix is domain-separation tag for initial HashToCurve hash. + // Corresponds to HASH_TO_CURVE_HASH_PREFIX in VRF.sol. + hashToCurveHashPrefix = common.BigToHash(bm.One).Bytes() + // scalarFromCurveHashPrefix is a domain-separation tag for the hash taken in + // ScalarFromCurve. Corresponds to SCALAR_FROM_CURVE_POINTS_HASH_PREFIX in + // VRF.sol. + scalarFromCurveHashPrefix = common.BigToHash(bm.Two).Bytes() + // RandomOutputHashPrefix is a domain-separation tag for the hash used to + // compute the final VRF random output + RandomOutputHashPrefix = common.BigToHash(bm.Three).Bytes() +) + +type fieldElt = *big.Int + +// neg(f) is the negation of f in the base field +func neg(f fieldElt) fieldElt { return bm.Sub(FieldSize, f) } + +// projectiveSub(x1, z1, x2, z2) is the projective coordinates of x1/z1 - x2/z2 +func projectiveSub(x1, z1, x2, z2 fieldElt) (fieldElt, fieldElt) { + num1 := bm.Mul(z2, x1) + num2 := neg(bm.Mul(z1, x2)) + return bm.Mod(bm.Add(num1, num2), FieldSize), bm.Mod(bm.Mul(z1, z2), FieldSize) +} + +// projectiveMul(x1, z1, x2, z2) is projective coordinates of (x1/z1)×(x2/z2) +func projectiveMul(x1, z1, x2, z2 fieldElt) (fieldElt, fieldElt) { + return bm.Mul(x1, x2), bm.Mul(z1, z2) +} + +// ProjectiveECAdd(px, py, qx, qy) duplicates the calculation in projective +// coordinates of VRF.sol#projectiveECAdd, so we can reliably get the +// denominator (i.e, z) +func ProjectiveECAdd(p, q kyber.Point) (x, y, z fieldElt) { + px, py := 
secp256k1.Coordinates(p) + qx, qy := secp256k1.Coordinates(q) + pz, qz := bm.One, bm.One + lx := bm.Sub(qy, py) + lz := bm.Sub(qx, px) + + sx, dx := projectiveMul(lx, lz, lx, lz) + sx, dx = projectiveSub(sx, dx, px, pz) + sx, dx = projectiveSub(sx, dx, qx, qz) + + sy, dy := projectiveSub(px, pz, sx, dx) + sy, dy = projectiveMul(sy, dy, lx, lz) + sy, dy = projectiveSub(sy, dy, py, pz) + + var sz fieldElt + if dx != dy { + sx = bm.Mul(sx, dy) + sy = bm.Mul(sy, dx) + sz = bm.Mul(dx, dy) + } else { + sz = dx + } + return bm.Mod(sx, FieldSize), bm.Mod(sy, FieldSize), bm.Mod(sz, FieldSize) +} + +// IsSquare returns true iff x = y^2 for some y in GF(p) +func IsSquare(x *big.Int) bool { + return bm.Equal(bm.One, bm.Exp(x, eulersCriterionPower, FieldSize)) +} + +// SquareRoot returns a s.t. a^2=x, as long as x is a square +func SquareRoot(x *big.Int) *big.Int { + return bm.Exp(x, sqrtPower, FieldSize) +} + +// YSquared returns x^3+7 mod fieldSize, the right-hand side of the secp256k1 +// curve equation. +func YSquared(x *big.Int) *big.Int { + return bm.Mod(bm.Add(bm.Exp(x, bm.Three, FieldSize), bm.Seven), FieldSize) +} + +// IsCurveXOrdinate returns true iff there is y s.t. y^2=x^3+7 +func IsCurveXOrdinate(x *big.Int) bool { + return IsSquare(YSquared(x)) +} + +// FieldHash hashes xs uniformly into {0, ..., fieldSize-1}. msg is assumed to +// already be a 256-bit hash +func FieldHash(msg []byte) *big.Int { + rv := utils.MustHash(string(msg)).Big() + // Hash recursively until rv < q. P(success per iteration) >= 0.5, so + // number of extra hashes is geometrically distributed, with mean < 1. 
+ for rv.Cmp(FieldSize) >= 0 { + rv = utils.MustHash(string(common.BigToHash(rv).Bytes())).Big() + } + return rv +} + +// linearCombination returns c*p1+s*p2 +func linearCombination(c *big.Int, p1 kyber.Point, + s *big.Int, p2 kyber.Point) kyber.Point { + return Secp256k1Curve.Point().Add( + Secp256k1Curve.Point().Mul(secp256k1.IntToScalar(c), p1), + Secp256k1Curve.Point().Mul(secp256k1.IntToScalar(s), p2)) +} + +// checkCGammaNotEqualToSHash checks c*gamma ≠ s*hash, as required by solidity +// verifier +func checkCGammaNotEqualToSHash(c *big.Int, gamma kyber.Point, s *big.Int, + hash kyber.Point) error { + cGamma := Secp256k1Curve.Point().Mul(secp256k1.IntToScalar(c), gamma) + sHash := Secp256k1Curve.Point().Mul(secp256k1.IntToScalar(s), hash) + if cGamma.Equal(sHash) { + return ErrCGammaEqualsSHash + } + return nil +} + +// HashToCurve is a cryptographic hash function which outputs a secp256k1 point, +// or an error. It passes each candidate x ordinate to ordinates function. +func HashToCurve(p kyber.Point, input *big.Int, ordinates func(x *big.Int), +) (kyber.Point, error) { + if !(secp256k1.ValidPublicKey(p) && input.BitLen() <= 256 && input.Cmp(bm.Zero) >= 0) { + return nil, fmt.Errorf("bad input to vrf.HashToCurve") + } + x := FieldHash(append(hashToCurveHashPrefix, append(secp256k1.LongMarshal(p), + utils.Uint256ToBytes32(input)...)...)) + ordinates(x) + for !IsCurveXOrdinate(x) { // Hash recursively until x^3+7 is a square + x.Set(FieldHash(common.BigToHash(x).Bytes())) + ordinates(x) + } + y := SquareRoot(YSquared(x)) + rv := secp256k1.SetCoordinates(x, y) + if bm.Equal(bm.I().Mod(y, bm.Two), bm.One) { // Negate response if y odd + rv = rv.Neg(rv) + } + return rv, nil +} + +// ScalarFromCurve returns a hash for the curve points. 
Corresponds to the +// hash computed in VRF.sol#ScalarFromCurvePoints +func ScalarFromCurvePoints( + hash, pk, gamma kyber.Point, uWitness [20]byte, v kyber.Point) *big.Int { + if !(secp256k1.ValidPublicKey(hash) && secp256k1.ValidPublicKey(pk) && + secp256k1.ValidPublicKey(gamma) && secp256k1.ValidPublicKey(v)) { + panic("bad arguments to vrf.ScalarFromCurvePoints") + } + // msg will contain abi.encodePacked(hash, pk, gamma, v, uWitness) + msg := scalarFromCurveHashPrefix + for _, p := range []kyber.Point{hash, pk, gamma, v} { + msg = append(msg, secp256k1.LongMarshal(p)...) + } + msg = append(msg, uWitness[:]...) + return bm.I().SetBytes(utils.MustHash(string(msg)).Bytes()) +} + +func mustParseBig(hx string) *big.Int { + n, err := hex.ParseBig(hx) + if err != nil { + panic(fmt.Errorf(`failed to convert "%s" as hex to big.Int`, hx)) + } + return n +} diff --git a/core/services/keystore/keys/vrfkey/crypto_test.go b/core/services/keystore/keys/vrfkey/crypto_test.go new file mode 100644 index 00000000..b9f41d90 --- /dev/null +++ b/core/services/keystore/keys/vrfkey/crypto_test.go @@ -0,0 +1,29 @@ +package vrfkey + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + bm "github.com/goplugin/pluginv3.0/v2/core/utils/big_math" +) + +func TestVRF_IsSquare(t *testing.T) { + assert.True(t, IsSquare(bm.Four)) + minusOneModP := bm.I().Sub(FieldSize, bm.One) + assert.False(t, IsSquare(minusOneModP)) +} + +func TestVRF_SquareRoot(t *testing.T) { + assert.Equal(t, bm.Two, SquareRoot(bm.Four)) +} + +func TestVRF_YSquared(t *testing.T) { + assert.Equal(t, bm.Add(bm.Mul(bm.Two, bm.Mul(bm.Two, bm.Two)), bm.Seven), YSquared(bm.Two)) // 2³+7 +} + +func TestVRF_IsCurveXOrdinate(t *testing.T) { + assert.True(t, IsCurveXOrdinate(big.NewInt(1))) + assert.False(t, IsCurveXOrdinate(big.NewInt(5))) +} diff --git a/core/services/keystore/keys/vrfkey/export.go b/core/services/keystore/keys/vrfkey/export.go new file mode 100644 index 00000000..6fc4c07d --- /dev/null 
+++ b/core/services/keystore/keys/vrfkey/export.go @@ -0,0 +1,85 @@ +package vrfkey + +import ( + "crypto/ecdsa" + "encoding/json" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/google/uuid" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func FromEncryptedJSON(keyJSON []byte, password string) (KeyV2, error) { + var export EncryptedVRFKeyExport + if err := json.Unmarshal(keyJSON, &export); err != nil { + return KeyV2{}, err + } + + // NOTE: We do this shuffle to an anonymous struct + // solely to add a throwaway UUID, so we can leverage + // the keystore.DecryptKey from the geth which requires it + // as of 1.10.0. + keyJSON, err := json.Marshal(struct { + Address string `json:"address"` + Crypto keystore.CryptoJSON `json:"crypto"` + Version int `json:"version"` + Id string `json:"id"` + }{ + Address: export.VRFKey.Address, + Crypto: export.VRFKey.Crypto, + Version: export.VRFKey.Version, + Id: uuid.New().String(), + }) + if err != nil { + return KeyV2{}, errors.Wrapf(err, "while marshaling key for decryption") + } + + gethKey, err := keystore.DecryptKey(keyJSON, adulteratedPassword(password)) + if err != nil { + return KeyV2{}, errors.Wrapf(err, "could not decrypt VRF key %s", export.PublicKey.String()) + } + + key := Raw(gethKey.PrivateKey.D.Bytes()).Key() + return key, nil +} + +type EncryptedVRFKeyExport struct { + PublicKey secp256k1.PublicKey `json:"PublicKey"` + VRFKey gethKeyStruct `json:"vrf_key"` +} + +func (key KeyV2) ToEncryptedJSON(password string, scryptParams utils.ScryptParams) (export []byte, err error) { + cryptoJSON, err := keystore.EncryptKey(key.toGethKey(), adulteratedPassword(password), scryptParams.N, scryptParams.P) + if err != nil { + return nil, errors.Wrapf(err, "failed to encrypt key %s", key.ID()) + } + var gethKey gethKeyStruct + err = json.Unmarshal(cryptoJSON, &gethKey) + if err != nil { + return nil, 
errors.Wrapf(err, "failed to unmarshal key %s", key.ID()) + } + encryptedOCRKExport := EncryptedVRFKeyExport{ + PublicKey: key.PublicKey, + VRFKey: gethKey, + } + return json.Marshal(encryptedOCRKExport) +} + +func (key KeyV2) toGethKey() *keystore.Key { + return &keystore.Key{ + Address: key.PublicKey.Address(), + PrivateKey: &ecdsa.PrivateKey{D: secp256k1.ToInt(*key.k)}, + } +} + +// passwordPrefix is added to the beginning of the passwords for +// EncryptedVRFKey's, so that VRF keys can't casually be used as ethereum +// keys, and vice-versa. If you want to do that, DON'T. +var passwordPrefix = "don't mix VRF and Ethereum keys!" + +func adulteratedPassword(password string) string { + return passwordPrefix + password +} diff --git a/core/services/keystore/keys/vrfkey/export_test.go b/core/services/keystore/keys/vrfkey/export_test.go new file mode 100644 index 00000000..acd9bb48 --- /dev/null +++ b/core/services/keystore/keys/vrfkey/export_test.go @@ -0,0 +1,19 @@ +package vrfkey + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys" +) + +func TestVRFKeys_ExportImport(t *testing.T) { + keys.RunKeyExportImportTestcase(t, createKey, decryptKey) +} + +func createKey() (keys.KeyType, error) { + return NewV2() +} + +func decryptKey(keyJSON []byte, password string) (keys.KeyType, error) { + return FromEncryptedJSON(keyJSON, password) +} diff --git a/core/services/keystore/keys/vrfkey/key_v2.go b/core/services/keystore/keys/vrfkey/key_v2.go new file mode 100644 index 00000000..4fd8e8d1 --- /dev/null +++ b/core/services/keystore/keys/vrfkey/key_v2.go @@ -0,0 +1,161 @@ +package vrfkey + +import ( + "crypto/rand" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "go.dedis.ch/kyber/v3" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + bm "github.com/goplugin/pluginv3.0/v2/core/utils/big_math" +) + 
+var suite = secp256k1.NewBlakeKeccackSecp256k1() + +type Raw []byte + +func (raw Raw) Key() KeyV2 { + rawKeyInt := new(big.Int).SetBytes(raw) + k := secp256k1.IntToScalar(rawKeyInt) + key, err := keyFromScalar(k) + if err != nil { + panic(err) + } + return key +} + +func (raw Raw) String() string { + return "" +} + +func (raw Raw) GoString() string { + return raw.String() +} + +var _ fmt.GoStringer = &KeyV2{} + +type KeyV2 struct { + k *kyber.Scalar + PublicKey secp256k1.PublicKey +} + +func NewV2() (KeyV2, error) { + k := suite.Scalar().Pick(suite.RandomStream()) + return keyFromScalar(k) +} + +func MustNewV2XXXTestingOnly(k *big.Int) KeyV2 { + rv, err := keyFromScalar(secp256k1.IntToScalar(k)) + if err != nil { + panic(err) + } + return rv +} + +func (key KeyV2) ID() string { + return hexutil.Encode(key.PublicKey[:]) +} + +func (key KeyV2) Raw() Raw { + return secp256k1.ToInt(*key.k).Bytes() +} + +// GenerateProofWithNonce allows external nonce generation for testing purposes +// +// As with signatures, using nonces which are in any way predictable to an +// adversary will leak your secret key! Most people should use GenerateProof +// instead. 
+func (key KeyV2) GenerateProofWithNonce(seed, nonce *big.Int) (Proof, error) { + secretKey := secp256k1.ScalarToHash(*key.k).Big() + if !(secp256k1.RepresentsScalar(secretKey) && seed.BitLen() <= 256) { + return Proof{}, fmt.Errorf("badly-formatted key or seed") + } + skAsScalar := secp256k1.IntToScalar(secretKey) + publicKey := Secp256k1Curve.Point().Mul(skAsScalar, nil) + h, err := HashToCurve(publicKey, seed, func(*big.Int) {}) + if err != nil { + return Proof{}, errors.Wrap(err, "vrf.makeProof#HashToCurve") + } + gamma := Secp256k1Curve.Point().Mul(skAsScalar, h) + sm := secp256k1.IntToScalar(nonce) + u := Secp256k1Curve.Point().Mul(sm, Generator) + uWitness := secp256k1.EthereumAddress(u) + v := Secp256k1Curve.Point().Mul(sm, h) + c := ScalarFromCurvePoints(h, publicKey, gamma, uWitness, v) + // (m - c*secretKey) % GroupOrder + s := bm.Mod(bm.Sub(nonce, bm.Mul(c, secretKey)), secp256k1.GroupOrder) + if e := checkCGammaNotEqualToSHash(c, gamma, s, h); e != nil { + return Proof{}, e + } + outputHash := utils.MustHash(string(append(RandomOutputHashPrefix, + secp256k1.LongMarshal(gamma)...))) + rv := Proof{ + PublicKey: publicKey, + Gamma: gamma, + C: c, + S: s, + Seed: seed, + Output: outputHash.Big(), + } + valid, err := rv.VerifyVRFProof() + if !valid || err != nil { + panic("constructed invalid proof") + } + return rv, nil +} + +// GenerateProof returns gamma, plus proof that gamma was constructed from seed +// as mandated from the given secretKey, with public key secretKey*Generator +// +// secretKey and seed must be less than secp256k1 group order. (Without this +// constraint on the seed, the samples and the possible public keys would +// deviate very slightly from uniform distribution.) 
+func (key KeyV2) GenerateProof(seed *big.Int) (Proof, error) { + for { + nonce, err := rand.Int(rand.Reader, secp256k1.GroupOrder) + if err != nil { + return Proof{}, err + } + proof, err := key.GenerateProofWithNonce(seed, nonce) + switch { + case errors.Is(err, ErrCGammaEqualsSHash): + // This is cryptographically impossible, but if it were ever to happen, we + // should try again with a different nonce. + continue + case err != nil: // Any other error indicates failure + return Proof{}, err + default: + return proof, err // err should be nil + } + } +} + +func (key KeyV2) String() string { + return fmt.Sprintf("VRFKeyV2{PublicKey: %s}", key.PublicKey) +} + +func (key KeyV2) GoString() string { + return key.String() +} + +func keyFromScalar(k kyber.Scalar) (KeyV2, error) { + rawPublicKey, err := secp256k1.ScalarToPublicPoint(k).MarshalBinary() + if err != nil { + return KeyV2{}, errors.Wrapf(err, "could not marshal public key") + } + if len(rawPublicKey) != secp256k1.CompressedPublicKeyLength { + return KeyV2{}, fmt.Errorf("public key %x has wrong length", rawPublicKey) + } + var publicKey secp256k1.PublicKey + if l := copy(publicKey[:], rawPublicKey); l != secp256k1.CompressedPublicKeyLength { + panic(fmt.Errorf("failed to copy correct length in serialized public key")) + } + return KeyV2{ + k: &k, + PublicKey: publicKey, + }, nil +} diff --git a/core/services/keystore/keys/vrfkey/key_v2_test.go b/core/services/keystore/keys/vrfkey/key_v2_test.go new file mode 100644 index 00000000..9ee7bdf5 --- /dev/null +++ b/core/services/keystore/keys/vrfkey/key_v2_test.go @@ -0,0 +1,40 @@ +package vrfkey + +import ( + "crypto/ecdsa" + "crypto/rand" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" +) + +func TestVRFKeys_KeyV2_Raw(t *testing.T) { + 
privK, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + require.NoError(t, err) + + r := Raw(privK.D.Bytes()) + + assert.Equal(t, r.String(), r.GoString()) + assert.Equal(t, "", r.String()) +} + +func TestVRFKeys_KeyV2(t *testing.T) { + k, err := NewV2() + require.NoError(t, err) + + assert.Equal(t, hexutil.Encode(k.PublicKey[:]), k.ID()) + assert.Equal(t, Raw(secp256k1.ToInt(*k.k).Bytes()), k.Raw()) + + t.Run("generates proof", func(t *testing.T) { + p, err := k.GenerateProof(big.NewInt(1)) + + assert.NotZero(t, p) + assert.NoError(t, err) + }) +} diff --git a/core/services/keystore/keys/vrfkey/models.go b/core/services/keystore/keys/vrfkey/models.go new file mode 100644 index 00000000..e96c4ad1 --- /dev/null +++ b/core/services/keystore/keys/vrfkey/models.go @@ -0,0 +1,73 @@ +package vrfkey + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "os" + "time" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// EncryptedVRFKey contains encrypted private key to be serialized to DB +// +// We could re-use geth's key handling, here, but this makes it much harder to +// misuse VRF proving keys as ethereum keys or vice versa. 
+type EncryptedVRFKey struct {
+	PublicKey secp256k1.PublicKey
+	VRFKey    gethKeyStruct `json:"vrf_key"`
+	CreatedAt time.Time     `json:"-"`
+	UpdatedAt time.Time     `json:"-"`
+	DeletedAt *time.Time    `json:"-"`
+}
+
+// JSON returns the JSON representation of e, or errors
+func (e *EncryptedVRFKey) JSON() ([]byte, error) {
+	keyJSON, err := json.Marshal(e)
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not marshal encrypted key to JSON")
+	}
+	return keyJSON, nil
+}
+
+// WriteToDisk writes the JSON representation of e to given file path, and
+// ensures the file has appropriate access permissions
+func (e *EncryptedVRFKey) WriteToDisk(path string) error {
+	keyJSON, err := e.JSON()
+	if err != nil {
+		return errors.Wrapf(err, "while marshaling key to save to %s", path)
+	}
+	// 0600: owner read/write only, so the (encrypted) key material is not
+	// readable by other users on the host.
+	userReadWriteOtherNoAccess := os.FileMode(0600)
+	return utils.WriteFileWithMaxPerms(path, keyJSON, userReadWriteOtherNoAccess)
+}
+
+// Copied from go-ethereum/accounts/keystore/key.go's encryptedKeyJSONV3
+type gethKeyStruct struct {
+	Address string              `json:"address"`
+	Crypto  keystore.CryptoJSON `json:"crypto"`
+	Version int                 `json:"version"`
+}
+
+// Value implements driver.Valuer by serializing the key struct to JSON for
+// storage in the DB.
+func (k gethKeyStruct) Value() (driver.Value, error) {
+	return json.Marshal(&k)
+}
+
+// Scan implements sql.Scanner, accepting JSON from the DB driver as either
+// []byte or string; any other driver type is rejected with an error.
+func (k *gethKeyStruct) Scan(value interface{}) error {
+	var toUnmarshal []byte
+	switch s := value.(type) {
+	case []byte:
+		toUnmarshal = s
+	case string:
+		toUnmarshal = []byte(s)
+	default:
+		return errors.Wrap(
+			fmt.Errorf("unable to convert %+v of type %T to gethKeyStruct",
+				value, value), "scan failure")
+	}
+	return json.Unmarshal(toUnmarshal, k)
+}
diff --git a/core/services/keystore/keys/vrfkey/models_test.go b/core/services/keystore/keys/vrfkey/models_test.go
new file mode 100644
index 00000000..e603e6aa
--- /dev/null
+++ b/core/services/keystore/keys/vrfkey/models_test.go
@@ -0,0 +1,32 @@
+package vrfkey
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestVRFKeys_Models(t 
*testing.T) { + kv2, err := NewV2() + require.NoError(t, err) + k := EncryptedVRFKey{ + PublicKey: kv2.PublicKey, + VRFKey: gethKeyStruct{}, + } + + v, err := k.JSON() + assert.NotEmpty(t, v) + require.NoError(t, err) + + vrfk := gethKeyStruct{} + + v2, err := vrfk.Value() + require.NoError(t, err) + err = vrfk.Scan(v2) + require.NoError(t, err) + err = vrfk.Scan("") + require.Error(t, err) + err = vrfk.Scan(1) + require.Error(t, err) +} diff --git a/core/services/keystore/keys/vrfkey/private_key.go b/core/services/keystore/keys/vrfkey/private_key.go new file mode 100644 index 00000000..dfac849f --- /dev/null +++ b/core/services/keystore/keys/vrfkey/private_key.go @@ -0,0 +1,83 @@ +package vrfkey + +import ( + "encoding/json" + "fmt" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/google/uuid" + "github.com/pkg/errors" + "go.dedis.ch/kyber/v3" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" +) + +// PrivateKey represents the secret used to construct a VRF proof. +// +// Don't serialize directly, use Encrypt method, with user-supplied passphrase. +// The unencrypted PrivateKey struct should only live in-memory. +// +// Only use it if you absolutely need it (i.e., for a novel crypto protocol.) +// Implement whatever cryptography you need on this struct, so your callers +// don't need to know the secret key explicitly. (See, e.g., MarshaledProof.) +type PrivateKey struct { + k kyber.Scalar + PublicKey secp256k1.PublicKey +} + +func (k PrivateKey) ToV2() KeyV2 { + return KeyV2{ + k: &k.k, + PublicKey: k.PublicKey, + } +} + +// fromGethKey returns the vrfkey representation of gethKey. Do not abuse this +// to convert an ethereum key into a VRF key! 
+func fromGethKey(gethKey *keystore.Key) *PrivateKey {
+	secretKey := secp256k1.IntToScalar(gethKey.PrivateKey.D)
+	rawPublicKey, err := secp256k1.ScalarToPublicPoint(secretKey).MarshalBinary()
+	if err != nil {
+		panic(err) // Only way this can happen is out-of-memory failure
+	}
+	var publicKey secp256k1.PublicKey
+	copy(publicKey[:], rawPublicKey)
+	return &PrivateKey{secretKey, publicKey}
+}
+
+// String deliberately omits the secret scalar k, so that a PrivateKey printed
+// with %s/%v does not leak key material.
+func (k *PrivateKey) String() string {
+	return fmt.Sprintf("PrivateKey{k: , PublicKey: %s}", k.PublicKey)
+}
+
+// GoString reduces the risk of accidentally logging the private key
+func (k *PrivateKey) GoString() string {
+	return k.String()
+}
+
+// Decrypt returns the PrivateKey in e, decrypted via auth, or an error
+func Decrypt(e EncryptedVRFKey, auth string) (*PrivateKey, error) {
+	// NOTE: We do this shuffle to an anonymous struct
+	// solely to add a throwaway UUID, so we can leverage
+	// the keystore.DecryptKey from the geth which requires it
+	// as of 1.10.0.
+	keyJSON, err := json.Marshal(struct {
+		Address string              `json:"address"`
+		Crypto  keystore.CryptoJSON `json:"crypto"`
+		Version int                 `json:"version"`
+		Id      string              `json:"id"`
+	}{
+		Address: e.VRFKey.Address,
+		Crypto:  e.VRFKey.Crypto,
+		Version: e.VRFKey.Version,
+		Id:      uuid.New().String(),
+	})
+	if err != nil {
+		return nil, errors.Wrapf(err, "while marshaling key for decryption")
+	}
+	// The password is adulterated before use, so VRF keys cannot be decrypted
+	// with (or mistaken for) plain geth account passwords.
+	gethKey, err := keystore.DecryptKey(keyJSON, adulteratedPassword(auth))
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not decrypt VRF key %s",
+			e.PublicKey.String())
+	}
+	return fromGethKey(gethKey), nil
+}
diff --git a/core/services/keystore/keys/vrfkey/private_key_test.go b/core/services/keystore/keys/vrfkey/private_key_test.go
new file mode 100644
index 00000000..4d6b580e
--- /dev/null
+++ b/core/services/keystore/keys/vrfkey/private_key_test.go
@@ -0,0 +1,40 @@
+package vrfkey
+
+import (
+	"encoding/json"
+	"fmt"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/accounts/keystore"
+
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestVRFKeys_PrivateKey(t *testing.T) { + jsonKey := `{"PublicKey":"0xd2377bc6be8a2c5ce163e1867ee42ef111e320686f940a98e52e9c019ca0606800","vrf_key":{"address":"b94276ad4e5452732ec0cccf30ef7919b67844b6","crypto":{"cipher":"aes-128-ctr","ciphertext":"ff66d61d02dba54a61bab1ceb8414643f9e76b7351785d2959e2c8b50ee69a92","cipherparams":{"iv":"75705da271b11e330a27b8d593a3930c"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"efe5b372e4fe79d0af576a79d65a1ee35d0792d9c92b70107b5ada1817ea7c7b"},"mac":"e4d0bb08ffd004ab03aeaa42367acbd9bb814c6cfd981f5157503f54c30816e7"},"version":3}}` + k, err := FromEncryptedJSON([]byte(jsonKey), "p4SsW0rD1!@#_") + require.NoError(t, err) + cryptoJSON, err := keystore.EncryptKey(k.toGethKey(), adulteratedPassword(testutils.Password), utils.FastScryptParams.N, utils.FastScryptParams.P) + require.NoError(t, err) + var gethKey gethKeyStruct + err = json.Unmarshal(cryptoJSON, &gethKey) + require.NoError(t, err) + + ek := EncryptedVRFKey{ + PublicKey: k.PublicKey, + VRFKey: gethKey, + } + + pk, err := Decrypt(ek, testutils.Password) + require.NoError(t, err) + _, err = Decrypt(ek, "wrong-password") + assert.Error(t, err) + + kv2 := pk.ToV2() + + assert.Equal(t, fmt.Sprintf("VRFKeyV2{PublicKey: %s}", kv2.PublicKey), kv2.String()) + assert.Equal(t, fmt.Sprintf("PrivateKey{k: , PublicKey: %s}", pk.PublicKey), pk.String()) +} diff --git a/core/services/keystore/keys/vrfkey/proof.go b/core/services/keystore/keys/vrfkey/proof.go new file mode 100644 index 00000000..597f76e2 --- /dev/null +++ b/core/services/keystore/keys/vrfkey/proof.go @@ -0,0 +1,66 @@ +package vrfkey + +import ( + "fmt" + "math/big" + + "go.dedis.ch/kyber/v3" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + 
"github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + bm "github.com/goplugin/pluginv3.0/v2/core/utils/big_math" +) + +// Proof represents a proof that Gamma was constructed from the Seed +// according to the process mandated by the PublicKey. +// +// N.B.: The kyber.Point fields must contain secp256k1.secp256k1Point values, C, +// S and Seed must be secp256k1Point, and Output must be at +// most 256 bits. See Proof.WellFormed. +type Proof struct { + PublicKey kyber.Point // secp256k1 public key of private key used in proof + Gamma kyber.Point + C *big.Int + S *big.Int + Seed *big.Int // Seed input to verifiable random function + Output *big.Int // verifiable random function output;, uniform uint256 sample +} + +func (p *Proof) String() string { + return fmt.Sprintf( + "vrf.Proof{PublicKey: %s, Gamma: %s, C: %x, S: %x, Seed: %x, Output: %x}", + p.PublicKey, p.Gamma, p.C, p.S, p.Seed, p.Output) +} + +// WellFormed is true iff p's attributes satisfy basic domain checks +func (p *Proof) WellFormed() bool { + return (secp256k1.ValidPublicKey(p.PublicKey) && + secp256k1.ValidPublicKey(p.Gamma) && secp256k1.RepresentsScalar(p.C) && + secp256k1.RepresentsScalar(p.S) && p.Output.BitLen() <= 256) +} + +// VerifyProof is true iff gamma was generated in the mandated way from the +// given publicKey and seed, and no error was encountered +func (p *Proof) VerifyVRFProof() (bool, error) { + if !p.WellFormed() { + return false, fmt.Errorf("badly-formatted proof") + } + h, err := HashToCurve(p.PublicKey, p.Seed, func(*big.Int) {}) + if err != nil { + return false, err + } + err = checkCGammaNotEqualToSHash(p.C, p.Gamma, p.S, h) + if err != nil { + return false, fmt.Errorf("c*γ = s*hash (disallowed in solidity verifier)") + } + // publicKey = secretKey*Generator. 
See GenerateProof for u, v, m, s + // c*secretKey*Generator + (m - c*secretKey)*Generator = m*Generator = u + uPrime := linearCombination(p.C, p.PublicKey, p.S, Generator) + // c*secretKey*h + (m - c*secretKey)*h = m*h = v + vPrime := linearCombination(p.C, p.Gamma, p.S, h) + uWitness := secp256k1.EthereumAddress(uPrime) + cPrime := ScalarFromCurvePoints(h, p.PublicKey, p.Gamma, uWitness, vPrime) + output := utils.MustHash(string(append( + RandomOutputHashPrefix, secp256k1.LongMarshal(p.Gamma)...))) + return bm.Equal(p.C, cPrime) && bm.Equal(p.Output, output.Big()), nil +} diff --git a/core/services/keystore/keys/vrfkey/proof_test.go b/core/services/keystore/keys/vrfkey/proof_test.go new file mode 100644 index 00000000..0a6af4b0 --- /dev/null +++ b/core/services/keystore/keys/vrfkey/proof_test.go @@ -0,0 +1,25 @@ +package vrfkey + +import ( + "fmt" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestVRF_VerifyProof(t *testing.T) { + sk, err := NewV2() + require.NoError(t, err) + seed, nonce := big.NewInt(2), big.NewInt(3) + p, err := sk.GenerateProofWithNonce(seed, nonce) + require.NoError(t, err, "could not generate proof") + p.Seed = big.NewInt(0).Add(seed, big.NewInt(1)) + valid, err := p.VerifyVRFProof() + require.NoError(t, err, "could not validate proof") + assert.False(t, valid, "invalid proof was found valid") + assert.Equal(t, fmt.Sprintf( + "vrf.Proof{PublicKey: %s, Gamma: %s, C: %x, S: %x, Seed: %x, Output: %x}", + p.PublicKey, p.Gamma, p.C, p.S, p.Seed, p.Output), p.String()) +} diff --git a/core/services/keystore/keys/vrfkey/public_key_test.go b/core/services/keystore/keys/vrfkey/public_key_test.go new file mode 100644 index 00000000..6fca2a41 --- /dev/null +++ b/core/services/keystore/keys/vrfkey/public_key_test.go @@ -0,0 +1,44 @@ +package vrfkey + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + + 
"github.com/goplugin/pluginv3.0/v2/core/services/signatures/cryptotest" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValueScanIdentityPointSet(t *testing.T) { + randomStream := cryptotest.NewStream(t, 0) + for i := 0; i < 10; i++ { + p := suite.Point().Pick(randomStream) + var pk, nPk, nnPk secp256k1.PublicKey + marshaledKey, err := p.MarshalBinary() + require.NoError(t, err, "failed to marshal public key") + require.Equal(t, copy(pk[:], marshaledKey), + secp256k1.CompressedPublicKeyLength, "failed to copy marshaled key to pk") + assert.NotEqual(t, pk, nPk, "equality test succeeds on different keys!") + np, err := pk.Point() + require.NoError(t, err, "failed to marshal public key") + assert.True(t, p.Equal(np), "Point should give the point we constructed pk from") + value, err := pk.Value() + require.NoError(t, err, "failed to serialize public key for database") + require.NoError(t, nPk.Scan(value)) + assert.Equal(t, pk, nPk, + "roundtripping public key through db Value/Scan gave different key!") + nnPk.Set(pk) + assert.Equal(t, pk, nnPk, + "setting one PubliKey to another should result in equal keys") + } + +} + +// Tests that PublicKey.Hash gives the same result as the VRFCoordinator's +func TestHash(t *testing.T) { + pk, err := secp256k1.NewPublicKeyFromHex("0x9dc09a0f898f3b5e8047204e7ce7e44b587920932f08431e29c9bf6923b8450a01") + assert.NoError(t, err) + assert.Equal(t, "0xc4406d555db624837188b91514a5f47e34d825d935ab887a35c06a3e7c41de69", pk.MustHash().String()) +} diff --git a/core/services/keystore/keystoretest.go b/core/services/keystore/keystoretest.go new file mode 100644 index 00000000..bb973e93 --- /dev/null +++ b/core/services/keystore/keystoretest.go @@ -0,0 +1,81 @@ +package keystore + +import ( + "errors" + "sync" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + 
+// memoryORM is an in-memory version of the keystore. This is
+// only intended to be used in tests to avoid DB lock contention on
+// the single DB row that stores the key material.
+//
+// Note: we store `q` on the struct since `saveEncryptedKeyRing` needs
+// to support DB callbacks.
+type memoryORM struct {
+	keyRing *encryptedKeyRing // most recently saved key ring; nil until first save
+	q       pg.Queryer        // passed to save callbacks, which may still hit the DB
+	mu      sync.RWMutex      // guards keyRing
+}
+
+// isEmpty is hardcoded to report a non-empty store.
+// NOTE(review): callers can therefore never observe an empty in-memory
+// keystore — presumably fine for the test-only use case; confirm before
+// reusing this type elsewhere.
+func (o *memoryORM) isEmpty() (bool, error) {
+	return false, nil
+}
+
+// saveEncryptedKeyRing stores kr in memory and runs every callback against the
+// underlying queryer, joining all callback errors into the returned error.
+func (o *memoryORM) saveEncryptedKeyRing(kr *encryptedKeyRing, callbacks ...func(pg.Queryer) error) (err error) {
+	o.mu.Lock()
+	defer o.mu.Unlock()
+	o.keyRing = kr
+	for _, c := range callbacks {
+		err = errors.Join(err, c(o.q))
+	}
+	return
+}
+
+// getEncryptedKeyRing returns the last saved key ring, or a zero-value ring if
+// nothing has been saved yet.
+func (o *memoryORM) getEncryptedKeyRing() (encryptedKeyRing, error) {
+	o.mu.RLock()
+	defer o.mu.RUnlock()
+	if o.keyRing == nil {
+		return encryptedKeyRing{}, nil
+	}
+	return *o.keyRing, nil
+}
+
+// newInMemoryORM returns a memoryORM whose save callbacks run against q.
+func newInMemoryORM(q pg.Queryer) *memoryORM {
+	return &memoryORM{q: q}
+}
+
+// NewInMemory sets up a keystore which NOOPs attempts to access the `encrypted_key_rings` table. Accessing `evm.key_states`
+// will still hit the DB.
+func NewInMemory(db *sqlx.DB, scryptParams utils.ScryptParams, lggr logger.Logger, cfg pg.QConfig) *master { + dbORM := NewORM(db, lggr, cfg) + memoryORM := newInMemoryORM(dbORM.q) + + km := &keyManager{ + orm: memoryORM, + keystateORM: dbORM, + scryptParams: scryptParams, + lock: &sync.RWMutex{}, + logger: lggr.Named("KeyStore"), + } + + return &master{ + keyManager: km, + cosmos: newCosmosKeyStore(km), + csa: newCSAKeyStore(km), + eth: newEthKeyStore(km, dbORM, dbORM.q), + ocr: newOCRKeyStore(km), + ocr2: newOCR2KeyStore(km), + p2p: newP2PKeyStore(km), + solana: newSolanaKeyStore(km), + starknet: newStarkNetKeyStore(km), + vrf: newVRFKeyStore(km), + dkgSign: newDKGSignKeyStore(km), + dkgEncrypt: newDKGEncryptKeyStore(km), + } +} diff --git a/core/services/keystore/legacy_key.go b/core/services/keystore/legacy_key.go new file mode 100644 index 00000000..1f1be128 --- /dev/null +++ b/core/services/keystore/legacy_key.go @@ -0,0 +1,108 @@ +package keystore + +import ( + "encoding/json" + + "github.com/pkg/errors" +) + +type rawLegacyKey []string +type rawLegacyKeys map[string]rawLegacyKey + +type LegacyKeyStorage struct { + legacyRawKeys rawLegacyKeys +} + +func (rlk *rawLegacyKeys) len() (n int) { + for _, v := range *rlk { + n += len(v) + } + return n +} + +func (rlk *rawLegacyKeys) has(name string) bool { + for n := range *rlk { + if n == name { + return true + } + } + return false +} + +func (rlk *rawLegacyKeys) hasValueInField(fieldName, value string) bool { + for _, v := range (*rlk)[fieldName] { + if v == value { + return true + } + } + return false +} + +// StoreUnsupported will store the raw keys that no longer have support in the node +// it will check if raw json contains keys that have not been added to the key ring +// and stores them internally +func (k *LegacyKeyStorage) StoreUnsupported(allRawKeysJson []byte, keyRing *keyRing) error { + if keyRing == nil { + return errors.New("keyring is nil") + } + supportedKeyRingJson, err := 
json.Marshal(keyRing.raw()) + if err != nil { + return err + } + + var ( + allKeys = rawLegacyKeys{} + supportedKeys = rawLegacyKeys{} + ) + + err = json.Unmarshal(allRawKeysJson, &allKeys) + if err != nil { + return err + } + err = json.Unmarshal(supportedKeyRingJson, &supportedKeys) + if err != nil { + return err + } + + k.legacyRawKeys = rawLegacyKeys{} + for fName, fValue := range allKeys { + if !supportedKeys.has(fName) { + k.legacyRawKeys[fName] = fValue + continue + } + for _, v := range allKeys[fName] { + if !supportedKeys.hasValueInField(fName, v) { + k.legacyRawKeys[fName] = append(k.legacyRawKeys[fName], v) + } + } + } + + return nil +} + +// UnloadUnsupported will inject the unsupported keys into the raw key ring json +func (k *LegacyKeyStorage) UnloadUnsupported(supportedRawKeyRingJson []byte) ([]byte, error) { + supportedKeys := rawLegacyKeys{} + err := json.Unmarshal(supportedRawKeyRingJson, &supportedKeys) + if err != nil { + return nil, err + } + + for fName, vals := range k.legacyRawKeys { + if !supportedKeys.has(fName) { + supportedKeys[fName] = vals + continue + } + for _, v := range vals { + if !supportedKeys.hasValueInField(fName, v) { + supportedKeys[fName] = append(supportedKeys[fName], v) + } + } + } + + allKeysJson, err := json.Marshal(supportedKeys) + if err != nil { + return nil, err + } + return allKeysJson, nil +} diff --git a/core/services/keystore/master.go b/core/services/keystore/master.go new file mode 100644 index 00000000..c46bfdd4 --- /dev/null +++ b/core/services/keystore/master.go @@ -0,0 +1,289 @@ +package keystore + +import ( + "fmt" + "math/big" + "reflect" + "sync" + + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/cosmoskey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgencryptkey" + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgsignkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/starkkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + ErrLocked = errors.New("Keystore is locked") + ErrKeyNotFound = errors.New("Key not found") + ErrKeyExists = errors.New("Key already exists") +) + +// DefaultEVMChainIDFunc is a func for getting a default evm chain ID - +// necessary because it is lazily evaluated +type DefaultEVMChainIDFunc func() (defaultEVMChainID *big.Int, err error) + +//go:generate mockery --quiet --name Master --output ./mocks/ --case=underscore + +type Master interface { + CSA() CSA + DKGSign() DKGSign + DKGEncrypt() DKGEncrypt + Eth() Eth + OCR() OCR + OCR2() OCR2 + P2P() P2P + Solana() Solana + Cosmos() Cosmos + StarkNet() StarkNet + VRF() VRF + Unlock(password string) error + IsEmpty() (bool, error) +} + +type master struct { + *keyManager + cosmos *cosmos + csa *csa + eth *eth + ocr *ocr + ocr2 ocr2 + p2p *p2p + solana *solana + starknet *starknet + vrf *vrf + dkgSign *dkgSign + dkgEncrypt *dkgEncrypt +} + +func New(db *sqlx.DB, scryptParams utils.ScryptParams, lggr logger.Logger, cfg pg.QConfig) Master { + return newMaster(db, scryptParams, lggr, cfg) +} + +func newMaster(db *sqlx.DB, scryptParams utils.ScryptParams, lggr logger.Logger, cfg pg.QConfig) *master { + orm := NewORM(db, lggr, cfg) + km := &keyManager{ + orm: orm, + keystateORM: orm, + scryptParams: scryptParams, + lock: 
&sync.RWMutex{}, + logger: lggr.Named("KeyStore"), + } + + return &master{ + keyManager: km, + cosmos: newCosmosKeyStore(km), + csa: newCSAKeyStore(km), + eth: newEthKeyStore(km, orm, orm.q), + ocr: newOCRKeyStore(km), + ocr2: newOCR2KeyStore(km), + p2p: newP2PKeyStore(km), + solana: newSolanaKeyStore(km), + starknet: newStarkNetKeyStore(km), + vrf: newVRFKeyStore(km), + dkgSign: newDKGSignKeyStore(km), + dkgEncrypt: newDKGEncryptKeyStore(km), + } +} + +func (ks *master) DKGEncrypt() DKGEncrypt { + return ks.dkgEncrypt +} + +func (ks master) DKGSign() DKGSign { + return ks.dkgSign +} + +func (ks master) CSA() CSA { + return ks.csa +} + +func (ks *master) Eth() Eth { + return ks.eth +} + +func (ks *master) OCR() OCR { + return ks.ocr +} + +func (ks *master) OCR2() OCR2 { + return ks.ocr2 +} + +func (ks *master) P2P() P2P { + return ks.p2p +} + +func (ks *master) Solana() Solana { + return ks.solana +} + +func (ks *master) Cosmos() Cosmos { + return ks.cosmos +} + +func (ks *master) StarkNet() StarkNet { + return ks.starknet +} + +func (ks *master) VRF() VRF { + return ks.vrf +} + +type ORM interface { + isEmpty() (bool, error) + saveEncryptedKeyRing(*encryptedKeyRing, ...func(pg.Queryer) error) error + getEncryptedKeyRing() (encryptedKeyRing, error) +} + +type keystateORM interface { + loadKeyStates() (*keyStates, error) +} + +type keyManager struct { + orm ORM + keystateORM keystateORM + scryptParams utils.ScryptParams + keyRing *keyRing + keyStates *keyStates + lock *sync.RWMutex + password string + logger logger.Logger +} + +func (km *keyManager) IsEmpty() (bool, error) { + return km.orm.isEmpty() +} + +func (km *keyManager) Unlock(password string) error { + km.lock.Lock() + defer km.lock.Unlock() + // DEV: allow Unlock() to be idempotent - this is especially useful in tests, + if km.password != "" { + if password != km.password { + return errors.New("attempting to unlock keystore again with a different password") + } + return nil + } + ekr, err := 
km.orm.getEncryptedKeyRing() + if err != nil { + return errors.Wrap(err, "unable to get encrypted key ring") + } + kr, err := ekr.Decrypt(password) + if err != nil { + return errors.Wrap(err, "unable to decrypt encrypted key ring") + } + kr.logPubKeys(km.logger) + km.keyRing = kr + + ks, err := km.keystateORM.loadKeyStates() + if err != nil { + return errors.Wrap(err, "unable to load key states") + } + km.keyStates = ks + + km.password = password + return nil +} + +// caller must hold lock! +func (km *keyManager) save(callbacks ...func(pg.Queryer) error) error { + ekb, err := km.keyRing.Encrypt(km.password, km.scryptParams) + if err != nil { + return errors.Wrap(err, "unable to encrypt keyRing") + } + return km.orm.saveEncryptedKeyRing(&ekb, callbacks...) +} + +// caller must hold lock! +func (km *keyManager) safeAddKey(unknownKey Key, callbacks ...func(pg.Queryer) error) error { + fieldName, err := GetFieldNameForKey(unknownKey) + if err != nil { + return err + } + // add key to keyring + id := reflect.ValueOf(unknownKey.ID()) + key := reflect.ValueOf(unknownKey) + keyRing := reflect.Indirect(reflect.ValueOf(km.keyRing)) + keyMap := keyRing.FieldByName(fieldName) + keyMap.SetMapIndex(id, key) + // save keyring to DB + err = km.save(callbacks...) + // if save fails, remove key from keyring + if err != nil { + keyMap.SetMapIndex(id, reflect.Value{}) + return err + } + return nil +} + +// caller must hold lock! +func (km *keyManager) safeRemoveKey(unknownKey Key, callbacks ...func(pg.Queryer) error) (err error) { + fieldName, err := GetFieldNameForKey(unknownKey) + if err != nil { + return err + } + id := reflect.ValueOf(unknownKey.ID()) + key := reflect.ValueOf(unknownKey) + keyRing := reflect.Indirect(reflect.ValueOf(km.keyRing)) + keyMap := keyRing.FieldByName(fieldName) + keyMap.SetMapIndex(id, reflect.Value{}) + // save keyring to DB + err = km.save(callbacks...) 
+ // if save fails, add key back to keyRing + if err != nil { + keyMap.SetMapIndex(id, key) + return err + } + return nil +} + +// caller must hold lock! +func (km *keyManager) isLocked() bool { + return len(km.password) == 0 +} + +func GetFieldNameForKey(unknownKey Key) (string, error) { + switch unknownKey.(type) { + case cosmoskey.Key: + return "Cosmos", nil + case csakey.KeyV2: + return "CSA", nil + case ethkey.KeyV2: + return "Eth", nil + case ocrkey.KeyV2: + return "OCR", nil + case ocr2key.KeyBundle: + return "OCR2", nil + case p2pkey.KeyV2: + return "P2P", nil + case solkey.Key: + return "Solana", nil + case starkkey.Key: + return "StarkNet", nil + case vrfkey.KeyV2: + return "VRF", nil + case dkgsignkey.Key: + return "DKGSign", nil + case dkgencryptkey.Key: + return "DKGEncrypt", nil + } + return "", fmt.Errorf("unknown key type: %T", unknownKey) +} + +type Key interface { + ID() string +} diff --git a/core/services/keystore/master_test.go b/core/services/keystore/master_test.go new file mode 100644 index 00000000..c74a3caa --- /dev/null +++ b/core/services/keystore/master_test.go @@ -0,0 +1,63 @@ +package keystore_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" +) + +func TestMasterKeystore_Unlock_Save(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + const tableName = "encrypted_key_rings" + reset := func() { + keyStore.ResetXXXTestOnly() + _, err := db.Exec(fmt.Sprintf("DELETE FROM %s", tableName)) + require.NoError(t, err) + } + + t.Run("can be unlocked more than once, as long as the passwords match", func(t *testing.T) { + defer reset() + 
require.NoError(t, keyStore.Unlock(cltest.Password)) + require.NoError(t, keyStore.Unlock(cltest.Password)) + require.NoError(t, keyStore.Unlock(cltest.Password)) + require.Error(t, keyStore.Unlock("wrong password")) + }) + + t.Run("saves an empty keyRing", func(t *testing.T) { + defer reset() + require.NoError(t, keyStore.Unlock(cltest.Password)) + cltest.AssertCount(t, db, tableName, 1) + require.NoError(t, keyStore.ExportedSave()) + cltest.AssertCount(t, db, tableName, 1) + }) + + t.Run("won't load a saved keyRing if the password is incorrect", func(t *testing.T) { + defer reset() + require.NoError(t, keyStore.Unlock(cltest.Password)) + cltest.MustInsertRandomKey(t, keyStore.Eth()) // need at least 1 key to encrypt + cltest.AssertCount(t, db, tableName, 1) + keyStore.ResetXXXTestOnly() + cltest.AssertCount(t, db, tableName, 1) + require.Error(t, keyStore.Unlock("password2")) + cltest.AssertCount(t, db, tableName, 1) + }) + + t.Run("loads a saved keyRing if the password is correct", func(t *testing.T) { + defer reset() + require.NoError(t, keyStore.Unlock(cltest.Password)) + require.NoError(t, keyStore.ExportedSave()) + keyStore.ResetXXXTestOnly() + require.NoError(t, keyStore.Unlock(cltest.Password)) + }) +} diff --git a/core/services/keystore/mocks/cosmos.go b/core/services/keystore/mocks/cosmos.go new file mode 100644 index 00000000..c5b763d8 --- /dev/null +++ b/core/services/keystore/mocks/cosmos.go @@ -0,0 +1,236 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + cosmoskey "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/cosmoskey" + + mock "github.com/stretchr/testify/mock" +) + +// Cosmos is an autogenerated mock type for the Cosmos type +type Cosmos struct { + mock.Mock +} + +// Add provides a mock function with given fields: key +func (_m *Cosmos) Add(key cosmoskey.Key) error { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(cosmoskey.Key) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Create provides a mock function with given fields: +func (_m *Cosmos) Create() (cosmoskey.Key, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 cosmoskey.Key + var r1 error + if rf, ok := ret.Get(0).(func() (cosmoskey.Key, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() cosmoskey.Key); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(cosmoskey.Key) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: id +func (_m *Cosmos) Delete(id string) (cosmoskey.Key, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 cosmoskey.Key + var r1 error + if rf, ok := ret.Get(0).(func(string) (cosmoskey.Key, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) cosmoskey.Key); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(cosmoskey.Key) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnsureKey provides a mock function with given fields: +func (_m *Cosmos) EnsureKey() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnsureKey") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Export provides a mock function with given fields: id, password +func (_m *Cosmos) Export(id string, password string) ([]byte, error) { + ret := _m.Called(id, password) + + if len(ret) == 0 { + panic("no return value specified for Export") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(string, string) ([]byte, error)); ok { + return rf(id, password) + } + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(id, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(id, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: id +func (_m *Cosmos) Get(id string) (cosmoskey.Key, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 cosmoskey.Key + var r1 error + if rf, ok := ret.Get(0).(func(string) (cosmoskey.Key, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) cosmoskey.Key); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(cosmoskey.Key) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAll provides a mock function with given fields: +func (_m *Cosmos) GetAll() ([]cosmoskey.Key, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAll") + } + + var r0 []cosmoskey.Key + var r1 error + if rf, ok := ret.Get(0).(func() ([]cosmoskey.Key, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []cosmoskey.Key); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]cosmoskey.Key) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Import provides a mock 
function with given fields: keyJSON, password +func (_m *Cosmos) Import(keyJSON []byte, password string) (cosmoskey.Key, error) { + ret := _m.Called(keyJSON, password) + + if len(ret) == 0 { + panic("no return value specified for Import") + } + + var r0 cosmoskey.Key + var r1 error + if rf, ok := ret.Get(0).(func([]byte, string) (cosmoskey.Key, error)); ok { + return rf(keyJSON, password) + } + if rf, ok := ret.Get(0).(func([]byte, string) cosmoskey.Key); ok { + r0 = rf(keyJSON, password) + } else { + r0 = ret.Get(0).(cosmoskey.Key) + } + + if rf, ok := ret.Get(1).(func([]byte, string) error); ok { + r1 = rf(keyJSON, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewCosmos creates a new instance of Cosmos. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCosmos(t interface { + mock.TestingT + Cleanup(func()) +}) *Cosmos { + mock := &Cosmos{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/mocks/csa.go b/core/services/keystore/mocks/csa.go new file mode 100644 index 00000000..520b78b4 --- /dev/null +++ b/core/services/keystore/mocks/csa.go @@ -0,0 +1,236 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + csakey "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + + mock "github.com/stretchr/testify/mock" +) + +// CSA is an autogenerated mock type for the CSA type +type CSA struct { + mock.Mock +} + +// Add provides a mock function with given fields: key +func (_m *CSA) Add(key csakey.KeyV2) error { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(csakey.KeyV2) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Create provides a mock function with given fields: +func (_m *CSA) Create() (csakey.KeyV2, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 csakey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func() (csakey.KeyV2, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() csakey.KeyV2); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(csakey.KeyV2) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: id +func (_m *CSA) Delete(id string) (csakey.KeyV2, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 csakey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(string) (csakey.KeyV2, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) csakey.KeyV2); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(csakey.KeyV2) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnsureKey provides a mock function with given fields: +func (_m *CSA) EnsureKey() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnsureKey") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = 
rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Export provides a mock function with given fields: id, password +func (_m *CSA) Export(id string, password string) ([]byte, error) { + ret := _m.Called(id, password) + + if len(ret) == 0 { + panic("no return value specified for Export") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(string, string) ([]byte, error)); ok { + return rf(id, password) + } + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(id, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(id, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: id +func (_m *CSA) Get(id string) (csakey.KeyV2, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 csakey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(string) (csakey.KeyV2, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) csakey.KeyV2); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(csakey.KeyV2) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAll provides a mock function with given fields: +func (_m *CSA) GetAll() ([]csakey.KeyV2, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAll") + } + + var r0 []csakey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func() ([]csakey.KeyV2, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []csakey.KeyV2); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]csakey.KeyV2) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Import provides a mock function with given fields: keyJSON, password +func (_m 
*CSA) Import(keyJSON []byte, password string) (csakey.KeyV2, error) { + ret := _m.Called(keyJSON, password) + + if len(ret) == 0 { + panic("no return value specified for Import") + } + + var r0 csakey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func([]byte, string) (csakey.KeyV2, error)); ok { + return rf(keyJSON, password) + } + if rf, ok := ret.Get(0).(func([]byte, string) csakey.KeyV2); ok { + r0 = rf(keyJSON, password) + } else { + r0 = ret.Get(0).(csakey.KeyV2) + } + + if rf, ok := ret.Get(1).(func([]byte, string) error); ok { + r1 = rf(keyJSON, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewCSA creates a new instance of CSA. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCSA(t interface { + mock.TestingT + Cleanup(func()) +}) *CSA { + mock := &CSA{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/mocks/dkg_encrypt.go b/core/services/keystore/mocks/dkg_encrypt.go new file mode 100644 index 00000000..343fab40 --- /dev/null +++ b/core/services/keystore/mocks/dkg_encrypt.go @@ -0,0 +1,236 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + dkgencryptkey "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgencryptkey" + + mock "github.com/stretchr/testify/mock" +) + +// DKGEncrypt is an autogenerated mock type for the DKGEncrypt type +type DKGEncrypt struct { + mock.Mock +} + +// Add provides a mock function with given fields: key +func (_m *DKGEncrypt) Add(key dkgencryptkey.Key) error { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(dkgencryptkey.Key) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Create provides a mock function with given fields: +func (_m *DKGEncrypt) Create() (dkgencryptkey.Key, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 dkgencryptkey.Key + var r1 error + if rf, ok := ret.Get(0).(func() (dkgencryptkey.Key, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() dkgencryptkey.Key); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(dkgencryptkey.Key) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: id +func (_m *DKGEncrypt) Delete(id string) (dkgencryptkey.Key, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 dkgencryptkey.Key + var r1 error + if rf, ok := ret.Get(0).(func(string) (dkgencryptkey.Key, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) dkgencryptkey.Key); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(dkgencryptkey.Key) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnsureKey provides a mock function with given fields: +func (_m *DKGEncrypt) EnsureKey() error { + ret := _m.Called() + + if len(ret) == 0 { + 
panic("no return value specified for EnsureKey") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Export provides a mock function with given fields: id, password +func (_m *DKGEncrypt) Export(id string, password string) ([]byte, error) { + ret := _m.Called(id, password) + + if len(ret) == 0 { + panic("no return value specified for Export") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(string, string) ([]byte, error)); ok { + return rf(id, password) + } + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(id, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(id, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: id +func (_m *DKGEncrypt) Get(id string) (dkgencryptkey.Key, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 dkgencryptkey.Key + var r1 error + if rf, ok := ret.Get(0).(func(string) (dkgencryptkey.Key, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) dkgencryptkey.Key); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(dkgencryptkey.Key) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAll provides a mock function with given fields: +func (_m *DKGEncrypt) GetAll() ([]dkgencryptkey.Key, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAll") + } + + var r0 []dkgencryptkey.Key + var r1 error + if rf, ok := ret.Get(0).(func() ([]dkgencryptkey.Key, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []dkgencryptkey.Key); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]dkgencryptkey.Key) + } + } + + if 
rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Import provides a mock function with given fields: keyJSON, password +func (_m *DKGEncrypt) Import(keyJSON []byte, password string) (dkgencryptkey.Key, error) { + ret := _m.Called(keyJSON, password) + + if len(ret) == 0 { + panic("no return value specified for Import") + } + + var r0 dkgencryptkey.Key + var r1 error + if rf, ok := ret.Get(0).(func([]byte, string) (dkgencryptkey.Key, error)); ok { + return rf(keyJSON, password) + } + if rf, ok := ret.Get(0).(func([]byte, string) dkgencryptkey.Key); ok { + r0 = rf(keyJSON, password) + } else { + r0 = ret.Get(0).(dkgencryptkey.Key) + } + + if rf, ok := ret.Get(1).(func([]byte, string) error); ok { + r1 = rf(keyJSON, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewDKGEncrypt creates a new instance of DKGEncrypt. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDKGEncrypt(t interface { + mock.TestingT + Cleanup(func()) +}) *DKGEncrypt { + mock := &DKGEncrypt{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/mocks/dkg_sign.go b/core/services/keystore/mocks/dkg_sign.go new file mode 100644 index 00000000..f8d10bd6 --- /dev/null +++ b/core/services/keystore/mocks/dkg_sign.go @@ -0,0 +1,236 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + dkgsignkey "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgsignkey" + + mock "github.com/stretchr/testify/mock" +) + +// DKGSign is an autogenerated mock type for the DKGSign type +type DKGSign struct { + mock.Mock +} + +// Add provides a mock function with given fields: key +func (_m *DKGSign) Add(key dkgsignkey.Key) error { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(dkgsignkey.Key) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Create provides a mock function with given fields: +func (_m *DKGSign) Create() (dkgsignkey.Key, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 dkgsignkey.Key + var r1 error + if rf, ok := ret.Get(0).(func() (dkgsignkey.Key, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() dkgsignkey.Key); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(dkgsignkey.Key) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: id +func (_m *DKGSign) Delete(id string) (dkgsignkey.Key, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 dkgsignkey.Key + var r1 error + if rf, ok := ret.Get(0).(func(string) (dkgsignkey.Key, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) dkgsignkey.Key); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(dkgsignkey.Key) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnsureKey provides a mock function with given fields: +func (_m *DKGSign) EnsureKey() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnsureKey") + } + + var r0 
error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Export provides a mock function with given fields: id, password +func (_m *DKGSign) Export(id string, password string) ([]byte, error) { + ret := _m.Called(id, password) + + if len(ret) == 0 { + panic("no return value specified for Export") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(string, string) ([]byte, error)); ok { + return rf(id, password) + } + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(id, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(id, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: id +func (_m *DKGSign) Get(id string) (dkgsignkey.Key, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 dkgsignkey.Key + var r1 error + if rf, ok := ret.Get(0).(func(string) (dkgsignkey.Key, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) dkgsignkey.Key); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(dkgsignkey.Key) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAll provides a mock function with given fields: +func (_m *DKGSign) GetAll() ([]dkgsignkey.Key, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAll") + } + + var r0 []dkgsignkey.Key + var r1 error + if rf, ok := ret.Get(0).(func() ([]dkgsignkey.Key, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []dkgsignkey.Key); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]dkgsignkey.Key) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, 
r1 +} + +// Import provides a mock function with given fields: keyJSON, password +func (_m *DKGSign) Import(keyJSON []byte, password string) (dkgsignkey.Key, error) { + ret := _m.Called(keyJSON, password) + + if len(ret) == 0 { + panic("no return value specified for Import") + } + + var r0 dkgsignkey.Key + var r1 error + if rf, ok := ret.Get(0).(func([]byte, string) (dkgsignkey.Key, error)); ok { + return rf(keyJSON, password) + } + if rf, ok := ret.Get(0).(func([]byte, string) dkgsignkey.Key); ok { + r0 = rf(keyJSON, password) + } else { + r0 = ret.Get(0).(dkgsignkey.Key) + } + + if rf, ok := ret.Get(1).(func([]byte, string) error); ok { + r1 = rf(keyJSON, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewDKGSign creates a new instance of DKGSign. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDKGSign(t interface { + mock.TestingT + Cleanup(func()) +}) *DKGSign { + mock := &DKGSign{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/mocks/eth.go b/core/services/keystore/mocks/eth.go new file mode 100644 index 00000000..83a23812 --- /dev/null +++ b/core/services/keystore/mocks/eth.go @@ -0,0 +1,622 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + ethkey "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// Eth is an autogenerated mock type for the Eth type +type Eth struct { + mock.Mock +} + +// Add provides a mock function with given fields: address, chainID, qopts +func (_m *Eth) Add(address common.Address, chainID *big.Int, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, address, chainID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.Address, *big.Int, ...pg.QOpt) error); ok { + r0 = rf(address, chainID, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CheckEnabled provides a mock function with given fields: address, chainID +func (_m *Eth) CheckEnabled(address common.Address, chainID *big.Int) error { + ret := _m.Called(address, chainID) + + if len(ret) == 0 { + panic("no return value specified for CheckEnabled") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.Address, *big.Int) error); ok { + r0 = rf(address, chainID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Create provides a mock function with given fields: chainIDs +func (_m *Eth) Create(chainIDs ...*big.Int) (ethkey.KeyV2, error) { + _va := make([]interface{}, len(chainIDs)) + for _i := range chainIDs { + _va[_i] = chainIDs[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 ethkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(...*big.Int) (ethkey.KeyV2, error)); ok { + return rf(chainIDs...) + } + if rf, ok := ret.Get(0).(func(...*big.Int) ethkey.KeyV2); ok { + r0 = rf(chainIDs...) + } else { + r0 = ret.Get(0).(ethkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func(...*big.Int) error); ok { + r1 = rf(chainIDs...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: id +func (_m *Eth) Delete(id string) (ethkey.KeyV2, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 ethkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(string) (ethkey.KeyV2, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) ethkey.KeyV2); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(ethkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Disable provides a mock function with given fields: address, chainID, qopts +func (_m *Eth) Disable(address common.Address, chainID *big.Int, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, address, chainID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Disable") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.Address, *big.Int, ...pg.QOpt) error); ok { + r0 = rf(address, chainID, qopts...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Enable provides a mock function with given fields: address, chainID, qopts +func (_m *Eth) Enable(address common.Address, chainID *big.Int, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, address, chainID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Enable") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.Address, *big.Int, ...pg.QOpt) error); ok { + r0 = rf(address, chainID, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EnabledAddressesForChain provides a mock function with given fields: chainID +func (_m *Eth) EnabledAddressesForChain(chainID *big.Int) ([]common.Address, error) { + ret := _m.Called(chainID) + + if len(ret) == 0 { + panic("no return value specified for EnabledAddressesForChain") + } + + var r0 []common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int) ([]common.Address, error)); ok { + return rf(chainID) + } + if rf, ok := ret.Get(0).(func(*big.Int) []common.Address); ok { + r0 = rf(chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnabledKeysForChain provides a mock function with given fields: chainID +func (_m *Eth) EnabledKeysForChain(chainID *big.Int) ([]ethkey.KeyV2, error) { + ret := _m.Called(chainID) + + if len(ret) == 0 { + panic("no return value specified for EnabledKeysForChain") + } + + var r0 []ethkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int) ([]ethkey.KeyV2, error)); ok { + return rf(chainID) + } + if rf, ok := ret.Get(0).(func(*big.Int) []ethkey.KeyV2); ok { + r0 = rf(chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ethkey.KeyV2) + 
} + } + + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnsureKeys provides a mock function with given fields: chainIDs +func (_m *Eth) EnsureKeys(chainIDs ...*big.Int) error { + _va := make([]interface{}, len(chainIDs)) + for _i := range chainIDs { + _va[_i] = chainIDs[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for EnsureKeys") + } + + var r0 error + if rf, ok := ret.Get(0).(func(...*big.Int) error); ok { + r0 = rf(chainIDs...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Export provides a mock function with given fields: id, password +func (_m *Eth) Export(id string, password string) ([]byte, error) { + ret := _m.Called(id, password) + + if len(ret) == 0 { + panic("no return value specified for Export") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(string, string) ([]byte, error)); ok { + return rf(id, password) + } + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(id, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(id, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: id +func (_m *Eth) Get(id string) (ethkey.KeyV2, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 ethkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(string) (ethkey.KeyV2, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) ethkey.KeyV2); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(ethkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAll provides a mock function with given fields: 
+func (_m *Eth) GetAll() ([]ethkey.KeyV2, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAll") + } + + var r0 []ethkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func() ([]ethkey.KeyV2, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []ethkey.KeyV2); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ethkey.KeyV2) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRoundRobinAddress provides a mock function with given fields: chainID, addresses +func (_m *Eth) GetRoundRobinAddress(chainID *big.Int, addresses ...common.Address) (common.Address, error) { + _va := make([]interface{}, len(addresses)) + for _i := range addresses { + _va[_i] = addresses[_i] + } + var _ca []interface{} + _ca = append(_ca, chainID) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetRoundRobinAddress") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int, ...common.Address) (common.Address, error)); ok { + return rf(chainID, addresses...) + } + if rf, ok := ret.Get(0).(func(*big.Int, ...common.Address) common.Address); ok { + r0 = rf(chainID, addresses...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*big.Int, ...common.Address) error); ok { + r1 = rf(chainID, addresses...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetState provides a mock function with given fields: id, chainID +func (_m *Eth) GetState(id string, chainID *big.Int) (ethkey.State, error) { + ret := _m.Called(id, chainID) + + if len(ret) == 0 { + panic("no return value specified for GetState") + } + + var r0 ethkey.State + var r1 error + if rf, ok := ret.Get(0).(func(string, *big.Int) (ethkey.State, error)); ok { + return rf(id, chainID) + } + if rf, ok := ret.Get(0).(func(string, *big.Int) ethkey.State); ok { + r0 = rf(id, chainID) + } else { + r0 = ret.Get(0).(ethkey.State) + } + + if rf, ok := ret.Get(1).(func(string, *big.Int) error); ok { + r1 = rf(id, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetStateForKey provides a mock function with given fields: _a0 +func (_m *Eth) GetStateForKey(_a0 ethkey.KeyV2) (ethkey.State, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetStateForKey") + } + + var r0 ethkey.State + var r1 error + if rf, ok := ret.Get(0).(func(ethkey.KeyV2) (ethkey.State, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(ethkey.KeyV2) ethkey.State); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(ethkey.State) + } + + if rf, ok := ret.Get(1).(func(ethkey.KeyV2) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetStatesForChain provides a mock function with given fields: chainID +func (_m *Eth) GetStatesForChain(chainID *big.Int) ([]ethkey.State, error) { + ret := _m.Called(chainID) + + if len(ret) == 0 { + panic("no return value specified for GetStatesForChain") + } + + var r0 []ethkey.State + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int) ([]ethkey.State, error)); ok { + return rf(chainID) + } + if rf, ok := ret.Get(0).(func(*big.Int) []ethkey.State); ok { + r0 = rf(chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ethkey.State) + } + } + + if rf, ok := 
ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetStatesForKeys provides a mock function with given fields: _a0 +func (_m *Eth) GetStatesForKeys(_a0 []ethkey.KeyV2) ([]ethkey.State, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetStatesForKeys") + } + + var r0 []ethkey.State + var r1 error + if rf, ok := ret.Get(0).(func([]ethkey.KeyV2) ([]ethkey.State, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func([]ethkey.KeyV2) []ethkey.State); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ethkey.State) + } + } + + if rf, ok := ret.Get(1).(func([]ethkey.KeyV2) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Import provides a mock function with given fields: keyJSON, password, chainIDs +func (_m *Eth) Import(keyJSON []byte, password string, chainIDs ...*big.Int) (ethkey.KeyV2, error) { + _va := make([]interface{}, len(chainIDs)) + for _i := range chainIDs { + _va[_i] = chainIDs[_i] + } + var _ca []interface{} + _ca = append(_ca, keyJSON, password) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Import") + } + + var r0 ethkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func([]byte, string, ...*big.Int) (ethkey.KeyV2, error)); ok { + return rf(keyJSON, password, chainIDs...) + } + if rf, ok := ret.Get(0).(func([]byte, string, ...*big.Int) ethkey.KeyV2); ok { + r0 = rf(keyJSON, password, chainIDs...) + } else { + r0 = ret.Get(0).(ethkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func([]byte, string, ...*big.Int) error); ok { + r1 = rf(keyJSON, password, chainIDs...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SignTx provides a mock function with given fields: fromAddress, tx, chainID +func (_m *Eth) SignTx(fromAddress common.Address, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { + ret := _m.Called(fromAddress, tx, chainID) + + if len(ret) == 0 { + panic("no return value specified for SignTx") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(common.Address, *types.Transaction, *big.Int) (*types.Transaction, error)); ok { + return rf(fromAddress, tx, chainID) + } + if rf, ok := ret.Get(0).(func(common.Address, *types.Transaction, *big.Int) *types.Transaction); ok { + r0 = rf(fromAddress, tx, chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(common.Address, *types.Transaction, *big.Int) error); ok { + r1 = rf(fromAddress, tx, chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeToKeyChanges provides a mock function with given fields: +func (_m *Eth) SubscribeToKeyChanges() (chan struct{}, func()) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SubscribeToKeyChanges") + } + + var r0 chan struct{} + var r1 func() + if rf, ok := ret.Get(0).(func() (chan struct{}, func())); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(chan struct{}) + } + } + + if rf, ok := ret.Get(1).(func() func()); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(func()) + } + } + + return r0, r1 +} + +// XXXTestingOnlyAdd provides a mock function with given fields: key +func (_m *Eth) XXXTestingOnlyAdd(key ethkey.KeyV2) { + _m.Called(key) +} + +// XXXTestingOnlySetState provides a mock function with given fields: _a0 +func (_m *Eth) XXXTestingOnlySetState(_a0 ethkey.State) { + _m.Called(_a0) +} + +// NewEth creates a new instance 
of Eth. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEth(t interface { + mock.TestingT + Cleanup(func()) +}) *Eth { + mock := &Eth{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/mocks/master.go b/core/services/keystore/mocks/master.go new file mode 100644 index 00000000..2a44c523 --- /dev/null +++ b/core/services/keystore/mocks/master.go @@ -0,0 +1,293 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + keystore "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + mock "github.com/stretchr/testify/mock" +) + +// Master is an autogenerated mock type for the Master type +type Master struct { + mock.Mock +} + +// CSA provides a mock function with given fields: +func (_m *Master) CSA() keystore.CSA { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CSA") + } + + var r0 keystore.CSA + if rf, ok := ret.Get(0).(func() keystore.CSA); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keystore.CSA) + } + } + + return r0 +} + +// Cosmos provides a mock function with given fields: +func (_m *Master) Cosmos() keystore.Cosmos { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Cosmos") + } + + var r0 keystore.Cosmos + if rf, ok := ret.Get(0).(func() keystore.Cosmos); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keystore.Cosmos) + } + } + + return r0 +} + +// DKGEncrypt provides a mock function with given fields: +func (_m *Master) DKGEncrypt() keystore.DKGEncrypt { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DKGEncrypt") + } + + var r0 keystore.DKGEncrypt + if rf, ok := ret.Get(0).(func() keystore.DKGEncrypt); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(keystore.DKGEncrypt) + } + } + + return r0 +} + +// DKGSign provides a mock function with given fields: +func (_m *Master) DKGSign() keystore.DKGSign { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DKGSign") + } + + var r0 keystore.DKGSign + if rf, ok := ret.Get(0).(func() keystore.DKGSign); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keystore.DKGSign) + } + } + + return r0 +} + +// Eth provides a mock function with given fields: +func (_m *Master) Eth() keystore.Eth { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Eth") + } + + var r0 keystore.Eth + if rf, ok := ret.Get(0).(func() keystore.Eth); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keystore.Eth) + } + } + + return r0 +} + +// IsEmpty provides a mock function with given fields: +func (_m *Master) IsEmpty() (bool, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsEmpty") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func() (bool, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OCR provides a mock function with given fields: +func (_m *Master) OCR() keystore.OCR { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OCR") + } + + var r0 keystore.OCR + if rf, ok := ret.Get(0).(func() keystore.OCR); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keystore.OCR) + } + } + + return r0 +} + +// OCR2 provides a mock function with given fields: +func (_m *Master) OCR2() keystore.OCR2 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OCR2") + } + + var r0 keystore.OCR2 + if rf, ok := ret.Get(0).(func() keystore.OCR2); ok { + r0 = 
rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keystore.OCR2) + } + } + + return r0 +} + +// P2P provides a mock function with given fields: +func (_m *Master) P2P() keystore.P2P { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for P2P") + } + + var r0 keystore.P2P + if rf, ok := ret.Get(0).(func() keystore.P2P); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keystore.P2P) + } + } + + return r0 +} + +// Solana provides a mock function with given fields: +func (_m *Master) Solana() keystore.Solana { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Solana") + } + + var r0 keystore.Solana + if rf, ok := ret.Get(0).(func() keystore.Solana); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keystore.Solana) + } + } + + return r0 +} + +// StarkNet provides a mock function with given fields: +func (_m *Master) StarkNet() keystore.StarkNet { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for StarkNet") + } + + var r0 keystore.StarkNet + if rf, ok := ret.Get(0).(func() keystore.StarkNet); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keystore.StarkNet) + } + } + + return r0 +} + +// Unlock provides a mock function with given fields: password +func (_m *Master) Unlock(password string) error { + ret := _m.Called(password) + + if len(ret) == 0 { + panic("no return value specified for Unlock") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(password) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// VRF provides a mock function with given fields: +func (_m *Master) VRF() keystore.VRF { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for VRF") + } + + var r0 keystore.VRF + if rf, ok := ret.Get(0).(func() keystore.VRF); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(keystore.VRF) + } + } + + return r0 
+} + +// NewMaster creates a new instance of Master. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMaster(t interface { + mock.TestingT + Cleanup(func()) +}) *Master { + mock := &Master{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/mocks/ocr.go b/core/services/keystore/mocks/ocr.go new file mode 100644 index 00000000..f5db6d28 --- /dev/null +++ b/core/services/keystore/mocks/ocr.go @@ -0,0 +1,235 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + ocrkey "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" + mock "github.com/stretchr/testify/mock" +) + +// OCR is an autogenerated mock type for the OCR type +type OCR struct { + mock.Mock +} + +// Add provides a mock function with given fields: key +func (_m *OCR) Add(key ocrkey.KeyV2) error { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(ocrkey.KeyV2) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Create provides a mock function with given fields: +func (_m *OCR) Create() (ocrkey.KeyV2, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 ocrkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func() (ocrkey.KeyV2, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() ocrkey.KeyV2); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(ocrkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: id +func (_m *OCR) Delete(id string) (ocrkey.KeyV2, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value 
specified for Delete") + } + + var r0 ocrkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(string) (ocrkey.KeyV2, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) ocrkey.KeyV2); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(ocrkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnsureKey provides a mock function with given fields: +func (_m *OCR) EnsureKey() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnsureKey") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Export provides a mock function with given fields: id, password +func (_m *OCR) Export(id string, password string) ([]byte, error) { + ret := _m.Called(id, password) + + if len(ret) == 0 { + panic("no return value specified for Export") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(string, string) ([]byte, error)); ok { + return rf(id, password) + } + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(id, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(id, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: id +func (_m *OCR) Get(id string) (ocrkey.KeyV2, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 ocrkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(string) (ocrkey.KeyV2, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) ocrkey.KeyV2); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(ocrkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
GetAll provides a mock function with given fields: +func (_m *OCR) GetAll() ([]ocrkey.KeyV2, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAll") + } + + var r0 []ocrkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func() ([]ocrkey.KeyV2, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []ocrkey.KeyV2); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ocrkey.KeyV2) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Import provides a mock function with given fields: keyJSON, password +func (_m *OCR) Import(keyJSON []byte, password string) (ocrkey.KeyV2, error) { + ret := _m.Called(keyJSON, password) + + if len(ret) == 0 { + panic("no return value specified for Import") + } + + var r0 ocrkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func([]byte, string) (ocrkey.KeyV2, error)); ok { + return rf(keyJSON, password) + } + if rf, ok := ret.Get(0).(func([]byte, string) ocrkey.KeyV2); ok { + r0 = rf(keyJSON, password) + } else { + r0 = ret.Get(0).(ocrkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func([]byte, string) error); ok { + r1 = rf(keyJSON, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewOCR creates a new instance of OCR. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewOCR(t interface { + mock.TestingT + Cleanup(func()) +}) *OCR { + mock := &OCR{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/mocks/ocr2.go b/core/services/keystore/mocks/ocr2.go new file mode 100644 index 00000000..6694e7cc --- /dev/null +++ b/core/services/keystore/mocks/ocr2.go @@ -0,0 +1,270 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + chaintype "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + + mock "github.com/stretchr/testify/mock" + + ocr2key "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" +) + +// OCR2 is an autogenerated mock type for the OCR2 type +type OCR2 struct { + mock.Mock +} + +// Add provides a mock function with given fields: key +func (_m *OCR2) Add(key ocr2key.KeyBundle) error { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(ocr2key.KeyBundle) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Create provides a mock function with given fields: _a0 +func (_m *OCR2) Create(_a0 chaintype.ChainType) (ocr2key.KeyBundle, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 ocr2key.KeyBundle + var r1 error + if rf, ok := ret.Get(0).(func(chaintype.ChainType) (ocr2key.KeyBundle, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(chaintype.ChainType) ocr2key.KeyBundle); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ocr2key.KeyBundle) + } + } + + if rf, ok := ret.Get(1).(func(chaintype.ChainType) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: id +func (_m *OCR2) Delete(id string) error { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EnsureKeys provides a mock function with given fields: enabledChains +func (_m *OCR2) EnsureKeys(enabledChains ...chaintype.ChainType) error { + _va := make([]interface{}, len(enabledChains)) + for _i := range enabledChains { + _va[_i] = enabledChains[_i] + } + 
var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for EnsureKeys") + } + + var r0 error + if rf, ok := ret.Get(0).(func(...chaintype.ChainType) error); ok { + r0 = rf(enabledChains...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Export provides a mock function with given fields: id, password +func (_m *OCR2) Export(id string, password string) ([]byte, error) { + ret := _m.Called(id, password) + + if len(ret) == 0 { + panic("no return value specified for Export") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(string, string) ([]byte, error)); ok { + return rf(id, password) + } + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(id, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(id, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: id +func (_m *OCR2) Get(id string) (ocr2key.KeyBundle, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 ocr2key.KeyBundle + var r1 error + if rf, ok := ret.Get(0).(func(string) (ocr2key.KeyBundle, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) ocr2key.KeyBundle); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ocr2key.KeyBundle) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAll provides a mock function with given fields: +func (_m *OCR2) GetAll() ([]ocr2key.KeyBundle, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAll") + } + + var r0 []ocr2key.KeyBundle + var r1 error + if rf, ok := ret.Get(0).(func() ([]ocr2key.KeyBundle, error)); ok { + return rf() + } + if rf, ok := 
ret.Get(0).(func() []ocr2key.KeyBundle); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ocr2key.KeyBundle) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAllOfType provides a mock function with given fields: _a0 +func (_m *OCR2) GetAllOfType(_a0 chaintype.ChainType) ([]ocr2key.KeyBundle, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetAllOfType") + } + + var r0 []ocr2key.KeyBundle + var r1 error + if rf, ok := ret.Get(0).(func(chaintype.ChainType) ([]ocr2key.KeyBundle, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(chaintype.ChainType) []ocr2key.KeyBundle); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ocr2key.KeyBundle) + } + } + + if rf, ok := ret.Get(1).(func(chaintype.ChainType) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Import provides a mock function with given fields: keyJSON, password +func (_m *OCR2) Import(keyJSON []byte, password string) (ocr2key.KeyBundle, error) { + ret := _m.Called(keyJSON, password) + + if len(ret) == 0 { + panic("no return value specified for Import") + } + + var r0 ocr2key.KeyBundle + var r1 error + if rf, ok := ret.Get(0).(func([]byte, string) (ocr2key.KeyBundle, error)); ok { + return rf(keyJSON, password) + } + if rf, ok := ret.Get(0).(func([]byte, string) ocr2key.KeyBundle); ok { + r0 = rf(keyJSON, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ocr2key.KeyBundle) + } + } + + if rf, ok := ret.Get(1).(func([]byte, string) error); ok { + r1 = rf(keyJSON, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewOCR2 creates a new instance of OCR2. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewOCR2(t interface { + mock.TestingT + Cleanup(func()) +}) *OCR2 { + mock := &OCR2{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/mocks/p2p.go b/core/services/keystore/mocks/p2p.go new file mode 100644 index 00000000..44f62a12 --- /dev/null +++ b/core/services/keystore/mocks/p2p.go @@ -0,0 +1,263 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + p2pkey "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + mock "github.com/stretchr/testify/mock" +) + +// P2P is an autogenerated mock type for the P2P type +type P2P struct { + mock.Mock +} + +// Add provides a mock function with given fields: key +func (_m *P2P) Add(key p2pkey.KeyV2) error { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(p2pkey.KeyV2) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Create provides a mock function with given fields: +func (_m *P2P) Create() (p2pkey.KeyV2, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 p2pkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func() (p2pkey.KeyV2, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() p2pkey.KeyV2); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(p2pkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: id +func (_m *P2P) Delete(id p2pkey.PeerID) (p2pkey.KeyV2, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 p2pkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(p2pkey.PeerID) (p2pkey.KeyV2, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(p2pkey.PeerID) 
p2pkey.KeyV2); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(p2pkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func(p2pkey.PeerID) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnsureKey provides a mock function with given fields: +func (_m *P2P) EnsureKey() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnsureKey") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Export provides a mock function with given fields: id, password +func (_m *P2P) Export(id p2pkey.PeerID, password string) ([]byte, error) { + ret := _m.Called(id, password) + + if len(ret) == 0 { + panic("no return value specified for Export") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(p2pkey.PeerID, string) ([]byte, error)); ok { + return rf(id, password) + } + if rf, ok := ret.Get(0).(func(p2pkey.PeerID, string) []byte); ok { + r0 = rf(id, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(p2pkey.PeerID, string) error); ok { + r1 = rf(id, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: id +func (_m *P2P) Get(id p2pkey.PeerID) (p2pkey.KeyV2, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 p2pkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(p2pkey.PeerID) (p2pkey.KeyV2, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(p2pkey.PeerID) p2pkey.KeyV2); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(p2pkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func(p2pkey.PeerID) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAll provides a mock function with given fields: +func (_m *P2P) GetAll() ([]p2pkey.KeyV2, error) { + ret := _m.Called() + + if 
len(ret) == 0 { + panic("no return value specified for GetAll") + } + + var r0 []p2pkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func() ([]p2pkey.KeyV2, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []p2pkey.KeyV2); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]p2pkey.KeyV2) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetOrFirst provides a mock function with given fields: id +func (_m *P2P) GetOrFirst(id p2pkey.PeerID) (p2pkey.KeyV2, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for GetOrFirst") + } + + var r0 p2pkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(p2pkey.PeerID) (p2pkey.KeyV2, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(p2pkey.PeerID) p2pkey.KeyV2); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(p2pkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func(p2pkey.PeerID) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Import provides a mock function with given fields: keyJSON, password +func (_m *P2P) Import(keyJSON []byte, password string) (p2pkey.KeyV2, error) { + ret := _m.Called(keyJSON, password) + + if len(ret) == 0 { + panic("no return value specified for Import") + } + + var r0 p2pkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func([]byte, string) (p2pkey.KeyV2, error)); ok { + return rf(keyJSON, password) + } + if rf, ok := ret.Get(0).(func([]byte, string) p2pkey.KeyV2); ok { + r0 = rf(keyJSON, password) + } else { + r0 = ret.Get(0).(p2pkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func([]byte, string) error); ok { + r1 = rf(keyJSON, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewP2P creates a new instance of P2P. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewP2P(t interface { + mock.TestingT + Cleanup(func()) +}) *P2P { + mock := &P2P{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/mocks/solana.go b/core/services/keystore/mocks/solana.go new file mode 100644 index 00000000..5482853f --- /dev/null +++ b/core/services/keystore/mocks/solana.go @@ -0,0 +1,268 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + solkey "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" +) + +// Solana is an autogenerated mock type for the Solana type +type Solana struct { + mock.Mock +} + +// Add provides a mock function with given fields: key +func (_m *Solana) Add(key solkey.Key) error { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(solkey.Key) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Create provides a mock function with given fields: +func (_m *Solana) Create() (solkey.Key, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 solkey.Key + var r1 error + if rf, ok := ret.Get(0).(func() (solkey.Key, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() solkey.Key); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(solkey.Key) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: id +func (_m *Solana) Delete(id string) (solkey.Key, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 solkey.Key + var r1 error + if rf, ok := ret.Get(0).(func(string) (solkey.Key, error)); ok { + 
return rf(id) + } + if rf, ok := ret.Get(0).(func(string) solkey.Key); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(solkey.Key) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnsureKey provides a mock function with given fields: +func (_m *Solana) EnsureKey() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnsureKey") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Export provides a mock function with given fields: id, password +func (_m *Solana) Export(id string, password string) ([]byte, error) { + ret := _m.Called(id, password) + + if len(ret) == 0 { + panic("no return value specified for Export") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(string, string) ([]byte, error)); ok { + return rf(id, password) + } + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(id, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(id, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: id +func (_m *Solana) Get(id string) (solkey.Key, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 solkey.Key + var r1 error + if rf, ok := ret.Get(0).(func(string) (solkey.Key, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) solkey.Key); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(solkey.Key) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAll provides a mock function with given fields: +func (_m *Solana) GetAll() ([]solkey.Key, error) { + ret := _m.Called() + + if len(ret) == 0 { 
+ panic("no return value specified for GetAll") + } + + var r0 []solkey.Key + var r1 error + if rf, ok := ret.Get(0).(func() ([]solkey.Key, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []solkey.Key); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]solkey.Key) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Import provides a mock function with given fields: keyJSON, password +func (_m *Solana) Import(keyJSON []byte, password string) (solkey.Key, error) { + ret := _m.Called(keyJSON, password) + + if len(ret) == 0 { + panic("no return value specified for Import") + } + + var r0 solkey.Key + var r1 error + if rf, ok := ret.Get(0).(func([]byte, string) (solkey.Key, error)); ok { + return rf(keyJSON, password) + } + if rf, ok := ret.Get(0).(func([]byte, string) solkey.Key); ok { + r0 = rf(keyJSON, password) + } else { + r0 = ret.Get(0).(solkey.Key) + } + + if rf, ok := ret.Get(1).(func([]byte, string) error); ok { + r1 = rf(keyJSON, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Sign provides a mock function with given fields: ctx, id, msg +func (_m *Solana) Sign(ctx context.Context, id string, msg []byte) ([]byte, error) { + ret := _m.Called(ctx, id, msg) + + if len(ret) == 0 { + panic("no return value specified for Sign") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, []byte) ([]byte, error)); ok { + return rf(ctx, id, msg) + } + if rf, ok := ret.Get(0).(func(context.Context, string, []byte) []byte); ok { + r0 = rf(ctx, id, msg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, []byte) error); ok { + r1 = rf(ctx, id, msg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewSolana creates a new instance of Solana. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSolana(t interface { + mock.TestingT + Cleanup(func()) +}) *Solana { + mock := &Solana{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/mocks/starknet.go b/core/services/keystore/mocks/starknet.go new file mode 100644 index 00000000..447acab0 --- /dev/null +++ b/core/services/keystore/mocks/starknet.go @@ -0,0 +1,235 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + starkkey "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/starkkey" + mock "github.com/stretchr/testify/mock" +) + +// StarkNet is an autogenerated mock type for the StarkNet type +type StarkNet struct { + mock.Mock +} + +// Add provides a mock function with given fields: key +func (_m *StarkNet) Add(key starkkey.Key) error { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(starkkey.Key) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Create provides a mock function with given fields: +func (_m *StarkNet) Create() (starkkey.Key, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 starkkey.Key + var r1 error + if rf, ok := ret.Get(0).(func() (starkkey.Key, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() starkkey.Key); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(starkkey.Key) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: id +func (_m *StarkNet) Delete(id string) (starkkey.Key, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value 
specified for Delete") + } + + var r0 starkkey.Key + var r1 error + if rf, ok := ret.Get(0).(func(string) (starkkey.Key, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) starkkey.Key); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(starkkey.Key) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EnsureKey provides a mock function with given fields: +func (_m *StarkNet) EnsureKey() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnsureKey") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Export provides a mock function with given fields: id, password +func (_m *StarkNet) Export(id string, password string) ([]byte, error) { + ret := _m.Called(id, password) + + if len(ret) == 0 { + panic("no return value specified for Export") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(string, string) ([]byte, error)); ok { + return rf(id, password) + } + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(id, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(id, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: id +func (_m *StarkNet) Get(id string) (starkkey.Key, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 starkkey.Key + var r1 error + if rf, ok := ret.Get(0).(func(string) (starkkey.Key, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) starkkey.Key); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(starkkey.Key) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 
+} + +// GetAll provides a mock function with given fields: +func (_m *StarkNet) GetAll() ([]starkkey.Key, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAll") + } + + var r0 []starkkey.Key + var r1 error + if rf, ok := ret.Get(0).(func() ([]starkkey.Key, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []starkkey.Key); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]starkkey.Key) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Import provides a mock function with given fields: keyJSON, password +func (_m *StarkNet) Import(keyJSON []byte, password string) (starkkey.Key, error) { + ret := _m.Called(keyJSON, password) + + if len(ret) == 0 { + panic("no return value specified for Import") + } + + var r0 starkkey.Key + var r1 error + if rf, ok := ret.Get(0).(func([]byte, string) (starkkey.Key, error)); ok { + return rf(keyJSON, password) + } + if rf, ok := ret.Get(0).(func([]byte, string) starkkey.Key); ok { + r0 = rf(keyJSON, password) + } else { + r0 = ret.Get(0).(starkkey.Key) + } + + if rf, ok := ret.Get(1).(func([]byte, string) error); ok { + r1 = rf(keyJSON, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewStarkNet creates a new instance of StarkNet. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStarkNet(t interface { + mock.TestingT + Cleanup(func()) +}) *StarkNet { + mock := &StarkNet{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/mocks/vrf.go b/core/services/keystore/mocks/vrf.go new file mode 100644 index 00000000..c6434e92 --- /dev/null +++ b/core/services/keystore/mocks/vrf.go @@ -0,0 +1,248 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + mock "github.com/stretchr/testify/mock" + + vrfkey "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" +) + +// VRF is an autogenerated mock type for the VRF type +type VRF struct { + mock.Mock +} + +// Add provides a mock function with given fields: key +func (_m *VRF) Add(key vrfkey.KeyV2) error { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 error + if rf, ok := ret.Get(0).(func(vrfkey.KeyV2) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Create provides a mock function with given fields: +func (_m *VRF) Create() (vrfkey.KeyV2, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 vrfkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func() (vrfkey.KeyV2, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() vrfkey.KeyV2); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(vrfkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Delete provides a mock function with given fields: id +func (_m *VRF) Delete(id string) (vrfkey.KeyV2, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 vrfkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(string) (vrfkey.KeyV2, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) vrfkey.KeyV2); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(vrfkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Export provides a mock function with given fields: id, password +func (_m *VRF) Export(id string, password string) ([]byte, error) { + ret := _m.Called(id, password) + + if len(ret) == 0 { + panic("no return value specified for Export") + } 
+ + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(string, string) ([]byte, error)); ok { + return rf(id, password) + } + if rf, ok := ret.Get(0).(func(string, string) []byte); ok { + r0 = rf(id, password) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(id, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GenerateProof provides a mock function with given fields: id, seed +func (_m *VRF) GenerateProof(id string, seed *big.Int) (vrfkey.Proof, error) { + ret := _m.Called(id, seed) + + if len(ret) == 0 { + panic("no return value specified for GenerateProof") + } + + var r0 vrfkey.Proof + var r1 error + if rf, ok := ret.Get(0).(func(string, *big.Int) (vrfkey.Proof, error)); ok { + return rf(id, seed) + } + if rf, ok := ret.Get(0).(func(string, *big.Int) vrfkey.Proof); ok { + r0 = rf(id, seed) + } else { + r0 = ret.Get(0).(vrfkey.Proof) + } + + if rf, ok := ret.Get(1).(func(string, *big.Int) error); ok { + r1 = rf(id, seed) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: id +func (_m *VRF) Get(id string) (vrfkey.KeyV2, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 vrfkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func(string) (vrfkey.KeyV2, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(string) vrfkey.KeyV2); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(vrfkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAll provides a mock function with given fields: +func (_m *VRF) GetAll() ([]vrfkey.KeyV2, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAll") + } + + var r0 []vrfkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func() ([]vrfkey.KeyV2, 
error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []vrfkey.KeyV2); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]vrfkey.KeyV2) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Import provides a mock function with given fields: keyJSON, password +func (_m *VRF) Import(keyJSON []byte, password string) (vrfkey.KeyV2, error) { + ret := _m.Called(keyJSON, password) + + if len(ret) == 0 { + panic("no return value specified for Import") + } + + var r0 vrfkey.KeyV2 + var r1 error + if rf, ok := ret.Get(0).(func([]byte, string) (vrfkey.KeyV2, error)); ok { + return rf(keyJSON, password) + } + if rf, ok := ret.Get(0).(func([]byte, string) vrfkey.KeyV2); ok { + r0 = rf(keyJSON, password) + } else { + r0 = ret.Get(0).(vrfkey.KeyV2) + } + + if rf, ok := ret.Get(1).(func([]byte, string) error); ok { + r1 = rf(keyJSON, password) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewVRF creates a new instance of VRF. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewVRF(t interface { + mock.TestingT + Cleanup(func()) +}) *VRF { + mock := &VRF{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/keystore/models.go b/core/services/keystore/models.go new file mode 100644 index 00000000..d234dd04 --- /dev/null +++ b/core/services/keystore/models.go @@ -0,0 +1,405 @@ +package keystore + +import ( + "encoding/json" + "fmt" + "math/big" + "time" + + gethkeystore "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/cosmoskey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgencryptkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgsignkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/starkkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type encryptedKeyRing struct { + UpdatedAt time.Time + EncryptedKeys []byte +} + +func (ekr encryptedKeyRing) Decrypt(password string) (*keyRing, error) { + if len(ekr.EncryptedKeys) == 0 { + return newKeyRing(), nil + } + var cryptoJSON gethkeystore.CryptoJSON + err := json.Unmarshal(ekr.EncryptedKeys, &cryptoJSON) + if err != nil { + return nil, err + } + marshalledRawKeyRingJson, err := gethkeystore.DecryptDataV3(cryptoJSON, adulteratedPassword(password)) + if err != 
nil { + return nil, err + } + var rawKeys rawKeyRing + err = json.Unmarshal(marshalledRawKeyRingJson, &rawKeys) + if err != nil { + return nil, err + } + ring, err := rawKeys.keys() + if err != nil { + return nil, err + } + + err = rawKeys.LegacyKeys.StoreUnsupported(marshalledRawKeyRingJson, ring) + if err != nil { + return nil, err + } + ring.LegacyKeys = rawKeys.LegacyKeys + + return ring, nil +} + +type keyStates struct { + // Key ID => chain ID => state + KeyIDChainID map[string]map[string]*ethkey.State + // Chain ID => Key ID => state + ChainIDKeyID map[string]map[string]*ethkey.State + All []*ethkey.State +} + +func newKeyStates() *keyStates { + return &keyStates{ + KeyIDChainID: make(map[string]map[string]*ethkey.State), + ChainIDKeyID: make(map[string]map[string]*ethkey.State), + } +} + +// warning: not thread-safe! caller must sync +// adds or replaces a state +func (ks *keyStates) add(state *ethkey.State) { + cid := state.EVMChainID.String() + kid := state.KeyID() + + keyStates, exists := ks.KeyIDChainID[kid] + if !exists { + keyStates = make(map[string]*ethkey.State) + ks.KeyIDChainID[kid] = keyStates + } + keyStates[cid] = state + + chainStates, exists := ks.ChainIDKeyID[cid] + if !exists { + chainStates = make(map[string]*ethkey.State) + ks.ChainIDKeyID[cid] = chainStates + } + chainStates[kid] = state + + exists = false + for i, existingState := range ks.All { + if existingState.ID == state.ID { + ks.All[i] = state + exists = true + break + } + } + if !exists { + ks.All = append(ks.All, state) + } +} + +// warning: not thread-safe! caller must sync +func (ks *keyStates) get(addr common.Address, chainID *big.Int) *ethkey.State { + chainStates, exists := ks.KeyIDChainID[addr.Hex()] + if !exists { + return nil + } + return chainStates[chainID.String()] +} + +// warning: not thread-safe! 
caller must sync +func (ks *keyStates) disable(addr common.Address, chainID *big.Int, updatedAt time.Time) { + state := ks.get(addr, chainID) + state.Disabled = true + state.UpdatedAt = updatedAt +} + +// warning: not thread-safe! caller must sync +func (ks *keyStates) enable(addr common.Address, chainID *big.Int, updatedAt time.Time) { + state := ks.get(addr, chainID) + state.Disabled = false + state.UpdatedAt = updatedAt +} + +// warning: not thread-safe! caller must sync +func (ks *keyStates) delete(addr common.Address) { + var chainIDs []*big.Int + for i := len(ks.All) - 1; i >= 0; i-- { + if ks.All[i].Address.Address() == addr { + chainIDs = append(chainIDs, ks.All[i].EVMChainID.ToInt()) + ks.All = append(ks.All[:i], ks.All[i+1:]...) + } + } + for _, cid := range chainIDs { + delete(ks.KeyIDChainID[addr.Hex()], cid.String()) + delete(ks.ChainIDKeyID[cid.String()], addr.Hex()) + } +} + +type keyRing struct { + CSA map[string]csakey.KeyV2 + Eth map[string]ethkey.KeyV2 + OCR map[string]ocrkey.KeyV2 + OCR2 map[string]ocr2key.KeyBundle + P2P map[string]p2pkey.KeyV2 + Cosmos map[string]cosmoskey.Key + Solana map[string]solkey.Key + StarkNet map[string]starkkey.Key + VRF map[string]vrfkey.KeyV2 + DKGSign map[string]dkgsignkey.Key + DKGEncrypt map[string]dkgencryptkey.Key + LegacyKeys LegacyKeyStorage +} + +func newKeyRing() *keyRing { + return &keyRing{ + CSA: make(map[string]csakey.KeyV2), + Eth: make(map[string]ethkey.KeyV2), + OCR: make(map[string]ocrkey.KeyV2), + OCR2: make(map[string]ocr2key.KeyBundle), + P2P: make(map[string]p2pkey.KeyV2), + Cosmos: make(map[string]cosmoskey.Key), + Solana: make(map[string]solkey.Key), + StarkNet: make(map[string]starkkey.Key), + VRF: make(map[string]vrfkey.KeyV2), + DKGSign: make(map[string]dkgsignkey.Key), + DKGEncrypt: make(map[string]dkgencryptkey.Key), + } +} + +func (kr *keyRing) Encrypt(password string, scryptParams utils.ScryptParams) (ekr encryptedKeyRing, err error) { + marshalledRawKeyRingJson, err := 
json.Marshal(kr.raw()) + if err != nil { + return ekr, err + } + + marshalledRawKeyRingJson, err = kr.LegacyKeys.UnloadUnsupported(marshalledRawKeyRingJson) + if err != nil { + return encryptedKeyRing{}, err + } + + cryptoJSON, err := gethkeystore.EncryptDataV3( + marshalledRawKeyRingJson, + []byte(adulteratedPassword(password)), + scryptParams.N, + scryptParams.P, + ) + if err != nil { + return ekr, errors.Wrapf(err, "could not encrypt key ring") + } + encryptedKeys, err := json.Marshal(&cryptoJSON) + if err != nil { + return ekr, errors.Wrapf(err, "could not encode cryptoJSON") + } + return encryptedKeyRing{ + EncryptedKeys: encryptedKeys, + }, nil +} + +func (kr *keyRing) raw() (rawKeys rawKeyRing) { + for _, csaKey := range kr.CSA { + rawKeys.CSA = append(rawKeys.CSA, csaKey.Raw()) + } + for _, ethKey := range kr.Eth { + rawKeys.Eth = append(rawKeys.Eth, ethKey.Raw()) + } + for _, ocrKey := range kr.OCR { + rawKeys.OCR = append(rawKeys.OCR, ocrKey.Raw()) + } + for _, ocr2key := range kr.OCR2 { + rawKeys.OCR2 = append(rawKeys.OCR2, ocr2key.Raw()) + } + for _, p2pKey := range kr.P2P { + rawKeys.P2P = append(rawKeys.P2P, p2pKey.Raw()) + } + for _, cosmoskey := range kr.Cosmos { + rawKeys.Cosmos = append(rawKeys.Cosmos, cosmoskey.Raw()) + } + for _, solkey := range kr.Solana { + rawKeys.Solana = append(rawKeys.Solana, solkey.Raw()) + } + for _, starkkey := range kr.StarkNet { + rawKeys.StarkNet = append(rawKeys.StarkNet, starkkey.Raw()) + } + for _, vrfKey := range kr.VRF { + rawKeys.VRF = append(rawKeys.VRF, vrfKey.Raw()) + } + for _, dkgSignKey := range kr.DKGSign { + rawKeys.DKGSign = append(rawKeys.DKGSign, dkgSignKey.Raw()) + } + for _, dkgEncryptKey := range kr.DKGEncrypt { + rawKeys.DKGEncrypt = append(rawKeys.DKGEncrypt, dkgEncryptKey.Raw()) + } + return rawKeys +} + +func (kr *keyRing) logPubKeys(lggr logger.Logger) { + lggr = lggr.Named("KeyRing") + var csaIDs []string + for _, CSAKey := range kr.CSA { + csaIDs = append(csaIDs, CSAKey.ID()) + } + var 
ethIDs []string + for _, ETHKey := range kr.Eth { + ethIDs = append(ethIDs, ETHKey.ID()) + } + var ocrIDs []string + for _, OCRKey := range kr.OCR { + ocrIDs = append(ocrIDs, OCRKey.ID()) + } + var ocr2IDs []string + for _, OCR2Key := range kr.OCR2 { + ocr2IDs = append(ocr2IDs, OCR2Key.ID()) + } + var p2pIDs []string + for _, P2PKey := range kr.P2P { + p2pIDs = append(p2pIDs, P2PKey.ID()) + } + var cosmosIDs []string + for _, cosmosKey := range kr.Cosmos { + cosmosIDs = append(cosmosIDs, cosmosKey.ID()) + } + var solanaIDs []string + for _, solanaKey := range kr.Solana { + solanaIDs = append(solanaIDs, solanaKey.ID()) + } + var starknetIDs []string + for _, starkkey := range kr.StarkNet { + starknetIDs = append(starknetIDs, starkkey.ID()) + } + var vrfIDs []string + for _, VRFKey := range kr.VRF { + vrfIDs = append(vrfIDs, VRFKey.ID()) + } + var dkgSignIDs []string + for _, dkgSignKey := range kr.DKGSign { + dkgSignIDs = append(dkgSignIDs, dkgSignKey.ID()) + } + var dkgEncryptIDs []string + for _, dkgEncryptKey := range kr.DKGEncrypt { + dkgEncryptIDs = append(dkgEncryptIDs, dkgEncryptKey.ID()) + } + if len(csaIDs) > 0 { + lggr.Infow(fmt.Sprintf("Unlocked %d CSA keys", len(csaIDs)), "keys", csaIDs) + } + if len(ethIDs) > 0 { + lggr.Infow(fmt.Sprintf("Unlocked %d ETH keys", len(ethIDs)), "keys", ethIDs) + } + if len(ocrIDs) > 0 { + lggr.Infow(fmt.Sprintf("Unlocked %d OCR keys", len(ocrIDs)), "keys", ocrIDs) + } + if len(ocr2IDs) > 0 { + lggr.Infow(fmt.Sprintf("Unlocked %d OCR2 keys", len(ocr2IDs)), "keys", ocr2IDs) + } + if len(p2pIDs) > 0 { + lggr.Infow(fmt.Sprintf("Unlocked %d P2P keys", len(p2pIDs)), "keys", p2pIDs) + } + if len(cosmosIDs) > 0 { + lggr.Infow(fmt.Sprintf("Unlocked %d Cosmos keys", len(cosmosIDs)), "keys", cosmosIDs) + } + if len(solanaIDs) > 0 { + lggr.Infow(fmt.Sprintf("Unlocked %d Solana keys", len(solanaIDs)), "keys", solanaIDs) + } + if len(starknetIDs) > 0 { + lggr.Infow(fmt.Sprintf("Unlocked %d StarkNet keys", len(starknetIDs)), "keys", 
starknetIDs) + } + if len(vrfIDs) > 0 { + lggr.Infow(fmt.Sprintf("Unlocked %d VRF keys", len(vrfIDs)), "keys", vrfIDs) + } + if len(dkgSignIDs) > 0 { + lggr.Infow(fmt.Sprintf("Unlocked %d DKGSign keys", len(dkgSignIDs)), "keys", dkgSignIDs) + } + if len(dkgEncryptIDs) > 0 { + lggr.Infow(fmt.Sprintf("Unlocked %d DKGEncrypt keys", len(dkgEncryptIDs)), "keys", dkgEncryptIDs) + } + if len(kr.LegacyKeys.legacyRawKeys) > 0 { + lggr.Infow(fmt.Sprintf("%d keys stored in legacy system", kr.LegacyKeys.legacyRawKeys.len())) + } +} + +// rawKeyRing is an intermediate struct for encrypting / decrypting keyRing +// it holds only the essential key information to avoid adding unnecessary data +// (like public keys) to the database +type rawKeyRing struct { + Eth []ethkey.Raw + CSA []csakey.Raw + OCR []ocrkey.Raw + OCR2 []ocr2key.Raw + P2P []p2pkey.Raw + Cosmos []cosmoskey.Raw + Solana []solkey.Raw + StarkNet []starkkey.Raw + VRF []vrfkey.Raw + DKGSign []dkgsignkey.Raw + DKGEncrypt []dkgencryptkey.Raw + LegacyKeys LegacyKeyStorage `json:"-"` +} + +func (rawKeys rawKeyRing) keys() (*keyRing, error) { + keyRing := newKeyRing() + for _, rawCSAKey := range rawKeys.CSA { + csaKey := rawCSAKey.Key() + keyRing.CSA[csaKey.ID()] = csaKey + } + for _, rawETHKey := range rawKeys.Eth { + ethKey := rawETHKey.Key() + keyRing.Eth[ethKey.ID()] = ethKey + } + for _, rawOCRKey := range rawKeys.OCR { + ocrKey := rawOCRKey.Key() + keyRing.OCR[ocrKey.ID()] = ocrKey + } + for _, rawOCR2Key := range rawKeys.OCR2 { + if ocr2Key := rawOCR2Key.Key(); ocr2Key != nil { + keyRing.OCR2[ocr2Key.ID()] = ocr2Key + } + } + for _, rawP2PKey := range rawKeys.P2P { + p2pKey := rawP2PKey.Key() + keyRing.P2P[p2pKey.ID()] = p2pKey + } + for _, rawCosmosKey := range rawKeys.Cosmos { + cosmosKey := rawCosmosKey.Key() + keyRing.Cosmos[cosmosKey.ID()] = cosmosKey + } + for _, rawSolKey := range rawKeys.Solana { + solKey := rawSolKey.Key() + keyRing.Solana[solKey.ID()] = solKey + } + for _, rawStarkNetKey := range 
rawKeys.StarkNet { + starkKey := rawStarkNetKey.Key() + keyRing.StarkNet[starkKey.ID()] = starkKey + } + for _, rawVRFKey := range rawKeys.VRF { + vrfKey := rawVRFKey.Key() + keyRing.VRF[vrfKey.ID()] = vrfKey + } + for _, rawDKGSignKey := range rawKeys.DKGSign { + dkgSignKey := rawDKGSignKey.Key() + keyRing.DKGSign[dkgSignKey.ID()] = dkgSignKey + } + for _, rawDKGEncryptKey := range rawKeys.DKGEncrypt { + dkgEncryptKey := rawDKGEncryptKey.Key() + keyRing.DKGEncrypt[dkgEncryptKey.ID()] = dkgEncryptKey + } + + keyRing.LegacyKeys = rawKeys.LegacyKeys + return keyRing, nil +} + +// adulteration prevents the password from getting used in the wrong place +func adulteratedPassword(password string) string { + return "master-password-" + password +} diff --git a/core/services/keystore/models_test.go b/core/services/keystore/models_test.go new file mode 100644 index 00000000..14d1cc50 --- /dev/null +++ b/core/services/keystore/models_test.go @@ -0,0 +1,171 @@ +package keystore + +import ( + "crypto/rand" + "encoding/json" + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/cosmoskey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgencryptkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgsignkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const password = "password" + 
+func TestKeyRing_Encrypt_Decrypt(t *testing.T) { + csa1, csa2 := csakey.MustNewV2XXXTestingOnly(big.NewInt(1)), csakey.MustNewV2XXXTestingOnly(big.NewInt(2)) + eth1, eth2 := mustNewEthKey(t), mustNewEthKey(t) + ocr := []ocrkey.KeyV2{ + ocrkey.MustNewV2XXXTestingOnly(big.NewInt(1)), + ocrkey.MustNewV2XXXTestingOnly(big.NewInt(2)), + } + var ocr2 []ocr2key.KeyBundle + var ocr2_raw []ocr2key.Raw + for _, chain := range chaintype.SupportedChainTypes { + key := ocr2key.MustNewInsecure(rand.Reader, chain) + ocr2 = append(ocr2, key) + ocr2_raw = append(ocr2_raw, key.Raw()) + } + p2p1, p2p2 := p2pkey.MustNewV2XXXTestingOnly(big.NewInt(1)), p2pkey.MustNewV2XXXTestingOnly(big.NewInt(2)) + sol1, sol2 := solkey.MustNewInsecure(rand.Reader), solkey.MustNewInsecure(rand.Reader) + vrf1, vrf2 := vrfkey.MustNewV2XXXTestingOnly(big.NewInt(1)), vrfkey.MustNewV2XXXTestingOnly(big.NewInt(2)) + tk1, tk2 := cosmoskey.MustNewInsecure(rand.Reader), cosmoskey.MustNewInsecure(rand.Reader) + dkgsign1, dkgsign2 := dkgsignkey.MustNewXXXTestingOnly(big.NewInt(1)), dkgsignkey.MustNewXXXTestingOnly(big.NewInt(2)) + dkgencrypt1, dkgencrypt2 := dkgencryptkey.MustNewXXXTestingOnly(big.NewInt(1)), dkgencryptkey.MustNewXXXTestingOnly(big.NewInt(2)) + originalKeyRingRaw := rawKeyRing{ + CSA: []csakey.Raw{csa1.Raw(), csa2.Raw()}, + Eth: []ethkey.Raw{eth1.Raw(), eth2.Raw()}, + OCR: []ocrkey.Raw{ocr[0].Raw(), ocr[1].Raw()}, + OCR2: ocr2_raw, + P2P: []p2pkey.Raw{p2p1.Raw(), p2p2.Raw()}, + Solana: []solkey.Raw{sol1.Raw(), sol2.Raw()}, + VRF: []vrfkey.Raw{vrf1.Raw(), vrf2.Raw()}, + Cosmos: []cosmoskey.Raw{tk1.Raw(), tk2.Raw()}, + DKGSign: []dkgsignkey.Raw{dkgsign1.Raw(), dkgsign2.Raw()}, + DKGEncrypt: []dkgencryptkey.Raw{dkgencrypt1.Raw(), dkgencrypt2.Raw()}, + } + originalKeyRing, kerr := originalKeyRingRaw.keys() + require.NoError(t, kerr) + + t.Run("test encrypt/decrypt", func(t *testing.T) { + encryptedKr, err := originalKeyRing.Encrypt(password, utils.FastScryptParams) + require.NoError(t, err) + 
decryptedKeyRing, err := encryptedKr.Decrypt(password) + require.NoError(t, err) + // compare cosmos keys + require.Equal(t, 2, len(decryptedKeyRing.Cosmos)) + require.Equal(t, originalKeyRing.Cosmos[tk1.ID()].PublicKey(), decryptedKeyRing.Cosmos[tk1.ID()].PublicKey()) + require.Equal(t, originalKeyRing.Cosmos[tk2.ID()].PublicKey(), decryptedKeyRing.Cosmos[tk2.ID()].PublicKey()) + // compare csa keys + require.Equal(t, 2, len(decryptedKeyRing.CSA)) + require.Equal(t, originalKeyRing.CSA[csa1.ID()].PublicKey, decryptedKeyRing.CSA[csa1.ID()].PublicKey) + require.Equal(t, originalKeyRing.CSA[csa2.ID()].PublicKey, decryptedKeyRing.CSA[csa2.ID()].PublicKey) + // compare eth keys + require.Equal(t, 2, len(decryptedKeyRing.Eth)) + require.Equal(t, originalKeyRing.Eth[eth1.ID()].Address, decryptedKeyRing.Eth[eth1.ID()].Address) + require.Equal(t, originalKeyRing.Eth[eth2.ID()].Address, decryptedKeyRing.Eth[eth2.ID()].Address) + // compare ocr keys + require.Equal(t, 2, len(decryptedKeyRing.OCR)) + require.Equal(t, originalKeyRing.OCR[ocr[0].ID()].OnChainSigning.X, decryptedKeyRing.OCR[ocr[0].ID()].OnChainSigning.X) + require.Equal(t, originalKeyRing.OCR[ocr[0].ID()].OnChainSigning.Y, decryptedKeyRing.OCR[ocr[0].ID()].OnChainSigning.Y) + require.Equal(t, originalKeyRing.OCR[ocr[0].ID()].OnChainSigning.D, decryptedKeyRing.OCR[ocr[0].ID()].OnChainSigning.D) + require.Equal(t, originalKeyRing.OCR[ocr[0].ID()].OffChainSigning, decryptedKeyRing.OCR[ocr[0].ID()].OffChainSigning) + require.Equal(t, originalKeyRing.OCR[ocr[0].ID()].OffChainEncryption, decryptedKeyRing.OCR[ocr[0].ID()].OffChainEncryption) + require.Equal(t, originalKeyRing.OCR[ocr[1].ID()].OnChainSigning.X, decryptedKeyRing.OCR[ocr[1].ID()].OnChainSigning.X) + require.Equal(t, originalKeyRing.OCR[ocr[1].ID()].OnChainSigning.Y, decryptedKeyRing.OCR[ocr[1].ID()].OnChainSigning.Y) + require.Equal(t, originalKeyRing.OCR[ocr[1].ID()].OnChainSigning.D, decryptedKeyRing.OCR[ocr[1].ID()].OnChainSigning.D) + require.Equal(t, 
originalKeyRing.OCR[ocr[1].ID()].OffChainSigning, decryptedKeyRing.OCR[ocr[1].ID()].OffChainSigning) + require.Equal(t, originalKeyRing.OCR[ocr[1].ID()].OffChainEncryption, decryptedKeyRing.OCR[ocr[1].ID()].OffChainEncryption) + // compare ocr2 keys + require.Equal(t, len(chaintype.SupportedChainTypes), len(decryptedKeyRing.OCR2)) + for i := range ocr2 { + id := ocr2[i].ID() + require.Equal(t, originalKeyRing.OCR2[id].ID(), decryptedKeyRing.OCR2[id].ID()) + require.Equal(t, ocr2[i].OnChainPublicKey(), decryptedKeyRing.OCR2[id].OnChainPublicKey()) + require.Equal(t, originalKeyRing.OCR2[id].ChainType(), decryptedKeyRing.OCR2[id].ChainType()) + } + // compare p2p keys + require.Equal(t, 2, len(decryptedKeyRing.P2P)) + require.Equal(t, originalKeyRing.P2P[p2p1.ID()].PublicKeyHex(), decryptedKeyRing.P2P[p2p1.ID()].PublicKeyHex()) + require.Equal(t, originalKeyRing.P2P[p2p1.ID()].PeerID(), decryptedKeyRing.P2P[p2p1.ID()].PeerID()) + require.Equal(t, originalKeyRing.P2P[p2p2.ID()].PublicKeyHex(), decryptedKeyRing.P2P[p2p2.ID()].PublicKeyHex()) + require.Equal(t, originalKeyRing.P2P[p2p2.ID()].PeerID(), decryptedKeyRing.P2P[p2p2.ID()].PeerID()) + // compare solana keys + require.Equal(t, 2, len(decryptedKeyRing.Solana)) + require.Equal(t, originalKeyRing.Solana[sol1.ID()].GetPublic(), decryptedKeyRing.Solana[sol1.ID()].GetPublic()) + // compare vrf keys + require.Equal(t, 2, len(decryptedKeyRing.VRF)) + require.Equal(t, originalKeyRing.VRF[vrf1.ID()].PublicKey, decryptedKeyRing.VRF[vrf1.ID()].PublicKey) + require.Equal(t, originalKeyRing.VRF[vrf2.ID()].PublicKey, decryptedKeyRing.VRF[vrf2.ID()].PublicKey) + // compare dkgsign keys + require.Equal(t, 2, len(decryptedKeyRing.DKGSign)) + require.Equal(t, originalKeyRing.DKGSign[dkgsign1.ID()].PublicKey, decryptedKeyRing.DKGSign[dkgsign1.ID()].PublicKey) + require.Equal(t, originalKeyRing.DKGSign[dkgsign2.ID()].PublicKey, decryptedKeyRing.DKGSign[dkgsign2.ID()].PublicKey) + // compare dkgencrypt keys + require.Equal(t, 2, 
len(decryptedKeyRing.DKGEncrypt)) + require.Equal(t, originalKeyRing.DKGEncrypt[dkgencrypt1.ID()].PublicKey, decryptedKeyRing.DKGEncrypt[dkgencrypt1.ID()].PublicKey) + require.Equal(t, originalKeyRing.DKGEncrypt[dkgencrypt2.ID()].PublicKey, decryptedKeyRing.DKGEncrypt[dkgencrypt2.ID()].PublicKey) + }) + + t.Run("test legacy system", func(t *testing.T) { + //Add unsupported keys to raw json + rawJson, _ := json.Marshal(originalKeyRing.raw()) + var allKeys = map[string][]string{ + "foo": { + "bar", "biz", + }, + } + err := json.Unmarshal(rawJson, &allKeys) + require.NoError(t, err) + //Add more ocr2 keys + newOCR2Key1 := ocrkey.MustNewV2XXXTestingOnly(big.NewInt(5)) + newOCR2Key2 := ocrkey.MustNewV2XXXTestingOnly(big.NewInt(6)) + allKeys["OCR2"] = append(allKeys["OCR2"], newOCR2Key1.Raw().String()) + allKeys["OCR2"] = append(allKeys["OCR2"], newOCR2Key2.Raw().String()) + + //Add more p2p keys + newP2PKey1 := p2pkey.MustNewV2XXXTestingOnly(big.NewInt(5)) + newP2PKey2 := p2pkey.MustNewV2XXXTestingOnly(big.NewInt(7)) + allKeys["P2P"] = append(allKeys["P2P"], newP2PKey1.Raw().String()) + allKeys["P2P"] = append(allKeys["P2P"], newP2PKey2.Raw().String()) + + //Run legacy system + newRawJson, _ := json.Marshal(allKeys) + err = originalKeyRing.LegacyKeys.StoreUnsupported(newRawJson, originalKeyRing) + require.NoError(t, err) + require.Equal(t, originalKeyRing.LegacyKeys.legacyRawKeys.len(), 6) + marshalledRawKeyRingJson, err := json.Marshal(originalKeyRing.raw()) + require.NoError(t, err) + unloadedKeysJson, err := originalKeyRing.LegacyKeys.UnloadUnsupported(marshalledRawKeyRingJson) + require.NoError(t, err) + var shouldHaveAllKeys = map[string][]string{} + err = json.Unmarshal(unloadedKeysJson, &shouldHaveAllKeys) + require.NoError(t, err) + + //Check if keys where added to the raw json + require.Equal(t, shouldHaveAllKeys["foo"], []string{"bar", "biz"}) + require.Contains(t, shouldHaveAllKeys["OCR2"], newOCR2Key1.Raw().String()) + require.Contains(t, 
shouldHaveAllKeys["OCR2"], newOCR2Key2.Raw().String()) + require.Contains(t, shouldHaveAllKeys["P2P"], newP2PKey1.Raw().String()) + require.Contains(t, shouldHaveAllKeys["P2P"], newP2PKey2.Raw().String()) + + //Check error + err = originalKeyRing.LegacyKeys.StoreUnsupported(newRawJson, nil) + require.Error(t, err) + _, err = originalKeyRing.LegacyKeys.UnloadUnsupported(nil) + require.Error(t, err) + }) + +} diff --git a/core/services/keystore/ocr.go b/core/services/keystore/ocr.go new file mode 100644 index 00000000..801f78fb --- /dev/null +++ b/core/services/keystore/ocr.go @@ -0,0 +1,163 @@ +package keystore + +import ( + "fmt" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" +) + +//go:generate mockery --quiet --name OCR --output ./mocks/ --case=underscore + +type OCR interface { + Get(id string) (ocrkey.KeyV2, error) + GetAll() ([]ocrkey.KeyV2, error) + Create() (ocrkey.KeyV2, error) + Add(key ocrkey.KeyV2) error + Delete(id string) (ocrkey.KeyV2, error) + Import(keyJSON []byte, password string) (ocrkey.KeyV2, error) + Export(id string, password string) ([]byte, error) + EnsureKey() error +} + +// KeyNotFoundError is returned when we don't find a requested key +type KeyNotFoundError struct { + ID string + KeyType string +} + +func (e KeyNotFoundError) Error() string { + return fmt.Sprintf("unable to find %s key with id %s", e.KeyType, e.ID) +} + +type ocr struct { + *keyManager +} + +var _ OCR = &ocr{} + +func newOCRKeyStore(km *keyManager) *ocr { + return &ocr{ + km, + } +} + +func (ks *ocr) Get(id string) (ocrkey.KeyV2, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return ocrkey.KeyV2{}, ErrLocked + } + return ks.getByID(id) +} + +func (ks *ocr) GetAll() (keys []ocrkey.KeyV2, _ error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + for _, key := range ks.keyRing.OCR { + keys = append(keys, key) + } + return keys, nil +} + +func 
(ks *ocr) Create() (ocrkey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ocrkey.KeyV2{}, ErrLocked + } + key, err := ocrkey.NewV2() + if err != nil { + return ocrkey.KeyV2{}, err + } + return key, ks.safeAddKey(key) +} + +func (ks *ocr) Add(key ocrkey.KeyV2) error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + if _, found := ks.keyRing.OCR[key.ID()]; found { + return fmt.Errorf("key with ID %s already exists", key.ID()) + } + return ks.safeAddKey(key) +} + +func (ks *ocr) Delete(id string) (ocrkey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ocrkey.KeyV2{}, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return ocrkey.KeyV2{}, err + } + err = ks.safeRemoveKey(key) + return key, err +} + +func (ks *ocr) Import(keyJSON []byte, password string) (ocrkey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ocrkey.KeyV2{}, ErrLocked + } + key, err := ocrkey.FromEncryptedJSON(keyJSON, password) + if err != nil { + return ocrkey.KeyV2{}, errors.Wrap(err, "OCRKeyStore#ImportKey failed to decrypt key") + } + if _, found := ks.keyRing.OCR[key.ID()]; found { + return ocrkey.KeyV2{}, fmt.Errorf("key with ID %s already exists", key.ID()) + } + return key, ks.keyManager.safeAddKey(key) +} + +func (ks *ocr) Export(id string, password string) ([]byte, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return nil, err + } + return key.ToEncryptedJSON(password, ks.scryptParams) +} + +// EnsureKey verifies whether the OCR key has been seeded, if not, it creates it. 
+func (ks *ocr) EnsureKey() error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + + if len(ks.keyRing.OCR) > 0 { + return nil + } + + key, err := ocrkey.NewV2() + if err != nil { + return err + } + + ks.logger.Infof("Created OCR key with ID %s", key.ID()) + + return ks.safeAddKey(key) +} + +func (ks *ocr) getByID(id string) (ocrkey.KeyV2, error) { + key, found := ks.keyRing.OCR[id] + if !found { + return ocrkey.KeyV2{}, KeyNotFoundError{ID: id, KeyType: "OCR"} + } + return key, nil +} diff --git a/core/services/keystore/ocr2.go b/core/services/keystore/ocr2.go new file mode 100644 index 00000000..5bdeaf4d --- /dev/null +++ b/core/services/keystore/ocr2.go @@ -0,0 +1,189 @@ +package keystore + +import ( + "fmt" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" +) + +//go:generate mockery --quiet --name OCR2 --output mocks/ --case=underscore + +type OCR2 interface { + Get(id string) (ocr2key.KeyBundle, error) + GetAll() ([]ocr2key.KeyBundle, error) + GetAllOfType(chaintype.ChainType) ([]ocr2key.KeyBundle, error) + Create(chaintype.ChainType) (ocr2key.KeyBundle, error) + Add(key ocr2key.KeyBundle) error + Delete(id string) error + Import(keyJSON []byte, password string) (ocr2key.KeyBundle, error) + Export(id string, password string) ([]byte, error) + EnsureKeys(enabledChains ...chaintype.ChainType) error +} + +type ocr2 struct { + *keyManager +} + +var _ OCR2 = ocr2{} + +func newOCR2KeyStore(km *keyManager) ocr2 { + return ocr2{ + km, + } +} + +func (ks ocr2) Get(id string) (ocr2key.KeyBundle, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + return ks.getByID(id) +} + +func (ks ocr2) GetAll() ([]ocr2key.KeyBundle, error) { + keys := []ocr2key.KeyBundle{} + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return keys, ErrLocked + } + 
for _, key := range ks.keyRing.OCR2 { + keys = append(keys, key) + } + return keys, nil +} + +func (ks ocr2) GetAllOfType(chainType chaintype.ChainType) ([]ocr2key.KeyBundle, error) { + keys := []ocr2key.KeyBundle{} + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return keys, ErrLocked + } + return ks.getAllOfType(chainType) +} + +func (ks ocr2) Create(chainType chaintype.ChainType) (ocr2key.KeyBundle, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return nil, ErrLocked + } + return ks.create(chainType) +} + +func (ks ocr2) Add(key ocr2key.KeyBundle) error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + if _, found := ks.keyRing.OCR2[key.ID()]; found { + return fmt.Errorf("key with ID %s already exists", key.ID()) + } + return ks.safeAddKey(key) +} + +func (ks ocr2) Delete(id string) error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return err + } + err = ks.safeRemoveKey(key) + return err +} + +func (ks ocr2) Import(keyJSON []byte, password string) (ocr2key.KeyBundle, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return nil, ErrLocked + } + key, err := ocr2key.FromEncryptedJSON(keyJSON, password) + if err != nil { + return nil, errors.Wrap(err, "OCR2KeyStore#ImportKey failed to decrypt key") + } + // BUG FIX: duplicate check must consult the OCR2 keyring, not the (v1) OCR keyring. + if _, found := ks.keyRing.OCR2[key.ID()]; found { + return nil, fmt.Errorf("key with ID %s already exists", key.ID()) + } + return key, ks.keyManager.safeAddKey(key) +} + +func (ks ocr2) Export(id string, password string) ([]byte, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return nil, err + } + return ocr2key.ToEncryptedJSON(key, password, ks.scryptParams) +} + +func (ks ocr2) EnsureKeys(enabledChains ...chaintype.ChainType) error { + ks.lock.Lock() + defer 
ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + + for _, chainType := range enabledChains { + keys, err := ks.getAllOfType(chainType) + if err != nil { + return err + } + + if len(keys) > 0 { + continue + } + + created, err := ks.create(chainType) + if err != nil { + return err + } + + ks.logger.Infof("Created OCR2 key with ID %s for chain type %s", created.ID(), chainType) + } + + return nil +} + +func (ks ocr2) getByID(id string) (ocr2key.KeyBundle, error) { + key, found := ks.keyRing.OCR2[id] + if !found { + return nil, fmt.Errorf("unable to find OCR2 key with id %s", id) + } + return key, nil +} + +func (ks ocr2) getAllOfType(chainType chaintype.ChainType) ([]ocr2key.KeyBundle, error) { + keys := []ocr2key.KeyBundle{} + for _, key := range ks.keyRing.OCR2 { + if key.ChainType() == chainType { + keys = append(keys, key) + } + } + return keys, nil +} + +func (ks ocr2) create(chainType chaintype.ChainType) (ocr2key.KeyBundle, error) { + if !chaintype.IsSupportedChainType(chainType) { + return nil, chaintype.NewErrInvalidChainType(chainType) + } + key, err := ocr2key.New(chainType) + if err != nil { + return nil, err + } + return key, ks.safeAddKey(key) +} diff --git a/core/services/keystore/ocr2_test.go b/core/services/keystore/ocr2_test.go new file mode 100644 index 00000000..988bf817 --- /dev/null +++ b/core/services/keystore/ocr2_test.go @@ -0,0 +1,193 @@ +package keystore_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" +) + +func Test_OCR2KeyStore_E2E(t *testing.T) { + db := 
pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + require.NoError(t, keyStore.Unlock(cltest.Password)) + ks := keyStore.OCR2() + reset := func() { + _, err := db.Exec("DELETE FROM encrypted_key_rings") + require.NoError(t, err) + keyStore.ResetXXXTestOnly() + err = keyStore.Unlock(cltest.Password) + require.NoError(t, err) + } + + t.Run("initializes with an empty state", func(t *testing.T) { + defer reset() + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + }) + + t.Run("errors when getting non-existent ID", func(t *testing.T) { + defer reset() + _, err := ks.Get("non-existent-id") + require.Error(t, err) + }) + + t.Run("creates a key with valid type", func(t *testing.T) { + defer reset() + // loop through different chain types + for _, chain := range chaintype.SupportedChainTypes { + key, err := ks.Create(chain) + require.NoError(t, err) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, key, retrievedKey) + } + }) + + t.Run("gets keys by type", func(t *testing.T) { + defer reset() + + created := map[chaintype.ChainType]bool{} + for _, chain := range chaintype.SupportedChainTypes { + + // validate no keys exist for chain + keys, err := ks.GetAllOfType(chain) + require.NoError(t, err) + require.Len(t, keys, 0) + + _, err = ks.Create(chain) + require.NoError(t, err) + created[chain] = true + + // validate that only 1 of each exists after creation + for _, c := range chaintype.SupportedChainTypes { + keys, err := ks.GetAllOfType(c) + require.NoError(t, err) + if created[c] { + require.Len(t, keys, 1) + continue + } + require.Len(t, keys, 0) + } + } + }) + + t.Run("errors when creating a key with an invalid type", func(t *testing.T) { + defer reset() + _, err := ks.Create("foobar") + require.Error(t, err) + }) + + t.Run("imports and exports a key", func(t *testing.T) { + defer reset() + for _, chain := range 
chaintype.SupportedChainTypes { + key, err := ks.Create(chain) + require.NoError(t, err) + exportJSON, err := ks.Export(key.ID(), cltest.Password) + require.NoError(t, err) + _, err = ks.Export("non-existent", cltest.Password) + assert.Error(t, err) + err = ks.Delete(key.ID()) + require.NoError(t, err) + _, err = ks.Get(key.ID()) + require.Error(t, err) + importedKey, err := ks.Import(exportJSON, cltest.Password) + require.NoError(t, err) + _, err = ks.Import([]byte(""), cltest.Password) + assert.Error(t, err) + require.Equal(t, key.ID(), importedKey.ID()) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, importedKey, retrievedKey) + require.Equal(t, importedKey.ChainType(), retrievedKey.ChainType()) + } + }) + + t.Run("adds an externally created key / deletes a key", func(t *testing.T) { + defer reset() + for _, chain := range chaintype.SupportedChainTypes { + newKey, err := ocr2key.New(chain) + require.NoError(t, err) + err = ks.Add(newKey) + require.NoError(t, err) + err = ks.Add(newKey) + assert.Error(t, err) + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + err = ks.Delete(newKey.ID()) + require.NoError(t, err) + err = ks.Delete(newKey.ID()) + assert.Error(t, err) + keys, err = ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + _, err = ks.Get(newKey.ID()) + require.Error(t, err) + } + }) + + t.Run("ensures key", func(t *testing.T) { + defer reset() + err := ks.EnsureKeys(chaintype.SupportedChainTypes...) + assert.NoError(t, err) + + keys, err := ks.GetAll() + assert.NoError(t, err) + require.Equal(t, len(chaintype.SupportedChainTypes), len(keys)) + + err = ks.EnsureKeys(chaintype.SupportedChainTypes...) 
+ assert.NoError(t, err) + + // loop through different supported chain types + for _, chain := range chaintype.SupportedChainTypes { + keys, err := ks.GetAllOfType(chain) + assert.NoError(t, err) + require.Equal(t, 1, len(keys)) + } + }) + + t.Run("ensures key only for enabled chains", func(t *testing.T) { + defer reset() + err := ks.EnsureKeys(chaintype.EVM) + assert.NoError(t, err) + + keys, err := ks.GetAll() + assert.NoError(t, err) + require.Equal(t, 1, len(keys)) + require.Equal(t, chaintype.EVM, keys[0].ChainType()) + + err = ks.EnsureKeys(chaintype.Cosmos) + assert.NoError(t, err) + + keys, err = ks.GetAll() + assert.NoError(t, err) + require.Equal(t, 2, len(keys)) + + cosmosKeys, err := ks.GetAllOfType(chaintype.Cosmos) + assert.NoError(t, err) + require.Equal(t, 1, len(cosmosKeys)) + require.Equal(t, chaintype.Cosmos, cosmosKeys[0].ChainType()) + + err = ks.EnsureKeys(chaintype.StarkNet) + assert.NoError(t, err) + + keys, err = ks.GetAll() + assert.NoError(t, err) + require.Equal(t, 3, len(keys)) + + starknetKeys, err := ks.GetAllOfType(chaintype.StarkNet) + assert.NoError(t, err) + require.Equal(t, 1, len(starknetKeys)) + require.Equal(t, chaintype.StarkNet, starknetKeys[0].ChainType()) + }) +} diff --git a/core/services/keystore/ocr_test.go b/core/services/keystore/ocr_test.go new file mode 100644 index 00000000..2e651d58 --- /dev/null +++ b/core/services/keystore/ocr_test.go @@ -0,0 +1,114 @@ +package keystore_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" +) + +func Test_OCRKeyStore_E2E(t *testing.T) { + db := 
pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + require.NoError(t, keyStore.Unlock(cltest.Password)) + ks := keyStore.OCR() + reset := func() { + require.NoError(t, utils.JustError(db.Exec("DELETE FROM encrypted_key_rings"))) + keyStore.ResetXXXTestOnly() + require.NoError(t, keyStore.Unlock(cltest.Password)) + } + + t.Run("initializes with an empty state", func(t *testing.T) { + defer reset() + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + }) + + t.Run("errors when getting non-existent ID", func(t *testing.T) { + defer reset() + _, err := ks.Get("non-existent-id") + require.Error(t, err) + }) + + t.Run("creates a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, key, retrievedKey) + }) + + t.Run("imports and exports a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + _, err = ks.Export("non-existent", cltest.Password) + assert.Error(t, err) + exportJSON, err := ks.Export(key.ID(), cltest.Password) + require.NoError(t, err) + _, err = ks.Delete(key.ID()) + require.NoError(t, err) + _, err = ks.Get(key.ID()) + require.Error(t, err) + importedKey, err := ks.Import(exportJSON, cltest.Password) + require.NoError(t, err) + _, err = ks.Import([]byte(""), cltest.Password) + assert.Error(t, err) + _, err = ks.Import(exportJSON, cltest.Password) + assert.Error(t, err) + assert.Equal(t, key.ID(), importedKey.ID()) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + assert.Equal(t, importedKey, retrievedKey) + }) + + t.Run("adds an externally created key / deletes a key", func(t *testing.T) { + defer reset() + newKey, err := ocrkey.NewV2() + require.NoError(t, err) + err = ks.Add(newKey) + require.NoError(t, err) + err = ks.Add(newKey) + assert.Error(t, err) + keys, 
err := ks.GetAll() + require.NoError(t, err) + assert.Equal(t, 1, len(keys)) + _, err = ks.Delete(newKey.ID()) + require.NoError(t, err) + _, err = ks.Delete(newKey.ID()) + assert.Error(t, err) + keys, err = ks.GetAll() + require.NoError(t, err) + assert.Equal(t, 0, len(keys)) + _, err = ks.Get(newKey.ID()) + assert.Error(t, err) + }) + + t.Run("ensures key", func(t *testing.T) { + defer reset() + err := ks.EnsureKey() + require.NoError(t, err) + err = ks.EnsureKey() + require.NoError(t, err) + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + }) + + t.Run("imports a key exported from a v1 keystore", func(t *testing.T) { + exportedKey := `{"id":"7cfd89bbb018e4778a44fd61172e8834dd24b4a2baf61ead795143b117221c61","onChainSigningAddress":"ocrsad_0x2ed5b18b62dacd7a85b6ed19247ea718bdae6114","offChainPublicKey":"ocroff_62a76d04e13dae5870071badea6b113a5123f4ac1a2cbae6b2fb7070dd9dbf2d","configPublicKey":"ocrcfg_75581baab36744671c2b1d75071b07b08b9cb631b3a7155d2f590744983d9c41","crypto":{"cipher":"aes-128-ctr","ciphertext":"60d2e679f08e0b1538cf609e25f2d32c0b7d408f24cab22dd05bffd3b5580c65552097e203f6546e2d792a4f6adb69449fee0fe4dd7f1060970907518e7c33331abd076388af842f03d05c193b03f22f6bf0423d4ae99dbb563c7158b4eac2a31b03c90fb9fd7be217804243151c36c33504469632bc2c89be33e7b9157edf172a52af4d49fa125b8d0358ea63ace90bc181a7164b548e0f12288ec08b919b46afad1b36dbaeda32d8d657a43908f802b6f2354473f538437ba3bd0b0d374d8e836e623484b655c95f4ef11e30baaa47b9075c6dbb53147c4b489f45a4bdcfa6b56ef2e6eaa9e9b88b570517c991de359d7f07226c00259810a8a4196b7d5331e4126529eac9bd80b47b5540940f89ad0e728b3dd50e6da316d9f3cf9b3be9b87ca6b7868daa7e4142fc4a65fc77deea6f4f2b4bce1e38337aa827160d8c50cad92d157309aa251180b894ab1ca9923d709d","cipherparams":{"iv":"a9507e6f2b073c1da1082d40a24864d1"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"267f9450f52af42a918ab5747043c88bd2035fa3d3e0f0cfd2b621981bc9320f"},"mac":"15aeb3fc1903f514bfe70cb2eb5a23820ba904f5edf8aeb1913d4477
97f74442"}}` + importedKey, err := ks.Import([]byte(exportedKey), "p4SsW0rD1!@#_") + require.NoError(t, err) + require.Equal(t, "7cfd89bbb018e4778a44fd61172e8834dd24b4a2baf61ead795143b117221c61", importedKey.ID()) + }) +} diff --git a/core/services/keystore/orm.go b/core/services/keystore/orm.go new file mode 100644 index 00000000..50702022 --- /dev/null +++ b/core/services/keystore/orm.go @@ -0,0 +1,80 @@ +package keystore + +import ( + "database/sql" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" +) + +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) ksORM { + namedLogger := lggr.Named("KeystoreORM") + return ksORM{ + q: pg.NewQ(db, namedLogger, cfg), + lggr: namedLogger, + } +} + +type ksORM struct { + q pg.Q + lggr logger.Logger +} + +func (orm ksORM) isEmpty() (bool, error) { + var count int64 + err := orm.q.QueryRow("SELECT count(*) FROM encrypted_key_rings").Scan(&count) + if err != nil { + return false, err + } + return count == 0, nil +} + +func (orm ksORM) saveEncryptedKeyRing(kr *encryptedKeyRing, callbacks ...func(pg.Queryer) error) error { + return orm.q.Transaction(func(tx pg.Queryer) error { + _, err := tx.Exec(` + UPDATE encrypted_key_rings + SET encrypted_keys = $1 + `, kr.EncryptedKeys) + if err != nil { + return errors.Wrap(err, "while saving keyring") + } + for _, callback := range callbacks { + err = callback(tx) + if err != nil { + return err + } + } + return nil + }) +} + +func (orm ksORM) getEncryptedKeyRing() (kr encryptedKeyRing, err error) { + err = orm.q.Get(&kr, `SELECT * FROM encrypted_key_rings LIMIT 1`) + if errors.Is(err, sql.ErrNoRows) { + sql := `INSERT INTO encrypted_key_rings (encrypted_keys, updated_at) VALUES (NULL, NOW()) RETURNING *;` + err2 := orm.q.Get(&kr, sql) + + if err2 != nil { + return kr, err2 + } + } else if err != 
nil { + return kr, err + } + return kr, nil +} + +func (orm ksORM) loadKeyStates() (*keyStates, error) { + ks := newKeyStates() + var ethkeystates []*ethkey.State + if err := orm.q.Select(&ethkeystates, `SELECT id, address, evm_chain_id, disabled, created_at, updated_at FROM evm.key_states`); err != nil { + return ks, errors.Wrap(err, "error loading evm.key_states from DB") + } + for _, state := range ethkeystates { + ks.add(state) + } + return ks, nil +} diff --git a/core/services/keystore/p2p.go b/core/services/keystore/p2p.go new file mode 100644 index 00000000..6f05b6f2 --- /dev/null +++ b/core/services/keystore/p2p.go @@ -0,0 +1,191 @@ +package keystore + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +//go:generate mockery --quiet --name P2P --output ./mocks/ --case=underscore --filename p2p.go + +type P2P interface { + Get(id p2pkey.PeerID) (p2pkey.KeyV2, error) + GetAll() ([]p2pkey.KeyV2, error) + Create() (p2pkey.KeyV2, error) + Add(key p2pkey.KeyV2) error + Delete(id p2pkey.PeerID) (p2pkey.KeyV2, error) + Import(keyJSON []byte, password string) (p2pkey.KeyV2, error) + Export(id p2pkey.PeerID, password string) ([]byte, error) + EnsureKey() error + + GetOrFirst(id p2pkey.PeerID) (p2pkey.KeyV2, error) +} + +type p2p struct { + *keyManager +} + +var _ P2P = &p2p{} + +func newP2PKeyStore(km *keyManager) *p2p { + return &p2p{ + km, + } +} + +func (ks *p2p) Get(id p2pkey.PeerID) (p2pkey.KeyV2, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return p2pkey.KeyV2{}, ErrLocked + } + return ks.getByID(id) +} + +func (ks *p2p) GetAll() (keys []p2pkey.KeyV2, _ error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + for _, key := range ks.keyRing.P2P { + keys = append(keys, key) + } + return keys, nil +} + +func (ks *p2p) Create() (p2pkey.KeyV2, error) { 
+ ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return p2pkey.KeyV2{}, ErrLocked + } + key, err := p2pkey.NewV2() + if err != nil { + return p2pkey.KeyV2{}, err + } + return key, ks.safeAddKey(key) +} + +func (ks *p2p) Add(key p2pkey.KeyV2) error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + if _, found := ks.keyRing.P2P[key.ID()]; found { + return fmt.Errorf("key with ID %s already exists", key.ID()) + } + return ks.safeAddKey(key) +} + +func (ks *p2p) Delete(id p2pkey.PeerID) (p2pkey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return p2pkey.KeyV2{}, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return p2pkey.KeyV2{}, err + } + err = ks.safeRemoveKey(key, func(tx pg.Queryer) error { + _, err2 := tx.Exec(`DELETE FROM p2p_peers WHERE peer_id = $1`, key.ID()) + return err2 + }) + return key, err +} + +func (ks *p2p) Import(keyJSON []byte, password string) (p2pkey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return p2pkey.KeyV2{}, ErrLocked + } + key, err := p2pkey.FromEncryptedJSON(keyJSON, password) + if err != nil { + return p2pkey.KeyV2{}, errors.Wrap(err, "P2PKeyStore#ImportKey failed to decrypt key") + } + if _, found := ks.keyRing.P2P[key.ID()]; found { + return p2pkey.KeyV2{}, fmt.Errorf("key with ID %s already exists", key.ID()) + } + return key, ks.keyManager.safeAddKey(key) +} + +func (ks *p2p) Export(id p2pkey.PeerID, password string) ([]byte, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return nil, err + } + return key.ToEncryptedJSON(password, ks.scryptParams) +} + +func (ks *p2p) EnsureKey() error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + + if len(ks.keyRing.P2P) > 0 { + return nil + } + + key, err := p2pkey.NewV2() + if err != nil { + return err + } + + 
ks.logger.Infof("Created P2P key with ID %s", key.ID()) + + return ks.safeAddKey(key) +} + +var ( + ErrNoP2PKey = errors.New("no p2p keys exist") +) + +func (ks *p2p) GetOrFirst(id p2pkey.PeerID) (p2pkey.KeyV2, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return p2pkey.KeyV2{}, ErrLocked + } + if id != (p2pkey.PeerID{}) { + return ks.getByID(id) + } else if len(ks.keyRing.P2P) == 1 { + ks.logger.Warn("No P2P.PeerID set, defaulting to first key in database") + for _, key := range ks.keyRing.P2P { + return key, nil + } + } else if len(ks.keyRing.P2P) == 0 { + return p2pkey.KeyV2{}, ErrNoP2PKey + } + possibleKeys := make([]string, 0, len(ks.keyRing.P2P)) + for _, key := range ks.keyRing.P2P { + possibleKeys = append(possibleKeys, key.ID()) + } + //To avoid ambiguity, we require the user to specify a peer ID if there are multiple keys + return p2pkey.KeyV2{}, errors.New( + "multiple p2p keys found but peer ID was not set - you must specify a P2P.PeerID " + + "config var if you have more than one key, or delete the keys you aren't using" + + " (possible keys: " + strings.Join(possibleKeys, ", ") + ")", + ) +} + +func (ks *p2p) getByID(id p2pkey.PeerID) (p2pkey.KeyV2, error) { + key, found := ks.keyRing.P2P[id.Raw()] + if !found { + return p2pkey.KeyV2{}, KeyNotFoundError{ID: id.String(), KeyType: "P2P"} + } + return key, nil +} diff --git a/core/services/keystore/p2p_test.go b/core/services/keystore/p2p_test.go new file mode 100644 index 00000000..7dab5dd0 --- /dev/null +++ b/core/services/keystore/p2p_test.go @@ -0,0 +1,195 @@ +package keystore_test + +import ( + "crypto/rand" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" +) + +func Test_P2PKeyStore_E2E(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + require.NoError(t, keyStore.Unlock(cltest.Password)) + ks := keyStore.P2P() + reset := func() { + require.NoError(t, utils.JustError(db.Exec("DELETE FROM encrypted_key_rings"))) + keyStore.ResetXXXTestOnly() + require.NoError(t, keyStore.Unlock(cltest.Password)) + } + + t.Run("initializes with an empty state", func(t *testing.T) { + defer reset() + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + }) + + t.Run("errors when getting non-existent ID", func(t *testing.T) { + defer reset() + var nonExistent p2pkey.PeerID + _, err := rand.Read(nonExistent[:]) + require.NoError(t, err) + _, err = ks.Get(nonExistent) + require.Error(t, err) + }) + + t.Run("creates a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + retrievedKey, err := ks.Get(key.PeerID()) + require.NoError(t, err) + require.Equal(t, key, retrievedKey) + }) + + t.Run("imports and exports a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + exportJSON, err := ks.Export(key.PeerID(), cltest.Password) + require.NoError(t, err) + var nonExistent p2pkey.PeerID + _, err = rand.Read(nonExistent[:]) + require.NoError(t, err) + _, err = ks.Export(nonExistent, cltest.Password) + assert.Error(t, err) + _, err = ks.Delete(key.PeerID()) + require.NoError(t, err) + _, err = ks.Get(key.PeerID()) + require.Error(t, err) + importedKey, err := ks.Import(exportJSON, cltest.Password) + require.NoError(t, err) + _, err = ks.Import(exportJSON, cltest.Password) + assert.Error(t, err) + _, err = ks.Import([]byte(""), 
cltest.Password) + assert.Error(t, err) + require.Equal(t, key.PeerID(), importedKey.PeerID()) + retrievedKey, err := ks.Get(key.PeerID()) + require.NoError(t, err) + require.Equal(t, importedKey, retrievedKey) + }) + + t.Run("adds an externally created key / deletes a key", func(t *testing.T) { + defer reset() + newKey, err := p2pkey.NewV2() + require.NoError(t, err) + err = ks.Add(newKey) + require.NoError(t, err) + err = ks.Add(newKey) + assert.Error(t, err) + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + _, err = ks.Delete(newKey.PeerID()) + require.NoError(t, err) + _, err = ks.Delete(newKey.PeerID()) + assert.Error(t, err) + keys, err = ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + _, err = ks.Get(newKey.PeerID()) + require.Error(t, err) + }) + + t.Run("ensures key", func(t *testing.T) { + defer reset() + err := ks.EnsureKey() + assert.NoError(t, err) + + keys, err := ks.GetAll() + assert.NoError(t, err) + require.Equal(t, 1, len(keys)) + + err = ks.EnsureKey() + assert.NoError(t, err) + + keys, err = ks.GetAll() + assert.NoError(t, err) + require.Equal(t, 1, len(keys)) + }) + + t.Run("GetOrFirst", func(t *testing.T) { + defer reset() + _, err := ks.GetOrFirst(p2pkey.PeerID{}) + require.Contains(t, err.Error(), "no p2p keys exist") + id := p2pkey.PeerID{0xa0} + _, err = ks.GetOrFirst(id) + require.Contains(t, err.Error(), fmt.Sprintf("unable to find P2P key with id %s", id)) + k1, err := ks.Create() + require.NoError(t, err) + k2, err := ks.GetOrFirst(p2pkey.PeerID{}) + require.NoError(t, err) + require.Equal(t, k1, k2) + k3, err := ks.GetOrFirst(k1.PeerID()) + require.NoError(t, err) + require.Equal(t, k1, k3) + _, err = ks.Create() + require.NoError(t, err) + _, err = ks.GetOrFirst(p2pkey.PeerID{}) + require.Contains(t, err.Error(), "multiple p2p keys found") + //Check for possible keys in error message + require.Contains(t, err.Error(), k1.ID()) + require.Contains(t, err.Error(), k2.ID()) + 
require.Contains(t, err.Error(), k3.ID()) + + k4, err := ks.GetOrFirst(k1.PeerID()) + require.NoError(t, err) + require.Equal(t, k1, k4) + }) + + t.Run("clears p2p_peers on delete", func(t *testing.T) { + key, err := ks.Create() + require.NoError(t, err) + type P2PPeer struct { + ID string + Addr string + PeerID string + CreatedAt time.Time + UpdatedAt time.Time + } + p2pPeer1 := P2PPeer{ + ID: cltest.NewPeerID().String(), + Addr: testutils.NewAddress().Hex(), + PeerID: cltest.DefaultPeerID, // different p2p key + } + p2pPeer2 := P2PPeer{ + ID: cltest.NewPeerID().String(), + Addr: testutils.NewAddress().Hex(), + PeerID: key.PeerID().Raw(), + } + const p2pTableName = "p2p_peers" + sql := fmt.Sprintf(`INSERT INTO %s (id, addr, peer_id, created_at, updated_at) + VALUES (:id, :addr, :peer_id, now(), now()) + RETURNING *;`, p2pTableName) + stmt, err := db.PrepareNamed(sql) + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, stmt.Close()) }) + require.NoError(t, stmt.Get(&p2pPeer1, &p2pPeer1)) + require.NoError(t, stmt.Get(&p2pPeer2, &p2pPeer2)) + cltest.AssertCount(t, db, p2pTableName, 2) + _, err = ks.Delete(key.PeerID()) + require.NoError(t, err) + cltest.AssertCount(t, db, p2pTableName, 1) + }) + + t.Run("imports a key exported from a v1 keystore", func(t *testing.T) { + exportedKey := `{"publicKey":"fcc1fdebde28322dde17233fe7bd6dcde447d60d5cc1de518962deed102eea35","peerID":"p2p_12D3KooWSq2UZgSXvhGLG5uuAAmz1JNjxHMJViJB39aorvbbYo8p","crypto":{"cipher":"aes-128-ctr","ciphertext":"adb2dff72148a8cd467f6f06a03869e7cedf180cf2a4decdb86875b2e1cf3e58c4bd2b721ecdaa88a0825fa9abfc309bf32dbb35a5c0b6cb01ac89a956d78e0550eff351","cipherparams":{"iv":"6cc4381766a4efc39f762b2b8d09dfba"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"ff5055ae4cdcdc2d0404307d578262e2caeb0210f82db3a0ecbdba727c6f5259"},"mac":"d37e4f1dea98d85960ef3205099fc71741715ae56a3b1a8f9215a78de9b95595"}}` + importedKey, err := ks.Import([]byte(exportedKey), "p4SsW0rD1!@#_") + 
require.NoError(t, err) + require.Equal(t, "12D3KooWSq2UZgSXvhGLG5uuAAmz1JNjxHMJViJB39aorvbbYo8p", importedKey.ID()) + }) +} diff --git a/core/services/keystore/solana.go b/core/services/keystore/solana.go new file mode 100644 index 00000000..fae5f9a8 --- /dev/null +++ b/core/services/keystore/solana.go @@ -0,0 +1,177 @@ +package keystore + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" +) + +//go:generate mockery --quiet --name Solana --output ./mocks/ --case=underscore --filename solana.go + +type Solana interface { + Get(id string) (solkey.Key, error) + GetAll() ([]solkey.Key, error) + Create() (solkey.Key, error) + Add(key solkey.Key) error + Delete(id string) (solkey.Key, error) + Import(keyJSON []byte, password string) (solkey.Key, error) + Export(id string, password string) ([]byte, error) + EnsureKey() error + Sign(ctx context.Context, id string, msg []byte) (signature []byte, err error) +} + +// SolanaSigner adapts Solana to [loop.Keystore]. 
+type SolanaSigner struct { + Solana +} + +func (s *SolanaSigner) Accounts(ctx context.Context) (accounts []string, err error) { + ks, err := s.GetAll() + if err != nil { + return nil, err + } + for _, k := range ks { + accounts = append(accounts, k.PublicKeyStr()) + } + return +} + +type solana struct { + *keyManager +} + +var _ Solana = &solana{} + +func newSolanaKeyStore(km *keyManager) *solana { + return &solana{ + km, + } +} + +func (ks *solana) Get(id string) (solkey.Key, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return solkey.Key{}, ErrLocked + } + return ks.getByID(id) +} + +func (ks *solana) GetAll() (keys []solkey.Key, _ error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + for _, key := range ks.keyRing.Solana { + keys = append(keys, key) + } + return keys, nil +} + +func (ks *solana) Create() (solkey.Key, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return solkey.Key{}, ErrLocked + } + key, err := solkey.New() + if err != nil { + return solkey.Key{}, err + } + return key, ks.safeAddKey(key) +} + +func (ks *solana) Add(key solkey.Key) error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + if _, found := ks.keyRing.Solana[key.ID()]; found { + return fmt.Errorf("key with ID %s already exists", key.ID()) + } + return ks.safeAddKey(key) +} + +func (ks *solana) Delete(id string) (solkey.Key, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return solkey.Key{}, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return solkey.Key{}, err + } + err = ks.safeRemoveKey(key) + return key, err +} + +func (ks *solana) Import(keyJSON []byte, password string) (solkey.Key, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return solkey.Key{}, ErrLocked + } + key, err := solkey.FromEncryptedJSON(keyJSON, password) + if err != nil { + return solkey.Key{}, 
errors.Wrap(err, "SolanaKeyStore#ImportKey failed to decrypt key") + } + if _, found := ks.keyRing.Solana[key.ID()]; found { + return solkey.Key{}, fmt.Errorf("key with ID %s already exists", key.ID()) + } + return key, ks.keyManager.safeAddKey(key) +} + +func (ks *solana) Export(id string, password string) ([]byte, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return nil, err + } + return key.ToEncryptedJSON(password, ks.scryptParams) +} + +func (ks *solana) EnsureKey() error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + if len(ks.keyRing.Solana) > 0 { + return nil + } + + key, err := solkey.New() + if err != nil { + return err + } + + ks.logger.Infof("Created Solana key with ID %s", key.ID()) + + return ks.safeAddKey(key) +} + +func (ks *solana) Sign(_ context.Context, id string, msg []byte) (signature []byte, err error) { + k, err := ks.Get(id) + if err != nil { + return nil, err + } + return k.Sign(msg) +} + +func (ks *solana) getByID(id string) (solkey.Key, error) { + key, found := ks.keyRing.Solana[id] + if !found { + return solkey.Key{}, KeyNotFoundError{ID: id, KeyType: "Solana"} + } + return key, nil +} diff --git a/core/services/keystore/solana_test.go b/core/services/keystore/solana_test.go new file mode 100644 index 00000000..d1c028a6 --- /dev/null +++ b/core/services/keystore/solana_test.go @@ -0,0 +1,133 @@ +package keystore_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" +) + +func Test_SolanaKeyStore_E2E(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + require.NoError(t, keyStore.Unlock(cltest.Password)) + ks := keyStore.Solana() + reset := func() { + require.NoError(t, utils.JustError(db.Exec("DELETE FROM encrypted_key_rings"))) + keyStore.ResetXXXTestOnly() + require.NoError(t, keyStore.Unlock(cltest.Password)) + } + + t.Run("initializes with an empty state", func(t *testing.T) { + defer reset() + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + }) + + t.Run("errors when getting non-existent ID", func(t *testing.T) { + defer reset() + _, err := ks.Get("non-existent-id") + require.Error(t, err) + }) + + t.Run("creates a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, key, retrievedKey) + }) + + t.Run("imports and exports a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + exportJSON, err := ks.Export(key.ID(), cltest.Password) + require.NoError(t, err) + _, err = ks.Export("non-existent", cltest.Password) + assert.Error(t, err) + _, err = ks.Delete(key.ID()) + require.NoError(t, err) + _, err = ks.Get(key.ID()) + require.Error(t, err) + importedKey, err := ks.Import(exportJSON, cltest.Password) + require.NoError(t, err) + _, err = ks.Import(exportJSON, cltest.Password) + assert.Error(t, err) + _, err = ks.Import([]byte(""), cltest.Password) + assert.Error(t, err) + require.Equal(t, key.ID(), importedKey.ID()) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, importedKey, retrievedKey) + }) + + t.Run("adds an externally created key / deletes a key", func(t *testing.T) { + defer reset() + newKey, err := 
solkey.New() + require.NoError(t, err) + err = ks.Add(newKey) + require.NoError(t, err) + err = ks.Add(newKey) + assert.Error(t, err) + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + _, err = ks.Delete(newKey.ID()) + require.NoError(t, err) + _, err = ks.Delete(newKey.ID()) + assert.Error(t, err) + keys, err = ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + _, err = ks.Get(newKey.ID()) + require.Error(t, err) + }) + + t.Run("ensures key", func(t *testing.T) { + defer reset() + err := ks.EnsureKey() + assert.NoError(t, err) + + err = ks.EnsureKey() + assert.NoError(t, err) + + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + }) + + t.Run("sign tx", func(t *testing.T) { + defer reset() + newKey, err := solkey.New() + require.NoError(t, err) + require.NoError(t, ks.Add(newKey)) + + // sign unknown ID + _, err = ks.Sign(testutils.Context(t), "not-real", nil) + assert.Error(t, err) + + // sign known key + payload := []byte{1} + sig, err := ks.Sign(testutils.Context(t), newKey.ID(), payload) + require.NoError(t, err) + + directSig, err := newKey.Sign(payload) + require.NoError(t, err) + + // signatures should match using keystore sign or key sign + assert.Equal(t, directSig, sig) + }) +} diff --git a/core/services/keystore/starknet.go b/core/services/keystore/starknet.go new file mode 100644 index 00000000..ecf8753c --- /dev/null +++ b/core/services/keystore/starknet.go @@ -0,0 +1,197 @@ +package keystore + +import ( + "context" + "fmt" + "math/big" + + "github.com/pkg/errors" + + "github.com/goplugin/caigo" + + "github.com/goplugin/plugin-common/pkg/loop" + adapters "github.com/goplugin/plugin-common/pkg/loop/adapters/starknet" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/starkkey" +) + +//go:generate mockery --quiet --name StarkNet --output ./mocks/ --case=underscore --filename starknet.go +type StarkNet interface { + Get(id string) (starkkey.Key, 
error) + GetAll() ([]starkkey.Key, error) + Create() (starkkey.Key, error) + Add(key starkkey.Key) error + Delete(id string) (starkkey.Key, error) + Import(keyJSON []byte, password string) (starkkey.Key, error) + Export(id string, password string) ([]byte, error) + EnsureKey() error +} + +type starknet struct { + *keyManager +} + +var _ StarkNet = &starknet{} + +func newStarkNetKeyStore(km *keyManager) *starknet { + return &starknet{ + km, + } +} + +func (ks *starknet) Get(id string) (starkkey.Key, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return starkkey.Key{}, ErrLocked + } + return ks.getByID(id) +} + +func (ks *starknet) GetAll() (keys []starkkey.Key, _ error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + for _, key := range ks.keyRing.StarkNet { + keys = append(keys, key) + } + return keys, nil +} + +func (ks *starknet) Create() (starkkey.Key, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return starkkey.Key{}, ErrLocked + } + key, err := starkkey.New() + if err != nil { + return starkkey.Key{}, err + } + return key, ks.safeAddKey(key) +} + +func (ks *starknet) Add(key starkkey.Key) error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + if _, found := ks.keyRing.StarkNet[key.ID()]; found { + return fmt.Errorf("key with ID %s already exists", key.ID()) + } + return ks.safeAddKey(key) +} + +func (ks *starknet) Delete(id string) (starkkey.Key, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return starkkey.Key{}, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return starkkey.Key{}, err + } + err = ks.safeRemoveKey(key) + return key, err +} + +func (ks *starknet) Import(keyJSON []byte, password string) (starkkey.Key, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return starkkey.Key{}, ErrLocked + } + key, err := 
starkkey.FromEncryptedJSON(keyJSON, password) + if err != nil { + return starkkey.Key{}, errors.Wrap(err, "StarkNetKeyStore#ImportKey failed to decrypt key") + } + if _, found := ks.keyRing.StarkNet[key.ID()]; found { + return starkkey.Key{}, fmt.Errorf("key with ID %s already exists", key.ID()) + } + return key, ks.keyManager.safeAddKey(key) +} + +func (ks *starknet) Export(id string, password string) ([]byte, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return nil, err + } + return starkkey.ToEncryptedJSON(key, password, ks.scryptParams) +} + +func (ks *starknet) EnsureKey() error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + if len(ks.keyRing.StarkNet) > 0 { + return nil + } + + key, err := starkkey.New() + if err != nil { + return err + } + + ks.logger.Infof("Created StarkNet key with ID %s", key.ID()) + + return ks.safeAddKey(key) +} + +func (ks *starknet) getByID(id string) (starkkey.Key, error) { + key, found := ks.keyRing.StarkNet[id] + if !found { + return starkkey.Key{}, KeyNotFoundError{ID: id, KeyType: "StarkNet"} + } + return key, nil +} + +// StarknetLooppSigner implements [github.com/goplugin/plugin-common/pkg/loop.Keystore] interface and the requirements +// of signature d/encoding of the [github.com/goplugin/plugin-starknet/relayer/pkg/plugin/txm.NewKeystoreAdapter] +type StarknetLooppSigner struct { + StarkNet +} + +var _ loop.Keystore = &StarknetLooppSigner{} + +// Sign implements [loop.Keystore] +// hash is expected to be the byte representation of big.Int +// the returned []byte is an encoded [github.com/goplugin/plugin-common/pkg/loop/adapters/starknet.Signature]. 
+// this enables compatibility with [github.com/goplugin/plugin-starknet/relayer/pkg/plugin/txm.NewKeystoreAdapter] +func (lk *StarknetLooppSigner) Sign(ctx context.Context, id string, hash []byte) ([]byte, error) { + + k, err := lk.Get(id) + if err != nil { + return nil, err + } + // loopp spec requires passing nil hash to check existence of id + if hash == nil { + return nil, nil + } + + starkHash := new(big.Int).SetBytes(hash) + x, y, err := caigo.Curve.Sign(starkHash, k.ToPrivKey()) + if err != nil { + return nil, fmt.Errorf("error signing data with curve: %w", err) + } + + sig, err := adapters.SignatureFromBigInts(x, y) + if err != nil { + return nil, err + } + return sig.Bytes() +} + +// TODO what is this supposed to return for starknet? +func (lk *StarknetLooppSigner) Accounts(ctx context.Context) ([]string, error) { + return nil, fmt.Errorf("unimplemented") +} diff --git a/core/services/keystore/starknet_test.go b/core/services/keystore/starknet_test.go new file mode 100644 index 00000000..48d5ecea --- /dev/null +++ b/core/services/keystore/starknet_test.go @@ -0,0 +1,152 @@ +package keystore_test + +import ( + "fmt" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/caigo" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/starkkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" + + starktxm "github.com/goplugin/plugin-starknet/relayer/pkg/plugin/txm" +) + +func Test_StarkNetKeyStore_E2E(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := 
configtest.NewTestGeneralConfig(t) + + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + require.NoError(t, keyStore.Unlock(cltest.Password)) + ks := keyStore.StarkNet() + reset := func() { + require.NoError(t, utils.JustError(db.Exec("DELETE FROM encrypted_key_rings"))) + keyStore.ResetXXXTestOnly() + require.NoError(t, keyStore.Unlock(cltest.Password)) + } + + t.Run("initializes with an empty state", func(t *testing.T) { + defer reset() + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + }) + + t.Run("errors when getting non-existent ID", func(t *testing.T) { + defer reset() + _, err := ks.Get("non-existent-id") + require.Error(t, err) + }) + + t.Run("creates a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, key, retrievedKey) + }) + + t.Run("imports and exports a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + exportJSON, err := ks.Export(key.ID(), cltest.Password) + require.NoError(t, err) + _, err = ks.Delete(key.ID()) + require.NoError(t, err) + _, err = ks.Get(key.ID()) + require.Error(t, err) + importedKey, err := ks.Import(exportJSON, cltest.Password) + require.NoError(t, err) + require.Equal(t, key.ID(), importedKey.ID()) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, importedKey, retrievedKey) + }) + + t.Run("adds an externally created key / deletes a key", func(t *testing.T) { + defer reset() + newKey, err := starkkey.New() + require.NoError(t, err) + err = ks.Add(newKey) + require.NoError(t, err) + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + _, err = ks.Delete(newKey.ID()) + require.NoError(t, err) + keys, err = ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + _, err = ks.Get(newKey.ID()) + require.Error(t, err) + }) + + 
t.Run("ensures key", func(t *testing.T) { + defer reset() + err := ks.EnsureKey() + assert.NoError(t, err) + + err = ks.EnsureKey() + assert.NoError(t, err) + + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + }) +} + +func TestStarknetSigner(t *testing.T) { + var ( + starknetSenderAddr = "legit" + ) + baseKs := mocks.NewStarkNet(t) + starkKey, err := starkkey.New() + require.NoError(t, err) + + lk := &keystore.StarknetLooppSigner{baseKs} + // test that we implementw the loopp spec. signing nil data should not error + // on existing sender id + t.Run("key exists", func(t *testing.T) { + baseKs.On("Get", starknetSenderAddr).Return(starkKey, nil) + signed, err := lk.Sign(testutils.Context(t), starknetSenderAddr, nil) + require.Nil(t, signed) + require.NoError(t, err) + }) + t.Run("key doesn't exists", func(t *testing.T) { + baseKs.On("Get", mock.Anything).Return(starkkey.Key{}, fmt.Errorf("key doesn't exist")) + signed, err := lk.Sign(testutils.Context(t), "not an address", nil) + require.Nil(t, signed) + require.Error(t, err) + }) + + // TODO BCF-2242 remove this test once we have starknet smoke/integration tests + // that exercise the transaction signing. 
+ t.Run("keystore adapter integration", func(t *testing.T) { + + adapter := starktxm.NewKeystoreAdapter(lk) + baseKs.On("Get", starknetSenderAddr).Return(starkKey, nil) + hash, err := caigo.Curve.PedersenHash([]*big.Int{big.NewInt(42)}) + require.NoError(t, err) + r, s, err := adapter.Sign(testutils.Context(t), starknetSenderAddr, hash) + require.NoError(t, err) + require.NotNil(t, r) + require.NotNil(t, s) + + pubx, puby, err := caigo.Curve.PrivateToPoint(starkKey.ToPrivKey()) + require.NoError(t, err) + require.True(t, caigo.Curve.Verify(hash, r, s, pubx, puby)) + }) +} diff --git a/core/services/keystore/vrf.go b/core/services/keystore/vrf.go new file mode 100644 index 00000000..d023bb56 --- /dev/null +++ b/core/services/keystore/vrf.go @@ -0,0 +1,150 @@ +package keystore + +import ( + "fmt" + "math/big" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" +) + +//go:generate mockery --quiet --name VRF --output ./mocks/ --case=underscore --filename vrf.go + +type VRF interface { + Get(id string) (vrfkey.KeyV2, error) + GetAll() ([]vrfkey.KeyV2, error) + Create() (vrfkey.KeyV2, error) + Add(key vrfkey.KeyV2) error + Delete(id string) (vrfkey.KeyV2, error) + Import(keyJSON []byte, password string) (vrfkey.KeyV2, error) + Export(id string, password string) ([]byte, error) + + GenerateProof(id string, seed *big.Int) (vrfkey.Proof, error) +} + +var ( + ErrMissingVRFKey = errors.New("unable to find VRF key") +) + +type vrf struct { + *keyManager +} + +var _ VRF = &vrf{} + +func newVRFKeyStore(km *keyManager) *vrf { + return &vrf{ + km, + } +} + +func (ks *vrf) Get(id string) (vrfkey.KeyV2, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return vrfkey.KeyV2{}, ErrLocked + } + return ks.getByID(id) +} + +func (ks *vrf) GetAll() (keys []vrfkey.KeyV2, _ error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + for _, key := range ks.keyRing.VRF { + 
keys = append(keys, key) + } + return keys, nil +} + +func (ks *vrf) Create() (vrfkey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return vrfkey.KeyV2{}, ErrLocked + } + key, err := vrfkey.NewV2() + if err != nil { + return vrfkey.KeyV2{}, err + } + return key, ks.safeAddKey(key) +} + +func (ks *vrf) Add(key vrfkey.KeyV2) error { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return ErrLocked + } + if _, found := ks.keyRing.VRF[key.ID()]; found { + return fmt.Errorf("key with ID %s already exists", key.ID()) + } + return ks.safeAddKey(key) +} + +func (ks *vrf) Delete(id string) (vrfkey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return vrfkey.KeyV2{}, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return vrfkey.KeyV2{}, err + } + err = ks.safeRemoveKey(key) + return key, err +} + +func (ks *vrf) Import(keyJSON []byte, password string) (vrfkey.KeyV2, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if ks.isLocked() { + return vrfkey.KeyV2{}, ErrLocked + } + key, err := vrfkey.FromEncryptedJSON(keyJSON, password) + if err != nil { + return vrfkey.KeyV2{}, errors.Wrap(err, "VRFKeyStore#ImportKey failed to decrypt key") + } + if _, found := ks.keyRing.VRF[key.ID()]; found { + return vrfkey.KeyV2{}, fmt.Errorf("key with ID %s already exists", key.ID()) + } + return key, ks.keyManager.safeAddKey(key) +} + +func (ks *vrf) Export(id string, password string) ([]byte, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return nil, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return nil, err + } + return key.ToEncryptedJSON(password, ks.scryptParams) +} + +func (ks *vrf) GenerateProof(id string, seed *big.Int) (vrfkey.Proof, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + if ks.isLocked() { + return vrfkey.Proof{}, ErrLocked + } + key, err := ks.getByID(id) + if err != nil { + return vrfkey.Proof{}, err + } + return 
key.GenerateProof(seed) +} + +func (ks *vrf) getByID(id string) (vrfkey.KeyV2, error) { + key, found := ks.keyRing.VRF[id] + if !found { + return vrfkey.KeyV2{}, KeyNotFoundError{ID: id, KeyType: "VRF"} + } + return key, nil +} diff --git a/core/services/keystore/vrf_test.go b/core/services/keystore/vrf_test.go new file mode 100644 index 00000000..7e8539f3 --- /dev/null +++ b/core/services/keystore/vrf_test.go @@ -0,0 +1,172 @@ +package keystore_test + +import ( + "fmt" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + + "github.com/stretchr/testify/require" +) + +func Test_VRFKeyStore_E2E(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + keyStore := keystore.ExposedNewMaster(t, db, cfg.Database()) + require.NoError(t, keyStore.Unlock(cltest.Password)) + ks := keyStore.VRF() + reset := func() { + require.NoError(t, utils.JustError(db.Exec("DELETE FROM encrypted_key_rings"))) + keyStore.ResetXXXTestOnly() + require.NoError(t, keyStore.Unlock(cltest.Password)) + } + + t.Run("initializes with an empty state", func(t *testing.T) { + defer reset() + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + }) + + t.Run("errors when getting non-existent ID", func(t *testing.T) { + defer reset() + _, err := ks.Get("non-existent-id") + require.Error(t, err) + }) + + t.Run("creates a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, key, retrievedKey) + }) + + t.Run("imports and 
exports a key", func(t *testing.T) { + defer reset() + key, err := ks.Create() + require.NoError(t, err) + exportJSON, err := ks.Export(key.ID(), cltest.Password) + require.NoError(t, err) + _, err = ks.Delete(key.ID()) + require.NoError(t, err) + _, err = ks.Get(key.ID()) + require.Error(t, err) + importedKey, err := ks.Import(exportJSON, cltest.Password) + require.NoError(t, err) + require.Equal(t, key.ID(), importedKey.ID()) + retrievedKey, err := ks.Get(key.ID()) + require.NoError(t, err) + require.Equal(t, importedKey, retrievedKey) + }) + + t.Run("adds an externally created key / deletes a key", func(t *testing.T) { + defer reset() + newKey, err := vrfkey.NewV2() + require.NoError(t, err) + err = ks.Add(newKey) + require.NoError(t, err) + keys, err := ks.GetAll() + require.NoError(t, err) + require.Equal(t, 1, len(keys)) + _, err = ks.Delete(newKey.ID()) + require.NoError(t, err) + keys, err = ks.GetAll() + require.NoError(t, err) + require.Equal(t, 0, len(keys)) + _, err = ks.Get(newKey.ID()) + require.Error(t, err) + }) + + t.Run("fails to add an already added key", func(t *testing.T) { + defer reset() + + k, err := vrfkey.NewV2() + require.NoError(t, err) + + err = ks.Add(k) + require.NoError(t, err) + err = ks.Add(k) + + assert.Error(t, err) + assert.Equal(t, fmt.Sprintf("key with ID %s already exists", k.ID()), err.Error()) + }) + + t.Run("fails to delete a key that doesn't exists", func(t *testing.T) { + defer reset() + + k, err := vrfkey.NewV2() + require.NoError(t, err) + + err = ks.Add(k) + require.NoError(t, err) + + fk, err := ks.Delete("non-existent") + + assert.Zero(t, fk) + assert.Error(t, err) + }) + + t.Run("imports a key exported from a v1 keystore", func(t *testing.T) { + defer reset() + + exportedKey := 
`{"PublicKey":"0xd2377bc6be8a2c5ce163e1867ee42ef111e320686f940a98e52e9c019ca0606800","vrf_key":{"address":"b94276ad4e5452732ec0cccf30ef7919b67844b6","crypto":{"cipher":"aes-128-ctr","ciphertext":"ff66d61d02dba54a61bab1ceb8414643f9e76b7351785d2959e2c8b50ee69a92","cipherparams":{"iv":"75705da271b11e330a27b8d593a3930c"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"efe5b372e4fe79d0af576a79d65a1ee35d0792d9c92b70107b5ada1817ea7c7b"},"mac":"e4d0bb08ffd004ab03aeaa42367acbd9bb814c6cfd981f5157503f54c30816e7"},"version":3}}` + importedKey, err := ks.Import([]byte(exportedKey), "p4SsW0rD1!@#_") + require.NoError(t, err) + require.Equal(t, "0xd2377bc6be8a2c5ce163e1867ee42ef111e320686f940a98e52e9c019ca0606800", importedKey.ID()) + }) + + t.Run("fails to import an already imported key", func(t *testing.T) { + defer reset() + + exportedKey := `{"PublicKey":"0xd2377bc6be8a2c5ce163e1867ee42ef111e320686f940a98e52e9c019ca0606800","vrf_key":{"address":"b94276ad4e5452732ec0cccf30ef7919b67844b6","crypto":{"cipher":"aes-128-ctr","ciphertext":"ff66d61d02dba54a61bab1ceb8414643f9e76b7351785d2959e2c8b50ee69a92","cipherparams":{"iv":"75705da271b11e330a27b8d593a3930c"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"efe5b372e4fe79d0af576a79d65a1ee35d0792d9c92b70107b5ada1817ea7c7b"},"mac":"e4d0bb08ffd004ab03aeaa42367acbd9bb814c6cfd981f5157503f54c30816e7"},"version":3}}` + importedKey, err := ks.Import([]byte(exportedKey), "p4SsW0rD1!@#_") + require.NoError(t, err) + + keyStore.SetPassword("p4SsW0rD1!@#_") + k, err := ks.Import([]byte(exportedKey), "p4SsW0rD1!@#_") + + assert.Zero(t, k) + assert.Error(t, err) + assert.Equal(t, fmt.Sprintf("key with ID %s already exists", importedKey.ID()), err.Error()) + }) + + t.Run("fails to export non-existent key", func(t *testing.T) { + k, err := ks.Export("non-existent", cltest.Password) + + assert.Zero(t, k) + assert.Error(t, err) + }) + + t.Run("generate proof for keys", func(t *testing.T) { + defer reset() + 
+ t.Run("fails to generate proof for non-existent key", func(t *testing.T) { + pf, err := ks.GenerateProof("non-existent", big.NewInt(int64(1))) + + assert.Zero(t, pf) + assert.Error(t, err) + }) + + t.Run("generates a proof for a key", func(t *testing.T) { + k, err := vrfkey.NewV2() + require.NoError(t, err) + err = ks.Add(k) + require.NoError(t, err) + + pf, err := ks.GenerateProof(k.ID(), big.NewInt(int64(1))) + require.NoError(t, err) + + assert.NotZero(t, pf) + }) + }) +} diff --git a/core/services/mocks/checker.go b/core/services/mocks/checker.go new file mode 100644 index 00000000..b16a42cb --- /dev/null +++ b/core/services/mocks/checker.go @@ -0,0 +1,159 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + pkgservices "github.com/goplugin/plugin-common/pkg/services" + mock "github.com/stretchr/testify/mock" +) + +// Checker is an autogenerated mock type for the Checker type +type Checker struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *Checker) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// IsHealthy provides a mock function with given fields: +func (_m *Checker) IsHealthy() (bool, map[string]error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsHealthy") + } + + var r0 bool + var r1 map[string]error + if rf, ok := ret.Get(0).(func() (bool, map[string]error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func() map[string]error); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(map[string]error) + } + } + + return r0, r1 +} + +// IsReady provides a mock function with given fields: +func (_m *Checker) IsReady() (bool, 
map[string]error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsReady") + } + + var r0 bool + var r1 map[string]error + if rf, ok := ret.Get(0).(func() (bool, map[string]error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func() map[string]error); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(map[string]error) + } + } + + return r0, r1 +} + +// Register provides a mock function with given fields: service +func (_m *Checker) Register(service pkgservices.HealthReporter) error { + ret := _m.Called(service) + + if len(ret) == 0 { + panic("no return value specified for Register") + } + + var r0 error + if rf, ok := ret.Get(0).(func(pkgservices.HealthReporter) error); ok { + r0 = rf(service) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: +func (_m *Checker) Start() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Unregister provides a mock function with given fields: name +func (_m *Checker) Unregister(name string) error { + ret := _m.Called(name) + + if len(ret) == 0 { + panic("no return value specified for Unregister") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(name) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewChecker creates a new instance of Checker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewChecker(t interface { + mock.TestingT + Cleanup(func()) +}) *Checker { + mock := &Checker{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/mocks/config.go b/core/services/mocks/config.go new file mode 100644 index 00000000..13ec4010 --- /dev/null +++ b/core/services/mocks/config.go @@ -0,0 +1,39 @@ +// Code generated by mockery v2.14.0. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Config is an autogenerated mock type for the Config type +type Config struct { + mock.Mock +} + +// LogSQL provides a mock function with given fields: +func (_m *Config) LogSQL() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +type mockConstructorTestingTNewConfig interface { + mock.TestingT + Cleanup(func()) +} + +// NewConfig creates a new instance of Config. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewConfig(t mockConstructorTestingTNewConfig) *Config { + mock := &Config{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/multi.go b/core/services/multi.go new file mode 100644 index 00000000..58e1a67c --- /dev/null +++ b/core/services/multi.go @@ -0,0 +1,27 @@ +package services + +import ( + "io" + + "github.com/goplugin/plugin-common/pkg/services" +) + +// StartClose is a subset of the ServiceCtx interface. +type StartClose = services.StartClose + +// MultiStart is a utility for starting multiple services together. +// The set of started services is tracked internally, so that they can be closed if any single service fails to start. +type MultiStart = services.MultiStart + +// CloseAll closes all elements concurrently. +// Use this when you have various different types of io.Closer. 
+func CloseAll(cs ...io.Closer) error { + return services.CloseAll(cs...) +} + +// MultiCloser returns an io.Closer which closes all elements concurrently. +// Use this when you have a slice of a type which implements io.Closer. +// []io.Closer can be cast directly to MultiCloser. +func MultiCloser[C io.Closer](cs []C) io.Closer { + return services.MultiCloser(cs) +} diff --git a/core/services/nurse.go b/core/services/nurse.go new file mode 100644 index 00000000..00f69bbb --- /dev/null +++ b/core/services/nurse.go @@ -0,0 +1,489 @@ +package services + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/fs" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "runtime/trace" + "sort" + "strings" + "sync" + "time" + + "github.com/google/pprof/profile" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type Nurse struct { + services.StateMachine + + cfg Config + log logger.Logger + + checks map[string]CheckFunc + checksMu sync.RWMutex + + chGather chan gatherRequest + chStop chan struct{} + wgDone sync.WaitGroup +} + +type Config interface { + BlockProfileRate() int + CPUProfileRate() int + GatherDuration() commonconfig.Duration + GatherTraceDuration() commonconfig.Duration + GoroutineThreshold() int + MaxProfileSize() utils.FileSize + MemProfileRate() int + MemThreshold() utils.FileSize + MutexProfileFraction() int + PollInterval() commonconfig.Duration + ProfileRoot() string +} + +type CheckFunc func() (unwell bool, meta Meta) + +type gatherRequest struct { + reason string + meta Meta +} + +type Meta map[string]interface{} + +const ( + cpuProfName = "cpu" + traceProfName = "trace" +) + +func NewNurse(cfg Config, log logger.Logger) *Nurse { + return &Nurse{ + cfg: cfg, + log: log.Named("Nurse"), + checks: make(map[string]CheckFunc), + chGather: make(chan gatherRequest, 1), + chStop: make(chan 
struct{}), + } +} + +func (n *Nurse) Start() error { + return n.StartOnce("Nurse", func() error { + // This must be set *once*, and it must occur as early as possible + if n.cfg.MemProfileRate() != runtime.MemProfileRate { + runtime.MemProfileRate = n.cfg.BlockProfileRate() + } + + n.log.Debugf("Starting nurse with config %+v", n.cfg) + runtime.SetCPUProfileRate(n.cfg.CPUProfileRate()) + runtime.SetBlockProfileRate(n.cfg.BlockProfileRate()) + runtime.SetMutexProfileFraction(n.cfg.MutexProfileFraction()) + + err := utils.EnsureDirAndMaxPerms(n.cfg.ProfileRoot(), 0744) + if err != nil { + return err + } + + n.AddCheck("mem", n.checkMem) + n.AddCheck("goroutines", n.checkGoroutines) + + n.wgDone.Add(1) + // Checker + go func() { + defer n.wgDone.Done() + for { + select { + case <-n.chStop: + return + case <-time.After(n.cfg.PollInterval().Duration()): + } + + func() { + n.checksMu.RLock() + defer n.checksMu.RUnlock() + for reason, checkFunc := range n.checks { + if unwell, meta := checkFunc(); unwell { + n.GatherVitals(reason, meta) + break + } + } + }() + } + }() + + n.wgDone.Add(1) + // Responder + go func() { + defer n.wgDone.Done() + for { + select { + case <-n.chStop: + return + case req := <-n.chGather: + n.gatherVitals(req.reason, req.meta) + } + } + }() + + return nil + }) +} + +func (n *Nurse) Close() error { + return n.StopOnce("Nurse", func() error { + n.log.Debug("Nurse closing...") + defer n.log.Debug("Nurse closed") + close(n.chStop) + n.wgDone.Wait() + return nil + }) +} + +func (n *Nurse) AddCheck(reason string, checkFunc CheckFunc) { + n.checksMu.Lock() + defer n.checksMu.Unlock() + n.checks[reason] = checkFunc +} + +func (n *Nurse) GatherVitals(reason string, meta Meta) { + select { + case <-n.chStop: + case n.chGather <- gatherRequest{reason, meta}: + default: + } +} + +func (n *Nurse) checkMem() (bool, Meta) { + var memStats runtime.MemStats + runtime.ReadMemStats(&memStats) + unwell := memStats.Alloc >= uint64(n.cfg.MemThreshold()) + if !unwell { 
+ return false, nil + } + return true, Meta{ + "mem_alloc": utils.FileSize(memStats.Alloc), + "threshold": n.cfg.MemThreshold(), + } +} + +func (n *Nurse) checkGoroutines() (bool, Meta) { + num := runtime.NumGoroutine() + unwell := num >= n.cfg.GoroutineThreshold() + if !unwell { + return false, nil + } + return true, Meta{ + "num_goroutine": num, + "threshold": n.cfg.GoroutineThreshold(), + } +} + +func (n *Nurse) gatherVitals(reason string, meta Meta) { + loggerFields := (logger.Fields{"reason": reason}).Merge(logger.Fields(meta)) + + n.log.Debugw("Nurse is gathering vitals", loggerFields.Slice()...) + + size, err := n.totalProfileBytes() + if err != nil { + n.log.Errorw("could not fetch total profile bytes", loggerFields.With("err", err).Slice()...) + return + } else if size >= uint64(n.cfg.MaxProfileSize()) { + n.log.Warnw("cannot write pprof profile, total profile size exceeds configured PPROF_MAX_PROFILE_SIZE", + loggerFields.With("total", size, "max", n.cfg.MaxProfileSize()).Slice()..., + ) + return + } + + now := time.Now() + + err = n.appendLog(now, reason, meta) + if err != nil { + n.log.Warnw("cannot write pprof profile", loggerFields.With("err", err).Slice()...) 
+ return + } + var wg sync.WaitGroup + wg.Add(1) + go n.gatherCPU(now, &wg) + wg.Add(1) + go n.gatherTrace(now, &wg) + wg.Add(1) + go n.gather("allocs", now, &wg) + wg.Add(1) + go n.gather("block", now, &wg) + wg.Add(1) + go n.gather("goroutine", now, &wg) + + // pprof docs state memory profile is not + // created if the MemProfileRate is zero + if runtime.MemProfileRate != 0 { + wg.Add(1) + go n.gather("heap", now, &wg) + } else { + n.log.Info("skipping heap collection because runtime.MemProfileRate = 0") + } + + wg.Add(1) + go n.gather("mutex", now, &wg) + wg.Add(1) + go n.gather("threadcreate", now, &wg) + + ch := make(chan struct{}) + n.wgDone.Add(1) + go func() { + defer n.wgDone.Done() + defer close(ch) + wg.Wait() + }() + + select { + case <-n.chStop: + case <-ch: + } +} + +func (n *Nurse) appendLog(now time.Time, reason string, meta Meta) error { + filename := filepath.Join(n.cfg.ProfileRoot(), "nurse.log") + + n.log.Debugf("creating nurse log %s", filename) + file, err := os.Create(filename) + + if err != nil { + return err + } + wc := utils.NewDeferableWriteCloser(file) + defer wc.Close() + + if _, err = wc.Write([]byte(fmt.Sprintf("==== %v\n", now))); err != nil { + return err + } + if _, err = wc.Write([]byte(fmt.Sprintf("reason: %v\n", reason))); err != nil { + return err + } + ks := make([]string, len(meta)) + var i int + for k := range meta { + ks[i] = k + i++ + } + sort.Strings(ks) + for _, k := range ks { + if _, err = wc.Write([]byte(fmt.Sprintf("- %v: %v\n", k, meta[k]))); err != nil { + return err + } + } + _, err = wc.Write([]byte("\n")) + if err != nil { + return err + } + return wc.Close() +} + +func (n *Nurse) gatherCPU(now time.Time, wg *sync.WaitGroup) { + defer wg.Done() + n.log.Debugf("gather cpu %d ...", now.UnixMicro()) + defer n.log.Debugf("gather cpu %d done", now.UnixMicro()) + wc, err := n.createFile(now, cpuProfName, false) + if err != nil { + n.log.Errorw("could not write cpu profile", "err", err) + return + } + defer wc.Close() 
+ + err = pprof.StartCPUProfile(wc) + if err != nil { + n.log.Errorw("could not start cpu profile", "err", err) + return + } + + select { + case <-n.chStop: + n.log.Debug("gather cpu received stop") + + case <-time.After(n.cfg.GatherDuration().Duration()): + n.log.Debugf("gather cpu duration elapsed %s. stoping profiling.", n.cfg.GatherDuration().Duration().String()) + } + + pprof.StopCPUProfile() + + err = wc.Close() + if err != nil { + n.log.Errorw("could not close cpu profile", "err", err) + return + } + +} + +func (n *Nurse) gatherTrace(now time.Time, wg *sync.WaitGroup) { + defer wg.Done() + + n.log.Debugf("gather trace %d ...", now.UnixMicro()) + defer n.log.Debugf("gather trace %d done", now.UnixMicro()) + wc, err := n.createFile(now, traceProfName, true) + if err != nil { + n.log.Errorw("could not write trace profile", "err", err) + return + } + defer wc.Close() + + err = trace.Start(wc) + if err != nil { + n.log.Errorw("could not start trace profile", "err", err) + return + } + + select { + case <-n.chStop: + case <-time.After(n.cfg.GatherTraceDuration().Duration()): + } + + trace.Stop() + + err = wc.Close() + if err != nil { + n.log.Errorw("could not close trace profile", "err", err) + return + } +} + +func (n *Nurse) gather(typ string, now time.Time, wg *sync.WaitGroup) { + defer wg.Done() + + n.log.Debugf("gather %s %d ...", typ, now.UnixMicro()) + n.log.Debugf("gather %s %d done", typ, now.UnixMicro()) + + p := pprof.Lookup(typ) + if p == nil { + n.log.Errorf("Invariant violation: pprof type '%v' does not exist", typ) + return + } + + p0, err := collectProfile(p) + if err != nil { + n.log.Errorw(fmt.Sprintf("could not collect %v profile", typ), "err", err) + return + } + + t := time.NewTimer(n.cfg.GatherDuration().Duration()) + defer t.Stop() + + select { + case <-n.chStop: + return + case <-t.C: + } + + p1, err := collectProfile(p) + if err != nil { + n.log.Errorw(fmt.Sprintf("could not collect %v profile", typ), "err", err) + return + } + ts := 
p1.TimeNanos + dur := p1.TimeNanos - p0.TimeNanos + + p0.Scale(-1) + + p1, err = profile.Merge([]*profile.Profile{p0, p1}) + if err != nil { + n.log.Errorw(fmt.Sprintf("could not compute delta for %v profile", typ), "err", err) + return + } + + p1.TimeNanos = ts // set since we don't know what profile.Merge set for TimeNanos. + p1.DurationNanos = dur + + wc, err := n.createFile(now, typ, false) + if err != nil { + n.log.Errorw(fmt.Sprintf("could not write %v profile", typ), "err", err) + return + } + defer wc.Close() + + err = p1.Write(wc) + if err != nil { + n.log.Errorw(fmt.Sprintf("could not write %v profile", typ), "err", err) + return + } + err = wc.Close() + if err != nil { + n.log.Errorw(fmt.Sprintf("could not close file for %v profile", typ), "err", err) + return + } +} + +func collectProfile(p *pprof.Profile) (*profile.Profile, error) { + var buf bytes.Buffer + if err := p.WriteTo(&buf, 0); err != nil { + return nil, err + } + ts := time.Now().UnixNano() + p0, err := profile.Parse(&buf) + if err != nil { + return nil, err + } + p0.TimeNanos = ts + return p0, nil +} + +func (n *Nurse) createFile(now time.Time, typ string, shouldGzip bool) (*utils.DeferableWriteCloser, error) { + filename := fmt.Sprintf("%v.%v.pprof", now.UnixMicro(), typ) + if shouldGzip { + filename += ".gz" + } + fullpath := filepath.Join(n.cfg.ProfileRoot(), filename) + n.log.Debugf("creating file %s", fullpath) + + file, err := os.Create(fullpath) + if err != nil { + return nil, err + } + if shouldGzip { + gw := gzip.NewWriter(file) + return utils.NewDeferableWriteCloser(gw), nil + } + + return utils.NewDeferableWriteCloser(file), nil +} + +func (n *Nurse) totalProfileBytes() (uint64, error) { + profiles, err := n.listProfiles() + if err != nil { + return 0, err + } + var size uint64 + for _, p := range profiles { + size += uint64(p.Size()) + } + return size, nil +} + +func (n *Nurse) listProfiles() ([]fs.FileInfo, error) { + out := make([]fs.FileInfo, 0) + entries, err := 
os.ReadDir(n.cfg.ProfileRoot()) + + if err != nil { + return nil, err + } + for _, entry := range entries { + if entry.IsDir() || + (filepath.Ext(entry.Name()) != ".pprof" && + entry.Name() != "nurse.log" && + !strings.HasSuffix(entry.Name(), ".pprof.gz")) { + continue + } + info, err := entry.Info() + if err != nil { + return nil, err + } + out = append(out, info) + } + return out, nil + +} diff --git a/core/services/nurse_test.go b/core/services/nurse_test.go new file mode 100644 index 00000000..93912ebe --- /dev/null +++ b/core/services/nurse_test.go @@ -0,0 +1,142 @@ +package services + +import ( + "runtime" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type mockConfig struct { + t *testing.T + root string + pollInterval *commonconfig.Duration + gatherDuration *commonconfig.Duration + traceDuration *commonconfig.Duration + profileSize utils.FileSize + cpuProfileRate int + memProfileRate int + blockProfileRate int + mutexProfileFraction int + memThreshold utils.FileSize + goroutineThreshold int +} + +var ( + testInterval = 50 * time.Millisecond + testDuration = 20 * time.Millisecond + testRate = 100 + testSize = 16 * 1024 * 1024 +) + +func newMockConfig(t *testing.T) *mockConfig { + return &mockConfig{ + root: t.TempDir(), + pollInterval: commonconfig.MustNewDuration(testInterval), + gatherDuration: commonconfig.MustNewDuration(testDuration), + traceDuration: commonconfig.MustNewDuration(testDuration), + profileSize: utils.FileSize(testSize), + memProfileRate: runtime.MemProfileRate, + blockProfileRate: testRate, + mutexProfileFraction: testRate, + memThreshold: utils.FileSize(testSize), + goroutineThreshold: testRate, + t: t, + } +} + +func (c mockConfig) 
ProfileRoot() string { + return c.root +} + +func (c mockConfig) PollInterval() commonconfig.Duration { + return *c.pollInterval +} + +func (c mockConfig) GatherDuration() commonconfig.Duration { + return *c.gatherDuration +} + +func (c mockConfig) GatherTraceDuration() commonconfig.Duration { + return *c.traceDuration +} + +func (c mockConfig) MaxProfileSize() utils.FileSize { + return c.profileSize +} + +func (c mockConfig) CPUProfileRate() int { + return c.cpuProfileRate +} + +func (c mockConfig) MemProfileRate() int { + return c.memProfileRate +} + +func (c mockConfig) BlockProfileRate() int { + return c.blockProfileRate +} + +func (c mockConfig) MutexProfileFraction() int { + return c.mutexProfileFraction +} + +func (c mockConfig) MemThreshold() utils.FileSize { + return c.memThreshold +} + +func (c mockConfig) GoroutineThreshold() int { + return c.goroutineThreshold +} + +func TestNurse(t *testing.T) { + + l := logger.TestLogger(t) + nrse := NewNurse(newMockConfig(t), l) + nrse.AddCheck("test", func() (bool, Meta) { return true, Meta{} }) + + require.NoError(t, nrse.Start()) + defer func() { require.NoError(t, nrse.Close()) }() + + require.NoError(t, nrse.appendLog(time.Now(), "test", Meta{})) + + wc, err := nrse.createFile(time.Now(), "test", false) + require.NoError(t, err) + n, err := wc.Write([]byte("junk")) + require.NoError(t, err) + require.Greater(t, n, 0) + require.NoError(t, wc.Close()) + + wc, err = nrse.createFile(time.Now(), "testgz", false) + require.NoError(t, err) + require.NoError(t, wc.Close()) + + // check both of the files exist. 
synchronous, check immediately + assert.True(t, profileExists(t, nrse, "test")) + assert.True(t, profileExists(t, nrse, "testgz")) + + testutils.AssertEventually(t, func() bool { return profileExists(t, nrse, cpuProfName) }) + testutils.AssertEventually(t, func() bool { return profileExists(t, nrse, traceProfName) }) + n2, err := nrse.totalProfileBytes() + require.NoError(t, err) + require.Greater(t, n2, uint64(0)) +} + +func profileExists(t *testing.T, nrse *Nurse, typ string) bool { + profiles, err := nrse.listProfiles() + require.Nil(t, err) + for _, p := range profiles { + if strings.Contains(p.Name(), typ) { + return true + } + } + return false +} diff --git a/core/services/ocr/config.go b/core/services/ocr/config.go new file mode 100644 index 00000000..288b1b0d --- /dev/null +++ b/core/services/ocr/config.go @@ -0,0 +1,35 @@ +package ocr + +import ( + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// Config contains OCR configurations for a job. 
+type Config interface { + pg.QConfig +} + +func toLocalConfig(cfg ValidationConfig, evmOcrConfig evmconfig.OCR, insecureCfg insecureConfig, spec job.OCROracleSpec, ocrConfig job.OCRConfig) ocrtypes.LocalConfig { + concreteSpec := job.LoadConfigVarsLocalOCR(evmOcrConfig, spec, ocrConfig) + lc := ocrtypes.LocalConfig{ + BlockchainTimeout: concreteSpec.BlockchainTimeout.Duration(), + ContractConfigConfirmations: concreteSpec.ContractConfigConfirmations, + SkipContractConfigConfirmations: cfg.ChainType().IsL2(), + ContractConfigTrackerPollInterval: concreteSpec.ContractConfigTrackerPollInterval.Duration(), + ContractConfigTrackerSubscribeInterval: concreteSpec.ContractConfigTrackerSubscribeInterval.Duration(), + ContractTransmitterTransmitTimeout: concreteSpec.ContractTransmitterTransmitTimeout.Duration(), + DatabaseTimeout: concreteSpec.DatabaseTimeout.Duration(), + DataSourceTimeout: concreteSpec.ObservationTimeout.Duration(), + DataSourceGracePeriod: concreteSpec.ObservationGracePeriod.Duration(), + } + if insecureCfg.OCRDevelopmentMode() { + // Skips config validation so we can use any config parameters we want. + // For example to lower contractConfigTrackerPollInterval to speed up tests. 
+ lc.DevelopmentMode = ocrtypes.EnableDangerousDevelopmentMode + } + return lc +} diff --git a/core/services/ocr/config_overrider.go b/core/services/ocr/config_overrider.go new file mode 100644 index 00000000..28698270 --- /dev/null +++ b/core/services/ocr/config_overrider.go @@ -0,0 +1,158 @@ +package ocr + +import ( + "context" + "fmt" + "math" + "math/big" + "sync" + "time" + + "github.com/pkg/errors" + + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type ConfigOverriderImpl struct { + services.StateMachine + logger logger.Logger + flags *ContractFlags + contractAddress ethkey.EIP55Address + + pollTicker utils.TickerBase + lastStateChangeTimestamp time.Time + isHibernating bool + DeltaCFromAddress time.Duration + + // Start/Stop lifecycle + ctx context.Context + ctxCancel context.CancelFunc + chDone chan struct{} + + mu sync.RWMutex +} + +// InitialHibernationStatus - hibernation state set until the first successful update from the chain +const InitialHibernationStatus = false + +type DeltaCConfig interface { + DeltaCOverride() time.Duration + DeltaCJitterOverride() time.Duration +} + +func NewConfigOverriderImpl( + logger logger.Logger, + cfg DeltaCConfig, + contractAddress ethkey.EIP55Address, + flags *ContractFlags, + pollTicker utils.TickerBase, +) (*ConfigOverriderImpl, error) { + + if !flags.ContractExists() { + return nil, errors.Errorf("OCRConfigOverrider: Flags contract instance is missing, the contract does not exist: %s. 
"+ + "Please create the contract or remove the OCR.TransmitterAddress configuration variable", contractAddress.Address()) + } + + addressBig := contractAddress.Big() + jitterSeconds := int64(cfg.DeltaCJitterOverride() / time.Second) + addressSeconds := addressBig.Mod(addressBig, big.NewInt(jitterSeconds)).Uint64() + deltaC := cfg.DeltaCOverride() + time.Duration(addressSeconds)*time.Second + + ctx, cancel := context.WithCancel(context.Background()) + co := ConfigOverriderImpl{ + services.StateMachine{}, + logger, + flags, + contractAddress, + pollTicker, + time.Now(), + InitialHibernationStatus, + deltaC, + ctx, + cancel, + make(chan struct{}), + sync.RWMutex{}, + } + + return &co, nil +} + +// Start starts ConfigOverriderImpl. +func (c *ConfigOverriderImpl) Start(context.Context) error { + return c.StartOnce("OCRConfigOverrider", func() (err error) { + if err := c.updateFlagsStatus(); err != nil { + c.logger.Errorw("OCRConfigOverrider: Error updating hibernation status at OCR job start. 
Will default to not hibernating, until next successful update.", "err", err) + } + + go c.eventLoop() + return nil + }) +} + +func (c *ConfigOverriderImpl) Close() error { + return c.StopOnce("OCRContractTracker", func() error { + c.ctxCancel() + <-c.chDone + return nil + }) +} + +func (c *ConfigOverriderImpl) eventLoop() { + defer close(c.chDone) + c.pollTicker.Resume() + defer c.pollTicker.Destroy() + for { + select { + case <-c.ctx.Done(): + return + case <-c.pollTicker.Ticks(): + if err := c.updateFlagsStatus(); err != nil { + c.logger.Errorw("OCRConfigOverrider: Error updating hibernation status", "err", err) + } + } + } +} + +func (c *ConfigOverriderImpl) updateFlagsStatus() error { + isFlagLowered, err := c.flags.IsLowered(c.contractAddress.Address()) + if err != nil { + return errors.Wrap(err, "Failed to check if flag is lowered") + } + shouldHibernate := !isFlagLowered + + c.mu.Lock() + defer c.mu.Unlock() + + wasUpdated := (!c.isHibernating && shouldHibernate) || (c.isHibernating && !shouldHibernate) + if wasUpdated { + c.logger.Infow( + fmt.Sprintf("OCRConfigOverrider: Setting hibernation state to '%v'", shouldHibernate), + "elapsedSinceLastChange", time.Since(c.lastStateChangeTimestamp), + ) + c.lastStateChangeTimestamp = time.Now() + c.isHibernating = shouldHibernate + } + return nil +} + +func (c *ConfigOverriderImpl) ConfigOverride() *ocrtypes.ConfigOverride { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.isHibernating { + c.logger.Debugw("OCRConfigOverrider: Returning a config override") + return c.configOverrideInstance() + } + c.logger.Debugw("OCRConfigOverrider: Config override returned as nil") + return nil +} + +func (c *ConfigOverriderImpl) configOverrideInstance() *ocrtypes.ConfigOverride { + return &ocrtypes.ConfigOverride{AlphaPPB: math.MaxUint64, DeltaC: c.DeltaCFromAddress} +} diff --git a/core/services/ocr/config_overrider_test.go b/core/services/ocr/config_overrider_test.go new file mode 100644 index 00000000..e63cfc97 --- /dev/null 
+++ b/core/services/ocr/config_overrider_test.go @@ -0,0 +1,217 @@ +package ocr_test + +import ( + "math" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/mocks" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type configOverriderUni struct { + overrider *ocr.ConfigOverriderImpl + contractAddress ethkey.EIP55Address +} + +type deltaCConfig struct{} + +func (d deltaCConfig) DeltaCOverride() time.Duration { return time.Hour * 24 * 7 } + +func (d deltaCConfig) DeltaCJitterOverride() time.Duration { return time.Hour } + +func newConfigOverriderUni(t *testing.T, pollITicker utils.TickerBase, flagsContract *mocks.Flags) (uni configOverriderUni) { + var testLogger = logger.TestLogger(t) + contractAddress := cltest.NewEIP55Address() + + flags := &ocr.ContractFlags{FlagsInterface: flagsContract} + var err error + uni.overrider, err = ocr.NewConfigOverriderImpl( + testLogger, + deltaCConfig{}, + contractAddress, + flags, + pollITicker, + ) + require.NoError(t, err) + + uni.contractAddress = contractAddress + + return uni +} + +func TestIntegration_OCRConfigOverrider_EntersHibernation(t *testing.T) { + g := gomega.NewWithT(t) + + flagsContract := mocks.NewFlags(t) + + ticker := utils.NewPausableTicker(3 * time.Second) + uni := newConfigOverriderUni(t, &ticker, flagsContract) + + // not hibernating, because one of the 
flags is lowered + flagsContract.On("GetFlags", mock.Anything, mock.Anything). + Run(checkFlagsAddress(t, uni.contractAddress)). + Return([]bool{false, true}, nil).Once() + + // hibernating + flagsContract.On("GetFlags", mock.Anything, mock.Anything). + Run(checkFlagsAddress(t, uni.contractAddress)). + Return([]bool{true, true}, nil) + + servicetest.Run(t, uni.overrider) + + // not hibernating initially + require.Nil(t, uni.overrider.ConfigOverride()) + + expectedOverride := &ocrtypes.ConfigOverride{AlphaPPB: math.MaxUint64, DeltaC: uni.overrider.DeltaCFromAddress} + + // timeout needs to be longer than the poll interval of 3 seconds + g.Eventually(func() *ocrtypes.ConfigOverride { return uni.overrider.ConfigOverride() }, 10*time.Second, 450*time.Millisecond).Should(gomega.Equal(expectedOverride)) +} + +func Test_OCRConfigOverrider(t *testing.T) { + t.Parallel() + + t.Run("Before first tick returns nil override, later does return a specific override when hibernating", func(t *testing.T) { + flagsContract := mocks.NewFlags(t) + + ticker := NewFakeTicker() + uni := newConfigOverriderUni(t, ticker, flagsContract) + + // not hibernating, because one of the flags is lowered + flagsContract.On("GetFlags", mock.Anything, mock.Anything). + Run(checkFlagsAddress(t, uni.contractAddress)). + Return([]bool{false, true}, nil).Once() + + // hibernating + flagsContract.On("GetFlags", mock.Anything, mock.Anything). + Run(checkFlagsAddress(t, uni.contractAddress)). 
+ Return([]bool{true, true}, nil) + + servicetest.Run(t, uni.overrider) + + // not hibernating initially + require.Nil(t, uni.overrider.ConfigOverride()) + + // update state by getting flags + require.NoError(t, uni.overrider.ExportedUpdateFlagsStatus()) + + expectedOverride := &ocrtypes.ConfigOverride{AlphaPPB: math.MaxUint64, DeltaC: uni.overrider.DeltaCFromAddress} + require.Equal(t, expectedOverride, uni.overrider.ConfigOverride()) + }) + + t.Run("Before first tick is hibernating, later exists hibernation", func(t *testing.T) { + flagsContract := mocks.NewFlags(t) + + ticker := NewFakeTicker() + uni := newConfigOverriderUni(t, ticker, flagsContract) + + // hibernating + flagsContract.On("GetFlags", mock.Anything, mock.Anything). + Run(checkFlagsAddress(t, uni.contractAddress)). + Return([]bool{true, true}, nil).Once() + + // not hibernating + flagsContract.On("GetFlags", mock.Anything, mock.Anything). + Run(checkFlagsAddress(t, uni.contractAddress)). + Return([]bool{true, false}, nil) + + servicetest.Run(t, uni.overrider) + + // initially enters hibernation + expectedOverride := &ocrtypes.ConfigOverride{AlphaPPB: math.MaxUint64, DeltaC: uni.overrider.DeltaCFromAddress} + require.Equal(t, expectedOverride, uni.overrider.ConfigOverride()) + + // update state by getting flags + require.NoError(t, uni.overrider.ExportedUpdateFlagsStatus()) + + // should exit hibernation + require.Nil(t, uni.overrider.ConfigOverride()) + }) + + t.Run("Errors if flags contract is missing", func(t *testing.T) { + var testLogger = logger.TestLogger(t) + contractAddress := cltest.NewEIP55Address() + flags := &ocr.ContractFlags{FlagsInterface: nil} + _, err := ocr.NewConfigOverriderImpl( + testLogger, + deltaCConfig{}, + contractAddress, + flags, + nil, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "Flags contract instance is missing, the contract does not exist") + }) + + t.Run("DeltaC should be stable per address", func(t *testing.T) { + var testLogger = 
logger.TestLogger(t) + flagsContract := mocks.NewFlags(t) + flags := &ocr.ContractFlags{FlagsInterface: flagsContract} + + address1, err := ethkey.NewEIP55Address(common.BigToAddress(big.NewInt(10000)).Hex()) + require.NoError(t, err) + + address2, err := ethkey.NewEIP55Address(common.BigToAddress(big.NewInt(1234567890)).Hex()) + require.NoError(t, err) + + overrider1a, err := ocr.NewConfigOverriderImpl(testLogger, deltaCConfig{}, address1, flags, nil) + require.NoError(t, err) + + overrider1b, err := ocr.NewConfigOverriderImpl(testLogger, deltaCConfig{}, address1, flags, nil) + require.NoError(t, err) + + overrider2, err := ocr.NewConfigOverriderImpl(testLogger, deltaCConfig{}, address2, flags, nil) + require.NoError(t, err) + + assert.Equal(t, cltest.MustParseDuration(t, "168h46m40s"), overrider1a.DeltaCFromAddress) + assert.Equal(t, cltest.MustParseDuration(t, "168h46m40s"), overrider1b.DeltaCFromAddress) + assert.Equal(t, cltest.MustParseDuration(t, "168h31m30s"), overrider2.DeltaCFromAddress) + }) +} + +func checkFlagsAddress(t *testing.T, contractAddress ethkey.EIP55Address) func(args mock.Arguments) { + return func(args mock.Arguments) { + require.Equal(t, []common.Address{ + evmutils.ZeroAddress, + contractAddress.Address(), + }, args.Get(1).([]common.Address)) + } +} + +type FakeTicker struct { + ticks chan time.Time +} + +func NewFakeTicker() *FakeTicker { + return &FakeTicker{ + ticks: make(chan time.Time), + } +} + +func (t *FakeTicker) SimulateTick() { + t.ticks <- time.Now() +} + +func (t *FakeTicker) Ticks() <-chan time.Time { + return t.ticks +} + +func (t *FakeTicker) Pause() {} +func (t *FakeTicker) Resume() {} +func (t *FakeTicker) Destroy() {} diff --git a/core/services/ocr/contract_config_subscription.go b/core/services/ocr/contract_config_subscription.go new file mode 100644 index 00000000..6d775a18 --- /dev/null +++ b/core/services/ocr/contract_config_subscription.go @@ -0,0 +1,21 @@ +package ocr + +import ( + ocrtypes 
"github.com/goplugin/libocr/offchainreporting/types" +) + +var _ ocrtypes.ContractConfigSubscription = &OCRContractConfigSubscription{} + +// OCRContractConfigSubscription only exists to comply with the +// ContractConfigSubscription interface, it's just a simple shell around +// OCRContractTracker that defines two methods +type OCRContractConfigSubscription OCRContractTracker + +// Configs complies with ContractConfigSubscription interface +func (sub *OCRContractConfigSubscription) Configs() <-chan ocrtypes.ContractConfig { + return sub.chConfigs +} + +// Close is a no-op since Subscribing/Unsubscribing is handled in the +// Start/Close methods of the OCRContractTracker +func (sub *OCRContractConfigSubscription) Close() {} diff --git a/core/services/ocr/contract_tracker.go b/core/services/ocr/contract_tracker.go new file mode 100644 index 00000000..7e0dbb98 --- /dev/null +++ b/core/services/ocr/contract_tracker.go @@ -0,0 +1,464 @@ +package ocr + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/libocr/gethwrappers/offchainaggregator" + "github.com/goplugin/libocr/offchainreporting/confighelper" + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/common/config" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/offchain_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// configMailboxSanityLimit is the maximum number of configs that can be held +// in the mailbox. Under normal operation there should never be more than 0 or +// 1 configs in the mailbox, this limit is here merely to prevent unbounded usage +// in some kind of unforeseen insane situation. +const configMailboxSanityLimit = 100 + +var ( + _ ocrtypes.ContractConfigTracker = &OCRContractTracker{} + _ log.Listener = &OCRContractTracker{} + _ httypes.HeadTrackable = &OCRContractTracker{} + + OCRContractConfigSet = getEventTopic("ConfigSet") + OCRContractLatestRoundRequested = getEventTopic("RoundRequested") +) + +//go:generate mockery --quiet --name OCRContractTrackerDB --output ./mocks/ --case=underscore +type ( + // OCRContractTracker complies with ContractConfigTracker interface and + // handles log events related to the contract more generally + OCRContractTracker struct { + services.StateMachine + + ethClient evmclient.Client + contract *offchain_aggregator_wrapper.OffchainAggregator + contractFilterer *offchainaggregator.OffchainAggregatorFilterer + contractCaller *offchainaggregator.OffchainAggregatorCaller + logBroadcaster log.Broadcaster + jobID int32 + logger logger.Logger + ocrDB OCRContractTrackerDB + q pg.Q + blockTranslator ocrcommon.BlockTranslator + cfg ocrcommon.Config + mailMon *mailbox.Monitor + + // HeadBroadcaster + headBroadcaster httypes.HeadBroadcaster + unsubscribeHeads func() + + // Start/Stop lifecycle + chStop services.StopChan + wg sync.WaitGroup + unsubscribeLogs func() + + // LatestRoundRequested + latestRoundRequested offchainaggregator.OffchainAggregatorRoundRequested + lrrMu sync.RWMutex + + // ContractConfig + configsMB *mailbox.Mailbox[ocrtypes.ContractConfig] + chConfigs 
chan ocrtypes.ContractConfig + + // LatestBlockHeight + latestBlockHeight int64 + latestBlockHeightMu sync.RWMutex + } + + OCRContractTrackerDB interface { + SaveLatestRoundRequested(tx pg.Queryer, rr offchainaggregator.OffchainAggregatorRoundRequested) error + LoadLatestRoundRequested() (rr offchainaggregator.OffchainAggregatorRoundRequested, err error) + } +) + +func (t *OCRContractTracker) HealthReport() map[string]error { + return map[string]error{t.Name(): t.Healthy()} +} + +func (t *OCRContractTracker) Name() string { return t.logger.Name() } + +// NewOCRContractTracker makes a new OCRContractTracker +func NewOCRContractTracker( + contract *offchain_aggregator_wrapper.OffchainAggregator, + contractFilterer *offchainaggregator.OffchainAggregatorFilterer, + contractCaller *offchainaggregator.OffchainAggregatorCaller, + ethClient evmclient.Client, + logBroadcaster log.Broadcaster, + jobID int32, + logger logger.Logger, + db *sqlx.DB, + ocrDB OCRContractTrackerDB, + cfg ocrcommon.Config, + q pg.QConfig, + headBroadcaster httypes.HeadBroadcaster, + mailMon *mailbox.Monitor, +) (o *OCRContractTracker) { + logger = logger.Named("OCRContractTracker") + return &OCRContractTracker{ + ethClient: ethClient, + contract: contract, + contractFilterer: contractFilterer, + contractCaller: contractCaller, + logBroadcaster: logBroadcaster, + jobID: jobID, + logger: logger, + ocrDB: ocrDB, + q: pg.NewQ(db, logger, q), + blockTranslator: ocrcommon.NewBlockTranslator(cfg, ethClient, logger), + cfg: cfg, + mailMon: mailMon, + headBroadcaster: headBroadcaster, + chStop: make(services.StopChan), + latestRoundRequested: offchainaggregator.OffchainAggregatorRoundRequested{}, + configsMB: mailbox.New[ocrtypes.ContractConfig](configMailboxSanityLimit), + chConfigs: make(chan ocrtypes.ContractConfig), + latestBlockHeight: -1, + } +} + +// Start must be called before logs can be delivered +// It ought to be called before starting OCR +func (t *OCRContractTracker) Start(context.Context) 
error { + return t.StartOnce("OCRContractTracker", func() (err error) { + t.latestRoundRequested, err = t.ocrDB.LoadLatestRoundRequested() + if err != nil { + return errors.Wrap(err, "OCRContractTracker#Start: failed to load latest round requested") + } + + t.unsubscribeLogs = t.logBroadcaster.Register(t, log.ListenerOpts{ + Contract: t.contract.Address(), + ParseLog: t.contract.ParseLog, + LogsWithTopics: map[gethCommon.Hash][][]log.Topic{ + offchain_aggregator_wrapper.OffchainAggregatorRoundRequested{}.Topic(): nil, + offchain_aggregator_wrapper.OffchainAggregatorConfigSet{}.Topic(): nil, + }, + MinIncomingConfirmations: 1, + }) + + var latestHead *evmtypes.Head + latestHead, t.unsubscribeHeads = t.headBroadcaster.Subscribe(t) + if latestHead != nil { + t.setLatestBlockHeight(latestHead) + } + + t.wg.Add(1) + go t.processLogs() + + t.mailMon.Monitor(t.configsMB, "OCRContractTracker", "Configs", fmt.Sprint(t.jobID)) + + return nil + }) +} + +// Close should be called after teardown of the OCR job relying on this tracker +func (t *OCRContractTracker) Close() error { + return t.StopOnce("OCRContractTracker", func() error { + close(t.chStop) + t.wg.Wait() + t.unsubscribeHeads() + t.unsubscribeLogs() + close(t.chConfigs) + return t.configsMB.Close() + }) +} + +// OnNewLongestChain conformed to HeadTrackable and updates latestBlockHeight +func (t *OCRContractTracker) OnNewLongestChain(_ context.Context, h *evmtypes.Head) { + t.setLatestBlockHeight(h) +} + +func (t *OCRContractTracker) setLatestBlockHeight(h *evmtypes.Head) { + var num int64 + if h.L1BlockNumber.Valid { + num = h.L1BlockNumber.Int64 + } else { + num = h.Number + } + t.latestBlockHeightMu.Lock() + defer t.latestBlockHeightMu.Unlock() + if num > t.latestBlockHeight { + t.latestBlockHeight = num + } +} + +func (t *OCRContractTracker) getLatestBlockHeight() int64 { + t.latestBlockHeightMu.RLock() + defer t.latestBlockHeightMu.RUnlock() + return t.latestBlockHeight +} + +func (t *OCRContractTracker) 
processLogs() { + defer t.wg.Done() + for { + select { + case <-t.configsMB.Notify(): + // NOTE: libocr could take an arbitrary amount of time to process a + // new config. To avoid blocking the log broadcaster, we use this + // background thread to deliver them and a mailbox as the buffer. + for { + cc, exists := t.configsMB.Retrieve() + if !exists { + break + } + select { + case t.chConfigs <- cc: + case <-t.chStop: + return + } + } + case <-t.chStop: + return + } + } +} + +// HandleLog complies with LogListener interface +// It is not thread safe +func (t *OCRContractTracker) HandleLog(lb log.Broadcast) { + was, err := t.logBroadcaster.WasAlreadyConsumed(lb) + if err != nil { + t.logger.Errorw("could not determine if log was already consumed", "err", err) + return + } else if was { + return + } + + raw := lb.RawLog() + if raw.Address != t.contract.Address() { + t.logger.Errorf("log address of 0x%x does not match configured contract address of 0x%x", raw.Address, t.contract.Address()) + if err2 := t.logBroadcaster.MarkConsumed(lb); err2 != nil { + t.logger.Errorw("failed to mark log consumed", "err", err2) + } + return + } + topics := raw.Topics + if len(topics) == 0 { + if err2 := t.logBroadcaster.MarkConsumed(lb); err2 != nil { + t.logger.Errorw("failed to mark log consumed", "err", err2) + } + return + } + + var consumed bool + switch topics[0] { + case OCRContractConfigSet: + var configSet *offchainaggregator.OffchainAggregatorConfigSet + configSet, err = t.contractFilterer.ParseConfigSet(raw) + if err != nil { + t.logger.Errorw("could not parse config set", "err", err) + if err2 := t.logBroadcaster.MarkConsumed(lb); err2 != nil { + t.logger.Errorw("failed to mark log consumed", "err", err2) + } + return + } + configSet.Raw = lb.RawLog() + cc := confighelper.ContractConfigFromConfigSetEvent(*configSet) + + wasOverCapacity := t.configsMB.Deliver(cc) + if wasOverCapacity { + t.logger.Error("config mailbox is over capacity - dropped the oldest unprocessed item") 
+ } + case OCRContractLatestRoundRequested: + var rr *offchainaggregator.OffchainAggregatorRoundRequested + rr, err = t.contractFilterer.ParseRoundRequested(raw) + if err != nil { + t.logger.Errorw("could not parse round requested", "err", err) + if err2 := t.logBroadcaster.MarkConsumed(lb); err2 != nil { + t.logger.Errorw("failed to mark log consumed", "err", err2) + } + return + } + if IsLaterThan(raw, t.latestRoundRequested.Raw) { + err = t.q.Transaction(func(tx pg.Queryer) error { + if err = t.ocrDB.SaveLatestRoundRequested(tx, *rr); err != nil { + return err + } + return t.logBroadcaster.MarkConsumed(lb, pg.WithQueryer(tx)) + }) + if err != nil { + t.logger.Error(err) + return + } + consumed = true + t.lrrMu.Lock() + t.latestRoundRequested = *rr + t.lrrMu.Unlock() + t.logger.Infow("received new latest RoundRequested event", "latestRoundRequested", *rr) + } else { + t.logger.Warnw("ignoring out of date RoundRequested event", "latestRoundRequested", t.latestRoundRequested, "roundRequested", rr) + } + default: + t.logger.Debugw("got unrecognised log topic", "topic", topics[0]) + } + if !consumed { + if err := t.logBroadcaster.MarkConsumed(lb); err != nil { + t.logger.Errorw("failed to mark log consumed", "err", err) + } + } +} + +// IsLaterThan returns true if the first log was emitted "after" the second log +// from the blockchain's point of view +func IsLaterThan(incoming gethTypes.Log, existing gethTypes.Log) bool { + return incoming.BlockNumber > existing.BlockNumber || + (incoming.BlockNumber == existing.BlockNumber && incoming.TxIndex > existing.TxIndex) || + (incoming.BlockNumber == existing.BlockNumber && incoming.TxIndex == existing.TxIndex && incoming.Index > existing.Index) +} + +// JobID complies with LogListener interface +func (t *OCRContractTracker) JobID() int32 { + return t.jobID +} + +// SubscribeToNewConfigs returns the tracker aliased as a ContractConfigSubscription +func (t *OCRContractTracker) SubscribeToNewConfigs(context.Context) 
(ocrtypes.ContractConfigSubscription, error) { + return (*OCRContractConfigSubscription)(t), nil +} + +// LatestConfigDetails queries the eth node +func (t *OCRContractTracker) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { + var cancel context.CancelFunc + ctx, cancel = t.chStop.Ctx(ctx) + defer cancel() + + opts := bind.CallOpts{Context: ctx, Pending: false} + result, err := t.contractCaller.LatestConfigDetails(&opts) + if err != nil { + return 0, configDigest, errors.Wrap(err, "error getting LatestConfigDetails") + } + configDigest, err = ocrtypes.BytesToConfigDigest(result.ConfigDigest[:]) + if err != nil { + return 0, configDigest, errors.Wrap(err, "error getting config digest") + } + return uint64(result.BlockNumber), configDigest, err +} + +// ConfigFromLogs queries the eth node for logs for this contract +func (t *OCRContractTracker) ConfigFromLogs(ctx context.Context, changedInBlock uint64) (c ocrtypes.ContractConfig, err error) { + fromBlock, toBlock := t.blockTranslator.NumberToQueryRange(ctx, changedInBlock) + q := ethereum.FilterQuery{ + FromBlock: fromBlock, + ToBlock: toBlock, + Addresses: []gethCommon.Address{t.contract.Address()}, + Topics: [][]gethCommon.Hash{ + {OCRContractConfigSet}, + }, + } + + var cancel context.CancelFunc + ctx, cancel = t.chStop.Ctx(ctx) + defer cancel() + + logs, err := t.ethClient.FilterLogs(ctx, q) + if err != nil { + return c, err + } + if len(logs) == 0 { + return c, errors.Errorf("ConfigFromLogs: OCRContract with address 0x%x has no logs", t.contract.Address()) + } + + latest, err := t.contractFilterer.ParseConfigSet(logs[len(logs)-1]) + if err != nil { + return c, errors.Wrap(err, "ConfigFromLogs failed to ParseConfigSet") + } + latest.Raw = logs[len(logs)-1] + if latest.Raw.Address != t.contract.Address() { + return c, errors.Errorf("log address of 0x%x does not match configured contract address of 0x%x", latest.Raw.Address, t.contract.Address()) + } 
+ return confighelper.ContractConfigFromConfigSetEvent(*latest), err +} + +// LatestBlockHeight queries the eth node for the most recent header +func (t *OCRContractTracker) LatestBlockHeight(ctx context.Context) (blockheight uint64, err error) { + switch t.cfg.ChainType() { + case config.ChainMetis: + // We skip confirmation checking anyway on these L2s so there's no need to + // care about the block height; we have no way of getting the L1 block + // height anyway + return 0, nil + case "", config.ChainArbitrum, config.ChainCelo, config.ChainOptimismBedrock, config.ChainXDai, config.ChainKroma, config.ChainWeMix, config.ChainZkSync, config.ChainScroll: + // continue + } + latestBlockHeight := t.getLatestBlockHeight() + if latestBlockHeight >= 0 { + return uint64(latestBlockHeight), nil + } + + t.logger.Debugw("still waiting for first head, falling back to on-chain lookup") + + var cancel context.CancelFunc + ctx, cancel = t.chStop.Ctx(ctx) + defer cancel() + + h, err := t.ethClient.HeadByNumber(ctx, nil) + if err != nil { + return 0, err + } + if h == nil { + return 0, errors.New("got nil head") + } + + if h.L1BlockNumber.Valid { + return uint64(h.L1BlockNumber.Int64), nil + } + + return uint64(h.Number), nil +} + +// LatestRoundRequested returns the configDigest, epoch, and round from the latest +// RoundRequested event emitted by the contract. LatestRoundRequested may or may not +// return a result if the latest such event was emitted in a block b such that +// b.timestamp < tip.timestamp - lookback. +// +// If no event is found, LatestRoundRequested should return zero values, not an error. +// An error should only be returned if an actual error occurred during execution, +// e.g. because there was an error querying the blockchain or the database. +// +// As an optimization, this function may also return zero values, if no +// RoundRequested event has been emitted after the latest NewTransmission event. 
+func (t *OCRContractTracker) LatestRoundRequested(_ context.Context, lookback time.Duration) (configDigest ocrtypes.ConfigDigest, epoch uint32, round uint8, err error) { + // NOTE: This should be "good enough" 99% of the time. + // It guarantees validity up to `EVM.BlockBackfillDepth` blocks ago + // Some further improvements could be made: + t.lrrMu.RLock() + defer t.lrrMu.RUnlock() + return t.latestRoundRequested.ConfigDigest, t.latestRoundRequested.Epoch, t.latestRoundRequested.Round, nil +} + +func getEventTopic(name string) gethCommon.Hash { + abi, err := abi.JSON(strings.NewReader(offchainaggregator.OffchainAggregatorABI)) + if err != nil { + panic("could not parse OffchainAggregator ABI: " + err.Error()) + } + event, exists := abi.Events[name] + if !exists { + panic(fmt.Sprintf("abi.Events was missing %s", name)) + } + return event.ID +} diff --git a/core/services/ocr/contract_tracker_test.go b/core/services/ocr/contract_tracker_test.go new file mode 100644 index 00000000..dffdc2aa --- /dev/null +++ b/core/services/ocr/contract_tracker_test.go @@ -0,0 +1,410 @@ +package ocr_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/gethwrappers/offchainaggregator" + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/plugin-common/pkg/utils/mailbox/mailboxtest" + + commonmocks "github.com/goplugin/pluginv3.0/v2/common/mocks" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + logmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log/mocks" + evmtypes 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/offchain_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" + ocrmocks "github.com/goplugin/pluginv3.0/v2/core/services/ocr/mocks" +) + +func mustNewContract(t *testing.T, address gethCommon.Address) *offchain_aggregator_wrapper.OffchainAggregator { + contract, err := offchain_aggregator_wrapper.NewOffchainAggregator(address, nil) + require.NoError(t, err) + return contract +} + +func mustNewFilterer(t *testing.T) *offchainaggregator.OffchainAggregatorFilterer { + filterer, err := offchainaggregator.NewOffchainAggregatorFilterer(testutils.NewAddress(), nil) + require.NoError(t, err) + return filterer +} + +type contractTrackerUni struct { + db *ocrmocks.OCRContractTrackerDB + lb *logmocks.Broadcaster + hb *commonmocks.HeadBroadcaster[*evmtypes.Head, common.Hash] + ec *evmclimocks.Client + tracker *ocr.OCRContractTracker +} + +func newContractTrackerUni(t *testing.T, opts ...interface{}) (uni contractTrackerUni) { + var cfg evmconfig.ChainScopedConfig + var filterer *offchainaggregator.OffchainAggregatorFilterer + var contract *offchain_aggregator_wrapper.OffchainAggregator + for _, opt := range opts { + switch v := opt.(type) { + case evmconfig.ChainScopedConfig: + cfg = v + case *offchainaggregator.OffchainAggregatorFilterer: + filterer = v + case *offchain_aggregator_wrapper.OffchainAggregator: + contract = v + default: + t.Fatalf("unrecognised option type %T", v) + } + } + if cfg == nil { + cfg = evmtest.NewChainScopedConfig(t, 
configtest.NewTestGeneralConfig(t)) + } + if filterer == nil { + filterer = mustNewFilterer(t) + } + if contract == nil { + contract = mustNewContract(t, testutils.NewAddress()) + } + uni.db = ocrmocks.NewOCRContractTrackerDB(t) + uni.lb = logmocks.NewBroadcaster(t) + uni.hb = commonmocks.NewHeadBroadcaster[*evmtypes.Head, common.Hash](t) + uni.ec = evmtest.NewEthClientMock(t) + + mailMon := servicetest.Run(t, mailboxtest.NewMonitor(t)) + db := pgtest.NewSqlxDB(t) + uni.tracker = ocr.NewOCRContractTracker( + contract, + filterer, + nil, + uni.ec, + uni.lb, + 42, + logger.TestLogger(t), + db, + uni.db, + cfg.EVM(), + cfg.Database(), + uni.hb, + mailMon, + ) + + return uni +} + +func Test_OCRContractTracker_LatestBlockHeight(t *testing.T) { + t.Parallel() + + t.Run("before first head incoming, looks up on-chain", func(t *testing.T) { + uni := newContractTrackerUni(t) + uni.ec.On("HeadByNumber", mock.AnythingOfType("*context.cancelCtx"), (*big.Int)(nil)).Return(&evmtypes.Head{Number: 42}, nil) + + l, err := uni.tracker.LatestBlockHeight(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, uint64(42), l) + }) + + t.Run("Before first head incoming, on client error returns error", func(t *testing.T) { + uni := newContractTrackerUni(t) + uni.ec.On("HeadByNumber", mock.AnythingOfType("*context.cancelCtx"), (*big.Int)(nil)).Return(nil, nil).Once() + + _, err := uni.tracker.LatestBlockHeight(testutils.Context(t)) + assert.EqualError(t, err, "got nil head") + + uni.ec.On("HeadByNumber", mock.AnythingOfType("*context.cancelCtx"), (*big.Int)(nil)).Return(nil, errors.New("bar")).Once() + + _, err = uni.tracker.LatestBlockHeight(testutils.Context(t)) + assert.EqualError(t, err, "bar") + }) + + t.Run("after first head incoming, uses cached value", func(t *testing.T) { + uni := newContractTrackerUni(t) + + uni.tracker.OnNewLongestChain(testutils.Context(t), &evmtypes.Head{Number: 42}) + + l, err := uni.tracker.LatestBlockHeight(testutils.Context(t)) + 
require.NoError(t, err) + + assert.Equal(t, uint64(42), l) + }) + + t.Run("if headbroadcaster has it, uses the given value on start", func(t *testing.T) { + uni := newContractTrackerUni(t) + + uni.hb.On("Subscribe", uni.tracker).Return(&evmtypes.Head{Number: 42}, func() {}) + uni.db.On("LoadLatestRoundRequested").Return(offchainaggregator.OffchainAggregatorRoundRequested{}, nil) + uni.lb.On("Register", uni.tracker, mock.Anything).Return(func() {}) + + servicetest.Run(t, uni.tracker) + + l, err := uni.tracker.LatestBlockHeight(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, uint64(42), l) + }) +} + +func Test_OCRContractTracker_HandleLog_OCRContractLatestRoundRequested(t *testing.T) { + t.Parallel() + + fixtureLogAddress := gethCommon.HexToAddress("0x03bd0d5d39629423979f8a0e53dbce78c1791ebf") + fixtureFilterer := mustNewFilterer(t) + fixtureContract := mustNewContract(t, fixtureLogAddress) + + t.Run("does not update if contract address doesn't match", func(t *testing.T) { + uni := newContractTrackerUni(t) + logBroadcast := logmocks.NewBroadcast(t) + + rawLog := cltest.LogFromFixture(t, "../../testdata/jsonrpc/round_requested_log_1_1.json") + logBroadcast.On("RawLog").Return(rawLog).Maybe() + logBroadcast.On("String").Return("").Maybe() + uni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + configDigest, epoch, round, err := uni.tracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + require.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + require.Equal(t, 0, int(round)) + require.Equal(t, 0, int(epoch)) + + uni.tracker.HandleLog(logBroadcast) + + configDigest, epoch, round, err = uni.tracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + require.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + require.Equal(t, 0, int(round)) + require.Equal(t, 0, int(epoch)) + }) + + t.Run("does nothing if 
log has already been consumed", func(t *testing.T) { + uni := newContractTrackerUni(t, fixtureFilterer, fixtureContract) + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("String").Return("").Maybe() + + uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(true, nil) + + configDigest, epoch, round, err := uni.tracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + require.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + require.Equal(t, 0, int(round)) + require.Equal(t, 0, int(epoch)) + + uni.tracker.HandleLog(logBroadcast) + + configDigest, epoch, round, err = uni.tracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + require.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + require.Equal(t, 0, int(round)) + require.Equal(t, 0, int(epoch)) + }) + + t.Run("for new round requested log", func(t *testing.T) { + uni := newContractTrackerUni(t, fixtureFilterer, fixtureContract) + + configDigest, epoch, round, err := uni.tracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + require.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + require.Equal(t, 0, int(round)) + require.Equal(t, 0, int(epoch)) + + // Any round supercedes the 0 round + + rawLog := cltest.LogFromFixture(t, "../../testdata/jsonrpc/round_requested_log_1_1.json") + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("RawLog").Return(rawLog).Maybe() + logBroadcast.On("String").Return("").Maybe() + uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + uni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + + uni.db.On("SaveLatestRoundRequested", mock.Anything, mock.MatchedBy(func(rr offchainaggregator.OffchainAggregatorRoundRequested) bool { + return rr.Epoch == 1 && rr.Round == 1 + })).Return(nil) + + uni.tracker.HandleLog(logBroadcast) + + configDigest, epoch, round, err = uni.tracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, 
err) + assert.Equal(t, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", configDigest.Hex()) + assert.Equal(t, 1, int(epoch)) + assert.Equal(t, 1, int(round)) + + // Same round with higher epoch supercedes + rawLog2 := cltest.LogFromFixture(t, "../../testdata/jsonrpc/round_requested_log_1_9.json") + logBroadcast2 := logmocks.NewBroadcast(t) + logBroadcast2.On("RawLog").Return(rawLog2) + logBroadcast2.On("String").Return("").Maybe() + uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + uni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + + uni.db.On("SaveLatestRoundRequested", mock.Anything, mock.MatchedBy(func(rr offchainaggregator.OffchainAggregatorRoundRequested) bool { + return rr.Epoch == 1 && rr.Round == 9 + })).Return(nil) + + uni.tracker.HandleLog(logBroadcast2) + + configDigest, epoch, round, err = uni.tracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + assert.Equal(t, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", configDigest.Hex()) + assert.Equal(t, 1, int(epoch)) + assert.Equal(t, 9, int(round)) + + // Same round with lower epoch is ignored + uni.tracker.HandleLog(logBroadcast) + + configDigest, epoch, round, err = uni.tracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + assert.Equal(t, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", configDigest.Hex()) + assert.Equal(t, 1, int(epoch)) + assert.Equal(t, 9, int(round)) + + // Higher epoch with lower round supercedes + rawLog3 := cltest.LogFromFixture(t, "../../testdata/jsonrpc/round_requested_log_2_1.json") + logBroadcast3 := logmocks.NewBroadcast(t) + logBroadcast3.On("RawLog").Return(rawLog3).Maybe() + logBroadcast3.On("String").Return("").Maybe() + uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + uni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + + uni.db.On("SaveLatestRoundRequested", mock.Anything, mock.MatchedBy(func(rr offchainaggregator.OffchainAggregatorRoundRequested) bool 
{ + return rr.Epoch == 2 && rr.Round == 1 + })).Return(nil) + + uni.tracker.HandleLog(logBroadcast3) + + configDigest, epoch, round, err = uni.tracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + assert.Equal(t, "cccccccccccccccccccccccccccccccc", configDigest.Hex()) + assert.Equal(t, 2, int(epoch)) + assert.Equal(t, 1, int(round)) + }) + + t.Run("does not mark consumed or update state if latest round fails to save", func(t *testing.T) { + uni := newContractTrackerUni(t, fixtureFilterer, fixtureContract) + + rawLog := cltest.LogFromFixture(t, "../../testdata/jsonrpc/round_requested_log_1_1.json") + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("String").Return("").Maybe() + uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + uni.db.On("SaveLatestRoundRequested", mock.Anything, mock.Anything).Return(errors.New("something exploded")) + + uni.tracker.HandleLog(logBroadcast) + + configDigest, epoch, round, err := uni.tracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + require.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + require.Equal(t, 0, int(round)) + require.Equal(t, 0, int(epoch)) + }) + + t.Run("restores latest round requested from database on start", func(t *testing.T) { + uni := newContractTrackerUni(t, fixtureFilterer, fixtureContract) + + rawLog := cltest.LogFromFixture(t, "../../testdata/jsonrpc/round_requested_log_1_1.json") + rr := offchainaggregator.OffchainAggregatorRoundRequested{ + Requester: testutils.NewAddress(), + ConfigDigest: cltest.MakeConfigDigest(t), + Epoch: 42, + Round: 9, + Raw: rawLog, + } + + eventuallyCloseLogBroadcaster := cltest.NewAwaiter() + uni.lb.On("Register", uni.tracker, mock.Anything).Return(func() { eventuallyCloseLogBroadcaster.ItHappened() }) + uni.lb.On("IsConnected").Return(true).Maybe() + + eventuallyCloseHeadBroadcaster := cltest.NewAwaiter() + uni.hb.On("Subscribe", 
uni.tracker).Return((*evmtypes.Head)(nil), func() { eventuallyCloseHeadBroadcaster.ItHappened() }) + + uni.db.On("LoadLatestRoundRequested").Return(rr, nil) + + require.NoError(t, uni.tracker.Start(testutils.Context(t))) + + configDigest, epoch, round, err := uni.tracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + assert.Equal(t, (ocrtypes.ConfigDigest)(rr.ConfigDigest).Hex(), configDigest.Hex()) + assert.Equal(t, rr.Epoch, epoch) + assert.Equal(t, rr.Round, round) + + require.NoError(t, uni.tracker.Close()) + + eventuallyCloseHeadBroadcaster.AssertHappened(t, true) + eventuallyCloseLogBroadcaster.AssertHappened(t, true) + }) +} + +func Test_OCRContractTracker_IsLaterThan(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + incoming types.Log + existing types.Log + expected bool + }{ + { + "incoming higher index than existing", + types.Log{BlockNumber: 1, TxIndex: 1, Index: 2}, + types.Log{BlockNumber: 1, TxIndex: 1, Index: 1}, + true, + }, + { + "incoming lower index than existing", + types.Log{BlockNumber: 1, TxIndex: 1, Index: 1}, + types.Log{BlockNumber: 1, TxIndex: 1, Index: 2}, + false, + }, + { + "incoming identical to existing", + types.Log{BlockNumber: 1, TxIndex: 2, Index: 2}, + types.Log{BlockNumber: 1, TxIndex: 2, Index: 2}, + false, + }, + { + "incoming higher tx index than existing", + types.Log{BlockNumber: 1, TxIndex: 2, Index: 2}, + types.Log{BlockNumber: 1, TxIndex: 1, Index: 2}, + true, + }, + { + "incoming lower tx index than existing", + types.Log{BlockNumber: 1, TxIndex: 1, Index: 2}, + types.Log{BlockNumber: 1, TxIndex: 2, Index: 2}, + false, + }, + { + "incoming higher block number than existing", + types.Log{BlockNumber: 3, TxIndex: 2, Index: 2}, + types.Log{BlockNumber: 2, TxIndex: 2, Index: 2}, + true, + }, + { + "incoming lower block number than existing", + types.Log{BlockNumber: 2, TxIndex: 2, Index: 2}, + types.Log{BlockNumber: 3, TxIndex: 2, Index: 2}, + false, + }, + } + + for _, test 
:= range tests { + t.Run(test.name, func(t *testing.T) { + res := ocr.IsLaterThan(test.incoming, test.existing) + assert.Equal(t, test.expected, res) + }) + } +} diff --git a/core/services/ocr/contract_transmitter.go b/core/services/ocr/contract_transmitter.go new file mode 100644 index 00000000..eb6b70aa --- /dev/null +++ b/core/services/ocr/contract_transmitter.go @@ -0,0 +1,96 @@ +package ocr + +import ( + "context" + "math/big" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/goplugin/libocr/gethwrappers/offchainaggregator" + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" +) + +var ( + _ ocrtypes.ContractTransmitter = &OCRContractTransmitter{} +) + +type ( + OCRContractTransmitter struct { + contractAddress gethCommon.Address + contractABI abi.ABI + transmitter ocrcommon.Transmitter + contractCaller *offchainaggregator.OffchainAggregatorCaller + tracker *OCRContractTracker + chainID *big.Int + effectiveTransmitterAddress gethCommon.Address + } +) + +func NewOCRContractTransmitter( + address gethCommon.Address, + contractCaller *offchainaggregator.OffchainAggregatorCaller, + contractABI abi.ABI, + transmitter ocrcommon.Transmitter, + logBroadcaster log.Broadcaster, + tracker *OCRContractTracker, + chainID *big.Int, + effectiveTransmitterAddress gethCommon.Address, +) *OCRContractTransmitter { + return &OCRContractTransmitter{ + contractAddress: address, + contractABI: contractABI, + transmitter: transmitter, + contractCaller: contractCaller, + tracker: tracker, + chainID: chainID, + effectiveTransmitterAddress: effectiveTransmitterAddress, + } +} + +func (oc *OCRContractTransmitter) Transmit(ctx context.Context, report []byte, rs, ss [][32]byte, vs [32]byte) error { + 
payload, err := oc.contractABI.Pack("transmit", report, rs, ss, vs) + if err != nil { + return errors.Wrap(err, "abi.Pack failed") + } + + return errors.Wrap(oc.transmitter.CreateEthTransaction(ctx, oc.contractAddress, payload, nil), "failed to send Eth transaction") +} + +func (oc *OCRContractTransmitter) LatestTransmissionDetails(ctx context.Context) (configDigest ocrtypes.ConfigDigest, epoch uint32, round uint8, latestAnswer ocrtypes.Observation, latestTimestamp time.Time, err error) { + opts := bind.CallOpts{Context: ctx, Pending: false} + result, err := oc.contractCaller.LatestTransmissionDetails(&opts) + if err != nil { + return configDigest, 0, 0, ocrtypes.Observation(nil), time.Time{}, errors.Wrap(err, "error getting LatestTransmissionDetails") + } + return result.ConfigDigest, result.Epoch, result.Round, ocrtypes.Observation(result.LatestAnswer), time.Unix(int64(result.LatestTimestamp), 0), nil +} + +func (oc *OCRContractTransmitter) FromAddress() gethCommon.Address { + return oc.effectiveTransmitterAddress +} + +func (oc *OCRContractTransmitter) ChainID() *big.Int { + return oc.chainID +} + +// LatestRoundRequested returns the configDigest, epoch, and round from the latest +// RoundRequested event emitted by the contract. LatestRoundRequested may or may not +// return a result if the latest such event was emitted in a block b such that +// b.timestamp < tip.timestamp - lookback. +// +// If no event is found, LatestRoundRequested should return zero values, not an error. +// An error should only be returned if an actual error occurred during execution, +// e.g. because there was an error querying the blockchain or the database. +// +// As an optimization, this function may also return zero values, if no +// RoundRequested event has been emitted after the latest NewTransmission event. 
+func (oc *OCRContractTransmitter) LatestRoundRequested(ctx context.Context, lookback time.Duration) (configDigest ocrtypes.ConfigDigest, epoch uint32, round uint8, err error) { + return oc.tracker.LatestRoundRequested(ctx, lookback) +} diff --git a/core/services/ocr/contract_transmitter_test.go b/core/services/ocr/contract_transmitter_test.go new file mode 100644 index 00000000..cb261780 --- /dev/null +++ b/core/services/ocr/contract_transmitter_test.go @@ -0,0 +1,34 @@ +package ocr_test + +import ( + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/goplugin/libocr/gethwrappers/offchainaggregator" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" +) + +func Test_ContractTransmitter_ChainID(t *testing.T) { + chainID := big.NewInt(42) + contractABI, err := abi.JSON(strings.NewReader(offchainaggregator.OffchainAggregatorABI)) + require.NoError(t, err) + ct := ocr.NewOCRContractTransmitter( + testutils.NewAddress(), + nil, + contractABI, + nil, + nil, + nil, + chainID, + common.Address{}, + ) + + assert.Equal(t, chainID, ct.ChainID()) +} diff --git a/core/services/ocr/database.go b/core/services/ocr/database.go new file mode 100644 index 00000000..95616a3a --- /dev/null +++ b/core/services/ocr/database.go @@ -0,0 +1,343 @@ +package ocr + +import ( + "context" + "database/sql" + "encoding/json" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/jmoiron/sqlx" + "github.com/goplugin/libocr/gethwrappers/offchainaggregator" + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/logger" + 
"github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type db struct { + q pg.Q + oracleSpecID int32 + lggr logger.SugaredLogger +} + +var ( + _ ocrtypes.Database = &db{} + _ OCRContractTrackerDB = &db{} +) + +// NewDB returns a new DB scoped to this oracleSpecID +func NewDB(sqlxDB *sqlx.DB, oracleSpecID int32, lggr logger.Logger, cfg pg.QConfig) *db { + namedLogger := lggr.Named("OCR.DB") + + return &db{ + q: pg.NewQ(sqlxDB, namedLogger, cfg), + oracleSpecID: oracleSpecID, + lggr: logger.Sugared(lggr), + } +} + +func (d *db) ReadState(ctx context.Context, cd ocrtypes.ConfigDigest) (ps *ocrtypes.PersistentState, err error) { + stmt := ` + SELECT epoch, highest_sent_epoch, highest_received_epoch + FROM ocr_persistent_states + WHERE ocr_oracle_spec_id = $1 AND config_digest = $2 + LIMIT 1` + + ps = new(ocrtypes.PersistentState) + + var tmp []int64 + var highestSentEpochTmp int64 + + err = d.q.QueryRowxContext(ctx, stmt, d.oracleSpecID, cd).Scan(&ps.Epoch, &highestSentEpochTmp, pq.Array(&tmp)) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, errors.Wrap(err, "ReadState failed") + } + + ps.HighestSentEpoch = uint32(highestSentEpochTmp) + + for _, v := range tmp { + ps.HighestReceivedEpoch = append(ps.HighestReceivedEpoch, uint32(v)) + } + + return ps, nil +} + +func (d *db) WriteState(ctx context.Context, cd ocrtypes.ConfigDigest, state ocrtypes.PersistentState) error { + var highestReceivedEpoch []int64 + for _, v := range state.HighestReceivedEpoch { + highestReceivedEpoch = append(highestReceivedEpoch, int64(v)) + } + + stmt := ` + INSERT INTO ocr_persistent_states (ocr_oracle_spec_id, config_digest, epoch, highest_sent_epoch, highest_received_epoch, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) + ON CONFLICT (ocr_oracle_spec_id, config_digest) DO UPDATE SET + (epoch, highest_sent_epoch, highest_received_epoch, updated_at) + = + ( + EXCLUDED.epoch, + EXCLUDED.highest_sent_epoch, + 
EXCLUDED.highest_received_epoch, + NOW() + ) + ` + _, err := d.q.WithOpts(pg.WithLongQueryTimeout()).ExecContext( + ctx, stmt, d.oracleSpecID, cd, state.Epoch, state.HighestSentEpoch, pq.Array(&highestReceivedEpoch), + ) + + return errors.Wrap(err, "WriteState failed") +} + +func (d *db) ReadConfig(ctx context.Context) (c *ocrtypes.ContractConfig, err error) { + stmt := ` + SELECT config_digest, signers, transmitters, threshold, encoded_config_version, encoded + FROM ocr_contract_configs + WHERE ocr_oracle_spec_id = $1 + LIMIT 1` + + c = new(ocrtypes.ContractConfig) + + var signers [][]byte + var transmitters [][]byte + + err = d.q.QueryRowContext(ctx, stmt, d.oracleSpecID).Scan( + &c.ConfigDigest, + (*pq.ByteaArray)(&signers), + (*pq.ByteaArray)(&transmitters), + &c.Threshold, + &c.EncodedConfigVersion, + &c.Encoded, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, errors.Wrap(err, "ReadConfig failed") + } + + for _, s := range signers { + c.Signers = append(c.Signers, common.BytesToAddress(s)) + } + for _, t := range transmitters { + c.Transmitters = append(c.Transmitters, common.BytesToAddress(t)) + } + + return +} + +func (d *db) WriteConfig(ctx context.Context, c ocrtypes.ContractConfig) error { + var signers [][]byte + var transmitters [][]byte + for _, s := range c.Signers { + signers = append(signers, s.Bytes()) + } + for _, t := range c.Transmitters { + transmitters = append(transmitters, t.Bytes()) + } + stmt := ` + INSERT INTO ocr_contract_configs (ocr_oracle_spec_id, config_digest, signers, transmitters, threshold, encoded_config_version, encoded, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, NOW(), NOW()) + ON CONFLICT (ocr_oracle_spec_id) DO UPDATE SET + config_digest = EXCLUDED.config_digest, + signers = EXCLUDED.signers, + transmitters = EXCLUDED.transmitters, + threshold = EXCLUDED.threshold, + encoded_config_version = EXCLUDED.encoded_config_version, + encoded = EXCLUDED.encoded, + 
updated_at = NOW() + ` + _, err := d.q.ExecContext(ctx, stmt, d.oracleSpecID, c.ConfigDigest, pq.ByteaArray(signers), pq.ByteaArray(transmitters), c.Threshold, int(c.EncodedConfigVersion), c.Encoded) + + return errors.Wrap(err, "WriteConfig failed") +} + +func (d *db) StorePendingTransmission(ctx context.Context, k ocrtypes.ReportTimestamp, p ocrtypes.PendingTransmission) error { + median := big.New(p.Median) + var rs [][]byte + var ss [][]byte + // Note: p.Rs and p.Ss are of type [][32]byte. + // See last example of https://github.com/golang/go/wiki/CommonMistakes#using-reference-to-loop-iterator-variable + for _, v := range p.Rs { + v := v + rs = append(rs, v[:]) + } + for _, v := range p.Ss { + v := v + ss = append(ss, v[:]) + } + + stmt := ` + INSERT INTO ocr_pending_transmissions ( + ocr_oracle_spec_id, + config_digest, + epoch, + round, + time, + median, + serialized_report, + rs, + ss, + vs, + created_at, + updated_at + ) + VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,NOW(),NOW()) + ON CONFLICT (ocr_oracle_spec_id, config_digest, epoch, round) DO UPDATE SET + time = EXCLUDED.time, + median = EXCLUDED.median, + serialized_report = EXCLUDED.serialized_report, + rs = EXCLUDED.rs, + ss = EXCLUDED.ss, + vs = EXCLUDED.vs, + updated_at = NOW() + ` + + _, err := d.q.ExecContext(ctx, stmt, d.oracleSpecID, k.ConfigDigest, k.Epoch, k.Round, p.Time, median, p.SerializedReport, pq.ByteaArray(rs), pq.ByteaArray(ss), p.Vs[:]) + + return errors.Wrap(err, "StorePendingTransmission failed") +} + +func (d *db) PendingTransmissionsWithConfigDigest(ctx context.Context, cd ocrtypes.ConfigDigest) (map[ocrtypes.ReportTimestamp]ocrtypes.PendingTransmission, error) { + //nolint sqlclosecheck false positive + rows, err := d.q.QueryContext(ctx, ` +SELECT + config_digest, + epoch, + round, + time, + median, + serialized_report, + rs, + ss, + vs +FROM ocr_pending_transmissions +WHERE ocr_oracle_spec_id = $1 AND config_digest = $2 +`, d.oracleSpecID, cd) + if err != nil { + return nil, 
errors.Wrap(err, "PendingTransmissionsWithConfigDigest failed to query rows") + } + defer d.lggr.ErrorIfFn(rows.Close, "Error closing ocr_pending_transmissions rows") + + m := make(map[ocrtypes.ReportTimestamp]ocrtypes.PendingTransmission) + + for rows.Next() { + k := ocrtypes.ReportTimestamp{} + p := ocrtypes.PendingTransmission{} + + var median big.Big + var rs [][]byte + var ss [][]byte + var vs []byte + if err := rows.Scan(&k.ConfigDigest, &k.Epoch, &k.Round, &p.Time, &median, &p.SerializedReport, (*pq.ByteaArray)(&rs), (*pq.ByteaArray)(&ss), &vs); err != nil { + return nil, errors.Wrap(err, "PendingTransmissionsWithConfigDigest failed to scan row") + } + p.Median = median.ToInt() + for i, v := range rs { + var r [32]byte + if n := copy(r[:], v); n != 32 { + return nil, errors.Errorf("expected 32 bytes for rs value at index %v, got %v bytes", i, n) + } + p.Rs = append(p.Rs, r) + } + for i, v := range ss { + var s [32]byte + if n := copy(s[:], v); n != 32 { + return nil, errors.Errorf("expected 32 bytes for ss value at index %v, got %v bytes", i, n) + } + p.Ss = append(p.Ss, s) + } + if n := copy(p.Vs[:], vs); n != 32 { + return nil, errors.Errorf("expected 32 bytes for vs, got %v bytes", n) + } + m[k] = p + } + + if err := rows.Err(); err != nil { + return m, err + } + + return m, nil +} + +func (d *db) DeletePendingTransmission(ctx context.Context, k ocrtypes.ReportTimestamp) (err error) { + _, err = d.q.WithOpts(pg.WithLongQueryTimeout()).ExecContext(ctx, ` +DELETE FROM ocr_pending_transmissions +WHERE ocr_oracle_spec_id = $1 AND config_digest = $2 AND epoch = $3 AND round = $4 +`, d.oracleSpecID, k.ConfigDigest, k.Epoch, k.Round) + + err = errors.Wrap(err, "DeletePendingTransmission failed") + + return +} + +func (d *db) DeletePendingTransmissionsOlderThan(ctx context.Context, t time.Time) (err error) { + _, err = d.q.WithOpts(pg.WithLongQueryTimeout()).ExecContext(ctx, ` +DELETE FROM ocr_pending_transmissions +WHERE ocr_oracle_spec_id = $1 AND time < $2 +`, 
d.oracleSpecID, t) + + err = errors.Wrap(err, "DeletePendingTransmissionsOlderThan failed") + + return +} + +func (d *db) SaveLatestRoundRequested(tx pg.Queryer, rr offchainaggregator.OffchainAggregatorRoundRequested) error { + rawLog, err := json.Marshal(rr.Raw) + if err != nil { + return errors.Wrap(err, "could not marshal log as JSON") + } + _, err = tx.Exec(` +INSERT INTO ocr_latest_round_requested (ocr_oracle_spec_id, requester, config_digest, epoch, round, raw) +VALUES ($1,$2,$3,$4,$5,$6) ON CONFLICT (ocr_oracle_spec_id) DO UPDATE SET + requester = EXCLUDED.requester, + config_digest = EXCLUDED.config_digest, + epoch = EXCLUDED.epoch, + round = EXCLUDED.round, + raw = EXCLUDED.raw +`, d.oracleSpecID, rr.Requester, rr.ConfigDigest[:], rr.Epoch, rr.Round, rawLog) + + return errors.Wrap(err, "could not save latest round requested") +} + +func (d *db) LoadLatestRoundRequested() (rr offchainaggregator.OffchainAggregatorRoundRequested, err error) { + rows, err := d.q.Query(` +SELECT requester, config_digest, epoch, round, raw +FROM ocr_latest_round_requested +WHERE ocr_oracle_spec_id = $1 +LIMIT 1 +`, d.oracleSpecID) + if err != nil { + return rr, errors.Wrap(err, "LoadLatestRoundRequested failed to query rows") + } + defer func() { err = multierr.Combine(err, rows.Close()) }() + + for rows.Next() { + var configDigest []byte + var rawLog []byte + var err2 error + + err2 = rows.Scan(&rr.Requester, &configDigest, &rr.Epoch, &rr.Round, &rawLog) + err = multierr.Combine(err2, errors.Wrap(err, "LoadLatestRoundRequested failed to scan row")) + + rr.ConfigDigest, err2 = ocrtypes.BytesToConfigDigest(configDigest) + err = multierr.Combine(err2, errors.Wrap(err, "LoadLatestRoundRequested failed to decode config digest")) + + err2 = json.Unmarshal(rawLog, &rr.Raw) + err = multierr.Combine(err2, errors.Wrap(err, "LoadLatestRoundRequested failed to unmarshal raw log")) + } + + if err = rows.Err(); err != nil { + return + } + + return +} diff --git 
a/core/services/ocr/database_test.go b/core/services/ocr/database_test.go new file mode 100644 index 00000000..bb185ae9 --- /dev/null +++ b/core/services/ocr/database_test.go @@ -0,0 +1,447 @@ +package ocr_test + +import ( + "bytes" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/gethwrappers/offchainaggregator" + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" +) + +func Test_DB_ReadWriteState(t *testing.T) { + db := pgtest.NewSqlxDB(t) + + configDigest := cltest.MakeConfigDigest(t) + cfg := configtest.NewTestGeneralConfig(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + key, _ := cltest.MustInsertRandomKey(t, ethKeyStore) + spec := cltest.MustInsertOffchainreportingOracleSpec(t, db, key.EIP55Address) + + t.Run("reads and writes state", func(t *testing.T) { + t.Log("creating DB") + odb := ocr.NewTestDB(t, db, spec.ID) + state := ocrtypes.PersistentState{ + Epoch: 1, + HighestSentEpoch: 2, + HighestReceivedEpoch: []uint32{3}, + } + + err := odb.WriteState(testutils.Context(t), configDigest, state) + require.NoError(t, err) + + readState, err := odb.ReadState(testutils.Context(t), configDigest) + require.NoError(t, err) + + require.Equal(t, state, *readState) + }) + + t.Run("updates state", func(t *testing.T) { + odb := ocr.NewTestDB(t, db, spec.ID) + newState := ocrtypes.PersistentState{ + Epoch: 2, + HighestSentEpoch: 3, + HighestReceivedEpoch: []uint32{4, 5}, + } + + err := odb.WriteState(testutils.Context(t), 
configDigest, newState) + require.NoError(t, err) + + readState, err := odb.ReadState(testutils.Context(t), configDigest) + require.NoError(t, err) + + require.Equal(t, newState, *readState) + }) + + t.Run("does not return result for wrong spec", func(t *testing.T) { + odb := ocr.NewTestDB(t, db, spec.ID) + state := ocrtypes.PersistentState{ + Epoch: 3, + HighestSentEpoch: 4, + HighestReceivedEpoch: []uint32{5, 6}, + } + + err := odb.WriteState(testutils.Context(t), configDigest, state) + require.NoError(t, err) + + // db with different spec + odb = ocr.NewTestDB(t, db, -1) + + readState, err := odb.ReadState(testutils.Context(t), configDigest) + require.NoError(t, err) + + require.Nil(t, readState) + }) + + t.Run("does not return result for wrong config digest", func(t *testing.T) { + odb := ocr.NewTestDB(t, db, spec.ID) + state := ocrtypes.PersistentState{ + Epoch: 4, + HighestSentEpoch: 5, + HighestReceivedEpoch: []uint32{6, 7}, + } + + err := odb.WriteState(testutils.Context(t), configDigest, state) + require.NoError(t, err) + + readState, err := odb.ReadState(testutils.Context(t), cltest.MakeConfigDigest(t)) + require.NoError(t, err) + + require.Nil(t, readState) + }) +} + +func Test_DB_ReadWriteConfig(t *testing.T) { + db := pgtest.NewSqlxDB(t) + sqlDB := db + cfg := configtest.NewTestGeneralConfig(t) + + config := ocrtypes.ContractConfig{ + ConfigDigest: cltest.MakeConfigDigest(t), + Signers: []common.Address{testutils.NewAddress(), testutils.NewAddress()}, + Transmitters: []common.Address{testutils.NewAddress(), testutils.NewAddress()}, + Threshold: uint8(35), + EncodedConfigVersion: uint64(987654), + Encoded: []byte{1, 2, 3, 4, 5}, + } + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + key, _ := cltest.MustInsertRandomKey(t, ethKeyStore) + spec := cltest.MustInsertOffchainreportingOracleSpec(t, db, key.EIP55Address) + transmitterAddress := key.Address + + t.Run("reads and writes config", func(t *testing.T) { + db := ocr.NewTestDB(t, sqlDB, 
spec.ID) + + err := db.WriteConfig(testutils.Context(t), config) + require.NoError(t, err) + + readConfig, err := db.ReadConfig(testutils.Context(t)) + require.NoError(t, err) + + require.Equal(t, &config, readConfig) + }) + + t.Run("updates config", func(t *testing.T) { + db := ocr.NewTestDB(t, sqlDB, spec.ID) + + newConfig := ocrtypes.ContractConfig{ + ConfigDigest: cltest.MakeConfigDigest(t), + Signers: []common.Address{utils.ZeroAddress, transmitterAddress, testutils.NewAddress()}, + Transmitters: []common.Address{utils.ZeroAddress, transmitterAddress, testutils.NewAddress()}, + Threshold: uint8(36), + EncodedConfigVersion: uint64(987655), + Encoded: []byte{2, 3, 4, 5, 6}, + } + + err := db.WriteConfig(testutils.Context(t), newConfig) + require.NoError(t, err) + + readConfig, err := db.ReadConfig(testutils.Context(t)) + require.NoError(t, err) + + require.Equal(t, &newConfig, readConfig) + }) + + t.Run("does not return result for wrong spec", func(t *testing.T) { + db := ocr.NewTestDB(t, sqlDB, spec.ID) + + err := db.WriteConfig(testutils.Context(t), config) + require.NoError(t, err) + + db = ocr.NewTestDB(t, sqlDB, -1) + + readConfig, err := db.ReadConfig(testutils.Context(t)) + require.NoError(t, err) + + require.Nil(t, readConfig) + }) +} + +func assertPendingTransmissionEqual(t *testing.T, pt1, pt2 ocrtypes.PendingTransmission) { + t.Helper() + + require.Equal(t, pt1.Rs, pt2.Rs) + require.Equal(t, pt1.Ss, pt2.Ss) + assert.True(t, bytes.Equal(pt1.Vs[:], pt2.Vs[:])) + assert.True(t, bytes.Equal(pt1.SerializedReport[:], pt2.SerializedReport[:])) + assert.Equal(t, pt1.Median, pt2.Median) + for i := range pt1.Ss { + assert.True(t, bytes.Equal(pt1.Ss[i][:], pt2.Ss[i][:])) + } + for i := range pt1.Rs { + assert.True(t, bytes.Equal(pt1.Rs[i][:], pt2.Rs[i][:])) + } +} + +func Test_DB_PendingTransmissions(t *testing.T) { + db := pgtest.NewSqlxDB(t) + sqlDB := db + cfg := configtest.NewTestGeneralConfig(t) + ethKeyStore := cltest.NewKeyStore(t, db, 
cfg.Database()).Eth() + key, _ := cltest.MustInsertRandomKey(t, ethKeyStore) + + spec := cltest.MustInsertOffchainreportingOracleSpec(t, db, key.EIP55Address) + spec2 := cltest.MustInsertOffchainreportingOracleSpec(t, db, key.EIP55Address) + odb := ocr.NewTestDB(t, sqlDB, spec.ID) + odb2 := ocr.NewTestDB(t, sqlDB, spec2.ID) + configDigest := cltest.MakeConfigDigest(t) + + k := ocrtypes.ReportTimestamp{ + ConfigDigest: configDigest, + Epoch: 0, + Round: 1, + } + k2 := ocrtypes.ReportTimestamp{ + ConfigDigest: configDigest, + Epoch: 1, + Round: 2, + } + + t.Run("stores and retrieves pending transmissions", func(t *testing.T) { + p := ocrtypes.PendingTransmission{ + Time: time.Now(), + Median: ocrtypes.Observation(big.NewInt(41)), + SerializedReport: []byte{0, 2, 3}, + Rs: [][32]byte{testutils.Random32Byte(), testutils.Random32Byte()}, + Ss: [][32]byte{testutils.Random32Byte(), testutils.Random32Byte()}, + Vs: testutils.Random32Byte(), + } + + err := odb.StorePendingTransmission(testutils.Context(t), k, p) + require.NoError(t, err) + m, err := odb.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + assertPendingTransmissionEqual(t, m[k], p) + + // Now overwrite value for k to prove that updating works + p = ocrtypes.PendingTransmission{ + Time: time.Now(), + Median: ocrtypes.Observation(big.NewInt(42)), + SerializedReport: []byte{1, 2, 3}, + Rs: [][32]byte{testutils.Random32Byte()}, + Ss: [][32]byte{testutils.Random32Byte()}, + Vs: testutils.Random32Byte(), + } + err = odb.StorePendingTransmission(testutils.Context(t), k, p) + require.NoError(t, err) + m, err = odb.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + assertPendingTransmissionEqual(t, m[k], p) + + p2 := ocrtypes.PendingTransmission{ + Time: time.Now(), + Median: ocrtypes.Observation(big.NewInt(43)), + SerializedReport: []byte{2, 2, 3}, + Rs: [][32]byte{testutils.Random32Byte()}, + Ss: 
[][32]byte{testutils.Random32Byte()}, + Vs: testutils.Random32Byte(), + } + + err = odb.StorePendingTransmission(testutils.Context(t), k2, p2) + require.NoError(t, err) + + kRedHerring := ocrtypes.ReportTimestamp{ + ConfigDigest: ocrtypes.ConfigDigest{43}, + Epoch: 1, + Round: 2, + } + pRedHerring := ocrtypes.PendingTransmission{ + Time: time.Now(), + Median: ocrtypes.Observation(big.NewInt(43)), + SerializedReport: []byte{3, 2, 3}, + Rs: [][32]byte{testutils.Random32Byte()}, + Ss: [][32]byte{testutils.Random32Byte()}, + Vs: testutils.Random32Byte(), + } + + err = odb.StorePendingTransmission(testutils.Context(t), kRedHerring, pRedHerring) + require.NoError(t, err) + + m, err = odb.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + + require.Len(t, m, 2) + + // HACK to get around time equality because otherwise its annoying (time storage into postgres is mildly lossy) + require.Equal(t, p.Time.Unix(), m[k].Time.Unix()) + require.Equal(t, p2.Time.Unix(), m[k2].Time.Unix()) + + var zt time.Time + p.Time, p2.Time = zt, zt + for k, v := range m { + v.Time = zt + m[k] = v + } + + require.Equal(t, p, m[k]) + require.Equal(t, p2, m[k2]) + + // No keys for this oracleSpecID yet + m, err = odb2.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + require.Len(t, m, 0) + }) + + t.Run("deletes pending transmission by key", func(t *testing.T) { + p := ocrtypes.PendingTransmission{ + Time: time.Unix(100, 0), + Median: ocrtypes.Observation(big.NewInt(44)), + SerializedReport: []byte{1, 4, 3}, + Rs: [][32]byte{testutils.Random32Byte()}, + Ss: [][32]byte{testutils.Random32Byte()}, + Vs: testutils.Random32Byte(), + } + err := odb.StorePendingTransmission(testutils.Context(t), k, p) + require.NoError(t, err) + err = odb2.StorePendingTransmission(testutils.Context(t), k, p) + require.NoError(t, err) + + err = odb.DeletePendingTransmission(testutils.Context(t), k) + require.NoError(t, err) 
+ + m, err := odb.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + require.Len(t, m, 1) + + // Did not affect other oracleSpecID + m, err = odb2.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + require.Len(t, m, 1) + }) + + t.Run("allows multiple duplicate keys for different spec ID", func(t *testing.T) { + p := ocrtypes.PendingTransmission{ + Time: time.Unix(100, 0), + Median: ocrtypes.Observation(big.NewInt(44)), + SerializedReport: []byte{1, 4, 3}, + Rs: [][32]byte{testutils.Random32Byte()}, + Ss: [][32]byte{testutils.Random32Byte()}, + Vs: testutils.Random32Byte(), + } + err := odb.StorePendingTransmission(testutils.Context(t), k2, p) + require.NoError(t, err) + + m, err := odb.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + require.Len(t, m, 1) + require.Equal(t, p.Median, m[k2].Median) + }) + + t.Run("deletes pending transmission older than", func(t *testing.T) { + p := ocrtypes.PendingTransmission{ + Time: time.Unix(100, 0), + Median: ocrtypes.Observation(big.NewInt(41)), + SerializedReport: []byte{0, 2, 3}, + Rs: [][32]byte{testutils.Random32Byte()}, + Ss: [][32]byte{testutils.Random32Byte()}, + Vs: testutils.Random32Byte(), + } + + err := odb.StorePendingTransmission(testutils.Context(t), k, p) + require.NoError(t, err) + + p2 := ocrtypes.PendingTransmission{ + Time: time.Unix(1000, 0), + Median: ocrtypes.Observation(big.NewInt(42)), + SerializedReport: []byte{1, 2, 3}, + Rs: [][32]byte{testutils.Random32Byte()}, + Ss: [][32]byte{testutils.Random32Byte()}, + Vs: testutils.Random32Byte(), + } + err = odb.StorePendingTransmission(testutils.Context(t), k2, p2) + require.NoError(t, err) + + p2 = ocrtypes.PendingTransmission{ + Time: time.Now(), + Median: ocrtypes.Observation(big.NewInt(43)), + SerializedReport: []byte{2, 2, 3}, + Rs: [][32]byte{testutils.Random32Byte()}, + Ss: 
[][32]byte{testutils.Random32Byte()}, + Vs: testutils.Random32Byte(), + } + + err = odb.StorePendingTransmission(testutils.Context(t), k2, p2) + require.NoError(t, err) + + err = odb.DeletePendingTransmissionsOlderThan(testutils.Context(t), time.Unix(900, 0)) + require.NoError(t, err) + + m, err := odb.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + require.Len(t, m, 1) + + // Didn't affect other oracleSpecIDs + odb = ocr.NewTestDB(t, sqlDB, spec2.ID) + m, err = odb.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + require.Len(t, m, 1) + }) +} + +func Test_DB_LatestRoundRequested(t *testing.T) { + db := pgtest.NewSqlxDB(t) + sqlDB := db + + pgtest.MustExec(t, db, `SET CONSTRAINTS offchainreporting_latest_roun_offchainreporting_oracle_spe_fkey DEFERRED`) + + odb := ocr.NewTestDB(t, sqlDB, 1) + odb2 := ocr.NewTestDB(t, sqlDB, 2) + + rawLog := cltest.LogFromFixture(t, "../../testdata/jsonrpc/round_requested_log_1_1.json") + + rr := offchainaggregator.OffchainAggregatorRoundRequested{ + Requester: testutils.NewAddress(), + ConfigDigest: cltest.MakeConfigDigest(t), + Epoch: 42, + Round: 9, + Raw: rawLog, + } + + t.Run("saves latest round requested", func(t *testing.T) { + err := odb.SaveLatestRoundRequested(sqlDB, rr) + require.NoError(t, err) + + rawLog.Index = 42 + + // Now overwrite to prove that updating works + rr = offchainaggregator.OffchainAggregatorRoundRequested{ + Requester: testutils.NewAddress(), + ConfigDigest: cltest.MakeConfigDigest(t), + Epoch: 43, + Round: 8, + Raw: rawLog, + } + + err = odb.SaveLatestRoundRequested(sqlDB, rr) + require.NoError(t, err) + }) + + t.Run("loads latest round requested", func(t *testing.T) { + // There is no round for db2 + lrr, err := odb2.LoadLatestRoundRequested() + require.NoError(t, err) + require.Equal(t, 0, int(lrr.Epoch)) + + lrr, err = odb.LoadLatestRoundRequested() + require.NoError(t, err) + + assert.Equal(t, 
rr, lrr) + }) + + t.Run("spec with latest round requested can be deleted", func(t *testing.T) { + _, err := sqlDB.Exec(`DELETE FROM ocr_oracle_specs`) + assert.NoError(t, err) + }) +} diff --git a/core/services/ocr/delegate.go b/core/services/ocr/delegate.go new file mode 100644 index 00000000..f1ea163a --- /dev/null +++ b/core/services/ocr/delegate.go @@ -0,0 +1,340 @@ +package ocr + +import ( + "fmt" + "strings" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + commonlogger "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/libocr/gethwrappers/offchainaggregator" + ocr "github.com/goplugin/libocr/offchainreporting" + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/offchain_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" + "github.com/goplugin/pluginv3.0/v2/core/services/telemetry" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type Delegate struct { + db *sqlx.DB + jobORM job.ORM + keyStore keystore.Master + pipelineRunner pipeline.Runner + peerWrapper *ocrcommon.SingletonPeerWrapper + monitoringEndpointGen telemetry.MonitoringEndpointGenerator + legacyChains 
legacyevm.LegacyChainContainer + lggr logger.Logger + cfg Config + mailMon *mailbox.Monitor +} + +var _ job.Delegate = (*Delegate)(nil) + +const ConfigOverriderPollInterval = 30 * time.Second + +func NewDelegate( + db *sqlx.DB, + jobORM job.ORM, + keyStore keystore.Master, + pipelineRunner pipeline.Runner, + peerWrapper *ocrcommon.SingletonPeerWrapper, + monitoringEndpointGen telemetry.MonitoringEndpointGenerator, + legacyChains legacyevm.LegacyChainContainer, + lggr logger.Logger, + cfg Config, + mailMon *mailbox.Monitor, +) *Delegate { + return &Delegate{ + db: db, + jobORM: jobORM, + keyStore: keyStore, + pipelineRunner: pipelineRunner, + peerWrapper: peerWrapper, + monitoringEndpointGen: monitoringEndpointGen, + legacyChains: legacyChains, + lggr: lggr.Named("OCR"), + cfg: cfg, + mailMon: mailMon, + } +} + +func (d *Delegate) JobType() job.Type { + return job.OffchainReporting +} + +func (d *Delegate) BeforeJobCreated(spec job.Job) {} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { return nil } + +// ServicesForSpec returns the OCR services that need to run for this job +func (d *Delegate) ServicesForSpec(jb job.Job) (services []job.ServiceCtx, err error) { + if jb.OCROracleSpec == nil { + return nil, errors.Errorf("offchainreporting.Delegate expects an *job.OffchainreportingOracleSpec to be present, got %v", jb) + } + chain, err := d.legacyChains.Get(jb.OCROracleSpec.EVMChainID.String()) + if err != nil { + return nil, err + } + concreteSpec, err := job.LoadConfigVarsOCR(chain.Config().EVM().OCR(), chain.Config().OCR(), *jb.OCROracleSpec) + if err != nil { + return nil, err + } + lggr := d.lggr.With( + "contractAddress", concreteSpec.ContractAddress, + "jobName", jb.Name.ValueOrZero(), + "jobID", jb.ID, + "externalJobID", jb.ExternalJobID) + + contract, err := 
offchain_aggregator_wrapper.NewOffchainAggregator(concreteSpec.ContractAddress.Address(), chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "could not instantiate NewOffchainAggregator") + } + + contractFilterer, err := offchainaggregator.NewOffchainAggregatorFilterer(concreteSpec.ContractAddress.Address(), chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "could not instantiate NewOffchainAggregatorFilterer") + } + + contractCaller, err := offchainaggregator.NewOffchainAggregatorCaller(concreteSpec.ContractAddress.Address(), chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "could not instantiate NewOffchainAggregatorCaller") + } + + ocrDB := NewDB(d.db, concreteSpec.ID, lggr, d.cfg) + + tracker := NewOCRContractTracker( + contract, + contractFilterer, + contractCaller, + chain.Client(), + chain.LogBroadcaster(), + jb.ID, + lggr, + d.db, + ocrDB, + chain.Config().EVM(), + chain.Config().Database(), + chain.HeadBroadcaster(), + d.mailMon, + ) + services = append(services, tracker) + + peerWrapper := d.peerWrapper + if peerWrapper == nil { + return nil, errors.New("cannot setup OCR job service, libp2p peer was missing") + } else if !peerWrapper.IsStarted() { + return nil, errors.New("peerWrapper is not started. OCR jobs require a started and running p2p peer") + } + + v2Bootstrappers, err := ocrcommon.ParseBootstrapPeers(concreteSpec.P2PV2Bootstrappers) + if err != nil { + return nil, err + } else if len(v2Bootstrappers) == 0 { + // ParseBootstrapPeers() does not distinguish between no p2pv2Bootstrappers field + // present in job spec, and p2pv2Bootstrappers = []. So even if an empty list is + // passed explicitly, this will still fall back to using the V2 bootstappers defined + // in P2P.V2.DefaultBootstrappers config var. Only a non-empty list will override the default list. 
+ v2Bootstrappers = peerWrapper.P2PConfig().V2().DefaultBootstrappers() + } + + ocrLogger := commonlogger.NewOCRWrapper(lggr, chain.Config().OCR().TraceLogging(), func(msg string) { + d.jobORM.TryRecordError(jb.ID, msg) + }) + + lc := toLocalConfig(chain.Config().EVM(), chain.Config().EVM().OCR(), chain.Config().Insecure(), *concreteSpec, chain.Config().OCR()) + if err = ocr.SanityCheckLocalConfig(lc); err != nil { + return nil, err + } + lggr.Info(fmt.Sprintf("OCR job using local config %+v", lc)) + + if concreteSpec.IsBootstrapPeer { + var bootstrapper *ocr.BootstrapNode + bootstrapper, err = ocr.NewBootstrapNode(ocr.BootstrapNodeArgs{ + BootstrapperFactory: peerWrapper.Peer1, + V2Bootstrappers: v2Bootstrappers, + ContractConfigTracker: tracker, + Database: ocrDB, + LocalConfig: lc, + Logger: ocrLogger, + }) + if err != nil { + return nil, errors.Wrap(err, "error calling NewBootstrapNode") + } + bootstrapperCtx := job.NewServiceAdapter(bootstrapper) + services = append(services, bootstrapperCtx) + } else { + // p2pv2Bootstrappers must be defined either in node config or in job spec + if len(v2Bootstrappers) < 1 { + return nil, errors.New("Need at least one v2 bootstrap peer defined") + } + + ocrkey, err := d.keyStore.OCR().Get(concreteSpec.EncryptedOCRKeyBundleID.String()) + if err != nil { + return nil, err + } + contractABI, err := abi.JSON(strings.NewReader(offchainaggregator.OffchainAggregatorABI)) + if err != nil { + return nil, errors.Wrap(err, "could not get contract ABI JSON") + } + + cfg := chain.Config() + strategy := txmgrcommon.NewQueueingTxStrategy(jb.ExternalJobID, cfg.OCR().DefaultTransactionQueueDepth(), cfg.Database().DefaultQueryTimeout()) + + var checker txmgr.TransmitCheckerSpec + if chain.Config().OCR().SimulateTransactions() { + checker.CheckerType = txmgr.TransmitCheckerTypeSimulate + } + + if concreteSpec.TransmitterAddress == nil { + return nil, errors.New("TransmitterAddress is missing") + } + + var jsGasLimit *uint32 + if 
jb.GasLimit.Valid { + jsGasLimit = &jb.GasLimit.Uint32 + } + gasLimit := pipeline.SelectGasLimit(chain.Config().EVM().GasEstimator(), jb.Type.String(), jsGasLimit) + + // effectiveTransmitterAddress is the transmitter address registered on the ocr contract. This is by default the EOA account on the node. + // In the case of forwarding, the transmitter address is the forwarder contract deployed onchain between EOA and OCR contract. + effectiveTransmitterAddress := concreteSpec.TransmitterAddress.Address() + if jb.ForwardingAllowed { + fwdrAddress, fwderr := chain.TxManager().GetForwarderForEOA(effectiveTransmitterAddress) + if fwderr == nil { + effectiveTransmitterAddress = fwdrAddress + } else { + lggr.Warnw("Skipping forwarding for job, will fallback to default behavior", "job", jb.Name, "err", fwderr) + } + } + + transmitter, err := ocrcommon.NewTransmitter( + chain.TxManager(), + []common.Address{concreteSpec.TransmitterAddress.Address()}, + gasLimit, + effectiveTransmitterAddress, + strategy, + checker, + chain.ID(), + d.keyStore.Eth(), + ) + if err != nil { + return nil, errors.Wrap(err, "failed to create transmitter") + } + + contractTransmitter := NewOCRContractTransmitter( + concreteSpec.ContractAddress.Address(), + contractCaller, + contractABI, + transmitter, + chain.LogBroadcaster(), + tracker, + chain.ID(), + effectiveTransmitterAddress, + ) + + saver := ocrcommon.NewResultRunSaver( + d.pipelineRunner, + lggr, + cfg.JobPipeline().MaxSuccessfulRuns(), + cfg.JobPipeline().ResultWriteQueueDepth(), + ) + + var configOverrider ocrtypes.ConfigOverrider + configOverriderService, err := d.maybeCreateConfigOverrider(lggr, chain, concreteSpec.ContractAddress) + if err != nil { + return nil, errors.Wrap(err, "Failed to create ConfigOverrider") + } + + // NOTE: conditional assigning to `configOverrider` is necessary due to the unfortunate fact that assigning `nil` to an + // interface variable causes `x == nil` checks to always return false, so methods on the 
interface cannot be safely called then. + // + // the problematic case would be: + // configOverriderService, err := d.maybeCreateConfigOverrider(...) + // if err != nil { return ... } + // configOverrider = configOverriderService // contract might be `nil` + // assert.False(configOverrider != nil) // even if 'contract' was nil, this check will return true, unexpectedly + if configOverriderService != nil { + services = append(services, configOverriderService) + configOverrider = configOverriderService + } + + jb.OCROracleSpec.CaptureEATelemetry = chain.Config().OCR().CaptureEATelemetry() + enhancedTelemChan := make(chan ocrcommon.EnhancedTelemetryData, 100) + if ocrcommon.ShouldCollectEnhancedTelemetry(&jb) { + enhancedTelemService := ocrcommon.NewEnhancedTelemetryService(&jb, enhancedTelemChan, make(chan struct{}), d.monitoringEndpointGen.GenMonitoringEndpoint("EVM", chain.ID().String(), concreteSpec.ContractAddress.String(), synchronization.EnhancedEA), lggr.Named("EnhancedTelemetry")) + services = append(services, enhancedTelemService) + } else { + lggr.Infow("Enhanced telemetry is disabled for job", "job", jb.Name) + } + + oracle, err := ocr.NewOracle(ocr.OracleArgs{ + Database: ocrDB, + Datasource: ocrcommon.NewDataSourceV1( + d.pipelineRunner, + jb, + *jb.PipelineSpec, + lggr, + saver, + enhancedTelemChan, + ), + LocalConfig: lc, + ContractTransmitter: contractTransmitter, + ContractConfigTracker: tracker, + PrivateKeys: ocrkey, + BinaryNetworkEndpointFactory: peerWrapper.Peer1, + Logger: ocrLogger, + V2Bootstrappers: v2Bootstrappers, + MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint("EVM", chain.ID().String(), concreteSpec.ContractAddress.String(), synchronization.OCR), + ConfigOverrider: configOverrider, + }) + if err != nil { + return nil, errors.Wrap(err, "error calling NewOracle") + } + oracleCtx := job.NewServiceAdapter(oracle) + services = append(services, oracleCtx) + + // RunResultSaver needs to be started first so its available + 
// to read db writes. It is stopped last after the Oracle is shut down + // so no further runs are enqueued and we can drain the queue. + services = append([]job.ServiceCtx{saver}, services...) + } + + return services, nil +} + +func (d *Delegate) maybeCreateConfigOverrider(logger logger.Logger, chain legacyevm.Chain, contractAddress ethkey.EIP55Address) (*ConfigOverriderImpl, error) { + flagsContractAddress := chain.Config().EVM().FlagsContractAddress() + if flagsContractAddress != "" { + flags, err := NewFlags(flagsContractAddress, chain.Client()) + if err != nil { + return nil, errors.Wrapf(err, + "OCR: unable to create Flags contract instance, check address: %s or remove EVM.FlagsContractAddress configuration variable", + flagsContractAddress, + ) + } + + ticker := utils.NewPausableTicker(ConfigOverriderPollInterval) + return NewConfigOverriderImpl(logger, chain.Config().EVM().OCR(), contractAddress, flags, &ticker) + } + return nil, nil +} diff --git a/core/services/ocr/example-job-spec.toml b/core/services/ocr/example-job-spec.toml new file mode 100644 index 00000000..7622c45f --- /dev/null +++ b/core/services/ocr/example-job-spec.toml @@ -0,0 +1,31 @@ +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +keyBundleID = "73e8966a78ca09bb912e9565cfb79fbe8a6048fab1f0cf49b18047c3895e0447" +monitoringEndpoint = "chain.link:4321" +transmitterAddress = "0xaA07d525B4006a2f927D79CA78a23A8ee680A32A" +observationTimeout = "10s" +blockchainTimeout = "20s" +contractConfigTrackerSubscribeInterval = "2m" +contractConfigTrackerPollInterval = "1m" +contractConfigConfirmations = 3 +observationSource = """ + // data source 1 + ds1 [type=bridge name=voter_turnout]; + ds1_parse [type=jsonparse path="one,two"]; + ds1_multiply [type=multiply 
times=1.23]; + + // data source 2 + ds2 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData="{\\"hi\\": \\"hello\\"}"]; + ds2_parse [type=jsonparse path="three,four"]; + ds2_multiply [type=multiply times=4.56]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0]; + answer2 [type=bridge name=election_winner index=1]; +""" \ No newline at end of file diff --git a/core/services/ocr/flags.go b/core/services/ocr/flags.go new file mode 100644 index 00000000..93bdd748 --- /dev/null +++ b/core/services/ocr/flags.go @@ -0,0 +1,59 @@ +package ocr + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flags_wrapper" +) + +// ContractFlags wraps the a contract +type ContractFlags struct { + flags_wrapper.FlagsInterface +} + +// NewFlags constructs a new Flags from a flags contract address +func NewFlags(addrHex string, ethClient evmclient.Client) (*ContractFlags, error) { + flags := &ContractFlags{} + + if addrHex == "" { + return flags, nil + } + + contractAddr := common.HexToAddress(addrHex) + contract, err := flags_wrapper.NewFlags(contractAddr, ethClient) + if err != nil { + return flags, errors.Wrap(err, "Failed to create flags wrapper") + } + flags.FlagsInterface = contract + return flags, nil +} + +// Contract returns the flags contract +func (f *ContractFlags) Contract() flags_wrapper.FlagsInterface { + return f.FlagsInterface +} + +// ContractExists returns whether a flag contract exists +func (f *ContractFlags) ContractExists() bool { + return f.FlagsInterface != nil +} + +// IsLowered determines whether the flag is lowered for a given contract. 
+// If a contract does not exist, it is considered to be lowered +func (f *ContractFlags) IsLowered(contractAddr common.Address) (bool, error) { + if !f.ContractExists() { + return true, nil + } + + flags, err := f.GetFlags(nil, + []common.Address{utils.ZeroAddress, contractAddr}, + ) + if err != nil { + return true, errors.Wrap(err, "Failed to call GetFlags in the contract") + } + + return !flags[0] || !flags[1], nil +} diff --git a/core/services/ocr/flags_test.go b/core/services/ocr/flags_test.go new file mode 100644 index 00000000..56a95d99 --- /dev/null +++ b/core/services/ocr/flags_test.go @@ -0,0 +1,54 @@ +package ocr_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" +) + +func TestFlags_IsLowered(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + getFlagsResult []bool + expected bool + }{ + {"both lowered", []bool{false, false}, true}, + {"global lowered", []bool{false, true}, true}, + {"contract lowered", []bool{true, false}, true}, + {"both raised", []bool{true, true}, false}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + var ( + flagsContract = mocks.NewFlags(t) + address = testutils.NewAddress() + ) + + flags := fluxmonitorv2.ContractFlags{FlagsInterface: flagsContract} + + flagsContract.On("GetFlags", mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + require.Equal(t, []common.Address{ + utils.ZeroAddress, + address, + }, args.Get(1).([]common.Address)) + }). 
+ Return(tc.getFlagsResult, nil) + + result, err := flags.IsLowered(address) + require.NoError(t, err) + require.Equal(t, tc.expected, result) + }) + } +} diff --git a/core/services/ocr/helpers_internal_test.go b/core/services/ocr/helpers_internal_test.go new file mode 100644 index 00000000..7401e5b3 --- /dev/null +++ b/core/services/ocr/helpers_internal_test.go @@ -0,0 +1,18 @@ +package ocr + +import ( + "testing" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func (c *ConfigOverriderImpl) ExportedUpdateFlagsStatus() error { + return c.updateFlagsStatus() +} + +func NewTestDB(t *testing.T, sqldb *sqlx.DB, oracleSpecID int32) *db { + return NewDB(sqldb, oracleSpecID, logger.TestLogger(t), pgtest.NewQConfig(true)) +} diff --git a/core/services/ocr/mocks/ocr_contract_tracker_db.go b/core/services/ocr/mocks/ocr_contract_tracker_db.go new file mode 100644 index 00000000..e8e4cf2c --- /dev/null +++ b/core/services/ocr/mocks/ocr_contract_tracker_db.go @@ -0,0 +1,76 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + offchainaggregator "github.com/goplugin/libocr/gethwrappers/offchainaggregator" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// OCRContractTrackerDB is an autogenerated mock type for the OCRContractTrackerDB type +type OCRContractTrackerDB struct { + mock.Mock +} + +// LoadLatestRoundRequested provides a mock function with given fields: +func (_m *OCRContractTrackerDB) LoadLatestRoundRequested() (offchainaggregator.OffchainAggregatorRoundRequested, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LoadLatestRoundRequested") + } + + var r0 offchainaggregator.OffchainAggregatorRoundRequested + var r1 error + if rf, ok := ret.Get(0).(func() (offchainaggregator.OffchainAggregatorRoundRequested, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() offchainaggregator.OffchainAggregatorRoundRequested); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(offchainaggregator.OffchainAggregatorRoundRequested) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SaveLatestRoundRequested provides a mock function with given fields: tx, rr +func (_m *OCRContractTrackerDB) SaveLatestRoundRequested(tx pg.Queryer, rr offchainaggregator.OffchainAggregatorRoundRequested) error { + ret := _m.Called(tx, rr) + + if len(ret) == 0 { + panic("no return value specified for SaveLatestRoundRequested") + } + + var r0 error + if rf, ok := ret.Get(0).(func(pg.Queryer, offchainaggregator.OffchainAggregatorRoundRequested) error); ok { + r0 = rf(tx, rr) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewOCRContractTrackerDB creates a new instance of OCRContractTrackerDB. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewOCRContractTrackerDB(t interface { + mock.TestingT + Cleanup(func()) +}) *OCRContractTrackerDB { + mock := &OCRContractTrackerDB{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr/validate.go b/core/services/ocr/validate.go new file mode 100644 index 00000000..45b2558e --- /dev/null +++ b/core/services/ocr/validate.go @@ -0,0 +1,163 @@ +package ocr + +import ( + "math/big" + "time" + + "github.com/lib/pq" + "github.com/pelletier/go-toml" + "github.com/pkg/errors" + + "github.com/goplugin/libocr/offchainreporting" + + "github.com/goplugin/pluginv3.0/v2/common/config" + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" +) + +type ValidationConfig interface { + ChainType() config.ChainType +} + +type OCRValidationConfig interface { + BlockchainTimeout() time.Duration + CaptureEATelemetry() bool + ContractPollInterval() time.Duration + ContractSubscribeInterval() time.Duration + KeyBundleID() (string, error) + ObservationTimeout() time.Duration + TransmitterAddress() (ethkey.EIP55Address, error) +} + +type insecureConfig interface { + OCRDevelopmentMode() bool +} + +// ValidatedOracleSpecToml validates an oracle spec that came from TOML +func ValidatedOracleSpecToml(legacyChains legacyevm.LegacyChainContainer, tomlString string) (job.Job, error) { + return ValidatedOracleSpecTomlCfg(func(id *big.Int) (evmconfig.ChainScopedConfig, error) { + c, err := legacyChains.Get(id.String()) + if err != nil { + return nil, err + } + return c.Config(), nil + }, tomlString) +} + +func ValidatedOracleSpecTomlCfg(configFn func(id *big.Int) (evmconfig.ChainScopedConfig, error), tomlString string) (job.Job, error) { + var jb = job.Job{} + 
var spec job.OCROracleSpec + tree, err := toml.Load(tomlString) + if err != nil { + return jb, errors.Wrap(err, "toml error on load") + } + // Note this validates all the fields which implement an UnmarshalText + // i.e. TransmitterAddress, PeerID... + err = tree.Unmarshal(&spec) + if err != nil { + return jb, errors.Wrap(err, "toml unmarshal error on spec") + } + err = tree.Unmarshal(&jb) + if err != nil { + return jb, errors.Wrap(err, "toml unmarshal error on job") + } + jb.OCROracleSpec = &spec + + if jb.OCROracleSpec.P2PV2Bootstrappers == nil { + // Empty but non-null, field is non-nullable. + jb.OCROracleSpec.P2PV2Bootstrappers = pq.StringArray{} + } + + if jb.Type != job.OffchainReporting { + return jb, errors.Errorf("the only supported type is currently 'offchainreporting', got %s", jb.Type) + } + if !tree.Has("isBootstrapPeer") { + return jb, errors.New("isBootstrapPeer is not defined") + } + + if len(spec.P2PV2Bootstrappers) > 0 { + _, err = ocrcommon.ParseBootstrapPeers(spec.P2PV2Bootstrappers) + if err != nil { + return jb, err + } + } + + cfg, err := configFn(jb.OCROracleSpec.EVMChainID.ToInt()) + if err != nil { + return jb, err + } + + if spec.IsBootstrapPeer { + if err := validateBootstrapSpec(tree); err != nil { + return jb, err + } + } else if err := validateNonBootstrapSpec(tree, jb, cfg.OCR().ObservationTimeout()); err != nil { + return jb, err + } + if err := validateTimingParameters(cfg.EVM(), cfg.EVM().OCR(), cfg.Insecure(), spec, cfg.OCR()); err != nil { + return jb, err + } + return jb, nil +} + +// Parameters that must be explicitly set by the operator. +var ( + // Common to both bootstrap and non-boostrap + params = map[string]struct{}{ + "type": {}, + "schemaVersion": {}, + "contractAddress": {}, + "isBootstrapPeer": {}, + } + // Boostrap and non-bootstrap parameters + // are mutually exclusive. 
+ bootstrapParams = map[string]struct{}{} + nonBootstrapParams = map[string]struct{}{ + "observationSource": {}, + } +) + +func validateTimingParameters(cfg ValidationConfig, evmOcrCfg evmconfig.OCR, insecureCfg insecureConfig, spec job.OCROracleSpec, ocrCfg job.OCRConfig) error { + lc := toLocalConfig(cfg, evmOcrCfg, insecureCfg, spec, ocrCfg) + return errors.Wrap(offchainreporting.SanityCheckLocalConfig(lc), "offchainreporting.SanityCheckLocalConfig failed") +} + +func validateBootstrapSpec(tree *toml.Tree) error { + expected, notExpected := ocrcommon.CloneSet(params), ocrcommon.CloneSet(nonBootstrapParams) + for k := range bootstrapParams { + expected[k] = struct{}{} + } + return ocrcommon.ValidateExplicitlySetKeys(tree, expected, notExpected, "bootstrap") +} + +func validateNonBootstrapSpec(tree *toml.Tree, spec job.Job, ocrObservationTimeout time.Duration) error { + expected, notExpected := ocrcommon.CloneSet(params), ocrcommon.CloneSet(bootstrapParams) + for k := range nonBootstrapParams { + expected[k] = struct{}{} + } + if err := ocrcommon.ValidateExplicitlySetKeys(tree, expected, notExpected, "non-bootstrap"); err != nil { + return err + } + if spec.Pipeline.Source == "" { + return errors.New("no pipeline specified") + } + var observationTimeout time.Duration + if spec.OCROracleSpec.ObservationTimeout != 0 { + observationTimeout = spec.OCROracleSpec.ObservationTimeout.Duration() + } else { + observationTimeout = ocrObservationTimeout + } + if time.Duration(spec.MaxTaskDuration) > observationTimeout { + return errors.Errorf("max task duration must be < observation timeout") + } + for _, task := range spec.Pipeline.Tasks { + timeout, set := task.TaskTimeout() + if set && timeout > observationTimeout { + return errors.Errorf("individual max task duration must be < observation timeout") + } + } + return nil +} diff --git a/core/services/ocr/validate_test.go b/core/services/ocr/validate_test.go new file mode 100644 index 00000000..1954c86f --- /dev/null +++ 
b/core/services/ocr/validate_test.go @@ -0,0 +1,382 @@ +package ocr_test + +import ( + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" +) + +func TestValidateOracleSpec(t *testing.T) { + var tt = []struct { + name string + toml string + overrides func(c *plugin.Config, s *plugin.Secrets) + assertion func(t *testing.T, os job.Job, err error) + }{ + { + name: "minimal non-bootstrap oracle spec", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +isBootstrapPeer = false +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + // Should be able to jsonapi marshal/unmarshal the minimum spec. + // This ensures the UnmarshalJSON's defined on the fields handle a min spec correctly. 
+ b, err := jsonapi.Marshal(os.OCROracleSpec) + require.NoError(t, err) + var r job.OCROracleSpec + err = jsonapi.Unmarshal(b, &r) + require.NoError(t, err) + }, + }, + { + name: "decodes valid oracle spec toml", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +keyBundleID = "73e8966a78ca09bb912e9565cfb79fbe8a6048fab1f0cf49b18047c3895e0447" +monitoringEndpoint = "chain.link:4321" +transmitterAddress = "0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4" +observationTimeout = "10s" +databaseTimeout = "2s" +observationGracePeriod = "2s" +contractTransmitterTransmitTimeout = "1s" +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + assert.Equal(t, 1, int(os.SchemaVersion)) + assert.False(t, os.OCROracleSpec.IsBootstrapPeer) + }, + }, + { + name: "decodes bootstrap toml", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = true +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + assert.Equal(t, 1, int(os.SchemaVersion)) + assert.True(t, os.OCROracleSpec.IsBootstrapPeer) + }, + }, + { + name: "raises error on extra keys", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = 
"12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = true +keyBundleID = "73e8966a78ca09bb912e9565cfb79fbe8a6048fab1f0cf49b18047c3895e0447" +monitoringEndpoint = "chain.link:4321" +transmitterAddress = "0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4" +observationTimeout = "10s" +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + assert.Contains(t, err.Error(), "unrecognised key for bootstrap peer: observationSource") + }, + }, + { + name: "empty pipeline string non-bootstrap node", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "invalid dot", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +observationSource = """ +-> +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "invalid v2 bootstrapper address", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = 
["invalid bootstrapper /#@ address"] +isBootstrapPeer = false +observationSource = """ +blah +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "non-zero blockchain timeout", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = [ +"12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001", +] +isBootstrapPeer = false +blockchainTimeout = "0s" +observationSource = """ +blah +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "non-zero database timeout", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = [ +"12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001", +] +isBootstrapPeer = false +databaseTimeout = "0s" +observationSource = """ +blah +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "non-zero observation grace period", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +observationGracePeriod = "0s" +observationSource = """ +blah +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "non-zero contract transmitter transmit timeout", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" 
+p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +contractTransmitterTransmitTimeout = "0s" +observationSource = """ +blah +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "non-zero intervals", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +contractConfigTrackerSubscribeInterval = "0s" +observationSource = """ +blah +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "broken monitoring endpoint", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = true +monitoringEndpoint = "\t/fd\2ff )(*&^%$#@" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.EqualError(t, err, "toml error on load: (8, 23): invalid escape sequence: \\2") + }, + }, + { + name: "max task duration > observation timeout should error", + toml: ` +type = "offchainreporting" +maxTaskDuration = "30s" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +keyBundleID = "73e8966a78ca09bb912e9565cfb79fbe8a6048fab1f0cf49b18047c3895e0447" +monitoringEndpoint = "chain.link:4321" +transmitterAddress = "0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4" +observationTimeout = "10s" 
+observationSource = """ +ds1 [type=bridge name=voter_turnout]; +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "max task duration must be < observation timeout") + }, + }, + { + name: "individual max task duration > observation timeout should error", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +keyBundleID = "73e8966a78ca09bb912e9565cfb79fbe8a6048fab1f0cf49b18047c3895e0447" +monitoringEndpoint = "chain.link:4321" +transmitterAddress = "0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4" +observationTimeout = "10s" +observationSource = """ +ds1 [type=bridge name=voter_turnout timeout="30s"]; +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "individual max task duration must be < observation timeout") + }, + }, + { + name: "toml parse doesn't panic", + toml: string(hexutil.MustDecode("0x2222220d5c22223b22225c0d21222222")), + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "invalid global default", + toml: ` +type = "offchainreporting" +schemaVersion = 1 +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +keyBundleID = "73e8966a78ca09bb912e9565cfb79fbe8a6048fab1f0cf49b18047c3895e0447" +monitoringEndpoint = "chain.link:4321" +transmitterAddress = "0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4" +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply 
[type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "data source timeout must be between 1s and 20s, but is currently 20m0s") + }, + overrides: func(c *plugin.Config, s *plugin.Secrets) { + c.OCR.ObservationTimeout = commonconfig.MustNewDuration(20 * time.Minute) + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + c := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Insecure.OCRDevelopmentMode = null.BoolFrom(false).Ptr() + if tc.overrides != nil { + tc.overrides(c, s) + } + }) + + s, err := ocr.ValidatedOracleSpecTomlCfg(func(id *big.Int) (evmconfig.ChainScopedConfig, error) { + return evmtest.NewChainScopedConfig(t, c), nil + }, tc.toml) + tc.assertion(t, s, err) + }) + } +} diff --git a/core/services/ocr2/database.go b/core/services/ocr2/database.go new file mode 100644 index 00000000..08e305da --- /dev/null +++ b/core/services/ocr2/database.go @@ -0,0 +1,376 @@ +package ocr2 + +import ( + "context" + "database/sql" + "encoding/binary" + "time" + + "github.com/jmoiron/sqlx" + "github.com/lib/pq" + "github.com/pkg/errors" + ocrcommon "github.com/goplugin/libocr/commontypes" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type db struct { + q pg.Q + oracleSpecID int32 + pluginID int32 + lggr logger.SugaredLogger +} + +var ( + _ ocrtypes.Database = &db{} +) + +// NewDB returns a new DB scoped to this oracleSpecID +func NewDB(sqlxDB *sqlx.DB, oracleSpecID int32, pluginID int32, lggr logger.Logger, cfg pg.QConfig) *db { + namedLogger := lggr.Named("OCR2.DB") + + return &db{ + q: pg.NewQ(sqlxDB, namedLogger, cfg), + oracleSpecID: oracleSpecID, + pluginID: pluginID, + lggr: logger.Sugared(lggr), + } +} 
+ +func (d *db) ReadState(ctx context.Context, cd ocrtypes.ConfigDigest) (ps *ocrtypes.PersistentState, err error) { + stmt := ` + SELECT epoch, highest_sent_epoch, highest_received_epoch + FROM ocr2_persistent_states + WHERE ocr2_oracle_spec_id = $1 AND config_digest = $2 + LIMIT 1` + + ps = new(ocrtypes.PersistentState) + + var tmp []int64 + var highestSentEpochTmp int64 + + err = d.q.QueryRowxContext(ctx, stmt, d.oracleSpecID, cd).Scan(&ps.Epoch, &highestSentEpochTmp, pq.Array(&tmp)) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, errors.Wrap(err, "ReadState failed") + } + + ps.HighestSentEpoch = uint32(highestSentEpochTmp) + + for _, v := range tmp { + ps.HighestReceivedEpoch = append(ps.HighestReceivedEpoch, uint32(v)) + } + + return ps, nil +} + +func (d *db) WriteState(ctx context.Context, cd ocrtypes.ConfigDigest, state ocrtypes.PersistentState) error { + var highestReceivedEpoch []int64 + for _, v := range state.HighestReceivedEpoch { + highestReceivedEpoch = append(highestReceivedEpoch, int64(v)) + } + + stmt := ` + INSERT INTO ocr2_persistent_states ( + ocr2_oracle_spec_id, + config_digest, + epoch, + highest_sent_epoch, + highest_received_epoch, + created_at, + updated_at + ) + VALUES ($1, $2, $3, $4, $5, NOW(), NOW()) + ON CONFLICT (ocr2_oracle_spec_id, config_digest) + DO UPDATE SET ( + epoch, + highest_sent_epoch, + highest_received_epoch, + updated_at + ) = ( + EXCLUDED.epoch, + EXCLUDED.highest_sent_epoch, + EXCLUDED.highest_received_epoch, + NOW() + )` + + _, err := d.q.WithOpts(pg.WithLongQueryTimeout()).ExecContext( + ctx, stmt, d.oracleSpecID, cd, state.Epoch, state.HighestSentEpoch, pq.Array(&highestReceivedEpoch), + ) + + return errors.Wrap(err, "WriteState failed") +} + +func (d *db) ReadConfig(ctx context.Context) (c *ocrtypes.ContractConfig, err error) { + stmt := ` + SELECT + config_digest, + config_count, + signers, + transmitters, + f, + onchain_config, + offchain_config_version, + 
offchain_config + FROM ocr2_contract_configs + WHERE ocr2_oracle_spec_id = $1 AND plugin_id = $2 + LIMIT 1` + + c = new(ocrtypes.ContractConfig) + + digest := []byte{} + signers := [][]byte{} + transmitters := [][]byte{} + + err = d.q.QueryRowx(stmt, d.oracleSpecID, d.pluginID).Scan( + &digest, + &c.ConfigCount, + (*pq.ByteaArray)(&signers), + (*pq.ByteaArray)(&transmitters), + &c.F, + &c.OnchainConfig, + &c.OffchainConfigVersion, + &c.OffchainConfig, + ) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, errors.Wrap(err, "ReadConfig failed") + } + + copy(c.ConfigDigest[:], digest) + + c.Signers = []ocrtypes.OnchainPublicKey{} + for _, s := range signers { + c.Signers = append(c.Signers, s) + } + + c.Transmitters = []ocrtypes.Account{} + for _, t := range transmitters { + transmitter := ocrtypes.Account(t) + c.Transmitters = append(c.Transmitters, transmitter) + } + + return +} + +func (d *db) WriteConfig(ctx context.Context, c ocrtypes.ContractConfig) error { + var signers [][]byte + for _, s := range c.Signers { + signers = append(signers, []byte(s)) + } + stmt := ` + INSERT INTO ocr2_contract_configs ( + ocr2_oracle_spec_id, + plugin_id, + config_digest, + config_count, + signers, + transmitters, + f, + onchain_config, + offchain_config_version, + offchain_config, + created_at, + updated_at + ) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, NOW(), NOW()) + ON CONFLICT (ocr2_oracle_spec_id, plugin_id) DO UPDATE SET + config_digest = EXCLUDED.config_digest, + config_count = EXCLUDED.config_count, + signers = EXCLUDED.signers, + transmitters = EXCLUDED.transmitters, + f = EXCLUDED.f, + onchain_config = EXCLUDED.onchain_config, + offchain_config_version = EXCLUDED.offchain_config_version, + offchain_config = EXCLUDED.offchain_config, + updated_at = NOW() + ` + _, err := d.q.ExecContext(ctx, stmt, + d.oracleSpecID, + d.pluginID, + c.ConfigDigest, + c.ConfigCount, + pq.ByteaArray(signers), + c.Transmitters, + c.F, + 
		c.OnchainConfig,
		c.OffchainConfigVersion,
		c.OffchainConfig,
	)

	return errors.Wrap(err, "WriteConfig failed")
}

// StorePendingTransmission upserts a pending transmission keyed by
// (oracle spec, config digest, epoch, round).
//
// Attributed signatures are flattened into a single bytea[]: each signature
// contributes TWO consecutive entries — the raw signature bytes, then a
// MaxVarintLen64-sized buffer holding the varint-encoded signer index. The
// pairs are decoded again in PendingTransmissionsWithConfigDigest.
func (d *db) StorePendingTransmission(ctx context.Context, t ocrtypes.ReportTimestamp, tx ocrtypes.PendingTransmission) error {
	var signatures [][]byte
	for _, s := range tx.AttributedSignatures {
		signatures = append(signatures, s.Signature)
		buffer := make([]byte, binary.MaxVarintLen64)
		binary.PutVarint(buffer, int64(s.Signer))
		signatures = append(signatures, buffer)
	}

	// Fixed-width copies of the 32-byte digest/extra hash for the bytea columns.
	digest := make([]byte, 32)
	copy(digest, t.ConfigDigest[:])

	extraHash := make([]byte, 32)
	copy(extraHash[:], tx.ExtraHash[:])

	stmt := `
	INSERT INTO ocr2_pending_transmissions (
		ocr2_oracle_spec_id,
		config_digest,
		epoch,
		round,

		time,
		extra_hash,
		report,
		attributed_signatures,

		created_at,
		updated_at
	)
	VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW(), NOW())
	ON CONFLICT (ocr2_oracle_spec_id, config_digest, epoch, round) DO UPDATE SET
		ocr2_oracle_spec_id = EXCLUDED.ocr2_oracle_spec_id,
		config_digest = EXCLUDED.config_digest,
		epoch = EXCLUDED.epoch,
		round = EXCLUDED.round,

		time = EXCLUDED.time,
		extra_hash = EXCLUDED.extra_hash,
		report = EXCLUDED.report,
		attributed_signatures = EXCLUDED.attributed_signatures,

		updated_at = NOW()
	`

	_, err := d.q.ExecContext(ctx, stmt,
		d.oracleSpecID,
		digest,
		t.Epoch,
		t.Round,
		tx.Time,
		extraHash,
		tx.Report,
		pq.ByteaArray(signatures),
	)

	return errors.Wrap(err, "StorePendingTransmission failed")
}

// PendingTransmissionsWithConfigDigest returns all pending transmissions for
// this oracle spec under the given config digest, keyed by report timestamp.
func (d *db) PendingTransmissionsWithConfigDigest(ctx context.Context, cd ocrtypes.ConfigDigest) (map[ocrtypes.ReportTimestamp]ocrtypes.PendingTransmission, error) {
	stmt := `
	SELECT
		config_digest,
		epoch,
		round,
		time,
		extra_hash,
		report,
		attributed_signatures
	FROM ocr2_pending_transmissions
	WHERE ocr2_oracle_spec_id = $1 AND config_digest = $2
	`
	rows, err := d.q.QueryxContext(ctx, stmt, d.oracleSpecID, cd) //nolint sqlclosecheck
false positive
	if err != nil {
		return nil, errors.Wrap(err, "PendingTransmissionsWithConfigDigest failed to query rows")
	}
	// Close via the logger helper so a Close error is surfaced in logs
	// rather than silently dropped.
	defer d.lggr.ErrorIfFn(rows.Close, "Error closing ocr2_pending_transmissions rows")

	m := make(map[ocrtypes.ReportTimestamp]ocrtypes.PendingTransmission)

	for rows.Next() {
		k := ocrtypes.ReportTimestamp{}
		p := ocrtypes.PendingTransmission{}

		signatures := [][]byte{}
		digest := []byte{}
		extraHash := []byte{}
		report := []byte{}

		if err := rows.Scan(&digest, &k.Epoch, &k.Round, &p.Time, &extraHash, &report, (*pq.ByteaArray)(&signatures)); err != nil {
			return nil, errors.Wrap(err, "PendingTransmissionsWithConfigDigest failed to scan row")
		}

		copy(k.ConfigDigest[:], digest)
		copy(p.ExtraHash[:], extraHash)
		p.Report = make([]byte, len(report))
		copy(p.Report[:], report)

		// Signatures were stored as (signature bytes, varint signer) pairs by
		// StorePendingTransmission, hence the step of 2.
		// NOTE(review): assumes an even-length array with valid varints — true
		// for rows written by StorePendingTransmission; the Varint decode error
		// is deliberately ignored on that basis. Worth a guard if other writers
		// ever touch this table.
		for index := 0; index < len(signatures); index += 2 {
			signature := signatures[index]
			signer, _ := binary.Varint(signatures[index+1])
			sig := ocrtypes.AttributedOnchainSignature{
				Signature: signature,
				Signer:    ocrcommon.OracleID(signer),
			}
			p.AttributedSignatures = append(p.AttributedSignatures, sig)
		}
		m[k] = p
	}

	// Surface any iteration error (distinct from per-row scan errors above).
	if err := rows.Err(); err != nil {
		return m, err
	}

	return m, nil
}

// DeletePendingTransmission removes the single pending transmission matching
// the given report timestamp for this oracle spec. Deleting a non-existent
// row is not an error.
func (d *db) DeletePendingTransmission(ctx context.Context, t ocrtypes.ReportTimestamp) (err error) {
	_, err = d.q.WithOpts(pg.WithLongQueryTimeout()).ExecContext(ctx, `
DELETE FROM ocr2_pending_transmissions
WHERE ocr2_oracle_spec_id = $1 AND config_digest = $2 AND epoch = $3 AND round = $4
`, d.ConfigDigest, t.Epoch, t.Round)

	err = errors.Wrap(err, "DeletePendingTransmission failed")

	return
}

// DeletePendingTransmissionsOlderThan bulk-deletes this spec's pending
// transmissions whose stored time is strictly before t.
func (d *db) DeletePendingTransmissionsOlderThan(ctx context.Context, t time.Time) (err error) {
	_, err = d.q.WithOpts(pg.WithLongQueryTimeout()).ExecContext(ctx, `
DELETE FROM ocr2_pending_transmissions
WHERE ocr2_oracle_spec_id = $1 AND time < $2
`, d.oracleSpecID, t)

	err = errors.Wrap(err,
"DeletePendingTransmissionsOlderThan failed")

	return
}

// ReadProtocolState returns the raw protocol-state value for (configDigest,
// key), or (nil, nil) when no row exists. Note the table is keyed only by
// config digest and key — oracleSpecID appears solely in the error message.
func (d *db) ReadProtocolState(ctx context.Context, configDigest ocrtypes.ConfigDigest, key string) (value []byte, err error) {
	err = d.q.GetContext(ctx, &value, `
SELECT value FROM ocr_protocol_states
WHERE config_digest = $1 AND key = $2;
`, configDigest, key)

	if errors.Is(err, sql.ErrNoRows) {
		return nil, nil
	}

	err = errors.Wrapf(err, "ReadProtocolState failed for job %d", d.oracleSpecID)

	return
}

// WriteProtocolState upserts the value for (configDigest, key). A nil value
// deletes the row (mirroring ReadProtocolState returning nil for a missing
// row); an empty non-nil slice is stored as an empty value.
func (d *db) WriteProtocolState(ctx context.Context, configDigest ocrtypes.ConfigDigest, key string, value []byte) (err error) {
	if value == nil {
		_, err = d.q.ExecContext(ctx, `DELETE FROM ocr_protocol_states WHERE config_digest = $1 AND key = $2;`, configDigest, key)
	} else {
		_, err = d.q.ExecContext(ctx, `
INSERT INTO ocr_protocol_states (config_digest, key, value) VALUES ($1, $2, $3)
ON CONFLICT (config_digest, key) DO UPDATE SET value = $3;`, configDigest, key, value)
	}

	err = errors.Wrapf(err, "WriteProtocolState failed for job %d", d.oracleSpecID)

	return
}
diff --git a/core/services/ocr2/database_test.go b/core/services/ocr2/database_test.go
new file mode 100644
index 00000000..094b60ee
--- /dev/null
+++ b/core/services/ocr2/database_test.go
@@ -0,0 +1,516 @@
package ocr2_test

import (
	"encoding/json"
	"testing"
	"time"

	medianconfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/median/config"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/jmoiron/sqlx"
	ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types"

	"github.com/goplugin/pluginv3.0/v2/core/internal/cltest"
	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest"
	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest"
	"github.com/goplugin/pluginv3.0/v2/core/logger"
	"github.com/goplugin/pluginv3.0/v2/core/services/job"
	"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey"
	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2"
	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/testhelpers"
)

// defaultPluginID is the plugin ID used by tests that don't exercise
// multi-plugin behavior (matches the zero pluginID used by NewDB callers).
const defaultPluginID = 0

// MustInsertOCROracleSpec inserts a minimal median OCR2 oracle spec row and
// returns it; it fails the test on any error. The juels-per-fee-coin pipeline
// is a fixed mock source embedded in the plugin config JSON.
func MustInsertOCROracleSpec(t *testing.T, db *sqlx.DB, transmitterAddress ethkey.EIP55Address) job.OCR2OracleSpec {
	t.Helper()

	spec := job.OCR2OracleSpec{}
	mockJuelsPerFeeCoinSource := `ds1 [type=bridge name=voter_turnout];
	ds1_parse [type=jsonparse path="one,two"];
	ds1_multiply [type=multiply times=1.23];
	ds1 -> ds1_parse -> ds1_multiply -> answer1;
	answer1 [type=median index=0];`
	config := medianconfig.PluginConfig{JuelsPerFeeCoinPipeline: mockJuelsPerFeeCoinSource}
	jsonConfig, err := json.Marshal(config)
	require.NoError(t, err)

	// RETURNING * populates spec (including the generated ID used by NewDB).
	require.NoError(t, db.Get(&spec, `INSERT INTO ocr2_oracle_specs (
relay, relay_config, contract_id, p2pv2_bootstrappers, ocr_key_bundle_id, monitoring_endpoint, transmitter_id,
blockchain_timeout, contract_config_tracker_poll_interval, contract_config_confirmations, plugin_type, plugin_config, created_at, updated_at) VALUES (
'ethereum', '{}', $1, '{}', $2, $3, $4,
0, 0, 0, 'median', $5, NOW(), NOW()
) RETURNING *`, cltest.NewEIP55Address().String(), cltest.DefaultOCR2KeyBundleID, "chain.link:1234", transmitterAddress.String(), jsonConfig))
	return spec
}

// setupDB returns a fresh test database handle.
// NOTE(review): the local variable shadows the imported sqlx package name;
// harmless here, but worth renaming for clarity.
func setupDB(t *testing.T) *sqlx.DB {
	t.Helper()

	sqlx := pgtest.NewSqlxDB(t)

	return sqlx
}

// Test_DB_ReadWriteState covers persistence, update, and spec/digest scoping
// of the OCR2 persistent protocol state.
func Test_DB_ReadWriteState(t *testing.T) {
	sqlDB := setupDB(t)

	configDigest := testhelpers.MakeConfigDigest(t)
	cfg := configtest.NewTestGeneralConfig(t)
	ethKeyStore := cltest.NewKeyStore(t, sqlDB, cfg.Database()).Eth()
	key, _ := cltest.MustInsertRandomKey(t, ethKeyStore)
	spec := MustInsertOCROracleSpec(t, sqlDB, key.EIP55Address)
	lggr := logger.TestLogger(t)

	t.Run("reads and writes state", func(t *testing.T) {
		db := ocr2.NewDB(sqlDB, spec.ID,
defaultPluginID, lggr, cfg.Database()) + state := ocrtypes.PersistentState{ + Epoch: 1, + HighestSentEpoch: 2, + HighestReceivedEpoch: []uint32{3}, + } + + err := db.WriteState(testutils.Context(t), configDigest, state) + require.NoError(t, err) + + readState, err := db.ReadState(testutils.Context(t), configDigest) + require.NoError(t, err) + + require.Equal(t, state, *readState) + }) + + t.Run("updates state", func(t *testing.T) { + db := ocr2.NewDB(sqlDB, spec.ID, defaultPluginID, lggr, cfg.Database()) + newState := ocrtypes.PersistentState{ + Epoch: 2, + HighestSentEpoch: 3, + HighestReceivedEpoch: []uint32{4, 5}, + } + + err := db.WriteState(testutils.Context(t), configDigest, newState) + require.NoError(t, err) + + readState, err := db.ReadState(testutils.Context(t), configDigest) + require.NoError(t, err) + + require.Equal(t, newState, *readState) + }) + + t.Run("does not return result for wrong spec", func(t *testing.T) { + db := ocr2.NewDB(sqlDB, spec.ID, defaultPluginID, lggr, cfg.Database()) + state := ocrtypes.PersistentState{ + Epoch: 3, + HighestSentEpoch: 4, + HighestReceivedEpoch: []uint32{5, 6}, + } + + err := db.WriteState(testutils.Context(t), configDigest, state) + require.NoError(t, err) + + // odb with different spec + db = ocr2.NewDB(sqlDB, -1, defaultPluginID, lggr, cfg.Database()) + + readState, err := db.ReadState(testutils.Context(t), configDigest) + require.NoError(t, err) + + require.Nil(t, readState) + }) + + t.Run("does not return result for wrong config digest", func(t *testing.T) { + db := ocr2.NewDB(sqlDB, spec.ID, defaultPluginID, lggr, cfg.Database()) + state := ocrtypes.PersistentState{ + Epoch: 4, + HighestSentEpoch: 5, + HighestReceivedEpoch: []uint32{6, 7}, + } + + err := db.WriteState(testutils.Context(t), configDigest, state) + require.NoError(t, err) + + readState, err := db.ReadState(testutils.Context(t), testhelpers.MakeConfigDigest(t)) + require.NoError(t, err) + + require.Nil(t, readState) + }) +} + +func 
Test_DB_ReadWriteConfig(t *testing.T) { + sqlDB := setupDB(t) + + config := ocrtypes.ContractConfig{ + ConfigDigest: testhelpers.MakeConfigDigest(t), + ConfigCount: 1, + Signers: []ocrtypes.OnchainPublicKey{{0x01}, {0x02}}, + Transmitters: []ocrtypes.Account{"account1", "account2"}, + F: 79, + OnchainConfig: []byte{0x01, 0x02}, + OffchainConfigVersion: 111, + OffchainConfig: []byte{0x03, 0x04}, + } + cfg := configtest.NewTestGeneralConfig(t) + ethKeyStore := cltest.NewKeyStore(t, sqlDB, cfg.Database()).Eth() + key, _ := cltest.MustInsertRandomKey(t, ethKeyStore) + spec := MustInsertOCROracleSpec(t, sqlDB, key.EIP55Address) + lggr := logger.TestLogger(t) + + t.Run("reads and writes config", func(t *testing.T) { + db := ocr2.NewDB(sqlDB, spec.ID, defaultPluginID, lggr, cfg.Database()) + + err := db.WriteConfig(testutils.Context(t), config) + require.NoError(t, err) + + readConfig, err := db.ReadConfig(testutils.Context(t)) + require.NoError(t, err) + + require.Equal(t, &config, readConfig) + }) + + t.Run("updates config", func(t *testing.T) { + db := ocr2.NewDB(sqlDB, spec.ID, defaultPluginID, lggr, cfg.Database()) + + newConfig := ocrtypes.ContractConfig{ + ConfigDigest: testhelpers.MakeConfigDigest(t), + Signers: []ocrtypes.OnchainPublicKey{}, + Transmitters: []ocrtypes.Account{}, + } + + err := db.WriteConfig(testutils.Context(t), newConfig) + require.NoError(t, err) + + readConfig, err := db.ReadConfig(testutils.Context(t)) + require.NoError(t, err) + + require.Equal(t, &newConfig, readConfig) + }) + + t.Run("does not return result for wrong spec", func(t *testing.T) { + db := ocr2.NewDB(sqlDB, spec.ID, defaultPluginID, lggr, cfg.Database()) + + err := db.WriteConfig(testutils.Context(t), config) + require.NoError(t, err) + + db = ocr2.NewDB(sqlDB, -1, defaultPluginID, lggr, cfg.Database()) + + readConfig, err := db.ReadConfig(testutils.Context(t)) + require.NoError(t, err) + + require.Nil(t, readConfig) + }) + + t.Run("reads and writes config for multiple 
plugins", func(t *testing.T) { + otherPluginID := int32(2) + db1 := ocr2.NewDB(sqlDB, spec.ID, defaultPluginID, lggr, cfg.Database()) + db2 := ocr2.NewDB(sqlDB, spec.ID, otherPluginID, lggr, cfg.Database()) + + otherConfig := ocrtypes.ContractConfig{ + ConfigDigest: testhelpers.MakeConfigDigest(t), + Signers: []ocrtypes.OnchainPublicKey{}, + Transmitters: []ocrtypes.Account{}, + } + err := db1.WriteConfig(testutils.Context(t), config) + require.NoError(t, err) + err = db2.WriteConfig(testutils.Context(t), otherConfig) + require.NoError(t, err) + + readConfig, err := db1.ReadConfig(testutils.Context(t)) + require.NoError(t, err) + require.Equal(t, &config, readConfig) + + readConfig, err = db2.ReadConfig(testutils.Context(t)) + require.NoError(t, err) + require.Equal(t, &otherConfig, readConfig) + }) +} + +func assertPendingTransmissionEqual(t *testing.T, pt1, pt2 ocrtypes.PendingTransmission) { + t.Helper() + + require.Equal(t, pt1.Time.Unix(), pt2.Time.Unix()) + require.Equal(t, pt1.ExtraHash, pt2.ExtraHash) + require.Equal(t, pt1.Report, pt2.Report) + require.Equal(t, pt1.AttributedSignatures, pt2.AttributedSignatures) +} + +func Test_DB_PendingTransmissions(t *testing.T) { + sqlDB := setupDB(t) + + cfg := configtest.NewTestGeneralConfig(t) + ethKeyStore := cltest.NewKeyStore(t, sqlDB, cfg.Database()).Eth() + key, _ := cltest.MustInsertRandomKey(t, ethKeyStore) + + lggr := logger.TestLogger(t) + spec := MustInsertOCROracleSpec(t, sqlDB, key.EIP55Address) + spec2 := MustInsertOCROracleSpec(t, sqlDB, key.EIP55Address) + db := ocr2.NewDB(sqlDB, spec.ID, defaultPluginID, lggr, cfg.Database()) + db2 := ocr2.NewDB(sqlDB, spec2.ID, defaultPluginID, lggr, cfg.Database()) + configDigest := testhelpers.MakeConfigDigest(t) + + k := ocrtypes.ReportTimestamp{ + ConfigDigest: configDigest, + Epoch: 0, + Round: 1, + } + k2 := ocrtypes.ReportTimestamp{ + ConfigDigest: configDigest, + Epoch: 1, + Round: 2, + } + + t.Run("stores and retrieves pending transmissions", func(t 
*testing.T) { + p := ocrtypes.PendingTransmission{ + Time: time.Now(), + ExtraHash: testutils.Random32Byte(), + Report: []byte{0, 2, 3}, + AttributedSignatures: []ocrtypes.AttributedOnchainSignature{ + {Signature: cltest.MustRandomBytes(t, 7), Signer: 248}, + {Signature: cltest.MustRandomBytes(t, 17), Signer: 31}, + }, + } + + err := db.StorePendingTransmission(testutils.Context(t), k, p) + require.NoError(t, err) + m, err := db.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + assertPendingTransmissionEqual(t, p, m[k]) + + // Now overwrite value for k to prove that updating works + p = ocrtypes.PendingTransmission{ + Time: time.Now(), + ExtraHash: testutils.Random32Byte(), + Report: []byte{1, 2, 3}, + AttributedSignatures: []ocrtypes.AttributedOnchainSignature{ + {Signature: cltest.MustRandomBytes(t, 7), Signer: 248}, + }, + } + err = db.StorePendingTransmission(testutils.Context(t), k, p) + require.NoError(t, err) + m, err = db.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + assertPendingTransmissionEqual(t, p, m[k]) + + p2 := ocrtypes.PendingTransmission{ + Time: time.Now(), + ExtraHash: testutils.Random32Byte(), + Report: []byte{2, 2, 3}, + AttributedSignatures: []ocrtypes.AttributedOnchainSignature{ + {Signature: cltest.MustRandomBytes(t, 7), Signer: 248}, + }, + } + + err = db.StorePendingTransmission(testutils.Context(t), k2, p2) + require.NoError(t, err) + + kRedHerring := ocrtypes.ReportTimestamp{ + ConfigDigest: ocrtypes.ConfigDigest{43}, + Epoch: 1, + Round: 2, + } + pRedHerring := ocrtypes.PendingTransmission{ + Time: time.Now(), + ExtraHash: testutils.Random32Byte(), + Report: []byte{3, 2, 3}, + AttributedSignatures: []ocrtypes.AttributedOnchainSignature{ + {Signature: cltest.MustRandomBytes(t, 7), Signer: 248}, + }, + } + + err = db.StorePendingTransmission(testutils.Context(t), kRedHerring, pRedHerring) + require.NoError(t, err) + + m, err = 
db.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + + require.Len(t, m, 2) + + // HACK to get around time equality because otherwise its annoying (time storage into postgres is mildly lossy) + require.Equal(t, p.Time.Unix(), m[k].Time.Unix()) + require.Equal(t, p2.Time.Unix(), m[k2].Time.Unix()) + + var zt time.Time + p.Time, p2.Time = zt, zt + for k, v := range m { + v.Time = zt + m[k] = v + } + + require.Equal(t, p, m[k]) + require.Equal(t, p2, m[k2]) + + // No keys for this oracleSpecID yet + m, err = db2.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + require.Len(t, m, 0) + }) + + t.Run("deletes pending transmission by key", func(t *testing.T) { + p := ocrtypes.PendingTransmission{ + Time: time.Unix(100, 0), + ExtraHash: testutils.Random32Byte(), + Report: []byte{1, 4, 3}, + AttributedSignatures: []ocrtypes.AttributedOnchainSignature{ + {Signature: cltest.MustRandomBytes(t, 7), Signer: 248}, + }, + } + err := db.StorePendingTransmission(testutils.Context(t), k, p) + require.NoError(t, err) + err = db2.StorePendingTransmission(testutils.Context(t), k, p) + require.NoError(t, err) + + err = db.DeletePendingTransmission(testutils.Context(t), k) + require.NoError(t, err) + + m, err := db.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + require.Len(t, m, 1) + + // Did not affect other oracleSpecID + m, err = db2.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + require.Len(t, m, 1) + }) + + t.Run("allows multiple duplicate keys for different spec ID", func(t *testing.T) { + p := ocrtypes.PendingTransmission{ + Time: time.Unix(100, 0), + ExtraHash: testutils.Random32Byte(), + Report: []byte{2, 2, 3}, + AttributedSignatures: []ocrtypes.AttributedOnchainSignature{ + {Signature: cltest.MustRandomBytes(t, 7), Signer: 248}, + }, + } + err := 
db.StorePendingTransmission(testutils.Context(t), k2, p) + require.NoError(t, err) + + m, err := db.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + require.Len(t, m, 1) + // FIXME: don't understand how the median is being used as a key or what the replacement is yet + // require.Equal(t, p.Median, m[k2].Median) + }) + + t.Run("deletes pending transmission older than", func(t *testing.T) { + p := ocrtypes.PendingTransmission{ + Time: time.Unix(100, 0), + ExtraHash: testutils.Random32Byte(), + Report: []byte{2, 2, 3}, + AttributedSignatures: []ocrtypes.AttributedOnchainSignature{ + {Signature: cltest.MustRandomBytes(t, 7), Signer: 248}, + }, + } + + err := db.StorePendingTransmission(testutils.Context(t), k, p) + require.NoError(t, err) + + p2 := ocrtypes.PendingTransmission{ + Time: time.Unix(1000, 0), + ExtraHash: testutils.Random32Byte(), + Report: []byte{2, 2, 3}, + AttributedSignatures: []ocrtypes.AttributedOnchainSignature{ + {Signature: cltest.MustRandomBytes(t, 7), Signer: 248}, + }, + } + err = db.StorePendingTransmission(testutils.Context(t), k2, p2) + require.NoError(t, err) + + p2 = ocrtypes.PendingTransmission{ + Time: time.Now(), + ExtraHash: testutils.Random32Byte(), + Report: []byte{2, 2, 3}, + AttributedSignatures: []ocrtypes.AttributedOnchainSignature{ + {Signature: cltest.MustRandomBytes(t, 7), Signer: 248}, + }, + } + + err = db.StorePendingTransmission(testutils.Context(t), k2, p2) + require.NoError(t, err) + + err = db.DeletePendingTransmissionsOlderThan(testutils.Context(t), time.Unix(900, 0)) + require.NoError(t, err) + + m, err := db.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + require.Len(t, m, 1) + + // Didn't affect other oracleSpecIDs + db = ocr2.NewDB(sqlDB, spec2.ID, defaultPluginID, lggr, cfg.Database()) + m, err = db.PendingTransmissionsWithConfigDigest(testutils.Context(t), configDigest) + require.NoError(t, err) + 
require.Len(t, m, 1) + }) +} + +func Test_DB_ReadWriteProtocolState(t *testing.T) { + sqlDB := setupDB(t) + + cfg := configtest.NewTestGeneralConfig(t) + + lggr := logger.TestLogger(t) + db := ocr2.NewDB(sqlDB, 0, defaultPluginID, lggr, cfg.Database()) + cd1 := testhelpers.MakeConfigDigest(t) + cd2 := testhelpers.MakeConfigDigest(t) + ctx := testutils.Context(t) + + assertCount := func(expected int64) { + testutils.AssertCount(t, sqlDB, "ocr_protocol_states", expected) + } + + t.Run("stores and retrieves protocol state", func(t *testing.T) { + assertCount(0) + + err := db.WriteProtocolState(ctx, cd1, "key1", []byte{1}) + assert.NoError(t, err) + + assertCount(1) + + err = db.WriteProtocolState(ctx, cd2, "key1", []byte{2}) + assert.NoError(t, err) + + assertCount(2) + + err = db.WriteProtocolState(ctx, cd2, "key2", []byte{3}) + assert.NoError(t, err) + + assertCount(3) + + // should overwrite + err = db.WriteProtocolState(ctx, cd2, "key2", []byte{4}) + assert.NoError(t, err) + + val, err := db.ReadProtocolState(ctx, cd1, "key1") + assert.NoError(t, err) + assert.Equal(t, []byte{1}, val) + + val, err = db.ReadProtocolState(ctx, cd2, "key1") + assert.NoError(t, err) + assert.Equal(t, []byte{2}, val) + + val, err = db.ReadProtocolState(ctx, cd2, "key2") + assert.NoError(t, err) + assert.Equal(t, []byte{4}, val) + + // should write empty value + err = db.WriteProtocolState(ctx, cd1, "key1", []byte{}) + assert.NoError(t, err) + + val, err = db.ReadProtocolState(ctx, cd1, "key1") + assert.NoError(t, err) + assert.Equal(t, []byte{}, val) + + assertCount(3) + + // should delete value + err = db.WriteProtocolState(ctx, cd1, "key1", nil) + assert.NoError(t, err) + + assertCount(2) + + // trying to read missing value yields nil + val, err = db.ReadProtocolState(ctx, cd1, "key1") + assert.NoError(t, err) + assert.Nil(t, val) + }) +} diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go new file mode 100644 index 00000000..954ee099 --- /dev/null +++ 
b/core/services/ocr2/delegate.go @@ -0,0 +1,1535 @@ +package ocr2 + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "log" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "google.golang.org/grpc" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/libocr/commontypes" + libocr2 "github.com/goplugin/libocr/offchainreporting2plus" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + ocr2keepers20 "github.com/goplugin/plugin-automation/pkg/v2" + ocr2keepers20config "github.com/goplugin/plugin-automation/pkg/v2/config" + ocr2keepers20coordinator "github.com/goplugin/plugin-automation/pkg/v2/coordinator" + ocr2keepers20polling "github.com/goplugin/plugin-automation/pkg/v2/observer/polling" + ocr2keepers20runner "github.com/goplugin/plugin-automation/pkg/v2/runner" + ocr2keepers21config "github.com/goplugin/plugin-automation/pkg/v3/config" + ocr2keepers21 "github.com/goplugin/plugin-automation/pkg/v3/plugin" + "github.com/goplugin/plugin-common/pkg/loop/reportingplugins/ocr3" + "github.com/goplugin/pluginv3.0/v2/core/config/env" + + "github.com/goplugin/plugin-vrf/altbn_128" + dkgpkg "github.com/goplugin/plugin-vrf/dkg" + "github.com/goplugin/plugin-vrf/ocr2vrf" + + commonlogger "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/loop/reportingplugins" + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + coreconfig "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + 
"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/dkg" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/dkg/persistence" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/generic" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/median" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/mercury" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21" + ocr2keeper21core "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + ocr2vrfconfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2vrf/config" + ocr2coordinator "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2vrf/coordinator" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2vrf/juelsfeecoin" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2vrf/reasonablegasprice" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2vrf/reportserializer" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/promwrapper" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + functionsRelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/functions" + evmmercury "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury" + mercuryutils "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + evmrelaytypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" + 
"github.com/goplugin/pluginv3.0/v2/core/services/synchronization" + "github.com/goplugin/pluginv3.0/v2/core/services/telemetry" + "github.com/goplugin/pluginv3.0/v2/plugins" +) + +type ErrJobSpecNoRelayer struct { + PluginName string + Err error +} + +func (e ErrJobSpecNoRelayer) Unwrap() error { return e.Err } + +func (e ErrJobSpecNoRelayer) Error() string { + return fmt.Sprintf("%s services: OCR2 job spec could not get relayer ID: %s", e.PluginName, e.Err) +} + +type ErrRelayNotEnabled struct { + PluginName string + Relay string + Err error +} + +func (e ErrRelayNotEnabled) Unwrap() error { return e.Err } + +func (e ErrRelayNotEnabled) Error() string { + return fmt.Sprintf("%s services: failed to get relay %s, is it enabled? %s", e.PluginName, e.Relay, e.Err) +} + +type RelayGetter interface { + Get(id relay.ID) (loop.Relayer, error) +} +type Delegate struct { + db *sqlx.DB + jobORM job.ORM + bridgeORM bridges.ORM + mercuryORM evmmercury.ORM + pipelineRunner pipeline.Runner + peerWrapper *ocrcommon.SingletonPeerWrapper + monitoringEndpointGen telemetry.MonitoringEndpointGenerator + cfg DelegateConfig + lggr logger.Logger + ks keystore.OCR2 + dkgSignKs keystore.DKGSign + dkgEncryptKs keystore.DKGEncrypt + ethKs keystore.Eth + RelayGetter + isNewlyCreatedJob bool // Set to true if this is a new job freshly added, false if job was present already on node boot. 
+ mailMon *mailbox.Monitor + + legacyChains legacyevm.LegacyChainContainer // legacy: use relayers instead + capabilitiesRegistry types.CapabilitiesRegistry +} + +type DelegateConfig interface { + plugins.RegistrarConfig + OCR2() ocr2Config + JobPipeline() jobPipelineConfig + Database() pg.QConfig + Insecure() insecureConfig + Mercury() coreconfig.Mercury + Threshold() coreconfig.Threshold +} + +// concrete implementation of DelegateConfig so it can be explicitly composed +type delegateConfig struct { + plugins.RegistrarConfig + ocr2 ocr2Config + jobPipeline jobPipelineConfig + database pg.QConfig + insecure insecureConfig + mercury mercuryConfig + threshold thresholdConfig +} + +func (d *delegateConfig) JobPipeline() jobPipelineConfig { + return d.jobPipeline +} + +func (d *delegateConfig) Database() pg.QConfig { + return d.database +} + +func (d *delegateConfig) Insecure() insecureConfig { + return d.insecure +} + +func (d *delegateConfig) Threshold() coreconfig.Threshold { + return d.threshold +} + +func (d *delegateConfig) Mercury() coreconfig.Mercury { + return d.mercury +} + +func (d *delegateConfig) OCR2() ocr2Config { + return d.ocr2 +} + +type ocr2Config interface { + BlockchainTimeout() time.Duration + CaptureEATelemetry() bool + ContractConfirmations() uint16 + ContractPollInterval() time.Duration + ContractTransmitterTransmitTimeout() time.Duration + DatabaseTimeout() time.Duration + KeyBundleID() (string, error) + TraceLogging() bool + CaptureAutomationCustomTelemetry() bool +} + +type insecureConfig interface { + OCRDevelopmentMode() bool +} + +type jobPipelineConfig interface { + MaxSuccessfulRuns() uint64 + ResultWriteQueueDepth() uint64 +} + +type mercuryConfig interface { + Credentials(credName string) *types.MercuryCredentials + Cache() coreconfig.MercuryCache + TLS() coreconfig.MercuryTLS +} + +type thresholdConfig interface { + ThresholdKeyShare() string +} + +func NewDelegateConfig(ocr2Cfg ocr2Config, m coreconfig.Mercury, t 
coreconfig.Threshold, i insecureConfig, jp jobPipelineConfig, qconf pg.QConfig, pluginProcessCfg plugins.RegistrarConfig) DelegateConfig { + return &delegateConfig{ + ocr2: ocr2Cfg, + RegistrarConfig: pluginProcessCfg, + jobPipeline: jp, + database: qconf, + insecure: i, + mercury: m, + threshold: t, + } +} + +var _ job.Delegate = (*Delegate)(nil) + +func NewDelegate( + db *sqlx.DB, + jobORM job.ORM, + bridgeORM bridges.ORM, + mercuryORM evmmercury.ORM, + pipelineRunner pipeline.Runner, + peerWrapper *ocrcommon.SingletonPeerWrapper, + monitoringEndpointGen telemetry.MonitoringEndpointGenerator, + legacyChains legacyevm.LegacyChainContainer, + lggr logger.Logger, + cfg DelegateConfig, + ks keystore.OCR2, + dkgSignKs keystore.DKGSign, + dkgEncryptKs keystore.DKGEncrypt, + ethKs keystore.Eth, + relayers RelayGetter, + mailMon *mailbox.Monitor, + capabilitiesRegistry types.CapabilitiesRegistry, +) *Delegate { + return &Delegate{ + db: db, + jobORM: jobORM, + bridgeORM: bridgeORM, + mercuryORM: mercuryORM, + pipelineRunner: pipelineRunner, + peerWrapper: peerWrapper, + monitoringEndpointGen: monitoringEndpointGen, + legacyChains: legacyChains, + cfg: cfg, + lggr: lggr.Named("OCR2"), + ks: ks, + dkgSignKs: dkgSignKs, + dkgEncryptKs: dkgEncryptKs, + ethKs: ethKs, + RelayGetter: relayers, + isNewlyCreatedJob: false, + mailMon: mailMon, + capabilitiesRegistry: capabilitiesRegistry, + } +} + +func (d *Delegate) JobType() job.Type { + return job.OffchainReporting2 +} + +func (d *Delegate) BeforeJobCreated(spec job.Job) { + // This is only called first time the job is created + d.isNewlyCreatedJob = true +} +func (d *Delegate) AfterJobCreated(spec job.Job) {} +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} +func (d *Delegate) OnDeleteJob(jb job.Job, q pg.Queryer) error { + // If the job spec is malformed in any way, we report the error but return nil so that + // the job deletion itself isn't blocked. 
+ + spec := jb.OCR2OracleSpec + if spec == nil { + d.lggr.Errorf("offchainreporting2.Delegate.OnDeleteJob called with wrong job type, ignoring non-OCR2 spec %v", jb) + return nil + } + + rid, err := spec.RelayID() + if err != nil { + d.lggr.Errorw("DeleteJob", "err", ErrJobSpecNoRelayer{Err: err, PluginName: string(spec.PluginType)}) + return nil + } + // we only have clean to do for the EVM + if rid.Network == relay.EVM { + return d.cleanupEVM(jb, q, rid) + } + return nil +} + +// cleanupEVM is a helper for clean up EVM specific state when a job is deleted +func (d *Delegate) cleanupEVM(jb job.Job, q pg.Queryer, relayID relay.ID) error { + // If UnregisterFilter returns an + // error, that means it failed to remove a valid active filter from the db. We do abort the job deletion + // in that case, since it should be easy for the user to retry and will avoid leaving the db in + // an inconsistent state. This assumes UnregisterFilter will return nil if the filter wasn't found + // at all (no rows deleted). 
+ spec := jb.OCR2OracleSpec + chain, err := d.legacyChains.Get(relayID.ChainID) + if err != nil { + d.lggr.Error("cleanupEVM: failed to chain get chain %s", "err", relayID.ChainID, err) + return nil + } + lp := chain.LogPoller() + + var filters []string + switch spec.PluginType { + case types.OCR2VRF: + filters, err = ocr2coordinator.FilterNamesFromSpec(spec) + if err != nil { + d.lggr.Errorw("failed to derive ocr2vrf filter names from spec", "err", err, "spec", spec) + } + case types.OCR2Keeper: + filters, err = ocr2keeper.FilterNamesFromSpec20(spec) + if err != nil { + d.lggr.Errorw("failed to derive ocr2keeper filter names from spec", "err", err, "spec", spec) + } + default: + return nil + } + + rargs := types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, + ContractID: spec.ContractID, + New: false, + RelayConfig: spec.RelayConfig.Bytes(), + } + + relayFilters, err := evmrelay.FilterNamesFromRelayArgs(rargs) + if err != nil { + d.lggr.Errorw("Failed to derive evm relay filter names from relay args", "err", err, "rargs", rargs) + return nil + } + + filters = append(filters, relayFilters...) 
+ + for _, filter := range filters { + d.lggr.Debugf("Unregistering %s filter", filter) + err = lp.UnregisterFilter(filter, pg.WithQueryer(q)) + if err != nil { + return errors.Wrapf(err, "Failed to unregister filter %s", filter) + } + } + return nil +} + +// ServicesForSpec returns the OCR2 services that need to run for this job +func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { + spec := jb.OCR2OracleSpec + if spec == nil { + return nil, errors.Errorf("offchainreporting2.Delegate expects an *job.OCR2OracleSpec to be present, got %v", jb) + } + + transmitterID := spec.TransmitterID.String + effectiveTransmitterID := transmitterID + + lggrCtx := loop.ContextValues{ + JobID: jb.ID, + JobName: jb.Name.ValueOrZero(), + + ContractID: spec.ContractID, + TransmitterID: transmitterID, + } + if spec.FeedID != nil && (*spec.FeedID != (common.Hash{})) { + lggrCtx.FeedID = *spec.FeedID + spec.RelayConfig["feedID"] = spec.FeedID + } + lggr := logger.Sugared(d.lggr.Named(jb.ExternalJobID.String()).With(lggrCtx.Args()...)) + + rid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: string(spec.PluginType)} + } + + if rid.Network == relay.EVM { + lggr = logger.Sugared(lggr.With("evmChainID", rid.ChainID)) + + chain, err2 := d.legacyChains.Get(rid.ChainID) + if err2 != nil { + return nil, fmt.Errorf("ServicesForSpec: could not get EVM chain %s: %w", rid.ChainID, err2) + } + effectiveTransmitterID, err2 = GetEVMEffectiveTransmitterID(&jb, chain, lggr) + if err2 != nil { + return nil, fmt.Errorf("ServicesForSpec failed to get evm transmitterID: %w", err2) + } + } + spec.RelayConfig["effectiveTransmitterID"] = effectiveTransmitterID + + ocrDB := NewDB(d.db, spec.ID, 0, lggr, d.cfg.Database()) + if d.peerWrapper == nil { + return nil, errors.New("cannot setup OCR2 job service, libp2p peer was missing") + } else if !d.peerWrapper.IsStarted() { + return nil, errors.New("peerWrapper is not started. 
OCR2 jobs require a started and running p2p v2 peer") + } + + ocrLogger := commonlogger.NewOCRWrapper(lggr, d.cfg.OCR2().TraceLogging(), func(msg string) { + lggr.ErrorIf(d.jobORM.RecordError(jb.ID, msg), "unable to record error") + }) + + lc, err := validate.ToLocalConfig(d.cfg.OCR2(), d.cfg.Insecure(), *spec) + if err != nil { + return nil, err + } + if err = libocr2.SanityCheckLocalConfig(lc); err != nil { + return nil, err + } + lggr.Infow("OCR2 job using local config", + "BlockchainTimeout", lc.BlockchainTimeout, + "ContractConfigConfirmations", lc.ContractConfigConfirmations, + "ContractConfigTrackerPollInterval", lc.ContractConfigTrackerPollInterval, + "ContractTransmitterTransmitTimeout", lc.ContractTransmitterTransmitTimeout, + "DatabaseTimeout", lc.DatabaseTimeout, + ) + + bootstrapPeers, err := ocrcommon.GetValidatedBootstrapPeers(spec.P2PV2Bootstrappers, d.peerWrapper.P2PConfig().V2().DefaultBootstrappers()) + if err != nil { + return nil, err + } + lggr.Debugw("Using bootstrap peers", "peers", bootstrapPeers) + // Fetch the specified OCR2 key bundle + var kbID string + if spec.OCRKeyBundleID.Valid { + kbID = spec.OCRKeyBundleID.String + } else if kbID, err = d.cfg.OCR2().KeyBundleID(); err != nil { + return nil, err + } + kb, err := d.ks.Get(kbID) + if err != nil { + return nil, err + } + + spec.CaptureEATelemetry = d.cfg.OCR2().CaptureEATelemetry() + + ctx := lggrCtx.ContextWithValues(context.Background()) + switch spec.PluginType { + case types.Mercury: + return d.newServicesMercury(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger) + + case types.Median: + return d.newServicesMedian(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger) + + case types.DKG: + return d.newServicesDKG(lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger) + + case types.OCR2VRF: + return d.newServicesOCR2VRF(lggr, jb, bootstrapPeers, kb, ocrDB, lc) + + case types.OCR2Keeper: + return d.newServicesOCR2Keepers(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger) 
+ + case types.Functions: + const ( + _ int32 = iota + thresholdPluginId + s4PluginId + ) + thresholdPluginDB := NewDB(d.db, spec.ID, thresholdPluginId, lggr, d.cfg.Database()) + s4PluginDB := NewDB(d.db, spec.ID, s4PluginId, lggr, d.cfg.Database()) + return d.newServicesOCR2Functions(lggr, jb, bootstrapPeers, kb, ocrDB, thresholdPluginDB, s4PluginDB, lc, ocrLogger) + + case types.GenericPlugin: + return d.newServicesGenericPlugin(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger, d.capabilitiesRegistry) + + default: + return nil, errors.Errorf("plugin type %s not supported", spec.PluginType) + } +} + +func GetEVMEffectiveTransmitterID(jb *job.Job, chain legacyevm.Chain, lggr logger.SugaredLogger) (string, error) { + spec := jb.OCR2OracleSpec + if spec.PluginType == types.Mercury { + return spec.TransmitterID.String, nil + } + + if spec.RelayConfig["sendingKeys"] == nil { + spec.RelayConfig["sendingKeys"] = []string{spec.TransmitterID.String} + } else if !spec.TransmitterID.Valid { + sendingKeys, err := job.SendingKeysForJob(jb) + if err != nil { + return "", err + } + if len(sendingKeys) > 1 && spec.PluginType != types.OCR2VRF { + return "", errors.New("only ocr2 vrf should have more than 1 sending key") + } + spec.TransmitterID = null.StringFrom(sendingKeys[0]) + } + + // effectiveTransmitterID is the transmitter address registered on the ocr contract. This is by default the EOA account on the node. + // In the case of forwarding, the transmitter address is the forwarder contract deployed onchain between EOA and OCR contract. 
+ // ForwardingAllowed cannot be set with Mercury, so this should always be false for mercury jobs + if jb.ForwardingAllowed { + if chain == nil { + return "", fmt.Errorf("job forwarding requires non-nil chain") + } + effectiveTransmitterID, err := chain.TxManager().GetForwarderForEOA(common.HexToAddress(spec.TransmitterID.String)) + if err == nil { + return effectiveTransmitterID.String(), nil + } else if !spec.TransmitterID.Valid { + return "", errors.New("failed to get forwarder address and transmitterID is not set") + } + lggr.Warnw("Skipping forwarding for job, will fallback to default behavior", "job", jb.Name, "err", err) + // this shouldn't happen unless behaviour above was changed + } + + return spec.TransmitterID.String, nil +} + +type connProvider interface { + ClientConn() grpc.ClientConnInterface +} + +func (d *Delegate) newServicesGenericPlugin( + ctx context.Context, + lggr logger.SugaredLogger, + jb job.Job, + bootstrapPeers []commontypes.BootstrapperLocator, + kb ocr2key.KeyBundle, + ocrDB *db, + lc ocrtypes.LocalConfig, + ocrLogger commontypes.Logger, + capabilitiesRegistry types.CapabilitiesRegistry, +) (srvs []job.ServiceCtx, err error) { + spec := jb.OCR2OracleSpec + + // NOTE: we don't need to validate this config, since that happens as part of creating the job. + // See: validate/validate.go's `validateSpec`. 
+ pCfg := validate.OCR2GenericPluginConfig{} + err = json.Unmarshal(spec.PluginConfig.Bytes(), &pCfg) + if err != nil { + return nil, err + } + + plugEnv := env.NewPlugin(pCfg.PluginName) + + command := pCfg.Command + if command == "" { + command = plugEnv.Cmd.Get() + } + + // Add the default pipeline to the pluginConfig + pCfg.Pipelines = append( + pCfg.Pipelines, + validate.PipelineSpec{Name: "__DEFAULT_PIPELINE__", Spec: jb.Pipeline.Source}, + ) + + rid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{PluginName: pCfg.PluginName, Err: err} + } + + relayer, err := d.RelayGetter.Get(rid) + if err != nil { + return nil, ErrRelayNotEnabled{Err: err, Relay: spec.Relay, PluginName: pCfg.PluginName} + } + + provider, err := relayer.NewPluginProvider(ctx, types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: spec.ID, + ContractID: spec.ContractID, + New: d.isNewlyCreatedJob, + RelayConfig: spec.RelayConfig.Bytes(), + ProviderType: pCfg.ProviderType, + }, types.PluginArgs{ + TransmitterID: spec.TransmitterID.String, + PluginConfig: spec.PluginConfig.Bytes(), + }) + if err != nil { + return nil, err + } + srvs = append(srvs, provider) + + envVars, err := plugins.ParseEnvFile(plugEnv.Env.Get()) + if err != nil { + return nil, fmt.Errorf("failed to parse median env file: %w", err) + } + if len(pCfg.EnvVars) > 0 { + for k, v := range pCfg.EnvVars { + envVars = append(envVars, k+"="+v) + } + } + + pluginLggr := lggr.Named(pCfg.PluginName).Named(spec.ContractID).Named(spec.GetID()) + cmdFn, grpcOpts, err := d.cfg.RegisterLOOP(plugins.CmdConfig{ + ID: fmt.Sprintf("%s-%s-%s", pCfg.PluginName, spec.ContractID, spec.GetID()), + Cmd: command, + Env: envVars, + }) + if err != nil { + return nil, fmt.Errorf("failed to register loop: %w", err) + } + + errorLog := &errorLog{jobID: jb.ID, recordError: d.jobORM.RecordError} + var providerClientConn grpc.ClientConnInterface + providerConn, ok := provider.(connProvider) + if ok { + providerClientConn = 
providerConn.ClientConn() + } else { + //We chose to deal with the difference between a LOOP provider and an embedded provider here rather than + //in NewServerAdapter because this has a smaller blast radius, as the scope of this workaround is to + //enable the medianpoc for EVM and not touch the other providers. + //TODO: remove this workaround when the EVM relayer is running inside of an LOOPP + d.lggr.Info("provider is not a LOOPP provider, switching to provider server") + + ps, err2 := relay.NewProviderServer(provider, types.OCR2PluginType(pCfg.ProviderType), d.lggr) + if err2 != nil { + return nil, fmt.Errorf("cannot start EVM provider server: %s", err2) + } + providerClientConn, err2 = ps.GetConn() + if err2 != nil { + return nil, fmt.Errorf("cannot connect to EVM provider server: %s", err) + } + srvs = append(srvs, ps) + } + + pc, err := json.Marshal(pCfg.Config) + if err != nil { + return nil, fmt.Errorf("cannot dump plugin config to string before sending to plugin: %s", err) + } + + pluginConfig := types.ReportingPluginServiceConfig{ + PluginName: pCfg.PluginName, + Command: command, + ProviderType: pCfg.ProviderType, + TelemetryType: pCfg.TelemetryType, + PluginConfig: string(pc), + } + + pr := generic.NewPipelineRunnerAdapter(pluginLggr, jb, d.pipelineRunner) + ta := generic.NewTelemetryAdapter(d.monitoringEndpointGen) + + oracleEndpoint := d.monitoringEndpointGen.GenMonitoringEndpoint( + rid.Network, + rid.ChainID, + spec.ContractID, + synchronization.TelemetryType(pCfg.TelemetryType), + ) + + switch pCfg.OCRVersion { + case 2: + plugin := reportingplugins.NewLOOPPService(pluginLggr, grpcOpts, cmdFn, pluginConfig, providerClientConn, pr, ta, errorLog) + oracleArgs := libocr2.OCR2OracleArgs{ + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + Database: ocrDB, + LocalConfig: lc, + Logger: ocrLogger, + MonitoringEndpoint: oracleEndpoint, + OffchainKeyring: kb, + OnchainKeyring: kb, + ContractTransmitter: 
provider.ContractTransmitter(), + ContractConfigTracker: provider.ContractConfigTracker(), + OffchainConfigDigester: provider.OffchainConfigDigester(), + } + oracleArgs.ReportingPluginFactory = plugin + srvs = append(srvs, plugin) + oracle, err := libocr2.NewOracle(oracleArgs) + if err != nil { + return nil, err + } + srvs = append(srvs, job.NewServiceAdapter(oracle)) + + case 3: + //OCR3 with OCR2 OnchainKeyring and ContractTransmitter + plugin := ocr3.NewLOOPPService(pluginLggr, grpcOpts, cmdFn, pluginConfig, providerClientConn, pr, ta, errorLog, capabilitiesRegistry) + contractTransmitter := ocrcommon.NewOCR3ContractTransmitterAdapter(provider.ContractTransmitter()) + oracleArgs := libocr2.OCR3OracleArgs[[]byte]{ + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + ContractConfigTracker: provider.ContractConfigTracker(), + ContractTransmitter: contractTransmitter, + Database: ocrDB, + LocalConfig: lc, + Logger: ocrLogger, + MonitoringEndpoint: oracleEndpoint, + OffchainConfigDigester: provider.OffchainConfigDigester(), + OffchainKeyring: kb, + OnchainKeyring: ocrcommon.NewOCR3OnchainKeyringAdapter(kb), + } + oracleArgs.ReportingPluginFactory = plugin + srvs = append(srvs, plugin) + oracle, err := libocr2.NewOracle(oracleArgs) + if err != nil { + return nil, err + } + srvs = append(srvs, job.NewServiceAdapter(oracle)) + + default: + return nil, fmt.Errorf("unknown OCR version: %d", pCfg.OCRVersion) + } + + return srvs, nil +} + +func (d *Delegate) newServicesMercury( + ctx context.Context, + lggr logger.SugaredLogger, + jb job.Job, + bootstrapPeers []commontypes.BootstrapperLocator, + kb ocr2key.KeyBundle, + ocrDB *db, + lc ocrtypes.LocalConfig, + ocrLogger commontypes.Logger, +) ([]job.ServiceCtx, error) { + if jb.OCR2OracleSpec.FeedID == nil || (*jb.OCR2OracleSpec.FeedID == (common.Hash{})) { + return nil, errors.Errorf("ServicesForSpec: mercury job type requires feedID") + } + spec := jb.OCR2OracleSpec + transmitterID := 
spec.TransmitterID.String + if len(transmitterID) != 64 { + return nil, errors.Errorf("ServicesForSpec: mercury job type requires transmitter ID to be a 32-byte hex string, got: %q", transmitterID) + } + if _, err := hex.DecodeString(transmitterID); err != nil { + return nil, errors.Wrapf(err, "ServicesForSpec: mercury job type requires transmitter ID to be a 32-byte hex string, got: %q", transmitterID) + } + + rid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "mercury"} + } + if rid.Network != relay.EVM { + return nil, fmt.Errorf("mercury services: expected EVM relayer got %s", rid.Network) + } + relayer, err := d.RelayGetter.Get(rid) + if err != nil { + return nil, ErrRelayNotEnabled{Err: err, Relay: spec.Relay, PluginName: "mercury"} + } + + provider, err2 := relayer.NewPluginProvider(ctx, + types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, + ContractID: spec.ContractID, + New: d.isNewlyCreatedJob, + RelayConfig: spec.RelayConfig.Bytes(), + ProviderType: string(spec.PluginType), + }, types.PluginArgs{ + TransmitterID: transmitterID, + PluginConfig: spec.PluginConfig.Bytes(), + }) + if err2 != nil { + return nil, err2 + } + + mercuryProvider, ok := provider.(types.MercuryProvider) + if !ok { + return nil, errors.New("could not coerce PluginProvider to MercuryProvider") + } + + oracleArgsNoPlugin := libocr2.MercuryOracleArgs{ + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + ContractTransmitter: mercuryProvider.ContractTransmitter(), + ContractConfigTracker: mercuryProvider.ContractConfigTracker(), + Database: ocrDB, + LocalConfig: lc, + Logger: ocrLogger, + MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.FeedID.String(), synchronization.OCR3Mercury), + OffchainConfigDigester: mercuryProvider.OffchainConfigDigester(), + OffchainKeyring: kb, + OnchainKeyring: kb, + } + + chEnhancedTelem := make(chan 
ocrcommon.EnhancedTelemetryMercuryData, 100) + + mCfg := mercury.NewMercuryConfig(d.cfg.JobPipeline().MaxSuccessfulRuns(), d.cfg.JobPipeline().ResultWriteQueueDepth(), d.cfg) + + mercuryServices, err2 := mercury.NewServices(jb, mercuryProvider, d.pipelineRunner, lggr, oracleArgsNoPlugin, mCfg, chEnhancedTelem, d.mercuryORM, (mercuryutils.FeedID)(*spec.FeedID)) + + if ocrcommon.ShouldCollectEnhancedTelemetryMercury(jb) { + enhancedTelemService := ocrcommon.NewEnhancedTelemetryService(&jb, chEnhancedTelem, make(chan struct{}), d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.FeedID.String(), synchronization.EnhancedEAMercury), lggr.Named("EnhancedTelemetryMercury")) + mercuryServices = append(mercuryServices, enhancedTelemService) + } else { + lggr.Infow("Enhanced telemetry is disabled for mercury job", "job", jb.Name) + } + + return mercuryServices, err2 +} + +func (d *Delegate) newServicesMedian( + ctx context.Context, + lggr logger.SugaredLogger, + jb job.Job, + bootstrapPeers []commontypes.BootstrapperLocator, + kb ocr2key.KeyBundle, + ocrDB *db, + lc ocrtypes.LocalConfig, + ocrLogger commontypes.Logger, +) ([]job.ServiceCtx, error) { + spec := jb.OCR2OracleSpec + + rid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "median"} + } + + oracleArgsNoPlugin := libocr2.OCR2OracleArgs{ + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + Database: ocrDB, + LocalConfig: lc, + Logger: ocrLogger, + MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.OCR2Median), + OffchainKeyring: kb, + OnchainKeyring: kb, + } + errorLog := &errorLog{jobID: jb.ID, recordError: d.jobORM.RecordError} + enhancedTelemChan := make(chan ocrcommon.EnhancedTelemetryData, 100) + mConfig := median.NewMedianConfig( + d.cfg.JobPipeline().MaxSuccessfulRuns(), + d.cfg.JobPipeline().ResultWriteQueueDepth(), + d.cfg, + ) 
+ + relayer, err := d.RelayGetter.Get(rid) + if err != nil { + return nil, ErrRelayNotEnabled{Err: err, PluginName: "median", Relay: spec.Relay} + } + + medianServices, err2 := median.NewMedianServices(ctx, jb, d.isNewlyCreatedJob, relayer, d.pipelineRunner, lggr, oracleArgsNoPlugin, mConfig, enhancedTelemChan, errorLog) + + if ocrcommon.ShouldCollectEnhancedTelemetry(&jb) { + enhancedTelemService := ocrcommon.NewEnhancedTelemetryService(&jb, enhancedTelemChan, make(chan struct{}), d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.EnhancedEA), lggr.Named("EnhancedTelemetry")) + medianServices = append(medianServices, enhancedTelemService) + } else { + lggr.Infow("Enhanced telemetry is disabled for job", "job", jb.Name) + } + + return medianServices, err2 +} + +func (d *Delegate) newServicesDKG( + lggr logger.SugaredLogger, + jb job.Job, + bootstrapPeers []commontypes.BootstrapperLocator, + kb ocr2key.KeyBundle, + ocrDB *db, + lc ocrtypes.LocalConfig, + ocrLogger commontypes.Logger, +) ([]job.ServiceCtx, error) { + spec := jb.OCR2OracleSpec + rid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "DKG"} + } + if rid.Network != relay.EVM { + return nil, fmt.Errorf("DKG services: expected EVM relayer got %s", rid.Network) + } + + chain, err2 := d.legacyChains.Get(rid.ChainID) + if err2 != nil { + return nil, fmt.Errorf("DKG services: failed to get chain %s: %w", rid.ChainID, err2) + } + ocr2vrfRelayer := evmrelay.NewOCR2VRFRelayer(d.db, chain, lggr.Named("OCR2VRFRelayer"), d.ethKs) + dkgProvider, err2 := ocr2vrfRelayer.NewDKGProvider( + types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, + ContractID: spec.ContractID, + New: d.isNewlyCreatedJob, + RelayConfig: spec.RelayConfig.Bytes(), + }, types.PluginArgs{ + TransmitterID: spec.TransmitterID.String, + PluginConfig: spec.PluginConfig.Bytes(), + }) + if err2 != nil { + return nil, err2 + } + 
noopMonitoringEndpoint := telemetry.NoopAgent{} + oracleArgsNoPlugin := libocr2.OCR2OracleArgs{ + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + ContractTransmitter: dkgProvider.ContractTransmitter(), + ContractConfigTracker: dkgProvider.ContractConfigTracker(), + Database: ocrDB, + LocalConfig: lc, + Logger: ocrLogger, + // Telemetry ingress for DKG is currently not supported so a noop monitoring endpoint is being used + MonitoringEndpoint: &noopMonitoringEndpoint, + OffchainConfigDigester: dkgProvider.OffchainConfigDigester(), + OffchainKeyring: kb, + OnchainKeyring: kb, + } + return dkg.NewDKGServices( + jb, + dkgProvider, + lggr, + ocrLogger, + d.dkgSignKs, + d.dkgEncryptKs, + chain.Client(), + oracleArgsNoPlugin, + d.db, + d.cfg.Database(), + chain.ID(), + spec.Relay, + ) +} + +func (d *Delegate) newServicesOCR2VRF( + lggr logger.SugaredLogger, + jb job.Job, + bootstrapPeers []commontypes.BootstrapperLocator, + kb ocr2key.KeyBundle, + ocrDB *db, + lc ocrtypes.LocalConfig, +) ([]job.ServiceCtx, error) { + spec := jb.OCR2OracleSpec + + rid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "VRF"} + } + if rid.Network != relay.EVM { + return nil, fmt.Errorf("VRF services: expected EVM relayer got %s", rid.Network) + } + chain, err2 := d.legacyChains.Get(rid.ChainID) + if err2 != nil { + return nil, fmt.Errorf("VRF services: failed to get chain (%s): %w", rid.ChainID, err2) + } + if jb.ForwardingAllowed != chain.Config().EVM().Transactions().ForwardersEnabled() { + return nil, errors.New("transaction forwarding settings must be consistent for ocr2vrf") + } + + var cfg ocr2vrfconfig.PluginConfig + err2 = json.Unmarshal(spec.PluginConfig.Bytes(), &cfg) + if err2 != nil { + return nil, errors.Wrap(err2, "unmarshal ocr2vrf plugin config") + } + + err2 = ocr2vrfconfig.ValidatePluginConfig(cfg, d.dkgSignKs, d.dkgEncryptKs) + if err2 != nil { + return nil, errors.Wrap(err2, "validate 
ocr2vrf plugin config") + } + + ocr2vrfRelayer := evmrelay.NewOCR2VRFRelayer(d.db, chain, lggr.Named("OCR2VRFRelayer"), d.ethKs) + transmitterID := spec.TransmitterID.String + + vrfProvider, err2 := ocr2vrfRelayer.NewOCR2VRFProvider( + types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, + ContractID: spec.ContractID, + New: d.isNewlyCreatedJob, + RelayConfig: spec.RelayConfig.Bytes(), + }, types.PluginArgs{ + TransmitterID: transmitterID, + PluginConfig: spec.PluginConfig.Bytes(), + }) + if err2 != nil { + return nil, errors.Wrap(err2, "new vrf provider") + } + + dkgProvider, err2 := ocr2vrfRelayer.NewDKGProvider( + types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, + ContractID: cfg.DKGContractAddress, + RelayConfig: spec.RelayConfig.Bytes(), + }, types.PluginArgs{ + TransmitterID: transmitterID, + PluginConfig: spec.PluginConfig.Bytes(), + }) + if err2 != nil { + return nil, errors.Wrap(err2, "new dkg provider") + } + + dkgContract, err2 := dkg.NewOnchainDKGClient(cfg.DKGContractAddress, chain.Client()) + if err2 != nil { + return nil, errors.Wrap(err2, "new onchain dkg client") + } + + timeout := 5 * time.Second + interval := 60 * time.Second + juelsLogger := lggr.Named("JuelsFeeCoin").With("contract", cfg.LinkEthFeedAddress, "timeout", timeout, "interval", interval) + juelsPerFeeCoin, err2 := juelsfeecoin.NewLinkEthPriceProvider( + common.HexToAddress(cfg.LinkEthFeedAddress), chain.Client(), timeout, interval, juelsLogger) + if err2 != nil { + return nil, errors.Wrap(err2, "new link eth price provider") + } + + reasonableGasPrice := reasonablegasprice.NewReasonableGasPriceProvider( + chain.GasEstimator(), + timeout, + chain.Config().EVM().GasEstimator().PriceMax(), + chain.Config().EVM().GasEstimator().EIP1559DynamicFees(), + ) + + encryptionSecretKey, err2 := d.dkgEncryptKs.Get(cfg.DKGEncryptionPublicKey) + if err2 != nil { + return nil, errors.Wrap(err2, "get DKG encryption key") + } + signingSecretKey, err2 := 
d.dkgSignKs.Get(cfg.DKGSigningPublicKey) + if err2 != nil { + return nil, errors.Wrap(err2, "get DKG signing key") + } + keyID, err2 := dkg.DecodeKeyID(cfg.DKGKeyID) + if err2 != nil { + return nil, errors.Wrap(err2, "decode DKG key ID") + } + + coordinator, err2 := ocr2coordinator.New( + lggr.Named("OCR2VRFCoordinator"), + common.HexToAddress(spec.ContractID), + common.HexToAddress(cfg.VRFCoordinatorAddress), + common.HexToAddress(cfg.DKGContractAddress), + chain.Client(), + chain.LogPoller(), + chain.Config().EVM().FinalityDepth(), + ) + if err2 != nil { + return nil, errors.Wrap(err2, "create ocr2vrf coordinator") + } + l := lggr.Named("OCR2VRF").With( + "jobName", jb.Name.ValueOrZero(), + "jobID", jb.ID, + ) + vrfLogger := commonlogger.NewOCRWrapper(l.With( + "vrfContractID", spec.ContractID), d.cfg.OCR2().TraceLogging(), func(msg string) { + lggr.ErrorIf(d.jobORM.RecordError(jb.ID, msg), "unable to record error") + }) + dkgLogger := commonlogger.NewOCRWrapper(l.With( + "dkgContractID", cfg.DKGContractAddress), d.cfg.OCR2().TraceLogging(), func(msg string) { + lggr.ErrorIf(d.jobORM.RecordError(jb.ID, msg), "unable to record error") + }) + dkgReportingPluginFactoryDecorator := func(wrapped ocrtypes.ReportingPluginFactory) ocrtypes.ReportingPluginFactory { + return promwrapper.NewPromFactory(wrapped, "DKG", string(relay.EVM), chain.ID()) + } + vrfReportingPluginFactoryDecorator := func(wrapped ocrtypes.ReportingPluginFactory) ocrtypes.ReportingPluginFactory { + return promwrapper.NewPromFactory(wrapped, "OCR2VRF", string(relay.EVM), chain.ID()) + } + noopMonitoringEndpoint := telemetry.NoopAgent{} + oracles, err2 := ocr2vrf.NewOCR2VRF(ocr2vrf.DKGVRFArgs{ + VRFLogger: vrfLogger, + DKGLogger: dkgLogger, + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + OffchainKeyring: kb, + OnchainKeyring: kb, + VRFOffchainConfigDigester: vrfProvider.OffchainConfigDigester(), + VRFContractConfigTracker: vrfProvider.ContractConfigTracker(), + 
VRFContractTransmitter: vrfProvider.ContractTransmitter(), + VRFDatabase: ocrDB, + VRFLocalConfig: lc, + VRFMonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.OCR2VRF), + DKGContractConfigTracker: dkgProvider.ContractConfigTracker(), + DKGOffchainConfigDigester: dkgProvider.OffchainConfigDigester(), + DKGContract: dkgpkg.NewOnchainContract(dkgContract, &altbn_128.G2{}), + DKGContractTransmitter: dkgProvider.ContractTransmitter(), + DKGDatabase: ocrDB, + DKGLocalConfig: lc, + // Telemetry ingress for DKG is currently not supported so a noop monitoring endpoint is being used + DKGMonitoringEndpoint: &noopMonitoringEndpoint, + Serializer: reportserializer.NewReportSerializer(&altbn_128.G1{}), + JuelsPerFeeCoin: juelsPerFeeCoin, + ReasonableGasPrice: reasonableGasPrice, + Coordinator: coordinator, + Esk: encryptionSecretKey.KyberScalar(), + Ssk: signingSecretKey.KyberScalar(), + KeyID: keyID, + DKGReportingPluginFactoryDecorator: dkgReportingPluginFactoryDecorator, + VRFReportingPluginFactoryDecorator: vrfReportingPluginFactoryDecorator, + DKGSharePersistence: persistence.NewShareDB(d.db, lggr.Named("DKGShareDB"), d.cfg.Database(), chain.ID(), spec.Relay), + }) + if err2 != nil { + return nil, errors.Wrap(err2, "new ocr2vrf") + } + + // NOTE: we return from here with the services because the OCR2VRF oracles are defined + // and exported from the ocr2vrf library. It takes care of running the DKG and OCR2VRF + // oracles under the hood together. 
+ oracleCtx := job.NewServiceAdapter(oracles) + return []job.ServiceCtx{vrfProvider, dkgProvider, oracleCtx}, nil +} + +func (d *Delegate) newServicesOCR2Keepers( + ctx context.Context, + lggr logger.SugaredLogger, + jb job.Job, + bootstrapPeers []commontypes.BootstrapperLocator, + kb ocr2key.KeyBundle, + ocrDB *db, + lc ocrtypes.LocalConfig, + ocrLogger commontypes.Logger, +) ([]job.ServiceCtx, error) { + spec := jb.OCR2OracleSpec + var cfg ocr2keeper.PluginConfig + if err := json.Unmarshal(spec.PluginConfig.Bytes(), &cfg); err != nil { + return nil, errors.Wrap(err, "unmarshal ocr2keepers plugin config") + } + + if err := ocr2keeper.ValidatePluginConfig(cfg); err != nil { + return nil, errors.Wrap(err, "ocr2keepers plugin config validation failure") + } + + switch cfg.ContractVersion { + case "v2.1": + return d.newServicesOCR2Keepers21(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger, cfg, spec) + case "v2.0": + return d.newServicesOCR2Keepers20(lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger, cfg, spec) + default: + return d.newServicesOCR2Keepers20(lggr, jb, bootstrapPeers, kb, ocrDB, lc, ocrLogger, cfg, spec) + } +} + +func (d *Delegate) newServicesOCR2Keepers21( + ctx context.Context, + lggr logger.SugaredLogger, + jb job.Job, + bootstrapPeers []commontypes.BootstrapperLocator, + kb ocr2key.KeyBundle, + ocrDB *db, + lc ocrtypes.LocalConfig, + ocrLogger commontypes.Logger, + cfg ocr2keeper.PluginConfig, + spec *job.OCR2OracleSpec, +) ([]job.ServiceCtx, error) { + credName, err2 := jb.OCR2OracleSpec.PluginConfig.MercuryCredentialName() + if err2 != nil { + return nil, errors.Wrap(err2, "failed to get mercury credential name") + } + + mc := d.cfg.Mercury().Credentials(credName) + rid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "keeper2"} + } + if rid.Network != relay.EVM { + return nil, fmt.Errorf("keeper2 services: expected EVM relayer got %s", rid.Network) + } + + transmitterID := 
spec.TransmitterID.String + relayer, err := d.RelayGetter.Get(rid) + if err != nil { + return nil, ErrRelayNotEnabled{Err: err, Relay: spec.Relay, PluginName: "ocr2keepers"} + } + + provider, err := relayer.NewPluginProvider(ctx, + types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, + ContractID: spec.ContractID, + New: d.isNewlyCreatedJob, + RelayConfig: spec.RelayConfig.Bytes(), + ProviderType: string(spec.PluginType), + MercuryCredentials: mc, + }, types.PluginArgs{ + TransmitterID: transmitterID, + PluginConfig: spec.PluginConfig.Bytes(), + }) + if err != nil { + return nil, err + } + + keeperProvider, ok := provider.(types.AutomationProvider) + if !ok { + return nil, errors.New("could not coerce PluginProvider to AutomationProvider") + } + + services, err := ocr2keeper.EVMDependencies21(kb) + if err != nil { + return nil, errors.Wrap(err, "could not build dependencies for ocr2 keepers") + } + // set some defaults + conf := ocr2keepers21config.ReportingFactoryConfig{ + CacheExpiration: ocr2keepers21config.DefaultCacheExpiration, + CacheEvictionInterval: ocr2keepers21config.DefaultCacheClearInterval, + MaxServiceWorkers: ocr2keepers21config.DefaultMaxServiceWorkers, + ServiceQueueLength: ocr2keepers21config.DefaultServiceQueueLength, + } + + // override if set in config + if cfg.CacheExpiration.Value() != 0 { + conf.CacheExpiration = cfg.CacheExpiration.Value() + } + + if cfg.CacheEvictionInterval.Value() != 0 { + conf.CacheEvictionInterval = cfg.CacheEvictionInterval.Value() + } + + if cfg.MaxServiceWorkers != 0 { + conf.MaxServiceWorkers = cfg.MaxServiceWorkers + } + + if cfg.ServiceQueueLength != 0 { + conf.ServiceQueueLength = cfg.ServiceQueueLength + } + + dConf := ocr2keepers21.DelegateConfig{ + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + ContractTransmitter: evmrelay.NewKeepersOCR3ContractTransmitter(keeperProvider.ContractTransmitter()), + ContractConfigTracker: 
keeperProvider.ContractConfigTracker(), + KeepersDatabase: ocrDB, + Logger: ocrLogger, + MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.OCR3Automation), + OffchainConfigDigester: keeperProvider.OffchainConfigDigester(), + OffchainKeyring: kb, + OnchainKeyring: services.Keyring(), + LocalConfig: lc, + LogProvider: keeperProvider.LogEventProvider(), + EventProvider: keeperProvider.TransmitEventProvider(), + Runnable: keeperProvider.Registry(), + Encoder: keeperProvider.Encoder(), + BlockSubscriber: keeperProvider.BlockSubscriber(), + RecoverableProvider: keeperProvider.LogRecoverer(), + PayloadBuilder: keeperProvider.PayloadBuilder(), + UpkeepProvider: keeperProvider.UpkeepProvider(), + UpkeepStateUpdater: keeperProvider.UpkeepStateStore(), + UpkeepTypeGetter: ocr2keeper21core.GetUpkeepType, + WorkIDGenerator: ocr2keeper21core.UpkeepWorkID, + // TODO: Clean up the config + CacheExpiration: cfg.CacheExpiration.Value(), + CacheEvictionInterval: cfg.CacheEvictionInterval.Value(), + MaxServiceWorkers: cfg.MaxServiceWorkers, + ServiceQueueLength: cfg.ServiceQueueLength, + } + + pluginService, err := ocr2keepers21.NewDelegate(dConf) + if err != nil { + return nil, errors.Wrap(err, "could not create new keepers ocr2 delegate") + } + + automationServices := []job.ServiceCtx{ + keeperProvider, + keeperProvider.Registry(), + keeperProvider.BlockSubscriber(), + keeperProvider.LogEventProvider(), + keeperProvider.LogRecoverer(), + keeperProvider.UpkeepStateStore(), + keeperProvider.TransmitEventProvider(), + pluginService, + } + + if cfg.CaptureAutomationCustomTelemetry != nil && *cfg.CaptureAutomationCustomTelemetry || + cfg.CaptureAutomationCustomTelemetry == nil && d.cfg.OCR2().CaptureAutomationCustomTelemetry() { + endpoint := d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.AutomationCustom) + customTelemService, custErr := 
autotelemetry21.NewAutomationCustomTelemetryService( + endpoint, + lggr, + keeperProvider.BlockSubscriber(), + keeperProvider.ContractConfigTracker(), + ) + if custErr != nil { + return nil, errors.Wrap(custErr, "Error when creating AutomationCustomTelemetryService") + } + automationServices = append(automationServices, customTelemService) + } + + return automationServices, nil +} + +func (d *Delegate) newServicesOCR2Keepers20( + lggr logger.SugaredLogger, + jb job.Job, + bootstrapPeers []commontypes.BootstrapperLocator, + kb ocr2key.KeyBundle, + ocrDB *db, + lc ocrtypes.LocalConfig, + ocrLogger commontypes.Logger, + cfg ocr2keeper.PluginConfig, + spec *job.OCR2OracleSpec, +) ([]job.ServiceCtx, error) { + + rid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "keepers2.0"} + } + if rid.Network != relay.EVM { + return nil, fmt.Errorf("keepers2.0 services: expected EVM relayer got %s", rid.Network) + } + chain, err2 := d.legacyChains.Get(rid.ChainID) + if err2 != nil { + return nil, fmt.Errorf("keepers2.0 services: failed to get chain (%s): %w", rid.ChainID, err2) + } + + keeperProvider, rgstry, encoder, logProvider, err2 := ocr2keeper.EVMDependencies20(jb, d.db, lggr, chain, d.ethKs, d.cfg.Database()) + if err2 != nil { + return nil, errors.Wrap(err2, "could not build dependencies for ocr2 keepers") + } + + w := &logWriter{log: lggr.Named("Automation Dependencies")} + + // set some defaults + conf := ocr2keepers20config.ReportingFactoryConfig{ + CacheExpiration: ocr2keepers20config.DefaultCacheExpiration, + CacheEvictionInterval: ocr2keepers20config.DefaultCacheClearInterval, + MaxServiceWorkers: ocr2keepers20config.DefaultMaxServiceWorkers, + ServiceQueueLength: ocr2keepers20config.DefaultServiceQueueLength, + } + + // override if set in config + if cfg.CacheExpiration.Value() != 0 { + conf.CacheExpiration = cfg.CacheExpiration.Value() + } + + if cfg.CacheEvictionInterval.Value() != 0 { + conf.CacheEvictionInterval = 
cfg.CacheEvictionInterval.Value() + } + + if cfg.MaxServiceWorkers != 0 { + conf.MaxServiceWorkers = cfg.MaxServiceWorkers + } + + if cfg.ServiceQueueLength != 0 { + conf.ServiceQueueLength = cfg.ServiceQueueLength + } + + runr, err2 := ocr2keepers20runner.NewRunner( + log.New(w, "[automation-plugin-runner] ", log.Lshortfile), + rgstry, + encoder, + conf.MaxServiceWorkers, + conf.ServiceQueueLength, + conf.CacheExpiration, + conf.CacheEvictionInterval, + ) + if err2 != nil { + return nil, errors.Wrap(err2, "failed to create automation pipeline runner") + } + + condObs := &ocr2keepers20polling.PollingObserverFactory{ + Logger: log.New(w, "[automation-plugin-conditional-observer] ", log.Lshortfile), + Source: rgstry, + Heads: rgstry, + Runner: runr, + Encoder: encoder, + } + + coord := &ocr2keepers20coordinator.CoordinatorFactory{ + Logger: log.New(w, "[automation-plugin-coordinator] ", log.Lshortfile), + Encoder: encoder, + Logs: logProvider, + CacheClean: conf.CacheEvictionInterval, + } + + dConf := ocr2keepers20.DelegateConfig{ + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + ContractTransmitter: keeperProvider.ContractTransmitter(), + ContractConfigTracker: keeperProvider.ContractConfigTracker(), + KeepersDatabase: ocrDB, + LocalConfig: lc, + Logger: ocrLogger, + MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.OCR2Automation), + OffchainConfigDigester: keeperProvider.OffchainConfigDigester(), + OffchainKeyring: kb, + OnchainKeyring: kb, + ConditionalObserverFactory: condObs, + CoordinatorFactory: coord, + Encoder: encoder, + Runner: runr, + // the following values are not needed in the delegate config anymore + CacheExpiration: cfg.CacheExpiration.Value(), + CacheEvictionInterval: cfg.CacheEvictionInterval.Value(), + MaxServiceWorkers: cfg.MaxServiceWorkers, + ServiceQueueLength: cfg.ServiceQueueLength, + } + + pluginService, err := 
ocr2keepers20.NewDelegate(dConf) + if err != nil { + return nil, errors.Wrap(err, "could not create new keepers ocr2 delegate") + } + + return []job.ServiceCtx{ + job.NewServiceAdapter(runr), + keeperProvider, + rgstry, + logProvider, + pluginService, + }, nil +} + +func (d *Delegate) newServicesOCR2Functions( + lggr logger.SugaredLogger, + jb job.Job, + bootstrapPeers []commontypes.BootstrapperLocator, + kb ocr2key.KeyBundle, + functionsOcrDB *db, + thresholdOcrDB *db, + s4OcrDB *db, + lc ocrtypes.LocalConfig, + ocrLogger commontypes.Logger, +) ([]job.ServiceCtx, error) { + spec := jb.OCR2OracleSpec + + rid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "functions"} + } + if rid.Network != relay.EVM { + return nil, fmt.Errorf("functions services: expected EVM relayer got %s", rid.Network) + } + chain, err := d.legacyChains.Get(rid.ChainID) + if err != nil { + return nil, fmt.Errorf("functions services: failed to get chain %s: %w", rid.ChainID, err) + } + createPluginProvider := func(pluginType functionsRelay.FunctionsPluginType, relayerName string) (evmrelaytypes.FunctionsProvider, error) { + return evmrelay.NewFunctionsProvider( + chain, + types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, + ContractID: spec.ContractID, + RelayConfig: spec.RelayConfig.Bytes(), + New: d.isNewlyCreatedJob, + }, + types.PluginArgs{ + TransmitterID: spec.TransmitterID.String, + PluginConfig: spec.PluginConfig.Bytes(), + }, + lggr.Named(relayerName), + d.ethKs, + pluginType, + ) + } + + functionsProvider, err := createPluginProvider(functionsRelay.FunctionsPlugin, "FunctionsRelayer") + if err != nil { + return nil, err + } + + thresholdProvider, err := createPluginProvider(functionsRelay.ThresholdPlugin, "FunctionsThresholdRelayer") + if err != nil { + return nil, err + } + + s4Provider, err := createPluginProvider(functionsRelay.S4Plugin, "FunctionsS4Relayer") + if err != nil { + return nil, err + } + + 
functionsOracleArgs := libocr2.OCR2OracleArgs{ + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + ContractTransmitter: functionsProvider.ContractTransmitter(), + ContractConfigTracker: functionsProvider.ContractConfigTracker(), + Database: functionsOcrDB, + LocalConfig: lc, + Logger: ocrLogger, + MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.OCR2Functions), + OffchainConfigDigester: functionsProvider.OffchainConfigDigester(), + OffchainKeyring: kb, + OnchainKeyring: kb, + ReportingPluginFactory: nil, // To be set by NewFunctionsServices + } + + noopMonitoringEndpoint := telemetry.NoopAgent{} + + thresholdOracleArgs := libocr2.OCR2OracleArgs{ + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + ContractTransmitter: thresholdProvider.ContractTransmitter(), + ContractConfigTracker: thresholdProvider.ContractConfigTracker(), + Database: thresholdOcrDB, + LocalConfig: lc, + Logger: ocrLogger, + // Telemetry ingress for OCR2Threshold is currently not supported so a noop monitoring endpoint is being used + MonitoringEndpoint: &noopMonitoringEndpoint, + OffchainConfigDigester: thresholdProvider.OffchainConfigDigester(), + OffchainKeyring: kb, + OnchainKeyring: kb, + ReportingPluginFactory: nil, // To be set by NewFunctionsServices + } + + s4OracleArgs := libocr2.OCR2OracleArgs{ + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + ContractTransmitter: s4Provider.ContractTransmitter(), + ContractConfigTracker: s4Provider.ContractConfigTracker(), + Database: s4OcrDB, + LocalConfig: lc, + Logger: ocrLogger, + // Telemetry ingress for OCR2S4 is currently not supported so a noop monitoring endpoint is being used + MonitoringEndpoint: &noopMonitoringEndpoint, + OffchainConfigDigester: s4Provider.OffchainConfigDigester(), + OffchainKeyring: kb, + OnchainKeyring: kb, + ReportingPluginFactory: 
nil, // To be set by NewFunctionsServices + } + + encryptedThresholdKeyShare := d.cfg.Threshold().ThresholdKeyShare() + var thresholdKeyShare []byte + if len(encryptedThresholdKeyShare) > 0 { + encryptedThresholdKeyShareBytes, err2 := hex.DecodeString(encryptedThresholdKeyShare) + if err2 != nil { + return nil, errors.Wrap(err2, "failed to decode ThresholdKeyShare hex string") + } + thresholdKeyShare, err2 = kb.NaclBoxOpenAnonymous(encryptedThresholdKeyShareBytes) + if err2 != nil { + return nil, errors.Wrap(err2, "failed to decrypt ThresholdKeyShare") + } + } + + functionsServicesConfig := functions.FunctionsServicesConfig{ + Job: jb, + JobORM: d.jobORM, + BridgeORM: d.bridgeORM, + QConfig: d.cfg.Database(), + DB: d.db, + Chain: chain, + ContractID: spec.ContractID, + Logger: lggr, + MailMon: d.mailMon, + URLsMonEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.ContractID, synchronization.FunctionsRequests), + EthKeystore: d.ethKs, + ThresholdKeyShare: thresholdKeyShare, + LogPollerWrapper: functionsProvider.LogPollerWrapper(), + } + + functionsServices, err := functions.NewFunctionsServices(&functionsOracleArgs, &thresholdOracleArgs, &s4OracleArgs, &functionsServicesConfig) + if err != nil { + return nil, errors.Wrap(err, "error calling NewFunctionsServices") + } + + return append([]job.ServiceCtx{functionsProvider, thresholdProvider, s4Provider}, functionsServices...), nil +} + +// errorLog implements [loop.ErrorLog] +type errorLog struct { + jobID int32 + recordError func(jobID int32, description string, qopts ...pg.QOpt) error +} + +func (l *errorLog) SaveError(ctx context.Context, msg string) error { + return l.recordError(l.jobID, msg) +} + +type logWriter struct { + log logger.Logger +} + +func (l *logWriter) Write(p []byte) (n int, err error) { + l.log.Debug(string(p), nil) + n = len(p) + return +} diff --git a/core/services/ocr2/delegate_test.go b/core/services/ocr2/delegate_test.go new file mode 100644 index 
00000000..d6634349 --- /dev/null +++ b/core/services/ocr2/delegate_test.go @@ -0,0 +1,185 @@ +package ocr2_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/types" + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + txmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2" + ocr2validate "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" +) + +func TestGetEVMEffectiveTransmitterID(t *testing.T) { + customChainID := big.New(testutils.NewRandomEVMChainID()) + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + enabled := true + c.EVM = append(c.EVM, &evmcfg.EVMConfig{ + ChainID: customChainID, + Chain: evmcfg.Defaults(customChainID), + Enabled: &enabled, + Nodes: evmcfg.EVMNodes{{}}, + }) + }) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + require.NoError(t, keyStore.OCR2().Add(cltest.DefaultOCR2Key)) + lggr := logger.TestLogger(t) + + txManager := txmmocks.NewMockEvmTxManager(t) + relayExtenders := evmtest.NewChainRelayExtenders(t, 
evmtest.TestChainOpts{DB: db, GeneralConfig: config, KeyStore: keyStore.Eth(), TxManager: txManager}) + require.True(t, relayExtenders.Len() > 0) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + + type testCase struct { + name string + pluginType types.OCR2PluginType + transmitterID null.String + sendingKeys []any + expectedError bool + expectedTransmitterID string + forwardingEnabled bool + getForwarderForEOAArg common.Address + getForwarderForEOAErr bool + } + + setTestCase := func(jb *job.Job, tc testCase, txManager *txmmocks.MockEvmTxManager) { + jb.OCR2OracleSpec.PluginType = tc.pluginType + jb.OCR2OracleSpec.TransmitterID = tc.transmitterID + jb.OCR2OracleSpec.RelayConfig["sendingKeys"] = tc.sendingKeys + jb.ForwardingAllowed = tc.forwardingEnabled + + if tc.forwardingEnabled && tc.getForwarderForEOAErr { + txManager.Mock.On("GetForwarderForEOA", tc.getForwarderForEOAArg).Return(common.HexToAddress("0x0"), errors.New("random error")).Once() + } else if tc.forwardingEnabled { + txManager.Mock.On("GetForwarderForEOA", tc.getForwarderForEOAArg).Return(common.HexToAddress(tc.expectedTransmitterID), nil).Once() + } + } + + testCases := []testCase{ + { + name: "mercury plugin should just return transmitterID", + pluginType: types.Mercury, + transmitterID: null.StringFrom("Mercury transmitterID"), + expectedTransmitterID: "Mercury transmitterID", + }, + { + name: "when transmitterID is not defined, it should validate that sending keys are defined", + sendingKeys: []any{}, + expectedError: true, + }, + { + name: "when transmitterID is not defined, it should validate that plugin type is ocr2 vrf if more than 1 sending key is defined", + sendingKeys: []any{"0x7e57000000000000000000000000000000000001", "0x7e57000000000000000000000000000000000002", "0x7e57000000000000000000000000000000000003"}, + expectedError: true, + }, + { + name: "when transmitterID is not defined and plugin is ocr2vrf, it should allow>1 sendingKeys and set 
transmitterID to the first one", + pluginType: types.OCR2VRF, + sendingKeys: []any{"0x7e57000000000000000000000000000000000000", "0x7e57000000000000000000000000000000000001", "0x7e57000000000000000000000000000000000002"}, + expectedTransmitterID: "0x7e57000000000000000000000000000000000000", + }, + { + name: "when transmitterID is not defined, it should set transmitterID to first sendingKey", + sendingKeys: []any{"0x7e57000000000000000000000000000000000004"}, + expectedTransmitterID: "0x7e57000000000000000000000000000000000004", + }, + { + name: "when forwarders are enabled and when transmitterID is defined, it should default to using spec transmitterID to retrieve forwarder address", + forwardingEnabled: true, + transmitterID: null.StringFrom("0x7e57000000000000000000000000000000000001"), + getForwarderForEOAArg: common.HexToAddress("0x7e57000000000000000000000000000000000001"), + expectedTransmitterID: "0x7e58000000000000000000000000000000000000", + }, + { + name: "when forwarders are enabled and when transmitterID is not defined, it should use first sendingKey to retrieve forwarder address", + pluginType: types.OCR2VRF, + forwardingEnabled: true, + sendingKeys: []any{"0x7e57000000000000000000000000000000000001", "0x7e57000000000000000000000000000000000002"}, + getForwarderForEOAArg: common.HexToAddress("0x7e57000000000000000000000000000000000001"), + expectedTransmitterID: "0x7e58000000000000000000000000000000000000", + }, + { + name: "when forwarders are enabled but forwarder address fails to be retrieved and when transmitterID is not defined, it should default to using first sendingKey", + pluginType: types.OCR2VRF, + forwardingEnabled: true, + sendingKeys: []any{"0x7e57000000000000000000000000000000000001", "0x7e57000000000000000000000000000000000002"}, + getForwarderForEOAArg: common.HexToAddress("0x7e57000000000000000000000000000000000001"), + getForwarderForEOAErr: true, + expectedTransmitterID: "0x7e57000000000000000000000000000000000001", + }, + { + 
name: "when forwarders are enabled but forwarder address fails to be retrieved and when transmitterID is defined, it should default to using spec transmitterID", + forwardingEnabled: true, + transmitterID: null.StringFrom("0x7e57000000000000000000000000000000000003"), + getForwarderForEOAErr: true, + getForwarderForEOAArg: common.HexToAddress("0x7e57000000000000000000000000000000000003"), + expectedTransmitterID: "0x7e57000000000000000000000000000000000003", + }, + } + + t.Run("when sending keys are not defined, the first one should be set to transmitterID", func(t *testing.T) { + jb, err := ocr2validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), testspecs.GetOCR2EVMSpecMinimal()) + require.NoError(t, err) + jb.OCR2OracleSpec.TransmitterID = null.StringFrom("some transmitterID string") + jb.OCR2OracleSpec.RelayConfig["sendingKeys"] = nil + chain, err := legacyChains.Get(customChainID.String()) + require.NoError(t, err) + effectiveTransmitterID, err := ocr2.GetEVMEffectiveTransmitterID(&jb, chain, lggr) + require.NoError(t, err) + require.Equal(t, "some transmitterID string", effectiveTransmitterID) + require.Equal(t, []string{"some transmitterID string"}, jb.OCR2OracleSpec.RelayConfig["sendingKeys"].([]string)) + }) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + jb, err := ocr2validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), testspecs.GetOCR2EVMSpecMinimal()) + require.NoError(t, err) + setTestCase(&jb, tc, txManager) + chain, err := legacyChains.Get(customChainID.String()) + require.NoError(t, err) + + effectiveTransmitterID, err := ocr2.GetEVMEffectiveTransmitterID(&jb, chain, lggr) + if tc.expectedError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.expectedTransmitterID, effectiveTransmitterID) + // when forwarding is enabled effectiveTransmitter differs unless it failed to fetch forwarder address + if !jb.ForwardingAllowed { + require.Equal(t, 
jb.OCR2OracleSpec.TransmitterID.String, effectiveTransmitterID) + } + + }) + } + + t.Run("when forwarders are enabled and chain retrieval fails, error should be handled", func(t *testing.T) { + jb, err := ocr2validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), testspecs.GetOCR2EVMSpecMinimal()) + require.NoError(t, err) + jb.ForwardingAllowed = true + jb.OCR2OracleSpec.TransmitterID = null.StringFrom("0x7e57000000000000000000000000000000000001") + chain, err := legacyChains.Get("not an id") + require.Error(t, err) + _, err = ocr2.GetEVMEffectiveTransmitterID(&jb, chain, lggr) + require.Error(t, err) + }) +} diff --git a/core/services/ocr2/plugins/dkg/config/config.go b/core/services/ocr2/plugins/dkg/config/config.go new file mode 100644 index 00000000..527e5ffc --- /dev/null +++ b/core/services/ocr2/plugins/dkg/config/config.go @@ -0,0 +1,28 @@ +package config + +import ( + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" +) + +// PluginConfig contains custom arguments for the DKG plugin. +type PluginConfig struct { + EncryptionPublicKey string `json:"encryptionPublicKey"` + SigningPublicKey string `json:"signingPublicKey"` + KeyID string `json:"keyID"` +} + +// ValidatePluginConfig validates that the given DKG plugin configuration is correct. 
+func ValidatePluginConfig(config PluginConfig, dkgSignKs keystore.DKGSign, dkgEncryptKs keystore.DKGEncrypt) error { + _, err := dkgEncryptKs.Get(config.EncryptionPublicKey) + if err != nil { + return errors.Wrapf(err, "DKG encryption key: %s not found in key store", config.EncryptionPublicKey) + } + _, err = dkgSignKs.Get(config.SigningPublicKey) + if err != nil { + return errors.Wrapf(err, "DKG sign key: %s not found in key store", config.SigningPublicKey) + } + + return nil +} diff --git a/core/services/ocr2/plugins/dkg/config/config_test.go b/core/services/ocr2/plugins/dkg/config/config_test.go new file mode 100644 index 00000000..f048b9b9 --- /dev/null +++ b/core/services/ocr2/plugins/dkg/config/config_test.go @@ -0,0 +1,64 @@ +package config_test + +import ( + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/dkg/config" +) + +func TestValidatePluginConfig(t *testing.T) { + t.Parallel() + + cfg := configtest.NewGeneralConfig(t, nil) + db := pgtest.NewSqlxDB(t) + kst := cltest.NewKeyStore(t, db, cfg.Database()) + + dkgEncryptKey, err := kst.DKGEncrypt().Create() + require.NoError(t, err) + dkgSignKey, err := kst.DKGSign().Create() + require.NoError(t, err) + + encryptKeyBytes, err := dkgEncryptKey.PublicKey.MarshalBinary() + require.NoError(t, err) + encryptKey := hex.EncodeToString(encryptKeyBytes) + + signKeyBytes, err := dkgSignKey.PublicKey.MarshalBinary() + require.NoError(t, err) + signKey := hex.EncodeToString(signKeyBytes) + + pluginConfig := config.PluginConfig{ + EncryptionPublicKey: encryptKey, + SigningPublicKey: signKey, + } + t.Run("no error when keys are found", func(t *testing.T) { + err = config.ValidatePluginConfig(pluginConfig, kst.DKGSign(), kst.DKGEncrypt()) 
+ require.NoError(t, err) + }) + + t.Run("error when encryption key not found", func(t *testing.T) { + pluginConfig = config.PluginConfig{ + EncryptionPublicKey: "wrongKey", + SigningPublicKey: signKey, + } + err = config.ValidatePluginConfig(pluginConfig, kst.DKGSign(), kst.DKGEncrypt()) + require.Error(t, err) + require.Contains(t, err.Error(), "DKG encryption key: wrongKey not found in key store") + }) + + t.Run("error when sign key not found", func(t *testing.T) { + pluginConfig = config.PluginConfig{ + EncryptionPublicKey: encryptKey, + SigningPublicKey: "wrongKey", + } + + err = config.ValidatePluginConfig(pluginConfig, kst.DKGSign(), kst.DKGEncrypt()) + require.Error(t, err) + require.Contains(t, err.Error(), "DKG sign key: wrongKey not found in key store") + }) +} diff --git a/core/services/ocr2/plugins/dkg/key_consumer.go b/core/services/ocr2/plugins/dkg/key_consumer.go new file mode 100644 index 00000000..792f16f4 --- /dev/null +++ b/core/services/ocr2/plugins/dkg/key_consumer.go @@ -0,0 +1,24 @@ +package dkg + +import ( + "encoding/hex" + "fmt" + + "github.com/goplugin/plugin-vrf/dkg" +) + +type dummyKeyConsumer struct{} + +func (d dummyKeyConsumer) KeyInvalidated(keyID dkg.KeyID) { + fmt.Println("KEY INVALIDATED:", hex.EncodeToString(keyID[:])) +} + +func (d dummyKeyConsumer) NewKey(keyID dkg.KeyID, data *dkg.KeyData) { + fmt.Println("NEW KEY FOR KEY ID:", hex.EncodeToString(keyID[:]), "KEY:", data) +} + +var _ dkg.KeyConsumer = dummyKeyConsumer{} + +func newDummyKeyConsumer() dummyKeyConsumer { + return dummyKeyConsumer{} +} diff --git a/core/services/ocr2/plugins/dkg/onchain_contract.go b/core/services/ocr2/plugins/dkg/onchain_contract.go new file mode 100644 index 00000000..0640fafd --- /dev/null +++ b/core/services/ocr2/plugins/dkg/onchain_contract.go @@ -0,0 +1,90 @@ +package dkg + +import ( + "context" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + 
"go.dedis.ch/kyber/v3/sign/anon" + + "github.com/goplugin/plugin-vrf/dkg" + dkgwrapper "github.com/goplugin/plugin-vrf/gethwrappers/dkg" + ocr2vrftypes "github.com/goplugin/plugin-vrf/types" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" +) + +type onchainContract struct { + wrapper *dkgwrapper.DKG + dkgAddress common.Address +} + +var _ dkg.DKG = &onchainContract{} + +func NewOnchainDKGClient(dkgAddress string, ethClient evmclient.Client) (dkg.DKG, error) { + dkgAddr := common.HexToAddress(dkgAddress) + wrapper, err := dkgwrapper.NewDKG(dkgAddr, ethClient) + if err != nil { + return nil, errors.Wrap(err, "new dkg wrapper") + } + return &onchainContract{ + wrapper: wrapper, + dkgAddress: dkgAddr, + }, nil +} + +func (o *onchainContract) GetKey( + ctx context.Context, + keyID dkg.KeyID, + configDigest [32]byte, +) (dkg.OnchainKeyData, error) { + keyData, err := o.wrapper.GetKey(&bind.CallOpts{ + Context: ctx, + }, keyID, configDigest) + if err != nil { + return dkg.OnchainKeyData{}, errors.Wrap(err, "wrapper GetKey") + } + return dkg.OnchainKeyData{ + PublicKey: keyData.PublicKey, + Hashes: keyData.Hashes, + }, nil +} + +func (o *onchainContract) Address() common.Address { + return o.dkgAddress +} + +func (o *onchainContract) CurrentCommittee(ctx context.Context) (ocr2vrftypes.OCRCommittee, error) { + // NOTE: this is only ever used in tests in the ocr2vrf repo. + // Since this isn't really used for production DKG running, + // there's no point in implementing it. + panic("unimplemented") +} + +func (o *onchainContract) InitiateDKG( + ctx context.Context, + committee ocr2vrftypes.OCRCommittee, + f ocr2vrftypes.PlayerIdxInt, + keyID dkg.KeyID, + epks dkg.EncryptionPublicKeys, + spks dkg.SigningPublicKeys, + encGroup anon.Suite, + translator ocr2vrftypes.PubKeyTranslation, +) error { + // NOTE: this is only ever used in tests, the idea here is to call setConfig + // on the DKG contract to get the OCR process going. 
Since this isn't really + // used for production DKG running, there's no point in implementing it. + panic("unimplemented") +} + +func (o *onchainContract) AddClient( + ctx context.Context, + keyID [32]byte, + clientAddress common.Address, +) error { + // NOTE: this is only ever used in tests in the ocr2vrf repo. + // Since this isn't really used for production DKG running, + // there's no point in implementing it. + panic("unimplemented") +} diff --git a/core/services/ocr2/plugins/dkg/persistence/db.go b/core/services/ocr2/plugins/dkg/persistence/db.go new file mode 100644 index 00000000..e41c7e2b --- /dev/null +++ b/core/services/ocr2/plugins/dkg/persistence/db.go @@ -0,0 +1,224 @@ +package persistence + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + ocr2vrftypes "github.com/goplugin/plugin-vrf/types" + "github.com/goplugin/plugin-vrf/types/hash" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" +) + +var ( + _ ocr2vrftypes.DKGSharePersistence = &shareDB{} + zeroHash hash.Hash + buckets = []float64{ + float64(100 * time.Millisecond), + float64(200 * time.Millisecond), + float64(500 * time.Millisecond), + float64(1 * time.Second), + float64(2 * time.Second), + float64(5 * time.Second), + float64(10 * time.Second), + float64(30 * time.Second), + } + labels = []string{ + "chainType", "chainID", + } + promWriteShareRecords = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "write_share_records_time", + Help: "The duration of the DKG WriteShareRecords call", + Buckets: buckets, + }, labels) + promReadShareRecords = 
promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "read_share_records_time", + Help: "The duration of the DKG ReadShareRecords call.", + Buckets: buckets, + }, labels) +) + +type shareDB struct { + q pg.Q + lggr logger.Logger + chainID *big.Int + chainType string +} + +// NewShareDB creates a new DKG share database. +func NewShareDB(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig, chainID *big.Int, chainType relay.Network) ocr2vrftypes.DKGSharePersistence { + return &shareDB{ + q: pg.NewQ(db, lggr, cfg), + lggr: lggr, + chainID: chainID, + chainType: chainType, + } +} + +// WriteShareRecords writes the provided (already encrypted) +// share records to the Plugin database. +func (s *shareDB) WriteShareRecords( + ctx context.Context, + cfgDgst ocrtypes.ConfigDigest, + keyID [32]byte, + shareRecords []ocr2vrftypes.PersistentShareSetRecord, +) error { + lggr := s.lggr.With( + "configDigest", hexutil.Encode(cfgDgst[:]), + "keyID", hexutil.Encode(keyID[:])) + + start := time.Now() + defer func() { + duration := time.Since(start) + promWriteShareRecords.WithLabelValues(s.chainType, s.chainID.String()).Observe(float64(duration)) + // lggr.Debugw("Inserted DKG shares into DB", "duration", duration) // see ocr2vrf code for logs + }() + + var named []dkgShare + for _, record := range shareRecords { + if bytes.Equal(record.Hash[:], zeroHash[:]) { + // see ocr2vrf for logging + // lggr.Warnw("skipping record with zero hash", + // "player", record.Dealer.String(), + // "hash", hexutil.Encode(record.Hash[:]), + // ) + continue + } + + // XXX: this might be expensive, but is a good sanity check. 
+ localHash := hash.GetHash(record.MarshaledShareRecord) + if !bytes.Equal(record.Hash[:], localHash[:]) { + return fmt.Errorf("local hash doesn't match given hash in record, expected: %x, got: %x", + localHash[:], record.Hash[:]) + } + + var h hash.Hash + if copied := copy(h[:], record.Hash[:]); copied != 32 { + return fmt.Errorf("wrong number of bytes copied in hash (dealer:%s) %x: %d", + record.Dealer.String(), record.Hash[:], copied) + } + + named = append(named, dkgShare{ + ConfigDigest: cfgDgst[:], + KeyID: keyID[:], + Dealer: record.Dealer.Marshal(), + MarshaledShareRecord: record.MarshaledShareRecord, + /* TODO/WTF: can't do "record.Hash[:]": this leads to store the last record's hash for all the records! */ + RecordHash: h[:], + }) + } + + if len(named) == 0 { + lggr.Infow("No valid share records to insert") + return nil + } + + // see ocr2vrf for logging + // lggr.Infow("Inserting DKG shares into DB", + // "shareHashes", shareHashes(shareRecords), + // "numRecords", len(shareRecords), + // "numNamed", len(named)) + + // Always upsert because we want the number of rows in the table to match + // the number of members of the committee. + query := ` +INSERT INTO dkg_shares (config_digest, key_id, dealer, marshaled_share_record, record_hash) +VALUES (:config_digest, :key_id, :dealer, :marshaled_share_record, :record_hash) +ON CONFLICT ON CONSTRAINT dkg_shares_pkey +DO UPDATE SET marshaled_share_record = EXCLUDED.marshaled_share_record, record_hash = EXCLUDED.record_hash +` + return s.q.ExecQNamed(query, named[:]) +} + +// ReadShareRecords retrieves any share records in the database that correspond +// to the provided config digest and DKG key ID. 
+func (s *shareDB) ReadShareRecords( + cfgDgst ocrtypes.ConfigDigest, + keyID [32]byte, +) ( + retrievedShares []ocr2vrftypes.PersistentShareSetRecord, + err error, +) { + lggr := s.lggr.With( + "configDigest", hexutil.Encode(cfgDgst[:]), + "keyID", hexutil.Encode(keyID[:])) + + start := time.Now() + defer func() { + duration := time.Since(start) + promReadShareRecords.WithLabelValues(s.chainType, s.chainID.String()).Observe(float64(duration)) + lggr.Debugw("Finished reading DKG shares from DB", "duration", duration) + }() + + a := map[string]any{ + "config_digest": cfgDgst[:], + "key_id": keyID[:], + } + query, args, err := sqlx.Named( + ` +SELECT * +FROM dkg_shares +WHERE config_digest = :config_digest + AND key_id = :key_id +`, a) + if err != nil { + return nil, errors.Wrap(err, "sqlx Named") + } + query = s.q.Rebind(query) + var dkgShares []dkgShare + err = s.q.Select(&dkgShares, query, args...) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, err + } + + for _, share := range dkgShares { + playerIdx, _, err := ocr2vrftypes.UnmarshalPlayerIdx(share.Dealer) + if err != nil { + return nil, errors.Wrapf(err, "unmarshalling %x", share.Dealer) + } + var h hash.Hash + if copied := copy(h[:], share.RecordHash); copied != 32 { + return nil, fmt.Errorf("wrong number of bytes copied in hash %x: %d", share.RecordHash, copied) + } + + // NOTE: no integrity check on share.MarshaledShareRecord + // because caller will do it anyways, so it'd be wasteful. 
+ retrievedShares = append(retrievedShares, ocr2vrftypes.PersistentShareSetRecord{ + Dealer: *playerIdx, + MarshaledShareRecord: share.MarshaledShareRecord, + Hash: h, + }) + } + + lggr.Debugw("Read DKG shares from DB", + "shareRecords", shareHashes(retrievedShares), + "numRecords", len(dkgShares), + ) + + return retrievedShares, nil +} + +func shareHashes(shareRecords []ocr2vrftypes.PersistentShareSetRecord) []string { + r := make([]string, len(shareRecords)) + for i, record := range shareRecords { + r[i] = hexutil.Encode(record.Hash[:]) + } + return r +} diff --git a/core/services/ocr2/plugins/dkg/persistence/db_test.go b/core/services/ocr2/plugins/dkg/persistence/db_test.go new file mode 100644 index 00000000..33ff5c30 --- /dev/null +++ b/core/services/ocr2/plugins/dkg/persistence/db_test.go @@ -0,0 +1,183 @@ +package persistence + +import ( + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ocr2vrftypes "github.com/goplugin/plugin-vrf/types" + "github.com/goplugin/plugin-vrf/types/hash" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" +) + +func setup(t testing.TB) (ocr2vrftypes.DKGSharePersistence, *sqlx.DB) { + db := pgtest.NewSqlxDB(t) + lggr := logger.TestLogger(t) + return NewShareDB(db, lggr, pgtest.NewQConfig(true), big.NewInt(1337), relay.EVM), db +} + +func TestShareDB_WriteShareRecords(t *testing.T) { + configDigest := testutils.Random32Byte() + keyID := testutils.Random32Byte() + + t.Run("valid input", func(tt *testing.T) { + shareDB, db := setup(tt) + var expectedRecords []ocr2vrftypes.PersistentShareSetRecord + + // Starting from 1 because player indexes must not be 0 + for i := 1; i < 4; i++ { + b := 
ocr2vrftypes.RawMarshalPlayerIdxInt(ocr2vrftypes.PlayerIdxInt(i)) + playerIdx, _, err := ocr2vrftypes.UnmarshalPlayerIdx(b) + require.NoError(t, err) + shareRecord := crypto.Keccak256Hash([]byte(fmt.Sprintf("%d", i))) + shareRecordHash := hash.GetHash(shareRecord[:]) + var h hash.Hash + copy(h[:], shareRecordHash[:]) + rec := ocr2vrftypes.PersistentShareSetRecord{ + Dealer: *playerIdx, + MarshaledShareRecord: shareRecord[:], + Hash: h, + } + expectedRecords = append(expectedRecords, rec) + } + + err := shareDB.WriteShareRecords(testutils.Context(t), configDigest, keyID, expectedRecords) + require.NoError(tt, err) + + rows, err := db.Query(`SELECT COUNT(*) AS count FROM dkg_shares`) + require.NoError(tt, err) + t.Cleanup(func() { assert.NoError(t, rows.Close()) }) + + var count int + for rows.Next() { + require.NoError(tt, rows.Scan(&count)) + } + require.NoError(tt, rows.Err()) + + require.Equal(tt, 3, count) + }) + + t.Run("bad input, zero hash", func(tt *testing.T) { + shareDB, db := setup(tt) + b := ocr2vrftypes.RawMarshalPlayerIdxInt(ocr2vrftypes.PlayerIdxInt(1)) + dealer, _, err := ocr2vrftypes.UnmarshalPlayerIdx(b) + require.NoError(tt, err) + records := []ocr2vrftypes.PersistentShareSetRecord{ + { + Dealer: *dealer, + MarshaledShareRecord: []byte{1}, + Hash: hash.Hash{}, // There's a problem here + }, + } + + // no error, but there will be no rows inserted in the db + err = shareDB.WriteShareRecords(testutils.Context(t), configDigest, keyID, records) + require.NoError(tt, err) + + rows, err := db.Query(`SELECT COUNT(*) AS count FROM dkg_shares`) + require.NoError(tt, err) + t.Cleanup(func() { assert.NoError(t, rows.Close()) }) + + var count int + for rows.Next() { + require.NoError(tt, rows.Scan(&count)) + } + require.NoError(tt, rows.Err()) + + require.Equal(tt, 0, count) + }) + + t.Run("bad input, nonmatching hash", func(tt *testing.T) { + shareDB, db := setup(tt) + var records []ocr2vrftypes.PersistentShareSetRecord + + // Starting from 1 because player 
indexes must not be 0 + for i := 1; i < 4; i++ { + b := ocr2vrftypes.RawMarshalPlayerIdxInt(ocr2vrftypes.PlayerIdxInt(i)) + playerIdx, _, err := ocr2vrftypes.UnmarshalPlayerIdx(b) + require.NoError(t, err) + shareRecord := crypto.Keccak256Hash([]byte(fmt.Sprintf("%d", i))) + // Expected hash is SHA256, not Keccak256. + shareRecordHash := crypto.Keccak256Hash(shareRecord[:]) + var h hash.Hash + copy(h[:], shareRecordHash[:]) + rec := ocr2vrftypes.PersistentShareSetRecord{ + Dealer: *playerIdx, + MarshaledShareRecord: shareRecord[:], + Hash: h, + } + records = append(records, rec) + } + + err := shareDB.WriteShareRecords(testutils.Context(t), configDigest, keyID, records) + require.Error(tt, err) + + // no rows should have been inserted + rows, err := db.Query(`SELECT COUNT(*) AS count FROM dkg_shares`) + require.NoError(tt, err) + t.Cleanup(func() { assert.NoError(t, rows.Close()) }) + + var count int + for rows.Next() { + require.NoError(tt, rows.Scan(&count)) + } + require.NoError(tt, rows.Err()) + + require.Equal(tt, 0, count) + }) +} + +func TestShareDBE2E(t *testing.T) { + shareDB, _ := setup(t) + + // create some fake data to insert and retrieve + configDigest := testutils.Random32Byte() + keyID := testutils.Random32Byte() + var expectedRecords []ocr2vrftypes.PersistentShareSetRecord + expectedRecordsMap := make(map[ocr2vrftypes.PlayerIdx]ocr2vrftypes.PersistentShareSetRecord) + + // Starting from 1 because player indexes must not be 0 + for i := 1; i < 4; i++ { + b := ocr2vrftypes.RawMarshalPlayerIdxInt(ocr2vrftypes.PlayerIdxInt(i)) + playerIdx, _, err := ocr2vrftypes.UnmarshalPlayerIdx(b) + require.NoError(t, err) + shareRecord := crypto.Keccak256Hash([]byte(fmt.Sprintf("%d", i))) + shareRecordHash := hash.GetHash(shareRecord[:]) + var h hash.Hash + copy(h[:], shareRecordHash[:]) + rec := ocr2vrftypes.PersistentShareSetRecord{ + Dealer: *playerIdx, + MarshaledShareRecord: shareRecord[:], + Hash: h, + } + expectedRecords = append(expectedRecords, rec) + 
expectedRecordsMap[*playerIdx] = rec + } + + err := shareDB.WriteShareRecords(testutils.Context(t), configDigest, keyID, expectedRecords) + require.NoError(t, err) + + actualRecords, err := shareDB.ReadShareRecords(configDigest, keyID) + require.NoError(t, err) + + assert.Equal(t, len(expectedRecords), len(actualRecords)) + numAssertions := 0 + for _, actualRecord := range actualRecords { + expectedRecord, ok := expectedRecordsMap[actualRecord.Dealer] + require.True(t, ok) + require.Equal(t, expectedRecord.MarshaledShareRecord, actualRecord.MarshaledShareRecord) + require.Equal(t, expectedRecord.Hash[:], actualRecord.Hash[:]) + numAssertions++ + } + + require.Equal(t, len(expectedRecords), numAssertions) +} diff --git a/core/services/ocr2/plugins/dkg/persistence/models.go b/core/services/ocr2/plugins/dkg/persistence/models.go new file mode 100644 index 00000000..02c63393 --- /dev/null +++ b/core/services/ocr2/plugins/dkg/persistence/models.go @@ -0,0 +1,9 @@ +package persistence + +type dkgShare struct { + ConfigDigest []byte `db:"config_digest"` + KeyID []byte `db:"key_id"` + Dealer []byte `db:"dealer"` + MarshaledShareRecord []byte `db:"marshaled_share_record"` + RecordHash []byte `db:"record_hash"` +} diff --git a/core/services/ocr2/plugins/dkg/plugin.go b/core/services/ocr2/plugins/dkg/plugin.go new file mode 100644 index 00000000..b17a0726 --- /dev/null +++ b/core/services/ocr2/plugins/dkg/plugin.go @@ -0,0 +1,99 @@ +package dkg + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + + "github.com/goplugin/libocr/commontypes" + libocr2 "github.com/goplugin/libocr/offchainreporting2plus" + + "github.com/goplugin/plugin-vrf/altbn_128" + "github.com/goplugin/plugin-vrf/dkg" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/dkg/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/dkg/persistence" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +func NewDKGServices( + jb job.Job, + ocr2Provider evmrelay.DKGProvider, + lggr logger.Logger, + ocrLogger commontypes.Logger, + dkgSignKs keystore.DKGSign, + dkgEncryptKs keystore.DKGEncrypt, + ethClient evmclient.Client, + oracleArgsNoPlugin libocr2.OCR2OracleArgs, + db *sqlx.DB, + qConfig pg.QConfig, + chainID *big.Int, + network relay.Network, +) ([]job.ServiceCtx, error) { + var pluginConfig config.PluginConfig + err := json.Unmarshal(jb.OCR2OracleSpec.PluginConfig.Bytes(), &pluginConfig) + if err != nil { + return nil, errors.Wrap(err, "json unmarshal plugin config") + } + err = config.ValidatePluginConfig(pluginConfig, dkgSignKs, dkgEncryptKs) + if err != nil { + return nil, errors.Wrap(err, "validate plugin config") + } + signKey, err := dkgSignKs.Get(pluginConfig.SigningPublicKey) + if err != nil { + return nil, errors.Wrap(err, "get dkgsign key") + } + encryptKey, err := dkgEncryptKs.Get(pluginConfig.EncryptionPublicKey) + if err != nil { + return nil, errors.Wrap(err, "get dkgencrypt key") + } + onchainDKGClient, err := NewOnchainDKGClient( + jb.OCR2OracleSpec.ContractID, + ethClient) + if err != nil { + return nil, errors.Wrap(err, "new onchain dkg client") + } + onchainContract := dkg.NewOnchainContract(onchainDKGClient, &altbn_128.G2{}) + keyConsumer := newDummyKeyConsumer() + keyID, err := DecodeKeyID(pluginConfig.KeyID) + if err != nil { + return nil, errors.Wrap(err, "decode key ID") + } + shareDB := persistence.NewShareDB(db, lggr.Named("DKGShareDB"), qConfig, chainID, network) + oracleArgsNoPlugin.ReportingPluginFactory = 
dkg.NewReportingPluginFactory( + encryptKey.KyberScalar(), + signKey.KyberScalar(), + keyID, + onchainContract, + ocrLogger, + keyConsumer, + shareDB, + ) + oracle, err := libocr2.NewOracle(oracleArgsNoPlugin) + if err != nil { + return nil, errors.Wrap(err, "error calling NewOracle") + } + return []job.ServiceCtx{ocr2Provider, job.NewServiceAdapter(oracle)}, nil +} + +func DecodeKeyID(val string) (byteArray [32]byte, err error) { + decoded, err := hex.DecodeString(val) + if err != nil { + return [32]byte{}, errors.Wrap(err, "hex decode string") + } + if len(decoded) != 32 { + return [32]byte{}, fmt.Errorf("expected value to be 32 bytes but received %d bytes", len(decoded)) + } + copy(byteArray[:], decoded) + return +} diff --git a/core/services/ocr2/plugins/functions/aggregation.go b/core/services/ocr2/plugins/functions/aggregation.go new file mode 100644 index 00000000..2e149b55 --- /dev/null +++ b/core/services/ocr2/plugins/functions/aggregation.go @@ -0,0 +1,119 @@ +package functions + +import ( + "bytes" + "encoding/binary" + "fmt" + "sort" + + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/encoding" +) + +func CanAggregate(N int, F int, observations []*encoding.ProcessedRequest) bool { + return N > 0 && F >= 0 && len(observations) > 0 && len(observations) <= N && len(observations) >= 2*F+1 +} + +func Aggregate(aggMethod config.AggregationMethod, observations []*encoding.ProcessedRequest) (*encoding.ProcessedRequest, error) { + if len(observations) == 0 { + return nil, fmt.Errorf("empty observation list passed for aggregation") + } + var errored []*encoding.ProcessedRequest + var successful []*encoding.ProcessedRequest + reqId := observations[0].RequestID + finalResult := encoding.ProcessedRequest{ + RequestID: reqId, + Result: []byte{}, + Error: []byte{}, + } + for _, obs := range observations { + if !bytes.Equal(obs.RequestID, reqId) { + return nil, 
fmt.Errorf("inconsistent request IDs in aggregated observations %v vs %v", formatRequestId(obs.RequestID), formatRequestId(reqId)) + } + if obs.GetError() != nil && len(obs.GetError()) > 0 { + errored = append(errored, obs) + } else { + successful = append(successful, obs) + } + } + resultIsError := len(errored) > len(successful) + var toAggregate []*encoding.ProcessedRequest + var rawData [][]byte + if resultIsError { + toAggregate = errored + for _, item := range errored { + rawData = append(rawData, item.Error) + } + } else { + toAggregate = successful + for _, item := range successful { + rawData = append(rawData, item.Result) + } + } + // Metadata (CallbackGasLimit, CoordinatorContract and OnchainMetadata) is aggregated using MODE method + finalResult.CallbackGasLimit, finalResult.CoordinatorContract, finalResult.OnchainMetadata = aggregateMetadata(toAggregate) + if resultIsError { + // Errors are always aggregated using MODE method + finalResult.Error = aggregateMode(rawData) + } else { + switch aggMethod { + case config.AggregationMethod_AGGREGATION_MODE: + finalResult.Result = aggregateMode(rawData) + case config.AggregationMethod_AGGREGATION_MEDIAN: + finalResult.Result = aggregateMedian(rawData) + default: + return nil, fmt.Errorf("unsupported aggregation method: %s", aggMethod) + } + } + return &finalResult, nil +} + +func aggregateMetadata(items []*encoding.ProcessedRequest) (uint32, []byte, []byte) { + gasLimitBytes := make([][]byte, len(items)) + coordinatorContracts := make([][]byte, len(items)) + onchainMetadata := make([][]byte, len(items)) + for i, item := range items { + gasLimitBytes[i] = make([]byte, 4) + binary.BigEndian.PutUint32(gasLimitBytes[i], item.CallbackGasLimit) + coordinatorContracts[i] = item.CoordinatorContract + if coordinatorContracts[i] == nil { + coordinatorContracts[i] = []byte{} + } + onchainMetadata[i] = item.OnchainMetadata + if onchainMetadata[i] == nil { + onchainMetadata[i] = []byte{} + } + } + aggGasLimitBytes := 
aggregateMode(gasLimitBytes) + aggGasLimitUint32 := binary.BigEndian.Uint32(aggGasLimitBytes) + aggCoordinatorContract := aggregateMode(coordinatorContracts) + aggOnchainMetadata := aggregateMode(onchainMetadata) + return aggGasLimitUint32, aggCoordinatorContract, aggOnchainMetadata +} + +func aggregateMode(items [][]byte) []byte { + counts := make(map[string]int) + var mostFrequent []byte + highestFreq := 0 + for _, item := range items { + str := string(item) + currCount := counts[str] + 1 + counts[str] = currCount + if currCount > highestFreq { + highestFreq = currCount + mostFrequent = item + } + } + return mostFrequent +} + +func aggregateMedian(items [][]byte) []byte { + sort.Slice(items, func(i, j int) bool { + if len(items[i]) != len(items[j]) { + // NOTE: this doesn't account for extra leading zeros + return len(items[i]) < len(items[j]) + } + return bytes.Compare(items[i], items[j]) < 0 + }) + return items[(len(items)-1)/2] +} diff --git a/core/services/ocr2/plugins/functions/aggregation_test.go b/core/services/ocr2/plugins/functions/aggregation_test.go new file mode 100644 index 00000000..c0a39c7e --- /dev/null +++ b/core/services/ocr2/plugins/functions/aggregation_test.go @@ -0,0 +1,151 @@ +package functions_test + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/encoding" +) + +func req(id int, result []byte, err []byte) *encoding.ProcessedRequest { + return &encoding.ProcessedRequest{ + RequestID: []byte(strconv.Itoa(id)), + Result: result, + Error: err, + CallbackGasLimit: 0, + CoordinatorContract: []byte{}, + OnchainMetadata: []byte{}, + } +} + +func reqS(id int, result string, err string) *encoding.ProcessedRequest { + return req(id, []byte(result), []byte(err)) +} + +func reqMeta(id int, 
result []byte, err []byte, callbackGas uint32, coordinatorContract []byte, onchainMeta []byte) *encoding.ProcessedRequest { + return &encoding.ProcessedRequest{ + RequestID: []byte(strconv.Itoa(id)), + Result: result, + Error: err, + CallbackGasLimit: callbackGas, + CoordinatorContract: coordinatorContract, + OnchainMetadata: onchainMeta, + } +} + +func TestCanAggregate(t *testing.T) { + t.Parallel() + obs := make([]*encoding.ProcessedRequest, 10) + + require.True(t, functions.CanAggregate(4, 1, obs[:4])) + require.True(t, functions.CanAggregate(4, 1, obs[:3])) + require.True(t, functions.CanAggregate(6, 1, obs[:3])) + + require.False(t, functions.CanAggregate(4, 1, obs[:5])) + require.False(t, functions.CanAggregate(4, 1, obs[:2])) + require.False(t, functions.CanAggregate(4, 1, obs[:0])) + require.False(t, functions.CanAggregate(0, 0, obs[:0])) +} + +func TestAggregate_Successful(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + mode config.AggregationMethod + input []*encoding.ProcessedRequest + expected *encoding.ProcessedRequest + }{ + { + "Mode", + config.AggregationMethod_AGGREGATION_MODE, + []*encoding.ProcessedRequest{ + reqS(21, "ab", ""), + reqS(21, "abcd", ""), + reqS(21, "cd", ""), + reqS(21, "abcd", ""), + }, + reqS(21, "abcd", ""), + }, + { + "Errors", + config.AggregationMethod_AGGREGATION_MEDIAN, + []*encoding.ProcessedRequest{ + reqS(21, "", "bug"), + reqS(21, "", "compile error"), + reqS(21, "", "bug"), + }, + reqS(21, "", "bug"), + }, + { + "Median Odd", + config.AggregationMethod_AGGREGATION_MEDIAN, + // NOTE: binary values of those strings represent different integers + // but they still should be sorted correctly + []*encoding.ProcessedRequest{ + reqS(21, "7", ""), + reqS(21, "101", ""), + reqS(21, "8", ""), + reqS(21, "19", ""), + reqS(21, "10", ""), + }, + reqS(21, "10", ""), + }, + { + "Median Even", + config.AggregationMethod_AGGREGATION_MEDIAN, + []*encoding.ProcessedRequest{ + req(21, []byte{9, 200, 2}, []byte{}), + 
req(21, []byte{9, 11}, []byte{}), + req(21, []byte{5, 100}, []byte{}), + req(21, []byte{12, 2}, []byte{}), + }, + req(21, []byte{9, 11}, []byte{}), + }, + { + "Median Even Aligned", + config.AggregationMethod_AGGREGATION_MEDIAN, + []*encoding.ProcessedRequest{ + req(21, []byte{0, 9, 200, 2}, []byte{}), + req(21, []byte{0, 0, 9, 11}, []byte{}), + req(21, []byte{0, 0, 5, 100}, []byte{}), + req(21, []byte{0, 0, 12, 2}, []byte{}), + }, + req(21, []byte{0, 0, 9, 11}, []byte{}), + }, + { + "Metadata With Results", + config.AggregationMethod_AGGREGATION_MEDIAN, + []*encoding.ProcessedRequest{ + reqMeta(21, []byte{1}, []byte{}, 100, []byte{2}, []byte{4}), + reqMeta(21, []byte{1}, []byte{}, 90, []byte{2}, []byte{4}), + reqMeta(21, []byte{1}, []byte{}, 100, []byte{0}, []byte{4}), + reqMeta(21, []byte{1}, []byte{}, 100, []byte{2}, []byte{1}), + }, + reqMeta(21, []byte{1}, []byte{}, 100, []byte{2}, []byte{4}), + }, + { + "Metadata With Errors", + config.AggregationMethod_AGGREGATION_MEDIAN, + []*encoding.ProcessedRequest{ + reqMeta(21, []byte{}, []byte{2}, 90, []byte{0}, []byte{4}), + reqMeta(21, []byte{}, []byte{2}, 100, []byte{2}, []byte{4}), + reqMeta(21, []byte{}, []byte{2}, 100, []byte{2}, []byte{1}), + reqMeta(21, []byte{}, []byte{2}, 100, []byte{2}, []byte{4}), + }, + reqMeta(21, []byte{}, []byte{2}, 100, []byte{2}, []byte{4}), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result, err := functions.Aggregate(test.mode, test.input) + require.NoError(t, err) + require.Equal(t, test.expected, result) + }) + } +} diff --git a/core/services/ocr2/plugins/functions/config/config.go b/core/services/ocr2/plugins/functions/config/config.go new file mode 100644 index 00000000..b6a183c8 --- /dev/null +++ b/core/services/ocr2/plugins/functions/config/config.go @@ -0,0 +1,154 @@ +package config + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/proto" + + decryptionPluginConfig "github.com/goplugin/tdh2/go/ocr2/decryptionplugin/config" 
+ + "github.com/goplugin/libocr/offchainreporting2/types" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/connector" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/common" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/allowlist" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/subscriptions" + s4PluginConfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/s4" + "github.com/goplugin/pluginv3.0/v2/core/services/s4" +) + +// This config is part of the job spec and is loaded only once on node boot/job creation. +type PluginConfig struct { + EnableRequestSignatureCheck bool `json:"enableRequestSignatureCheck"` + DONID string `json:"donID"` + ContractVersion uint32 `json:"contractVersion"` + MinRequestConfirmations uint32 `json:"minRequestConfirmations"` + MinResponseConfirmations uint32 `json:"minResponseConfirmations"` + MinIncomingConfirmations uint32 `json:"minIncomingConfirmations"` + PastBlocksToPoll uint32 `json:"pastBlocksToPoll"` + LogPollerCacheDurationSec uint32 `json:"logPollerCacheDurationSec"` // Duration to cache previously detected request or response logs such that they can be filtered when calling logpoller_wrapper.LatestEvents() + RequestTimeoutSec uint32 `json:"requestTimeoutSec"` + RequestTimeoutCheckFrequencySec uint32 `json:"requestTimeoutCheckFrequencySec"` + RequestTimeoutBatchLookupSize uint32 `json:"requestTimeoutBatchLookupSize"` + PruneMaxStoredRequests uint32 `json:"pruneMaxStoredRequests"` + PruneCheckFrequencySec uint32 `json:"pruneCheckFrequencySec"` + PruneBatchSize uint32 `json:"pruneBatchSize"` + ListenerEventHandlerTimeoutSec uint32 `json:"listenerEventHandlerTimeoutSec"` + ListenerEventsCheckFrequencyMillis uint32 `json:"listenerEventsCheckFrequencyMillis"` + ContractUpdateCheckFrequencySec uint32 `json:"contractUpdateCheckFrequencySec"` + MaxRequestSizeBytes uint32 
`json:"maxRequestSizeBytes"` + MaxRequestSizesList []uint32 `json:"maxRequestSizesList"` + MaxSecretsSizesList []uint32 `json:"maxSecretsSizesList"` + MinimumSubscriptionBalance assets.Link `json:"minimumSubscriptionBalance"` + AllowedHeartbeatInitiators []string `json:"allowedHeartbeatInitiators"` + GatewayConnectorConfig *connector.ConnectorConfig `json:"gatewayConnectorConfig"` + OnchainAllowlist *allowlist.OnchainAllowlistConfig `json:"onchainAllowlist"` + OnchainSubscriptions *subscriptions.OnchainSubscriptionsConfig `json:"onchainSubscriptions"` + RateLimiter *common.RateLimiterConfig `json:"rateLimiter"` + S4Constraints *s4.Constraints `json:"s4Constraints"` + DecryptionQueueConfig *DecryptionQueueConfig `json:"decryptionQueueConfig"` +} + +type DecryptionQueueConfig struct { + MaxQueueLength uint32 `json:"maxQueueLength"` + MaxCiphertextBytes uint32 `json:"maxCiphertextBytes"` + MaxCiphertextIdLength uint32 `json:"maxCiphertextIdLength"` + CompletedCacheTimeoutSec uint32 `json:"completedCacheTimeoutSec"` + DecryptRequestTimeoutSec uint32 `json:"decryptRequestTimeoutSec"` +} + +func ValidatePluginConfig(config PluginConfig) error { + if config.DecryptionQueueConfig != nil { + if config.DecryptionQueueConfig.MaxQueueLength <= 0 { + return errors.New("missing or invalid decryptionQueueConfig maxQueueLength") + } + if config.DecryptionQueueConfig.MaxCiphertextBytes <= 0 { + return errors.New("missing or invalid decryptionQueueConfig maxCiphertextBytes") + } + if config.DecryptionQueueConfig.MaxCiphertextIdLength <= 0 { + return errors.New("missing or invalid decryptionQueueConfig maxCiphertextIdLength") + } + if config.DecryptionQueueConfig.CompletedCacheTimeoutSec <= 0 { + return errors.New("missing or invalid decryptionQueueConfig completedCacheTimeoutSec") + } + if config.DecryptionQueueConfig.DecryptRequestTimeoutSec <= 0 { + return errors.New("missing or invalid decryptionQueueConfig decryptRequestTimeoutSec") + } + } + return nil +} + +// This config is 
stored in the Oracle contract (set via SetConfig()). +// Every SetConfig() call reloads the reporting plugin (FunctionsReportingPluginFactory.NewReportingPlugin()) +type ReportingPluginConfigWrapper struct { + Config *ReportingPluginConfig +} + +func DecodeReportingPluginConfig(raw []byte) (*ReportingPluginConfigWrapper, error) { + configProto := &ReportingPluginConfig{} + err := proto.Unmarshal(raw, configProto) + if err != nil { + return nil, err + } + return &ReportingPluginConfigWrapper{Config: configProto}, nil +} + +func EncodeReportingPluginConfig(rpConfig *ReportingPluginConfigWrapper) ([]byte, error) { + return proto.Marshal(rpConfig.Config) +} + +var _ decryptionPluginConfig.ConfigParser = &ThresholdConfigParser{} + +type ThresholdConfigParser struct{} + +func (ThresholdConfigParser) ParseConfig(config []byte) (*decryptionPluginConfig.ReportingPluginConfigWrapper, error) { + reportingPluginConfigWrapper, err := DecodeReportingPluginConfig(config) + if err != nil { + return nil, errors.New("failed to decode Functions Threshold plugin config") + } + thresholdPluginConfig := reportingPluginConfigWrapper.Config.ThresholdPluginConfig + + if thresholdPluginConfig == nil { + return nil, fmt.Errorf("PluginConfig bytes %x did not contain threshold plugin config", config) + } + + return &decryptionPluginConfig.ReportingPluginConfigWrapper{ + Config: &decryptionPluginConfig.ReportingPluginConfig{ + MaxQueryLengthBytes: thresholdPluginConfig.MaxQueryLengthBytes, + MaxObservationLengthBytes: thresholdPluginConfig.MaxObservationLengthBytes, + MaxReportLengthBytes: thresholdPluginConfig.MaxReportLengthBytes, + RequestCountLimit: thresholdPluginConfig.RequestCountLimit, + RequestTotalBytesLimit: thresholdPluginConfig.RequestTotalBytesLimit, + RequireLocalRequestCheck: thresholdPluginConfig.RequireLocalRequestCheck, + K: thresholdPluginConfig.K, + }, + }, nil +} + +func S4ConfigDecoder(config []byte) (*s4PluginConfig.PluginConfig, *types.ReportingPluginLimits, error) { + 
reportingPluginConfigWrapper, err := DecodeReportingPluginConfig(config) + if err != nil { + return nil, nil, errors.New("failed to decode S4 plugin config") + } + + pluginConfig := reportingPluginConfigWrapper.Config.S4PluginConfig + if pluginConfig == nil { + return nil, nil, fmt.Errorf("PluginConfig bytes %x did not contain s4 plugin config", config) + } + + return &s4PluginConfig.PluginConfig{ + ProductName: "functions", + NSnapshotShards: uint(pluginConfig.NSnapshotShards), + MaxObservationEntries: uint(pluginConfig.MaxObservationEntries), + MaxReportEntries: uint(pluginConfig.MaxReportEntries), + MaxDeleteExpiredEntries: uint(pluginConfig.MaxDeleteExpiredEntries), + }, + &types.ReportingPluginLimits{ + MaxQueryLength: int(pluginConfig.MaxQueryLengthBytes), + MaxObservationLength: int(pluginConfig.MaxObservationLengthBytes), + MaxReportLength: int(pluginConfig.MaxReportLengthBytes), + }, + nil +} diff --git a/core/services/ocr2/plugins/functions/config/config_test.go b/core/services/ocr2/plugins/functions/config/config_test.go new file mode 100644 index 00000000..cc814e20 --- /dev/null +++ b/core/services/ocr2/plugins/functions/config/config_test.go @@ -0,0 +1,41 @@ +package config_test + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +func TestS4ConfigDecoder(t *testing.T) { + t.Parallel() + + configProto := &config.ReportingPluginConfig{ + S4PluginConfig: &config.S4ReportingPluginConfig{ + MaxQueryLengthBytes: 100, + MaxObservationLengthBytes: 200, + MaxReportLengthBytes: 300, + NSnapshotShards: 1, + MaxObservationEntries: 111, + MaxReportEntries: 222, + MaxDeleteExpiredEntries: 333, + }, + } + + configBytes, err := proto.Marshal(configProto) + require.NoError(t, err) + + config, limits, err := config.S4ConfigDecoder(configBytes) + require.NoError(t, err) + assert.Equal(t, 
"functions", config.ProductName) + assert.Equal(t, uint(1), config.NSnapshotShards) + assert.Equal(t, uint(111), config.MaxObservationEntries) + assert.Equal(t, uint(222), config.MaxReportEntries) + assert.Equal(t, uint(333), config.MaxDeleteExpiredEntries) + assert.Equal(t, 100, limits.MaxQueryLength) + assert.Equal(t, 200, limits.MaxObservationLength) + assert.Equal(t, 300, limits.MaxReportLength) +} diff --git a/core/services/ocr2/plugins/functions/config/config_types.pb.go b/core/services/ocr2/plugins/functions/config/config_types.pb.go new file mode 100644 index 00000000..b9debdcd --- /dev/null +++ b/core/services/ocr2/plugins/functions/config/config_types.pb.go @@ -0,0 +1,575 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.8 +// source: core/services/ocr2/plugins/functions/config/config_types.proto + +package config + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AggregationMethod int32 + +const ( + AggregationMethod_AGGREGATION_MODE AggregationMethod = 0 + AggregationMethod_AGGREGATION_MEDIAN AggregationMethod = 1 +) + +// Enum value maps for AggregationMethod. 
+var ( + AggregationMethod_name = map[int32]string{ + 0: "AGGREGATION_MODE", + 1: "AGGREGATION_MEDIAN", + } + AggregationMethod_value = map[string]int32{ + "AGGREGATION_MODE": 0, + "AGGREGATION_MEDIAN": 1, + } +) + +func (x AggregationMethod) Enum() *AggregationMethod { + p := new(AggregationMethod) + *p = x + return p +} + +func (x AggregationMethod) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AggregationMethod) Descriptor() protoreflect.EnumDescriptor { + return file_core_services_ocr2_plugins_functions_config_config_types_proto_enumTypes[0].Descriptor() +} + +func (AggregationMethod) Type() protoreflect.EnumType { + return &file_core_services_ocr2_plugins_functions_config_config_types_proto_enumTypes[0] +} + +func (x AggregationMethod) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AggregationMethod.Descriptor instead. +func (AggregationMethod) EnumDescriptor() ([]byte, []int) { + return file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDescGZIP(), []int{0} +} + +// Has to match the corresponding proto in tdh2. 
+type ThresholdReportingPluginConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MaxQueryLengthBytes uint32 `protobuf:"varint,1,opt,name=max_query_length_bytes,json=maxQueryLengthBytes,proto3" json:"max_query_length_bytes,omitempty"` + MaxObservationLengthBytes uint32 `protobuf:"varint,2,opt,name=max_observation_length_bytes,json=maxObservationLengthBytes,proto3" json:"max_observation_length_bytes,omitempty"` + MaxReportLengthBytes uint32 `protobuf:"varint,3,opt,name=max_report_length_bytes,json=maxReportLengthBytes,proto3" json:"max_report_length_bytes,omitempty"` + RequestCountLimit uint32 `protobuf:"varint,4,opt,name=request_count_limit,json=requestCountLimit,proto3" json:"request_count_limit,omitempty"` + RequestTotalBytesLimit uint32 `protobuf:"varint,5,opt,name=request_total_bytes_limit,json=requestTotalBytesLimit,proto3" json:"request_total_bytes_limit,omitempty"` + RequireLocalRequestCheck bool `protobuf:"varint,6,opt,name=require_local_request_check,json=requireLocalRequestCheck,proto3" json:"require_local_request_check,omitempty"` + K uint32 `protobuf:"varint,7,opt,name=k,proto3" json:"k,omitempty"` // Number of decryption shares required for assembling plaintext. 
+} + +func (x *ThresholdReportingPluginConfig) Reset() { + *x = ThresholdReportingPluginConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_core_services_ocr2_plugins_functions_config_config_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ThresholdReportingPluginConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ThresholdReportingPluginConfig) ProtoMessage() {} + +func (x *ThresholdReportingPluginConfig) ProtoReflect() protoreflect.Message { + mi := &file_core_services_ocr2_plugins_functions_config_config_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ThresholdReportingPluginConfig.ProtoReflect.Descriptor instead. +func (*ThresholdReportingPluginConfig) Descriptor() ([]byte, []int) { + return file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDescGZIP(), []int{0} +} + +func (x *ThresholdReportingPluginConfig) GetMaxQueryLengthBytes() uint32 { + if x != nil { + return x.MaxQueryLengthBytes + } + return 0 +} + +func (x *ThresholdReportingPluginConfig) GetMaxObservationLengthBytes() uint32 { + if x != nil { + return x.MaxObservationLengthBytes + } + return 0 +} + +func (x *ThresholdReportingPluginConfig) GetMaxReportLengthBytes() uint32 { + if x != nil { + return x.MaxReportLengthBytes + } + return 0 +} + +func (x *ThresholdReportingPluginConfig) GetRequestCountLimit() uint32 { + if x != nil { + return x.RequestCountLimit + } + return 0 +} + +func (x *ThresholdReportingPluginConfig) GetRequestTotalBytesLimit() uint32 { + if x != nil { + return x.RequestTotalBytesLimit + } + return 0 +} + +func (x *ThresholdReportingPluginConfig) GetRequireLocalRequestCheck() bool { + if x != nil { + return x.RequireLocalRequestCheck + } + return false +} 
+ +func (x *ThresholdReportingPluginConfig) GetK() uint32 { + if x != nil { + return x.K + } + return 0 +} + +type S4ReportingPluginConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MaxQueryLengthBytes uint32 `protobuf:"varint,1,opt,name=max_query_length_bytes,json=maxQueryLengthBytes,proto3" json:"max_query_length_bytes,omitempty"` + MaxObservationLengthBytes uint32 `protobuf:"varint,2,opt,name=max_observation_length_bytes,json=maxObservationLengthBytes,proto3" json:"max_observation_length_bytes,omitempty"` + MaxReportLengthBytes uint32 `protobuf:"varint,3,opt,name=max_report_length_bytes,json=maxReportLengthBytes,proto3" json:"max_report_length_bytes,omitempty"` + NSnapshotShards uint32 `protobuf:"varint,4,opt,name=n_snapshot_shards,json=nSnapshotShards,proto3" json:"n_snapshot_shards,omitempty"` + MaxObservationEntries uint32 `protobuf:"varint,5,opt,name=max_observation_entries,json=maxObservationEntries,proto3" json:"max_observation_entries,omitempty"` + MaxReportEntries uint32 `protobuf:"varint,6,opt,name=max_report_entries,json=maxReportEntries,proto3" json:"max_report_entries,omitempty"` + MaxDeleteExpiredEntries uint32 `protobuf:"varint,7,opt,name=max_delete_expired_entries,json=maxDeleteExpiredEntries,proto3" json:"max_delete_expired_entries,omitempty"` +} + +func (x *S4ReportingPluginConfig) Reset() { + *x = S4ReportingPluginConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_core_services_ocr2_plugins_functions_config_config_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S4ReportingPluginConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S4ReportingPluginConfig) ProtoMessage() {} + +func (x *S4ReportingPluginConfig) ProtoReflect() protoreflect.Message { + mi := &file_core_services_ocr2_plugins_functions_config_config_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x 
!= nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S4ReportingPluginConfig.ProtoReflect.Descriptor instead. +func (*S4ReportingPluginConfig) Descriptor() ([]byte, []int) { + return file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDescGZIP(), []int{1} +} + +func (x *S4ReportingPluginConfig) GetMaxQueryLengthBytes() uint32 { + if x != nil { + return x.MaxQueryLengthBytes + } + return 0 +} + +func (x *S4ReportingPluginConfig) GetMaxObservationLengthBytes() uint32 { + if x != nil { + return x.MaxObservationLengthBytes + } + return 0 +} + +func (x *S4ReportingPluginConfig) GetMaxReportLengthBytes() uint32 { + if x != nil { + return x.MaxReportLengthBytes + } + return 0 +} + +func (x *S4ReportingPluginConfig) GetNSnapshotShards() uint32 { + if x != nil { + return x.NSnapshotShards + } + return 0 +} + +func (x *S4ReportingPluginConfig) GetMaxObservationEntries() uint32 { + if x != nil { + return x.MaxObservationEntries + } + return 0 +} + +func (x *S4ReportingPluginConfig) GetMaxReportEntries() uint32 { + if x != nil { + return x.MaxReportEntries + } + return 0 +} + +func (x *S4ReportingPluginConfig) GetMaxDeleteExpiredEntries() uint32 { + if x != nil { + return x.MaxDeleteExpiredEntries + } + return 0 +} + +type ReportingPluginConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MaxQueryLengthBytes uint32 `protobuf:"varint,1,opt,name=maxQueryLengthBytes,proto3" json:"maxQueryLengthBytes,omitempty"` + MaxObservationLengthBytes uint32 `protobuf:"varint,2,opt,name=maxObservationLengthBytes,proto3" json:"maxObservationLengthBytes,omitempty"` + MaxReportLengthBytes uint32 `protobuf:"varint,3,opt,name=maxReportLengthBytes,proto3" json:"maxReportLengthBytes,omitempty"` + MaxRequestBatchSize uint32 
`protobuf:"varint,4,opt,name=maxRequestBatchSize,proto3" json:"maxRequestBatchSize,omitempty"` + DefaultAggregationMethod AggregationMethod `protobuf:"varint,5,opt,name=defaultAggregationMethod,proto3,enum=functions_config_types.AggregationMethod" json:"defaultAggregationMethod,omitempty"` + UniqueReports bool `protobuf:"varint,6,opt,name=uniqueReports,proto3" json:"uniqueReports,omitempty"` + ThresholdPluginConfig *ThresholdReportingPluginConfig `protobuf:"bytes,7,opt,name=thresholdPluginConfig,proto3" json:"thresholdPluginConfig,omitempty"` + S4PluginConfig *S4ReportingPluginConfig `protobuf:"bytes,8,opt,name=s4PluginConfig,proto3" json:"s4PluginConfig,omitempty"` + // Needs to be set in tandem with gas estimator (e.g. [EVM.GasEstimator.LimitJobType] OCR = ) + // otherwise the report won't go through TX Manager or fail later. + MaxReportTotalCallbackGas uint32 `protobuf:"varint,9,opt,name=maxReportTotalCallbackGas,proto3" json:"maxReportTotalCallbackGas,omitempty"` +} + +func (x *ReportingPluginConfig) Reset() { + *x = ReportingPluginConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_core_services_ocr2_plugins_functions_config_config_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReportingPluginConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReportingPluginConfig) ProtoMessage() {} + +func (x *ReportingPluginConfig) ProtoReflect() protoreflect.Message { + mi := &file_core_services_ocr2_plugins_functions_config_config_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReportingPluginConfig.ProtoReflect.Descriptor instead. 
+func (*ReportingPluginConfig) Descriptor() ([]byte, []int) { + return file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDescGZIP(), []int{2} +} + +func (x *ReportingPluginConfig) GetMaxQueryLengthBytes() uint32 { + if x != nil { + return x.MaxQueryLengthBytes + } + return 0 +} + +func (x *ReportingPluginConfig) GetMaxObservationLengthBytes() uint32 { + if x != nil { + return x.MaxObservationLengthBytes + } + return 0 +} + +func (x *ReportingPluginConfig) GetMaxReportLengthBytes() uint32 { + if x != nil { + return x.MaxReportLengthBytes + } + return 0 +} + +func (x *ReportingPluginConfig) GetMaxRequestBatchSize() uint32 { + if x != nil { + return x.MaxRequestBatchSize + } + return 0 +} + +func (x *ReportingPluginConfig) GetDefaultAggregationMethod() AggregationMethod { + if x != nil { + return x.DefaultAggregationMethod + } + return AggregationMethod_AGGREGATION_MODE +} + +func (x *ReportingPluginConfig) GetUniqueReports() bool { + if x != nil { + return x.UniqueReports + } + return false +} + +func (x *ReportingPluginConfig) GetThresholdPluginConfig() *ThresholdReportingPluginConfig { + if x != nil { + return x.ThresholdPluginConfig + } + return nil +} + +func (x *ReportingPluginConfig) GetS4PluginConfig() *S4ReportingPluginConfig { + if x != nil { + return x.S4PluginConfig + } + return nil +} + +func (x *ReportingPluginConfig) GetMaxReportTotalCallbackGas() uint32 { + if x != nil { + return x.MaxReportTotalCallbackGas + } + return 0 +} + +var File_core_services_ocr2_plugins_functions_config_config_types_proto protoreflect.FileDescriptor + +var file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDesc = []byte{ + 0x0a, 0x3e, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x6f, 0x63, 0x72, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 
0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x16, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x85, 0x03, 0x0a, 0x1e, 0x54, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x50, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x33, 0x0a, 0x16, 0x6d, + 0x61, 0x78, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x6d, 0x61, 0x78, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x12, 0x3f, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x19, 0x6d, 0x61, 0x78, 0x4f, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x39, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 
0x0d, 0x52, 0x16, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x12, 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x65, + 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x01, 0x6b, + 0x22, 0x95, 0x03, 0x0a, 0x17, 0x53, 0x34, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x33, 0x0a, 0x16, + 0x6d, 0x61, 0x78, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x6d, 0x61, + 0x78, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x19, 0x6d, 0x61, 0x78, 0x4f, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6e, 0x5f, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x04, + 
0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x6e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x4f, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x0a, + 0x12, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6d, 0x61, 0x78, 0x52, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x6d, + 0x61, 0x78, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, + 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x17, 0x6d, 0x61, 0x78, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, + 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0xff, 0x04, 0x0a, 0x15, 0x52, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x30, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x13, 0x6d, 0x61, 0x78, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x12, 0x3c, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x4f, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x19, 0x6d, 0x61, 0x78, 0x4f, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 
0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x14, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x65, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x52, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x41, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x24, 0x0a, 0x0d, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x52, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x6c, 0x0a, 0x15, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x54, 0x68, + 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x52, 0x15, 0x74, 0x68, + 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x57, 0x0a, 0x0e, 0x73, 0x34, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x34, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x73, 0x34, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x19, + 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x43, 0x61, + 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x47, 0x61, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x19, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x43, + 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x47, 0x61, 0x73, 0x2a, 0x41, 0x0a, 0x11, 0x41, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x14, 0x0a, 0x10, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, + 0x4f, 0x44, 0x45, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x45, 0x44, 0x49, 0x41, 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x5a, + 0x2b, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x6f, + 0x63, 0x72, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDescOnce sync.Once + file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDescData 
= file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDesc +) + +func file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDescGZIP() []byte { + file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDescOnce.Do(func() { + file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDescData) + }) + return file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDescData +} + +var file_core_services_ocr2_plugins_functions_config_config_types_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_core_services_ocr2_plugins_functions_config_config_types_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_core_services_ocr2_plugins_functions_config_config_types_proto_goTypes = []interface{}{ + (AggregationMethod)(0), // 0: functions_config_types.AggregationMethod + (*ThresholdReportingPluginConfig)(nil), // 1: functions_config_types.ThresholdReportingPluginConfig + (*S4ReportingPluginConfig)(nil), // 2: functions_config_types.S4ReportingPluginConfig + (*ReportingPluginConfig)(nil), // 3: functions_config_types.ReportingPluginConfig +} +var file_core_services_ocr2_plugins_functions_config_config_types_proto_depIdxs = []int32{ + 0, // 0: functions_config_types.ReportingPluginConfig.defaultAggregationMethod:type_name -> functions_config_types.AggregationMethod + 1, // 1: functions_config_types.ReportingPluginConfig.thresholdPluginConfig:type_name -> functions_config_types.ThresholdReportingPluginConfig + 2, // 2: functions_config_types.ReportingPluginConfig.s4PluginConfig:type_name -> functions_config_types.S4ReportingPluginConfig + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field 
type_name +} + +func init() { file_core_services_ocr2_plugins_functions_config_config_types_proto_init() } +func file_core_services_ocr2_plugins_functions_config_config_types_proto_init() { + if File_core_services_ocr2_plugins_functions_config_config_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_core_services_ocr2_plugins_functions_config_config_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ThresholdReportingPluginConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_core_services_ocr2_plugins_functions_config_config_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S4ReportingPluginConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_core_services_ocr2_plugins_functions_config_config_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReportingPluginConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDesc, + NumEnums: 1, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_core_services_ocr2_plugins_functions_config_config_types_proto_goTypes, + DependencyIndexes: file_core_services_ocr2_plugins_functions_config_config_types_proto_depIdxs, + EnumInfos: file_core_services_ocr2_plugins_functions_config_config_types_proto_enumTypes, + MessageInfos: file_core_services_ocr2_plugins_functions_config_config_types_proto_msgTypes, + }.Build() + 
File_core_services_ocr2_plugins_functions_config_config_types_proto = out.File + file_core_services_ocr2_plugins_functions_config_config_types_proto_rawDesc = nil + file_core_services_ocr2_plugins_functions_config_config_types_proto_goTypes = nil + file_core_services_ocr2_plugins_functions_config_config_types_proto_depIdxs = nil +} diff --git a/core/services/ocr2/plugins/functions/config/config_types.proto b/core/services/ocr2/plugins/functions/config/config_types.proto new file mode 100644 index 00000000..c0e58e45 --- /dev/null +++ b/core/services/ocr2/plugins/functions/config/config_types.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +option go_package = "core/services/ocr2/plugins/functions/config"; + +package functions_config_types; + +// WARNING: Any changes to protobufs must also be reflected in Gauntlet which is used to generate the OCR plugin config for production deployments + +enum AggregationMethod { + AGGREGATION_MODE = 0; + AGGREGATION_MEDIAN = 1; +} + +// Has to match the corresponding proto in tdh2. +message ThresholdReportingPluginConfig { + uint32 max_query_length_bytes = 1; + uint32 max_observation_length_bytes = 2; + uint32 max_report_length_bytes = 3; + uint32 request_count_limit = 4; + uint32 request_total_bytes_limit = 5; + bool require_local_request_check = 6; + uint32 k = 7; // Number of decryption shares required for assembling plaintext. 
+}
+
+message S4ReportingPluginConfig {
+  uint32 max_query_length_bytes = 1;
+  uint32 max_observation_length_bytes = 2;
+  uint32 max_report_length_bytes = 3;
+  uint32 n_snapshot_shards = 4;
+  uint32 max_observation_entries = 5;
+  uint32 max_report_entries = 6;
+  uint32 max_delete_expired_entries = 7;
+}
+
+message ReportingPluginConfig {
+  uint32 maxQueryLengthBytes = 1;
+  uint32 maxObservationLengthBytes = 2;
+  uint32 maxReportLengthBytes = 3;
+  uint32 maxRequestBatchSize = 4;
+  AggregationMethod defaultAggregationMethod = 5;
+  bool uniqueReports = 6;
+  ThresholdReportingPluginConfig thresholdPluginConfig = 7;
+  S4ReportingPluginConfig s4PluginConfig = 8;
+  // Needs to be set in tandem with gas estimator (e.g. [EVM.GasEstimator.LimitJobType] OCR = )
+  // otherwise the report won't go through TX Manager or fail later.
+  uint32 maxReportTotalCallbackGas = 9;
+}
\ No newline at end of file
diff --git a/core/services/ocr2/plugins/functions/encoding/abi_codec.go b/core/services/ocr2/plugins/functions/encoding/abi_codec.go
new file mode 100644
index 00000000..bbf624d4
--- /dev/null
+++ b/core/services/ocr2/plugins/functions/encoding/abi_codec.go
@@ -0,0 +1,134 @@
+package encoding
+
+import (
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/accounts/abi"
+	"github.com/pkg/errors"
+)
+
+// ReportCodec encodes and decodes the per-request results that make up a Functions OCR report.
+type ReportCodec interface {
+	EncodeReport(requests []*ProcessedRequest) ([]byte, error)
+	DecodeReport(raw []byte) ([]*ProcessedRequest, error)
+}
+
+type reportCodecV1 struct {
+	reportTypes abi.Arguments
+}
+
+// NewReportCodec returns a codec matching the given coordinator contract version (currently only v1).
+func NewReportCodec(contractVersion uint32) (ReportCodec, error) {
+	switch contractVersion {
+	case 1:
+		reportTypes, err := getReportTypesV1()
+		if err != nil {
+			return nil, err
+		}
+		return &reportCodecV1{reportTypes: reportTypes}, nil
+	default:
+		return nil, fmt.Errorf("unknown contract version: %d", contractVersion)
+	}
+}
+
+// SliceToByte32 converts a slice into a fixed [32]byte array, failing if the length is not exactly 32.
+func SliceToByte32(slice []byte) ([32]byte, error) {
+	if len(slice) != 32 {
+		return [32]byte{}, fmt.Errorf("input length is not 32 bytes: %d", len(slice))
+	}
+	var res [32]byte
+	copy(res[:], slice[:32])
+	return res, nil
+}
+
+// getReportTypesV1 builds the ABI schema used to pack/unpack v1 reports (five parallel arrays).
+func getReportTypesV1() (abi.Arguments, error) {
+	bytes32ArrType, err := abi.NewType("bytes32[]", "", []abi.ArgumentMarshaling{})
+	if err != nil {
+		return nil, fmt.Errorf("unable to create an ABI type object for bytes32[]: %w", err)
+	}
+	bytesArrType, err := abi.NewType("bytes[]", "", []abi.ArgumentMarshaling{})
+	if err != nil {
+		return nil, fmt.Errorf("unable to create an ABI type object for bytes[]: %w", err)
+	}
+	return abi.Arguments([]abi.Argument{
+		{Name: "ids", Type: bytes32ArrType},
+		{Name: "results", Type: bytesArrType},
+		{Name: "errors", Type: bytesArrType},
+		{Name: "onchain_metadata", Type: bytesArrType},
+		{Name: "processing_metadata", Type: bytesArrType},
+	}), nil
+}
+
+// EncodeReport ABI-encodes the processed requests; an empty input yields an empty report.
+func (c *reportCodecV1) EncodeReport(requests []*ProcessedRequest) ([]byte, error) {
+	size := len(requests)
+	if size == 0 {
+		return []byte{}, nil
+	}
+	ids := make([][32]byte, size)
+	results := make([][]byte, size)
+	errors := make([][]byte, size)
+	onchainMetadata := make([][]byte, size)
+	processingMetadata := make([][]byte, size)
+	for i := 0; i < size; i++ {
+		var err error
+		ids[i], err = SliceToByte32(requests[i].RequestID)
+		if err != nil {
+			return nil, err
+		}
+		results[i] = requests[i].Result
+		errors[i] = requests[i].Error
+		onchainMetadata[i] = requests[i].OnchainMetadata
+		processingMetadata[i] = requests[i].CoordinatorContract
+		// CallbackGasLimit is not ABI-encoded
+	}
+	return c.reportTypes.Pack(ids, results, errors, onchainMetadata, processingMetadata)
+}
+
+// DecodeReport is the inverse of EncodeReport; it validates that all five arrays are present and equally sized.
+func (c *reportCodecV1) DecodeReport(raw []byte) ([]*ProcessedRequest, error) {
+	reportElems := map[string]interface{}{}
+	if err := c.reportTypes.UnpackIntoMap(reportElems, raw); err != nil {
+		return nil, errors.WithMessage(err, "unable to unpack elements from raw report")
+	}
+
+	idsIface, idsOK := reportElems["ids"]
+	resultsIface, resultsOK := reportElems["results"]
+	errorsIface, errorsOK := reportElems["errors"]
+	onchainMetaIface, onchainMetaOK := reportElems["onchain_metadata"]
+	processingMetaIface, processingMetaOK := reportElems["processing_metadata"]
+	if !idsOK || !resultsOK || !errorsOK || !onchainMetaOK || !processingMetaOK {
+		return nil, fmt.Errorf("missing arrays in raw report, ids: %v, results: %v, errors: %v, onchain_metadata: %v, processing_metadata: %v", idsOK, resultsOK, errorsOK, onchainMetaOK, processingMetaOK)
+	}
+
+	ids, idsOK := idsIface.([][32]byte)
+	results, resultsOK := resultsIface.([][]byte)
+	errors, errorsOK := errorsIface.([][]byte)
+	onchainMeta, onchainMetaOK := onchainMetaIface.([][]byte)
+	processingMeta, processingMetaOK := processingMetaIface.([][]byte)
+	if !idsOK || !resultsOK || !errorsOK || !onchainMetaOK || !processingMetaOK {
+		return nil, fmt.Errorf("unable to cast part of raw report into array type, ids: %v, results: %v, errors: %v, onchain_metadata: %v, processing_metadata: %v", idsOK, resultsOK, errorsOK, onchainMetaOK, processingMetaOK)
+	}
+
+	size := len(ids)
+	if len(results) != size || len(errors) != size || len(onchainMeta) != size || len(processingMeta) != size {
+		return nil, fmt.Errorf("unequal sizes of arrays parsed from raw report, ids: %v, results: %v, errors: %v, onchain_metadata: %v, processing_metadata: %v", len(ids), len(results), len(errors), len(onchainMeta), len(processingMeta))
+	}
+	if size == 0 {
+		return []*ProcessedRequest{}, nil
+	}
+
+	decoded := make([]*ProcessedRequest, size)
+	for i := 0; i < size; i++ {
+		decoded[i] = &ProcessedRequest{
+			RequestID:           ids[i][:],
+			Result:              results[i],
+			Error:               errors[i],
+			OnchainMetadata:     onchainMeta[i],
+			CoordinatorContract: processingMeta[i],
+			// CallbackGasLimit is not ABI-encoded
+		}
+	}
+	return decoded, nil
+}
diff --git a/core/services/ocr2/plugins/functions/encoding/abi_codec_test.go b/core/services/ocr2/plugins/functions/encoding/abi_codec_test.go
new file mode 100644
index 00000000..8ecfa25c
--- /dev/null
+++ b/core/services/ocr2/plugins/functions/encoding/abi_codec_test.go
@@ -0,0 +1,64 @@
+package encoding_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/encoding"
+)
+
+func
TestABICodec_EncodeDecodeV1Success(t *testing.T) { + t.Parallel() + codec, err := encoding.NewReportCodec(1) + require.NoError(t, err) + + var report = []*encoding.ProcessedRequest{ + { + RequestID: []byte(fmt.Sprintf("%032d", 123)), + Result: []byte("abcd"), + Error: []byte("err string"), + CoordinatorContract: []byte("contract_1"), + OnchainMetadata: []byte("commitment_1"), + }, + { + RequestID: []byte(fmt.Sprintf("%032d", 4321)), + Result: []byte("0xababababab"), + Error: []byte(""), + CoordinatorContract: []byte("contract_2"), + OnchainMetadata: []byte("commitment_2"), + }, + } + + encoded, err := codec.EncodeReport(report) + require.NoError(t, err) + decoded, err := codec.DecodeReport(encoded) + require.NoError(t, err) + + require.Equal(t, len(report), len(decoded)) + for i := 0; i < len(report); i++ { + require.Equal(t, report[i].RequestID, decoded[i].RequestID, "RequestIDs not equal at index %d", i) + require.Equal(t, report[i].Result, decoded[i].Result, "Results not equal at index %d", i) + require.Equal(t, report[i].Error, decoded[i].Error, "Errors not equal at index %d", i) + require.Equal(t, report[i].CoordinatorContract, decoded[i].CoordinatorContract, "Contracts not equal at index %d", i) + require.Equal(t, report[i].OnchainMetadata, decoded[i].OnchainMetadata, "Metadata not equal at index %d", i) + } +} + +func TestABICodec_SliceToByte32(t *testing.T) { + t.Parallel() + + _, err := encoding.SliceToByte32([]byte("abcd")) + require.Error(t, err) + _, err = encoding.SliceToByte32([]byte("0123456789012345678901234567890123456789")) + require.Error(t, err) + + var expected [32]byte + for i := 0; i < 32; i++ { + expected[i] = byte(i) + } + res, err := encoding.SliceToByte32(expected[:]) + require.NoError(t, err) + require.Equal(t, expected, res) +} diff --git a/core/services/ocr2/plugins/functions/encoding/ocr_types.pb.go b/core/services/ocr2/plugins/functions/encoding/ocr_types.pb.go new file mode 100644 index 00000000..40220763 --- /dev/null +++ 
b/core/services/ocr2/plugins/functions/encoding/ocr_types.pb.go @@ -0,0 +1,330 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.8 +// source: core/services/ocr2/plugins/functions/encoding/ocr_types.proto + +package encoding + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// These protos are used internally by the OCR2 reporting plugin to +// pass data between initial phases. Report is ABI-encoded. +type Query struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RequestIDs [][]byte `protobuf:"bytes,1,rep,name=requestIDs,proto3" json:"requestIDs,omitempty"` +} + +func (x *Query) Reset() { + *x = Query{} + if protoimpl.UnsafeEnabled { + mi := &file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Query) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Query) ProtoMessage() {} + +func (x *Query) ProtoReflect() protoreflect.Message { + mi := &file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Query.ProtoReflect.Descriptor instead. 
+func (*Query) Descriptor() ([]byte, []int) { + return file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Query) GetRequestIDs() [][]byte { + if x != nil { + return x.RequestIDs + } + return nil +} + +type Observation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ProcessedRequests []*ProcessedRequest `protobuf:"bytes,1,rep,name=processedRequests,proto3" json:"processedRequests,omitempty"` +} + +func (x *Observation) Reset() { + *x = Observation{} + if protoimpl.UnsafeEnabled { + mi := &file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Observation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Observation) ProtoMessage() {} + +func (x *Observation) ProtoReflect() protoreflect.Message { + mi := &file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Observation.ProtoReflect.Descriptor instead. 
+func (*Observation) Descriptor() ([]byte, []int) { + return file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDescGZIP(), []int{1} +} + +func (x *Observation) GetProcessedRequests() []*ProcessedRequest { + if x != nil { + return x.ProcessedRequests + } + return nil +} + +type ProcessedRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RequestID []byte `protobuf:"bytes,1,opt,name=requestID,proto3" json:"requestID,omitempty"` + Result []byte `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` + Error []byte `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` + CallbackGasLimit uint32 `protobuf:"varint,4,opt,name=callbackGasLimit,proto3" json:"callbackGasLimit,omitempty"` + CoordinatorContract []byte `protobuf:"bytes,5,opt,name=coordinatorContract,proto3" json:"coordinatorContract,omitempty"` + OnchainMetadata []byte `protobuf:"bytes,6,opt,name=onchainMetadata,proto3" json:"onchainMetadata,omitempty"` +} + +func (x *ProcessedRequest) Reset() { + *x = ProcessedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessedRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessedRequest) ProtoMessage() {} + +func (x *ProcessedRequest) ProtoReflect() protoreflect.Message { + mi := &file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessedRequest.ProtoReflect.Descriptor instead. 
+func (*ProcessedRequest) Descriptor() ([]byte, []int) { + return file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDescGZIP(), []int{2} +} + +func (x *ProcessedRequest) GetRequestID() []byte { + if x != nil { + return x.RequestID + } + return nil +} + +func (x *ProcessedRequest) GetResult() []byte { + if x != nil { + return x.Result + } + return nil +} + +func (x *ProcessedRequest) GetError() []byte { + if x != nil { + return x.Error + } + return nil +} + +func (x *ProcessedRequest) GetCallbackGasLimit() uint32 { + if x != nil { + return x.CallbackGasLimit + } + return 0 +} + +func (x *ProcessedRequest) GetCoordinatorContract() []byte { + if x != nil { + return x.CoordinatorContract + } + return nil +} + +func (x *ProcessedRequest) GetOnchainMetadata() []byte { + if x != nil { + return x.OnchainMetadata + } + return nil +} + +var File_core_services_ocr2_plugins_functions_encoding_ocr_types_proto protoreflect.FileDescriptor + +var file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDesc = []byte{ + 0x0a, 0x3d, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x6f, 0x63, 0x72, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x2f, + 0x6f, 0x63, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x22, 0x27, 0x0a, 0x05, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x44, 0x73, 0x22, 0x57, 0x0a, 0x0b, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x48, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x1a, 0x2e, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x11, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x10, + 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x44, 0x12, 0x16, + 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x10, + 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x47, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, + 0x47, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x30, 0x0a, 0x13, 0x63, 0x6f, 0x6f, 0x72, + 0x64, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x63, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, + 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x6f, 0x6e, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x6f, 0x6e, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x42, 0x2f, 0x5a, 0x2d, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x6f, 0x63, 0x72, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x2f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x6e, 0x63, + 0x6f, 
0x64, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDescOnce sync.Once + file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDescData = file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDesc +) + +func file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDescGZIP() []byte { + file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDescOnce.Do(func() { + file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDescData) + }) + return file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDescData +} + +var file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_goTypes = []interface{}{ + (*Query)(nil), // 0: encoding.Query + (*Observation)(nil), // 1: encoding.Observation + (*ProcessedRequest)(nil), // 2: encoding.ProcessedRequest +} +var file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_depIdxs = []int32{ + 2, // 0: encoding.Observation.processedRequests:type_name -> encoding.ProcessedRequest + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_init() } +func file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_init() { + if File_core_services_ocr2_plugins_functions_encoding_ocr_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Query); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Observation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_goTypes, + DependencyIndexes: file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_depIdxs, + MessageInfos: file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_msgTypes, + }.Build() + File_core_services_ocr2_plugins_functions_encoding_ocr_types_proto = out.File + file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_rawDesc = nil + file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_goTypes = nil + file_core_services_ocr2_plugins_functions_encoding_ocr_types_proto_depIdxs = nil +} diff --git a/core/services/ocr2/plugins/functions/encoding/ocr_types.proto b/core/services/ocr2/plugins/functions/encoding/ocr_types.proto new file mode 100644 index 00000000..a546f0f1 --- 
/dev/null +++ b/core/services/ocr2/plugins/functions/encoding/ocr_types.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +option go_package = "core/services/ocr2/plugins/functions/encoding"; + +package encoding; + +// These protos are used internally by the OCR2 reporting plugin to +// pass data between initial phases. Report is ABI-encoded. +message Query { + repeated bytes requestIDs = 1; +} + +message Observation { + repeated ProcessedRequest processedRequests = 1; +} + +message ProcessedRequest { + bytes requestID = 1; + bytes result = 2; + bytes error = 3; + uint32 callbackGasLimit = 4; + bytes coordinatorContract = 5; + bytes onchainMetadata = 6; +} diff --git a/core/services/ocr2/plugins/functions/integration_tests/v1/functions_integration_test.go b/core/services/ocr2/plugins/functions/integration_tests/v1/functions_integration_test.go new file mode 100644 index 00000000..54dd731e --- /dev/null +++ b/core/services/ocr2/plugins/functions/integration_tests/v1/functions_integration_test.go @@ -0,0 +1,128 @@ +package functions_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + functionsConfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + utils "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/integration_tests/v1/internal" +) + +var ( + // a batch of 8 max-length results uses around 2M gas (assuming 70k gas per client callback - see FunctionsClientExample.sol) + nOracleNodes = 4 + nClients = 50 + requestLenBytes = 1_000 + maxGas = 1_700_000 + maxTotalReportGas = 560_000 + batchSize = 8 +) + +func TestIntegration_Functions_MultipleV1Requests_Success(t *testing.T) { + // simulated chain with all contracts + owner, b, ticker, active, proposed, clientContracts, routerAddress, routerContract, linkToken, allowListContractAddress, allowListContract := utils.StartNewChainWithContracts(t, nClients) + defer ticker.Stop() + + utils.SetupRouterRoutes(t, b, owner, routerContract, 
active.Address, proposed.Address, allowListContractAddress) + + _, _, oracleIdentities := utils.CreateFunctionsNodes(t, owner, b, routerAddress, nOracleNodes, maxGas, nil, nil) + + pluginConfig := functionsConfig.ReportingPluginConfig{ + MaxQueryLengthBytes: 10_000, + MaxObservationLengthBytes: 15_000, + MaxReportLengthBytes: 15_000, + MaxRequestBatchSize: uint32(batchSize), + MaxReportTotalCallbackGas: uint32(maxTotalReportGas), + DefaultAggregationMethod: functionsConfig.AggregationMethod_AGGREGATION_MODE, + UniqueReports: true, + } + + // config for oracle contract + utils.SetOracleConfig(t, b, owner, active.Contract, oracleIdentities, batchSize, &pluginConfig) + + subscriptionId := utils.CreateAndFundSubscriptions(t, b, owner, linkToken, routerAddress, routerContract, clientContracts, allowListContract) + b.Commit() + utils.ClientTestRequests(t, owner, b, linkToken, routerAddress, routerContract, allowListContract, clientContracts, requestLenBytes, nil, subscriptionId, 1*time.Minute) +} + +func TestIntegration_Functions_MultipleV1Requests_ThresholdDecryptionSuccess(t *testing.T) { + // simulated chain with all contracts + owner, b, ticker, active, proposed, clientContracts, routerAddress, routerContract, linkToken, allowListContractAddress, allowListContract := utils.StartNewChainWithContracts(t, nClients) + defer ticker.Stop() + + utils.SetupRouterRoutes(t, b, owner, routerContract, active.Address, proposed.Address, allowListContractAddress) + + _, _, oracleIdentities := utils.CreateFunctionsNodes(t, owner, b, routerAddress, nOracleNodes, maxGas, utils.ExportedOcr2Keystores, utils.MockThresholdKeyShares) + + pluginConfig := functionsConfig.ReportingPluginConfig{ + MaxQueryLengthBytes: 10_000, + MaxObservationLengthBytes: 15_000, + MaxReportLengthBytes: 15_000, + MaxRequestBatchSize: uint32(batchSize), + MaxReportTotalCallbackGas: uint32(maxTotalReportGas), + DefaultAggregationMethod: functionsConfig.AggregationMethod_AGGREGATION_MODE, + UniqueReports: true, + 
ThresholdPluginConfig: &functionsConfig.ThresholdReportingPluginConfig{ + // approximately 750 bytes per test ciphertext + overhead + MaxQueryLengthBytes: 70_000, + MaxObservationLengthBytes: 70_000, + MaxReportLengthBytes: 70_000, + RequestCountLimit: 50, + RequestTotalBytesLimit: 50_000, + RequireLocalRequestCheck: true, + K: 2, + }, + } + + // config for oracle contract + utils.SetOracleConfig(t, b, owner, active.Contract, oracleIdentities, batchSize, &pluginConfig) + + subscriptionId := utils.CreateAndFundSubscriptions(t, b, owner, linkToken, routerAddress, routerContract, clientContracts, allowListContract) + b.Commit() + utils.ClientTestRequests(t, owner, b, linkToken, routerAddress, routerContract, allowListContract, clientContracts, requestLenBytes, utils.DefaultSecretsUrlsBytes, subscriptionId, 1*time.Minute) +} + +func TestIntegration_Functions_MultipleV1Requests_WithUpgrade(t *testing.T) { + // simulated chain with all contracts + owner, b, ticker, active, proposed, clientContracts, routerAddress, routerContract, linkToken, allowListContractAddress, allowListContract := utils.StartNewChainWithContracts(t, nClients) + defer ticker.Stop() + + utils.SetupRouterRoutes(t, b, owner, routerContract, active.Address, proposed.Address, allowListContractAddress) + + _, _, oracleIdentities := utils.CreateFunctionsNodes(t, owner, b, routerAddress, nOracleNodes, maxGas, utils.ExportedOcr2Keystores, utils.MockThresholdKeyShares) + + pluginConfig := functionsConfig.ReportingPluginConfig{ + MaxQueryLengthBytes: 10_000, + MaxObservationLengthBytes: 15_000, + MaxReportLengthBytes: 15_000, + MaxRequestBatchSize: uint32(batchSize), + MaxReportTotalCallbackGas: uint32(maxTotalReportGas), + DefaultAggregationMethod: functionsConfig.AggregationMethod_AGGREGATION_MODE, + UniqueReports: true, + ThresholdPluginConfig: &functionsConfig.ThresholdReportingPluginConfig{ + // approximately 750 bytes per test ciphertext + overhead + MaxQueryLengthBytes: 70_000, + 
MaxObservationLengthBytes: 70_000, + MaxReportLengthBytes: 70_000, + RequestCountLimit: 50, + RequestTotalBytesLimit: 50_000, + RequireLocalRequestCheck: true, + K: 2, + }, + } + + // set config for both coordinators + utils.SetOracleConfig(t, b, owner, active.Contract, oracleIdentities, batchSize, &pluginConfig) + utils.SetOracleConfig(t, b, owner, proposed.Contract, oracleIdentities, batchSize, &pluginConfig) + + subscriptionId := utils.CreateAndFundSubscriptions(t, b, owner, linkToken, routerAddress, routerContract, clientContracts, allowListContract) + utils.ClientTestRequests(t, owner, b, linkToken, routerAddress, routerContract, allowListContract, clientContracts, requestLenBytes, utils.DefaultSecretsUrlsBytes, subscriptionId, 1*time.Minute) + + // upgrade and send requests again + _, err := routerContract.UpdateContracts(owner) + require.NoError(t, err) + b.Commit() + utils.ClientTestRequests(t, owner, b, linkToken, routerAddress, routerContract, allowListContract, clientContracts, requestLenBytes, utils.DefaultSecretsUrlsBytes, subscriptionId, 1*time.Minute) +} diff --git a/core/services/ocr2/plugins/functions/integration_tests/v1/internal/testconstants.go b/core/services/ocr2/plugins/functions/integration_tests/v1/internal/testconstants.go new file mode 100644 index 00000000..5a463a3d --- /dev/null +++ b/core/services/ocr2/plugins/functions/integration_tests/v1/internal/testconstants.go @@ -0,0 +1,38 @@ +package testutils + +var ( + DefaultSecretsBytes = []byte{0xaa, 0xbb, 0xcc} + DefaultSecretsBase64 = "qrvM" + DefaultSecretsUrlsBytes = []byte{0x01, 0x02, 0x03} + DefaultSecretsUrlsBase64 = "AQID" + DefaultArg1 = "arg1" + DefaultArg2 = "arg2" + DefaultDONId = "1" + DefaultGasPrice = 1_000_000_000 + + // Below are the corresponding values for the encrypted threshold keys used in the test + // mockThresholdPubKey := 
`{"Group":"P256","G_bar":"BAnzIguQNKnA37Zh0b3Z3K5CcvxHjzfTIytt37ZgNQLaTeuiq9rrVhz+yaZcvNQ9EYw978KmmYOq6qd0NA/ERh8=","H":"BOyfKc8aowVjOK2qYf0kdeuLkPeqbjDnjDFGIj/2n7O+qHIvqKx3A07Oa92tP5DkcS5AL/tipXDIBJvVWvcvudk=","HArray":["BBhLQicdsIUgigmIW4l6Xi1jBkFFXEtm2wuvydoZCjZZdlDZt82pXtOI+vPQbd5iawQPX6u4HUrhEisqwhx5P0A=","BLTOIUViwoVJTAzCKIo2FgliIfK7w3jG6wjwf3LVkdsMYJ2ZiEJDA7YC1GwsVgutYdxrwOkAY+wnoh9j+AYF/rQ=","BH2vi5G9ftykpOJARMlziZuZKXSx5YiP131HpwWwsgFAquSpsNTRWHsjk4nc0lcQKf6x9E+7UUQpAPwDpyrh7Xc=","BHQBZMqVRvxQHtnC4tqfh9Qc632IfSCPCBDsePyLzD1nXOf/qJWrCpfsZ3T3PaRm/U30LSgnb1nsuXI9nDuTFsM="]}` + // mockPlaintextThresholdMasterKey := `{"Group":"P256","S":"MTpFRrh8F5mih+92W0l51ZVZIiGJgpHNUXb4vzkzv8A="}` + // mockPlaintextThresholdKeyShares := [][]byte{ + // []byte(`{"PublicKey":{"Group":"P256","G_bar":"BAnzIguQNKnA37Zh0b3Z3K5CcvxHjzfTIytt37ZgNQLaTeuiq9rrVhz+yaZcvNQ9EYw978KmmYOq6qd0NA/ERh8=","H":"BOyfKc8aowVjOK2qYf0kdeuLkPeqbjDnjDFGIj/2n7O+qHIvqKx3A07Oa92tP5DkcS5AL/tipXDIBJvVWvcvudk=","HArray":["BBhLQicdsIUgigmIW4l6Xi1jBkFFXEtm2wuvydoZCjZZdlDZt82pXtOI+vPQbd5iawQPX6u4HUrhEisqwhx5P0A=","BLTOIUViwoVJTAzCKIo2FgliIfK7w3jG6wjwf3LVkdsMYJ2ZiEJDA7YC1GwsVgutYdxrwOkAY+wnoh9j+AYF/rQ=","BH2vi5G9ftykpOJARMlziZuZKXSx5YiP131HpwWwsgFAquSpsNTRWHsjk4nc0lcQKf6x9E+7UUQpAPwDpyrh7Xc=","BHQBZMqVRvxQHtnC4tqfh9Qc632IfSCPCBDsePyLzD1nXOf/qJWrCpfsZ3T3PaRm/U30LSgnb1nsuXI9nDuTFsM="]},"PrivateKeyShare":{"Group":"P256","Index":0,"V":"Jzuh+h/jgm0HIp6iKVJxc/vCUOz7Ea+Y0twvRzJDheg="}}`), + // 
[]byte(`{"PublicKey":{"Group":"P256","G_bar":"BAnzIguQNKnA37Zh0b3Z3K5CcvxHjzfTIytt37ZgNQLaTeuiq9rrVhz+yaZcvNQ9EYw978KmmYOq6qd0NA/ERh8=","H":"BOyfKc8aowVjOK2qYf0kdeuLkPeqbjDnjDFGIj/2n7O+qHIvqKx3A07Oa92tP5DkcS5AL/tipXDIBJvVWvcvudk=","HArray":["BBhLQicdsIUgigmIW4l6Xi1jBkFFXEtm2wuvydoZCjZZdlDZt82pXtOI+vPQbd5iawQPX6u4HUrhEisqwhx5P0A=","BLTOIUViwoVJTAzCKIo2FgliIfK7w3jG6wjwf3LVkdsMYJ2ZiEJDA7YC1GwsVgutYdxrwOkAY+wnoh9j+AYF/rQ=","BH2vi5G9ftykpOJARMlziZuZKXSx5YiP131HpwWwsgFAquSpsNTRWHsjk4nc0lcQKf6x9E+7UUQpAPwDpyrh7Xc=","BHQBZMqVRvxQHtnC4tqfh9Qc632IfSCPCBDsePyLzD1nXOf/qJWrCpfsZ3T3PaRm/U30LSgnb1nsuXI9nDuTFsM="]},"PrivateKeyShare":{"Group":"P256","Index":1,"V":"HTz+rYdK7UBrvU3N91tpEmIrf7hsoM1kVEFlzytTTBA="}}`), + // []byte(`{"PublicKey":{"Group":"P256","G_bar":"BAnzIguQNKnA37Zh0b3Z3K5CcvxHjzfTIytt37ZgNQLaTeuiq9rrVhz+yaZcvNQ9EYw978KmmYOq6qd0NA/ERh8=","H":"BOyfKc8aowVjOK2qYf0kdeuLkPeqbjDnjDFGIj/2n7O+qHIvqKx3A07Oa92tP5DkcS5AL/tipXDIBJvVWvcvudk=","HArray":["BBhLQicdsIUgigmIW4l6Xi1jBkFFXEtm2wuvydoZCjZZdlDZt82pXtOI+vPQbd5iawQPX6u4HUrhEisqwhx5P0A=","BLTOIUViwoVJTAzCKIo2FgliIfK7w3jG6wjwf3LVkdsMYJ2ZiEJDA7YC1GwsVgutYdxrwOkAY+wnoh9j+AYF/rQ=","BH2vi5G9ftykpOJARMlziZuZKXSx5YiP131HpwWwsgFAquSpsNTRWHsjk4nc0lcQKf6x9E+7UUQpAPwDpyrh7Xc=","BHQBZMqVRvxQHtnC4tqfh9Qc632IfSCPCBDsePyLzD1nXOf/qJWrCpfsZ3T3PaRm/U30LSgnb1nsuXI9nDuTFsM="]},"PrivateKeyShare":{"Group":"P256","Index":2,"V":"Ez5bYO6yWBPQV/z5xWRgsMiUroPeL+sv1aacVyRjEjg="}}`), + // 
[]byte(`{"PublicKey":{"Group":"P256","G_bar":"BAnzIguQNKnA37Zh0b3Z3K5CcvxHjzfTIytt37ZgNQLaTeuiq9rrVhz+yaZcvNQ9EYw978KmmYOq6qd0NA/ERh8=","H":"BOyfKc8aowVjOK2qYf0kdeuLkPeqbjDnjDFGIj/2n7O+qHIvqKx3A07Oa92tP5DkcS5AL/tipXDIBJvVWvcvudk=","HArray":["BBhLQicdsIUgigmIW4l6Xi1jBkFFXEtm2wuvydoZCjZZdlDZt82pXtOI+vPQbd5iawQPX6u4HUrhEisqwhx5P0A=","BLTOIUViwoVJTAzCKIo2FgliIfK7w3jG6wjwf3LVkdsMYJ2ZiEJDA7YC1GwsVgutYdxrwOkAY+wnoh9j+AYF/rQ=","BH2vi5G9ftykpOJARMlziZuZKXSx5YiP131HpwWwsgFAquSpsNTRWHsjk4nc0lcQKf6x9E+7UUQpAPwDpyrh7Xc=","BHQBZMqVRvxQHtnC4tqfh9Qc632IfSCPCBDsePyLzD1nXOf/qJWrCpfsZ3T3PaRm/U30LSgnb1nsuXI9nDuTFsM="]},"PrivateKeyShare":{"Group":"P256","Index":3,"V":"CT+4FFYZwuc08qwlk21YTy793U9Pvwj7VwvS3x1y2GA="}}`), + // } + // Since the threshold public keys are encrypted with each node's public OCR2 offchain config key, the OCR2 offchain config key must be known in advance instead of regenerated every time. + ExportedOcr2Keystores = [][]byte{ + []byte(`{"keyType":"OCR2","chainType":"evm","id":"d8d0363c218526d809b8570257e7822bfde559ed532fa976118fc2d994155e55","onchainPublicKey":"e1deb8516367e7715da19134d58528778d6aff04","offchainPublicKey":"91c021d592ae5949eafaf4b74d622b13ac5784db7de9591755c3d3d2ed7a19b4","configPublicKey":"677c7ba4487eabea0f3341a095b94b6f56a1df97a165e49b8b0a7864c0f66077","crypto":{"cipher":"aes-128-ctr","ciphertext":"6d111479822c5b6e5a2b5ebde4f7ab6e6f7809e07bf6451d416f5e3082833a3b34e9aa820658c03487a3bdde5cbbfff3198a951f36a5ef91d1274211273f7af2e3e17e258530c25aebb1099395ecc1d96e567adefa727f06fcb61d0df2829737998bdd959759c62342fbd74a3f231535bb413d9c4479e33ff294d66bfc90d2b6db9d12250ef8daab309bb914373a76da24e82dce5b0da428a19efc1cdda48719f898a91f21b4fc89458d126d1023ffc0efcc8cf3e0ee249bfa0b1b55fa151d5c2630be21be2ea27964b82816ee72885f7bc59d7d4157d5333ab8324cc65a78581437a8600e8c044fabe80ae76cbe39ae332c5c0f85ac1090b13a1525caca40459082e9ca762714e50543e6aa907b7286eafc93a51c2d67265393d86ad83d431bf068aa006c36f63356","cipherparams":{"iv":"11b7b2f3c33dfff2a3125fdb10048601"},"kdf":"s
crypt","kdfparams":{"dklen":32,"n":2,"p":1,"r":8,"salt":"a3c5952aa41720097a50fea6957c994a9c39cb1dbe7bd59cff5b9354633836c1"},"mac":"df6d0d0b5f2cc95f1e1d4c9894f8487c56b814ca55d10e520d0c44e91d699012"}}`), + []byte(`{"keyType":"OCR2","chainType":"evm","id":"03a0713ffb5b506cbf12bd59fd7023c9dc3f1f56d4d2f9ed564f0361c3aa1119","onchainPublicKey":"3203c6923adc7ecff671e00632eb350fa354e957","offchainPublicKey":"c9aea40c0e5f13d9704d3051faa01718f1d4aa1419d3e6199f7deacee56a596e","configPublicKey":"23ccc8500fa3af447c7a8b3aa4e41ad7b34f6585bdc3e8c847c7ff12a8caa818","crypto":{"cipher":"aes-128-ctr","ciphertext":"5aa58bfd17209a4c9da966a1b175c74137536531ada19507434d6dbf146d71516c0ed2b52b5e664e69c4286a56f38be322206a33eacfaa854be313bade3b5876e5b40aec5be4dca761749b535ad9cd1575897d6736eb96fb9e118eb696c44d111b81757bd73a6d33c170febaf57705590f279625ec536d1656eac96a953de915693a9cd040a5d5948f3b288ebdbb178c955b5f0faa432d68e744f39dc621cbc4c5853627addc3dd2ed5e1ba4a514278d56d0cdef2e57688ad0752f3077656f16fa3144c33f3ed96ebdeb3009139f327d6c12d344a047147e71a95d5b08582bd8ee86c86e308d81f16357c79b50a48041994d98490a4b9d965525187901a2862d41bcc6822046f99ad481ae104aa75b816c5abac876bce0b07b2b73bb459830a5bc70f20c34b87ed6fc","cipherparams":{"iv":"d8fcf50fff54dfa082d40c0de90b6ab4"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":2,"p":1,"r":8,"salt":"64ba7faf39b49c46d5360109ef23e3f4198a3f402cbb975686fe4b52cc7c0b1f"},"mac":"09520656cc08b5743f95d386a5b7c3d164ecaa2002a0b83f2cd8d44c97d99b8a"}}`), + 
[]byte(`{"keyType":"OCR2","chainType":"evm","id":"ace8972c126601fdc5f9ed42c909a57d2a19dff2be7ce341913c01a4921c829f","onchainPublicKey":"d8699a99d2a3ab4ccfc61bc7df72b30605a05b7c","offchainPublicKey":"9c1fa3ce9d355a73ce2dcdece70e7314af8da13f5ca30ee251dbed201dc904ed","configPublicKey":"9a2b9087af305c187d1292842072e50d2c4e98cb4e26c6328021fdd992e78926","crypto":{"cipher":"aes-128-ctr","ciphertext":"c7d2da9c60779ad33dce870b1f8af94082dfdfe51d4f04f27cf505bba21b17e287c685880614e22df92ce7549230e8b1a327de0686d892f5c4dda546132b044bde2a58d60bf335931b78e42342e2578e54111592cad4301d22c192c87809e3cef6e6dd1a30a7a208c78461154d856213466c1afad4023001c1b45fdf7804c6a9a4eb64ac1ea59775a1e694c79fc492b4d7422cd777778549c817e735e8e5e5f45aea9c9e9a5a7ff1c71b7879af120a4aa467a3ac0cdb1ba04f893283cad533c4ac9723969a64bbfd69bb7ef88d88d8d96c5293fee35c14a00a8e6c0cb2e7cfa918d05ec9e092e4fdad2a41edf7a1cb9e1a1a12ec6fbb594eb1e07d5dbb79eda390b826aa7ca8ccfda1cee9c125eb8a1d07d9e899cf6e9c1ec0fd40d83a50656674cc5c9d0a88710c65","cipherparams":{"iv":"5b6fca38ff8c03e38fb6f56f9d90fe8d"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":2,"p":1,"r":8,"salt":"6bfc5f03e8bd10efea3808d025c5769a49ade8bee6c608d713a6bf3f940a37ba"},"mac":"e1e4bcbe67a80d73ee12c8080ddd99bba9ca8774349a9cb5c5b9821983e460ca"}}`), + 
[]byte(`{"keyType":"OCR2","chainType":"evm","id":"5b09af3b5a5a420436fa206eb266b6a4d796406030d022d4beaf6609e9da9274","onchainPublicKey":"c27e7957c243e061cc17d8f30e8f894280235499","offchainPublicKey":"f6c2fd84bb73252d49852b50420f5983b488609d9f73ef601e7641df24794dec","configPublicKey":"45a96a32b7339cd22da1a1c42c2ee71e698dd7dfbe7fd997949f1cdb3f2ba518","crypto":{"cipher":"aes-128-ctr","ciphertext":"f842a872d8a0033fc9b91673a1c2f095eda18c4716b53c743d04b82710f2bfa2c950356e50ea085b05975c36176a3dd2b10901508f297e3ff966cde02cf6ba077eebdb8708117c4c9f4498358e74fdc98bc1130a32bfa6bf8f46d9ea77d4e56952cc1ccbbf2bde05a48acf13fbb238675d640b6a74af70dfbaa63cad57108480238ce946a5af5f60b09794ff5b40bde982f32d7b48f5e9a51830a46cce1cf2f9a0c0ec220c062eb90602b2abbaf2a6a2da2d8b3dbc211db17d599789654a2a26f1fad52e22d7eed91365ce7d964731936307c39e66bd9a9ffc999f3174317786077ab83224d06e025b7de5974293b96eabcb3b9ffb17d2d0b49743cd7b4f1b222e32be3f32b9d2177b1c20cb7d56c10ae0789f8586ee99ccd9ac8b626df058b454686b2d8a663635b8","cipherparams":{"iv":"21a5417a99f37507bc2fbdda83c5856f"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":2,"p":1,"r":8,"salt":"c98ac207e897658d7c42029c2858ba29f03abe9298745b0b909377085806895a"},"mac":"dc1ef1c284e1d001b63f1d545e308c4ef194757605a4b57b8c565c5c40e64146"}}`), + } + // Threshold key shares have been generated with the Functions admin tooling (https://github.com/goplugin/functions-admin-tooling/tree/main/threshold_key_manager) + MockThresholdKeyShares = []string{ + 
"40e99151aaa2002631cfdac741a0f206b9ebc492a2c5bd56deac64118148df6e3829a6f81b6df54694ca241f5bc12fd2b58bce9106767f4adbf7250e0a033386b56eb93159c3cf14748b6a0d1996a51039ce9ff98acaba758913fa2d671e86b909edbe27bdf441b68be2c2debbe8a29d814a04accd996b4df0fa8e0cea65ef47922c4aca8132fe4f2d6def9c4683b031267f60ccf896b6bf6b1957786deafebb5bc1663778ee893511306dda7833d01efe0d63224d0da0cea6d728953793e6b3e97f81cb64f80ed4ff4f3c349b90f11290adb0eff07f8ccb9b97d098903ce194f33f284d38832e12902354c3cda72189b3ac31b8206575391c1618dee6530b99fc60d667052e0065365842176b6b5d9b779be0be4f7fa27115d65e08d01f13db695581c1be4a1f96b980cba45bd528aaee23809fa9a601a229c7018eb6755f43a0d38125a872fcf8e109ca92abc55348cccc4f83beae8412425570b8aa9b6526a9ddd05a41e5c6d6865cf91ff1912490d67e6a99c3a91db433120b13768affef6b0cdeb33d085078fa27b90ef2663c69fcf9f3dad9a1fb67b9605f035b2cebf7808801e9ccd3eb83015aaefecf1fa97674923219c543accdc23d5f624cdbd673eba5f29cc66c647e1fe454e0c73efe8ff6605f78243e01aa42c14241f890388d6d35e3071ee89130f62317b8d624fc2b7c270769cf8e5c054cf3ed4163917f0af7372ab0eafdc4dd4faaf3cc30d406041404eae2ba942c800e4d9726e8e0d27e1488c5aaf47d977665c86d8574c17b69c8b31783ec479929e6b858ef57b072bcfaa3a747362235e142e9b73f19192d8878b832ff25b8cf29f0b5c130cb1d505ec1830a0901c0ac1f9b5542db33a9c39afc6ffa68d32a91dbb525cec9536647020cd22925baf63d173cd9b377fbcb509efcbf7830df459e080f5794186d628be938e9a53d3ea3182eb328d9956905dc6c22061c76118b208cdd7f063522d81ab4e404ef6d4a53b2099d2c94b8c05526b8367cfff6f08340bbe53cf2fbe6e7fae3b7062d27722085", + 
"9956a87c447eb15b82906742ef84980ca1dc6846b2234468ed7399af5392243b14226ab2a6ff6220776d1b5a591cacfde0bd583f48683d4f38186300f919681aa0a886e7eddc556e1bb434c18994df214a5b6fe4bfcb09ae9fc5dba4cb7e027ce01121dbad0621fe35311e1015c7d4d006ababd5c7377a0f70aecafaf3f709c0571ca2dd89b6cd83867bd299e8ce01e04ecafc1230ab0aeb2812a693fadcba7b479a00da357e75876d0cd2a3d85774e056d63436623ff400c9fd76446f5b7fd90902bf4d4268fb5775e326a887bd59136692bb5b0400790dd7908f9d5c7e422c76831ac3876e67a8b6d66ceb49c9c0835660bbd90a1e18ab8486dc464b6fd4044c580d3082842ab655169cf46abe25ad4de04d0bdefc74fee9c2009b43eea48f976a449ceb805993383b06ddfaf5a3df112a1c5e72ea121b9fbeafe8b7666500abeb52f2bad6035295b66994c0d239ced38f32af4df161b7bd66886703c050531173ae6ace942a311a0077b601c025bb59a78b43558c09ae55a6ce70d408b7e0ae439542971b33fdf14a7c5a36fed8d310e8cd08fe7c6b45d1bffd351bad2fd1cfc8d964f8ec39db82a7fd4095771f6d5e84516598364bd0532d3b8ccd660fa73bc1e5840087eeac18c7fd52611deac5aa50a6a033d2aecd79ec8bba8045615ff4d7b77e0d087110b888426d138107236ed4a6a6b6a5b51e54149e2272896d0180a46cde8955a8adeb969c35c5b58a0958ce955f2634723d8bf38c2d1c3efe0ce2494b00947d3958ec3b5b9f5445acad30296a34060bd688c7b91e818f49ddfa560541682ce0ff6d245b9df5adee989ea6b9eedcf58b3d47fdc64e8c046efaad51e169a81fc22419be8666897f18d7c913d806764714c9a735cce3698da8ecb3b16c1a88f6ef2b4a52686d4d1b5cfc5859422a1b39f7fe46691ccd28050a8edf040f438b407bfa4cc48641155e3c7531301d5192148ab2ccba064afeed1ae8723702ea9793317a1989a2601a958fda5bb23222a983842a31553930c4364b661e03376fff2471ae", + 
"4c19996b1ed254aba3f5d36a24a3a24a008cc864562610e15523087c2c05e63c57bc6c386d4f1b3cb53eb10ae26ab594a6693fa8ee5783ab4b4b040a0e4f27beda68405b72cbcbeee762114b23d1fe69aaf72bc81f70e76251df20b83fe6b82240fbe64be3b1a238bf3c5a8b7c8bac945fd25348b5618ff2409940f64bd86afb64695706c57340e72741af4e0d2d47407b041c57aac7ba9d35ce2316d6bff28ee5c22b007ec2c9b5914cb5baaa9e2ce6aafeb976c681dc08e1eb28514a7b7adfa0536ced06e3e23e9a14bad3f68b836442a952c39bfcaabdae0d3226dde590f3ba927b49a0c6f011b02abd14e5db8bf355432e3d3286c9aa5fcf2b6620cda13d56753a420555d0300de04c008d6cb9b62e4c7c44ea563445cccfc8266b06192c2981ce5b1157b6c16ca38e6b7036dd2e21e75646bbffc0be2f6c23257666a5679b8e35ac4382e31c05e4ef1170293617e6f77bd32b57f5ac617a7cbafa345c2137d8a7b70bf4a3b5c4010d73e7f064a9e5e51eaecf600bb837e0a648359aa4857b0875f14a62c648e7a7a13fac7e466bdd9ac89cf7f1f46081f5d107a6fffa5a0d8c70aa5aeaed3df35e9a5b33cabb98b22bf62d84cf4e57d7683457dbff3542a9299f9acd291225e4e8fc1255b305ca0e1404707053e50d55f69d8b709d9350db4f6f36ecf63037c6e00073df67b610c322e79ae4616314dc47d296b4570b659c21d659375fd1006af4d42838eb20f1682ffd19cf88b603790005b8c5ae4ab9c0415bcb0c8ce4a04d1f7d18fef5e16e9061aa56bc596bcdeae74ccdd1c95ab1607280602f75ddbb3a25cbe9af99197b0dab671a2e1069340cefcefef75e24e222c9e3d1563634eb3f3a8319bfd6f95b3a995d9dcad5a9aa2aea6bf46e6253080d8e4ec3b918d0012fe500edfba67e49d0520be57e7414629bb40e64312fb1d3e26bdef31cc8ec2426f71f91cfee0d3cd9390dceafe615106fd56703b85b377b812b8a6c24c9fc5d0c68464565d9b421a4cd296f00ddd7a062ea16c56232a4d2a69c2817bfa882", + 
"ba1edea16bd7223b3a413af6b392662b9b01eb18f67d41defe1cc4190160cf12c462414df90299b0ca5fbe7e2cf11f902a23de8340d9a190f32de0199ce150f15c6ed658638fe33701e3cee11574865efe768ecefd53ba20833e1300d4cc84bf0291852031fe5ce6b7dbd1bfe46d6887e39b6466b6ef76cd5743349c4109176bd0e60074bb7d8ba1cf47609b628045ed64743be510d90a21b7022f88a08dcabd7bc600c26b228b94ee175268f8206df72f7a708c014d162231e9009af0acacfb115674eb6fbb873e0c47b7b66820c7999143da38a871610614c8fe45a0c0d83020ed7c1e3eb5ee14e3e3bf70917a7d7a96e356e52c3bb33607448f61f592142de315fd29a172ce686b028c47cfb936780e1beebbbd2fca706534060e8d7f2de973005229183cd5ceaef3ceb944ca98562f22deec110ff6993ff9512f9b8e1aac17c08fa0295315229596a87eb0935e9b28ff1aff4d71889fed76fa7021e3494155b3b34f28cf5e717b46351bd9b11b0ddceac9ff3bd0c962ac003f5062acdb50058d50546d71fbb50e21221addd61a36f9ef98611f77d1c3c3ecc344fcdd9008d37ebe15944375ab767b1f937cb647dea26d7c6c8f9832b4f8632febba8abb107c39ba85ce09719eb7d7607ca6d0cc491419f6d3863e5d6f446a3c291da01c16e58e65b4428f8e0c024df391e6864e879bfc749d9175654c04413cbe4a65a35cea5596d6dc2d81800b2458fc75bfec6fedbbba857c24a19a0e747dc1d089b9eebb9b5098b2bcd2720f52d28055611ceb7b26d0c4b2a20c3b3aa9da50872bd95ef8154d6fd28669ffb48792e234a6f8eba3e53b57d994b8efd594045e0b6afb764013c28a23934a1502d0a90fe853e0a95738f1cdff79c7ee4e91d57824cd5cea56e8a45d95ee0cb1fa8911ccbd1e14883873817109f9ca4d21be60e3966541d1e1aaa7de0a57a3437f8a24e41a941d20f03055cd74f4b3048d2d9da7406da46ea82b65fd7b18e9004474583ecc9a2759b12fdd770f47222ef2c02d42ed0d18837a3e259f009d1e", + } + // This has been generated using the Functions Toolkit (https://github.com/goplugin/functions-toolkit/blob/main/src/SecretsManager.ts) and decrypts to the JSON string `{"0x0":"qrvM"}` + DefaultThresholdSecretsHex = 
"0x7b225444483243747874223a2265794a48636d393163434936496c41794e5459694c434a44496a6f69533035305a5559325448593056553168543341766148637955584a4b65545a68626b3177527939794f464e78576a59356158646d636a6c4f535430694c434a4d59574a6c62434936496b464251554642515546425155464251554642515546425155464251554642515546425155464251554642515546425155464251554642515545394969776956534936496b4a45536c6c7a51334e7a623055334d6e6444574846474e557056634770585a573157596e565265544d796431526d4d32786c636c705a647a4671536e6c47627a5256615735744e6d773355456855546e6b7962324e746155686f626c51354d564a6a4e6e5230656c70766147644255326372545430694c434a5658324a6863694936496b4a4961544e69627a5a45536d396a4d324d344d6c46614d5852724c325645536b4a484d336c5a556d783555306834576d684954697472623264575a306f33546e4e456232314b5931646853544979616d63305657644f556c526e57465272655570325458706952306c4a617a466e534851314f4430694c434a46496a6f694d7a524956466c354d544e474b307836596e5a584e7a6c314d6d356c655574514e6b397a656e467859335253513239705a315534534652704e4430694c434a47496a6f69557a5132596d6c6952545a584b314176546d744252445677575459796148426862316c6c6330684853556869556c56614e303155556c6f345554306966513d3d222c2253796d43747874223a2253764237652f4a556a552b433358757873384e5378316967454e517759755051623730306a4a6144222c224e6f6e6365223a224d31714b557a6b306b77374767593538227d" +) diff --git a/core/services/ocr2/plugins/functions/integration_tests/v1/internal/testutils.go b/core/services/ocr2/plugins/functions/integration_tests/v1/internal/testutils.go new file mode 100644 index 00000000..37df66ba --- /dev/null +++ b/core/services/ocr2/plugins/functions/integration_tests/v1/internal/testutils.go @@ -0,0 +1,638 @@ +package testutils + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "io" + "math/big" + "math/rand" + "net/http" + "net/http/httptest" + "net/url" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/commontypes" + confighelper2 "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + ocrtypes2 "github.com/goplugin/libocr/offchainreporting2plus/types" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_allow_list" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_client_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_coordinator" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_router" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_v3_aggregator_contract" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/functions" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/keystest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + functionsConfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" + 
"github.com/goplugin/pluginv3.0/v2/core/services/ocrbootstrap" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +var nilOpts *bind.CallOpts + +func ptr[T any](v T) *T { return &v } + +var allowListPrivateKey = "0xae78c8b502571dba876742437f8bc78b689cf8518356c0921393d89caaf284ce" + +func SetOracleConfig(t *testing.T, b *backends.SimulatedBackend, owner *bind.TransactOpts, coordinatorContract *functions_coordinator.FunctionsCoordinator, oracles []confighelper2.OracleIdentityExtra, batchSize int, functionsPluginConfig *functionsConfig.ReportingPluginConfig) { + S := make([]int, len(oracles)) + for i := 0; i < len(S); i++ { + S[i] = 1 + } + + reportingPluginConfigBytes, err := functionsConfig.EncodeReportingPluginConfig(&functionsConfig.ReportingPluginConfigWrapper{ + Config: functionsPluginConfig, + }) + require.NoError(t, err) + + signersKeys, transmittersAccounts, f, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper2.ContractSetConfigArgsForTests( + 2*time.Second, // deltaProgress + 1*time.Second, // deltaResend + 1*time.Second, // deltaRound + 500*time.Millisecond, // deltaGrace + 2*time.Second, // deltaStage + 5, // RMax (maxRounds) + S, // S (schedule of randomized transmission order) + oracles, + reportingPluginConfigBytes, + 200*time.Millisecond, // maxDurationQuery + 200*time.Millisecond, // maxDurationObservation + 200*time.Millisecond, // maxDurationReport + 200*time.Millisecond, // maxDurationAccept + 200*time.Millisecond, // maxDurationTransmit + 1, // f (max faulty oracles) + nil, // empty onChain config + ) + + var signers []common.Address + var transmitters []common.Address + for i := range signersKeys { + signers = append(signers, common.BytesToAddress(signersKeys[i])) + transmitters = append(transmitters, common.HexToAddress(string(transmittersAccounts[i]))) + } + require.NoError(t, err) + + _, err = coordinatorContract.SetConfig( + owner, + signers, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + 
offchainConfig, + ) + require.NoError(t, err) + CommitWithFinality(b) +} + +func CreateAndFundSubscriptions(t *testing.T, b *backends.SimulatedBackend, owner *bind.TransactOpts, linkToken *link_token_interface.LinkToken, routerContractAddress common.Address, routerContract *functions_router.FunctionsRouter, clientContracts []deployedClientContract, allowListContract *functions_allow_list.TermsOfServiceAllowList) (subscriptionId uint64) { + allowed, err := allowListContract.HasAccess(nilOpts, owner.From, []byte{}) + require.NoError(t, err) + if !allowed { + message, err2 := allowListContract.GetMessage(nilOpts, owner.From, owner.From) + require.NoError(t, err2) + privateKey, err2 := crypto.HexToECDSA(allowListPrivateKey[2:]) + require.NoError(t, err2) + flatSignature, err2 := crypto.Sign(message[:], privateKey) + require.NoError(t, err2) + var r [32]byte + copy(r[:], flatSignature[:32]) + var s [32]byte + copy(s[:], flatSignature[32:64]) + v := flatSignature[65] + _, err2 = allowListContract.AcceptTermsOfService(owner, owner.From, owner.From, r, s, v) + require.NoError(t, err2) + } + + _, err = routerContract.CreateSubscription(owner) + require.NoError(t, err) + + subscriptionID := uint64(1) + + numContracts := len(clientContracts) + for i := 0; i < numContracts; i++ { + _, err = routerContract.AddConsumer(owner, subscriptionID, clientContracts[i].Address) + require.NoError(t, err) + } + + data, err := utils.ABIEncode(`[{"type":"uint64"}]`, subscriptionID) + require.NoError(t, err) + + amount := big.NewInt(0).Mul(big.NewInt(int64(numContracts)), big.NewInt(2e18)) // 2 PLI per client + _, err = linkToken.TransferAndCall(owner, routerContractAddress, amount, data) + require.NoError(t, err) + b.Commit() + + return subscriptionID +} + +const finalityDepth int = 4 + +func CommitWithFinality(b *backends.SimulatedBackend) { + for i := 0; i < finalityDepth; i++ { + b.Commit() + } +} + +type deployedClientContract struct { + Address common.Address + Contract 
*functions_client_example.FunctionsClientExample +} + +type Coordinator struct { + Address common.Address + Contract *functions_coordinator.FunctionsCoordinator +} + +func StartNewChainWithContracts(t *testing.T, nClients int) (*bind.TransactOpts, *backends.SimulatedBackend, *time.Ticker, Coordinator, Coordinator, []deployedClientContract, common.Address, *functions_router.FunctionsRouter, *link_token_interface.LinkToken, common.Address, *functions_allow_list.TermsOfServiceAllowList) { + owner := testutils.MustNewSimTransactor(t) + owner.GasPrice = big.NewInt(int64(DefaultGasPrice)) + sb := new(big.Int) + sb, _ = sb.SetString("100000000000000000000", 10) // 1 eth + genesisData := core.GenesisAlloc{owner.From: {Balance: sb}} + gasLimit := ethconfig.Defaults.Miner.GasCeil * 2 // 60 M blocks + b := backends.NewSimulatedBackend(genesisData, gasLimit) + b.Commit() + + // Deploy PLI token + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(owner, b) + require.NoError(t, err) + + // Deploy mock PLI/ETH price feed + linkEthFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(owner, b, 18, big.NewInt(5_000_000_000_000_000)) + require.NoError(t, err) + + // Deploy Router contract + handleOracleFulfillmentSelectorSlice, err := hex.DecodeString("0ca76175") + require.NoError(t, err) + var handleOracleFulfillmentSelector [4]byte + copy(handleOracleFulfillmentSelector[:], handleOracleFulfillmentSelectorSlice[:4]) + functionsRouterConfig := functions_router.FunctionsRouterConfig{ + MaxConsumersPerSubscription: uint16(100), + AdminFee: big.NewInt(0), + HandleOracleFulfillmentSelector: handleOracleFulfillmentSelector, + MaxCallbackGasLimits: []uint32{300_000, 500_000, 1_000_000}, + GasForCallExactCheck: 5000, + SubscriptionDepositMinimumRequests: 10, + SubscriptionDepositJuels: big.NewInt(9 * 1e18), // 9 PLI + } + routerAddress, _, routerContract, err := functions_router.DeployFunctionsRouter(owner, b, linkAddr, functionsRouterConfig) + 
require.NoError(t, err) + + // Deploy Allow List contract + privateKey, err := crypto.HexToECDSA(allowListPrivateKey[2:]) + proofSignerPublicKey := crypto.PubkeyToAddress(privateKey.PublicKey) + require.NoError(t, err) + allowListConfig := functions_allow_list.TermsOfServiceAllowListConfig{ + Enabled: false, // TODO: true + SignerPublicKey: proofSignerPublicKey, + } + var initialAllowedSenders []common.Address + var initialBlockedSenders []common.Address + allowListAddress, _, allowListContract, err := functions_allow_list.DeployTermsOfServiceAllowList(owner, b, allowListConfig, initialAllowedSenders, initialBlockedSenders) + require.NoError(t, err) + + // Deploy Coordinator contract (matches updateConfig() in FunctionsBilling.sol) + coordinatorConfig := functions_coordinator.FunctionsBillingConfig{ + FeedStalenessSeconds: uint32(86_400), + GasOverheadBeforeCallback: uint32(325_000), + GasOverheadAfterCallback: uint32(50_000), + RequestTimeoutSeconds: uint32(300), + DonFee: big.NewInt(0), + MaxSupportedRequestDataVersion: uint16(1), + FulfillmentGasPriceOverEstimationBP: uint32(1_000), + FallbackNativePerUnitLink: big.NewInt(5_000_000_000_000_000), + MinimumEstimateGasPriceWei: big.NewInt(1_000_000_000), + } + require.NoError(t, err) + coordinatorAddress, _, coordinatorContract, err := functions_coordinator.DeployFunctionsCoordinator(owner, b, routerAddress, coordinatorConfig, linkEthFeedAddr) + require.NoError(t, err) + proposalAddress, _, proposalContract, err := functions_coordinator.DeployFunctionsCoordinator(owner, b, routerAddress, coordinatorConfig, linkEthFeedAddr) + require.NoError(t, err) + + // Deploy Client contracts + clientContracts := []deployedClientContract{} + for i := 0; i < nClients; i++ { + clientContractAddress, _, clientContract, err := functions_client_example.DeployFunctionsClientExample(owner, b, routerAddress) + require.NoError(t, err) + clientContracts = append(clientContracts, deployedClientContract{ + Address: clientContractAddress, + 
Contract: clientContract, + }) + if i%10 == 0 { + // Max 10 requests per block + b.Commit() + } + } + + CommitWithFinality(b) + ticker := time.NewTicker(1 * time.Second) + go func() { + for range ticker.C { + b.Commit() + } + }() + + active := Coordinator{ + Contract: coordinatorContract, + Address: coordinatorAddress, + } + proposed := Coordinator{ + Contract: proposalContract, + Address: proposalAddress, + } + return owner, b, ticker, active, proposed, clientContracts, routerAddress, routerContract, linkToken, allowListAddress, allowListContract +} + +func SetupRouterRoutes(t *testing.T, b *backends.SimulatedBackend, owner *bind.TransactOpts, routerContract *functions_router.FunctionsRouter, coordinatorAddress common.Address, proposedCoordinatorAddress common.Address, allowListAddress common.Address) { + allowListId, err := routerContract.GetAllowListId(nilOpts) + require.NoError(t, err) + var donId [32]byte + copy(donId[:], DefaultDONId) + proposedContractSetIds := []([32]byte){allowListId, donId} + proposedContractSetAddresses := []common.Address{allowListAddress, coordinatorAddress} + _, err = routerContract.ProposeContractsUpdate(owner, proposedContractSetIds, proposedContractSetAddresses) + require.NoError(t, err) + + b.Commit() + + _, err = routerContract.UpdateContracts(owner) + require.NoError(t, err) + b.Commit() + + // prepare next coordinator + proposedContractSetIds = []([32]byte){donId} + proposedContractSetAddresses = []common.Address{proposedCoordinatorAddress} + _, err = routerContract.ProposeContractsUpdate(owner, proposedContractSetIds, proposedContractSetAddresses) + require.NoError(t, err) + b.Commit() +} + +type Node struct { + App *cltest.TestApplication + PeerID string + Transmitter common.Address + Keybundle ocr2key.KeyBundle + OracleIdentity confighelper2.OracleIdentityExtra +} + +func StartNewNode( + t *testing.T, + owner *bind.TransactOpts, + port int, + b *backends.SimulatedBackend, + maxGas uint32, + p2pV2Bootstrappers 
[]commontypes.BootstrapperLocator, + ocr2Keystore []byte, + thresholdKeyShare string, +) *Node { + p2pKey := keystest.NewP2PKeyV2(t) + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Insecure.OCRDevelopmentMode = ptr(true) + + c.Feature.LogPoller = ptr(true) + + c.OCR.Enabled = ptr(false) + c.OCR2.Enabled = ptr(true) + + c.P2P.PeerID = ptr(p2pKey.PeerID()) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.DeltaDial = commonconfig.MustNewDuration(500 * time.Millisecond) + c.P2P.V2.DeltaReconcile = commonconfig.MustNewDuration(5 * time.Second) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", port)} + if len(p2pV2Bootstrappers) > 0 { + c.P2P.V2.DefaultBootstrappers = &p2pV2Bootstrappers + } + + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + c.EVM[0].Transactions.ForwardersEnabled = ptr(false) + c.EVM[0].GasEstimator.LimitDefault = ptr(maxGas) + c.EVM[0].GasEstimator.Mode = ptr("FixedPrice") + c.EVM[0].GasEstimator.PriceDefault = assets.NewWei(big.NewInt(int64(DefaultGasPrice))) + + if len(thresholdKeyShare) > 0 { + s.Threshold.ThresholdKeyShare = models.NewSecret(thresholdKeyShare) + } + }) + + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, b, p2pKey) + + sendingKeys, err := app.KeyStore.Eth().EnabledKeysForChain(testutils.SimulatedChainID) + require.NoError(t, err) + require.Len(t, sendingKeys, 1) + transmitter := sendingKeys[0].Address + + // fund the transmitter address + n, err := b.NonceAt(testutils.Context(t), owner.From, nil) + require.NoError(t, err) + + tx := cltest.NewLegacyTransaction( + n, transmitter, + assets.Ether(1).ToInt(), + 21000, + assets.GWei(1).ToInt(), + nil) + signedTx, err := owner.Signer(owner.From, tx) + require.NoError(t, err) + err = b.SendTransaction(testutils.Context(t), signedTx) + require.NoError(t, err) + b.Commit() + + var kb ocr2key.KeyBundle + if ocr2Keystore != nil { + kb, err = 
app.GetKeyStore().OCR2().Import(ocr2Keystore, "testPassword") + } else { + kb, err = app.GetKeyStore().OCR2().Create("evm") + } + require.NoError(t, err) + + err = app.Start(testutils.Context(t)) + require.NoError(t, err) + + return &Node{ + App: app, + PeerID: p2pKey.PeerID().Raw(), + Transmitter: transmitter, + Keybundle: kb, + OracleIdentity: confighelper2.OracleIdentityExtra{ + OracleIdentity: confighelper2.OracleIdentity{ + OnchainPublicKey: kb.PublicKey(), + TransmitAccount: ocrtypes2.Account(transmitter.String()), + OffchainPublicKey: kb.OffchainPublicKey(), + PeerID: p2pKey.PeerID().Raw(), + }, + ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(), + }, + } +} + +func AddBootstrapJob(t *testing.T, app *cltest.TestApplication, contractAddress common.Address) job.Job { + job, err := ocrbootstrap.ValidatedBootstrapSpecToml(fmt.Sprintf(` + type = "bootstrap" + name = "functions-bootstrap" + schemaVersion = 1 + relay = "evm" + contractConfigConfirmations = 1 + contractConfigTrackerPollInterval = "1s" + contractID = "%s" + + [relayConfig] + chainID = 1337 + fromBlock = 1 + donID = "%s" + contractVersion = 1 + contractUpdateCheckFrequencySec = 1 + + `, contractAddress, DefaultDONId)) + require.NoError(t, err) + err = app.AddJobV2(testutils.Context(t), &job) + require.NoError(t, err) + return job +} + +func AddOCR2Job(t *testing.T, app *cltest.TestApplication, contractAddress common.Address, keyBundleID string, transmitter common.Address, bridgeURL string) job.Job { + u, err := url.Parse(bridgeURL) + require.NoError(t, err) + require.NoError(t, app.BridgeORM().CreateBridgeType(&bridges.BridgeType{ + Name: "ea_bridge", + URL: models.WebURL(*u), + })) + job, err := validate.ValidatedOracleSpecToml(app.Config.OCR2(), app.Config.Insecure(), fmt.Sprintf(` + type = "offchainreporting2" + name = "functions-node" + schemaVersion = 1 + relay = "evm" + contractID = "%s" + ocrKeyBundleID = "%s" + transmitterID = "%s" + contractConfigConfirmations = 1 + 
contractConfigTrackerPollInterval = "1s" + pluginType = "functions" + observationSource = """ + run_computation [type="bridge" name="ea_bridge" requestData="{\\"note\\": \\"observationSource is unused but the bridge is required\\"}"] + run_computation + """ + + [relayConfig] + chainID = 1337 + fromBlock = 1 + + [pluginConfig] + donID = "%s" + contractVersion = 1 + minIncomingConfirmations = 3 + requestTimeoutSec = 300 + requestTimeoutCheckFrequencySec = 10 + requestTimeoutBatchLookupSize = 20 + listenerEventHandlerTimeoutSec = 120 + listenerEventsCheckFrequencyMillis = 1000 + maxRequestSizeBytes = 30720 + contractUpdateCheckFrequencySec = 1 + + [pluginConfig.decryptionQueueConfig] + completedCacheTimeoutSec = 300 + maxCiphertextBytes = 10_000 + maxCiphertextIdLength = 100 + maxQueueLength = 100 + decryptRequestTimeoutSec = 100 + + [pluginConfig.s4Constraints] + maxPayloadSizeBytes = 10_1000 + maxSlotsPerUser = 10 + `, contractAddress, keyBundleID, transmitter, DefaultDONId)) + require.NoError(t, err) + err = app.AddJobV2(testutils.Context(t), &job) + require.NoError(t, err) + return job +} + +func StartNewMockEA(t *testing.T) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + b, err := io.ReadAll(req.Body) + require.NoError(t, err) + var jsonMap map[string]any + require.NoError(t, json.Unmarshal(b, &jsonMap)) + var responsePayload []byte + if jsonMap["endpoint"].(string) == "lambda" { + responsePayload = mockEALambdaExecutionResponse(t, jsonMap) + } else if jsonMap["endpoint"].(string) == "fetcher" { + responsePayload = mockEASecretsFetchResponse(t, jsonMap) + } else { + require.Fail(t, "unknown external adapter endpoint '%s'", jsonMap["endpoint"].(string)) + } + res.WriteHeader(http.StatusOK) + _, err = res.Write(responsePayload) + require.NoError(t, err) + })) +} + +func mockEALambdaExecutionResponse(t *testing.T, request map[string]any) []byte { + data := request["data"].(map[string]any) + 
require.Equal(t, functions.LanguageJavaScript, int(data["language"].(float64))) + require.Equal(t, functions.LocationInline, int(data["codeLocation"].(float64))) + if len(request["nodeProvidedSecrets"].(string)) > 0 { + require.Equal(t, functions.LocationRemote, int(data["secretsLocation"].(float64))) + require.Equal(t, fmt.Sprintf(`{"0x0":"%s"}`, DefaultSecretsBase64), request["nodeProvidedSecrets"].(string)) + } + args := data["args"].([]interface{}) + require.Equal(t, 2, len(args)) + require.Equal(t, DefaultArg1, args[0].(string)) + require.Equal(t, DefaultArg2, args[1].(string)) + source := data["source"].(string) + // prepend "0xab" to source and return as result + return []byte(fmt.Sprintf(`{"result": "success", "statusCode": 200, "data": {"result": "0xab%s", "error": ""}}`, source)) +} + +func mockEASecretsFetchResponse(t *testing.T, request map[string]any) []byte { + data := request["data"].(map[string]any) + require.Equal(t, "fetchThresholdEncryptedSecrets", data["requestType"]) + require.Equal(t, DefaultSecretsUrlsBase64, data["encryptedSecretsUrls"]) + return []byte(fmt.Sprintf(`{"result": "success", "statusCode": 200, "data": {"result": "%s", "error": ""}}`, DefaultThresholdSecretsHex)) +} + +// Mock EA prepends 0xab to source and user contract crops the answer to first 32 bytes +func GetExpectedResponse(source []byte) [32]byte { + var resp [32]byte + resp[0] = 0xab + for j := 0; j < 31; j++ { + if j >= len(source) { + break + } + resp[j+1] = source[j] + } + return resp +} + +func CreateFunctionsNodes( + t *testing.T, + owner *bind.TransactOpts, + b *backends.SimulatedBackend, + routerAddress common.Address, + nOracleNodes int, + maxGas int, + ocr2Keystores [][]byte, + thresholdKeyShares []string, +) (bootstrapNode *Node, oracleNodes []*cltest.TestApplication, oracleIdentites []confighelper2.OracleIdentityExtra) { + t.Helper() + + if len(thresholdKeyShares) != 0 && len(thresholdKeyShares) != nOracleNodes { + require.Fail(t, "thresholdKeyShares must be 
empty or have length equal to nOracleNodes") + } + if len(ocr2Keystores) != 0 && len(ocr2Keystores) != nOracleNodes { + require.Fail(t, "ocr2Keystores must be empty or have length equal to nOracleNodes") + } + if len(ocr2Keystores) != len(thresholdKeyShares) { + require.Fail(t, "ocr2Keystores and thresholdKeyShares must have the same length") + } + + bootstrapPort := freeport.GetOne(t) + bootstrapNode = StartNewNode(t, owner, bootstrapPort, b, uint32(maxGas), nil, nil, "") + AddBootstrapJob(t, bootstrapNode.App, routerAddress) + + // oracle nodes with jobs, bridges and mock EAs + ports := freeport.GetN(t, nOracleNodes) + for i := 0; i < nOracleNodes; i++ { + var thresholdKeyShare string + if len(thresholdKeyShares) == 0 { + thresholdKeyShare = "" + } else { + thresholdKeyShare = thresholdKeyShares[i] + } + var ocr2Keystore []byte + if len(ocr2Keystores) == 0 { + ocr2Keystore = nil + } else { + ocr2Keystore = ocr2Keystores[i] + } + oracleNode := StartNewNode(t, owner, ports[i], b, uint32(maxGas), []commontypes.BootstrapperLocator{ + {PeerID: bootstrapNode.PeerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapPort)}}, + }, ocr2Keystore, thresholdKeyShare) + oracleNodes = append(oracleNodes, oracleNode.App) + oracleIdentites = append(oracleIdentites, oracleNode.OracleIdentity) + + ea := StartNewMockEA(t) + t.Cleanup(ea.Close) + + _ = AddOCR2Job(t, oracleNodes[i], routerAddress, oracleNode.Keybundle.ID(), oracleNode.Transmitter, ea.URL) + } + + return bootstrapNode, oracleNodes, oracleIdentites +} + +func ClientTestRequests( + t *testing.T, + owner *bind.TransactOpts, + b *backends.SimulatedBackend, + linkToken *link_token_interface.LinkToken, + routerAddress common.Address, + routerContract *functions_router.FunctionsRouter, + allowListContract *functions_allow_list.TermsOfServiceAllowList, + clientContracts []deployedClientContract, + requestLenBytes int, + expectedSecrets []byte, + subscriptionId uint64, + timeout time.Duration, +) { + t.Helper() + var donId 
[32]byte + copy(donId[:], []byte(DefaultDONId)) + // send requests + requestSources := make([][]byte, len(clientContracts)) + rnd := rand.New(rand.NewSource(666)) + for i, client := range clientContracts { + requestSources[i] = make([]byte, requestLenBytes) + for j := 0; j < requestLenBytes; j++ { + requestSources[i][j] = byte(rnd.Uint32() % 256) + } + _, err := client.Contract.SendRequest( + owner, + hex.EncodeToString(requestSources[i]), + expectedSecrets, + []string{DefaultArg1, DefaultArg2}, + subscriptionId, + donId, + ) + require.NoError(t, err) + } + CommitWithFinality(b) + + // validate that all client contracts got correct responses to their requests + var wg sync.WaitGroup + for i := 0; i < len(clientContracts); i++ { + ic := i + wg.Add(1) + go func() { + defer wg.Done() + gomega.NewGomegaWithT(t).Eventually(func() [32]byte { + answer, err := clientContracts[ic].Contract.SLastResponse(nil) + require.NoError(t, err) + return answer + }, timeout, 1*time.Second).Should(gomega.Equal(GetExpectedResponse(requestSources[ic]))) + }() + } + wg.Wait() +} diff --git a/core/services/ocr2/plugins/functions/plugin.go b/core/services/ocr2/plugins/functions/plugin.go new file mode 100644 index 00000000..cda91594 --- /dev/null +++ b/core/services/ocr2/plugins/functions/plugin.go @@ -0,0 +1,211 @@ +package functions + +import ( + "encoding/json" + "math/big" + "slices" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/jmoiron/sqlx" + "github.com/jonboulle/clockwork" + "github.com/pkg/errors" + + "github.com/goplugin/libocr/commontypes" + libocr2 "github.com/goplugin/libocr/offchainreporting2plus" + + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/functions" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/connector" + hc 
"github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/common" + gwAllowlist "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/allowlist" + gwSubscriptions "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/subscriptions" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + s4_plugin "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/s4" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/threshold" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + evmrelayTypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/services/s4" +) + +type FunctionsServicesConfig struct { + Job job.Job + JobORM job.ORM + BridgeORM bridges.ORM + QConfig pg.QConfig + DB *sqlx.DB + Chain legacyevm.Chain + ContractID string + Logger logger.Logger + MailMon *mailbox.Monitor + URLsMonEndpoint commontypes.MonitoringEndpoint + EthKeystore keystore.Eth + ThresholdKeyShare []byte + LogPollerWrapper evmrelayTypes.LogPollerWrapper +} + +const ( + FunctionsBridgeName string = "ea_bridge" + FunctionsS4Namespace string = "functions" + MaxAdapterResponseBytes int64 = 1_000_000 + DefaultOffchainTransmitterChannelSize uint32 = 1000 +) + +// Create all OCR2 plugin Oracles and all extra services needed to run a Functions job. 
+func NewFunctionsServices(functionsOracleArgs, thresholdOracleArgs, s4OracleArgs *libocr2.OCR2OracleArgs, conf *FunctionsServicesConfig) ([]job.ServiceCtx, error) { + pluginORM := functions.NewORM(conf.DB, conf.Logger, conf.QConfig, common.HexToAddress(conf.ContractID)) + s4ORM := s4.NewPostgresORM(conf.DB, conf.Logger, conf.QConfig, s4.SharedTableName, FunctionsS4Namespace) + + var pluginConfig config.PluginConfig + if err := json.Unmarshal(conf.Job.OCR2OracleSpec.PluginConfig.Bytes(), &pluginConfig); err != nil { + return nil, err + } + if err := config.ValidatePluginConfig(pluginConfig); err != nil { + return nil, err + } + + allServices := []job.ServiceCtx{} + + var decryptor threshold.Decryptor + // thresholdOracleArgs nil check will be removed once the Threshold plugin is fully integrated w/ Functions + if len(conf.ThresholdKeyShare) > 0 && thresholdOracleArgs != nil && pluginConfig.DecryptionQueueConfig != nil { + decryptionQueue := threshold.NewDecryptionQueue( + int(pluginConfig.DecryptionQueueConfig.MaxQueueLength), + int(pluginConfig.DecryptionQueueConfig.MaxCiphertextBytes), + int(pluginConfig.DecryptionQueueConfig.MaxCiphertextIdLength), + time.Duration(pluginConfig.DecryptionQueueConfig.CompletedCacheTimeoutSec)*time.Second, + conf.Logger.Named("DecryptionQueue"), + ) + decryptor = decryptionQueue + thresholdServicesConfig := threshold.ThresholdServicesConfig{ + DecryptionQueue: decryptionQueue, + KeyshareWithPubKey: conf.ThresholdKeyShare, + ConfigParser: config.ThresholdConfigParser{}, + } + thresholdService, err2 := threshold.NewThresholdService(thresholdOracleArgs, &thresholdServicesConfig) + if err2 != nil { + return nil, errors.Wrap(err2, "error calling NewThresholdServices") + } + allServices = append(allServices, thresholdService) + } else { + conf.Logger.Warn("Threshold configuration is incomplete. 
Threshold secrets decryption plugin is disabled.") + } + + var s4Storage s4.Storage + if pluginConfig.S4Constraints != nil { + s4Storage = s4.NewStorage(conf.Logger, *pluginConfig.S4Constraints, s4ORM, clockwork.NewRealClock()) + } + + offchainTransmitter := functions.NewOffchainTransmitter(DefaultOffchainTransmitterChannelSize) + listenerLogger := conf.Logger.Named("FunctionsListener") + bridgeAccessor := functions.NewBridgeAccessor(conf.BridgeORM, FunctionsBridgeName, MaxAdapterResponseBytes) + functionsListener := functions.NewFunctionsListener( + conf.Job, + conf.Chain.Client(), + conf.Job.OCR2OracleSpec.ContractID, + bridgeAccessor, + pluginORM, + pluginConfig, + s4Storage, + listenerLogger, + conf.URLsMonEndpoint, + decryptor, + conf.LogPollerWrapper, + ) + allServices = append(allServices, functionsListener) + + functionsOracleArgs.ReportingPluginFactory = FunctionsReportingPluginFactory{ + Logger: functionsOracleArgs.Logger, + PluginORM: pluginORM, + JobID: conf.Job.ExternalJobID, + ContractVersion: pluginConfig.ContractVersion, + OffchainTransmitter: offchainTransmitter, + } + functionsReportingPluginOracle, err := libocr2.NewOracle(*functionsOracleArgs) + if err != nil { + return nil, errors.Wrap(err, "failed to call NewOracle to create a Functions Reporting Plugin") + } + allServices = append(allServices, job.NewServiceAdapter(functionsReportingPluginOracle)) + + if pluginConfig.GatewayConnectorConfig != nil && s4Storage != nil && pluginConfig.OnchainAllowlist != nil && pluginConfig.RateLimiter != nil && pluginConfig.OnchainSubscriptions != nil { + allowlistORM, err := gwAllowlist.NewORM(conf.DB, conf.Logger, conf.QConfig, pluginConfig.OnchainAllowlist.ContractAddress) + if err != nil { + return nil, errors.Wrap(err, "failed to create allowlist ORM") + } + allowlist, err2 := gwAllowlist.NewOnchainAllowlist(conf.Chain.Client(), *pluginConfig.OnchainAllowlist, allowlistORM, conf.Logger) + if err2 != nil { + return nil, errors.Wrap(err, "failed to create 
OnchainAllowlist") + } + rateLimiter, err2 := hc.NewRateLimiter(*pluginConfig.RateLimiter) + if err2 != nil { + return nil, errors.Wrap(err, "failed to create a RateLimiter") + } + subscriptionsORM, err := gwSubscriptions.NewORM(conf.DB, conf.Logger, conf.QConfig, pluginConfig.OnchainSubscriptions.ContractAddress) + if err != nil { + return nil, errors.Wrap(err, "failed to create subscriptions ORM") + } + subscriptions, err2 := gwSubscriptions.NewOnchainSubscriptions(conf.Chain.Client(), *pluginConfig.OnchainSubscriptions, subscriptionsORM, conf.Logger) + if err2 != nil { + return nil, errors.Wrap(err, "failed to create a OnchainSubscriptions") + } + connectorLogger := conf.Logger.Named("GatewayConnector").With("jobName", conf.Job.PipelineSpec.JobName) + connector, err2 := NewConnector(&pluginConfig, conf.EthKeystore, conf.Chain.ID(), s4Storage, allowlist, rateLimiter, subscriptions, functionsListener, offchainTransmitter, connectorLogger) + if err2 != nil { + return nil, errors.Wrap(err, "failed to create a GatewayConnector") + } + allServices = append(allServices, connector) + } else { + listenerLogger.Warn("Insufficient config, GatewayConnector will not be enabled") + } + + if s4OracleArgs != nil && pluginConfig.S4Constraints != nil { + s4OracleArgs.ReportingPluginFactory = s4_plugin.S4ReportingPluginFactory{ + Logger: s4OracleArgs.Logger, + ORM: s4ORM, + ConfigDecoder: config.S4ConfigDecoder, + } + s4ReportingPluginOracle, err := libocr2.NewOracle(*s4OracleArgs) + if err != nil { + return nil, errors.Wrap(err, "failed to call NewOracle to create a S4 Reporting Plugin") + } + allServices = append(allServices, job.NewServiceAdapter(s4ReportingPluginOracle)) + } else { + listenerLogger.Warn("s4OracleArgs is nil or S4Constraints are not configured. 
S4 plugin is disabled.") + } + + return allServices, nil +} + +func NewConnector(pluginConfig *config.PluginConfig, ethKeystore keystore.Eth, chainID *big.Int, s4Storage s4.Storage, allowlist gwAllowlist.OnchainAllowlist, rateLimiter *hc.RateLimiter, subscriptions gwSubscriptions.OnchainSubscriptions, listener functions.FunctionsListener, offchainTransmitter functions.OffchainTransmitter, lggr logger.Logger) (connector.GatewayConnector, error) { + enabledKeys, err := ethKeystore.EnabledKeysForChain(chainID) + if err != nil { + return nil, err + } + configuredNodeAddress := common.HexToAddress(pluginConfig.GatewayConnectorConfig.NodeAddress) + idx := slices.IndexFunc(enabledKeys, func(key ethkey.KeyV2) bool { return key.Address == configuredNodeAddress }) + if idx == -1 { + return nil, errors.New("key for configured node address not found") + } + signerKey := enabledKeys[idx].ToEcdsaPrivKey() + if enabledKeys[idx].ID() != pluginConfig.GatewayConnectorConfig.NodeAddress { + return nil, errors.New("node address mismatch") + } + + handler, err := functions.NewFunctionsConnectorHandler(pluginConfig, signerKey, s4Storage, allowlist, rateLimiter, subscriptions, listener, offchainTransmitter, lggr) + if err != nil { + return nil, err + } + connector, err := connector.NewGatewayConnector(pluginConfig.GatewayConnectorConfig, handler, handler, clockwork.NewRealClock(), lggr) + if err != nil { + return nil, err + } + handler.SetConnector(connector) + return connector, nil +} diff --git a/core/services/ocr2/plugins/functions/plugin_test.go b/core/services/ocr2/plugins/functions/plugin_test.go new file mode 100644 index 00000000..3229e605 --- /dev/null +++ b/core/services/ocr2/plugins/functions/plugin_test.go @@ -0,0 +1,76 @@ +package functions_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + sfmocks 
"github.com/goplugin/pluginv3.0/v2/core/services/functions/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/connector" + hc "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/common" + gfaMocks "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/allowlist/mocks" + gfsMocks "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions/subscriptions/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + ksmocks "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + s4mocks "github.com/goplugin/pluginv3.0/v2/core/services/s4/mocks" +) + +func TestNewConnector_Success(t *testing.T) { + t.Parallel() + keyV2, err := ethkey.NewV2() + require.NoError(t, err) + + gwcCfg := &connector.ConnectorConfig{ + NodeAddress: keyV2.Address.String(), + DonId: "my_don", + } + chainID := big.NewInt(80001) + ethKeystore := ksmocks.NewEth(t) + s4Storage := s4mocks.NewStorage(t) + allowlist := gfaMocks.NewOnchainAllowlist(t) + subscriptions := gfsMocks.NewOnchainSubscriptions(t) + rateLimiter, err := hc.NewRateLimiter(hc.RateLimiterConfig{GlobalRPS: 100.0, GlobalBurst: 100, PerSenderRPS: 100.0, PerSenderBurst: 100}) + require.NoError(t, err) + listener := sfmocks.NewFunctionsListener(t) + offchainTransmitter := sfmocks.NewOffchainTransmitter(t) + ethKeystore.On("EnabledKeysForChain", mock.Anything).Return([]ethkey.KeyV2{keyV2}, nil) + config := &config.PluginConfig{ + GatewayConnectorConfig: gwcCfg, + } + _, err = functions.NewConnector(config, ethKeystore, chainID, s4Storage, allowlist, rateLimiter, subscriptions, listener, offchainTransmitter, logger.TestLogger(t)) + require.NoError(t, err) +} + +func TestNewConnector_NoKeyForConfiguredAddress(t *testing.T) { + t.Parallel() + addresses := []string{ + 
"0x00000000DE801ceE9471ADf23370c48b011f82a6", + "0x11111111DE801ceE9471ADf23370c48b011f82a6", + } + + gwcCfg := &connector.ConnectorConfig{ + NodeAddress: addresses[0], + DonId: "my_don", + } + chainID := big.NewInt(80001) + ethKeystore := ksmocks.NewEth(t) + s4Storage := s4mocks.NewStorage(t) + allowlist := gfaMocks.NewOnchainAllowlist(t) + subscriptions := gfsMocks.NewOnchainSubscriptions(t) + rateLimiter, err := hc.NewRateLimiter(hc.RateLimiterConfig{GlobalRPS: 100.0, GlobalBurst: 100, PerSenderRPS: 100.0, PerSenderBurst: 100}) + require.NoError(t, err) + listener := sfmocks.NewFunctionsListener(t) + offchainTransmitter := sfmocks.NewOffchainTransmitter(t) + ethKeystore.On("EnabledKeysForChain", mock.Anything).Return([]ethkey.KeyV2{{Address: common.HexToAddress(addresses[1])}}, nil) + config := &config.PluginConfig{ + GatewayConnectorConfig: gwcCfg, + } + _, err = functions.NewConnector(config, ethKeystore, chainID, s4Storage, allowlist, rateLimiter, subscriptions, listener, offchainTransmitter, logger.TestLogger(t)) + require.Error(t, err) +} diff --git a/core/services/ocr2/plugins/functions/reporting.go b/core/services/ocr2/plugins/functions/reporting.go new file mode 100644 index 00000000..4539f8c2 --- /dev/null +++ b/core/services/ocr2/plugins/functions/reporting.go @@ -0,0 +1,538 @@ +package functions + +import ( + "bytes" + "context" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "google.golang.org/protobuf/proto" + + "github.com/goplugin/libocr/commontypes" + "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/functions" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/encoding" + 
"github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type FunctionsReportingPluginFactory struct { + Logger commontypes.Logger + PluginORM functions.ORM + JobID uuid.UUID + ContractVersion uint32 + OffchainTransmitter functions.OffchainTransmitter +} + +var _ types.ReportingPluginFactory = (*FunctionsReportingPluginFactory)(nil) + +type functionsReporting struct { + logger commontypes.Logger + pluginORM functions.ORM + jobID uuid.UUID + reportCodec encoding.ReportCodec + genericConfig *types.ReportingPluginConfig + specificConfig *config.ReportingPluginConfigWrapper + contractVersion uint32 + offchainTransmitter functions.OffchainTransmitter +} + +var _ types.ReportingPlugin = &functionsReporting{} + +var ( + promReportingPlugins = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "functions_reporting_plugin_restarts", + Help: "Metric to track number of reporting plugin restarts", + }, []string{"jobID"}) + + promReportingPluginsQuery = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "functions_reporting_plugin_query", + Help: "Metric to track number of reporting plugin Query calls", + }, []string{"jobID"}) + + promReportingPluginsObservation = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "functions_reporting_plugin_observation", + Help: "Metric to track number of reporting plugin Observation calls", + }, []string{"jobID"}) + + promReportingPluginsReport = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "functions_reporting_plugin_report", + Help: "Metric to track number of reporting plugin Report calls", + }, []string{"jobID"}) + + promReportingPluginsReportNumObservations = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "functions_reporting_plugin_report_num_observations", + Help: "Metric to track number of observations available in the report phase", + }, []string{"jobID"}) + + promReportingAcceptReports = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "functions_reporting_plugin_accept", + Help: "Metric to track 
number of accepting reports", + }, []string{"jobID"}) + + promReportingTransmitReports = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "functions_reporting_plugin_transmit", + Help: "Metric to track number of transmiting reports", + }, []string{"jobID"}) + + promReportingTransmitBatchSize = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "functions_reporting_plugin_transmit_batch_size", + Help: "Metric to track batch size of transmitting reports", + Buckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 100, 1000}, + }, []string{"jobID"}) +) + +func formatRequestId(requestId []byte) string { + return fmt.Sprintf("0x%x", requestId) +} + +// NewReportingPlugin complies with ReportingPluginFactory +func (f FunctionsReportingPluginFactory) NewReportingPlugin(rpConfig types.ReportingPluginConfig) (types.ReportingPlugin, types.ReportingPluginInfo, error) { + pluginConfig, err := config.DecodeReportingPluginConfig(rpConfig.OffchainConfig) + if err != nil { + f.Logger.Error("unable to decode reporting plugin config", commontypes.LogFields{ + "digest": rpConfig.ConfigDigest.String(), + }) + return nil, types.ReportingPluginInfo{}, err + } + codec, err := encoding.NewReportCodec(f.ContractVersion) + if err != nil { + f.Logger.Error("unable to create a report codec object", commontypes.LogFields{}) + return nil, types.ReportingPluginInfo{}, err + } + info := types.ReportingPluginInfo{ + Name: "functionsReporting", + UniqueReports: pluginConfig.Config.GetUniqueReports(), // Enforces (N+F+1)/2 signatures. Must match setting in OCR2Base.sol. 
+ Limits: types.ReportingPluginLimits{ + MaxQueryLength: int(pluginConfig.Config.GetMaxQueryLengthBytes()), + MaxObservationLength: int(pluginConfig.Config.GetMaxObservationLengthBytes()), + MaxReportLength: int(pluginConfig.Config.GetMaxReportLengthBytes()), + }, + } + plugin := functionsReporting{ + logger: f.Logger, + pluginORM: f.PluginORM, + jobID: f.JobID, + reportCodec: codec, + genericConfig: &rpConfig, + specificConfig: pluginConfig, + contractVersion: f.ContractVersion, + offchainTransmitter: f.OffchainTransmitter, + } + promReportingPlugins.WithLabelValues(f.JobID.String()).Inc() + return &plugin, info, nil +} + +// Check if requestCoordinator can be included together with reportCoordinator. +// Return new reportCoordinator (if previous was nil) and error. +func ShouldIncludeCoordinator(requestCoordinator *common.Address, reportCoordinator *common.Address) (*common.Address, error) { + if requestCoordinator == nil || *requestCoordinator == (common.Address{}) { + return reportCoordinator, errors.New("missing/zero request coordinator address") + } + if reportCoordinator == nil { + return requestCoordinator, nil + } + if *reportCoordinator != *requestCoordinator { + return reportCoordinator, errors.New("coordinator contract address mismatch") + } + return reportCoordinator, nil +} + +// Query() complies with ReportingPlugin +func (r *functionsReporting) Query(ctx context.Context, ts types.ReportTimestamp) (types.Query, error) { + r.logger.Debug("FunctionsReporting Query start", commontypes.LogFields{ + "epoch": ts.Epoch, + "round": ts.Round, + "oracleID": r.genericConfig.OracleID, + }) + maxBatchSize := r.specificConfig.Config.GetMaxRequestBatchSize() + results, err := r.pluginORM.FindOldestEntriesByState(functions.RESULT_READY, maxBatchSize, pg.WithParentCtx(ctx)) + if err != nil { + return nil, err + } + + queryProto := encoding.Query{} + var idStrs []string + var reportCoordinator *common.Address + for _, result := range results { + result := result + 
reportCoordinator, err = ShouldIncludeCoordinator(result.CoordinatorContractAddress, reportCoordinator) + if err != nil { + r.logger.Debug("FunctionsReporting Query: skipping request with mismatched coordinator contract address", commontypes.LogFields{ + "requestID": formatRequestId(result.RequestID[:]), + "requestCoordinator": result.CoordinatorContractAddress, + "reportCoordinator": reportCoordinator, + "error": err, + }) + continue + } + queryProto.RequestIDs = append(queryProto.RequestIDs, result.RequestID[:]) + idStrs = append(idStrs, formatRequestId(result.RequestID[:])) + } + // The ID batch built in Query can exceed maxReportTotalCallbackGas. This is done + // on purpose as some requests may (repeatedly) fail aggregation and we don't want + // them to block processing of other requests. Final total callback gas limit + // is enforced in the Report() phase. + r.logger.Debug("FunctionsReporting Query end", commontypes.LogFields{ + "epoch": ts.Epoch, + "round": ts.Round, + "oracleID": r.genericConfig.OracleID, + "queryLen": len(queryProto.RequestIDs), + "requestIDs": idStrs, + }) + promReportingPluginsQuery.WithLabelValues(r.jobID.String()).Inc() + return proto.Marshal(&queryProto) +} + +// Observation() complies with ReportingPlugin +func (r *functionsReporting) Observation(ctx context.Context, ts types.ReportTimestamp, query types.Query) (types.Observation, error) { + r.logger.Debug("FunctionsReporting Observation start", commontypes.LogFields{ + "epoch": ts.Epoch, + "round": ts.Round, + "oracleID": r.genericConfig.OracleID, + }) + + queryProto := &encoding.Query{} + err := proto.Unmarshal(query, queryProto) + if err != nil { + return nil, err + } + + observationProto := encoding.Observation{} + processedIds := make(map[[32]byte]bool) + var idStrs []string + for _, id := range queryProto.RequestIDs { + id, err := encoding.SliceToByte32(id) + if err != nil { + r.logger.Error("FunctionsReporting Observation invalid ID", commontypes.LogFields{ + "requestID": 
// Observation() complies with ReportingPlugin
// For every ID in the query, report local results that are in RESULT_READY
// state. Invalid, duplicate, unknown and non-ready IDs are skipped (logged only).
func (r *functionsReporting) Observation(ctx context.Context, ts types.ReportTimestamp, query types.Query) (types.Observation, error) {
	r.logger.Debug("FunctionsReporting Observation start", commontypes.LogFields{
		"epoch":    ts.Epoch,
		"round":    ts.Round,
		"oracleID": r.genericConfig.OracleID,
	})

	queryProto := &encoding.Query{}
	err := proto.Unmarshal(query, queryProto)
	if err != nil {
		return nil, err
	}

	observationProto := encoding.Observation{}
	// Tracks IDs already handled in this observation to drop duplicates.
	processedIds := make(map[[32]byte]bool)
	var idStrs []string
	for _, id := range queryProto.RequestIDs {
		// Shadows the raw []byte ID with the validated fixed-size form.
		id, err := encoding.SliceToByte32(id)
		if err != nil {
			r.logger.Error("FunctionsReporting Observation invalid ID", commontypes.LogFields{
				"requestID": formatRequestId(id[:]),
				"err":       err,
			})
			continue
		}
		if _, ok := processedIds[id]; ok {
			r.logger.Error("FunctionsReporting Observation duplicate ID in query", commontypes.LogFields{
				"requestID": formatRequestId(id[:]),
			})
			continue
		}
		processedIds[id] = true
		localResult, err2 := r.pluginORM.FindById(id, pg.WithParentCtx(ctx))
		if err2 != nil {
			r.logger.Debug("FunctionsReporting Observation can't find request from query", commontypes.LogFields{
				"requestID": formatRequestId(id[:]),
				"err":       err2,
			})
			continue
		}
		// NOTE: ignoring TIMED_OUT requests, which potentially had ready results
		if localResult.State == functions.RESULT_READY {
			resultProto := encoding.ProcessedRequest{
				RequestID:       localResult.RequestID[:],
				Result:          localResult.Result,
				Error:           localResult.Error,
				OnchainMetadata: localResult.OnchainMetadata,
			}
			// v1 requests must carry a callback gas limit and coordinator address.
			if localResult.CallbackGasLimit == nil || localResult.CoordinatorContractAddress == nil {
				r.logger.Error("FunctionsReporting Observation missing required v1 fields", commontypes.LogFields{
					"requestID": formatRequestId(id[:]),
				})
				continue
			}
			resultProto.CallbackGasLimit = *localResult.CallbackGasLimit
			resultProto.CoordinatorContract = localResult.CoordinatorContractAddress[:]
			observationProto.ProcessedRequests = append(observationProto.ProcessedRequests, &resultProto)
			idStrs = append(idStrs, formatRequestId(localResult.RequestID[:]))
		}
	}
	r.logger.Debug("FunctionsReporting Observation end", commontypes.LogFields{
		"epoch":          ts.Epoch,
		"round":          ts.Round,
		"oracleID":       r.genericConfig.OracleID,
		"nReadyRequests": len(observationProto.ProcessedRequests),
		"requestIDs":     idStrs,
	})

	promReportingPluginsObservation.WithLabelValues(r.jobID.String()).Inc()
	return proto.Marshal(&observationProto)
}
// Report() complies with ReportingPlugin
// Groups observations by request ID from the query, aggregates each group
// (requires at least F+1 matching observations via CanAggregate), enforces a
// total callback gas budget and a single coordinator address, then ABI-encodes
// the surviving results into a report. Returns (false, nil, nil) when nothing
// aggregates.
func (r *functionsReporting) Report(ctx context.Context, ts types.ReportTimestamp, query types.Query, obs []types.AttributedObservation) (bool, types.Report, error) {
	r.logger.Debug("FunctionsReporting Report start", commontypes.LogFields{
		"epoch":         ts.Epoch,
		"round":         ts.Round,
		"oracleID":      r.genericConfig.OracleID,
		"nObservations": len(obs),
	})
	promReportingPluginsReportNumObservations.WithLabelValues(r.jobID.String()).Set(float64(len(obs)))

	queryProto := &encoding.Query{}
	err := proto.Unmarshal(query, queryProto)
	if err != nil {
		r.logger.Error("FunctionsReporting Report: unable to decode query!",
			commontypes.LogFields{"err": err})
		return false, nil, err
	}

	// Map from request ID (hex string) to the observations collected for it.
	reqIdToObservationList := make(map[string][]*encoding.ProcessedRequest)
	var uniqueQueryIds []string
	for _, id := range queryProto.RequestIDs {
		reqId := formatRequestId(id)
		if _, ok := reqIdToObservationList[reqId]; ok {
			r.logger.Error("FunctionsReporting Report: duplicate ID in query", commontypes.LogFields{
				"requestID": reqId,
			})
			continue
		}
		uniqueQueryIds = append(uniqueQueryIds, reqId)
		reqIdToObservationList[reqId] = []*encoding.ProcessedRequest{}
	}

	// Each observer may contribute at most one result per request ID.
	for _, ob := range obs {
		observationProto := &encoding.Observation{}
		err = proto.Unmarshal(ob.Observation, observationProto)
		if err != nil {
			r.logger.Error("FunctionsReporting Report: unable to decode observation!",
				commontypes.LogFields{"err": err, "observer": ob.Observer})
			continue
		}
		seenReqIds := make(map[string]struct{})
		for _, processedReq := range observationProto.ProcessedRequests {
			id := formatRequestId(processedReq.RequestID)
			if _, seen := seenReqIds[id]; seen {
				r.logger.Error("FunctionsReporting Report: observation contains duplicate IDs!",
					commontypes.LogFields{"requestID": id, "observer": ob.Observer})
				continue
			}
			if val, ok := reqIdToObservationList[id]; ok {
				reqIdToObservationList[id] = append(val, processedReq)
				seenReqIds[id] = struct{}{}
			} else {
				r.logger.Error("FunctionsReporting Report: observation contains ID that's not the query!",
					commontypes.LogFields{"requestID": id, "observer": ob.Observer})
			}
		}
	}

	defaultAggMethod := r.specificConfig.Config.GetDefaultAggregationMethod()
	var allAggregated []*encoding.ProcessedRequest
	var allIdStrs []string
	var totalCallbackGas uint32
	var reportCoordinator *common.Address
	for _, reqId := range uniqueQueryIds {
		observations := reqIdToObservationList[reqId]
		if !CanAggregate(r.genericConfig.N, r.genericConfig.F, observations) {
			r.logger.Debug("FunctionsReporting Report: unable to aggregate request in current round", commontypes.LogFields{
				"epoch":         ts.Epoch,
				"round":         ts.Round,
				"requestID":     reqId,
				"nObservations": len(observations),
			})
			continue
		}

		// TODO: support per-request aggregation method
		// https://app.shortcut.com/pluginlabs/story/57701/per-request-plugin-config
		aggregated, errAgg := Aggregate(defaultAggMethod, observations)
		if errAgg != nil {
			r.logger.Error("FunctionsReporting Report: error when aggregating reqId", commontypes.LogFields{
				"epoch":     ts.Epoch,
				"round":     ts.Round,
				"requestID": reqId,
				"err":       errAgg,
			})
			continue
		}
		// Enforce the per-report callback gas budget; over-budget requests are
		// dropped from this round but remain eligible for later rounds.
		if totalCallbackGas+aggregated.CallbackGasLimit > r.specificConfig.Config.GetMaxReportTotalCallbackGas() {
			r.logger.Warn("FunctionsReporting Report: total callback gas limit exceeded", commontypes.LogFields{
				"epoch":                ts.Epoch,
				"round":                ts.Round,
				"requestID":            reqId,
				"requestCallbackGas":   aggregated.CallbackGasLimit,
				"totalCallbackGas":     totalCallbackGas,
				"maxReportCallbackGas": r.specificConfig.Config.GetMaxReportTotalCallbackGas(),
			})
			continue
		}
		totalCallbackGas += aggregated.CallbackGasLimit
		r.logger.Debug("FunctionsReporting Report: aggregated successfully", commontypes.LogFields{
			"epoch":         ts.Epoch,
			"round":         ts.Round,
			"requestID":     reqId,
			"nObservations": len(observations),
		})
		// All requests in one report must target the same coordinator contract.
		var requestCoordinator common.Address
		requestCoordinator.SetBytes(aggregated.CoordinatorContract)
		reportCoordinator, err = ShouldIncludeCoordinator(&requestCoordinator, reportCoordinator)
		if err != nil {
			r.logger.Error("FunctionsReporting Report: skipping request with mismatched coordinator contract address", commontypes.LogFields{
				"requestID":          reqId,
				"requestCoordinator": requestCoordinator,
				"reportCoordinator":  reportCoordinator,
				"error":              err,
			})
			continue
		}
		allAggregated = append(allAggregated, aggregated)
		allIdStrs = append(allIdStrs, reqId)
	}

	r.logger.Debug("FunctionsReporting Report end", commontypes.LogFields{
		"epoch":               ts.Epoch,
		"round":               ts.Round,
		"oracleID":            r.genericConfig.OracleID,
		"nAggregatedRequests": len(allAggregated),
		"reporting":           len(allAggregated) > 0,
		"requestIDs":          allIdStrs,
		"totalCallbackGas":    totalCallbackGas,
	})
	if len(allAggregated) == 0 {
		return false, nil, nil
	}
	reportBytes, err := r.reportCodec.EncodeReport(allAggregated)
	if err != nil {
		return false, nil, err
	}
	promReportingPluginsReport.WithLabelValues(r.jobID.String()).Inc()
	return true, reportBytes, nil
}
// ShouldAcceptFinalizedReport() complies with ReportingPlugin
// Marks each decoded request FINALIZED in the ORM; requests flagged with the
// offchain marker are delivered via the offchain transmitter instead of being
// queued for onchain transmission. Accepts iff at least one request still
// needs onchain transmission.
func (r *functionsReporting) ShouldAcceptFinalizedReport(ctx context.Context, ts types.ReportTimestamp, report types.Report) (bool, error) {
	r.logger.Debug("FunctionsReporting ShouldAcceptFinalizedReport start", commontypes.LogFields{
		"epoch":    ts.Epoch,
		"round":    ts.Round,
		"oracleID": r.genericConfig.OracleID,
	})

	// NOTE: The output of the Report() phase needs to be later decoded by the contract. So unfortunately we
	// can't use anything more convenient like protobufs but we need to ABI-decode here instead.
	decoded, err := r.reportCodec.DecodeReport(report)
	if err != nil {
		r.logger.Error("FunctionsReporting ShouldAcceptFinalizedReport: unable to decode report built in reporting phase", commontypes.LogFields{"err": err})
		return false, err
	}

	allIds := []string{}
	needTransmissionIds := []string{}
	for _, item := range decoded {
		reqIdStr := formatRequestId(item.RequestID)
		allIds = append(allIds, reqIdStr)
		id, err := encoding.SliceToByte32(item.RequestID)
		if err != nil {
			r.logger.Error("FunctionsReporting ShouldAcceptFinalizedReport: invalid ID", commontypes.LogFields{"requestID": reqIdStr, "err": err})
			continue
		}
		_, err = r.pluginORM.FindById(id, pg.WithParentCtx(ctx))
		if err != nil {
			// TODO: Differentiate between ID not found and other ORM errors (https://smartcontract-it.atlassian.net/browse/DRO-215)
			r.logger.Warn("FunctionsReporting ShouldAcceptFinalizedReport: request doesn't exist locally! Accepting anyway.", commontypes.LogFields{"requestID": reqIdStr})
			needTransmissionIds = append(needTransmissionIds, reqIdStr)
			continue
		}
		err = r.pluginORM.SetFinalized(id, item.Result, item.Error, pg.WithParentCtx(ctx)) // validates state transition
		if err != nil {
			r.logger.Debug("FunctionsReporting ShouldAcceptFinalizedReport: state couldn't be changed to FINALIZED. Not transmitting.", commontypes.LogFields{"requestID": reqIdStr, "err": err})
			continue
		}
		if bytes.Equal(item.OnchainMetadata, []byte(functions.OffchainRequestMarker)) {
			r.logger.Debug("FunctionsReporting ShouldAcceptFinalizedReport: transmitting offchain", commontypes.LogFields{"requestID": reqIdStr})
			result := functions.OffchainResponse{RequestId: item.RequestID, Result: item.Result, Error: item.Error}
			// Offchain delivery failure is logged but does not fail the phase.
			if err := r.offchainTransmitter.TransmitReport(ctx, &result); err != nil {
				r.logger.Error("FunctionsReporting ShouldAcceptFinalizedReport: unable to transmit offchain", commontypes.LogFields{"requestID": reqIdStr, "err": err})
			}
			continue // doesn't need onchain transmission
		}
		needTransmissionIds = append(needTransmissionIds, reqIdStr)
	}
	r.logger.Debug("FunctionsReporting ShouldAcceptFinalizedReport end", commontypes.LogFields{
		"epoch":               ts.Epoch,
		"round":               ts.Round,
		"oracleID":            r.genericConfig.OracleID,
		"allIds":              allIds,
		"needTransmissionIds": needTransmissionIds,
		"accepting":           len(needTransmissionIds) > 0,
	})
	shouldAccept := len(needTransmissionIds) > 0
	if shouldAccept {
		promReportingAcceptReports.WithLabelValues(r.jobID.String()).Inc()
	}
	return shouldAccept, nil
}
formatRequestId(item.RequestID) + allIds = append(allIds, reqIdStr) + id, err := encoding.SliceToByte32(item.RequestID) + if err != nil { + r.logger.Error("FunctionsReporting ShouldAcceptFinalizedReport: invalid ID", commontypes.LogFields{"requestID": reqIdStr, "err": err}) + continue + } + request, err := r.pluginORM.FindById(id, pg.WithParentCtx(ctx)) + if err != nil { + r.logger.Warn("FunctionsReporting ShouldTransmitAcceptedReport: request doesn't exist locally! Transmitting anyway.", commontypes.LogFields{"requestID": reqIdStr, "err": err}) + needTransmissionIds = append(needTransmissionIds, reqIdStr) + continue + } + if request.State == functions.CONFIRMED { + r.logger.Debug("FunctionsReporting ShouldTransmitAcceptedReport: request already CONFIRMED. Not transmitting.", commontypes.LogFields{"requestID": reqIdStr}) + continue + } + if request.State == functions.TIMED_OUT { + r.logger.Debug("FunctionsReporting ShouldTransmitAcceptedReport: request already TIMED_OUT. Not transmitting.", commontypes.LogFields{"requestID": reqIdStr}) + continue + } + if request.State == functions.IN_PROGRESS || request.State == functions.RESULT_READY { + r.logger.Warn("FunctionsReporting ShouldTransmitAcceptedReport: unusual request state. 
Still transmitting.", + commontypes.LogFields{ + "requestID": reqIdStr, + "state": request.State.String(), + }) + } + needTransmissionIds = append(needTransmissionIds, reqIdStr) + } + r.logger.Debug("FunctionsReporting ShouldTransmitAcceptedReport end", commontypes.LogFields{ + "epoch": ts.Epoch, + "round": ts.Round, + "oracleID": r.genericConfig.OracleID, + "allIds": allIds, + "needTransmissionIds": needTransmissionIds, + "transmitting": len(needTransmissionIds) > 0, + }) + shouldTransmit := len(needTransmissionIds) > 0 + if shouldTransmit { + promReportingTransmitReports.WithLabelValues(r.jobID.String()).Inc() + promReportingTransmitBatchSize.WithLabelValues(r.jobID.String()).Observe(float64(len(allIds))) + } + return shouldTransmit, nil +} + +// Close() complies with ReportingPlugin +func (r *functionsReporting) Close() error { + r.logger.Debug("FunctionsReporting Close", commontypes.LogFields{ + "oracleID": r.genericConfig.OracleID, + }) + return nil +} diff --git a/core/services/ocr2/plugins/functions/reporting_test.go b/core/services/ocr2/plugins/functions/reporting_test.go new file mode 100644 index 00000000..0564bd29 --- /dev/null +++ b/core/services/ocr2/plugins/functions/reporting_test.go @@ -0,0 +1,556 @@ +package functions_test + +import ( + "errors" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + "github.com/goplugin/libocr/commontypes" + "github.com/goplugin/libocr/offchainreporting2plus/types" + + commonlogger "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + functions_srv "github.com/goplugin/pluginv3.0/v2/core/services/functions" + functions_mocks "github.com/goplugin/pluginv3.0/v2/core/services/functions/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions" + 
"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/encoding" +) + +func preparePlugin(t *testing.T, batchSize uint32, maxTotalGasLimit uint32) (types.ReportingPlugin, *functions_mocks.ORM, encoding.ReportCodec, *functions_mocks.OffchainTransmitter) { + lggr := logger.TestLogger(t) + ocrLogger := commonlogger.NewOCRWrapper(lggr, true, func(msg string) {}) + orm := functions_mocks.NewORM(t) + offchainTransmitter := functions_mocks.NewOffchainTransmitter(t) + factory := functions.FunctionsReportingPluginFactory{ + Logger: ocrLogger, + PluginORM: orm, + ContractVersion: 1, + OffchainTransmitter: offchainTransmitter, + } + + pluginConfig := config.ReportingPluginConfigWrapper{ + Config: &config.ReportingPluginConfig{ + MaxRequestBatchSize: batchSize, + MaxReportTotalCallbackGas: maxTotalGasLimit, + }, + } + pluginConfigBytes, err := config.EncodeReportingPluginConfig(&pluginConfig) + require.NoError(t, err) + plugin, _, err := factory.NewReportingPlugin(types.ReportingPluginConfig{ + N: 4, + F: 1, + OffchainConfig: pluginConfigBytes, + }) + require.NoError(t, err) + codec, err := encoding.NewReportCodec(1) + require.NoError(t, err) + return plugin, orm, codec, offchainTransmitter +} + +func newRequestID() functions_srv.RequestID { + return testutils.Random32Byte() +} + +func newRequest() functions_srv.Request { + var gasLimit uint32 = 100000 + return functions_srv.Request{RequestID: newRequestID(), State: functions_srv.IN_PROGRESS, CoordinatorContractAddress: &common.Address{1}, CallbackGasLimit: &gasLimit} +} + +func newRequestWithResult(result []byte) functions_srv.Request { + req := newRequest() + req.State = functions_srv.RESULT_READY + req.Result = result + return req +} + +func newRequestFinalized() functions_srv.Request { + req := newRequest() + req.State = functions_srv.FINALIZED + return req +} + +func newRequestTimedOut() functions_srv.Request { + req := 
newRequest() + req.State = functions_srv.TIMED_OUT + return req +} + +func newRequestConfirmed() functions_srv.Request { + req := newRequest() + req.State = functions_srv.CONFIRMED + return req +} + +func newMarshalledQuery(t *testing.T, reqIDs ...functions_srv.RequestID) []byte { + queryProto := encoding.Query{} + queryProto.RequestIDs = [][]byte{} + for _, id := range reqIDs { + id := id + queryProto.RequestIDs = append(queryProto.RequestIDs, id[:]) + } + marshalled, err := proto.Marshal(&queryProto) + require.NoError(t, err) + return marshalled +} + +func newProcessedRequest(requestId functions_srv.RequestID, compResult []byte, compError []byte) *encoding.ProcessedRequest { + return &encoding.ProcessedRequest{ + RequestID: requestId[:], + Result: compResult, + Error: compError, + CoordinatorContract: []byte{1}, + } +} + +func newProcessedRequestWithMeta(requestId functions_srv.RequestID, compResult []byte, compError []byte, callbackGasLimit uint32, coordinatorContract []byte, onchainMetadata []byte) *encoding.ProcessedRequest { + return &encoding.ProcessedRequest{ + RequestID: requestId[:], + Result: compResult, + Error: compError, + CallbackGasLimit: callbackGasLimit, + CoordinatorContract: coordinatorContract, + OnchainMetadata: onchainMetadata, + } +} + +func newObservation(t *testing.T, observerId uint8, requests ...*encoding.ProcessedRequest) types.AttributedObservation { + observationProto := encoding.Observation{ProcessedRequests: requests} + raw, err := proto.Marshal(&observationProto) + require.NoError(t, err) + return types.AttributedObservation{ + Observation: raw, + Observer: commontypes.OracleID(observerId), + } +} + +func TestFunctionsReporting_Query(t *testing.T) { + t.Parallel() + const batchSize = 10 + plugin, orm, _, _ := preparePlugin(t, batchSize, 0) + reqs := []functions_srv.Request{newRequest(), newRequest()} + orm.On("FindOldestEntriesByState", functions_srv.RESULT_READY, uint32(batchSize), mock.Anything).Return(reqs, nil) + + q, err := 
plugin.Query(testutils.Context(t), types.ReportTimestamp{}) + require.NoError(t, err) + + queryProto := &encoding.Query{} + err = proto.Unmarshal(q, queryProto) + require.NoError(t, err) + require.Equal(t, 2, len(queryProto.RequestIDs)) + require.Equal(t, reqs[0].RequestID[:], queryProto.RequestIDs[0]) + require.Equal(t, reqs[1].RequestID[:], queryProto.RequestIDs[1]) +} + +func TestFunctionsReporting_Query_HandleCoordinatorMismatch(t *testing.T) { + t.Parallel() + const batchSize = 10 + plugin, orm, _, _ := preparePlugin(t, batchSize, 1000000) + reqs := []functions_srv.Request{newRequest(), newRequest()} + reqs[0].CoordinatorContractAddress = &common.Address{1} + reqs[1].CoordinatorContractAddress = &common.Address{2} + orm.On("FindOldestEntriesByState", functions_srv.RESULT_READY, uint32(batchSize), mock.Anything).Return(reqs, nil) + + q, err := plugin.Query(testutils.Context(t), types.ReportTimestamp{}) + require.NoError(t, err) + + queryProto := &encoding.Query{} + err = proto.Unmarshal(q, queryProto) + require.NoError(t, err) + require.Equal(t, 1, len(queryProto.RequestIDs)) + require.Equal(t, reqs[0].RequestID[:], queryProto.RequestIDs[0]) + // reqs[1] should be excluded from this query because it has a different coordinator address +} + +func TestFunctionsReporting_Observation(t *testing.T) { + t.Parallel() + plugin, orm, _, _ := preparePlugin(t, 10, 0) + + req1 := newRequestWithResult([]byte("abc")) + req2 := newRequest() + req3 := newRequestWithResult([]byte("def")) + req4 := newRequestTimedOut() + nonexistentId := newRequestID() + + orm.On("FindById", req1.RequestID, mock.Anything).Return(&req1, nil) + orm.On("FindById", req2.RequestID, mock.Anything).Return(&req2, nil) + orm.On("FindById", req3.RequestID, mock.Anything).Return(&req3, nil) + orm.On("FindById", req4.RequestID, mock.Anything).Return(&req4, nil) + orm.On("FindById", nonexistentId, mock.Anything).Return(nil, errors.New("nonexistent ID")) + + // Query asking for 5 requests (with duplicates), 
out of which: + // - two are ready + // - one is still in progress + // - one has timed out + // - one doesn't exist + query := newMarshalledQuery(t, req1.RequestID, req1.RequestID, req2.RequestID, req3.RequestID, req4.RequestID, nonexistentId, req4.RequestID) + obs, err := plugin.Observation(testutils.Context(t), types.ReportTimestamp{}, query) + require.NoError(t, err) + + observationProto := &encoding.Observation{} + err = proto.Unmarshal(obs, observationProto) + require.NoError(t, err) + require.Equal(t, len(observationProto.ProcessedRequests), 2) + require.Equal(t, observationProto.ProcessedRequests[0].RequestID, req1.RequestID[:]) + require.Equal(t, observationProto.ProcessedRequests[0].Result, []byte("abc")) + require.Equal(t, observationProto.ProcessedRequests[1].RequestID, req3.RequestID[:]) + require.Equal(t, observationProto.ProcessedRequests[1].Result, []byte("def")) +} + +func TestFunctionsReporting_Observation_IncorrectQuery(t *testing.T) { + t.Parallel() + plugin, orm, _, _ := preparePlugin(t, 10, 0) + + req1 := newRequestWithResult([]byte("abc")) + invalidId := []byte("invalid") + + orm.On("FindById", req1.RequestID, mock.Anything).Return(&req1, nil) + + // Query asking for 3 requests (with duplicates), out of which: + // - two are invalid + // - one is ready + queryProto := encoding.Query{} + queryProto.RequestIDs = [][]byte{invalidId, req1.RequestID[:], invalidId} + marshalled, err := proto.Marshal(&queryProto) + require.NoError(t, err) + + obs, err := plugin.Observation(testutils.Context(t), types.ReportTimestamp{}, marshalled) + require.NoError(t, err) + observationProto := &encoding.Observation{} + err = proto.Unmarshal(obs, observationProto) + require.NoError(t, err) + require.Equal(t, len(observationProto.ProcessedRequests), 1) + require.Equal(t, observationProto.ProcessedRequests[0].RequestID, req1.RequestID[:]) + require.Equal(t, observationProto.ProcessedRequests[0].Result, []byte("abc")) +} + +func TestFunctionsReporting_Report(t 
*testing.T) { + t.Parallel() + plugin, _, codec, _ := preparePlugin(t, 10, 1000000) + reqId1, reqId2, reqId3 := newRequestID(), newRequestID(), newRequestID() + compResult := []byte("aaa") + procReq1 := newProcessedRequest(reqId1, compResult, []byte{}) + procReq2 := newProcessedRequest(reqId2, compResult, []byte{}) + + query := newMarshalledQuery(t, reqId1, reqId2, reqId3, reqId1, reqId2) // duplicates should be ignored + obs := []types.AttributedObservation{ + newObservation(t, 1, procReq2, procReq1), + newObservation(t, 2, procReq1, procReq2), + } + + // Two observations are not enough to produce a report + produced, reportBytes, err := plugin.Report(testutils.Context(t), types.ReportTimestamp{}, query, obs) + require.False(t, produced) + require.Nil(t, reportBytes) + require.NoError(t, err) + + // Three observations with the same requestID should produce a report + obs = append(obs, newObservation(t, 3, procReq1, procReq2)) + produced, reportBytes, err = plugin.Report(testutils.Context(t), types.ReportTimestamp{}, query, obs) + require.True(t, produced) + require.NoError(t, err) + + decoded, err := codec.DecodeReport(reportBytes) + require.NoError(t, err) + require.Equal(t, 2, len(decoded)) + require.Equal(t, reqId1[:], decoded[0].RequestID) + require.Equal(t, compResult, decoded[0].Result) + require.Equal(t, []byte{}, decoded[0].Error) + require.Equal(t, reqId2[:], decoded[1].RequestID) + require.Equal(t, compResult, decoded[1].Result) + require.Equal(t, []byte{}, decoded[1].Error) +} + +func TestFunctionsReporting_Report_WithGasLimitAndMetadata(t *testing.T) { + t.Parallel() + plugin, _, codec, _ := preparePlugin(t, 10, 300000) + reqId1, reqId2, reqId3 := newRequestID(), newRequestID(), newRequestID() + compResult := []byte("aaa") + gasLimit1, gasLimit2 := uint32(100_000), uint32(200_000) + coordinatorContract := common.Address{1} + meta1, meta2 := []byte("meta1"), []byte("meta2") + procReq1 := newProcessedRequestWithMeta(reqId1, compResult, []byte{}, 
gasLimit1, coordinatorContract[:], meta1) + procReq2 := newProcessedRequestWithMeta(reqId2, compResult, []byte{}, gasLimit2, coordinatorContract[:], meta2) + + query := newMarshalledQuery(t, reqId1, reqId2, reqId3, reqId1, reqId2) // duplicates should be ignored + obs := []types.AttributedObservation{ + newObservation(t, 1, procReq2, procReq1), + newObservation(t, 2, procReq1, procReq2), + newObservation(t, 3, procReq1, procReq2), + } + + produced, reportBytes, err := plugin.Report(testutils.Context(t), types.ReportTimestamp{}, query, obs) + require.True(t, produced) + require.NoError(t, err) + + decoded, err := codec.DecodeReport(reportBytes) + require.NoError(t, err) + require.Equal(t, 2, len(decoded)) + + require.Equal(t, reqId1[:], decoded[0].RequestID) + require.Equal(t, compResult, decoded[0].Result) + require.Equal(t, []byte{}, decoded[0].Error) + require.Equal(t, coordinatorContract[:], decoded[0].CoordinatorContract) + require.Equal(t, meta1, decoded[0].OnchainMetadata) + // CallbackGasLimit is not ABI-encoded + + require.Equal(t, reqId2[:], decoded[1].RequestID) + require.Equal(t, compResult, decoded[1].Result) + require.Equal(t, []byte{}, decoded[1].Error) + require.Equal(t, coordinatorContract[:], decoded[1].CoordinatorContract) + require.Equal(t, meta2, decoded[1].OnchainMetadata) + // CallbackGasLimit is not ABI-encoded +} + +func TestFunctionsReporting_Report_HandleCoordinatorMismatch(t *testing.T) { + t.Parallel() + plugin, _, codec, _ := preparePlugin(t, 10, 300000) + reqId1, reqId2, reqId3 := newRequestID(), newRequestID(), newRequestID() + compResult, meta := []byte("aaa"), []byte("meta") + coordinatorContractA, coordinatorContractB := common.Address{1}, common.Address{2} + procReq1 := newProcessedRequestWithMeta(reqId1, compResult, []byte{}, 0, coordinatorContractA[:], meta) + procReq2 := newProcessedRequestWithMeta(reqId2, compResult, []byte{}, 0, coordinatorContractB[:], meta) + procReq3 := newProcessedRequestWithMeta(reqId3, compResult, 
[]byte{}, 0, coordinatorContractA[:], meta) + + query := newMarshalledQuery(t, reqId1, reqId2, reqId3, reqId1, reqId2) // duplicates should be ignored + obs := []types.AttributedObservation{ + newObservation(t, 1, procReq2, procReq3, procReq1), + newObservation(t, 2, procReq1, procReq2, procReq3), + newObservation(t, 3, procReq3, procReq1, procReq2), + } + + produced, reportBytes, err := plugin.Report(testutils.Context(t), types.ReportTimestamp{}, query, obs) + require.True(t, produced) + require.NoError(t, err) + + decoded, err := codec.DecodeReport(reportBytes) + require.NoError(t, err) + require.Equal(t, 2, len(decoded)) + + require.Equal(t, reqId1[:], decoded[0].RequestID) + require.Equal(t, reqId3[:], decoded[1].RequestID) + // reqId2 should be excluded from this report because it has a different coordinator address +} + +func TestFunctionsReporting_Report_CallbackGasLimitExceeded(t *testing.T) { + t.Parallel() + plugin, _, codec, _ := preparePlugin(t, 10, 200000) + reqId1, reqId2 := newRequestID(), newRequestID() + compResult := []byte("aaa") + gasLimit1, gasLimit2 := uint32(100_000), uint32(200_000) + coordinatorContract1, coordinatorContract2 := common.Address{1}, common.Address{2} + procReq1 := newProcessedRequestWithMeta(reqId1, compResult, []byte{}, gasLimit1, coordinatorContract1[:], []byte{}) + procReq2 := newProcessedRequestWithMeta(reqId2, compResult, []byte{}, gasLimit2, coordinatorContract2[:], []byte{}) + + query := newMarshalledQuery(t, reqId1, reqId2) + obs := []types.AttributedObservation{ + newObservation(t, 1, procReq2, procReq1), + newObservation(t, 2, procReq1, procReq2), + newObservation(t, 3, procReq1, procReq2), + } + + produced, reportBytes, err := plugin.Report(testutils.Context(t), types.ReportTimestamp{}, query, obs) + require.True(t, produced) + require.NoError(t, err) + + decoded, err := codec.DecodeReport(reportBytes) + require.NoError(t, err) + // Gas limit is set to 200k per report so we can only fit the first request + 
require.Equal(t, 1, len(decoded)) + require.Equal(t, reqId1[:], decoded[0].RequestID) + require.Equal(t, compResult, decoded[0].Result) + require.Equal(t, []byte{}, decoded[0].Error) + require.Equal(t, coordinatorContract1[:], decoded[0].CoordinatorContract) +} + +func TestFunctionsReporting_Report_DeterministicOrderOfRequests(t *testing.T) { + t.Parallel() + plugin, _, codec, _ := preparePlugin(t, 10, 0) + reqId1, reqId2, reqId3 := newRequestID(), newRequestID(), newRequestID() + compResult := []byte("aaa") + + query := newMarshalledQuery(t, reqId1, reqId2, reqId3, reqId1, reqId2) // duplicates should be ignored + procReq1 := newProcessedRequest(reqId1, compResult, []byte{}) + procReq2 := newProcessedRequest(reqId2, compResult, []byte{}) + procReq3 := newProcessedRequest(reqId3, compResult, []byte{}) + obs := []types.AttributedObservation{ + newObservation(t, 1, procReq1, procReq2, procReq3), + newObservation(t, 2, procReq2, procReq1, procReq3), + newObservation(t, 3, procReq3, procReq2, procReq1), + } + + produced1, reportBytes1, err1 := plugin.Report(testutils.Context(t), types.ReportTimestamp{}, query, obs) + produced2, reportBytes2, err2 := plugin.Report(testutils.Context(t), types.ReportTimestamp{}, query, obs) + require.True(t, produced1) + require.True(t, produced2) + require.NoError(t, err1) + require.NoError(t, err2) + require.Equal(t, reportBytes1, reportBytes2) + + decoded, err := codec.DecodeReport(reportBytes1) + require.NoError(t, err) + require.Equal(t, 3, len(decoded)) +} + +func TestFunctionsReporting_Report_IncorrectObservation(t *testing.T) { + t.Parallel() + plugin, _, _, _ := preparePlugin(t, 10, 0) + reqId1 := newRequestID() + compResult := []byte("aaa") + + query := newMarshalledQuery(t, reqId1) + req := newProcessedRequest(reqId1, compResult, []byte{}) + + // There are 4 observations but all are coming from the same node + obs := []types.AttributedObservation{newObservation(t, 1, req, req, req, req)} + produced, reportBytes, err := 
plugin.Report(testutils.Context(t), types.ReportTimestamp{}, query, obs) + require.False(t, produced) + require.Nil(t, reportBytes) + require.NoError(t, err) +} + +func getReportBytes(t *testing.T, codec encoding.ReportCodec, reqs ...functions_srv.Request) []byte { + var report []*encoding.ProcessedRequest + for _, req := range reqs { + req := req + report = append(report, &encoding.ProcessedRequest{ + RequestID: req.RequestID[:], + Result: req.Result, + Error: req.Error, + CallbackGasLimit: *req.CallbackGasLimit, + CoordinatorContract: req.CoordinatorContractAddress[:], + OnchainMetadata: req.OnchainMetadata, + }) + } + reportBytes, err := codec.EncodeReport(report) + require.NoError(t, err) + return reportBytes +} + +func TestFunctionsReporting_ShouldAcceptFinalizedReport(t *testing.T) { + t.Parallel() + plugin, orm, codec, _ := preparePlugin(t, 10, 0) + + req1 := newRequestWithResult([]byte("xxx")) // nonexistent + req2 := newRequestWithResult([]byte("abc")) + req3 := newRequestFinalized() + req4 := newRequestTimedOut() + + orm.On("FindById", req1.RequestID, mock.Anything).Return(nil, errors.New("nonexistent ID")) + orm.On("FindById", req2.RequestID, mock.Anything).Return(&req2, nil) + orm.On("SetFinalized", req2.RequestID, mock.Anything, mock.Anything, mock.Anything).Return(nil) + orm.On("FindById", req3.RequestID, mock.Anything).Return(&req3, nil) + orm.On("SetFinalized", req3.RequestID, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("same state")) + orm.On("FindById", req4.RequestID, mock.Anything).Return(&req4, nil) + orm.On("SetFinalized", req4.RequestID, mock.Anything, mock.Anything, mock.Anything).Return(errors.New("already timed out")) + + // Attempting to transmit 2 requests, out of which: + // - one was already accepted for transmission earlier + // - one has timed out + should, err := plugin.ShouldAcceptFinalizedReport(testutils.Context(t), types.ReportTimestamp{}, getReportBytes(t, codec, req3, req4)) + require.NoError(t, err) + 
require.False(t, should) + + // Attempting to transmit 2 requests, out of which: + // - one is ready + // - one was already accepted for transmission earlier + should, err = plugin.ShouldAcceptFinalizedReport(testutils.Context(t), types.ReportTimestamp{}, getReportBytes(t, codec, req2, req3)) + require.NoError(t, err) + require.True(t, should) + + // Attempting to transmit 2 requests, out of which: + // - one doesn't exist + // - one has timed out + should, err = plugin.ShouldAcceptFinalizedReport(testutils.Context(t), types.ReportTimestamp{}, getReportBytes(t, codec, req1, req4)) + require.NoError(t, err) + require.True(t, should) +} + +func TestFunctionsReporting_ShouldAcceptFinalizedReport_OffchainTransmission(t *testing.T) { + t.Parallel() + plugin, orm, codec, offchainTransmitter := preparePlugin(t, 10, 0) + req1 := newRequestWithResult([]byte("abc")) + req1.OnchainMetadata = []byte(functions_srv.OffchainRequestMarker) + + orm.On("FindById", req1.RequestID, mock.Anything).Return(&req1, nil) + orm.On("SetFinalized", req1.RequestID, mock.Anything, mock.Anything, mock.Anything).Return(nil) + offchainTransmitter.On("TransmitReport", mock.Anything, mock.Anything).Return(nil) + + should, err := plugin.ShouldAcceptFinalizedReport(testutils.Context(t), types.ReportTimestamp{}, getReportBytes(t, codec, req1)) + require.NoError(t, err) + require.False(t, should) +} + +func TestFunctionsReporting_ShouldTransmitAcceptedReport(t *testing.T) { + t.Parallel() + plugin, orm, codec, _ := preparePlugin(t, 10, 0) + + req1 := newRequestWithResult([]byte("xxx")) // nonexistent + req2 := newRequestWithResult([]byte("abc")) + req3 := newRequestFinalized() + req4 := newRequestTimedOut() + req5 := newRequestConfirmed() + + orm.On("FindById", req1.RequestID, mock.Anything).Return(nil, errors.New("nonexistent ID")) + orm.On("FindById", req2.RequestID, mock.Anything).Return(&req2, nil) + orm.On("FindById", req3.RequestID, mock.Anything).Return(&req3, nil) + orm.On("FindById", 
req4.RequestID, mock.Anything).Return(&req4, nil) + orm.On("FindById", req5.RequestID, mock.Anything).Return(&req5, nil) + + // Attempting to transmit 2 requests, out of which: + // - one was already confirmed on chain + // - one has timed out + should, err := plugin.ShouldTransmitAcceptedReport(testutils.Context(t), types.ReportTimestamp{}, getReportBytes(t, codec, req5, req4)) + require.NoError(t, err) + require.False(t, should) + + // Attempting to transmit 2 requests, out of which: + // - one is ready + // - one is finalized + should, err = plugin.ShouldTransmitAcceptedReport(testutils.Context(t), types.ReportTimestamp{}, getReportBytes(t, codec, req2, req3)) + require.NoError(t, err) + require.True(t, should) + + // Attempting to transmit 2 requests, out of which: + // - one doesn't exist + // - one is ready + should, err = plugin.ShouldTransmitAcceptedReport(testutils.Context(t), types.ReportTimestamp{}, getReportBytes(t, codec, req1, req2)) + require.NoError(t, err) + require.True(t, should) +} + +func TestFunctionsReporting_ShouldIncludeCoordinator(t *testing.T) { + t.Parallel() + + zeroAddr, coord1, coord2 := &common.Address{}, &common.Address{1}, &common.Address{2} + + // should never pass nil requestCoordinator + newCoord, err := functions.ShouldIncludeCoordinator(nil, nil) + require.Error(t, err) + require.Nil(t, newCoord) + + // should never pass zero requestCoordinator + newCoord, err = functions.ShouldIncludeCoordinator(zeroAddr, nil) + require.Error(t, err) + require.Nil(t, newCoord) + + // overwrite nil reportCoordinator + newCoord, err = functions.ShouldIncludeCoordinator(coord1, nil) + require.NoError(t, err) + require.Equal(t, coord1, newCoord) + + // same address is fine + newCoord, err = functions.ShouldIncludeCoordinator(coord1, newCoord) + require.NoError(t, err) + require.Equal(t, coord1, newCoord) + + // different address is not accepted + newCoord, err = functions.ShouldIncludeCoordinator(coord2, newCoord) + require.Error(t, err) + 
require.Equal(t, coord1, newCoord) +} diff --git a/core/services/ocr2/plugins/generic/helpers_test.go b/core/services/ocr2/plugins/generic/helpers_test.go new file mode 100644 index 00000000..2f8f3204 --- /dev/null +++ b/core/services/ocr2/plugins/generic/helpers_test.go @@ -0,0 +1,7 @@ +package generic + +import "github.com/goplugin/libocr/commontypes" + +func (t *TelemetryAdapter) Endpoints() map[[4]string]commontypes.MonitoringEndpoint { + return t.endpoints +} diff --git a/core/services/ocr2/plugins/generic/merge_test.go b/core/services/ocr2/plugins/generic/merge_test.go new file mode 100644 index 00000000..9618c623 --- /dev/null +++ b/core/services/ocr2/plugins/generic/merge_test.go @@ -0,0 +1,32 @@ +package generic + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMerge(t *testing.T) { + vars := map[string]interface{}{ + "jb": map[string]interface{}{ + "databaseID": "some-job-id", + }, + } + addedVars := map[string]interface{}{ + "jb": map[string]interface{}{ + "some-other-var": "foo", + }, + "val": 0, + } + + merge(vars, addedVars) + + assert.True(t, reflect.DeepEqual(vars, map[string]interface{}{ + "jb": map[string]interface{}{ + "databaseID": "some-job-id", + "some-other-var": "foo", + }, + "val": 0, + }), vars) +} diff --git a/core/services/ocr2/plugins/generic/pipeline_runner_adapter.go b/core/services/ocr2/plugins/generic/pipeline_runner_adapter.go new file mode 100644 index 00000000..68f453b6 --- /dev/null +++ b/core/services/ocr2/plugins/generic/pipeline_runner_adapter.go @@ -0,0 +1,93 @@ +package generic + +import ( + "context" + "time" + + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +var _ types.PipelineRunnerService = (*PipelineRunnerAdapter)(nil) + +type pipelineRunner interface { 
+ ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) +} + +type PipelineRunnerAdapter struct { + runner pipelineRunner + job job.Job + logger logger.Logger +} + +func (p *PipelineRunnerAdapter) ExecuteRun(ctx context.Context, spec string, vars types.Vars, options types.Options) (types.TaskResults, error) { + s := pipeline.Spec{ + DotDagSource: spec, + CreatedAt: time.Now(), + MaxTaskDuration: models.Interval(options.MaxTaskDuration), + JobID: p.job.ID, + JobName: p.job.Name.ValueOrZero(), + JobType: string(p.job.Type), + } + + defaultVars := map[string]interface{}{ + "jb": map[string]interface{}{ + "databaseID": p.job.ID, + "externalJobID": p.job.ExternalJobID, + "name": p.job.Name.ValueOrZero(), + }, + } + + merge(defaultVars, vars.Vars) + + finalVars := pipeline.NewVarsFrom(defaultVars) + _, trrs, err := p.runner.ExecuteRun(ctx, s, finalVars, p.logger) + if err != nil { + return nil, err + } + + taskResults := make([]types.TaskResult, len(trrs)) + for i, trr := range trrs { + taskResults[i] = types.TaskResult{ + ID: trr.ID.String(), + Type: string(trr.Task.Type()), + Index: int(trr.Task.OutputIndex()), + + TaskValue: types.TaskValue{ + Value: trr.Result.Value, + Error: trr.Result.Error, + IsTerminal: len(trr.Task.Outputs()) == 0, + }, + } + } + return taskResults, nil +} + +func NewPipelineRunnerAdapter(logger logger.Logger, job job.Job, runner pipelineRunner) *PipelineRunnerAdapter { + return &PipelineRunnerAdapter{ + logger: logger, + job: job, + runner: runner, + } +} + +// merge merges mapTwo into mapOne, modifying mapOne in the process. +func merge(mapOne, mapTwo map[string]interface{}) { + for k, v := range mapTwo { + // if `mapOne` doesn't have `k`, then nothing to do, just assign v to `mapOne`. 
+ if _, ok := mapOne[k]; !ok { + mapOne[k] = v + } else { + vAsMap, vOK := v.(map[string]interface{}) + mapOneVAsMap, moOK := mapOne[k].(map[string]interface{}) + if vOK && moOK { + merge(mapOneVAsMap, vAsMap) + } else { + mapOne[k] = v + } + } + } +} diff --git a/core/services/ocr2/plugins/generic/pipeline_runner_adapter_test.go b/core/services/ocr2/plugins/generic/pipeline_runner_adapter_test.go new file mode 100644 index 00000000..2289e9fb --- /dev/null +++ b/core/services/ocr2/plugins/generic/pipeline_runner_adapter_test.go @@ -0,0 +1,119 @@ +package generic_test + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/google/uuid" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + _ "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/generic" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const spec = ` +answer [type=sum values=<[ $(val), 2 ]>] +answer; +` + +func TestAdapter_Integration(t *testing.T) { + logger := logger.TestLogger(t) + cfg := configtest.NewTestGeneralConfig(t) + url := cfg.Database().URL() + db, err := pg.NewConnection(url.String(), cfg.Database().Dialect(), cfg.Database()) + require.NoError(t, err) + + keystore := keystore.NewInMemory(db, utils.FastScryptParams, logger, cfg.Database()) + pipelineORM := pipeline.NewORM(db, logger, 
cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + bridgesORM := bridges.NewORM(db, logger, cfg.Database()) + pr := pipeline.NewRunner( + pipelineORM, + bridgesORM, + cfg.JobPipeline(), + cfg.WebServer(), + nil, + keystore.Eth(), + keystore.VRF(), + logger, + http.DefaultClient, + http.DefaultClient, + ) + pra := generic.NewPipelineRunnerAdapter(logger, job.Job{}, pr) + results, err := pra.ExecuteRun(testutils.Context(t), spec, types.Vars{Vars: map[string]interface{}{"val": 1}}, types.Options{}) + require.NoError(t, err) + + finalResult := results[0].Value.(decimal.Decimal) + + assert.True(t, decimal.NewFromInt(3).Equal(finalResult)) +} + +func newMockPipelineRunner() *mockPipelineRunner { + return &mockPipelineRunner{} +} + +type mockPipelineRunner struct { + results pipeline.TaskRunResults + err error + run *pipeline.Run + spec pipeline.Spec + vars pipeline.Vars +} + +func (m *mockPipelineRunner) ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (*pipeline.Run, pipeline.TaskRunResults, error) { + m.spec = spec + m.vars = vars + return m.run, m.results, m.err +} + +func TestAdapter_AddsDefaultVars(t *testing.T) { + logger := logger.TestLogger(t) + mpr := newMockPipelineRunner() + jobID, externalJobID, name := int32(100), uuid.New(), null.StringFrom("job-name") + pra := generic.NewPipelineRunnerAdapter(logger, job.Job{ID: jobID, ExternalJobID: externalJobID, Name: name}, mpr) + + _, err := pra.ExecuteRun(testutils.Context(t), spec, types.Vars{}, types.Options{}) + require.NoError(t, err) + + gotName, err := mpr.vars.Get("jb.name") + require.NoError(t, err) + assert.Equal(t, name.String, gotName) + + gotID, err := mpr.vars.Get("jb.databaseID") + require.NoError(t, err) + assert.Equal(t, jobID, gotID) + + gotExternalID, err := mpr.vars.Get("jb.externalJobID") + require.NoError(t, err) + assert.Equal(t, externalJobID, gotExternalID) +} + +func TestPipelineRunnerAdapter_SetsVarsOnSpec(t *testing.T) { + logger := 
logger.TestLogger(t) + mpr := newMockPipelineRunner() + jobID, externalJobID, name, jobType := int32(100), uuid.New(), null.StringFrom("job-name"), job.Type("generic") + pra := generic.NewPipelineRunnerAdapter(logger, job.Job{ID: jobID, ExternalJobID: externalJobID, Name: name, Type: jobType}, mpr) + + maxDuration := 100 * time.Second + _, err := pra.ExecuteRun(testutils.Context(t), spec, types.Vars{}, types.Options{MaxTaskDuration: maxDuration}) + require.NoError(t, err) + + assert.Equal(t, jobID, mpr.spec.JobID) + assert.Equal(t, name.ValueOrZero(), mpr.spec.JobName) + assert.Equal(t, string(jobType), mpr.spec.JobType) + assert.Equal(t, maxDuration, mpr.spec.MaxTaskDuration.Duration()) +} diff --git a/core/services/ocr2/plugins/generic/telemetry_adapter.go b/core/services/ocr2/plugins/generic/telemetry_adapter.go new file mode 100644 index 00000000..9dc8cd81 --- /dev/null +++ b/core/services/ocr2/plugins/generic/telemetry_adapter.go @@ -0,0 +1,59 @@ +package generic + +import ( + "context" + "errors" + + "github.com/goplugin/libocr/commontypes" + + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" + "github.com/goplugin/pluginv3.0/v2/core/services/telemetry" + + "github.com/goplugin/plugin-common/pkg/types" +) + +var _ types.TelemetryService = (*TelemetryAdapter)(nil) + +type TelemetryAdapter struct { + endpointGenerator telemetry.MonitoringEndpointGenerator + endpoints map[[4]string]commontypes.MonitoringEndpoint +} + +func NewTelemetryAdapter(endpointGen telemetry.MonitoringEndpointGenerator) *TelemetryAdapter { + return &TelemetryAdapter{ + endpoints: make(map[[4]string]commontypes.MonitoringEndpoint), + endpointGenerator: endpointGen, + } +} + +func (t *TelemetryAdapter) Send(ctx context.Context, network string, chainID string, contractID string, telemetryType string, payload []byte) error { + e, err := t.getOrCreateEndpoint(network, chainID, contractID, telemetryType) + if err != nil { + return err + } + e.SendLog(payload) + return nil +} + 
+func (t *TelemetryAdapter) getOrCreateEndpoint(network string, chainID string, contractID string, telemetryType string) (commontypes.MonitoringEndpoint, error) { + if contractID == "" { + return nil, errors.New("contractID cannot be empty") + } + if telemetryType == "" { + return nil, errors.New("telemetryType cannot be empty") + } + if network == "" { + return nil, errors.New("network cannot be empty") + } + if chainID == "" { + return nil, errors.New("chainID cannot be empty") + } + + key := [4]string{network, chainID, contractID, telemetryType} + e, ok := t.endpoints[key] + if !ok { + e = t.endpointGenerator.GenMonitoringEndpoint(network, chainID, contractID, synchronization.TelemetryType(telemetryType)) + t.endpoints[key] = e + } + return e, nil +} diff --git a/core/services/ocr2/plugins/generic/telemetry_adapter_test.go b/core/services/ocr2/plugins/generic/telemetry_adapter_test.go new file mode 100644 index 00000000..a9a39b27 --- /dev/null +++ b/core/services/ocr2/plugins/generic/telemetry_adapter_test.go @@ -0,0 +1,110 @@ +package generic_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/commontypes" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/generic" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" +) + +type mockEndpoint struct { + network string + chainID string + contractID string + telemetryType string + payload []byte +} + +func (m *mockEndpoint) SendLog(payload []byte) { m.payload = payload } + +type mockGenerator struct{} + +func (m *mockGenerator) GenMonitoringEndpoint(network string, chainID string, contractID string, telemetryType synchronization.TelemetryType) commontypes.MonitoringEndpoint { + return &mockEndpoint{ + network: network, + chainID: chainID, + contractID: contractID, + telemetryType: string(telemetryType), + } +} + +func 
TestTelemetryAdapter(t *testing.T) { + ta := generic.NewTelemetryAdapter(&mockGenerator{}) + + tests := []struct { + name string + contractID string + telemetryType string + networkID string + chainID string + payload []byte + errorMsg string + }{ + { + name: "valid request", + contractID: "contract", + telemetryType: "mercury", + networkID: "solana", + chainID: "1337", + payload: []byte("uh oh"), + }, + { + name: "no valid contractID", + telemetryType: "mercury", + networkID: "solana", + chainID: "1337", + payload: []byte("uh oh"), + errorMsg: "contractID cannot be empty", + }, + { + name: "no valid chainID", + contractID: "contract", + telemetryType: "mercury", + networkID: "solana", + payload: []byte("uh oh"), + errorMsg: "chainID cannot be empty", + }, + { + name: "no valid telemetryType", + contractID: "contract", + networkID: "solana", + chainID: "1337", + payload: []byte("uh oh"), + errorMsg: "telemetryType cannot be empty", + }, + { + name: "no valid network", + contractID: "contract", + telemetryType: "mercury", + chainID: "1337", + payload: []byte("uh oh"), + errorMsg: "network cannot be empty", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := ta.Send(testutils.Context(t), test.networkID, test.chainID, test.contractID, test.telemetryType, test.payload) + if test.errorMsg != "" { + assert.ErrorContains(t, err, test.errorMsg) + } else { + require.NoError(t, err) + key := [4]string{test.networkID, test.chainID, test.contractID, test.telemetryType} + endpoint, ok := ta.Endpoints()[key] + require.True(t, ok) + + me := endpoint.(*mockEndpoint) + assert.Equal(t, test.networkID, me.network) + assert.Equal(t, test.chainID, me.chainID) + assert.Equal(t, test.contractID, me.contractID) + assert.Equal(t, test.telemetryType, me.telemetryType) + assert.Equal(t, test.payload, me.payload) + } + }) + } +} diff --git a/core/services/ocr2/plugins/median/config/config.go b/core/services/ocr2/plugins/median/config/config.go new file 
mode 100644 index 00000000..078a4c26 --- /dev/null +++ b/core/services/ocr2/plugins/median/config/config.go @@ -0,0 +1,24 @@ +// config is a separate package so that we can validate +// the config in other packages, for example in job at job create time. + +package config + +import ( + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +// The PluginConfig struct contains the custom arguments needed for the Median plugin. +type PluginConfig struct { + JuelsPerFeeCoinPipeline string `json:"juelsPerFeeCoinSource"` +} + +// ValidatePluginConfig validates the arguments for the Median plugin. +func ValidatePluginConfig(config PluginConfig) error { + if _, err := pipeline.Parse(config.JuelsPerFeeCoinPipeline); err != nil { + return errors.Wrap(err, "invalid juelsPerFeeCoinSource pipeline") + } + + return nil +} diff --git a/core/services/ocr2/plugins/median/config/config_test.go b/core/services/ocr2/plugins/median/config/config_test.go new file mode 100644 index 00000000..6e137992 --- /dev/null +++ b/core/services/ocr2/plugins/median/config/config_test.go @@ -0,0 +1,22 @@ +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidatePluginConfig(t *testing.T) { + for _, s := range []struct { + name string + pipeline string + }{ + {"empty", ""}, + {"blank", " "}, + {"foo", "foo"}, + } { + t.Run(s.name, func(t *testing.T) { + assert.Error(t, ValidatePluginConfig(PluginConfig{JuelsPerFeeCoinPipeline: s.pipeline})) + }) + } +} diff --git a/core/services/ocr2/plugins/median/services.go b/core/services/ocr2/plugins/median/services.go new file mode 100644 index 00000000..4ea4abb3 --- /dev/null +++ b/core/services/ocr2/plugins/median/services.go @@ -0,0 +1,170 @@ +package median + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + libocr "github.com/goplugin/libocr/offchainreporting2plus" + + "github.com/goplugin/plugin-common/pkg/loop" + 
"github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/plugin-feeds/median" + + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/median/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/plugins" +) + +type MedianConfig interface { + JobPipelineMaxSuccessfulRuns() uint64 + JobPipelineResultWriteQueueDepth() uint64 + plugins.RegistrarConfig +} + +// concrete implementation of MedianConfig +type medianConfig struct { + jobPipelineMaxSuccessfulRuns uint64 + jobPipelineResultWriteQueueDepth uint64 + plugins.RegistrarConfig +} + +func NewMedianConfig(jobPipelineMaxSuccessfulRuns uint64, jobPipelineResultWriteQueueDepth uint64, pluginProcessCfg plugins.RegistrarConfig) MedianConfig { + return &medianConfig{ + jobPipelineMaxSuccessfulRuns: jobPipelineMaxSuccessfulRuns, + jobPipelineResultWriteQueueDepth: jobPipelineResultWriteQueueDepth, + RegistrarConfig: pluginProcessCfg, + } +} + +func (m *medianConfig) JobPipelineMaxSuccessfulRuns() uint64 { + return m.jobPipelineMaxSuccessfulRuns +} + +func (m *medianConfig) JobPipelineResultWriteQueueDepth() uint64 { + return m.jobPipelineResultWriteQueueDepth +} + +func NewMedianServices(ctx context.Context, + jb job.Job, + isNewlyCreatedJob bool, + relayer loop.Relayer, + pipelineRunner pipeline.Runner, + lggr logger.Logger, + argsNoPlugin libocr.OCR2OracleArgs, + cfg MedianConfig, + chEnhancedTelem chan ocrcommon.EnhancedTelemetryData, + errorLog loop.ErrorLog, + +) (srvs []job.ServiceCtx, err error) { + var pluginConfig config.PluginConfig + err = json.Unmarshal(jb.OCR2OracleSpec.PluginConfig.Bytes(), &pluginConfig) + if err != nil { + return + } + err = 
config.ValidatePluginConfig(pluginConfig) + if err != nil { + return + } + spec := jb.OCR2OracleSpec + + runSaver := ocrcommon.NewResultRunSaver( + pipelineRunner, + lggr, + cfg.JobPipelineMaxSuccessfulRuns(), + cfg.JobPipelineResultWriteQueueDepth(), + ) + + provider, err := relayer.NewPluginProvider(ctx, types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, + ContractID: spec.ContractID, + New: isNewlyCreatedJob, + RelayConfig: spec.RelayConfig.Bytes(), + ProviderType: string(spec.PluginType), + }, types.PluginArgs{ + TransmitterID: spec.TransmitterID.String, + PluginConfig: spec.PluginConfig.Bytes(), + }) + if err != nil { + return + } + + medianProvider, ok := provider.(types.MedianProvider) + if !ok { + return nil, errors.New("could not coerce PluginProvider to MedianProvider") + } + + srvs = append(srvs, provider) + argsNoPlugin.ContractTransmitter = provider.ContractTransmitter() + argsNoPlugin.ContractConfigTracker = provider.ContractConfigTracker() + argsNoPlugin.OffchainConfigDigester = provider.OffchainConfigDigester() + + abort := func() { + if cerr := services.MultiCloser(srvs).Close(); err != nil { + lggr.Errorw("Error closing unused services", "err", cerr) + } + } + + dataSource, juelsPerFeeCoinSource := ocrcommon.NewDataSourceV2(pipelineRunner, + jb, + *jb.PipelineSpec, + lggr, + runSaver, + chEnhancedTelem, + ), ocrcommon.NewInMemoryDataSource(pipelineRunner, jb, pipeline.Spec{ + ID: jb.ID, + DotDagSource: pluginConfig.JuelsPerFeeCoinPipeline, + CreatedAt: time.Now(), + }, lggr) + + if cmdName := env.MedianPlugin.Cmd.Get(); cmdName != "" { + // use unique logger names so we can use it to register a loop + medianLggr := lggr.Named("Median").Named(spec.ContractID).Named(spec.GetID()) + envVars, err2 := plugins.ParseEnvFile(env.MedianPlugin.Env.Get()) + if err2 != nil { + err = fmt.Errorf("failed to parse median env file: %w", err2) + abort() + return + } + cmdFn, telem, err2 := cfg.RegisterLOOP(plugins.CmdConfig{ + ID: 
medianLggr.Name(), + Cmd: cmdName, + Env: envVars, + }) + if err2 != nil { + err = fmt.Errorf("failed to register loop: %w", err2) + abort() + return + } + median := loop.NewMedianService(lggr, telem, cmdFn, medianProvider, dataSource, juelsPerFeeCoinSource, errorLog) + argsNoPlugin.ReportingPluginFactory = median + srvs = append(srvs, median) + } else { + argsNoPlugin.ReportingPluginFactory, err = median.NewPlugin(lggr).NewMedianFactory(ctx, medianProvider, dataSource, juelsPerFeeCoinSource, errorLog) + if err != nil { + err = fmt.Errorf("failed to create median factory: %w", err) + abort() + return + } + } + + var oracle libocr.Oracle + oracle, err = libocr.NewOracle(argsNoPlugin) + if err != nil { + abort() + return + } + srvs = append(srvs, runSaver, job.NewServiceAdapter(oracle)) + if !jb.OCR2OracleSpec.CaptureEATelemetry { + lggr.Infof("Enhanced EA telemetry is disabled for job %s", jb.Name.ValueOrZero()) + } + return +} diff --git a/core/services/ocr2/plugins/mercury/config/config.go b/core/services/ocr2/plugins/mercury/config/config.go new file mode 100644 index 00000000..294b1eb7 --- /dev/null +++ b/core/services/ocr2/plugins/mercury/config/config.go @@ -0,0 +1,84 @@ +// config is a separate package so that we can validate +// the config in other packages, for example in job at job create time. + +package config + +import ( + "errors" + "fmt" + "net/url" + "regexp" + + pkgerrors "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/null" + mercuryutils "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type PluginConfig struct { + RawServerURL string `json:"serverURL" toml:"serverURL"` + ServerPubKey utils.PlainHexBytes `json:"serverPubKey" toml:"serverPubKey"` + // InitialBlockNumber allows to set a custom "validFromBlockNumber" for + // the first ever report in the case of a brand new feed, where the mercury + // server does not have any previous reports. 
For a brand new feed, this + // effectively sets the "first" validFromBlockNumber. + InitialBlockNumber null.Int64 `json:"initialBlockNumber" toml:"initialBlockNumber"` + + LinkFeedID *mercuryutils.FeedID `json:"linkFeedID" toml:"linkFeedID"` + NativeFeedID *mercuryutils.FeedID `json:"nativeFeedID" toml:"nativeFeedID"` +} + +func ValidatePluginConfig(config PluginConfig, feedID mercuryutils.FeedID) (merr error) { + if config.RawServerURL == "" { + merr = errors.New("mercury: ServerURL must be specified") + } else { + var normalizedURI string + if schemeRegexp.MatchString(config.RawServerURL) { + normalizedURI = config.RawServerURL + } else { + normalizedURI = fmt.Sprintf("wss://%s", config.RawServerURL) + } + uri, err := url.ParseRequestURI(normalizedURI) + if err != nil { + merr = pkgerrors.Wrap(err, "Mercury: invalid value for ServerURL") + } else if uri.Scheme != "wss" { + merr = pkgerrors.Errorf(`Mercury: invalid scheme specified for MercuryServer, got: %q (scheme: %q) but expected a websocket url e.g. 
"192.0.2.2:4242" or "wss://192.0.2.2:4242"`, config.RawServerURL, uri.Scheme) + } + } + + if len(config.ServerPubKey) != 32 { + merr = errors.Join(merr, errors.New("mercury: ServerPubKey is required and must be a 32-byte hex string")) + } + + switch feedID.Version() { + case 1: + if config.LinkFeedID != nil { + merr = errors.Join(merr, errors.New("linkFeedID may not be specified for v1 jobs")) + } + if config.NativeFeedID != nil { + merr = errors.Join(merr, errors.New("nativeFeedID may not be specified for v1 jobs")) + } + case 2, 3: + if config.LinkFeedID == nil { + merr = errors.Join(merr, fmt.Errorf("linkFeedID must be specified for v%d jobs", feedID.Version())) + } + if config.NativeFeedID == nil { + merr = errors.Join(merr, fmt.Errorf("nativeFeedID must be specified for v%d jobs", feedID.Version())) + } + if config.InitialBlockNumber.Valid { + merr = errors.Join(merr, fmt.Errorf("initialBlockNumber may not be specified for v%d jobs", feedID.Version())) + } + default: + merr = errors.Join(merr, fmt.Errorf("got unsupported schema version %d; supported versions are 1,2,3", feedID.Version())) + } + + return merr +} + +var schemeRegexp = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9+.-]*://`) +var wssRegexp = regexp.MustCompile(`^wss://`) + +func (p PluginConfig) ServerURL() string { + return wssRegexp.ReplaceAllString(p.RawServerURL, "") +} diff --git a/core/services/ocr2/plugins/mercury/config/config_test.go b/core/services/ocr2/plugins/mercury/config/config_test.go new file mode 100644 index 00000000..60cc548f --- /dev/null +++ b/core/services/ocr2/plugins/mercury/config/config_test.go @@ -0,0 +1,147 @@ +package config + +import ( + "testing" + + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var v1FeedId = [32]uint8{00, 01, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114} +var v2FeedId = [32]uint8{00, 02, 
107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114} + +func Test_PluginConfig(t *testing.T) { + t.Run("Mercury v1", func(t *testing.T) { + t.Run("with valid values", func(t *testing.T) { + rawToml := ` + ServerURL = "example.com:80" + ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93" + InitialBlockNumber = 1234 + ` + + var mc PluginConfig + err := toml.Unmarshal([]byte(rawToml), &mc) + require.NoError(t, err) + + assert.Equal(t, "example.com:80", mc.RawServerURL) + assert.Equal(t, "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", mc.ServerPubKey.String()) + assert.Equal(t, int64(1234), mc.InitialBlockNumber.Int64) + + err = ValidatePluginConfig(mc, v1FeedId) + require.NoError(t, err) + }) + + t.Run("with invalid values", func(t *testing.T) { + rawToml := ` + InitialBlockNumber = "invalid" + ` + + var mc PluginConfig + err := toml.Unmarshal([]byte(rawToml), &mc) + require.Error(t, err) + assert.EqualError(t, err, `toml: strconv.ParseInt: parsing "invalid": invalid syntax`) + + rawToml = ` + ServerURL = "http://example.com" + ServerPubKey = "4242" + ` + + err = toml.Unmarshal([]byte(rawToml), &mc) + require.NoError(t, err) + + err = ValidatePluginConfig(mc, v1FeedId) + require.Error(t, err) + assert.Contains(t, err.Error(), `Mercury: invalid scheme specified for MercuryServer, got: "http://example.com" (scheme: "http") but expected a websocket url e.g. 
"192.0.2.2:4242" or "wss://192.0.2.2:4242"`) + assert.Contains(t, err.Error(), `mercury: ServerPubKey is required and must be a 32-byte hex string`) + }) + + t.Run("with unnecessary values", func(t *testing.T) { + rawToml := ` + ServerURL = "example.com:80" + ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93" + LinkFeedID = "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472" + ` + + var mc PluginConfig + err := toml.Unmarshal([]byte(rawToml), &mc) + require.NoError(t, err) + + err = ValidatePluginConfig(mc, v1FeedId) + assert.Contains(t, err.Error(), `linkFeedID may not be specified for v1 jobs`) + }) + }) + + t.Run("Mercury v2/v3", func(t *testing.T) { + t.Run("with valid values", func(t *testing.T) { + rawToml := ` + ServerURL = "example.com:80" + ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93" + LinkFeedID = "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472" + NativeFeedID = "0x00036b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472" + ` + + var mc PluginConfig + err := toml.Unmarshal([]byte(rawToml), &mc) + require.NoError(t, err) + + err = ValidatePluginConfig(mc, v2FeedId) + require.NoError(t, err) + + require.NotNil(t, mc.LinkFeedID) + require.NotNil(t, mc.NativeFeedID) + assert.Equal(t, "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472", (*mc.LinkFeedID).String()) + assert.Equal(t, "0x00036b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472", (*mc.NativeFeedID).String()) + + }) + + t.Run("with invalid values", func(t *testing.T) { + var mc PluginConfig + + rawToml := `LinkFeedID = "test"` + err := toml.Unmarshal([]byte(rawToml), &mc) + assert.Contains(t, err.Error(), "toml: hash: expected a hex string starting with '0x'") + + rawToml = `LinkFeedID = "0xtest"` + err = toml.Unmarshal([]byte(rawToml), &mc) + assert.Contains(t, err.Error(), `toml: hash: UnmarshalText failed: encoding/hex: invalid byte: U+0074 't'`) + + rawToml = 
` + ServerURL = "example.com:80" + ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93" + LinkFeedID = "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472" + ` + err = toml.Unmarshal([]byte(rawToml), &mc) + require.NoError(t, err) + + err = ValidatePluginConfig(mc, v2FeedId) + assert.Contains(t, err.Error(), "nativeFeedID must be specified for v2 jobs") + }) + + t.Run("with unnecessary values", func(t *testing.T) { + rawToml := ` + ServerURL = "example.com:80" + ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93" + InitialBlockNumber = 1234 + ` + + var mc PluginConfig + err := toml.Unmarshal([]byte(rawToml), &mc) + require.NoError(t, err) + + err = ValidatePluginConfig(mc, v2FeedId) + assert.Contains(t, err.Error(), `initialBlockNumber may not be specified for v2 jobs`) + }) + }) +} + +func Test_PluginConfig_ServerURL(t *testing.T) { + pc := PluginConfig{RawServerURL: "example.com"} + assert.Equal(t, "example.com", pc.ServerURL()) + pc = PluginConfig{RawServerURL: "wss://example.com"} + assert.Equal(t, "example.com", pc.ServerURL()) + pc = PluginConfig{RawServerURL: "example.com:1234/foo"} + assert.Equal(t, "example.com:1234/foo", pc.ServerURL()) + pc = PluginConfig{RawServerURL: "wss://example.com:1234/foo"} + assert.Equal(t, "example.com:1234/foo", pc.ServerURL()) +} diff --git a/core/services/ocr2/plugins/mercury/helpers_test.go b/core/services/ocr2/plugins/mercury/helpers_test.go new file mode 100644 index 00000000..c2bdaf6e --- /dev/null +++ b/core/services/ocr2/plugins/mercury/helpers_test.go @@ -0,0 +1,464 @@ +package mercury_test + +import ( + "context" + "crypto/ed25519" + "encoding/binary" + "errors" + "fmt" + "math/big" + "net" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + 
"go.uber.org/zap/zaptest/observer" + + "github.com/goplugin/wsrpc" + "github.com/goplugin/wsrpc/credentials" + "github.com/goplugin/wsrpc/peer" + + "github.com/goplugin/libocr/offchainreporting2/chains/evmutil" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/keystest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrbootstrap" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" +) + +var _ pb.MercuryServer = &mercuryServer{} + +type request struct { + pk credentials.StaticSizedPublicKey + req *pb.TransmitRequest +} + +type mercuryServer struct { + privKey ed25519.PrivateKey + reqsCh chan request + t *testing.T + buildReport func() []byte +} + +func NewMercuryServer(t *testing.T, privKey ed25519.PrivateKey, reqsCh chan request, buildReport func() []byte) *mercuryServer { + return &mercuryServer{privKey, reqsCh, t, buildReport} +} + +func (s *mercuryServer) Transmit(ctx context.Context, req *pb.TransmitRequest) (*pb.TransmitResponse, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("could 
not extract public key") + } + r := request{p.PublicKey, req} + s.reqsCh <- r + + return &pb.TransmitResponse{ + Code: 1, + Error: "", + }, nil +} + +func (s *mercuryServer) LatestReport(ctx context.Context, lrr *pb.LatestReportRequest) (*pb.LatestReportResponse, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("could not extract public key") + } + s.t.Logf("mercury server got latest report from %x for feed id 0x%x", p.PublicKey, lrr.FeedId) + + out := new(pb.LatestReportResponse) + out.Report = new(pb.Report) + out.Report.FeedId = lrr.FeedId + + report := s.buildReport() + payload, err := mercury.PayloadTypes.Pack(evmutil.RawReportContext(ocrtypes.ReportContext{}), report, [][32]byte{}, [][32]byte{}, [32]byte{}) + if err != nil { + panic(err) + } + out.Report.Payload = payload + return out, nil +} + +func startMercuryServer(t *testing.T, srv *mercuryServer, pubKeys []ed25519.PublicKey) (serverURL string) { + // Set up the wsrpc server + lis, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("[MAIN] failed to listen: %v", err) + } + serverURL = lis.Addr().String() + s := wsrpc.NewServer(wsrpc.Creds(srv.privKey, pubKeys)) + + // Register mercury implementation with the wsrpc server + pb.RegisterMercuryServer(s, srv) + + // Start serving + go s.Serve(lis) + t.Cleanup(s.Stop) + + return +} + +type Feed struct { + name string + id [32]byte + baseBenchmarkPrice *big.Int + baseBid *big.Int + baseAsk *big.Int +} + +func randomFeedID(version uint16) [32]byte { + id := [32]byte(utils.NewHash()) + binary.BigEndian.PutUint16(id[:2], version) + return id +} + +type Node struct { + App plugin.Application + ClientPubKey credentials.StaticSizedPublicKey + KeyBundle ocr2key.KeyBundle +} + +func (node *Node) AddJob(t *testing.T, spec string) { + c := node.App.GetConfig() + job, err := validate.ValidatedOracleSpecToml(c.OCR2(), c.Insecure(), spec) + require.NoError(t, err) + err = node.App.AddJobV2(testutils.Context(t), &job) + 
require.NoError(t, err) +} + +func (node *Node) AddBootstrapJob(t *testing.T, spec string) { + job, err := ocrbootstrap.ValidatedBootstrapSpecToml(spec) + require.NoError(t, err) + err = node.App.AddJobV2(testutils.Context(t), &job) + require.NoError(t, err) +} + +func setupNode( + t *testing.T, + port int, + dbName string, + backend *backends.SimulatedBackend, + csaKey csakey.KeyV2, +) (app plugin.Application, peerID string, clientPubKey credentials.StaticSizedPublicKey, ocr2kb ocr2key.KeyBundle, observedLogs *observer.ObservedLogs) { + k := big.NewInt(int64(port)) // keys unique to port + p2pKey := p2pkey.MustNewV2XXXTestingOnly(k) + rdr := keystest.NewRandReaderFromSeed(int64(port)) + ocr2kb = ocr2key.MustNewInsecure(rdr, chaintype.EVM) + + p2paddresses := []string{fmt.Sprintf("127.0.0.1:%d", port)} + + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + // [JobPipeline] + // MaxSuccessfulRuns = 0 + c.JobPipeline.MaxSuccessfulRuns = ptr(uint64(0)) + + // [Feature] + // UICSAKeys=true + // LogPoller = true + // FeedsManager = false + c.Feature.UICSAKeys = ptr(true) + c.Feature.LogPoller = ptr(true) + c.Feature.FeedsManager = ptr(false) + + // [OCR] + // Enabled = false + c.OCR.Enabled = ptr(false) + + // [OCR2] + // Enabled = true + c.OCR2.Enabled = ptr(true) + + // [P2P] + // PeerID = '$PEERID' + // TraceLogging = true + c.P2P.PeerID = ptr(p2pKey.PeerID()) + c.P2P.TraceLogging = ptr(true) + + // [P2P.V2] + // Enabled = true + // AnnounceAddresses = ['$EXT_IP:17775'] + // ListenAddresses = ['127.0.0.1:17775'] + // DeltaDial = 500ms + // DeltaReconcile = 5s + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.AnnounceAddresses = &p2paddresses + c.P2P.V2.ListenAddresses = &p2paddresses + c.P2P.V2.DeltaDial = commonconfig.MustNewDuration(500 * time.Millisecond) + c.P2P.V2.DeltaReconcile = commonconfig.MustNewDuration(5 * time.Second) + }) + + lggr, observedLogs := logger.TestLoggerObserved(t, zapcore.DebugLevel) + app = 
cltest.NewApplicationWithConfigV2OnSimulatedBlockchain(t, config, backend, p2pKey, ocr2kb, csaKey, lggr.Named(dbName)) + err := app.Start(testutils.Context(t)) + require.NoError(t, err) + + t.Cleanup(func() { + assert.NoError(t, app.Stop()) + }) + + return app, p2pKey.PeerID().Raw(), csaKey.StaticSizedPublicKey(), ocr2kb, observedLogs +} + +func ptr[T any](t T) *T { return &t } + +func addBootstrapJob(t *testing.T, bootstrapNode Node, chainID *big.Int, verifierAddress common.Address, feedName string, feedID [32]byte) { + bootstrapNode.AddBootstrapJob(t, fmt.Sprintf(` +type = "bootstrap" +relay = "evm" +schemaVersion = 1 +name = "boot-%s" +contractID = "%s" +feedID = "0x%x" +contractConfigTrackerPollInterval = "1s" + +[relayConfig] +chainID = %d + `, feedName, verifierAddress, feedID, chainID)) +} + +func addV1MercuryJob( + t *testing.T, + node Node, + i int, + verifierAddress common.Address, + bootstrapPeerID string, + bootstrapNodePort int, + bmBridge, + bidBridge, + askBridge, + serverURL string, + serverPubKey, + clientPubKey ed25519.PublicKey, + feedName string, + feedID [32]byte, + chainID *big.Int, + fromBlock int, +) { + node.AddJob(t, fmt.Sprintf(` +type = "offchainreporting2" +schemaVersion = 1 +name = "mercury-%[1]d-%[14]s" +forwardingAllowed = false +maxTaskDuration = "1s" +contractID = "%[2]s" +feedID = "0x%[11]x" +contractConfigTrackerPollInterval = "1s" +ocrKeyBundleID = "%[3]s" +p2pv2Bootstrappers = [ + "%[4]s" +] +relay = "evm" +pluginType = "mercury" +transmitterID = "%[10]x" +observationSource = """ + // Benchmark Price + price1 [type=bridge name="%[5]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + price1_parse [type=jsonparse path="result"]; + price1_multiply [type=multiply times=100000000 index=0]; + + price1 -> price1_parse -> price1_multiply; + + // Bid + bid [type=bridge name="%[6]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + bid_parse [type=jsonparse 
path="result"]; + bid_multiply [type=multiply times=100000000 index=1]; + + bid -> bid_parse -> bid_multiply; + + // Ask + ask [type=bridge name="%[7]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + ask_parse [type=jsonparse path="result"]; + ask_multiply [type=multiply times=100000000 index=2]; + + ask -> ask_parse -> ask_multiply; +""" + +[pluginConfig] +serverURL = "%[8]s" +serverPubKey = "%[9]x" +initialBlockNumber = %[13]d + +[relayConfig] +chainID = %[12]d + + `, + i, + verifierAddress, + node.KeyBundle.ID(), + fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), + bmBridge, + bidBridge, + askBridge, + serverURL, + serverPubKey, + clientPubKey, + feedID, + chainID, + fromBlock, + feedName, + )) +} + +func addV2MercuryJob( + t *testing.T, + node Node, + i int, + verifierAddress common.Address, + bootstrapPeerID string, + bootstrapNodePort int, + bmBridge, + serverURL string, + serverPubKey, + clientPubKey ed25519.PublicKey, + feedName string, + feedID [32]byte, + linkFeedID [32]byte, + nativeFeedID [32]byte, +) { + node.AddJob(t, fmt.Sprintf(` +type = "offchainreporting2" +schemaVersion = 1 +name = "mercury-%[1]d-%[10]s" +forwardingAllowed = false +maxTaskDuration = "1s" +contractID = "%[2]s" +feedID = "0x%[9]x" +contractConfigTrackerPollInterval = "1s" +ocrKeyBundleID = "%[3]s" +p2pv2Bootstrappers = [ + "%[4]s" +] +relay = "evm" +pluginType = "mercury" +transmitterID = "%[8]x" +observationSource = """ + // Benchmark Price + price1 [type=bridge name="%[5]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + price1_parse [type=jsonparse path="result"]; + price1_multiply [type=multiply times=100000000 index=0]; + + price1 -> price1_parse -> price1_multiply; +""" + +[pluginConfig] +serverURL = "%[6]s" +serverPubKey = "%[7]x" +linkFeedID = "0x%[11]x" +nativeFeedID = "0x%[12]x" + +[relayConfig] +chainID = 1337 + `, + i, + verifierAddress, + node.KeyBundle.ID(), + 
fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), + bmBridge, + serverURL, + serverPubKey, + clientPubKey, + feedID, + feedName, + linkFeedID, + nativeFeedID, + )) +} + +func addV3MercuryJob( + t *testing.T, + node Node, + i int, + verifierAddress common.Address, + bootstrapPeerID string, + bootstrapNodePort int, + bmBridge, + bidBridge, + askBridge, + serverURL string, + serverPubKey, + clientPubKey ed25519.PublicKey, + feedName string, + feedID [32]byte, + linkFeedID [32]byte, + nativeFeedID [32]byte, +) { + node.AddJob(t, fmt.Sprintf(` +type = "offchainreporting2" +schemaVersion = 1 +name = "mercury-%[1]d-%[12]s" +forwardingAllowed = false +maxTaskDuration = "1s" +contractID = "%[2]s" +feedID = "0x%[11]x" +contractConfigTrackerPollInterval = "1s" +ocrKeyBundleID = "%[3]s" +p2pv2Bootstrappers = [ + "%[4]s" +] +relay = "evm" +pluginType = "mercury" +transmitterID = "%[10]x" +observationSource = """ + // Benchmark Price + price1 [type=bridge name="%[5]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + price1_parse [type=jsonparse path="result"]; + price1_multiply [type=multiply times=100000000 index=0]; + + price1 -> price1_parse -> price1_multiply; + + // Bid + bid [type=bridge name="%[6]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + bid_parse [type=jsonparse path="result"]; + bid_multiply [type=multiply times=100000000 index=1]; + + bid -> bid_parse -> bid_multiply; + + // Ask + ask [type=bridge name="%[7]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + ask_parse [type=jsonparse path="result"]; + ask_multiply [type=multiply times=100000000 index=2]; + + ask -> ask_parse -> ask_multiply; +""" + +[pluginConfig] +serverURL = "%[8]s" +serverPubKey = "%[9]x" +linkFeedID = "0x%[13]x" +nativeFeedID = "0x%[14]x" + +[relayConfig] +chainID = 1337 + `, + i, + verifierAddress, + node.KeyBundle.ID(), + fmt.Sprintf("%s@127.0.0.1:%d", 
bootstrapPeerID, bootstrapNodePort), + bmBridge, + bidBridge, + askBridge, + serverURL, + serverPubKey, + clientPubKey, + feedID, + feedName, + linkFeedID, + nativeFeedID, + )) +} diff --git a/core/services/ocr2/plugins/mercury/integration_plugin_test.go b/core/services/ocr2/plugins/mercury/integration_plugin_test.go new file mode 100644 index 00000000..fec9b0e4 --- /dev/null +++ b/core/services/ocr2/plugins/mercury/integration_plugin_test.go @@ -0,0 +1,27 @@ +//go:build integration + +package mercury_test + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/config/env" +) + +func TestIntegration_MercuryV1_Plugin(t *testing.T) { + t.Setenv(string(env.MercuryPlugin.Cmd), "plugin-mercury") + + integration_MercuryV1(t) +} + +func TestIntegration_MercuryV2_Plugin(t *testing.T) { + t.Setenv(string(env.MercuryPlugin.Cmd), "plugin-mercury") + + integration_MercuryV2(t) +} + +func TestIntegration_MercuryV3_Plugin(t *testing.T) { + t.Setenv(string(env.MercuryPlugin.Cmd), "plugin-mercury") + + integration_MercuryV3(t) +} diff --git a/core/services/ocr2/plugins/mercury/integration_test.go b/core/services/ocr2/plugins/mercury/integration_test.go new file mode 100644 index 00000000..321032b9 --- /dev/null +++ b/core/services/ocr2/plugins/mercury/integration_test.go @@ -0,0 +1,1028 @@ +package mercury_test + +import ( + "crypto/ed25519" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + 
"go.uber.org/zap/zaptest/observer" + + "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + "github.com/goplugin/libocr/offchainreporting2plus/ocr3confighelper" + ocr2types "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/goplugin/wsrpc/credentials" + + mercurytypes "github.com/goplugin/plugin-common/pkg/types/mercury" + v1 "github.com/goplugin/plugin-common/pkg/types/mercury/v1" + v2 "github.com/goplugin/plugin-common/pkg/types/mercury/v2" + v3 "github.com/goplugin/plugin-common/pkg/types/mercury/v3" + relaymercury "github.com/goplugin/plugin-data-streams/mercury" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + token "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/fee_manager" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/reward_manager" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/verifier" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury" + reportcodecv1 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v1/reportcodec" + reportcodecv2 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v2/reportcodec" + reportcodecv3 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v3/reportcodec" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +var ( + f = uint8(1) + n = 4 // number of nodes + multiplier int64 = 100000000 + 
rawOnchainConfig = mercurytypes.OnchainConfig{ + Min: big.NewInt(0), + Max: big.NewInt(math.MaxInt64), + } + rawReportingPluginConfig = relaymercury.OffchainConfig{ + ExpirationWindow: 1, + BaseUSDFee: decimal.NewFromInt(100), + } +) + +func detectPanicLogs(t *testing.T, logObservers []*observer.ObservedLogs) { + var panicLines []string + for _, observedLogs := range logObservers { + panicLogs := observedLogs.Filter(func(e observer.LoggedEntry) bool { + return e.Level >= zapcore.DPanicLevel + }) + for _, log := range panicLogs.All() { + line := fmt.Sprintf("%v\t%s\t%s\t%s\t%s", log.Time.Format(time.RFC3339), log.Level.CapitalString(), log.LoggerName, log.Caller.TrimmedPath(), log.Message) + panicLines = append(panicLines, line) + } + } + if len(panicLines) > 0 { + t.Errorf("Found logs with DPANIC or higher level:\n%s", strings.Join(panicLines, "\n")) + } +} + +func setupBlockchain(t *testing.T) (*bind.TransactOpts, *backends.SimulatedBackend, *verifier.Verifier, common.Address) { + steve := testutils.MustNewSimTransactor(t) // config contract deployer and owner + genesisData := core.GenesisAlloc{steve.From: {Balance: assets.Ether(1000).ToInt()}} + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + backend.Commit() // ensure starting block number at least 1 + stopMining := cltest.Mine(backend, 1*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + t.Cleanup(stopMining) + + // Deploy contracts + linkTokenAddress, _, linkToken, err := token.DeployLinkToken(steve, backend) + require.NoError(t, err) + _, err = linkToken.Transfer(steve, steve.From, big.NewInt(1000)) + require.NoError(t, err) + nativeTokenAddress, _, nativeToken, err := token.DeployLinkToken(steve, backend) + require.NoError(t, err) + _, err = nativeToken.Transfer(steve, steve.From, big.NewInt(1000)) + require.NoError(t, err) + verifierProxyAddr, _, verifierProxy, err := 
verifier_proxy.DeployVerifierProxy(steve, backend, common.Address{}) // zero address for access controller disables access control + require.NoError(t, err) + verifierAddress, _, verifier, err := verifier.DeployVerifier(steve, backend, verifierProxyAddr) + require.NoError(t, err) + _, err = verifierProxy.InitializeVerifier(steve, verifierAddress) + require.NoError(t, err) + rewardManagerAddr, _, rewardManager, err := reward_manager.DeployRewardManager(steve, backend, linkTokenAddress) + require.NoError(t, err) + feeManagerAddr, _, _, err := fee_manager.DeployFeeManager(steve, backend, linkTokenAddress, nativeTokenAddress, verifierProxyAddr, rewardManagerAddr) + require.NoError(t, err) + _, err = verifierProxy.SetFeeManager(steve, feeManagerAddr) + require.NoError(t, err) + _, err = rewardManager.SetFeeManager(steve, feeManagerAddr) + require.NoError(t, err) + backend.Commit() + + return steve, backend, verifier, verifierAddress +} + +func TestIntegration_MercuryV1(t *testing.T) { + t.Parallel() + + integration_MercuryV1(t) +} + +func integration_MercuryV1(t *testing.T) { + var logObservers []*observer.ObservedLogs + t.Cleanup(func() { + detectPanicLogs(t, logObservers) + }) + lggr := logger.TestLogger(t) + const fromBlock = 1 // cannot use zero, start from block 1 + testStartTimeStamp := uint32(time.Now().Unix()) + + // test vars + // pError is the probability that an EA will return an error instead of a result, as integer percentage + // pError = 0 means it will never return error + pError := atomic.Int64{} + + // feeds + btcFeed := Feed{"BTC/USD", randomFeedID(1), big.NewInt(20_000 * multiplier), big.NewInt(19_997 * multiplier), big.NewInt(20_004 * multiplier)} + ethFeed := Feed{"ETH/USD", randomFeedID(1), big.NewInt(1_568 * multiplier), big.NewInt(1_566 * multiplier), big.NewInt(1_569 * multiplier)} + linkFeed := Feed{"PLI/USD", randomFeedID(1), big.NewInt(7150 * multiplier / 1000), big.NewInt(7123 * multiplier / 1000), big.NewInt(7177 * multiplier / 1000)} + 
feeds := []Feed{btcFeed, ethFeed, linkFeed} + feedM := make(map[[32]byte]Feed, len(feeds)) + for i := range feeds { + feedM[feeds[i].id] = feeds[i] + } + + reqs := make(chan request) + serverKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(-1)) + serverPubKey := serverKey.PublicKey + srv := NewMercuryServer(t, ed25519.PrivateKey(serverKey.Raw()), reqs, func() []byte { + report, err := (&reportcodecv1.ReportCodec{}).BuildReport(v1.ReportFields{BenchmarkPrice: big.NewInt(234567), Bid: big.NewInt(1), Ask: big.NewInt(1), CurrentBlockHash: make([]byte, 32)}) + if err != nil { + panic(err) + } + return report + }) + clientCSAKeys := make([]csakey.KeyV2, n+1) + clientPubKeys := make([]ed25519.PublicKey, n+1) + for i := 0; i < n+1; i++ { + k := big.NewInt(int64(i)) + key := csakey.MustNewV2XXXTestingOnly(k) + clientCSAKeys[i] = key + clientPubKeys[i] = key.PublicKey + } + serverURL := startMercuryServer(t, srv, clientPubKeys) + chainID := testutils.SimulatedChainID + + steve, backend, verifier, verifierAddress := setupBlockchain(t) + + // Setup bootstrap + oracle nodes + bootstrapNodePort := freeport.GetOne(t) + appBootstrap, bootstrapPeerID, _, bootstrapKb, observedLogs := setupNode(t, bootstrapNodePort, "bootstrap_mercury", backend, clientCSAKeys[n]) + bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb} + logObservers = append(logObservers, observedLogs) + + // Set up n oracles + var ( + oracles []confighelper.OracleIdentityExtra + nodes []Node + ) + ports := freeport.GetN(t, n) + for i := 0; i < n; i++ { + app, peerID, transmitter, kb, observedLogs := setupNode(t, ports[i], fmt.Sprintf("oracle_mercury%d", i), backend, clientCSAKeys[i]) + + nodes = append(nodes, Node{ + app, transmitter, kb, + }) + offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x")) + oracles = append(oracles, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: offchainPublicKey, + TransmitAccount: 
ocr2types.Account(fmt.Sprintf("%x", transmitter[:])), + OffchainPublicKey: kb.OffchainPublicKey(), + PeerID: peerID, + }, + ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(), + }) + logObservers = append(logObservers, observedLogs) + } + + for _, feed := range feeds { + addBootstrapJob(t, bootstrapNode, chainID, verifierAddress, feed.name, feed.id) + } + + createBridge := func(name string, i int, p *big.Int, borm bridges.ORM) (bridgeName string) { + bridge := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + b, err := io.ReadAll(req.Body) + require.NoError(t, err) + require.Equal(t, `{"data":{"from":"ETH","to":"USD"}}`, string(b)) + + r := rand.Int63n(101) + if r > pError.Load() { + res.WriteHeader(http.StatusOK) + val := decimal.NewFromBigInt(p, 0).Div(decimal.NewFromInt(multiplier)).Add(decimal.NewFromInt(int64(i)).Div(decimal.NewFromInt(100))).String() + resp := fmt.Sprintf(`{"result": %s}`, val) + _, err := res.Write([]byte(resp)) + require.NoError(t, err) + } else { + res.WriteHeader(http.StatusInternalServerError) + resp := `{"error": "pError test error"}` + _, err := res.Write([]byte(resp)) + require.NoError(t, err) + } + })) + t.Cleanup(bridge.Close) + u, _ := url.Parse(bridge.URL) + bridgeName = fmt.Sprintf("bridge-%s-%d", name, i) + require.NoError(t, borm.CreateBridgeType(&bridges.BridgeType{ + Name: bridges.BridgeName(bridgeName), + URL: models.WebURL(*u), + })) + + return bridgeName + } + + // Add OCR jobs - one per feed on each node + for i, node := range nodes { + for j, feed := range feeds { + bmBridge := createBridge(fmt.Sprintf("benchmarkprice-%d", j), i, feed.baseBenchmarkPrice, node.App.BridgeORM()) + askBridge := createBridge(fmt.Sprintf("ask-%d", j), i, feed.baseAsk, node.App.BridgeORM()) + bidBridge := createBridge(fmt.Sprintf("bid-%d", j), i, feed.baseBid, node.App.BridgeORM()) + + addV1MercuryJob( + t, + node, + i, + verifierAddress, + bootstrapPeerID, + bootstrapNodePort, + bmBridge, + 
bidBridge, + askBridge, + serverURL, + serverPubKey, + clientPubKeys[i], + feed.name, + feed.id, + chainID, + fromBlock, + ) + } + } + + // Setup config on contract + onchainConfig, err := (relaymercury.StandardOnchainConfigCodec{}).Encode(rawOnchainConfig) + require.NoError(t, err) + + reportingPluginConfig, err := json.Marshal(rawReportingPluginConfig) + require.NoError(t, err) + + signers, _, _, onchainConfig, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsForTestsMercuryV02( + 2*time.Second, // DeltaProgress + 20*time.Second, // DeltaResend + 400*time.Millisecond, // DeltaInitial + 100*time.Millisecond, // DeltaRound + 0, // DeltaGrace + 300*time.Millisecond, // DeltaCertifiedCommitRequest + 1*time.Minute, // DeltaStage + 100, // rMax + []int{len(nodes)}, // S + oracles, + reportingPluginConfig, // reportingPluginConfig []byte, + 250*time.Millisecond, // Max duration observation + int(f), // f + onchainConfig, + ) + + require.NoError(t, err) + signerAddresses, err := evm.OnchainPublicKeyToAddress(signers) + require.NoError(t, err) + + offchainTransmitters := make([][32]byte, n) + for i := 0; i < n; i++ { + offchainTransmitters[i] = nodes[i].ClientPubKey + } + + for i, feed := range feeds { + lggr.Infow("Setting Config on Oracle Contract", + "i", i, + "feedID", feed.id, + "feedName", feed.name, + "signerAddresses", signerAddresses, + "offchainTransmitters", offchainTransmitters, + "f", f, + "onchainConfig", onchainConfig, + "offchainConfigVersion", offchainConfigVersion, + "offchainConfig", offchainConfig, + ) + + _, err = verifier.SetConfig( + steve, + feed.id, + signerAddresses, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + nil, + ) + require.NoError(t, err) + backend.Commit() + } + + // Bury it with finality depth + ch, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(testutils.SimulatedChainID.String()) + require.NoError(t, err) + finalityDepth := 
ch.Config().EVM().FinalityDepth() + for i := 0; i < int(finalityDepth); i++ { + backend.Commit() + } + + t.Run("receives at least one report per feed from each oracle when EAs are at 100% reliability", func(t *testing.T) { + // Expect at least one report per feed from each oracle + seen := make(map[[32]byte]map[credentials.StaticSizedPublicKey]struct{}) + for i := range feeds { + // feedID will be deleted when all n oracles have reported + seen[feeds[i].id] = make(map[credentials.StaticSizedPublicKey]struct{}, n) + } + + for req := range reqs { + v := make(map[string]interface{}) + err := mercury.PayloadTypes.UnpackIntoMap(v, req.req.Payload) + require.NoError(t, err) + report, exists := v["report"] + if !exists { + t.Fatalf("expected payload %#v to contain 'report'", v) + } + reportElems := make(map[string]interface{}) + err = reportcodecv1.ReportTypes.UnpackIntoMap(reportElems, report.([]byte)) + require.NoError(t, err) + + feedID := reportElems["feedId"].([32]uint8) + feed, exists := feedM[feedID] + require.True(t, exists) + + if _, exists := seen[feedID]; !exists { + continue // already saw all oracles for this feed + } + + num, err := (&reportcodecv1.ReportCodec{}).CurrentBlockNumFromReport(ocr2types.Report(report.([]byte))) + require.NoError(t, err) + currentBlock, err := backend.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + + assert.GreaterOrEqual(t, currentBlock.Number().Int64(), num) + + expectedBm := feed.baseBenchmarkPrice + expectedBid := feed.baseBid + expectedAsk := feed.baseAsk + + assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp)) + assert.InDelta(t, expectedBm.Int64(), reportElems["benchmarkPrice"].(*big.Int).Int64(), 5000000) + assert.InDelta(t, expectedBid.Int64(), reportElems["bid"].(*big.Int).Int64(), 5000000) + assert.InDelta(t, expectedAsk.Int64(), reportElems["ask"].(*big.Int).Int64(), 5000000) + assert.GreaterOrEqual(t, int(currentBlock.Number().Int64()), 
int(reportElems["currentBlockNum"].(uint64))) + assert.GreaterOrEqual(t, currentBlock.Time(), reportElems["currentBlockTimestamp"].(uint64)) + assert.NotEqual(t, common.Hash{}, common.Hash(reportElems["currentBlockHash"].([32]uint8))) + assert.LessOrEqual(t, int(reportElems["validFromBlockNum"].(uint64)), int(reportElems["currentBlockNum"].(uint64))) + assert.Less(t, int64(0), int64(reportElems["validFromBlockNum"].(uint64))) + + t.Logf("oracle %x reported for feed %s (0x%x)", req.pk, feed.name, feed.id) + + seen[feedID][req.pk] = struct{}{} + if len(seen[feedID]) == n { + t.Logf("all oracles reported for feed %s (0x%x)", feed.name, feed.id) + delete(seen, feedID) + if len(seen) == 0 { + break // saw all oracles; success! + } + } + } + }) + + t.Run("receives at least one report per feed from each oracle when EAs are at 80% reliability", func(t *testing.T) { + pError.Store(20) // 20% chance of EA error + + // Expect at least one report per feed from each oracle + seen := make(map[[32]byte]map[credentials.StaticSizedPublicKey]struct{}) + for i := range feeds { + // feedID will be deleted when all n oracles have reported + seen[feeds[i].id] = make(map[credentials.StaticSizedPublicKey]struct{}, n) + } + + for req := range reqs { + v := make(map[string]interface{}) + err := mercury.PayloadTypes.UnpackIntoMap(v, req.req.Payload) + require.NoError(t, err) + report, exists := v["report"] + if !exists { + t.Fatalf("expected payload %#v to contain 'report'", v) + } + reportElems := make(map[string]interface{}) + err = reportcodecv1.ReportTypes.UnpackIntoMap(reportElems, report.([]byte)) + require.NoError(t, err) + + feedID := reportElems["feedId"].([32]uint8) + feed, exists := feedM[feedID] + require.True(t, exists) + + if _, exists := seen[feedID]; !exists { + continue // already saw all oracles for this feed + } + + num, err := (&reportcodecv1.ReportCodec{}).CurrentBlockNumFromReport(ocr2types.Report(report.([]byte))) + require.NoError(t, err) + currentBlock, err := 
backend.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + + assert.GreaterOrEqual(t, currentBlock.Number().Int64(), num) + + expectedBm := feed.baseBenchmarkPrice + expectedBid := feed.baseBid + expectedAsk := feed.baseAsk + + assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp)) + assert.InDelta(t, expectedBm.Int64(), reportElems["benchmarkPrice"].(*big.Int).Int64(), 5000000) + assert.InDelta(t, expectedBid.Int64(), reportElems["bid"].(*big.Int).Int64(), 5000000) + assert.InDelta(t, expectedAsk.Int64(), reportElems["ask"].(*big.Int).Int64(), 5000000) + assert.GreaterOrEqual(t, int(currentBlock.Number().Int64()), int(reportElems["currentBlockNum"].(uint64))) + assert.GreaterOrEqual(t, currentBlock.Time(), reportElems["currentBlockTimestamp"].(uint64)) + assert.NotEqual(t, common.Hash{}, common.Hash(reportElems["currentBlockHash"].([32]uint8))) + assert.LessOrEqual(t, int(reportElems["validFromBlockNum"].(uint64)), int(reportElems["currentBlockNum"].(uint64))) + + t.Logf("oracle %x reported for feed %s (0x%x)", req.pk, feed.name, feed.id) + + seen[feedID][req.pk] = struct{}{} + if len(seen[feedID]) == n { + t.Logf("all oracles reported for feed %s (0x%x)", feed.name, feed.id) + delete(seen, feedID) + if len(seen) == 0 { + break // saw all oracles; success! 
+ } + } + } + }) +} + +func TestIntegration_MercuryV2(t *testing.T) { + t.Parallel() + + integration_MercuryV2(t) +} + +func integration_MercuryV2(t *testing.T) { + var logObservers []*observer.ObservedLogs + t.Cleanup(func() { + detectPanicLogs(t, logObservers) + }) + + testStartTimeStamp := uint32(time.Now().Unix()) + + // test vars + // pError is the probability that an EA will return an error instead of a result, as integer percentage + // pError = 0 means it will never return error + pError := atomic.Int64{} + + // feeds + btcFeed := Feed{ + name: "BTC/USD", + id: randomFeedID(2), + baseBenchmarkPrice: big.NewInt(20_000 * multiplier), + } + ethFeed := Feed{ + name: "ETH/USD", + id: randomFeedID(2), + baseBenchmarkPrice: big.NewInt(1_568 * multiplier), + } + linkFeed := Feed{ + name: "PLI/USD", + id: randomFeedID(2), + baseBenchmarkPrice: big.NewInt(7150 * multiplier / 1000), + } + feeds := []Feed{btcFeed, ethFeed, linkFeed} + feedM := make(map[[32]byte]Feed, len(feeds)) + for i := range feeds { + feedM[feeds[i].id] = feeds[i] + } + + reqs := make(chan request) + serverKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(-1)) + serverPubKey := serverKey.PublicKey + srv := NewMercuryServer(t, ed25519.PrivateKey(serverKey.Raw()), reqs, func() []byte { + report, err := (&reportcodecv2.ReportCodec{}).BuildReport(v2.ReportFields{BenchmarkPrice: big.NewInt(234567), LinkFee: big.NewInt(1), NativeFee: big.NewInt(1)}) + if err != nil { + panic(err) + } + return report + }) + clientCSAKeys := make([]csakey.KeyV2, n+1) + clientPubKeys := make([]ed25519.PublicKey, n+1) + for i := 0; i < n+1; i++ { + k := big.NewInt(int64(i)) + key := csakey.MustNewV2XXXTestingOnly(k) + clientCSAKeys[i] = key + clientPubKeys[i] = key.PublicKey + } + serverURL := startMercuryServer(t, srv, clientPubKeys) + chainID := testutils.SimulatedChainID + + steve, backend, verifier, verifierAddress := setupBlockchain(t) + + // Setup bootstrap + oracle nodes + bootstrapNodePort := freeport.GetOne(t) + 
appBootstrap, bootstrapPeerID, _, bootstrapKb, observedLogs := setupNode(t, bootstrapNodePort, "bootstrap_mercury", backend, clientCSAKeys[n]) + bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb} + logObservers = append(logObservers, observedLogs) + + // Set up n oracles + var ( + oracles []confighelper.OracleIdentityExtra + nodes []Node + ) + ports := freeport.GetN(t, n) + for i := 0; i < n; i++ { + app, peerID, transmitter, kb, observedLogs := setupNode(t, ports[i], fmt.Sprintf("oracle_mercury%d", i), backend, clientCSAKeys[i]) + + nodes = append(nodes, Node{ + app, transmitter, kb, + }) + + offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x")) + oracles = append(oracles, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: offchainPublicKey, + TransmitAccount: ocr2types.Account(fmt.Sprintf("%x", transmitter[:])), + OffchainPublicKey: kb.OffchainPublicKey(), + PeerID: peerID, + }, + ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(), + }) + logObservers = append(logObservers, observedLogs) + } + + for _, feed := range feeds { + addBootstrapJob(t, bootstrapNode, chainID, verifierAddress, feed.name, feed.id) + } + + createBridge := func(name string, i int, p *big.Int, borm bridges.ORM) (bridgeName string) { + bridge := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + b, err := io.ReadAll(req.Body) + require.NoError(t, err) + require.Equal(t, `{"data":{"from":"ETH","to":"USD"}}`, string(b)) + + r := rand.Int63n(101) + if r > pError.Load() { + res.WriteHeader(http.StatusOK) + val := decimal.NewFromBigInt(p, 0).Div(decimal.NewFromInt(multiplier)).Add(decimal.NewFromInt(int64(i)).Div(decimal.NewFromInt(100))).String() + resp := fmt.Sprintf(`{"result": %s}`, val) + _, err := res.Write([]byte(resp)) + require.NoError(t, err) + } else { + res.WriteHeader(http.StatusInternalServerError) + resp := `{"error": "pError test error"}` + 
_, err := res.Write([]byte(resp)) + require.NoError(t, err) + } + })) + t.Cleanup(bridge.Close) + u, _ := url.Parse(bridge.URL) + bridgeName = fmt.Sprintf("bridge-%s-%d", name, i) + require.NoError(t, borm.CreateBridgeType(&bridges.BridgeType{ + Name: bridges.BridgeName(bridgeName), + URL: models.WebURL(*u), + })) + + return bridgeName + } + + // Add OCR jobs - one per feed on each node + for i, node := range nodes { + for j, feed := range feeds { + bmBridge := createBridge(fmt.Sprintf("benchmarkprice-%d", j), i, feed.baseBenchmarkPrice, node.App.BridgeORM()) + + addV2MercuryJob( + t, + node, + i, + verifierAddress, + bootstrapPeerID, + bootstrapNodePort, + bmBridge, + serverURL, + serverPubKey, + clientPubKeys[i], + feed.name, + feed.id, + randomFeedID(2), + randomFeedID(2), + ) + } + } + + // Setup config on contract + onchainConfig, err := (relaymercury.StandardOnchainConfigCodec{}).Encode(rawOnchainConfig) + require.NoError(t, err) + + reportingPluginConfig, err := json.Marshal(rawReportingPluginConfig) + require.NoError(t, err) + + signers, _, _, onchainConfig, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsForTestsMercuryV02( + 2*time.Second, // DeltaProgress + 20*time.Second, // DeltaResend + 400*time.Millisecond, // DeltaInitial + 100*time.Millisecond, // DeltaRound + 0, // DeltaGrace + 300*time.Millisecond, // DeltaCertifiedCommitRequest + 1*time.Minute, // DeltaStage + 100, // rMax + []int{len(nodes)}, // S + oracles, + reportingPluginConfig, // reportingPluginConfig []byte, + 250*time.Millisecond, // Max duration observation + int(f), // f + onchainConfig, + ) + + require.NoError(t, err) + signerAddresses, err := evm.OnchainPublicKeyToAddress(signers) + require.NoError(t, err) + + offchainTransmitters := make([][32]byte, n) + for i := 0; i < n; i++ { + offchainTransmitters[i] = nodes[i].ClientPubKey + } + + for _, feed := range feeds { + _, err = verifier.SetConfig( + steve, + feed.id, + signerAddresses, + 
offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + nil, + ) + require.NoError(t, err) + backend.Commit() + } + + // Bury it with finality depth + ch, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(testutils.SimulatedChainID.String()) + require.NoError(t, err) + finalityDepth := ch.Config().EVM().FinalityDepth() + for i := 0; i < int(finalityDepth); i++ { + backend.Commit() + } + + runTestSetup := func() { + // Expect at least one report per feed from each oracle + seen := make(map[[32]byte]map[credentials.StaticSizedPublicKey]struct{}) + for i := range feeds { + // feedID will be deleted when all n oracles have reported + seen[feeds[i].id] = make(map[credentials.StaticSizedPublicKey]struct{}, n) + } + + for req := range reqs { + v := make(map[string]interface{}) + err := mercury.PayloadTypes.UnpackIntoMap(v, req.req.Payload) + require.NoError(t, err) + report, exists := v["report"] + if !exists { + t.Fatalf("expected payload %#v to contain 'report'", v) + } + reportElems := make(map[string]interface{}) + err = reportcodecv2.ReportTypes.UnpackIntoMap(reportElems, report.([]byte)) + require.NoError(t, err) + + feedID := reportElems["feedId"].([32]uint8) + feed, exists := feedM[feedID] + require.True(t, exists) + + if _, exists := seen[feedID]; !exists { + continue // already saw all oracles for this feed + } + + expectedFee := relaymercury.CalculateFee(big.NewInt(234567), rawReportingPluginConfig.BaseUSDFee) + expectedExpiresAt := reportElems["observationsTimestamp"].(uint32) + rawReportingPluginConfig.ExpirationWindow + + assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp)) + assert.InDelta(t, feed.baseBenchmarkPrice.Int64(), reportElems["benchmarkPrice"].(*big.Int).Int64(), 5000000) + assert.NotZero(t, reportElems["validFromTimestamp"].(uint32)) + assert.GreaterOrEqual(t, reportElems["observationsTimestamp"].(uint32), reportElems["validFromTimestamp"].(uint32)) + 
assert.Equal(t, expectedExpiresAt, reportElems["expiresAt"].(uint32)) + assert.Equal(t, expectedFee, reportElems["linkFee"].(*big.Int)) + assert.Equal(t, expectedFee, reportElems["nativeFee"].(*big.Int)) + + t.Logf("oracle %x reported for feed %s (0x%x)", req.pk, feed.name, feed.id) + + seen[feedID][req.pk] = struct{}{} + if len(seen[feedID]) == n { + t.Logf("all oracles reported for feed %s (0x%x)", feed.name, feed.id) + delete(seen, feedID) + if len(seen) == 0 { + break // saw all oracles; success! + } + } + } + } + + t.Run("receives at least one report per feed from each oracle when EAs are at 100% reliability", func(t *testing.T) { + runTestSetup() + }) + + t.Run("receives at least one report per feed from each oracle when EAs are at 80% reliability", func(t *testing.T) { + pError.Store(20) + runTestSetup() + }) +} + +func TestIntegration_MercuryV3(t *testing.T) { + t.Parallel() + + integration_MercuryV3(t) +} + +func integration_MercuryV3(t *testing.T) { + var logObservers []*observer.ObservedLogs + t.Cleanup(func() { + detectPanicLogs(t, logObservers) + }) + + testStartTimeStamp := uint32(time.Now().Unix()) + + // test vars + // pError is the probability that an EA will return an error instead of a result, as integer percentage + // pError = 0 means it will never return error + pError := atomic.Int64{} + + // feeds + btcFeed := Feed{ + name: "BTC/USD", + id: randomFeedID(3), + baseBenchmarkPrice: big.NewInt(20_000 * multiplier), + baseBid: big.NewInt(19_997 * multiplier), + baseAsk: big.NewInt(20_004 * multiplier), + } + ethFeed := Feed{ + name: "ETH/USD", + id: randomFeedID(3), + baseBenchmarkPrice: big.NewInt(1_568 * multiplier), + baseBid: big.NewInt(1_566 * multiplier), + baseAsk: big.NewInt(1_569 * multiplier), + } + linkFeed := Feed{ + name: "PLI/USD", + id: randomFeedID(3), + baseBenchmarkPrice: big.NewInt(7150 * multiplier / 1000), + baseBid: big.NewInt(7123 * multiplier / 1000), + baseAsk: big.NewInt(7177 * multiplier / 1000), + } + feeds := 
[]Feed{btcFeed, ethFeed, linkFeed} + feedM := make(map[[32]byte]Feed, len(feeds)) + for i := range feeds { + feedM[feeds[i].id] = feeds[i] + } + + reqs := make(chan request) + serverKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(-1)) + serverPubKey := serverKey.PublicKey + srv := NewMercuryServer(t, ed25519.PrivateKey(serverKey.Raw()), reqs, func() []byte { + report, err := (&reportcodecv3.ReportCodec{}).BuildReport(v3.ReportFields{BenchmarkPrice: big.NewInt(234567), Bid: big.NewInt(1), Ask: big.NewInt(1), LinkFee: big.NewInt(1), NativeFee: big.NewInt(1)}) + if err != nil { + panic(err) + } + return report + }) + clientCSAKeys := make([]csakey.KeyV2, n+1) + clientPubKeys := make([]ed25519.PublicKey, n+1) + for i := 0; i < n+1; i++ { + k := big.NewInt(int64(i)) + key := csakey.MustNewV2XXXTestingOnly(k) + clientCSAKeys[i] = key + clientPubKeys[i] = key.PublicKey + } + serverURL := startMercuryServer(t, srv, clientPubKeys) + chainID := testutils.SimulatedChainID + + steve, backend, verifier, verifierAddress := setupBlockchain(t) + + // Setup bootstrap + oracle nodes + bootstrapNodePort := freeport.GetOne(t) + appBootstrap, bootstrapPeerID, _, bootstrapKb, observedLogs := setupNode(t, bootstrapNodePort, "bootstrap_mercury", backend, clientCSAKeys[n]) + bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb} + logObservers = append(logObservers, observedLogs) + + // Set up n oracles + var ( + oracles []confighelper.OracleIdentityExtra + nodes []Node + ) + ports := freeport.GetN(t, n) + for i := 0; i < n; i++ { + app, peerID, transmitter, kb, observedLogs := setupNode(t, ports[i], fmt.Sprintf("oracle_mercury%d", i), backend, clientCSAKeys[i]) + + nodes = append(nodes, Node{ + app, transmitter, kb, + }) + + offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x")) + oracles = append(oracles, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: offchainPublicKey, + TransmitAccount: 
ocr2types.Account(fmt.Sprintf("%x", transmitter[:])), + OffchainPublicKey: kb.OffchainPublicKey(), + PeerID: peerID, + }, + ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(), + }) + logObservers = append(logObservers, observedLogs) + } + + for _, feed := range feeds { + addBootstrapJob(t, bootstrapNode, chainID, verifierAddress, feed.name, feed.id) + } + + createBridge := func(name string, i int, p *big.Int, borm bridges.ORM) (bridgeName string) { + bridge := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + b, err := io.ReadAll(req.Body) + require.NoError(t, err) + require.Equal(t, `{"data":{"from":"ETH","to":"USD"}}`, string(b)) + + r := rand.Int63n(101) + if r > pError.Load() { + res.WriteHeader(http.StatusOK) + val := decimal.NewFromBigInt(p, 0).Div(decimal.NewFromInt(multiplier)).Add(decimal.NewFromInt(int64(i)).Div(decimal.NewFromInt(100))).String() + resp := fmt.Sprintf(`{"result": %s}`, val) + _, err := res.Write([]byte(resp)) + require.NoError(t, err) + } else { + res.WriteHeader(http.StatusInternalServerError) + resp := `{"error": "pError test error"}` + _, err := res.Write([]byte(resp)) + require.NoError(t, err) + } + })) + t.Cleanup(bridge.Close) + u, _ := url.Parse(bridge.URL) + bridgeName = fmt.Sprintf("bridge-%s-%d", name, i) + require.NoError(t, borm.CreateBridgeType(&bridges.BridgeType{ + Name: bridges.BridgeName(bridgeName), + URL: models.WebURL(*u), + })) + + return bridgeName + } + + // Add OCR jobs - one per feed on each node + for i, node := range nodes { + for j, feed := range feeds { + bmBridge := createBridge(fmt.Sprintf("benchmarkprice-%d", j), i, feed.baseBenchmarkPrice, node.App.BridgeORM()) + bidBridge := createBridge(fmt.Sprintf("bid-%d", j), i, feed.baseBid, node.App.BridgeORM()) + askBridge := createBridge(fmt.Sprintf("ask-%d", j), i, feed.baseAsk, node.App.BridgeORM()) + + addV3MercuryJob( + t, + node, + i, + verifierAddress, + bootstrapPeerID, + bootstrapNodePort, + bmBridge, + 
bidBridge, + askBridge, + serverURL, + serverPubKey, + clientPubKeys[i], + feed.name, + feed.id, + randomFeedID(2), + randomFeedID(2), + ) + } + } + + // Setup config on contract + onchainConfig, err := (relaymercury.StandardOnchainConfigCodec{}).Encode(rawOnchainConfig) + require.NoError(t, err) + + reportingPluginConfig, err := json.Marshal(rawReportingPluginConfig) + require.NoError(t, err) + + signers, _, _, onchainConfig, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsForTestsMercuryV02( + 2*time.Second, // DeltaProgress + 20*time.Second, // DeltaResend + 400*time.Millisecond, // DeltaInitial + 100*time.Millisecond, // DeltaRound + 0, // DeltaGrace + 300*time.Millisecond, // DeltaCertifiedCommitRequest + 1*time.Minute, // DeltaStage + 100, // rMax + []int{len(nodes)}, // S + oracles, + reportingPluginConfig, // reportingPluginConfig []byte, + 250*time.Millisecond, // Max duration observation + int(f), // f + onchainConfig, + ) + + require.NoError(t, err) + signerAddresses, err := evm.OnchainPublicKeyToAddress(signers) + require.NoError(t, err) + + offchainTransmitters := make([][32]byte, n) + for i := 0; i < n; i++ { + offchainTransmitters[i] = nodes[i].ClientPubKey + } + + for _, feed := range feeds { + _, err = verifier.SetConfig( + steve, + feed.id, + signerAddresses, + offchainTransmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + nil, + ) + require.NoError(t, err) + backend.Commit() + } + + // Bury it with finality depth + ch, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(testutils.SimulatedChainID.String()) + require.NoError(t, err) + finalityDepth := ch.Config().EVM().FinalityDepth() + for i := 0; i < int(finalityDepth); i++ { + backend.Commit() + } + + runTestSetup := func() { + // Expect at least one report per feed from each oracle + seen := make(map[[32]byte]map[credentials.StaticSizedPublicKey]struct{}) + for i := range feeds { + // feedID will be deleted when all n 
oracles have reported + seen[feeds[i].id] = make(map[credentials.StaticSizedPublicKey]struct{}, n) + } + + for req := range reqs { + v := make(map[string]interface{}) + err := mercury.PayloadTypes.UnpackIntoMap(v, req.req.Payload) + require.NoError(t, err) + report, exists := v["report"] + if !exists { + t.Fatalf("expected payload %#v to contain 'report'", v) + } + reportElems := make(map[string]interface{}) + err = reportcodecv3.ReportTypes.UnpackIntoMap(reportElems, report.([]byte)) + require.NoError(t, err) + + feedID := reportElems["feedId"].([32]uint8) + feed, exists := feedM[feedID] + require.True(t, exists) + + if _, exists := seen[feedID]; !exists { + continue // already saw all oracles for this feed + } + + expectedFee := relaymercury.CalculateFee(big.NewInt(234567), rawReportingPluginConfig.BaseUSDFee) + expectedExpiresAt := reportElems["observationsTimestamp"].(uint32) + rawReportingPluginConfig.ExpirationWindow + + assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp)) + assert.InDelta(t, feed.baseBenchmarkPrice.Int64(), reportElems["benchmarkPrice"].(*big.Int).Int64(), 5000000) + assert.InDelta(t, feed.baseBid.Int64(), reportElems["bid"].(*big.Int).Int64(), 5000000) + assert.InDelta(t, feed.baseAsk.Int64(), reportElems["ask"].(*big.Int).Int64(), 5000000) + assert.NotZero(t, reportElems["validFromTimestamp"].(uint32)) + assert.GreaterOrEqual(t, reportElems["observationsTimestamp"].(uint32), reportElems["validFromTimestamp"].(uint32)) + assert.Equal(t, expectedExpiresAt, reportElems["expiresAt"].(uint32)) + assert.Equal(t, expectedFee, reportElems["linkFee"].(*big.Int)) + assert.Equal(t, expectedFee, reportElems["nativeFee"].(*big.Int)) + + t.Logf("oracle %x reported for feed %s (0x%x)", req.pk, feed.name, feed.id) + + seen[feedID][req.pk] = struct{}{} + if len(seen[feedID]) == n { + t.Logf("all oracles reported for feed %s (0x%x)", feed.name, feed.id) + delete(seen, feedID) + if len(seen) == 0 { + break // 
saw all oracles; success! + } + } + } + } + + t.Run("receives at least one report per feed from each oracle when EAs are at 100% reliability", func(t *testing.T) { + runTestSetup() + }) + + t.Run("receives at least one report per feed from each oracle when EAs are at 80% reliability", func(t *testing.T) { + pError.Store(20) + runTestSetup() + }) +} diff --git a/core/services/ocr2/plugins/mercury/plugin.go b/core/services/ocr2/plugins/mercury/plugin.go new file mode 100644 index 00000000..51bf6921 --- /dev/null +++ b/core/services/ocr2/plugins/mercury/plugin.go @@ -0,0 +1,292 @@ +package mercury + +import ( + "encoding/json" + "fmt" + "os/exec" + + "github.com/pkg/errors" + + libocr2 "github.com/goplugin/libocr/offchainreporting2plus" + "github.com/goplugin/libocr/offchainreporting2plus/ocr3types" + + relaymercuryv1 "github.com/goplugin/plugin-data-streams/mercury/v1" + relaymercuryv2 "github.com/goplugin/plugin-data-streams/mercury/v2" + relaymercuryv3 "github.com/goplugin/plugin-data-streams/mercury/v3" + + "github.com/goplugin/plugin-common/pkg/loop" + commontypes "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/mercury/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/types" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + mercuryv1 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v1" + mercuryv2 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v2" + mercuryv3 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v3" + "github.com/goplugin/pluginv3.0/v2/plugins" +) + 
+type Config interface {
+	MaxSuccessfulRuns() uint64
+	ResultWriteQueueDepth() uint64
+	plugins.RegistrarConfig
+}
+
+// concrete implementation of MercuryConfig
+type mercuryConfig struct {
+	jobPipelineMaxSuccessfulRuns     uint64
+	jobPipelineResultWriteQueueDepth uint64
+	plugins.RegistrarConfig
+}
+
+func NewMercuryConfig(jobPipelineMaxSuccessfulRuns uint64, jobPipelineResultWriteQueueDepth uint64, pluginProcessCfg plugins.RegistrarConfig) Config {
+	return &mercuryConfig{
+		jobPipelineMaxSuccessfulRuns:     jobPipelineMaxSuccessfulRuns,
+		jobPipelineResultWriteQueueDepth: jobPipelineResultWriteQueueDepth,
+		RegistrarConfig:                  pluginProcessCfg,
+	}
+}
+
+func (m *mercuryConfig) MaxSuccessfulRuns() uint64 {
+	return m.jobPipelineMaxSuccessfulRuns
+}
+
+func (m *mercuryConfig) ResultWriteQueueDepth() uint64 {
+	return m.jobPipelineResultWriteQueueDepth
+}
+
+func NewServices(
+	jb job.Job,
+	ocr2Provider commontypes.MercuryProvider,
+	pipelineRunner pipeline.Runner,
+	lggr logger.Logger,
+	argsNoPlugin libocr2.MercuryOracleArgs,
+	cfg Config,
+	chEnhancedTelem chan ocrcommon.EnhancedTelemetryMercuryData,
+	orm types.DataSourceORM,
+	feedID utils.FeedID,
+) ([]job.ServiceCtx, error) {
+	if jb.PipelineSpec == nil {
+		return nil, errors.New("expected job to have a non-nil PipelineSpec")
+	}
+
+	var pluginConfig config.PluginConfig
+	err := json.Unmarshal(jb.OCR2OracleSpec.PluginConfig.Bytes(), &pluginConfig)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	err = config.ValidatePluginConfig(pluginConfig, feedID)
+	if err != nil {
+		return nil, err
+	}
+	lggr = lggr.Named("MercuryPlugin").With("jobID", jb.ID, "jobName", jb.Name.ValueOrZero())
+
+	// encapsulate all the subservices and ensure we close them all if any fail to start
+	srvs := []job.ServiceCtx{ocr2Provider}
+	abort := func() {
+		if cerr := services.MultiCloser(srvs).Close(); cerr != nil {
+			lggr.Errorw("Error closing unused services", "err", cerr)
+		}
+	}
+	saver := ocrcommon.NewResultRunSaver(pipelineRunner, lggr, cfg.MaxSuccessfulRuns(), cfg.ResultWriteQueueDepth())
+	srvs = append(srvs, saver)
+
+	// this is the factory that will be used to create the mercury plugin
+	var (
+		factory         ocr3types.MercuryPluginFactory
+		factoryServices []job.ServiceCtx
+	)
+	fCfg := factoryCfg{
+		orm:                   orm,
+		pipelineRunner:        pipelineRunner,
+		jb:                    jb,
+		lggr:                  lggr,
+		saver:                 saver,
+		chEnhancedTelem:       chEnhancedTelem,
+		ocr2Provider:          ocr2Provider,
+		reportingPluginConfig: pluginConfig,
+		cfg:                   cfg,
+		feedID:                feedID,
+	}
+	switch feedID.Version() {
+	case 1:
+		factory, factoryServices, err = newv1factory(fCfg)
+		if err != nil {
+			abort()
+			return nil, fmt.Errorf("failed to create mercury v1 factory: %w", err)
+		}
+		srvs = append(srvs, factoryServices...)
+	case 2:
+		factory, factoryServices, err = newv2factory(fCfg)
+		if err != nil {
+			abort()
+			return nil, fmt.Errorf("failed to create mercury v2 factory: %w", err)
+		}
+		srvs = append(srvs, factoryServices...)
+	case 3:
+		factory, factoryServices, err = newv3factory(fCfg)
+		if err != nil {
+			abort()
+			return nil, fmt.Errorf("failed to create mercury v3 factory: %w", err)
+		}
+		srvs = append(srvs, factoryServices...)
+ default: + return nil, errors.Errorf("unknown Mercury report schema version: %d", feedID.Version()) + } + argsNoPlugin.MercuryPluginFactory = factory + oracle, err := libocr2.NewOracle(argsNoPlugin) + if err != nil { + abort() + return nil, errors.WithStack(err) + } + srvs = append(srvs, job.NewServiceAdapter(oracle)) + return srvs, nil +} + +type factoryCfg struct { + orm types.DataSourceORM + pipelineRunner pipeline.Runner + jb job.Job + lggr logger.Logger + saver *ocrcommon.RunResultSaver + chEnhancedTelem chan ocrcommon.EnhancedTelemetryMercuryData + ocr2Provider commontypes.MercuryProvider + reportingPluginConfig config.PluginConfig + cfg Config + feedID utils.FeedID +} + +func newv3factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.ServiceCtx, error) { + var factory ocr3types.MercuryPluginFactory + srvs := make([]job.ServiceCtx, 0) + + ds := mercuryv3.NewDataSource( + factoryCfg.orm, + factoryCfg.pipelineRunner, + factoryCfg.jb, + *factoryCfg.jb.PipelineSpec, + factoryCfg.feedID, + factoryCfg.lggr, + factoryCfg.saver, + factoryCfg.chEnhancedTelem, + factoryCfg.ocr2Provider.MercuryServerFetcher(), + *factoryCfg.reportingPluginConfig.LinkFeedID, + *factoryCfg.reportingPluginConfig.NativeFeedID, + ) + + loopCmd := env.MercuryPlugin.Cmd.Get() + loopEnabled := loopCmd != "" + + if loopEnabled { + cmdFn, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr) + if err != nil { + return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err) + } + // in loopp mode, the factory is grpc server, and we need to handle the server lifecycle + factoryServer := loop.NewMercuryV3Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds) + srvs = append(srvs, factoryServer) + // adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle + factory = factoryServer + } else { + factory = relaymercuryv3.NewFactory(ds, factoryCfg.lggr, 
factoryCfg.ocr2Provider.OnchainConfigCodec(), factoryCfg.ocr2Provider.ReportCodecV3()) + } + return factory, srvs, nil +} + +func newv2factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.ServiceCtx, error) { + var factory ocr3types.MercuryPluginFactory + srvs := make([]job.ServiceCtx, 0) + + ds := mercuryv2.NewDataSource( + factoryCfg.orm, + factoryCfg.pipelineRunner, + factoryCfg.jb, + *factoryCfg.jb.PipelineSpec, + factoryCfg.feedID, + factoryCfg.lggr, + factoryCfg.saver, + factoryCfg.chEnhancedTelem, + factoryCfg.ocr2Provider.MercuryServerFetcher(), + *factoryCfg.reportingPluginConfig.LinkFeedID, + *factoryCfg.reportingPluginConfig.NativeFeedID, + ) + + loopCmd := env.MercuryPlugin.Cmd.Get() + loopEnabled := loopCmd != "" + + if loopEnabled { + cmdFn, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr) + if err != nil { + return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err) + } + // in loopp mode, the factory is grpc server, and we need to handle the server lifecycle + factoryServer := loop.NewMercuryV2Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds) + srvs = append(srvs, factoryServer) + // adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle + factory = factoryServer + } else { + factory = relaymercuryv2.NewFactory(ds, factoryCfg.lggr, factoryCfg.ocr2Provider.OnchainConfigCodec(), factoryCfg.ocr2Provider.ReportCodecV2()) + } + return factory, srvs, nil +} + +func newv1factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.ServiceCtx, error) { + var factory ocr3types.MercuryPluginFactory + srvs := make([]job.ServiceCtx, 0) + + ds := mercuryv1.NewDataSource( + factoryCfg.orm, + factoryCfg.pipelineRunner, + factoryCfg.jb, + *factoryCfg.jb.PipelineSpec, + factoryCfg.lggr, + factoryCfg.saver, + factoryCfg.chEnhancedTelem, + factoryCfg.ocr2Provider.MercuryChainReader(), + 
factoryCfg.ocr2Provider.MercuryServerFetcher(), + factoryCfg.reportingPluginConfig.InitialBlockNumber.Ptr(), + factoryCfg.feedID, + ) + + loopCmd := env.MercuryPlugin.Cmd.Get() + loopEnabled := loopCmd != "" + + if loopEnabled { + cmdFn, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr) + if err != nil { + return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err) + } + // in loopp mode, the factory is grpc server, and we need to handle the server lifecycle + factoryServer := loop.NewMercuryV1Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds) + srvs = append(srvs, factoryServer) + // adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle + factory = factoryServer + } else { + factory = relaymercuryv1.NewFactory(ds, factoryCfg.lggr, factoryCfg.ocr2Provider.OnchainConfigCodec(), factoryCfg.ocr2Provider.ReportCodecV1()) + } + return factory, srvs, nil +} + +func initLoop(cmd string, cfg Config, feedID utils.FeedID, lggr logger.Logger) (func() *exec.Cmd, loop.GRPCOpts, logger.Logger, error) { + lggr.Debugw("Initializing Mercury loop", "command", cmd) + mercuryLggr := lggr.Named(fmt.Sprintf("MercuryV%d", feedID.Version())).Named(feedID.String()) + envVars, err := plugins.ParseEnvFile(env.MercuryPlugin.Env.Get()) + if err != nil { + return nil, loop.GRPCOpts{}, nil, fmt.Errorf("failed to parse mercury env file: %w", err) + } + cmdFn, opts, err := cfg.RegisterLOOP(plugins.CmdConfig{ + ID: mercuryLggr.Name(), + Cmd: cmd, + Env: envVars, + }) + if err != nil { + return nil, loop.GRPCOpts{}, nil, fmt.Errorf("failed to register loop: %w", err) + } + return cmdFn, opts, mercuryLggr, nil +} diff --git a/core/services/ocr2/plugins/mercury/plugin_test.go b/core/services/ocr2/plugins/mercury/plugin_test.go new file mode 100644 index 00000000..3753ac7c --- /dev/null +++ b/core/services/ocr2/plugins/mercury/plugin_test.go @@ -0,0 +1,284 @@ +package mercury_test 
+ +import ( + "context" + "os/exec" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + + "github.com/goplugin/plugin-common/pkg/loop" + commontypes "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/plugin-common/pkg/types/mercury" + v1 "github.com/goplugin/plugin-common/pkg/types/mercury/v1" + v2 "github.com/goplugin/plugin-common/pkg/types/mercury/v2" + v3 "github.com/goplugin/plugin-common/pkg/types/mercury/v3" + + mercuryocr2 "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/mercury" + + libocr2 "github.com/goplugin/libocr/offchainreporting2plus" + libocr2types "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/types" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + "github.com/goplugin/pluginv3.0/v2/plugins" +) + +var ( + v1FeedId = [32]uint8{00, 01, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114} + v2FeedId = [32]uint8{00, 02, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114} + v3FeedId = [32]uint8{00, 03, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114} + + testArgsNoPlugin = libocr2.MercuryOracleArgs{ + LocalConfig: libocr2types.LocalConfig{ + DevelopmentMode: libocr2types.EnableDangerousDevelopmentMode, + }, + } + + testCfg = 
mercuryocr2.NewMercuryConfig(1, 1, &testRegistrarConfig{}) + + v1jsonCfg = job.JSONConfig{ + "serverURL": "example.com:80", + "serverPubKey": "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", + "initialBlockNumber": 1234, + } + + v2jsonCfg = job.JSONConfig{ + "serverURL": "example.com:80", + "serverPubKey": "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", + "linkFeedID": "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472", + "nativeFeedID": "0x00036b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472", + } + + v3jsonCfg = job.JSONConfig{ + "serverURL": "example.com:80", + "serverPubKey": "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", + "linkFeedID": "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472", + "nativeFeedID": "0x00036b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472", + } + + testJob = job.Job{ + ID: 1, + ExternalJobID: uuid.Must(uuid.NewRandom()), + OCR2OracleSpecID: ptr(int32(7)), + OCR2OracleSpec: &job.OCR2OracleSpec{ + ID: 7, + ContractID: "phony", + FeedID: ptr(common.BytesToHash([]byte{1, 2, 3})), + Relay: relay.EVM, + ChainID: "1", + }, + PipelineSpec: &pipeline.Spec{}, + PipelineSpecID: int32(1), + } + + // this is kind of gross, but it's the best way to test return values of the services + expectedEmbeddedServiceCnt = 3 + expectedLoopServiceCnt = expectedEmbeddedServiceCnt + 1 +) + +func TestNewServices(t *testing.T) { + type args struct { + pluginConfig job.JSONConfig + feedID utils.FeedID + } + tests := []struct { + name string + args args + loopMode bool + wantLoopFactory any + wantServiceCnt int + wantErr bool + }{ + { + name: "no plugin config error ", + args: args{ + feedID: v1FeedId, + }, + wantServiceCnt: 0, + wantErr: true, + }, + + { + name: "v1 legacy", + args: args{ + pluginConfig: v1jsonCfg, + feedID: v1FeedId, + }, + wantServiceCnt: expectedEmbeddedServiceCnt, + wantErr: false, + }, + { + name: "v2 legacy", + args: args{ + 
pluginConfig: v2jsonCfg, + feedID: v2FeedId, + }, + wantServiceCnt: expectedEmbeddedServiceCnt, + wantErr: false, + }, + { + name: "v3 legacy", + args: args{ + pluginConfig: v3jsonCfg, + feedID: v3FeedId, + }, + wantServiceCnt: expectedEmbeddedServiceCnt, + wantErr: false, + }, + { + name: "v1 loop", + loopMode: true, + args: args{ + pluginConfig: v1jsonCfg, + feedID: v1FeedId, + }, + wantServiceCnt: expectedLoopServiceCnt, + wantErr: false, + wantLoopFactory: &loop.MercuryV1Service{}, + }, + { + name: "v2 loop", + loopMode: true, + args: args{ + pluginConfig: v2jsonCfg, + feedID: v2FeedId, + }, + wantServiceCnt: expectedLoopServiceCnt, + wantErr: false, + wantLoopFactory: &loop.MercuryV2Service{}, + }, + { + name: "v3 loop", + loopMode: true, + args: args{ + pluginConfig: v3jsonCfg, + feedID: v3FeedId, + }, + wantServiceCnt: expectedLoopServiceCnt, + wantErr: false, + wantLoopFactory: &loop.MercuryV3Service{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.loopMode { + t.Setenv(string(env.MercuryPlugin.Cmd), "fake_cmd") + assert.NotEmpty(t, env.MercuryPlugin.Cmd.Get()) + } + got, err := newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID) + if (err != nil) != tt.wantErr { + t.Errorf("NewServices() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Len(t, got, tt.wantServiceCnt) + if tt.loopMode { + foundLoopFactory := false + for i := 0; i < len(got); i++ { + if reflect.TypeOf(got[i]) == reflect.TypeOf(tt.wantLoopFactory) { + foundLoopFactory = true + break + } + } + assert.True(t, foundLoopFactory) + } + }) + } +} + +// we are only varying the version via feedID (and the plugin config) +// this wrapper supplies dummy values for the rest of the arguments +func newServicesTestWrapper(t *testing.T, pluginConfig job.JSONConfig, feedID utils.FeedID) ([]job.ServiceCtx, error) { + t.Helper() + jb := testJob + jb.OCR2OracleSpec.PluginConfig = pluginConfig + return mercuryocr2.NewServices(jb, &testProvider{}, 
nil, logger.TestLogger(t), testArgsNoPlugin, testCfg, nil, &testDataSourceORM{}, feedID) +} + +type testProvider struct{} + +// ChainReader implements types.MercuryProvider. +func (*testProvider) ChainReader() commontypes.ChainReader { panic("unimplemented") } + +// Close implements types.MercuryProvider. +func (*testProvider) Close() error { return nil } + +// Codec implements types.MercuryProvider. +func (*testProvider) Codec() commontypes.Codec { panic("unimplemented") } + +// ContractConfigTracker implements types.MercuryProvider. +func (*testProvider) ContractConfigTracker() libocr2types.ContractConfigTracker { + panic("unimplemented") +} + +// ContractTransmitter implements types.MercuryProvider. +func (*testProvider) ContractTransmitter() libocr2types.ContractTransmitter { + panic("unimplemented") +} + +// HealthReport implements types.MercuryProvider. +func (*testProvider) HealthReport() map[string]error { panic("unimplemented") } + +// MercuryChainReader implements types.MercuryProvider. +func (*testProvider) MercuryChainReader() mercury.ChainReader { return nil } + +// MercuryServerFetcher implements types.MercuryProvider. +func (*testProvider) MercuryServerFetcher() mercury.ServerFetcher { return nil } + +// Name implements types.MercuryProvider. +func (*testProvider) Name() string { panic("unimplemented") } + +// OffchainConfigDigester implements types.MercuryProvider. +func (*testProvider) OffchainConfigDigester() libocr2types.OffchainConfigDigester { + panic("unimplemented") +} + +// OnchainConfigCodec implements types.MercuryProvider. +func (*testProvider) OnchainConfigCodec() mercury.OnchainConfigCodec { + return nil +} + +// Ready implements types.MercuryProvider. +func (*testProvider) Ready() error { panic("unimplemented") } + +// ReportCodecV1 implements types.MercuryProvider. +func (*testProvider) ReportCodecV1() v1.ReportCodec { return nil } + +// ReportCodecV2 implements types.MercuryProvider. 
+func (*testProvider) ReportCodecV2() v2.ReportCodec { return nil }
+
+// ReportCodecV3 implements types.MercuryProvider.
+func (*testProvider) ReportCodecV3() v3.ReportCodec { return nil }
+
+// Start implements types.MercuryProvider.
+func (*testProvider) Start(context.Context) error { panic("unimplemented") }
+
+var _ commontypes.MercuryProvider = (*testProvider)(nil)
+
+type testRegistrarConfig struct{}
+
+// RegisterLOOP implements plugins.RegistrarConfig.
+func (*testRegistrarConfig) RegisterLOOP(config plugins.CmdConfig) (func() *exec.Cmd, loop.GRPCOpts, error) {
+	return nil, loop.GRPCOpts{}, nil
+}
+
+var _ plugins.RegistrarConfig = (*testRegistrarConfig)(nil)
+
+type testDataSourceORM struct{}
+
+// LatestReport implements types.DataSourceORM.
+func (*testDataSourceORM) LatestReport(ctx context.Context, feedID [32]byte, qopts ...pg.QOpt) (report []byte, err error) {
+	return []byte{1, 2, 3}, nil
+}
+
+var _ types.DataSourceORM = (*testDataSourceORM)(nil)
diff --git a/core/services/ocr2/plugins/ocr2keeper/config.go b/core/services/ocr2/plugins/ocr2keeper/config.go
new file mode 100644
index 00000000..ec56f9c6
--- /dev/null
+++ b/core/services/ocr2/plugins/ocr2keeper/config.go
@@ -0,0 +1,87 @@
+package ocr2keeper
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+type Duration time.Duration
+
+func (d *Duration) UnmarshalJSON(b []byte) error {
+	var raw string
+	if err := json.Unmarshal(b, &raw); err != nil {
+		return err
+	}
+
+	p, err := time.ParseDuration(raw)
+	if err != nil {
+		return err
+	}
+
+	*d = Duration(p)
+	return nil
+}
+
+func (d Duration) MarshalJSON() ([]byte, error) {
+	return json.Marshal(time.Duration(d).String())
+}
+
+func (d Duration) Value() time.Duration {
+	return time.Duration(d)
+}
+
+// NOTE: This plugin config is shared among different versions of keepers
+// Any changes to this config should keep in mind existing production
+// deployments of all versions of keepers and should be backwards compatible
+// with existing job specs.
+type PluginConfig struct {
+	// CacheExpiration is the duration of time a cached key is available. Use
+	// this value to balance memory usage and RPC calls. A new set of keys is
+	// generated with every block so a good setting might come from block time
+	// times number of blocks of history to support not replaying reports.
+	CacheExpiration Duration `json:"cacheExpiration"`
+	// CacheEvictionInterval is a parameter for how often the cache attempts to
+	// evict expired keys. This value should be short enough to ensure key
+	// eviction doesn't block for too long, and long enough that it doesn't
+	// cause frequent blocking.
+	CacheEvictionInterval Duration `json:"cacheEvictionInterval"`
+	// MaxServiceWorkers is the total number of go-routines allowed to make RPC
+	// simultaneous calls on behalf of the sampling operation. This parameter
+	// is 10x the number of available CPUs by default. The RPC calls are memory
+	// heavy as opposed to CPU heavy as most of the work involves waiting on
+	// network responses.
+	MaxServiceWorkers int `json:"maxServiceWorkers"`
+	// ServiceQueueLength is the buffer size for the RPC service queue. Fewer
+	// workers or slower RPC responses will cause this queue to build up.
+	// Adding new items to the queue will block if the queue becomes full.
+ ServiceQueueLength int `json:"serviceQueueLength"` + // ContractVersion is the contract version + ContractVersion string `json:"contractVersion"` + // CaptureAutomationCustomTelemetry is a bool flag to toggle Custom Telemetry Service + CaptureAutomationCustomTelemetry *bool `json:"captureAutomationCustomTelemetry,omitempty"` +} + +func ValidatePluginConfig(cfg PluginConfig) error { + if cfg.CacheExpiration < 0 { + return fmt.Errorf("cache expiration cannot be less than zero") + } + + if cfg.CacheEvictionInterval < 0 { + return fmt.Errorf("cache eviction interval cannot be less than zero") + } + + if cfg.CacheEvictionInterval > 0 && cfg.CacheEvictionInterval.Value() < time.Second { + return fmt.Errorf("cache eviction interval should be more than every second") + } + + if cfg.MaxServiceWorkers < 0 { + return fmt.Errorf("max service workers cannot be less than zero") + } + + if cfg.ServiceQueueLength < 0 { + return fmt.Errorf("service queue length cannot be less than zero") + } + + return nil +} diff --git a/core/services/ocr2/plugins/ocr2keeper/config_test.go b/core/services/ocr2/plugins/ocr2keeper/config_test.go new file mode 100644 index 00000000..82b0a448 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/config_test.go @@ -0,0 +1,30 @@ +package ocr2keeper + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestUnmarshalDuration(t *testing.T) { + raw := `"2s"` + + var value Duration + err := json.Unmarshal([]byte(raw), &value) + + assert.NoError(t, err) + assert.Equal(t, 2*time.Second, value.Value()) +} + +func TestUnmarshalConfig(t *testing.T) { + raw := `{"cacheExpiration":"2s","maxServiceWorkers":42}` + + var config PluginConfig + err := json.Unmarshal([]byte(raw), &config) + + assert.NoError(t, err) + assert.Equal(t, 2*time.Second, config.CacheExpiration.Value()) + assert.Equal(t, 42, config.MaxServiceWorkers) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/abi.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/abi.go new file mode 100644 index 00000000..f4b08c26 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/abi.go @@ -0,0 +1,196 @@ +package evm + +import ( + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + + ocr2keepers "github.com/goplugin/plugin-automation/pkg/v2" +) + +type evmRegistryPackerV2_0 struct { + abi abi.ABI +} + +// enum UpkeepFailureReason +// https://github.com/goplugin/pluginv3.0/blob/d9dee8ea6af26bc82463510cb8786b951fa98585/contracts/src/v0.8/interfaces/AutomationRegistryInterface2_0.sol#L94 +const ( + UPKEEP_FAILURE_REASON_NONE = iota + UPKEEP_FAILURE_REASON_UPKEEP_CANCELLED + UPKEEP_FAILURE_REASON_UPKEEP_PAUSED + UPKEEP_FAILURE_REASON_TARGET_CHECK_REVERTED + UPKEEP_FAILURE_REASON_UPKEEP_NOT_NEEDED + UPKEEP_FAILURE_REASON_PERFORM_DATA_EXCEEDS_LIMIT + UPKEEP_FAILURE_REASON_INSUFFICIENT_BALANCE +) + +func NewEvmRegistryPackerV2_0(abi abi.ABI) *evmRegistryPackerV2_0 { + return &evmRegistryPackerV2_0{abi: abi} +} + +func (rp *evmRegistryPackerV2_0) UnpackCheckResult(key ocr2keepers.UpkeepKey, raw string) (EVMAutomationUpkeepResult20, error) { + var ( + result EVMAutomationUpkeepResult20 + ) + + b, err := hexutil.Decode(raw) + if err != nil { + return result, err + } + + out, err := rp.abi.Methods["checkUpkeep"].Outputs.UnpackValues(b) + if err != nil { + return result, fmt.Errorf("%w: unpack checkUpkeep return: %s", err, raw) + } + + block, id, err := splitKey(key) + if err != nil { + return result, err + } + + result = EVMAutomationUpkeepResult20{ + Block: uint32(block.Uint64()), + ID: id, + Eligible: true, + } + + upkeepNeeded := *abi.ConvertType(out[0], new(bool)).(*bool) + rawPerformData := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + result.FailureReason = *abi.ConvertType(out[2], new(uint8)).(*uint8) + result.GasUsed = 
*abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + result.FastGasWei = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) + result.LinkNative = *abi.ConvertType(out[5], new(*big.Int)).(**big.Int) + + if !upkeepNeeded { + result.Eligible = false + } + // if NONE we expect the perform data. if TARGET_CHECK_REVERTED we will have the error data in the perform data used for off chain lookup + if result.FailureReason == UPKEEP_FAILURE_REASON_NONE || (result.FailureReason == UPKEEP_FAILURE_REASON_TARGET_CHECK_REVERTED && len(rawPerformData) > 0) { + var ret0 = new(performDataWrapper) + err = pdataABI.UnpackIntoInterface(ret0, "check", rawPerformData) + if err != nil { + return result, err + } + + result.CheckBlockNumber = ret0.Result.CheckBlockNumber + result.CheckBlockHash = ret0.Result.CheckBlockhash + result.PerformData = ret0.Result.PerformData + } + + // This is a default placeholder which is used since we do not get the execute gas + // from checkUpkeep result. This field is overwritten later from the execute gas + // we have for an upkeep in memory. TODO (AUTO-1482): Refactor this + result.ExecuteGas = 5_000_000 + + return result, nil +} + +func (rp *evmRegistryPackerV2_0) UnpackPerformResult(raw string) (bool, error) { + b, err := hexutil.Decode(raw) + if err != nil { + return false, err + } + + out, err := rp.abi.Methods["simulatePerformUpkeep"]. 
+ Outputs.UnpackValues(b) + if err != nil { + return false, fmt.Errorf("%w: unpack simulatePerformUpkeep return: %s", err, raw) + } + + return *abi.ConvertType(out[0], new(bool)).(*bool), nil +} + +func (rp *evmRegistryPackerV2_0) UnpackUpkeepResult(id *big.Int, raw string) (activeUpkeep, error) { + b, err := hexutil.Decode(raw) + if err != nil { + return activeUpkeep{}, err + } + + out, err := rp.abi.Methods["getUpkeep"].Outputs.UnpackValues(b) + if err != nil { + return activeUpkeep{}, fmt.Errorf("%w: unpack getUpkeep return: %s", err, raw) + } + + type upkeepInfo struct { + Target common.Address + ExecuteGas uint32 + CheckData []byte + Balance *big.Int + Admin common.Address + MaxValidBlocknumber uint64 + LastPerformBlockNumber uint32 + AmountSpent *big.Int + Paused bool + OffchainConfig []byte + } + temp := *abi.ConvertType(out[0], new(upkeepInfo)).(*upkeepInfo) + + au := activeUpkeep{ + ID: id, + PerformGasLimit: temp.ExecuteGas, + CheckData: temp.CheckData, + } + + return au, nil +} + +func (rp *evmRegistryPackerV2_0) UnpackTransmitTxInput(raw []byte) ([]ocr2keepers.UpkeepResult, error) { + var ( + enc = EVMAutomationEncoder20{} + decoded []ocr2keepers.UpkeepResult + out []interface{} + err error + b []byte + ok bool + ) + + if out, err = rp.abi.Methods["transmit"].Inputs.UnpackValues(raw); err != nil { + return nil, fmt.Errorf("%w: unpack TransmitTxInput return: %s", err, raw) + } + + if len(out) < 2 { + return nil, fmt.Errorf("invalid unpacking of TransmitTxInput in %s", raw) + } + + if b, ok = out[1].([]byte); !ok { + return nil, fmt.Errorf("unexpected value type in transaction") + } + + if decoded, err = enc.DecodeReport(b); err != nil { + return nil, fmt.Errorf("error during decoding report while unpacking TransmitTxInput: %w", err) + } + + return decoded, nil +} + +var ( + // rawPerformData is abi encoded tuple(uint32, bytes32, bytes). 
We create an ABI with dummy + // function which returns this tuple in order to decode the bytes + pdataABI, _ = abi.JSON(strings.NewReader(`[{ + "name":"check", + "type":"function", + "outputs":[{ + "name":"ret", + "type":"tuple", + "components":[ + {"type":"uint32","name":"checkBlockNumber"}, + {"type":"bytes32","name":"checkBlockhash"}, + {"type":"bytes","name":"performData"} + ] + }] + }]`, + )) +) + +type performDataWrapper struct { + Result performDataStruct +} +type performDataStruct struct { + CheckBlockNumber uint32 `abi:"checkBlockNumber"` + CheckBlockhash [32]byte `abi:"checkBlockhash"` + PerformData []byte `abi:"performData"` +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/abi_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/abi_test.go new file mode 100644 index 00000000..fbfb10ac --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/abi_test.go @@ -0,0 +1,150 @@ +package evm + +import ( + "fmt" + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/assert" + + ocr2keepers "github.com/goplugin/plugin-automation/pkg/v2" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" +) + +func TestUnpackTransmitTxInput(t *testing.T) { + registryABI, err := abi.JSON(strings.NewReader(keeper_registry_wrapper2_0.KeeperRegistryABI)) + assert.Nil(t, err) + + packer := &evmRegistryPackerV2_0{abi: registryABI} + decodedReport, err := 
packer.UnpackTransmitTxInput(hexutil.MustDecode("0x00011a04d404e571ead64b2f08cfae623a0d96b9beb326c20e322001cbbd344700000000000000000000000000000000000000000000000000000000000d580e35681c68a0426c30f4686e837c0cd7864200f48dbfe48c80c51f92aa5ac607b300000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002e00000000000000000000000000000000000000000000000000000000000000360000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000773594000000000000000000000000000000000000000000000000000010fb9cd2f34a00000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000001de1256139081c6b165a3aee0432f605d3dee0e6087ea53b46ca9478c253ea9c8000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000827075c4bacd41884f60c2ca7af3630400bedd92ad7ad0ba4e1f000e70297de0573e180000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000a8c0000000000000000000000000000000000000000000000000000000000086470000000000000000000000000000000000000000000000000000000000000000326e2b521089d44f1457ae51b3f8d76e8577e08c4af9374bdc62aebbfad081a78a13941ab209ad44a905ee0fd704a46b2ebc022dcb60659bed87342fd94dadb70827af523f59c7c9bb8dcc77e959b0476869612e8cf84e63a2e9a5617290633f70000000000000000000000000000000000000000000000000000000000000003723d77998618c5959396115fc61380215e0395f68c18a6cf0647c3e759ee013040c2967fdd369aac59b464f931dacd7b8863498757eda53a9f6a4b6150f2dbe640771f3c242c297265c36c5e78f4c
660ae74dcd1f5bda8687b6afed3d3f27e0d")) + assert.Nil(t, err) + + // We expect one upkeep ID in the report at block number + var expectedBlock uint32 = 8548469 + expectedID, _ := new(big.Int).SetString("100445849710294316610676143149039812931260394722330855891004881602834541226440", 10) + + assert.Equal(t, len(decodedReport), 1) + + rpt, ok := decodedReport[0].(EVMAutomationUpkeepResult20) + assert.True(t, ok) + + assert.Equal(t, rpt.Block, expectedBlock) + assert.Equal(t, rpt.ID.String(), expectedID.String()) +} + +func TestUnpackTransmitTxInputErrors(t *testing.T) { + + tests := []struct { + Name string + RawData string + }{ + { + Name: "Empty Data", + RawData: "0x", + }, + { + Name: "Random Data", + RawData: "0x2f08cfae623a0d96b9beb326c20e322001cbbd344700", + }, + } + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + abi, err := abi.JSON(strings.NewReader(keeper_registry_wrapper2_0.KeeperRegistryABI)) + assert.Nil(t, err) + + packer := &evmRegistryPackerV2_0{abi: abi} + _, err = packer.UnpackTransmitTxInput(hexutil.MustDecode(test.RawData)) + assert.NotNil(t, err) + }) + } +} + +func TestUnpackCheckResults(t *testing.T) { + registryABI, err := abi.JSON(strings.NewReader(keeper_registry_wrapper2_0.KeeperRegistryABI)) + if err != nil { + assert.Nil(t, err) + } + + upkeepId, _ := new(big.Int).SetString("1843548457736589226156809205796175506139185429616502850435279853710366065936", 10) + + tests := []struct { + Name string + UpkeepKey ocr2keepers.UpkeepKey + RawData string + ExpectedResult EVMAutomationUpkeepResult20 + }{ + { + Name: "upkeep not needed", + UpkeepKey: ocr2keepers.UpkeepKey(fmt.Sprintf("19447615|%s", upkeepId)), + RawData: 
"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000421c000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000c8caf37f3b3890000000000000000000000000000000000000000000000000000000000000000", + ExpectedResult: EVMAutomationUpkeepResult20{ + Block: 19447615, + ID: upkeepId, + Eligible: false, + FailureReason: UPKEEP_FAILURE_REASON_UPKEEP_NOT_NEEDED, + GasUsed: big.NewInt(16924), + PerformData: nil, + FastGasWei: big.NewInt(1000000000), + LinkNative: big.NewInt(3532383906411401), + CheckBlockNumber: 0, + CheckBlockHash: [32]byte{}, + ExecuteGas: 5000000, + }, + }, + { + Name: "target check reverted", + UpkeepKey: ocr2keepers.UpkeepKey(fmt.Sprintf("19448272|%s", upkeepId)), + RawData: "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000007531000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000c8caf37f3b3890000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000008914039bf676e20aad43a5642485e666575ed0d927a4b5679745e947e7d125ee2687c10000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000024462e8a50d00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000001c0000000000000000000000000000000000000000000000000000000000128c1d0000000000000
00000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000009666565644944537472000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000184554482d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000000000184254432d5553442d415242495452554d2d544553544e45540000000000000000000000000000000000000000000000000000000000000000000000000000000b626c6f636b4e756d6265720000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000006400000000000000000000000000000000000000000000000000000000000000000000000000000000", + ExpectedResult: EVMAutomationUpkeepResult20{ + Block: 19448272, + ID: upkeepId, + Eligible: false, + FailureReason: UPKEEP_FAILURE_REASON_TARGET_CHECK_REVERTED, + GasUsed: big.NewInt(30001), + PerformData: []byte{98, 232, 165, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 40, 193, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 102, 101, 101, 100, 73, 68, 83, 116, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 69, 84, 72, 45, 85, 83, 68, 45, 65, 82, 66, 73, 84, 82, 85, 77, 45, 84, 69, 83, 84, 78, 69, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 66, 84, 67, 45, 85, 83, 68, 45, 65, 82, 66, 73, 84, 82, 85, 77, 45, 84, 69, 83, 84, 78, 69, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 98, 108, 111, 99, 107, 78, 117, 109, 98, 101, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + FastGasWei: big.NewInt(1000000000), + LinkNative: big.NewInt(3532383906411401), + CheckBlockNumber: 8983555, + CheckBlockHash: [32]byte{155, 246, 118, 226, 10, 173, 67, 165, 100, 36, 133, 230, 102, 87, 94, 208, 217, 39, 164, 181, 103, 151, 69, 233, 71, 231, 209, 37, 238, 38, 135, 193}, + ExecuteGas: 5000000, + }, + }, + } + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + packer := &evmRegistryPackerV2_0{abi: registryABI} + rs, err := packer.UnpackCheckResult(test.UpkeepKey, test.RawData) + assert.Nil(t, err) + assert.Equal(t, test.ExpectedResult, rs) + }) + } +} + +func TestUnpackPerformResult(t *testing.T) { + registryABI, err := abi.JSON(strings.NewReader(keeper_registry_wrapper2_0.KeeperRegistryABI)) + if err != nil { + assert.Nil(t, err) + } + + tests := []struct { + Name string + RawData string + }{ + { + Name: "unpack success", + RawData: 
"0x0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000a52d", + }, + } + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + packer := &evmRegistryPackerV2_0{abi: registryABI} + rs, err := packer.UnpackPerformResult(test.RawData) + assert.Nil(t, err) + assert.True(t, rs) + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/encoder.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/encoder.go new file mode 100644 index 00000000..97e2aeeb --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/encoder.go @@ -0,0 +1,231 @@ +package evm + +import ( + "fmt" + "math/big" + "reflect" + + "github.com/ethereum/go-ethereum/accounts/abi" + + ocr2keepers "github.com/goplugin/plugin-automation/pkg/v2" + "github.com/goplugin/plugin-automation/pkg/v2/encoding" +) + +type EVMAutomationEncoder20 struct { + encoding.BasicEncoder +} + +var ( + Uint256, _ = abi.NewType("uint256", "", nil) + Uint256Arr, _ = abi.NewType("uint256[]", "", nil) + PerformDataMarshalingArgs = []abi.ArgumentMarshaling{ + {Name: "checkBlockNumber", Type: "uint32"}, + {Name: "checkBlockhash", Type: "bytes32"}, + {Name: "performData", Type: "bytes"}, + } + PerformDataArr, _ = abi.NewType("tuple(uint32,bytes32,bytes)[]", "", PerformDataMarshalingArgs) + ErrUnexpectedResult = fmt.Errorf("unexpected result struct") + packFn = reportArgs.Pack + unpackIntoMapFn = reportArgs.UnpackIntoMap + mKeys = []string{"fastGasWei", "linkNative", "upkeepIds", "wrappedPerformDatas"} + reportArgs = abi.Arguments{ + {Name: mKeys[0], Type: Uint256}, + {Name: mKeys[1], Type: Uint256}, + {Name: mKeys[2], Type: Uint256Arr}, + {Name: mKeys[3], Type: PerformDataArr}, + } +) + +type EVMAutomationUpkeepResult20 struct { + // Block is the block number used to build an UpkeepKey for this result + Block uint32 + // ID is the unique identifier for the upkeep + ID *big.Int + Eligible bool + FailureReason 
uint8 + GasUsed *big.Int + PerformData []byte + FastGasWei *big.Int + LinkNative *big.Int + // CheckBlockNumber is the block number that the contract indicates the + // upkeep was checked on + CheckBlockNumber uint32 + CheckBlockHash [32]byte + ExecuteGas uint32 +} + +func (enc EVMAutomationEncoder20) EncodeReport(toReport []ocr2keepers.UpkeepResult) ([]byte, error) { + if len(toReport) == 0 { + return nil, nil + } + + var ( + fastGas *big.Int + link *big.Int + ) + + ids := make([]*big.Int, len(toReport)) + data := make([]wrappedPerform, len(toReport)) + + for i, result := range toReport { + res, ok := result.(EVMAutomationUpkeepResult20) + if !ok { + return nil, fmt.Errorf("unexpected upkeep result struct") + } + + // only take these values from the first result + // TODO: find a new way to get these values + if i == 0 { + fastGas = res.FastGasWei + link = res.LinkNative + } + + ids[i] = res.ID + data[i] = wrappedPerform{ + CheckBlockNumber: res.CheckBlockNumber, + CheckBlockhash: res.CheckBlockHash, + PerformData: res.PerformData, + } + } + + bts, err := packFn(fastGas, link, ids, data) + if err != nil { + return []byte{}, fmt.Errorf("%w: failed to pack report data", err) + } + + return bts, nil +} + +func (enc EVMAutomationEncoder20) DecodeReport(report []byte) ([]ocr2keepers.UpkeepResult, error) { + m := make(map[string]interface{}) + if err := unpackIntoMapFn(m, report); err != nil { + return nil, err + } + + for _, key := range mKeys { + if _, ok := m[key]; !ok { + return nil, fmt.Errorf("decoding error: %s missing from struct", key) + } + } + + res := []ocr2keepers.UpkeepResult{} + + var ( + ok bool + upkeepIds []*big.Int + wei *big.Int + link *big.Int + ) + + if upkeepIds, ok = m[mKeys[2]].([]*big.Int); !ok { + return res, fmt.Errorf("upkeep ids of incorrect type in report") + } + + // TODO: a type assertion on `wrappedPerform` did not work, even with the + // exact same struct definition as what follows. reflect was used to get the + // struct definition. 
not sure yet how to clean this up. + // ex: + // t := reflect.TypeOf(rawPerforms) + // fmt.Printf("%v\n", t) + performs, ok := m[mKeys[3]].([]struct { + CheckBlockNumber uint32 `json:"checkBlockNumber"` + CheckBlockhash [32]byte `json:"checkBlockhash"` + PerformData []byte `json:"performData"` + }) + + if !ok { + return res, fmt.Errorf("performs of incorrect structure in report") + } + + if len(upkeepIds) != len(performs) { + return res, fmt.Errorf("upkeep ids and performs should have matching length") + } + + if wei, ok = m[mKeys[0]].(*big.Int); !ok { + return res, fmt.Errorf("fast gas as wrong type") + } + + if link, ok = m[mKeys[1]].(*big.Int); !ok { + return res, fmt.Errorf("link native as wrong type") + } + + res = make([]ocr2keepers.UpkeepResult, len(upkeepIds)) + + for i := 0; i < len(upkeepIds); i++ { + r := EVMAutomationUpkeepResult20{ + Block: performs[i].CheckBlockNumber, + ID: upkeepIds[i], + Eligible: true, + PerformData: performs[i].PerformData, + FastGasWei: wei, + LinkNative: link, + CheckBlockNumber: performs[i].CheckBlockNumber, + CheckBlockHash: performs[i].CheckBlockhash, + } + + res[i] = ocr2keepers.UpkeepResult(r) + } + + return res, nil +} + +func (enc EVMAutomationEncoder20) Eligible(result ocr2keepers.UpkeepResult) (bool, error) { + res, ok := result.(EVMAutomationUpkeepResult20) + if !ok { + tp := reflect.TypeOf(result) + return false, fmt.Errorf("%s: name: %s, kind: %s", ErrUnexpectedResult, tp.Name(), tp.Kind()) + } + + return res.Eligible, nil +} + +func (enc EVMAutomationEncoder20) Detail(result ocr2keepers.UpkeepResult) (ocr2keepers.UpkeepKey, uint32, error) { + res, ok := result.(EVMAutomationUpkeepResult20) + if !ok { + return nil, 0, ErrUnexpectedResult + } + + str := fmt.Sprintf("%d%s%s", res.Block, separator, res.ID) + + return ocr2keepers.UpkeepKey([]byte(str)), res.ExecuteGas, nil +} + +func (enc EVMAutomationEncoder20) KeysFromReport(b []byte) ([]ocr2keepers.UpkeepKey, error) { + results, err := enc.DecodeReport(b) + if err != 
nil { + return nil, err + } + + keys := make([]ocr2keepers.UpkeepKey, 0, len(results)) + for _, result := range results { + res, ok := result.(EVMAutomationUpkeepResult20) + if !ok { + return nil, fmt.Errorf("unexpected result struct") + } + + str := fmt.Sprintf("%d%s%s", res.Block, separator, res.ID) + keys = append(keys, ocr2keepers.UpkeepKey([]byte(str))) + } + + return keys, nil +} + +type wrappedPerform struct { + CheckBlockNumber uint32 `abi:"checkBlockNumber"` + CheckBlockhash [32]byte `abi:"checkBlockhash"` + PerformData []byte `abi:"performData"` +} + +type BlockKeyHelper[T uint32 | int64] struct { +} + +func (kh BlockKeyHelper[T]) MakeBlockKey(b T) ocr2keepers.BlockKey { + return ocr2keepers.BlockKey(fmt.Sprintf("%d", b)) +} + +type UpkeepKeyHelper[T uint32 | int64] struct { +} + +func (kh UpkeepKeyHelper[T]) MakeUpkeepKey(b T, id *big.Int) ocr2keepers.UpkeepKey { + return ocr2keepers.UpkeepKey(fmt.Sprintf("%d%s%s", b, separator, id)) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/encoder_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/encoder_test.go new file mode 100644 index 00000000..ee49688d --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/encoder_test.go @@ -0,0 +1,222 @@ +package evm + +import ( + "math/big" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + ocr2keepers "github.com/goplugin/plugin-automation/pkg/v2" +) + +func TestEVMAutomationEncoder20(t *testing.T) { + encoder := EVMAutomationEncoder20{} + + t.Run("encoding an empty list of upkeep results returns a nil byte array", func(t *testing.T) { + b, err := encoder.EncodeReport([]ocr2keepers.UpkeepResult{}) + assert.Nil(t, err) + assert.Equal(t, b, []byte(nil)) + }) + + t.Run("attempting to encode an invalid upkeep result returns an error", func(t *testing.T) { + b, err := encoder.EncodeReport([]ocr2keepers.UpkeepResult{"data"}) + assert.Error(t, err, "unexpected upkeep result struct") + 
assert.Equal(t, b, []byte(nil)) + }) + + t.Run("successfully encodes a single upkeep result", func(t *testing.T) { + upkeepResult := EVMAutomationUpkeepResult20{ + Block: 1, + ID: big.NewInt(10), + Eligible: true, + GasUsed: big.NewInt(100), + PerformData: []byte("data"), + FastGasWei: big.NewInt(100), + LinkNative: big.NewInt(100), + CheckBlockNumber: 1, + CheckBlockHash: [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, + ExecuteGas: 10, + } + b, err := encoder.EncodeReport([]ocr2keepers.UpkeepResult{upkeepResult}) + assert.Nil(t, err) + assert.Len(t, b, 416) + + t.Run("successfully decodes a report with a single upkeep result", func(t *testing.T) { + upkeeps, err := encoder.DecodeReport(b) + assert.Nil(t, err) + assert.Len(t, upkeeps, 1) + + upkeep := upkeeps[0].(EVMAutomationUpkeepResult20) + + // some fields aren't populated by the decode so we compare field-by-field for those that are populated + assert.Equal(t, upkeep.Block, upkeepResult.Block) + assert.Equal(t, upkeep.ID, upkeepResult.ID) + assert.Equal(t, upkeep.Eligible, upkeepResult.Eligible) + assert.Equal(t, upkeep.PerformData, upkeepResult.PerformData) + assert.Equal(t, upkeep.FastGasWei, upkeepResult.FastGasWei) + assert.Equal(t, upkeep.LinkNative, upkeepResult.LinkNative) + assert.Equal(t, upkeep.CheckBlockNumber, upkeepResult.CheckBlockNumber) + assert.Equal(t, upkeep.CheckBlockHash, upkeepResult.CheckBlockHash) + }) + + t.Run("an error is returned when unpacking into a map fails", func(t *testing.T) { + oldUnpackIntoMapFn := unpackIntoMapFn + unpackIntoMapFn = func(v map[string]interface{}, data []byte) error { + return errors.New("failed to unpack into map") + } + defer func() { + unpackIntoMapFn = oldUnpackIntoMapFn + }() + + upkeeps, err := encoder.DecodeReport(b) + assert.Error(t, err, "failed to unpack into map") + assert.Len(t, upkeeps, 0) + }) + + t.Run("an error is returned when an expected key is missing from the map", func(t 
*testing.T) { + oldMKeys := mKeys + mKeys = []string{"fastGasWei", "linkNative", "upkeepIds", "wrappedPerformDatas", "thisKeyWontExist"} + defer func() { + mKeys = oldMKeys + }() + + upkeeps, err := encoder.DecodeReport(b) + assert.Error(t, err, "decoding error") + assert.Len(t, upkeeps, 0) + }) + + t.Run("an error is returned when the third element of the map is not a slice of big.Int", func(t *testing.T) { + oldMKeys := mKeys + mKeys = []string{"fastGasWei", "linkNative", "wrappedPerformDatas", "upkeepIds"} + defer func() { + mKeys = oldMKeys + }() + + upkeeps, err := encoder.DecodeReport(b) + assert.Error(t, err, "upkeep ids of incorrect type in report") + assert.Len(t, upkeeps, 0) + }) + + t.Run("an error is returned when the fourth element of the map is not a struct of perform data", func(t *testing.T) { + oldMKeys := mKeys + mKeys = []string{"fastGasWei", "linkNative", "upkeepIds", "upkeepIds"} + defer func() { + mKeys = oldMKeys + }() + + upkeeps, err := encoder.DecodeReport(b) + assert.Error(t, err, "performs of incorrect structure in report") + assert.Len(t, upkeeps, 0) + }) + + t.Run("an error is returned when the upkeep ids and performDatas are of different lengths", func(t *testing.T) { + oldUnpackIntoMapFn := unpackIntoMapFn + unpackIntoMapFn = func(v map[string]interface{}, data []byte) error { + v["fastGasWei"] = 1 + v["linkNative"] = 2 + v["upkeepIds"] = []*big.Int{big.NewInt(123), big.NewInt(456)} + v["wrappedPerformDatas"] = []struct { + CheckBlockNumber uint32 `json:"checkBlockNumber"` + CheckBlockhash [32]byte `json:"checkBlockhash"` + PerformData []byte `json:"performData"` + }{ + { + CheckBlockNumber: 1, + CheckBlockhash: [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, + PerformData: []byte{}, + }, + } + return nil + } + defer func() { + unpackIntoMapFn = oldUnpackIntoMapFn + }() + + upkeeps, err := encoder.DecodeReport(b) + assert.Error(t, err, "upkeep ids and performs should have 
matching length") + assert.Len(t, upkeeps, 0) + }) + + t.Run("an error is returned when the first element of the map is not a big int", func(t *testing.T) { + oldMKeys := mKeys + mKeys = []string{"upkeepIds", "linkNative", "upkeepIds", "wrappedPerformDatas"} + defer func() { + mKeys = oldMKeys + }() + + upkeeps, err := encoder.DecodeReport(b) + assert.Error(t, err, "fast gas as wrong type") + assert.Len(t, upkeeps, 0) + }) + + t.Run("an error is returned when the second element of the map is not a big int", func(t *testing.T) { + oldMKeys := mKeys + mKeys = []string{"fastGasWei", "upkeepIds", "upkeepIds", "wrappedPerformDatas"} + defer func() { + mKeys = oldMKeys + }() + + upkeeps, err := encoder.DecodeReport(b) + assert.Error(t, err, "link native as wrong type") + assert.Len(t, upkeeps, 0) + }) + }) + + t.Run("successfully encodes multiple upkeep results", func(t *testing.T) { + upkeepResult0 := EVMAutomationUpkeepResult20{ + Block: 1, + ID: big.NewInt(10), + Eligible: true, + GasUsed: big.NewInt(100), + PerformData: []byte("data0"), + FastGasWei: big.NewInt(100), + LinkNative: big.NewInt(100), + CheckBlockNumber: 1, + CheckBlockHash: [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, + ExecuteGas: 10, + } + upkeepResult1 := EVMAutomationUpkeepResult20{ + Block: 1, + ID: big.NewInt(10), + Eligible: true, + GasUsed: big.NewInt(200), + PerformData: []byte("data1"), + FastGasWei: big.NewInt(200), + LinkNative: big.NewInt(200), + CheckBlockNumber: 2, + CheckBlockHash: [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, + ExecuteGas: 20, + } + b, err := encoder.EncodeReport([]ocr2keepers.UpkeepResult{upkeepResult0, upkeepResult1}) + assert.Nil(t, err) + assert.Len(t, b, 640) + }) + + t.Run("an error is returned when pack fails", func(t *testing.T) { + oldPackFn := packFn + packFn = func(args ...interface{}) ([]byte, error) { + return nil, errors.New("pack 
failed") + } + defer func() { + packFn = oldPackFn + }() + + upkeepResult0 := EVMAutomationUpkeepResult20{ + Block: 1, + ID: big.NewInt(10), + Eligible: true, + GasUsed: big.NewInt(100), + PerformData: []byte("data0"), + FastGasWei: big.NewInt(100), + LinkNative: big.NewInt(100), + CheckBlockNumber: 1, + CheckBlockHash: [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, + ExecuteGas: 10, + } + b, err := encoder.EncodeReport([]ocr2keepers.UpkeepResult{upkeepResult0}) + assert.Errorf(t, err, "pack failed: failed to pack report data") + assert.Len(t, b, 0) + }) + +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/head.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/head.go new file mode 100644 index 00000000..c3fb4bb6 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/head.go @@ -0,0 +1,56 @@ +package evm + +import ( + "context" + "fmt" + + ocr2keepers "github.com/goplugin/plugin-automation/pkg/v2" + + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +type HeadProvider struct { + ht httypes.HeadTracker + hb httypes.HeadBroadcaster + chHead chan ocr2keepers.BlockKey + subscribed bool +} + +// HeadTicker provides external access to the heads channel +func (hw *HeadProvider) HeadTicker() chan ocr2keepers.BlockKey { + if !hw.subscribed { + _, _ = hw.hb.Subscribe(&headWrapper{c: hw.chHead}) + hw.subscribed = true + } + return hw.chHead +} + +func (hw *HeadProvider) LatestBlock() int64 { + lc := hw.ht.LatestChain() + if lc == nil { + return 0 + } + return lc.Number +} + +// send does a non-blocking send of the key on c. 
+func send(c chan ocr2keepers.BlockKey, k ocr2keepers.BlockKey) { + select { + case c <- k: + default: + } +} + +type headWrapper struct { + c chan ocr2keepers.BlockKey +} + +func (w *headWrapper) OnNewLongestChain(ctx context.Context, head *evmtypes.Head) { + var bl int64 + if head != nil { + bl = head.Number + } + + send(w.c, ocr2keepers.BlockKey(fmt.Sprintf("%d", bl))) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go new file mode 100644 index 00000000..db46cab1 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/log_provider.go @@ -0,0 +1,480 @@ +package evm + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + gethtypes "github.com/ethereum/go-ethereum/core/types" + + pluginutils "github.com/goplugin/plugin-automation/pkg/util" + ocr2keepers "github.com/goplugin/plugin-automation/pkg/v2" + "github.com/goplugin/plugin-automation/pkg/v2/encoding" + + "github.com/goplugin/plugin-common/pkg/services" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + registry "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type TransmitUnpacker interface { + UnpackTransmitTxInput([]byte) ([]ocr2keepers.UpkeepResult, error) +} + +type LogProvider struct { + sync services.StateMachine + mu sync.RWMutex + runState int + runError error + logger logger.Logger + logPoller logpoller.LogPoller + registryAddress common.Address + lookbackBlocks int64 + registry *registry.KeeperRegistry + client evmclient.Client + packer TransmitUnpacker + txCheckBlockCache *pluginutils.Cache[string] + cacheCleaner 
*pluginutils.IntervalCacheCleaner[string] +} + +func LogProviderFilterName(addr common.Address) string { + return logpoller.FilterName("OCR2KeeperRegistry - LogProvider", addr) +} + +func NewLogProvider( + logger logger.Logger, + logPoller logpoller.LogPoller, + registryAddress common.Address, + client evmclient.Client, + lookbackBlocks int64, +) (*LogProvider, error) { + var err error + + contract, err := registry.NewKeeperRegistry(common.HexToAddress("0x"), client) + if err != nil { + return nil, err + } + + abi, err := abi.JSON(strings.NewReader(registry.KeeperRegistryABI)) + if err != nil { + return nil, fmt.Errorf("%w: %s", ErrABINotParsable, err) + } + + // Add log filters for the log poller so that it can poll and find the logs that + // we need. + err = logPoller.RegisterFilter(logpoller.Filter{ + Name: LogProviderFilterName(contract.Address()), + EventSigs: []common.Hash{ + registry.KeeperRegistryUpkeepPerformed{}.Topic(), + registry.KeeperRegistryReorgedUpkeepReport{}.Topic(), + registry.KeeperRegistryInsufficientFundsUpkeepReport{}.Topic(), + registry.KeeperRegistryStaleUpkeepReport{}.Topic(), + }, + Addresses: []common.Address{registryAddress}, + }) + if err != nil { + return nil, err + } + + return &LogProvider{ + logger: logger.Named("AutomationLogProvider"), + logPoller: logPoller, + registryAddress: registryAddress, + lookbackBlocks: lookbackBlocks, + registry: contract, + client: client, + packer: NewEvmRegistryPackerV2_0(abi), + txCheckBlockCache: pluginutils.NewCache[string](time.Hour), + cacheCleaner: pluginutils.NewIntervalCacheCleaner[string](time.Minute), + }, nil +} + +func (c *LogProvider) Name() string { + return c.logger.Name() +} + +func (c *LogProvider) Start(ctx context.Context) error { + return c.sync.StartOnce("AutomationLogProvider", func() error { + c.mu.Lock() + defer c.mu.Unlock() + + go c.cacheCleaner.Run(c.txCheckBlockCache) + c.runState = 1 + return nil + }) +} + +func (c *LogProvider) Close() error { + return 
c.sync.StopOnce("AutomationRegistry", func() error { + c.mu.Lock() + defer c.mu.Unlock() + + c.cacheCleaner.Stop() + c.runState = 0 + c.runError = nil + return nil + }) +} + +func (c *LogProvider) Ready() error { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.runState == 1 { + return nil + } + return c.sync.Ready() +} + +func (c *LogProvider) HealthReport() map[string]error { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.runState > 1 { + c.sync.SvcErrBuffer.Append(fmt.Errorf("failed run state: %w", c.runError)) + } + return map[string]error{c.Name(): c.sync.Healthy()} +} + +func (c *LogProvider) PerformLogs(ctx context.Context) ([]ocr2keepers.PerformLog, error) { + end, err := c.logPoller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return nil, fmt.Errorf("%w: failed to get latest block from log poller", err) + } + + // always check the last lookback number of blocks and rebroadcast + // this allows the plugin to make decisions based on event confirmations + logs, err := c.logPoller.LogsWithSigs( + end.BlockNumber-c.lookbackBlocks, + end.BlockNumber, + []common.Hash{ + registry.KeeperRegistryUpkeepPerformed{}.Topic(), + }, + c.registryAddress, + pg.WithParentCtx(ctx), + ) + if err != nil { + return nil, fmt.Errorf("%w: failed to collect logs from log poller", err) + } + + performed, err := c.unmarshalPerformLogs(logs) + if err != nil { + return nil, fmt.Errorf("%w: failed to unmarshal logs", err) + } + + vals := []ocr2keepers.PerformLog{} + for _, p := range performed { + // broadcast log to subscribers + l := ocr2keepers.PerformLog{ + Key: UpkeepKeyHelper[uint32]{}.MakeUpkeepKey(p.CheckBlockNumber, p.Id), + TransmitBlock: BlockKeyHelper[int64]{}.MakeBlockKey(p.BlockNumber), + TransactionHash: p.TxHash.Hex(), + Confirmations: end.BlockNumber - p.BlockNumber, + } + vals = append(vals, l) + } + + return vals, nil +} + +func (c *LogProvider) StaleReportLogs(ctx context.Context) ([]ocr2keepers.StaleReportLog, error) { + end, err := 
c.logPoller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return nil, fmt.Errorf("%w: failed to get latest block from log poller", err) + } + + // always check the last lookback number of blocks and rebroadcast + // this allows the plugin to make decisions based on event confirmations + + // ReorgedUpkeepReportLogs + logs, err := c.logPoller.LogsWithSigs( + end.BlockNumber-c.lookbackBlocks, + end.BlockNumber, + []common.Hash{ + registry.KeeperRegistryReorgedUpkeepReport{}.Topic(), + }, + c.registryAddress, + pg.WithParentCtx(ctx), + ) + if err != nil { + return nil, fmt.Errorf("%w: failed to collect logs from log poller", err) + } + reorged, err := c.unmarshalReorgUpkeepLogs(logs) + if err != nil { + return nil, fmt.Errorf("%w: failed to unmarshal reorg logs", err) + } + + // StaleUpkeepReportLogs + logs, err = c.logPoller.LogsWithSigs( + end.BlockNumber-c.lookbackBlocks, + end.BlockNumber, + []common.Hash{ + registry.KeeperRegistryStaleUpkeepReport{}.Topic(), + }, + c.registryAddress, + pg.WithParentCtx(ctx), + ) + if err != nil { + return nil, fmt.Errorf("%w: failed to collect logs from log poller", err) + } + staleUpkeep, err := c.unmarshalStaleUpkeepLogs(logs) + if err != nil { + return nil, fmt.Errorf("%w: failed to unmarshal stale upkeep logs", err) + } + + // InsufficientFundsUpkeepReportLogs + logs, err = c.logPoller.LogsWithSigs( + end.BlockNumber-c.lookbackBlocks, + end.BlockNumber, + []common.Hash{ + registry.KeeperRegistryInsufficientFundsUpkeepReport{}.Topic(), + }, + c.registryAddress, + pg.WithParentCtx(ctx), + ) + if err != nil { + return nil, fmt.Errorf("%w: failed to collect logs from log poller", err) + } + insufficientFunds, err := c.unmarshalInsufficientFundsUpkeepLogs(logs) + if err != nil { + return nil, fmt.Errorf("%w: failed to unmarshal insufficient fund upkeep logs", err) + } + + vals := []ocr2keepers.StaleReportLog{} + for _, r := range reorged { + upkeepId := ocr2keepers.UpkeepIdentifier(r.Id.String()) + checkBlockNumber, err 
:= c.getCheckBlockNumberFromTxHash(r.TxHash, upkeepId) + if err != nil { + c.logger.Error("error while fetching checkBlockNumber from reorged report log: %w", err) + continue + } + l := ocr2keepers.StaleReportLog{ + Key: encoding.BasicEncoder{}.MakeUpkeepKey(checkBlockNumber, upkeepId), + TransmitBlock: BlockKeyHelper[int64]{}.MakeBlockKey(r.BlockNumber), + TransactionHash: r.TxHash.Hex(), + Confirmations: end.BlockNumber - r.BlockNumber, + } + vals = append(vals, l) + } + for _, r := range staleUpkeep { + upkeepId := ocr2keepers.UpkeepIdentifier(r.Id.String()) + checkBlockNumber, err := c.getCheckBlockNumberFromTxHash(r.TxHash, upkeepId) + if err != nil { + c.logger.Error("error while fetching checkBlockNumber from stale report log: %w", err) + continue + } + l := ocr2keepers.StaleReportLog{ + Key: encoding.BasicEncoder{}.MakeUpkeepKey(checkBlockNumber, upkeepId), + TransmitBlock: BlockKeyHelper[int64]{}.MakeBlockKey(r.BlockNumber), + TransactionHash: r.TxHash.Hex(), + Confirmations: end.BlockNumber - r.BlockNumber, + } + vals = append(vals, l) + } + for _, r := range insufficientFunds { + upkeepId := ocr2keepers.UpkeepIdentifier(r.Id.String()) + checkBlockNumber, err := c.getCheckBlockNumberFromTxHash(r.TxHash, upkeepId) + if err != nil { + c.logger.Error("error while fetching checkBlockNumber from insufficient funds report log: %w", err) + continue + } + l := ocr2keepers.StaleReportLog{ + Key: encoding.BasicEncoder{}.MakeUpkeepKey(checkBlockNumber, upkeepId), + TransmitBlock: BlockKeyHelper[int64]{}.MakeBlockKey(r.BlockNumber), + TransactionHash: r.TxHash.Hex(), + Confirmations: end.BlockNumber - r.BlockNumber, + } + vals = append(vals, l) + } + + return vals, nil +} + +func (c *LogProvider) unmarshalPerformLogs(logs []logpoller.Log) ([]performed, error) { + results := []performed{} + + for _, log := range logs { + rawLog := log.ToGethLog() + abilog, err := c.registry.ParseLog(rawLog) + if err != nil { + return results, err + } + + switch l := abilog.(type) { + 
case *registry.KeeperRegistryUpkeepPerformed: + if l == nil { + continue + } + + r := performed{ + Log: log, + KeeperRegistryUpkeepPerformed: *l, + } + + results = append(results, r) + } + } + + return results, nil +} + +func (c *LogProvider) unmarshalReorgUpkeepLogs(logs []logpoller.Log) ([]reorged, error) { + results := []reorged{} + + for _, log := range logs { + rawLog := log.ToGethLog() + abilog, err := c.registry.ParseLog(rawLog) + if err != nil { + return results, err + } + + switch l := abilog.(type) { + case *registry.KeeperRegistryReorgedUpkeepReport: + if l == nil { + continue + } + + r := reorged{ + Log: log, + KeeperRegistryReorgedUpkeepReport: *l, + } + + results = append(results, r) + } + } + + return results, nil +} + +func (c *LogProvider) unmarshalStaleUpkeepLogs(logs []logpoller.Log) ([]staleUpkeep, error) { + results := []staleUpkeep{} + + for _, log := range logs { + rawLog := log.ToGethLog() + abilog, err := c.registry.ParseLog(rawLog) + if err != nil { + return results, err + } + + switch l := abilog.(type) { + case *registry.KeeperRegistryStaleUpkeepReport: + if l == nil { + continue + } + + r := staleUpkeep{ + Log: log, + KeeperRegistryStaleUpkeepReport: *l, + } + + results = append(results, r) + } + } + + return results, nil +} + +func (c *LogProvider) unmarshalInsufficientFundsUpkeepLogs(logs []logpoller.Log) ([]insufficientFunds, error) { + results := []insufficientFunds{} + + for _, log := range logs { + rawLog := log.ToGethLog() + abilog, err := c.registry.ParseLog(rawLog) + if err != nil { + return results, err + } + + switch l := abilog.(type) { + case *registry.KeeperRegistryInsufficientFundsUpkeepReport: + if l == nil { + continue + } + + r := insufficientFunds{ + Log: log, + KeeperRegistryInsufficientFundsUpkeepReport: *l, + } + + results = append(results, r) + } + } + + return results, nil +} + +// Fetches the checkBlockNumber for a particular transaction and an upkeep ID. 
Requires a RPC call to get txData +// so this function should not be used heavily +func (c *LogProvider) getCheckBlockNumberFromTxHash(txHash common.Hash, id ocr2keepers.UpkeepIdentifier) (bk ocr2keepers.BlockKey, e error) { + defer func() { + if r := recover(); r != nil { + e = fmt.Errorf("recovered from panic in getCheckBlockNumberForUpkeep: %v", r) + } + }() + + // Check if value already exists in cache for txHash, id pair + cacheKey := txHash.String() + "|" + string(id) + if val, ok := c.txCheckBlockCache.Get(cacheKey); ok { + return ocr2keepers.BlockKey(val), nil + } + + var tx gethtypes.Transaction + err := c.client.CallContext(context.Background(), &tx, "eth_getTransactionByHash", txHash) + if err != nil { + return "", err + } + + txData := tx.Data() + if len(txData) < 4 { + return "", fmt.Errorf("error in getCheckBlockNumberForUpkeep, got invalid tx data %s", txData) + } + + decodedReport, err := c.packer.UnpackTransmitTxInput(txData[4:]) // Remove first 4 bytes of function signature + if err != nil { + return "", err + } + + for _, upkeep := range decodedReport { + // TODO: the log provider should be in the evm package for isolation + res, ok := upkeep.(EVMAutomationUpkeepResult20) + if !ok { + return "", fmt.Errorf("unexpected type") + } + + if res.ID.String() == string(id) { + bl := fmt.Sprintf("%d", res.Block) + + c.txCheckBlockCache.Set(cacheKey, bl, pluginutils.DefaultCacheExpiration) + + return ocr2keepers.BlockKey(bl), nil + } + } + + return "", fmt.Errorf("upkeep %s not found in tx hash %s", id, txHash) +} + +type performed struct { + logpoller.Log + registry.KeeperRegistryUpkeepPerformed +} + +type reorged struct { + logpoller.Log + registry.KeeperRegistryReorgedUpkeepReport +} + +type staleUpkeep struct { + logpoller.Log + registry.KeeperRegistryStaleUpkeepReport +} + +type insufficientFunds struct { + logpoller.Log + registry.KeeperRegistryInsufficientFundsUpkeepReport +} diff --git 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/mocks/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/mocks/registry.go new file mode 100644 index 00000000..9048a4cd --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/mocks/registry.go @@ -0,0 +1,152 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + bind "github.com/ethereum/go-ethereum/accounts/abi/bind" + + generated "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + + keeper_registry_wrapper2_0 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// Registry is an autogenerated mock type for the Registry type +type Registry struct { + mock.Mock +} + +// GetActiveUpkeepIDs provides a mock function with given fields: opts, startIndex, maxCount +func (_m *Registry) GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + ret := _m.Called(opts, startIndex, maxCount) + + if len(ret) == 0 { + panic("no return value specified for GetActiveUpkeepIDs") + } + + var r0 []*big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int, *big.Int) ([]*big.Int, error)); ok { + return rf(opts, startIndex, maxCount) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int, *big.Int) []*big.Int); ok { + r0 = rf(opts, startIndex, maxCount) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int, *big.Int) error); ok { + r1 = rf(opts, startIndex, maxCount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetState provides a mock function with given fields: opts +func (_m *Registry) GetState(opts *bind.CallOpts) (keeper_registry_wrapper2_0.GetState, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no 
return value specified for GetState") + } + + var r0 keeper_registry_wrapper2_0.GetState + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (keeper_registry_wrapper2_0.GetState, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) keeper_registry_wrapper2_0.GetState); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(keeper_registry_wrapper2_0.GetState) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetUpkeep provides a mock function with given fields: opts, id +func (_m *Registry) GetUpkeep(opts *bind.CallOpts, id *big.Int) (keeper_registry_wrapper2_0.UpkeepInfo, error) { + ret := _m.Called(opts, id) + + if len(ret) == 0 { + panic("no return value specified for GetUpkeep") + } + + var r0 keeper_registry_wrapper2_0.UpkeepInfo + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) (keeper_registry_wrapper2_0.UpkeepInfo, error)); ok { + return rf(opts, id) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) keeper_registry_wrapper2_0.UpkeepInfo); ok { + r0 = rf(opts, id) + } else { + r0 = ret.Get(0).(keeper_registry_wrapper2_0.UpkeepInfo) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); ok { + r1 = rf(opts, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseLog provides a mock function with given fields: log +func (_m *Registry) ParseLog(log types.Log) (generated.AbigenLog, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseLog") + } + + var r0 generated.AbigenLog + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (generated.AbigenLog, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) generated.AbigenLog); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(generated.AbigenLog) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = 
rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewRegistry creates a new instance of Registry. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRegistry(t interface { + mock.TestingT + Cleanup(func()) +}) *Registry { + mock := &Registry{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go new file mode 100644 index 00000000..08b0dc09 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry.go @@ -0,0 +1,764 @@ +package evm + +import ( + "context" + "fmt" + "math/big" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + coreTypes "github.com/ethereum/go-ethereum/core/types" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + "go.uber.org/multierr" + + ocr2keepers "github.com/goplugin/plugin-automation/pkg/v2" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +const ( + // DefaultUpkeepExpiration decides how long an upkeep info will be valid for. after it expires, a getUpkeepInfo + // call will be made to the registry to obtain the most recent upkeep info and refresh this cache. 
+ DefaultUpkeepExpiration = 10 * time.Minute + // DefaultCooldownExpiration decides how long a Mercury upkeep will be put in cool down for the first time. within + // 10 minutes, subsequent failures will result in double amount of cool down period. + DefaultCooldownExpiration = 5 * time.Second + // DefaultApiErrExpiration decides a running sum of total errors of an upkeep in this 10 minutes window. it is used + // to decide how long the cool down period will be. + DefaultApiErrExpiration = 10 * time.Minute + // CleanupInterval decides when the expired items in cache will be deleted. + CleanupInterval = 15 * time.Minute +) + +var ( + ErrLogReadFailure = fmt.Errorf("failure reading logs") + ErrHeadNotAvailable = fmt.Errorf("head not available") + ErrRegistryCallFailure = fmt.Errorf("registry chain call failure") + ErrBlockKeyNotParsable = fmt.Errorf("block identifier not parsable") + ErrUpkeepKeyNotParsable = fmt.Errorf("upkeep key not parsable") + ErrInitializationFailure = fmt.Errorf("failed to initialize registry") + ErrContextCancelled = fmt.Errorf("context was cancelled") + ErrABINotParsable = fmt.Errorf("error parsing abi") + ActiveUpkeepIDBatchSize int64 = 1000 + FetchUpkeepConfigBatchSize = 50 + separator = "|" + reInitializationDelay = 15 * time.Minute + logEventLookback int64 = 250 +) + +//go:generate mockery --quiet --name Registry --output ./mocks/ --case=underscore +type Registry interface { + GetUpkeep(opts *bind.CallOpts, id *big.Int) (keeper_registry_wrapper2_0.UpkeepInfo, error) + GetState(opts *bind.CallOpts) (keeper_registry_wrapper2_0.GetState, error) + GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + ParseLog(log coreTypes.Log) (generated.AbigenLog, error) +} + +type LatestBlockGetter interface { + LatestBlock() int64 +} + +func NewEVMRegistryService(addr common.Address, client legacyevm.Chain, lggr logger.Logger) (*EvmRegistry, error) { + keeperRegistryABI, err := 
abi.JSON(strings.NewReader(keeper_registry_wrapper2_0.KeeperRegistryABI)) + if err != nil { + return nil, fmt.Errorf("%w: %s", ErrABINotParsable, err) + } + + registry, err := keeper_registry_wrapper2_0.NewKeeperRegistry(addr, client.Client()) + if err != nil { + return nil, fmt.Errorf("%w: failed to create caller for address and backend", ErrInitializationFailure) + } + + r := &EvmRegistry{ + HeadProvider: HeadProvider{ + ht: client.HeadTracker(), + hb: client.HeadBroadcaster(), + chHead: make(chan ocr2keepers.BlockKey, 1), + }, + lggr: lggr.Named("AutomationRegistry"), + poller: client.LogPoller(), + addr: addr, + client: client.Client(), + txHashes: make(map[string]bool), + registry: registry, + abi: keeperRegistryABI, + active: make(map[string]activeUpkeep), + packer: &evmRegistryPackerV2_0{abi: keeperRegistryABI}, + headFunc: func(ocr2keepers.BlockKey) {}, + chLog: make(chan logpoller.Log, 1000), + enc: EVMAutomationEncoder20{}, + } + + if err := r.registerEvents(client.ID().Uint64(), addr); err != nil { + return nil, fmt.Errorf("logPoller error while registering automation events: %w", err) + } + + return r, nil +} + +var upkeepStateEvents = []common.Hash{ + keeper_registry_wrapper2_0.KeeperRegistryUpkeepRegistered{}.Topic(), // adds new upkeep id to registry + keeper_registry_wrapper2_0.KeeperRegistryUpkeepReceived{}.Topic(), // adds new upkeep id to registry via migration + keeper_registry_wrapper2_0.KeeperRegistryUpkeepGasLimitSet{}.Topic(), // unpauses an upkeep + keeper_registry_wrapper2_0.KeeperRegistryUpkeepUnpaused{}.Topic(), // updates the gas limit for an upkeep +} + +var upkeepActiveEvents = []common.Hash{ + keeper_registry_wrapper2_0.KeeperRegistryUpkeepPerformed{}.Topic(), + keeper_registry_wrapper2_0.KeeperRegistryReorgedUpkeepReport{}.Topic(), + keeper_registry_wrapper2_0.KeeperRegistryInsufficientFundsUpkeepReport{}.Topic(), + keeper_registry_wrapper2_0.KeeperRegistryStaleUpkeepReport{}.Topic(), +} + +type checkResult struct { + ur 
[]EVMAutomationUpkeepResult20 + err error +} + +type activeUpkeep struct { + ID *big.Int + PerformGasLimit uint32 + CheckData []byte +} + +type EvmRegistry struct { + HeadProvider + sync services.StateMachine + lggr logger.Logger + poller logpoller.LogPoller + addr common.Address + client client.Client + registry Registry + abi abi.ABI + packer *evmRegistryPackerV2_0 + chLog chan logpoller.Log + reInit *time.Timer + mu sync.RWMutex + txHashes map[string]bool + lastPollBlock int64 + ctx context.Context + cancel context.CancelFunc + active map[string]activeUpkeep + headFunc func(ocr2keepers.BlockKey) + runState int + runError error + enc EVMAutomationEncoder20 +} + +// GetActiveUpkeepKeys uses the latest head and map of all active upkeeps to build a +// slice of upkeep keys. +func (r *EvmRegistry) GetActiveUpkeepIDs(context.Context) ([]ocr2keepers.UpkeepIdentifier, error) { + r.mu.RLock() + defer r.mu.RUnlock() + + keys := make([]ocr2keepers.UpkeepIdentifier, len(r.active)) + var i int + for _, value := range r.active { + keys[i] = ocr2keepers.UpkeepIdentifier(value.ID.String()) + i++ + } + + return keys, nil +} + +func (r *EvmRegistry) CheckUpkeep(ctx context.Context, mercuryEnabled bool, keys ...ocr2keepers.UpkeepKey) ([]ocr2keepers.UpkeepResult, error) { + chResult := make(chan checkResult, 1) + go r.doCheck(ctx, mercuryEnabled, keys, chResult) + + select { + case rs := <-chResult: + result := make([]ocr2keepers.UpkeepResult, len(rs.ur)) + for i := range rs.ur { + result[i] = rs.ur[i] + } + + return result, rs.err + case <-ctx.Done(): + // safety on context done to provide an error on context cancellation + // contract calls through the geth wrappers are a bit of a black box + // so this safety net ensures contexts are fully respected and contract + // call functions have a more graceful closure outside the scope of + // CheckUpkeep needing to return immediately. 
+ return nil, fmt.Errorf("%w: failed to check upkeep on registry", ErrContextCancelled) + } +} + +func (r *EvmRegistry) Name() string { + return r.lggr.Name() +} + +func (r *EvmRegistry) Start(ctx context.Context) error { + return r.sync.StartOnce("AutomationRegistry", func() error { + r.mu.Lock() + defer r.mu.Unlock() + r.ctx, r.cancel = context.WithCancel(context.Background()) + r.reInit = time.NewTimer(reInitializationDelay) + + // initialize the upkeep keys; if the reInit timer returns, do it again + { + go func(cx context.Context, tmr *time.Timer, lggr logger.Logger, f func() error) { + err := f() + if err != nil { + lggr.Errorf("failed to initialize upkeeps", err) + } + + for { + select { + case <-tmr.C: + err = f() + if err != nil { + lggr.Errorf("failed to re-initialize upkeeps", err) + } + tmr.Reset(reInitializationDelay) + case <-cx.Done(): + return + } + } + }(r.ctx, r.reInit, r.lggr, r.initialize) + } + + // start polling logs on an interval + { + go func(cx context.Context, lggr logger.Logger, f func() error) { + ticker := time.NewTicker(time.Second) + + for { + select { + case <-ticker.C: + err := f() + if err != nil { + lggr.Errorf("failed to poll logs for upkeeps", err) + } + case <-cx.Done(): + ticker.Stop() + return + } + } + }(r.ctx, r.lggr, r.pollLogs) + } + + // run process to process logs from log channel + { + go func(cx context.Context, ch chan logpoller.Log, lggr logger.Logger, f func(logpoller.Log) error) { + for { + select { + case l := <-ch: + err := f(l) + if err != nil { + lggr.Errorf("failed to process log for upkeep", err) + } + case <-cx.Done(): + return + } + } + }(r.ctx, r.chLog, r.lggr, r.processUpkeepStateLog) + } + + r.runState = 1 + return nil + }) +} + +func (r *EvmRegistry) Close() error { + return r.sync.StopOnce("AutomationRegistry", func() error { + r.mu.Lock() + defer r.mu.Unlock() + r.cancel() + r.runState = 0 + r.runError = nil + return nil + }) +} + +func (r *EvmRegistry) Ready() error { + r.mu.RLock() + defer 
r.mu.RUnlock() + + if r.runState == 1 { + return nil + } + return r.sync.Ready() +} + +func (r *EvmRegistry) HealthReport() map[string]error { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.runState > 1 { + r.sync.SvcErrBuffer.Append(fmt.Errorf("failed run state: %w", r.runError)) + } + return map[string]error{r.Name(): r.sync.Healthy()} +} + +func (r *EvmRegistry) initialize() error { + startupCtx, cancel := context.WithTimeout(r.ctx, reInitializationDelay) + defer cancel() + + idMap := make(map[string]activeUpkeep) + + r.lggr.Debugf("Re-initializing active upkeeps list") + // get active upkeep ids from contract + ids, err := r.getLatestIDsFromContract(startupCtx) + if err != nil { + return fmt.Errorf("failed to get ids from contract: %s", err) + } + + var offset int + for offset < len(ids) { + batch := FetchUpkeepConfigBatchSize + if len(ids)-offset < batch { + batch = len(ids) - offset + } + + actives, err := r.getUpkeepConfigs(startupCtx, ids[offset:offset+batch]) + if err != nil { + return fmt.Errorf("failed to get configs for id batch (length '%d'): %s", batch, err) + } + + for _, active := range actives { + idMap[active.ID.String()] = active + } + + offset += batch + + // Do not bombard RPC will calls, wait a bit + time.Sleep(100 * time.Millisecond) + } + + r.mu.Lock() + r.active = idMap + r.mu.Unlock() + + return nil +} + +func (r *EvmRegistry) pollLogs() error { + var latest int64 + var end logpoller.LogPollerBlock + var err error + + if end, err = r.poller.LatestBlock(pg.WithParentCtx(r.ctx)); err != nil { + return fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) + } + + r.mu.Lock() + latest = r.lastPollBlock + r.lastPollBlock = end.BlockNumber + r.mu.Unlock() + + // if start and end are the same, no polling needs to be done + if latest == 0 || latest == end.BlockNumber { + return nil + } + + { + var logs []logpoller.Log + + if logs, err = r.poller.LogsWithSigs( + end.BlockNumber-logEventLookback, + end.BlockNumber, + upkeepStateEvents, + r.addr, + 
pg.WithParentCtx(r.ctx), + ); err != nil { + return fmt.Errorf("%w: %s", ErrLogReadFailure, err) + } + + for _, log := range logs { + r.chLog <- log + } + } + + return nil +} + +func UpkeepFilterName(addr common.Address) string { + return logpoller.FilterName("EvmRegistry - Upkeep events for", addr.String()) +} + +func (r *EvmRegistry) registerEvents(chainID uint64, addr common.Address) error { + // Add log filters for the log poller so that it can poll and find the logs that + // we need + return r.poller.RegisterFilter(logpoller.Filter{ + Name: UpkeepFilterName(addr), + EventSigs: append(upkeepStateEvents, upkeepActiveEvents...), + Addresses: []common.Address{addr}, + }) +} + +func (r *EvmRegistry) processUpkeepStateLog(l logpoller.Log) error { + + hash := l.TxHash.String() + if _, ok := r.txHashes[hash]; ok { + return nil + } + r.txHashes[hash] = true + + rawLog := l.ToGethLog() + abilog, err := r.registry.ParseLog(rawLog) + if err != nil { + return err + } + + switch l := abilog.(type) { + case *keeper_registry_wrapper2_0.KeeperRegistryUpkeepRegistered: + r.lggr.Debugf("KeeperRegistryUpkeepRegistered log detected for upkeep ID %s in transaction %s", l.Id.String(), hash) + r.addToActive(l.Id, false) + case *keeper_registry_wrapper2_0.KeeperRegistryUpkeepReceived: + r.lggr.Debugf("KeeperRegistryUpkeepReceived log detected for upkeep ID %s in transaction %s", l.Id.String(), hash) + r.addToActive(l.Id, false) + case *keeper_registry_wrapper2_0.KeeperRegistryUpkeepUnpaused: + r.lggr.Debugf("KeeperRegistryUpkeepUnpaused log detected for upkeep ID %s in transaction %s", l.Id.String(), hash) + r.addToActive(l.Id, false) + case *keeper_registry_wrapper2_0.KeeperRegistryUpkeepGasLimitSet: + r.lggr.Debugf("KeeperRegistryUpkeepGasLimitSet log detected for upkeep ID %s in transaction %s", l.Id.String(), hash) + r.addToActive(l.Id, true) + } + + return nil +} + +func (r *EvmRegistry) addToActive(id *big.Int, force bool) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.active 
== nil {
		r.active = make(map[string]activeUpkeep)
	}

	// Fetch and cache this upkeep's config on first sight of the ID, or
	// unconditionally when force is set (e.g. after a gas limit change).
	if _, ok := r.active[id.String()]; !ok || force {
		actives, err := r.getUpkeepConfigs(r.ctx, []*big.Int{id})
		if err != nil {
			// Fixed: %w is only meaningful inside fmt.Errorf; the logger's
			// printf-style Errorf needs %v to render the error.
			r.lggr.Errorf("failed to get upkeep configs during adding active upkeep: %v", err)
			return
		}

		if len(actives) != 1 {
			return
		}

		r.active[id.String()] = actives[0]
	}
}

// buildCallOpts returns call options for contract reads. A nil or zero block
// falls back to r.LatestBlock() when that is nonzero; otherwise BlockNumber
// stays nil (node's latest).
func (r *EvmRegistry) buildCallOpts(ctx context.Context, block *big.Int) (*bind.CallOpts, error) {
	opts := bind.CallOpts{
		Context:     ctx,
		BlockNumber: nil,
	}

	if block == nil || block.Int64() == 0 {
		if r.LatestBlock() != 0 {
			opts.BlockNumber = big.NewInt(r.LatestBlock())
		}
	} else {
		opts.BlockNumber = block
	}

	return &opts, nil
}

// getLatestIDsFromContract reads the total upkeep count from contract state
// and pages through GetActiveUpkeepIDs in batches of ActiveUpkeepIDBatchSize
// until all IDs are collected.
func (r *EvmRegistry) getLatestIDsFromContract(ctx context.Context) ([]*big.Int, error) {
	opts, err := r.buildCallOpts(ctx, nil)
	if err != nil {
		return nil, err
	}

	state, err := r.registry.GetState(opts)
	if err != nil {
		n := "latest"
		if opts.BlockNumber != nil {
			n = fmt.Sprintf("%d", opts.BlockNumber.Int64())
		}

		return nil, fmt.Errorf("%w: failed to get contract state at block number '%s'", err, n)
	}

	ids := make([]*big.Int, 0, int(state.State.NumUpkeeps.Int64()))
	for int64(len(ids)) < state.State.NumUpkeeps.Int64() {
		startIndex := int64(len(ids))
		maxCount := state.State.NumUpkeeps.Int64() - startIndex

		if maxCount == 0 {
			break
		}

		if maxCount > ActiveUpkeepIDBatchSize {
			maxCount = ActiveUpkeepIDBatchSize
		}

		batchIDs, err := r.registry.GetActiveUpkeepIDs(opts, big.NewInt(startIndex), big.NewInt(maxCount))
		if err != nil {
			return nil, fmt.Errorf("%w: failed to get active upkeep IDs from index %d to %d (both inclusive)", err, startIndex, startIndex+maxCount-1)
		}

		ids = append(ids, batchIDs...)
+ } + + return ids, nil +} + +func (r *EvmRegistry) doCheck(ctx context.Context, _ bool, keys []ocr2keepers.UpkeepKey, chResult chan checkResult) { + upkeepResults, err := r.checkUpkeeps(ctx, keys) + if err != nil { + chResult <- checkResult{ + err: err, + } + return + } + + upkeepResults, err = r.simulatePerformUpkeeps(ctx, upkeepResults) + if err != nil { + chResult <- checkResult{ + err: err, + } + return + } + + for i, res := range upkeepResults { + r.mu.RLock() + up, ok := r.active[res.ID.String()] + r.mu.RUnlock() + + if ok { + upkeepResults[i].ExecuteGas = up.PerformGasLimit + } + } + + chResult <- checkResult{ + ur: upkeepResults, + } +} + +func splitKey(key ocr2keepers.UpkeepKey) (*big.Int, *big.Int, error) { + var ( + block *big.Int + id *big.Int + ok bool + ) + + parts := strings.Split(string(key), separator) + if len(parts) != 2 { + return nil, nil, fmt.Errorf("unsplittable key") + } + + if block, ok = new(big.Int).SetString(parts[0], 10); !ok { + return nil, nil, fmt.Errorf("could not get block from key") + } + + if id, ok = new(big.Int).SetString(parts[1], 10); !ok { + return nil, nil, fmt.Errorf("could not get id from key") + } + + return block, id, nil +} + +// TODO (AUTO-2013): Have better error handling to not return nil results in case of partial errors +func (r *EvmRegistry) checkUpkeeps(ctx context.Context, keys []ocr2keepers.UpkeepKey) ([]EVMAutomationUpkeepResult20, error) { + var ( + checkReqs = make([]rpc.BatchElem, len(keys)) + checkResults = make([]*string, len(keys)) + ) + + for i, key := range keys { + block, upkeepId, err := splitKey(key) + if err != nil { + return nil, err + } + + opts, err := r.buildCallOpts(ctx, block) + if err != nil { + return nil, err + } + + payload, err := r.abi.Pack("checkUpkeep", upkeepId) + if err != nil { + return nil, err + } + + var result string + checkReqs[i] = rpc.BatchElem{ + Method: "eth_call", + Args: []interface{}{ + map[string]interface{}{ + "to": r.addr.Hex(), + "data": hexutil.Bytes(payload), + 
}, + hexutil.EncodeBig(opts.BlockNumber), + }, + Result: &result, + } + + checkResults[i] = &result + } + + if err := r.client.BatchCallContext(ctx, checkReqs); err != nil { + return nil, err + } + + var ( + multiErr error + results = make([]EVMAutomationUpkeepResult20, len(keys)) + ) + + for i, req := range checkReqs { + if req.Error != nil { + r.lggr.Debugf("error encountered for key %s with message '%s' in check", keys[i], req.Error) + multierr.AppendInto(&multiErr, req.Error) + } else { + var err error + r.lggr.Debugf("UnpackCheckResult key %s checkResult: %s", string(keys[i]), *checkResults[i]) + results[i], err = r.packer.UnpackCheckResult(keys[i], *checkResults[i]) + if err != nil { + return nil, err + } + } + } + + return results, multiErr +} + +// TODO (AUTO-2013): Have better error handling to not return nil results in case of partial errors +func (r *EvmRegistry) simulatePerformUpkeeps(ctx context.Context, checkResults []EVMAutomationUpkeepResult20) ([]EVMAutomationUpkeepResult20, error) { + var ( + performReqs = make([]rpc.BatchElem, 0, len(checkResults)) + performResults = make([]*string, 0, len(checkResults)) + performToKeyIdx = make([]int, 0, len(checkResults)) + ) + + for i, checkResult := range checkResults { + if !checkResult.Eligible { + continue + } + + opts, err := r.buildCallOpts(ctx, big.NewInt(int64(checkResult.Block))) + if err != nil { + return nil, err + } + + // Since checkUpkeep is true, simulate perform upkeep to ensure it doesn't revert + payload, err := r.abi.Pack("simulatePerformUpkeep", checkResult.ID, checkResult.PerformData) + if err != nil { + return nil, err + } + + var result string + performReqs = append(performReqs, rpc.BatchElem{ + Method: "eth_call", + Args: []interface{}{ + map[string]interface{}{ + "to": r.addr.Hex(), + "data": hexutil.Bytes(payload), + }, + hexutil.EncodeBig(opts.BlockNumber), + }, + Result: &result, + }) + + performResults = append(performResults, &result) + performToKeyIdx = append(performToKeyIdx, i) 
+ } + + if len(performReqs) > 0 { + if err := r.client.BatchCallContext(ctx, performReqs); err != nil { + return nil, err + } + } + + var multiErr error + + for i, req := range performReqs { + if req.Error != nil { + r.lggr.Debugf("error encountered for key %d|%s with message '%s' in simulate perform", checkResults[i].Block, checkResults[i].ID, req.Error) + multierr.AppendInto(&multiErr, req.Error) + } else { + simulatePerformSuccess, err := r.packer.UnpackPerformResult(*performResults[i]) + if err != nil { + return nil, err + } + + if !simulatePerformSuccess { + checkResults[performToKeyIdx[i]].Eligible = false + } + } + } + + return checkResults, multiErr +} + +// TODO (AUTO-2013): Have better error handling to not return nil results in case of partial errors +func (r *EvmRegistry) getUpkeepConfigs(ctx context.Context, ids []*big.Int) ([]activeUpkeep, error) { + if len(ids) == 0 { + return []activeUpkeep{}, nil + } + + var ( + uReqs = make([]rpc.BatchElem, len(ids)) + uResults = make([]*string, len(ids)) + ) + + for i, id := range ids { + opts, err := r.buildCallOpts(ctx, nil) + if err != nil { + return nil, fmt.Errorf("failed to get call opts: %s", err) + } + + payload, err := r.abi.Pack("getUpkeep", id) + if err != nil { + return nil, fmt.Errorf("failed to pack id with abi: %s", err) + } + + var result string + uReqs[i] = rpc.BatchElem{ + Method: "eth_call", + Args: []interface{}{ + map[string]interface{}{ + "to": r.addr.Hex(), + "data": hexutil.Bytes(payload), + }, + hexutil.EncodeBig(opts.BlockNumber), + }, + Result: &result, + } + + uResults[i] = &result + } + + if err := r.client.BatchCallContext(ctx, uReqs); err != nil { + return nil, fmt.Errorf("rpc error: %s", err) + } + + var ( + multiErr error + results = make([]activeUpkeep, len(ids)) + ) + + for i, req := range uReqs { + if req.Error != nil { + r.lggr.Debugf("error encountered for config id %s with message '%s' in get config", ids[i], req.Error) + multierr.AppendInto(&multiErr, req.Error) + } else { 
+ var err error + results[i], err = r.packer.UnpackUpkeepResult(ids[i], *uResults[i]) + if err != nil { + return nil, fmt.Errorf("failed to unpack result: %s", err) + } + } + } + + return results, multiErr +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry_test.go new file mode 100644 index 00000000..45b47006 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20/registry_test.go @@ -0,0 +1,242 @@ +package evm + +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + ocr2keepers "github.com/goplugin/plugin-automation/pkg/v2" + + commonmocks "github.com/goplugin/pluginv3.0/v2/common/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestGetActiveUpkeepKeys(t *testing.T) { + tests := []struct { + Name string + LatestHead int64 + ActiveIDs []string + ExpectedErr error + ExpectedKeys []ocr2keepers.UpkeepIdentifier + }{ + {Name: "NoActiveIDs", LatestHead: 1, ActiveIDs: []string{}, ExpectedKeys: []ocr2keepers.UpkeepIdentifier{}}, + {Name: "AvailableActiveIDs", LatestHead: 1, ActiveIDs: []string{"8", "9", "3", "1"}, ExpectedKeys: []ocr2keepers.UpkeepIdentifier{ + ocr2keepers.UpkeepIdentifier("8"), + ocr2keepers.UpkeepIdentifier("9"), + ocr2keepers.UpkeepIdentifier("3"), + ocr2keepers.UpkeepIdentifier("1"), + }}, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + actives := make(map[string]activeUpkeep) + for _, id := range test.ActiveIDs { + idNum := big.NewInt(0) + idNum.SetString(id, 10) + actives[id] = 
activeUpkeep{ID: idNum} + } + + mht := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t) + + rg := &EvmRegistry{ + HeadProvider: HeadProvider{ + ht: mht, + }, + active: actives, + } + + keys, err := rg.GetActiveUpkeepIDs(testutils.Context(t)) + + if test.ExpectedErr != nil { + assert.ErrorIs(t, err, test.ExpectedErr) + } else { + assert.Nil(t, err) + } + + if len(test.ExpectedKeys) > 0 { + for _, key := range keys { + assert.Contains(t, test.ExpectedKeys, key) + } + } else { + assert.Equal(t, test.ExpectedKeys, keys) + } + }) + } +} + +func TestPollLogs(t *testing.T) { + tests := []struct { + Name string + LastPoll int64 + Address common.Address + ExpectedLastPoll int64 + ExpectedErr error + LatestBlock *struct { + OutputBlock int64 + OutputErr error + } + LogsWithSigs *struct { + InputStart int64 + InputEnd int64 + OutputLogs []logpoller.Log + OutputErr error + } + }{ + { + Name: "LatestBlockError", + ExpectedErr: ErrHeadNotAvailable, + LatestBlock: &struct { + OutputBlock int64 + OutputErr error + }{ + OutputBlock: 0, + OutputErr: fmt.Errorf("test error output"), + }, + }, + { + Name: "LastHeadPollIsLatestHead", + LastPoll: 500, + ExpectedLastPoll: 500, + ExpectedErr: nil, + LatestBlock: &struct { + OutputBlock int64 + OutputErr error + }{ + OutputBlock: 500, + OutputErr: nil, + }, + }, + { + Name: "LastHeadPollNotInitialized", + LastPoll: 0, + ExpectedLastPoll: 500, + ExpectedErr: nil, + LatestBlock: &struct { + OutputBlock int64 + OutputErr error + }{ + OutputBlock: 500, + OutputErr: nil, + }, + }, + { + Name: "LogPollError", + LastPoll: 480, + Address: common.BigToAddress(big.NewInt(1)), + ExpectedLastPoll: 500, + ExpectedErr: ErrLogReadFailure, + LatestBlock: &struct { + OutputBlock int64 + OutputErr error + }{ + OutputBlock: 500, + OutputErr: nil, + }, + LogsWithSigs: &struct { + InputStart int64 + InputEnd int64 + OutputLogs []logpoller.Log + OutputErr error + }{ + InputStart: 250, + InputEnd: 500, + OutputLogs: []logpoller.Log{}, + OutputErr: 
fmt.Errorf("test output error"), + }, + }, + { + Name: "LogPollSuccess", + LastPoll: 480, + Address: common.BigToAddress(big.NewInt(1)), + ExpectedLastPoll: 500, + ExpectedErr: nil, + LatestBlock: &struct { + OutputBlock int64 + OutputErr error + }{ + OutputBlock: 500, + OutputErr: nil, + }, + LogsWithSigs: &struct { + InputStart int64 + InputEnd int64 + OutputLogs []logpoller.Log + OutputErr error + }{ + InputStart: 250, + InputEnd: 500, + OutputLogs: []logpoller.Log{ + {EvmChainId: ubig.New(big.NewInt(5)), LogIndex: 1}, + {EvmChainId: ubig.New(big.NewInt(6)), LogIndex: 2}, + }, + OutputErr: nil, + }, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + mp := new(mocks.LogPoller) + + if test.LatestBlock != nil { + mp.On("LatestBlock", mock.Anything). + Return(logpoller.LogPollerBlock{BlockNumber: test.LatestBlock.OutputBlock}, test.LatestBlock.OutputErr) + } + + if test.LogsWithSigs != nil { + fc := test.LogsWithSigs + mp.On("LogsWithSigs", fc.InputStart, fc.InputEnd, upkeepStateEvents, test.Address, mock.Anything).Return(fc.OutputLogs, fc.OutputErr) + } + + rg := &EvmRegistry{ + addr: test.Address, + lastPollBlock: test.LastPoll, + poller: mp, + chLog: make(chan logpoller.Log, 10), + } + + err := rg.pollLogs() + + assert.Equal(t, test.ExpectedLastPoll, rg.lastPollBlock) + if test.ExpectedErr != nil { + assert.ErrorIs(t, err, test.ExpectedErr) + } else { + assert.Nil(t, err) + } + + var outputLogCount int + + CheckLoop: + for { + chT := time.NewTimer(20 * time.Millisecond) + select { + case l := <-rg.chLog: + chT.Stop() + if test.LogsWithSigs == nil { + assert.FailNow(t, "logs detected but no logs were expected") + } + outputLogCount++ + assert.Contains(t, test.LogsWithSigs.OutputLogs, l) + case <-chT.C: + break CheckLoop + } + } + + if test.LogsWithSigs != nil { + assert.Equal(t, len(test.LogsWithSigs.OutputLogs), outputLogCount) + } + + mp.AssertExpectations(t) + }) + } +} diff --git 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/active_list.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/active_list.go new file mode 100644 index 00000000..c5e50eae --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/active_list.go @@ -0,0 +1,123 @@ +package evm + +import ( + "math/big" + "sync" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" +) + +// ActiveUpkeepList is a list to manage active upkeep IDs +type ActiveUpkeepList interface { + // Reset resets the list to the given IDs + Reset(ids ...*big.Int) + // Add adds new entries to the list + Add(id ...*big.Int) int + // Remove removes entries from the list + Remove(id ...*big.Int) int + // View returns the list of IDs of the given type + View(...types.UpkeepType) []*big.Int + // IsActive returns true if the given ID is of an active upkeep + IsActive(id *big.Int) bool + Size() int +} + +type activeList struct { + items map[string]bool + lock sync.RWMutex +} + +var _ ActiveUpkeepList = &activeList{} + +// NewActiveList creates a new ActiveList +func NewActiveUpkeepList() ActiveUpkeepList { + return &activeList{ + items: make(map[string]bool), + } +} + +// Reset resets the list to the given IDs +func (al *activeList) Reset(ids ...*big.Int) { + al.lock.Lock() + defer al.lock.Unlock() + + al.items = make(map[string]bool) + for _, id := range ids { + al.items[id.String()] = true + } +} + +// Add adds new entries to the list +func (al *activeList) Add(ids ...*big.Int) int { + al.lock.Lock() + defer al.lock.Unlock() + + count := 0 + for _, id := range ids { + if key := id.String(); !al.items[key] { + count++ + al.items[key] = true + } + } + return count +} + +// Remove removes entries from the list +func (al *activeList) Remove(ids ...*big.Int) int { + al.lock.Lock() + defer al.lock.Unlock() + + 
count := 0 + for _, id := range ids { + key := id.String() + if al.items[key] { + count++ + delete(al.items, key) + } + } + return count +} + +// View returns the list of IDs of the given type +func (al *activeList) View(upkeepTypes ...types.UpkeepType) []*big.Int { + al.lock.RLock() + defer al.lock.RUnlock() + + var keys []*big.Int + for key := range al.items { + id := &ocr2keepers.UpkeepIdentifier{} + bint, ok := big.NewInt(0).SetString(key, 10) + if !ok { + continue + } + if !id.FromBigInt(bint) { + continue + } + currentType := core.GetUpkeepType(*id) + for _, t := range upkeepTypes { + if currentType == t { + keys = append(keys, bint) + break + } + } + } + return keys +} + +func (al *activeList) IsActive(id *big.Int) bool { + al.lock.RLock() + defer al.lock.RUnlock() + + return al.items[id.String()] +} + +func (al *activeList) Size() int { + al.lock.RLock() + defer al.lock.RUnlock() + + return len(al.items) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/active_list_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/active_list_test.go new file mode 100644 index 00000000..35eca6b4 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/active_list_test.go @@ -0,0 +1,106 @@ +package evm + +import ( + "math/big" + "sort" + "testing" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/stretchr/testify/require" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" +) + +func TestActiveUpkeepList(t *testing.T) { + logIDs := []ocr2keepers.UpkeepIdentifier{ + core.GenUpkeepID(types.LogTrigger, "0"), + core.GenUpkeepID(types.LogTrigger, "1"), + core.GenUpkeepID(types.LogTrigger, "2"), + core.GenUpkeepID(types.LogTrigger, "3"), + core.GenUpkeepID(types.LogTrigger, "4"), + } + conditionalIDs := []ocr2keepers.UpkeepIdentifier{ + core.GenUpkeepID(types.ConditionTrigger, "0"), + 
core.GenUpkeepID(types.ConditionTrigger, "1"), + core.GenUpkeepID(types.ConditionTrigger, "2"), + core.GenUpkeepID(types.ConditionTrigger, "3"), + core.GenUpkeepID(types.ConditionTrigger, "4"), + } + + tests := []struct { + name string + initial []*big.Int + add []*big.Int + remove []*big.Int + expectedLogIds []*big.Int + expectedConditionalIds []*big.Int + }{ + { + name: "happy flow", + initial: []*big.Int{logIDs[0].BigInt(), logIDs[1].BigInt(), conditionalIDs[0].BigInt(), conditionalIDs[1].BigInt()}, + add: []*big.Int{logIDs[2].BigInt(), logIDs[3].BigInt(), conditionalIDs[2].BigInt(), conditionalIDs[3].BigInt()}, + remove: []*big.Int{logIDs[3].BigInt(), conditionalIDs[3].BigInt()}, + expectedLogIds: []*big.Int{logIDs[0].BigInt(), logIDs[1].BigInt(), logIDs[2].BigInt()}, + expectedConditionalIds: []*big.Int{conditionalIDs[0].BigInt(), conditionalIDs[1].BigInt(), conditionalIDs[2].BigInt()}, + }, + { + name: "empty", + initial: []*big.Int{}, + add: []*big.Int{}, + remove: []*big.Int{}, + expectedLogIds: []*big.Int{}, + expectedConditionalIds: []*big.Int{}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + al := NewActiveUpkeepList() + al.Reset(tc.initial...) + require.Equal(t, len(tc.initial), al.Size()) + for _, id := range tc.initial { + require.True(t, al.IsActive(id)) + } + al.Add(tc.add...) + for _, id := range tc.add { + require.True(t, al.IsActive(id)) + } + al.Remove(tc.remove...) 
+ for _, id := range tc.remove { + require.False(t, al.IsActive(id)) + } + logIds := al.View(types.LogTrigger) + require.Equal(t, len(tc.expectedLogIds), len(logIds)) + sort.Slice(logIds, func(i, j int) bool { + return logIds[i].Cmp(logIds[j]) < 0 + }) + for i := range logIds { + require.Equal(t, tc.expectedLogIds[i], logIds[i]) + } + conditionalIds := al.View(types.ConditionTrigger) + require.Equal(t, len(tc.expectedConditionalIds), len(conditionalIds)) + sort.Slice(conditionalIds, func(i, j int) bool { + return conditionalIds[i].Cmp(conditionalIds[j]) < 0 + }) + for i := range conditionalIds { + require.Equal(t, tc.expectedConditionalIds[i], conditionalIds[i]) + } + }) + } +} + +func TestActiveUpkeepList_error(t *testing.T) { + t.Run("if invalid or negative numbers are in the store, they are excluded from the view operation", func(t *testing.T) { + al := &activeList{} + al.items = make(map[string]bool) + al.items["not a number"] = true + al.items["-1"] = true + al.items["100"] = true + + keys := al.View(types.ConditionTrigger) + require.Equal(t, []*big.Int{big.NewInt(100)}, keys) + }) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21/custom_telemetry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21/custom_telemetry.go new file mode 100644 index 00000000..00e8da00 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21/custom_telemetry.go @@ -0,0 +1,162 @@ +package autotelemetry21 + +import ( + "context" + "encoding/hex" + "time" + + "google.golang.org/protobuf/proto" + + "github.com/goplugin/libocr/commontypes" + "github.com/goplugin/libocr/offchainreporting2plus/types" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem" + "github.com/goplugin/pluginv3.0/v2/core/static" + 
"github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type AutomationCustomTelemetryService struct { + services.StateMachine + monitoringEndpoint commontypes.MonitoringEndpoint + blockSubscriber ocr2keepers.BlockSubscriber + blockSubChanID int + threadCtrl utils.ThreadControl + lggr logger.Logger + configDigest [32]byte + contractConfigTracker types.ContractConfigTracker +} + +// NewAutomationCustomTelemetryService creates a telemetry service for new blocks and node version +func NewAutomationCustomTelemetryService(me commontypes.MonitoringEndpoint, + lggr logger.Logger, blocksub ocr2keepers.BlockSubscriber, configTracker types.ContractConfigTracker) (*AutomationCustomTelemetryService, error) { + return &AutomationCustomTelemetryService{ + monitoringEndpoint: me, + threadCtrl: utils.NewThreadControl(), + lggr: lggr.Named("AutomationCustomTelem"), + contractConfigTracker: configTracker, + blockSubscriber: blocksub, + }, nil +} + +// Start starts Custom Telemetry Service, sends 1 NodeVersion message to endpoint at start and sends new BlockNumber messages +func (e *AutomationCustomTelemetryService) Start(ctx context.Context) error { + return e.StartOnce("AutomationCustomTelemetryService", func() error { + e.lggr.Infof("Starting: Custom Telemetry Service") + _, configDetails, err := e.contractConfigTracker.LatestConfigDetails(ctx) + if err != nil { + e.lggr.Errorf("Error occurred while getting newestConfigDetails for initialization %s", err) + } else { + e.configDigest = configDetails + e.sendNodeVersionMsg() + } + e.threadCtrl.Go(func(ctx context.Context) { + minuteTicker := time.NewTicker(1 * time.Minute) + hourTicker := time.NewTicker(1 * time.Hour) + defer minuteTicker.Stop() + defer hourTicker.Stop() + for { + select { + case <-minuteTicker.C: + _, newConfigDigest, err := e.contractConfigTracker.LatestConfigDetails(ctx) + if err != nil { + e.lggr.Errorf("Error occurred while getting newestConfigDetails in configDigest loop %s", err) + } + if newConfigDigest != 
e.configDigest { + e.configDigest = newConfigDigest + e.sendNodeVersionMsg() + } + case <-hourTicker.C: + e.sendNodeVersionMsg() + case <-ctx.Done(): + return + } + } + }) + + chanID, blockSubscriberChan, blockSubErr := e.blockSubscriber.Subscribe() + if blockSubErr != nil { + e.lggr.Errorf("Block Subscriber Error: Subscribe(): %s", blockSubErr) + return blockSubErr + } + e.blockSubChanID = chanID + e.threadCtrl.Go(func(ctx context.Context) { + e.lggr.Debug("Started: Sending BlockNumber Messages") + for { + select { + case blockHistory := <-blockSubscriberChan: + // Exploratory: Debounce blocks to avoid overflow in case of re-org + latestBlockKey, err := blockHistory.Latest() + if err != nil { + e.lggr.Errorf("BlockSubscriber BlockHistory.Latest() failed: %s", err) + continue + } + e.sendBlockNumberMsg(latestBlockKey) + case <-ctx.Done(): + return + } + } + }) + return nil + }) +} + +// Close stops go routines and closes channels +func (e *AutomationCustomTelemetryService) Close() error { + return e.StopOnce("AutomationCustomTelemetryService", func() error { + e.lggr.Debug("Stopping: custom telemetry service") + e.threadCtrl.Close() + err := e.blockSubscriber.Unsubscribe(e.blockSubChanID) + if err != nil { + e.lggr.Errorf("Custom telemetry service encounters error %v when stopping", err) + return err + } + e.lggr.Infof("Stopped: Custom telemetry service") + return nil + }) +} + +func (e *AutomationCustomTelemetryService) sendNodeVersionMsg() { + vMsg := &telem.NodeVersion{ + Timestamp: uint64(time.Now().UTC().UnixMilli()), + NodeVersion: static.Version, + ConfigDigest: e.configDigest[:], + } + wrappedVMsg := &telem.AutomationTelemWrapper{ + Msg: &telem.AutomationTelemWrapper_NodeVersion{ + NodeVersion: vMsg, + }, + } + bytes, err := proto.Marshal(wrappedVMsg) + if err != nil { + e.lggr.Errorf("Error occurred while marshalling the Node Version Message %s: %v", wrappedVMsg.String(), err) + } else { + e.monitoringEndpoint.SendLog(bytes) + e.lggr.Debugf("NodeVersion 
Message Sent to Endpoint: %d", vMsg.Timestamp) + } +} + +func (e *AutomationCustomTelemetryService) sendBlockNumberMsg(blockKey ocr2keepers.BlockKey) { + blockNumMsg := &telem.BlockNumber{ + Timestamp: uint64(time.Now().UTC().UnixMilli()), + BlockNumber: uint64(blockKey.Number), + BlockHash: hex.EncodeToString(blockKey.Hash[:]), + ConfigDigest: e.configDigest[:], + } + wrappedBlockNumMsg := &telem.AutomationTelemWrapper{ + Msg: &telem.AutomationTelemWrapper_BlockNumber{ + BlockNumber: blockNumMsg, + }, + } + b, err := proto.Marshal(wrappedBlockNumMsg) + if err != nil { + e.lggr.Errorf("Error occurred while marshalling the Block Num Message %s: %v", wrappedBlockNumMsg.String(), err) + } else { + e.monitoringEndpoint.SendLog(b) + e.lggr.Debugf("BlockNumber Message Sent to Endpoint: %d", blockNumMsg.Timestamp) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21/custom_telemetry_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21/custom_telemetry_test.go new file mode 100644 index 00000000..3cffcfee --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21/custom_telemetry_test.go @@ -0,0 +1,56 @@ +package autotelemetry21 + +import ( + "sync" + "testing" + + "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + + headtracker "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/logger" + evm "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21" +) + +// const historySize = 4 +// const blockSize = int64(4) +const finality = uint32(4) + +func TestNewAutomationCustomTelemetryService(t *testing.T) { + me := &MockMonitoringEndpoint{} + lggr := logger.TestLogger(t) + var hb headtracker.HeadBroadcaster + var lp logpoller.LogPoller + + bs := evm.NewBlockSubscriber(hb, lp, finality, 
lggr) + // configTracker := &MockContractConfigTracker{} + var configTracker types.ContractConfigTracker + + service, err := NewAutomationCustomTelemetryService(me, lggr, bs, configTracker) + if err != nil { + t.Errorf("Expected no error, but got: %v", err) + } + service.monitoringEndpoint.SendLog([]byte("test")) + assert.Equal(t, me.LogCount(), 1) + service.monitoringEndpoint.SendLog([]byte("test2")) + assert.Equal(t, me.LogCount(), 2) + service.Close() +} + +type MockMonitoringEndpoint struct { + sentLogs [][]byte + lock sync.RWMutex +} + +func (me *MockMonitoringEndpoint) SendLog(log []byte) { + me.lock.Lock() + defer me.lock.Unlock() + me.sentLogs = append(me.sentLogs, log) +} + +func (me *MockMonitoringEndpoint) LogCount() int { + me.lock.RLock() + defer me.lock.RUnlock() + return len(me.sentLogs) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber.go new file mode 100644 index 00000000..42ff9fb5 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber.go @@ -0,0 +1,300 @@ +package evm + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/plugin-common/pkg/services" + + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const ( + // cleanUpInterval is the interval for cleaning up block maps + cleanUpInterval = 15 * time.Minute + // channelSize represents the channel size for head broadcaster + channelSize = 100 + // lookbackDepth decides valid trigger 
block lookback range + lookbackDepth = 1024 + // blockHistorySize decides the block history size sent to subscribers + blockHistorySize = int64(256) +) + +var ( + BlockSubscriberServiceName = "BlockSubscriber" +) + +type BlockSubscriber struct { + services.StateMachine + threadCtrl utils.ThreadControl + + mu sync.RWMutex + hb httypes.HeadBroadcaster + lp logpoller.LogPoller + headC chan *evmtypes.Head + unsubscribe func() + subscribers map[int]chan ocr2keepers.BlockHistory + blocks map[int64]string + maxSubId int + lastClearedBlock int64 + lastSentBlock int64 + latestBlock atomic.Pointer[ocr2keepers.BlockKey] + blockHistorySize int64 + blockSize int64 + finalityDepth uint32 + lggr logger.Logger +} + +func (bs *BlockSubscriber) LatestBlock() *ocr2keepers.BlockKey { + return bs.latestBlock.Load() +} + +var _ ocr2keepers.BlockSubscriber = &BlockSubscriber{} + +func NewBlockSubscriber(hb httypes.HeadBroadcaster, lp logpoller.LogPoller, finalityDepth uint32, lggr logger.Logger) *BlockSubscriber { + return &BlockSubscriber{ + threadCtrl: utils.NewThreadControl(), + hb: hb, + lp: lp, + headC: make(chan *evmtypes.Head, channelSize), + subscribers: map[int]chan ocr2keepers.BlockHistory{}, + blocks: map[int64]string{}, + blockHistorySize: blockHistorySize, + blockSize: lookbackDepth, + finalityDepth: finalityDepth, + latestBlock: atomic.Pointer[ocr2keepers.BlockKey]{}, + lggr: lggr.Named("BlockSubscriber"), + } +} + +func (bs *BlockSubscriber) getBlockRange(ctx context.Context) ([]uint64, error) { + h, err := bs.lp.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return nil, err + } + latestBlockNumber := h.BlockNumber + bs.lggr.Infof("latest block from log poller is %d", latestBlockNumber) + + var blocks []uint64 + for i := bs.blockSize - 1; i >= 0; i-- { + if latestBlockNumber-i > 0 { + blocks = append(blocks, uint64(latestBlockNumber-i)) + } + } + return blocks, nil +} + +func (bs *BlockSubscriber) initializeBlocks(ctx context.Context, blocks []uint64) error { + 
logpollerBlocks, err := bs.lp.GetBlocksRange(ctx, blocks) + if err != nil { + return err + } + for i, b := range logpollerBlocks { + if i == 0 { + bs.lastClearedBlock = b.BlockNumber - 1 + bs.lggr.Infof("lastClearedBlock is %d", bs.lastClearedBlock) + } + bs.blocks[b.BlockNumber] = b.BlockHash.Hex() + } + bs.lggr.Infof("initialize with %d blocks", len(logpollerBlocks)) + return nil +} + +func (bs *BlockSubscriber) buildHistory(block int64) ocr2keepers.BlockHistory { + var keys []ocr2keepers.BlockKey + // populate keys slice in block DES order + for i := int64(0); i < bs.blockHistorySize; i++ { + if block-i > 0 { + if h, ok := bs.blocks[block-i]; ok { + keys = append(keys, ocr2keepers.BlockKey{ + Number: ocr2keepers.BlockNumber(block - i), + Hash: common.HexToHash(h), + }) + } else { + bs.lggr.Debugf("block %d is missing", block-i) + } + } + } + return keys +} + +func (bs *BlockSubscriber) cleanup() { + bs.mu.Lock() + defer bs.mu.Unlock() + + bs.lggr.Debugf("start clearing blocks from %d to %d", bs.lastClearedBlock+1, bs.lastSentBlock-bs.blockSize) + for i := bs.lastClearedBlock + 1; i <= bs.lastSentBlock-bs.blockSize; i++ { + delete(bs.blocks, i) + } + bs.lastClearedBlock = bs.lastSentBlock - bs.blockSize + bs.lggr.Infof("lastClearedBlock is set to %d", bs.lastClearedBlock) +} + +func (bs *BlockSubscriber) initialize(ctx context.Context) { + bs.mu.Lock() + defer bs.mu.Unlock() + // initialize the blocks map with the recent blockSize blocks + blocks, err := bs.getBlockRange(ctx) + if err != nil { + bs.lggr.Errorf("failed to get block range", err) + } + err = bs.initializeBlocks(ctx, blocks) + if err != nil { + bs.lggr.Errorf("failed to get log poller blocks", err) + } + _, bs.unsubscribe = bs.hb.Subscribe(&headWrapper{headC: bs.headC, lggr: bs.lggr}) +} + +func (bs *BlockSubscriber) Start(ctx context.Context) error { + return bs.StartOnce(BlockSubscriberServiceName, func() error { + bs.lggr.Info("block subscriber started.") + bs.initialize(ctx) + // poll from head 
broadcaster channel and push to subscribers + bs.threadCtrl.Go(func(ctx context.Context) { + for { + select { + case h := <-bs.headC: + if h != nil { + bs.processHead(h) + } + case <-ctx.Done(): + return + } + } + }) + // cleanup old blocks + bs.threadCtrl.Go(func(ctx context.Context) { + ticker := time.NewTicker(cleanUpInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + bs.cleanup() + case <-ctx.Done(): + return + } + } + }) + + return nil + }) +} + +func (bs *BlockSubscriber) Close() error { + return bs.StopOnce(BlockSubscriberServiceName, func() error { + bs.lggr.Info("stop block subscriber") + bs.threadCtrl.Close() + bs.unsubscribe() + return nil + }) +} + +func (bs *BlockSubscriber) Subscribe() (int, chan ocr2keepers.BlockHistory, error) { + bs.mu.Lock() + defer bs.mu.Unlock() + + bs.maxSubId++ + subId := bs.maxSubId + newC := make(chan ocr2keepers.BlockHistory, channelSize) + bs.subscribers[subId] = newC + bs.lggr.Infof("new subscriber %d", subId) + + return subId, newC, nil +} + +func (bs *BlockSubscriber) Unsubscribe(subId int) error { + bs.mu.Lock() + defer bs.mu.Unlock() + + c, ok := bs.subscribers[subId] + if !ok { + return fmt.Errorf("subscriber %d does not exist", subId) + } + + close(c) + delete(bs.subscribers, subId) + bs.lggr.Infof("subscriber %d unsubscribed", subId) + return nil +} + +func (bs *BlockSubscriber) processHead(h *evmtypes.Head) { + bs.mu.Lock() + defer bs.mu.Unlock() + // head parent is a linked list with EVM finality depth + // when re-org happens, new heads will have pointers to the new blocks + i := int64(0) + for cp := h; cp != nil; cp = cp.Parent { + // we don't stop when a matching (block number/hash) entry is seen in the map because parent linked list may be + // cut short during a re-org if head broadcaster backfill is not complete. This can cause some re-orged blocks + // left in the map. for example, re-org happens for block 98, 99, 100. 
next head 101 from broadcaster has parent list + // of 100, so block 100 and 101 are updated. when next head 102 arrives, it has full parent history of finality depth. + // if we stop when we see a block number/hash match, we won't look back and correct block 98 and 99. + // hence, we make a compromise here and check previous max(finality depth, blockSize) blocks and update the map. + existingHash, ok := bs.blocks[cp.Number] + if !ok { + bs.lggr.Debugf("filling block %d with new hash %s", cp.Number, cp.Hash.Hex()) + } else if existingHash != cp.Hash.Hex() { + bs.lggr.Warnf("overriding block %d old hash %s with new hash %s due to re-org", cp.Number, existingHash, cp.Hash.Hex()) + } + bs.blocks[cp.Number] = cp.Hash.Hex() + i++ + if i > int64(bs.finalityDepth) || i > bs.blockSize { + break + } + } + bs.lggr.Debugf("blocks block %d hash is %s", h.Number, h.Hash.Hex()) + + history := bs.buildHistory(h.Number) + block := &ocr2keepers.BlockKey{ + Number: ocr2keepers.BlockNumber(h.Number), + } + copy(block.Hash[:], h.Hash[:]) + bs.latestBlock.Store(block) + bs.lastSentBlock = h.Number + // send history to all subscribers + for _, subC := range bs.subscribers { + // wrapped in a select to not get blocked by certain subscribers + select { + case subC <- history: + default: + bs.lggr.Warnf("subscriber channel is full, dropping block history with length %d", len(history)) + } + } + + bs.lggr.Debugf("published block history with length %d and latestBlock %d to %d subscriber(s)", len(history), bs.latestBlock.Load(), len(bs.subscribers)) +} + +func (bs *BlockSubscriber) queryBlocksMap(bn int64) (string, bool) { + bs.mu.RLock() + defer bs.mu.RUnlock() + v, ok := bs.blocks[bn] + return v, ok +} + +type headWrapper struct { + headC chan *evmtypes.Head + lggr logger.Logger +} + +func (w *headWrapper) OnNewLongestChain(_ context.Context, head *evmtypes.Head) { + if head != nil { + select { + case w.headC <- head: + default: + w.lggr.Debugf("head channel is full, discarding head %+v", 
head) + } + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber_test.go new file mode 100644 index 00000000..bb683ad9 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/block_subscriber_test.go @@ -0,0 +1,460 @@ +package evm + +import ( + "fmt" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + commonmocks "github.com/goplugin/pluginv3.0/v2/common/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +const historySize = 4 +const blockSize = int64(4) +const finality = uint32(4) + +func TestBlockSubscriber_Subscribe(t *testing.T) { + lggr := logger.TestLogger(t) + var hb types.HeadBroadcaster + var lp logpoller.LogPoller + + bs := NewBlockSubscriber(hb, lp, finality, lggr) + bs.blockHistorySize = historySize + bs.blockSize = blockSize + subId, _, err := bs.Subscribe() + assert.Nil(t, err) + assert.Equal(t, subId, 1) + subId, _, err = bs.Subscribe() + assert.Nil(t, err) + assert.Equal(t, subId, 2) + subId, _, err = bs.Subscribe() + assert.Nil(t, err) + assert.Equal(t, subId, 3) +} + +func TestBlockSubscriber_Unsubscribe(t *testing.T) { + lggr := logger.TestLogger(t) + var hb types.HeadBroadcaster + var lp logpoller.LogPoller + + bs := NewBlockSubscriber(hb, lp, finality, lggr) + bs.blockHistorySize = historySize + bs.blockSize = blockSize + subId, _, err := bs.Subscribe() + assert.Nil(t, err) + assert.Equal(t, subId, 1) + 
subId, _, err = bs.Subscribe() + assert.Nil(t, err) + assert.Equal(t, subId, 2) + err = bs.Unsubscribe(1) + assert.Nil(t, err) +} + +func TestBlockSubscriber_Unsubscribe_Failure(t *testing.T) { + lggr := logger.TestLogger(t) + var hb types.HeadBroadcaster + var lp logpoller.LogPoller + + bs := NewBlockSubscriber(hb, lp, finality, lggr) + bs.blockHistorySize = historySize + bs.blockSize = blockSize + err := bs.Unsubscribe(2) + assert.Equal(t, err.Error(), "subscriber 2 does not exist") +} + +func TestBlockSubscriber_GetBlockRange(t *testing.T) { + lggr := logger.TestLogger(t) + var hb types.HeadBroadcaster + + tests := []struct { + Name string + LatestBlock int64 + LatestBlockErr error + ExpectedBlocks []uint64 + }{ + { + Name: "failed to get latest block", + LatestBlockErr: fmt.Errorf("failed to get latest block"), + }, + { + Name: "get block range", + LatestBlock: 100, + ExpectedBlocks: []uint64{97, 98, 99, 100}, + }, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + lp := new(mocks.LogPoller) + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: tc.LatestBlock}, tc.LatestBlockErr) + bs := NewBlockSubscriber(hb, lp, finality, lggr) + bs.blockHistorySize = historySize + bs.blockSize = blockSize + blocks, err := bs.getBlockRange(testutils.Context(t)) + + if tc.LatestBlockErr != nil { + assert.Equal(t, tc.LatestBlockErr.Error(), err.Error()) + } else { + assert.Equal(t, tc.ExpectedBlocks, blocks) + } + }) + } +} + +func TestBlockSubscriber_InitializeBlocks(t *testing.T) { + lggr := logger.TestLogger(t) + var hb types.HeadBroadcaster + + tests := []struct { + Name string + Blocks []uint64 + PollerBlocks []logpoller.LogPollerBlock + LastClearedBlock int64 + Error error + }{ + { + Name: "failed to get latest block", + Error: fmt.Errorf("failed to get log poller blocks"), + }, + { + Name: "get block range", + Blocks: []uint64{97, 98, 99, 100}, + PollerBlocks: []logpoller.LogPollerBlock{ + { + BlockNumber: 97, + 
BlockHash: common.HexToHash("0x5e7fadfc14e1cfa9c05a91128c16a20c6cbc3be38b4723c3d482d44bf9c0e07b"), + }, + { + BlockNumber: 98, + BlockHash: common.HexToHash("0xaf3f8b36a27837e9f1ea3b4da7cdbf2ce0bdf7ef4e87d23add83b19438a2fcba"), + }, + { + BlockNumber: 99, + BlockHash: common.HexToHash("0xa7ac5bbc905b81f3a2ad9fb8ef1fe45f4a95768df456736952e4ec6c21296abe"), + }, + { + BlockNumber: 100, + BlockHash: common.HexToHash("0xa7ac5bbc905b81f3a2ad9fb8ef1fe45f4a95768df456736952e4ec6c21296abe"), + }, + }, + LastClearedBlock: 96, + }, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + lp := new(mocks.LogPoller) + lp.On("GetBlocksRange", mock.Anything, tc.Blocks, mock.Anything).Return(tc.PollerBlocks, tc.Error) + bs := NewBlockSubscriber(hb, lp, finality, lggr) + bs.blockHistorySize = historySize + bs.blockSize = blockSize + err := bs.initializeBlocks(testutils.Context(t), tc.Blocks) + + if tc.Error != nil { + assert.Equal(t, tc.Error.Error(), err.Error()) + } else { + for _, b := range tc.PollerBlocks { + h, ok := bs.blocks[b.BlockNumber] + assert.True(t, ok) + assert.Equal(t, b.BlockHash.Hex(), h) + } + assert.Equal(t, tc.LastClearedBlock, bs.lastClearedBlock) + } + }) + } +} + +func TestBlockSubscriber_BuildHistory(t *testing.T) { + lggr := logger.TestLogger(t) + var hb types.HeadBroadcaster + lp := new(mocks.LogPoller) + + tests := []struct { + Name string + Blocks map[int64]string + Block int64 + ExpectedHistory ocr2keepers.BlockHistory + }{ + { + Name: "build history", + Blocks: map[int64]string{ + 100: "0xaf3f8b36a27837e9f1ea3b4da7cdbf2ce0bdf7ef4e87d23add83b19438a2fcba", + 98: "0xc20c7b47466c081a44a3b168994e89affe85cb894547845d938f923b67c633c0", + 97: "0xa7ac5bbc905b81f3a2ad9fb8ef1fe45f4a95768df456736952e4ec6c21296abe", + 95: "0xc20c7b47466c081a44a3b168994e89affe85cb894547845d938f923b67c633c0", + }, + Block: 100, + ExpectedHistory: ocr2keepers.BlockHistory{ + ocr2keepers.BlockKey{ + Number: 100, + Hash: 
common.HexToHash("0xaf3f8b36a27837e9f1ea3b4da7cdbf2ce0bdf7ef4e87d23add83b19438a2fcba"), + }, + ocr2keepers.BlockKey{ + Number: 98, + Hash: common.HexToHash("0xc20c7b47466c081a44a3b168994e89affe85cb894547845d938f923b67c633c0"), + }, + ocr2keepers.BlockKey{ + Number: 97, + Hash: common.HexToHash("0xa7ac5bbc905b81f3a2ad9fb8ef1fe45f4a95768df456736952e4ec6c21296abe"), + }, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + bs := NewBlockSubscriber(hb, lp, finality, lggr) + bs.blockHistorySize = historySize + bs.blockSize = blockSize + bs.blocks = tc.Blocks + + history := bs.buildHistory(tc.Block) + assert.Equal(t, history, tc.ExpectedHistory) + }) + } +} + +func TestBlockSubscriber_Cleanup(t *testing.T) { + lggr := logger.TestLogger(t) + var hb types.HeadBroadcaster + lp := new(mocks.LogPoller) + + tests := []struct { + Name string + Blocks map[int64]string + LastClearedBlock int64 + LastSentBlock int64 + ExpectedLastClearedBlock int64 + ExpectedBlocks map[int64]string + }{ + { + Name: "build history", + Blocks: map[int64]string{ + 102: "0xaf3f8b36a27837e9f1ea3b4da7cdbf2ce0bdf7ef4e87d23add83b19438a2fcba", + 100: "0xaf3f8b36a27837e9f1ea3b4da7cdbf2ce0bdf7ef4e87d23add83b19438a2fcba", + 98: "0xc20c7b47466c081a44a3b168994e89affe85cb894547845d938f923b67c633c0", + 95: "0xc20c7b47466c081a44a3b168994e89affe85cb894547845d938f923b67c633c0", + }, + LastClearedBlock: 94, + LastSentBlock: 101, + ExpectedLastClearedBlock: 97, + ExpectedBlocks: map[int64]string{ + 102: "0xaf3f8b36a27837e9f1ea3b4da7cdbf2ce0bdf7ef4e87d23add83b19438a2fcba", + 100: "0xaf3f8b36a27837e9f1ea3b4da7cdbf2ce0bdf7ef4e87d23add83b19438a2fcba", + 98: "0xc20c7b47466c081a44a3b168994e89affe85cb894547845d938f923b67c633c0", + }, + }, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + bs := NewBlockSubscriber(hb, lp, finality, lggr) + bs.blockHistorySize = historySize + bs.blockSize = blockSize + bs.blocks = tc.Blocks + bs.lastClearedBlock = tc.LastClearedBlock + 
bs.lastSentBlock = tc.LastSentBlock + bs.cleanup() + + assert.Equal(t, tc.ExpectedLastClearedBlock, bs.lastClearedBlock) + assert.Equal(t, tc.ExpectedBlocks, bs.blocks) + }) + } +} + +func TestBlockSubscriber_Start(t *testing.T) { + lggr := logger.TestLogger(t) + hb := commonmocks.NewHeadBroadcaster[*evmtypes.Head, common.Hash](t) + hb.On("Subscribe", mock.Anything).Return(&evmtypes.Head{Number: 42}, func() {}) + lp := new(mocks.LogPoller) + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: 100}, nil) + blocks := []uint64{97, 98, 99, 100} + pollerBlocks := []logpoller.LogPollerBlock{ + { + BlockNumber: 97, + BlockHash: common.HexToHash("0xda2f9d1359eadd7b93338703adc07d942021a78195564038321ef53f23f87333"), + }, + { + BlockNumber: 98, + BlockHash: common.HexToHash("0xc20c7b47466c081a44a3b168994e89affe85cb894547845d938f923b67c633c0"), + }, + { + BlockNumber: 99, + BlockHash: common.HexToHash("0x9bc2b51e147f9cad05f1614b7f1d8181cb24c544cbcf841f3155e54e752a3b44"), + }, + { + BlockNumber: 100, + BlockHash: common.HexToHash("0x5e7fadfc14e1cfa9c05a91128c16a20c6cbc3be38b4723c3d482d44bf9c0e07b"), + }, + } + + lp.On("GetBlocksRange", mock.Anything, blocks, mock.Anything).Return(pollerBlocks, nil) + + bs := NewBlockSubscriber(hb, lp, finality, lggr) + bs.blockHistorySize = historySize + bs.blockSize = blockSize + err := bs.Start(testutils.Context(t)) + assert.Nil(t, err) + + h97 := evmtypes.Head{ + Number: 97, + Hash: common.HexToHash("0xda2f9d1359eadd7b93338703adc07d942021a78195564038321ef53f23f87333"), + Parent: nil, + } + h98 := evmtypes.Head{ + Number: 98, + Hash: common.HexToHash("0xc20c7b47466c081a44a3b168994e89affe85cb894547845d938f923b67c633c0"), + Parent: &h97, + } + h99 := evmtypes.Head{ + Number: 99, + Hash: common.HexToHash("0x9bc2b51e147f9cad05f1614b7f1d8181cb24c544cbcf841f3155e54e752a3b44"), + Parent: &h98, + } + h100 := evmtypes.Head{ + Number: 100, + Hash: 
common.HexToHash("0x5e7fadfc14e1cfa9c05a91128c16a20c6cbc3be38b4723c3d482d44bf9c0e07b"), + Parent: &h99, + } + + // no subscribers yet + bs.headC <- &h100 + + expectedBlocks := map[int64]string{ + 97: "0xda2f9d1359eadd7b93338703adc07d942021a78195564038321ef53f23f87333", + 98: "0xc20c7b47466c081a44a3b168994e89affe85cb894547845d938f923b67c633c0", + 99: "0x9bc2b51e147f9cad05f1614b7f1d8181cb24c544cbcf841f3155e54e752a3b44", + 100: "0x5e7fadfc14e1cfa9c05a91128c16a20c6cbc3be38b4723c3d482d44bf9c0e07b", + } + + // sleep 100 milli to wait for the go routine to finish + time.Sleep(100 * time.Millisecond) + assert.Equal(t, int64(historySize), bs.blockHistorySize) + assert.Equal(t, int64(96), bs.lastClearedBlock) + assert.Equal(t, int64(100), bs.lastSentBlock) + assert.Equal(t, expectedBlocks, bs.blocks) + + // add 1 subscriber + subId1, c1, err := bs.Subscribe() + assert.Nil(t, err) + assert.Equal(t, 1, subId1) + + h101 := &evmtypes.Head{ + Number: 101, + Hash: common.HexToHash("0xc20c7b47466c081a44a3b168994e89affe85cb894547845d938f923b67c633c0"), + Parent: &h100, + } + bs.headC <- h101 + + time.Sleep(100 * time.Millisecond) + bk1 := <-c1 + assert.Equal(t, ocr2keepers.BlockHistory{ + ocr2keepers.BlockKey{ + Number: 101, + Hash: common.HexToHash("0xc20c7b47466c081a44a3b168994e89affe85cb894547845d938f923b67c633c0"), + }, + ocr2keepers.BlockKey{ + Number: 100, + Hash: common.HexToHash("0x5e7fadfc14e1cfa9c05a91128c16a20c6cbc3be38b4723c3d482d44bf9c0e07b"), + }, + ocr2keepers.BlockKey{ + Number: 99, + Hash: common.HexToHash("0x9bc2b51e147f9cad05f1614b7f1d8181cb24c544cbcf841f3155e54e752a3b44"), + }, + ocr2keepers.BlockKey{ + Number: 98, + Hash: common.HexToHash("0xc20c7b47466c081a44a3b168994e89affe85cb894547845d938f923b67c633c0"), + }, + }, bk1) + + // add 2nd subscriber + subId2, c2, err := bs.Subscribe() + assert.Nil(t, err) + assert.Equal(t, 2, subId2) + + // re-org happens + new99 := &evmtypes.Head{ + Number: 99, + Hash: 
common.HexToHash("0x70c03acc4ddbfb253ba41a25dc13fb21b25da8b63bcd1aa7fb55713d33a36c71"), + Parent: &h98, + } + new100 := &evmtypes.Head{ + Number: 100, + Hash: common.HexToHash("0x8a876b62d252e63e16cf3487db3486c0a7c0a8e06bc3792a3b116c5ca480503f"), + Parent: new99, + } + new101 := &evmtypes.Head{ + Number: 101, + Hash: common.HexToHash("0x41b5842b8847dcf834e39556d2ac51cc7d960a7de9471ec504673d0038fd6c8e"), + Parent: new100, + } + + new102 := &evmtypes.Head{ + Number: 102, + Hash: common.HexToHash("0x9ac1ebc307554cf1bcfcc2a49462278e89d6878d613a33df38a64d0aeac971b5"), + Parent: new101, + } + + bs.headC <- new102 + + time.Sleep(100 * time.Millisecond) + bk1 = <-c1 + assert.Equal(t, + ocr2keepers.BlockHistory{ + ocr2keepers.BlockKey{ + Number: 102, + Hash: common.HexToHash("0x9ac1ebc307554cf1bcfcc2a49462278e89d6878d613a33df38a64d0aeac971b5"), + }, + ocr2keepers.BlockKey{ + Number: 101, + Hash: common.HexToHash("0x41b5842b8847dcf834e39556d2ac51cc7d960a7de9471ec504673d0038fd6c8e"), + }, + ocr2keepers.BlockKey{ + Number: 100, + Hash: common.HexToHash("0x8a876b62d252e63e16cf3487db3486c0a7c0a8e06bc3792a3b116c5ca480503f"), + }, + ocr2keepers.BlockKey{ + Number: 99, + Hash: common.HexToHash("0x70c03acc4ddbfb253ba41a25dc13fb21b25da8b63bcd1aa7fb55713d33a36c71"), + }, + }, + bk1, + ) + + bk2 := <-c2 + assert.Equal(t, + ocr2keepers.BlockHistory{ + ocr2keepers.BlockKey{ + Number: 102, + Hash: common.HexToHash("0x9ac1ebc307554cf1bcfcc2a49462278e89d6878d613a33df38a64d0aeac971b5"), + }, + ocr2keepers.BlockKey{ + Number: 101, + Hash: common.HexToHash("0x41b5842b8847dcf834e39556d2ac51cc7d960a7de9471ec504673d0038fd6c8e"), + }, + ocr2keepers.BlockKey{ + Number: 100, + Hash: common.HexToHash("0x8a876b62d252e63e16cf3487db3486c0a7c0a8e06bc3792a3b116c5ca480503f"), + }, + ocr2keepers.BlockKey{ + Number: 99, + Hash: common.HexToHash("0x70c03acc4ddbfb253ba41a25dc13fb21b25da8b63bcd1aa7fb55713d33a36c71"), + }, + }, + bk2, + ) + + assert.Equal(t, int64(102), bs.lastSentBlock) + assert.Equal(t, 
int64(96), bs.lastClearedBlock) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/abi.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/abi.go new file mode 100644 index 00000000..b618515b --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/abi.go @@ -0,0 +1,14 @@ +package core + +import ( + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_utils_2_1" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_log_automation" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/streams_lookup_compatible_interface" +) + +var UtilsABI = types.MustGetABI(automation_utils_2_1.AutomationUtilsABI) +var RegistryABI = types.MustGetABI(iregistry21.IKeeperRegistryMasterABI) +var StreamsCompatibleABI = types.MustGetABI(streams_lookup_compatible_interface.StreamsLookupCompatibleInterfaceABI) +var ILogAutomationABI = types.MustGetABI(i_log_automation.ILogAutomationABI) diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/interfaces.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/interfaces.go new file mode 100644 index 00000000..09774683 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/interfaces.go @@ -0,0 +1,14 @@ +package core + +import ( + "context" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" +) + +// UpkeepStateReader is the interface for reading the current state of upkeeps. 
+// +//go:generate mockery --quiet --name UpkeepStateReader --output ./mocks/ --case=underscore +type UpkeepStateReader interface { + SelectByWorkIDs(ctx context.Context, workIDs ...string) ([]ocr2keepers.UpkeepState, error) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/mocks/upkeep_state_reader.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/mocks/upkeep_state_reader.go new file mode 100644 index 00000000..50d0781f --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/mocks/upkeep_state_reader.go @@ -0,0 +1,67 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + automation "github.com/goplugin/plugin-common/pkg/types/automation" + + mock "github.com/stretchr/testify/mock" +) + +// UpkeepStateReader is an autogenerated mock type for the UpkeepStateReader type +type UpkeepStateReader struct { + mock.Mock +} + +// SelectByWorkIDs provides a mock function with given fields: ctx, workIDs +func (_m *UpkeepStateReader) SelectByWorkIDs(ctx context.Context, workIDs ...string) ([]automation.UpkeepState, error) { + _va := make([]interface{}, len(workIDs)) + for _i := range workIDs { + _va[_i] = workIDs[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SelectByWorkIDs") + } + + var r0 []automation.UpkeepState + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ...string) ([]automation.UpkeepState, error)); ok { + return rf(ctx, workIDs...) + } + if rf, ok := ret.Get(0).(func(context.Context, ...string) []automation.UpkeepState); ok { + r0 = rf(ctx, workIDs...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]automation.UpkeepState) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ...string) error); ok { + r1 = rf(ctx, workIDs...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewUpkeepStateReader creates a new instance of UpkeepStateReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewUpkeepStateReader(t interface { + mock.TestingT + Cleanup(func()) +}) *UpkeepStateReader { + mock := &UpkeepStateReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/payload.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/payload.go new file mode 100644 index 00000000..ca72f70f --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/payload.go @@ -0,0 +1,40 @@ +package core + +import ( + "encoding/hex" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/crypto" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" +) + +var ( + ErrInvalidUpkeepID = fmt.Errorf("invalid upkeepID") +) + +func UpkeepWorkID(uid ocr2keepers.UpkeepIdentifier, trigger ocr2keepers.Trigger) string { + var triggerExtBytes []byte + if trigger.LogTriggerExtension != nil { + triggerExtBytes = trigger.LogTriggerExtension.LogIdentifier() + } + hash := crypto.Keccak256(append(uid[:], triggerExtBytes...)) + return hex.EncodeToString(hash[:]) +} + +func NewUpkeepPayload(id *big.Int, trigger ocr2keepers.Trigger, checkData []byte) (ocr2keepers.UpkeepPayload, error) { + uid := &ocr2keepers.UpkeepIdentifier{} + ok := uid.FromBigInt(id) + if !ok { + return ocr2keepers.UpkeepPayload{}, ErrInvalidUpkeepID + } + p := ocr2keepers.UpkeepPayload{ + UpkeepID: *uid, + Trigger: trigger, + CheckData: checkData, + } + // set work id based on upkeep id and trigger + p.WorkID = UpkeepWorkID(*uid, trigger) + return p, nil +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/payload_test.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/payload_test.go new file mode 100644 index 00000000..ef526da7 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/payload_test.go @@ -0,0 +1,151 @@ +package core + +import ( + "math/big" + "testing" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" +) + +func TestWorkID(t *testing.T) { + tests := []struct { + name string + upkeepID string + trigger ocr2keepers.Trigger + expected string + }{ + { + name: "happy flow no extension", + upkeepID: "12345", + trigger: ocr2keepers.Trigger{ + BlockNumber: 123, + BlockHash: common.HexToHash("0xabcdef"), + }, + expected: "e546b0a52c2879744f6def0fb483d581dc6d205de83af8440456804dd8b62380", + }, + { + name: "empty trigger", + upkeepID: "12345", + trigger: ocr2keepers.Trigger{}, + // same as with no extension + expected: "e546b0a52c2879744f6def0fb483d581dc6d205de83af8440456804dd8b62380", + }, + { + name: "happy flow with extension", + upkeepID: GenUpkeepID(types.LogTrigger, "12345").String(), + trigger: ocr2keepers.Trigger{ + BlockNumber: 123, + BlockHash: common.HexToHash("0xabcdef"), + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + Index: 1, + TxHash: common.HexToHash("0x12345"), + BlockHash: common.HexToHash("0xabcdef"), + }, + }, + expected: "aaa208331dfafff7a681e3358d082a2e78633dd05c8fb2817c391888cadb2912", + }, + { + name: "happy path example from an actual tx", + upkeepID: "57755329819103678328139927896464733492677608573736038892412245689671711489918", + trigger: ocr2keepers.Trigger{ + BlockNumber: 39344455, + BlockHash: common.HexToHash("0xb41258d18cd44ebf7a0d70de011f2bc4a67c9b68e8b6dada864045d8543bb020"), + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + Index: 41, + TxHash: 
common.HexToHash("0x44079b1b33aff337dbf17b9e12c5724ecab979c50c8201a9814a488ff3e22384"), + BlockHash: common.HexToHash("0xb41258d18cd44ebf7a0d70de011f2bc4a67c9b68e8b6dada864045d8543bb020"), + }, + }, + expected: "ef1b6acac8aa3682a8a08f666a13cfa165f7e811a16ea9fa0817f437fc4d110d", + }, + { + name: "empty upkeepID", + upkeepID: "0", + trigger: ocr2keepers.Trigger{ + BlockNumber: 123, + BlockHash: common.HexToHash("0xabcdef"), + }, + expected: "290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + // Convert the string to a big.Int + var id big.Int + _, success := id.SetString(tc.upkeepID, 10) + if !success { + t.Fatal("Invalid big integer value") + } + uid := &ocr2keepers.UpkeepIdentifier{} + ok := uid.FromBigInt(&id) + if !ok { + t.Fatal("Invalid upkeep identifier") + } + + res := UpkeepWorkID(*uid, tc.trigger) + assert.Equal(t, tc.expected, res, "UpkeepWorkID mismatch") + }) + } +} + +func TestNewUpkeepPayload(t *testing.T) { + tests := []struct { + name string + upkeepID *big.Int + upkeepType types.UpkeepType + trigger ocr2keepers.Trigger + check []byte + errored bool + workID string + }{ + { + name: "happy flow no extension", + upkeepID: big.NewInt(111), + upkeepType: types.ConditionTrigger, + trigger: ocr2keepers.Trigger{ + BlockNumber: 11, + BlockHash: common.HexToHash("0x11111"), + }, + check: []byte("check-data-111"), + workID: "39f2babe526038520877fc7c33d81accf578af4a06c5fa6b0d038cae36e12711", + }, + { + name: "happy flow with extension", + upkeepID: big.NewInt(111), + upkeepType: types.LogTrigger, + trigger: ocr2keepers.Trigger{ + BlockNumber: 11, + BlockHash: common.HexToHash("0x11111"), + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + Index: 1, + TxHash: common.HexToHash("0x11111"), + }, + }, + check: []byte("check-data-111"), + workID: "d8e7c8907a0b60b637ce71ff4f757edf076e270d52c51f6e4d46a3b0696e0a39", + }, + } + + for _, tc := range tests { + 
t.Run(tc.name, func(t *testing.T) { + payload, err := NewUpkeepPayload( + tc.upkeepID, + tc.trigger, + tc.check, + ) + if tc.errored { + assert.Error(t, err) + return + } + assert.NoError(t, err) + + assert.Equal(t, tc.workID, payload.WorkID) + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/testutil.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/testutil.go new file mode 100644 index 00000000..5e2c6e96 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/testutil.go @@ -0,0 +1,32 @@ +package core + +import ( + "math/big" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/common" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" +) + +// GenUpkeepID generates an ocr2keepers.UpkeepIdentifier with a specific UpkeepType and some random string +func GenUpkeepID(uType types.UpkeepType, rand string) ocr2keepers.UpkeepIdentifier { + b := append([]byte{1}, common.LeftPadBytes([]byte{uint8(uType)}, 15)...) + b = append(b, []byte(rand)...) 
+ b = common.RightPadBytes(b, 32-len(b)) + if len(b) > 32 { + b = b[:32] + } + var id [32]byte + copy(id[:], b) + return ocr2keepers.UpkeepIdentifier(id) +} + +// UpkeepIDFromInt converts an int string to ocr2keepers.UpkeepIdentifier +func UpkeepIDFromInt(id string) ocr2keepers.UpkeepIdentifier { + uid := &ocr2keepers.UpkeepIdentifier{} + idInt, _ := big.NewInt(0).SetString(id, 10) + uid.FromBigInt(idInt) + return *uid +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/trigger.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/trigger.go new file mode 100644 index 00000000..0417d2ef --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/trigger.go @@ -0,0 +1,107 @@ +package core + +import ( + "fmt" + "math/big" + "strings" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/accounts/abi" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_utils_2_1" +) + +type triggerWrapper = automation_utils_2_1.KeeperRegistryBase21LogTrigger + +var ErrABINotParsable = fmt.Errorf("error parsing abi") + +// PackTrigger packs the trigger data according to the upkeep type of the given id. it will remove the first 4 bytes of function selector. 
+func PackTrigger(id *big.Int, trig triggerWrapper) ([]byte, error) { + var trigger []byte + var err error + + // construct utils abi + utilsABI, err := abi.JSON(strings.NewReader(automation_utils_2_1.AutomationUtilsABI)) + if err != nil { + return nil, fmt.Errorf("%w: %s", ErrABINotParsable, err) + } + + // pack trigger based on upkeep type + upkeepType, ok := getUpkeepTypeFromBigInt(id) + if !ok { + return nil, ErrInvalidUpkeepID + } + switch upkeepType { + case types.ConditionTrigger: + trig := automation_utils_2_1.KeeperRegistryBase21ConditionalTrigger{ + BlockNum: trig.BlockNum, + BlockHash: trig.BlockHash, + } + trigger, err = utilsABI.Pack("_conditionalTrigger", &trig) + case types.LogTrigger: + logTrig := automation_utils_2_1.KeeperRegistryBase21LogTrigger{ + BlockNum: trig.BlockNum, + BlockHash: trig.BlockHash, + LogBlockHash: trig.LogBlockHash, + LogIndex: trig.LogIndex, + TxHash: trig.TxHash, + } + trigger, err = utilsABI.Pack("_logTrigger", &logTrig) + default: + err = fmt.Errorf("unknown trigger type: %d", upkeepType) + } + if err != nil { + return nil, err + } + return trigger[4:], nil +} + +// UnpackTrigger unpacks the trigger from the given raw data, according to the upkeep type of the given id. 
+func UnpackTrigger(id *big.Int, raw []byte) (triggerWrapper, error) { + // construct utils abi + utilsABI, err := abi.JSON(strings.NewReader(automation_utils_2_1.AutomationUtilsABI)) + if err != nil { + return triggerWrapper{}, fmt.Errorf("%w: %s", ErrABINotParsable, err) + } + + upkeepType, ok := getUpkeepTypeFromBigInt(id) + if !ok { + return triggerWrapper{}, ErrInvalidUpkeepID + } + switch upkeepType { + case types.ConditionTrigger: + unpacked, err := utilsABI.Methods["_conditionalTrigger"].Inputs.Unpack(raw) + if err != nil { + return triggerWrapper{}, fmt.Errorf("%w: failed to unpack conditional trigger", err) + } + converted, ok := abi.ConvertType(unpacked[0], new(automation_utils_2_1.KeeperRegistryBase21ConditionalTrigger)).(*automation_utils_2_1.KeeperRegistryBase21ConditionalTrigger) + if !ok { + return triggerWrapper{}, fmt.Errorf("failed to convert type") + } + triggerW := triggerWrapper{ + BlockNum: converted.BlockNum, + } + copy(triggerW.BlockHash[:], converted.BlockHash[:]) + return triggerW, nil + case types.LogTrigger: + unpacked, err := utilsABI.Methods["_logTrigger"].Inputs.Unpack(raw) + if err != nil { + return triggerWrapper{}, fmt.Errorf("%w: failed to unpack log trigger", err) + } + converted, ok := abi.ConvertType(unpacked[0], new(automation_utils_2_1.KeeperRegistryBase21LogTrigger)).(*automation_utils_2_1.KeeperRegistryBase21LogTrigger) + if !ok { + return triggerWrapper{}, fmt.Errorf("failed to convert type") + } + triggerW := triggerWrapper{ + BlockNum: converted.BlockNum, + LogIndex: converted.LogIndex, + } + copy(triggerW.BlockHash[:], converted.BlockHash[:]) + copy(triggerW.TxHash[:], converted.TxHash[:]) + copy(triggerW.LogBlockHash[:], converted.LogBlockHash[:]) + return triggerW, nil + default: + return triggerWrapper{}, fmt.Errorf("unknown trigger type: %d", upkeepType) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/trigger_test.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/trigger_test.go new file mode 100644 index 00000000..2c680f13 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/trigger_test.go @@ -0,0 +1,97 @@ +package core + +import ( + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/assert" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" +) + +func TestPackUnpackTrigger(t *testing.T) { + tests := []struct { + name string + id []byte + trigger triggerWrapper + encoded []byte + err error + }{ + { + "happy flow log trigger", + append([]byte{1}, common.LeftPadBytes([]byte{1}, 15)...), + triggerWrapper{ + BlockNum: 1, + BlockHash: common.HexToHash("0x01111111"), + LogIndex: 1, + TxHash: common.HexToHash("0x01111111"), + LogBlockHash: common.HexToHash("0x01111abc"), + }, + func() []byte { + b, _ := hexutil.Decode("0x0000000000000000000000000000000000000000000000000000000001111abc0000000000000000000000000000000000000000000000000000000001111111000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000001111111") + return b + }(), + nil, + }, + { + "happy flow conditional trigger", + append([]byte{1}, common.LeftPadBytes([]byte{0}, 15)...), + triggerWrapper{ + BlockNum: 1, + BlockHash: common.HexToHash("0x01111111"), + }, + func() []byte { + b, _ := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000001111111") + return b + }(), + nil, + }, + { + "invalid type", + append([]byte{1}, common.LeftPadBytes([]byte{8}, 15)...), + triggerWrapper{ + BlockNum: 1, + BlockHash: common.HexToHash("0x01111111"), + }, + []byte{}, + fmt.Errorf("unknown trigger type: %d", 8), + }, + } + + for _, tc := range tests { + 
t.Run(tc.name, func(t *testing.T) { + var idBytes [32]byte + copy(idBytes[:], tc.id) + id := ocr2keepers.UpkeepIdentifier(idBytes) + + encoded, err := PackTrigger(id.BigInt(), tc.trigger) + if tc.err != nil { + assert.EqualError(t, err, tc.err.Error()) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.encoded, encoded) + decoded, err := UnpackTrigger(id.BigInt(), encoded) + assert.NoError(t, err) + assert.Equal(t, tc.trigger.BlockNum, decoded.BlockNum) + } + }) + } + + t.Run("unpacking invalid trigger", func(t *testing.T) { + _, err := UnpackTrigger(big.NewInt(0), []byte{1, 2, 3}) + assert.Error(t, err) + }) + + t.Run("unpacking unknown type", func(t *testing.T) { + uid := append([]byte{1}, common.LeftPadBytes([]byte{8}, 15)...) + var idBytes [32]byte + copy(idBytes[:], uid) + id := ocr2keepers.UpkeepIdentifier(idBytes) + decoded, _ := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000001111111") + _, err := UnpackTrigger(id.BigInt(), decoded) + assert.EqualError(t, err, "unknown trigger type: 8") + }) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/type.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/type.go new file mode 100644 index 00000000..096d000d --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/type.go @@ -0,0 +1,36 @@ +package core + +import ( + "math/big" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" +) + +const ( + // upkeepTypeStartIndex is the index where the upkeep type bytes start. + // for 2.1 we use 11 zeros (reserved bytes for future use) + // and 1 byte to represent the type, with index equal upkeepTypeByteIndex + upkeepTypeStartIndex = 4 + // upkeepTypeByteIndex is the index of the byte that holds the upkeep type. 
+ upkeepTypeByteIndex = 15 +) + +// GetUpkeepType returns the upkeep type from the given ID. +// it follows the same logic as the contract, but performs it locally. +func GetUpkeepType(id ocr2keepers.UpkeepIdentifier) types.UpkeepType { + for i := upkeepTypeStartIndex; i < upkeepTypeByteIndex; i++ { + if id[i] != 0 { // old id + return types.ConditionTrigger + } + } + typeByte := id[upkeepTypeByteIndex] + return types.UpkeepType(typeByte) +} + +func getUpkeepTypeFromBigInt(id *big.Int) (types.UpkeepType, bool) { + uid := &ocr2keepers.UpkeepIdentifier{} + ok := uid.FromBigInt(id) + return GetUpkeepType(*uid), ok +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/type_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/type_test.go new file mode 100644 index 00000000..53b89957 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/type_test.go @@ -0,0 +1,57 @@ +package core + +import ( + "math/big" + "testing" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/stretchr/testify/assert" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" +) + +func TestGetUpkeepType(t *testing.T) { + tests := []struct { + name string + upkeepID []byte + upkeepType types.UpkeepType + }{ + { + "zeroed id", + big.NewInt(0).Bytes(), + types.ConditionTrigger, + }, + { + "old id", + []byte("5820911532554020907796191562093071158274499580927271776163559390280294438608"), + types.ConditionTrigger, + }, + { + "condition trigger", + GenUpkeepID(types.ConditionTrigger, "").BigInt().Bytes(), + types.ConditionTrigger, + }, + { + "log trigger", + GenUpkeepID(types.LogTrigger, "111").BigInt().Bytes(), + types.LogTrigger, + }, + { + "log trigger id", + func() []byte { + id, _ := big.NewInt(0).SetString("32329108151019397958065800113404894502874153543356521479058624064899121404671", 10) + return id.Bytes() + }(), + types.LogTrigger, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t 
*testing.T) { + uid := ocr2keepers.UpkeepIdentifier{} + copy(uid[:], tc.upkeepID) + assert.Equal(t, tc.upkeepType, GetUpkeepType(uid)) + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/utils.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/utils.go new file mode 100644 index 00000000..cb29491f --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/utils.go @@ -0,0 +1,26 @@ +package core + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +// GetTxBlock calls eth_getTransactionReceipt on the eth client to obtain a tx receipt +func GetTxBlock(ctx context.Context, client client.Client, txHash common.Hash) (*big.Int, common.Hash, error) { + receipt := types.Receipt{} + + if err := client.CallContext(ctx, &receipt, "eth_getTransactionReceipt", txHash); err != nil { + return nil, common.Hash{}, err + } + + if receipt.Status != 1 { + return nil, common.Hash{}, nil + } + + return receipt.GetBlockNumber(), receipt.GetBlockHash(), nil +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/utils_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/utils_test.go new file mode 100644 index 00000000..f493ba45 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/utils_test.go @@ -0,0 +1,73 @@ +package core + +import ( + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + evmClientMocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +func TestUtils_GetTxBlock(t *testing.T) { + tests := []struct { + name string + txHash common.Hash + ethCallError 
error + receipt *types.Receipt + status uint64 + }{ + { + name: "success", + txHash: common.HexToHash("0xc48fbf05edaf18f6aaa7de24de28528546b874bb03728d624ca407b8fed582a3"), + receipt: &types.Receipt{ + Status: 1, + BlockNumber: big.NewInt(2000), + }, + status: 1, + }, + { + name: "failure - eth call error", + txHash: common.HexToHash("0xc48fbf05edaf18f6aaa7de24de28528546b874bb03728d624ca407b8fed582a3"), + ethCallError: fmt.Errorf("eth call failed"), + }, + { + name: "failure - tx does not exist", + txHash: common.HexToHash("0xc48fbf05edaf18f6aaa7de24de28528546b874bb03728d624ca407b8fed582a3"), + receipt: &types.Receipt{ + Status: 0, + }, + status: 0, + }, + } + + for _, tt := range tests { + client := new(evmClientMocks.Client) + client.On("CallContext", mock.Anything, mock.Anything, "eth_getTransactionReceipt", tt.txHash). + Return(tt.ethCallError).Run(func(args mock.Arguments) { + receipt := tt.receipt + if receipt != nil { + res := args.Get(1).(*types.Receipt) + res.Status = receipt.Status + res.TxHash = receipt.TxHash + res.BlockNumber = receipt.BlockNumber + res.BlockHash = receipt.BlockHash + } + }) + + bn, bh, err := GetTxBlock(testutils.Context(t), client, tt.txHash) + if tt.ethCallError != nil { + assert.Equal(t, tt.ethCallError, err) + } else { + assert.Equal(t, tt.status, tt.receipt.Status) + assert.Equal(t, tt.receipt.BlockNumber, bn) + assert.Equal(t, tt.receipt.BlockHash, bh) + } + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding/encoder.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding/encoder.go new file mode 100644 index 00000000..207bd7ec --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding/encoder.go @@ -0,0 +1,128 @@ +package encoding + +import ( + "fmt" + "math/big" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_utils_2_1" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" +) + +var ( + ErrEmptyResults = fmt.Errorf("empty results; cannot encode") +) + +type reportEncoder struct { + packer Packer +} + +var _ ocr2keepers.Encoder = (*reportEncoder)(nil) + +func NewReportEncoder(p Packer) ocr2keepers.Encoder { + return &reportEncoder{ + packer: p, + } +} + +func (e reportEncoder) Encode(results ...ocr2keepers.CheckResult) ([]byte, error) { + if len(results) == 0 { + return nil, ErrEmptyResults + } + + report := automation_utils_2_1.KeeperRegistryBase21Report{ + FastGasWei: big.NewInt(0), + LinkNative: big.NewInt(0), + UpkeepIds: make([]*big.Int, len(results)), + GasLimits: make([]*big.Int, len(results)), + Triggers: make([][]byte, len(results)), + PerformDatas: make([][]byte, len(results)), + } + + encoded := 0 + highestCheckBlock := big.NewInt(0) + + for i, result := range results { + checkBlock := big.NewInt(int64(result.Trigger.BlockNumber)) + + if checkBlock.Cmp(highestCheckBlock) == 1 { + highestCheckBlock = checkBlock + if result.FastGasWei != nil { + report.FastGasWei = result.FastGasWei + } + if result.LinkNative != nil { + report.LinkNative = result.LinkNative + } + } + + id := result.UpkeepID.BigInt() + report.UpkeepIds[i] = id + report.GasLimits[i] = big.NewInt(0).SetUint64(result.GasAllocated) + + triggerW := triggerWrapper{ + BlockNum: uint32(result.Trigger.BlockNumber), + BlockHash: result.Trigger.BlockHash, + } + switch core.GetUpkeepType(result.UpkeepID) { + case types.LogTrigger: + triggerW.TxHash = result.Trigger.LogTriggerExtension.TxHash + triggerW.LogIndex = result.Trigger.LogTriggerExtension.Index + triggerW.LogBlockHash = result.Trigger.LogTriggerExtension.BlockHash + default: + // no special handling here for conditional triggers + } + + trigger, err := core.PackTrigger(id, triggerW) + if err != nil { + return nil, fmt.Errorf("%w: 
failed to pack trigger", err) + } + + report.Triggers[i] = trigger + report.PerformDatas[i] = result.PerformData + + encoded++ + } + + return e.packer.PackReport(report) +} + +// Extract extracts a slice of reported upkeeps (upkeep id, trigger, and work id) from raw bytes. the plugin will call this function to accept/transmit reports. +func (e reportEncoder) Extract(raw []byte) ([]ocr2keepers.ReportedUpkeep, error) { + report, err := e.packer.UnpackReport(raw) + if err != nil { + return nil, fmt.Errorf("%w: failed to unpack report", err) + } + reportedUpkeeps := make([]ocr2keepers.ReportedUpkeep, len(report.UpkeepIds)) + for i, upkeepId := range report.UpkeepIds { + triggerW, err := core.UnpackTrigger(upkeepId, report.Triggers[i]) + if err != nil { + return nil, fmt.Errorf("%w: failed to unpack trigger", err) + } + id := &ocr2keepers.UpkeepIdentifier{} + id.FromBigInt(upkeepId) + + trigger := ocr2keepers.NewTrigger( + ocr2keepers.BlockNumber(triggerW.BlockNum), + triggerW.BlockHash, + ) + switch core.GetUpkeepType(*id) { + case types.LogTrigger: + trigger.LogTriggerExtension = &ocr2keepers.LogTriggerExtension{} + trigger.LogTriggerExtension.TxHash = triggerW.TxHash + trigger.LogTriggerExtension.Index = triggerW.LogIndex + trigger.LogTriggerExtension.BlockHash = triggerW.LogBlockHash + default: + } + workID := core.UpkeepWorkID(*id, trigger) + reportedUpkeeps[i] = ocr2keepers.ReportedUpkeep{ + WorkID: workID, + UpkeepID: *id, + Trigger: trigger, + } + } + + return reportedUpkeeps, nil +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding/encoder_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding/encoder_test.go new file mode 100644 index 00000000..69272260 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding/encoder_test.go @@ -0,0 +1,165 @@ +package encoding + +import ( + "bytes" + "encoding/hex" + "math/big" + "os" + "testing" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + 
// expectedEncodedReport holds the golden report bytes loaded from the fixture
// file; it pins the on-chain report encoding for backwards compatibility.
var expectedEncodedReport []byte

// init loads and hex-decodes the golden encoded-report fixture. Any failure
// here is a broken test setup, so it panics rather than returning an error.
func init() {
	b, err := os.ReadFile("../fixtures/expected_encoded_report.txt")
	if err != nil {
		panic(err)
	}
	expectedEncodedReport, err = hex.DecodeString(string(b))
	if err != nil {
		panic(err)
	}
}

// TestReportEncoder_EncodeExtract round-trips check results through
// Encode and Extract, asserting the encoded byte length and that the
// upkeep id, work id, and trigger survive the round trip.
func TestReportEncoder_EncodeExtract(t *testing.T) {
	encoder := reportEncoder{
		packer: NewAbiPacker(),
	}

	tests := []struct {
		name       string
		results    []ocr2keepers.CheckResult
		reportSize int
		// NOTE(review): expectedFastGasWei and expectedLinkNative are set in
		// the cases below but never asserted in the loop body — confirm
		// whether assertions were intended here.
		expectedFastGasWei int64
		expectedLinkNative int64
		expectedErr        error
	}{
		{
			"happy flow single",
			[]ocr2keepers.CheckResult{
				newResult(1, 1, core.GenUpkeepID(types.LogTrigger, "123"), 1, 1),
			},
			736,
			1,
			1,
			nil,
		},
		{
			"happy flow multiple",
			[]ocr2keepers.CheckResult{
				newResult(1, 1, core.GenUpkeepID(types.LogTrigger, "10"), 1, 1),
				newResult(1, 1, core.GenUpkeepID(types.ConditionTrigger, "20"), 1, 1),
				newResult(1, 1, core.GenUpkeepID(types.ConditionTrigger, "30"), 1, 1),
			},
			1312,
			3,
			3,
			nil,
		},
		{
			"happy flow highest block number first",
			[]ocr2keepers.CheckResult{
				newResult(1, 1, core.GenUpkeepID(types.ConditionTrigger, "30"), 1, 1),
				newResult(1, 1, core.GenUpkeepID(types.ConditionTrigger, "20"), 1, 1),
				newResult(1, 1, core.GenUpkeepID(types.LogTrigger, "10"), 1, 1),
			},
			1312,
			1000,
			2000,
			nil,
		},
		{
			"empty results",
			[]ocr2keepers.CheckResult{},
			0,
			0,
			0,
			ErrEmptyResults,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			b, err := encoder.Encode(tc.results...)
			// Error cases stop here: only the error identity is checked.
			if tc.expectedErr != nil {
				assert.Equal(t, tc.expectedErr, err)
				return
			}

			assert.Nil(t, err)
			assert.Len(t, b, tc.reportSize)

			// Round-trip: extracting the encoded bytes must yield the same
			// identifying fields the results were built with.
			results, err := encoder.Extract(b)
			assert.Nil(t, err)
			assert.Len(t, results, len(tc.results))

			for i, r := range results {
				assert.Equal(t, r.UpkeepID, tc.results[i].UpkeepID)
				assert.Equal(t, r.WorkID, tc.results[i].WorkID)
				assert.Equal(t, r.Trigger, tc.results[i].Trigger)
			}
		})
	}
}

// TestReportEncoder_BackwardsCompatibility encodes a fixed set of results and
// compares the bytes against the golden fixture; a mismatch means the report
// encoding changed in a non-backwards-compatible way.
func TestReportEncoder_BackwardsCompatibility(t *testing.T) {
	encoder := reportEncoder{
		packer: NewAbiPacker(),
	}
	results := []ocr2keepers.CheckResult{
		newResult(1, 2, core.GenUpkeepID(types.LogTrigger, "10"), 5, 6),
		newResult(3, 4, core.GenUpkeepID(types.ConditionTrigger, "20"), 7, 8),
	}
	encoded, err := encoder.Encode(results...)
	assert.NoError(t, err)
	if !bytes.Equal(encoded, expectedEncodedReport) {
		assert.Fail(t,
			"encoded report does not match expected encoded report; "+
				"this means a breaking change has been made to the report encoding function; "+
				"only update this test if non-backwards-compatible changes are necessary",
		)
	}
}

// newResult builds a CheckResult test fixture for the given upkeep id. Log
// upkeeps get a fixed LogTriggerExtension so the trigger/work id derivation
// is exercised for both trigger types.
// NOTE(review): the checkBlock parameter is currently unused in the body —
// confirm whether it was meant to feed the trigger.
func newResult(block int64, checkBlock ocr2keepers.BlockNumber, id ocr2keepers.UpkeepIdentifier, fastGasWei, linkNative int64) ocr2keepers.CheckResult {
	tp := core.GetUpkeepType(id)

	trig := ocr2keepers.Trigger{
		BlockNumber: ocr2keepers.BlockNumber(block),
		BlockHash:   [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8},
	}

	if tp == types.LogTrigger {
		trig.LogTriggerExtension = &ocr2keepers.LogTriggerExtension{
			Index:     1,
			TxHash:    common.HexToHash("0x1234567890123456789012345678901234567890123456789012345678901234"),
			BlockHash: common.HexToHash("0xaaaaaaaa90123456789012345678901234567890123456789012345678901234"),
		}
	}

	// Payload derivation computes the canonical trigger and work id used in
	// the returned result.
	payload, _ := core.NewUpkeepPayload(
		id.BigInt(),
		trig,
		[]byte{},
	)

	return ocr2keepers.CheckResult{
		UpkeepID:     id,
		Trigger:      payload.Trigger,
		WorkID:       payload.WorkID,
		Eligible:     true,
		GasAllocated: 100,
		PerformData:  []byte("data0"),
		FastGasWei:   big.NewInt(fastGasWei),
		LinkNative:   big.NewInt(linkNative),
	}
}
// UpkeepInfo aliases the generated registry wrapper's upkeep info struct so
// callers of this package do not need to import the bindings directly.
type UpkeepInfo = iregistry21.KeeperRegistryBase21UpkeepInfo

// Packer encodes and decodes registry data (check results, perform
// simulation results, log trigger configs, and reports) using the
// contract ABIs.
type Packer interface {
	// UnpackCheckResult decodes a raw checkUpkeep return value into a
	// CheckResult for the given payload.
	UnpackCheckResult(payload ocr2keepers.UpkeepPayload, raw string) (ocr2keepers.CheckResult, error)
	// UnpackPerformResult decodes a raw simulatePerformUpkeep return value,
	// reporting the pipeline state and whether the simulation succeeded.
	UnpackPerformResult(raw string) (PipelineExecutionState, bool, error)
	// UnpackLogTriggerConfig decodes raw bytes into a LogTriggerConfig.
	UnpackLogTriggerConfig(raw []byte) (automation_utils_2_1.LogTriggerConfig, error)
	// PackReport ABI-encodes a report for on-chain transmission.
	PackReport(report automation_utils_2_1.KeeperRegistryBase21Report) ([]byte, error)
	// UnpackReport is the inverse of PackReport.
	UnpackReport(raw []byte) (automation_utils_2_1.KeeperRegistryBase21Report, error)
}
// triggerWrapper is a wrapper for the different trigger types (log and condition triggers).
// NOTE: we use log trigger because it extends condition trigger,
type triggerWrapper = automation_utils_2_1.KeeperRegistryBase21LogTrigger

// abiPacker implements Packer on top of the registry, utils, and streams
// contract ABIs.
// NOTE(review): streamsABI is stored here but not referenced by any method
// visible in this file — confirm it is used elsewhere.
type abiPacker struct {
	registryABI abi.ABI
	utilsABI    abi.ABI
	streamsABI  abi.ABI
}

// Compile-time assertion that abiPacker satisfies Packer.
var _ Packer = (*abiPacker)(nil)

// NewAbiPacker constructs an abiPacker wired to the package-level ABIs.
func NewAbiPacker() *abiPacker {
	return &abiPacker{registryABI: core.RegistryABI, utilsABI: core.UtilsABI, streamsABI: core.StreamsCompatibleABI}
}

// UnpackCheckResult decodes a raw hex-encoded checkUpkeep return value into a
// CheckResult for the given payload. Decode/unpack failures are treated as
// non-retryable and produce an ineligible result.
func (p *abiPacker) UnpackCheckResult(payload ocr2keepers.UpkeepPayload, raw string) (ocr2keepers.CheckResult, error) {
	b, err := hexutil.Decode(raw)
	if err != nil {
		// decode failed, not retryable
		return GetIneligibleCheckResultWithoutPerformData(payload, UpkeepFailureReasonNone, PackUnpackDecodeFailed, false),
			fmt.Errorf("upkeepId %s failed to decode checkUpkeep result %s: %s", payload.UpkeepID.String(), raw, err)
	}

	out, err := p.registryABI.Methods["checkUpkeep"].Outputs.UnpackValues(b)
	if err != nil {
		// unpack failed, not retryable
		return GetIneligibleCheckResultWithoutPerformData(payload, UpkeepFailureReasonNone, PackUnpackDecodeFailed, false),
			fmt.Errorf("upkeepId %s failed to unpack checkUpkeep result %s: %s", payload.UpkeepID.String(), raw, err)
	}

	// Output indices map positionally onto the registry's checkUpkeep
	// outputs (eligibility, perform data, failure reason, gas, gas prices) —
	// TODO confirm the exact ordering against the registry ABI.
	result := ocr2keepers.CheckResult{
		Eligible:            *abi.ConvertType(out[0], new(bool)).(*bool),
		Retryable:           false,
		GasAllocated:        uint64((*abi.ConvertType(out[4], new(*big.Int)).(**big.Int)).Int64()),
		UpkeepID:            payload.UpkeepID,
		Trigger:             payload.Trigger,
		WorkID:              payload.WorkID,
		FastGasWei:          *abi.ConvertType(out[5], new(*big.Int)).(**big.Int),
		LinkNative:          *abi.ConvertType(out[6], new(*big.Int)).(**big.Int),
		IneligibilityReason: *abi.ConvertType(out[2], new(uint8)).(*uint8),
	}

	rawPerformData := *abi.ConvertType(out[1], new([]byte)).(*[]byte)

	// if NONE we expect the perform data. if TARGET_CHECK_REVERTED we will have the error data in the perform data used for off chain lookup
	if result.IneligibilityReason == uint8(UpkeepFailureReasonNone) || (result.IneligibilityReason == uint8(UpkeepFailureReasonTargetCheckReverted) && len(rawPerformData) > 0) {
		result.PerformData = rawPerformData
	}

	return result, nil
}

// UnpackPerformResult decodes a raw hex-encoded simulatePerformUpkeep return
// value, returning the pipeline state and the simulation's success flag.
func (p *abiPacker) UnpackPerformResult(raw string) (PipelineExecutionState, bool, error) {
	b, err := hexutil.Decode(raw)
	if err != nil {
		return PackUnpackDecodeFailed, false, err
	}

	out, err := p.registryABI.Methods["simulatePerformUpkeep"].Outputs.UnpackValues(b)
	if err != nil {
		return PackUnpackDecodeFailed, false, err
	}

	return NoPipelineError, *abi.ConvertType(out[0], new(bool)).(*bool), nil
}

// UnpackLogTriggerConfig unpacks the log trigger config from the given raw data
func (p *abiPacker) UnpackLogTriggerConfig(raw []byte) (automation_utils_2_1.LogTriggerConfig, error) {
	var cfg automation_utils_2_1.LogTriggerConfig

	// The config is ABI-encoded as the inputs of the utils contract's
	// _logTriggerConfig helper method.
	out, err := core.UtilsABI.Methods["_logTriggerConfig"].Inputs.UnpackValues(raw)
	if err != nil {
		return cfg, fmt.Errorf("%w: unpack _logTriggerConfig return: %s", err, raw)
	}

	converted, ok := abi.ConvertType(out[0], new(automation_utils_2_1.LogTriggerConfig)).(*automation_utils_2_1.LogTriggerConfig)
	if !ok {
		return cfg, fmt.Errorf("failed to convert type during UnpackLogTriggerConfig")
	}
	return *converted, nil
}

// PackReport packs the report with abi definitions from the contract.
func (p *abiPacker) PackReport(report automation_utils_2_1.KeeperRegistryBase21Report) ([]byte, error) {
	bts, err := p.utilsABI.Methods["_report"].Inputs.Pack(&report)
	if err != nil {
		return nil, fmt.Errorf("%w: failed to pack report", err)
	}
	return bts, nil
}
// UnpackReport unpacks the report from the given raw data. The decoded slices
// are copied into a freshly allocated report so the result does not alias the
// ABI decoder's buffers.
func (p *abiPacker) UnpackReport(raw []byte) (automation_utils_2_1.KeeperRegistryBase21Report, error) {
	unpacked, err := p.utilsABI.Methods["_report"].Inputs.Unpack(raw)
	if err != nil {
		return automation_utils_2_1.KeeperRegistryBase21Report{}, fmt.Errorf("%w: failed to unpack report", err)
	}
	converted, ok := abi.ConvertType(unpacked[0], new(automation_utils_2_1.KeeperRegistryBase21Report)).(*automation_utils_2_1.KeeperRegistryBase21Report)
	if !ok {
		return automation_utils_2_1.KeeperRegistryBase21Report{}, fmt.Errorf("failed to convert type")
	}
	report := automation_utils_2_1.KeeperRegistryBase21Report{
		FastGasWei:   converted.FastGasWei,
		LinkNative:   converted.LinkNative,
		UpkeepIds:    make([]*big.Int, len(converted.UpkeepIds)),
		GasLimits:    make([]*big.Int, len(converted.GasLimits)),
		Triggers:     make([][]byte, len(converted.Triggers)),
		PerformDatas: make([][]byte, len(converted.PerformDatas)),
	}
	// NOTE(review): all four arrays are copied only when UpkeepIds is
	// non-empty; if a report ever had empty UpkeepIds but non-empty sibling
	// arrays, those siblings would be left as zeroed slices — presumably the
	// lengths always match in valid reports, but confirm.
	if len(report.UpkeepIds) > 0 {
		copy(report.UpkeepIds, converted.UpkeepIds)
		copy(report.GasLimits, converted.GasLimits)
		copy(report.Triggers, converted.Triggers)
		copy(report.PerformDatas, converted.PerformDatas)
	}

	return report, nil
}

// GetIneligibleCheckResultWithoutPerformData returns an ineligible check result with ineligibility reason and pipeline execution state but without perform data
func GetIneligibleCheckResultWithoutPerformData(p ocr2keepers.UpkeepPayload, reason UpkeepFailureReason, state PipelineExecutionState, retryable bool) ocr2keepers.CheckResult {
	return ocr2keepers.CheckResult{
		IneligibilityReason:    uint8(reason),
		PipelineExecutionState: uint8(state),
		Retryable:              retryable,
		UpkeepID:               p.UpkeepID,
		Trigger:                p.Trigger,
		WorkID:                 p.WorkID,
		// Gas prices default to zero for ineligible results.
		FastGasWei: big.NewInt(0),
		LinkNative: big.NewInt(0),
	}
}
// TestPacker_PackReport asserts the ABI-encoded byte length of a report for
// several field combinations; nil array fields shrink the packed output.
func TestPacker_PackReport(t *testing.T) {
	for _, tc := range []struct {
		name   string
		report automation21Utils.KeeperRegistryBase21Report
		// NOTE(review): expectsErr/wantErr are never set by any case below,
		// so the error branch in the loop is currently dead — confirm whether
		// failing cases were intended.
		expectsErr bool
		wantErr    error
		wantBytes  int
	}{
		{
			name: "all non-nil values get encoded to a byte array of a specific length",
			report: automation21Utils.KeeperRegistryBase21Report{
				FastGasWei: big.NewInt(0),
				LinkNative: big.NewInt(0),
				UpkeepIds:  []*big.Int{big.NewInt(3)},
				GasLimits:  []*big.Int{big.NewInt(4)},
				Triggers: [][]byte{
					{5},
				},
				PerformDatas: [][]byte{
					{6},
				},
			},
			wantBytes: 608,
		},
		{
			name: "if upkeep IDs are nil, the packed report is smaller",
			report: automation21Utils.KeeperRegistryBase21Report{
				FastGasWei: big.NewInt(1),
				LinkNative: big.NewInt(2),
				UpkeepIds:  nil,
				GasLimits:  []*big.Int{big.NewInt(4)},
				Triggers: [][]byte{
					{5},
				},
				PerformDatas: [][]byte{
					{6},
				},
			},
			wantBytes: 576,
		},
		{
			name: "if gas limits are nil, the packed report is smaller",
			report: automation21Utils.KeeperRegistryBase21Report{
				FastGasWei: big.NewInt(1),
				LinkNative: big.NewInt(2),
				UpkeepIds:  []*big.Int{big.NewInt(3)},
				GasLimits:  nil,
				Triggers: [][]byte{
					{5},
				},
				PerformDatas: [][]byte{
					{6},
				},
			},
			wantBytes: 576,
		},
		{
			name: "if perform datas are nil, the packed report is smaller",
			report: automation21Utils.KeeperRegistryBase21Report{
				FastGasWei: big.NewInt(1),
				LinkNative: big.NewInt(2),
				UpkeepIds:  []*big.Int{big.NewInt(3)},
				GasLimits:  []*big.Int{big.NewInt(4)},
				Triggers: [][]byte{
					{5},
				},
				PerformDatas: nil,
			},
			wantBytes: 512,
		},
		{
			name: "if triggers are nil, the packed report is smaller",
			report: automation21Utils.KeeperRegistryBase21Report{
				FastGasWei: big.NewInt(1),
				LinkNative: big.NewInt(2),
				UpkeepIds:  []*big.Int{big.NewInt(3)},
				GasLimits:  []*big.Int{big.NewInt(4)},
				Triggers:   nil,
				PerformDatas: [][]byte{
					{6},
				},
			},
			wantBytes: 512,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			packer := NewAbiPacker()
			bytes, err := packer.PackReport(tc.report)
			if tc.expectsErr {
				assert.Error(t, err)
				assert.Equal(t, tc.wantErr.Error(), err.Error())
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tc.wantBytes, len(bytes))
			}
		})
	}
}
"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000421c000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000c8caf37f3b3890000000000000000000000000000000000000000000000000000000000000000", + ExpectedResult: ocr2keepers.CheckResult{ + UpkeepID: upkeepId, + Eligible: false, + IneligibilityReason: uint8(UpkeepFailureReasonUpkeepNotNeeded), + Trigger: ocr2keepers.NewLogTrigger(ocr2keepers.BlockNumber(19447615), [32]byte{}, nil), + WorkID: "e54c524132d9c8d87b7e43b76f6d769face19ffd2ff93fc24f123dd745d3ce1e", + PerformData: nil, + FastGasWei: big.NewInt(1000000000), + LinkNative: big.NewInt(3532383906411401), + }, + }, + { + Name: "target check reverted", + Payload: p2, + RawData: "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000007531000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000c8caf37f3b3890000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000008914039bf676e20aad43a5642485e666575ed0d927a4b5679745e947e7d125ee2687c10000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000024462e8a50d00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000001c0000000000000000000000000000000000000000000000000000000000128c1d0000000000000000000
00000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000009666565644944537472000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000184554482d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000000000184254432d5553442d415242495452554d2d544553544e45540000000000000000000000000000000000000000000000000000000000000000000000000000000b626c6f636b4e756d6265720000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000006400000000000000000000000000000000000000000000000000000000000000000000000000000000", + ExpectedResult: ocr2keepers.CheckResult{ + UpkeepID: upkeepId, + Eligible: false, + IneligibilityReason: uint8(UpkeepFailureReasonTargetCheckReverted), + Trigger: ocr2keepers.NewLogTrigger(ocr2keepers.BlockNumber(19448272), [32]byte{}, nil), + WorkID: "e54c524132d9c8d87b7e43b76f6d769face19ffd2ff93fc24f123dd745d3ce1e", + PerformData: []byte{98, 232, 165, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 40, 193, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 102, 101, 101, 100, 73, 68, 83, 116, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 69, 84, 72, 45, 85, 83, 68, 45, 65, 82, 66, 73, 84, 82, 85, 77, 45, 84, 69, 83, 84, 78, 69, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 66, 84, 67, 45, 85, 83, 68, 45, 65, 82, 66, 73, 84, 82, 85, 77, 45, 84, 69, 83, 84, 78, 69, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 98, 108, 111, 99, 107, 78, 117, 109, 98, 101, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + FastGasWei: big.NewInt(1000000000), + LinkNative: big.NewInt(3532383906411401), + }, + }, + { + Name: "decode failed", + Payload: p2, + RawData: "invalid_raw_data", + ExpectedResult: ocr2keepers.CheckResult{ + UpkeepID: upkeepId, + PipelineExecutionState: uint8(PackUnpackDecodeFailed), + Trigger: p2.Trigger, + WorkID: p2.WorkID, + }, + ExpectedError: fmt.Errorf("upkeepId %s failed to decode checkUpkeep result invalid_raw_data: hex string without 0x prefix", p2.UpkeepID.String()), + }, + { + Name: "unpack failed", + Payload: p2, + RawData: "0x123123", + ExpectedResult: ocr2keepers.CheckResult{ + UpkeepID: upkeepId, + PipelineExecutionState: uint8(PackUnpackDecodeFailed), + Trigger: p2.Trigger, + WorkID: p2.WorkID, + }, + ExpectedError: fmt.Errorf("upkeepId %s failed to unpack checkUpkeep result 0x123123: abi: cannot marshal in to go type: length 
// TestPacker_UnpackPerformResult decodes a known simulatePerformUpkeep
// return blob and asserts the success flag and pipeline state.
func TestPacker_UnpackPerformResult(t *testing.T) {
	tests := []struct {
		Name    string
		RawData string
		State   PipelineExecutionState
	}{
		{
			Name:    "unpack success",
			RawData: "0x0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000a52d",
			State:   NoPipelineError,
		},
	}
	for _, test := range tests {
		t.Run(test.Name, func(t *testing.T) {
			packer := NewAbiPacker()
			state, rs, err := packer.UnpackPerformResult(test.RawData)
			assert.Nil(t, err)
			assert.True(t, rs)
			assert.Equal(t, test.State, state)
		})
	}
}
+ return b + }(), + automation21Utils.LogTriggerConfig{ + ContractAddress: common.HexToAddress("0x7456FadF415b7c34B1182Bd20B0537977e945e3E"), + Topic0: [32]uint8{0x3d, 0x53, 0xa3, 0x95, 0x50, 0xe0, 0x46, 0x88, 0x6, 0x58, 0x27, 0xf3, 0xbb, 0x86, 0x58, 0x4c, 0xb0, 0x7, 0xab, 0x9e, 0xbc, 0xa7, 0xeb, 0xd5, 0x28, 0xe7, 0x30, 0x1c, 0x9c, 0x31, 0xeb, 0x5d}, + }, + false, + }, + { + "invalid", + func() []byte { + b, _ := hexutil.Decode("0x000000000000000000000000b1182bd20b0537977e945e3e00000000000000000000000000000000000000000000000000000000000000003d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") + return b + }(), + automation21Utils.LogTriggerConfig{}, + true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + packer := NewAbiPacker() + res, err := packer.UnpackLogTriggerConfig(tc.raw) + if tc.errored { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.res, res) + } + }) + } +} + +func TestPacker_PackReport_UnpackReport(t *testing.T) { + report := automation_utils_2_1.KeeperRegistryBase21Report{ + FastGasWei: big.NewInt(1), + LinkNative: big.NewInt(1), + UpkeepIds: []*big.Int{big.NewInt(1), big.NewInt(2)}, + GasLimits: []*big.Int{big.NewInt(100), big.NewInt(200)}, + Triggers: [][]byte{{1, 2, 3, 4}, {5, 6, 7, 8}}, + PerformDatas: [][]byte{{5, 6, 7, 8}, {1, 2, 3, 4}}, + } + packer := NewAbiPacker() + res, err := packer.PackReport(report) + require.NoError(t, err) + report2, err := packer.UnpackReport(res) + require.NoError(t, err) + assert.Equal(t, report, report2) + expected := 
"0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000006400000000000000000000000000000000000000000000000000000000000000c800000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000040102030400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000405060708000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004050607080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040102030400000000000000000000000000000000000000000000000000000000" + assert.Equal(t, hexutil.Encode(res), expected) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/fixtures/expected_encoded_report.txt b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/fixtures/expected_encoded_report.txt new file mode 
100644 index 00000000..3fb42146 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/fixtures/expected_encoded_report.txt @@ -0,0 +1 @@ +00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000007000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000000020100000000000000000000000000000131300000000000000000000000000000010000000000000000000000000000003230000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000006400000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000a0aaaaaaaa90123456789012345678901234567890123456789012345678901234123456789012345678901234567890123456789012345678901234567890123400000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001010203040506070801020304050607080102030405060708010203040506070800000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000003010203040506070801020304050607080102030405060708010203040506070800000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000
package evm

import (
	"github.com/goplugin/libocr/offchainreporting2/types"
	"github.com/goplugin/libocr/offchainreporting2plus/ocr3types"

	"github.com/goplugin/plugin-automation/pkg/v3/plugin"
)

// Compile-time assertion that the wrapper satisfies the OCR3 keyring interface.
var _ ocr3types.OnchainKeyring[plugin.AutomationReportInfo] = &onchainKeyringV3Wrapper{}

// onchainKeyringV3Wrapper adapts an OCR2 OnchainKeyring to the OCR3 keyring
// interface by synthesizing a ReportContext from the OCR3 digest/sequence
// number before delegating to the wrapped keyring.
type onchainKeyringV3Wrapper struct {
	core types.OnchainKeyring
}

// NewOnchainKeyringV3Wrapper wraps the given OCR2 keyring for use with OCR3.
func NewOnchainKeyringV3Wrapper(keyring types.OnchainKeyring) *onchainKeyringV3Wrapper {
	return &onchainKeyringV3Wrapper{
		core: keyring,
	}
}

// PublicKey delegates to the wrapped keyring.
func (w *onchainKeyringV3Wrapper) PublicKey() types.OnchainPublicKey {
	return w.core.PublicKey()
}

// MaxSignatureLength delegates to the wrapped keyring.
func (w *onchainKeyringV3Wrapper) MaxSignatureLength() int {
	return w.core.MaxSignatureLength()
}

// Sign builds an OCR2 ReportContext whose Epoch carries the OCR3 sequence
// number, then signs the report with the wrapped keyring.
// NOTE(review): seqNr (uint64) is truncated to uint32 for Epoch — confirm
// sequence numbers stay within uint32 range in practice.
func (w *onchainKeyringV3Wrapper) Sign(digest types.ConfigDigest, seqNr uint64, r ocr3types.ReportWithInfo[plugin.AutomationReportInfo]) (signature []byte, err error) {
	rCtx := types.ReportContext{
		ReportTimestamp: types.ReportTimestamp{
			ConfigDigest: digest,
			Epoch:        uint32(seqNr),
		},
	}

	return w.core.Sign(rCtx, r.Report)
}

// Verify mirrors Sign: it reconstructs the same ReportContext and delegates
// signature verification to the wrapped keyring.
func (w *onchainKeyringV3Wrapper) Verify(key types.OnchainPublicKey, digest types.ConfigDigest, seqNr uint64, r ocr3types.ReportWithInfo[plugin.AutomationReportInfo], signature []byte) bool {
	rCtx := types.ReportContext{
		ReportTimestamp: types.ReportTimestamp{
			ConfigDigest: digest,
			Epoch:        uint32(seqNr),
		},
	}

	return w.core.Verify(key, rCtx, r.Report, signature)
}
// TestNewOnchainKeyringV3Wrapper verifies that the OCR3 keyring wrapper
// delegates PublicKey/MaxSignatureLength directly and translates the OCR3
// (digest, seqNr) pair into the OCR2 ReportContext expected by the wrapped
// keyring for Sign and Verify.
func TestNewOnchainKeyringV3Wrapper(t *testing.T) {
	t.Run("the on chain keyring wrapper gets the public key and max signature length from the wrapped keyring", func(t *testing.T) {
		onchainKeyring := &mockOnchainKeyring{
			MaxSignatureLengthFn: func() int {
				return 123
			},
			PublicKeyFn: func() types.OnchainPublicKey {
				return types.OnchainPublicKey([]byte("abcdef"))
			},
		}
		keyring := NewOnchainKeyringV3Wrapper(onchainKeyring)
		assert.Equal(t, 123, keyring.MaxSignatureLength())
		assert.Equal(t, types.OnchainPublicKey([]byte("abcdef")), keyring.PublicKey())
	})

	t.Run("a report context is created and the wrapped keyring signs the report", func(t *testing.T) {
		onchainKeyring := &mockOnchainKeyring{
			SignFn: func(context types.ReportContext, report types.Report) (signature []byte, err error) {
				// The seqNr passed to Sign must surface as the Epoch here.
				assert.Equal(t, types.ReportContext{
					ReportTimestamp: types.ReportTimestamp{
						ConfigDigest: types.ConfigDigest([32]byte{1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4}),
						Epoch:        101,
						Round:        0,
					},
					ExtraHash: [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				}, context)
				assert.Equal(t, types.Report([]byte("a report to sign")), report)
				return []byte("signature"), err
			},
		}
		keyring := NewOnchainKeyringV3Wrapper(onchainKeyring)
		signed, err := keyring.Sign(
			types.ConfigDigest([32]byte{1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4}),
			101,
			ocr3types.ReportWithInfo[plugin.AutomationReportInfo]{
				Report: []byte("a report to sign"),
				Info:   plugin.AutomationReportInfo{},
			},
		)
		assert.NoError(t, err)
		assert.Equal(t, []byte("signature"), signed)
	})

	t.Run("a report context is created and the wrapped keyring verifies the report", func(t *testing.T) {
		onchainKeyring := &mockOnchainKeyring{
			VerifyFn: func(pk types.OnchainPublicKey, rc types.ReportContext, r types.Report, signature []byte) bool {
				assert.Equal(t, types.OnchainPublicKey([]byte("key")), pk)
				assert.Equal(t, types.ReportContext{
					ReportTimestamp: types.ReportTimestamp{
						ConfigDigest: types.ConfigDigest([32]byte{1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4}),
						Epoch:        999,
						Round:        0,
					},
					ExtraHash: [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				}, rc)
				assert.Equal(t, types.Report([]byte("a report to sign")), r)
				assert.Equal(t, []byte("signature"), signature)
				return true
			},
		}
		keyring := NewOnchainKeyringV3Wrapper(onchainKeyring)
		valid := keyring.Verify(
			types.OnchainPublicKey([]byte("key")),
			types.ConfigDigest([32]byte{1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4}),
			999,
			ocr3types.ReportWithInfo[plugin.AutomationReportInfo]{
				Report: []byte("a report to sign"),
				Info:   plugin.AutomationReportInfo{},
			},
			[]byte("signature"),
		)
		assert.True(t, valid)
	})
}

// mockOnchainKeyring is a function-field test double for types.OnchainKeyring;
// each method panics (nil deref) if its corresponding Fn is unset, which keeps
// unexpected calls visible in tests.
type mockOnchainKeyring struct {
	PublicKeyFn          func() types.OnchainPublicKey
	SignFn               func(types.ReportContext, types.Report) (signature []byte, err error)
	VerifyFn             func(_ types.OnchainPublicKey, _ types.ReportContext, _ types.Report, signature []byte) bool
	MaxSignatureLengthFn func() int
}
types.OnchainPublicKey { + return k.PublicKeyFn() +} + +func (k *mockOnchainKeyring) Sign(c types.ReportContext, r types.Report) (signature []byte, err error) { + return k.SignFn(c, r) +} + +func (k *mockOnchainKeyring) Verify(pk types.OnchainPublicKey, c types.ReportContext, r types.Report, signature []byte) bool { + return k.VerifyFn(pk, c, r, signature) +} + +func (k *mockOnchainKeyring) MaxSignatureLength() int { + return k.MaxSignatureLengthFn() +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/block_time.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/block_time.go new file mode 100644 index 00000000..6ca82468 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/block_time.go @@ -0,0 +1,62 @@ +package logprovider + +import ( + "context" + "fmt" + "sort" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +var ( + defaultSampleSize = int64(10000) + defaultBlockTime = time.Second * 1 +) + +type blockTimeResolver struct { + poller logpoller.LogPoller +} + +func newBlockTimeResolver(poller logpoller.LogPoller) *blockTimeResolver { + return &blockTimeResolver{ + poller: poller, + } +} + +func (r *blockTimeResolver) BlockTime(ctx context.Context, blockSampleSize int64) (time.Duration, error) { + if blockSampleSize < 2 { // min 2 blocks range + blockSampleSize = defaultSampleSize + } + + latest, err := r.poller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return 0, fmt.Errorf("failed to get latest block from poller: %w", err) + } + latestBlockNumber := latest.BlockNumber + if latestBlockNumber <= blockSampleSize { + return defaultBlockTime, nil + } + start, end := latestBlockNumber-blockSampleSize, latestBlockNumber + startTime, endTime, err := r.getSampleTimestamps(ctx, uint64(start), uint64(end)) + if err != nil { + return 0, err + } + + return endTime.Sub(startTime) / 
time.Duration(blockSampleSize), nil +} + +func (r *blockTimeResolver) getSampleTimestamps(ctx context.Context, start, end uint64) (time.Time, time.Time, error) { + blocks, err := r.poller.GetBlocksRange(ctx, []uint64{start, end}) + if err != nil { + return time.Time{}, time.Time{}, fmt.Errorf("failed to get block range from poller: %w", err) + } + sort.Slice(blocks, func(i, j int) bool { + return blocks[i].BlockNumber < blocks[j].BlockNumber + }) + if len(blocks) < 2 { + return time.Time{}, time.Time{}, fmt.Errorf("failed to fetch blocks %d, %d from log poller", start, end) + } + return blocks[0].BlockTimestamp, blocks[1].BlockTimestamp, nil +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/block_time_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/block_time_test.go new file mode 100644 index 00000000..4ad3c3a0 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/block_time_test.go @@ -0,0 +1,82 @@ +package logprovider + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + lpmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestBlockTimeResolver_BlockTime(t *testing.T) { + now := time.Now() + + tests := []struct { + name string + blockSampleSize int64 + latestBlock int64 + latestBlockErr error + blocksRange []logpoller.LogPollerBlock + blocksRangeErr error + blockTime time.Duration + blockTimeErr error + }{ + { + "latest block err", + 10, + 0, + fmt.Errorf("test err"), + nil, + nil, + 0, + fmt.Errorf("test err"), + }, + { + "block range err", + 10, + 20, + nil, + nil, + fmt.Errorf("test err"), + 0, + fmt.Errorf("test err"), + }, + { + "2 sec block time", + 4, + 20, + nil, + []logpoller.LogPollerBlock{ + {BlockTimestamp: now.Add(-time.Second * (2 * 
4)), BlockNumber: 16}, + {BlockTimestamp: now, BlockNumber: 20}, + }, + nil, + 2 * time.Second, + nil, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := testutils.Context(t) + + lp := new(lpmocks.LogPoller) + resolver := newBlockTimeResolver(lp) + + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: tc.latestBlock}, tc.latestBlockErr) + lp.On("GetBlocksRange", mock.Anything, mock.Anything).Return(tc.blocksRange, tc.blocksRangeErr) + + blockTime, err := resolver.BlockTime(ctx, tc.blockSampleSize) + if tc.blockTimeErr != nil { + require.Error(t, err) + return + } + require.Equal(t, tc.blockTime, blockTime) + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer.go new file mode 100644 index 00000000..b8f4f7f4 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer.go @@ -0,0 +1,384 @@ +package logprovider + +import ( + "encoding/hex" + "math/big" + "sort" + "sync" + "sync/atomic" + + "github.com/goplugin/plugin-automation/pkg/v3/random" + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +var ( + // defaultFastExecLogsHigh is the default upper bound / maximum number of logs that Automation is committed to process for each upkeep, + // based on available capacity, i.e. if there are no logs from other upkeeps. + // Used by Log buffer to limit the number of logs we are saving in memory for each upkeep in a block + defaultFastExecLogsHigh = 32 + // defaultNumOfLogUpkeeps is the default number of log upkeeps supported by the registry. 
+ defaultNumOfLogUpkeeps = 50 +) + +// fetchedLog holds the log and the ID of the upkeep +type fetchedLog struct { + upkeepID *big.Int + log logpoller.Log + // cachedLogID is the cached log identifier, used for sorting. + // It is calculated lazily, and cached for performance. + cachedLogID string +} + +func (l *fetchedLog) getLogID() string { + if len(l.cachedLogID) == 0 { + ext := ocr2keepers.LogTriggerExtension{ + Index: uint32(l.log.LogIndex), + } + copy(ext.TxHash[:], l.log.TxHash[:]) + copy(ext.BlockHash[:], l.log.BlockHash[:]) + l.cachedLogID = hex.EncodeToString(ext.LogIdentifier()) + } + return l.cachedLogID +} + +// fetchedBlock holds the logs fetched for a block +type fetchedBlock struct { + blockNumber int64 + // logs is the logs fetched for the block and haven't been visited yet + logs []fetchedLog + // visited is the logs fetched for the block and have been visited. + // We keep them in order to avoid fetching them again. + visited []fetchedLog +} + +func (b *fetchedBlock) Append(lggr logger.Logger, fl fetchedLog, maxBlockLogs, maxUpkeepLogs int) (fetchedLog, bool) { + has, upkeepLogs := b.has(fl.upkeepID, fl.log) + if has { + // Skipping known logs + return fetchedLog{}, false + } + // lggr.Debugw("Adding log", "i", i, "blockBlock", currentBlock.blockNumber, "logBlock", log.BlockNumber, "id", id) + b.logs = append(b.logs, fl) + + // drop logs if we reached limits. + if upkeepLogs+1 > maxUpkeepLogs { + // in case we have logs overflow for a particular upkeep, we drop a log of that upkeep, + // based on shared, random (per block) order of the logs in the block. 
+ b.Sort() + var dropped fetchedLog + currentLogs := make([]fetchedLog, 0, len(b.logs)-1) + for _, l := range b.logs { + if dropped.upkeepID == nil && l.upkeepID.Cmp(fl.upkeepID) == 0 { + dropped = l + continue + } + currentLogs = append(currentLogs, l) + } + b.logs = currentLogs + return dropped, true + } else if len(b.logs)+len(b.visited) > maxBlockLogs { + // in case we have logs overflow in the buffer level, we drop a log based on + // shared, random (per block) order of the logs in the block. + b.Sort() + dropped := b.logs[0] + b.logs = b.logs[1:] + return dropped, true + } + + return fetchedLog{}, true +} + +// Has returns true if the block has the log, +// and the number of logs for that upkeep in the block. +func (b fetchedBlock) has(id *big.Int, log logpoller.Log) (bool, int) { + allLogs := append(b.logs, b.visited...) + upkeepLogs := 0 + for _, l := range allLogs { + if l.upkeepID.Cmp(id) != 0 { + continue + } + upkeepLogs++ + if l.log.BlockHash == log.BlockHash && l.log.TxHash == log.TxHash && l.log.LogIndex == log.LogIndex { + return true, upkeepLogs + } + } + return false, upkeepLogs +} + +func (b fetchedBlock) Clone() fetchedBlock { + logs := make([]fetchedLog, len(b.logs)) + copy(logs, b.logs) + visited := make([]fetchedLog, len(b.visited)) + copy(visited, b.visited) + return fetchedBlock{ + blockNumber: b.blockNumber, + logs: logs, + visited: visited, + } +} + +// Sort by log identifiers, shuffled using a pseduorandom souce that is shared across all nodes +// for a given block. 
+func (b *fetchedBlock) Sort() { + randSeed := random.GetRandomKeySource(nil, uint64(b.blockNumber)) + + shuffledLogIDs := make(map[string]string, len(b.logs)) + for _, log := range b.logs { + logID := log.getLogID() + shuffledLogIDs[logID] = random.ShuffleString(logID, randSeed) + } + + sort.SliceStable(b.logs, func(i, j int) bool { + return shuffledLogIDs[b.logs[i].getLogID()] < shuffledLogIDs[b.logs[j].getLogID()] + }) +} + +// logEventBuffer is a circular/ring buffer of fetched logs. +// Each entry in the buffer represents a block, +// and holds the logs fetched for that block. +type logEventBuffer struct { + lggr logger.Logger + lock sync.RWMutex + // size is the number of blocks supported by the buffer + size int32 + + numOfLogUpkeeps, fastExecLogsHigh uint32 + // blocks is the circular buffer of fetched blocks + blocks []fetchedBlock + // latestBlock is the latest block number seen + latestBlock int64 +} + +func newLogEventBuffer(lggr logger.Logger, size, numOfLogUpkeeps, fastExecLogsHigh int) *logEventBuffer { + return &logEventBuffer{ + lggr: lggr.Named("KeepersRegistry.LogEventBuffer"), + size: int32(size), + blocks: make([]fetchedBlock, size), + numOfLogUpkeeps: uint32(numOfLogUpkeeps), + fastExecLogsHigh: uint32(fastExecLogsHigh), + } +} + +func (b *logEventBuffer) latestBlockSeen() int64 { + return atomic.LoadInt64(&b.latestBlock) +} + +func (b *logEventBuffer) bufferSize() int { + return int(atomic.LoadInt32(&b.size)) +} + +func (b *logEventBuffer) SetLimits(numOfLogUpkeeps, fastExecLogsHigh int) { + atomic.StoreUint32(&b.numOfLogUpkeeps, uint32(numOfLogUpkeeps)) + atomic.StoreUint32(&b.fastExecLogsHigh, uint32(fastExecLogsHigh)) +} + +// enqueue adds logs (if not exist) to the buffer, returning the number of logs added +// minus the number of logs dropped. 
+func (b *logEventBuffer) enqueue(id *big.Int, logs ...logpoller.Log) int { + b.lock.Lock() + defer b.lock.Unlock() + + lggr := b.lggr.With("id", id.String()) + + maxBlockLogs := int(atomic.LoadUint32(&b.fastExecLogsHigh) * atomic.LoadUint32(&b.numOfLogUpkeeps)) + maxUpkeepLogs := int(atomic.LoadUint32(&b.fastExecLogsHigh)) + + latestBlock := b.latestBlockSeen() + added, dropped := 0, 0 + + for _, log := range logs { + if log.BlockNumber == 0 { + // invalid log + continue + } + i := b.blockNumberIndex(log.BlockNumber) + currentBlock := b.blocks[i] + if currentBlock.blockNumber < log.BlockNumber { + lggr.Debugw("Got log on a new block", "prevBlock", currentBlock.blockNumber, "newBlock", log.BlockNumber) + currentBlock.blockNumber = log.BlockNumber + currentBlock.logs = nil + currentBlock.visited = nil + } else if currentBlock.blockNumber > log.BlockNumber { + // not expected to happen + lggr.Debugw("Skipping log from old block", "currentBlock", currentBlock.blockNumber, "newBlock", log.BlockNumber) + continue + } + droppedLog, ok := currentBlock.Append(lggr, fetchedLog{upkeepID: id, log: log}, maxBlockLogs, maxUpkeepLogs) + if !ok { + // Skipping known logs + continue + } + if droppedLog.upkeepID != nil { + dropped++ + lggr.Debugw("Reached log buffer limits, dropping log", "blockNumber", droppedLog.log.BlockNumber, + "blockHash", droppedLog.log.BlockHash, "txHash", droppedLog.log.TxHash, "logIndex", droppedLog.log.LogIndex, + "upkeepID", droppedLog.upkeepID.String()) + } + added++ + b.blocks[i] = currentBlock + + if log.BlockNumber > latestBlock { + latestBlock = log.BlockNumber + } + } + + if latestBlock > b.latestBlockSeen() { + atomic.StoreInt64(&b.latestBlock, latestBlock) + } + if added > 0 { + lggr.Debugw("Added logs to buffer", "addedLogs", added, "dropped", dropped, "latestBlock", latestBlock) + } + + return added - dropped +} + +// peek returns the logs in range [latestBlock-blocks, latestBlock] +func (b *logEventBuffer) peek(blocks int) []fetchedLog { + 
latestBlock := b.latestBlockSeen() + if latestBlock == 0 { + return nil + } + if blocks > int(latestBlock) { + blocks = int(latestBlock) - 1 + } + + return b.peekRange(latestBlock-int64(blocks), latestBlock) +} + +// peekRange returns the logs between start and end inclusive. +func (b *logEventBuffer) peekRange(start, end int64) []fetchedLog { + b.lock.RLock() + defer b.lock.RUnlock() + + blocksInRange := b.getBlocksInRange(int(start), int(end)) + + var results []fetchedLog + for _, block := range blocksInRange { + // double checking that we don't have any gaps in the range + if block.blockNumber < start || block.blockNumber > end { + continue + } + results = append(results, block.logs...) + } + + sort.SliceStable(results, func(i, j int) bool { + return results[i].log.BlockNumber < results[j].log.BlockNumber + }) + + b.lggr.Debugw("Peeked logs", "results", len(results), "start", start, "end", end) + + return results +} + +// dequeueRange returns the logs between start and end inclusive. +func (b *logEventBuffer) dequeueRange(start, end int64, upkeepLimit, totalLimit int) []fetchedLog { + b.lock.Lock() + defer b.lock.Unlock() + + blocksInRange := b.getBlocksInRange(int(start), int(end)) + fetchedBlocks := make([]fetchedBlock, 0, len(blocksInRange)) + for _, block := range blocksInRange { + // Create clone of the blocks as they get processed and update underlying b.blocks + fetchedBlocks = append(fetchedBlocks, block.Clone()) + } + + // Sort the blocks in reverse order of block number so that latest logs + // are preferred while dequeueing. 
+ sort.SliceStable(fetchedBlocks, func(i, j int) bool { + return fetchedBlocks[i].blockNumber > fetchedBlocks[j].blockNumber + }) + + logsCount := map[string]int{} + totalCount := 0 + var results []fetchedLog + for _, block := range fetchedBlocks { + if block.blockNumber < start || block.blockNumber > end { + // double checking that we don't have any gaps in the range + continue + } + if totalCount >= totalLimit { + // reached total limit, no need to process more blocks + break + } + // Sort the logs in random order that is shared across all nodes. + // This ensures that nodes across the network will process the same logs. + block.Sort() + var remainingLogs, blockResults []fetchedLog + for _, log := range block.logs { + if totalCount >= totalLimit { + remainingLogs = append(remainingLogs, log) + continue + } + if logsCount[log.upkeepID.String()] >= upkeepLimit { + remainingLogs = append(remainingLogs, log) + continue + } + blockResults = append(blockResults, log) + logsCount[log.upkeepID.String()]++ + totalCount++ + } + if len(blockResults) == 0 { + continue + } + results = append(results, blockResults...) + block.visited = append(block.visited, blockResults...) + block.logs = remainingLogs + b.blocks[b.blockNumberIndex(block.blockNumber)] = block + } + + if len(results) > 0 { + b.lggr.Debugw("Dequeued logs", "results", len(results), "start", start, "end", end) + } + + return results +} + +// getBlocksInRange returns the blocks between start and end. +// NOTE: this function should be called with the lock held +func (b *logEventBuffer) getBlocksInRange(start, end int) []fetchedBlock { + var blocksInRange []fetchedBlock + start, end = b.blockRangeToIndices(start, end) + if start == -1 || end == -1 { + // invalid range + return blocksInRange + } + if start <= end { + // Normal range, need to return indices from start to end(inclusive) + return b.blocks[start : end+1] + } + // in case we get circular range such as [0, 1, end, ... 
, start, ..., size-1] + // we need to return the blocks in two ranges: [0, end](inclusive) and [start, size-1] + blocksInRange = append(blocksInRange, b.blocks[start:]...) + blocksInRange = append(blocksInRange, b.blocks[:end+1]...) + + return blocksInRange +} + +// blockRangeToIndices returns the normalized range of start to end block range, +// to indices aligned with buffer size. Note ranges inclusive of start, end indices. +func (b *logEventBuffer) blockRangeToIndices(start, end int) (int, int) { + latest := b.latestBlockSeen() + if end > int(latest) { + // Limit end of range to latest block seen + end = int(latest) + } + if end < start || start == 0 || end == 0 { + // invalid range + return -1, -1 + } + size := b.bufferSize() + if end-start >= size { + // If range requires more than buffer size blocks, only to return + // last size blocks as that's the max the buffer stores. + start = (end - size) + 1 + } + return b.blockNumberIndex(int64(start)), b.blockNumberIndex(int64(end)) +} + +// blockNumberIndex returns the index of the block in the buffer +func (b *logEventBuffer) blockNumberIndex(bn int64) int { + return int(bn-1) % b.bufferSize() +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_test.go new file mode 100644 index 00000000..f99967eb --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_test.go @@ -0,0 +1,892 @@ +package logprovider + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/logger" + 
"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" +) + +func TestLogEventBuffer_GetBlocksInRange(t *testing.T) { + size := 3 + maxSeenBlock := int64(4) + buf := newLogEventBuffer(logger.TestLogger(t), size, 10, 10) + + buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, + ) + + buf.enqueue(big.NewInt(2), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 2}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 2}, + logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ) + + tests := []struct { + name string + from int64 + to int64 + want int + }{ + { + name: "all", + from: 2, + to: 4, + want: 3, + }, + { + name: "partial", + from: 2, + to: 3, + want: 2, + }, + { + name: "circular", + from: 3, + to: 4, + want: 2, + }, + { + name: "zero start", + from: 0, + to: 2, + }, + { + name: "invalid zero end", + from: 0, + to: 0, + }, + { + name: "invalid from larger than to", + from: 4, + to: 2, + }, + { + name: "outside max last seen", + from: 5, + to: 10, + }, + { + name: "limited by max last seen", + from: 2, + to: 5, + want: 3, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + blocks := buf.getBlocksInRange(int(tc.from), int(tc.to)) + require.Equal(t, tc.want, len(blocks)) + if tc.want > 0 { + from := tc.from + require.Equal(t, from, blocks[0].blockNumber) + to := tc.to + if to >= maxSeenBlock { + to = maxSeenBlock + } + require.Equal(t, to, blocks[len(blocks)-1].blockNumber) + } + }) + } +} + +func TestLogEventBuffer_GetBlocksInRange_Circular(t *testing.T) { + size := 4 + buf := newLogEventBuffer(logger.TestLogger(t), size, 10, 10) + + require.Equal(t, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 1, TxHash: 
common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, + ), 3) + + require.Equal(t, buf.enqueue(big.NewInt(2), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 2}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 2}, + logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ), 3) + + require.Equal(t, buf.enqueue(big.NewInt(3), + logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 4}, + logpoller.Log{BlockNumber: 5, TxHash: common.HexToHash("0x3"), LogIndex: 2}, + logpoller.Log{BlockNumber: 5, TxHash: common.HexToHash("0x3"), LogIndex: 5}, + ), 3) + + tests := []struct { + name string + from int64 + to int64 + expectedBlocks []int64 + }{ + { + name: "happy flow", + from: 2, + to: 5, + expectedBlocks: []int64{2, 3, 4, 5}, + }, + { + name: "range overflow circular", + from: 1, + to: 6, + expectedBlocks: []int64{2, 3, 4, 5}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + blocks := buf.getBlocksInRange(int(tc.from), int(tc.to)) + require.Equal(t, len(tc.expectedBlocks), len(blocks)) + expectedBlockNumbers := map[int64]bool{} + for _, b := range tc.expectedBlocks { + expectedBlockNumbers[b] = false + } + for _, b := range blocks { + expectedBlockNumbers[b.blockNumber] = true + } + for k, v := range expectedBlockNumbers { + require.True(t, v, "missing block %d", k) + } + }) + } +} + +func TestLogEventBuffer_EnqueueDequeue(t *testing.T) { + t.Run("dequeue empty", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 3, 10, 10) + + results := buf.peekRange(int64(1), int64(2)) + require.Equal(t, 0, len(results)) + results = buf.peek(2) + require.Equal(t, 0, len(results)) + }) + + t.Run("enqueue", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 3, 10, 10) + + 
buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ) + buf.lock.Lock() + require.Equal(t, 2, len(buf.blocks[0].logs)) + buf.lock.Unlock() + }) + + t.Run("enqueue logs overflow", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 2, 2, 2) + + require.Equal(t, 2, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + )) + buf.lock.Lock() + require.Equal(t, 2, len(buf.blocks[0].logs)) + buf.lock.Unlock() + }) + + t.Run("enqueue logs overflow with dynamic limits", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 2, 10, 2) + + require.Equal(t, 2, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + )) + buf.SetLimits(10, 3) + require.Equal(t, 3, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 1}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 2}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 3}, + )) + + buf.lock.Lock() + defer buf.lock.Unlock() + require.Equal(t, 2, len(buf.blocks[0].logs)) + require.Equal(t, 3, len(buf.blocks[1].logs)) + }) + + t.Run("enqueue logs overflow with dynamic limits", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 2, 10, 2) + + require.Equal(t, 2, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), 
LogIndex: 0}, + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 3}, + )) + buf.SetLimits(10, 3) + require.Equal(t, 3, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 1}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 2}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 3}, + )) + + buf.lock.Lock() + defer buf.lock.Unlock() + require.Equal(t, 2, len(buf.blocks[0].logs)) + }) + + t.Run("enqueue block overflow", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 3, 2, 10) + + require.Equal(t, 5, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, + logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 0}, + logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 1}, + )) + buf.lock.Lock() + require.Equal(t, 2, len(buf.blocks[0].logs)) + buf.lock.Unlock() + }) + + t.Run("enqueue upkeep block overflow", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 10, 10, 2) + + require.Equal(t, 2, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 3}, + )) + buf.lock.Lock() + require.Equal(t, 2, len(buf.blocks[0].logs)) + buf.lock.Unlock() + }) + + 
t.Run("peek range after dequeue", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 3, 10, 10) + + require.Equal(t, buf.enqueue(big.NewInt(10), + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 10}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 11}, + ), 2) + require.Equal(t, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ), 2) + results := buf.peekRange(int64(1), int64(2)) + require.Equal(t, 2, len(results)) + verifyBlockNumbers(t, results, 1, 2) + removed := buf.dequeueRange(int64(1), int64(2), 2, 10) + require.Equal(t, 2, len(removed)) + results = buf.peekRange(int64(1), int64(2)) + require.Equal(t, 0, len(results)) + }) + + t.Run("enqueue peek and dequeue", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 4, 10, 10) + + require.Equal(t, buf.enqueue(big.NewInt(10), + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 10}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 11}, + ), 2) + require.Equal(t, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 1}, + ), 2) + results := buf.peek(8) + require.Equal(t, 4, len(results)) + verifyBlockNumbers(t, results, 1, 2, 3, 3) + removed := buf.dequeueRange(1, 3, 5, 5) + require.Equal(t, 4, len(removed)) + buf.lock.Lock() + require.Equal(t, 0, len(buf.blocks[0].logs)) + require.Equal(t, int64(2), buf.blocks[1].blockNumber) + require.Equal(t, 1, len(buf.blocks[1].visited)) + buf.lock.Unlock() + }) + + t.Run("enqueue and peek range circular", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 3, 10, 10) + + require.Equal(t, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 1, TxHash: 
common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, + ), 3) + require.Equal(t, buf.enqueue(big.NewInt(10), + logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 10}, + logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 11}, + ), 2) + + results := buf.peekRange(int64(1), int64(1)) + require.Equal(t, 0, len(results)) + + results = buf.peekRange(int64(3), int64(5)) + require.Equal(t, 3, len(results)) + verifyBlockNumbers(t, results, 3, 4, 4) + }) + + t.Run("doesnt enqueue old blocks", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 3, 5, 10) + + require.Equal(t, buf.enqueue(big.NewInt(10), + logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 10}, + logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 11}, + ), 2) + require.Equal(t, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, + ), 2) + results := buf.peekRange(int64(1), int64(5)) + fmt.Println(results) + verifyBlockNumbers(t, results, 2, 3, 4, 4) + }) + + t.Run("dequeue with limits returns latest block logs", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 3, 5, 10) + require.Equal(t, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, + logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 0}, + logpoller.Log{BlockNumber: 5, TxHash: common.HexToHash("0x5"), LogIndex: 0}, + ), 5) + + logs := 
buf.dequeueRange(1, 5, 2, 10) + require.Equal(t, 2, len(logs)) + require.Equal(t, int64(5), logs[0].log.BlockNumber) + require.Equal(t, int64(4), logs[1].log.BlockNumber) + + require.Equal(t, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 1}, + logpoller.Log{BlockNumber: 5, TxHash: common.HexToHash("0x5"), LogIndex: 1}, + ), 2) + + logs = buf.dequeueRange(1, 5, 3, 2) + require.Equal(t, 2, len(logs)) + }) + + t.Run("dequeue doesn't return same logs again", func(t *testing.T) { + buf := newLogEventBuffer(logger.TestLogger(t), 3, 5, 10) + require.Equal(t, buf.enqueue(big.NewInt(1), + logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, + logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, + logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, + ), 3) + + logs := buf.dequeueRange(3, 3, 2, 10) + fmt.Println(logs) + require.Equal(t, 1, len(logs)) + + logs = buf.dequeueRange(3, 3, 2, 10) + fmt.Println(logs) + require.Equal(t, 0, len(logs)) + }) +} + +func TestLogEventBuffer_FetchedBlock_Append(t *testing.T) { + type appendArgs struct { + fl fetchedLog + maxBlockLogs, maxUpkeepLogs int + added, dropped bool + } + + tests := []struct { + name string + blockNumber int64 + logs []fetchedLog + visited []fetchedLog + toAdd []appendArgs + expected []fetchedLog + added bool + }{ + { + name: "empty block", + blockNumber: 1, + logs: []fetchedLog{}, + visited: []fetchedLog{}, + toAdd: []appendArgs{ + { + fl: fetchedLog{ + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 0, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + maxBlockLogs: 10, + maxUpkeepLogs: 2, + added: true, + }, + }, + expected: []fetchedLog{ + { + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 0, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + }, + }, + { + name: 
"existing log", + blockNumber: 1, + logs: []fetchedLog{ + { + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 0, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + }, + visited: []fetchedLog{}, + toAdd: []appendArgs{ + { + fl: fetchedLog{ + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 0, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + maxBlockLogs: 10, + maxUpkeepLogs: 2, + added: false, + }, + }, + expected: []fetchedLog{ + { + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 0, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + }, + }, + { + name: "visited log", + blockNumber: 1, + logs: []fetchedLog{}, + visited: []fetchedLog{ + { + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 0, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + }, + toAdd: []appendArgs{ + { + fl: fetchedLog{ + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 0, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + maxBlockLogs: 10, + maxUpkeepLogs: 2, + added: false, + }, + }, + expected: []fetchedLog{}, + }, + { + name: "upkeep log limits", + blockNumber: 1, + logs: []fetchedLog{}, + visited: []fetchedLog{}, + toAdd: []appendArgs{ + { + fl: fetchedLog{ + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 0, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + maxBlockLogs: 10, + maxUpkeepLogs: 2, + added: true, + }, + { + fl: fetchedLog{ + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + maxBlockLogs: 10, + maxUpkeepLogs: 2, + added: true, + }, + { + fl: fetchedLog{ + log: logpoller.Log{ + BlockNumber: 1, + TxHash: 
common.HexToHash("0x1"), + LogIndex: 2, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + maxBlockLogs: 10, + maxUpkeepLogs: 2, + added: true, + dropped: true, + }, + }, + expected: []fetchedLog{ + { + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + { + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 2, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + }, + }, + { + name: "block log limits", + blockNumber: 1, + logs: []fetchedLog{}, + visited: []fetchedLog{}, + toAdd: []appendArgs{ + { + fl: fetchedLog{ + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 0, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + maxBlockLogs: 2, + maxUpkeepLogs: 4, + added: true, + }, + { + fl: fetchedLog{ + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + maxBlockLogs: 2, + maxUpkeepLogs: 4, + added: true, + }, + { + fl: fetchedLog{ + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 2, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + maxBlockLogs: 2, + maxUpkeepLogs: 4, + added: true, + dropped: true, + }, + }, + expected: []fetchedLog{ + { + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + { + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 2, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + lggr := logger.TestLogger(t) + b := fetchedBlock{ + blockNumber: tc.blockNumber, + logs: 
make([]fetchedLog, len(tc.logs)), + visited: make([]fetchedLog, len(tc.visited)), + } + copy(b.logs, tc.logs) + copy(b.visited, tc.visited) + + for _, args := range tc.toAdd { + dropped, added := b.Append(lggr, args.fl, args.maxBlockLogs, args.maxUpkeepLogs) + require.Equal(t, args.added, added) + if args.dropped { + require.NotNil(t, dropped.upkeepID) + } else { + require.Nil(t, dropped.upkeepID) + } + } + // clear cached logIDs + for i := range b.logs { + b.logs[i].cachedLogID = "" + } + require.Equal(t, tc.expected, b.logs) + }) + } +} +func TestLogEventBuffer_FetchedBlock_Sort(t *testing.T) { + tests := []struct { + name string + blockNumber int64 + logs []fetchedLog + beforeSort []string + afterSort []string + iterations int + }{ + { + name: "no logs", + blockNumber: 10, + logs: []fetchedLog{}, + beforeSort: []string{}, + afterSort: []string{}, + }, + { + name: "single log", + blockNumber: 1, + logs: []fetchedLog{ + { + log: logpoller.Log{ + BlockHash: common.HexToHash("0x111"), + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 0, + }, + }, + }, + beforeSort: []string{ + "0000000000000000000000000000000000000000000000000000000000000111000000000000000000000000000000000000000000000000000000000000000100000000", + }, + afterSort: []string{ + "0000000000000000000000000000000000000000000000000000000000000111000000000000000000000000000000000000000000000000000000000000000100000000", + }, + }, + { + name: "multiple logs with 10 iterations", + blockNumber: 1, + logs: []fetchedLog{ + { + log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), + TxHash: common.HexToHash("0xb711bd1103927611ee41152aa8ae27f3330"), + LogIndex: 0, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + { + log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), + TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), + LogIndex: 0, + }, + 
upkeepID: core.GenUpkeepID(types.LogTrigger, "222").BigInt(), + }, + { + log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), + TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), + LogIndex: 4, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + { + log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), + TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), + LogIndex: 3, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "222").BigInt(), + }, + { + log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), + TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), + LogIndex: 2, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + { + log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), + TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), + LogIndex: 5, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + { + log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), + TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), + LogIndex: 3, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + { + log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), + TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), + LogIndex: 1, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + }, + beforeSort: []string{ + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000b711bd1103927611ee41152aa8ae27f333000000000", + 
"00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000000", + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000004", + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000003", + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000002", + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000005", + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000003", + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000001", + }, + afterSort: []string{ + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000b711bd1103927611ee41152aa8ae27f333000000000", + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000000", + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000001", + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000002", + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000003", + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000003", + 
"00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000004", + "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000005", + }, + iterations: 10, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + b := fetchedBlock{ + blockNumber: tc.blockNumber, + logs: make([]fetchedLog, len(tc.logs)), + } + if tc.iterations == 0 { + tc.iterations = 1 + } + // performing the same multiple times should yield the same result + // default is one iteration + for i := 0; i < tc.iterations; i++ { + copy(b.logs, tc.logs) + logIDs := getLogIds(b) + require.Equal(t, len(tc.beforeSort), len(logIDs)) + require.Equal(t, tc.beforeSort, logIDs) + b.Sort() + logIDsAfterSort := getLogIds(b) + require.Equal(t, len(tc.afterSort), len(logIDsAfterSort)) + require.Equal(t, tc.afterSort, logIDsAfterSort) + } + }) + } +} + +func TestLogEventBuffer_FetchedBlock_Clone(t *testing.T) { + b1 := fetchedBlock{ + blockNumber: 1, + logs: []fetchedLog{ + { + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 0, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + { + log: logpoller.Log{ + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + LogIndex: 2, + }, + upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), + }, + }, + } + + b2 := b1.Clone() + require.Equal(t, b1.blockNumber, b2.blockNumber) + require.Equal(t, len(b1.logs), len(b2.logs)) + require.Equal(t, b1.logs[0].log.BlockNumber, b2.logs[0].log.BlockNumber) + + b1.blockNumber = 2 + b1.logs[0].log.BlockNumber = 2 + require.NotEqual(t, b1.blockNumber, b2.blockNumber) + require.NotEqual(t, b1.logs[0].log.BlockNumber, b2.logs[0].log.BlockNumber) +} + +func verifyBlockNumbers(t *testing.T, logs []fetchedLog, bns ...int64) { + require.Equal(t, len(bns), len(logs), "expected length mismatch") + for i, log := 
range logs { + require.Equal(t, bns[i], log.log.BlockNumber, "wrong block number") + } +} + +func getLogIds(b fetchedBlock) []string { + logIDs := make([]string, len(b.logs)) + for i, l := range b.logs { + ext := ocr2keepers.LogTriggerExtension{ + TxHash: l.log.TxHash, + Index: uint32(l.log.LogIndex), + BlockHash: l.log.BlockHash, + } + logIDs[i] = hex.EncodeToString(ext.LogIdentifier()) + } + return logIDs +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go new file mode 100644 index 00000000..40bf3b60 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -0,0 +1,70 @@ +package logprovider + +import ( + "time" + + "golang.org/x/time/rate" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" +) + +// New creates a new log event provider and recoverer. +// using default values for the options. +func New(lggr logger.Logger, poller logpoller.LogPoller, c client.Client, stateStore core.UpkeepStateReader, finalityDepth uint32) (LogEventProvider, LogRecoverer) { + filterStore := NewUpkeepFilterStore() + packer := NewLogEventsPacker() + opts := NewOptions(int64(finalityDepth)) + provider := NewLogProvider(lggr, poller, packer, filterStore, opts) + recoverer := NewLogRecoverer(lggr, poller, c, stateStore, packer, filterStore, opts) + + return provider, recoverer +} + +// LogTriggersOptions holds the options for the log trigger components. +type LogTriggersOptions struct { + // LookbackBlocks is the number of blocks the provider will look back for logs. + // The recoverer will scan for logs up to this depth. + // NOTE: MUST be set to a greater-or-equal to the chain's finality depth. 
+ LookbackBlocks int64 + // ReadInterval is the interval to fetch logs in the background. + ReadInterval time.Duration + // BlockRateLimit is the rate limit on the range of blocks the we fetch logs for. + BlockRateLimit rate.Limit + // blockLimitBurst is the burst upper limit on the range of blocks the we fetch logs for. + BlockLimitBurst int + // Finality depth is the number of blocks to wait before considering a block final. + FinalityDepth int64 +} + +func NewOptions(finalityDepth int64) LogTriggersOptions { + opts := new(LogTriggersOptions) + opts.Defaults(finalityDepth) + return *opts +} + +// Defaults sets the default values for the options. +// NOTE: o.LookbackBlocks should be set only from within tests +func (o *LogTriggersOptions) Defaults(finalityDepth int64) { + if o.LookbackBlocks == 0 { + lookbackBlocks := int64(200) + if lookbackBlocks < finalityDepth { + lookbackBlocks = finalityDepth + } + o.LookbackBlocks = lookbackBlocks + } + if o.ReadInterval == 0 { + o.ReadInterval = time.Second + } + if o.BlockLimitBurst == 0 { + o.BlockLimitBurst = int(o.LookbackBlocks) + } + if o.BlockRateLimit == 0 { + o.BlockRateLimit = rate.Every(o.ReadInterval) + } + if o.FinalityDepth == 0 { + o.FinalityDepth = finalityDepth + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter.go new file mode 100644 index 00000000..9eed3fff --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter.go @@ -0,0 +1,86 @@ +package logprovider + +import ( + "bytes" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "golang.org/x/time/rate" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" +) + +type upkeepFilter struct { + addr []byte + // selector is the filter selector in log trigger config + selector uint8 + topics []common.Hash + upkeepID *big.Int + // configUpdateBlock is the block number the filter was last 
updated at + configUpdateBlock uint64 + // lastPollBlock is the last block number the logs were fetched for this upkeep + // used by log event provider. + lastPollBlock int64 + // blockLimiter is used to limit the number of blocks to fetch logs for an upkeep. + // used by log event provider. + blockLimiter *rate.Limiter + // lastRePollBlock is the last block number the logs were recovered for this upkeep + // used by log recoverer. + lastRePollBlock int64 +} + +func (f upkeepFilter) Clone() upkeepFilter { + topics := make([]common.Hash, len(f.topics)) + copy(topics, f.topics) + addr := make([]byte, len(f.addr)) + copy(addr, f.addr) + return upkeepFilter{ + upkeepID: f.upkeepID, + selector: f.selector, + topics: topics, + addr: addr, + configUpdateBlock: f.configUpdateBlock, + lastPollBlock: f.lastPollBlock, + lastRePollBlock: f.lastRePollBlock, + blockLimiter: f.blockLimiter, + } +} + +// Select returns a slice of logs which match the upkeep filter. +func (f upkeepFilter) Select(logs ...logpoller.Log) []logpoller.Log { + var selected []logpoller.Log + for _, log := range logs { + if f.match(log) { + selected = append(selected, log) + } + } + return selected +} + +// match returns a bool indicating if the log's topics data matches selector and indexed topics in upkeep filter. 
+func (f upkeepFilter) match(log logpoller.Log) bool { + filters := f.topics[1:] + selector := f.selector + + if selector == 0 { + // no filters + return true + } + + for i, filter := range filters { + // bitwise AND the selector with the index to check + // if the filter is needed + mask := uint8(1 << uint8(i)) + if selector&mask == uint8(0) { + continue + } + if len(log.Topics) <= i+1 { + // log doesn't have enough topics + return false + } + if !bytes.Equal(filter.Bytes(), log.Topics[i+1]) { + return false + } + } + return true +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter_store.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter_store.go new file mode 100644 index 00000000..07173ac7 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter_store.go @@ -0,0 +1,158 @@ +package logprovider + +import ( + "math/big" + "sync" +) + +type UpkeepFilterStore interface { + GetIDs(selector func(upkeepFilter) bool) []*big.Int + UpdateFilters(updater func(upkeepFilter, upkeepFilter) upkeepFilter, filters ...upkeepFilter) + Has(id *big.Int) bool + Get(id *big.Int) *upkeepFilter + RangeFiltersByIDs(iterator func(int, upkeepFilter), ids ...*big.Int) + GetFilters(selector func(upkeepFilter) bool) []upkeepFilter + AddActiveUpkeeps(filters ...upkeepFilter) + RemoveActiveUpkeeps(filters ...upkeepFilter) + Size() int +} + +var _ UpkeepFilterStore = &upkeepFilterStore{} + +type upkeepFilterStore struct { + lock *sync.RWMutex + // filters is a map of upkeepID to upkeepFilter + filters map[string]upkeepFilter +} + +func NewUpkeepFilterStore() *upkeepFilterStore { + return &upkeepFilterStore{ + lock: &sync.RWMutex{}, + filters: make(map[string]upkeepFilter), + } +} + +func (s *upkeepFilterStore) GetIDs(selector func(upkeepFilter) bool) []*big.Int { + s.lock.RLock() + defer s.lock.RUnlock() + + if selector == nil { + // noop selector returns true for all filters + selector = 
func(upkeepFilter) bool { return true } + } + + var ids []*big.Int + for _, f := range s.filters { + if selector(f) { + ids = append(ids, f.upkeepID) + } + } + + return ids +} + +func (s *upkeepFilterStore) UpdateFilters(resolveUpdated func(upkeepFilter, upkeepFilter) upkeepFilter, filters ...upkeepFilter) { + s.lock.Lock() + defer s.lock.Unlock() + + if resolveUpdated == nil { + // noop resolveUpdated will use the newer filter + resolveUpdated = func(_ upkeepFilter, f upkeepFilter) upkeepFilter { return f } + } + + for _, f := range filters { + uid := f.upkeepID.String() + orig, ok := s.filters[uid] + if !ok { + // not found, turned inactive probably + continue + } + updated := resolveUpdated(orig, f) + s.filters[uid] = updated + } +} + +func (s *upkeepFilterStore) Has(id *big.Int) bool { + s.lock.RLock() + defer s.lock.RUnlock() + + _, ok := s.filters[id.String()] + return ok +} + +func (s *upkeepFilterStore) Get(id *big.Int) *upkeepFilter { + s.lock.RLock() + defer s.lock.RUnlock() + + f, ok := s.filters[id.String()] + if !ok { + return nil + } + fp := f.Clone() + return &fp +} + +func (s *upkeepFilterStore) RangeFiltersByIDs(iterator func(int, upkeepFilter), ids ...*big.Int) { + s.lock.RLock() + defer s.lock.RUnlock() + + if iterator == nil { + // noop iterator does nothing + iterator = func(int, upkeepFilter) {} + } + + for i, id := range ids { + f, ok := s.filters[id.String()] + if !ok { + // in case the filter is not found, we still want to call the iterator + // with an empty filter, so + iterator(i, upkeepFilter{upkeepID: id}) + } else { + iterator(i, f) + } + } +} + +func (s *upkeepFilterStore) GetFilters(selector func(upkeepFilter) bool) []upkeepFilter { + s.lock.RLock() + defer s.lock.RUnlock() + + if selector == nil { + // noop selector returns true for all filters + selector = func(upkeepFilter) bool { return true } + } + + var filters []upkeepFilter + for _, f := range s.filters { + if selector(f) { + filters = append(filters, f.Clone()) + } + } + 
return filters +} + +func (s *upkeepFilterStore) AddActiveUpkeeps(filters ...upkeepFilter) { + s.lock.Lock() + defer s.lock.Unlock() + + for _, f := range filters { + s.filters[f.upkeepID.String()] = f + } +} + +func (s *upkeepFilterStore) RemoveActiveUpkeeps(filters ...upkeepFilter) { + s.lock.Lock() + defer s.lock.Unlock() + + for _, f := range filters { + uid := f.upkeepID.String() + delete(s.filters, uid) + } +} + +func (s *upkeepFilterStore) Size() int { + s.lock.RLock() + defer s.lock.RUnlock() + + return len(s.filters) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter_store_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter_store_test.go new file mode 100644 index 00000000..b2030a8d --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter_store_test.go @@ -0,0 +1,93 @@ +package logprovider + +import ( + "math/big" + "sort" + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFilterStore_CRUD(t *testing.T) { + tests := []struct { + name string + initial []upkeepFilter + toAdd []upkeepFilter + expectedPostAdd []upkeepFilter + toRemove []upkeepFilter + expectedPostRemove []upkeepFilter + }{ + { + "empty", + []upkeepFilter{}, + []upkeepFilter{}, + []upkeepFilter{}, + []upkeepFilter{}, + []upkeepFilter{}, + }, + { + "add rm one", + []upkeepFilter{}, + []upkeepFilter{{upkeepID: big.NewInt(1)}}, + []upkeepFilter{{upkeepID: big.NewInt(1)}}, + []upkeepFilter{{upkeepID: big.NewInt(1)}}, + []upkeepFilter{}, + }, + { + "add rm multiple", + []upkeepFilter{}, + []upkeepFilter{{upkeepID: big.NewInt(1)}, {upkeepID: big.NewInt(2)}}, + []upkeepFilter{{upkeepID: big.NewInt(1)}, {upkeepID: big.NewInt(2)}}, + []upkeepFilter{{upkeepID: big.NewInt(1)}}, + []upkeepFilter{{upkeepID: big.NewInt(2)}}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + s := NewUpkeepFilterStore() + s.AddActiveUpkeeps(tc.initial...) 
+ require.Equal(t, len(tc.initial), len(s.GetIDs(nil))) + s.AddActiveUpkeeps(tc.toAdd...) + require.Equal(t, len(tc.expectedPostAdd), s.Size()) + filters := s.GetFilters(func(f upkeepFilter) bool { return true }) + require.Equal(t, len(tc.expectedPostAdd), len(filters)) + if len(filters) > 0 { + sort.Slice(filters, func(i, j int) bool { + return filters[i].upkeepID.Cmp(filters[j].upkeepID) < 0 + }) + for i, f := range filters { + require.Equal(t, tc.expectedPostAdd[i].upkeepID, f.upkeepID) + } + } + s.RemoveActiveUpkeeps(tc.toRemove...) + require.Equal(t, len(tc.expectedPostRemove), len(s.GetIDs(func(upkeepFilter) bool { return true }))) + }) + } +} + +func TestFilterStore_Concurrency(t *testing.T) { + s := NewUpkeepFilterStore() + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + s.AddActiveUpkeeps(upkeepFilter{upkeepID: big.NewInt(1)}) + s.AddActiveUpkeeps(upkeepFilter{upkeepID: big.NewInt(2)}) + }() + wg.Add(1) + go func() { + defer wg.Done() + s.AddActiveUpkeeps(upkeepFilter{upkeepID: big.NewInt(2)}) + }() + + go func() { + _ = s.GetIDs(nil) + }() + + wg.Wait() + + require.Equal(t, 2, s.Size()) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter_test.go new file mode 100644 index 00000000..ba606f10 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/filter_test.go @@ -0,0 +1,196 @@ +package logprovider + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" +) + +func TestUpkeepFilter_Select(t *testing.T) { + var zeroBytes [32]byte + emptyTopic := common.BytesToHash(zeroBytes[:]) + contractAddress := common.HexToAddress("0xB9F3af0c2CbfE108efd0E23F7b0a151Ea42f764E") + uid := big.NewInt(123456) + topic10 := 
"0x000000000000000000000000000000000000000000000000000000000000007b" // decimal 123 encoded + topic20 := "0x0000000000000000000000000000000000000000000000000000000000000001" // bool true encoded + topic30 := "0x00000000000000000000000082b8b466f4be252e56af8a00aa28838866686062" // address encoded + topic11 := "0x000000000000000000000000000000000000000000000000000000000000007a" // decimal 122 encoded + topic21 := "0x0000000000000000000000000000000000000000000000000000000000000000" // bool false encoded + topic31 := "0x000000000000000000000000f91a27d2f37a36f1e6acc681b07b1dd2e288aebc" // address encoded + + log1 := logpoller.Log{ + Topics: [][]byte{ + contractAddress.Bytes(), + hexutil.MustDecode(topic10), + hexutil.MustDecode(topic20), + hexutil.MustDecode(topic30), + }, + } + log2 := logpoller.Log{ + Topics: [][]byte{ + contractAddress.Bytes(), + hexutil.MustDecode(topic11), + hexutil.MustDecode(topic21), + hexutil.MustDecode(topic31), + }, + } + log3 := logpoller.Log{ + Topics: [][]byte{ + contractAddress.Bytes(), + hexutil.MustDecode(topic11), + hexutil.MustDecode(topic20), + hexutil.MustDecode(topic31), + }, + } + log4 := logpoller.Log{ + Topics: [][]byte{ + contractAddress.Bytes(), + hexutil.MustDecode(topic10), + hexutil.MustDecode(topic21), + hexutil.MustDecode(topic31), + }, + } + log5 := logpoller.Log{ + Topics: [][]byte{ + contractAddress.Bytes(), + hexutil.MustDecode(topic10), + hexutil.MustDecode(topic20), + hexutil.MustDecode(topic30), + }, + } + + tests := []struct { + name string + filter upkeepFilter + logs []logpoller.Log + expectedLogs []logpoller.Log + }{ + { + "no selector configured - all logs are returned", + upkeepFilter{ + selector: 0, + topics: []common.Hash{common.BytesToHash(contractAddress.Bytes()), emptyTopic, emptyTopic, emptyTopic}, + upkeepID: uid, + }, + []logpoller.Log{ + log1, + log2, + }, + []logpoller.Log{ + log1, + log2, + }, + }, + { + "selector is 1 - topics 1 is used to filter logs", + upkeepFilter{ + selector: 1, + topics: 
[]common.Hash{common.BytesToHash(contractAddress.Bytes()), common.HexToHash(topic10), emptyTopic, emptyTopic}, + upkeepID: uid, + }, + []logpoller.Log{ + log1, + log2, + }, + []logpoller.Log{ + log1, + }, + }, + { + "selector is 2 - topic 2 is used to filter logs", + upkeepFilter{ + selector: 2, + topics: []common.Hash{common.BytesToHash(contractAddress.Bytes()), emptyTopic, common.HexToHash(topic21), emptyTopic}, + upkeepID: uid, + }, + []logpoller.Log{ + log1, + log2, + }, + []logpoller.Log{ + log2, + }, + }, + { + "selector is 3 - topics 1 2 are used to filter logs", + upkeepFilter{ + selector: 3, + topics: []common.Hash{common.BytesToHash(contractAddress.Bytes()), common.HexToHash(topic10), common.HexToHash(topic21), emptyTopic}, + upkeepID: uid, + }, + []logpoller.Log{ + log1, + log2, + log3, + log4, + }, + []logpoller.Log{ + log4, + }, + }, + { + "selector is 4 - topic 3 is used to filter logs", + upkeepFilter{ + selector: 4, + topics: []common.Hash{common.BytesToHash(contractAddress.Bytes()), emptyTopic, emptyTopic, common.HexToHash(topic31)}, + upkeepID: uid, + }, + []logpoller.Log{ + log1, + log2, + }, + []logpoller.Log{ + log2, + }, + }, + { + "selector is 5 - topics 1 3 are used to filter logs", + upkeepFilter{ + selector: 5, + topics: []common.Hash{common.BytesToHash(contractAddress.Bytes()), common.HexToHash(topic11), emptyTopic, common.HexToHash(topic31)}, + upkeepID: uid, + }, + []logpoller.Log{ + log1, + log2, + log3, + log4, + }, + []logpoller.Log{ + log2, + log3, + }, + }, + { + "selector is 7 - topics 1 2 3 are used to filter logs", + upkeepFilter{ + selector: 7, + topics: []common.Hash{common.BytesToHash(contractAddress.Bytes()), common.HexToHash(topic10), common.HexToHash(topic20), common.HexToHash(topic30)}, + upkeepID: uid, + }, + []logpoller.Log{ + log1, + log2, + log3, + log4, + log5, + }, + []logpoller.Log{ + log1, + log5, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + filteredLogs := 
tc.filter.Select(tc.logs...) + assert.Equal(t, tc.expectedLogs, filteredLogs) + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go new file mode 100644 index 00000000..42d25d4b --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -0,0 +1,718 @@ +package logprovider_test + +import ( + "context" + "errors" + "math/big" + "testing" + "time" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + "golang.org/x/time/rate" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_upkeep_counter_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + evmregistry21 "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + 
"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +func TestIntegration_LogEventProvider(t *testing.T) { + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + backend, stopMining, accounts := setupBackend(t) + defer stopMining() + carrol := accounts[2] + + db := setupDB(t) + defer db.Close() + + opts := logprovider.NewOptions(200) + opts.ReadInterval = time.Second / 2 + lp, ethClient := setupDependencies(t, db, backend) + filterStore := logprovider.NewUpkeepFilterStore() + provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) + logProvider := provider.(logprovider.LogEventProviderTest) + + n := 10 + + backend.Commit() + lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block + + ids, addrs, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) + lp.PollAndSaveLogs(ctx, int64(n)) + + go func() { + if err := logProvider.Start(ctx); err != nil { + t.Logf("error starting log provider: %s", err) + t.Fail() + } + }() + defer logProvider.Close() + + logsRounds := 10 + + poll := pollFn(ctx, t, lp, ethClient) + + triggerEvents(ctx, t, backend, carrol, logsRounds, poll, contracts...) 
+ + poll(backend.Commit()) + + waitLogPoller(ctx, t, backend, lp, ethClient) + + waitLogProvider(ctx, t, logProvider, 3) + + allPayloads := collectPayloads(ctx, t, logProvider, n, 5) + require.GreaterOrEqual(t, len(allPayloads), n, + "failed to get logs after restart") + + t.Run("Restart", func(t *testing.T) { + t.Log("restarting log provider") + // assuming that our service was closed and restarted, + // we should be able to backfill old logs and fetch new ones + filterStore := logprovider.NewUpkeepFilterStore() + logProvider2 := logprovider.NewLogProvider(logger.TestLogger(t), lp, logprovider.NewLogEventsPacker(), filterStore, opts) + + poll(backend.Commit()) + go func() { + if err2 := logProvider2.Start(ctx); err2 != nil { + t.Logf("error starting log provider: %s", err2) + t.Fail() + } + }() + defer logProvider2.Close() + + // re-register filters + for i, id := range ids { + err := logProvider2.RegisterFilter(ctx, logprovider.FilterOptions{ + UpkeepID: id, + TriggerConfig: newPlainLogTriggerConfig(addrs[i]), + // using block number at which the upkeep was registered, + // before we emitted any logs + UpdateBlock: uint64(n), + }) + require.NoError(t, err) + } + + waitLogProvider(ctx, t, logProvider2, 2) + + t.Log("getting logs after restart") + logsAfterRestart := collectPayloads(ctx, t, logProvider2, n, 5) + require.GreaterOrEqual(t, len(logsAfterRestart), n, + "failed to get logs after restart") + }) +} + +func TestIntegration_LogEventProvider_UpdateConfig(t *testing.T) { + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + backend, stopMining, accounts := setupBackend(t) + defer stopMining() + carrol := accounts[2] + + db := setupDB(t) + defer db.Close() + + opts := &logprovider.LogTriggersOptions{ + ReadInterval: time.Second / 2, + } + lp, ethClient := setupDependencies(t, db, backend) + filterStore := logprovider.NewUpkeepFilterStore() + provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, opts) + logProvider := 
provider.(logprovider.LogEventProviderTest) + + backend.Commit() + lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block + _, addrs, contracts := deployUpkeepCounter(ctx, t, 1, ethClient, backend, carrol, logProvider) + lp.PollAndSaveLogs(ctx, int64(5)) + require.Equal(t, 1, len(contracts)) + require.Equal(t, 1, len(addrs)) + + t.Run("update filter config", func(t *testing.T) { + upkeepID := evmregistry21.GenUpkeepID(types.LogTrigger, "111") + id := upkeepID.BigInt() + cfg := newPlainLogTriggerConfig(addrs[0]) + b, err := ethClient.BlockByHash(ctx, backend.Commit()) + require.NoError(t, err) + bn := b.Number() + err = logProvider.RegisterFilter(ctx, logprovider.FilterOptions{ + UpkeepID: id, + TriggerConfig: cfg, + UpdateBlock: bn.Uint64(), + }) + require.NoError(t, err) + // old block + err = logProvider.RegisterFilter(ctx, logprovider.FilterOptions{ + UpkeepID: id, + TriggerConfig: cfg, + UpdateBlock: bn.Uint64() - 1, + }) + require.Error(t, err) + // new block + b, err = ethClient.BlockByHash(ctx, backend.Commit()) + require.NoError(t, err) + bn = b.Number() + err = logProvider.RegisterFilter(ctx, logprovider.FilterOptions{ + UpkeepID: id, + TriggerConfig: cfg, + UpdateBlock: bn.Uint64(), + }) + require.NoError(t, err) + }) + + t.Run("register same log filter", func(t *testing.T) { + upkeepID := evmregistry21.GenUpkeepID(types.LogTrigger, "222") + id := upkeepID.BigInt() + cfg := newPlainLogTriggerConfig(addrs[0]) + b, err := ethClient.BlockByHash(ctx, backend.Commit()) + require.NoError(t, err) + bn := b.Number() + err = logProvider.RegisterFilter(ctx, logprovider.FilterOptions{ + UpkeepID: id, + TriggerConfig: cfg, + UpdateBlock: bn.Uint64(), + }) + require.NoError(t, err) + }) +} + +func TestIntegration_LogEventProvider_Backfill(t *testing.T) { + ctx, cancel := context.WithTimeout(testutils.Context(t), time.Second*60) + defer cancel() + + backend, stopMining, accounts := setupBackend(t) + defer stopMining() + carrol := accounts[2] + + db := 
setupDB(t) + defer db.Close() + + opts := logprovider.NewOptions(200) + opts.ReadInterval = time.Second / 4 + lp, ethClient := setupDependencies(t, db, backend) + filterStore := logprovider.NewUpkeepFilterStore() + provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) + logProvider := provider.(logprovider.LogEventProviderTest) + + n := 10 + + backend.Commit() + lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block + _, _, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) + + poll := pollFn(ctx, t, lp, ethClient) + + rounds := 8 + for i := 0; i < rounds; i++ { + poll(backend.Commit()) + triggerEvents(ctx, t, backend, carrol, n, poll, contracts...) + poll(backend.Commit()) + } + + waitLogPoller(ctx, t, backend, lp, ethClient) + + // starting the log provider should backfill logs + go func() { + if startErr := logProvider.Start(ctx); startErr != nil { + t.Logf("error starting log provider: %s", startErr) + t.Fail() + } + }() + defer logProvider.Close() + + waitLogProvider(ctx, t, logProvider, 3) + + allPayloads := collectPayloads(ctx, t, logProvider, n, 5) + require.GreaterOrEqual(t, len(allPayloads), len(contracts), "failed to backfill logs") +} + +func TestIntegration_LogEventProvider_RateLimit(t *testing.T) { + setupTest := func( + t *testing.T, + opts *logprovider.LogTriggersOptions, + ) ( + context.Context, + *backends.SimulatedBackend, + func(blockHash common.Hash), + logprovider.LogEventProviderTest, + []*big.Int, + func(), + ) { + ctx, cancel := context.WithCancel(testutils.Context(t)) + backend, stopMining, accounts := setupBackend(t) + userContractAccount := accounts[2] + db := setupDB(t) + + deferFunc := func() { + cancel() + stopMining() + _ = db.Close() + } + lp, ethClient := setupDependencies(t, db, backend) + filterStore := logprovider.NewUpkeepFilterStore() + provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, opts) + logProvider := 
provider.(logprovider.LogEventProviderTest) + backend.Commit() + lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block + + rounds := 5 + numberOfUserContracts := 10 + poll := pollFn(ctx, t, lp, ethClient) + + // deployUpkeepCounter creates 'n' blocks and 'n' contracts + ids, _, contracts := deployUpkeepCounter( + ctx, + t, + numberOfUserContracts, + ethClient, + backend, + userContractAccount, + logProvider) + + // have log poller save logs for current blocks + lp.PollAndSaveLogs(ctx, int64(numberOfUserContracts)) + + for i := 0; i < rounds; i++ { + triggerEvents( + ctx, + t, + backend, + userContractAccount, + numberOfUserContracts, + poll, + contracts...) + + for dummyBlocks := 0; dummyBlocks < numberOfUserContracts; dummyBlocks++ { + _ = backend.Commit() + } + + poll(backend.Commit()) + } + + { + // total block history at this point should be 566 + var minimumBlockCount int64 = 500 + latestBlock, _ := lp.LatestBlock() + + assert.GreaterOrEqual(t, latestBlock.BlockNumber, minimumBlockCount, "to ensure the integrety of the test, the minimum block count before the test should be %d but got %d", minimumBlockCount, latestBlock) + } + + require.NoError(t, logProvider.ReadLogs(ctx, ids...)) + + return ctx, backend, poll, logProvider, ids, deferFunc + } + + // polling for logs at approximately the same rate as a chain produces + // blocks should not encounter rate limits + t.Run("should allow constant polls within the rate and burst limit", func(t *testing.T) { + ctx, backend, poll, logProvider, ids, deferFunc := setupTest(t, &logprovider.LogTriggersOptions{ + LookbackBlocks: 200, + // BlockRateLimit is set low to ensure the test does not exceed the + // rate limit + BlockRateLimit: rate.Every(50 * time.Millisecond), + // BlockLimitBurst is just set to a non-zero value + BlockLimitBurst: 5, + }) + + defer deferFunc() + + // set the wait time between reads higher than the rate limit + readWait := 50 * time.Millisecond + timer := time.NewTimer(readWait) + + 
for i := 0; i < 4; i++ { + <-timer.C + + // advance 1 block for every read + poll(backend.Commit()) + + err := logProvider.ReadLogs(ctx, ids...) + if err != nil { + assert.False(t, errors.Is(err, logprovider.ErrBlockLimitExceeded), "error should not contain block limit exceeded") + } + + timer.Reset(readWait) + } + + poll(backend.Commit()) + + _, err := logProvider.GetLatestPayloads(ctx) + + require.NoError(t, err) + }) + + t.Run("should produce a rate limit error for over burst limit", func(t *testing.T) { + ctx, backend, poll, logProvider, ids, deferFunc := setupTest(t, &logprovider.LogTriggersOptions{ + LookbackBlocks: 200, + // BlockRateLimit is set low to ensure the test does not exceed the + // rate limit + BlockRateLimit: rate.Every(50 * time.Millisecond), + // BlockLimitBurst is just set to a non-zero value + BlockLimitBurst: 5, + }) + + defer deferFunc() + + // set the wait time between reads higher than the rate limit + readWait := 50 * time.Millisecond + timer := time.NewTimer(readWait) + + for i := 0; i < 4; i++ { + <-timer.C + + // advance 4 blocks for every read + for x := 0; x < 4; x++ { + poll(backend.Commit()) + } + + err := logProvider.ReadLogs(ctx, ids...) 
+ if err != nil { + assert.True(t, errors.Is(err, logprovider.ErrBlockLimitExceeded), "error should not contain block limit exceeded") + } + + timer.Reset(readWait) + } + + poll(backend.Commit()) + + _, err := logProvider.GetLatestPayloads(ctx) + + require.NoError(t, err) + }) + + t.Run("should allow polling after lookback number of blocks have passed", func(t *testing.T) { + ctx, backend, poll, logProvider, ids, deferFunc := setupTest(t, &logprovider.LogTriggersOptions{ + // BlockRateLimit is set low to ensure the test does not exceed the + // rate limit + BlockRateLimit: rate.Every(50 * time.Millisecond), + // BlockLimitBurst is set low to ensure the test exceeds the burst limit + BlockLimitBurst: 5, + // LogBlocksLookback is set low to reduce the number of blocks required + // to reset the block limiter to maxBurst + LookbackBlocks: 50, + }) + + defer deferFunc() + + // simulate a burst in unpolled blocks + for i := 0; i < 20; i++ { + _ = backend.Commit() + } + + poll(backend.Commit()) + + // all entries should error at this point because there are too many + // blocks to processes + err := logProvider.ReadLogs(ctx, ids...) + if err != nil { + assert.True(t, errors.Is(err, logprovider.ErrBlockLimitExceeded), "error should not contain block limit exceeded") + } + + // progress the chain by the same number of blocks as the lookback limit + // to trigger the usage of maxBurst + for i := 0; i < 50; i++ { + _ = backend.Commit() + } + + poll(backend.Commit()) + + // all entries should reset to the maxBurst because they are beyond + // the log lookback + err = logProvider.ReadLogs(ctx, ids...) 
+ if err != nil { + assert.True(t, errors.Is(err, logprovider.ErrBlockLimitExceeded), "error should not contain block limit exceeded") + } + + poll(backend.Commit()) + + _, err = logProvider.GetLatestPayloads(ctx) + + require.NoError(t, err) + }) +} + +func TestIntegration_LogRecoverer_Backfill(t *testing.T) { + ctx := testutils.Context(t) + + backend, stopMining, accounts := setupBackend(t) + defer stopMining() + carrol := accounts[2] + + db := setupDB(t) + defer db.Close() + + lookbackBlocks := int64(200) + opts := &logprovider.LogTriggersOptions{ + ReadInterval: time.Second / 4, + LookbackBlocks: lookbackBlocks, + } + lp, ethClient := setupDependencies(t, db, backend) + filterStore := logprovider.NewUpkeepFilterStore() + origDefaultRecoveryInterval := logprovider.RecoveryInterval + logprovider.RecoveryInterval = time.Millisecond * 200 + defer func() { + logprovider.RecoveryInterval = origDefaultRecoveryInterval + }() + provider, recoverer := setup(logger.TestLogger(t), lp, nil, &mockUpkeepStateStore{}, filterStore, opts) + logProvider := provider.(logprovider.LogEventProviderTest) + + backend.Commit() + lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block + + n := 10 + _, _, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) + + poll := pollFn(ctx, t, lp, ethClient) + + rounds := 8 + for i := 0; i < rounds; i++ { + triggerEvents(ctx, t, backend, carrol, n, poll, contracts...) 
+ poll(backend.Commit()) + } + poll(backend.Commit()) + + waitLogPoller(ctx, t, backend, lp, ethClient) + + // create dummy blocks + var blockNumber int64 + for blockNumber < lookbackBlocks*4 { + b, err := ethClient.BlockByHash(ctx, backend.Commit()) + require.NoError(t, err) + bn := b.Number() + blockNumber = bn.Int64() + } + // starting the log recoverer should backfill logs + go func() { + if startErr := recoverer.Start(ctx); startErr != nil { + t.Logf("error starting log provider: %s", startErr) + t.Fail() + } + }() + defer recoverer.Close() + + var allProposals []ocr2keepers.UpkeepPayload + for { + poll(backend.Commit()) + proposals, err := recoverer.GetRecoveryProposals(ctx) + require.NoError(t, err) + allProposals = append(allProposals, proposals...) + if len(allProposals) >= n { + break // success + } + select { + case <-ctx.Done(): + t.Fatalf("could not recover logs before timeout: %s", ctx.Err()) + case <-time.After(100 * time.Millisecond): + } + } +} + +func collectPayloads(ctx context.Context, t *testing.T, logProvider logprovider.LogEventProvider, n, rounds int) []ocr2keepers.UpkeepPayload { + allPayloads := make([]ocr2keepers.UpkeepPayload, 0) + for ctx.Err() == nil && len(allPayloads) < n && rounds > 0 { + logs, err := logProvider.GetLatestPayloads(ctx) + require.NoError(t, err) + require.LessOrEqual(t, len(logs), logprovider.AllowedLogsPerUpkeep, "failed to get all logs") + allPayloads = append(allPayloads, logs...) 
+ rounds-- + } + return allPayloads +} + +// waitLogProvider waits until the provider reaches the given partition +func waitLogProvider(ctx context.Context, t *testing.T, logProvider logprovider.LogEventProviderTest, partition int) { + t.Logf("waiting for log provider to reach partition %d", partition) + for ctx.Err() == nil { + currentPartition := logProvider.CurrentPartitionIdx() + if currentPartition > uint64(partition) { // make sure we went over all items + break + } + time.Sleep(100 * time.Millisecond) + } +} + +// waitLogPoller waits until the log poller is familiar with the given block +func waitLogPoller(ctx context.Context, t *testing.T, backend *backends.SimulatedBackend, lp logpoller.LogPollerTest, ethClient *evmclient.SimulatedBackendClient) { + t.Log("waiting for log poller to get updated") + // let the log poller work + b, err := ethClient.BlockByHash(ctx, backend.Commit()) + require.NoError(t, err) + latestBlock := b.Number().Int64() + for { + latestPolled, lberr := lp.LatestBlock(pg.WithParentCtx(ctx)) + require.NoError(t, lberr) + if latestPolled.BlockNumber >= latestBlock { + break + } + lp.PollAndSaveLogs(ctx, latestBlock) + } +} + +func pollFn(ctx context.Context, t *testing.T, lp logpoller.LogPollerTest, ethClient *evmclient.SimulatedBackendClient) func(blockHash common.Hash) { + return func(blockHash common.Hash) { + b, err := ethClient.BlockByHash(ctx, blockHash) + require.NoError(t, err) + bn := b.Number() + lp.PollAndSaveLogs(ctx, bn.Int64()) + } +} + +func triggerEvents( + ctx context.Context, + t *testing.T, + backend *backends.SimulatedBackend, + account *bind.TransactOpts, + rounds int, + poll func(blockHash common.Hash), + contracts ...*log_upkeep_counter_wrapper.LogUpkeepCounter, +) { + lctx, cancel := context.WithCancel(ctx) + defer cancel() + + var blockHash common.Hash + for rounds > 0 && lctx.Err() == nil { + rounds-- + for _, upkeepContract := range contracts { + if lctx.Err() != nil { + return + } + _, err := 
upkeepContract.Start(account) + require.NoError(t, err) + blockHash = backend.Commit() + } + poll(blockHash) + } +} + +func deployUpkeepCounter( + ctx context.Context, + t *testing.T, + n int, + ethClient *evmclient.SimulatedBackendClient, + backend *backends.SimulatedBackend, + account *bind.TransactOpts, + logProvider logprovider.LogEventProvider, +) ( + ids []*big.Int, + contractsAddrs []common.Address, + contracts []*log_upkeep_counter_wrapper.LogUpkeepCounter, +) { + for i := 0; i < n; i++ { + upkeepAddr, _, upkeepContract, err := log_upkeep_counter_wrapper.DeployLogUpkeepCounter( + account, backend, + big.NewInt(100000), + ) + require.NoError(t, err) + backend.Commit() + + contracts = append(contracts, upkeepContract) + contractsAddrs = append(contractsAddrs, upkeepAddr) + + // creating some dummy upkeepID to register filter + upkeepID := ocr2keepers.UpkeepIdentifier(append(common.LeftPadBytes([]byte{1}, 16), upkeepAddr[:16]...)) + id := upkeepID.BigInt() + ids = append(ids, id) + b, err := ethClient.BlockByHash(ctx, backend.Commit()) + require.NoError(t, err) + bn := b.Number() + err = logProvider.RegisterFilter(ctx, logprovider.FilterOptions{ + UpkeepID: id, + TriggerConfig: newPlainLogTriggerConfig(upkeepAddr), + UpdateBlock: bn.Uint64(), + }) + require.NoError(t, err) + } + return +} + +func newPlainLogTriggerConfig(upkeepAddr common.Address) logprovider.LogTriggerConfig { + return logprovider.LogTriggerConfig{ + ContractAddress: upkeepAddr, + FilterSelector: 0, + Topic0: common.HexToHash("0x3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d"), + } +} + +func setupDependencies(t *testing.T, db *sqlx.DB, backend *backends.SimulatedBackend) (logpoller.LogPollerTest, *evmclient.SimulatedBackendClient) { + ethClient := evmclient.NewSimulatedBackendClient(t, backend, big.NewInt(1337)) + pollerLggr := logger.TestLogger(t) + pollerLggr.SetLogLevel(zapcore.WarnLevel) + lorm := logpoller.NewORM(big.NewInt(1337), db, pollerLggr, 
pgtest.NewQConfig(false)) + lp := logpoller.NewLogPoller(lorm, ethClient, pollerLggr, 100*time.Millisecond, false, 1, 2, 2, 1000) + return lp, ethClient +} + +func setup(lggr logger.Logger, poller logpoller.LogPoller, c client.Client, stateStore evmregistry21.UpkeepStateReader, filterStore logprovider.UpkeepFilterStore, opts *logprovider.LogTriggersOptions) (logprovider.LogEventProvider, logprovider.LogRecoverer) { + packer := logprovider.NewLogEventsPacker() + if opts == nil { + o := logprovider.NewOptions(200) + opts = &o + } + provider := logprovider.NewLogProvider(lggr, poller, packer, filterStore, *opts) + recoverer := logprovider.NewLogRecoverer(lggr, poller, c, stateStore, packer, filterStore, *opts) + + return provider, recoverer +} + +func setupBackend(t *testing.T) (*backends.SimulatedBackend, func(), []*bind.TransactOpts) { + sergey := testutils.MustNewSimTransactor(t) // owns all the link + steve := testutils.MustNewSimTransactor(t) // registry owner + carrol := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(1000000000000000000).ToInt()}, + steve.From: {Balance: assets.Ether(1000000000000000000).ToInt()}, + carrol.From: {Balance: assets.Ether(1000000000000000000).ToInt()}, + } + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + return backend, stopMining, []*bind.TransactOpts{sergey, steve, carrol} +} + +func ptr[T any](v T) *T { return &v } + +func setupDB(t *testing.T) *sqlx.DB { + _, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Feature.LogPoller = ptr(true) + + c.OCR.Enabled = ptr(false) + c.OCR2.Enabled = ptr(true) + + c.EVM[0].Transactions.ForwardersEnabled = ptr(true) + c.EVM[0].GasEstimator.Mode = ptr("FixedPrice") + }) + return db +} + +type 
mockUpkeepStateStore struct { +} + +func (m *mockUpkeepStateStore) SelectByWorkIDs(ctx context.Context, workIDs ...string) ([]ocr2keepers.UpkeepState, error) { + states := make([]ocr2keepers.UpkeepState, len(workIDs)) + for i := range workIDs { + states[i] = ocr2keepers.UnknownState + } + return states, nil +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_packer.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_packer.go new file mode 100644 index 00000000..f02ecfc7 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/log_packer.go @@ -0,0 +1,44 @@ +package logprovider + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_utils_2_1" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" +) + +type LogDataPacker interface { + PackLogData(log logpoller.Log) ([]byte, error) +} + +type logEventsPacker struct { + abi abi.ABI +} + +func NewLogEventsPacker() *logEventsPacker { + return &logEventsPacker{abi: core.UtilsABI} +} + +func (p *logEventsPacker) PackLogData(log logpoller.Log) ([]byte, error) { + var topics [][32]byte + for _, topic := range log.GetTopics() { + topics = append(topics, topic) + } + b, err := p.abi.Methods["_log"].Inputs.Pack(&automation_utils_2_1.Log{ + Index: big.NewInt(log.LogIndex), + Timestamp: big.NewInt(log.BlockTimestamp.Unix()), + TxHash: log.TxHash, + BlockNumber: big.NewInt(log.BlockNumber), + BlockHash: log.BlockHash, + Source: log.Address, + Topics: topics, + Data: log.Data, + }) + if err != nil { + return nil, err + } + return b, nil +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go new file mode 100644 index 
00000000..a5c6c824 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -0,0 +1,409 @@ +package logprovider + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "hash" + "io" + "math/big" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_utils_2_1" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + LogProviderServiceName = "LogEventProvider" + + ErrHeadNotAvailable = fmt.Errorf("head not available") + ErrBlockLimitExceeded = fmt.Errorf("block limit exceeded") + + // AllowedLogsPerUpkeep is the maximum number of logs allowed per upkeep every single call. + AllowedLogsPerUpkeep = 5 + // MaxPayloads is the maximum number of payloads to return per call. + MaxPayloads = 100 + + readJobQueueSize = 64 + readLogsTimeout = 10 * time.Second + + readMaxBatchSize = 32 + // reorgBuffer is the number of blocks to add as a buffer to the block range when reading logs. + reorgBuffer = int64(32) + readerThreads = 4 +) + +// LogTriggerConfig is an alias for log trigger config. +type LogTriggerConfig automation_utils_2_1.LogTriggerConfig + +type FilterOptions struct { + UpkeepID *big.Int + TriggerConfig LogTriggerConfig + UpdateBlock uint64 +} + +type LogTriggersLifeCycle interface { + // RegisterFilter registers the filter (if valid) for the given upkeepID. 
+ RegisterFilter(ctx context.Context, opts FilterOptions) error + // UnregisterFilter removes the filter for the given upkeepID. + UnregisterFilter(upkeepID *big.Int) error +} +type LogEventProvider interface { + ocr2keepers.LogEventProvider + LogTriggersLifeCycle + + RefreshActiveUpkeeps(ids ...*big.Int) ([]*big.Int, error) + + Start(context.Context) error + io.Closer +} + +type LogEventProviderTest interface { + LogEventProvider + ReadLogs(ctx context.Context, ids ...*big.Int) error + CurrentPartitionIdx() uint64 +} + +var _ LogEventProvider = &logEventProvider{} +var _ LogEventProviderTest = &logEventProvider{} + +// logEventProvider manages log filters for upkeeps and enables to read the log events. +type logEventProvider struct { + services.StateMachine + threadCtrl utils.ThreadControl + + lggr logger.Logger + + poller logpoller.LogPoller + + packer LogDataPacker + + lock sync.RWMutex + registerLock sync.Mutex + + filterStore UpkeepFilterStore + buffer *logEventBuffer + + opts LogTriggersOptions + + currentPartitionIdx uint64 +} + +func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, packer LogDataPacker, filterStore UpkeepFilterStore, opts LogTriggersOptions) *logEventProvider { + return &logEventProvider{ + threadCtrl: utils.NewThreadControl(), + lggr: lggr.Named("KeepersRegistry.LogEventProvider"), + packer: packer, + buffer: newLogEventBuffer(lggr, int(opts.LookbackBlocks), defaultNumOfLogUpkeeps, defaultFastExecLogsHigh), + poller: poller, + opts: opts, + filterStore: filterStore, + } +} + +func (p *logEventProvider) Start(context.Context) error { + return p.StartOnce(LogProviderServiceName, func() error { + + readQ := make(chan []*big.Int, readJobQueueSize) + + p.lggr.Infow("starting log event provider", "readInterval", p.opts.ReadInterval, "readMaxBatchSize", readMaxBatchSize, "readers", readerThreads) + + for i := 0; i < readerThreads; i++ { + p.threadCtrl.Go(func(ctx context.Context) { + p.startReader(ctx, readQ) + }) + } + + 
p.threadCtrl.Go(func(ctx context.Context) { + lggr := p.lggr.With("where", "scheduler") + + p.scheduleReadJobs(ctx, func(ids []*big.Int) { + select { + case readQ <- ids: + case <-ctx.Done(): + default: + lggr.Warnw("readQ is full, dropping ids", "ids", ids) + } + }) + }) + + return nil + }) +} + +func (p *logEventProvider) Close() error { + return p.StopOnce(LogProviderServiceName, func() error { + p.threadCtrl.Close() + return nil + }) +} + +func (p *logEventProvider) HealthReport() map[string]error { + return map[string]error{LogProviderServiceName: p.Healthy()} +} + +func (p *logEventProvider) GetLatestPayloads(ctx context.Context) ([]ocr2keepers.UpkeepPayload, error) { + latest, err := p.poller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return nil, fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) + } + start := latest.BlockNumber - p.opts.LookbackBlocks + if start <= 0 { + start = 1 + } + logs := p.buffer.dequeueRange(start, latest.BlockNumber, AllowedLogsPerUpkeep, MaxPayloads) + + // p.lggr.Debugw("got latest logs from buffer", "latest", latest, "diff", diff, "logs", len(logs)) + + var payloads []ocr2keepers.UpkeepPayload + for _, l := range logs { + log := l.log + trig := logToTrigger(log) + checkData, err := p.packer.PackLogData(log) + if err != nil { + p.lggr.Warnw("failed to pack log data", "err", err, "log", log) + continue + } + payload, err := core.NewUpkeepPayload(l.upkeepID, trig, checkData) + if err != nil { + p.lggr.Warnw("failed to create upkeep payload", "err", err, "id", l.upkeepID, "trigger", trig, "checkData", checkData) + continue + } + + payloads = append(payloads, payload) + } + + return payloads, nil +} + +// ReadLogs fetches the logs for the given upkeeps. 
+func (p *logEventProvider) ReadLogs(pctx context.Context, ids ...*big.Int) error { + ctx, cancel := context.WithTimeout(pctx, readLogsTimeout) + defer cancel() + + latest, err := p.poller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) + } + if latest.BlockNumber == 0 { + return fmt.Errorf("%w: %s", ErrHeadNotAvailable, "latest block is 0") + } + filters := p.getFilters(latest.BlockNumber, ids...) + + err = p.readLogs(ctx, latest.BlockNumber, filters) + p.updateFiltersLastPoll(filters) + // p.lggr.Debugw("read logs for entries", "latestBlock", latest, "entries", len(entries), "err", err) + if err != nil { + return fmt.Errorf("fetched logs with errors: %w", err) + } + + return nil +} + +func (p *logEventProvider) CurrentPartitionIdx() uint64 { + return atomic.LoadUint64(&p.currentPartitionIdx) +} + +// scheduleReadJobs starts a scheduler that pushed ids to readQ for reading logs in the background. +func (p *logEventProvider) scheduleReadJobs(pctx context.Context, execute func([]*big.Int)) { + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + ticker := time.NewTicker(p.opts.ReadInterval) + defer ticker.Stop() + + h := sha256.New() + + partitionIdx := p.CurrentPartitionIdx() + + for { + select { + case <-ticker.C: + ids := p.getPartitionIds(h, int(partitionIdx)) + if len(ids) > 0 { + maxBatchSize := readMaxBatchSize + for len(ids) > maxBatchSize { + batch := ids[:maxBatchSize] + execute(batch) + ids = ids[maxBatchSize:] + runtime.Gosched() + } + execute(ids) + } + partitionIdx++ + atomic.StoreUint64(&p.currentPartitionIdx, partitionIdx) + case <-ctx.Done(): + return + } + } +} + +// startReader starts a reader that reads logs from the ids coming from readQ. 
+func (p *logEventProvider) startReader(pctx context.Context, readQ <-chan []*big.Int) { + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + lggr := p.lggr.With("where", "reader") + + for { + select { + case batch := <-readQ: + if err := p.ReadLogs(ctx, batch...); err != nil { + if ctx.Err() != nil { + return + } + lggr.Warnw("failed to read logs", "err", err) + } + case <-ctx.Done(): + return + } + } +} + +// getPartitionIds returns the upkeepIDs for the given partition and the number of partitions. +// Partitioning is done by hashing the upkeepID and taking the modulus of the number of partitions. +func (p *logEventProvider) getPartitionIds(hashFn hash.Hash, partition int) []*big.Int { + numOfPartitions := p.filterStore.Size() / readMaxBatchSize + if numOfPartitions < 1 { + numOfPartitions = 1 + } + partition = partition % numOfPartitions + + ids := p.filterStore.GetIDs(func(f upkeepFilter) bool { + if len(f.addr) == 0 { + return false + } + n, err := hashFn.Write(f.addr) + if err != nil || n == 0 { + p.lggr.Warnw("failed to hash upkeep address", "err", err, "addr", hexutil.Encode(f.addr)) + return false + } + h := hashFn.Sum(nil) + defer hashFn.Reset() + // taking only 6 bytes to avoid working with big numbers + i := big.NewInt(0).SetBytes(h[len(h)-6:]) + return int(i.Int64())%numOfPartitions == partition + }) + + return ids +} + +func (p *logEventProvider) updateFiltersLastPoll(entries []upkeepFilter) { + p.filterStore.UpdateFilters(func(orig, f upkeepFilter) upkeepFilter { + if f.lastPollBlock > orig.lastPollBlock { + orig.lastPollBlock = f.lastPollBlock + if f.lastPollBlock%10 == 0 { + // print log occasionally to avoid spamming logs + p.lggr.Debugw("Updated lastPollBlock", "lastPollBlock", f.lastPollBlock, "upkeepID", f.upkeepID) + } + } + return orig + }, entries...) +} + +// getFilters returns the filters for the given upkeepIDs, +// returns empty filter for inactive upkeeps. 
+func (p *logEventProvider) getFilters(latestBlock int64, ids ...*big.Int) []upkeepFilter { + var filters []upkeepFilter + p.filterStore.RangeFiltersByIDs(func(i int, f upkeepFilter) { + if len(f.addr) == 0 { // not found + p.lggr.Debugw("upkeep filter not found", "upkeep", f.upkeepID.String()) + filters = append(filters, f) + return + } + if f.configUpdateBlock > uint64(latestBlock) { + p.lggr.Debugw("upkeep config update block was created after latestBlock", "upkeep", f.upkeepID.String(), "configUpdateBlock", f.configUpdateBlock, "latestBlock", latestBlock) + filters = append(filters, upkeepFilter{upkeepID: f.upkeepID}) + return + } + if f.lastPollBlock > latestBlock { + p.lggr.Debugw("already polled latest block", "entry.lastPollBlock", f.lastPollBlock, "latestBlock", latestBlock, "upkeep", f.upkeepID.String()) + filters = append(filters, upkeepFilter{upkeepID: f.upkeepID}) + return + } + filters = append(filters, f.Clone()) + }, ids...) + + return filters +} + +// readLogs calls log poller to get the logs for the given upkeep entries. +// +// Exploratory: batch filters by contract address and call log poller once per contract address +// NOTE: the filters are already grouped by contract address +func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters []upkeepFilter) (merr error) { + lookbackBlocks := p.opts.LookbackBlocks + if latest < lookbackBlocks { + // special case of a new blockchain (e.g. 
simulated chain) + lookbackBlocks = latest - 1 + } + // maxBurst will be used to increase the burst limit to allow a long range scan + maxBurst := int(lookbackBlocks + 1) + + for i, filter := range filters { + if len(filter.addr) == 0 { + continue + } + start := filter.lastPollBlock + // range should not exceed [lookbackBlocks, latest] + if start < latest-lookbackBlocks { + start = latest - lookbackBlocks + filter.blockLimiter.SetBurst(maxBurst) + } + + resv := filter.blockLimiter.ReserveN(time.Now(), int(latest-start)) + if !resv.OK() { + merr = errors.Join(merr, fmt.Errorf("%w: %s", ErrBlockLimitExceeded, filter.upkeepID.String())) + continue + } + // adding a buffer to check for reorged logs. + start = start - reorgBuffer + // make sure start of the range is not before the config update block + if configUpdateBlock := int64(filter.configUpdateBlock); start < configUpdateBlock { + start = configUpdateBlock + } + // query logs based on contract address, event sig, and blocks + logs, err := p.poller.LogsWithSigs(start, latest, []common.Hash{filter.topics[0]}, common.BytesToAddress(filter.addr), pg.WithParentCtx(ctx)) + if err != nil { + // cancel limit reservation as we failed to get logs + resv.Cancel() + if ctx.Err() != nil { + // exit if the context was canceled + return merr + } + merr = errors.Join(merr, fmt.Errorf("failed to get logs for upkeep %s: %w", filter.upkeepID.String(), err)) + continue + } + filteredLogs := filter.Select(logs...) + + // if this limiter's burst was set to the max -> + // reset it and cancel the reservation to allow further processing + if filter.blockLimiter.Burst() == maxBurst { + resv.Cancel() + filter.blockLimiter.SetBurst(p.opts.BlockLimitBurst) + } + + p.buffer.enqueue(filter.upkeepID, filteredLogs...) 
+ + // Update the lastPollBlock for filter in slice this is then + // updated into filter store in updateFiltersLastPoll + filters[i].lastPollBlock = latest + } + + return merr +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go new file mode 100644 index 00000000..e634a63a --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle.go @@ -0,0 +1,191 @@ +package logprovider + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "golang.org/x/time/rate" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +var ( + // LogRetention is the amount of time to retain logs for. + LogRetention = 24 * time.Hour + // LogBackfillBuffer is the number of blocks from the latest block for which backfill is done when adding a filter in log poller + LogBackfillBuffer = 100 +) + +func (p *logEventProvider) RefreshActiveUpkeeps(ids ...*big.Int) ([]*big.Int, error) { + // Exploratory: investigate how we can batch the refresh + if len(ids) == 0 { + return nil, nil + } + p.lggr.Debugw("Refreshing active upkeeps", "upkeeps", len(ids)) + visited := make(map[string]bool, len(ids)) + for _, id := range ids { + visited[id.String()] = false + } + inactiveIDs := p.filterStore.GetIDs(func(f upkeepFilter) bool { + uid := f.upkeepID.String() + _, ok := visited[uid] + visited[uid] = true + return !ok + }) + var merr error + if len(inactiveIDs) > 0 { + p.lggr.Debugw("Removing inactive upkeeps", "upkeeps", len(inactiveIDs)) + for _, id := range inactiveIDs { + if err := p.UnregisterFilter(id); err != nil { + merr = errors.Join(merr, fmt.Errorf("failed to unregister filter: %s", id.String())) + } + } + } + var newIDs []*big.Int + for id, ok := range visited { + if !ok { + uid, 
_ := new(big.Int).SetString(id, 10) + newIDs = append(newIDs, uid) + } + } + + return newIDs, merr +} + +func (p *logEventProvider) RegisterFilter(ctx context.Context, opts FilterOptions) error { + upkeepID, cfg := opts.UpkeepID, opts.TriggerConfig + if err := p.validateLogTriggerConfig(cfg); err != nil { + return fmt.Errorf("invalid log trigger config: %w", err) + } + lpFilter := p.newLogFilter(upkeepID, cfg) + + // using lock to facilitate multiple events causing filter registration + // at the same time. + // Exploratory: consider using a q to handle registration requests + p.registerLock.Lock() + defer p.registerLock.Unlock() + + var filter upkeepFilter + currentFilter := p.filterStore.Get(upkeepID) + if currentFilter != nil { + if currentFilter.configUpdateBlock > opts.UpdateBlock { + // already registered with a config from a higher block number + return fmt.Errorf("filter for upkeep with id %s already registered with newer config", upkeepID.String()) + } else if currentFilter.configUpdateBlock == opts.UpdateBlock { + // already registered with the same config + p.lggr.Debugf("filter for upkeep with id %s already registered with the same config", upkeepID.String()) + return nil + } + filter = *currentFilter + } else { // new filter + filter = upkeepFilter{ + upkeepID: upkeepID, + blockLimiter: rate.NewLimiter(p.opts.BlockRateLimit, p.opts.BlockLimitBurst), + } + } + filter.lastPollBlock = 0 + filter.lastRePollBlock = 0 + filter.configUpdateBlock = opts.UpdateBlock + filter.selector = cfg.FilterSelector + filter.addr = cfg.ContractAddress.Bytes() + filter.topics = []common.Hash{cfg.Topic0, cfg.Topic1, cfg.Topic2, cfg.Topic3} + + if err := p.register(ctx, lpFilter, filter); err != nil { + return fmt.Errorf("failed to register upkeep filter %s: %w", filter.upkeepID.String(), err) + } + + return nil +} + +// register registers the upkeep filter with the log poller and adds it to the filter store. 
+func (p *logEventProvider) register(ctx context.Context, lpFilter logpoller.Filter, ufilter upkeepFilter) error { + latest, err := p.poller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return fmt.Errorf("failed to get latest block while registering filter: %w", err) + } + lggr := p.lggr.With("upkeepID", ufilter.upkeepID.String()) + logPollerHasFilter := p.poller.HasFilter(lpFilter.Name) + filterStoreHasFilter := p.filterStore.Has(ufilter.upkeepID) + if filterStoreHasFilter { + // removing filter in case of an update so we can recreate it with updated values + lggr.Debugw("Upserting upkeep filter") + err := p.poller.UnregisterFilter(lpFilter.Name) + if err != nil { + return fmt.Errorf("failed to upsert (unregister) upkeep filter %s: %w", ufilter.upkeepID.String(), err) + } + } + if err := p.poller.RegisterFilter(lpFilter); err != nil { + return err + } + p.filterStore.AddActiveUpkeeps(ufilter) + if logPollerHasFilter { + // already registered in DB before, no need to backfill + return nil + } + backfillBlock := latest.BlockNumber - int64(LogBackfillBuffer) + if backfillBlock < 1 { + // New chain, backfill from start + backfillBlock = 1 + } + if int64(ufilter.configUpdateBlock) > backfillBlock { + // backfill from config update block in case it is not too old + backfillBlock = int64(ufilter.configUpdateBlock) + } + // NOTE: replys are planned to be done as part of RegisterFilter within logpoller + lggr.Debugw("Backfilling logs for new upkeep filter", "backfillBlock", backfillBlock) + p.poller.ReplayAsync(backfillBlock) + + return nil +} + +func (p *logEventProvider) UnregisterFilter(upkeepID *big.Int) error { + // Filter might have been unregistered already, only try to unregister if it exists + if p.poller.HasFilter(p.filterName(upkeepID)) { + if err := p.poller.UnregisterFilter(p.filterName(upkeepID)); err != nil { + return fmt.Errorf("failed to unregister upkeep filter %s: %w", upkeepID.String(), err) + } + } + 
p.filterStore.RemoveActiveUpkeeps(upkeepFilter{ + upkeepID: upkeepID, + }) + return nil +} + +// newLogFilter creates logpoller.Filter from the given upkeep config +func (p *logEventProvider) newLogFilter(upkeepID *big.Int, cfg LogTriggerConfig) logpoller.Filter { + return logpoller.Filter{ + Name: p.filterName(upkeepID), + // log poller filter treats this event sigs slice as an array of topic0 + // since we don't support multiple events right now, only put one topic0 here + EventSigs: []common.Hash{common.BytesToHash(cfg.Topic0[:])}, + Addresses: []common.Address{cfg.ContractAddress}, + Retention: LogRetention, + } +} + +func (p *logEventProvider) validateLogTriggerConfig(cfg LogTriggerConfig) error { + var zeroAddr common.Address + var zeroBytes [32]byte + if bytes.Equal(cfg.ContractAddress[:], zeroAddr[:]) { + return errors.New("invalid contract address: zeroed") + } + if bytes.Equal(cfg.Topic0[:], zeroBytes[:]) { + return errors.New("invalid topic0: zeroed") + } + s := cfg.FilterSelector + if s >= 8 { + p.lggr.Error("filter selector %d is invalid", s) + return errors.New("invalid filter selector: larger or equal to 8") + } + return nil +} + +func (p *logEventProvider) filterName(upkeepID *big.Int) string { + return logpoller.FilterName("KeepersRegistry LogUpkeep", upkeepID.String()) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go new file mode 100644 index 00000000..97ff465a --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_life_cycle_test.go @@ -0,0 +1,240 @@ +package logprovider + +import ( + "fmt" + "math/big" + "testing" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" +) + +func TestLogEventProvider_LifeCycle(t *testing.T) { + tests := []struct { + name string + errored bool + upkeepID *big.Int + upkeepCfg LogTriggerConfig + hasFilter bool + replyed bool + cfgUpdateBlock uint64 + mockPoller bool + unregister bool + }{ + { + "new upkeep", + false, + big.NewInt(111), + LogTriggerConfig{ + ContractAddress: common.BytesToAddress(common.LeftPadBytes([]byte{1, 2, 3, 4}, 20)), + Topic0: common.BytesToHash(common.LeftPadBytes([]byte{1, 2, 3, 4}, 32)), + }, + false, + true, + uint64(1), + true, + false, + }, + { + "empty config", + true, + big.NewInt(111), + LogTriggerConfig{}, + false, + false, + uint64(0), + false, + false, + }, + { + "invalid config", + true, + big.NewInt(111), + LogTriggerConfig{ + ContractAddress: common.BytesToAddress(common.LeftPadBytes([]byte{}, 20)), + Topic0: common.BytesToHash(common.LeftPadBytes([]byte{}, 32)), + }, + false, + false, + uint64(2), + false, + false, + }, + { + "existing config with old block", + true, + big.NewInt(111), + LogTriggerConfig{ + ContractAddress: common.BytesToAddress(common.LeftPadBytes([]byte{1, 2, 3, 4}, 20)), + Topic0: common.BytesToHash(common.LeftPadBytes([]byte{1, 2, 3, 4}, 32)), + }, + true, + false, + uint64(0), + true, + false, + }, + { + "existing config with newer block", + false, + big.NewInt(111), + LogTriggerConfig{ + ContractAddress: common.BytesToAddress(common.LeftPadBytes([]byte{1, 2, 3, 4}, 20)), + Topic0: common.BytesToHash(common.LeftPadBytes([]byte{1, 2, 3, 4}, 32)), + }, + true, + false, + uint64(2), + true, + true, + }, + } + + p := NewLogProvider(logger.TestLogger(t), nil, &mockedPacker{}, NewUpkeepFilterStore(), 
NewOptions(200)) + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := testutils.Context(t) + + if tc.mockPoller { + lp := new(mocks.LogPoller) + lp.On("RegisterFilter", mock.Anything).Return(nil) + lp.On("UnregisterFilter", mock.Anything).Return(nil) + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{}, nil) + hasFitlerTimes := 1 + if tc.unregister { + hasFitlerTimes = 2 + } + lp.On("HasFilter", p.filterName(tc.upkeepID)).Return(tc.hasFilter).Times(hasFitlerTimes) + if tc.replyed { + lp.On("ReplayAsync", mock.Anything).Return(nil).Times(1) + } else { + lp.On("ReplayAsync", mock.Anything).Return(nil).Times(0) + } + p.lock.Lock() + p.poller = lp + p.lock.Unlock() + } + + err := p.RegisterFilter(ctx, FilterOptions{ + UpkeepID: tc.upkeepID, + TriggerConfig: tc.upkeepCfg, + UpdateBlock: tc.cfgUpdateBlock, + }) + if tc.errored { + require.Error(t, err) + } else { + require.NoError(t, err) + if tc.unregister { + require.NoError(t, p.UnregisterFilter(tc.upkeepID)) + } + } + }) + } +} + +func TestEventLogProvider_RefreshActiveUpkeeps(t *testing.T) { + ctx := testutils.Context(t) + mp := new(mocks.LogPoller) + mp.On("RegisterFilter", mock.Anything).Return(nil) + mp.On("UnregisterFilter", mock.Anything).Return(nil) + mp.On("HasFilter", mock.Anything).Return(false) + mp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{}, nil) + mp.On("ReplayAsync", mock.Anything).Return(nil) + + p := NewLogProvider(logger.TestLogger(t), mp, &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + + require.NoError(t, p.RegisterFilter(ctx, FilterOptions{ + UpkeepID: core.GenUpkeepID(types.LogTrigger, "1111").BigInt(), + TriggerConfig: LogTriggerConfig{ + ContractAddress: common.BytesToAddress(common.LeftPadBytes([]byte{1, 2, 3, 4}, 20)), + Topic0: common.BytesToHash(common.LeftPadBytes([]byte{1, 2, 3, 4}, 32)), + }, + UpdateBlock: uint64(0), + })) + require.NoError(t, p.RegisterFilter(ctx, FilterOptions{ + UpkeepID: 
core.GenUpkeepID(types.LogTrigger, "2222").BigInt(), + TriggerConfig: LogTriggerConfig{ + ContractAddress: common.BytesToAddress(common.LeftPadBytes([]byte{1, 2, 3, 4}, 20)), + Topic0: common.BytesToHash(common.LeftPadBytes([]byte{1, 2, 3, 4}, 32)), + }, + UpdateBlock: uint64(0), + })) + require.Equal(t, 2, p.filterStore.Size()) + + newIds, err := p.RefreshActiveUpkeeps() + require.NoError(t, err) + require.Len(t, newIds, 0) + mp.On("HasFilter", p.filterName(core.GenUpkeepID(types.LogTrigger, "2222").BigInt())).Return(true) + newIds, err = p.RefreshActiveUpkeeps( + core.GenUpkeepID(types.LogTrigger, "2222").BigInt(), + core.GenUpkeepID(types.LogTrigger, "1234").BigInt(), + core.GenUpkeepID(types.LogTrigger, "123").BigInt()) + require.NoError(t, err) + require.Len(t, newIds, 2) + require.Equal(t, 1, p.filterStore.Size()) +} + +func TestLogEventProvider_ValidateLogTriggerConfig(t *testing.T) { + contractAddress := common.HexToAddress("0xB9F3af0c2CbfE108efd0E23F7b0a151Ea42f764E") + eventSig := common.HexToHash("0x3bdab8bffae631cfee411525ebae27f3fb61b10c662c09ec2a7dbb5854c87e8c") + tests := []struct { + name string + cfg LogTriggerConfig + expectedErr error + }{ + { + "success", + LogTriggerConfig{ + ContractAddress: contractAddress, + FilterSelector: 0, + Topic0: eventSig, + }, + nil, + }, + { + "invalid contract address", + LogTriggerConfig{ + ContractAddress: common.Address{}, + FilterSelector: 0, + Topic0: eventSig, + }, + fmt.Errorf("invalid contract address: zeroed"), + }, + { + "invalid topic0", + LogTriggerConfig{ + ContractAddress: contractAddress, + FilterSelector: 0, + }, + fmt.Errorf("invalid topic0: zeroed"), + }, + { + "success", + LogTriggerConfig{ + ContractAddress: contractAddress, + FilterSelector: 8, + Topic0: eventSig, + }, + fmt.Errorf("invalid filter selector: larger or equal to 8"), + }, + } + + p := NewLogProvider(logger.TestLogger(t), nil, &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + for _, tc := range tests { + t.Run(tc.name, 
func(t *testing.T) { + err := p.validateLogTriggerConfig(tc.cfg) + assert.Equal(t, tc.expectedErr, err) + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go new file mode 100644 index 00000000..81c4b2d8 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go @@ -0,0 +1,326 @@ +package logprovider + +import ( + "context" + "fmt" + "math/big" + "runtime" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "golang.org/x/time/rate" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func TestLogEventProvider_GetFilters(t *testing.T) { + p := NewLogProvider(logger.TestLogger(t), nil, &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + + _, f := newEntry(p, 1) + p.filterStore.AddActiveUpkeeps(f) + + t.Run("no filters", func(t *testing.T) { + filters := p.getFilters(0, big.NewInt(0)) + require.Len(t, filters, 1) + require.Equal(t, len(filters[0].addr), 0) + }) + + t.Run("has filter with lower lastPollBlock", func(t *testing.T) { + filters := p.getFilters(0, f.upkeepID) + require.Len(t, filters, 1) + require.Greater(t, len(filters[0].addr), 0) + filters = p.getFilters(10, f.upkeepID) + require.Len(t, filters, 1) + require.Greater(t, len(filters[0].addr), 0) + }) + + t.Run("has filter with higher lastPollBlock", func(t *testing.T) { + _, f := newEntry(p, 2) + f.lastPollBlock = 3 + p.filterStore.AddActiveUpkeeps(f) + + filters := p.getFilters(1, f.upkeepID) + require.Len(t, filters, 1) + require.Equal(t, 
len(filters[0].addr), 0) + }) + + t.Run("has filter with higher configUpdateBlock", func(t *testing.T) { + _, f := newEntry(p, 2) + f.configUpdateBlock = 3 + p.filterStore.AddActiveUpkeeps(f) + + filters := p.getFilters(1, f.upkeepID) + require.Len(t, filters, 1) + require.Equal(t, len(filters[0].addr), 0) + }) +} + +func TestLogEventProvider_UpdateEntriesLastPoll(t *testing.T) { + p := NewLogProvider(logger.TestLogger(t), nil, &mockedPacker{}, NewUpkeepFilterStore(), NewOptions(200)) + + n := 10 + + // entries := map[string]upkeepFilter{} + for i := 0; i < n; i++ { + _, f := newEntry(p, i+1) + p.filterStore.AddActiveUpkeeps(f) + } + + t.Run("no entries", func(t *testing.T) { + _, f := newEntry(p, n*2) + f.lastPollBlock = 10 + p.updateFiltersLastPoll([]upkeepFilter{f}) + + filters := p.filterStore.GetFilters(nil) + for _, f := range filters { + require.Equal(t, int64(0), f.lastPollBlock) + } + }) + + t.Run("update entries", func(t *testing.T) { + _, f2 := newEntry(p, n-2) + f2.lastPollBlock = 10 + _, f1 := newEntry(p, n-1) + f1.lastPollBlock = 10 + p.updateFiltersLastPoll([]upkeepFilter{f1, f2}) + + p.filterStore.RangeFiltersByIDs(func(_ int, f upkeepFilter) { + require.Equal(t, int64(10), f.lastPollBlock) + }, f1.upkeepID, f2.upkeepID) + + // update with same block + p.updateFiltersLastPoll([]upkeepFilter{f1}) + + // checking other entries are not updated + _, f := newEntry(p, 1) + p.filterStore.RangeFiltersByIDs(func(_ int, f upkeepFilter) { + require.Equal(t, int64(0), f.lastPollBlock) + }, f.upkeepID) + }) +} + +func TestLogEventProvider_ScheduleReadJobs(t *testing.T) { + mp := new(mocks.LogPoller) + + tests := []struct { + name string + maxBatchSize int + ids []int + addrs []string + }{ + { + "no entries", + 3, + []int{}, + []string{}, + }, + { + "single entry", + 3, + []int{1}, + []string{"0x1111111"}, + }, + { + "happy flow", + 3, + []int{1, 2, 3}, + []string{"0x1111111", "0x2222222", "0x3333333"}, + }, + { + "batching", + 3, + []int{ + 1, 2, 3, + 4, 5, 6, + 
7, 8, 9, + 10, 11, 12, + 13, 14, 15, + 16, 17, 18, + 19, 20, 21, + }, + []string{ + "0x11111111", + "0x22222222", + "0x33333333", + "0x111111111", + "0x122222222", + "0x133333333", + "0x1111111111", + "0x1122222222", + "0x1133333333", + "0x11111111111", + "0x11122222222", + "0x11133333333", + "0x111111111111", + "0x111122222222", + "0x111133333333", + "0x1111111111111", + "0x1111122222222", + "0x1111133333333", + "0x11111111111111", + "0x11111122222222", + "0x11111133333333", + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := testutils.Context(t) + + readInterval := 10 * time.Millisecond + opts := NewOptions(200) + opts.ReadInterval = readInterval + + p := NewLogProvider(logger.TestLogger(t), mp, &mockedPacker{}, NewUpkeepFilterStore(), opts) + + var ids []*big.Int + for i, id := range tc.ids { + _, f := newEntry(p, id, tc.addrs[i]) + p.filterStore.AddActiveUpkeeps(f) + ids = append(ids, f.upkeepID) + } + + reads := make(chan []*big.Int, 100) + + go func(ctx context.Context) { + p.scheduleReadJobs(ctx, func(ids []*big.Int) { + select { + case reads <- ids: + default: + t.Log("dropped ids") + } + }) + }(ctx) + + batches := (len(tc.ids) / tc.maxBatchSize) + 1 + + timeoutTicker := time.NewTicker(readInterval * time.Duration(batches*10)) + defer timeoutTicker.Stop() + + got := map[string]int{} + + readLoop: + for { + select { + case <-timeoutTicker.C: + break readLoop + case batch := <-reads: + for _, id := range batch { + got[id.String()]++ + } + case <-ctx.Done(): + break readLoop + default: + if p.CurrentPartitionIdx() > uint64(batches+1) { + break readLoop + } + } + runtime.Gosched() + } + + require.Equal(t, len(ids), len(got)) + for _, id := range ids { + _, ok := got[id.String()] + require.True(t, ok, "id not found %s", id.String()) + require.GreaterOrEqual(t, got[id.String()], 1, "id don't have schdueled job %s", id.String()) + } + }) + } +} + +func TestLogEventProvider_ReadLogs(t *testing.T) { + ctx := 
testutils.Context(t) + + mp := new(mocks.LogPoller) + + mp.On("RegisterFilter", mock.Anything).Return(nil) + mp.On("ReplayAsync", mock.Anything).Return() + mp.On("HasFilter", mock.Anything).Return(false) + mp.On("UnregisterFilter", mock.Anything, mock.Anything).Return(nil) + mp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: int64(1)}, nil) + mp.On("LogsWithSigs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{ + { + BlockNumber: 1, + TxHash: common.HexToHash("0x1"), + }, + }, nil) + + filterStore := NewUpkeepFilterStore() + p := NewLogProvider(logger.TestLogger(t), mp, &mockedPacker{}, filterStore, NewOptions(200)) + + var ids []*big.Int + for i := 0; i < 10; i++ { + cfg, f := newEntry(p, i+1) + ids = append(ids, f.upkeepID) + require.NoError(t, p.RegisterFilter(ctx, FilterOptions{ + UpkeepID: f.upkeepID, + TriggerConfig: cfg, + })) + } + + t.Run("no entries", func(t *testing.T) { + require.NoError(t, p.ReadLogs(ctx, big.NewInt(999999))) + logs := p.buffer.peek(10) + require.Len(t, logs, 0) + }) + + t.Run("has entries", func(t *testing.T) { + require.NoError(t, p.ReadLogs(ctx, ids[:2]...)) + logs := p.buffer.peek(10) + require.Len(t, logs, 2) + + var updatedFilters []upkeepFilter + filterStore.RangeFiltersByIDs(func(i int, f upkeepFilter) { + updatedFilters = append(updatedFilters, f.Clone()) + }, ids[:2]...) + for _, f := range updatedFilters { + // Last poll block should be updated + require.Equal(t, int64(1), f.lastPollBlock) + } + }) + + // TODO: test rate limiting + +} + +func newEntry(p *logEventProvider, i int, args ...string) (LogTriggerConfig, upkeepFilter) { + idBytes := append(common.LeftPadBytes([]byte{1}, 16), []byte(fmt.Sprintf("%d", i))...) 
+ id := ocr2keepers.UpkeepIdentifier{} + copy(id[:], idBytes) + uid := id.BigInt() + for len(args) < 2 { + args = append(args, "0x3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d") + } + addr, topic0 := args[0], args[1] + cfg := LogTriggerConfig{ + ContractAddress: common.HexToAddress(addr), + FilterSelector: 0, + Topic0: common.HexToHash(topic0), + } + filter := p.newLogFilter(uid, cfg) + topics := make([]common.Hash, len(filter.EventSigs)) + copy(topics, filter.EventSigs) + f := upkeepFilter{ + upkeepID: uid, + addr: filter.Addresses[0].Bytes(), + topics: topics, + blockLimiter: rate.NewLimiter(p.opts.BlockRateLimit, p.opts.BlockLimitBurst), + } + return cfg, f +} + +type mockedPacker struct { +} + +func (p *mockedPacker) PackLogData(log logpoller.Log) ([]byte, error) { + return log.Data, nil +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go new file mode 100644 index 00000000..6f7a8c93 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer.go @@ -0,0 +1,728 @@ +package logprovider + +import ( + "bytes" + "context" + "crypto/rand" + "errors" + "fmt" + "io" + "math" + "math/big" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/plugin-automation/pkg/v3/random" + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + 
LogRecovererServiceName = "LogRecoverer" + + // RecoveryInterval is the interval at which the recovery scanning processing is triggered + RecoveryInterval = 5 * time.Second + // RecoveryCacheTTL is the time to live for the recovery cache + RecoveryCacheTTL = 10 * time.Minute + // GCInterval is the interval at which the recovery cache is cleaned up + GCInterval = RecoveryCacheTTL - time.Second + // MaxProposals is the maximum number of proposals that can be returned by GetRecoveryProposals + MaxProposals = 20 + // recoveryBatchSize is the number of filters to recover in a single batch + recoveryBatchSize = 10 + // recoveryLogsBuffer is the number of blocks to be used as a safety buffer when reading logs + recoveryLogsBuffer = int64(200) + recoveryLogsBurst = int64(500) + // blockTimeUpdateCadence is the cadence at which the chain's blocktime is re-calculated + blockTimeUpdateCadence = 10 * time.Minute + // maxPendingPayloadsPerUpkeep is the number of logs we can have pending for a single upkeep + // at any given time + maxPendingPayloadsPerUpkeep = 500 +) + +type LogRecoverer interface { + ocr2keepers.RecoverableProvider + GetProposalData(context.Context, ocr2keepers.CoordinatedBlockProposal) ([]byte, error) + + Start(context.Context) error + io.Closer +} + +type visitedRecord struct { + visitedAt time.Time + payload ocr2keepers.UpkeepPayload +} + +type logRecoverer struct { + services.StateMachine + threadCtrl utils.ThreadControl + + lggr logger.Logger + + lookbackBlocks *atomic.Int64 + blockTime *atomic.Int64 + + interval time.Duration + lock sync.RWMutex + + pending []ocr2keepers.UpkeepPayload + visited map[string]visitedRecord + + filterStore UpkeepFilterStore + states core.UpkeepStateReader + packer LogDataPacker + poller logpoller.LogPoller + client client.Client + blockTimeResolver *blockTimeResolver + + finalityDepth int64 +} + +var _ LogRecoverer = &logRecoverer{} + +func NewLogRecoverer(lggr logger.Logger, poller logpoller.LogPoller, client client.Client, 
stateStore core.UpkeepStateReader, packer LogDataPacker, filterStore UpkeepFilterStore, opts LogTriggersOptions) *logRecoverer { + rec := &logRecoverer{ + lggr: lggr.Named(LogRecovererServiceName), + + threadCtrl: utils.NewThreadControl(), + + blockTime: &atomic.Int64{}, + lookbackBlocks: &atomic.Int64{}, + interval: opts.ReadInterval * 5, + + pending: make([]ocr2keepers.UpkeepPayload, 0), + visited: make(map[string]visitedRecord), + poller: poller, + filterStore: filterStore, + states: stateStore, + packer: packer, + client: client, + blockTimeResolver: newBlockTimeResolver(poller), + + finalityDepth: opts.FinalityDepth, + } + + rec.lookbackBlocks.Store(opts.LookbackBlocks) + rec.blockTime.Store(int64(defaultBlockTime)) + + return rec +} + +// Start starts the log recoverer, which runs 3 threads in the background: +// 1. Recovery thread: scans for logs that were missed by the log poller +// 2. Cleanup thread: cleans up the cache of logs that were already processed +// 3. Block time thread: updates the block time of the chain +func (r *logRecoverer) Start(ctx context.Context) error { + return r.StartOnce(LogRecovererServiceName, func() error { + r.updateBlockTime(ctx) + + r.lggr.Infow("starting log recoverer", "blockTime", r.blockTime.Load(), "lookbackBlocks", r.lookbackBlocks.Load(), "interval", r.interval) + + r.threadCtrl.Go(func(ctx context.Context) { + recoveryTicker := time.NewTicker(r.interval) + defer recoveryTicker.Stop() + + for { + select { + case <-recoveryTicker.C: + if err := r.recover(ctx); err != nil { + r.lggr.Warnw("failed to recover logs", "err", err) + } + case <-ctx.Done(): + return + } + } + }) + + r.threadCtrl.Go(func(ctx context.Context) { + cleanupTicker := time.NewTicker(utils.WithJitter(GCInterval)) + defer cleanupTicker.Stop() + + for { + select { + case <-cleanupTicker.C: + r.clean(ctx) + cleanupTicker.Reset(utils.WithJitter(GCInterval)) + case <-ctx.Done(): + return + } + } + }) + + r.threadCtrl.Go(func(ctx context.Context) { + 
blockTimeTicker := time.NewTicker(blockTimeUpdateCadence) + defer blockTimeTicker.Stop() + + for { + select { + case <-blockTimeTicker.C: + r.updateBlockTime(ctx) + blockTimeTicker.Reset(utils.WithJitter(blockTimeUpdateCadence)) + case <-ctx.Done(): + return + } + } + }) + + return nil + }) +} + +func (r *logRecoverer) Close() error { + return r.StopOnce(LogRecovererServiceName, func() error { + r.threadCtrl.Close() + return nil + }) +} + +func (r *logRecoverer) HealthReport() map[string]error { + return map[string]error{LogRecovererServiceName: r.Healthy()} +} + +func (r *logRecoverer) GetProposalData(ctx context.Context, proposal ocr2keepers.CoordinatedBlockProposal) ([]byte, error) { + switch core.GetUpkeepType(proposal.UpkeepID) { + case types.LogTrigger: + return r.getLogTriggerCheckData(ctx, proposal) + default: + return []byte{}, errors.New("not a log trigger upkeep ID") + } +} + +func (r *logRecoverer) getLogTriggerCheckData(ctx context.Context, proposal ocr2keepers.CoordinatedBlockProposal) ([]byte, error) { + if !r.filterStore.Has(proposal.UpkeepID.BigInt()) { + return nil, fmt.Errorf("filter not found for upkeep %v", proposal.UpkeepID) + } + latest, err := r.poller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return nil, err + } + + start, offsetBlock := r.getRecoveryWindow(latest.BlockNumber) + if proposal.Trigger.LogTriggerExtension == nil { + return nil, errors.New("missing log trigger extension") + } + + // Verify the log is still present on chain, not reorged and is within recoverable range + // Do not trust the logBlockNumber from proposal since it's not included in workID + logBlockHash := common.BytesToHash(proposal.Trigger.LogTriggerExtension.BlockHash[:]) + bn, bh, err := core.GetTxBlock(ctx, r.client, proposal.Trigger.LogTriggerExtension.TxHash) + if err != nil { + return nil, err + } + if bn == nil { + return nil, errors.New("failed to get tx block") + } + if bh.Hex() != logBlockHash.Hex() { + return nil, errors.New("log tx 
reorged") + } + logBlock := bn.Int64() + if isRecoverable := logBlock < offsetBlock && logBlock > start; !isRecoverable { + return nil, errors.New("log block is not recoverable") + } + + // Check if the log was already performed or ineligible + upkeepStates, err := r.states.SelectByWorkIDs(ctx, proposal.WorkID) + if err != nil { + return nil, err + } + for _, upkeepState := range upkeepStates { + switch upkeepState { + case ocr2keepers.Performed, ocr2keepers.Ineligible: + return nil, errors.New("upkeep state is not recoverable") + default: + // we can proceed + } + } + + var filter upkeepFilter + r.filterStore.RangeFiltersByIDs(func(i int, f upkeepFilter) { + filter = f + }, proposal.UpkeepID.BigInt()) + + if len(filter.addr) == 0 { + return nil, fmt.Errorf("invalid filter found for upkeepID %s", proposal.UpkeepID.String()) + } + if filter.configUpdateBlock > uint64(logBlock) { + return nil, fmt.Errorf("log block %d is before the filter configUpdateBlock %d for upkeepID %s", logBlock, filter.configUpdateBlock, proposal.UpkeepID.String()) + } + + logs, err := r.poller.LogsWithSigs(logBlock-1, logBlock+1, filter.topics, common.BytesToAddress(filter.addr), pg.WithParentCtx(ctx)) + if err != nil { + return nil, fmt.Errorf("could not read logs: %w", err) + } + logs = filter.Select(logs...) 
+ + for _, log := range logs { + trigger := logToTrigger(log) + // use coordinated proposal block number as checkblock/hash + trigger.BlockHash = proposal.Trigger.BlockHash + trigger.BlockNumber = proposal.Trigger.BlockNumber + wid := core.UpkeepWorkID(proposal.UpkeepID, trigger) + if wid == proposal.WorkID { + r.lggr.Debugw("found log for proposal", "upkeepId", proposal.UpkeepID, "trigger.ext", trigger.LogTriggerExtension) + checkData, err := r.packer.PackLogData(log) + if err != nil { + return nil, fmt.Errorf("failed to pack log data: %w", err) + } + return checkData, nil + } + } + return nil, fmt.Errorf("no log found for upkeepID %v and trigger %+v", proposal.UpkeepID, proposal.Trigger) +} + +func (r *logRecoverer) GetRecoveryProposals(ctx context.Context) ([]ocr2keepers.UpkeepPayload, error) { + latestBlock, err := r.poller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return nil, fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) + } + + r.lock.Lock() + defer r.lock.Unlock() + + if len(r.pending) == 0 { + return nil, nil + } + + allLogsCounter := 0 + logsCount := map[string]int{} + + r.sortPending(uint64(latestBlock.BlockNumber)) + + var results, pending []ocr2keepers.UpkeepPayload + for _, payload := range r.pending { + if allLogsCounter >= MaxProposals { + // we have enough proposals, pushed the rest are pushed back to pending + pending = append(pending, payload) + continue + } + uid := payload.UpkeepID.String() + if logsCount[uid] >= AllowedLogsPerUpkeep { + // we have enough proposals for this upkeep, the rest are pushed back to pending + pending = append(pending, payload) + continue + } + results = append(results, payload) + logsCount[uid]++ + allLogsCounter++ + } + + r.pending = pending + + r.lggr.Debugf("found %d recoverable payloads", len(results)) + + return results, nil +} + +func (r *logRecoverer) recover(ctx context.Context) error { + latest, err := r.poller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return fmt.Errorf("%w: %s", 
ErrHeadNotAvailable, err) + } + + start, offsetBlock := r.getRecoveryWindow(latest.BlockNumber) + if offsetBlock < 0 { + // too soon to recover, we don't have enough blocks + return nil + } + if start < 0 { + start = 0 + } + + filters := r.getFilterBatch(offsetBlock) + if len(filters) == 0 { + return nil + } + + r.lggr.Debugw("recovering logs", "filters", filters, "startBlock", start, "offsetBlock", offsetBlock, "latestBlock", latest) + + var wg sync.WaitGroup + for _, f := range filters { + wg.Add(1) + go func(f upkeepFilter) { + defer wg.Done() + if err := r.recoverFilter(ctx, f, start, offsetBlock); err != nil { + r.lggr.Debugw("error recovering filter", "err", err.Error()) + } + }(f) + } + wg.Wait() + + return nil +} + +// recoverFilter recovers logs for a single upkeep filter. +func (r *logRecoverer) recoverFilter(ctx context.Context, f upkeepFilter, startBlock, offsetBlock int64) error { + start := f.lastRePollBlock + 1 // NOTE: we expect f.lastRePollBlock + 1 <= offsetBlock, as others would have been filtered out + // ensure we don't recover logs from before the filter was created + if configUpdateBlock := int64(f.configUpdateBlock); start < configUpdateBlock { + // NOTE: we expect that configUpdateBlock <= offsetBlock, as others would have been filtered out + start = configUpdateBlock + } + if start < startBlock { + start = startBlock + } + end := start + recoveryLogsBuffer + if offsetBlock-end > 100*recoveryLogsBuffer { + // If recoverer is lagging by a lot (more than 100x recoveryLogsBuffer), allow + // a range of recoveryLogsBurst + // Exploratory: Store lastRePollBlock in DB to prevent bursts during restarts + // (while also taking into account existing pending payloads) + end = start + recoveryLogsBurst + } + if end > offsetBlock { + end = offsetBlock + } + // we expect start to be > offsetBlock in any case + logs, err := r.poller.LogsWithSigs(start, end, f.topics, common.BytesToAddress(f.addr), pg.WithParentCtx(ctx)) + if err != nil { + return 
fmt.Errorf("could not read logs: %w", err) + } + logs = f.Select(logs...) + + workIDs := make([]string, 0) + for _, log := range logs { + trigger := logToTrigger(log) + upkeepId := &ocr2keepers.UpkeepIdentifier{} + ok := upkeepId.FromBigInt(f.upkeepID) + if !ok { + r.lggr.Warnw("failed to convert upkeepID to UpkeepIdentifier", "upkeepID", f.upkeepID) + continue + } + workIDs = append(workIDs, core.UpkeepWorkID(*upkeepId, trigger)) + } + + states, err := r.states.SelectByWorkIDs(ctx, workIDs...) + if err != nil { + return fmt.Errorf("could not read states: %w", err) + } + if len(logs) != len(states) { + return fmt.Errorf("log and state count mismatch: %d != %d", len(logs), len(states)) + } + filteredLogs := r.filterFinalizedStates(f, logs, states) + + added, alreadyPending, ok := r.populatePending(f, filteredLogs) + if added > 0 { + r.lggr.Debugw("found missed logs", "added", added, "alreadyPending", alreadyPending, "upkeepID", f.upkeepID) + } + if !ok { + r.lggr.Debugw("failed to add all logs to pending", "upkeepID", f.upkeepID) + return nil + } + r.filterStore.UpdateFilters(func(uf1, uf2 upkeepFilter) upkeepFilter { + uf1.lastRePollBlock = end + r.lggr.Debugw("Updated lastRePollBlock", "lastRePollBlock", end, "upkeepID", uf1.upkeepID) + return uf1 + }, f) + + return nil +} + +// populatePending adds the logs to the pending list if they are not already pending. +// returns the number of logs added, the number of logs that were already pending, +// and a flag that indicates whether some errors happened while we are trying to add to pending q. 
+func (r *logRecoverer) populatePending(f upkeepFilter, filteredLogs []logpoller.Log) (int, int, bool) { + r.lock.Lock() + defer r.lock.Unlock() + + pendingSizeBefore := len(r.pending) + alreadyPending := 0 + errs := make([]error, 0) + for _, log := range filteredLogs { + trigger := logToTrigger(log) + // Set the checkBlock and Hash to zero so that the checkPipeline uses the latest block + trigger.BlockHash = [32]byte{} + trigger.BlockNumber = 0 + upkeepId := &ocr2keepers.UpkeepIdentifier{} + ok := upkeepId.FromBigInt(f.upkeepID) + if !ok { + r.lggr.Warnw("failed to convert upkeepID to UpkeepIdentifier", "upkeepID", f.upkeepID) + continue + } + wid := core.UpkeepWorkID(*upkeepId, trigger) + if _, ok := r.visited[wid]; ok { + alreadyPending++ + continue + } + checkData, err := r.packer.PackLogData(log) + if err != nil { + r.lggr.Warnw("failed to pack log data", "err", err, "log", log) + continue + } + payload, err := core.NewUpkeepPayload(f.upkeepID, trigger, checkData) + if err != nil { + r.lggr.Warnw("failed to create payload", "err", err, "log", log) + continue + } + // r.lggr.Debugw("adding a payload to pending", "payload", payload) + if err := r.addPending(payload); err != nil { + errs = append(errs, err) + } else { + r.visited[wid] = visitedRecord{ + visitedAt: time.Now(), + payload: payload, + } + } + } + return len(r.pending) - pendingSizeBefore, alreadyPending, len(errs) == 0 +} + +// filterFinalizedStates filters out the log upkeeps that have already been completed (performed or ineligible). 
+func (r *logRecoverer) filterFinalizedStates(_ upkeepFilter, logs []logpoller.Log, states []ocr2keepers.UpkeepState) []logpoller.Log { + filtered := make([]logpoller.Log, 0) + + for i, log := range logs { + state := states[i] + if state != ocr2keepers.UnknownState { + continue + } + filtered = append(filtered, log) + } + + return filtered +} + +// getRecoveryWindow returns the block range of which the recoverer will try work on +func (r *logRecoverer) getRecoveryWindow(latest int64) (int64, int64) { + lookbackBlocks := r.lookbackBlocks.Load() + blockTime := r.blockTime.Load() + blocksInDay := int64(24*time.Hour) / blockTime + start := latest - blocksInDay + // Exploratory: Instead of subtracting finality depth to account for finalized performs + // keep two pointers of lastRePollBlock for soft and hard finalization, i.e. manage + // unfinalized perform logs better + end := latest - lookbackBlocks - r.finalityDepth + if start > end { + // In this case, allow starting from more than a day behind + start = end + } + return start, end +} + +// getFilterBatch returns a batch of filters that are ready to be recovered. +func (r *logRecoverer) getFilterBatch(offsetBlock int64) []upkeepFilter { + filters := r.filterStore.GetFilters(func(f upkeepFilter) bool { + // ensure we work only on filters that are ready to be recovered + // no need to recover in case f.configUpdateBlock is after offsetBlock + return f.lastRePollBlock < offsetBlock && int64(f.configUpdateBlock) <= offsetBlock + }) + + sort.Slice(filters, func(i, j int) bool { + return filters[i].lastRePollBlock < filters[j].lastRePollBlock + }) + + return r.selectFilterBatch(filters) +} + +// selectFilterBatch selects a batch of filters to be recovered. +// Half of the batch is selected randomly, the other half is selected +// in order of the oldest lastRePollBlock. 
+func (r *logRecoverer) selectFilterBatch(filters []upkeepFilter) []upkeepFilter { + batchSize := recoveryBatchSize + + if len(filters) < batchSize { + return filters + } + results := filters[:batchSize/2] + filters = filters[batchSize/2:] + + for len(results) < batchSize && len(filters) != 0 { + i, err := r.randIntn(len(filters)) + if err != nil { + r.lggr.Debugw("error generating random number", "err", err.Error()) + continue + } + results = append(results, filters[i]) + if i == 0 { + filters = filters[1:] + } else if i == len(filters)-1 { + filters = filters[:i] + } else { + filters = append(filters[:i], filters[i+1:]...) + } + } + + return results +} + +func (r *logRecoverer) randIntn(limit int) (int, error) { + n, err := rand.Int(rand.Reader, big.NewInt(int64(limit))) + if err != nil { + return 0, err + } + + return int(n.Int64()), nil +} + +func logToTrigger(log logpoller.Log) ocr2keepers.Trigger { + t := ocr2keepers.NewTrigger( + ocr2keepers.BlockNumber(log.BlockNumber), + log.BlockHash, + ) + t.LogTriggerExtension = &ocr2keepers.LogTriggerExtension{ + TxHash: log.TxHash, + Index: uint32(log.LogIndex), + BlockHash: log.BlockHash, + BlockNumber: ocr2keepers.BlockNumber(log.BlockNumber), + } + return t +} + +func (r *logRecoverer) clean(ctx context.Context) { + r.lock.RLock() + var expired []string + for id, t := range r.visited { + if time.Since(t.visitedAt) > RecoveryCacheTTL { + expired = append(expired, id) + } + } + r.lock.RUnlock() + lggr := r.lggr.With("where", "clean") + if len(expired) == 0 { + lggr.Debug("no expired upkeeps") + return + } + err := r.tryExpire(ctx, expired...) 
+ if err != nil { + lggr.Warnw("failed to clean visited upkeeps", "err", err) + } +} + +func (r *logRecoverer) tryExpire(ctx context.Context, ids ...string) error { + latestBlock, err := r.poller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return fmt.Errorf("failed to get latest block: %w", err) + } + sort.Slice(ids, func(i, j int) bool { + return ids[i] < ids[j] + }) + states, err := r.states.SelectByWorkIDs(ctx, ids...) + if err != nil { + return fmt.Errorf("failed to get states: %w", err) + } + lggr := r.lggr.With("where", "clean") + start, _ := r.getRecoveryWindow(latestBlock.BlockNumber) + r.lock.Lock() + defer r.lock.Unlock() + var removed int + for i, state := range states { + switch state { + case ocr2keepers.UnknownState: + // in case the state is unknown, we can't be sure if the upkeep was performed or not + // so we push it back to the pending list + rec, ok := r.visited[ids[i]] + if !ok { + // in case it was removed by another thread + continue + } + if logBlock := rec.payload.Trigger.LogTriggerExtension.BlockNumber; int64(logBlock) < start { + // we can't recover this log anymore, so we remove it from the visited list + lggr.Debugw("removing expired log: old block", "upkeepID", rec.payload.UpkeepID, + "latestBlock", latestBlock, "logBlock", logBlock, "start", start) + r.removePending(rec.payload.WorkID) + delete(r.visited, ids[i]) + removed++ + continue + } + if err := r.addPending(rec.payload); err == nil { + rec.visitedAt = time.Now() + r.visited[ids[i]] = rec + } + default: + delete(r.visited, ids[i]) + removed++ + } + } + + if removed > 0 { + lggr.Debugw("expired upkeeps", "expired", len(ids), "cleaned", removed) + } + + return nil +} + +// addPending adds a payload to the pending list if it's not already there. +// NOTE: the lock must be held before calling this function. 
+func (r *logRecoverer) addPending(payload ocr2keepers.UpkeepPayload) error { + var exist bool + pending := r.pending + upkeepPayloads := 0 + for _, p := range pending { + if bytes.Equal(p.UpkeepID[:], payload.UpkeepID[:]) { + upkeepPayloads++ + } + if p.WorkID == payload.WorkID { + exist = true + } + } + if upkeepPayloads >= maxPendingPayloadsPerUpkeep { + return fmt.Errorf("upkeep %v has too many payloads in pending queue", payload.UpkeepID) + } + if !exist { + r.pending = append(pending, payload) + } + return nil +} + +// removePending removes a payload from the pending list. +// NOTE: the lock must be held before calling this function. +func (r *logRecoverer) removePending(workID string) { + updated := make([]ocr2keepers.UpkeepPayload, 0, len(r.pending)) + for _, p := range r.pending { + if p.WorkID != workID { + updated = append(updated, p) + } + } + r.pending = updated +} + +// sortPending sorts the pending list by a random order based on the normalized latest block number. +// Divided by 10 to ensure that nodes with similar block numbers won't end up with different order. +// NOTE: the lock must be held before calling this function. 
+func (r *logRecoverer) sortPending(latestBlock uint64) { + normalized := latestBlock / 100 + if normalized == 0 { + normalized = 1 + } + randSeed := random.GetRandomKeySource(nil, normalized) + + shuffledIDs := make(map[string]string, len(r.pending)) + for _, p := range r.pending { + shuffledIDs[p.WorkID] = random.ShuffleString(p.WorkID, randSeed) + } + + sort.SliceStable(r.pending, func(i, j int) bool { + return shuffledIDs[r.pending[i].WorkID] < shuffledIDs[r.pending[j].WorkID] + }) +} + +func (r *logRecoverer) updateBlockTime(ctx context.Context) { + blockTime, err := r.blockTimeResolver.BlockTime(ctx, defaultSampleSize) + if err != nil { + r.lggr.Warnw("failed to compute block time", "err", err) + return + } + if blockTime > 0 { + currentBlockTime := r.blockTime.Load() + newBlockTime := int64(blockTime) + if currentBlockTime > 0 && (int64(math.Abs(float64(currentBlockTime-newBlockTime)))*100/currentBlockTime) > 20 { + r.lggr.Warnf("updating blocktime from %d to %d, this change is larger than 20%", currentBlockTime, newBlockTime) + } else { + r.lggr.Debugf("updating blocktime from %d to %d", currentBlockTime, newBlockTime) + } + r.blockTime.Store(newBlockTime) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go new file mode 100644 index 00000000..70fa0316 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/recoverer_test.go @@ -0,0 +1,1242 @@ +package logprovider + +import ( + "context" + "fmt" + "math" + "math/big" + "sort" + "testing" + "time" + + types2 "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + lpmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +func TestLogRecoverer_GetRecoverables(t *testing.T) { + ctx := testutils.Context(t) + lp := &lpmocks.LogPoller{} + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: 100}, nil) + r := NewLogRecoverer(logger.TestLogger(t), lp, nil, nil, nil, nil, NewOptions(200)) + + tests := []struct { + name string + pending []ocr2keepers.UpkeepPayload + want []ocr2keepers.UpkeepPayload + wantErr bool + }{ + { + "empty", + []ocr2keepers.UpkeepPayload{}, + []ocr2keepers.UpkeepPayload{}, + false, + }, + { + "happy flow", + []ocr2keepers.UpkeepPayload{ + {WorkID: "1", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "2", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "2")}, + }, + []ocr2keepers.UpkeepPayload{ + {WorkID: "1", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "2", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "2")}, + }, + false, + }, + { + "rate limiting", + []ocr2keepers.UpkeepPayload{ + {WorkID: "1", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "2", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "3", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "4", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "5", UpkeepID: core.GenUpkeepID(types2.LogTrigger, 
"1")}, + {WorkID: "6", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "2", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "2")}, + }, + []ocr2keepers.UpkeepPayload{ + {WorkID: "1", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "2", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "3", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "4", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "5", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "2", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "2")}, + }, + false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + r.lock.Lock() + r.pending = tc.pending + r.lock.Unlock() + + got, err := r.GetRecoveryProposals(ctx) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + require.Len(t, got, len(tc.want)) + }) + } +} + +func TestLogRecoverer_Clean(t *testing.T) { + oldLogsOffset := int64(20) + + tests := []struct { + name string + pending []ocr2keepers.UpkeepPayload + visited map[string]visitedRecord + states []ocr2keepers.UpkeepState + wantPending []ocr2keepers.UpkeepPayload + wantVisited []string + }{ + { + "empty", + []ocr2keepers.UpkeepPayload{}, + map[string]visitedRecord{}, + []ocr2keepers.UpkeepState{}, + []ocr2keepers.UpkeepPayload{}, + []string{}, + }, + { + "clean expired", + []ocr2keepers.UpkeepPayload{ + {WorkID: "1", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "2", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "2")}, + {WorkID: "3", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "3")}, + }, + map[string]visitedRecord{ + "1": visitedRecord{time.Now(), ocr2keepers.UpkeepPayload{ + WorkID: "1", + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: ocr2keepers.BlockNumber(oldLogsOffset * 2), + }, + }, + }}, + "2": visitedRecord{time.Now(), ocr2keepers.UpkeepPayload{ + WorkID: "2", + Trigger: 
ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: ocr2keepers.BlockNumber(oldLogsOffset * 2), + }, + }, + }}, + "3": visitedRecord{time.Now().Add(-time.Hour), ocr2keepers.UpkeepPayload{ + WorkID: "3", + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: ocr2keepers.BlockNumber(oldLogsOffset - 10), + }, + }, + }}, + "4": visitedRecord{time.Now().Add(-time.Hour), ocr2keepers.UpkeepPayload{ + WorkID: "4", + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: ocr2keepers.BlockNumber(oldLogsOffset + 10), + }, + }, + }}, + }, + []ocr2keepers.UpkeepState{ + ocr2keepers.UnknownState, + ocr2keepers.UnknownState, + }, + []ocr2keepers.UpkeepPayload{ + {WorkID: "1", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "2", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "2")}, + {WorkID: "4", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "4")}, + }, + []string{"1", "2", "4"}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + lookbackBlocks := int64(100) + r, _, lp, statesReader := setupTestRecoverer(t, time.Millisecond*50, lookbackBlocks) + start, _ := r.getRecoveryWindow(0) + block24h := int64(math.Abs(float64(start))) + + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: block24h + oldLogsOffset}, nil) + statesReader.On("SelectByWorkIDs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.states, nil) + + r.lock.Lock() + r.pending = tc.pending + r.visited = tc.visited + r.lock.Unlock() + + r.clean(ctx) + + r.lock.RLock() + defer r.lock.RUnlock() + + pending := r.pending + require.Equal(t, len(tc.wantPending), len(pending)) + sort.Slice(pending, func(i, j int) bool { + return pending[i].WorkID < pending[j].WorkID + }) + for i := range pending { + 
require.Equal(t, tc.wantPending[i].WorkID, pending[i].WorkID) + } + require.Equal(t, len(tc.wantVisited), len(r.visited)) + for _, id := range tc.wantVisited { + _, ok := r.visited[id] + require.True(t, ok) + } + }) + } +} + +func TestLogRecoverer_Recover(t *testing.T) { + ctx := testutils.Context(t) + + tests := []struct { + name string + lookbackBlocks int64 + latestBlock int64 + latestBlockErr error + active []upkeepFilter + states []ocr2keepers.UpkeepState + statesErr error + logs []logpoller.Log + logsErr error + recoverErr error + proposalsWorkIDs []string + lastRePollBlocks []int64 + }{ + { + "no filters", + 200, + 300, + nil, + []upkeepFilter{}, + []ocr2keepers.UpkeepState{}, + nil, + []logpoller.Log{}, + nil, + nil, + []string{}, + []int64{}, + }, + { + "latest block error", + 200, + 0, + fmt.Errorf("test error"), + []upkeepFilter{}, + []ocr2keepers.UpkeepState{}, + nil, + []logpoller.Log{}, + nil, + fmt.Errorf("test error"), + []string{}, + []int64{}, + }, + { + "states error", + 100, + 200, + nil, + []upkeepFilter{ + { + upkeepID: big.NewInt(1), + addr: common.HexToAddress("0x1").Bytes(), + topics: []common.Hash{ + common.HexToHash("0x1"), + }, + }, + }, + nil, + fmt.Errorf("test error"), + []logpoller.Log{ + { + BlockNumber: 2, + TxHash: common.HexToHash("0x111"), + LogIndex: 1, + BlockHash: common.HexToHash("0x2"), + }, + }, + nil, + nil, + []string{}, + []int64{0}, + }, + { + "get logs error", + 200, + 300, + nil, + []upkeepFilter{ + { + upkeepID: big.NewInt(1), + addr: common.HexToAddress("0x1").Bytes(), + topics: []common.Hash{ + common.HexToHash("0x1"), + }, + }, + }, + []ocr2keepers.UpkeepState{}, + nil, + []logpoller.Log{}, + fmt.Errorf("test error"), + nil, + []string{}, + []int64{0}, + }, + { + "happy flow", + 100, + 500, + nil, + []upkeepFilter{ + { + upkeepID: big.NewInt(1), + addr: common.HexToAddress("0x1").Bytes(), + topics: []common.Hash{ + common.HexToHash("0x1"), + }, + }, + { + upkeepID: big.NewInt(2), + addr: 
common.HexToAddress("0x2").Bytes(), + topics: []common.Hash{ + common.HexToHash("0x2"), + }, + configUpdateBlock: 450, // should be filtered out + }, + { + upkeepID: big.NewInt(3), + addr: common.HexToAddress("0x2").Bytes(), + topics: []common.Hash{ + common.HexToHash("0x2"), + }, + lastRePollBlock: 450, // should be filtered out, as its higher than latest-lookback + }, + }, + []ocr2keepers.UpkeepState{ocr2keepers.UnknownState}, + nil, + []logpoller.Log{ + { + BlockNumber: 2, + TxHash: common.HexToHash("0x111"), + LogIndex: 1, + BlockHash: common.HexToHash("0x2"), + }, + }, + nil, + nil, + []string{"c207451fa897f9bb13b09d54d8655edf0644e027c53521b4a92eafbb64ba4d14"}, + []int64{201, 0, 450}, + }, + { + "lastRePollBlock updated with burst when lagging behind", + 100, + 50000, + nil, + []upkeepFilter{ + { + upkeepID: big.NewInt(1), + addr: common.HexToAddress("0x1").Bytes(), + topics: []common.Hash{ + common.HexToHash("0x1"), + }, + lastRePollBlock: 99, // Should be updated with burst + }, + }, + []ocr2keepers.UpkeepState{ocr2keepers.UnknownState}, + nil, + []logpoller.Log{ + { + BlockNumber: 2, + TxHash: common.HexToHash("0x111"), + LogIndex: 1, + BlockHash: common.HexToHash("0x2"), + }, + }, + nil, + nil, + []string{"c207451fa897f9bb13b09d54d8655edf0644e027c53521b4a92eafbb64ba4d14"}, + []int64{600}, + }, + { + "recovery starts at configUpdateBlock if higher than lastRePollBlock", + 100, + 5000, + nil, + []upkeepFilter{ + { + upkeepID: big.NewInt(1), + addr: common.HexToAddress("0x1").Bytes(), + topics: []common.Hash{ + common.HexToHash("0x1"), + }, + lastRePollBlock: 100, + configUpdateBlock: 500, + }, + }, + []ocr2keepers.UpkeepState{ocr2keepers.UnknownState}, + nil, + []logpoller.Log{ + { + BlockNumber: 2, + TxHash: common.HexToHash("0x111"), + LogIndex: 1, + BlockHash: common.HexToHash("0x2"), + }, + }, + nil, + nil, + []string{"c207451fa897f9bb13b09d54d8655edf0644e027c53521b4a92eafbb64ba4d14"}, + []int64{700}, // should be configUpdateBlock + recoveryLogsBuffer + 
}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + lookbackBlocks := int64(100) + recoverer, filterStore, lp, statesReader := setupTestRecoverer(t, time.Millisecond*50, lookbackBlocks) + + filterStore.AddActiveUpkeeps(tc.active...) + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: tc.latestBlock}, tc.latestBlockErr) + lp.On("LogsWithSigs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.logs, tc.logsErr) + statesReader.On("SelectByWorkIDs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.states, tc.statesErr) + + err := recoverer.recover(ctx) + if tc.recoverErr != nil { + require.Error(t, err) + return + } + require.NoError(t, err) + for i, active := range tc.active { + filters := filterStore.GetFilters(func(f upkeepFilter) bool { + return f.upkeepID.String() == active.upkeepID.String() + }) + require.Equal(t, 1, len(filters)) + require.Equal(t, tc.lastRePollBlocks[i], filters[0].lastRePollBlock) + } + + proposals, err := recoverer.GetRecoveryProposals(ctx) + require.NoError(t, err) + require.Equal(t, len(tc.proposalsWorkIDs), len(proposals)) + if len(proposals) > 0 { + sort.Slice(proposals, func(i, j int) bool { + return proposals[i].WorkID < proposals[j].WorkID + }) + } + for i := range proposals { + require.Equal(t, tc.proposalsWorkIDs[i], proposals[i].WorkID) + } + }) + } +} + +func TestLogRecoverer_SelectFilterBatch(t *testing.T) { + n := recoveryBatchSize*2 + 2 + filters := []upkeepFilter{} + for i := 0; i < n; i++ { + filters = append(filters, upkeepFilter{ + upkeepID: big.NewInt(int64(i)), + }) + } + recoverer, _, _, _ := setupTestRecoverer(t, time.Millisecond*50, int64(100)) + + batch := recoverer.selectFilterBatch(filters) + require.Equal(t, recoveryBatchSize, len(batch)) + + batch = recoverer.selectFilterBatch(filters[:recoveryBatchSize/2]) + require.Equal(t, recoveryBatchSize/2, len(batch)) +} + +func TestLogRecoverer_getFilterBatch(t 
*testing.T) { + tests := []struct { + name string + offsetBlock int64 + filters []upkeepFilter + want int + }{ + { + "empty", + 2, + []upkeepFilter{}, + 0, + }, + { + "filter out of range", + 100, + []upkeepFilter{ + { + upkeepID: big.NewInt(1), + addr: common.HexToAddress("0x1").Bytes(), + lastRePollBlock: 50, + }, + { + upkeepID: big.NewInt(2), + addr: common.HexToAddress("0x2").Bytes(), + lastRePollBlock: 50, + configUpdateBlock: 101, // out of range + }, + { + upkeepID: big.NewInt(3), + addr: common.HexToAddress("0x3").Bytes(), + configUpdateBlock: 99, + }, + }, + 2, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + recoverer, filterStore, _, _ := setupTestRecoverer(t, time.Millisecond*50, int64(100)) + filterStore.AddActiveUpkeeps(tc.filters...) + batch := recoverer.getFilterBatch(tc.offsetBlock) + require.Equal(t, tc.want, len(batch)) + }) + } +} + +func TestLogRecoverer_FilterFinalizedStates(t *testing.T) { + tests := []struct { + name string + logs []logpoller.Log + states []ocr2keepers.UpkeepState + want []logpoller.Log + }{ + { + "empty", + []logpoller.Log{}, + []ocr2keepers.UpkeepState{}, + []logpoller.Log{}, + }, + { + "happy flow", + []logpoller.Log{ + {LogIndex: 0}, {LogIndex: 2}, {LogIndex: 2}, + }, + []ocr2keepers.UpkeepState{ + ocr2keepers.UnknownState, + ocr2keepers.Performed, + ocr2keepers.Ineligible, + }, + []logpoller.Log{ + {LogIndex: 0}, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + recoverer, _, _, _ := setupTestRecoverer(t, time.Millisecond*50, int64(100)) + state := recoverer.filterFinalizedStates(upkeepFilter{}, tc.logs, tc.states) + require.Equal(t, len(tc.want), len(state)) + for i := range state { + require.Equal(t, tc.want[i].LogIndex, state[i].LogIndex) + } + }) + } +} + +func TestLogRecoverer_GetProposalData(t *testing.T) { + for _, tc := range []struct { + name string + proposal ocr2keepers.CoordinatedBlockProposal + skipFilter bool + filterStore UpkeepFilterStore 
+ logPoller logpoller.LogPoller + client client.Client + stateReader core.UpkeepStateReader + wantBytes []byte + expectErr bool + wantErr error + }{ + { + name: "passing an empty proposal with an empty upkeep ID returns an error", + proposal: ocr2keepers.CoordinatedBlockProposal{}, + expectErr: true, + wantErr: errors.New("not a log trigger upkeep ID"), + }, + { + name: "if a filter is not found for the upkeep ID, an error is returned", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + }, + skipFilter: true, + expectErr: true, + wantErr: errors.New("filter not found for upkeep 452312848583266388373324160190187140457511065560374322131410487042692349952"), + }, + { + name: "if an error is encountered fetching the latest block, an error is returned", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: 0, + }, + }, + }, + filterStore: &mockFilterStore{ + HasFn: func(id *big.Int) bool { + return true + }, + }, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 0, errors.New("latest block boom") + }, + }, + expectErr: true, + wantErr: errors.New("latest block boom"), + }, + { + name: "if an error is encountered fetching the tx receipt, an error is returned", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: 0, + }, + }, + }, + filterStore: &mockFilterStore{ + HasFn: func(id *big.Int) bool { + return true + }, + }, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 100, nil + }, + }, + client: &mockClient{ + CallContextFn: func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error { + 
return errors.New("tx receipt boom") + }, + }, + expectErr: true, + wantErr: errors.New("tx receipt boom"), + }, + { + name: "if the tx block is nil, an error is returned", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: 0, + }, + }, + }, + filterStore: &mockFilterStore{ + HasFn: func(id *big.Int) bool { + return true + }, + }, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 100, nil + }, + }, + client: &mockClient{ + CallContextFn: func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error { + return nil + }, + }, + expectErr: true, + wantErr: errors.New("failed to get tx block"), + }, + { + name: "if a log trigger extension block number is 0, and the block number on the tx receipt is not recoverable, an error is returned", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: 0, + }, + }, + }, + filterStore: &mockFilterStore{ + HasFn: func(id *big.Int) bool { + return true + }, + }, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 100, nil + }, + }, + client: &mockClient{ + CallContextFn: func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error { + receipt.Status = 1 + receipt.BlockNumber = big.NewInt(200) + return nil + }, + }, + expectErr: true, + wantErr: errors.New("log block is not recoverable"), + }, + { + name: "if a log block is not recoverable, an error is returned", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: 200, + }, + 
}, + }, + filterStore: &mockFilterStore{ + HasFn: func(id *big.Int) bool { + return true + }, + }, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 100, nil + }, + }, + client: &mockClient{ + CallContextFn: func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error { + receipt.Status = 1 + receipt.BlockNumber = big.NewInt(200) + return nil + }, + }, + expectErr: true, + wantErr: errors.New("log block is not recoverable"), + }, + { + name: "if a log block has does not match, an error is returned", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: 200, + BlockHash: common.HexToHash("0x2"), + }, + }, + }, + filterStore: &mockFilterStore{ + HasFn: func(id *big.Int) bool { + return true + }, + }, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 100, nil + }, + }, + client: &mockClient{ + CallContextFn: func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error { + receipt.Status = 1 + receipt.BlockNumber = big.NewInt(200) + receipt.BlockHash = common.HexToHash("0x1") + return nil + }, + }, + expectErr: true, + wantErr: errors.New("log tx reorged"), + }, + { + name: "if a log block is recoverable, when the upkeep state reader errors, an error is returned", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: 80, + }, + }, + }, + filterStore: &mockFilterStore{ + HasFn: func(id *big.Int) bool { + return true + }, + }, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 300, nil + }, + }, + stateReader: &mockStateReader{ + SelectByWorkIDsFn: func(ctx 
context.Context, workIDs ...string) ([]ocr2keepers.UpkeepState, error) { + return nil, errors.New("upkeep state boom") + }, + }, + client: &mockClient{ + CallContextFn: func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error { + receipt.Status = 1 + receipt.BlockNumber = big.NewInt(80) + return nil + }, + }, + expectErr: true, + wantErr: errors.New("upkeep state boom"), + }, + { + name: "if a log block is recoverable, when the upkeep state reader returns a non recoverable state, an error is returned", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: 80, + }, + }, + }, + filterStore: &mockFilterStore{ + HasFn: func(id *big.Int) bool { + return true + }, + }, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 300, nil + }, + }, + client: &mockClient{ + CallContextFn: func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error { + receipt.Status = 1 + receipt.BlockNumber = big.NewInt(80) + return nil + }, + }, + stateReader: &mockStateReader{ + SelectByWorkIDsFn: func(ctx context.Context, workIDs ...string) ([]ocr2keepers.UpkeepState, error) { + return []ocr2keepers.UpkeepState{ + ocr2keepers.Ineligible, + }, nil + }, + }, + expectErr: true, + wantErr: errors.New("upkeep state is not recoverable"), + }, + { + name: "if a log block is recoverable, when the filter address is empty, an error is returned", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: 80, + }, + }, + }, + filterStore: &mockFilterStore{ + HasFn: func(id *big.Int) bool { + return true + }, + RangeFiltersByIDsFn: func(iterator func(int, upkeepFilter), ids ...*big.Int) { + + }, + 
}, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 300, nil + }, + }, + client: &mockClient{ + CallContextFn: func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error { + receipt.Status = 1 + receipt.BlockNumber = big.NewInt(80) + return nil + }, + }, + stateReader: &mockStateReader{ + SelectByWorkIDsFn: func(ctx context.Context, workIDs ...string) ([]ocr2keepers.UpkeepState, error) { + return []ocr2keepers.UpkeepState{ + ocr2keepers.UnknownState, + }, nil + }, + }, + expectErr: true, + wantErr: errors.New("invalid filter found for upkeepID 452312848583266388373324160190187140457511065560374322131410487042692349952"), + }, + { + name: "if a log block is recoverable, when the log poller returns an error fetching logs, an error is returned", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: 80, + }, + }, + }, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 300, nil + }, + LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return nil, errors.New("logs with sigs boom") + }, + }, + client: &mockClient{ + CallContextFn: func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error { + receipt.Status = 1 + receipt.BlockNumber = big.NewInt(80) + return nil + }, + }, + stateReader: &mockStateReader{ + SelectByWorkIDsFn: func(ctx context.Context, workIDs ...string) ([]ocr2keepers.UpkeepState, error) { + return []ocr2keepers.UpkeepState{ + ocr2keepers.UnknownState, + }, nil + }, + }, + expectErr: true, + wantErr: errors.New("could not read logs: logs with sigs boom"), + }, + { + name: "if a log block is recoverable, when logs cannot be found for an upkeep ID, an error is 
returned", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: ocr2keepers.Trigger{ + LogTriggerExtension: &ocr2keepers.LogTriggerExtension{ + BlockNumber: 80, + }, + }, + }, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 300, nil + }, + LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return []logpoller.Log{ + { + BlockNumber: 80, + }, + }, nil + }, + }, + client: &mockClient{ + CallContextFn: func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error { + receipt.Status = 1 + receipt.BlockNumber = big.NewInt(80) + return nil + }, + }, + stateReader: &mockStateReader{ + SelectByWorkIDsFn: func(ctx context.Context, workIDs ...string) ([]ocr2keepers.UpkeepState, error) { + return []ocr2keepers.UpkeepState{ + ocr2keepers.UnknownState, + }, nil + }, + }, + expectErr: true, + wantErr: errors.New(`no log found for upkeepID 452312848583266388373324160190187140457511065560374322131410487042692349952 and trigger {"BlockNumber":0,"BlockHash":"0000000000000000000000000000000000000000000000000000000000000000","LogTriggerExtension":{"BlockHash":"0000000000000000000000000000000000000000000000000000000000000000","BlockNumber":80,"Index":0,"TxHash":"0000000000000000000000000000000000000000000000000000000000000000"}}`), + }, + { + name: "happy path with empty check data", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: func() ocr2keepers.Trigger { + t := ocr2keepers.NewTrigger( + ocr2keepers.BlockNumber(80), + [32]byte{1}, + ) + t.LogTriggerExtension = &ocr2keepers.LogTriggerExtension{ + TxHash: [32]byte{2}, + Index: uint32(3), + BlockHash: [32]byte{1}, + BlockNumber: ocr2keepers.BlockNumber(80), + } + return t + }(), + WorkID: 
"7f775793422d178c90e99c3bbdf05181bc6bb6ce13170e87c92ac396bb7ddda0", + }, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 300, nil + }, + LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return []logpoller.Log{ + { + BlockNumber: 80, + BlockHash: [32]byte{1}, + TxHash: [32]byte{2}, + LogIndex: 3, + }, + }, nil + }, + }, + client: &mockClient{ + CallContextFn: func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error { + receipt.Status = 1 + receipt.BlockNumber = big.NewInt(80) + receipt.BlockHash = [32]byte{1} + return nil + }, + }, + stateReader: &mockStateReader{ + SelectByWorkIDsFn: func(ctx context.Context, workIDs ...string) ([]ocr2keepers.UpkeepState, error) { + return []ocr2keepers.UpkeepState{ + ocr2keepers.UnknownState, + }, nil + }, + }, + wantBytes: []byte(nil), + }, + { + name: "happy path with check data", + proposal: ocr2keepers.CoordinatedBlockProposal{ + UpkeepID: core.GenUpkeepID(types2.LogTrigger, "123"), + Trigger: func() ocr2keepers.Trigger { + t := ocr2keepers.NewTrigger( + ocr2keepers.BlockNumber(80), + [32]byte{1}, + ) + t.LogTriggerExtension = &ocr2keepers.LogTriggerExtension{ + TxHash: [32]byte{2}, + Index: uint32(3), + BlockHash: [32]byte{1}, + BlockNumber: ocr2keepers.BlockNumber(80), + } + return t + }(), + WorkID: "7f775793422d178c90e99c3bbdf05181bc6bb6ce13170e87c92ac396bb7ddda0", + }, + logPoller: &mockLogPoller{ + LatestBlockFn: func(qopts ...pg.QOpt) (int64, error) { + return 300, nil + }, + LogsWithSigsFn: func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return []logpoller.Log{ + { + EvmChainId: ubig.New(big.NewInt(1)), + LogIndex: 3, + BlockHash: [32]byte{1}, + BlockNumber: 80, + BlockTimestamp: time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC), + EventSig: common.HexToHash("abc"), + TxHash: [32]byte{2}, + 
Data: []byte{1, 2, 3}, + CreatedAt: time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC), + }, + }, nil + }, + }, + client: &mockClient{ + CallContextFn: func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error { + receipt.Status = 1 + receipt.BlockNumber = big.NewInt(80) + receipt.BlockHash = [32]byte{1} + return nil + }, + }, + stateReader: &mockStateReader{ + SelectByWorkIDsFn: func(ctx context.Context, workIDs ...string) ([]ocr2keepers.UpkeepState, error) { + return []ocr2keepers.UpkeepState{ + ocr2keepers.UnknownState, + }, nil + }, + }, + wantBytes: []byte{1, 2, 3}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + recoverer, filterStore, _, _ := setupTestRecoverer(t, time.Second, 10) + + if !tc.skipFilter { + filterStore.AddActiveUpkeeps(upkeepFilter{ + addr: []byte("test"), + topics: []common.Hash{common.HexToHash("0x1"), common.HexToHash("0x2"), common.HexToHash("0x3"), common.HexToHash("0x4")}, + upkeepID: core.GenUpkeepID(types2.LogTrigger, "123").BigInt(), + }) + } + + if tc.filterStore != nil { + recoverer.filterStore = tc.filterStore + } + if tc.logPoller != nil { + recoverer.poller = tc.logPoller + } + if tc.client != nil { + recoverer.client = tc.client + } + if tc.stateReader != nil { + recoverer.states = tc.stateReader + } + + b, err := recoverer.GetProposalData(testutils.Context(t), tc.proposal) + if tc.expectErr { + assert.Error(t, err) + assert.Equal(t, tc.wantErr.Error(), err.Error()) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.wantBytes, b) + } + }) + } +} + +func TestLogRecoverer_pending(t *testing.T) { + tests := []struct { + name string + maxPerUpkeep int + exist []ocr2keepers.UpkeepPayload + new []ocr2keepers.UpkeepPayload + errored []bool + want []ocr2keepers.UpkeepPayload + }{ + { + name: "empty", + maxPerUpkeep: 10, + exist: []ocr2keepers.UpkeepPayload{}, + new: []ocr2keepers.UpkeepPayload{}, + errored: []bool{}, + want: []ocr2keepers.UpkeepPayload{}, + }, + { + name: "add new and existing", + 
maxPerUpkeep: 10, + exist: []ocr2keepers.UpkeepPayload{ + {WorkID: "1", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + }, + new: []ocr2keepers.UpkeepPayload{ + {WorkID: "1", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "2", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "2")}, + }, + errored: []bool{false, false}, + want: []ocr2keepers.UpkeepPayload{ + {WorkID: "1", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "2", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "2")}, + }, + }, + { + name: "exceed limits for upkeep", + maxPerUpkeep: 3, + exist: []ocr2keepers.UpkeepPayload{ + {WorkID: "1", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "2", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "3", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + }, + new: []ocr2keepers.UpkeepPayload{ + {WorkID: "4", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + }, + errored: []bool{true}, + want: []ocr2keepers.UpkeepPayload{ + {WorkID: "1", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "2", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + {WorkID: "3", UpkeepID: core.GenUpkeepID(types2.LogTrigger, "1")}, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + origMaxPendingPayloadsPerUpkeep := maxPendingPayloadsPerUpkeep + maxPendingPayloadsPerUpkeep = tc.maxPerUpkeep + defer func() { + maxPendingPayloadsPerUpkeep = origMaxPendingPayloadsPerUpkeep + }() + + r := NewLogRecoverer(logger.TestLogger(t), nil, nil, nil, nil, nil, NewOptions(200)) + r.lock.Lock() + r.pending = tc.exist + for i, p := range tc.new { + err := r.addPending(p) + if tc.errored[i] { + require.Error(t, err) + continue + } + require.NoError(t, err) + } + pending := r.pending + require.GreaterOrEqual(t, len(pending), len(tc.new)) + require.Equal(t, len(tc.want), len(pending)) + sort.Slice(pending, func(i, j int) bool { + return pending[i].WorkID < pending[j].WorkID + }) + for i 
:= range pending { + require.Equal(t, tc.want[i].WorkID, pending[i].WorkID) + } + r.lock.Unlock() + for _, p := range tc.want { + r.removePending(p.WorkID) + } + r.lock.Lock() + defer r.lock.Unlock() + require.Equal(t, 0, len(r.pending)) + }) + } +} + +type mockFilterStore struct { + UpkeepFilterStore + HasFn func(id *big.Int) bool + RangeFiltersByIDsFn func(iterator func(int, upkeepFilter), ids ...*big.Int) +} + +func (s *mockFilterStore) RangeFiltersByIDs(iterator func(int, upkeepFilter), ids ...*big.Int) { + s.RangeFiltersByIDsFn(iterator, ids...) +} + +func (s *mockFilterStore) Has(id *big.Int) bool { + return s.HasFn(id) +} + +type mockLogPoller struct { + logpoller.LogPoller + LatestBlockFn func(qopts ...pg.QOpt) (int64, error) + LogsWithSigsFn func(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) +} + +func (p *mockLogPoller) LogsWithSigs(start, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return p.LogsWithSigsFn(start, end, eventSigs, address, qopts...) +} +func (p *mockLogPoller) LatestBlock(qopts ...pg.QOpt) (logpoller.LogPollerBlock, error) { + block, err := p.LatestBlockFn(qopts...) + return logpoller.LogPollerBlock{BlockNumber: block}, err +} + +type mockClient struct { + client.Client + CallContextFn func(ctx context.Context, receipt *types.Receipt, method string, args ...interface{}) error +} + +func (c *mockClient) CallContext(ctx context.Context, r interface{}, method string, args ...interface{}) error { + receipt := r.(*types.Receipt) + return c.CallContextFn(ctx, receipt, method, args) +} + +type mockStateReader struct { + SelectByWorkIDsFn func(ctx context.Context, workIDs ...string) ([]ocr2keepers.UpkeepState, error) +} + +func (r *mockStateReader) SelectByWorkIDs(ctx context.Context, workIDs ...string) ([]ocr2keepers.UpkeepState, error) { + return r.SelectByWorkIDsFn(ctx, workIDs...) 
+} + +func setupTestRecoverer(t *testing.T, interval time.Duration, lookbackBlocks int64) (*logRecoverer, UpkeepFilterStore, *lpmocks.LogPoller, *mocks.UpkeepStateReader) { + lp := new(lpmocks.LogPoller) + statesReader := new(mocks.UpkeepStateReader) + filterStore := NewUpkeepFilterStore() + opts := NewOptions(lookbackBlocks) + opts.ReadInterval = interval / 5 + opts.LookbackBlocks = lookbackBlocks + recoverer := NewLogRecoverer(logger.TestLogger(t), lp, nil, statesReader, &mockedPacker{}, filterStore, opts) + return recoverer, filterStore, lp, statesReader +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/mercury.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/mercury.go new file mode 100644 index 00000000..6632da40 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/mercury.go @@ -0,0 +1,180 @@ +package mercury + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "math/big" + "net/http" + "time" + + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/patrickmn/go-cache" + + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" +) + +const ( + FeedIDs = "feedIDs" // valid for v0.3 + FeedIdHex = "feedIdHex" // valid for v0.2 + BlockNumber = "blockNumber" // valid for v0.2 + Timestamp = "timestamp" // valid for v0.3 + totalFastPluginRetries = 5 + totalMediumPluginRetries = 10 +) + +var GenerateHMACFn = func(method string, path string, body []byte, clientId string, secret string, ts int64) string { + bodyHash := sha256.New() + bodyHash.Write(body) + hashString := fmt.Sprintf("%s %s %s %s %d", + method, + path, + hex.EncodeToString(bodyHash.Sum(nil)), + clientId, + ts) + signedMessage := hmac.New(sha256.New, 
[]byte(secret)) + signedMessage.Write([]byte(hashString)) + userHmac := hex.EncodeToString(signedMessage.Sum(nil)) + return userHmac +} + +// CalculateRetryConfig returns plugin retry interval based on how many times plugin has retried this work +var CalculateRetryConfigFn = func(prk string, mercuryConfig MercuryConfigProvider) time.Duration { + var retryInterval time.Duration + var retries int + totalAttempts, ok := mercuryConfig.GetPluginRetry(prk) + if ok { + retries = totalAttempts.(int) + if retries < totalFastPluginRetries { + retryInterval = 1 * time.Second + } else if retries < totalMediumPluginRetries { + retryInterval = 5 * time.Second + } + // if the core node has retried totalMediumPluginRetries times, do not set retry interval and plugin will use + // the default interval + } else { + retryInterval = 1 * time.Second + } + mercuryConfig.SetPluginRetry(prk, retries+1, cache.DefaultExpiration) + return retryInterval +} + +type MercuryData struct { + Index int + Error error + Retryable bool + Bytes [][]byte + State encoding.PipelineExecutionState +} + +type MercuryConfigProvider interface { + Credentials() *types.MercuryCredentials + IsUpkeepAllowed(string) (interface{}, bool) + SetUpkeepAllowed(string, interface{}, time.Duration) + GetPluginRetry(string) (interface{}, bool) + SetPluginRetry(string, interface{}, time.Duration) +} + +type HttpClient interface { + Do(req *http.Request) (*http.Response, error) +} + +type MercuryClient interface { + DoRequest(ctx context.Context, streamsLookup *StreamsLookup, pluginRetryKey string) (encoding.PipelineExecutionState, encoding.UpkeepFailureReason, [][]byte, bool, time.Duration, error) +} + +type StreamsLookupError struct { + FeedParamKey string + Feeds []string + TimeParamKey string + Time *big.Int + ExtraData []byte +} + +type StreamsLookup struct { + *StreamsLookupError + UpkeepId *big.Int + Block uint64 +} + +func (l *StreamsLookup) IsMercuryV02() bool { + return l.FeedParamKey == FeedIdHex && l.TimeParamKey 
== BlockNumber +} + +func (l *StreamsLookup) IsMercuryV03() bool { + return l.FeedParamKey == FeedIDs +} + +// IsMercuryV03UsingBlockNumber is used to distinguish the batch path. It is used for Mercury V03 only +func (l *StreamsLookup) IsMercuryV03UsingBlockNumber() bool { + return l.TimeParamKey == BlockNumber +} + +type Packer interface { + UnpackCheckCallbackResult(callbackResp []byte) (encoding.PipelineExecutionState, bool, []byte, encoding.UpkeepFailureReason, *big.Int, error) + PackGetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) + UnpackGetUpkeepPrivilegeConfig(resp []byte) ([]byte, error) + DecodeStreamsLookupRequest(data []byte) (*StreamsLookupError, error) +} + +type abiPacker struct { + registryABI abi.ABI + streamsABI abi.ABI +} + +func NewAbiPacker() *abiPacker { + return &abiPacker{registryABI: core.RegistryABI, streamsABI: core.StreamsCompatibleABI} +} + +// DecodeStreamsLookupRequest decodes the revert error StreamsLookup(string feedParamKey, string[] feeds, string feedParamKey, uint256 time, byte[] extraData) +func (p *abiPacker) DecodeStreamsLookupRequest(data []byte) (*StreamsLookupError, error) { + e := p.streamsABI.Errors["StreamsLookup"] + unpack, err := e.Unpack(data) + if err != nil { + return nil, fmt.Errorf("unpack error: %w", err) + } + errorParameters := unpack.([]interface{}) + + return &StreamsLookupError{ + FeedParamKey: *abi.ConvertType(errorParameters[0], new(string)).(*string), + Feeds: *abi.ConvertType(errorParameters[1], new([]string)).(*[]string), + TimeParamKey: *abi.ConvertType(errorParameters[2], new(string)).(*string), + Time: *abi.ConvertType(errorParameters[3], new(*big.Int)).(**big.Int), + ExtraData: *abi.ConvertType(errorParameters[4], new([]byte)).(*[]byte), + }, nil +} + +func (p *abiPacker) UnpackCheckCallbackResult(callbackResp []byte) (encoding.PipelineExecutionState, bool, []byte, encoding.UpkeepFailureReason, *big.Int, error) { + out, err := 
p.registryABI.Methods["checkCallback"].Outputs.UnpackValues(callbackResp) + if err != nil { + return encoding.PackUnpackDecodeFailed, false, nil, 0, nil, fmt.Errorf("%w: unpack checkUpkeep return: %s", err, hexutil.Encode(callbackResp)) + } + + upkeepNeeded := *abi.ConvertType(out[0], new(bool)).(*bool) + rawPerformData := *abi.ConvertType(out[1], new([]byte)).(*[]byte) + failureReason := encoding.UpkeepFailureReason(*abi.ConvertType(out[2], new(uint8)).(*uint8)) + gasUsed := *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) + + return encoding.NoPipelineError, upkeepNeeded, rawPerformData, failureReason, gasUsed, nil +} + +func (p *abiPacker) UnpackGetUpkeepPrivilegeConfig(resp []byte) ([]byte, error) { + out, err := p.registryABI.Methods["getUpkeepPrivilegeConfig"].Outputs.UnpackValues(resp) + if err != nil { + return nil, fmt.Errorf("%w: unpack getUpkeepPrivilegeConfig return", err) + } + + bts := *abi.ConvertType(out[0], new([]byte)).(*[]byte) + + return bts, nil +} + +func (p *abiPacker) PackGetUpkeepPrivilegeConfig(upkeepId *big.Int) ([]byte, error) { + return p.registryABI.Pack("getUpkeepPrivilegeConfig", upkeepId) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/mercury_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/mercury_test.go new file mode 100644 index 00000000..6556260f --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/mercury_test.go @@ -0,0 +1,242 @@ +package mercury + +import ( + "encoding/json" + "errors" + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" +) + +func TestGenerateHMACFn(t *testing.T) { + testCases := []struct { + name string + method string + path string + body []byte + clientId string + secret string + ts int64 + expected string + }{ + { 
+			name:     "generate hmac function",
+			method:   "GET",
+			path:     "/example",
+			body:     []byte(""),
+			clientId: "yourClientId",
+			secret:   "yourSecret",
+			ts:       1234567890,
+			expected: "17b0bb6b14f7b48ef9d24f941ff8f33ad2d5e94ac343380be02c2f1ca32fdbd8",
+		},
+		{
+			name:     "generate hmac function with non-empty body",
+			method:   "POST",
+			path:     "/api",
+			body:     []byte("request body"),
+			clientId: "anotherClientId",
+			secret:   "anotherSecret",
+			ts:       1597534567,
+			expected: "d326c168c50c996e271d6b3b4c97944db01163994090f73fcf4fd42f23f06bbb",
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			result := GenerateHMACFn(tc.method, tc.path, tc.body, tc.clientId, tc.secret, tc.ts)
+
+			if result != tc.expected {
+				t.Errorf("Expected: %s, Got: %s", tc.expected, result)
+			}
+		})
+	}
+}
+
+// TestPacker_DecodeStreamsLookupRequest checks decoding of the StreamsLookup
+// revert payload: a valid v0.2 (feedIdHex/blockNumber) blob decodes into its
+// component fields, while data with a wrong 4-byte selector surfaces an
+// unpack error.
+func TestPacker_DecodeStreamsLookupRequest(t *testing.T) {
+	tests := []struct {
+		name     string
+		data     []byte
+		expected *StreamsLookupError
+		state    uint8
+		err      error
+	}{
+		{
+			name: "success - decode to streams lookup",
+			// ABI-encoded StreamsLookup revert: selector 0xf055e4a2 followed by
+			// (feedParamKey, feeds[], timeParamKey, time, extraData).
+			data: hexutil.MustDecode("0xf055e4a200000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002400000000000000000000000000000000000000000000000000000000002435eb50000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000000000000000000000000000000000000966656564496448657800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000423078343535343438326435353533343432643431353234323439353435323535346432643534343535333534346534353534303030303030303030303030303030300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000042307834323534343332643535353334343264343135323432343935343532353534643264353434353533353434653435353430303030303030303030303030303030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b626c6f636b4e756d62657200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"),
+			expected: &StreamsLookupError{
+				FeedParamKey: "feedIdHex",
+				Feeds:        []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"},
+				TimeParamKey: "blockNumber",
+				Time:         big.NewInt(37969589),
+				ExtraData:    []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100},
+			},
+		},
+		{
+			name: "failure - unpack error",
+			data: []byte{1, 2, 3, 4},
+			err:  errors.New("unpack error: invalid identifier, have 0x01020304 want 0xf055e4a2"),
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			packer := NewAbiPacker()
+			fl, err := packer.DecodeStreamsLookupRequest(tt.data)
+			// On the error path fl is expected to be nil, matching the nil
+			// `expected` field of the test case.
+			assert.Equal(t, tt.expected, fl)
+			if tt.err != nil {
+				assert.Equal(t, tt.err.Error(), err.Error())
+			}
+		})
+	}
+}
+
+// TestPacker_UnpackGetUpkeepPrivilegeConfig verifies that the ABI-encoded
+// privilege config unpacks to the raw JSON bytes, and that empty input errors.
+func TestPacker_UnpackGetUpkeepPrivilegeConfig(t *testing.T) {
+	tests := []struct {
+		name    string
+		raw     []byte
+		errored bool
+	}{
+		{
+			name: "happy path",
+			raw: func() []byte {
+				b, _ := hexutil.Decode("0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000177b226d657263757279456e61626c6564223a747275657d000000000000000000")
+
+				return b
+			}(),
+			errored: false,
+		},
+		{
+			name: "error empty config",
+			raw: func() []byte {
+				b, _ := hexutil.Decode("0x")
+
+				return b
+			}(),
+			errored: true,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			packer := NewAbiPacker()
+
+			b, err := packer.UnpackGetUpkeepPrivilegeConfig(test.raw)
+
+			if !test.errored {
+				require.NoError(t, err, "should unpack bytes from abi encoded value")
+
+				// the actual struct to unmarshal into is not available to this
+				// package so basic json encoding is the limit of the following test
+				var data map[string]interface{}
+				err = json.Unmarshal(b, &data)
+
+				assert.NoError(t, err, "packed data should unmarshal using json encoding")
+				assert.Equal(t, []byte(`{"mercuryEnabled":true}`), b)
+			} else {
+				assert.NotNil(t, err, "error expected from unpack function")
+			}
+		})
+	}
+}
+
+// TestPacker_PackGetUpkeepPrivilegeConfig verifies the ABI encoding of a
+// getUpkeepPrivilegeConfig call against a known byte sequence.
+func TestPacker_PackGetUpkeepPrivilegeConfig(t *testing.T) {
+	tests := []struct {
+		name     string
+		upkeepId *big.Int
+		raw      []byte
+		errored  bool
+	}{
+		{
+			name: "happy path",
+			upkeepId: func() *big.Int {
+				id, _ := new(big.Int).SetString("52236098515066839510538748191966098678939830769967377496848891145101407612976", 10)
+
+				return id
+			}(),
+			raw: func() []byte {
+				b, _ := 
hexutil.Decode("0x19d97a94737c9583000000000000000000000001ea8ed6d0617dd5b3b87374020efaf030") + + return b + }(), + errored: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + packer := NewAbiPacker() + + b, err := packer.PackGetUpkeepPrivilegeConfig(test.upkeepId) + + if !test.errored { + require.NoError(t, err, "no error expected from packing") + + assert.Equal(t, test.raw, b, "raw bytes for output should match expected") + } else { + assert.NotNil(t, err, "error expected from packing function") + } + }) + } +} + +func TestPacker_UnpackCheckCallbackResult(t *testing.T) { + tests := []struct { + Name string + CallbackResp []byte + UpkeepNeeded bool + PerformData []byte + FailureReason encoding.UpkeepFailureReason + GasUsed *big.Int + ErrorString string + State encoding.PipelineExecutionState + }{ + { + Name: "unpack upkeep needed", + CallbackResp: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 46, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 10, 11, 21, 31, 41, 15, 16, 17, 18, 19, 13, 14, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 120, 111, 101, 122, 90, 54, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + UpkeepNeeded: true, + PerformData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 10, 11, 21, 31, 41, 15, 16, 17, 18, 19, 13, 14, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 120, 111, 101, 122, 90, 54, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + FailureReason: encoding.UpkeepFailureReasonNone, + GasUsed: big.NewInt(11796), + }, + { + Name: "unpack upkeep not needed", + CallbackResp: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 208, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 10, 11, 21, 31, 41, 15, 16, 17, 18, 19, 13, 14, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 120, 111, 101, 122, 90, 54, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + UpkeepNeeded: false, + PerformData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 1, 
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 10, 11, 21, 31, 41, 15, 16, 17, 18, 19, 13, 14, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 120, 111, 101, 122, 90, 54, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + FailureReason: encoding.UpkeepFailureReasonUpkeepNotNeeded, + GasUsed: big.NewInt(13008), + }, + { + Name: "unpack malformed data", + CallbackResp: []byte{0, 0, 0, 23, 4, 163, 66, 91, 228, 102, 200, 84, 144, 233, 218, 44, 168, 192, 191, 253, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + UpkeepNeeded: false, + PerformData: nil, + ErrorString: "abi: improperly encoded boolean value: unpack checkUpkeep return: ", + State: encoding.PackUnpackDecodeFailed, + }, + } + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + packer := NewAbiPacker() + + state, needed, pd, failureReason, gasUsed, err := packer.UnpackCheckCallbackResult(test.CallbackResp) + + if test.ErrorString != "" { + assert.EqualError(t, err, test.ErrorString+hexutil.Encode(test.CallbackResp)) + } else { + assert.Nil(t, err) + } + assert.Equal(t, test.UpkeepNeeded, needed) + assert.Equal(t, test.PerformData, pd) + assert.Equal(t, test.FailureReason, failureReason) + assert.Equal(t, test.GasUsed, gasUsed) + assert.Equal(t, test.State, state) + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams.go new file mode 100644 index 00000000..7a8a9bde --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams.go @@ -0,0 +1,337 @@ +package streams + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "net/http" + "sync" 
+ "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/patrickmn/go-cache" + + "github.com/goplugin/plugin-common/pkg/services" + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury" + v02 "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v02" + v03 "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v03" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type Lookup interface { + Lookup(ctx context.Context, checkResults []ocr2keepers.CheckResult) []ocr2keepers.CheckResult +} + +type latestBlockProvider interface { + LatestBlock() *ocr2keepers.BlockKey +} + +type streamRegistry interface { + GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + CheckCallback(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) + Address() common.Address +} + +type contextCaller interface { + CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error +} + +type streams struct { + services.StateMachine + packer mercury.Packer + mercuryConfig mercury.MercuryConfigProvider + abi abi.ABI + blockSubscriber latestBlockProvider + registry streamRegistry + client contextCaller + lggr logger.Logger + threadCtrl utils.ThreadControl + v02Client 
mercury.MercuryClient + v03Client mercury.MercuryClient +} + +// UpkeepPrivilegeConfig represents the administrative offchain config for each upkeep. It can be set by s_upkeepPrivilegeManager +// role on the registry. Upkeeps allowed to use Mercury server will have this set to true. +type UpkeepPrivilegeConfig struct { + MercuryEnabled bool `json:"mercuryEnabled"` +} + +func NewStreamsLookup( + mercuryConfig mercury.MercuryConfigProvider, + blockSubscriber latestBlockProvider, + client contextCaller, + registry streamRegistry, + lggr logger.Logger) *streams { + httpClient := http.DefaultClient + threadCtrl := utils.NewThreadControl() + packer := mercury.NewAbiPacker() + + return &streams{ + packer: packer, + mercuryConfig: mercuryConfig, + abi: core.RegistryABI, + blockSubscriber: blockSubscriber, + registry: registry, + client: client, + lggr: lggr, + threadCtrl: threadCtrl, + v02Client: v02.NewClient(mercuryConfig, httpClient, threadCtrl, lggr), + v03Client: v03.NewClient(mercuryConfig, httpClient, threadCtrl, lggr), + } +} + +// Lookup looks through check upkeep results looking for any that need off chain lookup +func (s *streams) Lookup(ctx context.Context, checkResults []ocr2keepers.CheckResult) []ocr2keepers.CheckResult { + lookups := map[int]*mercury.StreamsLookup{} + for i, checkResult := range checkResults { + s.buildResult(ctx, i, checkResult, checkResults, lookups) + } + + var wg sync.WaitGroup + for i, lookup := range lookups { + wg.Add(1) + func(i int, lookup *mercury.StreamsLookup) { + s.threadCtrl.Go(func(ctx context.Context) { + s.doLookup(ctx, &wg, lookup, i, checkResults) + }) + }(i, lookup) + } + wg.Wait() + + // don't surface error to plugin bc StreamsLookup process should be self-contained. 
+ return checkResults +} + +// buildResult checks if the upkeep is allowed by Mercury and builds a streams lookup request from the check result +func (s *streams) buildResult(ctx context.Context, i int, checkResult ocr2keepers.CheckResult, checkResults []ocr2keepers.CheckResult, lookups map[int]*mercury.StreamsLookup) { + lookupLggr := s.lggr.With("where", "StreamsLookup") + if checkResult.IneligibilityReason != uint8(encoding.UpkeepFailureReasonTargetCheckReverted) { + // Streams Lookup only works when upkeep target check reverts + return + } + + block := big.NewInt(int64(checkResult.Trigger.BlockNumber)) + upkeepId := checkResult.UpkeepID + + // Try to decode the revert error into streams lookup format. User upkeeps can revert with any reason, see if they + // tried to call mercury + lookupLggr.Infof("at block %d upkeep %s trying to DecodeStreamsLookupRequest performData=%s", block, upkeepId, hexutil.Encode(checkResults[i].PerformData)) + streamsLookupErr, err := s.packer.DecodeStreamsLookupRequest(checkResult.PerformData) + if err != nil { + lookupLggr.Debugf("at block %d upkeep %s DecodeStreamsLookupRequest failed: %v", block, upkeepId, err) + // user contract did not revert with StreamsLookup error + return + } + streamsLookupResponse := &mercury.StreamsLookup{StreamsLookupError: streamsLookupErr} + if s.mercuryConfig.Credentials() == nil { + lookupLggr.Errorf("at block %d upkeep %s tries to access mercury server but mercury credential is not configured", block, upkeepId) + return + } + + if len(streamsLookupResponse.Feeds) == 0 { + checkResults[i].IneligibilityReason = uint8(encoding.UpkeepFailureReasonInvalidRevertDataInput) + lookupLggr.Debugf("at block %s upkeep %s has empty feeds array", block, upkeepId) + return + } + + // mercury permission checking for v0.3 is done by mercury server, so no need to check here + if streamsLookupResponse.IsMercuryV02() { + // check permission on the registry for mercury v0.2 + opts := s.buildCallOpts(ctx, block) + if 
state, reason, retryable, allowed, err := s.AllowedToUseMercury(opts, upkeepId.BigInt()); err != nil { + lookupLggr.Warnf("at block %s upkeep %s failed to query mercury allow list: %s", block, upkeepId, err) + checkResults[i].PipelineExecutionState = uint8(state) + checkResults[i].IneligibilityReason = uint8(reason) + checkResults[i].Retryable = retryable + return + } else if !allowed { + lookupLggr.Debugf("at block %d upkeep %s NOT allowed to query Mercury server", block, upkeepId) + checkResults[i].IneligibilityReason = uint8(encoding.UpkeepFailureReasonMercuryAccessNotAllowed) + return + } + } else if !streamsLookupResponse.IsMercuryV03() { + // if mercury version is not v02 or v03, set failure reason + lookupLggr.Debugf("at block %d upkeep %s NOT allowed to query Mercury server", block, upkeepId) + checkResults[i].IneligibilityReason = uint8(encoding.UpkeepFailureReasonInvalidRevertDataInput) + return + } + + streamsLookupResponse.UpkeepId = upkeepId.BigInt() + // the block here is exclusively used to call checkCallback at this block, not to be confused with the block number + // in the revert for mercury v0.2, which is denoted by time in the struct bc starting from v0.3, only timestamp will be supported + streamsLookupResponse.Block = uint64(block.Int64()) + lookupLggr.Infof("at block %d upkeep %s DecodeStreamsLookupRequest feedKey=%s timeKey=%s feeds=%v time=%s extraData=%s", block, upkeepId, streamsLookupResponse.FeedParamKey, streamsLookupResponse.TimeParamKey, streamsLookupResponse.Feeds, streamsLookupResponse.Time, hexutil.Encode(streamsLookupResponse.ExtraData)) + lookups[i] = streamsLookupResponse +} + +func (s *streams) doLookup(ctx context.Context, wg *sync.WaitGroup, lookup *mercury.StreamsLookup, i int, checkResults []ocr2keepers.CheckResult) { + defer wg.Done() + + values, err := s.DoMercuryRequest(ctx, lookup, checkResults, i) + if err != nil { + s.lggr.Errorf("at block %d upkeep %s requested time %s DoMercuryRequest err: %s", lookup.Block, 
lookup.UpkeepId, lookup.Time, err.Error()) + } + + if err := s.CheckCallback(ctx, values, lookup, checkResults, i); err != nil { + s.lggr.Errorf("at block %d upkeep %s requested time %s CheckCallback err: %s", lookup.Block, lookup.UpkeepId, lookup.Time, err.Error()) + } +} + +func (s *streams) CheckCallback(ctx context.Context, values [][]byte, lookup *mercury.StreamsLookup, checkResults []ocr2keepers.CheckResult, i int) error { + payload, err := s.abi.Pack("checkCallback", lookup.UpkeepId, values, lookup.ExtraData) + if err != nil { + checkResults[i].Retryable = false + checkResults[i].PipelineExecutionState = uint8(encoding.PackUnpackDecodeFailed) + return err + } + + var mercuryBytes hexutil.Bytes + args := map[string]interface{}{ + "to": s.registry.Address().Hex(), + "data": hexutil.Bytes(payload), + } + + // call checkCallback function at the block which OCR3 has agreed upon + if err = s.client.CallContext(ctx, &mercuryBytes, "eth_call", args, hexutil.EncodeUint64(lookup.Block)); err != nil { + checkResults[i].Retryable = true + checkResults[i].PipelineExecutionState = uint8(encoding.RpcFlakyFailure) + return err + } + + s.lggr.Infof("at block %d upkeep %s requested time %s checkCallback mercuryBytes: %s", lookup.Block, lookup.UpkeepId, lookup.Time, hexutil.Encode(mercuryBytes)) + + unpackCallBackState, needed, performData, failureReason, _, err := s.packer.UnpackCheckCallbackResult(mercuryBytes) + if err != nil { + checkResults[i].PipelineExecutionState = uint8(unpackCallBackState) + return err + } + + if failureReason == encoding.UpkeepFailureReasonMercuryCallbackReverted { + checkResults[i].IneligibilityReason = uint8(encoding.UpkeepFailureReasonMercuryCallbackReverted) + s.lggr.Debugf("at block %d upkeep %s requested time %s mercury callback reverts", lookup.Block, lookup.UpkeepId, lookup.Time) + return nil + } + + if !needed { + checkResults[i].IneligibilityReason = uint8(encoding.UpkeepFailureReasonUpkeepNotNeeded) + s.lggr.Debugf("at block %d upkeep %s 
requested time %s callback reports upkeep not needed", lookup.Block, lookup.UpkeepId, lookup.Time) + return nil + } + + checkResults[i].IneligibilityReason = uint8(encoding.UpkeepFailureReasonNone) + checkResults[i].Eligible = true + checkResults[i].PerformData = performData + s.lggr.Infof("at block %d upkeep %s requested time %s CheckCallback successful with perform data: %s", lookup.Block, lookup.UpkeepId, lookup.Time, hexutil.Encode(performData)) + + return nil +} + +func (s *streams) DoMercuryRequest(ctx context.Context, lookup *mercury.StreamsLookup, checkResults []ocr2keepers.CheckResult, i int) ([][]byte, error) { + state, reason, values, retryable, retryInterval, err := encoding.NoPipelineError, encoding.UpkeepFailureReasonInvalidRevertDataInput, [][]byte{}, false, 0*time.Second, fmt.Errorf("invalid revert data input: feed param key %s, time param key %s, feeds %s", lookup.FeedParamKey, lookup.TimeParamKey, lookup.Feeds) + pluginRetryKey := generatePluginRetryKey(checkResults[i].WorkID, lookup.Block) + + if lookup.IsMercuryV02() { + state, reason, values, retryable, retryInterval, err = s.v02Client.DoRequest(ctx, lookup, pluginRetryKey) + } else if lookup.IsMercuryV03() { + state, reason, values, retryable, retryInterval, err = s.v03Client.DoRequest(ctx, lookup, pluginRetryKey) + } + + if err != nil { + checkResults[i].Retryable = retryable + checkResults[i].RetryInterval = retryInterval + checkResults[i].PipelineExecutionState = uint8(state) + checkResults[i].IneligibilityReason = uint8(reason) + return nil, err + } + + for j, v := range values { + s.lggr.Infof("at block %d upkeep %s requested time %s doMercuryRequest values[%d]: %s", lookup.Block, lookup.UpkeepId, lookup.Time, j, hexutil.Encode(v)) + } + return values, nil +} + +// AllowedToUseMercury retrieves upkeep's administrative offchain config and decode a mercuryEnabled bool to indicate if +// this upkeep is allowed to use Mercury service. 
+func (s *streams) AllowedToUseMercury(opts *bind.CallOpts, upkeepId *big.Int) (state encoding.PipelineExecutionState, reason encoding.UpkeepFailureReason, retryable bool, allow bool, err error) { + allowed, ok := s.mercuryConfig.IsUpkeepAllowed(upkeepId.String()) + if ok { + return encoding.NoPipelineError, encoding.UpkeepFailureReasonNone, false, allowed.(bool), nil + } + + payload, err := s.packer.PackGetUpkeepPrivilegeConfig(upkeepId) + if err != nil { + // pack error, no retryable + s.lggr.Warnf("failed to pack getUpkeepPrivilegeConfig data for upkeepId %s: %s", upkeepId, err) + + return encoding.PackUnpackDecodeFailed, encoding.UpkeepFailureReasonNone, false, false, fmt.Errorf("failed to pack upkeepId: %w", err) + } + + var resultBytes hexutil.Bytes + args := map[string]interface{}{ + "to": s.registry.Address().Hex(), + "data": hexutil.Bytes(payload), + } + + if err = s.client.CallContext(opts.Context, &resultBytes, "eth_call", args, hexutil.EncodeBig(opts.BlockNumber)); err != nil { + return encoding.RpcFlakyFailure, encoding.UpkeepFailureReasonNone, true, false, fmt.Errorf("failed to get upkeep privilege config: %v", err) + } + + var upkeepPrivilegeConfigBytes []byte + upkeepPrivilegeConfigBytes, err = s.packer.UnpackGetUpkeepPrivilegeConfig(resultBytes) + + if err != nil { + return encoding.PackUnpackDecodeFailed, encoding.UpkeepFailureReasonNone, false, false, fmt.Errorf("failed to get upkeep privilege config: %v", err) + } + + if len(upkeepPrivilegeConfigBytes) == 0 { + s.mercuryConfig.SetUpkeepAllowed(upkeepId.String(), false, cache.DefaultExpiration) + return encoding.NoPipelineError, encoding.UpkeepFailureReasonMercuryAccessNotAllowed, false, false, fmt.Errorf("upkeep privilege config is empty") + } + + var privilegeConfig UpkeepPrivilegeConfig + if err = json.Unmarshal(upkeepPrivilegeConfigBytes, &privilegeConfig); err != nil { + return encoding.MercuryUnmarshalError, encoding.UpkeepFailureReasonNone, false, false, fmt.Errorf("failed to unmarshal 
privilege config: %v", err) + } + + s.mercuryConfig.SetUpkeepAllowed(upkeepId.String(), privilegeConfig.MercuryEnabled, cache.DefaultExpiration) + + return encoding.NoPipelineError, encoding.UpkeepFailureReasonNone, false, privilegeConfig.MercuryEnabled, nil +} + +func (s *streams) buildCallOpts(ctx context.Context, block *big.Int) *bind.CallOpts { + opts := bind.CallOpts{ + Context: ctx, + } + + if block == nil || block.Int64() == 0 { + if latestBlock := s.blockSubscriber.LatestBlock(); latestBlock != nil && latestBlock.Number != 0 { + opts.BlockNumber = big.NewInt(int64(latestBlock.Number)) + } + } else { + opts.BlockNumber = block + } + + return &opts +} + +// generatePluginRetryKey returns a plugin retry cache key +func generatePluginRetryKey(workID string, block uint64) string { + return workID + "|" + fmt.Sprintf("%d", block) +} + +func (s *streams) Close() error { + return s.StopOnce("streams_lookup", func() error { + s.threadCtrl.Close() + return nil + }) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams_test.go new file mode 100644 index 00000000..ccf22fd0 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams_test.go @@ -0,0 +1,822 @@ +package streams + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "math/big" + "net/http" + "strings" + "testing" + "time" + + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/pkg/errors" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + evmClientMocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + iregistry21 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury" + v02 "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v02" + v03 "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v03" +) + +type MockMercuryConfigProvider struct { + mock.Mock +} + +func (m *MockMercuryConfigProvider) Credentials() *types.MercuryCredentials { + mc := &types.MercuryCredentials{ + LegacyURL: "https://google.old.com", + URL: "https://google.com", + Username: "FakeClientID", + Password: "FakeClientKey", + } + return mc +} + +func (m *MockMercuryConfigProvider) IsUpkeepAllowed(s string) (interface{}, bool) { + args := m.Called(s) + return args.Get(0), args.Bool(1) +} + +func (m *MockMercuryConfigProvider) SetUpkeepAllowed(s string, i interface{}, d time.Duration) { + m.Called(s, i, d) +} + +func (m *MockMercuryConfigProvider) GetPluginRetry(s string) (interface{}, bool) { + args := m.Called(s) + return args.Get(0), args.Bool(1) +} + +func (m *MockMercuryConfigProvider) SetPluginRetry(s string, i interface{}, d time.Duration) { + m.Called(s, i, d) +} + +type MockBlockSubscriber struct { + mock.Mock +} + +func (b *MockBlockSubscriber) LatestBlock() *ocr2keepers.BlockKey { + return nil +} + +type MockHttpClient struct { + mock.Mock +} + +func (mock *MockHttpClient) Do(req *http.Request) (*http.Response, error) { + args := mock.Called(req) + return args.Get(0).(*http.Response), args.Error(1) +} + +type mockRegistry struct { + GetUpkeepPrivilegeConfigFn func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + CheckCallbackFn func(opts *bind.CallOpts, 
id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) +} + +func (r *mockRegistry) Address() common.Address { + return common.HexToAddress("0x6cA639822c6C241Fa9A7A6b5032F6F7F1C513CAD") +} + +func (r *mockRegistry) GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return r.GetUpkeepPrivilegeConfigFn(opts, upkeepId) +} + +func (r *mockRegistry) CheckCallback(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) { + return r.CheckCallbackFn(opts, id, values, extraData) +} + +// setups up a streams object for tests. +func setupStreams(t *testing.T) *streams { + lggr := logger.TestLogger(t) + mercuryConfig := new(MockMercuryConfigProvider) + blockSubscriber := new(MockBlockSubscriber) + registry := &mockRegistry{} + client := evmClientMocks.NewClient(t) + + streams := NewStreamsLookup( + mercuryConfig, + blockSubscriber, + client, + registry, + lggr, + ) + return streams +} + +func TestStreams_CheckCallback(t *testing.T) { + upkeepId := big.NewInt(123456789) + bn := uint64(999) + bs := []byte{183, 114, 215, 10, 0, 0, 0, 0, 0, 0} + values := [][]byte{bs} + tests := []struct { + name string + lookup *mercury.StreamsLookup + input []ocr2keepers.CheckResult + values [][]byte + statusCode int + + callbackResp []byte + callbackErr error + + upkeepNeeded bool + performData []byte + wantErr assert.ErrorAssertionFunc + + state encoding.PipelineExecutionState + retryable bool + registry streamRegistry + }{ + { + name: "success - empty extra data", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"ETD-USD", "BTC-ETH"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(100), + ExtraData: []byte{48, 120, 48, 48}, + }, + UpkeepId: upkeepId, + Block: bn, + }, + input: []ocr2keepers.CheckResult{ + {}, + }, + values: values, + statusCode: http.StatusOK, + callbackResp: []byte{0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 48, 120, 48, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + upkeepNeeded: true, + performData: []byte{48, 120, 48, 48}, + wantErr: assert.NoError, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return []byte(`{"mercuryEnabled":true}`), nil + }, + CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{ + UpkeepNeeded: true, + PerformData: []byte{48, 120, 48, 48}, + }, nil + }, + }, + }, + { + name: "success - with extra data", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(18952430), + // this is the address of precompile contract ArbSys(0x0000000000000000000000000000000000000064) + ExtraData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, + }, + UpkeepId: upkeepId, + Block: bn, + }, + input: []ocr2keepers.CheckResult{ + {}, + }, + values: values, + statusCode: http.StatusOK, + callbackResp: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0}, + upkeepNeeded: true, + performData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, + wantErr: assert.NoError, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return []byte(`{"mercuryEnabled":true}`), nil + }, + CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{ + UpkeepNeeded: true, + PerformData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, + }, nil + }, + }, + }, + { + name: "failure - bad response", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"ETD-USD", "BTC-ETH"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(100), + ExtraData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 48, 120, 48, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + UpkeepId: upkeepId, + Block: bn, + }, + input: []ocr2keepers.CheckResult{ + {}, + }, + values: values, + statusCode: http.StatusOK, + callbackResp: []byte{}, + callbackErr: errors.New("bad response"), + wantErr: assert.Error, + state: encoding.RpcFlakyFailure, + retryable: true, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return []byte(`{"mercuryEnabled":true}`), nil + }, + CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{}, errors.New("bad response") + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := setupStreams(t) + defer r.Close() + r.registry = 
tt.registry + + client := new(evmClientMocks.Client) + s := setupStreams(t) + payload, err := s.abi.Pack("checkCallback", tt.lookup.UpkeepId, values, tt.lookup.ExtraData) + require.Nil(t, err) + args := map[string]interface{}{ + "to": s.registry.Address().Hex(), + "data": hexutil.Bytes(payload), + } + client.On("CallContext", mock.Anything, mock.AnythingOfType("*hexutil.Bytes"), "eth_call", args, hexutil.EncodeUint64(tt.lookup.Block)).Return(tt.callbackErr). + Run(func(args mock.Arguments) { + by := args.Get(1).(*hexutil.Bytes) + *by = tt.callbackResp + }).Once() + s.client = client + + err = s.CheckCallback(testutils.Context(t), tt.values, tt.lookup, tt.input, 0) + tt.wantErr(t, err, fmt.Sprintf("Error assertion failed: %v", tt.name)) + assert.Equal(t, uint8(tt.state), tt.input[0].PipelineExecutionState) + assert.Equal(t, tt.retryable, tt.input[0].Retryable) + }) + } +} + +func TestStreams_AllowedToUseMercury(t *testing.T) { + upkeepId, ok := new(big.Int).SetString("71022726777042968814359024671382968091267501884371696415772139504780367423725", 10) + assert.True(t, ok, t.Name()) + tests := []struct { + name string + cached bool + allowed bool + ethCallErr error + err error + state encoding.PipelineExecutionState + reason encoding.UpkeepFailureReason + registry streamRegistry + retryable bool + config []byte + }{ + { + name: "success - allowed via cache", + cached: true, + allowed: true, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return nil, nil + }, + CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{}, nil + }, + }, + }, + { + name: "success - allowed via fetching privilege config", + allowed: true, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return []byte(`{"mercuryEnabled":true}`), nil + }, + 
CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{}, nil + }, + }, + }, + { + name: "success - not allowed via cache", + cached: true, + allowed: false, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return nil, nil + }, + CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{}, nil + }, + }, + }, + { + name: "success - not allowed via fetching privilege config", + allowed: false, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return []byte(`{"mercuryEnabled":false}`), nil + }, + CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{}, nil + }, + }, + }, + { + name: "failure - cannot unmarshal privilege config", + err: fmt.Errorf("failed to unmarshal privilege config: invalid character '\\x00' looking for beginning of value"), + state: encoding.MercuryUnmarshalError, + config: []byte{0, 1}, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return []byte(`{`), nil + }, + CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{}, nil + }, + }, + }, + { + name: "failure - flaky RPC", + retryable: true, + err: fmt.Errorf("failed to get upkeep privilege config: flaky RPC"), + state: encoding.RpcFlakyFailure, + ethCallErr: fmt.Errorf("flaky RPC"), + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return nil, errors.New("flaky RPC") + }, + 
CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{}, nil + }, + }, + }, + { + name: "failure - empty upkeep privilege config", + err: fmt.Errorf("upkeep privilege config is empty"), + reason: encoding.UpkeepFailureReasonMercuryAccessNotAllowed, + config: []byte{}, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return []byte(``), nil + }, + CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{}, nil + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := setupStreams(t) + defer s.Close() + s.registry = tt.registry + + client := new(evmClientMocks.Client) + s.client = client + + mc := new(MockMercuryConfigProvider) + mc.On("IsUpkeepAllowed", mock.Anything).Return(tt.allowed, tt.cached).Once() + mc.On("SetUpkeepAllowed", mock.Anything, mock.Anything, mock.Anything).Return().Once() + s.mercuryConfig = mc + + if !tt.cached { + if tt.err != nil { + bContractCfg, err := s.abi.Methods["getUpkeepPrivilegeConfig"].Outputs.PackValues([]interface{}{tt.config}) + require.Nil(t, err) + + payload, err := s.abi.Pack("getUpkeepPrivilegeConfig", upkeepId) + require.Nil(t, err) + + args := map[string]interface{}{ + "to": s.registry.Address().Hex(), + "data": hexutil.Bytes(payload), + } + + client.On("CallContext", mock.Anything, mock.AnythingOfType("*hexutil.Bytes"), "eth_call", args, mock.AnythingOfType("string")). + Return(tt.ethCallErr). 
+ Run(func(args mock.Arguments) { + b := args.Get(1).(*hexutil.Bytes) + *b = bContractCfg + }).Once() + } else { + cfg := UpkeepPrivilegeConfig{MercuryEnabled: tt.allowed} + bCfg, err := json.Marshal(cfg) + require.Nil(t, err) + + bContractCfg, err := s.abi.Methods["getUpkeepPrivilegeConfig"].Outputs.PackValues([]interface{}{bCfg}) + require.Nil(t, err) + + payload, err := s.abi.Pack("getUpkeepPrivilegeConfig", upkeepId) + require.Nil(t, err) + + args := map[string]interface{}{ + "to": s.registry.Address().Hex(), + "data": hexutil.Bytes(payload), + } + + client.On("CallContext", mock.Anything, mock.AnythingOfType("*hexutil.Bytes"), "eth_call", args, mock.AnythingOfType("string")).Return(nil). + Run(func(args mock.Arguments) { + b := args.Get(1).(*hexutil.Bytes) + *b = bContractCfg + }).Once() + } + } + + opts := &bind.CallOpts{ + BlockNumber: big.NewInt(10), + } + + state, reason, retryable, allowed, err := s.AllowedToUseMercury(opts, upkeepId) + assert.Equal(t, tt.err, err) + assert.Equal(t, tt.allowed, allowed) + assert.Equal(t, tt.state, state) + assert.Equal(t, tt.reason, reason) + assert.Equal(t, tt.retryable, retryable) + }) + } +} + +func TestStreams_StreamsLookup(t *testing.T) { + upkeepId, ok := new(big.Int).SetString("71022726777042968814359024671382968091267501884371696415772139504780367423725", 10) + var upkeepIdentifier [32]byte + copy(upkeepIdentifier[:], upkeepId.Bytes()) + assert.True(t, ok, t.Name()) + blockNum := ocr2keepers.BlockNumber(37974374) + tests := []struct { + name string + input []ocr2keepers.CheckResult + blobs map[string]string + callbackResp []byte + expectedResults []ocr2keepers.CheckResult + callbackNeeded bool + extraData []byte + checkCallbackResp []byte + values [][]byte + cachedAdminCfg bool + hasError bool + hasPermission bool + v3 bool + registry streamRegistry + }{ + { + name: "success - happy path no cache", + input: []ocr2keepers.CheckResult{ + { + PerformData: 
hexutil.MustDecode("0xf055e4a200000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000024000000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000000000000000000000000000000000000966656564496448657800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000423078343535343438326435353533343432643431353234323439353435323535346432643534343535333534346534353534303030303030303030303030303030300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000042307834323534343332643535353334343264343135323432343935343532353534643264353434353533353434653435353430303030303030303030303030303030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b626c6f636b4e756d62657200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"), + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: blockNum, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonTargetCheckReverted), + }, + }, + blobs: map[string]string{ + "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000": 
"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f64c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3b03fa3", + "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000": 
"0x0006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d", + }, + cachedAdminCfg: false, + extraData: hexutil.MustDecode("0x0000000000000000000000000000000000000064"), + callbackNeeded: true, + checkCallbackResp: 
hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000063a400000000000000000000000000000000000000000000000000000000000006e0000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000002e000066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f6
4c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3b03fa300000000000000000000000000000000000000000000000000000000000002e00006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d00000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"), + values: 
[][]byte{hexutil.MustDecode("0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f64c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3b03fa3"), 
hexutil.MustDecode("0x0006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d")}, + expectedResults: []ocr2keepers.CheckResult{ + { + Eligible: true, + PerformData: 
hexutil.MustDecode("0x000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000002e000066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f64c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3b03fa300000000000000000000000000000000000000000000000000000000000002e00006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b
664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d00000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"), + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: blockNum, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonNone), + }, + }, + hasPermission: true, + v3: false, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return []byte(`{"mercuryEnabled":true}`), nil + }, + CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, 
extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{ + UpkeepNeeded: true, + PerformData: hexutil.MustDecode("0x000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000002e000066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f64c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3
b03fa300000000000000000000000000000000000000000000000000000000000002e00006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d00000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"), + }, nil + }, + }, + }, + { + name: "two CheckResults: Mercury success E2E and No Mercury because of insufficient balance", + input: []ocr2keepers.CheckResult{ + { + PerformData: []byte{}, + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: 26046145, + }, + IneligibilityReason: 
uint8(encoding.UpkeepFailureReasonInsufficientBalance), + }, + { + PerformData: hexutil.MustDecode("0xf055e4a200000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000024000000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000000000000000000000000000000000000966656564496448657800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000423078343535343438326435353533343432643431353234323439353435323535346432643534343535333534346534353534303030303030303030303030303030300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000042307834323534343332643535353334343264343135323432343935343532353534643264353434353533353434653435353430303030303030303030303030303030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b626c6f636b4e756d62657200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"), + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: blockNum, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonTargetCheckReverted), + }, + }, + blobs: map[string]string{ + "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000": 
"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f64c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3b03fa3", + "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000": 
"0x0006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d", + }, + cachedAdminCfg: false, + extraData: hexutil.MustDecode("0x0000000000000000000000000000000000000064"), + callbackNeeded: true, + checkCallbackResp: 
hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000063a400000000000000000000000000000000000000000000000000000000000006e0000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000002e000066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f6
4c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3b03fa300000000000000000000000000000000000000000000000000000000000002e00006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d00000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"), + values: 
[][]byte{hexutil.MustDecode("0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f64c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3b03fa3"), 
hexutil.MustDecode("0x0006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d")}, + expectedResults: []ocr2keepers.CheckResult{ + { + Eligible: false, + PerformData: []byte{}, + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: 26046145, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonInsufficientBalance), + }, + { + Eligible: true, + PerformData: 
hexutil.MustDecode("0x000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000002e000066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f64c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3b03fa300000000000000000000000000000000000000000000000000000000000002e00006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b
664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d00000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"), + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: blockNum, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonNone), + }, + }, + hasPermission: true, + v3: false, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return []byte(`{"mercuryEnabled":true}`), nil + }, + CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, 
extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{ + UpkeepNeeded: true, + PerformData: hexutil.MustDecode("0x000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000002e000066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f64c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3
b03fa300000000000000000000000000000000000000000000000000000000000002e00006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d00000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"), + }, nil + }, + }, + }, + { + name: "success - happy path no cache - v0.3", + input: []ocr2keepers.CheckResult{ + { + PerformData: 
hexutil.MustDecode("0xf055e4a200000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000024000000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000000000000000000000000000000000000766656564494473000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000423078343535343438326435353533343432643431353234323439353435323535346432643534343535333534346534353534303030303030303030303030303030300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000042307834323534343332643535353334343264343135323432343935343532353534643264353434353533353434653435353430303030303030303030303030303030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b626c6f636b4e756d62657200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"), + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: blockNum, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonTargetCheckReverted), + }, + }, + blobs: map[string]string{ + "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000": 
"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f64c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3b03fa3", + "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000": 
"0x0006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d", + }, + cachedAdminCfg: false, + extraData: hexutil.MustDecode("0x0000000000000000000000000000000000000064"), + callbackNeeded: true, + checkCallbackResp: 
hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000063a400000000000000000000000000000000000000000000000000000000000006e0000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000002e000066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f6
4c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3b03fa300000000000000000000000000000000000000000000000000000000000002e00006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d00000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"), + values: 
[][]byte{hexutil.MustDecode("0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f64c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3b03fa3"), 
hexutil.MustDecode("0x0006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d")}, + expectedResults: []ocr2keepers.CheckResult{ + { + Eligible: true, + PerformData: 
hexutil.MustDecode("0x000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000002e000066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f64c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3b03fa300000000000000000000000000000000000000000000000000000000000002e00006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b
664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d00000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"), + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: blockNum, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonNone), + }, + }, + hasPermission: true, + v3: true, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return []byte(`{"mercuryEnabled":true}`), nil + }, + CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, 
extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{ + UpkeepNeeded: true, + PerformData: hexutil.MustDecode("0x000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000002e000066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000004555638000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000000269ecbb83b000000000000000000000000000000000000000000000000000000269e4a4e14000000000000000000000000000000000000000000000000000000269f4d0edb000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002381e91cffa9502c20de1ddcee350db3f715a5ab449448e3184a5b03c682356c6e2115f20663b3731e373cf33465a96da26f2876debb548f281e62e48f64c374200000000000000000000000000000000000000000000000000000000000000027db99e34135098d4e0bb9ae143ec9cd72fd63150c6d0cc5b38f4aa1aa42408377e8fe8e5ac489c9b7f62ff5aa7b05d2e892e7dee4cac631097247969b3
b03fa300000000000000000000000000000000000000000000000000000000000002e00006da4a86c4933dd4a87b21dd2871aea29f706bcde43c70039355ac5b664fb5000000000000000000000000000000000000000000000000000000000454d118000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204254432d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064f0d4a0000000000000000000000000000000000000000000000000000002645f00877a000000000000000000000000000000000000000000000000000002645e1e1010000000000000000000000000000000000000000000000000000002645fe2fee4000000000000000000000000000000000000000000000000000000000243716664b42d20423a47fb13ad3098b49b37f667548e6745fff958b663afe25a845f6100000000000000000000000000000000000000000000000000000000024371660000000000000000000000000000000000000000000000000000000064f0d4a00000000000000000000000000000000000000000000000000000000000000002a0373c0bce7393673f819eb9681cac2773c2d718ce933eb858252195b17a9c832d7b0bee173c02c3c25fb65912b8b13b9302ede8423bab3544cb7a8928d5eb3600000000000000000000000000000000000000000000000000000000000000027d7b79d7646383a5dbf51edf14d53bd3ad0a9f3ca8affab3165e89d3ddce9cb17b58e892fafe4ecb24d2fde07c6a756029e752a5114c33c173df4e7d309adb4d00000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000064000000000000000000000000"), + }, nil + }, + }, + }, + { + name: "skip - failure reason is insufficient balance", + input: []ocr2keepers.CheckResult{ + { + PerformData: []byte{}, + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: 26046145, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonInsufficientBalance), + }, 
+ }, + expectedResults: []ocr2keepers.CheckResult{ + { + Eligible: false, + PerformData: []byte{}, + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: 26046145, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonInsufficientBalance), + }, + }, + hasError: true, + registry: &mockRegistry{ + GetUpkeepPrivilegeConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return []byte(`{"mercuryEnabled":true}`), nil + }, + CheckCallbackFn: func(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) { + return iregistry21.CheckCallback{ + UpkeepNeeded: true, + PerformData: []byte{}, + }, nil + }, + }, + }, + { + name: "skip - invalid revert data", + input: []ocr2keepers.CheckResult{ + { + PerformData: []byte{}, + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: 26046145, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonTargetCheckReverted), + }, + }, + expectedResults: []ocr2keepers.CheckResult{ + { + Eligible: false, + PerformData: []byte{}, + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: 26046145, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonTargetCheckReverted), + }, + }, + hasError: true, + }, + { + name: "failure - invalid mercury version", + input: []ocr2keepers.CheckResult{ + { + // This Perform data contains invalid FeedParamKey: {feedIdHex:RandomString [ETD-USD BTC-ETH] blockNumber 100 [48 120 48 48]} + PerformData: 
hexutil.MustDecode("0xf055e4a200000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000001c00000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000166665656449644865783a52616e646f6d537472696e670000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000074554442d5553440000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000074254432d45544800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b626c6f636b4e756d62657200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000043078303000000000000000000000000000000000000000000000000000000000"), + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: blockNum, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonTargetCheckReverted), + }, + }, + expectedResults: []ocr2keepers.CheckResult{ + { + PerformData: 
hexutil.MustDecode("0xf055e4a200000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000001c00000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000166665656449644865783a52616e646f6d537472696e670000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000074554442d5553440000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000074254432d45544800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b626c6f636b4e756d62657200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000043078303000000000000000000000000000000000000000000000000000000000"), + UpkeepID: upkeepIdentifier, + Trigger: ocr2keepers.Trigger{ + BlockNumber: blockNum, + }, + IneligibilityReason: uint8(encoding.UpkeepFailureReasonInvalidRevertDataInput), + }, + }, + hasError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := setupStreams(t) + defer s.Close() + s.registry = tt.registry + + client := new(evmClientMocks.Client) + s.client = client + + mc := new(MockMercuryConfigProvider) + mc.On("IsUpkeepAllowed", mock.Anything).Return(tt.hasPermission, tt.cachedAdminCfg).Once() + mc.On("SetUpkeepAllowed", mock.Anything, mock.Anything, mock.Anything).Return().Once() + s.mercuryConfig = mc + + if !tt.cachedAdminCfg && !tt.hasError { + cfg := UpkeepPrivilegeConfig{MercuryEnabled: tt.hasPermission} + bCfg, err := json.Marshal(cfg) + require.Nil(t, 
err) + + bContractCfg, err := s.abi.Methods["getUpkeepPrivilegeConfig"].Outputs.PackValues([]interface{}{bCfg}) + require.Nil(t, err) + + payload, err := s.abi.Pack("getUpkeepPrivilegeConfig", upkeepId) + require.Nil(t, err) + + args := map[string]interface{}{ + "to": s.registry.Address().Hex(), + "data": hexutil.Bytes(payload), + } + + client.On("CallContext", mock.Anything, mock.AnythingOfType("*hexutil.Bytes"), "eth_call", args, mock.AnythingOfType("string")).Return(nil). + Run(func(args mock.Arguments) { + b := args.Get(1).(*hexutil.Bytes) + *b = bContractCfg + }).Once() + } + + if len(tt.blobs) > 0 { + if tt.v3 { + hc03 := new(MockHttpClient) + v03HttpClient := v03.NewClient(s.mercuryConfig, hc03, s.threadCtrl, s.lggr) + s.v03Client = v03HttpClient + + mr1 := v03.MercuryV03Response{ + Reports: []v03.MercuryV03Report{{FullReport: tt.blobs["0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"]}, {FullReport: tt.blobs["0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"]}}} + b1, err := json.Marshal(mr1) + assert.Nil(t, err) + resp1 := &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewReader(b1)), + } + hc03.On("Do", mock.Anything).Return(resp1, nil).Once() + } else { + hc02 := new(MockHttpClient) + v02HttpClient := v02.NewClient(s.mercuryConfig, hc02, s.threadCtrl, s.lggr) + s.v02Client = v02HttpClient + + mr1 := v02.MercuryV02Response{PluginBlob: tt.blobs["0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"]} + b1, err := json.Marshal(mr1) + assert.Nil(t, err) + resp1 := &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewReader(b1)), + } + mr2 := v02.MercuryV02Response{PluginBlob: tt.blobs["0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"]} + b2, err := json.Marshal(mr2) + assert.Nil(t, err) + resp2 := &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewReader(b2)), + } + + hc02.On("Do", mock.MatchedBy(func(req *http.Request) 
bool { + return strings.Contains(req.URL.String(), "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000") + })).Return(resp2, nil).Once() + + hc02.On("Do", mock.MatchedBy(func(req *http.Request) bool { + return strings.Contains(req.URL.String(), "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000") + })).Return(resp1, nil).Once() + } + } + + if tt.callbackNeeded { + payload, err := s.abi.Pack("checkCallback", upkeepId, tt.values, tt.extraData) + require.Nil(t, err) + args := map[string]interface{}{ + "to": s.registry.Address().Hex(), + "data": hexutil.Bytes(payload), + } + client.On("CallContext", mock.Anything, mock.AnythingOfType("*hexutil.Bytes"), "eth_call", args, hexutil.EncodeUint64(uint64(blockNum))).Return(nil). + Run(func(args mock.Arguments) { + b := args.Get(1).(*hexutil.Bytes) + *b = tt.checkCallbackResp + }).Once() + } + + got := s.Lookup(testutils.Context(t), tt.input) + assert.Equal(t, tt.expectedResults, got, tt.name) + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v02/request.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v02/request.go new file mode 100644 index 00000000..eacd0393 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v02/request.go @@ -0,0 +1,206 @@ +package v02 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/avast/retry-go/v4" + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const ( + mercuryPathV02 = "/client?" 
// only used to access mercury v0.2 server + retryDelay = 500 * time.Millisecond + totalAttempt = 3 + contentTypeHeader = "Content-Type" + authorizationHeader = "Authorization" + timestampHeader = "X-Authorization-Timestamp" + signatureHeader = "X-Authorization-Signature-SHA256" +) + +type MercuryV02Response struct { + PluginBlob string `json:"pluginBlob"` +} + +type client struct { + services.StateMachine + mercuryConfig mercury.MercuryConfigProvider + httpClient mercury.HttpClient + threadCtrl utils.ThreadControl + lggr logger.Logger +} + +func NewClient(mercuryConfig mercury.MercuryConfigProvider, httpClient mercury.HttpClient, threadCtrl utils.ThreadControl, lggr logger.Logger) *client { + return &client{ + mercuryConfig: mercuryConfig, + httpClient: httpClient, + threadCtrl: threadCtrl, + lggr: lggr, + } +} + +func (c *client) DoRequest(ctx context.Context, streamsLookup *mercury.StreamsLookup, pluginRetryKey string) (encoding.PipelineExecutionState, encoding.UpkeepFailureReason, [][]byte, bool, time.Duration, error) { + resultLen := len(streamsLookup.Feeds) + ch := make(chan mercury.MercuryData, resultLen) + if len(streamsLookup.Feeds) == 0 { + return encoding.NoPipelineError, encoding.UpkeepFailureReasonInvalidRevertDataInput, [][]byte{}, false, 0 * time.Second, fmt.Errorf("invalid revert data input: feed param key %s, time param key %s, feeds %s", streamsLookup.FeedParamKey, streamsLookup.TimeParamKey, streamsLookup.Feeds) + } + for i := range streamsLookup.Feeds { + // TODO (AUTO-7209): limit the number of concurrent requests + i := i + c.threadCtrl.Go(func(ctx context.Context) { + c.singleFeedRequest(ctx, ch, i, streamsLookup) + }) + } + + var reqErr error + var retryInterval time.Duration + results := make([][]byte, len(streamsLookup.Feeds)) + retryable := true + allSuccess := true + // in v0.2, use the last execution error as the state, if no execution errors, state will be no error + state := encoding.NoPipelineError + for i := 0; i < resultLen; i++ { 
+ m := <-ch + if m.Error != nil { + reqErr = errors.Join(reqErr, m.Error) + retryable = retryable && m.Retryable + allSuccess = false + if m.State != encoding.NoPipelineError { + state = m.State + } + continue + } + results[m.Index] = m.Bytes[0] + } + if retryable && !allSuccess { + retryInterval = mercury.CalculateRetryConfigFn(pluginRetryKey, c.mercuryConfig) + } + // only retry when not all successful AND none are not retryable + return state, encoding.UpkeepFailureReasonNone, results, retryable && !allSuccess, retryInterval, reqErr +} + +func (c *client) singleFeedRequest(ctx context.Context, ch chan<- mercury.MercuryData, index int, sl *mercury.StreamsLookup) { + var httpRequest *http.Request + var err error + + q := url.Values{ + sl.FeedParamKey: {sl.Feeds[index]}, + sl.TimeParamKey: {sl.Time.String()}, + } + mercuryURL := c.mercuryConfig.Credentials().LegacyURL + reqUrl := fmt.Sprintf("%s%s%s", mercuryURL, mercuryPathV02, q.Encode()) + c.lggr.Debugf("request URL for upkeep %s feed %s: %s", sl.UpkeepId.String(), sl.Feeds[index], reqUrl) + + httpRequest, err = http.NewRequestWithContext(ctx, http.MethodGet, reqUrl, nil) + if err != nil { + ch <- mercury.MercuryData{Index: index, Error: err, Retryable: false, State: encoding.InvalidMercuryRequest} + return + } + + ts := time.Now().UTC().UnixMilli() + signature := mercury.GenerateHMACFn(http.MethodGet, mercuryPathV02+q.Encode(), []byte{}, c.mercuryConfig.Credentials().Username, c.mercuryConfig.Credentials().Password, ts) + httpRequest.Header.Set(contentTypeHeader, "application/json") + httpRequest.Header.Set(authorizationHeader, c.mercuryConfig.Credentials().Username) + httpRequest.Header.Set(timestampHeader, strconv.FormatInt(ts, 10)) + httpRequest.Header.Set(signatureHeader, signature) + + // in the case of multiple retries here, use the last attempt's data + state := encoding.NoPipelineError + retryable := false + sent := false + retryErr := retry.Do( + func() error { + var httpResponse *http.Response + var 
responseBody []byte + var blobBytes []byte + + retryable = false + if httpResponse, err = c.httpClient.Do(httpRequest); err != nil { + c.lggr.Warnf("at block %s upkeep %s GET request fails for feed %s: %v", sl.Time.String(), sl.UpkeepId.String(), sl.Feeds[index], err) + retryable = true + state = encoding.MercuryFlakyFailure + return err + } + defer httpResponse.Body.Close() + + if responseBody, err = io.ReadAll(httpResponse.Body); err != nil { + state = encoding.InvalidMercuryResponse + return err + } + + switch httpResponse.StatusCode { + case http.StatusNotFound, http.StatusInternalServerError, http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout: + c.lggr.Warnf("at block %s upkeep %s received status code %d for feed %s", sl.Time.String(), sl.UpkeepId.String(), httpResponse.StatusCode, sl.Feeds[index]) + retryable = true + state = encoding.MercuryFlakyFailure + return errors.New(strconv.FormatInt(int64(httpResponse.StatusCode), 10)) + case http.StatusOK: + // continue + default: + state = encoding.InvalidMercuryRequest + return fmt.Errorf("at block %s upkeep %s received status code %d for feed %s", sl.Time.String(), sl.UpkeepId.String(), httpResponse.StatusCode, sl.Feeds[index]) + } + + c.lggr.Debugf("at block %s upkeep %s received status code %d from mercury v0.2 with BODY=%s", sl.Time.String(), sl.UpkeepId.String(), httpResponse.StatusCode, hexutil.Encode(responseBody)) + + var m MercuryV02Response + if err = json.Unmarshal(responseBody, &m); err != nil { + c.lggr.Warnf("at block %s upkeep %s failed to unmarshal body to MercuryV02Response for feed %s: %v", sl.Time.String(), sl.UpkeepId.String(), sl.Feeds[index], err) + state = encoding.MercuryUnmarshalError + return err + } + if blobBytes, err = hexutil.Decode(m.PluginBlob); err != nil { + c.lggr.Warnf("at block %s upkeep %s failed to decode pluginBlob %s for feed %s: %v", sl.Time.String(), sl.UpkeepId.String(), m.PluginBlob, sl.Feeds[index], err) + state = 
encoding.InvalidMercuryResponse + return err + } + ch <- mercury.MercuryData{ + Index: index, + Bytes: [][]byte{blobBytes}, + Retryable: false, + State: encoding.NoPipelineError, + } + sent = true + return nil + }, + // only retry when the error is 404 Not Found, 500 Internal Server Error, 502 Bad Gateway, 503 Service Unavailable, 504 Gateway Timeout + retry.RetryIf(func(err error) bool { + return err.Error() == fmt.Sprintf("%d", http.StatusNotFound) || err.Error() == fmt.Sprintf("%d", http.StatusInternalServerError) || err.Error() == fmt.Sprintf("%d", http.StatusBadGateway) || err.Error() == fmt.Sprintf("%d", http.StatusServiceUnavailable) || err.Error() == fmt.Sprintf("%d", http.StatusGatewayTimeout) + }), + retry.Context(ctx), + retry.Delay(retryDelay), + retry.Attempts(totalAttempt), + ) + + if !sent { + ch <- mercury.MercuryData{ + Index: index, + Bytes: [][]byte{}, + Retryable: retryable, + Error: fmt.Errorf("failed to request feed for %s: %w", sl.Feeds[index], retryErr), + State: state, + } + } +} + +func (c *client) Close() error { + return c.StopOnce("v02_request", func() error { + c.threadCtrl.Close() + return nil + }) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v02/v02_request_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v02/v02_request_test.go new file mode 100644 index 00000000..7f593fc0 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v02/v02_request_test.go @@ -0,0 +1,469 @@ +package v02 + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "math/big" + "net/http" + "strings" + "testing" + "time" + + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/patrickmn/go-cache" + "github.com/stretchr/testify/mock" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + 
"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury" + "github.com/goplugin/pluginv3.0/v2/core/utils" + + "github.com/stretchr/testify/assert" +) + +const ( + defaultPluginRetryExpiration = 30 * time.Minute + cleanupInterval = 5 * time.Minute +) + +type MockMercuryConfigProvider struct { + cache *cache.Cache + mock.Mock +} + +func NewMockMercuryConfigProvider() *MockMercuryConfigProvider { + return &MockMercuryConfigProvider{ + cache: cache.New(defaultPluginRetryExpiration, cleanupInterval), + } +} + +func (m *MockMercuryConfigProvider) Credentials() *types.MercuryCredentials { + mc := &types.MercuryCredentials{ + LegacyURL: "https://google.old.com", + URL: "https://google.com", + Username: "FakeClientID", + Password: "FakeClientKey", + } + return mc +} + +func (m *MockMercuryConfigProvider) IsUpkeepAllowed(s string) (interface{}, bool) { + args := m.Called(s) + return args.Get(0), args.Bool(1) +} + +func (m *MockMercuryConfigProvider) SetUpkeepAllowed(s string, i interface{}, d time.Duration) { + m.Called(s, i, d) +} + +func (m *MockMercuryConfigProvider) GetPluginRetry(s string) (interface{}, bool) { + if value, found := m.cache.Get(s); found { + return value, true + } + + return nil, false +} + +func (m *MockMercuryConfigProvider) SetPluginRetry(s string, i interface{}, d time.Duration) { + m.cache.Set(s, i, d) +} + +type MockHttpClient struct { + mock.Mock +} + +func (mock *MockHttpClient) Do(req *http.Request) (*http.Response, error) { + args := mock.Called(req) + return args.Get(0).(*http.Response), args.Error(1) +} + +// setups up a client object for tests. 
+func setupClient(t *testing.T) *client { + lggr := logger.TestLogger(t) + mockHttpClient := new(MockHttpClient) + mercuryConfig := NewMockMercuryConfigProvider() + threadCtl := utils.NewThreadControl() + + client := NewClient( + mercuryConfig, + mockHttpClient, + threadCtl, + lggr, + ) + return client +} + +func TestV02_SingleFeedRequest(t *testing.T) { + upkeepId := big.NewInt(123456789) + tests := []struct { + name string + index int + lookup *mercury.StreamsLookup + blob string + statusCode int + lastStatusCode int + retryNumber int + retryable bool + errorMessage string + }{ + { + name: "success - mercury responds in the first try", + index: 0, + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + blob: "0xab2123dc00000012", + }, + { + name: "success - retry for 404", + index: 0, + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + blob: "0xab2123dcbabbad", + retryNumber: 1, + statusCode: http.StatusNotFound, + lastStatusCode: http.StatusOK, + }, + { + name: "success - retry for 500", + index: 0, + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + blob: "0xab2123dcbbabad", + retryNumber: 2, + statusCode: http.StatusInternalServerError, + lastStatusCode: http.StatusOK, + }, + { + name: "failure - returns 
retryable", + index: 0, + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + blob: "0xab2123dc", + retryNumber: totalAttempt, + statusCode: http.StatusNotFound, + retryable: true, + errorMessage: "failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: 404\n#2: 404\n#3: 404", + }, + { + name: "failure - returns retryable and then non-retryable", + index: 0, + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + blob: "0xab2123dc", + retryNumber: 1, + statusCode: http.StatusNotFound, + lastStatusCode: http.StatusTooManyRequests, + errorMessage: "failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: 404\n#2: at block 123456 upkeep 123456789 received status code 429 for feed 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", + }, + { + name: "failure - returns not retryable", + index: 0, + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + blob: "0xab2123dc", + statusCode: http.StatusConflict, + errorMessage: "failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: at block 123456 upkeep 123456789 received status 
code 409 for feed 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupClient(t) + defer c.Close() + hc := new(MockHttpClient) + + mr := MercuryV02Response{PluginBlob: tt.blob} + b, err := json.Marshal(mr) + assert.Nil(t, err) + + if tt.retryNumber == 0 { + if tt.errorMessage != "" { + resp := &http.Response{ + StatusCode: tt.statusCode, + Body: io.NopCloser(bytes.NewReader(b)), + } + hc.On("Do", mock.Anything).Return(resp, nil).Once() + } else { + resp := &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewReader(b)), + } + hc.On("Do", mock.Anything).Return(resp, nil).Once() + } + } else if tt.retryNumber > 0 && tt.retryNumber < totalAttempt { + retryResp := &http.Response{ + StatusCode: tt.statusCode, + Body: io.NopCloser(bytes.NewReader(b)), + } + hc.On("Do", mock.Anything).Return(retryResp, nil).Times(tt.retryNumber) + + resp := &http.Response{ + StatusCode: tt.lastStatusCode, + Body: io.NopCloser(bytes.NewReader(b)), + } + hc.On("Do", mock.Anything).Return(resp, nil).Once() + } else { + resp := &http.Response{ + StatusCode: tt.statusCode, + Body: io.NopCloser(bytes.NewReader(b)), + } + hc.On("Do", mock.Anything).Return(resp, nil).Times(tt.retryNumber) + } + c.httpClient = hc + + ch := make(chan mercury.MercuryData, 1) + c.singleFeedRequest(testutils.Context(t), ch, tt.index, tt.lookup) + + m := <-ch + assert.Equal(t, tt.index, m.Index) + assert.Equal(t, tt.retryable, m.Retryable) + if tt.retryNumber >= totalAttempt || tt.errorMessage != "" { + assert.Equal(t, tt.errorMessage, m.Error.Error()) + assert.Equal(t, [][]byte{}, m.Bytes) + } else { + blobBytes, err := hexutil.Decode(tt.blob) + assert.Nil(t, err) + assert.Nil(t, m.Error) + assert.Equal(t, [][]byte{blobBytes}, m.Bytes) + } + }) + } +} + +func TestV02_DoMercuryRequestV02(t *testing.T) { + upkeepId, _ := 
new(big.Int).SetString("88786950015966611018675766524283132478093844178961698330929478019253453382042", 10) + + tests := []struct { + name string + lookup *mercury.StreamsLookup + mockHttpStatusCode int + mockPluginBlobs []string + pluginRetries int + pluginRetryKey string + expectedValues [][]byte + expectedRetryable bool + expectedRetryInterval time.Duration + expectedError error + state encoding.PipelineExecutionState + reason encoding.UpkeepFailureReason + }{ + { + name: "success", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(25880526), + ExtraData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, + }, + UpkeepId: upkeepId, + }, + mockHttpStatusCode: http.StatusOK, + mockPluginBlobs: []string{"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb00000000000000000000000000
00000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c911963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"}, + expectedValues: [][]byte{{0, 6, 109, 252, 209, 237, 45, 149, 177, 140, 148, 141, 188, 91, 214, 76, 104, 122, 254, 147, 228, 202, 125, 102, 61, 222, 193, 76, 32, 9, 10, 216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 20, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 128, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 32, 69, 84, 72, 45, 85, 83, 68, 45, 65, 82, 66, 73, 84, 82, 85, 77, 45, 84, 69, 83, 84, 78, 69, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 137, 28, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 154, 216, 211, 103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 154, 207, 11, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 155, 61, 164, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 138, 231, 206, 116, 217, 250, 37, 42, 137, 131, 151, 110, 171, 96, 13, 199, 89, 12, 119, 141, 4, 
129, 52, 48, 132, 27, 198, 231, 101, 195, 76, 216, 26, 22, 141, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 138, 231, 203, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 137, 28, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 96, 65, 43, 148, 229, 37, 202, 108, 237, 201, 245, 68, 253, 134, 247, 118, 6, 213, 47, 231, 49, 165, 208, 105, 219, 232, 54, 168, 191, 192, 251, 140, 145, 25, 99, 176, 174, 122, 20, 151, 31, 59, 70, 33, 191, 251, 128, 46, 240, 96, 83, 146, 185, 166, 200, 156, 127, 171, 29, 248, 99, 58, 90, 222, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 69, 0, 194, 245, 33, 248, 63, 186, 94, 252, 43, 243, 239, 250, 174, 221, 228, 61, 10, 74, 223, 247, 133, 193, 33, 59, 113, 42, 58, 237, 13, 129, 87, 100, 42, 132, 50, 77, 176, 207, 150, 149, 235, 210, 119, 8, 212, 96, 142, 176, 51, 126, 13, 216, 123, 14, 67, 240, 250, 112, 199, 0, 217, 17}}, + expectedRetryable: false, + expectedError: nil, + }, + { + name: "failure - retryable and interval is 1s", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(25880526), + ExtraData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, + }, + UpkeepId: upkeepId, + }, + mockHttpStatusCode: http.StatusInternalServerError, + mockPluginBlobs: 
[]string{"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb0000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c911963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"}, + expectedValues: [][]byte{nil}, + expectedRetryable: true, + pluginRetries: 0, + expectedRetryInterval: 1 * time.Second, + expectedError: errors.New("failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: 500\n#2: 500\n#3: 500"), + state: encoding.MercuryFlakyFailure, + }, + { + name: "failure - retryable and interval is 5s", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: 
mercury.FeedIdHex, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(25880526), + ExtraData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, + }, + UpkeepId: upkeepId, + }, + pluginRetries: 5, + mockHttpStatusCode: http.StatusInternalServerError, + mockPluginBlobs: []string{"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb0000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c911963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"}, + expectedValues: [][]byte{nil}, + expectedRetryable: true, + expectedRetryInterval: 5 * time.Second, + expectedError: 
errors.New("failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: 500\n#2: 500\n#3: 500"), + state: encoding.MercuryFlakyFailure, + }, + { + name: "failure - not retryable because there are many plugin retries already", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(25880526), + ExtraData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, + }, + UpkeepId: upkeepId, + }, + pluginRetries: 10, + mockHttpStatusCode: http.StatusInternalServerError, + mockPluginBlobs: []string{"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb0000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c91
1963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"}, + expectedValues: [][]byte{nil}, + expectedRetryable: true, + expectedError: errors.New("failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: 500\n#2: 500\n#3: 500"), + state: encoding.MercuryFlakyFailure, + }, + { + name: "failure - not retryable", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(25880526), + ExtraData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, + }, + UpkeepId: upkeepId, + }, + mockHttpStatusCode: http.StatusTooManyRequests, + mockPluginBlobs: 
[]string{"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb0000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c911963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"}, + expectedValues: [][]byte{nil}, + expectedRetryable: false, + expectedError: errors.New("failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000: All attempts fail:\n#1: at block 25880526 upkeep 88786950015966611018675766524283132478093844178961698330929478019253453382042 received status code 429 for feed 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"), + state: encoding.InvalidMercuryRequest, + }, + { + name: "failure - no feeds", + lookup: 
&mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIdHex, + Feeds: []string{}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(25880526), + ExtraData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, + }, + UpkeepId: upkeepId, + }, + expectedValues: [][]byte{}, + reason: encoding.UpkeepFailureReasonInvalidRevertDataInput, + }, + { + name: "failure - invalid revert data", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIDs, + Feeds: []string{}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(25880526), + ExtraData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, + }, + UpkeepId: upkeepId, + }, + expectedValues: [][]byte{}, + reason: encoding.UpkeepFailureReasonInvalidRevertDataInput, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupClient(t) + defer c.Close() + if tt.pluginRetries != 0 { + c.mercuryConfig.SetPluginRetry(tt.pluginRetryKey, tt.pluginRetries, cache.DefaultExpiration) + } + hc := new(MockHttpClient) + + for _, blob := range tt.mockPluginBlobs { + mr := MercuryV02Response{PluginBlob: blob} + b, err := json.Marshal(mr) + assert.Nil(t, err) + + resp := &http.Response{ + StatusCode: tt.mockHttpStatusCode, + Body: io.NopCloser(bytes.NewReader(b)), + } + if tt.expectedError != nil && tt.expectedRetryable || tt.pluginRetries > 0 { + hc.On("Do", mock.Anything).Return(resp, nil).Times(totalAttempt) + } else { + hc.On("Do", mock.Anything).Return(resp, nil).Once() + } + } + c.httpClient = hc + + state, reason, values, retryable, retryInterval, reqErr := c.DoRequest(testutils.Context(t), tt.lookup, tt.pluginRetryKey) + assert.Equal(t, tt.expectedValues, values) + assert.Equal(t, tt.expectedRetryable, retryable) + if retryable { + newRetries, _ := c.mercuryConfig.GetPluginRetry(tt.pluginRetryKey) + assert.Equal(t, tt.pluginRetries+1, newRetries.(int)) + 
} + assert.Equal(t, tt.expectedRetryInterval, retryInterval) + assert.Equal(t, tt.state, state) + assert.Equal(t, tt.reason, reason) + if tt.expectedError != nil { + assert.True(t, strings.HasPrefix(reqErr.Error(), "failed to request feed for 0x4554482d5553442d415242495452554d2d544553544e45540000000000000000")) + } + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v03/request.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v03/request.go new file mode 100644 index 00000000..8aaafac5 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v03/request.go @@ -0,0 +1,246 @@ +package v03 + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + "github.com/avast/retry-go/v4" + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const ( + mercuryBatchPathV03 = "/api/v1/reports/bulk?" // only used to access mercury v0.3 server + mercuryBatchPathV03BlockNumber = "/api/v1gmx/reports/bulk?" 
// only used to access mercury v0.3 server with blockNumber + retryDelay = 500 * time.Millisecond + totalAttempt = 3 + contentTypeHeader = "Content-Type" + authorizationHeader = "Authorization" + timestampHeader = "X-Authorization-Timestamp" + signatureHeader = "X-Authorization-Signature-SHA256" + upkeepIDHeader = "X-Authorization-Upkeep-Id" +) + +type MercuryV03Response struct { + Reports []MercuryV03Report `json:"reports"` +} + +type MercuryV03Report struct { + FeedID string `json:"feedID"` // feed id in hex encoded + ValidFromTimestamp uint32 `json:"validFromTimestamp"` + ObservationsTimestamp uint32 `json:"observationsTimestamp"` + FullReport string `json:"fullReport"` // the actual hex encoded mercury report of this feed, can be sent to verifier +} + +type client struct { + services.StateMachine + mercuryConfig mercury.MercuryConfigProvider + httpClient mercury.HttpClient + threadCtrl utils.ThreadControl + lggr logger.Logger +} + +func NewClient(mercuryConfig mercury.MercuryConfigProvider, httpClient mercury.HttpClient, threadCtrl utils.ThreadControl, lggr logger.Logger) *client { + return &client{ + mercuryConfig: mercuryConfig, + httpClient: httpClient, + threadCtrl: threadCtrl, + lggr: lggr, + } +} + +func (c *client) DoRequest(ctx context.Context, streamsLookup *mercury.StreamsLookup, pluginRetryKey string) (encoding.PipelineExecutionState, encoding.UpkeepFailureReason, [][]byte, bool, time.Duration, error) { + if len(streamsLookup.Feeds) == 0 { + return encoding.NoPipelineError, encoding.UpkeepFailureReasonInvalidRevertDataInput, [][]byte{}, false, 0 * time.Second, fmt.Errorf("invalid revert data input: feed param key %s, time param key %s, feeds %s", streamsLookup.FeedParamKey, streamsLookup.TimeParamKey, streamsLookup.Feeds) + } + resultLen := 1 // Only 1 multi-feed request is made for all feeds + ch := make(chan mercury.MercuryData, resultLen) + c.threadCtrl.Go(func(ctx context.Context) { + c.multiFeedsRequest(ctx, ch, streamsLookup) + }) + + var 
reqErr error + var retryInterval time.Duration + results := make([][]byte, len(streamsLookup.Feeds)) + retryable := false + state := encoding.NoPipelineError + + m := <-ch + if m.Error != nil { + reqErr = m.Error + retryable = m.Retryable + state = m.State + if retryable { + retryInterval = mercury.CalculateRetryConfigFn(pluginRetryKey, c.mercuryConfig) + } + } else { + results = m.Bytes + } + + return state, encoding.UpkeepFailureReasonNone, results, retryable, retryInterval, reqErr +} + +func (c *client) multiFeedsRequest(ctx context.Context, ch chan<- mercury.MercuryData, sl *mercury.StreamsLookup) { + // this won't work bc q.Encode() will encode commas as '%2C' but the server is strictly expecting a comma separated list + //q := url.Values{ + // feedIDs: {strings.Join(sl.Feeds, ",")}, + // timestamp: {sl.Time.String()}, + //} + + params := fmt.Sprintf("%s=%s&%s=%s", mercury.FeedIDs, strings.Join(sl.Feeds, ","), mercury.Timestamp, sl.Time.String()) + batchPathV03 := mercuryBatchPathV03 + if sl.IsMercuryV03UsingBlockNumber() { + batchPathV03 = mercuryBatchPathV03BlockNumber + } + reqUrl := fmt.Sprintf("%s%s%s", c.mercuryConfig.Credentials().URL, batchPathV03, params) + + c.lggr.Debugf("request URL for upkeep %s userId %s: %s", sl.UpkeepId.String(), c.mercuryConfig.Credentials().Username, reqUrl) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, reqUrl, nil) + if err != nil { + ch <- mercury.MercuryData{Index: 0, Error: err, Retryable: false, State: encoding.InvalidMercuryRequest} + return + } + + ts := time.Now().UTC().UnixMilli() + signature := mercury.GenerateHMACFn(http.MethodGet, mercuryBatchPathV03+params, []byte{}, c.mercuryConfig.Credentials().Username, c.mercuryConfig.Credentials().Password, ts) + + req.Header.Set(contentTypeHeader, "application/json") + // username here is often referred to as user id + req.Header.Set(authorizationHeader, c.mercuryConfig.Credentials().Username) + req.Header.Set(timestampHeader, strconv.FormatInt(ts, 10)) + 
req.Header.Set(signatureHeader, signature) + // mercury will inspect authorization headers above to make sure this user (in automation's context, this node) is eligible to access mercury + // and if it has an automation role. it will then look at this upkeep id to check if it has access to all the requested feeds. + req.Header.Set(upkeepIDHeader, sl.UpkeepId.String()) + + // in the case of multiple retries here, use the last attempt's data + state := encoding.NoPipelineError + retryable := false + sent := false + retryErr := retry.Do( + func() error { + retryable = false + resp, err := c.httpClient.Do(req) + if err != nil { + c.lggr.Warnf("at timestamp %s upkeep %s GET request fails from mercury v0.3: %v", sl.Time.String(), sl.UpkeepId.String(), err) + retryable = true + state = encoding.MercuryFlakyFailure + return err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + retryable = false + state = encoding.InvalidMercuryResponse + return err + } + + c.lggr.Infof("at timestamp %s upkeep %s received status code %d from mercury v0.3", sl.Time.String(), sl.UpkeepId.String(), resp.StatusCode) + switch resp.StatusCode { + case http.StatusUnauthorized: + retryable = false + state = encoding.UpkeepNotAuthorized + return fmt.Errorf("at timestamp %s upkeep %s received status code %d from mercury v0.3, most likely this is caused by unauthorized upkeep", sl.Time.String(), sl.UpkeepId.String(), resp.StatusCode) + case http.StatusBadRequest: + retryable = false + state = encoding.InvalidMercuryRequest + return fmt.Errorf("at timestamp %s upkeep %s received status code %d from mercury v0.3, most likely this is caused by invalid format of timestamp", sl.Time.String(), sl.UpkeepId.String(), resp.StatusCode) + case http.StatusInternalServerError, http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout: + retryable = true + state = encoding.MercuryFlakyFailure + return fmt.Errorf("%d", resp.StatusCode) + case 
http.StatusPartialContent: + // TODO (AUTO-5044): handle response code 206 entirely with errors field parsing + c.lggr.Warnf("at timestamp %s upkeep %s requested [%s] feeds but mercury v0.3 server returned 206 status, treating it as 404 and retrying", sl.Time.String(), sl.UpkeepId.String(), sl.Feeds) + retryable = true + state = encoding.MercuryFlakyFailure + return fmt.Errorf("%d", http.StatusPartialContent) + case http.StatusOK: + // continue + default: + retryable = false + state = encoding.InvalidMercuryRequest + return fmt.Errorf("at timestamp %s upkeep %s received status code %d from mercury v0.3", sl.Time.String(), sl.UpkeepId.String(), resp.StatusCode) + } + c.lggr.Debugf("at block %s upkeep %s received status code %d from mercury v0.3 with BODY=%s", sl.Time.String(), sl.UpkeepId.String(), resp.StatusCode, hexutil.Encode(body)) + + var response MercuryV03Response + if err := json.Unmarshal(body, &response); err != nil { + c.lggr.Warnf("at timestamp %s upkeep %s failed to unmarshal body to MercuryV03Response from mercury v0.3: %v", sl.Time.String(), sl.UpkeepId.String(), err) + retryable = false + state = encoding.MercuryUnmarshalError + return err + } + + // in v0.3, if some feeds are not available, the server will only return available feeds, but we need to make sure ALL feeds are retrieved before calling user contract + // hence, retry in this case. 
retry will help when we send a very new timestamp and reports are not yet generated + if len(response.Reports) != len(sl.Feeds) { + var receivedFeeds []string + for _, f := range response.Reports { + receivedFeeds = append(receivedFeeds, f.FeedID) + } + c.lggr.Warnf("at timestamp %s upkeep %s mercury v0.3 server returned 206 status with [%s] reports while we requested [%s] feeds, retrying", sl.Time.String(), sl.UpkeepId.String(), receivedFeeds, sl.Feeds) + retryable = true + state = encoding.MercuryFlakyFailure + return fmt.Errorf("%d", http.StatusNotFound) + } + var reportBytes [][]byte + for _, rsp := range response.Reports { + b, err := hexutil.Decode(rsp.FullReport) + if err != nil { + c.lggr.Warnf("at timestamp %s upkeep %s failed to decode reportBlob %s: %v", sl.Time.String(), sl.UpkeepId.String(), rsp.FullReport, err) + retryable = false + state = encoding.InvalidMercuryResponse + return err + } + reportBytes = append(reportBytes, b) + } + ch <- mercury.MercuryData{ + Index: 0, + Bytes: reportBytes, + Retryable: false, + State: encoding.NoPipelineError, + } + sent = true + return nil + }, + // only retry when the error is 206 Partial Content, 404 Not Found, 500 Internal Server Error, 502 Bad Gateway, 503 Service Unavailable, 504 Gateway Timeout + retry.RetryIf(func(err error) bool { + return err.Error() == fmt.Sprintf("%d", http.StatusPartialContent) || err.Error() == fmt.Sprintf("%d", http.StatusNotFound) || err.Error() == fmt.Sprintf("%d", http.StatusInternalServerError) || err.Error() == fmt.Sprintf("%d", http.StatusBadGateway) || err.Error() == fmt.Sprintf("%d", http.StatusServiceUnavailable) || err.Error() == fmt.Sprintf("%d", http.StatusGatewayTimeout) + }), + retry.Context(ctx), + retry.Delay(retryDelay), + retry.Attempts(totalAttempt), + ) + + if !sent { + ch <- mercury.MercuryData{ + Index: 0, + Bytes: [][]byte{}, + Retryable: retryable, + Error: retryErr, + State: state, + } + } +} + +func (c *client) Close() error { + return 
c.StopOnce("v03_request", func() error { + c.threadCtrl.Close() + return nil + }) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v03/v03_request_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v03/v03_request_test.go new file mode 100644 index 00000000..73ccf9e8 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/v03/v03_request_test.go @@ -0,0 +1,537 @@ +package v03 + +import ( + "bytes" + "encoding/json" + "io" + "math/big" + "net/http" + "testing" + "time" + + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/patrickmn/go-cache" + "github.com/stretchr/testify/mock" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mocks" + "github.com/goplugin/pluginv3.0/v2/core/utils" + + "github.com/stretchr/testify/assert" +) + +const ( + defaultPluginRetryExpiration = 30 * time.Minute + cleanupInterval = 5 * time.Minute +) + +type MockMercuryConfigProvider struct { + cache *cache.Cache + mock.Mock +} + +func NewMockMercuryConfigProvider() *MockMercuryConfigProvider { + return &MockMercuryConfigProvider{ + cache: cache.New(defaultPluginRetryExpiration, cleanupInterval), + } +} + +func (m *MockMercuryConfigProvider) Credentials() *types.MercuryCredentials { + mc := &types.MercuryCredentials{ + LegacyURL: "https://google.old.com", + URL: "https://google.com", + Username: "FakeClientID", + Password: "FakeClientKey", + } + return mc +} + +func (m *MockMercuryConfigProvider) IsUpkeepAllowed(s string) (interface{}, bool) { + args := m.Called(s) + return args.Get(0), args.Bool(1) +} + 
+func (m *MockMercuryConfigProvider) SetUpkeepAllowed(s string, i interface{}, d time.Duration) { + m.Called(s, i, d) +} + +func (m *MockMercuryConfigProvider) GetPluginRetry(s string) (interface{}, bool) { + if value, found := m.cache.Get(s); found { + return value, true + } + + return nil, false +} + +func (m *MockMercuryConfigProvider) SetPluginRetry(s string, i interface{}, d time.Duration) { + m.cache.Set(s, i, d) +} + +type MockHttpClient struct { + mock.Mock +} + +func (mock *MockHttpClient) Do(req *http.Request) (*http.Response, error) { + args := mock.Called(req) + return args.Get(0).(*http.Response), args.Error(1) +} + +// setups up a client object for tests. +func setupClient(t *testing.T) *client { + lggr := logger.TestLogger(t) + mockHttpClient := new(MockHttpClient) + mercuryConfig := NewMockMercuryConfigProvider() + threadCtl := utils.NewThreadControl() + + client := NewClient( + mercuryConfig, + mockHttpClient, + threadCtl, + lggr, + ) + return client +} + +func TestV03_DoMercuryRequestV03(t *testing.T) { + upkeepId, _ := new(big.Int).SetString("88786950015966611018675766524283132478093844178961698330929478019253453382042", 10) + + tests := []struct { + name string + lookup *mercury.StreamsLookup + mockHttpStatusCode int + mockPluginBlobs []string + pluginRetryKey string + expectedValues [][]byte + expectedRetryable bool + expectedRetryInterval time.Duration + expectedError error + state encoding.PipelineExecutionState + reason encoding.UpkeepFailureReason + }{ + { + name: "success v0.3", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIDs, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(25880526), + ExtraData: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, + }, + UpkeepId: upkeepId, + }, + mockHttpStatusCode: http.StatusOK, + mockPluginBlobs: 
[]string{"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb0000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c911963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"}, + expectedValues: [][]byte{{0, 6, 109, 252, 209, 237, 45, 149, 177, 140, 148, 141, 188, 91, 214, 76, 104, 122, 254, 147, 228, 202, 125, 102, 61, 222, 193, 76, 32, 9, 10, 216, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 20, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 128, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 32, 69, 84, 72, 45, 85, 83, 68, 45, 65, 82, 66, 73, 84, 82, 85, 77, 45, 84, 69, 83, 84, 78, 69, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 137, 28, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 154, 216, 211, 103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 154, 207, 11, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 155, 61, 164, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 138, 231, 206, 116, 217, 250, 37, 42, 137, 131, 151, 110, 171, 96, 13, 199, 89, 12, 119, 141, 4, 129, 52, 48, 132, 27, 198, 231, 101, 195, 76, 216, 26, 22, 141, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 138, 231, 203, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 137, 28, 152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 96, 65, 43, 148, 229, 37, 202, 108, 237, 201, 245, 68, 253, 134, 247, 118, 6, 213, 47, 231, 49, 165, 208, 105, 219, 232, 54, 168, 191, 192, 251, 140, 145, 25, 99, 176, 174, 122, 20, 151, 31, 59, 70, 33, 191, 251, 128, 46, 240, 96, 83, 146, 185, 166, 200, 156, 127, 171, 29, 248, 99, 58, 90, 222, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 69, 0, 194, 245, 33, 248, 63, 186, 94, 252, 43, 243, 239, 250, 174, 221, 228, 61, 10, 74, 223, 247, 133, 193, 33, 59, 113, 42, 58, 237, 13, 129, 87, 100, 42, 132, 50, 77, 176, 207, 
150, 149, 235, 210, 119, 8, 212, 96, 142, 176, 51, 126, 13, 216, 123, 14, 67, 240, 250, 112, 199, 0, 217, 17}}, + expectedRetryable: false, + expectedError: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupClient(t) + defer c.Close() + hc := mocks.NewHttpClient(t) + + mr := MercuryV03Response{} + for i, blob := range tt.mockPluginBlobs { + r := MercuryV03Report{ + FeedID: tt.lookup.Feeds[i], + ValidFromTimestamp: 0, + ObservationsTimestamp: 0, + FullReport: blob, + } + mr.Reports = append(mr.Reports, r) + } + + b, err := json.Marshal(mr) + assert.Nil(t, err) + resp := &http.Response{ + StatusCode: tt.mockHttpStatusCode, + Body: io.NopCloser(bytes.NewReader(b)), + } + if tt.expectedError != nil && tt.expectedRetryable { + hc.On("Do", mock.Anything).Return(resp, nil).Times(totalAttempt) + } else { + hc.On("Do", mock.Anything).Return(resp, nil).Once() + } + c.httpClient = hc + + state, reason, values, retryable, retryInterval, reqErr := c.DoRequest(testutils.Context(t), tt.lookup, tt.pluginRetryKey) + + assert.Equal(t, tt.expectedValues, values) + assert.Equal(t, tt.expectedRetryable, retryable) + assert.Equal(t, tt.expectedRetryInterval, retryInterval) + assert.Equal(t, tt.state, state) + assert.Equal(t, tt.reason, reason) + if tt.expectedError != nil { + assert.Equal(t, tt.expectedError.Error(), reqErr.Error()) + } + }) + } +} + +func TestV03_MultiFeedRequest(t *testing.T) { + upkeepId := big.NewInt(123456789) + tests := []struct { + name string + lookup *mercury.StreamsLookup + statusCode int + lastStatusCode int + pluginRetries int + pluginRetryKey string + retryNumber int + retryable bool + errorMessage string + firstResponse *MercuryV03Response + response *MercuryV03Response + }{ + { + name: "success - mercury responds in the first try", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIDs, + Feeds: 
[]string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.Timestamp, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + response: &MercuryV03Response{ + Reports: []MercuryV03Report{ + { + FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123456, + ObservationsTimestamp: 123456, + FullReport: "0xab2123dc00000012", + }, + { + FeedID: "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123458, + ObservationsTimestamp: 123458, + FullReport: "0xab2123dc00000016", + }, + }, + }, + statusCode: http.StatusOK, + }, + { + name: "success - mercury responds in the first try with blocknumber", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIDs, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.BlockNumber, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + response: &MercuryV03Response{ + Reports: []MercuryV03Report{ + { + FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123456, + ObservationsTimestamp: 123456, + FullReport: "0xab2123dc00000012", + }, + { + FeedID: "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123458, + ObservationsTimestamp: 123458, + FullReport: "0xab2123dc00000016", + }, + }, + }, + statusCode: http.StatusOK, + }, + { + name: "success - retry 206", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIDs, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: 
mercury.Timestamp, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + firstResponse: &MercuryV03Response{ + Reports: []MercuryV03Report{ + { + FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123456, + ObservationsTimestamp: 123456, + FullReport: "0xab2123dc00000012", + }, + }, + }, + response: &MercuryV03Response{ + Reports: []MercuryV03Report{ + { + FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123456, + ObservationsTimestamp: 123456, + FullReport: "0xab2123dc00000012", + }, + { + FeedID: "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123458, + ObservationsTimestamp: 123458, + FullReport: "0xab2123dc00000019", + }, + }, + }, + retryNumber: 1, + statusCode: http.StatusPartialContent, + lastStatusCode: http.StatusOK, + }, + { + name: "success - retry for 500", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIDs, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.Timestamp, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + retryNumber: 2, + statusCode: http.StatusInternalServerError, + lastStatusCode: http.StatusOK, + response: &MercuryV03Response{ + Reports: []MercuryV03Report{ + { + FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123456, + ObservationsTimestamp: 123456, + FullReport: "0xab2123dc00000012", + }, + { + FeedID: "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123458, + ObservationsTimestamp: 123458, + FullReport: "0xab2123dc00000019", + }, + }, + }, + }, + { + name: "failure - fail to decode reportBlob", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + 
FeedParamKey: mercury.FeedIDs, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.Timestamp, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + response: &MercuryV03Response{ + Reports: []MercuryV03Report{ + { + FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123456, + ObservationsTimestamp: 123456, + FullReport: "qerwiu", // invalid hex blob + }, + { + FeedID: "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123458, + ObservationsTimestamp: 123458, + FullReport: "0xab2123dc00000016", + }, + }, + }, + statusCode: http.StatusOK, + retryable: false, + errorMessage: "All attempts fail:\n#1: hex string without 0x prefix", + }, + { + name: "failure - returns retryable with 1s plugin retry interval", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIDs, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.Timestamp, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + retryNumber: totalAttempt, + statusCode: http.StatusInternalServerError, + retryable: true, + errorMessage: "All attempts fail:\n#1: 500\n#2: 500\n#3: 500", + }, + { + name: "failure - returns retryable with 5s plugin retry interval", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIDs, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.Timestamp, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + pluginRetries: 6, + retryNumber: totalAttempt, + statusCode: 
http.StatusInternalServerError, + retryable: true, + errorMessage: "All attempts fail:\n#1: 500\n#2: 500\n#3: 500", + }, + { + name: "failure - returns retryable and then non-retryable", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIDs, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.Timestamp, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + retryNumber: 1, + statusCode: http.StatusInternalServerError, + lastStatusCode: http.StatusUnauthorized, + errorMessage: "All attempts fail:\n#1: 500\n#2: at timestamp 123456 upkeep 123456789 received status code 401 from mercury v0.3, most likely this is caused by unauthorized upkeep", + }, + { + name: "failure - returns status code 422 not retryable", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIDs, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.Timestamp, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + statusCode: http.StatusUnprocessableEntity, + errorMessage: "All attempts fail:\n#1: at timestamp 123456 upkeep 123456789 received status code 422 from mercury v0.3", + }, + { + name: "success - retry when reports length does not match feeds length", + lookup: &mercury.StreamsLookup{ + StreamsLookupError: &mercury.StreamsLookupError{ + FeedParamKey: mercury.FeedIDs, + Feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"}, + TimeParamKey: mercury.Timestamp, + Time: big.NewInt(123456), + }, + UpkeepId: upkeepId, + }, + firstResponse: &MercuryV03Response{ + Reports: []MercuryV03Report{ + { + FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", + 
ValidFromTimestamp: 123456, + ObservationsTimestamp: 123456, + FullReport: "0xab2123dc00000012", + }, + }, + }, + response: &MercuryV03Response{ + Reports: []MercuryV03Report{ + { + FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123456, + ObservationsTimestamp: 123456, + FullReport: "0xab2123dc00000012", + }, + { + FeedID: "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000", + ValidFromTimestamp: 123458, + ObservationsTimestamp: 123458, + FullReport: "0xab2123dc00000019", + }, + }, + }, + retryNumber: 1, + statusCode: http.StatusOK, + lastStatusCode: http.StatusOK, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupClient(t) + defer c.Close() + if tt.pluginRetries != 0 { + c.mercuryConfig.SetPluginRetry(tt.pluginRetryKey, tt.pluginRetries, cache.DefaultExpiration) + } + + hc := new(MockHttpClient) + b, err := json.Marshal(tt.response) + assert.Nil(t, err) + + if tt.retryNumber == 0 { + resp := &http.Response{ + StatusCode: tt.statusCode, + Body: io.NopCloser(bytes.NewReader(b)), + } + hc.On("Do", mock.Anything).Return(resp, nil).Once() + } else if tt.retryNumber < totalAttempt { + if tt.firstResponse != nil && tt.response != nil { + b0, err := json.Marshal(tt.firstResponse) + assert.Nil(t, err) + resp0 := &http.Response{ + StatusCode: tt.statusCode, + Body: io.NopCloser(bytes.NewReader(b0)), + } + b1, err := json.Marshal(tt.response) + assert.Nil(t, err) + resp1 := &http.Response{ + StatusCode: tt.lastStatusCode, + Body: io.NopCloser(bytes.NewReader(b1)), + } + hc.On("Do", mock.Anything).Return(resp0, nil).Once().On("Do", mock.Anything).Return(resp1, nil).Once() + } else { + retryResp := &http.Response{ + StatusCode: tt.statusCode, + Body: io.NopCloser(bytes.NewReader(b)), + } + hc.On("Do", mock.Anything).Return(retryResp, nil).Times(tt.retryNumber) + + resp := &http.Response{ + StatusCode: tt.lastStatusCode, + Body: io.NopCloser(bytes.NewReader(b)), + } + 
hc.On("Do", mock.Anything).Return(resp, nil).Once() + } + } else { + resp := &http.Response{ + StatusCode: tt.statusCode, + Body: io.NopCloser(bytes.NewReader(b)), + } + hc.On("Do", mock.Anything).Return(resp, nil).Times(tt.retryNumber) + } + c.httpClient = hc + + ch := make(chan mercury.MercuryData, 1) + c.multiFeedsRequest(testutils.Context(t), ch, tt.lookup) + + m := <-ch + assert.Equal(t, 0, m.Index) + assert.Equal(t, tt.retryable, m.Retryable) + if tt.retryNumber >= totalAttempt || tt.errorMessage != "" { + assert.Equal(t, tt.errorMessage, m.Error.Error()) + assert.Equal(t, [][]byte{}, m.Bytes) + } else { + assert.Nil(t, m.Error) + var reports [][]byte + for _, rsp := range tt.response.Reports { + b, _ := hexutil.Decode(rsp.FullReport) + reports = append(reports, b) + } + assert.Equal(t, reports, m.Bytes) + } + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mocks/http_client.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mocks/http_client.go new file mode 100644 index 00000000..d6982e9d --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mocks/http_client.go @@ -0,0 +1,58 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + http "net/http" + + mock "github.com/stretchr/testify/mock" +) + +// HttpClient is an autogenerated mock type for the HttpClient type +type HttpClient struct { + mock.Mock +} + +// Do provides a mock function with given fields: req +func (_m *HttpClient) Do(req *http.Request) (*http.Response, error) { + ret := _m.Called(req) + + if len(ret) == 0 { + panic("no return value specified for Do") + } + + var r0 *http.Response + var r1 error + if rf, ok := ret.Get(0).(func(*http.Request) (*http.Response, error)); ok { + return rf(req) + } + if rf, ok := ret.Get(0).(func(*http.Request) *http.Response); ok { + r0 = rf(req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*http.Response) + } + } + + if rf, ok := ret.Get(1).(func(*http.Request) error); ok { + r1 = rf(req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewHttpClient creates a new instance of HttpClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHttpClient(t interface { + mock.TestingT + Cleanup(func()) +}) *HttpClient { + mock := &HttpClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mocks/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mocks/registry.go new file mode 100644 index 00000000..cd308d2c --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mocks/registry.go @@ -0,0 +1,240 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
package mocks

import (
	big "math/big"

	bind "github.com/ethereum/go-ethereum/accounts/abi/bind"

	generated "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated"

	i_keeper_registry_master_wrapper_2_1 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1"

	mock "github.com/stretchr/testify/mock"

	types "github.com/ethereum/go-ethereum/core/types"
)

// Registry is an autogenerated mock type for the Registry type
// NOTE(review): mockery-generated (see header above); regenerate with mockery
// rather than hand-editing.
type Registry struct {
	mock.Mock
}

// CheckCallback provides a mock function with given fields: opts, id, values, extraData
func (_m *Registry) CheckCallback(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (i_keeper_registry_master_wrapper_2_1.CheckCallback, error) {
	ret := _m.Called(opts, id, values, extraData)

	if len(ret) == 0 {
		panic("no return value specified for CheckCallback")
	}

	var r0 i_keeper_registry_master_wrapper_2_1.CheckCallback
	var r1 error
	// A configured func returning both values wins over positional values.
	if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int, [][]byte, []byte) (i_keeper_registry_master_wrapper_2_1.CheckCallback, error)); ok {
		return rf(opts, id, values, extraData)
	}
	if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int, [][]byte, []byte) i_keeper_registry_master_wrapper_2_1.CheckCallback); ok {
		r0 = rf(opts, id, values, extraData)
	} else {
		r0 = ret.Get(0).(i_keeper_registry_master_wrapper_2_1.CheckCallback)
	}

	if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int, [][]byte, []byte) error); ok {
		r1 = rf(opts, id, values, extraData)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetActiveUpkeepIDs provides a mock function with given fields: opts, startIndex, maxCount
func (_m *Registry) GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) {
	ret := _m.Called(opts, startIndex, maxCount)

	if len(ret) == 0 {
		panic("no return value specified for GetActiveUpkeepIDs")
	}

	var r0 []*big.Int
	var r1 error
	if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int, *big.Int) ([]*big.Int, error)); ok {
		return rf(opts, startIndex, maxCount)
	}
	if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int, *big.Int) []*big.Int); ok {
		r0 = rf(opts, startIndex, maxCount)
	} else {
		// nil-check because the return is a reference type and may be left nil.
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*big.Int)
		}
	}

	if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int, *big.Int) error); ok {
		r1 = rf(opts, startIndex, maxCount)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetState provides a mock function with given fields: opts
func (_m *Registry) GetState(opts *bind.CallOpts) (i_keeper_registry_master_wrapper_2_1.GetState, error) {
	ret := _m.Called(opts)

	if len(ret) == 0 {
		panic("no return value specified for GetState")
	}

	var r0 i_keeper_registry_master_wrapper_2_1.GetState
	var r1 error
	if rf, ok := ret.Get(0).(func(*bind.CallOpts) (i_keeper_registry_master_wrapper_2_1.GetState, error)); ok {
		return rf(opts)
	}
	if rf, ok := ret.Get(0).(func(*bind.CallOpts) i_keeper_registry_master_wrapper_2_1.GetState); ok {
		r0 = rf(opts)
	} else {
		r0 = ret.Get(0).(i_keeper_registry_master_wrapper_2_1.GetState)
	}

	if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok {
		r1 = rf(opts)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetUpkeep provides a mock function with given fields: opts, id
func (_m *Registry) GetUpkeep(opts *bind.CallOpts, id *big.Int) (i_keeper_registry_master_wrapper_2_1.KeeperRegistryBase21UpkeepInfo, error) {
	ret := _m.Called(opts, id)

	if len(ret) == 0 {
		panic("no return value specified for GetUpkeep")
	}

	var r0 i_keeper_registry_master_wrapper_2_1.KeeperRegistryBase21UpkeepInfo
	var r1 error
	if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) (i_keeper_registry_master_wrapper_2_1.KeeperRegistryBase21UpkeepInfo, error)); ok {
		return rf(opts, id)
	}
	if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) i_keeper_registry_master_wrapper_2_1.KeeperRegistryBase21UpkeepInfo); ok {
		r0 = rf(opts, id)
	} else {
		r0 = ret.Get(0).(i_keeper_registry_master_wrapper_2_1.KeeperRegistryBase21UpkeepInfo)
	}

	if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); ok {
		r1 = rf(opts, id)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetUpkeepPrivilegeConfig provides a mock function with given fields: opts, upkeepId
func (_m *Registry) GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) {
	ret := _m.Called(opts, upkeepId)

	if len(ret) == 0 {
		panic("no return value specified for GetUpkeepPrivilegeConfig")
	}

	var r0 []byte
	var r1 error
	if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) ([]byte, error)); ok {
		return rf(opts, upkeepId)
	}
	if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) []byte); ok {
		r0 = rf(opts, upkeepId)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]byte)
		}
	}

	if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); ok {
		r1 = rf(opts, upkeepId)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetUpkeepTriggerConfig provides a mock function with given fields: opts, upkeepId
func (_m *Registry) GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) {
	ret := _m.Called(opts, upkeepId)

	if len(ret) == 0 {
		panic("no return value specified for GetUpkeepTriggerConfig")
	}

	var r0 []byte
	var r1 error
	if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) ([]byte, error)); ok {
		return rf(opts, upkeepId)
	}
	if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) []byte); ok {
		r0 = rf(opts, upkeepId)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]byte)
		}
	}

	if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); ok {
		r1 = rf(opts, upkeepId)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ParseLog provides a mock function with given fields: log
func (_m *Registry) ParseLog(log types.Log) (generated.AbigenLog, error) {
	ret := _m.Called(log)

	if len(ret) == 0 {
		panic("no return value specified for ParseLog")
	}

	var r0 generated.AbigenLog
	var r1 error
	if rf, ok := ret.Get(0).(func(types.Log) (generated.AbigenLog, error)); ok {
		return rf(log)
	}
	if rf, ok := ret.Get(0).(func(types.Log) generated.AbigenLog); ok {
		r0 = rf(log)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(generated.AbigenLog)
		}
	}

	if rf, ok := ret.Get(1).(func(types.Log) error); ok {
		r1 = rf(log)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// NewRegistry creates a new instance of Registry. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewRegistry(t interface {
	mock.TestingT
	Cleanup(func())
}) *Registry {
	mock := &Registry{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/payload_builder.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/payload_builder.go
new file mode 100644
index 00000000..d1da54e1
--- /dev/null
+++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/payload_builder.go
@@ -0,0 +1,63 @@
package evm

import (
	"context"

	"github.com/goplugin/plugin-automation/pkg/v3/types"

	ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation"

	"github.com/goplugin/pluginv3.0/v2/core/logger"
	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core"
	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider"
)

// payloadBuilder turns coordinated block proposals into executable upkeep payloads.
type payloadBuilder struct {
	upkeepList ActiveUpkeepList
	lggr       logger.Logger
	recoverer  logprovider.LogRecoverer
}

var _ ocr2keepers.PayloadBuilder = &payloadBuilder{}

func NewPayloadBuilder(activeUpkeepList
ActiveUpkeepList, recoverer logprovider.LogRecoverer, lggr logger.Logger) *payloadBuilder { + return &payloadBuilder{ + upkeepList: activeUpkeepList, + lggr: lggr, + recoverer: recoverer, + } +} + +func (b *payloadBuilder) BuildPayloads(ctx context.Context, proposals ...ocr2keepers.CoordinatedBlockProposal) ([]ocr2keepers.UpkeepPayload, error) { + payloads := make([]ocr2keepers.UpkeepPayload, len(proposals)) + + for i, proposal := range proposals { + var payload ocr2keepers.UpkeepPayload + if !b.upkeepList.IsActive(proposal.UpkeepID.BigInt()) { + b.lggr.Warnw("upkeep is not active, skipping", "upkeepID", proposal.UpkeepID) + continue + } + b.lggr.Debugf("building payload for coordinated block proposal %+v", proposal) + var checkData []byte + var err error + switch core.GetUpkeepType(proposal.UpkeepID) { + case types.LogTrigger: + checkData, err = b.recoverer.GetProposalData(ctx, proposal) + if err != nil { + b.lggr.Warnw("failed to get log proposal data", "err", err, "upkeepID", proposal.UpkeepID, "trigger", proposal.Trigger) + continue + } + case types.ConditionTrigger: + // Empty checkData for conditionals + } + payload, err = core.NewUpkeepPayload(proposal.UpkeepID.BigInt(), proposal.Trigger, checkData) + if err != nil { + b.lggr.Warnw("error building upkeep payload", "err", err, "upkeepID", proposal.UpkeepID) + continue + } + + payloads[i] = payload + } + + return payloads, nil +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/payload_builder_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/payload_builder_test.go new file mode 100644 index 00000000..bf796115 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/payload_builder_test.go @@ -0,0 +1,212 @@ +package evm + +import ( + "context" + "math/big" + "testing" + + types2 "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + types 
"github.com/goplugin/plugin-common/pkg/types/automation"

	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
	"github.com/goplugin/pluginv3.0/v2/core/logger"
	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core"
	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider"
)

// TestNewPayloadBuilder covers BuildPayloads: active log-trigger upkeeps get
// check data from the recoverer, inactive upkeeps and recoverer failures leave
// a zero-value payload slot, and conditional upkeeps get empty check data.
// The WorkID literals are the exact hashes NewUpkeepPayload derives.
func TestNewPayloadBuilder(t *testing.T) {
	for _, tc := range []struct {
		name         string
		activeList   ActiveUpkeepList
		recoverer    logprovider.LogRecoverer
		proposals    []types.CoordinatedBlockProposal
		wantPayloads []types.UpkeepPayload
	}{
		{
			name: "for log trigger upkeeps, new payloads are created",
			activeList: &mockActiveUpkeepList{
				IsActiveFn: func(id *big.Int) bool {
					return true
				},
			},
			proposals: []types.CoordinatedBlockProposal{
				{
					UpkeepID: core.GenUpkeepID(types2.LogTrigger, "abc"),
					WorkID:   "workID1",
					Trigger: types.Trigger{
						BlockNumber: 1,
						BlockHash:   [32]byte{1},
					},
				},
				{
					UpkeepID: core.GenUpkeepID(types2.LogTrigger, "def"),
					WorkID:   "workID2",
					Trigger: types.Trigger{
						BlockNumber: 2,
						BlockHash:   [32]byte{2},
					},
				},
			},
			recoverer: &mockLogRecoverer{
				GetProposalDataFn: func(ctx context.Context, proposal types.CoordinatedBlockProposal) ([]byte, error) {
					return []byte{1, 2, 3}, nil
				},
			},
			wantPayloads: []types.UpkeepPayload{
				{
					UpkeepID: core.GenUpkeepID(types2.LogTrigger, "abc"),
					WorkID:   "714f83255c5b562823725748c4a75777c9b78ea8c5ba72ea819926a1fecd389e",
					Trigger: types.Trigger{
						BlockNumber: 1,
						BlockHash:   [32]byte{1},
					},
					CheckData: []byte{1, 2, 3},
				},
				{
					UpkeepID: core.GenUpkeepID(types2.LogTrigger, "def"),
					WorkID:   "3956daa0378d6a761fe972ee00fe98338f17fb6b7865c1d49a8a416cd85977b8",
					Trigger: types.Trigger{
						BlockNumber: 2,
						BlockHash:   [32]byte{2},
					},
					CheckData: []byte{1, 2, 3},
				},
			},
		},
		{
			name: "for an inactive log trigger upkeep, an empty payload is added to the list of payloads",
			activeList: &mockActiveUpkeepList{
				IsActiveFn: func(id *big.Int) bool {
					// only "ghi" is inactive
					return core.GenUpkeepID(types2.LogTrigger, "ghi").BigInt().Cmp(id) != 0
				},
			},
			proposals: []types.CoordinatedBlockProposal{
				{
					UpkeepID: core.GenUpkeepID(types2.LogTrigger, "abc"),
					WorkID:   "workID1",
					Trigger: types.Trigger{
						BlockNumber: 1,
						BlockHash:   [32]byte{1},
					},
				},
				{
					UpkeepID: core.GenUpkeepID(types2.LogTrigger, "def"),
					WorkID:   "workID2",
					Trigger: types.Trigger{
						BlockNumber: 2,
						BlockHash:   [32]byte{2},
					},
				},
				{
					UpkeepID: core.GenUpkeepID(types2.LogTrigger, "ghi"),
					WorkID:   "workID3",
					Trigger: types.Trigger{
						BlockNumber: 3,
						BlockHash:   [32]byte{3},
					},
				},
			},
			recoverer: &mockLogRecoverer{
				GetProposalDataFn: func(ctx context.Context, proposal types.CoordinatedBlockProposal) ([]byte, error) {
					return []byte{1, 2, 3}, nil
				},
			},
			wantPayloads: []types.UpkeepPayload{
				{
					UpkeepID: core.GenUpkeepID(types2.LogTrigger, "abc"),
					WorkID:   "714f83255c5b562823725748c4a75777c9b78ea8c5ba72ea819926a1fecd389e",
					Trigger: types.Trigger{
						BlockNumber: 1,
						BlockHash:   [32]byte{1},
					},
					CheckData: []byte{1, 2, 3},
				},
				{
					UpkeepID: core.GenUpkeepID(types2.LogTrigger, "def"),
					WorkID:   "3956daa0378d6a761fe972ee00fe98338f17fb6b7865c1d49a8a416cd85977b8",
					Trigger: types.Trigger{
						BlockNumber: 2,
						BlockHash:   [32]byte{2},
					},
					CheckData: []byte{1, 2, 3},
				},
				// inactive upkeep keeps its zero-value slot
				{},
			},
		},
		{
			name: "when the recoverer errors, an empty payload is created but not added to the list of payloads",
			activeList: &mockActiveUpkeepList{
				IsActiveFn: func(id *big.Int) bool {
					return true
				},
			},
			proposals: []types.CoordinatedBlockProposal{
				{
					UpkeepID: core.GenUpkeepID(types2.LogTrigger, "abc"),
					WorkID:   "workID1",
					Trigger: types.Trigger{
						BlockNumber: 1,
						BlockHash:   [32]byte{1},
					},
				},
			},
			recoverer: &mockLogRecoverer{
				GetProposalDataFn: func(ctx context.Context, proposal types.CoordinatedBlockProposal) ([]byte, error) {
					return nil, errors.New("recoverer boom")
				},
			},
			wantPayloads: []types.UpkeepPayload{
				{},
			},
		},
		{
			name: "for a conditional upkeep, a new payload with empty check data is added to the list of payloads",
			activeList: &mockActiveUpkeepList{
				IsActiveFn: func(id *big.Int) bool {
					return true
				},
			},
			proposals: []types.CoordinatedBlockProposal{
				{
					UpkeepID: core.GenUpkeepID(types2.ConditionTrigger, "def"),
					WorkID:   "workID1",
					Trigger: types.Trigger{
						BlockNumber: 1,
						BlockHash:   [32]byte{1},
					},
				},
			},
			wantPayloads: []types.UpkeepPayload{
				{
					UpkeepID: core.GenUpkeepID(types2.ConditionTrigger, "def"),
					WorkID:   "58f2f231792448679a75bac6efc2af4ba731901f0cb93a44a366525751cbabfb",
					Trigger: types.Trigger{
						BlockNumber: 1,
						BlockHash:   [32]byte{1},
					},
				},
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			lggr, _ := logger.NewLogger()
			builder := NewPayloadBuilder(tc.activeList, tc.recoverer, lggr)
			payloads, err := builder.BuildPayloads(testutils.Context(t), tc.proposals...)
			assert.NoError(t, err)
			assert.Equal(t, tc.wantPayloads, payloads)
		})
	}
}

// mockLogRecoverer overrides only GetProposalData; the embedded interface
// covers the rest of logprovider.LogRecoverer.
type mockLogRecoverer struct {
	logprovider.LogRecoverer
	GetProposalDataFn func(context.Context, types.CoordinatedBlockProposal) ([]byte, error)
}

func (r *mockLogRecoverer) GetProposalData(ctx context.Context, p types.CoordinatedBlockProposal) ([]byte, error) {
	return r.GetProposalDataFn(ctx, p)
}
diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go
new file mode 100644
index 00000000..cf4170b4
--- /dev/null
+++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry.go
@@ -0,0 +1,627 @@
package evm

import (
	"context"
	goerrors "errors"
	"fmt"
	"math/big"
	"net/http"
	"sync"
	"time"

	types2 "github.com/goplugin/plugin-automation/pkg/v3/types"

	"github.com/goplugin/plugin-common/pkg/types"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	coreTypes "github.com/ethereum/go-ethereum/core/types"
	"github.com/patrickmn/go-cache"
	"github.com/pkg/errors"
	"go.uber.org/multierr"

	"github.com/goplugin/plugin-common/pkg/services"

	ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation"

	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/client"
	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller"
	"github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated"
	iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1"
	"github.com/goplugin/pluginv3.0/v2/core/logger"
	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core"
	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding"
"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const ( + defaultPluginRetryExpiration = 30 * time.Minute + // defaultAllowListExpiration decides how long an upkeep's allow list info will be valid for. + defaultAllowListExpiration = 10 * time.Minute + // cleanupInterval decides when the expired items in cache will be deleted. + cleanupInterval = 5 * time.Minute + logTriggerRefreshBatchSize = 32 +) + +var ( + RegistryServiceName = "AutomationRegistry" + + ErrLogReadFailure = fmt.Errorf("failure reading logs") + ErrHeadNotAvailable = fmt.Errorf("head not available") + ErrInitializationFailure = fmt.Errorf("failed to initialize registry") + ErrContextCancelled = fmt.Errorf("context was cancelled") + ErrABINotParsable = fmt.Errorf("error parsing abi") + ActiveUpkeepIDBatchSize int64 = 1000 + // This is the interval at which active upkeep list is fully refreshed from chain + refreshInterval = 15 * time.Minute + // This is the lookback for polling upkeep state event logs from latest block + logEventLookback int64 = 250 +) + +//go:generate mockery --quiet --name Registry --output ./mocks/ --case=underscore +type Registry interface { + GetUpkeep(opts *bind.CallOpts, id *big.Int) (encoding.UpkeepInfo, error) + GetState(opts *bind.CallOpts) (iregistry21.GetState, error) + GetActiveUpkeepIDs(opts *bind.CallOpts, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + GetUpkeepPrivilegeConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + CheckCallback(opts *bind.CallOpts, id *big.Int, values [][]byte, extraData []byte) (iregistry21.CheckCallback, error) + ParseLog(log coreTypes.Log) (generated.AbigenLog, error) 
+} + +//go:generate mockery --quiet --name HttpClient --output ./mocks/ --case=underscore +type HttpClient interface { + Do(req *http.Request) (*http.Response, error) +} + +func NewEvmRegistry( + lggr logger.Logger, + addr common.Address, + client legacyevm.Chain, + registry *iregistry21.IKeeperRegistryMaster, + mc *types.MercuryCredentials, + al ActiveUpkeepList, + logEventProvider logprovider.LogEventProvider, + packer encoding.Packer, + blockSub *BlockSubscriber, + finalityDepth uint32, +) *EvmRegistry { + mercuryConfig := &MercuryConfig{ + cred: mc, + Abi: core.StreamsCompatibleABI, + AllowListCache: cache.New(defaultAllowListExpiration, cleanupInterval), + pluginRetryCache: cache.New(defaultPluginRetryExpiration, cleanupInterval), + } + hc := http.DefaultClient + + return &EvmRegistry{ + ctx: context.Background(), + threadCtrl: utils.NewThreadControl(), + lggr: lggr.Named(RegistryServiceName), + poller: client.LogPoller(), + addr: addr, + client: client.Client(), + logProcessed: make(map[string]bool), + registry: registry, + abi: core.RegistryABI, + active: al, + packer: packer, + headFunc: func(ocr2keepers.BlockKey) {}, + chLog: make(chan logpoller.Log, 1000), + hc: hc, + logEventProvider: logEventProvider, + bs: blockSub, + finalityDepth: finalityDepth, + streams: streams.NewStreamsLookup(mercuryConfig, blockSub, client.Client(), registry, lggr), + } +} + +var upkeepStateEvents = []common.Hash{ + iregistry21.IKeeperRegistryMasterUpkeepRegistered{}.Topic(), // adds new upkeep id to registry + iregistry21.IKeeperRegistryMasterUpkeepReceived{}.Topic(), // adds new upkeep id to registry via migration + iregistry21.IKeeperRegistryMasterUpkeepUnpaused{}.Topic(), // unpauses an upkeep + iregistry21.IKeeperRegistryMasterUpkeepPaused{}.Topic(), // pauses an upkeep + iregistry21.IKeeperRegistryMasterUpkeepMigrated{}.Topic(), // migrated an upkeep, equivalent to cancel from this registry's perspective + iregistry21.IKeeperRegistryMasterUpkeepCanceled{}.Topic(), // 
cancels an upkeep + iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{}.Topic(), // trigger config was changed +} + +type MercuryConfig struct { + cred *types.MercuryCredentials + Abi abi.ABI + // AllowListCache stores the upkeeps privileges. In 2.1, this only includes a JSON bytes for allowed to use mercury + AllowListCache *cache.Cache + pluginRetryCache *cache.Cache +} + +func NewMercuryConfig(credentials *types.MercuryCredentials, abi abi.ABI) *MercuryConfig { + return &MercuryConfig{ + cred: credentials, + Abi: abi, + AllowListCache: cache.New(defaultPluginRetryExpiration, cleanupInterval), + pluginRetryCache: cache.New(defaultPluginRetryExpiration, cleanupInterval), + } +} + +func (c *MercuryConfig) Credentials() *types.MercuryCredentials { + return c.cred +} + +func (c *MercuryConfig) IsUpkeepAllowed(k string) (interface{}, bool) { + return c.AllowListCache.Get(k) +} + +func (c *MercuryConfig) SetUpkeepAllowed(k string, v interface{}, d time.Duration) { + c.AllowListCache.Set(k, v, d) +} + +func (c *MercuryConfig) GetPluginRetry(k string) (interface{}, bool) { + return c.pluginRetryCache.Get(k) +} + +func (c *MercuryConfig) SetPluginRetry(k string, v interface{}, d time.Duration) { + c.pluginRetryCache.Set(k, v, d) +} + +type EvmRegistry struct { + services.StateMachine + threadCtrl utils.ThreadControl + lggr logger.Logger + poller logpoller.LogPoller + addr common.Address + client client.Client + chainID uint64 + registry Registry + abi abi.ABI + packer encoding.Packer + chLog chan logpoller.Log + mu sync.RWMutex + logProcessed map[string]bool + active ActiveUpkeepList + lastPollBlock int64 + ctx context.Context + headFunc func(ocr2keepers.BlockKey) + mercury *MercuryConfig + hc HttpClient + bs *BlockSubscriber + logEventProvider logprovider.LogEventProvider + finalityDepth uint32 + streams streams.Lookup +} + +func (r *EvmRegistry) Name() string { + return r.lggr.Name() +} + +func (r *EvmRegistry) Start(ctx context.Context) error { + return 
r.StartOnce(RegistryServiceName, func() error { + if err := r.registerEvents(r.chainID, r.addr); err != nil { + return fmt.Errorf("logPoller error while registering automation events: %w", err) + } + + r.threadCtrl.Go(func(ctx context.Context) { + lggr := r.lggr.With("where", "upkeeps_referesh") + err := r.refreshActiveUpkeeps() + if err != nil { + lggr.Errorf("failed to initialize upkeeps", err) + } + + ticker := time.NewTicker(refreshInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + err = r.refreshActiveUpkeeps() + if err != nil { + lggr.Errorf("failed to refresh upkeeps", err) + } + case <-ctx.Done(): + return + } + } + }) + + r.threadCtrl.Go(func(ctx context.Context) { + lggr := r.lggr.With("where", "logs_polling") + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + err := r.pollUpkeepStateLogs() + if err != nil { + lggr.Errorf("failed to poll logs for upkeeps", err) + } + case <-ctx.Done(): + return + } + } + }) + + r.threadCtrl.Go(func(ctx context.Context) { + lggr := r.lggr.With("where", "logs_processing") + ch := r.chLog + + for { + select { + case l := <-ch: + err := r.processUpkeepStateLog(l) + if err != nil { + lggr.Errorf("failed to process log for upkeep", err) + } + case <-ctx.Done(): + return + } + } + }) + + return nil + }) +} + +func (r *EvmRegistry) Close() error { + return r.StopOnce(RegistryServiceName, func() error { + r.threadCtrl.Close() + return nil + }) +} + +func (r *EvmRegistry) HealthReport() map[string]error { + return map[string]error{RegistryServiceName: r.Healthy()} +} + +func (r *EvmRegistry) refreshActiveUpkeeps() error { + // Allow for max timeout of refreshInterval + ctx, cancel := context.WithTimeout(r.ctx, refreshInterval) + defer cancel() + + r.lggr.Debugf("Refreshing active upkeeps list") + // get active upkeep ids from contract + ids, err := r.getLatestIDsFromContract(ctx) + if err != nil { + return fmt.Errorf("failed to get active upkeep ids from 
contract during refresh: %s", err) + } + r.active.Reset(ids...) + + var logTriggerIDs []*big.Int + for _, id := range ids { + uid := &ocr2keepers.UpkeepIdentifier{} + if ok := uid.FromBigInt(id); !ok { + r.lggr.Warnf("failed to parse upkeep id %s", id.String()) + continue + } + switch core.GetUpkeepType(*uid) { + case types2.LogTrigger: + logTriggerIDs = append(logTriggerIDs, id) + default: + } + } + + _, err = r.logEventProvider.RefreshActiveUpkeeps(logTriggerIDs...) + if err != nil { + return fmt.Errorf("failed to refresh active upkeep ids in log event provider: %w", err) + } + + // Try to refersh log trigger config for all log upkeeps + return r.refreshLogTriggerUpkeeps(logTriggerIDs) +} + +// refreshLogTriggerUpkeeps refreshes the active upkeep ids for log trigger upkeeps +func (r *EvmRegistry) refreshLogTriggerUpkeeps(ids []*big.Int) error { + var err error + for i := 0; i < len(ids); i += logTriggerRefreshBatchSize { + end := i + logTriggerRefreshBatchSize + if end > len(ids) { + end = len(ids) + } + idBatch := ids[i:end] + + if batchErr := r.refreshLogTriggerUpkeepsBatch(idBatch); batchErr != nil { + multierr.AppendInto(&err, batchErr) + } + + time.Sleep(500 * time.Millisecond) + } + + return err +} + +func (r *EvmRegistry) refreshLogTriggerUpkeepsBatch(logTriggerIDs []*big.Int) error { + var logTriggerHashes []common.Hash + for _, id := range logTriggerIDs { + logTriggerHashes = append(logTriggerHashes, common.BigToHash(id)) + } + + unpausedLogs, err := r.poller.IndexedLogs(iregistry21.IKeeperRegistryMasterUpkeepUnpaused{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth), pg.WithParentCtx(r.ctx)) + if err != nil { + return err + } + configSetLogs, err := r.poller.IndexedLogs(iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{}.Topic(), r.addr, 1, logTriggerHashes, logpoller.Confirmations(r.finalityDepth), pg.WithParentCtx(r.ctx)) + if err != nil { + return err + } + + logs := append(unpausedLogs, configSetLogs...) 
+ + configSetBlockNumbers := map[string]uint64{} + unpausedBlockNumbers := map[string]uint64{} + perUpkeepConfig := map[string][]byte{} + + for _, log := range logs { + rawLog := log.ToGethLog() + abilog, err := r.registry.ParseLog(rawLog) + if err != nil { + return err + } + switch l := abilog.(type) { + case *iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet: + if rawLog.BlockNumber > configSetBlockNumbers[l.Id.String()] { + configSetBlockNumbers[l.Id.String()] = rawLog.BlockNumber + perUpkeepConfig[l.Id.String()] = l.TriggerConfig + } + case *iregistry21.IKeeperRegistryMasterUpkeepUnpaused: + if rawLog.BlockNumber > unpausedBlockNumbers[l.Id.String()] { + unpausedBlockNumbers[l.Id.String()] = rawLog.BlockNumber + } + } + } + + var merr error + for _, id := range logTriggerIDs { + logBlock, ok := configSetBlockNumbers[id.String()] + if !ok { + r.lggr.Warnf("unable to find finalized config set block number for %s, using 0 as config start block", id.String()) + // Use zero as config update block so it can be updated if an actual event is found later + logBlock = 0 + } + + config, ok := perUpkeepConfig[id.String()] + if !ok { + r.lggr.Warnf("unable to find per finalized log config for %s, will fetch latest config from chain", id.String()) + // Set it to empty bytes so that latest config is fetched within r.updateTriggerConfig + config = []byte{} + } + + // In case an upkeep was paused then unpaused after a config set event, start the config from the unpaused block number + if unpausedBlockNumbers[id.String()] > logBlock { + logBlock = unpausedBlockNumbers[id.String()] + } + if err := r.updateTriggerConfig(id, config, logBlock); err != nil { + merr = goerrors.Join(merr, fmt.Errorf("failed to update trigger config for upkeep id %s: %w", id.String(), err)) + } + } + + return merr +} + +func (r *EvmRegistry) pollUpkeepStateLogs() error { + var latest int64 + var end logpoller.LogPollerBlock + var err error + + if end, err = 
r.poller.LatestBlock(pg.WithParentCtx(r.ctx)); err != nil { + return fmt.Errorf("%w: %s", ErrHeadNotAvailable, err) + } + + r.mu.Lock() + latest = r.lastPollBlock + r.lastPollBlock = end.BlockNumber + r.mu.Unlock() + + // if start and end are the same, no polling needs to be done + if latest == 0 || latest == end.BlockNumber { + return nil + } + + var logs []logpoller.Log + if logs, err = r.poller.LogsWithSigs( + end.BlockNumber-logEventLookback, + end.BlockNumber, + upkeepStateEvents, + r.addr, + pg.WithParentCtx(r.ctx), + ); err != nil { + return fmt.Errorf("%w: %s", ErrLogReadFailure, err) + } + + for _, log := range logs { + r.chLog <- log + } + + return nil +} + +func (r *EvmRegistry) processUpkeepStateLog(l logpoller.Log) error { + lid := fmt.Sprintf("%s%d", l.TxHash.String(), l.LogIndex) + r.mu.Lock() + if _, ok := r.logProcessed[lid]; ok { + r.mu.Unlock() + return nil + } + r.logProcessed[lid] = true + r.mu.Unlock() + txHash := l.TxHash.String() + + rawLog := l.ToGethLog() + abilog, err := r.registry.ParseLog(rawLog) + if err != nil { + return err + } + + switch l := abilog.(type) { + case *iregistry21.IKeeperRegistryMasterUpkeepPaused: + r.lggr.Debugf("KeeperRegistryUpkeepPaused log detected for upkeep ID %s in transaction %s", l.Id.String(), txHash) + r.removeFromActive(l.Id) + case *iregistry21.IKeeperRegistryMasterUpkeepCanceled: + r.lggr.Debugf("KeeperRegistryUpkeepCanceled log detected for upkeep ID %s in transaction %s", l.Id.String(), txHash) + r.removeFromActive(l.Id) + case *iregistry21.IKeeperRegistryMasterUpkeepMigrated: + r.lggr.Debugf("KeeperRegistryMasterUpkeepMigrated log detected for upkeep ID %s in transaction %s", l.Id.String(), txHash) + r.removeFromActive(l.Id) + case *iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet: + r.lggr.Debugf("KeeperRegistryUpkeepTriggerConfigSet log detected for upkeep ID %s in transaction %s", l.Id.String(), txHash) + if err := r.updateTriggerConfig(l.Id, l.TriggerConfig, rawLog.BlockNumber); err != nil 
// RegistryUpkeepFilterName returns the log poller filter name used for the
// upkeep state events emitted by the keeper registry at addr.
func RegistryUpkeepFilterName(addr common.Address) string {
	return logpoller.FilterName("KeeperRegistry Events", addr.String())
}

// registerEvents registers upkeep state events from keeper registry on log poller.
// The chain id parameter is currently unused (the poller is already chain-scoped).
func (r *EvmRegistry) registerEvents(_ uint64, addr common.Address) error {
	// Add log filters for the log poller so that it can poll and find the logs that we need
	return r.poller.RegisterFilter(logpoller.Filter{
		Name:      RegistryUpkeepFilterName(addr),
		EventSigs: upkeepStateEvents,
		Addresses: []common.Address{addr},
	})
}
r.poller.RegisterFilter(logpoller.Filter{ + Name: RegistryUpkeepFilterName(addr), + EventSigs: upkeepStateEvents, + Addresses: []common.Address{addr}, + }) +} + +// removeFromActive removes an upkeepID from active list and unregisters the log filter for log upkeeps +func (r *EvmRegistry) removeFromActive(id *big.Int) { + r.active.Remove(id) + + uid := &ocr2keepers.UpkeepIdentifier{} + uid.FromBigInt(id) + trigger := core.GetUpkeepType(*uid) + switch trigger { + case types2.LogTrigger: + if err := r.logEventProvider.UnregisterFilter(id); err != nil { + r.lggr.Warnw("failed to unregister log filter", "upkeepID", id.String()) + } + r.lggr.Debugw("unregistered log filter", "upkeepID", id.String()) + default: + } +} + +func (r *EvmRegistry) buildCallOpts(ctx context.Context, block *big.Int) *bind.CallOpts { + opts := bind.CallOpts{ + Context: ctx, + } + + if block == nil || block.Int64() == 0 { + l := r.bs.latestBlock.Load() + if l != nil && l.Number != 0 { + opts.BlockNumber = big.NewInt(int64(l.Number)) + } + } else { + opts.BlockNumber = block + } + + return &opts +} + +func (r *EvmRegistry) getLatestIDsFromContract(ctx context.Context) ([]*big.Int, error) { + opts := r.buildCallOpts(ctx, nil) + + state, err := r.registry.GetState(opts) + if err != nil { + n := "latest" + if opts.BlockNumber != nil { + n = fmt.Sprintf("%d", opts.BlockNumber.Int64()) + } + + return nil, fmt.Errorf("%w: failed to get contract state at block number '%s'", err, n) + } + + ids := make([]*big.Int, 0, int(state.State.NumUpkeeps.Int64())) + for int64(len(ids)) < state.State.NumUpkeeps.Int64() { + startIndex := int64(len(ids)) + maxCount := state.State.NumUpkeeps.Int64() - startIndex + + if maxCount == 0 { + break + } + + if maxCount > ActiveUpkeepIDBatchSize { + maxCount = ActiveUpkeepIDBatchSize + } + + batchIDs, err := r.registry.GetActiveUpkeepIDs(opts, big.NewInt(startIndex), big.NewInt(maxCount)) + if err != nil { + return nil, fmt.Errorf("%w: failed to get active upkeep IDs from index 
// updateTriggerConfig updates the trigger config for an upkeep. it will re-register a filter for this upkeep.
// For non-log upkeeps this is a no-op. When cfg is empty, the latest trigger
// config is fetched from chain before registering the filter at logBlock.
func (r *EvmRegistry) updateTriggerConfig(id *big.Int, cfg []byte, logBlock uint64) error {
	uid := &ocr2keepers.UpkeepIdentifier{}
	uid.FromBigInt(id)
	switch core.GetUpkeepType(*uid) {
	case types2.LogTrigger:
		if len(cfg) == 0 {
			// no config supplied by the caller — read the current one from chain
			fetched, err := r.fetchTriggerConfig(id)
			if err != nil {
				return errors.Wrap(err, "failed to fetch log upkeep config")
			}
			cfg = fetched
		}
		parsed, err := r.packer.UnpackLogTriggerConfig(cfg)
		if err != nil {
			// Upkeep has been setup with improper config. Log a warning and ignore the upkeep.
			r.lggr.Warnw("failed to unpack log upkeep config", "upkeepID", id.String(), "err", err)
			return nil
		}
		if err := r.logEventProvider.RegisterFilter(r.ctx, logprovider.FilterOptions{
			TriggerConfig: logprovider.LogTriggerConfig(parsed),
			UpkeepID:      id,
			UpdateBlock:   logBlock,
		}); err != nil {
			return errors.Wrap(err, "failed to register log filter")
		}
		r.lggr.Debugw("registered log filter", "upkeepID", id.String(), "cfg", parsed)
	default:
	}
	return nil
}
+func (r *EvmRegistry) fetchTriggerConfig(id *big.Int) ([]byte, error) { + opts := r.buildCallOpts(r.ctx, nil) + cfg, err := r.registry.GetUpkeepTriggerConfig(opts, id) + if err != nil { + r.lggr.Warnw("failed to get trigger config", "err", err) + return nil, err + } + return cfg, nil +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline.go new file mode 100644 index 00000000..869cff36 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline.go @@ -0,0 +1,371 @@ +package evm + +import ( + "context" + "fmt" + "math/big" + "strings" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" +) + +const ( + // checkBlockTooOldRange is the number of blocks that can be behind the latest block before + // we return a CheckBlockTooOld error + checkBlockTooOldRange = 128 +) + +type checkResult struct { + cr []ocr2keepers.CheckResult + err error +} + +func (r *EvmRegistry) CheckUpkeeps(ctx context.Context, keys ...ocr2keepers.UpkeepPayload) ([]ocr2keepers.CheckResult, error) { + r.lggr.Debugw("Checking upkeeps", "upkeeps", keys) + for i := range keys { + if keys[i].Trigger.BlockNumber == 0 { // check block was not populated, use latest + latest := r.bs.latestBlock.Load() + if latest == nil { + return nil, fmt.Errorf("no latest block available") + } + copy(keys[i].Trigger.BlockHash[:], latest.Hash[:]) + keys[i].Trigger.BlockNumber = latest.Number + r.lggr.Debugf("Check upkeep key had no trigger block number, using 
latest block %v", keys[i].Trigger.BlockNumber) + } + } + + chResult := make(chan checkResult, 1) + + r.threadCtrl.Go(func(ctx context.Context) { + r.doCheck(ctx, keys, chResult) + }) + + select { + case rs := <-chResult: + result := make([]ocr2keepers.CheckResult, len(rs.cr)) + copy(result, rs.cr) + return result, rs.err + case <-ctx.Done(): + // safety on context done to provide an error on context cancellation + // contract calls through the geth wrappers are a bit of a black box + // so this safety net ensures contexts are fully respected and contract + // call functions have a more graceful closure outside the scope of + // CheckUpkeep needing to return immediately. + return nil, fmt.Errorf("%w: failed to check upkeep on registry", ErrContextCancelled) + } +} + +func (r *EvmRegistry) doCheck(ctx context.Context, keys []ocr2keepers.UpkeepPayload, chResult chan checkResult) { + upkeepResults, err := r.checkUpkeeps(ctx, keys) + if err != nil { + chResult <- checkResult{ + err: err, + } + return + } + + upkeepResults = r.streams.Lookup(ctx, upkeepResults) + + upkeepResults, err = r.simulatePerformUpkeeps(ctx, upkeepResults) + if err != nil { + chResult <- checkResult{ + err: err, + } + return + } + + chResult <- checkResult{ + cr: upkeepResults, + } +} + +// getBlockAndUpkeepId retrieves check block number, block hash from trigger and upkeep id +func (r *EvmRegistry) getBlockAndUpkeepId(upkeepID ocr2keepers.UpkeepIdentifier, trigger ocr2keepers.Trigger) (*big.Int, common.Hash, *big.Int) { + block := new(big.Int).SetInt64(int64(trigger.BlockNumber)) + return block, common.BytesToHash(trigger.BlockHash[:]), upkeepID.BigInt() +} + +func (r *EvmRegistry) getBlockHash(blockNumber *big.Int) (common.Hash, error) { + blocks, err := r.poller.GetBlocksRange(r.ctx, []uint64{blockNumber.Uint64()}) + if err != nil { + return [32]byte{}, err + } + if len(blocks) == 0 { + return [32]byte{}, fmt.Errorf("could not find block %d in log poller", blockNumber.Uint64()) + } + + return 
blocks[0].BlockHash, nil +} + +// verifyCheckBlock checks that the check block and hash are valid, returns the pipeline execution state and retryable +func (r *EvmRegistry) verifyCheckBlock(_ context.Context, checkBlock, upkeepId *big.Int, checkHash common.Hash) (state encoding.PipelineExecutionState, retryable bool) { + // verify check block number and hash are valid + h, ok := r.bs.queryBlocksMap(checkBlock.Int64()) + // if this block number/hash combo exists in block subscriber, this check block and hash still exist on chain and are valid + // the block hash in block subscriber might be slightly outdated, if it doesn't match then we fetch the latest from RPC. + if ok && h == checkHash.Hex() { + r.lggr.Debugf("check block hash %s exists on chain at block number %d for upkeepId %s", checkHash.Hex(), checkBlock, upkeepId) + return encoding.NoPipelineError, false + } + r.lggr.Warnf("check block %s does not exist in block subscriber or hash does not match for upkeepId %s. this may be caused by block subscriber outdated due to re-org, querying eth client to confirm", checkBlock, upkeepId) + b, err := r.getBlockHash(checkBlock) + if err != nil { + r.lggr.Warnf("failed to query block %s: %s", checkBlock, err.Error()) + return encoding.RpcFlakyFailure, true + } + if checkHash.Hex() != b.Hex() { + r.lggr.Warnf("check block %s hash do not match. 
%s from block subscriber vs %s from trigger for upkeepId %s", checkBlock, h, checkHash.Hex(), upkeepId) + return encoding.CheckBlockInvalid, false + } + return encoding.NoPipelineError, false +} + +// verifyLogExists checks that the log still exists on chain, returns failure reason, pipeline error, and retryable +func (r *EvmRegistry) verifyLogExists(upkeepId *big.Int, p ocr2keepers.UpkeepPayload) (encoding.UpkeepFailureReason, encoding.PipelineExecutionState, bool) { + logBlockNumber := int64(p.Trigger.LogTriggerExtension.BlockNumber) + logBlockHash := common.BytesToHash(p.Trigger.LogTriggerExtension.BlockHash[:]) + checkBlockHash := common.BytesToHash(p.Trigger.BlockHash[:]) + if checkBlockHash.String() == logBlockHash.String() { + // log verification would be covered by checkBlock verification as they are the same. Return early from + // log verificaion. This also helps in preventing some racy conditions when rpc does not return the tx receipt + // for a very new log + return encoding.UpkeepFailureReasonNone, encoding.NoPipelineError, false + } + // if log block number is populated, check log block number and block hash + if logBlockNumber != 0 { + h, ok := r.bs.queryBlocksMap(logBlockNumber) + // if this block number/hash combo exists in block subscriber, this block and tx still exists on chain and is valid + // the block hash in block subscriber might be slightly outdated, if it doesn't match then we fetch the latest from RPC. 
+ if ok && h == logBlockHash.Hex() { + r.lggr.Debugf("tx hash %s exists on chain at block number %d, block hash %s for upkeepId %s", hexutil.Encode(p.Trigger.LogTriggerExtension.TxHash[:]), logBlockHash.Hex(), logBlockNumber, upkeepId) + return encoding.UpkeepFailureReasonNone, encoding.NoPipelineError, false + } + // if this block does not exist in the block subscriber, the block which this log lived on was probably re-orged + // hence, check eth client for this log's tx hash to confirm + r.lggr.Debugf("log block %d does not exist in block subscriber or block hash does not match for upkeepId %s. this may be caused by block subscriber outdated due to re-org, querying eth client to confirm", logBlockNumber, upkeepId) + } else { + r.lggr.Debugf("log block not provided, querying eth client for tx hash %s for upkeepId %s", hexutil.Encode(p.Trigger.LogTriggerExtension.TxHash[:]), upkeepId) + } + // query eth client as a fallback + bn, bh, err := core.GetTxBlock(r.ctx, r.client, p.Trigger.LogTriggerExtension.TxHash) + if err != nil { + // primitive way of checking errors + if strings.Contains(err.Error(), "missing required field") || strings.Contains(err.Error(), "not found") { + return encoding.UpkeepFailureReasonTxHashNoLongerExists, encoding.NoPipelineError, false + } + r.lggr.Warnf("failed to query tx hash %s for upkeepId %s: %s", hexutil.Encode(p.Trigger.LogTriggerExtension.TxHash[:]), upkeepId, err.Error()) + return encoding.UpkeepFailureReasonNone, encoding.RpcFlakyFailure, true + } + if bn == nil { + r.lggr.Warnf("tx hash %s does not exist on chain for upkeepId %s.", hexutil.Encode(p.Trigger.LogTriggerExtension.TxHash[:]), upkeepId) + return encoding.UpkeepFailureReasonTxHashNoLongerExists, encoding.NoPipelineError, false + } + if bh.Hex() != logBlockHash.Hex() { + r.lggr.Warnf("tx hash %s reorged from expected blockhash %s to %s for upkeepId %s.", hexutil.Encode(p.Trigger.LogTriggerExtension.TxHash[:]), logBlockHash.Hex(), bh.Hex(), upkeepId) + return 
// checkUpkeeps verifies check block (and, for log upkeeps, the triggering log),
// then batch-eth_calls checkUpkeep on the registry for every payload that
// passed verification. Returns one CheckResult per payload, in input order.
func (r *EvmRegistry) checkUpkeeps(ctx context.Context, payloads []ocr2keepers.UpkeepPayload) ([]ocr2keepers.CheckResult, error) {
	var (
		checkReqs    []rpc.BatchElem
		checkResults []*string
		results      = make([]ocr2keepers.CheckResult, len(payloads))
	)
	// indices maps position in the batch request back to position in payloads
	indices := map[int]int{}

	for i, p := range payloads {
		block, checkHash, upkeepId := r.getBlockAndUpkeepId(p.UpkeepID, p.Trigger)
		state, retryable := r.verifyCheckBlock(ctx, block, upkeepId, checkHash)
		if state != encoding.NoPipelineError {
			results[i] = encoding.GetIneligibleCheckResultWithoutPerformData(p, encoding.UpkeepFailureReasonNone, state, retryable)
			continue
		}

		opts := r.buildCallOpts(ctx, block)
		var payload []byte
		var err error
		uid := &ocr2keepers.UpkeepIdentifier{}
		uid.FromBigInt(upkeepId)
		switch core.GetUpkeepType(*uid) {
		case types.LogTrigger:
			reason, state, retryable := r.verifyLogExists(upkeepId, p)
			if reason != encoding.UpkeepFailureReasonNone || state != encoding.NoPipelineError {
				results[i] = encoding.GetIneligibleCheckResultWithoutPerformData(p, reason, state, retryable)
				continue
			}

			// check data will include the log trigger config
			payload, err = r.abi.Pack("checkUpkeep", upkeepId, p.CheckData)
			if err != nil {
				// pack error, no retryable
				r.lggr.Warnf("failed to pack log trigger checkUpkeep data for upkeepId %s with check data %s: %s", upkeepId, hexutil.Encode(p.CheckData), err)
				results[i] = encoding.GetIneligibleCheckResultWithoutPerformData(p, encoding.UpkeepFailureReasonNone, encoding.PackUnpackDecodeFailed, false)
				continue
			}
		default:
			// checkUpkeep is overloaded on the contract for conditionals and log upkeeps
			// Need to use the first function (checkUpkeep0) for conditionals
			payload, err = r.abi.Pack("checkUpkeep0", upkeepId)
			if err != nil {
				// pack error, no retryable
				r.lggr.Warnf("failed to pack conditional checkUpkeep data for upkeepId %s with check data %s: %s", upkeepId, hexutil.Encode(p.CheckData), err)
				results[i] = encoding.GetIneligibleCheckResultWithoutPerformData(p, encoding.UpkeepFailureReasonNone, encoding.PackUnpackDecodeFailed, false)
				continue
			}
		}
		indices[len(checkReqs)] = i
		// placeholder result; overwritten below once the batch call completes
		results[i] = encoding.GetIneligibleCheckResultWithoutPerformData(p, encoding.UpkeepFailureReasonNone, encoding.NoPipelineError, false)

		var result string
		checkReqs = append(checkReqs, rpc.BatchElem{
			Method: "eth_call",
			Args: []interface{}{
				map[string]interface{}{
					"to":   r.addr.Hex(),
					"data": hexutil.Bytes(payload),
				},
				hexutil.EncodeBig(opts.BlockNumber),
			},
			Result: &result,
		})

		checkResults = append(checkResults, &result)
	}

	if len(checkResults) > 0 {
		// In contrast to CallContext, BatchCallContext only returns errors that have occurred
		// while sending the request. Any error specific to a request is reported through the
		// Error field of the corresponding BatchElem.
		// hence, if BatchCallContext returns an error, it will be an error which will terminate the pipeline
		if err := r.client.BatchCallContext(ctx, checkReqs); err != nil {
			r.lggr.Errorf("failed to batch call for checkUpkeeps: %s", err)
			return nil, err
		}
	}

	for i, req := range checkReqs {
		index := indices[i]
		if req.Error != nil {
			latestBlockNumber := int64(0)
			latestBlock := r.bs.latestBlock.Load()
			if latestBlock != nil {
				latestBlockNumber = int64(latestBlock.Number)
			}
			checkBlock, _, _ := r.getBlockAndUpkeepId(payloads[index].UpkeepID, payloads[index].Trigger)
			// Exploratory: remove reliance on primitive way of checking errors
			blockNotFound := (strings.Contains(req.Error.Error(), "header not found") || strings.Contains(req.Error.Error(), "missing trie node"))
			if blockNotFound && latestBlockNumber-checkBlock.Int64() > checkBlockTooOldRange {
				// Check block not found in RPC and it is too old, non-retryable error
				r.lggr.Warnf("block not found error encountered in check result for upkeepId %s, check block %d, latest block %d: %s", results[index].UpkeepID.String(), checkBlock.Int64(), latestBlockNumber, req.Error)
				results[index].Retryable = false
				results[index].PipelineExecutionState = uint8(encoding.CheckBlockTooOld)
			} else {
				// individual upkeep failed in a batch call, likely a flaky RPC error, consider retryable
				r.lggr.Warnf("rpc error encountered in check result for upkeepId %s: %s", results[index].UpkeepID.String(), req.Error)
				results[index].Retryable = true
				results[index].PipelineExecutionState = uint8(encoding.RpcFlakyFailure)
			}
		} else {
			var err error
			results[index], err = r.packer.UnpackCheckResult(payloads[index], *checkResults[i])
			if err != nil {
				r.lggr.Warnf("failed to unpack check result: %s", err)
			}
		}
	}

	return results, nil
}
+ // hence, if BatchCallContext returns an error, it will be an error which will terminate the pipeline + if err := r.client.BatchCallContext(ctx, checkReqs); err != nil { + r.lggr.Errorf("failed to batch call for checkUpkeeps: %s", err) + return nil, err + } + } + + for i, req := range checkReqs { + index := indices[i] + if req.Error != nil { + latestBlockNumber := int64(0) + latestBlock := r.bs.latestBlock.Load() + if latestBlock != nil { + latestBlockNumber = int64(latestBlock.Number) + } + checkBlock, _, _ := r.getBlockAndUpkeepId(payloads[index].UpkeepID, payloads[index].Trigger) + // Exploratory: remove reliance on primitive way of checking errors + blockNotFound := (strings.Contains(req.Error.Error(), "header not found") || strings.Contains(req.Error.Error(), "missing trie node")) + if blockNotFound && latestBlockNumber-checkBlock.Int64() > checkBlockTooOldRange { + // Check block not found in RPC and it is too old, non-retryable error + r.lggr.Warnf("block not found error encountered in check result for upkeepId %s, check block %d, latest block %d: %s", results[index].UpkeepID.String(), checkBlock.Int64(), latestBlockNumber, req.Error) + results[index].Retryable = false + results[index].PipelineExecutionState = uint8(encoding.CheckBlockTooOld) + } else { + // individual upkeep failed in a batch call, likely a flay RPC error, consider retryable + r.lggr.Warnf("rpc error encountered in check result for upkeepId %s: %s", results[index].UpkeepID.String(), req.Error) + results[index].Retryable = true + results[index].PipelineExecutionState = uint8(encoding.RpcFlakyFailure) + } + } else { + var err error + results[index], err = r.packer.UnpackCheckResult(payloads[index], *checkResults[i]) + if err != nil { + r.lggr.Warnf("failed to unpack check result: %s", err) + } + } + } + + return results, nil +} + +func (r *EvmRegistry) simulatePerformUpkeeps(ctx context.Context, checkResults []ocr2keepers.CheckResult) ([]ocr2keepers.CheckResult, error) { + var ( + 
performReqs = make([]rpc.BatchElem, 0, len(checkResults)) + performResults = make([]*string, 0, len(checkResults)) + performToKeyIdx = make([]int, 0, len(checkResults)) + ) + + for i, cr := range checkResults { + if !cr.Eligible { + continue + } + + block, _, upkeepId := r.getBlockAndUpkeepId(cr.UpkeepID, cr.Trigger) + + opts := r.buildCallOpts(ctx, block) + + // Since checkUpkeep is true, simulate perform upkeep to ensure it doesn't revert + payload, err := r.abi.Pack("simulatePerformUpkeep", upkeepId, cr.PerformData) + if err != nil { + // pack failed, not retryable + r.lggr.Warnf("failed to pack perform data %s for %s: %s", hexutil.Encode(cr.PerformData), upkeepId, err) + checkResults[i].Eligible = false + checkResults[i].PipelineExecutionState = uint8(encoding.PackUnpackDecodeFailed) + continue + } + + var result string + performReqs = append(performReqs, rpc.BatchElem{ + Method: "eth_call", + Args: []interface{}{ + map[string]interface{}{ + "to": r.addr.Hex(), + "data": hexutil.Bytes(payload), + }, + hexutil.EncodeBig(opts.BlockNumber), + }, + Result: &result, + }) + + performResults = append(performResults, &result) + performToKeyIdx = append(performToKeyIdx, i) + } + + if len(performReqs) > 0 { + if err := r.client.BatchCallContext(ctx, performReqs); err != nil { + r.lggr.Errorf("failed to batch call for simulatePerformUpkeeps: %s", err) + return nil, err + } + } + + for i, req := range performReqs { + idx := performToKeyIdx[i] + if req.Error != nil { + // individual upkeep failed in a batch call, retryable + r.lggr.Warnf("failed to simulate upkeepId %s: %s", checkResults[idx].UpkeepID.String(), req.Error) + checkResults[idx].Retryable = true + checkResults[idx].Eligible = false + checkResults[idx].PipelineExecutionState = uint8(encoding.RpcFlakyFailure) + continue + } + + state, simulatePerformSuccess, err := r.packer.UnpackPerformResult(*performResults[i]) + if err != nil { + // unpack failed, not retryable + r.lggr.Warnf("failed to unpack simulate 
performUpkeep result for upkeepId %s for state %d: %s", checkResults[idx].UpkeepID.String(), state, req.Error) + checkResults[idx].Retryable = false + checkResults[idx].Eligible = false + checkResults[idx].PipelineExecutionState = uint8(state) + continue + } + + if !simulatePerformSuccess { + r.lggr.Warnf("upkeepId %s is not eligible after simulation of perform", checkResults[idx].UpkeepID.String()) + checkResults[performToKeyIdx[i]].Eligible = false + checkResults[performToKeyIdx[i]].IneligibilityReason = uint8(encoding.UpkeepFailureReasonSimulationFailed) + } + } + + return checkResults, nil +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go new file mode 100644 index 00000000..b3fbee19 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_check_pipeline_test.go @@ -0,0 +1,700 @@ +package evm + +import ( + "context" + "fmt" + "math/big" + "strings" + "sync/atomic" + "testing" + + types3 "github.com/goplugin/plugin-automation/pkg/v3/types" + + types2 "github.com/goplugin/plugin-common/pkg/types" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rpc" + "github.com/patrickmn/go-cache" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + evmClientMocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/streams_lookup_compatible_interface" + 
// TestRegistry_GetBlockAndUpkeepId verifies trigger-block/hash/upkeep-id
// extraction, including zero-value triggers and payloads.
func TestRegistry_GetBlockAndUpkeepId(t *testing.T) {
	r := &EvmRegistry{}
	tests := []struct {
		name       string
		input      ocr2keepers.UpkeepPayload
		wantBlock  *big.Int
		wantUpkeep *big.Int
	}{
		{
			"happy flow",
			ocr2keepers.UpkeepPayload{
				UpkeepID: core.UpkeepIDFromInt("10"),
				Trigger: ocr2keepers.Trigger{
					BlockNumber: 1,
					BlockHash:   common.HexToHash("0x1"),
				},
			},
			big.NewInt(1),
			big.NewInt(10),
		},
		{
			"empty trigger",
			ocr2keepers.UpkeepPayload{
				UpkeepID: core.UpkeepIDFromInt("10"),
			},
			big.NewInt(0),
			big.NewInt(10),
		},
		{
			"empty payload",
			ocr2keepers.UpkeepPayload{},
			big.NewInt(0),
			big.NewInt(0),
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			block, _, upkeep := r.getBlockAndUpkeepId(tc.input.UpkeepID, tc.input.Trigger)
			assert.Equal(t, tc.wantBlock, block)
			assert.Equal(t, tc.wantUpkeep.String(), upkeep.String())
		})
	}
}

// TestRegistry_VerifyCheckBlock covers the block-subscriber fast path and the
// log-poller fallback of verifyCheckBlock for matching/mismatching hashes.
func TestRegistry_VerifyCheckBlock(t *testing.T) {
	lggr := logger.TestLogger(t)
	upkeepId := ocr2keepers.UpkeepIdentifier{}
	upkeepId.FromBigInt(big.NewInt(12345))
	tests := []struct {
		name        string
		checkBlock  *big.Int
		latestBlock *ocr2keepers.BlockKey
		upkeepId    *big.Int
		checkHash   common.Hash
		payload     ocr2keepers.UpkeepPayload
		blocks      map[int64]string // block subscriber contents (number -> hash)
		poller      logpoller.LogPoller
		state       encoding.PipelineExecutionState
		retryable   bool
		makeEthCall bool
	}{
		{
			name:        "for an invalid check block number, if hash does not match the check hash, return CheckBlockInvalid",
			checkBlock:  big.NewInt(500),
			latestBlock: &ocr2keepers.BlockKey{Number: 560},
			upkeepId:    big.NewInt(12345),
			checkHash:   common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"),
			payload: ocr2keepers.UpkeepPayload{
				UpkeepID: upkeepId,
				Trigger:  ocr2keepers.NewTrigger(500, common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83")),
				WorkID:   "work",
			},
			poller: &mockLogPoller{
				GetBlocksRangeFn: func(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) {
					return []logpoller.LogPollerBlock{
						{
							BlockHash: common.HexToHash("abcdef"),
						},
					}, nil
				},
			},
			state:       encoding.CheckBlockInvalid,
			retryable:   false,
			makeEthCall: true,
		},
		{
			name:        "for an invalid check block number, if hash does match the check hash, return NoPipelineError",
			checkBlock:  big.NewInt(500),
			latestBlock: &ocr2keepers.BlockKey{Number: 560},
			upkeepId:    big.NewInt(12345),
			checkHash:   common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"),
			payload: ocr2keepers.UpkeepPayload{
				UpkeepID: upkeepId,
				Trigger:  ocr2keepers.NewTrigger(500, common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83")),
				WorkID:   "work",
			},
			poller: &mockLogPoller{
				GetBlocksRangeFn: func(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) {
					return []logpoller.LogPollerBlock{
						{
							BlockHash: common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"),
						},
					}, nil
				},
			},
			state:       encoding.NoPipelineError,
			retryable:   false,
			makeEthCall: true,
		},
		{
			name:        "check block hash does not match",
			checkBlock:  big.NewInt(500),
			latestBlock: &ocr2keepers.BlockKey{Number: 560},
			upkeepId:    big.NewInt(12345),
			checkHash:   common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"),
			payload: ocr2keepers.UpkeepPayload{
				UpkeepID: upkeepId,
				Trigger:  ocr2keepers.NewTrigger(500, common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83")),
				WorkID:   "work",
			},
			poller: &mockLogPoller{
				GetBlocksRangeFn: func(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) {
					return []logpoller.LogPollerBlock{
						{
							BlockHash: common.HexToHash("0xcba5cf9e2bb32373c76015384e1098912d9510a72481c78057fcb088209167de"),
						},
					}, nil
				},
			},
			blocks: map[int64]string{
				500: "0xa518faeadcc423338c62572da84dda35fe44b34f521ce88f6081b703b250cca4",
			},
			state: encoding.CheckBlockInvalid,
		},
		{
			name:        "check block is valid",
			checkBlock:  big.NewInt(500),
			latestBlock: &ocr2keepers.BlockKey{Number: 560},
			upkeepId:    big.NewInt(12345),
			checkHash:   common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"),
			payload: ocr2keepers.UpkeepPayload{
				UpkeepID: upkeepId,
				Trigger:  ocr2keepers.NewTrigger(500, common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83")),
				WorkID:   "work",
			},
			blocks: map[int64]string{
				500: "0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83",
			},
			state: encoding.NoPipelineError,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			bs := &BlockSubscriber{
				latestBlock: atomic.Pointer[ocr2keepers.BlockKey]{},
				blocks:      tc.blocks,
			}
			bs.latestBlock.Store(tc.latestBlock)
			e := &EvmRegistry{
				lggr:   lggr,
				bs:     bs,
				poller: tc.poller,
			}
			if tc.makeEthCall {
				client := new(evmClientMocks.Client)
				client.On("BlockByNumber", mock.Anything, tc.checkBlock).Return(nil, fmt.Errorf("error"))
				e.client = client
			}

			state, retryable := e.verifyCheckBlock(testutils.Context(t), tc.checkBlock, tc.upkeepId, tc.checkHash)
			assert.Equal(t, tc.state, state)
			assert.Equal(t, tc.retryable, retryable)
		})
	}
}

// mockLogPoller embeds logpoller.LogPoller and overrides only the methods the
// tests exercise, delegating to per-test function fields.
type mockLogPoller struct {
	logpoller.LogPoller
	GetBlocksRangeFn func(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error)
	IndexedLogsFn    func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error)
}

func (p *mockLogPoller) GetBlocksRange(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) {
	return p.GetBlocksRangeFn(ctx, numbers, qopts...)
}

func (p *mockLogPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) {
	return p.IndexedLogsFn(eventSig, address, topicIndex, topicValues, confs, qopts...)
}
[]uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) + IndexedLogsFn func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) +} + +func (p *mockLogPoller) GetBlocksRange(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]logpoller.LogPollerBlock, error) { + return p.GetBlocksRangeFn(ctx, numbers, qopts...) +} + +func (p *mockLogPoller) IndexedLogs(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return p.IndexedLogsFn(eventSig, address, topicIndex, topicValues, confs, qopts...) +} + +func TestRegistry_VerifyLogExists(t *testing.T) { + lggr := logger.TestLogger(t) + upkeepId := ocr2keepers.UpkeepIdentifier{} + upkeepId.FromBigInt(big.NewInt(12345)) + + extension := &ocr2keepers.LogTriggerExtension{ + TxHash: common.HexToHash("0xc8def8abdcf3a4eaaf6cc13bff3e4e2a7168d86ea41dbbf97451235aa76c3651"), + Index: 0, + BlockHash: common.HexToHash("0x3df0e926f3e21ec1195ffe007a2899214905eb02e768aa89ce0b94accd7f3d71"), + BlockNumber: 500, + } + extension1 := &ocr2keepers.LogTriggerExtension{ + TxHash: common.HexToHash("0xc8def8abdcf3a4eaaf6cc13bff3e4e2a7168d86ea41dbbf97451235aa76c3651"), + Index: 0, + BlockHash: common.HexToHash("0x3df0e926f3e21ec1195ffe007a2899214905eb02e768aa89ce0b94accd7f3d71"), + BlockNumber: 0, + } + + tests := []struct { + name string + upkeepId *big.Int + payload ocr2keepers.UpkeepPayload + blocks map[int64]string + makeEthCall bool + reason encoding.UpkeepFailureReason + state encoding.PipelineExecutionState + retryable bool + ethCallErr error + receipt *types.Receipt + }{ + { + name: "log block number invalid", + upkeepId: big.NewInt(12345), + payload: ocr2keepers.UpkeepPayload{ + UpkeepID: upkeepId, + Trigger: ocr2keepers.NewLogTrigger(550, 
common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"), extension), + WorkID: "work", + }, + reason: encoding.UpkeepFailureReasonNone, + state: encoding.RpcFlakyFailure, + retryable: true, + makeEthCall: true, + ethCallErr: fmt.Errorf("error"), + }, + { + name: "log block no longer exists", + upkeepId: big.NewInt(12345), + payload: ocr2keepers.UpkeepPayload{ + UpkeepID: upkeepId, + Trigger: ocr2keepers.NewLogTrigger(550, common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"), extension), + WorkID: "work", + }, + reason: encoding.UpkeepFailureReasonTxHashNoLongerExists, + retryable: false, + makeEthCall: true, + blocks: map[int64]string{ + 500: "0xb2173b4b75f23f56b7b2b6b2cc5fa9ed1079b9d1655b12b40fdb4dbf59006419", + }, + receipt: &types.Receipt{Status: 0}, + }, + { + name: "eth client returns a matching block but different hash", + upkeepId: big.NewInt(12345), + payload: ocr2keepers.UpkeepPayload{ + UpkeepID: upkeepId, + Trigger: ocr2keepers.NewLogTrigger(550, common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"), extension1), + WorkID: "work", + }, + reason: encoding.UpkeepFailureReasonTxHashReorged, + retryable: false, + blocks: map[int64]string{ + 500: "0xa518faeadcc423338c62572da84dda35fe44b34f521ce88f6081b703b250cca4", + }, + makeEthCall: true, + receipt: &types.Receipt{ + Status: 1, + BlockNumber: big.NewInt(550), + BlockHash: common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"), + }, + }, + { + name: "eth client returns a matching block", + upkeepId: big.NewInt(12345), + payload: ocr2keepers.UpkeepPayload{ + UpkeepID: upkeepId, + Trigger: ocr2keepers.NewLogTrigger(550, common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"), extension1), + WorkID: "work", + }, + reason: encoding.UpkeepFailureReasonNone, + retryable: false, + blocks: map[int64]string{ + 500: 
"0xa518faeadcc423338c62572da84dda35fe44b34f521ce88f6081b703b250cca4", + }, + makeEthCall: true, + receipt: &types.Receipt{ + Status: 1, + BlockNumber: big.NewInt(550), + BlockHash: common.HexToHash("0x3df0e926f3e21ec1195ffe007a2899214905eb02e768aa89ce0b94accd7f3d71"), + }, + }, + { + name: "log block is valid", + upkeepId: big.NewInt(12345), + payload: ocr2keepers.UpkeepPayload{ + UpkeepID: upkeepId, + Trigger: ocr2keepers.NewLogTrigger(550, common.HexToHash("0x5bff03de234fe771ac0d685f9ee0fb0b757ea02ec9e6f10e8e2ee806db1b6b83"), extension), + WorkID: "work", + }, + reason: encoding.UpkeepFailureReasonNone, + retryable: false, + blocks: map[int64]string{ + 500: "0x3df0e926f3e21ec1195ffe007a2899214905eb02e768aa89ce0b94accd7f3d71", + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + bs := &BlockSubscriber{ + blocks: tc.blocks, + } + e := &EvmRegistry{ + lggr: lggr, + bs: bs, + ctx: testutils.Context(t), + } + + if tc.makeEthCall { + client := new(evmClientMocks.Client) + client.On("CallContext", mock.Anything, mock.Anything, "eth_getTransactionReceipt", common.BytesToHash(tc.payload.Trigger.LogTriggerExtension.TxHash[:])). 
+ Return(tc.ethCallErr).Run(func(args mock.Arguments) { + if tc.receipt != nil { + res := args.Get(1).(*types.Receipt) + res.Status = tc.receipt.Status + res.TxHash = tc.receipt.TxHash + res.BlockNumber = tc.receipt.BlockNumber + res.BlockHash = tc.receipt.BlockHash + } + }) + e.client = client + } + + reason, state, retryable := e.verifyLogExists(tc.upkeepId, tc.payload) + assert.Equal(t, tc.reason, reason) + assert.Equal(t, tc.state, state) + assert.Equal(t, tc.retryable, retryable) + }) + } +} + +func TestRegistry_CheckUpkeeps(t *testing.T) { + lggr := logger.TestLogger(t) + uid0 := core.GenUpkeepID(types3.UpkeepType(0), "p0") + uid1 := core.GenUpkeepID(types3.UpkeepType(1), "p1") + uid2 := core.GenUpkeepID(types3.UpkeepType(1), "p2") + + extension1 := &ocr2keepers.LogTriggerExtension{ + TxHash: common.HexToHash("0xc8def8abdcf3a4eaaf6cc13bff3e4e2a7168d86ea41dbbf97451235aa76c3651"), + Index: 0, + BlockHash: common.HexToHash("0x0919c83363b439ea634ce2b576cf3e30db26b340fb7a12058c2fcc401bd04ba0"), + BlockNumber: 550, + } + extension2 := &ocr2keepers.LogTriggerExtension{ + TxHash: common.HexToHash("0xc8def8abdcf3a4eaaf6cc13bff3e4e2a7168d86ea41dbbf97451235aa76c3651"), + Index: 0, + BlockHash: common.HexToHash("0x9840e5b709bfccf6a1b44f34c884bc39403f57923f3f5ead6243cc090546b857"), + BlockNumber: 550, + } + + trigger0 := ocr2keepers.NewTrigger(575, common.HexToHash("0x1c77db0abe32327cf3ea9de2aadf79876f9e6b6dfcee9d4719a8a2dc8ca289d0")) + trigger1 := ocr2keepers.NewLogTrigger(560, common.HexToHash("0x9840e5b709bfccf6a1b44f34c884bc39403f57923f3f5ead6243cc090546b857"), extension1) + trigger2 := ocr2keepers.NewLogTrigger(570, common.HexToHash("0x1222d75217e2dd461cc77e4091c37abe76277430d97f1963a822b4e94ebb83fc"), extension2) + + tests := []struct { + name string + inputs []ocr2keepers.UpkeepPayload + blocks map[int64]string + latestBlock *ocr2keepers.BlockKey + results []ocr2keepers.CheckResult + err error + ethCalls map[string]bool + receipts map[string]*types.Receipt + poller 
logpoller.LogPoller + ethCallErrors map[string]error + }{ + { + name: "check upkeeps with different upkeep types", + inputs: []ocr2keepers.UpkeepPayload{ + { + UpkeepID: uid0, + Trigger: trigger0, + WorkID: "work0", + }, + { + UpkeepID: uid1, + Trigger: trigger1, + WorkID: "work1", + }, + { + UpkeepID: uid2, + Trigger: trigger2, + WorkID: "work2", + // check data byte slice length cannot be odd number, abi pack error + CheckData: []byte{0, 0, 0, 0, 1}, + }, + }, + blocks: map[int64]string{ + 550: "0x9840e5b709bfccf6a1b44f34c884bc39403f57923f3f5ead6243cc090546b857", + 560: "0x9840e5b709bfccf6a1b44f34c884bc39403f57923f3f5ead6243cc090546b857", + 570: "0x1222d75217e2dd461cc77e4091c37abe76277430d97f1963a822b4e94ebb83fc", + 575: "0x9840e5b709bfccf6a1b44f34c884bc39403f57923f3f5ead6243cc090546b857", + }, + latestBlock: &ocr2keepers.BlockKey{Number: 580}, + results: []ocr2keepers.CheckResult{ + { + PipelineExecutionState: uint8(encoding.CheckBlockInvalid), + Retryable: false, + Eligible: false, + IneligibilityReason: 0, + UpkeepID: uid0, + Trigger: trigger0, + WorkID: "work0", + GasAllocated: 0, + PerformData: nil, + FastGasWei: big.NewInt(0), + LinkNative: big.NewInt(0), + }, + { + PipelineExecutionState: uint8(encoding.RpcFlakyFailure), + Retryable: true, + Eligible: false, + IneligibilityReason: 0, + UpkeepID: uid1, + Trigger: trigger1, + WorkID: "work1", + GasAllocated: 0, + PerformData: nil, + FastGasWei: big.NewInt(0), + LinkNative: big.NewInt(0), + }, + { + PipelineExecutionState: uint8(encoding.PackUnpackDecodeFailed), + Retryable: false, + Eligible: false, + IneligibilityReason: 0, + UpkeepID: uid2, + Trigger: trigger2, + WorkID: "work2", + GasAllocated: 0, + PerformData: nil, + FastGasWei: big.NewInt(0), + LinkNative: big.NewInt(0), + }, + }, + ethCalls: map[string]bool{ + uid1.String(): true, + }, + receipts: map[string]*types.Receipt{}, + poller: &mockLogPoller{ + GetBlocksRangeFn: func(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) 
([]logpoller.LogPollerBlock, error) { + return []logpoller.LogPollerBlock{ + { + BlockHash: common.HexToHash("0xcba5cf9e2bb32373c76015384e1098912d9510a72481c78057fcb088209167de"), + }, + }, nil + }, + }, + ethCallErrors: map[string]error{ + uid1.String(): fmt.Errorf("error"), + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + bs := &BlockSubscriber{ + latestBlock: atomic.Pointer[ocr2keepers.BlockKey]{}, + blocks: tc.blocks, + } + bs.latestBlock.Store(tc.latestBlock) + e := &EvmRegistry{ + lggr: lggr, + bs: bs, + poller: tc.poller, + } + client := new(evmClientMocks.Client) + for _, i := range tc.inputs { + uid := i.UpkeepID.String() + if tc.ethCalls[uid] { + client.On("CallContext", mock.Anything, mock.Anything, "eth_getTransactionReceipt", common.HexToHash("0xc8def8abdcf3a4eaaf6cc13bff3e4e2a7168d86ea41dbbf97451235aa76c3651")). + Return(tc.ethCallErrors[uid]).Run(func(args mock.Arguments) { + receipt := tc.receipts[uid] + if receipt != nil { + res := args.Get(1).(*types.Receipt) + res.Status = receipt.Status + res.TxHash = receipt.TxHash + res.BlockNumber = receipt.BlockNumber + res.BlockHash = receipt.BlockHash + } + }) + } + } + e.client = client + + results, err := e.checkUpkeeps(testutils.Context(t), tc.inputs) + assert.Equal(t, tc.results, results) + assert.Equal(t, tc.err, err) + }) + } +} + +func TestRegistry_SimulatePerformUpkeeps(t *testing.T) { + uid0 := core.GenUpkeepID(types3.UpkeepType(0), "p0") + uid1 := core.GenUpkeepID(types3.UpkeepType(1), "p1") + uid2 := core.GenUpkeepID(types3.UpkeepType(1), "p2") + + extension1 := &ocr2keepers.LogTriggerExtension{ + TxHash: common.HexToHash("0xc8def8abdcf3a4eaaf6cc13bff3e4e2a7168d86ea41dbbf97451235aa76c3651"), + Index: 0, + BlockHash: common.HexToHash("0x9840e5b709bfccf6a1b44f34c884bc39403f57923f3f5ead6243cc090546b857"), + BlockNumber: 550, + } + + trigger0 := ocr2keepers.NewTrigger(150, common.HexToHash("0x1c77db0abe32327cf3ea9de2aadf79876f9e6b6dfcee9d4719a8a2dc8ca289d0")) + 
trigger1 := ocr2keepers.NewLogTrigger(570, common.HexToHash("0x1222d75217e2dd461cc77e4091c37abe76277430d97f1963a822b4e94ebb83fc"), extension1) + trigger2 := ocr2keepers.NewLogTrigger(570, common.HexToHash("0x1222d75217e2dd461cc77e4091c37abe76277430d97f1963a822b4e94ebb83fc"), extension1) + + cr0 := ocr2keepers.CheckResult{ + PipelineExecutionState: uint8(encoding.CheckBlockTooOld), + Retryable: false, + Eligible: false, + IneligibilityReason: 0, + UpkeepID: uid0, + Trigger: trigger0, + WorkID: "work0", + GasAllocated: 0, + PerformData: nil, + FastGasWei: big.NewInt(0), + LinkNative: big.NewInt(0), + } + + tests := []struct { + name string + inputs []ocr2keepers.CheckResult + results []ocr2keepers.CheckResult + err error + }{ + { + name: "simulate multiple upkeeps", + inputs: []ocr2keepers.CheckResult{ + cr0, + { + PipelineExecutionState: 0, + Retryable: false, + Eligible: true, + IneligibilityReason: 0, + UpkeepID: uid1, + Trigger: trigger1, + WorkID: "work1", + GasAllocated: 20000, + PerformData: []byte{0, 0, 0, 1, 2, 3}, + FastGasWei: big.NewInt(20000), + LinkNative: big.NewInt(20000), + }, + { + PipelineExecutionState: 0, + Retryable: false, + Eligible: true, + IneligibilityReason: 0, + UpkeepID: uid2, + Trigger: trigger2, + WorkID: "work2", + GasAllocated: 20000, + PerformData: []byte{0, 0, 0, 1, 2, 3}, + FastGasWei: big.NewInt(20000), + LinkNative: big.NewInt(20000), + }, + }, + results: []ocr2keepers.CheckResult{ + cr0, + { + PipelineExecutionState: uint8(encoding.RpcFlakyFailure), + Retryable: true, + Eligible: false, + IneligibilityReason: 0, + UpkeepID: uid1, + Trigger: trigger1, + WorkID: "work1", + GasAllocated: 20000, + PerformData: []byte{0, 0, 0, 1, 2, 3}, + FastGasWei: big.NewInt(20000), + LinkNative: big.NewInt(20000), + }, + { + PipelineExecutionState: uint8(encoding.PackUnpackDecodeFailed), + Retryable: false, + Eligible: false, + IneligibilityReason: 0, + UpkeepID: uid2, + Trigger: trigger2, + WorkID: "work2", + GasAllocated: 20000, + PerformData: 
[]byte{0, 0, 0, 1, 2, 3}, + FastGasWei: big.NewInt(20000), + LinkNative: big.NewInt(20000), + }, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + e := setupEVMRegistry(t) + client := new(evmClientMocks.Client) + client.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && b[0].Method == "eth_call" && b[1].Method == "eth_call" + })).Return(nil). + Run(func(args mock.Arguments) { + be := args.Get(1).([]rpc.BatchElem) + be[0].Error = fmt.Errorf("error") + res := "0x0001" + be[1].Result = res + }).Once() + e.client = client + + results, err := e.simulatePerformUpkeeps(testutils.Context(t), tc.inputs) + assert.Equal(t, tc.results, results) + assert.Equal(t, tc.err, err) + }) + } +} + +// setups up an evm registry for tests. +func setupEVMRegistry(t *testing.T) *EvmRegistry { + lggr := logger.TestLogger(t) + addr := common.HexToAddress("0x6cA639822c6C241Fa9A7A6b5032F6F7F1C513CAD") + keeperRegistryABI, err := abi.JSON(strings.NewReader(i_keeper_registry_master_wrapper_2_1.IKeeperRegistryMasterABI)) + require.Nil(t, err, "need registry abi") + streamsLookupCompatibleABI, err := abi.JSON(strings.NewReader(streams_lookup_compatible_interface.StreamsLookupCompatibleInterfaceABI)) + require.Nil(t, err, "need mercury abi") + var logPoller logpoller.LogPoller + mockReg := mocks.NewRegistry(t) + mockHttpClient := mocks.NewHttpClient(t) + client := evmClientMocks.NewClient(t) + + r := &EvmRegistry{ + lggr: lggr, + poller: logPoller, + addr: addr, + client: client, + logProcessed: make(map[string]bool), + registry: mockReg, + abi: keeperRegistryABI, + active: NewActiveUpkeepList(), + packer: encoding.NewAbiPacker(), + headFunc: func(ocr2keepers.BlockKey) {}, + chLog: make(chan logpoller.Log, 1000), + mercury: &MercuryConfig{ + cred: &types2.MercuryCredentials{ + LegacyURL: "https://google.old.com", + URL: "https://google.com", + Username: "FakeClientID", + Password: "FakeClientKey", + }, + 
Abi: streamsLookupCompatibleABI, + AllowListCache: cache.New(defaultAllowListExpiration, cleanupInterval), + }, + hc: mockHttpClient, + } + return r +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go new file mode 100644 index 00000000..4e8373d2 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/registry_test.go @@ -0,0 +1,564 @@ +package evm + +import ( + "context" + "errors" + "fmt" + "math/big" + "testing" + "time" + + types2 "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + coreTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + types3 "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_utils_2_1" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +func TestPollLogs(t *testing.T) { + tests := []struct { + Name string + LastPoll int64 + Address common.Address + ExpectedLastPoll int64 + ExpectedErr error + LatestBlock *struct { + 
OutputBlock int64 + OutputErr error + } + LogsWithSigs *struct { + InputStart int64 + InputEnd int64 + OutputLogs []logpoller.Log + OutputErr error + } + }{ + { + Name: "LatestBlockError", + ExpectedErr: ErrHeadNotAvailable, + LatestBlock: &struct { + OutputBlock int64 + OutputErr error + }{ + OutputBlock: 0, + OutputErr: fmt.Errorf("test error output"), + }, + }, + { + Name: "LastHeadPollIsLatestHead", + LastPoll: 500, + ExpectedLastPoll: 500, + ExpectedErr: nil, + LatestBlock: &struct { + OutputBlock int64 + OutputErr error + }{ + OutputBlock: 500, + OutputErr: nil, + }, + }, + { + Name: "LastHeadPollNotInitialized", + LastPoll: 0, + ExpectedLastPoll: 500, + ExpectedErr: nil, + LatestBlock: &struct { + OutputBlock int64 + OutputErr error + }{ + OutputBlock: 500, + OutputErr: nil, + }, + }, + { + Name: "LogPollError", + LastPoll: 480, + Address: common.BigToAddress(big.NewInt(1)), + ExpectedLastPoll: 500, + ExpectedErr: ErrLogReadFailure, + LatestBlock: &struct { + OutputBlock int64 + OutputErr error + }{ + OutputBlock: 500, + OutputErr: nil, + }, + LogsWithSigs: &struct { + InputStart int64 + InputEnd int64 + OutputLogs []logpoller.Log + OutputErr error + }{ + InputStart: 250, + InputEnd: 500, + OutputLogs: []logpoller.Log{}, + OutputErr: fmt.Errorf("test output error"), + }, + }, + { + Name: "LogPollSuccess", + LastPoll: 480, + Address: common.BigToAddress(big.NewInt(1)), + ExpectedLastPoll: 500, + ExpectedErr: nil, + LatestBlock: &struct { + OutputBlock int64 + OutputErr error + }{ + OutputBlock: 500, + OutputErr: nil, + }, + LogsWithSigs: &struct { + InputStart int64 + InputEnd int64 + OutputLogs []logpoller.Log + OutputErr error + }{ + InputStart: 250, + InputEnd: 500, + OutputLogs: []logpoller.Log{ + {EvmChainId: ubig.New(big.NewInt(5)), LogIndex: 1}, + {EvmChainId: ubig.New(big.NewInt(6)), LogIndex: 2}, + }, + OutputErr: nil, + }, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + mp := new(mocks.LogPoller) + + if 
test.LatestBlock != nil { + mp.On("LatestBlock", mock.Anything). + Return(logpoller.LogPollerBlock{BlockNumber: test.LatestBlock.OutputBlock}, test.LatestBlock.OutputErr) + } + + if test.LogsWithSigs != nil { + fc := test.LogsWithSigs + mp.On("LogsWithSigs", fc.InputStart, fc.InputEnd, upkeepStateEvents, test.Address, mock.Anything).Return(fc.OutputLogs, fc.OutputErr) + } + + rg := &EvmRegistry{ + addr: test.Address, + lastPollBlock: test.LastPoll, + poller: mp, + chLog: make(chan logpoller.Log, 10), + } + + err := rg.pollUpkeepStateLogs() + + assert.Equal(t, test.ExpectedLastPoll, rg.lastPollBlock) + if test.ExpectedErr != nil { + assert.ErrorIs(t, err, test.ExpectedErr) + } else { + assert.Nil(t, err) + } + + var outputLogCount int + + CheckLoop: + for { + chT := time.NewTimer(20 * time.Millisecond) + select { + case l := <-rg.chLog: + chT.Stop() + if test.LogsWithSigs == nil { + assert.FailNow(t, "logs detected but no logs were expected") + } + outputLogCount++ + assert.Contains(t, test.LogsWithSigs.OutputLogs, l) + case <-chT.C: + break CheckLoop + } + } + + if test.LogsWithSigs != nil { + assert.Equal(t, len(test.LogsWithSigs.OutputLogs), outputLogCount) + } + + mp.AssertExpectations(t) + }) + } +} + +func TestRegistry_refreshLogTriggerUpkeeps(t *testing.T) { + for _, tc := range []struct { + name string + ids []*big.Int + logEventProvider logprovider.LogEventProvider + poller logpoller.LogPoller + registry Registry + packer encoding.Packer + expectsErr bool + wantErr error + }{ + { + name: "an error is returned when fetching indexed logs for IKeeperRegistryMasterUpkeepUnpaused errors", + ids: []*big.Int{ + core.GenUpkeepID(types2.LogTrigger, "abc").BigInt(), + }, + logEventProvider: &mockLogEventProvider{ + RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + // of the ids specified in the test, only one is a valid log trigger upkeep + assert.Equal(t, 1, len(ids)) + return ids, nil + }, + }, + poller: &mockLogPoller{ + IndexedLogsFn: 
func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + if eventSig == (iregistry21.IKeeperRegistryMasterUpkeepUnpaused{}.Topic()) { + return nil, errors.New("indexed logs boom") + } + return nil, nil + }, + }, + expectsErr: true, + wantErr: errors.New("indexed logs boom"), + }, + { + name: "an error is returned when fetching indexed logs for IKeeperRegistryMasterUpkeepTriggerConfigSet errors", + ids: []*big.Int{ + core.GenUpkeepID(types2.LogTrigger, "abc").BigInt(), + core.GenUpkeepID(types2.ConditionTrigger, "abc").BigInt(), + big.NewInt(-1), + }, + logEventProvider: &mockLogEventProvider{ + RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + // of the ids specified in the test, only one is a valid log trigger upkeep + assert.Equal(t, 1, len(ids)) + return ids, nil + }, + }, + poller: &mockLogPoller{ + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + if eventSig == (iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{}.Topic()) { + return nil, errors.New("indexed logs boom") + } + return nil, nil + }, + }, + expectsErr: true, + wantErr: errors.New("indexed logs boom"), + }, + { + name: "an error is returned when parsing the logs using the registry errors", + ids: []*big.Int{ + core.GenUpkeepID(types2.LogTrigger, "abc").BigInt(), + core.GenUpkeepID(types2.ConditionTrigger, "abc").BigInt(), + big.NewInt(-1), + }, + logEventProvider: &mockLogEventProvider{ + RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + // of the ids specified in the test, only one is a valid log trigger upkeep + assert.Equal(t, 1, len(ids)) + return ids, nil + }, + }, + poller: &mockLogPoller{ + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, 
confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return []logpoller.Log{ + {}, + }, nil + }, + }, + registry: &mockRegistry{ + ParseLogFn: func(log coreTypes.Log) (generated.AbigenLog, error) { + return nil, errors.New("parse log boom") + }, + }, + expectsErr: true, + wantErr: errors.New("parse log boom"), + }, + { + name: "an error is returned when registering the filter errors", + ids: []*big.Int{ + core.GenUpkeepID(types2.LogTrigger, "abc").BigInt(), + core.GenUpkeepID(types2.ConditionTrigger, "abc").BigInt(), + big.NewInt(-1), + }, + logEventProvider: &mockLogEventProvider{ + RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + // of the ids specified in the test, only one is a valid log trigger upkeep + assert.Equal(t, 1, len(ids)) + return ids, nil + }, + RegisterFilterFn: func(opts logprovider.FilterOptions) error { + return errors.New("register filter boom") + }, + }, + poller: &mockLogPoller{ + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return []logpoller.Log{ + { + BlockNumber: 1, + }, + { + BlockNumber: 2, + }, + }, nil + }, + }, + registry: &mockRegistry{ + ParseLogFn: func(log coreTypes.Log) (generated.AbigenLog, error) { + if log.BlockNumber == 1 { + return &iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{ + TriggerConfig: []byte{1, 2, 3}, + Id: core.GenUpkeepID(types2.LogTrigger, "abc").BigInt(), + }, nil + } + return &iregistry21.IKeeperRegistryMasterUpkeepUnpaused{ + Id: core.GenUpkeepID(types2.LogTrigger, "abc").BigInt(), + }, nil + }, + GetUpkeepTriggerConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return nil, nil + }, + }, + packer: &mockPacker{ + UnpackLogTriggerConfigFn: func(raw []byte) (automation_utils_2_1.LogTriggerConfig, error) { + return automation_utils_2_1.LogTriggerConfig{}, nil + }, + }, + expectsErr: true, + 
wantErr: errors.New("failed to update trigger config for upkeep id 452312848583266388373324160190187140521564213162920931037143039228013182976: failed to register log filter: register filter boom"), + }, + { + name: "log trigger upkeeps are refreshed without error", + ids: []*big.Int{ + core.GenUpkeepID(types2.LogTrigger, "abc").BigInt(), + core.GenUpkeepID(types2.LogTrigger, "def").BigInt(), + core.GenUpkeepID(types2.ConditionTrigger, "abc").BigInt(), + big.NewInt(-1), + }, + logEventProvider: &mockLogEventProvider{ + RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + // of the ids specified in the test, only two are a valid log trigger upkeep + assert.Equal(t, 2, len(ids)) + return ids, nil + }, + RegisterFilterFn: func(opts logprovider.FilterOptions) error { + return nil + }, + }, + poller: &mockLogPoller{ + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return []logpoller.Log{ + { + BlockNumber: 2, + }, + { + BlockNumber: 1, + }, + }, nil + }, + }, + registry: &mockRegistry{ + ParseLogFn: func(log coreTypes.Log) (generated.AbigenLog, error) { + if log.BlockNumber == 1 { + return &iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{ + Id: core.GenUpkeepID(types2.LogTrigger, "abc").BigInt(), + TriggerConfig: []byte{1, 2, 3}, + }, nil + } + return &iregistry21.IKeeperRegistryMasterUpkeepUnpaused{ + Id: core.GenUpkeepID(types2.LogTrigger, "def").BigInt(), + }, nil + }, + GetUpkeepTriggerConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return nil, nil + }, + }, + packer: &mockPacker{ + UnpackLogTriggerConfigFn: func(raw []byte) (automation_utils_2_1.LogTriggerConfig, error) { + return automation_utils_2_1.LogTriggerConfig{}, nil + }, + }, + }, + { + name: "log trigger upkeeps are refreshed in batch without error", + ids: func() []*big.Int { + res := []*big.Int{} + for i := 0; i < 
logTriggerRefreshBatchSize*3; i++ { + res = append(res, core.GenUpkeepID(types2.LogTrigger, fmt.Sprintf("%d", i)).BigInt()) + } + return res + }(), + logEventProvider: &mockLogEventProvider{ + RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + assert.Equal(t, logTriggerRefreshBatchSize, len(ids)) + return ids, nil + }, + RegisterFilterFn: func(opts logprovider.FilterOptions) error { + return nil + }, + }, + poller: &mockLogPoller{ + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return []logpoller.Log{ + { + BlockNumber: 2, + }, + { + BlockNumber: 1, + }, + }, nil + }, + }, + registry: &mockRegistry{ + ParseLogFn: func(log coreTypes.Log) (generated.AbigenLog, error) { + if log.BlockNumber == 1 { + return &iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{ + Id: core.GenUpkeepID(types2.LogTrigger, "abc").BigInt(), + TriggerConfig: []byte{1, 2, 3}, + }, nil + } + return &iregistry21.IKeeperRegistryMasterUpkeepUnpaused{ + Id: core.GenUpkeepID(types2.LogTrigger, "def").BigInt(), + }, nil + }, + GetUpkeepTriggerConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return nil, nil + }, + }, + packer: &mockPacker{ + UnpackLogTriggerConfigFn: func(raw []byte) (automation_utils_2_1.LogTriggerConfig, error) { + return automation_utils_2_1.LogTriggerConfig{}, nil + }, + }, + }, + { + name: "log trigger upkeeps are refreshed in batch, with a partial batch without error", + ids: func() []*big.Int { + res := []*big.Int{} + for i := 0; i < logTriggerRefreshBatchSize+3; i++ { + res = append(res, core.GenUpkeepID(types2.LogTrigger, fmt.Sprintf("%d", i)).BigInt()) + } + return res + }(), + logEventProvider: &mockLogEventProvider{ + RefreshActiveUpkeepsFn: func(ids ...*big.Int) ([]*big.Int, error) { + if len(ids) != logTriggerRefreshBatchSize { + assert.Equal(t, 3, len(ids)) + } + return ids, nil + }, 
+ RegisterFilterFn: func(opts logprovider.FilterOptions) error { + return nil + }, + }, + poller: &mockLogPoller{ + IndexedLogsFn: func(eventSig common.Hash, address common.Address, topicIndex int, topicValues []common.Hash, confs logpoller.Confirmations, qopts ...pg.QOpt) ([]logpoller.Log, error) { + return []logpoller.Log{ + { + BlockNumber: 2, + }, + { + BlockNumber: 1, + }, + }, nil + }, + }, + registry: &mockRegistry{ + ParseLogFn: func(log coreTypes.Log) (generated.AbigenLog, error) { + if log.BlockNumber == 1 { + return &iregistry21.IKeeperRegistryMasterUpkeepTriggerConfigSet{ + Id: core.GenUpkeepID(types2.LogTrigger, "abc").BigInt(), + TriggerConfig: []byte{1, 2, 3}, + }, nil + } + return &iregistry21.IKeeperRegistryMasterUpkeepUnpaused{ + Id: core.GenUpkeepID(types2.LogTrigger, "def").BigInt(), + }, nil + }, + GetUpkeepTriggerConfigFn: func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return nil, nil + }, + }, + packer: &mockPacker{ + UnpackLogTriggerConfigFn: func(raw []byte) (automation_utils_2_1.LogTriggerConfig, error) { + return automation_utils_2_1.LogTriggerConfig{}, nil + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + lggr := logger.TestLogger(t) + var hb types3.HeadBroadcaster + var lp logpoller.LogPoller + + bs := NewBlockSubscriber(hb, lp, 1000, lggr) + + registry := &EvmRegistry{ + addr: common.BigToAddress(big.NewInt(1)), + poller: tc.poller, + logEventProvider: tc.logEventProvider, + chLog: make(chan logpoller.Log, 10), + bs: bs, + registry: tc.registry, + packer: tc.packer, + lggr: lggr, + } + + err := registry.refreshLogTriggerUpkeeps(tc.ids) + if tc.expectsErr { + assert.Error(t, err) + assert.Equal(t, err.Error(), tc.wantErr.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} + +type mockLogEventProvider struct { + logprovider.LogEventProvider + RefreshActiveUpkeepsFn func(ids ...*big.Int) ([]*big.Int, error) + RegisterFilterFn func(opts logprovider.FilterOptions) error +} + +func (p 
*mockLogEventProvider) RefreshActiveUpkeeps(ids ...*big.Int) ([]*big.Int, error) { + return p.RefreshActiveUpkeepsFn(ids...) +} + +func (p *mockLogEventProvider) RegisterFilter(ctx context.Context, opts logprovider.FilterOptions) error { + return p.RegisterFilterFn(opts) +} + +type mockRegistry struct { + Registry + GetUpkeepTriggerConfigFn func(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) + ParseLogFn func(log coreTypes.Log) (generated.AbigenLog, error) +} + +func (r *mockRegistry) ParseLog(log coreTypes.Log) (generated.AbigenLog, error) { + return r.ParseLogFn(log) +} + +func (r *mockRegistry) GetUpkeepTriggerConfig(opts *bind.CallOpts, upkeepId *big.Int) ([]byte, error) { + return r.GetUpkeepTriggerConfigFn(opts, upkeepId) +} + +type mockPacker struct { + encoding.Packer + UnpackLogTriggerConfigFn func(raw []byte) (automation_utils_2_1.LogTriggerConfig, error) +} + +func (p *mockPacker) UnpackLogTriggerConfig(raw []byte) (automation_utils_2_1.LogTriggerConfig, error) { + return p.UnpackLogTriggerConfigFn(raw) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/services.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/services.go new file mode 100644 index 00000000..1acdf5fd --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/services.go @@ -0,0 +1,31 @@ +package evm + +import ( + "github.com/goplugin/libocr/offchainreporting2plus/ocr3types" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-automation/pkg/v3/plugin" +) + +type AutomationServices interface { + Keyring() ocr3types.OnchainKeyring[plugin.AutomationReportInfo] +} + +func New(keyring ocrtypes.OnchainKeyring) (AutomationServices, error) { + + services := new(automationServices) + + services.keyring = NewOnchainKeyringV3Wrapper(keyring) + + return services, nil +} + +type automationServices struct { + keyring *onchainKeyringV3Wrapper +} + +var _ AutomationServices = &automationServices{} + 
+func (f *automationServices) Keyring() ocr3types.OnchainKeyring[plugin.AutomationReportInfo] { + return f.keyring +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/testdata/btc-usd.json b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/testdata/btc-usd.json new file mode 100644 index 00000000..0afd6b1c --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/testdata/btc-usd.json @@ -0,0 +1,3 @@ +{ + "pluginBlob": "0x0001c38d71fed6c320b90e84b6f559459814d068e2a1700adc931ca9717d4fe70000000000000000000000000000000000000000000000000000000001a80b52b4bf1233f9cb71144a253a1791b202113c4ab4a92fa1b176d684b4959666ff8200000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001004254432d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000645570be000000000000000000000000000000000000000000000000000002af2b818dc5000000000000000000000000000000000000000000000000000002af2426faf3000000000000000000000000000000000000000000000000000002af32dc209700000000000000000000000000000000000000000000000000000000012130f8df0a9745bb6ad5e2df605e158ba8ad8a33ef8a0acf9851f0f01668a3a3f2b68600000000000000000000000000000000000000000000000000000000012130f60000000000000000000000000000000000000000000000000000000000000002c4a7958dce105089cf5edb68dad7dcfe8618d7784eb397f97d5a5fade78c11a58275aebda478968e545f7e3657aba9dcbe8d44605e4c6fde3e24edd5e22c94270000000000000000000000000000000000000000000000000000000000000002459c12d33986018a8959566d145225f0c4a4e61a9a3f50361ccff397899314f0018162cf10cd89897635a0bb62a822355bd199d09f4abe76e4d05261bb44733d" +} \ No newline at end of file diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/testdata/eth-usd.json 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/testdata/eth-usd.json new file mode 100644 index 00000000..a4576b67 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/testdata/eth-usd.json @@ -0,0 +1,3 @@ +{ + "pluginBlob": "0x00013d0126ad1ad2e0e0ab3e6de9499910b44925f8c3ee02e242de5dbc9c7bdb0000000000000000000000000000000000000000000000000000000001a8cf43d1dae0445bfc7e4942ab6e109d1685f3c64e7c9308595d17aadefaa51d8afd0d00000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260010100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001004554482d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000645570be0000000000000000000000000000000000000000000000000000002e5f7503c30000000000000000000000000000000000000000000000000000002e5eea51290000000000000000000000000000000000000000000000000000002e5fffb65c00000000000000000000000000000000000000000000000000000000012130f8df0a9745bb6ad5e2df605e158ba8ad8a33ef8a0acf9851f0f01668a3a3f2b68600000000000000000000000000000000000000000000000000000000012130f600000000000000000000000000000000000000000000000000000000000000021a53f0612f666ef65a2b91567c4b9cbac38e8d3dfd44b6d181518966a6c4c738151e1856cab0bcfc4688b67abb13228f9426f82161ec6265cf4edaa1e60bc0ce000000000000000000000000000000000000000000000000000000000000000218f3a503be5d4a40e9cd4024bac143e263a6d4ad80a8285d87241b8b0cd59d9446ec5662fc4956fbeefba7a7f052635dcb7fef649cbf08c37f7f196487560b30" +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/cache.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/cache.go new file mode 100644 index 00000000..375b6652 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/cache.go @@ -0,0 +1,72 @@ +package 
transmit + +import ( + "sync" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" +) + +// transmitEventCache holds a ring buffer of the last visited blocks (transmit block), +// and their corresponding logs (by log id). +// Using a ring buffer allows us to keep a cache of the last N blocks, +// without having to iterate over the entire buffer to clean it up. +type transmitEventCache struct { + lock sync.RWMutex + buffer []cacheBlock + + cap int64 +} + +func newTransmitEventCache(cap int64) transmitEventCache { + return transmitEventCache{ + buffer: make([]cacheBlock, cap), + cap: cap, + } +} + +func (c *transmitEventCache) get(block ocr2keepers.BlockNumber, logID string) (ocr2keepers.TransmitEvent, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + + i := int64(block) % c.cap + b := c.buffer[i] + if b.block != block { + return ocr2keepers.TransmitEvent{}, false + } + if len(b.records) == 0 { + return ocr2keepers.TransmitEvent{}, false + } + e, ok := b.records[logID] + + return e, ok +} + +func (c *transmitEventCache) add(logID string, e ocr2keepers.TransmitEvent) { + c.lock.Lock() + defer c.lock.Unlock() + + i := int64(e.TransmitBlock) % c.cap + b := c.buffer[i] + isBlockEmpty := len(b.records) == 0 + isNewBlock := b.block < e.TransmitBlock + if isBlockEmpty || isNewBlock { + b = newCacheBlock(e.TransmitBlock) + } else if b.block > e.TransmitBlock { + // old log + return + } + b.records[logID] = e + c.buffer[i] = b +} + +type cacheBlock struct { + block ocr2keepers.BlockNumber + records map[string]ocr2keepers.TransmitEvent +} + +func newCacheBlock(block ocr2keepers.BlockNumber) cacheBlock { + return cacheBlock{ + block: block, + records: make(map[string]ocr2keepers.TransmitEvent), + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/cache_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/cache_test.go new file mode 100644 index 00000000..9cf59e8f --- /dev/null +++ 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/cache_test.go @@ -0,0 +1,102 @@ +package transmit + +import ( + "testing" + + "github.com/stretchr/testify/require" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" +) + +func TestTransmitEventCache_Sanity(t *testing.T) { + tests := []struct { + name string + cap int64 + logIDsToAdd []string + eventsToAdd []ocr2keepers.TransmitEvent + toGet []string + blocksToGet []int64 + expected map[string]ocr2keepers.TransmitEvent + }{ + { + "empty cache", + 10, + []string{}, + []ocr2keepers.TransmitEvent{}, + []string{"1"}, + []int64{1}, + map[string]ocr2keepers.TransmitEvent{}, + }, + { + "happy path", + 10, + []string{"3", "2", "4", "1"}, + []ocr2keepers.TransmitEvent{ + {WorkID: "3", TransmitBlock: 3}, + {WorkID: "2", TransmitBlock: 2}, + {WorkID: "4", TransmitBlock: 4}, + {WorkID: "1", TransmitBlock: 1}, + }, + []string{"1", "3"}, + []int64{1, 3}, + map[string]ocr2keepers.TransmitEvent{ + "1": {WorkID: "1", TransmitBlock: 1}, + "3": {WorkID: "3", TransmitBlock: 3}, + }, + }, + { + "different blocks", + 10, + []string{"3", "1", "2", "4"}, + []ocr2keepers.TransmitEvent{ + {WorkID: "3", TransmitBlock: 3}, + {WorkID: "1", TransmitBlock: 1}, + {WorkID: "2", TransmitBlock: 2}, + {WorkID: "4", TransmitBlock: 4}, + }, + []string{"1", "3"}, + []int64{9, 9}, + map[string]ocr2keepers.TransmitEvent{}, + }, + { + "overflow", + 3, + []string{"4", "1", "3", "2", "5"}, + []ocr2keepers.TransmitEvent{ + {WorkID: "4", TransmitBlock: 4}, + {WorkID: "1", TransmitBlock: 1}, + {WorkID: "3", TransmitBlock: 3}, + {WorkID: "2", TransmitBlock: 2}, + {WorkID: "5", TransmitBlock: 5}, + }, + []string{"1", "4", "2", "3", "5"}, + []int64{1, 4, 2, 3, 5}, + map[string]ocr2keepers.TransmitEvent{ + "3": {WorkID: "3", TransmitBlock: 3}, + "4": {WorkID: "4", TransmitBlock: 4}, + "5": {WorkID: "5", TransmitBlock: 5}, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + c := 
newTransmitEventCache(tc.cap) + require.Equal(t, len(tc.eventsToAdd), len(tc.logIDsToAdd)) + for i, e := range tc.eventsToAdd { + c.add(tc.logIDsToAdd[i], e) + } + require.Equal(t, len(tc.toGet), len(tc.blocksToGet)) + for i, logID := range tc.toGet { + e, exist := c.get(ocr2keepers.BlockNumber(tc.blocksToGet[i]), logID) + expected, ok := tc.expected[logID] + if !ok { + require.False(t, exist, "expected not to find logID %s", logID) + continue + } + require.True(t, exist, "expected to find logID %s", logID) + require.Equal(t, expected.WorkID, e.WorkID) + } + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/encoding.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/encoding.go new file mode 100644 index 00000000..2f5a5d5b --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/encoding.go @@ -0,0 +1,112 @@ +package transmit + +import ( + "fmt" + "math/big" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" +) + +// defaultLogParser parses logs from the registry contract +func defaultLogParser(registry *iregistry21.IKeeperRegistryMaster, log logpoller.Log) (transmitEventLog, error) { + rawLog := log.ToGethLog() + abilog, err := registry.ParseLog(rawLog) + if err != nil { + return transmitEventLog{}, fmt.Errorf("%w: failed to parse log", err) + } + + switch l := abilog.(type) { + case *iregistry21.IKeeperRegistryMasterUpkeepPerformed: + if l == nil { + break + } + return transmitEventLog{ + Log: log, + Performed: l, + }, nil + case *iregistry21.IKeeperRegistryMasterReorgedUpkeepReport: + if l == nil { + break + } + return transmitEventLog{ + Log: log, + Reorged: l, + }, nil + case *iregistry21.IKeeperRegistryMasterStaleUpkeepReport: + if l == nil { + break + } + return transmitEventLog{ + 
Log: log, + Stale: l, + }, nil + case *iregistry21.IKeeperRegistryMasterInsufficientFundsUpkeepReport: + if l == nil { + break + } + return transmitEventLog{ + Log: log, + InsufficientFunds: l, + }, nil + default: + return transmitEventLog{}, fmt.Errorf("unknown log type: %v", l) + } + return transmitEventLog{}, fmt.Errorf("log with bad structure") +} + +// transmitEventLog is a wrapper around logpoller.Log and the parsed log +type transmitEventLog struct { + logpoller.Log + Performed *iregistry21.IKeeperRegistryMasterUpkeepPerformed + Stale *iregistry21.IKeeperRegistryMasterStaleUpkeepReport + Reorged *iregistry21.IKeeperRegistryMasterReorgedUpkeepReport + InsufficientFunds *iregistry21.IKeeperRegistryMasterInsufficientFundsUpkeepReport +} + +func (l transmitEventLog) Id() *big.Int { + switch { + case l.Performed != nil: + return l.Performed.Id + case l.Stale != nil: + return l.Stale.Id + case l.Reorged != nil: + return l.Reorged.Id + case l.InsufficientFunds != nil: + return l.InsufficientFunds.Id + default: + return nil + } +} + +func (l transmitEventLog) Trigger() []byte { + switch { + case l.Performed != nil: + return l.Performed.Trigger + case l.Stale != nil: + return l.Stale.Trigger + case l.Reorged != nil: + return l.Reorged.Trigger + case l.InsufficientFunds != nil: + return l.InsufficientFunds.Trigger + default: + return []byte{} + } +} + +func (l transmitEventLog) TransmitEventType() ocr2keepers.TransmitEventType { + switch { + case l.Performed != nil: + return ocr2keepers.PerformEvent + case l.Stale != nil: + return ocr2keepers.StaleReportEvent + case l.Reorged != nil: + return ocr2keepers.ReorgReportEvent + case l.InsufficientFunds != nil: + return ocr2keepers.InsufficientFundsReportEvent + default: + return ocr2keepers.UnknownEvent + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/encoding_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/encoding_test.go new file mode 100644 index 
00000000..c2f470de --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/encoding_test.go @@ -0,0 +1,103 @@ +package transmit + +import ( + "testing" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" +) + +func TestTransmitEventLog(t *testing.T) { + uid := core.GenUpkeepID(types.ConditionTrigger, "111") + + tests := []struct { + name string + log transmitEventLog + etype ocr2keepers.TransmitEventType + }{ + { + "performed", + transmitEventLog{ + Log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0x010203040"), + }, + Performed: &iregistry21.IKeeperRegistryMasterUpkeepPerformed{ + Id: uid.BigInt(), + Trigger: []byte{1, 2, 3, 4, 5, 6, 7, 8}, + }, + }, + ocr2keepers.PerformEvent, + }, + { + "stale", + transmitEventLog{ + Log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0x010203040"), + }, + Stale: &iregistry21.IKeeperRegistryMasterStaleUpkeepReport{ + Id: uid.BigInt(), + Trigger: []byte{1, 2, 3, 4, 5, 6, 7, 8}, + }, + }, + ocr2keepers.StaleReportEvent, + }, + { + "insufficient funds", + transmitEventLog{ + Log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0x010203040"), + }, + InsufficientFunds: &iregistry21.IKeeperRegistryMasterInsufficientFundsUpkeepReport{ + Id: uid.BigInt(), + Trigger: []byte{1, 2, 3, 4, 5, 6, 7, 8}, + }, + }, + ocr2keepers.InsufficientFundsReportEvent, + }, + { + "reorged", + transmitEventLog{ + Log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0x010203040"), + }, + Reorged: 
&iregistry21.IKeeperRegistryMasterReorgedUpkeepReport{ + Id: uid.BigInt(), + Trigger: []byte{1, 2, 3, 4, 5, 6, 7, 8}, + }, + }, + ocr2keepers.ReorgReportEvent, + }, + { + "empty", + transmitEventLog{ + Log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0x010203040"), + }, + }, + ocr2keepers.UnknownEvent, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + if tc.log.Id() != nil { + require.Equal(t, uid.BigInt().Int64(), tc.log.Id().Int64()) + require.Equal(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}, tc.log.Trigger()) + } + require.Equal(t, tc.etype, tc.log.TransmitEventType()) + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go new file mode 100644 index 00000000..ecfcc633 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider.go @@ -0,0 +1,229 @@ +package transmit + +import ( + "context" + "encoding/hex" + "fmt" + "sync" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/common" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/plugin-common/pkg/services" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +var _ types.TransmitEventProvider = &EventProvider{} + +type logParser func(registry *iregistry21.IKeeperRegistryMaster, log logpoller.Log) (transmitEventLog, error) + +type EventProvider struct { + sync services.StateMachine + mu sync.RWMutex + 
runState int + runError error + + logger logger.Logger + logPoller logpoller.LogPoller + registry *iregistry21.IKeeperRegistryMaster + client evmclient.Client + + registryAddress common.Address + lookbackBlocks int64 + + parseLog logParser + cache transmitEventCache +} + +func EventProviderFilterName(addr common.Address) string { + return logpoller.FilterName("KeepersRegistry TransmitEventProvider", addr) +} + +func NewTransmitEventProvider( + logger logger.Logger, + logPoller logpoller.LogPoller, + registryAddress common.Address, + client evmclient.Client, + lookbackBlocks int64, +) (*EventProvider, error) { + var err error + + contract, err := iregistry21.NewIKeeperRegistryMaster(registryAddress, client) + if err != nil { + return nil, err + } + err = logPoller.RegisterFilter(logpoller.Filter{ + Name: EventProviderFilterName(contract.Address()), + EventSigs: []common.Hash{ + // These are the events that are emitted when a node transmits a report + iregistry21.IKeeperRegistryMasterUpkeepPerformed{}.Topic(), // Happy path: report performed the upkeep + iregistry21.IKeeperRegistryMasterReorgedUpkeepReport{}.Topic(), // Report checkBlockNumber was reorged + iregistry21.IKeeperRegistryMasterInsufficientFundsUpkeepReport{}.Topic(), // Upkeep didn't have sufficient funds when report reached chain, perform was aborted early + // Report was too old when it reached the chain. 
For conditionals upkeep was already performed on a higher block than checkBlockNum + // for logs upkeep was already performed for the particular log + iregistry21.IKeeperRegistryMasterStaleUpkeepReport{}.Topic(), + }, + Addresses: []common.Address{registryAddress}, + }) + if err != nil { + return nil, err + } + + return &EventProvider{ + logger: logger, + logPoller: logPoller, + registryAddress: registryAddress, + lookbackBlocks: lookbackBlocks, + registry: contract, + client: client, + parseLog: defaultLogParser, + cache: newTransmitEventCache(lookbackBlocks), + }, nil +} + +func (c *EventProvider) Name() string { + return c.logger.Name() +} + +func (c *EventProvider) Start(_ context.Context) error { + return c.sync.StartOnce("AutomationTransmitEventProvider", func() error { + c.mu.Lock() + defer c.mu.Unlock() + + c.runState = 1 + return nil + }) +} + +func (c *EventProvider) Close() error { + return c.sync.StopOnce("AutomationRegistry", func() error { + c.mu.Lock() + defer c.mu.Unlock() + + c.runState = 0 + c.runError = nil + return nil + }) +} + +func (c *EventProvider) Ready() error { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.runState == 1 { + return nil + } + return c.sync.Ready() +} + +func (c *EventProvider) HealthReport() map[string]error { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.runState > 1 { + c.sync.SvcErrBuffer.Append(fmt.Errorf("failed run state: %w", c.runError)) + } + return map[string]error{c.Name(): c.sync.Healthy()} +} + +func (c *EventProvider) GetLatestEvents(ctx context.Context) ([]ocr2keepers.TransmitEvent, error) { + end, err := c.logPoller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return nil, fmt.Errorf("%w: failed to get latest block from log poller", err) + } + + // always check the last lookback number of blocks and rebroadcast + // this allows the plugin to make decisions based on event confirmations + logs, err := c.logPoller.LogsWithSigs( + end.BlockNumber-c.lookbackBlocks, + end.BlockNumber, + []common.Hash{ 
+ iregistry21.IKeeperRegistryMasterUpkeepPerformed{}.Topic(), + iregistry21.IKeeperRegistryMasterStaleUpkeepReport{}.Topic(), + iregistry21.IKeeperRegistryMasterReorgedUpkeepReport{}.Topic(), + iregistry21.IKeeperRegistryMasterInsufficientFundsUpkeepReport{}.Topic(), + }, + c.registryAddress, + pg.WithParentCtx(ctx), + ) + if err != nil { + return nil, fmt.Errorf("%w: failed to collect logs from log poller", err) + } + + return c.processLogs(end.BlockNumber, logs...) +} + +// processLogs will parse the unseen logs and return the corresponding transmit events. +func (c *EventProvider) processLogs(latestBlock int64, logs ...logpoller.Log) ([]ocr2keepers.TransmitEvent, error) { + var vals []ocr2keepers.TransmitEvent + + for _, log := range logs { + k := c.logKey(log) + + transmitEvent, ok := c.cache.get(ocr2keepers.BlockNumber(log.BlockNumber), k) + if !ok { + l, err := c.parseLog(c.registry, log) + if err != nil { + c.logger.Debugw("failed to parse log", "err", err) + continue + } + id := l.Id() + upkeepId := &ocr2keepers.UpkeepIdentifier{} + ok := upkeepId.FromBigInt(id) + if !ok { + return nil, core.ErrInvalidUpkeepID + } + triggerW, err := core.UnpackTrigger(id, l.Trigger()) + if err != nil { + return nil, fmt.Errorf("%w: failed to unpack trigger", err) + } + trigger := ocr2keepers.NewTrigger( + ocr2keepers.BlockNumber(triggerW.BlockNum), + triggerW.BlockHash, + ) + switch core.GetUpkeepType(*upkeepId) { + case types.LogTrigger: + trigger.LogTriggerExtension = &ocr2keepers.LogTriggerExtension{} + trigger.LogTriggerExtension.TxHash = triggerW.TxHash + trigger.LogTriggerExtension.Index = triggerW.LogIndex + trigger.LogTriggerExtension.BlockHash = triggerW.LogBlockHash + default: + } + workID := core.UpkeepWorkID(*upkeepId, trigger) + transmitEvent = ocr2keepers.TransmitEvent{ + Type: l.TransmitEventType(), + TransmitBlock: ocr2keepers.BlockNumber(l.BlockNumber), + TransactionHash: l.TxHash, + WorkID: workID, + UpkeepID: *upkeepId, + CheckBlock: trigger.BlockNumber, 
+ } + c.cache.add(k, transmitEvent) + } + + transmitEvent.Confirmations = latestBlock - int64(transmitEvent.TransmitBlock) + + vals = append(vals, transmitEvent) + } + + return vals, nil +} + +func (c *EventProvider) logKey(log logpoller.Log) string { + logExt := ocr2keepers.LogTriggerExtension{ + TxHash: log.TxHash, + Index: uint32(log.LogIndex), + BlockHash: log.BlockHash, + } + logId := logExt.LogIdentifier() + return hex.EncodeToString(logId) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider_test.go new file mode 100644 index 00000000..67db2635 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit/event_provider_test.go @@ -0,0 +1,230 @@ +package transmit + +import ( + "math/big" + "runtime" + "testing" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmClientMocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" +) + +func TestTransmitEventProvider_Sanity(t *testing.T) { + ctx := testutils.Context(t) + + lp := new(mocks.LogPoller) + + lp.On("RegisterFilter", mock.Anything).Return(nil) + + provider, err := 
NewTransmitEventProvider(logger.TestLogger(t), lp, common.HexToAddress("0x"), client.NewNullClient(big.NewInt(1), logger.TestLogger(t)), 32) + require.NoError(t, err) + require.NotNil(t, provider) + + go func() { + require.Error(t, provider.Ready()) + errs := provider.HealthReport() + require.Len(t, errs, 1) + require.Error(t, errs[provider.Name()]) + err := provider.Start(ctx) + if ctx.Err() == nil { + require.NoError(t, err) + } + }() + + for provider.Ready() != nil { + runtime.Gosched() + } + errs := provider.HealthReport() + require.Len(t, errs, 1) + require.NoError(t, errs[provider.Name()]) + + tests := []struct { + name string + latestBlock int64 + logs []logpoller.Log + errored bool + resultsLen int + }{ + { + "empty logs", + 100, + []logpoller.Log{}, + false, + 0, + }, + { + "invalid log", + 101, + []logpoller.Log{ + { + BlockNumber: 101, + BlockHash: common.HexToHash("0x1"), + TxHash: common.HexToHash("0x1"), + LogIndex: 1, + Address: common.HexToAddress("0x1"), + Topics: [][]byte{ + iregistry21.IKeeperRegistryMasterUpkeepPerformed{}.Topic().Bytes(), + }, + EventSig: iregistry21.IKeeperRegistryMasterUpkeepPerformed{}.Topic(), + Data: []byte{}, + }, + }, + false, + 0, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: tc.latestBlock}, nil) + lp.On("LogsWithSigs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.logs, nil) + + res, err := provider.GetLatestEvents(ctx) + require.Equal(t, tc.errored, err != nil) + require.Len(t, res, tc.resultsLen) + }) + } +} + +func TestTransmitEventProvider_ProcessLogs(t *testing.T) { + lp := new(mocks.LogPoller) + lp.On("RegisterFilter", mock.Anything).Return(nil) + client := evmClientMocks.NewClient(t) + + provider, err := NewTransmitEventProvider(logger.TestLogger(t), lp, common.HexToAddress("0x"), client, 250) + require.NoError(t, err) + + id := 
core.GenUpkeepID(types.LogTrigger, "1111111111111111") + + tests := []struct { + name string + parsedPerformed []transmitEventLog + latestBlock int64 + want []ocr2keepers.TransmitEvent + errored bool + }{ + { + "happy flow", + []transmitEventLog{ + { + Log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0x0102030405060708010203040506070801020304050607080102030405060708"), + }, + Performed: &iregistry21.IKeeperRegistryMasterUpkeepPerformed{ + Id: id.BigInt(), + Trigger: func() []byte { + b, _ := hexutil.Decode("0x0000000000000000000000000000000000000000000000000000000001111abc0000000000000000000000000000000000000000000000000000000001111111000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000001111111") + return b + }(), + }, + }, + }, + 1, + []ocr2keepers.TransmitEvent{ + { + Type: ocr2keepers.PerformEvent, + UpkeepID: id, + CheckBlock: ocr2keepers.BlockNumber(1), + }, + }, + false, + }, + { + "empty events", + []transmitEventLog{}, + 1, + []ocr2keepers.TransmitEvent{}, + false, + }, + { + "same log twice", // shouldn't happen in practice as log poller should not return duplicate logs + []transmitEventLog{ + { + Log: logpoller.Log{ + BlockNumber: 1, + BlockHash: common.HexToHash("0x0102030405060708010203040506070801020304050607080102030405060708"), + }, + Performed: &iregistry21.IKeeperRegistryMasterUpkeepPerformed{ + Id: id.BigInt(), + Trigger: func() []byte { + b, _ := hexutil.Decode("0x0000000000000000000000000000000000000000000000000000000001111abc0000000000000000000000000000000000000000000000000000000001111111000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000001111111") + return b + }(), + }, + }, + { + Log: logpoller.Log{ + BlockNumber: 1, + BlockHash: 
common.HexToHash("0x0102030405060708010203040506070801020304050607080102030405060708"), + }, + Performed: &iregistry21.IKeeperRegistryMasterUpkeepPerformed{ + Id: id.BigInt(), + Trigger: func() []byte { + b, _ := hexutil.Decode("0x0000000000000000000000000000000000000000000000000000000001111abc0000000000000000000000000000000000000000000000000000000001111111000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000001111111") + return b + }(), + }, + }, + }, + 1, + []ocr2keepers.TransmitEvent{ + { + Type: ocr2keepers.PerformEvent, + UpkeepID: id, + CheckBlock: ocr2keepers.BlockNumber(1), + }, + { + Type: ocr2keepers.PerformEvent, + UpkeepID: id, + CheckBlock: ocr2keepers.BlockNumber(1), + }, + }, + false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + parseResults := make(map[string]transmitEventLog, len(tc.parsedPerformed)) + performedLogs := make([]logpoller.Log, len(tc.parsedPerformed)) + for i, l := range tc.parsedPerformed { + performedLogs[i] = l.Log + if _, ok := parseResults[provider.logKey(l.Log)]; ok { + continue + } + parseResults[provider.logKey(l.Log)] = l + } + provider.mu.Lock() + provider.cache = newTransmitEventCache(provider.cache.cap) + provider.parseLog = func(registry *iregistry21.IKeeperRegistryMaster, log logpoller.Log) (transmitEventLog, error) { + return parseResults[provider.logKey(log)], nil + } + provider.mu.Unlock() + + results, err := provider.processLogs(tc.latestBlock, performedLogs...) 
+ require.Equal(t, tc.errored, err != nil) + require.Len(t, results, len(tc.want)) + for i, res := range results { + require.Equal(t, tc.want[i].Type, res.Type) + require.Equal(t, tc.want[i].UpkeepID, res.UpkeepID) + require.Equal(t, tc.want[i].CheckBlock, res.CheckBlock) + } + }) + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeep_provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeep_provider.go new file mode 100644 index 00000000..a09f6ac5 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeep_provider.go @@ -0,0 +1,51 @@ +package evm + +import ( + "context" + "fmt" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" +) + +var _ ocr2keepers.ConditionalUpkeepProvider = &upkeepProvider{} + +type upkeepProvider struct { + activeUpkeeps ActiveUpkeepList + bs *BlockSubscriber + lp logpoller.LogPoller +} + +func NewUpkeepProvider(activeUpkeeps ActiveUpkeepList, bs *BlockSubscriber, lp logpoller.LogPoller) *upkeepProvider { + return &upkeepProvider{ + activeUpkeeps: activeUpkeeps, + bs: bs, + lp: lp, + } +} + +func (p *upkeepProvider) GetActiveUpkeeps(_ context.Context) ([]ocr2keepers.UpkeepPayload, error) { + latestBlock := p.bs.latestBlock.Load() + if latestBlock == nil { + return nil, fmt.Errorf("no latest block found when fetching active upkeeps") + } + var payloads []ocr2keepers.UpkeepPayload + for _, uid := range p.activeUpkeeps.View(types.ConditionTrigger) { + payload, err := core.NewUpkeepPayload( + uid, + ocr2keepers.NewTrigger(latestBlock.Number, latestBlock.Hash), + nil, + ) + if err != nil { + // skip invalid payloads + continue + } + + payloads = append(payloads, payload) + } + + return payloads, nil +} diff --git 
a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeep_provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeep_provider_test.go new file mode 100644 index 00000000..b9b2e934 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeep_provider_test.go @@ -0,0 +1,115 @@ +package evm + +import ( + "math/big" + "sync/atomic" + "testing" + + "github.com/goplugin/plugin-automation/pkg/v3/types" + + "github.com/stretchr/testify/require" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" +) + +func TestUpkeepProvider_GetActiveUpkeeps(t *testing.T) { + ctx := testutils.Context(t) + + var lp logpoller.LogPoller + + tests := []struct { + name string + active ActiveUpkeepList + latestBlock *ocr2keepers.BlockKey + want []ocr2keepers.UpkeepPayload + wantErr bool + }{ + { + "empty", + &mockActiveUpkeepList{ + ViewFn: func(upkeepType ...types.UpkeepType) []*big.Int { + return []*big.Int{} + }, + }, + &ocr2keepers.BlockKey{Number: 1}, + nil, + false, + }, + { + "happy flow", + &mockActiveUpkeepList{ + ViewFn: func(upkeepType ...types.UpkeepType) []*big.Int { + return []*big.Int{ + big.NewInt(1), + big.NewInt(2), + } + }, + }, + &ocr2keepers.BlockKey{Number: 1}, + []ocr2keepers.UpkeepPayload{ + { + UpkeepID: core.UpkeepIDFromInt("1"), + Trigger: ocr2keepers.NewTrigger(ocr2keepers.BlockNumber(1), [32]byte{}), + WorkID: "b10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6", + }, + { + UpkeepID: core.UpkeepIDFromInt("2"), + Trigger: ocr2keepers.NewTrigger(ocr2keepers.BlockNumber(1), [32]byte{}), + WorkID: "405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace", + }, + }, + false, + }, + { + "latest block not found", + &mockActiveUpkeepList{ + ViewFn: func(upkeepType 
...types.UpkeepType) []*big.Int { + return []*big.Int{ + big.NewInt(1), + big.NewInt(2), + } + }, + }, + nil, + []ocr2keepers.UpkeepPayload{}, + true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + bs := &BlockSubscriber{ + latestBlock: atomic.Pointer[ocr2keepers.BlockKey]{}, + } + bs.latestBlock.Store(tc.latestBlock) + p := NewUpkeepProvider(tc.active, bs, lp) + + got, err := p.GetActiveUpkeeps(ctx) + if tc.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Len(t, got, len(tc.want)) + require.Equal(t, tc.want, got) + }) + } +} + +type mockActiveUpkeepList struct { + ActiveUpkeepList + ViewFn func(...types.UpkeepType) []*big.Int + IsActiveFn func(id *big.Int) bool +} + +func (l *mockActiveUpkeepList) View(u ...types.UpkeepType) []*big.Int { + return l.ViewFn(u...) +} + +func (l *mockActiveUpkeepList) IsActive(id *big.Int) bool { + return l.IsActiveFn(id) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/orm.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/orm.go new file mode 100644 index 00000000..edb9961a --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/orm.go @@ -0,0 +1,95 @@ +package upkeepstate + +import ( + "math/big" + "time" + + "github.com/jmoiron/sqlx" + "github.com/lib/pq" + + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type orm struct { + chainID *ubig.Big + q pg.Q +} + +type persistedStateRecord struct { + UpkeepID *ubig.Big + WorkID string + CompletionState uint8 + BlockNumber int64 + IneligibilityReason uint8 + InsertedAt time.Time +} + +// NewORM creates an ORM scoped to chainID. 
+func NewORM(chainID *big.Int, db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) *orm { + return &orm{ + chainID: ubig.New(chainID), + q: pg.NewQ(db, lggr.Named("ORM"), cfg), + } +} + +// BatchInsertRecords is idempotent and sets upkeep state values in db +func (o *orm) BatchInsertRecords(state []persistedStateRecord, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + + if len(state) == 0 { + return nil + } + + type row struct { + EvmChainId *ubig.Big + WorkId string + CompletionState uint8 + BlockNumber int64 + InsertedAt time.Time + UpkeepId *ubig.Big + IneligibilityReason uint8 + } + + var rows []row + for _, record := range state { + rows = append(rows, row{ + EvmChainId: o.chainID, + WorkId: record.WorkID, + CompletionState: record.CompletionState, + BlockNumber: record.BlockNumber, + InsertedAt: record.InsertedAt, + UpkeepId: record.UpkeepID, + IneligibilityReason: record.IneligibilityReason, + }) + } + + return q.ExecQNamed(`INSERT INTO evm.upkeep_states +(evm_chain_id, work_id, completion_state, block_number, inserted_at, upkeep_id, ineligibility_reason) VALUES +(:evm_chain_id, :work_id, :completion_state, :block_number, :inserted_at, :upkeep_id, :ineligibility_reason) ON CONFLICT (evm_chain_id, work_id) DO NOTHING`, rows) +} + +// SelectStatesByWorkIDs searches the data store for stored states for the +// provided work ids and configured chain id +func (o *orm) SelectStatesByWorkIDs(workIDs []string, qopts ...pg.QOpt) (states []persistedStateRecord, err error) { + q := o.q.WithOpts(qopts...) 
+ + err = q.Select(&states, `SELECT upkeep_id, work_id, completion_state, block_number, ineligibility_reason, inserted_at + FROM evm.upkeep_states + WHERE work_id = ANY($1) AND evm_chain_id = $2::NUMERIC`, pq.Array(workIDs), o.chainID) + + if err != nil { + return nil, err + } + + return states, err +} + +// DeleteExpired prunes stored states older than to the provided time +func (o *orm) DeleteExpired(expired time.Time, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + _, err := q.Exec(`DELETE FROM evm.upkeep_states WHERE inserted_at <= $1 AND evm_chain_id::NUMERIC = $2`, expired, o.chainID) + + return err +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/orm_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/orm_test.go new file mode 100644 index 00000000..176353b2 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/orm_test.go @@ -0,0 +1,52 @@ +package upkeepstate + +import ( + "math/big" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func TestInsertSelectDelete(t *testing.T) { + lggr, _ := logger.TestLoggerObserved(t, zapcore.ErrorLevel) + chainID := testutils.FixtureChainID + db := pgtest.NewSqlxDB(t) + orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + + inserted := []persistedStateRecord{ + { + UpkeepID: ubig.New(big.NewInt(2)), + WorkID: "0x1", + CompletionState: 100, + BlockNumber: 2, + IneligibilityReason: 2, + InsertedAt: time.Now(), + }, + } + + err := orm.BatchInsertRecords(inserted) + + require.NoError(t, err, "no error expected from insert") + + states, err := orm.SelectStatesByWorkIDs([]string{"0x1"}) + + 
require.NoError(t, err, "no error expected from select") + require.Len(t, states, 1, "records return should equal records inserted") + + err = orm.DeleteExpired(time.Now()) + + assert.NoError(t, err, "no error expected from delete") + + states, err = orm.SelectStatesByWorkIDs([]string{"0x1"}) + + require.NoError(t, err, "no error expected from select") + require.Len(t, states, 0, "records return should be empty since records were deleted") +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner.go new file mode 100644 index 00000000..cb988189 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner.go @@ -0,0 +1,107 @@ +package upkeepstate + +import ( + "context" + "encoding/hex" + "fmt" + "io" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +var ( + _ PerformedLogsScanner = &performedEventsScanner{} + + workIDsBatchSize = 25 +) + +type PerformedLogsScanner interface { + ScanWorkIDs(ctx context.Context, workIDs ...string) ([]string, error) + + Start(context.Context) error + io.Closer +} + +type performedEventsScanner struct { + lggr logger.Logger + poller logpoller.LogPoller + registryAddress common.Address + + finalityDepth uint32 +} + +func NewPerformedEventsScanner( + lggr logger.Logger, + poller logpoller.LogPoller, + registryAddress common.Address, + finalityDepth uint32, +) *performedEventsScanner { + return &performedEventsScanner{ + lggr: lggr.Named("EventsScanner"), + poller: poller, + registryAddress: registryAddress, + 
finalityDepth: finalityDepth, + } +} + +func (s *performedEventsScanner) Start(_ context.Context) error { + return s.poller.RegisterFilter(logpoller.Filter{ + Name: dedupFilterName(s.registryAddress), + EventSigs: []common.Hash{ + // listening to dedup key added event + iregistry21.IKeeperRegistryMasterDedupKeyAdded{}.Topic(), + }, + Addresses: []common.Address{s.registryAddress}, + Retention: logprovider.LogRetention, + }) +} + +// Close implements io.Closer and does nothing +func (s *performedEventsScanner) Close() error { + return nil +} + +func (s *performedEventsScanner) ScanWorkIDs(ctx context.Context, workID ...string) ([]string, error) { + var ids []common.Hash + for _, id := range workID { + ids = append(ids, common.HexToHash(id)) + } + logs := make([]logpoller.Log, 0) + for i := 0; i < len(ids); i += workIDsBatchSize { + end := i + workIDsBatchSize + if end > len(ids) { + end = len(ids) + } + batch := ids[i:end] + batchLogs, err := s.poller.IndexedLogs(iregistry21.IKeeperRegistryMasterDedupKeyAdded{}.Topic(), s.registryAddress, 1, batch, logpoller.Confirmations(s.finalityDepth), pg.WithParentCtx(ctx)) + if err != nil { + return nil, fmt.Errorf("error fetching logs: %w", err) + } + logs = append(logs, batchLogs...) 
+ } + + return s.logsToWorkIDs(logs), nil +} + +func (s *performedEventsScanner) logsToWorkIDs(logs []logpoller.Log) []string { + workIDs := make([]string, 0) + for _, log := range logs { + topics := log.GetTopics() + if len(topics) < 2 { + s.lggr.Debugw("unexpected log topics", "topics", topics) + continue + } + workIDs = append(workIDs, hex.EncodeToString(topics[1].Bytes())) + } + return workIDs +} + +func dedupFilterName(addr common.Address) string { + return logpoller.FilterName("KeepersRegistry UpkeepStates Deduped", addr) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner_test.go new file mode 100644 index 00000000..2bbdf1a8 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/scanner_test.go @@ -0,0 +1,171 @@ +package upkeepstate + +import ( + "fmt" + "sort" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func TestPerformedEventsScanner(t *testing.T) { + ctx := testutils.Context(t) + registryAddr := common.HexToAddress("0x12345") + lggr := logger.TestLogger(t) + + tests := []struct { + name string + workIDs []string + pollerResults []logpoller.Log + scannerResults []string + pollerErr error + errored bool + }{ + { + "empty", + []string{}, + []logpoller.Log{}, + []string{}, + nil, + false, + }, + { + "log poller error", + []string{"111"}, + []logpoller.Log{}, + []string{}, + fmt.Errorf("test-error"), + true, + }, + { + "one result", + 
[]string{"290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"}, + []logpoller.Log{ + { + BlockNumber: 1, + Address: registryAddr, + Topics: convertTopics([]common.Hash{ + iregistry21.IKeeperRegistryMasterDedupKeyAdded{}.Topic(), + common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"), + }), + }, + }, + []string{"290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"}, + nil, + false, + }, + { + "missing workID", + []string{"290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"}, + []logpoller.Log{ + { + BlockNumber: 1, + Address: registryAddr, + Topics: convertTopics([]common.Hash{ + iregistry21.IKeeperRegistryMasterDedupKeyAdded{}.Topic(), + }), + }, + }, + []string{}, + nil, + false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mp := new(mocks.LogPoller) + mp.On("RegisterFilter", mock.Anything).Return(nil) + mp.On("UnregisterFilter", mock.Anything, mock.Anything).Return(nil) + scanner := NewPerformedEventsScanner(lggr, mp, registryAddr, 100) + + go func() { + _ = scanner.Start(ctx) + }() + defer func() { + _ = scanner.Close() + }() + + mp.On("IndexedLogs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tc.pollerResults, tc.pollerErr) + + results, err := scanner.ScanWorkIDs(ctx, tc.workIDs...) 
+ if tc.errored { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, len(tc.scannerResults), len(results)) + + for _, result := range results { + require.Contains(t, tc.scannerResults, result) + } + }) + } +} + +func TestPerformedEventsScanner_Batch(t *testing.T) { + ctx := testutils.Context(t) + registryAddr := common.HexToAddress("0x12345") + lggr := logger.TestLogger(t) + lp := new(mocks.LogPoller) + scanner := NewPerformedEventsScanner(lggr, lp, registryAddr, 100) + + lp.On("IndexedLogs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{ + { + BlockNumber: 1, + Address: registryAddr, + Topics: convertTopics([]common.Hash{ + iregistry21.IKeeperRegistryMasterDedupKeyAdded{}.Topic(), + common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"), + }), + }, + }, nil).Times(1) + lp.On("IndexedLogs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{ + { + BlockNumber: 3, + Address: registryAddr, + Topics: convertTopics([]common.Hash{ + iregistry21.IKeeperRegistryMasterDedupKeyAdded{}.Topic(), + common.HexToHash("0x331decd9548b62a8d603457658386fc84ba6bc95888008f6362f93160ef3b663"), + }), + }, + }, nil).Times(1) + + origWorkIDsBatchSize := workIDsBatchSize + workIDsBatchSize = 8 + defer func() { + workIDsBatchSize = origWorkIDsBatchSize + }() + + ids, err := scanner.ScanWorkIDs(ctx, + "290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563", + "1111", "2222", "3333", "4444", "5555", "6666", "7777", "8888", "9999", + "331decd9548b62a8d603457658386fc84ba6bc95888008f6362f93160ef3b663", + ) + + require.NoError(t, err) + require.Equal(t, 2, len(ids)) + sort.Slice(ids, func(i, j int) bool { + return ids[i] < ids[j] + }) + require.Equal(t, "290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563", ids[0]) + require.Equal(t, 
"331decd9548b62a8d603457658386fc84ba6bc95888008f6362f93160ef3b663", ids[1]) + + lp.AssertExpectations(t) +} + +func convertTopics(topics []common.Hash) [][]byte { + var topicsForDB [][]byte + for _, t := range topics { + topicsForDB = append(topicsForDB, t.Bytes()) + } + return topicsForDB +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/store.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/store.go new file mode 100644 index 00000000..773f3722 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/store.go @@ -0,0 +1,336 @@ +package upkeepstate + +import ( + "context" + "fmt" + "io" + "math/big" + "sync" + "time" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/plugin-common/pkg/services" + + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const ( + UpkeepStateStoreServiceName = "UpkeepStateStore" + // CacheExpiration is the amount of time that we keep a record in the cache. + CacheExpiration = 24 * time.Hour + // GCInterval is the amount of time between cache cleanups. + GCInterval = 2 * time.Hour + // flushCadence is the amount of time between flushes to the DB. + flushCadence = 30 * time.Second + concurrentBatchCalls = 10 +) + +type ORM interface { + BatchInsertRecords([]persistedStateRecord, ...pg.QOpt) error + SelectStatesByWorkIDs([]string, ...pg.QOpt) ([]persistedStateRecord, error) + DeleteExpired(time.Time, ...pg.QOpt) error +} + +// UpkeepStateStore is the interface for managing upkeeps final state in a local store. 
+type UpkeepStateStore interface { + ocr2keepers.UpkeepStateUpdater + core.UpkeepStateReader + Start(context.Context) error + io.Closer +} + +var ( + _ UpkeepStateStore = &upkeepStateStore{} + newTickerFn = time.NewTicker + batchSize = 1000 +) + +// upkeepStateRecord is a record that we save in a local cache. +type upkeepStateRecord struct { + workID string + state ocr2keepers.UpkeepState + + addedAt time.Time +} + +// upkeepStateStore implements UpkeepStateStore. +// It stores the state of ineligible upkeeps in a local, in-memory cache. +// In addition, performed events are fetched by the scanner on demand. +type upkeepStateStore struct { + services.StateMachine + threadCtrl utils.ThreadControl + + orm ORM + lggr logger.Logger + scanner PerformedLogsScanner + + retention time.Duration + cleanCadence time.Duration + + mu sync.RWMutex + cache map[string]*upkeepStateRecord + + pendingRecords []persistedStateRecord + sem chan struct{} + batchSize int +} + +// NewUpkeepStateStore creates a new state store +func NewUpkeepStateStore(orm ORM, lggr logger.Logger, scanner PerformedLogsScanner) *upkeepStateStore { + return &upkeepStateStore{ + orm: orm, + lggr: lggr.Named(UpkeepStateStoreServiceName), + cache: map[string]*upkeepStateRecord{}, + scanner: scanner, + retention: CacheExpiration, + cleanCadence: GCInterval, + threadCtrl: utils.NewThreadControl(), + pendingRecords: []persistedStateRecord{}, + sem: make(chan struct{}, concurrentBatchCalls), + batchSize: batchSize, + } +} + +// Start starts the upkeep state store. +// it does background cleanup of the cache every GCInterval, +// and flush records to DB every flushCadence. 
+func (u *upkeepStateStore) Start(pctx context.Context) error { + return u.StartOnce(UpkeepStateStoreServiceName, func() error { + if err := u.scanner.Start(pctx); err != nil { + return fmt.Errorf("failed to start scanner") + } + + u.lggr.Debug("Starting upkeep state store") + + u.threadCtrl.Go(func(ctx context.Context) { + ticker := time.NewTicker(utils.WithJitter(u.cleanCadence)) + defer ticker.Stop() + + flushTicker := newTickerFn(utils.WithJitter(flushCadence)) + defer flushTicker.Stop() + + for { + select { + case <-ticker.C: + if err := u.cleanup(ctx); err != nil { + u.lggr.Errorw("unable to clean old state values", "err", err) + } + ticker.Reset(utils.WithJitter(u.cleanCadence)) + case <-flushTicker.C: + u.flush(ctx) + flushTicker.Reset(utils.WithJitter(flushCadence)) + case <-ctx.Done(): + u.flush(ctx) + return + } + } + }) + return nil + }) +} + +func (u *upkeepStateStore) flush(ctx context.Context) { + u.mu.Lock() + cloneRecords := make([]persistedStateRecord, len(u.pendingRecords)) + copy(cloneRecords, u.pendingRecords) + u.pendingRecords = []persistedStateRecord{} + u.mu.Unlock() + + for i := 0; i < len(cloneRecords); i += u.batchSize { + end := i + u.batchSize + if end > len(cloneRecords) { + end = len(cloneRecords) + } + + batch := cloneRecords[i:end] + + u.sem <- struct{}{} + + go func() { + if err := u.orm.BatchInsertRecords(batch, pg.WithParentCtx(ctx)); err != nil { + u.lggr.Errorw("error inserting records", "err", err) + } + <-u.sem + }() + } +} + +// Close stops the service of pruning stale data; implements io.Closer +func (u *upkeepStateStore) Close() error { + return u.StopOnce(UpkeepStateStoreServiceName, func() error { + u.threadCtrl.Close() + return nil + }) +} + +func (u *upkeepStateStore) HealthReport() map[string]error { + return map[string]error{UpkeepStateStoreServiceName: u.Healthy()} +} + +// SelectByWorkIDs returns the current state of the upkeep for the provided ids. +// If an id is not found, the state is returned as StateUnknown. 
+// We first check the cache, and if any ids are missing, we fetch them from the scanner and DB. +func (u *upkeepStateStore) SelectByWorkIDs(ctx context.Context, workIDs ...string) ([]ocr2keepers.UpkeepState, error) { + states, missing := u.selectFromCache(workIDs...) + if len(missing) == 0 { + // all ids were found in the cache + return states, nil + } + if err := u.fetchPerformed(ctx, missing...); err != nil { + return nil, err + } + if err := u.fetchFromDB(ctx, missing...); err != nil { + return nil, err + } + + // at this point all values should be in the cache. if values are missing + // their state is indicated as unknown + states, _ = u.selectFromCache(workIDs...) + + return states, nil +} + +// SetUpkeepState updates the state of the upkeep. +// Currently we only store the state if the upkeep is ineligible. +// Performed events will be fetched on demand. +func (u *upkeepStateStore) SetUpkeepState(ctx context.Context, result ocr2keepers.CheckResult, _ ocr2keepers.UpkeepState) error { + if result.Eligible { + return nil + } + + return u.upsertStateRecord(ctx, result.WorkID, ocr2keepers.Ineligible, uint64(result.Trigger.BlockNumber), result.UpkeepID.BigInt(), result.IneligibilityReason) +} + +// upsertStateRecord inserts or updates a record for the provided +// check result. If an item already exists in the data store, the state and +// block are updated. 
+func (u *upkeepStateStore) upsertStateRecord(ctx context.Context, workID string, s ocr2keepers.UpkeepState, b uint64, upkeepID *big.Int, reason uint8) error { + u.mu.Lock() + defer u.mu.Unlock() + + record, ok := u.cache[workID] + if !ok { + record = &upkeepStateRecord{ + workID: workID, + addedAt: time.Now(), + } + } + + record.state = s + + u.cache[workID] = record + + u.pendingRecords = append(u.pendingRecords, persistedStateRecord{ + UpkeepID: ubig.New(upkeepID), + WorkID: record.workID, + CompletionState: uint8(record.state), + IneligibilityReason: reason, + InsertedAt: record.addedAt, + }) + + return nil +} + +// fetchPerformed fetches all performed logs from the scanner to populate the cache. +func (u *upkeepStateStore) fetchPerformed(ctx context.Context, workIDs ...string) error { + performed, err := u.scanner.ScanWorkIDs(ctx, workIDs...) + if err != nil { + return err + } + + if len(performed) > 0 { + u.lggr.Debugw("Fetched performed logs", "performed", len(performed)) + } + + u.mu.Lock() + defer u.mu.Unlock() + + for _, workID := range performed { + if _, ok := u.cache[workID]; !ok { + s := &upkeepStateRecord{ + workID: workID, + state: ocr2keepers.Performed, + addedAt: time.Now(), + } + + u.cache[workID] = s + } + } + + return nil +} + +// fetchFromDB fetches all upkeeps indicated as ineligible from the db to +// populate the cache. 
+func (u *upkeepStateStore) fetchFromDB(ctx context.Context, workIDs ...string) error { + states, err := u.orm.SelectStatesByWorkIDs(workIDs, pg.WithParentCtx(ctx)) + if err != nil { + return err + } + + u.mu.Lock() + defer u.mu.Unlock() + + for _, state := range states { + if _, ok := u.cache[state.WorkID]; !ok { + u.cache[state.WorkID] = &upkeepStateRecord{ + workID: state.WorkID, + state: ocr2keepers.UpkeepState(state.CompletionState), + addedAt: state.InsertedAt, + } + } + } + + return nil +} + +// selectFromCache returns all saved state values for the provided ids, +// returning stateNotFound for any ids that are not found. +// the second return value is true if all ids were found in the cache. +func (u *upkeepStateStore) selectFromCache(workIDs ...string) ([]ocr2keepers.UpkeepState, []string) { + u.mu.RLock() + defer u.mu.RUnlock() + + var missing []string + states := make([]ocr2keepers.UpkeepState, len(workIDs)) + for i, workID := range workIDs { + if state, ok := u.cache[workID]; ok { + states[i] = state.state + } else { + missing = append(missing, workID) + } + } + + return states, missing +} + +// cleanup removes any records that are older than the TTL from both cache and DB. +func (u *upkeepStateStore) cleanup(ctx context.Context) error { + u.cleanCache() + + return u.cleanDB(ctx) +} + +// cleanDB cleans up records in the DB that are older than the TTL. +func (u *upkeepStateStore) cleanDB(ctx context.Context) error { + tm := time.Now().Add(-1 * u.retention) + + return u.orm.DeleteExpired(tm, pg.WithParentCtx(ctx), pg.WithLongQueryTimeout()) +} + +// cleanupCache removes any records from the cache that are older than the TTL. 
+func (u *upkeepStateStore) cleanCache() { + u.mu.Lock() + defer u.mu.Unlock() + + for id, state := range u.cache { + if time.Since(state.addedAt) > u.retention { + delete(u.cache, id) + } + } +} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/store_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/store_test.go new file mode 100644 index 00000000..0d713b42 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate/store_test.go @@ -0,0 +1,603 @@ +package upkeepstate + +import ( + "context" + "fmt" + "math/big" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + ocr2keepers "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +func TestUpkeepStateStore(t *testing.T) { + tests := []struct { + name string + inserts []ocr2keepers.CheckResult + workIDsSelect []string + workIDsFromScanner []string + errScanner error + recordsFromDB []persistedStateRecord + errDB error + expected []ocr2keepers.UpkeepState + errored bool + }{ + { + name: "empty store", + }, + { + name: "save only ineligible states", + inserts: []ocr2keepers.CheckResult{ + { + UpkeepID: createUpkeepIDForTest(1), + WorkID: "0x1", + Eligible: false, + Trigger: ocr2keepers.Trigger{ + BlockNumber: ocr2keepers.BlockNumber(1), + }, + }, + { + UpkeepID: createUpkeepIDForTest(2), + WorkID: "ox2", + Eligible: true, + Trigger: ocr2keepers.Trigger{ + BlockNumber: ocr2keepers.BlockNumber(2), + }, + }, + }, + workIDsSelect: []string{"0x1", "0x2"}, + expected: []ocr2keepers.UpkeepState{ + ocr2keepers.Ineligible, + ocr2keepers.UnknownState, + }, 
+ }, + { + name: "fetch results from scanner", + inserts: []ocr2keepers.CheckResult{ + { + UpkeepID: createUpkeepIDForTest(1), + WorkID: "0x1", + Eligible: false, + Trigger: ocr2keepers.Trigger{ + BlockNumber: ocr2keepers.BlockNumber(1), + }, + }, + }, + workIDsSelect: []string{"0x1", "0x2"}, + workIDsFromScanner: []string{"0x2", "0x222"}, + expected: []ocr2keepers.UpkeepState{ + ocr2keepers.Ineligible, + ocr2keepers.Performed, + }, + }, + { + name: "fetch results from db", + inserts: []ocr2keepers.CheckResult{ + { + UpkeepID: createUpkeepIDForTest(1), + WorkID: "0x1", + Eligible: false, + Trigger: ocr2keepers.Trigger{ + BlockNumber: ocr2keepers.BlockNumber(1), + }, + }, + }, + workIDsSelect: []string{"0x1", "0x2", "0x3"}, + workIDsFromScanner: []string{"0x2", "0x222"}, + recordsFromDB: []persistedStateRecord{ + { + WorkID: "0x3", + CompletionState: 2, + BlockNumber: 2, + InsertedAt: time.Now(), + }, + }, + expected: []ocr2keepers.UpkeepState{ + ocr2keepers.Ineligible, + ocr2keepers.Performed, + ocr2keepers.Ineligible, + }, + }, + { + name: "unknown states", + inserts: []ocr2keepers.CheckResult{ + { + UpkeepID: createUpkeepIDForTest(1), + WorkID: "0x1", + Eligible: false, + Trigger: ocr2keepers.Trigger{ + BlockNumber: ocr2keepers.BlockNumber(1), + }, + }, + }, + workIDsSelect: []string{"0x2"}, + workIDsFromScanner: []string{}, + expected: []ocr2keepers.UpkeepState{ + ocr2keepers.UnknownState, + }, + }, + { + name: "scanner error", + inserts: []ocr2keepers.CheckResult{ + { + UpkeepID: createUpkeepIDForTest(1), + WorkID: "0x1", + Eligible: false, + Trigger: ocr2keepers.Trigger{ + BlockNumber: ocr2keepers.BlockNumber(1), + }, + }, + }, + workIDsSelect: []string{"0x1", "0x2"}, + workIDsFromScanner: []string{"0x2", "0x222"}, + errScanner: fmt.Errorf("test error"), + errored: true, + }, + { + name: "db error", + inserts: []ocr2keepers.CheckResult{ + { + UpkeepID: createUpkeepIDForTest(1), + WorkID: "0x1", + Eligible: false, + Trigger: ocr2keepers.Trigger{ + BlockNumber: 
ocr2keepers.BlockNumber(1), + }, + }, + }, + workIDsSelect: []string{"0x1", "0x2"}, + workIDsFromScanner: []string{"0x2", "0x222"}, + errDB: fmt.Errorf("test error"), + errored: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := testutils.Context(t) + lggr := logger.TestLogger(t) + + scanner := &mockScanner{} + scanner.addWorkID(tc.workIDsFromScanner...) + scanner.setErr(tc.errScanner) + + orm := &mockORM{} + orm.addRecords(tc.recordsFromDB...) + orm.setErr(tc.errDB) + + store := NewUpkeepStateStore(orm, lggr, scanner) + + for _, insert := range tc.inserts { + assert.NoError(t, store.SetUpkeepState(ctx, insert, ocr2keepers.Performed)) + } + + states, err := store.SelectByWorkIDs(ctx, tc.workIDsSelect...) + if tc.errored { + assert.Error(t, err) + return + } + assert.NoError(t, err) + + assert.Equal(t, len(tc.expected), len(states)) + for i, state := range states { + assert.Equal(t, tc.expected[i], state) + } + }) + } +} + +func TestUpkeepStateStore_SetSelectIntegration(t *testing.T) { + if testing.Short() { + t.Skip("database required for upkeep state store integration test") + } + + makeTestResult := func(id int64, workID string, eligible bool, block uint64) ocr2keepers.CheckResult { + uid := &ocr2keepers.UpkeepIdentifier{} + _ = uid.FromBigInt(big.NewInt(id)) + + return ocr2keepers.CheckResult{ + UpkeepID: *uid, + WorkID: workID, + Eligible: eligible, + Trigger: ocr2keepers.Trigger{ + BlockNumber: ocr2keepers.BlockNumber(block), + }, + } + } + + type storedValue struct { + result ocr2keepers.CheckResult + state ocr2keepers.UpkeepState + } + + tests := []struct { + name string + flushSize int + expectedWrites int + queryIDs []string + storedValues []storedValue + expected []ocr2keepers.UpkeepState + }{ + { + name: "querying non-stored workIDs on db with values returns unknown state results", + queryIDs: []string{"0x1", "0x2", "0x3", "0x4"}, + flushSize: 10, + expectedWrites: 1, + storedValues: []storedValue{ + {result: 
makeTestResult(1, "0x11", false, 1), state: ocr2keepers.Performed}, + {result: makeTestResult(2, "0x22", false, 1), state: ocr2keepers.Performed}, + {result: makeTestResult(3, "0x33", false, 1), state: ocr2keepers.Performed}, + {result: makeTestResult(4, "0x44", false, 1), state: ocr2keepers.Performed}, + }, + expected: []ocr2keepers.UpkeepState{ + ocr2keepers.UnknownState, + ocr2keepers.UnknownState, + ocr2keepers.UnknownState, + ocr2keepers.UnknownState, + }, + }, + { + name: "storing eligible values is a noop", + queryIDs: []string{"0x1", "0x2", "0x3", "0x4"}, + flushSize: 4, + expectedWrites: 1, + storedValues: []storedValue{ + {result: makeTestResult(9, "0x1", false, 1), state: ocr2keepers.Ineligible}, + {result: makeTestResult(10, "0x2", false, 1), state: ocr2keepers.Ineligible}, + {result: makeTestResult(11, "0x3", false, 1), state: ocr2keepers.Ineligible}, + {result: makeTestResult(12, "0x4", true, 1), state: ocr2keepers.Performed}, // gets inserted + }, + expected: []ocr2keepers.UpkeepState{ + ocr2keepers.Ineligible, + ocr2keepers.Ineligible, + ocr2keepers.Ineligible, + ocr2keepers.UnknownState, + }, + }, + { + name: "provided state on setupkeepstate is currently ignored for eligible check results", + queryIDs: []string{"0x1", "0x2"}, + flushSize: 1, + expectedWrites: 1, + storedValues: []storedValue{ + {result: makeTestResult(13, "0x1", true, 1), state: ocr2keepers.Ineligible}, + {result: makeTestResult(14, "0x2", false, 1), state: ocr2keepers.Performed}, // gets inserted + }, + expected: []ocr2keepers.UpkeepState{ + ocr2keepers.UnknownState, + ocr2keepers.Ineligible, + }, + }, + { + name: "provided state outside the flush batch isn't registered in the db", + queryIDs: []string{"0x1", "0x2", "0x3", "0x4", "0x5", "0x6", "0x7", "0x8"}, + flushSize: 3, + expectedWrites: 2, + storedValues: []storedValue{ + {result: makeTestResult(13, "0x1", true, 1), state: ocr2keepers.Ineligible}, + {result: makeTestResult(14, "0x2", false, 1), state: ocr2keepers.Performed}, 
// gets inserted + {result: makeTestResult(15, "0x3", true, 1), state: ocr2keepers.Ineligible}, + {result: makeTestResult(16, "0x4", false, 1), state: ocr2keepers.Performed}, // gets inserted + {result: makeTestResult(17, "0x5", true, 1), state: ocr2keepers.Ineligible}, + {result: makeTestResult(18, "0x6", false, 1), state: ocr2keepers.Performed}, // gets inserted + {result: makeTestResult(19, "0x7", true, 1), state: ocr2keepers.Ineligible}, + {result: makeTestResult(20, "0x8", false, 1), state: ocr2keepers.Performed}, // gets inserted + }, + expected: []ocr2keepers.UpkeepState{ + ocr2keepers.UnknownState, + ocr2keepers.Ineligible, + ocr2keepers.UnknownState, + ocr2keepers.Ineligible, + ocr2keepers.UnknownState, + ocr2keepers.Ineligible, + ocr2keepers.UnknownState, + ocr2keepers.Ineligible, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := testutils.Context(t) + + tickerCh := make(chan time.Time) + + oldNewTickerFn := newTickerFn + oldFlushSize := batchSize + newTickerFn = func(d time.Duration) *time.Ticker { + t := time.NewTicker(d) + t.C = tickerCh + return t + } + batchSize = test.flushSize + defer func() { + newTickerFn = oldNewTickerFn + batchSize = oldFlushSize + }() + + lggr, observedLogs := logger.TestLoggerObserved(t, zapcore.ErrorLevel) + chainID := testutils.FixtureChainID + db := pgtest.NewSqlxDB(t) + realORM := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + insertFinished := make(chan struct{}, 1) + orm := &wrappedORM{ + BatchInsertRecordsFn: func(records []persistedStateRecord, opt ...pg.QOpt) error { + err := realORM.BatchInsertRecords(records, opt...) + insertFinished <- struct{}{} + return err + }, + SelectStatesByWorkIDsFn: func(strings []string, opt ...pg.QOpt) ([]persistedStateRecord, error) { + return realORM.SelectStatesByWorkIDs(strings, opt...) + }, + DeleteExpiredFn: func(t time.Time, opt ...pg.QOpt) error { + return realORM.DeleteExpired(t, opt...) 
+ }, + } + scanner := &mockScanner{} + store := NewUpkeepStateStore(orm, lggr, scanner) + + servicetest.Run(t, store) + + t.Cleanup(func() { + t.Log("cleaning up database") + + if _, err := db.Exec(`DELETE FROM evm.upkeep_states`); err != nil { + t.Logf("error in cleanup: %s", err) + } + }) + + for _, insert := range test.storedValues { + require.NoError(t, store.SetUpkeepState(ctx, insert.result, insert.state), "storing states should not produce an error") + } + + tickerCh <- time.Now() + + // if this test inserts data, wait for the insert to complete before proceeding + for i := 0; i < test.expectedWrites; i++ { + <-insertFinished + } + + // empty the cache before doing selects to force a db lookup + store.cache = make(map[string]*upkeepStateRecord) + + states, err := store.SelectByWorkIDs(ctx, test.queryIDs...) + + require.NoError(t, err, "no error expected from selecting states") + + assert.Equal(t, test.expected, states, "upkeep state values should match expected") + + observedLogs.TakeAll() + + require.Equal(t, 0, observedLogs.Len()) + }) + } +} + +func TestUpkeepStateStore_emptyDB(t *testing.T) { + t.Run("querying non-stored workIDs on empty db returns unknown state results", func(t *testing.T) { + lggr, observedLogs := logger.TestLoggerObserved(t, zapcore.ErrorLevel) + chainID := testutils.FixtureChainID + db := pgtest.NewSqlxDB(t) + realORM := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + insertFinished := make(chan struct{}, 1) + orm := &wrappedORM{ + BatchInsertRecordsFn: func(records []persistedStateRecord, opt ...pg.QOpt) error { + err := realORM.BatchInsertRecords(records, opt...) + insertFinished <- struct{}{} + return err + }, + SelectStatesByWorkIDsFn: func(strings []string, opt ...pg.QOpt) ([]persistedStateRecord, error) { + return realORM.SelectStatesByWorkIDs(strings, opt...) + }, + DeleteExpiredFn: func(t time.Time, opt ...pg.QOpt) error { + return realORM.DeleteExpired(t, opt...) 
+ }, + } + scanner := &mockScanner{} + store := NewUpkeepStateStore(orm, lggr, scanner) + + states, err := store.SelectByWorkIDs(testutils.Context(t), []string{"0x1", "0x2", "0x3", "0x4"}...) + assert.NoError(t, err) + assert.Equal(t, []ocr2keepers.UpkeepState{ + ocr2keepers.UnknownState, + ocr2keepers.UnknownState, + ocr2keepers.UnknownState, + ocr2keepers.UnknownState, + }, states) + + observedLogs.TakeAll() + + require.Equal(t, 0, observedLogs.Len()) + }) +} + +func TestUpkeepStateStore_Upsert(t *testing.T) { + db := pgtest.NewSqlxDB(t) + ctx := testutils.Context(t) + lggr := logger.TestLogger(t) + chainID := testutils.FixtureChainID + orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + + store := NewUpkeepStateStore(orm, lggr, &mockScanner{}) + + res := ocr2keepers.CheckResult{ + UpkeepID: createUpkeepIDForTest(1), + WorkID: "0x1", + Eligible: false, + Trigger: ocr2keepers.Trigger{ + BlockNumber: ocr2keepers.BlockNumber(1), + }, + } + require.NoError(t, store.SetUpkeepState(ctx, res, ocr2keepers.Performed)) + <-time.After(10 * time.Millisecond) + res.Trigger.BlockNumber = ocr2keepers.BlockNumber(2) + now := time.Now() + require.NoError(t, store.SetUpkeepState(ctx, res, ocr2keepers.Performed)) + + store.mu.Lock() + addedAt := store.cache["0x1"].addedAt + store.mu.Unlock() + + require.True(t, now.After(addedAt)) +} + +func TestUpkeepStateStore_Service(t *testing.T) { + ctx := testutils.Context(t) + orm := &mockORM{ + onDelete: func(tm time.Time) { + + }, + } + scanner := &mockScanner{} + + store := NewUpkeepStateStore(orm, logger.TestLogger(t), scanner) + + store.retention = 500 * time.Millisecond + store.cleanCadence = 100 * time.Millisecond + + servicetest.Run(t, store) + + // add a value to set up the test + require.NoError(t, store.SetUpkeepState(ctx, ocr2keepers.CheckResult{ + Eligible: false, + WorkID: "0x2", + Trigger: ocr2keepers.Trigger{ + BlockNumber: ocr2keepers.BlockNumber(1), + }, + }, ocr2keepers.Ineligible)) + + // allow one cycle of 
cleaning the cache + time.Sleep(110 * time.Millisecond) + + // select from store to ensure values still exist + values, err := store.SelectByWorkIDs(ctx, "0x2") + require.NoError(t, err, "no error from selecting states") + require.Equal(t, []ocr2keepers.UpkeepState{ocr2keepers.Ineligible}, values, "selected values should match expected") + + // wait longer than cache timeout + time.Sleep(700 * time.Millisecond) + + // select from store to ensure cached values were removed + values, err = store.SelectByWorkIDs(ctx, "0x2") + require.NoError(t, err, "no error from selecting states") + require.Equal(t, []ocr2keepers.UpkeepState{ocr2keepers.UnknownState}, values, "selected values should match expected") +} + +func createUpkeepIDForTest(v int64) ocr2keepers.UpkeepIdentifier { + uid := &ocr2keepers.UpkeepIdentifier{} + _ = uid.FromBigInt(big.NewInt(v)) + + return *uid +} + +type mockScanner struct { + lock sync.Mutex + workIDs []string + err error +} + +func (s *mockScanner) addWorkID(workIDs ...string) { + s.lock.Lock() + defer s.lock.Unlock() + + s.workIDs = append(s.workIDs, workIDs...) +} + +func (s *mockScanner) setErr(err error) { + s.lock.Lock() + defer s.lock.Unlock() + + s.err = err +} + +func (s *mockScanner) ScanWorkIDs(context.Context, ...string) ([]string, error) { + s.lock.Lock() + defer s.lock.Unlock() + + res := s.workIDs[:] + s.workIDs = nil + return res, s.err +} + +func (s *mockScanner) Start(context.Context) error { + return nil +} + +func (s *mockScanner) Close() error { + return nil +} + +type mockORM struct { + lock sync.Mutex + records []persistedStateRecord + lastPruneDepth time.Time + onDelete func(tm time.Time) + err error +} + +func (_m *mockORM) addRecords(records ...persistedStateRecord) { + _m.lock.Lock() + defer _m.lock.Unlock() + + _m.records = append(_m.records, records...) 
+} + +func (_m *mockORM) setErr(err error) { + _m.lock.Lock() + defer _m.lock.Unlock() + + _m.err = err +} + +func (_m *mockORM) BatchInsertRecords(state []persistedStateRecord, opts ...pg.QOpt) error { + return nil +} + +func (_m *mockORM) SelectStatesByWorkIDs(workIDs []string, opts ...pg.QOpt) ([]persistedStateRecord, error) { + _m.lock.Lock() + defer _m.lock.Unlock() + + res := _m.records[:] + _m.records = nil + + return res, _m.err +} + +func (_m *mockORM) DeleteExpired(tm time.Time, opts ...pg.QOpt) error { + _m.lock.Lock() + defer _m.lock.Unlock() + + _m.lastPruneDepth = tm + _m.onDelete(tm) + + return _m.err +} + +type wrappedORM struct { + BatchInsertRecordsFn func([]persistedStateRecord, ...pg.QOpt) error + SelectStatesByWorkIDsFn func([]string, ...pg.QOpt) ([]persistedStateRecord, error) + DeleteExpiredFn func(time.Time, ...pg.QOpt) error +} + +func (o *wrappedORM) BatchInsertRecords(r []persistedStateRecord, q ...pg.QOpt) error { + return o.BatchInsertRecordsFn(r, q...) +} + +func (o *wrappedORM) SelectStatesByWorkIDs(ids []string, q ...pg.QOpt) ([]persistedStateRecord, error) { + return o.SelectStatesByWorkIDsFn(ids, q...) +} + +func (o *wrappedORM) DeleteExpired(t time.Time, q ...pg.QOpt) error { + return o.DeleteExpiredFn(t, q...) 
+} diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go new file mode 100644 index 00000000..54151b61 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go @@ -0,0 +1,1002 @@ +package ocr2keeper_test + +import ( + "context" + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + "net/http" + "strings" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + gethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/umbracle/ethgo/abi" + + "github.com/goplugin/libocr/commontypes" + "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + "github.com/goplugin/libocr/offchainreporting2plus/ocr3confighelper" + ocrTypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-automation/pkg/v3/config" + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + automationForwarderLogic "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_forwarder_logic" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/basic_upkeep_contract" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/dummy_protocol_wrapper" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + registrylogica21 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_logic_a_wrapper_2_1" + registrylogicb21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_logic_b_wrapper_2_1" + registry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_triggered_streams_lookup_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_upkeep_counter_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_v3_aggregator_contract" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +func TestFilterNamesFromSpec21(t *testing.T) { + b := make([]byte, 20) + _, err := rand.Read(b) + require.NoError(t, err) + address := common.HexToAddress(hexutil.Encode(b)) + + spec := &job.OCR2OracleSpec{ + PluginType: types.OCR2Keeper, + ContractID: address.String(), // valid contract addr + } + + names, err := ocr2keeper.FilterNamesFromSpec21(spec) + require.NoError(t, err) + + assert.Len(t, names, 2) + assert.Equal(t, logpoller.FilterName("KeepersRegistry TransmitEventProvider", address), names[0]) + assert.Equal(t, logpoller.FilterName("KeeperRegistry Events", address), names[1]) + + spec = &job.OCR2OracleSpec{ + PluginType: types.OCR2Keeper, + ContractID: "0x5431", // invalid contract addr + } + _, err = 
ocr2keeper.FilterNamesFromSpec21(spec) + require.ErrorContains(t, err, "not a valid EIP55 formatted address") +} + +func TestIntegration_KeeperPluginConditionalUpkeep(t *testing.T) { + g := gomega.NewWithT(t) + lggr := logger.TestLogger(t) + + // setup blockchain + sergey := testutils.MustNewSimTransactor(t) // owns all the link + steve := testutils.MustNewSimTransactor(t) // registry owner + carrol := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(10000).ToInt()}, + steve.From: {Balance: assets.Ether(10000).ToInt()}, + carrol.From: {Balance: assets.Ether(10000).ToInt()}, + } + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } + + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() + + // Deploy registry + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) + require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) + registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) + + setupNodes(t, nodeKeys, registry, backend, steve) + + <-time.After(time.Second * 5) + + upkeeps := 1 + + _, err = linkToken.Transfer(sergey, carrol.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeeps+1)))) + 
require.NoError(t, err) + + // Register new upkeep + upkeepAddr, _, upkeepContract, err := basic_upkeep_contract.DeployBasicUpkeepContract(carrol, backend) + require.NoError(t, err) + registrationTx, err := registry.RegisterUpkeep(steve, upkeepAddr, 2_500_000, carrol.From, 0, []byte{}, []byte{}, []byte{}) + require.NoError(t, err) + backend.Commit() + upkeepID := getUpkeepIdFromTx21(t, registry, registrationTx, backend) + + // Fund the upkeep + _, err = linkToken.Transfer(sergey, carrol.From, oneHunEth) + require.NoError(t, err) + _, err = linkToken.Approve(carrol, registry.Address(), oneHunEth) + require.NoError(t, err) + _, err = registry.AddFunds(carrol, upkeepID, oneHunEth) + require.NoError(t, err) + backend.Commit() + + // Set upkeep to be performed + _, err = upkeepContract.SetBytesToSend(carrol, payload1) + require.NoError(t, err) + _, err = upkeepContract.SetShouldPerformUpkeep(carrol, true) + require.NoError(t, err) + backend.Commit() + + lggr.Infow("Upkeep registered and funded", "upkeepID", upkeepID.String()) + + // keeper job is triggered and payload is received + receivedBytes := func() []byte { + received, err2 := upkeepContract.ReceivedBytes(nil) + require.NoError(t, err2) + return received + } + g.Eventually(receivedBytes, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(payload1)) + + // change payload + _, err = upkeepContract.SetBytesToSend(carrol, payload2) + require.NoError(t, err) + _, err = upkeepContract.SetShouldPerformUpkeep(carrol, true) + require.NoError(t, err) + + // observe 2nd job run and received payload changes + g.Eventually(receivedBytes, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(payload2)) +} + +func TestIntegration_KeeperPluginLogUpkeep(t *testing.T) { + g := gomega.NewWithT(t) + + // setup blockchain + sergey := testutils.MustNewSimTransactor(t) // owns all the link + steve := testutils.MustNewSimTransactor(t) // registry owner + carrol := testutils.MustNewSimTransactor(t) 
// upkeep owner + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(10000).ToInt()}, + steve.From: {Balance: assets.Ether(10000).ToInt()}, + carrol.From: {Balance: assets.Ether(10000).ToInt()}, + } + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } + + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() + + // Deploy registry + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) + require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) + + registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) + setupNodes(t, nodeKeys, registry, backend, steve) + upkeeps := 1 + + _, err = linkToken.Transfer(sergey, carrol.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeeps+1)))) + require.NoError(t, err) + + backend.Commit() + + ids, addrs, contracts := deployUpkeeps(t, backend, carrol, steve, linkToken, registry, upkeeps) + require.Equal(t, upkeeps, len(ids)) + require.Equal(t, len(ids), len(contracts)) + require.Equal(t, len(ids), len(addrs)) + + backend.Commit() + + emits := 1 + go emitEvents(testutils.Context(t), t, emits, contracts, carrol, func() { + backend.Commit() + }) + + listener, done := listenPerformed(t, backend, registry, ids, 
int64(1)) + g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) + done() + + t.Run("recover logs", func(t *testing.T) { + addr, contract := addrs[0], contracts[0] + upkeepID := registerUpkeep(t, registry, addr, carrol, steve, backend) + backend.Commit() + t.Logf("Registered new upkeep %s for address %s", upkeepID.String(), addr.String()) + // Emit 100 logs in a burst + recoverEmits := 100 + i := 0 + emitEvents(testutils.Context(t), t, 100, []*log_upkeep_counter_wrapper.LogUpkeepCounter{contract}, carrol, func() { + i++ + if i%(recoverEmits/4) == 0 { + backend.Commit() + time.Sleep(time.Millisecond * 250) // otherwise we get "invalid transaction nonce" errors + } + }) + + beforeDummyBlocks := backend.Blockchain().CurrentBlock().Number.Uint64() + + // Mine enough blocks to ensure these logs don't fall into log provider range + dummyBlocks := 500 + for i := 0; i < dummyBlocks; i++ { + backend.Commit() + time.Sleep(time.Millisecond * 10) + } + + t.Logf("Mined %d blocks, waiting for logs to be recovered", dummyBlocks) + + listener, done := listenPerformedN(t, backend, registry, ids, int64(beforeDummyBlocks), recoverEmits) + g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) + done() + }) +} + +func TestIntegration_KeeperPluginLogUpkeep_Retry(t *testing.T) { + g := gomega.NewWithT(t) + + // setup blockchain + linkOwner := testutils.MustNewSimTransactor(t) // owns all the link + registryOwner := testutils.MustNewSimTransactor(t) // registry owner + upkeepOwner := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, + registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, + upkeepOwner.From: {Balance: assets.Ether(10000).ToInt()}, + } + + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + 
nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } + + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() + + // Deploy registry + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) + require.NoError(t, err) + + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) + + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) + + registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr) + + _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner) + + const upkeepCount = 10 + const mercuryFailCount = upkeepCount * 3 * 2 + + // testing with the mercury server involves mocking responses. currently, + // there is not a way to connect a mercury call to an upkeep id (though we + // could add custom headers) so the test must be fairly basic and just + // count calls before switching to successes + var ( + mu sync.Mutex + count int + ) + + mercuryServer.RegisterHandler(func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + defer mu.Unlock() + + count++ + + _ = r.ParseForm() + + t.Logf("MercuryHTTPServe:RequestURI: %s", r.RequestURI) + + for key, value := range r.Form { + t.Logf("MercuryHTTPServe:FormValue: key: %s; value: %s;", key, value) + } + + // the streams lookup retries against the remote server 3 times before + // returning a result as retryable. 
+ // the simulation here should force the streams lookup process to return + // retryable 2 times. + // the total count of failures should be (upkeepCount * 3 * tryCount) + if count <= mercuryFailCount { + w.WriteHeader(http.StatusNotFound) + + return + } + + // start sending success messages + output := `{"pluginBlob":"0x0001c38d71fed6c320b90e84b6f559459814d068e2a1700adc931ca9717d4fe70000000000000000000000000000000000000000000000000000000001a80b52b4bf1233f9cb71144a253a1791b202113c4ab4a92fa1b176d684b4959666ff8200000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001004254432d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000645570be000000000000000000000000000000000000000000000000000002af2b818dc5000000000000000000000000000000000000000000000000000002af2426faf3000000000000000000000000000000000000000000000000000002af32dc209700000000000000000000000000000000000000000000000000000000012130f8df0a9745bb6ad5e2df605e158ba8ad8a33ef8a0acf9851f0f01668a3a3f2b68600000000000000000000000000000000000000000000000000000000012130f60000000000000000000000000000000000000000000000000000000000000002c4a7958dce105089cf5edb68dad7dcfe8618d7784eb397f97d5a5fade78c11a58275aebda478968e545f7e3657aba9dcbe8d44605e4c6fde3e24edd5e22c94270000000000000000000000000000000000000000000000000000000000000002459c12d33986018a8959566d145225f0c4a4e61a9a3f50361ccff397899314f0018162cf10cd89897635a0bb62a822355bd199d09f4abe76e4d05261bb44733d"}` + + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(output)) + }) + + defer mercuryServer.Stop() + + _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) + require.NoError(t, err) + + 
backend.Commit() + + feeds, err := newFeedLookupUpkeepController(backend, registryOwner) + require.NoError(t, err, "no error expected from creating a feed lookup controller") + + // deploy multiple upkeeps that listen to a log emitter and need to be + // performed for each log event + _ = feeds.DeployUpkeeps(t, backend, upkeepOwner, upkeepCount) + _ = feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken) + _ = feeds.EnableMercury(t, backend, registry, registryOwner) + _ = feeds.VerifyEnv(t, backend, registry, registryOwner) + + // start emitting events in a separate go-routine + // feed lookup relies on a single contract event log to perform multiple + // listener contracts + go func() { + // only 1 event is necessary to make all 10 upkeeps eligible + _ = feeds.EmitEvents(t, backend, 1, func() { + // pause per emit for expected block production time + time.Sleep(3 * time.Second) + }) + }() + + listener, done := listenPerformed(t, backend, registry, feeds.UpkeepsIds(), int64(1)) + g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) + + done() +} + +func emitEvents(ctx context.Context, t *testing.T, n int, contracts []*log_upkeep_counter_wrapper.LogUpkeepCounter, carrol *bind.TransactOpts, afterEmit func()) { + for i := 0; i < n && ctx.Err() == nil; i++ { + for _, contract := range contracts { + // t.Logf("[automation-ocr3 | EvmRegistry] calling upkeep contracts to emit events. 
run: %d; contract addr: %s", i+1, contract.Address().Hex()) + _, err := contract.Start(carrol) + require.NoError(t, err) + } + afterEmit() + } +} + +func mapListener(m *sync.Map, n int) func() bool { + return func() bool { + count := 0 + m.Range(func(key, value interface{}) bool { + count += value.(int) + return true + }) + return count > n + } +} + +func listenPerformedN(t *testing.T, backend *backends.SimulatedBackend, registry *iregistry21.IKeeperRegistryMaster, ids []*big.Int, startBlock int64, count int) (func() bool, func()) { + cache := &sync.Map{} + ctx, cancel := context.WithCancel(testutils.Context(t)) + start := startBlock + + go func() { + for ctx.Err() == nil { + currentBlock := backend.Blockchain().CurrentBlock().Number.Uint64() + + success := make([]bool, len(ids)) + for i := range success { + success[i] = true + } + + iter, err := registry.FilterUpkeepPerformed(&bind.FilterOpts{ + Start: uint64(start), + End: ¤tBlock, + Context: ctx, + }, ids, success) + + if ctx.Err() != nil { + return + } + + require.NoError(t, err) + + for iter.Next() { + if iter.Event != nil { + t.Logf("[automation-ocr3 | EvmRegistry] upkeep performed event emitted for id %s", iter.Event.Id.String()) + + //cache.Store(iter.Event.Id.String(), true) + count, ok := cache.Load(iter.Event.Id.String()) + if !ok { + cache.Store(iter.Event.Id.String(), 1) + continue + } + countI := count.(int) + cache.Store(iter.Event.Id.String(), countI+1) + } + } + + require.NoError(t, iter.Close()) + + time.Sleep(time.Second) + } + }() + + return mapListener(cache, count), cancel +} + +func listenPerformed(t *testing.T, backend *backends.SimulatedBackend, registry *iregistry21.IKeeperRegistryMaster, ids []*big.Int, startBlock int64) (func() bool, func()) { + return listenPerformedN(t, backend, registry, ids, startBlock, 0) +} + +func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IKeeperRegistryMaster, backend *backends.SimulatedBackend, usr *bind.TransactOpts) ([]Node, 
*SimulatedMercuryServer) { + lggr := logger.TestLogger(t) + mServer := NewSimulatedMercuryServer() + mServer.Start() + + // Setup bootstrap + oracle nodes + bootstrapNodePort := freeport.GetOne(t) + appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := setupNode(t, bootstrapNodePort, nodeKeys[0], backend, nil, mServer) + bootstrapNode := Node{ + appBootstrap, bootstrapTransmitter, bootstrapKb, + } + var ( + oracles []confighelper.OracleIdentityExtra + nodes []Node + ) + // Set up the minimum 4 oracles all funded + ports := freeport.GetN(t, 4) + for i := 0; i < 4; i++ { + app, peerID, transmitter, kb := setupNode(t, ports[i], nodeKeys[i+1], backend, []commontypes.BootstrapperLocator{ + // Supply the bootstrap IP and port as a V2 peer address + {PeerID: bootstrapPeerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapNodePort)}}, + }, mServer) + + nodes = append(nodes, Node{ + app, transmitter, kb, + }) + offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x")) + oracles = append(oracles, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: offchainPublicKey, + TransmitAccount: ocrTypes.Account(transmitter.String()), + OffchainPublicKey: kb.OffchainPublicKey(), + PeerID: peerID, + }, + ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(), + }) + } + // Add the bootstrap job + bootstrapNode.AddBootstrapJob(t, fmt.Sprintf(` + type = "bootstrap" + relay = "evm" + schemaVersion = 1 + name = "boot" + contractID = "%s" + contractConfigTrackerPollInterval = "15s" + + [relayConfig] + chainID = 1337 + `, registry.Address())) + + // Add OCR jobs + for i, node := range nodes { + node.AddJob(t, fmt.Sprintf(` + type = "offchainreporting2" + pluginType = "ocr2automation" + relay = "evm" + name = "ocr2keepers-%d" + schemaVersion = 1 + contractID = "%s" + contractConfigTrackerPollInterval = "15s" + ocrKeyBundleID = "%s" + transmitterID = "%s" + p2pv2Bootstrappers = [ + "%s" + ] + 
+ [relayConfig] + chainID = 1337 + + [pluginConfig] + maxServiceWorkers = 100 + cacheEvictionInterval = "1s" + mercuryCredentialName = "%s" + contractVersion = "v2.1" + `, i, registry.Address(), node.KeyBundle.ID(), node.Transmitter, fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), MercuryCredName)) + } + + // Setup config on contract + configType := abi.MustNewType("tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,uint32 maxPerformDataSize,uint32 maxRevertDataSize, uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,address[] registrars, address upkeepPrivilegeManager)") + onchainConfig, err := abi.Encode(map[string]interface{}{ + "paymentPremiumPPB": uint32(0), + "flatFeeMicroLink": uint32(0), + "checkGasLimit": uint32(6500000), + "stalenessSeconds": uint32(90000), + "gasCeilingMultiplier": uint16(2), + "minUpkeepSpend": uint32(0), + "maxPerformGas": uint32(5000000), + "maxCheckDataSize": uint32(5000), + "maxPerformDataSize": uint32(5000), + "maxRevertDataSize": uint32(5000), + "fallbackGasPrice": big.NewInt(60000000000), + "fallbackLinkPrice": big.NewInt(2000000000000000000), + "transcoder": testutils.NewAddress(), + "registrars": []common.Address{testutils.NewAddress()}, + "upkeepPrivilegeManager": usr.From, + }, configType) + require.NoError(t, err) + rawCfg, err := json.Marshal(config.OffchainConfig{ + PerformLockoutWindow: 100 * 12 * 1000, // ~100 block lockout (on goerli) + MinConfirmations: 1, + }) + if err != nil { + t.Logf("error creating off-chain config: %s", err) + t.FailNow() + } + + signers, transmitters, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsForTests( + 5*time.Second, // deltaProgress time.Duration, + 10*time.Second, // deltaResend time.Duration, + 100*time.Millisecond, // deltaInitial 
time.Duration, + 1000*time.Millisecond, // deltaRound time.Duration, + 40*time.Millisecond, // deltaGrace time.Duration, + 200*time.Millisecond, // deltaRequestCertifiedCommit time.Duration, + 30*time.Second, // deltaStage time.Duration, + uint64(50), // rMax uint8, + []int{1, 1, 1, 1}, // s []int, + oracles, // oracles []OracleIdentityExtra, + rawCfg, // reportingPluginConfig []byte, + 20*time.Millisecond, // maxDurationQuery time.Duration, + 1600*time.Millisecond, // maxDurationObservation time.Duration, + 20*time.Millisecond, // maxDurationShouldAcceptFinalizedReport time.Duration, + 20*time.Millisecond, // maxDurationShouldTransmitAcceptedReport time.Duration, + 1, // f int, + onchainConfig, // onchainConfig []byte, + ) + + require.NoError(t, err) + signerAddresses, err := evm.OnchainPublicKeyToAddress(signers) + require.NoError(t, err) + transmitterAddresses, err := accountsToAddress(transmitters) + require.NoError(t, err) + + lggr.Infow("Setting Config on Oracle Contract", + "signerAddresses", signerAddresses, + "transmitterAddresses", transmitterAddresses, + "threshold", threshold, + "onchainConfig", onchainConfig, + "encodedConfigVersion", offchainConfigVersion, + "offchainConfig", offchainConfig, + ) + _, err = registry.SetConfig( + usr, + signerAddresses, + transmitterAddresses, + threshold, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ) + require.NoError(t, err) + backend.Commit() + + return nodes, mServer +} + +func deployUpkeeps(t *testing.T, backend *backends.SimulatedBackend, carrol, steve *bind.TransactOpts, linkToken *link_token_interface.LinkToken, registry *iregistry21.IKeeperRegistryMaster, n int) ([]*big.Int, []common.Address, []*log_upkeep_counter_wrapper.LogUpkeepCounter) { + ids := make([]*big.Int, n) + addrs := make([]common.Address, n) + contracts := make([]*log_upkeep_counter_wrapper.LogUpkeepCounter, n) + for i := 0; i < n; i++ { + backend.Commit() + time.Sleep(1 * time.Second) + upkeepAddr, _, upkeepContract, err := 
log_upkeep_counter_wrapper.DeployLogUpkeepCounter( + carrol, backend, + big.NewInt(100000), + ) + require.NoError(t, err) + + upkeepID := registerUpkeep(t, registry, upkeepAddr, carrol, steve, backend) + + // Fund the upkeep + _, err = linkToken.Approve(carrol, registry.Address(), oneHunEth) + require.NoError(t, err) + _, err = registry.AddFunds(carrol, upkeepID, oneHunEth) + require.NoError(t, err) + backend.Commit() + + ids[i] = upkeepID + contracts[i] = upkeepContract + addrs[i] = upkeepAddr + } + return ids, addrs, contracts +} + +func registerUpkeep(t *testing.T, registry *iregistry21.IKeeperRegistryMaster, upkeepAddr common.Address, carrol, steve *bind.TransactOpts, backend *backends.SimulatedBackend) *big.Int { + logTriggerConfigType := abi.MustNewType("tuple(address contractAddress, uint8 filterSelector, bytes32 topic0, bytes32 topic1, bytes32 topic2, bytes32 topic3)") + logTriggerConfig, err := abi.Encode(map[string]interface{}{ + "contractAddress": upkeepAddr, + "filterSelector": 0, // no indexed topics filtered + "topic0": "0x3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d", // event sig for Trigger() + "topic1": "0x", + "topic2": "0x", + "topic3": "0x", + }, logTriggerConfigType) + require.NoError(t, err) + + registrationTx, err := registry.RegisterUpkeep(steve, upkeepAddr, 2_500_000, carrol.From, 1, []byte{}, logTriggerConfig, []byte{}) + require.NoError(t, err) + backend.Commit() + upkeepID := getUpkeepIdFromTx21(t, registry, registrationTx, backend) + + return upkeepID +} + +func deployKeeper21Registry( + t *testing.T, + auth *bind.TransactOpts, + backend *backends.SimulatedBackend, + linkAddr, linkFeedAddr, + gasFeedAddr common.Address, +) *iregistry21.IKeeperRegistryMaster { + automationForwarderLogicAddr, _, _, err := automationForwarderLogic.DeployAutomationForwarderLogic(auth, backend) + require.NoError(t, err) + backend.Commit() + registryLogicBAddr, _, _, err := registrylogicb21.DeployKeeperRegistryLogicB( + auth, + backend, + 
0, // Payment model + linkAddr, + linkFeedAddr, + gasFeedAddr, + automationForwarderLogicAddr, + ) + require.NoError(t, err) + backend.Commit() + + registryLogicAAddr, _, _, err := registrylogica21.DeployKeeperRegistryLogicA( + auth, + backend, + registryLogicBAddr, + ) + require.NoError(t, err) + backend.Commit() + + registryAddr, _, _, err := registry21.DeployKeeperRegistry( + auth, + backend, + registryLogicAAddr, + ) + require.NoError(t, err) + backend.Commit() + + registryMaster, err := iregistry21.NewIKeeperRegistryMaster(registryAddr, backend) + require.NoError(t, err) + + return registryMaster +} + +func getUpkeepIdFromTx21(t *testing.T, registry *iregistry21.IKeeperRegistryMaster, registrationTx *gethtypes.Transaction, backend *backends.SimulatedBackend) *big.Int { + receipt, err := backend.TransactionReceipt(testutils.Context(t), registrationTx.Hash()) + require.NoError(t, err) + parsedLog, err := registry.ParseUpkeepRegistered(*receipt.Logs[0]) + require.NoError(t, err) + return parsedLog.Id +} + +// ------- below this line could be added to a test helpers package +type registerAndFundFunc func(*testing.T, common.Address, *bind.TransactOpts, uint8, []byte) *big.Int + +func registerAndFund( + registry *iregistry21.IKeeperRegistryMaster, + registryOwner *bind.TransactOpts, + backend *backends.SimulatedBackend, + linkToken *link_token_interface.LinkToken, +) registerAndFundFunc { + return func(t *testing.T, upkeepAddr common.Address, upkeepOwner *bind.TransactOpts, trigger uint8, config []byte) *big.Int { + // register the upkeep on the host registry contract + registrationTx, err := registry.RegisterUpkeep( + registryOwner, + upkeepAddr, + 2_500_000, + upkeepOwner.From, + trigger, + []byte{}, + config, + []byte{}, + ) + require.NoError(t, err) + + backend.Commit() + + receipt, err := backend.TransactionReceipt(testutils.Context(t), registrationTx.Hash()) + require.NoError(t, err) + + parsedLog, err := registry.ParseUpkeepRegistered(*receipt.Logs[0]) + 
require.NoError(t, err) + + upkeepID := parsedLog.Id + + // Fund the upkeep + _, err = linkToken.Approve(upkeepOwner, registry.Address(), oneHunEth) + require.NoError(t, err) + + _, err = registry.AddFunds(upkeepOwner, upkeepID, oneHunEth) + require.NoError(t, err) + + backend.Commit() + + return upkeepID + } +} + +type feedLookupUpkeepController struct { + // address for dummy protocol + logSrcAddr common.Address + // dummy protocol is a log event source + protocol *dummy_protocol_wrapper.DummyProtocol + protocolOwner *bind.TransactOpts + // log trigger listener contracts react to logs produced from protocol + count int + upkeepIds []*big.Int + addresses []common.Address + contracts []*log_triggered_streams_lookup_wrapper.LogTriggeredStreamsLookup + contractsOwner *bind.TransactOpts +} + +func newFeedLookupUpkeepController( + backend *backends.SimulatedBackend, + protocolOwner *bind.TransactOpts, +) (*feedLookupUpkeepController, error) { + addr, _, contract, err := dummy_protocol_wrapper.DeployDummyProtocol(protocolOwner, backend) + if err != nil { + return nil, err + } + + backend.Commit() + + return &feedLookupUpkeepController{ + logSrcAddr: addr, + protocol: contract, + protocolOwner: protocolOwner, + }, nil +} + +func (c *feedLookupUpkeepController) DeployUpkeeps( + t *testing.T, + backend *backends.SimulatedBackend, + owner *bind.TransactOpts, + count int, +) error { + addresses := make([]common.Address, count) + contracts := make([]*log_triggered_streams_lookup_wrapper.LogTriggeredStreamsLookup, count) + + // deploy n upkeep contracts + for x := 0; x < count; x++ { + addr, _, contract, err := log_triggered_streams_lookup_wrapper.DeployLogTriggeredStreamsLookup( + owner, + backend, + false, + false, + ) + + if err != nil { + require.NoError(t, err, "test dependent on contract deployment") + + return err + } + + addresses[x] = addr + contracts[x] = contract + } + + backend.Commit() + + c.count = count + c.addresses = addresses + c.contracts = contracts + 
c.contractsOwner = owner + + return nil +} + +func (c *feedLookupUpkeepController) RegisterAndFund( + t *testing.T, + registry *iregistry21.IKeeperRegistryMaster, + registryOwner *bind.TransactOpts, + backend *backends.SimulatedBackend, + linkToken *link_token_interface.LinkToken, +) error { + ids := make([]*big.Int, len(c.contracts)) + + t.Logf("address: %s", c.logSrcAddr.Hex()) + + logTriggerConfigType := abi.MustNewType("tuple(address contractAddress, uint8 filterSelector, bytes32 topic0, bytes32 topic1, bytes32 topic2, bytes32 topic3)") + config, err := abi.Encode(map[string]interface{}{ + "contractAddress": c.logSrcAddr, + "filterSelector": 0, // no indexed topics filtered + "topic0": "0xd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd", // LimitOrderExecuted event for dummy protocol + "topic1": "0x", + "topic2": "0x", + "topic3": "0x", + }, logTriggerConfigType) + + require.NoError(t, err) + + registerFunc := registerAndFund(registry, registryOwner, backend, linkToken) + + for x := range c.contracts { + ids[x] = registerFunc(t, c.addresses[x], c.contractsOwner, 1, config) + } + + c.upkeepIds = ids + + return nil +} + +func (c *feedLookupUpkeepController) EnableMercury( + t *testing.T, + backend *backends.SimulatedBackend, + registry *iregistry21.IKeeperRegistryMaster, + registryOwner *bind.TransactOpts, +) error { + adminBytes, _ := json.Marshal(streams.UpkeepPrivilegeConfig{ + MercuryEnabled: true, + }) + + ctx := testutils.Context(t) + for _, id := range c.upkeepIds { + if _, err := registry.SetUpkeepPrivilegeConfig(registryOwner, id, adminBytes); err != nil { + require.NoError(t, err) + + return err + } + + callOpts := &bind.CallOpts{ + Pending: true, + From: registryOwner.From, + Context: ctx, + } + + bts, err := registry.GetUpkeepPrivilegeConfig(callOpts, id) + if err != nil { + require.NoError(t, err) + + return err + } + + var checkBytes streams.UpkeepPrivilegeConfig + if err := json.Unmarshal(bts, &checkBytes); err != nil { + 
require.NoError(t, err) + + return err + } + + require.True(t, checkBytes.MercuryEnabled) + } + + bl, _ := backend.BlockByHash(testutils.Context(t), backend.Commit()) + t.Logf("block number after mercury enabled: %d", bl.NumberU64()) + + return nil +} + +func (c *feedLookupUpkeepController) VerifyEnv( + t *testing.T, + backend *backends.SimulatedBackend, + registry *iregistry21.IKeeperRegistryMaster, + registryOwner *bind.TransactOpts, +) error { + t.Log("verifying number of active upkeeps") + + ids, err := registry.GetActiveUpkeepIDs(&bind.CallOpts{ + Context: testutils.Context(t), + From: registryOwner.From, + }, big.NewInt(0), big.NewInt(100)) + + require.NoError(t, err) + require.Len(t, ids, c.count, "active upkeep ids does not match count") + require.Len(t, ids, len(c.upkeepIds)) + + t.Log("verifying total number of contracts") + require.Len(t, c.contracts, len(c.upkeepIds), "one contract for each upkeep id expected") + + // call individual contracts to see that they revert + for _, contract := range c.contracts { + _, err := contract.CheckLog(c.contractsOwner, log_triggered_streams_lookup_wrapper.Log{ + Index: big.NewInt(0), + Timestamp: big.NewInt(123), + TxHash: common.HexToHash("0x1"), + BlockNumber: big.NewInt(0), + BlockHash: common.HexToHash("0x14"), + Source: common.HexToAddress("0x2"), + Topics: [][32]byte{ + common.HexToHash("0xd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd"), // matches executedSig and should result in a feedlookup revert + common.HexToHash("0x"), + common.HexToHash("0x"), + common.HexToHash("0x"), + }, + Data: []byte{}, + }, []byte("0x")) + + require.Error(t, err, "check log contract call should revert: %s", err) + } + + return nil +} + +func (c *feedLookupUpkeepController) EmitEvents( + t *testing.T, + backend *backends.SimulatedBackend, + count int, + afterEmit func(), +) error { + ctx := testutils.Context(t) + + for i := 0; i < count && ctx.Err() == nil; i++ { + _, err := 
c.protocol.ExecuteLimitOrder(c.protocolOwner, big.NewInt(1000), big.NewInt(10000), c.logSrcAddr) + require.NoError(t, err, "no error expected from limit order exec") + + if err != nil { + return err + } + + backend.Commit() + + // verify event was emitted + block, _ := backend.BlockByHash(ctx, backend.Commit()) + t.Logf("block number after emit event: %d", block.NumberU64()) + + iter, _ := c.protocol.FilterLimitOrderExecuted( + &bind.FilterOpts{ + Context: testutils.Context(t), + Start: block.NumberU64() - 1, + }, + []*big.Int{big.NewInt(1000)}, + []*big.Int{big.NewInt(10000)}, + []common.Address{c.logSrcAddr}, + ) + + var eventEmitted bool + for iter.Next() { + if iter.Event != nil { + eventEmitted = true + } + } + + require.True(t, eventEmitted, "event expected on backend") + if !eventEmitted { + return fmt.Errorf("event was not emitted") + } + + afterEmit() + } + + return nil +} + +func (c *feedLookupUpkeepController) UpkeepsIds() []*big.Int { + return c.upkeepIds +} diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_test.go new file mode 100644 index 00000000..87f4b83a --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/integration_test.go @@ -0,0 +1,782 @@ +package ocr2keeper_test + +import ( + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + gethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/onsi/gomega" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + 
"github.com/umbracle/ethgo/abi" + + "github.com/goplugin/libocr/commontypes" + "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + ocrTypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-automation/pkg/v2/config" + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_forwarder" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/basic_upkeep_contract" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_logic2_0" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_v3_aggregator_contract" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/keystest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper" + 
"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrbootstrap" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +const ( + MercuryCredName = "cred1" +) + +var ( + oneEth = big.NewInt(1000000000000000000) + oneHunEth = big.NewInt(0).Mul(oneEth, big.NewInt(100)) + + payload1 = common.Hex2Bytes("1234") + payload2 = common.Hex2Bytes("ABCD") +) + +func deployKeeper20Registry( + t *testing.T, + auth *bind.TransactOpts, + backend *backends.SimulatedBackend, + linkAddr, linkFeedAddr, + gasFeedAddr common.Address, +) *keeper_registry_wrapper2_0.KeeperRegistry { + logicAddr, _, _, err := keeper_registry_logic2_0.DeployKeeperRegistryLogic( + auth, + backend, + 0, // Payment model + linkAddr, + linkFeedAddr, + gasFeedAddr) + require.NoError(t, err) + backend.Commit() + + regAddr, _, _, err := keeper_registry_wrapper2_0.DeployKeeperRegistry( + auth, + backend, + logicAddr, + ) + require.NoError(t, err) + backend.Commit() + + registry, err := keeper_registry_wrapper2_0.NewKeeperRegistry(regAddr, backend) + require.NoError(t, err) + + return registry +} + +func setupNode( + t *testing.T, + port int, + nodeKey ethkey.KeyV2, + backend *backends.SimulatedBackend, + p2pV2Bootstrappers []commontypes.BootstrapperLocator, + mercury MercuryEndpoint, +) (plugin.Application, string, common.Address, ocr2key.KeyBundle) { + p2pKey := keystest.NewP2PKeyV2(t) + p2paddresses := []string{fmt.Sprintf("127.0.0.1:%d", port)} + cfg, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Feature.LogPoller = ptr(true) + + c.OCR.Enabled = ptr(false) + c.OCR2.Enabled = ptr(true) + + c.P2P.PeerID = ptr(p2pKey.PeerID()) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.DeltaDial = commonconfig.MustNewDuration(500 * time.Millisecond) + c.P2P.V2.DeltaReconcile = commonconfig.MustNewDuration(5 * time.Second) + c.P2P.V2.AnnounceAddresses = &p2paddresses + 
c.P2P.V2.ListenAddresses = &p2paddresses + if len(p2pV2Bootstrappers) > 0 { + c.P2P.V2.DefaultBootstrappers = &p2pV2Bootstrappers + } + + c.EVM[0].Transactions.ForwardersEnabled = ptr(true) + c.EVM[0].GasEstimator.Mode = ptr("FixedPrice") + s.Mercury.Credentials = map[string]toml.MercuryCredentials{ + MercuryCredName: { + LegacyURL: models.MustSecretURL(mercury.URL()), + URL: models.MustSecretURL(mercury.URL()), + Username: models.NewSecret(mercury.Username()), + Password: models.NewSecret(mercury.Password()), + }, + } + }) + + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, cfg, backend, nodeKey, p2pKey) + kb, err := app.GetKeyStore().OCR2().Create(chaintype.EVM) + require.NoError(t, err) + + err = app.Start(testutils.Context(t)) + require.NoError(t, err) + + t.Cleanup(func() { + assert.NoError(t, app.Stop()) + }) + + return app, p2pKey.PeerID().Raw(), nodeKey.Address, kb +} + +type Node struct { + App plugin.Application + Transmitter common.Address + KeyBundle ocr2key.KeyBundle +} + +func (node *Node) AddJob(t *testing.T, spec string) { + c := node.App.GetConfig() + jb, err := validate.ValidatedOracleSpecToml(c.OCR2(), c.Insecure(), spec) + require.NoError(t, err) + err = node.App.AddJobV2(testutils.Context(t), &jb) + require.NoError(t, err) +} + +func (node *Node) AddBootstrapJob(t *testing.T, spec string) { + jb, err := ocrbootstrap.ValidatedBootstrapSpecToml(spec) + require.NoError(t, err) + err = node.App.AddJobV2(testutils.Context(t), &jb) + require.NoError(t, err) +} + +func accountsToAddress(accounts []ocrTypes.Account) (addresses []common.Address, err error) { + for _, signer := range accounts { + bytes, err := hexutil.Decode(string(signer)) + if err != nil { + return []common.Address{}, errors.Wrap(err, fmt.Sprintf("given address is not valid %s", signer)) + } + if len(bytes) != 20 { + return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer) + } + addresses = append(addresses, common.BytesToAddress(bytes)) + } 
+ return addresses, nil +} + +func getUpkeepIdFromTx(t *testing.T, registry *keeper_registry_wrapper2_0.KeeperRegistry, registrationTx *gethtypes.Transaction, backend *backends.SimulatedBackend) *big.Int { + receipt, err := backend.TransactionReceipt(testutils.Context(t), registrationTx.Hash()) + require.NoError(t, err) + parsedLog, err := registry.ParseUpkeepRegistered(*receipt.Logs[0]) + require.NoError(t, err) + return parsedLog.Id +} + +func TestIntegration_KeeperPluginBasic(t *testing.T) { + g := gomega.NewWithT(t) + lggr := logger.TestLogger(t) + + // setup blockchain + sergey := testutils.MustNewSimTransactor(t) // owns all the link + steve := testutils.MustNewSimTransactor(t) // registry owner + carrol := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(1000).ToInt()}, + steve.From: {Balance: assets.Ether(1000).ToInt()}, + carrol.From: {Balance: assets.Ether(1000).ToInt()}, + } + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } + + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() + + // Deploy contracts + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) + require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) 
+ registry := deployKeeper20Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) + + // Setup bootstrap + oracle nodes + bootstrapNodePort := freeport.GetOne(t) + appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := setupNode(t, bootstrapNodePort, nodeKeys[0], backend, nil, NewSimulatedMercuryServer()) + bootstrapNode := Node{ + appBootstrap, bootstrapTransmitter, bootstrapKb, + } + var ( + oracles []confighelper.OracleIdentityExtra + nodes []Node + ) + // Set up the minimum 4 oracles all funded + ports := freeport.GetN(t, 4) + for i := 0; i < 4; i++ { + app, peerID, transmitter, kb := setupNode(t, ports[i], nodeKeys[i+1], backend, []commontypes.BootstrapperLocator{ + // Supply the bootstrap IP and port as a V2 peer address + {PeerID: bootstrapPeerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapNodePort)}}, + }, NewSimulatedMercuryServer()) + + nodes = append(nodes, Node{ + app, transmitter, kb, + }) + offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x")) + oracles = append(oracles, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: offchainPublicKey, + TransmitAccount: ocrTypes.Account(transmitter.String()), + OffchainPublicKey: kb.OffchainPublicKey(), + PeerID: peerID, + }, + ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(), + }) + } + + // Add the bootstrap job + bootstrapNode.AddBootstrapJob(t, fmt.Sprintf(` + type = "bootstrap" + relay = "evm" + schemaVersion = 1 + name = "boot" + contractID = "%s" + contractConfigTrackerPollInterval = "15s" + + [relayConfig] + chainID = 1337 + `, registry.Address())) + + // Add OCR jobs + for i, node := range nodes { + node.AddJob(t, fmt.Sprintf(` + type = "offchainreporting2" + pluginType = "ocr2automation" + relay = "evm" + name = "ocr2keepers-%d" + schemaVersion = 1 + contractID = "%s" + contractConfigTrackerPollInterval = "15s" + ocrKeyBundleID = "%s" + transmitterID = "%s" + 
p2pv2Bootstrappers = [ + "%s" + ] + + [relayConfig] + chainID = 1337 + + [pluginConfig] + maxServiceWorkers = 100 + cacheEvictionInterval = "1s" + mercuryCredentialName = "%s" + `, i, registry.Address(), node.KeyBundle.ID(), node.Transmitter, fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), MercuryCredName)) + } + + // Setup config on contract + configType := abi.MustNewType("tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,address registrar)") + onchainConfig, err := abi.Encode(map[string]interface{}{ + "paymentPremiumPPB": uint32(0), + "flatFeeMicroLink": uint32(0), + "checkGasLimit": uint32(6500000), + "stalenessSeconds": uint32(90000), + "gasCeilingMultiplier": uint16(2), + "minUpkeepSpend": uint32(0), + "maxPerformGas": uint32(5000000), + "maxCheckDataSize": uint32(5000), + "maxPerformDataSize": uint32(5000), + "fallbackGasPrice": big.NewInt(60000000000), + "fallbackLinkPrice": big.NewInt(2000000000000000000), + "transcoder": testutils.NewAddress(), + "registrar": testutils.NewAddress(), + }, configType) + require.NoError(t, err) + + offC, err := json.Marshal(config.OffchainConfig{ + PerformLockoutWindow: 100 * 3 * 1000, // ~100 block lockout (on goerli) + MinConfirmations: 1, + }) + if err != nil { + t.Logf("error creating off-chain config: %s", err) + t.Fail() + } + + signers, transmitters, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests( + 10*time.Second, // deltaProgress time.Duration, + 10*time.Second, // deltaResend time.Duration, + 2500*time.Millisecond, // deltaRound time.Duration, + 40*time.Millisecond, // deltaGrace time.Duration, + 15*time.Second, // deltaStage time.Duration, + 3, // rMax uint8, + []int{1, 1, 1, 1}, + oracles, 
+ offC, // reportingPluginConfig []byte, + 20*time.Millisecond, // Max duration query + 1600*time.Millisecond, // Max duration observation + 800*time.Millisecond, + 20*time.Millisecond, + 20*time.Millisecond, + 1, // f + onchainConfig, + ) + require.NoError(t, err) + signerAddresses, err := evm.OnchainPublicKeyToAddress(signers) + require.NoError(t, err) + transmitterAddresses, err := accountsToAddress(transmitters) + require.NoError(t, err) + lggr.Infow("Setting Config on Oracle Contract", + "signerAddresses", signerAddresses, + "transmitterAddresses", transmitterAddresses, + "threshold", threshold, + "onchainConfig", onchainConfig, + "encodedConfigVersion", offchainConfigVersion, + "offchainConfig", offchainConfig, + ) + _, err = registry.SetConfig( + steve, + signerAddresses, + transmitterAddresses, + threshold, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ) + require.NoError(t, err) + backend.Commit() + + // Register new upkeep + upkeepAddr, _, upkeepContract, err := basic_upkeep_contract.DeployBasicUpkeepContract(carrol, backend) + require.NoError(t, err) + registrationTx, err := registry.RegisterUpkeep(steve, upkeepAddr, 2_500_000, carrol.From, []byte{}, []byte{}) + require.NoError(t, err) + backend.Commit() + upkeepID := getUpkeepIdFromTx(t, registry, registrationTx, backend) + + // Fund the upkeep + _, err = linkToken.Transfer(sergey, carrol.From, oneHunEth) + require.NoError(t, err) + _, err = linkToken.Approve(carrol, registry.Address(), oneHunEth) + require.NoError(t, err) + _, err = registry.AddFunds(carrol, upkeepID, oneHunEth) + require.NoError(t, err) + backend.Commit() + + // Set upkeep to be performed + _, err = upkeepContract.SetBytesToSend(carrol, payload1) + require.NoError(t, err) + _, err = upkeepContract.SetShouldPerformUpkeep(carrol, true) + require.NoError(t, err) + backend.Commit() + + lggr.Infow("Upkeep registered and funded", "upkeepID", upkeepID.String()) + + // keeper job is triggered and payload is received + 
receivedBytes := func() []byte { + received, err2 := upkeepContract.ReceivedBytes(nil) + require.NoError(t, err2) + return received + } + g.Eventually(receivedBytes, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(payload1)) + + // change payload + _, err = upkeepContract.SetBytesToSend(carrol, payload2) + require.NoError(t, err) + _, err = upkeepContract.SetShouldPerformUpkeep(carrol, true) + require.NoError(t, err) + + // observe 2nd job run and received payload changes + g.Eventually(receivedBytes, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(payload2)) +} + +func setupForwarderForNode( + t *testing.T, + app plugin.Application, + caller *bind.TransactOpts, + backend *backends.SimulatedBackend, + recipient common.Address, + linkAddr common.Address) common.Address { + + faddr, _, authorizedForwarder, err := authorized_forwarder.DeployAuthorizedForwarder(caller, backend, linkAddr, caller.From, recipient, []byte{}) + require.NoError(t, err) + + // set EOA as an authorized sender for the forwarder + _, err = authorizedForwarder.SetAuthorizedSenders(caller, []common.Address{recipient}) + require.NoError(t, err) + backend.Commit() + + // add forwarder address to be tracked in db + forwarderORM := forwarders.NewORM(app.GetSqlxDB(), logger.TestLogger(t), app.GetConfig().Database()) + chainID := ubig.Big(*backend.Blockchain().Config().ChainID) + _, err = forwarderORM.CreateForwarder(faddr, chainID) + require.NoError(t, err) + + chain, err := app.GetRelayers().LegacyEVMChains().Get((*big.Int)(&chainID).String()) + require.NoError(t, err) + fwdr, err := chain.TxManager().GetForwarderForEOA(recipient) + require.NoError(t, err) + require.Equal(t, faddr, fwdr) + + return faddr +} + +func TestIntegration_KeeperPluginForwarderEnabled(t *testing.T) { + g := gomega.NewWithT(t) + lggr := logger.TestLogger(t) + + // setup blockchain + sergey := testutils.MustNewSimTransactor(t) // owns all the link + steve := 
testutils.MustNewSimTransactor(t) // registry owner + carrol := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(1000).ToInt()}, + steve.From: {Balance: assets.Ether(1000).ToInt()}, + carrol.From: {Balance: assets.Ether(1000).ToInt()}, + } + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } + + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 6*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() + + // Deploy contracts + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) + require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) + registry := deployKeeper20Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) + + effectiveTransmitters := make([]common.Address, 0) + // Setup bootstrap + oracle nodes + bootstrapNodePort := freeport.GetOne(t) + appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := setupNode(t, bootstrapNodePort, nodeKeys[0], backend, nil, NewSimulatedMercuryServer()) + + bootstrapNode := Node{ + appBootstrap, bootstrapTransmitter, bootstrapKb, + } + var ( + oracles []confighelper.OracleIdentityExtra + nodes []Node + ) + // Set up the minimum 4 oracles all funded + ports := freeport.GetN(t, 4) + for i := 0; i < 4; i++ { + app, peerID, 
transmitter, kb := setupNode(t, ports[i], nodeKeys[i+1], backend, []commontypes.BootstrapperLocator{ + // Supply the bootstrap IP and port as a V2 peer address + {PeerID: bootstrapPeerID, Addrs: []string{fmt.Sprintf("127.0.0.1:%d", bootstrapNodePort)}}, + }, NewSimulatedMercuryServer()) + nodeForwarder := setupForwarderForNode(t, app, sergey, backend, transmitter, linkAddr) + effectiveTransmitters = append(effectiveTransmitters, nodeForwarder) + + nodes = append(nodes, Node{ + app, transmitter, kb, + }) + offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x")) + oracles = append(oracles, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: offchainPublicKey, + TransmitAccount: ocrTypes.Account(nodeForwarder.String()), + OffchainPublicKey: kb.OffchainPublicKey(), + PeerID: peerID, + }, + ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(), + }) + } + + // Add the bootstrap job + bootstrapNode.AddBootstrapJob(t, fmt.Sprintf(` + type = "bootstrap" + relay = "evm" + schemaVersion = 1 + name = "boot" + contractID = "%s" + contractConfigTrackerPollInterval = "15s" + + [relayConfig] + chainID = 1337 + `, registry.Address())) + + // Add OCR jobs + for i, node := range nodes { + node.AddJob(t, fmt.Sprintf(` + type = "offchainreporting2" + pluginType = "ocr2automation" + relay = "evm" + name = "ocr2keepers-%d" + schemaVersion = 1 + contractID = "%s" + contractConfigTrackerPollInterval = "15s" + ocrKeyBundleID = "%s" + transmitterID = "%s" + p2pv2Bootstrappers = [ + "%s" + ] + forwardingAllowed = true + + [relayConfig] + chainID = 1337 + + [pluginConfig] + cacheEvictionInterval = "1s" + maxServiceWorkers = 100 + mercuryCredentialName = "%s" + `, i, registry.Address(), node.KeyBundle.ID(), node.Transmitter, fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), MercuryCredName)) + } + + // Setup config on contract + configType := abi.MustNewType("tuple(uint32 
paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,address registrar)") + onchainConfig, err := abi.Encode(map[string]interface{}{ + "paymentPremiumPPB": uint32(0), + "flatFeeMicroLink": uint32(0), + "checkGasLimit": uint32(6500000), + "stalenessSeconds": uint32(90000), + "gasCeilingMultiplier": uint16(2), + "minUpkeepSpend": uint32(0), + "maxPerformGas": uint32(5000000), + "maxCheckDataSize": uint32(5000), + "maxPerformDataSize": uint32(5000), + "fallbackGasPrice": big.NewInt(60000000000), + "fallbackLinkPrice": big.NewInt(2000000000000000000), + "transcoder": testutils.NewAddress(), + "registrar": testutils.NewAddress(), + }, configType) + require.NoError(t, err) + + offC, err := json.Marshal(config.OffchainConfig{ + PerformLockoutWindow: 100 * 12 * 1000, // ~100 block lockout (on goerli) + }) + if err != nil { + t.Logf("error creating off-chain config: %s", err) + t.FailNow() + } + + signers, transmitters, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests( + 10*time.Second, // deltaProgress time.Duration, + 10*time.Second, // deltaResend time.Duration, + 5*time.Second, // deltaRound time.Duration, + 500*time.Millisecond, // deltaGrace time.Duration, + 2*time.Second, // deltaStage time.Duration, + 3, // rMax uint8, + []int{1, 1, 1, 1}, + oracles, + offC, // reportingPluginConfig []byte, + 50*time.Millisecond, // Max duration query + 1*time.Second, // Max duration observation + 1*time.Second, + 1*time.Second, + 1*time.Second, + 1, // f + onchainConfig, + ) + require.NoError(t, err) + signerAddresses, err := evm.OnchainPublicKeyToAddress(signers) + require.NoError(t, err) + transmitterAddresses, err := accountsToAddress(transmitters) + require.NoError(t, err) + + // Make sure we 
are using forwarders and not node keys as transmitters on chain. + eoaList := make([]common.Address, 0) + for _, n := range nodes { + eoaList = append(eoaList, n.Transmitter) + } + require.Equal(t, effectiveTransmitters, transmitterAddresses) + require.NotEqual(t, eoaList, effectiveTransmitters) + lggr.Infow("Setting Config on Oracle Contract", + "signerAddresses", signerAddresses, + "transmitterAddresses", transmitterAddresses, + "threshold", threshold, + "onchainConfig", onchainConfig, + "encodedConfigVersion", offchainConfigVersion, + "offchainConfig", offchainConfig, + ) + _, err = registry.SetConfig( + steve, + signerAddresses, + transmitterAddresses, + threshold, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ) + require.NoError(t, err) + backend.Commit() + + // Register new upkeep + upkeepAddr, _, upkeepContract, err := basic_upkeep_contract.DeployBasicUpkeepContract(carrol, backend) + require.NoError(t, err) + registrationTx, err := registry.RegisterUpkeep(steve, upkeepAddr, 2_500_000, carrol.From, []byte{}, []byte{}) + require.NoError(t, err) + backend.Commit() + upkeepID := getUpkeepIdFromTx(t, registry, registrationTx, backend) + + // Fund the upkeep + _, err = linkToken.Transfer(sergey, carrol.From, oneHunEth) + require.NoError(t, err) + _, err = linkToken.Approve(carrol, registry.Address(), oneHunEth) + require.NoError(t, err) + _, err = registry.AddFunds(carrol, upkeepID, oneHunEth) + require.NoError(t, err) + backend.Commit() + + //Set upkeep to be performed + _, err = upkeepContract.SetBytesToSend(carrol, payload1) + require.NoError(t, err) + _, err = upkeepContract.SetShouldPerformUpkeep(carrol, true) + require.NoError(t, err) + backend.Commit() + + lggr.Infow("Upkeep registered and funded") + + // keeper job is triggered and payload is received + receivedBytes := func() []byte { + received, err2 := upkeepContract.ReceivedBytes(nil) + require.NoError(t, err2) + return received + } + g.Eventually(receivedBytes, 
testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(payload1)) + + // change payload + _, err = upkeepContract.SetBytesToSend(carrol, payload2) + require.NoError(t, err) + _, err = upkeepContract.SetShouldPerformUpkeep(carrol, true) + require.NoError(t, err) + + // observe 2nd job run and received payload changes + g.Eventually(receivedBytes, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.Equal(payload2)) +} + +func ptr[T any](v T) *T { return &v } + +func TestFilterNamesFromSpec20(t *testing.T) { + b := make([]byte, 20) + _, err := rand.Read(b) + require.NoError(t, err) + address := common.HexToAddress(hexutil.Encode(b)) + + spec := &job.OCR2OracleSpec{ + PluginType: types.OCR2Keeper, + ContractID: address.String(), // valid contract addr + } + + names, err := ocr2keeper.FilterNamesFromSpec20(spec) + require.NoError(t, err) + + assert.Len(t, names, 2) + assert.Equal(t, logpoller.FilterName("OCR2KeeperRegistry - LogProvider", address), names[0]) + assert.Equal(t, logpoller.FilterName("EvmRegistry - Upkeep events for", address), names[1]) + + spec = &job.OCR2OracleSpec{ + PluginType: types.OCR2Keeper, + ContractID: "0x5431", // invalid contract addr + } + _, err = ocr2keeper.FilterNamesFromSpec20(spec) + require.ErrorContains(t, err, "not a valid EIP55 formatted address") +} + +// ------- below this line could be added to a test helpers package +type MercuryEndpoint interface { + URL() string + Username() string + Password() string + CallCount() int + RegisterHandler(http.HandlerFunc) +} + +type SimulatedMercuryServer struct { + server *httptest.Server + handler http.HandlerFunc + + mu sync.RWMutex + callCount int +} + +func NewSimulatedMercuryServer() *SimulatedMercuryServer { + srv := &SimulatedMercuryServer{ + handler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + }, + } + + srv.server = httptest.NewUnstartedServer(srv) + + return srv +} + +func (ms *SimulatedMercuryServer) URL() 
string { + return ms.server.URL +} + +func (ms *SimulatedMercuryServer) Username() string { + return "username1" +} + +func (ms *SimulatedMercuryServer) Password() string { + return "password1" +} + +func (ms *SimulatedMercuryServer) CallCount() int { + ms.mu.RLock() + defer ms.mu.RUnlock() + + return ms.callCount +} + +func (ms *SimulatedMercuryServer) RegisterHandler(h http.HandlerFunc) { + ms.handler = h +} + +func (ms *SimulatedMercuryServer) Start() { + ms.server.Start() +} + +func (ms *SimulatedMercuryServer) Stop() { + ms.server.Close() +} + +func (ms *SimulatedMercuryServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ms.mu.Lock() + defer ms.mu.Unlock() + + ms.callCount++ + + ms.handler.ServeHTTP(w, r) +} diff --git a/core/services/ocr2/plugins/ocr2keeper/util.go b/core/services/ocr2/plugins/ocr2keeper/util.go new file mode 100644 index 00000000..c9135343 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2keeper/util.go @@ -0,0 +1,123 @@ +package ocr2keeper + +import ( + "fmt" + + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + + "github.com/jmoiron/sqlx" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + ocr2keepers20 "github.com/goplugin/plugin-automation/pkg/v2" + ocr2keepers20coordinator "github.com/goplugin/plugin-automation/pkg/v2/coordinator" + ocr2keepers20polling "github.com/goplugin/plugin-automation/pkg/v2/observer/polling" + ocr2keepers20runner "github.com/goplugin/plugin-automation/pkg/v2/runner" + ocr2keepers21 "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + evmregistry20 
"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v20" + evmregistry21 "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21" + evmregistry21transmit "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type Encoder20 interface { + ocr2keepers20.Encoder + ocr2keepers20coordinator.Encoder + ocr2keepers20polling.Encoder + ocr2keepers20runner.Encoder + ocr2keepers20coordinator.Encoder +} + +type Encoder21 interface { + ocr2keepers21.Encoder +} + +var ( + ErrNoChainFromSpec = fmt.Errorf("could not create chain from spec") +) + +func EVMProvider(db *sqlx.DB, chain legacyevm.Chain, lggr logger.Logger, spec job.Job, ethKeystore keystore.Eth, dbCfg pg.QConfig) (evmrelay.OCR2KeeperProvider, error) { + oSpec := spec.OCR2OracleSpec + ocr2keeperRelayer := evmrelay.NewOCR2KeeperRelayer(db, chain, lggr.Named("OCR2KeeperRelayer"), ethKeystore, dbCfg) + + keeperProvider, err := ocr2keeperRelayer.NewOCR2KeeperProvider( + types.RelayArgs{ + ExternalJobID: spec.ExternalJobID, + JobID: oSpec.ID, + ContractID: oSpec.ContractID, + RelayConfig: oSpec.RelayConfig.Bytes(), + }, + types.PluginArgs{ + TransmitterID: oSpec.TransmitterID.String, + PluginConfig: oSpec.PluginConfig.Bytes(), + }, + ) + if err != nil { + return nil, fmt.Errorf("%w: failed to create new ocr2keeper provider", err) + } + + return keeperProvider, nil +} + +func EVMDependencies20( + spec job.Job, + db *sqlx.DB, + lggr logger.Logger, + chain legacyevm.Chain, + ethKeystore keystore.Eth, + dbCfg pg.QConfig, +) (evmrelay.OCR2KeeperProvider, *evmregistry20.EvmRegistry, Encoder20, *evmregistry20.LogProvider, error) { + var err error + + var keeperProvider evmrelay.OCR2KeeperProvider + var registry *evmregistry20.EvmRegistry + + // the provider will be returned as a dependency + if keeperProvider, err = EVMProvider(db, chain, lggr, spec, ethKeystore, 
dbCfg); err != nil { + return nil, nil, nil, nil, err + } + + rAddr := ethkey.MustEIP55Address(spec.OCR2OracleSpec.ContractID).Address() + if registry, err = evmregistry20.NewEVMRegistryService(rAddr, chain, lggr); err != nil { + return nil, nil, nil, nil, err + } + + encoder := evmregistry20.EVMAutomationEncoder20{} + + // lookback blocks is hard coded and should provide ample time for logs + // to be detected in most cases + var lookbackBlocks int64 = 250 + // TODO: accept a version of the registry contract and use the correct interfaces + logProvider, err := evmregistry20.NewLogProvider(lggr, chain.LogPoller(), rAddr, chain.Client(), lookbackBlocks) + + return keeperProvider, registry, encoder, logProvider, err +} + +func FilterNamesFromSpec20(spec *job.OCR2OracleSpec) (names []string, err error) { + addr, err := ethkey.NewEIP55Address(spec.ContractID) + if err != nil { + return nil, err + } + return []string{evmregistry20.LogProviderFilterName(addr.Address()), evmregistry20.UpkeepFilterName(addr.Address())}, err +} + +func EVMDependencies21( + keyring ocrtypes.OnchainKeyring, +) (evmregistry21.AutomationServices, error) { + return evmregistry21.New(keyring) +} + +func FilterNamesFromSpec21(spec *job.OCR2OracleSpec) (names []string, err error) { + addr, err := ethkey.NewEIP55Address(spec.ContractID) + if err != nil { + return nil, err + } + return []string{evmregistry21transmit.EventProviderFilterName(addr.Address()), evmregistry21.RegistryUpkeepFilterName(addr.Address())}, err +} diff --git a/core/services/ocr2/plugins/ocr2vrf/config/config.go b/core/services/ocr2/plugins/ocr2vrf/config/config.go new file mode 100644 index 00000000..be6a8ec3 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/config/config.go @@ -0,0 +1,57 @@ +package config + +import ( + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + dkgconfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/dkg/config" +) + +// PluginConfig contains 
custom arguments for the OCR2VRF plugin. +// +// The OCR2VRF plugin runs a DKG under the hood, so it will need both +// DKG and OCR2VRF configuration fields. +// +// The DKG contract address is provided in the plugin configuration, +// however the OCR2VRF contract address is provided in the OCR2 job spec +// under the 'contractID' key. +type PluginConfig struct { + // DKG configuration fields. + DKGEncryptionPublicKey string `json:"dkgEncryptionPublicKey"` + DKGSigningPublicKey string `json:"dkgSigningPublicKey"` + DKGKeyID string `json:"dkgKeyID"` + DKGContractAddress string `json:"dkgContractAddress"` + + // VRF configuration fields + VRFCoordinatorAddress string `json:"vrfCoordinatorAddress"` + LinkEthFeedAddress string `json:"linkEthFeedAddress"` +} + +// ValidatePluginConfig validates that the given OCR2VRF plugin configuration is correct. +func ValidatePluginConfig(config PluginConfig, dkgSignKs keystore.DKGSign, dkgEncryptKs keystore.DKGEncrypt) error { + err := dkgconfig.ValidatePluginConfig(dkgconfig.PluginConfig{ + EncryptionPublicKey: config.DKGEncryptionPublicKey, + SigningPublicKey: config.DKGSigningPublicKey, + KeyID: config.DKGKeyID, + }, dkgSignKs, dkgEncryptKs) + if err != nil { + return err + } + + // NOTE: a better validation would be to call a method on the on-chain contract pointed to by this + // address. + if config.DKGContractAddress == "" { + return errors.New("dkgContractAddress field must be provided") + } + + if config.VRFCoordinatorAddress == "" { + return errors.New("vrfCoordinatorAddress field must be provided") + } + + // NOTE: similar to the above. 
+ if config.LinkEthFeedAddress == "" { + return errors.New("linkEthFeedAddress field must be provided") + } + + return nil +} diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go new file mode 100644 index 00000000..65e0ed8c --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator.go @@ -0,0 +1,1161 @@ +package coordinator + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/json" + "fmt" + "math/big" + "sort" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "golang.org/x/exp/maps" + "google.golang.org/protobuf/proto" + + "github.com/goplugin/libocr/commontypes" + ocr2Types "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/utils/mathutil" + + "github.com/goplugin/plugin-vrf/dkg" + ocr2vrftypes "github.com/goplugin/plugin-vrf/types" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + dkg_wrapper "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/dkg" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_coordinator" + vrf_wrapper "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_coordinator" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + ocr2vrfconfig 
"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2vrf/config" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +var _ ocr2vrftypes.CoordinatorInterface = &coordinator{} + +var ( + dkgABI = evmtypes.MustGetABI(dkg_wrapper.DKGMetaData.ABI) + vrfBeaconABI = evmtypes.MustGetABI(vrf_beacon.VRFBeaconMetaData.ABI) + vrfCoordinatorABI = evmtypes.MustGetABI(vrf_coordinator.VRFCoordinatorMetaData.ABI) + counterBuckets = []float64{ + 0, + 1, + 2, + 4, + 8, + 16, + 32, + 64, + 128, + 256, + 512, + 1024, + 2048, + } + timingBuckets = []float64{ + float64(1 * time.Millisecond), + float64(5 * time.Millisecond), + float64(10 * time.Millisecond), + float64(50 * time.Millisecond), + float64(100 * time.Millisecond), + float64(500 * time.Millisecond), + float64(time.Second), + float64(5 * time.Second), + float64(10 * time.Second), + float64(30 * time.Second), + } + promLabels = []string{"evmChainID", "oracleID", "configDigest"} + promBlocksToReport = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ocr2vrf_coordinator_blocks_to_report", + Help: "Number of unfulfilled and in-flight blocks that fit in current report in reportBlocks", + Buckets: counterBuckets, + }, promLabels) + promCallbacksToReport = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ocr2vrf_coordinator_callbacks_to_report", + Help: "Number of unfulfilled and and in-flight callbacks fit in current report in reportBlocks", + Buckets: counterBuckets, + }, promLabels) + promBlocksInReport = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ocr2vrf_coordinator_blocks_in_report", + Help: "Number of blocks found in reportWillBeTransmitted", + Buckets: counterBuckets, + }, promLabels) + promCallbacksInReport = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ocr2vrf_coordinator_callbacks_in_report", + Help: "Number of callbacks found in reportWillBeTransmitted", + Buckets: counterBuckets, + }, promLabels) + promMethodDuration = 
promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ocr2vrf_coordinator_method_time", + Help: "The amount of time elapsed for given method call", + Buckets: timingBuckets, + }, append(promLabels, "methodName")) +) + +const ( + // VRF-only events. + randomnessRequestedEvent string = "RandomnessRequested" + randomnessFulfillmentRequestedEvent string = "RandomnessFulfillmentRequested" + randomWordsFulfilledEvent string = "RandomWordsFulfilled" + newTransmissionEvent string = "NewTransmission" + outputsServedEvent string = "OutputsServed" + + // Both VRF and DKG contracts emit this, it's an OCR event. + configSetEvent = "ConfigSet" +) + +// block is used to key into a set that tracks beacon blocks. +type block struct { + blockNumber uint64 + confDelay uint32 +} + +type blockInReport struct { + block + recentBlockHeight uint64 + recentBlockHash common.Hash +} + +type callback struct { + blockNumber uint64 + requestID *big.Int +} + +type callbackInReport struct { + callback + recentBlockHeight uint64 + recentBlockHash common.Hash +} + +type coordinator struct { + lggr logger.Logger + + lp logpoller.LogPoller + topics + finalityDepth uint32 + + onchainRouter VRFBeaconCoordinator + coordinatorAddress common.Address + beaconAddress common.Address + + // We need to keep track of DKG ConfigSet events as well. + dkgAddress common.Address + + evmClient evmclient.Client + + // set of blocks that have been scheduled for transmission. + toBeTransmittedBlocks *ocrCache[blockInReport] + // set of request id's that have been scheduled for transmission. + toBeTransmittedCallbacks *ocrCache[callbackInReport] + blockhashLookback uint64 + coordinatorConfig *ocr2vrftypes.CoordinatorConfig + configDigest ocr2Types.ConfigDigest + oracleID commontypes.OracleID +} + +// New creates a new CoordinatorInterface implementor. 
+func New( + lggr logger.Logger, + beaconAddress common.Address, + coordinatorAddress common.Address, + dkgAddress common.Address, + client evmclient.Client, + logPoller logpoller.LogPoller, + finalityDepth uint32, +) (ocr2vrftypes.CoordinatorInterface, error) { + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, client) + if err != nil { + return nil, errors.Wrap(err, "onchain router creation") + } + + t := newTopics() + + // Add log filters for the log poller so that it can poll and find the logs that + // we need. + err = logPoller.RegisterFilter(logpoller.Filter{ + Name: filterName(beaconAddress, coordinatorAddress, dkgAddress), + EventSigs: []common.Hash{ + t.randomnessRequestedTopic, + t.randomnessFulfillmentRequestedTopic, + t.randomWordsFulfilledTopic, + t.configSetTopic, + t.outputsServedTopic, + t.newTransmissionTopic}, Addresses: []common.Address{beaconAddress, coordinatorAddress, dkgAddress}}) + if err != nil { + return nil, err + } + + cacheEvictionWindowSeconds := int64(60) + cacheEvictionWindow := time.Duration(cacheEvictionWindowSeconds * int64(time.Second)) + lookbackBlocks := uint64(1_000) + + return &coordinator{ + onchainRouter: onchainRouter, + coordinatorAddress: coordinatorAddress, + beaconAddress: beaconAddress, + dkgAddress: dkgAddress, + lp: logPoller, + topics: t, + finalityDepth: finalityDepth, + evmClient: client, + lggr: lggr.Named("OCR2VRFCoordinator"), + toBeTransmittedBlocks: NewBlockCache[blockInReport](cacheEvictionWindow), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](cacheEvictionWindow), + blockhashLookback: mathutil.Min(256, lookbackBlocks), + // defaults + coordinatorConfig: &ocr2vrftypes.CoordinatorConfig{ + CacheEvictionWindowSeconds: cacheEvictionWindowSeconds, + BatchGasLimit: 5_000_000, + CoordinatorOverhead: 50_000, + BlockGasOverhead: 50_000, + CallbackOverhead: 50_000, + LookbackBlocks: lookbackBlocks, + }, + }, nil +} + +func (c *coordinator) CurrentChainHeight(ctx 
context.Context) (uint64, error) { + head, err := c.lp.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + return 0, err + } + return uint64(head.BlockNumber), nil +} + +// ReportIsOnchain returns true iff a report for the given OCR epoch/round is +// present onchain. +func (c *coordinator) ReportIsOnchain( + ctx context.Context, + epoch uint32, + round uint8, + configDigest [32]byte, +) (presentOnchain bool, err error) { + now := time.Now().UTC() + defer c.logAndEmitFunctionDuration("ReportIsOnchain", now) + + // Check if a NewTransmission event was emitted on-chain with the + // provided epoch and round. + + epochAndRound := toEpochAndRoundUint40(epoch, round) + + // this is technically NOT a hash in the regular meaning, + // however it has the same size as a common.Hash. We need + // to left-pad by bytes because it has to be 256 (or 32 bytes) + // long in order to use as a topic filter. + enrTopic := common.BytesToHash(common.LeftPadBytes(epochAndRound.Bytes(), 32)) + + c.lggr.Info(fmt.Sprintf("epoch and round: %s %s", epochAndRound.String(), enrTopic.String())) + logs, err := c.lp.IndexedLogs( + c.topics.newTransmissionTopic, + c.beaconAddress, + 2, + []common.Hash{ + enrTopic, + }, + 1, + pg.WithParentCtx(ctx)) + if err != nil { + return false, errors.Wrap(err, "log poller IndexedLogs") + } + + // Filter for valid logs that match the current config digest. 
+ var logsWithCorrectConfigDigest []logpoller.Log + for i := 0; i < len(logs); i++ { + rawLog := toGethLog(logs[i]) + unpacked, err := c.onchainRouter.ParseLog(rawLog) + if err != nil { + c.lggr.Warnw("Incorrect log found in NewTransmissions", "log", logs[i], "err", err) + continue + } + nt, ok := unpacked.(*vrf_beacon.VRFBeaconNewTransmission) + if !ok { + c.lggr.Warnw("Type error for log in NewTransmissisons", "log", logs[i], "err", err) + continue + } + if nt.ConfigDigest == configDigest { + logsWithCorrectConfigDigest = append(logsWithCorrectConfigDigest, logs[i]) + } + } + + c.lggr.Info(fmt.Sprintf("NewTransmission logs: %+v", logsWithCorrectConfigDigest)) + + return len(logsWithCorrectConfigDigest) >= 1, nil +} + +// ReportBlocks returns the heights and hashes of the blocks which require VRF +// proofs in the current report, and the callback requests which should be +// served as part of processing that report. Everything returned by this +// should concern blocks older than the corresponding confirmationDelay. +// Blocks and callbacks it has returned previously may be returned again, as +// long as retransmissionDelay blocks have passed since they were last +// returned. The callbacks returned do not have to correspond to the blocks. +// +// The implementor is responsible for only returning well-funded callback +// requests, and blocks for which clients have actually requested random output +// +// This can be implemented on ethereum using the RandomnessRequested and +// RandomnessFulfillmentRequested events, to identify which blocks and +// callbacks need to be served, along with the NewTransmission and +// RandomWordsFulfilled events, to identify which have already been served. 
+func (c *coordinator) ReportBlocks( + ctx context.Context, + slotInterval uint16, // TODO: unused for now + confirmationDelays map[uint32]struct{}, + retransmissionDelay time.Duration, // TODO: unused for now + maxBlocks, // TODO: unused for now + maxCallbacks int, // TODO: unused for now +) ( + blocks []ocr2vrftypes.Block, + callbacks []ocr2vrftypes.AbstractCostedCallbackRequest, + recentBlockHashesStartHeight uint64, + recentBlockHashes []common.Hash, + err error, +) { + now := time.Now().UTC() + defer c.logAndEmitFunctionDuration("ReportBlocks", now) + + // Instantiate the gas used by this batch. + currentBatchGasLimit := c.coordinatorConfig.CoordinatorOverhead + + // TODO: use head broadcaster instead? + currentHeight, err := c.CurrentChainHeight(ctx) + if err != nil { + err = errors.Wrap(err, "header by number") + return + } + + // Evict expired items from the cache. + c.toBeTransmittedBlocks.EvictExpiredItems(now) + c.toBeTransmittedCallbacks.EvictExpiredItems(now) + + c.lggr.Infow("current chain height", "currentHeight", currentHeight) + + logs, err := c.lp.LogsWithSigs( + int64(currentHeight-c.coordinatorConfig.LookbackBlocks), + int64(currentHeight), + []common.Hash{ + c.randomnessRequestedTopic, + c.randomnessFulfillmentRequestedTopic, + c.randomWordsFulfilledTopic, + c.outputsServedTopic, + }, + c.coordinatorAddress, + pg.WithParentCtx(ctx)) + if err != nil { + err = errors.Wrapf(err, "logs with topics. 
address: %s", c.coordinatorAddress) + return + } + + c.lggr.Tracew("logsWithSigs", "logs", logs) + + randomnessRequestedLogs, + randomnessFulfillmentRequestedLogs, + randomWordsFulfilledLogs, + outputsServedLogs, + err := c.unmarshalLogs(logs) + if err != nil { + err = errors.Wrap(err, "unmarshal logs") + return + } + + c.lggr.Tracew( + "finished unmarshalLogs", + "RandomnessRequested", randomnessRequestedLogs, + "RandomnessFulfillmentRequested", randomnessFulfillmentRequestedLogs, + "RandomWordsFulfilled", randomWordsFulfilledLogs, + "OutputsServed", outputsServedLogs, + ) + + // Get start height for recent blockhashes. + recentBlockHashesStartHeight = uint64(0) + if currentHeight >= c.blockhashLookback { + recentBlockHashesStartHeight = currentHeight - c.blockhashLookback + 1 + } + + // Get blockhashes that pertain to requested blocks. + blockhashesMapping, err := c.getBlockhashesMappingFromRequests( + ctx, + randomnessRequestedLogs, + randomnessFulfillmentRequestedLogs, + currentHeight, + recentBlockHashesStartHeight, + ) + if err != nil { + err = errors.Wrap(err, "get blockhashes in ReportBlocks") + return + } + + // TODO BELOW: Write tests for the new blockhash retrieval. + // Obtain recent blockhashes, ordered by ascending block height. 
+ for i := recentBlockHashesStartHeight; i <= currentHeight; i++ { + recentBlockHashes = append(recentBlockHashes, blockhashesMapping[i]) + } + + blocksRequested := make(map[block]struct{}) + redeemRandomnessBlocksRequested := make(map[block]struct{}) + unfulfilled, err := c.filterEligibleRandomnessRequests(randomnessRequestedLogs, confirmationDelays, currentHeight, blockhashesMapping) + if err != nil { + err = errors.Wrap(err, "filter requests in ReportBlocks") + return + } + for _, uf := range unfulfilled { + blocksRequested[uf] = struct{}{} + redeemRandomnessBlocksRequested[uf] = struct{}{} + } + + c.lggr.Tracew("filtered eligible randomness requests", "blocks", unfulfilled) + + callbacksRequested, unfulfilled, err := c.filterEligibleCallbacks(randomnessFulfillmentRequestedLogs, confirmationDelays, currentHeight, blockhashesMapping) + if err != nil { + err = errors.Wrap(err, "filter callbacks in ReportBlocks") + return + } + for _, uf := range unfulfilled { + blocksRequested[uf] = struct{}{} + } + + c.lggr.Tracew("filtered eligible callbacks and blocks", "callbacks", callbacksRequested, "blocks", maps.Keys(blocksRequested)) + + // Remove blocks that have already received responses so that we don't + // respond to them again. + fulfilledBlocks := c.getFulfilledBlocks(outputsServedLogs) + for _, f := range fulfilledBlocks { + delete(blocksRequested, f) + delete(redeemRandomnessBlocksRequested, f) + } + + c.lggr.Tracew("got fulfilled blocks", "fulfilled", fulfilledBlocks) + + // Fill blocks slice with valid requested blocks. 
+ blocks = []ocr2vrftypes.Block{} + for block := range blocksRequested { + if c.coordinatorConfig.BatchGasLimit-currentBatchGasLimit < c.coordinatorConfig.BlockGasOverhead { + break + } + _, redeemRandomnessRequested := redeemRandomnessBlocksRequested[block] + blocks = append(blocks, ocr2vrftypes.Block{ + Hash: blockhashesMapping[block.blockNumber], + Height: block.blockNumber, + ConfirmationDelay: block.confDelay, + ShouldStore: redeemRandomnessRequested, + }) + currentBatchGasLimit += c.coordinatorConfig.BlockGasOverhead + } + + c.lggr.Tracew("got elligible blocks", "blocks", blocks) + + // Find unfulfilled callback requests by filtering out already fulfilled callbacks. + fulfilledRequestIDs := c.getFulfilledRequestIDs(randomWordsFulfilledLogs) + callbacks = c.filterUnfulfilledCallbacks(callbacksRequested, fulfilledRequestIDs, confirmationDelays, currentHeight, currentBatchGasLimit) + c.emitReportBlocksMetrics(len(blocks), len(callbacks)) + + // Pull request IDs from eligible callbacks for logging. There should only be + // at most 100-200 eligible callbacks in a report. + var reqIDs []*big.Int + for _, c := range callbacks { + reqIDs = append(reqIDs, c.RequestID) + } + c.lggr.Debugw("reporting blocks and callbacks", "blocks", blocks, "callbacks", reqIDs) + c.lggr.Tracew("alreday fulfilled blocks and callbacks", "blocks", fulfilledBlocks, "callbacks", maps.Keys(fulfilledRequestIDs)) + + return +} + +// getBlockhashesMappingFromRequests returns the blockhashes for enqueued request blocks. +func (c *coordinator) getBlockhashesMappingFromRequests( + ctx context.Context, + randomnessRequestedLogs []*vrf_wrapper.VRFCoordinatorRandomnessRequested, + randomnessFulfillmentRequestedLogs []*vrf_wrapper.VRFCoordinatorRandomnessFulfillmentRequested, + currentHeight uint64, + recentBlockHashesStartHeight uint64, +) (blockhashesMapping map[uint64]common.Hash, err error) { + + // Get all request + callback requests into a mapping. 
+ rawBlocksRequested := make(map[uint64]struct{}) + for _, l := range randomnessRequestedLogs { + if isBlockEligible(l.NextBeaconOutputHeight, l.ConfDelay, currentHeight) { + rawBlocksRequested[l.NextBeaconOutputHeight] = struct{}{} + + // Also get the blockhash for the most recent cached report on this block, + // if one exists. + cacheKey := getBlockCacheKey(l.NextBeaconOutputHeight, l.ConfDelay.Uint64()) + t := c.toBeTransmittedBlocks.GetItem(cacheKey) + if t != nil { + rawBlocksRequested[t.recentBlockHeight] = struct{}{} + } + } + } + for _, l := range randomnessFulfillmentRequestedLogs { + if isBlockEligible(l.NextBeaconOutputHeight, l.ConfDelay, currentHeight) { + rawBlocksRequested[l.NextBeaconOutputHeight] = struct{}{} + + // Also get the blockhash for the most recent cached report on this callback, + // if one exists. + cacheKey := getCallbackCacheKey(l.RequestID) + t := c.toBeTransmittedCallbacks.GetItem(cacheKey) + if t != nil { + rawBlocksRequested[t.recentBlockHeight] = struct{}{} + } + } + } + + // Fill a unique list of request blocks. + requestedBlockNumbers := []uint64{} + for k := range rawBlocksRequested { + requestedBlockNumbers = append(requestedBlockNumbers, k) + } + + // Get a mapping of block numbers to block hashes. + blockhashesMapping, err = c.getBlockhashesMapping(ctx, append(requestedBlockNumbers, currentHeight, recentBlockHashesStartHeight)) + if err != nil { + err = errors.Wrap(err, "get blockhashes for ReportBlocks") + } + return +} + +func (c *coordinator) getFulfilledBlocks(outputsServedLogs []*vrf_coordinator.VRFCoordinatorOutputsServed) (fulfilled []block) { + for _, r := range outputsServedLogs { + for _, o := range r.OutputsServed { + fulfilled = append(fulfilled, block{ + blockNumber: o.Height, + confDelay: uint32(o.ConfirmationDelay.Uint64()), + }) + } + } + return +} + +// getBlockhashesMapping returns the blockhashes corresponding to a slice of block numbers. 
+func (c *coordinator) getBlockhashesMapping(
+	ctx context.Context,
+	blockNumbers []uint64,
+) (blockhashesMapping map[uint64]common.Hash, err error) {
+	// GetBlocks doesn't necessarily need a sorted blockNumbers array,
+	// but sorting it is helpful for testing.
+	sort.Slice(blockNumbers, func(a, b int) bool {
+		return blockNumbers[a] < blockNumbers[b]
+	})
+
+	heads, err := c.lp.GetBlocksRange(ctx, blockNumbers, pg.WithParentCtx(ctx))
+	if err != nil {
+		return nil, errors.Wrap(err, "logpoller.GetBlocks")
+	}
+
+	blockhashesMapping = make(map[uint64]common.Hash)
+	for _, head := range heads {
+		blockhashesMapping[uint64(head.BlockNumber)] = head.BlockHash
+	}
+
+	// Ensure that every requested block received a blockhash. A partial result is
+	// treated as an error so callers never act on an incomplete mapping.
+	for _, b := range blockNumbers {
+		if _, ok := blockhashesMapping[b]; !ok {
+			err = fmt.Errorf("could not find all heads in db: want %d got %d", len(blockNumbers), len(heads))
+			return
+		}
+	}
+	return
+}
+
+// getFulfilledRequestIDs returns the request IDs referenced by the given RandomWordsFulfilled logs slice.
+// The IDs are returned as a set for O(1) membership checks in filterUnfulfilledCallbacks.
+func (c *coordinator) getFulfilledRequestIDs(randomWordsFulfilledLogs []*vrf_wrapper.VRFCoordinatorRandomWordsFulfilled) map[uint64]struct{} {
+	fulfilledRequestIDs := make(map[uint64]struct{})
+	for _, r := range randomWordsFulfilledLogs {
+		for _, requestID := range r.RequestIDs {
+			fulfilledRequestIDs[requestID.Uint64()] = struct{}{}
+		}
+	}
+	return fulfilledRequestIDs
+}
+
+// filterUnfulfilledCallbacks returns unfulfilled callback requests given the
+// callback request logs and the already fulfilled callback request IDs.
+func (c *coordinator) filterUnfulfilledCallbacks(
+	callbacksRequested []*vrf_wrapper.VRFCoordinatorRandomnessFulfillmentRequested,
+	fulfilledRequestIDs map[uint64]struct{},
+	confirmationDelays map[uint32]struct{},
+	currentHeight uint64,
+	currentBatchGasLimit int64,
+) (callbacks []ocr2vrftypes.AbstractCostedCallbackRequest) {
+
+	/**
+	 * Callback batch ordering:
+	 * - Callbacks are first ordered by beacon output + confirmation delay (ascending), in other words
+	 *   the fulfillments at the oldest block are first in line.
+	 * - Within the same block, fulfillments are ordered by gasAllowance (ascending), i.e the callbacks with
+	 *   the lowest gasAllowance are first in line.
+	 * - This ordering ensures that the oldest callbacks can be picked up first, and that as many callbacks as
+	 *   possible can be fit into a batch.
+	 *
+	 * Example:
+	 * Unsorted: (outputHeight: 1, gasAllowance: 200k), (outputHeight: 3, gasAllowance: 100k), (outputHeight: 1, gasAllowance: 100k)
+	 * Sorted: (outputHeight: 1, gasAllowance: 100k), (outputHeight: 1, gasAllowance: 200k), (outputHeight: 3, gasAllowance: 100k)
+	 *
+	 */
+	sort.Slice(callbacksRequested, func(a, b int) bool {
+		aHeight := callbacksRequested[a].NextBeaconOutputHeight + callbacksRequested[a].ConfDelay.Uint64()
+		bHeight := callbacksRequested[b].NextBeaconOutputHeight + callbacksRequested[b].ConfDelay.Uint64()
+		if aHeight == bHeight {
+			return callbacksRequested[a].GasAllowance < callbacksRequested[b].GasAllowance
+		}
+		return aHeight < bHeight
+	})
+
+	// currentBatchGasLimit arrives pre-charged with the gas overhead of the
+	// blocks already chosen by the caller, and grows as callbacks are accepted.
+	for _, r := range callbacksRequested {
+		// Check if there is room left in the batch. If there is no room left, the coordinator
+		// will keep iterating, until it either finds a callback in a subsequent output height that
+		// can fit into the current batch or reaches the end of the sorted callbacks slice.
+		if c.coordinatorConfig.BatchGasLimit-currentBatchGasLimit < (int64(r.GasAllowance) + c.coordinatorConfig.CallbackOverhead) {
+			continue
+		}
+
+		requestID := r.RequestID
+		if _, ok := fulfilledRequestIDs[requestID.Uint64()]; !ok {
+			// The on-chain machinery will revert requests that specify an unsupported
+			// confirmation delay, so this is more of a sanity check than anything else.
+			if _, ok := confirmationDelays[uint32(r.ConfDelay.Uint64())]; !ok {
+				// if we can't find the conf delay in the map then just ignore this request
+				c.lggr.Errorw("ignoring bad request, unsupported conf delay",
+					"confDelay", r.ConfDelay.String(),
+					"supportedConfDelays", confirmationDelays)
+				continue
+			}
+
+			// NOTE: we already check if the callback has been fulfilled in filterEligibleCallbacks,
+			// so we don't need to do that again here.
+			if isBlockEligible(r.NextBeaconOutputHeight, r.ConfDelay, currentHeight) {
+				callbacks = append(callbacks, ocr2vrftypes.AbstractCostedCallbackRequest{
+					BeaconHeight:      r.NextBeaconOutputHeight,
+					ConfirmationDelay: uint32(r.ConfDelay.Uint64()),
+					SubscriptionID:    r.SubID,
+					Price:             big.NewInt(0), // TODO: no price tracking
+					RequestID:         requestID,
+					NumWords:          r.NumWords,
+					Requester:         r.Requester,
+					Arguments:         r.Arguments,
+					GasAllowance:      big.NewInt(int64(r.GasAllowance)),
+					GasPrice:          r.GasPrice,
+					WeiPerUnitLink:    r.WeiPerUnitLink,
+				})
+				currentBatchGasLimit += int64(r.GasAllowance)
+				c.lggr.Debugw("Request is unfulfilled", "requestID", requestID)
+			}
+		}
+	}
+	return callbacks
+}
+
+// filterEligibleCallbacks extracts valid callback requests from the given logs,
+// based on their readiness to be fulfilled. It also returns any unfulfilled blocks
+// associated with those callbacks.
+func (c *coordinator) filterEligibleCallbacks(
+	randomnessFulfillmentRequestedLogs []*vrf_wrapper.VRFCoordinatorRandomnessFulfillmentRequested,
+	confirmationDelays map[uint32]struct{},
+	currentHeight uint64,
+	blockhashesMapping map[uint64]common.Hash,
+) (callbacks []*vrf_wrapper.VRFCoordinatorRandomnessFulfillmentRequested, unfulfilled []block, err error) {
+
+	for _, r := range randomnessFulfillmentRequestedLogs {
+		// The on-chain machinery will revert requests that specify an unsupported
+		// confirmation delay, so this is more of a sanity check than anything else.
+		if _, ok := confirmationDelays[uint32(r.ConfDelay.Uint64())]; !ok {
+			// if we can't find the conf delay in the map then just ignore this request
+			c.lggr.Errorw("ignoring bad request, unsupported conf delay",
+				"confDelay", r.ConfDelay.String(),
+				"supportedConfDelays", confirmationDelays)
+			continue
+		}
+
+		// Check that the callback is eligible.
+		if isBlockEligible(r.NextBeaconOutputHeight, r.ConfDelay, currentHeight) {
+			cacheKey := getCallbackCacheKey(r.RequestID)
+			t := c.toBeTransmittedCallbacks.GetItem(cacheKey)
+			// If the callback is found in the cache and the recentBlockHash from the report containing the callback
+			// is correct, then the callback is in-flight and should not be included in the current observation. If that
+			// report gets re-orged, then the recentBlockHash of the report will become invalid, in which case
+			// the cached callback is ignored, and the callback is added to the current observation.
+			inflightTransmission := (t != nil) && (t.recentBlockHash == blockhashesMapping[t.recentBlockHeight])
+			if inflightTransmission {
+				c.lggr.Debugw("Request is in-flight", "requestID", r.RequestID)
+				continue
+			}
+
+			callbacks = append(callbacks, r)
+
+			// We could have a callback request that was made in a different block than what we
+			// have possibly already received from regular requests.
+			unfulfilled = append(unfulfilled, block{
+				blockNumber: r.NextBeaconOutputHeight,
+				confDelay:   uint32(r.ConfDelay.Uint64()),
+			})
+			c.lggr.Debugw("Request is eligible", "requestID", r.RequestID)
+		}
+	}
+	return
+}
+
+// filterEligibleRandomnessRequests extracts valid randomness requests from the given logs,
+// based on their readiness to be fulfilled.
+func (c *coordinator) filterEligibleRandomnessRequests(
+	randomnessRequestedLogs []*vrf_wrapper.VRFCoordinatorRandomnessRequested,
+	confirmationDelays map[uint32]struct{},
+	currentHeight uint64,
+	blockhashesMapping map[uint64]common.Hash,
+) (unfulfilled []block, err error) {
+
+	for _, r := range randomnessRequestedLogs {
+		// The on-chain machinery will revert requests that specify an unsupported
+		// confirmation delay, so this is more of a sanity check than anything else.
+		if _, ok := confirmationDelays[uint32(r.ConfDelay.Uint64())]; !ok {
+			// if we can't find the conf delay in the map then just ignore this request
+			c.lggr.Errorw("ignoring bad request, unsupported conf delay",
+				"confDelay", r.ConfDelay.String(),
+				"supportedConfDelays", confirmationDelays)
+			continue
+		}
+
+		// Check that the block is eligible.
+		if isBlockEligible(r.NextBeaconOutputHeight, r.ConfDelay, currentHeight) {
+			cacheKey := getBlockCacheKey(r.NextBeaconOutputHeight, r.ConfDelay.Uint64())
+			t := c.toBeTransmittedBlocks.GetItem(cacheKey)
+			// If the block is found in the cache and the recentBlockHash from the report containing the block
+			// is correct, then the block is in-flight and should not be included in the current observation. If that
+			// report gets re-orged, then the recentBlockHash of the report will become invalid, in which case
+			// the cached block is ignored and the block is added to the current observation.
+			validTransmission := (t != nil) && (t.recentBlockHash == blockhashesMapping[t.recentBlockHeight])
+			if validTransmission {
+				c.lggr.Debugw("Block is in-flight", "blockNumber", r.NextBeaconOutputHeight, "confDelay", r.ConfDelay)
+				continue
+			}
+
+			unfulfilled = append(unfulfilled, block{
+				blockNumber: r.NextBeaconOutputHeight,
+				confDelay:   uint32(r.ConfDelay.Uint64()),
+			})
+			c.lggr.Debugw("Block is eligible", "blockNumber", r.NextBeaconOutputHeight, "confDelay", r.ConfDelay)
+		}
+	}
+	return
+}
+
+// unmarshalLogs unpacks raw log-poller logs into their concrete generated event
+// types, grouped by event signature. An unknown event signature is logged and
+// skipped; a parse or cast failure on a known signature sets err and aborts,
+// since it indicates a corrupted log or an ABI mismatch.
+func (c *coordinator) unmarshalLogs(
+	logs []logpoller.Log,
+) (
+	randomnessRequestedLogs []*vrf_wrapper.VRFCoordinatorRandomnessRequested,
+	randomnessFulfillmentRequestedLogs []*vrf_wrapper.VRFCoordinatorRandomnessFulfillmentRequested,
+	randomWordsFulfilledLogs []*vrf_wrapper.VRFCoordinatorRandomWordsFulfilled,
+	outputsServedLogs []*vrf_coordinator.VRFCoordinatorOutputsServed,
+	err error,
+) {
+	for _, lg := range logs {
+		rawLog := toGethLog(lg)
+		switch lg.EventSig {
+		case c.randomnessRequestedTopic:
+			unpacked, err2 := c.onchainRouter.ParseLog(rawLog)
+			if err2 != nil {
+				// should never happen
+				err = errors.Wrap(err2, "unmarshal RandomnessRequested log")
+				return
+			}
+			rr, ok := unpacked.(*vrf_wrapper.VRFCoordinatorRandomnessRequested)
+			if !ok {
+				// should never happen
+				err = errors.New("cast to *VRFCoordinatorRandomnessRequested")
+				return
+			}
+			randomnessRequestedLogs = append(randomnessRequestedLogs, rr)
+		case c.randomnessFulfillmentRequestedTopic:
+			unpacked, err2 := c.onchainRouter.ParseLog(rawLog)
+			if err2 != nil {
+				// should never happen
+				err = errors.Wrap(err2, "unmarshal RandomnessFulfillmentRequested log")
+				return
+			}
+			rfr, ok := unpacked.(*vrf_wrapper.VRFCoordinatorRandomnessFulfillmentRequested)
+			if !ok {
+				// should never happen
+				err = errors.New("cast to *VRFCoordinatorRandomnessFulfillmentRequested")
+				return
+			}
+			randomnessFulfillmentRequestedLogs = append(randomnessFulfillmentRequestedLogs, rfr)
+		case c.randomWordsFulfilledTopic:
+			unpacked, err2 := c.onchainRouter.ParseLog(rawLog)
+			if err2 != nil {
+				// should never happen
+				err = errors.Wrap(err2, "unmarshal RandomWordsFulfilled log")
+				return
+			}
+			rwf, ok := unpacked.(*vrf_wrapper.VRFCoordinatorRandomWordsFulfilled)
+			if !ok {
+				// should never happen
+				err = errors.New("cast to *VRFCoordinatorRandomWordsFulfilled")
+				return
+			}
+			randomWordsFulfilledLogs = append(randomWordsFulfilledLogs, rwf)
+		case c.outputsServedTopic:
+			unpacked, err2 := c.onchainRouter.ParseLog(rawLog)
+			if err2 != nil {
+				// should never happen
+				err = errors.Wrap(err2, "unmarshal OutputsServed log")
+				return
+			}
+			nt, ok := unpacked.(*vrf_coordinator.VRFCoordinatorOutputsServed)
+			if !ok {
+				// should never happen
+				err = errors.New("cast to *vrf_coordinator.VRFCoordinatorOutputsServed")
+				// BUGFIX: previously this branch fell through without returning, which
+				// appended a nil entry to outputsServedLogs (a latent nil-pointer
+				// dereference for consumers iterating r.OutputsServed) and kept
+				// processing with err already set. Return early like the other cases.
+				return
+			}
+			outputsServedLogs = append(outputsServedLogs, nt)
+		default:
+			c.lggr.Error(fmt.Sprintf("Unexpected event sig: %s", lg.EventSig))
+			c.lggr.Error(fmt.Sprintf("expected one of: %s (RandomnessRequested) %s (RandomnessFulfillmentRequested) %s (RandomWordsFulfilled) %s (OutputsServed), got %s",
+				hexutil.Encode(c.randomnessRequestedTopic[:]),
+				hexutil.Encode(c.randomnessFulfillmentRequestedTopic[:]),
+				hexutil.Encode(c.randomWordsFulfilledTopic[:]),
+				hexutil.Encode(c.outputsServedTopic[:]),
+				lg.EventSig))
+		}
+	}
+	return
+}
+
+// ReportWillBeTransmitted registers to the CoordinatorInterface that the
+// local node has accepted the AbstractReport for transmission, so that its
+// blocks and callbacks can be tracked for possible later retransmission
+func (c *coordinator) ReportWillBeTransmitted(ctx context.Context, report ocr2vrftypes.AbstractReport) error {
+	now := time.Now().UTC()
+	defer c.logAndEmitFunctionDuration("ReportWillBeTransmitted", now)
+
+	// Evict expired items from the cache.
+	c.toBeTransmittedBlocks.EvictExpiredItems(now)
+	c.toBeTransmittedCallbacks.EvictExpiredItems(now)
+
+	// Check for a re-org, and return an error if one is present.
+	blockhashesMapping, err := c.getBlockhashesMapping(ctx, []uint64{report.RecentBlockHeight})
+	if err != nil {
+		return errors.Wrap(err, "getting blockhash mapping in ReportWillBeTransmitted")
+	}
+	if blockhashesMapping[report.RecentBlockHeight] != report.RecentBlockHash {
+		return errors.Errorf("blockhash of report does not match most recent blockhash in ReportWillBeTransmitted")
+	}
+
+	blocksRequested := []blockInReport{}
+	callbacksRequested := []callbackInReport{}
+
+	// Get all requested blocks and callbacks.
+	for _, output := range report.Outputs {
+		// If the VRF proof size is 0, the block is not included in this output. We still
+		// check for callbacks in the output.
+		if len(output.VRFProof) > 0 {
+			bR := blockInReport{
+				block: block{
+					blockNumber: output.BlockHeight,
+					confDelay:   output.ConfirmationDelay,
+				},
+				recentBlockHeight: report.RecentBlockHeight,
+				recentBlockHash:   report.RecentBlockHash,
+			}
+			// Store block in blocksRequested.
+			blocksRequested = append(blocksRequested, bR)
+		}
+
+		// Iterate through callbacks for output.
+		for _, cb := range output.Callbacks {
+			cbR := callbackInReport{
+				callback: callback{
+					blockNumber: cb.BeaconHeight,
+					requestID:   cb.RequestID,
+				},
+				recentBlockHeight: report.RecentBlockHeight,
+				recentBlockHash:   report.RecentBlockHash,
+			}
+
+			// Add callback to callbacksRequested.
+			callbacksRequested = append(callbacksRequested, cbR)
+		}
+	}
+
+	// Apply blockhashes to blocks and mark them as transmitted.
+	for _, b := range blocksRequested {
+		cacheKey := getBlockCacheKey(b.blockNumber, uint64(b.confDelay))
+		c.toBeTransmittedBlocks.CacheItem(b, cacheKey, now)
+		c.lggr.Debugw("Block is being transmitted", "blockNumber", b.blockNumber, "confDelay", b.confDelay)
+	}
+
+	// Add the corresponding blockhashes to callbacks and mark them as transmitted.
+	for _, cb := range callbacksRequested {
+		cacheKey := getCallbackCacheKey(cb.requestID)
+		c.toBeTransmittedCallbacks.CacheItem(cb, cacheKey, now)
+		c.lggr.Debugw("Request is being transmitted", "requestID", cb.requestID)
+	}
+
+	c.emitReportWillBeTransmittedMetrics(len(blocksRequested), len(callbacksRequested))
+
+	return nil
+}
+
+// DKGVRFCommittees returns the addresses of the signers and transmitters
+// for the DKG and VRF OCR committees. On ethereum, these can be retrieved
+// from the most recent ConfigSet events for each contract.
+func (c *coordinator) DKGVRFCommittees(ctx context.Context) (dkgCommittee, vrfCommittee ocr2vrftypes.OCRCommittee, err error) {
+	startTime := time.Now().UTC()
+	defer c.logAndEmitFunctionDuration("DKGVRFCommittees", startTime)
+
+	latestVRF, err := c.lp.LatestLogByEventSigWithConfs(
+		c.configSetTopic,
+		c.beaconAddress,
+		logpoller.Confirmations(c.finalityDepth),
+		pg.WithParentCtx(ctx),
+	)
+	if err != nil {
+		err = errors.Wrap(err, "latest vrf ConfigSet by sig with confs")
+		return
+	}
+
+	latestDKG, err := c.lp.LatestLogByEventSigWithConfs(
+		c.configSetTopic,
+		c.dkgAddress,
+		logpoller.Confirmations(c.finalityDepth),
+		pg.WithParentCtx(ctx),
+	)
+	if err != nil {
+		err = errors.Wrap(err, "latest dkg ConfigSet by sig with confs")
+		return
+	}
+
+	var vrfConfigSetLog vrf_beacon.VRFBeaconConfigSet
+	err = vrfBeaconABI.UnpackIntoInterface(&vrfConfigSetLog, configSetEvent, latestVRF.Data)
+	if err != nil {
+		err = errors.Wrap(err, "unpack vrf ConfigSet into interface")
+		return
+	}
+
+	var dkgConfigSetLog dkg_wrapper.DKGConfigSet
+	err = dkgABI.UnpackIntoInterface(&dkgConfigSetLog, configSetEvent, latestDKG.Data)
+	if err != nil {
+		err = errors.Wrap(err, "unpack dkg ConfigSet into interface")
+		return
+	}
+
+	// len(signers) == len(transmitters), this is guaranteed by libocr.
+	for i := range vrfConfigSetLog.Signers {
+		vrfCommittee.Signers = append(vrfCommittee.Signers, vrfConfigSetLog.Signers[i])
+		vrfCommittee.Transmitters = append(vrfCommittee.Transmitters, vrfConfigSetLog.Transmitters[i])
+	}
+
+	for i := range dkgConfigSetLog.Signers {
+		dkgCommittee.Signers = append(dkgCommittee.Signers, dkgConfigSetLog.Signers[i])
+		dkgCommittee.Transmitters = append(dkgCommittee.Transmitters, dkgConfigSetLog.Transmitters[i])
+	}
+
+	return
+}
+
+// ProvingKeyHash returns the VRF current proving block, in view of the local
+// node. On ethereum this can be retrieved from the VRF contract's attribute
+// s_provingKeyHash
+func (c *coordinator) ProvingKeyHash(ctx context.Context) (common.Hash, error) {
+	h, err := c.onchainRouter.SProvingKeyHash(&bind.CallOpts{
+		Context: ctx,
+	})
+	if err != nil {
+		return [32]byte{}, errors.Wrap(err, "get proving block hash")
+	}
+
+	return h, nil
+}
+
+// BeaconPeriod returns the period used in the coordinator's contract
+func (c *coordinator) BeaconPeriod(ctx context.Context) (uint16, error) {
+	beaconPeriodBlocks, err := c.onchainRouter.IBeaconPeriodBlocks(&bind.CallOpts{
+		Context: ctx,
+	})
+	if err != nil {
+		return 0, errors.Wrap(err, "get beacon period blocks")
+	}
+
+	return uint16(beaconPeriodBlocks.Int64()), nil
+}
+
+// ConfirmationDelays returns the list of confirmation delays defined in the coordinator's contract
+func (c *coordinator) ConfirmationDelays(ctx context.Context) ([]uint32, error) {
+	confDelays, err := c.onchainRouter.GetConfirmationDelays(&bind.CallOpts{
+		Context: ctx,
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "could not get confirmation delays")
+	}
+	var result []uint32
+	for _, c := range confDelays {
+		result = append(result, uint32(c.Uint64()))
+	}
+	return result, nil
+}
+
+// KeyID returns the key ID from coordinator's contract
+func (c *coordinator) KeyID(ctx context.Context) (dkg.KeyID, error) {
+	keyID, err := c.onchainRouter.SKeyID(&bind.CallOpts{Context: ctx})
+	if err != nil {
+		return dkg.KeyID{}, errors.Wrap(err, "could not get key ID")
+	}
+	return keyID, nil
+}
+
+// isBlockEligible returns true if and only if the nextBeaconOutputHeight plus
+// the confDelay is less than the current blockchain height, meaning that the beacon
+// output height has enough confirmations.
+//
+// NextBeaconOutputHeight is always greater than the request block, therefore
+// a number of confirmations on the beacon block is always enough confirmations
+// for the request block.
+func isBlockEligible(nextBeaconOutputHeight uint64, confDelay *big.Int, currentHeight uint64) bool {
+	cond := confDelay.Uint64() < currentHeight // Edge case: for simulated chains with low block numbers
+	cond = cond && (nextBeaconOutputHeight+confDelay.Uint64()) < currentHeight
+	return cond
+}
+
+// toEpochAndRoundUint40 returns a single unsigned 40 bit big.Int object
+// that has the epoch in the first 32 bits and the round in the last 8 bits,
+// in a big-endian fashion.
+func toEpochAndRoundUint40(epoch uint32, round uint8) *big.Int {
+	return big.NewInt((int64(epoch) << 8) + int64(round))
+}
+
+// toGethLog converts a log-poller log into a geth types.Log.
+func toGethLog(lg logpoller.Log) types.Log {
+	var topics []common.Hash
+	for _, b := range lg.Topics {
+		topics = append(topics, common.BytesToHash(b))
+	}
+	return types.Log{
+		Data:        lg.Data,
+		Address:     lg.Address,
+		BlockHash:   lg.BlockHash,
+		BlockNumber: uint64(lg.BlockNumber),
+		Topics:      topics,
+		TxHash:      lg.TxHash,
+		Index:       uint(lg.LogIndex),
+	}
+}
+
+// getBlockCacheKey returns a cache key for a requested block
+// The blockhash of the block does not need to be included in the key. Instead,
+// the block cached at a given key contains a blockhash that is checked for validity
+// against the log poller's current state.
+func getBlockCacheKey(blockNumber uint64, confDelay uint64) common.Hash {
+	var blockNumberBytes [8]byte
+	var confDelayBytes [8]byte
+
+	binary.BigEndian.PutUint64(blockNumberBytes[:], blockNumber)
+	binary.BigEndian.PutUint64(confDelayBytes[:], confDelay)
+
+	// Key layout: 8 bytes of block number followed by 8 bytes of conf delay,
+	// zero-padded into a 32-byte hash.
+	return common.BytesToHash(bytes.Join([][]byte{blockNumberBytes[:], confDelayBytes[:]}, nil))
+}
+
+// getCallbackCacheKey returns a cache key for a requested callback
+// The blockhash of the callback does not need to be included in the key. Instead,
+// the callback cached at a given key contains a blockhash that is checked for validity
+// against the log poller's current state.
+func getCallbackCacheKey(requestID *big.Int) common.Hash {
+	return common.BigToHash(requestID)
+}
+
+// logAndEmitFunctionDuration logs the time in milliseconds and emits metrics in nanosecond for function duration
+func (c *coordinator) logAndEmitFunctionDuration(funcName string, startTime time.Time) {
+	elapsed := time.Now().UTC().Sub(startTime)
+	c.lggr.Debugf("%s took %d milliseconds to complete", funcName, elapsed.Milliseconds())
+	promMethodDuration.WithLabelValues(
+		append(c.labelValues(), funcName)...,
+	).Observe(float64(elapsed.Nanoseconds()))
+}
+
+// UpdateConfiguration applies a new serialized off-chain config along with the
+// config digest and oracle ID for the current epoch.
+func (c *coordinator) UpdateConfiguration(
+	b []byte,
+	configDigest ocr2Types.ConfigDigest,
+	oracleID commontypes.OracleID,
+) error {
+	// Update config digest & oracle ID for epoch.
+	c.configDigest = configDigest
+	c.oracleID = oracleID
+
+	// Unmarshal off-chain config.
+	err := proto.Unmarshal(b, c.coordinatorConfig)
+	if err != nil {
+		return errors.Wrap(err, "error setting offchain config on coordinator")
+	}
+
+	// Update local caches with new eviction window.
+	cacheEvictionWindowSeconds := c.coordinatorConfig.CacheEvictionWindowSeconds
+	cacheEvictionWindow := time.Duration(cacheEvictionWindowSeconds * int64(time.Second))
+	// NOTE(review): "SetEvictonWindow" is misspelled in the cache API; renaming it
+	// would change the cache type's exported interface, so it is left as-is here.
+	c.toBeTransmittedBlocks.SetEvictonWindow(cacheEvictionWindow)
+	c.toBeTransmittedCallbacks.SetEvictonWindow(cacheEvictionWindow)
+
+	// Cap the lookback at 256 — presumably the EVM BLOCKHASH window; TODO confirm.
+	c.blockhashLookback = mathutil.Min(256, c.coordinatorConfig.LookbackBlocks)
+	c.lggr.Infow("set offchain config",
+		offchainConfigFields(c.coordinatorConfig)...,
+	)
+
+	return nil
+}
+
+// offchainConfigFields renders the coordinator config as key/value pairs for structured logging.
+func offchainConfigFields(coordinatorConfig *ocr2vrftypes.CoordinatorConfig) []any {
+	return []any{
+		"cacheEvictionWindowSeconds", coordinatorConfig.CacheEvictionWindowSeconds,
+		"batchGasLimit", coordinatorConfig.BatchGasLimit,
+		"coordinatorOverhead", coordinatorConfig.CoordinatorOverhead,
+		"lookbackBlocks", coordinatorConfig.LookbackBlocks,
+		"blockGasOverhead", coordinatorConfig.BlockGasOverhead,
+		"callbackOverhead", coordinatorConfig.CallbackOverhead,
+	}
+}
+
+// labelValues returns the Prometheus label values identifying this oracle instance:
+// chain ID, oracle ID, and config digest.
+func (c *coordinator) labelValues() []string {
+	chainId := c.evmClient.ConfiguredChainID()
+	return []string{chainId.String(), fmt.Sprintf("%d", c.oracleID), common.Bytes2Hex(c.configDigest[:])}
+}
+
+// emitReportBlocksMetrics records the number of blocks and callbacks chosen for a report.
+func (c *coordinator) emitReportBlocksMetrics(
+	numBlocks int,
+	numCallbacks int) {
+	promBlocksToReport.WithLabelValues(c.labelValues()...).Observe(float64(numBlocks))
+	promCallbacksToReport.WithLabelValues(c.labelValues()...).Observe(float64(numCallbacks))
+}
+
+// emitReportWillBeTransmittedMetrics records the number of blocks and callbacks in an accepted report.
+func (c *coordinator) emitReportWillBeTransmittedMetrics(
+	numBlocks int,
+	numCallbacks int) {
+	promBlocksInReport.WithLabelValues(c.labelValues()...).Observe(float64(numBlocks))
+	promCallbacksInReport.WithLabelValues(c.labelValues()...).Observe(float64(numCallbacks))
+}
+
+// filterName builds the log-poller filter name from the three contract addresses.
+func filterName(beaconAddress, coordinatorAddress, dkgAddress common.Address) string {
+	return logpoller.FilterName("VRF Coordinator", beaconAddress, coordinatorAddress, dkgAddress)
+}
+
+// FilterNamesFromSpec derives the log-poller filter name(s) for an OCR2 job spec.
+func FilterNamesFromSpec(spec *job.OCR2OracleSpec) (names []string, err error) {
+	var cfg 
ocr2vrfconfig.PluginConfig
+	var beaconAddress, coordinatorAddress, dkgAddress ethkey.EIP55Address
+
+	if err = json.Unmarshal(spec.PluginConfig.Bytes(), &cfg); err != nil {
+		err = errors.Wrap(err, "failed to unmarshal ocr2vrf plugin config")
+		return nil, err
+	}
+
+	// Validate all three contract addresses; err carries the first failure, if any.
+	if beaconAddress, err = ethkey.NewEIP55Address(spec.ContractID); err == nil {
+		if coordinatorAddress, err = ethkey.NewEIP55Address(cfg.VRFCoordinatorAddress); err == nil {
+			if dkgAddress, err = ethkey.NewEIP55Address(cfg.DKGContractAddress); err == nil {
+				return []string{filterName(beaconAddress.Address(), coordinatorAddress.Address(), dkgAddress.Address())}, nil
+			}
+		}
+	}
+
+	return nil, err
+}
diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go
new file mode 100644
index 00000000..401805c8
--- /dev/null
+++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/coordinator_test.go
@@ -0,0 +1,1787 @@
+package coordinator
+
+import (
+	"bytes"
+	"crypto/rand"
+	"fmt"
+	"math/big"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/accounts/abi"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	gethtypes "github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/goplugin/libocr/commontypes"
+	ocr2Types "github.com/goplugin/libocr/offchainreporting2plus/types"
+
+	"github.com/goplugin/plugin-vrf/dkg"
+	"github.com/goplugin/plugin-vrf/ocr2vrf"
+	ocr2vrftypes "github.com/goplugin/plugin-vrf/types"
+
+	"github.com/goplugin/plugin-common/pkg/types"
+	"github.com/goplugin/plugin-common/pkg/utils/mathutil"
+
+	evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks"
+	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller"
+	lp_mocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks"
+	dkg_wrapper "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/dkg"
+	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon"
+	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_coordinator"
+	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
+	"github.com/goplugin/pluginv3.0/v2/core/logger"
+	"github.com/goplugin/pluginv3.0/v2/core/services/job"
+	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2vrf/coordinator/mocks"
+)
+
+// TestCoordinator_BeaconPeriod verifies that BeaconPeriod surfaces the on-chain
+// value on success and propagates RPC errors on failure.
+func TestCoordinator_BeaconPeriod(t *testing.T) {
+	t.Parallel()
+
+	t.Run("valid output", func(t *testing.T) {
+		onchainRouter := mocks.NewVRFBeaconCoordinator(t)
+		onchainRouter.
+			On("IBeaconPeriodBlocks", mock.Anything).
+			Return(big.NewInt(10), nil)
+		c := &coordinator{
+			onchainRouter: onchainRouter,
+		}
+		period, err := c.BeaconPeriod(testutils.Context(t))
+		assert.NoError(t, err)
+		assert.Equal(t, uint16(10), period)
+	})
+
+	t.Run("invalid output", func(t *testing.T) {
+		onchainRouter := mocks.NewVRFBeaconCoordinator(t)
+		onchainRouter.
+			On("IBeaconPeriodBlocks", mock.Anything).
+			Return(nil, errors.New("rpc error"))
+		c := &coordinator{
+			onchainRouter: onchainRouter,
+		}
+		_, err := c.BeaconPeriod(testutils.Context(t))
+		assert.Error(t, err)
+	})
+}
+
+func TestCoordinator_DKGVRFCommittees(t *testing.T) {
+	t.Parallel()
+	evmClient := evmclimocks.NewClient(t)
+	evmClient.On("ConfiguredChainID").Return(big.NewInt(1))
+
+	t.Run("happy path", func(t *testing.T) {
+		// In this test the DKG and VRF committees have the same signers and
+		// transmitters. This may (?) be different in practice.
+
+		lp := lp_mocks.NewLogPoller(t)
+		tp := newTopics()
+
+		coordinatorAddress := newAddress(t)
+		beaconAddress := newAddress(t)
+		dkgAddress := newAddress(t)
+		lp.On("LatestLogByEventSigWithConfs", tp.configSetTopic, beaconAddress, logpoller.Confirmations(10), mock.Anything).
+ Return(&logpoller.Log{ + Data: hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000a6fca200010576e704b4a519484d6239ef17f1f5b4a82e330b0daf827ed4dc2789971b0000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000002a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000050000000000000000000000000a8cbea12a06869d3ec432ab9682dab6c761d591000000000000000000000000f4f9db7bb1d16b7cdfb18ec68994c26964f5985300000000000000000000000022fb3f90c539457f00d8484438869135e604a65500000000000000000000000033cbcedccb11c9773ad78e214ba342e979255ab30000000000000000000000006ffaa96256fbc1012325cca88c79f725c33eed80000000000000000000000000000000000000000000000000000000000000000500000000000000000000000074103cf8b436465870b26aa9fa2f62ad62b22e3500000000000000000000000038a6cb196f805cc3041f6645a5a6cec27b64430d00000000000000000000000047d7095cfebf8285bdaa421bc8268d0db87d933c000000000000000000000000a8842be973800ff61d80d2d53fa62c3a685380eb0000000000000000000000003750e31321aee8c024751877070e8d5f704ce98700000000000000000000000000000000000000000000000000000000000000206f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbf000000000000000000000000000000000000000000000000000000000000004220880d88ee16f1080c8afa0251880c8afa025208090dfc04a288090dfc04a30033a05010101010142206c5ca6f74b532222ac927dd3de235d46a943e372c0563393a33b01dcfd3f371c4220855114d25c2ef5e85fffe4f20a365672d8f2dba3b2ec82333f494168a2039c0442200266e835634db00977cbc1caa4db10e1676c1a4c0fcbc6ba7f09300f0d1831824220980cd91f7a73f20f4b0d51d00cd4e00373dc2beafbb299ca3c609757ab98c8304220eb6d36e2af8922085ff510bbe1eb8932a0e3295ca9f047fef25d90e
69c52948f4a34313244334b6f6f574463364b7232644542684b59326b336e685057694676544565325331703978544532544b74344d7572716f684a34313244334b6f6f574b436e4367724b637743324a3577576a626e355435335068646b6b6f57454e534a39546537544b7836366f4a4a34313244334b6f6f575239616f675948786b357a38636b624c4c56346e426f7a777a747871664a7050586671336d4a7232796452474a34313244334b6f6f5744695444635565675637776b313133473366476a69616259756f54436f3157726f6f53656741343263556f544a34313244334b6f6f574e64687072586b5472665370354d5071736270467a70364167394a53787358694341434442676454424c656652820300050e416c74424e2d3132382047e282810e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b106069c216a812d7616e47f0bd38fa4863f48fbcda6a38af4c58d2233dfa7cf79620947042d09f923e0a2f7a2270391e8b058d8bdb8f79fe082b7b627f025651c7290382fdff97c3181d15d162c146ce87ff752499d2acc2b26011439a12e29571a6f1e1defb1751c3be4258c493984fd9f0f6b4a26c539870b5f15bfed3d8ffac92499eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc2432b127a112970e1adf615f823b2b2180754c2f0ee01f1b389e56df55ca09702cd0401b66ff71779d2dd67222503a85ab921b28c329cc1832800b192d0b0247c0776e1b9653dc00df48daa6364287c84c0382f5165e7269fef06d10bc67c1bba252305d1af0dc7bb0fe92558eb4c5f38c23163dee1cfb34a72020669dbdfe337c16f3307472616e736c61746f722066726f6d20416c74424e2d3132382047e2828120746f20416c74424e2d3132382047e282825880ade2046080c8afa0256880c8afa0257080ade204788094ebdc0382019e010a205034214e0bd4373f38e162cf9fc9133e2f3b71441faa4c3d1ac01c1877f1cd2712200e03e975b996f911abba2b79d2596c2150bc94510963c40a1137a03df6edacdb1a107dee1cdb894163813bb3da604c9c133c1a10bb33302eeafbd55d352e35dcc5d2b3311a10d2c658b6b93d74a02d467849b6fe75251a10fea5308cc1fea69e7246eafe7ca8a3a51a1048efe1ad873b6f025ac0243bdef715f8000000000000000000000000000000000000000000000000000000000000"), + }, nil) + lp.On("LatestLogByEventSigWithConfs", tp.configSetTopic, dkgAddress, logpoller.Confirmations(10), mock.Anything). 
+ Return(&logpoller.Log{ + Data: hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000a6fca200010576e704b4a519484d6239ef17f1f5b4a82e330b0daf827ed4dc2789971b0000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000002a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000050000000000000000000000000a8cbea12a06869d3ec432ab9682dab6c761d591000000000000000000000000f4f9db7bb1d16b7cdfb18ec68994c26964f5985300000000000000000000000022fb3f90c539457f00d8484438869135e604a65500000000000000000000000033cbcedccb11c9773ad78e214ba342e979255ab30000000000000000000000006ffaa96256fbc1012325cca88c79f725c33eed80000000000000000000000000000000000000000000000000000000000000000500000000000000000000000074103cf8b436465870b26aa9fa2f62ad62b22e3500000000000000000000000038a6cb196f805cc3041f6645a5a6cec27b64430d00000000000000000000000047d7095cfebf8285bdaa421bc8268d0db87d933c000000000000000000000000a8842be973800ff61d80d2d53fa62c3a685380eb0000000000000000000000003750e31321aee8c024751877070e8d5f704ce98700000000000000000000000000000000000000000000000000000000000000206f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbf000000000000000000000000000000000000000000000000000000000000004220880d88ee16f1080c8afa0251880c8afa025208090dfc04a288090dfc04a30033a05010101010142206c5ca6f74b532222ac927dd3de235d46a943e372c0563393a33b01dcfd3f371c4220855114d25c2ef5e85fffe4f20a365672d8f2dba3b2ec82333f494168a2039c0442200266e835634db00977cbc1caa4db10e1676c1a4c0fcbc6ba7f09300f0d1831824220980cd91f7a73f20f4b0d51d00cd4e00373dc2beafbb299ca3c609757ab98c8304220eb6d36e2af8922085ff510bbe1eb8932a0e3295ca9f047fef25d90e
69c52948f4a34313244334b6f6f574463364b7232644542684b59326b336e685057694676544565325331703978544532544b74344d7572716f684a34313244334b6f6f574b436e4367724b637743324a3577576a626e355435335068646b6b6f57454e534a39546537544b7836366f4a4a34313244334b6f6f575239616f675948786b357a38636b624c4c56346e426f7a777a747871664a7050586671336d4a7232796452474a34313244334b6f6f5744695444635565675637776b313133473366476a69616259756f54436f3157726f6f53656741343263556f544a34313244334b6f6f574e64687072586b5472665370354d5071736270467a70364167394a53787358694341434442676454424c656652820300050e416c74424e2d3132382047e282810e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b106069c216a812d7616e47f0bd38fa4863f48fbcda6a38af4c58d2233dfa7cf79620947042d09f923e0a2f7a2270391e8b058d8bdb8f79fe082b7b627f025651c7290382fdff97c3181d15d162c146ce87ff752499d2acc2b26011439a12e29571a6f1e1defb1751c3be4258c493984fd9f0f6b4a26c539870b5f15bfed3d8ffac92499eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc2432b127a112970e1adf615f823b2b2180754c2f0ee01f1b389e56df55ca09702cd0401b66ff71779d2dd67222503a85ab921b28c329cc1832800b192d0b0247c0776e1b9653dc00df48daa6364287c84c0382f5165e7269fef06d10bc67c1bba252305d1af0dc7bb0fe92558eb4c5f38c23163dee1cfb34a72020669dbdfe337c16f3307472616e736c61746f722066726f6d20416c74424e2d3132382047e2828120746f20416c74424e2d3132382047e282825880ade2046080c8afa0256880c8afa0257080ade204788094ebdc0382019e010a205034214e0bd4373f38e162cf9fc9133e2f3b71441faa4c3d1ac01c1877f1cd2712200e03e975b996f911abba2b79d2596c2150bc94510963c40a1137a03df6edacdb1a107dee1cdb894163813bb3da604c9c133c1a10bb33302eeafbd55d352e35dcc5d2b3311a10d2c658b6b93d74a02d467849b6fe75251a10fea5308cc1fea69e7246eafe7ca8a3a51a1048efe1ad873b6f025ac0243bdef715f8000000000000000000000000000000000000000000000000000000000000"), + }, nil) + + expectedDKGVRF := ocr2vrftypes.OCRCommittee{ + Signers: []common.Address{ + common.HexToAddress("0x0A8cbEA12a06869d3EC432aB9682DAb6C761D591"), + common.HexToAddress("0xF4f9db7BB1d16b7CDfb18Ec68994c26964F59853"), + 
common.HexToAddress("0x22fB3F90C539457f00d8484438869135E604a655"), + common.HexToAddress("0x33CbCedccb11c9773AD78e214Ba342E979255ab3"), + common.HexToAddress("0x6ffaA96256fbC1012325cca88C79F725c33eED80"), + }, + Transmitters: []common.Address{ + common.HexToAddress("0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35"), + common.HexToAddress("0x38A6Cb196f805cC3041F6645a5A6CEC27B64430D"), + common.HexToAddress("0x47d7095CFEBF8285BdAa421Bc8268D0DB87D933C"), + common.HexToAddress("0xa8842BE973800fF61D80d2d53fa62C3a685380eB"), + common.HexToAddress("0x3750e31321aEE8c024751877070E8d5F704cE987"), + }, + } + + c := &coordinator{ + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + dkgAddress: dkgAddress, + finalityDepth: 10, + evmClient: evmClient, + } + actualDKG, actualVRF, err := c.DKGVRFCommittees(testutils.Context(t)) + assert.NoError(t, err) + assert.ElementsMatch(t, expectedDKGVRF.Signers, actualDKG.Signers) + assert.ElementsMatch(t, expectedDKGVRF.Transmitters, actualDKG.Transmitters) + assert.ElementsMatch(t, expectedDKGVRF.Signers, actualVRF.Signers) + assert.ElementsMatch(t, expectedDKGVRF.Transmitters, actualVRF.Transmitters) + }) + + t.Run("vrf log poll fails", func(t *testing.T) { + lp := lp_mocks.NewLogPoller(t) + tp := newTopics() + + beaconAddress := newAddress(t) + lp.On("LatestLogByEventSigWithConfs", tp.configSetTopic, beaconAddress, logpoller.Confirmations(10), mock.Anything). 
+ Return(nil, errors.New("rpc error")) + + c := &coordinator{ + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + beaconAddress: beaconAddress, + finalityDepth: 10, + evmClient: evmClient, + } + + _, _, err := c.DKGVRFCommittees(testutils.Context(t)) + assert.Error(t, err) + }) + + t.Run("dkg log poll fails", func(t *testing.T) { + lp := lp_mocks.NewLogPoller(t) + tp := newTopics() + beaconAddress := newAddress(t) + coordinatorAddress := newAddress(t) + dkgAddress := newAddress(t) + lp.On("LatestLogByEventSigWithConfs", tp.configSetTopic, beaconAddress, logpoller.Confirmations(10), mock.Anything). + Return(&logpoller.Log{ + Data: hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000a6fca200010576e704b4a519484d6239ef17f1f5b4a82e330b0daf827ed4dc2789971b0000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000002a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000050000000000000000000000000a8cbea12a06869d3ec432ab9682dab6c761d591000000000000000000000000f4f9db7bb1d16b7cdfb18ec68994c26964f5985300000000000000000000000022fb3f90c539457f00d8484438869135e604a65500000000000000000000000033cbcedccb11c9773ad78e214ba342e979255ab30000000000000000000000006ffaa96256fbc1012325cca88c79f725c33eed80000000000000000000000000000000000000000000000000000000000000000500000000000000000000000074103cf8b436465870b26aa9fa2f62ad62b22e3500000000000000000000000038a6cb196f805cc3041f6645a5a6cec27b64430d00000000000000000000000047d7095cfebf8285bdaa421bc8268d0db87d933c000000000000000000000000a8842be973800ff61d80d2d53fa62c3a685380eb0000000000000000000000003750e31321aee8c024751877070e8d5f
704ce98700000000000000000000000000000000000000000000000000000000000000206f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbf000000000000000000000000000000000000000000000000000000000000004220880d88ee16f1080c8afa0251880c8afa025208090dfc04a288090dfc04a30033a05010101010142206c5ca6f74b532222ac927dd3de235d46a943e372c0563393a33b01dcfd3f371c4220855114d25c2ef5e85fffe4f20a365672d8f2dba3b2ec82333f494168a2039c0442200266e835634db00977cbc1caa4db10e1676c1a4c0fcbc6ba7f09300f0d1831824220980cd91f7a73f20f4b0d51d00cd4e00373dc2beafbb299ca3c609757ab98c8304220eb6d36e2af8922085ff510bbe1eb8932a0e3295ca9f047fef25d90e69c52948f4a34313244334b6f6f574463364b7232644542684b59326b336e685057694676544565325331703978544532544b74344d7572716f684a34313244334b6f6f574b436e4367724b637743324a3577576a626e355435335068646b6b6f57454e534a39546537544b7836366f4a4a34313244334b6f6f575239616f675948786b357a38636b624c4c56346e426f7a777a747871664a7050586671336d4a7232796452474a34313244334b6f6f5744695444635565675637776b313133473366476a69616259756f54436f3157726f6f53656741343263556f544a34313244334b6f6f574e64687072586b5472665370354d5071736270467a70364167394a53787358694341434442676454424c656652820300050e416c74424e2d3132382047e282810e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b106069c216a812d7616e47f0bd38fa4863f48fbcda6a38af4c58d2233dfa7cf79620947042d09f923e0a2f7a2270391e8b058d8bdb8f79fe082b7b627f025651c7290382fdff97c3181d15d162c146ce87ff752499d2acc2b26011439a12e29571a6f1e1defb1751c3be4258c493984fd9f0f6b4a26c539870b5f15bfed3d8ffac92499eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc2432b127a112970e1adf615f823b2b2180754c2f0ee01f1b389e56df55ca09702cd0401b66ff71779d2dd67222503a85ab921b28c329cc1832800b192d0b0247c0776e1b9653dc00df48daa6364287c84c0382f5165e7269fef06d10bc67c1bba252305d1af0dc7bb0fe92558eb4c5f38c23163dee1cfb34a72020669dbdfe337c16f3307472616e736c61746f722066726f6d20416c74424e2d3132382047e2828120746f20416c74424e2d3132382047e282825880ade2046080c8afa0256880c8afa0257080ade204788094ebdc0382019e01
0a205034214e0bd4373f38e162cf9fc9133e2f3b71441faa4c3d1ac01c1877f1cd2712200e03e975b996f911abba2b79d2596c2150bc94510963c40a1137a03df6edacdb1a107dee1cdb894163813bb3da604c9c133c1a10bb33302eeafbd55d352e35dcc5d2b3311a10d2c658b6b93d74a02d467849b6fe75251a10fea5308cc1fea69e7246eafe7ca8a3a51a1048efe1ad873b6f025ac0243bdef715f8000000000000000000000000000000000000000000000000000000000000"), + }, nil) + lp.On("LatestLogByEventSigWithConfs", tp.configSetTopic, dkgAddress, logpoller.Confirmations(10), mock.Anything). + Return(nil, errors.New("rpc error")) + + c := &coordinator{ + lp: lp, + topics: tp, + lggr: logger.TestLogger(t), + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + dkgAddress: dkgAddress, + finalityDepth: 10, + evmClient: evmClient, + } + _, _, err := c.DKGVRFCommittees(testutils.Context(t)) + assert.Error(t, err) + }) +} + +/* TestCoordinator_ProvingKeyHash verifies that coordinator.ProvingKeyHash surfaces the mocked VRFBeaconCoordinator's SProvingKeyHash result as-is: the 32-byte hash on success and the error on failure. */ func TestCoordinator_ProvingKeyHash(t *testing.T) { + t.Parallel() + + t.Run("valid output", func(t *testing.T) { + /* mocked getter returns keccak256("hello world"); ProvingKeyHash must return it unchanged */ h := crypto.Keccak256Hash([]byte("hello world")) + var expected [32]byte + copy(expected[:], h.Bytes()) + onchainRouter := mocks.NewVRFBeaconCoordinator(t) + onchainRouter. + On("SProvingKeyHash", mock.Anything). + Return(expected, nil) + c := &coordinator{ + onchainRouter: onchainRouter, + } + provingKeyHash, err := c.ProvingKeyHash(testutils.Context(t)) + assert.NoError(t, err) + assert.ElementsMatch(t, expected[:], provingKeyHash[:]) + }) + + t.Run("invalid output", func(t *testing.T) { + /* an RPC error from the getter must propagate out of ProvingKeyHash */ onchainRouter := mocks.NewVRFBeaconCoordinator(t) + onchainRouter. + On("SProvingKeyHash", mock.Anything). 
+ Return([32]byte{}, errors.New("rpc error")) + c := &coordinator{ + onchainRouter: onchainRouter, + } + _, err := c.ProvingKeyHash(testutils.Context(t)) + assert.Error(t, err) + }) +} + +func TestCoordinator_ReportBlocks(t *testing.T) { + lggr := logger.TestLogger(t) + proofG1X := big.NewInt(1) + proofG1Y := big.NewInt(2) + evmClient := evmclimocks.NewClient(t) + evmClient.On("ConfiguredChainID").Return(big.NewInt(1)) + t.Run("happy path, beacon requests", func(t *testing.T) { + beaconAddress := newAddress(t) + coordinatorAddress := newAddress(t) + + latestHeadNumber := uint64(200) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + tp := newTopics() + + lookbackBlocks := uint64(5) + lp := getLogPoller(t, []uint64{195}, latestHeadNumber, true, true, lookbackBlocks) + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return([]logpoller.Log{ + newRandomnessRequestedLog(t, 3, 195, 191, 0, coordinatorAddress), + newRandomnessRequestedLog(t, 3, 195, 192, 1, coordinatorAddress), + newRandomnessRequestedLog(t, 3, 195, 193, 2, coordinatorAddress), + }, nil).Once() + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + coordinatorConfig: newCoordinatorConfig(lookbackBlocks), + blockhashLookback: lookbackBlocks, + } + + blocks, callbacks, recentHeightStart, recentBlocks, err := c.ReportBlocks( + 
testutils.Context(t), + 0, // slotInterval: unused + map[uint32]struct{}{3: {}}, + time.Duration(0), + 100, // maxBlocks: unused + 100, // maxCallbacks: unused + ) + assert.NoError(t, err) + assert.Len(t, blocks, 1) + assert.Len(t, callbacks, 0) + assert.Equal(t, latestHeadNumber-lookbackBlocks+1, recentHeightStart) + assert.Len(t, recentBlocks, int(lookbackBlocks)) + }) + + t.Run("happy path, callback requests", func(t *testing.T) { + beaconAddress := newAddress(t) + coordinatorAddress := newAddress(t) + + latestHeadNumber := uint64(200) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + tp := newTopics() + + lookbackBlocks := uint64(5) + lp := getLogPoller(t, []uint64{195}, latestHeadNumber, true, true, lookbackBlocks) + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return([]logpoller.Log{ + newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 1000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 192, 2, 1000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 193, 3, 1000, coordinatorAddress), + }, nil).Once() + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + coordinatorConfig: newCoordinatorConfig(lookbackBlocks), + blockhashLookback: lookbackBlocks, + } + + blocks, callbacks, recentHeightStart, recentBlocks, err := 
c.ReportBlocks( + testutils.Context(t), + 0, // slotInterval: unused + map[uint32]struct{}{3: {}}, + time.Duration(0), + 100, // maxBlocks: unused + 100, // maxCallbacks: unused + ) + assert.NoError(t, err) + assert.Len(t, blocks, 1) + for _, b := range blocks { + assert.False(t, b.ShouldStore) + } + assert.Len(t, callbacks, 3) + assert.Equal(t, latestHeadNumber-lookbackBlocks+1, recentHeightStart) + assert.Len(t, recentBlocks, int(lookbackBlocks)) + }) + + t.Run("happy path, beacon requests, beacon fulfillments", func(t *testing.T) { + beaconAddress := newAddress(t) + coordinatorAddress := newAddress(t) + + latestHeadNumber := uint64(200) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + tp := newTopics() + + lookbackBlocks := uint64(5) + lp := getLogPoller(t, []uint64{195}, latestHeadNumber, true, true, lookbackBlocks) + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return([]logpoller.Log{ + newRandomnessRequestedLog(t, 3, 195, 191, 0, coordinatorAddress), + newRandomnessRequestedLog(t, 3, 195, 192, 1, coordinatorAddress), + newRandomnessRequestedLog(t, 3, 195, 193, 2, coordinatorAddress), + newOutputsServedLog(t, []vrf_coordinator.VRFBeaconTypesOutputServed{ + { + Height: 195, + ConfirmationDelay: big.NewInt(3), + ProofG1X: proofG1X, + ProofG1Y: proofG1Y, + }, + }, coordinatorAddress), + }, nil).Once() + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: 
NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + coordinatorConfig: newCoordinatorConfig(lookbackBlocks), + blockhashLookback: lookbackBlocks, + } + + blocks, callbacks, recentHeightStart, recentBlocks, err := c.ReportBlocks( + testutils.Context(t), + 0, // slotInterval: unused + map[uint32]struct{}{3: {}}, + time.Duration(0), + 100, // maxBlocks: unused + 100, // maxCallbacks: unused + ) + assert.NoError(t, err) + assert.Len(t, blocks, 0) + assert.Len(t, callbacks, 0) + assert.Equal(t, latestHeadNumber-lookbackBlocks+1, recentHeightStart) + assert.Len(t, recentBlocks, int(lookbackBlocks)) + }) + + t.Run("happy path, callback requests, callback fulfillments", func(t *testing.T) { + beaconAddress := newAddress(t) + coordinatorAddress := newAddress(t) + + latestHeadNumber := uint64(200) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + tp := newTopics() + + lookbackBlocks := uint64(5) + lp := getLogPoller(t, []uint64{195}, latestHeadNumber, true, true, lookbackBlocks) + // Both RandomWordsFulfilled and NewTransmission events are emitted + // when a VRF fulfillment happens on chain. + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return([]logpoller.Log{ + newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 1000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 192, 2, 1000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 193, 3, 1000, coordinatorAddress), + // Regardless of success or failure, if the fulfillment has been tried once do not report again. 
+ newRandomWordsFulfilledLog(t, []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, []byte{1, 0, 0}, coordinatorAddress), + newOutputsServedLog(t, []vrf_coordinator.VRFBeaconTypesOutputServed{ + { + Height: 195, + ConfirmationDelay: big.NewInt(3), + ProofG1X: proofG1X, + ProofG1Y: proofG1Y, + }, + }, coordinatorAddress), + }, nil).Once() + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + coordinatorConfig: newCoordinatorConfig(lookbackBlocks), + blockhashLookback: lookbackBlocks, + } + + blocks, callbacks, recentHeightStart, recentBlocks, err := c.ReportBlocks( + testutils.Context(t), + 0, // slotInterval: unused + map[uint32]struct{}{3: {}}, + time.Duration(0), + 100, // maxBlocks: unused + 100, // maxCallbacks: unused + ) + assert.NoError(t, err) + assert.Len(t, blocks, 0) + assert.Len(t, callbacks, 0) + assert.Equal(t, latestHeadNumber-lookbackBlocks+1, recentHeightStart) + assert.Len(t, recentBlocks, int(lookbackBlocks)) + }) + + t.Run("happy path, only beacon fulfillment", func(t *testing.T) { + beaconAddress := newAddress(t) + coordinatorAddress := newAddress(t) + + latestHeadNumber := uint64(200) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + tp := newTopics() + + lookbackBlocks := uint64(5) + lp := getLogPoller(t, []uint64{}, latestHeadNumber, true, true, lookbackBlocks) + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + 
tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return([]logpoller.Log{newOutputsServedLog(t, []vrf_coordinator.VRFBeaconTypesOutputServed{ + { + Height: 195, + ConfirmationDelay: big.NewInt(3), + ProofG1X: proofG1X, + ProofG1Y: proofG1Y, + }, + }, coordinatorAddress)}, nil).Once() + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + coordinatorConfig: newCoordinatorConfig(lookbackBlocks), + blockhashLookback: lookbackBlocks, + } + + blocks, callbacks, recentHeightStart, recentBlocks, err := c.ReportBlocks( + testutils.Context(t), + 0, // slotInterval: unused + map[uint32]struct{}{3: {}}, + time.Duration(0), + 100, // maxBlocks: unused + 100, // maxCallbacks: unused + ) + assert.NoError(t, err) + assert.Len(t, blocks, 0) + assert.Len(t, callbacks, 0) + assert.Equal(t, latestHeadNumber-lookbackBlocks+1, recentHeightStart) + assert.Len(t, recentBlocks, int(lookbackBlocks)) + }) + + t.Run("happy path, callback requests & callback fulfillments in-flight", func(t *testing.T) { + beaconAddress := newAddress(t) + coordinatorAddress := newAddress(t) + + latestHeadNumber := uint64(200) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + tp := newTopics() + + lookbackBlocks := uint64(5) + // Do not include latestHeadNumber in "GetBlocksRange" call for initial "ReportWillBeTransmitted." + // Do not include recent blockhashes in range either. 
+ lp := getLogPoller(t, []uint64{195}, latestHeadNumber, false, false /* includeLatestHeadInRange */, 0) + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + coordinatorConfig: newCoordinatorConfig(lookbackBlocks), + blockhashLookback: lookbackBlocks, + } + + report := ocr2vrftypes.AbstractReport{ + RecentBlockHeight: 195, + RecentBlockHash: common.HexToHash("0x001"), + Outputs: []ocr2vrftypes.AbstractVRFOutput{ + { + BlockHeight: 195, + ConfirmationDelay: 195, + Callbacks: []ocr2vrftypes.AbstractCostedCallbackRequest{ + { + RequestID: big.NewInt(1), + BeaconHeight: 195, + }, + { + RequestID: big.NewInt(2), + BeaconHeight: 195, + }, + { + RequestID: big.NewInt(3), + BeaconHeight: 195, + }, + }, + }, + }, + } + + err = c.ReportWillBeTransmitted(testutils.Context(t), report) + require.NoError(t, err) + + // Include latestHeadNumber in "GetBlocksRange" call for "ReportBlocks" call. + // Include recent blockhashes in range. 
+ lp = getLogPoller(t, []uint64{195}, latestHeadNumber, true, true /* includeLatestHeadInRange */, lookbackBlocks) + c.lp = lp + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return([]logpoller.Log{ + newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 1000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 192, 2, 1000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 193, 3, 1000, coordinatorAddress), + newOutputsServedLog(t, []vrf_coordinator.VRFBeaconTypesOutputServed{ + { + Height: 195, + ConfirmationDelay: big.NewInt(3), + ProofG1X: proofG1X, + ProofG1Y: proofG1Y, + }, + }, coordinatorAddress), + }, nil).Once() + + blocks, callbacks, recentHeightStart, recentBlocks, err := c.ReportBlocks( + testutils.Context(t), + 0, // slotInterval: unused + map[uint32]struct{}{3: {}}, + time.Duration(0), + 100, // maxBlocks: unused + 100, // maxCallbacks: unused + ) + assert.NoError(t, err) + assert.Len(t, blocks, 0) + assert.Len(t, callbacks, 0) + assert.Equal(t, latestHeadNumber-lookbackBlocks+1, recentHeightStart) + assert.Len(t, recentBlocks, int(lookbackBlocks)) + }) + + t.Run("happy path, blocks requested hits batch gas limit", func(t *testing.T) { + coordinatorAddress := newAddress(t) + beaconAddress := newAddress(t) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + latestHeadNumber := uint64(400) + lookbackBlocks := uint64(400) + blockhashLookback := uint64(256) + + tp := newTopics() + + logs := []logpoller.Log{} + requestedBlocks := []uint64{} + + // Populate 200 request blocks. 
+ for i := 0; i < 400; i += 2 { + logs = append(logs, newRandomnessRequestedLog(t, 1, uint64(i), 0, int64(i), coordinatorAddress)) + requestedBlocks = append(requestedBlocks, uint64(i)) + } + lp := getLogPoller(t, requestedBlocks, latestHeadNumber, true, true, blockhashLookback) + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return(logs, nil) + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + coordinatorConfig: newCoordinatorConfig(lookbackBlocks), + blockhashLookback: blockhashLookback, + } + + blocks, callbacks, recentHeightStart, recentBlocks, err := c.ReportBlocks( + testutils.Context(t), + 0, // slotInterval: unused + map[uint32]struct{}{1: {}}, + time.Duration(0), + 100, // maxBlocks: unused + 100, // maxCallbacks: unused + ) + + // Coordinator should allow 99 blocks, i.e 100 blocks - 1 block's worth of gas + // for the coordinator overhead. 
+ assert.NoError(t, err) + assert.Len(t, blocks, 99) + for _, b := range blocks { + assert.True(t, b.ShouldStore) + } + assert.Len(t, callbacks, 0) + assert.Equal(t, latestHeadNumber-blockhashLookback+1, recentHeightStart) + assert.Len(t, recentBlocks, int(blockhashLookback)) + }) + + t.Run("happy path, last callback hits batch gas limit", func(t *testing.T) { + coordinatorAddress := newAddress(t) + beaconAddress := newAddress(t) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + latestHeadNumber := uint64(200) + lookbackBlocks := uint64(5) + + tp := newTopics() + + requestedBlocks := []uint64{195} + lp := getLogPoller(t, requestedBlocks, latestHeadNumber, true, true, lookbackBlocks) + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return([]logpoller.Log{ + newRandomnessRequestedLog(t, 3, 195, 191, 0, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 2_000_000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 192, 2, 2_900_000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 193, 3, 1, coordinatorAddress), + }, nil) + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + coordinatorConfig: newCoordinatorConfig(lookbackBlocks), + blockhashLookback: lookbackBlocks, + } + + blocks, callbacks, recentHeightStart, 
recentBlocks, err := c.ReportBlocks( + testutils.Context(t), + 0, // slotInterval: unused + map[uint32]struct{}{3: {}}, + time.Duration(0), + 100, // maxBlocks: unused + 100, // maxCallbacks: unused + ) + + // Should allow the first two callbacks, which add up to 4_950_000 + 50_000 (1 block) = 5_000_000, + // then reject the last callback for being out of gas. + assert.NoError(t, err) + assert.Len(t, blocks, 1) + for _, b := range blocks { + assert.True(t, b.ShouldStore) + } + assert.Len(t, callbacks, 2) + assert.Equal(t, latestHeadNumber-lookbackBlocks+1, recentHeightStart) + assert.Len(t, recentBlocks, int(lookbackBlocks)) + }) + + t.Run("happy path, sandwiched callbacks hit batch gas limit", func(t *testing.T) { + coordinatorAddress := newAddress(t) + beaconAddress := newAddress(t) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + latestHeadNumber := uint64(200) + lookbackBlocks := uint64(5) + + tp := newTopics() + + requestedBlocks := []uint64{195} + lp := getLogPoller(t, requestedBlocks, latestHeadNumber, true, true, lookbackBlocks) + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return([]logpoller.Log{ + newRandomnessRequestedLog(t, 3, 195, 191, 0, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 10_000_000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 192, 2, 1000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 193, 3, 10_000_000, coordinatorAddress), + }, nil) + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + 
toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + coordinatorConfig: newCoordinatorConfig(lookbackBlocks), + blockhashLookback: lookbackBlocks, + } + + blocks, callbacks, recentHeightStart, recentBlocks, err := c.ReportBlocks( + testutils.Context(t), + 0, // slotInterval: unused + map[uint32]struct{}{3: {}}, + time.Duration(0), + 100, // maxBlocks: unused + 100, // maxCallbacks: unused + ) + + // Should allow the middle callback, with an acceptable gas allowance, to be processed. + assert.NoError(t, err) + assert.Len(t, blocks, 1) + assert.Len(t, callbacks, 1) + assert.Equal(t, latestHeadNumber-lookbackBlocks+1, recentHeightStart) + assert.Len(t, recentBlocks, int(lookbackBlocks)) + }) + + t.Run("happy path, sandwiched callbacks with valid callback in next block hit batch gas limit", func(t *testing.T) { + coordinatorAddress := newAddress(t) + beaconAddress := newAddress(t) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + latestHeadNumber := uint64(200) + lookbackBlocks := uint64(5) + + tp := newTopics() + + requestedBlocks := []uint64{195, 196} + lp := getLogPoller(t, requestedBlocks, latestHeadNumber, true, true, lookbackBlocks) + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return([]logpoller.Log{ + newRandomnessRequestedLog(t, 3, 195, 191, 0, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 10_000_000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 192, 2, 1000, coordinatorAddress), + 
newRandomnessFulfillmentRequestedLog(t, 3, 195, 193, 3, 10_000_000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 196, 194, 4, 1000, coordinatorAddress), + }, nil) + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + coordinatorConfig: newCoordinatorConfig(lookbackBlocks), + blockhashLookback: lookbackBlocks, + } + + blocks, callbacks, recentHeightStart, recentBlocks, err := c.ReportBlocks( + testutils.Context(t), + 0, // slotInterval: unused + map[uint32]struct{}{3: {}}, + time.Duration(0), + 100, // maxBlocks: unused + 100, // maxCallbacks: unused + ) + + // Should allow the middle callback, with an acceptable gas allowance, to be processed, + // then move to the next block and find a suitable callback. Also adds the block 196 for + // that callback. 
+ assert.NoError(t, err) + assert.Len(t, blocks, 2) + assert.Len(t, callbacks, 2) + assert.Equal(t, latestHeadNumber-lookbackBlocks+1, recentHeightStart) + assert.Len(t, recentBlocks, int(lookbackBlocks)) + }) + + t.Run("correct blockhashes are retrieved with the maximum lookback", func(t *testing.T) { + coordinatorAddress := newAddress(t) + beaconAddress := newAddress(t) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + latestHeadNumber := uint64(1000) + lookbackBlocks := uint64(256) + + tp := newTopics() + + requestedBlocks := []uint64{} + lp := getLogPoller(t, requestedBlocks, latestHeadNumber, true, true, lookbackBlocks) + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return([]logpoller.Log{}, nil) + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + coordinatorConfig: newCoordinatorConfig(lookbackBlocks), + blockhashLookback: lookbackBlocks, + } + + _, _, recentHeightStart, recentBlocks, err := c.ReportBlocks( + testutils.Context(t), + 0, // slotInterval: unused + map[uint32]struct{}{3: {}}, + time.Duration(0), + 100, // maxBlocks: unused + 100, // maxCallbacks: unused + ) + + assert.NoError(t, err) + assert.Equal(t, latestHeadNumber-lookbackBlocks+1, recentHeightStart) + assert.Equal(t, common.HexToHash(fmt.Sprintf("0x00%d", 1)), recentBlocks[0]) + assert.Equal(t, 
common.HexToHash(fmt.Sprintf("0x00%d", lookbackBlocks)), recentBlocks[len(recentBlocks)-1]) + assert.Len(t, recentBlocks, int(lookbackBlocks)) + }) + + t.Run("correct blockhashes are retrieved with a capped lookback (close to genesis block)", func(t *testing.T) { + coordinatorAddress := newAddress(t) + beaconAddress := newAddress(t) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + latestHeadNumber := uint64(100) + lookbackBlocks := uint64(100) + + tp := newTopics() + + requestedBlocks := []uint64{} + lp := getLogPoller(t, requestedBlocks, latestHeadNumber, true, true, lookbackBlocks) + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return([]logpoller.Log{}, nil) + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + coordinatorConfig: newCoordinatorConfig(lookbackBlocks), + blockhashLookback: lookbackBlocks, + } + + _, _, recentHeightStart, recentBlocks, err := c.ReportBlocks( + testutils.Context(t), + 0, // slotInterval: unused + map[uint32]struct{}{3: {}}, + time.Duration(0), + 100, // maxBlocks: unused + 100, // maxCallbacks: unused + ) + + assert.NoError(t, err) + assert.Equal(t, latestHeadNumber-lookbackBlocks+1, recentHeightStart) + assert.Equal(t, common.HexToHash(fmt.Sprintf("0x00%d", 1)), recentBlocks[0]) + assert.Equal(t, common.HexToHash(fmt.Sprintf("0x00%d", 
lookbackBlocks)), recentBlocks[len(recentBlocks)-1]) + assert.Len(t, recentBlocks, int(lookbackBlocks)) + }) + + t.Run("logpoller GetBlocks returns error", func(tt *testing.T) { + coordinatorAddress := newAddress(t) + beaconAddress := newAddress(t) + onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient) + require.NoError(t, err) + + latestHeadNumber := uint64(200) + lookbackBlocks := uint64(5) + + tp := newTopics() + + requestedBlocks := []uint64{195, 196} + lp := lp_mocks.NewLogPoller(t) + lp.On("LatestBlock", mock.Anything). + Return(logpoller.LogPollerBlock{BlockNumber: int64(latestHeadNumber)}, nil) + + lp.On("GetBlocksRange", mock.Anything, append(requestedBlocks, latestHeadNumber-lookbackBlocks+1, latestHeadNumber), mock.Anything). + Return(nil, errors.New("GetBlocks error")) + lp.On( + "LogsWithSigs", + int64(latestHeadNumber-lookbackBlocks), + int64(latestHeadNumber), + []common.Hash{ + tp.randomnessRequestedTopic, + tp.randomnessFulfillmentRequestedTopic, + tp.randomWordsFulfilledTopic, + tp.outputsServedTopic, + }, + coordinatorAddress, + mock.Anything, + ).Return([]logpoller.Log{ + newRandomnessRequestedLog(t, 3, 195, 191, 0, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 191, 1, 10_000_000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 192, 2, 1000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 195, 193, 3, 10_000_000, coordinatorAddress), + newRandomnessFulfillmentRequestedLog(t, 3, 196, 194, 4, 1000, coordinatorAddress), + }, nil) + + c := &coordinator{ + onchainRouter: onchainRouter, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + lp: lp, + lggr: logger.TestLogger(t), + topics: tp, + evmClient: evmClient, + toBeTransmittedBlocks: NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))), + toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * 
int64(time.Second))),
		coordinatorConfig:        newCoordinatorConfig(lookbackBlocks),
		blockhashLookback:        lookbackBlocks,
	}

	blocks, callbacks, _, _, err := c.ReportBlocks(
		testutils.Context(t),
		0, // slotInterval: unused
		map[uint32]struct{}{3: {}},
		time.Duration(0),
		100, // maxBlocks: unused
		100, // maxCallbacks: unused
	)

	assert.Error(tt, err)
	assert.EqualError(tt, errors.Cause(err), "GetBlocks error")
	assert.Nil(tt, blocks)
	assert.Nil(tt, callbacks)
	})
}

// TestCoordinator_ReportWillBeTransmitted verifies that ReportWillBeTransmitted
// accepts a report whose recent block hash agrees with the log poller's view of
// that height, and rejects one whose hash differs (a re-org).
func TestCoordinator_ReportWillBeTransmitted(t *testing.T) {
	evmClient := evmclimocks.NewClient(t)
	evmClient.On("ConfiguredChainID").Return(big.NewInt(1))
	t.Run("happy path", func(t *testing.T) {
		lookbackBlocks := uint64(0)
		lp := getLogPoller(t, []uint64{199}, 200, false, false, 0)
		c := &coordinator{
			lp:                       lp,
			lggr:                     logger.TestLogger(t),
			toBeTransmittedBlocks:    NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))),
			toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))),
			coordinatorConfig:        newCoordinatorConfig(lookbackBlocks),
			evmClient:                evmClient,
		}
		// Hash matches what getLogPoller serves for block 199 -> accepted.
		assert.NoError(t, c.ReportWillBeTransmitted(testutils.Context(t), ocr2vrftypes.AbstractReport{
			RecentBlockHeight: 199,
			RecentBlockHash:   common.HexToHash("0x001"),
		}))
	})

	t.Run("re-org", func(t *testing.T) {
		lookbackBlocks := uint64(0)
		lp := getLogPoller(t, []uint64{199}, 200, false, false, 0)
		c := &coordinator{
			lp:                       lp,
			lggr:                     logger.TestLogger(t),
			toBeTransmittedBlocks:    NewBlockCache[blockInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))),
			toBeTransmittedCallbacks: NewBlockCache[callbackInReport](time.Duration(int64(lookbackBlocks) * int64(time.Second))),
			coordinatorConfig:        newCoordinatorConfig(lookbackBlocks),
			evmClient:                evmClient,
		}
		// Hash disagrees with the poller's block 199 -> rejected.
		assert.Error(t, c.ReportWillBeTransmitted(testutils.Context(t), ocr2vrftypes.AbstractReport{
			RecentBlockHeight: 199,
			RecentBlockHash:   common.HexToHash("0x009"),
		}))
	})
}

// TestCoordinator_MarshalUnmarshal round-trips each coordinator/beacon event
// through a synthetic logpoller.Log and the router's ParseLog, asserting the
// decoded fields match what the helper packed.
func TestCoordinator_MarshalUnmarshal(t *testing.T) {
	t.Parallel()
	proofG1X := big.NewInt(1)
	proofG1Y := big.NewInt(2)
	lggr := logger.TestLogger(t)
	evmClient := evmclimocks.NewClient(t)

	coordinatorAddress := newAddress(t)
	beaconAddress := newAddress(t)
	vrfBeaconCoordinator, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient)
	require.NoError(t, err)

	lg := newRandomnessRequestedLog(t, 3, 1500, 1450, 1, coordinatorAddress)
	rrIface, err := vrfBeaconCoordinator.ParseLog(toGethLog(lg))
	require.NoError(t, err)
	rr, ok := rrIface.(*vrf_coordinator.VRFCoordinatorRandomnessRequested)
	require.True(t, ok)
	assert.Equal(t, uint64(1500), rr.NextBeaconOutputHeight)
	assert.Equal(t, int64(3), rr.ConfDelay.Int64())

	lg = newRandomnessFulfillmentRequestedLog(t, 3, 1500, 1450, 1, 1000, coordinatorAddress)
	rfrIface, err := vrfBeaconCoordinator.ParseLog(toGethLog(lg))
	require.NoError(t, err)
	rfr, ok := rfrIface.(*vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequested)
	require.True(t, ok)
	assert.Equal(t, uint64(1500), rfr.NextBeaconOutputHeight)
	assert.Equal(t, int64(3), rfr.ConfDelay.Int64())
	assert.Equal(t, int64(1), rfr.RequestID.Int64())

	configDigest := common.BigToHash(big.NewInt(10))
	lg = newNewTransmissionLog(t, beaconAddress, configDigest)
	ntIface, err := vrfBeaconCoordinator.ParseLog(toGethLog(lg))
	require.NoError(t, err)
	nt, ok := ntIface.(*vrf_beacon.VRFBeaconNewTransmission)
	require.True(t, ok)
	assert.True(t, bytes.Equal(nt.ConfigDigest[:], configDigest[:]))
	assert.Equal(t, 0, nt.JuelsPerFeeCoin.Cmp(big.NewInt(1_000)))
	assert.Equal(t, 0, nt.EpochAndRound.Cmp(big.NewInt(1)))

	lg = newRandomWordsFulfilledLog(t, []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, []byte{1, 1, 1}, coordinatorAddress)
	rwfIface, err := vrfBeaconCoordinator.ParseLog(toGethLog(lg))
	require.NoError(t, err)
	rwf, ok := rwfIface.(*vrf_coordinator.VRFCoordinatorRandomWordsFulfilled)
	require.True(t, ok)
	assert.Equal(t, []int64{1, 2, 3}, []int64{rwf.RequestIDs[0].Int64(), rwf.RequestIDs[1].Int64(), rwf.RequestIDs[2].Int64()})
	assert.Equal(t, []byte{1, 1, 1}, rwf.SuccessfulFulfillment)

	lg = newOutputsServedLog(t, []vrf_coordinator.VRFBeaconTypesOutputServed{
		{
			Height:            1500,
			ConfirmationDelay: big.NewInt(3),
			ProofG1X:          proofG1X,
			ProofG1Y:          proofG1Y,
		},
		{
			Height:            1505,
			ConfirmationDelay: big.NewInt(4),
			ProofG1X:          proofG1X,
			ProofG1Y:          proofG1Y,
		},
	}, coordinatorAddress)

	osIface, err := vrfBeaconCoordinator.ParseLog(toGethLog(lg))
	require.NoError(t, err)
	os, ok := osIface.(*vrf_coordinator.VRFCoordinatorOutputsServed)
	require.True(t, ok)
	assert.Equal(t, uint64(1500), os.OutputsServed[0].Height)
	assert.Equal(t, uint64(1505), os.OutputsServed[1].Height)
	assert.Equal(t, int64(3), os.OutputsServed[0].ConfirmationDelay.Int64())
	assert.Equal(t, int64(4), os.OutputsServed[1].ConfirmationDelay.Int64())
}

// TestCoordinator_ReportIsOnchain checks detection of an already-transmitted
// report via the indexed NewTransmission logs (opening fragment; the remaining
// subtests continue below).
func TestCoordinator_ReportIsOnchain(t *testing.T) {
	evmClient := evmclimocks.NewClient(t)
	evmClient.On("ConfiguredChainID").Return(big.NewInt(1))

	t.Run("report is on-chain", func(t *testing.T) {
		tp := newTopics()
		beaconAddress := newAddress(t)
		coordinatorAddress := newAddress(t)
		lggr := logger.TestLogger(t)

		onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient)
		assert.NoError(t, err)

		epoch := uint32(20)
		round := uint8(3)
		epochAndRound := toEpochAndRoundUint40(epoch, round)
		enrTopic := common.BytesToHash(common.LeftPadBytes(epochAndRound.Bytes(), 32))
		lp := lp_mocks.NewLogPoller(t)
		configDigest := common.BigToHash(big.NewInt(1337))
		log := newNewTransmissionLog(t, beaconAddress, configDigest)
		log.BlockNumber = 195
		lp.On("IndexedLogs", tp.newTransmissionTopic, beaconAddress, 2, []common.Hash{
			enrTopic,
		}, logpoller.Confirmations(1),
mock.Anything).Return([]logpoller.Log{log}, nil)

		c := &coordinator{
			lp:            lp,
			onchainRouter: onchainRouter,
			lggr:          logger.TestLogger(t),
			beaconAddress: beaconAddress,
			topics:        tp,
			evmClient:     evmClient,
		}

		present, err := c.ReportIsOnchain(testutils.Context(t), epoch, round, configDigest)
		assert.NoError(t, err)
		assert.True(t, present)
	})

	t.Run("report is on-chain for old config digest", func(t *testing.T) {
		tp := newTopics()
		beaconAddress := newAddress(t)
		coordinatorAddress := newAddress(t)
		lggr := logger.TestLogger(t)

		onchainRouter, err := newRouter(lggr, beaconAddress, coordinatorAddress, evmClient)
		assert.NoError(t, err)

		epoch := uint32(20)
		round := uint8(3)
		epochAndRound := toEpochAndRoundUint40(epoch, round)
		enrTopic := common.BytesToHash(common.LeftPadBytes(epochAndRound.Bytes(), 32))
		lp := lp_mocks.NewLogPoller(t)
		oldConfigDigest := common.BigToHash(big.NewInt(1337))
		newConfigDigest := common.BigToHash(big.NewInt(8888))
		log := newNewTransmissionLog(t, beaconAddress, oldConfigDigest)
		log.BlockNumber = 195
		lp.On("IndexedLogs", tp.newTransmissionTopic, beaconAddress, 2, []common.Hash{
			enrTopic,
		}, logpoller.Confirmations(1), mock.Anything).Return([]logpoller.Log{log}, nil)

		c := &coordinator{
			lp:            lp,
			onchainRouter: onchainRouter,
			lggr:          logger.TestLogger(t),
			beaconAddress: beaconAddress,
			topics:        tp,
			evmClient:     evmClient,
		}

		// A transmission under a stale digest must not count as on-chain.
		present, err := c.ReportIsOnchain(testutils.Context(t), epoch, round, newConfigDigest)
		assert.NoError(t, err)
		assert.False(t, present)
	})

	t.Run("report is not on-chain", func(t *testing.T) {
		tp := newTopics()
		beaconAddress := newAddress(t)

		epoch := uint32(20)
		round := uint8(3)
		epochAndRound := toEpochAndRoundUint40(epoch, round)
		enrTopic := common.BytesToHash(common.LeftPadBytes(epochAndRound.Bytes(), 32))
		lp := lp_mocks.NewLogPoller(t)
		lp.On("IndexedLogs", tp.newTransmissionTopic, beaconAddress, 2, []common.Hash{
			enrTopic,
		}, logpoller.Confirmations(1), mock.Anything).Return([]logpoller.Log{}, nil)

		c := &coordinator{
			lp:            lp,
			lggr:          logger.TestLogger(t),
			beaconAddress: beaconAddress,
			topics:        tp,
			evmClient:     evmClient,
		}

		configDigest := common.BigToHash(big.NewInt(0))
		present, err := c.ReportIsOnchain(testutils.Context(t), epoch, round, configDigest)
		assert.NoError(t, err)
		assert.False(t, present)
	})

}

// TestCoordinator_ConfirmationDelays exercises ConfirmationDelays against a
// mocked on-chain router, covering both a successful fetch and an RPC failure.
func TestCoordinator_ConfirmationDelays(t *testing.T) {
	t.Parallel()

	t.Run("valid output", func(t *testing.T) {
		expected := [8]uint32{1, 2, 3, 4, 5, 6, 7, 8}
		ret := [8]*big.Int{}
		for i, delay := range expected {
			ret[i] = big.NewInt(int64(delay))
		}
		onchainRouter := mocks.NewVRFBeaconCoordinator(t)
		onchainRouter.
			On("GetConfirmationDelays", mock.Anything).
			Return(ret, nil)
		c := &coordinator{
			onchainRouter: onchainRouter,
		}
		confDelays, err := c.ConfirmationDelays(testutils.Context(t))
		assert.NoError(t, err)
		assert.Equal(t, expected[:], confDelays[:])
	})

	t.Run("invalid output", func(t *testing.T) {
		onchainRouter := mocks.NewVRFBeaconCoordinator(t)
		onchainRouter.
			On("GetConfirmationDelays", mock.Anything).
			Return([8]*big.Int{}, errors.New("rpc error"))
		c := &coordinator{
			onchainRouter: onchainRouter,
		}
		_, err := c.ConfirmationDelays(testutils.Context(t))
		assert.Error(t, err)
	})
}

// TestCoordinator_getBlockCacheKey pins the cache key layout: block number in
// the high 8 bytes, confirmation delay in the low 8 bytes of a 32-byte hash.
func TestCoordinator_getBlockCacheKey(t *testing.T) {
	t.Parallel()

	t.Run("calculates key correctly", func(t *testing.T) {
		hash := getBlockCacheKey(1, 11)
		assert.Equal(
			t,
			common.HexToHash("0x000000000000000000000000000000000000000000000001000000000000000b"),
			hash,
		)
	})
}

// TestCoordinator_KeyID exercises KeyID against the mocked router (opening
// fragment; the subtest bodies continue below).
func TestCoordinator_KeyID(t *testing.T) {
	t.Parallel()

	t.Run("valid output", func(t *testing.T) {
		var keyIDBytes [32]byte
		keyIDBytes[0] = 1
		expected := dkg.KeyID(keyIDBytes)
		onchainRouter := mocks.NewVRFBeaconCoordinator(t)
		onchainRouter.
			On("SKeyID", mock.Anything).
+ Return(keyIDBytes, nil) + c := &coordinator{ + onchainRouter: onchainRouter, + } + keyID, err := c.KeyID(testutils.Context(t)) + assert.NoError(t, err) + assert.Equal(t, expected[:], keyID[:]) + }) + + t.Run("invalid output", func(t *testing.T) { + var emptyBytes [32]byte + onchainRouter := mocks.NewVRFBeaconCoordinator(t) + onchainRouter. + On("SKeyID", mock.Anything). + Return(emptyBytes, errors.New("rpc error")) + c := &coordinator{ + onchainRouter: onchainRouter, + } + _, err := c.KeyID(testutils.Context(t)) + assert.Error(t, err) + }) +} + +func TestTopics_DKGConfigSet_VRFConfigSet(t *testing.T) { + dkgConfigSetTopic := dkg_wrapper.DKGConfigSet{}.Topic() + vrfConfigSetTopic := vrf_beacon.VRFBeaconConfigSet{}.Topic() + assert.Equal(t, dkgConfigSetTopic, vrfConfigSetTopic, "config set topics of vrf and dkg must be equal") +} + +func Test_UpdateConfiguration(t *testing.T) { + t.Parallel() + + t.Run("valid binary", func(t *testing.T) { + c := &coordinator{coordinatorConfig: newCoordinatorConfig(10), lggr: logger.TestLogger(t)} + cacheEvictionWindowSeconds := int64(60) + cacheEvictionWindow := time.Duration(cacheEvictionWindowSeconds * int64(time.Second)) + c.toBeTransmittedBlocks = NewBlockCache[blockInReport](cacheEvictionWindow) + c.toBeTransmittedCallbacks = NewBlockCache[callbackInReport](cacheEvictionWindow) + + newCoordinatorConfig := &ocr2vrftypes.CoordinatorConfig{ + CacheEvictionWindowSeconds: 30, + BatchGasLimit: 1_000_000, + CoordinatorOverhead: 10_000, + CallbackOverhead: 10_000, + BlockGasOverhead: 10_000, + LookbackBlocks: 1_000, + } + + require.Equal(t, cacheEvictionWindow, c.toBeTransmittedBlocks.evictionWindow) + require.Equal(t, cacheEvictionWindow, c.toBeTransmittedCallbacks.evictionWindow) + + expectedConfigDigest := ocr2Types.ConfigDigest(common.HexToHash("asd")) + expectedOracleID := commontypes.OracleID(3) + err := c.UpdateConfiguration(ocr2vrf.OffchainConfig(newCoordinatorConfig), expectedConfigDigest, expectedOracleID) + 
newCacheEvictionWindow := time.Duration(newCoordinatorConfig.CacheEvictionWindowSeconds * int64(time.Second)) + require.NoError(t, err) + require.Equal(t, newCoordinatorConfig.CacheEvictionWindowSeconds, c.coordinatorConfig.CacheEvictionWindowSeconds) + require.Equal(t, newCoordinatorConfig.BatchGasLimit, c.coordinatorConfig.BatchGasLimit) + require.Equal(t, newCoordinatorConfig.CoordinatorOverhead, c.coordinatorConfig.CoordinatorOverhead) + require.Equal(t, newCoordinatorConfig.CallbackOverhead, c.coordinatorConfig.CallbackOverhead) + require.Equal(t, newCoordinatorConfig.BlockGasOverhead, c.coordinatorConfig.BlockGasOverhead) + require.Equal(t, newCoordinatorConfig.LookbackBlocks, c.coordinatorConfig.LookbackBlocks) + require.Equal(t, newCacheEvictionWindow, c.toBeTransmittedBlocks.evictionWindow) + require.Equal(t, newCacheEvictionWindow, c.toBeTransmittedCallbacks.evictionWindow) + require.Equal(t, expectedConfigDigest, c.configDigest) + require.Equal(t, expectedOracleID, c.oracleID) + }) + + t.Run("invalid binary", func(t *testing.T) { + c := &coordinator{coordinatorConfig: newCoordinatorConfig(10), lggr: logger.TestLogger(t)} + + err := c.UpdateConfiguration([]byte{123}, ocr2Types.ConfigDigest{}, commontypes.OracleID(0)) + require.Error(t, err) + }) +} + +func newCoordinatorConfig(lookbackBlocks uint64) *ocr2vrftypes.CoordinatorConfig { + return &ocr2vrftypes.CoordinatorConfig{ + CacheEvictionWindowSeconds: 60, + BatchGasLimit: 5_000_000, + CoordinatorOverhead: 50_000, + CallbackOverhead: 50_000, + BlockGasOverhead: 50_000, + LookbackBlocks: lookbackBlocks, + } +} + +func newRandomnessRequestedLog( + t *testing.T, + confDelay int64, + nextBeaconOutputHeight uint64, + requestBlock uint64, + requestID int64, + coordinatorAddress common.Address, +) logpoller.Log { + //event RandomnessRequested( + // RequestID indexed requestID, + // address indexed requester, + // uint64 nextBeaconOutputHeight, + // ConfirmationDelay confDelay, + // uint64 subID, + // uint16 
numWords + //); + e := vrf_coordinator.VRFCoordinatorRandomnessRequested{ + RequestID: big.NewInt(requestID), + Requester: common.HexToAddress("0x1234567890"), + ConfDelay: big.NewInt(confDelay), + NextBeaconOutputHeight: nextBeaconOutputHeight, + NumWords: 1, + SubID: big.NewInt(1), + CostJuels: big.NewInt(50_000), + NewSubBalance: big.NewInt(100_000), + Raw: gethtypes.Log{ + BlockNumber: requestBlock, + }, + } + var unindexed abi.Arguments + for _, a := range vrfCoordinatorABI.Events[randomnessRequestedEvent].Inputs { + if !a.Indexed { + unindexed = append(unindexed, a) + } + } + nonIndexedData, err := unindexed.Pack(e.Requester, e.NextBeaconOutputHeight, e.ConfDelay, e.SubID, e.NumWords, e.CostJuels, e.NewSubBalance) + require.NoError(t, err) + + requestIDType, err := abi.NewType("uint64", "", nil) + require.NoError(t, err) + + requestIDArg := abi.Arguments{abi.Argument{ + Name: "requestID", + Type: requestIDType, + Indexed: true, + }} + + topic1, err := requestIDArg.Pack(e.RequestID.Uint64()) + require.NoError(t, err) + + topic0 := vrfCoordinatorABI.Events[randomnessRequestedEvent].ID + lg := logpoller.Log{ + Address: coordinatorAddress, + Data: nonIndexedData, + Topics: [][]byte{ + // first topic is the event signature + topic0.Bytes(), + // second topic is requestID since it's indexed + topic1, + }, + BlockNumber: int64(requestBlock), + EventSig: topic0, + } + return lg +} + +func newRandomnessFulfillmentRequestedLog( + t *testing.T, + confDelay int64, + nextBeaconOutputHeight uint64, + requestBlock uint64, + requestID int64, + gasAllowance uint32, + coordinatorAddress common.Address, +) logpoller.Log { + //event RandomnessFulfillmentRequested( + // RequestID indexed requestID, + // address indexed requester, + // uint64 nextBeaconOutputHeight, + // ConfirmationDelay confDelay, + // uint256 subID, + // uint16 numWords, + // uint32 gasAllowance, + // uint256 gasPrice, + // uint256 weiPerUnitLink, + // bytes arguments, + // uint256 costJuels + //); + e := 
vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequested{ + ConfDelay: big.NewInt(confDelay), + NextBeaconOutputHeight: nextBeaconOutputHeight, + RequestID: big.NewInt(1), + NumWords: 1, + GasAllowance: gasAllowance, + GasPrice: big.NewInt(0), + WeiPerUnitLink: big.NewInt(0), + SubID: big.NewInt(1), + Requester: common.HexToAddress("0x1234567890"), + CostJuels: big.NewInt(50_000), + NewSubBalance: big.NewInt(100_000), + Raw: gethtypes.Log{ + BlockNumber: requestBlock, + }, + } + var unindexed abi.Arguments + for _, a := range vrfCoordinatorABI.Events[randomnessFulfillmentRequestedEvent].Inputs { + if !a.Indexed { + unindexed = append(unindexed, a) + } + } + nonIndexedData, err := unindexed.Pack(e.Requester, e.NextBeaconOutputHeight, e.ConfDelay, e.SubID, e.NumWords, + e.GasAllowance, e.GasPrice, e.WeiPerUnitLink, e.Arguments, e.CostJuels, e.NewSubBalance) + require.NoError(t, err) + + requestIDType, err := abi.NewType("uint64", "", nil) + require.NoError(t, err) + + requestIDArg := abi.Arguments{abi.Argument{ + Name: "requestID", + Type: requestIDType, + Indexed: true, + }} + + topic0 := vrfCoordinatorABI.Events[randomnessFulfillmentRequestedEvent].ID + topic1, err := requestIDArg.Pack(e.RequestID.Uint64()) + require.NoError(t, err) + return logpoller.Log{ + Address: coordinatorAddress, + Data: nonIndexedData, + EventSig: topic0, + Topics: [][]byte{ + topic0.Bytes(), + topic1, + }, + BlockNumber: int64(requestBlock), + } +} + +func newRandomWordsFulfilledLog( + t *testing.T, + requestIDs []*big.Int, + successfulFulfillment []byte, + coordinatorAddress common.Address, +) logpoller.Log { + //event RandomWordsFulfilled( + // RequestID[] requestIDs, + // bytes successfulFulfillment, + // bytes[] truncatedErrorData + //); + e := vrf_coordinator.VRFCoordinatorRandomWordsFulfilled{ + RequestIDs: requestIDs, + SuccessfulFulfillment: successfulFulfillment, + } + packed, err := vrfCoordinatorABI.Events[randomWordsFulfilledEvent].Inputs.Pack( + e.RequestIDs, 
e.SuccessfulFulfillment, e.TruncatedErrorData, e.SubBalances, e.SubIDs) + require.NoError(t, err) + topic0 := vrfCoordinatorABI.Events[randomWordsFulfilledEvent].ID + return logpoller.Log{ + Address: coordinatorAddress, + Data: packed, + EventSig: topic0, + Topics: [][]byte{topic0.Bytes()}, + } +} + +func newOutputsServedLog( + t *testing.T, + outputsServed []vrf_coordinator.VRFBeaconTypesOutputServed, + beaconAddress common.Address, +) logpoller.Log { + // event OutputsServed( + // uint64 recentBlockHeight, + // address transmitter, + // uint192 juelsPerFeeCoin, + // OutputServed[] outputsServed + // ); + e := vrf_coordinator.VRFCoordinatorOutputsServed{ + RecentBlockHeight: 0, + // AggregatorRoundId: 1, + OutputsServed: outputsServed, + JuelsPerFeeCoin: big.NewInt(0), + ReasonableGasPrice: 0, + // EpochAndRound: big.NewInt(1), + // ConfigDigest: crypto.Keccak256Hash([]byte("hello world")), + } + var unindexed abi.Arguments + for _, a := range vrfCoordinatorABI.Events[outputsServedEvent].Inputs { + if !a.Indexed { + unindexed = append(unindexed, a) + } + } + nonIndexedData, err := unindexed.Pack(e.RecentBlockHeight, e.JuelsPerFeeCoin, e.ReasonableGasPrice, e.OutputsServed) + require.NoError(t, err) + + topic0 := vrfCoordinatorABI.Events[outputsServedEvent].ID + return logpoller.Log{ + Address: beaconAddress, + Data: nonIndexedData, + Topics: [][]byte{ + topic0.Bytes(), + }, + EventSig: topic0, + } +} + +func newNewTransmissionLog( + t *testing.T, + beaconAddress common.Address, + configDigest [32]byte, +) logpoller.Log { + // event NewTransmission( + // uint32 indexed aggregatorRoundId, + // uint40 indexed epochAndRound, + // address transmitter, + // uint192 juelsPerFeeCoin, + // bytes32 configDigest + // ); + e := vrf_beacon.VRFBeaconNewTransmission{ + JuelsPerFeeCoin: big.NewInt(1_000), + ReasonableGasPrice: 1_000, + EpochAndRound: big.NewInt(1), + ConfigDigest: configDigest, + Transmitter: newAddress(t), + } + var unindexed abi.Arguments + for _, a := range 
vrfBeaconABI.Events[newTransmissionEvent].Inputs { + if !a.Indexed { + unindexed = append(unindexed, a) + } + } + nonIndexedData, err := unindexed.Pack( + e.Transmitter, e.JuelsPerFeeCoin, e.ReasonableGasPrice, e.ConfigDigest) + require.NoError(t, err) + + // epochAndRound is indexed + epochAndRoundType, err := abi.NewType("uint40", "", nil) + require.NoError(t, err) + indexedArgs := abi.Arguments{ + { + Name: "epochAndRound", + Type: epochAndRoundType, + }, + } + epochAndRoundPacked, err := indexedArgs.Pack(e.EpochAndRound) + require.NoError(t, err) + + topic0 := vrfBeaconABI.Events[newTransmissionEvent].ID + return logpoller.Log{ + Address: beaconAddress, + Data: nonIndexedData, + Topics: [][]byte{ + topic0.Bytes(), + epochAndRoundPacked, + }, + EventSig: topic0, + } +} + +func newAddress(t *testing.T) common.Address { + b := make([]byte, 20) + _, err := rand.Read(b) + require.NoError(t, err) + return common.HexToAddress(hexutil.Encode(b)) +} + +func getLogPoller( + t *testing.T, + requestedBlocks []uint64, + latestHeadNumber uint64, + needsLatestBlock bool, + includeLatestHeadInRange bool, + blockhashLookback uint64, +) *lp_mocks.LogPoller { + lp := lp_mocks.NewLogPoller(t) + if needsLatestBlock { + lp.On("LatestBlock", mock.Anything). + Return(logpoller.LogPollerBlock{BlockNumber: int64(latestHeadNumber)}, nil) + } + var logPollerBlocks []logpoller.LogPollerBlock + + // If provided, ajust the blockhash range such that it starts at the most recent head. + if includeLatestHeadInRange { + requestedBlocks = append(requestedBlocks, latestHeadNumber) + } + + // If provided, adjust the blockhash range such that it includes all recent available blockhashes. + if blockhashLookback != 0 { + requestedBlocks = append(requestedBlocks, latestHeadNumber-blockhashLookback+1) + } + + // Sort the blocks to match the coordinator's calls. 
+ sort.Slice(requestedBlocks, func(a, b int) bool { + return requestedBlocks[a] < requestedBlocks[b] + }) + + // Fill range of blocks based on requestedBlocks + // example: requestedBlocks [195, 197] -> [{BlockNumber: 195, BlockHash: 0x001}, {BlockNumber: 196, BlockHash: 0x002}, {BlockNumber: 197, BlockHash: 0x003}] + minRequestedBlock := mathutil.Min(requestedBlocks[0], requestedBlocks[1:]...) + maxRequestedBlock := mathutil.Max(requestedBlocks[0], requestedBlocks[1:]...) + for i := minRequestedBlock; i <= maxRequestedBlock; i++ { + logPollerBlocks = append(logPollerBlocks, logpoller.LogPollerBlock{ + BlockNumber: int64(i), + BlockHash: common.HexToHash(fmt.Sprintf("0x00%d", i-minRequestedBlock+1)), + }) + } + + lp.On("GetBlocksRange", mock.Anything, requestedBlocks, mock.Anything). + Return(logPollerBlocks, nil) + + return lp +} + +func TestFilterNamesFromSpec(t *testing.T) { + beaconAddress := newAddress(t) + coordinatorAddress := newAddress(t) + dkgAddress := newAddress(t) + + spec := &job.OCR2OracleSpec{ + ContractID: beaconAddress.String(), + PluginType: types.OCR2VRF, + PluginConfig: job.JSONConfig{ + "VRFCoordinatorAddress": coordinatorAddress.String(), + "DKGContractAddress": dkgAddress.String(), + }, + } + + names, err := FilterNamesFromSpec(spec) + require.NoError(t, err) + + assert.Len(t, names, 1) + assert.Equal(t, logpoller.FilterName("VRF Coordinator", beaconAddress, coordinatorAddress, dkgAddress), names[0]) + + spec = &job.OCR2OracleSpec{ + PluginType: types.OCR2VRF, + ContractID: beaconAddress.String(), + PluginConfig: nil, // missing coordinator & dkg addresses + } + _, err = FilterNamesFromSpec(spec) + require.ErrorContains(t, err, "is not a valid EIP55 formatted address") +} diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/interfaces.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/interfaces.go new file mode 100644 index 00000000..68713992 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/interfaces.go @@ -0,0 
+1,31 @@ +package coordinator + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +//go:generate mockery --quiet --name VRFBeaconCoordinator --output ./mocks/ --case=underscore + +// VRFBeaconCoordinator is an interface that defines methods needed by the off-chain coordinator +type VRFBeaconCoordinator interface { + // SProvingKeyHash retrieves the proving key hash from the on-chain contract. + SProvingKeyHash(opts *bind.CallOpts) ([32]byte, error) + + // SKeyID retrieves the keyID from the on-chain contract. + SKeyID(opts *bind.CallOpts) ([32]byte, error) + + // IBeaconPeriodBlocks retrieves the beacon period in blocks from the on-chain contract. + IBeaconPeriodBlocks(opts *bind.CallOpts) (*big.Int, error) + + // ParseLog parses the raw log data and topics into a go object. + // The returned object must be casted to the expected type. + ParseLog(log types.Log) (generated.AbigenLog, error) + + // GetConfirmationDelays retrieves confirmation delays from the on-chain contract. + GetConfirmationDelays(opts *bind.CallOpts) ([8]*big.Int, error) +} diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/mocks/vrf_beacon.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/mocks/vrf_beacon.go new file mode 100644 index 00000000..cc6aeb9e --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/mocks/vrf_beacon.go @@ -0,0 +1,2179 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + bind "github.com/ethereum/go-ethereum/accounts/abi/bind" + common "github.com/ethereum/go-ethereum/common" + + event "github.com/ethereum/go-ethereum/event" + + generated "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" + + vrf_beacon "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon" +) + +// VRFBeaconInterface is an autogenerated mock type for the VRFBeaconInterface type +type VRFBeaconInterface struct { + mock.Mock +} + +// AcceptOwnership provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for AcceptOwnership") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) (*types.Transaction, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) *types.Transaction); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AcceptPayeeship provides a mock function with given fields: opts, transmitter +func (_m *VRFBeaconInterface) AcceptPayeeship(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, transmitter) + + if len(ret) == 0 { + panic("no return value specified for AcceptPayeeship") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, transmitter) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = 
rf(opts, transmitter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, transmitter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Address provides a mock function with given fields: +func (_m *VRFBeaconInterface) Address() common.Address { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 common.Address + if rf, ok := ret.Get(0).(func() common.Address); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + return r0 +} + +// ExposeType provides a mock function with given fields: opts, arg0 +func (_m *VRFBeaconInterface) ExposeType(opts *bind.TransactOpts, arg0 vrf_beacon.VRFBeaconReportReport) (*types.Transaction, error) { + ret := _m.Called(opts, arg0) + + if len(ret) == 0 { + panic("no return value specified for ExposeType") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, vrf_beacon.VRFBeaconReportReport) (*types.Transaction, error)); ok { + return rf(opts, arg0) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, vrf_beacon.VRFBeaconReportReport) *types.Transaction); ok { + r0 = rf(opts, arg0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, vrf_beacon.VRFBeaconReportReport) error); ok { + r1 = rf(opts, arg0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterBillingAccessControllerSet provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) FilterBillingAccessControllerSet(opts *bind.FilterOpts) (*vrf_beacon.VRFBeaconBillingAccessControllerSetIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterBillingAccessControllerSet") + } + + var r0 
*vrf_beacon.VRFBeaconBillingAccessControllerSetIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_beacon.VRFBeaconBillingAccessControllerSetIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *vrf_beacon.VRFBeaconBillingAccessControllerSetIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconBillingAccessControllerSetIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterBillingSet provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) FilterBillingSet(opts *bind.FilterOpts) (*vrf_beacon.VRFBeaconBillingSetIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterBillingSet") + } + + var r0 *vrf_beacon.VRFBeaconBillingSetIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_beacon.VRFBeaconBillingSetIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *vrf_beacon.VRFBeaconBillingSetIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconBillingSetIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterConfigSet provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) FilterConfigSet(opts *bind.FilterOpts) (*vrf_beacon.VRFBeaconConfigSetIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterConfigSet") + } + + var r0 *vrf_beacon.VRFBeaconConfigSetIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_beacon.VRFBeaconConfigSetIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) 
*vrf_beacon.VRFBeaconConfigSetIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconConfigSetIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterNewTransmission provides a mock function with given fields: opts, epochAndRound +func (_m *VRFBeaconInterface) FilterNewTransmission(opts *bind.FilterOpts, epochAndRound []*big.Int) (*vrf_beacon.VRFBeaconNewTransmissionIterator, error) { + ret := _m.Called(opts, epochAndRound) + + if len(ret) == 0 { + panic("no return value specified for FilterNewTransmission") + } + + var r0 *vrf_beacon.VRFBeaconNewTransmissionIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*vrf_beacon.VRFBeaconNewTransmissionIterator, error)); ok { + return rf(opts, epochAndRound) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *vrf_beacon.VRFBeaconNewTransmissionIterator); ok { + r0 = rf(opts, epochAndRound) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconNewTransmissionIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, epochAndRound) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOraclePaid provides a mock function with given fields: opts, transmitter, payee, linkToken +func (_m *VRFBeaconInterface) FilterOraclePaid(opts *bind.FilterOpts, transmitter []common.Address, payee []common.Address, linkToken []common.Address) (*vrf_beacon.VRFBeaconOraclePaidIterator, error) { + ret := _m.Called(opts, transmitter, payee, linkToken) + + if len(ret) == 0 { + panic("no return value specified for FilterOraclePaid") + } + + var r0 *vrf_beacon.VRFBeaconOraclePaidIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address, []common.Address) (*vrf_beacon.VRFBeaconOraclePaidIterator, error)); 
ok { + return rf(opts, transmitter, payee, linkToken) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address, []common.Address) *vrf_beacon.VRFBeaconOraclePaidIterator); ok { + r0 = rf(opts, transmitter, payee, linkToken) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconOraclePaidIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address, []common.Address) error); ok { + r1 = rf(opts, transmitter, payee, linkToken) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOutputsServed provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) FilterOutputsServed(opts *bind.FilterOpts) (*vrf_beacon.VRFBeaconOutputsServedIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterOutputsServed") + } + + var r0 *vrf_beacon.VRFBeaconOutputsServedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_beacon.VRFBeaconOutputsServedIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *vrf_beacon.VRFBeaconOutputsServedIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconOutputsServedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOwnershipTransferRequested provides a mock function with given fields: opts, from, to +func (_m *VRFBeaconInterface) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*vrf_beacon.VRFBeaconOwnershipTransferRequestedIterator, error) { + ret := _m.Called(opts, from, to) + + if len(ret) == 0 { + panic("no return value specified for FilterOwnershipTransferRequested") + } + + var r0 *vrf_beacon.VRFBeaconOwnershipTransferRequestedIterator + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*vrf_beacon.VRFBeaconOwnershipTransferRequestedIterator, error)); ok { + return rf(opts, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *vrf_beacon.VRFBeaconOwnershipTransferRequestedIterator); ok { + r0 = rf(opts, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconOwnershipTransferRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOwnershipTransferred provides a mock function with given fields: opts, from, to +func (_m *VRFBeaconInterface) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*vrf_beacon.VRFBeaconOwnershipTransferredIterator, error) { + ret := _m.Called(opts, from, to) + + if len(ret) == 0 { + panic("no return value specified for FilterOwnershipTransferred") + } + + var r0 *vrf_beacon.VRFBeaconOwnershipTransferredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*vrf_beacon.VRFBeaconOwnershipTransferredIterator, error)); ok { + return rf(opts, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *vrf_beacon.VRFBeaconOwnershipTransferredIterator); ok { + r0 = rf(opts, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconOwnershipTransferredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterPayeeshipTransferRequested provides a mock function with given fields: opts, transmitter, current, proposed +func (_m *VRFBeaconInterface) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, 
transmitter []common.Address, current []common.Address, proposed []common.Address) (*vrf_beacon.VRFBeaconPayeeshipTransferRequestedIterator, error) { + ret := _m.Called(opts, transmitter, current, proposed) + + if len(ret) == 0 { + panic("no return value specified for FilterPayeeshipTransferRequested") + } + + var r0 *vrf_beacon.VRFBeaconPayeeshipTransferRequestedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address, []common.Address) (*vrf_beacon.VRFBeaconPayeeshipTransferRequestedIterator, error)); ok { + return rf(opts, transmitter, current, proposed) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address, []common.Address) *vrf_beacon.VRFBeaconPayeeshipTransferRequestedIterator); ok { + r0 = rf(opts, transmitter, current, proposed) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconPayeeshipTransferRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address, []common.Address) error); ok { + r1 = rf(opts, transmitter, current, proposed) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterPayeeshipTransferred provides a mock function with given fields: opts, transmitter, previous, current +func (_m *VRFBeaconInterface) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, previous []common.Address, current []common.Address) (*vrf_beacon.VRFBeaconPayeeshipTransferredIterator, error) { + ret := _m.Called(opts, transmitter, previous, current) + + if len(ret) == 0 { + panic("no return value specified for FilterPayeeshipTransferred") + } + + var r0 *vrf_beacon.VRFBeaconPayeeshipTransferredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address, []common.Address) (*vrf_beacon.VRFBeaconPayeeshipTransferredIterator, error)); ok { + return rf(opts, transmitter, previous, current) + } + if rf, ok := 
ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address, []common.Address) *vrf_beacon.VRFBeaconPayeeshipTransferredIterator); ok { + r0 = rf(opts, transmitter, previous, current) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconPayeeshipTransferredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address, []common.Address) error); ok { + r1 = rf(opts, transmitter, previous, current) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRandomWordsFulfilled provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) FilterRandomWordsFulfilled(opts *bind.FilterOpts) (*vrf_beacon.VRFBeaconRandomWordsFulfilledIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterRandomWordsFulfilled") + } + + var r0 *vrf_beacon.VRFBeaconRandomWordsFulfilledIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_beacon.VRFBeaconRandomWordsFulfilledIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *vrf_beacon.VRFBeaconRandomWordsFulfilledIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconRandomWordsFulfilledIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRandomnessFulfillmentRequested provides a mock function with given fields: opts, requestID +func (_m *VRFBeaconInterface) FilterRandomnessFulfillmentRequested(opts *bind.FilterOpts, requestID []*big.Int) (*vrf_beacon.VRFBeaconRandomnessFulfillmentRequestedIterator, error) { + ret := _m.Called(opts, requestID) + + if len(ret) == 0 { + panic("no return value specified for FilterRandomnessFulfillmentRequested") + } + + var r0 *vrf_beacon.VRFBeaconRandomnessFulfillmentRequestedIterator + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*vrf_beacon.VRFBeaconRandomnessFulfillmentRequestedIterator, error)); ok { + return rf(opts, requestID) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *vrf_beacon.VRFBeaconRandomnessFulfillmentRequestedIterator); ok { + r0 = rf(opts, requestID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconRandomnessFulfillmentRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, requestID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRandomnessRedeemed provides a mock function with given fields: opts, requestID, requester +func (_m *VRFBeaconInterface) FilterRandomnessRedeemed(opts *bind.FilterOpts, requestID []*big.Int, requester []common.Address) (*vrf_beacon.VRFBeaconRandomnessRedeemedIterator, error) { + ret := _m.Called(opts, requestID, requester) + + if len(ret) == 0 { + panic("no return value specified for FilterRandomnessRedeemed") + } + + var r0 *vrf_beacon.VRFBeaconRandomnessRedeemedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []common.Address) (*vrf_beacon.VRFBeaconRandomnessRedeemedIterator, error)); ok { + return rf(opts, requestID, requester) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []common.Address) *vrf_beacon.VRFBeaconRandomnessRedeemedIterator); ok { + r0 = rf(opts, requestID, requester) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconRandomnessRedeemedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int, []common.Address) error); ok { + r1 = rf(opts, requestID, requester) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRandomnessRequested provides a mock function with given fields: opts, requestID +func (_m *VRFBeaconInterface) FilterRandomnessRequested(opts *bind.FilterOpts, requestID []*big.Int) 
(*vrf_beacon.VRFBeaconRandomnessRequestedIterator, error) { + ret := _m.Called(opts, requestID) + + if len(ret) == 0 { + panic("no return value specified for FilterRandomnessRequested") + } + + var r0 *vrf_beacon.VRFBeaconRandomnessRequestedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*vrf_beacon.VRFBeaconRandomnessRequestedIterator, error)); ok { + return rf(opts, requestID) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *vrf_beacon.VRFBeaconRandomnessRequestedIterator); ok { + r0 = rf(opts, requestID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconRandomnessRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, requestID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetBilling provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) GetBilling(opts *bind.CallOpts) (vrf_beacon.GetBilling, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for GetBilling") + } + + var r0 vrf_beacon.GetBilling + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (vrf_beacon.GetBilling, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) vrf_beacon.GetBilling); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(vrf_beacon.GetBilling) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetBillingAccessController provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) GetBillingAccessController(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for GetBillingAccessController") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } 
+ if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ICoordinator provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) ICoordinator(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for ICoordinator") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ILink provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) ILink(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for ILink") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// KeyGenerated provides a mock function with given fields: opts, kd +func (_m *VRFBeaconInterface) KeyGenerated(opts *bind.TransactOpts, kd vrf_beacon.KeyDataStructKeyData) (*types.Transaction, error) { + ret := _m.Called(opts, kd) + + if len(ret) == 0 { + panic("no return value specified for 
KeyGenerated") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, vrf_beacon.KeyDataStructKeyData) (*types.Transaction, error)); ok { + return rf(opts, kd) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, vrf_beacon.KeyDataStructKeyData) *types.Transaction); ok { + r0 = rf(opts, kd) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, vrf_beacon.KeyDataStructKeyData) error); ok { + r1 = rf(opts, kd) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestConfigDetails provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) LatestConfigDetails(opts *bind.CallOpts) (vrf_beacon.LatestConfigDetails, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for LatestConfigDetails") + } + + var r0 vrf_beacon.LatestConfigDetails + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (vrf_beacon.LatestConfigDetails, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) vrf_beacon.LatestConfigDetails); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(vrf_beacon.LatestConfigDetails) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestConfigDigestAndEpoch provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) LatestConfigDigestAndEpoch(opts *bind.CallOpts) (vrf_beacon.LatestConfigDigestAndEpoch, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for LatestConfigDigestAndEpoch") + } + + var r0 vrf_beacon.LatestConfigDigestAndEpoch + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (vrf_beacon.LatestConfigDigestAndEpoch, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) vrf_beacon.LatestConfigDigestAndEpoch); ok { + r0 = 
rf(opts) + } else { + r0 = ret.Get(0).(vrf_beacon.LatestConfigDigestAndEpoch) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LinkAvailableForPayment provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) LinkAvailableForPayment(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for LinkAvailableForPayment") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NUMCONFDELAYS provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) NUMCONFDELAYS(opts *bind.CallOpts) (uint8, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for NUMCONFDELAYS") + } + + var r0 uint8 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint8, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint8); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint8) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewKeyRequested provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) NewKeyRequested(opts *bind.TransactOpts) (*types.Transaction, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for NewKeyRequested") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) (*types.Transaction, error)); ok { + return rf(opts) + } 
+ if rf, ok := ret.Get(0).(func(*bind.TransactOpts) *types.Transaction); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OwedPayment provides a mock function with given fields: opts, transmitterAddress +func (_m *VRFBeaconInterface) OwedPayment(opts *bind.CallOpts, transmitterAddress common.Address) (*big.Int, error) { + ret := _m.Called(opts, transmitterAddress) + + if len(ret) == 0 { + panic("no return value specified for OwedPayment") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, common.Address) (*big.Int, error)); ok { + return rf(opts, transmitterAddress) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, common.Address) *big.Int); ok { + r0 = rf(opts, transmitterAddress) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, common.Address) error); ok { + r1 = rf(opts, transmitterAddress) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Owner provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) Owner(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Owner") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseBillingAccessControllerSet provides a mock function with given fields: log +func (_m *VRFBeaconInterface) 
ParseBillingAccessControllerSet(log types.Log) (*vrf_beacon.VRFBeaconBillingAccessControllerSet, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseBillingAccessControllerSet") + } + + var r0 *vrf_beacon.VRFBeaconBillingAccessControllerSet + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconBillingAccessControllerSet, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconBillingAccessControllerSet); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconBillingAccessControllerSet) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseBillingSet provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParseBillingSet(log types.Log) (*vrf_beacon.VRFBeaconBillingSet, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseBillingSet") + } + + var r0 *vrf_beacon.VRFBeaconBillingSet + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconBillingSet, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconBillingSet); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconBillingSet) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseConfigSet provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParseConfigSet(log types.Log) (*vrf_beacon.VRFBeaconConfigSet, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseConfigSet") + } + + var r0 *vrf_beacon.VRFBeaconConfigSet + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconConfigSet, error)); ok { + return 
rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconConfigSet); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconConfigSet) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseLog provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParseLog(log types.Log) (generated.AbigenLog, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseLog") + } + + var r0 generated.AbigenLog + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (generated.AbigenLog, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) generated.AbigenLog); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(generated.AbigenLog) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseNewTransmission provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParseNewTransmission(log types.Log) (*vrf_beacon.VRFBeaconNewTransmission, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseNewTransmission") + } + + var r0 *vrf_beacon.VRFBeaconNewTransmission + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconNewTransmission, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconNewTransmission); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconNewTransmission) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOraclePaid provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParseOraclePaid(log types.Log) 
(*vrf_beacon.VRFBeaconOraclePaid, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOraclePaid") + } + + var r0 *vrf_beacon.VRFBeaconOraclePaid + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconOraclePaid, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconOraclePaid); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconOraclePaid) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOutputsServed provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParseOutputsServed(log types.Log) (*vrf_beacon.VRFBeaconOutputsServed, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOutputsServed") + } + + var r0 *vrf_beacon.VRFBeaconOutputsServed + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconOutputsServed, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconOutputsServed); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconOutputsServed) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOwnershipTransferRequested provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParseOwnershipTransferRequested(log types.Log) (*vrf_beacon.VRFBeaconOwnershipTransferRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOwnershipTransferRequested") + } + + var r0 *vrf_beacon.VRFBeaconOwnershipTransferRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconOwnershipTransferRequested, error)); ok { + return rf(log) + } + if rf, 
ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconOwnershipTransferRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconOwnershipTransferRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOwnershipTransferred provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParseOwnershipTransferred(log types.Log) (*vrf_beacon.VRFBeaconOwnershipTransferred, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOwnershipTransferred") + } + + var r0 *vrf_beacon.VRFBeaconOwnershipTransferred + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconOwnershipTransferred, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconOwnershipTransferred); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconOwnershipTransferred) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParsePayeeshipTransferRequested provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParsePayeeshipTransferRequested(log types.Log) (*vrf_beacon.VRFBeaconPayeeshipTransferRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParsePayeeshipTransferRequested") + } + + var r0 *vrf_beacon.VRFBeaconPayeeshipTransferRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconPayeeshipTransferRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconPayeeshipTransferRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconPayeeshipTransferRequested) + } + } + + if rf, ok := 
ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParsePayeeshipTransferred provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParsePayeeshipTransferred(log types.Log) (*vrf_beacon.VRFBeaconPayeeshipTransferred, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParsePayeeshipTransferred") + } + + var r0 *vrf_beacon.VRFBeaconPayeeshipTransferred + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconPayeeshipTransferred, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconPayeeshipTransferred); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconPayeeshipTransferred) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRandomWordsFulfilled provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParseRandomWordsFulfilled(log types.Log) (*vrf_beacon.VRFBeaconRandomWordsFulfilled, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRandomWordsFulfilled") + } + + var r0 *vrf_beacon.VRFBeaconRandomWordsFulfilled + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconRandomWordsFulfilled, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconRandomWordsFulfilled); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconRandomWordsFulfilled) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRandomnessFulfillmentRequested provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParseRandomnessFulfillmentRequested(log types.Log) 
(*vrf_beacon.VRFBeaconRandomnessFulfillmentRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRandomnessFulfillmentRequested") + } + + var r0 *vrf_beacon.VRFBeaconRandomnessFulfillmentRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconRandomnessFulfillmentRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconRandomnessFulfillmentRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconRandomnessFulfillmentRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRandomnessRedeemed provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParseRandomnessRedeemed(log types.Log) (*vrf_beacon.VRFBeaconRandomnessRedeemed, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRandomnessRedeemed") + } + + var r0 *vrf_beacon.VRFBeaconRandomnessRedeemed + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconRandomnessRedeemed, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconRandomnessRedeemed); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconRandomnessRedeemed) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRandomnessRequested provides a mock function with given fields: log +func (_m *VRFBeaconInterface) ParseRandomnessRequested(log types.Log) (*vrf_beacon.VRFBeaconRandomnessRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRandomnessRequested") + } + + var r0 *vrf_beacon.VRFBeaconRandomnessRequested + var r1 error + if rf, ok 
:= ret.Get(0).(func(types.Log) (*vrf_beacon.VRFBeaconRandomnessRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_beacon.VRFBeaconRandomnessRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_beacon.VRFBeaconRandomnessRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SKeyID provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) SKeyID(opts *bind.CallOpts) ([32]byte, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for SKeyID") + } + + var r0 [32]byte + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) ([32]byte, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) [32]byte); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([32]byte) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SKeyProvider provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) SKeyProvider(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for SKeyProvider") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SProvingKeyHash provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) SProvingKeyHash(opts *bind.CallOpts) ([32]byte, error) { + ret := _m.Called(opts) 
+ + if len(ret) == 0 { + panic("no return value specified for SProvingKeyHash") + } + + var r0 [32]byte + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) ([32]byte, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) [32]byte); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([32]byte) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetBilling provides a mock function with given fields: opts, maximumGasPrice, reasonableGasPrice, observationPayment, transmissionPayment, accountingGas +func (_m *VRFBeaconInterface) SetBilling(opts *bind.TransactOpts, maximumGasPrice uint64, reasonableGasPrice uint64, observationPayment uint64, transmissionPayment uint64, accountingGas *big.Int) (*types.Transaction, error) { + ret := _m.Called(opts, maximumGasPrice, reasonableGasPrice, observationPayment, transmissionPayment, accountingGas) + + if len(ret) == 0 { + panic("no return value specified for SetBilling") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64, uint64, uint64, uint64, *big.Int) (*types.Transaction, error)); ok { + return rf(opts, maximumGasPrice, reasonableGasPrice, observationPayment, transmissionPayment, accountingGas) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64, uint64, uint64, uint64, *big.Int) *types.Transaction); ok { + r0 = rf(opts, maximumGasPrice, reasonableGasPrice, observationPayment, transmissionPayment, accountingGas) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, uint64, uint64, uint64, uint64, *big.Int) error); ok { + r1 = rf(opts, maximumGasPrice, reasonableGasPrice, observationPayment, transmissionPayment, accountingGas) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetBillingAccessController 
provides a mock function with given fields: opts, _billingAccessController +func (_m *VRFBeaconInterface) SetBillingAccessController(opts *bind.TransactOpts, _billingAccessController common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, _billingAccessController) + + if len(ret) == 0 { + panic("no return value specified for SetBillingAccessController") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, _billingAccessController) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, _billingAccessController) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, _billingAccessController) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetConfig provides a mock function with given fields: opts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig +func (_m *VRFBeaconInterface) SetConfig(opts *bind.TransactOpts, signers []common.Address, transmitters []common.Address, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte) (*types.Transaction, error) { + ret := _m.Called(opts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) + + if len(ret) == 0 { + panic("no return value specified for SetConfig") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []common.Address, []common.Address, uint8, []byte, uint64, []byte) (*types.Transaction, error)); ok { + return rf(opts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []common.Address, []common.Address, uint8, []byte, uint64, []byte) *types.Transaction); ok { + r0 = rf(opts, 
signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, []common.Address, []common.Address, uint8, []byte, uint64, []byte) error); ok { + r1 = rf(opts, signers, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetPayees provides a mock function with given fields: opts, transmitters, payees +func (_m *VRFBeaconInterface) SetPayees(opts *bind.TransactOpts, transmitters []common.Address, payees []common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, transmitters, payees) + + if len(ret) == 0 { + panic("no return value specified for SetPayees") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []common.Address, []common.Address) (*types.Transaction, error)); ok { + return rf(opts, transmitters, payees) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []common.Address, []common.Address) *types.Transaction); ok { + r0 = rf(opts, transmitters, payees) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, transmitters, payees) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransferOwnership provides a mock function with given fields: opts, to +func (_m *VRFBeaconInterface) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, to) + + if len(ret) == 0 { + panic("no return value specified for TransferOwnership") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, to) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, 
common.Address) *types.Transaction); ok { + r0 = rf(opts, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransferPayeeship provides a mock function with given fields: opts, transmitter, proposed +func (_m *VRFBeaconInterface) TransferPayeeship(opts *bind.TransactOpts, transmitter common.Address, proposed common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, transmitter, proposed) + + if len(ret) == 0 { + panic("no return value specified for TransferPayeeship") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, common.Address) (*types.Transaction, error)); ok { + return rf(opts, transmitter, proposed) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, common.Address) *types.Transaction); ok { + r0 = rf(opts, transmitter, proposed) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, common.Address) error); ok { + r1 = rf(opts, transmitter, proposed) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Transmit provides a mock function with given fields: opts, reportContext, report, rs, ss, rawVs +func (_m *VRFBeaconInterface) Transmit(opts *bind.TransactOpts, reportContext [3][32]byte, report []byte, rs [][32]byte, ss [][32]byte, rawVs [32]byte) (*types.Transaction, error) { + ret := _m.Called(opts, reportContext, report, rs, ss, rawVs) + + if len(ret) == 0 { + panic("no return value specified for Transmit") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, [3][32]byte, []byte, [][32]byte, [][32]byte, [32]byte) (*types.Transaction, error)); ok { + return rf(opts, reportContext, report, rs, ss, rawVs) + } + 
if rf, ok := ret.Get(0).(func(*bind.TransactOpts, [3][32]byte, []byte, [][32]byte, [][32]byte, [32]byte) *types.Transaction); ok { + r0 = rf(opts, reportContext, report, rs, ss, rawVs) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, [3][32]byte, []byte, [][32]byte, [][32]byte, [32]byte) error); ok { + r1 = rf(opts, reportContext, report, rs, ss, rawVs) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TypeAndVersion provides a mock function with given fields: opts +func (_m *VRFBeaconInterface) TypeAndVersion(opts *bind.CallOpts) (string, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for TypeAndVersion") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (string, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) string); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchBillingAccessControllerSet provides a mock function with given fields: opts, sink +func (_m *VRFBeaconInterface) WatchBillingAccessControllerSet(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconBillingAccessControllerSet) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchBillingAccessControllerSet") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconBillingAccessControllerSet) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconBillingAccessControllerSet) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if 
rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconBillingAccessControllerSet) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchBillingSet provides a mock function with given fields: opts, sink +func (_m *VRFBeaconInterface) WatchBillingSet(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconBillingSet) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchBillingSet") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconBillingSet) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconBillingSet) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconBillingSet) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchConfigSet provides a mock function with given fields: opts, sink +func (_m *VRFBeaconInterface) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconConfigSet) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchConfigSet") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconConfigSet) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconConfigSet) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconConfigSet) error); ok { + r1 = rf(opts, sink) + 
} else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchNewTransmission provides a mock function with given fields: opts, sink, epochAndRound +func (_m *VRFBeaconInterface) WatchNewTransmission(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconNewTransmission, epochAndRound []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, epochAndRound) + + if len(ret) == 0 { + panic("no return value specified for WatchNewTransmission") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconNewTransmission, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, epochAndRound) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconNewTransmission, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, epochAndRound) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconNewTransmission, []*big.Int) error); ok { + r1 = rf(opts, sink, epochAndRound) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOraclePaid provides a mock function with given fields: opts, sink, transmitter, payee, linkToken +func (_m *VRFBeaconInterface) WatchOraclePaid(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconOraclePaid, transmitter []common.Address, payee []common.Address, linkToken []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, transmitter, payee, linkToken) + + if len(ret) == 0 { + panic("no return value specified for WatchOraclePaid") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconOraclePaid, []common.Address, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, transmitter, payee, linkToken) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- 
*vrf_beacon.VRFBeaconOraclePaid, []common.Address, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, transmitter, payee, linkToken) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconOraclePaid, []common.Address, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, transmitter, payee, linkToken) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOutputsServed provides a mock function with given fields: opts, sink +func (_m *VRFBeaconInterface) WatchOutputsServed(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconOutputsServed) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchOutputsServed") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconOutputsServed) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconOutputsServed) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconOutputsServed) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOwnershipTransferRequested provides a mock function with given fields: opts, sink, from, to +func (_m *VRFBeaconInterface) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, from, to) + + if len(ret) == 0 { + panic("no return value specified for WatchOwnershipTransferRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconOwnershipTransferRequested, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconOwnershipTransferRequested, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconOwnershipTransferRequested, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOwnershipTransferred provides a mock function with given fields: opts, sink, from, to +func (_m *VRFBeaconInterface) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, from, to) + + if len(ret) == 0 { + panic("no return value specified for WatchOwnershipTransferred") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconOwnershipTransferred, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconOwnershipTransferred, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconOwnershipTransferred, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchPayeeshipTransferRequested provides a mock function with 
given fields: opts, sink, transmitter, current, proposed +func (_m *VRFBeaconInterface) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconPayeeshipTransferRequested, transmitter []common.Address, current []common.Address, proposed []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, transmitter, current, proposed) + + if len(ret) == 0 { + panic("no return value specified for WatchPayeeshipTransferRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconPayeeshipTransferRequested, []common.Address, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, transmitter, current, proposed) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconPayeeshipTransferRequested, []common.Address, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, transmitter, current, proposed) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconPayeeshipTransferRequested, []common.Address, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, transmitter, current, proposed) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchPayeeshipTransferred provides a mock function with given fields: opts, sink, transmitter, previous, current +func (_m *VRFBeaconInterface) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconPayeeshipTransferred, transmitter []common.Address, previous []common.Address, current []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, transmitter, previous, current) + + if len(ret) == 0 { + panic("no return value specified for WatchPayeeshipTransferred") + } + + var r0 event.Subscription + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconPayeeshipTransferred, []common.Address, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, transmitter, previous, current) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconPayeeshipTransferred, []common.Address, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, transmitter, previous, current) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconPayeeshipTransferred, []common.Address, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, transmitter, previous, current) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRandomWordsFulfilled provides a mock function with given fields: opts, sink +func (_m *VRFBeaconInterface) WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconRandomWordsFulfilled) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchRandomWordsFulfilled") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconRandomWordsFulfilled) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconRandomWordsFulfilled) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconRandomWordsFulfilled) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRandomnessFulfillmentRequested provides a mock function with given fields: opts, sink, requestID +func (_m *VRFBeaconInterface) 
WatchRandomnessFulfillmentRequested(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconRandomnessFulfillmentRequested, requestID []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, requestID) + + if len(ret) == 0 { + panic("no return value specified for WatchRandomnessFulfillmentRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconRandomnessFulfillmentRequested, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, requestID) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconRandomnessFulfillmentRequested, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, requestID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconRandomnessFulfillmentRequested, []*big.Int) error); ok { + r1 = rf(opts, sink, requestID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRandomnessRedeemed provides a mock function with given fields: opts, sink, requestID, requester +func (_m *VRFBeaconInterface) WatchRandomnessRedeemed(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconRandomnessRedeemed, requestID []*big.Int, requester []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, requestID, requester) + + if len(ret) == 0 { + panic("no return value specified for WatchRandomnessRedeemed") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconRandomnessRedeemed, []*big.Int, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, requestID, requester) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconRandomnessRedeemed, []*big.Int, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, requestID, requester) + } else { + if ret.Get(0) != nil 
{ + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconRandomnessRedeemed, []*big.Int, []common.Address) error); ok { + r1 = rf(opts, sink, requestID, requester) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRandomnessRequested provides a mock function with given fields: opts, sink, requestID +func (_m *VRFBeaconInterface) WatchRandomnessRequested(opts *bind.WatchOpts, sink chan<- *vrf_beacon.VRFBeaconRandomnessRequested, requestID []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, requestID) + + if len(ret) == 0 { + panic("no return value specified for WatchRandomnessRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconRandomnessRequested, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, requestID) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconRandomnessRequested, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, requestID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_beacon.VRFBeaconRandomnessRequested, []*big.Int) error); ok { + r1 = rf(opts, sink, requestID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WithdrawFunds provides a mock function with given fields: opts, recipient, amount +func (_m *VRFBeaconInterface) WithdrawFunds(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + ret := _m.Called(opts, recipient, amount) + + if len(ret) == 0 { + panic("no return value specified for WithdrawFunds") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int) (*types.Transaction, error)); ok { + return rf(opts, recipient, amount) + } + if rf, ok := 
ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int) *types.Transaction); ok { + r0 = rf(opts, recipient, amount) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, *big.Int) error); ok { + r1 = rf(opts, recipient, amount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WithdrawPayment provides a mock function with given fields: opts, transmitter +func (_m *VRFBeaconInterface) WithdrawPayment(opts *bind.TransactOpts, transmitter common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, transmitter) + + if len(ret) == 0 { + panic("no return value specified for WithdrawPayment") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, transmitter) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, transmitter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, transmitter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewVRFBeaconInterface creates a new instance of VRFBeaconInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewVRFBeaconInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *VRFBeaconInterface { + mock := &VRFBeaconInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/mocks/vrf_beacon_coordinator.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/mocks/vrf_beacon_coordinator.go new file mode 100644 index 00000000..2b11b1b1 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/mocks/vrf_beacon_coordinator.go @@ -0,0 +1,184 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + bind "github.com/ethereum/go-ethereum/accounts/abi/bind" + + generated "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// VRFBeaconCoordinator is an autogenerated mock type for the VRFBeaconCoordinator type +type VRFBeaconCoordinator struct { + mock.Mock +} + +// GetConfirmationDelays provides a mock function with given fields: opts +func (_m *VRFBeaconCoordinator) GetConfirmationDelays(opts *bind.CallOpts) ([8]*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for GetConfirmationDelays") + } + + var r0 [8]*big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) ([8]*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) [8]*big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([8]*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IBeaconPeriodBlocks provides a mock function with given fields: opts +func (_m *VRFBeaconCoordinator) IBeaconPeriodBlocks(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return 
value specified for IBeaconPeriodBlocks") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseLog provides a mock function with given fields: log +func (_m *VRFBeaconCoordinator) ParseLog(log types.Log) (generated.AbigenLog, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseLog") + } + + var r0 generated.AbigenLog + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (generated.AbigenLog, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) generated.AbigenLog); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(generated.AbigenLog) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SKeyID provides a mock function with given fields: opts +func (_m *VRFBeaconCoordinator) SKeyID(opts *bind.CallOpts) ([32]byte, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for SKeyID") + } + + var r0 [32]byte + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) ([32]byte, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) [32]byte); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([32]byte) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SProvingKeyHash provides a mock function with given fields: opts +func (_m *VRFBeaconCoordinator) SProvingKeyHash(opts *bind.CallOpts) ([32]byte, error) { + ret := 
_m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for SProvingKeyHash") + } + + var r0 [32]byte + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) ([32]byte, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) [32]byte); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([32]byte) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewVRFBeaconCoordinator creates a new instance of VRFBeaconCoordinator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVRFBeaconCoordinator(t interface { + mock.TestingT + Cleanup(func()) +}) *VRFBeaconCoordinator { + mock := &VRFBeaconCoordinator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/mocks/vrf_coordinator.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/mocks/vrf_coordinator.go new file mode 100644 index 00000000..2f781b05 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/mocks/vrf_coordinator.go @@ -0,0 +1,3093 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + bind "github.com/ethereum/go-ethereum/accounts/abi/bind" + common "github.com/ethereum/go-ethereum/common" + + event "github.com/ethereum/go-ethereum/event" + + generated "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" + + vrf_coordinator "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_coordinator" +) + +// VRFCoordinatorInterface is an autogenerated mock type for the VRFCoordinatorInterface type +type VRFCoordinatorInterface struct { + mock.Mock +} + +// AcceptOwnership provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for AcceptOwnership") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) (*types.Transaction, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) *types.Transaction); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AcceptSubscriptionOwnerTransfer provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorInterface) AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int) (*types.Transaction, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for AcceptSubscriptionOwnerTransfer") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int) (*types.Transaction, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, 
*big.Int) *types.Transaction); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, *big.Int) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AddConsumer provides a mock function with given fields: opts, subId, consumer +func (_m *VRFCoordinatorInterface) AddConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, subId, consumer) + + if len(ret) == 0 { + panic("no return value specified for AddConsumer") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, common.Address) (*types.Transaction, error)); ok { + return rf(opts, subId, consumer) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, common.Address) *types.Transaction); ok { + r0 = rf(opts, subId, consumer) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, *big.Int, common.Address) error); ok { + r1 = rf(opts, subId, consumer) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Address provides a mock function with given fields: +func (_m *VRFCoordinatorInterface) Address() common.Address { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 common.Address + if rf, ok := ret.Get(0).(func() common.Address); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + return r0 +} + +// BatchTransferLink provides a mock function with given fields: opts, recipients, paymentsInJuels +func (_m *VRFCoordinatorInterface) BatchTransferLink(opts *bind.TransactOpts, recipients []common.Address, paymentsInJuels []*big.Int) (*types.Transaction, error) { + ret := _m.Called(opts, recipients, paymentsInJuels) + + if len(ret) == 0 { 
+ panic("no return value specified for BatchTransferLink") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []common.Address, []*big.Int) (*types.Transaction, error)); ok { + return rf(opts, recipients, paymentsInJuels) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []common.Address, []*big.Int) *types.Transaction); ok { + r0 = rf(opts, recipients, paymentsInJuels) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, []common.Address, []*big.Int) error); ok { + r1 = rf(opts, recipients, paymentsInJuels) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CancelSubscription provides a mock function with given fields: opts, subId, to +func (_m *VRFCoordinatorInterface) CancelSubscription(opts *bind.TransactOpts, subId *big.Int, to common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, subId, to) + + if len(ret) == 0 { + panic("no return value specified for CancelSubscription") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, common.Address) (*types.Transaction, error)); ok { + return rf(opts, subId, to) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, common.Address) *types.Transaction); ok { + r0 = rf(opts, subId, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, *big.Int, common.Address) error); ok { + r1 = rf(opts, subId, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateSubscription provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for CreateSubscription") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.TransactOpts) (*types.Transaction, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) *types.Transaction); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeregisterMigratableCoordinator provides a mock function with given fields: opts, target +func (_m *VRFCoordinatorInterface) DeregisterMigratableCoordinator(opts *bind.TransactOpts, target common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, target) + + if len(ret) == 0 { + panic("no return value specified for DeregisterMigratableCoordinator") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, target) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, target) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, target) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterCallbackConfigSet provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) FilterCallbackConfigSet(opts *bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorCallbackConfigSetIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterCallbackConfigSet") + } + + var r0 *vrf_coordinator.VRFCoordinatorCallbackConfigSetIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorCallbackConfigSetIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) 
*vrf_coordinator.VRFCoordinatorCallbackConfigSetIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorCallbackConfigSetIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterCoordinatorConfigSet provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) FilterCoordinatorConfigSet(opts *bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorCoordinatorConfigSetIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterCoordinatorConfigSet") + } + + var r0 *vrf_coordinator.VRFCoordinatorCoordinatorConfigSetIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorCoordinatorConfigSetIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *vrf_coordinator.VRFCoordinatorCoordinatorConfigSetIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorCoordinatorConfigSetIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterCoordinatorDeregistered provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) FilterCoordinatorDeregistered(opts *bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorCoordinatorDeregisteredIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterCoordinatorDeregistered") + } + + var r0 *vrf_coordinator.VRFCoordinatorCoordinatorDeregisteredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorCoordinatorDeregisteredIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) 
*vrf_coordinator.VRFCoordinatorCoordinatorDeregisteredIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorCoordinatorDeregisteredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterCoordinatorRegistered provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) FilterCoordinatorRegistered(opts *bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorCoordinatorRegisteredIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterCoordinatorRegistered") + } + + var r0 *vrf_coordinator.VRFCoordinatorCoordinatorRegisteredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorCoordinatorRegisteredIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *vrf_coordinator.VRFCoordinatorCoordinatorRegisteredIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorCoordinatorRegisteredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterMigrationCompleted provides a mock function with given fields: opts, newVersion, subID +func (_m *VRFCoordinatorInterface) FilterMigrationCompleted(opts *bind.FilterOpts, newVersion []uint8, subID []*big.Int) (*vrf_coordinator.VRFCoordinatorMigrationCompletedIterator, error) { + ret := _m.Called(opts, newVersion, subID) + + if len(ret) == 0 { + panic("no return value specified for FilterMigrationCompleted") + } + + var r0 *vrf_coordinator.VRFCoordinatorMigrationCompletedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint8, []*big.Int) (*vrf_coordinator.VRFCoordinatorMigrationCompletedIterator, error)); ok 
{ + return rf(opts, newVersion, subID) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint8, []*big.Int) *vrf_coordinator.VRFCoordinatorMigrationCompletedIterator); ok { + r0 = rf(opts, newVersion, subID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorMigrationCompletedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []uint8, []*big.Int) error); ok { + r1 = rf(opts, newVersion, subID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOutputsServed provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) FilterOutputsServed(opts *bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorOutputsServedIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterOutputsServed") + } + + var r0 *vrf_coordinator.VRFCoordinatorOutputsServedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorOutputsServedIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *vrf_coordinator.VRFCoordinatorOutputsServedIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorOutputsServedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOwnershipTransferRequested provides a mock function with given fields: opts, from, to +func (_m *VRFCoordinatorInterface) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*vrf_coordinator.VRFCoordinatorOwnershipTransferRequestedIterator, error) { + ret := _m.Called(opts, from, to) + + if len(ret) == 0 { + panic("no return value specified for FilterOwnershipTransferRequested") + } + + var r0 *vrf_coordinator.VRFCoordinatorOwnershipTransferRequestedIterator + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*vrf_coordinator.VRFCoordinatorOwnershipTransferRequestedIterator, error)); ok { + return rf(opts, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *vrf_coordinator.VRFCoordinatorOwnershipTransferRequestedIterator); ok { + r0 = rf(opts, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorOwnershipTransferRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOwnershipTransferred provides a mock function with given fields: opts, from, to +func (_m *VRFCoordinatorInterface) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*vrf_coordinator.VRFCoordinatorOwnershipTransferredIterator, error) { + ret := _m.Called(opts, from, to) + + if len(ret) == 0 { + panic("no return value specified for FilterOwnershipTransferred") + } + + var r0 *vrf_coordinator.VRFCoordinatorOwnershipTransferredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*vrf_coordinator.VRFCoordinatorOwnershipTransferredIterator, error)); ok { + return rf(opts, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *vrf_coordinator.VRFCoordinatorOwnershipTransferredIterator); ok { + r0 = rf(opts, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorOwnershipTransferredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterPauseFlagChanged provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) 
FilterPauseFlagChanged(opts *bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorPauseFlagChangedIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterPauseFlagChanged") + } + + var r0 *vrf_coordinator.VRFCoordinatorPauseFlagChangedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorPauseFlagChangedIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *vrf_coordinator.VRFCoordinatorPauseFlagChangedIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorPauseFlagChangedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRandomWordsFulfilled provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) FilterRandomWordsFulfilled(opts *bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorRandomWordsFulfilledIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterRandomWordsFulfilled") + } + + var r0 *vrf_coordinator.VRFCoordinatorRandomWordsFulfilledIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_coordinator.VRFCoordinatorRandomWordsFulfilledIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *vrf_coordinator.VRFCoordinatorRandomWordsFulfilledIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorRandomWordsFulfilledIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRandomnessFulfillmentRequested provides a mock function with given fields: opts, requestID +func (_m *VRFCoordinatorInterface) 
FilterRandomnessFulfillmentRequested(opts *bind.FilterOpts, requestID []*big.Int) (*vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequestedIterator, error) { + ret := _m.Called(opts, requestID) + + if len(ret) == 0 { + panic("no return value specified for FilterRandomnessFulfillmentRequested") + } + + var r0 *vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequestedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequestedIterator, error)); ok { + return rf(opts, requestID) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequestedIterator); ok { + r0 = rf(opts, requestID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, requestID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRandomnessRedeemed provides a mock function with given fields: opts, requestID, requester +func (_m *VRFCoordinatorInterface) FilterRandomnessRedeemed(opts *bind.FilterOpts, requestID []*big.Int, requester []common.Address) (*vrf_coordinator.VRFCoordinatorRandomnessRedeemedIterator, error) { + ret := _m.Called(opts, requestID, requester) + + if len(ret) == 0 { + panic("no return value specified for FilterRandomnessRedeemed") + } + + var r0 *vrf_coordinator.VRFCoordinatorRandomnessRedeemedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []common.Address) (*vrf_coordinator.VRFCoordinatorRandomnessRedeemedIterator, error)); ok { + return rf(opts, requestID, requester) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []common.Address) *vrf_coordinator.VRFCoordinatorRandomnessRedeemedIterator); ok { + r0 = rf(opts, requestID, requester) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*vrf_coordinator.VRFCoordinatorRandomnessRedeemedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int, []common.Address) error); ok { + r1 = rf(opts, requestID, requester) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRandomnessRequested provides a mock function with given fields: opts, requestID +func (_m *VRFCoordinatorInterface) FilterRandomnessRequested(opts *bind.FilterOpts, requestID []*big.Int) (*vrf_coordinator.VRFCoordinatorRandomnessRequestedIterator, error) { + ret := _m.Called(opts, requestID) + + if len(ret) == 0 { + panic("no return value specified for FilterRandomnessRequested") + } + + var r0 *vrf_coordinator.VRFCoordinatorRandomnessRequestedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*vrf_coordinator.VRFCoordinatorRandomnessRequestedIterator, error)); ok { + return rf(opts, requestID) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *vrf_coordinator.VRFCoordinatorRandomnessRequestedIterator); ok { + r0 = rf(opts, requestID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorRandomnessRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, requestID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionCanceled provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorInterface) FilterSubscriptionCanceled(opts *bind.FilterOpts, subId []*big.Int) (*vrf_coordinator.VRFCoordinatorSubscriptionCanceledIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionCanceled") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionCanceledIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*vrf_coordinator.VRFCoordinatorSubscriptionCanceledIterator, error)); ok { + return rf(opts, subId) 
+ } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *vrf_coordinator.VRFCoordinatorSubscriptionCanceledIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionCanceledIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionConsumerAdded provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorInterface) FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subId []*big.Int) (*vrf_coordinator.VRFCoordinatorSubscriptionConsumerAddedIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionConsumerAdded") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionConsumerAddedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*vrf_coordinator.VRFCoordinatorSubscriptionConsumerAddedIterator, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *vrf_coordinator.VRFCoordinatorSubscriptionConsumerAddedIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionConsumerAddedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionConsumerRemoved provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorInterface) FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subId []*big.Int) (*vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemovedIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionConsumerRemoved") + } + + var r0 
*vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemovedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemovedIterator, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemovedIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemovedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionCreated provides a mock function with given fields: opts, subId, owner +func (_m *VRFCoordinatorInterface) FilterSubscriptionCreated(opts *bind.FilterOpts, subId []*big.Int, owner []common.Address) (*vrf_coordinator.VRFCoordinatorSubscriptionCreatedIterator, error) { + ret := _m.Called(opts, subId, owner) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionCreated") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionCreatedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []common.Address) (*vrf_coordinator.VRFCoordinatorSubscriptionCreatedIterator, error)); ok { + return rf(opts, subId, owner) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int, []common.Address) *vrf_coordinator.VRFCoordinatorSubscriptionCreatedIterator); ok { + r0 = rf(opts, subId, owner) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionCreatedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int, []common.Address) error); ok { + r1 = rf(opts, subId, owner) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionFunded provides a mock function with given fields: opts, subId +func (_m 
*VRFCoordinatorInterface) FilterSubscriptionFunded(opts *bind.FilterOpts, subId []*big.Int) (*vrf_coordinator.VRFCoordinatorSubscriptionFundedIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionFunded") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionFundedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*vrf_coordinator.VRFCoordinatorSubscriptionFundedIterator, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *vrf_coordinator.VRFCoordinatorSubscriptionFundedIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionFundedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionOwnerTransferRequested provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorInterface) FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subId []*big.Int) (*vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequestedIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionOwnerTransferRequested") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequestedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequestedIterator, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequestedIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequestedIterator) + } + } + + 
if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionOwnerTransferred provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorInterface) FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subId []*big.Int) (*vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferredIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionOwnerTransferred") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferredIterator, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferredIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCallbackMemo provides a mock function with given fields: opts, requestId +func (_m *VRFCoordinatorInterface) GetCallbackMemo(opts *bind.CallOpts, requestId *big.Int) ([32]byte, error) { + ret := _m.Called(opts, requestId) + + if len(ret) == 0 { + panic("no return value specified for GetCallbackMemo") + } + + var r0 [32]byte + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) ([32]byte, error)); ok { + return rf(opts, requestId) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) [32]byte); ok { + r0 = rf(opts, requestId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([32]byte) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); 
ok { + r1 = rf(opts, requestId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetConfirmationDelays provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) GetConfirmationDelays(opts *bind.CallOpts) ([8]*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for GetConfirmationDelays") + } + + var r0 [8]*big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) ([8]*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) [8]*big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([8]*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetFee provides a mock function with given fields: opts, arg0, arg1 +func (_m *VRFCoordinatorInterface) GetFee(opts *bind.CallOpts, arg0 *big.Int, arg1 []byte) (*big.Int, error) { + ret := _m.Called(opts, arg0, arg1) + + if len(ret) == 0 { + panic("no return value specified for GetFee") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int, []byte) (*big.Int, error)); ok { + return rf(opts, arg0, arg1) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int, []byte) *big.Int); ok { + r0 = rf(opts, arg0, arg1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int, []byte) error); ok { + r1 = rf(opts, arg0, arg1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetFulfillmentFee provides a mock function with given fields: opts, arg0, callbackGasLimit, arguments, arg3 +func (_m *VRFCoordinatorInterface) GetFulfillmentFee(opts *bind.CallOpts, arg0 *big.Int, callbackGasLimit uint32, arguments []byte, arg3 []byte) (*big.Int, error) { + ret := _m.Called(opts, arg0, callbackGasLimit, arguments, arg3) + + if len(ret) == 0 { + panic("no 
return value specified for GetFulfillmentFee") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int, uint32, []byte, []byte) (*big.Int, error)); ok { + return rf(opts, arg0, callbackGasLimit, arguments, arg3) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int, uint32, []byte, []byte) *big.Int); ok { + r0 = rf(opts, arg0, callbackGasLimit, arguments, arg3) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int, uint32, []byte, []byte) error); ok { + r1 = rf(opts, arg0, callbackGasLimit, arguments, arg3) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSubscription provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorInterface) GetSubscription(opts *bind.CallOpts, subId *big.Int) (vrf_coordinator.GetSubscription, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for GetSubscription") + } + + var r0 vrf_coordinator.GetSubscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) (vrf_coordinator.GetSubscription, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) vrf_coordinator.GetSubscription); ok { + r0 = rf(opts, subId) + } else { + r0 = ret.Get(0).(vrf_coordinator.GetSubscription) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSubscriptionLinkBalance provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) GetSubscriptionLinkBalance(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for GetSubscriptionLinkBalance") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok 
:= ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IBeaconPeriodBlocks provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) IBeaconPeriodBlocks(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for IBeaconPeriodBlocks") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ILink provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) ILink(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for ILink") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MAXCONSUMERS provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for MAXCONSUMERS") + } + + var r0 uint16 + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.CallOpts) (uint16, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint16); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint16) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MAXNUMWORDS provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) MAXNUMWORDS(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for MAXNUMWORDS") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Migrate provides a mock function with given fields: opts, newCoordinator, encodedRequest +func (_m *VRFCoordinatorInterface) Migrate(opts *bind.TransactOpts, newCoordinator common.Address, encodedRequest []byte) (*types.Transaction, error) { + ret := _m.Called(opts, newCoordinator, encodedRequest) + + if len(ret) == 0 { + panic("no return value specified for Migrate") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, []byte) (*types.Transaction, error)); ok { + return rf(opts, newCoordinator, encodedRequest) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, []byte) *types.Transaction); ok { + r0 = rf(opts, newCoordinator, encodedRequest) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, []byte) error); ok { + r1 = rf(opts, newCoordinator, encodedRequest) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + +// MigrationVersion provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) MigrationVersion(opts *bind.CallOpts) (uint8, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for MigrationVersion") + } + + var r0 uint8 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint8, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint8); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint8) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NUMCONFDELAYS provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) NUMCONFDELAYS(opts *bind.CallOpts) (uint8, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for NUMCONFDELAYS") + } + + var r0 uint8 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint8, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint8); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint8) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OnMigration provides a mock function with given fields: opts, arg0 +func (_m *VRFCoordinatorInterface) OnMigration(opts *bind.CallOpts, arg0 []byte) error { + ret := _m.Called(opts, arg0) + + if len(ret) == 0 { + panic("no return value specified for OnMigration") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, []byte) error); ok { + r0 = rf(opts, arg0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnTokenTransfer provides a mock function with given fields: opts, arg0, amount, data +func (_m *VRFCoordinatorInterface) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) 
(*types.Transaction, error) { + ret := _m.Called(opts, arg0, amount, data) + + if len(ret) == 0 { + panic("no return value specified for OnTokenTransfer") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int, []byte) (*types.Transaction, error)); ok { + return rf(opts, arg0, amount, data) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int, []byte) *types.Transaction); ok { + r0 = rf(opts, arg0, amount, data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, *big.Int, []byte) error); ok { + r1 = rf(opts, arg0, amount, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Owner provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) Owner(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Owner") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseCallbackConfigSet provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseCallbackConfigSet(log types.Log) (*vrf_coordinator.VRFCoordinatorCallbackConfigSet, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseCallbackConfigSet") + } + + var r0 *vrf_coordinator.VRFCoordinatorCallbackConfigSet + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorCallbackConfigSet, error)); ok { + return 
rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorCallbackConfigSet); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorCallbackConfigSet) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseCoordinatorConfigSet provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseCoordinatorConfigSet(log types.Log) (*vrf_coordinator.VRFCoordinatorCoordinatorConfigSet, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseCoordinatorConfigSet") + } + + var r0 *vrf_coordinator.VRFCoordinatorCoordinatorConfigSet + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorCoordinatorConfigSet, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorCoordinatorConfigSet); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorCoordinatorConfigSet) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseCoordinatorDeregistered provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseCoordinatorDeregistered(log types.Log) (*vrf_coordinator.VRFCoordinatorCoordinatorDeregistered, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseCoordinatorDeregistered") + } + + var r0 *vrf_coordinator.VRFCoordinatorCoordinatorDeregistered + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorCoordinatorDeregistered, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorCoordinatorDeregistered); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { 
+ r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorCoordinatorDeregistered) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseCoordinatorRegistered provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseCoordinatorRegistered(log types.Log) (*vrf_coordinator.VRFCoordinatorCoordinatorRegistered, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseCoordinatorRegistered") + } + + var r0 *vrf_coordinator.VRFCoordinatorCoordinatorRegistered + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorCoordinatorRegistered, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorCoordinatorRegistered); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorCoordinatorRegistered) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseLog provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseLog(log types.Log) (generated.AbigenLog, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseLog") + } + + var r0 generated.AbigenLog + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (generated.AbigenLog, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) generated.AbigenLog); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(generated.AbigenLog) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseMigrationCompleted provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseMigrationCompleted(log types.Log) 
(*vrf_coordinator.VRFCoordinatorMigrationCompleted, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseMigrationCompleted") + } + + var r0 *vrf_coordinator.VRFCoordinatorMigrationCompleted + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorMigrationCompleted, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorMigrationCompleted); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorMigrationCompleted) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOutputsServed provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseOutputsServed(log types.Log) (*vrf_coordinator.VRFCoordinatorOutputsServed, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOutputsServed") + } + + var r0 *vrf_coordinator.VRFCoordinatorOutputsServed + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorOutputsServed, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorOutputsServed); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorOutputsServed) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOwnershipTransferRequested provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseOwnershipTransferRequested(log types.Log) (*vrf_coordinator.VRFCoordinatorOwnershipTransferRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOwnershipTransferRequested") + } + + var r0 
*vrf_coordinator.VRFCoordinatorOwnershipTransferRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorOwnershipTransferRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorOwnershipTransferRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorOwnershipTransferRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOwnershipTransferred provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseOwnershipTransferred(log types.Log) (*vrf_coordinator.VRFCoordinatorOwnershipTransferred, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOwnershipTransferred") + } + + var r0 *vrf_coordinator.VRFCoordinatorOwnershipTransferred + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorOwnershipTransferred, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorOwnershipTransferred); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorOwnershipTransferred) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParsePauseFlagChanged provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParsePauseFlagChanged(log types.Log) (*vrf_coordinator.VRFCoordinatorPauseFlagChanged, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParsePauseFlagChanged") + } + + var r0 *vrf_coordinator.VRFCoordinatorPauseFlagChanged + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorPauseFlagChanged, error)); ok { + 
return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorPauseFlagChanged); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorPauseFlagChanged) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRandomWordsFulfilled provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseRandomWordsFulfilled(log types.Log) (*vrf_coordinator.VRFCoordinatorRandomWordsFulfilled, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRandomWordsFulfilled") + } + + var r0 *vrf_coordinator.VRFCoordinatorRandomWordsFulfilled + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorRandomWordsFulfilled, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorRandomWordsFulfilled); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorRandomWordsFulfilled) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRandomnessFulfillmentRequested provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseRandomnessFulfillmentRequested(log types.Log) (*vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRandomnessFulfillmentRequested") + } + + var r0 *vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequested); 
ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRandomnessRedeemed provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseRandomnessRedeemed(log types.Log) (*vrf_coordinator.VRFCoordinatorRandomnessRedeemed, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRandomnessRedeemed") + } + + var r0 *vrf_coordinator.VRFCoordinatorRandomnessRedeemed + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorRandomnessRedeemed, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorRandomnessRedeemed); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorRandomnessRedeemed) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRandomnessRequested provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseRandomnessRequested(log types.Log) (*vrf_coordinator.VRFCoordinatorRandomnessRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRandomnessRequested") + } + + var r0 *vrf_coordinator.VRFCoordinatorRandomnessRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorRandomnessRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorRandomnessRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorRandomnessRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { 
+ r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionCanceled provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseSubscriptionCanceled(log types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionCanceled, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubscriptionCanceled") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionCanceled + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionCanceled, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorSubscriptionCanceled); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionCanceled) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionConsumerAdded provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseSubscriptionConsumerAdded(log types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionConsumerAdded, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubscriptionConsumerAdded") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionConsumerAdded + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionConsumerAdded, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorSubscriptionConsumerAdded); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionConsumerAdded) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionConsumerRemoved provides a mock function with 
given fields: log +func (_m *VRFCoordinatorInterface) ParseSubscriptionConsumerRemoved(log types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemoved, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubscriptionConsumerRemoved") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemoved + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemoved, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemoved); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemoved) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionCreated provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseSubscriptionCreated(log types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionCreated, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubscriptionCreated") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionCreated + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionCreated, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorSubscriptionCreated); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionCreated) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionFunded provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseSubscriptionFunded(log types.Log) 
(*vrf_coordinator.VRFCoordinatorSubscriptionFunded, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubscriptionFunded") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionFunded + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionFunded, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorSubscriptionFunded); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionFunded) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionOwnerTransferRequested provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseSubscriptionOwnerTransferRequested(log types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubscriptionOwnerTransferRequested") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionOwnerTransferred provides a mock function with given fields: log +func (_m *VRFCoordinatorInterface) ParseSubscriptionOwnerTransferred(log types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferred, 
error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubscriptionOwnerTransferred") + } + + var r0 *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferred + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferred, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferred); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferred) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProcessVRFOutputs provides a mock function with given fields: opts, vrfOutputs, juelsPerFeeCoin, reasonableGasPrice, blockHeight +func (_m *VRFCoordinatorInterface) ProcessVRFOutputs(opts *bind.TransactOpts, vrfOutputs []vrf_coordinator.VRFBeaconTypesVRFOutput, juelsPerFeeCoin *big.Int, reasonableGasPrice uint64, blockHeight uint64) (*types.Transaction, error) { + ret := _m.Called(opts, vrfOutputs, juelsPerFeeCoin, reasonableGasPrice, blockHeight) + + if len(ret) == 0 { + panic("no return value specified for ProcessVRFOutputs") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []vrf_coordinator.VRFBeaconTypesVRFOutput, *big.Int, uint64, uint64) (*types.Transaction, error)); ok { + return rf(opts, vrfOutputs, juelsPerFeeCoin, reasonableGasPrice, blockHeight) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []vrf_coordinator.VRFBeaconTypesVRFOutput, *big.Int, uint64, uint64) *types.Transaction); ok { + r0 = rf(opts, vrfOutputs, juelsPerFeeCoin, reasonableGasPrice, blockHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, []vrf_coordinator.VRFBeaconTypesVRFOutput, *big.Int, uint64, uint64) 
error); ok { + r1 = rf(opts, vrfOutputs, juelsPerFeeCoin, reasonableGasPrice, blockHeight) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RedeemRandomness provides a mock function with given fields: opts, subID, requestID, arg2 +func (_m *VRFCoordinatorInterface) RedeemRandomness(opts *bind.TransactOpts, subID *big.Int, requestID *big.Int, arg2 []byte) (*types.Transaction, error) { + ret := _m.Called(opts, subID, requestID, arg2) + + if len(ret) == 0 { + panic("no return value specified for RedeemRandomness") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, *big.Int, []byte) (*types.Transaction, error)); ok { + return rf(opts, subID, requestID, arg2) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, *big.Int, []byte) *types.Transaction); ok { + r0 = rf(opts, subID, requestID, arg2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, *big.Int, *big.Int, []byte) error); ok { + r1 = rf(opts, subID, requestID, arg2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RegisterMigratableCoordinator provides a mock function with given fields: opts, target +func (_m *VRFCoordinatorInterface) RegisterMigratableCoordinator(opts *bind.TransactOpts, target common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, target) + + if len(ret) == 0 { + panic("no return value specified for RegisterMigratableCoordinator") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, target) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, target) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = 
rf(opts, target) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoveConsumer provides a mock function with given fields: opts, subId, consumer +func (_m *VRFCoordinatorInterface) RemoveConsumer(opts *bind.TransactOpts, subId *big.Int, consumer common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, subId, consumer) + + if len(ret) == 0 { + panic("no return value specified for RemoveConsumer") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, common.Address) (*types.Transaction, error)); ok { + return rf(opts, subId, consumer) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, common.Address) *types.Transaction); ok { + r0 = rf(opts, subId, consumer) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, *big.Int, common.Address) error); ok { + r1 = rf(opts, subId, consumer) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RequestRandomness provides a mock function with given fields: opts, subID, numWords, confDelay, arg3 +func (_m *VRFCoordinatorInterface) RequestRandomness(opts *bind.TransactOpts, subID *big.Int, numWords uint16, confDelay *big.Int, arg3 []byte) (*types.Transaction, error) { + ret := _m.Called(opts, subID, numWords, confDelay, arg3) + + if len(ret) == 0 { + panic("no return value specified for RequestRandomness") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, uint16, *big.Int, []byte) (*types.Transaction, error)); ok { + return rf(opts, subID, numWords, confDelay, arg3) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, uint16, *big.Int, []byte) *types.Transaction); ok { + r0 = rf(opts, subID, numWords, confDelay, arg3) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, *big.Int, 
uint16, *big.Int, []byte) error); ok { + r1 = rf(opts, subID, numWords, confDelay, arg3) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RequestRandomnessFulfillment provides a mock function with given fields: opts, subID, numWords, confDelay, callbackGasLimit, arguments, arg5 +func (_m *VRFCoordinatorInterface) RequestRandomnessFulfillment(opts *bind.TransactOpts, subID *big.Int, numWords uint16, confDelay *big.Int, callbackGasLimit uint32, arguments []byte, arg5 []byte) (*types.Transaction, error) { + ret := _m.Called(opts, subID, numWords, confDelay, callbackGasLimit, arguments, arg5) + + if len(ret) == 0 { + panic("no return value specified for RequestRandomnessFulfillment") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, uint16, *big.Int, uint32, []byte, []byte) (*types.Transaction, error)); ok { + return rf(opts, subID, numWords, confDelay, callbackGasLimit, arguments, arg5) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, uint16, *big.Int, uint32, []byte, []byte) *types.Transaction); ok { + r0 = rf(opts, subID, numWords, confDelay, callbackGasLimit, arguments, arg5) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, *big.Int, uint16, *big.Int, uint32, []byte, []byte) error); ok { + r1 = rf(opts, subID, numWords, confDelay, callbackGasLimit, arguments, arg5) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RequestSubscriptionOwnerTransfer provides a mock function with given fields: opts, subId, newOwner +func (_m *VRFCoordinatorInterface) RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId *big.Int, newOwner common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, subId, newOwner) + + if len(ret) == 0 { + panic("no return value specified for RequestSubscriptionOwnerTransfer") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.TransactOpts, *big.Int, common.Address) (*types.Transaction, error)); ok { + return rf(opts, subId, newOwner) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, *big.Int, common.Address) *types.Transaction); ok { + r0 = rf(opts, subId, newOwner) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, *big.Int, common.Address) error); ok { + r1 = rf(opts, subId, newOwner) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SCallbackConfig provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) SCallbackConfig(opts *bind.CallOpts) (vrf_coordinator.SCallbackConfig, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for SCallbackConfig") + } + + var r0 vrf_coordinator.SCallbackConfig + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (vrf_coordinator.SCallbackConfig, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) vrf_coordinator.SCallbackConfig); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(vrf_coordinator.SCallbackConfig) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SCoordinatorConfig provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) SCoordinatorConfig(opts *bind.CallOpts) (vrf_coordinator.SCoordinatorConfig, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for SCoordinatorConfig") + } + + var r0 vrf_coordinator.SCoordinatorConfig + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (vrf_coordinator.SCoordinatorConfig, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) vrf_coordinator.SCoordinatorConfig); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(vrf_coordinator.SCoordinatorConfig) + } + + if rf, ok := 
ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SPendingRequests provides a mock function with given fields: opts, arg0 +func (_m *VRFCoordinatorInterface) SPendingRequests(opts *bind.CallOpts, arg0 *big.Int) (vrf_coordinator.SPendingRequests, error) { + ret := _m.Called(opts, arg0) + + if len(ret) == 0 { + panic("no return value specified for SPendingRequests") + } + + var r0 vrf_coordinator.SPendingRequests + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) (vrf_coordinator.SPendingRequests, error)); ok { + return rf(opts, arg0) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) vrf_coordinator.SPendingRequests); ok { + r0 = rf(opts, arg0) + } else { + r0 = ret.Get(0).(vrf_coordinator.SPendingRequests) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); ok { + r1 = rf(opts, arg0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SProducer provides a mock function with given fields: opts +func (_m *VRFCoordinatorInterface) SProducer(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for SProducer") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetCallbackConfig provides a mock function with given fields: opts, config +func (_m *VRFCoordinatorInterface) SetCallbackConfig(opts *bind.TransactOpts, config vrf_coordinator.VRFCoordinatorCallbackConfig) (*types.Transaction, error) { + ret := _m.Called(opts, config) + + if len(ret) == 0 { + panic("no return 
value specified for SetCallbackConfig") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, vrf_coordinator.VRFCoordinatorCallbackConfig) (*types.Transaction, error)); ok { + return rf(opts, config) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, vrf_coordinator.VRFCoordinatorCallbackConfig) *types.Transaction); ok { + r0 = rf(opts, config) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, vrf_coordinator.VRFCoordinatorCallbackConfig) error); ok { + r1 = rf(opts, config) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetConfirmationDelays provides a mock function with given fields: opts, confDelays +func (_m *VRFCoordinatorInterface) SetConfirmationDelays(opts *bind.TransactOpts, confDelays [8]*big.Int) (*types.Transaction, error) { + ret := _m.Called(opts, confDelays) + + if len(ret) == 0 { + panic("no return value specified for SetConfirmationDelays") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, [8]*big.Int) (*types.Transaction, error)); ok { + return rf(opts, confDelays) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, [8]*big.Int) *types.Transaction); ok { + r0 = rf(opts, confDelays) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, [8]*big.Int) error); ok { + r1 = rf(opts, confDelays) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetCoordinatorConfig provides a mock function with given fields: opts, coordinatorConfig +func (_m *VRFCoordinatorInterface) SetCoordinatorConfig(opts *bind.TransactOpts, coordinatorConfig vrf_coordinator.VRFBeaconTypesCoordinatorConfig) (*types.Transaction, error) { + ret := _m.Called(opts, coordinatorConfig) + + if len(ret) == 0 { + panic("no return value specified for SetCoordinatorConfig") + } + + var r0 
*types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, vrf_coordinator.VRFBeaconTypesCoordinatorConfig) (*types.Transaction, error)); ok { + return rf(opts, coordinatorConfig) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, vrf_coordinator.VRFBeaconTypesCoordinatorConfig) *types.Transaction); ok { + r0 = rf(opts, coordinatorConfig) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, vrf_coordinator.VRFBeaconTypesCoordinatorConfig) error); ok { + r1 = rf(opts, coordinatorConfig) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetPauseFlag provides a mock function with given fields: opts, pause +func (_m *VRFCoordinatorInterface) SetPauseFlag(opts *bind.TransactOpts, pause bool) (*types.Transaction, error) { + ret := _m.Called(opts, pause) + + if len(ret) == 0 { + panic("no return value specified for SetPauseFlag") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, bool) (*types.Transaction, error)); ok { + return rf(opts, pause) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, bool) *types.Transaction); ok { + r0 = rf(opts, pause) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, bool) error); ok { + r1 = rf(opts, pause) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetProducer provides a mock function with given fields: opts, producer +func (_m *VRFCoordinatorInterface) SetProducer(opts *bind.TransactOpts, producer common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, producer) + + if len(ret) == 0 { + panic("no return value specified for SetProducer") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, producer) + } + if rf, ok := 
ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, producer) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, producer) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransferLink provides a mock function with given fields: opts, recipient, juelsAmount +func (_m *VRFCoordinatorInterface) TransferLink(opts *bind.TransactOpts, recipient common.Address, juelsAmount *big.Int) (*types.Transaction, error) { + ret := _m.Called(opts, recipient, juelsAmount) + + if len(ret) == 0 { + panic("no return value specified for TransferLink") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int) (*types.Transaction, error)); ok { + return rf(opts, recipient, juelsAmount) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int) *types.Transaction); ok { + r0 = rf(opts, recipient, juelsAmount) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, *big.Int) error); ok { + r1 = rf(opts, recipient, juelsAmount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransferOwnership provides a mock function with given fields: opts, to +func (_m *VRFCoordinatorInterface) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, to) + + if len(ret) == 0 { + panic("no return value specified for TransferOwnership") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, to) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, to) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchCallbackConfigSet provides a mock function with given fields: opts, sink +func (_m *VRFCoordinatorInterface) WatchCallbackConfigSet(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorCallbackConfigSet) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchCallbackConfigSet") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorCallbackConfigSet) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorCallbackConfigSet) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorCallbackConfigSet) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchCoordinatorConfigSet provides a mock function with given fields: opts, sink +func (_m *VRFCoordinatorInterface) WatchCoordinatorConfigSet(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorCoordinatorConfigSet) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchCoordinatorConfigSet") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorCoordinatorConfigSet) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorCoordinatorConfigSet) event.Subscription); ok { + r0 = rf(opts, sink) + } 
else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorCoordinatorConfigSet) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchCoordinatorDeregistered provides a mock function with given fields: opts, sink +func (_m *VRFCoordinatorInterface) WatchCoordinatorDeregistered(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorCoordinatorDeregistered) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchCoordinatorDeregistered") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorCoordinatorDeregistered) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorCoordinatorDeregistered) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorCoordinatorDeregistered) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchCoordinatorRegistered provides a mock function with given fields: opts, sink +func (_m *VRFCoordinatorInterface) WatchCoordinatorRegistered(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorCoordinatorRegistered) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchCoordinatorRegistered") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorCoordinatorRegistered) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := 
ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorCoordinatorRegistered) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorCoordinatorRegistered) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchMigrationCompleted provides a mock function with given fields: opts, sink, newVersion, subID +func (_m *VRFCoordinatorInterface) WatchMigrationCompleted(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorMigrationCompleted, newVersion []uint8, subID []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, newVersion, subID) + + if len(ret) == 0 { + panic("no return value specified for WatchMigrationCompleted") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorMigrationCompleted, []uint8, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, newVersion, subID) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorMigrationCompleted, []uint8, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, newVersion, subID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorMigrationCompleted, []uint8, []*big.Int) error); ok { + r1 = rf(opts, sink, newVersion, subID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOutputsServed provides a mock function with given fields: opts, sink +func (_m *VRFCoordinatorInterface) WatchOutputsServed(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorOutputsServed) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for 
WatchOutputsServed") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorOutputsServed) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorOutputsServed) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorOutputsServed) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOwnershipTransferRequested provides a mock function with given fields: opts, sink, from, to +func (_m *VRFCoordinatorInterface) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, from, to) + + if len(ret) == 0 { + panic("no return value specified for WatchOwnershipTransferRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorOwnershipTransferRequested, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorOwnershipTransferRequested, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorOwnershipTransferRequested, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOwnershipTransferred provides a 
mock function with given fields: opts, sink, from, to +func (_m *VRFCoordinatorInterface) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, from, to) + + if len(ret) == 0 { + panic("no return value specified for WatchOwnershipTransferred") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorOwnershipTransferred, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorOwnershipTransferred, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorOwnershipTransferred, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchPauseFlagChanged provides a mock function with given fields: opts, sink +func (_m *VRFCoordinatorInterface) WatchPauseFlagChanged(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorPauseFlagChanged) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchPauseFlagChanged") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorPauseFlagChanged) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorPauseFlagChanged) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorPauseFlagChanged) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRandomWordsFulfilled provides a mock function with given fields: opts, sink +func (_m *VRFCoordinatorInterface) WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorRandomWordsFulfilled) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchRandomWordsFulfilled") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorRandomWordsFulfilled) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorRandomWordsFulfilled) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorRandomWordsFulfilled) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRandomnessFulfillmentRequested provides a mock function with given fields: opts, sink, requestID +func (_m *VRFCoordinatorInterface) WatchRandomnessFulfillmentRequested(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequested, requestID []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, requestID) + + if len(ret) == 0 { + panic("no return value specified for WatchRandomnessFulfillmentRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequested, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, 
requestID) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequested, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, requestID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequested, []*big.Int) error); ok { + r1 = rf(opts, sink, requestID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRandomnessRedeemed provides a mock function with given fields: opts, sink, requestID, requester +func (_m *VRFCoordinatorInterface) WatchRandomnessRedeemed(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorRandomnessRedeemed, requestID []*big.Int, requester []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, requestID, requester) + + if len(ret) == 0 { + panic("no return value specified for WatchRandomnessRedeemed") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorRandomnessRedeemed, []*big.Int, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, requestID, requester) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorRandomnessRedeemed, []*big.Int, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, requestID, requester) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorRandomnessRedeemed, []*big.Int, []common.Address) error); ok { + r1 = rf(opts, sink, requestID, requester) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRandomnessRequested provides a mock function with given fields: opts, sink, requestID +func (_m *VRFCoordinatorInterface) WatchRandomnessRequested(opts *bind.WatchOpts, sink 
chan<- *vrf_coordinator.VRFCoordinatorRandomnessRequested, requestID []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, requestID) + + if len(ret) == 0 { + panic("no return value specified for WatchRandomnessRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorRandomnessRequested, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, requestID) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorRandomnessRequested, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, requestID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorRandomnessRequested, []*big.Int) error); ok { + r1 = rf(opts, sink, requestID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionCanceled provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorInterface) WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorSubscriptionCanceled, subId []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, subId) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionCanceled") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionCanceled, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionCanceled, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionCanceled, 
[]*big.Int) error); ok { + r1 = rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionConsumerAdded provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorInterface) WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorSubscriptionConsumerAdded, subId []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, subId) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionConsumerAdded") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionConsumerAdded, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionConsumerAdded, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionConsumerAdded, []*big.Int) error); ok { + r1 = rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionConsumerRemoved provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorInterface) WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemoved, subId []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, subId) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionConsumerRemoved") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemoved, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := 
ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemoved, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionConsumerRemoved, []*big.Int) error); ok { + r1 = rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionCreated provides a mock function with given fields: opts, sink, subId, owner +func (_m *VRFCoordinatorInterface) WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorSubscriptionCreated, subId []*big.Int, owner []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, subId, owner) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionCreated") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionCreated, []*big.Int, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, subId, owner) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionCreated, []*big.Int, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, subId, owner) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionCreated, []*big.Int, []common.Address) error); ok { + r1 = rf(opts, sink, subId, owner) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionFunded provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorInterface) WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorSubscriptionFunded, subId []*big.Int) (event.Subscription, 
error) { + ret := _m.Called(opts, sink, subId) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionFunded") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionFunded, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionFunded, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionFunded, []*big.Int) error); ok { + r1 = rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionOwnerTransferRequested provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorInterface) WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequested, subId []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, subId) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionOwnerTransferRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequested, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequested, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferRequested, []*big.Int) error); ok 
{ + r1 = rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionOwnerTransferred provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorInterface) WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferred, subId []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, subId) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionOwnerTransferred") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferred, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferred, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator.VRFCoordinatorSubscriptionOwnerTransferred, []*big.Int) error); ok { + r1 = rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewVRFCoordinatorInterface creates a new instance of VRFCoordinatorInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewVRFCoordinatorInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *VRFCoordinatorInterface { + mock := &VRFCoordinatorInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache.go new file mode 100644 index 00000000..bd249c6b --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache.go @@ -0,0 +1,125 @@ +package coordinator + +import ( + "runtime" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" +) + +// ocrCache is a caching strucuture that allows items to be stored and then evicted +// based on an eviction window. In this package, it is being used to track in-flight +// items the coordinator includes in OCR reports, such that the items can be checked against +// the cache to avoid double-transmissions. +type ocrCache[T any] struct { + evictionWindow time.Duration + cacheMu sync.Mutex + cache map[common.Hash]*cacheItem[T] + cleaner *intervalCacheCleaner[T] +} + +type cacheItem[T any] struct { + item T + itemKey common.Hash + timeStored time.Time +} + +// NewBlockCache constructs a new cache. +func NewBlockCache[T any](evictionWindow time.Duration) *ocrCache[T] { + + // Construct cache cleaner to evict old items. + cleaner := &intervalCacheCleaner[T]{ + interval: evictionWindow, + stop: make(chan struct{}, 1), + } + + // Instantiate the cache for type T. + cache := &ocrCache[T]{ + cacheMu: sync.Mutex{}, + cache: make(map[common.Hash]*cacheItem[T]), + evictionWindow: evictionWindow, + cleaner: cleaner, + } + + // Stop the cleaner upon garbage collection of the cache. + runtime.SetFinalizer(cache, func(b *ocrCache[T]) { b.cleaner.stop <- struct{}{} }) + + return cache +} + +// AddItem adds an item to the cache. +func (l *ocrCache[T]) CacheItem(item T, itemKey common.Hash, timeStored time.Time) { + + // Construct new item to be stored. 
+ newItem := &cacheItem[T]{ + item: item, + itemKey: itemKey, + timeStored: timeStored, + } + + // Lock, and defer unlock. + l.cacheMu.Lock() + defer l.cacheMu.Unlock() + + // Assign item to key. + l.cache[itemKey] = newItem +} + +func (l *ocrCache[T]) SetEvictonWindow(newWindow time.Duration) { + l.evictionWindow = newWindow +} + +// AddItem adds an item to the cache. +func (l *ocrCache[T]) GetItem(itemKey common.Hash) (item *T) { + + // Lock, and defer unlock. + l.cacheMu.Lock() + defer l.cacheMu.Unlock() + + // Construct new item to be stored. + cacheItem := l.cache[itemKey] + + // Return nil if the item is not found, otherwise return item. + if cacheItem == nil { + return + } + + return &cacheItem.item +} + +// EvictExpiredItems removes all expired items stored in the cache. +func (l *ocrCache[T]) EvictExpiredItems(currentTime time.Time) { + + // Lock, and defer unlock. + l.cacheMu.Lock() + defer l.cacheMu.Unlock() + + // Iteratively check all item ages, and delete an item if it is expired. + for key, item := range l.cache { + diff := currentTime.Sub(item.timeStored) + if diff > l.evictionWindow { + delete(l.cache, key) + } + } +} + +// A cache cleaner that evicts items on a regular interval. +type intervalCacheCleaner[T any] struct { + interval time.Duration + stop chan struct{} +} + +// Run evicts expired items every n seconds, until the "stop" channel is triggered. 
+func (ic *intervalCacheCleaner[T]) Run(c *ocrCache[T]) { + ticker := time.NewTicker(ic.interval) + for { + select { + case <-ticker.C: + c.EvictExpiredItems(time.Now().UTC()) + case <-ic.stop: + ticker.Stop() + return + } + } +} diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache_test.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache_test.go new file mode 100644 index 00000000..57aaf1c5 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/ocr_cache_test.go @@ -0,0 +1,166 @@ +package coordinator + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" +) + +func TestNewCache(t *testing.T) { + b := NewBlockCache[int](time.Second) + + assert.Equal(t, time.Second, b.evictionWindow, "must set correct blockEvictionWindow") +} + +func TestCache(t *testing.T) { + t.Run("Happy path, no overwrites.", func(t *testing.T) { + + now := time.Now().UTC() + + tests := []struct { + Key common.Hash + Value int + }{ + {Key: common.HexToHash("0x0"), Value: 1}, + {Key: common.HexToHash("0x1"), Value: 2}, + {Key: common.HexToHash("0x2"), Value: 3}, + {Key: common.HexToHash("0x3"), Value: 4}, + {Key: common.HexToHash("0x4"), Value: 5}, + } + + c := NewBlockCache[int](100 * time.Second) + + // Populate cache with ordered items. + for i, test := range tests { + c.CacheItem(test.Value, test.Key, getSecondsAfterNow(now, i)) + item := c.GetItem(test.Key) + assert.Equal(t, test.Value, *item) + } + + // Ensure cache has 5 items, with the newest and oldest pointers correct. + assert.Equal(t, 5, len(c.cache), "cache should contain 5 keys") + + // Evict all items. + evictionTime := getSecondsAfterNow(now, 105) + c.EvictExpiredItems(evictionTime) + assert.Equal(t, 0, len(c.cache), "cache should contain 0 keys") + + // Cache a new item. 
+ c.CacheItem(tests[0].Value, tests[0].Key, getSecondsAfterNow(now, 10)) + item := c.GetItem(tests[0].Key) + assert.Equal(t, tests[0].Value, *item) + + // Attempting a new eviction should have no effect. + c.EvictExpiredItems(evictionTime) + assert.Equal(t, 1, len(c.cache), "cache should contain 1 key") + + // Reduce eviction window. + c.SetEvictonWindow(time.Second * 50) + + // Attempting a new eviction will remove the added item. + c.EvictExpiredItems(evictionTime) + assert.Equal(t, 0, len(c.cache), "cache should contain 0 keys") + }) + + t.Run("Happy path, override middle item.", func(t *testing.T) { + + now := time.Now().UTC() + + tests := []struct { + Key common.Hash + Value int + }{ + {Key: common.HexToHash("0x0"), Value: 1}, + {Key: common.HexToHash("0x1"), Value: 2}, + {Key: common.HexToHash("0x2"), Value: 3}, + {Key: common.HexToHash("0x3"), Value: 4}, + {Key: common.HexToHash("0x1"), Value: 5}, + } + + c := NewBlockCache[int](100 * time.Second) + + // Populate cache with items. + for i, test := range tests { + c.CacheItem(test.Value, test.Key, getSecondsAfterNow(now, i)) + item := c.GetItem(test.Key) + assert.Equal(t, test.Value, *item) + } + + // Ensure cache has 4 items, with the newest and oldest pointers correct. + assert.Equal(t, 4, len(c.cache), "cache should contain 4 keys") + + // Evict all but two items. + c.EvictExpiredItems(getSecondsAfterNow(now, 103)) + assert.Equal(t, 2, len(c.cache), "cache should contain 2 keys") + + // Evict all but one items. + c.EvictExpiredItems(getSecondsAfterNow(now, 104)) + assert.Equal(t, 1, len(c.cache), "cache should contain 1 keys") + + // Evict remaining item. 
+ c.EvictExpiredItems(getSecondsAfterNow(now, 105)) + assert.Equal(t, 0, len(c.cache), "cache should contain 0 keys") + }) + + t.Run("Happy path, override last item.", func(t *testing.T) { + + now := time.Now().UTC() + + tests := []struct { + Key common.Hash + Value int + }{ + {Key: common.HexToHash("0x0"), Value: 1}, + {Key: common.HexToHash("0x1"), Value: 2}, + {Key: common.HexToHash("0x2"), Value: 3}, + {Key: common.HexToHash("0x3"), Value: 4}, + {Key: common.HexToHash("0x0"), Value: 5}, + } + + c := NewBlockCache[int](100 * time.Second) + + // Populate cache with items. + for i, test := range tests { + c.CacheItem(test.Value, test.Key, getSecondsAfterNow(now, i)) + item := c.GetItem(test.Key) + assert.Equal(t, test.Value, *item) + } + + // Ensure cache has 4 items, with the newest and oldest pointers correct. + assert.Equal(t, 4, len(c.cache), "cache should contain 4 keys") + + // Evict all but one item. + c.EvictExpiredItems(getSecondsAfterNow(now, 104)) + assert.Equal(t, 1, len(c.cache), "cache should contain 1 keys") + + // Cache a new item. + c.CacheItem(tests[1].Value, tests[1].Key, getSecondsAfterNow(now, 110)) + item := c.GetItem(tests[1].Key) + assert.Equal(t, tests[1].Value, *item) + + // Assert correct length. + assert.Equal(t, 2, len(c.cache), "cache should contain 2 keys") + + // Replace the oldest item. + c.CacheItem(tests[0].Value, tests[0].Key, getSecondsAfterNow(now, 111)) + item = c.GetItem(tests[0].Key) + assert.Equal(t, tests[0].Value, *item) + + // Assert correct length. + assert.Equal(t, 2, len(c.cache), "cache should contain 2 keys") + + // Replace the newest item. + c.CacheItem(tests[0].Value, tests[0].Key, getSecondsAfterNow(now, 112)) + item = c.GetItem(tests[0].Key) + assert.Equal(t, tests[0].Value, *item) + + // Assert correct length. 
+ assert.Equal(t, 2, len(c.cache), "cache should contain 2 keys") + }) +} + +func getSecondsAfterNow(now time.Time, i int) time.Time { + return now.Add(time.Duration(i) * time.Second) +} diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/router.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/router.go new file mode 100644 index 00000000..47507dba --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/router.go @@ -0,0 +1,82 @@ +package coordinator + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_coordinator" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +var _ VRFBeaconCoordinator = &vrfRouter{} + +// VRFProxy routes requests to VRFBeacon and VRFCoordinator go wrappers and implements VRFBeaconCoordinator interface +type vrfRouter struct { + lggr logger.Logger + beacon vrf_beacon.VRFBeaconInterface + coordinator vrf_coordinator.VRFCoordinatorInterface +} + +func newRouter( + lggr logger.Logger, + beaconAddress common.Address, + coordinatorAddress common.Address, + client evmclient.Client, +) (VRFBeaconCoordinator, error) { + beacon, err := vrf_beacon.NewVRFBeacon(beaconAddress, client) + if err != nil { + return nil, errors.Wrap(err, "beacon wrapper creation") + } + coordinator, err := vrf_coordinator.NewVRFCoordinator(coordinatorAddress, client) + if err != nil { + return nil, errors.Wrap(err, "coordinator wrapper creation") + } + return &vrfRouter{ + lggr: lggr, + beacon: beacon, + coordinator: coordinator, + }, nil +} + +// SProvingKeyHash retrieves the proving key hash from the on-chain 
contract. +// Calls VRF beacon wrapper to retrieve proving key hash +func (v *vrfRouter) SProvingKeyHash(opts *bind.CallOpts) ([32]byte, error) { + return v.beacon.SProvingKeyHash(opts) +} + +// SKeyID retrieves the keyID from the on-chain contract. +// Calls VRF beacon wrapper to retrieve key ID +func (v *vrfRouter) SKeyID(opts *bind.CallOpts) ([32]byte, error) { + return v.beacon.SKeyID(opts) +} + +// IBeaconPeriodBlocks retrieves the beacon period in blocks from the on-chain contract. +// Calls VRF coordinator wrapper to beacon period blocks +func (v *vrfRouter) IBeaconPeriodBlocks(opts *bind.CallOpts) (*big.Int, error) { + return v.coordinator.IBeaconPeriodBlocks(opts) +} + +// ParseLog parses the raw log data and topics into a go object. +// The returned object must be casted to the expected type. +// Calls either VRF beacon wrapper or VRF coordinator wrapper depending on the addresses of the log +func (v *vrfRouter) ParseLog(log types.Log) (generated.AbigenLog, error) { + if log.Address == v.beacon.Address() { + return v.beacon.ParseLog(log) + } else if log.Address == v.coordinator.Address() { + return v.coordinator.ParseLog(log) + } + return nil, errors.Errorf("failed to parse log. contractAddress: %x logs: %x", log.Address, log.Topics) +} + +// GetConfirmationDelays retrieves confirmation delays from the on-chain contract. 
+// Calls VRF coordinator to retrieve confirmation delays +func (v *vrfRouter) GetConfirmationDelays(opts *bind.CallOpts) ([8]*big.Int, error) { + return v.coordinator.GetConfirmationDelays(opts) +} diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/router_test.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/router_test.go new file mode 100644 index 00000000..c8cb2631 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/router_test.go @@ -0,0 +1,130 @@ +package coordinator + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_coordinator" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2vrf/coordinator/mocks" +) + +var nilOpts *bind.CallOpts + +func TestRouter_SProvingKeyHash(t *testing.T) { + beacon := mocks.NewVRFBeaconInterface(t) + keyHash := [32]byte{1} + router := vrfRouter{ + beacon: beacon, + } + beacon.On("SProvingKeyHash", mock.Anything).Return(keyHash, nil).Once() + + result, err := router.SProvingKeyHash(nilOpts) + assert.NoError(t, err) + assert.Equal(t, keyHash, result) +} + +func TestRouter_SKeyID(t *testing.T) { + beacon := mocks.NewVRFBeaconInterface(t) + keyID := [32]byte{2} + router := vrfRouter{ + beacon: beacon, + } + beacon.On("SKeyID", mock.Anything).Return(keyID, nil).Once() + + result, err := router.SKeyID(nilOpts) + assert.NoError(t, err) + assert.Equal(t, keyID, result) +} + +func TestRouter_IBeaconPeriodBlocks(t *testing.T) { + coordinator := mocks.NewVRFCoordinatorInterface(t) + periodBlocks := big.NewInt(3) + router := vrfRouter{ + coordinator: coordinator, + } + coordinator.On("IBeaconPeriodBlocks", mock.Anything).Return(periodBlocks, nil).Once() + + result, err := 
router.IBeaconPeriodBlocks(nilOpts) + assert.NoError(t, err) + assert.Equal(t, periodBlocks, result) +} + +func TestRouter_GetConfirmationDelays(t *testing.T) { + coordinator := mocks.NewVRFCoordinatorInterface(t) + confDelays := [8]*big.Int{big.NewInt(4)} + router := vrfRouter{ + coordinator: coordinator, + } + coordinator.On("GetConfirmationDelays", mock.Anything).Return(confDelays, nil).Once() + + result, err := router.GetConfirmationDelays(nilOpts) + assert.NoError(t, err) + assert.Equal(t, confDelays, result) +} + +func TestRouter_ParseLog(t *testing.T) { + t.Parallel() + + t.Run("parse beacon log", func(t *testing.T) { + addr := newAddress(t) + log := types.Log{ + Address: addr, + } + parsedLog := vrf_beacon.VRFBeaconNewTransmission{} + beacon := mocks.NewVRFBeaconInterface(t) + router := vrfRouter{ + beacon: beacon, + } + beacon.On("Address").Return(addr).Once() + beacon.On("ParseLog", log).Return(parsedLog, nil).Once() + + result, err := router.ParseLog(log) + assert.NoError(t, err) + assert.Equal(t, result, parsedLog) + }) + + t.Run("parse coordinator log", func(t *testing.T) { + addr := newAddress(t) + log := types.Log{ + Address: addr, + } + parsedLog := vrf_coordinator.VRFCoordinatorRandomnessRequested{} + beacon := mocks.NewVRFBeaconInterface(t) + coordinator := mocks.NewVRFCoordinatorInterface(t) + router := vrfRouter{ + beacon: beacon, + coordinator: coordinator, + } + beacon.On("Address").Return(newAddress(t)).Once() + coordinator.On("Address").Return(addr).Once() + coordinator.On("ParseLog", log).Return(parsedLog, nil).Once() + + result, err := router.ParseLog(log) + assert.NoError(t, err) + assert.Equal(t, result, parsedLog) + }) + + t.Run("parse log unexpected log", func(t *testing.T) { + log := types.Log{ + Address: newAddress(t), + } + beacon := mocks.NewVRFBeaconInterface(t) + coordinator := mocks.NewVRFCoordinatorInterface(t) + router := vrfRouter{ + beacon: beacon, + coordinator: coordinator, + } + 
beacon.On("Address").Return(newAddress(t)).Once() + coordinator.On("Address").Return(newAddress(t)).Once() + + result, err := router.ParseLog(log) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "failed to parse log") + }) +} diff --git a/core/services/ocr2/plugins/ocr2vrf/coordinator/topics.go b/core/services/ocr2/plugins/ocr2vrf/coordinator/topics.go new file mode 100644 index 00000000..638aac7d --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/coordinator/topics.go @@ -0,0 +1,28 @@ +package coordinator + +import ( + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_coordinator" +) + +type topics struct { + randomnessRequestedTopic common.Hash + randomnessFulfillmentRequestedTopic common.Hash + randomWordsFulfilledTopic common.Hash + configSetTopic common.Hash + newTransmissionTopic common.Hash + outputsServedTopic common.Hash +} + +func newTopics() topics { + return topics{ + randomnessRequestedTopic: vrf_coordinator.VRFCoordinatorRandomnessRequested{}.Topic(), + randomnessFulfillmentRequestedTopic: vrf_coordinator.VRFCoordinatorRandomnessFulfillmentRequested{}.Topic(), + randomWordsFulfilledTopic: vrf_coordinator.VRFCoordinatorRandomWordsFulfilled{}.Topic(), + configSetTopic: vrf_beacon.VRFBeaconConfigSet{}.Topic(), + newTransmissionTopic: vrf_beacon.VRFBeaconNewTransmission{}.Topic(), + outputsServedTopic: vrf_coordinator.VRFCoordinatorOutputsServed{}.Topic(), + } +} diff --git a/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go b/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go new file mode 100644 index 00000000..4251d7e7 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/internal/ocr2vrf_integration_test.go @@ -0,0 +1,884 @@ +package internal_test + +import ( + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "math/big" + "testing" + 
"time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + "go.dedis.ch/kyber/v3" + + "github.com/goplugin/libocr/commontypes" + confighelper2 "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + ocrtypes2 "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-vrf/altbn_128" + ocr2dkg "github.com/goplugin/plugin-vrf/dkg" + "github.com/goplugin/plugin-vrf/ocr2vrf" + ocr2vrftypes "github.com/goplugin/plugin-vrf/types" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + commonutils "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_forwarder" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_v3_aggregator_contract" + dkg_wrapper "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/dkg" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/load_test_beacon_consumer" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon_consumer" + vrf_wrapper "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_coordinator" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgencryptkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgsignkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/keystest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrbootstrap" +) + +type ocr2vrfUniverse struct { + owner *bind.TransactOpts + backend *backends.SimulatedBackend + + dkgAddress common.Address + dkg *dkg_wrapper.DKG + + beaconAddress common.Address + coordinatorAddress common.Address + beacon *vrf_beacon.VRFBeacon + coordinator *vrf_wrapper.VRFCoordinator + + linkAddress common.Address + link *link_token_interface.LinkToken + + consumerAddress common.Address + consumer *vrf_beacon_consumer.BeaconVRFConsumer + + loadTestConsumerAddress common.Address + loadTestConsumer *load_test_beacon_consumer.LoadTestBeaconVRFConsumer + + feedAddress common.Address + feed *mock_v3_aggregator_contract.MockV3AggregatorContract + + subID *big.Int +} + +const ( + fundingAmount int64 = 5e18 +) + +type ocr2Node struct { + app *cltest.TestApplication + peerID string + transmitter common.Address + effectiveTransmitter common.Address + keybundle ocr2key.KeyBundle + sendingKeys []string +} + +func setupOCR2VRFContracts( + t *testing.T, beaconPeriod int64, keyID [32]byte, consumerShouldFail bool) ocr2vrfUniverse { + owner := testutils.MustNewSimTransactor(t) + owner.GasPrice = assets.GWei(1).ToInt() + genesisData := core.GenesisAlloc{ + owner.From: { + 
Balance: assets.Ether(100).ToInt(), + }, + } + b := backends.NewSimulatedBackend(genesisData, ethconfig.Defaults.Miner.GasCeil*2) + + // deploy OCR2VRF contracts, which have the following deploy order: + // * link token + // * link/eth feed + // * DKG + // * VRF (coordinator, and beacon) + // * VRF consumer + linkAddress, _, link, err := link_token_interface.DeployLinkToken( + owner, b) + require.NoError(t, err) + b.Commit() + + feedAddress, _, feed, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract( + owner, b, 18, assets.GWei(int(1e7)).ToInt()) // 0.01 eth per link + require.NoError(t, err) + b.Commit() + + dkgAddress, _, dkg, err := dkg_wrapper.DeployDKG(owner, b) + require.NoError(t, err) + b.Commit() + + coordinatorAddress, _, coordinator, err := vrf_wrapper.DeployVRFCoordinator( + owner, b, big.NewInt(beaconPeriod), linkAddress) + require.NoError(t, err) + b.Commit() + + require.NoError(t, commonutils.JustError(coordinator.SetCallbackConfig(owner, vrf_wrapper.VRFCoordinatorCallbackConfig{ + MaxCallbackGasLimit: 2.5e6, + MaxCallbackArgumentsLength: 160, // 5 EVM words + }))) + b.Commit() + + require.NoError(t, commonutils.JustError(coordinator.SetCoordinatorConfig(owner, vrf_wrapper.VRFBeaconTypesCoordinatorConfig{ + RedeemableRequestGasOverhead: 50_000, + CallbackRequestGasOverhead: 50_000, + StalenessSeconds: 60, + FallbackWeiPerUnitLink: assets.GWei(int(1e7)).ToInt(), + }))) + b.Commit() + + beaconAddress, _, beacon, err := vrf_beacon.DeployVRFBeacon( + owner, b, linkAddress, coordinatorAddress, dkgAddress, keyID) + require.NoError(t, err) + b.Commit() + + consumerAddress, _, consumer, err := vrf_beacon_consumer.DeployBeaconVRFConsumer( + owner, b, coordinatorAddress, consumerShouldFail, big.NewInt(beaconPeriod)) + require.NoError(t, err) + b.Commit() + + loadTestConsumerAddress, _, loadTestConsumer, err := load_test_beacon_consumer.DeployLoadTestBeaconVRFConsumer( + owner, b, coordinatorAddress, consumerShouldFail, big.NewInt(beaconPeriod)) 
+ require.NoError(t, err) + b.Commit() + + // Set up coordinator subscription for billing. + require.NoError(t, commonutils.JustError(coordinator.CreateSubscription(owner))) + b.Commit() + + fopts := &bind.FilterOpts{} + + subscriptionIterator, err := coordinator.FilterSubscriptionCreated(fopts, nil, []common.Address{owner.From}) + require.NoError(t, err) + + require.True(t, subscriptionIterator.Next()) + subID := subscriptionIterator.Event.SubId + + require.NoError(t, commonutils.JustError(coordinator.AddConsumer(owner, subID, consumerAddress))) + b.Commit() + require.NoError(t, commonutils.JustError(coordinator.AddConsumer(owner, subID, loadTestConsumerAddress))) + b.Commit() + data, err := utils.ABIEncode(`[{"type":"uint256"}]`, subID) + require.NoError(t, err) + require.NoError(t, commonutils.JustError(link.TransferAndCall(owner, coordinatorAddress, big.NewInt(5e18), data))) + b.Commit() + + _, err = dkg.AddClient(owner, keyID, beaconAddress) + require.NoError(t, err) + b.Commit() + + _, err = coordinator.SetProducer(owner, beaconAddress) + require.NoError(t, err) + + // Achieve finality depth so the CL node can work properly. 
+ for i := 0; i < 20; i++ { + b.Commit() + } + + return ocr2vrfUniverse{ + owner: owner, + backend: b, + dkgAddress: dkgAddress, + dkg: dkg, + beaconAddress: beaconAddress, + coordinatorAddress: coordinatorAddress, + beacon: beacon, + coordinator: coordinator, + linkAddress: linkAddress, + link: link, + consumerAddress: consumerAddress, + consumer: consumer, + loadTestConsumerAddress: loadTestConsumerAddress, + loadTestConsumer: loadTestConsumer, + feedAddress: feedAddress, + feed: feed, + subID: subID, + } +} + +func setupNodeOCR2( + t *testing.T, + owner *bind.TransactOpts, + port int, + dbName string, + b *backends.SimulatedBackend, + useForwarders bool, + p2pV2Bootstrappers []commontypes.BootstrapperLocator, +) *ocr2Node { + p2pKey := keystest.NewP2PKeyV2(t) + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Insecure.OCRDevelopmentMode = ptr(true) // Disables ocr spec validation so we can have fast polling for the test. + + c.Feature.LogPoller = ptr(true) + + c.P2P.PeerID = ptr(p2pKey.PeerID()) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.DeltaDial = commonconfig.MustNewDuration(500 * time.Millisecond) + c.P2P.V2.DeltaReconcile = commonconfig.MustNewDuration(5 * time.Second) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", port)} + if len(p2pV2Bootstrappers) > 0 { + c.P2P.V2.DefaultBootstrappers = &p2pV2Bootstrappers + } + + c.OCR.Enabled = ptr(false) + c.OCR2.Enabled = ptr(true) + + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(500 * time.Millisecond) + c.EVM[0].GasEstimator.LimitDefault = ptr[uint32](3_500_000) + c.EVM[0].Transactions.ForwardersEnabled = &useForwarders + c.OCR2.ContractPollInterval = commonconfig.MustNewDuration(10 * time.Second) + }) + + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, b, p2pKey) + + var sendingKeys []ethkey.KeyV2 + { + var err error + sendingKeys, err = app.KeyStore.Eth().EnabledKeysForChain(testutils.SimulatedChainID) + 
require.NoError(t, err) + require.Len(t, sendingKeys, 1) + } + transmitter := sendingKeys[0].Address + effectiveTransmitter := sendingKeys[0].Address + + if useForwarders { + sendingKeysAddresses := []common.Address{sendingKeys[0].Address} + + // Add new sending key. + k, err := app.KeyStore.Eth().Create() + require.NoError(t, err) + require.NoError(t, app.KeyStore.Eth().Add(k.Address, testutils.SimulatedChainID)) + require.NoError(t, app.KeyStore.Eth().Enable(k.Address, testutils.SimulatedChainID)) + sendingKeys = append(sendingKeys, k) + sendingKeysAddresses = append(sendingKeysAddresses, k.Address) + + require.Len(t, sendingKeys, 2) + + // Deploy a forwarder. + faddr, _, authorizedForwarder, err := authorized_forwarder.DeployAuthorizedForwarder(owner, b, common.HexToAddress("0x326C977E6efc84E512bB9C30f76E30c160eD06FB"), owner.From, common.Address{}, []byte{}) + require.NoError(t, err) + + // Set the node's sending keys as authorized senders. + _, err = authorizedForwarder.SetAuthorizedSenders(owner, sendingKeysAddresses) + require.NoError(t, err) + b.Commit() + + // Add the forwarder to the node's forwarder manager. + forwarderORM := forwarders.NewORM(app.GetSqlxDB(), logger.TestLogger(t), config.Database()) + chainID := ubig.Big(*b.Blockchain().Config().ChainID) + _, err = forwarderORM.CreateForwarder(faddr, chainID) + require.NoError(t, err) + effectiveTransmitter = faddr + } + + // Fund the sending keys with some ETH. 
+ var sendingKeyStrings []string + for _, k := range sendingKeys { + sendingKeyStrings = append(sendingKeyStrings, k.Address.String()) + n, err := b.NonceAt(testutils.Context(t), owner.From, nil) + require.NoError(t, err) + + tx := cltest.NewLegacyTransaction( + n, k.Address, + assets.Ether(1).ToInt(), + 21000, + assets.GWei(1).ToInt(), + nil) + signedTx, err := owner.Signer(owner.From, tx) + require.NoError(t, err) + err = b.SendTransaction(testutils.Context(t), signedTx) + require.NoError(t, err) + b.Commit() + } + + kb, err := app.GetKeyStore().OCR2().Create("evm") + require.NoError(t, err) + + return &ocr2Node{ + app: app, + peerID: p2pKey.PeerID().Raw(), + transmitter: transmitter, + effectiveTransmitter: effectiveTransmitter, + keybundle: kb, + sendingKeys: sendingKeyStrings, + } +} + +func TestIntegration_OCR2VRF_ForwarderFlow(t *testing.T) { + testutils.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/VRF-688") + runOCR2VRFTest(t, true) +} + +func TestIntegration_OCR2VRF(t *testing.T) { + testutils.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/VRF-688") + runOCR2VRFTest(t, false) +} + +func runOCR2VRFTest(t *testing.T, useForwarders bool) { + ctx := testutils.Context(t) + keyID := randomKeyID(t) + uni := setupOCR2VRFContracts(t, 5, keyID, false) + + t.Log("Creating bootstrap node") + + bootstrapNodePort := freeport.GetOne(t) + bootstrapNode := setupNodeOCR2(t, uni.owner, bootstrapNodePort, "bootstrap", uni.backend, false, nil) + numNodes := 5 + + t.Log("Creating OCR2 nodes") + var ( + oracles []confighelper2.OracleIdentityExtra + transmitters []common.Address + payees []common.Address + payeeTransactors []*bind.TransactOpts + effectiveTransmitters []common.Address + onchainPubKeys []common.Address + kbs []ocr2key.KeyBundle + apps []*cltest.TestApplication + dkgEncrypters []dkgencryptkey.Key + dkgSigners []dkgsignkey.Key + sendingKeys [][]string + ) + ports := freeport.GetN(t, numNodes) + for i := 0; i < numNodes; i++ { + // Supply 
the bootstrap IP and port as a V2 peer address + bootstrappers := []commontypes.BootstrapperLocator{ + {PeerID: bootstrapNode.peerID, Addrs: []string{ + fmt.Sprintf("127.0.0.1:%d", bootstrapNodePort), + }}, + } + node := setupNodeOCR2(t, uni.owner, ports[i], fmt.Sprintf("ocr2vrforacle%d", i), uni.backend, useForwarders, bootstrappers) + sendingKeys = append(sendingKeys, node.sendingKeys) + + dkgSignKey, err := node.app.GetKeyStore().DKGSign().Create() + require.NoError(t, err) + + dkgEncryptKey, err := node.app.GetKeyStore().DKGEncrypt().Create() + require.NoError(t, err) + + kbs = append(kbs, node.keybundle) + apps = append(apps, node.app) + transmitters = append(transmitters, node.transmitter) + payeeTransactor := testutils.MustNewSimTransactor(t) + payeeTransactors = append(payeeTransactors, payeeTransactor) + payees = append(payees, payeeTransactor.From) + effectiveTransmitters = append(effectiveTransmitters, node.effectiveTransmitter) + dkgEncrypters = append(dkgEncrypters, dkgEncryptKey) + dkgSigners = append(dkgSigners, dkgSignKey) + onchainPubKeys = append(onchainPubKeys, common.BytesToAddress(node.keybundle.PublicKey())) + oracles = append(oracles, confighelper2.OracleIdentityExtra{ + OracleIdentity: confighelper2.OracleIdentity{ + OnchainPublicKey: node.keybundle.PublicKey(), + TransmitAccount: ocrtypes2.Account(node.transmitter.String()), + OffchainPublicKey: node.keybundle.OffchainPublicKey(), + PeerID: node.peerID, + }, + ConfigEncryptionPublicKey: node.keybundle.ConfigEncryptionPublicKey(), + }) + } + + _, err := uni.beacon.SetPayees(uni.owner, transmitters, payees) + require.NoError(t, err) + + t.Log("starting ticker to commit blocks") + tick := time.NewTicker(1 * time.Second) + defer tick.Stop() + go func() { + for range tick.C { + uni.backend.Commit() + } + }() + + blockBeforeConfig, err := uni.backend.BlockByNumber(ctx, nil) + require.NoError(t, err) + + t.Log("Setting DKG config before block:", blockBeforeConfig.Number().String()) + + // set 
config for dkg + setDKGConfig( + t, + uni, + onchainPubKeys, + effectiveTransmitters, + 1, + oracles, + dkgSigners, + dkgEncrypters, + keyID, + ) + + t.Log("Adding bootstrap node job") + err = bootstrapNode.app.Start(testutils.Context(t)) + require.NoError(t, err) + + evmChains := bootstrapNode.app.GetRelayers().LegacyEVMChains() + require.NotNil(t, evmChains) + bootstrapJobSpec := fmt.Sprintf(` +type = "bootstrap" +name = "bootstrap" +contractConfigTrackerPollInterval = "15s" +relay = "evm" +schemaVersion = 1 +contractID = "%s" +[relayConfig] +chainID = 1337 +fromBlock = %d +`, uni.dkgAddress.Hex(), blockBeforeConfig.Number().Int64()) + t.Log("Creating bootstrap job:", bootstrapJobSpec) + ocrJob, err := ocrbootstrap.ValidatedBootstrapSpecToml(bootstrapJobSpec) + require.NoError(t, err) + err = bootstrapNode.app.AddJobV2(ctx, &ocrJob) + require.NoError(t, err) + + t.Log("Creating OCR2VRF jobs") + for i := 0; i < numNodes; i++ { + var sendingKeysString = fmt.Sprintf(`"%s"`, sendingKeys[i][0]) + for x := 1; x < len(sendingKeys[i]); x++ { + sendingKeysString = fmt.Sprintf(`%s,"%s"`, sendingKeysString, sendingKeys[i][x]) + } + err = apps[i].Start(testutils.Context(t)) + require.NoError(t, err) + + jobSpec := fmt.Sprintf(` +type = "offchainreporting2" +schemaVersion = 1 +name = "ocr2 vrf integration test" +maxTaskDuration = "30s" +contractID = "%s" +ocrKeyBundleID = "%s" +relay = "evm" +pluginType = "ocr2vrf" +transmitterID = "%s" +forwardingAllowed = %t +contractConfigTrackerPollInterval = "15s" + +[relayConfig] +chainID = 1337 +fromBlock = %d + +[pluginConfig] +dkgEncryptionPublicKey = "%s" +dkgSigningPublicKey = "%s" +dkgKeyID = "%s" +dkgContractAddress = "%s" + +vrfCoordinatorAddress = "%s" +linkEthFeedAddress = "%s" +`, uni.beaconAddress.String(), + kbs[i].ID(), + transmitters[i], + useForwarders, + blockBeforeConfig.Number().Int64(), + dkgEncrypters[i].PublicKeyString(), + dkgSigners[i].PublicKeyString(), + hex.EncodeToString(keyID[:]), + uni.dkgAddress.String(), 
+ uni.coordinatorAddress.String(), + uni.feedAddress.String(), + ) + t.Log("Creating OCR2VRF job with spec:", jobSpec) + ocrJob2, err2 := validate.ValidatedOracleSpecToml(apps[i].Config.OCR2(), apps[i].Config.Insecure(), jobSpec) + require.NoError(t, err2) + err2 = apps[i].AddJobV2(ctx, &ocrJob2) + require.NoError(t, err2) + } + + t.Log("Waiting for DKG key to get written") + // poll until a DKG key is written to the contract + // at that point we can start sending VRF requests + var emptyKH [32]byte + emptyHash := crypto.Keccak256Hash(emptyKH[:]) + gomega.NewWithT(t).Eventually(func() bool { + kh, err2 := uni.beacon.SProvingKeyHash(&bind.CallOpts{ + Context: testutils.Context(t), + }) + require.NoError(t, err2) + t.Log("proving keyhash:", hexutil.Encode(kh[:])) + return crypto.Keccak256Hash(kh[:]) != emptyHash + }, testutils.WaitTimeout(t), 5*time.Second).Should(gomega.BeTrue()) + + t.Log("DKG key written, setting VRF config") + + // set config for vrf now that dkg is ready + setVRFConfig( + t, + uni, + onchainPubKeys, + effectiveTransmitters, + 1, + oracles, + []int{1, 2, 3, 4, 5, 6, 7, 8}, + keyID) + + t.Log("Sending VRF request") + + initialSub, err := uni.coordinator.GetSubscription(nil, uni.subID) + require.NoError(t, err) + require.Equal(t, assets.Ether(5).ToInt(), initialSub.Balance) + + // Send a beacon VRF request and mine it + _, err = uni.consumer.TestRequestRandomness(uni.owner, 2, uni.subID, big.NewInt(1)) + require.NoError(t, err) + uni.backend.Commit() + + redemptionRequestID, err := uni.consumer.SMostRecentRequestID(nil) + require.NoError(t, err) + + // There is no premium on this request, so the cost of the request should have been: + // = (request overhead) * (gas price) / (PLI/ETH ratio) + // = (50_000 * 1 Gwei) / .01 + // = 5_000_000 GJuels + subAfterBeaconRequest, err := uni.coordinator.GetSubscription(nil, uni.subID) + require.NoError(t, err) + require.Equal(t, big.NewInt(initialSub.Balance.Int64()-assets.GWei(5_000_000).Int64()), 
subAfterBeaconRequest.Balance) + + // Send a fulfillment VRF request and mine it + _, err = uni.consumer.TestRequestRandomnessFulfillment(uni.owner, uni.subID, 1, big.NewInt(2), 100_000, []byte{}) + require.NoError(t, err) + uni.backend.Commit() + + fulfillmentRequestID, err := uni.consumer.SMostRecentRequestID(nil) + require.NoError(t, err) + + // There is no premium on this request, so the cost of the request should have been: + // = (request overhead + callback gas allowance) * (gas price) / (PLI/ETH ratio) + // = ((50_000 + 100_000) * 1 Gwei) / .01 + // = 15_000_000 GJuels + subAfterFulfillmentRequest, err := uni.coordinator.GetSubscription(nil, uni.subID) + require.NoError(t, err) + require.Equal(t, big.NewInt(subAfterBeaconRequest.Balance.Int64()-assets.GWei(15_000_000).Int64()), subAfterFulfillmentRequest.Balance) + + // Send two batched fulfillment VRF requests and mine them + _, err = uni.loadTestConsumer.TestRequestRandomnessFulfillmentBatch(uni.owner, uni.subID, 1, big.NewInt(2), 200_000, []byte{}, big.NewInt(2)) + require.NoError(t, err) + uni.backend.Commit() + + batchFulfillmentRequestID1, err := uni.loadTestConsumer.SRequestIDs(nil, big.NewInt(0), big.NewInt(0)) + require.NoError(t, err) + + batchFulfillmentRequestID2, err := uni.loadTestConsumer.SRequestIDs(nil, big.NewInt(0), big.NewInt(1)) + require.NoError(t, err) + + // There is no premium on these requests, so the cost of the requests should have been: + // = ((request overhead + callback gas allowance) * (gas price) / (PLI/ETH ratio)) * batch size + // = (((50_000 + 200_000) * 1 Gwei) / .01) * 2 + // = 50_000_000 GJuels + subAfterBatchFulfillmentRequest, err := uni.coordinator.GetSubscription(nil, uni.subID) + require.NoError(t, err) + require.Equal(t, big.NewInt(subAfterFulfillmentRequest.Balance.Int64()-assets.GWei(50_000_000).Int64()), subAfterBatchFulfillmentRequest.Balance) + + t.Logf("sub balance after batch fulfillment request: %d", subAfterBatchFulfillmentRequest.Balance) + + 
t.Log("waiting for fulfillment") + + var balanceAfterRefund *big.Int + // poll until we're able to redeem the randomness without reverting + // at that point, it's been fulfilled + gomega.NewWithT(t).Eventually(func() bool { + _, err2 := uni.consumer.TestRedeemRandomness(uni.owner, uni.subID, redemptionRequestID) + t.Logf("TestRedeemRandomness err: %+v", err2) + return err2 == nil + }, testutils.WaitTimeout(t), 5*time.Second).Should(gomega.BeTrue()) + + gomega.NewWithT(t).Eventually(func() bool { + // Ensure a refund is provided. Refund amount comes out to ~15_700_000 GJuels. + // We use an upper and lower bound such that this part of the test is not excessively brittle to upstream tweaks. + refundUpperBound := big.NewInt(0).Add(assets.GWei(17_000_000).ToInt(), subAfterBatchFulfillmentRequest.Balance) + refundLowerBound := big.NewInt(0).Add(assets.GWei(15_000_000).ToInt(), subAfterBatchFulfillmentRequest.Balance) + subAfterRefund, err2 := uni.coordinator.GetSubscription(nil, uni.subID) + require.NoError(t, err2) + balanceAfterRefund = subAfterRefund.Balance + if ok := ((balanceAfterRefund.Cmp(refundUpperBound) == -1) && (balanceAfterRefund.Cmp(refundLowerBound) == 1)); !ok { + t.Logf("unexpected sub balance after refund: %d", balanceAfterRefund) + return false + } + return true + }, testutils.WaitTimeout(t), 5*time.Second).Should(gomega.BeTrue()) + + // Mine block after redeeming randomness + uni.backend.Commit() + + // ensure that total sub balance is updated correctly + totalSubBalance, err := uni.coordinator.GetSubscriptionLinkBalance(nil) + require.NoError(t, err) + require.True(t, totalSubBalance.Cmp(balanceAfterRefund) == 0) + // ensure total link balance is correct before any payout + totalLinkBalance, err := uni.link.BalanceOf(nil, uni.coordinatorAddress) + require.NoError(t, err) + require.True(t, totalLinkBalance.Cmp(big.NewInt(fundingAmount)) == 0) + + // get total owed amount to NOPs and ensure linkAvailableForPayment (CLL profit) calculation is correct 
+ nopOwedAmount := new(big.Int) + for _, transmitter := range transmitters { + owedAmount, err2 := uni.beacon.OwedPayment(nil, transmitter) + require.NoError(t, err2) + nopOwedAmount = new(big.Int).Add(nopOwedAmount, owedAmount) + } + linkAvailable, err := uni.beacon.LinkAvailableForPayment(nil) + require.NoError(t, err) + debt := new(big.Int).Add(totalSubBalance, nopOwedAmount) + profit := new(big.Int).Sub(totalLinkBalance, debt) + require.True(t, linkAvailable.Cmp(profit) == 0) + + // test cancel subscription + linkBalanceBeforeCancel, err := uni.link.BalanceOf(nil, uni.owner.From) + require.NoError(t, err) + _, err = uni.coordinator.CancelSubscription(uni.owner, uni.subID, uni.owner.From) + require.NoError(t, err) + uni.backend.Commit() + linkBalanceAfterCancel, err := uni.link.BalanceOf(nil, uni.owner.From) + require.NoError(t, err) + require.True(t, new(big.Int).Add(linkBalanceBeforeCancel, totalSubBalance).Cmp(linkBalanceAfterCancel) == 0) + totalSubBalance, err = uni.coordinator.GetSubscriptionLinkBalance(nil) + require.NoError(t, err) + require.True(t, totalSubBalance.Cmp(big.NewInt(0)) == 0) + totalLinkBalance, err = uni.link.BalanceOf(nil, uni.coordinatorAddress) + require.NoError(t, err) + require.True(t, totalLinkBalance.Cmp(new(big.Int).Sub(big.NewInt(fundingAmount), balanceAfterRefund)) == 0) + + // payout node operators + totalNopPayout := new(big.Int) + for idx, payeeTransactor := range payeeTransactors { + // Fund the payee with some ETH. 
+ n, err2 := uni.backend.NonceAt(testutils.Context(t), uni.owner.From, nil) + require.NoError(t, err2) + tx := cltest.NewLegacyTransaction( + n, payeeTransactor.From, + assets.Ether(1).ToInt(), + 21000, + assets.GWei(1).ToInt(), + nil) + signedTx, err2 := uni.owner.Signer(uni.owner.From, tx) + require.NoError(t, err2) + err2 = uni.backend.SendTransaction(testutils.Context(t), signedTx) + require.NoError(t, err2) + + _, err2 = uni.beacon.WithdrawPayment(payeeTransactor, transmitters[idx]) + require.NoError(t, err2) + uni.backend.Commit() + payoutAmount, err2 := uni.link.BalanceOf(nil, payeeTransactor.From) + require.NoError(t, err2) + totalNopPayout = new(big.Int).Add(totalNopPayout, payoutAmount) + owedAmountAfter, err2 := uni.beacon.OwedPayment(nil, transmitters[idx]) + require.NoError(t, err2) + require.True(t, owedAmountAfter.Cmp(big.NewInt(0)) == 0) + } + require.True(t, nopOwedAmount.Cmp(totalNopPayout) == 0) + + // check total link balance after NOP payout + totalLinkBalanceAfterNopPayout, err := uni.link.BalanceOf(nil, uni.coordinatorAddress) + require.NoError(t, err) + require.True(t, totalLinkBalanceAfterNopPayout.Cmp(new(big.Int).Sub(totalLinkBalance, totalNopPayout)) == 0) + totalSubBalance, err = uni.coordinator.GetSubscriptionLinkBalance(nil) + require.NoError(t, err) + require.True(t, totalSubBalance.Cmp(big.NewInt(0)) == 0) + + // withdraw remaining profits after NOP payout + linkAvailable, err = uni.beacon.LinkAvailableForPayment(nil) + require.NoError(t, err) + linkBalanceBeforeWithdraw, err := uni.link.BalanceOf(nil, uni.owner.From) + require.NoError(t, err) + _, err = uni.beacon.WithdrawFunds(uni.owner, uni.owner.From, linkAvailable) + require.NoError(t, err) + uni.backend.Commit() + linkBalanceAfterWithdraw, err := uni.link.BalanceOf(nil, uni.owner.From) + require.NoError(t, err) + require.True(t, linkBalanceAfterWithdraw.Cmp(new(big.Int).Add(linkBalanceBeforeWithdraw, linkAvailable)) == 0) + linkAvailable, err = 
uni.beacon.LinkAvailableForPayment(nil) + require.NoError(t, err) + require.True(t, linkAvailable.Cmp(big.NewInt(0)) == 0) + + // poll until we're able to verify that consumer contract has stored randomness as expected + // First arg is the request ID, which starts at zero, second is the index into + // the random words. + gomega.NewWithT(t).Eventually(func() bool { + + var errs []error + rw1, err2 := uni.consumer.SReceivedRandomnessByRequestID(nil, redemptionRequestID, big.NewInt(0)) + t.Logf("TestRedeemRandomness 1st word err: %+v", err2) + errs = append(errs, err2) + rw2, err2 := uni.consumer.SReceivedRandomnessByRequestID(nil, redemptionRequestID, big.NewInt(1)) + t.Logf("TestRedeemRandomness 2nd word err: %+v", err2) + errs = append(errs, err2) + rw3, err2 := uni.consumer.SReceivedRandomnessByRequestID(nil, fulfillmentRequestID, big.NewInt(0)) + t.Logf("FulfillRandomness 1st word err: %+v", err2) + errs = append(errs, err2) + rw4, err2 := uni.loadTestConsumer.SReceivedRandomnessByRequestID(nil, batchFulfillmentRequestID1, big.NewInt(0)) + t.Logf("Batch FulfillRandomness 1st word err: %+v", err2) + errs = append(errs, err2) + rw5, err2 := uni.loadTestConsumer.SReceivedRandomnessByRequestID(nil, batchFulfillmentRequestID2, big.NewInt(0)) + t.Logf("Batch FulfillRandomness 2nd word err: %+v", err2) + errs = append(errs, err2) + batchTotalRequests, err2 := uni.loadTestConsumer.STotalRequests(nil) + t.Logf("Batch FulfillRandomness total requests err: %+v", err2) + errs = append(errs, err2) + batchTotalFulfillments, err2 := uni.loadTestConsumer.STotalFulfilled(nil) + t.Logf("Batch FulfillRandomness total fulfillments err: %+v", err2) + errs = append(errs, err2) + err2 = nil + if batchTotalRequests.Int64() != batchTotalFulfillments.Int64() { + err2 = errors.New("batchTotalRequests is not equal to batchTotalFulfillments") + errs = append(errs, err2) + } + t.Logf("Batch FulfillRandomness total requests/fulfillments equal err: %+v", err2) + + t.Logf("randomness from 
redeemRandomness: %s %s", rw1.String(), rw2.String()) + t.Logf("randomness from fulfillRandomness: %s", rw3.String()) + t.Logf("randomness from batch fulfillRandomness: %s %s", rw4.String(), rw5.String()) + t.Logf("total batch requested and fulfilled: %d %d", batchTotalRequests, batchTotalFulfillments) + + for _, err := range errs { + if err != nil { + return false + } + } + return true + }, testutils.WaitTimeout(t), 5*time.Second).Should(gomega.BeTrue()) +} + +func setDKGConfig( + t *testing.T, + uni ocr2vrfUniverse, + onchainPubKeys []common.Address, + transmitters []common.Address, + f uint8, + oracleIdentities []confighelper2.OracleIdentityExtra, + signKeys []dkgsignkey.Key, + encryptKeys []dkgencryptkey.Key, + keyID [32]byte, +) { + var ( + signingPubKeys []kyber.Point + encryptPubKeys []kyber.Point + ) + for i := range signKeys { + signingPubKeys = append(signingPubKeys, signKeys[i].PublicKey) + encryptPubKeys = append(encryptPubKeys, encryptKeys[i].PublicKey) + } + + offchainConfig, err := ocr2dkg.OffchainConfig( + encryptPubKeys, + signingPubKeys, + &altbn_128.G1{}, + &ocr2vrftypes.PairingTranslation{ + Suite: &altbn_128.PairingSuite{}, + }) + require.NoError(t, err) + onchainConfig, err := ocr2dkg.OnchainConfig(keyID) + require.NoError(t, err) + + var schedule []int + for range oracleIdentities { + schedule = append(schedule, 1) + } + + _, _, f, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper2.ContractSetConfigArgsForTests( + 30*time.Second, + 10*time.Second, + 20*time.Second, + 2*time.Second, + 20*time.Second, + 3, + schedule, + oracleIdentities, + offchainConfig, + 50*time.Millisecond, + 10*time.Second, + 10*time.Second, + 100*time.Millisecond, + 1*time.Second, + int(f), + onchainConfig) + require.NoError(t, err) + + _, err = uni.dkg.SetConfig(uni.owner, onchainPubKeys, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) + require.NoError(t, err) + + uni.backend.Commit() +} + +func setVRFConfig( + t 
*testing.T, + uni ocr2vrfUniverse, + onchainPubKeys []common.Address, + transmitters []common.Address, + f uint8, + oracleIdentities []confighelper2.OracleIdentityExtra, + confDelaysSl []int, + keyID [32]byte, +) { + offchainConfig := ocr2vrf.OffchainConfig(&ocr2vrftypes.CoordinatorConfig{ + CacheEvictionWindowSeconds: 1, + BatchGasLimit: 5_000_000, + CoordinatorOverhead: 50_000, + CallbackOverhead: 50_000, + BlockGasOverhead: 50_000, + LookbackBlocks: 1_000, + }) + + confDelays := make(map[uint32]struct{}) + for _, c := range confDelaysSl { + confDelays[uint32(c)] = struct{}{} + } + + onchainConfig := ocr2vrf.OnchainConfig(confDelays) + + var schedule []int + for range oracleIdentities { + schedule = append(schedule, 1) + } + + _, _, f, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper2.ContractSetConfigArgsForTests( + 30*time.Second, + 10*time.Second, + 20*time.Second, + 2*time.Second, + 20*time.Second, + 3, + schedule, + oracleIdentities, + offchainConfig, + 50*time.Millisecond, + 10*time.Second, + 10*time.Second, + 100*time.Millisecond, + 1*time.Second, + int(f), + onchainConfig) + require.NoError(t, err) + + _, err = uni.beacon.SetConfig( + uni.owner, onchainPubKeys, transmitters, f, onchainConfig, offchainConfigVersion, offchainConfig) + require.NoError(t, err) + + uni.backend.Commit() +} + +func randomKeyID(t *testing.T) (r [32]byte) { + _, err := rand.Read(r[:]) + require.NoError(t, err) + return +} + +func ptr[T any](v T) *T { return &v } diff --git a/core/services/ocr2/plugins/ocr2vrf/juelsfeecoin/link_eth_price_provider.go b/core/services/ocr2/plugins/ocr2vrf/juelsfeecoin/link_eth_price_provider.go new file mode 100644 index 00000000..86d25556 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/juelsfeecoin/link_eth_price_provider.go @@ -0,0 +1,109 @@ +package juelsfeecoin + +import ( + "context" + "math/big" + "runtime" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + 
"github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-vrf/types" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/aggregator_v3_interface" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// linkEthPriceProvider provides conversation rate between Link and native token using price feeds +type linkEthPriceProvider struct { + aggregator aggregator_v3_interface.AggregatorV3InterfaceInterface + timeout time.Duration + interval time.Duration + lock sync.RWMutex + stop chan struct{} + currentJuelsPerFeeCoin *big.Int + lggr logger.Logger +} + +var _ types.JuelsPerFeeCoin = (*linkEthPriceProvider)(nil) + +func NewLinkEthPriceProvider( + linkEthFeedAddress common.Address, + client evmclient.Client, + timeout time.Duration, + interval time.Duration, + logger logger.Logger, +) (types.JuelsPerFeeCoin, error) { + aggregator, err := aggregator_v3_interface.NewAggregatorV3Interface(linkEthFeedAddress, client) + if err != nil { + return nil, errors.Wrap(err, "new aggregator v3 interface") + } + + if timeout >= interval { + return nil, errors.New("timeout must be less than interval") + } + + p := &linkEthPriceProvider{ + aggregator: aggregator, + timeout: timeout, + interval: interval, + currentJuelsPerFeeCoin: big.NewInt(0), + stop: make(chan struct{}), + lggr: logger, + } + + // Begin updating JuelsPerFeeCoin. + // Stop fetching price updates on garbage collection, as to avoid a leaked goroutine. + go p.run() + runtime.SetFinalizer(p, func(p *linkEthPriceProvider) { p.stop <- struct{}{} }) + + return p, nil +} + +// Run updates the JuelsPerFeeCoin value at a regular interval, until stopped. +// Do not block the main thread, such that updates are always timely. 
+func (p *linkEthPriceProvider) run() { + ticker := time.NewTicker(p.interval) + for { + select { + case <-ticker.C: + go p.updateJuelsPerFeeCoin() + case <-p.stop: + ticker.Stop() + return + } + } +} + +// JuelsPerFeeCoin returns the current JuelsPerFeeCoin value, threadsafe. +func (p *linkEthPriceProvider) JuelsPerFeeCoin() (*big.Int, error) { + p.lock.RLock() + defer p.lock.RUnlock() + return p.currentJuelsPerFeeCoin, nil +} + +// Get current JuelsPerFeeCoin value from aggregator contract. +// If the RPC call fails, log the error and return. +func (p *linkEthPriceProvider) updateJuelsPerFeeCoin() { + // Ensure writes to currentJuelsPerFeeCoin are threadsafe. + p.lock.Lock() + defer p.lock.Unlock() + + ctx, cancel := context.WithTimeout(context.Background(), p.timeout) + defer cancel() + roundData, err := p.aggregator.LatestRoundData(&bind.CallOpts{Context: ctx}) + + // For RPC issues, set the most recent price to 0. This way, stale prices will not be transmitted, + // and zero-observations can be ignored in OCR and on-chain. + if err != nil { + p.currentJuelsPerFeeCoin = big.NewInt(0) + return + } + + // Update JuelsPerFeeCoin to the obtained value. 
+ p.currentJuelsPerFeeCoin = roundData.Answer +} diff --git a/core/services/ocr2/plugins/ocr2vrf/juelsfeecoin/link_eth_price_provider_test.go b/core/services/ocr2/plugins/ocr2vrf/juelsfeecoin/link_eth_price_provider_test.go new file mode 100644 index 00000000..172c52ae --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/juelsfeecoin/link_eth_price_provider_test.go @@ -0,0 +1,91 @@ +package juelsfeecoin + +import ( + "errors" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/aggregator_v3_interface" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/mocks" +) + +func Test_JuelsPerFeeCoin(t *testing.T) { + t.Parallel() + + t.Run("returns juels per fee coin", func(t *testing.T) { + mockAggregator := mocks.NewAggregatorV3Interface(t) + latestRoundData := aggregator_v3_interface.LatestRoundData{Answer: big.NewInt(10000)} + mockAggregator.On("LatestRoundData", mock.Anything).Return(latestRoundData, nil) + + // Start linkEthPriceProvider. + provider := &linkEthPriceProvider{ + aggregator: mockAggregator, + timeout: time.Second / 2, + interval: time.Second, + stop: make(chan struct{}), + currentJuelsPerFeeCoin: big.NewInt(0), + lggr: logger.TestLogger(t), + } + go provider.run() + t.Cleanup(func() { close(provider.stop) }) + + // Assert correct initial price. + price, err := provider.JuelsPerFeeCoin() + require.NoError(t, err) + assert.Equal(t, int64(0), price.Int64()) + + // Wait until the price is updated. + time.Sleep(2 * time.Second) + + // Ensure the correct price is returned. 
+ price, err = provider.JuelsPerFeeCoin() + require.NoError(t, err) + assert.Equal(t, int64(10000), price.Int64()) + }) + + t.Run("returns juels per fee coin (error updating)", func(t *testing.T) { + mockAggregator := mocks.NewAggregatorV3Interface(t) + mockAggregator.On("LatestRoundData", mock.Anything).Return(aggregator_v3_interface.LatestRoundData{}, + errors.New("could not fetch")) + + // Start linkEthPriceProvider. + provider := &linkEthPriceProvider{ + aggregator: mockAggregator, + timeout: time.Second / 2, + interval: time.Second, + stop: make(chan struct{}), + currentJuelsPerFeeCoin: big.NewInt(0), + lggr: logger.TestLogger(t), + } + go provider.run() + t.Cleanup(func() { close(provider.stop) }) + + // Assert correct initial price. + price, err := provider.JuelsPerFeeCoin() + require.NoError(t, err) + assert.Equal(t, int64(0), price.Int64()) + + // Wait until the price is updated. + time.Sleep(2 * time.Second) + + // Ensure the correct price is returned. + price, err = provider.JuelsPerFeeCoin() + require.NoError(t, err) + assert.Equal(t, int64(0), price.Int64()) + }) + + t.Run("errors out for timeout >= interval", func(t *testing.T) { + evmClient := evmclimocks.NewClient(t) + _, err := NewLinkEthPriceProvider(common.Address{}, evmClient, time.Second, time.Second, logger.TestLogger(t)) + require.Error(t, err) + require.Equal(t, "timeout must be less than interval", err.Error()) + }) +} diff --git a/core/services/ocr2/plugins/ocr2vrf/reasonablegasprice/reasonable_gas_price_provider.go b/core/services/ocr2/plugins/ocr2vrf/reasonablegasprice/reasonable_gas_price_provider.go new file mode 100644 index 00000000..43d0b0e6 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/reasonablegasprice/reasonable_gas_price_provider.go @@ -0,0 +1,42 @@ +package reasonablegasprice + +import ( + "math/big" + "time" + + "github.com/goplugin/plugin-vrf/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" +) + +// reasonableGasPriceProvider provides an estimate for the average gas price +type reasonableGasPriceProvider struct { + estimator gas.EvmFeeEstimator + timeout time.Duration + maxGasPrice *assets.Wei + supportsDynamicFee bool +} + +var _ types.ReasonableGasPrice = (*reasonableGasPriceProvider)(nil) + +func NewReasonableGasPriceProvider( + estimator gas.EvmFeeEstimator, + timeout time.Duration, + maxGasPrice *assets.Wei, + supportsDynamicFee bool, +) types.ReasonableGasPrice { + return &reasonableGasPriceProvider{ + estimator: estimator, + timeout: timeout, + maxGasPrice: maxGasPrice, + supportsDynamicFee: supportsDynamicFee, + } +} + +// TODO: implement this function to use a gas estimator. This change can be rolled out +// to all nodes while the on-chain `useReasonableGasPrice` flag is disabled. Once reasonable +// gas prices reported by nodes become 'reasonable' the flag can be enabled. +func (r *reasonableGasPriceProvider) ReasonableGasPrice() (*big.Int, error) { + return big.NewInt(0), nil +} diff --git a/core/services/ocr2/plugins/ocr2vrf/reasonablegasprice/reasonable_gas_price_test.go b/core/services/ocr2/plugins/ocr2vrf/reasonablegasprice/reasonable_gas_price_test.go new file mode 100644 index 00000000..11f4bf04 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/reasonablegasprice/reasonable_gas_price_test.go @@ -0,0 +1,22 @@ +package reasonablegasprice + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" +) + +func Test_ReasonableGasPrice(t *testing.T) { + t.Parallel() + + t.Run("returns reasonable gas price", func(t *testing.T) { + r := NewReasonableGasPriceProvider(nil, 1*time.Second, assets.GWei(100), true) + g, err := r.ReasonableGasPrice() + require.NoError(t, err) + + require.Equal(t, int64(0), g.Int64()) + }) +} diff --git a/core/services/ocr2/plugins/ocr2vrf/reportserializer/report_serializer.go 
b/core/services/ocr2/plugins/ocr2vrf/reportserializer/report_serializer.go new file mode 100644 index 00000000..59ee8e72 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/reportserializer/report_serializer.go @@ -0,0 +1,62 @@ +package reportserializer + +import ( + "github.com/pkg/errors" + "go.dedis.ch/kyber/v3" + + "github.com/goplugin/plugin-vrf/ocr2vrf" + "github.com/goplugin/plugin-vrf/types" +) + +type reportSerializer struct { + e ocr2vrf.EthereumReportSerializer +} + +var _ types.ReportSerializer = (*reportSerializer)(nil) + +// NewReportSerializer provides a serialization component for sending byte-encoded reports on-chain. +func NewReportSerializer(encryptionGroup kyber.Group) types.ReportSerializer { + return &reportSerializer{ + e: ocr2vrf.EthereumReportSerializer{ + G: encryptionGroup, + }, + } +} + +// SerializeReport serializes an abstract report into abi-encoded bytes. +func (serializer *reportSerializer) SerializeReport(r types.AbstractReport) ([]byte, error) { + + packed, err := serializer.e.SerializeReport(r) + + if err != nil { + return nil, errors.Wrap(err, "serialize report") + } + + return packed, nil +} + +// DeserializeReport deserializes a serialized byte array into a report. +func (serializer *reportSerializer) DeserializeReport(reportBytes []byte) (types.AbstractReport, error) { + // Leaving unimplemented, as serialization here is not two-way. The object that is abi-encoded on-chain is a BeaconReport, not an AbstractReport. + // So, the AbstractReport is first converted to a BeaconReport before the encoding. Converting an AbstractReport to a BeaconReport requires + // the removal of some fields, so when it is converted back to a BeaconReport and then deserialized, those fields are missing. + // Consequently, either the returned object from this function will be an abstract report + // that has had some fields removed/zeroed, or the return type will be changed to a BeaconReport, which cannot be re-serialized. 
+ // + // Also, the need for off-chain deserialization is not currently clear. + panic("implement me") +} + +// MaxReportLength gives the max length of a report to be transmitted on-chain. +func (serializer *reportSerializer) MaxReportLength() uint { + return 150_000 // TODO: change this. +} + +// ReportLength provides the expected report length of a report. +func (serializer *reportSerializer) ReportLength(a types.AbstractReport) uint { + s, err := serializer.SerializeReport(a) + if err != nil { + return 0 + } + return uint(len(s)) +} diff --git a/core/services/ocr2/plugins/ocr2vrf/reportserializer/report_serializer_test.go b/core/services/ocr2/plugins/ocr2vrf/reportserializer/report_serializer_test.go new file mode 100644 index 00000000..05a7bc23 --- /dev/null +++ b/core/services/ocr2/plugins/ocr2vrf/reportserializer/report_serializer_test.go @@ -0,0 +1,44 @@ +package reportserializer_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-vrf/altbn_128" + "github.com/goplugin/plugin-vrf/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2vrf/reportserializer" +) + +func Test_Serialize_Deserialize(t *testing.T) { + altbn128Suite := &altbn_128.PairingSuite{} + reportSerializer := reportserializer.NewReportSerializer(altbn128Suite.G1()) + + unserializedReport := types.AbstractReport{ + JuelsPerFeeCoin: big.NewInt(10), + RecentBlockHeight: 100, + RecentBlockHash: common.HexToHash("0x002"), + Outputs: []types.AbstractVRFOutput{{ + BlockHeight: 10, + ConfirmationDelay: 20, + Callbacks: []types.AbstractCostedCallbackRequest{{ + RequestID: big.NewInt(1), + NumWords: 2, + Requester: common.HexToAddress("0x03"), + Arguments: []byte{4}, + SubscriptionID: big.NewInt(5), + GasAllowance: big.NewInt(6), + Price: big.NewInt(7), + GasPrice: big.NewInt(0), + WeiPerUnitLink: big.NewInt(0), + }}, + }}, + } + r, err := 
reportSerializer.SerializeReport(unserializedReport) + require.NoError(t, err) + require.Equal(t, uint(len(r)), reportSerializer.ReportLength(unserializedReport)) + // TODO: Add deserialization after this point to verify. +} diff --git a/core/services/ocr2/plugins/plugin.go b/core/services/ocr2/plugins/plugin.go new file mode 100644 index 00000000..10e1d71c --- /dev/null +++ b/core/services/ocr2/plugins/plugin.go @@ -0,0 +1,17 @@ +package plugins + +import ( + ocr2types "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +// OraclePlugin is the interface that every OCR2 plugin needs to implement to be able to run from the generic +// OCR2.Delegate ServicesForSpec method. +type OraclePlugin interface { + // GetPluginFactory return the ocr2types.ReportingPluginFactory object for the given Plugin. + GetPluginFactory() (plugin ocr2types.ReportingPluginFactory, err error) + // GetServices returns any additional services that the plugin might need. This can return an empty slice when + // there are no additional services needed. 
+ GetServices() (services []job.ServiceCtx, err error) +} diff --git a/core/services/ocr2/plugins/promwrapper/factory.go b/core/services/ocr2/plugins/promwrapper/factory.go new file mode 100644 index 00000000..afc174f6 --- /dev/null +++ b/core/services/ocr2/plugins/promwrapper/factory.go @@ -0,0 +1,35 @@ +package promwrapper + +import ( + "math/big" + + "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +var _ types.ReportingPluginFactory = &promFactory{} + +type promFactory struct { + wrapped types.ReportingPluginFactory + name string + chainType string + chainID *big.Int +} + +func (p *promFactory) NewReportingPlugin(config types.ReportingPluginConfig) (types.ReportingPlugin, types.ReportingPluginInfo, error) { + plugin, info, err := p.wrapped.NewReportingPlugin(config) + if err != nil { + return nil, types.ReportingPluginInfo{}, err + } + + prom := New(plugin, p.name, p.chainType, p.chainID, config, nil) + return prom, info, nil +} + +func NewPromFactory(wrapped types.ReportingPluginFactory, name, chainType string, chainID *big.Int) types.ReportingPluginFactory { + return &promFactory{ + wrapped: wrapped, + name: name, + chainType: chainType, + chainID: chainID, + } +} diff --git a/core/services/ocr2/plugins/promwrapper/mocks/prometheus_backend.go b/core/services/ocr2/plugins/promwrapper/mocks/prometheus_backend.go new file mode 100644 index 00000000..418cb276 --- /dev/null +++ b/core/services/ocr2/plugins/promwrapper/mocks/prometheus_backend.go @@ -0,0 +1,74 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// PrometheusBackend is an autogenerated mock type for the PrometheusBackend type +type PrometheusBackend struct { + mock.Mock +} + +// SetAcceptFinalizedReportToTransmitAcceptedReportLatency provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetAcceptFinalizedReportToTransmitAcceptedReportLatency(_a0 []string, _a1 float64) { + _m.Called(_a0, _a1) +} + +// SetCloseDuration provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetCloseDuration(_a0 []string, _a1 float64) { + _m.Called(_a0, _a1) +} + +// SetObservationDuration provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetObservationDuration(_a0 []string, _a1 float64) { + _m.Called(_a0, _a1) +} + +// SetObservationToReportLatency provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetObservationToReportLatency(_a0 []string, _a1 float64) { + _m.Called(_a0, _a1) +} + +// SetQueryDuration provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetQueryDuration(_a0 []string, _a1 float64) { + _m.Called(_a0, _a1) +} + +// SetQueryToObservationLatency provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetQueryToObservationLatency(_a0 []string, _a1 float64) { + _m.Called(_a0, _a1) +} + +// SetReportDuration provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetReportDuration(_a0 []string, _a1 float64) { + _m.Called(_a0, _a1) +} + +// SetReportToAcceptFinalizedReportLatency provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetReportToAcceptFinalizedReportLatency(_a0 []string, _a1 float64) { + _m.Called(_a0, _a1) +} + +// SetShouldAcceptFinalizedReportDuration provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetShouldAcceptFinalizedReportDuration(_a0 []string, _a1 float64) { 
+ _m.Called(_a0, _a1) +} + +// SetShouldTransmitAcceptedReportDuration provides a mock function with given fields: _a0, _a1 +func (_m *PrometheusBackend) SetShouldTransmitAcceptedReportDuration(_a0 []string, _a1 float64) { + _m.Called(_a0, _a1) +} + +// NewPrometheusBackend creates a new instance of PrometheusBackend. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPrometheusBackend(t interface { + mock.TestingT + Cleanup(func()) +}) *PrometheusBackend { + mock := &PrometheusBackend{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/promwrapper/plugin.go b/core/services/ocr2/plugins/promwrapper/plugin.go new file mode 100644 index 00000000..991e9738 --- /dev/null +++ b/core/services/ocr2/plugins/promwrapper/plugin.go @@ -0,0 +1,346 @@ +// promwrapper wraps another OCR2 reporting plugin and provides standardized prometheus metrics +// for each of the OCR2 phases (Query, Observation, Report, ShouldAcceptFinalizedReport, +// ShouldTransmitAcceptedReport, and Close). +package promwrapper + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +// Type assertions, buckets and labels. 
+var ( + _ types.ReportingPlugin = &promPlugin{} + _ PrometheusBackend = &defaultPrometheusBackend{} + buckets = []float64{ + float64(1 * time.Millisecond), + float64(5 * time.Millisecond), + float64(10 * time.Millisecond), + float64(50 * time.Millisecond), + float64(100 * time.Millisecond), + float64(500 * time.Millisecond), + float64(time.Second), + float64(5 * time.Second), + float64(10 * time.Second), + float64(30 * time.Second), + float64(time.Minute), + float64(2 * time.Minute), + float64(5 * time.Minute), + float64(10 * time.Minute), + } + labels = []string{"chainType", "chainID", "plugin", "oracleID", "configDigest"} + getLabelsValues = func(p *promPlugin, t types.ReportTimestamp) []string { + return []string{ + p.chainType, // chainType + p.chainID.String(), // chainID + p.name, // plugin + p.oracleID, // oracleID + common.Bytes2Hex(t.ConfigDigest[:]), // configDigest + } + } +) + +// Prometheus queries. +var ( + promQuery = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "ocr2_reporting_plugin_query_time", + Help: "The amount of time elapsed during the OCR2 plugin's Query() method", + Buckets: buckets, + }, + labels, + ) + promObservation = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "ocr2_reporting_plugin_observation_time", + Help: "The amount of time elapsed during the OCR2 plugin's Observation() method", + Buckets: buckets, + }, + labels, + ) + promReport = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "ocr2_reporting_plugin_report_time", + Help: "The amount of time elapsed during the OCR2 plugin's Report() method", + Buckets: buckets, + }, + labels, + ) + promShouldAcceptFinalizedReport = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "ocr2_reporting_plugin_should_accept_finalized_report_time", + Help: "The amount of time elapsed during the OCR2 plugin's ShouldAcceptFinalizedReport() method", + Buckets: buckets, + }, + labels, + ) + promShouldTransmitAcceptedReport = 
promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "ocr2_reporting_plugin_should_transmit_accepted_report_time", + Help: "The amount of time elapsed during the OCR2 plugin's ShouldTransmitAcceptedReport() method", + Buckets: buckets, + }, + labels, + ) + promClose = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "ocr2_reporting_plugin_close_time", + Help: "The amount of time elapsed during the OCR2 plugin's Close() method", + Buckets: buckets, + }, + []string{"chainType", "chainID", "plugin", "oracleID", "configDigest"}, + ) + promQueryToObservationLatency = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "ocr2_reporting_plugin_end_query_to_begin_observation", + Help: "The amount of time elapsed after the OCR2 node's Query() method and before its Observation() method", + Buckets: buckets, + }, + labels, + ) + promObservationToReportLatency = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "ocr2_reporting_plugin_end_observation_to_begin_report_time", + Help: "The amount of time elapsed after the OCR2 node's Observation() method and before its Report() method", + Buckets: buckets, + }, + labels, + ) + promReportToAcceptFinalizedReportLatency = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "ocr2_reporting_plugin_end_report_to_begin_accept_finalized_report", + Help: "The amount of time elapsed after the OCR2 node's Report() method and before its ShouldAcceptFinalizedReport() method", + Buckets: buckets, + }, + labels, + ) + promAcceptFinalizedReportToTransmitAcceptedReportLatency = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "ocr2_reporting_plugin_end_accept_finalized_report_to_begin_transmit_accepted_report", + Help: "The amount of time elapsed after the OCR2 node's ShouldAcceptFinalizedReport() method and before its ShouldTransmitAcceptedReport() method", + Buckets: buckets, + }, + labels, + ) +) + +//go:generate mockery --quiet --name PrometheusBackend --output ./mocks/ 
--case=underscore +type ( + // Contains interface for logging OCR telemetry. + PrometheusBackend interface { + // Intra-phase latency. + SetQueryDuration([]string, float64) + SetObservationDuration([]string, float64) + SetReportDuration([]string, float64) + SetShouldAcceptFinalizedReportDuration([]string, float64) + SetShouldTransmitAcceptedReportDuration([]string, float64) + SetCloseDuration([]string, float64) + + // Inter-phase latency. + SetQueryToObservationLatency([]string, float64) + SetObservationToReportLatency([]string, float64) + SetReportToAcceptFinalizedReportLatency([]string, float64) + SetAcceptFinalizedReportToTransmitAcceptedReportLatency([]string, float64) + } + + defaultPrometheusBackend struct{} // implements PrometheusBackend + + // promPlugin consumes a report plugin and wraps its core functions e.g Report(), Observe()... + promPlugin struct { + wrapped types.ReportingPlugin + name string + chainType string + chainID *big.Int + oracleID string + configDigest string + queryEndTimes sync.Map + observationEndTimes sync.Map + reportEndTimes sync.Map + acceptFinalizedReportEndTimes sync.Map + prometheusBackend PrometheusBackend + } +) + +func (*defaultPrometheusBackend) SetQueryDuration(labelValues []string, duration float64) { + promQuery.WithLabelValues(labelValues...).Observe(duration) +} + +func (*defaultPrometheusBackend) SetObservationDuration(labelValues []string, duration float64) { + promObservation.WithLabelValues(labelValues...).Observe(duration) +} + +func (*defaultPrometheusBackend) SetReportDuration(labelValues []string, duration float64) { + promReport.WithLabelValues(labelValues...).Observe(duration) +} + +func (*defaultPrometheusBackend) SetShouldAcceptFinalizedReportDuration(labelValues []string, duration float64) { + promShouldAcceptFinalizedReport.WithLabelValues(labelValues...).Observe(duration) +} + +func (*defaultPrometheusBackend) SetShouldTransmitAcceptedReportDuration(labelValues []string, duration float64) { + 
promShouldTransmitAcceptedReport.WithLabelValues(labelValues...).Observe(duration) +} + +func (*defaultPrometheusBackend) SetCloseDuration(labelValues []string, duration float64) { + promClose.WithLabelValues(labelValues...).Observe(duration) +} + +func (*defaultPrometheusBackend) SetQueryToObservationLatency(labelValues []string, latency float64) { + promQueryToObservationLatency.WithLabelValues(labelValues...).Observe(latency) +} + +func (*defaultPrometheusBackend) SetObservationToReportLatency(labelValues []string, latency float64) { + promObservationToReportLatency.WithLabelValues(labelValues...).Observe(latency) +} + +func (*defaultPrometheusBackend) SetReportToAcceptFinalizedReportLatency(labelValues []string, latency float64) { + promReportToAcceptFinalizedReportLatency.WithLabelValues(labelValues...).Observe(latency) +} + +func (*defaultPrometheusBackend) SetAcceptFinalizedReportToTransmitAcceptedReportLatency(labelValues []string, latency float64) { + promAcceptFinalizedReportToTransmitAcceptedReportLatency.WithLabelValues(labelValues...).Observe(latency) +} + +func New( + plugin types.ReportingPlugin, + name string, + chainType string, + chainID *big.Int, + config types.ReportingPluginConfig, + backend PrometheusBackend, +) types.ReportingPlugin { + // Apply passed-in Prometheus backend if one is given. 
+ var prometheusBackend PrometheusBackend = &defaultPrometheusBackend{} + if backend != nil { + prometheusBackend = backend + } + + return &promPlugin{ + wrapped: plugin, + name: name, + chainType: chainType, + chainID: chainID, + oracleID: fmt.Sprintf("%d", config.OracleID), + configDigest: common.Bytes2Hex(config.ConfigDigest[:]), + prometheusBackend: prometheusBackend, + } +} + +func (p *promPlugin) Query(ctx context.Context, timestamp types.ReportTimestamp) (types.Query, error) { + start := time.Now().UTC() + defer func() { + duration := float64(time.Now().UTC().Sub(start)) + p.prometheusBackend.SetQueryDuration(getLabelsValues(p, timestamp), duration) + p.queryEndTimes.Store(timestamp, time.Now().UTC()) // note time at end of Query() + }() + + return p.wrapped.Query(ctx, timestamp) +} + +func (p *promPlugin) Observation(ctx context.Context, timestamp types.ReportTimestamp, query types.Query) (types.Observation, error) { + start := time.Now().UTC() + + // Report latency between Query() and Observation(). + labelValues := getLabelsValues(p, timestamp) + if queryEndTime, ok := p.queryEndTimes.Load(timestamp); ok { + latency := float64(start.Sub(queryEndTime.(time.Time))) + p.prometheusBackend.SetQueryToObservationLatency(labelValues, latency) + p.queryEndTimes.Delete(timestamp) + } + + // Report latency for Observation() at end of call. + defer func() { + duration := float64(time.Now().UTC().Sub(start)) + p.prometheusBackend.SetObservationDuration(labelValues, duration) + p.observationEndTimes.Store(timestamp, time.Now().UTC()) // note time at end of Observe() + }() + + return p.wrapped.Observation(ctx, timestamp, query) +} + +func (p *promPlugin) Report(ctx context.Context, timestamp types.ReportTimestamp, query types.Query, observations []types.AttributedObservation) (bool, types.Report, error) { + start := time.Now().UTC() + + // Report latency between Observation() and Report(). 
+ labelValues := getLabelsValues(p, timestamp) + if observationEndTime, ok := p.observationEndTimes.Load(timestamp); ok { + latency := float64(start.Sub(observationEndTime.(time.Time))) + p.prometheusBackend.SetObservationToReportLatency(labelValues, latency) + p.observationEndTimes.Delete(timestamp) + } + + // Report latency for Report() at end of call. + defer func() { + duration := float64(time.Now().UTC().Sub(start)) + p.prometheusBackend.SetReportDuration(labelValues, duration) + p.reportEndTimes.Store(timestamp, time.Now().UTC()) // note time at end of Report() + }() + + return p.wrapped.Report(ctx, timestamp, query, observations) +} + +func (p *promPlugin) ShouldAcceptFinalizedReport(ctx context.Context, timestamp types.ReportTimestamp, report types.Report) (bool, error) { + start := time.Now().UTC() + + // Report latency between Report() and ShouldAcceptFinalizedReport(). + labelValues := getLabelsValues(p, timestamp) + if reportEndTime, ok := p.reportEndTimes.Load(timestamp); ok { + latency := float64(start.Sub(reportEndTime.(time.Time))) + p.prometheusBackend.SetReportToAcceptFinalizedReportLatency(labelValues, latency) + p.reportEndTimes.Delete(timestamp) + } + + // Report latency for ShouldAcceptFinalizedReport() at end of call. + defer func() { + duration := float64(time.Now().UTC().Sub(start)) + p.prometheusBackend.SetShouldAcceptFinalizedReportDuration(labelValues, duration) + p.acceptFinalizedReportEndTimes.Store(timestamp, time.Now().UTC()) // note time at end of ShouldAcceptFinalizedReport() + }() + + return p.wrapped.ShouldAcceptFinalizedReport(ctx, timestamp, report) +} + +func (p *promPlugin) ShouldTransmitAcceptedReport(ctx context.Context, timestamp types.ReportTimestamp, report types.Report) (bool, error) { + start := time.Now().UTC() + + // Report latency between ShouldAcceptFinalizedReport() and ShouldTransmitAcceptedReport(). 
+ labelValues := getLabelsValues(p, timestamp) + if acceptFinalizedReportEndTime, ok := p.acceptFinalizedReportEndTimes.Load(timestamp); ok { + latency := float64(start.Sub(acceptFinalizedReportEndTime.(time.Time))) + p.prometheusBackend.SetAcceptFinalizedReportToTransmitAcceptedReportLatency(labelValues, latency) + p.acceptFinalizedReportEndTimes.Delete(timestamp) + } + + defer func() { + duration := float64(time.Now().UTC().Sub(start)) + p.prometheusBackend.SetShouldTransmitAcceptedReportDuration(labelValues, duration) + }() + + return p.wrapped.ShouldTransmitAcceptedReport(ctx, timestamp, report) +} + +// Note: the 'Close' method does not have access to a report timestamp, as it is not part of report generation. +func (p *promPlugin) Close() error { + start := time.Now().UTC() + defer func() { + duration := float64(time.Now().UTC().Sub(start)) + labelValues := []string{ + p.chainType, // chainType + p.chainID.String(), // chainID + p.name, // plugin + p.oracleID, // oracleID + p.configDigest, // configDigest + } + p.prometheusBackend.SetCloseDuration(labelValues, duration) + }() + + return p.wrapped.Close() +} diff --git a/core/services/ocr2/plugins/promwrapper/plugin_test.go b/core/services/ocr2/plugins/promwrapper/plugin_test.go new file mode 100644 index 00000000..6b1f4549 --- /dev/null +++ b/core/services/ocr2/plugins/promwrapper/plugin_test.go @@ -0,0 +1,240 @@ +package promwrapper + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/promwrapper/mocks" +) + +var ( + // Intra-phase latencies. 
+ qDuration = time.Millisecond * 100 // duration of Query() + oDuration = time.Millisecond * 200 // duration of Observation() + rDuration = time.Millisecond * 300 // duration of Report() + aDuration = time.Millisecond * 400 // duration of ShouldAcceptFinalizedReport() + tDuration = time.Millisecond * 500 // duration of ShouldTransmitAcceptedReport() + cDuration = time.Millisecond * 600 // duration of Close() + + // Inter-phase latencies. + qToOLatency = time.Millisecond * 100 // latency between Query() and Observation() + oToRLatency = time.Millisecond * 200 // latency between Observation() and Report() + rToALatency = time.Millisecond * 300 // latency between Report() and ShouldAcceptFinalizedReport() + aToTLatency = time.Millisecond * 400 // latency between ShouldAcceptFinalizedReport() and ShouldTransmitAcceptedReport() + + ceiling = time.Millisecond * 700 +) + +// fakeReportingPlugin has varied intra-phase latencies. +type fakeReportingPlugin struct{} + +func (fakeReportingPlugin) Query(context.Context, types.ReportTimestamp) (types.Query, error) { + time.Sleep(qDuration) + return nil, nil +} +func (fakeReportingPlugin) Observation(context.Context, types.ReportTimestamp, types.Query) (types.Observation, error) { + time.Sleep(oDuration) + return nil, nil +} +func (fakeReportingPlugin) Report(context.Context, types.ReportTimestamp, types.Query, []types.AttributedObservation) (bool, types.Report, error) { + time.Sleep(rDuration) + return false, nil, nil +} +func (fakeReportingPlugin) ShouldAcceptFinalizedReport(context.Context, types.ReportTimestamp, types.Report) (bool, error) { + time.Sleep(aDuration) + return false, nil +} +func (fakeReportingPlugin) ShouldTransmitAcceptedReport(context.Context, types.ReportTimestamp, types.Report) (bool, error) { + time.Sleep(tDuration) + return false, nil +} +func (fakeReportingPlugin) Close() error { + time.Sleep(cDuration) + return nil +} + +var _ types.ReportingPlugin = &fakeReportingPlugin{} + +func 
TestPlugin_MustInstantiate(t *testing.T) { + // Ensure instantiation without panic for no override backend. + var reportingPlugin = &fakeReportingPlugin{} + promPlugin := New(reportingPlugin, "test", "EVM", big.NewInt(1), types.ReportingPluginConfig{}, nil) + require.NotEqual(t, nil, promPlugin) + + // Ensure instantiation without panic for override provided. + backend := mocks.NewPrometheusBackend(t) + promPlugin = New(reportingPlugin, "test-2", "EVM", big.NewInt(1), types.ReportingPluginConfig{}, backend) + require.NotEqual(t, nil, promPlugin) +} + +func TestPlugin_GetLatencies(t *testing.T) { + // Use arbitrary report timestamp and label values. + configDigest := common.BytesToHash(crypto.Keccak256([]byte("foobar"))) + reportTimestamp := types.ReportTimestamp{ + ConfigDigest: types.ConfigDigest(configDigest), + Epoch: 1, + Round: 1, + } + var assertCorrectLabelValues = func(labelValues []string) { + require.Equal( + t, + []string{ + "EVM", + "1", + "test-plugin", + "0", + common.Bytes2Hex(configDigest[:]), + }, labelValues) + } + + // Instantiate prometheus backend mock. + backend := mocks.NewPrometheusBackend(t) + + // Assert intra-phase latencies. 
+ backend.On("SetQueryDuration", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + labelValues := args[0].([]string) + duration := time.Duration(args[1].(float64)) + assertCorrectLabelValues(labelValues) + require.Greater(t, duration, qDuration) + require.Less(t, duration, oDuration) + }).Return() + backend.On("SetObservationDuration", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + labelValues := args[0].([]string) + duration := time.Duration(args[1].(float64)) + assertCorrectLabelValues(labelValues) + require.Greater(t, duration, oDuration) + require.Less(t, duration, rDuration) + }).Return() + backend.On("SetReportDuration", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + labelValues := args[0].([]string) + duration := time.Duration(args[1].(float64)) + assertCorrectLabelValues(labelValues) + require.Greater(t, duration, rDuration) + require.Less(t, duration, aDuration) + }).Return() + backend.On("SetShouldAcceptFinalizedReportDuration", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + labelValues := args[0].([]string) + duration := time.Duration(args[1].(float64)) + assertCorrectLabelValues(labelValues) + require.Greater(t, duration, aDuration) + require.Less(t, duration, tDuration) + }).Return() + backend.On("SetShouldTransmitAcceptedReportDuration", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + labelValues := args[0].([]string) + duration := time.Duration(args[1].(float64)) + assertCorrectLabelValues(labelValues) + require.Greater(t, duration, tDuration) + require.Less(t, duration, cDuration) + }).Return() + + // Assert inter-phase latencies. 
+ backend.On("SetQueryToObservationLatency", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + labelValues := args[0].([]string) + latency := time.Duration(args[1].(float64)) + assertCorrectLabelValues(labelValues) + require.Greater(t, latency, qToOLatency) + require.Less(t, latency, oToRLatency) + }).Return() + backend.On("SetObservationToReportLatency", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + labelValues := args[0].([]string) + latency := time.Duration(args[1].(float64)) + assertCorrectLabelValues(labelValues) + require.Greater(t, latency, oToRLatency) + require.Less(t, latency, rToALatency) + }).Return() + backend.On("SetReportToAcceptFinalizedReportLatency", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + labelValues := args[0].([]string) + latency := time.Duration(args[1].(float64)) + assertCorrectLabelValues(labelValues) + require.Greater(t, latency, rToALatency) + require.Less(t, latency, aToTLatency) + }).Return() + backend.On("SetAcceptFinalizedReportToTransmitAcceptedReportLatency", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + labelValues := args[0].([]string) + latency := time.Duration(args[1].(float64)) + assertCorrectLabelValues(labelValues) + require.Greater(t, latency, aToTLatency) + require.Less(t, latency, cDuration) + }).Return() + + // Assert close correctly reported. + backend.On("SetCloseDuration", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + labelValues := args[0].([]string) + latency := time.Duration(args[1].(float64)) + require.Equal( + t, + []string{ + "EVM", + "1", + "test-plugin", + "0", + common.Bytes2Hex(configDigest[:]), + }, labelValues) + require.Greater(t, latency, cDuration) + require.Less(t, latency, ceiling) + }).Return() + + // Create promPlugin with mocked prometheus backend. 
+ var reportingPlugin = &fakeReportingPlugin{} + var promPlugin *promPlugin = New( + reportingPlugin, + "test-plugin", + "EVM", + big.NewInt(1), + types.ReportingPluginConfig{ConfigDigest: reportTimestamp.ConfigDigest}, + backend, + ).(*promPlugin) + require.NotEqual(t, nil, promPlugin) + + ctx := testutils.Context(t) + + // Run OCR methods. + _, err := promPlugin.Query(ctx, reportTimestamp) + require.NoError(t, err) + _, ok := promPlugin.queryEndTimes.Load(reportTimestamp) + require.Equal(t, true, ok) + time.Sleep(qToOLatency) + + _, err = promPlugin.Observation(ctx, reportTimestamp, nil) + require.NoError(t, err) + _, ok = promPlugin.queryEndTimes.Load(reportTimestamp) + require.Equal(t, false, ok) + _, ok = promPlugin.observationEndTimes.Load(reportTimestamp) + require.Equal(t, true, ok) + time.Sleep(oToRLatency) + + _, _, err = promPlugin.Report(ctx, reportTimestamp, nil, nil) + require.NoError(t, err) + _, ok = promPlugin.observationEndTimes.Load(reportTimestamp) + require.Equal(t, false, ok) + _, ok = promPlugin.reportEndTimes.Load(reportTimestamp) + require.Equal(t, true, ok) + time.Sleep(rToALatency) + + _, err = promPlugin.ShouldAcceptFinalizedReport(ctx, reportTimestamp, nil) + require.NoError(t, err) + _, ok = promPlugin.reportEndTimes.Load(reportTimestamp) + require.Equal(t, false, ok) + _, ok = promPlugin.acceptFinalizedReportEndTimes.Load(reportTimestamp) + require.Equal(t, true, ok) + time.Sleep(aToTLatency) + + _, err = promPlugin.ShouldTransmitAcceptedReport(ctx, reportTimestamp, nil) + require.NoError(t, err) + _, ok = promPlugin.acceptFinalizedReportEndTimes.Load(reportTimestamp) + require.Equal(t, false, ok) + + // Close. 
+ err = promPlugin.Close() + require.NoError(t, err) +} diff --git a/core/services/ocr2/plugins/s4/config.go b/core/services/ocr2/plugins/s4/config.go new file mode 100644 index 00000000..917fbf30 --- /dev/null +++ b/core/services/ocr2/plugins/s4/config.go @@ -0,0 +1,9 @@ +package s4 + +type PluginConfig struct { + ProductName string + NSnapshotShards uint + MaxObservationEntries uint + MaxReportEntries uint + MaxDeleteExpiredEntries uint +} diff --git a/core/services/ocr2/plugins/s4/factory.go b/core/services/ocr2/plugins/s4/factory.go new file mode 100644 index 00000000..f22ef247 --- /dev/null +++ b/core/services/ocr2/plugins/s4/factory.go @@ -0,0 +1,42 @@ +package s4 + +import ( + s4_orm "github.com/goplugin/pluginv3.0/v2/core/services/s4" + + "github.com/goplugin/libocr/commontypes" + "github.com/goplugin/libocr/offchainreporting2/types" +) + +const S4ReportingPluginName = "S4Reporting" + +type PluginConfigDecoder func([]byte) (*PluginConfig, *types.ReportingPluginLimits, error) + +type S4ReportingPluginFactory struct { + Logger commontypes.Logger + ORM s4_orm.ORM + ConfigDecoder PluginConfigDecoder +} + +var _ types.ReportingPluginFactory = (*S4ReportingPluginFactory)(nil) + +// NewReportingPlugin complies with ReportingPluginFactory +func (f S4ReportingPluginFactory) NewReportingPlugin(rpConfig types.ReportingPluginConfig) (types.ReportingPlugin, types.ReportingPluginInfo, error) { + config, limits, err := f.ConfigDecoder(rpConfig.OffchainConfig) + if err != nil { + f.Logger.Error("unable to decode reporting plugin config", commontypes.LogFields{ + "digest": rpConfig.ConfigDigest.String(), + }) + return nil, types.ReportingPluginInfo{}, err + } + info := types.ReportingPluginInfo{ + Name: S4ReportingPluginName, + UniqueReports: false, + Limits: *limits, + } + plugin, err := NewReportingPlugin(f.Logger, config, f.ORM) + if err != nil { + f.Logger.Error("unable to create S4 reporting plugin", commontypes.LogFields{}) + return nil, types.ReportingPluginInfo{}, 
err + } + return plugin, info, nil +} diff --git a/core/services/ocr2/plugins/s4/factory_test.go b/core/services/ocr2/plugins/s4/factory_test.go new file mode 100644 index 00000000..c78e8ee4 --- /dev/null +++ b/core/services/ocr2/plugins/s4/factory_test.go @@ -0,0 +1,74 @@ +package s4_test + +import ( + "errors" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/s4" + s4_mocks "github.com/goplugin/pluginv3.0/v2/core/services/s4/mocks" + + commonlogger "github.com/goplugin/plugin-common/pkg/logger" + + "github.com/goplugin/libocr/offchainreporting2/types" + + "github.com/stretchr/testify/require" +) + +func TestS4ReportingPluginFactory_NewReportingPlugin(t *testing.T) { + t.Parallel() + + logger := commonlogger.NewOCRWrapper(logger.TestLogger(t), true, func(msg string) {}) + orm := s4_mocks.NewORM(t) + + f := s4.S4ReportingPluginFactory{ + Logger: logger, + ORM: orm, + ConfigDecoder: func([]byte) (*s4.PluginConfig, *types.ReportingPluginLimits, error) { + return &s4.PluginConfig{ + ProductName: "test", + NSnapshotShards: 1, + MaxObservationEntries: 10, + MaxReportEntries: 20, + MaxDeleteExpiredEntries: 30, + }, &types.ReportingPluginLimits{ + MaxQueryLength: 100, + MaxObservationLength: 200, + MaxReportLength: 300, + }, nil + }, + } + + rpConfig := types.ReportingPluginConfig{ + OffchainConfig: make([]byte, 100), + } + plugin, pluginInfo, err := f.NewReportingPlugin(rpConfig) + require.NoError(t, err) + require.NotNil(t, plugin) + require.Equal(t, types.ReportingPluginInfo{ + Name: s4.S4ReportingPluginName, + UniqueReports: false, + Limits: types.ReportingPluginLimits{ + MaxQueryLength: 100, + MaxObservationLength: 200, + MaxReportLength: 300, + }, + }, pluginInfo) + + t.Run("error while decoding", func(t *testing.T) { + f := s4.S4ReportingPluginFactory{ + Logger: logger, + ORM: orm, + ConfigDecoder: func([]byte) (*s4.PluginConfig, *types.ReportingPluginLimits, error) { + return nil, nil, 
errors.New("some error") + }, + } + + rpConfig := types.ReportingPluginConfig{ + OffchainConfig: make([]byte, 100), + } + plugin, _, err := f.NewReportingPlugin(rpConfig) + require.ErrorContains(t, err, "some error") + require.Nil(t, plugin) + }) +} diff --git a/core/services/ocr2/plugins/s4/integration_test.go b/core/services/ocr2/plugins/s4/integration_test.go new file mode 100644 index 00000000..0bea1d79 --- /dev/null +++ b/core/services/ocr2/plugins/s4/integration_test.go @@ -0,0 +1,413 @@ +package s4_test + +import ( + "context" + "crypto/ecdsa" + "fmt" + "maps" + "math/rand" + "testing" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/s4" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + s4_svc "github.com/goplugin/pluginv3.0/v2/core/services/s4" + + commonlogger "github.com/goplugin/plugin-common/pkg/logger" + + "github.com/goplugin/libocr/commontypes" + "github.com/goplugin/libocr/offchainreporting2/types" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/multierr" +) + +// Disclaimer: this is not a true integration test, it's more of a S4 feature test, on purpose. +// The purpose of the test is to make sure that S4 plugin works as expected in conjunction with Postgres ORM. +// Because of simplification, this emulates OCR2 rounds, not involving libocr. +// A proper integration test would be done per product, e.g. as a part of Functions integration test. 
+ +type don struct { + size int + config *s4.PluginConfig + logger logger.SugaredLogger + orms []s4_svc.ORM + plugins []types.ReportingPlugin +} + +func newDON(t *testing.T, size int, config *s4.PluginConfig) *don { + t.Helper() + + logger := logger.TestLogger(t) + db := pgtest.NewSqlxDB(t) + + orms := make([]s4_svc.ORM, size) + plugins := make([]types.ReportingPlugin, size) + + for i := 0; i < size; i++ { + ns := fmt.Sprintf("s4_int_test_%d", i) + orm := s4_svc.NewPostgresORM(db, logger, pgtest.NewQConfig(false), s4_svc.SharedTableName, ns) + orms[i] = orm + + ocrLogger := commonlogger.NewOCRWrapper(logger, true, func(msg string) {}) + plugin, err := s4.NewReportingPlugin(ocrLogger, config, orm) + require.NoError(t, err) + plugins[i] = plugin + } + + return &don{ + size: size, + config: config, + logger: logger, + orms: orms, + plugins: plugins, + } +} + +func (d *don) simulateOCR(ctx context.Context, rounds int) []error { + errors := make([]error, d.size) + + for i := 0; i < rounds && ctx.Err() == nil; i++ { + leaderIndex := i % d.size + leader := d.plugins[leaderIndex] + query, err := leader.Query(ctx, types.ReportTimestamp{}) + if err != nil { + errors[leaderIndex] = multierr.Combine(errors[leaderIndex], err) + continue + } + + aos := make([]types.AttributedObservation, 0) + for i := 0; i < d.size; i++ { + observation, err2 := d.plugins[i].Observation(ctx, types.ReportTimestamp{}, query) + if err2 != nil { + errors[i] = multierr.Combine(errors[i], err2) + continue + } + aos = append(aos, types.AttributedObservation{ + Observation: observation, + Observer: commontypes.OracleID(i), + }) + } + if len(aos) < d.size-1 { + continue + } + + _, report, err := leader.Report(ctx, types.ReportTimestamp{}, query, aos) + if err != nil { + errors[leaderIndex] = multierr.Combine(errors[leaderIndex], err) + continue + } + + for i := 0; i < d.size; i++ { + _, err2 := d.plugins[i].ShouldAcceptFinalizedReport(ctx, types.ReportTimestamp{}, report) + errors[i] = 
multierr.Combine(errors[i], err2) + } + } + + return errors +} + +func compareSnapshots(s1, s2 []*s4_svc.SnapshotRow) bool { + if len(s1) != len(s2) { + return false + } + m1 := make(map[string]struct{}, len(s1)) + m2 := make(map[string]struct{}, len(s2)) + for i := 0; i < len(s1); i++ { + k1 := fmt.Sprintf("%s_%d_%d", s1[i].Address.String(), s1[i].SlotId, s1[i].Version) + k2 := fmt.Sprintf("%s_%d_%d", s2[i].Address.String(), s2[i].SlotId, s2[i].Version) + m1[k1] = struct{}{} + m2[k2] = struct{}{} + } + return maps.Equal(m1, m2) +} + +func filter[T any](ss []T, test func(T) bool) (ret []T) { + for _, s := range ss { + if test(s) { + ret = append(ret, s) + } + } + return +} + +func checkNoErrors(t *testing.T, errors []error) { + t.Helper() + + for i, err := range errors { + assert.NoError(t, err, "oracle %d", i) + } +} + +func checkNoUnconfirmedRows(ctx context.Context, t *testing.T, orm s4_svc.ORM, limit uint) { + t.Helper() + + rows, err := orm.GetUnconfirmedRows(limit, pg.WithParentCtx(ctx)) + assert.NoError(t, err) + assert.Empty(t, rows) +} + +func TestS4Integration_HappyDON(t *testing.T) { + don := newDON(t, 4, createPluginConfig(100)) + ctx := testutils.Context(t) + + // injecting new records + rows := generateTestOrmRows(t, 10, time.Minute) + for _, row := range rows { + err := don.orms[0].Update(row, pg.WithParentCtx(ctx)) + require.NoError(t, err) + } + originSnapshot, err := don.orms[0].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + + // S4 to propagate all records in one OCR round + errors := don.simulateOCR(ctx, 1) + checkNoErrors(t, errors) + + for i := 0; i < don.size; i++ { + snapshot, err := don.orms[i].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + equal := compareSnapshots(originSnapshot, snapshot) + assert.True(t, equal, "oracle %d", i) + checkNoUnconfirmedRows(ctx, t, don.orms[i], 10) + } +} + +func TestS4Integration_HappyDON_4X(t *testing.T) { + don := 
newDON(t, 4, createPluginConfig(100)) + ctx := testutils.Context(t) + + // injecting new records to all nodes + for o := 0; o < don.size; o++ { + rows := generateTestOrmRows(t, 10, time.Minute) + for _, row := range rows { + err := don.orms[o].Update(row, pg.WithParentCtx(ctx)) + require.NoError(t, err) + } + } + + // S4 to propagate all records in one OCR round + errors := don.simulateOCR(ctx, 1) + checkNoErrors(t, errors) + + firstSnapshot, err := don.orms[0].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + + for i := 1; i < don.size; i++ { + snapshot, err := don.orms[i].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + equal := compareSnapshots(firstSnapshot, snapshot) + assert.True(t, equal, "oracle %d", i) + checkNoUnconfirmedRows(ctx, t, don.orms[i], 10) + } +} + +func TestS4Integration_WrongSignature(t *testing.T) { + don := newDON(t, 4, createPluginConfig(100)) + ctx := testutils.Context(t) + + // injecting new records + rows := generateTestOrmRows(t, 10, time.Minute) + rows[0].Signature = rows[1].Signature + for _, row := range rows { + err := don.orms[0].Update(row, pg.WithParentCtx(ctx)) + require.NoError(t, err) + } + originSnapshot, err := don.orms[0].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + originSnapshot = filter(originSnapshot, func(row *s4_svc.SnapshotRow) bool { + return row.Address.Cmp(rows[0].Address) != 0 || row.SlotId != rows[0].SlotId + }) + require.Len(t, originSnapshot, len(rows)-1) + + // S4 to propagate valid records in one OCR round + errors := don.simulateOCR(ctx, 1) + checkNoErrors(t, errors) + + for i := 1; i < don.size; i++ { + snapshot, err2 := don.orms[i].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err2) + equal := compareSnapshots(originSnapshot, snapshot) + assert.True(t, equal, "oracle %d", i) + } + + // record with a wrong signature must remain 
unconfirmed + ur, err := don.orms[0].GetUnconfirmedRows(10, pg.WithParentCtx(ctx)) + require.NoError(t, err) + require.Len(t, ur, 1) +} + +func TestS4Integration_MaxObservations(t *testing.T) { + config := createPluginConfig(100) + config.MaxObservationEntries = 5 + don := newDON(t, 4, config) + ctx := testutils.Context(t) + + // injecting new records + rows := generateTestOrmRows(t, 10, time.Minute) + for _, row := range rows { + err := don.orms[0].Update(row, pg.WithParentCtx(ctx)) + require.NoError(t, err) + } + originSnapshot, err := don.orms[0].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + + // It requires at least two rounds due to MaxObservationEntries = rows / 2 + errors := don.simulateOCR(ctx, 2) + checkNoErrors(t, errors) + + for i := 1; i < don.size; i++ { + snapshot, err := don.orms[i].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + equal := compareSnapshots(originSnapshot, snapshot) + assert.True(t, equal, "oracle %d", i) + } +} + +func TestS4Integration_Expired(t *testing.T) { + config := createPluginConfig(100) + config.MaxObservationEntries = 5 + don := newDON(t, 4, config) + ctx := testutils.Context(t) + + // injecting expiring records + rows := generateTestOrmRows(t, 10, time.Millisecond) + for _, row := range rows { + err := don.orms[0].Update(row, pg.WithParentCtx(ctx)) + require.NoError(t, err) + } + + // within one round, all records will be GC-ed + time.Sleep(testutils.TestInterval) + errors := don.simulateOCR(ctx, 1) + checkNoErrors(t, errors) + + for i := 0; i < don.size; i++ { + snapshot, err := don.orms[i].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + require.Len(t, snapshot, 0) + } +} + +func TestS4Integration_NSnapshotShards(t *testing.T) { + config := createPluginConfig(10000) + config.NSnapshotShards = 4 + don := newDON(t, 4, config) + ctx := testutils.Context(t) + + // injecting lots of new 
records (to be close to normal address distribution) + rows := generateTestOrmRows(t, 1000, time.Minute) + for _, row := range rows { + err := don.orms[0].Update(row, pg.WithParentCtx(ctx)) + require.NoError(t, err) + } + originSnapshot, err := don.orms[0].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + + // this still requires one round, because Observation takes all unconfirmed rows + errors := don.simulateOCR(ctx, 1) + checkNoErrors(t, errors) + + for i := 1; i < don.size; i++ { + snapshot, err := don.orms[i].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + equal := compareSnapshots(originSnapshot, snapshot) + assert.True(t, equal, "oracle %d", i) + checkNoUnconfirmedRows(ctx, t, don.orms[i], 1000) + } +} + +func TestS4Integration_OneNodeOutOfSync(t *testing.T) { + don := newDON(t, 4, createPluginConfig(100)) + ctx := testutils.Context(t) + + // injecting same confirmed records to all nodes but the last one + rows := generateConfirmedTestOrmRows(t, 10, time.Minute) + for o := 0; o < don.size-1; o++ { + for _, row := range rows { + err := don.orms[o].Update(row, pg.WithParentCtx(ctx)) + require.NoError(t, err) + } + } + + // all records will be propagated to the last node when it is a leader + // leader selection is round-robin, so the 4th iteration picks the last node + errors := don.simulateOCR(ctx, 4) + checkNoErrors(t, errors) + + firstSnapshot, err := don.orms[0].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + lastSnapshot, err := don.orms[don.size-1].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + equal := compareSnapshots(firstSnapshot, lastSnapshot) + assert.True(t, equal) + checkNoUnconfirmedRows(ctx, t, don.orms[don.size-1], 10) +} + +func TestS4Integration_RandomState(t *testing.T) { + don := newDON(t, 4, createPluginConfig(1000)) + ctx := testutils.Context(t) + + type user 
struct { + privateKey *ecdsa.PrivateKey + address *big.Big + } + + nUsers := 100 + users := make([]user, nUsers) + for i := 0; i < nUsers; i++ { + pk, addr := testutils.NewPrivateKeyAndAddress(t) + users[i] = user{pk, big.New(addr.Big())} + } + + // generating test records + for o := 0; o < don.size; o++ { + for u := 0; u < nUsers; u++ { + user := users[u] + row := &s4_svc.Row{ + Address: user.address, + SlotId: uint(u), + Version: uint64(rand.Intn(don.size)), + Confirmed: rand.Intn(2) == 0, + Expiration: time.Now().UTC().Add(time.Minute).UnixMilli(), + Payload: cltest.MustRandomBytes(t, 64), + } + env := &s4_svc.Envelope{ + Address: common.BytesToAddress(user.address.Bytes()).Bytes(), + SlotID: row.SlotId, + Version: row.Version, + Expiration: row.Expiration, + Payload: row.Payload, + } + sig, err := env.Sign(user.privateKey) + require.NoError(t, err) + row.Signature = sig + err = don.orms[o].Update(row, pg.WithParentCtx(ctx)) + require.NoError(t, err) + } + } + + // for any state, all nodes should converge to the same snapshot + errors := don.simulateOCR(ctx, 4) + checkNoErrors(t, errors) + + firstSnapshot, err := don.orms[0].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + require.NotEmpty(t, firstSnapshot) + checkNoUnconfirmedRows(ctx, t, don.orms[0], 1000) + + for i := 1; i < don.size; i++ { + snapshot, err := don.orms[i].GetSnapshot(s4_svc.NewFullAddressRange(), pg.WithParentCtx(ctx)) + require.NoError(t, err) + equal := compareSnapshots(firstSnapshot, snapshot) + assert.True(t, equal, "oracle %d", i) + checkNoUnconfirmedRows(ctx, t, don.orms[i], 1000) + } +} diff --git a/core/services/ocr2/plugins/s4/messages.go b/core/services/ocr2/plugins/s4/messages.go new file mode 100644 index 00000000..a2188766 --- /dev/null +++ b/core/services/ocr2/plugins/s4/messages.go @@ -0,0 +1,82 @@ +package s4 + +import ( + "bytes" + "math/big" + + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + 
"github.com/goplugin/pluginv3.0/v2/core/services/s4" + + "github.com/ethereum/go-ethereum/common" + "google.golang.org/protobuf/proto" +) + +func MarshalQuery(rows []*SnapshotRow, addressRange *s4.AddressRange) ([]byte, error) { + rr := &Query{ + AddressRange: &AddressRange{ + MinAddress: addressRange.MinAddress.Bytes(), + MaxAddress: addressRange.MaxAddress.Bytes(), + }, + Rows: rows, + } + return proto.Marshal(rr) +} + +func UnmarshalQuery(data []byte) ([]*SnapshotRow, *s4.AddressRange, error) { + addressRange := s4.NewFullAddressRange() + query := &Query{} + if err := proto.Unmarshal(data, query); err != nil { + return nil, nil, err + } + if query.Rows == nil { + query.Rows = make([]*SnapshotRow, 0) + } + if query.AddressRange != nil { + addressRange = &s4.AddressRange{ + MinAddress: UnmarshalAddress(query.AddressRange.MinAddress), + MaxAddress: UnmarshalAddress(query.AddressRange.MaxAddress), + } + } + return query.Rows, addressRange, nil +} + +func MarshalRows(rows []*Row) ([]byte, error) { + rr := &Rows{ + Rows: rows, + } + return proto.Marshal(rr) +} + +func UnmarshalRows(data []byte) ([]*Row, error) { + rows := &Rows{} + if err := proto.Unmarshal(data, rows); err != nil { + return nil, err + } + if rows.Rows == nil { + rows.Rows = make([]*Row, 0) + } + return rows.Rows, nil +} + +func UnmarshalAddress(address []byte) *ubig.Big { + return ubig.New(new(big.Int).SetBytes(address)) +} + +func (row *Row) VerifySignature() error { + address := common.BytesToAddress(row.Address) + e := &s4.Envelope{ + Address: address.Bytes(), + SlotID: uint(row.Slotid), + Payload: row.Payload, + Version: row.Version, + Expiration: row.Expiration, + } + signer, err := e.GetSignerAddress(row.Signature) + if err != nil { + return err + } + if !bytes.Equal(signer.Bytes(), address.Bytes()) { + return s4.ErrWrongSignature + } + return nil +} diff --git a/core/services/ocr2/plugins/s4/messages.pb.go b/core/services/ocr2/plugins/s4/messages.pb.go new file mode 100644 index 
00000000..e629633c --- /dev/null +++ b/core/services/ocr2/plugins/s4/messages.pb.go @@ -0,0 +1,487 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: messages.proto + +package s4 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SnapshotRow struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Slotid uint32 `protobuf:"varint,2,opt,name=slotid,proto3" json:"slotid,omitempty"` + Version uint64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *SnapshotRow) Reset() { + *x = SnapshotRow{} + if protoimpl.UnsafeEnabled { + mi := &file_messages_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SnapshotRow) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SnapshotRow) ProtoMessage() {} + +func (x *SnapshotRow) ProtoReflect() protoreflect.Message { + mi := &file_messages_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SnapshotRow.ProtoReflect.Descriptor instead. 
+func (*SnapshotRow) Descriptor() ([]byte, []int) { + return file_messages_proto_rawDescGZIP(), []int{0} +} + +func (x *SnapshotRow) GetAddress() []byte { + if x != nil { + return x.Address + } + return nil +} + +func (x *SnapshotRow) GetSlotid() uint32 { + if x != nil { + return x.Slotid + } + return 0 +} + +func (x *SnapshotRow) GetVersion() uint64 { + if x != nil { + return x.Version + } + return 0 +} + +type AddressRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MinAddress []byte `protobuf:"bytes,1,opt,name=minAddress,proto3" json:"minAddress,omitempty"` + MaxAddress []byte `protobuf:"bytes,2,opt,name=maxAddress,proto3" json:"maxAddress,omitempty"` +} + +func (x *AddressRange) Reset() { + *x = AddressRange{} + if protoimpl.UnsafeEnabled { + mi := &file_messages_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddressRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddressRange) ProtoMessage() {} + +func (x *AddressRange) ProtoReflect() protoreflect.Message { + mi := &file_messages_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddressRange.ProtoReflect.Descriptor instead. 
+func (*AddressRange) Descriptor() ([]byte, []int) { + return file_messages_proto_rawDescGZIP(), []int{1} +} + +func (x *AddressRange) GetMinAddress() []byte { + if x != nil { + return x.MinAddress + } + return nil +} + +func (x *AddressRange) GetMaxAddress() []byte { + if x != nil { + return x.MaxAddress + } + return nil +} + +type Query struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AddressRange *AddressRange `protobuf:"bytes,1,opt,name=addressRange,proto3" json:"addressRange,omitempty"` + Rows []*SnapshotRow `protobuf:"bytes,2,rep,name=rows,proto3" json:"rows,omitempty"` +} + +func (x *Query) Reset() { + *x = Query{} + if protoimpl.UnsafeEnabled { + mi := &file_messages_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Query) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Query) ProtoMessage() {} + +func (x *Query) ProtoReflect() protoreflect.Message { + mi := &file_messages_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Query.ProtoReflect.Descriptor instead. 
+func (*Query) Descriptor() ([]byte, []int) { + return file_messages_proto_rawDescGZIP(), []int{2} +} + +func (x *Query) GetAddressRange() *AddressRange { + if x != nil { + return x.AddressRange + } + return nil +} + +func (x *Query) GetRows() []*SnapshotRow { + if x != nil { + return x.Rows + } + return nil +} + +type Row struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Slotid uint32 `protobuf:"varint,2,opt,name=slotid,proto3" json:"slotid,omitempty"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` + Expiration int64 `protobuf:"varint,5,opt,name=expiration,proto3" json:"expiration,omitempty"` + Signature []byte `protobuf:"bytes,6,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *Row) Reset() { + *x = Row{} + if protoimpl.UnsafeEnabled { + mi := &file_messages_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Row) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Row) ProtoMessage() {} + +func (x *Row) ProtoReflect() protoreflect.Message { + mi := &file_messages_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Row.ProtoReflect.Descriptor instead. 
+func (*Row) Descriptor() ([]byte, []int) { + return file_messages_proto_rawDescGZIP(), []int{3} +} + +func (x *Row) GetAddress() []byte { + if x != nil { + return x.Address + } + return nil +} + +func (x *Row) GetSlotid() uint32 { + if x != nil { + return x.Slotid + } + return 0 +} + +func (x *Row) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *Row) GetVersion() uint64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *Row) GetExpiration() int64 { + if x != nil { + return x.Expiration + } + return 0 +} + +func (x *Row) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +type Rows struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Rows []*Row `protobuf:"bytes,1,rep,name=rows,proto3" json:"rows,omitempty"` +} + +func (x *Rows) Reset() { + *x = Rows{} + if protoimpl.UnsafeEnabled { + mi := &file_messages_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Rows) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Rows) ProtoMessage() {} + +func (x *Rows) ProtoReflect() protoreflect.Message { + mi := &file_messages_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Rows.ProtoReflect.Descriptor instead. 
+func (*Rows) Descriptor() ([]byte, []int) { + return file_messages_proto_rawDescGZIP(), []int{4} +} + +func (x *Rows) GetRows() []*Row { + if x != nil { + return x.Rows + } + return nil +} + +var File_messages_proto protoreflect.FileDescriptor + +var file_messages_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x08, 0x73, 0x34, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x59, 0x0a, 0x0b, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x6f, 0x77, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6c, 0x6f, 0x74, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x6c, 0x6f, 0x74, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x0c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6d, 0x69, 0x6e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x6e, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, + 0x0a, 0x0c, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x34, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x04, 0x72, 0x6f, + 0x77, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x15, 0x2e, 0x73, 0x34, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x6f, 0x77, 0x52, + 0x04, 0x72, 0x6f, 0x77, 0x73, 0x22, 0xa9, 0x01, 0x0a, 0x03, 0x52, 0x6f, 0x77, 0x12, 0x18, 0x0a, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6c, 0x6f, 0x74, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x6c, 0x6f, 0x74, 0x69, 0x64, 0x12, + 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x22, 0x29, 0x0a, 0x04, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x21, 0x0a, 0x04, 0x72, 0x6f, 0x77, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x34, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x42, 0x1f, 0x5a, 0x1d, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x6f, 0x63, + 0x72, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x73, 0x34, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_messages_proto_rawDescOnce sync.Once + file_messages_proto_rawDescData = file_messages_proto_rawDesc +) + +func file_messages_proto_rawDescGZIP() []byte { + file_messages_proto_rawDescOnce.Do(func() { + 
file_messages_proto_rawDescData = protoimpl.X.CompressGZIP(file_messages_proto_rawDescData) + }) + return file_messages_proto_rawDescData +} + +var file_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_messages_proto_goTypes = []interface{}{ + (*SnapshotRow)(nil), // 0: s4_types.SnapshotRow + (*AddressRange)(nil), // 1: s4_types.AddressRange + (*Query)(nil), // 2: s4_types.Query + (*Row)(nil), // 3: s4_types.Row + (*Rows)(nil), // 4: s4_types.Rows +} +var file_messages_proto_depIdxs = []int32{ + 1, // 0: s4_types.Query.addressRange:type_name -> s4_types.AddressRange + 0, // 1: s4_types.Query.rows:type_name -> s4_types.SnapshotRow + 3, // 2: s4_types.Rows.rows:type_name -> s4_types.Row + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_messages_proto_init() } +func file_messages_proto_init() { + if File_messages_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_messages_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SnapshotRow); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messages_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddressRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messages_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Query); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messages_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Row); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messages_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Rows); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_messages_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_messages_proto_goTypes, + DependencyIndexes: file_messages_proto_depIdxs, + MessageInfos: file_messages_proto_msgTypes, + }.Build() + File_messages_proto = out.File + file_messages_proto_rawDesc = nil + file_messages_proto_goTypes = nil + file_messages_proto_depIdxs = nil +} diff --git a/core/services/ocr2/plugins/s4/messages.proto b/core/services/ocr2/plugins/s4/messages.proto new file mode 100644 index 00000000..2472fe13 --- /dev/null +++ b/core/services/ocr2/plugins/s4/messages.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +option go_package = "core/services/ocr2/plugins/s4"; + +package s4_types; + +message SnapshotRow { + bytes address = 1; + uint32 slotid = 2; + uint64 version = 3; +} + +message AddressRange { + bytes minAddress = 1; + bytes maxAddress = 2; +} + +message Query { + AddressRange addressRange = 1; + repeated SnapshotRow rows = 2; +} + +message Row { + bytes address = 1; + uint32 slotid = 2; + bytes payload = 3; + uint64 version = 4; + int64 expiration = 5; + bytes signature = 6; +} + +message Rows { + repeated Row rows = 1; +} + diff --git a/core/services/ocr2/plugins/s4/messages_test.go b/core/services/ocr2/plugins/s4/messages_test.go new file mode 100644 index 00000000..fbabdb45 --- /dev/null +++ b/core/services/ocr2/plugins/s4/messages_test.go @@ -0,0 +1,122 @@ +package s4_test + +import ( + "crypto/ecdsa" + 
"testing" + "time" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/s4" + s4_svc "github.com/goplugin/pluginv3.0/v2/core/services/s4" + + "github.com/stretchr/testify/require" +) + +func Test_MarshalUnmarshalRows(t *testing.T) { + t.Parallel() + + const n = 1000 + rows := generateTestRows(t, n, time.Minute) + + data, err := s4.MarshalRows(rows) + require.NoError(t, err) + + rr, err := s4.UnmarshalRows(data) + require.NoError(t, err) + require.Len(t, rr, n) + + data2, err := s4.MarshalRows(rr) + require.NoError(t, err) + require.Equal(t, data, data2) +} + +func Test_MarshalUnmarshalQuery(t *testing.T) { + t.Parallel() + + const n = 100 + rows := generateTestOrmRows(t, n, time.Minute) + ormVersions := rowsToShapshotRows(rows) + + snapshot := make([]*s4.SnapshotRow, len(ormVersions)) + for i, v := range ormVersions { + snapshot[i] = &s4.SnapshotRow{ + Address: v.Address.Bytes(), + Slotid: uint32(v.SlotId), + Version: v.Version, + } + } + addressRange := s4_svc.NewFullAddressRange() + data, err := s4.MarshalQuery(snapshot, addressRange) + require.NoError(t, err) + + qq, ar, err := s4.UnmarshalQuery(data) + require.NoError(t, err) + require.Len(t, qq, n) + require.Equal(t, addressRange, ar) +} + +func signRow(t *testing.T, row *s4.Row, address common.Address, pk *ecdsa.PrivateKey) { + t.Helper() + + env := &s4_svc.Envelope{ + Address: address.Bytes(), + SlotID: uint(row.Slotid), + Version: row.Version, + Expiration: row.Expiration, + Payload: row.Payload, + } + sig, err := env.Sign(pk) + require.NoError(t, err) + row.Signature = sig +} + +func marshalUnmarshal(t *testing.T, row *s4.Row) *s4.Row { + t.Helper() + + data, err := s4.MarshalRows([]*s4.Row{row}) + require.NoError(t, err) + rows, err := s4.UnmarshalRows(data) + require.NoError(t, err) + require.Len(t, rows, 1) + return rows[0] +} + +func Test_VerifySignature(t *testing.T) { + t.Parallel() 
+ + rows := generateTestRows(t, 2, time.Minute) + err := rows[0].VerifySignature() + require.NoError(t, err) + + rows[1].Signature[0] = ^rows[1].Signature[0] + err = rows[1].VerifySignature() + require.Error(t, err) + + t.Run("address with leading zeros", func(t *testing.T) { + pk, addr := testutils.NewPrivateKeyAndAddress(t) + for addr[0] != 0 { + pk, addr = testutils.NewPrivateKeyAndAddress(t) + } + row := generateTestRows(t, 1, time.Minute)[0] + row.Address = addr.Big().Bytes() + signRow(t, row, addr, pk) + + require.NoError(t, row.VerifySignature()) + sameRow := marshalUnmarshal(t, row) + require.NoError(t, sameRow.VerifySignature()) + }) + + t.Run("empty payload", func(t *testing.T) { + pk, addr := testutils.NewPrivateKeyAndAddress(t) + row := generateTestRows(t, 1, time.Minute)[0] + row.Payload = []byte{} + row.Address = addr.Big().Bytes() + signRow(t, row, addr, pk) + + require.NoError(t, row.VerifySignature()) + sameRow := marshalUnmarshal(t, row) + require.NoError(t, sameRow.VerifySignature()) + }) +} diff --git a/core/services/ocr2/plugins/s4/plugin.go b/core/services/ocr2/plugins/s4/plugin.go new file mode 100644 index 00000000..08498365 --- /dev/null +++ b/core/services/ocr2/plugins/s4/plugin.go @@ -0,0 +1,339 @@ +package s4 + +import ( + "context" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/libocr/commontypes" + "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/s4" +) + +var ( + promStoragePluginUpdatesCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "storage_plugin_updates", + Help: "Number of storage updates fetched from other nodes", + }, []string{}) + + promStorageTotalByteSize = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: 
"storage_total_byte_size", + Help: "Current byte size of data stored in S4", + }, []string{}) +) + +type plugin struct { + logger commontypes.Logger + config *PluginConfig + orm s4.ORM + addressRange *s4.AddressRange +} + +type key struct { + address string + slotID uint +} + +var _ types.ReportingPlugin = (*plugin)(nil) + +func NewReportingPlugin(logger commontypes.Logger, config *PluginConfig, orm s4.ORM) (types.ReportingPlugin, error) { + if config.MaxObservationEntries == 0 { + return nil, errors.New("max number of observation entries cannot be zero") + } + if config.MaxReportEntries == 0 { + return nil, errors.New("max number of report entries cannot be zero") + } + if config.MaxDeleteExpiredEntries == 0 { + return nil, errors.New("max number of delete expired entries cannot be zero") + } + + addressRange, err := s4.NewInitialAddressRangeForIntervals(config.NSnapshotShards) + if err != nil { + return nil, err + } + + return &plugin{ + logger: logger, + config: config, + orm: orm, + addressRange: addressRange, + }, nil +} + +func (c *plugin) Query(ctx context.Context, ts types.ReportTimestamp) (types.Query, error) { + promReportingPluginQuery.WithLabelValues(c.config.ProductName).Inc() + + snapshot, err := c.orm.GetSnapshot(c.addressRange, pg.WithParentCtx(ctx)) + if err != nil { + return nil, errors.Wrap(err, "failed to GetVersions in Query()") + } + + var storageTotalByteSize uint64 + rows := make([]*SnapshotRow, len(snapshot)) + for i, v := range snapshot { + rows[i] = &SnapshotRow{ + Address: v.Address.Bytes(), + Slotid: uint32(v.SlotId), + Version: v.Version, + } + + storageTotalByteSize += v.PayloadSize + } + + queryBytes, err := MarshalQuery(rows, c.addressRange) + if err != nil { + return nil, err + } + + promReportingPluginsQueryRowsCount.WithLabelValues(c.config.ProductName).Set(float64(len(rows))) + promReportingPluginsQueryByteSize.WithLabelValues(c.config.ProductName).Set(float64(len(queryBytes))) + + 
promStorageTotalByteSize.WithLabelValues().Set(float64(storageTotalByteSize)) + + c.addressRange.Advance() + + c.logger.Debug("S4StorageReporting Query", commontypes.LogFields{ + "epoch": ts.Epoch, + "round": ts.Round, + "nSnapshotRows": len(rows), + }) + + return queryBytes, err +} + +func (c *plugin) Observation(ctx context.Context, ts types.ReportTimestamp, query types.Query) (types.Observation, error) { + promReportingPluginObservation.WithLabelValues(c.config.ProductName).Inc() + + now := time.Now().UTC() + count, err := c.orm.DeleteExpired(c.config.MaxDeleteExpiredEntries, now, pg.WithParentCtx(ctx)) + if err != nil { + return nil, errors.Wrap(err, "failed to DeleteExpired in Observation()") + } + promReportingPluginsExpiredRows.WithLabelValues(c.config.ProductName).Add(float64(count)) + + returnObservation := func(rows []*s4.Row) (types.Observation, error) { + promReportingPluginsObservationRowsCount.WithLabelValues(c.config.ProductName).Set(float64(len(rows))) + return MarshalRows(convertRows(rows)) + } + + unconfirmedRows, err := c.orm.GetUnconfirmedRows(c.config.MaxObservationEntries, pg.WithParentCtx(ctx)) + if err != nil { + return nil, errors.Wrap(err, "failed to GetUnconfirmedRows in Observation()") + } + + if uint(len(unconfirmedRows)) >= c.config.MaxObservationEntries { + return returnObservation(unconfirmedRows[:c.config.MaxObservationEntries]) + } + + maxRemainingRows := int(c.config.MaxObservationEntries) - len(unconfirmedRows) + remainingRows := make([]*s4.Row, 0) + + queryRows, addressRange, err := UnmarshalQuery(query) + if err != nil { + c.logger.Error("Failed to unmarshal query (likely malformed)", commontypes.LogFields{"err": err}) + } else { + snapshot, err := c.orm.GetSnapshot(addressRange, pg.WithParentCtx(ctx)) + if err != nil { + c.logger.Error("ORM GetSnapshot error", commontypes.LogFields{"err": err}) + } else { + type rkey struct { + address *big.Big + slotID uint + } + + snapshotVersionsMap := snapshotToVersionMap(snapshot) + 
toBeAdded := make([]rkey, 0) + // Add rows from query snapshot that have a higher version locally. + for _, qr := range queryRows { + address := UnmarshalAddress(qr.Address) + k := key{address: address.String(), slotID: uint(qr.Slotid)} + if version, ok := snapshotVersionsMap[k]; ok && version > qr.Version { + toBeAdded = append(toBeAdded, rkey{address: address, slotID: uint(qr.Slotid)}) + } + delete(snapshotVersionsMap, k) + } + + if len(toBeAdded) > maxRemainingRows { + toBeAdded = toBeAdded[:maxRemainingRows] + } else { + // Add rows from query address range that exist locally but are missing from query snapshot. + for _, sr := range snapshot { + if !sr.Confirmed { + continue + } + k := key{address: sr.Address.String(), slotID: sr.SlotId} + if _, ok := snapshotVersionsMap[k]; ok { + toBeAdded = append(toBeAdded, rkey{address: sr.Address, slotID: sr.SlotId}) + if len(toBeAdded) == maxRemainingRows { + break + } + } + } + } + + for _, k := range toBeAdded { + row, err := c.orm.Get(k.address, k.slotID, pg.WithParentCtx(ctx)) + if err == nil { + remainingRows = append(remainingRows, row) + } else if !errors.Is(err, s4.ErrNotFound) { + c.logger.Error("ORM Get error", commontypes.LogFields{"err": err}) + } + } + } + } + + c.logger.Debug("S4StorageReporting Observation", commontypes.LogFields{ + "epoch": ts.Epoch, + "round": ts.Round, + "nUnconfirmedRows": len(unconfirmedRows), + "nRemainingRows": len(remainingRows), + }) + + return returnObservation(append(unconfirmedRows, remainingRows...)) +} + +func (c *plugin) Report(_ context.Context, ts types.ReportTimestamp, _ types.Query, aos []types.AttributedObservation) (bool, types.Report, error) { + promReportingPluginReport.WithLabelValues(c.config.ProductName).Inc() + + reportMap := make(map[key]*Row) + reportKeys := []key{} + + for _, ao := range aos { + observationRows, err := UnmarshalRows(ao.Observation) + if err != nil { + return false, nil, errors.Wrap(err, "failed to UnmarshalRows in Report()") + } + + for _, row 
:= range observationRows { + if err := row.VerifySignature(); err != nil { + promReportingPluginWrongSigCount.WithLabelValues(c.config.ProductName).Inc() + c.logger.Error("Report detected invalid signature", commontypes.LogFields{"err": err, "oracleID": ao.Observer}) + continue + } + mkey := key{ + address: UnmarshalAddress(row.Address).String(), + slotID: uint(row.Slotid), + } + report, ok := reportMap[mkey] + if ok && report.Version >= row.Version { + continue + } + reportMap[mkey] = row + reportKeys = append(reportKeys, mkey) + } + } + + reportRows := make([]*Row, 0) + for _, key := range reportKeys { + row := reportMap[key] + reportRows = append(reportRows, row) + + if len(reportRows) >= int(c.config.MaxReportEntries) { + break + } + } + + report, err := MarshalRows(reportRows) + if err != nil { + return false, nil, err + } + + promReportingPluginsReportRowsCount.WithLabelValues(c.config.ProductName).Set(float64(len(reportRows))) + c.logger.Debug("S4StorageReporting Report", commontypes.LogFields{ + "epoch": ts.Epoch, + "round": ts.Round, + "nReportRows": len(reportRows), + "nObservations": len(aos), + }) + + return true, report, nil +} + +func (c *plugin) ShouldAcceptFinalizedReport(ctx context.Context, ts types.ReportTimestamp, report types.Report) (bool, error) { + promReportingPluginShouldAccept.WithLabelValues(c.config.ProductName).Inc() + + reportRows, err := UnmarshalRows(report) + if err != nil { + return false, errors.Wrap(err, "failed to UnmarshalRows in ShouldAcceptFinalizedReport()") + } + + for _, row := range reportRows { + ormRow := &s4.Row{ + Address: UnmarshalAddress(row.Address), + SlotId: uint(row.Slotid), + Payload: row.Payload, + Version: row.Version, + Expiration: row.Expiration, + Confirmed: true, + Signature: row.Signature, + } + + now := time.Now().UnixMilli() + if now > ormRow.Expiration { + c.logger.Error("Received an expired entry in a report, not saving", commontypes.LogFields{ + "expirationTs": ormRow.Expiration, + "nowTs": now, + 
}) + continue + } + + err = c.orm.Update(ormRow, pg.WithParentCtx(ctx)) + if err != nil && !errors.Is(err, s4.ErrVersionTooLow) { + c.logger.Error("Failed to Update a row in ShouldAcceptFinalizedReport()", commontypes.LogFields{"err": err}) + continue + } + promStoragePluginUpdatesCount.WithLabelValues().Inc() + } + + c.logger.Debug("S4StorageReporting ShouldAcceptFinalizedReport", commontypes.LogFields{ + "epoch": ts.Epoch, + "round": ts.Round, + "nReportRows": len(reportRows), + }) + + // If ShouldAcceptFinalizedReport returns false, ShouldTransmitAcceptedReport will not be called. + return false, nil +} + +func (c *plugin) ShouldTransmitAcceptedReport(context.Context, types.ReportTimestamp, types.Report) (bool, error) { + return false, nil +} + +func (c *plugin) Close() error { + return nil +} + +func convertRow(from *s4.Row) *Row { + return &Row{ + Address: from.Address.Bytes(), + Slotid: uint32(from.SlotId), + Version: from.Version, + Expiration: from.Expiration, + Payload: from.Payload, + Signature: from.Signature, + } +} + +func convertRows(from []*s4.Row) []*Row { + rows := make([]*Row, len(from)) + for i, row := range from { + rows[i] = convertRow(row) + } + return rows +} + +func snapshotToVersionMap(rows []*s4.SnapshotRow) map[key]uint64 { + m := make(map[key]uint64) + for _, row := range rows { + if row.Confirmed { + m[key{address: row.Address.String(), slotID: row.SlotId}] = row.Version + } + } + return m +} diff --git a/core/services/ocr2/plugins/s4/plugin_test.go b/core/services/ocr2/plugins/s4/plugin_test.go new file mode 100644 index 00000000..8b9932b6 --- /dev/null +++ b/core/services/ocr2/plugins/s4/plugin_test.go @@ -0,0 +1,506 @@ +package s4_test + +import ( + "errors" + "testing" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + 
"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/s4" + s4_svc "github.com/goplugin/pluginv3.0/v2/core/services/s4" + s4_mocks "github.com/goplugin/pluginv3.0/v2/core/services/s4/mocks" + + commonlogger "github.com/goplugin/plugin-common/pkg/logger" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/protobuf/proto" + + "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +func createPluginConfig(maxEntries uint) *s4.PluginConfig { + return &s4.PluginConfig{ + MaxObservationEntries: maxEntries, + MaxReportEntries: maxEntries, + MaxDeleteExpiredEntries: maxEntries, + NSnapshotShards: 1, + } +} + +func generateTestRows(t *testing.T, n int, ttl time.Duration) []*s4.Row { + ormRows := generateTestOrmRows(t, n, ttl) + rows := make([]*s4.Row, n) + for i := 0; i < n; i++ { + rows[i] = &s4.Row{ + Address: ormRows[i].Address.Bytes(), + Slotid: uint32(ormRows[i].SlotId), + Version: ormRows[i].Version, + Expiration: ormRows[i].Expiration, + Payload: ormRows[i].Payload, + Signature: ormRows[i].Signature, + } + } + return rows +} + +func generateTestOrmRow(t *testing.T, ttl time.Duration, version uint64, confimed bool) *s4_svc.Row { + priv, addr := testutils.NewPrivateKeyAndAddress(t) + row := &s4_svc.Row{ + Address: big.New(addr.Big()), + SlotId: 0, + Version: version, + Confirmed: confimed, + Expiration: time.Now().Add(ttl).UnixMilli(), + Payload: cltest.MustRandomBytes(t, 64), + } + env := &s4_svc.Envelope{ + Address: addr.Bytes(), + SlotID: row.SlotId, + Version: row.Version, + Expiration: row.Expiration, + Payload: row.Payload, + } + sig, err := env.Sign(priv) + assert.NoError(t, err) + row.Signature = sig + return row +} + +func generateTestOrmRows(t *testing.T, n int, ttl time.Duration) []*s4_svc.Row { + rows := make([]*s4_svc.Row, n) + for i := 0; i < n; i++ { + rows[i] = generateTestOrmRow(t, ttl, 0, false) + } + return rows +} + +func 
generateConfirmedTestOrmRows(t *testing.T, n int, ttl time.Duration) []*s4_svc.Row { + rows := make([]*s4_svc.Row, n) + for i := 0; i < n; i++ { + rows[i] = generateTestOrmRow(t, ttl, uint64(i), true) + } + return rows +} + +func compareRows(t *testing.T, protoRows []*s4.Row, ormRows []*s4_svc.Row) { + assert.Equal(t, len(ormRows), len(protoRows)) + for i, row := range protoRows { + assert.Equal(t, row.Address, ormRows[i].Address.Bytes()) + assert.Equal(t, row.Version, ormRows[i].Version) + assert.Equal(t, row.Expiration, ormRows[i].Expiration) + assert.Equal(t, row.Payload, ormRows[i].Payload) + assert.Equal(t, row.Signature, ormRows[i].Signature) + } +} + +func compareSnapshotRows(t *testing.T, snapshot []*s4.SnapshotRow, ormVersions []*s4_svc.SnapshotRow) { + assert.Equal(t, len(ormVersions), len(snapshot)) + for i, row := range snapshot { + assert.Equal(t, row.Address, ormVersions[i].Address.Bytes()) + assert.Equal(t, row.Slotid, uint32(ormVersions[i].SlotId)) + assert.Equal(t, row.Version, ormVersions[i].Version) + } +} + +func rowsToShapshotRows(rows []*s4_svc.Row) []*s4_svc.SnapshotRow { + versions := make([]*s4_svc.SnapshotRow, len(rows)) + for i, r := range rows { + versions[i] = &s4_svc.SnapshotRow{ + Address: r.Address, + SlotId: r.SlotId, + Version: r.Version, + } + } + return versions +} + +func TestPlugin_NewReportingPlugin(t *testing.T) { + t.Parallel() + + logger := commonlogger.NewOCRWrapper(logger.TestLogger(t), true, func(msg string) {}) + orm := s4_mocks.NewORM(t) + + t.Run("ErrInvalidIntervals", func(t *testing.T) { + config := createPluginConfig(1) + config.NSnapshotShards = 0 + + _, err := s4.NewReportingPlugin(logger, config, orm) + assert.ErrorIs(t, err, s4_svc.ErrInvalidIntervals) + }) + + t.Run("MaxObservationEntries is zero", func(t *testing.T) { + config := createPluginConfig(1) + config.MaxObservationEntries = 0 + + _, err := s4.NewReportingPlugin(logger, config, orm) + assert.ErrorContains(t, err, "max number of observation entries 
cannot be zero") + }) + + t.Run("MaxReportEntries is zero", func(t *testing.T) { + config := createPluginConfig(1) + config.MaxReportEntries = 0 + + _, err := s4.NewReportingPlugin(logger, config, orm) + assert.ErrorContains(t, err, "max number of report entries cannot be zero") + }) + + t.Run("MaxDeleteExpiredEntries is zero", func(t *testing.T) { + config := createPluginConfig(1) + config.MaxDeleteExpiredEntries = 0 + + _, err := s4.NewReportingPlugin(logger, config, orm) + assert.ErrorContains(t, err, "max number of delete expired entries cannot be zero") + }) + + t.Run("happy", func(t *testing.T) { + config := createPluginConfig(1) + p, err := s4.NewReportingPlugin(logger, config, orm) + assert.NoError(t, err) + assert.NotNil(t, p) + }) +} + +func TestPlugin_Close(t *testing.T) { + t.Parallel() + + logger := commonlogger.NewOCRWrapper(logger.TestLogger(t), true, func(msg string) {}) + config := createPluginConfig(10) + orm := s4_mocks.NewORM(t) + plugin, err := s4.NewReportingPlugin(logger, config, orm) + assert.NoError(t, err) + + err = plugin.Close() + assert.NoError(t, err) +} + +func TestPlugin_ShouldTransmitAcceptedReport(t *testing.T) { + t.Parallel() + + logger := commonlogger.NewOCRWrapper(logger.TestLogger(t), true, func(msg string) {}) + config := createPluginConfig(10) + orm := s4_mocks.NewORM(t) + plugin, err := s4.NewReportingPlugin(logger, config, orm) + assert.NoError(t, err) + + should, err := plugin.ShouldTransmitAcceptedReport(testutils.Context(t), types.ReportTimestamp{}, nil) + assert.NoError(t, err) + assert.False(t, should) +} + +func TestPlugin_ShouldAcceptFinalizedReport(t *testing.T) { + t.Parallel() + + logger := commonlogger.NewOCRWrapper(logger.TestLogger(t), true, func(msg string) {}) + config := createPluginConfig(10) + orm := s4_mocks.NewORM(t) + plugin, err := s4.NewReportingPlugin(logger, config, orm) + assert.NoError(t, err) + + t.Run("happy", func(t *testing.T) { + ormRows := make([]*s4_svc.Row, 0) + rows := 
generateTestRows(t, 10, time.Minute) + orm.On("Update", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + updateRow := args.Get(0).(*s4_svc.Row) + ormRows = append(ormRows, updateRow) + }).Return(nil).Times(10) + + report, err := proto.Marshal(&s4.Rows{ + Rows: rows, + }) + assert.NoError(t, err) + + should, err := plugin.ShouldAcceptFinalizedReport(testutils.Context(t), types.ReportTimestamp{}, report) + assert.NoError(t, err) + assert.False(t, should) + assert.Equal(t, 10, len(ormRows)) + compareRows(t, rows, ormRows) + + }) + + t.Run("error", func(t *testing.T) { + testErr := errors.New("some error") + rows := generateTestRows(t, 1, time.Minute) + orm.On("Update", mock.Anything, mock.Anything).Return(testErr).Once() + + report, err := proto.Marshal(&s4.Rows{ + Rows: rows, + }) + assert.NoError(t, err) + + should, err := plugin.ShouldAcceptFinalizedReport(testutils.Context(t), types.ReportTimestamp{}, report) + assert.NoError(t, err) // errors just logged + assert.False(t, should) + }) + + t.Run("don't save expired", func(t *testing.T) { + ormRows := make([]*s4_svc.Row, 0) + rows := generateTestRows(t, 2, -time.Minute) + + report, err := proto.Marshal(&s4.Rows{ + Rows: rows, + }) + assert.NoError(t, err) + + should, err := plugin.ShouldAcceptFinalizedReport(testutils.Context(t), types.ReportTimestamp{}, report) + assert.NoError(t, err) + assert.False(t, should) + assert.Equal(t, 0, len(ormRows)) + }) +} + +func TestPlugin_Query(t *testing.T) { + t.Parallel() + + logger := commonlogger.NewOCRWrapper(logger.TestLogger(t), true, func(msg string) {}) + config := createPluginConfig(10) + orm := s4_mocks.NewORM(t) + plugin, err := s4.NewReportingPlugin(logger, config, orm) + assert.NoError(t, err) + + t.Run("happy", func(t *testing.T) { + ormRows := generateTestOrmRows(t, 10, time.Minute) + rows := rowsToShapshotRows(ormRows) + + orm.On("GetSnapshot", mock.Anything, mock.Anything).Return(rows, nil).Once() + + queryBytes, err := 
plugin.Query(testutils.Context(t), types.ReportTimestamp{}) + assert.NoError(t, err) + + query := &s4.Query{} + err = proto.Unmarshal(queryBytes, query) + assert.NoError(t, err) + assert.Equal(t, s4_svc.MinAddress, s4.UnmarshalAddress(query.AddressRange.MinAddress)) + assert.Equal(t, s4_svc.MaxAddress, s4.UnmarshalAddress(query.AddressRange.MaxAddress)) + + compareSnapshotRows(t, query.Rows, rows) + }) + + t.Run("empty", func(t *testing.T) { + empty := make([]*s4_svc.SnapshotRow, 0) + orm.On("GetSnapshot", mock.Anything, mock.Anything).Return(empty, nil).Once() + + query, err := plugin.Query(testutils.Context(t), types.ReportTimestamp{}) + assert.NoError(t, err) + assert.NotNil(t, query) + }) + + t.Run("query with shards", func(t *testing.T) { + config.NSnapshotShards = 16 + + ormRows := generateTestOrmRows(t, 256, time.Minute) + for i := 0; i < 256; i++ { + var thisAddress common.Address + thisAddress[0] = byte(i) + ormRows[i].Address = big.New(thisAddress.Big()) + } + versions := rowsToShapshotRows(ormRows) + + ar, err := s4_svc.NewInitialAddressRangeForIntervals(config.NSnapshotShards) + assert.NoError(t, err) + + for i := 0; i <= int(config.NSnapshotShards); i++ { + from := i * 16 + to := from + 16 + if i == int(config.NSnapshotShards) { + from = 0 + to = 16 + } + orm.On("GetSnapshot", mock.Anything, mock.Anything).Return(versions[from:to], nil).Once() + + query, err := plugin.Query(testutils.Context(t), types.ReportTimestamp{}) + assert.NoError(t, err) + + qq := &s4.Query{} + err = proto.Unmarshal(query, qq) + assert.NoError(t, err) + + assert.Len(t, qq.Rows, 16) + for _, r := range qq.Rows { + thisAddress := s4.UnmarshalAddress(r.Address) + assert.True(t, ar.Contains(thisAddress)) + } + + ar.Advance() + } + }) +} + +func TestPlugin_Observation(t *testing.T) { + t.Parallel() + + logger := commonlogger.NewOCRWrapper(logger.TestLogger(t), true, func(msg string) {}) + config := createPluginConfig(10) + orm := s4_mocks.NewORM(t) + plugin, err := 
s4.NewReportingPlugin(logger, config, orm) + assert.NoError(t, err) + + t.Run("all unconfirmed", func(t *testing.T) { + ormRows := generateTestOrmRows(t, int(config.MaxObservationEntries), time.Minute) + for _, or := range ormRows { + or.Confirmed = false + } + orm.On("DeleteExpired", uint(10), mock.Anything, mock.Anything).Return(int64(10), nil).Once() + orm.On("GetUnconfirmedRows", config.MaxObservationEntries, mock.Anything).Return(ormRows, nil).Once() + + observation, err := plugin.Observation(testutils.Context(t), types.ReportTimestamp{}, []byte{}) + assert.NoError(t, err) + + rows := &s4.Rows{} + err = proto.Unmarshal(observation, rows) + assert.NoError(t, err) + assert.Len(t, rows.Rows, int(config.MaxObservationEntries)) + }) + + t.Run("unconfirmed with query", func(t *testing.T) { + numUnconfirmed := int(config.MaxObservationEntries / 2) + ormRows := generateTestOrmRows(t, int(config.MaxObservationEntries), time.Minute) + snapshot := make([]*s4_svc.SnapshotRow, len(ormRows)) + for i, or := range ormRows { + or.Confirmed = i < numUnconfirmed // First half are confirmed + or.Version = uint64(i) + snapshot[i] = &s4_svc.SnapshotRow{ + Address: or.Address, + SlotId: or.SlotId, + Version: or.Version, + Confirmed: or.Confirmed, + } + } + orm.On("DeleteExpired", uint(10), mock.Anything, mock.Anything).Return(int64(10), nil).Once() + orm.On("GetUnconfirmedRows", config.MaxObservationEntries, mock.Anything).Return(ormRows[numUnconfirmed:], nil).Once() + orm.On("GetSnapshot", mock.Anything, mock.Anything).Return(snapshot, nil).Once() + + snapshotRows := rowsToShapshotRows(ormRows) + query := &s4.Query{ + Rows: make([]*s4.SnapshotRow, len(snapshotRows)), + } + numHigherVersion := 2 + for i, v := range snapshotRows { + query.Rows[i] = &s4.SnapshotRow{ + Address: v.Address.Bytes(), + Slotid: uint32(v.SlotId), + Version: v.Version, + } + if i < numHigherVersion { + ormRows[i].Version++ + snapshot[i].Version++ + orm.On("Get", v.Address, v.SlotId, 
mock.Anything).Return(ormRows[i], nil).Once() + } + } + queryBytes, err := proto.Marshal(query) + assert.NoError(t, err) + + observation, err := plugin.Observation(testutils.Context(t), types.ReportTimestamp{}, queryBytes) + assert.NoError(t, err) + + rows := &s4.Rows{} + err = proto.Unmarshal(observation, rows) + assert.NoError(t, err) + assert.Len(t, rows.Rows, numUnconfirmed+numHigherVersion) + + for i := 0; i < numUnconfirmed; i++ { + assert.Equal(t, ormRows[numUnconfirmed+i].Version, rows.Rows[i].Version) + } + for i := 0; i < numHigherVersion; i++ { + assert.Equal(t, ormRows[i].Version, rows.Rows[numUnconfirmed+i].Version) + } + }) + + t.Run("missing from query", func(t *testing.T) { + vLow, vHigh := uint64(2), uint64(5) + ormRows := generateTestOrmRows(t, 3, time.Minute) + // Follower node has 3 confirmed entries with latest versions. + snapshot := make([]*s4_svc.SnapshotRow, len(ormRows)) + for i, or := range ormRows { + or.Confirmed = true + or.Version = vHigh + snapshot[i] = &s4_svc.SnapshotRow{ + Address: or.Address, + SlotId: or.SlotId, + Version: or.Version, + Confirmed: or.Confirmed, + } + } + + // Query snapshot has: + // - First entry with same version + // - Second entry with lower version + // - Third entry missing + query := &s4.Query{ + Rows: []*s4.SnapshotRow{ + &s4.SnapshotRow{ + Address: snapshot[0].Address.Bytes(), + Slotid: uint32(snapshot[0].SlotId), + Version: vHigh, + }, + &s4.SnapshotRow{ + Address: snapshot[1].Address.Bytes(), + Slotid: uint32(snapshot[1].SlotId), + Version: vLow, + }, + }, + } + queryBytes, err := proto.Marshal(query) + assert.NoError(t, err) + + orm.On("DeleteExpired", uint(10), mock.Anything, mock.Anything).Return(int64(10), nil).Once() + orm.On("GetUnconfirmedRows", config.MaxObservationEntries, mock.Anything).Return([]*s4_svc.Row{}, nil).Once() + orm.On("GetSnapshot", mock.Anything, mock.Anything).Return(snapshot, nil).Once() + orm.On("Get", snapshot[1].Address, snapshot[1].SlotId, 
mock.Anything).Return(ormRows[1], nil).Once() + orm.On("Get", snapshot[2].Address, snapshot[2].SlotId, mock.Anything).Return(ormRows[2], nil).Once() + + observation, err := plugin.Observation(testutils.Context(t), types.ReportTimestamp{}, queryBytes) + assert.NoError(t, err) + + rows := &s4.Rows{} + err = proto.Unmarshal(observation, rows) + assert.NoError(t, err) + assert.Len(t, rows.Rows, 2) + }) +} + +func TestPlugin_Report(t *testing.T) { + t.Parallel() + + logger := commonlogger.NewOCRWrapper(logger.TestLogger(t), true, func(msg string) {}) + config := createPluginConfig(10) + orm := s4_mocks.NewORM(t) + plugin, err := s4.NewReportingPlugin(logger, config, orm) + assert.NoError(t, err) + + rows := generateTestRows(t, 10, time.Minute) + observation, err := proto.Marshal(&s4.Rows{Rows: rows}) + assert.NoError(t, err) + + aos := []types.AttributedObservation{ + { + Observation: observation, + }, + { + Observation: observation, + }, + } + ok, report, err := plugin.Report(testutils.Context(t), types.ReportTimestamp{}, nil, aos) + assert.NoError(t, err) + assert.True(t, ok) + + reportRows := &s4.Rows{} + err = proto.Unmarshal(report, reportRows) + assert.NoError(t, err) + assert.Len(t, reportRows.Rows, 10) + + ok2, report2, err2 := plugin.Report(testutils.Context(t), types.ReportTimestamp{}, nil, aos) + assert.NoError(t, err2) + assert.True(t, ok2) + + reportRows2 := &s4.Rows{} + err = proto.Unmarshal(report2, reportRows2) + assert.NoError(t, err) + + // Verify that the same report was produced + assert.Equal(t, reportRows, reportRows2) +} diff --git a/core/services/ocr2/plugins/s4/prometheus.go b/core/services/ocr2/plugins/s4/prometheus.go new file mode 100644 index 00000000..41342579 --- /dev/null +++ b/core/services/ocr2/plugins/s4/prometheus.go @@ -0,0 +1,58 @@ +package s4 + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var ( + promReportingPluginQuery = 
promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "s4_reporting_plugin_query", + Help: "Metric to track number of ReportingPlugin.Query() calls", + }, []string{"product"}) + + promReportingPluginObservation = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "s4_reporting_plugin_observation", + Help: "Metric to track number of ReportingPlugin.Observation() calls", + }, []string{"product"}) + + promReportingPluginReport = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "s4_reporting_plugin_report", + Help: "Metric to track number of ReportingPlugin.Report() calls", + }, []string{"product"}) + + promReportingPluginShouldAccept = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "s4_reporting_plugin_accept", + Help: "Metric to track number of ReportingPlugin.ShouldAcceptFinalizedReport() calls", + }, []string{"product"}) + + promReportingPluginsQueryByteSize = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s4_reporting_plugin_query_byte_size", + Help: "Metric to track query byte size returned by ReportingPlugin.Query()", + }, []string{"product"}) + + promReportingPluginsQueryRowsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s4_reporting_plugin_query_rows_count", + Help: "Metric to track rows count returned by ReportingPlugin.Query()", + }, []string{"product"}) + + promReportingPluginsObservationRowsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s4_reporting_plugin_observation_rows_count", + Help: "Metric to track rows count returned by ReportingPlugin.Observation()", + }, []string{"product"}) + + promReportingPluginsReportRowsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s4_reporting_plugin_report_rows_count", + Help: "Metric to track rows count returned by ReportingPlugin.Report()", + }, []string{"product"}) + + promReportingPluginWrongSigCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "s4_reporting_plugin_wrong_sig_count", + Help: "Metric to track number of rows having wrong 
signature", + }, []string{"product"}) + + promReportingPluginsExpiredRows = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "s4_reporting_plugin_expired_rows", + Help: "Metric to track number of expired rows", + }, []string{"product"}) +) diff --git a/core/services/ocr2/plugins/threshold/decryption_queue.go b/core/services/ocr2/plugins/threshold/decryption_queue.go new file mode 100644 index 00000000..b4e0a939 --- /dev/null +++ b/core/services/ocr2/plugins/threshold/decryption_queue.go @@ -0,0 +1,258 @@ +package threshold + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + decryptionPlugin "github.com/goplugin/tdh2/go/ocr2/decryptionplugin" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +//go:generate mockery --quiet --name Decryptor --output ./mocks/ --case=underscore +type Decryptor interface { + Decrypt(ctx context.Context, ciphertextId decryptionPlugin.CiphertextId, ciphertext []byte) ([]byte, error) +} + +type pendingRequest struct { + chPlaintext chan<- []byte + ciphertext []byte +} + +type completedRequest struct { + plaintext []byte + timer *time.Timer +} + +type decryptionQueue struct { + maxQueueLength int + maxCiphertextBytes int + maxCiphertextIdLen int + completedRequestsCacheTimeout time.Duration + pendingRequestQueue []decryptionPlugin.CiphertextId + pendingRequests map[string]pendingRequest + completedRequests map[string]completedRequest + mu sync.RWMutex + lggr logger.Logger +} + +var ( + _ Decryptor = &decryptionQueue{} + _ decryptionPlugin.DecryptionQueuingService = &decryptionQueue{} + _ job.ServiceCtx = &decryptionQueue{} +) + +func NewDecryptionQueue(maxQueueLength int, maxCiphertextBytes int, maxCiphertextIdLen int, completedRequestsCacheTimeout time.Duration, lggr logger.Logger) *decryptionQueue { + dq := decryptionQueue{ + maxQueueLength, + maxCiphertextBytes, + maxCiphertextIdLen, + completedRequestsCacheTimeout, + []decryptionPlugin.CiphertextId{}, + 
		// NOTE(review): positional composite literal continued from the preceding
		// lines of NewDecryptionQueue; field order must match decryptionQueue.
		make(map[string]pendingRequest),
		make(map[string]completedRequest),
		sync.RWMutex{},
		lggr.Named("DecryptionQueue"),
	}
	return &dq
}

// Decrypt validates the request, registers it for pickup by the DON (or claims
// an already-completed result), and blocks until a plaintext is delivered, the
// request fails, or ctx is cancelled.
func (dq *decryptionQueue) Decrypt(ctx context.Context, ciphertextId decryptionPlugin.CiphertextId, ciphertext []byte) ([]byte, error) {
	// Reject ids/payloads outside the configured bounds before touching shared state.
	if len(ciphertextId) > dq.maxCiphertextIdLen {
		return nil, errors.New("ciphertextId too large")
	}

	if len(ciphertextId) == 0 {
		return nil, errors.New("ciphertextId is empty")
	}

	if len(ciphertext) > dq.maxCiphertextBytes {
		return nil, errors.New("ciphertext too large")
	}

	if len(ciphertext) == 0 {
		return nil, errors.New("ciphertext is empty")
	}

	chPlaintext, err := dq.getResult(ciphertextId, ciphertext)
	if err != nil {
		return nil, err
	}

	select {
	case pt, ok := <-chPlaintext:
		if ok {
			return pt, nil
		}
		// Channel closed without a value: SetResult recorded a decryption error
		// for this id (it closes the channel without sending on error).
		return nil, fmt.Errorf("pending decryption request for ciphertextId %s was closed without a response", ciphertextId)
	case <-ctx.Done():
		// Caller gave up: drop the pending entry so SetResult won't deliver to it.
		// The id may still sit in pendingRequestQueue; GetRequests prunes such
		// stale entries on its next pass.
		dq.mu.Lock()
		defer dq.mu.Unlock()
		delete(dq.pendingRequests, string(ciphertextId))
		return nil, errors.New("context provided by caller was cancelled")
	}
}

// getResult returns a 1-buffered channel that will receive the plaintext.
// If the DON already decrypted this id, the cached result is delivered
// immediately and evicted from the completed-requests cache; otherwise the
// request is appended to the pending queue (ids must be unique while pending).
func (dq *decryptionQueue) getResult(ciphertextId decryptionPlugin.CiphertextId, ciphertext []byte) (<-chan []byte, error) {
	dq.mu.Lock()
	defer dq.mu.Unlock()

	// Buffered so the cached-result path below can send without a receiver.
	chPlaintext := make(chan []byte, 1)

	req, ok := dq.completedRequests[string(ciphertextId)]
	if ok {
		dq.lggr.Debugf("ciphertextId %s was already decrypted by the DON", ciphertextId)
		chPlaintext <- req.plaintext
		// Cancel the cache-expiry timer; the result has been claimed.
		req.timer.Stop()
		delete(dq.completedRequests, string(ciphertextId))
		return chPlaintext, nil
	}

	_, isDuplicateId := dq.pendingRequests[string(ciphertextId)]
	if isDuplicateId {
		return nil, errors.New("ciphertextId must be unique")
	}

	if len(dq.pendingRequestQueue) >= dq.maxQueueLength {
		return nil, errors.New("queue is full")
	}
	dq.pendingRequestQueue = append(dq.pendingRequestQueue, ciphertextId)

	dq.pendingRequests[string(ciphertextId)] = 
pendingRequest{
+		chPlaintext,
+		ciphertext,
+	}
+	dq.lggr.Debugf("ciphertextId %s added to pendingRequestQueue", ciphertextId)
+
+	return chPlaintext, nil
+}
+
+// GetRequests returns up to requestCountLimit pending requests, in queue
+// order, whose combined (id + ciphertext) size does not exceed
+// totalBytesLimit. Queue entries whose caller has given up (no longer
+// present in pendingRequests) are pruned as they are encountered.
+func (dq *decryptionQueue) GetRequests(requestCountLimit int, totalBytesLimit int) []decryptionPlugin.DecryptionRequest {
+	dq.mu.Lock()
+	defer dq.mu.Unlock()
+
+	requests := make([]decryptionPlugin.DecryptionRequest, 0, requestCountLimit)
+	totalBytes := 0
+	indicesToRemove := make(map[int]struct{})
+
+	for i := 0; len(requests) < requestCountLimit; i++ {
+		if i >= len(dq.pendingRequestQueue) {
+			break
+		}
+
+		ciphertextId := dq.pendingRequestQueue[i]
+		pendingRequest, exists := dq.pendingRequests[string(ciphertextId)]
+
+		if !exists {
+			// The request was cancelled or otherwise resolved; mark the
+			// stale queue entry for removal below.
+			dq.lggr.Debugf("decryption request for ciphertextId %s already processed or expired", ciphertextId)
+			indicesToRemove[i] = struct{}{}
+			continue
+		}
+
+		requestToAdd := decryptionPlugin.DecryptionRequest{
+			CiphertextId: ciphertextId,
+			Ciphertext:   pendingRequest.ciphertext,
+		}
+
+		requestTotalLen := len(ciphertextId) + len(pendingRequest.ciphertext)
+
+		if (totalBytes + requestTotalLen) > totalBytesLimit {
+			dq.lggr.Debug("totalBytesLimit reached in GetRequests")
+			break
+		}
+
+		requests = append(requests, requestToAdd)
+		totalBytes += requestTotalLen
+	}
+
+	dq.pendingRequestQueue = removeMultipleIndices(dq.pendingRequestQueue, indicesToRemove)
+
+	if len(dq.pendingRequestQueue) > 0 {
+		dq.lggr.Debugf("returning first %d of %d total requests awaiting decryption", len(requests), len(dq.pendingRequestQueue))
+	} else {
+		dq.lggr.Debug("no requests awaiting decryption")
+	}
+
+	return requests
+}
+
+// removeMultipleIndices returns a copy of data without the elements at the
+// indices present in indicesToRemove; relative order is preserved.
+func removeMultipleIndices[T any](data []T, indicesToRemove map[int]struct{}) []T {
+	filtered := make([]T, 0, len(data)-len(indicesToRemove))
+
+	for i, v := range data {
+		if _, exists := indicesToRemove[i]; !exists {
+			filtered = append(filtered, v)
+		}
+	}
+
+	return filtered
+}
+
+// GetCiphertext returns the ciphertext of a pending request, or
+// decryptionPlugin.ErrNotFound if no such request is pending.
+func (dq *decryptionQueue) GetCiphertext(ciphertextId decryptionPlugin.CiphertextId) ([]byte, error) {
+	dq.mu.RLock()
+	defer dq.mu.RUnlock()
+
+	req, ok := dq.pendingRequests[string(ciphertextId)]
+	if !ok {
+		return nil, decryptionPlugin.ErrNotFound
+	}
+
+	return req.ciphertext, nil
+}
+
+// SetResult records the outcome of a decryption. If a caller is waiting, the
+// plaintext is delivered (or, on error, the channel is closed without a
+// send); otherwise a successful result is cached until
+// completedRequestsCacheTimeout elapses.
+func (dq *decryptionQueue) SetResult(ciphertextId decryptionPlugin.CiphertextId, plaintext []byte, err error) {
+	dq.mu.Lock()
+	defer dq.mu.Unlock()
+
+	if err == nil && plaintext == nil {
+		dq.lggr.Errorf("received nil error and nil plaintext for ciphertextId %s", ciphertextId)
+		return
+	}
+
+	req, ok := dq.pendingRequests[string(ciphertextId)]
+	if ok {
+		if err != nil {
+			dq.lggr.Debugf("decryption error for ciphertextId %s", ciphertextId)
+		} else {
+			dq.lggr.Debugf("responding with result for pending decryption request ciphertextId %s", ciphertextId)
+			req.chPlaintext <- plaintext
+		}
+		// Closing without a prior send signals the error case to Decrypt.
+		close(req.chPlaintext)
+		delete(dq.pendingRequests, string(ciphertextId))
+	} else {
+		if err != nil {
+			// This is currently possible only for ErrAggregation, encountered during Report() phase.
+ dq.lggr.Debugf("received decryption error for ciphertextId %s which doesn't exist locally", ciphertextId) + return + } + + // Cache plaintext result in completedRequests map for cacheTimeoutMs to account for delayed Decrypt() calls + timer := time.AfterFunc(dq.completedRequestsCacheTimeout, func() { + dq.lggr.Debugf("removing completed decryption result for ciphertextId %s from cache", ciphertextId) + dq.mu.Lock() + delete(dq.completedRequests, string(ciphertextId)) + dq.mu.Unlock() + }) + + dq.lggr.Debugf("adding decryption result for ciphertextId %s to completedRequests cache", ciphertextId) + dq.completedRequests[string(ciphertextId)] = completedRequest{ + plaintext, + timer, + } + } +} + +func (dq *decryptionQueue) Start(ctx context.Context) error { + return nil +} + +func (dq *decryptionQueue) Close() error { + for _, completedRequest := range dq.completedRequests { + completedRequest.timer.Stop() + } + return nil +} diff --git a/core/services/ocr2/plugins/threshold/decryption_queue_test.go b/core/services/ocr2/plugins/threshold/decryption_queue_test.go new file mode 100644 index 00000000..64789e82 --- /dev/null +++ b/core/services/ocr2/plugins/threshold/decryption_queue_test.go @@ -0,0 +1,472 @@ +package threshold + +import ( + "context" + "errors" + "reflect" + "testing" + "time" + + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + decryptionPlugin "github.com/goplugin/tdh2/go/ocr2/decryptionplugin" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func Test_decryptionQueue_NewThresholdDecryptor(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(5, 1001, 64, 1002, lggr) + + assert.Equal(t, 5, dq.maxQueueLength) + assert.Equal(t, 1001, dq.maxCiphertextBytes) + assert.Equal(t, time.Duration(1002), dq.completedRequestsCacheTimeout) +} + +func Test_decryptionQueue_Decrypt_ReturnResultAfterCallingDecrypt(t 
*testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(5, 1000, 64, testutils.WaitTimeout(t), lggr) + + go func() { + waitForPendingRequestToBeAdded(t, dq, []byte("1")) + dq.SetResult([]byte("1"), []byte("decrypted"), nil) + }() + + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + pt, err := dq.Decrypt(ctx, []byte("1"), []byte("encrypted")) + require.NoError(t, err) + if !reflect.DeepEqual(pt, []byte("decrypted")) { + t.Error("did not get expected result") + } +} + +func Test_decryptionQueue_Decrypt_CiphertextIdTooLarge(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(1, 1000, 16, testutils.WaitTimeout(t), lggr) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + _, err := dq.Decrypt(ctx, []byte("largeCiphertextId"), []byte("ciphertext")) + assert.Equal(t, err.Error(), "ciphertextId too large") +} + +func Test_decryptionQueue_Decrypt_EmptyCiphertextId(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(1, 1000, 64, testutils.WaitTimeout(t), lggr) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + _, err := dq.Decrypt(ctx, []byte(""), []byte("ciphertext")) + assert.Equal(t, err.Error(), "ciphertextId is empty") +} + +func Test_decryptionQueue_Decrypt_CiphertextTooLarge(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(1, 10, 64, testutils.WaitTimeout(t), lggr) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + _, err := dq.Decrypt(ctx, []byte("1"), []byte("largeciphertext")) + assert.Equal(t, err.Error(), "ciphertext too large") +} + +func Test_decryptionQueue_Decrypt_EmptyCiphertext(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(1, 1000, 64, testutils.WaitTimeout(t), lggr) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + _, err := dq.Decrypt(ctx, []byte("1"), []byte("")) + assert.Equal(t, err.Error(), 
"ciphertext is empty") +} + +func Test_decryptionQueue_Decrypt_DuplicateCiphertextId(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(1, 1000, 64, testutils.WaitTimeout(t), lggr) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + go func() { + _, err := dq.Decrypt(ctx, []byte("1"), []byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("1")) + + _, err := dq.Decrypt(ctx, []byte("1"), []byte("encrypted")) + assert.Equal(t, err.Error(), "ciphertextId must be unique") +} + +func Test_decryptionQueue_Decrypt_ContextCancelled(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(1, 1000, 64, 100, lggr) + + ctx, cancel := context.WithTimeout(testutils.Context(t), time.Duration(100)*time.Millisecond) + defer cancel() + + _, err := dq.Decrypt(ctx, []byte("2"), []byte("encrypted")) + assert.Equal(t, err.Error(), "context provided by caller was cancelled") +} + +func Test_decryptionQueue_Decrypt_QueueFull(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(1, 1000, 64, testutils.WaitTimeout(t), lggr) + + ctx1, cancel1 := context.WithCancel(testutils.Context(t)) + defer cancel1() + + go func() { + _, err := dq.Decrypt(ctx1, []byte("4"), []byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("4")) + + ctx2, cancel2 := context.WithCancel(testutils.Context(t)) + defer cancel2() + + _, err := dq.Decrypt(ctx2, []byte("3"), []byte("encrypted")) + assert.Equal(t, err.Error(), "queue is full") +} + +func Test_decryptionQueue_GetRequests(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(3, 1000, 64, testutils.WaitTimeout(t), lggr) + + ctx1, cancel1 := context.WithCancel(testutils.Context(t)) + defer cancel1() + + go func() { + _, err := dq.Decrypt(ctx1, []byte("5"), 
[]byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("5")) + + ctx2, cancel2 := context.WithCancel(testutils.Context(t)) + defer cancel2() + + go func() { + _, err := dq.Decrypt(ctx2, []byte("6"), []byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("6")) + + requests := dq.GetRequests(2, 1000) + expected := []decryptionPlugin.DecryptionRequest{ + {CiphertextId: []byte("5"), Ciphertext: []byte("encrypted")}, + {CiphertextId: []byte("6"), Ciphertext: []byte("encrypted")}, + } + + if !reflect.DeepEqual(requests, expected) { + t.Error("did not get the expected requests") + } +} + +func Test_decryptionQueue_GetCiphertext(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(3, 1000, 64, testutils.WaitTimeout(t), lggr) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + go func() { + _, err := dq.Decrypt(ctx, []byte("7"), []byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("7")) + + ct, err := dq.GetCiphertext([]byte("7")) + require.NoError(t, err) + if !reflect.DeepEqual(ct, []byte("encrypted")) { + t.Error("did not get the expected requests") + } +} + +func Test_decryptionQueue_GetCiphertext_CiphertextNotFound(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(3, 1000, 64, testutils.WaitTimeout(t), lggr) + + _, err := dq.GetCiphertext([]byte{0xa5}) + assert.True(t, errors.Is(err, decryptionPlugin.ErrNotFound)) +} + +func Test_decryptionQueue_Decrypt_DecryptCalledAfterReadyResult(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(2, 1000, 64, testutils.WaitTimeout(t), lggr) + + dq.SetResult([]byte("9"), []byte("decrypted"), nil) + + ctx, cancel := 
context.WithCancel(testutils.Context(t)) + defer cancel() + + pt, err := dq.Decrypt(ctx, []byte("9"), []byte("encrypted")) + require.NoError(t, err) + if !reflect.DeepEqual(pt, []byte("decrypted")) { + t.Error("did not get expected plaintext") + } +} + +func Test_decryptionQueue_ReadyResult_ExpireRequest(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(2, 1000, 64, 100, lggr) + + dq.SetResult([]byte("9"), []byte("decrypted"), nil) + + waitForCompletedRequestToBeAdded(t, dq, []byte("9")) + + ctx, cancel := context.WithTimeout(testutils.Context(t), time.Duration(100)*time.Millisecond) + defer cancel() + + _, err := dq.Decrypt(ctx, []byte("9"), []byte("encrypted")) + assert.Equal(t, err.Error(), "context provided by caller was cancelled") +} + +func Test_decryptionQueue_Decrypt_CleanupSuccessfulRequest(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(2, 1000, 64, testutils.WaitTimeout(t), lggr) + + dq.SetResult([]byte("10"), []byte("decrypted"), nil) + + ctx1, cancel1 := context.WithCancel(testutils.Context(t)) + defer cancel1() + + _, err1 := dq.Decrypt(ctx1, []byte("10"), []byte("encrypted")) // This will remove the decrypted result to completedRequests + require.NoError(t, err1) + + ctx2, cancel2 := context.WithTimeout(testutils.Context(t), time.Duration(100)*time.Millisecond) + defer cancel2() + + _, err2 := dq.Decrypt(ctx2, []byte("10"), []byte("encrypted")) + assert.Equal(t, err2.Error(), "context provided by caller was cancelled") +} + +func Test_decryptionQueue_Decrypt_UserErrorDuringDecryption(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(5, 1000, 64, testutils.WaitTimeout(t), lggr) + ciphertextId := []byte{0x12, 0x0f} + + go func() { + waitForPendingRequestToBeAdded(t, dq, ciphertextId) + dq.SetResult(ciphertextId, nil, decryptionPlugin.ErrAggregation) + }() + + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + _, err := dq.Decrypt(ctx, ciphertextId, 
[]byte("encrypted")) + assert.Equal(t, err.Error(), "pending decryption request for ciphertextId 0x120f was closed without a response") +} + +func Test_decryptionQueue_Decrypt_HandleClosedChannelWithoutPlaintextResponse(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(5, 1000, 64, testutils.WaitTimeout(t), lggr) + ciphertextId := []byte{0x00, 0xff} + + go func() { + waitForPendingRequestToBeAdded(t, dq, ciphertextId) + close(dq.pendingRequests[string(ciphertextId)].chPlaintext) + }() + + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + _, err := dq.Decrypt(ctx, ciphertextId, []byte("encrypted")) + assert.Equal(t, err.Error(), "pending decryption request for ciphertextId 0x00ff was closed without a response") +} + +func Test_decryptionQueue_GetRequests_RequestsCountLimit(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(4, 1000, 64, testutils.WaitTimeout(t), lggr) + + ctx1, cancel1 := context.WithCancel(testutils.Context(t)) + defer cancel1() + + go func() { + _, err := dq.Decrypt(ctx1, []byte("11"), []byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("11")) + + ctx2, cancel2 := context.WithCancel(testutils.Context(t)) + defer cancel2() + + go func() { + _, err := dq.Decrypt(ctx2, []byte("12"), []byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("12")) + + ctx3, cancel3 := context.WithCancel(testutils.Context(t)) + defer cancel3() + + go func() { + _, err := dq.Decrypt(ctx3, []byte("13"), []byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("13")) + + requests := dq.GetRequests(2, 1000) + expected := []decryptionPlugin.DecryptionRequest{ + {CiphertextId: []byte("11"), Ciphertext: 
[]byte("encrypted")}, + {CiphertextId: []byte("12"), Ciphertext: []byte("encrypted")}, + } + if !reflect.DeepEqual(requests, expected) { + t.Error("did not get expected requests") + } +} + +func Test_decryptionQueue_GetRequests_TotalBytesLimit(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(4, 10, 64, testutils.WaitTimeout(t), lggr) + + ctx1, cancel1 := context.WithCancel(testutils.Context(t)) + defer cancel1() + + go func() { + _, err := dq.Decrypt(ctx1, []byte("11"), []byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("11")) + + ctx2, cancel2 := context.WithCancel(testutils.Context(t)) + defer cancel2() + + go func() { + _, err := dq.Decrypt(ctx2, []byte("12"), []byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("12")) + + ctx3, cancel3 := context.WithCancel(testutils.Context(t)) + defer cancel3() + + go func() { + _, err := dq.Decrypt(ctx3, []byte("13"), []byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("13")) + + requests := dq.GetRequests(4, 30) + expected := []decryptionPlugin.DecryptionRequest{ + {CiphertextId: []byte("11"), Ciphertext: []byte("encrypted")}, + {CiphertextId: []byte("12"), Ciphertext: []byte("encrypted")}, + } + if !reflect.DeepEqual(requests, expected) { + t.Error("did not get expected requests") + } +} + +func Test_decryptionQueue_GetRequests_PendingRequestQueueShorterThanRequestCountLimit(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(4, 1000, 64, testutils.WaitTimeout(t), lggr) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + go func() { + _, err := dq.Decrypt(ctx, []byte("11"), []byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller 
was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("11")) + + requests := dq.GetRequests(2, 1000) + expected := []decryptionPlugin.DecryptionRequest{ + {CiphertextId: []byte("11"), Ciphertext: []byte("encrypted")}, + } + if !reflect.DeepEqual(requests, expected) { + t.Error("did not get expected requests") + } +} + +func Test_decryptionQueue_GetRequests_ExpiredRequest(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(4, 1000, 64, testutils.WaitTimeout(t), lggr) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + + go func() { + _, err := dq.Decrypt(ctx, []byte("11"), []byte("encrypted")) + require.Equal(t, err.Error(), "context provided by caller was cancelled") + }() + + waitForPendingRequestToBeAdded(t, dq, []byte("11")) + cancel() // Context cancellation should expire the pending request + waitForPendingRequestToBeRemoved(t, dq, []byte("11")) + + requests := dq.GetRequests(2, 1000) + expected := []decryptionPlugin.DecryptionRequest{} + if !reflect.DeepEqual(requests, expected) { + t.Error("did not get expected requests") + } +} + +func Test_decryptionQueue_Start(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(4, 1000, 64, testutils.WaitTimeout(t), lggr) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + err := dq.Start(ctx) + + require.NoError(t, err) +} + +func Test_decryptionQueue_Close(t *testing.T) { + lggr := logger.TestLogger(t) + dq := NewDecryptionQueue(4, 1000, 64, testutils.WaitTimeout(t), lggr) + + dq.SetResult([]byte("14"), []byte("decrypted"), nil) + + err := dq.Close() + + require.NoError(t, err) +} + +func waitForPendingRequestToBeAdded(t *testing.T, dq *decryptionQueue, ciphertextId decryptionPlugin.CiphertextId) { + gomega.NewGomegaWithT(t).Eventually(func() bool { + dq.mu.RLock() + _, exists := dq.pendingRequests[string(ciphertextId)] + dq.mu.RUnlock() + return exists + }, testutils.WaitTimeout(t), "10ms").Should(gomega.BeTrue(), 
"pending request should be added") +} + +func waitForPendingRequestToBeRemoved(t *testing.T, dq *decryptionQueue, ciphertextId decryptionPlugin.CiphertextId) { + gomega.NewGomegaWithT(t).Eventually(func() bool { + dq.mu.RLock() + _, exists := dq.pendingRequests[string(ciphertextId)] + dq.mu.RUnlock() + return exists + }, testutils.WaitTimeout(t), "10ms").Should(gomega.BeFalse(), "pending request should be removed") +} + +func waitForCompletedRequestToBeAdded(t *testing.T, dq *decryptionQueue, ciphertextId decryptionPlugin.CiphertextId) { + gomega.NewGomegaWithT(t).Eventually(func() bool { + dq.mu.RLock() + _, exists := dq.completedRequests[string(ciphertextId)] + dq.mu.RUnlock() + return exists + }, testutils.WaitTimeout(t), "10ms").Should(gomega.BeFalse(), "completed request should be removed") +} diff --git a/core/services/ocr2/plugins/threshold/mocks/decryptor.go b/core/services/ocr2/plugins/threshold/mocks/decryptor.go new file mode 100644 index 00000000..a7169283 --- /dev/null +++ b/core/services/ocr2/plugins/threshold/mocks/decryptor.go @@ -0,0 +1,59 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + decryptionplugin "github.com/goplugin/tdh2/go/ocr2/decryptionplugin" + mock "github.com/stretchr/testify/mock" +) + +// Decryptor is an autogenerated mock type for the Decryptor type +type Decryptor struct { + mock.Mock +} + +// Decrypt provides a mock function with given fields: ctx, ciphertextId, ciphertext +func (_m *Decryptor) Decrypt(ctx context.Context, ciphertextId decryptionplugin.CiphertextId, ciphertext []byte) ([]byte, error) { + ret := _m.Called(ctx, ciphertextId, ciphertext) + + if len(ret) == 0 { + panic("no return value specified for Decrypt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, decryptionplugin.CiphertextId, []byte) ([]byte, error)); ok { + return rf(ctx, ciphertextId, ciphertext) + } + if rf, ok := ret.Get(0).(func(context.Context, decryptionplugin.CiphertextId, []byte) []byte); ok { + r0 = rf(ctx, ciphertextId, ciphertext) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, decryptionplugin.CiphertextId, []byte) error); ok { + r1 = rf(ctx, ciphertextId, ciphertext) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewDecryptor creates a new instance of Decryptor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewDecryptor(t interface { + mock.TestingT + Cleanup(func()) +}) *Decryptor { + mock := &Decryptor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/threshold/plugin.go b/core/services/ocr2/plugins/threshold/plugin.go new file mode 100644 index 00000000..823dcb06 --- /dev/null +++ b/core/services/ocr2/plugins/threshold/plugin.go @@ -0,0 +1,79 @@ +package threshold + +import ( + "encoding/json" + + "github.com/pkg/errors" + + "github.com/goplugin/libocr/commontypes" + libocr2 "github.com/goplugin/libocr/offchainreporting2plus" + + decryptionPlugin "github.com/goplugin/tdh2/go/ocr2/decryptionplugin" + decryptionPluginConfig "github.com/goplugin/tdh2/go/ocr2/decryptionplugin/config" + "github.com/goplugin/tdh2/go/tdh2/tdh2easy" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +type ThresholdServicesConfig struct { + DecryptionQueue decryptionPlugin.DecryptionQueuingService + KeyshareWithPubKey []byte + ConfigParser decryptionPluginConfig.ConfigParser +} + +func NewThresholdService(sharedOracleArgs *libocr2.OCR2OracleArgs, conf *ThresholdServicesConfig) (job.ServiceCtx, error) { + publicKey, privKeyShare, err := UnmarshalKeys(conf.KeyshareWithPubKey) + if err != nil { + return nil, errors.Wrap(err, "failed to unmarshal threshold key share with public key") + } + + // The key generation tooling ensures that key IDs correspond to the oracle's index, + // therefore an identity mapping is used when creating the threshold reporting plugin. + // maxNumNodes is selected such that it will always be larger than the number of nodes in the DON. 
+ oracleToKeyShare := make(map[commontypes.OracleID]int) + maxNumNodes := 100 + for i := 0; i <= maxNumNodes; i++ { + oracleToKeyShare[commontypes.OracleID(i)] = i + } + + sharedOracleArgs.ReportingPluginFactory = decryptionPlugin.DecryptionReportingPluginFactory{ + DecryptionQueue: conf.DecryptionQueue, + ConfigParser: conf.ConfigParser, + PublicKey: &publicKey, + PrivKeyShare: &privKeyShare, + OracleToKeyShare: oracleToKeyShare, + Logger: sharedOracleArgs.Logger, + } + + thresholdReportingPluginOracle, err := libocr2.NewOracle(*sharedOracleArgs) + if err != nil { + return nil, errors.Wrap(err, "failed to call NewOracle to create a Threshold Reporting Plugin") + } + + return job.NewServiceAdapter(thresholdReportingPluginOracle), nil +} + +type KeyshareWithPubKey struct { + PublicKey json.RawMessage //tdh2easy.PublicKey + PrivateKeyShare json.RawMessage //tdh2easy.PrivateShare +} + +func UnmarshalKeys(raw []byte) (publicKey tdh2easy.PublicKey, privateShare tdh2easy.PrivateShare, err error) { + var kwpk KeyshareWithPubKey + err = json.Unmarshal(raw, &kwpk) + if err != nil { + return publicKey, privateShare, err + } + + err = publicKey.Unmarshal(kwpk.PublicKey) + if err != nil { + return publicKey, privateShare, err + } + + err = privateShare.Unmarshal(kwpk.PrivateKeyShare) + if err != nil { + return publicKey, privateShare, err + } + + return publicKey, privateShare, nil +} diff --git a/core/services/ocr2/testhelpers/digest.go b/core/services/ocr2/testhelpers/digest.go new file mode 100644 index 00000000..67877fc1 --- /dev/null +++ b/core/services/ocr2/testhelpers/digest.go @@ -0,0 +1,29 @@ +package testhelpers + +import ( + "crypto/rand" + "testing" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +// MakeConfigDigest makes config digest +func MakeConfigDigest(t *testing.T) ocrtypes.ConfigDigest { + t.Helper() + b := make([]byte, 32) + _, err := rand.Read(b) + if err != nil { + t.Fatal(err) + } + return MustBytesToConfigDigest(t, b) +} + 
+// MustBytesToConfigDigest returns config digest from bytes +func MustBytesToConfigDigest(t *testing.T, b []byte) ocrtypes.ConfigDigest { + t.Helper() + configDigest, err := ocrtypes.BytesToConfigDigest(b) + if err != nil { + t.Fatal(err) + } + return configDigest +} diff --git a/core/services/ocr2/testhelpers/onchain_config.go b/core/services/ocr2/testhelpers/onchain_config.go new file mode 100644 index 00000000..3f74013f --- /dev/null +++ b/core/services/ocr2/testhelpers/onchain_config.go @@ -0,0 +1,31 @@ +package testhelpers + +import ( + "math/big" + + "github.com/goplugin/libocr/bigbigendian" +) + +func GenerateDefaultOCR2OnchainConfig(minValue *big.Int, maxValue *big.Int) ([]byte, error) { + serializedConfig := make([]byte, 0) + + s1, err := bigbigendian.SerializeSigned(1, big.NewInt(1)) //version + if err != nil { + return nil, err + } + serializedConfig = append(serializedConfig, s1...) + + s2, err := bigbigendian.SerializeSigned(24, minValue) //min + if err != nil { + return nil, err + } + serializedConfig = append(serializedConfig, s2...) + + s3, err := bigbigendian.SerializeSigned(24, maxValue) //max + if err != nil { + return nil, err + } + serializedConfig = append(serializedConfig, s3...) + + return serializedConfig, nil +} diff --git a/core/services/ocr2/validate/config.go b/core/services/ocr2/validate/config.go new file mode 100644 index 00000000..b7a91973 --- /dev/null +++ b/core/services/ocr2/validate/config.go @@ -0,0 +1,80 @@ +package validate + +import ( + "fmt" + "sync" + "time" + + "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" +) + +// OCR2Config contains OCR2 configurations for a job. 
+type OCR2Config interface { + BlockchainTimeout() time.Duration + ContractConfirmations() uint16 + ContractPollInterval() time.Duration + ContractTransmitterTransmitTimeout() time.Duration + DatabaseTimeout() time.Duration + TraceLogging() bool +} + +type InsecureConfig interface { + OCRDevelopmentMode() bool +} + +// ToLocalConfig creates a OCR2 LocalConfig from the global config and the OCR2 spec. +func ToLocalConfig(ocr2Config OCR2Config, insConf InsecureConfig, spec job.OCR2OracleSpec) (types.LocalConfig, error) { + var ( + blockchainTimeout = time.Duration(spec.BlockchainTimeout) + ccConfirmations = spec.ContractConfigConfirmations + ccTrackerPollInterval = time.Duration(spec.ContractConfigTrackerPollInterval) + ) + if blockchainTimeout == 0 { + blockchainTimeout = ocr2Config.BlockchainTimeout() + } + if ccConfirmations == 0 { + ccConfirmations = ocr2Config.ContractConfirmations() + } + if ccTrackerPollInterval == 0 { + ccTrackerPollInterval = ocr2Config.ContractPollInterval() + } + lc := types.LocalConfig{ + BlockchainTimeout: blockchainTimeout, + ContractConfigConfirmations: ccConfirmations, + ContractConfigTrackerPollInterval: ccTrackerPollInterval, + ContractTransmitterTransmitTimeout: ocr2Config.ContractTransmitterTransmitTimeout(), + DatabaseTimeout: ocr2Config.DatabaseTimeout(), + } + if spec.Relay == relay.Solana && env.MedianPlugin.Cmd.Get() != "" { + // Work around for Solana Feeds configured with zero values to support LOOP Plugins. + minOCR2MaxDurationQuery, err := getMinOCR2MaxDurationQuery() + if err != nil { + return types.LocalConfig{}, err + } + lc.MinOCR2MaxDurationQuery = minOCR2MaxDurationQuery + } + if insConf.OCRDevelopmentMode() { + // Skips config validation so we can use any config parameters we want. + // For example to lower contractConfigTrackerPollInterval to speed up tests. 
+ lc.DevelopmentMode = types.EnableDangerousDevelopmentMode + } + return lc, nil +} + +const defaultMinOCR2MaxDurationQuery = 100 * time.Millisecond + +var getMinOCR2MaxDurationQuery = sync.OnceValues(func() (time.Duration, error) { + str := env.MinOCR2MaxDurationQuery.Get() + if str == "" { + return defaultMinOCR2MaxDurationQuery, nil + } + d, err := time.ParseDuration(str) + if err != nil { + return -1, fmt.Errorf("failed to parse %s: %w", env.MinOCR2MaxDurationQuery, err) + } + return d, nil +}) diff --git a/core/services/ocr2/validate/validate.go b/core/services/ocr2/validate/validate.go new file mode 100644 index 00000000..dcbedd82 --- /dev/null +++ b/core/services/ocr2/validate/validate.go @@ -0,0 +1,258 @@ +package validate + +import ( + "encoding/hex" + "encoding/json" + "errors" + "fmt" + + "github.com/lib/pq" + "github.com/pelletier/go-toml" + pkgerrors "github.com/pkg/errors" + libocr2 "github.com/goplugin/libocr/offchainreporting2plus" + + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + dkgconfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/dkg/config" + mercuryconfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/mercury/config" + ocr2vrfconfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2vrf/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" +) + +// ValidatedOracleSpecToml validates an oracle spec that came from TOML +func ValidatedOracleSpecToml(config OCR2Config, insConf InsecureConfig, tomlString string) (job.Job, error) { + var jb = job.Job{} + var spec job.OCR2OracleSpec + tree, err := toml.Load(tomlString) + if err != nil { + return jb, pkgerrors.Wrap(err, "toml error on load") + } + // Note this validates all the fields which implement an UnmarshalText + // i.e. TransmitterAddress, PeerID... 
+ err = tree.Unmarshal(&spec) + if err != nil { + return jb, pkgerrors.Wrap(err, "toml unmarshal error on spec") + } + err = tree.Unmarshal(&jb) + if err != nil { + return jb, pkgerrors.Wrap(err, "toml unmarshal error on job") + } + jb.OCR2OracleSpec = &spec + if jb.OCR2OracleSpec.P2PV2Bootstrappers == nil { + // Empty but non-null, field is non-nullable. + jb.OCR2OracleSpec.P2PV2Bootstrappers = pq.StringArray{} + } + + if jb.Type != job.OffchainReporting2 { + return jb, pkgerrors.Errorf("the only supported type is currently 'offchainreporting2', got %s", jb.Type) + } + if _, ok := relay.SupportedRelays[spec.Relay]; !ok { + return jb, pkgerrors.Errorf("no such relay %v supported", spec.Relay) + } + if len(spec.P2PV2Bootstrappers) > 0 { + _, err = ocrcommon.ParseBootstrapPeers(spec.P2PV2Bootstrappers) + if err != nil { + return jb, err + } + } + + if err = validateSpec(tree, jb); err != nil { + return jb, err + } + if err = validateTimingParameters(config, insConf, spec); err != nil { + return jb, err + } + return jb, nil +} + +// Parameters that must be explicitly set by the operator. 
+var (
+	// params are the keys every OCR2 job spec must set explicitly.
+	params = map[string]struct{}{
+		"type":          {},
+		"schemaVersion": {},
+		"contractID":    {},
+		"relay":         {},
+		"relayConfig":   {},
+		"pluginType":    {},
+		"pluginConfig":  {},
+	}
+	// notExpectedParams are OCR1-era keys that must NOT appear in an OCR2 spec.
+	notExpectedParams = map[string]struct{}{
+		"isBootstrapPeer":       {},
+		"juelsPerFeeCoinSource": {},
+	}
+)
+
+// validateTimingParameters converts the spec's timing settings to a libocr
+// local config and runs libocr's sanity checks over it.
+func validateTimingParameters(ocr2Conf OCR2Config, insConf InsecureConfig, spec job.OCR2OracleSpec) error {
+	lc, err := ToLocalConfig(ocr2Conf, insConf, spec)
+	if err != nil {
+		return err
+	}
+	return libocr2.SanityCheckLocalConfig(lc)
+}
+
+// validateSpec checks that all required keys were explicitly set, that no
+// forbidden (OCR1) keys are present, and dispatches to the validator for the
+// spec's pluginType.
+func validateSpec(tree *toml.Tree, spec job.Job) error {
+	expected, notExpected := ocrcommon.CloneSet(params), ocrcommon.CloneSet(notExpectedParams)
+	if err := ocrcommon.ValidateExplicitlySetKeys(tree, expected, notExpected, "ocr2"); err != nil {
+		return err
+	}
+
+	switch spec.OCR2OracleSpec.PluginType {
+	case types.Median:
+		if spec.Pipeline.Source == "" {
+			return errors.New("no pipeline specified")
+		}
+	case types.DKG:
+		return validateDKGSpec(spec.OCR2OracleSpec.PluginConfig)
+	case types.OCR2VRF:
+		return validateOCR2VRFSpec(spec.OCR2OracleSpec.PluginConfig)
+	case types.OCR2Keeper:
+		return validateOCR2KeeperSpec(spec.OCR2OracleSpec.PluginConfig)
+	case types.Functions:
+		// TODO validator for DR-OCR spec: https://app.shortcut.com/pluginlabs/story/54054/ocr-plugin-for-directrequest-ocr
+		return nil
+	case types.Mercury:
+		// NOTE(review): assumes FeedID is always non-nil for Mercury specs —
+		// confirm upstream population before this dereference.
+		return validateOCR2MercurySpec(spec.OCR2OracleSpec.PluginConfig, *spec.OCR2OracleSpec.FeedID)
+	case types.GenericPlugin:
+		return validateOCR2GenericPluginSpec(spec.OCR2OracleSpec.PluginConfig)
+	case "":
+		return errors.New("no plugin specified")
+	default:
+		return pkgerrors.Errorf("invalid pluginType %s", spec.OCR2OracleSpec.PluginType)
+	}
+
+	return nil
+}
+
+// PipelineSpec pairs a named pipeline with its DOT source.
+type PipelineSpec struct {
+	Name string `json:"name"`
+	Spec string `json:"spec"`
+}
+
+// Config holds the portion of a generic plugin's configuration shared by all
+// generic plugins: named pipelines plus the raw plugin-specific key/value map.
+type Config struct {
+	Pipelines    []PipelineSpec `json:"pipelines"`
+	PluginConfig map[string]any `json:"pluginConfig"`
+}
+
+type innerConfig struct {
+	Command       string            `json:"command"`
+	EnvVars       map[string]string `json:"envVars"`
+	ProviderType  string            `json:"providerType"`
+	PluginName    string            `json:"pluginName"`
+	TelemetryType string            `json:"telemetryType"`
+	OCRVersion    int               `json:"OCRVersion"`
+	Config
+}
+
+// OCR2GenericPluginConfig is the pluginConfig for a generic (LOOP) plugin job.
+type OCR2GenericPluginConfig struct {
+	innerConfig
+}
+
+// UnmarshalJSON decodes the typed fields into innerConfig and additionally
+// retains the full raw key/value map in PluginConfig so plugin-specific keys
+// that are not modelled here survive the round trip.
+func (o *OCR2GenericPluginConfig) UnmarshalJSON(data []byte) error {
+	err := json.Unmarshal(data, &o.innerConfig)
+	if err != nil {
+		// BUGFIX: this previously returned nil, silently swallowing malformed
+		// configs and leaving o partially populated; propagate the error.
+		return err
+	}
+
+	m := map[string]any{}
+	err = json.Unmarshal(data, &m)
+	if err != nil {
+		return err
+	}
+
+	o.PluginConfig = m
+	return nil
+}
+
+// validateOCR2GenericPluginSpec requires a plugin name and telemetry type.
+func validateOCR2GenericPluginSpec(jsonConfig job.JSONConfig) error {
+	p := OCR2GenericPluginConfig{}
+	err := json.Unmarshal(jsonConfig.Bytes(), &p)
+	if err != nil {
+		return err
+	}
+
+	if p.PluginName == "" {
+		return errors.New("generic config invalid: must provide plugin name")
+	}
+
+	if p.TelemetryType == "" {
+		return errors.New("generic config invalid: must provide telemetry type")
+	}
+
+	return nil
+}
+
+// validateDKGSpec checks the three DKG key fields are 32-byte hex strings.
+func validateDKGSpec(jsonConfig job.JSONConfig) error {
+	if jsonConfig == nil {
+		return errors.New("pluginConfig is empty")
+	}
+	var pluginConfig dkgconfig.PluginConfig
+	err := json.Unmarshal(jsonConfig.Bytes(), &pluginConfig)
+	if err != nil {
+		return pkgerrors.Wrap(err, "error while unmarshaling plugin config")
+	}
+	err = validateHexString(pluginConfig.EncryptionPublicKey, 32)
+	if err != nil {
+		return pkgerrors.Wrap(err, "validation error for encryptedPublicKey")
+	}
+	err = validateHexString(pluginConfig.SigningPublicKey, 32)
+	if err != nil {
+		return pkgerrors.Wrap(err, "validation error for signingPublicKey")
+	}
+	err = validateHexString(pluginConfig.KeyID, 32)
+	if err != nil {
+		return pkgerrors.Wrap(err, "validation error for keyID")
+	}
+
+	return nil
+}
+
+// validateHexString checks that val hex-decodes to exactly
+// expectedLengthInBytes bytes.
+func validateHexString(val string, expectedLengthInBytes uint) error {
+	decoded, err := hex.DecodeString(val)
+	if err != nil {
+		return pkgerrors.Wrapf(err, "expected hex string but received %s", val)
+	}
+	if len(decoded) != int(expectedLengthInBytes) {
+		return fmt.Errorf("value: %s has unexpected length. Expected %d bytes", val, expectedLengthInBytes)
+	}
+	return nil
+}
+
+// validateOCR2VRFSpec checks the OCR2VRF plugin config; the embedded DKG key
+// material is validated by reusing validateDKGSpec.
+func validateOCR2VRFSpec(jsonConfig job.JSONConfig) error {
+	if jsonConfig == nil {
+		return errors.New("pluginConfig is empty")
+	}
+	var cfg ocr2vrfconfig.PluginConfig
+	err := json.Unmarshal(jsonConfig.Bytes(), &cfg)
+	if err != nil {
+		return pkgerrors.Wrap(err, "json unmarshal plugin config")
+	}
+	err = validateDKGSpec(job.JSONConfig{
+		"encryptionPublicKey": cfg.DKGEncryptionPublicKey,
+		"signingPublicKey":    cfg.DKGSigningPublicKey,
+		"keyID":               cfg.DKGKeyID,
+	})
+	if err != nil {
+		return err
+	}
+	if cfg.LinkEthFeedAddress == "" {
+		return errors.New("linkEthFeedAddress must be provided")
+	}
+	if cfg.DKGContractAddress == "" {
+		return errors.New("dkgContractAddress must be provided")
+	}
+	return nil
+}
+
+// validateOCR2KeeperSpec currently performs no validation.
+func validateOCR2KeeperSpec(jsonConfig job.JSONConfig) error {
+	return nil
+}
+
+// validateOCR2MercurySpec delegates to the Mercury plugin-config validator.
+func validateOCR2MercurySpec(jsonConfig job.JSONConfig, feedId [32]byte) error {
+	var pluginConfig mercuryconfig.PluginConfig
+	err := json.Unmarshal(jsonConfig.Bytes(), &pluginConfig)
+	if err != nil {
+		return pkgerrors.Wrap(err, "error while unmarshaling plugin config")
+	}
+	return pkgerrors.Wrap(mercuryconfig.ValidatePluginConfig(pluginConfig, feedId), "Mercury PluginConfig is invalid")
+}
diff --git a/core/services/ocr2/validate/validate_test.go b/core/services/ocr2/validate/validate_test.go
new file mode 100644
index 00000000..930bc140
--- /dev/null
+++ b/core/services/ocr2/validate/validate_test.go
@@ -0,0 +1,720 @@
+package validate_test
+
+import (
+	"encoding/json"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/manyminds/api2go/jsonapi"
+	"github.com/pelletier/go-toml"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	commonconfig "github.com/goplugin/plugin-common/pkg/config"
+	
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + medianconfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/median/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" +) + +func TestValidateOracleSpec(t *testing.T) { + var tt = []struct { + name string + toml string + overrides func(c *plugin.Config, s *plugin.Secrets) + assertion func(t *testing.T, os job.Job, err error) + }{ + { + name: "minimal OCR2 oracle spec", + toml: ` +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +[relayConfig] +chainID = 1337 +[pluginConfig] +juelsPerFeeCoinSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + // Should be able to jsonapi marshal/unmarshal the minimum spec. + // This ensures the UnmarshalJSON's defined on the fields handle a min spec correctly. 
+ b, err := jsonapi.Marshal(os.OCR2OracleSpec) + require.NoError(t, err) + var r job.OCR2OracleSpec + err = jsonapi.Unmarshal(b, &r) + require.NoError(t, err) + assert.Equal(t, "median", string(r.PluginType)) + var pc medianconfig.PluginConfig + require.NoError(t, json.Unmarshal(r.PluginConfig.Bytes(), &pc)) + require.NoError(t, medianconfig.ValidatePluginConfig(pc)) + }, + }, + { + name: "decodes valid oracle spec toml", + toml: ` +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = [ +"12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001", +] +ocrKeyBundleID = "73e8966a78ca09bb912e9565cfb79fbe8a6048fab1f0cf49b18047c3895e0447" +monitoringEndpoint = "chain.link:4321" +transmitterID = "0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4" +observationTimeout = "10s" +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +[relayConfig] +chainID = 1337 +[pluginConfig] +juelsPerFeeCoinSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + assert.Equal(t, 1, int(os.SchemaVersion)) + }, + }, + { + name: "raises error on extra keys", + toml: ` +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = [ +"12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001", +] 
+isBootstrapPeer = true +ocrKeyBundleID = "73e8966a78ca09bb912e9565cfb79fbe8a6048fab1f0cf49b18047c3895e0447" +monitoringEndpoint = "chain.link:4321" +transmitterID = "0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4" +observationTimeout = "10s" +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +[relayConfig] +chainID = 1337 +[pluginConfig] +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + assert.Contains(t, err.Error(), "unrecognised key for ocr2 peer: isBootstrapPeer") + }, + }, + { + name: "empty pipeline string", + toml: ` +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = [] +[relayConfig] +chainID = 1337 +[pluginConfig] +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "invalid dot", + toml: ` +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = [] +observationSource = """ +-> +""" +[relayConfig] +chainID = 1337 +[pluginConfig] +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "invalid peer address", + toml: ` +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["/invalid/peer/address"] +observationSource = """ +blah +""" +[relayConfig] +chainID = 1337 +[pluginConfig] +`, + assertion: func(t 
*testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "non-zero timeouts", + toml: ` +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +blockchainTimeout = "0s" +observationSource = """ +blah +""" +[relayConfig] +chainID = 1337 +[pluginConfig] +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "non-zero intervals", + toml: ` +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +observationSource = """ +blah +""" +[relayConfig] +chainID = 1337 +[pluginConfig] +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "broken monitoring endpoint", + toml: ` +type = "offchainreporting2" +pluginType = "median" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = [] +monitoringEndpoint = "\t/fd\2ff )(*&^%$#@" +[relayConfig] +chainID = 1337 +[pluginConfig] +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "invalid escape sequence") + }, + }, + { + name: "toml parse doesn't panic", + toml: string(hexutil.MustDecode("0x2222220d5c22223b22225c0d21222222")), + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "invalid global default", + toml: ` +type = "offchainreporting2" +pluginType = "median" 
+schemaVersion = 1 +maxTaskDuration = "30m" +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pPeerID = "12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq" +p2pv2Bootstrappers = [ +"12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001", +] +monitoringEndpoint = "chain.link:4321" +transmitterID = "0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4" +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +[pluginConfig] +juelsPerFeeCoinSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +[relayConfig] +chainID = 1337 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "database timeout must be between 100ms and 10s, but is currently 20m0s") + }, + overrides: func(c *plugin.Config, s *plugin.Secrets) { + c.OCR2.DatabaseTimeout = commonconfig.MustNewDuration(20 * time.Minute) + }, + }, + { + name: "invalid pluginType", + toml: ` +type = "offchainreporting2" +pluginType = "medion" +schemaVersion = 1 +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +[pluginConfig] +juelsPerFeeCoinSource = """ +-> +""" +[relayConfig] +chainID = 1337 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "invalid pluginType medion") + }, + }, + { + name: "invalid relay", + toml: ` +type = "offchainreporting2" +pluginType = 
"median" +schemaVersion = 1 +relay = "blerg" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +[pluginConfig] +juelsPerFeeCoinSource = """ +ds1 [type=bridge name=voter_turnout]; +""" +[relayConfig] +chainID = 1337 +`, + assertion: func(t *testing.T, os job.Job, err error) { + t.Log("relay", os.OCR2OracleSpec.Relay) + require.Error(t, err) + require.Contains(t, err.Error(), "no such relay blerg supported") + }, + }, + { + name: "valid DKG pluginConfig", + toml: ` +type = "offchainreporting2" +schemaVersion = 1 +name = "dkg" +externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330" +maxTaskDuration = "1s" +contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee" +ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17" +p2pv2Bootstrappers = [ + "12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000" +] +relay = "evm" +pluginType = "dkg" +transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35" + +[relayConfig] +chainID = 4 + +[pluginConfig] +EncryptionPublicKey = "0e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b106069c" +SigningPublicKey = "eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc2432b1" +KeyID = "6f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbf0" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + }, + }, + { + name: "DKG encryption key is not hex", + toml: ` +type = "offchainreporting2" +schemaVersion = 1 +name = "dkg" +externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330" +maxTaskDuration = "1s" +contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee" +ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17" +p2pv2Bootstrappers = [ + 
"12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000" +] +relay = "evm" +pluginType = "dkg" +transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35" + +[relayConfig] +chainID = 4 + +[pluginConfig] +EncryptionPublicKey = "frog" +SigningPublicKey = "eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc2432b1" +KeyID = "6f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbf0" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "expected hex string but received frog") + require.Contains(t, err.Error(), "validation error for encryptedPublicKey") + }, + }, + { + name: "DKG encryption key is too short", + toml: ` +type = "offchainreporting2" +schemaVersion = 1 +name = "dkg" +externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330" +maxTaskDuration = "1s" +contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee" +ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17" +p2pv2Bootstrappers = [ + "12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000" +] +relay = "evm" +pluginType = "dkg" +transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35" + +[relayConfig] +chainID = 4 + +[pluginConfig] +EncryptionPublicKey = "0e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b10606" +SigningPublicKey = "eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc2432b1" +KeyID = "6f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbf0" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "value: 0e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b10606 has unexpected length. 
Expected 32 bytes") + require.Contains(t, err.Error(), "validation error for encryptedPublicKey") + }, + }, + { + name: "DKG signing key is not hex", + toml: ` +type = "offchainreporting2" +schemaVersion = 1 +name = "dkg" +externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330" +maxTaskDuration = "1s" +contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee" +ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17" +p2pv2Bootstrappers = [ + "12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000" +] +relay = "evm" +pluginType = "dkg" +transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35" + +[relayConfig] +chainID = 4 + +[pluginConfig] +EncryptionPublicKey = "0e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b106069c" +SigningPublicKey = "frog" +KeyID = "6f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbf0" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "expected hex string but received frog") + require.Contains(t, err.Error(), "validation error for signingPublicKey") + }, + }, + { + name: "DKG signing key is too short", + toml: ` +type = "offchainreporting2" +schemaVersion = 1 +name = "dkg" +externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330" +maxTaskDuration = "1s" +contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee" +ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17" +p2pv2Bootstrappers = [ + "12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000" +] +relay = "evm" +pluginType = "dkg" +transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35" + +[relayConfig] +chainID = 4 + +[pluginConfig] +EncryptionPublicKey = "0e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b106069c" +SigningPublicKey = "eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc24" +KeyID = "6f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbf0" +`, + assertion: func(t *testing.T, 
os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "value: eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc24 has unexpected length. Expected 32 bytes") + require.Contains(t, err.Error(), "validation error for signingPublicKey") + }, + }, + { + name: "DKG keyID is not hex", + toml: ` +type = "offchainreporting2" +schemaVersion = 1 +name = "dkg" +externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330" +maxTaskDuration = "1s" +contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee" +ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17" +p2pv2Bootstrappers = [ + "12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000" +] +relay = "evm" +pluginType = "dkg" +transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35" + +[relayConfig] +chainID = 4 + +[pluginConfig] +EncryptionPublicKey = "0e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b106069c" +SigningPublicKey = "eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc2432b1" +KeyID = "frog" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "expected hex string but received frog") + require.Contains(t, err.Error(), "validation error for keyID") + }, + }, + { + name: "DKG keyID is too long", + toml: ` +type = "offchainreporting2" +schemaVersion = 1 +name = "dkg" +externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330" +maxTaskDuration = "1s" +contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee" +ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17" +p2pv2Bootstrappers = [ + "12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000" +] +relay = "evm" +pluginType = "dkg" +transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35" + +[relayConfig] +chainID = 4 + +[pluginConfig] +EncryptionPublicKey = "0e86e8cf899ae9a1b43e023bbe8825b103659bb8d6d4e54f6a3cfae7b106069c" +SigningPublicKey = 
"eb62dbd2beb7c1524275a8019022f6ce6a7e86c9e65e3099452a2b96fc2432b1" +KeyID = "6f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbaaaabc" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "value: 6f3b82406688b8ddb944c6f2e6d808f014c8fa8d568d639c25019568c715fbaaaabc has unexpected length. Expected 32 bytes") + require.Contains(t, err.Error(), "validation error for keyID") + }, + }, + { + name: "Generic plugin config validation - nothing provided", + toml: ` +type = "offchainreporting2" +schemaVersion = 1 +name = "dkg" +externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330" +maxTaskDuration = "1s" +contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee" +ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17" +p2pv2Bootstrappers = [ + "12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000" +] +relay = "evm" +pluginType = "plugin" +transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35" + +[relayConfig] +chainID = 4 + +[pluginConfig.coreConfig] +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.ErrorContains(t, err, "must provide plugin name") + }, + }, + { + name: "Generic plugin config validation - plugin name provided", + toml: ` +type = "offchainreporting2" +schemaVersion = 1 +name = "dkg" +externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330" +maxTaskDuration = "1s" +contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee" +ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17" +p2pv2Bootstrappers = [ + "12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000" +] +relay = "evm" +pluginType = "plugin" +transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35" + +[relayConfig] +chainID = 4 + +[pluginConfig] +pluginName = "median" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + require.ErrorContains(t, err, "must 
provide telemetry type") + }, + }, + { + name: "Generic plugin config validation - all provided", + toml: ` +type = "offchainreporting2" +schemaVersion = 1 +name = "dkg" +externalJobID = "6d46d85f-d38c-4f4a-9f00-ac29a25b6330" +maxTaskDuration = "1s" +contractID = "0x3e54dCc49F16411A3aaa4cDbC41A25bCa9763Cee" +ocrKeyBundleID = "08d14c6eed757414d72055d28de6caf06535806c6a14e450f3a2f1c854420e17" +p2pv2Bootstrappers = [ + "12D3KooWSbPRwXY4gxFRJT7LWCnjgGbR4S839nfCRCDgQUiNenxa@127.0.0.1:8000" +] +relay = "evm" +pluginType = "plugin" +transmitterID = "0x74103Cf8b436465870b26aa9Fa2F62AD62b22E35" + +[relayConfig] +chainID = 4 + +[pluginConfig] +pluginName = "median" +telemetryType = "median" +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + c := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Insecure.OCRDevelopmentMode = testutils.Ptr(false) // tests run with OCRDevelopmentMode by default. + if tc.overrides != nil { + tc.overrides(c, s) + } + }) + s, err := validate.ValidatedOracleSpecToml(c.OCR2(), c.Insecure(), tc.toml) + tc.assertion(t, s, err) + }) + } +} + +type envelope struct { + PluginConfig *validate.OCR2GenericPluginConfig +} + +func TestOCR2GenericPluginConfig_Unmarshal(t *testing.T) { + payload := ` +[pluginConfig] +pluginName = "median" +telemetryType = "median" +foo = "bar" + +[[pluginConfig.pipelines]] +name = "default" +spec = "a spec" +` + tree, err := toml.Load(payload) + require.NoError(t, err) + + // Load the toml how we load it in the plugin, i.e. 
convert to
+	// map[string]any first, then treat as JSON
+	o := map[string]any{}
+	err = tree.Unmarshal(&o)
+	require.NoError(t, err)
+
+	b, err := json.Marshal(o)
+	require.NoError(t, err)
+
+	e := &envelope{}
+	err = json.Unmarshal(b, e)
+	require.NoError(t, err)
+
+	pc := e.PluginConfig
+	assert.Equal(t, "bar", pc.PluginConfig["foo"])
+	assert.Len(t, pc.Pipelines, 1)
+	assert.Equal(t, validate.PipelineSpec{Name: "default", Spec: "a spec"}, pc.Pipelines[0])
+	assert.Equal(t, "median", pc.PluginName)
+	assert.Equal(t, "median", pc.TelemetryType)
+}
diff --git a/core/services/ocrbootstrap/database.go b/core/services/ocrbootstrap/database.go
new file mode 100644
index 00000000..7d77dae0
--- /dev/null
+++ b/core/services/ocrbootstrap/database.go
@@ -0,0 +1,123 @@
+package ocrbootstrap
+
+import (
+	"context"
+	"database/sql"
+
+	"github.com/lib/pq"
+	"github.com/pkg/errors"
+	ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types"
+
+	"github.com/goplugin/pluginv3.0/v2/core/logger"
+)
+
+// db implements ocrtypes.ConfigDatabase, scoped to a single bootstrap spec.
+type db struct {
+	*sql.DB
+	oracleSpecID int32
+	lggr         logger.Logger
+}
+
+var _ ocrtypes.ConfigDatabase = &db{}
+
+// NewDB returns a new DB scoped to this oracleSpecID
+func NewDB(sqldb *sql.DB, bootstrapSpecID int32, lggr logger.Logger) *db {
+	return &db{sqldb, bootstrapSpecID, lggr}
+}
+
+// ReadConfig returns the stored contract config for this bootstrap spec, or
+// (nil, nil) when no row exists yet.
+func (d *db) ReadConfig(ctx context.Context) (c *ocrtypes.ContractConfig, err error) {
+	q := d.QueryRowContext(ctx, `
+SELECT
+	config_digest,
+	config_count,
+	signers,
+	transmitters,
+	f,
+	onchain_config,
+	offchain_config_version,
+	offchain_config
+FROM bootstrap_contract_configs
+WHERE bootstrap_spec_id = $1
+LIMIT 1`, d.oracleSpecID)
+
+	c = new(ocrtypes.ContractConfig)
+
+	digest := []byte{}
+	signers := [][]byte{}
+	transmitters := [][]byte{}
+
+	err = q.Scan(
+		&digest,
+		&c.ConfigCount,
+		(*pq.ByteaArray)(&signers),
+		(*pq.ByteaArray)(&transmitters),
+		&c.F,
+		&c.OnchainConfig,
+		&c.OffchainConfigVersion,
+		&c.OffchainConfig,
+	)
+	if errors.Is(err, sql.ErrNoRows) {
+		// No config written yet: not an error.
+		return nil, nil
+	} else if err != nil {
+		return nil, errors.Wrap(err, "ReadConfig failed")
+	}
+
+	copy(c.ConfigDigest[:], digest)
+
+	c.Signers = []ocrtypes.OnchainPublicKey{}
+	for _, s := range signers {
+		c.Signers = append(c.Signers, s)
+	}
+
+	c.Transmitters = []ocrtypes.Account{}
+	for _, t := range transmitters {
+		transmitter := ocrtypes.Account(t)
+		c.Transmitters = append(c.Transmitters, transmitter)
+	}
+
+	return
+}
+
+// WriteConfig upserts c as the contract config for this bootstrap spec.
+func (d *db) WriteConfig(ctx context.Context, c ocrtypes.ContractConfig) error {
+	var signers [][]byte
+	for _, s := range c.Signers {
+		signers = append(signers, []byte(s))
+	}
+	// BUGFIX: transmitters must be encoded like signers. The column is a
+	// bytea[] (ReadConfig scans it via pq.ByteaArray), and lib/pq cannot
+	// encode a []ocrtypes.Account value directly.
+	var transmitters [][]byte
+	for _, t := range c.Transmitters {
+		transmitters = append(transmitters, []byte(t))
+	}
+	_, err := d.ExecContext(ctx, `
+INSERT INTO bootstrap_contract_configs (
+	bootstrap_spec_id,
+	config_digest,
+	config_count,
+	signers,
+	transmitters,
+	f,
+	onchain_config,
+	offchain_config_version,
+	offchain_config,
+	created_at,
+	updated_at
+)
+VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, NOW(), NOW())
+ON CONFLICT (bootstrap_spec_id) DO UPDATE SET
+	config_digest = EXCLUDED.config_digest,
+	config_count = EXCLUDED.config_count,
+	signers = EXCLUDED.signers,
+	transmitters = EXCLUDED.transmitters,
+	f = EXCLUDED.f,
+	onchain_config = EXCLUDED.onchain_config,
+	offchain_config_version = EXCLUDED.offchain_config_version,
+	offchain_config = EXCLUDED.offchain_config,
+	updated_at = NOW()
+`,
+		d.oracleSpecID,
+		c.ConfigDigest,
+		c.ConfigCount,
+		pq.ByteaArray(signers),
+		pq.ByteaArray(transmitters),
+		c.F,
+		c.OnchainConfig,
+		c.OffchainConfigVersion,
+		c.OffchainConfig,
+	)
+
+	return errors.Wrap(err, "WriteConfig failed")
+}
diff --git a/core/services/ocrbootstrap/database_test.go b/core/services/ocrbootstrap/database_test.go
new file mode 100644
index 00000000..5637477e
--- /dev/null
+++ b/core/services/ocrbootstrap/database_test.go
@@ -0,0 +1,97 @@
+package ocrbootstrap_test
+
+import (
+	"testing"
+
+	"github.com/jmoiron/sqlx"
+	"github.com/stretchr/testify/require"
+
+	ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types"
+
+	
"github.com/goplugin/pluginv3.0/v2/core/internal/cltest"
+	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
+	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest"
+	"github.com/goplugin/pluginv3.0/v2/core/logger"
+	"github.com/goplugin/pluginv3.0/v2/core/services/job"
+	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/testhelpers"
+	"github.com/goplugin/pluginv3.0/v2/core/services/ocrbootstrap"
+)
+
+// MustInsertOCRBootstrapSpec inserts a minimal bootstrap_specs row with a
+// fresh contract address and returns the persisted spec, failing the test on
+// any error.
+func MustInsertOCRBootstrapSpec(t *testing.T, db *sqlx.DB) job.BootstrapSpec {
+	t.Helper()
+
+	spec := job.BootstrapSpec{}
+	require.NoError(t, db.Get(&spec, `INSERT INTO bootstrap_specs (
+	relay, relay_config, contract_id, monitoring_endpoint,
+	blockchain_timeout, contract_config_tracker_poll_interval, contract_config_confirmations,
+	created_at, updated_at) VALUES (
+	'evm', '{}', $1, $2, 0, 0, 0, NOW(), NOW()
+) RETURNING *`, cltest.NewEIP55Address().String(), "chain.link:1234"))
+	return spec
+}
+
+// setupDB returns a fresh per-test database handle.
+func setupDB(t *testing.T) *sqlx.DB {
+	t.Helper()
+	return pgtest.NewSqlxDB(t)
+}
+
+// Test_DB_ReadWriteConfig exercises WriteConfig/ReadConfig round-trips,
+// upserts, and spec-ID scoping against a real database.
+func Test_DB_ReadWriteConfig(t *testing.T) {
+	sqlDB := setupDB(t)
+
+	config := ocrtypes.ContractConfig{
+		ConfigDigest:          testhelpers.MakeConfigDigest(t),
+		ConfigCount:           1,
+		Signers:               []ocrtypes.OnchainPublicKey{{0x01}, {0x02}},
+		Transmitters:          []ocrtypes.Account{"account1", "account2"},
+		F:                     79,
+		OnchainConfig:         []byte{0x01, 0x02},
+		OffchainConfigVersion: 111,
+		OffchainConfig:        []byte{0x03, 0x04},
+	}
+	spec := MustInsertOCRBootstrapSpec(t, sqlDB)
+	lggr := logger.TestLogger(t)
+
+	t.Run("reads and writes config", func(t *testing.T) {
+		db := ocrbootstrap.NewDB(sqlDB.DB, spec.ID, lggr)
+
+		err := db.WriteConfig(testutils.Context(t), config)
+		require.NoError(t, err)
+
+		readConfig, err := db.ReadConfig(testutils.Context(t))
+		require.NoError(t, err)
+
+		require.Equal(t, &config, readConfig)
+	})
+
+	t.Run("updates config", func(t *testing.T) {
+		db := ocrbootstrap.NewDB(sqlDB.DB, spec.ID, lggr)
+
+		// A second write for the same spec ID must replace the first
+		// (ON CONFLICT upsert), not create a second row.
+		newConfig := ocrtypes.ContractConfig{
+			ConfigDigest: testhelpers.MakeConfigDigest(t),
+			Signers:      []ocrtypes.OnchainPublicKey{{0x03}},
+			Transmitters: []ocrtypes.Account{"test"},
+		}
+
+		err := db.WriteConfig(testutils.Context(t), newConfig)
+		require.NoError(t, err)
+
+		readConfig, err := db.ReadConfig(testutils.Context(t))
+		require.NoError(t, err)
+
+		require.Equal(t, &newConfig, readConfig)
+	})
+
+	t.Run("does not return result for wrong spec", func(t *testing.T) {
+		db := ocrbootstrap.NewDB(sqlDB.DB, spec.ID, lggr)
+
+		err := db.WriteConfig(testutils.Context(t), config)
+		require.NoError(t, err)
+
+		// A DB scoped to a non-existent spec ID must see no config.
+		db = ocrbootstrap.NewDB(sqlDB.DB, -1, lggr)
+
+		readConfig, err := db.ReadConfig(testutils.Context(t))
+		require.NoError(t, err)
+
+		require.Nil(t, readConfig)
+	})
+}
diff --git a/core/services/ocrbootstrap/delegate.go b/core/services/ocrbootstrap/delegate.go
new file mode 100644
index 00000000..c400df50
--- /dev/null
+++ b/core/services/ocrbootstrap/delegate.go
@@ -0,0 +1,192 @@
+package ocrbootstrap
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/pkg/errors"
+
+	"github.com/jmoiron/sqlx"
+
+	ocr "github.com/goplugin/libocr/offchainreporting2plus"
+
+	commonlogger "github.com/goplugin/plugin-common/pkg/logger"
+	"github.com/goplugin/plugin-common/pkg/loop"
+	"github.com/goplugin/plugin-common/pkg/types"
+
+	"github.com/goplugin/pluginv3.0/v2/core/logger"
+	"github.com/goplugin/pluginv3.0/v2/core/services/job"
+	"github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate"
+	"github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon"
+	"github.com/goplugin/pluginv3.0/v2/core/services/pg"
+	"github.com/goplugin/pluginv3.0/v2/core/services/relay"
+)
+
+// RelayGetter resolves a relay ID to a running relayer instance.
+type RelayGetter interface {
+	Get(relay.ID) (loop.Relayer, error)
+}
+
+// Delegate creates Bootstrap jobs
+type Delegate struct {
+	db                *sqlx.DB
+	jobORM            job.ORM
+	peerWrapper       *ocrcommon.SingletonPeerWrapper
+	ocr2Cfg           validate.OCR2Config
+	insecureCfg       validate.InsecureConfig
+	lggr              logger.SugaredLogger
+	RelayGetter
+	isNewlyCreatedJob bool
+}
+
+// 
Extra fields to enable router proxy contract support. Must match field names of functions' PluginConfig. +type relayConfigRouterContractFields struct { + DONID string `json:"donID"` + ContractVersion uint32 `json:"contractVersion"` + ContractUpdateCheckFrequencySec uint32 `json:"contractUpdateCheckFrequencySec"` +} + +// NewDelegateBootstrap creates a new Delegate +func NewDelegateBootstrap( + db *sqlx.DB, + jobORM job.ORM, + peerWrapper *ocrcommon.SingletonPeerWrapper, + lggr logger.Logger, + ocr2Cfg validate.OCR2Config, + insecureCfg validate.InsecureConfig, + relayers RelayGetter, +) *Delegate { + return &Delegate{ + db: db, + jobORM: jobORM, + peerWrapper: peerWrapper, + lggr: logger.Sugared(lggr), + ocr2Cfg: ocr2Cfg, + insecureCfg: insecureCfg, + RelayGetter: relayers, + } +} + +// JobType satisfies the job.Delegate interface. +func (d *Delegate) JobType() job.Type { + return job.Bootstrap +} + +func (d *Delegate) BeforeJobCreated(spec job.Job) { + d.isNewlyCreatedJob = true +} + +// ServicesForSpec satisfies the job.Delegate interface. +func (d *Delegate) ServicesForSpec(jb job.Job) (services []job.ServiceCtx, err error) { + spec := jb.BootstrapSpec + if spec == nil { + return nil, errors.Errorf("Bootstrap.Delegate expects an *job.BootstrapSpec to be present, got %v", jb) + } + if d.peerWrapper == nil { + return nil, errors.New("cannot setup OCR2 job service, libp2p peer was missing") + } else if !d.peerWrapper.IsStarted() { + return nil, errors.New("peerWrapper is not started. 
OCR2 jobs require a started and running p2p v2 peer") + } + s := spec.AsOCR2Spec() + rid, err := s.RelayID() + if err != nil { + return nil, fmt.Errorf("ServicesForSpec: could not get relayer: %w", err) + } + + relayer, err := d.RelayGetter.Get(rid) + if err != nil { + return nil, fmt.Errorf("ServiceForSpec: failed to get relay %s is it enabled?: %w", rid.Name(), err) + } + if spec.FeedID != nil { + spec.RelayConfig["feedID"] = *spec.FeedID + } + + ctxVals := loop.ContextValues{ + JobID: jb.ID, + JobName: jb.Name.ValueOrZero(), + ContractID: spec.ContractID, + FeedID: spec.FeedID, + } + ctx := ctxVals.ContextWithValues(context.Background()) + + var routerFields relayConfigRouterContractFields + if err = json.Unmarshal(spec.RelayConfig.Bytes(), &routerFields); err != nil { + return nil, err + } + + var configProvider types.ConfigProvider + if routerFields.DONID != "" { + if routerFields.ContractVersion != 1 || routerFields.ContractUpdateCheckFrequencySec == 0 { + return nil, errors.New("invalid router contract config") + } + configProvider, err = relayer.NewPluginProvider( + ctx, + types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, + ContractID: spec.ContractID, + RelayConfig: spec.RelayConfig.Bytes(), + New: d.isNewlyCreatedJob, + ProviderType: string(types.Functions), + }, + types.PluginArgs{ + PluginConfig: spec.RelayConfig.Bytes(), // contains all necessary fields for config provider + }, + ) + } else { + configProvider, err = relayer.NewConfigProvider(ctx, types.RelayArgs{ + ExternalJobID: jb.ExternalJobID, + JobID: jb.ID, + ContractID: spec.ContractID, + New: d.isNewlyCreatedJob, + RelayConfig: spec.RelayConfig.Bytes(), + }) + } + + if err != nil { + return nil, errors.Wrap(err, "error calling 'relayer.NewConfigWatcher'") + } + lc, err := validate.ToLocalConfig(d.ocr2Cfg, d.insecureCfg, spec.AsOCR2Spec()) + if err != nil { + return nil, err + } + if err = ocr.SanityCheckLocalConfig(lc); err != nil { + return nil, err + } + lggr := 
d.lggr.With(ctxVals.Args()...) + lggr.Infow("OCR2 job using local config", + "BlockchainTimeout", lc.BlockchainTimeout, + "ContractConfigConfirmations", lc.ContractConfigConfirmations, + "ContractConfigTrackerPollInterval", lc.ContractConfigTrackerPollInterval, + "ContractTransmitterTransmitTimeout", lc.ContractTransmitterTransmitTimeout, + "DatabaseTimeout", lc.DatabaseTimeout, + ) + bootstrapNodeArgs := ocr.BootstrapperArgs{ + BootstrapperFactory: d.peerWrapper.Peer2, + ContractConfigTracker: configProvider.ContractConfigTracker(), + Database: NewDB(d.db.DB, spec.ID, lggr), + LocalConfig: lc, + Logger: commonlogger.NewOCRWrapper(lggr.Named("OCRBootstrap"), d.ocr2Cfg.TraceLogging(), func(msg string) { + logger.Sugared(lggr).ErrorIf(d.jobORM.RecordError(jb.ID, msg), "unable to record error") + }), + OffchainConfigDigester: configProvider.OffchainConfigDigester(), + } + lggr.Debugw("Launching new bootstrap node", "args", bootstrapNodeArgs) + bootstrapper, err := ocr.NewBootstrapper(bootstrapNodeArgs) + if err != nil { + return nil, errors.Wrap(err, "error calling NewBootstrapNode") + } + return []job.ServiceCtx{configProvider, job.NewServiceAdapter(bootstrapper)}, nil +} + +// AfterJobCreated satisfies the job.Delegate interface. +func (d *Delegate) AfterJobCreated(spec job.Job) { +} + +// BeforeJobDeleted satisfies the job.Delegate interface. +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} + +// OnDeleteJob satisfies the job.Delegate interface. 
+func (d *Delegate) OnDeleteJob(spec job.Job, q pg.Queryer) error { + return nil +} diff --git a/core/services/ocrbootstrap/validate.go b/core/services/ocrbootstrap/validate.go new file mode 100644 index 00000000..cc3c8ae2 --- /dev/null +++ b/core/services/ocrbootstrap/validate.go @@ -0,0 +1,55 @@ +package ocrbootstrap + +import ( + "github.com/pelletier/go-toml" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" +) + +// ValidatedBootstrapSpecToml validates a bootstrap spec that came from TOML +func ValidatedBootstrapSpecToml(tomlString string) (jb job.Job, err error) { + var spec job.BootstrapSpec + tree, err := toml.Load(tomlString) + if err != nil { + return jb, errors.Wrap(err, "toml error on load") + } + // Note this validates all the fields which implement an UnmarshalText + err = tree.Unmarshal(&spec) + if err != nil { + return jb, errors.Wrap(err, "toml unmarshal error on spec") + } + err = tree.Unmarshal(&jb) + if err != nil { + return jb, errors.Wrap(err, "toml unmarshal error on job") + } + jb.BootstrapSpec = &spec + + if jb.Type != job.Bootstrap { + return jb, errors.Errorf("the only supported type is currently 'bootstrap', got %s", jb.Type) + } + expected, notExpected := ocrcommon.CloneSet(params), ocrcommon.CloneSet(nonBootstrapParams) + if err := ocrcommon.ValidateExplicitlySetKeys(tree, expected, notExpected, "bootstrap"); err != nil { + return jb, err + } + + return jb, nil +} + +// Parameters that must be explicitly set by the operator. 
+var ( + params = map[string]struct{}{ + "type": {}, + "schemaVersion": {}, + "contractID": {}, + "relay": {}, + "relayConfig": {}, + } + // Parameters that should not be set + nonBootstrapParams = map[string]struct{}{ + "isBootstrapPeer": {}, + "juelsPerFeeCoinSource": {}, + "observationSource": {}, + } +) diff --git a/core/services/ocrbootstrap/validate_test.go b/core/services/ocrbootstrap/validate_test.go new file mode 100644 index 00000000..ad4f608f --- /dev/null +++ b/core/services/ocrbootstrap/validate_test.go @@ -0,0 +1,75 @@ +package ocrbootstrap + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +func TestValidateBootstrapSpec(t *testing.T) { + var tt = []struct { + name string + toml string + assertion func(t *testing.T, os job.Job, err error) + }{ + { + name: "decodes valid bootstrap spec toml", + toml: ` +type = "bootstrap" +name = "bootstrap" +schemaVersion = 1 +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +monitoringEndpoint = "chain.link:4321" +relay = "evm" +[relayConfig] +chainID = 1337 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + assert.Equal(t, 1, int(os.SchemaVersion)) + }, + }, + { + name: "raises error on missing key", + toml: ` +type = "bootstrap" +schemaVersion = 1 +monitoringEndpoint = "chain.link:4321" +relay = "evm" +[relayConfig] +chainID = 1337 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + assert.Contains(t, err.Error(), "missing required key contractID") + }, + }, + { + name: "raises error on unexpected key", + toml: ` +type = "bootstrap" +schemaVersion = 1 +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +monitoringEndpoint = "chain.link:4321" +isBootstrapPeer = true +relay = "evm" +[relayConfig] +chainID = 1337 +`, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + 
assert.Contains(t, err.Error(), "unrecognised key for bootstrap peer: isBootstrapPeer") + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + s, err := ValidatedBootstrapSpecToml(tc.toml) + tc.assertion(t, s, err) + }) + } +} diff --git a/core/services/ocrcommon/adapters.go b/core/services/ocrcommon/adapters.go new file mode 100644 index 00000000..6096f7e8 --- /dev/null +++ b/core/services/ocrcommon/adapters.go @@ -0,0 +1,73 @@ +package ocrcommon + +import ( + "context" + + "github.com/goplugin/libocr/offchainreporting2plus/ocr3types" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +var _ ocr3types.OnchainKeyring[[]byte] = (*OCR3OnchainKeyringAdapter)(nil) + +type OCR3OnchainKeyringAdapter struct { + o ocrtypes.OnchainKeyring +} + +func NewOCR3OnchainKeyringAdapter(o ocrtypes.OnchainKeyring) *OCR3OnchainKeyringAdapter { + return &OCR3OnchainKeyringAdapter{o} +} + +func (k *OCR3OnchainKeyringAdapter) PublicKey() ocrtypes.OnchainPublicKey { + return k.o.PublicKey() +} + +func (k *OCR3OnchainKeyringAdapter) Sign(digest ocrtypes.ConfigDigest, seqNr uint64, r ocr3types.ReportWithInfo[[]byte]) (signature []byte, err error) { + return k.o.Sign(ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: digest, + Epoch: uint32(seqNr), + Round: 0, + }, + ExtraHash: [32]byte(make([]byte, 32)), + }, r.Report) +} + +func (k *OCR3OnchainKeyringAdapter) Verify(opk ocrtypes.OnchainPublicKey, digest ocrtypes.ConfigDigest, seqNr uint64, ri ocr3types.ReportWithInfo[[]byte], signature []byte) bool { + return k.o.Verify(opk, ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: digest, + Epoch: uint32(seqNr), + Round: 0, + }, + ExtraHash: [32]byte(make([]byte, 32)), + }, ri.Report, signature) +} + +func (k *OCR3OnchainKeyringAdapter) MaxSignatureLength() int { + return k.o.MaxSignatureLength() +} + +var _ ocr3types.ContractTransmitter[[]byte] = 
(*OCR3ContractTransmitterAdapter)(nil) + +type OCR3ContractTransmitterAdapter struct { + ct ocrtypes.ContractTransmitter +} + +func NewOCR3ContractTransmitterAdapter(ct ocrtypes.ContractTransmitter) *OCR3ContractTransmitterAdapter { + return &OCR3ContractTransmitterAdapter{ct} +} + +func (c *OCR3ContractTransmitterAdapter) Transmit(ctx context.Context, digest ocrtypes.ConfigDigest, seqNr uint64, r ocr3types.ReportWithInfo[[]byte], signatures []ocrtypes.AttributedOnchainSignature) error { + return c.ct.Transmit(ctx, ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: digest, + Epoch: uint32(seqNr), + Round: 0, + }, + ExtraHash: [32]byte(make([]byte, 32)), + }, r.Report, signatures) +} + +func (c *OCR3ContractTransmitterAdapter) FromAccount() (ocrtypes.Account, error) { + return c.ct.FromAccount() +} diff --git a/core/services/ocrcommon/adapters_test.go b/core/services/ocrcommon/adapters_test.go new file mode 100644 index 00000000..af462fe7 --- /dev/null +++ b/core/services/ocrcommon/adapters_test.go @@ -0,0 +1,154 @@ +package ocrcommon_test + +import ( + "context" + "fmt" + "reflect" + "testing" + + "github.com/goplugin/libocr/offchainreporting2/types" + "github.com/goplugin/libocr/offchainreporting2plus/ocr3types" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" +) + +var _ ocrtypes.OnchainKeyring = (*fakeOnchainKeyring)(nil) + +var ( + account ocrtypes.Account = "Test-Account" + configDigest = ocrtypes.ConfigDigest([]byte("kKfYauxXBMjuP5EuuyacN6BwCfKJnP6d")) + seqNr uint64 = 11 + rwi = ocr3types.ReportWithInfo[[]byte]{ + Report: []byte("report"), + Info: []byte("info"), + } + signatures = []types.AttributedOnchainSignature{{ + Signature: []byte("signature1"), + Signer: 1, + }, { + Signature: []byte("signature2"), + Signer: 2, + }} + pubKey = ocrtypes.OnchainPublicKey("pub-key") + maxSignatureLength = 12 + sigs 
= []byte("some-signatures") +) + +type fakeOnchainKeyring struct { +} + +func (f fakeOnchainKeyring) PublicKey() ocrtypes.OnchainPublicKey { + return pubKey +} + +func (f fakeOnchainKeyring) Sign(rc ocrtypes.ReportContext, r ocrtypes.Report) (signature []byte, err error) { + if !reflect.DeepEqual(rc.ConfigDigest, configDigest) { + return nil, fmt.Errorf("expected configDigest %v but got %v", configDigest, rc.ReportTimestamp.ConfigDigest) + } + + if rc.Epoch != uint32(seqNr) { + return nil, fmt.Errorf("expected Epoch %v but got %v", seqNr, rc.Epoch) + } + + if rc.Round != 0 { + return nil, fmt.Errorf("expected Round %v but got %v", 0, rc.Round) + } + + if !reflect.DeepEqual(r, rwi.Report) { + return nil, fmt.Errorf("expected Report %v but got %v", rwi.Report, r) + } + return nil, nil +} + +func (f fakeOnchainKeyring) Verify(pk ocrtypes.OnchainPublicKey, rc ocrtypes.ReportContext, r ocrtypes.Report, signature []byte) bool { + if !reflect.DeepEqual(pk, pubKey) { + return false + } + + if !reflect.DeepEqual(rc.ConfigDigest, configDigest) { + return false + } + + if rc.Epoch != uint32(seqNr) { + return false + } + + if rc.Round != 0 { + return false + } + + if !reflect.DeepEqual(r, rwi.Report) { + return false + } + + if !reflect.DeepEqual(signature, sigs) { + return false + } + + return true +} + +func (f fakeOnchainKeyring) MaxSignatureLength() int { + return maxSignatureLength +} + +func TestOCR3OnchainKeyringAdapter(t *testing.T) { + kr := ocrcommon.NewOCR3OnchainKeyringAdapter(fakeOnchainKeyring{}) + + _, err := kr.Sign(configDigest, seqNr, rwi) + require.NoError(t, err) + require.True(t, kr.Verify(pubKey, configDigest, seqNr, rwi, sigs)) + + require.Equal(t, pubKey, kr.PublicKey()) + require.Equal(t, maxSignatureLength, kr.MaxSignatureLength()) +} + +var _ ocrtypes.ContractTransmitter = (*fakeContractTransmitter)(nil) + +type fakeContractTransmitter struct { +} + +func (f fakeContractTransmitter) Transmit(ctx context.Context, rc ocrtypes.ReportContext, report 
ocrtypes.Report, s []ocrtypes.AttributedOnchainSignature) error { + + if !reflect.DeepEqual(report, rwi.Report) { + return fmt.Errorf("expected Report %v but got %v", rwi.Report, report) + } + + if !reflect.DeepEqual(s, signatures) { + return fmt.Errorf("expected signatures %v but got %v", signatures, s) + } + + if !reflect.DeepEqual(rc.ConfigDigest, configDigest) { + return fmt.Errorf("expected configDigest %v but got %v", configDigest, rc.ReportTimestamp.ConfigDigest) + } + + if rc.Epoch != uint32(seqNr) { + return fmt.Errorf("expected Epoch %v but got %v", seqNr, rc.Epoch) + } + + if rc.Round != 0 { + return fmt.Errorf("expected Round %v but got %v", 0, rc.Round) + } + + return nil +} + +func (f fakeContractTransmitter) LatestConfigDigestAndEpoch(ctx context.Context) (configDigest ocrtypes.ConfigDigest, epoch uint32, err error) { + panic("not implemented") +} + +func (f fakeContractTransmitter) FromAccount() (ocrtypes.Account, error) { + return account, nil +} + +func TestContractTransmitter(t *testing.T) { + ct := ocrcommon.NewOCR3ContractTransmitterAdapter(fakeContractTransmitter{}) + + require.NoError(t, ct.Transmit(context.Background(), configDigest, seqNr, rwi, signatures)) + + a, err := ct.FromAccount() + require.NoError(t, err) + require.Equal(t, a, account) +} diff --git a/core/services/ocrcommon/arbitrum_block_translator.go b/core/services/ocrcommon/arbitrum_block_translator.go new file mode 100644 index 00000000..430fea03 --- /dev/null +++ b/core/services/ocrcommon/arbitrum_block_translator.go @@ -0,0 +1,268 @@ +package ocrcommon + +import ( + "context" + "fmt" + "math/big" + "sort" + "sync" + "time" + + "github.com/pkg/errors" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// ArbitrumBlockTranslator uses Arbitrum's special L1BlockNumber to optimise log 
lookups +// Performance matters here hence aggressive use of the cache +// We want to minimise fetches because calling eth_getBlockByNumber is +// relatively expensive +type ArbitrumBlockTranslator struct { + ethClient evmclient.Client + lggr logger.Logger + // l2->l1 cache + cache map[int64]int64 + cacheMu sync.RWMutex + l2Locks utils.KeyedMutex +} + +// NewArbitrumBlockTranslator returns a concrete ArbitrumBlockTranslator +func NewArbitrumBlockTranslator(ethClient evmclient.Client, lggr logger.Logger) *ArbitrumBlockTranslator { + return &ArbitrumBlockTranslator{ + ethClient, + lggr.Named("ArbitrumBlockTranslator"), + make(map[int64]int64), + sync.RWMutex{}, + utils.KeyedMutex{}, + } +} + +// NumberToQueryRange implements BlockTranslator interface +func (a *ArbitrumBlockTranslator) NumberToQueryRange(ctx context.Context, changedInL1Block uint64) (fromBlock *big.Int, toBlock *big.Int) { + var err error + fromBlock, toBlock, err = a.BinarySearch(ctx, int64(changedInL1Block)) + if err != nil { + a.lggr.Warnw("Failed to binary search L2->L1, falling back to slow scan over entire chain", "err", err) + return big.NewInt(0), nil + } + + return +} + +// BinarySearch uses both cache and RPC calls to find the smallest possible range of L2 block numbers that encompasses the given L1 block number +// +// Imagine as a virtual array of L1 block numbers indexed by L2 block numbers +// L1 values are likely duplicated so it looks something like +// [42, 42, 42, 42, 42, 155, 155, 155, 430, 430, 430, 430, 430, ...] 
// Theoretical max difference between L1 values is typically about 5, "worst case" is 6545 but can be arbitrarily high if sequencer is broken
// The returned range of L2s from leftmost thru rightmost represent all possible L2s that correspond to the L1 value we are looking for
// nil can be returned as a rightmost value if the range has no upper bound
func (a *ArbitrumBlockTranslator) BinarySearch(ctx context.Context, targetL1 int64) (l2lowerBound *big.Int, l2upperBound *big.Int, err error) {
	mark := time.Now()
	// n counts the RPC head lookups performed (cache misses + the initial
	// HeadByNumber call); logged on exit for observability.
	var n int
	defer func() {
		duration := time.Since(mark)
		a.lggr.Debugw(fmt.Sprintf("BinarySearch completed in %s with %d total lookups", duration, n), "finishedIn", duration, "err", err, "nLookups", n)
	}()
	var h *evmtypes.Head

	// l2lower..l2upper is the inclusive range of L2 block numbers in which
	// transactions that called block.number will return the given L1 block
	// number
	var l2lower int64
	var l2upper int64

	var skipUpperBound bool

	{
		var maybeL2Upper *int64
		// Narrow the initial bounds from cached values where possible.
		l2lower, maybeL2Upper = a.reverseLookup(targetL1)
		if maybeL2Upper != nil {
			l2upper = *maybeL2Upper
		} else {
			// Initial query to get highest L1 and L2 numbers
			h, err = a.ethClient.HeadByNumber(ctx, nil)
			n++
			if err != nil {
				return nil, nil, err
			}
			if h == nil {
				return nil, nil, errors.New("got nil head")
			}
			if !h.L1BlockNumber.Valid {
				return nil, nil, errors.New("head was missing L1 block number")
			}
			currentL1 := h.L1BlockNumber.Int64
			currentL2 := h.Number

			a.cachePut(currentL2, currentL1)

			// NOTE: This case shouldn't ever happen but we ought to handle it in the least broken way possible
			if targetL1 > currentL1 {
				// real upper must always be nil, we can skip the upper limit part of the binary search
				a.lggr.Debugf("BinarySearch target of %d is above current L1 block number of %d, using nil for upper bound", targetL1, currentL1)
				return big.NewInt(currentL2), nil, nil
			} else if targetL1 == currentL1 {
				// NOTE: If the latest seen L2 block corresponds to the target L1
				// block, we have to leave the top end of the range open because future
				// L2 blocks can be produced that would also match
				skipUpperBound = true
			}
			l2upper = currentL2
		}
	}

	a.lggr.Debugf("TRACE: BinarySearch starting search for L2 range wrapping L1 block number %d between bounds [%d, %d]", targetL1, l2lower, l2upper)

	// exactMatch is set by either search closure when some L2 maps exactly to
	// targetL1; if neither search ever sees it, the target L1 has no L2
	// representation and we error below.
	var exactMatch bool

	// LEFT EDGE
	// First, use binary search to find the smallest L2 block number for which L1 >= changedInBlock
	// This L2 block number represents the lower bound on a range of L2s corresponding to this L1
	{
		l2lower, err = search(l2lower, l2upper+1, func(l2 int64) (bool, error) {
			l1, miss, err2 := a.arbL2ToL1(ctx, l2)
			if miss {
				n++
			}
			if err2 != nil {
				return false, err2
			}
			if targetL1 == l1 {
				exactMatch = true
			}
			return l1 >= targetL1, nil
		})
		if err != nil {
			return nil, nil, err
		}
	}

	// RIGHT EDGE
	// Second, use binary search again to find the smallest L2 block number for which L1 > changedInBlock
	// Now we can subtract one to get the largest L2 that corresponds to this L1
	// This can be skipped if we know we are already at the top of the range, and the upper limit will be returned as nil
	if !skipUpperBound {
		var r int64
		r, err = search(l2lower, l2upper+1, func(l2 int64) (bool, error) {
			l1, miss, err2 := a.arbL2ToL1(ctx, l2)
			if miss {
				n++
			}
			if err2 != nil {
				return false, err2
			}
			if targetL1 == l1 {
				exactMatch = true
			}
			return l1 > targetL1, nil
		})
		if err != nil {
			return nil, nil, err
		}
		l2upper = r - 1
		l2upperBound = big.NewInt(l2upper)
	}

	// NOTE: We expect either left or right search to make an exact match, if they don't something has gone badly wrong
	if !exactMatch {
		return nil, nil, errors.Errorf("target L1 block number %d is not represented by any L2 block", targetL1)
	}
	return big.NewInt(l2lower), l2upperBound, nil
}
lower and upper bounds for an L2 based on cache data +func (a *ArbitrumBlockTranslator) reverseLookup(targetL1 int64) (from int64, to *int64) { + type val struct { + l1 int64 + l2 int64 + } + vals := make([]val, 0) + + a.cacheMu.RLock() + defer a.cacheMu.RUnlock() + + for l2, l1 := range a.cache { + vals = append(vals, val{l1, l2}) + } + + sort.Slice(vals, func(i, j int) bool { return vals[i].l1 < vals[j].l1 }) + + for _, val := range vals { + if val.l1 < targetL1 { + from = val.l2 + } else if val.l1 > targetL1 && to == nil { + // workaround golang footgun; can't take a pointer to val + l2 := val.l2 + to = &l2 + } + } + return +} + +func (a *ArbitrumBlockTranslator) arbL2ToL1(ctx context.Context, l2 int64) (l1 int64, cacheMiss bool, err error) { + // This locking block synchronises access specifically around one l2 number so we never fetch the same data concurrently + // One thread will wait while the other fetches + unlock := a.l2Locks.LockInt64(l2) + defer unlock() + + var exists bool + if l1, exists = a.cacheGet(l2); exists { + return l1, false, err + } + + h, err := a.ethClient.HeadByNumber(ctx, big.NewInt(l2)) + if err != nil { + return 0, true, err + } + if h == nil { + return 0, true, errors.New("got nil head") + } + if !h.L1BlockNumber.Valid { + return 0, true, errors.New("head was missing L1 block number") + } + l1 = h.L1BlockNumber.Int64 + + a.cachePut(l2, l1) + + return l1, true, nil +} + +func (a *ArbitrumBlockTranslator) cacheGet(l2 int64) (l1 int64, exists bool) { + a.cacheMu.RLock() + defer a.cacheMu.RUnlock() + l1, exists = a.cache[l2] + return +} + +func (a *ArbitrumBlockTranslator) cachePut(l2, l1 int64) { + a.cacheMu.Lock() + defer a.cacheMu.Unlock() + a.cache[l2] = l1 +} + +// stolen from golang standard library and modified for 64-bit ints, +// customisable range and erroring function +// see: https://golang.org/src/sort/search.go +func search(i, j int64, f func(int64) (bool, error)) (int64, error) { + // Define f(-1) == false and f(n) == true. 
+ // Invariant: f(i-1) == false, f(j) == true. + for i < j { + h := int64(uint64(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + is, err := f(h) + if err != nil { + return 0, err + } + if !is { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. + return i, nil +} diff --git a/core/services/ocrcommon/arbitrum_block_translator_test.go b/core/services/ocrcommon/arbitrum_block_translator_test.go new file mode 100644 index 00000000..ec3cab3a --- /dev/null +++ b/core/services/ocrcommon/arbitrum_block_translator_test.go @@ -0,0 +1,254 @@ +package ocrcommon_test + +import ( + "math/big" + mrand "math/rand" + "testing" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/null" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestArbitrumBlockTranslator_BinarySearch(t *testing.T) { + t.Parallel() + + blocks := generateDeterministicL2Blocks() + lggr := logger.TestLogger(t) + + t.Run("returns range of current to nil if target is above current block number", func(t *testing.T) { + client := evmtest.NewEthClientMock(t) + abt := ocrcommon.NewArbitrumBlockTranslator(client, lggr) + ctx := testutils.Context(t) + + var changedInL1Block int64 = 5541 + + latestBlock := blocks[1000] + client.On("HeadByNumber", ctx, (*big.Int)(nil)).Return(&latestBlock, nil).Once() + + from, to, err := abt.BinarySearch(ctx, changedInL1Block) + require.NoError(t, err) + + 
assert.Equal(t, big.NewInt(1000), from) + assert.Equal(t, (*big.Int)(nil), to) + }) + + t.Run("returns error if changedInL1Block is less than the lowest possible L1 block on the L2 chain", func(t *testing.T) { + client := evmtest.NewEthClientMock(t) + abt := ocrcommon.NewArbitrumBlockTranslator(client, lggr) + ctx := testutils.Context(t) + + var changedInL1Block int64 = 42 + + latestBlock := blocks[1000] + client.On("HeadByNumber", ctx, (*big.Int)(nil)).Return(&latestBlock, nil).Once() + + tmp := new(evmtypes.Head) + client.On("HeadByNumber", ctx, mock.AnythingOfType("*big.Int")).Return(tmp, nil).Run(func(args mock.Arguments) { + *tmp = blocks[args[1].(*big.Int).Int64()] + }) + + _, _, err := abt.BinarySearch(ctx, changedInL1Block) + + assert.EqualError(t, err, "target L1 block number 42 is not represented by any L2 block") + }) + + t.Run("returns error if L1 block number does not exist for any range of L2 blocks", func(t *testing.T) { + client := evmtest.NewEthClientMock(t) + abt := ocrcommon.NewArbitrumBlockTranslator(client, lggr) + ctx := testutils.Context(t) + + var changedInL1Block int64 = 5043 + + latestBlock := blocks[1000] + client.On("HeadByNumber", ctx, (*big.Int)(nil)).Return(&latestBlock, nil).Once() + + tmp := new(evmtypes.Head) + client.On("HeadByNumber", ctx, mock.AnythingOfType("*big.Int")).Return(tmp, nil).Run(func(args mock.Arguments) { + *tmp = blocks[args[1].(*big.Int).Int64()] + }) + + _, _, err := abt.BinarySearch(ctx, changedInL1Block) + + assert.EqualError(t, err, "target L1 block number 5043 is not represented by any L2 block") + }) + + t.Run("returns correct range of L2 blocks that encompasses all possible blocks that might contain the given L1 block number", func(t *testing.T) { + client := evmtest.NewEthClientMock(t) + abt := ocrcommon.NewArbitrumBlockTranslator(client, lggr) + ctx := testutils.Context(t) + + var changedInL1Block int64 = 5042 + + latestBlock := blocks[1000] + client.On("HeadByNumber", ctx, 
(*big.Int)(nil)).Return(&latestBlock, nil).Once() + + tmp := new(evmtypes.Head) + client.On("HeadByNumber", ctx, mock.AnythingOfType("*big.Int")).Return(tmp, nil).Run(func(args mock.Arguments) { + h := blocks[args[1].(*big.Int).Int64()] + *tmp = h + }) + + from, to, err := abt.BinarySearch(ctx, changedInL1Block) + require.NoError(t, err) + + assert.Equal(t, big.NewInt(98), from) + assert.Equal(t, big.NewInt(137), to) + }) + + t.Run("handles edge case where L1 is the smallest possible value", func(t *testing.T) { + client := evmtest.NewEthClientMock(t) + abt := ocrcommon.NewArbitrumBlockTranslator(client, lggr) + ctx := testutils.Context(t) + + var changedInL1Block int64 = 5000 + + latestBlock := blocks[1000] + client.On("HeadByNumber", ctx, (*big.Int)(nil)).Return(&latestBlock, nil).Once() + + tmp := new(evmtypes.Head) + client.On("HeadByNumber", ctx, mock.AnythingOfType("*big.Int")).Return(tmp, nil).Run(func(args mock.Arguments) { + h := blocks[args[1].(*big.Int).Int64()] + *tmp = h + }) + + from, to, err := abt.BinarySearch(ctx, changedInL1Block) + require.NoError(t, err) + + assert.Equal(t, big.NewInt(0), from) + assert.Equal(t, big.NewInt(16), to) + }) + + t.Run("leaves upper bound unbounded where L1 is the largest possible value", func(t *testing.T) { + client := evmtest.NewEthClientMock(t) + abt := ocrcommon.NewArbitrumBlockTranslator(client, lggr) + ctx := testutils.Context(t) + + var changedInL1Block int64 = 5540 + + latestBlock := blocks[1000] + client.On("HeadByNumber", ctx, (*big.Int)(nil)).Return(&latestBlock, nil).Once() + + tmp := new(evmtypes.Head) + client.On("HeadByNumber", ctx, mock.AnythingOfType("*big.Int")).Return(tmp, nil).Run(func(args mock.Arguments) { + h := blocks[args[1].(*big.Int).Int64()] + *tmp = h + }) + + from, to, err := abt.BinarySearch(ctx, changedInL1Block) + require.NoError(t, err) + + assert.Equal(t, big.NewInt(986), from) + assert.Equal(t, (*big.Int)(nil), to) + }) + + t.Run("caches duplicate lookups", func(t *testing.T) { + 
client := evmtest.NewEthClientMock(t) + abt := ocrcommon.NewArbitrumBlockTranslator(client, lggr) + ctx := testutils.Context(t) + + var changedInL1Block int64 = 5042 + + latestBlock := blocks[1000] + // Latest is never cached + client.On("HeadByNumber", ctx, (*big.Int)(nil)).Return(&latestBlock, nil).Once() + + tmp := new(evmtypes.Head) + client.On("HeadByNumber", ctx, mock.AnythingOfType("*big.Int")).Times(20+18+14).Return(tmp, nil).Run(func(args mock.Arguments) { + h := blocks[args[1].(*big.Int).Int64()] + *tmp = h + }) + + // First search, nothing cached (total 21 - bsearch 20) + from, to, err := abt.BinarySearch(ctx, changedInL1Block) + require.NoError(t, err) + + assert.Equal(t, big.NewInt(98), from) + assert.Equal(t, big.NewInt(137), to) + + var changedInL1Block2 int64 = 5351 + + // Second search, initial lookup cached + space reduced to [549, 1000] (total 18 - bsearch 18) + from, to, err = abt.BinarySearch(ctx, changedInL1Block2) + require.NoError(t, err) + + assert.Equal(t, big.NewInt(670), from) + assert.Equal(t, big.NewInt(697), to) + + var changedInL1Block3 int64 = 5193 + + // Third search, initial lookup cached + space reduced to [323, 500] (total 14 - bsearch 14) + from, to, err = abt.BinarySearch(ctx, changedInL1Block3) + require.NoError(t, err) + + assert.Equal(t, big.NewInt(403), from) + assert.Equal(t, big.NewInt(448), to) + }) + + // TODO: test edge cases - at left edge of range, at right edge +} + +func TestArbitrumBlockTranslator_NumberToQueryRange(t *testing.T) { + t.Parallel() + + lggr := logger.TestLogger(t) + + t.Run("falls back to whole range on error", func(t *testing.T) { + client := evmtest.NewEthClientMock(t) + abt := ocrcommon.NewArbitrumBlockTranslator(client, lggr) + ctx := testutils.Context(t) + var changedInL1Block uint64 = 5042 + + client.On("HeadByNumber", ctx, (*big.Int)(nil)).Return(nil, errors.New("something exploded")).Once() + + from, to := abt.NumberToQueryRange(ctx, changedInL1Block) + assert.Equal(t, big.NewInt(0), from) 
+ assert.Equal(t, (*big.Int)(nil), to) + }) + + t.Run("falls back to whole range on missing head", func(t *testing.T) { + client := evmtest.NewEthClientMock(t) + abt := ocrcommon.NewArbitrumBlockTranslator(client, lggr) + ctx := testutils.Context(t) + var changedInL1Block uint64 = 5042 + + client.On("HeadByNumber", ctx, (*big.Int)(nil)).Return(nil, nil).Once() + + from, to := abt.NumberToQueryRange(ctx, changedInL1Block) + assert.Equal(t, big.NewInt(0), from) + assert.Equal(t, (*big.Int)(nil), to) + }) +} + +func generateDeterministicL2Blocks() (heads []evmtypes.Head) { + source := mrand.NewSource(0) + deterministicRand := mrand.New(source) + l2max := 1000 + var l1BlockNumber int64 = 5000 + var parentHash common.Hash + for i := 0; i <= l2max; i++ { + head := evmtypes.Head{ + Number: int64(i), + L1BlockNumber: null.Int64From(l1BlockNumber), + Hash: utils.NewHash(), + ParentHash: parentHash, + } + parentHash = head.Hash + heads = append(heads, head) + if deterministicRand.Intn(10) == 1 { // 10% chance + // l1 number should jump by "about" 5 but this is variable depending on whether the sequencer got to post, network conditions etc + l1BlockNumber += int64(deterministicRand.Intn(6) + 4) + } + } + return +} diff --git a/core/services/ocrcommon/block_translator.go b/core/services/ocrcommon/block_translator.go new file mode 100644 index 00000000..18d98761 --- /dev/null +++ b/core/services/ocrcommon/block_translator.go @@ -0,0 +1,37 @@ +package ocrcommon + +import ( + "context" + "math/big" + + "github.com/goplugin/pluginv3.0/v2/common/config" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// BlockTranslator converts emitted block numbers (from block.number) into a +// block number range suitable for query in FilterLogs +type BlockTranslator interface { + NumberToQueryRange(ctx context.Context, changedInL1Block uint64) 
(fromBlock *big.Int, toBlock *big.Int) +} + +// NewBlockTranslator returns the block translator for the given chain +func NewBlockTranslator(cfg Config, client evmclient.Client, lggr logger.Logger) BlockTranslator { + switch cfg.ChainType() { + case config.ChainArbitrum: + return NewArbitrumBlockTranslator(client, lggr) + case config.ChainXDai, config.ChainMetis, config.ChainOptimismBedrock: + fallthrough + default: + return &l1BlockTranslator{} + } +} + +type l1BlockTranslator struct{} + +func (*l1BlockTranslator) NumberToQueryRange(_ context.Context, changedInL1Block uint64) (fromBlock *big.Int, toBlock *big.Int) { + return big.NewInt(int64(changedInL1Block)), big.NewInt(int64(changedInL1Block)) +} + +func (*l1BlockTranslator) OnNewLongestChain(context.Context, *evmtypes.Head) {} diff --git a/core/services/ocrcommon/block_translator_test.go b/core/services/ocrcommon/block_translator_test.go new file mode 100644 index 00000000..16204ba4 --- /dev/null +++ b/core/services/ocrcommon/block_translator_test.go @@ -0,0 +1,47 @@ +package ocrcommon_test + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + v2 "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest/v2" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" +) + +func Test_BlockTranslator(t *testing.T) { + t.Parallel() + + ethClient := evmtest.NewEthClientMock(t) + ctx := testutils.Context(t) + lggr := logger.TestLogger(t) + + t.Run("for L1 chains, returns the block changed argument", func(t *testing.T) { + bt := ocrcommon.NewBlockTranslator(v2.ChainEthMainnet(t).EVM(), ethClient, lggr) + + from, to := bt.NumberToQueryRange(ctx, 42) + + assert.Equal(t, big.NewInt(42), from) + assert.Equal(t, big.NewInt(42), to) + }) + + t.Run("for optimism, uses the default translator", func(t 
*testing.T) { + bt := ocrcommon.NewBlockTranslator(v2.ChainOptimismMainnet(t).EVM(), ethClient, lggr) + from, to := bt.NumberToQueryRange(ctx, 42) + assert.Equal(t, big.NewInt(42), from) + assert.Equal(t, big.NewInt(42), to) + + }) + + t.Run("for arbitrum, returns the ArbitrumBlockTranslator", func(t *testing.T) { + bt := ocrcommon.NewBlockTranslator(v2.ChainArbitrumMainnet(t).EVM(), ethClient, lggr) + assert.IsType(t, &ocrcommon.ArbitrumBlockTranslator{}, bt) + + bt = ocrcommon.NewBlockTranslator(v2.ChainArbitrumRinkeby(t).EVM(), ethClient, lggr) + assert.IsType(t, &ocrcommon.ArbitrumBlockTranslator{}, bt) + }) +} diff --git a/core/services/ocrcommon/config.go b/core/services/ocrcommon/config.go new file mode 100644 index 00000000..f91a76f4 --- /dev/null +++ b/core/services/ocrcommon/config.go @@ -0,0 +1,40 @@ +package ocrcommon + +import ( + "github.com/pkg/errors" + + "github.com/goplugin/libocr/commontypes" + + "github.com/goplugin/pluginv3.0/v2/common/config" +) + +type Config interface { + ChainType() config.ChainType +} + +func ParseBootstrapPeers(peers []string) (bootstrapPeers []commontypes.BootstrapperLocator, err error) { + for _, bs := range peers { + var bsl commontypes.BootstrapperLocator + err = bsl.UnmarshalText([]byte(bs)) + if err != nil { + return nil, err + } + bootstrapPeers = append(bootstrapPeers, bsl) + } + return +} + +// GetValidatedBootstrapPeers will error unless at least one valid bootstrap peer is found +func GetValidatedBootstrapPeers(specPeers []string, configPeers []commontypes.BootstrapperLocator) ([]commontypes.BootstrapperLocator, error) { + bootstrapPeers, err := ParseBootstrapPeers(specPeers) + if err != nil { + return nil, err + } + if len(bootstrapPeers) == 0 { + if len(configPeers) == 0 { + return nil, errors.New("no bootstrappers found") + } + return configPeers, nil + } + return bootstrapPeers, nil +} diff --git a/core/services/ocrcommon/data_source.go b/core/services/ocrcommon/data_source.go new file mode 100644 index 
00000000..e80fdc01 --- /dev/null +++ b/core/services/ocrcommon/data_source.go @@ -0,0 +1,223 @@ +package ocrcommon + +import ( + "context" + "math/big" + "sync" + "time" + + "github.com/pkg/errors" + ocr1types "github.com/goplugin/libocr/offchainreporting/types" + "github.com/goplugin/libocr/offchainreporting2/reportingplugin/median" + ocr2types "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// inMemoryDataSource is an abstraction over the process of initiating a pipeline run +// and returning the result +type inMemoryDataSource struct { + pipelineRunner pipeline.Runner + jb job.Job + spec pipeline.Spec + lggr logger.Logger + + current bridges.BridgeMetaData + mu sync.RWMutex + + chEnhancedTelemetry chan<- EnhancedTelemetryData +} + +type Saver interface { + Save(run *pipeline.Run) +} + +type dataSourceBase struct { + inMemoryDataSource + saver Saver +} + +// dataSource implements dataSourceBase with the proper Observe return type for ocr1 +type dataSource struct { + dataSourceBase +} + +// dataSourceV2 implements dataSourceBase with the proper Observe return type for ocr2 +type dataSourceV2 struct { + dataSourceBase +} + +// ObservationTimestamp abstracts ocr2types.ReportTimestamp and ocr1types.ReportTimestamp +type ObservationTimestamp struct { + Round uint8 + Epoch uint32 + ConfigDigest string +} + +func NewDataSourceV1(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, s Saver, chEnhancedTelemetry chan EnhancedTelemetryData) ocr1types.DataSource { + return &dataSource{ + dataSourceBase: dataSourceBase{ + inMemoryDataSource: inMemoryDataSource{ + pipelineRunner: pr, + jb: jb, + spec: spec, + lggr: lggr, + chEnhancedTelemetry: chEnhancedTelemetry, + }, + 
saver: s, + }, + } +} + +func NewDataSourceV2(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, s Saver, enhancedTelemChan chan EnhancedTelemetryData) median.DataSource { + return &dataSourceV2{ + dataSourceBase: dataSourceBase{ + inMemoryDataSource: inMemoryDataSource{ + pipelineRunner: pr, + jb: jb, + spec: spec, + lggr: lggr, + chEnhancedTelemetry: enhancedTelemChan, + }, + saver: s, + }, + } +} + +func NewInMemoryDataSource(pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger) median.DataSource { + return &inMemoryDataSource{ + pipelineRunner: pr, + jb: jb, + spec: spec, + lggr: lggr, + } +} + +var _ ocr1types.DataSource = (*dataSource)(nil) + +func (ds *inMemoryDataSource) updateAnswer(a *big.Int) { + ds.mu.Lock() + defer ds.mu.Unlock() + ds.current = bridges.BridgeMetaData{ + LatestAnswer: a, + UpdatedAt: big.NewInt(time.Now().Unix()), + } +} + +func (ds *inMemoryDataSource) currentAnswer() (*big.Int, *big.Int) { + ds.mu.RLock() + defer ds.mu.RUnlock() + return ds.current.LatestAnswer, ds.current.UpdatedAt +} + +// The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod). +// Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod. 
+func (ds *inMemoryDataSource) executeRun(ctx context.Context, timestamp ObservationTimestamp) (*pipeline.Run, pipeline.FinalResult, error) { + md, err := bridges.MarshalBridgeMetaData(ds.currentAnswer()) + if err != nil { + ds.lggr.Warnw("unable to attach metadata for run", "err", err) + } + + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "jb": map[string]interface{}{ + "databaseID": ds.jb.ID, + "externalJobID": ds.jb.ExternalJobID, + "name": ds.jb.Name.ValueOrZero(), + }, + "jobRun": map[string]interface{}{ + "meta": md, + }, + }) + + run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars, ds.lggr) + if err != nil { + return nil, pipeline.FinalResult{}, errors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) + } + finalResult := trrs.FinalResult(ds.lggr) + promSetBridgeParseMetrics(ds, &trrs) + promSetFinalResultMetrics(ds, &finalResult) + + if ShouldCollectEnhancedTelemetry(&ds.jb) { + EnqueueEnhancedTelem(ds.chEnhancedTelemetry, EnhancedTelemetryData{ + TaskRunResults: trrs, + FinalResults: finalResult, + RepTimestamp: timestamp, + }) + } else { + ds.lggr.Infow("Enhanced telemetry is disabled for job", "job", ds.jb.Name) + } + + return run, finalResult, err +} + +// parse uses the FinalResult into a big.Int and stores it in the bridge metadata +func (ds *inMemoryDataSource) parse(finalResult pipeline.FinalResult) (*big.Int, error) { + result, err := finalResult.SingularResult() + if err != nil { + return nil, errors.Wrapf(err, "error getting singular result for job ID %v", ds.spec.JobID) + } + + if result.Error != nil { + return nil, result.Error + } + + asDecimal, err := utils.ToDecimal(result.Value) + if err != nil { + return nil, errors.Wrap(err, "cannot convert observation to decimal") + } + ds.updateAnswer(asDecimal.BigInt()) + return asDecimal.BigInt(), nil +} + +// Observe without saving to DB +func (ds *inMemoryDataSource) Observe(ctx context.Context, timestamp ocr2types.ReportTimestamp) (*big.Int, error) { + _, 
finalResult, err := ds.executeRun(ctx, ObservationTimestamp{ + Round: timestamp.Round, + Epoch: timestamp.Epoch, + ConfigDigest: timestamp.ConfigDigest.Hex(), + }) + if err != nil { + return nil, err + } + return ds.parse(finalResult) +} + +func (ds *dataSourceBase) observe(ctx context.Context, timestamp ObservationTimestamp) (*big.Int, error) { + run, finalResult, err := ds.inMemoryDataSource.executeRun(ctx, timestamp) + if err != nil { + return nil, err + } + + // Save() does the database write in a non-blocking fashion + // so we can return the observation results immediately. + // This is helpful in the case of a blocking API call, where + // we reach the passed in context deadline and we want to + // immediately return any result we have and do not want to have + // a db write block that. + ds.saver.Save(run) + + return ds.inMemoryDataSource.parse(finalResult) +} + +// Observe with saving to DB, satisfies ocr1 interface +func (ds *dataSource) Observe(ctx context.Context, timestamp ocr1types.ReportTimestamp) (ocr1types.Observation, error) { + return ds.observe(ctx, ObservationTimestamp{ + Round: timestamp.Round, + Epoch: timestamp.Epoch, + ConfigDigest: timestamp.ConfigDigest.Hex(), + }) +} + +// Observe with saving to DB, satisfies ocr2 interface +func (ds *dataSourceV2) Observe(ctx context.Context, timestamp ocr2types.ReportTimestamp) (*big.Int, error) { + return ds.observe(ctx, ObservationTimestamp{ + Round: timestamp.Round, + Epoch: timestamp.Epoch, + ConfigDigest: timestamp.ConfigDigest.Hex(), + }) +} diff --git a/core/services/ocrcommon/data_source_test.go b/core/services/ocrcommon/data_source_test.go new file mode 100644 index 00000000..b3950ba1 --- /dev/null +++ b/core/services/ocrcommon/data_source_test.go @@ -0,0 +1,144 @@ +package ocrcommon_test + +import ( + "math/big" + "testing" + + promtestutil "github.com/prometheus/client_golang/prometheus/testutil" + ocrtypes "github.com/goplugin/libocr/offchainreporting/types" + 
"github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/spf13/cast" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + pipelinemocks "github.com/goplugin/pluginv3.0/v2/core/services/pipeline/mocks" +) + +var ( + mockValue = "100000000" + jsonParseTaskValue = "1234" +) + +func Test_InMemoryDataSource(t *testing.T) { + runner := pipelinemocks.NewRunner(t) + runner.On("ExecuteRun", mock.Anything, mock.AnythingOfType("pipeline.Spec"), mock.Anything, mock.Anything). + Return(&pipeline.Run{}, pipeline.TaskRunResults{ + { + Result: pipeline.Result{ + Value: mockValue, + Error: nil, + }, + Task: &pipeline.HTTPTask{}, + }, + }, nil) + + ds := ocrcommon.NewInMemoryDataSource(runner, job.Job{}, pipeline.Spec{}, logger.TestLogger(t)) + val, err := ds.Observe(testutils.Context(t), types.ReportTimestamp{}) + require.NoError(t, err) + assert.Equal(t, mockValue, val.String()) // returns expected value after pipeline run +} + +func Test_InMemoryDataSourceWithProm(t *testing.T) { + runner := pipelinemocks.NewRunner(t) + + jsonParseTask := pipeline.JSONParseTask{ + BaseTask: pipeline.BaseTask{}, + } + bridgeTask := pipeline.BridgeTask{ + BaseTask: pipeline.BaseTask{}, + } + + bridgeTask.BaseTask = pipeline.NewBaseTask(1, "ds1", []pipeline.TaskDependency{{ + PropagateResult: true, + InputTask: nil, + }}, []pipeline.Task{&jsonParseTask}, 1) + + jsonParseTask.BaseTask = pipeline.NewBaseTask(2, "ds1_parse", []pipeline.TaskDependency{{ + PropagateResult: false, + InputTask: &bridgeTask, + }}, []pipeline.Task{}, 2) + + runner.On("ExecuteRun", mock.Anything, mock.AnythingOfType("pipeline.Spec"), mock.Anything, 
mock.Anything). + Return(&pipeline.Run{}, pipeline.TaskRunResults([]pipeline.TaskRunResult{ + { + Task: &bridgeTask, + Result: pipeline.Result{}, + }, + { + Result: pipeline.Result{Value: jsonParseTaskValue}, + Task: &jsonParseTask, + }, + }), nil) + + ds := ocrcommon.NewInMemoryDataSource( + runner, + job.Job{ + Type: "offchainreporting", + }, + pipeline.Spec{}, + logger.TestLogger(t), + ) + val, err := ds.Observe(testutils.Context(t), types.ReportTimestamp{}) + require.NoError(t, err) + + assert.Equal(t, jsonParseTaskValue, val.String()) // returns expected value after pipeline run + assert.Equal(t, cast.ToFloat64(jsonParseTaskValue), promtestutil.ToFloat64(ocrcommon.PromOcrMedianValues)) + assert.Equal(t, cast.ToFloat64(jsonParseTaskValue), promtestutil.ToFloat64(ocrcommon.PromBridgeJsonParseValues)) + +} + +type mockSaver struct { + r *pipeline.Run +} + +func (ms *mockSaver) Save(r *pipeline.Run) { + ms.r = r +} + +func Test_NewDataSourceV2(t *testing.T) { + runner := pipelinemocks.NewRunner(t) + ms := &mockSaver{} + runner.On("ExecuteRun", mock.Anything, mock.AnythingOfType("pipeline.Spec"), mock.Anything, mock.Anything). + Return(&pipeline.Run{}, pipeline.TaskRunResults{ + { + Result: pipeline.Result{ + Value: mockValue, + Error: nil, + }, + Task: &pipeline.HTTPTask{}, + }, + }, nil) + + ds := ocrcommon.NewDataSourceV2(runner, job.Job{}, pipeline.Spec{}, logger.TestLogger(t), ms, nil) + val, err := ds.Observe(testutils.Context(t), types.ReportTimestamp{}) + require.NoError(t, err) + assert.Equal(t, mockValue, val.String()) // returns expected value after pipeline run + assert.Equal(t, &pipeline.Run{}, ms.r) // expected data properly passed to channel +} + +func Test_NewDataSourceV1(t *testing.T) { + runner := pipelinemocks.NewRunner(t) + ms := &mockSaver{} + runner.On("ExecuteRun", mock.Anything, mock.AnythingOfType("pipeline.Spec"), mock.Anything, mock.Anything). 
+ Return(&pipeline.Run{}, pipeline.TaskRunResults{ + { + Result: pipeline.Result{ + Value: mockValue, + Error: nil, + }, + Task: &pipeline.HTTPTask{}, + }, + }, nil) + + ds := ocrcommon.NewDataSourceV1(runner, job.Job{}, pipeline.Spec{}, logger.TestLogger(t), ms, nil) + val, err := ds.Observe(testutils.Context(t), ocrtypes.ReportTimestamp{}) + require.NoError(t, err) + assert.Equal(t, mockValue, new(big.Int).Set(val).String()) // returns expected value after pipeline run + assert.Equal(t, &pipeline.Run{}, ms.r) // expected data properly passed to channel +} diff --git a/core/services/ocrcommon/discoverer_database.go b/core/services/ocrcommon/discoverer_database.go new file mode 100644 index 00000000..cdb22cbe --- /dev/null +++ b/core/services/ocrcommon/discoverer_database.go @@ -0,0 +1,63 @@ +package ocrcommon + +import ( + "context" + "database/sql" + + "github.com/lib/pq" + "github.com/pkg/errors" + "go.uber.org/multierr" + + ocrnetworking "github.com/goplugin/libocr/networking/types" +) + +var _ ocrnetworking.DiscovererDatabase = &DiscovererDatabase{} + +type DiscovererDatabase struct { + db *sql.DB + peerID string +} + +func NewDiscovererDatabase(db *sql.DB, peerID string) *DiscovererDatabase { + return &DiscovererDatabase{ + db, + peerID, + } +} + +// StoreAnnouncement has key-value-store semantics and stores a peerID (key) and an associated serialized +// announcement (value). 
+func (d *DiscovererDatabase) StoreAnnouncement(ctx context.Context, peerID string, ann []byte) error { + _, err := d.db.ExecContext(ctx, ` +INSERT INTO ocr_discoverer_announcements (local_peer_id, remote_peer_id, ann, created_at, updated_at) +VALUES ($1,$2,$3,NOW(),NOW()) ON CONFLICT (local_peer_id, remote_peer_id) DO UPDATE SET +ann = EXCLUDED.ann, +updated_at = EXCLUDED.updated_at +;`, d.peerID, peerID, ann) + return errors.Wrap(err, "DiscovererDatabase failed to StoreAnnouncement") +} + +// ReadAnnouncements returns one serialized announcement (if available) for each of the peerIDs in the form of a map +// keyed by each announcement's corresponding peer ID. +func (d *DiscovererDatabase) ReadAnnouncements(ctx context.Context, peerIDs []string) (results map[string][]byte, err error) { + rows, err := d.db.QueryContext(ctx, ` +SELECT remote_peer_id, ann FROM ocr_discoverer_announcements WHERE remote_peer_id = ANY($1) AND local_peer_id = $2`, pq.Array(peerIDs), d.peerID) + if err != nil { + return nil, errors.Wrap(err, "DiscovererDatabase failed to ReadAnnouncements") + } + defer func() { err = multierr.Combine(err, rows.Close()) }() + results = make(map[string][]byte) + for rows.Next() { + var peerID string + var ann []byte + err = rows.Scan(&peerID, &ann) + if err != nil { + return + } + results[peerID] = ann + } + if err = rows.Err(); err != nil { + return + } + return results, nil +} diff --git a/core/services/ocrcommon/discoverer_database_test.go b/core/services/ocrcommon/discoverer_database_test.go new file mode 100644 index 00000000..a74ab12e --- /dev/null +++ b/core/services/ocrcommon/discoverer_database_test.go @@ -0,0 +1,93 @@ +package ocrcommon_test + +import ( + "crypto/ed25519" + "crypto/rand" + "testing" + + ragep2ptypes "github.com/goplugin/libocr/ragep2p/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" +) + +func Test_DiscovererDatabase(t *testing.T) { + db := pgtest.NewSqlDB(t) + + localPeerID1 := mustRandomP2PPeerID(t) + localPeerID2 := mustRandomP2PPeerID(t) + + dd1 := ocrcommon.NewDiscovererDatabase(db, localPeerID1.Raw()) + dd2 := ocrcommon.NewDiscovererDatabase(db, localPeerID2.Raw()) + + ctx := testutils.Context(t) + + t.Run("StoreAnnouncement writes a value", func(t *testing.T) { + ann := []byte{1, 2, 3} + err := dd1.StoreAnnouncement(ctx, "remote1", ann) + assert.NoError(t, err) + + // test upsert + ann = []byte{4, 5, 6} + err = dd1.StoreAnnouncement(ctx, "remote1", ann) + assert.NoError(t, err) + + // write a different value + ann = []byte{7, 8, 9} + err = dd1.StoreAnnouncement(ctx, "remote2", ann) + assert.NoError(t, err) + }) + + t.Run("ReadAnnouncements reads values filtered by given peerIDs", func(t *testing.T) { + announcements, err := dd1.ReadAnnouncements(ctx, []string{"remote1", "remote2"}) + require.NoError(t, err) + + assert.Len(t, announcements, 2) + assert.Equal(t, []byte{4, 5, 6}, announcements["remote1"]) + assert.Equal(t, []byte{7, 8, 9}, announcements["remote2"]) + + announcements, err = dd1.ReadAnnouncements(ctx, []string{"remote1"}) + require.NoError(t, err) + + assert.Len(t, announcements, 1) + assert.Equal(t, []byte{4, 5, 6}, announcements["remote1"]) + }) + + t.Run("is scoped to local peer ID", func(t *testing.T) { + ann := []byte{10, 11, 12} + err := dd2.StoreAnnouncement(ctx, "remote1", ann) + assert.NoError(t, err) + + announcements, err := dd2.ReadAnnouncements(ctx, []string{"remote1"}) + require.NoError(t, err) + assert.Len(t, announcements, 1) + assert.Equal(t, []byte{10, 11, 12}, announcements["remote1"]) + + announcements, err = dd1.ReadAnnouncements(ctx, []string{"remote1"}) + require.NoError(t, err) + assert.Len(t, 
announcements, 1) + assert.Equal(t, []byte{4, 5, 6}, announcements["remote1"]) + }) + + t.Run("persists data across restarts", func(t *testing.T) { + dd3 := ocrcommon.NewDiscovererDatabase(db, localPeerID1.Raw()) + + announcements, err := dd3.ReadAnnouncements(ctx, []string{"remote1"}) + require.NoError(t, err) + assert.Len(t, announcements, 1) + assert.Equal(t, []byte{4, 5, 6}, announcements["remote1"]) + + }) +} + +func mustRandomP2PPeerID(t *testing.T) p2pkey.PeerID { + _, p2pPrivkey, err := ed25519.GenerateKey(rand.Reader) + require.NoError(t, err) + id, err := ragep2ptypes.PeerIDFromPrivateKey(p2pPrivkey) + require.NoError(t, err) + return p2pkey.PeerID(id) +} diff --git a/core/services/ocrcommon/helpers_test.go b/core/services/ocrcommon/helpers_test.go new file mode 100644 index 00000000..27c8807f --- /dev/null +++ b/core/services/ocrcommon/helpers_test.go @@ -0,0 +1,7 @@ +package ocrcommon + +import ocrnetworking "github.com/goplugin/libocr/networking" + +func (p *SingletonPeerWrapper) PeerConfig() (ocrnetworking.PeerConfig, error) { + return p.peerConfig() +} diff --git a/core/services/ocrcommon/peer_wrapper.go b/core/services/ocrcommon/peer_wrapper.go new file mode 100644 index 00000000..ac185da0 --- /dev/null +++ b/core/services/ocrcommon/peer_wrapper.go @@ -0,0 +1,165 @@ +package ocrcommon + +import ( + "context" + "io" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + + ocrnetworking "github.com/goplugin/libocr/networking" + ocr1types "github.com/goplugin/libocr/offchainreporting/types" + ocr2types "github.com/goplugin/libocr/offchainreporting2plus/types" + + commonlogger "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + 
"github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type PeerWrapperOCRConfig interface { + TraceLogging() bool +} + +type ( + peerAdapterOCR1 struct { + ocr1types.BinaryNetworkEndpointFactory + ocr1types.BootstrapperFactory + } + + peerAdapterOCR2 struct { + ocr2types.BinaryNetworkEndpointFactory + ocr2types.BootstrapperFactory + } + + // SingletonPeerWrapper manages all libocr peers for the application + SingletonPeerWrapper struct { + services.StateMachine + keyStore keystore.Master + p2pCfg config.P2P + ocrCfg PeerWrapperOCRConfig + dbConfig pg.QConfig + db *sqlx.DB + lggr logger.Logger + PeerID p2pkey.PeerID + + // Used at shutdown to stop all of this peer's goroutines + peerCloser io.Closer + + // OCR1 peer adapter + Peer1 *peerAdapterOCR1 + + // OCR2 peer adapter + Peer2 *peerAdapterOCR2 + } +) + +func ValidatePeerWrapperConfig(config config.P2P) error { + if len(config.V2().ListenAddresses()) == 0 { + return errors.New("no P2P.V2.ListenAddresses specified") + } + return nil +} + +// NewSingletonPeerWrapper creates a new peer based on the p2p keys in the keystore +// It currently only supports one peerID/key +// It should be fairly easy to modify it to support multiple peerIDs/keys using e.g. a map +func NewSingletonPeerWrapper(keyStore keystore.Master, p2pCfg config.P2P, ocrCfg PeerWrapperOCRConfig, dbConfig pg.QConfig, db *sqlx.DB, lggr logger.Logger) *SingletonPeerWrapper { + return &SingletonPeerWrapper{ + keyStore: keyStore, + p2pCfg: p2pCfg, + ocrCfg: ocrCfg, + dbConfig: dbConfig, + db: db, + lggr: lggr.Named("SingletonPeerWrapper"), + } +} + +func (p *SingletonPeerWrapper) IsStarted() bool { return p.Ready() == nil } + +// Start starts SingletonPeerWrapper. 
+func (p *SingletonPeerWrapper) Start(context.Context) error { + return p.StartOnce("SingletonPeerWrapper", func() error { + peerConfig, err := p.peerConfig() + if err != nil { + return err + } + + p.lggr.Debugw("Creating OCR/OCR2 Peer", "config", peerConfig) + // Note: creates and starts the peer + peer, err := ocrnetworking.NewPeer(peerConfig) + if err != nil { + return errors.Wrap(err, "error calling NewPeer") + } + p.Peer1 = &peerAdapterOCR1{ + peer.OCR1BinaryNetworkEndpointFactory(), + peer.OCR1BootstrapperFactory(), + } + p.Peer2 = &peerAdapterOCR2{ + peer.OCR2BinaryNetworkEndpointFactory(), + peer.OCR2BootstrapperFactory(), + } + p.peerCloser = peer + return nil + }) +} + +func (p *SingletonPeerWrapper) peerConfig() (ocrnetworking.PeerConfig, error) { + // Peer wrapper panics if no p2p keys are present. + if ks, err := p.keyStore.P2P().GetAll(); err == nil && len(ks) == 0 { + return ocrnetworking.PeerConfig{}, errors.Errorf("No P2P keys found in keystore. Peer wrapper will not be fully initialized") + } + key, err := p.keyStore.P2P().GetOrFirst(p.p2pCfg.PeerID()) + if err != nil { + return ocrnetworking.PeerConfig{}, err + } + p.PeerID = key.PeerID() + + discovererDB := NewDiscovererDatabase(p.db.DB, p.PeerID.Raw()) + + config := p.p2pCfg + peerConfig := ocrnetworking.PeerConfig{ + PrivKey: key.PrivKey, + Logger: commonlogger.NewOCRWrapper(p.lggr, p.ocrCfg.TraceLogging(), func(string) {}), + + // V2 config + V2ListenAddresses: config.V2().ListenAddresses(), + V2AnnounceAddresses: config.V2().AnnounceAddresses(), // NewPeer will handle the fallback to listen addresses for us. 
+ V2DeltaReconcile: config.V2().DeltaReconcile().Duration(), + V2DeltaDial: config.V2().DeltaDial().Duration(), + V2DiscovererDatabase: discovererDB, + + V2EndpointConfig: ocrnetworking.EndpointConfigV2{ + IncomingMessageBufferSize: config.IncomingMessageBufferSize(), + OutgoingMessageBufferSize: config.OutgoingMessageBufferSize(), + }, + } + + return peerConfig, nil +} + +// Close closes the peer and peerstore +func (p *SingletonPeerWrapper) Close() error { + return p.StopOnce("SingletonPeerWrapper", func() (err error) { + if p.peerCloser != nil { + err = p.peerCloser.Close() + } + return err + }) +} + +func (p *SingletonPeerWrapper) Name() string { + return p.lggr.Name() +} + +func (p *SingletonPeerWrapper) HealthReport() map[string]error { + return map[string]error{p.Name(): p.Healthy()} +} + +func (p *SingletonPeerWrapper) P2PConfig() config.P2P { + return p.p2pCfg +} diff --git a/core/services/ocrcommon/peer_wrapper_test.go b/core/services/ocrcommon/peer_wrapper_test.go new file mode 100644 index 00000000..58d146bd --- /dev/null +++ b/core/services/ocrcommon/peer_wrapper_test.go @@ -0,0 +1,151 @@ +package ocrcommon_test + +import ( + "fmt" + "testing" + "time" + + "github.com/hashicorp/consul/sdk/freeport" + ragep2ptypes "github.com/goplugin/libocr/ragep2p/types" + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" +) + +func 
Test_SingletonPeerWrapper_Start(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + + var peerID ragep2ptypes.PeerID + require.NoError(t, peerID.UnmarshalText([]byte(configtest.DefaultPeerID))) + + t.Run("with no p2p keys returns error", func(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.P2P.V2.Enabled = ptr(true) + }) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + pw := ocrcommon.NewSingletonPeerWrapper(keyStore, cfg.P2P(), cfg.OCR(), cfg.Database(), db, logger.TestLogger(t)) + require.Contains(t, pw.Start(testutils.Context(t)).Error(), "No P2P keys found in keystore. Peer wrapper will not be fully initialized") + }) + + t.Run("with one p2p key and matching P2P.PeerID returns nil", func(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.P2P.V2.Enabled = ptr(true) + }) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + k, err := keyStore.P2P().Create() + require.NoError(t, err) + + cfg = configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + c.P2P.PeerID = ptr(k.PeerID()) + }) + pw := ocrcommon.NewSingletonPeerWrapper(keyStore, cfg.P2P(), cfg.OCR(), cfg.Database(), db, logger.TestLogger(t)) + + servicetest.Run(t, pw) + require.Equal(t, k.PeerID(), pw.PeerID) + }) + + t.Run("with one p2p key and mismatching P2P.PeerID returns error", func(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.P2P.V2.Enabled = ptr(true) + c.P2P.PeerID = ptr(p2pkey.PeerID(peerID)) + }) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + + _, err := keyStore.P2P().Create() + require.NoError(t, err) + + pw := ocrcommon.NewSingletonPeerWrapper(keyStore, cfg.P2P(), cfg.OCR(), cfg.Database(), db, logger.TestLogger(t)) + + require.Contains(t, 
pw.Start(testutils.Context(t)).Error(), "unable to find P2P key with id") + }) + + t.Run("with multiple p2p keys and valid P2P.PeerID returns nil", func(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + }) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + k2, err := keyStore.P2P().Create() + require.NoError(t, err) + + cfg = configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + c.P2P.PeerID = ptr(k2.PeerID()) + }) + + pw := ocrcommon.NewSingletonPeerWrapper(keyStore, cfg.P2P(), cfg.OCR(), cfg.Database(), db, logger.TestLogger(t)) + + servicetest.Run(t, pw) + require.Equal(t, k2.PeerID(), pw.PeerID) + }) + + t.Run("with multiple p2p keys and mismatching P2P.PeerID returns error", func(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + c.P2P.PeerID = ptr(p2pkey.PeerID(peerID)) + }) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + + _, err := keyStore.P2P().Create() + require.NoError(t, err) + + pw := ocrcommon.NewSingletonPeerWrapper(keyStore, cfg.P2P(), cfg.OCR(), cfg.Database(), db, logger.TestLogger(t)) + + require.Contains(t, pw.Start(testutils.Context(t)).Error(), "unable to find P2P key with id") + }) +} + +func Test_SingletonPeerWrapper_Close(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + + cfg := configtest.NewGeneralConfig(t, nil) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + k, err := keyStore.P2P().Create() + require.NoError(t, err) + + cfg = configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.P2P.V2.Enabled = 
ptr(true) + c.P2P.PeerID = ptr(k.PeerID()) + c.P2P.V2.DeltaDial = commonconfig.MustNewDuration(100 * time.Millisecond) + c.P2P.V2.DeltaReconcile = commonconfig.MustNewDuration(1 * time.Second) + + p2paddresses := []string{ + "127.0.0.1:17193", + } + c.P2P.V2.ListenAddresses = ptr(p2paddresses) + c.P2P.V2.AnnounceAddresses = ptr(p2paddresses) + + }) + + pw := ocrcommon.NewSingletonPeerWrapper(keyStore, cfg.P2P(), cfg.OCR(), cfg.Database(), db, logger.TestLogger(t)) + + require.NoError(t, pw.Start(testutils.Context(t))) + require.True(t, pw.IsStarted(), "Should have started successfully") + require.NoError(t, pw.Close()) + + /* If peer is still stuck in listenLoop, we will get a bind error trying to start on the same port */ + require.False(t, pw.IsStarted()) + pw = ocrcommon.NewSingletonPeerWrapper(keyStore, cfg.P2P(), cfg.OCR(), cfg.Database(), db, logger.TestLogger(t)) + require.NoError(t, pw.Start(testutils.Context(t)), "Should have shut down gracefully, and be able to re-use same port") + require.True(t, pw.IsStarted(), "Should have started successfully") + require.NoError(t, pw.Close()) +} + +func ptr[T any](t T) *T { return &t } diff --git a/core/services/ocrcommon/prom.go b/core/services/ocrcommon/prom.go new file mode 100644 index 00000000..9a22e66c --- /dev/null +++ b/core/services/ocrcommon/prom.go @@ -0,0 +1,64 @@ +package ocrcommon + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/spf13/cast" + + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + PromBridgeJsonParseValues = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "bridge_json_parse_values", + Help: "Values returned by json_parse for bridge task", + }, + []string{"job_id", "job_name", "bridge_name", "task_id"}) + + PromOcrMedianValues = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ocr_median_values", + Help: "Median value 
returned by ocr job",
	},
		[]string{"job_id", "job_name"})
)

// promSetBridgeParseMetrics will parse pipeline.TaskRunResults for bridge tasks, get the pipeline.TaskTypeJSONParse task and update prometheus metrics with it
func promSetBridgeParseMetrics(ds *inMemoryDataSource, trrs *pipeline.TaskRunResults) {
	// Only OCR and OCR2 job types report these metrics; everything else is a no-op.
	if ds.jb.Type.String() != pipeline.OffchainReportingJobType && ds.jb.Type.String() != pipeline.OffchainReporting2JobType {
		return
	}

	for _, trr := range *trrs {
		if trr.Task.Type() == pipeline.TaskTypeBridge {
			// The value we want lives in the JSON-parse task that directly follows the bridge task.
			nextTask := trrs.GetNextTaskOf(trr)

			if nextTask != nil && nextTask.Task.Type() == pipeline.TaskTypeJSONParse {
				fetchedValue := cast.ToFloat64(nextTask.Result.Value)

				// Labels: job id, job name, bridge name, dot id of the bridge task.
				PromBridgeJsonParseValues.WithLabelValues(fmt.Sprintf("%d", ds.jb.ID), ds.jb.Name.String, trr.Task.(*pipeline.BridgeTask).Name, trr.Task.DotID()).Set(fetchedValue)
			}
		}
	}
}

// promSetFinalResultMetrics will check if job is pipeline.OffchainReportingJobType or pipeline.OffchainReporting2JobType then send the pipeline.FinalResult to prometheus
func promSetFinalResultMetrics(ds *inMemoryDataSource, finalResult *pipeline.FinalResult) {
	if ds.jb.Type.String() != pipeline.OffchainReportingJobType && ds.jb.Type.String() != pipeline.OffchainReporting2JobType {
		return
	}

	// Best-effort: silently skip runs whose final result is not a single value…
	singularResult, err := finalResult.SingularResult()
	if err != nil {
		return
	}

	// …or whose value is not numeric.
	finalResultDecimal, err := utils.ToDecimal(singularResult.Value)
	if err != nil {
		return
	}
	finalResultFloat, _ := finalResultDecimal.Float64() // precision loss is acceptable for a gauge
	PromOcrMedianValues.WithLabelValues(fmt.Sprintf("%d", ds.jb.ID), ds.jb.Name.String).Set(finalResultFloat)
}
diff --git a/core/services/ocrcommon/run_saver.go b/core/services/ocrcommon/run_saver.go
new file mode 100644
index 00000000..5e7fc3a5
--- /dev/null
+++ b/core/services/ocrcommon/run_saver.go
@@ -0,0 +1,99 @@
package ocrcommon

import (
	"context"

	"github.com/goplugin/plugin-common/pkg/services"

"github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +type Runner interface { + InsertFinishedRun(run *pipeline.Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error +} + +type RunResultSaver struct { + services.StateMachine + + maxSuccessfulRuns uint64 + runResults chan *pipeline.Run + pipelineRunner Runner + done chan struct{} + logger logger.Logger +} + +func (r *RunResultSaver) HealthReport() map[string]error { + return map[string]error{r.Name(): r.Healthy()} +} + +func (r *RunResultSaver) Name() string { return r.logger.Name() } + +func NewResultRunSaver(pipelineRunner Runner, + logger logger.Logger, maxSuccessfulRuns uint64, resultsWriteDepth uint64, +) *RunResultSaver { + return &RunResultSaver{ + maxSuccessfulRuns: maxSuccessfulRuns, + runResults: make(chan *pipeline.Run, resultsWriteDepth), + pipelineRunner: pipelineRunner, + done: make(chan struct{}), + logger: logger.Named("RunResultSaver"), + } +} + +// Save sends the run on the internal `runResults` channel for saving. +// IMPORTANT: if the `runResults` pipeline is full, the run will be dropped. +func (r *RunResultSaver) Save(run *pipeline.Run) { + select { + case r.runResults <- run: + default: + r.logger.Warnw("RunSaver: the write queue was full, dropping run") + } +} + +// Start starts RunResultSaver. 
func (r *RunResultSaver) Start(context.Context) error {
	return r.StartOnce("RunResultSaver", func() error {
		// Single consumer goroutine; it exits only when Close signals via r.done.
		go func() {
			for {
				select {
				case run := <-r.runResults:
					if !run.HasErrors() && r.maxSuccessfulRuns == 0 {
						// optimisation: don't bother persisting it if we don't need to save successful runs
						r.logger.Tracew("Skipping save of successful run due to MaxSuccessfulRuns=0", "run", run)
						continue
					}
					r.logger.Tracew("RunSaver: saving job run", "run", run)
					// We do not want save successful TaskRuns as OCR runs very frequently so a lot of records
					// are produced and the successful TaskRuns do not provide value.
					if err := r.pipelineRunner.InsertFinishedRun(run, false); err != nil {
						// Log and keep consuming: a failed insert must not stall the queue.
						r.logger.Errorw("error inserting finished results", "err", err)
					}
				case <-r.done:
					return
				}
			}
		}()
		return nil
	})
}

// Close stops the consumer goroutine and flushes any queued runs.
func (r *RunResultSaver) Close() error {
	return r.StopOnce("RunResultSaver", func() error {
		// Blocking send: guarantees the consumer goroutine has observed the
		// shutdown signal before we drain what it left behind.
		r.done <- struct{}{}

		// In the unlikely event that there are remaining runResults to write,
		// drain the channel and save them.
+ for { + select { + case run := <-r.runResults: + r.logger.Infow("RunSaver: saving job run before exiting", "run", run) + if err := r.pipelineRunner.InsertFinishedRun(run, false); err != nil { + r.logger.Errorw("error inserting finished results", "err", err) + } + default: + return nil + } + } + }) +} diff --git a/core/services/ocrcommon/run_saver_test.go b/core/services/ocrcommon/run_saver_test.go new file mode 100644 index 00000000..39b01bf6 --- /dev/null +++ b/core/services/ocrcommon/run_saver_test.go @@ -0,0 +1,33 @@ +package ocrcommon + +import ( + "testing" + + "github.com/stretchr/testify/mock" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline/mocks" +) + +func TestRunSaver(t *testing.T) { + pipelineRunner := mocks.NewRunner(t) + rs := NewResultRunSaver( + pipelineRunner, + logger.TestLogger(t), + 1000, + 100, + ) + servicetest.Run(t, rs) + for i := 0; i < 100; i++ { + d := i + pipelineRunner.On("InsertFinishedRun", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + args.Get(0).(*pipeline.Run).ID = int64(d) + }). 
+ Once() + rs.Save(&pipeline.Run{ID: int64(i)}) + } +} diff --git a/core/services/ocrcommon/telemetry.go b/core/services/ocrcommon/telemetry.go new file mode 100644 index 00000000..135018da --- /dev/null +++ b/core/services/ocrcommon/telemetry.go @@ -0,0 +1,546 @@ +package ocrcommon + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + + "google.golang.org/protobuf/proto" + + "github.com/goplugin/libocr/commontypes" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + mercuryutils "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem" + "github.com/goplugin/pluginv3.0/v2/core/utils" + + v1types "github.com/goplugin/plugin-common/pkg/types/mercury/v1" + v2types "github.com/goplugin/plugin-common/pkg/types/mercury/v2" + v3types "github.com/goplugin/plugin-common/pkg/types/mercury/v3" +) + +type eaTelemetry struct { + DataSource string + ProviderRequestedTimestamp int64 + ProviderReceivedTimestamp int64 + ProviderDataStreamEstablished int64 + ProviderIndicatedTime int64 +} + +type EnhancedTelemetryData struct { + TaskRunResults pipeline.TaskRunResults + FinalResults pipeline.FinalResult + RepTimestamp ObservationTimestamp +} + +type EnhancedTelemetryMercuryData struct { + V1Observation *v1types.Observation + V2Observation *v2types.Observation + V3Observation *v3types.Observation + TaskRunResults pipeline.TaskRunResults + RepTimestamp ocrtypes.ReportTimestamp + FeedVersion mercuryutils.FeedVersion + FetchMaxFinalizedTimestamp bool + IsLinkFeed bool + IsNativeFeed bool +} + +type EnhancedTelemetryService[T EnhancedTelemetryData | EnhancedTelemetryMercuryData] struct { + 
	services.StateMachine

	// chTelem delivers telemetry payloads produced by the job pipeline.
	chTelem <-chan T
	// chDone signals the consumer goroutine to stop; closed by Close.
	chDone             chan struct{}
	monitoringEndpoint commontypes.MonitoringEndpoint
	job                *job.Job
	lggr               logger.Logger
}

// NewEnhancedTelemetryService constructs a service that consumes telemetry
// items from chTelem and ships them to the monitoring endpoint me.
func NewEnhancedTelemetryService[T EnhancedTelemetryData | EnhancedTelemetryMercuryData](job *job.Job, chTelem <-chan T, done chan struct{}, me commontypes.MonitoringEndpoint, lggr logger.Logger) *EnhancedTelemetryService[T] {
	return &EnhancedTelemetryService[T]{
		chTelem:            chTelem,
		chDone:             done,
		monitoringEndpoint: me,
		lggr:               lggr,
		job:                job,
	}
}

// Start spawns a goroutine that dispatches incoming telemetry items by their
// concrete type until Close is called. It never returns an error in practice
// beyond StartOnce's state-machine checks.
func (e *EnhancedTelemetryService[T]) Start(context.Context) error {
	return e.StartOnce("EnhancedTelemetryService", func() error {
		go func() {
			e.lggr.Infof("Started enhanced telemetry service for job %d", e.job.ID)
			for {
				select {
				case t := <-e.chTelem:
					// T is a union; dispatch on the concrete payload type.
					switch v := any(t).(type) {
					case EnhancedTelemetryData:
						e.collectEATelemetry(v.TaskRunResults, v.FinalResults, v.RepTimestamp)
					case EnhancedTelemetryMercuryData:
						e.collectMercuryEnhancedTelemetry(v)
					default:
						e.lggr.Errorf("unrecognised telemetry data type: %T", t)
					}
				case <-e.chDone:
					return
				}
			}
		}()
		return nil
	})
}

// Close stops the consumer goroutine by closing chDone.
func (e *EnhancedTelemetryService[T]) Close() error {
	return e.StopOnce("EnhancedTelemetryService", func() error {
		close(e.chDone)
		e.lggr.Infof("Stopping enhanced telemetry service for job %d", e.job.ID)
		return nil
	})
}

// ShouldCollectEnhancedTelemetry returns whether EA telemetry should be collected
func ShouldCollectEnhancedTelemetry(job *job.Job) bool {
	if job.Type.String() == pipeline.OffchainReportingJobType && job.OCROracleSpec != nil {
		return job.OCROracleSpec.CaptureEATelemetry
	}

	if job.Type.String() == pipeline.OffchainReporting2JobType && job.OCR2OracleSpec != nil {
		return job.OCR2OracleSpec.CaptureEATelemetry
	}

	return false
}

// getContract fetches the contract address from the OracleSpec
func (e *EnhancedTelemetryService[T]) getContract() string {
	switch e.job.Type.String() {
case pipeline.OffchainReportingJobType: + return e.job.OCROracleSpec.ContractAddress.String() + case pipeline.OffchainReporting2JobType: + return e.job.OCR2OracleSpec.ContractID + default: + return "" + } +} + +// getChainID fetches the chain id from the OracleSpec +func (e *EnhancedTelemetryService[T]) getChainID() string { + switch e.job.Type.String() { + case pipeline.OffchainReportingJobType: + return e.job.OCROracleSpec.EVMChainID.String() + case pipeline.OffchainReporting2JobType: + contract, _ := e.job.OCR2OracleSpec.RelayConfig["chainID"].(string) + return contract + default: + return "" + } +} + +// parseEATelemetry attempts to parse the bridge telemetry +func parseEATelemetry(b []byte) (eaTelemetry, error) { + type eaTimestamps struct { + ProviderRequestedTimestamp int64 `json:"providerDataRequestedUnixMs"` + ProviderReceivedTimestamp int64 `json:"providerDataReceivedUnixMs"` + ProviderDataStreamEstablished int64 `json:"providerDataStreamEstablishedUnixMs"` + ProviderIndicatedTime int64 `json:"providerIndicatedTimeUnixMs"` + } + type eaMeta struct { + AdapterName string `json:"adapterName"` + } + + type eaTelem struct { + TelemTimestamps eaTimestamps `json:"timestamps"` + TelemMeta eaMeta `json:"meta"` + } + t := eaTelem{} + + if err := json.Unmarshal(b, &t); err != nil { + return eaTelemetry{}, err + } + + return eaTelemetry{ + DataSource: t.TelemMeta.AdapterName, + ProviderRequestedTimestamp: t.TelemTimestamps.ProviderRequestedTimestamp, + ProviderReceivedTimestamp: t.TelemTimestamps.ProviderReceivedTimestamp, + ProviderDataStreamEstablished: t.TelemTimestamps.ProviderDataStreamEstablished, + ProviderIndicatedTime: t.TelemTimestamps.ProviderIndicatedTime, + }, nil +} + +// getJsonParsedValue checks if the next logical task is of type pipeline.TaskTypeJSONParse and trys to return +// the response as a *big.Int +func getJsonParsedValue(trr pipeline.TaskRunResult, trrs *pipeline.TaskRunResults) *float64 { + nextTask := trrs.GetNextTaskOf(trr) + if nextTask 
!= nil && nextTask.Task.Type() == pipeline.TaskTypeJSONParse { + asDecimal, err := utils.ToDecimal(nextTask.Result.Value) + if err != nil { + return nil + } + toFloat, _ := asDecimal.Float64() + return &toFloat + } + return nil +} + +// getObservation checks pipeline.FinalResult and extracts the observation +func (e *EnhancedTelemetryService[T]) getObservation(finalResult *pipeline.FinalResult) int64 { + singularResult, err := finalResult.SingularResult() + if err != nil { + e.lggr.Warnf("cannot get singular result, job %d", e.job.ID) + return 0 + } + + finalResultDecimal, err := utils.ToDecimal(singularResult.Value) + if err != nil { + e.lggr.Warnf("cannot parse singular result from bridge task, job %d", e.job.ID) + return 0 + } + + return finalResultDecimal.BigInt().Int64() +} + +func (e *EnhancedTelemetryService[T]) getParsedValue(trrs *pipeline.TaskRunResults, trr pipeline.TaskRunResult) float64 { + parsedValue := getJsonParsedValue(trr, trrs) + if parsedValue == nil { + e.lggr.Warnf("cannot get json parse value, job %d, id %s", e.job.ID, trr.Task.DotID()) + return 0 + } + return *parsedValue +} + +// collectEATelemetry checks if EA telemetry should be collected, gathers the information and sends it for ingestion +func (e *EnhancedTelemetryService[T]) collectEATelemetry(trrs pipeline.TaskRunResults, finalResult pipeline.FinalResult, timestamp ObservationTimestamp) { + if e.monitoringEndpoint == nil { + return + } + + e.collectAndSend(&trrs, &finalResult, timestamp) +} + +func (e *EnhancedTelemetryService[T]) collectAndSend(trrs *pipeline.TaskRunResults, finalResult *pipeline.FinalResult, timestamp ObservationTimestamp) { + chainID := e.getChainID() + contract := e.getContract() + + observation := e.getObservation(finalResult) + + for _, trr := range *trrs { + if trr.Task.Type() != pipeline.TaskTypeBridge { + continue + } + var bridgeName string + if b, is := trr.Task.(*pipeline.BridgeTask); is { + bridgeName = b.Name + } + + if trr.Result.Error != nil { + 
e.lggr.Warnw(fmt.Sprintf("cannot get bridge response from bridge task, job=%d, id=%s, name=%q", e.job.ID, trr.Task.DotID(), bridgeName), "err", trr.Result.Error, "jobID", e.job.ID, "dotID", trr.Task.DotID(), "bridgeName", bridgeName) + continue + } + bridgeRawResponse, ok := trr.Result.Value.(string) + if !ok { + e.lggr.Warnw(fmt.Sprintf("cannot parse bridge response from bridge task, job=%d, id=%s, name=%q: expected string, got: %v (type %T)", e.job.ID, trr.Task.DotID(), bridgeName, trr.Result.Value, trr.Result.Value), "jobID", e.job.ID, "dotID", trr.Task.DotID(), "bridgeName", bridgeName) + continue + } + eaTelem, err := parseEATelemetry([]byte(bridgeRawResponse)) + if err != nil { + e.lggr.Warnw(fmt.Sprintf("cannot parse EA telemetry, job=%d, id=%s, name=%q", e.job.ID, trr.Task.DotID(), bridgeName), "err", err, "jobID", e.job.ID, "dotID", trr.Task.DotID(), "bridgeName", bridgeName) + continue + } + value := e.getParsedValue(trrs, trr) + + t := &telem.EnhancedEA{ + DataSource: eaTelem.DataSource, + Value: value, + BridgeTaskRunStartedTimestamp: trr.CreatedAt.UnixMilli(), + BridgeTaskRunEndedTimestamp: trr.FinishedAt.Time.UnixMilli(), + ProviderRequestedTimestamp: eaTelem.ProviderRequestedTimestamp, + ProviderReceivedTimestamp: eaTelem.ProviderReceivedTimestamp, + ProviderDataStreamEstablished: eaTelem.ProviderDataStreamEstablished, + ProviderIndicatedTime: eaTelem.ProviderIndicatedTime, + Feed: contract, + ChainId: chainID, + Observation: observation, + ConfigDigest: timestamp.ConfigDigest, + Round: int64(timestamp.Round), + Epoch: int64(timestamp.Epoch), + } + + bytes, err := proto.Marshal(t) + if err != nil { + e.lggr.Warnw("protobuf marshal failed", "err", err) + continue + } + + e.monitoringEndpoint.SendLog(bytes) + } +} + +// collectMercuryEnhancedTelemetry checks if enhanced telemetry should be collected, fetches the information needed and +// sends the telemetry +func (e *EnhancedTelemetryService[T]) collectMercuryEnhancedTelemetry(d 
EnhancedTelemetryMercuryData) { + if e.monitoringEndpoint == nil { + return + } + + // v1 fields + var bn int64 + var bh string + var bt uint64 + // v1+v2+v3 fields + bp := big.NewInt(0) + //v1+v3 fields + bid := big.NewInt(0) + ask := big.NewInt(0) + // v2+v3 fields + var mfts, lp, np int64 + + switch { + case d.V1Observation != nil: + obs := *d.V1Observation + if obs.CurrentBlockNum.Err == nil { + bn = obs.CurrentBlockNum.Val + } + if obs.CurrentBlockHash.Err == nil { + bh = common.BytesToHash(obs.CurrentBlockHash.Val).Hex() + } + if obs.CurrentBlockTimestamp.Err == nil { + bt = obs.CurrentBlockTimestamp.Val + } + if obs.BenchmarkPrice.Err == nil && obs.BenchmarkPrice.Val != nil { + bp = obs.BenchmarkPrice.Val + } + if obs.Bid.Err == nil && obs.Bid.Val != nil { + bid = obs.Bid.Val + } + if obs.Ask.Err == nil && obs.Ask.Val != nil { + ask = obs.Ask.Val + } + case d.V2Observation != nil: + obs := *d.V2Observation + if obs.MaxFinalizedTimestamp.Err == nil { + mfts = obs.MaxFinalizedTimestamp.Val + } + if obs.LinkPrice.Err == nil && obs.LinkPrice.Val != nil { + lp = obs.LinkPrice.Val.Int64() + } + if obs.NativePrice.Err == nil && obs.NativePrice.Val != nil { + np = obs.NativePrice.Val.Int64() + } + if obs.BenchmarkPrice.Err == nil && obs.BenchmarkPrice.Val != nil { + bp = obs.BenchmarkPrice.Val + } + case d.V3Observation != nil: + obs := *d.V3Observation + if obs.MaxFinalizedTimestamp.Err == nil { + mfts = obs.MaxFinalizedTimestamp.Val + } + if obs.LinkPrice.Err == nil && obs.LinkPrice.Val != nil { + lp = obs.LinkPrice.Val.Int64() + } + if obs.NativePrice.Err == nil && obs.NativePrice.Val != nil { + np = obs.NativePrice.Val.Int64() + } + if obs.BenchmarkPrice.Err == nil && obs.BenchmarkPrice.Val != nil { + bp = obs.BenchmarkPrice.Val + } + if obs.Bid.Err == nil && obs.Bid.Val != nil { + bid = obs.Bid.Val + } + if obs.Ask.Err == nil && obs.Ask.Val != nil { + ask = obs.Ask.Val + } + } + + for _, trr := range d.TaskRunResults { + if trr.Task.Type() != 
pipeline.TaskTypeBridge { + continue + } + bridgeTask := trr.Task.(*pipeline.BridgeTask) + bridgeName := bridgeTask.Name + + bridgeRawResponse, ok := trr.Result.Value.(string) + if !ok { + e.lggr.Warnw(fmt.Sprintf("cannot get bridge response from bridge task, job=%d, id=%s, name=%q, expected string got %T", e.job.ID, trr.Task.DotID(), bridgeName, trr.Result.Value), "jobID", e.job.ID, "dotID", trr.Task.DotID(), "bridgeName", bridgeName) + continue + } + eaTelem, err := parseEATelemetry([]byte(bridgeRawResponse)) + if err != nil { + e.lggr.Warnw(fmt.Sprintf("cannot parse EA telemetry, job=%d, id=%s, name=%q", e.job.ID, trr.Task.DotID(), bridgeName), "err", err, "jobID", e.job.ID, "dotID", trr.Task.DotID(), "bridgeName", bridgeName) + } + + assetSymbol := e.getAssetSymbolFromRequestData(bridgeTask.RequestData) + + benchmarkPrice, bidPrice, askPrice := e.getPricesFromResults(trr, d.TaskRunResults, d.FeedVersion) + + t := &telem.EnhancedEAMercury{ + DataSource: eaTelem.DataSource, + DpBenchmarkPrice: benchmarkPrice, + DpBid: bidPrice, + DpAsk: askPrice, + CurrentBlockNumber: bn, + CurrentBlockHash: bh, + CurrentBlockTimestamp: bt, + FetchMaxFinalizedTimestamp: d.FetchMaxFinalizedTimestamp, + MaxFinalizedTimestamp: mfts, + BridgeTaskRunStartedTimestamp: trr.CreatedAt.UnixMilli(), + BridgeTaskRunEndedTimestamp: trr.FinishedAt.Time.UnixMilli(), + ProviderRequestedTimestamp: eaTelem.ProviderRequestedTimestamp, + ProviderReceivedTimestamp: eaTelem.ProviderReceivedTimestamp, + ProviderDataStreamEstablished: eaTelem.ProviderDataStreamEstablished, + ProviderIndicatedTime: eaTelem.ProviderIndicatedTime, + Feed: e.job.OCR2OracleSpec.FeedID.Hex(), + ObservationBenchmarkPrice: bp.Int64(), + ObservationBid: bid.Int64(), + ObservationAsk: ask.Int64(), + ObservationBenchmarkPriceString: stringOrEmpty(bp), + ObservationBidString: stringOrEmpty(bid), + ObservationAskString: stringOrEmpty(ask), + IsLinkFeed: d.IsLinkFeed, + LinkPrice: lp, + IsNativeFeed: d.IsNativeFeed, + NativePrice: 
np, + ConfigDigest: d.RepTimestamp.ConfigDigest.Hex(), + Round: int64(d.RepTimestamp.Round), + Epoch: int64(d.RepTimestamp.Epoch), + AssetSymbol: assetSymbol, + Version: uint32(d.FeedVersion), + } + + bytes, err := proto.Marshal(t) + if err != nil { + e.lggr.Warnf("protobuf marshal failed %v", err.Error()) + continue + } + + e.monitoringEndpoint.SendLog(bytes) + } +} + +// getAssetSymbolFromRequestData parses the requestData of the bridge to generate an asset symbol pair +func (e *EnhancedTelemetryService[T]) getAssetSymbolFromRequestData(requestData string) string { + type reqDataPayload struct { + To string `json:"to"` + From string `json:"from"` + } + type reqData struct { + Data reqDataPayload `json:"data"` + } + + rd := &reqData{} + err := json.Unmarshal([]byte(requestData), rd) + if err != nil { + return "" + } + + return rd.Data.From + "/" + rd.Data.To +} + +// ShouldCollectEnhancedTelemetryMercury checks if enhanced telemetry should be collected and sent +func ShouldCollectEnhancedTelemetryMercury(jb job.Job) bool { + if jb.Type.String() == pipeline.OffchainReporting2JobType && jb.OCR2OracleSpec != nil { + return jb.OCR2OracleSpec.CaptureEATelemetry + } + return false +} + +// getPricesFromResults parses the pipeline.TaskRunResults for pipeline.TaskTypeJSONParse and gets the benchmarkPrice, +// bid and ask. 
This function expects the pipeline.TaskRunResults to be correctly ordered.
// Returns (benchmarkPrice, bidPrice, askPrice); missing or unparseable values
// are reported as 0 and logged, never returned as errors.
func (e *EnhancedTelemetryService[T]) getPricesFromResults(startTask pipeline.TaskRunResult, allTasks pipeline.TaskRunResults, mercuryVersion mercuryutils.FeedVersion) (float64, float64, float64) {
	var benchmarkPrice, askPrice, bidPrice float64
	var err error
	// We rely on task results to be sorted in the correct order:
	// bridge -> benchmark parse -> bid parse -> ask parse.
	benchmarkPriceTask := allTasks.GetNextTaskOf(startTask)
	if benchmarkPriceTask == nil {
		// The task is nil here, so there is no task ID to include in the message.
		// (Previously the format string had an unmatched %s verb.)
		e.lggr.Warnf("cannot parse enhanced EA telemetry benchmark price, task is nil, job %d", e.job.ID)
		return 0, 0, 0
	}
	if benchmarkPriceTask.Task.Type() == pipeline.TaskTypeJSONParse {
		if benchmarkPriceTask.Result.Error != nil {
			e.lggr.Warnw(fmt.Sprintf("got error for enhanced EA telemetry benchmark price, job %d, id %s: %s", e.job.ID, benchmarkPriceTask.Task.DotID(), benchmarkPriceTask.Result.Error), "err", benchmarkPriceTask.Result.Error)
		} else {
			benchmarkPrice, err = getResultFloat64(benchmarkPriceTask)
			if err != nil {
				e.lggr.Warnw(fmt.Sprintf("cannot parse enhanced EA telemetry benchmark price, job %d, id %s", e.job.ID, benchmarkPriceTask.Task.DotID()), "err", err)
			}
		}
	}

	// mercury version 2 only supports benchmarkPrice
	if mercuryVersion == 2 {
		return benchmarkPrice, 0, 0
	}

	bidTask := allTasks.GetNextTaskOf(*benchmarkPriceTask)
	if bidTask == nil {
		e.lggr.Warnf("cannot parse enhanced EA telemetry bid price, task is nil, job %d", e.job.ID)
		return benchmarkPrice, 0, 0
	}

	// bidTask is non-nil here (checked above), so the old `bidTask != nil &&` was redundant.
	if bidTask.Task.Type() == pipeline.TaskTypeJSONParse {
		if bidTask.Result.Error != nil {
			e.lggr.Warnw(fmt.Sprintf("got error for enhanced EA telemetry bid price, job %d, id %s: %s", e.job.ID, bidTask.Task.DotID(), bidTask.Result.Error), "err", bidTask.Result.Error)
		} else {
			bidPrice, err = getResultFloat64(bidTask)
			if err != nil {
				e.lggr.Warnw(fmt.Sprintf("cannot parse enhanced EA telemetry bid price, job %d, id %s", e.job.ID, bidTask.Task.DotID()), "err", err)
			}
		}
	}

	askTask := allTasks.GetNextTaskOf(*bidTask)
	if askTask == nil {
		e.lggr.Warnf("cannot parse enhanced EA telemetry ask price, task is nil, job %d", e.job.ID)
		return benchmarkPrice, bidPrice, 0
	}
	if askTask.Task.Type() == pipeline.TaskTypeJSONParse {
		// BUGFIX: this branch previously inspected bidTask.Result.Error, so an
		// ask-parse error was silently ignored (or a stale bid error re-reported).
		if askTask.Result.Error != nil {
			e.lggr.Warnw(fmt.Sprintf("got error for enhanced EA telemetry ask price, job %d, id %s: %s", e.job.ID, askTask.Task.DotID(), askTask.Result.Error), "err", askTask.Result.Error)
		} else {
			askPrice, err = getResultFloat64(askTask)
			if err != nil {
				e.lggr.Warnw(fmt.Sprintf("cannot parse enhanced EA telemetry ask price, job %d, id %s", e.job.ID, askTask.Task.DotID()), "err", err)
			}
		}
	}

	return benchmarkPrice, bidPrice, askPrice
}

// MaybeEnqueueEnhancedTelem sends data to the telemetry channel for processing,
// but only if the job is configured to capture EA telemetry.
func MaybeEnqueueEnhancedTelem(jb job.Job, ch chan<- EnhancedTelemetryMercuryData, data EnhancedTelemetryMercuryData) {
	if ShouldCollectEnhancedTelemetryMercury(jb) {
		EnqueueEnhancedTelem[EnhancedTelemetryMercuryData](ch, data)
	}
}

// EnqueueEnhancedTelem sends data to the telemetry channel for processing.
// The send is non-blocking: if the channel is full, the data is dropped.
func EnqueueEnhancedTelem[T EnhancedTelemetryData | EnhancedTelemetryMercuryData](ch chan<- T, data T) {
	select {
	case ch <- data:
	default:
	}
}

// getResultFloat64 will check the result type and force it to float64 or returns an error if the conversion cannot be made
func getResultFloat64(task *pipeline.TaskRunResult) (float64, error) {
	result, err := utils.ToDecimal(task.Result.Value)
	if err != nil {
		return 0, err
	}
	resultFloat64, _ := result.Float64() // precision loss is acceptable for telemetry
	return resultFloat64, nil
}

// stringOrEmpty renders n as a decimal string, or "" when n is exactly zero.
func stringOrEmpty(n *big.Int) string {
	// Sign() avoids allocating a zero big.Int just to compare, and is
	// equivalent to n.Cmp(big.NewInt(0)) == 0.
	if n.Sign() == 0 {
		return ""
	}
	return n.String()
}
diff --git a/core/services/ocrcommon/telemetry_test.go b/core/services/ocrcommon/telemetry_test.go
new file mode 100644
index
00000000..9619fef2 --- /dev/null +++ b/core/services/ocrcommon/telemetry_test.go @@ -0,0 +1,787 @@ +package ocrcommon + +import ( + "math/big" + "sync" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "google.golang.org/protobuf/proto" + + "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/plugin-common/pkg/types/mercury" + mercuryv1 "github.com/goplugin/plugin-common/pkg/types/mercury/v1" + mercuryv2 "github.com/goplugin/plugin-common/pkg/types/mercury/v2" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem" + "github.com/goplugin/pluginv3.0/v2/core/services/telemetry" +) + +const bridgeResponse = `{ + "meta":{ + "adapterName":"data-source-name" + }, + "timestamps":{ + "providerDataRequestedUnixMs":92233720368547760, + "providerDataReceivedUnixMs":-92233720368547760, + "providerDataStreamEstablishedUnixMs":1, + "providerIndicatedTimeUnixMs":-123456789 + } + }` + +var trrs = pipeline.TaskRunResults{ + pipeline.TaskRunResult{ + Task: &pipeline.BridgeTask{ + Name: "test-bridge-1", + BaseTask: pipeline.NewBaseTask(0, "ds1", nil, nil, 0), + }, + Result: pipeline.Result{ + Value: bridgeResponse, + }, + }, + pipeline.TaskRunResult{ + Task: &pipeline.JSONParseTask{ + BaseTask: pipeline.NewBaseTask(1, 
"ds1_parse", nil, nil, 1), + }, + Result: pipeline.Result{ + Value: "123456.123456789", + }, + }, + pipeline.TaskRunResult{ + Task: &pipeline.BridgeTask{ + Name: "test-bridge-2", + BaseTask: pipeline.NewBaseTask(0, "ds2", nil, nil, 0), + }, + Result: pipeline.Result{ + Value: bridgeResponse, + }, + }, + pipeline.TaskRunResult{ + Task: &pipeline.JSONParseTask{ + BaseTask: pipeline.NewBaseTask(1, "ds2_parse", nil, nil, 1), + }, + Result: pipeline.Result{ + Value: "12345678", + }, + }, + pipeline.TaskRunResult{ + Task: &pipeline.BridgeTask{ + Name: "test-bridge-3", + BaseTask: pipeline.NewBaseTask(0, "ds3", nil, nil, 0), + }, + Result: pipeline.Result{ + Value: bridgeResponse, + }, + }, + pipeline.TaskRunResult{ + Task: &pipeline.JSONParseTask{ + BaseTask: pipeline.NewBaseTask(1, "ds3_parse", nil, nil, 1), + }, + Result: pipeline.Result{ + Value: "1234567890", + }, + }, +} + +func TestShouldCollectTelemetry(t *testing.T) { + j := job.Job{ + OCROracleSpec: &job.OCROracleSpec{CaptureEATelemetry: true}, + OCR2OracleSpec: &job.OCR2OracleSpec{CaptureEATelemetry: true}, + } + + j.Type = job.Type(pipeline.OffchainReportingJobType) + assert.True(t, ShouldCollectEnhancedTelemetry(&j)) + j.OCROracleSpec.CaptureEATelemetry = false + assert.False(t, ShouldCollectEnhancedTelemetry(&j)) + + j.Type = job.Type(pipeline.OffchainReporting2JobType) + assert.True(t, ShouldCollectEnhancedTelemetry(&j)) + j.OCR2OracleSpec.CaptureEATelemetry = false + assert.False(t, ShouldCollectEnhancedTelemetry(&j)) + + j.Type = job.Type(pipeline.VRFJobType) + assert.False(t, ShouldCollectEnhancedTelemetry(&j)) +} + +func TestGetContract(t *testing.T) { + j := job.Job{ + OCROracleSpec: &job.OCROracleSpec{CaptureEATelemetry: true}, + OCR2OracleSpec: &job.OCR2OracleSpec{CaptureEATelemetry: true}, + } + e := EnhancedTelemetryService[EnhancedTelemetryData]{ + job: &j, + lggr: nil, + } + contractAddress := ethkey.EIP55Address(utils.RandomAddress().String()) + + j.Type = 
job.Type(pipeline.OffchainReportingJobType) + j.OCROracleSpec.ContractAddress = contractAddress + assert.Equal(t, contractAddress.String(), e.getContract()) + + j.Type = job.Type(pipeline.OffchainReporting2JobType) + j.OCR2OracleSpec.ContractID = contractAddress.String() + assert.Equal(t, contractAddress.String(), e.getContract()) + + j.Type = job.Type(pipeline.VRFJobType) + assert.Empty(t, e.getContract()) +} + +func TestGetChainID(t *testing.T) { + j := job.Job{ + OCROracleSpec: &job.OCROracleSpec{CaptureEATelemetry: true}, + OCR2OracleSpec: &job.OCR2OracleSpec{CaptureEATelemetry: true}, + } + e := EnhancedTelemetryService[EnhancedTelemetryData]{ + job: &j, + lggr: nil, + } + + j.Type = job.Type(pipeline.OffchainReportingJobType) + j.OCROracleSpec.EVMChainID = (*ubig.Big)(big.NewInt(1234567890)) + assert.Equal(t, "1234567890", e.getChainID()) + + j.Type = job.Type(pipeline.OffchainReporting2JobType) + j.OCR2OracleSpec.RelayConfig = make(map[string]interface{}) + j.OCR2OracleSpec.RelayConfig["chainID"] = "foo" + assert.Equal(t, "foo", e.getChainID()) + + j.Type = job.Type(pipeline.VRFJobType) + assert.Empty(t, e.getChainID()) +} + +func TestParseEATelemetry(t *testing.T) { + ea, err := parseEATelemetry([]byte(bridgeResponse)) + assert.NoError(t, err) + assert.Equal(t, ea.DataSource, "data-source-name") + assert.Equal(t, ea.ProviderRequestedTimestamp, int64(92233720368547760)) + assert.Equal(t, ea.ProviderReceivedTimestamp, int64(-92233720368547760)) + assert.Equal(t, ea.ProviderDataStreamEstablished, int64(1)) + assert.Equal(t, ea.ProviderIndicatedTime, int64(-123456789)) + + _, err = parseEATelemetry(nil) + assert.Error(t, err) +} + +func TestGetJsonParsedValue(t *testing.T) { + + resp := getJsonParsedValue(trrs[0], &trrs) + assert.Equal(t, 123456.123456789, *resp) + + trrs[1].Result.Value = nil + resp = getJsonParsedValue(trrs[0], &trrs) + assert.Nil(t, resp) + + resp = getJsonParsedValue(trrs[1], &trrs) + assert.Nil(t, resp) + +} + +func TestSendEATelemetry(t 
*testing.T) { + wg := sync.WaitGroup{} + ingressClient := mocks.NewTelemetryService(t) + ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient) + monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.EnhancedEA) + + var sentMessage []byte + ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) { + sentMessage = args[1].([]byte) + wg.Done() + }) + + feedAddress := utils.RandomAddress() + + enhancedTelemChan := make(chan EnhancedTelemetryData, 100) + jb := job.Job{ + Type: job.Type(pipeline.OffchainReportingJobType), + OCROracleSpec: &job.OCROracleSpec{ + ContractAddress: ethkey.EIP55AddressFromAddress(feedAddress), + CaptureEATelemetry: true, + EVMChainID: (*ubig.Big)(big.NewInt(9)), + }, + } + + lggr, _ := logger.TestLoggerObserved(t, zap.WarnLevel) + doneCh := make(chan struct{}) + enhancedTelemService := NewEnhancedTelemetryService(&jb, enhancedTelemChan, doneCh, monitoringEndpoint, lggr.Named("Enhanced Telemetry Mercury")) + servicetest.Run(t, enhancedTelemService) + trrs := pipeline.TaskRunResults{ + pipeline.TaskRunResult{ + Task: &pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "ds1", nil, nil, 0), + }, + Result: pipeline.Result{ + Value: bridgeResponse, + }, + }, + pipeline.TaskRunResult{ + Task: &pipeline.JSONParseTask{ + BaseTask: pipeline.NewBaseTask(1, "ds1", nil, nil, 1), + }, + Result: pipeline.Result{ + Value: "123456789.1234567", + }, + }, + } + fr := pipeline.FinalResult{ + Values: []interface{}{"123456"}, + AllErrors: nil, + FatalErrors: []error{nil}, + } + + observationTimestamp := ObservationTimestamp{ + Round: 15, + Epoch: 738, + ConfigDigest: "config digest hex", + } + + wg.Add(1) + enhancedTelemChan <- EnhancedTelemetryData{ + TaskRunResults: trrs, + FinalResults: fr, + RepTimestamp: observationTimestamp, + } + + expectedTelemetry := 
telem.EnhancedEA{ + DataSource: "data-source-name", + Value: 123456789.1234567, + BridgeTaskRunStartedTimestamp: trrs[0].CreatedAt.UnixMilli(), + BridgeTaskRunEndedTimestamp: trrs[0].FinishedAt.Time.UnixMilli(), + ProviderRequestedTimestamp: 92233720368547760, + ProviderReceivedTimestamp: -92233720368547760, + ProviderDataStreamEstablished: 1, + ProviderIndicatedTime: -123456789, + Feed: feedAddress.String(), + ChainId: "9", + Observation: 123456, + Round: 15, + Epoch: 738, + ConfigDigest: "config digest hex", + } + + expectedMessage, _ := proto.Marshal(&expectedTelemetry) + wg.Wait() + assert.Equal(t, expectedMessage, sentMessage) + //enhancedTelemService.StopOnce("EnhancedTelemetryService", func() error { return nil }) + doneCh <- struct{}{} +} + +func TestGetObservation(t *testing.T) { + j := job.Job{ + OCROracleSpec: &job.OCROracleSpec{CaptureEATelemetry: true}, + OCR2OracleSpec: &job.OCR2OracleSpec{CaptureEATelemetry: true}, + } + + lggr, logs := logger.TestLoggerObserved(t, zap.WarnLevel) + e := EnhancedTelemetryService[EnhancedTelemetryData]{ + job: &j, + lggr: lggr, + } + + obs := e.getObservation(&pipeline.FinalResult{}) + assert.Equal(t, obs, int64(0)) + assert.Equal(t, logs.Len(), 1) + assert.Contains(t, logs.All()[0].Message, "cannot get singular result") + + finalResult := &pipeline.FinalResult{ + Values: []interface{}{"123456"}, + AllErrors: nil, + FatalErrors: []error{nil}, + } + obs = e.getObservation(finalResult) + assert.Equal(t, obs, int64(123456)) + +} + +func TestCollectAndSend(t *testing.T) { + wg := sync.WaitGroup{} + ingressClient := mocks.NewTelemetryService(t) + ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient) + monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.EnhancedEA) + ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) { + 
wg.Done() + }) + + lggr, logs := logger.TestLoggerObserved(t, zap.WarnLevel) + jb := job.Job{ + ID: 1234567890, + Type: job.Type(pipeline.OffchainReportingJobType), + OCROracleSpec: &job.OCROracleSpec{ + CaptureEATelemetry: true, + }, + } + + enhancedTelemChan := make(chan EnhancedTelemetryData, 100) + doneCh := make(chan struct{}) + + enhancedTelemService := NewEnhancedTelemetryService(&jb, enhancedTelemChan, doneCh, monitoringEndpoint, lggr.Named("Enhanced Telemetry")) + servicetest.Run(t, enhancedTelemService) + finalResult := &pipeline.FinalResult{ + Values: []interface{}{"123456"}, + AllErrors: nil, + FatalErrors: []error{nil}, + } + + badTrrs := &pipeline.TaskRunResults{ + pipeline.TaskRunResult{ + Task: &pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "ds1", nil, nil, 0), + }, + Result: pipeline.Result{ + Value: nil, + }, + }, + pipeline.TaskRunResult{ + Task: &pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "ds2", nil, nil, 0), + }, + Result: pipeline.Result{ + Value: bridgeResponse, + }, + }} + + observationTimestamp := ObservationTimestamp{ + Round: 0, + Epoch: 0, + ConfigDigest: "", + } + wg.Add(1) + enhancedTelemChan <- EnhancedTelemetryData{ + TaskRunResults: *badTrrs, + FinalResults: *finalResult, + RepTimestamp: observationTimestamp, + } + + wg.Wait() + assert.Equal(t, logs.Len(), 2) + assert.Contains(t, logs.All()[0].Message, "cannot parse bridge response from bridge task") + + badTrrs = &pipeline.TaskRunResults{ + pipeline.TaskRunResult{ + Task: &pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "ds1", nil, nil, 0), + }, + Result: pipeline.Result{ + Value: "[]", + }, + }} + enhancedTelemChan <- EnhancedTelemetryData{ + TaskRunResults: *badTrrs, + FinalResults: *finalResult, + RepTimestamp: observationTimestamp, + } + wg.Wait() + assert.Equal(t, 2, logs.Len()) + assert.Contains(t, logs.All()[0].Message, "cannot parse bridge response from bridge task") + assert.Contains(t, logs.All()[1].Message, "cannot get json parse value") 
// trrsMercuryV1 is a canned set of pipeline task run results shaped like a
// mercury v1 observation pipeline: a bridge task followed by JSON-parse
// tasks for the benchmark, bid and ask prices.
var trrsMercuryV1 = pipeline.TaskRunResults{
	// Bridge task whose result carries the raw EA bridge response.
	pipeline.TaskRunResult{
		Task: &pipeline.BridgeTask{
			Name:        "link-usd-test-bridge-v1",
			BaseTask:    pipeline.NewBaseTask(0, "ds1", nil, nil, 0),
			RequestData: `{"data":{"to":"PLI","from":"USD"}}`,
		},
		Result: pipeline.Result{
			Value: bridgeResponse,
		},
	},
	// Benchmark price parsed from the bridge response.
	pipeline.TaskRunResult{
		Task: &pipeline.JSONParseTask{
			BaseTask: pipeline.NewBaseTask(1, "ds1_benchmark", nil, nil, 1),
		},
		Result: pipeline.Result{
			Value: float64(123456.123456),
		},
	},
	// Bid price.
	pipeline.TaskRunResult{
		Task: &pipeline.JSONParseTask{
			BaseTask: pipeline.NewBaseTask(2, "ds2_bid", nil, nil, 2),
		},
		Result: pipeline.Result{
			Value: float64(1234567.1234567),
		},
	},
	// Ask price; deliberately an int64 (unlike the float benchmark/bid),
	// exercising non-float result handling downstream.
	pipeline.TaskRunResult{
		Task: &pipeline.JSONParseTask{
			BaseTask: pipeline.NewBaseTask(3, "ds3_ask", nil, nil, 3),
		},
		Result: pipeline.Result{
			Value: int64(321123),
		},
	},
}
require.Equal(t, float64(0), benchmarkPrice) + require.Equal(t, float64(0), bid) + require.Equal(t, float64(0), ask) + require.Equal(t, 1, logs.Len()) + require.Contains(t, logs.All()[0].Message, "cannot parse enhanced EA telemetry") + + tt := trrsMercuryV1[:2] + e.getPricesFromResults(trrsMercuryV1[0], tt, 1) + require.Equal(t, 2, logs.Len()) + require.Contains(t, logs.All()[1].Message, "cannot parse enhanced EA telemetry bid price, task is nil") + + tt = trrsMercuryV1[:3] + e.getPricesFromResults(trrsMercuryV1[0], tt, 1) + require.Equal(t, 3, logs.Len()) + require.Contains(t, logs.All()[2].Message, "cannot parse enhanced EA telemetry ask price, task is nil") + + trrs2 := pipeline.TaskRunResults{ + pipeline.TaskRunResult{ + Task: &pipeline.BridgeTask{ + Name: "test-bridge-1", + BaseTask: pipeline.NewBaseTask(0, "ds1", nil, nil, 0), + }, + Result: pipeline.Result{ + Value: bridgeResponse, + }, + }, + pipeline.TaskRunResult{ + Task: &pipeline.JSONParseTask{ + BaseTask: pipeline.NewBaseTask(1, "ds1_benchmark", nil, nil, 1), + }, + Result: pipeline.Result{ + Value: nil, + }, + }, + pipeline.TaskRunResult{ + Task: &pipeline.JSONParseTask{ + BaseTask: pipeline.NewBaseTask(2, "ds2_bid", nil, nil, 2), + }, + Result: pipeline.Result{ + Value: nil, + }, + }, + pipeline.TaskRunResult{ + Task: &pipeline.JSONParseTask{ + BaseTask: pipeline.NewBaseTask(3, "ds3_ask", nil, nil, 3), + }, + Result: pipeline.Result{ + Value: nil, + }, + }} + benchmarkPrice, bid, ask = e.getPricesFromResults(trrsMercuryV1[0], trrs2, 3) + require.Equal(t, benchmarkPrice, float64(0)) + require.Equal(t, bid, float64(0)) + require.Equal(t, ask, float64(0)) + require.Equal(t, logs.Len(), 6) + require.Contains(t, logs.All()[3].Message, "cannot parse enhanced EA telemetry benchmark price") + require.Contains(t, logs.All()[4].Message, "cannot parse enhanced EA telemetry bid price") + require.Contains(t, logs.All()[5].Message, "cannot parse enhanced EA telemetry ask price") + + benchmarkPrice, bid, ask = 
e.getPricesFromResults(trrsMercuryV1[0], trrsMercuryV2, 2) + require.Equal(t, 123456.123456, benchmarkPrice) + require.Equal(t, float64(0), bid) + require.Equal(t, float64(0), ask) +} + +func TestShouldCollectEnhancedTelemetryMercury(t *testing.T) { + + j := job.Job{ + Type: job.Type(pipeline.OffchainReporting2JobType), + OCR2OracleSpec: &job.OCR2OracleSpec{ + CaptureEATelemetry: true, + }, + } + + require.Equal(t, ShouldCollectEnhancedTelemetryMercury(j), true) + j.OCR2OracleSpec.CaptureEATelemetry = false + require.Equal(t, ShouldCollectEnhancedTelemetryMercury(j), false) + j.OCR2OracleSpec.CaptureEATelemetry = true + j.Type = job.Type(pipeline.CronJobType) + require.Equal(t, ShouldCollectEnhancedTelemetryMercury(j), false) +} + +func TestGetAssetSymbolFromRequestData(t *testing.T) { + e := EnhancedTelemetryService[EnhancedTelemetryMercuryData]{} + require.Equal(t, e.getAssetSymbolFromRequestData(""), "") + reqData := `{"data":{"to":"PLI","from":"USD"}}` + require.Equal(t, e.getAssetSymbolFromRequestData(reqData), "USD/PLI") +} + +func TestCollectMercuryEnhancedTelemetryV1(t *testing.T) { + wg := sync.WaitGroup{} + ingressClient := mocks.NewTelemetryService(t) + ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient) + monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.EnhancedEAMercury) + + var sentMessage []byte + ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) { + sentMessage = args[1].([]byte) + wg.Done() + }) + + lggr, logs := logger.TestLoggerObserved(t, zap.WarnLevel) + chTelem := make(chan EnhancedTelemetryMercuryData, 100) + chDone := make(chan struct{}) + feedID := common.HexToHash("0x111") + e := EnhancedTelemetryService[EnhancedTelemetryMercuryData]{ + chDone: chDone, + chTelem: chTelem, + job: &job.Job{ + Type: 
job.Type(pipeline.OffchainReporting2JobType), + OCR2OracleSpec: &job.OCR2OracleSpec{ + CaptureEATelemetry: true, + FeedID: &feedID, + }, + }, + lggr: lggr, + monitoringEndpoint: monitoringEndpoint, + } + servicetest.Run(t, &e) + + wg.Add(1) + + chTelem <- EnhancedTelemetryMercuryData{ + TaskRunResults: trrsMercuryV1, + V1Observation: &mercuryv1.Observation{ + BenchmarkPrice: mercury.ObsResult[*big.Int]{Val: big.NewInt(111111)}, + Bid: mercury.ObsResult[*big.Int]{Val: big.NewInt(222222)}, + Ask: mercury.ObsResult[*big.Int]{Val: big.NewInt(333333)}, + CurrentBlockNum: mercury.ObsResult[int64]{Val: 123456789}, + CurrentBlockHash: mercury.ObsResult[[]byte]{Val: common.HexToHash("0x123321").Bytes()}, + CurrentBlockTimestamp: mercury.ObsResult[uint64]{Val: 987654321}, + }, + RepTimestamp: types.ReportTimestamp{ + ConfigDigest: types.ConfigDigest{2}, + Epoch: 11, + Round: 22, + }, + } + + expectedTelemetry := telem.EnhancedEAMercury{ + DataSource: "data-source-name", + DpBenchmarkPrice: 123456.123456, + DpBid: 1234567.1234567, + DpAsk: 321123, + CurrentBlockNumber: 123456789, + CurrentBlockHash: common.HexToHash("0x123321").String(), + CurrentBlockTimestamp: 987654321, + BridgeTaskRunStartedTimestamp: trrsMercuryV1[0].CreatedAt.UnixMilli(), + BridgeTaskRunEndedTimestamp: trrsMercuryV1[0].FinishedAt.Time.UnixMilli(), + ProviderRequestedTimestamp: 92233720368547760, + ProviderReceivedTimestamp: -92233720368547760, + ProviderDataStreamEstablished: 1, + ProviderIndicatedTime: -123456789, + Feed: common.HexToHash("0x111").String(), + ObservationBenchmarkPrice: 111111, + ObservationBid: 222222, + ObservationAsk: 333333, + ConfigDigest: "0200000000000000000000000000000000000000000000000000000000000000", + Round: 22, + Epoch: 11, + AssetSymbol: "USD/PLI", + ObservationBenchmarkPriceString: "111111", + ObservationBidString: "222222", + ObservationAskString: "333333", + } + + expectedMessage, _ := proto.Marshal(&expectedTelemetry) + wg.Wait() + require.Equal(t, expectedMessage, 
sentMessage) + + chTelem <- EnhancedTelemetryMercuryData{ + TaskRunResults: pipeline.TaskRunResults{ + pipeline.TaskRunResult{Task: &pipeline.BridgeTask{ + Name: "test-mercury-bridge-1", + BaseTask: pipeline.NewBaseTask(0, "ds1", nil, nil, 0), + }, + Result: pipeline.Result{ + Value: nil, + }}, + }, + V1Observation: &mercuryv1.Observation{}, + RepTimestamp: types.ReportTimestamp{ + ConfigDigest: types.ConfigDigest{2}, + Epoch: 11, + Round: 22, + }, + } + wg.Add(1) + trrsMercuryV1[0].Result.Value = "" + chTelem <- EnhancedTelemetryMercuryData{ + TaskRunResults: trrsMercuryV1, + V1Observation: &mercuryv1.Observation{}, + RepTimestamp: types.ReportTimestamp{ + ConfigDigest: types.ConfigDigest{2}, + Epoch: 11, + Round: 22, + }, + } + + wg.Wait() + require.Equal(t, 2, logs.Len()) + require.Contains(t, logs.All()[0].Message, `cannot get bridge response from bridge task, job=0, id=ds1, name="test-mercury-bridge-1"`) + require.Contains(t, logs.All()[1].Message, "cannot parse EA telemetry") + chDone <- struct{}{} +} + +func TestCollectMercuryEnhancedTelemetryV2(t *testing.T) { + wg := sync.WaitGroup{} + ingressClient := mocks.NewTelemetryService(t) + ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient) + monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.EnhancedEAMercury) + + var sentMessage []byte + ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) { + sentMessage = args[1].([]byte) + wg.Done() + }) + + lggr, logs := logger.TestLoggerObserved(t, zap.WarnLevel) + chTelem := make(chan EnhancedTelemetryMercuryData, 100) + chDone := make(chan struct{}) + feedID := common.HexToHash("0x111") + e := EnhancedTelemetryService[EnhancedTelemetryMercuryData]{ + chDone: chDone, + chTelem: chTelem, + job: &job.Job{ + Type: job.Type(pipeline.OffchainReporting2JobType), + OCR2OracleSpec: 
&job.OCR2OracleSpec{ + CaptureEATelemetry: true, + FeedID: &feedID, + }, + }, + lggr: lggr, + monitoringEndpoint: monitoringEndpoint, + } + servicetest.Run(t, &e) + + wg.Add(1) + + chTelem <- EnhancedTelemetryMercuryData{ + TaskRunResults: trrsMercuryV2, + V2Observation: &mercuryv2.Observation{ + BenchmarkPrice: mercury.ObsResult[*big.Int]{Val: big.NewInt(111111)}, + MaxFinalizedTimestamp: mercury.ObsResult[int64]{Val: 321}, + LinkPrice: mercury.ObsResult[*big.Int]{Val: big.NewInt(4321)}, + NativePrice: mercury.ObsResult[*big.Int]{Val: big.NewInt(54321)}, + }, + RepTimestamp: types.ReportTimestamp{ + ConfigDigest: types.ConfigDigest{2}, + Epoch: 11, + Round: 22, + }, + } + + expectedTelemetry := telem.EnhancedEAMercury{ + DataSource: "data-source-name", + DpBenchmarkPrice: 123456.123456, + CurrentBlockNumber: 0, + CurrentBlockHash: "", + CurrentBlockTimestamp: 0, + BridgeTaskRunStartedTimestamp: trrsMercuryV1[0].CreatedAt.UnixMilli(), + BridgeTaskRunEndedTimestamp: trrsMercuryV1[0].FinishedAt.Time.UnixMilli(), + ProviderRequestedTimestamp: 92233720368547760, + ProviderReceivedTimestamp: -92233720368547760, + ProviderDataStreamEstablished: 1, + ProviderIndicatedTime: -123456789, + Feed: common.HexToHash("0x111").String(), + ObservationBenchmarkPrice: 111111, + ObservationBid: 0, + ObservationAsk: 0, + ConfigDigest: "0200000000000000000000000000000000000000000000000000000000000000", + Round: 22, + Epoch: 11, + AssetSymbol: "USD/PLI", + ObservationBenchmarkPriceString: "111111", + MaxFinalizedTimestamp: 321, + LinkPrice: 4321, + NativePrice: 54321, + } + + expectedMessage, _ := proto.Marshal(&expectedTelemetry) + wg.Wait() + + require.Equal(t, expectedMessage, sentMessage) + + chTelem <- EnhancedTelemetryMercuryData{ + TaskRunResults: pipeline.TaskRunResults{ + pipeline.TaskRunResult{Task: &pipeline.BridgeTask{ + Name: "test-mercury-bridge-2", + BaseTask: pipeline.NewBaseTask(0, "ds1", nil, nil, 0), + }, + Result: pipeline.Result{ + Value: nil, + }}, + }, + 
V2Observation: &mercuryv2.Observation{}, + RepTimestamp: types.ReportTimestamp{ + ConfigDigest: types.ConfigDigest{2}, + Epoch: 11, + Round: 22, + }, + } + wg.Add(1) + trrsMercuryV2[0].Result.Value = "" + chTelem <- EnhancedTelemetryMercuryData{ + TaskRunResults: trrsMercuryV2, + V2Observation: &mercuryv2.Observation{}, + RepTimestamp: types.ReportTimestamp{ + ConfigDigest: types.ConfigDigest{2}, + Epoch: 11, + Round: 22, + }, + } + + wg.Wait() + require.Equal(t, 4, logs.Len()) + require.Contains(t, logs.All()[0].Message, "cannot parse enhanced EA telemetry bid price") + require.Contains(t, logs.All()[1].Message, "cannot get bridge response from bridge task") + require.Contains(t, logs.All()[2].Message, "cannot parse EA telemetry") + require.Contains(t, logs.All()[3].Message, "cannot parse enhanced EA telemetry bid price") + chDone <- struct{}{} +} diff --git a/core/services/ocrcommon/transmitter.go b/core/services/ocrcommon/transmitter.go new file mode 100644 index 00000000..e191e080 --- /dev/null +++ b/core/services/ocrcommon/transmitter.go @@ -0,0 +1,98 @@ +package ocrcommon + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" +) + +type roundRobinKeystore interface { + GetRoundRobinAddress(chainID *big.Int, addresses ...common.Address) (address common.Address, err error) +} + +type txManager interface { + CreateTransaction(ctx context.Context, txRequest txmgr.TxRequest) (tx txmgr.Tx, err error) +} + +type Transmitter interface { + CreateEthTransaction(ctx context.Context, toAddress common.Address, payload []byte, txMeta *txmgr.TxMeta) error + FromAddress() common.Address +} + +type transmitter struct { + txm txManager + fromAddresses []common.Address + gasLimit uint32 + effectiveTransmitterAddress common.Address + strategy types.TxStrategy + checker txmgr.TransmitCheckerSpec + chainID 
// forwarderAddress returns the forwarder contract address to attach to an
// outgoing transaction, or the zero address when no forwarder applies.
// If the effective transmitter address is one of the node's own sending
// keys (fromAddresses), transactions are sent directly and no forwarder is
// used; otherwise the effective transmitter address is presumed to be a
// forwarder contract and is returned as such — TODO(review) confirm this
// assumption against how effectiveTransmitterAddress is configured.
func (t *transmitter) forwarderAddress() common.Address {
	for _, a := range t.fromAddresses {
		if a == t.effectiveTransmitterAddress {
			return common.Address{}
		}
	}
	return t.effectiveTransmitterAddress
}
// newMockTxStrategy returns a fresh TxStrategy mock bound to the test, so
// its expectations are asserted automatically on test cleanup.
func newMockTxStrategy(t *testing.T) *commontxmmocks.TxStrategy {
	return commontxmmocks.NewTxStrategy(t)
}
Test_DefaultTransmitter_Forwarding_Enabled_CreateEthTransaction(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + _, fromAddress2 := cltest.MustInsertRandomKey(t, ethKeyStore) + + gasLimit := uint32(1000) + chainID := big.NewInt(0) + effectiveTransmitterAddress := common.Address{} + toAddress := testutils.NewAddress() + payload := []byte{1, 2, 3} + txm := txmmocks.NewMockEvmTxManager(t) + strategy := newMockTxStrategy(t) + + transmitter, err := ocrcommon.NewTransmitter( + txm, + []common.Address{fromAddress, fromAddress2}, + gasLimit, + effectiveTransmitterAddress, + strategy, + txmgr.TransmitCheckerSpec{}, + chainID, + ethKeyStore, + ) + require.NoError(t, err) + + txm.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + ForwarderAddress: common.Address{}, + Meta: nil, + Strategy: strategy, + }).Return(txmgr.Tx{}, nil).Once() + txm.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: fromAddress2, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + ForwarderAddress: common.Address{}, + Meta: nil, + Strategy: strategy, + }).Return(txmgr.Tx{}, nil).Once() + require.NoError(t, transmitter.CreateEthTransaction(testutils.Context(t), toAddress, payload, nil)) + require.NoError(t, transmitter.CreateEthTransaction(testutils.Context(t), toAddress, payload, nil)) +} + +func Test_DefaultTransmitter_Forwarding_Enabled_CreateEthTransaction_Round_Robin_Error(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + + fromAddress := common.Address{} + + gasLimit := uint32(1000) + chainID := big.NewInt(0) + effectiveTransmitterAddress := 
// CloneSet returns a shallow copy of the input set. Mutating the returned
// map leaves the original untouched.
func CloneSet(in map[string]struct{}) map[string]struct{} {
	out := make(map[string]struct{}, len(in))
	for key := range in {
		out[key] = struct{}{}
	}
	return out
}
// ValidateExplicitlySetKeys checks if the values in expected are present and the values in notExpected are not present
// in the toml tree. Works on top level keys only.
//
// NOTE: expected is consumed — every key found in the tree is deleted from
// it, so callers that need to keep the original set should pass a copy
// (see CloneSet). Keys still remaining in expected afterwards are reported
// as missing; any tree key found in notExpected is reported as
// unrecognised for the given peerType. All failures are combined into a
// single multierr result (nil when everything matches).
func ValidateExplicitlySetKeys(tree *toml.Tree, expected map[string]struct{}, notExpected map[string]struct{}, peerType string) error {
	var err error
	// top level keys only
	for _, k := range tree.Keys() {
		if _, ok := notExpected[k]; ok {
			err = multierr.Append(err, errors.Errorf("unrecognised key for %s peer: %s", peerType, k))
		}
		// Mark this key as seen by removing it (mutates the caller's map).
		delete(expected, k)
	}
	for missing := range expected {
		err = multierr.Append(err, errors.Errorf("missing required key %s", missing))
	}
	return err
}
lggr = lggr.Named("DatabaseBackup") + dbBackupUrl := backupConfig.URL() + if dbBackupUrl != nil { + dbUrl = *dbBackupUrl + } + + outputParentDir := filepath.Join(rootDir, "backup") + if backupConfig.Dir() != "" { + dir, err := filepath.Abs(backupConfig.Dir()) + if err != nil { + return nil, errors.Errorf("failed to get path for Database.Backup.Dir (%s) - please set it to a valid directory path", backupConfig.Dir()) + } + outputParentDir = dir + } + + return &databaseBackup{ + services.StateMachine{}, + lggr, + dbUrl, + backupConfig.Mode(), + backupConfig.Frequency(), + outputParentDir, + make(chan bool), + }, nil +} + +// Start starts DatabaseBackup. +func (backup *databaseBackup) Start(context.Context) error { + return backup.StartOnce("DatabaseBackup", func() (err error) { + ticker := time.NewTicker(backup.frequency) + if backup.frequency == 0 { + backup.logger.Info("Periodic database backups are disabled; Database.Backup.Frequency was set to 0") + // Stopping the ticker means it will never fire, effectively disabling periodic backups + ticker.Stop() + } else if backup.frequencyIsTooSmall() { + return errors.Errorf("Database backup frequency (%s=%v) is too small. Please set it to at least %s (or set to 0 to disable periodic backups)", "Database.Backup.Frequency", backup.frequency, minBackupFrequency) + } + + go func() { + for { + select { + case <-backup.done: + ticker.Stop() + return + case <-ticker.C: + backup.logger.Infow("Starting automatic database backup, this can take a while. To disable periodic backups, set Database.Backup.Frequency=0. 
// RunBackup performs a single database backup and logs the outcome with its
// duration. On failure the error is also appended to the service's error
// buffer (SvcErrBuffer) so it surfaces through health reporting, and is
// returned to the caller. version is embedded in the backup file name by
// runBackup.
func (backup *databaseBackup) RunBackup(version string) error {
	backup.logger.Debugw("Starting backup", "mode", backup.mode, "directory", backup.outputParentDir)
	startAt := time.Now()
	result, err := backup.runBackup(version)
	duration := time.Since(startAt)
	if err != nil {
		backup.logger.Criticalw("Backup failed", "duration", duration, "err", err)
		backup.SvcErrBuffer.Append(err)
		return err
	}
	backup.logger.Infow("Backup completed successfully.", "duration", duration, "fileSize", result.size, "filePath", result.path)
	return nil
}
= append(args, fmt.Sprintf("--exclude-table-data=%s", table)) + } + } + + maskArgs := func(args []string) []string { + masked := make([]string, len(args)) + copy(masked, args) + masked[0] = backup.databaseURL.Redacted() + return masked + } + + maskedArgs := maskArgs(args) + backup.logger.Debugf("Running pg_dump with: %v", maskedArgs) + + cmd := exec.Command( + "pg_dump", args..., + ) + + _, err = cmd.Output() + + if err != nil { + partialResult := &backupResult{ + size: 0, + path: "", + maskedArguments: maskedArgs, + pgDumpArguments: args, + } + var ee *exec.ExitError + if errors.As(err, &ee) { + return partialResult, errors.Wrapf(err, "pg_dump failed with output: %s", string(ee.Stderr)) + } + return partialResult, errors.Wrap(err, "pg_dump failed") + } + + if version == "" { + version = "unknown" + } + finalFilePath := filepath.Join(backup.outputParentDir, fmt.Sprintf(filePattern, version)) + _ = os.Remove(finalFilePath) + err = os.Rename(tmpFile.Name(), finalFilePath) + if err != nil { + _ = os.Remove(tmpFile.Name()) + return nil, errors.Wrap(err, "Failed to rename the temp file to the final backup file") + } + + file, err := os.Stat(finalFilePath) + if err != nil { + return nil, errors.Wrap(err, "Failed to access the final backup file") + } + + return &backupResult{ + size: file.Size(), + path: finalFilePath, + maskedArguments: maskedArgs, + pgDumpArguments: args, + }, nil +} diff --git a/core/services/periodicbackup/backup_test.go b/core/services/periodicbackup/backup_test.go new file mode 100644 index 00000000..a1e97872 --- /dev/null +++ b/core/services/periodicbackup/backup_test.go @@ -0,0 +1,154 @@ +package periodicbackup + +import ( + "net/url" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + 
"github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/static" +) + +func mustNewDatabaseBackup(t *testing.T, url url.URL, rootDir string, config BackupConfig) *databaseBackup { + testutils.SkipShortDB(t) + b, err := NewDatabaseBackup(url, rootDir, config, logger.TestLogger(t)) + require.NoError(t, err) + return b.(*databaseBackup) +} + +func must(t testing.TB, s string) *url.URL { + v, err := url.Parse(s) + require.NoError(t, err) + return v +} + +func TestPeriodicBackup_RunBackup(t *testing.T) { + backupConfig := newTestConfig(time.Minute, nil, "", config.DatabaseBackupModeFull) + periodicBackup := mustNewDatabaseBackup(t, *(must(t, string(env.DatabaseURL.Get()))), os.TempDir(), backupConfig) + assert.False(t, periodicBackup.frequencyIsTooSmall()) + + result, err := periodicBackup.runBackup("0.9.9") + require.NoError(t, err, "error not nil for backup") + + defer os.Remove(result.path) + + file, err := os.Stat(result.path) + require.NoError(t, err, "error not nil when checking for output file") + + assert.Greater(t, file.Size(), int64(0)) + assert.Equal(t, file.Size(), result.size) + assert.Contains(t, result.path, "backup/cl_backup_0.9.9") + assert.NotContains(t, result.pgDumpArguments, "--exclude-table-data=pipeline_task_runs") +} + +func TestPeriodicBackup_RunBackupInLiteMode(t *testing.T) { + backupConfig := newTestConfig(time.Minute, nil, "", config.DatabaseBackupModeLite) + periodicBackup := mustNewDatabaseBackup(t, *(must(t, string(env.DatabaseURL.Get()))), os.TempDir(), backupConfig) + assert.False(t, periodicBackup.frequencyIsTooSmall()) + + result, err := periodicBackup.runBackup("0.9.9") + require.NoError(t, err, "error not nil for backup") + + defer os.Remove(result.path) + + file, err := os.Stat(result.path) + require.NoError(t, err, "error not nil when checking for output file") + + assert.Greater(t, file.Size(), int64(0)) + assert.Equal(t, file.Size(), result.size) + assert.Contains(t, result.path, 
"backup/cl_backup_0.9.9") + assert.Contains(t, result.pgDumpArguments, "--exclude-table-data=pipeline_task_runs") +} + +func TestPeriodicBackup_RunBackupWithoutVersion(t *testing.T) { + backupConfig := newTestConfig(time.Minute, nil, "", config.DatabaseBackupModeFull) + periodicBackup := mustNewDatabaseBackup(t, *(must(t, string(env.DatabaseURL.Get()))), os.TempDir(), backupConfig) + assert.False(t, periodicBackup.frequencyIsTooSmall()) + + result, err := periodicBackup.runBackup(static.Unset) + require.NoError(t, err, "error not nil for backup") + + defer os.Remove(result.path) + + file, err := os.Stat(result.path) + require.NoError(t, err, "error not nil when checking for output file") + + assert.Greater(t, file.Size(), int64(0)) + assert.Equal(t, file.Size(), result.size) + assert.Contains(t, result.path, "backup/cl_backup_unset") +} + +func TestPeriodicBackup_RunBackupViaAltUrlAndMaskPassword(t *testing.T) { + altUrl, _ := url.Parse("postgresql://invalid:some-pass@invalid") + backupConfig := newTestConfig(time.Minute, altUrl, "", config.DatabaseBackupModeFull) + periodicBackup := mustNewDatabaseBackup(t, *(must(t, string(env.DatabaseURL.Get()))), os.TempDir(), backupConfig) + assert.False(t, periodicBackup.frequencyIsTooSmall()) + + partialResult, err := periodicBackup.runBackup("") + require.Error(t, err, "connection to database \"postgresql//invalid\" failed") + assert.Contains(t, partialResult.maskedArguments, "postgresql://invalid:xxxxx@invalid") +} + +func TestPeriodicBackup_FrequencyTooSmall(t *testing.T) { + backupConfig := newTestConfig(time.Second, nil, "", config.DatabaseBackupModeFull) + periodicBackup := mustNewDatabaseBackup(t, *(must(t, string(env.DatabaseURL.Get()))), os.TempDir(), backupConfig) + assert.True(t, periodicBackup.frequencyIsTooSmall()) +} + +func TestPeriodicBackup_AlternativeOutputDir(t *testing.T) { + backupDir := filepath.Join(os.TempDir(), "alternative") + backupConfig := newTestConfig(time.Second, nil, backupDir, 
config.DatabaseBackupModeFull) + periodicBackup := mustNewDatabaseBackup(t, *(must(t, string(env.DatabaseURL.Get()))), os.TempDir(), backupConfig) + + result, err := periodicBackup.runBackup("0.9.9") + require.NoError(t, err, "error not nil for backup") + + defer os.Remove(result.path) + + file, err := os.Stat(result.path) + require.NoError(t, err, "error not nil when checking for output file") + + assert.Greater(t, file.Size(), int64(0)) + assert.Contains(t, result.path, "/alternative/cl_backup_0.9.9.dump") + +} + +type testConfig struct { + frequency time.Duration + mode config.DatabaseBackupMode + url *url.URL + dir string +} + +func (t *testConfig) Frequency() time.Duration { + return t.frequency +} + +func (t *testConfig) Mode() config.DatabaseBackupMode { + return t.mode +} + +func (t *testConfig) URL() *url.URL { + return t.url +} + +func (t *testConfig) Dir() string { + return t.dir +} + +func newTestConfig(frequency time.Duration, databaseBackupURL *url.URL, databaseBackupDir string, mode config.DatabaseBackupMode) *testConfig { + return &testConfig{ + frequency: frequency, + mode: mode, + url: databaseBackupURL, + dir: databaseBackupDir, + } +} diff --git a/core/services/periodicbackup/restore_db_example.sh b/core/services/periodicbackup/restore_db_example.sh new file mode 100644 index 00000000..1ea82d99 --- /dev/null +++ b/core/services/periodicbackup/restore_db_example.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +DB_FILE="$1" +DB_SUPER_USER="postgres" +DB_USER="postgres" +DB_NAME="plugin_fallback_db" +DB_HOST_PORT="localhost:5432" + +psql "postgresql://$DB_SUPER_USER@$DB_HOST_PORT/postgres" -c "CREATE DATABASE $DB_NAME" +psql "postgresql://$DB_SUPER_USER@$DB_HOST_PORT/postgres" -c "GRANT ALL PRIVILEGES ON DATABASE $DB_NAME TO $DB_USER;" + +pg_restore -d "postgresql://$DB_SUPER_USER@$DB_HOST_PORT/$DB_NAME" "$DB_FILE" \ No newline at end of file diff --git a/core/services/pg/connection.go b/core/services/pg/connection.go new file mode 100644 index 
00000000..b3fe0c07 --- /dev/null +++ b/core/services/pg/connection.go @@ -0,0 +1,67 @@ +package pg + +import ( + "fmt" + "time" + + "github.com/google/uuid" + _ "github.com/jackc/pgx/v4/stdlib" // need to make sure pgx driver is registered before opening connection + "github.com/jmoiron/sqlx" + "github.com/scylladb/go-reflectx" + + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" +) + +type ConnectionConfig interface { + DefaultIdleInTxSessionTimeout() time.Duration + DefaultLockTimeout() time.Duration + MaxOpenConns() int + MaxIdleConns() int +} + +func NewConnection(uri string, dialect dialects.DialectName, config ConnectionConfig) (db *sqlx.DB, err error) { + if dialect == dialects.TransactionWrappedPostgres { + // Dbtx uses the uri as a unique identifier for each transaction. Each ORM + // should be encapsulated in it's own transaction, and thus needs its own + // unique id. + // + // We can happily throw away the original uri here because if we are using + // txdb it should have already been set at the point where we called + // txdb.Register + uri = uuid.New().String() + } + + // Initialize sql/sqlx + db, err = sqlx.Open(string(dialect), uri) + if err != nil { + return nil, err + } + db.MapperFunc(reflectx.CamelToSnakeASCII) + + // Set default connection options + lockTimeout := config.DefaultLockTimeout().Milliseconds() + idleInTxSessionTimeout := config.DefaultIdleInTxSessionTimeout().Milliseconds() + stmt := fmt.Sprintf(`SET TIME ZONE 'UTC'; SET lock_timeout = %d; SET idle_in_transaction_session_timeout = %d; SET default_transaction_isolation = %q`, + lockTimeout, idleInTxSessionTimeout, defaultIsolation.String()) + if _, err = db.Exec(stmt); err != nil { + return nil, err + } + db.SetMaxOpenConns(config.MaxOpenConns()) + db.SetMaxIdleConns(config.MaxIdleConns()) + + return db, disallowReplica(db) +} + +func disallowReplica(db *sqlx.DB) error { + var val string + err := db.Get(&val, "SHOW session_replication_role") + if err != nil { + return err + } 
+ + if val == "replica" { + return fmt.Errorf("invalid `session_replication_role`: %s. Refusing to connect to replica database. Writing to a replica will corrupt the database", val) + } + + return nil +} diff --git a/core/services/pg/connection_test.go b/core/services/pg/connection_test.go new file mode 100644 index 00000000..1acb0df7 --- /dev/null +++ b/core/services/pg/connection_test.go @@ -0,0 +1,35 @@ +package pg + +import ( + "testing" + + "github.com/google/uuid" + _ "github.com/jackc/pgx/v4/stdlib" + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" +) + +func Test_disallowReplica(t *testing.T) { + + testutils.SkipShortDB(t) + db, err := sqlx.Open(string(dialects.TransactionWrappedPostgres), uuid.New().String()) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, db.Close()) }) + + _, err = db.Exec("SET session_replication_role= 'origin'") + require.NoError(t, err) + err = disallowReplica(db) + require.NoError(t, err) + + _, err = db.Exec("SET session_replication_role= 'replica'") + require.NoError(t, err) + err = disallowReplica(db) + require.Error(t, err, "replica role should be disallowed") + + _, err = db.Exec("SET session_replication_role= 'not_valid_role'") + require.Error(t, err) + +} diff --git a/core/services/pg/datatypes/json.go b/core/services/pg/datatypes/json.go new file mode 100644 index 00000000..5e833a43 --- /dev/null +++ b/core/services/pg/datatypes/json.go @@ -0,0 +1,9 @@ +package datatypes + +import ( + "github.com/goplugin/plugin-common/pkg/sqlutil" +) + +// JSON defined JSON data type, need to implements driver.Valuer, sql.Scanner interface +// Deprecated: Use sqlutil.JSON instead +type JSON = sqlutil.JSON diff --git a/core/services/pg/helpers_test.go b/core/services/pg/helpers_test.go new file mode 100644 index 00000000..52158535 --- /dev/null +++ b/core/services/pg/helpers_test.go @@ 
-0,0 +1,21 @@ +package pg + +import "github.com/jmoiron/sqlx" + +func SetConn(lock interface{}, conn *sqlx.Conn) { + switch v := lock.(type) { + case *leaseLock: + v.conn = conn + default: + panic("cannot set conn on unknown type") + } +} + +func GetConn(lock interface{}) *sqlx.Conn { + switch v := lock.(type) { + case *leaseLock: + return v.conn + default: + panic("cannot get conn on unknown type") + } +} diff --git a/core/services/pg/lease_lock.go b/core/services/pg/lease_lock.go new file mode 100644 index 00000000..ea6982fd --- /dev/null +++ b/core/services/pg/lease_lock.go @@ -0,0 +1,289 @@ +package pg + +import ( + "context" + "database/sql" + "fmt" + "sync" + "time" + + "github.com/google/uuid" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// LeaseLock handles taking an exclusive lease on database access. This is not +// enforced by any database primitives, but rather voluntarily respected by +// other instances of the Plugin application. +// +// Plugin is designed to run as a single instance. Running multiple +// instances of Plugin on a single database at the same time is not +// supported and likely to lead to strange errors and possibly even data +// integrity failures. +// +// With that being said, a common use case is to run multiple Plugin +// instances in failover mode. The first instance will take some kind of lock +// on the database and subsequent instances will wait trying to take this lock +// in case the first instance disappears or dies. +// +// Traditionally Plugin has used an advisory lock to manage this. However, +// advisory locks come with several problems, notably: - Postgres does not +// really like it when you hold locks open for a very long time (hours/days). 
+// It hampers certain internal cleanup tasks and is explicitly discouraged by +// the postgres maintainers - The advisory lock can silently disappear on +// postgres upgrade - Advisory locks do not play nicely with pooling tools such +// as pgbouncer - If the application crashes, the advisory lock can be left +// hanging around for a while (sometimes hours) and can require manual +// intervention to remove it +// +// For this reason, we now use a database leaseLock instead, which works as +// such: - Have one row in a database which is updated periodically with the +// client ID - CL node A will run a background process on start that updates +// this e.g. once per second - CL node B will spinlock, checking periodically +// to see if the update got too old. If it goes more than, say, 5s without +// updating, it assumes that node A is dead and takes over. Now CL node B is +// the owner of the row and it updates this every second - If CL node A comes +// back somehow, it will go to take out a lease and realise that the database +// has been leased to another process, so it will panic and quit immediately +type LeaseLock interface { + TakeAndHold(ctx context.Context) error + ClientID() uuid.UUID + Release() +} + +type LeaseLockConfig struct { + DefaultQueryTimeout time.Duration + LeaseDuration time.Duration + LeaseRefreshInterval time.Duration +} + +var _ LeaseLock = &leaseLock{} + +type leaseLock struct { + id uuid.UUID + db *sqlx.DB + conn *sqlx.Conn + cfg LeaseLockConfig + logger logger.Logger + stop func() + wgReleased sync.WaitGroup +} + +// NewLeaseLock creates a "leaseLock" - an entity that tries to take an exclusive lease on the database +func NewLeaseLock(db *sqlx.DB, appID uuid.UUID, lggr logger.Logger, cfg LeaseLockConfig) LeaseLock { + if cfg.LeaseRefreshInterval > cfg.LeaseDuration/2 { + panic("refresh interval must be <= half the lease duration") + } + return &leaseLock{appID, db, nil, cfg, lggr.Named("LeaseLock").With("appID", appID), func() {}, 
sync.WaitGroup{}} +} + +// TakeAndHold will block and wait indefinitely until it can get its first lock or ctx is cancelled. +// Release() function must be used to release the acquired lock. +// NOT THREAD SAFE +func (l *leaseLock) TakeAndHold(ctx context.Context) (err error) { + l.logger.Debug("Taking initial lease...") + retryCount := 0 + isInitial := true + + for { + var gotLease bool + var err error + + err = func() error { + qctx, cancel := context.WithTimeout(ctx, l.cfg.DefaultQueryTimeout) + defer cancel() + if l.conn == nil { + if err = l.checkoutConn(qctx); err != nil { + return errors.Wrap(err, "lease lock failed to checkout initial connection") + } + } + gotLease, err = l.getLease(qctx, isInitial) + if errors.Is(err, sql.ErrConnDone) { + l.logger.Warnw("DB connection was unexpectedly closed; checking out a new one", "err", err) + l.conn = nil + return err + } + return nil + }() + + if errors.Is(err, sql.ErrConnDone) { + continue + } else if err != nil { + err = errors.Wrap(err, "failed to get lease lock") + if l.conn != nil { + err = multierr.Combine(err, l.conn.Close()) + } + return err + } + if gotLease { + break + } + isInitial = false + l.logRetry(retryCount) + retryCount++ + select { + case <-ctx.Done(): + err = errors.New("stopped") + if l.conn != nil { + err = multierr.Combine(err, l.conn.Close()) + } + return err + case <-time.After(utils.WithJitter(l.cfg.LeaseRefreshInterval)): + } + } + l.logger.Debug("Got exclusive lease on database") + + lctx, cancel := context.WithCancel(context.Background()) + l.stop = cancel + + l.wgReleased.Add(1) + // Once the lock is acquired, Release() method must be used to release the lock (hence different context). + // This is done on purpose: Release() method has exclusive control on releasing the lock. + go l.loop(lctx) + + return nil +} + +// Release requests the lock to release and blocks until it gets released. +// Calling Release for a released lock has no effect. 
+func (l *leaseLock) Release() { + l.stop() + l.wgReleased.Wait() +} + +// checkout dedicated connection for lease lock to bypass any DB contention +func (l *leaseLock) checkoutConn(ctx context.Context) (err error) { + newConn, err := l.db.Connx(ctx) + if err != nil { + return errors.Wrap(err, "failed checking out connection from pool") + } + l.conn = newConn + if err = l.setInitialTimeouts(ctx); err != nil { + return multierr.Combine( + errors.Wrap(err, "failed to set initial timeouts"), + l.conn.Close(), + ) + } + return nil +} + +func (l *leaseLock) setInitialTimeouts(ctx context.Context) error { + // Set short timeouts to prevent some kind of pathological situation + // occurring where we get stuck waiting for the table lock, or hang during + // the transaction - we do not want to leave rows locked if this process is + // dead + ms := l.cfg.LeaseDuration.Milliseconds() + return multierr.Combine( + utils.JustError(l.conn.ExecContext(ctx, fmt.Sprintf(`SET SESSION lock_timeout = %d`, ms))), + utils.JustError(l.conn.ExecContext(ctx, fmt.Sprintf(`SET SESSION idle_in_transaction_session_timeout = %d`, ms))), + ) +} + +func (l *leaseLock) logRetry(count int) { + if count%1000 == 0 || (count < 1000 && count&(count-1) == 0) { + l.logger.Infow("Another application is currently holding the database lease (or a previous instance exited uncleanly), waiting for lease to expire...", "tryCount", count) + } +} + +func (l *leaseLock) loop(ctx context.Context) { + defer l.wgReleased.Done() + + refresh := time.NewTicker(l.cfg.LeaseRefreshInterval) + defer refresh.Stop() + + for { + select { + case <-ctx.Done(): + qctx, cancel := context.WithTimeout(context.Background(), l.cfg.DefaultQueryTimeout) + err := multierr.Combine( + utils.JustError(l.conn.ExecContext(qctx, `UPDATE lease_lock SET expires_at=NOW() WHERE client_id = $1 AND expires_at > NOW()`, l.id)), + l.conn.Close(), + ) + cancel() + if err != nil { + l.logger.Warnw("Error trying to release lease on cancelled ctx", "err", 
err) + } + return + case <-refresh.C: + qctx, cancel := context.WithTimeout(ctx, l.cfg.LeaseDuration) + gotLease, err := l.getLease(qctx, false) + if errors.Is(err, sql.ErrConnDone) { + l.logger.Warnw("DB connection was unexpectedly closed; checking out a new one", "err", err) + if err = l.checkoutConn(ctx); err != nil { + l.logger.Warnw("Error trying to refresh connection", "err", err) + } + gotLease, err = l.getLease(ctx, false) + } + cancel() + if err != nil { + l.logger.Errorw("Error trying to refresh database lease", "err", err) + } else if !gotLease { + if err := l.db.Close(); err != nil { + l.logger.Errorw("Failed to close DB", "err", err) + } + l.logger.Fatal("Another node has taken the lease, exiting immediately") + } + } + } +} + +// initialSQL is necessary because the application attempts to take the lease +// lock BEFORE running migrations +var initialSQL = []string{ + `CREATE TABLE IF NOT EXISTS lease_lock (client_id uuid NOT NULL, expires_at timestamptz NOT NULL)`, + `CREATE UNIQUE INDEX IF NOT EXISTS only_one_lease_lock ON lease_lock ((client_id IS NOT NULL))`, +} + +// GetLease tries to get a lease from the DB +// If successful, returns true +// If the lease is currently held by someone else, returns false +// If some other error occurred, returns the error +func (l *leaseLock) getLease(ctx context.Context, isInitial bool) (gotLease bool, err error) { + l.logger.Trace("Refreshing database lease") + leaseDuration := fmt.Sprintf("%f seconds", l.cfg.LeaseDuration.Seconds()) + + // NOTE: Uses database time for all calculations since it's conceivable + // that node local times might be skewed compared to each other + err = sqlxTransactionQ(ctx, l.conn, l.logger, func(tx Queryer) error { + if isInitial { + for _, query := range initialSQL { + if _, err = tx.Exec(query); err != nil { + return errors.Wrap(err, "failed to create initial lease_lock table") + } + } + } + + // Upsert the lease_lock, only overwriting an existing one if the existing one has 
expired + var res sql.Result + res, err = tx.Exec(` +INSERT INTO lease_lock (client_id, expires_at) VALUES ($1, NOW()+$2::interval) ON CONFLICT ((client_id IS NOT NULL)) DO UPDATE SET +client_id = EXCLUDED.client_id, +expires_at = EXCLUDED.expires_at +WHERE +lease_lock.client_id = $1 +OR +lease_lock.expires_at < NOW() +`, l.id, leaseDuration) + if err != nil { + return errors.Wrap(err, "failed to upsert lease_lock") + } + var rowsAffected int64 + rowsAffected, err = res.RowsAffected() + if err != nil { + return errors.Wrap(err, "failed to get RowsAffected for lease lock upsert") + } + if rowsAffected > 0 { + gotLease = true + } + return nil + }) + return gotLease, errors.Wrap(err, "leaseLock#GetLease failed") +} + +func (l *leaseLock) ClientID() uuid.UUID { + return l.id +} diff --git a/core/services/pg/lease_lock_test.go b/core/services/pg/lease_lock_test.go new file mode 100644 index 00000000..5bed0bf5 --- /dev/null +++ b/core/services/pg/lease_lock_test.go @@ -0,0 +1,222 @@ +package pg_test + +import ( + "context" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +func newLeaseLock(t *testing.T, db *sqlx.DB, cfg pg.LeaseLockConfig) pg.LeaseLock { + return pg.NewLeaseLock(db, uuid.New(), logger.TestLogger(t), cfg) +} + +func Test_LeaseLock(t *testing.T) { + cfg, db := heavyweight.FullTestDBNoFixturesV2(t, func(c *plugin.Config, s *plugin.Secrets) { + t := true + c.Database.Lock.Enabled = &t + }) + + t.Run("on migrated database", func(t *testing.T) { + cfg := pg.LeaseLockConfig{ + DefaultQueryTimeout: 
cfg.Database().DefaultQueryTimeout(), + LeaseDuration: 15 * time.Second, + LeaseRefreshInterval: 100 * time.Millisecond, + } + leaseLock1 := newLeaseLock(t, db, cfg) + + err := leaseLock1.TakeAndHold(testutils.Context(t)) + require.NoError(t, err) + + var clientID uuid.UUID + err = db.Get(&clientID, `SELECT client_id FROM lease_lock`) + require.NoError(t, err) + assert.Equal(t, leaseLock1.ClientID(), clientID) + + started2 := make(chan struct{}) + leaseLock2 := newLeaseLock(t, db, cfg) + go func() { + defer leaseLock2.Release() + require.NoError(t, leaseLock2.TakeAndHold(testutils.Context(t))) + close(started2) + }() + + // Give it plenty of time to have a few tries at getting the lease + time.Sleep(cfg.LeaseRefreshInterval * 5) + + leaseLock1.Release() + + select { + case <-started2: + case <-time.After(testutils.WaitTimeout(t)): + t.Fatal("timed out waiting for leaseLock2 to start") + } + + err = db.Get(&clientID, `SELECT client_id FROM lease_lock`) + require.NoError(t, err) + assert.Equal(t, leaseLock2.ClientID(), clientID) + }) + + t.Run("recovers and re-opens connection if it's closed externally on initial take wait", func(t *testing.T) { + cfg := pg.LeaseLockConfig{ + DefaultQueryTimeout: cfg.Database().DefaultQueryTimeout(), + LeaseDuration: 15 * time.Second, + LeaseRefreshInterval: 100 * time.Millisecond, + } + leaseLock := newLeaseLock(t, db, cfg) + + otherAppID := uuid.New() + + // simulate another application holding lease to force it to retry + res, err := db.Exec(`UPDATE lease_lock SET client_id=$1,expires_at=NOW()+'1 day'::interval`, otherAppID) + require.NoError(t, err) + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.EqualValues(t, 1, rowsAffected) + + conn, err := db.Connx(testutils.Context(t)) + require.NoError(t, err) + + pg.SetConn(leaseLock, conn) + + // Simulate the connection being closed (leaseLock should automatically check out a new one) + require.NoError(t, conn.Close()) + + gotLease := make(chan struct{}) + 
go func() { + errInternal := leaseLock.TakeAndHold(testutils.Context(t)) + require.NoError(t, errInternal) + close(gotLease) + }() + + // Give it plenty of time to have a few tries at getting the lease + time.Sleep(cfg.LeaseRefreshInterval * 5) + + // Release the dummy lease lock to allow the lease locker to take it now + _, err = db.Exec(`DELETE FROM lease_lock WHERE client_id=$1`, otherAppID) + require.NoError(t, err) + + select { + case <-gotLease: + case <-time.After(testutils.WaitTimeout(t)): + t.Fatal("timed out waiting for lease lock to start") + } + + // check that the lease lock was actually taken + var exists bool + err = db.Get(&exists, `SELECT EXISTS(SELECT 1 FROM lease_lock)`) + require.NoError(t, err) + + assert.True(t, exists) + + leaseLock.Release() + }) + + t.Run("recovers and re-opens connection if it's closed externally while holding", func(t *testing.T) { + cfg := pg.LeaseLockConfig{ + DefaultQueryTimeout: cfg.Database().DefaultQueryTimeout(), + LeaseDuration: 15 * time.Second, + LeaseRefreshInterval: 100 * time.Millisecond, + } + leaseLock := newLeaseLock(t, db, cfg) + + err := leaseLock.TakeAndHold(testutils.Context(t)) + require.NoError(t, err) + defer leaseLock.Release() + + conn := pg.GetConn(leaseLock) + + var prevExpiresAt time.Time + + err = conn.Close() + require.NoError(t, err) + + err = db.Get(&prevExpiresAt, `SELECT expires_at FROM lease_lock`) + require.NoError(t, err) + + time.Sleep(cfg.LeaseRefreshInterval + 1*time.Second) + + var expiresAt time.Time + + err = db.Get(&expiresAt, `SELECT expires_at FROM lease_lock`) + require.NoError(t, err) + + // The lease lock must have recovered and re-opened the connection if the second expires_at is later + assert.Greater(t, expiresAt.Unix(), prevExpiresAt.Unix()) + }) + + t.Run("release lock with Release() func", func(t *testing.T) { + cfg := pg.LeaseLockConfig{ + DefaultQueryTimeout: cfg.Database().DefaultQueryTimeout(), + LeaseDuration: 15 * time.Second, + LeaseRefreshInterval: 100 * 
time.Millisecond, + } + leaseLock := newLeaseLock(t, db, cfg) + + err := leaseLock.TakeAndHold(testutils.Context(t)) + require.NoError(t, err) + + leaseLock.Release() + + leaseLock2 := newLeaseLock(t, db, cfg) + err = leaseLock2.TakeAndHold(testutils.Context(t)) + defer leaseLock2.Release() + require.NoError(t, err) + }) + + t.Run("cancel TakeAndHold with ctx", func(t *testing.T) { + cfg := pg.LeaseLockConfig{ + DefaultQueryTimeout: cfg.Database().DefaultQueryTimeout(), + LeaseDuration: 15 * time.Second, + LeaseRefreshInterval: 100 * time.Millisecond, + } + leaseLock1 := newLeaseLock(t, db, cfg) + leaseLock2 := newLeaseLock(t, db, cfg) + + err := leaseLock1.TakeAndHold(testutils.Context(t)) + require.NoError(t, err) + + awaiter := cltest.NewAwaiter() + go func() { + ctx, cancel := context.WithCancel(testutils.Context(t)) + go func() { + <-time.After(3 * time.Second) + cancel() + }() + err := leaseLock2.TakeAndHold(ctx) + require.Error(t, err) + awaiter.ItHappened() + }() + + awaiter.AwaitOrFail(t) + leaseLock1.Release() + }) + + require.NoError(t, db.Close()) + + t.Run("on virgin database", func(t *testing.T) { + _, db := heavyweight.FullTestDBEmptyV2(t, nil) + cfg := pg.LeaseLockConfig{ + DefaultQueryTimeout: cfg.Database().DefaultQueryTimeout(), + LeaseDuration: 15 * time.Second, + LeaseRefreshInterval: 100 * time.Millisecond, + } + leaseLock1 := newLeaseLock(t, db, cfg) + + err := leaseLock1.TakeAndHold(testutils.Context(t)) + defer leaseLock1.Release() + require.NoError(t, err) + }) +} diff --git a/core/services/pg/locked_db.go b/core/services/pg/locked_db.go new file mode 100644 index 00000000..50b949b5 --- /dev/null +++ b/core/services/pg/locked_db.go @@ -0,0 +1,148 @@ +package pg + +import ( + "context" + "net/url" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/static" + 
"github.com/goplugin/pluginv3.0/v2/core/store/dialects" +) + +// LockedDB bounds DB connection and DB locks. +type LockedDB interface { + Open(ctx context.Context) error + Close() error + DB() *sqlx.DB +} + +type LockedDBConfig interface { + ConnectionConfig + URL() url.URL + DefaultQueryTimeout() time.Duration + Dialect() dialects.DialectName +} + +type lockedDb struct { + appID uuid.UUID + cfg LockedDBConfig + lockCfg config.Lock + lggr logger.Logger + db *sqlx.DB + leaseLock LeaseLock + statsReporter *StatsReporter +} + +// NewLockedDB creates a new instance of LockedDB. +func NewLockedDB(appID uuid.UUID, cfg LockedDBConfig, lockCfg config.Lock, lggr logger.Logger) LockedDB { + return &lockedDb{ + appID: appID, + cfg: cfg, + lockCfg: lockCfg, + lggr: lggr.Named("LockedDB"), + } +} + +// OpenUnlockedDB just opens DB connection, without any DB locks. +// This should be used carefully, when we know we don't need any locks. +// Currently this is used by RebroadcastTransactions command only. +func OpenUnlockedDB(appID uuid.UUID, cfg LockedDBConfig) (db *sqlx.DB, err error) { + return openDB(appID, cfg) +} + +// Open function connects to DB and acquires DB locks based on configuration. +// If any of the steps fails or ctx is cancelled, it reverts everything. +// This is a blocking function and it may execute long due to DB locks acquisition. +// NOT THREAD SAFE +func (l *lockedDb) Open(ctx context.Context) (err error) { + // If Open succeeded previously, db will not be nil + if l.db != nil { + l.lggr.Panic("calling Open() twice") + } + + // Step 1: open DB connection + l.db, err = openDB(l.appID, l.cfg) + if err != nil { + // l.db will be nil in case of error + return errors.Wrap(err, "failed to open db") + } + revert := func() { + // Let Open() return the actual error, while l.Close() error is just logged. 
+ if err2 := l.Close(); err2 != nil { + l.lggr.Errorf("failed to cleanup LockedDB: %v", err2) + } + } + + // Step 2: start the stat reporter + l.statsReporter = NewStatsReporter(l.db.Stats, l.lggr) + l.statsReporter.Start(ctx) + + // Step 3: acquire DB locks + lockingMode := l.lockCfg.LockingMode() + l.lggr.Debugf("Using database locking mode: %s", lockingMode) + + // Take the lease before any other DB operations + switch lockingMode { + case "lease": + cfg := LeaseLockConfig{ + DefaultQueryTimeout: l.cfg.DefaultQueryTimeout(), + LeaseDuration: l.lockCfg.LeaseDuration(), + LeaseRefreshInterval: l.lockCfg.LeaseRefreshInterval(), + } + l.leaseLock = NewLeaseLock(l.db, l.appID, l.lggr, cfg) + if err = l.leaseLock.TakeAndHold(ctx); err != nil { + defer revert() + return errors.Wrap(err, "failed to take initial lease on database") + } + } + + return +} + +// Close function releases DB locks (if acquired by Open) and closes DB connection. +// Closing of a closed LockedDB instance has no effect. +// NOT THREAD SAFE +func (l *lockedDb) Close() error { + defer func() { + l.db = nil + l.leaseLock = nil + l.statsReporter = nil + }() + + // Step 0: stop the stat reporter + if l.statsReporter != nil { + l.statsReporter.Stop() + } + + // Step 1: release DB locks + if l.leaseLock != nil { + l.leaseLock.Release() + } + + // Step 2: close DB connection + if l.db != nil { + return l.db.Close() + } + + return nil +} + +// DB returns DB connection if Opened successfully, or nil. 
+func (l lockedDb) DB() *sqlx.DB { + return l.db +} + +func openDB(appID uuid.UUID, cfg LockedDBConfig) (db *sqlx.DB, err error) { + uri := cfg.URL() + static.SetConsumerName(&uri, "App", &appID) + dialect := cfg.Dialect() + db, err = NewConnection(uri.String(), dialect, cfg) + return +} diff --git a/core/services/pg/locked_db_test.go b/core/services/pg/locked_db_test.go new file mode 100644 index 00000000..786519c7 --- /dev/null +++ b/core/services/pg/locked_db_test.go @@ -0,0 +1,104 @@ +package pg_test + +import ( + "context" + "testing" + "time" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + "github.com/stretchr/testify/require" +) + +func lease(c *plugin.Config, s *plugin.Secrets) { + t := true + c.Database.Lock.Enabled = &t + c.Database.Lock.LeaseDuration = commonconfig.MustNewDuration(10 * time.Second) + c.Database.Lock.LeaseRefreshInterval = commonconfig.MustNewDuration(time.Second) +} + +func TestLockedDB_HappyPath(t *testing.T) { + testutils.SkipShortDB(t) + config := configtest.NewGeneralConfig(t, lease) + lggr := logger.TestLogger(t) + ldb := pg.NewLockedDB(config.AppID(), config.Database(), config.Database().Lock(), lggr) + + err := ldb.Open(testutils.Context(t)) + require.NoError(t, err) + require.NotNil(t, ldb.DB()) + + err = ldb.Close() + require.NoError(t, err) + require.Nil(t, ldb.DB()) +} + +func TestLockedDB_ContextCancelled(t *testing.T) { + testutils.SkipShortDB(t) + config := configtest.NewGeneralConfig(t, lease) + lggr := logger.TestLogger(t) + ldb := pg.NewLockedDB(config.AppID(), config.Database(), config.Database().Lock(), lggr) + + ctx, cancel := context.WithCancel(testutils.Context(t)) + cancel() + err := ldb.Open(ctx) + 
require.Error(t, err) + require.Nil(t, ldb.DB()) +} + +func TestLockedDB_OpenTwice(t *testing.T) { + testutils.SkipShortDB(t) + config := configtest.NewGeneralConfig(t, lease) + lggr := logger.TestLogger(t) + ldb := pg.NewLockedDB(config.AppID(), config.Database(), config.Database().Lock(), lggr) + + err := ldb.Open(testutils.Context(t)) + require.NoError(t, err) + require.Panics(t, func() { + _ = ldb.Open(testutils.Context(t)) + }) + + _ = ldb.Close() +} + +func TestLockedDB_TwoInstances(t *testing.T) { + testutils.SkipShortDB(t) + config := configtest.NewGeneralConfig(t, lease) + lggr := logger.TestLogger(t) + + ldb1 := pg.NewLockedDB(config.AppID(), config.Database(), config.Database().Lock(), lggr) + err := ldb1.Open(testutils.Context(t)) + require.NoError(t, err) + defer func() { + require.NoError(t, ldb1.Close()) + }() + + // second instance would wait for locks to be released, + // hence we use some timeout + ctx, cancel := context.WithTimeout(testutils.Context(t), config.Database().Lock().LeaseDuration()) + defer cancel() + ldb2 := pg.NewLockedDB(config.AppID(), config.Database(), config.Database().Lock(), lggr) + err = ldb2.Open(ctx) + require.Error(t, err) +} + +func TestOpenUnlockedDB(t *testing.T) { + testutils.SkipShortDB(t) + config := configtest.NewGeneralConfig(t, nil) + + db1, err1 := pg.OpenUnlockedDB(config.AppID(), config.Database()) + require.NoError(t, err1) + require.NotNil(t, db1) + + // should not block the second connection + db2, err2 := pg.OpenUnlockedDB(config.AppID(), config.Database()) + require.NoError(t, err2) + require.NotNil(t, db2) + + require.NoError(t, db1.Close()) + require.NoError(t, db2.Close()) +} diff --git a/core/services/pg/q.go b/core/services/pg/q.go new file mode 100644 index 00000000..07dee97e --- /dev/null +++ b/core/services/pg/q.go @@ -0,0 +1,368 @@ +package pg + +import ( + "context" + "database/sql" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + 
"github.com/lib/pq" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/logger" +) + +var promSQLQueryTime = promauto.NewHistogram(prometheus.HistogramOpts{ + Name: "sql_query_timeout_percent", + Help: "SQL query time as a pecentage of timeout.", + Buckets: []float64{10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120}, +}) + +// QOpt pattern for ORM methods aims to clarify usage and remove some common footguns, notably: +// +// 1. It should be easy and obvious how to pass a parent context or a transaction into an ORM method +// 2. Simple queries should not be cluttered +// 3. It should have compile-time safety and be explicit +// 4. It should enforce some sort of context deadline on all queries by default +// 5. It should optimise for clarity and readability +// 6. It should mandate using sqlx everywhere, gorm is forbidden in new code +// 7. It should make using sqlx a little more convenient by wrapping certain methods +// 8. It allows easier mocking of DB calls (Queryer is an interface) +// +// The two main concepts introduced are: +// +// A `Q` struct that wraps a `sqlx.DB` or `sqlx.Tx` and implements the `pg.Queryer` interface. +// +// This struct is initialised with `QOpts` which define how the queryer should behave. `QOpts` can define a parent context, an open transaction or other options to configure the Queryer. +// +// A sample ORM method looks like this: +// +// func (o *orm) GetFoo(id int64, qopts ...pg.QOpt) (Foo, error) { +// q := pg.NewQ(q, qopts...) +// return q.Exec(...) 
+// } +// +// Now you can call it like so: +// +// orm.GetFoo(1) // will automatically have default query timeout context set +// orm.GetFoo(1, pg.WithParentCtx(ctx)) // will wrap the supplied parent context with the default query context +// orm.GetFoo(1, pg.WithQueryer(tx)) // allows to pass in a running transaction or anything else that implements Queryer +// orm.GetFoo(q, pg.WithQueryer(tx), pg.WithParentCtx(ctx)) // options can be combined +type QOpt func(*Q) + +// WithQueryer sets the queryer +func WithQueryer(queryer Queryer) QOpt { + return func(q *Q) { + if q.Queryer != nil { + panic("queryer already set") + } + q.Queryer = queryer + } +} + +// WithParentCtx sets or overwrites the parent ctx +func WithParentCtx(ctx context.Context) QOpt { + return func(q *Q) { + q.ParentCtx = ctx + } +} + +// If the parent has a timeout, just use that instead of DefaultTimeout +func WithParentCtxInheritTimeout(ctx context.Context) QOpt { + return func(q *Q) { + q.ParentCtx = ctx + deadline, ok := q.ParentCtx.Deadline() + if ok { + q.QueryTimeout = time.Until(deadline) + } + } +} + +// WithLongQueryTimeout prevents the usage of the `DefaultQueryTimeout` duration and uses `OneMinuteQueryTimeout` instead +// Some queries need to take longer when operating over big chunks of data, like deleting jobs, but we need to keep some upper bound timeout +func WithLongQueryTimeout() QOpt { + return func(q *Q) { + q.QueryTimeout = longQueryTimeout + } +} + +var _ Queryer = Q{} + +type QConfig interface { + LogSQL() bool + DefaultQueryTimeout() time.Duration +} + +// Q wraps an underlying queryer (either a *sqlx.DB or a *sqlx.Tx) +// +// It is designed to make handling *sqlx.Tx or *sqlx.DB a little bit safer by +// preventing footguns such as having no deadline on contexts. +// +// It also handles nesting transactions. 
+// +// It automatically adds the default context deadline to all non-context +// queries (if you _really_ want to issue a query without a context, use the +// underlying Queryer) +// +// This is not the prettiest construct but without macros its about the best we +// can do. +type Q struct { + Queryer + ParentCtx context.Context + db *sqlx.DB + logger logger.SugaredLogger + config QConfig + QueryTimeout time.Duration +} + +func NewQ(db *sqlx.DB, lggr logger.Logger, config QConfig, qopts ...QOpt) (q Q) { + for _, opt := range qopts { + opt(&q) + } + + q.db = db + // skip two levels since we use internal helpers and also want to point up the stack to the caller of the Q method. + q.logger = logger.Sugared(logger.Helper(lggr, 2)) + q.config = config + + if q.Queryer == nil { + q.Queryer = db + } + if q.ParentCtx == nil { + q.ParentCtx = context.Background() + } + if q.QueryTimeout <= 0 { + q.QueryTimeout = q.config.DefaultQueryTimeout() + } + return +} + +func (q Q) originalLogger() logger.Logger { + return logger.Helper(q.logger, -2) +} + +func PrepareQueryRowx(q Queryer, sql string, dest interface{}, arg interface{}) error { + stmt, err := q.PrepareNamed(sql) + if err != nil { + return errors.Wrap(err, "error preparing named statement") + } + defer stmt.Close() + return errors.Wrap(stmt.QueryRowx(arg).Scan(dest), "error querying row") +} + +func (q Q) WithOpts(qopts ...QOpt) Q { + return NewQ(q.db, q.originalLogger(), q.config, qopts...) +} + +func (q Q) Context() (context.Context, context.CancelFunc) { + return context.WithTimeout(q.ParentCtx, q.QueryTimeout) +} + +func (q Q) Transaction(fc func(q Queryer) error, txOpts ...TxOption) error { + ctx, cancel := q.Context() + defer cancel() + return SqlxTransaction(ctx, q.Queryer, q.originalLogger(), fc, txOpts...) 
+} + +// CAUTION: A subtle problem lurks here, because the following code is buggy: +// +// ctx, cancel := context.WithCancel(context.Background()) +// rows, err := db.QueryContext(ctx, "SELECT foo") +// cancel() // canceling here "poisons" the scan below +// for rows.Next() { +// rows.Scan(...) +// } +// +// We must cancel the context only after we have completely finished using the +// returned rows or result from the query/exec +// +// For this reasons, the following functions return a context.CancelFunc and it +// is up to the caller to ensure that cancel is called after it has finished +// +// Generally speaking, it makes more sense to use Get/Select in most cases, +// which avoids this problem +func (q Q) ExecQIter(query string, args ...interface{}) (sql.Result, context.CancelFunc, error) { + ctx, cancel := q.Context() + + ql := q.newQueryLogger(query, args) + ql.logSqlQuery() + defer ql.postSqlLog(ctx, time.Now()) + + res, err := q.Queryer.ExecContext(ctx, query, args...) + return res, cancel, ql.withLogError(err) +} +func (q Q) ExecQ(query string, args ...interface{}) error { + ctx, cancel := q.Context() + defer cancel() + + ql := q.newQueryLogger(query, args) + ql.logSqlQuery() + defer ql.postSqlLog(ctx, time.Now()) + + _, err := q.Queryer.ExecContext(ctx, query, args...) + return ql.withLogError(err) +} +func (q Q) ExecQNamed(query string, arg interface{}) (err error) { + query, args, err := q.BindNamed(query, arg) + if err != nil { + return errors.Wrap(err, "error binding arg") + } + ctx, cancel := q.Context() + defer cancel() + + ql := q.newQueryLogger(query, args) + ql.logSqlQuery() + defer ql.postSqlLog(ctx, time.Now()) + + _, err = q.Queryer.ExecContext(ctx, query, args...) 
+ return ql.withLogError(err) +} + +// Select and Get are safe to wrap the context cancellation because the rows +// are entirely consumed within the call +func (q Q) Select(dest interface{}, query string, args ...interface{}) error { + ctx, cancel := q.Context() + defer cancel() + + ql := q.newQueryLogger(query, args) + ql.logSqlQuery() + defer ql.postSqlLog(ctx, time.Now()) + + return ql.withLogError(q.Queryer.SelectContext(ctx, dest, query, args...)) +} + +func (q Q) SelectNamed(dest interface{}, query string, arg interface{}) error { + query, args, err := q.BindNamed(query, arg) + if err != nil { + return errors.Wrap(err, "error binding arg") + } + return q.Select(dest, query, args...) +} + +func (q Q) Get(dest interface{}, query string, args ...interface{}) error { + ctx, cancel := q.Context() + defer cancel() + + ql := q.newQueryLogger(query, args) + ql.logSqlQuery() + defer ql.postSqlLog(ctx, time.Now()) + + return ql.withLogError(q.Queryer.GetContext(ctx, dest, query, args...)) +} + +func (q Q) GetNamed(sql string, dest interface{}, arg interface{}) error { + query, args, err := q.BindNamed(sql, arg) + if err != nil { + return errors.Wrap(err, "error binding arg") + } + ctx, cancel := q.Context() + defer cancel() + + ql := q.newQueryLogger(query, args) + ql.logSqlQuery() + defer ql.postSqlLog(ctx, time.Now()) + + return ql.withLogError(errors.Wrap(q.GetContext(ctx, dest, query, args...), "error in get query")) +} + +func (q Q) newQueryLogger(query string, args []interface{}) *queryLogger { + return &queryLogger{Q: q, query: query, args: args, str: sync.OnceValue(func() string { + return sprintQ(query, args) + })} +} + +// sprintQ formats the query with the given args and returns the resulting string. +func sprintQ(query string, args []interface{}) string { + if args == nil { + return query + } + var pairs []string + for i, arg := range args { + // We print by type so one can directly take the logged query string and execute it manually in pg. 
+ // Annoyingly it seems as though the logger itself will add an extra \, so you still have to remove that. + switch v := arg.(type) { + case []byte: + pairs = append(pairs, fmt.Sprintf("$%d", i+1), fmt.Sprintf("'\\x%x'", v)) + case common.Address: + pairs = append(pairs, fmt.Sprintf("$%d", i+1), fmt.Sprintf("'\\x%x'", v.Bytes())) + case common.Hash: + pairs = append(pairs, fmt.Sprintf("$%d", i+1), fmt.Sprintf("'\\x%x'", v.Bytes())) + case pq.ByteaArray: + var s strings.Builder + fmt.Fprintf(&s, "('\\x%x'", v[0]) + for j := 1; j < len(v); j++ { + fmt.Fprintf(&s, ",'\\x%x'", v[j]) + } + pairs = append(pairs, fmt.Sprintf("$%d", i+1), fmt.Sprintf("%s)", s.String())) + default: + pairs = append(pairs, fmt.Sprintf("$%d", i+1), fmt.Sprintf("%v", arg)) + } + } + replacer := strings.NewReplacer(pairs...) + queryWithVals := replacer.Replace(query) + return strings.ReplaceAll(strings.ReplaceAll(queryWithVals, "\n", " "), "\t", " ") +} + +// queryLogger extends Q with logging helpers for a particular query w/ args. +type queryLogger struct { + Q + + query string + args []interface{} + + str func() string +} + +func (q *queryLogger) String() string { + return q.str() +} + +func (q *queryLogger) logSqlQuery() { + if q.config != nil && q.config.LogSQL() { + q.logger.Debugw("SQL QUERY", "sql", q) + } +} + +func (q *queryLogger) withLogError(err error) error { + if err != nil && !errors.Is(err, sql.ErrNoRows) && q.config != nil && q.config.LogSQL() { + q.logger.Errorw("SQL ERROR", "err", err, "sql", q) + } + return err +} + +// postSqlLog logs about context cancellation and timing after a query returns. +// Queries which use their full timeout log critical level. More than 50% log error, and 10% warn. 
+func (q *queryLogger) postSqlLog(ctx context.Context, begin time.Time) { + elapsed := time.Since(begin) + if ctx.Err() != nil { + q.logger.Debugw("SQL CONTEXT CANCELLED", "ms", elapsed.Milliseconds(), "err", ctx.Err(), "sql", q) + } + + timeout := q.QueryTimeout + if timeout <= 0 { + timeout = DefaultQueryTimeout + } + + pct := float64(elapsed) / float64(timeout) + pct *= 100 + + kvs := []any{"ms", elapsed.Milliseconds(), "timeout", timeout.Milliseconds(), "percent", strconv.FormatFloat(pct, 'f', 1, 64), "sql", q} + + if elapsed >= timeout { + q.logger.Criticalw("SLOW SQL QUERY", kvs...) + } else if errThreshold := timeout / 5; errThreshold > 0 && elapsed > errThreshold { + q.logger.Errorw("SLOW SQL QUERY", kvs...) + } else if warnThreshold := timeout / 10; warnThreshold > 0 && elapsed > warnThreshold { + q.logger.Warnw("SLOW SQL QUERY", kvs...) + } + + promSQLQueryTime.Observe(pct) +} diff --git a/core/services/pg/q_test.go b/core/services/pg/q_test.go new file mode 100644 index 00000000..7692fb79 --- /dev/null +++ b/core/services/pg/q_test.go @@ -0,0 +1,53 @@ +package pg + +import ( + "testing" + + "github.com/lib/pq" + "github.com/stretchr/testify/require" +) + +func Test_sprintQ(t *testing.T) { + for _, tt := range []struct { + name string + query string + args []interface{} + exp string + }{ + {"none", + "SELECT * FROM table;", + nil, + "SELECT * FROM table;"}, + {"one", + "SELECT $1 FROM table;", + []interface{}{"foo"}, + "SELECT foo FROM table;"}, + {"two", + "SELECT $1 FROM table WHERE bar = $2;", + []interface{}{"foo", 1}, + "SELECT foo FROM table WHERE bar = 1;"}, + {"limit", + "SELECT $1 FROM table LIMIT $2;", + []interface{}{"foo", Limit(10)}, + "SELECT foo FROM table LIMIT 10;"}, + {"limit-all", + "SELECT $1 FROM table LIMIT $2;", + []interface{}{"foo", Limit(-1)}, + "SELECT foo FROM table LIMIT NULL;"}, + {"bytea", + "SELECT $1 FROM table WHERE b = $2;", + []interface{}{"foo", []byte{0x0a}}, + "SELECT foo FROM table WHERE b = '\\x0a';"}, + 
// BatchSize is the default number of DB records to access in one batch
const BatchSize uint = 1000

// BatchFunc is the function to execute on each batch of records; it returns
// the count of records affected.
type BatchFunc func(offset, limit uint) (count uint, err error)

// Batch invokes cb over consecutive windows of BatchSize records, advancing
// the offset each time, until a batch comes back short (end of data) or cb
// returns an error.
func Batch(cb BatchFunc) error {
	for offset := uint(0); ; offset += BatchSize {
		count, err := cb(offset, BatchSize)
		switch {
		case err != nil:
			return err
		case count < BatchSize:
			return nil
		}
	}
}
GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error + NamedExec(query string, arg interface{}) (sql.Result, error) + NamedQuery(query string, arg interface{}) (*sqlx.Rows, error) +} + +func WrapDbWithSqlx(rdb *sql.DB) *sqlx.DB { + db := sqlx.NewDb(rdb, "postgres") + db.MapperFunc(mapper.CamelToSnakeASCII) + return db +} + +func SqlxTransaction(ctx context.Context, q Queryer, lggr logger.Logger, fc func(q Queryer) error, txOpts ...TxOption) (err error) { + switch db := q.(type) { + case *sqlx.Tx: + // nested transaction: just use the outer transaction + err = fc(db) + case *sqlx.DB: + err = sqlxTransactionQ(ctx, db, lggr, fc, txOpts...) + case Q: + err = sqlxTransactionQ(ctx, db.db, lggr, fc, txOpts...) + default: + err = errors.Errorf("invalid db type: %T", q) + } + + return +} diff --git a/core/services/pg/stats.go b/core/services/pg/stats.go new file mode 100644 index 00000000..7d3afe72 --- /dev/null +++ b/core/services/pg/stats.go @@ -0,0 +1,133 @@ +package pg + +import ( + "context" + "database/sql" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +const dbStatsInternal = 10 * time.Second + +var ( + promDBConnsMax = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "db_conns_max", + Help: "Maximum number of open connections to the database.", + }) + promDBConnsOpen = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "db_conns_open", + Help: "The number of established connections both in use and idle.", + }) + promDBConnsInUse = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "db_conns_used", + Help: "The number of connections currently in use.", + }) + promDBWaitCount = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "db_wait_count", + Help: "The total number of connections waited for.", + }) + promDBWaitDuration = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "db_wait_time_seconds", + 
Help: "The total time blocked waiting for a new connection.", + }) +) + +func publishStats(stats sql.DBStats) { + promDBConnsMax.Set(float64(stats.MaxOpenConnections)) + promDBConnsOpen.Set(float64(stats.OpenConnections)) + promDBConnsInUse.Set(float64(stats.InUse)) + + promDBWaitCount.Set(float64(stats.WaitCount)) + promDBWaitDuration.Set(stats.WaitDuration.Seconds()) +} + +type StatsReporterOpt func(*StatsReporter) + +func StatsInterval(d time.Duration) StatsReporterOpt { + return func(r *StatsReporter) { + r.interval = d + } +} + +func StatsCustomReporterFn(fn ReportFn) StatsReporterOpt { + return func(r *StatsReporter) { + r.reportFn = fn + } +} + +type ( + StatFn func() sql.DBStats + ReportFn func(sql.DBStats) +) + +type StatsReporter struct { + statFn StatFn + reportFn ReportFn + interval time.Duration + cancel context.CancelFunc + lggr logger.Logger + once sync.Once + wg sync.WaitGroup +} + +func NewStatsReporter(fn StatFn, lggr logger.Logger, opts ...StatsReporterOpt) *StatsReporter { + r := &StatsReporter{ + statFn: fn, + reportFn: publishStats, + interval: dbStatsInternal, + lggr: lggr.Named("StatsReporter"), + } + + for _, opt := range opts { + opt(r) + } + + return r +} + +func (r *StatsReporter) Start(ctx context.Context) { + + startOnce := func() { + r.wg.Add(1) + r.lggr.Debug("Starting DB stat reporter") + rctx, cancelFunc := context.WithCancel(ctx) + r.cancel = cancelFunc + go r.loop(rctx) + } + + r.once.Do(startOnce) +} + +// Stop stops all resources owned by the reporter and waits +// for all of them to be done +func (r *StatsReporter) Stop() { + if r.cancel != nil { + r.lggr.Debug("Stopping DB stat reporter") + r.cancel() + r.cancel = nil + r.wg.Wait() + } +} + +func (r *StatsReporter) loop(ctx context.Context) { + defer r.wg.Done() + + ticker := time.NewTicker(r.interval) + defer ticker.Stop() + + r.reportFn(r.statFn()) + for { + select { + case <-ticker.C: + r.reportFn(r.statFn()) + case <-ctx.Done(): + r.lggr.Debug("stat reporter loop received 
done. stopping...") + return + } + } +} diff --git a/core/services/pg/stats_test.go b/core/services/pg/stats_test.go new file mode 100644 index 00000000..e627f1fa --- /dev/null +++ b/core/services/pg/stats_test.go @@ -0,0 +1,134 @@ +package pg + +import ( + "context" + "database/sql" + "strings" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/stretchr/testify/mock" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// testDbStater implements mocks for the function signatures +// needed by the stat reporte wrapper for statFn +type testDbStater struct { + mock.Mock + t *testing.T + name string + testGauge prometheus.Gauge +} + +func newtestDbStater(t *testing.T, name string) *testDbStater { + return &testDbStater{ + t: t, + name: name, + testGauge: promauto.NewGauge(prometheus.GaugeOpts{ + Name: strings.ReplaceAll(name, " ", "_"), + }), + } +} + +func (s *testDbStater) Stats() sql.DBStats { + s.Called() + return sql.DBStats{} +} + +func (s *testDbStater) Report(stats sql.DBStats) { + s.Called() + s.testGauge.Set(float64(stats.MaxOpenConnections)) +} + +type statScenario struct { + name string + testFn func(*testing.T, *StatsReporter, time.Duration, int) +} + +func TestStatReporter(t *testing.T) { + interval := 2 * time.Millisecond + expectedIntervals := 4 + + lggr := logger.TestLogger(t) + + for _, scenario := range []statScenario{ + {name: "parent_ctx_canceled", testFn: testParentContextCanceled}, + {name: "normal_collect_and_stop", testFn: testCollectAndStop}, + {name: "mutli_start", testFn: testMultiStart}, + {name: "multi_stop", testFn: testMultiStop}, + } { + + t.Run(scenario.name, func(t *testing.T) { + d := newtestDbStater(t, scenario.name) + d.Mock.On("Stats").Return(sql.DBStats{}) + d.Mock.On("Report").Return() + reporter := NewStatsReporter(d.Stats, + lggr, + StatsInterval(interval), + 
StatsCustomReporterFn(d.Report), + ) + + scenario.testFn( + t, + reporter, + interval, + expectedIntervals, + ) + + d.AssertCalled(t, "Stats") + d.AssertCalled(t, "Report") + }) + } +} + +// test appropriate handling of context cancellation +func testParentContextCanceled(t *testing.T, r *StatsReporter, interval time.Duration, n int) { + ctx := testutils.Context(t) + tctx, cancel := context.WithTimeout(ctx, time.Duration(n)*interval) + + r.Start(tctx) + defer r.Stop() + // wait for parent cancelation + <-tctx.Done() + // call cancel to statisy linter + cancel() +} + +// test normal stop +func testCollectAndStop(t *testing.T, r *StatsReporter, interval time.Duration, n int) { + ctx := testutils.Context(t) + + r.Start(ctx) + time.Sleep(time.Duration(n) * interval) + r.Stop() +} + +// test multiple start calls are idempotent +func testMultiStart(t *testing.T, r *StatsReporter, interval time.Duration, n int) { + ctx := testutils.Context(t) + + ticker := time.NewTicker(time.Duration(n) * interval) + defer ticker.Stop() + + r.Start(ctx) + r.Start(ctx) + <-ticker.C + r.Stop() +} + +// test multiple stop calls are idempotent +func testMultiStop(t *testing.T, r *StatsReporter, interval time.Duration, n int) { + ctx := testutils.Context(t) + + ticker := time.NewTicker(time.Duration(n) * interval) + defer ticker.Stop() + + r.Start(ctx) + <-ticker.C + r.Stop() + r.Stop() +} diff --git a/core/services/pg/transaction.go b/core/services/pg/transaction.go new file mode 100644 index 00000000..8327b594 --- /dev/null +++ b/core/services/pg/transaction.go @@ -0,0 +1,95 @@ +package pg + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/getsentry/sentry-go" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/plugin-common/pkg/logger" + corelogger "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// NOTE: This is the default level in Postgres anyway, we just make it +// explicit here +const defaultIsolation = 
sql.LevelReadCommitted + +// TxOption is a functional option for SQL transactions. +type TxOption func(*sql.TxOptions) + +func OptReadOnlyTx() TxOption { + return func(opts *sql.TxOptions) { + opts.ReadOnly = true + } +} + +func SqlTransaction(ctx context.Context, rdb *sql.DB, lggr logger.Logger, fn func(tx *sqlx.Tx) error, opts ...TxOption) (err error) { + db := WrapDbWithSqlx(rdb) + wrapFn := func(q Queryer) error { + tx, ok := q.(*sqlx.Tx) + if !ok { + panic(fmt.Sprintf("expected q to be %T but got %T", tx, q)) + } + return fn(tx) + } + return sqlxTransactionQ(ctx, db, lggr, wrapFn, opts...) +} + +// txBeginner can be a db or a conn, anything that implements BeginTxx +type txBeginner interface { + BeginTxx(context.Context, *sql.TxOptions) (*sqlx.Tx, error) +} + +func sqlxTransactionQ(ctx context.Context, db txBeginner, lggr logger.Logger, fn func(q Queryer) error, opts ...TxOption) (err error) { + var txOpts sql.TxOptions + for _, o := range opts { + o(&txOpts) + } + + var tx *sqlx.Tx + tx, err = db.BeginTxx(ctx, &txOpts) + if err != nil { + return errors.Wrap(err, "failed to begin transaction") + } + + defer func() { + if p := recover(); p != nil { + sentry.CurrentHub().Recover(p) + sentry.Flush(corelogger.SentryFlushDeadline) + + // A panic occurred, rollback and repanic + lggr.Errorf("Panic in transaction, rolling back: %s", p) + done := make(chan struct{}) + go func() { + if rerr := tx.Rollback(); rerr != nil { + lggr.Errorf("Failed to rollback on panic: %s", rerr) + } + close(done) + }() + select { + case <-done: + panic(p) + case <-time.After(10 * time.Second): + panic(fmt.Sprintf("panic in transaction; aborting rollback that took longer than 10s: %s", p)) + } + } else if err != nil { + lggr.Errorf("Error in transaction, rolling back: %s", err) + // An error occurred, rollback and return error + if rerr := tx.Rollback(); rerr != nil { + err = multierr.Combine(err, errors.WithStack(rerr)) + } + } else { + // All good! Time to commit. 
+ err = errors.WithStack(tx.Commit()) + } + }() + + err = fn(tx) + + return +} diff --git a/core/services/pg/utils.go b/core/services/pg/utils.go new file mode 100644 index 00000000..eb53c261 --- /dev/null +++ b/core/services/pg/utils.go @@ -0,0 +1,50 @@ +package pg + +import ( + "database/sql/driver" + "strconv" + "time" +) + +const ( + // DefaultQueryTimeout is a reasonable upper bound for how long a SQL query should take. + // The configured value should be used instead of this if possible. + DefaultQueryTimeout = 10 * time.Second + // longQueryTimeout is a bigger upper bound for how long a SQL query should take + longQueryTimeout = 1 * time.Minute +) + +var _ driver.Valuer = Limit(-1) + +// Limit is a helper driver.Valuer for LIMIT queries which uses nil/NULL for negative values. +type Limit int + +func (l Limit) String() string { + if l < 0 { + return "NULL" + } + return strconv.Itoa(int(l)) +} + +func (l Limit) Value() (driver.Value, error) { + if l < 0 { + return nil, nil + } + return l, nil +} + +var _ QConfig = &qConfig{} + +// qConfig implements pg.QCOnfig +type qConfig struct { + logSQL bool + defaultQueryTimeout time.Duration +} + +func NewQConfig(logSQL bool) QConfig { + return &qConfig{logSQL, DefaultQueryTimeout} +} + +func (p *qConfig) LogSQL() bool { return p.logSQL } + +func (p *qConfig) DefaultQueryTimeout() time.Duration { return p.defaultQueryTimeout } diff --git a/core/services/pipeline/common.go b/core/services/pipeline/common.go new file mode 100644 index 00000000..5ddf4264 --- /dev/null +++ b/core/services/pipeline/common.go @@ -0,0 +1,694 @@ +package pipeline + +import ( + "bytes" + "context" + "database/sql/driver" + "encoding/json" + "errors" + "math/big" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/mitchellh/mapstructure" + pkgerrors "github.com/pkg/errors" + "gopkg.in/guregu/null.v4" + + commonconfig 
"github.com/goplugin/plugin-common/pkg/config"
	cutils "github.com/goplugin/plugin-common/pkg/utils"
	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/config"
	"github.com/goplugin/pluginv3.0/v2/core/logger"
	cnull "github.com/goplugin/pluginv3.0/v2/core/null"
	"github.com/goplugin/pluginv3.0/v2/core/utils"
)

// Job type identifiers used to tag pipeline specs.
const (
	BlockHeaderFeederJobType       string = "blockheaderfeeder"
	BlockhashStoreJobType          string = "blockhashstore"
	BootstrapJobType               string = "bootstrap"
	CronJobType                    string = "cron"
	DirectRequestJobType           string = "directrequest"
	FluxMonitorJobType             string = "fluxmonitor"
	GatewayJobType                 string = "gateway"
	KeeperJobType                  string = "keeper"
	LegacyGasStationServerJobType  string = "legacygasstationserver"
	LegacyGasStationSidecarJobType string = "legacygasstationsidecar"
	OffchainReporting2JobType      string = "offchainreporting2"
	OffchainReportingJobType       string = "offchainreporting"
	StreamJobType                  string = "stream"
	VRFJobType                     string = "vrf"
	WebhookJobType                 string = "webhook"
	WorkflowJobType                string = "workflow"
)

//go:generate mockery --quiet --name Config --output ./mocks/ --case=underscore

type (
	// Task is one node of the pipeline DAG.
	Task interface {
		Type() TaskType
		ID() int
		DotID() string
		Run(ctx context.Context, lggr logger.Logger, vars Vars, inputs []Result) (Result, RunInfo)
		Base() *BaseTask
		Outputs() []Task
		Inputs() []TaskDependency
		OutputIndex() int32
		TaskTimeout() (time.Duration, bool)
		TaskRetries() uint32
		TaskMinBackoff() time.Duration
		TaskMaxBackoff() time.Duration
	}

	// Config supplies pipeline-level runner settings.
	Config interface {
		DefaultHTTPLimit() int64
		DefaultHTTPTimeout() commonconfig.Duration
		MaxRunDuration() time.Duration
		ReaperInterval() time.Duration
		ReaperThreshold() time.Duration
	}

	// BridgeConfig supplies bridge-task settings.
	BridgeConfig interface {
		BridgeResponseURL() *url.URL
		BridgeCacheTTL() time.Duration
	}
)
dependent task. +// If the edge between these tasks was an implicit edge, then results are not propagated. This is because +// some tasks cannot handle an input from an edge which wasn't specified in the spec. +type TaskDependency struct { + PropagateResult bool + InputTask Task +} + +var ( + ErrWrongInputCardinality = errors.New("wrong number of task inputs") + ErrBadInput = errors.New("bad input for task") + ErrInputTaskErrored = errors.New("input task errored") + ErrParameterEmpty = errors.New("parameter is empty") + ErrIndexOutOfRange = errors.New("index out of range") + ErrTooManyErrors = errors.New("too many errors") + ErrTimeout = errors.New("timeout") + ErrTaskRunFailed = errors.New("task run failed") + ErrCancelled = errors.New("task run cancelled (fail early)") +) + +const ( + InputTaskKey = "input" +) + +// RunInfo contains additional information about the finished TaskRun +type RunInfo struct { + IsRetryable bool + IsPending bool +} + +// retryableMeta should be returned if the error is non-deterministic; i.e. 
a +// repeated attempt sometime later _might_ succeed where the current attempt +// failed +func retryableRunInfo() RunInfo { + return RunInfo{IsRetryable: true} +} + +func pendingRunInfo() RunInfo { + return RunInfo{IsPending: true} +} + +func isRetryableHTTPError(statusCode int, err error) bool { + if statusCode >= 400 && statusCode < 500 { + // Client errors are not likely to succeed by resubmitting the exact same information again + return false + } else if statusCode >= 500 { + // Remote errors _might_ work on a retry + return true + } + return err != nil +} + +// Result is the result of a TaskRun +type Result struct { + Value interface{} + Error error +} + +// OutputDB dumps a single result output for a pipeline_run or pipeline_task_run +func (result Result) OutputDB() JSONSerializable { + return JSONSerializable{Val: result.Value, Valid: !(result.Value == nil || (reflect.ValueOf(result.Value).Kind() == reflect.Ptr && reflect.ValueOf(result.Value).IsNil()))} +} + +// ErrorDB dumps a single result error for a pipeline_task_run +func (result Result) ErrorDB() null.String { + var errString null.String + if result.Error != nil { + errString = null.StringFrom(result.Error.Error()) + } + return errString +} + +// FinalResult is the result of a Run +type FinalResult struct { + Values []interface{} + AllErrors []error + FatalErrors []error +} + +// HasFatalErrors returns true if the final result has any errors +func (result FinalResult) HasFatalErrors() bool { + for _, err := range result.FatalErrors { + if err != nil { + return true + } + } + return false +} + +// HasErrors returns true if the final result has any errors +func (result FinalResult) HasErrors() bool { + for _, err := range result.AllErrors { + if err != nil { + return true + } + } + return false +} + +func (result FinalResult) CombinedError() error { + if !result.HasErrors() { + return nil + } + return errors.Join(result.AllErrors...) 
+} + +// SingularResult returns a single result if the FinalResult only has one set of outputs/errors +func (result FinalResult) SingularResult() (Result, error) { + if len(result.FatalErrors) != 1 || len(result.Values) != 1 { + return Result{}, pkgerrors.Errorf("cannot cast FinalResult to singular result; it does not have exactly 1 error and exactly 1 output: %#v", result) + } + return Result{Error: result.FatalErrors[0], Value: result.Values[0]}, nil +} + +// TaskRunResult describes the result of a task run, suitable for database +// update or insert. +// ID might be zero if the TaskRun has not been inserted yet +// TaskSpecID will always be non-zero +type TaskRunResult struct { + ID uuid.UUID + Task Task + TaskRun TaskRun + Result Result + Attempts uint + CreatedAt time.Time + FinishedAt null.Time + // runInfo is never persisted + runInfo RunInfo +} + +func (result *TaskRunResult) IsPending() bool { + return !result.FinishedAt.Valid && result.Result == Result{} +} + +func (result *TaskRunResult) IsTerminal() bool { + return len(result.Task.Outputs()) == 0 +} + +// TaskRunResults represents a collection of results for all task runs for one pipeline run +type TaskRunResults []TaskRunResult + +// FinalResult pulls the FinalResult for the pipeline_run from the task runs +// It needs to respect the output index of each task +func (trrs TaskRunResults) FinalResult(l logger.Logger) FinalResult { + var found bool + var fr FinalResult + sort.Slice(trrs, func(i, j int) bool { + return trrs[i].Task.OutputIndex() < trrs[j].Task.OutputIndex() + }) + for _, trr := range trrs { + fr.AllErrors = append(fr.AllErrors, trr.Result.Error) + if trr.IsTerminal() { + fr.Values = append(fr.Values, trr.Result.Value) + fr.FatalErrors = append(fr.FatalErrors, trr.Result.Error) + found = true + } + } + + if !found { + l.Panicw("Expected at least one task to be final", "tasks", trrs) + } + return fr +} + +// Terminals returns all terminal task run results +func (trrs TaskRunResults) 
Terminals() (terminals []TaskRunResult) { + for _, trr := range trrs { + if trr.IsTerminal() { + terminals = append(terminals, trr) + } + } + return +} + +// GetNextTaskOf returns the task with the next id or nil if it does not exist +func (trrs *TaskRunResults) GetNextTaskOf(task TaskRunResult) *TaskRunResult { + nextID := task.Task.Base().id + 1 + + for _, trr := range *trrs { + if trr.Task.Base().id == nextID { + return &trr + } + } + + return nil +} + +type JSONSerializable struct { + Val interface{} + Valid bool +} + +func reinterpetJsonNumbers(val interface{}) (interface{}, error) { + switch v := val.(type) { + case json.Number: + return getJsonNumberValue(v) + case []interface{}: + s := make([]interface{}, len(v)) + for i, vv := range v { + ival, ierr := reinterpetJsonNumbers(vv) + if ierr != nil { + return nil, ierr + } + s[i] = ival + } + return s, nil + case map[string]interface{}: + m := make(map[string]interface{}, len(v)) + for k, vv := range v { + ival, ierr := reinterpetJsonNumbers(vv) + if ierr != nil { + return nil, ierr + } + m[k] = ival + } + return m, nil + } + return val, nil +} + +// UnmarshalJSON implements custom unmarshaling logic +func (js *JSONSerializable) UnmarshalJSON(bs []byte) error { + if js == nil { + *js = JSONSerializable{} + } + if len(bs) == 0 { + js.Valid = false + return nil + } + + var decoded interface{} + d := json.NewDecoder(bytes.NewReader(bs)) + d.UseNumber() + if err := d.Decode(&decoded); err != nil { + return err + } + + if decoded != nil { + reinterpreted, err := reinterpetJsonNumbers(decoded) + if err != nil { + return err + } + + *js = JSONSerializable{ + Valid: true, + Val: reinterpreted, + } + } + + return nil +} + +// MarshalJSON implements custom marshaling logic +func (js JSONSerializable) MarshalJSON() ([]byte, error) { + if !js.Valid { + return json.Marshal(nil) + } + jsWithHex := replaceBytesWithHex(js.Val) + return json.Marshal(jsWithHex) +} + +func (js *JSONSerializable) Scan(value interface{}) error { + 
if value == nil { + *js = JSONSerializable{} + return nil + } + bytes, ok := value.([]byte) + if !ok { + return pkgerrors.Errorf("JSONSerializable#Scan received a value of type %T", value) + } + if js == nil { + *js = JSONSerializable{} + } + return js.UnmarshalJSON(bytes) +} + +func (js JSONSerializable) Value() (driver.Value, error) { + if !js.Valid { + return nil, nil + } + return js.MarshalJSON() +} + +func (js *JSONSerializable) Empty() bool { + return js == nil || !js.Valid +} + +type TaskType string + +func (t TaskType) String() string { + return string(t) +} + +const ( + TaskTypeAny TaskType = "any" + TaskTypeBase64Decode TaskType = "base64decode" + TaskTypeBase64Encode TaskType = "base64encode" + TaskTypeBridge TaskType = "bridge" + TaskTypeCBORParse TaskType = "cborparse" + TaskTypeConditional TaskType = "conditional" + TaskTypeDivide TaskType = "divide" + TaskTypeETHABIDecode TaskType = "ethabidecode" + TaskTypeETHABIDecodeLog TaskType = "ethabidecodelog" + TaskTypeETHABIEncode TaskType = "ethabiencode" + TaskTypeETHABIEncode2 TaskType = "ethabiencode2" + TaskTypeETHCall TaskType = "ethcall" + TaskTypeETHTx TaskType = "ethtx" + TaskTypeEstimateGasLimit TaskType = "estimategaslimit" + TaskTypeHTTP TaskType = "http" + TaskTypeHexDecode TaskType = "hexdecode" + TaskTypeHexEncode TaskType = "hexencode" + TaskTypeJSONParse TaskType = "jsonparse" + TaskTypeLength TaskType = "length" + TaskTypeLessThan TaskType = "lessthan" + TaskTypeLookup TaskType = "lookup" + TaskTypeLowercase TaskType = "lowercase" + TaskTypeMean TaskType = "mean" + TaskTypeMedian TaskType = "median" + TaskTypeMerge TaskType = "merge" + TaskTypeMode TaskType = "mode" + TaskTypeMultiply TaskType = "multiply" + TaskTypeSum TaskType = "sum" + TaskTypeUppercase TaskType = "uppercase" + TaskTypeVRF TaskType = "vrf" + TaskTypeVRFV2 TaskType = "vrfv2" + TaskTypeVRFV2Plus TaskType = "vrfv2plus" + + // Testing only. 
+ TaskTypePanic TaskType = "panic" + TaskTypeMemo TaskType = "memo" + TaskTypeFail TaskType = "fail" +) + +var ( + stringType = reflect.TypeOf("") + bytesType = reflect.TypeOf([]byte(nil)) + bytes20Type = reflect.TypeOf([20]byte{}) + int32Type = reflect.TypeOf(int32(0)) + nullUint32Type = reflect.TypeOf(cnull.Uint32{}) +) + +func UnmarshalTaskFromMap(taskType TaskType, taskMap interface{}, ID int, dotID string) (_ Task, err error) { + defer cutils.WrapIfError(&err, "UnmarshalTaskFromMap") + + switch taskMap.(type) { + default: + return nil, pkgerrors.Errorf("UnmarshalTaskFromMap only accepts a map[string]interface{} or a map[string]string. Got %v (%#v) of type %T", taskMap, taskMap, taskMap) + case map[string]interface{}, map[string]string: + } + + taskType = TaskType(strings.ToLower(string(taskType))) + + var task Task + switch taskType { + case TaskTypePanic: + task = &PanicTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeHTTP: + task = &HTTPTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeBridge: + task = &BridgeTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeMean: + task = &MeanTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeMedian: + task = &MedianTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeMode: + task = &ModeTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeSum: + task = &SumTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeAny: + task = &AnyTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeJSONParse: + task = &JSONParseTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeMemo: + task = &MemoTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeMultiply: + task = &MultiplyTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeDivide: + task = &DivideTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeVRF: + task = &VRFTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeVRFV2: + task = &VRFTaskV2{BaseTask: 
BaseTask{id: ID, dotID: dotID}} + case TaskTypeVRFV2Plus: + task = &VRFTaskV2Plus{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeEstimateGasLimit: + task = &EstimateGasLimitTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeETHCall: + task = &ETHCallTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeETHTx: + task = &ETHTxTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeETHABIEncode: + task = &ETHABIEncodeTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeETHABIEncode2: + task = &ETHABIEncodeTask2{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeETHABIDecode: + task = &ETHABIDecodeTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeETHABIDecodeLog: + task = &ETHABIDecodeLogTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeCBORParse: + task = &CBORParseTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeFail: + task = &FailTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeMerge: + task = &MergeTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeLength: + task = &LengthTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeLessThan: + task = &LessThanTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeLookup: + task = &LookupTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeLowercase: + task = &LowercaseTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeUppercase: + task = &UppercaseTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeConditional: + task = &ConditionalTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeHexDecode: + task = &HexDecodeTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeHexEncode: + task = &HexEncodeTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeBase64Decode: + task = &Base64DecodeTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + case TaskTypeBase64Encode: + task = &Base64EncodeTask{BaseTask: BaseTask{id: ID, dotID: dotID}} + default: + return nil, 
pkgerrors.Errorf(`unknown task type: "%v"`, taskType) + } + + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Result: task, + WeaklyTypedInput: true, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + if from != stringType { + return data, nil + } + switch to { + case nullUint32Type: + i, err2 := strconv.ParseUint(data.(string), 10, 32) + return cnull.Uint32From(uint32(i)), err2 + } + return data, nil + }, + ), + }) + if err != nil { + return nil, err + } + + err = decoder.Decode(taskMap) + if err != nil { + return nil, err + } + return task, nil +} + +func CheckInputs(inputs []Result, minLen, maxLen, maxErrors int) ([]interface{}, error) { + if minLen >= 0 && len(inputs) < minLen { + return nil, pkgerrors.Wrapf(ErrWrongInputCardinality, "min: %v max: %v (got %v)", minLen, maxLen, len(inputs)) + } else if maxLen >= 0 && len(inputs) > maxLen { + return nil, pkgerrors.Wrapf(ErrWrongInputCardinality, "min: %v max: %v (got %v)", minLen, maxLen, len(inputs)) + } + var vals []interface{} + var errs int + for _, input := range inputs { + if input.Error != nil { + errs++ + continue + } + vals = append(vals, input.Value) + } + if maxErrors >= 0 && errs > maxErrors { + return nil, ErrTooManyErrors + } + return vals, nil +} + +var ErrInvalidEVMChainID = errors.New("invalid EVM chain ID") + +func SelectGasLimit(ge config.GasEstimator, jobType string, specGasLimit *uint32) uint32 { + if specGasLimit != nil { + return *specGasLimit + } + + jt := ge.LimitJobType() + var jobTypeGasLimit *uint32 + switch jobType { + case DirectRequestJobType: + jobTypeGasLimit = jt.DR() + case FluxMonitorJobType: + jobTypeGasLimit = jt.FM() + case OffchainReportingJobType: + jobTypeGasLimit = jt.OCR() + case OffchainReporting2JobType: + jobTypeGasLimit = jt.OCR2() + case KeeperJobType: + jobTypeGasLimit = jt.Keeper() + case VRFJobType: + 
jobTypeGasLimit = jt.VRF() + } + + if jobTypeGasLimit != nil { + return *jobTypeGasLimit + } + return ge.LimitDefault() +} + +// replaceBytesWithHex replaces all []byte with hex-encoded strings +func replaceBytesWithHex(val interface{}) interface{} { + switch value := val.(type) { + case nil: + return value + case []byte: + return utils.StringToHex(string(value)) + case common.Address: + return value.Hex() + case common.Hash: + return value.Hex() + case [][]byte: + var list []string + for _, bytes := range value { + list = append(list, utils.StringToHex(string(bytes))) + } + return list + case []common.Address: + var list []string + for _, addr := range value { + list = append(list, addr.Hex()) + } + return list + case []common.Hash: + var list []string + for _, hash := range value { + list = append(list, hash.Hex()) + } + return list + case []interface{}: + if value == nil { + return value + } + var list []interface{} + for _, item := range value { + list = append(list, replaceBytesWithHex(item)) + } + return list + case map[string]interface{}: + if value == nil { + return value + } + m := make(map[string]interface{}) + for k, v := range value { + m[k] = replaceBytesWithHex(v) + } + return m + default: + // This handles solidity types: bytes1..bytes32, + // which map to [1]uint8..[32]uint8 when decoded. + // We persist them as hex strings, and we know ETH ABI encoders + // can parse hex strings, same as BytesParam does. + if s := uint8ArrayToSlice(value); s != nil { + return replaceBytesWithHex(s) + } + return value + } +} + +// uint8ArrayToSlice converts [N]uint8 array to slice. 
+func uint8ArrayToSlice(arr interface{}) interface{} { + t := reflect.TypeOf(arr) + if t.Kind() != reflect.Array || t.Elem().Kind() != reflect.Uint8 { + return nil + } + v := reflect.ValueOf(arr) + s := reflect.MakeSlice(reflect.SliceOf(t.Elem()), v.Len(), v.Len()) + reflect.Copy(s, v) + return s.Interface() +} + +func getJsonNumberValue(value json.Number) (interface{}, error) { + var result interface{} + + bn, ok := new(big.Int).SetString(value.String(), 10) + if ok { + if bn.IsInt64() { + result = bn.Int64() + } else if bn.IsUint64() { + result = bn.Uint64() + } else { + result = bn + } + } else { + f, err := value.Float64() + if err != nil { + return nil, pkgerrors.Errorf("failed to parse json.Value: %v", err) + } + result = f + } + + return result, nil +} + +func selectBlock(block string) (string, error) { + if block == "" { + return "latest", nil + } + block = strings.ToLower(block) + if block == "pending" || block == "latest" { + return block, nil + } + return "", pkgerrors.Errorf("unsupported block param: %s", block) +} diff --git a/core/services/pipeline/common_eth.go b/core/services/pipeline/common_eth.go new file mode 100644 index 00000000..aa555d3a --- /dev/null +++ b/core/services/pipeline/common_eth.go @@ -0,0 +1,336 @@ +package pipeline + +import ( + "bytes" + "fmt" + "math/big" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + ethABIRegex = regexp.MustCompile(`\A\s*([a-zA-Z0-9_]+)?\s*\(\s*([a-zA-Z0-9\[\]_\s,]+\s*)?\)\z`) + indexedKeyword = []byte("indexed") + calldataKeyword = []byte("calldata") + memoryKeyword = []byte("memory") + storageKeyword = []byte("storage") + spaceDelim = []byte(" ") + commaDelim = []byte(",") +) + +func ParseETHABIArgsString(theABI []byte, isLog bool) (args abi.Arguments, indexedArgs 
abi.Arguments, _ error) { + var argStrs [][]byte + if len(bytes.TrimSpace(theABI)) > 0 { + argStrs = bytes.Split(theABI, commaDelim) + } + + for _, argStr := range argStrs { + argStr = bytes.ReplaceAll(argStr, calldataKeyword, nil) // Strip `calldata` modifiers + argStr = bytes.ReplaceAll(argStr, memoryKeyword, nil) // Strip `memory` modifiers + argStr = bytes.ReplaceAll(argStr, storageKeyword, nil) // Strip `storage` modifiers + argStr = bytes.TrimSpace(argStr) + parts := bytes.Split(argStr, spaceDelim) + + var ( + argParts [][]byte + typeStr []byte + argName []byte + indexed bool + ) + for i := range parts { + parts[i] = bytes.TrimSpace(parts[i]) + if len(parts[i]) > 0 { + argParts = append(argParts, parts[i]) + } + } + switch len(argParts) { + case 0: + return nil, nil, errors.Errorf("bad ABI specification, empty argument: %s", theABI) + + case 1: + return nil, nil, errors.Errorf("bad ABI specification, missing argument name: %s", theABI) + + case 2: + if isLog && bytes.Equal(argParts[1], indexedKeyword) { + return nil, nil, errors.Errorf("bad ABI specification, missing argument name: %s", theABI) + } + typeStr = argParts[0] + argName = argParts[1] + + case 3: + if !isLog { + return nil, nil, errors.Errorf("bad ABI specification, too many components in argument: %s", theABI) + } else if bytes.Equal(argParts[0], indexedKeyword) || bytes.Equal(argParts[2], indexedKeyword) { + return nil, nil, errors.Errorf("bad ABI specification, 'indexed' keyword must appear between argument type and name: %s", theABI) + } else if !bytes.Equal(argParts[1], indexedKeyword) { + return nil, nil, errors.Errorf("bad ABI specification, unknown keyword '%v' between argument type and name: %s", string(argParts[1]), theABI) + } + typeStr = argParts[0] + argName = argParts[2] + indexed = true + + default: + return nil, nil, errors.Errorf("bad ABI specification, too many components in argument: %s", theABI) + } + typ, err := abi.NewType(string(typeStr), "", nil) + if err != nil { + return 
nil, nil, errors.Errorf("bad ABI specification: %v", err.Error()) + } + args = append(args, abi.Argument{Type: typ, Name: string(argName), Indexed: indexed}) + if indexed { + indexedArgs = append(indexedArgs, abi.Argument{Type: typ, Name: string(argName), Indexed: indexed}) + } + } + return args, indexedArgs, nil +} + +func parseETHABIString(theABI []byte, isLog bool) (name string, args abi.Arguments, indexedArgs abi.Arguments, err error) { + matches := ethABIRegex.FindAllSubmatch(theABI, -1) + if len(matches) != 1 || len(matches[0]) != 3 { + return "", nil, nil, errors.Errorf("bad ABI specification: %s", theABI) + } + name = string(bytes.TrimSpace(matches[0][1])) + args, indexedArgs, err = ParseETHABIArgsString(matches[0][2], isLog) + return name, args, indexedArgs, err +} + +func convertToETHABIType(val interface{}, abiType abi.Type) (interface{}, error) { + srcVal := reflect.ValueOf(val) + + if abiType.GetType() == srcVal.Type() { + return val, nil + } + + switch abiType.T { + case abi.IntTy, abi.UintTy: + return convertToETHABIInteger(val, abiType) + + case abi.StringTy: + switch val := val.(type) { + case string: + return val, nil + case []byte: + return string(val), nil + } + + case abi.BytesTy: + switch val := val.(type) { + case string: + if strings.HasPrefix(val, "0x") { + return hexutil.Decode(val) + } + return []byte(val), nil + case []byte: + return val, nil + default: + return convertToETHABIBytes(abiType.GetType(), srcVal, srcVal.Len()) + } + + case abi.FixedBytesTy: + destType := abiType.GetType() + return convertToETHABIBytes(destType, srcVal, destType.Len()) + + case abi.AddressTy: + switch val := val.(type) { + case common.Address: + return val, nil + case [20]byte: + return common.Address(val), nil + default: + maybeBytes, err := convertToETHABIBytes(bytes20Type, srcVal, 20) + if err != nil { + return nil, err + } + bs, ok := maybeBytes.([20]byte) + if !ok { + panic("impossible") + } + return common.Address(bs), nil + } + + case abi.BoolTy: + 
switch val := val.(type) { + case bool: + return val, nil + case string: + return strconv.ParseBool(val) + } + + case abi.SliceTy: + dest := reflect.MakeSlice(abiType.GetType(), srcVal.Len(), srcVal.Len()) + for i := 0; i < dest.Len(); i++ { + elem, err := convertToETHABIType(srcVal.Index(i).Interface(), *abiType.Elem) + if err != nil { + return nil, err + } + dest.Index(i).Set(reflect.ValueOf(elem)) + } + return dest.Interface(), nil + + case abi.ArrayTy: + if srcVal.Kind() != reflect.Slice && srcVal.Kind() != reflect.Array { + return nil, errors.Wrapf(ErrBadInput, "cannot convert %v to %v", srcVal.Type(), abiType) + } else if srcVal.Len() != abiType.Size { + return nil, errors.Wrapf(ErrBadInput, "incorrect length: expected %v, got %v", abiType.Size, srcVal.Len()) + } + + dest := reflect.New(abiType.GetType()).Elem() + for i := 0; i < dest.Len(); i++ { + elem, err := convertToETHABIType(srcVal.Index(i).Interface(), *abiType.Elem) + if err != nil { + return nil, err + } + dest.Index(i).Set(reflect.ValueOf(elem)) + } + return dest.Interface(), nil + + case abi.TupleTy: + return convertToETHABITuple(abiType, srcVal) + + } + return nil, errors.Wrapf(ErrBadInput, "cannot convert %v to %v", srcVal.Type(), abiType) +} + +func convertToETHABITuple(abiType abi.Type, srcVal reflect.Value) (interface{}, error) { + size := len(abiType.TupleElems) + if srcVal.Len() != size { + return nil, errors.Wrapf(ErrBadInput, "incorrect length: expected %v, got %v", size, srcVal.Len()) + } + + dest := reflect.New(abiType.TupleType).Elem() + switch srcVal.Type().Kind() { + case reflect.Map: + for i, fieldName := range abiType.TupleRawNames { + src := srcVal.MapIndex(reflect.ValueOf(fieldName)) + elem, err := convertToETHABIType(src.Interface(), *abiType.TupleElems[i]) + if err != nil { + return nil, err + } + dest.FieldByIndex([]int{i}).Set(reflect.ValueOf(elem)) + } + + return dest.Interface(), nil + + case reflect.Slice, reflect.Array: + for i := range abiType.TupleRawNames { + src := 
srcVal.Index(i) + elem, err := convertToETHABIType(src.Interface(), *abiType.TupleElems[i]) + if err != nil { + return nil, err + } + dest.FieldByIndex([]int{i}).Set(reflect.ValueOf(elem)) + } + + return dest.Interface(), nil + + default: + return nil, errors.Wrapf(ErrBadInput, "cannot convert %v to tuple[%d]", srcVal.Type(), size) + } +} + +func convertToETHABIBytes(destType reflect.Type, srcVal reflect.Value, length int) (interface{}, error) { + switch srcVal.Type().Kind() { + case reflect.Slice: + if destType.Len() != length { + return nil, errors.Wrapf(ErrBadInput, "incorrect length: expected %v, got %v", length, destType.Len()) + } else if srcVal.Type().Elem().Kind() != reflect.Uint8 { + return nil, errors.Wrapf(ErrBadInput, "cannot convert %v to %v", srcVal.Type(), destType) + } + if destType.Kind() == reflect.Array { + destVal := reflect.New(destType).Elem() + reflect.Copy(destVal.Slice(0, length), srcVal.Slice(0, srcVal.Len())) + return destVal.Interface(), nil + } + destVal := reflect.MakeSlice(destType, length, length) + reflect.Copy(destVal.Slice(0, length), srcVal.Slice(0, srcVal.Len())) + return destVal.Interface(), nil + + case reflect.Array: + if destType.Kind() == reflect.Array && destType.Len() != length { + return nil, errors.Wrapf(ErrBadInput, "incorrect length: expected %v, got %v", length, destType.Len()) + } else if srcVal.Type().Elem().Kind() != reflect.Uint8 { + return nil, errors.Wrapf(ErrBadInput, "cannot convert %v to %v", srcVal.Type(), destType) + } + var destVal reflect.Value + if destType.Kind() == reflect.Array { + destVal = reflect.New(destType).Elem() + } else { + destVal = reflect.MakeSlice(destType, length, length) + } + reflect.Copy(destVal, srcVal) + return destVal.Interface(), nil + + case reflect.String: + s := srcVal.Convert(stringType).Interface().(string) + if strings.HasPrefix(s, "0x") { + if len(s) != (length*2)+2 { + return nil, errors.Wrapf(ErrBadInput, "incorrect length: expected %v, got %v", length, (len(s)-2)/2) + } 
+ maybeBytes, err := hexutil.Decode(s) + if err != nil { + return nil, err + } + return convertToETHABIBytes(destType, reflect.ValueOf(maybeBytes), length) + } + + if destType.Len() != len(s) { + return nil, errors.Wrapf(ErrBadInput, "incorrect length: expected %v, got %v", length, len(s)) + } + return convertToETHABIBytes(destType, srcVal.Convert(bytesType), length) + + default: + return nil, errors.Wrapf(ErrBadInput, "cannot convert %v to %v", srcVal.Type(), destType) + } +} + +var ErrOverflow = errors.New("overflow") + +func convertToETHABIInteger(val interface{}, abiType abi.Type) (interface{}, error) { + d, err := utils.ToDecimal(val) + if err != nil { + return nil, err + } + + i := d.BigInt() + + if abiType.Size > 64 { + return i, nil + } + + converted := reflect.New(abiType.GetType()).Elem() + // switch on signed/unsignedness of the abi type. + ty := abiType.GetType() + switch ty.Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + if converted.OverflowUint(i.Uint64()) { + return nil, ErrOverflow + } + converted.SetUint(i.Uint64()) + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + if converted.OverflowInt(i.Int64()) { + return nil, ErrOverflow + } + converted.SetInt(i.Int64()) + default: + // go-ethereum handles in-betweener sizes, i.e 24, 40, 48, and 56 bit integers, + // as if they were big.Int, instead of the next largest native integer type that + // could hold it. Unsure of why this decision was taken. + // See https://github.com/ethereum/go-ethereum/blob/master/accounts/abi/reflect.go#L61 for + // the relevant code. 
+ if ty == reflect.TypeOf(&big.Int{}) { + return i, nil + } + return nil, fmt.Errorf("unknown Go type %+v for abi type %+v", ty.String(), abiType) + } + + return converted.Interface(), nil +} diff --git a/core/services/pipeline/common_eth_fuzz_test.go b/core/services/pipeline/common_eth_fuzz_test.go new file mode 100644 index 00000000..fa9986ae --- /dev/null +++ b/core/services/pipeline/common_eth_fuzz_test.go @@ -0,0 +1,19 @@ +//go:build go1.18 + +package pipeline + +import ( + "testing" +) + +func FuzzParseETHABIArgsString(f *testing.F) { + for _, tt := range testsABIDecode { + f.Add(tt.abi, false) + } + f.Fuzz(func(t *testing.T, theABI string, isLog bool) { + _, _, err := ParseETHABIArgsString([]byte(theABI), isLog) + if err != nil { + t.Skip() + } + }) +} diff --git a/core/services/pipeline/common_eth_test.go b/core/services/pipeline/common_eth_test.go new file mode 100644 index 00000000..4f297da1 --- /dev/null +++ b/core/services/pipeline/common_eth_test.go @@ -0,0 +1,154 @@ +package pipeline + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func mustABIType(t *testing.T, ty string) abi.Type { + typ, err := abi.NewType(ty, "", nil) + require.NoError(t, err) + return typ +} + +func Test_convertToETHABIType(t *testing.T) { + t.Parallel() + + emptyHash := common.Hash{} + emptyAddr := common.Address{} + emptyFunc := [24]byte{} + + fullHash := common.HexToHash(strings.Repeat("FF", 32)) + fullAddr := common.HexToAddress(strings.Repeat("FF", 20)) + fullFunc := [24]byte{} + + oneHash := common.Hash{31: 0x1} + oneAddr := common.Address{19: 0x1} + oneFunc := [24]byte{23: 0x1} + type testCase struct { + abiType string + exp interface{} + } + for _, tt := range []struct { + vals []interface{} + cases []testCase + }{ + 
{[]interface{}{emptyHash, emptyHash[:], emptyHash.Hex(), string(emptyHash[:])}, []testCase{ + {"bytes", make([]byte, 32)}, + {"bytes32", [32]byte{}}, + }}, + {[]interface{}{emptyAddr, emptyAddr[:], emptyAddr.Hex(), string(emptyAddr[:])}, []testCase{ + {"bytes", make([]byte, 20)}, + {"bytes20", [20]byte{}}, + {"address", common.Address{}}, + }}, + {[]interface{}{emptyFunc, emptyFunc[:], hexutil.Encode(emptyFunc[:]), string(emptyFunc[:])}, []testCase{ + {"bytes", make([]byte, 24)}, + {"bytes24", [24]byte{}}, + }}, + + {[]interface{}{fullHash, fullHash[:], fullHash.Hex()}, []testCase{ + {"bytes", fullHash[:]}, + {"bytes32", [32]byte(fullHash)}, + }}, + {[]interface{}{fullAddr, fullAddr[:], fullAddr.Hex()}, []testCase{ + {"bytes", fullAddr[:]}, + {"bytes20", [20]byte(fullAddr)}, + {"address", fullAddr}, + }}, + {[]interface{}{fullFunc, fullFunc[:], hexutil.Encode(fullFunc[:])}, []testCase{ + {"bytes", fullFunc[:]}, + {"bytes24", fullFunc}, + }}, + + {[]interface{}{oneHash, oneHash[:], oneHash.Hex()}, []testCase{ + {"bytes", oneHash[:]}, + {"bytes32", [32]byte(oneHash)}, + }}, + {[]interface{}{oneAddr, oneAddr[:], oneAddr.Hex()}, []testCase{ + {"bytes", oneAddr[:]}, + {"bytes20", [20]byte{19: 0x1}}, + {"address", common.Address{19: 0x1}}, + }}, + {[]interface{}{oneFunc, oneFunc[:], hexutil.Encode(oneFunc[:])}, []testCase{ + {"bytes", oneFunc[:]}, + {"bytes24", [24]byte{23: 0x1}}, + }}, + + {[]interface{}{"test", []byte("test")}, []testCase{ + {"string", "test"}, + }}, + + {[]interface{}{true, "true", "1"}, []testCase{ + {"bool", true}, + }}, + } { + tt := tt + for _, tc := range tt.cases { + tc := tc + abiType := mustABIType(t, tc.abiType) + t.Run(fmt.Sprintf("%s:%T", tc.abiType, tc.exp), func(t *testing.T) { + for _, val := range tt.vals { + val := val + t.Run(fmt.Sprintf("%T", val), func(t *testing.T) { + got, err := convertToETHABIType(val, abiType) + require.NoError(t, err) + require.NotNil(t, got) + require.Equal(t, tc.exp, got) + }) + } + }) + } + } +} + +func 
Test_convertToETHABIType_Errors(t *testing.T) { + t.Parallel() + + for _, tt := range []struct { + val interface{} + errStr string + }{ + {"0x1234", "expected 20, got 2"}, + {"0xasdfasdfasdfasdfasdfsadfasdfasdfasdfasdf", "invalid hex"}, + } { + tt := tt + t.Run(fmt.Sprintf("%T,%s", tt.val, tt.errStr), func(t *testing.T) { + _, err := convertToETHABIType(tt.val, mustABIType(t, "bytes20")) + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errStr) + }) + } +} + +func Test_convertToETHABIBytes_Errors(t *testing.T) { + t.Parallel() + + for _, tt := range []struct { + val interface{} + errStr string + }{ + {"test", "expected 20, got 4"}, + {"12345", "expected 20, got 5"}, + {"0x1234", "expected 20, got 2"}, + {"0xzZ", "expected 20, got 1"}, + {"0xasdfasdfasdfasdfasdfsadfasdfasdfasdfasdf", "invalid hex"}, + } { + tt := tt + t.Run(fmt.Sprintf("%T,%s", tt.val, tt.errStr), func(t *testing.T) { + a := reflect.TypeOf([20]byte{}) + b := reflect.ValueOf(tt.val) + _, err := convertToETHABIBytes(a, b, 20) + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errStr) + }) + } +} diff --git a/core/services/pipeline/common_http.go b/core/services/pipeline/common_http.go new file mode 100644 index 00000000..7e2fda60 --- /dev/null +++ b/core/services/pipeline/common_http.go @@ -0,0 +1,107 @@ +package pipeline + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "time" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + clhttp "github.com/goplugin/pluginv3.0/v2/core/utils/http" +) + +func makeHTTPRequest( + ctx context.Context, + lggr logger.Logger, + method StringParam, + url URLParam, + reqHeaders []string, + requestData MapParam, + client *http.Client, + httpLimit int64, +) ([]byte, int, http.Header, time.Duration, error) { + + var bodyReader io.Reader + if requestData != nil { + bodyBytes, err := json.Marshal(requestData) + if err != nil { + return nil, 0, nil, 0, errors.Wrap(err, "failed to encode request body as 
JSON") + } + bodyReader = bytes.NewReader(bodyBytes) + } + + request, err := http.NewRequestWithContext(ctx, string(method), url.String(), bodyReader) + if err != nil { + return nil, 0, nil, 0, errors.Wrap(err, "failed to create http.Request") + } + request.Header.Set("Content-Type", "application/json") + if len(reqHeaders)%2 != 0 { + panic("headers must have an even number of elements") + } + for i := 0; i+1 < len(reqHeaders); i += 2 { + request.Header.Set(reqHeaders[i], reqHeaders[i+1]) + } + + httpRequest := clhttp.HTTPRequest{ + Client: client, + Request: request, + Config: clhttp.HTTPRequestConfig{SizeLimit: httpLimit}, + Logger: lggr.Named("HTTPRequest"), + } + + start := time.Now() + responseBytes, statusCode, respHeaders, err := httpRequest.SendRequest() + if ctx.Err() != nil { + return nil, 0, nil, 0, errors.New("http request timed out or interrupted") + } + if err != nil { + return nil, 0, nil, 0, errors.Wrapf(err, "error making http request") + } + elapsed := time.Since(start) // TODO: return elapsed from utils/http + + if statusCode >= 400 { + maybeErr := bestEffortExtractError(responseBytes) + return nil, statusCode, respHeaders, 0, errors.Errorf("got error from %s: (status code %v) %s", url.String(), statusCode, maybeErr) + } + return responseBytes, statusCode, respHeaders, elapsed, nil +} + +type PossibleErrorResponses struct { + Error string `json:"error"` + ErrorMessage string `json:"errorMessage"` +} + +func bestEffortExtractError(responseBytes []byte) string { + var resp PossibleErrorResponses + err := json.Unmarshal(responseBytes, &resp) + if err != nil { + return "" + } + if resp.Error != "" { + return resp.Error + } else if resp.ErrorMessage != "" { + return resp.ErrorMessage + } + return string(responseBytes) +} + +func httpRequestCtx(ctx context.Context, t Task, cfg Config) (requestCtx context.Context, cancel context.CancelFunc) { + // Only set the default timeout if the task timeout is missing; task + // timeout if present will have already 
been set on the context at a higher + // level. If task timeout is explicitly set to zero, we must not override + // with the default http timeout here (since it has been explicitly + // disabled). + // + // DefaultHTTPTimeout is not used if set to 0. + if _, isSet := t.TaskTimeout(); !isSet && cfg.DefaultHTTPTimeout().Duration() > 0 { + requestCtx, cancel = context.WithTimeout(ctx, cfg.DefaultHTTPTimeout().Duration()) + } else { + requestCtx = ctx + cancel = func() {} + } + return +} diff --git a/core/services/pipeline/common_test.go b/core/services/pipeline/common_test.go new file mode 100644 index 00000000..c9bd7e45 --- /dev/null +++ b/core/services/pipeline/common_test.go @@ -0,0 +1,417 @@ +package pipeline_test + +import ( + "encoding/json" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestTimeoutAttribute(t *testing.T) { + t.Parallel() + + a := `ds1 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}> timeout="10s"];` + p, err := pipeline.Parse(a) + require.NoError(t, err) + timeout, set := p.Tasks[0].TaskTimeout() + assert.Equal(t, cltest.MustParseDuration(t, "10s"), timeout) + assert.Equal(t, true, set) + + a = `ds1 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}>];` + p, err = pipeline.Parse(a) + require.NoError(t, err) + timeout, set = p.Tasks[0].TaskTimeout() + assert.Equal(t, 
cltest.MustParseDuration(t, "0s"), timeout) + assert.Equal(t, false, set) +} + +func TestTaskHTTPUnmarshal(t *testing.T) { + t.Parallel() + + a := `ds1 [type=http allowunrestrictednetworkaccess=true method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}> timeout="10s"];` + p, err := pipeline.Parse(a) + require.NoError(t, err) + require.Len(t, p.Tasks, 1) + + task := p.Tasks[0].(*pipeline.HTTPTask) + require.Equal(t, "true", task.AllowUnrestrictedNetworkAccess) +} + +func TestTaskAnyUnmarshal(t *testing.T) { + t.Parallel() + + a := `ds1 [type=any failEarly=true];` + p, err := pipeline.Parse(a) + require.NoError(t, err) + require.Len(t, p.Tasks, 1) + _, ok := p.Tasks[0].(*pipeline.AnyTask) + require.True(t, ok) + require.Equal(t, true, p.Tasks[0].Base().FailEarly) +} + +func TestRetryUnmarshal(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + spec string + retries uint32 + min time.Duration + max time.Duration + }{ + { + + "nothing specified", + `ds1 [type=any];`, + 0, + time.Second * 5, + time.Minute, + }, + { + + "only retry specified", + `ds1 [type=any retries=5];`, + 5, + time.Second * 5, + time.Minute, + }, + { + "all params set", + `ds1 [type=http retries=10 minBackoff="1s" maxBackoff="30m"];`, + 10, + time.Second, + time.Minute * 30, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + p, err := pipeline.Parse(test.spec) + require.NoError(t, err) + require.Len(t, p.Tasks, 1) + require.Equal(t, test.retries, p.Tasks[0].TaskRetries()) + require.Equal(t, test.min, p.Tasks[0].TaskMinBackoff()) + require.Equal(t, test.max, p.Tasks[0].TaskMaxBackoff()) + }) + } +} + +func TestUnmarshalTaskFromMap(t *testing.T) { + t.Parallel() + + t.Run("returns error if task is not the right type", func(t *testing.T) { + taskMap := interface{}(nil) + _, err := pipeline.UnmarshalTaskFromMap(pipeline.TaskType("http"), taskMap, 0, "foo-dot-id") + require.EqualError(t, err, "UnmarshalTaskFromMap: 
UnmarshalTaskFromMap only accepts a map[string]interface{} or a map[string]string. Got () of type ") + + taskMap = struct { + foo time.Time + bar int + }{time.Unix(42, 42), 42} + _, err = pipeline.UnmarshalTaskFromMap(pipeline.TaskType("http"), taskMap, 0, "foo-dot-id") + require.Error(t, err) + require.Contains(t, err.Error(), "UnmarshalTaskFromMap: UnmarshalTaskFromMap only accepts a map[string]interface{} or a map[string]string") + }) + + t.Run("unknown task type", func(t *testing.T) { + taskMap := map[string]string{} + _, err := pipeline.UnmarshalTaskFromMap(pipeline.TaskType("xxx"), taskMap, 0, "foo-dot-id") + require.EqualError(t, err, `UnmarshalTaskFromMap: unknown task type: "xxx"`) + }) + + tests := []struct { + taskType pipeline.TaskType + expectedTaskType interface{} + }{ + {pipeline.TaskTypeHTTP, &pipeline.HTTPTask{}}, + {pipeline.TaskTypeBridge, &pipeline.BridgeTask{}}, + {pipeline.TaskTypeMean, &pipeline.MeanTask{}}, + {pipeline.TaskTypeMedian, &pipeline.MedianTask{}}, + {pipeline.TaskTypeMode, &pipeline.ModeTask{}}, + {pipeline.TaskTypeSum, &pipeline.SumTask{}}, + {pipeline.TaskTypeMultiply, &pipeline.MultiplyTask{}}, + {pipeline.TaskTypeDivide, &pipeline.DivideTask{}}, + {pipeline.TaskTypeJSONParse, &pipeline.JSONParseTask{}}, + {pipeline.TaskTypeCBORParse, &pipeline.CBORParseTask{}}, + {pipeline.TaskTypeAny, &pipeline.AnyTask{}}, + {pipeline.TaskTypeVRF, &pipeline.VRFTask{}}, + {pipeline.TaskTypeVRFV2, &pipeline.VRFTaskV2{}}, + {pipeline.TaskTypeVRFV2Plus, &pipeline.VRFTaskV2Plus{}}, + {pipeline.TaskTypeEstimateGasLimit, &pipeline.EstimateGasLimitTask{}}, + {pipeline.TaskTypeETHCall, &pipeline.ETHCallTask{}}, + {pipeline.TaskTypeETHTx, &pipeline.ETHTxTask{}}, + {pipeline.TaskTypeETHABIEncode, &pipeline.ETHABIEncodeTask{}}, + {pipeline.TaskTypeETHABIEncode2, &pipeline.ETHABIEncodeTask2{}}, + {pipeline.TaskTypeETHABIDecode, &pipeline.ETHABIDecodeTask{}}, + {pipeline.TaskTypeETHABIDecodeLog, &pipeline.ETHABIDecodeLogTask{}}, + {pipeline.TaskTypeMerge, 
&pipeline.MergeTask{}}, + {pipeline.TaskTypeLowercase, &pipeline.LowercaseTask{}}, + {pipeline.TaskTypeUppercase, &pipeline.UppercaseTask{}}, + {pipeline.TaskTypeConditional, &pipeline.ConditionalTask{}}, + {pipeline.TaskTypeHexDecode, &pipeline.HexDecodeTask{}}, + {pipeline.TaskTypeBase64Decode, &pipeline.Base64DecodeTask{}}, + } + + for _, test := range tests { + t.Run(string(test.taskType), func(t *testing.T) { + taskMap := map[string]string{} + task, err := pipeline.UnmarshalTaskFromMap(test.taskType, taskMap, 0, "foo-dot-id") + require.NoError(t, err) + require.IsType(t, test.expectedTaskType, task) + }) + } +} + +func TestMarshalJSONSerializable_replaceBytesWithHex(t *testing.T) { + t.Parallel() + + type jsm = map[string]interface{} + + toJSONSerializable := func(val jsm) *pipeline.JSONSerializable { + return &pipeline.JSONSerializable{ + Valid: true, + Val: val, + } + } + + var ( + testAddr1 = common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406f111") + testAddr2 = common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406f222") + testHash1 = common.HexToHash("0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071eaf111") + testHash2 = common.HexToHash("0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071eaf222") + ) + + tests := []struct { + name string + input *pipeline.JSONSerializable + expected string + err error + }{ + {"invalid input", &pipeline.JSONSerializable{Valid: false}, "null", nil}, + {"empty object", toJSONSerializable(jsm{}), "{}", nil}, + {"byte slice", toJSONSerializable(jsm{"slice": []byte{0x10, 0x20, 0x30}}), + `{"slice":"0x102030"}`, nil}, + {"address", toJSONSerializable(jsm{"addr": testAddr1}), + `{"addr":"0x2aB9a2dc53736B361B72d900cDF9f78f9406f111"}`, nil}, + {"hash", toJSONSerializable(jsm{"hash": testHash1}), + `{"hash":"0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071eaf111"}`, nil}, + {"slice of byte slice", toJSONSerializable(jsm{"slices": [][]byte{{0x10, 0x11, 0x12}, {0x20, 0x21, 0x22}}}), + 
`{"slices":["0x101112","0x202122"]}`, nil}, + {"slice of addresses", toJSONSerializable(jsm{"addresses": []common.Address{testAddr1, testAddr2}}), + `{"addresses":["0x2aB9a2dc53736B361B72d900cDF9f78f9406f111","0x2aB9A2Dc53736b361b72D900CDf9f78f9406F222"]}`, nil}, + {"slice of hashes", toJSONSerializable(jsm{"hashes": []common.Hash{testHash1, testHash2}}), + `{"hashes":["0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071eaf111","0x317cfd032b5d6657995f17fe768f7cc4ea0ada27ad421c4caa685a9071eaf222"]}`, nil}, + {"slice of interfaces", toJSONSerializable(jsm{"ifaces": []interface{}{[]byte{0x10, 0x11, 0x12}, []byte{0x20, 0x21, 0x22}}}), + `{"ifaces":["0x101112","0x202122"]}`, nil}, + {"map", toJSONSerializable(jsm{"map": jsm{"slice": []byte{0x10, 0x11, 0x12}, "addr": testAddr1}}), + `{"map":{"addr":"0x2aB9a2dc53736B361B72d900cDF9f78f9406f111","slice":"0x101112"}}`, nil}, + {"byte array 4", toJSONSerializable(jsm{"ba4": [4]byte{1, 2, 3, 4}}), + `{"ba4":"0x01020304"}`, nil}, + {"byte array 8", toJSONSerializable(jsm{"ba8": [8]uint8{1, 2, 3, 4, 5, 6, 7, 8}}), + `{"ba8":"0x0102030405060708"}`, nil}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + bytes, err := test.input.MarshalJSON() + assert.Equal(t, test.expected, string(bytes)) + assert.Equal(t, test.err, errors.Cause(err)) + }) + } +} + +func TestUnmarshalJSONSerializable(t *testing.T) { + t.Parallel() + + big, ok := new(big.Int).SetString("18446744073709551616", 10) + assert.True(t, ok) + + tests := []struct { + name, input string + expected interface{} + }{ + {"null json", `null`, nil}, + {"bool", `true`, true}, + {"string", `"foo"`, "foo"}, + {"object with int", `{"foo": 42}`, map[string]interface{}{"foo": int64(42)}}, + {"object with float", `{"foo": 3.14}`, map[string]interface{}{"foo": float64(3.14)}}, + {"object with big int", `{"foo": 18446744073709551616}`, map[string]interface{}{"foo": big}}, + {"slice", `[42, 3.14]`, []interface{}{int64(42), float64(3.14)}}, + {"nested 
map", `{"m": {"foo": 42}}`, map[string]interface{}{"m": map[string]interface{}{"foo": int64(42)}}}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var i pipeline.JSONSerializable + err := json.Unmarshal([]byte(test.input), &i) + require.NoError(t, err) + if test.expected != nil { + assert.True(t, i.Valid) + assert.Equal(t, test.expected, i.Val) + } + }) + } +} + +func TestCheckInputs(t *testing.T) { + t.Parallel() + + emptyPR := []pipeline.Result{} + nonEmptyPR := []pipeline.Result{ + { + Value: "foo", + Error: nil, + }, + { + Value: "err", + Error: errors.New("bar"), + }, + } + + tests := []struct { + name string + pr []pipeline.Result + minLen, maxLen, maxErrors int + err error + outputsLen int + }{ + {"minLen violation", emptyPR, 1, 0, 0, pipeline.ErrWrongInputCardinality, 0}, + {"maxLen violation", nonEmptyPR, 1, 1, 0, pipeline.ErrWrongInputCardinality, 0}, + {"maxErrors violation", nonEmptyPR, 1, 2, 0, pipeline.ErrTooManyErrors, 0}, + {"ok", nonEmptyPR, 1, 2, 1, nil, 1}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + outputs, err := pipeline.CheckInputs(test.pr, test.minLen, test.maxLen, test.maxErrors) + if test.err == nil { + assert.NoError(t, err) + assert.Equal(t, test.outputsLen, len(outputs)) + } else { + assert.Equal(t, test.err, errors.Cause(err)) + } + }) + } +} + +func TestTaskRunResult_IsPending(t *testing.T) { + t.Parallel() + + trr := &pipeline.TaskRunResult{} + assert.True(t, trr.IsPending()) + + trrWithResult := &pipeline.TaskRunResult{Result: pipeline.Result{Value: "foo"}} + assert.False(t, trrWithResult.IsPending()) + + trrWithFinishedAt := &pipeline.TaskRunResult{FinishedAt: null.NewTime(time.Now(), true)} + assert.False(t, trrWithFinishedAt.IsPending()) +} + +func TestSelectGasLimit(t *testing.T) { + t.Parallel() + + gcfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.LimitDefault = ptr(uint32(999)) + 
c.EVM[0].GasEstimator.LimitJobType = toml.GasLimitJobType{ + DR: ptr(uint32(100)), + VRF: ptr(uint32(101)), + FM: ptr(uint32(102)), + OCR: ptr(uint32(103)), + Keeper: ptr(uint32(104)), + OCR2: ptr(uint32(105)), + } + }) + cfg := evmtest.NewChainScopedConfig(t, gcfg) + + t.Run("spec defined gas limit", func(t *testing.T) { + var specGasLimit uint32 = 1 + gasLimit := pipeline.SelectGasLimit(cfg.EVM().GasEstimator(), pipeline.DirectRequestJobType, &specGasLimit) + assert.Equal(t, uint32(1), gasLimit) + }) + + t.Run("direct request specific gas limit", func(t *testing.T) { + gasLimit := pipeline.SelectGasLimit(cfg.EVM().GasEstimator(), pipeline.DirectRequestJobType, nil) + assert.Equal(t, uint32(100), gasLimit) + }) + + t.Run("OCR specific gas limit", func(t *testing.T) { + gasLimit := pipeline.SelectGasLimit(cfg.EVM().GasEstimator(), pipeline.OffchainReportingJobType, nil) + assert.Equal(t, uint32(103), gasLimit) + }) + + t.Run("OCR2 specific gas limit", func(t *testing.T) { + gasLimit := pipeline.SelectGasLimit(cfg.EVM().GasEstimator(), pipeline.OffchainReporting2JobType, nil) + assert.Equal(t, uint32(105), gasLimit) + }) + + t.Run("VRF specific gas limit", func(t *testing.T) { + gasLimit := pipeline.SelectGasLimit(cfg.EVM().GasEstimator(), pipeline.VRFJobType, nil) + assert.Equal(t, uint32(101), gasLimit) + }) + + t.Run("flux monitor specific gas limit", func(t *testing.T) { + gasLimit := pipeline.SelectGasLimit(cfg.EVM().GasEstimator(), pipeline.FluxMonitorJobType, nil) + assert.Equal(t, uint32(102), gasLimit) + }) + + t.Run("keeper specific gas limit", func(t *testing.T) { + gasLimit := pipeline.SelectGasLimit(cfg.EVM().GasEstimator(), pipeline.KeeperJobType, nil) + assert.Equal(t, uint32(104), gasLimit) + }) + + t.Run("fallback to default gas limit", func(t *testing.T) { + gasLimit := pipeline.SelectGasLimit(cfg.EVM().GasEstimator(), pipeline.WebhookJobType, nil) + assert.Equal(t, uint32(999), gasLimit) + }) +} +func TestGetNextTaskOf(t *testing.T) { + trrs := 
// GetterFunc is a function that either returns a value or an error.
type GetterFunc func() (interface{}, error)

// From builds a []GetterFunc from a heterogeneous argument list. Arguments
// that already are GetterFuncs are kept as-is; every other value is wrapped
// in a trivial getter returning that value with a nil error.
func From(getters ...interface{}) []GetterFunc {
	var out []GetterFunc
	for _, item := range getters {
		if g, ok := item.(GetterFunc); ok {
			out = append(out, g)
			continue
		}
		item := item // fresh copy per iteration so each closure sees its own value (pre-Go 1.22 loop semantics)
		out = append(out, func() (interface{}, error) {
			return item, nil
		})
	}
	return out
}
+func NonemptyString(s string) GetterFunc { + return func() (interface{}, error) { + trimmed := strings.TrimSpace(s) + if len(trimmed) == 0 { + return nil, ErrParameterEmpty + } + return trimmed, nil + } +} + +// ValidDurationInSeconds creates a getter to ensure the string is a valid duration and return duration in seconds. +func ValidDurationInSeconds(s string) GetterFunc { + return func() (interface{}, error) { + trimmed := strings.TrimSpace(s) + if len(trimmed) == 0 { + return nil, ErrParameterEmpty + } + dr, err := time.ParseDuration(s) + if err != nil { + return nil, err + } + return int(dr.Seconds()), nil + } +} + +// Input creates a getter returning inputs[index] value, or error if index is out of range. +func Input(inputs []Result, index int) GetterFunc { + return func() (interface{}, error) { + if index < 0 || index >= len(inputs) { + return nil, ErrIndexOutOfRange + } + return inputs[index].Value, inputs[index].Error + } +} + +// Inputs creates a getter returning array of Result.Value (or Result.Error where not nil). +func Inputs(inputs []Result) GetterFunc { + return func() (interface{}, error) { + var vals []interface{} + for _, input := range inputs { + if input.Error != nil { + vals = append(vals, input.Error) + } else { + vals = append(vals, input.Value) + } + } + return vals, nil + } +} + +// VarExpr creates a getter interpolating expr value using the given Vars. +// The expression allows whitespace on both ends that will be trimmed. 
+// Expr examples: $(foo.bar), $(arr.1), $(bar) +func VarExpr(expr string, vars Vars) GetterFunc { + return func() (interface{}, error) { + trimmed := strings.TrimSpace(expr) + if len(trimmed) < 3 { + return nil, ErrParameterEmpty + } + isVariableExpr := strings.Count(trimmed, "$") == 1 && trimmed[:2] == "$(" && trimmed[len(trimmed)-1] == ')' + if !isVariableExpr { + return nil, ErrParameterEmpty + } + keypath := strings.TrimSpace(trimmed[2 : len(trimmed)-1]) + if len(keypath) == 0 { + return nil, ErrParameterEmpty + } + val, err := vars.Get(keypath) + if err != nil { + return nil, err + } else if as, is := val.(error); is { + return nil, errors.Wrapf(ErrTooManyErrors, "VarExpr: %v", as) + } + return val, nil + } +} + +// JSONWithVarExprs creates a getter that unmarshals jsExpr string as JSON, and +// interpolates all variables expressions found in jsExpr from Vars. +// The getter returns the unmarshalled object having expressions interpolated from Vars. +// allowErrors flag indicates if interpolating values stored in Vars can be errors. 
+// jsExpr example: {"requestId": $(decode_log.requestId), "payment": $(decode_log.payment)} +func JSONWithVarExprs(jsExpr string, vars Vars, allowErrors bool) GetterFunc { + return func() (interface{}, error) { + if strings.TrimSpace(jsExpr) == "" { + return nil, ErrParameterEmpty + } + const pluginKeyPath = "__plugin_key_path__" + replaced := variableRegexp.ReplaceAllFunc([]byte(jsExpr), func(expr []byte) []byte { + keypathStr := strings.TrimSpace(string(expr[2 : len(expr)-1])) + return []byte(fmt.Sprintf(`{ "%s": "%s" }`, pluginKeyPath, keypathStr)) + }) + + var val interface{} + jd := json.NewDecoder(bytes.NewReader(replaced)) + jd.UseNumber() + if err := jd.Decode(&val); err != nil { + return nil, errors.Wrapf(ErrBadInput, "while unmarshalling JSON: %v; js: %s", err, string(replaced)) + } + reinterpreted, err := reinterpetJsonNumbers(val) + if err != nil { + return nil, errors.Wrapf(ErrBadInput, "while processing json.Number: %v; js: %s", err, string(replaced)) + } + val = reinterpreted + + return mapGoValue(val, func(val interface{}) (interface{}, error) { + if m, is := val.(map[string]interface{}); is { + maybeKeypath, exists := m[pluginKeyPath] + if !exists { + return val, nil + } + keypath, is := maybeKeypath.(string) + if !is { + return nil, errors.Wrapf(ErrBadInput, fmt.Sprintf("you cannot use %s in your JSON", pluginKeyPath)) + } + newVal, err := vars.Get(keypath) + if err != nil { + return nil, err + } else if err, is := newVal.(error); is && !allowErrors { + return nil, errors.Wrapf(ErrBadInput, "error is not allowed: %v", err) + } + return newVal, nil + } + return val, nil + }) + } +} + +// mapGoValue iterates on v object recursively and calls fn for each value. +// Used by JSONWithVarExprs to interpolate all variables expressions. 
// mapGoValue walks v (maps, slices, and scalars) breadth-first, applying fn
// to every value it encounters and writing each transformed child back into
// its parent container in place. The (possibly mutated) original root v is
// returned.
// Used by JSONWithVarExprs to interpolate all variables expressions.
//
// NOTE(review): a value returned by fn for the ROOT itself is used only to
// continue the walk — it is not reflected in the return value; only nested
// values are replaced in place. This matches the pre-existing behavior.
func mapGoValue(v interface{}, fn func(val interface{}) (interface{}, error)) (interface{}, error) {
	type workItem struct {
		value     interface{}
		intoMap   map[string]interface{}
		mapKey    string
		intoSlice []interface{}
		sliceIdx  int
	}

	queue := []workItem{{value: v}}
	for len(queue) > 0 {
		w := queue[0]
		queue = queue[1:]

		mapped, err := fn(w.value)
		if err != nil {
			return nil, err
		}

		// Write the transformed value back into whichever container holds it
		// (the root has neither parent and is left as-is).
		switch {
		case w.intoMap != nil:
			w.intoMap[w.mapKey] = mapped
		case w.intoSlice != nil:
			w.intoSlice[w.sliceIdx] = mapped
		}

		// Enqueue the children of the transformed value for later processing.
		switch c := mapped.(type) {
		case map[string]interface{}:
			for k := range c {
				queue = append(queue, workItem{value: c[k], intoMap: c, mapKey: k})
			}
		case []interface{}:
			for i := range c {
				queue = append(queue, workItem{value: c[i], intoSlice: c, sliceIdx: i})
			}
		}
	}
	return v, nil
}
pipeline.ErrParameterEmpty}, + {"$)", nil, pipeline.ErrParameterEmpty}, + {"()", nil, pipeline.ErrParameterEmpty}, + {"$(foo.bar", nil, pipeline.ErrParameterEmpty}, + {"$foo.bar)", nil, pipeline.ErrParameterEmpty}, + {"(foo.bar)", nil, pipeline.ErrParameterEmpty}, + {"foo.bar", nil, pipeline.ErrParameterEmpty}, + {"$(err)", nil, pipeline.ErrTooManyErrors}, + } + + for _, test := range tests { + t.Run(test.expr, func(t *testing.T) { + getter := pipeline.VarExpr(test.expr, vars) + v, err := getter() + if test.err == nil { + assert.NoError(t, err) + assert.Equal(t, test.result, v) + } else { + assert.Equal(t, test.err, errors.Cause(err)) + } + }) + } +} + +func TestGetters_JSONWithVarExprs(t *testing.T) { + t.Parallel() + + vars := createTestVars() + + errVal, err := vars.Get("err") + require.NoError(t, err) + + big, ok := new(big.Int).SetString("314159265358979323846264338327950288419716939937510582097494459", 10) + require.True(t, ok) + + tests := []struct { + json string + field string + result interface{} + err error + allowErrors bool + }{ + // no errors + {`{ "x": $(zet) }`, "x", 123, nil, false}, + {`{ "x": $( zet ) }`, "x", 123, nil, false}, + {`{ "x": { "y": $(zet) } }`, "x", map[string]interface{}{"y": 123}, nil, false}, + {`{ "z": "foo" }`, "z", "foo", nil, false}, + {`{ "a": $(arr.1) }`, "a", 200, nil, false}, + {`{}`, "", map[string]interface{}{}, nil, false}, + {`{ "e": $(err) }`, "e", errVal, nil, true}, + {`null`, "", nil, nil, false}, + {`{ "x": 314159265358979323846264338327950288419716939937510582097494459 }`, "x", big, nil, false}, + {`{ "x": 3141592653589 }`, "x", int64(3141592653589), nil, false}, + {`{ "x": 18446744073709551615 }`, "x", uint64(18446744073709551615), nil, false}, + {`{ "x": 3141592653589.567 }`, "x", float64(3141592653589.567), nil, false}, + // errors + {` `, "", nil, pipeline.ErrParameterEmpty, false}, + {`{ "x": $(missing) }`, "x", nil, pipeline.ErrKeypathNotFound, false}, + {`{ "x": "$(zet)" }`, "x", "$(zet)", 
pipeline.ErrBadInput, false}, + {`{ "$(foo.bar)": $(zet) }`, "value", 123, pipeline.ErrBadInput, false}, + {`{ "x": { "__plugin_key_path__": 0 } }`, "", nil, pipeline.ErrBadInput, false}, + {`{ "e": $(err)`, "e", nil, pipeline.ErrBadInput, false}, + } + + for _, test := range tests { + t.Run(test.json, func(t *testing.T) { + getter := pipeline.JSONWithVarExprs(test.json, vars, test.allowErrors) + v, err := getter() + if test.err != nil { + assert.Equal(t, test.err, errors.Cause(err)) + } else { + m, is := v.(map[string]interface{}) + if is && test.field != "" { + assert.Equal(t, test.result, m[test.field]) + } else { + assert.Equal(t, test.result, v) + } + } + }) + } +} + +func TestGetters_Input(t *testing.T) { + t.Parallel() + + t.Run("returns the requested input's Value and Error if they exist", func(t *testing.T) { + expectedVal := "bar" + expectedErr := errors.New("some err") + val, err := pipeline.Input([]pipeline.Result{{Value: "foo"}, {Value: expectedVal, Error: expectedErr}, {Value: "baz"}}, 1)() + assert.Equal(t, expectedVal, val) + assert.Equal(t, expectedErr, err) + }) + + t.Run("returns ErrIndexOutOfRange if the specified index is out of range", func(t *testing.T) { + _, err := pipeline.Input([]pipeline.Result{{Value: "foo"}}, 1)() + assert.Equal(t, pipeline.ErrIndexOutOfRange, errors.Cause(err)) + _, err = pipeline.Input([]pipeline.Result{{Value: "foo"}}, -1)() + assert.Equal(t, pipeline.ErrIndexOutOfRange, errors.Cause(err)) + }) +} + +func TestGetters_Inputs(t *testing.T) { + t.Parallel() + + theErr := errors.New("some issue") + + tests := []struct { + name string + inputs []pipeline.Result + expected []interface{} + expectedErr error + }{ + { + "returns the values and errors", + []pipeline.Result{ + {Value: "foo"}, + {Error: theErr}, + {Value: "baz"}, + }, + []interface{}{"foo", theErr, "baz"}, + nil, + }, + { + "returns nil array", + []pipeline.Result{}, + nil, + nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { 
+ val, err := pipeline.Inputs(test.inputs)() + assert.Equal(t, test.expectedErr, errors.Cause(err)) + assert.Equal(t, test.expected, val) + }) + } +} + +func TestGetters_NonemptyString(t *testing.T) { + t.Parallel() + + t.Run("returns any non-empty string", func(t *testing.T) { + val, err := pipeline.NonemptyString("foo bar")() + assert.NoError(t, err) + assert.Equal(t, "foo bar", val) + }) + + t.Run("returns ErrParameterEmpty when given an empty string (including only spaces)", func(t *testing.T) { + _, err := pipeline.NonemptyString("")() + assert.Equal(t, pipeline.ErrParameterEmpty, errors.Cause(err)) + _, err = pipeline.NonemptyString(" ")() + assert.Equal(t, pipeline.ErrParameterEmpty, errors.Cause(err)) + }) +} + +func TestGetters_ValidDurationInSeconds(t *testing.T) { + t.Parallel() + + t.Run("returns duration in seconds", func(t *testing.T) { + val, err := pipeline.ValidDurationInSeconds("10s")() + assert.NoError(t, err) + assert.Equal(t, 10, val) + + val, err = pipeline.ValidDurationInSeconds("1m")() + assert.NoError(t, err) + assert.Equal(t, 60, val) + + val, err = pipeline.ValidDurationInSeconds("1h")() + assert.NoError(t, err) + assert.Equal(t, 3600, val) + }) + + t.Run("returns ErrParameterEmpty when given an empty string (including only spaces)", func(t *testing.T) { + _, err := pipeline.ValidDurationInSeconds("")() + assert.Equal(t, pipeline.ErrParameterEmpty, errors.Cause(err)) + _, err = pipeline.ValidDurationInSeconds(" ")() + assert.Equal(t, pipeline.ErrParameterEmpty, errors.Cause(err)) + }) + + t.Run("returns duration errors when given invalid durations", func(t *testing.T) { + _, err := pipeline.ValidDurationInSeconds("1b")() + assert.Contains(t, err.Error(), "unknown unit") + _, err = pipeline.ValidDurationInSeconds("5")() + assert.Contains(t, err.Error(), "missing unit") + _, err = pipeline.ValidDurationInSeconds("!m")() + assert.Contains(t, err.Error(), "invalid duration") + }) +} + +func TestGetters_From(t *testing.T) { + t.Parallel() + + 
t.Run("no inputs", func(t *testing.T) { + getters := pipeline.From() + assert.Empty(t, getters) + }) + + var fooGetter1 pipeline.GetterFunc = func() (interface{}, error) { + return "foo", nil + } + var fooGetter2 pipeline.GetterFunc = func() (interface{}, error) { + return "foo", nil + } + + tests := []struct { + name string + input []interface{} + expected string + }{ + { + "only getters", + []interface{}{fooGetter1, fooGetter2}, + "foo", + }, + { + "mix of getters and values", + []interface{}{fooGetter1, "foo"}, + "foo", + }, + { + "only values", + []interface{}{"foo", "foo"}, + "foo", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + getters := pipeline.From(test.input...) + assert.Len(t, getters, 2) + + for _, getter := range getters { + val, err := getter() + assert.NoError(t, err) + assert.Equal(t, test.expected, val) + } + }) + } +} + +func createTestVars() pipeline.Vars { + return pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "bar": "value", + }, + "zet": 123, + "arr": []interface{}{ + 100, 200, 300, + }, + "err": errors.New("some error"), + }) +} diff --git a/core/services/pipeline/graph.go b/core/services/pipeline/graph.go new file mode 100644 index 00000000..c3914e69 --- /dev/null +++ b/core/services/pipeline/graph.go @@ -0,0 +1,275 @@ +package pipeline + +import ( + "fmt" + "regexp" + "sort" + "strings" + "time" + + "github.com/pkg/errors" + "gonum.org/v1/gonum/graph" + "gonum.org/v1/gonum/graph/encoding" + "gonum.org/v1/gonum/graph/encoding/dot" + "gonum.org/v1/gonum/graph/simple" + "gonum.org/v1/gonum/graph/topo" +) + +// tree fulfills the graph.DirectedGraph interface, which makes it possible +// for us to `dot.Unmarshal(...)` a DOT string directly into it. 
+type Graph struct { + *simple.DirectedGraph +} + +func NewGraph() *Graph { + return &Graph{DirectedGraph: simple.NewDirectedGraph()} +} + +func (g *Graph) NewNode() graph.Node { + return &GraphNode{Node: g.DirectedGraph.NewNode()} +} + +func (g *Graph) NewEdge(from, to graph.Node) graph.Edge { + return &GraphEdge{Edge: g.DirectedGraph.NewEdge(from, to)} +} + +func (g *Graph) UnmarshalText(bs []byte) (err error) { + if g.DirectedGraph == nil { + g.DirectedGraph = simple.NewDirectedGraph() + } + defer func() { + if rerr := recover(); rerr != nil { + err = fmt.Errorf("could not unmarshal DOT into a pipeline.Graph: %v", rerr) + } + }() + bs = append([]byte("digraph {\n"), bs...) + bs = append(bs, []byte("\n}")...) + err = dot.Unmarshal(bs, g) + if err != nil { + return errors.Wrap(err, "could not unmarshal DOT into a pipeline.Graph") + } + g.AddImplicitDependenciesAsEdges() + return nil +} + +// Looks at node attributes and searches for implicit dependencies on other nodes +// expressed as attribute values. Adds those dependencies as implicit edges in the graph. +func (g *Graph) AddImplicitDependenciesAsEdges() { + for nodesIter := g.Nodes(); nodesIter.Next(); { + graphNode := nodesIter.Node().(*GraphNode) + + params := make(map[string]bool) + // Walk through all attributes and find all params which this node depends on + for _, attr := range graphNode.Attributes() { + for _, item := range variableRegexp.FindAll([]byte(attr.Value), -1) { + expr := strings.TrimSpace(string(item[2 : len(item)-1])) + param := strings.Split(expr, ".")[0] + params[param] = true + } + } + // Iterate through all nodes and add a new edge if node belongs to params set, and there already isn't an edge. + for nodesIter2 := g.Nodes(); nodesIter2.Next(); { + gn := nodesIter2.Node().(*GraphNode) + if params[gn.DOTID()] { + // If these are distinct nodes with no existing edge between them, then add an implicit edge. 
+ if gn.ID() != graphNode.ID() && !g.HasEdgeFromTo(gn.ID(), graphNode.ID()) { + edge := g.NewEdge(gn, graphNode).(*GraphEdge) + // Setting isImplicit indicates that this edge wasn't specified via the TOML spec, + // but rather added automatically here. + // This distinction is needed, as we don't want to propagate results of a task to its dependent + // tasks along implicit edge, as some tasks can't handle unexpected inputs from implicit edges. + edge.SetIsImplicit(true) + g.SetEdge(edge) + } + } + } + } +} + +// Indicates whether there's an implicit edge from uid -> vid. +// Implicit edged are ones that weren't added via the TOML spec, but via the pipeline parsing code +func (g *Graph) IsImplicitEdge(uid, vid int64) bool { + edge := g.Edge(uid, vid).(*GraphEdge) + if edge == nil { + return false + } + return edge.IsImplicit() +} + +type GraphEdge struct { + graph.Edge + + // Indicates that this edge was implicitly added by the pipeline parser, and not via the TOML specs. + isImplicit bool +} + +func (e *GraphEdge) IsImplicit() bool { + return e.isImplicit +} + +func (e *GraphEdge) SetIsImplicit(isImplicit bool) { + e.isImplicit = isImplicit +} + +type GraphNode struct { + graph.Node + dotID string + attrs map[string]string +} + +func (n *GraphNode) DOTID() string { + return n.dotID +} + +func (n *GraphNode) SetDOTID(id string) { + n.dotID = id +} + +func (n *GraphNode) String() string { + return n.dotID +} + +var bracketQuotedAttrRegexp = regexp.MustCompile(`\A\s*<([^<>]+)>\s*\z`) + +func (n *GraphNode) SetAttribute(attr encoding.Attribute) error { + if n.attrs == nil { + n.attrs = make(map[string]string) + } + + // Strings quoted in angle brackets (supported natively by DOT) should + // have those brackets removed before decoding to task parameter types + sanitized := bracketQuotedAttrRegexp.ReplaceAllString(attr.Value, "$1") + + n.attrs[attr.Key] = sanitized + return nil +} + +func (n *GraphNode) Attributes() []encoding.Attribute { + var r []encoding.Attribute + 
for k, v := range n.attrs { + r = append(r, encoding.Attribute{Key: k, Value: v}) + } + // Ensure the slice returned is deterministic. + sort.Slice(r, func(i, j int) bool { + return r[i].Key < r[j].Key + }) + return r +} + +type Pipeline struct { + Tasks []Task + tree *Graph + Source string +} + +func (p *Pipeline) UnmarshalText(bs []byte) (err error) { + parsed, err := Parse(string(bs)) + if err != nil { + return err + } + *p = *parsed + return nil +} + +func (p *Pipeline) MinTimeout() (time.Duration, bool, error) { + var minTimeout time.Duration = 1<<63 - 1 + var aTimeoutSet bool + for _, t := range p.Tasks { + if timeout, set := t.TaskTimeout(); set && timeout < minTimeout { + minTimeout = timeout + aTimeoutSet = true + } + } + return minTimeout, aTimeoutSet, nil +} + +func (p *Pipeline) RequiresPreInsert() bool { + for _, task := range p.Tasks { + switch task.Type() { + case TaskTypeBridge: + if task.(*BridgeTask).Async == "true" { + return true + } + case TaskTypeETHTx: + // we want to pre-insert pipeline_task_runs always + return true + default: + } + } + return false +} + +func (p *Pipeline) ByDotID(id string) Task { + for _, task := range p.Tasks { + if task.DotID() == id { + return task + } + } + return nil +} + +func Parse(text string) (*Pipeline, error) { + if strings.TrimSpace(text) == "" { + return nil, errors.New("empty pipeline") + } + g := NewGraph() + err := g.UnmarshalText([]byte(text)) + + if err != nil { + return nil, err + } + + p := &Pipeline{ + tree: g, + Tasks: make([]Task, 0, g.Nodes().Len()), + Source: text, + } + + // toposort all the nodes: dependencies ordered before outputs. This also does cycle checking for us. 
+ nodes, err := topo.SortStabilized(g, nil) + + if err != nil { + return nil, errors.Wrap(err, "Unable to topologically sort the graph, cycle detected") + } + + // we need a temporary mapping of graph.IDs to positional ids after toposort + ids := make(map[int64]int) + + // use the new ordering as the id so that we can easily reproduce the original toposort + for id, node := range nodes { + node, is := node.(*GraphNode) + if !is { + panic("unreachable") + } + + if node.dotID == InputTaskKey { + return nil, errors.Errorf("'%v' is a reserved keyword that cannot be used as a task's name", InputTaskKey) + } + + task, err := UnmarshalTaskFromMap(TaskType(node.attrs["type"]), node.attrs, id, node.dotID) + if err != nil { + return nil, err + } + + // re-link the edges + for inputs := g.To(node.ID()); inputs.Next(); { + isImplicitEdge := g.IsImplicitEdge(inputs.Node().ID(), node.ID()) + from := p.Tasks[ids[inputs.Node().ID()]] + + from.Base().outputs = append(from.Base().outputs, task) + task.Base().inputs = append(task.Base().inputs, TaskDependency{!isImplicitEdge, from}) + } + + // This is subtle: g.To doesn't return nodes in deterministic order, which would occasionally swap the order + // of inputs, therefore we manually sort. We don't need to sort outputs the same way because these appends happen + // in p.Task order, which is deterministic via topo.SortStable. 
+ sort.Slice(task.Base().inputs, func(i, j int) bool { + return task.Base().inputs[i].InputTask.ID() < task.Base().inputs[j].InputTask.ID() + }) + + p.Tasks = append(p.Tasks, task) + ids[node.ID()] = id + } + + return p, nil +} diff --git a/core/services/pipeline/graph_fuzz_test.go b/core/services/pipeline/graph_fuzz_test.go new file mode 100644 index 00000000..ccdcd7c3 --- /dev/null +++ b/core/services/pipeline/graph_fuzz_test.go @@ -0,0 +1,66 @@ +//go:build go1.18 + +package pipeline_test + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func FuzzParse(f *testing.F) { + f.Add(`ds1 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}> timeout="10s"];`) + f.Add(`ds1 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}>];`) + f.Add(`ds1 [type=http allowunrestrictednetworkaccess=true method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}> timeout="10s"];`) + f.Add(`ds1 [type=any failEarly=true];`) + f.Add(`ds1 [type=any];`) + f.Add(`ds1 [type=any retries=5];`) + f.Add(`ds1 [type=http retries=10 minBackoff="1s" maxBackoff="30m"];`) + f.Add(pipeline.DotStr) + f.Add(CBORDietEmpty) + f.Add(CBORStdString) + f.Add(` + a [type=bridge]; + b [type=multiply times=1.23]; + a -> b -> a; + `) + f.Add(` +a [type=multiply input="$(val)" times=2] +b1 [type=multiply input="$(a)" times=2] +b2 [type=multiply input="$(a)" times=3] +c [type=median values=<[ $(b1), $(b2) ]> index=0] +a->b1->c; +a->b2->c;`) + f.Add(` +// data source 1 +ds1 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": \\"ETH\\", \\"market\\": \\"USD\\"}"]; +ds1_parse [type=jsonparse path="latest"]; + +// data source 2 +ds2 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": \\"ETH\\", \\"market\\": \\"USD\\"}"]; +ds2_parse [type=jsonparse path="latest"]; + +ds1 -> ds1_parse -> answer1; +ds2 -> 
ds2_parse -> answer1; + +answer1 [type=median index=0]; +`) + f.Add(taskRunWithVars{ + bridgeName: "testBridge", + ds2URL: "https://example.com/path/to/service?with=args&foo=bar", + ds4URL: "http://chain.link", + submitBridgeName: "testSubmitBridge", + includeInputAtKey: "path.to.key", + }.String()) + f.Add(`s->s`) + f.Add(`0->s->s`) + f.Fuzz(func(t *testing.T, spec string) { + if len(spec) > 1_000_000 { + t.Skip() + } + _, err := pipeline.Parse(spec) + if err != nil { + t.Skip() + } + }) +} diff --git a/core/services/pipeline/graph_test.go b/core/services/pipeline/graph_test.go new file mode 100644 index 00000000..b837f7c4 --- /dev/null +++ b/core/services/pipeline/graph_test.go @@ -0,0 +1,269 @@ +package pipeline_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gonum.org/v1/gonum/graph" + + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestGraph_Decode(t *testing.T) { + t.Parallel() + + expected := map[string]map[string]bool{ + "ds1": { + "ds1": false, + "ds1_parse": true, + "ds1_multiply": false, + "ds2": false, + "ds2_parse": false, + "ds2_multiply": false, + "answer1": false, + "answer2": false, + }, + "ds1_parse": { + "ds1": false, + "ds1_parse": false, + "ds1_multiply": true, + "ds2": false, + "ds2_parse": false, + "ds2_multiply": false, + "answer1": false, + "answer2": false, + }, + "ds1_multiply": { + "ds1": false, + "ds1_parse": false, + "ds1_multiply": false, + "ds2": false, + "ds2_parse": false, + "ds2_multiply": false, + "answer1": true, + "answer2": false, + }, + "ds2": { + "ds1": false, + "ds1_parse": false, + "ds1_multiply": false, + "ds2": false, + "ds2_parse": true, + "ds2_multiply": false, + "answer1": false, + "answer2": false, + }, + "ds2_parse": { + "ds1": false, + "ds1_parse": false, + "ds1_multiply": false, + "ds2": false, + "ds2_parse": false, + "ds2_multiply": true, + "answer1": false, + "answer2": false, + }, + "ds2_multiply": { + "ds1": false, + 
"ds1_parse": false, + "ds1_multiply": false, + "ds2": false, + "ds2_parse": false, + "ds2_multiply": false, + "answer1": true, + "answer2": false, + }, + "answer1": { + "ds1": false, + "ds1_parse": false, + "ds1_multiply": false, + "ds2": false, + "ds2_parse": false, + "ds2_multiply": false, + "answer1": false, + "answer2": false, + }, + "answer2": { + "ds1": false, + "ds1_parse": false, + "ds1_multiply": false, + "ds2": false, + "ds2_parse": false, + "ds2_multiply": false, + "answer1": false, + "answer2": false, + }, + } + + g := pipeline.NewGraph() + err := g.UnmarshalText([]byte(pipeline.DotStr)) + require.NoError(t, err) + + nodes := make(map[string]int64) + iter := g.Nodes() + for iter.Next() { + n := iter.Node().(interface { + graph.Node + DOTID() string + }) + nodes[n.DOTID()] = n.ID() + } + + for from, connections := range expected { + for to, connected := range connections { + require.Equal(t, connected, g.HasEdgeFromTo(nodes[from], nodes[to])) + } + } +} + +func TestGraph_TasksInDependencyOrder(t *testing.T) { + t.Parallel() + + p, err := pipeline.Parse(pipeline.DotStr) + require.NoError(t, err) + + answer1 := &pipeline.MedianTask{ + AllowedFaults: "", + } + answer2 := &pipeline.BridgeTask{ + Name: "election_winner", + } + ds1_multiply := &pipeline.MultiplyTask{ + Times: "1.23", + } + ds1_parse := &pipeline.JSONParseTask{ + Path: "one,two", + } + ds1 := &pipeline.BridgeTask{ + Name: "voter_turnout", + } + ds2_multiply := &pipeline.MultiplyTask{ + Times: "4.56", + } + ds2_parse := &pipeline.JSONParseTask{ + Path: "three,four", + } + ds2 := &pipeline.HTTPTask{ + URL: "https://chain.link/voter_turnout/USA-2020", + Method: "GET", + RequestData: `{"hi": "hello"}`, + } + + answer1.BaseTask = pipeline.NewBaseTask( + 6, + "answer1", + []pipeline.TaskDependency{ + { + PropagateResult: false, // propagateResult is false because this dependency is implicit + InputTask: pipeline.Task(ds1_multiply), + }, + { + PropagateResult: true, // propagateResult is true because 
this dependency is explicit in spec + InputTask: pipeline.Task(ds2_multiply), + }, + }, + nil, + 0) + answer2.BaseTask = pipeline.NewBaseTask(7, "answer2", nil, nil, 1) + ds1_multiply.BaseTask = pipeline.NewBaseTask( + 2, + "ds1_multiply", + []pipeline.TaskDependency{{PropagateResult: true, InputTask: pipeline.Task(ds1_parse)}}, + []pipeline.Task{answer1}, + 0) + ds2_multiply.BaseTask = pipeline.NewBaseTask( + 5, + "ds2_multiply", + []pipeline.TaskDependency{{PropagateResult: true, InputTask: pipeline.Task(ds2_parse)}}, + []pipeline.Task{answer1}, + 0) + ds1_parse.BaseTask = pipeline.NewBaseTask( + 1, + "ds1_parse", + []pipeline.TaskDependency{{PropagateResult: true, InputTask: pipeline.Task(ds1)}}, + []pipeline.Task{ds1_multiply}, + 0) + ds2_parse.BaseTask = pipeline.NewBaseTask( + 4, + "ds2_parse", + []pipeline.TaskDependency{{PropagateResult: true, InputTask: pipeline.Task(ds2)}}, + []pipeline.Task{ds2_multiply}, + 0) + ds1.BaseTask = pipeline.NewBaseTask(0, "ds1", nil, []pipeline.Task{ds1_parse}, 0) + ds2.BaseTask = pipeline.NewBaseTask(3, "ds2", nil, []pipeline.Task{ds2_parse}, 0) + + for i, task := range p.Tasks { + // Make sure inputs appear before the task, and outputs don't + for _, input := range task.Inputs() { + require.Contains(t, p.Tasks[:i], input.InputTask) + } + for _, output := range task.Outputs() { + require.NotContains(t, p.Tasks[:i], output) + } + } + + expected := []pipeline.Task{ds1, ds1_parse, ds1_multiply, ds2, ds2_parse, ds2_multiply, answer1, answer2} + require.Len(t, p.Tasks, len(expected)) + + require.Equal(t, expected, p.Tasks) +} + +func TestGraph_HasCycles(t *testing.T) { + t.Parallel() + + _, err := pipeline.Parse(pipeline.DotStr) + require.NoError(t, err) + + _, err = pipeline.Parse(` + a [type=bridge]; + b [type=multiply times=1.23]; + a -> b -> a; + `) + require.Error(t, err) + require.Contains(t, err.Error(), "cycle detected") +} + +func TestGraph_ImplicitDependencies(t *testing.T) { + t.Parallel() + + g := pipeline.NewGraph() 
+ err := g.UnmarshalText([]byte(` + a [type=bridge]; + b [type=multiply times=1.23 data="$(a.a1)" self="$(b)"]; + c [type=xyz times=1.23 input="$(b)"]; + d [type=xyz times=1.23 check="{\"a\": $(jobSpec.id),\"b\":$(c.p)}"]; + `)) + + nodes := make(map[string]int64) + iter := g.Nodes() + for iter.Next() { + n := iter.Node().(interface { + graph.Node + DOTID() string + }) + nodes[n.DOTID()] = n.ID() + } + require.NoError(t, err) + require.Equal(t, 3, g.Edges().Len()) + require.True(t, g.HasEdgeFromTo(nodes["a"], nodes["b"])) + require.True(t, g.HasEdgeFromTo(nodes["b"], nodes["c"])) + require.True(t, g.HasEdgeFromTo(nodes["c"], nodes["d"])) +} + +func TestParse(t *testing.T) { + for _, s := range []struct { + name string + pipeline string + }{ + {"empty", ""}, + {"blank", " "}, + {"foo", "foo"}, + } { + t.Run(s.name, func(t *testing.T) { + _, err := pipeline.Parse(s.pipeline) + assert.Error(t, err) + }) + } + +} diff --git a/core/services/pipeline/helpers_test.go b/core/services/pipeline/helpers_test.go new file mode 100644 index 00000000..c7eba188 --- /dev/null +++ b/core/services/pipeline/helpers_test.go @@ -0,0 +1,65 @@ +package pipeline + +import ( + "net/http" + + "github.com/google/uuid" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" +) + +const ( + DotStr = ` + // data source 1 + ds1 [type=bridge name=voter_turnout]; + ds1_parse [type=jsonparse path="one,two"]; + ds1_multiply [type=multiply times=1.23]; + + // data source 2 + ds2 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}>]; + ds2_parse [type=jsonparse path="three,four"]; + ds2_multiply [type=multiply times=4.56]; + + ds1 -> ds1_parse -> ds1_multiply; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0 input1="$(ds1_multiply)" input2="$(ds2_multiply)"]; + answer2 [type=bridge name=election_winner index=1]; + ` +) + +func (t *BridgeTask) HelperSetDependencies( + 
config Config, + bridgeConfig BridgeConfig, + orm bridges.ORM, + specId int32, + id uuid.UUID, + httpClient *http.Client) { + t.config = config + t.bridgeConfig = bridgeConfig + t.orm = orm + t.uuid = id + t.httpClient = httpClient + t.specId = specId +} + +func (t *HTTPTask) HelperSetDependencies(config Config, restrictedHTTPClient, unrestrictedHTTPClient *http.Client) { + t.config = config + t.httpClient = restrictedHTTPClient + t.unrestrictedHTTPClient = unrestrictedHTTPClient +} + +func (t *ETHCallTask) HelperSetDependencies(legacyChains legacyevm.LegacyChainContainer, config Config, specGasLimit *uint32, jobType string) { + t.legacyChains = legacyChains + t.config = config + t.specGasLimit = specGasLimit + t.jobType = jobType +} + +func (t *ETHTxTask) HelperSetDependencies(legacyChains legacyevm.LegacyChainContainer, keyStore ETHKeyStore, specGasLimit *uint32, jobType string) { + t.legacyChains = legacyChains + t.keyStore = keyStore + t.specGasLimit = specGasLimit + t.jobType = jobType +} diff --git a/core/services/pipeline/internal/eautils/eautils.go b/core/services/pipeline/internal/eautils/eautils.go new file mode 100644 index 00000000..30faa826 --- /dev/null +++ b/core/services/pipeline/internal/eautils/eautils.go @@ -0,0 +1,39 @@ +package eautils + +import ( + "encoding/json" + "net/http" +) + +type AdapterStatus struct { + ErrorMessage *string `json:"errorMessage"` + Error any `json:"error"` + StatusCode *int `json:"statusCode"` + ProviderStatusCode *int `json:"providerStatusCode"` +} + +func BestEffortExtractEAStatus(responseBytes []byte) (code int, ok bool) { + var status AdapterStatus + err := json.Unmarshal(responseBytes, &status) + if err != nil { + return 0, false + } + + if status.StatusCode == nil { + return 0, false + } + + if *status.StatusCode != http.StatusOK { + return *status.StatusCode, true + } + + if status.ProviderStatusCode != nil && *status.ProviderStatusCode != http.StatusOK { + return *status.ProviderStatusCode, true + } + + if 
status.Error != nil { + return http.StatusInternalServerError, true + } + + return *status.StatusCode, true +} diff --git a/core/services/pipeline/internal/eautils/eautils_test.go b/core/services/pipeline/internal/eautils/eautils_test.go new file mode 100644 index 00000000..80183b80 --- /dev/null +++ b/core/services/pipeline/internal/eautils/eautils_test.go @@ -0,0 +1,61 @@ +package eautils + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBestEffortExtractEAStatus(t *testing.T) { + tests := []struct { + name string + arg []byte + expectCode int + expectOk bool + }{ + { + name: "invalid object", + arg: []byte(`{"error": "invalid json object" `), + expectCode: 0, + expectOk: false, + }, + { + name: "no status code in object", + arg: []byte(`{}`), + expectCode: 0, + expectOk: false, + }, + { + name: "invalid status code", + arg: []byte(`{"statusCode":400}`), + expectCode: http.StatusBadRequest, + expectOk: true, + }, + { + name: "invalid provider status code", + arg: []byte(`{"statusCode":200, "providerStatusCode":500}`), + expectCode: http.StatusInternalServerError, + expectOk: true, + }, + { + name: "valid statuses with error message", + arg: []byte(`{"statusCode":200, "providerStatusCode":200, "error": "unexpected error"}`), + expectCode: http.StatusInternalServerError, + expectOk: true, + }, + { + name: "valid status code", + arg: []byte(`{"statusCode":200}`), + expectCode: http.StatusOK, + expectOk: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + code, ok := BestEffortExtractEAStatus(tt.arg) + assert.Equal(t, tt.expectCode, code) + assert.Equal(t, tt.expectOk, ok) + }) + } +} diff --git a/core/services/pipeline/keypath.go b/core/services/pipeline/keypath.go new file mode 100644 index 00000000..189e7073 --- /dev/null +++ b/core/services/pipeline/keypath.go @@ -0,0 +1,38 @@ +package pipeline + +import ( + "strings" + + "github.com/pkg/errors" +) + +var ( + ErrWrongKeypath = 
errors.New("wrong keypath format") +) + +const KeypathSeparator = "." + +// Keypath contains keypath parsed by NewKeypathFromString. +type Keypath struct { + Parts []string +} + +// NewKeypathFromString creates a new Keypath from the given string. +// Returns error if it fails to parse the given keypath string. +func NewKeypathFromString(keypathStr string) (Keypath, error) { + if len(keypathStr) == 0 { + return Keypath{}, nil + } + + parts := strings.Split(keypathStr, KeypathSeparator) + if len(parts) == 0 { + return Keypath{}, errors.Wrapf(ErrWrongKeypath, "empty keypath") + } + for i, part := range parts { + if len(part) == 0 { + return Keypath{}, errors.Wrapf(ErrWrongKeypath, "empty keypath segment at index %d", i) + } + } + + return Keypath{parts}, nil +} diff --git a/core/services/pipeline/keypath_test.go b/core/services/pipeline/keypath_test.go new file mode 100644 index 00000000..8ef226e8 --- /dev/null +++ b/core/services/pipeline/keypath_test.go @@ -0,0 +1,49 @@ +package pipeline_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestKeypath(t *testing.T) { + t.Parallel() + + t.Run("can be constructed from a period-delimited string", func(t *testing.T) { + kp, err := pipeline.NewKeypathFromString("") + assert.NoError(t, err) + assert.Equal(t, pipeline.Keypath{}, kp) + + kp, err = pipeline.NewKeypathFromString("foo") + assert.NoError(t, err) + assert.Equal(t, pipeline.Keypath{[]string{"foo"}}, kp) + + kp, err = pipeline.NewKeypathFromString("foo.bar") + assert.NoError(t, err) + assert.Equal(t, pipeline.Keypath{[]string{"foo", "bar"}}, kp) + + kp, err = pipeline.NewKeypathFromString("a.b.c.d.e") + assert.NoError(t, err) + assert.Equal(t, pipeline.Keypath{[]string{"a", "b", "c", "d", "e"}}, kp) + }) + + t.Run("wrong keypath", func(t *testing.T) { + wrongKeyPath := []string{ + ".", + "..", + "x.", + ".y", + "x.y.", + "x.y..z", + } + + for _, keypath := range 
wrongKeyPath { + t.Run(keypath, func(t *testing.T) { + _, err := pipeline.NewKeypathFromString(keypath) + assert.ErrorIs(t, err, pipeline.ErrWrongKeypath) + }) + } + }) +} diff --git a/core/services/pipeline/mocks/config.go b/core/services/pipeline/mocks/config.go new file mode 100644 index 00000000..4d287f76 --- /dev/null +++ b/core/services/pipeline/mocks/config.go @@ -0,0 +1,119 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + config "github.com/goplugin/plugin-common/pkg/config" + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// Config is an autogenerated mock type for the Config type +type Config struct { + mock.Mock +} + +// DefaultHTTPLimit provides a mock function with given fields: +func (_m *Config) DefaultHTTPLimit() int64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DefaultHTTPLimit") + } + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// DefaultHTTPTimeout provides a mock function with given fields: +func (_m *Config) DefaultHTTPTimeout() config.Duration { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DefaultHTTPTimeout") + } + + var r0 config.Duration + if rf, ok := ret.Get(0).(func() config.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(config.Duration) + } + + return r0 +} + +// MaxRunDuration provides a mock function with given fields: +func (_m *Config) MaxRunDuration() time.Duration { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for MaxRunDuration") + } + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + +// ReaperInterval provides a mock function with given fields: +func (_m *Config) ReaperInterval() time.Duration { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return 
value specified for ReaperInterval") + } + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + +// ReaperThreshold provides a mock function with given fields: +func (_m *Config) ReaperThreshold() time.Duration { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ReaperThreshold") + } + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + +// NewConfig creates a new instance of Config. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConfig(t interface { + mock.TestingT + Cleanup(func()) +}) *Config { + mock := &Config{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/pipeline/mocks/orm.go b/core/services/pipeline/mocks/orm.go new file mode 100644 index 00000000..21c8406c --- /dev/null +++ b/core/services/pipeline/mocks/orm.go @@ -0,0 +1,464 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + models "github.com/goplugin/pluginv3.0/v2/core/store/models" + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + pipeline "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + + time "time" + + uuid "github.com/google/uuid" +) + +// ORM is an autogenerated mock type for the ORM type +type ORM struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *ORM) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CreateRun provides a mock function with given fields: run, qopts +func (_m *ORM) CreateRun(run *pipeline.Run, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, run) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateRun") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*pipeline.Run, ...pg.QOpt) error); ok { + r0 = rf(run, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CreateSpec provides a mock function with given fields: _a0, maxTaskTimeout, qopts +func (_m *ORM) CreateSpec(_a0 pipeline.Pipeline, maxTaskTimeout models.Interval, qopts ...pg.QOpt) (int32, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0, maxTaskTimeout) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateSpec") + } + + var r0 int32 + var r1 error + if rf, ok := ret.Get(0).(func(pipeline.Pipeline, models.Interval, ...pg.QOpt) (int32, error)); ok { + return rf(_a0, maxTaskTimeout, qopts...) 
+ } + if rf, ok := ret.Get(0).(func(pipeline.Pipeline, models.Interval, ...pg.QOpt) int32); ok { + r0 = rf(_a0, maxTaskTimeout, qopts...) + } else { + r0 = ret.Get(0).(int32) + } + + if rf, ok := ret.Get(1).(func(pipeline.Pipeline, models.Interval, ...pg.QOpt) error); ok { + r1 = rf(_a0, maxTaskTimeout, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteRun provides a mock function with given fields: id +func (_m *ORM) DeleteRun(id int64) error { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for DeleteRun") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int64) error); ok { + r0 = rf(id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteRunsOlderThan provides a mock function with given fields: _a0, _a1 +func (_m *ORM) DeleteRunsOlderThan(_a0 context.Context, _a1 time.Duration) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for DeleteRunsOlderThan") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, time.Duration) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FindRun provides a mock function with given fields: id +func (_m *ORM) FindRun(id int64) (pipeline.Run, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for FindRun") + } + + var r0 pipeline.Run + var r1 error + if rf, ok := ret.Get(0).(func(int64) (pipeline.Run, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(int64) pipeline.Run); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(pipeline.Run) + } + + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAllRuns provides a mock function with given fields: +func (_m *ORM) GetAllRuns() ([]pipeline.Run, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetAllRuns") + } + + var r0 []pipeline.Run + var 
r1 error + if rf, ok := ret.Get(0).(func() ([]pipeline.Run, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []pipeline.Run); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]pipeline.Run) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetQ provides a mock function with given fields: +func (_m *ORM) GetQ() pg.Q { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetQ") + } + + var r0 pg.Q + if rf, ok := ret.Get(0).(func() pg.Q); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(pg.Q) + } + + return r0 +} + +// GetUnfinishedRuns provides a mock function with given fields: _a0, _a1, _a2 +func (_m *ORM) GetUnfinishedRuns(_a0 context.Context, _a1 time.Time, _a2 func(pipeline.Run) error) error { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for GetUnfinishedRuns") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, time.Time, func(pipeline.Run) error) error); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// HealthReport provides a mock function with given fields: +func (_m *ORM) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// InsertFinishedRun provides a mock function with given fields: run, saveSuccessfulTaskRuns, qopts +func (_m *ORM) InsertFinishedRun(run *pipeline.Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, run, saveSuccessfulTaskRuns) + _ca = append(_ca, _va...) 
+ ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for InsertFinishedRun") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*pipeline.Run, bool, ...pg.QOpt) error); ok { + r0 = rf(run, saveSuccessfulTaskRuns, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertFinishedRuns provides a mock function with given fields: run, saveSuccessfulTaskRuns, qopts +func (_m *ORM) InsertFinishedRuns(run []*pipeline.Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, run, saveSuccessfulTaskRuns) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for InsertFinishedRuns") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]*pipeline.Run, bool, ...pg.QOpt) error); ok { + r0 = rf(run, saveSuccessfulTaskRuns, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertRun provides a mock function with given fields: run, qopts +func (_m *ORM) InsertRun(run *pipeline.Run, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, run) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for InsertRun") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*pipeline.Run, ...pg.QOpt) error); ok { + r0 = rf(run, qopts...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *ORM) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Ready provides a mock function with given fields: +func (_m *ORM) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *ORM) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StoreRun provides a mock function with given fields: run, qopts +func (_m *ORM) StoreRun(run *pipeline.Run, qopts ...pg.QOpt) (bool, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, run) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for StoreRun") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(*pipeline.Run, ...pg.QOpt) (bool, error)); ok { + return rf(run, qopts...) + } + if rf, ok := ret.Get(0).(func(*pipeline.Run, ...pg.QOpt) bool); ok { + r0 = rf(run, qopts...) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(*pipeline.Run, ...pg.QOpt) error); ok { + r1 = rf(run, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateTaskRunResult provides a mock function with given fields: taskID, result +func (_m *ORM) UpdateTaskRunResult(taskID uuid.UUID, result pipeline.Result) (pipeline.Run, bool, error) { + ret := _m.Called(taskID, result) + + if len(ret) == 0 { + panic("no return value specified for UpdateTaskRunResult") + } + + var r0 pipeline.Run + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(uuid.UUID, pipeline.Result) (pipeline.Run, bool, error)); ok { + return rf(taskID, result) + } + if rf, ok := ret.Get(0).(func(uuid.UUID, pipeline.Result) pipeline.Run); ok { + r0 = rf(taskID, result) + } else { + r0 = ret.Get(0).(pipeline.Run) + } + + if rf, ok := ret.Get(1).(func(uuid.UUID, pipeline.Result) bool); ok { + r1 = rf(taskID, result) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(uuid.UUID, pipeline.Result) error); ok { + r2 = rf(taskID, result) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewORM creates a new instance of ORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewORM(t interface { + mock.TestingT + Cleanup(func()) +}) *ORM { + mock := &ORM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/pipeline/mocks/pipeline_param_unmarshaler.go b/core/services/pipeline/mocks/pipeline_param_unmarshaler.go new file mode 100644 index 00000000..40f2ba4d --- /dev/null +++ b/core/services/pipeline/mocks/pipeline_param_unmarshaler.go @@ -0,0 +1,42 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// PipelineParamUnmarshaler is an autogenerated mock type for the PipelineParamUnmarshaler type +type PipelineParamUnmarshaler struct { + mock.Mock +} + +// UnmarshalPipelineParam provides a mock function with given fields: val +func (_m *PipelineParamUnmarshaler) UnmarshalPipelineParam(val interface{}) error { + ret := _m.Called(val) + + if len(ret) == 0 { + panic("no return value specified for UnmarshalPipelineParam") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(val) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewPipelineParamUnmarshaler creates a new instance of PipelineParamUnmarshaler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPipelineParamUnmarshaler(t interface { + mock.TestingT + Cleanup(func()) +}) *PipelineParamUnmarshaler { + mock := &PipelineParamUnmarshaler{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/pipeline/mocks/runner.go b/core/services/pipeline/mocks/runner.go new file mode 100644 index 00000000..ed847d67 --- /dev/null +++ b/core/services/pipeline/mocks/runner.go @@ -0,0 +1,302 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + logger "github.com/goplugin/pluginv3.0/v2/core/logger" + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + pipeline "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + + uuid "github.com/google/uuid" +) + +// Runner is an autogenerated mock type for the Runner type +type Runner struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *Runner) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ExecuteAndInsertFinishedRun provides a mock function with given fields: ctx, spec, vars, l, saveSuccessfulTaskRuns +func (_m *Runner) ExecuteAndInsertFinishedRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger, saveSuccessfulTaskRuns bool) (int64, pipeline.FinalResult, error) { + ret := _m.Called(ctx, spec, vars, l, saveSuccessfulTaskRuns) + + if len(ret) == 0 { + panic("no return value specified for ExecuteAndInsertFinishedRun") + } + + var r0 int64 + var r1 pipeline.FinalResult + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, pipeline.Spec, pipeline.Vars, logger.Logger, bool) (int64, pipeline.FinalResult, error)); ok { + return rf(ctx, spec, vars, l, saveSuccessfulTaskRuns) + } + if rf, ok := ret.Get(0).(func(context.Context, pipeline.Spec, pipeline.Vars, logger.Logger, bool) int64); ok { + r0 = rf(ctx, spec, vars, l, saveSuccessfulTaskRuns) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, pipeline.Spec, pipeline.Vars, logger.Logger, bool) pipeline.FinalResult); ok { + r1 = rf(ctx, spec, vars, l, saveSuccessfulTaskRuns) + } else { + r1 = ret.Get(1).(pipeline.FinalResult) + } + + if rf, ok := ret.Get(2).(func(context.Context, pipeline.Spec, 
pipeline.Vars, logger.Logger, bool) error); ok { + r2 = rf(ctx, spec, vars, l, saveSuccessfulTaskRuns) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ExecuteRun provides a mock function with given fields: ctx, spec, vars, l +func (_m *Runner) ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (*pipeline.Run, pipeline.TaskRunResults, error) { + ret := _m.Called(ctx, spec, vars, l) + + if len(ret) == 0 { + panic("no return value specified for ExecuteRun") + } + + var r0 *pipeline.Run + var r1 pipeline.TaskRunResults + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, pipeline.Spec, pipeline.Vars, logger.Logger) (*pipeline.Run, pipeline.TaskRunResults, error)); ok { + return rf(ctx, spec, vars, l) + } + if rf, ok := ret.Get(0).(func(context.Context, pipeline.Spec, pipeline.Vars, logger.Logger) *pipeline.Run); ok { + r0 = rf(ctx, spec, vars, l) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pipeline.Run) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, pipeline.Spec, pipeline.Vars, logger.Logger) pipeline.TaskRunResults); ok { + r1 = rf(ctx, spec, vars, l) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(pipeline.TaskRunResults) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, pipeline.Spec, pipeline.Vars, logger.Logger) error); ok { + r2 = rf(ctx, spec, vars, l) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// HealthReport provides a mock function with given fields: +func (_m *Runner) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// InsertFinishedRun provides a mock function with given fields: run, saveSuccessfulTaskRuns, qopts +func (_m *Runner) 
InsertFinishedRun(run *pipeline.Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, run, saveSuccessfulTaskRuns) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for InsertFinishedRun") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*pipeline.Run, bool, ...pg.QOpt) error); ok { + r0 = rf(run, saveSuccessfulTaskRuns, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertFinishedRuns provides a mock function with given fields: runs, saveSuccessfulTaskRuns, qopts +func (_m *Runner) InsertFinishedRuns(runs []*pipeline.Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, runs, saveSuccessfulTaskRuns) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for InsertFinishedRuns") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]*pipeline.Run, bool, ...pg.QOpt) error); ok { + r0 = rf(runs, saveSuccessfulTaskRuns, qopts...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *Runner) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// OnRunFinished provides a mock function with given fields: _a0 +func (_m *Runner) OnRunFinished(_a0 func(*pipeline.Run)) { + _m.Called(_a0) +} + +// Ready provides a mock function with given fields: +func (_m *Runner) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ResumeRun provides a mock function with given fields: taskID, value, err +func (_m *Runner) ResumeRun(taskID uuid.UUID, value interface{}, err error) error { + ret := _m.Called(taskID, value, err) + + if len(ret) == 0 { + panic("no return value specified for ResumeRun") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uuid.UUID, interface{}, error) error); ok { + r0 = rf(taskID, value, err) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Run provides a mock function with given fields: ctx, run, l, saveSuccessfulTaskRuns, fn +func (_m *Runner) Run(ctx context.Context, run *pipeline.Run, l logger.Logger, saveSuccessfulTaskRuns bool, fn func(pg.Queryer) error) (bool, error) { + ret := _m.Called(ctx, run, l, saveSuccessfulTaskRuns, fn) + + if len(ret) == 0 { + panic("no return value specified for Run") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *pipeline.Run, logger.Logger, bool, func(pg.Queryer) error) (bool, error)); ok { + return rf(ctx, run, l, saveSuccessfulTaskRuns, fn) + } + if rf, ok := ret.Get(0).(func(context.Context, *pipeline.Run, logger.Logger, bool, func(pg.Queryer) error) 
bool); ok { + r0 = rf(ctx, run, l, saveSuccessfulTaskRuns, fn) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, *pipeline.Run, logger.Logger, bool, func(pg.Queryer) error) error); ok { + r1 = rf(ctx, run, l, saveSuccessfulTaskRuns, fn) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Start provides a mock function with given fields: _a0 +func (_m *Runner) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewRunner creates a new instance of Runner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRunner(t interface { + mock.TestingT + Cleanup(func()) +}) *Runner { + mock := &Runner{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/pipeline/models.go b/core/services/pipeline/models.go new file mode 100644 index 00000000..7fc754e4 --- /dev/null +++ b/core/services/pipeline/models.go @@ -0,0 +1,339 @@ +package pipeline + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "math/big" + "strconv" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "go.uber.org/multierr" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +type Spec struct { + ID int32 + DotDagSource string `json:"dotDagSource"` + CreatedAt time.Time `json:"-"` + MaxTaskDuration models.Interval `json:"-"` + GasLimit *uint32 `json:"-"` + ForwardingAllowed bool `json:"-"` + + JobID int32 `json:"-"` + JobName string `json:"-"` + JobType string `json:"-"` + + Pipeline *Pipeline `json:"-" db:"-"` // This may be nil, or may be populated 
manually as a cache. There is no locking on this, so be careful +} + +func (s *Spec) GetOrParsePipeline() (*Pipeline, error) { + if s.Pipeline != nil { + return s.Pipeline, nil + } + return s.ParsePipeline() +} + +func (s *Spec) ParsePipeline() (*Pipeline, error) { + return Parse(s.DotDagSource) +} + +type Run struct { + ID int64 `json:"-"` + PipelineSpecID int32 `json:"-"` + PipelineSpec Spec `json:"pipelineSpec"` + Meta JSONSerializable `json:"meta"` + // The errors are only ever strings + // DB example: [null, null, "my error"] + AllErrors RunErrors `json:"all_errors"` + FatalErrors RunErrors `json:"fatal_errors"` + Inputs JSONSerializable `json:"inputs"` + // Its expected that Output.Val is of type []interface{}. + // DB example: [1234, {"a": 10}, null] + Outputs JSONSerializable `json:"outputs"` + CreatedAt time.Time `json:"createdAt"` + FinishedAt null.Time `json:"finishedAt"` + PipelineTaskRuns []TaskRun `json:"taskRuns"` + State RunStatus `json:"state"` + + Pending bool + // FailSilently is used to signal that a task with the failEarly flag has failed, and we want to not put this in the db + FailSilently bool +} + +func (r Run) GetID() string { + return fmt.Sprintf("%v", r.ID) +} + +func (r *Run) SetID(value string) error { + ID, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + r.ID = ID + return nil +} + +func (r Run) HasFatalErrors() bool { + for _, err := range r.FatalErrors { + if !err.IsZero() { + return true + } + } + return false +} + +func (r Run) HasErrors() bool { + for _, err := range r.AllErrors { + if !err.IsZero() { + return true + } + } + return false +} + +// Status determines the status of the run. 
+func (r *Run) Status() RunStatus { + if r.HasFatalErrors() { + return RunStatusErrored + } else if r.FinishedAt.Valid { + return RunStatusCompleted + } + + return RunStatusRunning +} + +func (r *Run) ByDotID(id string) *TaskRun { + for i, run := range r.PipelineTaskRuns { + if run.DotID == id { + return &r.PipelineTaskRuns[i] + } + } + return nil +} + +func (r *Run) StringOutputs() ([]*string, error) { + // The UI expects all outputs to be strings. + var outputs []*string + // Note for async jobs, Outputs can be nil/invalid + if r.Outputs.Valid { + outs, ok := r.Outputs.Val.([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to process output type %T", r.Outputs.Val) + } + + if r.Outputs.Valid && r.Outputs.Val != nil { + for _, out := range outs { + switch v := out.(type) { + case string: + s := v + outputs = append(outputs, &s) + case map[string]interface{}: + b, _ := json.Marshal(v) + bs := string(b) + outputs = append(outputs, &bs) + case decimal.Decimal: + s := v.String() + outputs = append(outputs, &s) + case *decimal.Decimal: + s := v.String() + outputs = append(outputs, &s) + case big.Int: + s := v.String() + outputs = append(outputs, &s) + case *big.Int: + s := v.String() + outputs = append(outputs, &s) + case int8, uint8, int16, uint16, int32, uint32, int64, uint64: + s := fmt.Sprintf("%v", v) + outputs = append(outputs, &s) + case float64: + s := strconv.FormatFloat(v, 'f', -1, 64) + outputs = append(outputs, &s) + case nil: + outputs = append(outputs, nil) + default: + return nil, fmt.Errorf("unable to process output type %T", out) + } + } + } + } + + return outputs, nil +} + +func (r *Run) StringFatalErrors() []*string { + var fatalErrors []*string + + for _, err := range r.FatalErrors { + if err.Valid { + s := err.String + fatalErrors = append(fatalErrors, &s) + } else { + fatalErrors = append(fatalErrors, nil) + } + } + + return fatalErrors +} + +func (r *Run) StringAllErrors() []*string { + var allErrors []*string + + for _, err := range 
r.AllErrors { + if err.Valid { + s := err.String + allErrors = append(allErrors, &s) + } else { + allErrors = append(allErrors, nil) + } + } + + return allErrors +} + +type RunErrors []null.String + +func (re *RunErrors) Scan(value interface{}) error { + if value == nil { + return nil + } + bytes, ok := value.([]byte) + if !ok { + return errors.Errorf("RunErrors#Scan received a value of type %T", value) + } + return json.Unmarshal(bytes, re) +} + +func (re RunErrors) Value() (driver.Value, error) { + if len(re) == 0 { + return nil, nil + } + return json.Marshal(re) +} + +func (re RunErrors) HasError() bool { + for _, e := range re { + if !e.IsZero() { + return true + } + } + return false +} + +// ToError coalesces all non-nil errors into a single error object. +// This is useful for logging. +func (re RunErrors) ToError() error { + toErr := func(ns null.String) error { + if !ns.IsZero() { + return errors.New(ns.String) + } + return nil + } + errs := []error{} + for _, e := range re { + errs = append(errs, toErr(e)) + } + return multierr.Combine(errs...) 
+} + +type ResumeRequest struct { + Error null.String `json:"error"` + Value json.RawMessage `json:"value"` +} + +func (rr ResumeRequest) ToResult() (Result, error) { + var res Result + if rr.Error.Valid && rr.Value == nil { + res.Error = errors.New(rr.Error.ValueOrZero()) + return res, nil + } + if !rr.Error.Valid && rr.Value != nil { + res.Value = []byte(rr.Value) + return res, nil + } + return Result{}, errors.New("must provide only one of either 'value' or 'error' key") +} + +type TaskRun struct { + ID uuid.UUID `json:"id"` + Type TaskType `json:"type"` + PipelineRun Run `json:"-"` + PipelineRunID int64 `json:"-"` + Output JSONSerializable `json:"output"` + Error null.String `json:"error"` + CreatedAt time.Time `json:"createdAt"` + FinishedAt null.Time `json:"finishedAt"` + Index int32 `json:"index"` + DotID string `json:"dotId"` + + // Used internally for sorting completed results + task Task +} + +func (tr TaskRun) GetID() string { + return fmt.Sprintf("%v", tr.ID) +} + +func (tr *TaskRun) SetID(value string) error { + ID, err := uuid.Parse(value) + if err != nil { + return err + } + tr.ID = ID + return nil +} + +func (tr TaskRun) GetDotID() string { + return tr.DotID +} + +func (tr TaskRun) Result() Result { + var result Result + if !tr.Error.IsZero() { + result.Error = errors.New(tr.Error.ValueOrZero()) + } else if tr.Output.Valid && tr.Output.Val != nil { + result.Value = tr.Output.Val + } + return result +} + +func (tr *TaskRun) IsPending() bool { + return !tr.FinishedAt.Valid && tr.Output.Empty() && tr.Error.IsZero() +} + +// RunStatus represents the status of a run +type RunStatus string + +const ( + // RunStatusUnknown is the when the run status cannot be determined. + RunStatusUnknown RunStatus = "unknown" + // RunStatusRunning is used for when a run is actively being executed. + RunStatusRunning RunStatus = "running" + // RunStatusSuspended is used when a run is paused and awaiting further results. 
+ RunStatusSuspended RunStatus = "suspended" + // RunStatusErrored is used for when a run has errored and will not complete. + RunStatusErrored RunStatus = "errored" + // RunStatusCompleted is used for when a run has successfully completed execution. + RunStatusCompleted RunStatus = "completed" +) + +// Completed returns true if the status is RunStatusCompleted. +func (s RunStatus) Completed() bool { + return s == RunStatusCompleted +} + +// Errored returns true if the status is RunStatusErrored. +func (s RunStatus) Errored() bool { + return s == RunStatusErrored +} + +// Finished returns true if the status is final and can't be changed. +func (s RunStatus) Finished() bool { + return s.Completed() || s.Errored() +} diff --git a/core/services/pipeline/models_test.go b/core/services/pipeline/models_test.go new file mode 100644 index 00000000..bdb7555a --- /dev/null +++ b/core/services/pipeline/models_test.go @@ -0,0 +1,132 @@ +package pipeline_test + +import ( + "math/big" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestRun_Status(t *testing.T) { + t.Parallel() + + assert.Equal(t, pipeline.RunStatusUnknown.Finished(), false) + assert.Equal(t, pipeline.RunStatusRunning.Finished(), false) + assert.Equal(t, pipeline.RunStatusCompleted.Finished(), true) + assert.Equal(t, pipeline.RunStatusErrored.Finished(), true) + + assert.Equal(t, pipeline.RunStatusUnknown.Errored(), false) + assert.Equal(t, pipeline.RunStatusRunning.Errored(), false) + assert.Equal(t, pipeline.RunStatusCompleted.Errored(), false) + assert.Equal(t, pipeline.RunStatusErrored.Errored(), true) + + now := null.TimeFrom(time.Now()) + + testCases := []struct { + name string + run *pipeline.Run + want pipeline.RunStatus + }{ + { + name: "In Progress", + run: &pipeline.Run{ + AllErrors: pipeline.RunErrors{}, + FatalErrors: 
pipeline.RunErrors{}, + Outputs: pipeline.JSONSerializable{}, + FinishedAt: null.Time{}, + }, + want: pipeline.RunStatusRunning, + }, + { + name: "Completed", + run: &pipeline.Run{ + AllErrors: pipeline.RunErrors{}, + FatalErrors: pipeline.RunErrors{}, + Outputs: pipeline.JSONSerializable{Val: []interface{}{10, 10}, Valid: true}, + FinishedAt: now, + }, + want: pipeline.RunStatusCompleted, + }, + { + name: "Error", + run: &pipeline.Run{ + AllErrors: pipeline.RunErrors{null.StringFrom(errors.New("fail").Error())}, + FatalErrors: pipeline.RunErrors{null.StringFrom(errors.New("fail").Error())}, + Outputs: pipeline.JSONSerializable{}, + FinishedAt: null.Time{}, + }, + want: pipeline.RunStatusErrored, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.want, tc.run.Status()) + }) + } +} + +func TestRunErrors_ToError(t *testing.T) { + runErrors := pipeline.RunErrors{} + runErrors = append(runErrors, null.NewString("bad thing happened", true)) + runErrors = append(runErrors, null.NewString("pretty bad thing happened", true)) + runErrors = append(runErrors, null.NewString("", false)) + expected := errors.New("bad thing happened; pretty bad thing happened") + require.Equal(t, expected.Error(), runErrors.ToError().Error()) +} + +func TestRun_StringOutputs(t *testing.T) { + t.Parallel() + + t.Run("invalid outputs", func(t *testing.T) { + run := &pipeline.Run{ + Outputs: pipeline.JSONSerializable{ + Valid: false, + }, + } + outputs, err := run.StringOutputs() + assert.NoError(t, err) + assert.Empty(t, outputs) + }) + + big := big.NewInt(123) + dec := mustDecimal(t, "123") + + testCases := []struct { + name string + val interface{} + want string + }{ + {"int64", int64(123), "123"}, + {"uint64", uint64(123), "123"}, + {"float64", float64(123.456), "123.456"}, + {"large float64", float64(9007199254740991231), "9007199254740991000"}, + {"big.Int", *big, "123"}, + {"*big.Int", big, "123"}, + {"decimal", *dec, "123"}, + {"*decimal", 
dec, "123"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run := &pipeline.Run{ + Outputs: pipeline.JSONSerializable{ + Valid: true, + Val: []interface{}{tc.val}, + }, + } + t.Log(tc.val) + outputs, err := run.StringOutputs() + assert.NoError(t, err) + assert.NotNil(t, outputs) + assert.Len(t, outputs, 1) + assert.Equal(t, tc.want, *outputs[0]) + }) + } +} diff --git a/core/services/pipeline/orm.go b/core/services/pipeline/orm.go new file mode 100644 index 00000000..500e0159 --- /dev/null +++ b/core/services/pipeline/orm.go @@ -0,0 +1,706 @@ +package pipeline + +import ( + "context" + "database/sql" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +// KeepersObservationSource is the same for all keeper jobs and it is not persisted in DB +const KeepersObservationSource = ` + encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.effectiveKeeperAddress)}"] + check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + gasUnlimited=true + gasPrice="$(jobSpec.gasPrice)" + gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] + decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] + calculate_perform_data_len [type=length + input="$(decode_check_upkeep_tx.performData)"] + perform_data_lessthan_limit [type=lessthan + left="$(calculate_perform_data_len)" + right="$(jobSpec.maxPerformDataSize)"] + 
check_perform_data_limit [type=conditional + failEarly=true + data="$(perform_data_lessthan_limit)"] + encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] + simulate_perform_upkeep_tx [type=ethcall + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + from="$(jobSpec.effectiveKeeperAddress)" + gasUnlimited=true + data="$(encode_perform_upkeep_tx)"] + decode_check_perform_tx [type=ethabidecode + abi="bool success"] + check_success [type=conditional + failEarly=true + data="$(decode_check_perform_tx.success)"] + perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + from="[$(jobSpec.fromAddress)]" + evmChainID="$(jobSpec.evmChainID)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID),\"upkeepID\":$(jobSpec.prettyID)}"] + encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> calculate_perform_data_len -> perform_data_lessthan_limit -> check_perform_data_limit -> encode_perform_upkeep_tx -> simulate_perform_upkeep_tx -> decode_check_perform_tx -> check_success -> perform_upkeep_tx +` + +//go:generate mockery --quiet --name ORM --output ./mocks/ --case=underscore + +type ORM interface { + services.Service + CreateSpec(pipeline Pipeline, maxTaskTimeout models.Interval, qopts ...pg.QOpt) (int32, error) + CreateRun(run *Run, qopts ...pg.QOpt) (err error) + InsertRun(run *Run, qopts ...pg.QOpt) error + DeleteRun(id int64) error + StoreRun(run *Run, qopts ...pg.QOpt) (restart bool, err error) + UpdateTaskRunResult(taskID uuid.UUID, result Result) (run Run, start bool, err error) + InsertFinishedRun(run *Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) (err error) + + // InsertFinishedRuns inserts all the given runs into the database. 
+ // If saveSuccessfulTaskRuns is false, only errored runs are saved. + InsertFinishedRuns(run []*Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) (err error) + + DeleteRunsOlderThan(context.Context, time.Duration) error + FindRun(id int64) (Run, error) + GetAllRuns() ([]Run, error) + GetUnfinishedRuns(context.Context, time.Time, func(run Run) error) error + GetQ() pg.Q +} + +type orm struct { + services.StateMachine + q pg.Q + lggr logger.Logger + maxSuccessfulRuns uint64 + // jobID => count + pm sync.Map + wg sync.WaitGroup + ctx context.Context + cncl context.CancelFunc +} + +var _ ORM = (*orm)(nil) + +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig, jobPipelineMaxSuccessfulRuns uint64) *orm { + ctx, cancel := context.WithCancel(context.Background()) + return &orm{ + services.StateMachine{}, + pg.NewQ(db, lggr, cfg), + lggr.Named("PipelineORM"), + jobPipelineMaxSuccessfulRuns, + sync.Map{}, + sync.WaitGroup{}, + ctx, + cancel, + } +} + +func (o *orm) Start(_ context.Context) error { + return o.StartOnce("pipeline.ORM", func() error { + var msg string + if o.maxSuccessfulRuns == 0 { + msg = "Pipeline runs saving is disabled for all jobs: MaxSuccessfulRuns=0" + } else { + msg = fmt.Sprintf("Pipeline runs will be pruned above per-job limit of MaxSuccessfulRuns=%d", o.maxSuccessfulRuns) + } + o.lggr.Info(msg) + return nil + }) +} + +func (o *orm) Close() error { + return o.StopOnce("pipeline.ORM", func() error { + o.cncl() + o.wg.Wait() + return nil + }) +} + +func (o *orm) Name() string { + return o.lggr.Name() +} + +func (o *orm) HealthReport() map[string]error { + return map[string]error{o.Name(): o.Healthy()} +} + +func (o *orm) CreateSpec(pipeline Pipeline, maxTaskDuration models.Interval, qopts ...pg.QOpt) (id int32, err error) { + q := o.q.WithOpts(qopts...) 
+ sql := `INSERT INTO pipeline_specs (dot_dag_source, max_task_duration, created_at) + VALUES ($1, $2, NOW()) + RETURNING id;` + err = q.Get(&id, sql, pipeline.Source, maxTaskDuration) + return id, errors.WithStack(err) +} + +func (o *orm) CreateRun(run *Run, qopts ...pg.QOpt) (err error) { + if run.CreatedAt.IsZero() { + return errors.New("run.CreatedAt must be set") + } + + q := o.q.WithOpts(qopts...) + err = q.Transaction(func(tx pg.Queryer) error { + if e := o.InsertRun(run, pg.WithQueryer(tx)); e != nil { + return errors.Wrap(e, "error inserting pipeline_run") + } + + // Now create pipeline_task_runs if any + if len(run.PipelineTaskRuns) == 0 { + return nil + } + + // update the ID key everywhere + for i := range run.PipelineTaskRuns { + run.PipelineTaskRuns[i].PipelineRunID = run.ID + } + + sql := ` + INSERT INTO pipeline_task_runs (pipeline_run_id, id, type, index, output, error, dot_id, created_at) + VALUES (:pipeline_run_id, :id, :type, :index, :output, :error, :dot_id, :created_at);` + _, err = tx.NamedExec(sql, run.PipelineTaskRuns) + return err + }) + + return errors.Wrap(err, "CreateRun failed") +} + +// InsertRun inserts a run into the database +func (o *orm) InsertRun(run *Run, qopts ...pg.QOpt) error { + if run.Status() == RunStatusCompleted { + defer o.Prune(o.q, run.PipelineSpecID) + } + q := o.q.WithOpts(qopts...) + sql := `INSERT INTO pipeline_runs (pipeline_spec_id, meta, all_errors, fatal_errors, inputs, outputs, created_at, finished_at, state) + VALUES (:pipeline_spec_id, :meta, :all_errors, :fatal_errors, :inputs, :outputs, :created_at, :finished_at, :state) + RETURNING *;` + return q.GetNamed(sql, run, run) +} + +// StoreRun will persist a partially executed run before suspending, or finish a run. +// If `restart` is true, then new task run data is available and the run should be resumed immediately. +func (o *orm) StoreRun(run *Run, qopts ...pg.QOpt) (restart bool, err error) { + q := o.q.WithOpts(qopts...) 
+ err = q.Transaction(func(tx pg.Queryer) error { + finished := run.FinishedAt.Valid + if !finished { + // Lock the current run. This prevents races with /v2/resume + sql := `SELECT id FROM pipeline_runs WHERE id = $1 FOR UPDATE;` + if _, err = tx.Exec(sql, run.ID); err != nil { + return errors.Wrap(err, "StoreRun") + } + + taskRuns := []TaskRun{} + // Reload task runs, we want to check for any changes while the run was ongoing + if err = sqlx.Select(tx, &taskRuns, `SELECT * FROM pipeline_task_runs WHERE pipeline_run_id = $1`, run.ID); err != nil { + return errors.Wrap(err, "StoreRun") + } + + // Construct a temporary run so we can use r.ByDotID + tempRun := Run{PipelineTaskRuns: taskRuns} + + // Diff with current state, if updated, swap run.PipelineTaskRuns and early return with restart = true + for i, tr := range run.PipelineTaskRuns { + if !tr.IsPending() { + continue + } + + // Look for new data + if taskRun := tempRun.ByDotID(tr.DotID); taskRun != nil && !taskRun.IsPending() { + // Swap in the latest state + run.PipelineTaskRuns[i] = *taskRun + restart = true + } + } + + if restart { + return nil + } + + // Suspend the run + run.State = RunStatusSuspended + if _, err = sqlx.NamedExec(tx, `UPDATE pipeline_runs SET state = :state WHERE id = :id`, run); err != nil { + return errors.Wrap(err, "StoreRun") + } + } else { + defer o.Prune(tx, run.PipelineSpecID) + // Simply finish the run, no need to do any sort of locking + if run.Outputs.Val == nil || len(run.FatalErrors)+len(run.AllErrors) == 0 { + return errors.Errorf("run must have both Outputs and Errors, got Outputs: %#v, FatalErrors: %#v, AllErrors: %#v", run.Outputs.Val, run.FatalErrors, run.AllErrors) + } + sql := `UPDATE pipeline_runs SET state = :state, finished_at = :finished_at, all_errors= :all_errors, fatal_errors= :fatal_errors, outputs = :outputs WHERE id = :id` + if _, err = sqlx.NamedExec(tx, sql, run); err != nil { + return errors.Wrap(err, "StoreRun") + } + } + + sql := ` + INSERT INTO 
pipeline_task_runs (pipeline_run_id, id, type, index, output, error, dot_id, created_at, finished_at) + VALUES (:pipeline_run_id, :id, :type, :index, :output, :error, :dot_id, :created_at, :finished_at) + ON CONFLICT (pipeline_run_id, dot_id) DO UPDATE SET + output = EXCLUDED.output, error = EXCLUDED.error, finished_at = EXCLUDED.finished_at + RETURNING *; + ` + + // NOTE: can't use Select() to auto scan because we're using NamedQuery, + // sqlx.Named + Select is possible but it's about the same amount of code + var rows *sqlx.Rows + rows, err = sqlx.NamedQuery(tx, sql, run.PipelineTaskRuns) + if err != nil { + return errors.Wrap(err, "StoreRun") + } + taskRuns := []TaskRun{} + if err = sqlx.StructScan(rows, &taskRuns); err != nil { + return errors.Wrap(err, "StoreRun") + } + // replace with new task run data + run.PipelineTaskRuns = taskRuns + return nil + }) + return +} + +// DeleteRun cleans up a run that failed and is marked failEarly (should leave no trace of the run) +func (o *orm) DeleteRun(id int64) error { + // NOTE: this will cascade and wipe pipeline_task_runs too + _, err := o.q.Exec(`DELETE FROM pipeline_runs WHERE id = $1`, id) + return err +} + +func (o *orm) UpdateTaskRunResult(taskID uuid.UUID, result Result) (run Run, start bool, err error) { + if result.OutputDB().Valid && result.ErrorDB().Valid { + panic("run result must specify either output or error, not both") + } + err = o.q.Transaction(func(tx pg.Queryer) error { + sql := ` + SELECT pipeline_runs.*, pipeline_specs.dot_dag_source "pipeline_spec.dot_dag_source" + FROM pipeline_runs + JOIN pipeline_task_runs ON (pipeline_task_runs.pipeline_run_id = pipeline_runs.id) + JOIN pipeline_specs ON (pipeline_specs.id = pipeline_runs.pipeline_spec_id) + WHERE pipeline_task_runs.id = $1 AND pipeline_runs.state in ('running', 'suspended') + FOR UPDATE` + if err = tx.Get(&run, sql, taskID); err != nil { + return fmt.Errorf("failed to find pipeline run for ID %s: %w", taskID.String(), err) + } + + // 
Update the task with result + sql = `UPDATE pipeline_task_runs SET output = $2, error = $3, finished_at = $4 WHERE id = $1` + if _, err = tx.Exec(sql, taskID, result.OutputDB(), result.ErrorDB(), time.Now()); err != nil { + return fmt.Errorf("failed to update pipeline task run: %w", err) + } + + if run.State == RunStatusSuspended { + start = true + run.State = RunStatusRunning + + sql = `UPDATE pipeline_runs SET state = $2 WHERE id = $1` + if _, err = tx.Exec(sql, run.ID, run.State); err != nil { + return fmt.Errorf("failed to update pipeline run state: %w", err) + } + } + + return loadAssociations(tx, []*Run{&run}) + }) + + return run, start, err +} + +// InsertFinishedRuns inserts all the given runs into the database. +func (o *orm) InsertFinishedRuns(runs []*Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + err := q.Transaction(func(tx pg.Queryer) error { + pipelineRunsQuery := ` +INSERT INTO pipeline_runs + (pipeline_spec_id, meta, all_errors, fatal_errors, inputs, outputs, created_at, finished_at, state) +VALUES + (:pipeline_spec_id, :meta, :all_errors, :fatal_errors, :inputs, :outputs, :created_at, :finished_at, :state) +RETURNING id + ` + rows, errQ := tx.NamedQuery(pipelineRunsQuery, runs) + if errQ != nil { + return errors.Wrap(errQ, "inserting finished pipeline runs") + } + defer rows.Close() + + var runIDs []int64 + for rows.Next() { + var runID int64 + if errS := rows.Scan(&runID); errS != nil { + return errors.Wrap(errS, "scanning pipeline runs id row") + } + runIDs = append(runIDs, runID) + } + + pipelineSpecIDm := make(map[int32]struct{}) + for i, run := range runs { + pipelineSpecIDm[run.PipelineSpecID] = struct{}{} + for j := range run.PipelineTaskRuns { + run.PipelineTaskRuns[j].PipelineRunID = runIDs[i] + } + } + + defer func() { + for pipelineSpecID := range pipelineSpecIDm { + o.Prune(tx, pipelineSpecID) + } + }() + + pipelineTaskRunsQuery := ` +INSERT INTO pipeline_task_runs (pipeline_run_id, id, type, 
index, output, error, dot_id, created_at, finished_at) +VALUES (:pipeline_run_id, :id, :type, :index, :output, :error, :dot_id, :created_at, :finished_at); + ` + var pipelineTaskRuns []TaskRun + for _, run := range runs { + if !saveSuccessfulTaskRuns && !run.HasErrors() { + continue + } + pipelineTaskRuns = append(pipelineTaskRuns, run.PipelineTaskRuns...) + } + + _, errE := tx.NamedExec(pipelineTaskRunsQuery, pipelineTaskRuns) + return errors.Wrap(errE, "insert pipeline task runs") + }) + return errors.Wrap(err, "InsertFinishedRuns failed") +} + +func (o *orm) checkFinishedRun(run *Run, saveSuccessfulTaskRuns bool) error { + if run.CreatedAt.IsZero() { + return errors.New("run.CreatedAt must be set") + } + if run.FinishedAt.IsZero() { + return errors.New("run.FinishedAt must be set") + } + if run.Outputs.Val == nil || len(run.FatalErrors)+len(run.AllErrors) == 0 { + return errors.Errorf("run must have both Outputs and Errors, got Outputs: %#v, FatalErrors: %#v, AllErrors: %#v", run.Outputs.Val, run.FatalErrors, run.AllErrors) + } + if len(run.PipelineTaskRuns) == 0 && (saveSuccessfulTaskRuns || run.HasErrors()) { + return errors.New("must provide task run results") + } + return nil +} + +// InsertFinishedRun inserts the given run into the database. +// If saveSuccessfulTaskRuns = false, we only save errored runs. +// That way if the job is run frequently (such as OCR) we avoid saving a large number of successful task runs +// which do not provide much value. +func (o *orm) InsertFinishedRun(run *Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) (err error) { + if err = o.checkFinishedRun(run, saveSuccessfulTaskRuns); err != nil { + return err + } + + if o.maxSuccessfulRuns == 0 { + // optimisation: avoid persisting if we oughtn't to save any + return nil + } + + q := o.q.WithOpts(qopts...) 
+ err = q.Transaction(func(tx pg.Queryer) error { + sql := `INSERT INTO pipeline_runs (pipeline_spec_id, meta, all_errors, fatal_errors, inputs, outputs, created_at, finished_at, state) + VALUES (:pipeline_spec_id, :meta, :all_errors, :fatal_errors, :inputs, :outputs, :created_at, :finished_at, :state) + RETURNING id;` + + query, args, e := tx.BindNamed(sql, run) + if e != nil { + return errors.Wrap(e, "failed to bind") + } + + if err = tx.QueryRowx(query, args...).Scan(&run.ID); err != nil { + return errors.Wrap(err, "error inserting finished pipeline_run") + } + + // update the ID key everywhere + for i := range run.PipelineTaskRuns { + run.PipelineTaskRuns[i].PipelineRunID = run.ID + } + + if !saveSuccessfulTaskRuns && !run.HasErrors() { + return nil + } + + defer o.Prune(tx, run.PipelineSpecID) + sql = ` + INSERT INTO pipeline_task_runs (pipeline_run_id, id, type, index, output, error, dot_id, created_at, finished_at) + VALUES (:pipeline_run_id, :id, :type, :index, :output, :error, :dot_id, :created_at, :finished_at);` + _, err = tx.NamedExec(sql, run.PipelineTaskRuns) + return errors.Wrap(err, "failed to insert pipeline_task_runs") + }) + return errors.Wrap(err, "InsertFinishedRun failed") +} + +// DeleteRunsOlderThan deletes all pipeline_runs that have been finished for a certain threshold to free DB space +// Caller is expected to set timeout on calling context. 
+func (o *orm) DeleteRunsOlderThan(ctx context.Context, threshold time.Duration) error { + start := time.Now() + + q := o.q.WithOpts(pg.WithParentCtxInheritTimeout(ctx)) + + queryThreshold := start.Add(-threshold) + + rowsDeleted := int64(0) + + err := pg.Batch(func(_, limit uint) (count uint, err error) { + result, cancel, err := q.ExecQIter(` +WITH batched_pipeline_runs AS ( + SELECT * FROM pipeline_runs + WHERE finished_at < ($1) + ORDER BY finished_at ASC + LIMIT $2 +) +DELETE FROM pipeline_runs +USING batched_pipeline_runs +WHERE pipeline_runs.id = batched_pipeline_runs.id`, + queryThreshold, + limit, + ) + defer cancel() + if err != nil { + return count, errors.Wrap(err, "DeleteRunsOlderThan failed to delete old pipeline_runs") + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return count, errors.Wrap(err, "DeleteRunsOlderThan failed to get rows affected") + } + rowsDeleted += rowsAffected + + return uint(rowsAffected), err + }) + if err != nil { + return errors.Wrap(err, "DeleteRunsOlderThan failed") + } + + deleteTS := time.Now() + + o.lggr.Debugw("pipeline_runs reaper DELETE query completed", "rowsDeleted", rowsDeleted, "duration", deleteTS.Sub(start)) + defer func(start time.Time) { + o.lggr.Debugw("pipeline_runs reaper VACUUM ANALYZE query completed", "duration", time.Since(start)) + }(deleteTS) + + err = q.ExecQ("VACUUM ANALYZE pipeline_runs") + if err != nil { + o.lggr.Warnw("DeleteRunsOlderThan successfully deleted old pipeline_runs rows, but failed to run VACUUM ANALYZE", "err", err) + return nil + } + + return nil +} + +func (o *orm) FindRun(id int64) (r Run, err error) { + var runs []*Run + err = o.q.Transaction(func(tx pg.Queryer) error { + if err = tx.Select(&runs, `SELECT * from pipeline_runs WHERE id = $1 LIMIT 1`, id); err != nil { + return errors.Wrap(err, "failed to load runs") + } + return loadAssociations(tx, runs) + }) + if len(runs) == 0 { + return r, sql.ErrNoRows + } + return *runs[0], err +} + +func (o *orm) 
GetAllRuns() (runs []Run, err error) { + var runsPtrs []*Run + err = o.q.Transaction(func(tx pg.Queryer) error { + err = tx.Select(&runsPtrs, `SELECT * from pipeline_runs ORDER BY created_at ASC, id ASC`) + if err != nil { + return errors.Wrap(err, "failed to load runs") + } + + return loadAssociations(tx, runsPtrs) + }) + runs = make([]Run, len(runsPtrs)) + for i, runPtr := range runsPtrs { + runs[i] = *runPtr + } + return runs, err +} + +func (o *orm) GetUnfinishedRuns(ctx context.Context, now time.Time, fn func(run Run) error) error { + q := o.q.WithOpts(pg.WithParentCtx(ctx)) + return pg.Batch(func(offset, limit uint) (count uint, err error) { + var runs []*Run + + err = q.Transaction(func(tx pg.Queryer) error { + err = tx.Select(&runs, `SELECT * from pipeline_runs WHERE state = $1 AND created_at < $2 ORDER BY created_at ASC, id ASC OFFSET $3 LIMIT $4`, RunStatusRunning, now, offset, limit) + if err != nil { + return errors.Wrap(err, "failed to load runs") + } + + err = loadAssociations(tx, runs) + if err != nil { + return err + } + + for _, run := range runs { + if err = fn(*run); err != nil { + return err + } + } + return nil + }) + + return uint(len(runs)), err + }) +} + +// loads PipelineSpec and PipelineTaskRuns for Runs in exactly 2 queries +func loadAssociations(q pg.Queryer, runs []*Run) error { + if len(runs) == 0 { + return nil + } + var specs []Spec + pipelineSpecIDM := make(map[int32]Spec) + var pipelineSpecIDs []int32 // keyed by pipelineSpecID + pipelineRunIDs := make([]int64, len(runs)) + for i, run := range runs { + pipelineRunIDs[i] = run.ID + if _, exists := pipelineSpecIDM[run.PipelineSpecID]; !exists { + pipelineSpecIDs = append(pipelineSpecIDs, run.PipelineSpecID) + pipelineSpecIDM[run.PipelineSpecID] = Spec{} + } + } + if err := q.Select(&specs, `SELECT ps.id, ps.dot_dag_source, ps.created_at, ps.max_task_duration, coalesce(jobs.id, 0) "job_id", coalesce(jobs.name, '') "job_name", coalesce(jobs.type, '') "job_type" FROM pipeline_specs ps 
LEFT OUTER JOIN jobs ON jobs.pipeline_spec_id=ps.id WHERE ps.id = ANY($1)`, pipelineSpecIDs); err != nil { + return errors.Wrap(err, "failed to postload pipeline_specs for runs") + } + for _, spec := range specs { + if spec.JobType == "keeper" { + spec.DotDagSource = KeepersObservationSource + } + pipelineSpecIDM[spec.ID] = spec + } + + var taskRuns []TaskRun + taskRunPRIDM := make(map[int64][]TaskRun, len(runs)) // keyed by pipelineRunID + if err := q.Select(&taskRuns, `SELECT * FROM pipeline_task_runs WHERE pipeline_run_id = ANY($1) ORDER BY created_at ASC, id ASC`, pipelineRunIDs); err != nil { + return errors.Wrap(err, "failed to postload pipeline_task_runs for runs") + } + for _, taskRun := range taskRuns { + taskRunPRIDM[taskRun.PipelineRunID] = append(taskRunPRIDM[taskRun.PipelineRunID], taskRun) + } + + for i, run := range runs { + runs[i].PipelineSpec = pipelineSpecIDM[run.PipelineSpecID] + runs[i].PipelineTaskRuns = taskRunPRIDM[run.ID] + } + + return nil +} + +func (o *orm) GetQ() pg.Q { + return o.q +} + +func (o *orm) loadCount(pipelineSpecID int32) *atomic.Uint64 { + // fast path; avoids allocation + actual, exists := o.pm.Load(pipelineSpecID) + if exists { + return actual.(*atomic.Uint64) + } + // "slow" path + actual, _ = o.pm.LoadOrStore(pipelineSpecID, new(atomic.Uint64)) + return actual.(*atomic.Uint64) +} + +// Runs will be pruned async on a sampled basis if maxSuccessfulRuns is set to +// this value or higher +const syncLimit = 1000 + +// Prune attempts to keep the pipeline_runs table capped close to the +// maxSuccessfulRuns length for each pipeline_spec_id. +// +// It does this synchronously for small values and async/sampled for large +// values. +// +// Note this does not guarantee the pipeline_runs table is kept to exactly the +// max length, rather that it doesn't excessively larger than it. 
+func (o *orm) Prune(tx pg.Queryer, pipelineSpecID int32) { + if pipelineSpecID == 0 { + o.lggr.Panic("expected a non-zero pipeline spec ID") + } + // For small maxSuccessfulRuns its fast enough to prune every time + if o.maxSuccessfulRuns < syncLimit { + o.execPrune(tx, pipelineSpecID) + return + } + // for large maxSuccessfulRuns we do it async on a sampled basis + every := o.maxSuccessfulRuns / 20 // it can get up to 5% larger than maxSuccessfulRuns before a prune + cnt := o.loadCount(pipelineSpecID) + val := cnt.Add(1) + if val%every == 0 { + ok := o.IfStarted(func() { + o.wg.Add(1) + go func() { + o.lggr.Debugw("Pruning runs", "pipelineSpecID", pipelineSpecID, "count", val, "every", every, "maxSuccessfulRuns", o.maxSuccessfulRuns) + defer o.wg.Done() + // Must not use tx here since it's async and the transaction + // could be stale + o.execPrune(o.q.WithOpts(pg.WithLongQueryTimeout()), pipelineSpecID) + }() + }) + if !ok { + o.lggr.Warnw("Cannot prune: ORM is not running", "pipelineSpecID", pipelineSpecID) + return + } + } +} + +func (o *orm) execPrune(q pg.Queryer, pipelineSpecID int32) { + res, err := q.ExecContext(o.ctx, `DELETE FROM pipeline_runs WHERE pipeline_spec_id = $1 AND state = $2 AND id NOT IN ( +SELECT id FROM pipeline_runs +WHERE pipeline_spec_id = $1 AND state = $2 +ORDER BY id DESC +LIMIT $3 +)`, pipelineSpecID, RunStatusCompleted, o.maxSuccessfulRuns) + if err != nil { + o.lggr.Errorw("Failed to prune runs", "err", err, "pipelineSpecID", pipelineSpecID) + return + } + rowsAffected, err := res.RowsAffected() + if err != nil { + o.lggr.Errorw("Failed to get RowsAffected while pruning runs", "err", err, "pipelineSpecID", pipelineSpecID) + return + } + if rowsAffected == 0 { + // check the spec still exists and garbage collect if necessary + var exists bool + if err := q.GetContext(o.ctx, &exists, `SELECT EXISTS(SELECT * FROM pipeline_specs WHERE id = $1)`, pipelineSpecID); err != nil { + o.lggr.Errorw("Failed check existence of pipeline_spec 
while pruning runs", "err", err, "pipelineSpecID", pipelineSpecID) + return + } + if !exists { + o.lggr.Debugw("Pipeline spec no longer exists, removing prune count", "pipelineSpecID", pipelineSpecID) + o.pm.Delete(pipelineSpecID) + } + } else if o.maxSuccessfulRuns < syncLimit { + o.lggr.Tracew("Pruned runs", "rowsAffected", rowsAffected, "pipelineSpecID", pipelineSpecID) + } else { + o.lggr.Debugw("Pruned runs", "rowsAffected", rowsAffected, "pipelineSpecID", pipelineSpecID) + } +} diff --git a/core/services/pipeline/orm_test.go b/core/services/pipeline/orm_test.go new file mode 100644 index 00000000..8702b033 --- /dev/null +++ b/core/services/pipeline/orm_test.go @@ -0,0 +1,764 @@ +package pipeline_test + +import ( + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + "gopkg.in/guregu/null.v4" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/utils/hex" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +type ormconfig struct { + pg.QConfig +} + +func (ormconfig) JobPipelineMaxSuccessfulRuns() uint64 { return 123456 } + +func setupORM(t *testing.T, heavy bool) (db *sqlx.DB, orm pipeline.ORM) { + t.Helper() + + 
if heavy { + _, db = heavyweight.FullTestDBV2(t, nil) + } else { + db = pgtest.NewSqlxDB(t) + } + cfg := ormconfig{pgtest.NewQConfig(true)} + orm = pipeline.NewORM(db, logger.TestLogger(t), cfg, cfg.JobPipelineMaxSuccessfulRuns()) + + return +} + +func setupHeavyORM(t *testing.T) (db *sqlx.DB, orm pipeline.ORM) { + return setupORM(t, true) +} + +func setupLiteORM(t *testing.T) (db *sqlx.DB, orm pipeline.ORM) { + return setupORM(t, false) +} + +func Test_PipelineORM_CreateSpec(t *testing.T) { + db, orm := setupLiteORM(t) + + var ( + source = "" + maxTaskDuration = models.Interval(1 * time.Minute) + ) + + p := pipeline.Pipeline{ + Source: source, + } + + id, err := orm.CreateSpec(p, maxTaskDuration) + require.NoError(t, err) + + actual := pipeline.Spec{} + err = db.Get(&actual, "SELECT * FROM pipeline_specs WHERE pipeline_specs.id = $1", id) + require.NoError(t, err) + assert.Equal(t, source, actual.DotDagSource) + assert.Equal(t, maxTaskDuration, actual.MaxTaskDuration) +} + +func Test_PipelineORM_FindRun(t *testing.T) { + db, orm := setupLiteORM(t) + + _, err := db.Exec(`SET CONSTRAINTS pipeline_runs_pipeline_spec_id_fkey DEFERRED`) + require.NoError(t, err) + expected := mustInsertPipelineRun(t, orm) + + run, err := orm.FindRun(expected.ID) + require.NoError(t, err) + + require.Equal(t, expected.ID, run.ID) +} + +func mustInsertPipelineRun(t *testing.T, orm pipeline.ORM) pipeline.Run { + t.Helper() + + run := pipeline.Run{ + State: pipeline.RunStatusRunning, + Outputs: pipeline.JSONSerializable{}, + AllErrors: pipeline.RunErrors{}, + FatalErrors: pipeline.RunErrors{}, + FinishedAt: null.Time{}, + } + + require.NoError(t, orm.InsertRun(&run)) + return run +} + +func mustInsertAsyncRun(t *testing.T, orm pipeline.ORM) *pipeline.Run { + t.Helper() + + s := ` +ds1 [type=bridge async=true name="example-bridge" timeout=0 requestData=<{"data": {"coin": "BTC", "market": "USD"}}>] +ds1_parse [type=jsonparse lax=false path="data,result"] +ds1_multiply [type=multiply 
times=1000000000000000000] + +ds1->ds1_parse->ds1_multiply->answer1; + +answer1 [type=median index=0]; +answer2 [type=bridge name=election_winner index=1]; +` + + p, err := pipeline.Parse(s) + require.NoError(t, err) + require.NotNil(t, p) + + maxTaskDuration := models.Interval(1 * time.Minute) + specID, err := orm.CreateSpec(*p, maxTaskDuration) + require.NoError(t, err) + + run := &pipeline.Run{ + PipelineSpecID: specID, + State: pipeline.RunStatusRunning, + Outputs: pipeline.JSONSerializable{}, + CreatedAt: time.Now(), + } + + err = orm.CreateRun(run) + require.NoError(t, err) + return run +} + +func TestInsertFinishedRuns(t *testing.T) { + db, orm := setupLiteORM(t) + + _, err := db.Exec(`SET CONSTRAINTS pipeline_runs_pipeline_spec_id_fkey DEFERRED`) + require.NoError(t, err) + + ps := cltest.MustInsertPipelineSpec(t, db) + + var runs []*pipeline.Run + for i := 0; i < 3; i++ { + now := time.Now() + r := pipeline.Run{ + PipelineSpecID: ps.ID, + State: pipeline.RunStatusRunning, + AllErrors: pipeline.RunErrors{}, + FatalErrors: pipeline.RunErrors{}, + CreatedAt: now, + FinishedAt: null.Time{}, + Outputs: pipeline.JSONSerializable{}, + } + + require.NoError(t, orm.InsertRun(&r)) + + r.PipelineTaskRuns = []pipeline.TaskRun{ + { + ID: uuid.New(), + PipelineRunID: r.ID, + Type: "bridge", + DotID: "ds1", + CreatedAt: now, + FinishedAt: null.TimeFrom(now.Add(100 * time.Millisecond)), + }, + { + ID: uuid.New(), + PipelineRunID: r.ID, + Type: "median", + DotID: "answer2", + Output: pipeline.JSONSerializable{Val: 1, Valid: true}, + CreatedAt: now, + FinishedAt: null.TimeFrom(now.Add(200 * time.Millisecond)), + }, + } + r.FinishedAt = null.TimeFrom(now.Add(300 * time.Millisecond)) + r.Outputs = pipeline.JSONSerializable{ + Val: "stuff", + Valid: true, + } + r.AllErrors = append(r.AllErrors, null.NewString("", false)) + r.State = pipeline.RunStatusCompleted + runs = append(runs, &r) + } + + err = orm.InsertFinishedRuns(runs, true) + require.NoError(t, err) + +} + +// Tests 
that inserting run results, then later updating the run results via upsert will work correctly. +func Test_PipelineORM_StoreRun_ShouldUpsert(t *testing.T) { + _, orm := setupLiteORM(t) + + run := mustInsertAsyncRun(t, orm) + + now := time.Now() + + run.PipelineTaskRuns = []pipeline.TaskRun{ + // pending task + { + ID: uuid.New(), + PipelineRunID: run.ID, + Type: "bridge", + DotID: "ds1", + CreatedAt: now, + FinishedAt: null.Time{}, + }, + // finished task + { + ID: uuid.New(), + PipelineRunID: run.ID, + Type: "median", + DotID: "answer2", + Output: pipeline.JSONSerializable{Val: 1, Valid: true}, + CreatedAt: now, + FinishedAt: null.TimeFrom(now), + }, + } + restart, err := orm.StoreRun(run) + require.NoError(t, err) + // no new data, so we don't need a restart + require.Equal(t, false, restart) + // the run is paused + require.Equal(t, pipeline.RunStatusSuspended, run.State) + + r, err := orm.FindRun(run.ID) + require.NoError(t, err) + run = &r + // this is an incomplete run, so partial results should be present (regardless of saveSuccessfulTaskRuns) + require.Equal(t, 2, len(run.PipelineTaskRuns)) + // and ds1 is not finished + task := run.ByDotID("ds1") + require.NotNil(t, task) + require.False(t, task.FinishedAt.Valid) + + // now try setting the ds1 result: call store run again + + run.PipelineTaskRuns = []pipeline.TaskRun{ + // pending task + { + ID: uuid.New(), + PipelineRunID: run.ID, + Type: "bridge", + DotID: "ds1", + Output: pipeline.JSONSerializable{Val: 2, Valid: true}, + CreatedAt: now, + FinishedAt: null.TimeFrom(now), + }, + } + restart, err = orm.StoreRun(run) + require.NoError(t, err) + // no new data, so we don't need a restart + require.Equal(t, false, restart) + // the run is paused + require.Equal(t, pipeline.RunStatusSuspended, run.State) + + r, err = orm.FindRun(run.ID) + require.NoError(t, err) + run = &r + // this is an incomplete run, so partial results should be present (regardless of saveSuccessfulTaskRuns) + require.Equal(t, 2, 
len(run.PipelineTaskRuns)) + // and ds1 is finished + task = run.ByDotID("ds1") + require.NotNil(t, task) + require.NotNil(t, task.FinishedAt) +} + +// Tests that trying to persist a partial run while new data became available (i.e. via /v2/restart) +// will detect a restart and update the result data on the Run. +func Test_PipelineORM_StoreRun_DetectsRestarts(t *testing.T) { + db, orm := setupLiteORM(t) + + run := mustInsertAsyncRun(t, orm) + + r, err := orm.FindRun(run.ID) + require.NoError(t, err) + require.Equal(t, run.Inputs, r.Inputs) + + now := time.Now() + + ds1_id := uuid.New() + + // insert something for this pipeline_run to trigger an early resume while the pipeline is running + rows, err := db.NamedQuery(` + INSERT INTO pipeline_task_runs (pipeline_run_id, id, type, index, output, error, dot_id, created_at, finished_at) + VALUES (:pipeline_run_id, :id, :type, :index, :output, :error, :dot_id, :created_at, :finished_at) + `, pipeline.TaskRun{ + ID: ds1_id, + PipelineRunID: run.ID, + Type: "bridge", + DotID: "ds1", + Output: pipeline.JSONSerializable{Val: 2, Valid: true}, + CreatedAt: now, + FinishedAt: null.TimeFrom(now), + }) + require.NoError(t, err) + t.Cleanup(func() { assert.NoError(t, rows.Close()) }) + + run.PipelineTaskRuns = []pipeline.TaskRun{ + // pending task + { + ID: ds1_id, + PipelineRunID: run.ID, + Type: "bridge", + DotID: "ds1", + CreatedAt: now, + FinishedAt: null.Time{}, + }, + // finished task + { + ID: uuid.New(), + PipelineRunID: run.ID, + Type: "median", + DotID: "answer2", + Output: pipeline.JSONSerializable{Val: 1, Valid: true}, + CreatedAt: now, + FinishedAt: null.TimeFrom(now), + }, + } + + restart, err := orm.StoreRun(run) + require.NoError(t, err) + // new data available! 
immediately restart the run + require.Equal(t, true, restart) + // the run is still in progress + require.Equal(t, pipeline.RunStatusRunning, run.State) + + // confirm we now contain the latest restart data merged with local task data + ds1 := run.ByDotID("ds1") + require.Equal(t, ds1.Output.Val, int64(2)) + require.True(t, ds1.FinishedAt.Valid) + +} + +func Test_PipelineORM_StoreRun_UpdateTaskRunResult(t *testing.T) { + _, orm := setupLiteORM(t) + + run := mustInsertAsyncRun(t, orm) + + ds1_id := uuid.New() + now := time.Now() + address, err := hex.DecodeString("0x8bd112d3f8f92e41c861939545ad387307af9703") + require.NoError(t, err) + cborOutput := map[string]interface{}{ + "blockNum": "0x13babbd", + "confirmations": int64(10), + "contractAddress": address, + "libraryVersion": int64(1), + "remoteChainId": int64(106), + } + + run.PipelineTaskRuns = []pipeline.TaskRun{ + // pending task + { + ID: ds1_id, + PipelineRunID: run.ID, + Type: "bridge", + DotID: "ds1", + CreatedAt: now, + FinishedAt: null.Time{}, + }, + // finished task with json output + { + ID: uuid.New(), + PipelineRunID: run.ID, + Type: "cbor_parse", + DotID: "ds2", + Output: pipeline.JSONSerializable{Val: cborOutput, Valid: true}, + CreatedAt: now, + FinishedAt: null.TimeFrom(now), + }, + // finished task + { + ID: uuid.New(), + PipelineRunID: run.ID, + Type: "median", + DotID: "answer2", + Output: pipeline.JSONSerializable{Val: 1, Valid: true}, + CreatedAt: now, + FinishedAt: null.TimeFrom(now), + }, + } + // assert that run should be in "running" state + require.Equal(t, pipeline.RunStatusRunning, run.State) + + // Now store a partial run + restart, err := orm.StoreRun(run) + require.NoError(t, err) + require.False(t, restart) + // assert that run should be in "paused" state + require.Equal(t, pipeline.RunStatusSuspended, run.State) + + r, start, err := orm.UpdateTaskRunResult(ds1_id, pipeline.Result{Value: "foo"}) + run = &r + require.NoError(t, err) + assert.Greater(t, run.ID, int64(0)) + 
assert.Greater(t, run.PipelineSpec.ID, int32(0)) // Make sure it actually loaded everything + + require.Len(t, run.PipelineTaskRuns, 3) + // assert that run should be in "running" state + require.Equal(t, pipeline.RunStatusRunning, run.State) + // assert that we get the start signal + require.True(t, start) + + // assert that the task is now updated + task := run.ByDotID("ds1") + require.True(t, task.FinishedAt.Valid) + require.Equal(t, pipeline.JSONSerializable{Val: "foo", Valid: true}, task.Output) + + // assert correct task run serialization + task2 := run.ByDotID("ds2") + cborOutput["contractAddress"] = "0x8bd112d3f8f92e41c861939545ad387307af9703" + require.Equal(t, pipeline.JSONSerializable{Val: cborOutput, Valid: true}, task2.Output) +} + +func Test_PipelineORM_DeleteRun(t *testing.T) { + _, orm := setupLiteORM(t) + + run := mustInsertAsyncRun(t, orm) + + now := time.Now() + + run.PipelineTaskRuns = []pipeline.TaskRun{ + // pending task + { + ID: uuid.New(), + PipelineRunID: run.ID, + Type: "bridge", + DotID: "ds1", + CreatedAt: now, + FinishedAt: null.Time{}, + }, + // finished task + { + ID: uuid.New(), + PipelineRunID: run.ID, + Type: "median", + DotID: "answer2", + Output: pipeline.JSONSerializable{Val: 1, Valid: true}, + CreatedAt: now, + FinishedAt: null.TimeFrom(now), + }, + } + restart, err := orm.StoreRun(run) + require.NoError(t, err) + // no new data, so we don't need a restart + require.Equal(t, false, restart) + // the run is paused + require.Equal(t, pipeline.RunStatusSuspended, run.State) + + err = orm.DeleteRun(run.ID) + require.NoError(t, err) + + _, err = orm.FindRun(run.ID) + require.Error(t, err, "not found") +} + +func Test_PipelineORM_DeleteRunsOlderThan(t *testing.T) { + _, orm := setupHeavyORM(t) + + var runsIds []int64 + + for i := 1; i <= 2000; i++ { + run := mustInsertAsyncRun(t, orm) + + now := time.Now() + + run.PipelineTaskRuns = []pipeline.TaskRun{ + // finished task + { + ID: uuid.New(), + PipelineRunID: run.ID, + Type: 
"median", + DotID: "answer2", + Output: pipeline.JSONSerializable{Val: 1, Valid: true}, + CreatedAt: now, + FinishedAt: null.TimeFrom(now.Add(-1 * time.Second)), + }, + } + run.State = pipeline.RunStatusCompleted + run.FinishedAt = null.TimeFrom(now.Add(-1 * time.Second)) + run.Outputs = pipeline.JSONSerializable{Val: 1, Valid: true} + run.AllErrors = pipeline.RunErrors{null.StringFrom("SOMETHING")} + + restart, err := orm.StoreRun(run) + assert.NoError(t, err) + // no new data, so we don't need a restart + assert.Equal(t, false, restart) + + runsIds = append(runsIds, run.ID) + } + + err := orm.DeleteRunsOlderThan(testutils.Context(t), 1*time.Second) + assert.NoError(t, err) + + for _, runId := range runsIds { + _, err := orm.FindRun(runId) + require.Error(t, err, "not found") + } +} + +func Test_GetUnfinishedRuns_Keepers(t *testing.T) { + t.Parallel() + + // The test configures single Keeper job with two running tasks. + // GetUnfinishedRuns() expects to catch both running tasks. + + config := configtest.NewTestGeneralConfig(t) + lggr := logger.TestLogger(t) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + porm := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgeORM := bridges.NewORM(db, lggr, config.Database()) + + jorm := job.NewORM(db, porm, bridgeORM, keyStore, lggr, config.Database()) + defer func() { assert.NoError(t, jorm.Close()) }() + + timestamp := time.Now() + var keeperJob = job.Job{ + ID: 1, + KeeperSpec: &job.KeeperSpec{ + ContractAddress: cltest.NewEIP55Address(), + FromAddress: cltest.NewEIP55Address(), + CreatedAt: timestamp, + UpdatedAt: timestamp, + EVMChainID: (*big.Big)(&cltest.FixtureChainID), + }, + ExternalJobID: uuid.MustParse("0EEC7E1D-D0D2-476C-A1A8-72DFB6633F46"), + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: "", + }, + Type: job.Keeper, + SchemaVersion: 1, + Name: null.StringFrom("test"), + MaxTaskDuration: models.Interval(1 * time.Minute), 
+ } + + err := jorm.CreateJob(&keeperJob) + require.NoError(t, err) + require.Equal(t, job.Keeper, keeperJob.Type) + + runID1 := uuid.New() + runID2 := uuid.New() + + err = porm.CreateRun(&pipeline.Run{ + PipelineSpecID: keeperJob.PipelineSpecID, + State: pipeline.RunStatusRunning, + Outputs: pipeline.JSONSerializable{}, + CreatedAt: time.Now(), + PipelineTaskRuns: []pipeline.TaskRun{{ + ID: runID1, + Type: pipeline.TaskTypeETHTx, + Index: 0, + Output: pipeline.JSONSerializable{}, + CreatedAt: time.Now(), + DotID: "perform_upkeep_tx", + }}, + }) + require.NoError(t, err) + + err = porm.CreateRun(&pipeline.Run{ + PipelineSpecID: keeperJob.PipelineSpecID, + State: pipeline.RunStatusRunning, + Outputs: pipeline.JSONSerializable{}, + CreatedAt: time.Now(), + PipelineTaskRuns: []pipeline.TaskRun{{ + ID: runID2, + Type: pipeline.TaskTypeETHCall, + Index: 1, + Output: pipeline.JSONSerializable{}, + CreatedAt: time.Now(), + DotID: "check_upkeep_tx", + }}, + }) + require.NoError(t, err) + + var counter int + + err = porm.GetUnfinishedRuns(testutils.Context(t), time.Now(), func(run pipeline.Run) error { + counter++ + + require.Equal(t, job.Keeper.String(), run.PipelineSpec.JobType) + require.Equal(t, pipeline.KeepersObservationSource, run.PipelineSpec.DotDagSource) + require.NotEmpty(t, run.PipelineTaskRuns) + + switch run.PipelineTaskRuns[0].ID { + case runID1: + trun := run.ByDotID("perform_upkeep_tx") + require.NotNil(t, trun) + case runID2: + trun := run.ByDotID("check_upkeep_tx") + require.NotNil(t, trun) + } + + return nil + }) + require.NoError(t, err) + require.Equal(t, 2, counter) +} + +func Test_GetUnfinishedRuns_DirectRequest(t *testing.T) { + t.Parallel() + + // The test configures single DR job with two task runs: one is running and one is suspended. + // GetUnfinishedRuns() expects to catch the one that is running. 
+ + config := configtest.NewTestGeneralConfig(t) + lggr := logger.TestLogger(t) + db := pgtest.NewSqlxDB(t) + keyStore := cltest.NewKeyStore(t, db, config.Database()) + porm := pipeline.NewORM(db, lggr, config.Database(), config.JobPipeline().MaxSuccessfulRuns()) + bridgeORM := bridges.NewORM(db, lggr, config.Database()) + + jorm := job.NewORM(db, porm, bridgeORM, keyStore, lggr, config.Database()) + defer func() { assert.NoError(t, jorm.Close()) }() + + timestamp := time.Now() + var drJob = job.Job{ + ID: 1, + DirectRequestSpec: &job.DirectRequestSpec{ + ContractAddress: cltest.NewEIP55Address(), + CreatedAt: timestamp, + UpdatedAt: timestamp, + EVMChainID: (*big.Big)(&cltest.FixtureChainID), + }, + ExternalJobID: uuid.MustParse("0EEC7E1D-D0D2-476C-A1A8-72DFB6633F46"), + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: `ds1 [type=http method=GET url="https://pricesource1.com"`, + }, + Type: job.DirectRequest, + SchemaVersion: 1, + Name: null.StringFrom("test"), + MaxTaskDuration: models.Interval(1 * time.Minute), + } + + err := jorm.CreateJob(&drJob) + require.NoError(t, err) + require.Equal(t, job.DirectRequest, drJob.Type) + + runningID := uuid.New() + + err = porm.CreateRun(&pipeline.Run{ + PipelineSpecID: drJob.PipelineSpecID, + State: pipeline.RunStatusRunning, + Outputs: pipeline.JSONSerializable{}, + CreatedAt: time.Now(), + PipelineTaskRuns: []pipeline.TaskRun{{ + ID: runningID, + Type: pipeline.TaskTypeHTTP, + Index: 0, + Output: pipeline.JSONSerializable{}, + CreatedAt: time.Now(), + DotID: "ds1", + }}, + }) + require.NoError(t, err) + + err = porm.CreateRun(&pipeline.Run{ + PipelineSpecID: drJob.PipelineSpecID, + State: pipeline.RunStatusSuspended, + Outputs: pipeline.JSONSerializable{}, + CreatedAt: time.Now(), + PipelineTaskRuns: []pipeline.TaskRun{{ + ID: uuid.New(), + Type: pipeline.TaskTypeHTTP, + Index: 1, + Output: pipeline.JSONSerializable{}, + CreatedAt: time.Now(), + DotID: "ds1", + }}, + }) + require.NoError(t, err) + + var counter int 
+ + err = porm.GetUnfinishedRuns(testutils.Context(t), time.Now(), func(run pipeline.Run) error { + counter++ + + require.Equal(t, job.DirectRequest.String(), run.PipelineSpec.JobType) + require.NotEmpty(t, run.PipelineTaskRuns) + require.Equal(t, runningID, run.PipelineTaskRuns[0].ID) + + trun := run.ByDotID("ds1") + require.NotNil(t, trun) + + return nil + }) + require.NoError(t, err) + require.Equal(t, 1, counter) +} + +func Test_Prune(t *testing.T) { + t.Parallel() + + n := uint64(2) + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.MaxSuccessfulRuns = &n + }) + lggr, observed := logger.TestLoggerObserved(t, zapcore.DebugLevel) + db := pgtest.NewSqlxDB(t) + porm := pipeline.NewORM(db, lggr, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + + ps1 := cltest.MustInsertPipelineSpec(t, db) + + t.Run("when there are no runs to prune, does nothing", func(t *testing.T) { + porm.Prune(db, ps1.ID) + + // no error logs; it did nothing + assert.Empty(t, observed.All()) + }) + + // ps1 has: + // - 20 completed runs + for i := 0; i < 20; i++ { + cltest.MustInsertPipelineRunWithStatus(t, db, ps1.ID, pipeline.RunStatusCompleted) + } + + ps2 := cltest.MustInsertPipelineSpec(t, db) + + // ps2 has: + // - 12 completed runs + // - 3 errored runs + // - 3 running run + // - 3 suspended run + for i := 0; i < 12; i++ { + cltest.MustInsertPipelineRunWithStatus(t, db, ps2.ID, pipeline.RunStatusCompleted) + } + for i := 0; i < 3; i++ { + cltest.MustInsertPipelineRunWithStatus(t, db, ps2.ID, pipeline.RunStatusErrored) + } + for i := 0; i < 3; i++ { + cltest.MustInsertPipelineRunWithStatus(t, db, ps2.ID, pipeline.RunStatusRunning) + } + for i := 0; i < 3; i++ { + cltest.MustInsertPipelineRunWithStatus(t, db, ps2.ID, pipeline.RunStatusSuspended) + } + + porm.Prune(db, ps2.ID) + + cnt := pgtest.MustCount(t, db, "SELECT count(*) FROM pipeline_runs WHERE pipeline_spec_id = $1 AND state = $2", ps1.ID, pipeline.RunStatusCompleted) + 
assert.Equal(t, cnt, 20) + + cnt = pgtest.MustCount(t, db, "SELECT count(*) FROM pipeline_runs WHERE pipeline_spec_id = $1 AND state = $2", ps2.ID, pipeline.RunStatusCompleted) + assert.Equal(t, 2, cnt) + cnt = pgtest.MustCount(t, db, "SELECT count(*) FROM pipeline_runs WHERE pipeline_spec_id = $1 AND state = $2", ps2.ID, pipeline.RunStatusErrored) + assert.Equal(t, 3, cnt) + cnt = pgtest.MustCount(t, db, "SELECT count(*) FROM pipeline_runs WHERE pipeline_spec_id = $1 AND state = $2", ps2.ID, pipeline.RunStatusRunning) + assert.Equal(t, 3, cnt) + cnt = pgtest.MustCount(t, db, "SELECT count(*) FROM pipeline_runs WHERE pipeline_spec_id = $1 AND state = $2", ps2.ID, pipeline.RunStatusSuspended) + assert.Equal(t, 3, cnt) +} diff --git a/core/services/pipeline/runner.go b/core/services/pipeline/runner.go new file mode 100644 index 00000000..bf1c1330 --- /dev/null +++ b/core/services/pipeline/runner.go @@ -0,0 +1,720 @@ +package pipeline + +import ( + "context" + "fmt" + "net/http" + "sort" + "sync" + "time" + + "github.com/google/uuid" + pkgerrors "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/services" + commonutils "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/config/env" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/recovery" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +//go:generate mockery --quiet --name Runner --output ./mocks/ --case=underscore + +type Runner interface { + services.Service + + // Run is a blocking call that will execute the run until no further progress can be made. 
+ // If `incomplete` is true, the run is only partially complete and is suspended, awaiting to be resumed when more data comes in. + // Note that `saveSuccessfulTaskRuns` value is ignored if the run contains async tasks. + Run(ctx context.Context, run *Run, l logger.Logger, saveSuccessfulTaskRuns bool, fn func(tx pg.Queryer) error) (incomplete bool, err error) + ResumeRun(taskID uuid.UUID, value interface{}, err error) error + + // ExecuteRun executes a new run in-memory according to a spec and returns the results. + // We expect spec.JobID and spec.JobName to be set for logging/prometheus. + ExecuteRun(ctx context.Context, spec Spec, vars Vars, l logger.Logger) (run *Run, trrs TaskRunResults, err error) + // InsertFinishedRun saves the run results in the database. + InsertFinishedRun(run *Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error + InsertFinishedRuns(runs []*Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error + + // ExecuteAndInsertFinishedRun executes a new run in-memory according to a spec, persists and saves the results. + // It is a combination of ExecuteRun and InsertFinishedRun. + // Note that the spec MUST have a DOT graph for this to work. 
+ ExecuteAndInsertFinishedRun(ctx context.Context, spec Spec, vars Vars, l logger.Logger, saveSuccessfulTaskRuns bool) (runID int64, finalResult FinalResult, err error) + + OnRunFinished(func(*Run)) +} + +type runner struct { + services.StateMachine + orm ORM + btORM bridges.ORM + config Config + bridgeConfig BridgeConfig + legacyEVMChains legacyevm.LegacyChainContainer + ethKeyStore ETHKeyStore + vrfKeyStore VRFKeyStore + runReaperWorker *commonutils.SleeperTask + lggr logger.Logger + httpClient *http.Client + unrestrictedHTTPClient *http.Client + + // test helper + runFinished func(*Run) + + chStop services.StopChan + wgDone sync.WaitGroup +} + +var ( + // PromPipelineTaskExecutionTime reports how long each pipeline task took to execute + // TODO: Make private again after + // https://app.clubhouse.io/pluginlabs/story/6065/hook-keeper-up-to-use-tasks-in-the-pipeline + PromPipelineTaskExecutionTime = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "pipeline_task_execution_time", + Help: "How long each pipeline task took to execute", + }, + []string{"job_id", "job_name", "task_id", "task_type"}, + ) + PromPipelineRunErrors = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pipeline_run_errors", + Help: "Number of errors for each pipeline spec", + }, + []string{"job_id", "job_name"}, + ) + PromPipelineRunTotalTimeToCompletion = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "pipeline_run_total_time_to_completion", + Help: "How long each pipeline run took to finish (from the moment it was created)", + }, + []string{"job_id", "job_name"}, + ) + PromPipelineTasksTotalFinished = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "pipeline_tasks_total_finished", + Help: "The total number of pipeline tasks which have finished", + }, + []string{"job_id", "job_name", "task_id", "task_type", "bridge_name", "status"}, + ) +) + +func NewRunner(orm ORM, btORM bridges.ORM, cfg Config, bridgeCfg BridgeConfig, legacyChains legacyevm.LegacyChainContainer, ethks 
ETHKeyStore, vrfks VRFKeyStore, lggr logger.Logger, httpClient, unrestrictedHTTPClient *http.Client) *runner { + r := &runner{ + orm: orm, + btORM: btORM, + config: cfg, + bridgeConfig: bridgeCfg, + legacyEVMChains: legacyChains, + ethKeyStore: ethks, + vrfKeyStore: vrfks, + chStop: make(chan struct{}), + wgDone: sync.WaitGroup{}, + runFinished: func(*Run) {}, + lggr: lggr.Named("PipelineRunner"), + httpClient: httpClient, + unrestrictedHTTPClient: unrestrictedHTTPClient, + } + r.runReaperWorker = commonutils.NewSleeperTask( + commonutils.SleeperFuncTask(r.runReaper, "PipelineRunnerReaper"), + ) + return r +} + +// Start starts Runner. +func (r *runner) Start(context.Context) error { + return r.StartOnce("PipelineRunner", func() error { + r.wgDone.Add(1) + go r.scheduleUnfinishedRuns() + if r.config.ReaperInterval() != time.Duration(0) { + r.wgDone.Add(1) + go r.runReaperLoop() + } + return nil + }) +} + +func (r *runner) Close() error { + return r.StopOnce("PipelineRunner", func() error { + close(r.chStop) + r.wgDone.Wait() + return nil + }) +} + +func (r *runner) Name() string { + return r.lggr.Name() +} + +func (r *runner) HealthReport() map[string]error { + return map[string]error{r.Name(): r.Healthy()} +} + +func (r *runner) destroy() { + err := r.runReaperWorker.Stop() + if err != nil { + r.lggr.Error(err) + } +} + +func (r *runner) runReaperLoop() { + defer r.wgDone.Done() + defer r.destroy() + if r.config.ReaperInterval() == 0 { + return + } + + runReaperTicker := time.NewTicker(utils.WithJitter(r.config.ReaperInterval())) + defer runReaperTicker.Stop() + for { + select { + case <-r.chStop: + return + case <-runReaperTicker.C: + r.runReaperWorker.WakeUp() + runReaperTicker.Reset(utils.WithJitter(r.config.ReaperInterval())) + } + } +} + +type memoryTaskRun struct { + task Task + inputs []Result // sorted by input index + vars Vars + attempts uint +} + +// When a task panics, we catch the panic and wrap it in an error for reporting to the scheduler. 
+type ErrRunPanicked struct { + v interface{} +} + +func (err ErrRunPanicked) Error() string { + return fmt.Sprintf("goroutine panicked when executing run: %v", err.v) +} + +func NewRun(spec Spec, vars Vars) *Run { + return &Run{ + State: RunStatusRunning, + PipelineSpec: spec, + PipelineSpecID: spec.ID, + Inputs: JSONSerializable{Val: vars.vars, Valid: true}, + Outputs: JSONSerializable{Val: nil, Valid: false}, + CreatedAt: time.Now(), + } +} + +func (r *runner) OnRunFinished(fn func(*Run)) { + r.runFinished = fn +} + +// github.com/goplugin/libocr/offchainreporting2plus/internal/protocol.ReportingPluginTimeoutWarningGracePeriod +var overtime = 100 * time.Millisecond + +func init() { + // undocumented escape hatch + if v := env.PipelineOvertime.Get(); v != "" { + d, err := time.ParseDuration(v) + if err == nil { + overtime = d + } + } +} + +func (r *runner) ExecuteRun( + ctx context.Context, + spec Spec, + vars Vars, + l logger.Logger, +) (*Run, TaskRunResults, error) { + // Pipeline runs may return results after the context is cancelled, so we modify the + // deadline to give them time to return before the parent context deadline. 
+ var cancel func() + ctx, cancel = commonutils.ContextWithDeadlineFn(ctx, func(orig time.Time) time.Time { + if tenPct := time.Until(orig) / 10; overtime > tenPct { + return orig.Add(-tenPct) + } + return orig.Add(-overtime) + }) + defer cancel() + + var pipeline *Pipeline + if spec.Pipeline != nil { + // assume if set that it has been pre-initialized + pipeline = spec.Pipeline + } else { + var err error + pipeline, err = r.InitializePipeline(spec) + if err != nil { + return nil, nil, err + } + } + + run := NewRun(spec, vars) + taskRunResults := r.run(ctx, pipeline, run, vars, l) + + if run.Pending { + return run, nil, fmt.Errorf("unexpected async run for spec ID %v, tried executing via ExecuteRun", spec.ID) + } + + return run, taskRunResults, nil +} + +func (r *runner) InitializePipeline(spec Spec) (pipeline *Pipeline, err error) { + pipeline, err = spec.GetOrParsePipeline() + if err != nil { + return + } + + // initialize certain task params + for _, task := range pipeline.Tasks { + task.Base().uuid = uuid.New() + + switch task.Type() { + case TaskTypeHTTP: + task.(*HTTPTask).config = r.config + task.(*HTTPTask).httpClient = r.httpClient + task.(*HTTPTask).unrestrictedHTTPClient = r.unrestrictedHTTPClient + case TaskTypeBridge: + task.(*BridgeTask).config = r.config + task.(*BridgeTask).bridgeConfig = r.bridgeConfig + task.(*BridgeTask).orm = r.btORM + task.(*BridgeTask).specId = spec.ID + // URL is "safe" because it comes from the node's own database. 
We + // must use the unrestrictedHTTPClient because some node operators + // may run external adapters on their own hardware + task.(*BridgeTask).httpClient = r.unrestrictedHTTPClient + case TaskTypeETHCall: + task.(*ETHCallTask).legacyChains = r.legacyEVMChains + task.(*ETHCallTask).config = r.config + task.(*ETHCallTask).specGasLimit = spec.GasLimit + task.(*ETHCallTask).jobType = spec.JobType + case TaskTypeVRF: + task.(*VRFTask).keyStore = r.vrfKeyStore + case TaskTypeVRFV2: + task.(*VRFTaskV2).keyStore = r.vrfKeyStore + case TaskTypeVRFV2Plus: + task.(*VRFTaskV2Plus).keyStore = r.vrfKeyStore + case TaskTypeEstimateGasLimit: + task.(*EstimateGasLimitTask).legacyChains = r.legacyEVMChains + task.(*EstimateGasLimitTask).specGasLimit = spec.GasLimit + task.(*EstimateGasLimitTask).jobType = spec.JobType + case TaskTypeETHTx: + task.(*ETHTxTask).keyStore = r.ethKeyStore + task.(*ETHTxTask).legacyChains = r.legacyEVMChains + task.(*ETHTxTask).specGasLimit = spec.GasLimit + task.(*ETHTxTask).jobType = spec.JobType + task.(*ETHTxTask).forwardingAllowed = spec.ForwardingAllowed + default: + } + } + + return pipeline, nil +} + +func (r *runner) run(ctx context.Context, pipeline *Pipeline, run *Run, vars Vars, l logger.Logger) TaskRunResults { + l = l.With("jobID", run.PipelineSpec.JobID, "jobName", run.PipelineSpec.JobName) + l.Debug("Initiating tasks for pipeline run of spec") + + scheduler := newScheduler(pipeline, run, vars, l) + go scheduler.Run() + + // This is "just in case" for cleaning up any stray reports. 
+ // Normally the scheduler loop doesn't stop until all in progress runs report back + reportCtx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if pipelineTimeout := r.config.MaxRunDuration(); pipelineTimeout != 0 { + ctx, cancel = context.WithTimeout(ctx, pipelineTimeout) + defer cancel() + } + + for taskRun := range scheduler.taskCh { + taskRun := taskRun + // execute + go recovery.WrapRecoverHandle(l, func() { + result := r.executeTaskRun(ctx, run.PipelineSpec, taskRun, l) + + logTaskRunToPrometheus(result, run.PipelineSpec) + + scheduler.report(reportCtx, result) + }, func(err interface{}) { + t := time.Now() + scheduler.report(reportCtx, TaskRunResult{ + ID: uuid.New(), + Task: taskRun.task, + Result: Result{Error: ErrRunPanicked{err}}, + FinishedAt: null.TimeFrom(t), + CreatedAt: t, // TODO: more accurate start time + }) + }) + } + + // if the run is suspended, awaiting resumption + run.Pending = scheduler.pending + // scheduler.exiting = we had an error and the task was marked to failEarly + run.FailSilently = scheduler.exiting + run.State = RunStatusSuspended + + if !scheduler.pending { + run.FinishedAt = null.TimeFrom(time.Now()) + + // NOTE: runTime can be very long now because it'll include suspend + runTime := run.FinishedAt.Time.Sub(run.CreatedAt) + l.Debugw("Finished all tasks for pipeline run", "specID", run.PipelineSpecID, "runTime", runTime) + PromPipelineRunTotalTimeToCompletion.WithLabelValues(fmt.Sprintf("%d", run.PipelineSpec.JobID), run.PipelineSpec.JobName).Set(float64(runTime)) + } + + // Update run results + run.PipelineTaskRuns = nil + for _, result := range scheduler.results { + output := result.Result.OutputDB() + run.PipelineTaskRuns = append(run.PipelineTaskRuns, TaskRun{ + ID: result.ID, + PipelineRunID: run.ID, + Type: result.Task.Type(), + Index: result.Task.OutputIndex(), + Output: output, + Error: result.Result.ErrorDB(), + DotID: result.Task.DotID(), + CreatedAt: result.CreatedAt, + FinishedAt: 
result.FinishedAt, + task: result.Task, + }) + + sort.Slice(run.PipelineTaskRuns, func(i, j int) bool { + return run.PipelineTaskRuns[i].task.OutputIndex() < run.PipelineTaskRuns[j].task.OutputIndex() + }) + } + + // Update run errors/outputs + if run.FinishedAt.Valid { + var errors []null.String + var fatalErrors []null.String + var outputs []interface{} + for _, result := range run.PipelineTaskRuns { + if result.Error.Valid { + errors = append(errors, result.Error) + } + // skip non-terminal results + if len(result.task.Outputs()) != 0 { + continue + } + fatalErrors = append(fatalErrors, result.Error) + outputs = append(outputs, result.Output.Val) + } + run.AllErrors = errors + run.FatalErrors = fatalErrors + run.Outputs = JSONSerializable{Val: outputs, Valid: true} + + if run.HasFatalErrors() { + run.State = RunStatusErrored + PromPipelineRunErrors.WithLabelValues(fmt.Sprintf("%d", run.PipelineSpec.JobID), run.PipelineSpec.JobName).Inc() + } else { + run.State = RunStatusCompleted + } + } + + // TODO: drop this once we stop using TaskRunResults + var taskRunResults TaskRunResults + for _, result := range scheduler.results { + taskRunResults = append(taskRunResults, result) + } + + var idxs []int32 + for i := range taskRunResults { + idxs = append(idxs, taskRunResults[i].Task.OutputIndex()) + } + // Ensure that task run results are ordered by their output index + sort.SliceStable(taskRunResults, func(i, j int) bool { + return taskRunResults[i].Task.OutputIndex() < taskRunResults[j].Task.OutputIndex() + }) + for i := range taskRunResults { + idxs[i] = taskRunResults[i].Task.OutputIndex() + } + + return taskRunResults +} + +func (r *runner) executeTaskRun(ctx context.Context, spec Spec, taskRun *memoryTaskRun, l logger.Logger) TaskRunResult { + start := time.Now() + l = l.With("taskName", taskRun.task.DotID(), + "taskType", taskRun.task.Type(), + "attempt", taskRun.attempts) + + // Task timeout will be whichever of the following timesout/cancels first: + // - 
Pipeline-level timeout + // - Specific task timeout (task.TaskTimeout) + // - Job level task timeout (spec.MaxTaskDuration) + // - Passed in context + + // CAUTION: Think twice before changing any of the context handling code + // below. It has already been changed several times trying to "fix" a bug, + // but actually introducing new ones. Please leave it as-is unless you have + // an extremely good reason to change it. + ctx, cancel := r.chStop.Ctx(ctx) + defer cancel() + if taskTimeout, isSet := taskRun.task.TaskTimeout(); isSet && taskTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, taskTimeout) + defer cancel() + } + if spec.MaxTaskDuration != models.Interval(time.Duration(0)) { + ctx, cancel = context.WithTimeout(ctx, time.Duration(spec.MaxTaskDuration)) + defer cancel() + } + + result, runInfo := taskRun.task.Run(ctx, l, taskRun.vars, taskRun.inputs) + loggerFields := []interface{}{"runInfo", runInfo, + "resultValue", result.Value, + "resultError", result.Error, + "resultType", fmt.Sprintf("%T", result.Value), + } + switch v := result.Value.(type) { + case []byte: + loggerFields = append(loggerFields, "resultString", fmt.Sprintf("%q", v)) + loggerFields = append(loggerFields, "resultHex", fmt.Sprintf("%x", v)) + } + l.Tracew("Pipeline task completed", loggerFields...) 
+ + now := time.Now() + + var finishedAt null.Time + if !runInfo.IsPending { + finishedAt = null.TimeFrom(now) + } + return TaskRunResult{ + ID: taskRun.task.Base().uuid, + Task: taskRun.task, + Result: result, + CreatedAt: start, + FinishedAt: finishedAt, + runInfo: runInfo, + } +} + +func logTaskRunToPrometheus(trr TaskRunResult, spec Spec) { + elapsed := trr.FinishedAt.Time.Sub(trr.CreatedAt) + + PromPipelineTaskExecutionTime.WithLabelValues(fmt.Sprintf("%d", spec.JobID), spec.JobName, trr.Task.DotID(), string(trr.Task.Type())).Set(float64(elapsed)) + var status string + if trr.Result.Error != nil { + status = "error" + } else { + status = "completed" + } + + bridgeName := "" + if bridgeTask, ok := trr.Task.(*BridgeTask); ok { + bridgeName = bridgeTask.Name + } + + PromPipelineTasksTotalFinished.WithLabelValues(fmt.Sprintf("%d", spec.JobID), spec.JobName, trr.Task.DotID(), string(trr.Task.Type()), bridgeName, status).Inc() +} + +// ExecuteAndInsertFinishedRun executes a run in memory then inserts the finished run/task run records, returning the final result +func (r *runner) ExecuteAndInsertFinishedRun(ctx context.Context, spec Spec, vars Vars, l logger.Logger, saveSuccessfulTaskRuns bool) (runID int64, finalResult FinalResult, err error) { + run, trrs, err := r.ExecuteRun(ctx, spec, vars, l) + if err != nil { + return 0, finalResult, pkgerrors.Wrapf(err, "error executing run for spec ID %v", spec.ID) + } + + finalResult = trrs.FinalResult(l) + + // don't insert if we exited early + if run.FailSilently { + return 0, finalResult, nil + } + + if err = r.orm.InsertFinishedRun(run, saveSuccessfulTaskRuns); err != nil { + return 0, finalResult, pkgerrors.Wrapf(err, "error inserting finished results for spec ID %v", spec.ID) + } + return run.ID, finalResult, nil + +} + +func (r *runner) Run(ctx context.Context, run *Run, l logger.Logger, saveSuccessfulTaskRuns bool, fn func(tx pg.Queryer) error) (incomplete bool, err error) { + pipeline, err := 
r.InitializePipeline(run.PipelineSpec) + if err != nil { + return false, err + } + + // retain old UUID values + for _, taskRun := range run.PipelineTaskRuns { + task := pipeline.ByDotID(taskRun.DotID) + if task == nil || task.Base() == nil { + return false, pkgerrors.Errorf("failed to match a pipeline task for dot ID: %v", taskRun.DotID) + } + task.Base().uuid = taskRun.ID + } + + preinsert := pipeline.RequiresPreInsert() + + q := r.orm.GetQ().WithOpts(pg.WithParentCtx(ctx)) + err = q.Transaction(func(tx pg.Queryer) error { + // OPTIMISATION: avoid an extra db write if there is no async tasks present or if this is a resumed run + if preinsert && run.ID == 0 { + now := time.Now() + // initialize certain task params + for _, task := range pipeline.Tasks { + switch task.Type() { + case TaskTypeETHTx: + run.PipelineTaskRuns = append(run.PipelineTaskRuns, TaskRun{ + ID: task.Base().uuid, + PipelineRunID: run.ID, + Type: task.Type(), + Index: task.OutputIndex(), + DotID: task.DotID(), + CreatedAt: now, + }) + default: + } + } + if err = r.orm.CreateRun(run, pg.WithQueryer(tx)); err != nil { + return err + } + } + + if fn != nil { + return fn(tx) + } + return nil + }) + if err != nil { + return false, err + } + + for { + r.run(ctx, pipeline, run, NewVarsFrom(run.Inputs.Val.(map[string]interface{})), l) + + if preinsert { + // FailSilently = run failed and task was marked failEarly. 
skip StoreRun and instead delete all trace of it + if run.FailSilently { + if err = r.orm.DeleteRun(run.ID); err != nil { + return false, pkgerrors.Wrap(err, "Run") + } + return false, nil + } + + var restart bool + restart, err = r.orm.StoreRun(run) + if err != nil { + return false, pkgerrors.Wrapf(err, "error storing run for spec ID %v state %v outputs %v errors %v finished_at %v", + run.PipelineSpec.ID, run.State, run.Outputs, run.FatalErrors, run.FinishedAt) + } + + if restart { + // instant restart: new data is already available in the database + continue + } + } else { + if run.Pending { + return false, pkgerrors.Wrapf(err, "a run without async returned as pending") + } + // don't insert if we exited early + if run.FailSilently { + return false, nil + } + + if err = r.orm.InsertFinishedRun(run, saveSuccessfulTaskRuns, pg.WithParentCtx(ctx)); err != nil { + return false, pkgerrors.Wrapf(err, "error storing run for spec ID %v", run.PipelineSpec.ID) + } + } + + r.runFinished(run) + + return run.Pending, err + } +} + +func (r *runner) ResumeRun(taskID uuid.UUID, value interface{}, err error) error { + run, start, err := r.orm.UpdateTaskRunResult(taskID, Result{ + Value: value, + Error: err, + }) + if err != nil { + return fmt.Errorf("failed to update task run result: %w", err) + } + + // TODO: Should probably replace this with a listener to update events + // which allows to pass in a transactionalised database to this function + if start { + // start the runner again + go func() { + if _, err := r.Run(context.Background(), &run, r.lggr, false, nil); err != nil { + r.lggr.Errorw("Resume run failure", "err", err) + } + r.lggr.Debug("Resume run success") + }() + } + return nil +} + +func (r *runner) InsertFinishedRun(run *Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { + return r.orm.InsertFinishedRun(run, saveSuccessfulTaskRuns, qopts...) 
+} + +func (r *runner) InsertFinishedRuns(runs []*Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { + return r.orm.InsertFinishedRuns(runs, saveSuccessfulTaskRuns, qopts...) +} + +func (r *runner) runReaper() { + r.lggr.Debugw("Pipeline run reaper starting") + ctx, cancel := r.chStop.CtxCancel(context.WithTimeout(context.Background(), r.config.ReaperInterval())) + defer cancel() + + err := r.orm.DeleteRunsOlderThan(ctx, r.config.ReaperThreshold()) + if err != nil { + r.lggr.Errorw("Pipeline run reaper failed", "err", err) + r.SvcErrBuffer.Append(err) + } else { + r.lggr.Debugw("Pipeline run reaper completed successfully") + } +} + +// init task: Searches the database for runs stuck in the 'running' state while the node was previously killed. +// We pick up those runs and resume execution. +func (r *runner) scheduleUnfinishedRuns() { + defer r.wgDone.Done() + + // limit using a createdAt < now() @ start of run to prevent executing new jobs + now := time.Now() + + if r.config.ReaperInterval() > time.Duration(0) { + // immediately run reaper so we don't consider runs that are too old + r.runReaper() + } + + ctx, cancel := r.chStop.NewCtx() + defer cancel() + + var wgRunsDone sync.WaitGroup + err := r.orm.GetUnfinishedRuns(ctx, now, func(run Run) error { + wgRunsDone.Add(1) + + go func() { + defer wgRunsDone.Done() + + _, err := r.Run(ctx, &run, r.lggr, false, nil) + if ctx.Err() != nil { + return + } else if err != nil { + r.lggr.Errorw("Pipeline run init job resumption failed", "err", err) + } + }() + + return nil + }) + + wgRunsDone.Wait() + + if ctx.Err() != nil { + return + } else if err != nil { + r.lggr.Errorw("Pipeline run init job failed", "err", err) + } +} diff --git a/core/services/pipeline/runner_test.go b/core/services/pipeline/runner_test.go new file mode 100644 index 00000000..4ce42c9c --- /dev/null +++ b/core/services/pipeline/runner_test.go @@ -0,0 +1,984 @@ +package pipeline_test + +import ( + "context" + "encoding/json" + "errors" + "fmt" 
+ "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + bridgesMocks "github.com/goplugin/pluginv3.0/v2/core/bridges/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + clhttptest "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/httptest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline/mocks" + "github.com/goplugin/pluginv3.0/v2/core/utils" + + "github.com/jmoiron/sqlx" +) + +func newRunner(t testing.TB, db *sqlx.DB, bridgeORM bridges.ORM, cfg plugin.GeneralConfig) (pipeline.Runner, *mocks.ORM) { + lggr := logger.TestLogger(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: cfg, KeyStore: ethKeyStore}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + orm := mocks.NewORM(t) + q := pg.NewQ(db, lggr, cfg.Database()) + + orm.On("GetQ").Return(q).Maybe() + c := clhttptest.NewTestLocalOnlyHTTPClient() + r := pipeline.NewRunner(orm, bridgeORM, cfg.JobPipeline(), cfg.WebServer(), 
legacyChains, ethKeyStore, nil, logger.TestLogger(t), c, c) + return r, orm +} + +func Test_PipelineRunner_ExecuteTaskRuns(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + btcUSDPairing := utils.MustUnmarshalToMap(`{"data":{"coin":"BTC","market":"USD"}}`) + + // 1. Setup bridge + s1 := httptest.NewServer(fakePriceResponder(t, btcUSDPairing, decimal.NewFromInt(9700), "", nil)) + defer s1.Close() + + bridgeFeedURL, err := url.ParseRequestURI(s1.URL) + require.NoError(t, err) + + _, bt := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: bridgeFeedURL.String()}, cfg.Database()) + + btORM := bridgesMocks.NewORM(t) + btORM.On("FindBridge", bt.Name).Return(*bt, nil).Once() + + // 2. Setup success HTTP + s2 := httptest.NewServer(fakePriceResponder(t, btcUSDPairing, decimal.NewFromInt(9600), "", nil)) + defer s2.Close() + + s4 := httptest.NewServer(fakeStringResponder(t, "foo-index-1")) + defer s4.Close() + s5 := httptest.NewServer(fakeStringResponder(t, "bar-index-2")) + defer s5.Close() + + r, _ := newRunner(t, db, btORM, cfg) + + s := fmt.Sprintf(` +ds1 [type=bridge name="%s" timeout=0 requestData=<{"data": {"coin": "BTC", "market": "USD"}}>] +ds1_parse [type=jsonparse lax=false path="data,result"] +ds1_multiply [type=multiply times=1000000000000000000] + +ds2 [type=http method="GET" url="%s" requestData=<{"data": {"coin": "BTC", "market": "USD"}}>] +ds2_parse [type=jsonparse lax=false path="data,result"] +ds2_multiply [type=multiply times=1000000000000000000] + +ds3 [type=http method="GET" url="blah://test.invalid" requestData=<{"data": {"coin": "BTC", "market": "USD"}}>] +ds3_parse [type=jsonparse lax=false path="data,result"] +ds3_multiply [type=multiply times=1000000000000000000] + +ds1->ds1_parse->ds1_multiply->median; +ds2->ds2_parse->ds2_multiply->median; +ds3->ds3_parse->ds3_multiply->median; + +median [type=median index=0] +ds4 [type=http method="GET" url="%s" index=1] +ds5 [type=http method="GET" url="%s" index=2] 
+`, bt.Name.String(), s2.URL, s4.URL, s5.URL) + d, err := pipeline.Parse(s) + require.NoError(t, err) + + spec := pipeline.Spec{DotDagSource: s} + vars := pipeline.NewVarsFrom(nil) + + lggr := logger.TestLogger(t) + _, trrs, err := r.ExecuteRun(testutils.Context(t), spec, vars, lggr) + require.NoError(t, err) + require.Len(t, trrs, len(d.Tasks)) + + finalResults := trrs.FinalResult(lggr) + require.Len(t, finalResults.Values, 3) + require.Len(t, finalResults.AllErrors, 12) + require.Len(t, finalResults.FatalErrors, 3) + assert.Equal(t, "9650000000000000000000", finalResults.Values[0].(decimal.Decimal).String()) + assert.Nil(t, finalResults.FatalErrors[0]) + assert.Equal(t, "foo-index-1", finalResults.Values[1].(string)) + assert.Nil(t, finalResults.FatalErrors[1]) + assert.Equal(t, "bar-index-2", finalResults.Values[2].(string)) + assert.Nil(t, finalResults.FatalErrors[2]) + + var errorResults []pipeline.TaskRunResult + for _, trr := range trrs { + if trr.Result.Error != nil && !trr.IsTerminal() { + errorResults = append(errorResults, trr) + } + } + // There are three tasks in the erroring pipeline + require.Len(t, errorResults, 3) +} + +type taskRunWithVars struct { + bridgeName string + ds2URL, ds4URL string + submitBridgeName string + includeInputAtKey string +} + +func (t taskRunWithVars) String() string { + return fmt.Sprintf(` + ds1 [type=bridge name="%s" timeout=0 requestData=<{"data": $(foo)}>] + ds1_parse [type=jsonparse lax=false path="data,result" data="$(ds1)"] + ds1_multiply [type=multiply input="$(ds1_parse.result)" times="$(ds1_parse.times)"] + + ds2 [type=http method="POST" url="%s" requestData=<{"data": [ $(bar), $(baz) ]}>] + ds2_parse [type=jsonparse lax=false path="data" data="$(ds2)"] + ds2_multiply [type=multiply input="$(ds2_parse.result)" times="$(ds2_parse.times)"] + + ds3 [type=http method="POST" url="blah://test.invalid" requestData=<{"data": {"coin": "BTC", "market": "USD"}}>] + ds3_parse [type=jsonparse lax=false path="data,result" 
data="$(ds3)"] + ds3_multiply [type=multiply input="$(ds3_parse.value)" times="$(ds3_parse.times)"] + + ds1->ds1_parse->ds1_multiply->median; + ds2->ds2_parse->ds2_multiply->median; + ds3->ds3_parse->ds3_multiply->median; + + median [type=median values=<[ $(ds1_multiply), $(ds2_multiply), $(ds3_multiply) ]> index=0] + ds4 [type=http method="GET" url="%s" index=1] + + submit [type=bridge name="%s" + includeInputAtKey="%s" + requestData=<{ + "median": $(median), + "fetchedValues": [ $(ds1_parse.result), $(ds2_parse.result) ], + "someString": $(ds4) + }>] + + median -> submit; + ds4 -> submit; + `, t.bridgeName, t.ds2URL, t.ds4URL, t.submitBridgeName, t.includeInputAtKey) +} + +func Test_PipelineRunner_ExecuteTaskRunsWithVars(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + vars map[string]interface{} + meta map[string]interface{} + includeInputAtKey string + }{ + { + name: "meta + includeInputAtKey", + vars: map[string]interface{}{ + "foo": []interface{}{float64(123), "plugin"}, + "bar": float64(123.45), + "baz": "such oracle", + }, + meta: map[string]interface{}{"roundID": float64(456), "latestAnswer": float64(654)}, + includeInputAtKey: "sergey", + }, + { + name: "includeInputAtKey", + vars: map[string]interface{}{ + "foo": *mustDecimal(t, "42.1337"), + "bar": map[string]interface{}{"steve": "plugin"}, + "baz": true, + }, + includeInputAtKey: "best oracles", + }, + { + name: "meta", + vars: map[string]interface{}{ + "foo": []interface{}{"asdf", float64(123)}, + "bar": false, + "baz": *mustDecimal(t, "42.1337"), + }, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + expectedRequestDS1 := map[string]interface{}{"data": test.vars["foo"]} + expectedRequestDS2 := map[string]interface{}{"data": []interface{}{test.vars["bar"], test.vars["baz"]}} + expectedRequestSubmit := map[string]interface{}{ + "median": 
"9650000000000000000000", + "fetchedValues": []interface{}{"9700", "9600"}, + "someString": "some random string", + } + if test.meta != nil { + expectedRequestDS1["meta"] = test.meta + expectedRequestSubmit["meta"] = test.meta + test.vars["jobRun"] = map[string]interface{}{"meta": test.meta} + } + if test.includeInputAtKey != "" { + expectedRequestSubmit[test.includeInputAtKey] = "9650000000000000000000" + } + + btORM := bridgesMocks.NewORM(t) + + // 1. Setup bridge + ds1, bridge := makeBridge(t, db, expectedRequestDS1, map[string]interface{}{ + "data": map[string]interface{}{ + "result": map[string]interface{}{ + "result": decimal.NewFromInt(9700), + "times": "1000000000000000000", + }, + }, + }, + cfg.Database()) + defer ds1.Close() + + btORM.On("FindBridge", bridge.Name).Return(bridge, nil).Once() + + // 2. Setup success HTTP + ds2 := httptest.NewServer(fakeExternalAdapter(t, expectedRequestDS2, map[string]interface{}{ + "data": map[string]interface{}{ + "result": decimal.NewFromInt(9600), + "times": "1000000000000000000", + }, + })) + defer ds2.Close() + + ds4 := httptest.NewServer(fakeStringResponder(t, "some random string")) + defer ds4.Close() + + // 3. 
Setup final bridge task + submit, submitBt := makeBridge(t, db, expectedRequestSubmit, map[string]interface{}{"ok": true}, cfg.Database()) + defer submit.Close() + + btORM.On("FindBridge", submitBt.Name).Return(submitBt, nil).Once() + + runner, _ := newRunner(t, db, btORM, cfg) + specStr := taskRunWithVars{ + bridgeName: bridge.Name.String(), + ds2URL: ds2.URL, + ds4URL: ds4.URL, + submitBridgeName: submitBt.Name.String(), + includeInputAtKey: test.includeInputAtKey, + }.String() + p, err := pipeline.Parse(specStr) + require.NoError(t, err) + + spec := pipeline.Spec{ + DotDagSource: specStr, + } + _, taskRunResults, err := runner.ExecuteRun(testutils.Context(t), spec, pipeline.NewVarsFrom(test.vars), logger.TestLogger(t)) + require.NoError(t, err) + require.Len(t, taskRunResults, len(p.Tasks)) + + expectedResults := map[string]pipeline.Result{ + "ds1": {Value: `{"data":{"result":{"result":"9700","times":"1000000000000000000"}}}` + "\n"}, + "ds1_parse": {Value: map[string]interface{}{"result": "9700", "times": "1000000000000000000"}}, + "ds1_multiply": {Value: *mustDecimal(t, "9700000000000000000000")}, + "ds2": {Value: `{"data":{"result":"9600","times":"1000000000000000000"}}` + "\n"}, + "ds2_parse": {Value: map[string]interface{}{"result": "9600", "times": "1000000000000000000"}}, + "ds2_multiply": {Value: *mustDecimal(t, "9600000000000000000000")}, + "ds3": {Error: errors.New(`error making http request: Post "blah://test.invalid": unsupported protocol scheme "blah"`)}, + "ds3_parse": {Error: pipeline.ErrTooManyErrors}, + "ds3_multiply": {Error: pipeline.ErrTooManyErrors}, + "ds4": {Value: "some random string"}, + "median": {Value: *mustDecimal(t, "9650000000000000000000")}, + "submit": {Value: `{"ok":true}` + "\n"}, + } + + for _, r := range taskRunResults { + expected := expectedResults[r.Task.DotID()] + if expected.Error != nil { + require.Error(t, r.Result.Error) + require.Contains(t, r.Result.Error.Error(), expected.Error.Error()) + } else { + if d, is := 
expected.Value.(decimal.Decimal); is { + require.Equal(t, d.String(), r.Result.Value.(decimal.Decimal).String()) + } else { + require.Equal(t, expected.Value, r.Result.Value) + } + } + } + }) + } +} + +const ( + CBORDietEmpty = ` +decode_log [type="ethabidecodelog" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)" + abi="OracleRequest(address requester, bytes32 requestId, uint256 payment, address callbackAddr, bytes4 callbackFunctionId, uint256 cancelExpiration, uint256 dataVersion, bytes cborPayload)"] + +decode_cbor [type="cborparse" + data="$(decode_log.cborPayload)" + mode=diet] + +decode_log -> decode_cbor; +` + CBORStdString = ` +decode_log [type="ethabidecodelog" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)" + abi="OracleRequest(address requester, bytes32 requestId, uint256 payment, address callbackAddr, bytes4 callbackFunctionId, uint256 cancelExpiration, uint256 dataVersion, bytes cborPayload)"] + +decode_cbor [type="cborparse" + data="$(decode_log.cborPayload)" + mode=standard] + +decode_log -> decode_cbor; +` +) + +func Test_PipelineRunner_CBORParse(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + btORM := bridgesMocks.NewORM(t) + r, _ := newRunner(t, db, btORM, cfg) + + t.Run("diet mode, empty CBOR", func(t *testing.T) { + s := CBORDietEmpty + d, err := pipeline.Parse(s) + require.NoError(t, err) + + spec := pipeline.Spec{DotDagSource: s} + global := make(map[string]interface{}) + jobRun := make(map[string]interface{}) + global["jobRun"] = jobRun + jobRun["logData"] = 
hexutil.MustDecode("0x0000000000000000000000009c26cc46f57667cba75556014c8e0d5ed7c5b83d17a526ff5d8f916fa2f4a218f6ce0a6e410a0d7823f8238979f8579c2145fd6f0000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000009c26cc46f57667cba75556014c8e0d5ed7c5b83d64ef935700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006148ef28000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000") + jobRun["logTopics"] = []common.Hash{ + common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65"), + common.HexToHash("0x3963386131316165393962363463373161663333376235643831633737353230"), + } + vars := pipeline.NewVarsFrom(global) + + lggr := logger.TestLogger(t) + _, trrs, err := r.ExecuteRun(testutils.Context(t), spec, vars, lggr) + require.NoError(t, err) + require.Len(t, trrs, len(d.Tasks)) + + finalResults := trrs.FinalResult(lggr) + require.Len(t, finalResults.Values, 1) + assert.Equal(t, make(map[string]interface{}), finalResults.Values[0]) + require.Len(t, finalResults.FatalErrors, 1) + assert.Nil(t, finalResults.FatalErrors[0]) + }) + + t.Run("standard mode, string value", func(t *testing.T) { + s := CBORStdString + d, err := pipeline.Parse(s) + require.NoError(t, err) + + spec := pipeline.Spec{DotDagSource: s} + global := make(map[string]interface{}) + jobRun := make(map[string]interface{}) + global["jobRun"] = jobRun + jobRun["logData"] = 
hexutil.MustDecode("0x0000000000000000000000009c26cc46f57667cba75556014c8e0d5ed7c5b83d17a526ff5d8f916fa2f4a218f6ce0a6e410a0d7823f8238979f8579c2145fd6f0000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000009c26cc46f57667cba75556014c8e0d5ed7c5b83d64ef935700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006148ef2800000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000463666F6F00000000000000000000000000000000000000000000000000000000") + jobRun["logTopics"] = []common.Hash{ + common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65"), + common.HexToHash("0x3963386131316165393962363463373161663333376235643831633737353230"), + } + vars := pipeline.NewVarsFrom(global) + + lggr := logger.TestLogger(t) + _, trrs, err := r.ExecuteRun(testutils.Context(t), spec, vars, lggr) + require.NoError(t, err) + require.Len(t, trrs, len(d.Tasks)) + + finalResults := trrs.FinalResult(lggr) + require.Len(t, finalResults.Values, 1) + assert.Equal(t, "foo", finalResults.Values[0]) + require.Len(t, finalResults.FatalErrors, 1) + assert.Nil(t, finalResults.FatalErrors[0]) + }) +} + +func Test_PipelineRunner_HandleFaults(t *testing.T) { + // We want to test the scenario where one or multiple APIs time out, + // but a sufficient number of them still complete within the desired time frame + // and so we can still obtain a median. 
+ db := pgtest.NewSqlxDB(t) + orm := mocks.NewORM(t) + q := pg.NewQ(db, logger.TestLogger(t), configtest.NewTestGeneralConfig(t).Database()) + + orm.On("GetQ").Return(q).Maybe() + m1 := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + time.Sleep(100 * time.Millisecond) + res.WriteHeader(http.StatusOK) + _, err := res.Write([]byte(`{"result":10}`)) + assert.NoError(t, err) + })) + m2 := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + res.WriteHeader(http.StatusOK) + _, err := res.Write([]byte(`{"result":11}`)) + assert.NoError(t, err) + })) + s := fmt.Sprintf(` +ds1 [type=http url="%s"]; +ds1_parse [type=jsonparse path="result"]; +ds1_multiply [type=multiply times=100]; + +ds2 [type=http url="%s"]; +ds2_parse [type=jsonparse path="result"]; +ds2_multiply [type=multiply times=100]; + +ds1 -> ds1_parse -> ds1_multiply -> answer1; +ds2 -> ds2_parse -> ds2_multiply -> answer1; + +answer1 [type=median index=0]; +`, m1.URL, m2.URL) + cfg := configtest.NewTestGeneralConfig(t) + btORM := bridgesMocks.NewORM(t) + r, _ := newRunner(t, db, btORM, cfg) + + // If we cancel before an API is finished, we should still get a median. + ctx, cancel := context.WithTimeout(testutils.Context(t), 50*time.Millisecond) + defer cancel() + + spec := pipeline.Spec{DotDagSource: s} + vars := pipeline.NewVarsFrom(nil) + + _, trrs, err := r.ExecuteRun(ctx, spec, vars, logger.TestLogger(t)) + require.NoError(t, err) + for _, trr := range trrs { + if trr.IsTerminal() { + require.Equal(t, decimal.RequireFromString("1100"), trr.Result.Value.(decimal.Decimal)) + } + } +} + +func Test_PipelineRunner_HandleFaultsPersistRun(t *testing.T) { + db := pgtest.NewSqlxDB(t) + orm := mocks.NewORM(t) + btORM := bridgesMocks.NewORM(t) + q := pg.NewQ(db, logger.TestLogger(t), configtest.NewTestGeneralConfig(t).Database()) + orm.On("GetQ").Return(q).Maybe() + orm.On("InsertFinishedRun", mock.Anything, mock.Anything, mock.Anything). 
+ Run(func(args mock.Arguments) { + args.Get(0).(*pipeline.Run).ID = 1 + }). + Return(nil) + cfg := configtest.NewTestGeneralConfig(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: cfg, KeyStore: ethKeyStore}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + lggr := logger.TestLogger(t) + r := pipeline.NewRunner(orm, btORM, cfg.JobPipeline(), cfg.WebServer(), legacyChains, ethKeyStore, nil, lggr, nil, nil) + + spec := pipeline.Spec{DotDagSource: ` +fail_but_i_dont_care [type=fail] +succeed1 [type=memo value=10] +succeed2 [type=memo value=11] +final [type=mean] + +fail_but_i_dont_care -> final; +succeed1 -> final; +succeed2 -> final; +`} + vars := pipeline.NewVarsFrom(nil) + + _, finalResult, err := r.ExecuteAndInsertFinishedRun(testutils.Context(t), spec, vars, lggr, false) + require.NoError(t, err) + assert.True(t, finalResult.HasErrors()) + assert.False(t, finalResult.HasFatalErrors()) + require.Len(t, finalResult.Values, 1) + assert.Equal(t, "10.5", finalResult.Values[0].(decimal.Decimal).String()) +} + +func Test_PipelineRunner_MultipleOutputs(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + btORM := bridgesMocks.NewORM(t) + r, _ := newRunner(t, db, btORM, cfg) + input := map[string]interface{}{"val": 2} + lggr := logger.TestLogger(t) + _, trrs, err := r.ExecuteRun(testutils.Context(t), pipeline.Spec{ + DotDagSource: ` +a [type=multiply input="$(val)" times=2] +b1 [type=multiply input="$(a)" times=2] +b2 [type=multiply input="$(a)" times=3] +c [type=median values=<[ $(b1), $(b2) ]> index=0] +a->b1->c; +a->b2->c;`, + }, pipeline.NewVarsFrom(input), lggr) + require.NoError(t, err) + require.Equal(t, 4, len(trrs)) + assert.Equal(t, false, trrs.FinalResult(lggr).HasFatalErrors()) + + // a = 4 + // (b1 = 8) + (b2 = 12) + // c = 20 / 2 + + result, err := 
trrs.FinalResult(lggr).SingularResult()
+	require.NoError(t, err)
+	assert.Equal(t, mustDecimal(t, "10").String(), result.Value.(decimal.Decimal).String())
+}
+
+// Test_PipelineRunner_MultipleTerminatingOutputs verifies that a pipeline with
+// two terminal tasks (b1 at index=0, b2 at index=1) produces both final values
+// in index order: with val=2, a=4, so b1=8 and b2=12.
+func Test_PipelineRunner_MultipleTerminatingOutputs(t *testing.T) {
+	cfg := configtest.NewTestGeneralConfig(t)
+	btORM := bridgesMocks.NewORM(t)
+	r, _ := newRunner(t, pgtest.NewSqlxDB(t), btORM, cfg)
+	input := map[string]interface{}{"val": 2}
+	lggr := logger.TestLogger(t)
+	_, trrs, err := r.ExecuteRun(testutils.Context(t), pipeline.Spec{
+		DotDagSource: `
+a [type=multiply input="$(val)" times=2]
+b1 [type=multiply input="$(a)" times=2 index=0]
+b2 [type=multiply input="$(a)" times=3 index=1]
+a->b1;
+a->b2;`,
+	}, pipeline.NewVarsFrom(input), lggr)
+	require.NoError(t, err)
+	// Three task runs total: a, b1, b2.
+	require.Equal(t, 3, len(trrs))
+	result := trrs.FinalResult(lggr)
+	assert.Equal(t, false, result.HasFatalErrors())
+
+	// Values are ordered by each terminal task's index attribute.
+	assert.Equal(t, mustDecimal(t, "8").String(), result.Values[0].(decimal.Decimal).String())
+	assert.Equal(t, mustDecimal(t, "12").String(), result.Values[1].(decimal.Decimal).String())
+}
+
+func Test_PipelineRunner_AsyncJob_Basic(t *testing.T) {
+	db := pgtest.NewSqlxDB(t)
+
+	btcUSDPairing := utils.MustUnmarshalToMap(`{"data":{"coin":"BTC","market":"USD"}}`)
+
+	// Bridge endpoint that always answers "pending" (async external adapter):
+	// it sets the X-Plugin-Pending header so the bridge task suspends the run.
+	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var reqBody adapterRequest
+		payload, err := io.ReadAll(r.Body)
+		require.NoError(t, err)
+		defer r.Body.Close()
+		err = json.Unmarshal(payload, &reqBody)
+		require.NoError(t, err)
+		// TODO: assert finding the id
+		w.Header().Set("Content-Type", "application/json")
+		w.Header().Set("X-Plugin-Pending", "true")
+		response := map[string]interface{}{}
+		require.NoError(t, json.NewEncoder(w).Encode(response))
+
+	})
+
+	// 1. 
Setup bridge + s1 := httptest.NewServer(handler) + defer s1.Close() + + bridgeFeedURL, err := url.ParseRequestURI(s1.URL) + require.NoError(t, err) + + cfg := configtest.NewTestGeneralConfig(t) + _, bt := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: bridgeFeedURL.String()}, cfg.Database()) + + // 2. Setup success HTTP + s2 := httptest.NewServer(fakePriceResponder(t, btcUSDPairing, decimal.NewFromInt(9600), "", nil)) + defer s2.Close() + + s4 := httptest.NewServer(fakeStringResponder(t, "foo-index-1")) + defer s4.Close() + s5 := httptest.NewServer(fakeStringResponder(t, "bar-index-2")) + defer s5.Close() + + btORM := bridgesMocks.NewORM(t) + btORM.On("FindBridge", bt.Name).Return(*bt, nil) + r, orm := newRunner(t, db, btORM, cfg) + + s := fmt.Sprintf(` +ds1 [type=bridge async=true name="%s" timeout=0 requestData=<{"data": {"coin": "BTC", "market": "USD"}}>] +ds1_parse [type=jsonparse lax=false path="data,result"] +ds1_multiply [type=multiply times=1000000000000000000] + +ds2 [type=http method="GET" url="%s" requestData=<{"data": {"coin": "BTC", "market": "USD"}}>] +ds2_parse [type=jsonparse lax=false path="data,result"] +ds2_multiply [type=multiply times=1000000000000000000] + +ds3 [type=http method="GET" url="blah://test.invalid" requestData=<{"data": {"coin": "BTC", "market": "USD"}}>] +ds3_parse [type=jsonparse lax=false path="data,result"] +ds3_multiply [type=multiply times=1000000000000000000] + +ds1->ds1_parse->ds1_multiply->median; +ds2->ds2_parse->ds2_multiply->median; +ds3->ds3_parse->ds3_multiply->median; + +median [type=median index=0] +ds4 [type=http method="GET" url="%s" index=1] +ds5 [type=http method="GET" url="%s" index=2] +`, bt.Name.String(), s2.URL, s4.URL, s5.URL) + _, err = pipeline.Parse(s) + require.NoError(t, err) + + spec := pipeline.Spec{DotDagSource: s} + + // Start a new run + run := pipeline.NewRun(spec, pipeline.NewVarsFrom(nil)) + // we should receive a call to CreateRun because it's contains an async task + 
orm.On("CreateRun", mock.AnythingOfType("*pipeline.Run"), mock.Anything).Return(nil).Run(func(args mock.Arguments) { + run := args.Get(0).(*pipeline.Run) + run.ID = 1 // give it a valid "id" + }).Once() + orm.On("StoreRun", mock.AnythingOfType("*pipeline.Run"), mock.Anything).Return(false, nil).Once() + lggr := logger.TestLogger(t) + incomplete, err := r.Run(testutils.Context(t), run, lggr, false, nil) + require.NoError(t, err) + require.Len(t, run.PipelineTaskRuns, 9) // 3 tasks are suspended: ds1_parse, ds1_multiply, median. ds1 is present, but contains ErrPending + require.Equal(t, true, incomplete) // still incomplete + + // TODO: test a pending run that's not marked async=true, that is not allowed + + // Trigger run resumption with no new data + orm.On("StoreRun", mock.AnythingOfType("*pipeline.Run")).Return(false, nil).Once() + incomplete, err = r.Run(testutils.Context(t), run, lggr, false, nil) + require.NoError(t, err) + require.Equal(t, true, incomplete) // still incomplete + + // Now simulate a new result coming in + task := run.ByDotID("ds1") + task.Error = null.NewString("", false) + task.Output = pipeline.JSONSerializable{ + Val: `{"data":{"result":"9700"}}` + "\n", + Valid: true, + } + // Trigger run resumption + orm.On("StoreRun", mock.AnythingOfType("*pipeline.Run"), mock.Anything).Return(false, nil).Once() + incomplete, err = r.Run(testutils.Context(t), run, lggr, false, nil) + require.NoError(t, err) + require.Equal(t, false, incomplete) // done + require.Len(t, run.PipelineTaskRuns, 12) + require.Equal(t, false, incomplete) // run is complete + + require.Len(t, run.Outputs.Val, 3) + require.Len(t, run.FatalErrors, 3) + outputs := run.Outputs.Val.([]interface{}) + assert.Equal(t, "9650000000000000000000", outputs[0].(decimal.Decimal).String()) + assert.True(t, run.FatalErrors[0].IsZero()) + assert.Equal(t, "foo-index-1", outputs[1].(string)) + assert.True(t, run.FatalErrors[1].IsZero()) + assert.Equal(t, "bar-index-2", outputs[2].(string)) + 
assert.True(t, run.FatalErrors[2].IsZero()) + + var errorResults []pipeline.TaskRun + for _, trr := range run.PipelineTaskRuns { + if trr.Result().Error != nil { + errorResults = append(errorResults, trr) + } + } + // There are three tasks in the erroring pipeline + require.Len(t, errorResults, 3) +} + +func Test_PipelineRunner_AsyncJob_InstantRestart(t *testing.T) { + db := pgtest.NewSqlxDB(t) + + btcUSDPairing := utils.MustUnmarshalToMap(`{"data":{"coin":"BTC","market":"USD"}}`) + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var reqBody adapterRequest + payload, err := io.ReadAll(r.Body) + require.NoError(t, err) + defer r.Body.Close() + err = json.Unmarshal(payload, &reqBody) + require.NoError(t, err) + require.Contains(t, reqBody.ResponseURL, "http://localhost:6688/v2/resume/") + w.Header().Set("Content-Type", "application/json") + w.Header().Set("X-Plugin-Pending", "true") + response := map[string]interface{}{} + require.NoError(t, json.NewEncoder(w).Encode(response)) + + }) + + // 1. Setup bridge + s1 := httptest.NewServer(handler) + defer s1.Close() + + bridgeFeedURL, err := url.ParseRequestURI(s1.URL) + require.NoError(t, err) + + cfg := configtest.NewTestGeneralConfig(t) + _, bt := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: bridgeFeedURL.String()}, cfg.Database()) + + // 2. 
Setup success HTTP + s2 := httptest.NewServer(fakePriceResponder(t, btcUSDPairing, decimal.NewFromInt(9600), "", nil)) + defer s2.Close() + + s4 := httptest.NewServer(fakeStringResponder(t, "foo-index-1")) + defer s4.Close() + s5 := httptest.NewServer(fakeStringResponder(t, "bar-index-2")) + defer s5.Close() + + btORM := bridgesMocks.NewORM(t) + btORM.On("FindBridge", bt.Name).Return(*bt, nil) + + r, orm := newRunner(t, db, btORM, cfg) + + s := fmt.Sprintf(` +ds1 [type=bridge async=true name="%s" timeout=0 requestData=<{"data": {"coin": "BTC", "market": "USD"}}>] +ds1_parse [type=jsonparse lax=false path="data,result"] +ds1_multiply [type=multiply times=1000000000000000000] + +ds2 [type=http method="GET" url="%s" requestData=<{"data": {"coin": "BTC", "market": "USD"}}>] +ds2_parse [type=jsonparse lax=false path="data,result"] +ds2_multiply [type=multiply times=1000000000000000000] + +ds3 [type=http method="GET" url="blah://test.invalid" requestData=<{"data": {"coin": "BTC", "market": "USD"}}>] +ds3_parse [type=jsonparse lax=false path="data,result"] +ds3_multiply [type=multiply times=1000000000000000000] + +ds1->ds1_parse->ds1_multiply->median; +ds2->ds2_parse->ds2_multiply->median; +ds3->ds3_parse->ds3_multiply->median; + +median [type=median index=0] +ds4 [type=http method="GET" url="%s" index=1] +ds5 [type=http method="GET" url="%s" index=2] +`, bt.Name.String(), s2.URL, s4.URL, s5.URL) + _, err = pipeline.Parse(s) + require.NoError(t, err) + + spec := pipeline.Spec{DotDagSource: s} + + // Start a new run + run := pipeline.NewRun(spec, pipeline.NewVarsFrom(nil)) + // we should receive a call to CreateRun because it's contains an async task + orm.On("CreateRun", mock.AnythingOfType("*pipeline.Run"), mock.Anything).Return(nil).Run(func(args mock.Arguments) { + run := args.Get(0).(*pipeline.Run) + run.ID = 1 // give it a valid "id" + }).Once() + // Simulate updated task run data + orm.On("StoreRun", mock.AnythingOfType("*pipeline.Run"), mock.Anything).Return(true, 
nil).Run(func(args mock.Arguments) { + run := args.Get(0).(*pipeline.Run) + // Now simulate a new result coming in while we were running + task := run.ByDotID("ds1") + task.Error = null.NewString("", false) + task.Output = pipeline.JSONSerializable{ + Val: `{"data":{"result":"9700"}}` + "\n", + Valid: true, + } + }).Once() + // StoreRun is called again to store the final result + orm.On("StoreRun", mock.AnythingOfType("*pipeline.Run"), mock.Anything).Return(false, nil).Once() + incomplete, err := r.Run(testutils.Context(t), run, logger.TestLogger(t), false, nil) + require.NoError(t, err) + require.Len(t, run.PipelineTaskRuns, 12) + require.Equal(t, false, incomplete) // run is complete + + require.Len(t, run.Outputs.Val, 3) + require.Len(t, run.FatalErrors, 3) + outputs := run.Outputs.Val.([]interface{}) + assert.Equal(t, "9650000000000000000000", outputs[0].(decimal.Decimal).String()) + assert.True(t, run.FatalErrors[0].IsZero()) + assert.Equal(t, "foo-index-1", outputs[1].(string)) + assert.True(t, run.FatalErrors[1].IsZero()) + assert.Equal(t, "bar-index-2", outputs[2].(string)) + assert.True(t, run.FatalErrors[2].IsZero()) + + var errorResults []pipeline.TaskRun + for _, trr := range run.PipelineTaskRuns { + if trr.Result().Error != nil { + errorResults = append(errorResults, trr) + } + } + // There are three tasks in the erroring pipeline + require.Len(t, errorResults, 3) +} + +func Test_PipelineRunner_LowercaseOutputs(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + btORM := bridgesMocks.NewORM(t) + r, _ := newRunner(t, db, btORM, cfg) + input := map[string]interface{}{ + "first": "camelCase", + "second": "UPPERCASE", + } + lggr := logger.TestLogger(t) + _, trrs, err := r.ExecuteRun(testutils.Context(t), pipeline.Spec{ + DotDagSource: ` +a [type=lowercase input="$(first)"] +`, + }, pipeline.NewVarsFrom(input), lggr) + require.NoError(t, err) + require.Equal(t, 1, len(trrs)) + assert.Equal(t, false, 
trrs.FinalResult(lggr).HasFatalErrors()) + + result, err := trrs.FinalResult(lggr).SingularResult() + require.NoError(t, err) + assert.Equal(t, "camelcase", result.Value.(string)) +} + +func Test_PipelineRunner_UppercaseOutputs(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + btORM := bridgesMocks.NewORM(t) + r, _ := newRunner(t, db, btORM, cfg) + input := map[string]interface{}{ + "first": "somerAnDomTEST", + } + lggr := logger.TestLogger(t) + _, trrs, err := r.ExecuteRun(testutils.Context(t), pipeline.Spec{ + DotDagSource: ` +a [type=uppercase input="$(first)"] +`, + }, pipeline.NewVarsFrom(input), lggr) + require.NoError(t, err) + require.Equal(t, 1, len(trrs)) + assert.Equal(t, false, trrs.FinalResult(lggr).HasFatalErrors()) + + result, err := trrs.FinalResult(lggr).SingularResult() + require.NoError(t, err) + assert.Equal(t, "SOMERANDOMTEST", result.Value.(string)) +} + +func Test_PipelineRunner_HexDecodeOutputs(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + btORM := bridgesMocks.NewORM(t) + r, _ := newRunner(t, db, btORM, cfg) + input := map[string]interface{}{ + "astring": "0x12345678", + } + lggr := logger.TestLogger(t) + _, trrs, err := r.ExecuteRun(testutils.Context(t), pipeline.Spec{ + DotDagSource: ` +a [type=hexdecode input="$(astring)"] +`, + }, pipeline.NewVarsFrom(input), lggr) + require.NoError(t, err) + require.Equal(t, 1, len(trrs)) + assert.Equal(t, false, trrs.FinalResult(lggr).HasFatalErrors()) + + result, err := trrs.FinalResult(lggr).SingularResult() + require.NoError(t, err) + assert.Equal(t, []byte{0x12, 0x34, 0x56, 0x78}, result.Value) +} + +func Test_PipelineRunner_HexEncodeAndDecode(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + btORM := bridgesMocks.NewORM(t) + r, _ := newRunner(t, db, btORM, cfg) + inputBytes := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit") + input := 
map[string]interface{}{ + "input_val": inputBytes, + } + lggr := logger.TestLogger(t) + _, trrs, err := r.ExecuteRun(testutils.Context(t), pipeline.Spec{ + DotDagSource: ` +en [type=hexencode input="$(input_val)"] +de [type=hexdecode] +en->de +`, + }, pipeline.NewVarsFrom(input), lggr) + require.NoError(t, err) + require.Equal(t, 2, len(trrs)) + assert.Equal(t, false, trrs.FinalResult(lggr).HasFatalErrors()) + + result, err := trrs.FinalResult(lggr).SingularResult() + require.NoError(t, err) + assert.Equal(t, inputBytes, result.Value) +} + +func Test_PipelineRunner_Base64DecodeOutputs(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + btORM := bridgesMocks.NewORM(t) + r, _ := newRunner(t, db, btORM, cfg) + input := map[string]interface{}{ + "astring": "SGVsbG8sIHBsYXlncm91bmQ=", + } + lggr := logger.TestLogger(t) + _, trrs, err := r.ExecuteRun(testutils.Context(t), pipeline.Spec{ + DotDagSource: ` +a [type=base64decode input="$(astring)"] +`, + }, pipeline.NewVarsFrom(input), lggr) + require.NoError(t, err) + require.Equal(t, 1, len(trrs)) + assert.Equal(t, false, trrs.FinalResult(lggr).HasFatalErrors()) + + result, err := trrs.FinalResult(lggr).SingularResult() + require.NoError(t, err) + assert.Equal(t, []byte("Hello, playground"), result.Value) +} + +func Test_PipelineRunner_Base64EncodeAndDecode(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + btORM := bridgesMocks.NewORM(t) + r, _ := newRunner(t, db, btORM, cfg) + inputBytes := []byte("[{\"add\": \"weather\", \"during\": true}, 1478647067]") + input := map[string]interface{}{ + "input_val": inputBytes, + } + lggr := logger.TestLogger(t) + _, trrs, err := r.ExecuteRun(testutils.Context(t), pipeline.Spec{ + DotDagSource: ` +en [type=base64encode input="$(input_val)"] +de [type=base64decode] +en->de +`, + }, pipeline.NewVarsFrom(input), lggr) + require.NoError(t, err) + require.Equal(t, 2, len(trrs)) + assert.Equal(t, false, 
trrs.FinalResult(lggr).HasFatalErrors()) + + result, err := trrs.FinalResult(lggr).SingularResult() + require.NoError(t, err) + assert.Equal(t, inputBytes, result.Value) +} + +func Test_PipelineRunner_ExecuteRun(t *testing.T) { + t.Run("uses cached *Pipeline if available", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: cfg, KeyStore: ethKeyStore}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + lggr := logger.TestLogger(t) + r := pipeline.NewRunner(nil, nil, cfg.JobPipeline(), cfg.WebServer(), legacyChains, ethKeyStore, nil, lggr, nil, nil) + + template := ` +succeed [type=memo value=%d] +succeed; +` + + spec := pipeline.Spec{DotDagSource: fmt.Sprintf(template, 1)} + vars := pipeline.NewVarsFrom(nil) + + _, trrs, err := r.ExecuteRun(testutils.Context(t), spec, vars, lggr) + require.NoError(t, err) + require.Len(t, trrs, 1) + assert.Equal(t, "1", trrs[0].Result.Value.(pipeline.ObjectParam).DecimalValue.Decimal().String()) + + // does not automatically cache + require.Nil(t, spec.Pipeline) + + // initialize it + spec.Pipeline, err = spec.ParsePipeline() + require.NoError(t, err) + + // even though this is set to 2, it should use the cached version + spec.DotDagSource = fmt.Sprintf(template, 2) + + _, trrs, err = r.ExecuteRun(testutils.Context(t), spec, vars, lggr) + require.NoError(t, err) + require.Len(t, trrs, 1) + assert.Equal(t, "1", trrs[0].Result.Value.(pipeline.ObjectParam).DecimalValue.Decimal().String()) + }) +} diff --git a/core/services/pipeline/scheduler.go b/core/services/pipeline/scheduler.go new file mode 100644 index 00000000..0032d827 --- /dev/null +++ b/core/services/pipeline/scheduler.go @@ -0,0 +1,298 @@ +package pipeline + +import ( + "context" + "sort" + "time" + + "github.com/jpillora/backoff" + 
"github.com/pkg/errors"
+	"gopkg.in/guregu/null.v4"
+
+	"github.com/goplugin/pluginv3.0/v2/core/logger"
+)
+
+// newMemoryTaskRun builds an in-memory run for task, snapshotting the results
+// of its inputs that have PropagateResult set, ordered by the input task's
+// OutputIndex, from the scheduler's results table.
+func (s *scheduler) newMemoryTaskRun(task Task, vars Vars) *memoryTaskRun {
+	run := &memoryTaskRun{task: task, vars: vars}
+
+	propagatableInputs := 0
+	for _, i := range task.Inputs() {
+		if i.PropagateResult {
+			propagatableInputs++
+		}
+	}
+	// fill in the inputs, fast path for no inputs
+	if propagatableInputs != 0 {
+		// construct a list of inputs, sorted by OutputIndex
+		type input struct {
+			index  int32
+			result Result
+		}
+		inputs := make([]input, 0, propagatableInputs)
+		// NOTE: we could just allocate via make, then assign directly to run.inputs[i.OutputIndex()]
+		// if we're confident that indices are within range
+		for _, i := range task.Inputs() {
+			if i.PropagateResult {
+				inputs = append(inputs, input{index: i.InputTask.OutputIndex(), result: s.results[i.InputTask.ID()].Result})
+			}
+		}
+		sort.Slice(inputs, func(i, j int) bool {
+			return inputs[i].index < inputs[j].index
+		})
+		run.inputs = make([]Result, len(inputs))
+		for i, input := range inputs {
+			run.inputs[i] = input.result
+		}
+	}
+
+	return run
+}
+
+// scheduler walks a pipeline DAG: it dispatches runnable tasks onto taskCh
+// and consumes completed results from resultCh (via report). `dependencies`
+// counts unfinished inputs per task ID; `waiting` counts in-flight tasks;
+// `pending` is set when any task suspends (async); `exiting` is set when a
+// failEarly task fails and the remaining work is being drained.
+type scheduler struct {
+	pipeline     *Pipeline
+	run          *Run
+	dependencies map[int]uint
+	waiting      uint
+	results      map[int]TaskRunResult
+	vars         Vars
+	logger       logger.Logger
+
+	pending bool
+	exiting bool
+
+	taskCh   chan *memoryTaskRun
+	resultCh chan TaskRunResult
+}
+
+// newScheduler initializes the dependency counts from each task's inputs,
+// replays any results already persisted on run (resumption), and immediately
+// enqueues every task whose dependencies are already satisfied.
+func newScheduler(p *Pipeline, run *Run, vars Vars, lggr logger.Logger) *scheduler {
+	lggr = lggr.Named("Scheduler")
+	dependencies := make(map[int]uint, len(p.Tasks))
+
+	for id, task := range p.Tasks {
+		dependencies[id] = uint(len(task.Inputs()))
+	}
+
+	s := &scheduler{
+		pipeline:     p,
+		run:          run,
+		dependencies: dependencies,
+		results:      make(map[int]TaskRunResult, len(p.Tasks)),
+		vars:         vars,
+		logger:       lggr,
+
+		// taskCh should never block
+		taskCh:   make(chan *memoryTaskRun, len(dependencies)),
+		resultCh: make(chan TaskRunResult),
+	}
+
+	// if there's results already present on Run, then this is a resumption. Loop over them and fill results table
+	s.reconstructResults()
+
+	// immediately schedule all doable tasks
+	for id, task := range p.Tasks {
+		// skip tasks that are not ready
+		if s.dependencies[id] != 0 {
+			continue
+		}
+
+		// skip finished tasks
+		if _, exists := s.results[id]; exists {
+			continue
+		}
+
+		run := s.newMemoryTaskRun(task, s.vars.Copy())
+
+		lggr.Tracew("scheduling task run", "dot_id", task.DotID(), "attempts", run.attempts)
+
+		s.taskCh <- run
+		s.waiting++
+	}
+
+	return s
+}
+
+// reconstructResults replays task runs already persisted on the Run into the
+// in-memory results table and vars, and decrements the dependency count of
+// each downstream task. Pending (suspended async) runs are skipped so they
+// execute again. Panics if a persisted run references an unknown dot ID,
+// since that indicates a corrupted run/pipeline pair.
+func (s *scheduler) reconstructResults() {
+	// if there's results already present on Run, then this is a resumption. Loop over them and fill results table
+	for _, r := range s.run.PipelineTaskRuns {
+		task := s.pipeline.ByDotID(r.DotID)
+
+		if task == nil {
+			panic("can't find task by dot id")
+		}
+
+		if r.IsPending() {
+			continue
+		}
+
+		result := Result{}
+
+		if r.Error.Valid {
+			result.Error = errors.New(r.Error.String)
+		}
+
+		if r.Output.Valid {
+			result.Value = r.Output.Val
+		}
+
+		s.results[task.ID()] = TaskRunResult{
+			Task:       task,
+			Result:     result,
+			CreatedAt:  r.CreatedAt,
+			FinishedAt: r.FinishedAt,
+		}
+
+		// store the result in vars
+		var err error
+		if result.Error != nil {
+			err = s.vars.Set(task.DotID(), result.Error)
+		} else {
+			err = s.vars.Set(task.DotID(), result.Value)
+		}
+		if err != nil {
+			s.logger.Panicf("Vars.Set error: %v", err)
+		}
+
+		// mark all outputs as complete
+		for _, output := range task.Outputs() {
+			id := output.ID()
+			s.dependencies[id]--
+		}
+	}
+}
+
+// Run drives the scheduling loop. It blocks until every in-flight task has
+// reported a result, handling retries (with exponential backoff), failEarly
+// termination, and pending (async) suspension along the way, then closes
+// taskCh to signal that no more work will be dispatched.
+func (s *scheduler) Run() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for s.waiting > 0 {
+		// we don't "for result in resultCh" because it would stall if the
+		// pipeline is completely empty
+
+		result := <-s.resultCh
+		// TODO: if for some reason the cleanup didn't succeed and we're stuck waiting for reports forever
+		// we should be able to timeout and finish shutting down
+		// See: https://app.shortcut.com/pluginlabs/story/21225/straighten-out-and-clarify-context-usage-in-the-pipeline
+
+		s.waiting--
+
+		// retrieve previous attempt count
+		result.Attempts = s.results[result.Task.ID()].Attempts
+
+		// only count as an attempt if the job actually ran. If we're exiting then it got cancelled
+		if !s.exiting {
+			result.Attempts++
+		}
+
+		// store task run
+		s.results[result.Task.ID()] = result
+
+		// catch the pending state, we will keep the pipeline running until no more progress is made
+		if result.runInfo.IsPending {
+			s.pending = true
+
+			// skip output wrangling because this task isn't actually complete yet
+			continue
+		}
+
+		// store the result in vars
+		var err error
+		if result.Result.Error != nil {
+			err = s.vars.Set(result.Task.DotID(), result.Result.Error)
+		} else {
+			err = s.vars.Set(result.Task.DotID(), result.Result.Value)
+		}
+		if err != nil {
+			s.logger.Panicf("Vars.Set error: %v", err)
+		}
+
+		// if the task was marked as failEarly, and the result is a fail
+		if result.Result.Error != nil && result.Task.Base().FailEarly {
+			// drain remaining jobs (continue the loop until waiting = 0) then exit
+			s.exiting = true
+			cancel() // cleanup: terminate pending retries
+
+			// mark remaining jobs as cancelled
+			s.markRemaining(ErrCancelled)
+		}
+
+		if s.exiting {
+			// skip scheduling dependencies if we're exiting early
+			continue
+		}
+
+		// if task hasn't reached its max retry count yet, we schedule it again
+		if result.Attempts < uint(result.Task.TaskRetries()) && result.Result.Error != nil {
+			// we immediately increase the in-flight counter so the pipeline doesn't terminate
+			// while we wait for the next retry
+			s.waiting++
+
+			backoff := backoff.Backoff{
+				Factor: 2,
+				Min:    result.Task.TaskMinBackoff(),
+				Max:    result.Task.TaskMaxBackoff(),
+			}
+
+			go func(vars Vars) {
+				select {
+				case <-ctx.Done():
+					// report back so the waiting counter gets decreased
+					now := time.Now()
+					s.report(context.Background(), TaskRunResult{
+						Task:       result.Task,
+						Result:     Result{Error: ErrCancelled},
+						CreatedAt:  now, // TODO: more accurate start time
+						FinishedAt: null.TimeFrom(now),
+					})
+				case <-time.After(backoff.ForAttempt(float64(result.Attempts - 1))): // we subtract 1 because backoff 0-indexes
+					// schedule a new attempt
+					run := s.newMemoryTaskRun(result.Task, vars)
+					run.attempts = result.Attempts
+					s.logger.Tracew("scheduling task run", "dot_id", run.task.DotID(), "attempts", run.attempts)
+					s.taskCh <- run
+				}
+			}(s.vars.Copy()) // must Copy() from current goroutine
+
+			// skip scheduling dependencies since the task is not complete yet
+			continue
+		}
+
+		for _, output := range result.Task.Outputs() {
+			id := output.ID()
+			s.dependencies[id]--
+
+			// if all dependencies are done, schedule task run
+			if s.dependencies[id] == 0 {
+				task := s.pipeline.Tasks[id]
+				run := s.newMemoryTaskRun(task, s.vars.Copy())
+
+				s.logger.Tracew("scheduling task run", "dot_id", run.task.DotID(), "attempts", run.attempts)
+				s.taskCh <- run
+				s.waiting++
+			}
+		}
+
+	}
+
+	close(s.taskCh)
+}
+
+// markRemaining records err as the result for every task that has not yet
+// produced one; used when exiting early (e.g. after a failEarly failure) so
+// the run still has a result for every task.
+func (s *scheduler) markRemaining(err error) {
+	now := time.Now()
+	for _, task := range s.pipeline.Tasks {
+		if _, ok := s.results[task.ID()]; !ok {
+			s.results[task.ID()] = TaskRunResult{
+				Task:       task,
+				Result:     Result{Error: err},
+				CreatedAt:  now, // TODO: more accurate start time
+				FinishedAt: null.TimeFrom(now),
+			}
+		}
+	}
+}
+
+// report delivers a task result to the scheduling loop, logging and dropping
+// it if ctx expires before the loop can receive it.
+func (s *scheduler) report(ctx context.Context, result TaskRunResult) {
+	select {
+	case s.resultCh <- result:
+	case <-ctx.Done():
+		s.logger.Errorw("pipeline.scheduler: discarding result; report context timed out", "result", result, "err", ctx.Err())
+	}
+}
diff --git a/core/services/pipeline/scheduler_test.go b/core/services/pipeline/scheduler_test.go
new file mode 100644
index 00000000..45ea9f85
--- /dev/null
+++ b/core/services/pipeline/scheduler_test.go
@@ -0,0 +1,170 @@
+package pipeline
+
+import (
+	"testing"
+	"time"
+
+	"github.com/google/uuid"
+	
"github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +type event struct { + expected string + result Result +} + +func TestScheduler(t *testing.T) { + // NOTE: task type does not matter in the test cases, it's just there so it's parsed successfully + tests := []struct { + name string + spec string + events []event + assertion func(t *testing.T, p Pipeline, results map[int]TaskRunResult) + }{ + { + name: "fail early immediately cancels subsequent tasks", + spec: ` + a [type=median failEarly=true] + b [type=median index=0] + a -> b`, + events: []event{ + { + expected: "a", + result: Result{Error: ErrTaskRunFailed}, + }, + // no further events for `b` + }, + assertion: func(t *testing.T, p Pipeline, results map[int]TaskRunResult) { + result := results[p.ByDotID("b").ID()] + // b is marked as cancelled + require.Equal(t, uint(0), result.Attempts) + require.Equal(t, ErrCancelled, result.Result.Error) + }, + }, + { + name: "retry: try task N times, then fail it", + spec: ` + a [type=median retries=3 minBackoff="1us" maxBackoff="1us"] + b [type=median index=0] + a -> b`, + events: []event{ + { + expected: "a", + result: Result{Error: ErrTaskRunFailed}, + }, + { + expected: "a", + result: Result{Error: ErrTaskRunFailed}, + }, + { + expected: "a", + result: Result{Error: ErrTimeout}, + }, + { + expected: "b", + result: Result{Value: 1}, + }, + }, + assertion: func(t *testing.T, p Pipeline, results map[int]TaskRunResult) { + result := results[p.ByDotID("a").ID()] + // a is marked as errored with the last error in sequence + require.Equal(t, uint(3), result.Attempts) + require.Equal(t, ErrTimeout, result.Result.Error) + }, + }, + { + name: "retry task: proceed when it succeeds", + spec: ` + a [type=median retries=3 minBackoff="1us" maxBackoff="1us"] + b [type=median index=0] + a -> b`, + events: []event{ + { + expected: "a", + result: 
Result{Error: ErrTaskRunFailed}, + }, + { + expected: "a", + result: Result{Value: 1}, + }, + { + expected: "b", + result: Result{Value: 1}, + }, + }, + assertion: func(t *testing.T, p Pipeline, results map[int]TaskRunResult) { + result := results[p.ByDotID("a").ID()] + // a has no errors + require.Equal(t, nil, result.Result.Error) + require.Equal(t, 1, result.Result.Value) + require.Equal(t, uint(2), result.Attempts) + }, + }, + { + name: "retry task + failEarly: cancel pending retries", + spec: ` + a [type=median retries=3 minBackoff="10ms" maxBackoff="10ms" index=0] + b [type=median failEarly=true index=1] + `, + events: []event{ + { + expected: "a", + result: Result{Error: ErrTaskRunFailed}, + }, + { + expected: "b", + result: Result{Error: ErrTaskRunFailed}, + }, + // now `b` failing early should stop any retries on `a` + }, + assertion: func(t *testing.T, p Pipeline, results map[int]TaskRunResult) { + result := results[p.ByDotID("a").ID()] + // a only has a single attempt and it got cancelled + require.Equal(t, uint(1), result.Attempts) + require.Equal(t, ErrCancelled, result.Result.Error) + }, + }, + } + + for _, test := range tests { + p, err := Parse(test.spec) + require.NoError(t, err) + vars := NewVarsFrom(nil) + run := NewRun(Spec{}, vars) + s := newScheduler(p, run, vars, logger.TestLogger(t)) + + go s.Run() + + for _, event := range test.events { + select { + case taskRun := <-s.taskCh: + require.Equal(t, event.expected, taskRun.task.DotID()) + now := time.Now() + s.report(testutils.Context(t), TaskRunResult{ + ID: uuid.New(), + Task: taskRun.task, + Result: event.result, + FinishedAt: null.TimeFrom(now), + CreatedAt: now, + }) + case <-time.After(time.Second): + t.Fatal("timed out waiting for task run") + } + } + + select { + case _, ok := <-s.taskCh: + // channel is now closed, if it's not that means there's more tasks + require.Falsef(t, ok, "scheduler has more tasks to schedule") + case <-time.After(time.Second): + t.Fatal("timed out waiting for 
scheduler to halt") + } + + test.assertion(t, *p, s.results) + + } +} diff --git a/core/services/pipeline/task.any.go b/core/services/pipeline/task.any.go new file mode 100644 index 00000000..ee3e23e6 --- /dev/null +++ b/core/services/pipeline/task.any.go @@ -0,0 +1,52 @@ +package pipeline + +import ( + "context" + "crypto/rand" + "math/big" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// AnyTask picks a value at random from the set of non-errored inputs. +// If there are zero non-errored inputs then it returns an error. +type AnyTask struct { + BaseTask `mapstructure:",squash"` +} + +var _ Task = (*AnyTask)(nil) + +func (t *AnyTask) Type() TaskType { + return TaskTypeAny +} + +func (t *AnyTask) Run(_ context.Context, _ logger.Logger, _ Vars, inputs []Result) (result Result, runInfo RunInfo) { + if len(inputs) == 0 { + return Result{Error: errors.Wrapf(ErrWrongInputCardinality, "AnyTask requires at least 1 input")}, runInfo + } + + var answers []interface{} + + for _, input := range inputs { + if input.Error != nil { + continue + } + + answers = append(answers, input.Value) + } + + if len(answers) == 0 { + return Result{Error: errors.Wrapf(ErrBadInput, "There were zero non-errored inputs")}, runInfo + } + + nBig, err := rand.Int(rand.Reader, big.NewInt(int64(len(answers)))) + if err != nil { + return Result{Error: errors.Wrapf(err, "Failed to generate random number for picking input")}, retryableRunInfo() + } + i := int(nBig.Int64()) + answer := answers[i] + + return Result{Value: answer}, runInfo +} diff --git a/core/services/pipeline/task.any_test.go b/core/services/pipeline/task.any_test.go new file mode 100644 index 00000000..60dac2b0 --- /dev/null +++ b/core/services/pipeline/task.any_test.go @@ -0,0 +1,82 @@ +package pipeline_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestAnyTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + inputs []pipeline.Result + want pipeline.Result + }{ + { + "zero inputs", + []pipeline.Result{}, + pipeline.Result{Error: pipeline.ErrWrongInputCardinality}, + }, + { + "one non-errored decimal input", + []pipeline.Result{{Value: mustDecimal(t, "42")}}, + pipeline.Result{Value: mustDecimal(t, "42")}, + }, + { + "one errored decimal input", + []pipeline.Result{{Value: mustDecimal(t, "42"), Error: errors.New("foo")}}, + pipeline.Result{Error: pipeline.ErrBadInput}, + }, + { + "one non-errored string input", + []pipeline.Result{{Value: "42"}}, + pipeline.Result{Value: "42"}, + }, + { + "one errored input and one non-errored input", + []pipeline.Result{{Value: "42"}, {Error: errors.New("foo"), Value: "1"}}, + pipeline.Result{Value: "42"}, + }, + { + "two errored inputs", + []pipeline.Result{{Value: "42", Error: errors.New("bar")}, {Error: errors.New("foo"), Value: "1"}}, + pipeline.Result{Error: pipeline.ErrBadInput}, + }, + { + "two non-errored inputs with one errored input", + []pipeline.Result{{Value: "42"}, {Value: "42"}, {Error: errors.New("foo")}}, + pipeline.Result{Value: "42"}, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + task := pipeline.AnyTask{} + output, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if output.Error != nil { + require.Equal(t, test.want.Error, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + switch test.want.Value.(type) { + case *decimal.Decimal: + require.Equal(t, test.want.Value.(*decimal.Decimal).String(), output.Value.(*decimal.Decimal).String()) + default: + 
require.Equal(t, test.want.Value, output.Value) + } + require.NoError(t, output.Error) + } + }) + } +} diff --git a/core/services/pipeline/task.base.go b/core/services/pipeline/task.base.go new file mode 100644 index 00000000..087f585f --- /dev/null +++ b/core/services/pipeline/task.base.go @@ -0,0 +1,79 @@ +package pipeline + +import ( + "time" + + "github.com/google/uuid" + + "github.com/goplugin/pluginv3.0/v2/core/null" +) + +type BaseTask struct { + outputs []Task + inputs []TaskDependency + + id int + dotID string + Index int32 `mapstructure:"index" json:"-" ` + Timeout *time.Duration `mapstructure:"timeout"` + FailEarly bool `mapstructure:"failEarly"` + + Retries null.Uint32 `mapstructure:"retries"` + MinBackoff time.Duration `mapstructure:"minBackoff"` + MaxBackoff time.Duration `mapstructure:"maxBackoff"` + + uuid uuid.UUID +} + +func NewBaseTask(id int, dotID string, inputs []TaskDependency, outputs []Task, index int32) BaseTask { + return BaseTask{id: id, dotID: dotID, inputs: inputs, outputs: outputs, Index: index} +} + +func (t *BaseTask) Base() *BaseTask { + return t +} + +func (t BaseTask) ID() int { + return t.id +} + +func (t BaseTask) DotID() string { + return t.dotID +} + +func (t BaseTask) OutputIndex() int32 { + return t.Index +} + +func (t BaseTask) Outputs() []Task { + return t.outputs +} + +func (t BaseTask) Inputs() []TaskDependency { + return t.inputs +} + +func (t BaseTask) TaskTimeout() (time.Duration, bool) { + if t.Timeout == nil { + return time.Duration(0), false + } + return *t.Timeout, true +} + +func (t BaseTask) TaskRetries() uint32 { + return t.Retries.Uint32 +} + +func (t BaseTask) TaskMinBackoff() time.Duration { + if t.MinBackoff > 0 { + return t.MinBackoff + } + return time.Second * 5 +} + +func (t BaseTask) TaskMaxBackoff() time.Duration { + if t.MinBackoff > 0 { + return t.MaxBackoff + } + return time.Minute +} diff --git a/core/services/pipeline/task.base64decode.go b/core/services/pipeline/task.base64decode.go new file 
mode 100644 index 00000000..a1939b9c --- /dev/null +++ b/core/services/pipeline/task.base64decode.go @@ -0,0 +1,48 @@ +package pipeline + +import ( + "context" + "encoding/base64" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// bytes +type Base64DecodeTask struct { + BaseTask `mapstructure:",squash"` + Input string `json:"input"` +} + +var _ Task = (*Base64DecodeTask)(nil) + +func (t *Base64DecodeTask) Type() TaskType { + return TaskTypeBase64Decode +} + +func (t *Base64DecodeTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var input StringParam + + err = multierr.Combine( + errors.Wrap(ResolveParam(&input, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), "input"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + bs, err := base64.StdEncoding.DecodeString(input.String()) + if err != nil { + return Result{Error: errors.Wrap(err, "failed to decode base64 string")}, runInfo + } + + return Result{Value: bs}, runInfo +} diff --git a/core/services/pipeline/task.base64decode_test.go b/core/services/pipeline/task.base64decode_test.go new file mode 100644 index 00000000..e5e3eec4 --- /dev/null +++ b/core/services/pipeline/task.base64decode_test.go @@ -0,0 +1,76 @@ +package pipeline_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestBase64DecodeTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + result []byte + error string + }{ + + // success + {"happy", 
"SGVsbG8sIHBsYXlncm91bmQ=", []byte("Hello, playground"), ""}, + {"empty input", "", []byte{}, ""}, + + // failure + {"wrong characters", "S.G_VsbG8sIHBsYXlncm91bmQ=", nil, "failed to decode base64 string"}, + } + + for _, test := range tests { + assertOK := func(result pipeline.Result, runInfo pipeline.RunInfo) { + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if test.error == "" { + require.NoError(t, result.Error) + require.Equal(t, test.result, result.Value) + } else { + require.ErrorContains(t, result.Error, test.error) + } + } + t.Run(test.name, func(t *testing.T) { + t.Run("without vars through job DAG", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.Base64DecodeTask{BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0)} + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: test.input}})) + }) + t.Run("without vars through input param", func(t *testing.T) { + inputStr := fmt.Sprintf("%v", test.input) + if inputStr == "" { + // empty input parameter is indistinguishable from not providing it at all + // in that case the task will use an input defined by the job DAG + return + } + vars := pipeline.NewVarsFrom(nil) + task := pipeline.Base64DecodeTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: inputStr, + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + t.Run("with vars", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": test.input}, + }) + task := pipeline.Base64DecodeTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: "$(foo.bar)", + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + }) + } +} diff --git a/core/services/pipeline/task.base64encode.go b/core/services/pipeline/task.base64encode.go new file mode 100644 index 00000000..434834ed --- 
/dev/null +++ b/core/services/pipeline/task.base64encode.go @@ -0,0 +1,52 @@ +package pipeline + +import ( + "context" + "encoding/base64" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// string +type Base64EncodeTask struct { + BaseTask `mapstructure:",squash"` + Input string `json:"input"` +} + +var _ Task = (*Base64EncodeTask)(nil) + +func (t *Base64EncodeTask) Type() TaskType { + return TaskTypeBase64Decode +} + +func (t *Base64EncodeTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var stringInput StringParam + err = multierr.Combine( + errors.Wrap(ResolveParam(&stringInput, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), "input"), + ) + if err == nil { + // string + return Result{Value: base64.StdEncoding.EncodeToString([]byte(stringInput.String()))}, runInfo + } + + var bytesInput BytesParam + err = multierr.Combine( + errors.Wrap(ResolveParam(&bytesInput, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), "input"), + ) + if err == nil { + // bytes + return Result{Value: base64.StdEncoding.EncodeToString(bytesInput)}, runInfo + } + + return Result{Error: err}, runInfo +} diff --git a/core/services/pipeline/task.base64encode_test.go b/core/services/pipeline/task.base64encode_test.go new file mode 100644 index 00000000..2beb4a93 --- /dev/null +++ b/core/services/pipeline/task.base64encode_test.go @@ -0,0 +1,97 @@ +package pipeline_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func 
TestBase64EncodeTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + result string + error string + }{ + + // success + {"string input 1", "Hello, playground", "SGVsbG8sIHBsYXlncm91bmQ=", ""}, + {"string input 2", "=test=test=", "PXRlc3Q9dGVzdD0=", ""}, + {"empty string", "", "", ""}, + {"bytes input 1", []byte{0xaa, 0xbb, 0xcc, 0xdd}, "qrvM3Q==", ""}, + {"empty bytes", []byte{}, "", ""}, + + // failure (unsupported types) + {"int", 234, "", "bad input for task"}, + {"bool", false, "", "bad input for task"}, + {"float", 3.14, "", "bad input for task"}, + } + + for _, test := range tests { + assertOK := func(result pipeline.Result, runInfo pipeline.RunInfo) { + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if test.error == "" { + require.NoError(t, result.Error) + require.Equal(t, test.result, result.Value) + } else { + require.ErrorContains(t, result.Error, test.error) + } + } + t.Run(test.name, func(t *testing.T) { + t.Run("without vars through job DAG", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.Base64EncodeTask{BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0)} + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: test.input}})) + }) + t.Run("with vars", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": test.input}, + }) + task := pipeline.Base64EncodeTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: "$(foo.bar)", + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + }) + } +} + +func TestBase64EncodeTaskInputParamLiteral(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + result string + }{ + // Only strings can be passed via input param literals (other types will get converted to strings anyway) + {"string input 1", "Hello, playground", 
"SGVsbG8sIHBsYXlncm91bmQ="}, + {"string input 2", "=test=test=", "PXRlc3Q9dGVzdD0="}, + {"int gets converted to a string", 234, "MjM0"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.Base64EncodeTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: fmt.Sprintf("%v", test.input), + } + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{}) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.NoError(t, result.Error) + require.Equal(t, test.result, result.Value) + }) + } +} diff --git a/core/services/pipeline/task.bridge.go b/core/services/pipeline/task.bridge.go new file mode 100644 index 00000000..59f7e14d --- /dev/null +++ b/core/services/pipeline/task.bridge.go @@ -0,0 +1,261 @@ +package pipeline + +import ( + "context" + "database/sql" + "encoding/json" + "net/http" + "net/url" + "path" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline/internal/eautils" +) + +// NOTE: These metrics generate a new label per bridge, this should be safe +// since the number of bridges is almost always relatively small (<< 1000) +// +// We already have promHTTPFetchTime but the bridge-specific gauges allow for +// more granular metrics +var ( + promBridgeLatency = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "bridge_latency_seconds", + Help: "Bridge latency in seconds scoped by name", + }, + []string{"name"}, + ) + promBridgeErrors = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "bridge_errors_total", + Help: "Bridge error count scoped by name", + }, + []string{"name"}, + ) + promBridgeCacheHits = 
promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "bridge_cache_hits_total", + Help: "Bridge cache hits count scoped by name", + }, + []string{"name"}, + ) + promBridgeCacheErrors = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "bridge_cache_errors_total", + Help: "Bridge cache errors count scoped by name", + }, + []string{"name"}, + ) +) + +// Return types: +// +// string +type BridgeTask struct { + BaseTask `mapstructure:",squash"` + + Name string `json:"name"` + RequestData string `json:"requestData"` + IncludeInputAtKey string `json:"includeInputAtKey"` + Async string `json:"async"` + CacheTTL string `json:"cacheTTL"` + Headers string `json:"headers"` + + specId int32 + orm bridges.ORM + config Config + bridgeConfig BridgeConfig + httpClient *http.Client +} + +var _ Task = (*BridgeTask)(nil) + +var zeroURL = new(url.URL) + +const stalenessCap = 30 * time.Minute + +func (t *BridgeTask) Type() TaskType { + return TaskTypeBridge +} + +func (t *BridgeTask) Run(ctx context.Context, lggr logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + inputValues, err := CheckInputs(inputs, -1, -1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var ( + name StringParam + requestData MapParam + includeInputAtKey StringParam + cacheTTL Uint64Param + reqHeaders StringSliceParam + ) + err = multierr.Combine( + errors.Wrap(ResolveParam(&name, From(NonemptyString(t.Name))), "name"), + errors.Wrap(ResolveParam(&requestData, From(VarExpr(t.RequestData, vars), JSONWithVarExprs(t.RequestData, vars, false), nil)), "requestData"), + errors.Wrap(ResolveParam(&includeInputAtKey, From(t.IncludeInputAtKey)), "includeInputAtKey"), + errors.Wrap(ResolveParam(&cacheTTL, From(ValidDurationInSeconds(t.CacheTTL), t.bridgeConfig.BridgeCacheTTL().Seconds())), "cacheTTL"), + errors.Wrap(ResolveParam(&reqHeaders, From(NonemptyString(t.Headers), "[]")), "reqHeaders"), + ) + if err != nil { + return Result{Error: err}, 
runInfo + } + + if len(reqHeaders)%2 != 0 { + return Result{Error: errors.Errorf("headers must have an even number of elements")}, runInfo + } + + url, err := t.getBridgeURLFromName(name) + if err != nil { + return Result{Error: err}, runInfo + } + + var metaMap MapParam + + meta, _ := vars.Get("jobRun.meta") + switch v := meta.(type) { + case map[string]interface{}: + metaMap = MapParam(v) + case nil: + default: + lggr.Warnw(`"meta" field on task run is malformed, discarding`, + "task", t.DotID(), + "meta", meta, + ) + } + + requestData = withRunInfo(requestData, metaMap) + if t.IncludeInputAtKey != "" { + if len(inputValues) > 0 { + requestData[string(includeInputAtKey)] = inputValues[0] + } + } + + if t.Async == "true" { + responseURL := t.bridgeConfig.BridgeResponseURL() + if responseURL != nil && *responseURL != *zeroURL { + responseURL.Path = path.Join(responseURL.Path, "/v2/resume/", t.uuid.String()) + } + var s string + if responseURL != nil { + s = responseURL.String() + } + requestData["responseURL"] = s + } + + requestDataJSON, err := json.Marshal(requestData) + if err != nil { + return Result{Error: err}, runInfo + } + lggr.Tracew("Bridge task: sending request", + "requestData", string(requestDataJSON), + "url", url.String(), + ) + + requestCtx, cancel := httpRequestCtx(ctx, t, t.config) + defer cancel() + + // cacheTTL should not exceed stalenessCap. 
+ cacheDuration := time.Duration(cacheTTL) * time.Second + if cacheDuration > stalenessCap { + lggr.Warnf("bridge task cacheTTL exceeds stalenessCap %s, overriding value to stalenessCap", stalenessCap) + cacheDuration = stalenessCap + } + + var cachedResponse bool + responseBytes, statusCode, headers, elapsed, err := makeHTTPRequest(requestCtx, lggr, "POST", url, reqHeaders, requestData, t.httpClient, t.config.DefaultHTTPLimit()) + + // check for external adapter response object status + if code, ok := eautils.BestEffortExtractEAStatus(responseBytes); ok { + statusCode = code + } + + if err != nil || statusCode != http.StatusOK { + promBridgeErrors.WithLabelValues(t.Name).Inc() + if cacheTTL == 0 { + return Result{Error: err}, RunInfo{IsRetryable: isRetryableHTTPError(statusCode, err)} + } + + var cacheErr error + responseBytes, cacheErr = t.orm.GetCachedResponse(t.dotID, t.specId, cacheDuration) + if cacheErr != nil { + promBridgeCacheErrors.WithLabelValues(t.Name).Inc() + if !errors.Is(cacheErr, sql.ErrNoRows) { + lggr.Warnw("Bridge task: cache fallback failed", + "err", cacheErr.Error(), + "url", url.String(), + ) + } + return Result{Error: err}, RunInfo{IsRetryable: isRetryableHTTPError(statusCode, err)} + } + promBridgeCacheHits.WithLabelValues(t.Name).Inc() + lggr.Debugw("Bridge task: request failed, falling back to cache", + "response", string(responseBytes), + "url", url.String(), + ) + cachedResponse = true + } else { + promBridgeLatency.WithLabelValues(t.Name).Set(elapsed.Seconds()) + } + + if t.Async == "true" { + // Look for a `pending` flag. 
This check is case-insensitive because http.Header normalizes header names + if _, ok := headers["X-Plugin-Pending"]; ok { + return result, pendingRunInfo() + } + + var response struct { + Pending bool `json:"pending"` + } + if err := json.Unmarshal(responseBytes, &response); err == nil && response.Pending { + return Result{}, pendingRunInfo() + } + } + + if !cachedResponse && cacheTTL > 0 { + err := t.orm.UpsertBridgeResponse(t.dotID, t.specId, responseBytes) + if err != nil { + lggr.Errorw("Bridge task: failed to upsert response in bridge cache", "err", err) + } + } + + // NOTE: We always stringify the response since this is required for all current jobs. + // If a binary response is required we might consider adding an adapter + // flag such as "BinaryMode: true" which passes through raw binary as the + // value instead. + result = Result{Value: string(responseBytes)} + + promHTTPFetchTime.WithLabelValues(t.DotID()).Set(float64(elapsed)) + promHTTPResponseBodySize.WithLabelValues(t.DotID()).Set(float64(len(responseBytes))) + + lggr.Tracew("Bridge task: fetched answer", + "answer", result.Value, + "url", url.String(), + "dotID", t.DotID(), + "cached", cachedResponse, + ) + return result, runInfo +} + +func (t BridgeTask) getBridgeURLFromName(name StringParam) (URLParam, error) { + bt, err := t.orm.FindBridge(bridges.BridgeName(name)) + if err != nil { + return URLParam{}, errors.Wrapf(err, "could not find bridge with name '%s'", name) + } + return URLParam(bt.URL), nil +} + +func withRunInfo(request MapParam, meta MapParam) MapParam { + output := make(MapParam) + for k, v := range request { + output[k] = v + } + if meta != nil { + output["meta"] = meta + } + return output +} diff --git a/core/services/pipeline/task.bridge_test.go b/core/services/pipeline/task.bridge_test.go new file mode 100644 index 00000000..f99600c1 --- /dev/null +++ b/core/services/pipeline/task.bridge_test.go @@ -0,0 +1,1141 @@ +package pipeline_test + +import ( + "encoding/json" + "fmt" + 
"io" + "math/big" + "net/http" + "net/http/httptest" + "net/url" + "os" + "sort" + "sync/atomic" + "testing" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + clhttptest "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/httptest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline/internal/eautils" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// ethUSDPairing has the ETH/USD parameters needed when POSTing to the price +// external adapters. 
+// https://github.com/goplugin/price-adapters
+
+var (
+	btcUSDPairing = `{"data":{"coin":"BTC","market":"USD"}}`
+	ethUSDPairing = `{"data":{"coin":"ETH","market":"USD"}}`
+)
+
+// adapterRequest mirrors the JSON payload a bridge POSTs to an external
+// adapter: a request ID, the job-specific data/meta maps, and the URL the
+// adapter should call back to resume async (pending) runs.
+type adapterRequest struct {
+	ID          string            `json:"id"`
+	Data        pipeline.MapParam `json:"data"`
+	Meta        pipeline.MapParam `json:"meta"`
+	ResponseURL string            `json:"responseURL"`
+}
+
+// adapterResponseData carries the numeric result field of an adapter reply.
+type adapterResponseData struct {
+	Result *decimal.Decimal `json:"result"`
+}
+
+// adapterResponse is the HTTP response as defined by the external adapter:
+// https://github.com/goplugin/bnc-adapter
+type adapterResponse struct {
+	eautils.AdapterStatus
+	Data adapterResponseData `json:"data"`
+}
+
+// The Set*/Unset* helpers below toggle the optional status and error fields
+// of the embedded eautils.AdapterStatus so tests can build responses concisely.
+
+func (pr *adapterResponse) SetStatusCode(code int) {
+	pr.StatusCode = &code
+}
+
+func (pr *adapterResponse) UnsetStatusCode() {
+	pr.StatusCode = nil
+}
+
+func (pr *adapterResponse) SetProviderStatusCode(code int) {
+	pr.ProviderStatusCode = &code
+}
+
+func (pr *adapterResponse) UnsetProviderStatusCode() {
+	pr.ProviderStatusCode = nil
+}
+
+func (pr *adapterResponse) SetError(msg string) {
+	pr.Error = msg
+}
+
+func (pr *adapterResponse) UnsetError() {
+	pr.Error = nil
+}
+
+func (pr *adapterResponse) SetErrorMessage(msg string) {
+	pr.ErrorMessage = &msg
+}
+
+func (pr *adapterResponse) UnsetErrorMessage() {
+	pr.ErrorMessage = nil
+}
+
+// Result returns the decoded numeric result (nil when the adapter supplied none).
+func (pr *adapterResponse) Result() *decimal.Decimal {
+	return pr.Data.Result
+}
+
+// dataWithResult builds an adapterResponseData by round-tripping result
+// through JSON, exactly as a real adapter reply would be decoded.
+func dataWithResult(t *testing.T, result decimal.Decimal) adapterResponseData {
+	t.Helper()
+	var data adapterResponseData
+	body := []byte(fmt.Sprintf(`{"result":%v}`, result))
+	require.NoError(t, json.Unmarshal(body, &data))
+	return data
+}
+
+// mustReadFile returns the contents of file, failing the test on any read error.
+func mustReadFile(t testing.TB, file string) string {
+	t.Helper()
+
+	content, err := os.ReadFile(file)
+	require.NoError(t, err)
+	return string(content)
+}
+
+// fakePriceResponder serves a fixed price quote. It asserts each incoming
+// request's data matches requestData and, when inputKey is non-empty, that
+// the raw payload carries expectedInput at that key (or lacks the key when
+// expectedInput is nil).
+func fakePriceResponder(t *testing.T, requestData map[string]interface{}, result decimal.Decimal, inputKey string, expectedInput interface{}) http.Handler {
+	t.Helper()
+
+	body, err := 
json.Marshal(requestData)
+	require.NoError(t, err)
+	var expectedRequest adapterRequest
+	err = json.Unmarshal(body, &expectedRequest)
+	require.NoError(t, err)
+	response := adapterResponse{Data: dataWithResult(t, result)}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var reqBody adapterRequest
+		payload, err := io.ReadAll(r.Body)
+		require.NoError(t, err)
+		defer r.Body.Close()
+		err = json.Unmarshal(payload, &reqBody)
+		require.NoError(t, err)
+		require.Equal(t, expectedRequest.Data, reqBody.Data)
+		w.Header().Set("Content-Type", "application/json")
+		require.NoError(t, json.NewEncoder(w).Encode(response))
+
+		if inputKey != "" {
+			m := utils.MustUnmarshalToMap(string(payload))
+			if expectedInput != nil {
+				require.Equal(t, expectedInput, m[inputKey])
+			} else {
+				require.Nil(t, m[inputKey])
+			}
+		}
+	})
+}
+
+// fakeIntermittentlyFailingPriceResponder behaves like fakePriceResponder,
+// except that requests whose meta carries shouldFail=true are answered with
+// 502 Bad Gateway instead of the quote, letting tests exercise the bridge
+// cache fallback path.
+func fakeIntermittentlyFailingPriceResponder(t *testing.T, requestData map[string]interface{}, result decimal.Decimal, inputKey string, expectedInput interface{}) http.Handler {
+	t.Helper()
+
+	body, err := json.Marshal(requestData)
+	require.NoError(t, err)
+	var expectedRequest adapterRequest
+	err = json.Unmarshal(body, &expectedRequest)
+	require.NoError(t, err)
+	response := adapterResponse{Data: dataWithResult(t, result)}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var reqBody adapterRequest
+		payload, err := io.ReadAll(r.Body)
+		require.NoError(t, err)
+		defer r.Body.Close()
+		err = json.Unmarshal(payload, &reqBody)
+		require.NoError(t, err)
+		require.Equal(t, expectedRequest.Data, reqBody.Data)
+		// require.Equal(t, float64(0), reqBody.Meta["id"])
+
+		// Type assertion is deliberate: a test run missing meta.shouldFail is
+		// a fixture bug and should panic loudly.
+		if reqBody.Meta["shouldFail"].(bool) {
+			w.Header().Set("Content-Type", "application/json")
+			w.WriteHeader(http.StatusBadGateway)
+			// NOTE(review): encoding an errors.New value emits `{}` (no
+			// exported fields), so the message text is not serialized; the
+			// tests appear to rely only on the 502 status — confirm.
+			require.NoError(t, json.NewEncoder(w).Encode(errors.New("EA failure")))
+			return
+		}
+		w.Header().Set("Content-Type", "application/json")
+		require.NoError(t, json.NewEncoder(w).Encode(response))
+
+		if inputKey != "" {
+			m := utils.MustUnmarshalToMap(string(payload))
+			if expectedInput != nil {
+				require.Equal(t, expectedInput, m[inputKey])
+			} else {
+				require.Nil(t, m[inputKey])
+			}
+		}
+	})
+}
+
+// fakeStringResponder replies to every request with the fixed body s.
+func fakeStringResponder(t *testing.T, s string) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		_, err := w.Write([]byte(s))
+		require.NoError(t, err)
+	})
+}
+
+// TestBridgeTask_Happy runs a BridgeTask against a fake adapter and checks
+// the happy path: no pending/retryable flags and the adapter's result
+// round-trips through the task's stringified JSON value.
+func TestBridgeTask_Happy(t *testing.T) {
+	t.Parallel()
+
+	db := pgtest.NewSqlxDB(t)
+	cfg := configtest.NewTestGeneralConfig(t)
+
+	s1 := httptest.NewServer(fakePriceResponder(t, utils.MustUnmarshalToMap(btcUSDPairing), decimal.NewFromInt(9700), "", nil))
+	defer s1.Close()
+
+	feedURL, err := url.ParseRequestURI(s1.URL)
+	require.NoError(t, err)
+
+	orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database())
+	_, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: feedURL.String()}, cfg.Database())
+
+	task := pipeline.BridgeTask{
+		BaseTask:    pipeline.NewBaseTask(0, "bridge", nil, nil, 0),
+		Name:        bridge.Name.String(),
+		RequestData: btcUSDPairing,
+	}
+	c := clhttptest.NewTestLocalOnlyHTTPClient()
+	trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns())
+	specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t)))
+	require.NoError(t, err)
+	task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c)
+
+	result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil)
+	assert.False(t, runInfo.IsPending)
+	assert.False(t, runInfo.IsRetryable)
+	require.NoError(t, result.Error)
+	require.NotNil(t, result.Value)
+	var x struct {
+		Data struct {
+			Result decimal.Decimal `json:"result"`
+		} `json:"data"`
+	}
+	err = json.Unmarshal([]byte(result.Value.(string)), &x)
+	require.NoError(t, err)
+	require.Equal(t, 
decimal.NewFromInt(9700), x.Data.Result) +} + +func TestBridgeTask_HandlesIntermittentFailure(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) {}) + + s1 := httptest.NewServer(fakeIntermittentlyFailingPriceResponder(t, utils.MustUnmarshalToMap(btcUSDPairing), decimal.NewFromInt(9700), "", nil)) + defer s1.Close() + + feedURL, err := url.ParseRequestURI(s1.URL) + require.NoError(t, err) + + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: feedURL.String()}, cfg.Database()) + + task := pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "bridge", nil, nil, 0), + Name: bridge.Name.String(), + RequestData: btcUSDPairing, + CacheTTL: "30s", // standard duration string format + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), + pipeline.NewVarsFrom( + map[string]interface{}{ + "jobRun": map[string]interface{}{ + "meta": map[string]interface{}{ + "shouldFail": false, + }, + }, + }, + ), + nil) + + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.NoError(t, result.Error) + require.NotNil(t, result.Value) + + result2, runInfo2 := task.Run(testutils.Context(t), logger.TestLogger(t), + pipeline.NewVarsFrom( + map[string]interface{}{ + "jobRun": map[string]interface{}{ + "meta": map[string]interface{}{ + "shouldFail": true, + }, + }, + }, + ), + nil) + + require.NoError(t, result2.Error) + require.Equal(t, result.Value, result2.Value) + 
require.Equal(t, runInfo.IsPending, runInfo2.IsPending) + require.Equal(t, runInfo.IsRetryable, runInfo2.IsRetryable) +} + +func TestBridgeTask_DoesNotReturnStaleResults(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.WebServer.BridgeCacheTTL = commonconfig.MustNewDuration(30 * time.Second) + }) + queryer := pg.NewQ(db, logger.TestLogger(t), cfg.Database()) + s1 := httptest.NewServer(fakeIntermittentlyFailingPriceResponder(t, utils.MustUnmarshalToMap(btcUSDPairing), decimal.NewFromInt(9700), "", nil)) + defer s1.Close() + + feedURL, err := url.ParseRequestURI(s1.URL) + require.NoError(t, err) + + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: feedURL.String()}, cfg.Database()) + + task := pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "bridge", nil, nil, 0), + Name: bridge.Name.String(), + RequestData: btcUSDPairing, + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) + + // Insert entry 1m in the past, stale value, should not be used in case of EA failure. 
+ err = queryer.ExecQ(`INSERT INTO bridge_last_value(dot_id, spec_id, value, finished_at) + VALUES($1, $2, $3, $4) ON CONFLICT ON CONSTRAINT bridge_last_value_pkey + DO UPDATE SET value = $3, finished_at = $4;`, task.DotID(), specID, big.NewInt(9700).Bytes(), time.Now().Add(-1*time.Minute)) + require.NoError(t, err) + + result2, _ := task.Run(testutils.Context(t), logger.TestLogger(t), + pipeline.NewVarsFrom( + map[string]interface{}{ + "jobRun": map[string]interface{}{ + "meta": map[string]interface{}{ + "shouldFail": true, + }, + }, + }, + ), + nil) + + require.Error(t, result2.Error) + require.Nil(t, result2.Value) + + // Insert entry 10s in the past, under 30 seconds and should be used in case of failure. + err = queryer.ExecQ(`INSERT INTO bridge_last_value(dot_id, spec_id, value, finished_at) + VALUES($1, $2, $3, $4) ON CONFLICT ON CONSTRAINT bridge_last_value_pkey + DO UPDATE SET value = $3, finished_at = $4;`, task.DotID(), specID, big.NewInt(9700).Bytes(), time.Now().Add(-10*time.Second)) + require.NoError(t, err) + + result2, _ = task.Run(testutils.Context(t), logger.TestLogger(t), + pipeline.NewVarsFrom( + map[string]interface{}{ + "jobRun": map[string]interface{}{ + "meta": map[string]interface{}{ + "shouldFail": true, + }, + }, + }, + ), + nil) + + require.NoError(t, result2.Error) + require.Equal(t, string(big.NewInt(9700).Bytes()), result2.Value) + + cfg2 := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.WebServer.BridgeCacheTTL = commonconfig.MustNewDuration(0 * time.Second) + }) + task.HelperSetDependencies(cfg2.JobPipeline(), cfg2.WebServer(), orm, specID, uuid.UUID{}, c) + + // Even though we have a cached value, this should fail since config now set to 0. 
+ result2, _ = task.Run(testutils.Context(t), logger.TestLogger(t), + pipeline.NewVarsFrom( + map[string]interface{}{ + "jobRun": map[string]interface{}{ + "meta": map[string]interface{}{ + "shouldFail": true, + }, + }, + }, + ), + nil) + + require.Error(t, result2.Error) + require.Nil(t, result2.Value) + + task2 := pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "bridge2", nil, nil, 0), + Name: bridge.Name.String(), + RequestData: btcUSDPairing, + CacheTTL: "35m", // more than the stalenessCap 30m + } + task2.HelperSetDependencies(cfg2.JobPipeline(), cfg2.WebServer(), orm, specID, uuid.UUID{}, c) + + // Insert entry 32m in the past, under cacheTTL of 35m but more than stalenessCap of 30m. + err = queryer.ExecQ(`INSERT INTO bridge_last_value(dot_id, spec_id, value, finished_at) + VALUES($1, $2, $3, $4) ON CONFLICT ON CONSTRAINT bridge_last_value_pkey + DO UPDATE SET value = $3, finished_at = $4;`, task2.DotID(), specID, big.NewInt(9700).Bytes(), time.Now().Add(-32*time.Minute)) + require.NoError(t, err) + + // Run fails even though cacheTTL > lastvalue.finished_at because cacheTTL exceeds stalenessCap. + result2, _ = task2.Run(testutils.Context(t), logger.TestLogger(t), + pipeline.NewVarsFrom( + map[string]interface{}{ + "jobRun": map[string]interface{}{ + "meta": map[string]interface{}{ + "shouldFail": true, + }, + }, + }, + ), + nil) + + require.Error(t, result2.Error) + require.Nil(t, result2.Value) + + // Insert entry 25m in the past, under stalenessCap + err = queryer.ExecQ(`INSERT INTO bridge_last_value(dot_id, spec_id, value, finished_at) + VALUES($1, $2, $3, $4) ON CONFLICT ON CONSTRAINT bridge_last_value_pkey + DO UPDATE SET value = $3, finished_at = $4;`, task2.DotID(), specID, big.NewInt(9700).Bytes(), time.Now().Add(-25*time.Minute)) + require.NoError(t, err) + + // Run succeeds using the cached value that's under stalenessCap. 
+ result2, _ = task2.Run(testutils.Context(t), logger.TestLogger(t), + pipeline.NewVarsFrom( + map[string]interface{}{ + "jobRun": map[string]interface{}{ + "meta": map[string]interface{}{ + "shouldFail": true, + }, + }, + }, + ), + nil) + + require.NoError(t, result2.Error) + require.Equal(t, string(big.NewInt(9700).Bytes()), result2.Value) +} + +func TestBridgeTask_AsyncJobPendingState(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + id := uuid.New() + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var reqBody adapterRequest + payload, err := io.ReadAll(r.Body) + require.NoError(t, err) + defer r.Body.Close() + + err = json.Unmarshal(payload, &reqBody) + require.NoError(t, err) + require.Equal(t, fmt.Sprintf("%s/v2/resume/%v", cfg.WebServer().BridgeResponseURL(), id.String()), reqBody.ResponseURL) + w.Header().Set("Content-Type", "application/json") + + // w.Header().Set("X-Plugin-Pending", "true") + response := map[string]interface{}{"pending": true} + require.NoError(t, json.NewEncoder(w).Encode(response)) + + }) + + server := httptest.NewServer(handler) + defer server.Close() + feedURL, err := url.ParseRequestURI(server.URL) + require.NoError(t, err) + + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: feedURL.String()}, cfg.Database()) + + task := pipeline.BridgeTask{ + Name: bridge.Name.String(), + RequestData: ethUSDPairing, + Async: "true", + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, id, c) + + result, runInfo := 
task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.True(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + + require.NoError(t, result.Error) + require.Nil(t, result.Value) +} + +func TestBridgeTask_Variables(t *testing.T) { + t.Parallel() + + validMeta := map[string]interface{}{"theMeta": "yes"} + + tests := []struct { + name string + requestData string + includeInputAtKey string + inputs []pipeline.Result + vars pipeline.Vars + expectedRequestData map[string]interface{} + expectedErrorCause error + expectedErrorContains string + }{ + { + "requestData (empty) + includeInputAtKey + meta", + ``, + "input", + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"some_data": map[string]interface{}{"foo": 543.21}}), + map[string]interface{}{ + "input": 123.45, + "meta": validMeta, + }, + nil, + "", + }, + { + "requestData (pure variable) + includeInputAtKey + meta", + `$(some_data)`, + "input", + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"some_data": map[string]interface{}{"foo": 543.21}}), + map[string]interface{}{ + "foo": 543.21, + "input": 123.45, + "meta": validMeta, + }, + nil, + "", + }, + { + "requestData (pure variable) + includeInputAtKey", + `$(some_data)`, + "input", + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"some_data": map[string]interface{}{"foo": 543.21}}), + map[string]interface{}{ + "foo": 543.21, + "input": 123.45, + }, + nil, + "", + }, + { + "requestData (pure variable) + meta", + `$(some_data)`, + "", + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"some_data": map[string]interface{}{"foo": 543.21}}), + map[string]interface{}{ + "foo": 543.21, + "meta": validMeta, + }, + nil, + "", + }, + { + "requestData (pure variable, missing)", + `$(some_data)`, + "input", + []pipeline.Result{{Value: 123.45}}, + 
pipeline.NewVarsFrom(map[string]interface{}{"not_some_data": map[string]interface{}{"foo": 543.21}}), + nil, + pipeline.ErrKeypathNotFound, + "requestData", + }, + { + "requestData (pure variable, not a map)", + `$(some_data)`, + "input", + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"some_data": 543.21}), + nil, + pipeline.ErrBadInput, + "requestData", + }, + { + "requestData (interpolation) + includeInputAtKey + meta", + `{"data":{"result":$(medianize)}}`, + "input", + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"medianize": 543.21}), + map[string]interface{}{ + "data": map[string]interface{}{"result": 543.21}, + "input": 123.45, + "meta": validMeta, + }, + nil, + "", + }, + { + "requestData (interpolation) + includeInputAtKey", + `{"data":{"result":$(medianize)}}`, + "input", + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"medianize": 543.21}), + map[string]interface{}{ + "data": map[string]interface{}{"result": 543.21}, + "input": 123.45, + }, + nil, + "", + }, + { + "requestData (interpolation) + meta", + `{"data":{"result":$(medianize)}}`, + "", + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"medianize": 543.21}), + map[string]interface{}{ + "data": map[string]interface{}{"result": 543.21}, + "meta": validMeta, + }, + nil, + "", + }, + { + "requestData (interpolation, missing)", + `{"data":{"result":$(medianize)}}`, + "input", + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"nope": "foo bar"}), + nil, + pipeline.ErrKeypathNotFound, + "requestData", + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + s1 := httptest.NewServer(fakePriceResponder(t, test.expectedRequestData, decimal.NewFromInt(9700), "", nil)) + defer s1.Close() + + feedURL, 
err := url.ParseRequestURI(s1.URL) + require.NoError(t, err) + + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: feedURL.String()}, cfg.Database()) + + task := pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "bridge", nil, nil, 0), + Name: bridge.Name.String(), + RequestData: test.requestData, + IncludeInputAtKey: test.includeInputAtKey, + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if test.expectedErrorCause != nil { + require.Equal(t, test.expectedErrorCause, errors.Cause(result.Error)) + if test.expectedErrorContains != "" { + require.Contains(t, result.Error.Error(), test.expectedErrorContains) + } + + } else { + require.NoError(t, result.Error) + require.NotNil(t, result.Value) + var x struct { + Data struct { + Result decimal.Decimal `json:"result"` + } `json:"data"` + } + err = json.Unmarshal([]byte(result.Value.(string)), &x) + require.NoError(t, err) + require.Equal(t, decimal.NewFromInt(9700), x.Data.Result) + } + }) + } +} + +func TestBridgeTask_Meta(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + var empty adapterResponse + + var httpCalled atomic.Bool + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var req adapterRequest + body, _ := io.ReadAll(r.Body) + err := json.Unmarshal(body, &req) + require.NoError(t, err) + require.Equal(t, 
float64(10), req.Meta["latestAnswer"]) + require.Equal(t, float64(1616447984), req.Meta["updatedAt"]) + w.Header().Set("Content-Type", "application/json") + require.NoError(t, json.NewEncoder(w).Encode(empty)) + httpCalled.Store(true) + }) + + metaDataForBridge, err := bridges.MarshalBridgeMetaData(big.NewInt(10), big.NewInt(1616447984)) + require.NoError(t, err) + + s1 := httptest.NewServer(handler) + + defer s1.Close() + feedURL, err := url.ParseRequestURI(s1.URL) + require.NoError(t, err) + + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: feedURL.String()}, cfg.Database()) + + task := pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "bridge", nil, nil, 0), + RequestData: ethUSDPairing, + Name: bridge.Name.String(), + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) + + mp := map[string]interface{}{"meta": metaDataForBridge} + res, _ := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(map[string]interface{}{"jobRun": mp}), nil) + assert.Nil(t, res.Error) + + assert.True(t, httpCalled.Load()) +} + +func TestBridgeTask_IncludeInputAtKey(t *testing.T) { + t.Parallel() + + theErr := errors.New("foo") + + tests := []struct { + name string + inputs []pipeline.Result + includeInputAtKey string + expectedInput interface{} + expectedErrorCause error + }{ + {"no input, no includeInputAtKey", nil, "", nil, nil}, + {"no input, includeInputAtKey", nil, "result", nil, nil}, + {"input, no includeInputAtKey", []pipeline.Result{{Value: decimal.NewFromFloat(123.45)}}, "", nil, nil}, + {"input, 
includeInputAtKey", []pipeline.Result{{Value: decimal.NewFromFloat(123.45)}}, "result", "123.45", nil}, + {"input has error", []pipeline.Result{{Error: theErr}}, "result", nil, pipeline.ErrTooManyErrors}, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + s1 := httptest.NewServer(fakePriceResponder(t, utils.MustUnmarshalToMap(btcUSDPairing), decimal.NewFromInt(9700), test.includeInputAtKey, test.expectedInput)) + defer s1.Close() + + feedURL, err := url.ParseRequestURI(s1.URL) + require.NoError(t, err) + + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: feedURL.String()}, cfg.Database()) + + task := pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "bridge", nil, nil, 0), + Name: bridge.Name.String(), + RequestData: btcUSDPairing, + IncludeInputAtKey: test.includeInputAtKey, + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if test.expectedErrorCause != nil { + require.Equal(t, test.expectedErrorCause, errors.Cause(result.Error)) + require.Nil(t, result.Value) + } else { + require.NoError(t, result.Error) + require.NotNil(t, result.Value) + var x struct { + Data struct { + Result decimal.Decimal `json:"result"` + } `json:"data"` + } + err = json.Unmarshal([]byte(result.Value.(string)), &x) + require.NoError(t, 
err) + require.Equal(t, decimal.NewFromInt(9700), x.Data.Result) + } + }) + } +} + +func TestBridgeTask_ErrorMessage(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusTooManyRequests) + + resp := &adapterResponse{} + resp.SetErrorMessage("could not hit data fetcher") + err := json.NewEncoder(w).Encode(resp) + require.NoError(t, err) + }) + + server := httptest.NewServer(handler) + defer server.Close() + feedURL, err := url.ParseRequestURI(server.URL) + require.NoError(t, err) + + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: feedURL.String()}, cfg.Database()) + + task := pipeline.BridgeTask{ + Name: bridge.Name.String(), + RequestData: ethUSDPairing, + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.Error(t, result.Error) + require.Contains(t, result.Error.Error(), "could not hit data fetcher") + require.Nil(t, result.Value) +} + +func TestBridgeTask_OnlyErrorMessage(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + 
w.WriteHeader(http.StatusBadGateway) + _, err := w.Write([]byte(mustReadFile(t, "../../testdata/apiresponses/coinmarketcap.error.json"))) + require.NoError(t, err) + }) + + server := httptest.NewServer(handler) + defer server.Close() + feedURL, err := url.ParseRequestURI(server.URL) + require.NoError(t, err) + + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: feedURL.String()}, cfg.Database()) + + task := pipeline.BridgeTask{ + Name: bridge.Name.String(), + RequestData: ethUSDPairing, + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + assert.True(t, runInfo.IsRetryable) + require.Error(t, result.Error) + require.Contains(t, result.Error.Error(), "RequestId") + require.Nil(t, result.Value) +} + +func TestBridgeTask_ErrorIfBridgeMissing(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + task := pipeline.BridgeTask{ + Name: "foo", + RequestData: btcUSDPairing, + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) 
+ + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.Nil(t, result.Value) + require.Error(t, result.Error) + assert.Contains(t, result.Error.Error(), "could not find bridge with name 'foo'") +} + +// Sample input taken from +// https://github.com/goplugin/price-adapters#plugin-price-request-adapters +func TestAdapterResponse_UnmarshalJSON_Happy(t *testing.T) { + t.Parallel() + + tests := []struct { + name, content string + expect decimal.Decimal + }{ + {"basic", `{"data":{"result":123.4567890},"jobRunID":"1","statusCode":200}`, decimal.NewFromFloat(123.456789)}, + {"bravenewcoin", mustReadFile(t, "../../testdata/apiresponses/bravenewcoin.json"), decimal.NewFromFloat(306.52036004)}, + {"coinmarketcap", mustReadFile(t, "../../testdata/apiresponses/coinmarketcap.json"), decimal.NewFromFloat(305.5574615)}, + {"cryptocompare", mustReadFile(t, "../../testdata/apiresponses/cryptocompare.json"), decimal.NewFromFloat(305.76)}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var response adapterResponse + err := json.Unmarshal([]byte(test.content), &response) + require.NoError(t, err) + result := response.Result() + require.Equal(t, test.expect.String(), result.String()) + }) + } +} + +func TestBridgeTask_Headers(t *testing.T) { + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + var headers http.Header + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + headers = r.Header + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte(`{"fooresponse": 1}`)) + require.NoError(t, err) + }) + + server := httptest.NewServer(handler) + defer server.Close() + + bridgeURL, err := url.ParseRequestURI(server.URL) + require.NoError(t, err) + + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + _, 
bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: bridgeURL.String()}, cfg.Database()) + + allHeaders := func(headers http.Header) (s []string) { + var keys []string + for k := range headers { + keys = append(keys, k) + } + // get it in a consistent order + sort.Strings(keys) + for _, k := range keys { + v := headers.Get(k) + s = append(s, k, v) + fmt.Println(k, v) + } + + return s + } + + standardHeaders := []string{"Content-Length", "38", "Content-Type", "application/json", "User-Agent", "Go-http-client/1.1"} + + t.Run("sends headers", func(t *testing.T) { + + task := pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "bridge", nil, nil, 0), + Name: bridge.Name.String(), + RequestData: btcUSDPairing, + Headers: `["X-Header-1", "foo", "X-Header-2", "bar"]`, + } + + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + assert.Equal(t, `{"fooresponse": 1}`, result.Value) + assert.Nil(t, result.Error) + + assert.Equal(t, append(standardHeaders, "X-Header-1", "foo", "X-Header-2", "bar"), allHeaders(headers)) + }) + + t.Run("errors with odd number of headers", func(t *testing.T) { + task := pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "bridge", nil, nil, 0), + Name: bridge.Name.String(), + RequestData: btcUSDPairing, + Headers: `["X-Header-1", "foo", "X-Header-2", "bar", "odd one out"]`, + } + + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + 
specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + assert.NotNil(t, result.Error) + assert.Equal(t, `headers must have an even number of elements`, result.Error.Error()) + assert.Nil(t, result.Value) + }) + + t.Run("allows to override content-type", func(t *testing.T) { + + task := pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "bridge", nil, nil, 0), + Name: bridge.Name.String(), + RequestData: btcUSDPairing, + Headers: `["X-Header-1", "foo", "Content-Type", "footype", "X-Header-2", "bar"]`, + } + + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + assert.Equal(t, `{"fooresponse": 1}`, result.Value) + assert.Nil(t, result.Error) + + assert.Equal(t, []string{"Content-Length", "38", "Content-Type", "footype", "User-Agent", "Go-http-client/1.1", "X-Header-1", "foo", "X-Header-2", "bar"}, allHeaders(headers)) + }) +} + +func TestBridgeTask_AdapterResponseStatusFailure(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.WebServer.BridgeCacheTTL = commonconfig.MustNewDuration(1 * time.Minute) + }) + + testAdapterResponse := 
&adapterResponse{ + Data: adapterResponseData{Result: &decimal.Zero}, + } + + queryer := pg.NewQ(db, logger.TestLogger(t), cfg.Database()) + s1 := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + err := json.NewEncoder(w).Encode(testAdapterResponse) + require.NoError(t, err) + })) + defer s1.Close() + + feedURL, err := url.ParseRequestURI(s1.URL) + require.NoError(t, err) + + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: feedURL.String()}, cfg.Database()) + + task := pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "bridge", nil, nil, 0), + Name: bridge.Name.String(), + RequestData: btcUSDPairing, + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) + + // Insert entry 1m in the past, stale value, should not be used in case of EA failure. 
+ err = queryer.ExecQ(`INSERT INTO bridge_last_value(dot_id, spec_id, value, finished_at) + VALUES($1, $2, $3, $4) ON CONFLICT ON CONSTRAINT bridge_last_value_pkey + DO UPDATE SET value = $3, finished_at = $4;`, task.DotID(), specID, big.NewInt(9700).Bytes(), time.Now()) + require.NoError(t, err) + + vars := pipeline.NewVarsFrom( + map[string]interface{}{ + "jobRun": map[string]interface{}{ + "meta": map[string]interface{}{ + "shouldFail": true, + }, + }, + }, + ) + + // expect all external adapter response status failures to be served from the cache + testAdapterResponse.SetStatusCode(http.StatusBadRequest) + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + + require.NoError(t, result.Error) + require.NotNil(t, result.Value) + require.False(t, runInfo.IsRetryable) + require.False(t, runInfo.IsPending) + + testAdapterResponse.SetStatusCode(http.StatusOK) + testAdapterResponse.SetProviderStatusCode(http.StatusBadRequest) + result, runInfo = task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + + require.NoError(t, result.Error) + require.NotNil(t, result.Value) + require.False(t, runInfo.IsRetryable) + require.False(t, runInfo.IsPending) + + testAdapterResponse.SetStatusCode(http.StatusOK) + testAdapterResponse.SetProviderStatusCode(http.StatusOK) + testAdapterResponse.SetError("some error") + result, runInfo = task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + + require.NoError(t, result.Error) + require.NotNil(t, result.Value) + require.False(t, runInfo.IsRetryable) + require.False(t, runInfo.IsPending) + + testAdapterResponse.SetStatusCode(http.StatusInternalServerError) + result, runInfo = task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + + require.NoError(t, result.Error) + require.NotNil(t, result.Value) + require.False(t, runInfo.IsRetryable) + require.False(t, runInfo.IsPending) +} diff --git a/core/services/pipeline/task.cborparse.go b/core/services/pipeline/task.cborparse.go new 
file mode 100644 index 00000000..2a5f4e08 --- /dev/null +++ b/core/services/pipeline/task.cborparse.go @@ -0,0 +1,71 @@ +package pipeline + +import ( + "context" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/cbor" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// map[string]interface{} with potential value types: +// float64 +// string +// bool +// map[string]interface{} +// []interface{} +// nil +type CBORParseTask struct { + BaseTask `mapstructure:",squash"` + Data string `json:"data"` + Mode string `json:"mode"` +} + +var _ Task = (*CBORParseTask)(nil) + +func (t *CBORParseTask) Type() TaskType { + return TaskTypeCBORParse +} + +func (t *CBORParseTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, -1, -1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var ( + data BytesParam + mode StringParam + ) + err = multierr.Combine( + errors.Wrap(ResolveParam(&data, From(VarExpr(t.Data, vars))), "data"), + errors.Wrap(ResolveParam(&mode, From(NonemptyString(t.Mode), "diet")), "mode"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + switch mode { + case "diet": + // NOTE: In diet mode, cbor_parse ASSUMES that the incoming CBOR is a + // map. 
In the case that data is entirely missing, we assume it was the + // empty map + parsed, err := cbor.ParseDietCBOR(data) + if err != nil { + return Result{Error: errors.Wrapf(ErrBadInput, "CBORParse: data: %v", err)}, runInfo + } + return Result{Value: parsed}, runInfo + case "standard": + parsed, err := cbor.ParseStandardCBOR(data) + if err != nil { + return Result{Error: errors.Wrapf(ErrBadInput, "CBORParse: data: %v", err)}, runInfo + } + return Result{Value: parsed}, runInfo + default: + return Result{Error: errors.Errorf("unrecognised mode: %s", mode)}, runInfo + } +} diff --git a/core/services/pipeline/task.cborparse_test.go b/core/services/pipeline/task.cborparse_test.go new file mode 100644 index 00000000..dac0cb88 --- /dev/null +++ b/core/services/pipeline/task.cborparse_test.go @@ -0,0 +1,157 @@ +package pipeline_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestCBORParseTask(t *testing.T) { + tests := []struct { + name string + data string + vars pipeline.Vars + inputs []pipeline.Result + expected map[string]interface{} + expectedErrorCause error + expectedErrorContains string + }{ + { + "hello world", + "$(foo)", + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": "0xbf6375726c781a68747470733a2f2f657468657270726963652e636f6d2f61706964706174689f66726563656e7463757364ffff", + }), + nil, + map[string]interface{}{ + "path": []interface{}{"recent", "usd"}, + "url": "https://etherprice.com/api", + }, + nil, + "", + }, + { + "trailing empty bytes", + "$(foo)", + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": "0xbf6375726c781a68747470733a2f2f657468657270726963652e636f6d2f61706964706174689f66726563656e7463757364ffff000000", + }), + nil, + map[string]interface{}{ + "path": []interface{}{"recent", "usd"}, + "url": 
"https://etherprice.com/api", + }, + nil, + "", + }, + { + "nested maps", + "$(foo)", + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": "0xbf657461736b739f6868747470706f7374ff66706172616d73bf636d73676f68656c6c6f5f636861696e6c696e6b6375726c75687474703a2f2f6c6f63616c686f73743a36363930ffff", + }), + nil, + map[string]interface{}{ + "params": map[string]interface{}{ + "msg": "hello_plugin", + "url": "http://localhost:6690", + }, + "tasks": []interface{}{"httppost"}, + }, + nil, + "", + }, + { + "bignums", + "$(foo)", + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": "0x" + + "bf" + // map(*) + "67" + // text(7) + "6269676e756d73" + // "bignums" + "9f" + // array(*) + "c2" + // tag(2) == unsigned bignum + "5820" + // bytes(32) + "0000000000000000000000000000000000000000000000010000000000000000" + + // int(18446744073709551616) + "c2" + // tag(2) == unsigned bignum + "5820" + // bytes(32) + "4000000000000000000000000000000000000000000000000000000000000000" + + // int(28948022309329048855892746252171976963317496166410141009864396001978282409984) + "c3" + // tag(3) == signed bignum + "5820" + // bytes(32) + "0000000000000000000000000000000000000000000000010000000000000000" + + // int(18446744073709551616) + "c3" + // tag(3) == signed bignum + "5820" + // bytes(32) + "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + + // int(28948022309329048855892746252171976963317496166410141009864396001978282409983) + "ff" + // primitive(*) + "ff", // primitive(*) + }), + nil, + map[string]interface{}{ + "bignums": []interface{}{ + testutils.MustParseBigInt(t, "18446744073709551616"), + testutils.MustParseBigInt(t, "28948022309329048855892746252171976963317496166410141009864396001978282409984"), + testutils.MustParseBigInt(t, "-18446744073709551617"), + testutils.MustParseBigInt(t, "-28948022309329048855892746252171976963317496166410141009864396001978282409984"), + }, + }, + nil, + "", + }, + + { + "empty data", + "$(foo)", + 
pipeline.NewVarsFrom(map[string]interface{}{ + "foo": nil, + }), + nil, + map[string]interface{}{}, + nil, + "data", + }, + { + "error input", + "", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Error: errors.New("foo")}}, + nil, + pipeline.ErrTooManyErrors, + "task inputs", + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + task := pipeline.CBORParseTask{ + BaseTask: pipeline.NewBaseTask(0, "cbor", nil, nil, 0), + Data: test.data, + } + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + + if test.expectedErrorCause != nil { + assert.Equal(t, test.expectedErrorCause, errors.Cause(result.Error)) + assert.Nil(t, result.Value) + if test.expectedErrorContains != "" { + assert.Contains(t, result.Error.Error(), test.expectedErrorContains) + } + } else { + assert.NoError(t, result.Error) + assert.Equal(t, test.expected, result.Value) + } + }) + } +} diff --git a/core/services/pipeline/task.conditional.go b/core/services/pipeline/task.conditional.go new file mode 100644 index 00000000..69da7067 --- /dev/null +++ b/core/services/pipeline/task.conditional.go @@ -0,0 +1,44 @@ +package pipeline + +import ( + "context" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// ConditionalTask checks if data is false +// for now this is all we need but in the future we can +// expand this to handle more general conditional statements +type ConditionalTask struct { + BaseTask `mapstructure:",squash"` + Data string `json:"data"` +} + +var _ Task = (*ConditionalTask)(nil) + +func (t *ConditionalTask) Type() TaskType { + return TaskTypeConditional +} + +func (t *ConditionalTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return 
Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + var ( + boolParam BoolParam + ) + err = multierr.Combine( + errors.Wrap(ResolveParam(&boolParam, From(VarExpr(t.Data, vars), Input(inputs, 0), nil)), "data"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + if !boolParam { + return Result{Error: errors.New("conditional was not satisfied")}, runInfo + } + return Result{Value: true}, runInfo +} diff --git a/core/services/pipeline/task.conditional_test.go b/core/services/pipeline/task.conditional_test.go new file mode 100644 index 00000000..dfe8392a --- /dev/null +++ b/core/services/pipeline/task.conditional_test.go @@ -0,0 +1,71 @@ +package pipeline_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestConditionalTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + expectErr bool + }{ + {"true string", "true", false}, + {"false string", "false", true}, + {"empty string", "", true}, + {"0 string", "0", true}, + {"1 string", "1", false}, + {"abc string", "abc", true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Run("without vars", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.ConditionalTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Data: test.input.(string)} + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: test.input}}) + + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if test.expectErr { + require.Error(t, result.Error) + require.Equal(t, nil, result.Value) + } else { + require.NoError(t, result.Error) + require.Equal(t, true, result.Value.(bool)) + } + }) + t.Run("with vars", func(t 
*testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": test.input}, + }) + task := pipeline.ConditionalTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Data: "$(foo.bar)", + } + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{}) + + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if test.expectErr { + require.Error(t, result.Error) + require.Equal(t, nil, result.Value) + } else { + require.NoError(t, result.Error) + require.Equal(t, true, result.Value.(bool)) + } + }) + }) + } +} diff --git a/core/services/pipeline/task.divide.go b/core/services/pipeline/task.divide.go new file mode 100644 index 00000000..0edb1261 --- /dev/null +++ b/core/services/pipeline/task.divide.go @@ -0,0 +1,70 @@ +package pipeline + +import ( + "context" + "math" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// *decimal.Decimal +type DivideTask struct { + BaseTask `mapstructure:",squash"` + Input string `json:"input"` + Divisor string `json:"divisor"` + Precision string `json:"precision"` +} + +var _ Task = (*DivideTask)(nil) + +var ( + ErrDivideByZero = errors.New("divide by zero") + ErrDivisionOverlow = errors.New("division overflow") +) + +func (t *DivideTask) Type() TaskType { + return TaskTypeDivide +} + +func (t *DivideTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, -1, -1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var ( + a DecimalParam + b DecimalParam + maybePrecision MaybeInt32Param + ) + err = multierr.Combine( + errors.Wrap(ResolveParam(&a, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), "input"), + errors.Wrap(ResolveParam(&b, From(VarExpr(t.Divisor, vars), 
NonemptyString(t.Divisor))), "divisor"), + errors.Wrap(ResolveParam(&maybePrecision, From(VarExpr(t.Precision, vars), t.Precision)), "precision"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + if b.Decimal().IsZero() { + return Result{Error: ErrDivideByZero}, runInfo + } + + if precision, isSet := maybePrecision.Int32(); isSet { + scale := -precision + e := int64(a.Decimal().Exponent()) - int64(b.Decimal().Exponent()) - int64(scale) + if e > math.MaxInt32 || e < math.MinInt32 { + return Result{Error: ErrDivisionOverlow}, runInfo + } + + return Result{Value: a.Decimal().DivRound(b.Decimal(), precision)}, runInfo + } + // Note that decimal library defaults to rounding to 16 precision + // https://github.com/shopspring/decimal/blob/2568a29459476f824f35433dfbef158d6ad8618c/decimal.go#L44 + return Result{Value: a.Decimal().Div(b.Decimal())}, runInfo +} diff --git a/core/services/pipeline/task.divide_test.go b/core/services/pipeline/task.divide_test.go new file mode 100644 index 00000000..c60f55a1 --- /dev/null +++ b/core/services/pipeline/task.divide_test.go @@ -0,0 +1,195 @@ +package pipeline_test + +import ( + "fmt" + "math" + "testing" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestDivideTask_Happy(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + divisor string + precision string + expected *decimal.Decimal + }{ + {"string", "12345.67", "100", "", mustDecimal(t, "123.4567")}, + {"string, negative", "12345.67", "-5", "", mustDecimal(t, "-2469.134")}, + {"string, large value", "12345.67", "1000000000000000000", "", mustDecimal(t, "0.0000000000000123")}, + + {"int", int(200), "16", "", mustDecimal(t, "12.5")}, + {"int, negative", 
int(200), "-5", "", mustDecimal(t, "-40")}, + {"int, large value", int(200), "1000000000000000000", "", mustDecimal(t, "0.0000000000000002")}, + + {"int8", int8(20), "16", "", mustDecimal(t, "1.25")}, + {"int8, negative", int8(20), "-5", "", mustDecimal(t, "-4")}, + {"int8, large value", int8(20), "10000000000000000", "", mustDecimal(t, "0.000000000000002")}, + + {"int16", int16(200), "16", "", mustDecimal(t, "12.5")}, + {"int16, negative", int16(200), "-5", "", mustDecimal(t, "-40")}, + {"int16, large value", int16(200), "1000000000000000000", "", mustDecimal(t, "0.0000000000000002")}, + + {"int32", int32(200), "16", "", mustDecimal(t, "12.5")}, + {"int32, negative", int32(200), "-5", "", mustDecimal(t, "-40")}, + {"int32, large value", int32(200), "1000000000000000000", "", mustDecimal(t, "0.0000000000000002")}, + + {"int64", int64(200), "16", "", mustDecimal(t, "12.5")}, + {"int64, negative", int64(200), "-5", "", mustDecimal(t, "-40")}, + {"int64, large value", int64(200), "1000000000000000000", "", mustDecimal(t, "0.0000000000000002")}, + + {"uint", uint(200), "16", "", mustDecimal(t, "12.5")}, + {"uint, negative", uint(200), "-5", "", mustDecimal(t, "-40")}, + {"uint, large value", uint(200), "1000000000000000000", "", mustDecimal(t, "0.0000000000000002")}, + + {"uint8", uint8(200), "16", "", mustDecimal(t, "12.5")}, + {"uint8, negative", uint8(200), "-5", "", mustDecimal(t, "-40")}, + {"uint8, large value", uint8(200), "1000000000000000000", "", mustDecimal(t, "0.0000000000000002")}, + + {"uint16", uint16(200), "16", "", mustDecimal(t, "12.5")}, + {"uint16, negative", uint16(200), "-5", "", mustDecimal(t, "-40")}, + {"uint16, large value", uint16(200), "1000000000000000000", "", mustDecimal(t, "0.0000000000000002")}, + + {"uint32", uint32(200), "16", "", mustDecimal(t, "12.5")}, + {"uint32, negative", uint32(200), "-5", "", mustDecimal(t, "-40")}, + {"uint32, large value", uint32(200), "1000000000000000000", "", mustDecimal(t, "0.0000000000000002")}, + + 
{"uint64", uint64(200), "16", "", mustDecimal(t, "12.5")}, + {"uint64, negative", uint64(200), "-5", "", mustDecimal(t, "-40")}, + {"uint64, large value", uint64(200), "1000000000000000000", "", mustDecimal(t, "0.0000000000000002")}, + + {"float32", float32(12345.67), "1000", "", mustDecimal(t, "12.34567")}, + {"float32, negative", float32(12345.67), "-5", "", mustDecimal(t, "-2469.134")}, + {"float32, large value", float32(12345.67), "1000000000000000000", "", mustDecimal(t, "0.0000000000000123")}, + + {"float64", float64(12345.67), "1000", "", mustDecimal(t, "12.34567")}, + {"float64, negative", float64(12345.67), "-5", "", mustDecimal(t, "-2469.134")}, + {"float64, large value", float64(12345.67), "1000000000000000000", "", mustDecimal(t, "0.0000000000000123")}, + + {"precision", float64(12345.67), "1000", "2", mustDecimal(t, "12.35")}, + {"precision (> 16)", float64(200), "6", "18", mustDecimal(t, "33.333333333333333333")}, + {"precision (negative)", float64(12345.67), "1000", "-1", mustDecimal(t, "10")}, + } + + for _, test := range tests { + assertOK := func(result pipeline.Result, runInfo pipeline.RunInfo) { + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.NoError(t, result.Error) + require.Equal(t, test.expected.String(), result.Value.(decimal.Decimal).String()) + } + t.Run(test.name, func(t *testing.T) { + t.Run("without vars through job DAG", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.DivideTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Divisor: test.divisor, + Precision: test.precision, + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: test.input}})) + }) + t.Run("without vars through input param", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.DivideTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: fmt.Sprintf("%v", test.input), + Divisor: test.divisor, + Precision: 
test.precision, + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + t.Run("with vars", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": test.input}, + "chain": map[string]interface{}{"link": test.divisor}, + "sergey": map[string]interface{}{"steve": test.precision}, + }) + task := pipeline.DivideTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: "$(foo.bar)", + Divisor: "$(chain.link)", + Precision: "$(sergey.steve)", + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + }) + } +} + +func TestDivideTask_Unhappy(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + divisor string + input string + inputs []pipeline.Result + vars pipeline.Vars + wantErrorCause error + wantErrorContains string + }{ + {"map as input from inputs", "100", "", []pipeline.Result{{Value: map[string]interface{}{"chain": "link"}}}, pipeline.NewVarsFrom(nil), pipeline.ErrBadInput, "input"}, + {"map as input from var", "100", "$(foo)", nil, pipeline.NewVarsFrom(map[string]interface{}{"foo": map[string]interface{}{"chain": "link"}}), pipeline.ErrBadInput, "input"}, + {"slice as input from inputs", "100", "", []pipeline.Result{{Value: []interface{}{"chain", "link"}}}, pipeline.NewVarsFrom(nil), pipeline.ErrBadInput, "input"}, + {"slice as input from var", "100", "$(foo)", nil, pipeline.NewVarsFrom(map[string]interface{}{"foo": []interface{}{"chain", "link"}}), pipeline.ErrBadInput, "input"}, + {"input as missing var", "100", "$(foo)", nil, pipeline.NewVarsFrom(nil), pipeline.ErrKeypathNotFound, "input"}, + {"divisor as missing var", "$(foo)", "", []pipeline.Result{{Value: "123"}}, pipeline.NewVarsFrom(nil), pipeline.ErrKeypathNotFound, "divisor"}, + {"errored inputs", "1000", "", []pipeline.Result{{Error: errors.New("uh oh")}}, pipeline.NewVarsFrom(nil), pipeline.ErrTooManyErrors, "task inputs"}, 
+ {"divide by zero", "0", "", []pipeline.Result{{Value: "123"}}, pipeline.NewVarsFrom(nil), pipeline.ErrDivideByZero, "divide by zero"}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + task := pipeline.DivideTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: test.input, + Divisor: test.divisor, + } + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.Equal(t, test.wantErrorCause, errors.Cause(result.Error)) + if test.wantErrorContains != "" { + require.Contains(t, result.Error.Error(), test.wantErrorContains) + } + }) + } +} + +func TestDivideTask_Overflow(t *testing.T) { + t.Parallel() + + d1, err := decimal.NewFromString("6.34e-01") + assert.NoError(t, err) + d2, err := decimal.NewFromString("6.34e-10") + assert.NoError(t, err) + + task := pipeline.DivideTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: "$(a)", + Divisor: "$(b)", + Precision: fmt.Sprintf("%d", math.MaxInt32), + } + + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "a": d1, + "b": d2, + }) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: "123"}}) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.Equal(t, pipeline.ErrDivisionOverlow, errors.Cause(result.Error)) +} diff --git a/core/services/pipeline/task.estimategas.go b/core/services/pipeline/task.estimategas.go new file mode 100644 index 00000000..a63c82fc --- /dev/null +++ b/core/services/pipeline/task.estimategas.go @@ -0,0 +1,135 @@ +package pipeline + +import ( + "context" + "fmt" + "math" + "strconv" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "go.uber.org/multierr" + + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// uint64 +type EstimateGasLimitTask struct { + BaseTask `mapstructure:",squash"` + Input string `json:"input"` + From string `json:"from"` + To string `json:"to"` + Multiplier string `json:"multiplier"` + Data string `json:"data"` + EVMChainID string `json:"evmChainID" mapstructure:"evmChainID"` + Block string `json:"block"` + + specGasLimit *uint32 + legacyChains legacyevm.LegacyChainContainer + jobType string +} + +type GasEstimator interface { + EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) +} + +var ( + _ Task = (*EstimateGasLimitTask)(nil) + ErrInvalidMultiplier = errors.New("Invalid multiplier") +) + +func (t *EstimateGasLimitTask) Type() TaskType { + return TaskTypeEstimateGasLimit +} + +func (t *EstimateGasLimitTask) getEvmChainID() string { + if t.EVMChainID == "" { + t.EVMChainID = "$(jobSpec.evmChainID)" + } + return t.EVMChainID +} + +func (t *EstimateGasLimitTask) Run(ctx context.Context, lggr logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + var ( + fromAddr AddressParam + toAddr AddressParam + data BytesParam + multiplier DecimalParam + chainID StringParam + block StringParam + ) + err := multierr.Combine( + errors.Wrap(ResolveParam(&fromAddr, From(VarExpr(t.From, vars), utils.ZeroAddress)), "from"), + errors.Wrap(ResolveParam(&toAddr, From(VarExpr(t.To, vars), NonemptyString(t.To))), "to"), + errors.Wrap(ResolveParam(&data, From(VarExpr(t.Data, vars), NonemptyString(t.Data))), "data"), + // Default to 1, i.e. 
exactly what estimateGas suggests + errors.Wrap(ResolveParam(&multiplier, From(VarExpr(t.Multiplier, vars), NonemptyString(t.Multiplier), decimal.New(1, 0))), "multiplier"), + errors.Wrap(ResolveParam(&chainID, From(VarExpr(t.getEvmChainID(), vars), NonemptyString(t.getEvmChainID()), "")), "evmChainID"), + errors.Wrap(ResolveParam(&block, From(VarExpr(t.Block, vars), t.Block)), "block"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + chain, err := t.legacyChains.Get(string(chainID)) + if err != nil { + err = fmt.Errorf("%w: %s: %w", ErrInvalidEVMChainID, chainID, err) + return Result{Error: err}, runInfo + } + + maximumGasLimit := SelectGasLimit(chain.Config().EVM().GasEstimator(), t.jobType, t.specGasLimit) + to := common.Address(toAddr) + var gasLimit hexutil.Uint64 + args := map[string]interface{}{ + "from": common.Address(fromAddr), + "to": &to, + "input": hexutil.Bytes([]byte(data)), + } + + selectedBlock, err := selectBlock(string(block)) + if err != nil { + return Result{Error: err}, runInfo + } + err = chain.Client().CallContext(ctx, + &gasLimit, + "eth_estimateGas", + args, + selectedBlock, + ) + + if err != nil { + // Fallback to the maximum conceivable gas limit + // if we're unable to call estimate gas for whatever reason. 
+ lggr.Warnw("EstimateGas: unable to estimate, fallback to configured limit", "err", err, "fallback", maximumGasLimit) + return Result{Value: maximumGasLimit}, runInfo + } + + gasLimitDecimal, err := decimal.NewFromString(strconv.FormatUint(uint64(gasLimit), 10)) + if err != nil { + return Result{Error: err}, retryableRunInfo() + } + newExp := int64(gasLimitDecimal.Exponent()) + int64(multiplier.Decimal().Exponent()) + if newExp > math.MaxInt32 || newExp < math.MinInt32 { + return Result{Error: ErrMultiplyOverlow}, retryableRunInfo() + } + gasLimitWithMultiplier := gasLimitDecimal.Mul(multiplier.Decimal()).Truncate(0).BigInt() + if !gasLimitWithMultiplier.IsUint64() { + return Result{Error: ErrInvalidMultiplier}, retryableRunInfo() + } + gasLimitFinal := uint32(gasLimitWithMultiplier.Uint64()) + if gasLimitFinal > maximumGasLimit { + lggr.Warnw("EstimateGas: estimated amount is greater than configured limit, fallback to configured limit", + "estimate", gasLimitFinal, + "fallback", maximumGasLimit, + ) + gasLimitFinal = maximumGasLimit + } + return Result{Value: gasLimitFinal}, runInfo +} diff --git a/core/services/pipeline/task.eth_abi_decode.go b/core/services/pipeline/task.eth_abi_decode.go new file mode 100644 index 00000000..842d6431 --- /dev/null +++ b/core/services/pipeline/task.eth_abi_decode.go @@ -0,0 +1,57 @@ +package pipeline + +import ( + "context" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// map[string]interface{} with any geth/abigen value type +type ETHABIDecodeTask struct { + BaseTask `mapstructure:",squash"` + ABI string `json:"abi"` + Data string `json:"data"` +} + +var _ Task = (*ETHABIDecodeTask)(nil) + +func (t *ETHABIDecodeTask) Type() TaskType { + return TaskTypeETHABIDecode +} + +func (t *ETHABIDecodeTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if 
err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var ( + data BytesParam + theABI BytesParam + ) + err = multierr.Combine( + errors.Wrap(ResolveParam(&data, From(VarExpr(t.Data, vars), Input(inputs, 0))), "data"), + errors.Wrap(ResolveParam(&theABI, From(NonemptyString(t.ABI))), "abi"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + args, _, err := ParseETHABIArgsString([]byte(theABI), false) + if err != nil { + return Result{Error: errors.Wrap(ErrBadInput, err.Error())}, runInfo + } + + out := make(map[string]interface{}) + if len(data) > 0 { + if err := args.UnpackIntoMap(out, []byte(data)); err != nil { + return Result{Error: err}, runInfo + } + } + return Result{Value: out}, runInfo +} diff --git a/core/services/pipeline/task.eth_abi_decode_log.go b/core/services/pipeline/task.eth_abi_decode_log.go new file mode 100644 index 00000000..f711b31c --- /dev/null +++ b/core/services/pipeline/task.eth_abi_decode_log.go @@ -0,0 +1,70 @@ +package pipeline + +import ( + "context" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// map[string]interface{} with any geth/abigen value type +type ETHABIDecodeLogTask struct { + BaseTask `mapstructure:",squash"` + ABI string `json:"abi"` + Data string `json:"data"` + Topics string `json:"topics"` +} + +var _ Task = (*ETHABIDecodeLogTask)(nil) + +func (t *ETHABIDecodeLogTask) Type() TaskType { + return TaskTypeETHABIDecodeLog +} + +func (t *ETHABIDecodeLogTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, -1, -1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var ( + theABI BytesParam + data BytesParam + topics HashSliceParam + ) + err = multierr.Combine( + errors.Wrap(ResolveParam(&data, From(VarExpr(t.Data, 
vars), nil)), "data"), + errors.Wrap(ResolveParam(&topics, From(VarExpr(t.Topics, vars))), "topics"), + errors.Wrap(ResolveParam(&theABI, From(NonemptyString(t.ABI))), "abi"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + _, args, indexedArgs, err := parseETHABIString([]byte(theABI), true) + if err != nil { + return Result{Error: errors.Wrap(ErrBadInput, err.Error())}, runInfo + } + + out := make(map[string]interface{}) + if len(data) > 0 { + if err2 := args.UnpackIntoMap(out, []byte(data)); err2 != nil { + return Result{Error: errors.Wrap(ErrBadInput, err2.Error())}, runInfo + } + } + if len(indexedArgs) > 0 { + if len(topics) != len(indexedArgs)+1 { + return Result{Error: errors.Wrap(ErrBadInput, "topic/field count mismatch")}, runInfo + } + err = abi.ParseTopicsIntoMap(out, indexedArgs, topics[1:]) + if err != nil { + return Result{Error: errors.Wrap(ErrBadInput, err.Error())}, runInfo + } + } + return Result{Value: out}, runInfo +} diff --git a/core/services/pipeline/task.eth_abi_decode_log_test.go b/core/services/pipeline/task.eth_abi_decode_log_test.go new file mode 100644 index 00000000..d1e9864e --- /dev/null +++ b/core/services/pipeline/task.eth_abi_decode_log_test.go @@ -0,0 +1,257 @@ +package pipeline_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestETHABIDecodeLogTask(t *testing.T) { + tests := []struct { + name string + abi string + data string + topics string + vars pipeline.Vars + inputs []pipeline.Result + expected map[string]interface{} + expectedErrorCause error + expectedErrorContains 
string + }{ + { + "AggregatorV2V3#NewRound", + "NewRound(uint256 indexed roundId, address indexed startedBy, uint256 startedAt)", + `$(foo.data)`, + `$(foo.topics)`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "address": common.HexToAddress("0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6"), + "data": hexutil.MustDecode("0x000000000000000000000000000000000000000000000000000000000000000f"), + "topics": []common.Hash{ + common.HexToHash("0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271"), + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000009"), + common.HexToHash("0x000000000000000000000000f17f52151ebef6c7334fad080c5704d77216b732"), + }, + }, + }), + nil, + map[string]interface{}{ + "roundId": big.NewInt(9), + "startedBy": common.HexToAddress("0xf17f52151ebef6c7334fad080c5704d77216b732"), + "startedAt": big.NewInt(15), + }, + nil, + "", + }, + { + "Operator#OracleRequest", + "OracleRequest(bytes32 indexed specId, address requester, bytes32 requestId, uint256 payment, address callbackAddr, bytes4 callbackFunctionId, uint256 cancelExpiration, uint256 dataVersion, bytes data)", + `$(foo.data)`, + `$(foo.topics)`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "address": common.HexToAddress("0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6"), + "data": 
hexutil.MustDecode("0x000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef74686520726571756573742069640000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020a6e000000000000000000000000cafebabecafebabecafebabecafebabecafebabe61736466000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003039000000000000000000000000000000000000000000000000000000000000d431000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000147374657665746f7368692073657267616d6f746f000000000000000000000000"), + "topics": []common.Hash{ + common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65"), + common.HexToHash("0x746865206a6f6220696400000000000000000000000000000000000000000000"), + }, + }, + }), + nil, + map[string]interface{}{ + "specId": utils.Bytes32FromString("the job id"), + "requester": common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF"), + "requestId": utils.Bytes32FromString("the request id"), + "payment": big.NewInt(133742), + "callbackAddr": common.HexToAddress("0xCafEBAbECAFEbAbEcaFEbabECAfebAbEcAFEBaBe"), + "callbackFunctionId": utils.Bytes4FromString("asdf"), + "cancelExpiration": big.NewInt(12345), + "dataVersion": big.NewInt(54321), + "data": []byte("stevetoshi sergamoto"), + }, + nil, + "", + }, + { + "Operator#AuthorizedSendersChanged", + "AuthorizedSendersChanged(address[] senders)", + `$(foo.data)`, + `$(foo.topics)`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "address": common.HexToAddress("0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6"), + "data": 
hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef000000000000000000000000cafebabecafebabecafebabecafebabecafebabe"), + "topics": []common.Hash{ + common.HexToHash("0xe720bc96024900ba647b8faa27766eb59f72cadf3c7ec34a7365c999f78320db"), + }, + }, + }), + nil, + map[string]interface{}{ + "senders": []common.Address{ + common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF"), + common.HexToAddress("0xCafEBAbECAFEbAbEcaFEbabECAfebAbEcAFEBaBe"), + }, + }, + nil, + "", + }, + + { + "missing arg name", + "SomeEvent(bytes32)", + `$(foo.data)`, + `$(foo.topics)`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "address": common.HexToAddress("0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6"), + "data": hexutil.MustDecode("0x000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef74686520726571756573742069640000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020a6e000000000000000000000000cafebabecafebabecafebabecafebabecafebabe61736466000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003039000000000000000000000000000000000000000000000000000000000000d431000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000147374657665746f7368692073657267616d6f746f000000000000000000000000"), + "topics": []common.Hash{ + common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65"), + }, + }, + }), + nil, + nil, + pipeline.ErrBadInput, + "bad ABI specification", + }, + { + "missing arg name (with 'indexed' modifier)", + "SomeEvent(bytes32 indexed)", + `$(foo.data)`, + `$(foo.topics)`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "address": 
common.HexToAddress("0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6"), + "data": hexutil.MustDecode("0x000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef74686520726571756573742069640000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020a6e000000000000000000000000cafebabecafebabecafebabecafebabecafebabe61736466000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003039000000000000000000000000000000000000000000000000000000000000d431000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000147374657665746f7368692073657267616d6f746f000000000000000000000000"), + "topics": []common.Hash{ + common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65"), + }, + }, + }), + nil, + nil, + pipeline.ErrBadInput, + "bad ABI specification", + }, + { + "missing topic data", + "OracleRequest(bytes32 indexed specId, address requester, bytes32 requestId, uint256 payment, address callbackAddr, bytes4 callbackFunctionId, uint256 cancelExpiration, uint256 dataVersion, bytes data)", + `$(foo.data)`, + `$(foo.topics)`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "address": common.HexToAddress("0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6"), + "data": 
hexutil.MustDecode("0x000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef74686520726571756573742069640000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020a6e000000000000000000000000cafebabecafebabecafebabecafebabecafebabe61736466000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003039000000000000000000000000000000000000000000000000000000000000d431000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000147374657665746f7368692073657267616d6f746f000000000000000000000000"), + "topics": []common.Hash{ + common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65"), + }, + }, + }), + nil, + nil, + pipeline.ErrBadInput, + "topic/field count mismatch", + }, + { + "not enough data: len(data) % 32 != 0", + "OracleRequest(bytes32 indexed specId, address requester, bytes32 requestId, uint256 payment, address callbackAddr, bytes4 callbackFunctionId, uint256 cancelExpiration, uint256 dataVersion, bytes data)", + `$(foo.data)`, + `$(foo.topics)`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "address": common.HexToAddress("0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6"), + "data": hexutil.MustDecode("0x000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef74686520726571756573742069640000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020a6e000000000000000000000000cafebabecafebabecafebabecafebabecafebabe61736466000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003039000000000000000000000000000000000000000000000000000000000000d4310000000000000000000000000000000000000000000000000000"), + "topics": []common.Hash{ + common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65"), + 
common.HexToHash("0x746865206a6f6220696400000000000000000000000000000000000000000000"), + }, + }, + }), + nil, + nil, + pipeline.ErrBadInput, + "length insufficient 250 require 256", + }, + { + "not enough data: len(data) < len(non-indexed args) * 32", + "OracleRequest(bytes32 indexed specId, address requester, bytes32 requestId, uint256 foobar)", + `$(foo.data)`, + `$(foo.topics)`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "address": common.HexToAddress("0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6"), + "data": hexutil.MustDecode("0x000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef7468652072657175657374206964000000000000000000000000000000000000"), + "topics": []common.Hash{ + common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65"), + common.HexToHash("0x746865206a6f6220696400000000000000000000000000000000000000000000"), + }, + }, + }), + nil, + nil, + pipeline.ErrBadInput, + "length insufficient 64 require 96", + }, + { + "errored task inputs", + "NewRound(uint256 indexed roundId, address indexed startedBy, uint256 startedAt)", + `$(foo.data)`, + `$(foo.topics)`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "address": common.HexToAddress("0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6"), + "data": hexutil.MustDecode("0x000000000000000000000000000000000000000000000000000000000000000f"), + "topics": []common.Hash{ + common.HexToHash("0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271"), + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000009"), + common.HexToHash("0x000000000000000000000000f17f52151ebef6c7334fad080c5704d77216b732"), + }, + }, + }), + []pipeline.Result{{Error: errors.New("uh oh")}}, + nil, + pipeline.ErrTooManyErrors, + "task inputs", + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + task := pipeline.ETHABIDecodeLogTask{ + BaseTask: 
pipeline.NewBaseTask(0, "decodelog", nil, nil, 0), + ABI: test.abi, + Data: test.data, + Topics: test.topics, + } + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + + if test.expectedErrorCause != nil { + require.Equal(t, test.expectedErrorCause, errors.Cause(result.Error)) + require.Nil(t, result.Value) + if test.expectedErrorContains != "" { + require.Contains(t, result.Error.Error(), test.expectedErrorContains) + } + } else { + require.NoError(t, result.Error) + require.Equal(t, test.expected, result.Value) + } + }) + } +} diff --git a/core/services/pipeline/task.eth_abi_decode_test.go b/core/services/pipeline/task.eth_abi_decode_test.go new file mode 100644 index 00000000..7ae1c003 --- /dev/null +++ b/core/services/pipeline/task.eth_abi_decode_test.go @@ -0,0 +1,136 @@ +package pipeline + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +var testsABIDecode = []struct { + name string + abi string + data string + vars Vars + inputs []Result + expected map[string]interface{} + expectedErrorCause error + expectedErrorContains string +}{ + { + "uint256, bool, int256, string", + "uint256 u, bool b, int256 i, string s", + "$(foo)", + NewVarsFrom(map[string]interface{}{ + "foo": 
"0x000000000000000000000000000000000000000000000000000000000000007b0000000000000000000000000000000000000000000000000000000000000001fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebf0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000b666f6f206261722062617a000000000000000000000000000000000000000000", + }), + nil, + map[string]interface{}{ + "u": big.NewInt(123), + "b": true, + "i": big.NewInt(-321), + "s": "foo bar baz", + }, + nil, + "", + }, + { + "bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth", + "bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth", + "$(foo)", + NewVarsFrom(map[string]interface{}{ + "foo": "0x00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000002cc18069c8a2800000000000000000000000000000000000000000000000000000000000002625a000000000000000000000000000000000000000000000000000000000000000c8000000000000000000000000000000000000000000000000000000000bebc20000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000", + }), + nil, + map[string]interface{}{ + "performData": []uint8{0x0}, + "maxLinkPayment": big.NewInt(3225000000000000000), + "gasLimit": big.NewInt(2500000), + "adjustedGasWei": big.NewInt(200), + "linkEth": big.NewInt(200000000), + }, + nil, + "", + }, + { + "weird spaces / address, uint80[3][], bytes, bytes32", + "address a , uint80[3][] u , bytes b, bytes32 b32 ", + "$(foo)", + NewVarsFrom(map[string]interface{}{ + "foo": 
"0x000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001607374657665746f7368692073657267616d6f746f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000005c000000000000000000000000000000000000000000000000000000000000003d000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000002100000000000000000000000000000000000000000000000000000000000000420000000000000000000000000000000000000000000000000000000000000063000000000000000000000000000000000000000000000000000000000000000c666f6f206261722062617a0a0000000000000000000000000000000000000000", + }), + nil, + map[string]interface{}{ + "a": common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), + "u": [][3]*big.Int{ + {big.NewInt(92), big.NewInt(61), big.NewInt(30)}, + {big.NewInt(33), big.NewInt(66), big.NewInt(99)}, + }, + "b": hexutil.MustDecode("0x666f6f206261722062617a0a"), + "b32": utils.Bytes32FromString("stevetoshi sergamoto"), + }, + nil, + "", + }, + { + "no attribute names", + "address, bytes32", + "$(foo)", + NewVarsFrom(map[string]interface{}{ + "foo": 
"0x000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001607374657665746f7368692073657267616d6f746f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000005c000000000000000000000000000000000000000000000000000000000000003d000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000002100000000000000000000000000000000000000000000000000000000000000420000000000000000000000000000000000000000000000000000000000000063000000000000000000000000000000000000000000000000000000000000000c666f6f206261722062617a0a0000000000000000000000000000000000000000", + }), + nil, + nil, + ErrBadInput, + "", + }, + { + "errored task inputs", + "uint256 u, bool b, int256 i, string s", + "$(foo)", + NewVarsFrom(map[string]interface{}{ + "foo": "0x000000000000000000000000000000000000000000000000000000000000007b0000000000000000000000000000000000000000000000000000000000000001fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebf0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000b666f6f206261722062617a000000000000000000000000000000000000000000", + }), + []Result{{Error: errors.New("uh oh")}}, + nil, + ErrTooManyErrors, + "task inputs", + }, +} + +func TestETHABIDecodeTask(t *testing.T) { + for _, test := range testsABIDecode { + test := test + + t.Run(test.name, func(t *testing.T) { + task := ETHABIDecodeTask{ + BaseTask: NewBaseTask(0, "decode", nil, nil, 0), + ABI: test.abi, + Data: test.data, + } + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + + if test.expectedErrorCause != nil { + 
require.Equal(t, test.expectedErrorCause, errors.Cause(result.Error)) + require.Nil(t, result.Value) + if test.expectedErrorContains != "" { + require.Contains(t, result.Error.Error(), test.expectedErrorContains) + } + } else { + require.NoError(t, result.Error) + require.Equal(t, test.expected, result.Value) + } + }) + } +} diff --git a/core/services/pipeline/task.eth_abi_encode.go b/core/services/pipeline/task.eth_abi_encode.go new file mode 100644 index 00000000..c6db1572 --- /dev/null +++ b/core/services/pipeline/task.eth_abi_encode.go @@ -0,0 +1,77 @@ +package pipeline + +import ( + "context" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// []byte +type ETHABIEncodeTask struct { + BaseTask `mapstructure:",squash"` + ABI string `json:"abi"` + Data string `json:"data"` +} + +var _ Task = (*ETHABIEncodeTask)(nil) + +func (t *ETHABIEncodeTask) Type() TaskType { + return TaskTypeETHABIEncode +} + +func (t *ETHABIEncodeTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, -1, -1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var ( + inputValues MapParam + theABI BytesParam + ) + err = multierr.Combine( + errors.Wrap(ResolveParam(&inputValues, From(VarExpr(t.Data, vars), JSONWithVarExprs(t.Data, vars, false), nil)), "data"), + errors.Wrap(ResolveParam(&theABI, From(NonemptyString(t.ABI))), "abi"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + methodName, args, _, err := parseETHABIString([]byte(theABI), false) + if err != nil { + return Result{Error: errors.Wrapf(ErrBadInput, "ETHABIEncode: while parsing ABI string: %v", err)}, runInfo + } + method := abi.NewMethod(methodName, methodName, abi.Function, "", false, false, args, nil) 
+ + var vals []interface{} + for _, arg := range args { + val, exists := inputValues[arg.Name] + if !exists { + return Result{Error: errors.Wrapf(ErrBadInput, "ETHABIEncode: argument '%v' is missing", arg.Name)}, runInfo + } + val, err = convertToETHABIType(val, arg.Type) + if err != nil { + return Result{Error: errors.Wrapf(ErrBadInput, "ETHABIEncode: while converting argument '%v' from %T to %v: %v", arg.Name, val, arg.Type, err)}, runInfo + } + vals = append(vals, val) + } + + argsEncoded, err := method.Inputs.Pack(vals...) + if err != nil { + return Result{Error: errors.Wrapf(ErrBadInput, "ETHABIEncode: could not ABI encode values: %v", err)}, runInfo + } + var dataBytes []byte + if methodName != "" { + dataBytes = append(method.ID, argsEncoded...) + } else { + dataBytes = argsEncoded + } + return Result{Value: hexutil.Encode(dataBytes)}, runInfo +} diff --git a/core/services/pipeline/task.eth_abi_encode_2.go b/core/services/pipeline/task.eth_abi_encode_2.go new file mode 100644 index 00000000..5c34b290 --- /dev/null +++ b/core/services/pipeline/task.eth_abi_encode_2.go @@ -0,0 +1,90 @@ +package pipeline + +import ( + "context" + "encoding/json" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// []byte +type ETHABIEncodeTask2 struct { + BaseTask `mapstructure:",squash"` + ABI string `json:"abi"` + Data string `json:"data"` +} + +var _ Task = (*ETHABIEncodeTask2)(nil) + +func (t *ETHABIEncodeTask2) Type() TaskType { + return TaskTypeETHABIEncode +} + +func (t *ETHABIEncodeTask2) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (Result, RunInfo) { + _, err := CheckInputs(inputs, -1, -1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, RunInfo{} + } + + var ( + inputValues MapParam + theABI BytesParam + ) + err = multierr.Combine( + 
errors.Wrap(ResolveParam(&inputValues, From(VarExpr(t.Data, vars), JSONWithVarExprs(t.Data, vars, false), nil)), "data"), + errors.Wrap(ResolveParam(&theABI, From(NonemptyString(t.ABI))), "abi"), + ) + if err != nil { + return Result{Error: err}, RunInfo{} + } + + inputMethod := Method{} + err = json.Unmarshal(theABI, &inputMethod) + if err != nil { + return Result{Error: errors.Wrapf(ErrBadInput, "ETHABIEncode: while parsing ABI string: %v", err)}, RunInfo{} + } + + method := abi.NewMethod(inputMethod.Name, inputMethod.Name, abi.Function, "", false, false, inputMethod.Inputs, nil) + + var vals []interface{} + for _, arg := range method.Inputs { + if len(arg.Name) == 0 { + return Result{Error: errors.Wrapf(ErrBadInput, "ETHABIEncode: bad ABI specification, missing argument name")}, RunInfo{} + } + val, exists := inputValues[arg.Name] + if !exists { + return Result{Error: errors.Wrapf(ErrBadInput, "ETHABIEncode: argument '%v' is missing", arg.Name)}, RunInfo{} + } + val, err = convertToETHABIType(val, arg.Type) + if err != nil { + return Result{Error: errors.Wrapf(ErrBadInput, "ETHABIEncode: while converting argument '%v' from %T to %v: %v", arg.Name, val, arg.Type, err)}, RunInfo{} + } + vals = append(vals, val) + } + + argsEncoded, err := method.Inputs.Pack(vals...) + if err != nil { + return Result{Error: errors.Wrapf(ErrBadInput, "ETHABIEncode: could not ABI encode values: %v", err)}, RunInfo{} + } + var dataBytes []byte + if method.Name != "" { + dataBytes = append(method.ID, argsEncoded...) + } else { + dataBytes = argsEncoded + } + return Result{Value: hexutil.Encode(dataBytes)}, RunInfo{} +} + +// go-ethereum's abi.Method doesn't implement json.Marshal for Type, but +// otherwise would have worked fine, in any case we only care about these... 
+type Method struct { + Name string + Inputs abi.Arguments +} diff --git a/core/services/pipeline/task.eth_abi_encode_2_test.go b/core/services/pipeline/task.eth_abi_encode_2_test.go new file mode 100644 index 00000000..1cdbb535 --- /dev/null +++ b/core/services/pipeline/task.eth_abi_encode_2_test.go @@ -0,0 +1,373 @@ +package pipeline_test + +import ( + "math" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestETHABIEncodeTask2(t *testing.T) { + var bytes32 [32]byte + copy(bytes32[:], []byte("plugin plugin plugin")) + + tests := []struct { + name string + abi string + data string + vars pipeline.Vars + inputs []pipeline.Result + expected string + expectedErrorCause error + expectedErrorContains string + }{ + { + "unusual characters in method name / uint256, bool, int256, string", + `{ + "name": "foo_Bar__3928", + "inputs": [ + { + "indexed": false, + "name": "u", + "type": "uint256" + }, + { + "indexed": false, + "name": "b", + "type": "bool" + }, + { + "indexed": false, + "name": "i", + "type": "int256" + }, + { + "indexed": false, + "name": "s", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function", + "outputs": [] + }`, + `{ "u": $(foo), "b": $(bar), "i": $(baz), "s": $(quux) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": big.NewInt(123), + "bar": true, + "baz": big.NewInt(-321), + "quux": "foo bar baz", + }), + nil, + 
"0xae506917000000000000000000000000000000000000000000000000000000000000007b0000000000000000000000000000000000000000000000000000000000000001fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebf0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000b666f6f206261722062617a000000000000000000000000000000000000000000", + nil, + "", + }, + { + "bytes32, bytes, address", + `{ + "name": "asdf", + "inputs": [ + { + "name": "b", + "type": "bytes32" + }, + { + "name": "bs", + "type": "bytes" + }, + { + "name": "a", + "type": "address" + } + ] + }`, + `{ "b": $(foo), "bs": $(bar), "a": $(baz) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": bytes32, + "bar": []byte("stevetoshi sergeymoto"), + "baz": common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), + }), + nil, + "0x4f5e7a89636861696e6c696e6b20636861696e6c696e6b20636861696e6c696e6b0000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef00000000000000000000000000000000000000000000000000000000000000157374657665746f736869207365726765796d6f746f0000000000000000000000", + nil, + "", + }, + { + "address[] calldata, uint80, uint32[2]", + `{ + "name": "Plugin", + "inputs": [ + { + "name": "a", + "type": "address[]" + }, + { + "name": "x", + "type": "uint80" + }, + { + "name": "s", + "type": "uint32[2]" + } + ] + }`, + `{ "a": $(foo), "x": $(bar), "s": $(baz) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []common.Address{ + common.HexToAddress("0x6c91b062a774cbe8b9bf52f224c37badf98fc40b"), + common.HexToAddress("0xc4f27ead9083c756cc2c02aaa39b223fe8d0a0e5"), + common.HexToAddress("0x749e4598819b2b0e915a02120696c7b8fe16c09c"), + }, + "bar": big.NewInt(8293), + "baz": []*big.Int{big.NewInt(192), big.NewInt(4182)}, + }), + nil, + 
"0xa3a122020000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000206500000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000105600000000000000000000000000000000000000000000000000000000000000030000000000000000000000006c91b062a774cbe8b9bf52f224c37badf98fc40b000000000000000000000000c4f27ead9083c756cc2c02aaa39b223fe8d0a0e5000000000000000000000000749e4598819b2b0e915a02120696c7b8fe16c09c", + nil, + "", + }, + { + "bool[2][] calldata, uint96[2][] calldata", + `{ + "name": "arrayOfArrays", + "inputs": [ + { + "name": "bools", + "type": "bool[2][]" + }, + { + "name": "uints", + "type": "uint96[2][]" + } + ] + }`, + `{ "bools": $(foo), "uints": $(bar) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": [][]bool{{true, false}, {false, true}, {false, false}, {true, true}}, + "bar": [][]*big.Int{{big.NewInt(123), big.NewInt(456)}, {big.NewInt(22), big.NewInt(19842)}}, + }), + nil, + 
"0xb04bee77000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000007b00000000000000000000000000000000000000000000000000000000000001c800000000000000000000000000000000000000000000000000000000000000160000000000000000000000000000000000000000000000000000000000004d82", + nil, + "", + }, + { + "no args", + `{"name": "noArgs"}`, + ``, + pipeline.NewVarsFrom(nil), + nil, + "0x83c962bb", + nil, + "", + }, + { + "number too large for uint32", + `{ + "name": "willFail", + "inputs": [ + { + "name": "s", + "type": "uint32" + } + ] + }`, + `{ "s": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": big.NewInt(math.MaxInt64), + }), + nil, + "", + pipeline.ErrBadInput, + "overflow", + }, + { + "string too large for address", + `{ + "name": "willFail", + "inputs": [ + { + "name": "a", + "type": "address" + } + ] + }`, + `{ "a": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", + }), + nil, + "", + pipeline.ErrBadInput, + "incorrect length", + }, + { + "no argument names", + `{ + "name": "willFail", + "inputs": [ + { + "type": "address" + }, + { + "type": "uint256[]" + } + ] + }`, + ``, + pipeline.NewVarsFrom(nil), + nil, + "", + 
pipeline.ErrBadInput, + "missing argument name", + }, + { + "errored task inputs", + `{ + "name": "asdf", + "inputs": [ + { + "name": "b", + "type": "bytes32" + }, + { + "name": "bs", + "type": "bytes" + }, + { + "name": "a", + "type": "address" + } + ] + }`, + `{ "b": $(foo), "bs": $(bar), "a": $(baz) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": bytes32, + "bar": []byte("stevetoshi sergeymoto"), + "baz": common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), + }), + []pipeline.Result{{Error: errors.New("uh oh")}}, + "", + pipeline.ErrTooManyErrors, + "task inputs", + }, + { + "hex string to fixed size byte array (note used by fulfillOracleRequest(..., bytes32 data))", + `{ + "name": "asdf", + "inputs": [ + { + "name": "b", + "type": "bytes32" + } + ] + }`, + `{ "b": $(foo)}`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": "0x0000000000000000000000000000000000000000000000000000000000000001", + }), + nil, + "0x628507ac0000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "tuple with map", + `{ + "name": "call", + "inputs": [ + { + "name": "value", + "type": "tuple", + "components": [ + { + "name": "first", + "type": "bytes32" + }, + { + "name": "last", + "type": "bool" + } + ] + } + ] + }`, + `{ "value": $(value) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "value": map[string]interface{}{ + "first": "0x0000000000000000000000000000000000000000000000000000000000000001", + "last": true, + }, + }), + nil, + "0xb06b167500000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "tuple with array", + `{ + "name": "call", + "inputs": [ + { + "name": "value", + "type": "tuple", + "components": [ + { + "name": "first", + "type": "bytes32" + }, + { + "name": "last", + "type": "bool" + } + ] + } + ] + }`, + `{ "value": $(value) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "value": []interface{}{ 
+ "0x0000000000000000000000000000000000000000000000000000000000000001", + true, + }, + }), + nil, + "0xb06b167500000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + task := pipeline.ETHABIEncodeTask2{ + BaseTask: pipeline.NewBaseTask(0, "encode", nil, nil, 0), + ABI: test.abi, + Data: test.data, + } + + result, _ := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + + if test.expectedErrorCause != nil { + require.Equal(t, test.expectedErrorCause, errors.Cause(result.Error)) + require.Nil(t, result.Value) + if test.expectedErrorContains != "" { + require.Contains(t, result.Error.Error(), test.expectedErrorContains) + } + } else { + require.NoError(t, result.Error) + require.Equal(t, test.expected, result.Value) + } + }) + } +} diff --git a/core/services/pipeline/task.eth_abi_encode_test.go b/core/services/pipeline/task.eth_abi_encode_test.go new file mode 100644 index 00000000..c114346f --- /dev/null +++ b/core/services/pipeline/task.eth_abi_encode_test.go @@ -0,0 +1,598 @@ +package pipeline_test + +import ( + "fmt" + "math" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestETHABIEncodeTask(t *testing.T) { + var bytes32 [32]byte + copy(bytes32[:], []byte("plugin plugin plugin")) + + bytes32hex := utils.StringToHex(string(bytes32[:])) + + tests := []struct { + name string + abi string + data string + vars pipeline.Vars + inputs []pipeline.Result + expected string + expectedErrorCause error + 
expectedErrorContains string + }{ + { + "unusual characters in method name / uint256, bool, int256, string", + "foo_Bar__3928 ( uint256 u, bool b, int256 i, string s )", + `{ "u": $(foo), "b": $(bar), "i": $(baz), "s": $(quux) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": big.NewInt(123), + "bar": true, + "baz": big.NewInt(-321), + "quux": "foo bar baz", + }), + nil, + "0xae506917000000000000000000000000000000000000000000000000000000000000007b0000000000000000000000000000000000000000000000000000000000000001fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebf0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000b666f6f206261722062617a000000000000000000000000000000000000000000", + nil, + "", + }, + { + "bytes32, bytes, address", + "asdf(bytes32 b, bytes bs, address a)", + `{ "b": $(foo), "bs": $(bar), "a": $(baz) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": bytes32, + "bar": []byte("stevetoshi sergeymoto"), + "baz": common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), + }), + nil, + "0x4f5e7a89636861696e6c696e6b20636861696e6c696e6b20636861696e6c696e6b0000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef00000000000000000000000000000000000000000000000000000000000000157374657665746f736869207365726765796d6f746f0000000000000000000000", + nil, + "", + }, + { + "bytes32 (hex), bytes, address", + "asdf(bytes32 b, bytes bs, address a)", + `{ "b": $(foo), "bs": $(bar), "a": $(baz) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": bytes32hex, + "bar": []byte("stevetoshi sergeymoto"), + "baz": common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), + }), + nil, + 
"0x4f5e7a89636861696e6c696e6b20636861696e6c696e6b20636861696e6c696e6b0000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000deadbeefdeadbeefdeadbeefdeadbeefdeadbeef00000000000000000000000000000000000000000000000000000000000000157374657665746f736869207365726765796d6f746f0000000000000000000000", + nil, + "", + }, + { + "address[] calldata, uint80, uint32[2]", + "Plugin(address[] calldata a, uint80 x, uint32[2] s)", + `{ "a": $(foo), "x": $(bar), "s": $(baz) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []common.Address{ + common.HexToAddress("0x6c91b062a774cbe8b9bf52f224c37badf98fc40b"), + common.HexToAddress("0xc4f27ead9083c756cc2c02aaa39b223fe8d0a0e5"), + common.HexToAddress("0x749e4598819b2b0e915a02120696c7b8fe16c09c"), + }, + "bar": big.NewInt(8293), + "baz": []*big.Int{big.NewInt(192), big.NewInt(4182)}, + }), + nil, + "0xa3a122020000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000206500000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000105600000000000000000000000000000000000000000000000000000000000000030000000000000000000000006c91b062a774cbe8b9bf52f224c37badf98fc40b000000000000000000000000c4f27ead9083c756cc2c02aaa39b223fe8d0a0e5000000000000000000000000749e4598819b2b0e915a02120696c7b8fe16c09c", + nil, + "", + }, + { + "bool[2][] calldata, uint96[2][] calldata", + "arrayOfArrays(bool[2][] calldata bools, uint96[2][] calldata uints)", + `{ "bools": $(foo), "uints": $(bar) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": [][]bool{{true, false}, {false, true}, {false, false}, {true, true}}, + "bar": [][]*big.Int{{big.NewInt(123), big.NewInt(456)}, {big.NewInt(22), big.NewInt(19842)}}, + }), + nil, + 
"0xb04bee77000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000007b00000000000000000000000000000000000000000000000000000000000001c800000000000000000000000000000000000000000000000000000000000000160000000000000000000000000000000000000000000000000000000000004d82", + nil, + "", + }, + { + "no args", + "noArgs()", + ``, + pipeline.NewVarsFrom(nil), + nil, + "0x83c962bb", + nil, + "", + }, + { + "number too large for uint32", + "willFail(uint32 s)", + `{ "s": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": big.NewInt(math.MaxInt64), + }), + nil, + "", + pipeline.ErrBadInput, + "overflow", + }, + { + "string too large for address", + "willFail(address a)", + `{ "a": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", + }), + nil, + "", + pipeline.ErrBadInput, + "incorrect length", + }, + { + "too many array elements", + "willFail(uint32[2] a)", + `{ "a": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []interface{}{123, 456, 789}, + }), + nil, + "", + pipeline.ErrBadInput, + "incorrect length", + }, + { + "too many array elements (nested)", + "willFail(uint32[2][] a)", + `{ "a": $(foo) }`, + 
pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []interface{}{ + []interface{}{123, 456, 789}, + }, + }), + nil, + "", + pipeline.ErrBadInput, + "incorrect length", + }, + { + "no argument names", + "willFail(address, uint256[])", + ``, + pipeline.NewVarsFrom(nil), + nil, + "", + pipeline.ErrBadInput, + "missing argument name", + }, + { + "no argument names (calldata)", + "willFail(uint256[] calldata)", + ``, + pipeline.NewVarsFrom(nil), + nil, + "", + pipeline.ErrBadInput, + "missing argument name", + }, + { + "errored task inputs", + "asdf(bytes32 b, bytes bs, address a)", + `{ "b": $(foo), "bs": $(bar), "a": $(baz) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": bytes32, + "bar": []byte("stevetoshi sergeymoto"), + "baz": common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), + }), + []pipeline.Result{{Error: errors.New("uh oh")}}, + "", + pipeline.ErrTooManyErrors, + "task inputs", + }, + { + "hex string to fixed size byte array (note used by fulfillOracleRequest(..., bytes32 data))", + "asdf(bytes32 b)", + `{ "b": $(foo)}`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": "0x0000000000000000000000000000000000000000000000000000000000000001", + }), + nil, + "0x628507ac0000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + task := pipeline.ETHABIEncodeTask{ + BaseTask: pipeline.NewBaseTask(0, "encode", nil, nil, 0), + ABI: test.abi, + Data: test.data, + } + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + + if test.expectedErrorCause != nil { + require.Equal(t, test.expectedErrorCause, errors.Cause(result.Error)) + require.Nil(t, result.Value) + if test.expectedErrorContains != "" { + require.Contains(t, result.Error.Error(), test.expectedErrorContains) + } + } else { + 
require.NoError(t, result.Error) + require.Equal(t, test.expected, result.Value) + } + }) + } +} + +func TestETHABIEncode_EncodeIntegers(t *testing.T) { + testCases := []struct { + name string + abi string + data string + vars pipeline.Vars + inputs []pipeline.Result + expected string + expectedErrorCause error + expectedErrorContains string + }{ + // no overflow cases + // 8 bit ints. + { + "encode 1 to int8", + "asdf(int8 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": int8(1), + }), + nil, + "0xa8d7f3cd0000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "encode 1 to uint8", + "asdf(uint8 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": uint8(1), + }), + nil, + "0x6b377be20000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + // 16 bit ints. + { + "encode 1 to int16", + "asdf(int16 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": int16(1), + }), + nil, + "0xabd195460000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "encode 1 to uint16", + "asdf(uint16 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": uint16(1), + }), + nil, + "0x8f3294d20000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + // 24 bit ints. + { + "encode 1 to int24", + "asdf(int24 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": int32(1), + }), + nil, + "0xfdc8ca190000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "encode 1 to uint24", + "asdf(uint24 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": uint32(1), + }), + nil, + "0xd3f78f380000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + // 32 bit ints. 
+ { + "encode 1 to int32", + "asdf(int32 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": int32(1), + }), + nil, + "0x5124903a0000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "encode 1 to uint32", + "asdf(uint32 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": uint32(1), + }), + nil, + "0xeea24d600000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + // 40 bit ints. + { + "encode 1 to int40", + "asdf(int40 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": int64(1), + }), + nil, + "0x8fdcab050000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "encode 1 to uint40", + "asdf(uint40 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": uint64(1), + }), + nil, + "0xcb53df3b0000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + // 48 bit ints. + { + "encode 1 to int48", + "asdf(int48 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": int64(1), + }), + nil, + "0xeeab50db0000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "encode 1 to uint48", + "asdf(uint48 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": uint64(1), + }), + nil, + "0x2d4a67fd0000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + // 56 bit ints. 
+ { + "encode 1 to int56", + "asdf(int56 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": int64(1), + }), + nil, + "0x5f4d36420000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "encode 1 to uint56", + "asdf(uint56 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": uint64(1), + }), + nil, + "0xfe0d590c0000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + // 64 bit ints. + { + "encode 1 to int64", + "asdf(int64 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": int64(1), + }), + nil, + "0x9089b4180000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "encode 1 to uint64", + "asdf(uint64 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": uint64(1), + }), + nil, + "0x237643700000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + // Integer sizes strictly larger than 64 bits should resolve in convertToETHABIType rather than + // in convertToETHABIInteger, since geth uses big.Int to represent integers larger than 64 bits. 
+ { + "encode 1 to int96", + "asdf(int96 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": big.NewInt(1), + }), + nil, + "0x7d14efc00000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "encode 1 to uint96", + "asdf(uint96 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": big.NewInt(1), + }), + nil, + "0x605171600000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "encode 1 to int128", + "asdf(int128 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": big.NewInt(1), + }), + nil, + "0x633a67090000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + { + "encode 1 to uint128", + "asdf(uint128 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": big.NewInt(1), + }), + nil, + "0x8209afa10000000000000000000000000000000000000000000000000000000000000001", + nil, + "", + }, + } + + for _, test := range testCases { + test := test + t.Run(test.name, func(t *testing.T) { + task := pipeline.ETHABIEncodeTask{ + BaseTask: pipeline.NewBaseTask(0, "encode", nil, nil, 0), + ABI: test.abi, + Data: test.data, + } + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + + if test.expectedErrorCause != nil { + require.Equal(t, test.expectedErrorCause, errors.Cause(result.Error)) + require.Nil(t, result.Value) + if test.expectedErrorContains != "" { + require.Contains(t, result.Error.Error(), test.expectedErrorContains) + } + } else { + require.NoError(t, result.Error) + assert.Equal(t, test.expected, result.Value, fmt.Sprintf("test: %s", test.name)) + } + }) + } +} + +func TestETHABIEncode_EncodeIntegers_Overflow(t *testing.T) { + testCases := []struct { + name string + abi string + data string + vars pipeline.Vars + inputs []pipeline.Result + 
expected string + expectedErrorCause error + expectedErrorContains string + }{ + { + "encode 1 to int8", + "asdf(int8 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": int16(129), + }), + nil, + "", + pipeline.ErrBadInput, + pipeline.ErrOverflow.Error(), + }, + { + "encode 1 to uint8", + "asdf(uint8 i)", + `{ "i": $(foo) }`, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": uint16(257), + }), + nil, + "", + pipeline.ErrBadInput, + pipeline.ErrOverflow.Error(), + }, + } + + for _, test := range testCases { + test := test + t.Run(test.name, func(t *testing.T) { + task := pipeline.ETHABIEncodeTask{ + BaseTask: pipeline.NewBaseTask(0, "encode", nil, nil, 0), + ABI: test.abi, + Data: test.data, + } + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + + if test.expectedErrorCause != nil { + require.Equal(t, test.expectedErrorCause, errors.Cause(result.Error)) + require.Nil(t, result.Value) + if test.expectedErrorContains != "" { + require.Contains(t, result.Error.Error(), test.expectedErrorContains) + } + } else { + require.NoError(t, result.Error) + assert.Equal(t, test.expected, result.Value, fmt.Sprintf("test: %s", test.name)) + } + }) + } +} diff --git a/core/services/pipeline/task.eth_call.go b/core/services/pipeline/task.eth_call.go new file mode 100644 index 00000000..667d03cf --- /dev/null +++ b/core/services/pipeline/task.eth_call.go @@ -0,0 +1,165 @@ +package pipeline + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/multierr" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// []byte +type ETHCallTask struct { + BaseTask `mapstructure:",squash"` + Contract string `json:"contract"` + From string `json:"from"` + Data string `json:"data"` + Gas string `json:"gas"` + GasPrice string `json:"gasPrice"` + GasTipCap string `json:"gasTipCap"` + GasFeeCap string `json:"gasFeeCap"` + GasUnlimited string `json:"gasUnlimited"` + ExtractRevertReason bool `json:"extractRevertReason"` + EVMChainID string `json:"evmChainID" mapstructure:"evmChainID"` + Block string `json:"block"` + + specGasLimit *uint32 + legacyChains legacyevm.LegacyChainContainer + config Config + jobType string +} + +var _ Task = (*ETHCallTask)(nil) + +var ( + promETHCallTime = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "pipeline_task_eth_call_execution_time", + Help: "Time taken to fully execute the ETH call", + }, + []string{"pipeline_task_spec_id"}, + ) +) + +func (t *ETHCallTask) Type() TaskType { + return TaskTypeETHCall +} + +func (t *ETHCallTask) getEvmChainID() string { + if t.EVMChainID == "" { + t.EVMChainID = "$(jobSpec.evmChainID)" + } + return t.EVMChainID +} + +func (t *ETHCallTask) Run(ctx context.Context, lggr logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, -1, -1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var ( + contractAddr AddressParam + from AddressParam + data BytesParam + gas Uint64Param + gasPrice MaybeBigIntParam + gasTipCap MaybeBigIntParam + gasFeeCap MaybeBigIntParam + gasUnlimited BoolParam + chainID StringParam + block StringParam + ) + err = multierr.Combine( + errors.Wrap(ResolveParam(&contractAddr, From(VarExpr(t.Contract, vars), NonemptyString(t.Contract))), "contract"), + errors.Wrap(ResolveParam(&from, From(VarExpr(t.From, vars), NonemptyString(t.From), utils.ZeroAddress)), "from"), + 
errors.Wrap(ResolveParam(&data, From(VarExpr(t.Data, vars), JSONWithVarExprs(t.Data, vars, false))), "data"), + errors.Wrap(ResolveParam(&gas, From(VarExpr(t.Gas, vars), NonemptyString(t.Gas), 0)), "gas"), + errors.Wrap(ResolveParam(&gasPrice, From(VarExpr(t.GasPrice, vars), t.GasPrice)), "gasPrice"), + errors.Wrap(ResolveParam(&gasTipCap, From(VarExpr(t.GasTipCap, vars), t.GasTipCap)), "gasTipCap"), + errors.Wrap(ResolveParam(&gasFeeCap, From(VarExpr(t.GasFeeCap, vars), t.GasFeeCap)), "gasFeeCap"), + errors.Wrap(ResolveParam(&chainID, From(VarExpr(t.getEvmChainID(), vars), NonemptyString(t.getEvmChainID()), "")), "evmChainID"), + errors.Wrap(ResolveParam(&gasUnlimited, From(VarExpr(t.GasUnlimited, vars), NonemptyString(t.GasUnlimited), false)), "gasUnlimited"), + errors.Wrap(ResolveParam(&block, From(VarExpr(t.Block, vars), t.Block)), "block"), + ) + if err != nil { + return Result{Error: err}, runInfo + } else if len(data) == 0 { + return Result{Error: errors.Wrapf(ErrBadInput, "data param must not be empty")}, runInfo + } + + chain, err := t.legacyChains.Get(string(chainID)) + if err != nil { + err = fmt.Errorf("%w: %s: %w", ErrInvalidEVMChainID, chainID, err) + return Result{Error: err}, runInfo + } + + var selectedGas uint32 + if gasUnlimited { + if gas > 0 { + return Result{Error: errors.Wrapf(ErrBadInput, "gas must be zero when gasUnlimited is true")}, runInfo + } + } else { + if gas > 0 { + selectedGas = uint32(gas) + } else { + selectedGas = SelectGasLimit(chain.Config().EVM().GasEstimator(), t.jobType, t.specGasLimit) + } + } + + call := ethereum.CallMsg{ + To: (*common.Address)(&contractAddr), + From: (common.Address)(from), + Data: []byte(data), + Gas: uint64(selectedGas), + GasPrice: gasPrice.BigInt(), + GasTipCap: gasTipCap.BigInt(), + GasFeeCap: gasFeeCap.BigInt(), + } + + lggr = lggr.With("gas", call.Gas). + With("gasPrice", call.GasPrice). + With("gasTipCap", call.GasTipCap). 
+ With("gasFeeCap", call.GasFeeCap) + + start := time.Now() + + var resp []byte + blockStr := block.String() + if blockStr == "" || strings.ToLower(blockStr) == "latest" { + resp, err = chain.Client().CallContract(ctx, call, nil) + } else if strings.ToLower(blockStr) == "pending" { + resp, err = chain.Client().PendingCallContract(ctx, call) + } + + elapsed := time.Since(start) + if err != nil { + if t.ExtractRevertReason { + rpcError, errExtract := evmclient.ExtractRPCError(err) + if errExtract == nil { + // Update error to unmarshalled RPCError with revert data. + err = rpcError + } else { + lggr.Warnw("failed to extract rpc error", "err", err, "errExtract", errExtract) + // Leave error as is. + } + } + + return Result{Error: err}, retryableRunInfo() + } + + promETHCallTime.WithLabelValues(t.DotID()).Set(float64(elapsed)) + return Result{Value: resp}, runInfo +} diff --git a/core/services/pipeline/task.eth_call_test.go b/core/services/pipeline/task.eth_call_test.go new file mode 100644 index 00000000..0056fd95 --- /dev/null +++ b/core/services/pipeline/task.eth_call_test.go @@ -0,0 +1,342 @@ +package pipeline_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + txmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + 
"github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + keystoremocks "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + pipelinemocks "github.com/goplugin/pluginv3.0/v2/core/services/pipeline/mocks" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +func TestETHCallTask(t *testing.T) { + t.Parallel() + testutils.SkipShortDB(t) + + var specGasLimit uint32 = 123 + const gasLimit uint32 = 500_000 + const drJobTypeGasLimit uint32 = 789 + + tests := []struct { + name string + contract string + from string + data string + evmChainID string + gas string + block string + specGasLimit *uint32 + vars pipeline.Vars + inputs []pipeline.Result + setupClientMocks func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) + expected interface{} + expectedErrorCause error + expectedErrorContains string + }{ + { + "happy with empty from", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "", + "$(foo)", + "0", + "", + "", + nil, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []byte("foo bar"), + }), + nil, + func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) { + contractAddr := common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF") + ethClient. + On("CallContract", mock.Anything, ethereum.CallMsg{To: &contractAddr, Gas: uint64(drJobTypeGasLimit), Data: []byte("foo bar")}, (*big.Int)(nil)). + Return([]byte("baz quux"), nil) + }, + []byte("baz quux"), nil, "", + }, + { + "happy with gas limit per task", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "", + "$(foo)", + "0", + "$(gasLimit)", + "", + nil, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []byte("foo bar"), + "gasLimit": 100_000, + }), + nil, + func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) { + contractAddr := common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF") + ethClient. 
+ On("CallContract", mock.Anything, ethereum.CallMsg{To: &contractAddr, Gas: 100_000, Data: []byte("foo bar")}, (*big.Int)(nil)). + Return([]byte("baz quux"), nil) + }, + []byte("baz quux"), nil, "", + }, + { + "happy with gas limit per spec", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "", + "$(foo)", + "0", + "", + "", + &specGasLimit, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []byte("foo bar"), + }), + nil, + func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) { + contractAddr := common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF") + ethClient. + On("CallContract", mock.Anything, ethereum.CallMsg{To: &contractAddr, Gas: uint64(specGasLimit), Data: []byte("foo bar")}, (*big.Int)(nil)). + Return([]byte("baz quux"), nil) + }, + []byte("baz quux"), nil, "", + }, + { + "happy with from addr", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "$(foo)", + "0", + "", + "", + nil, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []byte("foo bar"), + }), + nil, + func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) { + contractAddr := common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF") + fromAddr := common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF") + ethClient. + On("CallContract", mock.Anything, ethereum.CallMsg{To: &contractAddr, Gas: uint64(drJobTypeGasLimit), From: fromAddr, Data: []byte("foo bar")}, (*big.Int)(nil)). 
+ Return([]byte("baz quux"), nil) + }, + []byte("baz quux"), nil, "", + }, + { + "bad from address", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "0xThisAintGonnaWork", + "$(foo)", + "0", + "", + "", + nil, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []byte("foo bar"), + }), + nil, + func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) {}, + nil, pipeline.ErrBadInput, "from", + }, + { + "bad contract address", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbee", + "", + "$(foo)", + "0", + "", + "", + nil, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []byte("foo bar"), + }), + nil, + func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) {}, + nil, pipeline.ErrBadInput, "contract", + }, + { + "missing data var", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "", + "$(foo)", + "0", + "", + "", + nil, + pipeline.NewVarsFrom(map[string]interface{}{ + "zork": []byte("foo bar"), + }), + nil, + func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) {}, + nil, pipeline.ErrKeypathNotFound, "data", + }, + { + "no data", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "", + "$(foo)", + "0", + "", + "", + nil, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []byte(nil), + }), + nil, + func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) {}, + nil, pipeline.ErrBadInput, "data", + }, + { + "errored input", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "", + "$(foo)", + "0", + "", + "", + nil, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []byte("foo bar"), + }), + []pipeline.Result{{Error: errors.New("uh oh")}}, + func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) {}, + nil, pipeline.ErrTooManyErrors, "task inputs", + }, + { + "missing chainID", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "", + "$(foo)", + "$(evmChainID)", + "", + "", + nil, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []byte("foo bar"), + "evmChainID": "123", + }), + nil, + 
func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) { + contractAddr := common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF") + ethClient. + On("CallContract", mock.Anything, ethereum.CallMsg{To: &contractAddr, Data: []byte("foo bar")}, (*big.Int)(nil)). + Return([]byte("baz quux"), nil).Maybe() + }, + nil, nil, chains.ErrNoSuchChainID.Error(), + }, + { + "simulate using latest block", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "", + "$(foo)", + "0", + "", + "latest", + nil, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []byte("foo bar"), + }), + nil, + func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) { + contractAddr := common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF") + ethClient. + On("CallContract", mock.Anything, ethereum.CallMsg{To: &contractAddr, Gas: uint64(drJobTypeGasLimit), Data: []byte("foo bar")}, (*big.Int)(nil)). + Return([]byte("baz quux"), nil) + }, + []byte("baz quux"), nil, "", + }, + { + "simulate using pending block", + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "", + "$(foo)", + "0", + "", + "pending", + nil, + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []byte("foo bar"), + }), + nil, + func(ethClient *evmclimocks.Client, config *pipelinemocks.Config) { + contractAddr := common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF") + ethClient. + On("PendingCallContract", mock.Anything, ethereum.CallMsg{To: &contractAddr, Gas: uint64(drJobTypeGasLimit), Data: []byte("foo bar")}). 
+ Return([]byte("baz quux"), nil) + }, + []byte("baz quux"), nil, "", + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + task := pipeline.ETHCallTask{ + BaseTask: pipeline.NewBaseTask(0, "ethcall", nil, nil, 0), + Contract: test.contract, + From: test.from, + Data: test.data, + EVMChainID: test.evmChainID, + Gas: test.gas, + Block: test.block, + } + + ethClient := evmclimocks.NewClient(t) + config := pipelinemocks.NewConfig(t) + test.setupClientMocks(ethClient, config) + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.LimitDefault = ptr(gasLimit) + c.EVM[0].GasEstimator.LimitJobType.DR = ptr(drJobTypeGasLimit) + }) + lggr := logger.TestLogger(t) + + keyStore := keystoremocks.NewEth(t) + txManager := txmmocks.NewMockEvmTxManager(t) + db := pgtest.NewSqlxDB(t) + + var legacyChains legacyevm.LegacyChainContainer + if test.expectedErrorCause != nil || test.expectedErrorContains != "" { + exts := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: cfg, TxManager: txManager, KeyStore: keyStore}) + legacyChains = evmrelay.NewLegacyChainsFromRelayerExtenders(exts) + } else { + legacyChains = cltest.NewLegacyChainsWithMockChain(t, ethClient, cfg) + } + + task.HelperSetDependencies(legacyChains, cfg.JobPipeline(), test.specGasLimit, pipeline.DirectRequestJobType) + + result, runInfo := task.Run(testutils.Context(t), lggr, test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + + if test.expectedErrorCause != nil || test.expectedErrorContains != "" { + require.Nil(t, result.Value) + if test.expectedErrorCause != nil { + require.Equal(t, test.expectedErrorCause, errors.Cause(result.Error)) + } + if test.expectedErrorContains != "" { + require.Contains(t, result.Error.Error(), test.expectedErrorContains) + } + } else { + require.NoError(t, result.Error) + require.Equal(t, test.expected, result.Value) 
+ } + }) + } +} diff --git a/core/services/pipeline/task.eth_tx.go b/core/services/pipeline/task.eth_tx.go new file mode 100644 index 00000000..40e5e3da --- /dev/null +++ b/core/services/pipeline/task.eth_tx.go @@ -0,0 +1,257 @@ +package pipeline + +import ( + "context" + "fmt" + "math/big" + "reflect" + "strconv" + + "github.com/ethereum/go-ethereum/common" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + "go.uber.org/multierr" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/utils/hex" + clnull "github.com/goplugin/plugin-common/pkg/utils/null" + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// nil +type ETHTxTask struct { + BaseTask `mapstructure:",squash"` + From string `json:"from"` + To string `json:"to"` + Data string `json:"data"` + GasLimit string `json:"gasLimit"` + TxMeta string `json:"txMeta"` + MinConfirmations string `json:"minConfirmations"` + // FailOnRevert, if set, will error the task if the transaction reverted on-chain + // If unset, the receipt will be passed as output + // It has no effect if minConfirmations == 0 + FailOnRevert string `json:"failOnRevert"` + EVMChainID string `json:"evmChainID" mapstructure:"evmChainID"` + TransmitChecker string `json:"transmitChecker"` + + forwardingAllowed bool + specGasLimit *uint32 + keyStore ETHKeyStore + legacyChains legacyevm.LegacyChainContainer + jobType string +} + +type ETHKeyStore interface { + GetRoundRobinAddress(chainID *big.Int, addrs ...common.Address) (common.Address, error) +} + +var _ Task = (*ETHTxTask)(nil) + +func (t *ETHTxTask) Type() TaskType { + return TaskTypeETHTx +} + +func (t *ETHTxTask) getEvmChainID() string { + if t.EVMChainID == "" { + t.EVMChainID = "$(jobSpec.evmChainID)" + } + return t.EVMChainID +} + +func (t *ETHTxTask) 
Run(ctx context.Context, lggr logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + var chainID StringParam + err := errors.Wrap(ResolveParam(&chainID, From(VarExpr(t.getEvmChainID(), vars), NonemptyString(t.getEvmChainID()), "")), "evmChainID") + if err != nil { + return Result{Error: err}, runInfo + } + + chain, err := t.legacyChains.Get(string(chainID)) + if err != nil { + err = fmt.Errorf("%w: %s: %w", ErrInvalidEVMChainID, chainID, err) + return Result{Error: err}, retryableRunInfo() + } + + cfg := chain.Config().EVM() + txManager := chain.TxManager() + _, err = CheckInputs(inputs, -1, -1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + maximumGasLimit := SelectGasLimit(cfg.GasEstimator(), t.jobType, t.specGasLimit) + + var ( + fromAddrs AddressSliceParam + toAddr AddressParam + data BytesParam + gasLimit Uint64Param + txMetaMap MapParam + maybeMinConfirmations MaybeUint64Param + transmitCheckerMap MapParam + failOnRevert BoolParam + ) + err = multierr.Combine( + errors.Wrap(ResolveParam(&fromAddrs, From(VarExpr(t.From, vars), JSONWithVarExprs(t.From, vars, false), NonemptyString(t.From), nil)), "from"), + errors.Wrap(ResolveParam(&toAddr, From(VarExpr(t.To, vars), NonemptyString(t.To))), "to"), + errors.Wrap(ResolveParam(&data, From(VarExpr(t.Data, vars), NonemptyString(t.Data))), "data"), + errors.Wrap(ResolveParam(&gasLimit, From(VarExpr(t.GasLimit, vars), NonemptyString(t.GasLimit), maximumGasLimit)), "gasLimit"), + errors.Wrap(ResolveParam(&txMetaMap, From(VarExpr(t.TxMeta, vars), JSONWithVarExprs(t.TxMeta, vars, false), MapParam{})), "txMeta"), + errors.Wrap(ResolveParam(&maybeMinConfirmations, From(VarExpr(t.MinConfirmations, vars), NonemptyString(t.MinConfirmations), "")), "minConfirmations"), + errors.Wrap(ResolveParam(&transmitCheckerMap, From(VarExpr(t.TransmitChecker, vars), JSONWithVarExprs(t.TransmitChecker, vars, false), MapParam{})), "transmitChecker"), + 
errors.Wrap(ResolveParam(&failOnRevert, From(NonemptyString(t.FailOnRevert), false)), "failOnRevert"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + var minOutgoingConfirmations uint64 + if min, isSet := maybeMinConfirmations.Uint64(); isSet { + minOutgoingConfirmations = min + } else { + minOutgoingConfirmations = uint64(cfg.FinalityDepth()) + } + + txMeta, err := decodeMeta(txMetaMap) + if err != nil { + return Result{Error: err}, runInfo + } + txMeta.FailOnRevert = null.BoolFrom(bool(failOnRevert)) + setJobIDOnMeta(lggr, vars, txMeta) + + transmitChecker, err := decodeTransmitChecker(transmitCheckerMap) + if err != nil { + return Result{Error: err}, runInfo + } + + fromAddr, err := t.keyStore.GetRoundRobinAddress(chain.ID(), fromAddrs...) + if err != nil { + err = errors.Wrap(err, "ETHTxTask failed to get fromAddress") + lggr.Error(err) + return Result{Error: errors.Wrapf(ErrTaskRunFailed, "while querying keystore: %v", err)}, retryableRunInfo() + } + + // TODO(sc-55115): Allow job specs to pass in the strategy that they want + strategy := txmgrcommon.NewSendEveryStrategy() + + var forwarderAddress common.Address + if t.forwardingAllowed { + var fwderr error + forwarderAddress, fwderr = chain.TxManager().GetForwarderForEOA(fromAddr) + if fwderr != nil { + lggr.Warnw("Skipping forwarding for job, will fallback to default behavior", "err", fwderr) + } + } + + txRequest := txmgr.TxRequest{ + FromAddress: fromAddr, + ToAddress: common.Address(toAddr), + EncodedPayload: []byte(data), + FeeLimit: uint32(gasLimit), + Meta: txMeta, + ForwarderAddress: forwarderAddress, + Strategy: strategy, + Checker: transmitChecker, + SignalCallback: true, + } + + if minOutgoingConfirmations > 0 { + // Store the task run ID, so we can resume the pipeline when tx is confirmed + txRequest.PipelineTaskRunID = &t.uuid + txRequest.MinConfirmations = clnull.Uint32From(uint32(minOutgoingConfirmations)) + } + + _, err = txManager.CreateTransaction(ctx, txRequest) + if err 
!= nil { + return Result{Error: errors.Wrapf(ErrTaskRunFailed, "while creating transaction: %v", err)}, retryableRunInfo() + } + + if minOutgoingConfirmations > 0 { + return Result{}, pendingRunInfo() + } + + return Result{Value: nil}, runInfo +} + +func decodeMeta(metaMap MapParam) (*txmgr.TxMeta, error) { + var txMeta txmgr.TxMeta + metaDecoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Result: &txMeta, + ErrorUnused: true, + DecodeHook: func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + switch from { + case stringType: + switch to { + case int32Type: + i, err2 := strconv.ParseInt(data.(string), 10, 32) + return int32(i), err2 + case reflect.TypeOf(common.Hash{}): + hb, err := hex.DecodeString(data.(string)) + if err != nil { + return nil, err + } + return common.BytesToHash(hb), nil + } + } + return data, nil + }, + }) + if err != nil { + return &txMeta, errors.Wrapf(ErrBadInput, "txMeta: %v", err) + } + + err = metaDecoder.Decode(metaMap) + if err != nil { + return &txMeta, errors.Wrapf(ErrBadInput, "txMeta: %v", err) + } + return &txMeta, nil +} + +func decodeTransmitChecker(checkerMap MapParam) (txmgr.TransmitCheckerSpec, error) { + var transmitChecker txmgr.TransmitCheckerSpec + checkerDecoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Result: &transmitChecker, + ErrorUnused: true, + DecodeHook: func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + switch from { + case stringType: + switch to { + case reflect.TypeOf(common.Address{}): + ab, err := hex.DecodeString(data.(string)) + if err != nil { + return nil, err + } + return common.BytesToAddress(ab), nil + } + } + return data, nil + }, + }) + if err != nil { + return transmitChecker, errors.Wrapf(ErrBadInput, "transmitChecker: %v", err) + } + + err = checkerDecoder.Decode(checkerMap) + if err != nil { + return transmitChecker, errors.Wrapf(ErrBadInput, "transmitChecker: %v", err) + } + return 
transmitChecker, nil +} + +// txMeta is really only used for logging, so this is best-effort +func setJobIDOnMeta(lggr logger.Logger, vars Vars, meta *txmgr.TxMeta) { + jobID, err := vars.Get("jobSpec.databaseID") + if err != nil { + return + } + switch v := jobID.(type) { + case int64: + vv := int32(v) + meta.JobID = &vv + default: + logger.Sugared(lggr).AssumptionViolationf("expected type int32 for vars.jobSpec.databaseID; got: %T (value: %v)", jobID, jobID) + } +} diff --git a/core/services/pipeline/task.eth_tx_test.go b/core/services/pipeline/task.eth_tx_test.go new file mode 100644 index 00000000..e87abe68 --- /dev/null +++ b/core/services/pipeline/task.eth_tx_test.go @@ -0,0 +1,610 @@ +package pipeline_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + clnull "github.com/goplugin/plugin-common/pkg/utils/null" + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + txmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + keystoremocks "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +func TestETHTxTask(t *testing.T) { + jid := int32(321) + reqID := 
common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2") + reqTxHash := common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8") + specGasLimit := uint32(123) + const defaultGasLimit uint32 = 999 + const drJobTypeGasLimit uint32 = 789 + + from := common.HexToAddress("0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c") + to := common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF") + + tests := []struct { + name string + from string + to string + data string + gasLimit string + txMeta string + minConfirmations string + evmChainID string + transmitChecker string + specGasLimit *uint32 + forwardingAllowed bool + vars pipeline.Vars + inputs []pipeline.Result + setupClientMocks func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) + expected interface{} + expectedErrorCause error + expectedErrorContains string + expectedRunInfo pipeline.RunInfo + }{ + { + "happy (no vars)", + `[ "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c" ]`, + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "foobar", + "12345", + `{ "jobID": 321, "requestID": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", "requestTxHash": "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8" }`, + `0`, + "0", + `{"CheckerType": "vrf_v2", "VRFCoordinatorAddress": "0x2E396ecbc8223Ebc16EC45136228AE5EDB649943"}`, + nil, + false, + pipeline.NewVarsFrom(nil), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { + + data := []byte("foobar") + gasLimit := uint32(12345) + jobID := int32(321) + addr := common.HexToAddress("0x2E396ecbc8223Ebc16EC45136228AE5EDB649943") + txMeta := &txmgr.TxMeta{ + JobID: &jobID, + RequestID: &reqID, + RequestTxHash: &reqTxHash, + FailOnRevert: null.BoolFrom(false), + } + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) + txManager.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: from, + 
ToAddress: to, + EncodedPayload: data, + FeeLimit: gasLimit, + Meta: txMeta, + Strategy: txmgrcommon.NewSendEveryStrategy(), + Checker: txmgr.TransmitCheckerSpec{ + CheckerType: txmgr.TransmitCheckerTypeVRFV2, + VRFCoordinatorAddress: &addr, + }, + SignalCallback: true, + }).Return(txmgr.Tx{}, nil) + }, + nil, nil, "", pipeline.RunInfo{}, + }, + { + "happy (with vars)", + `[ $(fromAddr) ]`, + "$(toAddr)", + "$(data)", + "$(gasLimit)", + `{ "jobID": $(jobID), "requestID": $(requestID), "requestTxHash": $(requestTxHash) }`, + `0`, + "0", + "", + nil, + false, + pipeline.NewVarsFrom(map[string]interface{}{ + "fromAddr": common.HexToAddress("0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c"), + "toAddr": common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF"), + "data": []byte("foobar"), + "gasLimit": uint64(12345), + "jobID": int32(321), + "requestID": common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2"), + "requestTxHash": common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8"), + }), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { + data := []byte("foobar") + gasLimit := uint32(12345) + txMeta := &txmgr.TxMeta{ + JobID: &jid, + RequestID: &reqID, + RequestTxHash: &reqTxHash, + FailOnRevert: null.BoolFrom(false), + } + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) + txManager.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: from, + ToAddress: to, + EncodedPayload: data, + FeeLimit: gasLimit, + Meta: txMeta, + Strategy: txmgrcommon.NewSendEveryStrategy(), + SignalCallback: true, + }).Return(txmgr.Tx{}, nil) + }, + nil, nil, "", pipeline.RunInfo{}, + }, + { + "happy (with minConfirmations as variable expression)", + `[ $(fromAddr) ]`, + "$(toAddr)", + "$(data)", + "$(gasLimit)", + `{ "jobID": $(jobID), "requestID": $(requestID), "requestTxHash": $(requestTxHash) }`, + "$(minConfirmations)", + "0", + "", + 
nil, + false, + pipeline.NewVarsFrom(map[string]interface{}{ + "fromAddr": common.HexToAddress("0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c"), + "toAddr": common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF"), + "data": []byte("foobar"), + "gasLimit": uint64(12345), + "minConfirmations": uint64(2), + "jobID": int32(321), + "requestID": common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2"), + "requestTxHash": common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8"), + }), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { + addr := common.HexToAddress("0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c") + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, addr).Return(addr, nil) + txManager.On("CreateTransaction", mock.Anything, mock.MatchedBy(func(tx txmgr.TxRequest) bool { + return tx.MinConfirmations == clnull.Uint32From(2) + })).Return(txmgr.Tx{}, nil) + }, + nil, nil, "", pipeline.RunInfo{IsPending: true}, + }, + { + "happy (with vars 2)", + `$(fromAddrs)`, + "$(toAddr)", + "$(data)", + "$(gasLimit)", + `$(requestData)`, + `0`, + "0", + "", + nil, + false, + pipeline.NewVarsFrom(map[string]interface{}{ + "fromAddrs": []common.Address{common.HexToAddress("0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c")}, + "toAddr": "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "data": []byte("foobar"), + "gasLimit": uint32(12345), + "requestData": map[string]interface{}{ + "jobID": int32(321), + "requestID": common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2"), + "requestTxHash": common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8"), + }, + }), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { + data := []byte("foobar") + gasLimit := uint32(12345) + txMeta := &txmgr.TxMeta{ + JobID: &jid, + RequestID: &reqID, + RequestTxHash: &reqTxHash, + FailOnRevert: 
null.BoolFrom(false), + } + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) + txManager.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: from, + ToAddress: to, + EncodedPayload: data, + FeeLimit: gasLimit, + Meta: txMeta, + Strategy: txmgrcommon.NewSendEveryStrategy(), + SignalCallback: true, + }).Return(txmgr.Tx{}, nil) + }, + nil, nil, "", pipeline.RunInfo{}, + }, + { + "happy (no `from`, keystore has key)", + ``, + "$(toAddr)", + "$(data)", + "$(gasLimit)", + `$(requestData)`, + `0`, + "0", + "", + nil, + false, + pipeline.NewVarsFrom(map[string]interface{}{ + "fromAddrs": []common.Address{common.HexToAddress("0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c")}, + "toAddr": common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF"), + "data": []byte("foobar"), + "gasLimit": uint32(12345), + "requestData": map[string]interface{}{ + "jobID": int32(321), + "requestID": common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2"), + "requestTxHash": common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8"), + }, + }), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { + data := []byte("foobar") + gasLimit := uint32(12345) + txMeta := &txmgr.TxMeta{ + JobID: &jid, + RequestID: &reqID, + RequestTxHash: &reqTxHash, + FailOnRevert: null.BoolFrom(false), + } + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID).Return(from, nil) + txManager.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: from, + ToAddress: to, + EncodedPayload: data, + FeeLimit: gasLimit, + Meta: txMeta, + Strategy: txmgrcommon.NewSendEveryStrategy(), + SignalCallback: true, + }).Return(txmgr.Tx{}, nil) + }, + nil, nil, "", pipeline.RunInfo{}, + }, + { + "happy (missing keys in txMeta)", + `[ "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c" ]`, + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "foobar", + "12345", + `{}`, + `0`, + 
"0", + "", + nil, + false, + pipeline.NewVarsFrom(nil), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { + data := []byte("foobar") + gasLimit := uint32(12345) + txMeta := &txmgr.TxMeta{FailOnRevert: null.BoolFrom(false)} + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) + txManager.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: from, + ToAddress: to, + EncodedPayload: data, + FeeLimit: gasLimit, + Meta: txMeta, + Strategy: txmgrcommon.NewSendEveryStrategy(), + SignalCallback: true, + }).Return(txmgr.Tx{}, nil) + }, + nil, nil, "", pipeline.RunInfo{}, + }, + { + "happy (missing gasLimit takes config default)", + `[ "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c" ]`, + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "foobar", + "", + `{ "jobID": 321, "requestID": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", "requestTxHash": "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8" }`, + `0`, + "0", + "", + nil, // spec does not override gas limit + false, + pipeline.NewVarsFrom(nil), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { + data := []byte("foobar") + txMeta := &txmgr.TxMeta{ + JobID: &jid, + RequestID: &reqID, + RequestTxHash: &reqTxHash, + FailOnRevert: null.BoolFrom(false), + } + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) + txManager.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: from, + ToAddress: to, + EncodedPayload: data, + FeeLimit: drJobTypeGasLimit, + Meta: txMeta, + Strategy: txmgrcommon.NewSendEveryStrategy(), + SignalCallback: true, + }).Return(txmgr.Tx{}, nil) + }, + nil, nil, "", pipeline.RunInfo{}, + }, + { + "happy (missing gasLimit takes spec defined value)", + `[ "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c" ]`, + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "foobar", + "", + `{ "jobID": 321, "requestID": 
"0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", "requestTxHash": "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8" }`, + `0`, + "0", + "", + &specGasLimit, + false, + pipeline.NewVarsFrom(nil), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { + data := []byte("foobar") + txMeta := &txmgr.TxMeta{ + JobID: &jid, + RequestID: &reqID, + RequestTxHash: &reqTxHash, + FailOnRevert: null.BoolFrom(false), + } + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) + txManager.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: from, + ToAddress: to, + EncodedPayload: data, + FeeLimit: specGasLimit, + Meta: txMeta, + Strategy: txmgrcommon.NewSendEveryStrategy(), + SignalCallback: true, + }).Return(txmgr.Tx{}, nil) + }, + nil, nil, "", pipeline.RunInfo{}, + }, + { + "error from keystore", + ``, + "$(toAddr)", + "$(data)", + "$(gasLimit)", + `$(requestData)`, + `0`, + "0", + "", + nil, + false, + pipeline.NewVarsFrom(map[string]interface{}{ + "fromAddrs": []common.Address{common.HexToAddress("0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c")}, + "toAddr": common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF"), + "data": []byte("foobar"), + "gasLimit": uint32(12345), + "requestData": map[string]interface{}{ + "jobID": int32(321), + "requestID": common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2"), + "requestTxHash": common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8"), + }, + }), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { + + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID).Return(nil, errors.New("uh oh")) + }, + nil, pipeline.ErrTaskRunFailed, "while querying keystore", pipeline.RunInfo{IsRetryable: true}, + }, + { + "error from tx manager", + `[ "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c" ]`, + 
"0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "foobar", + "12345", + `{ "jobID": 321, "requestID": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", "requestTxHash": "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8" }`, + `0`, + "0", + "", + nil, + false, + pipeline.NewVarsFrom(nil), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { + data := []byte("foobar") + gasLimit := uint32(12345) + txMeta := &txmgr.TxMeta{ + JobID: &jid, + RequestID: &reqID, + RequestTxHash: &reqTxHash, + FailOnRevert: null.BoolFrom(false), + } + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) + txManager.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: from, + ToAddress: to, + EncodedPayload: data, + FeeLimit: gasLimit, + Meta: txMeta, + Strategy: txmgrcommon.NewSendEveryStrategy(), + SignalCallback: true, + }).Return(txmgr.Tx{}, errors.New("uh oh")) + }, + nil, pipeline.ErrTaskRunFailed, "while creating transaction", pipeline.RunInfo{IsRetryable: true}, + }, + { + "extra keys in txMeta", + `[ "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c" ]`, + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "foobar", + "12345", + `{ "jobID": 321, "requestID": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", "requestTxHash": "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8", "foo": "bar" }`, + `0`, + "0", + "", + nil, + false, + pipeline.NewVarsFrom(nil), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) {}, + nil, pipeline.ErrBadInput, "txMeta", pipeline.RunInfo{}, + }, + { + "bad values in txMeta", + `[ "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c" ]`, + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "foobar", + "12345", + `{ "jobID": "asdf", "requestID": 123, "requestTxHash": true }`, + `0`, + "0", + "", + nil, + false, + pipeline.NewVarsFrom(nil), + nil, + func(keyStore *keystoremocks.Eth, txManager 
*txmmocks.MockEvmTxManager) {}, + nil, pipeline.ErrBadInput, "txMeta", pipeline.RunInfo{}, + }, + { + "missing `to`", + `[ "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c" ]`, + "", + "foobar", + "12345", + `{ "jobID": 321, "requestID": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", "requestTxHash": "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8" }`, + `0`, + "0", + "", + nil, + false, + pipeline.NewVarsFrom(nil), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) {}, + nil, pipeline.ErrParameterEmpty, "to", pipeline.RunInfo{}, + }, + { + "errored input", + `[ "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c" ]`, + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "foobar", + "12345", + `{ "jobID": 321, "requestID": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", "requestTxHash": "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8" }`, + `0`, + "0", + "", + nil, + false, + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Error: errors.New("uh oh")}}, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) {}, + nil, pipeline.ErrTooManyErrors, "task inputs", pipeline.RunInfo{}, + }, + { + "async mode (with > 0 minConfirmations)", + `[ "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c" ]`, + "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF", + "foobar", + "12345", + `{ "jobID": 321, "requestID": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", "requestTxHash": "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8" }`, + `3`, + "0", + "", + nil, + false, + pipeline.NewVarsFrom(nil), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { + from := common.HexToAddress("0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c") + keyStore.On("GetRoundRobinAddress", testutils.FixtureChainID, from).Return(from, nil) + txManager.On("CreateTransaction", mock.Anything, mock.MatchedBy(func(tx txmgr.TxRequest) bool { + 
return tx.MinConfirmations == clnull.Uint32From(3) && tx.PipelineTaskRunID != nil + })).Return(txmgr.Tx{}, nil) + }, + nil, nil, "", pipeline.RunInfo{IsPending: true}, + }, + { + "non-existent chain-id", + `[ $(fromAddr) ]`, + "$(toAddr)", + "$(data)", + "$(gasLimit)", + `{ "jobID": $(jobID), "requestID": $(requestID), "requestTxHash": $(requestTxHash)`, + `0`, + "$(evmChainID)", + "", + nil, + false, + pipeline.NewVarsFrom(map[string]interface{}{ + "fromAddr": common.HexToAddress("0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c"), + "toAddr": common.HexToAddress("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF"), + "data": []byte("foobar"), + "gasLimit": uint32(12345), + "jobID": int32(321), + "requestID": common.HexToHash("0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2"), + "requestTxHash": common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8"), + "evmChainID": "123", + }), + nil, + func(keyStore *keystoremocks.Eth, txManager *txmmocks.MockEvmTxManager) { + }, + nil, nil, chains.ErrNoSuchChainID.Error(), pipeline.RunInfo{IsRetryable: true}, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + task := pipeline.ETHTxTask{ + BaseTask: pipeline.NewBaseTask(0, "ethtx", nil, nil, 0), + From: test.from, + To: test.to, + Data: test.data, + GasLimit: test.gasLimit, + TxMeta: test.txMeta, + MinConfirmations: test.minConfirmations, + EVMChainID: test.evmChainID, + TransmitChecker: test.transmitChecker, + } + + keyStore := keystoremocks.NewEth(t) + txManager := txmmocks.NewMockEvmTxManager(t) + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.LimitDefault = ptr(defaultGasLimit) + c.EVM[0].GasEstimator.LimitJobType.DR = ptr(drJobTypeGasLimit) + }) + lggr := logger.TestLogger(t) + + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{DB: db, GeneralConfig: cfg, + 
TxManager: txManager, KeyStore: keyStore}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + + test.setupClientMocks(keyStore, txManager) + task.HelperSetDependencies(legacyChains, keyStore, test.specGasLimit, pipeline.DirectRequestJobType) + + result, runInfo := task.Run(testutils.Context(t), lggr, test.vars, test.inputs) + assert.Equal(t, test.expectedRunInfo, runInfo) + + if test.expectedErrorCause != nil || test.expectedErrorContains != "" { + require.Nil(t, result.Value) + if test.expectedErrorCause != nil { + require.Equal(t, test.expectedErrorCause, errors.Cause(result.Error)) + } + if test.expectedErrorContains != "" { + require.Contains(t, result.Error.Error(), test.expectedErrorContains) + } + } else { + require.NoError(t, result.Error) + require.Equal(t, test.expected, result.Value) + } + }) + } +} + +func ptr[T any](t T) *T { return &t } diff --git a/core/services/pipeline/task.fail.go b/core/services/pipeline/task.fail.go new file mode 100644 index 00000000..83ba51e1 --- /dev/null +++ b/core/services/pipeline/task.fail.go @@ -0,0 +1,26 @@ +package pipeline + +import ( + "context" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// FailTask is like the Panic task but without all the drama and stack +// unwinding of a panic +type FailTask struct { + BaseTask `mapstructure:",squash"` + Msg string +} + +var _ Task = (*FailTask)(nil) + +func (t *FailTask) Type() TaskType { + return TaskTypeFail +} + +func (t *FailTask) Run(_ context.Context, _ logger.Logger, vars Vars, _ []Result) (Result, RunInfo) { + return Result{Error: errors.New(t.Msg)}, RunInfo{} +} diff --git a/core/services/pipeline/task.hexdecode.go b/core/services/pipeline/task.hexdecode.go new file mode 100644 index 00000000..023aa25c --- /dev/null +++ b/core/services/pipeline/task.hexdecode.go @@ -0,0 +1,53 @@ +package pipeline + +import ( + "context" + "encoding/hex" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + 
commonhex "github.com/goplugin/plugin-common/pkg/utils/hex" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// bytes +type HexDecodeTask struct { + BaseTask `mapstructure:",squash"` + Input string `json:"input"` +} + +var _ Task = (*HexDecodeTask)(nil) + +func (t *HexDecodeTask) Type() TaskType { + return TaskTypeHexDecode +} + +func (t *HexDecodeTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var input StringParam + + err = multierr.Combine( + errors.Wrap(ResolveParam(&input, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), "input"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + if commonhex.HasPrefix(input.String()) { + noHexPrefix := commonhex.TrimPrefix(input.String()) + bs, err := hex.DecodeString(noHexPrefix) + if err == nil { + return Result{Value: bs}, runInfo + } + return Result{Error: errors.Wrap(err, "failed to decode hex string")}, runInfo + } + + return Result{Error: errors.New("hex string must have prefix 0x")}, runInfo +} diff --git a/core/services/pipeline/task.hexdecode_test.go b/core/services/pipeline/task.hexdecode_test.go new file mode 100644 index 00000000..488e2ce9 --- /dev/null +++ b/core/services/pipeline/task.hexdecode_test.go @@ -0,0 +1,78 @@ +package pipeline_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestHexDecodeTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + result []byte + error string + }{ + + // success + {"happy", "0x12345678", []byte{0x12, 0x34, 0x56, 0x78}, ""}, + 
{"happy zero", "0x00", []byte{0}, ""}, + + // failure + {"missing hex prefix", "12345678", nil, "hex string must have prefix 0x"}, + {"empty input", "", nil, "hex string must have prefix 0x"}, + {"wrong alphabet", "0xwq", nil, "failed to decode hex string"}, + } + + for _, test := range tests { + assertOK := func(result pipeline.Result, runInfo pipeline.RunInfo) { + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if test.error == "" { + require.NoError(t, result.Error) + require.Equal(t, test.result, result.Value) + } else { + require.ErrorContains(t, result.Error, test.error) + } + } + t.Run(test.name, func(t *testing.T) { + t.Run("without vars through job DAG", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.HexDecodeTask{BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0)} + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: test.input}})) + }) + t.Run("without vars through input param", func(t *testing.T) { + inputStr := fmt.Sprintf("%v", test.input) + if inputStr == "" { + // empty input parameter is indistinguishable from not providing it at all + // in that case the task will use an input defined by the job DAG + return + } + vars := pipeline.NewVarsFrom(nil) + task := pipeline.HexDecodeTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: inputStr, + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + t.Run("with vars", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": test.input}, + }) + task := pipeline.HexDecodeTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: "$(foo.bar)", + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + }) + } +} diff --git a/core/services/pipeline/task.hexencode.go b/core/services/pipeline/task.hexencode.go new file mode 100644 
index 00000000..68657f69 --- /dev/null +++ b/core/services/pipeline/task.hexencode.go @@ -0,0 +1,81 @@ +package pipeline + +import ( + "context" + "encoding/hex" + "fmt" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// string +type HexEncodeTask struct { + BaseTask `mapstructure:",squash"` + Input string `json:"input"` +} + +var _ Task = (*HexEncodeTask)(nil) + +func (t *HexEncodeTask) Type() TaskType { + return TaskTypeHexEncode +} + +func addHexPrefix(val string) string { + if len(val) > 0 { + return "0x" + val + } + return "" +} + +func (t *HexEncodeTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var stringInput StringParam + err = multierr.Combine( + errors.Wrap(ResolveParam(&stringInput, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), "input"), + ) + if err == nil { + // string + return Result{Value: addHexPrefix(hex.EncodeToString([]byte(stringInput.String())))}, runInfo + } + + var bytesInput BytesParam + err = multierr.Combine( + errors.Wrap(ResolveParam(&bytesInput, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), "input"), + ) + if err == nil { + // bytes + return Result{Value: addHexPrefix(hex.EncodeToString(bytesInput))}, runInfo + } + + var decimalInput DecimalParam + err = multierr.Combine( + errors.Wrap(ResolveParam(&decimalInput, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), "input"), + ) + if err == nil && !decimalInput.Decimal().IsInteger() { + // decimal + return Result{Error: errors.New("decimal input")}, runInfo + } + + var bigIntInput MaybeBigIntParam + err = multierr.Combine( + errors.Wrap(ResolveParam(&bigIntInput, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), 
"input"), + ) + if err == nil { + // one of integer types + if bigIntInput.BigInt().Sign() == -1 { + return Result{Error: errors.New("negative integer")}, runInfo + } + return Result{Value: addHexPrefix(fmt.Sprintf("%x", bigIntInput.BigInt()))}, runInfo + } + + return Result{Error: err}, runInfo +} diff --git a/core/services/pipeline/task.hexencode_test.go b/core/services/pipeline/task.hexencode_test.go new file mode 100644 index 00000000..d4472930 --- /dev/null +++ b/core/services/pipeline/task.hexencode_test.go @@ -0,0 +1,124 @@ +package pipeline_test + +import ( + "fmt" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestHexEncodeTask(t *testing.T) { + t.Parallel() + bigTwo, bigThree := big.NewInt(2), big.NewInt(3) + + tests := []struct { + name string + input interface{} + result string + error string + }{ + + // success integers + {"zero", 0, "0x0", ""}, + {"small int", 1, "0x1", ""}, + {"two-byte integer", 256, "0x100", ""}, + {"uint8", uint8(10), "0xa", ""}, + {"small int64", int64(456), "0x1c8", ""}, + {"large int64", int64(999000000000), "0xe8990a4600", ""}, + {"bigint 1", bigTwo.Exp(bigTwo, big.NewInt(100), nil), "0x10000000000000000000000000", ""}, + {"bigint 2", bigThree.Exp(bigThree, big.NewInt(100), nil), "0x5a4653ca673768565b41f775d6947d55cf3813d1", ""}, + {"decimal type but integer value", 1.0, "0x1", ""}, + {"decimal type but integer value zero", 0.0, "0x0", ""}, + {"decimal.Decimal type but integer value", mustDecimal(t, "256"), "0x100", ""}, + + // success strings/bytes + {"string ascii bytes", "xyz", "0x78797a", ""}, + {"string with whitespace", "1 x *", "0x312078202a", ""}, + {"string shouldn't convert to int", "456", "0x343536", ""}, + {"don't detect hex in string", "0xff", "0x30786666", ""}, + // NOTE: 
for byte arrays, output is padded to full bytes (i.e. a potential leading zero) + {"bytes remain bytes", []byte{0xa, 0x0, 0xff, 0x1}, "0x0a00ff01", ""}, + + // success empty results + {"empty string", "", "", ""}, + {"empty byte array", []byte{}, "", ""}, + + // failure + {"negative int", -1, "", "negative integer"}, + {"negative float", -1.0, "", "negative integer"}, + {"negative int64", int64(-10), "", "negative integer"}, + {"negative bigint", big.NewInt(-100), "", "negative integer"}, + {"input of type bool", true, "", "bad input for task"}, + {"input of type decimal", 1.44, "", "decimal input"}, + {"input of type decimal and negative", -0.44, "", "decimal input"}, + {"input of decimal.Decimal type but not integer", mustDecimal(t, "3.14"), "", "decimal input"}, + } + + for _, test := range tests { + assertOK := func(result pipeline.Result, runInfo pipeline.RunInfo) { + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if test.error == "" { + require.NoError(t, result.Error) + require.Equal(t, test.result, result.Value) + } else { + require.ErrorContains(t, result.Error, test.error) + } + } + t.Run(test.name, func(t *testing.T) { + t.Run("without vars", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.HexEncodeTask{BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0)} + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: test.input}})) + }) + t.Run("with vars", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": test.input}, + }) + task := pipeline.HexEncodeTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: "$(foo.bar)", + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + }) + } +} + +func TestHexEncodeTaskInputParamLiteral(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + result string + }{ + 
// Only strings can be passed via input param literals (other types will get converted to strings anyway) + {"string ascii bytes", "xyz", "0x78797a"}, + {"string with whitespace", "1 x *", "0x312078202a"}, + {"string shouldn't convert to int", "456", "0x343536"}, + {"don't detect hex in string", "0xff", "0x30786666"}, + {"int gets converted to string", 256, "0x323536"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.HexEncodeTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: fmt.Sprintf("%v", test.input), + } + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{}) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.NoError(t, result.Error) + require.Equal(t, test.result, result.Value) + }) + } +} diff --git a/core/services/pipeline/task.http.go b/core/services/pipeline/task.http.go new file mode 100644 index 00000000..0b30ad4a --- /dev/null +++ b/core/services/pipeline/task.http.go @@ -0,0 +1,129 @@ +package pipeline + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + clhttp "github.com/goplugin/pluginv3.0/v2/core/utils/http" +) + +// Return types: +// +// string +type HTTPTask struct { + BaseTask `mapstructure:",squash"` + Method string + URL string + RequestData string `json:"requestData"` + AllowUnrestrictedNetworkAccess string + Headers string + + config Config + httpClient *http.Client + unrestrictedHTTPClient *http.Client +} + +var _ Task = (*HTTPTask)(nil) + +var ( + promHTTPFetchTime = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "pipeline_task_http_fetch_time", + Help: "Time taken to fully execute the HTTP request", + }, + 
[]string{"pipeline_task_spec_id"}, + ) + promHTTPResponseBodySize = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "pipeline_task_http_response_body_size", + Help: "Size (in bytes) of the HTTP response body", + }, + []string{"pipeline_task_spec_id"}, + ) +) + +func (t *HTTPTask) Type() TaskType { + return TaskTypeHTTP +} + +func (t *HTTPTask) Run(ctx context.Context, lggr logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, -1, -1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var ( + method StringParam + url URLParam + requestData MapParam + allowUnrestrictedNetworkAccess BoolParam + reqHeaders StringSliceParam + ) + err = multierr.Combine( + errors.Wrap(ResolveParam(&method, From(NonemptyString(t.Method), "GET")), "method"), + errors.Wrap(ResolveParam(&url, From(VarExpr(t.URL, vars), NonemptyString(t.URL))), "url"), + errors.Wrap(ResolveParam(&requestData, From(VarExpr(t.RequestData, vars), JSONWithVarExprs(t.RequestData, vars, false), nil)), "requestData"), + // Any hardcoded strings used for URL uses the unrestricted HTTP adapter + // Interpolated variable URLs use restricted HTTP adapter by default + // You must set allowUnrestrictedNetworkAccess=true on the task to enable variable-interpolated URLs to make restricted network requests + errors.Wrap(ResolveParam(&allowUnrestrictedNetworkAccess, From(NonemptyString(t.AllowUnrestrictedNetworkAccess), !variableRegexp.MatchString(t.URL))), "allowUnrestrictedNetworkAccess"), + errors.Wrap(ResolveParam(&reqHeaders, From(NonemptyString(t.Headers), "[]")), "reqHeaders"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + if len(reqHeaders)%2 != 0 { + return Result{Error: errors.Errorf("headers must have an even number of elements")}, runInfo + } + + requestDataJSON, err := json.Marshal(requestData) + if err != nil { + return Result{Error: err}, runInfo + } + lggr.Debugw("HTTP task: sending 
request", + "requestData", string(requestDataJSON), + "url", url.String(), + "method", method, + "reqHeaders", reqHeaders, + "allowUnrestrictedNetworkAccess", allowUnrestrictedNetworkAccess, + ) + + requestCtx, cancel := httpRequestCtx(ctx, t, t.config) + defer cancel() + + var client *http.Client + if allowUnrestrictedNetworkAccess { + client = t.unrestrictedHTTPClient + } else { + client = t.httpClient + } + responseBytes, statusCode, respHeaders, elapsed, err := makeHTTPRequest(requestCtx, lggr, method, url, reqHeaders, requestData, client, t.config.DefaultHTTPLimit()) + if err != nil { + if errors.Is(errors.Cause(err), clhttp.ErrDisallowedIP) { + err = errors.Wrap(err, `connections to local resources are disabled by default, if you are sure this is safe, you can enable on a per-task basis by setting allowUnrestrictedNetworkAccess="true" in the pipeline task spec, e.g. fetch [type="http" method=GET url="$(decode_cbor.url)" allowUnrestrictedNetworkAccess="true"]`) + } + return Result{Error: err}, RunInfo{IsRetryable: isRetryableHTTPError(statusCode, err)} + } + + lggr.Debugw("HTTP task got response", + "response", string(responseBytes), + "respHeaders", respHeaders, + "url", url.String(), + "dotID", t.DotID(), + ) + + promHTTPFetchTime.WithLabelValues(t.DotID()).Set(float64(elapsed)) + promHTTPResponseBodySize.WithLabelValues(t.DotID()).Set(float64(len(responseBytes))) + + // NOTE: We always stringify the response since this is required for all current jobs. + // If a binary response is required we might consider adding an adapter + // flag such as "BinaryMode: true" which passes through raw binary as the + // value instead. 
+ return Result{Value: string(responseBytes)}, runInfo +} diff --git a/core/services/pipeline/task.http_test.go b/core/services/pipeline/task.http_test.go new file mode 100644 index 00000000..37c2e210 --- /dev/null +++ b/core/services/pipeline/task.http_test.go @@ -0,0 +1,415 @@ +package pipeline_test + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "sort" + "testing" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + clhttptest "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/httptest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" + clhttp "github.com/goplugin/pluginv3.0/v2/core/utils/http" +) + +// ethUSDPairing has the ETH/USD parameters needed when POSTing to the price +// external adapters. 
+// https://github.com/goplugin/price-adapters + +func TestHTTPTask_Happy(t *testing.T) { + t.Parallel() + + config := configtest.NewTestGeneralConfig(t) + s1 := httptest.NewServer(fakePriceResponder(t, utils.MustUnmarshalToMap(btcUSDPairing), decimal.NewFromInt(9700), "", nil)) + defer s1.Close() + + task := pipeline.HTTPTask{ + BaseTask: pipeline.NewBaseTask(0, "http", nil, nil, 0), + Method: "POST", + URL: s1.URL, + RequestData: btcUSDPairing, + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + task.HelperSetDependencies(config.JobPipeline(), c, c) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.NoError(t, result.Error) + require.NotNil(t, result.Value) + var x struct { + Data struct { + Result decimal.Decimal `json:"result"` + } `json:"data"` + } + err := json.Unmarshal([]byte(result.Value.(string)), &x) + require.NoError(t, err) + require.Equal(t, decimal.NewFromInt(9700), x.Data.Result) +} + +func TestHTTPTask_Variables(t *testing.T) { + t.Parallel() + + validMeta := map[string]interface{}{"theMeta": "yes"} + + tests := []struct { + name string + requestData string + meta pipeline.JSONSerializable + inputs []pipeline.Result + vars pipeline.Vars + expectedRequestData map[string]interface{} + expectedErrorCause error + expectedErrorContains string + }{ + { + "requestData (empty) + meta", + ``, + pipeline.JSONSerializable{validMeta, true}, + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"some_data": map[string]interface{}{"foo": 543.21}}), + map[string]interface{}{}, + nil, + "", + }, + { + "requestData (pure variable) + meta", + `$(some_data)`, + pipeline.JSONSerializable{validMeta, true}, + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"some_data": map[string]interface{}{"foo": 543.21}}), + map[string]interface{}{"foo": 543.21}, + nil, + "", + 
}, + { + "requestData (pure variable)", + `$(some_data)`, + pipeline.JSONSerializable{nil, false}, + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"some_data": map[string]interface{}{"foo": 543.21}}), + map[string]interface{}{"foo": 543.21}, + nil, + "", + }, + { + "requestData (pure variable, missing)", + `$(some_data)`, + pipeline.JSONSerializable{validMeta, true}, + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"not_some_data": map[string]interface{}{"foo": 543.21}}), + nil, + pipeline.ErrKeypathNotFound, + "requestData", + }, + { + "requestData (pure variable, not a map)", + `$(some_data)`, + pipeline.JSONSerializable{validMeta, true}, + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"some_data": 543.21}), + nil, + pipeline.ErrBadInput, + "requestData", + }, + { + "requestData (interpolation) + meta", + `{"data":{"result":$(medianize)}}`, + pipeline.JSONSerializable{validMeta, true}, + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"medianize": 543.21}), + map[string]interface{}{"data": map[string]interface{}{"result": 543.21}}, + nil, + "", + }, + { + "requestData (interpolation, missing)", + `{"data":{"result":$(medianize)}}`, + pipeline.JSONSerializable{validMeta, true}, + []pipeline.Result{{Value: 123.45}}, + pipeline.NewVarsFrom(map[string]interface{}{"nope": "foo bar"}), + nil, + pipeline.ErrKeypathNotFound, + "requestData", + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + + s1 := httptest.NewServer(fakePriceResponder(t, test.expectedRequestData, decimal.NewFromInt(9700), "", nil)) + defer s1.Close() + + feedURL, err := url.ParseRequestURI(s1.URL) + require.NoError(t, err) + + orm := bridges.NewORM(db, logger.TestLogger(t), cfg.Database()) + _, bridge := 
cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: feedURL.String()}, cfg.Database()) + + task := pipeline.BridgeTask{ + BaseTask: pipeline.NewBaseTask(0, "bridge", nil, nil, 0), + Name: bridge.Name.String(), + RequestData: test.requestData, + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + trORM := pipeline.NewORM(db, logger.TestLogger(t), cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + specID, err := trORM.CreateSpec(pipeline.Pipeline{}, *models.NewInterval(5 * time.Minute), pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + task.HelperSetDependencies(cfg.JobPipeline(), cfg.WebServer(), orm, specID, uuid.UUID{}, c) + + err = test.vars.Set("meta", test.meta) + require.NoError(t, err) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if test.expectedErrorCause != nil { + require.Equal(t, test.expectedErrorCause, errors.Cause(result.Error)) + if test.expectedErrorContains != "" { + require.Contains(t, result.Error.Error(), test.expectedErrorContains) + } + + } else { + require.NoError(t, result.Error) + require.NotNil(t, result.Value) + var x struct { + Data struct { + Result decimal.Decimal `json:"result"` + } `json:"data"` + } + err := json.Unmarshal([]byte(result.Value.(string)), &x) + require.NoError(t, err) + require.Equal(t, decimal.NewFromInt(9700), x.Data.Result) + } + }) + } +} + +func TestHTTPTask_OverrideURLSafe(t *testing.T) { + t.Parallel() + + config := configtest.NewTestGeneralConfig(t) + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte("{}")) + require.NoError(t, err) + }) + + server := httptest.NewServer(handler) + defer server.Close() + + task := pipeline.HTTPTask{ + Method: "POST", + URL: server.URL, + RequestData: ethUSDPairing, + } + // Use real clients 
here to actually test the local connection blocking + r := clhttp.NewRestrictedHTTPClient(config.Database(), logger.TestLogger(t)) + u := clhttp.NewUnrestrictedHTTPClient() + task.HelperSetDependencies(config.JobPipeline(), r, u) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.NoError(t, result.Error) + + task.URL = "$(url)" + + vars := pipeline.NewVarsFrom(map[string]interface{}{"url": server.URL}) + result, runInfo = task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + assert.False(t, runInfo.IsPending) + assert.True(t, runInfo.IsRetryable) + require.Error(t, result.Error) + require.Contains(t, result.Error.Error(), "Connections to local/private and multicast networks are disabled") + require.Nil(t, result.Value) + + task.AllowUnrestrictedNetworkAccess = "true" + + result, runInfo = task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.NoError(t, result.Error) +} + +func TestHTTPTask_ErrorMessage(t *testing.T) { + t.Parallel() + + config := configtest.NewTestGeneralConfig(t) + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusTooManyRequests) + resp := &adapterResponse{} + resp.SetErrorMessage("could not hit data fetcher") + err := json.NewEncoder(w).Encode(resp) + require.NoError(t, err) + }) + + server := httptest.NewServer(handler) + defer server.Close() + + c := clhttptest.NewTestLocalOnlyHTTPClient() + task := pipeline.HTTPTask{ + Method: "POST", + URL: server.URL, + RequestData: ethUSDPairing, + } + task.HelperSetDependencies(config.JobPipeline(), c, c) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + 
assert.False(t, runInfo.IsRetryable) + + require.Error(t, result.Error) + require.Contains(t, result.Error.Error(), "could not hit data fetcher") + require.Nil(t, result.Value) +} + +func TestHTTPTask_OnlyErrorMessage(t *testing.T) { + t.Parallel() + + config := configtest.NewTestGeneralConfig(t) + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusBadGateway) + _, err := w.Write([]byte(mustReadFile(t, "../../testdata/apiresponses/coinmarketcap.error.json"))) + require.NoError(t, err) + }) + + server := httptest.NewServer(handler) + defer server.Close() + + task := pipeline.HTTPTask{ + Method: "POST", + URL: server.URL, + RequestData: ethUSDPairing, + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + task.HelperSetDependencies(config.JobPipeline(), c, c) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + assert.True(t, runInfo.IsRetryable) + require.Error(t, result.Error) + require.Contains(t, result.Error.Error(), "RequestId") + require.Nil(t, result.Value) +} + +func TestHTTPTask_Headers(t *testing.T) { + allHeaders := func(headers http.Header) (s []string) { + var keys []string + for k := range headers { + keys = append(keys, k) + } + // get it in a consistent order + sort.Strings(keys) + for _, k := range keys { + v := headers.Get(k) + s = append(s, k, v) + } + return s + } + + standardHeaders := []string{"Content-Length", "38", "Content-Type", "application/json", "User-Agent", "Go-http-client/1.1"} + + t.Run("sends headers", func(t *testing.T) { + config := configtest.NewTestGeneralConfig(t) + var headers http.Header + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + headers = r.Header + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte(`{"fooresponse": 1}`)) + 
require.NoError(t, err) + }) + + server := httptest.NewServer(handler) + defer server.Close() + + task := pipeline.HTTPTask{ + Method: "POST", + URL: server.URL, + RequestData: ethUSDPairing, + Headers: `["X-Header-1", "foo", "X-Header-2", "bar"]`, + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + task.HelperSetDependencies(config.JobPipeline(), c, c) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + assert.Equal(t, `{"fooresponse": 1}`, result.Value) + assert.Nil(t, result.Error) + + assert.Equal(t, append(standardHeaders, "X-Header-1", "foo", "X-Header-2", "bar"), allHeaders(headers)) + }) + + t.Run("errors with odd number of headers", func(t *testing.T) { + task := pipeline.HTTPTask{ + Method: "POST", + URL: "http://example.com", + RequestData: ethUSDPairing, + Headers: `["X-Header-1", "foo", "X-Header-2", "bar", "odd one out"]`, + } + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + assert.NotNil(t, result.Error) + assert.Equal(t, `headers must have an even number of elements`, result.Error.Error()) + assert.Nil(t, result.Value) + }) + + t.Run("allows to override content-type", func(t *testing.T) { + config := configtest.NewTestGeneralConfig(t) + var headers http.Header + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + headers = r.Header + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte(`{"fooresponse": 3}`)) + require.NoError(t, err) + }) + + server := httptest.NewServer(handler) + defer server.Close() + + task := pipeline.HTTPTask{ + Method: "POST", + URL: server.URL, + RequestData: ethUSDPairing, + Headers: `["X-Header-1", "foo", "Content-Type", "footype", "X-Header-2", "bar"]`, + } + c := clhttptest.NewTestLocalOnlyHTTPClient() + 
task.HelperSetDependencies(config.JobPipeline(), c, c) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), nil) + assert.False(t, runInfo.IsPending) + assert.Equal(t, `{"fooresponse": 3}`, result.Value) + assert.Nil(t, result.Error) + + assert.Equal(t, []string{"Content-Length", "38", "Content-Type", "footype", "User-Agent", "Go-http-client/1.1", "X-Header-1", "foo", "X-Header-2", "bar"}, allHeaders(headers)) + }) +} diff --git a/core/services/pipeline/task.jsonparse.go b/core/services/pipeline/task.jsonparse.go new file mode 100644 index 00000000..c36c2ed8 --- /dev/null +++ b/core/services/pipeline/task.jsonparse.go @@ -0,0 +1,118 @@ +package pipeline + +import ( + "bytes" + "context" + "encoding/json" + "math/big" + "strings" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// float64 +// string +// bool +// map[string]interface{} +// []interface{} +// nil +type JSONParseTask struct { + BaseTask `mapstructure:",squash"` + Path string `json:"path"` + Separator string `json:"separator"` + Data string `json:"data"` + // Lax when disabled will return an error if the path does not exist + // Lax when enabled will return nil with no error if the path does not exist + Lax string +} + +var _ Task = (*JSONParseTask)(nil) + +func (t *JSONParseTask) Type() TaskType { + return TaskTypeJSONParse +} + +func (t *JSONParseTask) Run(_ context.Context, l logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var sep StringParam + err = errors.Wrap(ResolveParam(&sep, From(t.Separator)), "separator") + var ( + path = NewJSONPathParam(string(sep)) + data BytesParam + lax BoolParam + ) + err = multierr.Combine(err, + errors.Wrap(ResolveParam(&path, From(VarExpr(t.Path, vars), t.Path)), "path"), + 
errors.Wrap(ResolveParam(&data, From(VarExpr(t.Data, vars), Input(inputs, 0))), "data"), + errors.Wrap(ResolveParam(&lax, From(NonemptyString(t.Lax), false)), "lax"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + var decoded interface{} + d := json.NewDecoder(bytes.NewReader(data)) + d.UseNumber() + err = d.Decode(&decoded) + if err != nil { + return Result{Error: err}, runInfo + } + + for _, part := range path { + switch d := decoded.(type) { + case map[string]interface{}: + var exists bool + decoded, exists = d[part] + if !exists && bool(lax) { + decoded = nil + break + } else if !exists { + return Result{Error: errors.Wrapf(ErrKeypathNotFound, `could not resolve path ["%v"] in %s`, strings.Join(path, `","`), data)}, runInfo + } + + case []interface{}: + bigindex, ok := big.NewInt(0).SetString(part, 10) + if !ok { + return Result{Error: errors.Wrapf(ErrKeypathNotFound, "JSONParse task error: %v is not a valid array index", part)}, runInfo + } else if !bigindex.IsInt64() { + if bool(lax) { + decoded = nil + break + } + return Result{Error: errors.Wrapf(ErrKeypathNotFound, `could not resolve path ["%v"] in %s`, strings.Join(path, `","`), data)}, runInfo + } + index := int(bigindex.Int64()) + if index < 0 { + index = len(d) + index + } + + exists := index >= 0 && index < len(d) + if !exists && bool(lax) { + decoded = nil + break + } else if !exists { + return Result{Error: errors.Wrapf(ErrKeypathNotFound, `could not resolve path ["%v"] in %s`, strings.Join(path, `","`), data)}, runInfo + } + decoded = d[index] + + default: + return Result{Error: errors.Wrapf(ErrKeypathNotFound, `could not resolve path ["%v"] in %s`, strings.Join(path, `","`), data)}, runInfo + } + } + + decoded, err = reinterpetJsonNumbers(decoded) + if err != nil { + return Result{Error: multierr.Combine(ErrBadInput, err)}, runInfo + } + + return Result{Value: decoded}, runInfo +} diff --git a/core/services/pipeline/task.jsonparse_test.go 
b/core/services/pipeline/task.jsonparse_test.go new file mode 100644 index 00000000..9b705cca --- /dev/null +++ b/core/services/pipeline/task.jsonparse_test.go @@ -0,0 +1,422 @@ +package pipeline_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestJSONParseTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + data string + path string + separator string + lax string + vars pipeline.Vars + inputs []pipeline.Result + wantData interface{} + wantErrorCause error + wantErrorContains string + }{ + { + "array index path", + "", + "data,0,availability", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data":[{"availability":"0.99991"}]}`}}, + "0.99991", + nil, + "", + }, + { + "large int result", + "", + "some_id", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"some_id":1564679049192120321}`}}, + int64(1564679049192120321), + nil, + "", + }, + { + "float result", + "", + "availability", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"availability":3.14}`}}, + 3.14, + nil, + "", + }, + { + "index array", + "", + "data,0", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data": [0, 1]}`}}, + int64(0), + nil, + "", + }, + { + "index array of array", + "", + "data,0,0", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data": [[0, 1]]}`}}, + int64(0), + nil, + "", + }, + { + "index of negative one", + "", + "data,-1", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data": [0, 1]}`}}, + int64(1), + nil, + "", + }, + { + "index of negative array length", + "", + "data,-10", + "", + "false", + pipeline.NewVarsFrom(nil), + 
[]pipeline.Result{{Value: `{"data": [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]}`}}, + int64(0), + nil, + "", + }, + { + "index of negative array length minus one with lax returns nil", + "", + "data,-12", + "", + "true", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data": [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]}`}}, + nil, + nil, + "", + }, + { + "index of negative array length minus one without lax returns error", + "", + "data,-12", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data": [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]}`}}, + nil, + pipeline.ErrKeypathNotFound, + "", + }, + { + "maximum index array with lax returns nil", + "", + "data,18446744073709551615", + "", + "true", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data": [0, 1]}`}}, + nil, + nil, + "", + }, + { + "maximum index array without lax returns error", + "", + "data,18446744073709551615", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data": [0, 1]}`}}, + nil, + pipeline.ErrKeypathNotFound, + "", + }, + { + "overflow index array with lax returns nil", + "", + "data,18446744073709551616", + "", + "true", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data": [0, 1]}`}}, + nil, + nil, + "", + }, + { + "overflow index array without lax returns error", + "", + "data,18446744073709551616", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data": [0, 1]}`}}, + nil, + pipeline.ErrKeypathNotFound, + "", + }, + { + "return array", + "", + "data,0", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data": [[0, 1]]}`}}, + []interface{}{int64(0), int64(1)}, + nil, + "", + }, + { + "return false", + "", + "data", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data": false}`}}, + false, + nil, + "", + }, + { + "return true", + "", + "data", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"data": true}`}}, + 
true, + nil, + "", + }, + { + "regression test: keys in the path have dots", + "", + "Realtime Currency Exchange Rate,5. Exchange Rate", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{ + "Realtime Currency Exchange Rate": { + "1. From_Currency Code": "LEND", + "2. From_Currency Name": "EthLend", + "3. To_Currency Code": "ETH", + "4. To_Currency Name": "Ethereum", + "5. Exchange Rate": "0.00058217", + "6. Last Refreshed": "2020-06-22 19:14:04", + "7. Time Zone": "UTC", + "8. Bid Price": "0.00058217", + "9. Ask Price": "0.00058217" + } + }`}}, + "0.00058217", + nil, + "", + }, + { + "custom separator: keys in the path have commas", + "", + "foo.bar1,bar2,bar3", + ".", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{ + "foo": { + "bar1": "LEND", + "bar1,bar2": "EthLend", + "bar2,bar3": "ETH", + "bar1,bar3": "Ethereum", + "bar1,bar2,bar3": "0.00058217", + "bar1.bar2.bar3": "2020-06-22 19:14:04" + } + }`}}, + "0.00058217", + nil, + "", + }, + { + "custom separator: diabolical keys in the path", + "", + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.,/\\[]{}|<>?_+-=!@#$%^&*()__hacky__separator__foo", + "__hacky__separator__", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{ + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.,/\\[]{}|<>?_+-=!@#$%^&*()": { + "foo": "LEND", + "bar": "EthLend" + } + }`}}, + "LEND", + nil, + "", + }, + { + "missing top-level key with lax=false returns error", + "", + "baz", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"foo": 1}`}}, + nil, + pipeline.ErrKeypathNotFound, + "", + }, + { + "missing nested key with lax=false returns error", + "", + "foo,bar", + "", + "false", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"foo": {}}`}}, + nil, + pipeline.ErrKeypathNotFound, + "", + }, + { + "missing top-level key with lax=true returns nil", + "", + "baz", + "", + "true", + pipeline.NewVarsFrom(nil), + 
[]pipeline.Result{{Value: `{}`}}, + nil, + nil, + "", + }, + { + "missing nested key with lax=true returns nil", + "", + "foo,baz", + "", + "true", + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"foo": {}}`}}, + nil, + nil, + "", + }, + { + "variable data", + "$(foo.bar)", + "data,0,availability", + "", + "false", + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": `{"data":[{"availability":"0.99991"}]}`}, + }), + []pipeline.Result{}, + "0.99991", + nil, + "", + }, + { + "empty path", + "$(foo.bar)", + "", + "", + "false", + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": `{"data":["stevetoshi sergeymoto"]}`}, + }), + []pipeline.Result{}, + map[string]interface{}{"data": []interface{}{"stevetoshi sergeymoto"}}, + nil, + "", + }, + { + "no data or input", + "", + "$(chain.link)", + "", + "false", + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": `{"data":[{"availability":"0.99991"}]}`}, + "chain": map[string]interface{}{"link": "data,0,availability"}, + }), + []pipeline.Result{}, + "0.99991", + pipeline.ErrIndexOutOfRange, + "data", + }, + { + "malformed 'lax' param", + "$(foo.bar)", + "$(chain.link)", + "", + "sergey", + pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": `{"data":[{"availability":"0.99991"}]}`}, + "chain": map[string]interface{}{"link": "data,0,availability"}, + }), + []pipeline.Result{}, + "0.99991", + pipeline.ErrBadInput, + "lax", + }, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + task := pipeline.JSONParseTask{ + BaseTask: pipeline.NewBaseTask(0, "json", nil, nil, 0), + Path: test.path, + Separator: test.separator, + Data: test.data, + Lax: test.lax, + } + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + + if test.wantErrorCause 
!= nil { + require.Equal(t, test.wantErrorCause, errors.Cause(result.Error)) + if test.wantErrorContains != "" { + require.Contains(t, result.Error.Error(), test.wantErrorContains) + } + + require.Nil(t, result.Value) + val, err := test.vars.Get("json") + require.Equal(t, pipeline.ErrKeypathNotFound, errors.Cause(err)) + require.Nil(t, val) + } else { + require.NoError(t, result.Error) + require.Equal(t, test.wantData, result.Value) + } + }) + } +} diff --git a/core/services/pipeline/task.length.go b/core/services/pipeline/task.length.go new file mode 100644 index 00000000..db184b1a --- /dev/null +++ b/core/services/pipeline/task.length.go @@ -0,0 +1,43 @@ +package pipeline + +import ( + "context" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// *decimal.Decimal +type LengthTask struct { + BaseTask `mapstructure:",squash"` + Input string `json:"input"` +} + +var _ Task = (*LengthTask)(nil) + +func (t *LengthTask) Type() TaskType { + return TaskTypeLength +} + +func (t *LengthTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var input BytesParam + + err = multierr.Combine( + errors.Wrap(ResolveParam(&input, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), "input"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + return Result{Value: decimal.NewFromInt(int64(len(input)))}, runInfo +} diff --git a/core/services/pipeline/task.length_test.go b/core/services/pipeline/task.length_test.go new file mode 100644 index 00000000..24e7ac65 --- /dev/null +++ b/core/services/pipeline/task.length_test.go @@ -0,0 +1,74 @@ +package pipeline_test + +import ( + "testing" + + "github.com/shopspring/decimal" + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestLengthTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + want decimal.Decimal + }{ + {"normal bytes", []byte{0xaa, 0xbb, 0xcc, 0xdd}, decimal.NewFromInt(4)}, + {"empty bytes", []byte{}, decimal.NewFromInt(0)}, + {"string as bytes", []byte("stevetoshi sergeymoto"), decimal.NewFromInt(21)}, + {"string input gets converted to bytes", "stevetoshi sergeymoto", decimal.NewFromInt(21)}, + {"empty string", "", decimal.NewFromInt(0)}, + } + + for _, test := range tests { + assertOK := func(result pipeline.Result, runInfo pipeline.RunInfo) { + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.NoError(t, result.Error) + require.Equal(t, test.want.String(), result.Value.(decimal.Decimal).String()) + } + t.Run(test.name, func(t *testing.T) { + t.Run("without vars through job DAG", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.LengthTask{BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0)} + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: test.input}})) + }) + t.Run("without vars through input param", func(t *testing.T) { + var inputStr string + if _, ok := test.input.([]byte); ok { + inputStr = string(test.input.([]byte)) + } else { + inputStr = test.input.(string) + } + if inputStr == "" { + // empty input parameter is indistinguishable from not providing it at all + // in that case the task will use an input defined by the job DAG + return + } + vars := pipeline.NewVarsFrom(nil) + task := pipeline.LengthTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: inputStr, + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, 
[]pipeline.Result{})) + }) + t.Run("with vars", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": test.input}, + }) + task := pipeline.LengthTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: "$(foo.bar)", + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + }) + } +} diff --git a/core/services/pipeline/task.lessthan.go b/core/services/pipeline/task.lessthan.go new file mode 100644 index 00000000..481333d0 --- /dev/null +++ b/core/services/pipeline/task.lessthan.go @@ -0,0 +1,50 @@ +package pipeline + +import ( + "context" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// bool +type LessThanTask struct { + BaseTask `mapstructure:",squash"` + Left string `json:"input"` + Right string `json:"limit"` +} + +var ( + _ Task = (*LessThanTask)(nil) +) + +func (t *LessThanTask) Type() TaskType { + return TaskTypeLessThan +} + +func (t *LessThanTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var ( + a DecimalParam + b DecimalParam + ) + + err = multierr.Combine( + errors.Wrap(ResolveParam(&a, From(VarExpr(t.Left, vars), NonemptyString(t.Left), Input(inputs, 0))), "left"), + errors.Wrap(ResolveParam(&b, From(VarExpr(t.Right, vars), NonemptyString(t.Right))), "right"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + value := a.Decimal().LessThan(b.Decimal()) + return Result{Value: value}, runInfo +} diff --git a/core/services/pipeline/task.lessthan_test.go b/core/services/pipeline/task.lessthan_test.go new file mode 100644 index 00000000..8c06aaed --- /dev/null +++ b/core/services/pipeline/task.lessthan_test.go @@ -0,0 +1,142 @@ +package 
pipeline_test + +import ( + "fmt" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestLessThanTask_Happy(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + left interface{} + right string + want bool + }{ + {"string, lt 100", "1.23", "100", true}, + {"string, lt negative", "1.23", "-5", false}, + {"string, lt zero", "1.23", "0", false}, + {"string, lt large value", "1.23", "1000000000000000000", true}, + {"large string, lt large value", "10000000000000000001", "1000000000000000000", false}, + + {"int, true", int(2), "100", true}, + {"int, false", int(2), "-5", false}, + + {"int8, true", int8(2), "100", true}, + {"int8, false", int8(2), "-5", false}, + + {"int16, true", int16(2), "100", true}, + {"int16, false", int16(2), "-5", false}, + + {"int32,true", int32(2), "100", true}, + {"int32, false", int32(2), "-5", false}, + + {"int64, true", int64(2), "100", true}, + {"int64, false", int64(2), "-5", false}, + + {"uint, true", uint(2), "100", true}, + {"uint, false", uint(2), "-5", false}, + + {"uint8, true", uint8(2), "100", true}, + {"uint8, false", uint8(2), "-5", false}, + + {"uint16, true", uint16(2), "100", true}, + {"uint16, false", uint16(2), "-5", false}, + + {"uint32, true", uint32(2), "100", true}, + {"uint32, false", uint32(2), "-5", false}, + + {"uint64, true", uint64(2), "100", true}, + {"uint64, false", uint64(2), "-5", false}, + + {"float32, true", float32(1.23), "10", true}, + {"float32, false", float32(1.23), "-5", false}, + + {"float64, true", float64(1.23), "10", true}, + {"float64, false", float64(1.23), "-5", false}, + } + + for _, test := range tests { + assertOK := func(result pipeline.Result, runInfo pipeline.RunInfo) { + assert.False(t, runInfo.IsPending) + 
assert.False(t, runInfo.IsRetryable) + require.NoError(t, result.Error) + require.Equal(t, test.want, result.Value.(bool)) + } + t.Run(test.name, func(t *testing.T) { + t.Run("without vars through job DAG", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.LessThanTask{BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), Right: test.right} + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: test.left}})) + }) + t.Run("without vars through input param", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.LessThanTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Left: fmt.Sprintf("%v", test.left), + Right: test.right, + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + t.Run("with vars", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": test.left}, + "chain": map[string]interface{}{"link": test.right}, + }) + task := pipeline.LessThanTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Left: "$(foo.bar)", + Right: "$(chain.link)", + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + }) + } +} + +func TestLessThanTask_Unhappy(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + left string + right string + inputs []pipeline.Result + vars pipeline.Vars + wantErrorCause error + wantErrorContains string + }{ + {"map as input from inputs", "", "100", []pipeline.Result{{Value: map[string]interface{}{"chain": "link"}}}, pipeline.NewVarsFrom(nil), pipeline.ErrBadInput, "left"}, + {"map as input from var", "$(foo)", "100", nil, pipeline.NewVarsFrom(map[string]interface{}{"foo": map[string]interface{}{"chain": "link"}}), pipeline.ErrBadInput, "left"}, + {"slice as input from inputs", "", "100", []pipeline.Result{{Value: []interface{}{"chain", "link"}}}, 
pipeline.NewVarsFrom(nil), pipeline.ErrBadInput, "left"}, + {"slice as input from var", "$(foo)", "100", nil, pipeline.NewVarsFrom(map[string]interface{}{"foo": []interface{}{"chain", "link"}}), pipeline.ErrBadInput, "left"}, + {"input as missing var", "$(foo)", "100", nil, pipeline.NewVarsFrom(nil), pipeline.ErrKeypathNotFound, "left"}, + {"limit as missing var", "", "$(foo)", []pipeline.Result{{Value: "123"}}, pipeline.NewVarsFrom(nil), pipeline.ErrKeypathNotFound, "right"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + task := pipeline.LessThanTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Left: test.left, + Right: test.right, + } + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.Equal(t, test.wantErrorCause, errors.Cause(result.Error)) + if test.wantErrorContains != "" { + require.Contains(t, result.Error.Error(), test.wantErrorContains) + } + }) + } +} diff --git a/core/services/pipeline/task.lookup.go b/core/services/pipeline/task.lookup.go new file mode 100644 index 00000000..d4b37b44 --- /dev/null +++ b/core/services/pipeline/task.lookup.go @@ -0,0 +1,48 @@ +package pipeline + +import ( + "context" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Look up a field on a map +// +// Return types: +// +// interface{} +type LookupTask struct { + BaseTask `mapstructure:",squash"` + Key string `json:"key"` +} + +var _ Task = (*LookupTask)(nil) + +func (t *LookupTask) Type() TaskType { + return TaskTypeLookup +} + +func (t *LookupTask) Run(ctx context.Context, lggr logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 1, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var key StringParam + err = errors.Wrap(ResolveParam(&key, From(t.Key)), 
"key") + if err != nil { + return Result{Error: err}, runInfo + } + + var val interface{} + switch m := inputs[0].Value.(type) { + case map[string]interface{}: + val = m[(string)(key)] + default: + return Result{Error: errors.Errorf("unexpected input type: %T", inputs[0].Value)}, runInfo + } + + return Result{Value: val}, runInfo +} diff --git a/core/services/pipeline/task.lookup_test.go b/core/services/pipeline/task.lookup_test.go new file mode 100644 index 00000000..a087770e --- /dev/null +++ b/core/services/pipeline/task.lookup_test.go @@ -0,0 +1,68 @@ +package pipeline_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func Test_LookupTask(t *testing.T) { + task := pipeline.LookupTask{} + m := map[string]interface{}{ + "foo": 42, + "bar": "baz", + } + var vars pipeline.Vars + var inputs []pipeline.Result + + t.Run("with valid key for map", func(t *testing.T) { + task.Key = "foo" + inputs = []pipeline.Result{{Value: m, Error: nil}} + + res, _ := task.Run(testutils.Context(t), logger.TestLogger(t), vars, inputs) + + assert.Equal(t, 42, res.Value) + assert.Nil(t, res.Error) + }) + t.Run("returns nil if key is missing", func(t *testing.T) { + task.Key = "qux" + inputs = []pipeline.Result{{Value: m, Error: nil}} + + res, _ := task.Run(testutils.Context(t), logger.TestLogger(t), vars, inputs) + + assert.Nil(t, res.Error) + assert.Nil(t, res.Value) + }) + t.Run("errors when input is not a map", func(t *testing.T) { + task.Key = "qux" + inputs = []pipeline.Result{{Value: "something", Error: nil}} + + res, _ := task.Run(testutils.Context(t), logger.TestLogger(t), vars, inputs) + + assert.EqualError(t, res.Error, "unexpected input type: string") + assert.Nil(t, res.Value) + }) + t.Run("errors when input is error", func(t *testing.T) { + task.Key = 
"qux" + inputs = []pipeline.Result{{Value: nil, Error: errors.New("something blew up")}} + + res, _ := task.Run(testutils.Context(t), logger.TestLogger(t), vars, inputs) + + assert.EqualError(t, res.Error, "task inputs: too many errors") + assert.Nil(t, res.Value) + }) + t.Run("errors with too many inputs", func(t *testing.T) { + task.Key = "qux" + inputs = []pipeline.Result{{Value: m, Error: nil}, {Value: nil, Error: errors.New("something blew up")}} + + res, _ := task.Run(testutils.Context(t), logger.TestLogger(t), vars, inputs) + + assert.EqualError(t, res.Error, "task inputs: min: 1 max: 1 (got 2): wrong number of task inputs") + assert.Nil(t, res.Value) + }) +} diff --git a/core/services/pipeline/task.lowercase.go b/core/services/pipeline/task.lowercase.go new file mode 100644 index 00000000..2eb3b266 --- /dev/null +++ b/core/services/pipeline/task.lowercase.go @@ -0,0 +1,43 @@ +package pipeline + +import ( + "context" + "strings" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// string +type LowercaseTask struct { + BaseTask `mapstructure:",squash"` + Input string `json:"input"` +} + +var _ Task = (*LowercaseTask)(nil) + +func (t *LowercaseTask) Type() TaskType { + return TaskTypeLowercase +} + +func (t *LowercaseTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var input StringParam + + err = multierr.Combine( + errors.Wrap(ResolveParam(&input, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), "input"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + return Result{Value: strings.ToLower(string(input))}, runInfo +} diff --git a/core/services/pipeline/task.lowercase_test.go b/core/services/pipeline/task.lowercase_test.go new file mode 100644 
index 00000000..f1f1e802 --- /dev/null +++ b/core/services/pipeline/task.lowercase_test.go @@ -0,0 +1,70 @@ +package pipeline_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestLowercaseTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + want string + }{ + {"uppercase string", "UPPERCASE", "uppercase"}, + {"camelCase string", "camelCase", "camelcase"}, + {"PascalCase string", "PascalCase", "pascalcase"}, + {"mixed string", "mIxEd", "mixed"}, + {"lowercase string", "lowercase", "lowercase"}, + {"empty string", "", ""}, + } + + for _, test := range tests { + assertOK := func(result pipeline.Result, runInfo pipeline.RunInfo) { + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.NoError(t, result.Error) + require.Equal(t, test.want, result.Value.(string)) + } + t.Run(test.name, func(t *testing.T) { + t.Run("without vars through job DAG", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.LowercaseTask{BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0)} + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: test.input}})) + }) + t.Run("without vars through input param", func(t *testing.T) { + inputStr := fmt.Sprintf("%v", test.input) + if inputStr == "" { + // empty input parameter is indistinguishable from not providing it at all + // in that case the task will use an input defined by the job DAG + return + } + vars := pipeline.NewVarsFrom(nil) + task := pipeline.LowercaseTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: inputStr, + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + t.Run("with 
vars", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": test.input}, + }) + task := pipeline.LowercaseTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: "$(foo.bar)", + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + }) + } +} diff --git a/core/services/pipeline/task.mean.go b/core/services/pipeline/task.mean.go new file mode 100644 index 00000000..9ac0cd77 --- /dev/null +++ b/core/services/pipeline/task.mean.go @@ -0,0 +1,78 @@ +package pipeline + +import ( + "context" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// *decimal.Decimal +type MeanTask struct { + BaseTask `mapstructure:",squash"` + Values string `json:"values"` + AllowedFaults string `json:"allowedFaults"` + Precision string `json:"precision"` +} + +var _ Task = (*MeanTask)(nil) + +func (t *MeanTask) Type() TaskType { + return TaskTypeMean +} + +func (t *MeanTask) Run(ctx context.Context, lggr logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + var ( + maybeAllowedFaults MaybeUint64Param + maybePrecision MaybeInt32Param + valuesAndErrs SliceParam + decimalValues DecimalSliceParam + allowedFaults int + faults int + ) + err := multierr.Combine( + errors.Wrap(ResolveParam(&maybeAllowedFaults, From(t.AllowedFaults)), "allowedFaults"), + errors.Wrap(ResolveParam(&maybePrecision, From(VarExpr(t.Precision, vars), t.Precision)), "precision"), + errors.Wrap(ResolveParam(&valuesAndErrs, From(VarExpr(t.Values, vars), JSONWithVarExprs(t.Values, vars, true), Inputs(inputs))), "values"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + if allowed, isSet := maybeAllowedFaults.Uint64(); isSet { + allowedFaults = int(allowed) + } else { + allowedFaults = len(valuesAndErrs) - 1 + } + + values, faults := 
valuesAndErrs.FilterErrors() + if faults > allowedFaults { + return Result{Error: errors.Wrapf(ErrTooManyErrors, "Number of faulty inputs %v to mean task > number allowed faults %v", faults, allowedFaults)}, runInfo + } else if len(values) == 0 { + return Result{Error: errors.Wrap(ErrWrongInputCardinality, "values")}, runInfo + } + + err = decimalValues.UnmarshalPipelineParam(values) + if err != nil { + return Result{Error: errors.Wrapf(ErrBadInput, "values: %v", err)}, runInfo + } + + total := decimal.NewFromInt(0) + for _, val := range decimalValues { + total = total.Add(val) + } + + numValues := decimal.NewFromInt(int64(len(decimalValues))) + + if precision, isSet := maybePrecision.Int32(); isSet { + return Result{Value: total.DivRound(numValues, precision)}, runInfo + } + // Note that decimal library defaults to rounding to 16 precision + //https://github.com/shopspring/decimal/blob/2568a29459476f824f35433dfbef158d6ad8618c/decimal.go#L44 + return Result{Value: total.Div(numValues)}, runInfo +} diff --git a/core/services/pipeline/task.mean_test.go b/core/services/pipeline/task.mean_test.go new file mode 100644 index 00000000..19cb0d77 --- /dev/null +++ b/core/services/pipeline/task.mean_test.go @@ -0,0 +1,215 @@ +package pipeline_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestMeanTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + inputs []pipeline.Result + allowedFaults string + precision string + want pipeline.Result + }{ + { + "odd number of inputs", + []pipeline.Result{{Value: mustDecimal(t, "1")}, {Value: mustDecimal(t, "2")}, {Value: mustDecimal(t, "3")}}, + "1", + "", + pipeline.Result{Value: mustDecimal(t, "2")}, + }, + { + 
"even number of inputs", + []pipeline.Result{{Value: mustDecimal(t, "1")}, {Value: mustDecimal(t, "2")}, {Value: mustDecimal(t, "3")}, {Value: mustDecimal(t, "4")}}, + "2", + "", + pipeline.Result{Value: mustDecimal(t, "2.5")}, + }, + { + "one input", + []pipeline.Result{{Value: mustDecimal(t, "1")}}, + "0", + "", + pipeline.Result{Value: mustDecimal(t, "1")}, + }, + { + "zero inputs", + []pipeline.Result{}, + "0", + "", + pipeline.Result{Error: pipeline.ErrWrongInputCardinality}, + }, + { + "fewer errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Value: mustDecimal(t, "2")}, {Value: mustDecimal(t, "3")}, {Value: mustDecimal(t, "4")}}, + "2", + "", + pipeline.Result{Value: mustDecimal(t, "3")}, + }, + { + "exactly threshold of errors", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "3")}, {Value: mustDecimal(t, "4")}}, + "2", + "", + pipeline.Result{Value: mustDecimal(t, "3.5")}, + }, + { + "more errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "4")}}, + "2", + "", + pipeline.Result{Error: pipeline.ErrTooManyErrors}, + }, + { + "(unspecified AllowedFaults) fewer errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "3")}, {Value: mustDecimal(t, "4")}}, + "", + "", + pipeline.Result{Value: mustDecimal(t, "3.5")}, + }, + { + "(unspecified AllowedFaults) exactly threshold of errors", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "4")}}, + "", + "", + pipeline.Result{Value: mustDecimal(t, "4")}, + }, + { + "(unspecified AllowedFaults) more errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Error: errors.New("")}}, + "", + "", + pipeline.Result{Error: pipeline.ErrTooManyErrors}, + }, + { + "precision", + []pipeline.Result{{Value: 
mustDecimal(t, "1.234")}, {Value: mustDecimal(t, "2.345")}, {Value: mustDecimal(t, "3.456")}, {Value: mustDecimal(t, "4.567")}}, + "1", + "2", + pipeline.Result{Value: mustDecimal(t, "2.90")}, + }, + { + "precision (> 16)", + []pipeline.Result{{Value: mustDecimal(t, "1.11111111111111111111")}, {Value: mustDecimal(t, "3.33333333333333333333")}}, + "1", + "18", + pipeline.Result{Value: mustDecimal(t, "2.222222222222222222")}, + }, + { + "precision (negative)", + []pipeline.Result{{Value: mustDecimal(t, "12.34")}, {Value: mustDecimal(t, "23.45")}, {Value: mustDecimal(t, "34.56")}, {Value: mustDecimal(t, "45.67")}}, + "1", + "-1", + pipeline.Result{Value: mustDecimal(t, "30")}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Run("without vars", func(t *testing.T) { + task := pipeline.MeanTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + AllowedFaults: test.allowedFaults, + Precision: test.precision, + } + output, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if output.Error != nil { + require.Equal(t, test.want.Error, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + require.Equal(t, test.want.Value.(*decimal.Decimal).String(), output.Value.(decimal.Decimal).String()) + require.NoError(t, output.Error) + } + }) + t.Run("with vars", func(t *testing.T) { + var inputs []interface{} + for _, input := range test.inputs { + if input.Error != nil { + inputs = append(inputs, input.Error) + } else { + inputs = append(inputs, input.Value) + } + } + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": inputs}, + }) + task := pipeline.MeanTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Values: "$(foo.bar)", + AllowedFaults: test.allowedFaults, + Precision: test.precision, + } + output, runInfo := 
task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if output.Error != nil { + require.Equal(t, test.want.Error, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + require.Equal(t, test.want.Value.(*decimal.Decimal).String(), output.Value.(decimal.Decimal).String()) + require.NoError(t, output.Error) + } + }) + t.Run("with json vars", func(t *testing.T) { + var inputs []interface{} + for _, input := range test.inputs { + if input.Error != nil { + inputs = append(inputs, input.Error) + } else { + inputs = append(inputs, input.Value) + } + } + var valuesParam string + var vars pipeline.Vars + switch len(inputs) { + case 0: + valuesParam = "[]" + vars = pipeline.NewVarsFrom(nil) + case 1: + valuesParam = "[ $(foo) ]" + vars = pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0]}) + case 2: + valuesParam = "[ $(foo), $(bar) ]" + vars = pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0], "bar": inputs[1]}) + case 3: + valuesParam = "[ $(foo), $(bar), $(chain) ]" + vars = pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0], "bar": inputs[1], "chain": inputs[2]}) + case 4: + valuesParam = "[ $(foo), $(bar), $(chain), $(link) ]" + vars = pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0], "bar": inputs[1], "chain": inputs[2], "link": inputs[3]}) + } + + task := pipeline.MeanTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Values: valuesParam, + AllowedFaults: test.allowedFaults, + Precision: test.precision, + } + output, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if output.Error != nil { + require.Equal(t, test.want.Error, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + require.Equal(t, test.want.Value.(*decimal.Decimal).String(), output.Value.(decimal.Decimal).String()) + 
require.NoError(t, output.Error) + } + }) + }) + } +} diff --git a/core/services/pipeline/task.median.go b/core/services/pipeline/task.median.go new file mode 100644 index 00000000..66542d9c --- /dev/null +++ b/core/services/pipeline/task.median.go @@ -0,0 +1,72 @@ +package pipeline + +import ( + "context" + "sort" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// *decimal.Decimal +type MedianTask struct { + BaseTask `mapstructure:",squash"` + Values string `json:"values"` + AllowedFaults string `json:"allowedFaults"` +} + +var _ Task = (*MedianTask)(nil) + +func (t *MedianTask) Type() TaskType { + return TaskTypeMedian +} + +func (t *MedianTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + var ( + maybeAllowedFaults MaybeUint64Param + valuesAndErrs SliceParam + decimalValues DecimalSliceParam + allowedFaults int + faults int + ) + err := multierr.Combine( + errors.Wrap(ResolveParam(&maybeAllowedFaults, From(t.AllowedFaults)), "allowedFaults"), + errors.Wrap(ResolveParam(&valuesAndErrs, From(VarExpr(t.Values, vars), JSONWithVarExprs(t.Values, vars, true), Inputs(inputs))), "values"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + if allowed, isSet := maybeAllowedFaults.Uint64(); isSet { + allowedFaults = int(allowed) + } else { + allowedFaults = len(valuesAndErrs) - 1 + } + + values, faults := valuesAndErrs.FilterErrors() + if faults > allowedFaults { + return Result{Error: errors.Wrapf(ErrTooManyErrors, "Number of faulty inputs %v to median task > number allowed faults %v", faults, allowedFaults)}, runInfo + } else if len(values) == 0 { + return Result{Error: errors.Wrap(ErrWrongInputCardinality, "no values to medianize")}, runInfo + } + + err = decimalValues.UnmarshalPipelineParam(values) + if err != nil { + return Result{Error: err}, runInfo + } + + 
sort.Slice(decimalValues, func(i, j int) bool { + return decimalValues[i].LessThan(decimalValues[j]) + }) + k := len(decimalValues) / 2 + if len(decimalValues)%2 == 1 { + return Result{Value: decimalValues[k]}, runInfo + } + median := decimalValues[k].Add(decimalValues[k-1]).Div(decimal.NewFromInt(2)) + return Result{Value: median}, runInfo +} diff --git a/core/services/pipeline/task.median_test.go b/core/services/pipeline/task.median_test.go new file mode 100644 index 00000000..301abce7 --- /dev/null +++ b/core/services/pipeline/task.median_test.go @@ -0,0 +1,205 @@ +package pipeline_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestMedianTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + inputs []pipeline.Result + allowedFaults string + want pipeline.Result + }{ + { + "odd number of inputs", + []pipeline.Result{{Value: mustDecimal(t, "1")}, {Value: mustDecimal(t, "2")}, {Value: mustDecimal(t, "3")}}, + "1", + pipeline.Result{Value: mustDecimal(t, "2")}, + }, + { + "even number of inputs", + []pipeline.Result{{Value: mustDecimal(t, "1")}, {Value: mustDecimal(t, "2")}, {Value: mustDecimal(t, "3")}, {Value: mustDecimal(t, "4")}}, + "2", + pipeline.Result{Value: mustDecimal(t, "2.5")}, + }, + { + "one input", + []pipeline.Result{{Value: mustDecimal(t, "1")}}, + "0", + pipeline.Result{Value: mustDecimal(t, "1")}, + }, + { + "zero inputs", + []pipeline.Result{}, + "0", + pipeline.Result{Error: pipeline.ErrWrongInputCardinality}, + }, + { + "fewer errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Value: mustDecimal(t, "2")}, {Value: mustDecimal(t, "3")}, {Value: mustDecimal(t, "4")}}, + "2", + pipeline.Result{Value: 
mustDecimal(t, "3")}, + }, + { + "exactly threshold of errors", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "3")}, {Value: mustDecimal(t, "4")}}, + "2", + pipeline.Result{Value: mustDecimal(t, "3.5")}, + }, + { + "more errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "4")}}, + "2", + pipeline.Result{Error: pipeline.ErrTooManyErrors}, + }, + { + "(unspecified AllowedFaults) fewer errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "3")}, {Value: mustDecimal(t, "4")}}, + "", + pipeline.Result{Value: mustDecimal(t, "3.5")}, + }, + { + "(unspecified AllowedFaults) exactly threshold of errors", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "4")}}, + "", + pipeline.Result{Value: mustDecimal(t, "4")}, + }, + { + "(unspecified AllowedFaults) more errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Error: errors.New("")}}, + "", + pipeline.Result{Error: pipeline.ErrTooManyErrors}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Run("without vars", func(t *testing.T) { + task := pipeline.MedianTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + AllowedFaults: test.allowedFaults, + } + output, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if output.Error != nil { + require.Equal(t, test.want.Error, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + require.Equal(t, test.want.Value.(*decimal.Decimal).String(), output.Value.(decimal.Decimal).String()) + require.NoError(t, output.Error) + } + }) + t.Run("with vars", func(t *testing.T) { + var inputs 
[]interface{} + for _, input := range test.inputs { + if input.Error != nil { + inputs = append(inputs, input.Error) + } else { + inputs = append(inputs, input.Value) + } + } + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": inputs}, + }) + task := pipeline.MedianTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Values: "$(foo.bar)", + AllowedFaults: test.allowedFaults, + } + output, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if output.Error != nil { + require.Equal(t, test.want.Error, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + require.Equal(t, test.want.Value.(*decimal.Decimal).String(), output.Value.(decimal.Decimal).String()) + require.NoError(t, output.Error) + } + }) + t.Run("with json vars", func(t *testing.T) { + var inputs []interface{} + for _, input := range test.inputs { + if input.Error != nil { + inputs = append(inputs, input.Error) + } else { + inputs = append(inputs, input.Value) + } + } + var valuesParam string + var vars pipeline.Vars + switch len(inputs) { + case 0: + valuesParam = "[]" + vars = pipeline.NewVarsFrom(nil) + case 1: + valuesParam = "[ $(foo) ]" + vars = pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0]}) + case 3: + valuesParam = "[ $(foo), $(bar), $(chain) ]" + vars = pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0], "bar": inputs[1], "chain": inputs[2]}) + case 4: + valuesParam = "[ $(foo), $(bar), $(chain), $(link) ]" + vars = pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0], "bar": inputs[1], "chain": inputs[2], "link": inputs[3]}) + } + + task := pipeline.MedianTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Values: valuesParam, + AllowedFaults: test.allowedFaults, + } + output, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + assert.False(t, 
runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if output.Error != nil { + require.Equal(t, test.want.Error, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + require.Equal(t, test.want.Value.(*decimal.Decimal).String(), output.Value.(decimal.Decimal).String()) + require.NoError(t, output.Error) + } + }) + }) + } +} + +func TestMedianTask_AllowedFaults_Unmarshal(t *testing.T) { + t.Parallel() + + p, err := pipeline.Parse(` + // data source 1 + ds1 [type=bridge name=voter_turnout]; + ds1_parse [type=jsonparse path="one,two"]; + ds1_multiply [type=multiply times=1.23]; + + // data source 2 + ds2 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData=<{"hi": "hello"}>]; + ds2_parse [type=jsonparse path="three,four"]; + ds2_multiply [type=multiply times=4.56]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0 allowedFaults=10]; + answer2 [type=bridge name=election_winner index=1]; +`) + require.NoError(t, err) + for _, task := range p.Tasks { + if task.Type() == pipeline.TaskTypeMedian { + require.Equal(t, "10", task.(*pipeline.MedianTask).AllowedFaults) + } + } +} diff --git a/core/services/pipeline/task.memo.go b/core/services/pipeline/task.memo.go new file mode 100644 index 00000000..703a39c5 --- /dev/null +++ b/core/services/pipeline/task.memo.go @@ -0,0 +1,39 @@ +package pipeline + +import ( + "context" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Memo task returns its value as a result +// +// e.g. 
[type=memo value=10] => 10 + +type MemoTask struct { + BaseTask `mapstructure:",squash"` + Value string `json:"value"` +} + +var _ Task = (*MemoTask)(nil) + +func (t *MemoTask) Type() TaskType { + return TaskTypeMemo +} + +func (t *MemoTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (Result, RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task value missing")}, RunInfo{} + } + + var value ObjectParam + err = errors.Wrap(ResolveParam(&value, From(JSONWithVarExprs(t.Value, vars, false), Input(inputs, 0))), "value") + if err != nil { + return Result{Error: err}, RunInfo{} + } + + return Result{Value: value}, RunInfo{} +} diff --git a/core/services/pipeline/task.memo_test.go b/core/services/pipeline/task.memo_test.go new file mode 100644 index 00000000..3fcdec2e --- /dev/null +++ b/core/services/pipeline/task.memo_test.go @@ -0,0 +1,56 @@ +package pipeline_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestMemoTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + output string + }{ + {"identity", pipeline.ObjectParam{Type: pipeline.BoolType, BoolValue: true}, "true"}, + + {"nil", nil, "null"}, + + {"bool", true, "true"}, + {"bool false", false, "false"}, + + {"integer", 17, `"17"`}, + {"negative integer", -19, `"-19"`}, + {"uint", uint(17), `"17"`}, + {"float", 17.3, `"17.3"`}, + {"negative float", -17.3, `"-17.3"`}, + + {"string", "hello world", `"hello world"`}, + + {"array", []int{17, 19}, "[17,19]"}, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + var value pipeline.ObjectParam + err := 
value.UnmarshalPipelineParam(test.input) + require.NoError(t, err) + + task := pipeline.MemoTask{BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0)} + result, _ := task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: test.input}}) + require.NoError(t, result.Error) + marshalledValue, err := result.Value.(pipeline.ObjectParam).Marshal() + require.NoError(t, err) + assert.Equal(t, test.output, marshalledValue) + }) + } +} diff --git a/core/services/pipeline/task.merge.go b/core/services/pipeline/task.merge.go new file mode 100644 index 00000000..5d70c5df --- /dev/null +++ b/core/services/pipeline/task.merge.go @@ -0,0 +1,52 @@ +package pipeline + +import ( + "context" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// map[string]interface{} +type MergeTask struct { + BaseTask `mapstructure:",squash"` + Left string `json:"left"` + Right string `json:"right"` +} + +var _ Task = (*MergeTask)(nil) + +func (t *MergeTask) Type() TaskType { + return TaskTypeMerge +} + +func (t *MergeTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var ( + lMap MapParam + rMap MapParam + ) + err = multierr.Combine( + errors.Wrap(ResolveParam(&lMap, From(VarExpr(t.Left, vars), JSONWithVarExprs(t.Left, vars, false), Input(inputs, 0))), "left-side"), + errors.Wrap(ResolveParam(&rMap, From(VarExpr(t.Right, vars), JSONWithVarExprs(t.Right, vars, false))), "right-side"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + // clobber lMap with rMap values + // "nil" values on the right will clobber + for key, value := range rMap { + lMap[key] = value + } + + return Result{Value: lMap.Map()}, runInfo +} diff --git a/core/services/pipeline/task.merge_test.go 
b/core/services/pipeline/task.merge_test.go new file mode 100644 index 00000000..d8841779 --- /dev/null +++ b/core/services/pipeline/task.merge_test.go @@ -0,0 +1,188 @@ +package pipeline_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestMergeTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + left string + right string + vars pipeline.Vars + inputs []pipeline.Result + wantData interface{} + wantError bool + wantErrorContains string + }{ + { + "implicit left explicit right", + "", + `{"foo": "42", "bar": null, "blobber": false}`, + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"foo": "baz", "qux": 99, "flibber": null, "baz": true}`}}, + map[string]interface{}{ + "foo": "42", + "qux": float64(99), + "bar": nil, + "flibber": nil, + "baz": true, + "blobber": false, + }, + false, + "", + }, + { + "explicit left explicit right", + `{"foo": "baz", "qux": 99, "flibber": null}`, + `{"foo": 42, "qux": null}`, + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"ignored": true}`}}, + map[string]interface{}{ + "foo": int64(42), + "qux": nil, + "flibber": nil, + }, + false, + "", + }, + { + "directions reversed", + `{"foo": 42, "bar": null}`, + `{"foo": "baz", "qux": 99, "flibber": null}`, + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"ignored": true}`}}, + map[string]interface{}{ + "foo": "baz", + "qux": int64(99), + "bar": nil, + "flibber": nil, + }, + false, + "", + }, + { + "invalid implicit left explicit right", + ``, + `{"foo": 42, "bar": null}`, + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `"not a map"`}}, + nil, + true, + "left-side: json: cannot unmarshal string", + }, + { + "implicit left invalid explicit right", + "", + `not a 
map`, + pipeline.NewVarsFrom(nil), + []pipeline.Result{{Value: `{"foo": "baz", "qux": 99, "flibber": null, "baz": true}`}}, + nil, + true, + `right-side`, + }, + { + "explicit left variable data on right", + `{"foo": 42, "bar": null}`, + "$(someInput)", + pipeline.NewVarsFrom(map[string]interface{}{ + "someInput": map[string]interface{}{ + "foo": "baz", + "qux": 99, + "flibber": nil, + }, + }), + []pipeline.Result{}, + map[string]interface{}{ + "foo": "baz", + "qux": 99, + "bar": nil, + "flibber": nil, + }, + false, + "", + }, + { + "explicit left invalid variable data on right", + `{"foo": 42, "bar": null}`, + "$(someInput)", + pipeline.NewVarsFrom(map[string]interface{}{ + "someInput": "this is a string", + }), + []pipeline.Result{}, + nil, + true, + `right-side`, + }, + { + "variable in left", + `{"foo": 42, "bar": null}`, + `{"flibber": $(someInput)}`, + pipeline.NewVarsFrom(map[string]interface{}{ + "someInput": "this is a string", + }), + []pipeline.Result{}, + map[string]interface{}{ + "foo": int64(42), + "bar": nil, + "flibber": "this is a string", + }, + false, + "", + }, + { + "variable in right", + `{"flibber": $(someInput)}`, + `{"foo": 42, "bar": null}`, + pipeline.NewVarsFrom(map[string]interface{}{ + "someInput": "this is a string", + }), + []pipeline.Result{}, + map[string]interface{}{ + "foo": int64(42), + "bar": nil, + "flibber": "this is a string", + }, + false, + "", + }, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + task := pipeline.MergeTask{ + BaseTask: pipeline.NewBaseTask(0, "merge", nil, nil, 0), + Left: test.left, + Right: test.right, + } + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + + if test.wantError { + if test.wantErrorContains != "" { + require.Contains(t, result.Error.Error(), test.wantErrorContains) + } + + require.Nil(t, result.Value) + val, err := 
test.vars.Get("merge") + require.Equal(t, pipeline.ErrKeypathNotFound, errors.Cause(err)) + require.Nil(t, val) + } else { + assert.NoError(t, result.Error) + assert.Equal(t, test.wantData, result.Value) + } + }) + } +} diff --git a/core/services/pipeline/task.mode.go b/core/services/pipeline/task.mode.go new file mode 100644 index 00000000..707bd633 --- /dev/null +++ b/core/services/pipeline/task.mode.go @@ -0,0 +1,112 @@ +package pipeline + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// map[string]interface{}{ +// "results": []interface{} containing any other type other pipeline tasks can return +// "occurrences": (int64) +// } +type ModeTask struct { + BaseTask `mapstructure:",squash"` + Values string `json:"values"` + AllowedFaults string `json:"allowedFaults"` +} + +var _ Task = (*ModeTask)(nil) + +func (t *ModeTask) Type() TaskType { + return TaskTypeMode +} + +func (t *ModeTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + var ( + maybeAllowedFaults MaybeUint64Param + valuesAndErrs SliceParam + allowedFaults int + faults int + ) + err := multierr.Combine( + errors.Wrap(ResolveParam(&maybeAllowedFaults, From(t.AllowedFaults)), "allowedFaults"), + errors.Wrap(ResolveParam(&valuesAndErrs, From(VarExpr(t.Values, vars), JSONWithVarExprs(t.Values, vars, true), Inputs(inputs))), "values"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + if allowed, isSet := maybeAllowedFaults.Uint64(); isSet { + allowedFaults = int(allowed) + } else { + allowedFaults = len(valuesAndErrs) - 1 + } + + values, faults := valuesAndErrs.FilterErrors() + if faults > allowedFaults { + return Result{Error: errors.Wrapf(ErrTooManyErrors, "Number of faulty inputs %v to mode task > number allowed faults %v", faults, allowedFaults)}, 
runInfo + } else if len(values) == 0 { + return Result{Error: errors.Wrap(ErrWrongInputCardinality, "values")}, runInfo + } + + type entry struct { + count uint64 + original interface{} + } + + var ( + m = make(map[string]entry, len(values)) + max uint64 + modes []interface{} + ) + for _, val := range values { + var comparable string + switch v := val.(type) { + case []byte: + comparable = string(v) + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float64, float32, + string, bool: + comparable = fmt.Sprintf("%v", v) + case *big.Int: + comparable = v.String() + case big.Int: + comparable = v.String() + case *decimal.Decimal: + comparable = v.String() + case decimal.Decimal: + comparable = v.String() + default: + bs, err := json.Marshal(v) + if err != nil { + return Result{Error: errors.Wrapf(ErrBadInput, "could not json stringify value: %v", err)}, runInfo + } + comparable = string(bs) + } + + m[comparable] = entry{ + count: m[comparable].count + 1, + original: val, + } + + if m[comparable].count > max { + modes = []interface{}{val} + max = m[comparable].count + } else if m[comparable].count == max { + modes = append(modes, val) + } + } + return Result{Value: map[string]interface{}{ + "results": modes, + "occurrences": max, + }}, runInfo +} diff --git a/core/services/pipeline/task.mode_test.go b/core/services/pipeline/task.mode_test.go new file mode 100644 index 00000000..26f2902a --- /dev/null +++ b/core/services/pipeline/task.mode_test.go @@ -0,0 +1,195 @@ +package pipeline_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestModeTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + inputs 
[]pipeline.Result + allowedFaults string + wantResults []interface{} + wantOccurrences uint64 + wantErrorCause error + }{ + { + "happy (one winner)", + []pipeline.Result{{Value: "foo"}, {Value: "foo"}, {Value: "bar"}, {Value: true}}, + "1", + []interface{}{"foo"}, 2, nil, + }, + { + "happy (multiple winners)", + []pipeline.Result{{Value: "foo"}, {Value: "foo"}, {Value: "bar"}, {Value: "bar"}}, + "1", + []interface{}{"foo", "bar"}, 2, nil, + }, + { + "happy (one winner expressed as different types)", + []pipeline.Result{{Value: mustDecimal(t, "1.234")}, {Value: float64(1.234)}, {Value: float32(1.234)}, {Value: "1.234"}}, + "1", + []interface{}{"1.234"}, 4, nil, + }, + { + "one input", + []pipeline.Result{{Value: common.Address{1}}}, + "0", + []interface{}{common.Address{1}}, 1, nil, + }, + { + "zero inputs", + []pipeline.Result{}, + "0", + nil, 0, pipeline.ErrWrongInputCardinality, + }, + { + "fewer errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Value: mustDecimal(t, "2")}, {Value: mustDecimal(t, "2")}, {Value: []byte("foo bar")}}, + "2", + []interface{}{mustDecimal(t, "2")}, 2, nil, + }, + { + "exactly threshold of errors", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Value: []interface{}{1, 2, 3}}, {Value: []interface{}{1, 2, 3}}}, + "2", + []interface{}{[]interface{}{1, 2, 3}}, 2, nil, + }, + { + "more errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "4")}}, + "2", + nil, 0, pipeline.ErrTooManyErrors, + }, + { + "(unspecified AllowedFaults) fewer errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Value: big.NewInt(123)}, {Value: big.NewInt(123)}}, + "", + []interface{}{big.NewInt(123)}, 2, nil, + }, + { + "(unspecified AllowedFaults) exactly threshold of errors", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Error: errors.New("")}, {Value: 123}}, + "", + 
[]interface{}{123}, 1, nil, + }, + { + "(unspecified AllowedFaults) more errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Error: errors.New("")}}, + "", + nil, 0, pipeline.ErrTooManyErrors, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Run("without vars", func(t *testing.T) { + task := pipeline.ModeTask{ + BaseTask: pipeline.NewBaseTask(0, "mode", nil, nil, 0), + AllowedFaults: test.allowedFaults, + } + output, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if output.Error != nil { + require.Equal(t, test.wantErrorCause, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + require.Equal(t, map[string]interface{}{ + "results": test.wantResults, + "occurrences": test.wantOccurrences, + }, output.Value) + require.NoError(t, output.Error) + } + }) + t.Run("with vars", func(t *testing.T) { + var inputs []interface{} + for _, input := range test.inputs { + if input.Error != nil { + inputs = append(inputs, input.Error) + } else { + inputs = append(inputs, input.Value) + } + } + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": inputs}, + }) + task := pipeline.ModeTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Values: "$(foo.bar)", + AllowedFaults: test.allowedFaults, + } + output, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if output.Error != nil { + require.Equal(t, test.wantErrorCause, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + require.Equal(t, map[string]interface{}{ + "results": test.wantResults, + "occurrences": test.wantOccurrences, + }, output.Value) + require.NoError(t, output.Error) + } + }) + t.Run("with json vars", func(t 
*testing.T) { + var inputs []interface{} + for _, input := range test.inputs { + if input.Error != nil { + inputs = append(inputs, input.Error) + } else { + inputs = append(inputs, input.Value) + } + } + var valuesParam string + var vars pipeline.Vars + switch len(inputs) { + case 0: + valuesParam = "[]" + vars = pipeline.NewVarsFrom(nil) + case 1: + valuesParam = "[ $(foo) ]" + vars = pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0]}) + case 3: + valuesParam = "[ $(foo), $(bar), $(chain) ]" + vars = pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0], "bar": inputs[1], "chain": inputs[2]}) + case 4: + valuesParam = "[ $(foo), $(bar), $(chain), $(link) ]" + vars = pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0], "bar": inputs[1], "chain": inputs[2], "link": inputs[3]}) + } + + task := pipeline.ModeTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Values: valuesParam, + AllowedFaults: test.allowedFaults, + } + output, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if output.Error != nil { + require.Equal(t, test.wantErrorCause, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + require.Equal(t, map[string]interface{}{ + "results": test.wantResults, + "occurrences": test.wantOccurrences, + }, output.Value) + require.NoError(t, output.Error) + } + }) + }) + } +} diff --git a/core/services/pipeline/task.multiply.go b/core/services/pipeline/task.multiply.go new file mode 100644 index 00000000..d29480b8 --- /dev/null +++ b/core/services/pipeline/task.multiply.go @@ -0,0 +1,57 @@ +package pipeline + +import ( + "context" + "math" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// *decimal.Decimal +type MultiplyTask struct { + BaseTask `mapstructure:",squash"` + Input string `json:"input"` + Times string 
`json:"times"` +} + +var ( + _ Task = (*MultiplyTask)(nil) + ErrMultiplyOverlow = errors.New("multiply overflow") +) + +func (t *MultiplyTask) Type() TaskType { + return TaskTypeMultiply +} + +func (t *MultiplyTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var ( + a DecimalParam + b DecimalParam + ) + + err = multierr.Combine( + errors.Wrap(ResolveParam(&a, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), "input"), + errors.Wrap(ResolveParam(&b, From(VarExpr(t.Times, vars), NonemptyString(t.Times))), "times"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + newExp := int64(a.Decimal().Exponent()) + int64(b.Decimal().Exponent()) + if newExp > math.MaxInt32 || newExp < math.MinInt32 { + return Result{Error: ErrMultiplyOverlow}, runInfo + } + + value := a.Decimal().Mul(b.Decimal()) + return Result{Value: value}, runInfo +} diff --git a/core/services/pipeline/task.multiply_test.go b/core/services/pipeline/task.multiply_test.go new file mode 100644 index 00000000..fff68415 --- /dev/null +++ b/core/services/pipeline/task.multiply_test.go @@ -0,0 +1,210 @@ +package pipeline_test + +import ( + "fmt" + "testing" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func mustDecimal(t *testing.T, arg string) *decimal.Decimal { + ret, err := decimal.NewFromString(arg) + require.NoError(t, err) + return &ret +} + +func TestMultiplyTask_Happy(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + times string + want decimal.Decimal + }{ + 
{"string, by 100", "1.23", "100", *mustDecimal(t, "123")}, + {"string, negative", "1.23", "-5", *mustDecimal(t, "-6.15")}, + {"string, no times parameter", "1.23", "1", *mustDecimal(t, "1.23")}, + {"string, zero", "1.23", "0", *mustDecimal(t, "0")}, + {"string, large value", "1.23", "1000000000000000000", *mustDecimal(t, "1230000000000000000")}, + + {"int, by 100", int(2), "100", *mustDecimal(t, "200")}, + {"int, negative", int(2), "-5", *mustDecimal(t, "-10")}, + {"int, no times parameter", int(2), "1", *mustDecimal(t, "2")}, + {"int, zero", int(2), "0", *mustDecimal(t, "0")}, + {"int, large value", int(2), "1000000000000000000", *mustDecimal(t, "2000000000000000000")}, + + {"int8, by 100", int8(2), "100", *mustDecimal(t, "200")}, + {"int8, negative", int8(2), "-5", *mustDecimal(t, "-10")}, + {"int8, no times parameter", int8(2), "1", *mustDecimal(t, "2")}, + {"int8, zero", int8(2), "0", *mustDecimal(t, "0")}, + {"int8, large value", int8(2), "1000000000000000000", *mustDecimal(t, "2000000000000000000")}, + + {"int16, by 100", int16(2), "100", *mustDecimal(t, "200")}, + {"int16, negative", int16(2), "-5", *mustDecimal(t, "-10")}, + {"int16, no times parameter", int16(2), "1", *mustDecimal(t, "2")}, + {"int16, zero", int16(2), "0", *mustDecimal(t, "0")}, + {"int16, large value", int16(2), "1000000000000000000", *mustDecimal(t, "2000000000000000000")}, + + {"int32, by 100", int32(2), "100", *mustDecimal(t, "200")}, + {"int32, negative", int32(2), "-5", *mustDecimal(t, "-10")}, + {"int32, no times parameter", int32(2), "1", *mustDecimal(t, "2")}, + {"int32, zero", int32(2), "0", *mustDecimal(t, "0")}, + {"int32, large value", int32(2), "1000000000000000000", *mustDecimal(t, "2000000000000000000")}, + + {"int64, by 100", int64(2), "100", *mustDecimal(t, "200")}, + {"int64, negative", int64(2), "-5", *mustDecimal(t, "-10")}, + {"int64, no times parameter", int64(2), "1", *mustDecimal(t, "2")}, + {"int64, zero", int64(2), "0", *mustDecimal(t, "0")}, + {"int64, large 
value", int64(2), "1000000000000000000", *mustDecimal(t, "2000000000000000000")}, + + {"uint, by 100", uint(2), "100", *mustDecimal(t, "200")}, + {"uint, negative", uint(2), "-5", *mustDecimal(t, "-10")}, + {"uint, no times parameter", uint(2), "1", *mustDecimal(t, "2")}, + {"uint, zero", uint(2), "0", *mustDecimal(t, "0")}, + {"uint, large value", uint(2), "1000000000000000000", *mustDecimal(t, "2000000000000000000")}, + + {"uint8, by 100", uint8(2), "100", *mustDecimal(t, "200")}, + {"uint8, negative", uint8(2), "-5", *mustDecimal(t, "-10")}, + {"uint8, no times parameter", uint8(2), "1", *mustDecimal(t, "2")}, + {"uint8, zero", uint8(2), "0", *mustDecimal(t, "0")}, + {"uint8, large value", uint8(2), "1000000000000000000", *mustDecimal(t, "2000000000000000000")}, + + {"uint16, by 100", uint16(2), "100", *mustDecimal(t, "200")}, + {"uint16, negative", uint16(2), "-5", *mustDecimal(t, "-10")}, + {"uint16, no times parameter", uint16(2), "1", *mustDecimal(t, "2")}, + {"uint16, zero", uint16(2), "0", *mustDecimal(t, "0")}, + {"uint16, large value", uint16(2), "1000000000000000000", *mustDecimal(t, "2000000000000000000")}, + + {"uint32, by 100", uint32(2), "100", *mustDecimal(t, "200")}, + {"uint32, negative", uint32(2), "-5", *mustDecimal(t, "-10")}, + {"uint32, no times parameter", uint32(2), "1", *mustDecimal(t, "2")}, + {"uint32, zero", uint32(2), "0", *mustDecimal(t, "0")}, + {"uint32, large value", uint32(2), "1000000000000000000", *mustDecimal(t, "2000000000000000000")}, + + {"uint64, by 100", uint64(2), "100", *mustDecimal(t, "200")}, + {"uint64, negative", uint64(2), "-5", *mustDecimal(t, "-10")}, + {"uint64, no times parameter", uint64(2), "1", *mustDecimal(t, "2")}, + {"uint64, zero", uint64(2), "0", *mustDecimal(t, "0")}, + {"uint64, large value", uint64(2), "1000000000000000000", *mustDecimal(t, "2000000000000000000")}, + + {"float32, by 100", float32(1.23), "10", *mustDecimal(t, "12.3")}, + {"float32, negative", float32(1.23), "-5", *mustDecimal(t, 
"-6.15")}, + {"float32, no times parameter", float32(1.23), "1", *mustDecimal(t, "1.23")}, + {"float32, zero", float32(1.23), "0", *mustDecimal(t, "0")}, + {"float32, large value", float32(1.23), "1000000000000000000", *mustDecimal(t, "1230000000000000000")}, + + {"float64, by 100", float64(1.23), "10", *mustDecimal(t, "12.3")}, + {"float64, negative", float64(1.23), "-5", *mustDecimal(t, "-6.15")}, + {"float64, no times parameter", float64(1.23), "1", *mustDecimal(t, "1.23")}, + {"float64, zero", float64(1.23), "0", *mustDecimal(t, "0")}, + {"float64, large value", float64(1.23), "1000000000000000000", *mustDecimal(t, "1230000000000000000")}, + } + + for _, test := range tests { + assertOK := func(result pipeline.Result, runInfo pipeline.RunInfo) { + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.NoError(t, result.Error) + require.Equal(t, test.want.String(), result.Value.(decimal.Decimal).String()) + } + t.Run(test.name, func(t *testing.T) { + t.Run("without vars through job DAG", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.MultiplyTask{BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), Times: test.times} + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: test.input}})) + }) + t.Run("without vars through input param", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.MultiplyTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: fmt.Sprintf("%v", test.input), + Times: test.times, + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + t.Run("with vars", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": test.input}, + "chain": map[string]interface{}{"link": test.times}, + }) + task := pipeline.MultiplyTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: "$(foo.bar)", + 
Times: "$(chain.link)", + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + }) + } +} + +func TestMultiplyTask_Unhappy(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + times string + input string + inputs []pipeline.Result + vars pipeline.Vars + wantErrorCause error + wantErrorContains string + }{ + {"map as input from inputs", "100", "", []pipeline.Result{{Value: map[string]interface{}{"chain": "link"}}}, pipeline.NewVarsFrom(nil), pipeline.ErrBadInput, "input"}, + {"map as input from var", "100", "$(foo)", nil, pipeline.NewVarsFrom(map[string]interface{}{"foo": map[string]interface{}{"chain": "link"}}), pipeline.ErrBadInput, "input"}, + {"slice as input from inputs", "100", "", []pipeline.Result{{Value: []interface{}{"chain", "link"}}}, pipeline.NewVarsFrom(nil), pipeline.ErrBadInput, "input"}, + {"slice as input from var", "100", "$(foo)", nil, pipeline.NewVarsFrom(map[string]interface{}{"foo": []interface{}{"chain", "link"}}), pipeline.ErrBadInput, "input"}, + {"input as missing var", "100", "$(foo)", nil, pipeline.NewVarsFrom(nil), pipeline.ErrKeypathNotFound, "input"}, + {"times as missing var", "$(foo)", "", []pipeline.Result{{Value: "123"}}, pipeline.NewVarsFrom(nil), pipeline.ErrKeypathNotFound, "times"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + task := pipeline.MultiplyTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: test.input, + Times: test.times, + } + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), test.vars, test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.Equal(t, test.wantErrorCause, errors.Cause(result.Error)) + if test.wantErrorContains != "" { + require.Contains(t, result.Error.Error(), test.wantErrorContains) + } + }) + } +} + +func TestMultiplyTask_Overflow(t *testing.T) { + t.Parallel() + + d1, err := 
decimal.NewFromString("6.34e-1147483647") + assert.NoError(t, err) + d2, err := decimal.NewFromString("6.34e-1147483647") + assert.NoError(t, err) + + task := pipeline.MultiplyTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: "$(a)", + Times: "$(b)", + } + + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "a": d1, + "b": d2, + }) + + result, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: "123"}}) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.Equal(t, pipeline.ErrMultiplyOverlow, errors.Cause(result.Error)) +} diff --git a/core/services/pipeline/task.panic.go b/core/services/pipeline/task.panic.go new file mode 100644 index 00000000..8659e13a --- /dev/null +++ b/core/services/pipeline/task.panic.go @@ -0,0 +1,22 @@ +package pipeline + +import ( + "context" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +type PanicTask struct { + BaseTask `mapstructure:",squash"` + Msg string +} + +var _ Task = (*PanicTask)(nil) + +func (t *PanicTask) Type() TaskType { + return TaskTypePanic +} + +func (t *PanicTask) Run(_ context.Context, _ logger.Logger, vars Vars, _ []Result) (result Result, runInfo RunInfo) { + panic(t.Msg) +} diff --git a/core/services/pipeline/task.sum.go b/core/services/pipeline/task.sum.go new file mode 100644 index 00000000..7a19678d --- /dev/null +++ b/core/services/pipeline/task.sum.go @@ -0,0 +1,67 @@ +package pipeline + +import ( + "context" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// *decimal.Decimal +type SumTask struct { + BaseTask `mapstructure:",squash"` + Values string `json:"values"` + AllowedFaults string `json:"allowedFaults"` +} + +var _ Task = (*SumTask)(nil) + +func (t *SumTask) Type() TaskType { + return TaskTypeSum +} + +func (t *SumTask) Run(_ context.Context, _ logger.Logger, vars Vars, 
inputs []Result) (result Result, runInfo RunInfo) { + var ( + maybeAllowedFaults MaybeUint64Param + valuesAndErrs SliceParam + decimalValues DecimalSliceParam + allowedFaults int + faults int + ) + err := multierr.Combine( + errors.Wrap(ResolveParam(&maybeAllowedFaults, From(t.AllowedFaults)), "allowedFaults"), + errors.Wrap(ResolveParam(&valuesAndErrs, From(VarExpr(t.Values, vars), JSONWithVarExprs(t.Values, vars, true), Inputs(inputs))), "values"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + if allowed, isSet := maybeAllowedFaults.Uint64(); isSet { + allowedFaults = int(allowed) + } else { + allowedFaults = len(valuesAndErrs) - 1 + } + + values, faults := valuesAndErrs.FilterErrors() + if faults > allowedFaults { + return Result{Error: errors.Wrapf(ErrTooManyErrors, "Number of faulty inputs %v to sum task > number allowed faults %v", faults, allowedFaults)}, runInfo + } else if len(values) == 0 { + return Result{Error: errors.Wrap(ErrWrongInputCardinality, "values")}, runInfo + } + + err = decimalValues.UnmarshalPipelineParam(values) + if err != nil { + return Result{Error: errors.Wrapf(ErrBadInput, "values: %v", err)}, runInfo + } + + sum := decimal.NewFromInt(0) + for _, val := range decimalValues { + sum = sum.Add(val) + } + return Result{Value: sum}, runInfo +} diff --git a/core/services/pipeline/task.sum_test.go b/core/services/pipeline/task.sum_test.go new file mode 100644 index 00000000..41ad86f6 --- /dev/null +++ b/core/services/pipeline/task.sum_test.go @@ -0,0 +1,172 @@ +package pipeline_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestSumTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + inputs 
[]pipeline.Result + allowedFaults string + want pipeline.Result + }{ + { + "happy", + []pipeline.Result{{Value: mustDecimal(t, "1")}, {Value: mustDecimal(t, "2")}, {Value: mustDecimal(t, "3")}}, + "1", + pipeline.Result{Value: mustDecimal(t, "6")}, + }, + { + "happy (one input)", + []pipeline.Result{{Value: mustDecimal(t, "1")}}, + "0", + pipeline.Result{Value: mustDecimal(t, "1")}, + }, + { + "zero inputs", + []pipeline.Result{}, + "0", + pipeline.Result{Error: pipeline.ErrWrongInputCardinality}, + }, + { + "fewer errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Value: mustDecimal(t, "2")}, {Value: mustDecimal(t, "3")}, {Value: mustDecimal(t, "4")}}, + "2", + pipeline.Result{Value: mustDecimal(t, "9")}, + }, + { + "exactly threshold of errors", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "3")}, {Value: mustDecimal(t, "4")}}, + "2", + pipeline.Result{Value: mustDecimal(t, "7")}, + }, + { + "more errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "4")}}, + "2", + pipeline.Result{Error: pipeline.ErrTooManyErrors}, + }, + { + "(unspecified AllowedFaults) fewer errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "3")}, {Value: mustDecimal(t, "4")}}, + "", + pipeline.Result{Value: mustDecimal(t, "7")}, + }, + { + "(unspecified AllowedFaults) exactly threshold of errors", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Error: errors.New("")}, {Value: mustDecimal(t, "4")}}, + "", + pipeline.Result{Value: mustDecimal(t, "4")}, + }, + { + "(unspecified AllowedFaults) more errors than threshold", + []pipeline.Result{{Error: errors.New("")}, {Error: errors.New("")}, {Error: errors.New("")}}, + "", + pipeline.Result{Error: pipeline.ErrTooManyErrors}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + 
t.Run("without vars", func(t *testing.T) { + task := pipeline.SumTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + AllowedFaults: test.allowedFaults, + } + output, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), pipeline.NewVarsFrom(nil), test.inputs) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if output.Error != nil { + require.Equal(t, test.want.Error, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + require.Equal(t, test.want.Value.(*decimal.Decimal).String(), output.Value.(decimal.Decimal).String()) + require.NoError(t, output.Error) + } + }) + t.Run("with vars", func(t *testing.T) { + var inputs []interface{} + for _, input := range test.inputs { + if input.Error != nil { + inputs = append(inputs, input.Error) + } else { + inputs = append(inputs, input.Value) + } + } + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": inputs}, + }) + task := pipeline.SumTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Values: "$(foo.bar)", + AllowedFaults: test.allowedFaults, + } + output, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + + if output.Error != nil { + require.Equal(t, test.want.Error, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + require.Equal(t, test.want.Value.(*decimal.Decimal).String(), output.Value.(decimal.Decimal).String()) + require.NoError(t, output.Error) + } + }) + t.Run("with json vars", func(t *testing.T) { + var inputs []interface{} + for _, input := range test.inputs { + if input.Error != nil { + inputs = append(inputs, input.Error) + } else { + inputs = append(inputs, input.Value) + } + } + var valuesParam string + var vars pipeline.Vars + switch len(inputs) { + case 0: + valuesParam = "[]" + vars = pipeline.NewVarsFrom(nil) + case 1: + valuesParam = "[ $(foo) ]" + vars = 
pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0]}) + case 3: + valuesParam = "[ $(foo), $(bar), $(chain) ]" + vars = pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0], "bar": inputs[1], "chain": inputs[2]}) + case 4: + valuesParam = "[ $(foo), $(bar), $(chain), $(link) ]" + vars = pipeline.NewVarsFrom(map[string]interface{}{"foo": inputs[0], "bar": inputs[1], "chain": inputs[2], "link": inputs[3]}) + } + + task := pipeline.SumTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Values: valuesParam, + AllowedFaults: test.allowedFaults, + } + output, runInfo := task.Run(testutils.Context(t), logger.TestLogger(t), vars, nil) + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + if output.Error != nil { + require.Equal(t, test.want.Error, errors.Cause(output.Error)) + require.Nil(t, output.Value) + } else { + require.Equal(t, test.want.Value.(*decimal.Decimal).String(), output.Value.(decimal.Decimal).String()) + require.NoError(t, output.Error) + } + }) + }) + } +} diff --git a/core/services/pipeline/task.uppercase.go b/core/services/pipeline/task.uppercase.go new file mode 100644 index 00000000..4f37b770 --- /dev/null +++ b/core/services/pipeline/task.uppercase.go @@ -0,0 +1,43 @@ +package pipeline + +import ( + "context" + "strings" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Return types: +// +// string +type UppercaseTask struct { + BaseTask `mapstructure:",squash"` + Input string `json:"input"` +} + +var _ Task = (*UppercaseTask)(nil) + +func (t *UppercaseTask) Type() TaskType { + return TaskTypeUppercase +} + +func (t *UppercaseTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + _, err := CheckInputs(inputs, 0, 1, 0) + if err != nil { + return Result{Error: errors.Wrap(err, "task inputs")}, runInfo + } + + var input StringParam + + err = multierr.Combine( + 
errors.Wrap(ResolveParam(&input, From(VarExpr(t.Input, vars), NonemptyString(t.Input), Input(inputs, 0))), "input"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + return Result{Value: strings.ToUpper(string(input))}, runInfo +} diff --git a/core/services/pipeline/task.uppercase_test.go b/core/services/pipeline/task.uppercase_test.go new file mode 100644 index 00000000..92a6a1cd --- /dev/null +++ b/core/services/pipeline/task.uppercase_test.go @@ -0,0 +1,70 @@ +package pipeline_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestUppercaseTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + want string + }{ + {"uppercase string", "UPPERCASE", "UPPERCASE"}, + {"camelCase string", "camelCase", "CAMELCASE"}, + {"PascalCase string", "PascalCase", "PASCALCASE"}, + {"mixed string", "mIxEd", "MIXED"}, + {"lowercase string", "lowercase", "LOWERCASE"}, + {"empty string", "", ""}, + } + + for _, test := range tests { + assertOK := func(result pipeline.Result, runInfo pipeline.RunInfo) { + assert.False(t, runInfo.IsPending) + assert.False(t, runInfo.IsRetryable) + require.NoError(t, result.Error) + require.Equal(t, test.want, result.Value.(string)) + } + t.Run(test.name, func(t *testing.T) { + t.Run("without vars through job DAG", func(t *testing.T) { + vars := pipeline.NewVarsFrom(nil) + task := pipeline.UppercaseTask{BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0)} + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{{Value: test.input}})) + }) + t.Run("without vars through input param", func(t *testing.T) { + inputStr := fmt.Sprintf("%v", test.input) + if inputStr == "" { + // empty input parameter is indistinguishable 
from not providing it at all + // in that case the task will use an input defined by the job DAG + return + } + vars := pipeline.NewVarsFrom(nil) + task := pipeline.UppercaseTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: inputStr, + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + t.Run("with vars", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{"bar": test.input}, + }) + task := pipeline.UppercaseTask{ + BaseTask: pipeline.NewBaseTask(0, "task", nil, nil, 0), + Input: "$(foo.bar)", + } + assertOK(task.Run(testutils.Context(t), logger.TestLogger(t), vars, []pipeline.Result{})) + }) + }) + } +} diff --git a/core/services/pipeline/task.vrf.go b/core/services/pipeline/task.vrf.go new file mode 100644 index 00000000..502416dd --- /dev/null +++ b/core/services/pipeline/task.vrf.go @@ -0,0 +1,117 @@ +package pipeline + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" +) + +type VRFTask struct { + BaseTask `mapstructure:",squash"` + PublicKey string `json:"publicKey"` + RequestBlockHash string `json:"requestBlockHash"` + RequestBlockNumber string `json:"requestBlockNumber"` + Topics string `json:"topics"` + + keyStore VRFKeyStore +} + +type VRFKeyStore interface { + GenerateProof(id string, seed *big.Int) (vrfkey.Proof, error) +} + +var _ Task = (*VRFTask)(nil) + +func (t *VRFTask) Type() TaskType { + return TaskTypeVRF +} + +func (t *VRFTask) Run(_ context.Context, _ logger.Logger, vars Vars, inputs []Result) 
(result Result, runInfo RunInfo) { + if len(inputs) != 1 { + return Result{Error: ErrWrongInputCardinality}, runInfo + } + if inputs[0].Error != nil { + return Result{Error: ErrInputTaskErrored}, runInfo + } + logValues, ok := inputs[0].Value.(map[string]interface{}) + if !ok { + return Result{Error: errors.Wrap(ErrBadInput, "expected map input")}, runInfo + } + var ( + pubKey BytesParam + requestBlockHash BytesParam + requestBlockNumber Uint64Param + topics HashSliceParam + ) + err := multierr.Combine( + errors.Wrap(ResolveParam(&pubKey, From(VarExpr(t.PublicKey, vars))), "publicKey"), + errors.Wrap(ResolveParam(&requestBlockHash, From(VarExpr(t.RequestBlockHash, vars))), "requestBlockHash"), + errors.Wrap(ResolveParam(&requestBlockNumber, From(VarExpr(t.RequestBlockNumber, vars))), "requestBlockNumber"), + errors.Wrap(ResolveParam(&topics, From(VarExpr(t.Topics, vars))), "topics"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + requestKeyHash, ok := logValues["keyHash"].([32]byte) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid keyHash")}, runInfo + } + requestPreSeed, ok := logValues["seed"].(*big.Int) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid preSeed")}, runInfo + } + requestJobID, ok := logValues["jobID"].([32]byte) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid requestJobID")}, runInfo + } + pk, err := secp256k1.NewPublicKeyFromBytes(pubKey) + if err != nil { + return Result{Error: fmt.Errorf("failed to create PublicKey from bytes %v", err)}, runInfo + } + pkh := pk.MustHash() + // Validate the key against the spec + if !bytes.Equal(requestKeyHash[:], pkh[:]) { + return Result{Error: fmt.Errorf("invalid key hash %v expected %v", hex.EncodeToString(requestKeyHash[:]), hex.EncodeToString(pkh[:]))}, runInfo + } + preSeed, err := proof.BigToSeed(requestPreSeed) + if err != nil { + return Result{Error: fmt.Errorf("unable to parse preseed %v", preSeed)}, runInfo + } + if 
!bytes.Equal(topics[0][:], requestJobID[:]) && !bytes.Equal(topics[1][:], requestJobID[:]) { + return Result{Error: fmt.Errorf("request jobID %v doesn't match expected %v or %v", requestJobID[:], topics[0][:], topics[1][:])}, runInfo + } + if len(requestBlockHash) != common.HashLength { + return Result{Error: fmt.Errorf("invalid BlockHash length %d expected %d", len(requestBlockHash), common.HashLength)}, runInfo + } + preSeedData := proof.PreSeedData{ + PreSeed: preSeed, + BlockHash: common.BytesToHash(requestBlockHash), + BlockNum: uint64(requestBlockNumber), + } + finalSeed := proof.FinalSeed(preSeedData) + p, err := t.keyStore.GenerateProof(pk.String(), finalSeed) + if err != nil { + return Result{Error: err}, runInfo + } + onChainProof, err := proof.GenerateProofResponseFromProof(p, preSeedData) + if err != nil { + return Result{Error: err}, retryableRunInfo() + } + var results = make(map[string]interface{}) + results["onChainProof"] = hexutil.Encode(onChainProof[:]) + + return Result{Value: hexutil.Encode(onChainProof[:])}, runInfo +} diff --git a/core/services/pipeline/task.vrfv2.go b/core/services/pipeline/task.vrfv2.go new file mode 100644 index 00000000..859e791c --- /dev/null +++ b/core/services/pipeline/task.vrfv2.go @@ -0,0 +1,149 @@ +package pipeline + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "go.uber.org/multierr" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" +) + +var ( + vrfCoordinatorV2ABI = evmtypes.MustGetABI(vrf_coordinator_v2.VRFCoordinatorV2ABI) +) + +type VRFTaskV2 struct { + BaseTask 
`mapstructure:",squash"` + PublicKey string `json:"publicKey"` + RequestBlockHash string `json:"requestBlockHash"` + RequestBlockNumber string `json:"requestBlockNumber"` + Topics string `json:"topics"` + + keyStore VRFKeyStore +} + +var _ Task = (*VRFTaskV2)(nil) + +func (t *VRFTaskV2) Type() TaskType { + return TaskTypeVRFV2 +} + +func (t *VRFTaskV2) Run(_ context.Context, lggr logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + if len(inputs) != 1 { + return Result{Error: ErrWrongInputCardinality}, runInfo + } + if inputs[0].Error != nil { + return Result{Error: ErrInputTaskErrored}, runInfo + } + logValues, ok := inputs[0].Value.(map[string]interface{}) + if !ok { + return Result{Error: errors.Wrap(ErrBadInput, "expected map input")}, runInfo + } + var ( + pubKey BytesParam + requestBlockHash BytesParam + requestBlockNumber Uint64Param + topics HashSliceParam + ) + err := multierr.Combine( + errors.Wrap(ResolveParam(&pubKey, From(VarExpr(t.PublicKey, vars))), "publicKey"), + errors.Wrap(ResolveParam(&requestBlockHash, From(VarExpr(t.RequestBlockHash, vars))), "requestBlockHash"), + errors.Wrap(ResolveParam(&requestBlockNumber, From(VarExpr(t.RequestBlockNumber, vars))), "requestBlockNumber"), + errors.Wrap(ResolveParam(&topics, From(VarExpr(t.Topics, vars))), "topics"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + requestKeyHash, ok := logValues["keyHash"].([32]byte) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid keyHash")}, runInfo + } + requestPreSeed, ok := logValues["preSeed"].(*big.Int) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid preSeed")}, runInfo + } + requestId, ok := logValues["requestId"].(*big.Int) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid requestId")}, runInfo + } + subID, ok := logValues["subId"].(uint64) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid subId")}, runInfo + } + callbackGasLimit, ok := 
logValues["callbackGasLimit"].(uint32) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid callbackGasLimit")}, runInfo + } + numWords, ok := logValues["numWords"].(uint32) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid numWords")}, runInfo + } + sender, ok := logValues["sender"].(common.Address) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid sender")}, runInfo + } + pk, err := secp256k1.NewPublicKeyFromBytes(pubKey) + if err != nil { + return Result{Error: fmt.Errorf("failed to create PublicKey from bytes %v", err)}, runInfo + } + pkh := pk.MustHash() + // Validate the key against the spec + if !bytes.Equal(requestKeyHash[:], pkh[:]) { + return Result{Error: fmt.Errorf("invalid key hash %v expected %v", hex.EncodeToString(requestKeyHash[:]), hex.EncodeToString(pkh[:]))}, runInfo + } + preSeed, err := proof.BigToSeed(requestPreSeed) + if err != nil { + return Result{Error: fmt.Errorf("unable to parse preseed %v", preSeed)}, runInfo + } + if len(requestBlockHash) != common.HashLength { + return Result{Error: fmt.Errorf("invalid BlockHash length %d expected %d", len(requestBlockHash), common.HashLength)}, runInfo + } + preSeedData := proof.PreSeedDataV2{ + PreSeed: preSeed, + BlockHash: common.BytesToHash(requestBlockHash), + BlockNum: uint64(requestBlockNumber), + SubId: subID, + CallbackGasLimit: callbackGasLimit, + NumWords: numWords, + Sender: sender, + } + finalSeed := proof.FinalSeedV2(preSeedData) + id := hexutil.Encode(pk[:]) + p, err := t.keyStore.GenerateProof(id, finalSeed) + if err != nil { + return Result{Error: err}, retryableRunInfo() + } + onChainProof, rc, err := proof.GenerateProofResponseFromProofV2(p, preSeedData) + if err != nil { + return Result{Error: err}, retryableRunInfo() + } + b, err := vrfCoordinatorV2ABI.Pack("fulfillRandomWords", onChainProof, rc) + if err != nil { + return Result{Error: err}, runInfo + } + results := make(map[string]interface{}) + output := hexutil.Encode(b) 
+ results["output"] = output + // RequestID needs to be a [32]byte for EvmTxMeta. + results["requestID"] = hexutil.Encode(requestId.Bytes()) + + // store vrf proof and request commitment separately so they can be used in a batch fashion + results["proof"] = onChainProof + results["requestCommitment"] = rc + + lggr.Debugw("Completed VRF V2 task run", "reqID", requestId.String(), "output", output) + + return Result{Value: results}, runInfo +} diff --git a/core/services/pipeline/task.vrfv2plus.go b/core/services/pipeline/task.vrfv2plus.go new file mode 100644 index 00000000..756af1e6 --- /dev/null +++ b/core/services/pipeline/task.vrfv2plus.go @@ -0,0 +1,159 @@ +package pipeline + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "go.uber.org/multierr" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" +) + +var ( + vrfCoordinatorV2PlusABI = evmtypes.MustGetABI(vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalABI) +) + +// VRFTaskV2Plus is identical to VRFTaskV2 except that it uses the V2Plus VRF +// request commitment, which includes a boolean indicating whether native or +// link payment was used. 
+type VRFTaskV2Plus struct { + BaseTask `mapstructure:",squash"` + PublicKey string `json:"publicKey"` + RequestBlockHash string `json:"requestBlockHash"` + RequestBlockNumber string `json:"requestBlockNumber"` + Topics string `json:"topics"` + + keyStore VRFKeyStore +} + +var _ Task = (*VRFTaskV2Plus)(nil) + +func (t *VRFTaskV2Plus) Type() TaskType { + return TaskTypeVRFV2Plus +} + +func (t *VRFTaskV2Plus) Run(_ context.Context, lggr logger.Logger, vars Vars, inputs []Result) (result Result, runInfo RunInfo) { + if len(inputs) != 1 { + return Result{Error: ErrWrongInputCardinality}, runInfo + } + if inputs[0].Error != nil { + return Result{Error: ErrInputTaskErrored}, runInfo + } + logValues, ok := inputs[0].Value.(map[string]interface{}) + if !ok { + return Result{Error: errors.Wrap(ErrBadInput, "expected map input")}, runInfo + } + var ( + pubKey BytesParam + requestBlockHash BytesParam + requestBlockNumber Uint64Param + topics HashSliceParam + ) + err := multierr.Combine( + errors.Wrap(ResolveParam(&pubKey, From(VarExpr(t.PublicKey, vars))), "publicKey"), + errors.Wrap(ResolveParam(&requestBlockHash, From(VarExpr(t.RequestBlockHash, vars))), "requestBlockHash"), + errors.Wrap(ResolveParam(&requestBlockNumber, From(VarExpr(t.RequestBlockNumber, vars))), "requestBlockNumber"), + errors.Wrap(ResolveParam(&topics, From(VarExpr(t.Topics, vars))), "topics"), + ) + if err != nil { + return Result{Error: err}, runInfo + } + + requestKeyHash, ok := logValues["keyHash"].([32]byte) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid keyHash")}, runInfo + } + requestPreSeed, ok := logValues["preSeed"].(*big.Int) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid preSeed")}, runInfo + } + requestId, ok := logValues["requestId"].(*big.Int) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid requestId")}, runInfo + } + subID, ok := logValues["subId"].(*big.Int) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, 
"invalid subId")}, runInfo + } + callbackGasLimit, ok := logValues["callbackGasLimit"].(uint32) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid callbackGasLimit")}, runInfo + } + numWords, ok := logValues["numWords"].(uint32) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid numWords")}, runInfo + } + sender, ok := logValues["sender"].(common.Address) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid sender")}, runInfo + } + extraArgs, ok := logValues["extraArgs"].([]byte) + if !ok { + return Result{Error: errors.Wrapf(ErrBadInput, "invalid extraArgs")}, runInfo + } + pk, err := secp256k1.NewPublicKeyFromBytes(pubKey) + if err != nil { + return Result{Error: fmt.Errorf("failed to create PublicKey from bytes %v", err)}, runInfo + } + pkh := pk.MustHash() + // Validate the key against the spec + if !bytes.Equal(requestKeyHash[:], pkh[:]) { + return Result{Error: fmt.Errorf("invalid key hash %v expected %v", hex.EncodeToString(requestKeyHash[:]), hex.EncodeToString(pkh[:]))}, runInfo + } + preSeed, err := proof.BigToSeed(requestPreSeed) + if err != nil { + return Result{Error: fmt.Errorf("unable to parse preseed %v", preSeed)}, runInfo + } + if len(requestBlockHash) != common.HashLength { + return Result{Error: fmt.Errorf("invalid BlockHash length %d expected %d", len(requestBlockHash), common.HashLength)}, runInfo + } + preSeedData := proof.PreSeedDataV2Plus{ + PreSeed: preSeed, + BlockHash: common.BytesToHash(requestBlockHash), + BlockNum: uint64(requestBlockNumber), + SubId: subID, + CallbackGasLimit: callbackGasLimit, + NumWords: numWords, + Sender: sender, + ExtraArgs: extraArgs, + } + finalSeed := proof.FinalSeedV2Plus(preSeedData) + id := hexutil.Encode(pk[:]) + p, err := t.keyStore.GenerateProof(id, finalSeed) + if err != nil { + return Result{Error: err}, retryableRunInfo() + } + onChainProof, rc, err := proof.GenerateProofResponseFromProofV2Plus(p, preSeedData) + if err != nil { + return 
Result{Error: err}, retryableRunInfo() + } + // onlyPremium is false because this task assumes that plugin node fulfills the VRF request + // gas cost should be billed to the requesting subscription + b, err := vrfCoordinatorV2PlusABI.Pack("fulfillRandomWords", onChainProof, rc, false /* onlyPremium */) + if err != nil { + return Result{Error: err}, runInfo + } + results := make(map[string]interface{}) + output := hexutil.Encode(b) + results["output"] = output + // RequestID needs to be a [32]byte for EvmTxMeta. + results["requestID"] = hexutil.Encode(requestId.Bytes()) + + // store vrf proof and request commitment separately so they can be used in a batch fashion + results["proof"] = onChainProof + results["requestCommitment"] = rc + + lggr.Debugw("Completed VRF V2 task run", "reqID", requestId.String(), "output", output) + + return Result{Value: results}, runInfo +} diff --git a/core/services/pipeline/task_object_params.go b/core/services/pipeline/task_object_params.go new file mode 100644 index 00000000..9bcc0d62 --- /dev/null +++ b/core/services/pipeline/task_object_params.go @@ -0,0 +1,131 @@ +package pipeline + +import ( + "encoding/json" + "fmt" + "math/big" + + "github.com/shopspring/decimal" +) + +type ObjectType int + +const ( + NilType ObjectType = iota + BoolType + DecimalType + StringType + SliceType + MapType +) + +// ObjectParam represents a kind of any type that could be used by the +// memo task +type ObjectParam struct { + Type ObjectType + BoolValue BoolParam + DecimalValue DecimalParam + StringValue StringParam + SliceValue SliceParam + MapValue MapParam +} + +func (o ObjectParam) MarshalJSON() ([]byte, error) { + switch o.Type { + case NilType: + return json.Marshal(nil) + case BoolType: + return json.Marshal(o.BoolValue) + case DecimalType: + return json.Marshal(o.DecimalValue.Decimal()) + case StringType: + return json.Marshal(o.StringValue) + case MapType: + return json.Marshal(o.MapValue) + case SliceType: + return 
json.Marshal(o.SliceValue) + } + panic(fmt.Sprintf("Invalid type for ObjectParam %v", o.Type)) +} + +func (o ObjectParam) Marshal() (string, error) { + b, err := o.MarshalJSON() + if err != nil { + return "", err + } + return string(b), nil +} + +func (o ObjectParam) String() string { + value, err := o.Marshal() + if err != nil { + return fmt.Sprintf("", err) + } + return value +} + +func (o *ObjectParam) UnmarshalPipelineParam(val interface{}) error { + switch v := val.(type) { + case nil: + o.Type = NilType + return nil + + case bool: + o.Type = BoolType + o.BoolValue = BoolParam(v) + return nil + + case uint8, uint16, uint32, uint64, uint, int8, int16, int32, int64, int, float32, float64, decimal.Decimal, *decimal.Decimal, big.Int, *big.Int: + o.Type = DecimalType + return o.DecimalValue.UnmarshalPipelineParam(v) + + case string: + o.Type = StringType + return o.StringValue.UnmarshalPipelineParam(v) + + // Maps + case MapParam: + o.Type = MapType + o.MapValue = v + return nil + + case map[string]interface{}: + o.Type = MapType + return o.MapValue.UnmarshalPipelineParam(v) + + // Slices + case SliceParam: + o.Type = SliceType + o.SliceValue = v + return nil + + case []interface{}: + o.Type = SliceType + return o.SliceValue.UnmarshalPipelineParam(v) + + case []int: + o.Type = SliceType + for _, value := range v { + o.SliceValue = append(o.SliceValue, value) + } + return nil + + case []string: + o.Type = SliceType + for _, value := range v { + o.SliceValue = append(o.SliceValue, value) + } + return nil + + case ObjectParam: + o.Type = v.Type + o.BoolValue = v.BoolValue + o.MapValue = v.MapValue + o.StringValue = v.StringValue + o.DecimalValue = v.DecimalValue + return nil + + } + + return fmt.Errorf("bad input for task: %T", val) +} diff --git a/core/services/pipeline/task_object_params_test.go b/core/services/pipeline/task_object_params_test.go new file mode 100644 index 00000000..b585ca4e --- /dev/null +++ b/core/services/pipeline/task_object_params_test.go @@ 
-0,0 +1,95 @@ +package pipeline_test + +import ( + "math/big" + "testing" + + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestObjectParam_UnmarshalPipelineParamValid(t *testing.T) { + t.Parallel() + + decimalValue := decimal.New(173, -1) + + tests := []struct { + name string + input interface{} + output string + }{ + {"identity", pipeline.ObjectParam{Type: pipeline.BoolType, BoolValue: true}, "true"}, + + {"nil", nil, "null"}, + + {"bool", true, "true"}, + {"bool false", false, "false"}, + + {"uint8", uint8(17), `"17"`}, + {"uint16", uint16(17), `"17"`}, + {"uint32", uint32(17), `"17"`}, + {"uint64", uint64(17), `"17"`}, + {"uint", 17, `"17"`}, + + {"int8", int8(17), `"17"`}, + {"int16", int16(17), `"17"`}, + {"int32", int32(17), `"17"`}, + {"int64", int64(17), `"17"`}, + {"integer", 17, `"17"`}, + + {"negative integer", -19, `"-19"`}, + {"float32", float32(17.3), `"17.3"`}, + {"float", 17.3, `"17.3"`}, + {"negative float", -17.3, `"-17.3"`}, + + {"bigintp", big.NewInt(-17), `"-17"`}, + {"bigint", *big.NewInt(29), `"29"`}, + + {"decimalp", &decimalValue, `"17.3"`}, + {"decimal", decimalValue, `"17.3"`}, + + {"string", "hello world", `"hello world"`}, + + {"array", []int{17, 19}, "[17,19]"}, + {"empty array", []interface{}{}, "[]"}, + {"interface array", []interface{}{17, 19}, "[17,19]"}, + {"string array", []string{"hello", "world"}, `["hello","world"]`}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var value pipeline.ObjectParam + err := value.UnmarshalPipelineParam(test.input) + require.NoError(t, err) + marshalledValue, err := value.Marshal() + require.NoError(t, err) + assert.Equal(t, test.output, marshalledValue) + }) + } +} + +func TestObjectParam_Marshal(t *testing.T) { + tests := []struct { + name string + input *pipeline.ObjectParam + output string + }{ + {"nil", mustNewObjectParam(t, 
nil), "null"}, + {"bool", mustNewObjectParam(t, true), "true"}, + {"integer", mustNewObjectParam(t, 17), `"17"`}, + {"string", mustNewObjectParam(t, "hello world"), `"hello world"`}, + {"array", mustNewObjectParam(t, []int{17, 19}), "[17,19]"}, + {"map", mustNewObjectParam(t, map[string]interface{}{"key": 19}), `{"key":19}`}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + marshalledValue, err := test.input.Marshal() + require.NoError(t, err) + assert.Equal(t, test.output, marshalledValue) + }) + } +} diff --git a/core/services/pipeline/task_params.go b/core/services/pipeline/task_params.go new file mode 100644 index 00000000..fbd508f0 --- /dev/null +++ b/core/services/pipeline/task_params.go @@ -0,0 +1,811 @@ +package pipeline + +import ( + "encoding/hex" + "encoding/json" + "math" + "math/big" + "net/url" + "strconv" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/shopspring/decimal" + + commonhex "github.com/goplugin/plugin-common/pkg/utils/hex" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +//go:generate mockery --quiet --name PipelineParamUnmarshaler --output ./mocks/ --case=underscore + +type PipelineParamUnmarshaler interface { + UnmarshalPipelineParam(val interface{}) error +} + +func ResolveParam(out PipelineParamUnmarshaler, getters []GetterFunc) error { + var val interface{} + var err error + var found bool + for _, get := range getters { + val, err = get() + if errors.Is(errors.Cause(err), ErrParameterEmpty) { + continue + } else if err != nil { + return err + } + found = true + break + } + if !found { + return ErrParameterEmpty + } + + err = out.UnmarshalPipelineParam(val) + if err != nil { + return err + } + return nil +} + +type StringParam string + +func (s *StringParam) UnmarshalPipelineParam(val interface{}) error { + switch v := val.(type) { + case string: + *s = StringParam(v) + return nil + case []byte: + *s = StringParam(string(v)) + return nil + case 
ObjectParam: + if v.Type == StringType { + *s = v.StringValue + return nil + } + case *ObjectParam: + if v != nil && v.Type == StringType { + *s = v.StringValue + return nil + } + } + return errors.Wrapf(ErrBadInput, "expected string, got %T", val) +} + +func (s *StringParam) String() string { + if s == nil { + return "" + } + return string(*s) +} + +type StringSliceParam []string + +func (s *StringSliceParam) UnmarshalPipelineParam(val interface{}) error { + var ssp StringSliceParam + switch v := val.(type) { + case nil: + ssp = nil + case string: + return s.UnmarshalPipelineParam([]byte(v)) + + case []byte: + var theSlice []string + err := json.Unmarshal(v, &theSlice) + if err != nil { + return errors.Wrap(ErrBadInput, err.Error()) + } + *s = StringSliceParam(theSlice) + return nil + case []string: + ssp = v + case []interface{}: + return s.UnmarshalPipelineParam(SliceParam(v)) + case SliceParam: + for _, x := range v { + var s StringParam + err := s.UnmarshalPipelineParam(x) + if err != nil { + return err + } + ssp = append(ssp, s.String()) + } + default: + return errors.Wrapf(ErrBadInput, "expected string slice, got %T", val) + } + *s = ssp + return nil +} + +type BytesParam []byte + +func (b *BytesParam) UnmarshalPipelineParam(val interface{}) error { + switch v := val.(type) { + case string: + // first check if this is a valid hex-encoded string + if commonhex.HasPrefix(v) { + noHexPrefix := commonhex.TrimPrefix(v) + bs, err := hex.DecodeString(noHexPrefix) + if err == nil { + *b = bs + return nil + } + } + *b = BytesParam(v) + return nil + case []byte: + *b = v + return nil + case nil: + *b = BytesParam(nil) + return nil + case ObjectParam: + if v.Type == StringType { + *b = BytesParam(v.StringValue) + return nil + } + case *ObjectParam: + if v != nil && v.Type == StringType { + *b = BytesParam(v.StringValue) + return nil + } + } + + return errors.Wrapf(ErrBadInput, "expected array of bytes, got %T", val) +} + +type Uint64Param uint64 + +func (u *Uint64Param) 
UnmarshalPipelineParam(val interface{}) error { + switch v := val.(type) { + case uint: + *u = Uint64Param(v) + case uint8: + *u = Uint64Param(v) + case uint16: + *u = Uint64Param(v) + case uint32: + *u = Uint64Param(v) + case uint64: + *u = Uint64Param(v) + case int: + if v < 0 { + return errors.Wrapf(ErrBadInput, "cannot cast %v to Uint64Param", v) + } + *u = Uint64Param(v) + case int8: + if v < 0 { + return errors.Wrapf(ErrBadInput, "cannot cast %v to Uint64Param", v) + } + *u = Uint64Param(v) + case int16: + if v < 0 { + return errors.Wrapf(ErrBadInput, "cannot cast %v to Uint64Param", v) + } + *u = Uint64Param(v) + case int32: + if v < 0 { + return errors.Wrapf(ErrBadInput, "cannot cast %v to Uint64Param", v) + } + *u = Uint64Param(v) + case int64: + if v < 0 { + return errors.Wrapf(ErrBadInput, "cannot cast %v to Uint64Param", v) + } + *u = Uint64Param(v) + case float64: // when decoding from db: JSON numbers are floats + if v < 0 || v > math.MaxUint64 { + return errors.Wrapf(ErrBadInput, "cannot cast %v to Uint64Param", v) + } + *u = Uint64Param(v) + case string: + n, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return errors.Wrap(ErrBadInput, err.Error()) + } + *u = Uint64Param(n) + default: + return errors.Wrapf(ErrBadInput, "expected unsigned integer, got %T", val) + } + return nil +} + +type MaybeUint64Param struct { + n uint64 + isSet bool +} + +// NewMaybeUint64Param creates new instance of MaybeUint64Param +func NewMaybeUint64Param(n uint64, isSet bool) MaybeUint64Param { + return MaybeUint64Param{ + n: n, + isSet: isSet, + } +} + +func (p *MaybeUint64Param) UnmarshalPipelineParam(val interface{}) error { + var n uint64 + switch v := val.(type) { + case uint: + n = uint64(v) + case uint8: + n = uint64(v) + case uint16: + n = uint64(v) + case uint32: + n = uint64(v) + case uint64: + n = v + case int: + if v < 0 { + return errors.Wrapf(ErrBadInput, "cannot cast %v to uint64", v) + } + n = uint64(v) + case int8: + if v < 0 { + return 
errors.Wrapf(ErrBadInput, "cannot cast %v to uint64", v) + } + n = uint64(v) + case int16: + if v < 0 { + return errors.Wrapf(ErrBadInput, "cannot cast %v to uint64", v) + } + n = uint64(v) + case int32: + if v < 0 { + return errors.Wrapf(ErrBadInput, "cannot cast %v to uint64", v) + } + n = uint64(v) + case int64: + if v < 0 { + return errors.Wrapf(ErrBadInput, "cannot cast %v to uint64", v) + } + n = uint64(v) + case float64: // when decoding from db: JSON numbers are floats + if v < 0 || v > math.MaxUint64 { + return errors.Wrapf(ErrBadInput, "cannot cast %v to uint64", v) + } + n = uint64(v) + case string: + if strings.TrimSpace(v) == "" { + *p = MaybeUint64Param{0, false} + return nil + } + var err error + n, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return errors.Wrap(ErrBadInput, err.Error()) + } + + default: + return errors.Wrapf(ErrBadInput, "expected unsigned integer or nil, got %T", val) + } + + *p = MaybeUint64Param{n, true} + return nil +} + +func (p MaybeUint64Param) Uint64() (uint64, bool) { + return p.n, p.isSet +} + +type MaybeInt32Param struct { + n int32 + isSet bool +} + +// NewMaybeInt32Param creates new instance of MaybeInt32Param +func NewMaybeInt32Param(n int32, isSet bool) MaybeInt32Param { + return MaybeInt32Param{ + n: n, + isSet: isSet, + } +} + +func (p *MaybeInt32Param) UnmarshalPipelineParam(val interface{}) error { + var n int32 + switch v := val.(type) { + case uint: + if v > math.MaxInt32 { + return errors.Wrap(ErrBadInput, "overflows int32") + } + n = int32(v) + case uint8: + n = int32(v) + case uint16: + n = int32(v) + case uint32: + if v > math.MaxInt32 { + return errors.Wrap(ErrBadInput, "overflows int32") + } + n = int32(v) + case uint64: + if v > math.MaxInt32 { + return errors.Wrap(ErrBadInput, "overflows int32") + } + n = int32(v) + case int: + if v > math.MaxInt32 || v < math.MinInt32 { + return errors.Wrap(ErrBadInput, "overflows int32") + } + n = int32(v) + case int8: + n = int32(v) + case int16: + n = 
int32(v) + case int32: + n = v + case int64: + if v > math.MaxInt32 || v < math.MinInt32 { + return errors.Wrap(ErrBadInput, "overflows int32") + } + n = int32(v) + case float64: // when decoding from db: JSON numbers are floats + if v > math.MaxInt32 || v < math.MinInt32 { + return errors.Wrap(ErrBadInput, "overflows int32") + } + n = int32(v) + case string: + if strings.TrimSpace(v) == "" { + *p = MaybeInt32Param{0, false} + return nil + } + i, err := strconv.ParseInt(v, 10, 32) + if err != nil { + return errors.Wrap(ErrBadInput, err.Error()) + } + n = int32(i) + + default: + return errors.Wrapf(ErrBadInput, "expected signed integer or nil, got %T", val) + } + + *p = MaybeInt32Param{n, true} + return nil +} + +func (p MaybeInt32Param) Int32() (int32, bool) { + return p.n, p.isSet +} + +type BoolParam bool + +func (b *BoolParam) UnmarshalPipelineParam(val interface{}) error { + switch v := val.(type) { + case string: + theBool, err := strconv.ParseBool(v) + if err != nil { + return errors.Wrap(ErrBadInput, err.Error()) + } + *b = BoolParam(theBool) + return nil + case bool: + *b = BoolParam(v) + return nil + case ObjectParam: + if v.Type == BoolType { + *b = v.BoolValue + return nil + } + case *ObjectParam: + if v != nil && v.Type == BoolType { + *b = v.BoolValue + return nil + } + } + + return errors.Wrapf(ErrBadInput, "expected true or false, got %T", val) +} + +type DecimalParam decimal.Decimal + +func (d *DecimalParam) UnmarshalPipelineParam(val interface{}) error { + if v, ok := val.(ObjectParam); ok && v.Type == DecimalType { + *d = v.DecimalValue + return nil + } else if v, ok := val.(*ObjectParam); ok { + if v == nil { + return errors.Wrap(ErrBadInput, "nil ObjectParam") + } + if v.Type == DecimalType { + *d = v.DecimalValue + return nil + } + } + x, err := utils.ToDecimal(val) + if err != nil { + return errors.Wrap(ErrBadInput, err.Error()) + } + *d = DecimalParam(x) + return nil +} + +func (d DecimalParam) Decimal() decimal.Decimal { + return decimal.Decimal(d) +} + 
+type URLParam url.URL + +func (u *URLParam) UnmarshalPipelineParam(val interface{}) error { + switch v := val.(type) { + case string: + theURL, err := url.ParseRequestURI(v) + if err != nil { + return errors.Wrap(ErrBadInput, err.Error()) + } + *u = URLParam(*theURL) + return nil + default: + return ErrBadInput + } +} + +func (u *URLParam) String() string { + return (*url.URL)(u).String() +} + +type AddressParam common.Address + +func (a *AddressParam) UnmarshalPipelineParam(val interface{}) error { + switch v := val.(type) { + case string: + return a.UnmarshalPipelineParam([]byte(v)) + case []byte: + switch len(v) { + case 42: + bs, err := commonhex.DecodeString(string(v)) + if err == nil { + *a = AddressParam(common.BytesToAddress(bs)) + return nil + } + case 20: + copy((*a)[:], v) + return nil + } + case common.Address: + *a = AddressParam(v) + return nil + } + + return errors.Wrapf(ErrBadInput, "expected common.Address, got %T", val) +} + +// MapParam accepts maps or JSON-encoded strings +type MapParam map[string]interface{} + +func (m *MapParam) UnmarshalPipelineParam(val interface{}) error { + switch v := val.(type) { + case nil: + *m = nil + return nil + + case MapParam: + *m = v + return nil + + case map[string]interface{}: + *m = MapParam(v) + return nil + + case string: + return m.UnmarshalPipelineParam([]byte(v)) + + case []byte: + var theMap map[string]interface{} + err := json.Unmarshal(v, &theMap) + if err != nil { + return err + } + *m = MapParam(theMap) + return nil + + case ObjectParam: + if v.Type == MapType { + *m = v.MapValue + return nil + } + + case *ObjectParam: + if v != nil && v.Type == MapType { + *m = v.MapValue + return nil + } + + } + + return errors.Wrapf(ErrBadInput, "expected map, got %T", val) +} + +func (m MapParam) Map() map[string]interface{} { + return (map[string]interface{})(m) +} + +type SliceParam []interface{} + +func (s *SliceParam) UnmarshalPipelineParam(val interface{}) error { + switch v := val.(type) { + case nil: + 
*s = nil + return nil + case []interface{}: + *s = v + return nil + case string: + return s.UnmarshalPipelineParam([]byte(v)) + + case []byte: + var theSlice []interface{} + err := json.Unmarshal(v, &theSlice) + if err != nil { + return err + } + *s = SliceParam(theSlice) + return nil + } + + return errors.Wrapf(ErrBadInput, "expected slice, got %T", val) +} + +func (s SliceParam) FilterErrors() (SliceParam, int) { + var s2 SliceParam + var errs int + for _, x := range s { + if _, is := x.(error); is { + errs++ + } else { + s2 = append(s2, x) + } + } + return s2, errs +} + +type DecimalSliceParam []decimal.Decimal + +func (s *DecimalSliceParam) UnmarshalPipelineParam(val interface{}) error { + var dsp DecimalSliceParam + switch v := val.(type) { + case nil: + dsp = nil + case []decimal.Decimal: + dsp = v + case []interface{}: + return s.UnmarshalPipelineParam(SliceParam(v)) + case SliceParam: + for _, x := range v { + var d DecimalParam + err := d.UnmarshalPipelineParam(x) + if err != nil { + return err + } + dsp = append(dsp, d.Decimal()) + } + case string: + return s.UnmarshalPipelineParam([]byte(v)) + + case []byte: + var theSlice []interface{} + err := json.Unmarshal(v, &theSlice) + if err != nil { + return err + } + return s.UnmarshalPipelineParam(SliceParam(theSlice)) + + default: + return errors.Wrapf(ErrBadInput, "expected number, got %T", val) + } + *s = dsp + return nil +} + +type HashSliceParam []common.Hash + +func (s *HashSliceParam) UnmarshalPipelineParam(val interface{}) error { + var dsp HashSliceParam + switch v := val.(type) { + case nil: + dsp = nil + case []common.Hash: + dsp = v + case string: + err := json.Unmarshal([]byte(v), &dsp) + if err != nil { + return errors.Wrapf(ErrBadInput, "HashSliceParam: %v", err) + } + case []byte: + err := json.Unmarshal(v, &dsp) + if err != nil { + return errors.Wrapf(ErrBadInput, "HashSliceParam: %v", err) + } + case []interface{}: + for _, h := range v { + if s, is := h.(string); is { + var hash common.Hash 
+ err := hash.UnmarshalText([]byte(s)) + if err != nil { + return errors.Wrapf(ErrBadInput, "HashSliceParam: %v", err) + } + dsp = append(dsp, hash) + } else if b, is := h.([]byte); is { + // same semantic as AddressSliceParam + var hash common.Hash + err := hash.UnmarshalText(b) + if err != nil { + return errors.Wrapf(ErrBadInput, "HashSliceParam: %v", err) + } + dsp = append(dsp, hash) + } else if h, is := h.(common.Hash); is { + dsp = append(dsp, h) + } else { + return errors.Wrap(ErrBadInput, "HashSliceParam") + } + } + default: + return errors.Wrap(ErrBadInput, "HashSliceParam") + } + *s = dsp + return nil +} + +type AddressSliceParam []common.Address + +func (s *AddressSliceParam) UnmarshalPipelineParam(val interface{}) error { + var asp AddressSliceParam + switch v := val.(type) { + case nil: + asp = nil + case []common.Address: + asp = v + case string: + err := json.Unmarshal([]byte(v), &asp) + if err != nil { + return errors.Wrapf(ErrBadInput, "AddressSliceParam: %v", err) + } + case []byte: + err := json.Unmarshal(v, &asp) + if err != nil { + return errors.Wrapf(ErrBadInput, "AddressSliceParam: %v", err) + } + case []interface{}: + for _, a := range v { + var addr AddressParam + err := addr.UnmarshalPipelineParam(a) + if err != nil { + return errors.Wrapf(ErrBadInput, "AddressSliceParam: %v", err) + } + asp = append(asp, common.Address(addr)) + } + default: + return errors.Wrapf(ErrBadInput, "AddressSliceParam: cannot convert %T", val) + } + *s = asp + return nil +} + +type JSONPathParam []string + +// NewJSONPathParam returns a new JSONPathParam using the given separator, or the default if empty. +func NewJSONPathParam(sep string) JSONPathParam { + if len(sep) == 0 { + return nil + } + return []string{sep} +} + +// UnmarshalPipelineParam unmarshals a slice of strings from val. +// If val is a string or []byte, it is split on a separator. +// The default separator is ',' but can be overridden by initializing via NewJSONPathParam. 
+func (p *JSONPathParam) UnmarshalPipelineParam(val interface{}) error { + sep := "," + if len(*p) > 0 { + // custom separator + sep = (*p)[0] + } + var ssp JSONPathParam + switch v := val.(type) { + case nil: + ssp = nil + case []string: + ssp = v + case []interface{}: + for _, x := range v { + as, is := x.(string) + if !is { + return ErrBadInput + } + ssp = append(ssp, as) + } + case string: + if len(v) == 0 { + return nil + } + ssp = strings.Split(v, sep) + case []byte: + if len(v) == 0 { + return nil + } + ssp = strings.Split(string(v), sep) + default: + return ErrBadInput + } + *p = ssp + return nil +} + +type MaybeBigIntParam struct { + n *big.Int +} + +// NewMaybeBigIntParam creates a new instance of MaybeBigIntParam +func NewMaybeBigIntParam(n *big.Int) MaybeBigIntParam { + return MaybeBigIntParam{ + n: n, + } +} + +func (p *MaybeBigIntParam) UnmarshalPipelineParam(val interface{}) error { + var n *big.Int + switch v := val.(type) { + case uint: + n = big.NewInt(0).SetUint64(uint64(v)) + case uint8: + n = big.NewInt(0).SetUint64(uint64(v)) + case uint16: + n = big.NewInt(0).SetUint64(uint64(v)) + case uint32: + n = big.NewInt(0).SetUint64(uint64(v)) + case uint64: + n = big.NewInt(0).SetUint64(v) + case int: + n = big.NewInt(int64(v)) + case int8: + n = big.NewInt(int64(v)) + case int16: + n = big.NewInt(int64(v)) + case int32: + n = big.NewInt(int64(v)) + case int64: + n = big.NewInt(v) + case float64: // when decoding from db: JSON numbers are floats + if v < math.MinInt64 || v > math.MaxUint64 { + return errors.Wrapf(ErrBadInput, "cannot cast %v to u/int64", v) + } + if v < 0 { + n = big.NewInt(int64(v)) + } else { + n = big.NewInt(0).SetUint64(uint64(v)) + } + case string: + if strings.TrimSpace(v) == "" { + *p = MaybeBigIntParam{n: nil} + return nil + } + var ok bool + n, ok = big.NewInt(0).SetString(v, 10) + if !ok { + return errors.Wrapf(ErrBadInput, "unable to convert %s to big.Int", v) + } + case decimal.Decimal: + if !v.IsInteger() { + return 
errors.Wrapf(ErrBadInput, "cannot convert non-integer %v to big.Int", v) + } + n = v.BigInt() + case *decimal.Decimal: + if !v.IsInteger() { + return errors.Wrapf(ErrBadInput, "cannot convert non-integer %v to big.Int", v) + } + n = v.BigInt() + case *big.Int: + n = v + case nil: + *p = MaybeBigIntParam{n: nil} + return nil + default: + return ErrBadInput + } + *p = MaybeBigIntParam{n: n} + return nil +} + +func (p MaybeBigIntParam) BigInt() *big.Int { + return p.n +} diff --git a/core/services/pipeline/task_params_test.go b/core/services/pipeline/task_params_test.go new file mode 100644 index 00000000..21e9be91 --- /dev/null +++ b/core/services/pipeline/task_params_test.go @@ -0,0 +1,749 @@ +package pipeline_test + +import ( + "math" + "math/big" + "net/url" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline/mocks" +) + +func TestStringParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + var nilObjectParam *pipeline.ObjectParam + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + // valid + {"string", "foo bar baz", pipeline.StringParam("foo bar baz"), nil}, + {"[]byte", []byte("foo bar baz"), pipeline.StringParam("foo bar baz"), nil}, + {"*object", mustNewObjectParam(t, `boz bar bap`), pipeline.StringParam("boz bar bap"), nil}, + {"object", *mustNewObjectParam(t, `boz bar bap`), pipeline.StringParam("boz bar bap"), nil}, + // invalid + {"int", 12345, pipeline.StringParam(""), pipeline.ErrBadInput}, + {"nil", nil, pipeline.StringParam(""), pipeline.ErrBadInput}, + {"nil ObjectParam", nilObjectParam, pipeline.StringParam(""), pipeline.ErrBadInput}, + } + + for _, test := range tests { + 
t.Run(test.name, func(t *testing.T) { + var p pipeline.StringParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + +func TestStringSliceParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + expected := pipeline.StringSliceParam{"foo", "bar", "baz"} + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"json", `[ "foo", "bar", "baz" ]`, expected, nil}, + {"[]string", []string{"foo", "bar", "baz"}, expected, nil}, + {"[]interface{} with strings", []interface{}{"foo", "bar", "baz"}, expected, nil}, + {"[]interface{} with []byte", []interface{}{[]byte("foo"), []byte("bar"), []byte("baz")}, expected, nil}, + {"SliceParam", pipeline.SliceParam([]interface{}{"foo", "bar", "baz"}), expected, nil}, + + {"nil", nil, pipeline.StringSliceParam(nil), nil}, + + {"bad json", `[ "foo", 1, false ]`, nil, pipeline.ErrBadInput}, + {"[]interface{} with bad types", []interface{}{123, true}, nil, pipeline.ErrBadInput}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.StringSliceParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + if test.expected != nil { + require.Equal(t, test.expected, p) + } + }) + } + +} + +func TestBytesParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"string", "foo bar baz", pipeline.BytesParam("foo bar baz"), nil}, + {"[]byte", []byte("foo bar baz"), pipeline.BytesParam("foo bar baz"), nil}, + {"int", 12345, pipeline.BytesParam(nil), pipeline.ErrBadInput}, + {"hex-invalid", "0xh", pipeline.BytesParam("0xh"), nil}, + {"valid-hex", hexutil.MustDecode("0xd3184d"), pipeline.BytesParam(hexutil.MustDecode("0xd3184d")), nil}, + {"*object", mustNewObjectParam(t, `boz bar bap`), pipeline.BytesParam("boz bar bap"), 
nil}, + {"object", *mustNewObjectParam(t, `boz bar bap`), pipeline.BytesParam("boz bar bap"), nil}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.BytesParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + +func TestAddressParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + var addr pipeline.AddressParam + copy(addr[:], []byte("deadbeefdeadbeefdead")) + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"20-char string", "deadbeefdeadbeefdead", addr, nil}, + {"21-char string", "deadbeefdeadbeefdeadb", nil, pipeline.ErrBadInput}, + {"19-char string", "deadbeefdeadbeefdea", nil, pipeline.ErrBadInput}, + {"20-char []byte", []byte("deadbeefdeadbeefdead"), addr, nil}, + {"21-char []byte", []byte("deadbeefdeadbeefdeadb"), nil, pipeline.ErrBadInput}, + {"19-char []byte", []byte("deadbeefdeadbeefdea"), nil, pipeline.ErrBadInput}, + + {"42-char string with 0x", "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", pipeline.AddressParam(common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")), nil}, + {"41-char string with 0x", "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbee", nil, pipeline.ErrBadInput}, + {"43-char string with 0x", "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefd", nil, pipeline.ErrBadInput}, + {"42-char string without 0x", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefde", nil, pipeline.ErrBadInput}, + {"40-char string without 0x", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", nil, pipeline.ErrBadInput}, + + {"42-char []byte with 0x", []byte("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), pipeline.AddressParam(common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")), nil}, + {"41-char []byte with 0x", []byte("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbee"), nil, pipeline.ErrBadInput}, + {"43-char []byte with 0x", 
[]byte("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefd"), nil, pipeline.ErrBadInput}, + {"42-char []byte without 0x", []byte("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefde"), nil, pipeline.ErrBadInput}, + {"40-char []byte without 0x", []byte("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef"), nil, pipeline.ErrBadInput}, + + {"42-char string with 0x but wrong characters", "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadzzzz", nil, pipeline.ErrBadInput}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.AddressParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + if test.expected != nil { + require.Equal(t, test.expected, p) + } + }) + } +} + +func TestAddressSliceParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + addr1 := common.HexToAddress("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + addr2 := common.HexToAddress("0xcafebabecafebabecafebabecafebabecafebabe") + expected := pipeline.AddressSliceParam{addr1, addr2} + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"json", `[ "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", "0xcafebabecafebabecafebabecafebabecafebabe" ]`, expected, nil}, + {"[]common.Address", []common.Address{addr1, addr2}, expected, nil}, + {"[]interface{} with common.Address", []interface{}{addr1, addr2}, expected, nil}, + {"[]interface{} with strings", []interface{}{addr1.String(), addr2.String()}, expected, nil}, + {"[]interface{} with []byte", []interface{}{[]byte(addr1.String()), []byte(addr2.String())}, expected, nil}, + {"nil", nil, pipeline.AddressSliceParam(nil), nil}, + + {"bad json", `[ "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" "0xcafebabecafebabecafebabecafebabecafebabe" ]`, nil, pipeline.ErrBadInput}, + {"[]interface{} with bad types", []interface{}{123, true}, nil, pipeline.ErrBadInput}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.AddressSliceParam + 
err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + if test.expected != nil { + require.Equal(t, test.expected, p) + } + }) + } +} + +func TestUint64Param_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + // positive + {"string", "123", pipeline.Uint64Param(123), nil}, + {"int", int(123), pipeline.Uint64Param(123), nil}, + {"int8", int8(123), pipeline.Uint64Param(123), nil}, + {"int16", int16(123), pipeline.Uint64Param(123), nil}, + {"int32", int32(123), pipeline.Uint64Param(123), nil}, + {"int64", int64(123), pipeline.Uint64Param(123), nil}, + {"uint", uint(123), pipeline.Uint64Param(123), nil}, + {"uint8", uint8(123), pipeline.Uint64Param(123), nil}, + {"uint16", uint16(123), pipeline.Uint64Param(123), nil}, + {"uint32", uint32(123), pipeline.Uint64Param(123), nil}, + {"uint64", uint64(123), pipeline.Uint64Param(123), nil}, + {"float64", float64(123), pipeline.Uint64Param(123), nil}, + // negative + {"bool", true, pipeline.Uint64Param(0), pipeline.ErrBadInput}, + {"negative int", int(-123), pipeline.Uint64Param(0), pipeline.ErrBadInput}, + {"negative int8", int8(-123), pipeline.Uint64Param(0), pipeline.ErrBadInput}, + {"negative int16", int16(-123), pipeline.Uint64Param(0), pipeline.ErrBadInput}, + {"negative int32", int32(-123), pipeline.Uint64Param(0), pipeline.ErrBadInput}, + {"negative int64", int64(-123), pipeline.Uint64Param(0), pipeline.ErrBadInput}, + {"negative float64", float64(-123), pipeline.Uint64Param(0), pipeline.ErrBadInput}, + {"out of bounds float64", math.MaxFloat64, pipeline.Uint64Param(0), pipeline.ErrBadInput}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.Uint64Param + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + if test.err == nil { + require.Equal(t, test.expected, p) + } + }) + } +} + +func 
TestMaybeUint64Param_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + // positive + {"string", "123", pipeline.NewMaybeUint64Param(123, true), nil}, + {"int", int(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"int8", int8(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"int16", int16(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"int32", int32(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"int64", int64(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"uint", uint(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"uint8", uint8(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"uint16", uint16(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"uint32", uint32(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"uint64", uint64(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"float64", float64(123), pipeline.NewMaybeUint64Param(123, true), nil}, + {"empty string", "", pipeline.NewMaybeUint64Param(0, false), nil}, + // negative + {"bool", true, pipeline.NewMaybeUint64Param(0, false), pipeline.ErrBadInput}, + {"negative int", int(-123), pipeline.NewMaybeUint64Param(0, false), pipeline.ErrBadInput}, + {"negative int8", int8(-123), pipeline.NewMaybeUint64Param(0, false), pipeline.ErrBadInput}, + {"negative int16", int16(-123), pipeline.NewMaybeUint64Param(0, false), pipeline.ErrBadInput}, + {"negative int32", int32(-123), pipeline.NewMaybeUint64Param(0, false), pipeline.ErrBadInput}, + {"negative int64", int64(-123), pipeline.NewMaybeUint64Param(0, false), pipeline.ErrBadInput}, + {"negative float64", float64(-123), pipeline.NewMaybeUint64Param(0, false), pipeline.ErrBadInput}, + {"out of bounds float64", math.MaxFloat64, pipeline.NewMaybeUint64Param(0, false), pipeline.ErrBadInput}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.MaybeUint64Param 
+ err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + if err == nil { + require.Equal(t, test.expected, p) + } + }) + } +} + +func TestMaybeBigIntParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + fromInt := func(n int64) pipeline.MaybeBigIntParam { + return pipeline.NewMaybeBigIntParam(big.NewInt(n)) + } + + intDecimal := *mustDecimal(t, "123") + floatDecimal := *mustDecimal(t, "123.45") + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + // positive + {"string", "123", fromInt(123), nil}, + {"empty string", "", pipeline.NewMaybeBigIntParam(nil), nil}, + {"nil", nil, pipeline.NewMaybeBigIntParam(nil), nil}, + {"*big.Int", big.NewInt(123), fromInt(123), nil}, + {"int", int(123), fromInt(123), nil}, + {"int8", int8(123), fromInt(123), nil}, + {"int16", int16(123), fromInt(123), nil}, + {"int32", int32(123), fromInt(123), nil}, + {"int64", int64(123), fromInt(123), nil}, + {"uint", uint(123), fromInt(123), nil}, + {"uint8", uint8(123), fromInt(123), nil}, + {"uint16", uint16(123), fromInt(123), nil}, + {"uint32", uint32(123), fromInt(123), nil}, + {"uint64", uint64(123), fromInt(123), nil}, + {"float64", float64(123), fromInt(123), nil}, + {"float64", float64(-123), fromInt(-123), nil}, + {"decimal.Decimal", intDecimal, fromInt(123), nil}, + {"*decimal.Decimal", &intDecimal, fromInt(123), nil}, + // negative + {"bool", true, pipeline.NewMaybeBigIntParam(nil), pipeline.ErrBadInput}, + {"negative out of bound float64", -math.MaxFloat64, pipeline.NewMaybeBigIntParam(nil), pipeline.ErrBadInput}, + {"positive out of bound float64", math.MaxFloat64, pipeline.NewMaybeBigIntParam(nil), pipeline.ErrBadInput}, + {"non-integer decimal.Decimal", floatDecimal, pipeline.NewMaybeBigIntParam(nil), pipeline.ErrBadInput}, + {"non-integer *decimal.Decimal", &floatDecimal, pipeline.NewMaybeBigIntParam(nil), pipeline.ErrBadInput}, + } + + for _, test := range tests { + t.Run(test.name, 
func(t *testing.T) { + var p pipeline.MaybeBigIntParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + if test.err == nil { + require.Equal(t, test.expected, p) + } + }) + } +} + +func TestMaybeInt32Param_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"string", "123", pipeline.NewMaybeInt32Param(123, true), nil}, + {"int", int(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"int8", int8(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"int16", int16(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"int32", int32(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"int64", int64(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"uint", uint(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"uint8", uint8(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"uint16", uint16(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"uint32", uint32(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"uint64", uint64(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"float64", float64(123), pipeline.NewMaybeInt32Param(123, true), nil}, + {"bool", true, pipeline.NewMaybeInt32Param(0, false), pipeline.ErrBadInput}, + {"empty string", "", pipeline.NewMaybeInt32Param(0, false), nil}, + {"string overflow", "100000000000", pipeline.NewMaybeInt32Param(0, false), pipeline.ErrBadInput}, + {"int64 overflow", int64(123 << 32), pipeline.NewMaybeInt32Param(0, false), pipeline.ErrBadInput}, + {"negative int64 overflow", -int64(123 << 32), pipeline.NewMaybeInt32Param(0, false), pipeline.ErrBadInput}, + {"uint64 overflow", uint64(123 << 32), pipeline.NewMaybeInt32Param(0, false), pipeline.ErrBadInput}, + {"float overflow", float64(123 << 32), pipeline.NewMaybeInt32Param(0, false), pipeline.ErrBadInput}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p 
pipeline.MaybeInt32Param + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + +func TestBoolParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"string true", "true", pipeline.BoolParam(true), nil}, + {"string false", "false", pipeline.BoolParam(false), nil}, + {"bool true", true, pipeline.BoolParam(true), nil}, + {"bool false", false, pipeline.BoolParam(false), nil}, + {"int", int8(123), pipeline.BoolParam(false), pipeline.ErrBadInput}, + {"*object", mustNewObjectParam(t, true), pipeline.BoolParam(true), nil}, + {"object", *mustNewObjectParam(t, true), pipeline.BoolParam(true), nil}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.BoolParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + +func TestDecimalParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + var nilObjectParam *pipeline.ObjectParam + d := decimal.NewFromFloat(123.45) + dNull := decimal.Decimal{} + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + // valid + {"string", "123.45", pipeline.DecimalParam(d), nil}, + {"float32", float32(123.45), pipeline.DecimalParam(d), nil}, + {"float64", float64(123.45), pipeline.DecimalParam(d), nil}, + {"object", mustNewObjectParam(t, 123.45), pipeline.DecimalParam(d), nil}, + // invalid + {"bool", false, pipeline.DecimalParam(dNull), pipeline.ErrBadInput}, + {"nil", nil, pipeline.DecimalParam(dNull), pipeline.ErrBadInput}, + {"nil ObjectParam", nilObjectParam, pipeline.DecimalParam(dNull), pipeline.ErrBadInput}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.DecimalParam + err := p.UnmarshalPipelineParam(test.input) + 
require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + +func TestURLParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + good, err := url.ParseRequestURI("https://chain.link/foo?bar=sergey") + require.NoError(t, err) + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"good", "https://chain.link/foo?bar=sergey", pipeline.URLParam(*good), nil}, + {"bad", "asdlkfjlskdfj", pipeline.URLParam(url.URL{}), pipeline.ErrBadInput}, + {"bool", true, pipeline.URLParam(url.URL{}), pipeline.ErrBadInput}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.URLParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + +func TestMapParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + var nilObjectParam *pipeline.ObjectParam + + inputStr := ` + { + "chain": {"abc": "def"}, + "link": { + "123": "satoshi", + "sergey": "def" + } + }` + + inputMap := map[string]interface{}{ + "chain": map[string]interface{}{ + "abc": "def", + }, + "link": map[string]interface{}{ + "sergey": "def", + "123": "satoshi", + }, + } + + expected := pipeline.MapParam{ + "chain": map[string]interface{}{ + "abc": "def", + }, + "link": map[string]interface{}{ + "sergey": "def", + "123": "satoshi", + }, + } + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + // valid + {"from string", inputStr, expected, nil}, + {"from []byte", []byte(inputStr), expected, nil}, + {"from map", inputMap, expected, nil}, + {"from nil", nil, pipeline.MapParam(nil), nil}, + {"from *object", mustNewObjectParam(t, inputMap), expected, nil}, + {"from object", *mustNewObjectParam(t, inputMap), expected, nil}, + // invalid + {"wrong type", 123, pipeline.MapParam(nil), pipeline.ErrBadInput}, + {"nil ObjectParam", nilObjectParam, pipeline.MapParam(nil), 
pipeline.ErrBadInput}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.MapParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + +func TestSliceParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"[]interface{}", []interface{}{1, 2, 3}, pipeline.SliceParam([]interface{}{1, 2, 3}), nil}, + {"[]byte", []byte(`[1, 2, 3]`), pipeline.SliceParam([]interface{}{float64(1), float64(2), float64(3)}), nil}, + {"string", `[1, 2, 3]`, pipeline.SliceParam([]interface{}{float64(1), float64(2), float64(3)}), nil}, + {"bool", true, pipeline.SliceParam(nil), pipeline.ErrBadInput}, + {"nil", nil, pipeline.SliceParam(nil), nil}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.SliceParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + +func TestHashSliceParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + hash1 := common.HexToHash("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + hash2 := common.HexToHash("0xcafebabecafebabecafebabecafebabecafebabedeadbeefdeadbeefdeadbeef") + expected := pipeline.HashSliceParam{hash1, hash2} + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"json", `[ "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", "0xcafebabecafebabecafebabecafebabecafebabedeadbeefdeadbeefdeadbeef" ]`, expected, nil}, + {"[]common.Hash", []common.Hash{hash1, hash2}, expected, nil}, + {"[]interface{} with common.Hash", []interface{}{hash1, hash2}, expected, nil}, + {"[]interface{} with strings", []interface{}{hash1.String(), hash2.String()}, expected, nil}, + {"[]interface{} with []byte", 
[]interface{}{[]byte(hash1.String()), []byte(hash2.String())}, expected, nil}, + {"nil", nil, pipeline.HashSliceParam(nil), nil}, + {"bad json", `[ "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" "0xcafebabecafebabecafebabecafebabecafebabedeadbeefdeadbeefdeadbeef" ]`, nil, pipeline.ErrBadInput}, + {"[]interface{} with bad types", []interface{}{123, true}, nil, pipeline.ErrBadInput}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.HashSliceParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + if test.expected != nil { + require.Equal(t, test.expected, p) + } + }) + } +} + +func TestSliceParam_FilterErrors(t *testing.T) { + t.Parallel() + + s := pipeline.SliceParam{"foo", errors.New("bar"), "baz"} + vals, n := s.FilterErrors() + require.Equal(t, 1, n) + require.Equal(t, pipeline.SliceParam{"foo", "baz"}, vals) +} + +func TestDecimalSliceParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + expected := pipeline.DecimalSliceParam{*mustDecimal(t, "1.1"), *mustDecimal(t, "2.2"), *mustDecimal(t, "3.3")} + decimalsSlice := []decimal.Decimal{*mustDecimal(t, "1.1"), *mustDecimal(t, "2.2"), *mustDecimal(t, "3.3")} + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"[]interface{}", []interface{}{1.1, "2.2", *mustDecimal(t, "3.3")}, expected, nil}, + {"string", `[1.1, "2.2", 3.3]`, expected, nil}, + {"[]byte", `[1.1, "2.2", 3.3]`, expected, nil}, + {"[]interface{} with error", `[1.1, true, "abc"]`, pipeline.DecimalSliceParam(nil), pipeline.ErrBadInput}, + {"bool", true, pipeline.DecimalSliceParam(nil), pipeline.ErrBadInput}, + {"nil", nil, pipeline.DecimalSliceParam(nil), nil}, + {"[]decimal.Decimal", decimalsSlice, expected, nil}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.DecimalSliceParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, 
test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + +func TestJSONPathParam_UnmarshalPipelineParam(t *testing.T) { + t.Parallel() + + expected := pipeline.JSONPathParam{"1.1", "2.2", "3.3", "sergey"} + + tests := []struct { + name string + input interface{} + expected interface{} + err error + }{ + {"[]interface{}", []interface{}{"1.1", "2.2", "3.3", "sergey"}, expected, nil}, + {"string", `1.1,2.2,3.3,sergey`, expected, nil}, + {"[]byte", []byte(`1.1,2.2,3.3,sergey`), expected, nil}, + {"bool", true, pipeline.JSONPathParam(nil), pipeline.ErrBadInput}, + {"nil", nil, pipeline.JSONPathParam(nil), nil}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var p pipeline.JSONPathParam + err := p.UnmarshalPipelineParam(test.input) + require.Equal(t, test.err, errors.Cause(err)) + require.Equal(t, test.expected, p) + }) + } +} + +func TestResolveValue(t *testing.T) { + t.Parallel() + + t.Run("calls getters in order until the first one that returns without ErrParameterEmpty", func(t *testing.T) { + param := mocks.NewPipelineParamUnmarshaler(t) + param.On("UnmarshalPipelineParam", mock.Anything).Return(nil) + + called := []int{} + getters := []pipeline.GetterFunc{ + func() (interface{}, error) { + called = append(called, 0) + return nil, errors.Wrap(pipeline.ErrParameterEmpty, "make sure it still notices when wrapped") + }, + func() (interface{}, error) { + called = append(called, 1) + return 123, nil + }, + func() (interface{}, error) { + called = append(called, 2) + return 123, nil + }, + } + + err := pipeline.ResolveParam(param, getters) + require.NoError(t, err) + require.Equal(t, []int{0, 1}, called) + }) + + t.Run("returns any GetterFunc error that isn't ErrParameterEmpty", func(t *testing.T) { + param := mocks.NewPipelineParamUnmarshaler(t) + called := []int{} + expectedErr := errors.New("some other issue") + + getters := []pipeline.GetterFunc{ + func() (interface{}, error) { + called = append(called, 0) + 
return nil, expectedErr + }, + func() (interface{}, error) { + called = append(called, 1) + return 123, nil + }, + func() (interface{}, error) { + called = append(called, 2) + return 123, nil + }, + } + + err := pipeline.ResolveParam(param, getters) + require.Equal(t, expectedErr, err) + require.Equal(t, []int{0}, called) + }) + + t.Run("calls UnmarshalPipelineParam with the value obtained from the GetterFuncs", func(t *testing.T) { + expectedValue := 123 + + param := mocks.NewPipelineParamUnmarshaler(t) + param.On("UnmarshalPipelineParam", expectedValue).Return(nil) + + getters := []pipeline.GetterFunc{ + func() (interface{}, error) { + return expectedValue, nil + }, + } + + err := pipeline.ResolveParam(param, getters) + require.NoError(t, err) + }) + + t.Run("returns any error returned by UnmarshalPipelineParam", func(t *testing.T) { + expectedValue := 123 + expectedErr := errors.New("some issue") + + param := mocks.NewPipelineParamUnmarshaler(t) + param.On("UnmarshalPipelineParam", expectedValue).Return(expectedErr) + + getters := []pipeline.GetterFunc{ + func() (interface{}, error) { + return expectedValue, nil + }, + } + + err := pipeline.ResolveParam(param, getters) + require.Equal(t, expectedErr, err) + }) +} diff --git a/core/services/pipeline/test_helpers_test.go b/core/services/pipeline/test_helpers_test.go new file mode 100644 index 00000000..4c8e602d --- /dev/null +++ b/core/services/pipeline/test_helpers_test.go @@ -0,0 +1,61 @@ +package pipeline_test + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + + "github.com/jmoiron/sqlx" +) + +func fakeExternalAdapter(t *testing.T, expectedRequest, response interface{}) http.Handler { + 
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Helper() + + defer r.Body.Close() + + body, err := io.ReadAll(r.Body) + require.NoError(t, err) + + expectedBody := &bytes.Buffer{} + err = json.NewEncoder(expectedBody).Encode(expectedRequest) + require.NoError(t, err) + require.Equal(t, string(bytes.TrimSpace(expectedBody.Bytes())), string(body)) + + w.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w).Encode(response) + require.NoError(t, err) + }) +} + +func makeBridge(t *testing.T, db *sqlx.DB, expectedRequest, response interface{}, cfg pg.QConfig) (*httptest.Server, bridges.BridgeType) { + t.Helper() + + server := httptest.NewServer(fakeExternalAdapter(t, expectedRequest, response)) + + bridgeFeedURL, err := url.ParseRequestURI(server.URL) + require.NoError(t, err) + + _, bt := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{URL: bridgeFeedURL.String()}, cfg) + + return server, *bt +} + +func mustNewObjectParam(t *testing.T, val interface{}) *pipeline.ObjectParam { + var value pipeline.ObjectParam + if err := value.UnmarshalPipelineParam(val); err != nil { + t.Fatalf("failed to init ObjectParam from %v, err: %v", val, err) + } + return &value +} diff --git a/core/services/pipeline/variables.go b/core/services/pipeline/variables.go new file mode 100644 index 00000000..aaaa0e3f --- /dev/null +++ b/core/services/pipeline/variables.go @@ -0,0 +1,94 @@ +package pipeline + +import ( + "regexp" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +var ( + ErrKeypathNotFound = errors.New("keypath not found") + ErrVarsRoot = errors.New("cannot get/set the root of a pipeline.Vars") + ErrVarsSetNested = errors.New("cannot set a nested key of a pipeline.Vars") + + variableRegexp = regexp.MustCompile(`\$\(\s*([a-zA-Z0-9_\.]+)\s*\)`) +) + +type Vars struct { + vars map[string]interface{} +} + +// NewVarsFrom creates new Vars from the given map. +// If the map is nil, a new map instance will be created. 
+func NewVarsFrom(m map[string]interface{}) Vars { + if m == nil { + m = make(map[string]interface{}) + } + return Vars{vars: m} +} + +// Get returns the value for the given keypath or error. +// The keypath can consist of one or more parts, e.g. "foo" or "foo.6.a.b". +// Every part except for the first one can be an index of a slice. +func (vars Vars) Get(keypathStr string) (interface{}, error) { + keypathStr = strings.TrimSpace(keypathStr) + keypath, err := NewKeypathFromString(keypathStr) + if err != nil { + return nil, err + } + if len(keypath.Parts) == 0 { + return nil, ErrVarsRoot + } + + var exists bool + var currVal interface{} = vars.vars + for i, part := range keypath.Parts { + switch v := currVal.(type) { + case map[string]interface{}: + currVal, exists = v[part] + if !exists { + return nil, errors.Wrapf(ErrKeypathNotFound, "key %v (segment %v in keypath %v)", part, i, keypathStr) + } + case []interface{}: + idx, err := strconv.ParseInt(part, 10, 64) + if err != nil { + return nil, errors.Wrapf(ErrKeypathNotFound, "could not parse key as integer: %v", err) + } else if idx < 0 || idx > int64(len(v)-1) { + return nil, errors.Wrapf(ErrIndexOutOfRange, "index %v out of range (segment %v of length %v in keypath %v)", idx, i, len(v), keypathStr) + } + currVal = v[idx] + default: + return nil, errors.Wrapf(ErrKeypathNotFound, "value at key '%v' is a %T, not a map or slice", part, currVal) + } + } + + return currVal, nil +} + +// Set sets a top-level variable specified by dotID. +// Returns error if either dotID is empty or it is a compound keypath. +func (vars Vars) Set(dotID string, value interface{}) error { + dotID = strings.TrimSpace(dotID) + if len(dotID) == 0 { + return ErrVarsRoot + } else if strings.Contains(dotID, KeypathSeparator) { + return errors.Wrapf(ErrVarsSetNested, "%s", dotID) + } + + vars.vars[dotID] = value + + return nil +} + +// Copy makes a copy of Vars by copying the underlying map. 
+// Used by scheduler for new tasks to avoid data races. +func (vars Vars) Copy() Vars { + newVars := make(map[string]interface{}) + // No need to copy recursively, because only the top-level map is mutable (see Set()). + for k, v := range vars.vars { + newVars[k] = v + } + return NewVarsFrom(newVars) +} diff --git a/core/services/pipeline/variables_test.go b/core/services/pipeline/variables_test.go new file mode 100644 index 00000000..8e2f3c8d --- /dev/null +++ b/core/services/pipeline/variables_test.go @@ -0,0 +1,138 @@ +package pipeline_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +func TestVars_Set(t *testing.T) { + t.Parallel() + + vars := pipeline.NewVarsFrom(nil) + + err := vars.Set("xyz", "foo") + require.NoError(t, err) + v, err := vars.Get("xyz") + require.NoError(t, err) + require.Equal(t, "foo", v) + + err = vars.Set(" ", "foo") + require.ErrorIs(t, err, pipeline.ErrVarsRoot) + + err = vars.Set("x.y", "foo") + require.ErrorIs(t, err, pipeline.ErrVarsSetNested) +} + +func TestVars_Get(t *testing.T) { + t.Parallel() + + t.Run("gets the values at keypaths that exist", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []interface{}{1, "bar", false}, + "bar": 321, + }) + + got, err := vars.Get("foo.1") + require.NoError(t, err) + require.Equal(t, "bar", got) + + got, err = vars.Get("bar") + require.NoError(t, err) + require.Equal(t, 321, got) + }) + + t.Run("gets the value for a keypath with more than 2 parts", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "bar": map[string]interface{}{ + "plugin": 123, + }, + }, + }) + got, err := vars.Get("foo.bar.plugin") + require.NoError(t, err) + require.Equal(t, 123, got) + }) + + t.Run("gets the value with indices in the keypath", func(t *testing.T) { + vars := 
pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []interface{}{ + nil, + map[string]interface{}{ + "plugin": 456, + }, + }, + }) + got, err := vars.Get("foo.1.plugin") + require.NoError(t, err) + require.Equal(t, 456, got) + }) + + t.Run("errors when getting the values at keypaths that don't exist", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []interface{}{1, "bar", false}, + "bar": 321, + }) + + _, err := vars.Get("foo.blah") + require.Equal(t, pipeline.ErrKeypathNotFound, errors.Cause(err)) + }) + + t.Run("errors when getting a value at a keypath where the first part is not a map/slice", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": 123, + }) + _, err := vars.Get("foo.bar") + require.Equal(t, pipeline.ErrKeypathNotFound, errors.Cause(err)) + }) + + t.Run("errors when getting a value at a keypath where the second part is not a map/slice", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "bar": 123, + }, + }) + _, err := vars.Get("foo.bar.baz") + require.Equal(t, pipeline.ErrKeypathNotFound, errors.Cause(err)) + }) + + t.Run("errors when using a keypath with empty segments", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": map[string]interface{}{ + "bar": 123, + }, + }) + _, err := vars.Get("foo..bar") + require.Equal(t, pipeline.ErrWrongKeypath, errors.Cause(err)) + }) + + t.Run("index out of range", func(t *testing.T) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "foo": []interface{}{1, "bar", false}, + }) + + _, err := vars.Get("foo.4") + require.ErrorIs(t, err, pipeline.ErrIndexOutOfRange) + + _, err = vars.Get("foo.-1") + require.ErrorIs(t, err, pipeline.ErrIndexOutOfRange) + }) +} + +func TestVars_Copy(t *testing.T) { + t.Parallel() + + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "nested": map[string]interface{}{ + "foo": "zet", + }, + "bar": 321, + }) + + 
varsCopy := vars.Copy() + require.Equal(t, vars, varsCopy) +} diff --git a/core/services/promreporter/prom_reporter.go b/core/services/promreporter/prom_reporter.go new file mode 100644 index 00000000..6833ad63 --- /dev/null +++ b/core/services/promreporter/prom_reporter.go @@ -0,0 +1,274 @@ +package promreporter + +import ( + "context" + "database/sql" + "fmt" + "math/big" + "sync" + "time" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/multierr" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +//go:generate mockery --quiet --name PrometheusBackend --output ../../internal/mocks/ --case=underscore +type ( + promReporter struct { + services.StateMachine + db *sql.DB + chains legacyevm.LegacyChainContainer + lggr logger.Logger + backend PrometheusBackend + newHeads *mailbox.Mailbox[*evmtypes.Head] + chStop services.StopChan + wgDone sync.WaitGroup + reportPeriod time.Duration + } + + PrometheusBackend interface { + SetUnconfirmedTransactions(*big.Int, int64) + SetMaxUnconfirmedAge(*big.Int, float64) + SetMaxUnconfirmedBlocks(*big.Int, int64) + SetPipelineRunsQueued(n int) + SetPipelineTaskRunsQueued(n int) + } + + defaultBackend struct{} +) + +var ( + promUnconfirmedTransactions = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "unconfirmed_transactions", + Help: "Number of currently unconfirmed transactions", + }, []string{"evmChainID"}) + promMaxUnconfirmedAge = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "max_unconfirmed_tx_age", + Help: "The length of time the oldest unconfirmed 
transaction has been in that state (in seconds). Will be 0 if there are no unconfirmed transactions.", + }, []string{"evmChainID"}) + promMaxUnconfirmedBlocks = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "max_unconfirmed_blocks", + Help: "The max number of blocks any currently unconfirmed transaction has been unconfirmed for", + }, []string{"evmChainID"}) + promPipelineRunsQueued = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "pipeline_runs_queued", + Help: "The total number of pipeline runs that are awaiting execution", + }) + promPipelineTaskRunsQueued = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "pipeline_task_runs_queued", + Help: "The total number of pipeline task runs that are awaiting execution", + }) +) + +func (defaultBackend) SetUnconfirmedTransactions(evmChainID *big.Int, n int64) { + promUnconfirmedTransactions.WithLabelValues(evmChainID.String()).Set(float64(n)) +} + +func (defaultBackend) SetMaxUnconfirmedAge(evmChainID *big.Int, s float64) { + promMaxUnconfirmedAge.WithLabelValues(evmChainID.String()).Set(s) +} + +func (defaultBackend) SetMaxUnconfirmedBlocks(evmChainID *big.Int, n int64) { + promMaxUnconfirmedBlocks.WithLabelValues(evmChainID.String()).Set(float64(n)) +} + +func (defaultBackend) SetPipelineRunsQueued(n int) { + promPipelineTaskRunsQueued.Set(float64(n)) +} + +func (defaultBackend) SetPipelineTaskRunsQueued(n int) { + promPipelineRunsQueued.Set(float64(n)) +} + +func NewPromReporter(db *sql.DB, chainContainer legacyevm.LegacyChainContainer, lggr logger.Logger, opts ...interface{}) *promReporter { + var backend PrometheusBackend = defaultBackend{} + period := 15 * time.Second + for _, opt := range opts { + switch v := opt.(type) { + case time.Duration: + period = v + case PrometheusBackend: + backend = v + } + } + + chStop := make(chan struct{}) + return &promReporter{ + db: db, + chains: chainContainer, + lggr: lggr.Named("PromReporter"), + backend: backend, + newHeads: mailbox.NewSingle[*evmtypes.Head](), + chStop: 
chStop, + reportPeriod: period, + } +} + +// Start starts PromReporter. +func (pr *promReporter) Start(context.Context) error { + return pr.StartOnce("PromReporter", func() error { + pr.wgDone.Add(1) + go pr.eventLoop() + return nil + }) +} + +func (pr *promReporter) Close() error { + return pr.StopOnce("PromReporter", func() error { + close(pr.chStop) + pr.wgDone.Wait() + return nil + }) +} +func (pr *promReporter) Name() string { + return pr.lggr.Name() +} + +func (pr *promReporter) HealthReport() map[string]error { + return map[string]error{pr.Name(): pr.Healthy()} +} + +func (pr *promReporter) OnNewLongestChain(ctx context.Context, head *evmtypes.Head) { + pr.newHeads.Deliver(head) +} + +func (pr *promReporter) eventLoop() { + pr.lggr.Debug("Starting event loop") + defer pr.wgDone.Done() + ctx, cancel := pr.chStop.NewCtx() + defer cancel() + for { + select { + case <-pr.newHeads.Notify(): + head, exists := pr.newHeads.Retrieve() + if !exists { + continue + } + pr.reportHeadMetrics(ctx, head) + case <-time.After(pr.reportPeriod): + if err := errors.Wrap(pr.reportPipelineRunStats(ctx), "reportPipelineRunStats failed"); err != nil { + pr.lggr.Errorw("Error reporting prometheus metrics", "err", err) + } + + case <-pr.chStop: + return + } + } +} + +func (pr *promReporter) getTxm(evmChainID *big.Int) (txmgr.TxManager, error) { + chain, err := pr.chains.Get(evmChainID.String()) + if err != nil { + return nil, fmt.Errorf("failed to get chain: %w", err) + } + return chain.TxManager(), nil +} + +func (pr *promReporter) reportHeadMetrics(ctx context.Context, head *evmtypes.Head) { + evmChainID := head.EVMChainID.ToInt() + err := multierr.Combine( + errors.Wrap(pr.reportPendingEthTxes(ctx, evmChainID), "reportPendingEthTxes failed"), + errors.Wrap(pr.reportMaxUnconfirmedAge(ctx, evmChainID), "reportMaxUnconfirmedAge failed"), + errors.Wrap(pr.reportMaxUnconfirmedBlocks(ctx, head), "reportMaxUnconfirmedBlocks failed"), + ) + + if err != nil && ctx.Err() == nil { + 
pr.lggr.Errorw("Error reporting prometheus metrics", "err", err) + } +} + +func (pr *promReporter) reportPendingEthTxes(ctx context.Context, evmChainID *big.Int) (err error) { + txm, err := pr.getTxm(evmChainID) + if err != nil { + return fmt.Errorf("failed to get txm: %w", err) + } + + unconfirmed, err := txm.CountTransactionsByState(ctx, txmgrcommon.TxUnconfirmed) + if err != nil { + return fmt.Errorf("failed to query for unconfirmed eth_tx count: %w", err) + } + pr.backend.SetUnconfirmedTransactions(evmChainID, int64(unconfirmed)) + return nil +} + +func (pr *promReporter) reportMaxUnconfirmedAge(ctx context.Context, evmChainID *big.Int) (err error) { + txm, err := pr.getTxm(evmChainID) + if err != nil { + return fmt.Errorf("failed to get txm: %w", err) + } + + broadcastAt, err := txm.FindEarliestUnconfirmedBroadcastTime(ctx) + if err != nil { + return fmt.Errorf("failed to query for min broadcast time: %w", err) + } + + var seconds float64 + if broadcastAt.Valid { + seconds = time.Since(broadcastAt.ValueOrZero()).Seconds() + } + pr.backend.SetMaxUnconfirmedAge(evmChainID, seconds) + return nil +} + +func (pr *promReporter) reportMaxUnconfirmedBlocks(ctx context.Context, head *evmtypes.Head) (err error) { + txm, err := pr.getTxm(head.EVMChainID.ToInt()) + if err != nil { + return fmt.Errorf("failed to get txm: %w", err) + } + + earliestUnconfirmedTxBlock, err := txm.FindEarliestUnconfirmedTxAttemptBlock(ctx) + if err != nil { + return fmt.Errorf("failed to query for earliest unconfirmed tx block: %w", err) + } + + var blocksUnconfirmed int64 + if !earliestUnconfirmedTxBlock.IsZero() { + blocksUnconfirmed = head.Number - earliestUnconfirmedTxBlock.ValueOrZero() + } + pr.backend.SetMaxUnconfirmedBlocks(head.EVMChainID.ToInt(), blocksUnconfirmed) + return nil +} + +func (pr *promReporter) reportPipelineRunStats(ctx context.Context) (err error) { + rows, err := pr.db.QueryContext(ctx, ` +SELECT pipeline_run_id FROM pipeline_task_runs WHERE finished_at IS NULL +`) + 
if err != nil { + return errors.Wrap(err, "failed to query for pipeline_run_id") + } + defer func() { + err = multierr.Combine(err, rows.Close()) + }() + + pipelineTaskRunsQueued := 0 + pipelineRunsQueuedSet := make(map[int32]struct{}) + for rows.Next() { + var pipelineRunID int32 + if err = rows.Scan(&pipelineRunID); err != nil { + return errors.Wrap(err, "unexpected error scanning row") + } + pipelineTaskRunsQueued++ + pipelineRunsQueuedSet[pipelineRunID] = struct{}{} + } + if err = rows.Err(); err != nil { + return err + } + pipelineRunsQueued := len(pipelineRunsQueuedSet) + + pr.backend.SetPipelineTaskRunsQueued(pipelineTaskRunsQueued) + pr.backend.SetPipelineRunsQueued(pipelineRunsQueued) + + return nil +} diff --git a/core/services/promreporter/prom_reporter_test.go b/core/services/promreporter/prom_reporter_test.go new file mode 100644 index 00000000..9b1ba364 --- /dev/null +++ b/core/services/promreporter/prom_reporter_test.go @@ -0,0 +1,152 @@ +package promreporter_test + +import ( + "math/big" + "sync/atomic" + "testing" + "time" + + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/promreporter" +) + +func newHead() evmtypes.Head { + return evmtypes.Head{Number: 42, EVMChainID: ubig.NewI(0)} +} + +func newLegacyChainContainer(t *testing.T, db *sqlx.DB) legacyevm.LegacyChainContainer { + config, dbConfig, evmConfig := txmgr.MakeTestConfigs(t) + keyStore := cltest.NewKeyStore(t, db, dbConfig).Eth() + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + estimator := gas.NewEstimator(logger.TestLogger(t), ethClient, config, evmConfig.GasEstimator()) + lggr := logger.TestLogger(t) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) + + txm, err := txmgr.NewTxm( + db, + evmConfig, + evmConfig.GasEstimator(), + evmConfig.Transactions(), + dbConfig, + dbConfig.Listener(), + ethClient, + lggr, + lp, + keyStore, + estimator) + require.NoError(t, err) + + cfg := configtest.NewGeneralConfig(t, nil) + return cltest.NewLegacyChainsWithMockChainAndTxManager(t, ethClient, cfg, txm) +} + +func Test_PromReporter_OnNewLongestChain(t *testing.T) { + t.Run("with nothing in the database", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + + backend := mocks.NewPrometheusBackend(t) + reporter := promreporter.NewPromReporter(db.DB, newLegacyChainContainer(t, db), logger.TestLogger(t), backend, 10*time.Millisecond) + + var subscribeCalls atomic.Int32 + + backend.On("SetUnconfirmedTransactions", big.NewInt(0), int64(0)).Return() + backend.On("SetMaxUnconfirmedAge", big.NewInt(0), float64(0)).Return() + backend.On("SetMaxUnconfirmedBlocks", big.NewInt(0), int64(0)).Return() + backend.On("SetPipelineTaskRunsQueued", 0).Return() + backend.On("SetPipelineRunsQueued", 0). + Run(func(args mock.Arguments) { + subscribeCalls.Add(1) + }). 
+ Return() + + servicetest.Run(t, reporter) + + head := newHead() + reporter.OnNewLongestChain(testutils.Context(t), &head) + + require.Eventually(t, func() bool { return subscribeCalls.Load() >= 1 }, 12*time.Second, 100*time.Millisecond) + }) + + t.Run("with unconfirmed evm.txes", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + txStore := cltest.NewTestTxStore(t, db, cfg.Database()) + ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth() + _, fromAddress := cltest.MustInsertRandomKey(t, ethKeyStore) + + var subscribeCalls atomic.Int32 + + backend := mocks.NewPrometheusBackend(t) + backend.On("SetUnconfirmedTransactions", big.NewInt(0), int64(3)).Return() + backend.On("SetMaxUnconfirmedAge", big.NewInt(0), mock.MatchedBy(func(s float64) bool { + return s > 0 + })).Return() + backend.On("SetMaxUnconfirmedBlocks", big.NewInt(0), int64(35)).Return() + backend.On("SetPipelineTaskRunsQueued", 0).Return() + backend.On("SetPipelineRunsQueued", 0). + Run(func(args mock.Arguments) { + subscribeCalls.Add(1) + }). 
+ Return() + reporter := promreporter.NewPromReporter(db.DB, newLegacyChainContainer(t, db), logger.TestLogger(t), backend, 10*time.Millisecond) + servicetest.Run(t, reporter) + + etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 0, fromAddress) + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 1, fromAddress) + cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 2, fromAddress) + require.NoError(t, txStore.UpdateTxAttemptBroadcastBeforeBlockNum(testutils.Context(t), etx.ID, 7)) + + head := newHead() + reporter.OnNewLongestChain(testutils.Context(t), &head) + + require.Eventually(t, func() bool { return subscribeCalls.Load() >= 1 }, 12*time.Second, 100*time.Millisecond) + }) + + t.Run("with unfinished pipeline task runs", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + pgtest.MustExec(t, db, `SET CONSTRAINTS pipeline_task_runs_pipeline_run_id_fkey DEFERRED`) + + backend := mocks.NewPrometheusBackend(t) + reporter := promreporter.NewPromReporter(db.DB, newLegacyChainContainer(t, db), logger.TestLogger(t), backend, 10*time.Millisecond) + + cltest.MustInsertUnfinishedPipelineTaskRun(t, db, 1) + cltest.MustInsertUnfinishedPipelineTaskRun(t, db, 1) + cltest.MustInsertUnfinishedPipelineTaskRun(t, db, 2) + + var subscribeCalls atomic.Int32 + + backend.On("SetUnconfirmedTransactions", big.NewInt(0), int64(0)).Return() + backend.On("SetMaxUnconfirmedAge", big.NewInt(0), float64(0)).Return() + backend.On("SetMaxUnconfirmedBlocks", big.NewInt(0), int64(0)).Return() + backend.On("SetPipelineTaskRunsQueued", 3).Return() + backend.On("SetPipelineRunsQueued", 2). + Run(func(args mock.Arguments) { + subscribeCalls.Add(1) + }). 
+ Return() + servicetest.Run(t, reporter) + + head := newHead() + reporter.OnNewLongestChain(testutils.Context(t), &head) + + require.Eventually(t, func() bool { return subscribeCalls.Load() >= 1 }, 12*time.Second, 100*time.Millisecond) + }) +} diff --git a/core/services/relay/evm/address.go b/core/services/relay/evm/address.go new file mode 100644 index 00000000..8c575989 --- /dev/null +++ b/core/services/relay/evm/address.go @@ -0,0 +1,34 @@ +package evm + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +func AccountToAddress(accounts []types.Account) (addresses []common.Address, err error) { + for _, signer := range accounts { + bytes, err := hexutil.Decode(string(signer)) + if err != nil { + return []common.Address{}, errors.Wrap(err, fmt.Sprintf("given address is not valid %s", signer)) + } + if len(bytes) != 20 { + return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer) + } + addresses = append(addresses, common.BytesToAddress(bytes)) + } + return addresses, nil +} + +func OnchainPublicKeyToAddress(publicKeys []types.OnchainPublicKey) (addresses []common.Address, err error) { + for _, signer := range publicKeys { + if len(signer) != 20 { + return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer) + } + addresses = append(addresses, common.BytesToAddress(signer)) + } + return addresses, nil +} diff --git a/core/services/relay/evm/binding.go b/core/services/relay/evm/binding.go new file mode 100644 index 00000000..96e91f6e --- /dev/null +++ b/core/services/relay/evm/binding.go @@ -0,0 +1,15 @@ +package evm + +import ( + "context" + + commontypes "github.com/goplugin/plugin-common/pkg/types" +) + +type readBinding interface { + GetLatestValue(ctx context.Context, params, returnVal any) error + Bind(binding commontypes.BoundContract) error + SetCodec(codec 
commontypes.RemoteCodec) + Register() error + Unregister() error +} diff --git a/core/services/relay/evm/bindings.go b/core/services/relay/evm/bindings.go new file mode 100644 index 00000000..50346108 --- /dev/null +++ b/core/services/relay/evm/bindings.go @@ -0,0 +1,61 @@ +package evm + +import ( + "fmt" + + commontypes "github.com/goplugin/plugin-common/pkg/types" +) + +// key is contract name +type contractBindings map[string]readBindings + +// key is read name +type readBindings map[string]readBinding + +func (b contractBindings) GetReadBinding(contractName, readName string) (readBinding, error) { + rb, rbExists := b[contractName] + if !rbExists { + return nil, fmt.Errorf("%w: no contract named %s", commontypes.ErrInvalidType, contractName) + } + + reader, readerExists := rb[readName] + if !readerExists { + return nil, fmt.Errorf("%w: no readName named %s in contract %s", commontypes.ErrInvalidType, readName, contractName) + } + return reader, nil +} + +func (b contractBindings) AddReadBinding(contractName, readName string, reader readBinding) { + rbs, rbsExists := b[contractName] + if !rbsExists { + rbs = readBindings{} + b[contractName] = rbs + } + rbs[readName] = reader +} + +func (b contractBindings) Bind(boundContracts []commontypes.BoundContract) error { + for _, bc := range boundContracts { + rbs, rbsExist := b[bc.Name] + if !rbsExist { + return fmt.Errorf("%w: no contract named %s", commontypes.ErrInvalidConfig, bc.Name) + } + for _, r := range rbs { + if err := r.Bind(bc); err != nil { + return err + } + } + } + return nil +} + +func (b contractBindings) ForEach(fn func(readBinding) error) error { + for _, rbs := range b { + for _, rb := range rbs { + if err := fn(rb); err != nil { + return err + } + } + } + return nil +} diff --git a/core/services/relay/evm/cap_encoder.go b/core/services/relay/evm/cap_encoder.go new file mode 100644 index 00000000..c3a67301 --- /dev/null +++ b/core/services/relay/evm/cap_encoder.go @@ -0,0 +1,97 @@ +package evm + 
+import ( + "context" + "encoding/json" + "fmt" + + consensustypes "github.com/goplugin/plugin-common/pkg/capabilities/consensus/ocr3/types" + commontypes "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/plugin-common/pkg/values" + abiutil "github.com/goplugin/pluginv3.0/v2/core/chains/evm/abi" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +const ( + abiConfigFieldName = "abi" + encoderName = "user" + idLen = 32 +) + +type capEncoder struct { + codec commontypes.RemoteCodec +} + +var _ consensustypes.Encoder = (*capEncoder)(nil) + +func NewEVMEncoder(config *values.Map) (consensustypes.Encoder, error) { + // parse the "inner" encoder config - user-defined fields + wrappedSelector, err := config.Underlying[abiConfigFieldName].Unwrap() + if err != nil { + return nil, err + } + selectorStr, ok := wrappedSelector.(string) + if !ok { + return nil, fmt.Errorf("expected %s to be a string", abiConfigFieldName) + } + selector, err := abiutil.ParseSignature("inner(" + selectorStr + ")") + if err != nil { + return nil, err + } + jsonSelector, err := json.Marshal(selector.Inputs) + if err != nil { + return nil, err + } + + codecConfig := types.CodecConfig{Configs: map[string]types.ChainCodecConfig{ + encoderName: {TypeABI: string(jsonSelector)}, + }} + c, err := NewCodec(codecConfig) + if err != nil { + return nil, err + } + + return &capEncoder{codec: c}, nil +} + +func (c *capEncoder) Encode(ctx context.Context, input values.Map) ([]byte, error) { + unwrappedInput, err := input.Unwrap() + if err != nil { + return nil, err + } + unwrappedMap, ok := unwrappedInput.(map[string]any) + if !ok { + return nil, fmt.Errorf("expected unwrapped input to be a map") + } + userPayload, err := c.codec.Encode(ctx, unwrappedMap, encoderName) + if err != nil { + return nil, err + } + // prepend workflowID and workflowExecutionID to the encoded user data + workflowIDbytes, executionIDBytes, err := extractIDs(unwrappedMap) + if err != nil { + 
return nil, err + } + return append(append(workflowIDbytes, executionIDBytes...), userPayload...), nil +} + +// extract workflowID and executionID from the input map, validate and align to 32 bytes +// NOTE: consider requiring them to be exactly 32 bytes to avoid issues with padding +func extractIDs(input map[string]any) ([]byte, []byte, error) { + workflowID, ok := input[consensustypes.WorkflowIDFieldName].(string) + if !ok { + return nil, nil, fmt.Errorf("expected %s to be a string", consensustypes.WorkflowIDFieldName) + } + executionID, ok := input[consensustypes.ExecutionIDFieldName].(string) + if !ok { + return nil, nil, fmt.Errorf("expected %s to be a string", consensustypes.ExecutionIDFieldName) + } + if len(workflowID) > 32 || len(executionID) > 32 { + return nil, nil, fmt.Errorf("IDs too long: %d, %d", len(workflowID), len(executionID)) + } + alignedWorkflowID := make([]byte, idLen) + copy(alignedWorkflowID, workflowID) + alignedExecutionID := make([]byte, idLen) + copy(alignedExecutionID, executionID) + return alignedWorkflowID, alignedExecutionID, nil +} diff --git a/core/services/relay/evm/cap_encoder_test.go b/core/services/relay/evm/cap_encoder_test.go new file mode 100644 index 00000000..cebdfd73 --- /dev/null +++ b/core/services/relay/evm/cap_encoder_test.go @@ -0,0 +1,58 @@ +package evm_test + +import ( + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" + + consensustypes "github.com/goplugin/plugin-common/pkg/capabilities/consensus/ocr3/types" + "github.com/goplugin/plugin-common/pkg/values" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +var ( + reportA = []byte{0x01, 0x02, 0x03} + reportB = []byte{0xaa, 0xbb, 0xcc, 0xdd} + workflowID = "my_id" + executionID = "my_execution_id" +) + +func TestEVMEncoder(t *testing.T) { + config := map[string]any{ + "abi": "mercury_reports bytes[]", + } + wrapped, err := values.NewMap(config) + require.NoError(t, 
err) + enc, err := evm.NewEVMEncoder(wrapped) + require.NoError(t, err) + + // output of a DF2.0 aggregator + metadata fields appended by OCR + input := map[string]any{ + "mercury_reports": []any{reportA, reportB}, + consensustypes.WorkflowIDFieldName: workflowID, + consensustypes.ExecutionIDFieldName: executionID, + } + wrapped, err = values.NewMap(input) + require.NoError(t, err) + encoded, err := enc.Encode(testutils.Context(t), *wrapped) + require.NoError(t, err) + + expected := + // start of the outer tuple ((user_fields), workflow_id, workflow_execution_id) + "6d795f6964000000000000000000000000000000000000000000000000000000" + // workflow ID + "6d795f657865637574696f6e5f69640000000000000000000000000000000000" + // execution ID + // start of the inner tuple (user_fields) + "0000000000000000000000000000000000000000000000000000000000000020" + // offset of mercury_reports array + "0000000000000000000000000000000000000000000000000000000000000002" + // length of mercury_reports array + "0000000000000000000000000000000000000000000000000000000000000040" + // offset of reportA + "0000000000000000000000000000000000000000000000000000000000000080" + // offset of reportB + "0000000000000000000000000000000000000000000000000000000000000003" + // length of reportA + "0102030000000000000000000000000000000000000000000000000000000000" + // reportA + "0000000000000000000000000000000000000000000000000000000000000004" + // length of reportB + "aabbccdd00000000000000000000000000000000000000000000000000000000" // reportB + // end of the inner tuple (user_fields) + + require.Equal(t, expected, hex.EncodeToString(encoded)) +} diff --git a/core/services/relay/evm/chain_reader.go b/core/services/relay/evm/chain_reader.go new file mode 100644 index 00000000..06ad7d80 --- /dev/null +++ b/core/services/relay/evm/chain_reader.go @@ -0,0 +1,292 @@ +package evm + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/google/uuid" 
+ + "github.com/goplugin/plugin-common/pkg/codec" + + commonservices "github.com/goplugin/plugin-common/pkg/services" + commontypes "github.com/goplugin/plugin-common/pkg/types" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +type ChainReaderService interface { + services.ServiceCtx + commontypes.ChainReader +} + +type chainReader struct { + lggr logger.Logger + lp logpoller.LogPoller + client evmclient.Client + contractBindings contractBindings + parsed *parsedTypes + codec commontypes.RemoteCodec + commonservices.StateMachine +} + +// NewChainReaderService is a constructor for ChainReader, returns nil if there is any error +func NewChainReaderService(lggr logger.Logger, lp logpoller.LogPoller, chain legacyevm.Chain, config types.ChainReaderConfig) (ChainReaderService, error) { + cr := &chainReader{ + lggr: lggr.Named("ChainReader"), + lp: lp, + client: chain.Client(), + contractBindings: contractBindings{}, + parsed: &parsedTypes{encoderDefs: map[string]types.CodecEntry{}, decoderDefs: map[string]types.CodecEntry{}}, + } + + var err error + if err = cr.init(config.Contracts); err != nil { + return nil, err + } + + if cr.codec, err = cr.parsed.toCodec(); err != nil { + return nil, err + } + + err = cr.contractBindings.ForEach(func(b readBinding) error { + b.SetCodec(cr.codec) + return nil + }) + + return cr, err +} + +func (cr *chainReader) Name() string { return cr.lggr.Name() } + +var _ commontypes.ContractTypeProvider = &chainReader{} + +func (cr *chainReader) GetLatestValue(ctx context.Context, contractName, method string, params any, returnVal any) error { + b, err := cr.contractBindings.GetReadBinding(contractName, method) + if err != 
nil { + return err + } + + return b.GetLatestValue(ctx, params, returnVal) +} + +func (cr *chainReader) Bind(_ context.Context, bindings []commontypes.BoundContract) error { + return cr.contractBindings.Bind(bindings) +} + +func (cr *chainReader) init(chainContractReaders map[string]types.ChainContractReader) error { + for contractName, chainContractReader := range chainContractReaders { + contractAbi, err := abi.JSON(strings.NewReader(chainContractReader.ContractABI)) + if err != nil { + return err + } + + for typeName, chainReaderDefinition := range chainContractReader.Configs { + switch chainReaderDefinition.ReadType { + case types.Method: + err = cr.addMethod(contractName, typeName, contractAbi, *chainReaderDefinition) + case types.Event: + err = cr.addEvent(contractName, typeName, contractAbi, *chainReaderDefinition) + default: + return fmt.Errorf( + "%w: invalid chain reader definition read type: %s", + commontypes.ErrInvalidConfig, + chainReaderDefinition.ReadType) + } + + if err != nil { + return err + } + } + } + return nil +} + +func (cr *chainReader) Start(_ context.Context) error { + return cr.StartOnce("ChainReader", func() error { + return cr.contractBindings.ForEach(readBinding.Register) + }) +} + +func (cr *chainReader) Close() error { + return cr.StopOnce("ChainReader", func() error { + return cr.contractBindings.ForEach(readBinding.Unregister) + }) +} + +func (cr *chainReader) Ready() error { return nil } +func (cr *chainReader) HealthReport() map[string]error { + return map[string]error{cr.Name(): nil} +} + +func (cr *chainReader) CreateContractType(contractName, methodName string, forEncoding bool) (any, error) { + return cr.codec.CreateType(wrapItemType(contractName, methodName, forEncoding), forEncoding) +} + +func wrapItemType(contractName, methodName string, isParams bool) string { + if isParams { + return fmt.Sprintf("params.%s.%s", contractName, methodName) + } + return fmt.Sprintf("return.%s.%s", contractName, methodName) +} + +func (cr 
*chainReader) addMethod( + contractName, + methodName string, + abi abi.ABI, + chainReaderDefinition types.ChainReaderDefinition) error { + method, methodExists := abi.Methods[chainReaderDefinition.ChainSpecificName] + if !methodExists { + return fmt.Errorf("%w: method %s doesn't exist", commontypes.ErrInvalidConfig, chainReaderDefinition.ChainSpecificName) + } + + if len(chainReaderDefinition.EventInputFields) != 0 { + return fmt.Errorf( + "%w: method %s has event topic fields defined, but is not an event", + commontypes.ErrInvalidConfig, + chainReaderDefinition.ChainSpecificName) + } + + cr.contractBindings.AddReadBinding(contractName, methodName, &methodBinding{ + contractName: contractName, + method: methodName, + client: cr.client, + }) + + if err := cr.addEncoderDef(contractName, methodName, method.Inputs, method.ID, chainReaderDefinition); err != nil { + return err + } + + return cr.addDecoderDef(contractName, methodName, method.Outputs, chainReaderDefinition) +} + +func (cr *chainReader) addEvent(contractName, eventName string, a abi.ABI, chainReaderDefinition types.ChainReaderDefinition) error { + event, eventExists := a.Events[chainReaderDefinition.ChainSpecificName] + if !eventExists { + return fmt.Errorf("%w: event %s doesn't exist", commontypes.ErrInvalidConfig, chainReaderDefinition.ChainSpecificName) + } + + filterArgs, topicInfo, indexArgNames := setupEventInput(event, chainReaderDefinition) + if err := verifyEventInputsUsed(chainReaderDefinition, indexArgNames); err != nil { + return err + } + + if err := topicInfo.Init(); err != nil { + return err + } + + // Encoder def's codec won't be used to encode, only for its type as input for GetLatestValue + if err := cr.addEncoderDef(contractName, eventName, filterArgs, nil, chainReaderDefinition); err != nil { + return err + } + + inputInfo, inputModifier, err := cr.getEventInput(chainReaderDefinition, contractName, eventName) + if err != nil { + return err + } + + 
cr.contractBindings.AddReadBinding(contractName, eventName, &eventBinding{ + contractName: contractName, + eventName: eventName, + lp: cr.lp, + hash: event.ID, + inputInfo: inputInfo, + inputModifier: inputModifier, + topicInfo: topicInfo, + id: wrapItemType(contractName, eventName, false) + uuid.NewString(), + }) + + return cr.addDecoderDef(contractName, eventName, event.Inputs, chainReaderDefinition) +} + +func (cr *chainReader) getEventInput(def types.ChainReaderDefinition, contractName, eventName string) ( + types.CodecEntry, codec.Modifier, error) { + inputInfo := cr.parsed.encoderDefs[wrapItemType(contractName, eventName, true)] + inMod, err := def.InputModifications.ToModifier(evmDecoderHooks...) + if err != nil { + return nil, nil, err + } + + // initialize the modification + if _, err = inMod.RetypeToOffChain(reflect.PointerTo(inputInfo.CheckedType()), ""); err != nil { + return nil, nil, err + } + + return inputInfo, inMod, nil +} + +func verifyEventInputsUsed(chainReaderDefinition types.ChainReaderDefinition, indexArgNames map[string]bool) error { + for _, value := range chainReaderDefinition.EventInputFields { + if !indexArgNames[abi.ToCamelCase(value)] { + return fmt.Errorf("%w: %s is not an indexed argument of event %s", commontypes.ErrInvalidConfig, value, chainReaderDefinition.ChainSpecificName) + } + } + return nil +} + +func (cr *chainReader) addEncoderDef(contractName, methodName string, args abi.Arguments, prefix []byte, chainReaderDefinition types.ChainReaderDefinition) error { + // ABI.Pack prepends the method.ID to the encodings, we'll need the encoder to do the same. + inputMod, err := chainReaderDefinition.InputModifications.ToModifier(evmDecoderHooks...) 
+ if err != nil { + return err + } + input := types.NewCodecEntry(args, prefix, inputMod) + + if err := input.Init(); err != nil { + return err + } + + cr.parsed.encoderDefs[wrapItemType(contractName, methodName, true)] = input + return nil +} + +func (cr *chainReader) addDecoderDef(contractName, methodName string, outputs abi.Arguments, def types.ChainReaderDefinition) error { + mod, err := def.OutputModifications.ToModifier(evmDecoderHooks...) + if err != nil { + return err + } + output := types.NewCodecEntry(outputs, nil, mod) + cr.parsed.decoderDefs[wrapItemType(contractName, methodName, false)] = output + return output.Init() +} + +func setupEventInput(event abi.Event, def types.ChainReaderDefinition) ([]abi.Argument, types.CodecEntry, map[string]bool) { + topicFieldDefs := map[string]bool{} + for _, value := range def.EventInputFields { + capFirstValue := abi.ToCamelCase(value) + topicFieldDefs[capFirstValue] = true + } + + filterArgs := make([]abi.Argument, 0, types.MaxTopicFields) + inputArgs := make([]abi.Argument, 0, len(event.Inputs)) + indexArgNames := map[string]bool{} + + for _, input := range event.Inputs { + if !input.Indexed { + continue + } + + filterWith := topicFieldDefs[abi.ToCamelCase(input.Name)] + if filterWith { + // When presenting the filter off-chain, + // the user will provide the unhashed version of the input + // The reader will hash topics if needed. 
+ inputUnindexed := input + inputUnindexed.Indexed = false + filterArgs = append(filterArgs, inputUnindexed) + } + + inputArgs = append(inputArgs, input) + indexArgNames[abi.ToCamelCase(input.Name)] = true + } + + return filterArgs, types.NewCodecEntry(inputArgs, nil, nil), indexArgNames +} diff --git a/core/services/relay/evm/chain_reader_test.go b/core/services/relay/evm/chain_reader_test.go new file mode 100644 index 00000000..5bbb442b --- /dev/null +++ b/core/services/relay/evm/chain_reader_test.go @@ -0,0 +1,426 @@ +package evm_test + +import ( + "crypto/ecdsa" + "fmt" + "math" + "math/big" + "os" + "reflect" + "strconv" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + evmtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/goplugin/libocr/commontypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/codec" + + clcommontypes "github.com/goplugin/plugin-common/pkg/types" + . "github.com/goplugin/plugin-common/pkg/types/interfacetests" //nolint common practice to import test mods with . 
+ + commontestutils "github.com/goplugin/plugin-common/pkg/loop/testutils" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm/mocks" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/chain_reader_example" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +const ( + commonGasLimitOnEvms = uint64(4712388) + triggerWithDynamicTopic = "TriggeredEventWithDynamicTopic" + triggerWithAllTopics = "TriggeredWithFourTopics" +) + +func TestChainReader(t *testing.T) { + t.Parallel() + it := &chainReaderInterfaceTester{} + RunChainReaderInterfaceTests(t, it) + RunChainReaderInterfaceTests(t, commontestutils.WrapChainReaderTesterForLoop(it)) + t.Run("Dynamically typed topics can be used to filter and have type correct in return", func(t *testing.T) { + it.Setup(t) + + anyString := "foo" + tx, err := it.evmTest.LatestValueHolderTransactor.TriggerEventWithDynamicTopic(it.auth, anyString) + require.NoError(t, err) + it.sim.Commit() + it.incNonce() + it.awaitTx(t, tx) + ctx := testutils.Context(t) + + cr := it.GetChainReader(t) + require.NoError(t, cr.Bind(ctx, it.GetBindings(t))) + + input := struct{ Field string }{Field: anyString} + tp := cr.(clcommontypes.ContractTypeProvider) + output, err := tp.CreateContractType(AnyContractName, triggerWithDynamicTopic, false) + require.NoError(t, err) + rOutput := reflect.Indirect(reflect.ValueOf(output)) + + require.Eventually(t, func() bool { + return cr.GetLatestValue(ctx, AnyContractName, triggerWithDynamicTopic, input, output) == nil + }, it.MaxWaitTimeForEvents(), time.Millisecond*10) + + assert.Equal(t, &anyString, 
rOutput.FieldByName("Field").Interface()) + topic, err := abi.MakeTopics([]any{anyString}) + require.NoError(t, err) + assert.Equal(t, &topic[0][0], rOutput.FieldByName("FieldHash").Interface()) + }) + + t.Run("Multiple topics can filter together", func(t *testing.T) { + it.Setup(t) + triggerFourTopics(t, it, int32(1), int32(2), int32(3)) + triggerFourTopics(t, it, int32(2), int32(2), int32(3)) + triggerFourTopics(t, it, int32(1), int32(3), int32(3)) + triggerFourTopics(t, it, int32(1), int32(2), int32(4)) + + ctx := testutils.Context(t) + cr := it.GetChainReader(t) + require.NoError(t, cr.Bind(ctx, it.GetBindings(t))) + var latest struct{ Field1, Field2, Field3 int32 } + params := struct{ Field1, Field2, Field3 int32 }{Field1: 1, Field2: 2, Field3: 3} + + time.Sleep(it.MaxWaitTimeForEvents()) + + require.NoError(t, cr.GetLatestValue(ctx, AnyContractName, triggerWithAllTopics, params, &latest)) + assert.Equal(t, int32(1), latest.Field1) + assert.Equal(t, int32(2), latest.Field2) + assert.Equal(t, int32(3), latest.Field3) + }) +} + +func triggerFourTopics(t *testing.T, it *chainReaderInterfaceTester, i1, i2, i3 int32) { + tx, err := it.evmTest.LatestValueHolderTransactor.TriggerWithFourTopics(it.auth, i1, i2, i3) + require.NoError(t, err) + require.NoError(t, err) + it.sim.Commit() + it.incNonce() + it.awaitTx(t, tx) +} + +type chainReaderInterfaceTester struct { + chain *mocks.Chain + address string + address2 string + chainConfig types.ChainReaderConfig + auth *bind.TransactOpts + sim *backends.SimulatedBackend + pk *ecdsa.PrivateKey + evmTest *chain_reader_example.LatestValueHolder + cr evm.ChainReaderService +} + +func (it *chainReaderInterfaceTester) MaxWaitTimeForEvents() time.Duration { + // From trial and error, when running on CI, sometimes the boxes get slow + maxWaitTime := time.Second * 20 + maxWaitTimeStr, ok := os.LookupEnv("MAX_WAIT_TIME_FOR_EVENTS_S") + if ok { + wiatS, err := strconv.ParseInt(maxWaitTimeStr, 10, 64) + if err != nil { + 
fmt.Printf("Error parsing MAX_WAIT_TIME_FOR_EVENTS_S: %v, defaulting to %v\n", err, maxWaitTime)
		} else {
			// Only override the default when parsing succeeded; previously the
			// assignment ran unconditionally, so a bad env value silently set
			// the wait time to 0 despite the "defaulting" message above.
			maxWaitTime = time.Second * time.Duration(wiatS)
		}
	}

	return maxWaitTime
}

// Setup (re)initializes the simulated chain and deploys fresh contracts for
// each test; the chain itself is reused across tests once created.
func (it *chainReaderInterfaceTester) Setup(t *testing.T) {
	t.Cleanup(func() {
		// DB may be closed by the test already, ignore errors
		if it.cr != nil {
			_ = it.cr.Close()
		}
		it.cr = nil
		it.evmTest = nil
	})

	// can re-use the same chain for tests, just make new contract for each test
	if it.chain != nil {
		it.deployNewContracts(t)
		return
	}

	it.chain = &mocks.Chain{}
	it.setupChainNoClient(t)

	testStruct := CreateTestStruct(0, it)

	it.chainConfig = types.ChainReaderConfig{
		Contracts: map[string]types.ChainContractReader{
			AnyContractName: {
				ContractABI: chain_reader_example.LatestValueHolderMetaData.ABI,
				Configs: map[string]*types.ChainReaderDefinition{
					MethodTakingLatestParamsReturningTestStruct: {
						ChainSpecificName: "getElementAtIndex",
						OutputModifications: codec.ModifiersConfig{
							&codec.RenameModifierConfig{Fields: map[string]string{"NestedStruct.Inner.IntVal": "I"}},
						},
					},
					MethodReturningUint64: {
						ChainSpecificName: "getPrimitiveValue",
					},
					DifferentMethodReturningUint64: {
						ChainSpecificName: "getDifferentPrimitiveValue",
					},
					MethodReturningUint64Slice: {
						ChainSpecificName: "getSliceValue",
					},
					EventName: {
						ChainSpecificName: "Triggered",
						ReadType:          types.Event,
						OutputModifications: codec.ModifiersConfig{
							&codec.RenameModifierConfig{Fields: map[string]string{"NestedStruct.Inner.IntVal": "I"}},
						},
					},
					EventWithFilterName: {
						ChainSpecificName: "Triggered",
						ReadType:          types.Event,
						EventInputFields:  []string{"Field"},
					},
					triggerWithDynamicTopic: {
						ChainSpecificName: triggerWithDynamicTopic,
						ReadType:          types.Event,
						EventInputFields:  []string{"fieldHash"},
						InputModifications: codec.ModifiersConfig{
							&codec.RenameModifierConfig{Fields: map[string]string{"FieldHash": "Field"}},
						},
					},
triggerWithAllTopics: { + ChainSpecificName: triggerWithAllTopics, + ReadType: types.Event, + EventInputFields: []string{"Field1", "Field2", "Field3"}, + }, + MethodReturningSeenStruct: { + ChainSpecificName: "returnSeen", + InputModifications: codec.ModifiersConfig{ + &codec.HardCodeModifierConfig{ + OnChainValues: map[string]any{ + "BigField": testStruct.BigField.String(), + "Account": hexutil.Encode(testStruct.Account), + }, + }, + &codec.RenameModifierConfig{Fields: map[string]string{"NestedStruct.Inner.IntVal": "I"}}, + }, + OutputModifications: codec.ModifiersConfig{ + &codec.HardCodeModifierConfig{OffChainValues: map[string]any{"ExtraField": anyExtraValue}}, + &codec.RenameModifierConfig{Fields: map[string]string{"NestedStruct.Inner.IntVal": "I"}}, + }, + }, + }, + }, + AnySecondContractName: { + ContractABI: chain_reader_example.LatestValueHolderMetaData.ABI, + Configs: map[string]*types.ChainReaderDefinition{ + MethodReturningUint64: { + ChainSpecificName: "getDifferentPrimitiveValue", + }, + }, + }, + }, + } + it.chain.On("Client").Return(client.NewSimulatedBackendClient(t, it.sim, big.NewInt(1337))) + it.deployNewContracts(t) +} + +func (it *chainReaderInterfaceTester) Name() string { + return "EVM" +} + +func (it *chainReaderInterfaceTester) GetAccountBytes(i int) []byte { + account := [20]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + account[i%20] += byte(i) + account[(i+3)%20] += byte(i + 3) + return account[:] +} + +func (it *chainReaderInterfaceTester) GetChainReader(t *testing.T) clcommontypes.ChainReader { + ctx := testutils.Context(t) + if it.cr != nil { + return it.cr + } + + lggr := logger.NullLogger + db := pgtest.NewSqlxDB(t) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, db, lggr, pgtest.NewQConfig(true)), it.chain.Client(), lggr, time.Millisecond, false, 0, 1, 1, 10000) + require.NoError(t, lp.Start(ctx)) + it.chain.On("LogPoller").Return(lp) + cr, err := evm.NewChainReaderService(lggr, 
lp, it.chain, it.chainConfig) + require.NoError(t, err) + require.NoError(t, cr.Start(ctx)) + it.cr = cr + return cr +} + +func (it *chainReaderInterfaceTester) SetLatestValue(t *testing.T, testStruct *TestStruct) { + it.sendTxWithTestStruct(t, testStruct, (*chain_reader_example.LatestValueHolderTransactor).AddTestStruct) +} + +func (it *chainReaderInterfaceTester) TriggerEvent(t *testing.T, testStruct *TestStruct) { + it.sendTxWithTestStruct(t, testStruct, (*chain_reader_example.LatestValueHolderTransactor).TriggerEvent) +} + +func (it *chainReaderInterfaceTester) GetBindings(t *testing.T) []clcommontypes.BoundContract { + return []clcommontypes.BoundContract{ + {Name: AnyContractName, Address: it.address, Pending: true}, + {Name: AnySecondContractName, Address: it.address2, Pending: true}, + } +} + +type testStructFn = func(*chain_reader_example.LatestValueHolderTransactor, *bind.TransactOpts, int32, string, uint8, [32]uint8, common.Address, []common.Address, *big.Int, chain_reader_example.MidLevelTestStruct) (*evmtypes.Transaction, error) + +func (it *chainReaderInterfaceTester) sendTxWithTestStruct(t *testing.T, testStruct *TestStruct, fn testStructFn) { + tx, err := fn( + &it.evmTest.LatestValueHolderTransactor, + it.auth, + *testStruct.Field, + testStruct.DifferentField, + uint8(testStruct.OracleID), + convertOracleIDs(testStruct.OracleIDs), + common.Address(testStruct.Account), + convertAccounts(testStruct.Accounts), + testStruct.BigField, + midToInternalType(testStruct.NestedStruct), + ) + require.NoError(t, err) + it.sim.Commit() + it.incNonce() + it.awaitTx(t, tx) +} + +func convertOracleIDs(oracleIDs [32]commontypes.OracleID) [32]byte { + convertedIds := [32]byte{} + for i, id := range oracleIDs { + convertedIds[i] = byte(id) + } + return convertedIds +} + +func convertAccounts(accounts [][]byte) []common.Address { + convertedAccounts := make([]common.Address, len(accounts)) + for i, a := range accounts { + convertedAccounts[i] = common.Address(a) + } + 
return convertedAccounts +} + +func (it *chainReaderInterfaceTester) setupChainNoClient(t require.TestingT) { + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + it.pk = privateKey + + it.auth, err = bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) + require.NoError(t, err) + + it.sim = backends.NewSimulatedBackend(core.GenesisAlloc{it.auth.From: {Balance: big.NewInt(math.MaxInt64)}}, commonGasLimitOnEvms*5000) + it.sim.Commit() +} + +func (it *chainReaderInterfaceTester) deployNewContracts(t *testing.T) { + it.address = it.deployNewContract(t) + it.address2 = it.deployNewContract(t) +} + +func (it *chainReaderInterfaceTester) deployNewContract(t *testing.T) string { + ctx := testutils.Context(t) + gasPrice, err := it.sim.SuggestGasPrice(ctx) + require.NoError(t, err) + it.auth.GasPrice = gasPrice + + // 105528 was in the error: gas too low: have 0, want 105528 + // Not sure if there's a better way to get it. + it.auth.GasLimit = 10552800 + + address, tx, ts, err := chain_reader_example.DeployLatestValueHolder(it.auth, it.sim) + + require.NoError(t, err) + it.sim.Commit() + if it.evmTest == nil { + it.evmTest = ts + } + it.incNonce() + it.awaitTx(t, tx) + return address.String() +} + +func (it *chainReaderInterfaceTester) awaitTx(t *testing.T, tx *evmtypes.Transaction) { + ctx := testutils.Context(t) + receipt, err := it.sim.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) + require.Equal(t, evmtypes.ReceiptStatusSuccessful, receipt.Status) +} + +func (it *chainReaderInterfaceTester) incNonce() { + if it.auth.Nonce == nil { + it.auth.Nonce = big.NewInt(1) + } else { + it.auth.Nonce = it.auth.Nonce.Add(it.auth.Nonce, big.NewInt(1)) + } +} + +func getAccounts(first TestStruct) []common.Address { + accountBytes := make([]common.Address, len(first.Accounts)) + for i, account := range first.Accounts { + accountBytes[i] = common.Address(account) + } + return accountBytes +} + +func argsFromTestStruct(ts TestStruct) []any { + 
return []any{ + ts.Field, + ts.DifferentField, + uint8(ts.OracleID), + getOracleIDs(ts), + common.Address(ts.Account), + getAccounts(ts), + ts.BigField, + midToInternalType(ts.NestedStruct), + } +} + +func getOracleIDs(first TestStruct) [32]byte { + oracleIDs := [32]byte{} + for i, oracleID := range first.OracleIDs { + oracleIDs[i] = byte(oracleID) + } + return oracleIDs +} + +func toInternalType(testStruct TestStruct) chain_reader_example.TestStruct { + return chain_reader_example.TestStruct{ + Field: *testStruct.Field, + DifferentField: testStruct.DifferentField, + OracleId: byte(testStruct.OracleID), + OracleIds: convertOracleIDs(testStruct.OracleIDs), + Account: common.Address(testStruct.Account), + Accounts: convertAccounts(testStruct.Accounts), + BigField: testStruct.BigField, + NestedStruct: midToInternalType(testStruct.NestedStruct), + } +} + +func midToInternalType(m MidLevelTestStruct) chain_reader_example.MidLevelTestStruct { + return chain_reader_example.MidLevelTestStruct{ + FixedBytes: m.FixedBytes, + Inner: chain_reader_example.InnerTestStruct{ + IntVal: int64(m.Inner.I), + S: m.Inner.S, + }, + } +} diff --git a/core/services/relay/evm/codec.go b/core/services/relay/evm/codec.go new file mode 100644 index 00000000..1e580875 --- /dev/null +++ b/core/services/relay/evm/codec.go @@ -0,0 +1,146 @@ +package evm + +import ( + "encoding/json" + "fmt" + "math/big" + "reflect" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/mitchellh/mapstructure" + + "github.com/goplugin/plugin-common/pkg/codec" + + commontypes "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +// decodeAccountAndAllowArraySliceHook allows: +// +// strings to be converted to [32]byte allowing config to represent them as 0x... 
+// slices or arrays to be converted to a pointer to that type +// +// BigIntHook allows *big.Int to be represented as any integer type or a string and to go back to them. +// Useful for config, or if when a model may use a go type that isn't a *big.Int when Pack expects one. +// Eg: int32 in a go struct from a plugin could require a *big.Int in Pack for int24, if it fits, we shouldn't care. +// SliceToArrayVerifySizeHook verifies that slices have the correct size when converting to an array +// sizeVerifyBigIntHook allows our custom types that verify the number fits in the on-chain type to be converted as-if +// it was a *big.Int +var evmDecoderHooks = []mapstructure.DecodeHookFunc{decodeAccountAndAllowArraySliceHook, codec.BigIntHook, codec.SliceToArrayVerifySizeHook, sizeVerifyBigIntHook} + +// NewCodec creates a new [commontypes.RemoteCodec] for EVM. +// Note that names in the ABI are converted to Go names using [abi.ToCamelCase], +// this is per convention in [abi.MakeTopics], [abi.Arguments.Pack] etc. +// This allows names on-chain to be in go convention when generated. +// It means that if you need to use a [codec.Modifier] to reference a field +// you need to use the Go name instead of the name on-chain. +// eg: rename FooBar -> Bar, not foo_bar_ to Bar if the name on-chain is foo_bar_ +func NewCodec(conf types.CodecConfig) (commontypes.RemoteCodec, error) { + parsed := &parsedTypes{ + encoderDefs: map[string]types.CodecEntry{}, + decoderDefs: map[string]types.CodecEntry{}, + } + + for k, v := range conf.Configs { + args := abi.Arguments{} + if err := json.Unmarshal(([]byte)(v.TypeABI), &args); err != nil { + return nil, err + } + + mod, err := v.ModifierConfigs.ToModifier(evmDecoderHooks...) 
+ if err != nil { + return nil, err + } + + item := types.NewCodecEntry(args, nil, mod) + if err = item.Init(); err != nil { + return nil, err + } + + parsed.encoderDefs[k] = item + parsed.decoderDefs[k] = item + } + + return parsed.toCodec() +} + +type evmCodec struct { + *encoder + *decoder + *parsedTypes +} + +func (c *evmCodec) CreateType(itemType string, forEncoding bool) (any, error) { + var itemTypes map[string]types.CodecEntry + if forEncoding { + itemTypes = c.encoderDefs + } else { + itemTypes = c.decoderDefs + } + + def, ok := itemTypes[itemType] + if !ok { + return nil, fmt.Errorf("%w: cannot find type name %s", commontypes.ErrInvalidType, itemType) + } + + return reflect.New(def.CheckedType()).Interface(), nil +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)) + +func sizeVerifyBigIntHook(from, to reflect.Type, data any) (any, error) { + if from.Implements(types.SizedBigIntType()) && + !to.Implements(types.SizedBigIntType()) && + !reflect.PointerTo(to).Implements(types.SizedBigIntType()) { + return codec.BigIntHook(from, bigIntType, reflect.ValueOf(data).Convert(bigIntType).Interface()) + } + + if !to.Implements(types.SizedBigIntType()) { + return data, nil + } + + var err error + data, err = codec.BigIntHook(from, bigIntType, data) + if err != nil { + return nil, err + } + + bi, ok := data.(*big.Int) + if !ok { + return data, nil + } + + converted := reflect.ValueOf(bi).Convert(to).Interface().(types.SizedBigInt) + return converted, converted.Verify() +} + +func decodeAccountAndAllowArraySliceHook(from, to reflect.Type, data any) (any, error) { + if from.Kind() == reflect.String && + (to == reflect.TypeOf(common.Address{}) || to == reflect.TypeOf(&common.Address{})) { + return decodeAddress(data) + } + + if from.Kind() == reflect.Pointer && to.Kind() != reflect.Pointer && from != nil && + (from.Elem().Kind() == reflect.Slice || from.Elem().Kind() == reflect.Array) { + return reflect.ValueOf(data).Elem().Interface(), nil + } + + return data, nil +} 
+ +func decodeAddress(data any) (any, error) { + decoded, err := hexutil.Decode(data.(string)) + if err != nil { + return nil, fmt.Errorf("%w: %w", commontypes.ErrInvalidType, err) + } else if len(decoded) != common.AddressLength { + return nil, fmt.Errorf( + "%w: wrong number size for address expected %v got %v", + commontypes.ErrSliceWrongLen, + common.AddressLength, len(decoded)) + } + + return common.Address(decoded), nil +} diff --git a/core/services/relay/evm/codec_fuzz_test.go b/core/services/relay/evm/codec_fuzz_test.go new file mode 100644 index 00000000..bee3cdec --- /dev/null +++ b/core/services/relay/evm/codec_fuzz_test.go @@ -0,0 +1,12 @@ +package evm_test + +import ( + "testing" + + "github.com/goplugin/plugin-common/pkg/types/interfacetests" +) + +func FuzzCodec(f *testing.F) { + tester := &codecInterfaceTester{} + interfacetests.RunCodecInterfaceFuzzTests(f, tester) +} diff --git a/core/services/relay/evm/codec_test.go b/core/services/relay/evm/codec_test.go new file mode 100644 index 00000000..51252993 --- /dev/null +++ b/core/services/relay/evm/codec_test.go @@ -0,0 +1,235 @@ +package evm_test + +import ( + "encoding/json" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common/hexutil" + ocr2types "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/codec" + + looptestutils "github.com/goplugin/plugin-common/pkg/loop/testutils" //nolint common practice to import test mods with . + commontypes "github.com/goplugin/plugin-common/pkg/types" + . "github.com/goplugin/plugin-common/pkg/types/interfacetests" //nolint common practice to import test mods with . 
+ + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/chain_reader_example" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +const anyExtraValue = 3 + +func TestCodec(t *testing.T) { + tester := &codecInterfaceTester{} + RunCodecInterfaceTests(t, tester) + RunCodecInterfaceTests(t, looptestutils.WrapCodecTesterForLoop(tester)) + + anyN := 10 + c := tester.GetCodec(t) + t.Run("GetMaxEncodingSize delegates to GetMaxSize", func(t *testing.T) { + actual, err := c.GetMaxEncodingSize(testutils.Context(t), anyN, sizeItemType) + assert.NoError(t, err) + + expected, err := types.GetMaxSize(anyN, parseDefs(t)[sizeItemType]) + require.NoError(t, err) + assert.Equal(t, expected, actual) + }) + + t.Run("GetMaxDecodingSize delegates to GetMaxSize", func(t *testing.T) { + actual, err := c.GetMaxDecodingSize(testutils.Context(t), anyN, sizeItemType) + assert.NoError(t, err) + + expected, err := types.GetMaxSize(anyN, parseDefs(t)[sizeItemType]) + require.NoError(t, err) + assert.Equal(t, expected, actual) + }) +} + +func TestCodec_SimpleEncode(t *testing.T) { + codecName := "my_codec" + input := map[string]any{ + "Report": int32(6), + "Meta": "abcdefg", + } + evmEncoderConfig := `[{"Name":"Report","Type":"int32"},{"Name":"Meta","Type":"string"}]` + + codecConfig := types.CodecConfig{Configs: map[string]types.ChainCodecConfig{ + codecName: {TypeABI: evmEncoderConfig}, + }} + c, err := evm.NewCodec(codecConfig) + require.NoError(t, err) + + result, err := c.Encode(testutils.Context(t), input, codecName) + require.NoError(t, err) + expected := + "0000000000000000000000000000000000000000000000000000000000000006" + // int32(6) + "0000000000000000000000000000000000000000000000000000000000000040" + // total bytes occupied by the string (64) + "0000000000000000000000000000000000000000000000000000000000000007" + // length of the 
string (7 chars) + "6162636465666700000000000000000000000000000000000000000000000000" // actual string + + require.Equal(t, expected, hexutil.Encode(result)[2:]) +} + +type codecInterfaceTester struct{} + +func (it *codecInterfaceTester) Setup(_ *testing.T) {} + +func (it *codecInterfaceTester) GetAccountBytes(i int) []byte { + account := [20]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + // fuzz tests can make -ve numbers + if i < 0 { + i = -i + } + account[i%20] += byte(i) + account[(i+3)%20] += byte(i + 3) + return account[:] +} + +func (it *codecInterfaceTester) EncodeFields(t *testing.T, request *EncodeRequest) []byte { + if request.TestOn == TestItemType { + return encodeFieldsOnItem(t, request) + } + + return encodeFieldsOnSliceOrArray(t, request) +} + +func (it *codecInterfaceTester) GetCodec(t *testing.T) commontypes.Codec { + codecConfig := types.CodecConfig{Configs: map[string]types.ChainCodecConfig{}} + testStruct := CreateTestStruct(0, it) + for k, v := range codecDefs { + defBytes, err := json.Marshal(v) + require.NoError(t, err) + entry := codecConfig.Configs[k] + entry.TypeABI = string(defBytes) + + if k != sizeItemType && k != NilType { + entry.ModifierConfigs = codec.ModifiersConfig{ + &codec.RenameModifierConfig{Fields: map[string]string{"NestedStruct.Inner.IntVal": "I"}}, + } + } + + if k == TestItemWithConfigExtra { + hardCode := &codec.HardCodeModifierConfig{ + OnChainValues: map[string]any{ + "BigField": testStruct.BigField.String(), + "Account": hexutil.Encode(testStruct.Account), + }, + OffChainValues: map[string]any{"ExtraField": anyExtraValue}, + } + entry.ModifierConfigs = append(entry.ModifierConfigs, hardCode) + } + codecConfig.Configs[k] = entry + } + + c, err := evm.NewCodec(codecConfig) + require.NoError(t, err) + return c +} + +func (it *codecInterfaceTester) IncludeArrayEncodingSizeEnforcement() bool { + return true +} +func (it *codecInterfaceTester) Name() string { + return "EVM" +} + +func 
encodeFieldsOnItem(t *testing.T, request *EncodeRequest) ocr2types.Report { + return packArgs(t, argsFromTestStruct(request.TestStructs[0]), parseDefs(t)[TestItemType], request) +} + +func encodeFieldsOnSliceOrArray(t *testing.T, request *EncodeRequest) []byte { + oargs := parseDefs(t)[request.TestOn] + args := make([]any, 1) + + switch request.TestOn { + case TestItemArray1Type: + args[0] = [1]chain_reader_example.TestStruct{toInternalType(request.TestStructs[0])} + case TestItemArray2Type: + args[0] = [2]chain_reader_example.TestStruct{toInternalType(request.TestStructs[0]), toInternalType(request.TestStructs[1])} + default: + tmp := make([]chain_reader_example.TestStruct, len(request.TestStructs)) + for i, ts := range request.TestStructs { + tmp[i] = toInternalType(ts) + } + args[0] = tmp + } + + return packArgs(t, args, oargs, request) +} + +func packArgs(t *testing.T, allArgs []any, oargs abi.Arguments, request *EncodeRequest) []byte { + // extra capacity in case we add an argument + args := make(abi.Arguments, len(oargs), len(oargs)+1) + copy(args, oargs) + // decoding has extra field to decode + if request.ExtraField { + fakeType, err := abi.NewType("int32", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + args = append(args, abi.Argument{Name: "FakeField", Type: fakeType}) + allArgs = append(allArgs, 11) + } + + if request.MissingField { + args = args[1:] //nolint we know it's non-zero len + allArgs = allArgs[1:] //nolint we know it's non-zero len + } + + bytes, err := args.Pack(allArgs...) 
+ require.NoError(t, err) + return bytes +} + +var inner = []abi.ArgumentMarshaling{ + {Name: "IntVal", Type: "int64"}, + {Name: "S", Type: "string"}, +} + +var nested = []abi.ArgumentMarshaling{ + {Name: "FixedBytes", Type: "bytes2"}, + {Name: "Inner", Type: "tuple", Components: inner}, +} + +var ts = []abi.ArgumentMarshaling{ + {Name: "Field", Type: "int32"}, + {Name: "DifferentField", Type: "string"}, + {Name: "OracleId", Type: "uint8"}, + {Name: "OracleIds", Type: "uint8[32]"}, + {Name: "Account", Type: "address"}, + {Name: "Accounts", Type: "address[]"}, + {Name: "BigField", Type: "int192"}, + {Name: "NestedStruct", Type: "tuple", Components: nested}, +} + +const sizeItemType = "item for size" + +var codecDefs = map[string][]abi.ArgumentMarshaling{ + TestItemType: ts, + TestItemSliceType: { + {Name: "", Type: "tuple[]", Components: ts}, + }, + TestItemArray1Type: { + {Name: "", Type: "tuple[1]", Components: ts}, + }, + TestItemArray2Type: { + {Name: "", Type: "tuple[2]", Components: ts}, + }, + sizeItemType: { + {Name: "Stuff", Type: "int256[]"}, + {Name: "OtherStuff", Type: "int256"}, + }, + TestItemWithConfigExtra: ts, + NilType: {}, +} + +func parseDefs(t *testing.T) map[string]abi.Arguments { + bytes, err := json.Marshal(codecDefs) + require.NoError(t, err) + var results map[string]abi.Arguments + require.NoError(t, json.Unmarshal(bytes, &results)) + return results +} diff --git a/core/services/relay/evm/config_poller.go b/core/services/relay/evm/config_poller.go new file mode 100644 index 00000000..a23046ec --- /dev/null +++ b/core/services/relay/evm/config_poller.go @@ -0,0 +1,233 @@ +package evm + +import ( + "context" + "database/sql" + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + 
"github.com/goplugin/libocr/gethwrappers2/ocrconfigurationstoreevmsimple" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + evmRelayTypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +var ( + failedRPCContractCalls = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ocr2_failed_rpc_contract_calls", + Help: "Running count of failed RPC contract calls by chain/contract", + }, + []string{"chainID", "contractAddress"}, + ) +) + +type LogDecoder interface { + EventSig() common.Hash + Decode(rawLog []byte) (ocrtypes.ContractConfig, error) +} + +type configPoller struct { + services.StateMachine + + lggr logger.Logger + filterName string + destChainLogPoller logpoller.LogPoller + client client.Client + + aggregatorContractAddr common.Address + aggregatorContract *ocr2aggregator.OCR2Aggregator + + // Some chains "manage" state bloat by deleting older logs. The ConfigStore + // contract allows us work around such restrictions. 
+ configStoreContractAddr *common.Address + configStoreContract *ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimple + + // Depending on the exact contract used, the raw config log may be shaped + // in different ways + ld LogDecoder +} + +func configPollerFilterName(addr common.Address) string { + return logpoller.FilterName("OCR2ConfigPoller", addr.String()) +} + +type CPConfig struct { + Client client.Client + DestinationChainPoller logpoller.LogPoller + AggregatorContractAddress common.Address + ConfigStoreAddress *common.Address + LogDecoder LogDecoder +} + +func NewConfigPoller(lggr logger.Logger, cfg CPConfig) (evmRelayTypes.ConfigPoller, error) { + return newConfigPoller(lggr, cfg.Client, cfg.DestinationChainPoller, cfg.AggregatorContractAddress, cfg.ConfigStoreAddress, cfg.LogDecoder) +} + +func newConfigPoller(lggr logger.Logger, client client.Client, destChainPoller logpoller.LogPoller, aggregatorContractAddr common.Address, configStoreAddr *common.Address, ld LogDecoder) (*configPoller, error) { + err := destChainPoller.RegisterFilter(logpoller.Filter{Name: configPollerFilterName(aggregatorContractAddr), EventSigs: []common.Hash{ld.EventSig()}, Addresses: []common.Address{aggregatorContractAddr}}) + if err != nil { + return nil, err + } + + aggregatorContract, err := ocr2aggregator.NewOCR2Aggregator(aggregatorContractAddr, client) + if err != nil { + return nil, err + } + + cp := &configPoller{ + lggr: lggr, + filterName: configPollerFilterName(aggregatorContractAddr), + destChainLogPoller: destChainPoller, + aggregatorContractAddr: aggregatorContractAddr, + client: client, + aggregatorContract: aggregatorContract, + ld: ld, + } + + if configStoreAddr != nil { + cp.configStoreContractAddr = configStoreAddr + cp.configStoreContract, err = ocrconfigurationstoreevmsimple.NewOCRConfigurationStoreEVMSimple(*configStoreAddr, client) + if err != nil { + return nil, err + } + } + + return cp, nil +} + +func (cp *configPoller) Start() {} + +func (cp 
*configPoller) Close() error { + return nil +} + +// Notify noop method +func (cp *configPoller) Notify() <-chan struct{} { + return nil +} + +// Replay abstracts the logpoller.LogPoller Replay() implementation +func (cp *configPoller) Replay(ctx context.Context, fromBlock int64) error { + return cp.destChainLogPoller.Replay(ctx, fromBlock) +} + +// LatestConfigDetails returns the latest config details from the logs +func (cp *configPoller) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { + latest, err := cp.destChainLogPoller.LatestLogByEventSigWithConfs(cp.ld.EventSig(), cp.aggregatorContractAddr, 1, pg.WithParentCtx(ctx)) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + if cp.isConfigStoreAvailable() { + // Fallback to RPC call in case logs have been pruned and configStoreContract is available + return cp.callLatestConfigDetails(ctx) + } + // log not found means return zero config digest + return 0, ocrtypes.ConfigDigest{}, nil + } + return 0, ocrtypes.ConfigDigest{}, err + } + latestConfigSet, err := cp.ld.Decode(latest.Data) + if err != nil { + return 0, ocrtypes.ConfigDigest{}, err + } + return uint64(latest.BlockNumber), latestConfigSet.ConfigDigest, nil +} + +// LatestConfig returns the latest config from the logs on a certain block +func (cp *configPoller) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) { + lgs, err := cp.destChainLogPoller.Logs(int64(changedInBlock), int64(changedInBlock), cp.ld.EventSig(), cp.aggregatorContractAddr, pg.WithParentCtx(ctx)) + if err != nil { + return ocrtypes.ContractConfig{}, err + } + if len(lgs) == 0 { + if cp.isConfigStoreAvailable() { + // Fallback to RPC call in case logs have been pruned + return cp.callReadConfigFromStore(ctx) + } + return ocrtypes.ContractConfig{}, fmt.Errorf("no logs found for config on contract %s (chain %s) at block %d", cp.aggregatorContractAddr.Hex(), 
cp.client.ConfiguredChainID().String(), changedInBlock) + } + latestConfigSet, err := cp.ld.Decode(lgs[len(lgs)-1].Data) + if err != nil { + return ocrtypes.ContractConfig{}, err + } + cp.lggr.Infow("LatestConfig", "latestConfig", latestConfigSet) + return latestConfigSet, nil +} + +// LatestBlockHeight returns the latest block height from the logs +func (cp *configPoller) LatestBlockHeight(ctx context.Context) (blockHeight uint64, err error) { + latest, err := cp.destChainLogPoller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return 0, nil + } + return 0, err + } + return uint64(latest.BlockNumber), nil +} + +func (cp *configPoller) isConfigStoreAvailable() bool { + return cp.configStoreContract != nil +} + +// RPC call for latest config details +func (cp *configPoller) callLatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { + details, err := cp.aggregatorContract.LatestConfigDetails(&bind.CallOpts{ + Context: ctx, + }) + if err != nil { + failedRPCContractCalls.WithLabelValues(cp.client.ConfiguredChainID().String(), cp.aggregatorContractAddr.Hex()).Inc() + } + return uint64(details.BlockNumber), details.ConfigDigest, err +} + +// RPC call to read config from config store contract +func (cp *configPoller) callReadConfigFromStore(ctx context.Context) (cfg ocrtypes.ContractConfig, err error) { + _, configDigest, err := cp.LatestConfigDetails(ctx) + if err != nil { + failedRPCContractCalls.WithLabelValues(cp.client.ConfiguredChainID().String(), cp.aggregatorContractAddr.Hex()).Inc() + return cfg, fmt.Errorf("failed to get latest config details: %w", err) + } + if configDigest == (ocrtypes.ConfigDigest{}) { + return cfg, fmt.Errorf("config details missing while trying to lookup config in store; no logs found for contract %s (chain %s)", cp.aggregatorContractAddr.Hex(), cp.client.ConfiguredChainID().String()) + } + + storedConfig, err := 
cp.configStoreContract.ReadConfig(&bind.CallOpts{ + Context: ctx, + }, configDigest) + if err != nil { + failedRPCContractCalls.WithLabelValues(cp.client.ConfiguredChainID().String(), cp.configStoreContractAddr.Hex()).Inc() + return cfg, fmt.Errorf("failed to read config from config store contract: %w", err) + } + + signers := make([]ocrtypes.OnchainPublicKey, len(storedConfig.Signers)) + for i := range signers { + signers[i] = storedConfig.Signers[i].Bytes() + } + transmitters := make([]ocrtypes.Account, len(storedConfig.Transmitters)) + for i := range transmitters { + transmitters[i] = ocrtypes.Account(storedConfig.Transmitters[i].Hex()) + } + + return ocrtypes.ContractConfig{ + ConfigDigest: configDigest, + ConfigCount: uint64(storedConfig.ConfigCount), + Signers: signers, + Transmitters: transmitters, + F: storedConfig.F, + OnchainConfig: storedConfig.OnchainConfig, + OffchainConfigVersion: storedConfig.OffchainConfigVersion, + OffchainConfig: storedConfig.OffchainConfig, + }, err +} diff --git a/core/services/relay/evm/config_poller_test.go b/core/services/relay/evm/config_poller_test.go new file mode 100644 index 00000000..9f70591a --- /dev/null +++ b/core/services/relay/evm/config_poller_test.go @@ -0,0 +1,383 @@ +package evm + +import ( + "database/sql" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/onsi/gomega" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + "github.com/goplugin/libocr/gethwrappers2/ocrconfigurationstoreevmsimple" + testoffchainaggregator2 
"github.com/goplugin/libocr/gethwrappers2/testocr2aggregator" + "github.com/goplugin/libocr/offchainreporting2/reportingplugin/median" + confighelper2 "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + ocrtypes2 "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmClientMocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/testhelpers" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestConfigPoller(t *testing.T) { + lggr := logger.TestLogger(t) + var ethClient *client.SimulatedBackendClient + var lp logpoller.LogPoller + var ocrAddress common.Address + var ocrContract *ocr2aggregator.OCR2Aggregator + var configStoreContractAddr common.Address + var configStoreContract *ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimple + var user *bind.TransactOpts + var b *backends.SimulatedBackend + var linkTokenAddress common.Address + var accessAddress common.Address + + ld := OCR2AggregatorLogDecoder + + { + key, err := crypto.GenerateKey() + require.NoError(t, err) + user, err = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + require.NoError(t, err) + b = backends.NewSimulatedBackend(core.GenesisAlloc{ + user.From: 
{Balance: big.NewInt(1000000000000000000)}}, + 5*ethconfig.Defaults.Miner.GasCeil) + linkTokenAddress, _, _, err = link_token_interface.DeployLinkToken(user, b) + require.NoError(t, err) + accessAddress, _, _, err = testoffchainaggregator2.DeploySimpleWriteAccessController(user, b) + require.NoError(t, err, "failed to deploy test access controller contract") + ocrAddress, _, ocrContract, err = ocr2aggregator.DeployOCR2Aggregator( + user, + b, + linkTokenAddress, + big.NewInt(0), + big.NewInt(10), + accessAddress, + accessAddress, + 9, + "TEST", + ) + require.NoError(t, err) + configStoreContractAddr, _, configStoreContract, err = ocrconfigurationstoreevmsimple.DeployOCRConfigurationStoreEVMSimple(user, b) + require.NoError(t, err) + b.Commit() + + db := pgtest.NewSqlxDB(t) + cfg := pgtest.NewQConfig(false) + ethClient = evmclient.NewSimulatedBackendClient(t, b, testutils.SimulatedChainID) + lorm := logpoller.NewORM(testutils.SimulatedChainID, db, lggr, cfg) + lp = logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, false, 1, 2, 2, 1000) + servicetest.Run(t, lp) + } + + t.Run("LatestConfig errors if there is no config in logs and config store is unconfigured", func(t *testing.T) { + cp, err := NewConfigPoller(lggr, CPConfig{ethClient, lp, ocrAddress, nil, ld}) + require.NoError(t, err) + + _, err = cp.LatestConfig(testutils.Context(t), 0) + require.Error(t, err) + assert.Contains(t, err.Error(), "no logs found for config on contract") + }) + + t.Run("happy path (with config store)", func(t *testing.T) { + cp, err := NewConfigPoller(lggr, CPConfig{ethClient, lp, ocrAddress, &configStoreContractAddr, ld}) + require.NoError(t, err) + // Should have no config to begin with. 
+ _, configDigest, err := cp.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + require.Equal(t, ocrtypes2.ConfigDigest{}, configDigest) + // Should error because there are no logs for config at block 0 + _, err = cp.LatestConfig(testutils.Context(t), 0) + require.Error(t, err) + assert.Contains(t, err.Error(), "config details missing while trying to lookup config in store") + + // Set the config + contractConfig := setConfig(t, median.OffchainConfig{ + AlphaReportInfinite: false, + AlphaReportPPB: 0, + AlphaAcceptInfinite: true, + AlphaAcceptPPB: 0, + DeltaC: 10, + }, ocrContract, user) + b.Commit() + latest, err := b.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + // Ensure we capture this config set log. + require.NoError(t, lp.Replay(testutils.Context(t), latest.Number().Int64()-1)) + + // Send blocks until we see the config updated. + var configBlock uint64 + var digest [32]byte + gomega.NewGomegaWithT(t).Eventually(func() bool { + b.Commit() + configBlock, digest, err = cp.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + return ocrtypes2.ConfigDigest{} != digest + }, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue()) + + // Assert the config returned is the one we configured. + newConfig, err := cp.LatestConfig(testutils.Context(t), configBlock) + require.NoError(t, err) + // Note we don't check onchainConfig, as that is populated in the contract itself. 
+ assert.Equal(t, digest, [32]byte(newConfig.ConfigDigest)) + assert.Equal(t, contractConfig.Signers, newConfig.Signers) + assert.Equal(t, contractConfig.Transmitters, newConfig.Transmitters) + assert.Equal(t, contractConfig.F, newConfig.F) + assert.Equal(t, contractConfig.OffchainConfigVersion, newConfig.OffchainConfigVersion) + assert.Equal(t, contractConfig.OffchainConfig, newConfig.OffchainConfig) + }) + + { + var err error + ocrAddress, _, ocrContract, err = ocr2aggregator.DeployOCR2Aggregator( + user, + b, + linkTokenAddress, + big.NewInt(0), + big.NewInt(10), + accessAddress, + accessAddress, + 9, + "TEST", + ) + require.NoError(t, err) + b.Commit() + } + + t.Run("LatestConfigDetails, when logs have been pruned and config store contract is configured", func(t *testing.T) { + // Give it a log poller that will never return logs + mp := new(mocks.LogPoller) + mp.On("RegisterFilter", mock.Anything).Return(nil) + mp.On("LatestLogByEventSigWithConfs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, sql.ErrNoRows) + + t.Run("if callLatestConfigDetails succeeds", func(t *testing.T) { + cp, err := newConfigPoller(lggr, ethClient, mp, ocrAddress, &configStoreContractAddr, ld) + require.NoError(t, err) + + t.Run("when config has not been set, returns zero values", func(t *testing.T) { + changedInBlock, configDigest, err := cp.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, 0, int(changedInBlock)) + assert.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + }) + t.Run("when config has been set, returns config details", func(t *testing.T) { + setConfig(t, median.OffchainConfig{ + AlphaReportInfinite: false, + AlphaReportPPB: 0, + AlphaAcceptInfinite: true, + AlphaAcceptPPB: 0, + DeltaC: 10, + }, ocrContract, user) + b.Commit() + + changedInBlock, configDigest, err := cp.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + + latest, err := b.BlockByNumber(testutils.Context(t), nil) + 
require.NoError(t, err) + + onchainDetails, err := ocrContract.LatestConfigDetails(nil) + require.NoError(t, err) + + assert.Equal(t, latest.Number().Int64(), int64(changedInBlock)) + assert.Equal(t, onchainDetails.ConfigDigest, [32]byte(configDigest)) + }) + }) + t.Run("returns error if callLatestConfigDetails fails", func(t *testing.T) { + failingClient := new(evmClientMocks.Client) + failingClient.On("ConfiguredChainID").Return(big.NewInt(42)) + failingClient.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("something exploded")) + cp, err := newConfigPoller(lggr, failingClient, mp, ocrAddress, &configStoreContractAddr, ld) + require.NoError(t, err) + + cp.configStoreContractAddr = &configStoreContractAddr + cp.configStoreContract = configStoreContract + + _, _, err = cp.LatestConfigDetails(testutils.Context(t)) + assert.EqualError(t, err, "something exploded") + + failingClient.AssertExpectations(t) + }) + }) + + { + var err error + // deploy it again to reset to empty config + ocrAddress, _, ocrContract, err = ocr2aggregator.DeployOCR2Aggregator( + user, + b, + linkTokenAddress, + big.NewInt(0), + big.NewInt(10), + accessAddress, + accessAddress, + 9, + "TEST", + ) + require.NoError(t, err) + b.Commit() + } + + t.Run("LatestConfig, when logs have been pruned and config store contract is configured", func(t *testing.T) { + // Give it a log poller that will never return logs + mp := mocks.NewLogPoller(t) + mp.On("RegisterFilter", mock.Anything).Return(nil) + mp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + mp.On("LatestLogByEventSigWithConfs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, sql.ErrNoRows) + + t.Run("if callReadConfig succeeds", func(t *testing.T) { + cp, err := newConfigPoller(lggr, ethClient, mp, ocrAddress, &configStoreContractAddr, ld) + require.NoError(t, err) + + t.Run("when config has not been set, returns error", func(t 
*testing.T) { + _, err := cp.LatestConfig(testutils.Context(t), 0) + require.Error(t, err) + + assert.Contains(t, err.Error(), "config details missing while trying to lookup config in store") + }) + t.Run("when config has been set, returns config", func(t *testing.T) { + b.Commit() + onchainDetails, err := ocrContract.LatestConfigDetails(nil) + require.NoError(t, err) + + contractConfig := setConfig(t, median.OffchainConfig{ + AlphaReportInfinite: false, + AlphaReportPPB: 0, + AlphaAcceptInfinite: true, + AlphaAcceptPPB: 0, + DeltaC: 10, + }, ocrContract, user) + + signerAddresses, err := OnchainPublicKeyToAddress(contractConfig.Signers) + require.NoError(t, err) + transmitterAddresses, err := AccountToAddress(contractConfig.Transmitters) + require.NoError(t, err) + + configuration := ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimpleConfigurationEVMSimple{ + Signers: signerAddresses, + Transmitters: transmitterAddresses, + OnchainConfig: contractConfig.OnchainConfig, + OffchainConfig: contractConfig.OffchainConfig, + ContractAddress: ocrAddress, + OffchainConfigVersion: contractConfig.OffchainConfigVersion, + ConfigCount: 1, + F: contractConfig.F, + } + + addConfig(t, user, configStoreContract, configuration) + + b.Commit() + onchainDetails, err = ocrContract.LatestConfigDetails(nil) + require.NoError(t, err) + + newConfig, err := cp.LatestConfig(testutils.Context(t), 0) + require.NoError(t, err) + + assert.Equal(t, onchainDetails.ConfigDigest, [32]byte(newConfig.ConfigDigest)) + assert.Equal(t, contractConfig.Signers, newConfig.Signers) + assert.Equal(t, contractConfig.Transmitters, newConfig.Transmitters) + assert.Equal(t, contractConfig.F, newConfig.F) + assert.Equal(t, contractConfig.OffchainConfigVersion, newConfig.OffchainConfigVersion) + assert.Equal(t, contractConfig.OffchainConfig, newConfig.OffchainConfig) + }) + }) + t.Run("returns error if callReadConfig fails", func(t *testing.T) { + failingClient := new(evmClientMocks.Client) + 
failingClient.On("ConfiguredChainID").Return(big.NewInt(42)) + failingClient.On("CallContract", mock.Anything, mock.MatchedBy(func(callArgs ethereum.CallMsg) bool { + // initial call to retrieve config store address from aggregator + return *callArgs.To == ocrAddress + }), mock.Anything).Return(nil, errors.New("something exploded")).Once() + cp, err := newConfigPoller(lggr, failingClient, mp, ocrAddress, &configStoreContractAddr, ld) + require.NoError(t, err) + + _, err = cp.LatestConfig(testutils.Context(t), 0) + assert.EqualError(t, err, "failed to get latest config details: something exploded") + + failingClient.AssertExpectations(t) + }) + }) +} + +func setConfig(t *testing.T, pluginConfig median.OffchainConfig, ocrContract *ocr2aggregator.OCR2Aggregator, user *bind.TransactOpts) ocrtypes2.ContractConfig { + // Create minimum number of nodes. + var oracles []confighelper2.OracleIdentityExtra + for i := 0; i < 4; i++ { + oracles = append(oracles, confighelper2.OracleIdentityExtra{ + OracleIdentity: confighelper2.OracleIdentity{ + OnchainPublicKey: evmutils.RandomAddress().Bytes(), + TransmitAccount: ocrtypes2.Account(evmutils.RandomAddress().Hex()), + OffchainPublicKey: evmutils.RandomBytes32(), + PeerID: utils.MustNewPeerID(), + }, + ConfigEncryptionPublicKey: evmutils.RandomBytes32(), + }) + } + // Gnerate OnchainConfig + onchainConfig, err := testhelpers.GenerateDefaultOCR2OnchainConfig(big.NewInt(0), big.NewInt(10)) + require.NoError(t, err) + // Change the offramp config + signers, transmitters, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper2.ContractSetConfigArgsForTests( + 2*time.Second, // deltaProgress + 1*time.Second, // deltaResend + 1*time.Second, // deltaRound + 500*time.Millisecond, // deltaGrace + 2*time.Second, // deltaStage + 3, + []int{1, 1, 1, 1}, + oracles, + pluginConfig.Encode(), + 50*time.Millisecond, + 50*time.Millisecond, + 50*time.Millisecond, + 50*time.Millisecond, + 50*time.Millisecond, + 1, // 
faults + onchainConfig, + ) + require.NoError(t, err) + signerAddresses, err := OnchainPublicKeyToAddress(signers) + require.NoError(t, err) + transmitterAddresses, err := AccountToAddress(transmitters) + require.NoError(t, err) + _, err = ocrContract.SetConfig(user, signerAddresses, transmitterAddresses, threshold, onchainConfig, offchainConfigVersion, offchainConfig) + require.NoError(t, err) + return ocrtypes2.ContractConfig{ + Signers: signers, + Transmitters: transmitters, + F: threshold, + OnchainConfig: onchainConfig, + OffchainConfigVersion: offchainConfigVersion, + OffchainConfig: offchainConfig, + } +} + +func addConfig(t *testing.T, user *bind.TransactOpts, configStoreContract *ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimple, config ocrconfigurationstoreevmsimple.OCRConfigurationStoreEVMSimpleConfigurationEVMSimple) { + + _, err := configStoreContract.AddConfig(user, config) + require.NoError(t, err) +} diff --git a/core/services/relay/evm/contract_transmitter.go b/core/services/relay/evm/contract_transmitter.go new file mode 100644 index 00000000..6f00ec07 --- /dev/null +++ b/core/services/relay/evm/contract_transmitter.go @@ -0,0 +1,209 @@ +package evm + +import ( + "context" + "database/sql" + "encoding/hex" + "math/big" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + gethcommon "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/goplugin/libocr/offchainreporting2plus/chains/evmutil" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type ContractTransmitter 
interface { + services.ServiceCtx + ocrtypes.ContractTransmitter +} + +var _ ContractTransmitter = &contractTransmitter{} + +type Transmitter interface { + CreateEthTransaction(ctx context.Context, toAddress gethcommon.Address, payload []byte, txMeta *txmgr.TxMeta) error + FromAddress() gethcommon.Address +} + +type ReportToEthMetadata func([]byte) (*txmgr.TxMeta, error) + +func reportToEvmTxMetaNoop([]byte) (*txmgr.TxMeta, error) { + return nil, nil +} + +type contractTransmitter struct { + contractAddress gethcommon.Address + contractABI abi.ABI + transmitter Transmitter + transmittedEventSig common.Hash + contractReader contractReader + lp logpoller.LogPoller + lggr logger.Logger + reportToEvmTxMeta ReportToEthMetadata +} + +func transmitterFilterName(addr common.Address) string { + return logpoller.FilterName("OCR ContractTransmitter", addr.String()) +} + +func NewOCRContractTransmitter( + address gethcommon.Address, + caller contractReader, + contractABI abi.ABI, + transmitter Transmitter, + lp logpoller.LogPoller, + lggr logger.Logger, + reportToEvmTxMeta ReportToEthMetadata, +) (*contractTransmitter, error) { + transmitted, ok := contractABI.Events["Transmitted"] + if !ok { + return nil, errors.New("invalid ABI, missing transmitted") + } + + err := lp.RegisterFilter(logpoller.Filter{Name: transmitterFilterName(address), EventSigs: []common.Hash{transmitted.ID}, Addresses: []common.Address{address}}) + if err != nil { + return nil, err + } + if reportToEvmTxMeta == nil { + reportToEvmTxMeta = reportToEvmTxMetaNoop + } + return &contractTransmitter{ + contractAddress: address, + contractABI: contractABI, + transmitter: transmitter, + transmittedEventSig: transmitted.ID, + lp: lp, + contractReader: caller, + lggr: lggr.Named("OCRContractTransmitter"), + reportToEvmTxMeta: reportToEvmTxMeta, + }, nil +} + +// Transmit sends the report to the on-chain smart contract's Transmit method. 
+func (oc *contractTransmitter) Transmit(ctx context.Context, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signatures []ocrtypes.AttributedOnchainSignature) error { + var rs [][32]byte + var ss [][32]byte + var vs [32]byte + if len(signatures) > 32 { + return errors.New("too many signatures, maximum is 32") + } + for i, as := range signatures { + r, s, v, err := evmutil.SplitSignature(as.Signature) + if err != nil { + panic("eventTransmit(ev): error in SplitSignature") + } + rs = append(rs, r) + ss = append(ss, s) + vs[i] = v + } + rawReportCtx := evmutil.RawReportContext(reportCtx) + + txMeta, err := oc.reportToEvmTxMeta(report) + if err != nil { + oc.lggr.Warnw("failed to generate tx metadata for report", "err", err) + } + + oc.lggr.Debugw("Transmitting report", "report", hex.EncodeToString(report), "rawReportCtx", rawReportCtx, "contractAddress", oc.contractAddress, "txMeta", txMeta) + + payload, err := oc.contractABI.Pack("transmit", rawReportCtx, []byte(report), rs, ss, vs) + if err != nil { + return errors.Wrap(err, "abi.Pack failed") + } + + return errors.Wrap(oc.transmitter.CreateEthTransaction(ctx, oc.contractAddress, payload, txMeta), "failed to send Eth transaction") +} + +type contractReader interface { + CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) +} + +func parseTransmitted(log []byte) ([32]byte, uint32, error) { + var args abi.Arguments = []abi.Argument{ + { + Name: "configDigest", + Type: utils.MustAbiType("bytes32", nil), + }, + { + Name: "epoch", + Type: utils.MustAbiType("uint32", nil), + }, + } + transmitted, err := args.Unpack(log) + if err != nil { + return [32]byte{}, 0, err + } + if len(transmitted) < 2 { + return [32]byte{}, 0, errors.New("transmitted event log has too few arguments") + } + configDigest := *abi.ConvertType(transmitted[0], new([32]byte)).(*[32]byte) + epoch := *abi.ConvertType(transmitted[1], new(uint32)).(*uint32) + return configDigest, epoch, err +} + +func 
callContract(ctx context.Context, addr common.Address, contractABI abi.ABI, method string, args []interface{}, caller contractReader) ([]interface{}, error) { + input, err := contractABI.Pack(method, args...) + if err != nil { + return nil, err + } + output, err := caller.CallContract(ctx, ethereum.CallMsg{To: &addr, Data: input}, nil) + if err != nil { + return nil, err + } + return contractABI.Unpack(method, output) +} + +// LatestConfigDigestAndEpoch retrieves the latest config digest and epoch from the OCR2 contract. +// It is plugin independent, in particular avoids use of the plugin specific generated evm wrappers +// by using the evm client Call directly for functions/events that are part of OCR2Abstract. +func (oc *contractTransmitter) LatestConfigDigestAndEpoch(ctx context.Context) (ocrtypes.ConfigDigest, uint32, error) { + latestConfigDigestAndEpoch, err := callContract(ctx, oc.contractAddress, oc.contractABI, "latestConfigDigestAndEpoch", nil, oc.contractReader) + if err != nil { + return ocrtypes.ConfigDigest{}, 0, err + } + // Panic on these conversions erroring, would mean a broken contract. + scanLogs := *abi.ConvertType(latestConfigDigestAndEpoch[0], new(bool)).(*bool) + configDigest := *abi.ConvertType(latestConfigDigestAndEpoch[1], new([32]byte)).(*[32]byte) + epoch := *abi.ConvertType(latestConfigDigestAndEpoch[2], new(uint32)).(*uint32) + if !scanLogs { + return configDigest, epoch, nil + } + + // Otherwise, we have to scan for the logs. 
+ if err != nil { + return ocrtypes.ConfigDigest{}, 0, err + } + latest, err := oc.lp.LatestLogByEventSigWithConfs( + oc.transmittedEventSig, oc.contractAddress, 1, pg.WithParentCtx(ctx)) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + // No transmissions yet + return configDigest, 0, nil + } + return ocrtypes.ConfigDigest{}, 0, err + } + return parseTransmitted(latest.Data) +} + +// FromAccount returns the account from which the transmitter invokes the contract +func (oc *contractTransmitter) FromAccount() (ocrtypes.Account, error) { + return ocrtypes.Account(oc.transmitter.FromAddress().String()), nil +} + +func (oc *contractTransmitter) Start(ctx context.Context) error { return nil } +func (oc *contractTransmitter) Close() error { return nil } + +// Has no state/lifecycle so it's always healthy and ready +func (oc *contractTransmitter) Ready() error { return nil } +func (oc *contractTransmitter) HealthReport() map[string]error { + return map[string]error{oc.Name(): nil} +} +func (oc *contractTransmitter) Name() string { return oc.lggr.Name() } diff --git a/core/services/relay/evm/contract_transmitter_test.go b/core/services/relay/evm/contract_transmitter_test.go new file mode 100644 index 00000000..eed80d26 --- /dev/null +++ b/core/services/relay/evm/contract_transmitter_test.go @@ -0,0 +1,76 @@ +package evm + +import ( + "context" + "encoding/hex" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + gethcommon "github.com/ethereum/go-ethereum/common" + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + lpmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +var sampleAddress = testutils.NewAddress() + +type mockTransmitter struct{} + +func (mockTransmitter) CreateEthTransaction(ctx context.Context, toAddress gethcommon.Address, payload []byte, _ *txmgr.TxMeta) error { + return nil +} +func (mockTransmitter) FromAddress() gethcommon.Address { return sampleAddress } + +func TestContractTransmitter(t *testing.T) { + t.Parallel() + + lggr := logger.TestLogger(t) + c := evmclimocks.NewClient(t) + lp := lpmocks.NewLogPoller(t) + // scanLogs = false + digestAndEpochDontScanLogs, _ := hex.DecodeString( + "0000000000000000000000000000000000000000000000000000000000000000" + // false + "000130da6b9315bd59af6b0a3f5463c0d0a39e92eaa34cbcbdbace7b3bfcc776" + // config digest + "0000000000000000000000000000000000000000000000000000000000000002") // epoch + c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(digestAndEpochDontScanLogs, nil).Once() + contractABI, _ := abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) + lp.On("RegisterFilter", mock.Anything).Return(nil) + ot, err := NewOCRContractTransmitter(gethcommon.Address{}, c, contractABI, mockTransmitter{}, lp, lggr, func(b []byte) (*txmgr.TxMeta, error) { + return &txmgr.TxMeta{}, nil + }) + require.NoError(t, err) + digest, epoch, err := ot.LatestConfigDigestAndEpoch(testutils.Context(t)) + require.NoError(t, err) + assert.Equal(t, "000130da6b9315bd59af6b0a3f5463c0d0a39e92eaa34cbcbdbace7b3bfcc776", hex.EncodeToString(digest[:])) + assert.Equal(t, uint32(2), epoch) + + // scanLogs = true + digestAndEpochScanLogs, _ := hex.DecodeString( + "0000000000000000000000000000000000000000000000000000000000000001" + // true + "000130da6b9315bd59af6b0a3f5463c0d0a39e92eaa34cbcbdbace7b3bfcc776" + // config digest + "0000000000000000000000000000000000000000000000000000000000000002") // epoch + c.On("CallContract", mock.Anything, mock.Anything, 
mock.Anything).Return(digestAndEpochScanLogs, nil).Once() + transmitted2, _ := hex.DecodeString( + "000130da6b9315bd59af6b0a3f5463c0d0a39e92eaa34cbcbdbace7b3bfcc777" + // config digest + "0000000000000000000000000000000000000000000000000000000000000002") // epoch + lp.On("LatestLogByEventSigWithConfs", + mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&logpoller.Log{ + Data: transmitted2, + }, nil) + digest, epoch, err = ot.LatestConfigDigestAndEpoch(testutils.Context(t)) + require.NoError(t, err) + assert.Equal(t, "000130da6b9315bd59af6b0a3f5463c0d0a39e92eaa34cbcbdbace7b3bfcc777", hex.EncodeToString(digest[:])) + assert.Equal(t, uint32(2), epoch) + from, err := ot.FromAccount() + require.NoError(t, err) + assert.Equal(t, sampleAddress.String(), string(from)) +} diff --git a/core/services/relay/evm/decoder.go b/core/services/relay/evm/decoder.go new file mode 100644 index 00000000..42bb9b61 --- /dev/null +++ b/core/services/relay/evm/decoder.go @@ -0,0 +1,102 @@ +package evm + +import ( + "context" + "fmt" + "reflect" + + "github.com/mitchellh/mapstructure" + + commontypes "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +type decoder struct { + Definitions map[string]types.CodecEntry +} + +var _ commontypes.Decoder = &decoder{} + +func (m *decoder) Decode(_ context.Context, raw []byte, into any, itemType string) error { + info, ok := m.Definitions[itemType] + if !ok { + return fmt.Errorf("%w: cannot find definition for %s", commontypes.ErrInvalidType, itemType) + } + + decode, err := extractDecoding(info, raw) + if err != nil { + return err + } + + rDecode := reflect.ValueOf(decode) + switch rDecode.Kind() { + case reflect.Array: + return m.decodeArray(into, rDecode) + case reflect.Slice: + iInto := reflect.Indirect(reflect.ValueOf(into)) + length := rDecode.Len() + iInto.Set(reflect.MakeSlice(iInto.Type(), length, length)) + return setElements(length, rDecode, iInto) + 
default: + return mapstructureDecode(decode, into) + } +} + +func (m *decoder) decodeArray(into any, rDecode reflect.Value) error { + iInto := reflect.Indirect(reflect.ValueOf(into)) + length := rDecode.Len() + if length != iInto.Len() { + return commontypes.ErrSliceWrongLen + } + iInto.Set(reflect.New(iInto.Type()).Elem()) + return setElements(length, rDecode, iInto) +} + +func (m *decoder) GetMaxDecodingSize(_ context.Context, n int, itemType string) (int, error) { + entry, ok := m.Definitions[itemType] + if !ok { + return 0, fmt.Errorf("%w: nil entry", commontypes.ErrInvalidType) + } + return entry.GetMaxSize(n) +} + +func extractDecoding(info types.CodecEntry, raw []byte) (any, error) { + unpacked := map[string]any{} + args := info.Args() + if err := args.UnpackIntoMap(unpacked, raw); err != nil { + return nil, fmt.Errorf("%w: %w: for args %#v", commontypes.ErrInvalidEncoding, err, args) + } + var decode any = unpacked + + if noName, ok := unpacked[""]; ok { + decode = noName + } + return decode, nil +} + +func setElements(length int, rDecode reflect.Value, iInto reflect.Value) error { + for i := 0; i < length; i++ { + if err := mapstructureDecode(rDecode.Index(i).Interface(), iInto.Index(i).Addr().Interface()); err != nil { + return err + } + } + + return nil +} + +func mapstructureDecode(src, dest any) error { + mDecoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc(evmDecoderHooks...), + Result: dest, + Squash: true, + }) + if err != nil { + return fmt.Errorf("%w: %w", commontypes.ErrInvalidType, err) + } + + if err = mDecoder.Decode(src); err != nil { + return fmt.Errorf("%w: %w", commontypes.ErrInvalidType, err) + } + return nil +} diff --git a/core/services/relay/evm/encoder.go b/core/services/relay/evm/encoder.go new file mode 100644 index 00000000..bc52a459 --- /dev/null +++ b/core/services/relay/evm/encoder.go @@ -0,0 +1,145 @@ +package evm + +import ( + "context" + "fmt" + "reflect" + + 
commontypes "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +type encoder struct { + Definitions map[string]types.CodecEntry +} + +var _ commontypes.Encoder = &encoder{} + +func (e *encoder) Encode(_ context.Context, item any, itemType string) (res []byte, err error) { + // nil values can cause abi.Arguments.Pack to panic. + defer func() { + if r := recover(); r != nil { + res = nil + err = fmt.Errorf("%w: cannot encode type", commontypes.ErrInvalidType) + } + }() + info, ok := e.Definitions[itemType] + if !ok { + return nil, fmt.Errorf("%w: cannot find definition for %s", commontypes.ErrInvalidType, itemType) + } + + if len(info.Args()) == 0 { + return info.EncodingPrefix(), nil + } else if item == nil { + return nil, fmt.Errorf("%w: cannot encode nil value for %s", commontypes.ErrInvalidType, itemType) + } + + return encode(reflect.ValueOf(item), info) +} + +func (e *encoder) GetMaxEncodingSize(_ context.Context, n int, itemType string) (int, error) { + entry, ok := e.Definitions[itemType] + if !ok { + return 0, fmt.Errorf("%w: nil entry", commontypes.ErrInvalidType) + } + return entry.GetMaxSize(n) +} + +func encode(item reflect.Value, info types.CodecEntry) ([]byte, error) { + for item.Kind() == reflect.Pointer { + item = reflect.Indirect(item) + } + switch item.Kind() { + case reflect.Array, reflect.Slice: + native, err := representArray(item, info) + if err != nil { + return nil, err + } + return pack(info, native) + case reflect.Struct, reflect.Map: + values, err := unrollItem(item, info) + if err != nil { + return nil, err + } + return pack(info, values...) 
+ default: + return nil, fmt.Errorf("%w: cannot encode kind %v", commontypes.ErrInvalidType, item.Kind()) + } +} + +func representArray(item reflect.Value, info types.CodecEntry) (any, error) { + length := item.Len() + checkedType := info.CheckedType() + checked := reflect.New(checkedType) + iChecked := reflect.Indirect(checked) + switch checkedType.Kind() { + case reflect.Array: + if checkedType.Len() != length { + return nil, commontypes.ErrSliceWrongLen + } + case reflect.Slice: + iChecked.Set(reflect.MakeSlice(checkedType, length, length)) + default: + return nil, fmt.Errorf("%w: cannot encode %v as array", commontypes.ErrInvalidType, checkedType.Kind()) + } + + checkedElm := checkedType.Elem() + for i := 0; i < length; i++ { + tmp := reflect.New(checkedElm) + if err := mapstructureDecode(item.Index(i).Interface(), tmp.Interface()); err != nil { + return nil, err + } + iChecked.Index(i).Set(tmp.Elem()) + } + native, err := info.ToNative(checked) + if err != nil { + return nil, err + } + + return native.Elem().Interface(), nil +} + +func unrollItem(item reflect.Value, info types.CodecEntry) ([]any, error) { + checkedType := info.CheckedType() + if item.CanAddr() { + item = item.Addr() + } + + if item.Type() == reflect.PointerTo(checkedType) { + var err error + if item, err = info.ToNative(item); err != nil { + return nil, err + } + } else if !info.IsNativePointer(item.Type()) { + var err error + checked := reflect.New(checkedType) + if err = mapstructureDecode(item.Interface(), checked.Interface()); err != nil { + return nil, err + } + if item, err = info.ToNative(checked); err != nil { + return nil, err + } + } + + item = reflect.Indirect(item) + length := item.NumField() + values := make([]any, length) + iType := item.Type() + for i := 0; i < length; i++ { + if iType.Field(i).IsExported() { + values[i] = item.Field(i).Interface() + } + } + return values, nil +} + +func pack(info types.CodecEntry, values ...any) ([]byte, error) { + bytes, err := 
info.Args().Pack(values...) + if err != nil { + return nil, fmt.Errorf("%w: %w", commontypes.ErrInvalidType, err) + } + + withPrefix := info.EncodingPrefix() + return append(withPrefix, bytes...), nil +} diff --git a/core/services/relay/evm/event_binding.go b/core/services/relay/evm/event_binding.go new file mode 100644 index 00000000..527eb021 --- /dev/null +++ b/core/services/relay/evm/event_binding.go @@ -0,0 +1,291 @@ +package evm + +import ( + "context" + "fmt" + "reflect" + "strings" + "sync" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/plugin-common/pkg/codec" + commontypes "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +type eventBinding struct { + address common.Address + contractName string + eventName string + lp logpoller.LogPoller + hash common.Hash + codec commontypes.RemoteCodec + pending bool + bound bool + registerCalled bool + lock sync.Mutex + inputInfo types.CodecEntry + inputModifier codec.Modifier + topicInfo types.CodecEntry + // used to allow Register and Unregister to be unique in case two bindings have the same event. + // otherwise, if one unregisters, it'll unregister both with the LogPoller. 
+ id string +} + +var _ readBinding = &eventBinding{} + +func (e *eventBinding) SetCodec(codec commontypes.RemoteCodec) { + e.codec = codec +} + +func (e *eventBinding) Register() error { + e.lock.Lock() + defer e.lock.Unlock() + + e.registerCalled = true + if !e.bound || e.lp.HasFilter(e.id) { + return nil + } + + if err := e.lp.RegisterFilter(logpoller.Filter{ + Name: e.id, + EventSigs: evmtypes.HashArray{e.hash}, + Addresses: evmtypes.AddressArray{e.address}, + }); err != nil { + return fmt.Errorf("%w: %w", commontypes.ErrInternal, err) + } + return nil +} + +func (e *eventBinding) Unregister() error { + e.lock.Lock() + defer e.lock.Unlock() + + if !e.lp.HasFilter(e.id) { + return nil + } + + if err := e.lp.UnregisterFilter(e.id); err != nil { + return fmt.Errorf("%w: %w", commontypes.ErrInternal, err) + } + return nil +} + +func (e *eventBinding) GetLatestValue(ctx context.Context, params, into any) error { + if !e.bound { + return fmt.Errorf("%w: event not bound", commontypes.ErrInvalidType) + } + + confs := logpoller.Finalized + if e.pending { + confs = logpoller.Unconfirmed + } + + if len(e.inputInfo.Args()) == 0 { + return e.getLatestValueWithoutFilters(ctx, confs, into) + } + + return e.getLatestValueWithFilters(ctx, confs, params, into) +} + +func (e *eventBinding) Bind(binding commontypes.BoundContract) error { + if err := e.Unregister(); err != nil { + return err + } + + e.address = common.HexToAddress(binding.Address) + e.pending = binding.Pending + e.bound = true + + if e.registerCalled { + return e.Register() + } + return nil +} + +func (e *eventBinding) getLatestValueWithoutFilters(ctx context.Context, confs logpoller.Confirmations, into any) error { + log, err := e.lp.LatestLogByEventSigWithConfs(e.hash, e.address, confs) + if err = wrapInternalErr(err); err != nil { + return err + } + + return e.decodeLog(ctx, log, into) +} + +func (e *eventBinding) getLatestValueWithFilters( + ctx context.Context, confs logpoller.Confirmations, params, into any) 
error { + offChain, err := e.convertToOffChainType(params) + if err != nil { + return err + } + + checkedParams, err := e.inputModifier.TransformToOnChain(offChain, "" /* unused */) + if err != nil { + return err + } + + nativeParams, err := e.inputInfo.ToNative(reflect.ValueOf(checkedParams)) + if err != nil { + return err + } + + filtersAndIndices, err := e.encodeParams(nativeParams) + if err != nil { + return err + } + + fai := filtersAndIndices[0] + remainingFilters := filtersAndIndices[1:] + + logs, err := e.lp.IndexedLogs(e.hash, e.address, 1, []common.Hash{fai}, confs) + if err != nil { + return wrapInternalErr(err) + } + + // TODO: there should be a better way to ask log poller to filter these + // First, you should be able to ask for as many topics to match + // Second, you should be able to get the latest only + var logToUse *logpoller.Log + for _, log := range logs { + tmp := log + if compareLogs(&tmp, logToUse) > 0 && matchesRemainingFilters(&tmp, remainingFilters) { + // copy so that it's not pointing to the changing variable + logToUse = &tmp + } + } + + if logToUse == nil { + return fmt.Errorf("%w: no events found", commontypes.ErrNotFound) + } + + return e.decodeLog(ctx, logToUse, into) +} + +func (e *eventBinding) convertToOffChainType(params any) (any, error) { + itemType := wrapItemType(e.contractName, e.eventName, true) + offChain, err := e.codec.CreateType(itemType, true) + if err != nil { + return nil, err + } + + if err = mapstructureDecode(params, offChain); err != nil { + return nil, err + } + + return offChain, nil +} + +func compareLogs(log, use *logpoller.Log) int64 { + if use == nil { + return 1 + } + + if log.BlockNumber != use.BlockNumber { + return log.BlockNumber - use.BlockNumber + } + + return log.LogIndex - use.LogIndex +} + +func matchesRemainingFilters(log *logpoller.Log, filters []common.Hash) bool { + for i, rfai := range filters { + if !reflect.DeepEqual(rfai[:], log.Topics[i+2]) { + return false + } + } + + return true +} + 
+func (e *eventBinding) encodeParams(item reflect.Value) ([]common.Hash, error) { + for item.Kind() == reflect.Pointer { + item = reflect.Indirect(item) + } + + var topics []any + switch item.Kind() { + case reflect.Array, reflect.Slice: + native, err := representArray(item, e.inputInfo) + if err != nil { + return nil, err + } + topics = []any{native} + case reflect.Struct, reflect.Map: + var err error + if topics, err = unrollItem(item, e.inputInfo); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("%w: cannot encode kind %v", commontypes.ErrInvalidType, item.Kind()) + } + + // abi params allow you to Pack a pointers, but MakeTopics doesn't work with pointers. + if err := e.derefTopics(topics); err != nil { + return nil, err + } + + hashes, err := abi.MakeTopics(topics) + if err != nil { + return nil, wrapInternalErr(err) + } + + if len(hashes) != 1 { + return nil, fmt.Errorf("%w: expected 1 filter set, got %d", commontypes.ErrInternal, len(hashes)) + } + + return hashes[0], nil +} + +func (e *eventBinding) derefTopics(topics []any) error { + for i, topic := range topics { + rTopic := reflect.ValueOf(topic) + if rTopic.Kind() == reflect.Pointer { + if rTopic.IsNil() { + return fmt.Errorf( + "%w: input topic %s cannot be nil", commontypes.ErrInvalidType, e.inputInfo.Args()[i].Name) + } + topics[i] = rTopic.Elem().Interface() + } + } + return nil +} + +func (e *eventBinding) decodeLog(ctx context.Context, log *logpoller.Log, into any) error { + dataType := wrapItemType(e.contractName, e.eventName, false) + if err := e.codec.Decode(ctx, log.Data, into, dataType); err != nil { + return err + } + + topics := make([]common.Hash, len(e.topicInfo.Args())) + if len(log.Topics) < len(topics)+1 { + return fmt.Errorf("%w: not enough topics to decode", commontypes.ErrInvalidType) + } + + for i := 0; i < len(topics); i++ { + topics[i] = common.Hash(log.Topics[i+1]) + } + + topicsInto := map[string]any{} + if err := abi.ParseTopicsIntoMap(topicsInto, 
e.topicInfo.Args(), topics); err != nil { + return fmt.Errorf("%w: %w", commontypes.ErrInvalidType, err) + } + + return mapstructureDecode(topicsInto, into) +} + +func wrapInternalErr(err error) error { + if err == nil { + return nil + } + + errStr := err.Error() + if strings.Contains(errStr, "not found") || strings.Contains(errStr, "no rows") { + return fmt.Errorf("%w: %w", commontypes.ErrNotFound, err) + } + return fmt.Errorf("%w: %w", commontypes.ErrInternal, err) +} diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go new file mode 100644 index 00000000..9be00115 --- /dev/null +++ b/core/services/relay/evm/evm.go @@ -0,0 +1,593 @@ +package evm + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "sync" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/jmoiron/sqlx" + pkgerrors "github.com/pkg/errors" + "golang.org/x/exp/maps" + + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + "github.com/goplugin/libocr/offchainreporting2/reportingplugin/median" + "github.com/goplugin/libocr/offchainreporting2/reportingplugin/median/evmreportcodec" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services" + commontypes "github.com/goplugin/plugin-common/pkg/types" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txm "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + mercuryconfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/mercury/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + 
"github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/functions" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury" + mercuryutils "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + reportcodecv1 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v1/reportcodec" + reportcodecv2 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v2/reportcodec" + reportcodecv3 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v3/reportcodec" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +var ( + OCR2AggregatorTransmissionContractABI abi.ABI + OCR2AggregatorLogDecoder LogDecoder +) + +func init() { + var err error + OCR2AggregatorTransmissionContractABI, err = abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorMetaData.ABI)) + if err != nil { + panic(err) + } + OCR2AggregatorLogDecoder, err = newOCR2AggregatorLogDecoder() + if err != nil { + panic(err) + } +} + +var _ commontypes.Relayer = &Relayer{} //nolint:staticcheck + +type Relayer struct { + db *sqlx.DB + chain legacyevm.Chain + lggr logger.Logger + ks CSAETHKeystore + mercuryPool wsrpc.Pool + pgCfg pg.QConfig + chainReader commontypes.ChainReader + codec commontypes.Codec +} + +type CSAETHKeystore interface { + CSA() keystore.CSA + Eth() keystore.Eth +} + +type RelayerOpts struct { + *sqlx.DB + pg.QConfig + CSAETHKeystore + MercuryPool wsrpc.Pool +} + +func (c RelayerOpts) Validate() error { + var err error + if c.DB == nil { + err = errors.Join(err, errors.New("nil DB")) + } + if c.QConfig == nil { + err = errors.Join(err, errors.New("nil QConfig")) + } + if c.CSAETHKeystore == nil { + err = errors.Join(err, errors.New("nil Keystore")) + } + + if err != nil { + err = fmt.Errorf("invalid RelayerOpts: %w", err) + } + return err +} + +func NewRelayer(lggr logger.Logger, chain legacyevm.Chain, opts RelayerOpts) (*Relayer, 
error) { + err := opts.Validate() + if err != nil { + return nil, fmt.Errorf("cannot create evm relayer: %w", err) + } + lggr = lggr.Named("Relayer") + return &Relayer{ + db: opts.DB, + chain: chain, + lggr: lggr, + ks: opts.CSAETHKeystore, + mercuryPool: opts.MercuryPool, + pgCfg: opts.QConfig, + }, nil +} + +func (r *Relayer) Name() string { + return r.lggr.Name() +} + +// Start does noop: no subservices started on relay start, but when the first job is started +func (r *Relayer) Start(context.Context) error { + return nil +} + +func (r *Relayer) Close() error { + return nil +} + +// Ready does noop: always ready +func (r *Relayer) Ready() error { + return r.chain.Ready() +} + +func (r *Relayer) HealthReport() (report map[string]error) { + report = make(map[string]error) + maps.Copy(report, r.chain.HealthReport()) + return +} + +func (r *Relayer) NewPluginProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.PluginProvider, error) { + lggr := r.lggr.Named("PluginProvider").Named(rargs.ExternalJobID.String()) + + configWatcher, err := newStandardConfigProvider(r.lggr, r.chain, types.NewRelayOpts(rargs)) + if err != nil { + return nil, err + } + + transmitter, err := newOnChainContractTransmitter(r.lggr, rargs, pargs.TransmitterID, r.ks.Eth(), configWatcher, configTransmitterOpts{}, OCR2AggregatorTransmissionContractABI) + if err != nil { + return nil, err + } + + return NewPluginProvider( + r.chainReader, + r.codec, + transmitter, + configWatcher, + lggr, + ), nil +} + +func (r *Relayer) NewMercuryProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.MercuryProvider, error) { + lggr := r.lggr.Named("MercuryProvider").Named(rargs.ExternalJobID.String()) + relayOpts := types.NewRelayOpts(rargs) + relayConfig, err := relayOpts.RelayConfig() + if err != nil { + return nil, fmt.Errorf("failed to get relay config: %w", err) + } + + var mercuryConfig mercuryconfig.PluginConfig + if err = 
json.Unmarshal(pargs.PluginConfig, &mercuryConfig); err != nil { + return nil, pkgerrors.WithStack(err) + } + + if relayConfig.FeedID == nil { + return nil, pkgerrors.New("FeedID must be specified") + } + feedID := mercuryutils.FeedID(*relayConfig.FeedID) + + if relayConfig.ChainID.String() != r.chain.ID().String() { + return nil, fmt.Errorf("internal error: chain id in spec does not match this relayer's chain: have %s expected %s", relayConfig.ChainID.String(), r.chain.ID().String()) + } + cp, err := newMercuryConfigProvider(lggr, r.chain, relayOpts) + if err != nil { + return nil, pkgerrors.WithStack(err) + } + + if !relayConfig.EffectiveTransmitterID.Valid { + return nil, pkgerrors.New("EffectiveTransmitterID must be specified") + } + privKey, err := r.ks.CSA().Get(relayConfig.EffectiveTransmitterID.String) + if err != nil { + return nil, pkgerrors.Wrap(err, "failed to get CSA key for mercury connection") + } + + client, err := r.mercuryPool.Checkout(context.Background(), privKey, mercuryConfig.ServerPubKey, mercuryConfig.ServerURL()) + if err != nil { + return nil, err + } + + // FIXME: We actually know the version here since it's in the feed ID, can + // we use generics to avoid passing three of this? 
+ // https://smartcontract-it.atlassian.net/browse/MERC-1414 + reportCodecV1 := reportcodecv1.NewReportCodec(*relayConfig.FeedID, lggr.Named("ReportCodecV1")) + reportCodecV2 := reportcodecv2.NewReportCodec(*relayConfig.FeedID, lggr.Named("ReportCodecV2")) + reportCodecV3 := reportcodecv3.NewReportCodec(*relayConfig.FeedID, lggr.Named("ReportCodecV3")) + + var transmitterCodec mercury.TransmitterReportDecoder + switch feedID.Version() { + case 1: + transmitterCodec = reportCodecV1 + case 2: + transmitterCodec = reportCodecV2 + case 3: + transmitterCodec = reportCodecV3 + default: + return nil, fmt.Errorf("invalid feed version %d", feedID.Version()) + } + transmitter := mercury.NewTransmitter(lggr, client, privKey.PublicKey, rargs.JobID, *relayConfig.FeedID, r.db, r.pgCfg, transmitterCodec) + + return NewMercuryProvider(cp, r.chainReader, r.codec, NewMercuryChainReader(r.chain.HeadTracker()), transmitter, reportCodecV1, reportCodecV2, reportCodecV3, lggr), nil +} + +func (r *Relayer) NewLLOProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.LLOProvider, error) { + return nil, errors.New("not implemented") +} + +func (r *Relayer) NewFunctionsProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.FunctionsProvider, error) { + lggr := r.lggr.Named("FunctionsProvider").Named(rargs.ExternalJobID.String()) + // TODO(FUN-668): Not ready yet (doesn't implement FunctionsEvents() properly) + return NewFunctionsProvider(r.chain, rargs, pargs, lggr, r.ks.Eth(), functions.FunctionsPlugin) +} + +// NewConfigProvider is called by bootstrap jobs +func (r *Relayer) NewConfigProvider(args commontypes.RelayArgs) (configProvider commontypes.ConfigProvider, err error) { + lggr := r.lggr.Named("ConfigProvider").Named(args.ExternalJobID.String()) + relayOpts := types.NewRelayOpts(args) + relayConfig, err := relayOpts.RelayConfig() + if err != nil { + return nil, fmt.Errorf("failed to get relay config: %w", err) + } + expectedChainID 
:= relayConfig.ChainID.String() + if expectedChainID != r.chain.ID().String() { + return nil, fmt.Errorf("internal error: chain id in spec does not match this relayer's chain: have %s expected %s", relayConfig.ChainID.String(), r.chain.ID().String()) + } + + // Handle legacy jobs which did not yet specify provider type and + // switched between median/mercury based on presence of feed ID + if args.ProviderType == "" { + if relayConfig.FeedID == nil { + args.ProviderType = "median" + } else { + args.ProviderType = "mercury" + } + } + + switch args.ProviderType { + case "median": + configProvider, err = newStandardConfigProvider(lggr, r.chain, relayOpts) + case "mercury": + configProvider, err = newMercuryConfigProvider(lggr, r.chain, relayOpts) + default: + return nil, fmt.Errorf("unrecognized provider type: %q", args.ProviderType) + } + if err != nil { + // Never return (*configProvider)(nil) + return nil, err + } + return configProvider, err +} + +func FilterNamesFromRelayArgs(args commontypes.RelayArgs) (filterNames []string, err error) { + var addr ethkey.EIP55Address + if addr, err = ethkey.NewEIP55Address(args.ContractID); err != nil { + return nil, err + } + var relayConfig types.RelayConfig + if err = json.Unmarshal(args.RelayConfig, &relayConfig); err != nil { + return nil, pkgerrors.WithStack(err) + } + + if relayConfig.FeedID != nil { + filterNames = []string{mercury.FilterName(addr.Address(), *relayConfig.FeedID)} + } else { + filterNames = []string{configPollerFilterName(addr.Address()), transmitterFilterName(addr.Address())} + } + return filterNames, err +} + +type configWatcher struct { + services.StateMachine + lggr logger.Logger + contractAddress common.Address + offchainDigester ocrtypes.OffchainConfigDigester + configPoller types.ConfigPoller + chain legacyevm.Chain + runReplay bool + fromBlock uint64 + replayCtx context.Context + replayCancel context.CancelFunc + wg sync.WaitGroup +} + +func newConfigWatcher(lggr logger.Logger, + contractAddress 
common.Address, + offchainDigester ocrtypes.OffchainConfigDigester, + configPoller types.ConfigPoller, + chain legacyevm.Chain, + fromBlock uint64, + runReplay bool, +) *configWatcher { + replayCtx, replayCancel := context.WithCancel(context.Background()) + return &configWatcher{ + lggr: lggr.Named("ConfigWatcher").Named(contractAddress.String()), + contractAddress: contractAddress, + offchainDigester: offchainDigester, + configPoller: configPoller, + chain: chain, + runReplay: runReplay, + fromBlock: fromBlock, + replayCtx: replayCtx, + replayCancel: replayCancel, + } + +} + +func (c *configWatcher) Name() string { + return c.lggr.Name() +} + +func (c *configWatcher) Start(ctx context.Context) error { + return c.StartOnce(fmt.Sprintf("configWatcher %x", c.contractAddress), func() error { + if c.runReplay && c.fromBlock != 0 { + // Only replay if it's a brand runReplay job. + c.wg.Add(1) + go func() { + defer c.wg.Done() + c.lggr.Infow("starting replay for config", "fromBlock", c.fromBlock) + if err := c.configPoller.Replay(c.replayCtx, int64(c.fromBlock)); err != nil { + c.lggr.Errorf("error replaying for config", "err", err) + } else { + c.lggr.Infow("completed replaying for config", "fromBlock", c.fromBlock) + } + }() + } + c.configPoller.Start() + return nil + }) +} + +func (c *configWatcher) Close() error { + return c.StopOnce(fmt.Sprintf("configWatcher %x", c.contractAddress), func() error { + c.replayCancel() + c.wg.Wait() + return c.configPoller.Close() + }) +} + +func (c *configWatcher) HealthReport() map[string]error { + return map[string]error{c.Name(): c.Healthy()} +} + +func (c *configWatcher) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + return c.offchainDigester +} + +func (c *configWatcher) ContractConfigTracker() ocrtypes.ContractConfigTracker { + return c.configPoller +} + +type configTransmitterOpts struct { + // override the gas limit default provided in the config watcher + pluginGasLimit *uint32 +} + +func 
newOnChainContractTransmitter(lggr logger.Logger, rargs commontypes.RelayArgs, transmitterID string, ethKeystore keystore.Eth, configWatcher *configWatcher, opts configTransmitterOpts, transmissionContractABI abi.ABI) (*contractTransmitter, error) { + var relayConfig types.RelayConfig + if err := json.Unmarshal(rargs.RelayConfig, &relayConfig); err != nil { + return nil, err + } + var fromAddresses []common.Address + sendingKeys := relayConfig.SendingKeys + if !relayConfig.EffectiveTransmitterID.Valid { + return nil, pkgerrors.New("EffectiveTransmitterID must be specified") + } + effectiveTransmitterAddress := common.HexToAddress(relayConfig.EffectiveTransmitterID.String) + + sendingKeysLength := len(sendingKeys) + if sendingKeysLength == 0 { + return nil, pkgerrors.New("no sending keys provided") + } + + // If we are using multiple sending keys, then a forwarder is needed to rotate transmissions. + // Ensure that this forwarder is not set to a local sending key, and ensure our sending keys are enabled. 
+ for _, s := range sendingKeys { + if sendingKeysLength > 1 && s == effectiveTransmitterAddress.String() { + return nil, pkgerrors.New("the transmitter is a local sending key with transaction forwarding enabled") + } + if err := ethKeystore.CheckEnabled(common.HexToAddress(s), configWatcher.chain.Config().EVM().ChainID()); err != nil { + return nil, pkgerrors.Wrap(err, "one of the sending keys given is not enabled") + } + fromAddresses = append(fromAddresses, common.HexToAddress(s)) + } + + scoped := configWatcher.chain.Config() + strategy := txmgrcommon.NewQueueingTxStrategy(rargs.ExternalJobID, scoped.OCR2().DefaultTransactionQueueDepth(), scoped.Database().DefaultQueryTimeout()) + + var checker txm.TransmitCheckerSpec + if configWatcher.chain.Config().OCR2().SimulateTransactions() { + checker.CheckerType = txm.TransmitCheckerTypeSimulate + } + + gasLimit := configWatcher.chain.Config().EVM().GasEstimator().LimitDefault() + ocr2Limit := configWatcher.chain.Config().EVM().GasEstimator().LimitJobType().OCR2() + if ocr2Limit != nil { + gasLimit = *ocr2Limit + } + if opts.pluginGasLimit != nil { + gasLimit = *opts.pluginGasLimit + } + + transmitter, err := ocrcommon.NewTransmitter( + configWatcher.chain.TxManager(), + fromAddresses, + gasLimit, + effectiveTransmitterAddress, + strategy, + checker, + configWatcher.chain.ID(), + ethKeystore, + ) + + if err != nil { + return nil, pkgerrors.Wrap(err, "failed to create transmitter") + } + + return NewOCRContractTransmitter( + configWatcher.contractAddress, + configWatcher.chain.Client(), + transmissionContractABI, + transmitter, + configWatcher.chain.LogPoller(), + lggr, + nil, + ) +} + +func (r *Relayer) NewMedianProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.MedianProvider, error) { + lggr := r.lggr.Named("MedianProvider").Named(rargs.ExternalJobID.String()) + relayOpts := types.NewRelayOpts(rargs) + relayConfig, err := relayOpts.RelayConfig() + if err != nil { + return nil, 
fmt.Errorf("failed to get relay config: %w", err) + } + expectedChainID := relayConfig.ChainID.String() + if expectedChainID != r.chain.ID().String() { + return nil, fmt.Errorf("internal error: chain id in spec does not match this relayer's chain: have %s expected %s", relayConfig.ChainID.String(), r.chain.ID().String()) + } + if !common.IsHexAddress(relayOpts.ContractID) { + return nil, fmt.Errorf("invalid contractID %s, expected hex address", relayOpts.ContractID) + } + contractID := common.HexToAddress(relayOpts.ContractID) + + configWatcher, err := newStandardConfigProvider(lggr, r.chain, relayOpts) + if err != nil { + return nil, err + } + + reportCodec := evmreportcodec.ReportCodec{} + contractTransmitter, err := newOnChainContractTransmitter(lggr, rargs, pargs.TransmitterID, r.ks.Eth(), configWatcher, configTransmitterOpts{}, OCR2AggregatorTransmissionContractABI) + if err != nil { + return nil, err + } + + medianContract, err := newMedianContract(configWatcher.ContractConfigTracker(), configWatcher.contractAddress, configWatcher.chain, rargs.JobID, r.db, lggr) + if err != nil { + return nil, err + } + + medianProvider := medianProvider{ + lggr: lggr.Named("MedianProvider"), + configWatcher: configWatcher, + reportCodec: reportCodec, + contractTransmitter: contractTransmitter, + medianContract: medianContract, + } + + // allow fallback until chain reader is default and median contract is removed, but still log just in case + var chainReaderService ChainReaderService + if relayConfig.ChainReader != nil { + if chainReaderService, err = NewChainReaderService(lggr, r.chain.LogPoller(), r.chain, *relayConfig.ChainReader); err != nil { + return nil, err + } + + boundContracts := []commontypes.BoundContract{{Name: "median", Pending: true, Address: contractID.String()}} + if err = chainReaderService.Bind(context.Background(), boundContracts); err != nil { + return nil, err + } + } else { + lggr.Info("ChainReader missing from RelayConfig; falling back to internal 
MedianContract") + } + medianProvider.chainReader = chainReaderService + + if relayConfig.Codec != nil { + medianProvider.codec, err = NewCodec(*relayConfig.Codec) + if err != nil { + return nil, err + } + } else { + lggr.Info("Codec missing from RelayConfig; falling back to internal MedianContract") + } + + return &medianProvider, nil +} + +func (r *Relayer) NewAutomationProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.AutomationProvider, error) { + lggr := r.lggr.Named("AutomationProvider").Named(rargs.ExternalJobID.String()) + ocr2keeperRelayer := NewOCR2KeeperRelayer(r.db, r.chain, lggr.Named("OCR2KeeperRelayer"), r.ks.Eth(), r.pgCfg) + + return ocr2keeperRelayer.NewOCR2KeeperProvider(rargs, pargs) +} + +var _ commontypes.MedianProvider = (*medianProvider)(nil) + +type medianProvider struct { + lggr logger.Logger + configWatcher *configWatcher + contractTransmitter ContractTransmitter + reportCodec median.ReportCodec + medianContract *medianContract + chainReader ChainReaderService + codec commontypes.Codec + ms services.MultiStart +} + +func (p *medianProvider) Name() string { return p.lggr.Name() } + +func (p *medianProvider) Start(ctx context.Context) error { + srvcs := []services.StartClose{p.configWatcher, p.contractTransmitter, p.medianContract} + if p.chainReader != nil { + srvcs = append(srvcs, p.chainReader) + } + + return p.ms.Start(ctx, srvcs...) 
+} + +func (p *medianProvider) Close() error { return p.ms.Close() } + +func (p *medianProvider) Ready() error { return nil } + +func (p *medianProvider) HealthReport() map[string]error { + hp := map[string]error{p.Name(): p.Ready()} + services.CopyHealth(hp, p.configWatcher.HealthReport()) + services.CopyHealth(hp, p.contractTransmitter.HealthReport()) + services.CopyHealth(hp, p.medianContract.HealthReport()) + return hp +} + +func (p *medianProvider) ContractTransmitter() ocrtypes.ContractTransmitter { + return p.contractTransmitter +} + +func (p *medianProvider) ReportCodec() median.ReportCodec { + return p.reportCodec +} + +func (p *medianProvider) MedianContract() median.MedianContract { + return p.medianContract +} + +func (p *medianProvider) OnchainConfigCodec() median.OnchainConfigCodec { + return median.StandardOnchainConfigCodec{} +} + +func (p *medianProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + return p.configWatcher.OffchainConfigDigester() +} + +func (p *medianProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker { + return p.configWatcher.ContractConfigTracker() +} + +func (p *medianProvider) ChainReader() commontypes.ChainReader { + return p.chainReader +} + +func (p *medianProvider) Codec() commontypes.Codec { + return p.codec +} diff --git a/core/services/relay/evm/evm_test.go b/core/services/relay/evm/evm_test.go new file mode 100644 index 00000000..1d7fca11 --- /dev/null +++ b/core/services/relay/evm/evm_test.go @@ -0,0 +1,63 @@ +package evm_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +func TestRelayerOpts_Validate(t *testing.T) { + cfg := configtest.NewTestGeneralConfig(t) + type fields struct { + DB *sqlx.DB + QConfig pg.QConfig + CSAETHKeystore 
evm.CSAETHKeystore + } + tests := []struct { + name string + fields fields + wantErrContains string + }{ + { + name: "all invalid", + fields: fields{ + DB: nil, + QConfig: nil, + CSAETHKeystore: nil, + }, + wantErrContains: `nil DB +nil QConfig +nil Keystore`, + }, + { + name: "missing db, keystore", + fields: fields{ + DB: nil, + QConfig: cfg.Database(), + }, + wantErrContains: `nil DB +nil Keystore`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := evm.RelayerOpts{ + DB: tt.fields.DB, + QConfig: tt.fields.QConfig, + CSAETHKeystore: tt.fields.CSAETHKeystore, + } + err := c.Validate() + if tt.wantErrContains != "" { + assert.Contains(t, err.Error(), tt.wantErrContains) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/core/services/relay/evm/functions.go b/core/services/relay/evm/functions.go new file mode 100644 index 00000000..2257306f --- /dev/null +++ b/core/services/relay/evm/functions.go @@ -0,0 +1,229 @@ +package evm + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "go.uber.org/multierr" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services" + commontypes "github.com/goplugin/plugin-common/pkg/types" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txm "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + functionsRelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/functions" + evmRelayTypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +type functionsProvider struct { + services.StateMachine + 
configWatcher *configWatcher + contractTransmitter ContractTransmitter + logPollerWrapper evmRelayTypes.LogPollerWrapper +} + +var _ evmRelayTypes.FunctionsProvider = (*functionsProvider)(nil) + +func (p *functionsProvider) ContractTransmitter() ocrtypes.ContractTransmitter { + return p.contractTransmitter +} + +func (p *functionsProvider) LogPollerWrapper() evmRelayTypes.LogPollerWrapper { + return p.logPollerWrapper +} + +func (p *functionsProvider) FunctionsEvents() commontypes.FunctionsEvents { + // TODO (FUN-668): implement + return nil +} + +func (p *functionsProvider) Start(ctx context.Context) error { + return p.StartOnce("FunctionsProvider", func() error { + if err := p.configWatcher.Start(ctx); err != nil { + return err + } + return p.logPollerWrapper.Start(ctx) + }) +} + +func (p *functionsProvider) Close() error { + return p.StopOnce("FunctionsProvider", func() (err error) { + err = multierr.Combine(err, p.logPollerWrapper.Close()) + err = multierr.Combine(err, p.configWatcher.Close()) + return + }) +} + +// Forward all calls to the underlying configWatcher +func (p *functionsProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + return p.configWatcher.OffchainConfigDigester() +} + +func (p *functionsProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker { + return p.configWatcher.ContractConfigTracker() +} + +func (p *functionsProvider) HealthReport() map[string]error { + return p.configWatcher.HealthReport() +} + +func (p *functionsProvider) Name() string { + return p.configWatcher.Name() +} + +func (p *functionsProvider) ChainReader() commontypes.ChainReader { + return nil +} + +func (p *functionsProvider) Codec() commontypes.Codec { + return nil +} + +func NewFunctionsProvider(chain legacyevm.Chain, rargs commontypes.RelayArgs, pargs commontypes.PluginArgs, lggr logger.Logger, ethKeystore keystore.Eth, pluginType functionsRelay.FunctionsPluginType) (evmRelayTypes.FunctionsProvider, error) { + relayOpts := 
evmRelayTypes.NewRelayOpts(rargs) + relayConfig, err := relayOpts.RelayConfig() + if err != nil { + return nil, err + } + expectedChainID := relayConfig.ChainID.String() + if expectedChainID != chain.ID().String() { + return nil, fmt.Errorf("internal error: chain id in spec does not match this relayer's chain: have %s expected %s", relayConfig.ChainID.String(), chain.ID().String()) + } + if err != nil { + return nil, err + } + if !common.IsHexAddress(rargs.ContractID) { + return nil, errors.Errorf("invalid contractID, expected hex address") + } + var pluginConfig config.PluginConfig + if err2 := json.Unmarshal(pargs.PluginConfig, &pluginConfig); err2 != nil { + return nil, err2 + } + routerContractAddress := common.HexToAddress(rargs.ContractID) + logPollerWrapper, err := functionsRelay.NewLogPollerWrapper(routerContractAddress, pluginConfig, chain.Client(), chain.LogPoller(), lggr) + if err != nil { + return nil, err + } + configWatcher, err := newFunctionsConfigProvider(pluginType, chain, rargs, relayConfig.FromBlock, logPollerWrapper, lggr) + if err != nil { + return nil, err + } + var contractTransmitter ContractTransmitter + if relayConfig.SendingKeys != nil { + contractTransmitter, err = newFunctionsContractTransmitter(pluginConfig.ContractVersion, rargs, pargs.TransmitterID, configWatcher, ethKeystore, logPollerWrapper, lggr) + if err != nil { + return nil, err + } + } else { + lggr.Warn("no sending keys configured for functions plugin, not starting contract transmitter") + } + return &functionsProvider{ + configWatcher: configWatcher, + contractTransmitter: contractTransmitter, + logPollerWrapper: logPollerWrapper, + }, nil +} + +func newFunctionsConfigProvider(pluginType functionsRelay.FunctionsPluginType, chain legacyevm.Chain, args commontypes.RelayArgs, fromBlock uint64, logPollerWrapper evmRelayTypes.LogPollerWrapper, lggr logger.Logger) (*configWatcher, error) { + if !common.IsHexAddress(args.ContractID) { + return nil, errors.Errorf("invalid 
contractID, expected hex address") + } + + routerContractAddress := common.HexToAddress(args.ContractID) + + cp, err := functionsRelay.NewFunctionsConfigPoller(pluginType, chain.LogPoller(), lggr) + if err != nil { + return nil, err + } + logPollerWrapper.SubscribeToUpdates("FunctionsConfigPoller", cp) + + offchainConfigDigester := functionsRelay.NewFunctionsOffchainConfigDigester(pluginType, chain.ID().Uint64()) + logPollerWrapper.SubscribeToUpdates("FunctionsOffchainConfigDigester", offchainConfigDigester) + + return newConfigWatcher(lggr, routerContractAddress, offchainConfigDigester, cp, chain, fromBlock, args.New), nil +} + +func newFunctionsContractTransmitter(contractVersion uint32, rargs commontypes.RelayArgs, transmitterID string, configWatcher *configWatcher, ethKeystore keystore.Eth, logPollerWrapper evmRelayTypes.LogPollerWrapper, lggr logger.Logger) (ContractTransmitter, error) { + var relayConfig evmRelayTypes.RelayConfig + if err := json.Unmarshal(rargs.RelayConfig, &relayConfig); err != nil { + return nil, err + } + var fromAddresses []common.Address + sendingKeys := relayConfig.SendingKeys + if !relayConfig.EffectiveTransmitterID.Valid { + return nil, errors.New("EffectiveTransmitterID must be specified") + } + effectiveTransmitterAddress := common.HexToAddress(relayConfig.EffectiveTransmitterID.String) + + sendingKeysLength := len(sendingKeys) + if sendingKeysLength == 0 { + return nil, errors.New("no sending keys provided") + } + + // If we are using multiple sending keys, then a forwarder is needed to rotate transmissions. + // Ensure that this forwarder is not set to a local sending key, and ensure our sending keys are enabled. 
+ for _, s := range sendingKeys { + if sendingKeysLength > 1 && s == effectiveTransmitterAddress.String() { + return nil, errors.New("the transmitter is a local sending key with transaction forwarding enabled") + } + if err := ethKeystore.CheckEnabled(common.HexToAddress(s), configWatcher.chain.Config().EVM().ChainID()); err != nil { + return nil, errors.Wrap(err, "one of the sending keys given is not enabled") + } + fromAddresses = append(fromAddresses, common.HexToAddress(s)) + } + + scoped := configWatcher.chain.Config() + strategy := txmgrcommon.NewQueueingTxStrategy(rargs.ExternalJobID, scoped.OCR2().DefaultTransactionQueueDepth(), scoped.Database().DefaultQueryTimeout()) + + var checker txm.TransmitCheckerSpec + if configWatcher.chain.Config().OCR2().SimulateTransactions() { + checker.CheckerType = txm.TransmitCheckerTypeSimulate + } + + gasLimit := configWatcher.chain.Config().EVM().GasEstimator().LimitDefault() + ocr2Limit := configWatcher.chain.Config().EVM().GasEstimator().LimitJobType().OCR2() + if ocr2Limit != nil { + gasLimit = *ocr2Limit + } + + transmitter, err := ocrcommon.NewTransmitter( + configWatcher.chain.TxManager(), + fromAddresses, + gasLimit, + effectiveTransmitterAddress, + strategy, + checker, + configWatcher.chain.ID(), + ethKeystore, + ) + + if err != nil { + return nil, errors.Wrap(err, "failed to create transmitter") + } + + functionsTransmitter, err := functionsRelay.NewFunctionsContractTransmitter( + configWatcher.chain.Client(), + OCR2AggregatorTransmissionContractABI, + transmitter, + configWatcher.chain.LogPoller(), + lggr, + nil, + contractVersion, + ) + if err != nil { + return nil, err + } + logPollerWrapper.SubscribeToUpdates("FunctionsConfigTransmitter", functionsTransmitter) + return functionsTransmitter, err +} diff --git a/core/services/relay/evm/functions/config_poller.go b/core/services/relay/evm/functions/config_poller.go new file mode 100644 index 00000000..cda89afe --- /dev/null +++ 
b/core/services/relay/evm/functions/config_poller.go @@ -0,0 +1,201 @@ +package functions + +import ( + "context" + "database/sql" + "encoding/binary" + "sync/atomic" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +type FunctionsPluginType int + +const ( + FunctionsPlugin FunctionsPluginType = iota + ThresholdPlugin + S4Plugin +) + +type configPoller struct { + lggr logger.Logger + destChainLogPoller logpoller.LogPoller + targetContract atomic.Pointer[common.Address] + pluginType FunctionsPluginType +} + +var _ types.ConfigPoller = &configPoller{} +var _ types.RouteUpdateSubscriber = &configPoller{} + +// ConfigSet Common to all OCR2 evm based contracts: https://github.com/goplugin/libocr/blob/master/contract2/dev/OCR2Abstract.sol +var ConfigSet common.Hash + +var defaultABI abi.ABI + +const configSetEventName = "ConfigSet" + +func init() { + var err error + abiPointer, err := ocr2aggregator.OCR2AggregatorMetaData.GetAbi() + if err != nil { + panic(err) + } + defaultABI = *abiPointer + ConfigSet = defaultABI.Events[configSetEventName].ID +} + +func unpackLogData(d []byte) (*ocr2aggregator.OCR2AggregatorConfigSet, error) { + unpacked := new(ocr2aggregator.OCR2AggregatorConfigSet) + err := defaultABI.UnpackIntoInterface(unpacked, configSetEventName, d) + if err != nil { + return nil, errors.Wrap(err, "failed to unpack log data") + } + return unpacked, nil +} + +func configFromLog(logData []byte, pluginType FunctionsPluginType) (ocrtypes.ContractConfig, error) { + unpacked, err := unpackLogData(logData) + if err != nil { + return 
ocrtypes.ContractConfig{}, err + } + + var transmitAccounts []ocrtypes.Account + for _, addr := range unpacked.Transmitters { + transmitAccounts = append(transmitAccounts, ocrtypes.Account(addr.String())) + } + var signers []ocrtypes.OnchainPublicKey + for _, addr := range unpacked.Signers { + addr := addr + signers = append(signers, addr[:]) + } + + // Replace the first two bytes of the config digest with the plugin type to avoid duplicate config digests between Functions plugins + switch pluginType { + case FunctionsPlugin: + // FunctionsPluginType should already have the correct prefix, so this is a no-op + case ThresholdPlugin: + binary.BigEndian.PutUint16(unpacked.ConfigDigest[:2], uint16(ThresholdDigestPrefix)) + case S4Plugin: + binary.BigEndian.PutUint16(unpacked.ConfigDigest[:2], uint16(S4DigestPrefix)) + default: + return ocrtypes.ContractConfig{}, errors.New("unknown plugin type") + } + + return ocrtypes.ContractConfig{ + ConfigDigest: unpacked.ConfigDigest, + ConfigCount: unpacked.ConfigCount, + Signers: signers, + Transmitters: transmitAccounts, + F: unpacked.F, + OnchainConfig: unpacked.OnchainConfig, + OffchainConfigVersion: unpacked.OffchainConfigVersion, + OffchainConfig: unpacked.OffchainConfig, + }, nil +} + +func configPollerFilterName(addr common.Address) string { + return logpoller.FilterName("FunctionsOCR2ConfigPoller", addr.String()) +} + +func NewFunctionsConfigPoller(pluginType FunctionsPluginType, destChainPoller logpoller.LogPoller, lggr logger.Logger) (*configPoller, error) { + cp := &configPoller{ + lggr: lggr, + destChainLogPoller: destChainPoller, + pluginType: pluginType, + } + return cp, nil +} + +func (cp *configPoller) Start() {} + +func (cp *configPoller) Close() error { + return nil +} + +func (cp *configPoller) Notify() <-chan struct{} { + return nil +} + +func (cp *configPoller) Replay(ctx context.Context, fromBlock int64) error { + return cp.destChainLogPoller.Replay(ctx, fromBlock) +} + +func (cp *configPoller) 
LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { + contractAddr := cp.targetContract.Load() + if contractAddr == nil { + return 0, ocrtypes.ConfigDigest{}, nil + } + + latest, err := cp.destChainLogPoller.LatestLogByEventSigWithConfs(ConfigSet, *contractAddr, 1, pg.WithParentCtx(ctx)) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return 0, ocrtypes.ConfigDigest{}, nil + } + return 0, ocrtypes.ConfigDigest{}, err + } + latestConfigSet, err := configFromLog(latest.Data, cp.pluginType) + if err != nil { + return 0, ocrtypes.ConfigDigest{}, err + } + return uint64(latest.BlockNumber), latestConfigSet.ConfigDigest, nil +} + +func (cp *configPoller) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) { + // NOTE: if targetContract changes between invocations of LatestConfigDetails() and LatestConfig() + // (unlikely), we'll return an error here and libocr will re-try. + contractAddr := cp.targetContract.Load() + if contractAddr == nil { + return ocrtypes.ContractConfig{}, errors.New("no target contract address set yet") + } + + lgs, err := cp.destChainLogPoller.Logs(int64(changedInBlock), int64(changedInBlock), ConfigSet, *contractAddr, pg.WithParentCtx(ctx)) + if err != nil { + return ocrtypes.ContractConfig{}, err + } + if len(lgs) == 0 { + return ocrtypes.ContractConfig{}, errors.New("no logs found") + } + latestConfigSet, err := configFromLog(lgs[len(lgs)-1].Data, cp.pluginType) + if err != nil { + return ocrtypes.ContractConfig{}, err + } + cp.lggr.Infow("LatestConfig", "latestConfig", latestConfigSet) + return latestConfigSet, nil +} + +func (cp *configPoller) LatestBlockHeight(ctx context.Context) (blockHeight uint64, err error) { + latest, err := cp.destChainLogPoller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return 0, nil + } + return 0, err + } + return uint64(latest.BlockNumber), nil +} + +// called 
from LogPollerWrapper in a separate goroutine +func (cp *configPoller) UpdateRoutes(activeCoordinator common.Address, proposedCoordinator common.Address) error { + cp.targetContract.Store(&activeCoordinator) + // Register filters for both active and proposed + err := cp.destChainLogPoller.RegisterFilter(logpoller.Filter{Name: configPollerFilterName(activeCoordinator), EventSigs: []common.Hash{ConfigSet}, Addresses: []common.Address{activeCoordinator}}) + if err != nil { + return err + } + err = cp.destChainLogPoller.RegisterFilter(logpoller.Filter{Name: configPollerFilterName(proposedCoordinator), EventSigs: []common.Hash{ConfigSet}, Addresses: []common.Address{activeCoordinator}}) + if err != nil { + return err + } + // TODO: unregister old filter (needs refactor to get pg.Queryer) + return nil +} diff --git a/core/services/relay/evm/functions/config_poller_test.go b/core/services/relay/evm/functions/config_poller_test.go new file mode 100644 index 00000000..ffda19c6 --- /dev/null +++ b/core/services/relay/evm/functions/config_poller_test.go @@ -0,0 +1,211 @@ +package functions_test + +import ( + "encoding/binary" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + testoffchainaggregator2 "github.com/goplugin/libocr/gethwrappers2/testocr2aggregator" + confighelper2 "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + ocrtypes2 "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + functionsConfig "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/testhelpers" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/functions" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestFunctionsConfigPoller(t *testing.T) { + t.Run("FunctionsPlugin", func(t *testing.T) { + runTest(t, functions.FunctionsPlugin, functions.FunctionsDigestPrefix) + }) + t.Run("ThresholdPlugin", func(t *testing.T) { + runTest(t, functions.ThresholdPlugin, functions.ThresholdDigestPrefix) + }) + t.Run("S4Plugin", func(t *testing.T) { + runTest(t, functions.S4Plugin, functions.S4DigestPrefix) + }) +} + +func runTest(t *testing.T, pluginType functions.FunctionsPluginType, expectedDigestPrefix ocrtypes2.ConfigDigestPrefix) { + key, err := crypto.GenerateKey() + require.NoError(t, err) + user, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + require.NoError(t, err) + b := backends.NewSimulatedBackend(core.GenesisAlloc{ + user.From: {Balance: big.NewInt(1000000000000000000)}}, + 5*ethconfig.Defaults.Miner.GasCeil) + defer b.Close() + linkTokenAddress, _, _, err := link_token_interface.DeployLinkToken(user, b) + require.NoError(t, err) + accessAddress, _, _, err := testoffchainaggregator2.DeploySimpleWriteAccessController(user, b) + require.NoError(t, err, "failed to deploy test access controller contract") + ocrAddress, _, ocrContract, err := ocr2aggregator.DeployOCR2Aggregator( + user, + b, + 
linkTokenAddress, + big.NewInt(0), + big.NewInt(10), + accessAddress, + accessAddress, + 9, + "TEST", + ) + require.NoError(t, err) + b.Commit() + db := pgtest.NewSqlxDB(t) + defer db.Close() + cfg := pgtest.NewQConfig(false) + ethClient := evmclient.NewSimulatedBackendClient(t, b, big.NewInt(1337)) + defer ethClient.Close() + lggr := logger.TestLogger(t) + lorm := logpoller.NewORM(big.NewInt(1337), db, lggr, cfg) + lp := logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, false, 1, 2, 2, 1000) + servicetest.Run(t, lp) + configPoller, err := functions.NewFunctionsConfigPoller(pluginType, lp, lggr) + require.NoError(t, err) + require.NoError(t, configPoller.UpdateRoutes(ocrAddress, ocrAddress)) + // Should have no config to begin with. + _, config, err := configPoller.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + require.Equal(t, ocrtypes2.ConfigDigest{}, config) + _, err = configPoller.LatestConfig(testutils.Context(t), 0) + require.Error(t, err) + + pluginConfig := &functionsConfig.ReportingPluginConfigWrapper{ + Config: &functionsConfig.ReportingPluginConfig{ + MaxQueryLengthBytes: 10000, + MaxObservationLengthBytes: 10000, + MaxReportLengthBytes: 10000, + MaxRequestBatchSize: 10, + DefaultAggregationMethod: functionsConfig.AggregationMethod(0), + UniqueReports: true, + ThresholdPluginConfig: &functionsConfig.ThresholdReportingPluginConfig{ + MaxQueryLengthBytes: 10000, + MaxObservationLengthBytes: 10000, + MaxReportLengthBytes: 10000, + RequestCountLimit: 100, + RequestTotalBytesLimit: 100000, + RequireLocalRequestCheck: true, + }, + }, + } + + // Set the config + contractConfig := setFunctionsConfig(t, pluginConfig, ocrContract, user) + b.Commit() + latest, err := b.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + // Ensure we capture this config set log. + require.NoError(t, lp.Replay(testutils.Context(t), latest.Number().Int64()-1)) + + // Send blocks until we see the config updated. 
+ var configBlock uint64 + var digest [32]byte + gomega.NewGomegaWithT(t).Eventually(func() bool { + b.Commit() + configBlock, digest, err = configPoller.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + return ocrtypes2.ConfigDigest{} != digest + }, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue()) + + // Assert the config returned is the one we configured. + newConfig, err := configPoller.LatestConfig(testutils.Context(t), configBlock) + require.NoError(t, err) + + // Get actual configDigest value from contracts + configFromContract, err := ocrContract.LatestConfigDetails(nil) + require.NoError(t, err) + onChainConfigDigest := configFromContract.ConfigDigest + + assert.Equal(t, contractConfig.Signers, newConfig.Signers) + assert.Equal(t, contractConfig.Transmitters, newConfig.Transmitters) + assert.Equal(t, contractConfig.F, newConfig.F) + assert.Equal(t, contractConfig.OffchainConfigVersion, newConfig.OffchainConfigVersion) + assert.Equal(t, contractConfig.OffchainConfig, newConfig.OffchainConfig) + + var expectedConfigDigest [32]byte + copy(expectedConfigDigest[:], onChainConfigDigest[:]) + binary.BigEndian.PutUint16(expectedConfigDigest[:2], uint16(expectedDigestPrefix)) + + assert.Equal(t, expectedConfigDigest, digest) + assert.Equal(t, expectedConfigDigest, [32]byte(newConfig.ConfigDigest)) +} + +func setFunctionsConfig(t *testing.T, pluginConfig *functionsConfig.ReportingPluginConfigWrapper, ocrContract *ocr2aggregator.OCR2Aggregator, user *bind.TransactOpts) ocrtypes2.ContractConfig { + // Create minimum number of nodes. 
+ var oracles []confighelper2.OracleIdentityExtra + for i := 0; i < 4; i++ { + oracles = append(oracles, confighelper2.OracleIdentityExtra{ + OracleIdentity: confighelper2.OracleIdentity{ + OnchainPublicKey: evmutils.RandomAddress().Bytes(), + TransmitAccount: ocrtypes2.Account(evmutils.RandomAddress().String()), + OffchainPublicKey: evmutils.RandomBytes32(), + PeerID: utils.MustNewPeerID(), + }, + ConfigEncryptionPublicKey: evmutils.RandomBytes32(), + }) + } + + pluginConfigBytes, err := functionsConfig.EncodeReportingPluginConfig(pluginConfig) + require.NoError(t, err) + + onchainConfig, err := testhelpers.GenerateDefaultOCR2OnchainConfig(big.NewInt(0), big.NewInt(10)) + require.NoError(t, err) + + signers, transmitters, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper2.ContractSetConfigArgsForTests( + 2*time.Second, // deltaProgress + 1*time.Second, // deltaResend + 1*time.Second, // deltaRound + 500*time.Millisecond, // deltaGrace + 2*time.Second, // deltaStage + 3, + []int{1, 1, 1, 1}, + oracles, + pluginConfigBytes, + 50*time.Millisecond, + 50*time.Millisecond, + 50*time.Millisecond, + 50*time.Millisecond, + 50*time.Millisecond, + 1, // faults + onchainConfig, + ) + + require.NoError(t, err) + signerAddresses, err := evm.OnchainPublicKeyToAddress(signers) + require.NoError(t, err) + transmitterAddresses, err := evm.AccountToAddress(transmitters) + require.NoError(t, err) + _, err = ocrContract.SetConfig(user, signerAddresses, transmitterAddresses, threshold, onchainConfig, offchainConfigVersion, offchainConfig) + require.NoError(t, err) + return ocrtypes2.ContractConfig{ + Signers: signers, + Transmitters: transmitters, + F: threshold, + OnchainConfig: onchainConfig, + OffchainConfigVersion: offchainConfigVersion, + OffchainConfig: offchainConfig, + } +} diff --git a/core/services/relay/evm/functions/contract_transmitter.go b/core/services/relay/evm/functions/contract_transmitter.go new file mode 100644 index 
00000000..aceedd79 --- /dev/null +++ b/core/services/relay/evm/functions/contract_transmitter.go @@ -0,0 +1,271 @@ +package functions + +import ( + "bytes" + "context" + "database/sql" + "encoding/hex" + "fmt" + "math/big" + "sync/atomic" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/goplugin/libocr/offchainreporting2plus/chains/evmutil" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/encoding" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + evmRelayTypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +type FunctionsContractTransmitter interface { + services.ServiceCtx + ocrtypes.ContractTransmitter +} + +type Transmitter interface { + CreateEthTransaction(ctx context.Context, toAddress common.Address, payload []byte, txMeta *txmgr.TxMeta) error + FromAddress() common.Address +} + +type ReportToEthMetadata func([]byte) (*txmgr.TxMeta, error) + +func reportToEvmTxMetaNoop([]byte) (*txmgr.TxMeta, error) { + return nil, nil +} + +type contractTransmitter struct { + contractAddress atomic.Pointer[common.Address] + contractABI abi.ABI + transmitter Transmitter + transmittedEventSig common.Hash + contractReader contractReader + lp logpoller.LogPoller + lggr logger.Logger + reportToEvmTxMeta ReportToEthMetadata + contractVersion uint32 + reportCodec encoding.ReportCodec +} + +var _ FunctionsContractTransmitter = &contractTransmitter{} +var _ evmRelayTypes.RouteUpdateSubscriber = &contractTransmitter{} + +func transmitterFilterName(addr 
common.Address) string { + return logpoller.FilterName("FunctionsOCR2ContractTransmitter", addr.String()) +} + +func NewFunctionsContractTransmitter( + caller contractReader, + contractABI abi.ABI, + transmitter Transmitter, + lp logpoller.LogPoller, + lggr logger.Logger, + reportToEvmTxMeta ReportToEthMetadata, + contractVersion uint32, +) (*contractTransmitter, error) { + transmitted, ok := contractABI.Events["Transmitted"] + if !ok { + return nil, errors.New("invalid ABI, missing transmitted") + } + + if contractVersion != 1 { + return nil, fmt.Errorf("unsupported contract version: %d", contractVersion) + } + + if reportToEvmTxMeta == nil { + reportToEvmTxMeta = reportToEvmTxMetaNoop + } + codec, err := encoding.NewReportCodec(contractVersion) + if err != nil { + return nil, err + } + return &contractTransmitter{ + contractABI: contractABI, + transmitter: transmitter, + transmittedEventSig: transmitted.ID, + lp: lp, + contractReader: caller, + lggr: lggr.Named("OCRContractTransmitter"), + reportToEvmTxMeta: reportToEvmTxMeta, + contractVersion: contractVersion, + reportCodec: codec, + }, nil +} + +// Transmit sends the report to the on-chain smart contract's Transmit method. 
+func (oc *contractTransmitter) Transmit(ctx context.Context, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signatures []ocrtypes.AttributedOnchainSignature) error { + var rs [][32]byte + var ss [][32]byte + var vs [32]byte + if len(signatures) > 32 { + return errors.New("too many signatures, maximum is 32") + } + for i, as := range signatures { + r, s, v, err := evmutil.SplitSignature(as.Signature) + if err != nil { + panic("eventTransmit(ev): error in SplitSignature") + } + rs = append(rs, r) + ss = append(ss, s) + vs[i] = v + } + rawReportCtx := evmutil.RawReportContext(reportCtx) + + txMeta, err := oc.reportToEvmTxMeta(report) + if err != nil { + oc.lggr.Warnw("failed to generate tx metadata for report", "err", err) + } + + var destinationContract common.Address + switch oc.contractVersion { + case 1: + oc.lggr.Debugw("FunctionsContractTransmitter: start", "reportLenBytes", len(report)) + requests, err2 := oc.reportCodec.DecodeReport(report) + if err2 != nil { + return errors.Wrap(err2, "FunctionsContractTransmitter: DecodeReport failed") + } + if len(requests) == 0 { + return errors.New("FunctionsContractTransmitter: no requests in report") + } + if len(requests[0].CoordinatorContract) != common.AddressLength { + return fmt.Errorf("FunctionsContractTransmitter: incorrect length of CoordinatorContract field: %d", len(requests[0].CoordinatorContract)) + } + destinationContract.SetBytes(requests[0].CoordinatorContract) + if destinationContract == (common.Address{}) { + return errors.New("FunctionsContractTransmitter: destination coordinator contract is zero") + } + // Sanity check - every report should contain requests with the same coordinator contract. 
+ for _, req := range requests[1:] { + if !bytes.Equal(req.CoordinatorContract, destinationContract.Bytes()) { + oc.lggr.Errorw("FunctionsContractTransmitter: non-uniform coordinator addresses in a batch - still sending to a single destination", + "requestID", hex.EncodeToString(req.RequestID), + "destinationContract", destinationContract, + "requestCoordinator", hex.EncodeToString(req.CoordinatorContract), + ) + } + } + oc.lggr.Debugw("FunctionsContractTransmitter: ready", "nRequests", len(requests), "coordinatorContract", destinationContract.Hex()) + default: + return fmt.Errorf("unsupported contract version: %d", oc.contractVersion) + } + payload, err := oc.contractABI.Pack("transmit", rawReportCtx, []byte(report), rs, ss, vs) + if err != nil { + return errors.Wrap(err, "abi.Pack failed") + } + + oc.lggr.Debugw("FunctionsContractTransmitter: transmitting report", "contractAddress", destinationContract, "txMeta", txMeta, "payloadSize", len(payload)) + return errors.Wrap(oc.transmitter.CreateEthTransaction(ctx, destinationContract, payload, txMeta), "failed to send Eth transaction") +} + +type contractReader interface { + CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) +} + +func parseTransmitted(log []byte) ([32]byte, uint32, error) { + var args abi.Arguments = []abi.Argument{ + { + Name: "configDigest", + Type: utils.MustAbiType("bytes32", nil), + }, + { + Name: "epoch", + Type: utils.MustAbiType("uint32", nil), + }, + } + transmitted, err := args.Unpack(log) + if err != nil { + return [32]byte{}, 0, err + } + if len(transmitted) < 2 { + return [32]byte{}, 0, errors.New("transmitted event log has too few arguments") + } + configDigest := *abi.ConvertType(transmitted[0], new([32]byte)).(*[32]byte) + epoch := *abi.ConvertType(transmitted[1], new(uint32)).(*uint32) + return configDigest, epoch, err +} + +func callContract(ctx context.Context, addr common.Address, contractABI abi.ABI, method string, args []interface{}, 
caller contractReader) ([]interface{}, error) {
+	input, err := contractABI.Pack(method, args...)
+	if err != nil {
+		return nil, err
+	}
+	output, err := caller.CallContract(ctx, ethereum.CallMsg{To: &addr, Data: input}, nil)
+	if err != nil {
+		return nil, err
+	}
+	return contractABI.Unpack(method, output)
+}
+
+// LatestConfigDigestAndEpoch retrieves the latest config digest and epoch from the OCR2 contract.
+// It is plugin independent, in particular avoids use of the plugin specific generated evm wrappers
+// by using the evm client Call directly for functions/events that are part of OCR2Abstract.
+func (oc *contractTransmitter) LatestConfigDigestAndEpoch(ctx context.Context) (ocrtypes.ConfigDigest, uint32, error) {
+	contractAddr := oc.contractAddress.Load()
+	if contractAddr == nil {
+		return ocrtypes.ConfigDigest{}, 0, errors.New("destination contract address not set")
+	}
+	latestConfigDigestAndEpoch, err := callContract(ctx, *contractAddr, oc.contractABI, "latestConfigDigestAndEpoch", nil, oc.contractReader)
+	if err != nil {
+		return ocrtypes.ConfigDigest{}, 0, err
+	}
+	// Panic on these conversions erroring, would mean a broken contract.
+	scanLogs := *abi.ConvertType(latestConfigDigestAndEpoch[0], new(bool)).(*bool)
+	configDigest := *abi.ConvertType(latestConfigDigestAndEpoch[1], new([32]byte)).(*[32]byte)
+	epoch := *abi.ConvertType(latestConfigDigestAndEpoch[2], new(uint32)).(*uint32)
+	if !scanLogs {
+		return configDigest, epoch, nil
+	}
+
+	// Otherwise, we have to scan for the logs.
+	// NOTE(review): a redundant `if err != nil` check was removed here — err is
+	// provably nil at this point because it was already checked right after the
+	// callContract invocation above.
+	latest, err := oc.lp.LatestLogByEventSigWithConfs(
+		oc.transmittedEventSig, *contractAddr, 1, pg.WithParentCtx(ctx))
+	if err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			// No transmissions yet
+			return configDigest, 0, nil
+		}
+		return ocrtypes.ConfigDigest{}, 0, err
+	}
+	return parseTransmitted(latest.Data)
+}
+
+// FromAccount returns the account from which the transmitter invokes the contract
+func (oc *contractTransmitter) FromAccount() (ocrtypes.Account, error) {
+	return ocrtypes.Account(oc.transmitter.FromAddress().String()), nil
+}
+
+func (oc *contractTransmitter) Start(ctx context.Context) error { return nil }
+func (oc *contractTransmitter) Close() error                    { return nil }
+
+// Has no state/lifecycle so it's always healthy and ready
+func (oc *contractTransmitter) Ready() error { return nil }
+func (oc *contractTransmitter) HealthReport() map[string]error {
+	return map[string]error{oc.Name(): nil}
+}
+func (oc *contractTransmitter) Name() string { return oc.lggr.Name() }
+
+// UpdateRoutes retargets the transmitter at the current active coordinator and
+// registers a log-poller filter for its Transmitted event; it is a no-op when
+// the active coordinator is unchanged.
+func (oc *contractTransmitter) UpdateRoutes(activeCoordinator common.Address, proposedCoordinator common.Address) error {
+	// transmitter only cares about the active coordinator
+	previousContract := oc.contractAddress.Swap(&activeCoordinator)
+	if previousContract != nil && *previousContract == activeCoordinator {
+		return nil
+	}
+	oc.lggr.Debugw("FunctionsContractTransmitter: updating routes", "previousContract", previousContract, "activeCoordinator", activeCoordinator)
+	err := oc.lp.RegisterFilter(logpoller.Filter{Name: transmitterFilterName(activeCoordinator), EventSigs: []common.Hash{oc.transmittedEventSig}, Addresses: []common.Address{activeCoordinator}})
+	if err != nil {
+		return err
+	}
+	// TODO: unregister old filter (needs refactor to get pg.Queryer)
+	return nil
+}
diff --git a/core/services/relay/evm/functions/contract_transmitter_test.go
b/core/services/relay/evm/functions/contract_transmitter_test.go new file mode 100644 index 00000000..c51558d0 --- /dev/null +++ b/core/services/relay/evm/functions/contract_transmitter_test.go @@ -0,0 +1,147 @@ +package functions_test + +import ( + "context" + "encoding/hex" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + gethcommon "github.com/ethereum/go-ethereum/common" + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + lpmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/encoding" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/functions" +) + +type mockTransmitter struct { + toAddress gethcommon.Address +} + +func (m *mockTransmitter) CreateEthTransaction(ctx context.Context, toAddress gethcommon.Address, payload []byte, _ *txmgr.TxMeta) error { + m.toAddress = toAddress + return nil +} +func (mockTransmitter) FromAddress() gethcommon.Address { return testutils.NewAddress() } + +func TestContractTransmitter_LatestConfigDigestAndEpoch(t *testing.T) { + t.Parallel() + + digestStr := "000130da6b9315bd59af6b0a3f5463c0d0a39e92eaa34cbcbdbace7b3bfcc776" + lggr := logger.TestLogger(t) + c := evmclimocks.NewClient(t) + lp := lpmocks.NewLogPoller(t) + digestAndEpochDontScanLogs, err := hex.DecodeString( + "0000000000000000000000000000000000000000000000000000000000000000" + // scan logs = false + digestStr + + "0000000000000000000000000000000000000000000000000000000000000002") // epoch + 
require.NoError(t, err) + c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(digestAndEpochDontScanLogs, nil).Once() + contractABI, err := abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) + require.NoError(t, err) + lp.On("RegisterFilter", mock.Anything).Return(nil) + + functionsTransmitter, err := functions.NewFunctionsContractTransmitter(c, contractABI, &mockTransmitter{}, lp, lggr, func(b []byte) (*txmgr.TxMeta, error) { + return &txmgr.TxMeta{}, nil + }, 1) + require.NoError(t, err) + require.NoError(t, functionsTransmitter.UpdateRoutes(gethcommon.Address{}, gethcommon.Address{})) + + digest, epoch, err := functionsTransmitter.LatestConfigDigestAndEpoch(testutils.Context(t)) + require.NoError(t, err) + assert.Equal(t, digestStr, hex.EncodeToString(digest[:])) + assert.Equal(t, uint32(2), epoch) +} + +func TestContractTransmitter_Transmit_V1(t *testing.T) { + t.Parallel() + + contractVersion := uint32(1) + configuredDestAddress, coordinatorAddress := testutils.NewAddress(), testutils.NewAddress() + lggr := logger.TestLogger(t) + c := evmclimocks.NewClient(t) + lp := lpmocks.NewLogPoller(t) + contractABI, _ := abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) + lp.On("RegisterFilter", mock.Anything).Return(nil) + + ocrTransmitter := mockTransmitter{} + ot, err := functions.NewFunctionsContractTransmitter(c, contractABI, &ocrTransmitter, lp, lggr, func(b []byte) (*txmgr.TxMeta, error) { + return &txmgr.TxMeta{}, nil + }, contractVersion) + require.NoError(t, err) + require.NoError(t, ot.UpdateRoutes(configuredDestAddress, configuredDestAddress)) + + reqId, err := hex.DecodeString("000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f") + require.NoError(t, err) + processedRequests := []*encoding.ProcessedRequest{ + { + RequestID: reqId, + CoordinatorContract: coordinatorAddress.Bytes(), + }, + } + codec, err := encoding.NewReportCodec(contractVersion) + require.NoError(t, err) + reportBytes, err := 
codec.EncodeReport(processedRequests) + require.NoError(t, err) + + // success + require.NoError(t, ot.Transmit(testutils.Context(t), ocrtypes.ReportContext{}, reportBytes, []ocrtypes.AttributedOnchainSignature{})) + require.Equal(t, coordinatorAddress, ocrTransmitter.toAddress) + + // failure on too many signatures + signatures := []ocrtypes.AttributedOnchainSignature{} + for i := 0; i < 33; i++ { + signatures = append(signatures, ocrtypes.AttributedOnchainSignature{}) + } + require.Error(t, ot.Transmit(testutils.Context(t), ocrtypes.ReportContext{}, reportBytes, signatures)) +} + +func TestContractTransmitter_Transmit_V1_CoordinatorMismatch(t *testing.T) { + t.Parallel() + + contractVersion := uint32(1) + configuredDestAddress, coordinatorAddress1, coordinatorAddress2 := testutils.NewAddress(), testutils.NewAddress(), testutils.NewAddress() + lggr := logger.TestLogger(t) + c := evmclimocks.NewClient(t) + lp := lpmocks.NewLogPoller(t) + contractABI, _ := abi.JSON(strings.NewReader(ocr2aggregator.OCR2AggregatorABI)) + lp.On("RegisterFilter", mock.Anything).Return(nil) + + ocrTransmitter := mockTransmitter{} + ot, err := functions.NewFunctionsContractTransmitter(c, contractABI, &ocrTransmitter, lp, lggr, func(b []byte) (*txmgr.TxMeta, error) { + return &txmgr.TxMeta{}, nil + }, contractVersion) + require.NoError(t, err) + require.NoError(t, ot.UpdateRoutes(configuredDestAddress, configuredDestAddress)) + + reqId1, err := hex.DecodeString("110102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f") + require.NoError(t, err) + reqId2, err := hex.DecodeString("220102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f") + require.NoError(t, err) + processedRequests := []*encoding.ProcessedRequest{ + { + RequestID: reqId1, + CoordinatorContract: coordinatorAddress1.Bytes(), + }, + { + RequestID: reqId2, + CoordinatorContract: coordinatorAddress2.Bytes(), + }, + } + codec, err := encoding.NewReportCodec(contractVersion) + require.NoError(t, err) + 
reportBytes, err := codec.EncodeReport(processedRequests) + require.NoError(t, err) + + require.NoError(t, ot.Transmit(testutils.Context(t), ocrtypes.ReportContext{}, reportBytes, []ocrtypes.AttributedOnchainSignature{})) + require.Equal(t, coordinatorAddress1, ocrTransmitter.toAddress) +} diff --git a/core/services/relay/evm/functions/logpoller_wrapper.go b/core/services/relay/evm/functions/logpoller_wrapper.go new file mode 100644 index 00000000..f938abec --- /dev/null +++ b/core/services/relay/evm/functions/logpoller_wrapper.go @@ -0,0 +1,440 @@ +package functions + +import ( + "context" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_coordinator" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_router" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + evmRelayTypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type logPollerWrapper struct { + services.StateMachine + + routerContract *functions_router.FunctionsRouter + pluginConfig config.PluginConfig + client client.Client + logPoller logpoller.LogPoller + subscribers map[string]evmRelayTypes.RouteUpdateSubscriber + activeCoordinator common.Address + proposedCoordinator common.Address + requestBlockOffset int64 + responseBlockOffset int64 + pastBlocksToPoll int64 + logPollerCacheDurationSec int64 + detectedRequests detectedEvents + detectedResponses detectedEvents + mu sync.Mutex + closeWait sync.WaitGroup + stopCh 
services.StopChan + lggr logger.Logger +} + +type detectedEvent struct { + requestId [32]byte + timeDetected time.Time +} + +type detectedEvents struct { + isPreviouslyDetected map[[32]byte]struct{} + detectedEventsOrdered []detectedEvent +} + +const logPollerCacheDurationSecDefault = 300 +const pastBlocksToPollDefault = 50 +const maxLogsToProcess = 1000 + +var _ evmRelayTypes.LogPollerWrapper = &logPollerWrapper{} + +func NewLogPollerWrapper(routerContractAddress common.Address, pluginConfig config.PluginConfig, client client.Client, logPoller logpoller.LogPoller, lggr logger.Logger) (evmRelayTypes.LogPollerWrapper, error) { + routerContract, err := functions_router.NewFunctionsRouter(routerContractAddress, client) + if err != nil { + return nil, err + } + blockOffset := int64(pluginConfig.MinIncomingConfirmations) - 1 + if blockOffset < 0 { + lggr.Warnw("invalid minIncomingConfirmations, using 1 instead", "minIncomingConfirmations", pluginConfig.MinIncomingConfirmations) + blockOffset = 0 + } + requestBlockOffset := int64(pluginConfig.MinRequestConfirmations) - 1 + if requestBlockOffset < 0 { + lggr.Warnw("invalid minRequestConfirmations, using minIncomingConfirmations instead", "minRequestConfirmations", pluginConfig.MinRequestConfirmations) + requestBlockOffset = blockOffset + } + responseBlockOffset := int64(pluginConfig.MinResponseConfirmations) - 1 + if responseBlockOffset < 0 { + lggr.Warnw("invalid minResponseConfirmations, using minIncomingConfirmations instead", "minResponseConfirmations", pluginConfig.MinResponseConfirmations) + responseBlockOffset = blockOffset + } + logPollerCacheDurationSec := int64(pluginConfig.LogPollerCacheDurationSec) + if logPollerCacheDurationSec <= 0 { + lggr.Warnw("invalid logPollerCacheDuration, using 300 instead", "logPollerCacheDurationSec", logPollerCacheDurationSec) + logPollerCacheDurationSec = logPollerCacheDurationSecDefault + } + pastBlocksToPoll := int64(pluginConfig.PastBlocksToPoll) + if pastBlocksToPoll <= 0 { + 
lggr.Warnw("invalid pastBlocksToPoll, using 50 instead", "pastBlocksToPoll", pastBlocksToPoll) + pastBlocksToPoll = pastBlocksToPollDefault + } + if blockOffset >= pastBlocksToPoll || requestBlockOffset >= pastBlocksToPoll || responseBlockOffset >= pastBlocksToPoll { + lggr.Errorw("invalid config: number of required confirmation blocks >= pastBlocksToPoll", "pastBlocksToPoll", pastBlocksToPoll, "minIncomingConfirmations", pluginConfig.MinIncomingConfirmations, "minRequestConfirmations", pluginConfig.MinRequestConfirmations, "minResponseConfirmations", pluginConfig.MinResponseConfirmations) + return nil, errors.Errorf("invalid config: number of required confirmation blocks >= pastBlocksToPoll") + } + + return &logPollerWrapper{ + routerContract: routerContract, + pluginConfig: pluginConfig, + requestBlockOffset: requestBlockOffset, + responseBlockOffset: responseBlockOffset, + pastBlocksToPoll: pastBlocksToPoll, + logPollerCacheDurationSec: logPollerCacheDurationSec, + detectedRequests: detectedEvents{isPreviouslyDetected: make(map[[32]byte]struct{})}, + detectedResponses: detectedEvents{isPreviouslyDetected: make(map[[32]byte]struct{})}, + logPoller: logPoller, + client: client, + subscribers: make(map[string]evmRelayTypes.RouteUpdateSubscriber), + stopCh: make(services.StopChan), + lggr: lggr.Named("LogPollerWrapper"), + }, nil +} + +func (l *logPollerWrapper) Start(context.Context) error { + return l.StartOnce("LogPollerWrapper", func() error { + l.lggr.Infow("starting LogPollerWrapper", "routerContract", l.routerContract.Address().Hex(), "contractVersion", l.pluginConfig.ContractVersion) + l.mu.Lock() + defer l.mu.Unlock() + if l.pluginConfig.ContractVersion != 1 { + return errors.New("only contract version 1 is supported") + } + l.closeWait.Add(1) + go l.checkForRouteUpdates() + return nil + }) +} + +func (l *logPollerWrapper) Close() error { + return l.StopOnce("LogPollerWrapper", func() (err error) { + l.lggr.Info("closing LogPollerWrapper") + close(l.stopCh) 
+ l.closeWait.Wait() + return nil + }) +} + +func (l *logPollerWrapper) HealthReport() map[string]error { + return map[string]error{l.Name(): l.Ready()} +} + +func (l *logPollerWrapper) Name() string { return l.lggr.Name() } + +// methods of LogPollerWrapper +func (l *logPollerWrapper) LatestEvents() ([]evmRelayTypes.OracleRequest, []evmRelayTypes.OracleResponse, error) { + l.mu.Lock() + coordinators := []common.Address{} + if l.activeCoordinator != (common.Address{}) { + coordinators = append(coordinators, l.activeCoordinator) + } + if l.proposedCoordinator != (common.Address{}) && l.activeCoordinator != l.proposedCoordinator { + coordinators = append(coordinators, l.proposedCoordinator) + } + latest, err := l.logPoller.LatestBlock() + if err != nil { + l.mu.Unlock() + return nil, nil, err + } + latestBlockNum := latest.BlockNumber + startBlockNum := latestBlockNum - l.pastBlocksToPoll + if startBlockNum < 0 { + startBlockNum = 0 + } + l.mu.Unlock() + + // outside of the lock + resultsReq := []evmRelayTypes.OracleRequest{} + resultsResp := []evmRelayTypes.OracleResponse{} + if len(coordinators) == 0 { + l.lggr.Debug("LatestEvents: no non-zero coordinators to check") + return resultsReq, resultsResp, errors.New("no non-zero coordinators to check") + } + + for _, coordinator := range coordinators { + requestEndBlock := latestBlockNum - l.requestBlockOffset + requestLogs, err := l.logPoller.Logs(startBlockNum, requestEndBlock, functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(), coordinator) + if err != nil { + l.lggr.Errorw("LatestEvents: fetching request logs from LogPoller failed", "startBlock", startBlockNum, "endBlock", requestEndBlock) + return nil, nil, err + } + l.lggr.Debugw("LatestEvents: fetched request logs", "nRequestLogs", len(requestLogs), "latestBlock", latest, "startBlock", startBlockNum, "endBlock", requestEndBlock) + requestLogs = l.filterPreviouslyDetectedEvents(requestLogs, &l.detectedRequests, "requests") + responseEndBlock := 
latestBlockNum - l.responseBlockOffset + responseLogs, err := l.logPoller.Logs(startBlockNum, responseEndBlock, functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(), coordinator) + if err != nil { + l.lggr.Errorw("LatestEvents: fetching response logs from LogPoller failed", "startBlock", startBlockNum, "endBlock", responseEndBlock) + return nil, nil, err + } + l.lggr.Debugw("LatestEvents: fetched request logs", "nResponseLogs", len(responseLogs), "latestBlock", latest, "startBlock", startBlockNum, "endBlock", responseEndBlock) + responseLogs = l.filterPreviouslyDetectedEvents(responseLogs, &l.detectedResponses, "responses") + + parsingContract, err := functions_coordinator.NewFunctionsCoordinator(coordinator, l.client) + if err != nil { + l.lggr.Error("LatestEvents: creating a contract instance for parsing failed") + return nil, nil, err + } + + l.lggr.Debugw("LatestEvents: parsing logs", "nRequestLogs", len(requestLogs), "nResponseLogs", len(responseLogs), "coordinatorAddress", coordinator.Hex()) + for _, log := range requestLogs { + gethLog := log.ToGethLog() + oracleRequest, err := parsingContract.ParseOracleRequest(gethLog) + if err != nil { + l.lggr.Errorw("LatestEvents: failed to parse a request log, skipping", "err", err) + continue + } + + uint32Type, errType1 := abi.NewType("uint32", "uint32", nil) + uint40Type, errType2 := abi.NewType("uint40", "uint40", nil) + uint64Type, errType3 := abi.NewType("uint64", "uint64", nil) + uint72Type, errType4 := abi.NewType("uint72", "uint72", nil) + uint96Type, errType5 := abi.NewType("uint96", "uint96", nil) + addressType, errType6 := abi.NewType("address", "address", nil) + bytes32Type, errType7 := abi.NewType("bytes32", "bytes32", nil) + + if errType1 != nil || errType2 != nil || errType3 != nil || errType4 != nil || errType5 != nil || errType6 != nil || errType7 != nil { + l.lggr.Errorw("LatestEvents: failed to initialize types", "errType1", errType1, + "errType2", errType2, "errType3", errType3, 
"errType4", errType4, "errType5", errType5, "errType6", errType6, "errType7", errType7, + ) + continue + } + commitmentABI := abi.Arguments{ + {Type: bytes32Type}, // RequestId + {Type: addressType}, // Coordinator + {Type: uint96Type}, // EstimatedTotalCostJuels + {Type: addressType}, // Client + {Type: uint64Type}, // SubscriptionId + {Type: uint32Type}, // CallbackGasLimit + {Type: uint72Type}, // AdminFee + {Type: uint72Type}, // DonFee + {Type: uint40Type}, // GasOverheadBeforeCallback + {Type: uint40Type}, // GasOverheadAfterCallback + {Type: uint32Type}, // TimeoutTimestamp + } + commitmentBytes, err := commitmentABI.Pack( + oracleRequest.Commitment.RequestId, + oracleRequest.Commitment.Coordinator, + oracleRequest.Commitment.EstimatedTotalCostJuels, + oracleRequest.Commitment.Client, + oracleRequest.Commitment.SubscriptionId, + oracleRequest.Commitment.CallbackGasLimit, + oracleRequest.Commitment.AdminFee, + oracleRequest.Commitment.DonFee, + oracleRequest.Commitment.GasOverheadBeforeCallback, + oracleRequest.Commitment.GasOverheadAfterCallback, + oracleRequest.Commitment.TimeoutTimestamp, + ) + if err != nil { + l.lggr.Errorw("LatestEvents: failed to pack commitment bytes, skipping", err) + } + + resultsReq = append(resultsReq, evmRelayTypes.OracleRequest{ + RequestId: oracleRequest.RequestId, + RequestingContract: oracleRequest.RequestingContract, + RequestInitiator: oracleRequest.RequestInitiator, + SubscriptionId: oracleRequest.SubscriptionId, + SubscriptionOwner: oracleRequest.SubscriptionOwner, + Data: oracleRequest.Data, + DataVersion: oracleRequest.DataVersion, + Flags: oracleRequest.Flags, + CallbackGasLimit: oracleRequest.CallbackGasLimit, + TxHash: oracleRequest.Raw.TxHash, + OnchainMetadata: commitmentBytes, + CoordinatorContract: coordinator, + }) + } + for _, log := range responseLogs { + gethLog := log.ToGethLog() + oracleResponse, err := parsingContract.ParseOracleResponse(gethLog) + if err != nil { + l.lggr.Errorw("LatestEvents: failed to 
parse a response log, skipping") + continue + } + resultsResp = append(resultsResp, evmRelayTypes.OracleResponse{ + RequestId: oracleResponse.RequestId, + }) + } + } + + l.lggr.Debugw("LatestEvents: done", "nRequestLogs", len(resultsReq), "nResponseLogs", len(resultsResp), "startBlock", startBlockNum, "endBlock", latestBlockNum) + return resultsReq, resultsResp, nil +} + +func (l *logPollerWrapper) filterPreviouslyDetectedEvents(logs []logpoller.Log, detectedEvents *detectedEvents, filterType string) []logpoller.Log { + if len(logs) > maxLogsToProcess { + l.lggr.Errorw("filterPreviouslyDetectedEvents: too many logs to process, only processing latest maxLogsToProcess logs", "filterType", filterType, "nLogs", len(logs), "maxLogsToProcess", maxLogsToProcess) + logs = logs[len(logs)-maxLogsToProcess:] + } + l.mu.Lock() + defer l.mu.Unlock() + filteredLogs := []logpoller.Log{} + for _, log := range logs { + var requestId [32]byte + if len(log.Topics) < 2 || len(log.Topics[1]) != 32 { + l.lggr.Errorw("filterPreviouslyDetectedEvents: invalid log, skipping", "filterType", filterType, "log", log) + continue + } + copy(requestId[:], log.Topics[1]) // requestId is the second topic (1st topic is the event signature) + if _, ok := detectedEvents.isPreviouslyDetected[requestId]; !ok { + filteredLogs = append(filteredLogs, log) + detectedEvents.isPreviouslyDetected[requestId] = struct{}{} + detectedEvents.detectedEventsOrdered = append(detectedEvents.detectedEventsOrdered, detectedEvent{requestId: requestId, timeDetected: time.Now()}) + } + } + expiredRequests := 0 + for _, detectedEvent := range detectedEvents.detectedEventsOrdered { + expirationTime := time.Now().Add(-time.Second * time.Duration(l.logPollerCacheDurationSec)) + if !detectedEvent.timeDetected.Before(expirationTime) { + break + } + delete(detectedEvents.isPreviouslyDetected, detectedEvent.requestId) + expiredRequests++ + } + detectedEvents.detectedEventsOrdered = 
detectedEvents.detectedEventsOrdered[expiredRequests:] + l.lggr.Debugw("filterPreviouslyDetectedEvents: done", "filterType", filterType, "nLogs", len(logs), "nFilteredLogs", len(filteredLogs), "nExpiredRequests", expiredRequests, "previouslyDetectedCacheSize", len(detectedEvents.detectedEventsOrdered)) + return filteredLogs +} + +// "internal" method called only by EVM relayer components +func (l *logPollerWrapper) SubscribeToUpdates(subscriberName string, subscriber evmRelayTypes.RouteUpdateSubscriber) { + if l.pluginConfig.ContractVersion == 0 { + // in V0, immediately set contract address to Oracle contract and never update again + if err := subscriber.UpdateRoutes(l.routerContract.Address(), l.routerContract.Address()); err != nil { + l.lggr.Errorw("LogPollerWrapper: Failed to update routes", "subscriberName", subscriberName, "err", err) + } + } else if l.pluginConfig.ContractVersion == 1 { + l.mu.Lock() + defer l.mu.Unlock() + l.subscribers[subscriberName] = subscriber + } +} + +func (l *logPollerWrapper) checkForRouteUpdates() { + defer l.closeWait.Done() + freqSec := l.pluginConfig.ContractUpdateCheckFrequencySec + if freqSec == 0 { + l.lggr.Errorw("LogPollerWrapper: ContractUpdateCheckFrequencySec is zero - route update checks disabled") + return + } + + updateOnce := func() { + // NOTE: timeout == frequency here, could be changed to a separate config value + timeoutCtx, cancel := utils.ContextFromChanWithTimeout(l.stopCh, time.Duration(l.pluginConfig.ContractUpdateCheckFrequencySec)*time.Second) + defer cancel() + active, proposed, err := l.getCurrentCoordinators(timeoutCtx) + if err != nil { + l.lggr.Errorw("LogPollerWrapper: error calling getCurrentCoordinators", "err", err) + return + } + l.handleRouteUpdate(active, proposed) + } + + updateOnce() // update once right away + ticker := time.NewTicker(time.Duration(freqSec) * time.Second) + defer ticker.Stop() + for { + select { + case <-l.stopCh: + return + case <-ticker.C: + updateOnce() + } + } +} + 
+func (l *logPollerWrapper) getCurrentCoordinators(ctx context.Context) (common.Address, common.Address, error) { + if l.pluginConfig.ContractVersion == 0 { + return l.routerContract.Address(), l.routerContract.Address(), nil + } + var donId [32]byte + copy(donId[:], []byte(l.pluginConfig.DONID)) + + activeCoordinator, err := l.routerContract.GetContractById(&bind.CallOpts{ + Pending: false, + Context: ctx, + }, donId) + if err != nil { + return common.Address{}, common.Address{}, err + } + + proposedCoordinator, err := l.routerContract.GetProposedContractById(&bind.CallOpts{ + Pending: false, + Context: ctx, + }, donId) + if err != nil { + return activeCoordinator, l.proposedCoordinator, nil + } + + return activeCoordinator, proposedCoordinator, nil +} + +func (l *logPollerWrapper) handleRouteUpdate(activeCoordinator common.Address, proposedCoordinator common.Address) { + l.mu.Lock() + defer l.mu.Unlock() + + if activeCoordinator == (common.Address{}) { + l.lggr.Error("LogPollerWrapper: cannot update activeCoordinator to zero address") + return + } + + if activeCoordinator == l.activeCoordinator && proposedCoordinator == l.proposedCoordinator { + l.lggr.Debug("LogPollerWrapper: no changes to routes") + return + } + errActive := l.registerFilters(activeCoordinator) + errProposed := l.registerFilters(proposedCoordinator) + if errActive != nil || errProposed != nil { + l.lggr.Errorw("LogPollerWrapper: Failed to register filters", "errorActive", errActive, "errorProposed", errProposed) + return + } + + l.lggr.Debugw("LogPollerWrapper: new routes", "activeCoordinator", activeCoordinator.Hex(), "proposedCoordinator", proposedCoordinator.Hex()) + l.activeCoordinator = activeCoordinator + l.proposedCoordinator = proposedCoordinator + + for _, subscriber := range l.subscribers { + err := subscriber.UpdateRoutes(activeCoordinator, proposedCoordinator) + if err != nil { + l.lggr.Errorw("LogPollerWrapper: Failed to update routes", "err", err) + } + } +} + +func 
filterName(addr common.Address) string { + return logpoller.FilterName("FunctionsLogPollerWrapper", addr.String()) +} + +func (l *logPollerWrapper) registerFilters(coordinatorAddress common.Address) error { + if (coordinatorAddress == common.Address{}) { + return nil + } + return l.logPoller.RegisterFilter( + logpoller.Filter{ + Name: filterName(coordinatorAddress), + EventSigs: []common.Hash{ + functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(), + functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(), + }, + Addresses: []common.Address{coordinatorAddress}, + }) +} diff --git a/core/services/relay/evm/functions/logpoller_wrapper_test.go b/core/services/relay/evm/functions/logpoller_wrapper_test.go new file mode 100644 index 00000000..1efba1c2 --- /dev/null +++ b/core/services/relay/evm/functions/logpoller_wrapper_test.go @@ -0,0 +1,209 @@ +package functions + +import ( + "crypto/rand" + "encoding/hex" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + lpmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller/mocks" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_coordinator" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/functions/config" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +type subscriber struct { + updates sync.WaitGroup + expectedCalls int +} + +func (s *subscriber) UpdateRoutes(activeCoordinator common.Address, proposedCoordinator common.Address) error { + if s.expectedCalls == 0 { + panic("unexpected call to UpdateRoutes") + } + if 
activeCoordinator == (common.Address{}) { + panic("activeCoordinator should not be zero") + } + s.expectedCalls-- + s.updates.Done() + return nil +} + +func newSubscriber(expectedCalls int) *subscriber { + sub := &subscriber{expectedCalls: expectedCalls} + sub.updates.Add(expectedCalls) + return sub +} + +func addr(t *testing.T, lastByte string) []byte { + contractAddr, err := hex.DecodeString("00000000000000000000000000000000000000000000000000000000000000" + lastByte) + require.NoError(t, err) + return contractAddr +} + +func setUp(t *testing.T, updateFrequencySec uint32) (*lpmocks.LogPoller, types.LogPollerWrapper, *evmclimocks.Client) { + lggr := logger.TestLogger(t) + client := evmclimocks.NewClient(t) + lp := lpmocks.NewLogPoller(t) + config := config.PluginConfig{ + ContractUpdateCheckFrequencySec: updateFrequencySec, + ContractVersion: 1, + } + lpWrapper, err := NewLogPollerWrapper(common.Address{}, config, client, lp, lggr) + require.NoError(t, err) + + return lp, lpWrapper, client +} + +func getMockedRequestLog(t *testing.T) logpoller.Log { + // NOTE: Change this to be a more readable log generation + data, err := 
hex.DecodeString("000000000000000000000000c113ba31b0080f940ca5812bbccc1e038ea9efb40000000000000000000000000000000000000000000000000000000000000001000000000000000000000000c113ba31b0080f940ca5812bbccc1e038ea9efb4000000000000000000000000000000000000000000000000000000000000024000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001117082cd81744eb9504dc37f53a86db7e3fb24929b8e7507b097d501ab5b315fb20e0000000000000000000000001b4f2b0e6363097f413c249910d5bc632993ed08000000000000000000000000000000000000000000000000015bcf880382c000000000000000000000000000665785a800593e8fa915208c1ce62f6e57fd75ba0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000001117000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004f588000000000000000000000000000000000000000000000000000000000000c350000000000000000000000000000000000000000000000000000000000000021c00000000000000000000000000000000000000000000000000000000000008866c636f64654c6f636174696f6ec258200000000000000000000000000000000000000000000000000000000000000000686c616e6775616765c25820000000000000000000000000000000000000000000000000000000000000000066736f757263657907d06338363666656432383265333131376364663038366339343966626131336438346663313761316563353539346563616430343531336466323261376235383563333637636331323262363731383063343837373034356162353830333734633530663138623465643861323461313234373835323637316230306330356632373731636630363636323336633332363939393231393638663238333464386264626162663066616431653132376138376432373639363238316439656565393261346462633163373561373161366563336131353564386332306166616430646234323833626134333537363037346530356334336335616530616564666433323238383465366132313864663234303236303164363564
37316131303061633065376563643037663565646364633535643562373932646130626632353665623038363139336463376431333965613764373965653531653831356465333834386565643363366330353837393265366461333434363738626436373239346636643639656564356132663836323835343965616530323235323835346232666361333635646265623032383433386537326234383465383864316136646563373933633739656265353834666465363465663831383363313365386231623735663037636532303963393138633532643637613735343862653236366433663964316439656132613162303166633838376231316162383739663164333861373833303563373031316533643938346130393863663634383931316536653065383038396365306130363230393136663134323935343036336630376239343931326435666331393366303138633764616135363136323562313966376463323036663930353365623234643036323234616164326338623430646162663631656166666635326234653831373239353837333830313561643730663739316663643864333739343035353737393563383937363164636665333639373938373437353439633234643530646464303563623337613465613863353162306530313032363738643433653766306563353039653434633564343764353335626261363831303936383264643864653439326532363633646336653133653532383539663664336565306533633430336236366362653338643236366137356163373639363863613465653331396166363965373431333137393162653630376537353832373430366164653038306335623239653665343262386563386137373761663865383166336234616337626263666531643066616633393338613664353061316561633835643933643234343066313863333037356237306433626134663930323836396439383937663266636562626262366263646439333436633336633663643838626434336265306562333134323562343665613765386338336638386230363933343836383666366134313839623535666132666431396634326264333730313634616339356530303635656461663130373761633131366632393930303833616631333839636661666336613433323439376531363437393762633738616633366335613435366136646661326636626430626639326136613930366130653930313130626266323265613066333163663364353132663466303331653236343330633831663935656431323362323938356266623830623161396432646337306232356264613961386261303839323833666166
663634383661316231646235613938353564346237363966623835663531353063393935306462303964373536326537353133633234653531636163366634366634633231636234373561613937363166666466626434656138613531626465613432383037313466363538393630656336643139656539373237626339316635313665346466306665346264613762623035343161393462326334396636323938616132396337656130646662653635346632306437663164323239633066303262356535326137363031376237306439383232643533383166623966613166393361353861376338383632326631326462643363623937323363626132313639633337643538303939336333663666393065323039336331336130363132323334303064393731363031656262313631343332613966666333373033396562663537326364326566666635636562323539346236346462336261616431633734663532653938343938353964383363313238353465376263393764363432363464653931343735386333386438383739343132333937653263643534653431366234373962363331623830626633306266653062366239353564393066356362303435346361373531303963393938366330636536316165356566376534653433353036313432633633646235363862383634353139623463306636366137633161376661336538666431323231376666336665383164663830643138386232646334343833356132663332323733666133353139633531343764643233353763326161346336326461386238353232306535386130333565373662633133316634623734376632663731643263663933376431303832356138316533623963323136663962316134646431663239383463656635656363656265353530363662363061373263363063323864303336653766386635323131343735386638326366323330646636363930636364617267739f64617267316461726732ff6f736563726574734c6f636174696f6ec2582000000000000000000000000000000000000000000000000000000000000000016773656372657473430102030000000000000000000000000000000000000000000000000000") + require.NoError(t, err) + topic0, err := hex.DecodeString("bf50768ccf13bd0110ca6d53a9c4f1f3271abdd4c24a56878863ed25b20598ff") + require.NoError(t, err) + // Create a random requestID + topic1 := make([]byte, 32) + _, err = rand.Read(topic1) + require.NoError(t, err) + topic2, err := 
hex.DecodeString("000000000000000000000000665785a800593e8fa915208c1ce62f6e57fd75ba") + require.NoError(t, err) + return logpoller.Log{ + Topics: [][]byte{topic0, topic1, topic2}, + Data: data, + } +} + +func TestLogPollerWrapper_SingleSubscriberEmptyEvents(t *testing.T) { + t.Parallel() + lp, lpWrapper, client := setUp(t, 100_000) // check only once + lp.On("LatestBlock").Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) + + lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{}, nil) + client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "01"), nil) + lp.On("RegisterFilter", mock.Anything).Return(nil) + + subscriber := newSubscriber(1) + lpWrapper.SubscribeToUpdates("mock_subscriber", subscriber) + + servicetest.Run(t, lpWrapper) + subscriber.updates.Wait() + reqs, resps, err := lpWrapper.LatestEvents() + require.NoError(t, err) + require.Equal(t, 0, len(reqs)) + require.Equal(t, 0, len(resps)) +} + +func TestLogPollerWrapper_ErrorOnZeroAddresses(t *testing.T) { + t.Parallel() + lp, lpWrapper, client := setUp(t, 100_000) // check only once + lp.On("LatestBlock").Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) + + client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "00"), nil) + + servicetest.Run(t, lpWrapper) + _, _, err := lpWrapper.LatestEvents() + require.Error(t, err) +} + +func TestLogPollerWrapper_LatestEvents_ReorgHandling(t *testing.T) { + t.Parallel() + lp, lpWrapper, client := setUp(t, 100_000) + lp.On("LatestBlock").Return(logpoller.LogPollerBlock{BlockNumber: int64(100)}, nil) + client.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(addr(t, "01"), nil) + lp.On("RegisterFilter", mock.Anything).Return(nil) + subscriber := newSubscriber(1) + lpWrapper.SubscribeToUpdates("mock_subscriber", subscriber) + mockedLog := getMockedRequestLog(t) + // All logPoller queries for responses return none + 
lp.On("Logs", mock.Anything, mock.Anything, functions_coordinator.FunctionsCoordinatorOracleResponse{}.Topic(), mock.Anything).Return([]logpoller.Log{}, nil) + // On the first logPoller query for requests, the request log appears + lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{mockedLog}, nil).Once() + // On the 2nd query, the request log disappears + lp.On("Logs", mock.Anything, mock.Anything, functions_coordinator.FunctionsCoordinatorOracleRequest{}.Topic(), mock.Anything).Return([]logpoller.Log{}, nil).Once() + // On the 3rd query, the original request log appears again + lp.On("Logs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]logpoller.Log{mockedLog}, nil).Once() + + servicetest.Run(t, lpWrapper) + subscriber.updates.Wait() + + oracleRequests, _, err := lpWrapper.LatestEvents() + require.NoError(t, err) + assert.Equal(t, 1, len(oracleRequests)) + oracleRequests, _, err = lpWrapper.LatestEvents() + require.NoError(t, err) + assert.Equal(t, 0, len(oracleRequests)) + require.NoError(t, err) + oracleRequests, _, err = lpWrapper.LatestEvents() + require.NoError(t, err) + assert.Equal(t, 0, len(oracleRequests)) +} + +func TestLogPollerWrapper_FilterPreviouslyDetectedEvents_TruncatesLogs(t *testing.T) { + t.Parallel() + _, lpWrapper, _ := setUp(t, 100_000) + + inputLogs := make([]logpoller.Log, maxLogsToProcess+100) + for i := 0; i < 1100; i++ { + inputLogs[i] = getMockedRequestLog(t) + } + + functionsLpWrapper := lpWrapper.(*logPollerWrapper) + mockedDetectedEvents := detectedEvents{isPreviouslyDetected: make(map[[32]byte]struct{})} + outputLogs := functionsLpWrapper.filterPreviouslyDetectedEvents(inputLogs, &mockedDetectedEvents, "request") + + assert.Equal(t, maxLogsToProcess, len(outputLogs)) + assert.Equal(t, 1000, len(mockedDetectedEvents.detectedEventsOrdered)) + assert.Equal(t, 1000, len(mockedDetectedEvents.isPreviouslyDetected)) +} + +func 
TestLogPollerWrapper_FilterPreviouslyDetectedEvents_SkipsInvalidLog(t *testing.T) { + t.Parallel() + _, lpWrapper, _ := setUp(t, 100_000) + inputLogs := []logpoller.Log{getMockedRequestLog(t)} + inputLogs[0].Topics = [][]byte{[]byte("invalid topic")} + mockedDetectedEvents := detectedEvents{isPreviouslyDetected: make(map[[32]byte]struct{})} + + functionsLpWrapper := lpWrapper.(*logPollerWrapper) + outputLogs := functionsLpWrapper.filterPreviouslyDetectedEvents(inputLogs, &mockedDetectedEvents, "request") + + assert.Equal(t, 0, len(outputLogs)) + assert.Equal(t, 0, len(mockedDetectedEvents.detectedEventsOrdered)) + assert.Equal(t, 0, len(mockedDetectedEvents.isPreviouslyDetected)) +} + +func TestLogPollerWrapper_FilterPreviouslyDetectedEvents_FiltersPreviouslyDetectedEvent(t *testing.T) { + t.Parallel() + _, lpWrapper, _ := setUp(t, 100_000) + mockedRequestLog := getMockedRequestLog(t) + inputLogs := []logpoller.Log{mockedRequestLog} + var mockedRequestId [32]byte + copy(mockedRequestId[:], mockedRequestLog.Topics[1]) + + mockedDetectedEvents := detectedEvents{ + isPreviouslyDetected: make(map[[32]byte]struct{}), + detectedEventsOrdered: make([]detectedEvent, 1), + } + mockedDetectedEvents.isPreviouslyDetected[mockedRequestId] = struct{}{} + mockedDetectedEvents.detectedEventsOrdered[0] = detectedEvent{ + requestId: mockedRequestId, + timeDetected: time.Now().Add(-time.Second * time.Duration(logPollerCacheDurationSecDefault+1)), + } + + functionsLpWrapper := lpWrapper.(*logPollerWrapper) + outputLogs := functionsLpWrapper.filterPreviouslyDetectedEvents(inputLogs, &mockedDetectedEvents, "request") + + assert.Equal(t, 0, len(outputLogs)) + // Ensure that expired events are removed from the cache + assert.Equal(t, 0, len(mockedDetectedEvents.detectedEventsOrdered)) + assert.Equal(t, 0, len(mockedDetectedEvents.isPreviouslyDetected)) +} diff --git a/core/services/relay/evm/functions/offchain_config_digester.go 
b/core/services/relay/evm/functions/offchain_config_digester.go new file mode 100644 index 00000000..5ec69bce --- /dev/null +++ b/core/services/relay/evm/functions/offchain_config_digester.go @@ -0,0 +1,88 @@ +package functions + +import ( + "encoding/binary" + "errors" + "fmt" + "sync/atomic" + + "github.com/ethereum/go-ethereum/common" + "github.com/goplugin/libocr/offchainreporting2/chains/evmutil" + "github.com/goplugin/libocr/offchainreporting2plus/types" + + evmRelayTypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +var ( + _ types.OffchainConfigDigester = &functionsOffchainConfigDigester{} + _ evmRelayTypes.RouteUpdateSubscriber = &functionsOffchainConfigDigester{} + FunctionsDigestPrefix = types.ConfigDigestPrefixEVM + // In order to support multiple OCR plugins with a single jobspec & OCR2Base contract, each plugin must have a unique config digest. + // This is accomplished by overriding the single config digest from the contract with a unique prefix for each plugin via this custom offchain digester & config poller. 
+ ThresholdDigestPrefix = types.ConfigDigestPrefix(7) + S4DigestPrefix = types.ConfigDigestPrefix(8) +) + +type functionsOffchainConfigDigester struct { + pluginType FunctionsPluginType + chainID uint64 + contractAddress atomic.Pointer[common.Address] +} + +func NewFunctionsOffchainConfigDigester(pluginType FunctionsPluginType, chainID uint64) *functionsOffchainConfigDigester { + return &functionsOffchainConfigDigester{ + pluginType: pluginType, + chainID: chainID, + } +} + +func (d *functionsOffchainConfigDigester) ConfigDigest(cc types.ContractConfig) (types.ConfigDigest, error) { + contractAddress := d.contractAddress.Load() + if contractAddress == nil { + return types.ConfigDigest{}, errors.New("contract address not set") + } + baseDigester := evmutil.EVMOffchainConfigDigester{ + ChainID: d.chainID, + ContractAddress: *contractAddress, + } + + configDigest, err := baseDigester.ConfigDigest(cc) + if err != nil { + return types.ConfigDigest{}, err + } + + var prefix types.ConfigDigestPrefix + switch d.pluginType { + case FunctionsPlugin: + prefix = FunctionsDigestPrefix + case ThresholdPlugin: + prefix = ThresholdDigestPrefix + case S4Plugin: + prefix = S4DigestPrefix + default: + return types.ConfigDigest{}, errors.New("unknown plugin type") + } + + binary.BigEndian.PutUint16(configDigest[:2], uint16(prefix)) + + return configDigest, nil +} + +func (d *functionsOffchainConfigDigester) ConfigDigestPrefix() (types.ConfigDigestPrefix, error) { + switch d.pluginType { + case FunctionsPlugin: + return FunctionsDigestPrefix, nil + case ThresholdPlugin: + return ThresholdDigestPrefix, nil + case S4Plugin: + return S4DigestPrefix, nil + default: + return 0, fmt.Errorf("unknown plugin type: %v", d.pluginType) + } +} + +// called from LogPollerWrapper in a separate goroutine +func (d *functionsOffchainConfigDigester) UpdateRoutes(activeCoordinator common.Address, proposedCoordinator common.Address) error { + d.contractAddress.Store(&activeCoordinator) + return nil +} diff 
--git a/core/services/relay/evm/loop_impl.go b/core/services/relay/evm/loop_impl.go new file mode 100644 index 00000000..5ee611d3 --- /dev/null +++ b/core/services/relay/evm/loop_impl.go @@ -0,0 +1,32 @@ +package evm + +import ( + "github.com/goplugin/plugin-common/pkg/loop" + + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" +) + +//go:generate mockery --quiet --name LoopRelayAdapter --output ./mocks/ --case=underscore +type LoopRelayAdapter interface { + loop.Relayer + Chain() legacyevm.Chain +} +type LoopRelayer struct { + loop.Relayer + ext EVMChainRelayerExtender +} + +var _ loop.Relayer = &LoopRelayer{} + +func NewLoopRelayServerAdapter(r *Relayer, cs EVMChainRelayerExtender) *LoopRelayer { + ra := relay.NewServerAdapter(r, cs) + return &LoopRelayer{ + Relayer: ra, + ext: cs, + } +} + +func (la *LoopRelayer) Chain() legacyevm.Chain { + return la.ext.Chain() +} diff --git a/core/services/relay/evm/median.go b/core/services/relay/evm/median.go new file mode 100644 index 00000000..4ea8d564 --- /dev/null +++ b/core/services/relay/evm/median.go @@ -0,0 +1,105 @@ +package evm + +import ( + "context" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + "github.com/goplugin/libocr/offchainreporting2/reportingplugin/median" + "github.com/goplugin/libocr/offchainreporting2plus/types" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + offchain_aggregator_wrapper "github.com/goplugin/pluginv3.0/v2/core/internal/gethwrappers2/generated/offchainaggregator" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +var _ median.MedianContract = &medianContract{} + +type medianContract struct { + 
services.StateMachine + lggr logger.Logger + configTracker types.ContractConfigTracker + contractCaller *ocr2aggregator.OCR2AggregatorCaller + requestRoundTracker *RequestRoundTracker +} + +func newMedianContract(configTracker types.ContractConfigTracker, contractAddress common.Address, chain legacyevm.Chain, specID int32, db *sqlx.DB, lggr logger.Logger) (*medianContract, error) { + lggr = lggr.Named("MedianContract") + contract, err := offchain_aggregator_wrapper.NewOffchainAggregator(contractAddress, chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "could not instantiate NewOffchainAggregator") + } + + contractFilterer, err := ocr2aggregator.NewOCR2AggregatorFilterer(contractAddress, chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "could not instantiate NewOffchainAggregatorFilterer") + } + + contractCaller, err := ocr2aggregator.NewOCR2AggregatorCaller(contractAddress, chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "could not instantiate NewOffchainAggregatorCaller") + } + + return &medianContract{ + lggr: lggr, + configTracker: configTracker, + contractCaller: contractCaller, + requestRoundTracker: NewRequestRoundTracker( + contract, + contractFilterer, + chain.Client(), + chain.LogBroadcaster(), + specID, + lggr, + db, + NewRoundRequestedDB(db.DB, specID, lggr), + chain.Config().EVM(), + chain.Config().Database(), + ), + }, nil +} +func (oc *medianContract) Start(context.Context) error { + return oc.StartOnce("MedianContract", func() error { + return oc.requestRoundTracker.Start() + }) +} + +func (oc *medianContract) Close() error { + return oc.StopOnce("MedianContract", func() error { + return oc.requestRoundTracker.Close() + }) +} + +func (oc *medianContract) Name() string { return oc.lggr.Name() } + +func (oc *medianContract) HealthReport() map[string]error { + return map[string]error{oc.Name(): oc.Ready()} +} + +func (oc *medianContract) LatestTransmissionDetails(ctx context.Context) 
(ocrtypes.ConfigDigest, uint32, uint8, *big.Int, time.Time, error) { + opts := bind.CallOpts{Context: ctx, Pending: false} + result, err := oc.contractCaller.LatestTransmissionDetails(&opts) + return result.ConfigDigest, result.Epoch, result.Round, result.LatestAnswer, time.Unix(int64(result.LatestTimestamp), 0), errors.Wrap(err, "error getting LatestTransmissionDetails") +} + +// LatestRoundRequested returns the configDigest, epoch, and round from the latest +// RoundRequested event emitted by the contract. LatestRoundRequested may or may not +// return a result if the latest such event was emitted in a block b such that +// b.timestamp < tip.timestamp - lookback. +// +// If no event is found, LatestRoundRequested should return zero values, not an error. +// An error should only be returned if an actual error occurred during execution, +// e.g. because there was an error querying the blockchain or the database. +// +// As an optimization, this function may also return zero values, if no +// RoundRequested event has been emitted after the latest NewTransmission event. 
+func (oc *medianContract) LatestRoundRequested(ctx context.Context, lookback time.Duration) (ocrtypes.ConfigDigest, uint32, uint8, error) { + return oc.requestRoundTracker.LatestRoundRequested(ctx, lookback) +} diff --git a/core/services/relay/evm/median_test.go b/core/services/relay/evm/median_test.go new file mode 100644 index 00000000..2cf69b52 --- /dev/null +++ b/core/services/relay/evm/median_test.go @@ -0,0 +1,47 @@ +package evm + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + commontypes "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +func TestNewMedianProvider(t *testing.T) { + lggr := logger.TestLogger(t) + + chain := mocks.NewChain(t) + chainID := testutils.NewRandomEVMChainID() + chain.On("ID").Return(chainID) + contractID := testutils.NewAddress() + relayer := Relayer{lggr: lggr, chain: chain} + + pargs := commontypes.PluginArgs{} + + t.Run("wrong chainID", func(t *testing.T) { + relayConfigBadChainID := evmtypes.RelayConfig{} + rc, err2 := json.Marshal(&relayConfigBadChainID) + rargs2 := commontypes.RelayArgs{ContractID: contractID.String(), RelayConfig: rc} + require.NoError(t, err2) + _, err2 = relayer.NewMedianProvider(rargs2, pargs) + assert.ErrorContains(t, err2, "chain id in spec does not match") + }) + + t.Run("invalid contractID", func(t *testing.T) { + relayConfig := evmtypes.RelayConfig{ChainID: big.New(chainID)} + rc, err2 := json.Marshal(&relayConfig) + require.NoError(t, err2) + rargsBadContractID := commontypes.RelayArgs{ContractID: "NotAContractID", RelayConfig: rc} + _, err2 = relayer.NewMedianProvider(rargsBadContractID, pargs) + 
assert.ErrorContains(t, err2, "invalid contractID") + }) +} diff --git a/core/services/relay/evm/mercury/config_digest.go b/core/services/relay/evm/mercury/config_digest.go new file mode 100644 index 00000000..81e49b2a --- /dev/null +++ b/core/services/relay/evm/mercury/config_digest.go @@ -0,0 +1,69 @@ +package mercury + +import ( + "encoding/binary" + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/goplugin/wsrpc/credentials" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/exposed_verifier" +) + +func makeConfigDigestArgs() abi.Arguments { + abi, err := abi.JSON(strings.NewReader(exposed_verifier.ExposedVerifierABI)) + if err != nil { + // assertion + panic(fmt.Sprintf("could not parse aggregator ABI: %s", err.Error())) + } + return abi.Methods["exposedConfigDigestFromConfigData"].Inputs +} + +var configDigestArgs = makeConfigDigestArgs() + +func configDigest( + feedID common.Hash, + chainID *big.Int, + contractAddress common.Address, + configCount uint64, + oracles []common.Address, + transmitters []credentials.StaticSizedPublicKey, + f uint8, + onchainConfig []byte, + offchainConfigVersion uint64, + offchainConfig []byte, +) types.ConfigDigest { + msg, err := configDigestArgs.Pack( + feedID, + chainID, + contractAddress, + configCount, + oracles, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ) + if err != nil { + // assertion + panic(err) + } + rawHash := crypto.Keccak256(msg) + configDigest := types.ConfigDigest{} + if n := copy(configDigest[:], rawHash); n != len(configDigest) { + // assertion + panic("copy too little data") + } + binary.BigEndian.PutUint16(configDigest[:2], uint16(types.ConfigDigestPrefixMercuryV02)) + if !(configDigest[0] == 0 || configDigest[1] == 6) { + // assertion + 
panic("unexpected mismatch") + } + return configDigest +} diff --git a/core/services/relay/evm/mercury/config_digest_test.go b/core/services/relay/evm/mercury/config_digest_test.go new file mode 100644 index 00000000..bf434693 --- /dev/null +++ b/core/services/relay/evm/mercury/config_digest_test.go @@ -0,0 +1,210 @@ +package mercury + +import ( + "math/big" + "reflect" + "testing" + "unsafe" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" + "github.com/goplugin/wsrpc/credentials" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/exposed_verifier" +) + +// Adapted from: https://github.com/goplugin/offchain-reporting/blob/991ebe1462fd56826a1ddfb34287d542acb2baee/lib/offchainreporting2/chains/evmutil/config_digest_test.go + +func TestConfigCalculationMatches(t *testing.T) { + key, err := crypto.GenerateKey() + require.NoError(t, err, "could not make private key for EOA owner") + owner, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + require.NoError(t, err) + backend := backends.NewSimulatedBackend( + core.GenesisAlloc{owner.From: {Balance: new(big.Int).Lsh(big.NewInt(1), 60)}}, + ethconfig.Defaults.Miner.GasCeil, + ) + _, _, eoa, err := exposed_verifier.DeployExposedVerifier( + owner, backend, + ) + backend.Commit() + require.NoError(t, err, "could not deploy test EOA") + p := gopter.NewProperties(nil) + p.Property("onchain/offchain config digests match", prop.ForAll( + func( + feedID [32]byte, + chainID uint64, + contractAddress common.Address, + configCount uint64, + oracles []common.Address, + transmitters [][32]byte, + f uint8, + 
onchainConfig []byte, + offchainConfigVersion uint64, + offchainConfig []byte, + ) bool { + chainIDBig := new(big.Int).SetUint64(chainID) + golangDigest := configDigest( + feedID, + chainIDBig, + contractAddress, + configCount, + oracles, + *(*[]credentials.StaticSizedPublicKey)(unsafe.Pointer(&transmitters)), + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ) + + bigChainID := new(big.Int) + bigChainID.SetUint64(chainID) + + solidityDigest, err := eoa.ExposedConfigDigestFromConfigData(nil, + feedID, + bigChainID, + contractAddress, + configCount, + oracles, + transmitters, + f, + onchainConfig, + offchainConfigVersion, + offchainConfig, + ) + require.NoError(t, err, "could not compute solidity version of config digest") + return golangDigest == solidityDigest + }, + GenHash(t), + gen.UInt64(), + GenAddress(t), + gen.UInt64(), + GenAddressArray(t), + GenClientPubKeyArray(t), + gen.UInt8(), + GenBytes(t), + gen.UInt64(), + GenBytes(t), + )) + p.TestingRun(t) +} + +func GenHash(t *testing.T) gopter.Gen { + var byteGens []gopter.Gen + for i := 0; i < 32; i++ { + byteGens = append(byteGens, gen.UInt8()) + } + return gopter.CombineGens(byteGens...).Map( + func(byteArray interface{}) (rv common.Hash) { + array, ok := byteArray.(*gopter.GenResult).Retrieve() + require.True(t, ok, "failed to retrieve gen result") + for i, byteVal := range array.([]interface{}) { + rv[i] = byteVal.(uint8) + } + return rv + }, + ) +} + +func GenHashArray(t *testing.T) gopter.Gen { + return gen.UInt8Range(0, 31).FlatMap( + func(length interface{}) gopter.Gen { + var hashGens []gopter.Gen + for i := uint8(0); i < length.(uint8); i++ { + hashGens = append(hashGens, GenHash(t)) + } + return gopter.CombineGens(hashGens...).Map( + func(hashArray interface{}) (rv []common.Hash) { + array, ok := hashArray.(*gopter.GenResult).Retrieve() + require.True(t, ok, "could not extract hash array") + for _, hashVal := range array.([]interface{}) { + rv = append(rv, hashVal.(common.Hash)) + } 
+ return rv + }, + ) + }, + reflect.ValueOf([]common.Hash{}).Type(), + ) +} + +func GenAddress(t *testing.T) gopter.Gen { + return GenHash(t).Map( + func(hash interface{}) common.Address { + iHash, ok := hash.(*gopter.GenResult).Retrieve() + require.True(t, ok, "failed to retrieve hash") + return common.BytesToAddress(iHash.(common.Hash).Bytes()) + }, + ) +} + +func GenAddressArray(t *testing.T) gopter.Gen { + return GenHashArray(t).Map( + func(hashes interface{}) (rv []common.Address) { + hashArray, ok := hashes.(*gopter.GenResult).Retrieve() + require.True(t, ok, "failed to retrieve hashes") + for _, hash := range hashArray.([]common.Hash) { + rv = append(rv, common.BytesToAddress(hash.Bytes())) + } + return rv + }, + ) +} + +func GenClientPubKey(t *testing.T) gopter.Gen { + return GenHash(t).Map( + func(hash interface{}) (pk [32]byte) { + iHash, ok := hash.(*gopter.GenResult).Retrieve() + require.True(t, ok, "failed to retrieve hash") + copy(pk[:], (iHash.(common.Hash).Bytes())) + return + }, + ) +} + +func GenClientPubKeyArray(t *testing.T) gopter.Gen { + return GenHashArray(t).Map( + func(hashes interface{}) (rv [][32]byte) { + hashArray, ok := hashes.(*gopter.GenResult).Retrieve() + require.True(t, ok, "failed to retrieve hashes") + for _, hash := range hashArray.([]common.Hash) { + pk := [32]byte{} + copy(pk[:], hash.Bytes()) + rv = append(rv, pk) + } + return rv + }, + ) +} + +func GenBytes(t *testing.T) gopter.Gen { + return gen.UInt16Range(0, 2000).FlatMap( + func(length interface{}) gopter.Gen { + var byteGens []gopter.Gen + for i := uint16(0); i < length.(uint16); i++ { + byteGens = append(byteGens, gen.UInt8()) + } + return gopter.CombineGens(byteGens...).Map( + func(byteArray interface{}) []byte { + array, ok := byteArray.(*gopter.GenResult).Retrieve() + require.True(t, ok, "failed to retrieve gen result") + iArray := array.([]interface{}) + rv := make([]byte, len(iArray)) + for i, byteVal := range iArray { + rv[i] = byteVal.(uint8) + } + return rv + 
}, + ) + }, + reflect.ValueOf([]byte{}).Type(), + ) +} diff --git a/core/services/relay/evm/mercury/config_poller.go b/core/services/relay/evm/mercury/config_poller.go new file mode 100644 index 00000000..0827428a --- /dev/null +++ b/core/services/relay/evm/mercury/config_poller.go @@ -0,0 +1,177 @@ +package mercury + +import ( + "context" + "database/sql" + "fmt" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/verifier" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" +) + +// FeedScopedConfigSet ConfigSet with FeedID for use with mercury (and multi-config DON) +var FeedScopedConfigSet common.Hash + +var verifierABI abi.ABI + +const ( + configSetEventName = "ConfigSet" + feedIdTopicIndex = 1 +) + +func init() { + var err error + verifierABI, err = abi.JSON(strings.NewReader(verifier.VerifierABI)) + if err != nil { + panic(err) + } + FeedScopedConfigSet = verifierABI.Events[configSetEventName].ID +} + +// FullConfigFromLog defines the contract config with the feedID +type FullConfigFromLog struct { + ocrtypes.ContractConfig + feedID utils.FeedID +} + +func unpackLogData(d []byte) (*verifier.VerifierConfigSet, error) { + unpacked := new(verifier.VerifierConfigSet) + + err := verifierABI.UnpackIntoInterface(unpacked, configSetEventName, d) + if err != nil { + return nil, errors.Wrap(err, "failed to unpack log data") + } + + return unpacked, nil +} + +func configFromLog(logData []byte) (FullConfigFromLog, error) { + unpacked, err := unpackLogData(logData) + if err != nil { + return FullConfigFromLog{}, err + } + + var transmitAccounts 
[]ocrtypes.Account + for _, addr := range unpacked.OffchainTransmitters { + transmitAccounts = append(transmitAccounts, ocrtypes.Account(fmt.Sprintf("%x", addr))) + } + var signers []ocrtypes.OnchainPublicKey + for _, addr := range unpacked.Signers { + addr := addr + signers = append(signers, addr[:]) + } + + return FullConfigFromLog{ + feedID: unpacked.FeedId, + ContractConfig: ocrtypes.ContractConfig{ + ConfigDigest: unpacked.ConfigDigest, + ConfigCount: unpacked.ConfigCount, + Signers: signers, + Transmitters: transmitAccounts, + F: unpacked.F, + OnchainConfig: unpacked.OnchainConfig, + OffchainConfigVersion: unpacked.OffchainConfigVersion, + OffchainConfig: unpacked.OffchainConfig, + }, + }, nil +} + +// ConfigPoller defines the Mercury Config Poller +type ConfigPoller struct { + lggr logger.Logger + destChainLogPoller logpoller.LogPoller + addr common.Address + feedId common.Hash +} + +func FilterName(addr common.Address, feedID common.Hash) string { + return logpoller.FilterName("OCR3 Mercury ConfigPoller", addr.String(), feedID.Hex()) +} + +// NewConfigPoller creates a new Mercury ConfigPoller +func NewConfigPoller(lggr logger.Logger, destChainPoller logpoller.LogPoller, addr common.Address, feedId common.Hash) (*ConfigPoller, error) { + err := destChainPoller.RegisterFilter(logpoller.Filter{Name: FilterName(addr, feedId), EventSigs: []common.Hash{FeedScopedConfigSet}, Addresses: []common.Address{addr}}) + if err != nil { + return nil, err + } + + cp := &ConfigPoller{ + lggr: lggr, + destChainLogPoller: destChainPoller, + addr: addr, + feedId: feedId, + } + + return cp, nil +} + +func (cp *ConfigPoller) Start() {} + +func (cp *ConfigPoller) Close() error { + return nil +} + +func (cp *ConfigPoller) Notify() <-chan struct{} { + return nil // rely on libocr's builtin config polling +} + +// Replay abstracts the logpoller.LogPoller Replay() implementation +func (cp *ConfigPoller) Replay(ctx context.Context, fromBlock int64) error { + return 
cp.destChainLogPoller.Replay(ctx, fromBlock) +} + +// LatestConfigDetails returns the latest config details from the logs +func (cp *ConfigPoller) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { + cp.lggr.Debugw("LatestConfigDetails", "eventSig", FeedScopedConfigSet, "addr", cp.addr, "topicIndex", feedIdTopicIndex, "feedID", cp.feedId) + logs, err := cp.destChainLogPoller.IndexedLogs(FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId}, 1, pg.WithParentCtx(ctx)) + if err != nil { + return 0, ocrtypes.ConfigDigest{}, err + } + if len(logs) == 0 { + return 0, ocrtypes.ConfigDigest{}, nil + } + latest := logs[len(logs)-1] + latestConfigSet, err := configFromLog(latest.Data) + if err != nil { + return 0, ocrtypes.ConfigDigest{}, err + } + return uint64(latest.BlockNumber), latestConfigSet.ConfigDigest, nil +} + +// LatestConfig returns the latest config from the logs on a certain block +func (cp *ConfigPoller) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) { + lgs, err := cp.destChainLogPoller.IndexedLogsByBlockRange(int64(changedInBlock), int64(changedInBlock), FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId}, pg.WithParentCtx(ctx)) + if err != nil { + return ocrtypes.ContractConfig{}, err + } + if len(lgs) == 0 { + return ocrtypes.ContractConfig{}, nil + } + latestConfigSet, err := configFromLog(lgs[len(lgs)-1].Data) + if err != nil { + return ocrtypes.ContractConfig{}, err + } + cp.lggr.Infow("LatestConfig", "latestConfig", latestConfigSet) + return latestConfigSet.ContractConfig, nil +} + +// LatestBlockHeight returns the latest block height from the logs +func (cp *ConfigPoller) LatestBlockHeight(ctx context.Context) (blockHeight uint64, err error) { + latest, err := cp.destChainLogPoller.LatestBlock(pg.WithParentCtx(ctx)) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return 0, nil + } + return 0, err + 
} + return uint64(latest.BlockNumber), nil +} diff --git a/core/services/relay/evm/mercury/config_poller_test.go b/core/services/relay/evm/mercury/config_poller_test.go new file mode 100644 index 00000000..62decbb4 --- /dev/null +++ b/core/services/relay/evm/mercury/config_poller_test.go @@ -0,0 +1,123 @@ +package mercury + +import ( + "fmt" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/onsi/gomega" + "github.com/pkg/errors" + confighelper2 "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + "github.com/goplugin/libocr/offchainreporting2plus/types" + ocrtypes2 "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/umbracle/ethgo/abi" + + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestMercuryConfigPoller(t *testing.T) { + feedID := evmutils.NewHash() + feedIDBytes := [32]byte(feedID) + + th := SetupTH(t, feedID) + + notify := th.configPoller.Notify() + assert.Empty(t, notify) + + // Should have no config to begin with. + _, config, err := th.configPoller.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + require.Equal(t, ocrtypes2.ConfigDigest{}, config) + + // Create minimum number of nodes. 
+ n := 4 + var oracles []confighelper2.OracleIdentityExtra + for i := 0; i < n; i++ { + oracles = append(oracles, confighelper2.OracleIdentityExtra{ + OracleIdentity: confighelper2.OracleIdentity{ + OnchainPublicKey: evmutils.RandomAddress().Bytes(), + TransmitAccount: ocrtypes2.Account(evmutils.RandomAddress().String()), + OffchainPublicKey: evmutils.RandomBytes32(), + PeerID: utils.MustNewPeerID(), + }, + ConfigEncryptionPublicKey: evmutils.RandomBytes32(), + }) + } + f := uint8(1) + // Setup config on contract + configType := abi.MustNewType("tuple()") + onchainConfigVal, err := abi.Encode(map[string]interface{}{}, configType) + require.NoError(t, err) + signers, _, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper2.ContractSetConfigArgsForTests( + 2*time.Second, // DeltaProgress + 20*time.Second, // DeltaResend + 100*time.Millisecond, // DeltaRound + 0, // DeltaGrace + 1*time.Minute, // DeltaStage + 100, // rMax + []int{len(oracles)}, // S + oracles, + []byte{}, // reportingPluginConfig []byte, + 0, // Max duration query + 250*time.Millisecond, // Max duration observation + 250*time.Millisecond, // MaxDurationReport + 250*time.Millisecond, // MaxDurationShouldAcceptFinalizedReport + 250*time.Millisecond, // MaxDurationShouldTransmitAcceptedReport + int(f), // f + onchainConfigVal, + ) + require.NoError(t, err) + signerAddresses, err := onchainPublicKeyToAddress(signers) + require.NoError(t, err) + offchainTransmitters := make([][32]byte, n) + encodedTransmitter := make([]ocrtypes2.Account, n) + for i := 0; i < n; i++ { + offchainTransmitters[i] = oracles[i].OffchainPublicKey + encodedTransmitter[i] = ocrtypes2.Account(fmt.Sprintf("%x", oracles[i].OffchainPublicKey[:])) + } + + _, err = th.verifierContract.SetConfig(th.user, feedIDBytes, signerAddresses, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, nil) + require.NoError(t, err, "failed to setConfig with feed ID") + th.backend.Commit() + + 
latest, err := th.backend.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + // Ensure we capture this config set log. + require.NoError(t, th.logPoller.Replay(testutils.Context(t), latest.Number().Int64()-1)) + + // Send blocks until we see the config updated. + var configBlock uint64 + var digest [32]byte + gomega.NewGomegaWithT(t).Eventually(func() bool { + th.backend.Commit() + configBlock, digest, err = th.configPoller.LatestConfigDetails(testutils.Context(t)) + require.NoError(t, err) + return ocrtypes2.ConfigDigest{} != digest + }, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue()) + + // Assert the config returned is the one we configured. + newConfig, err := th.configPoller.LatestConfig(testutils.Context(t), configBlock) + require.NoError(t, err) + // Note we don't check onchainConfig, as that is populated in the contract itself. + assert.Equal(t, digest, [32]byte(newConfig.ConfigDigest)) + assert.Equal(t, signers, newConfig.Signers) + assert.Equal(t, threshold, newConfig.F) + assert.Equal(t, encodedTransmitter, newConfig.Transmitters) + assert.Equal(t, offchainConfigVersion, newConfig.OffchainConfigVersion) + assert.Equal(t, offchainConfig, newConfig.OffchainConfig) +} + +func onchainPublicKeyToAddress(publicKeys []types.OnchainPublicKey) (addresses []common.Address, err error) { + for _, signer := range publicKeys { + if len(signer) != 20 { + return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer) + } + addresses = append(addresses, common.BytesToAddress(signer)) + } + return addresses, nil +} diff --git a/core/services/relay/evm/mercury/helpers_test.go b/core/services/relay/evm/mercury/helpers_test.go new file mode 100644 index 00000000..1025986b --- /dev/null +++ b/core/services/relay/evm/mercury/helpers_test.go @@ -0,0 +1,186 @@ +package mercury + +import ( + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + 
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/offchainreporting2plus/chains/evmutil" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/verifier" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + reportcodecv1 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v1/reportcodec" + reportcodecv2 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v2/reportcodec" + reportcodecv3 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v3/reportcodec" +) + +var ( + sampleFeedID = [32]uint8{28, 145, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114} + sampleClientPubKey = hexutil.MustDecode("0x724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93") +) + +var sampleReports [][]byte + +var ( + sampleV1Report = buildSampleV1Report(242) + sampleV2Report = buildSampleV2Report(242) + sampleV3Report = buildSampleV3Report(242) + sig2 = ocrtypes.AttributedOnchainSignature{Signature: 
testutils.MustDecodeBase64("kbeuRczizOJCxBzj7MUAFpz3yl2WRM6K/f0ieEBvA+oTFUaKslbQey10krumVjzAvlvKxMfyZo0WkOgNyfF6xwE="), Signer: 2} + sig3 = ocrtypes.AttributedOnchainSignature{Signature: testutils.MustDecodeBase64("9jz4b6Dh2WhXxQ97a6/S9UNjSfrEi9016XKTrfN0mLQFDiNuws23x7Z4n+6g0sqKH/hnxx1VukWUH/ohtw83/wE="), Signer: 3} + sampleSigs = []ocrtypes.AttributedOnchainSignature{sig2, sig3} + sampleReportContext = ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: MustHexToConfigDigest("0x0006fc30092226b37f6924b464e16a54a7978a9a524519a73403af64d487dc45"), + Epoch: 6, + Round: 28, + }, + ExtraHash: [32]uint8{27, 144, 106, 73, 166, 228, 123, 166, 179, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}, + } +) + +func init() { + sampleReports = make([][]byte, 4) + for i := 0; i < len(sampleReports); i++ { + sampleReports[i] = buildSampleV1Report(int64(i)) + } +} + +func buildSampleV1Report(p int64) []byte { + feedID := sampleFeedID + timestamp := uint32(42) + bp := big.NewInt(p) + bid := big.NewInt(243) + ask := big.NewInt(244) + currentBlockNumber := uint64(143) + currentBlockHash := utils.NewHash() + currentBlockTimestamp := uint64(123) + validFromBlockNum := uint64(142) + + b, err := reportcodecv1.ReportTypes.Pack(feedID, timestamp, bp, bid, ask, currentBlockNumber, currentBlockHash, currentBlockTimestamp, validFromBlockNum) + if err != nil { + panic(err) + } + return b +} + +func buildSampleV2Report(ts int64) []byte { + feedID := sampleFeedID + timestamp := uint32(ts) + bp := big.NewInt(242) + validFromTimestamp := uint32(123) + expiresAt := uint32(456) + linkFee := big.NewInt(3334455) + nativeFee := big.NewInt(556677) + + b, err := reportcodecv2.ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp) + if err != nil { + panic(err) + } + return b +} + +func buildSampleV3Report(ts int64) []byte { + feedID := sampleFeedID + timestamp := uint32(ts) + bp 
:= big.NewInt(242) + bid := big.NewInt(243) + ask := big.NewInt(244) + validFromTimestamp := uint32(123) + expiresAt := uint32(456) + linkFee := big.NewInt(3334455) + nativeFee := big.NewInt(556677) + + b, err := reportcodecv3.ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp, bid, ask) + if err != nil { + panic(err) + } + return b +} + +func buildSamplePayload(report []byte) []byte { + var rs [][32]byte + var ss [][32]byte + var vs [32]byte + for i, as := range sampleSigs { + r, s, v, err := evmutil.SplitSignature(as.Signature) + if err != nil { + panic("eventTransmit(ev): error in SplitSignature") + } + rs = append(rs, r) + ss = append(ss, s) + vs[i] = v + } + rawReportCtx := evmutil.RawReportContext(sampleReportContext) + payload, err := PayloadTypes.Pack(rawReportCtx, report, rs, ss, vs) + if err != nil { + panic(err) + } + return payload +} + +type TestHarness struct { + configPoller *ConfigPoller + user *bind.TransactOpts + backend *backends.SimulatedBackend + verifierAddress common.Address + verifierContract *verifier.Verifier + logPoller logpoller.LogPoller +} + +func SetupTH(t *testing.T, feedID common.Hash) TestHarness { + key, err := crypto.GenerateKey() + require.NoError(t, err) + user, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + require.NoError(t, err) + b := backends.NewSimulatedBackend(core.GenesisAlloc{ + user.From: {Balance: big.NewInt(1000000000000000000)}}, + 5*ethconfig.Defaults.Miner.GasCeil) + + proxyAddress, _, verifierProxy, err := verifier_proxy.DeployVerifierProxy(user, b, common.Address{}) + require.NoError(t, err, "failed to deploy test mercury verifier proxy contract") + verifierAddress, _, verifierContract, err := verifier.DeployVerifier(user, b, proxyAddress) + require.NoError(t, err, "failed to deploy test mercury verifier contract") + _, err = verifierProxy.InitializeVerifier(user, verifierAddress) + require.NoError(t, err) + b.Commit() + + db := pgtest.NewSqlxDB(t) + 
cfg := pgtest.NewQConfig(false) + ethClient := evmclient.NewSimulatedBackendClient(t, b, big.NewInt(1337)) + lggr := logger.TestLogger(t) + lorm := logpoller.NewORM(big.NewInt(1337), db, lggr, cfg) + lp := logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, false, 1, 2, 2, 1000) + servicetest.Run(t, lp) + + configPoller, err := NewConfigPoller(lggr, lp, verifierAddress, feedID) + require.NoError(t, err) + + configPoller.Start() + + return TestHarness{ + configPoller: configPoller, + user: user, + backend: b, + verifierAddress: verifierAddress, + verifierContract: verifierContract, + logPoller: lp, + } +} diff --git a/core/services/relay/evm/mercury/mocks/async_deleter.go b/core/services/relay/evm/mercury/mocks/async_deleter.go new file mode 100644 index 00000000..150f2159 --- /dev/null +++ b/core/services/relay/evm/mercury/mocks/async_deleter.go @@ -0,0 +1,32 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + pb "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" + mock "github.com/stretchr/testify/mock" +) + +// AsyncDeleter is an autogenerated mock type for the asyncDeleter type +type AsyncDeleter struct { + mock.Mock +} + +// AsyncDelete provides a mock function with given fields: req +func (_m *AsyncDeleter) AsyncDelete(req *pb.TransmitRequest) { + _m.Called(req) +} + +// NewAsyncDeleter creates a new instance of AsyncDeleter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewAsyncDeleter(t interface { + mock.TestingT + Cleanup(func()) +}) *AsyncDeleter { + mock := &AsyncDeleter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/relay/evm/mercury/mocks/pipeline.go b/core/services/relay/evm/mercury/mocks/pipeline.go new file mode 100644 index 00000000..2913dd2f --- /dev/null +++ b/core/services/relay/evm/mercury/mocks/pipeline.go @@ -0,0 +1,39 @@ +package mocks + +import ( + "context" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +type MockRunner struct { + Trrs pipeline.TaskRunResults + Err error +} + +func (m *MockRunner) ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) { + return &pipeline.Run{ID: 42}, m.Trrs, m.Err +} + +var _ pipeline.Task = &MockTask{} + +type MockTask struct { + result pipeline.Result +} + +func (m *MockTask) Type() pipeline.TaskType { return "MockTask" } +func (m *MockTask) ID() int { return 0 } +func (m *MockTask) DotID() string { return "" } +func (m *MockTask) Run(ctx context.Context, lggr logger.Logger, vars pipeline.Vars, inputs []pipeline.Result) (pipeline.Result, pipeline.RunInfo) { + return m.result, pipeline.RunInfo{} +} +func (m *MockTask) Base() *pipeline.BaseTask { return nil } +func (m *MockTask) Outputs() []pipeline.Task { return nil } +func (m *MockTask) Inputs() []pipeline.TaskDependency { return nil } +func (m *MockTask) OutputIndex() int32 { return 0 } +func (m *MockTask) TaskTimeout() (time.Duration, bool) { return 0, false } +func (m *MockTask) TaskRetries() uint32 { return 0 } +func (m *MockTask) TaskMinBackoff() time.Duration { return 0 } +func (m *MockTask) TaskMaxBackoff() time.Duration { return 0 } diff --git a/core/services/relay/evm/mercury/offchain_config_digester.go b/core/services/relay/evm/mercury/offchain_config_digester.go 
new file mode 100644 index 00000000..cc33c576 --- /dev/null +++ b/core/services/relay/evm/mercury/offchain_config_digester.go @@ -0,0 +1,71 @@ +package mercury + +import ( + "crypto/ed25519" + "encoding/hex" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/goplugin/wsrpc/credentials" + + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" +) + +// Originally sourced from: https://github.com/goplugin/offchain-reporting/blob/991ebe1462fd56826a1ddfb34287d542acb2baee/lib/offchainreporting2/chains/evmutil/offchain_config_digester.go + +var _ ocrtypes.OffchainConfigDigester = OffchainConfigDigester{} + +func NewOffchainConfigDigester(feedID [32]byte, chainID *big.Int, contractAddress common.Address) OffchainConfigDigester { + return OffchainConfigDigester{feedID, chainID, contractAddress} +} + +type OffchainConfigDigester struct { + FeedID utils.FeedID + ChainID *big.Int + ContractAddress common.Address +} + +func (d OffchainConfigDigester) ConfigDigest(cc ocrtypes.ContractConfig) (ocrtypes.ConfigDigest, error) { + signers := []common.Address{} + for i, signer := range cc.Signers { + if len(signer) != 20 { + return ocrtypes.ConfigDigest{}, errors.Errorf("%v-th evm signer should be a 20 byte address, but got %x", i, signer) + } + a := common.BytesToAddress(signer) + signers = append(signers, a) + } + transmitters := []credentials.StaticSizedPublicKey{} + for i, transmitter := range cc.Transmitters { + if len(transmitter) != 2*ed25519.PublicKeySize { + return ocrtypes.ConfigDigest{}, errors.Errorf("%v-th evm transmitter should be a 64 character hex-encoded ed25519 public key, but got '%v' (%d chars)", i, transmitter, len(transmitter)) + } + var t credentials.StaticSizedPublicKey + b, err := hex.DecodeString(string(transmitter)) + if err != nil { + return ocrtypes.ConfigDigest{}, errors.Wrapf(err, "%v-th evm transmitter is not valid 
hex, got: %q", i, transmitter) + } + copy(t[:], b) + + transmitters = append(transmitters, t) + } + + return configDigest( + common.Hash(d.FeedID), + d.ChainID, + d.ContractAddress, + cc.ConfigCount, + signers, + transmitters, + cc.F, + cc.OnchainConfig, + cc.OffchainConfigVersion, + cc.OffchainConfig, + ), nil +} + +func (d OffchainConfigDigester) ConfigDigestPrefix() (ocrtypes.ConfigDigestPrefix, error) { + return ocrtypes.ConfigDigestPrefixMercuryV02, nil +} diff --git a/core/services/relay/evm/mercury/offchain_config_digester_test.go b/core/services/relay/evm/mercury/offchain_config_digester_test.go new file mode 100644 index 00000000..986b55af --- /dev/null +++ b/core/services/relay/evm/mercury/offchain_config_digester_test.go @@ -0,0 +1,55 @@ +package mercury + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/require" +) + +func Test_OffchainConfigDigester_ConfigDigest(t *testing.T) { + // ChainID and ContractAddress are taken into account for computation + cd1, err := OffchainConfigDigester{ChainID: big.NewInt(0)}.ConfigDigest(types.ContractConfig{}) + require.NoError(t, err) + cd2, err := OffchainConfigDigester{ChainID: big.NewInt(0)}.ConfigDigest(types.ContractConfig{}) + require.NoError(t, err) + cd3, err := OffchainConfigDigester{ChainID: big.NewInt(1)}.ConfigDigest(types.ContractConfig{}) + require.NoError(t, err) + cd4, err := OffchainConfigDigester{ChainID: big.NewInt(1), ContractAddress: common.Address{1}}.ConfigDigest(types.ContractConfig{}) + require.NoError(t, err) + + require.Equal(t, cd1, cd2) + require.NotEqual(t, cd2, cd3) + require.NotEqual(t, cd2, cd4) + require.NotEqual(t, cd3, cd4) + + // malformed signers + _, err = OffchainConfigDigester{}.ConfigDigest(types.ContractConfig{ + Signers: []types.OnchainPublicKey{{1, 2}}, + }) + require.Error(t, err) + + // malformed transmitters + _, err = 
OffchainConfigDigester{}.ConfigDigest(types.ContractConfig{ + Transmitters: []types.Account{"0x"}, + }) + require.Error(t, err) + + _, err = OffchainConfigDigester{}.ConfigDigest(types.ContractConfig{ + Transmitters: []types.Account{"7343581f55146951b0f678dc6cfa8fd360e2f353"}, + }) + require.Error(t, err) + + _, err = OffchainConfigDigester{}.ConfigDigest(types.ContractConfig{ + Transmitters: []types.Account{"7343581f55146951b0f678dc6cfa8fd360e2f353aabbccddeeffaaccddeeffaz"}, + }) + require.Error(t, err) + + // well-formed transmitters + _, err = OffchainConfigDigester{ChainID: big.NewInt(0)}.ConfigDigest(types.ContractConfig{ + Transmitters: []types.Account{"7343581f55146951b0f678dc6cfa8fd360e2f353aabbccddeeffaaccddeeffaa"}, + }) + require.NoError(t, err) +} diff --git a/core/services/relay/evm/mercury/orm.go b/core/services/relay/evm/mercury/orm.go new file mode 100644 index 00000000..6cfcc68f --- /dev/null +++ b/core/services/relay/evm/mercury/orm.go @@ -0,0 +1,177 @@ +package mercury + +import ( + "context" + "crypto/sha256" + "database/sql" + "errors" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/jmoiron/sqlx" + "github.com/lib/pq" + pkgerrors "github.com/pkg/errors" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" +) + +type ORM interface { + InsertTransmitRequest(req *pb.TransmitRequest, jobID int32, reportCtx ocrtypes.ReportContext, qopts ...pg.QOpt) error + DeleteTransmitRequests(reqs []*pb.TransmitRequest, qopts ...pg.QOpt) error + GetTransmitRequests(jobID int32, qopts ...pg.QOpt) ([]*Transmission, error) + PruneTransmitRequests(jobID int32, maxSize int, qopts ...pg.QOpt) error + LatestReport(ctx context.Context, feedID [32]byte, qopts ...pg.QOpt) 
(report []byte, err error) +} + +func FeedIDFromReport(report ocrtypes.Report) (feedID utils.FeedID, err error) { + if n := copy(feedID[:], report); n != 32 { + return feedID, pkgerrors.Errorf("invalid length for report: %d", len(report)) + } + return feedID, nil +} + +type orm struct { + q pg.Q +} + +func NewORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig) ORM { + namedLogger := lggr.Named("MercuryORM") + q := pg.NewQ(db, namedLogger, cfg) + return &orm{ + q: q, + } +} + +// InsertTransmitRequest inserts one transmit request if the payload does not exist already. +func (o *orm) InsertTransmitRequest(req *pb.TransmitRequest, jobID int32, reportCtx ocrtypes.ReportContext, qopts ...pg.QOpt) error { + feedID, err := FeedIDFromReport(req.Payload) + if err != nil { + return err + } + + q := o.q.WithOpts(qopts...) + var wg sync.WaitGroup + wg.Add(2) + var err1, err2 error + + go func() { + defer wg.Done() + err1 = q.ExecQ(` + INSERT INTO mercury_transmit_requests (payload, payload_hash, config_digest, epoch, round, extra_hash, job_id, feed_id) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + ON CONFLICT (payload_hash) DO NOTHING + `, req.Payload, hashPayload(req.Payload), reportCtx.ConfigDigest[:], reportCtx.Epoch, reportCtx.Round, reportCtx.ExtraHash[:], jobID, feedID[:]) + }() + + go func() { + defer wg.Done() + err2 = q.ExecQ(` + INSERT INTO feed_latest_reports (feed_id, report, epoch, round, updated_at, job_id) + VALUES ($1, $2, $3, $4, NOW(), $5) + ON CONFLICT (feed_id) DO UPDATE + SET feed_id=$1, report=$2, epoch=$3, round=$4, updated_at=NOW() + WHERE excluded.epoch > feed_latest_reports.epoch OR (excluded.epoch = feed_latest_reports.epoch AND excluded.round > feed_latest_reports.round) + `, feedID[:], req.Payload, reportCtx.Epoch, reportCtx.Round, jobID) + }() + wg.Wait() + return errors.Join(err1, err2) +} + +// DeleteTransmitRequest deletes the given transmit requests if they exist. 
+func (o *orm) DeleteTransmitRequests(reqs []*pb.TransmitRequest, qopts ...pg.QOpt) error { + if len(reqs) == 0 { + return nil + } + + var hashes pq.ByteaArray + for _, req := range reqs { + hashes = append(hashes, hashPayload(req.Payload)) + } + + q := o.q.WithOpts(qopts...) + err := q.ExecQ(` + DELETE FROM mercury_transmit_requests + WHERE payload_hash = ANY($1) + `, hashes) + return err +} + +// GetTransmitRequests returns all transmit requests in chronologically descending order. +func (o *orm) GetTransmitRequests(jobID int32, qopts ...pg.QOpt) ([]*Transmission, error) { + q := o.q.WithOpts(qopts...) + // The priority queue uses epoch and round to sort transmissions so order by + // the same fields here for optimal insertion into the pq. + rows, err := q.QueryContext(q.ParentCtx, ` + SELECT payload, config_digest, epoch, round, extra_hash + FROM mercury_transmit_requests + WHERE job_id = $1 + ORDER BY epoch DESC, round DESC + `, jobID) + if err != nil { + return nil, err + } + defer rows.Close() + + var transmissions []*Transmission + for rows.Next() { + transmission := &Transmission{Req: &pb.TransmitRequest{}} + var digest, extraHash common.Hash + + err := rows.Scan( + &transmission.Req.Payload, + &digest, + &transmission.ReportCtx.Epoch, + &transmission.ReportCtx.Round, + &extraHash, + ) + if err != nil { + return nil, err + } + transmission.ReportCtx.ConfigDigest = ocrtypes.ConfigDigest(digest) + transmission.ReportCtx.ExtraHash = extraHash + + transmissions = append(transmissions, transmission) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return transmissions, nil +} + +// PruneTransmitRequests keeps at most maxSize rows for the given job ID, +// deleting the oldest transactions. +func (o *orm) PruneTransmitRequests(jobID int32, maxSize int, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + // Prune the oldest requests by epoch and round. 
+ return q.ExecQ(` + DELETE FROM mercury_transmit_requests + WHERE job_id = $1 AND + payload_hash NOT IN ( + SELECT payload_hash + FROM mercury_transmit_requests + WHERE job_id = $1 + ORDER BY epoch DESC, round DESC + LIMIT $2 + ) + `, jobID, maxSize) +} + +func (o *orm) LatestReport(ctx context.Context, feedID [32]byte, qopts ...pg.QOpt) (report []byte, err error) { + q := o.q.WithOpts(qopts...) + err = q.GetContext(ctx, &report, `SELECT report FROM feed_latest_reports WHERE feed_id = $1`, feedID[:]) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return report, err +} + +func hashPayload(payload []byte) []byte { + checksum := sha256.Sum256(payload) + return checksum[:] +} diff --git a/core/services/relay/evm/mercury/orm_test.go b/core/services/relay/evm/mercury/orm_test.go new file mode 100644 index 00000000..8452aef8 --- /dev/null +++ b/core/services/relay/evm/mercury/orm_test.go @@ -0,0 +1,292 @@ +package mercury + +import ( + "testing" + + "github.com/cometbft/cometbft/libs/rand" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" +) + +func TestORM(t *testing.T) { + db := pgtest.NewSqlxDB(t) + + jobID := rand.Int32() // foreign key constraints disabled so value doesn't matter + pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + lggr := logger.TestLogger(t) + orm := NewORM(db, lggr, pgtest.NewQConfig(true)) + feedID := sampleFeedID + + reports := sampleReports + reportContexts := make([]ocrtypes.ReportContext, 4) + for i := range reportContexts { + reportContexts[i] = 
ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: ocrtypes.ConfigDigest{'1'}, + Epoch: 10, + Round: uint8(i), + }, + ExtraHash: [32]byte{'2'}, + } + } + + l, err := orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Nil(t, l) + + // Test insert and get requests. + err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[0]}, jobID, reportContexts[0]) + require.NoError(t, err) + err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[1]}, jobID, reportContexts[1]) + require.NoError(t, err) + err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[2]}, jobID, reportContexts[2]) + require.NoError(t, err) + + transmissions, err := orm.GetTransmitRequests(jobID) + require.NoError(t, err) + require.Equal(t, transmissions, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[2]}, ReportCtx: reportContexts[2]}, + {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: reportContexts[1]}, + {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]}, + }) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.NotEqual(t, reports[0], l) + assert.Equal(t, reports[2], l) + + // Test requests can be deleted. + err = orm.DeleteTransmitRequests([]*pb.TransmitRequest{{Payload: reports[1]}}) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(jobID) + require.NoError(t, err) + require.Equal(t, transmissions, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[2]}, ReportCtx: reportContexts[2]}, + {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]}, + }) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[2], l) + + // Test deleting non-existent requests does not error. 
+ err = orm.DeleteTransmitRequests([]*pb.TransmitRequest{{Payload: []byte("does-not-exist")}}) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(jobID) + require.NoError(t, err) + require.Equal(t, transmissions, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[2]}, ReportCtx: reportContexts[2]}, + {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]}, + }) + + // Test deleting multiple requests. + err = orm.DeleteTransmitRequests([]*pb.TransmitRequest{ + {Payload: reports[0]}, + {Payload: reports[2]}, + }) + require.NoError(t, err) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[2], l) + + transmissions, err = orm.GetTransmitRequests(jobID) + require.NoError(t, err) + require.Empty(t, transmissions) + + // More inserts. + err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[3]}, jobID, reportContexts[3]) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(jobID) + require.NoError(t, err) + require.Equal(t, transmissions, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[3]}, ReportCtx: reportContexts[3]}, + }) + + // Duplicate requests are ignored. 
+ err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[3]}, jobID, reportContexts[3]) + require.NoError(t, err) + err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[3]}, jobID, reportContexts[3]) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(jobID) + require.NoError(t, err) + require.Equal(t, transmissions, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[3]}, ReportCtx: reportContexts[3]}, + }) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[3], l) +} + +func TestORM_PruneTransmitRequests(t *testing.T) { + db := pgtest.NewSqlxDB(t) + jobID := rand.Int32() // foreign key constraints disabled so value doesn't matter + pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + + lggr := logger.TestLogger(t) + orm := NewORM(db, lggr, pgtest.NewQConfig(true)) + + reports := sampleReports + + makeReportContext := func(epoch uint32, round uint8) ocrtypes.ReportContext { + return ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: ocrtypes.ConfigDigest{'1'}, + Epoch: epoch, + Round: round, + }, + ExtraHash: [32]byte{'2'}, + } + } + + err := orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext(1, 1)) + require.NoError(t, err) + err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[1]}, jobID, makeReportContext(1, 2)) + require.NoError(t, err) + + // Max size greater than table size, expect no-op + err = orm.PruneTransmitRequests(jobID, 5) + require.NoError(t, err) + + transmissions, err := orm.GetTransmitRequests(jobID) + require.NoError(t, err) + require.Equal(t, transmissions, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: makeReportContext(1, 2)}, + {Req: &pb.TransmitRequest{Payload: reports[0]}, 
ReportCtx: makeReportContext(1, 1)}, + }) + + // Max size equal to table size, expect no-op + err = orm.PruneTransmitRequests(jobID, 2) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(jobID) + require.NoError(t, err) + require.Equal(t, transmissions, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: makeReportContext(1, 2)}, + {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: makeReportContext(1, 1)}, + }) + + // Max size is table size + 1, but jobID differs, expect no-op + err = orm.PruneTransmitRequests(-1, 2) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(jobID) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: makeReportContext(1, 2)}, + {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: makeReportContext(1, 1)}, + }, transmissions) + + err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[2]}, jobID, makeReportContext(2, 1)) + require.NoError(t, err) + err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[3]}, jobID, makeReportContext(2, 2)) + require.NoError(t, err) + + // Max size is table size - 1, expect the oldest row to be pruned. 
+ err = orm.PruneTransmitRequests(jobID, 3) + require.NoError(t, err) + + transmissions, err = orm.GetTransmitRequests(jobID) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[3]}, ReportCtx: makeReportContext(2, 2)}, + {Req: &pb.TransmitRequest{Payload: reports[2]}, ReportCtx: makeReportContext(2, 1)}, + {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: makeReportContext(1, 2)}, + }, transmissions) +} + +func TestORM_InsertTransmitRequest_LatestReport(t *testing.T) { + db := pgtest.NewSqlxDB(t) + jobID := rand.Int32() // foreign key constraints disabled so value doesn't matter + pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + + lggr := logger.TestLogger(t) + orm := NewORM(db, lggr, pgtest.NewQConfig(true)) + feedID := sampleFeedID + + reports := sampleReports + + makeReportContext := func(epoch uint32, round uint8) ocrtypes.ReportContext { + return ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: ocrtypes.ConfigDigest{'1'}, + Epoch: epoch, + Round: round, + }, + ExtraHash: [32]byte{'2'}, + } + } + + err := orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext( + 0, 0, + )) + require.NoError(t, err) + + l, err := orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[0], l) + + t.Run("replaces if epoch and round are larger", func(t *testing.T) { + err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[1]}, jobID, makeReportContext(1, 1)) + require.NoError(t, err) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[1], l) + }) + t.Run("replaces if epoch is the same but round is greater", func(t *testing.T) { + err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[2]}, 
jobID, makeReportContext(1, 2)) + require.NoError(t, err) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[2], l) + }) + t.Run("replaces if epoch is larger but round is smaller", func(t *testing.T) { + err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[3]}, jobID, makeReportContext(2, 1)) + require.NoError(t, err) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[3], l) + }) + t.Run("does not overwrite if epoch/round is the same", func(t *testing.T) { + err = orm.InsertTransmitRequest(&pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext(2, 1)) + require.NoError(t, err) + + l, err = orm.LatestReport(testutils.Context(t), feedID) + require.NoError(t, err) + assert.Equal(t, reports[3], l) + }) +} + +func Test_ReportCodec_FeedIDFromReport(t *testing.T) { + t.Run("FeedIDFromReport extracts the current block number from a valid report", func(t *testing.T) { + report := buildSampleV1Report(42) + + f, err := FeedIDFromReport(report) + require.NoError(t, err) + + assert.Equal(t, sampleFeedID[:], f[:]) + }) + t.Run("FeedIDFromReport returns error if report is invalid", func(t *testing.T) { + report := []byte{1} + + _, err := FeedIDFromReport(report) + assert.EqualError(t, err, "invalid length for report: 1") + }) +} diff --git a/core/services/relay/evm/mercury/persistence_manager.go b/core/services/relay/evm/mercury/persistence_manager.go new file mode 100644 index 00000000..97dad1fe --- /dev/null +++ b/core/services/relay/evm/mercury/persistence_manager.go @@ -0,0 +1,143 @@ +package mercury + +import ( + "context" + "sync" + "time" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + 
"github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + flushDeletesFrequency = time.Second + pruneFrequency = time.Hour +) + +type PersistenceManager struct { + lggr logger.Logger + orm ORM + + once services.StateMachine + stopCh services.StopChan + wg sync.WaitGroup + + deleteMu sync.Mutex + deleteQueue []*pb.TransmitRequest + + jobID int32 + + maxTransmitQueueSize int + flushDeletesFrequency time.Duration + pruneFrequency time.Duration +} + +func NewPersistenceManager(lggr logger.Logger, orm ORM, jobID int32, maxTransmitQueueSize int, flushDeletesFrequency, pruneFrequency time.Duration) *PersistenceManager { + return &PersistenceManager{ + lggr: lggr.Named("MercuryPersistenceManager"), + orm: orm, + stopCh: make(services.StopChan), + jobID: jobID, + maxTransmitQueueSize: maxTransmitQueueSize, + flushDeletesFrequency: flushDeletesFrequency, + pruneFrequency: pruneFrequency, + } +} + +func (pm *PersistenceManager) Start(ctx context.Context) error { + return pm.once.StartOnce("MercuryPersistenceManager", func() error { + pm.wg.Add(2) + go pm.runFlushDeletesLoop() + go pm.runPruneLoop() + return nil + }) +} + +func (pm *PersistenceManager) Close() error { + return pm.once.StopOnce("MercuryPersistenceManager", func() error { + close(pm.stopCh) + pm.wg.Wait() + return nil + }) +} + +func (pm *PersistenceManager) Insert(ctx context.Context, req *pb.TransmitRequest, reportCtx ocrtypes.ReportContext) error { + return pm.orm.InsertTransmitRequest(req, pm.jobID, reportCtx, pg.WithParentCtx(ctx)) +} + +func (pm *PersistenceManager) Delete(ctx context.Context, req *pb.TransmitRequest) error { + return pm.orm.DeleteTransmitRequests([]*pb.TransmitRequest{req}, pg.WithParentCtx(ctx)) +} + +func (pm *PersistenceManager) AsyncDelete(req *pb.TransmitRequest) { + pm.addToDeleteQueue(req) +} + +func (pm *PersistenceManager) Load(ctx context.Context) ([]*Transmission, error) { + return 
pm.orm.GetTransmitRequests(pm.jobID, pg.WithParentCtx(ctx)) +} + +func (pm *PersistenceManager) runFlushDeletesLoop() { + defer pm.wg.Done() + + ctx, cancel := pm.stopCh.Ctx(context.Background()) + defer cancel() + + ticker := time.NewTicker(utils.WithJitter(pm.flushDeletesFrequency)) + for { + select { + case <-ctx.Done(): + ticker.Stop() + return + case <-ticker.C: + queuedReqs := pm.resetDeleteQueue() + if err := pm.orm.DeleteTransmitRequests(queuedReqs, pg.WithParentCtx(ctx)); err != nil { + pm.lggr.Errorw("Failed to delete queued transmit requests", "err", err) + pm.addToDeleteQueue(queuedReqs...) + } else { + pm.lggr.Debugw("Deleted queued transmit requests") + } + } + } +} + +func (pm *PersistenceManager) runPruneLoop() { + defer pm.wg.Done() + + ctx, cancel := pm.stopCh.Ctx(context.Background()) + defer cancel() + + ticker := time.NewTicker(utils.WithJitter(pm.pruneFrequency)) + for { + select { + case <-ctx.Done(): + ticker.Stop() + return + case <-ticker.C: + if err := pm.orm.PruneTransmitRequests(pm.jobID, pm.maxTransmitQueueSize, pg.WithParentCtx(ctx), pg.WithLongQueryTimeout()); err != nil { + pm.lggr.Errorw("Failed to prune transmit requests table", "err", err) + } else { + pm.lggr.Debugw("Pruned transmit requests table") + } + } + } +} + +func (pm *PersistenceManager) addToDeleteQueue(reqs ...*pb.TransmitRequest) { + pm.deleteMu.Lock() + defer pm.deleteMu.Unlock() + pm.deleteQueue = append(pm.deleteQueue, reqs...) 
+} + +func (pm *PersistenceManager) resetDeleteQueue() []*pb.TransmitRequest { + pm.deleteMu.Lock() + defer pm.deleteMu.Unlock() + queue := pm.deleteQueue + pm.deleteQueue = nil + return queue +} diff --git a/core/services/relay/evm/mercury/persistence_manager_test.go b/core/services/relay/evm/mercury/persistence_manager_test.go new file mode 100644 index 00000000..c1aaff97 --- /dev/null +++ b/core/services/relay/evm/mercury/persistence_manager_test.go @@ -0,0 +1,179 @@ +package mercury + +import ( + "testing" + "time" + + "github.com/cometbft/cometbft/libs/rand" + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" +) + +func bootstrapPersistenceManager(t *testing.T, jobID int32, db *sqlx.DB) (*PersistenceManager, *observer.ObservedLogs) { + t.Helper() + lggr, observedLogs := logger.TestLoggerObserved(t, zapcore.DebugLevel) + orm := NewORM(db, lggr, pgtest.NewQConfig(true)) + return NewPersistenceManager(lggr, orm, jobID, 2, 5*time.Millisecond, 5*time.Millisecond), observedLogs +} + +func TestPersistenceManager(t *testing.T) { + jobID1 := rand.Int32() + jobID2 := jobID1 + 1 + + ctx := testutils.Context(t) + db := pgtest.NewSqlxDB(t) + pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + pm, _ := bootstrapPersistenceManager(t, jobID1, db) + + reports := sampleReports + + err := pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[0]}, ocrtypes.ReportContext{}) + require.NoError(t, err) + err = 
pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[1]}, ocrtypes.ReportContext{}) + require.NoError(t, err) + + transmissions, err := pm.Load(ctx) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[0]}}, + {Req: &pb.TransmitRequest{Payload: reports[1]}}, + }, transmissions) + + err = pm.Delete(ctx, &pb.TransmitRequest{Payload: reports[0]}) + require.NoError(t, err) + + transmissions, err = pm.Load(ctx) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[1]}}, + }, transmissions) + + t.Run("scopes load to only transmissions with matching job ID", func(t *testing.T) { + pm2, _ := bootstrapPersistenceManager(t, jobID2, db) + transmissions, err = pm2.Load(ctx) + require.NoError(t, err) + + assert.Len(t, transmissions, 0) + }) +} + +func TestPersistenceManagerAsyncDelete(t *testing.T) { + ctx := testutils.Context(t) + jobID := rand.Int32() + db := pgtest.NewSqlxDB(t) + pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + pm, observedLogs := bootstrapPersistenceManager(t, jobID, db) + + reports := sampleReports + + err := pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[0]}, ocrtypes.ReportContext{}) + require.NoError(t, err) + err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[1]}, ocrtypes.ReportContext{}) + require.NoError(t, err) + + err = pm.Start(ctx) + require.NoError(t, err) + + pm.AsyncDelete(&pb.TransmitRequest{Payload: reports[0]}) + + // Wait for next poll. + observedLogs.TakeAll() + testutils.WaitForLogMessage(t, observedLogs, "Deleted queued transmit requests") + + transmissions, err := pm.Load(ctx) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[1]}}, + }, transmissions) + + // Test AsyncDelete is a no-op after Close. 
+ err = pm.Close() + require.NoError(t, err) + + pm.AsyncDelete(&pb.TransmitRequest{Payload: reports[1]}) + + time.Sleep(15 * time.Millisecond) + + transmissions, err = pm.Load(ctx) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[1]}}, + }, transmissions) +} + +func TestPersistenceManagerPrune(t *testing.T) { + jobID1 := rand.Int32() + jobID2 := jobID1 + 1 + db := pgtest.NewSqlxDB(t) + pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + + ctx := testutils.Context(t) + + reports := make([][]byte, 25) + for i := 0; i < 25; i++ { + reports[i] = buildSampleV1Report(int64(i)) + } + + pm2, _ := bootstrapPersistenceManager(t, jobID2, db) + for i := 0; i < 20; i++ { + err := pm2.Insert(ctx, &pb.TransmitRequest{Payload: reports[i]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: uint32(i)}}) + require.NoError(t, err) + } + + pm, observedLogs := bootstrapPersistenceManager(t, jobID1, db) + + err := pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[21]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 21}}) + require.NoError(t, err) + err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[22]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 22}}) + require.NoError(t, err) + err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[23]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 23}}) + require.NoError(t, err) + + err = pm.Start(ctx) + require.NoError(t, err) + + // Wait for next poll. 
+ observedLogs.TakeAll() + testutils.WaitForLogMessage(t, observedLogs, "Pruned transmit requests table") + + transmissions, err := pm.Load(ctx) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[23]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 23}}}, + {Req: &pb.TransmitRequest{Payload: reports[22]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 22}}}, + }, transmissions) + + // Test pruning stops after Close. + err = pm.Close() + require.NoError(t, err) + + err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[24]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 24}}) + require.NoError(t, err) + + transmissions, err = pm.Load(ctx) + require.NoError(t, err) + require.Equal(t, []*Transmission{ + {Req: &pb.TransmitRequest{Payload: reports[24]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 24}}}, + {Req: &pb.TransmitRequest{Payload: reports[23]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 23}}}, + {Req: &pb.TransmitRequest{Payload: reports[22]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 22}}}, + }, transmissions) + + t.Run("prune was scoped to job ID", func(t *testing.T) { + transmissions, err = pm2.Load(ctx) + require.NoError(t, err) + assert.Len(t, transmissions, 20) + }) +} diff --git a/core/services/relay/evm/mercury/queue.go b/core/services/relay/evm/mercury/queue.go new file mode 100644 index 00000000..8800d5cd --- /dev/null +++ b/core/services/relay/evm/mercury/queue.go @@ -0,0 +1,248 @@ +package mercury + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + heap "github.com/esote/minmaxheap" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + 
"github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +//go:generate mockery --quiet --name asyncDeleter --output ./mocks/ --case=underscore --structname=AsyncDeleter +type asyncDeleter interface { + AsyncDelete(req *pb.TransmitRequest) +} + +var _ services.Service = (*TransmitQueue)(nil) + +var transmitQueueLoad = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "mercury_transmit_queue_load", + Help: "Percent of transmit queue capacity used", +}, + []string{"feedID", "capacity"}, +) + +// Prometheus' default interval is 15s, set this to under 7.5s to avoid +// aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency) +const promInterval = 6500 * time.Millisecond + +// TransmitQueue is the high-level package that everything outside of this file should be using +// It stores pending transmissions, yielding the latest (highest priority) first to the caller +type TransmitQueue struct { + services.StateMachine + + cond sync.Cond + lggr logger.Logger + asyncDeleter asyncDeleter + mu *sync.RWMutex + + pq *priorityQueue + maxlen int + closed bool + + // monitor loop + stopMonitor func() + transmitQueueLoad prometheus.Gauge +} + +type Transmission struct { + Req *pb.TransmitRequest // the payload to transmit + ReportCtx ocrtypes.ReportContext // contains priority information (latest epoch/round wins) +} + +// maxlen controls how many items will be stored in the queue +// 0 means unlimited - be careful, this can cause memory leaks +func NewTransmitQueue(lggr logger.Logger, feedID string, maxlen int, transmissions []*Transmission, asyncDeleter asyncDeleter) *TransmitQueue { + pq := priorityQueue(transmissions) + heap.Init(&pq) // ensure the heap is ordered + mu := new(sync.RWMutex) + return &TransmitQueue{ + services.StateMachine{}, + sync.Cond{L: mu}, + lggr.Named("TransmitQueue"), + 
asyncDeleter, + mu, + &pq, + maxlen, + false, + nil, + transmitQueueLoad.WithLabelValues(feedID, fmt.Sprintf("%d", maxlen)), + } +} + +func (tq *TransmitQueue) Push(req *pb.TransmitRequest, reportCtx ocrtypes.ReportContext) (ok bool) { + tq.cond.L.Lock() + defer tq.cond.L.Unlock() + + if tq.closed { + return false + } + + if tq.maxlen != 0 && tq.pq.Len() == tq.maxlen { + // evict oldest entry to make room + tq.lggr.Criticalf("Transmit queue is full; dropping oldest transmission (reached max length of %d)", tq.maxlen) + removed := heap.PopMax(tq.pq) + if transmission, ok := removed.(*Transmission); ok { + tq.asyncDeleter.AsyncDelete(transmission.Req) + } + } + + heap.Push(tq.pq, &Transmission{req, reportCtx}) + tq.cond.Signal() + + return true +} + +// BlockingPop will block until at least one item is in the heap, and then return it +// If the queue is closed, it will immediately return nil +func (tq *TransmitQueue) BlockingPop() (t *Transmission) { + tq.cond.L.Lock() + defer tq.cond.L.Unlock() + if tq.closed { + return nil + } + for t = tq.pop(); t == nil; t = tq.pop() { + tq.cond.Wait() + if tq.closed { + return nil + } + } + return t +} + +func (tq *TransmitQueue) IsEmpty() bool { + tq.mu.RLock() + defer tq.mu.RUnlock() + return tq.pq.Len() == 0 +} + +func (tq *TransmitQueue) Start(context.Context) error { + return tq.StartOnce("TransmitQueue", func() error { + t := time.NewTicker(utils.WithJitter(promInterval)) + wg := new(sync.WaitGroup) + chStop := make(chan struct{}) + tq.stopMonitor = func() { + t.Stop() + close(chStop) + wg.Wait() + } + wg.Add(1) + go tq.monitorLoop(t.C, chStop, wg) + return nil + }) +} + +func (tq *TransmitQueue) Close() error { + return tq.StopOnce("TransmitQueue", func() error { + tq.cond.L.Lock() + tq.closed = true + tq.cond.L.Unlock() + tq.cond.Broadcast() + tq.stopMonitor() + return nil + }) +} + +func (tq *TransmitQueue) monitorLoop(c <-chan time.Time, chStop <-chan struct{}, wg *sync.WaitGroup) { + defer wg.Done() + + for { + select 
{ + case <-c: + tq.report() + case <-chStop: + return + } + } +} + +func (tq *TransmitQueue) report() { + tq.mu.RLock() + length := tq.pq.Len() + tq.mu.RUnlock() + tq.transmitQueueLoad.Set(float64(length)) +} + +func (tq *TransmitQueue) Ready() error { + return nil +} +func (tq *TransmitQueue) Name() string { return tq.lggr.Name() } +func (tq *TransmitQueue) HealthReport() map[string]error { + report := map[string]error{tq.Name(): errors.Join( + tq.status(), + )} + return report +} + +func (tq *TransmitQueue) status() (merr error) { + tq.mu.RLock() + length := tq.pq.Len() + closed := tq.closed + tq.mu.RUnlock() + if tq.maxlen != 0 && length > (tq.maxlen/2) { + merr = errors.Join(merr, fmt.Errorf("transmit priority queue is greater than 50%% full (%d/%d)", length, tq.maxlen)) + } + if closed { + merr = errors.New("transmit queue is closed") + } + return merr +} + +// pop latest Transmission from the heap +// Not thread-safe +func (tq *TransmitQueue) pop() *Transmission { + if tq.pq.Len() == 0 { + return nil + } + return heap.Pop(tq.pq).(*Transmission) +} + +// HEAP +// Adapted from https://pkg.go.dev/container/heap#example-package-PriorityQueue + +// WARNING: None of these methods are thread-safe, caller must synchronize + +var _ heap.Interface = &priorityQueue{} + +type priorityQueue []*Transmission + +func (pq priorityQueue) Len() int { return len(pq) } + +func (pq priorityQueue) Less(i, j int) bool { + // We want Pop to give us the latest round, so we use greater than here + // i.e. 
a later epoch/round is "less" than an earlier one + return pq[i].ReportCtx.ReportTimestamp.Epoch > pq[j].ReportCtx.ReportTimestamp.Epoch && + pq[i].ReportCtx.ReportTimestamp.Round > pq[j].ReportCtx.ReportTimestamp.Round +} + +func (pq priorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] +} + +func (pq *priorityQueue) Pop() any { + n := len(*pq) + if n == 0 { + return nil + } + old := *pq + item := old[n-1] + old[n-1] = nil // avoid memory leak + *pq = old[0 : n-1] + return item +} + +func (pq *priorityQueue) Push(x any) { + *pq = append(*pq, x.(*Transmission)) +} diff --git a/core/services/relay/evm/mercury/queue_test.go b/core/services/relay/evm/mercury/queue_test.go new file mode 100644 index 00000000..020eeb63 --- /dev/null +++ b/core/services/relay/evm/mercury/queue_test.go @@ -0,0 +1,147 @@ +package mercury + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" +) + +type TestTransmissionWithReport struct { + tr *pb.TransmitRequest + ctx ocrtypes.ReportContext +} + +func createTestTransmissions(t *testing.T) []TestTransmissionWithReport { + t.Helper() + return []TestTransmissionWithReport{ + { + tr: &pb.TransmitRequest{ + Payload: []byte("test1"), + }, + ctx: ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + Epoch: 1, + Round: 1, + ConfigDigest: ocrtypes.ConfigDigest{}, + }, + }, + }, + { + tr: &pb.TransmitRequest{ + Payload: []byte("test2"), + }, + ctx: ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + Epoch: 2, + Round: 2, + ConfigDigest: ocrtypes.ConfigDigest{}, + }, + }, + }, + { + tr: 
&pb.TransmitRequest{ + Payload: []byte("test3"), + }, + ctx: ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + Epoch: 3, + Round: 3, + ConfigDigest: ocrtypes.ConfigDigest{}, + }, + }, + }, + } +} + +func Test_Queue(t *testing.T) { + t.Parallel() + lggr, observedLogs := logger.TestLoggerObserved(t, zapcore.ErrorLevel) + testTransmissions := createTestTransmissions(t) + deleter := mocks.NewAsyncDeleter(t) + transmitQueue := NewTransmitQueue(lggr, "foo feed ID", 7, nil, deleter) + + t.Run("successfully add transmissions to transmit queue", func(t *testing.T) { + for _, tt := range testTransmissions { + ok := transmitQueue.Push(tt.tr, tt.ctx) + require.True(t, ok) + } + report := transmitQueue.HealthReport() + assert.Nil(t, report[transmitQueue.Name()]) + }) + + t.Run("transmit queue is more than 50% full", func(t *testing.T) { + transmitQueue.Push(testTransmissions[2].tr, testTransmissions[2].ctx) + report := transmitQueue.HealthReport() + assert.Equal(t, report[transmitQueue.Name()].Error(), "transmit priority queue is greater than 50% full (4/7)") + }) + + t.Run("transmit queue pops the highest priority transmission", func(t *testing.T) { + tr := transmitQueue.BlockingPop() + assert.Equal(t, testTransmissions[2].tr, tr.Req) + }) + + t.Run("transmit queue is full and evicts the oldest transmission", func(t *testing.T) { + deleter.On("AsyncDelete", testTransmissions[0].tr).Once() + + // add 5 more transmissions to overflow the queue by 1 + for i := 0; i < 5; i++ { + transmitQueue.Push(testTransmissions[1].tr, testTransmissions[1].ctx) + } + + // expecting testTransmissions[0] to get evicted and not present in the queue anymore + testutils.WaitForLogMessage(t, observedLogs, "Transmit queue is full; dropping oldest transmission (reached max length of 7)") + for i := 0; i < 7; i++ { + tr := transmitQueue.BlockingPop() + assert.NotEqual(t, tr.Req, testTransmissions[0].tr) + } + }) + + t.Run("transmit queue blocks when empty and resumes when 
tranmission available", func(t *testing.T) { + assert.True(t, transmitQueue.IsEmpty()) + + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + tr := transmitQueue.BlockingPop() + assert.Equal(t, tr.Req, testTransmissions[0].tr) + }() + go func() { + defer wg.Done() + transmitQueue.Push(testTransmissions[0].tr, testTransmissions[0].ctx) + }() + wg.Wait() + }) + + t.Run("initializes transmissions", func(t *testing.T) { + transmissions := []*Transmission{ + { + Req: &pb.TransmitRequest{ + Payload: []byte("new1"), + }, + ReportCtx: ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + Epoch: 1, + Round: 1, + ConfigDigest: ocrtypes.ConfigDigest{}, + }, + }, + }, + } + transmitQueue := NewTransmitQueue(lggr, "foo feed ID", 7, transmissions, deleter) + + transmission := transmitQueue.BlockingPop() + assert.Equal(t, transmission.Req.Payload, []byte("new1")) + assert.True(t, transmitQueue.IsEmpty()) + }) +} diff --git a/core/services/relay/evm/mercury/test_helpers.go b/core/services/relay/evm/mercury/test_helpers.go new file mode 100644 index 00000000..d381a03e --- /dev/null +++ b/core/services/relay/evm/mercury/test_helpers.go @@ -0,0 +1,39 @@ +package mercury + +import ( + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/goplugin/libocr/offchainreporting2plus/chains/evmutil" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +func BuildSamplePayload(report []byte, reportCtx ocrtypes.ReportContext, sigs []ocrtypes.AttributedOnchainSignature) []byte { + var rs [][32]byte + var ss [][32]byte + var vs [32]byte + for i, as := range sigs { + r, s, v, err := evmutil.SplitSignature(as.Signature) + if err != nil { + panic("eventTransmit(ev): error in SplitSignature") + } + rs = append(rs, r) + ss = append(ss, s) + vs[i] = v + } + rawReportCtx := evmutil.RawReportContext(reportCtx) + payload, err := PayloadTypes.Pack(rawReportCtx, report, rs, ss, vs) + if err != nil { + panic(err) + } + return payload +} + 
+func MustHexToConfigDigest(s string) (cd ocrtypes.ConfigDigest) { + b := hexutil.MustDecode(s) + var err error + cd, err = ocrtypes.BytesToConfigDigest(b) + if err != nil { + panic(err) + } + return +} diff --git a/core/services/relay/evm/mercury/transmitter.go b/core/services/relay/evm/mercury/transmitter.go new file mode 100644 index 00000000..91bd4679 --- /dev/null +++ b/core/services/relay/evm/mercury/transmitter.go @@ -0,0 +1,472 @@ +package mercury + +import ( + "bytes" + "context" + "crypto/ed25519" + "errors" + "fmt" + "math/big" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/jpillora/backoff" + pkgerrors "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/libocr/offchainreporting2plus/chains/evmutil" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/types/mercury" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + mercuryutils "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + maxTransmitQueueSize = 10_000 + maxDeleteQueueSize = 10_000 + transmitTimeout = 5 * time.Second +) + +const ( + // Mercury server error codes + DuplicateReport = 2 +) + +var ( + transmitSuccessCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_success_count", + Help: "Number of successful transmissions (duplicates are counted as success)", + }, + []string{"feedID"}, + ) + transmitDuplicateCount = 
promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_duplicate_count", + Help: "Number of transmissions where the server told us it was a duplicate", + }, + []string{"feedID"}, + ) + transmitConnectionErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_connection_error_count", + Help: "Number of errored transmissions that failed due to problem with the connection", + }, + []string{"feedID"}, + ) + transmitQueueDeleteErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_queue_delete_error_count", + Help: "Running count of DB errors when trying to delete an item from the queue DB", + }, + []string{"feedID"}, + ) + transmitQueueInsertErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_queue_insert_error_count", + Help: "Running count of DB errors when trying to insert an item into the queue DB", + }, + []string{"feedID"}, + ) + transmitQueuePushErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_queue_push_error_count", + Help: "Running count of DB errors when trying to push an item onto the queue", + }, + []string{"feedID"}, + ) + transmitServerErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_server_error_count", + Help: "Number of errored transmissions that failed due to an error returned by the mercury server", + }, + []string{"feedID", "code"}, + ) +) + +type Transmitter interface { + mercury.Transmitter + services.Service +} + +type ConfigTracker interface { + LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) +} + +type TransmitterReportDecoder interface { + BenchmarkPriceFromReport(report ocrtypes.Report) (*big.Int, error) +} + +var _ Transmitter = (*mercuryTransmitter)(nil) + +type mercuryTransmitter struct { + services.StateMachine + lggr logger.Logger + rpcClient wsrpc.Client + persistenceManager 
*PersistenceManager + codec TransmitterReportDecoder + + feedID mercuryutils.FeedID + jobID int32 + fromAccount string + + stopCh services.StopChan + queue *TransmitQueue + wg sync.WaitGroup + + deleteQueue chan *pb.TransmitRequest + + transmitSuccessCount prometheus.Counter + transmitDuplicateCount prometheus.Counter + transmitConnectionErrorCount prometheus.Counter + transmitQueueDeleteErrorCount prometheus.Counter + transmitQueueInsertErrorCount prometheus.Counter + transmitQueuePushErrorCount prometheus.Counter +} + +var PayloadTypes = getPayloadTypes() + +func getPayloadTypes() abi.Arguments { + mustNewType := func(t string) abi.Type { + result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{}) + if err != nil { + panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err)) + } + return result + } + return abi.Arguments([]abi.Argument{ + {Name: "reportContext", Type: mustNewType("bytes32[3]")}, + {Name: "report", Type: mustNewType("bytes")}, + {Name: "rawRs", Type: mustNewType("bytes32[]")}, + {Name: "rawSs", Type: mustNewType("bytes32[]")}, + {Name: "rawVs", Type: mustNewType("bytes32")}, + }) +} + +func NewTransmitter(lggr logger.Logger, rpcClient wsrpc.Client, fromAccount ed25519.PublicKey, jobID int32, feedID [32]byte, db *sqlx.DB, cfg pg.QConfig, codec TransmitterReportDecoder) *mercuryTransmitter { + feedIDHex := fmt.Sprintf("0x%x", feedID[:]) + persistenceManager := NewPersistenceManager(lggr, NewORM(db, lggr, cfg), jobID, maxTransmitQueueSize, flushDeletesFrequency, pruneFrequency) + return &mercuryTransmitter{ + services.StateMachine{}, + lggr.Named("MercuryTransmitter").With("feedID", feedIDHex), + rpcClient, + persistenceManager, + codec, + feedID, + jobID, + fmt.Sprintf("%x", fromAccount), + make(services.StopChan), + nil, + sync.WaitGroup{}, + make(chan *pb.TransmitRequest, maxDeleteQueueSize), + transmitSuccessCount.WithLabelValues(feedIDHex), + transmitDuplicateCount.WithLabelValues(feedIDHex), + 
transmitConnectionErrorCount.WithLabelValues(feedIDHex), + transmitQueueDeleteErrorCount.WithLabelValues(feedIDHex), + transmitQueueInsertErrorCount.WithLabelValues(feedIDHex), + transmitQueuePushErrorCount.WithLabelValues(feedIDHex), + } +} + +func (mt *mercuryTransmitter) Start(ctx context.Context) (err error) { + return mt.StartOnce("MercuryTransmitter", func() error { + mt.lggr.Debugw("Loading transmit requests from database") + if err := mt.persistenceManager.Start(ctx); err != nil { + return err + } + transmissions, err := mt.persistenceManager.Load(ctx) + if err != nil { + return err + } + mt.queue = NewTransmitQueue(mt.lggr, mt.feedID.String(), maxTransmitQueueSize, transmissions, mt.persistenceManager) + + if err := mt.rpcClient.Start(ctx); err != nil { + return err + } + if err := mt.queue.Start(ctx); err != nil { + return err + } + mt.wg.Add(1) + go mt.runDeleteQueueLoop() + mt.wg.Add(1) + go mt.runQueueLoop() + return nil + }) +} + +func (mt *mercuryTransmitter) Close() error { + return mt.StopOnce("MercuryTransmitter", func() error { + if err := mt.queue.Close(); err != nil { + return err + } + if err := mt.persistenceManager.Close(); err != nil { + return err + } + close(mt.stopCh) + mt.wg.Wait() + return mt.rpcClient.Close() + }) +} + +func (mt *mercuryTransmitter) Name() string { return mt.lggr.Name() } + +func (mt *mercuryTransmitter) HealthReport() map[string]error { + report := map[string]error{mt.Name(): mt.Healthy()} + services.CopyHealth(report, mt.rpcClient.HealthReport()) + services.CopyHealth(report, mt.queue.HealthReport()) + return report +} + +func (mt *mercuryTransmitter) runDeleteQueueLoop() { + defer mt.wg.Done() + runloopCtx, cancel := mt.stopCh.Ctx(context.Background()) + defer cancel() + + // Exponential backoff for very rarely occurring errors (DB disconnect etc) + b := backoff.Backoff{ + Min: 1 * time.Second, + Max: 120 * time.Second, + Factor: 2, + Jitter: true, + } + + for { + select { + case req := <-mt.deleteQueue: + for { + 
if err := mt.persistenceManager.Delete(runloopCtx, req); err != nil { + mt.lggr.Errorw("Failed to delete transmit request record", "err", err, "req.Payload", req.Payload) + mt.transmitQueueDeleteErrorCount.Inc() + select { + case <-time.After(b.Duration()): + // Wait a backoff duration before trying to delete again + continue + case <-mt.stopCh: + // abort and return immediately on stop even if items remain in queue + return + } + } + break + } + // success + b.Reset() + case <-mt.stopCh: + // abort and return immediately on stop even if items remain in queue + return + } + } +} + +func (mt *mercuryTransmitter) runQueueLoop() { + defer mt.wg.Done() + // Exponential backoff with very short retry interval (since latency is a priority) + // 5ms, 10ms, 20ms, 40ms etc + b := backoff.Backoff{ + Min: 5 * time.Millisecond, + Max: 1 * time.Second, + Factor: 2, + Jitter: true, + } + runloopCtx, cancel := mt.stopCh.Ctx(context.Background()) + defer cancel() + for { + t := mt.queue.BlockingPop() + if t == nil { + // queue was closed + return + } + ctx, cancel := context.WithTimeout(runloopCtx, utils.WithJitter(transmitTimeout)) + res, err := mt.rpcClient.Transmit(ctx, t.Req) + cancel() + if runloopCtx.Err() != nil { + // runloop context is only canceled on transmitter close so we can + // exit the runloop here + return + } else if err != nil { + mt.transmitConnectionErrorCount.Inc() + mt.lggr.Errorw("Transmit report failed", "err", err, "reportCtx", t.ReportCtx) + if ok := mt.queue.Push(t.Req, t.ReportCtx); !ok { + mt.lggr.Error("Failed to push report to transmit queue; queue is closed") + return + } + // Wait a backoff duration before pulling the most recent transmission + // the heap + select { + case <-time.After(b.Duration()): + continue + case <-mt.stopCh: + return + } + } + + b.Reset() + if res.Error == "" { + mt.transmitSuccessCount.Inc() + mt.lggr.Debugw("Transmit report success", "payload", hexutil.Encode(t.Req.Payload), "response", res, "reportCtx", t.ReportCtx) + } 
else { + // We don't need to retry here because the mercury server + // has confirmed it received the report. We only need to retry + // on networking/unknown errors + switch res.Code { + case DuplicateReport: + mt.transmitSuccessCount.Inc() + mt.transmitDuplicateCount.Inc() + mt.lggr.Debugw("Transmit report success; duplicate report", "payload", hexutil.Encode(t.Req.Payload), "response", res, "reportCtx", t.ReportCtx) + default: + transmitServerErrorCount.WithLabelValues(mt.feedID.String(), fmt.Sprintf("%d", res.Code)).Inc() + mt.lggr.Errorw("Transmit report failed; mercury server returned error", "response", res, "reportCtx", t.ReportCtx, "err", res.Error, "code", res.Code) + } + } + + select { + case mt.deleteQueue <- t.Req: + default: + mt.lggr.Criticalw("Delete queue is full", "reportCtx", t.ReportCtx) + } + } +} + +// Transmit sends the report to the on-chain smart contract's Transmit method. +func (mt *mercuryTransmitter) Transmit(ctx context.Context, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signatures []ocrtypes.AttributedOnchainSignature) error { + var rs [][32]byte + var ss [][32]byte + var vs [32]byte + for i, as := range signatures { + r, s, v, err := evmutil.SplitSignature(as.Signature) + if err != nil { + panic("eventTransmit(ev): error in SplitSignature") + } + rs = append(rs, r) + ss = append(ss, s) + vs[i] = v + } + rawReportCtx := evmutil.RawReportContext(reportCtx) + + payload, err := PayloadTypes.Pack(rawReportCtx, []byte(report), rs, ss, vs) + if err != nil { + return pkgerrors.Wrap(err, "abi.Pack failed") + } + + req := &pb.TransmitRequest{ + Payload: payload, + } + + mt.lggr.Tracew("Transmit enqueue", "req.Payload", req.Payload, "report", report, "reportCtx", reportCtx, "signatures", signatures) + + if err := mt.persistenceManager.Insert(ctx, req, reportCtx); err != nil { + mt.transmitQueueInsertErrorCount.Inc() + return err + } + if ok := mt.queue.Push(req, reportCtx); !ok { + mt.transmitQueuePushErrorCount.Inc() + return 
errors.New("transmit queue is closed") + } + return nil +} + +// FromAccount returns the stringified (hex) CSA public key +func (mt *mercuryTransmitter) FromAccount() (ocrtypes.Account, error) { + return ocrtypes.Account(mt.fromAccount), nil +} + +// LatestConfigDigestAndEpoch retrieves the latest config digest and epoch from the OCR2 contract. +func (mt *mercuryTransmitter) LatestConfigDigestAndEpoch(ctx context.Context) (cd ocrtypes.ConfigDigest, epoch uint32, err error) { + panic("not needed for OCR3") +} + +func (mt *mercuryTransmitter) FetchInitialMaxFinalizedBlockNumber(ctx context.Context) (*int64, error) { + mt.lggr.Trace("FetchInitialMaxFinalizedBlockNumber") + + report, err := mt.latestReport(ctx, mt.feedID) + if err != nil { + return nil, err + } + + if report == nil { + mt.lggr.Debugw("FetchInitialMaxFinalizedBlockNumber success; got nil report") + return nil, nil + } + + mt.lggr.Debugw("FetchInitialMaxFinalizedBlockNumber success", "currentBlockNum", report.CurrentBlockNumber) + + return &report.CurrentBlockNumber, nil +} + +func (mt *mercuryTransmitter) LatestPrice(ctx context.Context, feedID [32]byte) (*big.Int, error) { + mt.lggr.Trace("LatestPrice") + + fullReport, err := mt.latestReport(ctx, feedID) + if err != nil { + return nil, err + } + if fullReport == nil { + return nil, nil + } + payload := fullReport.Payload + m := make(map[string]interface{}) + if err := PayloadTypes.UnpackIntoMap(m, payload); err != nil { + return nil, err + } + report, is := m["report"].([]byte) + if !is { + return nil, fmt.Errorf("expected report to be []byte, but it was %T", m["report"]) + } + return mt.codec.BenchmarkPriceFromReport(report) +} + +// LatestTimestamp will return -1, nil if the feed is missing +func (mt *mercuryTransmitter) LatestTimestamp(ctx context.Context) (int64, error) { + mt.lggr.Trace("LatestTimestamp") + + report, err := mt.latestReport(ctx, mt.feedID) + if err != nil { + return 0, err + } + + if report == nil { + 
mt.lggr.Debugw("LatestTimestamp success; got nil report") + return -1, nil + } + + mt.lggr.Debugw("LatestTimestamp success", "timestamp", report.ObservationsTimestamp) + + return report.ObservationsTimestamp, nil +} + +func (mt *mercuryTransmitter) latestReport(ctx context.Context, feedID [32]byte) (*pb.Report, error) { + mt.lggr.Trace("latestReport") + + req := &pb.LatestReportRequest{ + FeedId: feedID[:], + } + resp, err := mt.rpcClient.LatestReport(ctx, req) + if err != nil { + mt.lggr.Warnw("latestReport failed", "err", err) + return nil, pkgerrors.Wrap(err, "latestReport failed") + } + if resp == nil { + return nil, errors.New("latestReport expected non-nil response") + } + if resp.Error != "" { + err = errors.New(resp.Error) + mt.lggr.Warnw("latestReport failed; mercury server returned error", "err", err) + return nil, err + } + if resp.Report == nil { + mt.lggr.Tracew("latestReport success: returned nil") + return nil, nil + } else if !bytes.Equal(resp.Report.FeedId, feedID[:]) { + err = fmt.Errorf("latestReport failed; mismatched feed IDs, expected: 0x%x, got: 0x%x", mt.feedID[:], resp.Report.FeedId[:]) + mt.lggr.Errorw("latestReport failed", "err", err) + return nil, err + } + + mt.lggr.Tracew("latestReport success", "currentBlockNum", resp.Report.CurrentBlockNumber) + + return resp.Report, nil +} diff --git a/core/services/relay/evm/mercury/transmitter_test.go b/core/services/relay/evm/mercury/transmitter_test.go new file mode 100644 index 00000000..476b89ed --- /dev/null +++ b/core/services/relay/evm/mercury/transmitter_test.go @@ -0,0 +1,293 @@ +package mercury + +import ( + "context" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + mercurytypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/types" + mocks "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" +) + +func Test_MercuryTransmitter_Transmit(t *testing.T) { + lggr := logger.TestLogger(t) + db := pgtest.NewSqlxDB(t) + var jobID int32 + pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`) + pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`) + q := NewTransmitQueue(lggr, "", 0, nil, nil) + codec := new(mockCodec) + + t.Run("v1 report transmission successfully enqueued", func(t *testing.T) { + report := sampleV1Report + c := mocks.MockWSRPCClient{ + TransmitF: func(ctx context.Context, in *pb.TransmitRequest) (out *pb.TransmitResponse, err error) { + require.NotNil(t, in) + assert.Equal(t, hexutil.Encode(buildSamplePayload(report)), hexutil.Encode(in.Payload)) + out = new(pb.TransmitResponse) + out.Code = 42 + out.Error = "" + return out, nil + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) + mt.queue = q + err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) + + require.NoError(t, err) + }) + t.Run("v2 report transmission successfully enqueued", func(t *testing.T) { + report := sampleV2Report + c := mocks.MockWSRPCClient{ + TransmitF: func(ctx context.Context, in *pb.TransmitRequest) (out *pb.TransmitResponse, err error) { + require.NotNil(t, in) + assert.Equal(t, hexutil.Encode(buildSamplePayload(report)), hexutil.Encode(in.Payload)) + out = new(pb.TransmitResponse) + out.Code = 42 + out.Error = "" + return out, nil + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) 
+ mt.queue = q + err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) + + require.NoError(t, err) + }) + t.Run("v3 report transmission successfully enqueued", func(t *testing.T) { + report := sampleV3Report + c := mocks.MockWSRPCClient{ + TransmitF: func(ctx context.Context, in *pb.TransmitRequest) (out *pb.TransmitResponse, err error) { + require.NotNil(t, in) + assert.Equal(t, hexutil.Encode(buildSamplePayload(report)), hexutil.Encode(in.Payload)) + out = new(pb.TransmitResponse) + out.Code = 42 + out.Error = "" + return out, nil + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) + mt.queue = q + err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs) + + require.NoError(t, err) + }) +} + +func Test_MercuryTransmitter_LatestTimestamp(t *testing.T) { + t.Parallel() + lggr := logger.TestLogger(t) + db := pgtest.NewSqlxDB(t) + var jobID int32 + codec := new(mockCodec) + + t.Run("successful query", func(t *testing.T) { + c := mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + require.NotNil(t, in) + assert.Equal(t, hexutil.Encode(sampleFeedID[:]), hexutil.Encode(in.FeedId)) + out = new(pb.LatestReportResponse) + out.Report = new(pb.Report) + out.Report.FeedId = sampleFeedID[:] + out.Report.ObservationsTimestamp = 42 + return out, nil + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) + ts, err := mt.LatestTimestamp(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, int64(42), ts) + }) + + t.Run("successful query returning nil report (new feed) gives latest timestamp = -1", func(t *testing.T) { + c := mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + out = new(pb.LatestReportResponse) + 
out.Report = nil + return out, nil + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) + ts, err := mt.LatestTimestamp(testutils.Context(t)) + require.NoError(t, err) + + assert.Equal(t, int64(-1), ts) + }) + + t.Run("failing query", func(t *testing.T) { + c := mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + return nil, errors.New("something exploded") + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) + _, err := mt.LatestTimestamp(testutils.Context(t)) + require.Error(t, err) + assert.Contains(t, err.Error(), "something exploded") + }) +} + +type mockCodec struct { + val *big.Int + err error +} + +var _ mercurytypes.ReportCodec = &mockCodec{} + +func (m *mockCodec) BenchmarkPriceFromReport(_ ocrtypes.Report) (*big.Int, error) { + return m.val, m.err +} + +func Test_MercuryTransmitter_LatestPrice(t *testing.T) { + t.Parallel() + lggr := logger.TestLogger(t) + db := pgtest.NewSqlxDB(t) + var jobID int32 + + codec := new(mockCodec) + + t.Run("successful query", func(t *testing.T) { + originalPrice := big.NewInt(123456789) + c := mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + require.NotNil(t, in) + assert.Equal(t, hexutil.Encode(sampleFeedID[:]), hexutil.Encode(in.FeedId)) + out = new(pb.LatestReportResponse) + out.Report = new(pb.Report) + out.Report.FeedId = sampleFeedID[:] + out.Report.Payload = buildSamplePayload([]byte("doesn't matter")) + return out, nil + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) + + t.Run("BenchmarkPriceFromReport succeeds", func(t *testing.T) { + codec.val = originalPrice + codec.err = nil + + price, err := mt.LatestPrice(testutils.Context(t), 
sampleFeedID) + require.NoError(t, err) + + assert.Equal(t, originalPrice, price) + }) + t.Run("BenchmarkPriceFromReport fails", func(t *testing.T) { + codec.val = nil + codec.err = errors.New("something exploded") + + _, err := mt.LatestPrice(testutils.Context(t), sampleFeedID) + require.Error(t, err) + + assert.EqualError(t, err, "something exploded") + }) + }) + + t.Run("successful query returning nil report (new feed)", func(t *testing.T) { + c := mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + out = new(pb.LatestReportResponse) + out.Report = nil + return out, nil + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) + price, err := mt.LatestPrice(testutils.Context(t), sampleFeedID) + require.NoError(t, err) + + assert.Nil(t, price) + }) + + t.Run("failing query", func(t *testing.T) { + c := mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + return nil, errors.New("something exploded") + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) + _, err := mt.LatestPrice(testutils.Context(t), sampleFeedID) + require.Error(t, err) + assert.Contains(t, err.Error(), "something exploded") + }) +} + +func Test_MercuryTransmitter_FetchInitialMaxFinalizedBlockNumber(t *testing.T) { + t.Parallel() + + lggr := logger.TestLogger(t) + db := pgtest.NewSqlxDB(t) + var jobID int32 + codec := new(mockCodec) + + t.Run("successful query", func(t *testing.T) { + c := mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + require.NotNil(t, in) + assert.Equal(t, hexutil.Encode(sampleFeedID[:]), hexutil.Encode(in.FeedId)) + out = new(pb.LatestReportResponse) + out.Report = new(pb.Report) + 
out.Report.FeedId = sampleFeedID[:] + out.Report.CurrentBlockNumber = 42 + return out, nil + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) + bn, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) + require.NoError(t, err) + + require.NotNil(t, bn) + assert.Equal(t, 42, int(*bn)) + }) + t.Run("successful query returning nil report (new feed)", func(t *testing.T) { + c := mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + out = new(pb.LatestReportResponse) + out.Report = nil + return out, nil + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, jobID, sampleFeedID, db, pgtest.NewQConfig(true), codec) + bn, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) + require.NoError(t, err) + + assert.Nil(t, bn) + }) + t.Run("failing query", func(t *testing.T) { + c := mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + return nil, errors.New("something exploded") + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), codec) + _, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) + require.Error(t, err) + assert.Contains(t, err.Error(), "something exploded") + }) + t.Run("return feed ID is wrong", func(t *testing.T) { + c := mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) { + require.NotNil(t, in) + assert.Equal(t, hexutil.Encode(sampleFeedID[:]), hexutil.Encode(in.FeedId)) + out = new(pb.LatestReportResponse) + out.Report = new(pb.Report) + out.Report.CurrentBlockNumber = 42 + out.Report.FeedId = []byte{1, 2} + return out, nil + }, + } + mt := NewTransmitter(lggr, c, sampleClientPubKey, 0, sampleFeedID, db, pgtest.NewQConfig(true), 
codec) + _, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t)) + require.Error(t, err) + assert.Contains(t, err.Error(), "latestReport failed; mismatched feed IDs, expected: 0x1c916b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472, got: 0x") + }) +} diff --git a/core/services/relay/evm/mercury/types/types.go b/core/services/relay/evm/mercury/types/types.go new file mode 100644 index 00000000..dd7af392 --- /dev/null +++ b/core/services/relay/evm/mercury/types/types.go @@ -0,0 +1,35 @@ +package types + +import ( + "context" + "math/big" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type DataSourceORM interface { + LatestReport(ctx context.Context, feedID [32]byte, qopts ...pg.QOpt) (report []byte, err error) +} + +type ReportCodec interface { + BenchmarkPriceFromReport(report ocrtypes.Report) (*big.Int, error) +} + +var ( + PriceFeedMissingCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_price_feed_missing", + Help: "Running count of times mercury tried to query a price feed for billing from mercury server, but it was missing", + }, + []string{"queriedFeedID"}, + ) + PriceFeedErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_price_feed_errors", + Help: "Running count of times mercury tried to query a price feed for billing from mercury server, but got an error", + }, + []string{"queriedFeedID"}, + ) +) diff --git a/core/services/relay/evm/mercury/utils/feeds.go b/core/services/relay/evm/mercury/utils/feeds.go new file mode 100644 index 00000000..de3361f2 --- /dev/null +++ b/core/services/relay/evm/mercury/utils/feeds.go @@ -0,0 +1,112 @@ +package utils + +import ( + "encoding/binary" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var legacyV1FeedIDs = []FeedID{ + // Arbitrum mainnet 
(prod) + mustHexToFeedID("0xb43dc495134fa357725f93539511c5a4febeadf56e7c29c96566c825094f0b20"), + mustHexToFeedID("0xe65b31c6d5b9bdff43a8194dc5b2edc6914ddbc5e9f9e9521f605fc3738fabf5"), + mustHexToFeedID("0x30f9926cdef3de98995fb38a100d5c582ae025ebbb8f9a931500596ce080280a"), + mustHexToFeedID("0x0f49a4533a64c7f53bfdf5e86d791620d93afdec00cfe1896548397b0f4ec81c"), + mustHexToFeedID("0x2cdd4aea8298f5d2e7f8505b91e3313e3aa04376a81f401b4a48c5aab78ee5cf"), + mustHexToFeedID("0x5f82d154119f4251d83b2a58bf61c9483c84241053038a2883abf16ed4926433"), + mustHexToFeedID("0x74aca63821bf7ead199e924d261d277cbec96d1026ab65267d655c51b4536914"), + mustHexToFeedID("0x64ee16b94fdd72d0b3769955445cc82d6804573c22f0f49b67cd02edd07461e7"), + mustHexToFeedID("0x95241f154d34539741b19ce4bae815473fd1b2a90ac3b4b023a692f31edfe90e"), + mustHexToFeedID("0x297cc1e1ee5fc2f45dff1dd11a46694567904f4dbc596c7cc216d6c688605a1b"), + // // Arbitrum mainnet (staging) + mustHexToFeedID("0x62ce6a99c4bebb150191d7b72f7a0c0206af00baca480ab007caa4b5bf4bf02a"), + mustHexToFeedID("0x984126712e6a8b5b4fe138c49b29483a12e77b5cb3213a0769252380c57480e4"), + mustHexToFeedID("0xb74f650d9cae6259ab4212f76abe746600be3a4926947725ed107943915346c1"), + mustHexToFeedID("0xa0098c4c06cbab05b2598aecad0cbf49d44780c56d40514e09fd7a9e76a2db00"), + mustHexToFeedID("0x2206b467d04656a8a83af43a428d6b66f787162db629f9caed0c12b54a32998e"), + mustHexToFeedID("0x55488e61b59ea629df66698c8eea1390f0aedc24942e074a6d565569fb90afde"), + mustHexToFeedID("0x98d66aab30d62d044cc55ffccb79ae35151348f40ff06a98c92001ed6ec8e886"), + mustHexToFeedID("0x2e768c0eca65d0449ee825b8a921349501339a2487c02146f77611ae01c31a50"), + mustHexToFeedID("0xb29931d9fe1e9fc023b4d2f0f1789c8b5e21aabf389f86f9702241a0178345dd"), + mustHexToFeedID("0xd8b8cfc1e2dd75116e5792d11810d830ef48843fd44e1633385e81157f8da6b5"), + mustHexToFeedID("0x09f8d0caff8cecb7f5e493d4de2ab98b4392f6d07923cd19b2cb524779301b85"), + mustHexToFeedID("0xe645924bbf507304dc4bd37f02c8dac73da3b7eb67378de98cfc59f17ba6774a"), 
+ // Arbitrum testnet (production) + mustHexToFeedID("0x695be66b6a7979f2b3ed33a3d718eabebaf0a881f1f6598b5530875b7e8150ab"), + mustHexToFeedID("0x259b566b9d3c64d1e4a8656e2d6fd4c08e19f9fa9637ae76d52e428d07cca8e9"), + mustHexToFeedID("0x26c16f2054b7a1d77ae83a0429dace9f3000ba4dbf1690236e8f575742e98f66"), + mustHexToFeedID("0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"), + mustHexToFeedID("0xbf1febc8c335cb236c1995c1007a928a3f7ae8307a1a20cb31334e6d316c62d1"), + mustHexToFeedID("0x4ce52cf28e49f4673198074968aeea280f13b5f897c687eb713bcfc1eeab89ba"), + mustHexToFeedID("0xb21d58dccab05dcea22ab780ca010c4bec34e61ce7310e30f4ad0ff8c1621d27"), + mustHexToFeedID("0x5ad0d18436dd95672e69903efe95bdfb43a05cb55e8965c5af93db8170c8820c"), + mustHexToFeedID("0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"), + mustHexToFeedID("0x14e044f932bb959cc2aa8dc1ba110c09224e639aae00264c1ffc2a0830904a3c"), + mustHexToFeedID("0x555344432d5553442d415242495452554d2d544553544e455400000000000000"), + mustHexToFeedID("0x12be1859ee43f46bab53750915f20855f54e891f88ddd524f26a72d6f4deed1d"), + // // Arbitrum testnet (staging) + mustHexToFeedID("0x8837f28f5172f18071f164b8540fe8c95162dc0051e31005023fadc1cd9c4b50"), + mustHexToFeedID("0xd130b5acd88b47eb7c372611205d5a9ca474829a2719e396ab1eb4f956674e4e"), + mustHexToFeedID("0x6d2f5a4b3ba6c1953b4bb636f6ad03aec01b6222274f8ca1e39e53ee12a8cdf3"), + mustHexToFeedID("0x6962e629c3a0f5b7e3e9294b0c283c9b20f94f1c89c8ba8c1ee4650738f20fb2"), + mustHexToFeedID("0x557b817c6be7392364cef0dd11007c43caea1de78ce42e4f1eadc383e7cb209c"), + mustHexToFeedID("0x3250b5dd9491cb11138048d070b8636c35d96fff29671dc68b0723ad41f53433"), + mustHexToFeedID("0x3781c2691f6980dc66a72c03a32edb769fe05a9c9cb729cd7e96ecfd89450a0a"), + mustHexToFeedID("0xbbbf52c5797cc86d6bd9413d59ec624f07baf5045290ecd5ac6541d5a7ffd234"), + mustHexToFeedID("0xf753e1201d54ac94dfd9334c542562ff7e42993419a661261d010af0cbfd4e34"), + 
mustHexToFeedID("0x2489ce4577e814d6794218a13ef3c04cac976f991305400a4c0a1ddcffb90357"), + mustHexToFeedID("0xa5b07943b89e2c278fc8a2754e2854316e03cb959f6d323c2d5da218fb6b0ff8"), + mustHexToFeedID("0x1c2c0dfac0eb2aae2c05613f0d677daae164cdd406bd3dd6153d743302ce56e8"), +} + +var legacyV1FeedIDM map[FeedID]struct{} + +func init() { + legacyV1FeedIDM = make(map[FeedID]struct{}) + for _, feedID := range legacyV1FeedIDs { + legacyV1FeedIDM[feedID] = struct{}{} + } +} + +func mustHexToFeedID(s string) FeedID { + f := new(FeedID) + if err := f.UnmarshalText([]byte(s)); err != nil { + panic(err) + } + return *f +} + +type FeedVersion uint16 + +const ( + _ FeedVersion = iota + REPORT_V1 + REPORT_V2 + REPORT_V3 + _ +) + +type FeedID [32]byte + +func BytesToFeedID(b []byte) FeedID { + return (FeedID)(utils.BytesToHash(b)) +} + +func (f FeedID) Hex() string { return (utils.Hash)(f).Hex() } + +func (f FeedID) String() string { return (utils.Hash)(f).String() } + +func (f *FeedID) UnmarshalText(input []byte) error { + return (*utils.Hash)(f).UnmarshalText(input) +} + +func (f FeedID) Version() FeedVersion { + if _, exists := legacyV1FeedIDM[f]; exists { + return REPORT_V1 + } + return FeedVersion(binary.BigEndian.Uint16(f[:2])) +} + +func (f FeedID) IsV1() bool { return f.Version() == REPORT_V1 } +func (f FeedID) IsV2() bool { return f.Version() == REPORT_V2 } +func (f FeedID) IsV3() bool { return f.Version() == REPORT_V3 } diff --git a/core/services/relay/evm/mercury/utils/feeds_test.go b/core/services/relay/evm/mercury/utils/feeds_test.go new file mode 100644 index 00000000..37b9b47d --- /dev/null +++ b/core/services/relay/evm/mercury/utils/feeds_test.go @@ -0,0 +1,37 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var ( + v1FeedId = (FeedID)([32]uint8{00, 01, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}) + v2FeedId = (FeedID)([32]uint8{00, 02, 
107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}) + v3FeedId = (FeedID)([32]uint8{00, 03, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}) +) + +func Test_FeedID_Version(t *testing.T) { + t.Run("versioned feed ID", func(t *testing.T) { + assert.Equal(t, REPORT_V1, v1FeedId.Version()) + assert.True(t, v1FeedId.IsV1()) + assert.False(t, v1FeedId.IsV2()) + assert.False(t, v1FeedId.IsV3()) + + assert.Equal(t, REPORT_V2, v2FeedId.Version()) + assert.False(t, v2FeedId.IsV1()) + assert.True(t, v2FeedId.IsV2()) + assert.False(t, v2FeedId.IsV3()) + + assert.Equal(t, REPORT_V3, v3FeedId.Version()) + assert.False(t, v3FeedId.IsV1()) + assert.False(t, v3FeedId.IsV2()) + assert.True(t, v3FeedId.IsV3()) + }) + t.Run("legacy special cases", func(t *testing.T) { + for _, feedID := range legacyV1FeedIDs { + assert.Equal(t, REPORT_V1, feedID.Version()) + } + }) +} diff --git a/core/services/relay/evm/mercury/v1/data_source.go b/core/services/relay/evm/mercury/v1/data_source.go new file mode 100644 index 00000000..6bacb287 --- /dev/null +++ b/core/services/relay/evm/mercury/v1/data_source.go @@ -0,0 +1,327 @@ +package v1 + +import ( + "context" + "errors" + "fmt" + "math/big" + "sync" + + pkgerrors "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/types/mercury" + v1types "github.com/goplugin/plugin-common/pkg/types/mercury/v1" + v1 "github.com/goplugin/plugin-data-streams/mercury/v1" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + 
"github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/types" + mercuryutils "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v1/reportcodec" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + insufficientBlocksCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_insufficient_blocks_count", + Help: fmt.Sprintf("Count of times that there were not enough blocks in the chain during observation (need: %d)", nBlocksObservation), + }, + []string{"feedID"}, + ) + zeroBlocksCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_zero_blocks_count", + Help: "Count of times that there were zero blocks in the chain during observation", + }, + []string{"feedID"}, + ) +) + +const nBlocksObservation int = v1.MaxAllowedBlocks + +type Runner interface { + ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) +} + +// Fetcher fetches data from Mercury server +type Fetcher interface { + // FetchInitialMaxFinalizedBlockNumber should fetch the initial max finalized block number + FetchInitialMaxFinalizedBlockNumber(context.Context) (*int64, error) +} + +type datasource struct { + pipelineRunner Runner + jb job.Job + spec pipeline.Spec + lggr logger.Logger + saver ocrcommon.Saver + orm types.DataSourceORM + codec reportcodec.ReportCodec + feedID [32]byte + + mu sync.RWMutex + + chEnhancedTelem chan<- ocrcommon.EnhancedTelemetryMercuryData + mercuryChainReader mercury.ChainReader + fetcher Fetcher + initialBlockNumber *int64 + + insufficientBlocksCounter prometheus.Counter + zeroBlocksCounter prometheus.Counter +} + +var _ v1.DataSource = &datasource{} + +func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, s 
ocrcommon.Saver, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, mercuryChainReader mercury.ChainReader, fetcher Fetcher, initialBlockNumber *int64, feedID mercuryutils.FeedID) *datasource { + return &datasource{pr, jb, spec, lggr, s, orm, reportcodec.ReportCodec{}, feedID, sync.RWMutex{}, enhancedTelemChan, mercuryChainReader, fetcher, initialBlockNumber, insufficientBlocksCount.WithLabelValues(feedID.String()), zeroBlocksCount.WithLabelValues(feedID.String())} +} + +type ErrEmptyLatestReport struct { + Err error +} + +func (e ErrEmptyLatestReport) Unwrap() error { return e.Err } + +func (e ErrEmptyLatestReport) Error() string { + return fmt.Sprintf("FetchInitialMaxFinalizedBlockNumber returned empty LatestReport; this is a new feed. No initialBlockNumber was set, tried to use current block number to determine maxFinalizedBlockNumber but got error: %v", e.Err) +} + +func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestamp, fetchMaxFinalizedBlockNum bool) (obs v1types.Observation, pipelineExecutionErr error) { + // setLatestBlocks must come chronologically before observations, along + // with observationTimestamp, to avoid front-running + + // Errors are not expected when reading from the underlying ChainReader + if err := ds.setLatestBlocks(ctx, &obs); err != nil { + return obs, err + } + + var wg sync.WaitGroup + if fetchMaxFinalizedBlockNum { + wg.Add(1) + go func() { + defer wg.Done() + latest, dbErr := ds.orm.LatestReport(ctx, ds.feedID) + if dbErr != nil { + obs.MaxFinalizedBlockNumber.Err = dbErr + return + } + if latest != nil { + obs.MaxFinalizedBlockNumber.Val, obs.MaxFinalizedBlockNumber.Err = ds.codec.CurrentBlockNumFromReport(latest) + return + } + val, fetchErr := ds.fetcher.FetchInitialMaxFinalizedBlockNumber(ctx) + if fetchErr != nil { + obs.MaxFinalizedBlockNumber.Err = fetchErr + return + } + if val != nil { + obs.MaxFinalizedBlockNumber.Val = *val + return + } + if ds.initialBlockNumber == nil { + if 
obs.CurrentBlockNum.Err != nil { + obs.MaxFinalizedBlockNumber.Err = ErrEmptyLatestReport{Err: obs.CurrentBlockNum.Err} + } else { + // Subtract 1 here because we will later add 1 to the + // maxFinalizedBlockNumber to get the first validFromBlockNum, which + // ought to be the same as current block num. + obs.MaxFinalizedBlockNumber.Val = obs.CurrentBlockNum.Val - 1 + ds.lggr.Infof("FetchInitialMaxFinalizedBlockNumber returned empty LatestReport; this is a new feed so maxFinalizedBlockNumber=%d (initialBlockNumber unset, using currentBlockNum=%d-1)", obs.MaxFinalizedBlockNumber.Val, obs.CurrentBlockNum.Val) + } + } else { + // NOTE: It's important to subtract 1 if the server is missing any past + // report (brand new feed) since we will add 1 to the + // maxFinalizedBlockNumber to get the first validFromBlockNum, which + // ought to be zero. + // + // If "initialBlockNumber" is set to zero, this will give a starting block of zero. + obs.MaxFinalizedBlockNumber.Val = *ds.initialBlockNumber - 1 + ds.lggr.Infof("FetchInitialMaxFinalizedBlockNumber returned empty LatestReport; this is a new feed so maxFinalizedBlockNumber=%d (initialBlockNumber=%d)", obs.MaxFinalizedBlockNumber.Val, *ds.initialBlockNumber) + } + }() + } else { + obs.MaxFinalizedBlockNumber.Err = errors.New("fetchMaxFinalizedBlockNum=false") + } + var trrs pipeline.TaskRunResults + wg.Add(1) + go func() { + defer wg.Done() + var run *pipeline.Run + run, trrs, pipelineExecutionErr = ds.executeRun(ctx) + if pipelineExecutionErr != nil { + pipelineExecutionErr = fmt.Errorf("Observe failed while executing run: %w", pipelineExecutionErr) + return + } + + ds.saver.Save(run) + + // NOTE: trrs comes back as _all_ tasks, but we only want the terminal ones + // They are guaranteed to be sorted by index asc so should be in the correct order + var finaltrrs []pipeline.TaskRunResult + for _, trr := range trrs { + if trr.IsTerminal() { + finaltrrs = append(finaltrrs, trr) + } + } + + var parsed parseOutput + parsed, 
pipelineExecutionErr = ds.parse(finaltrrs) + if pipelineExecutionErr != nil { + pipelineExecutionErr = fmt.Errorf("Observe failed while parsing run results: %w", pipelineExecutionErr) + return + } + obs.BenchmarkPrice = parsed.benchmarkPrice + obs.Bid = parsed.bid + obs.Ask = parsed.ask + }() + + wg.Wait() + + if pipelineExecutionErr != nil { + return + } + + ocrcommon.MaybeEnqueueEnhancedTelem(ds.jb, ds.chEnhancedTelem, ocrcommon.EnhancedTelemetryMercuryData{ + V1Observation: &obs, + TaskRunResults: trrs, + RepTimestamp: repts, + FeedVersion: mercuryutils.REPORT_V1, + }) + + return obs, nil +} + +func toBigInt(val interface{}) (*big.Int, error) { + dec, err := utils.ToDecimal(val) + if err != nil { + return nil, err + } + return dec.BigInt(), nil +} + +type parseOutput struct { + benchmarkPrice mercury.ObsResult[*big.Int] + bid mercury.ObsResult[*big.Int] + ask mercury.ObsResult[*big.Int] +} + +// parse expects the output of observe to be three values, in the following order: +// 1. benchmark price +// 2. bid +// 3. 
ask +// +// returns error on parse errors: if something is the wrong type +func (ds *datasource) parse(trrs pipeline.TaskRunResults) (o parseOutput, merr error) { + var finaltrrs []pipeline.TaskRunResult + for _, trr := range trrs { + // only return terminal trrs from executeRun + if trr.IsTerminal() { + finaltrrs = append(finaltrrs, trr) + } + } + + // pipeline.TaskRunResults comes ordered asc by index, this is guaranteed + // by the pipeline executor + if len(finaltrrs) != 3 { + return o, fmt.Errorf("invalid number of results, expected: 3, got: %d", len(finaltrrs)) + } + merr = errors.Join( + setBenchmarkPrice(&o, finaltrrs[0].Result), + setBid(&o, finaltrrs[1].Result), + setAsk(&o, finaltrrs[2].Result), + ) + + return o, merr +} + +func setBenchmarkPrice(o *parseOutput, res pipeline.Result) error { + if res.Error != nil { + o.benchmarkPrice.Err = res.Error + } else if val, err := toBigInt(res.Value); err != nil { + return fmt.Errorf("failed to parse BenchmarkPrice: %w", err) + } else { + o.benchmarkPrice.Val = val + } + return nil +} + +func setBid(o *parseOutput, res pipeline.Result) error { + if res.Error != nil { + o.bid.Err = res.Error + } else if val, err := toBigInt(res.Value); err != nil { + return fmt.Errorf("failed to parse Bid: %w", err) + } else { + o.bid.Val = val + } + return nil +} + +func setAsk(o *parseOutput, res pipeline.Result) error { + if res.Error != nil { + o.ask.Err = res.Error + } else if val, err := toBigInt(res.Value); err != nil { + return fmt.Errorf("failed to parse Ask: %w", err) + } else { + o.ask.Val = val + } + return nil +} + +// The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod). +// Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod. 
+func (ds *datasource) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "jb": map[string]interface{}{ + "databaseID": ds.jb.ID, + "externalJobID": ds.jb.ExternalJobID, + "name": ds.jb.Name.ValueOrZero(), + }, + }) + + run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars, ds.lggr) + if err != nil { + return nil, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) + } + + return run, trrs, err +} + +func (ds *datasource) setLatestBlocks(ctx context.Context, obs *v1types.Observation) error { + latestBlocks, err := ds.mercuryChainReader.LatestHeads(ctx, nBlocksObservation) + + if err != nil { + ds.lggr.Errorw("failed to read latest blocks", "err", err) + return err + } + + if len(latestBlocks) < nBlocksObservation { + ds.insufficientBlocksCounter.Inc() + ds.lggr.Warnw("Insufficient blocks", "latestBlocks", latestBlocks, "lenLatestBlocks", len(latestBlocks), "nBlocksObservation", nBlocksObservation) + } + + // TODO: remove with https://smartcontract-it.atlassian.net/browse/BCF-2209 + if len(latestBlocks) == 0 { + obsErr := fmt.Errorf("no blocks available") + ds.zeroBlocksCounter.Inc() + obs.CurrentBlockNum.Err = obsErr + obs.CurrentBlockHash.Err = obsErr + obs.CurrentBlockTimestamp.Err = obsErr + } else { + obs.CurrentBlockNum.Val = int64(latestBlocks[0].Number) + obs.CurrentBlockHash.Val = latestBlocks[0].Hash + obs.CurrentBlockTimestamp.Val = latestBlocks[0].Timestamp + } + + for _, block := range latestBlocks { + obs.LatestBlocks = append( + obs.LatestBlocks, + v1types.NewBlock(int64(block.Number), block.Hash, block.Timestamp)) + } + + return nil +} diff --git a/core/services/relay/evm/mercury/v1/data_source_test.go b/core/services/relay/evm/mercury/v1/data_source_test.go new file mode 100644 index 00000000..553c9602 --- /dev/null +++ b/core/services/relay/evm/mercury/v1/data_source_test.go @@ -0,0 +1,470 @@ +package v1 + +import ( + 
"context" + "fmt" + "io" + "math/big" + "math/rand" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + mercurytypes "github.com/goplugin/plugin-common/pkg/types/mercury" + v1 "github.com/goplugin/plugin-common/pkg/types/mercury/v1" + + commonmocks "github.com/goplugin/pluginv3.0/v2/common/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + mercurymocks "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/mocks" + mercuryutils "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + reportcodecv1 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v1/reportcodec" +) + +var _ mercurytypes.ServerFetcher = &mockFetcher{} + +type mockFetcher struct { + num *int64 + err error +} + +func (m *mockFetcher) FetchInitialMaxFinalizedBlockNumber(context.Context) (*int64, error) { + return m.num, m.err +} + +func (m *mockFetcher) LatestPrice(ctx context.Context, feedID [32]byte) (*big.Int, error) { + return nil, nil +} + +func (m *mockFetcher) LatestTimestamp(context.Context) (int64, error) { + return 0, nil +} + +type mockSaver struct { + r *pipeline.Run +} + +func (ms *mockSaver) Save(r *pipeline.Run) { + ms.r = r +} + +type mockORM struct { + report []byte + err error +} + +func (m *mockORM) LatestReport(ctx context.Context, feedID [32]byte, qopts ...pg.QOpt) (report []byte, err error) { + 
return m.report, m.err +} + +type mockChainReader struct { + err error + obs []mercurytypes.Head +} + +func (m *mockChainReader) LatestHeads(context.Context, int) ([]mercurytypes.Head, error) { + return m.obs, m.err +} + +func TestMercury_Observe(t *testing.T) { + orm := &mockORM{} + lggr := logger.TestLogger(t) + ds := NewDataSource(orm, nil, job.Job{}, pipeline.Spec{}, lggr, nil, nil, nil, nil, nil, mercuryutils.FeedID{}) + ctx := testutils.Context(t) + repts := ocrtypes.ReportTimestamp{} + + fetcher := &mockFetcher{} + ds.fetcher = fetcher + + saver := &mockSaver{} + ds.saver = saver + + trrs := []pipeline.TaskRunResult{ + { + // benchmark price + Result: pipeline.Result{Value: "122.345"}, + Task: &mercurymocks.MockTask{}, + }, + { + // bid + Result: pipeline.Result{Value: "121.993"}, + Task: &mercurymocks.MockTask{}, + }, + { + // ask + Result: pipeline.Result{Value: "123.111"}, + Task: &mercurymocks.MockTask{}, + }, + } + + runner := &mercurymocks.MockRunner{ + Trrs: trrs, + } + ds.pipelineRunner = runner + + spec := pipeline.Spec{} + ds.spec = spec + + h := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t) + ds.mercuryChainReader = evm.NewMercuryChainReader(h) + + head := &evmtypes.Head{ + Number: int64(rand.Int31()), + Hash: utils.NewHash(), + Timestamp: time.Now(), + } + h.On("LatestChain").Return(head) + + t.Run("when fetchMaxFinalizedBlockNum=true", func(t *testing.T) { + t.Run("with latest report in database", func(t *testing.T) { + orm.report = buildSampleV1Report() + orm.err = nil + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.NoError(t, obs.MaxFinalizedBlockNumber.Err) + assert.Equal(t, int64(143), obs.MaxFinalizedBlockNumber.Val) + }) + t.Run("if querying latest report fails", func(t *testing.T) { + orm.report = nil + orm.err = errors.New("something exploded") + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "something exploded") 
+ assert.Zero(t, obs.MaxFinalizedBlockNumber.Val) + }) + t.Run("if decoding latest report fails", func(t *testing.T) { + orm.report = []byte{1, 2, 3} + orm.err = nil + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + assert.Zero(t, obs.MaxFinalizedBlockNumber.Val) + }) + + orm.report = nil + orm.err = nil + + t.Run("without latest report in database", func(t *testing.T) { + t.Run("if FetchInitialMaxFinalizedBlockNumber returns error", func(t *testing.T) { + fetcher.err = errors.New("mock fetcher error") + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "mock fetcher error") + assert.Zero(t, obs.MaxFinalizedBlockNumber.Val) + }) + t.Run("if FetchInitialMaxFinalizedBlockNumber succeeds", func(t *testing.T) { + fetcher.err = nil + var num int64 = 32 + fetcher.num = &num + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.NoError(t, obs.MaxFinalizedBlockNumber.Err) + assert.Equal(t, int64(32), obs.MaxFinalizedBlockNumber.Val) + }) + t.Run("if FetchInitialMaxFinalizedBlockNumber returns nil (new feed) and initialBlockNumber is set", func(t *testing.T) { + var initialBlockNumber int64 = 50 + ds.initialBlockNumber = &initialBlockNumber + fetcher.err = nil + fetcher.num = nil + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.NoError(t, obs.MaxFinalizedBlockNumber.Err) + assert.Equal(t, int64(49), obs.MaxFinalizedBlockNumber.Val) + }) + t.Run("if FetchInitialMaxFinalizedBlockNumber returns nil (new feed) and initialBlockNumber is not set", func(t *testing.T) { + ds.initialBlockNumber = nil + t.Run("if current block num is valid", func(t *testing.T) { + fetcher.err = nil + fetcher.num = nil + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + 
assert.NoError(t, obs.MaxFinalizedBlockNumber.Err) + assert.Equal(t, head.Number-1, obs.MaxFinalizedBlockNumber.Val) + }) + t.Run("if no current block available", func(t *testing.T) { + h2 := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t) + h2.On("LatestChain").Return((*evmtypes.Head)(nil)) + ds.mercuryChainReader = evm.NewMercuryChainReader(h2) + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "FetchInitialMaxFinalizedBlockNumber returned empty LatestReport; this is a new feed. No initialBlockNumber was set, tried to use current block number to determine maxFinalizedBlockNumber but got error: no blocks available") + }) + }) + }) + }) + + ds.mercuryChainReader = evm.NewMercuryChainReader(h) + + t.Run("when fetchMaxFinalizedBlockNum=false", func(t *testing.T) { + t.Run("when run execution fails, returns error", func(t *testing.T) { + t.Cleanup(func() { + runner.Err = nil + }) + runner.Err = errors.New("run execution failed") + + _, err := ds.Observe(ctx, repts, false) + assert.EqualError(t, err, "Observe failed while executing run: error executing run for spec ID 0: run execution failed") + }) + t.Run("makes observation using pipeline, when all tasks succeed", func(t *testing.T) { + obs, err := ds.Observe(ctx, repts, false) + assert.NoError(t, err) + + assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val) + assert.NoError(t, obs.BenchmarkPrice.Err) + assert.Equal(t, big.NewInt(121), obs.Bid.Val) + assert.NoError(t, obs.Bid.Err) + assert.Equal(t, big.NewInt(123), obs.Ask.Val) + assert.NoError(t, obs.Ask.Err) + assert.Equal(t, head.Number, obs.CurrentBlockNum.Val) + assert.NoError(t, obs.CurrentBlockNum.Err) + assert.Equal(t, fmt.Sprintf("%x", head.Hash), fmt.Sprintf("%x", obs.CurrentBlockHash.Val)) + assert.NoError(t, obs.CurrentBlockHash.Err) + assert.Equal(t, uint64(head.Timestamp.Unix()), obs.CurrentBlockTimestamp.Val) + assert.NoError(t, obs.CurrentBlockTimestamp.Err) + + 
assert.Zero(t, obs.MaxFinalizedBlockNumber.Val) + assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "fetchMaxFinalizedBlockNum=false") + }) + t.Run("makes observation using pipeline, with erroring tasks", func(t *testing.T) { + for i := range trrs { + trrs[i].Result.Error = fmt.Errorf("task error %d", i) + } + + obs, err := ds.Observe(ctx, repts, false) + assert.NoError(t, err) + + assert.Zero(t, obs.BenchmarkPrice.Val) + assert.EqualError(t, obs.BenchmarkPrice.Err, "task error 0") + assert.Zero(t, obs.Bid.Val) + assert.EqualError(t, obs.Bid.Err, "task error 1") + assert.Zero(t, obs.Ask.Val) + assert.EqualError(t, obs.Ask.Err, "task error 2") + assert.Equal(t, head.Number, obs.CurrentBlockNum.Val) + assert.NoError(t, obs.CurrentBlockNum.Err) + assert.Equal(t, fmt.Sprintf("%x", head.Hash), fmt.Sprintf("%x", obs.CurrentBlockHash.Val)) + assert.NoError(t, obs.CurrentBlockHash.Err) + assert.Equal(t, uint64(head.Timestamp.Unix()), obs.CurrentBlockTimestamp.Val) + assert.NoError(t, obs.CurrentBlockTimestamp.Err) + + assert.Zero(t, obs.MaxFinalizedBlockNumber.Val) + assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "fetchMaxFinalizedBlockNum=false") + }) + t.Run("makes partial observation using pipeline, if only some results have errored", func(t *testing.T) { + trrs[0].Result.Error = fmt.Errorf("task failed") + trrs[1].Result.Value = "33" + trrs[1].Result.Error = nil + trrs[2].Result.Value = nil + trrs[2].Result.Error = fmt.Errorf("task failed") + + obs, err := ds.Observe(ctx, repts, false) + assert.NoError(t, err) + + assert.Zero(t, obs.BenchmarkPrice.Val) + assert.EqualError(t, obs.BenchmarkPrice.Err, "task failed") + assert.Equal(t, big.NewInt(33), obs.Bid.Val) + assert.NoError(t, obs.Bid.Err) + assert.Zero(t, obs.Ask.Val) + assert.EqualError(t, obs.Ask.Err, "task failed") + }) + t.Run("returns error if at least one result is unparseable", func(t *testing.T) { + trrs[0].Result.Error = fmt.Errorf("task failed") + trrs[1].Result.Value = "foo" + 
trrs[1].Result.Error = nil + trrs[2].Result.Value = "123456" + trrs[2].Result.Error = nil + + _, err := ds.Observe(ctx, repts, false) + assert.EqualError(t, err, "Observe failed while parsing run results: failed to parse Bid: can't convert foo to decimal") + }) + t.Run("saves run", func(t *testing.T) { + for i := range trrs { + trrs[i].Result.Value = "123" + trrs[i].Result.Error = nil + } + + _, err := ds.Observe(ctx, repts, false) + assert.NoError(t, err) + + assert.Equal(t, int64(42), saver.r.ID) + }) + }) + + t.Run("LatestBlocks is populated correctly", func(t *testing.T) { + t.Run("when chain length is zero", func(t *testing.T) { + ht2 := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t) + ht2.On("LatestChain").Return((*evmtypes.Head)(nil)) + ds.mercuryChainReader = evm.NewMercuryChainReader(ht2) + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.Len(t, obs.LatestBlocks, 0) + + ht2.AssertExpectations(t) + }) + t.Run("when chain is too short", func(t *testing.T) { + h4 := &evmtypes.Head{ + Number: 4, + Parent: nil, + } + h5 := &evmtypes.Head{ + Number: 5, + Parent: h4, + } + h6 := &evmtypes.Head{ + Number: 6, + Parent: h5, + } + + ht2 := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t) + ht2.On("LatestChain").Return(h6) + ds.mercuryChainReader = evm.NewMercuryChainReader(ht2) + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.Len(t, obs.LatestBlocks, 3) + assert.Equal(t, 6, int(obs.LatestBlocks[0].Num)) + assert.Equal(t, 5, int(obs.LatestBlocks[1].Num)) + assert.Equal(t, 4, int(obs.LatestBlocks[2].Num)) + + ht2.AssertExpectations(t) + }) + t.Run("when chain is long enough", func(t *testing.T) { + heads := make([]*evmtypes.Head, nBlocksObservation+5) + for i := range heads { + heads[i] = &evmtypes.Head{Number: int64(i)} + if i > 0 { + heads[i].Parent = heads[i-1] + } + } + + ht2 := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t) + 
ht2.On("LatestChain").Return(heads[len(heads)-1]) + ds.mercuryChainReader = evm.NewMercuryChainReader(ht2) + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.Len(t, obs.LatestBlocks, nBlocksObservation) + highestBlockNum := heads[len(heads)-1].Number + for i := range obs.LatestBlocks { + assert.Equal(t, int(highestBlockNum)-i, int(obs.LatestBlocks[i].Num)) + } + + ht2.AssertExpectations(t) + }) + + t.Run("when chain reader returns an error", func(t *testing.T) { + ds.mercuryChainReader = &mockChainReader{ + err: io.EOF, + obs: nil, + } + + obs, err := ds.Observe(ctx, repts, true) + assert.Error(t, err) + assert.Equal(t, obs, v1.Observation{}) + }) + }) +} + +func TestMercury_SetLatestBlocks(t *testing.T) { + lggr := logger.TestLogger(t) + ds := NewDataSource(nil, nil, job.Job{}, pipeline.Spec{}, lggr, nil, nil, nil, nil, nil, mercuryutils.FeedID{}) + + h := evmtypes.Head{ + Number: testutils.NewRandomPositiveInt64(), + Hash: utils.NewHash(), + ParentHash: utils.NewHash(), + Timestamp: time.Now(), + BaseFeePerGas: assets.NewWeiI(testutils.NewRandomPositiveInt64()), + ReceiptsRoot: utils.NewHash(), + TransactionsRoot: utils.NewHash(), + StateRoot: utils.NewHash(), + } + + t.Run("returns head from headtracker if present", func(t *testing.T) { + headTracker := commonmocks.NewHeadTracker[*evmtypes.Head, common.Hash](t) + headTracker.On("LatestChain").Return(&h, nil) + ds.mercuryChainReader = evm.NewMercuryChainReader(headTracker) + + obs := v1.Observation{} + err := ds.setLatestBlocks(testutils.Context(t), &obs) + + assert.NoError(t, err) + assert.Equal(t, h.Number, obs.CurrentBlockNum.Val) + assert.Equal(t, h.Hash.Bytes(), obs.CurrentBlockHash.Val) + assert.Equal(t, uint64(h.Timestamp.Unix()), obs.CurrentBlockTimestamp.Val) + + assert.Len(t, obs.LatestBlocks, 1) + headTracker.AssertExpectations(t) + }) + + t.Run("if headtracker returns nil head", func(t *testing.T) { + headTracker := commonmocks.NewHeadTracker[*evmtypes.Head, 
common.Hash](t) + // This can happen in some cases e.g. RPC node is offline + headTracker.On("LatestChain").Return((*evmtypes.Head)(nil)) + ds.mercuryChainReader = evm.NewChainReader(headTracker) + obs := v1.Observation{} + err := ds.setLatestBlocks(testutils.Context(t), &obs) + + assert.NoError(t, err) + assert.Zero(t, obs.CurrentBlockNum.Val) + assert.Zero(t, obs.CurrentBlockHash.Val) + assert.Zero(t, obs.CurrentBlockTimestamp.Val) + assert.EqualError(t, obs.CurrentBlockNum.Err, "no blocks available") + assert.EqualError(t, obs.CurrentBlockHash.Err, "no blocks available") + assert.EqualError(t, obs.CurrentBlockTimestamp.Err, "no blocks available") + + assert.Len(t, obs.LatestBlocks, 0) + headTracker.AssertExpectations(t) + }) +} + +var sampleFeedID = [32]uint8{28, 145, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114} + +func buildSampleV1Report() []byte { + feedID := sampleFeedID + timestamp := uint32(42) + bp := big.NewInt(242) + bid := big.NewInt(243) + ask := big.NewInt(244) + currentBlockNumber := uint64(143) + currentBlockHash := utils.NewHash() + currentBlockTimestamp := uint64(123) + validFromBlockNum := uint64(142) + + b, err := reportcodecv1.ReportTypes.Pack(feedID, timestamp, bp, bid, ask, currentBlockNumber, currentBlockHash, currentBlockTimestamp, validFromBlockNum) + if err != nil { + panic(err) + } + return b +} diff --git a/core/services/relay/evm/mercury/v1/reportcodec/report_codec.go b/core/services/relay/evm/mercury/v1/reportcodec/report_codec.go new file mode 100644 index 00000000..eea7ea87 --- /dev/null +++ b/core/services/relay/evm/mercury/v1/reportcodec/report_codec.go @@ -0,0 +1,100 @@ +package reportcodec + +import ( + "errors" + "fmt" + "math" + "math/big" + + "github.com/ethereum/go-ethereum/common" + pkgerrors "github.com/pkg/errors" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + v1 
"github.com/goplugin/plugin-common/pkg/types/mercury/v1" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + reporttypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v1/types" +) + +// NOTE: +// This report codec is based on the original median evmreportcodec +// here: +// https://github.com/goplugin/offchain-reporting/blob/master/lib/offchainreporting2/reportingplugin/median/evmreportcodec/reportcodec.go +var ReportTypes = reporttypes.GetSchema() +var maxReportLength = 32 * len(ReportTypes) // each arg is 256 bit EVM word + +var _ v1.ReportCodec = &ReportCodec{} + +type ReportCodec struct { + logger logger.Logger + feedID utils.FeedID +} + +func NewReportCodec(feedID [32]byte, lggr logger.Logger) *ReportCodec { + return &ReportCodec{lggr, feedID} +} + +func (r *ReportCodec) BuildReport(rf v1.ReportFields) (ocrtypes.Report, error) { + var merr error + if rf.BenchmarkPrice == nil { + merr = errors.Join(merr, errors.New("benchmarkPrice may not be nil")) + } + if rf.Bid == nil { + merr = errors.Join(merr, errors.New("bid may not be nil")) + } + if rf.Ask == nil { + merr = errors.Join(merr, errors.New("ask may not be nil")) + } + if len(rf.CurrentBlockHash) != 32 { + merr = errors.Join(merr, fmt.Errorf("invalid length for currentBlockHash, expected: 32, got: %d", len(rf.CurrentBlockHash))) + } + if merr != nil { + return nil, merr + } + var currentBlockHash common.Hash + copy(currentBlockHash[:], rf.CurrentBlockHash) + + reportBytes, err := ReportTypes.Pack(r.feedID, rf.Timestamp, rf.BenchmarkPrice, rf.Bid, rf.Ask, uint64(rf.CurrentBlockNum), currentBlockHash, uint64(rf.ValidFromBlockNum), rf.CurrentBlockTimestamp) + return ocrtypes.Report(reportBytes), pkgerrors.Wrap(err, "failed to pack report blob") +} + +// Maximum length in bytes of Report returned by BuildReport. Used for +// defending against spam attacks. 
+func (r *ReportCodec) MaxReportLength(n int) (int, error) { + return maxReportLength, nil +} + +func (r *ReportCodec) CurrentBlockNumFromReport(report ocrtypes.Report) (int64, error) { + decoded, err := r.Decode(report) + if err != nil { + return 0, err + } + if decoded.CurrentBlockNum > math.MaxInt64 { + return 0, fmt.Errorf("CurrentBlockNum=%d overflows max int64", decoded.CurrentBlockNum) + } + return int64(decoded.CurrentBlockNum), nil +} + +func (r *ReportCodec) ValidFromBlockNumFromReport(report ocrtypes.Report) (int64, error) { + decoded, err := r.Decode(report) + if err != nil { + return 0, err + } + if decoded.ValidFromBlockNum > math.MaxInt64 { + return 0, fmt.Errorf("ValidFromBlockNum=%d overflows max int64", decoded.ValidFromBlockNum) + } + return int64(decoded.ValidFromBlockNum), nil +} + +func (r *ReportCodec) Decode(report ocrtypes.Report) (*reporttypes.Report, error) { + return reporttypes.Decode(report) +} + +func (r *ReportCodec) BenchmarkPriceFromReport(report ocrtypes.Report) (*big.Int, error) { + decoded, err := r.Decode(report) + if err != nil { + return nil, err + } + return decoded.BenchmarkPrice, nil +} diff --git a/core/services/relay/evm/mercury/v1/reportcodec/report_codec_test.go b/core/services/relay/evm/mercury/v1/reportcodec/report_codec_test.go new file mode 100644 index 00000000..74c0f1c3 --- /dev/null +++ b/core/services/relay/evm/mercury/v1/reportcodec/report_codec_test.go @@ -0,0 +1,181 @@ +package reportcodec + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/offchainreporting2plus/types" + + v1 "github.com/goplugin/plugin-common/pkg/types/mercury/v1" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" +) + +var hash = hexutil.MustDecode("0x552c2cea3ab43bae137d89ee6142a01db3ae2b5678bc3c9bd5f509f537bea57b") + +func 
newValidReportFields() v1.ReportFields { + return v1.ReportFields{ + Timestamp: 242, + BenchmarkPrice: big.NewInt(243), + Bid: big.NewInt(244), + Ask: big.NewInt(245), + CurrentBlockNum: 248, + CurrentBlockHash: hash, + ValidFromBlockNum: 46, + CurrentBlockTimestamp: 123, + } +} + +func Test_ReportCodec(t *testing.T) { + r := ReportCodec{} + + t.Run("BuildReport errors on zero fields", func(t *testing.T) { + _, err := r.BuildReport(v1.ReportFields{}) + require.Error(t, err) + assert.Contains(t, err.Error(), "benchmarkPrice may not be nil") + assert.Contains(t, err.Error(), "bid may not be nil") + assert.Contains(t, err.Error(), "ask may not be nil") + assert.Contains(t, err.Error(), "invalid length for currentBlockHash, expected: 32, got: 0") + }) + + t.Run("BuildReport constructs a report from observations", func(t *testing.T) { + rf := newValidReportFields() + // only need to test happy path since validations are done in relaymercury + + report, err := r.BuildReport(rf) + require.NoError(t, err) + + reportElems := make(map[string]interface{}) + err = ReportTypes.UnpackIntoMap(reportElems, report) + require.NoError(t, err) + + assert.Equal(t, int(reportElems["observationsTimestamp"].(uint32)), 242) + assert.Equal(t, reportElems["benchmarkPrice"].(*big.Int).Int64(), int64(243)) + assert.Equal(t, reportElems["bid"].(*big.Int).Int64(), int64(244)) + assert.Equal(t, reportElems["ask"].(*big.Int).Int64(), int64(245)) + assert.Equal(t, reportElems["currentBlockNum"].(uint64), uint64(248)) + assert.Equal(t, common.Hash(reportElems["currentBlockHash"].([32]byte)), common.BytesToHash(hash)) + assert.Equal(t, reportElems["currentBlockTimestamp"].(uint64), uint64(123)) + assert.Equal(t, reportElems["validFromBlockNum"].(uint64), uint64(46)) + + assert.Equal(t, types.Report{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf8, 0x55, 0x2c, 0x2c, 0xea, 0x3a, 0xb4, 0x3b, 0xae, 0x13, 0x7d, 0x89, 0xee, 0x61, 0x42, 0xa0, 0x1d, 0xb3, 0xae, 0x2b, 0x56, 0x78, 0xbc, 0x3c, 0x9b, 0xd5, 0xf5, 0x9, 0xf5, 0x37, 0xbe, 0xa5, 0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b}, report) + + max, err := r.MaxReportLength(4) + require.NoError(t, err) + assert.LessOrEqual(t, len(report), max) + + t.Run("Decode decodes the report", func(t *testing.T) { + decoded, err := r.Decode(report) + require.NoError(t, err) + + require.NotNil(t, decoded) + + assert.Equal(t, uint32(242), decoded.ObservationsTimestamp) + assert.Equal(t, big.NewInt(243), decoded.BenchmarkPrice) + assert.Equal(t, big.NewInt(244), decoded.Bid) + assert.Equal(t, big.NewInt(245), decoded.Ask) + assert.Equal(t, uint64(248), decoded.CurrentBlockNum) + assert.Equal(t, [32]byte(common.BytesToHash(hash)), decoded.CurrentBlockHash) + assert.Equal(t, uint64(123), decoded.CurrentBlockTimestamp) + assert.Equal(t, uint64(46), 
decoded.ValidFromBlockNum) + }) + }) + + t.Run("Decode errors on invalid report", func(t *testing.T) { + _, err := r.Decode([]byte{1, 2, 3}) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + + longBad := make([]byte, 64) + for i := 0; i < len(longBad); i++ { + longBad[i] = byte(i) + } + _, err = r.Decode(longBad) + assert.EqualError(t, err, "failed to decode report: abi: improperly encoded uint32 value") + }) +} + +func buildSampleReport(bn, validFromBn int64, feedID [32]byte) []byte { + timestamp := uint32(42) + bp := big.NewInt(242) + bid := big.NewInt(243) + ask := big.NewInt(244) + currentBlockNumber := uint64(bn) + currentBlockHash := utils.NewHash() + currentBlockTimestamp := uint64(123) + validFromBlockNum := uint64(validFromBn) + + b, err := ReportTypes.Pack(feedID, timestamp, bp, bid, ask, currentBlockNumber, currentBlockHash, validFromBlockNum, currentBlockTimestamp) + if err != nil { + panic(err) + } + return b +} + +func Test_ReportCodec_CurrentBlockNumFromReport(t *testing.T) { + r := ReportCodec{} + feedID := utils.NewHash() + + var validBn int64 = 42 + var invalidBn int64 = -1 + + t.Run("CurrentBlockNumFromReport extracts the current block number from a valid report", func(t *testing.T) { + report := buildSampleReport(validBn, 143, feedID) + + bn, err := r.CurrentBlockNumFromReport(report) + require.NoError(t, err) + + assert.Equal(t, validBn, bn) + }) + t.Run("CurrentBlockNumFromReport returns error if block num is too large", func(t *testing.T) { + report := buildSampleReport(invalidBn, 143, feedID) + + _, err := r.CurrentBlockNumFromReport(report) + require.Error(t, err) + + assert.Contains(t, err.Error(), "CurrentBlockNum=18446744073709551615 overflows max int64") + }) +} +func Test_ReportCodec_ValidFromBlockNumFromReport(t *testing.T) { + r := ReportCodec{} + feedID := utils.NewHash() + + t.Run("ValidFromBlockNumFromReport extracts the valid from block number from a 
valid report", func(t *testing.T) { + report := buildSampleReport(42, 999, feedID) + + bn, err := r.ValidFromBlockNumFromReport(report) + require.NoError(t, err) + + assert.Equal(t, int64(999), bn) + }) + t.Run("ValidFromBlockNumFromReport returns error if valid from block number is too large", func(t *testing.T) { + report := buildSampleReport(42, -1, feedID) + + _, err := r.ValidFromBlockNumFromReport(report) + require.Error(t, err) + + assert.Contains(t, err.Error(), "ValidFromBlockNum=18446744073709551615 overflows max int64") + }) +} + +func Test_ReportCodec_BenchmarkPriceFromReport(t *testing.T) { + r := ReportCodec{} + feedID := utils.NewHash() + + t.Run("BenchmarkPriceFromReport extracts the benchmark price from valid report", func(t *testing.T) { + report := buildSampleReport(42, 999, feedID) + + bp, err := r.BenchmarkPriceFromReport(report) + require.NoError(t, err) + + assert.Equal(t, big.NewInt(242), bp) + }) + t.Run("BenchmarkPriceFromReport errors on invalid report", func(t *testing.T) { + _, err := r.BenchmarkPriceFromReport([]byte{1, 2, 3}) + require.Error(t, err) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + }) +} diff --git a/core/services/relay/evm/mercury/v1/types/types.go b/core/services/relay/evm/mercury/v1/types/types.go new file mode 100644 index 00000000..709fd856 --- /dev/null +++ b/core/services/relay/evm/mercury/v1/types/types.go @@ -0,0 +1,56 @@ +package reporttypes + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +var schema = GetSchema() + +func GetSchema() abi.Arguments { + mustNewType := func(t string) abi.Type { + result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{}) + if err != nil { + panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err)) + } + return result + } + return abi.Arguments([]abi.Argument{ + {Name: "feedId", Type: mustNewType("bytes32")}, + {Name: "observationsTimestamp", Type: 
mustNewType("uint32")}, + {Name: "benchmarkPrice", Type: mustNewType("int192")}, + {Name: "bid", Type: mustNewType("int192")}, + {Name: "ask", Type: mustNewType("int192")}, + {Name: "currentBlockNum", Type: mustNewType("uint64")}, + {Name: "currentBlockHash", Type: mustNewType("bytes32")}, + {Name: "validFromBlockNum", Type: mustNewType("uint64")}, + {Name: "currentBlockTimestamp", Type: mustNewType("uint64")}, + }) +} + +type Report struct { + FeedId [32]byte + ObservationsTimestamp uint32 + BenchmarkPrice *big.Int + Bid *big.Int + Ask *big.Int + CurrentBlockNum uint64 + CurrentBlockHash [32]byte + ValidFromBlockNum uint64 + CurrentBlockTimestamp uint64 +} + +// Decode is made available to external users (i.e. mercury server) +func Decode(report []byte) (*Report, error) { + values, err := schema.Unpack(report) + if err != nil { + return nil, fmt.Errorf("failed to decode report: %w", err) + } + decoded := new(Report) + if err = schema.Copy(decoded, values); err != nil { + return nil, fmt.Errorf("failed to copy report values to struct: %w", err) + } + return decoded, nil +} diff --git a/core/services/relay/evm/mercury/v2/data_source.go b/core/services/relay/evm/mercury/v2/data_source.go new file mode 100644 index 00000000..74545cd8 --- /dev/null +++ b/core/services/relay/evm/mercury/v2/data_source.go @@ -0,0 +1,237 @@ +package v2 + +import ( + "context" + "fmt" + "math/big" + "sync" + + pkgerrors "github.com/pkg/errors" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/types/mercury" + v2types "github.com/goplugin/plugin-common/pkg/types/mercury/v2" + v2 "github.com/goplugin/plugin-data-streams/mercury/v2" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + 
"github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/types" + mercurytypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/types" + mercuryutils "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v2/reportcodec" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type Runner interface { + ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) +} + +type LatestReportFetcher interface { + LatestPrice(ctx context.Context, feedID [32]byte) (*big.Int, error) + LatestTimestamp(context.Context) (int64, error) +} + +type datasource struct { + pipelineRunner Runner + jb job.Job + spec pipeline.Spec + feedID mercuryutils.FeedID + lggr logger.Logger + saver ocrcommon.Saver + orm types.DataSourceORM + codec reportcodec.ReportCodec + + fetcher LatestReportFetcher + linkFeedID mercuryutils.FeedID + nativeFeedID mercuryutils.FeedID + + mu sync.RWMutex + + chEnhancedTelem chan<- ocrcommon.EnhancedTelemetryMercuryData +} + +var _ v2.DataSource = &datasource{} + +func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, feedID mercuryutils.FeedID, lggr logger.Logger, s ocrcommon.Saver, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, fetcher LatestReportFetcher, linkFeedID, nativeFeedID mercuryutils.FeedID) *datasource { + return &datasource{pr, jb, spec, feedID, lggr, s, orm, reportcodec.ReportCodec{}, fetcher, linkFeedID, nativeFeedID, sync.RWMutex{}, enhancedTelemChan} +} + +func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestamp, fetchMaxFinalizedTimestamp bool) (obs v2types.Observation, pipelineExecutionErr error) { + var wg sync.WaitGroup + ctx, cancel := context.WithCancel(ctx) + + if fetchMaxFinalizedTimestamp { + wg.Add(1) + go func() { + defer wg.Done() + latest, dbErr := 
ds.orm.LatestReport(ctx, ds.feedID) + if dbErr != nil { + obs.MaxFinalizedTimestamp.Err = dbErr + return + } + if latest != nil { + maxFinalizedBlockNumber, decodeErr := ds.codec.ObservationTimestampFromReport(latest) + obs.MaxFinalizedTimestamp.Val, obs.MaxFinalizedTimestamp.Err = int64(maxFinalizedBlockNumber), decodeErr + return + } + obs.MaxFinalizedTimestamp.Val, obs.MaxFinalizedTimestamp.Err = ds.fetcher.LatestTimestamp(ctx) + }() + } + + var trrs pipeline.TaskRunResults + wg.Add(1) + go func() { + defer wg.Done() + var run *pipeline.Run + run, trrs, pipelineExecutionErr = ds.executeRun(ctx) + if pipelineExecutionErr != nil { + cancel() + pipelineExecutionErr = fmt.Errorf("Observe failed while executing run: %w", pipelineExecutionErr) + return + } + + ds.saver.Save(run) + + var parsed parseOutput + parsed, pipelineExecutionErr = ds.parse(trrs) + if pipelineExecutionErr != nil { + cancel() + // This is not expected under normal circumstances + ds.lggr.Errorw("Observe failed while parsing run results", "err", pipelineExecutionErr) + pipelineExecutionErr = fmt.Errorf("Observe failed while parsing run results: %w", pipelineExecutionErr) + return + } + obs.BenchmarkPrice = parsed.benchmarkPrice + }() + + var isLink, isNative bool + if ds.feedID == ds.linkFeedID { + isLink = true + } else { + wg.Add(1) + go func() { + defer wg.Done() + obs.LinkPrice.Val, obs.LinkPrice.Err = ds.fetcher.LatestPrice(ctx, ds.linkFeedID) + if obs.LinkPrice.Val == nil && obs.LinkPrice.Err == nil { + mercurytypes.PriceFeedMissingCount.WithLabelValues(ds.linkFeedID.String()).Inc() + ds.lggr.Warnw(fmt.Sprintf("Mercury server was missing PLI feed, using sentinel value of %s", v2.MissingPrice), "linkFeedID", ds.linkFeedID) + obs.LinkPrice.Val = v2.MissingPrice + } else if obs.LinkPrice.Err != nil { + mercurytypes.PriceFeedErrorCount.WithLabelValues(ds.linkFeedID.String()).Inc() + ds.lggr.Errorw("Mercury server returned error querying PLI price feed", "err", obs.LinkPrice.Err, "linkFeedID", 
ds.linkFeedID) + } + }() + } + + if ds.feedID == ds.nativeFeedID { + isNative = true + } else { + wg.Add(1) + go func() { + defer wg.Done() + obs.NativePrice.Val, obs.NativePrice.Err = ds.fetcher.LatestPrice(ctx, ds.nativeFeedID) + if obs.NativePrice.Val == nil && obs.NativePrice.Err == nil { + mercurytypes.PriceFeedMissingCount.WithLabelValues(ds.nativeFeedID.String()).Inc() + ds.lggr.Warnw(fmt.Sprintf("Mercury server was missing native feed, using sentinel value of %s", v2.MissingPrice), "nativeFeedID", ds.nativeFeedID) + obs.NativePrice.Val = v2.MissingPrice + } else if obs.NativePrice.Err != nil { + mercurytypes.PriceFeedErrorCount.WithLabelValues(ds.nativeFeedID.String()).Inc() + ds.lggr.Errorw("Mercury server returned error querying native price feed", "err", obs.NativePrice.Err, "nativeFeedID", ds.nativeFeedID) + } + }() + } + + wg.Wait() + cancel() + + if pipelineExecutionErr != nil { + return + } + + if isLink || isNative { + // run has now completed so it is safe to use benchmark price + if isLink { + // This IS the PLI feed, use our observed price + obs.LinkPrice.Val, obs.LinkPrice.Err = obs.BenchmarkPrice.Val, obs.BenchmarkPrice.Err + } + if isNative { + // This IS the native feed, use our observed price + obs.NativePrice.Val, obs.NativePrice.Err = obs.BenchmarkPrice.Val, obs.BenchmarkPrice.Err + } + } + + ocrcommon.MaybeEnqueueEnhancedTelem(ds.jb, ds.chEnhancedTelem, ocrcommon.EnhancedTelemetryMercuryData{ + V2Observation: &obs, + TaskRunResults: trrs, + RepTimestamp: repts, + FeedVersion: mercuryutils.REPORT_V2, + FetchMaxFinalizedTimestamp: fetchMaxFinalizedTimestamp, + IsLinkFeed: isLink, + IsNativeFeed: isNative, + }) + + return obs, nil +} + +func toBigInt(val interface{}) (*big.Int, error) { + dec, err := utils.ToDecimal(val) + if err != nil { + return nil, err + } + return dec.BigInt(), nil +} + +type parseOutput struct { + benchmarkPrice mercury.ObsResult[*big.Int] +} + +func (ds *datasource) parse(trrs pipeline.TaskRunResults) (o parseOutput, 
merr error) { + var finaltrrs []pipeline.TaskRunResult + for _, trr := range trrs { + // only return terminal trrs from executeRun + if trr.IsTerminal() { + finaltrrs = append(finaltrrs, trr) + } + } + + if len(finaltrrs) != 1 { + return o, fmt.Errorf("invalid number of results, expected: 1, got: %d", len(finaltrrs)) + } + + return o, setBenchmarkPrice(&o, finaltrrs[0].Result) +} + +func setBenchmarkPrice(o *parseOutput, res pipeline.Result) error { + if res.Error != nil { + o.benchmarkPrice.Err = res.Error + return res.Error + } + val, err := toBigInt(res.Value) + if err != nil { + return fmt.Errorf("failed to parse BenchmarkPrice: %w", err) + } + o.benchmarkPrice.Val = val + return nil +} + +// The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod). +// Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod. +func (ds *datasource) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "jb": map[string]interface{}{ + "databaseID": ds.jb.ID, + "externalJobID": ds.jb.ExternalJobID, + "name": ds.jb.Name.ValueOrZero(), + }, + }) + + run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars, ds.lggr) + if err != nil { + return nil, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) + } + + return run, trrs, err +} diff --git a/core/services/relay/evm/mercury/v2/data_source_test.go b/core/services/relay/evm/mercury/v2/data_source_test.go new file mode 100644 index 00000000..575801a6 --- /dev/null +++ b/core/services/relay/evm/mercury/v2/data_source_test.go @@ -0,0 +1,307 @@ +package v2 + +import ( + "context" + "math/big" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/types/mercury" + v2 
"github.com/goplugin/plugin-data-streams/mercury/v2" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + mercurymocks "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + reportcodecv2 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v2/reportcodec" +) + +var _ mercury.ServerFetcher = &mockFetcher{} + +type mockFetcher struct { + ts int64 + tsErr error + linkPrice *big.Int + linkPriceErr error + nativePrice *big.Int + nativePriceErr error +} + +var feedId utils.FeedID = [32]byte{1} +var linkFeedId utils.FeedID = [32]byte{2} +var nativeFeedId utils.FeedID = [32]byte{3} + +func (m *mockFetcher) FetchInitialMaxFinalizedBlockNumber(context.Context) (*int64, error) { + return nil, nil +} + +func (m *mockFetcher) LatestPrice(ctx context.Context, fId [32]byte) (*big.Int, error) { + if fId == linkFeedId { + return m.linkPrice, m.linkPriceErr + } else if fId == nativeFeedId { + return m.nativePrice, m.nativePriceErr + } + return nil, nil +} + +func (m *mockFetcher) LatestTimestamp(context.Context) (int64, error) { + return m.ts, m.tsErr +} + +type mockORM struct { + report []byte + err error +} + +func (m *mockORM) LatestReport(ctx context.Context, feedID [32]byte, qopts ...pg.QOpt) (report []byte, err error) { + return m.report, m.err +} + +type mockSaver struct { + r *pipeline.Run +} + +func (ms *mockSaver) Save(r *pipeline.Run) { + ms.r = r +} + +func Test_Datasource(t *testing.T) { + orm := &mockORM{} + ds := &datasource{orm: orm, lggr: logger.TestLogger(t)} + ctx := testutils.Context(t) + repts := ocrtypes.ReportTimestamp{} + + fetcher := &mockFetcher{} + ds.fetcher = fetcher + + saver := &mockSaver{} + ds.saver = saver + + goodTrrs := []pipeline.TaskRunResult{ + { + // bp + 
Result: pipeline.Result{Value: "122.345"}, + Task: &mercurymocks.MockTask{}, + }, + } + + ds.pipelineRunner = &mercurymocks.MockRunner{ + Trrs: goodTrrs, + } + + spec := pipeline.Spec{} + ds.spec = spec + + t.Run("when fetchMaxFinalizedTimestamp=true", func(t *testing.T) { + t.Run("with latest report in database", func(t *testing.T) { + orm.report = buildSampleV2Report() + orm.err = nil + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.NoError(t, obs.MaxFinalizedTimestamp.Err) + assert.Equal(t, int64(124), obs.MaxFinalizedTimestamp.Val) + }) + t.Run("if querying latest report fails", func(t *testing.T) { + orm.report = nil + orm.err = errors.New("something exploded") + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "something exploded") + assert.Zero(t, obs.MaxFinalizedTimestamp.Val) + }) + t.Run("if codec fails to decode", func(t *testing.T) { + orm.report = []byte{1, 2, 3} + orm.err = nil + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + assert.Zero(t, obs.MaxFinalizedTimestamp.Val) + }) + + orm.report = nil + orm.err = nil + + t.Run("if LatestTimestamp returns error", func(t *testing.T) { + fetcher.tsErr = errors.New("some error") + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "some error") + assert.Zero(t, obs.MaxFinalizedTimestamp.Val) + }) + + t.Run("if LatestTimestamp succeeds", func(t *testing.T) { + fetcher.tsErr = nil + fetcher.ts = 123 + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.Equal(t, int64(123), obs.MaxFinalizedTimestamp.Val) + assert.NoError(t, obs.MaxFinalizedTimestamp.Err) + }) + + t.Run("if LatestTimestamp succeeds but ts=0 (new feed)", func(t *testing.T) 
{ + fetcher.tsErr = nil + fetcher.ts = 0 + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.NoError(t, obs.MaxFinalizedTimestamp.Err) + assert.Zero(t, obs.MaxFinalizedTimestamp.Val) + }) + + t.Run("when run execution succeeded", func(t *testing.T) { + t.Run("when feedId=linkFeedID=nativeFeedId", func(t *testing.T) { + t.Cleanup(func() { + ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, linkFeedId, nativeFeedId + }) + + ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, feedId, feedId + + fetcher.ts = 123123 + fetcher.tsErr = nil + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val) + assert.NoError(t, obs.BenchmarkPrice.Err) + assert.Equal(t, int64(123123), obs.MaxFinalizedTimestamp.Val) + assert.NoError(t, obs.MaxFinalizedTimestamp.Err) + assert.Equal(t, big.NewInt(122), obs.LinkPrice.Val) + assert.NoError(t, obs.LinkPrice.Err) + assert.Equal(t, big.NewInt(122), obs.NativePrice.Val) + assert.NoError(t, obs.NativePrice.Err) + }) + }) + }) + + t.Run("when fetchMaxFinalizedTimestamp=false", func(t *testing.T) { + t.Run("when run execution fails, returns error", func(t *testing.T) { + t.Cleanup(func() { + ds.pipelineRunner = &mercurymocks.MockRunner{ + Trrs: goodTrrs, + Err: nil, + } + }) + + ds.pipelineRunner = &mercurymocks.MockRunner{ + Trrs: goodTrrs, + Err: errors.New("run execution failed"), + } + + _, err := ds.Observe(ctx, repts, false) + assert.EqualError(t, err, "Observe failed while executing run: error executing run for spec ID 0: run execution failed") + }) + + t.Run("when parsing run results fails, return error", func(t *testing.T) { + t.Cleanup(func() { + runner := &mercurymocks.MockRunner{ + Trrs: goodTrrs, + Err: nil, + } + ds.pipelineRunner = runner + }) + + badTrrs := []pipeline.TaskRunResult{ + { + // benchmark price + Result: pipeline.Result{Error: errors.New("some error with bp")}, + Task: &mercurymocks.MockTask{}, + }, + } + + 
ds.pipelineRunner = &mercurymocks.MockRunner{ + Trrs: badTrrs, + Err: nil, + } + + _, err := ds.Observe(ctx, repts, false) + assert.EqualError(t, err, "Observe failed while parsing run results: some error with bp") + }) + + t.Run("when run execution succeeded", func(t *testing.T) { + t.Run("when feedId=linkFeedID=nativeFeedId", func(t *testing.T) { + t.Cleanup(func() { + ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, linkFeedId, nativeFeedId + }) + + var feedId utils.FeedID = [32]byte{1} + ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, feedId, feedId + + obs, err := ds.Observe(ctx, repts, false) + assert.NoError(t, err) + + assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val) + assert.NoError(t, obs.BenchmarkPrice.Err) + assert.Equal(t, int64(0), obs.MaxFinalizedTimestamp.Val) + assert.NoError(t, obs.MaxFinalizedTimestamp.Err) + assert.Equal(t, big.NewInt(122), obs.LinkPrice.Val) + assert.NoError(t, obs.LinkPrice.Err) + assert.Equal(t, big.NewInt(122), obs.NativePrice.Val) + assert.NoError(t, obs.NativePrice.Err) + }) + + t.Run("when fails to fetch linkPrice or nativePrice", func(t *testing.T) { + t.Cleanup(func() { + fetcher.linkPriceErr = nil + fetcher.nativePriceErr = nil + }) + + fetcher.linkPriceErr = errors.New("some error fetching link price") + fetcher.nativePriceErr = errors.New("some error fetching native price") + + obs, err := ds.Observe(ctx, repts, false) + assert.NoError(t, err) + + assert.Nil(t, obs.LinkPrice.Val) + assert.EqualError(t, obs.LinkPrice.Err, "some error fetching link price") + assert.Nil(t, obs.NativePrice.Val) + assert.EqualError(t, obs.NativePrice.Err, "some error fetching native price") + }) + + t.Run("when succeeds to fetch linkPrice or nativePrice but got nil (new feed)", func(t *testing.T) { + obs, err := ds.Observe(ctx, repts, false) + assert.NoError(t, err) + + assert.Equal(t, obs.LinkPrice.Val, v2.MissingPrice) + assert.Nil(t, obs.LinkPrice.Err) + assert.Equal(t, obs.NativePrice.Val, v2.MissingPrice) + 
assert.Nil(t, obs.NativePrice.Err) + }) + }) + }) +} + +var sampleFeedID = [32]uint8{28, 145, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114} + +func buildSampleV2Report() []byte { + feedID := sampleFeedID + timestamp := uint32(124) + bp := big.NewInt(242) + validFromTimestamp := uint32(123) + expiresAt := uint32(456) + linkFee := big.NewInt(3334455) + nativeFee := big.NewInt(556677) + + b, err := reportcodecv2.ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp) + if err != nil { + panic(err) + } + return b +} diff --git a/core/services/relay/evm/mercury/v2/reportcodec/report_codec.go b/core/services/relay/evm/mercury/v2/reportcodec/report_codec.go new file mode 100644 index 00000000..782b1acc --- /dev/null +++ b/core/services/relay/evm/mercury/v2/reportcodec/report_codec.go @@ -0,0 +1,78 @@ +package reportcodec + +import ( + "errors" + "fmt" + "math/big" + + pkgerrors "github.com/pkg/errors" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + v2 "github.com/goplugin/plugin-common/pkg/types/mercury/v2" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + reporttypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v2/types" +) + +var ReportTypes = reporttypes.GetSchema() +var maxReportLength = 32 * len(ReportTypes) // each arg is 256 bit EVM word +var zero = big.NewInt(0) + +var _ v2.ReportCodec = &ReportCodec{} + +type ReportCodec struct { + logger logger.Logger + feedID utils.FeedID +} + +func NewReportCodec(feedID [32]byte, lggr logger.Logger) *ReportCodec { + return &ReportCodec{lggr, feedID} +} + +func (r *ReportCodec) BuildReport(rf v2.ReportFields) (ocrtypes.Report, error) { + var merr error + if rf.BenchmarkPrice == nil { + merr = errors.Join(merr, errors.New("benchmarkPrice may not be nil")) + } + if rf.LinkFee 
== nil { + merr = errors.Join(merr, errors.New("linkFee may not be nil")) + } else if rf.LinkFee.Cmp(zero) < 0 { + merr = errors.Join(merr, fmt.Errorf("linkFee may not be negative (got: %s)", rf.LinkFee)) + } + if rf.NativeFee == nil { + merr = errors.Join(merr, errors.New("nativeFee may not be nil")) + } else if rf.NativeFee.Cmp(zero) < 0 { + merr = errors.Join(merr, fmt.Errorf("nativeFee may not be negative (got: %s)", rf.NativeFee)) + } + if merr != nil { + return nil, merr + } + reportBytes, err := ReportTypes.Pack(r.feedID, rf.ValidFromTimestamp, rf.Timestamp, rf.NativeFee, rf.LinkFee, rf.ExpiresAt, rf.BenchmarkPrice) + return ocrtypes.Report(reportBytes), pkgerrors.Wrap(err, "failed to pack report blob") +} + +func (r *ReportCodec) MaxReportLength(n int) (int, error) { + return maxReportLength, nil +} + +func (r *ReportCodec) ObservationTimestampFromReport(report ocrtypes.Report) (uint32, error) { + decoded, err := r.Decode(report) + if err != nil { + return 0, err + } + return decoded.ObservationsTimestamp, nil +} + +func (r *ReportCodec) Decode(report ocrtypes.Report) (*reporttypes.Report, error) { + return reporttypes.Decode(report) +} + +func (r *ReportCodec) BenchmarkPriceFromReport(report ocrtypes.Report) (*big.Int, error) { + decoded, err := r.Decode(report) + if err != nil { + return nil, err + } + return decoded.BenchmarkPrice, nil +} diff --git a/core/services/relay/evm/mercury/v2/reportcodec/report_codec_test.go b/core/services/relay/evm/mercury/v2/reportcodec/report_codec_test.go new file mode 100644 index 00000000..7956a66d --- /dev/null +++ b/core/services/relay/evm/mercury/v2/reportcodec/report_codec_test.go @@ -0,0 +1,152 @@ +package reportcodec + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/offchainreporting2plus/types" + + v2 "github.com/goplugin/plugin-common/pkg/types/mercury/v2" +) + +func newValidReportFields() v2.ReportFields { + 
return v2.ReportFields{ + Timestamp: 242, + BenchmarkPrice: big.NewInt(243), + ValidFromTimestamp: 123, + ExpiresAt: 20, + LinkFee: big.NewInt(456), + NativeFee: big.NewInt(457), + } +} + +func Test_ReportCodec_BuildReport(t *testing.T) { + r := ReportCodec{} + + t.Run("BuildReport errors on zero values", func(t *testing.T) { + _, err := r.BuildReport(v2.ReportFields{}) + require.Error(t, err) + assert.Contains(t, err.Error(), "benchmarkPrice may not be nil") + assert.Contains(t, err.Error(), "linkFee may not be nil") + assert.Contains(t, err.Error(), "nativeFee may not be nil") + }) + + t.Run("BuildReport constructs a report from observations", func(t *testing.T) { + rf := newValidReportFields() + // only need to test happy path since validations are done in relaymercury + + report, err := r.BuildReport(rf) + require.NoError(t, err) + + reportElems := make(map[string]interface{}) + err = ReportTypes.UnpackIntoMap(reportElems, report) + require.NoError(t, err) + + assert.Equal(t, int(reportElems["observationsTimestamp"].(uint32)), 242) + assert.Equal(t, reportElems["benchmarkPrice"].(*big.Int).Int64(), int64(243)) + assert.Equal(t, reportElems["validFromTimestamp"].(uint32), uint32(123)) + assert.Equal(t, reportElems["expiresAt"].(uint32), uint32(20)) + assert.Equal(t, reportElems["linkFee"].(*big.Int).Int64(), int64(456)) + assert.Equal(t, reportElems["nativeFee"].(*big.Int).Int64(), int64(457)) + + assert.Equal(t, types.Report{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3}, report) + max, err := r.MaxReportLength(4) + require.NoError(t, err) + assert.LessOrEqual(t, len(report), max) + + t.Run("Decode decodes the report", func(t *testing.T) { + decoded, err := r.Decode(report) + require.NoError(t, err) + + require.NotNil(t, decoded) + + assert.Equal(t, uint32(242), decoded.ObservationsTimestamp) + assert.Equal(t, big.NewInt(243), decoded.BenchmarkPrice) + assert.Equal(t, uint32(123), decoded.ValidFromTimestamp) + assert.Equal(t, uint32(20), decoded.ExpiresAt) + assert.Equal(t, big.NewInt(456), decoded.LinkFee) + assert.Equal(t, big.NewInt(457), decoded.NativeFee) + }) + }) + + t.Run("errors on negative fee", func(t *testing.T) { + rf := newValidReportFields() + rf.LinkFee = big.NewInt(-1) + rf.NativeFee = big.NewInt(-1) + _, err := r.BuildReport(rf) + require.Error(t, err) + + assert.Contains(t, err.Error(), "linkFee may not be negative (got: -1)") + assert.Contains(t, err.Error(), "nativeFee may not be negative (got: -1)") + }) + + t.Run("Decode errors on invalid report", func(t *testing.T) { + _, err := r.Decode([]byte{1, 2, 3}) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + + longBad := make([]byte, 64) + for i := 0; i < len(longBad); i++ { + longBad[i] = byte(i) + } + _, err = r.Decode(longBad) + assert.EqualError(t, err, "failed to decode report: abi: improperly encoded uint32 value") 
+ }) +} + +func buildSampleReport(ts int64) []byte { + feedID := [32]byte{'f', 'o', 'o'} + timestamp := uint32(ts) + bp := big.NewInt(242) + validFromTimestamp := uint32(123) + expiresAt := uint32(456) + linkFee := big.NewInt(3334455) + nativeFee := big.NewInt(556677) + + b, err := ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp) + if err != nil { + panic(err) + } + return b +} + +func Test_ReportCodec_ObservationTimestampFromReport(t *testing.T) { + r := ReportCodec{} + + t.Run("ObservationTimestampFromReport extracts observation timestamp from a valid report", func(t *testing.T) { + report := buildSampleReport(123) + + ts, err := r.ObservationTimestampFromReport(report) + require.NoError(t, err) + + assert.Equal(t, ts, uint32(123)) + }) + t.Run("ObservationTimestampFromReport returns error when report is invalid", func(t *testing.T) { + report := []byte{1, 2, 3} + + _, err := r.ObservationTimestampFromReport(report) + require.Error(t, err) + + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + }) +} + +func Test_ReportCodec_BenchmarkPriceFromReport(t *testing.T) { + r := ReportCodec{} + + t.Run("BenchmarkPriceFromReport extracts the benchmark price from valid report", func(t *testing.T) { + report := buildSampleReport(123) + + bp, err := r.BenchmarkPriceFromReport(report) + require.NoError(t, err) + + assert.Equal(t, big.NewInt(242), bp) + }) + t.Run("BenchmarkPriceFromReport errors on invalid report", func(t *testing.T) { + _, err := r.BenchmarkPriceFromReport([]byte{1, 2, 3}) + require.Error(t, err) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + }) +} diff --git a/core/services/relay/evm/mercury/v2/types/types.go b/core/services/relay/evm/mercury/v2/types/types.go new file mode 100644 index 00000000..3c1df286 --- /dev/null +++ 
b/core/services/relay/evm/mercury/v2/types/types.go @@ -0,0 +1,52 @@ +package reporttypes + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +var schema = GetSchema() + +func GetSchema() abi.Arguments { + mustNewType := func(t string) abi.Type { + result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{}) + if err != nil { + panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err)) + } + return result + } + return abi.Arguments([]abi.Argument{ + {Name: "feedId", Type: mustNewType("bytes32")}, + {Name: "validFromTimestamp", Type: mustNewType("uint32")}, + {Name: "observationsTimestamp", Type: mustNewType("uint32")}, + {Name: "nativeFee", Type: mustNewType("uint192")}, + {Name: "linkFee", Type: mustNewType("uint192")}, + {Name: "expiresAt", Type: mustNewType("uint32")}, + {Name: "benchmarkPrice", Type: mustNewType("int192")}, + }) +} + +type Report struct { + FeedId [32]byte + ObservationsTimestamp uint32 + BenchmarkPrice *big.Int + ValidFromTimestamp uint32 + ExpiresAt uint32 + LinkFee *big.Int + NativeFee *big.Int +} + +// Decode is made available to external users (i.e. 
mercury server) +func Decode(report []byte) (*Report, error) { + values, err := schema.Unpack(report) + if err != nil { + return nil, fmt.Errorf("failed to decode report: %w", err) + } + decoded := new(Report) + if err = schema.Copy(decoded, values); err != nil { + return nil, fmt.Errorf("failed to copy report values to struct: %w", err) + } + return decoded, nil +} diff --git a/core/services/relay/evm/mercury/v3/data_source.go b/core/services/relay/evm/mercury/v3/data_source.go new file mode 100644 index 00000000..ddc79822 --- /dev/null +++ b/core/services/relay/evm/mercury/v3/data_source.go @@ -0,0 +1,276 @@ +package v3 + +import ( + "context" + "errors" + "fmt" + "math/big" + "sync" + + pkgerrors "github.com/pkg/errors" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/types/mercury" + v3types "github.com/goplugin/plugin-common/pkg/types/mercury/v3" + v3 "github.com/goplugin/plugin-data-streams/mercury/v3" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/types" + mercurytypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/types" + mercuryutils "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v3/reportcodec" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type Runner interface { + ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) +} + +type LatestReportFetcher interface { + LatestPrice(ctx context.Context, feedID [32]byte) (*big.Int, error) + LatestTimestamp(context.Context) (int64, error) +} + +type datasource struct { + pipelineRunner 
Runner + jb job.Job + spec pipeline.Spec + feedID mercuryutils.FeedID + lggr logger.Logger + saver ocrcommon.Saver + orm types.DataSourceORM + codec reportcodec.ReportCodec + + fetcher LatestReportFetcher + linkFeedID mercuryutils.FeedID + nativeFeedID mercuryutils.FeedID + + mu sync.RWMutex + + chEnhancedTelem chan<- ocrcommon.EnhancedTelemetryMercuryData +} + +var _ v3.DataSource = &datasource{} + +func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, feedID mercuryutils.FeedID, lggr logger.Logger, s ocrcommon.Saver, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, fetcher LatestReportFetcher, linkFeedID, nativeFeedID mercuryutils.FeedID) *datasource { + return &datasource{pr, jb, spec, feedID, lggr, s, orm, reportcodec.ReportCodec{}, fetcher, linkFeedID, nativeFeedID, sync.RWMutex{}, enhancedTelemChan} +} + +func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestamp, fetchMaxFinalizedTimestamp bool) (obs v3types.Observation, pipelineExecutionErr error) { + var wg sync.WaitGroup + ctx, cancel := context.WithCancel(ctx) + + if fetchMaxFinalizedTimestamp { + wg.Add(1) + go func() { + defer wg.Done() + latest, dbErr := ds.orm.LatestReport(ctx, ds.feedID) + if dbErr != nil { + obs.MaxFinalizedTimestamp.Err = dbErr + return + } + if latest != nil { + maxFinalizedBlockNumber, decodeErr := ds.codec.ObservationTimestampFromReport(latest) + obs.MaxFinalizedTimestamp.Val, obs.MaxFinalizedTimestamp.Err = int64(maxFinalizedBlockNumber), decodeErr + return + } + obs.MaxFinalizedTimestamp.Val, obs.MaxFinalizedTimestamp.Err = ds.fetcher.LatestTimestamp(ctx) + }() + } + + var trrs pipeline.TaskRunResults + wg.Add(1) + go func() { + defer wg.Done() + var run *pipeline.Run + run, trrs, pipelineExecutionErr = ds.executeRun(ctx) + if pipelineExecutionErr != nil { + cancel() + pipelineExecutionErr = fmt.Errorf("Observe failed while executing run: %w", pipelineExecutionErr) + return + } + + 
ds.saver.Save(run) + + var parsed parseOutput + parsed, pipelineExecutionErr = ds.parse(trrs) + if pipelineExecutionErr != nil { + cancel() + // This is not expected under normal circumstances + ds.lggr.Errorw("Observe failed while parsing run results", "err", pipelineExecutionErr) + pipelineExecutionErr = fmt.Errorf("Observe failed while parsing run results: %w", pipelineExecutionErr) + return + } + obs.BenchmarkPrice = parsed.benchmarkPrice + obs.Bid = parsed.bid + obs.Ask = parsed.ask + }() + + var isLink, isNative bool + if ds.feedID == ds.linkFeedID { + isLink = true + } else { + wg.Add(1) + go func() { + defer wg.Done() + obs.LinkPrice.Val, obs.LinkPrice.Err = ds.fetcher.LatestPrice(ctx, ds.linkFeedID) + if obs.LinkPrice.Val == nil && obs.LinkPrice.Err == nil { + mercurytypes.PriceFeedMissingCount.WithLabelValues(ds.linkFeedID.String()).Inc() + ds.lggr.Warnw(fmt.Sprintf("Mercury server was missing PLI feed, using sentinel value of %s", v3.MissingPrice), "linkFeedID", ds.linkFeedID) + obs.LinkPrice.Val = v3.MissingPrice + } else if obs.LinkPrice.Err != nil { + mercurytypes.PriceFeedErrorCount.WithLabelValues(ds.linkFeedID.String()).Inc() + ds.lggr.Errorw("Mercury server returned error querying PLI price feed", "err", obs.LinkPrice.Err, "linkFeedID", ds.linkFeedID) + } + }() + } + + if ds.feedID == ds.nativeFeedID { + isNative = true + } else { + wg.Add(1) + go func() { + defer wg.Done() + obs.NativePrice.Val, obs.NativePrice.Err = ds.fetcher.LatestPrice(ctx, ds.nativeFeedID) + if obs.NativePrice.Val == nil && obs.NativePrice.Err == nil { + mercurytypes.PriceFeedMissingCount.WithLabelValues(ds.nativeFeedID.String()).Inc() + ds.lggr.Warnw(fmt.Sprintf("Mercury server was missing native feed, using sentinel value of %s", v3.MissingPrice), "nativeFeedID", ds.nativeFeedID) + obs.NativePrice.Val = v3.MissingPrice + } else if obs.NativePrice.Err != nil { + mercurytypes.PriceFeedErrorCount.WithLabelValues(ds.nativeFeedID.String()).Inc() + ds.lggr.Errorw("Mercury server 
returned error querying native price feed", "err", obs.NativePrice.Err, "nativeFeedID", ds.nativeFeedID) + } + }() + } + + wg.Wait() + cancel() + + if pipelineExecutionErr != nil { + return + } + + if isLink || isNative { + // run has now completed so it is safe to use benchmark price + if isLink { + // This IS the PLI feed, use our observed price + obs.LinkPrice.Val, obs.LinkPrice.Err = obs.BenchmarkPrice.Val, obs.BenchmarkPrice.Err + } + if isNative { + // This IS the native feed, use our observed price + obs.NativePrice.Val, obs.NativePrice.Err = obs.BenchmarkPrice.Val, obs.BenchmarkPrice.Err + } + } + + ocrcommon.MaybeEnqueueEnhancedTelem(ds.jb, ds.chEnhancedTelem, ocrcommon.EnhancedTelemetryMercuryData{ + V3Observation: &obs, + TaskRunResults: trrs, + RepTimestamp: repts, + FeedVersion: mercuryutils.REPORT_V3, + FetchMaxFinalizedTimestamp: fetchMaxFinalizedTimestamp, + IsLinkFeed: isLink, + IsNativeFeed: isNative, + }) + + return obs, nil +} + +func toBigInt(val interface{}) (*big.Int, error) { + dec, err := utils.ToDecimal(val) + if err != nil { + return nil, err + } + return dec.BigInt(), nil +} + +type parseOutput struct { + benchmarkPrice mercury.ObsResult[*big.Int] + bid mercury.ObsResult[*big.Int] + ask mercury.ObsResult[*big.Int] +} + +func (ds *datasource) parse(trrs pipeline.TaskRunResults) (o parseOutput, merr error) { + var finaltrrs []pipeline.TaskRunResult + for _, trr := range trrs { + // only return terminal trrs from executeRun + if trr.IsTerminal() { + finaltrrs = append(finaltrrs, trr) + } + } + + // pipeline.TaskRunResults comes ordered asc by index, this is guaranteed + // by the pipeline executor + if len(finaltrrs) != 3 { + return o, fmt.Errorf("invalid number of results, expected: 3, got: %d", len(finaltrrs)) + } + + merr = errors.Join( + setBenchmarkPrice(&o, finaltrrs[0].Result), + setBid(&o, finaltrrs[1].Result), + setAsk(&o, finaltrrs[2].Result), + ) + + return o, merr +} + +func setBenchmarkPrice(o *parseOutput, res pipeline.Result) 
error { + if res.Error != nil { + o.benchmarkPrice.Err = res.Error + return res.Error + } + val, err := toBigInt(res.Value) + if err != nil { + return fmt.Errorf("failed to parse BenchmarkPrice: %w", err) + } + o.benchmarkPrice.Val = val + return nil +} + +func setBid(o *parseOutput, res pipeline.Result) error { + if res.Error != nil { + o.bid.Err = res.Error + return res.Error + } + val, err := toBigInt(res.Value) + if err != nil { + return fmt.Errorf("failed to parse Bid: %w", err) + } + o.bid.Val = val + return nil +} + +func setAsk(o *parseOutput, res pipeline.Result) error { + if res.Error != nil { + o.ask.Err = res.Error + return res.Error + } + val, err := toBigInt(res.Value) + if err != nil { + return fmt.Errorf("failed to parse Ask: %w", err) + } + o.ask.Val = val + return nil +} + +// The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod). +// Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod. 
+func (ds *datasource) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) { + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "jb": map[string]interface{}{ + "databaseID": ds.jb.ID, + "externalJobID": ds.jb.ExternalJobID, + "name": ds.jb.Name.ValueOrZero(), + }, + }) + + run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars, ds.lggr) + if err != nil { + return nil, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) + } + + return run, trrs, err +} diff --git a/core/services/relay/evm/mercury/v3/data_source_test.go b/core/services/relay/evm/mercury/v3/data_source_test.go new file mode 100644 index 00000000..f5783620 --- /dev/null +++ b/core/services/relay/evm/mercury/v3/data_source_test.go @@ -0,0 +1,337 @@ +package v3 + +import ( + "context" + "math/big" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + mercurytypes "github.com/goplugin/plugin-common/pkg/types/mercury" + relaymercuryv3 "github.com/goplugin/plugin-data-streams/mercury/v3" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + mercurymocks "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + reportcodecv3 "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v3/reportcodec" +) + +var _ mercurytypes.ServerFetcher = &mockFetcher{} + +type mockFetcher struct { + ts int64 + tsErr error + linkPrice *big.Int + linkPriceErr error + nativePrice *big.Int + nativePriceErr error +} + +var feedId utils.FeedID = [32]byte{1} +var linkFeedId utils.FeedID = [32]byte{2} +var nativeFeedId utils.FeedID = [32]byte{3} + +func (m *mockFetcher) 
FetchInitialMaxFinalizedBlockNumber(context.Context) (*int64, error) { + return nil, nil +} + +func (m *mockFetcher) LatestPrice(ctx context.Context, fId [32]byte) (*big.Int, error) { + if fId == linkFeedId { + return m.linkPrice, m.linkPriceErr + } else if fId == nativeFeedId { + return m.nativePrice, m.nativePriceErr + } + return nil, nil +} + +func (m *mockFetcher) LatestTimestamp(context.Context) (int64, error) { + return m.ts, m.tsErr +} + +type mockORM struct { + report []byte + err error +} + +func (m *mockORM) LatestReport(ctx context.Context, feedID [32]byte, qopts ...pg.QOpt) (report []byte, err error) { + return m.report, m.err +} + +type mockSaver struct { + r *pipeline.Run +} + +func (ms *mockSaver) Save(r *pipeline.Run) { + ms.r = r +} + +func Test_Datasource(t *testing.T) { + orm := &mockORM{} + ds := &datasource{orm: orm, lggr: logger.TestLogger(t)} + ctx := testutils.Context(t) + repts := ocrtypes.ReportTimestamp{} + + fetcher := &mockFetcher{} + ds.fetcher = fetcher + + saver := &mockSaver{} + ds.saver = saver + + goodTrrs := []pipeline.TaskRunResult{ + { + // bp + Result: pipeline.Result{Value: "122.345"}, + Task: &mercurymocks.MockTask{}, + }, + { + // bid + Result: pipeline.Result{Value: "121.993"}, + Task: &mercurymocks.MockTask{}, + }, + { + // ask + Result: pipeline.Result{Value: "123.111"}, + Task: &mercurymocks.MockTask{}, + }, + } + + ds.pipelineRunner = &mercurymocks.MockRunner{ + Trrs: goodTrrs, + } + + spec := pipeline.Spec{} + ds.spec = spec + + t.Run("when fetchMaxFinalizedTimestamp=true", func(t *testing.T) { + t.Run("with latest report in database", func(t *testing.T) { + orm.report = buildSampleV3Report() + orm.err = nil + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.NoError(t, obs.MaxFinalizedTimestamp.Err) + assert.Equal(t, int64(124), obs.MaxFinalizedTimestamp.Val) + }) + t.Run("if querying latest report fails", func(t *testing.T) { + orm.report = nil + orm.err = errors.New("something 
exploded") + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "something exploded") + assert.Zero(t, obs.MaxFinalizedTimestamp.Val) + }) + t.Run("if codec fails to decode", func(t *testing.T) { + orm.report = []byte{1, 2, 3} + orm.err = nil + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + assert.Zero(t, obs.MaxFinalizedTimestamp.Val) + }) + + orm.report = nil + orm.err = nil + + t.Run("if LatestTimestamp returns error", func(t *testing.T) { + fetcher.tsErr = errors.New("some error") + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "some error") + assert.Zero(t, obs.MaxFinalizedTimestamp.Val) + }) + + t.Run("if LatestTimestamp succeeds", func(t *testing.T) { + fetcher.tsErr = nil + fetcher.ts = 123 + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.Equal(t, int64(123), obs.MaxFinalizedTimestamp.Val) + assert.NoError(t, obs.MaxFinalizedTimestamp.Err) + }) + + t.Run("if LatestTimestamp succeeds but ts=0 (new feed)", func(t *testing.T) { + fetcher.tsErr = nil + fetcher.ts = 0 + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.NoError(t, obs.MaxFinalizedTimestamp.Err) + assert.Zero(t, obs.MaxFinalizedTimestamp.Val) + }) + + t.Run("when run execution succeeded", func(t *testing.T) { + t.Run("when feedId=linkFeedID=nativeFeedId", func(t *testing.T) { + t.Cleanup(func() { + ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, linkFeedId, nativeFeedId + }) + + ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, feedId, feedId + + fetcher.ts = 123123 + fetcher.tsErr = nil + + obs, err := ds.Observe(ctx, repts, true) + assert.NoError(t, err) + + assert.Equal(t, big.NewInt(122), 
obs.BenchmarkPrice.Val) + assert.NoError(t, obs.BenchmarkPrice.Err) + assert.Equal(t, big.NewInt(121), obs.Bid.Val) + assert.NoError(t, obs.Bid.Err) + assert.Equal(t, big.NewInt(123), obs.Ask.Val) + assert.NoError(t, obs.Ask.Err) + assert.Equal(t, int64(123123), obs.MaxFinalizedTimestamp.Val) + assert.NoError(t, obs.MaxFinalizedTimestamp.Err) + assert.Equal(t, big.NewInt(122), obs.LinkPrice.Val) + assert.NoError(t, obs.LinkPrice.Err) + assert.Equal(t, big.NewInt(122), obs.NativePrice.Val) + assert.NoError(t, obs.NativePrice.Err) + }) + }) + }) + + t.Run("when fetchMaxFinalizedTimestamp=false", func(t *testing.T) { + t.Run("when run execution fails, returns error", func(t *testing.T) { + t.Cleanup(func() { + ds.pipelineRunner = &mercurymocks.MockRunner{ + Trrs: goodTrrs, + Err: nil, + } + }) + + ds.pipelineRunner = &mercurymocks.MockRunner{ + Trrs: goodTrrs, + Err: errors.New("run execution failed"), + } + + _, err := ds.Observe(ctx, repts, false) + assert.EqualError(t, err, "Observe failed while executing run: error executing run for spec ID 0: run execution failed") + }) + + t.Run("when parsing run results fails, return error", func(t *testing.T) { + t.Cleanup(func() { + runner := &mercurymocks.MockRunner{ + Trrs: goodTrrs, + Err: nil, + } + ds.pipelineRunner = runner + }) + + badTrrs := []pipeline.TaskRunResult{ + { + // benchmark price + Result: pipeline.Result{Value: "122.345"}, + Task: &mercurymocks.MockTask{}, + }, + { + // bid + Result: pipeline.Result{Value: "121.993"}, + Task: &mercurymocks.MockTask{}, + }, + { + // ask + Result: pipeline.Result{Error: errors.New("some error with ask")}, + Task: &mercurymocks.MockTask{}, + }, + } + + ds.pipelineRunner = &mercurymocks.MockRunner{ + Trrs: badTrrs, + Err: nil, + } + + _, err := ds.Observe(ctx, repts, false) + assert.EqualError(t, err, "Observe failed while parsing run results: some error with ask") + }) + + t.Run("when run execution succeeded", func(t *testing.T) { + t.Run("when 
feedId=linkFeedID=nativeFeedId", func(t *testing.T) { + t.Cleanup(func() { + ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, linkFeedId, nativeFeedId + }) + + var feedId utils.FeedID = [32]byte{1} + ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, feedId, feedId + + obs, err := ds.Observe(ctx, repts, false) + assert.NoError(t, err) + + assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val) + assert.NoError(t, obs.BenchmarkPrice.Err) + assert.Equal(t, big.NewInt(121), obs.Bid.Val) + assert.NoError(t, obs.Bid.Err) + assert.Equal(t, big.NewInt(123), obs.Ask.Val) + assert.NoError(t, obs.Ask.Err) + assert.Equal(t, int64(0), obs.MaxFinalizedTimestamp.Val) + assert.NoError(t, obs.MaxFinalizedTimestamp.Err) + assert.Equal(t, big.NewInt(122), obs.LinkPrice.Val) + assert.NoError(t, obs.LinkPrice.Err) + assert.Equal(t, big.NewInt(122), obs.NativePrice.Val) + assert.NoError(t, obs.NativePrice.Err) + }) + + t.Run("when fails to fetch linkPrice or nativePrice", func(t *testing.T) { + t.Cleanup(func() { + fetcher.linkPriceErr = nil + fetcher.nativePriceErr = nil + }) + + fetcher.linkPriceErr = errors.New("some error fetching link price") + fetcher.nativePriceErr = errors.New("some error fetching native price") + + obs, err := ds.Observe(ctx, repts, false) + assert.NoError(t, err) + + assert.Nil(t, obs.LinkPrice.Val) + assert.EqualError(t, obs.LinkPrice.Err, "some error fetching link price") + assert.Nil(t, obs.NativePrice.Val) + assert.EqualError(t, obs.NativePrice.Err, "some error fetching native price") + }) + + t.Run("when succeeds to fetch linkPrice or nativePrice but got nil (new feed)", func(t *testing.T) { + obs, err := ds.Observe(ctx, repts, false) + assert.NoError(t, err) + + assert.Equal(t, obs.LinkPrice.Val, relaymercuryv3.MissingPrice) + assert.Nil(t, obs.LinkPrice.Err) + assert.Equal(t, obs.NativePrice.Val, relaymercuryv3.MissingPrice) + assert.Nil(t, obs.NativePrice.Err) + }) + }) + }) +} + +var sampleFeedID = [32]uint8{28, 145, 107, 74, 167, 229, 124, 167, 
182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114} + +func buildSampleV3Report() []byte { + feedID := sampleFeedID + timestamp := uint32(124) + bp := big.NewInt(242) + bid := big.NewInt(243) + ask := big.NewInt(244) + validFromTimestamp := uint32(123) + expiresAt := uint32(456) + linkFee := big.NewInt(3334455) + nativeFee := big.NewInt(556677) + + b, err := reportcodecv3.ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp, bid, ask) + if err != nil { + panic(err) + } + return b +} diff --git a/core/services/relay/evm/mercury/v3/reportcodec/report_codec.go b/core/services/relay/evm/mercury/v3/reportcodec/report_codec.go new file mode 100644 index 00000000..6f85fb9b --- /dev/null +++ b/core/services/relay/evm/mercury/v3/reportcodec/report_codec.go @@ -0,0 +1,84 @@ +package reportcodec + +import ( + "errors" + "fmt" + "math/big" + + pkgerrors "github.com/pkg/errors" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + v3 "github.com/goplugin/plugin-common/pkg/types/mercury/v3" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + reporttypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/v3/types" +) + +var ReportTypes = reporttypes.GetSchema() +var maxReportLength = 32 * len(ReportTypes) // each arg is 256 bit EVM word +var zero = big.NewInt(0) + +var _ v3.ReportCodec = &ReportCodec{} + +type ReportCodec struct { + logger logger.Logger + feedID utils.FeedID +} + +func NewReportCodec(feedID [32]byte, lggr logger.Logger) *ReportCodec { + return &ReportCodec{lggr, feedID} +} + +func (r *ReportCodec) BuildReport(rf v3.ReportFields) (ocrtypes.Report, error) { + var merr error + if rf.BenchmarkPrice == nil { + merr = errors.Join(merr, errors.New("benchmarkPrice may not be nil")) + } + if rf.Bid == nil { + merr = errors.Join(merr, errors.New("bid may not be 
nil")) + } + if rf.Ask == nil { + merr = errors.Join(merr, errors.New("ask may not be nil")) + } + if rf.LinkFee == nil { + merr = errors.Join(merr, errors.New("linkFee may not be nil")) + } else if rf.LinkFee.Cmp(zero) < 0 { + merr = errors.Join(merr, fmt.Errorf("linkFee may not be negative (got: %s)", rf.LinkFee)) + } + if rf.NativeFee == nil { + merr = errors.Join(merr, errors.New("nativeFee may not be nil")) + } else if rf.NativeFee.Cmp(zero) < 0 { + merr = errors.Join(merr, fmt.Errorf("nativeFee may not be negative (got: %s)", rf.NativeFee)) + } + if merr != nil { + return nil, merr + } + reportBytes, err := ReportTypes.Pack(r.feedID, rf.ValidFromTimestamp, rf.Timestamp, rf.NativeFee, rf.LinkFee, rf.ExpiresAt, rf.BenchmarkPrice, rf.Bid, rf.Ask) + return ocrtypes.Report(reportBytes), pkgerrors.Wrap(err, "failed to pack report blob") +} + +func (r *ReportCodec) MaxReportLength(n int) (int, error) { + return maxReportLength, nil +} + +func (r *ReportCodec) ObservationTimestampFromReport(report ocrtypes.Report) (uint32, error) { + decoded, err := r.Decode(report) + if err != nil { + return 0, err + } + return decoded.ObservationsTimestamp, nil +} + +func (r *ReportCodec) Decode(report ocrtypes.Report) (*reporttypes.Report, error) { + return reporttypes.Decode(report) +} + +func (r *ReportCodec) BenchmarkPriceFromReport(report ocrtypes.Report) (*big.Int, error) { + decoded, err := r.Decode(report) + if err != nil { + return nil, err + } + return decoded.BenchmarkPrice, nil +} diff --git a/core/services/relay/evm/mercury/v3/reportcodec/report_codec_test.go b/core/services/relay/evm/mercury/v3/reportcodec/report_codec_test.go new file mode 100644 index 00000000..2582c05a --- /dev/null +++ b/core/services/relay/evm/mercury/v3/reportcodec/report_codec_test.go @@ -0,0 +1,160 @@ +package reportcodec + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + 
"github.com/goplugin/libocr/offchainreporting2plus/types" + + v3 "github.com/goplugin/plugin-common/pkg/types/mercury/v3" +) + +func newValidReportFields() v3.ReportFields { + return v3.ReportFields{ + Timestamp: 242, + BenchmarkPrice: big.NewInt(243), + Bid: big.NewInt(244), + Ask: big.NewInt(245), + ValidFromTimestamp: 123, + ExpiresAt: 20, + LinkFee: big.NewInt(456), + NativeFee: big.NewInt(457), + } +} + +func Test_ReportCodec_BuildReport(t *testing.T) { + r := ReportCodec{} + + t.Run("BuildReport errors on zero values", func(t *testing.T) { + _, err := r.BuildReport(v3.ReportFields{}) + require.Error(t, err) + assert.Contains(t, err.Error(), "benchmarkPrice may not be nil") + assert.Contains(t, err.Error(), "linkFee may not be nil") + assert.Contains(t, err.Error(), "nativeFee may not be nil") + }) + + t.Run("BuildReport constructs a report from observations", func(t *testing.T) { + rf := newValidReportFields() + // only need to test happy path since validations are done in relaymercury + + report, err := r.BuildReport(rf) + require.NoError(t, err) + + reportElems := make(map[string]interface{}) + err = ReportTypes.UnpackIntoMap(reportElems, report) + require.NoError(t, err) + + assert.Equal(t, int(reportElems["observationsTimestamp"].(uint32)), 242) + assert.Equal(t, reportElems["benchmarkPrice"].(*big.Int).Int64(), int64(243)) + assert.Equal(t, reportElems["bid"].(*big.Int).Int64(), int64(244)) + assert.Equal(t, reportElems["ask"].(*big.Int).Int64(), int64(245)) + assert.Equal(t, reportElems["validFromTimestamp"].(uint32), uint32(123)) + assert.Equal(t, reportElems["expiresAt"].(uint32), uint32(20)) + assert.Equal(t, reportElems["linkFee"].(*big.Int).Int64(), int64(456)) + assert.Equal(t, reportElems["nativeFee"].(*big.Int).Int64(), int64(457)) + + assert.Equal(t, types.Report{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf5}, report) + max, err := r.MaxReportLength(4) + require.NoError(t, err) + assert.LessOrEqual(t, len(report), max) + + t.Run("Decode decodes the report", func(t *testing.T) { + decoded, err := r.Decode(report) + require.NoError(t, err) + + require.NotNil(t, decoded) + + assert.Equal(t, uint32(242), decoded.ObservationsTimestamp) + assert.Equal(t, big.NewInt(243), decoded.BenchmarkPrice) + assert.Equal(t, big.NewInt(244), decoded.Bid) + assert.Equal(t, big.NewInt(245), decoded.Ask) + assert.Equal(t, uint32(123), decoded.ValidFromTimestamp) + assert.Equal(t, uint32(20), decoded.ExpiresAt) + assert.Equal(t, big.NewInt(456), decoded.LinkFee) + assert.Equal(t, big.NewInt(457), decoded.NativeFee) + }) + }) + + t.Run("errors on negative 
fee", func(t *testing.T) { + rf := newValidReportFields() + rf.LinkFee = big.NewInt(-1) + rf.NativeFee = big.NewInt(-1) + _, err := r.BuildReport(rf) + require.Error(t, err) + + assert.Contains(t, err.Error(), "linkFee may not be negative (got: -1)") + assert.Contains(t, err.Error(), "nativeFee may not be negative (got: -1)") + }) + + t.Run("Decode errors on invalid report", func(t *testing.T) { + _, err := r.Decode([]byte{1, 2, 3}) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + + longBad := make([]byte, 64) + for i := 0; i < len(longBad); i++ { + longBad[i] = byte(i) + } + _, err = r.Decode(longBad) + assert.EqualError(t, err, "failed to decode report: abi: improperly encoded uint32 value") + }) +} + +func buildSampleReport(ts int64) []byte { + feedID := [32]byte{'f', 'o', 'o'} + timestamp := uint32(ts) + bp := big.NewInt(242) + bid := big.NewInt(243) + ask := big.NewInt(244) + validFromTimestamp := uint32(123) + expiresAt := uint32(456) + linkFee := big.NewInt(3334455) + nativeFee := big.NewInt(556677) + + b, err := ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp, bid, ask) + if err != nil { + panic(err) + } + return b +} + +func Test_ReportCodec_ObservationTimestampFromReport(t *testing.T) { + r := ReportCodec{} + + t.Run("ObservationTimestampFromReport extracts observation timestamp from a valid report", func(t *testing.T) { + report := buildSampleReport(123) + + ts, err := r.ObservationTimestampFromReport(report) + require.NoError(t, err) + + assert.Equal(t, ts, uint32(123)) + }) + t.Run("ObservationTimestampFromReport returns error when report is invalid", func(t *testing.T) { + report := []byte{1, 2, 3} + + _, err := r.ObservationTimestampFromReport(report) + require.Error(t, err) + + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + }) +} + +func 
Test_ReportCodec_BenchmarkPriceFromReport(t *testing.T) { + r := ReportCodec{} + + t.Run("BenchmarkPriceFromReport extracts the benchmark price from valid report", func(t *testing.T) { + report := buildSampleReport(123) + + bp, err := r.BenchmarkPriceFromReport(report) + require.NoError(t, err) + + assert.Equal(t, big.NewInt(242), bp) + }) + t.Run("BenchmarkPriceFromReport errors on invalid report", func(t *testing.T) { + _, err := r.BenchmarkPriceFromReport([]byte{1, 2, 3}) + require.Error(t, err) + assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32") + }) +} diff --git a/core/services/relay/evm/mercury/v3/types/types.go b/core/services/relay/evm/mercury/v3/types/types.go new file mode 100644 index 00000000..e99f529f --- /dev/null +++ b/core/services/relay/evm/mercury/v3/types/types.go @@ -0,0 +1,56 @@ +package reporttypes + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi" +) + +var schema = GetSchema() + +func GetSchema() abi.Arguments { + mustNewType := func(t string) abi.Type { + result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{}) + if err != nil { + panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err)) + } + return result + } + return abi.Arguments([]abi.Argument{ + {Name: "feedId", Type: mustNewType("bytes32")}, + {Name: "validFromTimestamp", Type: mustNewType("uint32")}, + {Name: "observationsTimestamp", Type: mustNewType("uint32")}, + {Name: "nativeFee", Type: mustNewType("uint192")}, + {Name: "linkFee", Type: mustNewType("uint192")}, + {Name: "expiresAt", Type: mustNewType("uint32")}, + {Name: "benchmarkPrice", Type: mustNewType("int192")}, + {Name: "bid", Type: mustNewType("int192")}, + {Name: "ask", Type: mustNewType("int192")}, + }) +} + +type Report struct { + FeedId [32]byte + ObservationsTimestamp uint32 + BenchmarkPrice *big.Int + Bid *big.Int + Ask *big.Int + ValidFromTimestamp uint32 + ExpiresAt uint32 + LinkFee *big.Int + 
NativeFee *big.Int +} + +// Decode is made available to external users (i.e. mercury server) +func Decode(report []byte) (*Report, error) { + values, err := schema.Unpack(report) + if err != nil { + return nil, fmt.Errorf("failed to decode report: %w", err) + } + decoded := new(Report) + if err = schema.Copy(decoded, values); err != nil { + return nil, fmt.Errorf("failed to copy report values to struct: %w", err) + } + return decoded, nil +} diff --git a/core/services/relay/evm/mercury/wsrpc/cache/cache.go b/core/services/relay/evm/mercury/wsrpc/cache/cache.go new file mode 100644 index 00000000..8b2a4a96 --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/cache/cache.go @@ -0,0 +1,399 @@ +package cache + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/jpillora/backoff" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/logger" + mercuryutils "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + promFetchFailedCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_cache_fetch_failure_count", + Help: "Number of times we tried to call LatestReport from the mercury server, but some kind of error occurred", + }, + []string{"serverURL", "feedID"}, + ) + promCacheHitCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_cache_hit_count", + Help: "Running count of cache hits", + }, + []string{"serverURL", "feedID"}, + ) + promCacheWaitCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_cache_wait_count", + Help: "Running count of times that we had to wait for a fetch to complete before reading from cache", + }, + []string{"serverURL", "feedID"}, + ) + promCacheMissCount = 
promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_cache_miss_count", + Help: "Running count of cache misses", + }, + []string{"serverURL", "feedID"}, + ) +) + +type Fetcher interface { + LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) +} + +type Client interface { + Fetcher + ServerURL() string + RawClient() pb.MercuryClient +} + +// Cache is scoped to one particular mercury server +// Use CacheSet to hold lookups for multiple servers +type Cache interface { + Fetcher + services.Service +} + +type Config struct { + // LatestReportTTL controls how "stale" we will allow a price to be e.g. if + // set to 1s, a new price will always be fetched if the last result was + // from more than 1 second ago. + // + // Another way of looking at it is such: the cache will _never_ return a + // price that was queried from before now-LatestReportTTL. + // + // Setting to zero disables caching entirely. + LatestReportTTL time.Duration + // MaxStaleAge is that maximum amount of time that a value can be stale + // before it is deleted from the cache (a form of garbage collection). + // + // This should generally be set to something much larger than + // LatestReportTTL. Setting to zero disables garbage collection. + MaxStaleAge time.Duration + // LatestReportDeadline controls how long to wait for a response before + // retrying. Setting this to zero will wait indefinitely. 
+ LatestReportDeadline time.Duration +} + +func NewCache(lggr logger.Logger, client Client, cfg Config) Cache { + return newMemCache(lggr, client, cfg) +} + +type cacheVal struct { + sync.RWMutex + + fetching bool + fetchCh chan (struct{}) + + val *pb.LatestReportResponse + err error + + expiresAt time.Time +} + +func (v *cacheVal) read() (*pb.LatestReportResponse, error) { + v.RLock() + defer v.RUnlock() + return v.val, v.err +} + +// caller expected to hold lock +func (v *cacheVal) initiateFetch() <-chan struct{} { + if v.fetching { + panic("cannot initiateFetch on cache val that is already fetching") + } + v.fetching = true + v.fetchCh = make(chan struct{}) + return v.fetchCh +} + +func (v *cacheVal) setError(err error) { + v.Lock() + defer v.Unlock() + v.err = err +} + +func (v *cacheVal) completeFetch(val *pb.LatestReportResponse, err error, expiresAt time.Time) { + v.Lock() + defer v.Unlock() + if !v.fetching { + panic("can only completeFetch on cache val that is fetching") + } + v.val = val + v.err = err + if err == nil { + v.expiresAt = expiresAt + } + close(v.fetchCh) + v.fetchCh = nil + v.fetching = false +} + +func (v *cacheVal) abandonFetch(err error) { + v.completeFetch(nil, err, time.Now()) +} + +func (v *cacheVal) waitForResult(ctx context.Context, chResult <-chan struct{}, chStop <-chan struct{}) (*pb.LatestReportResponse, error) { + select { + case <-ctx.Done(): + _, err := v.read() + return nil, errors.Join(err, ctx.Err()) + case <-chStop: + return nil, errors.New("stopped") + case <-chResult: + return v.read() + } +} + +// memCache stores values in memory +// it will never return a stale value older than latestPriceTTL, instead +// waiting for a successful fetch or caller context cancels, whichever comes +// first +type memCache struct { + services.StateMachine + lggr logger.Logger + + client Client + + cfg Config + + cache sync.Map + + wg sync.WaitGroup + chStop services.StopChan +} + +func newMemCache(lggr logger.Logger, client Client, cfg 
Config) *memCache { + return &memCache{ + services.StateMachine{}, + lggr.Named("MemCache"), + client, + cfg, + sync.Map{}, + sync.WaitGroup{}, + make(chan (struct{})), + } +} + +// LatestReport +// NOTE: This will actually block on all types of errors, even non-timeouts. +// Context should be set carefully and timed to be the maximum time we are +// willing to wait for a result, the background thread will keep re-querying +// until it gets one even on networking errors etc. +func (m *memCache) LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) { + if req == nil { + return nil, errors.New("req must not be nil") + } + feedIDHex := mercuryutils.BytesToFeedID(req.FeedId).String() + if m.cfg.LatestReportTTL <= 0 { + return m.client.RawClient().LatestReport(ctx, req) + } + vi, loaded := m.cache.LoadOrStore(feedIDHex, &cacheVal{ + sync.RWMutex{}, + false, + nil, + nil, + nil, + time.Now(), // first result is always "expired" and requires fetch + }) + v := vi.(*cacheVal) + + m.lggr.Tracew("LatestReport", "feedID", feedIDHex, "loaded", loaded) + + // HOT PATH + v.RLock() + if time.Now().Before(v.expiresAt) { + // CACHE HIT + promCacheHitCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc() + m.lggr.Tracew("LatestReport CACHE HIT (hot path)", "feedID", feedIDHex) + + defer v.RUnlock() + return v.val, nil + } else if v.fetching { + // CACHE WAIT + promCacheWaitCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc() + m.lggr.Tracew("LatestReport CACHE WAIT (hot path)", "feedID", feedIDHex) + // if someone else is fetching then wait for the fetch to complete + ch := v.fetchCh + v.RUnlock() + return v.waitForResult(ctx, ch, m.chStop) + } + // CACHE MISS + promCacheMissCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc() + // fallthrough to cold path and fetch + v.RUnlock() + + // COLD PATH + v.Lock() + if time.Now().Before(v.expiresAt) { + // CACHE HIT + 
promCacheHitCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc() + m.lggr.Tracew("LatestReport CACHE HIT (cold path)", "feedID", feedIDHex) + defer v.Unlock() + return v.val, nil + } else if v.fetching { + // CACHE WAIT + promCacheWaitCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc() + m.lggr.Tracew("LatestReport CACHE WAIT (cold path)", "feedID", feedIDHex) + // if someone else is fetching then wait for the fetch to complete + ch := v.fetchCh + v.Unlock() + return v.waitForResult(ctx, ch, m.chStop) + } + // CACHE MISS + promCacheMissCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc() + m.lggr.Tracew("LatestReport CACHE MISS (cold path)", "feedID", feedIDHex) + // initiate the fetch and wait for result + ch := v.initiateFetch() + v.Unlock() + + ok := m.IfStarted(func() { + m.wg.Add(1) + go m.fetch(req, v) + }) + if !ok { + err := fmt.Errorf("memCache must be started, but is: %v", m.State()) + v.abandonFetch(err) + return nil, err + } + return v.waitForResult(ctx, ch, m.chStop) +} + +const minBackoffRetryInterval = 50 * time.Millisecond + +// newBackoff creates a backoff for retrying +func (m *memCache) newBackoff() backoff.Backoff { + min := minBackoffRetryInterval + max := m.cfg.LatestReportTTL / 2 + if min > max { + // avoid setting a min that is greater than max + min = max + } + return backoff.Backoff{ + Min: min, + Max: max, + Factor: 2, + Jitter: true, + } +} + +// fetch continually tries to call FetchLatestReport and write the result to v +// it writes errors as they come up +func (m *memCache) fetch(req *pb.LatestReportRequest, v *cacheVal) { + defer m.wg.Done() + b := m.newBackoff() + memcacheCtx, cancel := m.chStop.NewCtx() + defer cancel() + var t time.Time + var val *pb.LatestReportResponse + var err error + defer func() { + v.completeFetch(val, err, t.Add(m.cfg.LatestReportTTL)) + }() + + for { + t = time.Now() + + ctx := memcacheCtx + cancel := func() {} + if m.cfg.LatestReportDeadline > 0 { + ctx, cancel = 
context.WithTimeoutCause(memcacheCtx, m.cfg.LatestReportDeadline, errors.New("latest report fetch deadline exceeded")) + } + + // NOTE: must drop down to RawClient here otherwise we enter an + // infinite loop of calling a client that calls back to this same cache + // and on and on + val, err = m.client.RawClient().LatestReport(ctx, req) + cancel() + v.setError(err) + if memcacheCtx.Err() != nil { + // stopped + return + } else if err != nil { + m.lggr.Warnw("FetchLatestReport failed", "err", err) + promFetchFailedCount.WithLabelValues(m.client.ServerURL(), mercuryutils.BytesToFeedID(req.FeedId).String()).Inc() + select { + case <-m.chStop: + return + case <-time.After(b.Duration()): + continue + } + } + return + } +} + +func (m *memCache) Start(context.Context) error { + return m.StartOnce(m.Name(), func() error { + m.lggr.Debugw("MemCache starting", "config", m.cfg, "serverURL", m.client.ServerURL()) + m.wg.Add(1) + go m.runloop() + return nil + }) +} + +func (m *memCache) runloop() { + defer m.wg.Done() + + if m.cfg.MaxStaleAge == 0 { + return + } + t := time.NewTicker(utils.WithJitter(m.cfg.MaxStaleAge)) + + for { + select { + case <-t.C: + m.cleanup() + t.Reset(utils.WithJitter(m.cfg.MaxStaleAge)) + case <-m.chStop: + return + } + } +} + +// remove anything that has been stale for longer than maxStaleAge so that +// cache doesn't grow forever and cause memory leaks +// +// NOTE: This should be concurrent-safe with LatestReport. The only time they +// can race is if the cache item has expired past the stale age between +// creation of the cache item and start of fetch. This is unlikely, and even if +// it does occur, the worst case is that we discard a cache item early and +// double fetch, which isn't bad at all. 
+func (m *memCache) cleanup() { + m.cache.Range(func(k, vi any) bool { + v := vi.(*cacheVal) + v.RLock() + defer v.RUnlock() + if v.fetching { + // skip cleanup if fetching + return true + } + if time.Now().After(v.expiresAt.Add(m.cfg.MaxStaleAge)) { + // garbage collection + m.cache.Delete(k) + } + return true + }) +} + +func (m *memCache) Close() error { + return m.StopOnce(m.Name(), func() error { + close(m.chStop) + m.wg.Wait() + return nil + }) +} +func (m *memCache) HealthReport() map[string]error { + return map[string]error{ + m.Name(): m.Ready(), + } +} +func (m *memCache) Name() string { return m.lggr.Name() } diff --git a/core/services/relay/evm/mercury/wsrpc/cache/cache_set.go b/core/services/relay/evm/mercury/wsrpc/cache/cache_set.go new file mode 100644 index 00000000..408cf8c4 --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/cache/cache_set.go @@ -0,0 +1,118 @@ +package cache + +import ( + "context" + "fmt" + "sync" + + "golang.org/x/exp/maps" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// CacheSet holds a set of mercury caches keyed by server URL +type CacheSet interface { + services.Service + Get(ctx context.Context, client Client) (Fetcher, error) +} + +var _ CacheSet = (*cacheSet)(nil) + +type cacheSet struct { + sync.RWMutex + services.StateMachine + + lggr logger.Logger + caches map[string]Cache + + cfg Config +} + +func NewCacheSet(lggr logger.Logger, cfg Config) CacheSet { + return newCacheSet(lggr, cfg) +} + +func newCacheSet(lggr logger.Logger, cfg Config) *cacheSet { + return &cacheSet{ + sync.RWMutex{}, + services.StateMachine{}, + lggr.Named("CacheSet"), + make(map[string]Cache), + cfg, + } +} + +func (cs *cacheSet) Start(context.Context) error { + return cs.StartOnce("CacheSet", func() error { + cs.lggr.Debugw("CacheSet starting", "config", cs.cfg, "cachingEnabled", cs.cfg.LatestReportTTL > 0) + return nil + }) +} + +func (cs *cacheSet) Close() error { + return 
cs.StopOnce("CacheSet", func() error { + cs.Lock() + defer cs.Unlock() + caches := maps.Values(cs.caches) + if err := services.MultiCloser(caches).Close(); err != nil { + return err + } + cs.caches = nil + return nil + }) +} + +func (cs *cacheSet) Get(ctx context.Context, client Client) (f Fetcher, err error) { + if cs.cfg.LatestReportTTL == 0 { + // caching disabled + return nil, nil + } + ok := cs.IfStarted(func() { + f, err = cs.get(ctx, client) + }) + if !ok { + return nil, fmt.Errorf("cacheSet must be started, but is: %v", cs.State()) + } + return +} + +func (cs *cacheSet) get(ctx context.Context, client Client) (Fetcher, error) { + sURL := client.ServerURL() + // HOT PATH + cs.RLock() + c, exists := cs.caches[sURL] + cs.RUnlock() + if exists { + return c, nil + } + + // COLD PATH + cs.Lock() + defer cs.Unlock() + c, exists = cs.caches[sURL] + if exists { + return c, nil + } + c = newMemCache(cs.lggr, client, cs.cfg) + if err := c.Start(ctx); err != nil { + return nil, err + } + cs.caches[sURL] = c + return c, nil +} + +func (cs *cacheSet) HealthReport() map[string]error { + report := map[string]error{ + cs.Name(): cs.Ready(), + } + cs.RLock() + caches := maps.Values(cs.caches) + cs.RUnlock() + for _, c := range caches { + services.CopyHealth(report, c.HealthReport()) + } + return report +} +func (cs *cacheSet) Name() string { return cs.lggr.Name() } diff --git a/core/services/relay/evm/mercury/wsrpc/cache/cache_set_test.go b/core/services/relay/evm/mercury/wsrpc/cache/cache_set_test.go new file mode 100644 index 00000000..9806eba9 --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/cache/cache_set_test.go @@ -0,0 +1,57 @@ +package cache + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func Test_CacheSet(t *testing.T) { 
+ lggr := logger.TestLogger(t) + cs := newCacheSet(lggr, Config{LatestReportTTL: 1}) + disabledCs := newCacheSet(lggr, Config{LatestReportTTL: 0}) + ctx := testutils.Context(t) + servicetest.Run(t, cs) + + t.Run("Get", func(t *testing.T) { + c := &mockClient{} + + var err error + var f Fetcher + t.Run("with caching disabled, returns nil, nil", func(t *testing.T) { + assert.Len(t, disabledCs.caches, 0) + + f, err = disabledCs.Get(ctx, c) + require.NoError(t, err) + + assert.Nil(t, f) + assert.Len(t, disabledCs.caches, 0) + }) + + t.Run("with virgin cacheset, makes new entry and returns it", func(t *testing.T) { + assert.Len(t, cs.caches, 0) + + f, err = cs.Get(ctx, c) + require.NoError(t, err) + + assert.IsType(t, f, &memCache{}) + assert.Len(t, cs.caches, 1) + }) + t.Run("with existing cache for value, returns that", func(t *testing.T) { + var f2 Fetcher + assert.Len(t, cs.caches, 1) + + f2, err = cs.Get(ctx, c) + require.NoError(t, err) + + assert.IsType(t, f, &memCache{}) + assert.Equal(t, f, f2) + assert.Len(t, cs.caches, 1) + }) + }) +} diff --git a/core/services/relay/evm/mercury/wsrpc/cache/cache_test.go b/core/services/relay/evm/mercury/wsrpc/cache/cache_test.go new file mode 100644 index 00000000..335e9158 --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/cache/cache_test.go @@ -0,0 +1,201 @@ +package cache + +import ( + "context" + "errors" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + mercuryutils "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" +) + +const neverExpireTTL = 1000 * time.Hour // some massive value that will never expire during a test + +func Test_Cache(t *testing.T) { + lggr := logger.TestLogger(t) + client := &mockClient{} + cfg := Config{} + ctx := 
testutils.Context(t) + + req1 := &pb.LatestReportRequest{FeedId: []byte{1}} + req2 := &pb.LatestReportRequest{FeedId: []byte{2}} + req3 := &pb.LatestReportRequest{FeedId: []byte{3}} + + feedID1Hex := mercuryutils.BytesToFeedID(req1.FeedId).String() + + t.Run("errors with nil req", func(t *testing.T) { + c := newMemCache(lggr, client, cfg) + + _, err := c.LatestReport(ctx, nil) + assert.EqualError(t, err, "req must not be nil") + }) + + t.Run("with LatestReportTTL=0 does no caching", func(t *testing.T) { + c := newMemCache(lggr, client, cfg) + + req := &pb.LatestReportRequest{} + for i := 0; i < 5; i++ { + client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(i))}} + + resp, err := c.LatestReport(ctx, req) + require.NoError(t, err) + assert.Equal(t, client.resp, resp) + } + + client.resp = nil + client.err = errors.New("something exploded") + + resp, err := c.LatestReport(ctx, req) + assert.EqualError(t, err, "something exploded") + assert.Nil(t, resp) + }) + + t.Run("caches repeated calls to LatestReport, keyed by request", func(t *testing.T) { + cfg.LatestReportTTL = neverExpireTTL + client.err = nil + c := newMemCache(lggr, client, cfg) + + t.Run("if cache is unstarted, returns error", func(t *testing.T) { + // starting the cache is required for state management if we + // actually cache results, since fetches are initiated async and + // need to be cleaned up properly on close + _, err := c.LatestReport(ctx, &pb.LatestReportRequest{}) + assert.EqualError(t, err, "memCache must be started, but is: Unstarted") + }) + + err := c.StartOnce("test start", func() error { return nil }) + require.NoError(t, err) + + t.Run("returns cached value for key", func(t *testing.T) { + var firstResp *pb.LatestReportResponse + for i := 0; i < 5; i++ { + client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(i))}} + if firstResp == nil { + firstResp = client.resp + } + + resp, err := c.LatestReport(ctx, req1) + 
require.NoError(t, err) + assert.Equal(t, firstResp, resp) + } + }) + + t.Run("cache keys do not conflict", func(t *testing.T) { + var firstResp1 *pb.LatestReportResponse + for i := 5; i < 10; i++ { + client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(i))}} + if firstResp1 == nil { + firstResp1 = client.resp + } + + resp, err := c.LatestReport(ctx, req2) + require.NoError(t, err) + assert.Equal(t, firstResp1, resp) + } + + var firstResp2 *pb.LatestReportResponse + for i := 10; i < 15; i++ { + client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(i))}} + if firstResp2 == nil { + firstResp2 = client.resp + } + + resp, err := c.LatestReport(ctx, req3) + require.NoError(t, err) + assert.Equal(t, firstResp2, resp) + } + + // req1 key still has same value + resp, err := c.LatestReport(ctx, req1) + require.NoError(t, err) + assert.Equal(t, []byte(strconv.Itoa(0)), resp.Report.Price) + + // req2 key still has same value + resp, err = c.LatestReport(ctx, req2) + require.NoError(t, err) + assert.Equal(t, []byte(strconv.Itoa(5)), resp.Report.Price) + }) + + t.Run("re-queries when a cache item has expired", func(t *testing.T) { + vi, exists := c.cache.Load(feedID1Hex) + require.True(t, exists) + v := vi.(*cacheVal) + v.expiresAt = time.Now().Add(-1 * time.Second) + + client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(15))}} + + resp, err := c.LatestReport(ctx, req1) + require.NoError(t, err) + assert.Equal(t, client.resp, resp) + + // querying again yields the same cached item + resp, err = c.LatestReport(ctx, req1) + require.NoError(t, err) + assert.Equal(t, client.resp, resp) + }) + }) + + t.Run("complete fetch", func(t *testing.T) { + t.Run("does not change expiry if fetch returns error", func(t *testing.T) { + expires := time.Now().Add(-1 * time.Second) + v := &cacheVal{ + fetching: true, + fetchCh: make(chan (struct{})), + val: nil, + err: nil, + expiresAt: expires, + } + 
v.completeFetch(nil, errors.New("foo"), time.Now().Add(neverExpireTTL)) + assert.Equal(t, expires, v.expiresAt) + + v = &cacheVal{ + fetching: true, + fetchCh: make(chan (struct{})), + val: nil, + err: nil, + expiresAt: expires, + } + expires = time.Now().Add(neverExpireTTL) + v.completeFetch(nil, nil, expires) + assert.Equal(t, expires, v.expiresAt) + }) + }) + + t.Run("timeouts", func(t *testing.T) { + c := newMemCache(lggr, client, cfg) + // simulate fetch already executing in background + v := &cacheVal{ + fetching: true, + fetchCh: make(chan (struct{})), + val: nil, + err: nil, + expiresAt: time.Now().Add(-1 * time.Second), + } + c.cache.Store(feedID1Hex, v) + + canceledCtx, cancel := context.WithCancel(testutils.Context(t)) + cancel() + + t.Run("returns context deadline exceeded error if fetch takes too long", func(t *testing.T) { + _, err := c.LatestReport(canceledCtx, req1) + require.Error(t, err) + assert.True(t, errors.Is(err, context.Canceled)) + assert.EqualError(t, err, "context canceled") + }) + t.Run("returns wrapped context deadline exceeded error if fetch has errored and is in the retry loop", func(t *testing.T) { + v.err = errors.New("some background fetch error") + + _, err := c.LatestReport(canceledCtx, req1) + require.Error(t, err) + assert.True(t, errors.Is(err, context.Canceled)) + assert.EqualError(t, err, "some background fetch error\ncontext canceled") + }) + }) +} diff --git a/core/services/relay/evm/mercury/wsrpc/cache/helpers_test.go b/core/services/relay/evm/mercury/wsrpc/cache/helpers_test.go new file mode 100644 index 00000000..6127c2aa --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/cache/helpers_test.go @@ -0,0 +1,38 @@ +package cache + +import ( + "context" + + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" +) + +var _ Client = &mockClient{} + +type mockClient struct { + resp *pb.LatestReportResponse + err error +} + +func (m *mockClient) LatestReport(ctx context.Context, req 
*pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) { + return m.resp, m.err +} + +func (m *mockClient) ServerURL() string { + return "mock client url" +} + +func (m *mockClient) RawClient() pb.MercuryClient { + return &mockRawClient{m.resp, m.err} +} + +type mockRawClient struct { + resp *pb.LatestReportResponse + err error +} + +func (m *mockRawClient) Transmit(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) { + return nil, nil +} +func (m *mockRawClient) LatestReport(ctx context.Context, in *pb.LatestReportRequest) (*pb.LatestReportResponse, error) { + return m.resp, m.err +} diff --git a/core/services/relay/evm/mercury/wsrpc/client.go b/core/services/relay/evm/mercury/wsrpc/client.go new file mode 100644 index 00000000..ba52f194 --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/client.go @@ -0,0 +1,336 @@ +package wsrpc + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/goplugin/wsrpc" + "github.com/goplugin/wsrpc/connectivity" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/cache" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// MaxConsecutiveRequestFailures controls how many consecutive requests are +// allowed to time out before we reset the connection +const MaxConsecutiveRequestFailures = 10 + +var ( + timeoutCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_transmit_timeout_count", + Help: "Running count of transmit timeouts", + }, + []string{"serverURL"}, + ) + dialCount = 
promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_dial_count", + Help: "Running count of dials to mercury server", + }, + []string{"serverURL"}, + ) + dialSuccessCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_dial_success_count", + Help: "Running count of successful dials to mercury server", + }, + []string{"serverURL"}, + ) + dialErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_dial_error_count", + Help: "Running count of errored dials to mercury server", + }, + []string{"serverURL"}, + ) + connectionResetCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "mercury_connection_reset_count", + Help: fmt.Sprintf("Running count of times connection to mercury server has been reset (connection reset happens automatically after %d consecutive request failures)", MaxConsecutiveRequestFailures), + }, + []string{"serverURL"}, + ) +) + +type Client interface { + services.Service + pb.MercuryClient + ServerURL() string + RawClient() pb.MercuryClient +} + +type Conn interface { + WaitForReady(ctx context.Context) bool + GetState() connectivity.State + Close() +} + +type client struct { + services.StateMachine + + csaKey csakey.KeyV2 + serverPubKey []byte + serverURL string + + logger logger.Logger + conn Conn + rawClient pb.MercuryClient + + consecutiveTimeoutCnt atomic.Int32 + wg sync.WaitGroup + chStop services.StopChan + chResetTransport chan struct{} + + cacheSet cache.CacheSet + cache cache.Fetcher + + timeoutCountMetric prometheus.Counter + dialCountMetric prometheus.Counter + dialSuccessCountMetric prometheus.Counter + dialErrorCountMetric prometheus.Counter + connectionResetCountMetric prometheus.Counter +} + +// Consumers of wsrpc package should not usually call NewClient directly, but instead use the Pool +func NewClient(lggr logger.Logger, clientPrivKey csakey.KeyV2, serverPubKey []byte, serverURL string, cacheSet cache.CacheSet) Client { + return newClient(lggr, clientPrivKey, 
serverPubKey, serverURL, cacheSet) +} + +func newClient(lggr logger.Logger, clientPrivKey csakey.KeyV2, serverPubKey []byte, serverURL string, cacheSet cache.CacheSet) *client { + return &client{ + csaKey: clientPrivKey, + serverPubKey: serverPubKey, + serverURL: serverURL, + logger: lggr.Named("WSRPC").With("mercuryServerURL", serverURL), + chResetTransport: make(chan struct{}, 1), + cacheSet: cacheSet, + chStop: make(services.StopChan), + timeoutCountMetric: timeoutCount.WithLabelValues(serverURL), + dialCountMetric: dialCount.WithLabelValues(serverURL), + dialSuccessCountMetric: dialSuccessCount.WithLabelValues(serverURL), + dialErrorCountMetric: dialErrorCount.WithLabelValues(serverURL), + connectionResetCountMetric: connectionResetCount.WithLabelValues(serverURL), + } +} + +func (w *client) Start(ctx context.Context) error { + return w.StartOnce("WSRPC Client", func() (err error) { + // NOTE: This is not a mistake, dial is non-blocking so it should use a + // background context, not the Start context + if err = w.dial(context.Background()); err != nil { + return err + } + w.cache, err = w.cacheSet.Get(ctx, w) + if err != nil { + return err + } + w.wg.Add(1) + go w.runloop() + return nil + }) +} + +// NOTE: Dial is non-blocking, and will retry on an exponential backoff +// in the background until close is called, or context is cancelled. +// This is why we use the background context, not the start context here. +// +// Any transmits made while client is still trying to dial will fail +// with error. 
+func (w *client) dial(ctx context.Context, opts ...wsrpc.DialOption) error { + w.dialCountMetric.Inc() + conn, err := wsrpc.DialWithContext(ctx, w.serverURL, + append(opts, + wsrpc.WithTransportCreds(w.csaKey.Raw().Bytes(), w.serverPubKey), + wsrpc.WithLogger(w.logger), + )..., + ) + if err != nil { + w.dialErrorCountMetric.Inc() + setLivenessMetric(false) + return errors.Wrap(err, "failed to dial wsrpc client") + } + w.dialSuccessCountMetric.Inc() + setLivenessMetric(true) + w.conn = conn + w.rawClient = pb.NewMercuryClient(conn) + return nil +} + +func (w *client) runloop() { + defer w.wg.Done() + for { + select { + case <-w.chStop: + return + case <-w.chResetTransport: + // Using channel here ensures we only have one reset in process at + // any given time + w.resetTransport() + } + } +} + +// resetTransport disconnects and reconnects to the mercury server +func (w *client) resetTransport() { + w.connectionResetCountMetric.Inc() + ok := w.IfStarted(func() { + w.conn.Close() // Close is safe to call multiple times + }) + if !ok { + panic("resetTransport should never be called unless client is in 'started' state") + } + ctx, cancel := w.chStop.Ctx(context.Background()) + defer cancel() + b := utils.NewRedialBackoff() + for { + // Will block until successful dial, or context is canceled (i.e. 
on close) + err := w.dial(ctx, wsrpc.WithBlock()) + if err == nil { + break + } + if ctx.Err() != nil { + w.logger.Debugw("ResetTransport exiting due to client Close", "err", err) + return + } + w.logger.Errorw("ResetTransport failed to redial", "err", err) + time.Sleep(b.Duration()) + } + w.logger.Info("ResetTransport successfully redialled") +} + +func (w *client) Close() error { + return w.StopOnce("WSRPC Client", func() error { + close(w.chStop) + w.conn.Close() + w.wg.Wait() + return nil + }) +} + +func (w *client) Name() string { + return "EVM.Mercury.WSRPCClient" +} + +func (w *client) HealthReport() map[string]error { + return map[string]error{w.Name(): w.Healthy()} +} + +// Healthy if connected +func (w *client) Healthy() (err error) { + if err = w.StateMachine.Healthy(); err != nil { + return err + } + state := w.conn.GetState() + if state != connectivity.Ready { + return errors.Errorf("client state should be %s; got %s", connectivity.Ready, state) + } + return nil +} + +func (w *client) waitForReady(ctx context.Context) (err error) { + ok := w.IfStarted(func() { + if ready := w.conn.WaitForReady(ctx); !ready { + err = errors.Errorf("websocket client not ready; got state: %v", w.conn.GetState()) + return + } + }) + if !ok { + return errors.New("client is not started") + } + return +} + +func (w *client) Transmit(ctx context.Context, req *pb.TransmitRequest) (resp *pb.TransmitResponse, err error) { + w.logger.Trace("Transmit") + start := time.Now() + if err = w.waitForReady(ctx); err != nil { + return nil, errors.Wrap(err, "Transmit call failed") + } + resp, err = w.rawClient.Transmit(ctx, req) + w.handleTimeout(err) + if err != nil { + w.logger.Warnw("Transmit call failed due to networking error", "err", err, "resp", resp) + incRequestStatusMetric(statusFailed) + } else { + w.logger.Tracew("Transmit call succeeded", "resp", resp) + incRequestStatusMetric(statusSuccess) + setRequestLatencyMetric(float64(time.Since(start).Milliseconds())) + } + return +} + 
+func (w *client) handleTimeout(err error) { + if errors.Is(err, context.DeadlineExceeded) { + w.timeoutCountMetric.Inc() + cnt := w.consecutiveTimeoutCnt.Add(1) + if cnt == MaxConsecutiveRequestFailures { + w.logger.Errorf("Timed out on %d consecutive transmits, resetting transport", cnt) + // NOTE: If we get at least MaxConsecutiveRequestFailures request + // timeouts in a row, close and re-open the websocket connection. + // + // This *shouldn't* be necessary in theory (ideally, wsrpc would + // handle it for us) but it acts as a "belts and braces" approach + // to ensure we get a websocket connection back up and running + // again if it gets itself into a bad state. + select { + case w.chResetTransport <- struct{}{}: + default: + // This can happen if we had MaxConsecutiveRequestFailures + // consecutive timeouts, already sent a reset signal, then the + // connection started working again (resetting the count) then + // we got MaxConsecutiveRequestFailures additional failures + // before the runloop was able to close the bad connection. + // + // It should be safe to just ignore in this case. + // + // Debug log in case my reasoning is wrong. 
+ w.logger.Debugf("Transport is resetting, cnt=%d", cnt) + } + } + } else { + w.consecutiveTimeoutCnt.Store(0) + } +} + +func (w *client) LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) { + lggr := w.logger.With("req.FeedId", hexutil.Encode(req.FeedId)) + lggr.Trace("LatestReport") + if err = w.waitForReady(ctx); err != nil { + return nil, errors.Wrap(err, "LatestReport failed") + } + var cached bool + if w.cache == nil { + resp, err = w.rawClient.LatestReport(ctx, req) + w.handleTimeout(err) + } else { + cached = true + resp, err = w.cache.LatestReport(ctx, req) + } + if err != nil { + lggr.Errorw("LatestReport failed", "err", err, "resp", resp, "cached", cached) + } else if resp.Error != "" { + lggr.Errorw("LatestReport failed; mercury server returned error", "err", resp.Error, "resp", resp, "cached", cached) + } else if !cached { + lggr.Debugw("LatestReport succeeded", "resp", resp, "cached", cached) + } else { + lggr.Tracew("LatestReport succeeded", "resp", resp, "cached", cached) + } + return +} + +func (w *client) ServerURL() string { + return w.serverURL +} + +func (w *client) RawClient() pb.MercuryClient { + return w.rawClient +} diff --git a/core/services/relay/evm/mercury/wsrpc/client_test.go b/core/services/relay/evm/mercury/wsrpc/client_test.go new file mode 100644 index 00000000..ee300a28 --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/client_test.go @@ -0,0 +1,178 @@ +package wsrpc + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/cache" + 
"github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" +) + +// simulate start without dialling +func simulateStart(ctx context.Context, t *testing.T, c *client) { + require.NoError(t, c.StartOnce("Mock WSRPC Client", func() (err error) { + c.cache, err = c.cacheSet.Get(ctx, c) + return err + })) +} + +var _ cache.CacheSet = &mockCacheSet{} + +type mockCacheSet struct{} + +func (m *mockCacheSet) Get(ctx context.Context, client cache.Client) (cache.Fetcher, error) { + return nil, nil +} +func (m *mockCacheSet) Start(context.Context) error { return nil } +func (m *mockCacheSet) Ready() error { return nil } +func (m *mockCacheSet) HealthReport() map[string]error { return nil } +func (m *mockCacheSet) Name() string { return "" } +func (m *mockCacheSet) Close() error { return nil } + +var _ cache.Cache = &mockCache{} + +type mockCache struct{} + +func (m *mockCache) LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) { + return nil, nil +} +func (m *mockCache) Start(context.Context) error { return nil } +func (m *mockCache) Ready() error { return nil } +func (m *mockCache) HealthReport() map[string]error { return nil } +func (m *mockCache) Name() string { return "" } +func (m *mockCache) Close() error { return nil } + +func newNoopCacheSet() cache.CacheSet { + return &mockCacheSet{} +} + +func Test_Client_Transmit(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := testutils.Context(t) + req := &pb.TransmitRequest{} + + noopCacheSet := newNoopCacheSet() + + t.Run("sends on reset channel after MaxConsecutiveRequestFailures timed out transmits", func(t *testing.T) { + calls := 0 + transmitErr := context.DeadlineExceeded + wsrpcClient := &mocks.MockWSRPCClient{ + TransmitF: func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) { + calls++ + return nil, transmitErr + }, + } + conn := 
&mocks.MockConn{ + Ready: true, + } + c := newClient(lggr, csakey.KeyV2{}, nil, "", noopCacheSet) + c.conn = conn + c.rawClient = wsrpcClient + require.NoError(t, c.StartOnce("Mock WSRPC Client", func() error { return nil })) + for i := 1; i < MaxConsecutiveRequestFailures; i++ { + _, err := c.Transmit(ctx, req) + require.EqualError(t, err, "context deadline exceeded") + } + assert.Equal(t, MaxConsecutiveRequestFailures-1, calls) + select { + case <-c.chResetTransport: + t.Fatal("unexpected send on chResetTransport") + default: + } + _, err := c.Transmit(ctx, req) + require.EqualError(t, err, "context deadline exceeded") + assert.Equal(t, MaxConsecutiveRequestFailures, calls) + select { + case <-c.chResetTransport: + default: + t.Fatal("expected send on chResetTransport") + } + + t.Run("successful transmit resets the counter", func(t *testing.T) { + transmitErr = nil + // working transmit to reset counter + _, err = c.Transmit(ctx, req) + require.NoError(t, err) + assert.Equal(t, MaxConsecutiveRequestFailures+1, calls) + assert.Equal(t, 0, int(c.consecutiveTimeoutCnt.Load())) + }) + + t.Run("doesn't block in case channel is full", func(t *testing.T) { + transmitErr = context.DeadlineExceeded + c.chResetTransport = nil // simulate full channel + for i := 0; i < MaxConsecutiveRequestFailures; i++ { + _, err := c.Transmit(ctx, req) + require.EqualError(t, err, "context deadline exceeded") + } + }) + }) +} + +func Test_Client_LatestReport(t *testing.T) { + lggr := logger.TestLogger(t) + ctx := testutils.Context(t) + cacheReads := 5 + + tests := []struct { + name string + ttl time.Duration + expectedCalls int + }{ + { + name: "with cache disabled", + ttl: 0, + expectedCalls: 5, + }, + { + name: "with cache enabled", + ttl: 1000 * time.Hour, //some large value that will never expire during a test + expectedCalls: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := &pb.LatestReportRequest{} + + cacheSet := cache.NewCacheSet(lggr, 
cache.Config{LatestReportTTL: tt.ttl}) + + resp := &pb.LatestReportResponse{} + + var calls int + wsrpcClient := &mocks.MockWSRPCClient{ + LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (*pb.LatestReportResponse, error) { + calls++ + assert.Equal(t, req, in) + return resp, nil + }, + } + + conn := &mocks.MockConn{ + Ready: true, + } + c := newClient(lggr, csakey.KeyV2{}, nil, "", cacheSet) + c.conn = conn + c.rawClient = wsrpcClient + + servicetest.Run(t, cacheSet) + simulateStart(ctx, t, c) + + for i := 0; i < cacheReads; i++ { + r, err := c.LatestReport(ctx, req) + + require.NoError(t, err) + assert.Equal(t, resp, r) + } + assert.Equal(t, tt.expectedCalls, calls, "expected %d calls to LatestReport but it was called %d times", tt.expectedCalls, calls) + }) + } +} diff --git a/core/services/relay/evm/mercury/wsrpc/metrics.go b/core/services/relay/evm/mercury/wsrpc/metrics.go new file mode 100644 index 00000000..8c12184c --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/metrics.go @@ -0,0 +1,49 @@ +package wsrpc + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +type reqStatus string + +const ( + statusSuccess reqStatus = "success" + statusFailed reqStatus = "failed" +) + +var ( + aliveMetric = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "mercury", + Name: "wsrpc_connection_alive", + Help: "Total time spent connected to the Mercury WSRPC server", + }) + requestsStatusMetric = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "mercury", + Name: "wsrpc_requests_status_count", + Help: "Number of request status made to the Mercury WSRPC server", + }, []string{"status"}) + + requestLatencyMetric = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: "mercury", + Name: "wsrpc_request_latency", + Help: "Latency of requests made to the Mercury WSRPC server", + Buckets: []float64{10, 30, 100, 200, 250, 300, 350, 400, 500, 750, 1000, 3000, 
10000}, + }) +) + +func setLivenessMetric(live bool) { + if live { + aliveMetric.Set(1) + } else { + aliveMetric.Set(0) + } +} + +func incRequestStatusMetric(status reqStatus) { + requestsStatusMetric.WithLabelValues(string(status)).Inc() +} + +func setRequestLatencyMetric(latency float64) { + requestLatencyMetric.Observe(latency) +} diff --git a/core/services/relay/evm/mercury/wsrpc/mocks/mocks.go b/core/services/relay/evm/mercury/wsrpc/mocks/mocks.go new file mode 100644 index 00000000..11c08a8d --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/mocks/mocks.go @@ -0,0 +1,43 @@ +package mocks + +import ( + "context" + + "github.com/goplugin/wsrpc/connectivity" + + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb" +) + +type MockWSRPCClient struct { + TransmitF func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) + LatestReportF func(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) +} + +func (m MockWSRPCClient) Name() string { return "" } +func (m MockWSRPCClient) Start(context.Context) error { return nil } +func (m MockWSRPCClient) Close() error { return nil } +func (m MockWSRPCClient) HealthReport() map[string]error { return map[string]error{} } +func (m MockWSRPCClient) Ready() error { return nil } +func (m MockWSRPCClient) Transmit(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) { + return m.TransmitF(ctx, in) +} +func (m MockWSRPCClient) LatestReport(ctx context.Context, in *pb.LatestReportRequest) (*pb.LatestReportResponse, error) { + return m.LatestReportF(ctx, in) +} +func (m MockWSRPCClient) ServerURL() string { return "mock server url" } + +func (m MockWSRPCClient) RawClient() pb.MercuryClient { return nil } + +type MockConn struct { + State connectivity.State + Ready bool + Closed bool +} + +func (m *MockConn) Close() { + m.Closed = true +} +func (m MockConn) WaitForReady(ctx context.Context) bool { + return m.Ready +} 
+func (m MockConn) GetState() connectivity.State { return m.State } diff --git a/core/services/relay/evm/mercury/wsrpc/pb/generate.go b/core/services/relay/evm/mercury/wsrpc/pb/generate.go new file mode 100644 index 00000000..2bb95012 --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/pb/generate.go @@ -0,0 +1,2 @@ +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-wsrpc_out=. --go-wsrpc_opt=paths=source_relative mercury.proto +package pb diff --git a/core/services/relay/evm/mercury/wsrpc/pb/mercury.pb.go b/core/services/relay/evm/mercury/wsrpc/pb/mercury.pb.go new file mode 100644 index 00000000..ce4125bd --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/pb/mercury.pb.go @@ -0,0 +1,657 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: mercury.proto + +package pb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TransmitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + ReportFormat string `protobuf:"bytes,2,opt,name=reportFormat,proto3" json:"reportFormat,omitempty"` +} + +func (x *TransmitRequest) Reset() { + *x = TransmitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_mercury_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransmitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransmitRequest) ProtoMessage() {} + +func (x *TransmitRequest) ProtoReflect() protoreflect.Message { + mi := &file_mercury_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransmitRequest.ProtoReflect.Descriptor instead. 
+func (*TransmitRequest) Descriptor() ([]byte, []int) { + return file_mercury_proto_rawDescGZIP(), []int{0} +} + +func (x *TransmitRequest) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *TransmitRequest) GetReportFormat() string { + if x != nil { + return x.ReportFormat + } + return "" +} + +type TransmitResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *TransmitResponse) Reset() { + *x = TransmitResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_mercury_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransmitResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransmitResponse) ProtoMessage() {} + +func (x *TransmitResponse) ProtoReflect() protoreflect.Message { + mi := &file_mercury_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransmitResponse.ProtoReflect.Descriptor instead. 
+func (*TransmitResponse) Descriptor() ([]byte, []int) { + return file_mercury_proto_rawDescGZIP(), []int{1} +} + +func (x *TransmitResponse) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *TransmitResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type LatestReportRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FeedId []byte `protobuf:"bytes,1,opt,name=feedId,proto3" json:"feedId,omitempty"` +} + +func (x *LatestReportRequest) Reset() { + *x = LatestReportRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_mercury_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LatestReportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LatestReportRequest) ProtoMessage() {} + +func (x *LatestReportRequest) ProtoReflect() protoreflect.Message { + mi := &file_mercury_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LatestReportRequest.ProtoReflect.Descriptor instead. 
+func (*LatestReportRequest) Descriptor() ([]byte, []int) { + return file_mercury_proto_rawDescGZIP(), []int{2} +} + +func (x *LatestReportRequest) GetFeedId() []byte { + if x != nil { + return x.FeedId + } + return nil +} + +type LatestReportResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Report *Report `protobuf:"bytes,2,opt,name=report,proto3" json:"report,omitempty"` +} + +func (x *LatestReportResponse) Reset() { + *x = LatestReportResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_mercury_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LatestReportResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LatestReportResponse) ProtoMessage() {} + +func (x *LatestReportResponse) ProtoReflect() protoreflect.Message { + mi := &file_mercury_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LatestReportResponse.ProtoReflect.Descriptor instead. 
+func (*LatestReportResponse) Descriptor() ([]byte, []int) { + return file_mercury_proto_rawDescGZIP(), []int{3} +} + +func (x *LatestReportResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *LatestReportResponse) GetReport() *Report { + if x != nil { + return x.Report + } + return nil +} + +type Report struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FeedId []byte `protobuf:"bytes,1,opt,name=feedId,proto3" json:"feedId,omitempty"` + Price []byte `protobuf:"bytes,2,opt,name=price,proto3" json:"price,omitempty"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + ValidFromBlockNumber int64 `protobuf:"varint,4,opt,name=validFromBlockNumber,proto3" json:"validFromBlockNumber,omitempty"` + CurrentBlockNumber int64 `protobuf:"varint,5,opt,name=currentBlockNumber,proto3" json:"currentBlockNumber,omitempty"` + CurrentBlockHash []byte `protobuf:"bytes,6,opt,name=currentBlockHash,proto3" json:"currentBlockHash,omitempty"` + CurrentBlockTimestamp uint64 `protobuf:"varint,7,opt,name=currentBlockTimestamp,proto3" json:"currentBlockTimestamp,omitempty"` + ObservationsTimestamp int64 `protobuf:"varint,8,opt,name=observationsTimestamp,proto3" json:"observationsTimestamp,omitempty"` + ConfigDigest []byte `protobuf:"bytes,9,opt,name=configDigest,proto3" json:"configDigest,omitempty"` + Epoch uint32 `protobuf:"varint,10,opt,name=epoch,proto3" json:"epoch,omitempty"` + Round uint32 `protobuf:"varint,11,opt,name=round,proto3" json:"round,omitempty"` + OperatorName string `protobuf:"bytes,12,opt,name=operatorName,proto3" json:"operatorName,omitempty"` + TransmittingOperator []byte `protobuf:"bytes,13,opt,name=transmittingOperator,proto3" json:"transmittingOperator,omitempty"` + CreatedAt *Timestamp `protobuf:"bytes,14,opt,name=createdAt,proto3" json:"createdAt,omitempty"` +} + +func (x *Report) Reset() { + *x = Report{} + if 
protoimpl.UnsafeEnabled { + mi := &file_mercury_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Report) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Report) ProtoMessage() {} + +func (x *Report) ProtoReflect() protoreflect.Message { + mi := &file_mercury_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Report.ProtoReflect.Descriptor instead. +func (*Report) Descriptor() ([]byte, []int) { + return file_mercury_proto_rawDescGZIP(), []int{4} +} + +func (x *Report) GetFeedId() []byte { + if x != nil { + return x.FeedId + } + return nil +} + +func (x *Report) GetPrice() []byte { + if x != nil { + return x.Price + } + return nil +} + +func (x *Report) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *Report) GetValidFromBlockNumber() int64 { + if x != nil { + return x.ValidFromBlockNumber + } + return 0 +} + +func (x *Report) GetCurrentBlockNumber() int64 { + if x != nil { + return x.CurrentBlockNumber + } + return 0 +} + +func (x *Report) GetCurrentBlockHash() []byte { + if x != nil { + return x.CurrentBlockHash + } + return nil +} + +func (x *Report) GetCurrentBlockTimestamp() uint64 { + if x != nil { + return x.CurrentBlockTimestamp + } + return 0 +} + +func (x *Report) GetObservationsTimestamp() int64 { + if x != nil { + return x.ObservationsTimestamp + } + return 0 +} + +func (x *Report) GetConfigDigest() []byte { + if x != nil { + return x.ConfigDigest + } + return nil +} + +func (x *Report) GetEpoch() uint32 { + if x != nil { + return x.Epoch + } + return 0 +} + +func (x *Report) GetRound() uint32 { + if x != nil { + return x.Round + } + return 0 +} + +func (x *Report) GetOperatorName() string { + if x != nil { + return 
x.OperatorName + } + return "" +} + +func (x *Report) GetTransmittingOperator() []byte { + if x != nil { + return x.TransmittingOperator + } + return nil +} + +func (x *Report) GetCreatedAt() *Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +// Taken from: https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/timestamp.proto +type Timestamp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (x *Timestamp) Reset() { + *x = Timestamp{} + if protoimpl.UnsafeEnabled { + mi := &file_mercury_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Timestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Timestamp) ProtoMessage() {} + +func (x *Timestamp) ProtoReflect() protoreflect.Message { + mi := &file_mercury_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. 
+func (*Timestamp) Descriptor() ([]byte, []int) { + return file_mercury_proto_rawDescGZIP(), []int{5} +} + +func (x *Timestamp) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Timestamp) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_mercury_proto protoreflect.FileDescriptor + +var file_mercury_proto_rawDesc = []byte{ + 0x0a, 0x0d, 0x6d, 0x65, 0x72, 0x63, 0x75, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x02, 0x70, 0x62, 0x22, 0x4f, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x12, 0x22, 0x0a, 0x0c, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x22, 0x3c, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0x2d, 0x0a, 0x13, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x65, 0x65, + 0x64, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x65, 0x65, 0x64, 0x49, + 0x64, 0x22, 0x50, 0x0a, 0x14, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, + 0x22, 0x0a, 0x06, 0x72, 0x65, 0x70, 
0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x06, 0x72, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x22, 0xa1, 0x04, 0x0a, 0x06, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x66, 0x65, 0x65, 0x64, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, + 0x66, 0x65, 0x65, 0x64, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x69, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x69, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x32, 0x0a, 0x14, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, + 0x72, 0x6f, 0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x12, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x10, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x34, 0x0a, 0x15, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x15, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x34, 0x0a, 0x15, + 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 
0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6f, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x14, 0x0a, 0x05, + 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x6f, 0x75, + 0x6e, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6d, + 0x69, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, + 0x70, 0x62, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, + 0x61, 0x6e, 0x6f, 0x73, 0x32, 0x83, 0x01, 0x0a, 0x07, 0x4d, 0x65, 0x72, 0x63, 0x75, 0x72, 0x79, + 0x12, 0x35, 
0x0a, 0x08, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x12, 0x13, 0x2e, 0x70, + 0x62, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x4c, 0x61, 0x74, 0x65, 0x73, + 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x17, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x61, 0x74, + 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x4e, 0x5a, 0x4c, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6b, 0x69, 0x74, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, + 0x69, 0x6e, 0x6b, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2f, 0x65, 0x76, 0x6d, 0x2f, 0x6d, 0x65, 0x72, 0x63, 0x75, 0x72, + 0x79, 0x2f, 0x77, 0x73, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_mercury_proto_rawDescOnce sync.Once + file_mercury_proto_rawDescData = file_mercury_proto_rawDesc +) + +func file_mercury_proto_rawDescGZIP() []byte { + file_mercury_proto_rawDescOnce.Do(func() { + file_mercury_proto_rawDescData = protoimpl.X.CompressGZIP(file_mercury_proto_rawDescData) + }) + return file_mercury_proto_rawDescData +} + +var file_mercury_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_mercury_proto_goTypes = []interface{}{ + (*TransmitRequest)(nil), // 0: pb.TransmitRequest + (*TransmitResponse)(nil), // 1: pb.TransmitResponse + (*LatestReportRequest)(nil), // 2: pb.LatestReportRequest + (*LatestReportResponse)(nil), // 3: pb.LatestReportResponse + (*Report)(nil), // 4: 
pb.Report + (*Timestamp)(nil), // 5: pb.Timestamp +} +var file_mercury_proto_depIdxs = []int32{ + 4, // 0: pb.LatestReportResponse.report:type_name -> pb.Report + 5, // 1: pb.Report.createdAt:type_name -> pb.Timestamp + 0, // 2: pb.Mercury.Transmit:input_type -> pb.TransmitRequest + 2, // 3: pb.Mercury.LatestReport:input_type -> pb.LatestReportRequest + 1, // 4: pb.Mercury.Transmit:output_type -> pb.TransmitResponse + 3, // 5: pb.Mercury.LatestReport:output_type -> pb.LatestReportResponse + 4, // [4:6] is the sub-list for method output_type + 2, // [2:4] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_mercury_proto_init() } +func file_mercury_proto_init() { + if File_mercury_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_mercury_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransmitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mercury_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransmitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mercury_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LatestReportRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mercury_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LatestReportResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mercury_proto_msgTypes[4].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*Report); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mercury_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Timestamp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_mercury_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_mercury_proto_goTypes, + DependencyIndexes: file_mercury_proto_depIdxs, + MessageInfos: file_mercury_proto_msgTypes, + }.Build() + File_mercury_proto = out.File + file_mercury_proto_rawDesc = nil + file_mercury_proto_goTypes = nil + file_mercury_proto_depIdxs = nil +} diff --git a/core/services/relay/evm/mercury/wsrpc/pb/mercury.proto b/core/services/relay/evm/mercury/wsrpc/pb/mercury.proto new file mode 100644 index 00000000..4e616d10 --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/pb/mercury.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; + +option go_package = "github.com/goplugin/pluginv3.0/v2/services/relay/evm/mercury/wsrpc/pb"; + +package pb; + +service Mercury { + rpc Transmit(TransmitRequest) returns (TransmitResponse); + rpc LatestReport(LatestReportRequest) returns (LatestReportResponse); +} + +message TransmitRequest { + bytes payload = 1; + string reportFormat = 2; +} + +message TransmitResponse { + int32 code = 1; + string error = 2; +} + +message LatestReportRequest { + bytes feedId = 1; +} + +message LatestReportResponse { + string error = 1; + Report report = 2; +} + +message Report { + bytes feedId = 1; + bytes price = 2; + bytes payload = 3; + int64 validFromBlockNumber = 4; + int64 currentBlockNumber = 5; + bytes currentBlockHash = 6; + uint64 
currentBlockTimestamp = 7; + int64 observationsTimestamp = 8; + bytes configDigest = 9; + uint32 epoch = 10; + uint32 round = 11; + string operatorName = 12; + bytes transmittingOperator = 13; + Timestamp createdAt = 14; +} + +// Taken from: https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/timestamp.proto +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/core/services/relay/evm/mercury/wsrpc/pb/mercury_wsrpc.pb.go b/core/services/relay/evm/mercury/wsrpc/pb/mercury_wsrpc.pb.go new file mode 100644 index 00000000..bb4345e8 --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/pb/mercury_wsrpc.pb.go @@ -0,0 +1,88 @@ +// Code generated by protoc-gen-go-wsrpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-wsrpc v0.0.1 +// - protoc v4.25.1 + +package pb + +import ( + context "context" + wsrpc "github.com/goplugin/wsrpc" +) + +// MercuryClient is the client API for Mercury service. 
+// +type MercuryClient interface { + Transmit(ctx context.Context, in *TransmitRequest) (*TransmitResponse, error) + LatestReport(ctx context.Context, in *LatestReportRequest) (*LatestReportResponse, error) +} + +type mercuryClient struct { + cc wsrpc.ClientInterface +} + +func NewMercuryClient(cc wsrpc.ClientInterface) MercuryClient { + return &mercuryClient{cc} +} + +func (c *mercuryClient) Transmit(ctx context.Context, in *TransmitRequest) (*TransmitResponse, error) { + out := new(TransmitResponse) + err := c.cc.Invoke(ctx, "Transmit", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *mercuryClient) LatestReport(ctx context.Context, in *LatestReportRequest) (*LatestReportResponse, error) { + out := new(LatestReportResponse) + err := c.cc.Invoke(ctx, "LatestReport", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +// MercuryServer is the server API for Mercury service. +type MercuryServer interface { + Transmit(context.Context, *TransmitRequest) (*TransmitResponse, error) + LatestReport(context.Context, *LatestReportRequest) (*LatestReportResponse, error) +} + +func RegisterMercuryServer(s wsrpc.ServiceRegistrar, srv MercuryServer) { + s.RegisterService(&Mercury_ServiceDesc, srv) +} + +func _Mercury_Transmit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(TransmitRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(MercuryServer).Transmit(ctx, in) +} + +func _Mercury_LatestReport_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(LatestReportRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(MercuryServer).LatestReport(ctx, in) +} + +// Mercury_ServiceDesc is the wsrpc.ServiceDesc for Mercury service. 
+// It's only intended for direct use with wsrpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Mercury_ServiceDesc = wsrpc.ServiceDesc{ + ServiceName: "pb.Mercury", + HandlerType: (*MercuryServer)(nil), + Methods: []wsrpc.MethodDesc{ + { + MethodName: "Transmit", + Handler: _Mercury_Transmit_Handler, + }, + { + MethodName: "LatestReport", + Handler: _Mercury_LatestReport_Handler, + }, + }, +} diff --git a/core/services/relay/evm/mercury/wsrpc/pool.go b/core/services/relay/evm/mercury/wsrpc/pool.go new file mode 100644 index 00000000..d45b264c --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/pool.go @@ -0,0 +1,231 @@ +package wsrpc + +import ( + "context" + "errors" + "sync" + + "github.com/goplugin/wsrpc/credentials" + "golang.org/x/exp/maps" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/cache" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var _ Client = &clientCheckout{} + +type clientCheckout struct { + *connection // inherit all methods from client, with override on Start/Close +} + +func (cco *clientCheckout) Start(_ context.Context) error { + return nil +} + +func (cco *clientCheckout) Close() error { + cco.connection.checkin(cco) + return nil +} + +type connection struct { + // Client will be nil when checkouts is empty, if len(checkouts) > 0 then it is expected to be a non-nil, started client + Client + + lggr logger.Logger + clientPrivKey csakey.KeyV2 + serverPubKey []byte + serverURL string + + pool *pool + + checkouts []*clientCheckout // reference count, if this goes to zero the connection should be closed and *client nilified + + mu sync.Mutex +} + +func (conn *connection) checkout(ctx context.Context) (cco *clientCheckout, err error) { + conn.mu.Lock() + defer conn.mu.Unlock() + if err = 
conn.ensureStartedClient(ctx); err != nil {
		return nil, err
	}
	cco = &clientCheckout{conn}
	conn.checkouts = append(conn.checkouts, cco)
	return cco, nil
}

// ensureStartedClient lazily creates and starts the underlying Client the
// first time a checkout is taken (i.e. when the reference count is zero).
// not thread-safe, access must be serialized
func (conn *connection) ensureStartedClient(ctx context.Context) error {
	if len(conn.checkouts) == 0 {
		conn.Client = conn.pool.newClient(conn.lggr, conn.clientPrivKey, conn.serverPubKey, conn.serverURL, conn.pool.cacheSet)
		// NOTE(review): if Start fails here, conn.Client is left non-nil with
		// zero checkouts; the next checkout overwrites it without closing the
		// failed client — presumably harmless since it never started, but
		// worth confirming against the Client implementation.
		return conn.Client.Start(ctx)
	}
	return nil
}

// checkin returns one previously checked-out handle. When the last checkout
// is returned, the underlying Client is closed and the connection removes
// itself from the pool's map. Checking in a handle that was never checked
// out is a programming error and panics.
func (conn *connection) checkin(checkinCco *clientCheckout) {
	conn.mu.Lock()
	defer conn.mu.Unlock()
	var removed bool
	for i, cco := range conn.checkouts {
		if cco == checkinCco {
			// order within checkouts does not matter, so the cheaper
			// unstable delete is used
			conn.checkouts = utils.DeleteUnstable(conn.checkouts, i)
			removed = true
			break
		}
	}
	if !removed {
		panic("tried to check in client that was never checked out")
	}
	if len(conn.checkouts) == 0 {
		if err := conn.Client.Close(); err != nil {
			// programming error if we hit this
			panic(err)
		}
		conn.Client = nil
		conn.pool.remove(conn.serverURL, conn.clientPrivKey.StaticSizedPublicKey())
	}
}

// forceCloseAll unconditionally closes the underlying Client (if any) and
// drops all outstanding checkouts. Used by pool.Close to tear everything
// down regardless of reference counts.
func (conn *connection) forceCloseAll() (err error) {
	conn.mu.Lock()
	defer conn.mu.Unlock()
	if conn.Client != nil {
		err = conn.Client.Close()
		if errors.Is(err, utils.ErrAlreadyStopped) {
			// ignore error if it has already been stopped; no problem
			err = nil
		}
		conn.Client = nil
		conn.checkouts = nil
	}
	return
}

// Pool is a lifecycle-managed set of shared wsrpc clients, keyed by
// (server URL, client public key).
type Pool interface {
	services.ServiceCtx
	// Checkout gets a wsrpc.Client for the given arguments
	// The same underlying client can be checked out multiple times, the pool
	// handles lifecycle management. The consumer can treat it as if it were
	// its own unique client.
	Checkout(ctx context.Context, clientPrivKey csakey.KeyV2, serverPubKey []byte, serverURL string) (client Client, err error)
}

// WSRPC allows only one connection per client key per server
type pool struct {
	lggr logger.Logger
	// server url => client public key => connection
	connections map[string]map[credentials.StaticSizedPublicKey]*connection

	// newClient is injected as a field (rather than calling NewClient
	// directly) to make testing/mocking easier
	newClient func(lggr logger.Logger, privKey csakey.KeyV2, serverPubKey []byte, serverURL string, cacheSet cache.CacheSet) Client

	// mu guards connections and closed
	mu sync.RWMutex

	cacheSet cache.CacheSet

	closed bool
}

// NewPool returns a production Pool wired with the real client constructor
// and a cache set built from cacheCfg.
func NewPool(lggr logger.Logger, cacheCfg cache.Config) Pool {
	lggr = lggr.Named("Mercury.WSRPCPool")
	p := newPool(lggr)
	p.newClient = NewClient
	p.cacheSet = cache.NewCacheSet(lggr, cacheCfg)
	return p
}

// newPool constructs the bare pool; callers (NewPool, tests) must set
// newClient and cacheSet before use.
func newPool(lggr logger.Logger) *pool {
	return &pool{
		lggr:        lggr,
		connections: make(map[string]map[credentials.StaticSizedPublicKey]*connection),
	}
}

// Checkout returns a Client handle backed by a shared connection for the
// given key/server pair, creating the connection entry on first use.
// Closing the returned Client checks the handle back in rather than
// closing the shared connection outright.
func (p *pool) Checkout(ctx context.Context, clientPrivKey csakey.KeyV2, serverPubKey []byte, serverURL string) (client Client, err error) {
	clientPubKey := clientPrivKey.StaticSizedPublicKey()

	p.mu.Lock()

	if p.closed {
		p.mu.Unlock()
		return nil, errors.New("pool is closed")
	}

	server, exists := p.connections[serverURL]
	if !exists {
		server = make(map[credentials.StaticSizedPublicKey]*connection)
		p.connections[serverURL] = server
	}
	conn, exists := server[clientPubKey]
	if !exists {
		conn = p.newConnection(p.lggr, clientPrivKey, serverPubKey, serverURL)
		server[clientPubKey] = conn
	}
	p.mu.Unlock()

	// checkout outside of pool lock since it might take non-trivial time
	// the clientCheckout will be checked in again when its Close() method is called
	// this also should avoid deadlocks between conn.mu and pool.mu
	return conn.checkout(ctx)
}

// remove performs garbage collection on the connections map after connections are no longer used
func (p 
*pool) remove(serverURL string, clientPubKey credentials.StaticSizedPublicKey) { + p.mu.Lock() + defer p.mu.Unlock() + delete(p.connections[serverURL], clientPubKey) + if len(p.connections[serverURL]) == 0 { + delete(p.connections, serverURL) + } + +} + +func (p *pool) newConnection(lggr logger.Logger, clientPrivKey csakey.KeyV2, serverPubKey []byte, serverURL string) *connection { + return &connection{ + lggr: lggr, + clientPrivKey: clientPrivKey, + serverPubKey: serverPubKey, + serverURL: serverURL, + pool: p, + } +} + +func (p *pool) Start(ctx context.Context) error { + return p.cacheSet.Start(ctx) +} + +func (p *pool) Close() (merr error) { + p.mu.Lock() + defer p.mu.Unlock() + p.closed = true + for _, clientPubKeys := range p.connections { + for _, conn := range clientPubKeys { + merr = errors.Join(merr, conn.forceCloseAll()) + } + } + merr = errors.Join(merr, p.cacheSet.Close()) + return +} + +func (p *pool) Name() string { + return p.lggr.Name() +} + +func (p *pool) Ready() error { + p.mu.RLock() + defer p.mu.RUnlock() + if p.closed { + return errors.New("pool is closed") + } + return nil +} + +func (p *pool) HealthReport() map[string]error { + hp := map[string]error{p.Name(): p.Ready()} + maps.Copy(hp, p.cacheSet.HealthReport()) + return hp +} diff --git a/core/services/relay/evm/mercury/wsrpc/pool_test.go b/core/services/relay/evm/mercury/wsrpc/pool_test.go new file mode 100644 index 00000000..9deb79b2 --- /dev/null +++ b/core/services/relay/evm/mercury/wsrpc/pool_test.go @@ -0,0 +1,266 @@ +package wsrpc + +import ( + "context" + "math/big" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + 
	"github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/cache"
	"github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury/wsrpc/pb"
)

// compile-time assertion that mockClient satisfies the Client interface
var _ Client = &mockClient{}

// mockClient is a no-op Client used by pool tests; it only records whether
// Start and Close were called.
type mockClient struct {
	started   bool // set by Start
	closed    bool // set by Close
	rawClient pb.MercuryClient
}

func (c *mockClient) Transmit(ctx context.Context, in *pb.TransmitRequest) (out *pb.TransmitResponse, err error) {
	return
}
func (c *mockClient) LatestReport(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
	return
}
func (c *mockClient) Start(context.Context) error {
	c.started = true
	return nil
}
func (c *mockClient) Close() error {
	c.closed = true
	return nil
}
func (c *mockClient) Name() string                   { return "mock client" }
func (c *mockClient) Ready() error                   { return nil }
func (c *mockClient) HealthReport() map[string]error { return nil }
func (c *mockClient) ServerURL() string              { return "mock client url" }
func (c *mockClient) RawClient() pb.MercuryClient    { return c.rawClient }

// newMockClient matches the shape of the pool's newClient constructor; the
// logger argument is intentionally unused.
func newMockClient(lggr logger.Logger) *mockClient {
	return &mockClient{}
}

// Test_Pool exercises checkout/checkin reference counting, connection reuse
// per (key, server) pair, and forced teardown on pool Close.
func Test_Pool(t *testing.T) {
	lggr := logger.TestLogger(t).Named("PoolTestLogger")

	ctx := testutils.Context(t)

	t.Run("Checkout", func(t *testing.T) {
		p := newPool(lggr)
		p.cacheSet = &mockCacheSet{}

		t.Run("checks out one started client", func(t *testing.T) {
			clientPrivKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63()))
			serverPubKey := utils.NewHash().Bytes()
			serverURL := "example.com:443/ws"

			client := newMockClient(lggr)
			p.newClient = func(lggr logger.Logger, cprivk csakey.KeyV2, spubk []byte, surl string, cs cache.CacheSet) Client {
				assert.Equal(t, clientPrivKey, cprivk)
				assert.Equal(t, serverPubKey, spubk)
				assert.Equal(t, serverURL, surl)
				return client
			}

			c, err := p.Checkout(ctx, clientPrivKey, serverPubKey, serverURL)
			require.NoError(t, err)

			assert.True(t, client.started)

			require.IsType(t, 
&clientCheckout{}, c) + + conn := c.(*clientCheckout).connection + require.Equal(t, conn.Client, client) + + assert.Len(t, conn.checkouts, 1) + assert.Same(t, lggr, conn.lggr) + assert.Equal(t, clientPrivKey, conn.clientPrivKey) + assert.Equal(t, serverPubKey, conn.serverPubKey) + assert.Equal(t, serverURL, conn.serverURL) + assert.Same(t, p, conn.pool) + + t.Run("checks in the clientCheckout when Close is called", func(t *testing.T) { + err := c.Close() + require.NoError(t, err) + + assert.Len(t, conn.checkouts, 0) + require.IsType(t, nil, conn.Client) + assert.Nil(t, conn.Client) + assert.True(t, client.closed) + }) + }) + + t.Run("checks out multiple started clients and only closes if all of the clients for a given pk/server pair are checked back in", func(t *testing.T) { + clientPrivKeys := []csakey.KeyV2{ + csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())), + csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())), + } + serverPubKey := utils.NewHash().Bytes() + serverURLs := []string{ + "example.com:443/ws", + "example.invalid:8000/ws", + } + + p.newClient = func(lggr logger.Logger, cprivk csakey.KeyV2, spubk []byte, surl string, cs cache.CacheSet) Client { + return newMockClient(lggr) + } + + // conn 1 + c1 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + c2 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + c3 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + assert.Len(t, p.connections, 1) + assert.Len(t, p.connections[serverURLs[0]], 1) + assert.Len(t, p.connections[serverURLs[1]], 0) + + // conn 2 + c4 := mustCheckout(t, p, clientPrivKeys[1], serverPubKey, serverURLs[0]) + assert.Len(t, p.connections, 1) + assert.Len(t, p.connections[serverURLs[0]], 2) + assert.Len(t, p.connections[serverURLs[1]], 0) + + // conn 3 + c5 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[1]) + c6 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[1]) + assert.Len(t, 
p.connections, 2) + assert.Len(t, p.connections[serverURLs[0]], 2) + assert.Len(t, p.connections[serverURLs[1]], 1) + + conn1 := c1.(*clientCheckout).connection + assert.Same(t, conn1, c2.(*clientCheckout).connection) + assert.Same(t, conn1, c3.(*clientCheckout).connection) + assert.Len(t, conn1.checkouts, 3) + assert.True(t, conn1.Client.(*mockClient).started) + + conn2 := c4.(*clientCheckout).connection + assert.NotEqual(t, conn1, conn2) + assert.Len(t, conn2.checkouts, 1) + assert.True(t, conn2.Client.(*mockClient).started) + + conn3 := c5.(*clientCheckout).connection + assert.NotEqual(t, conn1, conn3) + assert.NotEqual(t, conn2, conn3) + assert.Same(t, conn3, c6.(*clientCheckout).connection) + assert.Len(t, conn3.checkouts, 2) + assert.True(t, conn3.Client.(*mockClient).started) + + require.NoError(t, c1.Close()) + assert.Len(t, conn1.checkouts, 2) + assert.NotNil(t, conn1.Client) + assert.Len(t, p.connections, 2) + assert.Len(t, p.connections[serverURLs[0]], 2) + assert.Len(t, p.connections[serverURLs[1]], 1) + + require.NoError(t, c2.Close()) + assert.Len(t, conn1.checkouts, 1) + assert.NotNil(t, conn1.Client) + assert.Len(t, p.connections, 2) + assert.Len(t, p.connections[serverURLs[0]], 2) + assert.Len(t, p.connections[serverURLs[1]], 1) + + require.NoError(t, c3.Close()) + assert.Len(t, conn1.checkouts, 0) + assert.Nil(t, conn1.Client) + assert.Len(t, p.connections, 2) + assert.Len(t, p.connections[serverURLs[0]], 1) + assert.Len(t, p.connections[serverURLs[1]], 1) + + c7 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + // Not the same one, since previously all checkouts were checked in, the original connection was deleted from the map and a new one created + assert.NotSame(t, conn1, c7.(*clientCheckout).connection) + assert.Len(t, conn1.checkouts, 0) // actually, conn1 has already been removed from the map and will be garbage collected + conn4 := c7.(*clientCheckout).connection + assert.Len(t, conn4.checkouts, 1) + assert.NotNil(t, 
conn4.Client) + assert.Len(t, p.connections, 2) + assert.Len(t, p.connections[serverURLs[0]], 2) + assert.Len(t, p.connections[serverURLs[1]], 1) + + require.NoError(t, c7.Close()) + assert.Len(t, p.connections, 2) + assert.Len(t, p.connections[serverURLs[0]], 1) + assert.Len(t, p.connections[serverURLs[1]], 1) + + require.NoError(t, c4.Close()) + assert.Len(t, p.connections, 1) + assert.Len(t, p.connections[serverURLs[0]], 0) + assert.Len(t, p.connections[serverURLs[1]], 1) + + require.NoError(t, c5.Close()) + require.NoError(t, c6.Close()) + assert.Len(t, p.connections, 0) + + require.NoError(t, p.Close()) + }) + }) + + p := newPool(lggr) + p.cacheSet = &mockCacheSet{} + + t.Run("Name", func(t *testing.T) { + assert.Equal(t, "PoolTestLogger", p.Name()) + }) + t.Run("Start", func(t *testing.T) { + require.NoError(t, p.Start(ctx)) + assert.Nil(t, p.Ready()) + assert.Nil(t, p.HealthReport()["PoolTestLogger"]) + }) + t.Run("Close force closes all connections", func(t *testing.T) { + clientPrivKeys := []csakey.KeyV2{ + csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())), + csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())), + } + serverPubKey := utils.NewHash().Bytes() + serverURLs := []string{ + "example.com:443/ws", + "example.invalid:8000/ws", + } + + var clients []*mockClient + p.newClient = func(lggr logger.Logger, cprivk csakey.KeyV2, spubk []byte, surl string, cs cache.CacheSet) Client { + c := newMockClient(lggr) + clients = append(clients, c) + return c + } + + // conn 1 + mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0]) + + // conn 2 + mustCheckout(t, p, clientPrivKeys[1], serverPubKey, serverURLs[0]) + + // conn 3 + mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[1]) + mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[1]) + + for _, c := range clients { + 
assert.True(t, c.started) + assert.False(t, c.closed) + } + + require.NoError(t, p.Close()) + assert.EqualError(t, p.Ready(), "pool is closed") + assert.EqualError(t, p.HealthReport()["PoolTestLogger"], "pool is closed") + + for _, c := range clients { + assert.True(t, c.closed) + } + }) +} + +func mustCheckout(t *testing.T, p *pool, clientPrivKey csakey.KeyV2, serverPubKey []byte, serverURL string) Client { + c, err := p.Checkout(testutils.Context(t), clientPrivKey, serverPubKey, serverURL) + require.NoError(t, err) + return c +} diff --git a/core/services/relay/evm/mercury_config_provider.go b/core/services/relay/evm/mercury_config_provider.go new file mode 100644 index 00000000..9c26b975 --- /dev/null +++ b/core/services/relay/evm/mercury_config_provider.go @@ -0,0 +1,44 @@ +package evm + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + + commontypes "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +func newMercuryConfigProvider(lggr logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts) (commontypes.ConfigProvider, error) { + if !common.IsHexAddress(opts.ContractID) { + return nil, errors.New("invalid contractID, expected hex address") + } + + aggregatorAddress := common.HexToAddress(opts.ContractID) + + relayConfig, err := opts.RelayConfig() + if err != nil { + return nil, fmt.Errorf("failed to get relay config: %w", err) + } + if relayConfig.FeedID == nil { + return nil, errors.New("feed ID is required for tracking config on mercury contracts") + } + cp, err := mercury.NewConfigPoller( + lggr.Named(relayConfig.FeedID.String()), + chain.LogPoller(), + aggregatorAddress, + *relayConfig.FeedID, + // TODO: Does mercury need to support config contract? 
DF-19182 + ) + if err != nil { + return nil, err + } + + offchainConfigDigester := mercury.NewOffchainConfigDigester(*relayConfig.FeedID, chain.Config().EVM().ChainID(), aggregatorAddress) + return newConfigWatcher(lggr, aggregatorAddress, offchainConfigDigester, cp, chain, relayConfig.FromBlock, opts.New), nil +} diff --git a/core/services/relay/evm/mercury_provider.go b/core/services/relay/evm/mercury_provider.go new file mode 100644 index 00000000..34a1eefa --- /dev/null +++ b/core/services/relay/evm/mercury_provider.go @@ -0,0 +1,162 @@ +package evm + +import ( + "context" + "errors" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services" + commontypes "github.com/goplugin/plugin-common/pkg/types" + + mercurytypes "github.com/goplugin/plugin-common/pkg/types/mercury" + v1 "github.com/goplugin/plugin-common/pkg/types/mercury/v1" + v2 "github.com/goplugin/plugin-common/pkg/types/mercury/v2" + v3 "github.com/goplugin/plugin-common/pkg/types/mercury/v3" + "github.com/goplugin/plugin-data-streams/mercury" + + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + "github.com/goplugin/pluginv3.0/v2/core/logger" + evmmercury "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mercury" +) + +var _ commontypes.MercuryProvider = (*mercuryProvider)(nil) + +type mercuryProvider struct { + cp commontypes.ConfigProvider + chainReader commontypes.ChainReader + codec commontypes.Codec + transmitter evmmercury.Transmitter + reportCodecV1 v1.ReportCodec + reportCodecV2 v2.ReportCodec + reportCodecV3 v3.ReportCodec + mercuryChainReader mercurytypes.ChainReader + logger logger.Logger + ms services.MultiStart +} + +func NewMercuryProvider( + cp commontypes.ConfigProvider, + chainReader commontypes.ChainReader, + codec commontypes.Codec, + mercuryChainReader mercurytypes.ChainReader, + transmitter evmmercury.Transmitter, + reportCodecV1 v1.ReportCodec, + reportCodecV2 v2.ReportCodec, 
+ reportCodecV3 v3.ReportCodec, + lggr logger.Logger, +) *mercuryProvider { + return &mercuryProvider{ + cp, + chainReader, + codec, + transmitter, + reportCodecV1, + reportCodecV2, + reportCodecV3, + mercuryChainReader, + lggr, + services.MultiStart{}, + } +} + +func (p *mercuryProvider) Start(ctx context.Context) error { + return p.ms.Start(ctx, p.cp, p.transmitter) +} + +func (p *mercuryProvider) Close() error { + return p.ms.Close() +} + +func (p *mercuryProvider) Ready() error { + return errors.Join(p.cp.Ready(), p.transmitter.Ready()) +} + +func (p *mercuryProvider) Name() string { + return p.logger.Name() +} + +func (p *mercuryProvider) HealthReport() map[string]error { + report := map[string]error{} + services.CopyHealth(report, p.cp.HealthReport()) + services.CopyHealth(report, p.transmitter.HealthReport()) + return report +} + +func (p *mercuryProvider) MercuryChainReader() mercurytypes.ChainReader { + return p.mercuryChainReader +} + +func (p *mercuryProvider) Codec() commontypes.Codec { + return p.codec +} + +func (p *mercuryProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker { + return p.cp.ContractConfigTracker() +} + +func (p *mercuryProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + return p.cp.OffchainConfigDigester() +} + +func (p *mercuryProvider) OnchainConfigCodec() mercurytypes.OnchainConfigCodec { + return mercury.StandardOnchainConfigCodec{} +} + +func (p *mercuryProvider) ReportCodecV1() v1.ReportCodec { + return p.reportCodecV1 +} + +func (p *mercuryProvider) ReportCodecV2() v2.ReportCodec { + return p.reportCodecV2 +} + +func (p *mercuryProvider) ReportCodecV3() v3.ReportCodec { + return p.reportCodecV3 +} + +func (p *mercuryProvider) ContractTransmitter() ocrtypes.ContractTransmitter { + return p.transmitter +} + +func (p *mercuryProvider) MercuryServerFetcher() mercurytypes.ServerFetcher { + return p.transmitter +} + +func (p *mercuryProvider) ChainReader() commontypes.ChainReader { + return 
p.chainReader +} + +var _ mercurytypes.ChainReader = (*mercuryChainReader)(nil) + +type mercuryChainReader struct { + tracker httypes.HeadTracker +} + +func NewChainReader(h httypes.HeadTracker) mercurytypes.ChainReader { + return &mercuryChainReader{h} +} + +func NewMercuryChainReader(h httypes.HeadTracker) mercurytypes.ChainReader { + return &mercuryChainReader{ + tracker: h, + } +} + +func (r *mercuryChainReader) LatestHeads(ctx context.Context, k int) ([]mercurytypes.Head, error) { + evmBlocks := r.tracker.LatestChain().AsSlice(k) + if len(evmBlocks) == 0 { + return nil, nil + } + + blocks := make([]mercurytypes.Head, len(evmBlocks)) + for x := 0; x < len(evmBlocks); x++ { + blocks[x] = mercurytypes.Head{ + Number: uint64(evmBlocks[x].BlockNumber()), + Hash: evmBlocks[x].Hash.Bytes(), + Timestamp: uint64(evmBlocks[x].Timestamp.Unix()), + } + } + + return blocks, nil +} diff --git a/core/services/relay/evm/method_binding.go b/core/services/relay/evm/method_binding.go new file mode 100644 index 00000000..0ca1546d --- /dev/null +++ b/core/services/relay/evm/method_binding.go @@ -0,0 +1,66 @@ +package evm + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + + commontypes "github.com/goplugin/plugin-common/pkg/types" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" +) + +type methodBinding struct { + address common.Address + contractName string + method string + client evmclient.Client + codec commontypes.Codec + bound bool +} + +var _ readBinding = &methodBinding{} + +func (m *methodBinding) SetCodec(codec commontypes.RemoteCodec) { + m.codec = codec +} + +func (m *methodBinding) Register() error { + return nil +} + +func (m *methodBinding) Unregister() error { + return nil +} + +func (m *methodBinding) GetLatestValue(ctx context.Context, params, returnValue any) error { + if !m.bound { + return fmt.Errorf("%w: method not bound", commontypes.ErrInvalidType) + } + + data, err := 
m.codec.Encode(ctx, params, wrapItemType(m.contractName, m.method, true)) + if err != nil { + return err + } + + callMsg := ethereum.CallMsg{ + To: &m.address, + From: m.address, + Data: data, + } + + bytes, err := m.client.CallContract(ctx, callMsg, nil) + if err != nil { + return fmt.Errorf("%w: %w", commontypes.ErrInternal, err) + } + + return m.codec.Decode(ctx, bytes, returnValue, wrapItemType(m.contractName, m.method, false)) +} + +func (m *methodBinding) Bind(binding commontypes.BoundContract) error { + m.address = common.HexToAddress(binding.Address) + m.bound = true + return nil +} diff --git a/core/services/relay/evm/mocks/loop_relay_adapter.go b/core/services/relay/evm/mocks/loop_relay_adapter.go new file mode 100644 index 00000000..0a0e81bb --- /dev/null +++ b/core/services/relay/evm/mocks/loop_relay_adapter.go @@ -0,0 +1,325 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + legacyevm "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + + mock "github.com/stretchr/testify/mock" + + types "github.com/goplugin/plugin-common/pkg/types" +) + +// LoopRelayAdapter is an autogenerated mock type for the LoopRelayAdapter type +type LoopRelayAdapter struct { + mock.Mock +} + +// Chain provides a mock function with given fields: +func (_m *LoopRelayAdapter) Chain() legacyevm.Chain { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Chain") + } + + var r0 legacyevm.Chain + if rf, ok := ret.Get(0).(func() legacyevm.Chain); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(legacyevm.Chain) + } + } + + return r0 +} + +// Close provides a mock function with given fields: +func (_m *LoopRelayAdapter) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} 
+ +// GetChainStatus provides a mock function with given fields: ctx +func (_m *LoopRelayAdapter) GetChainStatus(ctx context.Context) (types.ChainStatus, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetChainStatus") + } + + var r0 types.ChainStatus + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (types.ChainStatus, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) types.ChainStatus); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(types.ChainStatus) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HealthReport provides a mock function with given fields: +func (_m *LoopRelayAdapter) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// ListNodeStatuses provides a mock function with given fields: ctx, pageSize, pageToken +func (_m *LoopRelayAdapter) ListNodeStatuses(ctx context.Context, pageSize int32, pageToken string) ([]types.NodeStatus, string, int, error) { + ret := _m.Called(ctx, pageSize, pageToken) + + if len(ret) == 0 { + panic("no return value specified for ListNodeStatuses") + } + + var r0 []types.NodeStatus + var r1 string + var r2 int + var r3 error + if rf, ok := ret.Get(0).(func(context.Context, int32, string) ([]types.NodeStatus, string, int, error)); ok { + return rf(ctx, pageSize, pageToken) + } + if rf, ok := ret.Get(0).(func(context.Context, int32, string) []types.NodeStatus); ok { + r0 = rf(ctx, pageSize, pageToken) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.NodeStatus) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, int32, string) string); ok 
{ + r1 = rf(ctx, pageSize, pageToken) + } else { + r1 = ret.Get(1).(string) + } + + if rf, ok := ret.Get(2).(func(context.Context, int32, string) int); ok { + r2 = rf(ctx, pageSize, pageToken) + } else { + r2 = ret.Get(2).(int) + } + + if rf, ok := ret.Get(3).(func(context.Context, int32, string) error); ok { + r3 = rf(ctx, pageSize, pageToken) + } else { + r3 = ret.Error(3) + } + + return r0, r1, r2, r3 +} + +// Name provides a mock function with given fields: +func (_m *LoopRelayAdapter) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// NewConfigProvider provides a mock function with given fields: _a0, _a1 +func (_m *LoopRelayAdapter) NewConfigProvider(_a0 context.Context, _a1 types.RelayArgs) (types.ConfigProvider, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for NewConfigProvider") + } + + var r0 types.ConfigProvider + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.RelayArgs) (types.ConfigProvider, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, types.RelayArgs) types.ConfigProvider); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.ConfigProvider) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.RelayArgs) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewLLOProvider provides a mock function with given fields: _a0, _a1, _a2 +func (_m *LoopRelayAdapter) NewLLOProvider(_a0 context.Context, _a1 types.RelayArgs, _a2 types.PluginArgs) (types.LLOProvider, error) { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for NewLLOProvider") + } + + var r0 types.LLOProvider + var r1 error + if rf, ok := 
ret.Get(0).(func(context.Context, types.RelayArgs, types.PluginArgs) (types.LLOProvider, error)); ok { + return rf(_a0, _a1, _a2) + } + if rf, ok := ret.Get(0).(func(context.Context, types.RelayArgs, types.PluginArgs) types.LLOProvider); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.LLOProvider) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.RelayArgs, types.PluginArgs) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewPluginProvider provides a mock function with given fields: _a0, _a1, _a2 +func (_m *LoopRelayAdapter) NewPluginProvider(_a0 context.Context, _a1 types.RelayArgs, _a2 types.PluginArgs) (types.PluginProvider, error) { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for NewPluginProvider") + } + + var r0 types.PluginProvider + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.RelayArgs, types.PluginArgs) (types.PluginProvider, error)); ok { + return rf(_a0, _a1, _a2) + } + if rf, ok := ret.Get(0).(func(context.Context, types.RelayArgs, types.PluginArgs) types.PluginProvider); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.PluginProvider) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.RelayArgs, types.PluginArgs) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Ready provides a mock function with given fields: +func (_m *LoopRelayAdapter) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *LoopRelayAdapter) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value 
specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Transact provides a mock function with given fields: ctx, from, to, amount, balanceCheck +func (_m *LoopRelayAdapter) Transact(ctx context.Context, from string, to string, amount *big.Int, balanceCheck bool) error { + ret := _m.Called(ctx, from, to, amount, balanceCheck) + + if len(ret) == 0 { + panic("no return value specified for Transact") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, *big.Int, bool) error); ok { + r0 = rf(ctx, from, to, amount, balanceCheck) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewLoopRelayAdapter creates a new instance of LoopRelayAdapter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLoopRelayAdapter(t interface { + mock.TestingT + Cleanup(func()) +}) *LoopRelayAdapter { + mock := &LoopRelayAdapter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/relay/evm/mocks/request_round_db.go b/core/services/relay/evm/mocks/request_round_db.go new file mode 100644 index 00000000..1670930d --- /dev/null +++ b/core/services/relay/evm/mocks/request_round_db.go @@ -0,0 +1,74 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" + ocr2aggregator "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + mock "github.com/stretchr/testify/mock" +) + +// RequestRoundDB is an autogenerated mock type for the RequestRoundDB type +type RequestRoundDB struct { + mock.Mock +} + +// LoadLatestRoundRequested provides a mock function with given fields: +func (_m *RequestRoundDB) LoadLatestRoundRequested() (ocr2aggregator.OCR2AggregatorRoundRequested, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LoadLatestRoundRequested") + } + + var r0 ocr2aggregator.OCR2AggregatorRoundRequested + var r1 error + if rf, ok := ret.Get(0).(func() (ocr2aggregator.OCR2AggregatorRoundRequested, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() ocr2aggregator.OCR2AggregatorRoundRequested); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(ocr2aggregator.OCR2AggregatorRoundRequested) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SaveLatestRoundRequested provides a mock function with given fields: tx, rr +func (_m *RequestRoundDB) SaveLatestRoundRequested(tx pg.Queryer, rr ocr2aggregator.OCR2AggregatorRoundRequested) error { + ret := _m.Called(tx, rr) + + if len(ret) == 0 { + panic("no return value specified for SaveLatestRoundRequested") + } + + var r0 error + if rf, ok := ret.Get(0).(func(pg.Queryer, ocr2aggregator.OCR2AggregatorRoundRequested) error); ok { + r0 = rf(tx, rr) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewRequestRoundDB creates a new instance of RequestRoundDB. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewRequestRoundDB(t interface { + mock.TestingT + Cleanup(func()) +}) *RequestRoundDB { + mock := &RequestRoundDB{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/relay/evm/ocr2aggregator_decoder.go b/core/services/relay/evm/ocr2aggregator_decoder.go new file mode 100644 index 00000000..8df73b03 --- /dev/null +++ b/core/services/relay/evm/ocr2aggregator_decoder.go @@ -0,0 +1,65 @@ +package evm + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" +) + +var _ LogDecoder = &ocr2AggregatorLogDecoder{} + +type ocr2AggregatorLogDecoder struct { + eventName string + eventSig common.Hash + abi *abi.ABI +} + +func newOCR2AggregatorLogDecoder() (*ocr2AggregatorLogDecoder, error) { + const eventName = "ConfigSet" + abi, err := ocr2aggregator.OCR2AggregatorMetaData.GetAbi() + if err != nil { + return nil, err + } + return &ocr2AggregatorLogDecoder{ + eventName: eventName, + eventSig: abi.Events[eventName].ID, + abi: abi, + }, nil +} + +func (d *ocr2AggregatorLogDecoder) Decode(rawLog []byte) (ocrtypes.ContractConfig, error) { + unpacked := new(ocr2aggregator.OCR2AggregatorConfigSet) + err := d.abi.UnpackIntoInterface(unpacked, d.eventName, rawLog) + if err != nil { + return ocrtypes.ContractConfig{}, fmt.Errorf("failed to unpack log data: %w", err) + } + + var transmitAccounts []ocrtypes.Account + for _, addr := range unpacked.Transmitters { + transmitAccounts = append(transmitAccounts, ocrtypes.Account(addr.Hex())) + } + var signers []ocrtypes.OnchainPublicKey + for _, addr := range unpacked.Signers { + addr := addr + signers = append(signers, addr[:]) + } + + return ocrtypes.ContractConfig{ + ConfigDigest: unpacked.ConfigDigest, + ConfigCount: unpacked.ConfigCount, + Signers: signers, + Transmitters: 
transmitAccounts, + F: unpacked.F, + OnchainConfig: unpacked.OnchainConfig, + OffchainConfigVersion: unpacked.OffchainConfigVersion, + OffchainConfig: unpacked.OffchainConfig, + }, nil +} + +func (d *ocr2AggregatorLogDecoder) EventSig() common.Hash { + return d.eventSig +} diff --git a/core/services/relay/evm/ocr2keeper.go b/core/services/relay/evm/ocr2keeper.go new file mode 100644 index 00000000..fa070adb --- /dev/null +++ b/core/services/relay/evm/ocr2keeper.go @@ -0,0 +1,284 @@ +package evm + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + + "github.com/goplugin/libocr/offchainreporting2plus/chains/evmutil" + "github.com/goplugin/libocr/offchainreporting2plus/ocr3types" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-automation/pkg/v3/plugin" + commontypes "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/plugin-common/pkg/types/automation" + + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + evm "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/transmit" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/upkeepstate" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + 
"github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +var ( + _ OCR2KeeperRelayer = (*ocr2keeperRelayer)(nil) + _ OCR2KeeperProvider = (*ocr2keeperProvider)(nil) + ErrInitializationFailure = fmt.Errorf("failed to initialize registry") +) + +// OCR2KeeperProviderOpts is the custom options to create a keeper provider +type OCR2KeeperProviderOpts struct { + RArgs commontypes.RelayArgs + PArgs commontypes.PluginArgs + InstanceID int +} + +// OCR2KeeperProvider provides all components needed for a OCR2Keeper plugin. +type OCR2KeeperProvider interface { + commontypes.Plugin + Registry() automation.Registry + Encoder() automation.Encoder + TransmitEventProvider() automation.EventProvider + BlockSubscriber() automation.BlockSubscriber + PayloadBuilder() automation.PayloadBuilder + UpkeepStateStore() automation.UpkeepStateStore + LogEventProvider() automation.LogEventProvider + LogRecoverer() automation.LogRecoverer + UpkeepProvider() automation.ConditionalUpkeepProvider +} + +// OCR2KeeperRelayer contains the relayer and instantiating functions for OCR2Keeper providers. +type OCR2KeeperRelayer interface { + NewOCR2KeeperProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (OCR2KeeperProvider, error) +} + +// ocr2keeperRelayer is the relayer with added DKG and OCR2Keeper provider functions. 
+type ocr2keeperRelayer struct { + db *sqlx.DB + chain legacyevm.Chain + lggr logger.Logger + ethKeystore keystore.Eth + dbCfg pg.QConfig +} + +// NewOCR2KeeperRelayer is the constructor of ocr2keeperRelayer +func NewOCR2KeeperRelayer(db *sqlx.DB, chain legacyevm.Chain, lggr logger.Logger, ethKeystore keystore.Eth, dbCfg pg.QConfig) OCR2KeeperRelayer { + return &ocr2keeperRelayer{ + db: db, + chain: chain, + lggr: lggr, + ethKeystore: ethKeystore, + dbCfg: dbCfg, + } +} + +func (r *ocr2keeperRelayer) NewOCR2KeeperProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (OCR2KeeperProvider, error) { + cfgWatcher, err := newOCR2KeeperConfigProvider(r.lggr, r.chain, rargs) + if err != nil { + return nil, err + } + + gasLimit := cfgWatcher.chain.Config().EVM().OCR2().Automation().GasLimit() + contractTransmitter, err := newOnChainContractTransmitter(r.lggr, rargs, pargs.TransmitterID, r.ethKeystore, cfgWatcher, configTransmitterOpts{pluginGasLimit: &gasLimit}, OCR2AggregatorTransmissionContractABI) + if err != nil { + return nil, err + } + + client := r.chain + + services := new(ocr2keeperProvider) + services.configWatcher = cfgWatcher + services.contractTransmitter = contractTransmitter + + addr := ethkey.MustEIP55Address(rargs.ContractID).Address() + + registryContract, err := iregistry21.NewIKeeperRegistryMaster(addr, client.Client()) + if err != nil { + return nil, fmt.Errorf("%w: failed to create caller for address and backend", ErrInitializationFailure) + } + // lookback blocks for transmit event is hard coded and should provide ample time for logs + // to be detected in most cases + var transmitLookbackBlocks int64 = 250 + transmitEventProvider, err := transmit.NewTransmitEventProvider(r.lggr, client.LogPoller(), addr, client.Client(), transmitLookbackBlocks) + if err != nil { + return nil, err + } + + services.transmitEventProvider = transmitEventProvider + + packer := encoding.NewAbiPacker() + services.encoder = encoding.NewReportEncoder(packer) + + 
finalityDepth := client.Config().EVM().FinalityDepth() + + orm := upkeepstate.NewORM(client.ID(), r.db, r.lggr, r.dbCfg) + scanner := upkeepstate.NewPerformedEventsScanner(r.lggr, client.LogPoller(), addr, finalityDepth) + services.upkeepStateStore = upkeepstate.NewUpkeepStateStore(orm, r.lggr, scanner) + + logProvider, logRecoverer := logprovider.New(r.lggr, client.LogPoller(), client.Client(), services.upkeepStateStore, finalityDepth) + services.logEventProvider = logProvider + services.logRecoverer = logRecoverer + blockSubscriber := evm.NewBlockSubscriber(client.HeadBroadcaster(), client.LogPoller(), finalityDepth, r.lggr) + services.blockSubscriber = blockSubscriber + + al := evm.NewActiveUpkeepList() + services.payloadBuilder = evm.NewPayloadBuilder(al, logRecoverer, r.lggr) + + services.registry = evm.NewEvmRegistry(r.lggr, addr, client, + registryContract, rargs.MercuryCredentials, al, logProvider, + packer, blockSubscriber, finalityDepth) + + services.conditionalUpkeepProvider = evm.NewUpkeepProvider(al, blockSubscriber, client.LogPoller()) + + return services, nil +} + +type ocr3keeperProviderContractTransmitter struct { + contractTransmitter ocrtypes.ContractTransmitter +} + +var _ ocr3types.ContractTransmitter[plugin.AutomationReportInfo] = &ocr3keeperProviderContractTransmitter{} + +func NewKeepersOCR3ContractTransmitter(ocr2ContractTransmitter ocrtypes.ContractTransmitter) *ocr3keeperProviderContractTransmitter { + return &ocr3keeperProviderContractTransmitter{ocr2ContractTransmitter} +} + +func (t *ocr3keeperProviderContractTransmitter) Transmit( + ctx context.Context, + digest ocrtypes.ConfigDigest, + seqNr uint64, + reportWithInfo ocr3types.ReportWithInfo[plugin.AutomationReportInfo], + aoss []ocrtypes.AttributedOnchainSignature, +) error { + return t.contractTransmitter.Transmit( + ctx, + ocrtypes.ReportContext{ + ReportTimestamp: ocrtypes.ReportTimestamp{ + ConfigDigest: digest, + Epoch: uint32(seqNr), + }, + }, + reportWithInfo.Report, + aoss, + 
) +} + +func (t *ocr3keeperProviderContractTransmitter) FromAccount() (ocrtypes.Account, error) { + return t.contractTransmitter.FromAccount() +} + +type ocr2keeperProvider struct { + *configWatcher + contractTransmitter ContractTransmitter + registry automation.Registry + encoder automation.Encoder + transmitEventProvider automation.EventProvider + blockSubscriber automation.BlockSubscriber + payloadBuilder automation.PayloadBuilder + upkeepStateStore automation.UpkeepStateStore + logEventProvider automation.LogEventProvider + logRecoverer automation.LogRecoverer + conditionalUpkeepProvider automation.ConditionalUpkeepProvider +} + +func (c *ocr2keeperProvider) ContractTransmitter() ocrtypes.ContractTransmitter { + return c.contractTransmitter +} + +func (c *ocr2keeperProvider) ChainReader() commontypes.ChainReader { + return nil +} + +func (c *ocr2keeperProvider) Codec() commontypes.Codec { + return nil +} + +func newOCR2KeeperConfigProvider(lggr logger.Logger, chain legacyevm.Chain, rargs commontypes.RelayArgs) (*configWatcher, error) { + var relayConfig types.RelayConfig + err := json.Unmarshal(rargs.RelayConfig, &relayConfig) + if err != nil { + return nil, err + } + if !common.IsHexAddress(rargs.ContractID) { + return nil, fmt.Errorf("invalid contract address '%s'", rargs.ContractID) + } + + contractAddress := common.HexToAddress(rargs.ContractID) + + configPoller, err := NewConfigPoller( + lggr.With("contractID", rargs.ContractID), + CPConfig{ + chain.Client(), + chain.LogPoller(), + contractAddress, + // TODO: Does ocr2keeper need to support config contract? 
DF-19182 + nil, + OCR2AggregatorLogDecoder, + }, + ) + if err != nil { + return nil, errors.Wrap(err, "failed to create config poller") + } + + offchainConfigDigester := evmutil.EVMOffchainConfigDigester{ + ChainID: chain.Config().EVM().ChainID().Uint64(), + ContractAddress: contractAddress, + } + + return newConfigWatcher( + lggr, + contractAddress, + offchainConfigDigester, + configPoller, + chain, + relayConfig.FromBlock, + rargs.New, + ), nil +} + +func (c *ocr2keeperProvider) Registry() automation.Registry { + return c.registry +} + +func (c *ocr2keeperProvider) Encoder() automation.Encoder { + return c.encoder +} + +func (c *ocr2keeperProvider) TransmitEventProvider() automation.EventProvider { + return c.transmitEventProvider +} + +func (c *ocr2keeperProvider) BlockSubscriber() automation.BlockSubscriber { + return c.blockSubscriber +} + +func (c *ocr2keeperProvider) PayloadBuilder() automation.PayloadBuilder { + return c.payloadBuilder +} + +func (c *ocr2keeperProvider) UpkeepStateStore() automation.UpkeepStateStore { + return c.upkeepStateStore +} + +func (c *ocr2keeperProvider) LogEventProvider() automation.LogEventProvider { + return c.logEventProvider +} + +func (c *ocr2keeperProvider) LogRecoverer() automation.LogRecoverer { + return c.logRecoverer +} + +func (c *ocr2keeperProvider) UpkeepProvider() automation.ConditionalUpkeepProvider { + return c.conditionalUpkeepProvider +} diff --git a/core/services/relay/evm/ocr2vrf.go b/core/services/relay/evm/ocr2vrf.go new file mode 100644 index 00000000..7d6362f6 --- /dev/null +++ b/core/services/relay/evm/ocr2vrf.go @@ -0,0 +1,174 @@ +package evm + +import ( + "encoding/json" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/jmoiron/sqlx" + + "github.com/goplugin/libocr/offchainreporting2plus/chains/evmutil" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + commontypes "github.com/goplugin/plugin-common/pkg/types" + + 
"github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/dkg/config" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +// DKGProvider provides all components needed for a DKG plugin. +type DKGProvider interface { + commontypes.Plugin +} + +// OCR2VRFProvider provides all components needed for a OCR2VRF plugin. +type OCR2VRFProvider interface { + commontypes.Plugin +} + +// OCR2VRFRelayer contains the relayer and instantiating functions for OCR2VRF providers. +type OCR2VRFRelayer interface { + NewDKGProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (DKGProvider, error) + NewOCR2VRFProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (OCR2VRFProvider, error) +} + +var ( + _ OCR2VRFRelayer = (*ocr2vrfRelayer)(nil) + _ DKGProvider = (*dkgProvider)(nil) + _ OCR2VRFProvider = (*ocr2vrfProvider)(nil) +) + +// Relayer with added DKG and OCR2VRF provider functions. 
+type ocr2vrfRelayer struct { + db *sqlx.DB + chain legacyevm.Chain + lggr logger.Logger + ethKeystore keystore.Eth +} + +func NewOCR2VRFRelayer(db *sqlx.DB, chain legacyevm.Chain, lggr logger.Logger, ethKeystore keystore.Eth) OCR2VRFRelayer { + return &ocr2vrfRelayer{ + db: db, + chain: chain, + lggr: lggr, + ethKeystore: ethKeystore, + } +} + +func (r *ocr2vrfRelayer) NewDKGProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (DKGProvider, error) { + configWatcher, err := newOCR2VRFConfigProvider(r.lggr, r.chain, rargs) + if err != nil { + return nil, err + } + contractTransmitter, err := newOnChainContractTransmitter(r.lggr, rargs, pargs.TransmitterID, r.ethKeystore, configWatcher, configTransmitterOpts{}, OCR2AggregatorTransmissionContractABI) + if err != nil { + return nil, err + } + + var pluginConfig config.PluginConfig + err = json.Unmarshal(pargs.PluginConfig, &pluginConfig) + if err != nil { + return nil, err + } + + return &dkgProvider{ + configWatcher: configWatcher, + contractTransmitter: contractTransmitter, + pluginConfig: pluginConfig, + }, nil +} + +func (r *ocr2vrfRelayer) NewOCR2VRFProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (OCR2VRFProvider, error) { + configWatcher, err := newOCR2VRFConfigProvider(r.lggr, r.chain, rargs) + if err != nil { + return nil, err + } + contractTransmitter, err := newOnChainContractTransmitter(r.lggr, rargs, pargs.TransmitterID, r.ethKeystore, configWatcher, configTransmitterOpts{}, OCR2AggregatorTransmissionContractABI) + if err != nil { + return nil, err + } + return &ocr2vrfProvider{ + configWatcher: configWatcher, + contractTransmitter: contractTransmitter, + }, nil +} + +type dkgProvider struct { + *configWatcher + contractTransmitter ContractTransmitter + pluginConfig config.PluginConfig +} + +func (c *dkgProvider) ContractTransmitter() ocrtypes.ContractTransmitter { + return c.contractTransmitter +} + +func (c *dkgProvider) ChainReader() commontypes.ChainReader { + return 
nil +} + +func (c *dkgProvider) Codec() commontypes.Codec { + return nil +} + +type ocr2vrfProvider struct { + *configWatcher + contractTransmitter ContractTransmitter +} + +func (c *ocr2vrfProvider) ContractTransmitter() ocrtypes.ContractTransmitter { + return c.contractTransmitter +} + +func (c *ocr2vrfProvider) ChainReader() commontypes.ChainReader { + return nil +} + +func (c *ocr2vrfProvider) Codec() commontypes.Codec { + return nil +} + +func newOCR2VRFConfigProvider(lggr logger.Logger, chain legacyevm.Chain, rargs commontypes.RelayArgs) (*configWatcher, error) { + var relayConfig types.RelayConfig + err := json.Unmarshal(rargs.RelayConfig, &relayConfig) + if err != nil { + return nil, err + } + if !common.IsHexAddress(rargs.ContractID) { + return nil, fmt.Errorf("invalid contract address '%s'", rargs.ContractID) + } + + contractAddress := common.HexToAddress(rargs.ContractID) + configPoller, err := NewConfigPoller( + lggr.With("contractID", rargs.ContractID), + CPConfig{ + chain.Client(), + chain.LogPoller(), + contractAddress, + // TODO: Does ocr2vrf need to support config contract? 
DF-19182 + nil, + OCR2AggregatorLogDecoder, + }, + ) + if err != nil { + return nil, err + } + + offchainConfigDigester := evmutil.EVMOffchainConfigDigester{ + ChainID: chain.Config().EVM().ChainID().Uint64(), + ContractAddress: contractAddress, + } + + return newConfigWatcher( + lggr, + contractAddress, + offchainConfigDigester, + configPoller, + chain, + relayConfig.FromBlock, + rargs.New, + ), nil +} diff --git a/core/services/relay/evm/parsed_types.go b/core/services/relay/evm/parsed_types.go new file mode 100644 index 00000000..cb578654 --- /dev/null +++ b/core/services/relay/evm/parsed_types.go @@ -0,0 +1,50 @@ +package evm + +import ( + "fmt" + "reflect" + + "github.com/goplugin/plugin-common/pkg/codec" + commontypes "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +type parsedTypes struct { + encoderDefs map[string]types.CodecEntry + decoderDefs map[string]types.CodecEntry +} + +func (parsed *parsedTypes) toCodec() (commontypes.RemoteCodec, error) { + modByTypeName := map[string]codec.Modifier{} + if err := addEntries(parsed.encoderDefs, modByTypeName); err != nil { + return nil, err + } + if err := addEntries(parsed.decoderDefs, modByTypeName); err != nil { + return nil, err + } + + mod, err := codec.NewByItemTypeModifier(modByTypeName) + if err != nil { + return nil, err + } + underlying := &evmCodec{ + encoder: &encoder{Definitions: parsed.encoderDefs}, + decoder: &decoder{Definitions: parsed.decoderDefs}, + parsedTypes: parsed, + } + return codec.NewModifierCodec(underlying, mod, evmDecoderHooks...) 
+} + +// addEntries extracts the mods from codecEntry and adds them to modByTypeName use with codec.NewByItemTypeModifier +// Since each input/output can have its own modifications, we need to keep track of them by type name +func addEntries(defs map[string]types.CodecEntry, modByTypeName map[string]codec.Modifier) error { + for k, def := range defs { + modByTypeName[k] = def.Modifier() + _, err := def.Modifier().RetypeToOffChain(reflect.PointerTo(def.CheckedType()), k) + if err != nil { + return fmt.Errorf("%w: cannot retype %v: %w", commontypes.ErrInvalidConfig, k, err) + } + } + return nil +} diff --git a/core/services/relay/evm/plugin_provider.go b/core/services/relay/evm/plugin_provider.go new file mode 100644 index 00000000..26d2187a --- /dev/null +++ b/core/services/relay/evm/plugin_provider.go @@ -0,0 +1,79 @@ +package evm + +import ( + "context" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +type pluginProvider struct { + services.Service + chainReader types.ChainReader + codec types.Codec + contractTransmitter ocrtypes.ContractTransmitter + configWatcher *configWatcher + lggr logger.Logger + ms services.MultiStart +} + +var _ types.PluginProvider = (*pluginProvider)(nil) + +func NewPluginProvider( + chainReader types.ChainReader, + codec types.Codec, + contractTransmitter ocrtypes.ContractTransmitter, + configWatcher *configWatcher, + lggr logger.Logger, +) *pluginProvider { + return &pluginProvider{ + chainReader: chainReader, + codec: codec, + contractTransmitter: contractTransmitter, + configWatcher: configWatcher, + lggr: lggr, + ms: services.MultiStart{}, + } +} + +func (p *pluginProvider) Name() string { return p.lggr.Name() } + +func (p *pluginProvider) Ready() error { return nil } + +func (p *pluginProvider) HealthReport() map[string]error { + hp := 
map[string]error{p.Name(): p.Ready()} + services.CopyHealth(hp, p.configWatcher.HealthReport()) + return hp +} + +func (p *pluginProvider) ContractTransmitter() ocrtypes.ContractTransmitter { + return p.contractTransmitter +} + +func (p *pluginProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + return p.configWatcher.OffchainConfigDigester() +} + +func (p *pluginProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker { + return p.configWatcher.configPoller +} + +func (p *pluginProvider) ChainReader() types.ChainReader { + return p.chainReader +} + +func (p *pluginProvider) Codec() types.Codec { + return p.codec +} + +func (p *pluginProvider) Start(ctx context.Context) error { + return p.configWatcher.Start(ctx) +} + +func (p *pluginProvider) Close() error { + return p.configWatcher.Close() +} diff --git a/core/services/relay/evm/relayer_extender.go b/core/services/relay/evm/relayer_extender.go new file mode 100644 index 00000000..16fa2800 --- /dev/null +++ b/core/services/relay/evm/relayer_extender.go @@ -0,0 +1,164 @@ +package evm + +import ( + "context" + "fmt" + "math/big" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/plugin-common/pkg/loop" + commontypes "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" +) + +// ErrNoChains indicates that no EVM chains have been started +var ErrNoChains = errors.New("no EVM chains loaded") + +type EVMChainRelayerExtender interface { + loop.RelayerExt + Chain() legacyevm.Chain +} + +type EVMChainRelayerExtenderSlicer interface { + Slice() []EVMChainRelayerExtender + Len() int + AppConfig() legacyevm.AppConfig +} + +type ChainRelayerExtenders struct { + exts []EVMChainRelayerExtender + cfg legacyevm.AppConfig +} + +var _ EVMChainRelayerExtenderSlicer = &ChainRelayerExtenders{} + +func NewLegacyChainsFromRelayerExtenders(exts 
EVMChainRelayerExtenderSlicer) *legacyevm.LegacyChains { + m := make(map[string]legacyevm.Chain) + for _, r := range exts.Slice() { + m[r.Chain().ID().String()] = r.Chain() + } + return legacyevm.NewLegacyChains(m, exts.AppConfig().EVMConfigs()) +} + +func newChainRelayerExtsFromSlice(exts []*ChainRelayerExt, appConfig legacyevm.AppConfig) *ChainRelayerExtenders { + temp := make([]EVMChainRelayerExtender, len(exts)) + for i := range exts { + temp[i] = exts[i] + } + return &ChainRelayerExtenders{ + exts: temp, + cfg: appConfig, + } +} + +func (c *ChainRelayerExtenders) AppConfig() legacyevm.AppConfig { + return c.cfg +} + +func (c *ChainRelayerExtenders) Slice() []EVMChainRelayerExtender { + return c.exts +} + +func (c *ChainRelayerExtenders) Len() int { + return len(c.exts) +} + +// implements OneChain +type ChainRelayerExt struct { + chain legacyevm.Chain +} + +var _ EVMChainRelayerExtender = &ChainRelayerExt{} + +func (s *ChainRelayerExt) GetChainStatus(ctx context.Context) (commontypes.ChainStatus, error) { + return s.chain.GetChainStatus(ctx) +} + +func (s *ChainRelayerExt) ListNodeStatuses(ctx context.Context, pageSize int32, pageToken string) (stats []commontypes.NodeStatus, nextPageToken string, total int, err error) { + return s.chain.ListNodeStatuses(ctx, pageSize, pageToken) +} + +func (s *ChainRelayerExt) Transact(ctx context.Context, from, to string, amount *big.Int, balanceCheck bool) error { + return s.chain.Transact(ctx, from, to, amount, balanceCheck) +} + +func (s *ChainRelayerExt) ID() string { + return s.chain.ID().String() +} + +func (s *ChainRelayerExt) Chain() legacyevm.Chain { + return s.chain +} + +var ErrCorruptEVMChain = errors.New("corrupt evm chain") + +func (s *ChainRelayerExt) Start(ctx context.Context) error { + return s.chain.Start(ctx) +} + +func (s *ChainRelayerExt) Close() (err error) { + return s.chain.Close() +} + +func (s *ChainRelayerExt) Name() string { + return s.chain.Name() +} + +func (s *ChainRelayerExt) HealthReport() 
map[string]error { + return s.chain.HealthReport() +} + +func (s *ChainRelayerExt) Ready() (err error) { + return s.chain.Ready() +} + +func NewChainRelayerExtenders(ctx context.Context, opts legacyevm.ChainRelayExtenderConfig) (*ChainRelayerExtenders, error) { + if err := opts.Validate(); err != nil { + return nil, err + } + + unique := make(map[string]struct{}) + + evmConfigs := opts.AppConfig.EVMConfigs() + var enabled []*toml.EVMConfig + for i, cfg := range evmConfigs { + _, alreadyExists := unique[cfg.ChainID.String()] + if alreadyExists { + return nil, fmt.Errorf("duplicate chain definition for evm chain id %s", cfg.ChainID.String()) + } + unique[cfg.ChainID.String()] = struct{}{} + if evmConfigs[i].IsEnabled() { + enabled = append(enabled, evmConfigs[i]) + } + } + + var result []*ChainRelayerExt + var err error + for i := range enabled { + + cid := enabled[i].ChainID.String() + privOpts := legacyevm.ChainRelayExtenderConfig{ + Logger: opts.Logger.Named(cid), + ChainOpts: opts.ChainOpts, + KeyStore: opts.KeyStore, + } + + privOpts.Logger.Infow(fmt.Sprintf("Loading chain %s", cid), "evmChainID", cid) + chain, err2 := legacyevm.NewTOMLChain(ctx, enabled[i], privOpts) + if err2 != nil { + err = multierr.Combine(err, fmt.Errorf("failed to create chain %s: %w", cid, err2)) + continue + } + + s := &ChainRelayerExt{ + chain: chain, + } + result = append(result, s) + } + // always return because it's accumulating errors + return newChainRelayerExtsFromSlice(result, opts.AppConfig), err +} diff --git a/core/services/relay/evm/relayer_extender_test.go b/core/services/relay/evm/relayer_extender_test.go new file mode 100644 index 00000000..02b38709 --- /dev/null +++ b/core/services/relay/evm/relayer_extender_test.go @@ -0,0 +1,69 @@ +package evm_test + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + evmclimocks 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +func TestChainRelayExtenders(t *testing.T) { + t.Parallel() + + newId := testutils.NewRandomEVMChainID() + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + one := uint32(1) + c.EVM[0].MinIncomingConfirmations = &one + t := true + c.EVM = append(c.EVM, &toml.EVMConfig{ChainID: ubig.New(newId), Enabled: &t, Chain: toml.Defaults(nil)}) + }) + db := pgtest.NewSqlxDB(t) + kst := cltest.NewKeyStore(t, db, cfg.Database()) + require.NoError(t, kst.Unlock(cltest.Password)) + + opts := evmtest.NewChainRelayExtOpts(t, evmtest.TestChainOpts{DB: db, KeyStore: kst.Eth(), GeneralConfig: cfg}) + opts.GenEthClient = func(*big.Int) evmclient.Client { + return cltest.NewEthMocksWithStartupAssertions(t) + } + relayExtenders, err := evmrelay.NewChainRelayerExtenders(testutils.Context(t), opts) + require.NoError(t, err) + + require.Equal(t, relayExtenders.Len(), 2) + relayExtendersInstances := relayExtenders.Slice() + for _, c := range relayExtendersInstances { + require.NoError(t, c.Start(testutils.Context(t))) + require.NoError(t, c.Ready()) + } + + require.NotEqual(t, relayExtendersInstances[0].Chain().ID().String(), relayExtendersInstances[1].Chain().ID().String()) + + for _, c := range relayExtendersInstances { + require.NoError(t, c.Close()) + } + + 
relayExtendersInstances[0].Chain().Client().(*evmclimocks.Client).AssertCalled(t, "Close") + relayExtendersInstances[1].Chain().Client().(*evmclimocks.Client).AssertCalled(t, "Close") + + assert.Error(t, relayExtendersInstances[0].Chain().Ready()) + assert.Error(t, relayExtendersInstances[1].Chain().Ready()) + + // test extender methods on single instance + relayExt := relayExtendersInstances[0] + s, err := relayExt.GetChainStatus(testutils.Context(t)) + assert.NotEmpty(t, s) + assert.NoError(t, err) + +} diff --git a/core/services/relay/evm/request_round_db.go b/core/services/relay/evm/request_round_db.go new file mode 100644 index 00000000..344168c9 --- /dev/null +++ b/core/services/relay/evm/request_round_db.go @@ -0,0 +1,91 @@ +package evm + +import ( + "database/sql" + "encoding/json" + + "github.com/pkg/errors" + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// RequestRoundDB stores requested rounds for querying by the median plugin. 
+type RequestRoundDB interface { + SaveLatestRoundRequested(tx pg.Queryer, rr ocr2aggregator.OCR2AggregatorRoundRequested) error + LoadLatestRoundRequested() (rr ocr2aggregator.OCR2AggregatorRoundRequested, err error) +} + +var _ RequestRoundDB = &requestRoundDB{} + +//go:generate mockery --quiet --name RequestRoundDB --output ./mocks/ --case=underscore +type requestRoundDB struct { + *sql.DB + oracleSpecID int32 + lggr logger.Logger +} + +// NewDB returns a new DB scoped to this oracleSpecID +func NewRoundRequestedDB(sqldb *sql.DB, oracleSpecID int32, lggr logger.Logger) *requestRoundDB { + return &requestRoundDB{sqldb, oracleSpecID, lggr} +} + +func (d *requestRoundDB) SaveLatestRoundRequested(tx pg.Queryer, rr ocr2aggregator.OCR2AggregatorRoundRequested) error { + rawLog, err := json.Marshal(rr.Raw) + if err != nil { + return errors.Wrap(err, "could not marshal log as JSON") + } + _, err = tx.Exec(` +INSERT INTO ocr2_latest_round_requested (ocr2_oracle_spec_id, requester, config_digest, epoch, round, raw) +VALUES ($1,$2,$3,$4,$5,$6) ON CONFLICT (ocr2_oracle_spec_id) DO UPDATE SET + requester = EXCLUDED.requester, + config_digest = EXCLUDED.config_digest, + epoch = EXCLUDED.epoch, + round = EXCLUDED.round, + raw = EXCLUDED.raw +`, d.oracleSpecID, rr.Requester, rr.ConfigDigest[:], rr.Epoch, rr.Round, rawLog) + + return errors.Wrap(err, "could not save latest round requested") +} + +func (d *requestRoundDB) LoadLatestRoundRequested() (ocr2aggregator.OCR2AggregatorRoundRequested, error) { + rr := ocr2aggregator.OCR2AggregatorRoundRequested{} + rows, err := d.Query(` +SELECT requester, config_digest, epoch, round, raw +FROM ocr2_latest_round_requested +WHERE ocr2_oracle_spec_id = $1 +LIMIT 1 +`, d.oracleSpecID) + if err != nil { + return rr, errors.Wrap(err, "LoadLatestRoundRequested failed to query rows") + } + defer rows.Close() + + for rows.Next() { + var configDigest []byte + var rawLog []byte + + err = rows.Scan(&rr.Requester, &configDigest, &rr.Epoch, 
&rr.Round, &rawLog) + if err != nil { + return rr, errors.Wrap(err, "LoadLatestRoundRequested failed to scan row") + } + + rr.ConfigDigest, err = ocrtypes.BytesToConfigDigest(configDigest) + if err != nil { + return rr, errors.Wrap(err, "LoadLatestRoundRequested failed to decode config digest") + } + + err = json.Unmarshal(rawLog, &rr.Raw) + if err != nil { + return rr, errors.Wrap(err, "LoadLatestRoundRequested failed to unmarshal raw log") + } + } + + if err = rows.Err(); err != nil { + return rr, err + } + + return rr, nil +} diff --git a/core/services/relay/evm/request_round_db_test.go b/core/services/relay/evm/request_round_db_test.go new file mode 100644 index 00000000..512cc4b1 --- /dev/null +++ b/core/services/relay/evm/request_round_db_test.go @@ -0,0 +1,79 @@ +package evm_test + +import ( + "testing" + + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/testhelpers" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +func Test_DB_LatestRoundRequested(t *testing.T) { + sqlDB := pgtest.NewSqlxDB(t) + + _, err := sqlDB.Exec(`SET CONSTRAINTS offchainreporting2_latest_round_oracle_spec_fkey DEFERRED`) + require.NoError(t, err) + + lggr := logger.TestLogger(t) + db := evm.NewRoundRequestedDB(sqlDB.DB, 1, lggr) + db2 := evm.NewRoundRequestedDB(sqlDB.DB, 2, lggr) + + rawLog := cltest.LogFromFixture(t, "../../../testdata/jsonrpc/round_requested_log_1_1.json") + + rr := ocr2aggregator.OCR2AggregatorRoundRequested{ + Requester: testutils.NewAddress(), + ConfigDigest: testhelpers.MakeConfigDigest(t), + Epoch: 42, + 
Round: 9, + Raw: rawLog, + } + + t.Run("saves latest round requested", func(t *testing.T) { + ctx := testutils.Context(t) + err := pg.SqlxTransaction(ctx, sqlDB, logger.TestLogger(t), func(q pg.Queryer) error { + return db.SaveLatestRoundRequested(q, rr) + }) + require.NoError(t, err) + + rawLog.Index = 42 + + // Now overwrite to prove that updating works + rr = ocr2aggregator.OCR2AggregatorRoundRequested{ + Requester: testutils.NewAddress(), + ConfigDigest: testhelpers.MakeConfigDigest(t), + Epoch: 43, + Round: 8, + Raw: rawLog, + } + + err = pg.SqlxTransaction(ctx, sqlDB, logger.TestLogger(t), func(q pg.Queryer) error { + return db.SaveLatestRoundRequested(q, rr) + }) + require.NoError(t, err) + }) + + t.Run("loads latest round requested", func(t *testing.T) { + // There is no round for db2 + lrr, err := db2.LoadLatestRoundRequested() + require.NoError(t, err) + require.Equal(t, 0, int(lrr.Epoch)) + + lrr, err = db.LoadLatestRoundRequested() + require.NoError(t, err) + + assert.Equal(t, rr, lrr) + }) + + t.Run("spec with latest round requested can be deleted", func(t *testing.T) { + _, err := sqlDB.Exec(`DELETE FROM ocr2_oracle_specs`) + assert.NoError(t, err) + }) +} diff --git a/core/services/relay/evm/request_round_tracker.go b/core/services/relay/evm/request_round_tracker.go new file mode 100644 index 00000000..f32fdf3e --- /dev/null +++ b/core/services/relay/evm/request_round_tracker.go @@ -0,0 +1,203 @@ +package evm + +import ( + "context" + "sync" + "time" + + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/services" + + evmclient "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + 
offchain_aggregator_wrapper "github.com/goplugin/pluginv3.0/v2/core/internal/gethwrappers2/generated/offchainaggregator" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// RequestRoundTracker subscribes to new request round logs. +type RequestRoundTracker struct { + services.StateMachine + + ethClient evmclient.Client + contract *offchain_aggregator_wrapper.OffchainAggregator + contractFilterer *ocr2aggregator.OCR2AggregatorFilterer + logBroadcaster log.Broadcaster + jobID int32 + lggr logger.SugaredLogger + odb RequestRoundDB + q pg.Q + blockTranslator ocrcommon.BlockTranslator + + // Start/Stop lifecycle + ctx context.Context + ctxCancel context.CancelFunc + unsubscribeLogs func() + + // LatestRoundRequested + latestRoundRequested ocr2aggregator.OCR2AggregatorRoundRequested + lrrMu sync.RWMutex +} + +// NewRequestRoundTracker makes a new RequestRoundTracker +func NewRequestRoundTracker( + contract *offchain_aggregator_wrapper.OffchainAggregator, + contractFilterer *ocr2aggregator.OCR2AggregatorFilterer, + ethClient evmclient.Client, + logBroadcaster log.Broadcaster, + jobID int32, + lggr logger.Logger, + db *sqlx.DB, + odb RequestRoundDB, + chain ocrcommon.Config, + qConfig pg.QConfig, +) (o *RequestRoundTracker) { + ctx, cancel := context.WithCancel(context.Background()) + return &RequestRoundTracker{ + ethClient: ethClient, + contract: contract, + contractFilterer: contractFilterer, + logBroadcaster: logBroadcaster, + jobID: jobID, + lggr: logger.Sugared(lggr), + odb: odb, + q: pg.NewQ(db, lggr, qConfig), + blockTranslator: ocrcommon.NewBlockTranslator(chain, ethClient, lggr), + ctx: ctx, + ctxCancel: cancel, + } +} + +// Start must be called before logs can be delivered +// It ought to be called before starting OCR +func (t *RequestRoundTracker) Start() error { + return t.StartOnce("RequestRoundTracker", func() (err error) { + 
t.latestRoundRequested, err = t.odb.LoadLatestRoundRequested() + if err != nil { + return errors.Wrap(err, "RequestRoundTracker#Start: failed to load latest round requested") + } + + t.unsubscribeLogs = t.logBroadcaster.Register(t, log.ListenerOpts{ + Contract: t.contract.Address(), + ParseLog: t.contract.ParseLog, + LogsWithTopics: map[gethCommon.Hash][][]log.Topic{ + offchain_aggregator_wrapper.OffchainAggregatorRoundRequested{}.Topic(): nil, + }, + MinIncomingConfirmations: 1, + }) + return nil + }) +} + +// Close should be called after teardown of the OCR job relying on this tracker +func (t *RequestRoundTracker) Close() error { + return t.StopOnce("RequestRoundTracker", func() error { + t.ctxCancel() + t.unsubscribeLogs() + return nil + }) +} + +// HandleLog complies with LogListener interface +// It is not thread safe +func (t *RequestRoundTracker) HandleLog(lb log.Broadcast) { + was, err := t.logBroadcaster.WasAlreadyConsumed(lb) + if err != nil { + t.lggr.Errorw("OCRContract: could not determine if log was already consumed", "err", err) + return + } else if was { + return + } + + raw := lb.RawLog() + if raw.Address != t.contract.Address() { + t.lggr.Errorf("log address of 0x%x does not match configured contract address of 0x%x", raw.Address, t.contract.Address()) + t.lggr.ErrorIf(t.logBroadcaster.MarkConsumed(lb), "unable to mark consumed") + return + } + topics := raw.Topics + if len(topics) == 0 { + t.lggr.ErrorIf(t.logBroadcaster.MarkConsumed(lb), "unable to mark consumed") + return + } + + var consumed bool + switch topics[0] { + case offchain_aggregator_wrapper.OffchainAggregatorRoundRequested{}.Topic(): + var rr *ocr2aggregator.OCR2AggregatorRoundRequested + rr, err = t.contractFilterer.ParseRoundRequested(raw) + if err != nil { + t.lggr.Errorw("could not parse round requested", "err", err) + t.lggr.ErrorIf(t.logBroadcaster.MarkConsumed(lb), "unable to mark consumed") + return + } + if IsLaterThan(raw, t.latestRoundRequested.Raw) { + err = 
t.q.Transaction(func(q pg.Queryer) error { + if err = t.odb.SaveLatestRoundRequested(q, *rr); err != nil { + return err + } + return t.logBroadcaster.MarkConsumed(lb, pg.WithQueryer(q)) + }) + if err != nil { + t.lggr.Error(err) + return + } + consumed = true + t.lrrMu.Lock() + t.latestRoundRequested = *rr + t.lrrMu.Unlock() + t.lggr.Infow("RequestRoundTracker: received new latest RoundRequested event", "latestRoundRequested", *rr) + } else { + t.lggr.Warnw("RequestRoundTracker: ignoring out of date RoundRequested event", "latestRoundRequested", t.latestRoundRequested, "roundRequested", rr) + } + default: + t.lggr.Debugw("RequestRoundTracker: got unrecognised log topic", "topic", topics[0]) + } + if !consumed { + t.lggr.ErrorIf(t.logBroadcaster.MarkConsumed(lb), "unable to mark consumed") + } +} + +// IsLaterThan returns true if the first log was emitted "after" the second log +// from the blockchain's point of view +func IsLaterThan(incoming gethTypes.Log, existing gethTypes.Log) bool { + return incoming.BlockNumber > existing.BlockNumber || + (incoming.BlockNumber == existing.BlockNumber && incoming.TxIndex > existing.TxIndex) || + (incoming.BlockNumber == existing.BlockNumber && incoming.TxIndex == existing.TxIndex && incoming.Index > existing.Index) +} + +// IsV2Job complies with LogListener interface +func (t *RequestRoundTracker) IsV2Job() bool { + return true +} + +// JobID complies with LogListener interface +func (t *RequestRoundTracker) JobID() int32 { + return t.jobID +} + +// LatestRoundRequested returns the configDigest, epoch, and round from the latest +// RoundRequested event emitted by the contract. LatestRoundRequested may or may not +// return a result if the latest such event was emitted in a block b such that +// b.timestamp < tip.timestamp - lookback. +// +// If no event is found, LatestRoundRequested should return zero values, not an error. +// An error should only be returned if an actual error occurred during execution, +// e.g. 
because there was an error querying the blockchain or the database. +// +// As an optimization, this function may also return zero values, if no +// RoundRequested event has been emitted after the latest NewTransmission event. +func (t *RequestRoundTracker) LatestRoundRequested(_ context.Context, lookback time.Duration) (configDigest ocrtypes.ConfigDigest, epoch uint32, round uint8, err error) { + t.lrrMu.RLock() + defer t.lrrMu.RUnlock() + return t.latestRoundRequested.ConfigDigest, t.latestRoundRequested.Epoch, t.latestRoundRequested.Round, nil +} diff --git a/core/services/relay/evm/request_round_tracker_test.go b/core/services/relay/evm/request_round_tracker_test.go new file mode 100644 index 00000000..a867c0e8 --- /dev/null +++ b/core/services/relay/evm/request_round_tracker_test.go @@ -0,0 +1,349 @@ +package evm_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + commonmocks "github.com/goplugin/pluginv3.0/v2/common/mocks" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + evmconfig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + logmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log/mocks" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + offchain_aggregator_wrapper "github.com/goplugin/pluginv3.0/v2/core/internal/gethwrappers2/generated/offchainaggregator" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/testhelpers" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/mocks" +) + +func mustNewContract(t *testing.T, address gethCommon.Address) *offchain_aggregator_wrapper.OffchainAggregator { + contract, err := offchain_aggregator_wrapper.NewOffchainAggregator(address, nil) + require.NoError(t, err) + return contract +} + +func mustNewFilterer(t *testing.T, address gethCommon.Address) *ocr2aggregator.OCR2AggregatorFilterer { + filterer, err := ocr2aggregator.NewOCR2AggregatorFilterer(testutils.NewAddress(), nil) + require.NoError(t, err) + return filterer +} + +type contractTrackerUni struct { + db *mocks.RequestRoundDB + lb *logmocks.Broadcaster + hb *commonmocks.HeadBroadcaster[*evmtypes.Head, common.Hash] + ec *evmclimocks.Client + requestRoundTracker *evm.RequestRoundTracker +} + +func newContractTrackerUni(t *testing.T, opts ...interface{}) (uni contractTrackerUni) { + var chain evmconfig.ChainScopedConfig + var filterer *ocr2aggregator.OCR2AggregatorFilterer + var contract *offchain_aggregator_wrapper.OffchainAggregator + for _, opt := range opts { + switch v := opt.(type) { + case evmconfig.ChainScopedConfig: + chain = v + case *ocr2aggregator.OCR2AggregatorFilterer: + filterer = v + case *offchain_aggregator_wrapper.OffchainAggregator: + contract = v + default: + t.Fatalf("unrecognised option type %T", v) + } + } + if chain == nil { + chain = evmtest.NewChainScopedConfig(t, configtest.NewTestGeneralConfig(t)) + } + if filterer == nil { + filterer = mustNewFilterer(t, testutils.NewAddress()) + } + if contract == nil { + contract = mustNewContract(t, testutils.NewAddress()) + } + uni.db = mocks.NewRequestRoundDB(t) + uni.lb = logmocks.NewBroadcaster(t) 
+ uni.hb = commonmocks.NewHeadBroadcaster[*evmtypes.Head, common.Hash](t) + uni.ec = evmclimocks.NewClient(t) + + db := pgtest.NewSqlxDB(t) + lggr := logger.TestLogger(t) + uni.requestRoundTracker = evm.NewRequestRoundTracker( + contract, + filterer, + uni.ec, + uni.lb, + 42, + lggr, + db, + uni.db, + chain.EVM(), + chain.Database(), + ) + + return uni +} + +func Test_OCRContractTracker_HandleLog_OCRContractLatestRoundRequested(t *testing.T) { + t.Parallel() + + fixtureLogAddress := gethCommon.HexToAddress("0x03bd0d5d39629423979f8a0e53dbce78c1791ebf") + fixtureFilterer := mustNewFilterer(t, fixtureLogAddress) + fixtureContract := mustNewContract(t, fixtureLogAddress) + + t.Run("does not update if contract address doesn't match", func(t *testing.T) { + uni := newContractTrackerUni(t) + logBroadcast := logmocks.NewBroadcast(t) + + rawLog := cltest.LogFromFixture(t, "../../../testdata/jsonrpc/ocr2_round_requested_log_1_1.json") + logBroadcast.On("RawLog").Return(rawLog).Maybe() + logBroadcast.On("String").Return("").Maybe() + uni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + configDigest, epoch, round, err := uni.requestRoundTracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + require.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + require.Equal(t, 0, int(round)) + require.Equal(t, 0, int(epoch)) + + uni.requestRoundTracker.HandleLog(logBroadcast) + + configDigest, epoch, round, err = uni.requestRoundTracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + require.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + require.Equal(t, 0, int(round)) + require.Equal(t, 0, int(epoch)) + }) + + t.Run("does nothing if log has already been consumed", func(t *testing.T) { + uni := newContractTrackerUni(t, fixtureFilterer, fixtureContract) + logBroadcast := logmocks.NewBroadcast(t) + 
logBroadcast.On("String").Return("").Maybe() + + uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(true, nil) + + configDigest, epoch, round, err := uni.requestRoundTracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + require.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + require.Equal(t, 0, int(round)) + require.Equal(t, 0, int(epoch)) + + uni.requestRoundTracker.HandleLog(logBroadcast) + + configDigest, epoch, round, err = uni.requestRoundTracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + require.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + require.Equal(t, 0, int(round)) + require.Equal(t, 0, int(epoch)) + }) + + t.Run("for new round requested log", func(t *testing.T) { + uni := newContractTrackerUni(t, fixtureFilterer, fixtureContract) + + configDigest, epoch, round, err := uni.requestRoundTracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + require.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + require.Equal(t, 0, int(round)) + require.Equal(t, 0, int(epoch)) + + // Any round supercedes the 0 round + + rawLog := cltest.LogFromFixture(t, "../../../testdata/jsonrpc/ocr2_round_requested_log_1_1.json") + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("RawLog").Return(rawLog).Maybe() + logBroadcast.On("String").Return("").Maybe() + uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + uni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + + uni.db.On("SaveLatestRoundRequested", mock.Anything, mock.MatchedBy(func(rr ocr2aggregator.OCR2AggregatorRoundRequested) bool { + return rr.Epoch == 1 && rr.Round == 1 + })).Return(nil) + + uni.requestRoundTracker.HandleLog(logBroadcast) + + configDigest, epoch, round, err = uni.requestRoundTracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + assert.Equal(t, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 
configDigest.Hex()) + assert.Equal(t, 1, int(epoch)) + assert.Equal(t, 1, int(round)) + + // Same round with higher epoch supercedes + rawLog2 := cltest.LogFromFixture(t, "../../../testdata/jsonrpc/ocr2_round_requested_log_1_9.json") + logBroadcast2 := logmocks.NewBroadcast(t) + logBroadcast2.On("RawLog").Return(rawLog2).Maybe() + logBroadcast2.On("String").Return("").Maybe() + uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + uni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + + uni.db.On("SaveLatestRoundRequested", mock.Anything, mock.MatchedBy(func(rr ocr2aggregator.OCR2AggregatorRoundRequested) bool { + return rr.Epoch == 1 && rr.Round == 9 + })).Return(nil) + + uni.requestRoundTracker.HandleLog(logBroadcast2) + + configDigest, epoch, round, err = uni.requestRoundTracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + assert.Equal(t, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", configDigest.Hex()) + assert.Equal(t, 1, int(epoch)) + assert.Equal(t, 9, int(round)) + + // Same round with lower epoch is ignored + uni.requestRoundTracker.HandleLog(logBroadcast) + + configDigest, epoch, round, err = uni.requestRoundTracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + assert.Equal(t, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", configDigest.Hex()) + assert.Equal(t, 1, int(epoch)) + assert.Equal(t, 9, int(round)) + + // Higher epoch with lower round supercedes + rawLog3 := cltest.LogFromFixture(t, "../../../testdata/jsonrpc/ocr2_round_requested_log_2_1.json") + rawLog3.Address = fixtureContract.Address() + logBroadcast3 := logmocks.NewBroadcast(t) + logBroadcast3.On("RawLog").Return(rawLog3).Maybe() + logBroadcast3.On("String").Return("").Maybe() + uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + uni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil) + + 
uni.db.On("SaveLatestRoundRequested", mock.Anything, mock.MatchedBy(func(rr ocr2aggregator.OCR2AggregatorRoundRequested) bool { + return rr.Epoch == 2 && rr.Round == 1 + })).Return(nil) + + uni.requestRoundTracker.HandleLog(logBroadcast3) + + configDigest, epoch, round, err = uni.requestRoundTracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + assert.Equal(t, "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", configDigest.Hex()) + assert.Equal(t, 2, int(epoch)) + assert.Equal(t, 1, int(round)) + }) + + t.Run("does not mark consumed or update state if latest round fails to save", func(t *testing.T) { + uni := newContractTrackerUni(t, fixtureFilterer, fixtureContract) + + rawLog := cltest.LogFromFixture(t, "../../../testdata/jsonrpc/ocr2_round_requested_log_1_1.json") + rawLog.Address = fixtureContract.Address() + logBroadcast := logmocks.NewBroadcast(t) + logBroadcast.On("RawLog").Return(rawLog).Maybe() + logBroadcast.On("String").Return("").Maybe() + uni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + + uni.db.On("SaveLatestRoundRequested", mock.Anything, mock.Anything).Return(errors.New("something exploded")) + + uni.requestRoundTracker.HandleLog(logBroadcast) + + configDigest, epoch, round, err := uni.requestRoundTracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + require.Equal(t, ocrtypes.ConfigDigest{}, configDigest) + require.Equal(t, 0, int(round)) + require.Equal(t, 0, int(epoch)) + }) + + t.Run("restores latest round requested from database on start", func(t *testing.T) { + uni := newContractTrackerUni(t, fixtureFilterer, fixtureContract) + + rawLog := cltest.LogFromFixture(t, "../../../testdata/jsonrpc/ocr2_round_requested_log_1_1.json") + rr := ocr2aggregator.OCR2AggregatorRoundRequested{ + Requester: testutils.NewAddress(), + ConfigDigest: testhelpers.MakeConfigDigest(t), + Epoch: 42, + Round: 9, + Raw: rawLog, + } + + eventuallyCloseLogBroadcaster 
:= cltest.NewAwaiter() + uni.lb.On("Register", uni.requestRoundTracker, mock.Anything).Return(func() { eventuallyCloseLogBroadcaster.ItHappened() }) + uni.lb.On("IsConnected").Return(true).Maybe() + + uni.db.On("LoadLatestRoundRequested").Return(rr, nil) + + require.NoError(t, uni.requestRoundTracker.Start()) + + configDigest, epoch, round, err := uni.requestRoundTracker.LatestRoundRequested(testutils.Context(t), 0) + require.NoError(t, err) + assert.Equal(t, (ocrtypes.ConfigDigest)(rr.ConfigDigest).Hex(), configDigest.Hex()) + assert.Equal(t, rr.Epoch, epoch) + assert.Equal(t, rr.Round, round) + + require.NoError(t, uni.requestRoundTracker.Close()) + + eventuallyCloseLogBroadcaster.AssertHappened(t, true) + }) +} + +func Test_OCRContractTracker_IsLaterThan(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + incoming types.Log + existing types.Log + expected bool + }{ + { + "incoming higher index than existing", + types.Log{BlockNumber: 1, TxIndex: 1, Index: 2}, + types.Log{BlockNumber: 1, TxIndex: 1, Index: 1}, + true, + }, + { + "incoming lower index than existing", + types.Log{BlockNumber: 1, TxIndex: 1, Index: 1}, + types.Log{BlockNumber: 1, TxIndex: 1, Index: 2}, + false, + }, + { + "incoming identical to existing", + types.Log{BlockNumber: 1, TxIndex: 2, Index: 2}, + types.Log{BlockNumber: 1, TxIndex: 2, Index: 2}, + false, + }, + { + "incoming higher tx index than existing", + types.Log{BlockNumber: 1, TxIndex: 2, Index: 2}, + types.Log{BlockNumber: 1, TxIndex: 1, Index: 2}, + true, + }, + { + "incoming lower tx index than existing", + types.Log{BlockNumber: 1, TxIndex: 1, Index: 2}, + types.Log{BlockNumber: 1, TxIndex: 2, Index: 2}, + false, + }, + { + "incoming higher block number than existing", + types.Log{BlockNumber: 3, TxIndex: 2, Index: 2}, + types.Log{BlockNumber: 2, TxIndex: 2, Index: 2}, + true, + }, + { + "incoming lower block number than existing", + types.Log{BlockNumber: 2, TxIndex: 2, Index: 2}, + types.Log{BlockNumber: 3, 
TxIndex: 2, Index: 2}, + false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res := evm.IsLaterThan(test.incoming, test.existing) + assert.Equal(t, test.expected, res) + }) + } +} diff --git a/core/services/relay/evm/standard_config_provider.go b/core/services/relay/evm/standard_config_provider.go new file mode 100644 index 00000000..d2b658f7 --- /dev/null +++ b/core/services/relay/evm/standard_config_provider.go @@ -0,0 +1,52 @@ +package evm + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/libocr/offchainreporting2plus/chains/evmutil" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +func newStandardConfigProvider(lggr logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts) (*configWatcher, error) { + if !common.IsHexAddress(opts.ContractID) { + return nil, errors.New("invalid contractID, expected hex address") + } + + aggregatorAddress := common.HexToAddress(opts.ContractID) + offchainConfigDigester := evmutil.EVMOffchainConfigDigester{ + ChainID: chain.Config().EVM().ChainID().Uint64(), + ContractAddress: aggregatorAddress, + } + return newContractConfigProvider(lggr, chain, opts, aggregatorAddress, OCR2AggregatorLogDecoder, offchainConfigDigester) +} + +func newContractConfigProvider(lggr logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts, aggregatorAddress common.Address, ld LogDecoder, digester ocrtypes.OffchainConfigDigester) (*configWatcher, error) { + var cp types.ConfigPoller + + relayConfig, err := opts.RelayConfig() + if err != nil { + return nil, fmt.Errorf("failed to get relay config: %w", err) + } + cp, err = NewConfigPoller( + lggr, + CPConfig{ + chain.Client(), + chain.LogPoller(), + aggregatorAddress, + relayConfig.ConfigContractAddress, + ld, 
+ }, + ) + if err != nil { + return nil, err + } + + return newConfigWatcher(lggr, aggregatorAddress, digester, cp, chain, relayConfig.FromBlock, opts.New), nil +} diff --git a/core/services/relay/evm/types/abi_types.go b/core/services/relay/evm/types/abi_types.go new file mode 100644 index 00000000..4d1328bc --- /dev/null +++ b/core/services/relay/evm/types/abi_types.go @@ -0,0 +1,70 @@ +package types + +import ( + "reflect" + + "github.com/ethereum/go-ethereum/common" +) + +//go:generate go run ./gen/main.go + +var typeMap = map[string]*ABIEncodingType{ + "bool": { + native: reflect.TypeOf(true), + checked: reflect.TypeOf(true), + }, + "int8": { + native: reflect.TypeOf(int8(0)), + checked: reflect.TypeOf(int8(0)), + }, + "int16": { + native: reflect.TypeOf(int16(0)), + checked: reflect.TypeOf(int16(0)), + }, + "int32": { + native: reflect.TypeOf(int32(0)), + checked: reflect.TypeOf(int32(0)), + }, + "int64": { + native: reflect.TypeOf(int64(0)), + checked: reflect.TypeOf(int64(0)), + }, + "uint8": { + native: reflect.TypeOf(uint8(0)), + checked: reflect.TypeOf(uint8(0)), + }, + "uint16": { + native: reflect.TypeOf(uint16(0)), + checked: reflect.TypeOf(uint16(0)), + }, + "uint32": { + native: reflect.TypeOf(uint32(0)), + checked: reflect.TypeOf(uint32(0)), + }, + "uint64": { + native: reflect.TypeOf(uint64(0)), + checked: reflect.TypeOf(uint64(0)), + }, + "string": { + native: reflect.TypeOf(""), + checked: reflect.TypeOf(""), + }, + "address": { + native: reflect.TypeOf(common.Address{}), + checked: reflect.TypeOf(common.Address{}), + }, + "bytes": { + native: reflect.TypeOf([]byte{}), + checked: reflect.TypeOf([]byte{}), + }, +} + +type ABIEncodingType struct { + native reflect.Type + checked reflect.Type +} + +func GetAbiEncodingType(name string) (*ABIEncodingType, bool) { + abiType, ok := typeMap[name] + return abiType, ok +} diff --git a/core/services/relay/evm/types/byte_types_gen.go b/core/services/relay/evm/types/byte_types_gen.go new file mode 100644 
index 00000000..cf8d15cc --- /dev/null +++ b/core/services/relay/evm/types/byte_types_gen.go @@ -0,0 +1,300 @@ +package types + +import "reflect" + +type bytes1 [1]byte + +func init() { + typeMap["bytes1"] = &ABIEncodingType{ + native: reflect.TypeOf([1]byte{}), + checked: reflect.TypeOf(bytes1{}), + } +} + +type bytes2 [2]byte + +func init() { + typeMap["bytes2"] = &ABIEncodingType{ + native: reflect.TypeOf([2]byte{}), + checked: reflect.TypeOf(bytes2{}), + } +} + +type bytes3 [3]byte + +func init() { + typeMap["bytes3"] = &ABIEncodingType{ + native: reflect.TypeOf([3]byte{}), + checked: reflect.TypeOf(bytes3{}), + } +} + +type bytes4 [4]byte + +func init() { + typeMap["bytes4"] = &ABIEncodingType{ + native: reflect.TypeOf([4]byte{}), + checked: reflect.TypeOf(bytes4{}), + } +} + +type bytes5 [5]byte + +func init() { + typeMap["bytes5"] = &ABIEncodingType{ + native: reflect.TypeOf([5]byte{}), + checked: reflect.TypeOf(bytes5{}), + } +} + +type bytes6 [6]byte + +func init() { + typeMap["bytes6"] = &ABIEncodingType{ + native: reflect.TypeOf([6]byte{}), + checked: reflect.TypeOf(bytes6{}), + } +} + +type bytes7 [7]byte + +func init() { + typeMap["bytes7"] = &ABIEncodingType{ + native: reflect.TypeOf([7]byte{}), + checked: reflect.TypeOf(bytes7{}), + } +} + +type bytes8 [8]byte + +func init() { + typeMap["bytes8"] = &ABIEncodingType{ + native: reflect.TypeOf([8]byte{}), + checked: reflect.TypeOf(bytes8{}), + } +} + +type bytes9 [9]byte + +func init() { + typeMap["bytes9"] = &ABIEncodingType{ + native: reflect.TypeOf([9]byte{}), + checked: reflect.TypeOf(bytes9{}), + } +} + +type bytes10 [10]byte + +func init() { + typeMap["bytes10"] = &ABIEncodingType{ + native: reflect.TypeOf([10]byte{}), + checked: reflect.TypeOf(bytes10{}), + } +} + +type bytes11 [11]byte + +func init() { + typeMap["bytes11"] = &ABIEncodingType{ + native: reflect.TypeOf([11]byte{}), + checked: reflect.TypeOf(bytes11{}), + } +} + +type bytes12 [12]byte + +func init() { + typeMap["bytes12"] = 
&ABIEncodingType{ + native: reflect.TypeOf([12]byte{}), + checked: reflect.TypeOf(bytes12{}), + } +} + +type bytes13 [13]byte + +func init() { + typeMap["bytes13"] = &ABIEncodingType{ + native: reflect.TypeOf([13]byte{}), + checked: reflect.TypeOf(bytes13{}), + } +} + +type bytes14 [14]byte + +func init() { + typeMap["bytes14"] = &ABIEncodingType{ + native: reflect.TypeOf([14]byte{}), + checked: reflect.TypeOf(bytes14{}), + } +} + +type bytes15 [15]byte + +func init() { + typeMap["bytes15"] = &ABIEncodingType{ + native: reflect.TypeOf([15]byte{}), + checked: reflect.TypeOf(bytes15{}), + } +} + +type bytes16 [16]byte + +func init() { + typeMap["bytes16"] = &ABIEncodingType{ + native: reflect.TypeOf([16]byte{}), + checked: reflect.TypeOf(bytes16{}), + } +} + +type bytes17 [17]byte + +func init() { + typeMap["bytes17"] = &ABIEncodingType{ + native: reflect.TypeOf([17]byte{}), + checked: reflect.TypeOf(bytes17{}), + } +} + +type bytes18 [18]byte + +func init() { + typeMap["bytes18"] = &ABIEncodingType{ + native: reflect.TypeOf([18]byte{}), + checked: reflect.TypeOf(bytes18{}), + } +} + +type bytes19 [19]byte + +func init() { + typeMap["bytes19"] = &ABIEncodingType{ + native: reflect.TypeOf([19]byte{}), + checked: reflect.TypeOf(bytes19{}), + } +} + +type bytes20 [20]byte + +func init() { + typeMap["bytes20"] = &ABIEncodingType{ + native: reflect.TypeOf([20]byte{}), + checked: reflect.TypeOf(bytes20{}), + } +} + +type bytes21 [21]byte + +func init() { + typeMap["bytes21"] = &ABIEncodingType{ + native: reflect.TypeOf([21]byte{}), + checked: reflect.TypeOf(bytes21{}), + } +} + +type bytes22 [22]byte + +func init() { + typeMap["bytes22"] = &ABIEncodingType{ + native: reflect.TypeOf([22]byte{}), + checked: reflect.TypeOf(bytes22{}), + } +} + +type bytes23 [23]byte + +func init() { + typeMap["bytes23"] = &ABIEncodingType{ + native: reflect.TypeOf([23]byte{}), + checked: reflect.TypeOf(bytes23{}), + } +} + +type bytes24 [24]byte + +func init() { + typeMap["bytes24"] = 
&ABIEncodingType{ + native: reflect.TypeOf([24]byte{}), + checked: reflect.TypeOf(bytes24{}), + } +} + +type bytes25 [25]byte + +func init() { + typeMap["bytes25"] = &ABIEncodingType{ + native: reflect.TypeOf([25]byte{}), + checked: reflect.TypeOf(bytes25{}), + } +} + +type bytes26 [26]byte + +func init() { + typeMap["bytes26"] = &ABIEncodingType{ + native: reflect.TypeOf([26]byte{}), + checked: reflect.TypeOf(bytes26{}), + } +} + +type bytes27 [27]byte + +func init() { + typeMap["bytes27"] = &ABIEncodingType{ + native: reflect.TypeOf([27]byte{}), + checked: reflect.TypeOf(bytes27{}), + } +} + +type bytes28 [28]byte + +func init() { + typeMap["bytes28"] = &ABIEncodingType{ + native: reflect.TypeOf([28]byte{}), + checked: reflect.TypeOf(bytes28{}), + } +} + +type bytes29 [29]byte + +func init() { + typeMap["bytes29"] = &ABIEncodingType{ + native: reflect.TypeOf([29]byte{}), + checked: reflect.TypeOf(bytes29{}), + } +} + +type bytes30 [30]byte + +func init() { + typeMap["bytes30"] = &ABIEncodingType{ + native: reflect.TypeOf([30]byte{}), + checked: reflect.TypeOf(bytes30{}), + } +} + +type bytes31 [31]byte + +func init() { + typeMap["bytes31"] = &ABIEncodingType{ + native: reflect.TypeOf([31]byte{}), + checked: reflect.TypeOf(bytes31{}), + } +} + +type bytes32 [32]byte + +func init() { + typeMap["bytes32"] = &ABIEncodingType{ + native: reflect.TypeOf([32]byte{}), + checked: reflect.TypeOf(bytes32{}), + } +} + +type bytes0 [0]byte + +func init() { + typeMap["bytes0"] = &ABIEncodingType{ + native: reflect.TypeOf([0]byte{}), + checked: reflect.TypeOf(bytes0{}), + } +} diff --git a/core/services/relay/evm/types/codec_entry.go b/core/services/relay/evm/types/codec_entry.go new file mode 100644 index 00000000..c8a23727 --- /dev/null +++ b/core/services/relay/evm/types/codec_entry.go @@ -0,0 +1,256 @@ +package types + +import ( + "fmt" + "reflect" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + + 
"github.com/goplugin/plugin-common/pkg/codec" + + commontypes "github.com/goplugin/plugin-common/pkg/types" +) + +// MaxTopicFields is three because the EVM has a max of four topics, but the first topic is always the event signature. +const MaxTopicFields = 3 + +type CodecEntry interface { + Init() error + Args() abi.Arguments + EncodingPrefix() []byte + GetMaxSize(n int) (int, error) + Modifier() codec.Modifier + + // CheckedType provides a type that can be used to decode into with type-safety around sizes of integers etc. + CheckedType() reflect.Type + + // ToNative converts a pointer to checked value into a pointer of a type to use with the go-ethereum ABI encoder + // Note that modification of the returned value will modify the original checked value and vice versa. + ToNative(checked reflect.Value) (reflect.Value, error) + + // IsNativePointer returns if the type is a pointer to the native type + IsNativePointer(item reflect.Type) bool +} + +func NewCodecEntry(args abi.Arguments, encodingPrefix []byte, mod codec.Modifier) CodecEntry { + if mod == nil { + mod = codec.MultiModifier{} + } + return &codecEntry{args: args, encodingPrefix: encodingPrefix, mod: mod} +} + +type codecEntry struct { + args abi.Arguments + encodingPrefix []byte + checkedType reflect.Type + nativeType reflect.Type + mod codec.Modifier +} + +func (entry *codecEntry) CheckedType() reflect.Type { + return entry.checkedType +} + +func (entry *codecEntry) NativeType() reflect.Type { + return entry.nativeType +} + +func (entry *codecEntry) ToNative(checked reflect.Value) (reflect.Value, error) { + if checked.Type() != reflect.PointerTo(entry.checkedType) { + return reflect.Value{}, fmt.Errorf("%w: checked type %v does not match expected type %v", commontypes.ErrInvalidType, reflect.TypeOf(checked), entry.checkedType) + } + + return reflect.NewAt(entry.nativeType, checked.UnsafePointer()), nil +} + +func (entry *codecEntry) IsNativePointer(item reflect.Type) bool { + return item == 
reflect.PointerTo(entry.nativeType) +} + +func (entry *codecEntry) Modifier() codec.Modifier { + return entry.mod +} + +func (entry *codecEntry) Args() abi.Arguments { + tmp := make(abi.Arguments, len(entry.args)) + copy(tmp, entry.args) + return tmp +} + +func (entry *codecEntry) EncodingPrefix() []byte { + tmp := make([]byte, len(entry.encodingPrefix)) + copy(tmp, entry.encodingPrefix) + return tmp +} + +func (entry *codecEntry) Init() error { + if entry.checkedType != nil { + return nil + } + + args := unwrapArgs(entry.args) + argLen := len(args) + native := make([]reflect.StructField, argLen) + checked := make([]reflect.StructField, argLen) + + // Single returns that aren't named will return that type + // whereas named parameters will return a struct with the fields + // Eg: function foo() returns (int256) ... will return a *big.Int for the native type + // function foo() returns (int256 i) ... will return a struct { I *big.Int } for the native type + // function foo() returns (int256 i1, int256 i2) ... 
will return a struct { I1 *big.Int, I2 *big.Int } for the native type + if len(args) == 1 && args[0].Name == "" { + nativeArg, checkedArg, err := getNativeAndCheckedTypesForArg(&args[0]) + if err != nil { + return err + } + entry.nativeType = nativeArg + entry.checkedType = checkedArg + return nil + } + + numIndices := 0 + seenNames := map[string]bool{} + for i, arg := range args { + if arg.Indexed { + if numIndices == MaxTopicFields { + return fmt.Errorf("%w: too many indexed arguments", commontypes.ErrInvalidConfig) + } + numIndices++ + } + + tmp := arg + nativeArg, checkedArg, err := getNativeAndCheckedTypesForArg(&tmp) + if err != nil { + return err + } + if len(arg.Name) == 0 { + return fmt.Errorf("%w: empty field names are not supported for multiple returns", commontypes.ErrInvalidType) + } + + name := strings.ToUpper(arg.Name[:1]) + arg.Name[1:] + if seenNames[name] { + return fmt.Errorf("%w: duplicate field name %s, after ToCamelCase", commontypes.ErrInvalidConfig, name) + } + seenNames[name] = true + native[i] = reflect.StructField{Name: name, Type: nativeArg} + checked[i] = reflect.StructField{Name: name, Type: checkedArg} + } + + entry.nativeType = structOfPointers(native) + entry.checkedType = structOfPointers(checked) + return nil +} + +func (entry *codecEntry) GetMaxSize(n int) (int, error) { + return GetMaxSize(n, entry.args) +} + +func unwrapArgs(args abi.Arguments) abi.Arguments { + // Unwrap an unnamed tuple so that callers don't need to wrap it + // Eg: If you have struct Foo { ... 
} and return an unnamed Foo, you should be able ot decode to a go Foo{} directly + if len(args) != 1 || args[0].Name != "" { + return args + } + + elms := args[0].Type.TupleElems + if len(elms) != 0 { + names := args[0].Type.TupleRawNames + args = make(abi.Arguments, len(elms)) + for i, elm := range elms { + args[i] = abi.Argument{ + Name: names[i], + Type: *elm, + } + } + } + return args +} + +func getNativeAndCheckedTypesForArg(arg *abi.Argument) (reflect.Type, reflect.Type, error) { + tmp := arg.Type + if arg.Indexed { + switch arg.Type.T { + case abi.StringTy: + return reflect.TypeOf(common.Hash{}), reflect.TypeOf(common.Hash{}), nil + case abi.ArrayTy: + u8, _ := GetAbiEncodingType("uint8") + if arg.Type.Elem.GetType() == u8.native { + return reflect.TypeOf(common.Hash{}), reflect.TypeOf(common.Hash{}), nil + } + fallthrough + case abi.SliceTy, abi.TupleTy, abi.FixedBytesTy, abi.FixedPointTy, abi.FunctionTy: + // https://github.com/ethereum/go-ethereum/blob/release/1.12/accounts/abi/topics.go#L78 + return nil, nil, fmt.Errorf("%w: unsupported indexed type: %v", commontypes.ErrInvalidConfig, arg.Type) + default: + } + } + + return getNativeAndCheckedTypes(&tmp) +} + +func getNativeAndCheckedTypes(curType *abi.Type) (reflect.Type, reflect.Type, error) { + converter := func(t reflect.Type) reflect.Type { return t } + for curType.Elem != nil { + prior := converter + switch curType.GetType().Kind() { + case reflect.Slice: + converter = func(t reflect.Type) reflect.Type { + return prior(reflect.SliceOf(t)) + } + curType = curType.Elem + case reflect.Array: + tmp := curType + converter = func(t reflect.Type) reflect.Type { + return prior(reflect.ArrayOf(tmp.Size, t)) + } + curType = curType.Elem + default: + return nil, nil, fmt.Errorf( + "%w: cannot create type for kind %v", commontypes.ErrInvalidType, curType.GetType().Kind()) + } + } + base, ok := GetAbiEncodingType(curType.String()) + if ok { + return converter(base.native), converter(base.checked), nil + } + + 
return createTupleType(curType, converter) +} + +func createTupleType(curType *abi.Type, converter func(reflect.Type) reflect.Type) (reflect.Type, reflect.Type, error) { + if len(curType.TupleElems) == 0 { + if curType.TupleType == nil { + return nil, nil, fmt.Errorf("%w: unsupported solidity type: %v", commontypes.ErrInvalidType, curType.String()) + } + return curType.TupleType, curType.TupleType, nil + } + + // Our naive types always have the same layout as the checked ones. + // This differs intentionally from the type.GetType() in abi as fields on structs are pointers in ours to + // verify that fields are intentionally set. + nativeFields := make([]reflect.StructField, len(curType.TupleElems)) + checkedFields := make([]reflect.StructField, len(curType.TupleElems)) + for i, elm := range curType.TupleElems { + name := curType.TupleRawNames[i] + nativeFields[i].Name = name + checkedFields[i].Name = name + nativeArgType, checkedArgType, err := getNativeAndCheckedTypes(elm) + if err != nil { + return nil, nil, err + } + nativeFields[i].Type = nativeArgType + checkedFields[i].Type = checkedArgType + } + return converter(structOfPointers(nativeFields)), converter(structOfPointers(checkedFields)), nil +} + +func structOfPointers(fields []reflect.StructField) reflect.Type { + for i := range fields { + if fields[i].Type.Kind() != reflect.Pointer { + fields[i].Type = reflect.PointerTo(fields[i].Type) + } + } + return reflect.StructOf(fields) +} diff --git a/core/services/relay/evm/types/codec_entry_test.go b/core/services/relay/evm/types/codec_entry_test.go new file mode 100644 index 00000000..e00f5d87 --- /dev/null +++ b/core/services/relay/evm/types/codec_entry_test.go @@ -0,0 +1,289 @@ +package types + +import ( + "errors" + "math/big" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + 
"github.com/goplugin/plugin-common/pkg/codec" + + commontypes "github.com/goplugin/plugin-common/pkg/types" +) + +func TestCodecEntry(t *testing.T) { + t.Run("basic types", func(t *testing.T) { + type1, err := abi.NewType("uint16", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + type2, err := abi.NewType("string", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + type3, err := abi.NewType("uint24", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + type4, err := abi.NewType("int24", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + args := abi.Arguments{ + {Name: "Field1", Type: type1}, + {Name: "Field2", Type: type2}, + {Name: "Field3", Type: type3}, + {Name: "Field4", Type: type4}, + } + entry := NewCodecEntry(args, nil, nil) + require.NoError(t, entry.Init()) + checked := reflect.New(entry.CheckedType()) + iChecked := reflect.Indirect(checked) + f1 := uint16(2) + iChecked.FieldByName("Field1").Set(reflect.ValueOf(&f1)) + f2 := "any string" + iChecked.FieldByName("Field2").Set(reflect.ValueOf(&f2)) + + f3 := big.NewInt( /*2^24 - 1*/ 16777215) + setAndVerifyLimit(t, (*uint24)(f3), f3, iChecked.FieldByName("Field3")) + + f4 := big.NewInt( /*2^23 - 1*/ 8388607) + setAndVerifyLimit(t, (*int24)(f4), f4, iChecked.FieldByName("Field4")) + + rNative, err := entry.ToNative(checked) + require.NoError(t, err) + iNative := reflect.Indirect(rNative) + assert.Equal(t, iNative.Field(0).Interface(), iChecked.Field(0).Interface()) + assert.Equal(t, iNative.Field(1).Interface(), iChecked.Field(1).Interface()) + assert.Equal(t, iNative.Field(2).Interface(), f3) + assert.Equal(t, iNative.Field(3).Interface(), f4) + assertHaveSameStructureAndNames(t, iNative.Type(), entry.CheckedType()) + }) + + t.Run("tuples", func(t *testing.T) { + type1, err := abi.NewType("uint16", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + tupleType, err := abi.NewType("tuple", "", []abi.ArgumentMarshaling{ + {Name: "Field3", Type: "uint24"}, + {Name: 
"Field4", Type: "int24"}, + }) + require.NoError(t, err) + args := abi.Arguments{ + {Name: "Field1", Type: type1}, + {Name: "Field2", Type: tupleType}, + } + entry := NewCodecEntry(args, nil, nil) + require.NoError(t, entry.Init()) + + checked := reflect.New(entry.CheckedType()) + iChecked := reflect.Indirect(checked) + f1 := uint16(2) + iChecked.FieldByName("Field1").Set(reflect.ValueOf(&f1)) + f2 := iChecked.FieldByName("Field2") + f2.Set(reflect.New(f2.Type().Elem())) + f2 = reflect.Indirect(f2) + f3 := big.NewInt( /*2^24 - 1*/ 16777215) + setAndVerifyLimit(t, (*uint24)(f3), f3, f2.FieldByName("Field3")) + f4 := big.NewInt( /*2^23 - 1*/ 8388607) + setAndVerifyLimit(t, (*int24)(f4), f4, f2.FieldByName("Field4")) + + native, err := entry.ToNative(checked) + require.NoError(t, err) + iNative := reflect.Indirect(native) + require.Equal(t, iNative.Field(0).Interface(), iChecked.Field(0).Interface()) + nF2 := reflect.Indirect(iNative.Field(1)) + assert.Equal(t, nF2.Field(0).Interface(), f3) + assert.Equal(t, nF2.Field(1).Interface(), f4) + assertHaveSameStructureAndNames(t, iNative.Type(), entry.CheckedType()) + }) + + t.Run("unwrapped types", func(t *testing.T) { + // This exists to allow you to decode single returned values without naming the parameter + wrappedTuple, err := abi.NewType("tuple", "", []abi.ArgumentMarshaling{ + {Name: "Field1", Type: "int16"}, + }) + require.NoError(t, err) + entry := NewCodecEntry(abi.Arguments{{Name: "", Type: wrappedTuple}}, nil, nil) + require.NoError(t, entry.Init()) + checked := reflect.New(entry.CheckedType()) + iChecked := reflect.Indirect(checked) + anyValue := int16(2) + iChecked.FieldByName("Field1").Set(reflect.ValueOf(&anyValue)) + native, err := entry.ToNative(checked) + require.NoError(t, err) + iNative := reflect.Indirect(native) + assert.Equal(t, &anyValue, iNative.FieldByName("Field1").Interface()) + assertHaveSameStructureAndNames(t, iNative.Type(), entry.CheckedType()) + }) + + t.Run("slice types", func(t 
*testing.T) { + type1, err := abi.NewType("int16[]", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + entry := NewCodecEntry(abi.Arguments{{Name: "Field1", Type: type1}}, nil, nil) + + require.NoError(t, entry.Init()) + checked := reflect.New(entry.CheckedType()) + iChecked := reflect.Indirect(checked) + anySliceValue := &[]int16{2, 3} + iChecked.FieldByName("Field1").Set(reflect.ValueOf(anySliceValue)) + native, err := entry.ToNative(checked) + require.NoError(t, err) + iNative := reflect.Indirect(native) + assert.Equal(t, anySliceValue, iNative.FieldByName("Field1").Interface()) + assertHaveSameStructureAndNames(t, iNative.Type(), entry.CheckedType()) + }) + + t.Run("array types", func(t *testing.T) { + type1, err := abi.NewType("int16[3]", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + entry := NewCodecEntry(abi.Arguments{{Name: "Field1", Type: type1}}, nil, nil) + require.NoError(t, entry.Init()) + checked := reflect.New(entry.CheckedType()) + iChecked := reflect.Indirect(checked) + anySliceValue := &[3]int16{2, 3, 30} + iChecked.FieldByName("Field1").Set(reflect.ValueOf(anySliceValue)) + native, err := entry.ToNative(checked) + require.NoError(t, err) + iNative := reflect.Indirect(native) + assert.Equal(t, anySliceValue, iNative.FieldByName("Field1").Interface()) + }) + + t.Run("Not return values makes struct{}", func(t *testing.T) { + entry := NewCodecEntry(abi.Arguments{}, nil, nil) + require.NoError(t, entry.Init()) + assert.Equal(t, reflect.TypeOf(struct{}{}), entry.CheckedType()) + native, err := entry.ToNative(reflect.ValueOf(&struct{}{})) + require.NoError(t, err) + assert.Equal(t, &struct{}{}, native.Interface()) + }) + + t.Run("Address works", func(t *testing.T) { + address, err := abi.NewType("address", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + entry := NewCodecEntry(abi.Arguments{{Name: "foo", Type: address}}, nil, nil) + require.NoError(t, entry.Init()) + + checked := reflect.New(entry.CheckedType()) + 
iChecked := reflect.Indirect(checked) + anyAddr := &common.Address{1, 2, 3} + iChecked.FieldByName("Foo").Set(reflect.ValueOf(anyAddr)) + + native, err := entry.ToNative(checked) + require.NoError(t, err) + iNative := reflect.Indirect(native) + assert.Equal(t, anyAddr, iNative.FieldByName("Foo").Interface()) + assertHaveSameStructureAndNames(t, iNative.Type(), entry.CheckedType()) + }) + + t.Run("Multiple unnamed parameters are not supported", func(t *testing.T) { + anyType, err := abi.NewType("int16[3]", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + entry := NewCodecEntry(abi.Arguments{{Name: "", Type: anyType}, {Name: "", Type: anyType}}, nil, nil) + assert.True(t, errors.Is(entry.Init(), commontypes.ErrInvalidType)) + }) + + t.Run("Multiple abi arguments with the same name returns an error", func(t *testing.T) { + anyType, err := abi.NewType("int16[3]", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + entry := NewCodecEntry(abi.Arguments{{Name: "Name", Type: anyType}, {Name: "Name", Type: anyType}}, nil, nil) + assert.True(t, errors.Is(entry.Init(), commontypes.ErrInvalidConfig)) + }) + + t.Run("Indexed basic types leave their native and checked types as-is", func(t *testing.T) { + anyType, err := abi.NewType("int16", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + entry := NewCodecEntry(abi.Arguments{{Name: "Name", Type: anyType, Indexed: true}}, nil, nil) + require.NoError(t, entry.Init()) + checkedField, ok := entry.CheckedType().FieldByName("Name") + require.True(t, ok) + assert.Equal(t, reflect.TypeOf((*int16)(nil)), checkedField.Type) + native, err := entry.ToNative(reflect.New(entry.CheckedType())) + require.NoError(t, err) + iNative := reflect.Indirect(native) + assertHaveSameStructureAndNames(t, iNative.Type(), entry.CheckedType()) + }) + + t.Run("Indexed non basic types change to hash", func(t *testing.T) { + anyType, err := abi.NewType("string", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + entry := 
NewCodecEntry(abi.Arguments{{Name: "Name", Type: anyType, Indexed: true}}, nil, nil) + require.NoError(t, entry.Init()) + nativeField, ok := entry.CheckedType().FieldByName("Name") + require.True(t, ok) + assert.Equal(t, reflect.TypeOf(&common.Hash{}), nativeField.Type) + native, err := entry.ToNative(reflect.New(entry.CheckedType())) + require.NoError(t, err) + assertHaveSameStructureAndNames(t, native.Type().Elem(), entry.CheckedType()) + }) + + t.Run("Too many indexed items returns an error", func(t *testing.T) { + anyType, err := abi.NewType("int16", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + entry := NewCodecEntry( + abi.Arguments{ + {Name: "Name1", Type: anyType, Indexed: true}, + {Name: "Name2", Type: anyType, Indexed: true}, + {Name: "Name3", Type: anyType, Indexed: true}, + {Name: "Name4", Type: anyType, Indexed: true}, + }, nil, nil) + require.True(t, errors.Is(entry.Init(), commontypes.ErrInvalidConfig)) + }) + + // TODO: when the TODO on + // https://github.com/ethereum/go-ethereum/blob/release/1.12/accounts/abi/topics.go#L78 + // is removed, remove this test. 
+ t.Run("Using unsupported types by go-ethereum returns an error", func(t *testing.T) { + anyType, err := abi.NewType("int256[2]", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + entry := NewCodecEntry(abi.Arguments{{Name: "Name", Type: anyType, Indexed: true}}, nil, nil) + assert.True(t, errors.Is(entry.Init(), commontypes.ErrInvalidConfig)) + }) + + t.Run("Modifier returns provided modifier", func(t *testing.T) { + anyType, err := abi.NewType("int16", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + mod := codec.NewRenamer(map[string]string{"Name": "RenamedName"}) + entry := NewCodecEntry(abi.Arguments{{Name: "Name", Type: anyType, Indexed: true}}, nil, mod) + assert.Equal(t, mod, entry.Modifier()) + }) + + t.Run("EncodingPrefix returns provided prefix", func(t *testing.T) { + anyType, err := abi.NewType("int16", "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + prefix := []byte{1, 2, 3} + entry := NewCodecEntry(abi.Arguments{{Name: "Name", Type: anyType, Indexed: true}}, prefix, nil) + assert.Equal(t, prefix, entry.EncodingPrefix()) + }) +} + +// sized and bi must be the same pointer. +func setAndVerifyLimit(t *testing.T, sbi SizedBigInt, bi *big.Int, field reflect.Value) { + require.Same(t, reflect.NewAt(reflect.TypeOf(big.Int{}), reflect.ValueOf(sbi).UnsafePointer()).Interface(), bi) + field.Set(reflect.ValueOf(sbi)) + assert.NoError(t, sbi.Verify()) + bi.Add(bi, big.NewInt(1)) + assert.IsType(t, commontypes.ErrInvalidType, sbi.Verify()) +} + +// verifying the same structure allows us to use unsafe pointers to cast between them. +// This is done for perf and simplicity in mapping the two structures. +// [reflect.NewAt]'s use is the same as (*native)(unsafe.Pointer(checked)) +// See the safe usecase 1 from [unsafe.Pointer], as this is a subset of that. +// This also verifies field names are the same. 
+func assertHaveSameStructureAndNames(t *testing.T, t1, t2 reflect.Type) {
+	require.Equal(t, t1.Kind(), t2.Kind())
+
+	switch t1.Kind() {
+	case reflect.Array:
+		// Arrays must agree on length as well as element structure.
+		require.Equal(t, t1.Len(), t2.Len())
+		assertHaveSameStructureAndNames(t, t1.Elem(), t2.Elem())
+	case reflect.Slice, reflect.Pointer:
+		// Recurse into the element/pointee type.
+		assertHaveSameStructureAndNames(t, t1.Elem(), t2.Elem())
+	case reflect.Struct:
+		// Same field count, same field names in the same order, and
+		// recursively the same field types.
+		numFields := t1.NumField()
+		require.Equal(t, numFields, t2.NumField())
+		for i := 0; i < numFields; i++ {
+			require.Equal(t, t1.Field(i).Name, t2.Field(i).Name)
+			assertHaveSameStructureAndNames(t, t1.Field(i).Type, t2.Field(i).Type)
+		}
+	default:
+		// Leaf kinds (ints, strings, ...) must be the identical type.
+		require.Equal(t, t1, t2)
+	}
+}
diff --git a/core/services/relay/evm/types/gen/bytes.go.tmpl b/core/services/relay/evm/types/gen/bytes.go.tmpl
new file mode 100644
index 00000000..3c06529b
--- /dev/null
+++ b/core/services/relay/evm/types/gen/bytes.go.tmpl
@@ -0,0 +1,14 @@
+package types
+
+import "reflect"
+
+{{ range . }}
+type bytes{{.Size}} [{{.Size}}]byte
+func init() {
+	typeMap["bytes{{.Size}}"] = &ABIEncodingType {
+		native: reflect.TypeOf([{{.Size}}]byte{}),
+		checked: reflect.TypeOf(bytes{{.Size}}{}),
+	}
+}
+
+{{ end }}
\ No newline at end of file
diff --git a/core/services/relay/evm/types/gen/ints.go.tmpl b/core/services/relay/evm/types/gen/ints.go.tmpl
new file mode 100644
index 00000000..a9d44890
--- /dev/null
+++ b/core/services/relay/evm/types/gen/ints.go.tmpl
@@ -0,0 +1,74 @@
+package types
+
+import (
+	"math/big"
+	"reflect"
+
+	"github.com/fxamacker/cbor/v2"
+
+	"github.com/goplugin/plugin-common/pkg/codec"
+	"github.com/goplugin/plugin-common/pkg/types"
+)
+
+type SizedBigInt interface {
+	Verify() error
+	private()
+}
+
+var sizedBigIntType = reflect.TypeOf((*SizedBigInt)(nil)).Elem()
+func SizedBigIntType() reflect.Type {
+	return sizedBigIntType
+}
+
+{{ range . 
}}
+type {{.Prefix}}int{{.Size}} big.Int
+func (i *{{.Prefix}}int{{.Size}}) UnmarshalCBOR(input []byte) error {
+	bi := (*big.Int)(i)
+	if err := cbor.Unmarshal(input, bi); err != nil {
+		return err
+	}
+
+	return i.Verify()
+}
+
+func (i *{{.Prefix}}int{{.Size}}) MarshalCBOR() ([]byte, error) {
+	return cbor.Marshal((*big.Int)(i))
+}
+
+func (i *{{.Prefix}}int{{.Size}}) UnmarshalText(input []byte) error {
+	bi := (*big.Int)(i)
+	if _, ok := bi.SetString(string(input), 10); !ok {
+		return types.ErrInvalidType
+	}
+
+	return i.Verify()
+}
+
+func (i *{{.Prefix}}int{{.Size}}) MarshalText() ([]byte, error) {
+	bi := (*big.Int)(i)
+	return []byte(bi.String()), nil
+}
+
+func (i *{{.Prefix}}int{{.Size}}) Verify() error {
+	bi := (*big.Int)(i)
+	{{ if .Signed }}
+	if !codec.FitsInNBitsSigned({{.Size}}, bi) {
+		return types.ErrInvalidType
+	}
+	{{ else }}
+	if bi.BitLen() > {{.Size}} || bi.Sign() < 0 {
+		return types.ErrInvalidType
+	}
+	{{ end }}
+	return nil
+}
+
+func (i *{{.Prefix}}int{{.Size}}) private() {}
+
+func init() {
+	typeMap["{{.Prefix}}int{{.Size}}"] = &ABIEncodingType {
+		native: reflect.TypeOf((*big.Int)(nil)),
+		checked: reflect.TypeOf((*{{.Prefix}}int{{.Size}})(nil)),
+	}
+}
+{{ end }}
\ No newline at end of file
diff --git a/core/services/relay/evm/types/gen/main.go b/core/services/relay/evm/types/gen/main.go
new file mode 100644
index 00000000..84e7c008
--- /dev/null
+++ b/core/services/relay/evm/types/gen/main.go
@@ -0,0 +1,77 @@
+package main
+
+import (
+	"bytes"
+	_ "embed"
+	"go/format"
+	"os"
+	"text/template"
+)
+
+func main() {
+	genInts()
+	genBytes()
+}
+
+func genBytes() {
+	// [32]ByteType, not [33]: the loop fills sizes 1..32 into exactly 32
+	// slots. A 33-element array left a trailing zero-valued entry (Size 0),
+	// which made the template emit a bogus "bytes0" type and typeMap entry.
+	// Regenerate byte_types_gen.go after this change.
+	byteTypes := [32]ByteType{}
+	for i := 1; i < 33; i++ {
+		byteTypes[i-1].Size = i
+	}
+	mustRunTemplate("bytes", bytesTemplate, "byte_types_gen.go", byteTypes)
+}
+
+func genInts() {
+	var intTypes []*IntType
+
+	// 8, 16, 32, and 64 bits have their own type in go that is used by abi.
+	// The rest use *big.Int
+	for i := 24; i <= 256; i += 8 {
+		if i == 32 || i == 64 {
+			continue
+		}
+
+		signed := &IntType{Size: i, Signed: true}
+		unsigned := &IntType{Prefix: "u", Size: i}
+		intTypes = append(intTypes, signed, unsigned)
+	}
+	mustRunTemplate("ints", intsTemplate, "int_types_gen.go", intTypes)
+}
+
+// mustRunTemplate renders rawTemplate with input, gofmt's the result, and
+// writes it to outputFile, panicking on any failure (generator-only code).
+func mustRunTemplate(name, rawTemplate, outputFile string, input any) {
+	t := template.Must(template.New(name).Parse(rawTemplate))
+
+	br := bytes.Buffer{}
+	if err := t.Execute(&br, input); err != nil {
+		panic(err)
+	}
+
+	res, err := format.Source(br.Bytes())
+	if err != nil {
+		panic(err)
+	}
+
+	if err = os.WriteFile(outputFile, res, 0600); err != nil {
+		panic(err)
+	}
+}
+
+type IntType struct {
+	Prefix string
+	Size   int
+	Signed bool
+}
+
+type ByteType struct {
+	Size int
+}
+
+//go:embed bytes.go.tmpl
+var bytesTemplate string
+
+//go:embed ints.go.tmpl
+var intsTemplate string
diff --git a/core/services/relay/evm/types/int_types_gen.go b/core/services/relay/evm/types/int_types_gen.go
new file mode 100644
index 00000000..d9aa80b6
--- /dev/null
+++ b/core/services/relay/evm/types/int_types_gen.go
@@ -0,0 +1,2710 @@
+package types
+
+import (
+	"math/big"
+	"reflect"
+
+	"github.com/fxamacker/cbor/v2"
+
+	"github.com/goplugin/plugin-common/pkg/codec"
+	"github.com/goplugin/plugin-common/pkg/types"
+)
+
+type SizedBigInt interface {
+	Verify() error
+	private()
+}
+
+var sizedBigIntType = reflect.TypeOf((*SizedBigInt)(nil)).Elem()
+
+func SizedBigIntType() reflect.Type {
+	return sizedBigIntType
+}
+
+type int24 big.Int
+
+func (i *int24) UnmarshalCBOR(input []byte) error {
+	bi := (*big.Int)(i)
+	if err := cbor.Unmarshal(input, bi); err != nil {
+		return err
+	}
+
+	return i.Verify()
+}
+
+func (i *int24) MarshalCBOR() ([]byte, error) {
+	return cbor.Marshal((*big.Int)(i))
+}
+
+func (i *int24) UnmarshalText(input []byte) error {
+	bi := (*big.Int)(i)
+	if _, ok := bi.SetString(string(input), 10); !ok {
+		return types.ErrInvalidType
+	}
+
return i.Verify() +} + +func (i *int24) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int24) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(24, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int24) private() {} + +func init() { + typeMap["int24"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int24)(nil)), + } +} + +type uint24 big.Int + +func (i *uint24) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint24) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint24) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint24) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint24) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 24 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint24) private() {} + +func init() { + typeMap["uint24"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint24)(nil)), + } +} + +type int40 big.Int + +func (i *int40) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int40) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int40) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int40) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int40) Verify() 
error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(40, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int40) private() {} + +func init() { + typeMap["int40"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int40)(nil)), + } +} + +type uint40 big.Int + +func (i *uint40) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint40) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint40) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint40) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint40) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 40 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint40) private() {} + +func init() { + typeMap["uint40"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint40)(nil)), + } +} + +type int48 big.Int + +func (i *int48) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int48) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int48) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int48) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int48) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(48, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int48) private() {} + 
+func init() { + typeMap["int48"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int48)(nil)), + } +} + +type uint48 big.Int + +func (i *uint48) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint48) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint48) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint48) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint48) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 48 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint48) private() {} + +func init() { + typeMap["uint48"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint48)(nil)), + } +} + +type int56 big.Int + +func (i *int56) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int56) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int56) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int56) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int56) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(56, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int56) private() {} + +func init() { + typeMap["int56"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int56)(nil)), + } +} + +type 
uint56 big.Int + +func (i *uint56) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint56) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint56) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint56) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint56) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 56 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint56) private() {} + +func init() { + typeMap["uint56"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint56)(nil)), + } +} + +type int72 big.Int + +func (i *int72) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int72) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int72) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int72) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int72) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(72, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int72) private() {} + +func init() { + typeMap["int72"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int72)(nil)), + } +} + +type uint72 big.Int + +func (i *uint72) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + 
} + + return i.Verify() +} + +func (i *uint72) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint72) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint72) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint72) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 72 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint72) private() {} + +func init() { + typeMap["uint72"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint72)(nil)), + } +} + +type int80 big.Int + +func (i *int80) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int80) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int80) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int80) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int80) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(80, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int80) private() {} + +func init() { + typeMap["int80"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int80)(nil)), + } +} + +type uint80 big.Int + +func (i *uint80) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint80) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint80) UnmarshalText(input 
[]byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint80) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint80) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 80 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint80) private() {} + +func init() { + typeMap["uint80"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint80)(nil)), + } +} + +type int88 big.Int + +func (i *int88) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int88) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int88) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int88) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int88) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(88, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int88) private() {} + +func init() { + typeMap["int88"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int88)(nil)), + } +} + +type uint88 big.Int + +func (i *uint88) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint88) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint88) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func 
(i *uint88) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint88) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 88 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint88) private() {} + +func init() { + typeMap["uint88"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint88)(nil)), + } +} + +type int96 big.Int + +func (i *int96) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int96) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int96) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int96) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int96) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(96, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int96) private() {} + +func init() { + typeMap["int96"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int96)(nil)), + } +} + +type uint96 big.Int + +func (i *uint96) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint96) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint96) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint96) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint96) Verify() error { + bi := 
(*big.Int)(i) + + if bi.BitLen() > 96 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint96) private() {} + +func init() { + typeMap["uint96"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint96)(nil)), + } +} + +type int104 big.Int + +func (i *int104) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int104) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int104) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int104) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int104) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(104, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int104) private() {} + +func init() { + typeMap["int104"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int104)(nil)), + } +} + +type uint104 big.Int + +func (i *uint104) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint104) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint104) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint104) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint104) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 104 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint104) private() {} 
+ +func init() { + typeMap["uint104"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint104)(nil)), + } +} + +type int112 big.Int + +func (i *int112) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int112) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int112) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int112) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int112) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(112, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int112) private() {} + +func init() { + typeMap["int112"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int112)(nil)), + } +} + +type uint112 big.Int + +func (i *uint112) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint112) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint112) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint112) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint112) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 112 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint112) private() {} + +func init() { + typeMap["uint112"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: 
reflect.TypeOf((*uint112)(nil)), + } +} + +type int120 big.Int + +func (i *int120) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int120) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int120) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int120) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int120) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(120, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int120) private() {} + +func init() { + typeMap["int120"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int120)(nil)), + } +} + +type uint120 big.Int + +func (i *uint120) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint120) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint120) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint120) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint120) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 120 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint120) private() {} + +func init() { + typeMap["uint120"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint120)(nil)), + } +} + +type int128 big.Int + +func (i *int128) UnmarshalCBOR(input []byte) error { + bi := 
(*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int128) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int128) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int128) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int128) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(128, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int128) private() {} + +func init() { + typeMap["int128"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int128)(nil)), + } +} + +type uint128 big.Int + +func (i *uint128) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint128) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint128) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint128) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint128) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 128 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint128) private() {} + +func init() { + typeMap["uint128"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint128)(nil)), + } +} + +type int136 big.Int + +func (i *int136) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int136) 
MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int136) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int136) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int136) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(136, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int136) private() {} + +func init() { + typeMap["int136"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int136)(nil)), + } +} + +type uint136 big.Int + +func (i *uint136) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint136) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint136) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint136) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint136) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 136 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint136) private() {} + +func init() { + typeMap["uint136"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint136)(nil)), + } +} + +type int144 big.Int + +func (i *int144) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int144) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int144) UnmarshalText(input []byte) error { + bi := 
(*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int144) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int144) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(144, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int144) private() {} + +func init() { + typeMap["int144"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int144)(nil)), + } +} + +type uint144 big.Int + +func (i *uint144) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint144) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint144) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint144) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint144) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 144 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint144) private() {} + +func init() { + typeMap["uint144"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint144)(nil)), + } +} + +type int152 big.Int + +func (i *int152) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int152) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int152) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i 
*int152) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int152) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(152, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int152) private() {} + +func init() { + typeMap["int152"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int152)(nil)), + } +} + +type uint152 big.Int + +func (i *uint152) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint152) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint152) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint152) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint152) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 152 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint152) private() {} + +func init() { + typeMap["uint152"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint152)(nil)), + } +} + +type int160 big.Int + +func (i *int160) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int160) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int160) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int160) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int160) Verify() error { + 
bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(160, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int160) private() {} + +func init() { + typeMap["int160"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int160)(nil)), + } +} + +type uint160 big.Int + +func (i *uint160) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint160) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint160) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint160) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint160) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 160 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint160) private() {} + +func init() { + typeMap["uint160"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint160)(nil)), + } +} + +type int168 big.Int + +func (i *int168) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int168) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int168) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int168) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int168) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(168, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int168) 
private() {} + +func init() { + typeMap["int168"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int168)(nil)), + } +} + +type uint168 big.Int + +func (i *uint168) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint168) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint168) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint168) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint168) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 168 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint168) private() {} + +func init() { + typeMap["uint168"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint168)(nil)), + } +} + +type int176 big.Int + +func (i *int176) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int176) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int176) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int176) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int176) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(176, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int176) private() {} + +func init() { + typeMap["int176"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: 
reflect.TypeOf((*int176)(nil)), + } +} + +type uint176 big.Int + +func (i *uint176) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint176) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint176) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint176) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint176) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 176 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint176) private() {} + +func init() { + typeMap["uint176"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint176)(nil)), + } +} + +type int184 big.Int + +func (i *int184) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int184) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int184) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int184) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int184) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(184, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int184) private() {} + +func init() { + typeMap["int184"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int184)(nil)), + } +} + +type uint184 big.Int + +func (i *uint184) UnmarshalCBOR(input []byte) error { + bi := 
(*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint184) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint184) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint184) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint184) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 184 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint184) private() {} + +func init() { + typeMap["uint184"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint184)(nil)), + } +} + +type int192 big.Int + +func (i *int192) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int192) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int192) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int192) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int192) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(192, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int192) private() {} + +func init() { + typeMap["int192"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int192)(nil)), + } +} + +type uint192 big.Int + +func (i *uint192) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint192) 
MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint192) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint192) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint192) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 192 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint192) private() {} + +func init() { + typeMap["uint192"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint192)(nil)), + } +} + +type int200 big.Int + +func (i *int200) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int200) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int200) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int200) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int200) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(200, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int200) private() {} + +func init() { + typeMap["int200"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int200)(nil)), + } +} + +type uint200 big.Int + +func (i *uint200) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint200) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint200) UnmarshalText(input []byte) error { + bi := 
(*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint200) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint200) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 200 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint200) private() {} + +func init() { + typeMap["uint200"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint200)(nil)), + } +} + +type int208 big.Int + +func (i *int208) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int208) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int208) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int208) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int208) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(208, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int208) private() {} + +func init() { + typeMap["int208"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int208)(nil)), + } +} + +type uint208 big.Int + +func (i *uint208) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint208) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint208) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i 
*uint208) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint208) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 208 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint208) private() {} + +func init() { + typeMap["uint208"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint208)(nil)), + } +} + +type int216 big.Int + +func (i *int216) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int216) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int216) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int216) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int216) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(216, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int216) private() {} + +func init() { + typeMap["int216"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int216)(nil)), + } +} + +type uint216 big.Int + +func (i *uint216) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint216) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint216) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint216) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint216) Verify() error { 
+ bi := (*big.Int)(i) + + if bi.BitLen() > 216 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint216) private() {} + +func init() { + typeMap["uint216"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint216)(nil)), + } +} + +type int224 big.Int + +func (i *int224) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int224) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int224) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int224) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int224) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(224, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int224) private() {} + +func init() { + typeMap["int224"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int224)(nil)), + } +} + +type uint224 big.Int + +func (i *uint224) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint224) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint224) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint224) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint224) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 224 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint224) 
private() {} + +func init() { + typeMap["uint224"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint224)(nil)), + } +} + +type int232 big.Int + +func (i *int232) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int232) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int232) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int232) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int232) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(232, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int232) private() {} + +func init() { + typeMap["int232"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int232)(nil)), + } +} + +type uint232 big.Int + +func (i *uint232) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint232) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint232) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint232) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint232) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 232 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint232) private() {} + +func init() { + typeMap["uint232"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: 
reflect.TypeOf((*uint232)(nil)), + } +} + +type int240 big.Int + +func (i *int240) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int240) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int240) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int240) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int240) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(240, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int240) private() {} + +func init() { + typeMap["int240"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int240)(nil)), + } +} + +type uint240 big.Int + +func (i *uint240) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint240) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint240) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint240) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint240) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 240 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint240) private() {} + +func init() { + typeMap["uint240"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint240)(nil)), + } +} + +type int248 big.Int + +func (i *int248) UnmarshalCBOR(input []byte) error { + bi := 
(*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int248) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int248) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int248) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int248) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(248, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int248) private() {} + +func init() { + typeMap["int248"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int248)(nil)), + } +} + +type uint248 big.Int + +func (i *uint248) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint248) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint248) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint248) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint248) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 248 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint248) private() {} + +func init() { + typeMap["uint248"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint248)(nil)), + } +} + +type int256 big.Int + +func (i *int256) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *int256) 
MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *int256) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *int256) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *int256) Verify() error { + bi := (*big.Int)(i) + + if !codec.FitsInNBitsSigned(256, bi) { + return types.ErrInvalidType + } + + return nil +} + +func (i *int256) private() {} + +func init() { + typeMap["int256"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*int256)(nil)), + } +} + +type uint256 big.Int + +func (i *uint256) UnmarshalCBOR(input []byte) error { + bi := (*big.Int)(i) + if err := cbor.Unmarshal(input, bi); err != nil { + return err + } + + return i.Verify() +} + +func (i *uint256) MarshalCBOR() ([]byte, error) { + return cbor.Marshal((*big.Int)(i)) +} + +func (i *uint256) UnmarshalText(input []byte) error { + bi := (*big.Int)(i) + if _, ok := bi.SetString(string(input), 10); !ok { + return types.ErrInvalidType + } + + return i.Verify() +} + +func (i *uint256) MarshalText() ([]byte, error) { + bi := (*big.Int)(i) + return []byte(bi.String()), nil +} + +func (i *uint256) Verify() error { + bi := (*big.Int)(i) + + if bi.BitLen() > 256 || bi.Sign() < 0 { + return types.ErrInvalidType + } + + return nil +} + +func (i *uint256) private() {} + +func init() { + typeMap["uint256"] = &ABIEncodingType{ + native: reflect.TypeOf((*big.Int)(nil)), + checked: reflect.TypeOf((*uint256)(nil)), + } +} diff --git a/core/services/relay/evm/types/int_types_test.go b/core/services/relay/evm/types/int_types_test.go new file mode 100644 index 00000000..70712f8c --- /dev/null +++ b/core/services/relay/evm/types/int_types_test.go @@ -0,0 +1,54 @@ +package types + +import ( + "errors" + "fmt" + "math/big" + "reflect" + "testing" + + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/types" +) + +func TestIntTypes(t *testing.T) { + t.Parallel() + for i := 24; i <= 256; i += 8 { + if i == 64 || i == 32 { + continue + } + t.Run(fmt.Sprintf("int%v", i), func(t *testing.T) { + tpe, ok := GetAbiEncodingType(fmt.Sprintf("int%v", i)) + require.True(t, ok) + minVal := new(big.Int).Neg(new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(i-1)), nil)) + maxVal := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(i-1)), nil), big.NewInt(1)) + assertBigIntBounds(t, tpe, minVal, maxVal) + }) + + t.Run(fmt.Sprintf("uint%v", i), func(t *testing.T) { + tep, ok := GetAbiEncodingType(fmt.Sprintf("uint%v", i)) + require.True(t, ok) + minVal := big.NewInt(0) + maxVal := new(big.Int).Sub(new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(i)), nil), big.NewInt(1)) + assertBigIntBounds(t, tep, minVal, maxVal) + }) + } +} + +func assertBigIntBounds(t *testing.T, tpe *ABIEncodingType, min, max *big.Int) { + t.Helper() + assert.Equal(t, reflect.TypeOf(min), tpe.native) + assert.True(t, tpe.checked.ConvertibleTo(reflect.TypeOf(min))) + minMinusOne := new(big.Int).Sub(min, big.NewInt(1)) + maxPlusOne := new(big.Int).Add(max, big.NewInt(1)) + sbi := reflect.ValueOf(min).Convert(tpe.checked).Interface().(SizedBigInt) + assert.NoError(t, sbi.Verify()) + sbi = reflect.ValueOf(max).Convert(tpe.checked).Interface().(SizedBigInt) + assert.NoError(t, sbi.Verify()) + sbi = reflect.ValueOf(minMinusOne).Convert(tpe.checked).Interface().(SizedBigInt) + assert.True(t, errors.Is(types.ErrInvalidType, sbi.Verify())) + sbi = reflect.ValueOf(maxPlusOne).Convert(tpe.checked).Interface().(SizedBigInt) + assert.True(t, errors.Is(types.ErrInvalidType, sbi.Verify())) +} diff --git a/core/services/relay/evm/types/mocks/log_poller_wrapper.go b/core/services/relay/evm/types/mocks/log_poller_wrapper.go new file mode 100644 index 00000000..3f9758fe --- /dev/null +++ 
// Code generated by mockery v2.38.0. DO NOT EDIT.
// NOTE(review): regenerate with mockery rather than editing by hand.

package mocks

import (
	context "context"

	types "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types"
	mock "github.com/stretchr/testify/mock"
)

// LogPollerWrapper is an autogenerated mock type for the LogPollerWrapper type
type LogPollerWrapper struct {
	mock.Mock
}

// Close provides a mock function with given fields:
func (_m *LogPollerWrapper) Close() error {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for Close")
	}

	var r0 error
	// Prefer a test-supplied return function when one was registered;
	// otherwise use the recorded static return value.
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// HealthReport provides a mock function with given fields:
func (_m *LogPollerWrapper) HealthReport() map[string]error {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for HealthReport")
	}

	var r0 map[string]error
	if rf, ok := ret.Get(0).(func() map[string]error); ok {
		r0 = rf()
	} else {
		// nil check avoids a panicking type assertion when the test
		// registered a nil return.
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(map[string]error)
		}
	}

	return r0
}

// LatestEvents provides a mock function with given fields:
func (_m *LogPollerWrapper) LatestEvents() ([]types.OracleRequest, []types.OracleResponse, error) {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for LatestEvents")
	}

	var r0 []types.OracleRequest
	var r1 []types.OracleResponse
	var r2 error
	if rf, ok := ret.Get(0).(func() ([]types.OracleRequest, []types.OracleResponse, error)); ok {
		return rf()
	}
	if rf, ok := ret.Get(0).(func() []types.OracleRequest); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]types.OracleRequest)
		}
	}

	if rf, ok := ret.Get(1).(func() []types.OracleResponse); ok {
		r1 = rf()
	} else {
		if ret.Get(1) != nil {
			r1 = ret.Get(1).([]types.OracleResponse)
		}
	}

	if rf, ok := ret.Get(2).(func() error); ok {
		r2 = rf()
	} else {
		r2 = ret.Error(2)
	}

	return r0, r1, r2
}

// Name provides a mock function with given fields:
func (_m *LogPollerWrapper) Name() string {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for Name")
	}

	var r0 string
	if rf, ok := ret.Get(0).(func() string); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(string)
	}

	return r0
}

// Ready provides a mock function with given fields:
func (_m *LogPollerWrapper) Ready() error {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for Ready")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func() error); ok {
		r0 = rf()
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Start provides a mock function with given fields: _a0
func (_m *LogPollerWrapper) Start(_a0 context.Context) error {
	ret := _m.Called(_a0)

	if len(ret) == 0 {
		panic("no return value specified for Start")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// SubscribeToUpdates provides a mock function with given fields: name, subscriber
func (_m *LogPollerWrapper) SubscribeToUpdates(name string, subscriber types.RouteUpdateSubscriber) {
	_m.Called(name, subscriber)
}
// NewLogPollerWrapper creates a new instance of LogPollerWrapper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewLogPollerWrapper(t interface {
	mock.TestingT
	Cleanup(func())
}) *LogPollerWrapper {
	mock := &LogPollerWrapper{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}

// GetMaxSize returns the worst-case ABI-encoded size, in bytes, of args,
// assuming every dynamically sized value (slice, bytes, string) holds at
// most n elements/bytes. It returns ErrInvalidType when a dynamic type is
// nested inside another composite type, because its size cannot be bounded.
func GetMaxSize(n int, args abi.Arguments) (int, error) {
	size := 0
	for _, arg := range args {
		tmp := arg.Type
		argSize, _, err := getTypeSize(n, &tmp, true, false)
		if err != nil {
			return 0, err
		}
		size += argSize
	}

	return size, nil
}

// getTypeSize returns (encodedSize, isDynamic, err) for a single ABI type.
// dynamicTypeAllowed is false once we are inside a fixed-size container;
// isNested reports whether t sits inside another composite type.
func getTypeSize(n int, t *abi.Type, dynamicTypeAllowed bool, isNested bool) (int, bool, error) {
	// See https://docs.soliditylang.org/en/latest/abi-spec.html#formal-specification-of-the-encoding
	switch t.T {
	case abi.ArrayTy:
		// Fixed-size array: element count is known, no offset/length words.
		elmSize, _, err := getTypeSize(n, t.Elem, false, true)
		return t.Size * elmSize, false, err
	case abi.SliceTy:
		if !dynamicTypeAllowed {
			return 0, false, commontypes.ErrInvalidType
		}
		elmSize, _, err := getTypeSize(n, t.Elem, false, true)
		return 32 /*header*/ + 32 /*footer*/ + elmSize*n, true, err
	case abi.BytesTy, abi.StringTy:
		if !dynamicTypeAllowed {
			return 0, false, commontypes.ErrInvalidType
		}
		totalSize := (n + 31) / 32 * 32 // strings and bytes are padded to 32 bytes
		return 32 /*header*/ + 32 /*footer*/ + totalSize, true, nil
	case abi.TupleTy:
		return getTupleSize(n, t, isNested)
	default:
		// types are padded to 32 bytes
		return 32, false, nil
	}
}

// getTupleSize sums the sizes of the tuple's elements.
// No header or footer, because if the tuple is dynamically sized we would need to know the inner slice sizes
// so it would return error for that element.
func getTupleSize(n int, t *abi.Type, isNested bool) (int, bool, error) {
	size := 0
	dynamic := false
	for _, elm := range t.TupleElems {
		// Dynamic members are only allowed when this tuple is top-level.
		argSize, dynamicArg, err := getTypeSize(n, elm, !isNested, true)
		if err != nil {
			return 0, false, err
		}
		dynamic = dynamic || dynamicArg
		size += argSize
	}

	if dynamic {
		// offset for the element needs to be included if there are dynamic elements
		size += 32
	}

	return size, dynamic, nil
}
i80 := []*big.Int{big.NewInt(9), big.NewInt(2), big.NewInt(1), big.NewInt(3), big.NewInt(5), big.NewInt(6), big.NewInt(2), big.NewInt(1), big.NewInt(2), big.NewInt(3)} + i256 := []*big.Int{big.NewInt(119), big.NewInt(112), big.NewInt(1), big.NewInt(3), big.NewInt(5), big.NewInt(6), big.NewInt(2), big.NewInt(1), big.NewInt(2), big.NewInt(3)} + b3 := [][3]byte{{1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}, {1, 2, 3}} + b32 := [][32]byte{make32Bytes(1), make32Bytes(2), make32Bytes(3), make32Bytes(4), make32Bytes(5), make32Bytes(6), make32Bytes(7), make32Bytes(8), make32Bytes(9), make32Bytes(10)} + tf := []bool{true, false, true, false, true, false, true, false, true, false} + runSizeTest(t, anyNumElements, args, i8, i80, i256, b3, b32, tf) + }) + + t.Run("Arrays of basic types all encode to 32 bytes each", func(t *testing.T) { + args := abi.Arguments{ + {Name: "I8", Type: mustType(t, "int8[3]")}, + {Name: "I80", Type: mustType(t, "int80[3]")}, + {Name: "I256", Type: mustType(t, "int256[3]")}, + {Name: "B3", Type: mustType(t, "bytes3[3]")}, + {Name: "B32", Type: mustType(t, "bytes32[3]")}, + {Name: "TF", Type: mustType(t, "bool[3]")}, + } + + i8 := [3]int8{9, 2, 1} + i80 := [3]*big.Int{big.NewInt(9), big.NewInt(2), big.NewInt(1)} + i256 := [3]*big.Int{big.NewInt(119), big.NewInt(112), big.NewInt(1)} + b3 := [3][3]byte{{1, 2, 3}, {1, 2, 3}, {1, 2, 3}} + b32 := [3][32]byte{make32Bytes(1), make32Bytes(2), make32Bytes(3)} + tf := [3]bool{true, false, true} + runSizeTest(t, anyNumElements, args, i8, i80, i256, b3, b32, tf) + }) + + t.Run("Tuples are a sum of their elements", func(t *testing.T) { + tuple1 := []abi.ArgumentMarshaling{ + {Name: "I8", Type: "int8"}, + {Name: "I80", Type: "int80"}, + {Name: "I256", Type: "int256"}, + {Name: "B3", Type: "bytes3"}, + {Name: "B32", Type: "bytes32"}, + {Name: "TF", Type: "bool"}, + } + t1, err := abi.NewType("tuple", "", tuple1) + require.NoError(t, err) + + tuple2 := 
[]abi.ArgumentMarshaling{ + {Name: "I80", Type: "int80"}, + {Name: "TF", Type: "bool"}, + } + t2, err := abi.NewType("tuple", "", tuple2) + require.NoError(t, err) + + args := abi.Arguments{ + {Name: "t1", Type: t1}, + {Name: "t2", Type: t2}, + } + arg1 := struct { + I8 int8 + I80 *big.Int + I256 *big.Int + B3 [3]byte + B32 [32]byte + TF bool + }{ + int8(9), big.NewInt(3), big.NewInt(200), [3]byte{1, 3, 4}, make32Bytes(1), true, + } + + arg2 := struct { + I80 *big.Int + TF bool + }{ + big.NewInt(3), true, + } + runSizeTest(t, anyNumElements, args, arg1, arg2) + }) + + t.Run("Slices of tuples are a sum of their elements with header and footer", func(t *testing.T) { + tuple1 := []abi.ArgumentMarshaling{ + {Name: "I80", Type: "int80"}, + {Name: "TF", Type: "bool"}, + } + t1, err := abi.NewType("tuple[]", "", tuple1) + require.NoError(t, err) + + args := abi.Arguments{ + {Name: "t1", Type: t1}, + } + arg1 := []struct { + I80 *big.Int + TF bool + }{ + {big.NewInt(1), true}, + {big.NewInt(2), true}, + {big.NewInt(3), true}, + {big.NewInt(4), false}, + {big.NewInt(5), true}, + {big.NewInt(6), true}, + {big.NewInt(7), true}, + {big.NewInt(8), false}, + {big.NewInt(9), true}, + {big.NewInt(10), true}, + } + runSizeTest(t, anyNumElements, args, arg1) + }) + + t.Run("Arrays of tuples are a sum of their elements", func(t *testing.T) { + tuple1 := []abi.ArgumentMarshaling{ + {Name: "I80", Type: "int80"}, + {Name: "TF", Type: "bool"}, + } + t1, err := abi.NewType("tuple[3]", "", tuple1) + require.NoError(t, err) + + args := abi.Arguments{ + {Name: "t1", Type: t1}, + } + arg1 := []struct { + I80 *big.Int + TF bool + }{ + {big.NewInt(1), true}, + {big.NewInt(2), true}, + {big.NewInt(3), true}, + } + runSizeTest(t, anyNumElements, args, arg1) + + }) + + t.Run("Bytes pack themselves", func(t *testing.T) { + args := abi.Arguments{{Name: "B", Type: mustType(t, "bytes")}} + t.Run("No padding needed", func(t *testing.T) { + padded := 
[]byte("12345789022345678903234567890412345678905123456789061234") + runSizeTest(t, 64, args, padded) + }) + t.Run("Padding needed", func(t *testing.T) { + needsPadding := []byte("12345789022345678903234567890412345678905123456") + runSizeTest(t, 56, args, needsPadding) + }) + }) + + t.Run("Strings pack themselves", func(t *testing.T) { + args := abi.Arguments{{Name: "B", Type: mustType(t, "string")}} + t.Run("No padding needed", func(t *testing.T) { + padded := "12345789022345678903234567890412345678905123456789061234" + runSizeTest(t, 64, args, padded) + }) + t.Run("Padding needed", func(t *testing.T) { + needsPadding := "12345789022345678903234567890412345678905123456" + runSizeTest(t, 56, args, needsPadding) + }) + }) + + t.Run("Nested dynamic types return errors", func(t *testing.T) { + t.Run("Slice in slice", func(t *testing.T) { + args := abi.Arguments{{Name: "B", Type: mustType(t, "int32[][]")}} + _, err := types.GetMaxSize(anyNumElements, args) + assert.IsType(t, commontypes.ErrInvalidType, err) + }) + t.Run("Slice in array", func(t *testing.T) { + args := abi.Arguments{{Name: "B", Type: mustType(t, "int32[][2]")}} + _, err := types.GetMaxSize(anyNumElements, args) + assert.IsType(t, commontypes.ErrInvalidType, err) + }) + }) + + t.Run("Slices in a top level tuple works as-if they are the sized element", func(t *testing.T) { + tuple1 := []abi.ArgumentMarshaling{ + {Name: "I80", Type: "int80[]"}, + {Name: "TF", Type: "bool[]"}, + } + t1, err := abi.NewType("tuple", "", tuple1) + require.NoError(t, err) + args := abi.Arguments{{Name: "tuple", Type: t1}} + + arg1 := struct { + I80 []*big.Int + TF []bool + }{ + I80: []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4), big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9), big.NewInt(10)}, + TF: []bool{true, true, true, false, true, true, true, false, true, true}, + } + + runSizeTest(t, anyNumElements, args, arg1) + }) + + t.Run("Nested dynamic tuples return errors", func(t 
*testing.T) { + tuple1 := []abi.ArgumentMarshaling{ + {Name: "I8", Type: "int8"}, + {Name: "I80", Type: "int80"}, + {Name: "I256", Type: "int256"}, + {Name: "B3", Type: "bytes3"}, + {Name: "B32", Type: "bytes32"}, + {Name: "TF", Type: "bool[]"}, + } + + tuple2 := []abi.ArgumentMarshaling{ + {Name: "I80", Type: "int80"}, + {Name: "T1", Type: "tuple", Components: tuple1}, + } + t2, err := abi.NewType("tuple", "", tuple2) + require.NoError(t, err) + + args := abi.Arguments{{Name: "t2", Type: t2}} + _, err = types.GetMaxSize(anyNumElements, args) + assert.IsType(t, commontypes.ErrInvalidType, err) + }) +} + +func runSizeTest(t *testing.T, n int, args abi.Arguments, params ...any) { + + actual, err := types.GetMaxSize(n, args) + require.NoError(t, err) + + expected, err := args.Pack(params...) + require.NoError(t, err) + assert.Equal(t, len(expected), actual) +} + +func mustType(t *testing.T, name string) abi.Type { + aType, err := abi.NewType(name, "", []abi.ArgumentMarshaling{}) + require.NoError(t, err) + return aType +} + +func make32Bytes(firstByte byte) [32]byte { + return [32]byte{firstByte, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3} +} diff --git a/core/services/relay/evm/types/types.go b/core/services/relay/evm/types/types.go new file mode 100644 index 00000000..875e78ff --- /dev/null +++ b/core/services/relay/evm/types/types.go @@ -0,0 +1,198 @@ +package types + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + "gopkg.in/guregu/null.v2" + + "github.com/goplugin/plugin-common/pkg/codec" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +type ChainReaderConfig struct { + // Contracts key is contract name + Contracts map[string]ChainContractReader `json:"contracts" 
// ReadType distinguishes how a chain reader definition sources data: by
// calling a contract method or by reading a logged event.
type ReadType int

const (
	Method ReadType = iota
	Event
)

// String returns the canonical lower-case text form, or a "ReadType(n)"
// placeholder for values outside the known set.
func (r ReadType) String() string {
	if r == Method {
		return "method"
	}
	if r == Event {
		return "event"
	}
	return fmt.Sprintf("ReadType(%d)", r)
}

// MarshalText implements encoding.TextMarshaler using the String form.
func (r ReadType) MarshalText() ([]byte, error) {
	return []byte(r.String()), nil
}

// UnmarshalText implements encoding.TextUnmarshaler; it accepts exactly
// "method" or "event" and errors on anything else.
func (r *ReadType) UnmarshalText(text []byte) error {
	s := string(text)
	if s == "method" {
		*r = Method
		return nil
	}
	if s == "event" {
		*r = Event
		return nil
	}
	return fmt.Errorf("unrecognized ReadType: %s", s)
}
if not, + // make this private and wrap the arg fields with funcs on RelayOpts + types.RelayArgs + c *RelayConfig +} + +func NewRelayOpts(args types.RelayArgs) *RelayOpts { + return &RelayOpts{ + RelayArgs: args, + c: nil, // lazy initialization + } +} + +func (o *RelayOpts) RelayConfig() (RelayConfig, error) { + var empty RelayConfig + //TODO this should be done once and the error should be cached + if o.c == nil { + var c RelayConfig + err := json.Unmarshal(o.RelayArgs.RelayConfig, &c) + if err != nil { + return empty, fmt.Errorf("%w: failed to deserialize relay config: %w", ErrBadRelayConfig, err) + } + o.c = &c + } + return *o.c, nil +} + +type ConfigPoller interface { + ocrtypes.ContractConfigTracker + + Start() + Close() error + Replay(ctx context.Context, fromBlock int64) error +} + +// TODO(FUN-668): Migrate this fully into types.FunctionsProvider +type FunctionsProvider interface { + types.FunctionsProvider + LogPollerWrapper() LogPollerWrapper +} + +type OracleRequest struct { + RequestId [32]byte + RequestingContract common.Address + RequestInitiator common.Address + SubscriptionId uint64 + SubscriptionOwner common.Address + Data []byte + DataVersion uint16 + Flags [32]byte + CallbackGasLimit uint64 + TxHash common.Hash + CoordinatorContract common.Address + OnchainMetadata []byte +} + +type OracleResponse struct { + RequestId [32]byte +} + +type RouteUpdateSubscriber interface { + UpdateRoutes(activeCoordinator common.Address, proposedCoordinator common.Address) error +} + +// A LogPoller wrapper that understands router proxy contracts +// +//go:generate mockery --quiet --name LogPollerWrapper --output ./mocks/ --case=underscore +type LogPollerWrapper interface { + services.Service + LatestEvents() ([]OracleRequest, []OracleResponse, error) + + // TODO (FUN-668): Remove from the LOOP interface and only use internally within the EVM relayer + SubscribeToUpdates(name string, subscriber RouteUpdateSubscriber) +} diff --git 
a/core/services/relay/evm/types/types_test.go b/core/services/relay/evm/types/types_test.go new file mode 100644 index 00000000..d2554a2c --- /dev/null +++ b/core/services/relay/evm/types/types_test.go @@ -0,0 +1,41 @@ +package types + +import ( + "fmt" + "testing" + + "github.com/pelletier/go-toml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +// ChainID *big.Big `json:"chainID"` +// FromBlock uint64 `json:"fromBlock"` + +// // Contract-specific +// EffectiveTransmitterAddress null.String `json:"effectiveTransmitterAddress"` +// SendingKeys pq.StringArray `json:"sendingKeys"` + +// // Mercury-specific +// FeedID *common.Hash `json:"feedID"` +func Test_RelayConfig(t *testing.T) { + cid := testutils.NewRandomEVMChainID() + fromBlock := uint64(2222) + feedID := utils.NewHash() + rawToml := fmt.Sprintf(` +ChainID = "%s" +FromBlock = %d +FeedID = "0x%x" +`, cid, fromBlock, feedID[:]) + + var rc RelayConfig + err := toml.Unmarshal([]byte(rawToml), &rc) + require.NoError(t, err) + + assert.Equal(t, cid.String(), rc.ChainID.String()) + assert.Equal(t, fromBlock, rc.FromBlock) + assert.Equal(t, feedID.Hex(), rc.FeedID.Hex()) +} diff --git a/core/services/relay/grpc_provider_server.go b/core/services/relay/grpc_provider_server.go new file mode 100644 index 00000000..01dae8cd --- /dev/null +++ b/core/services/relay/grpc_provider_server.go @@ -0,0 +1,68 @@ +package relay + +import ( + "context" + "net" + + "go.uber.org/multierr" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +type ProviderServer struct { + s *grpc.Server + lis net.Listener + lggr logger.Logger + conns []*grpc.ClientConn +} + +func (p *ProviderServer) Start(ctx 
context.Context) error { + p.serve() + return nil +} + +func (p *ProviderServer) Close() error { + var err error + for _, c := range p.conns { + err = multierr.Combine(err, c.Close()) + } + p.s.Stop() + return err +} + +func (p *ProviderServer) GetConn() (*grpc.ClientConn, error) { + cc, err := grpc.Dial(p.lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + p.conns = append(p.conns, cc) + return cc, err +} + +// NewProviderServer creates a GRPC server that will wrap a provider, this is a workaround to test the Node API PoC until the EVM relayer is loopifyed +func NewProviderServer(p types.PluginProvider, pType types.OCR2PluginType, lggr logger.Logger) (*ProviderServer, error) { + lis, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + ps := ProviderServer{ + s: grpc.NewServer(), + lis: lis, + lggr: lggr.Named("EVM.ProviderServer"), + } + err = loop.RegisterStandAloneProvider(ps.s, p, pType) + if err != nil { + return nil, err + } + + return &ps, nil +} + +func (p *ProviderServer) serve() { + go func() { + if err := p.s.Serve(p.lis); err != nil { + p.lggr.Errorf("Failed to serve EVM provider server: %v", err) + } + }() +} diff --git a/core/services/relay/grpc_provider_server_test.go b/core/services/relay/grpc_provider_server_test.go new file mode 100644 index 00000000..1d74bd78 --- /dev/null +++ b/core/services/relay/grpc_provider_server_test.go @@ -0,0 +1,28 @@ +package relay + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func TestProviderServer(t *testing.T) { + r := &mockRelayer{} + sa := NewServerAdapter(r, mockRelayerExt{}) + mp, _ := sa.NewPluginProvider(testutils.Context(t), types.RelayArgs{ProviderType: string(types.Median)}, types.PluginArgs{}) + + lggr := logger.TestLogger(t) + _, err := NewProviderServer(mp, 
type Network = string
type ChainID = string

const (
	EVM      = "evm"
	Cosmos   = "cosmos"
	Solana   = "solana"
	StarkNet = "starknet"
)

// SupportedRelays is the set of networks a relayer ID may reference.
var SupportedRelays = map[Network]struct{}{
	EVM:      {},
	Cosmos:   {},
	Solana:   {},
	StarkNet: {},
}

// ID uniquely identifies a relayer by network and chain id
type ID struct {
	Network Network
	ChainID ChainID
}

// Name returns the canonical "<network>.<chainID>" form of the ID.
func (i *ID) Name() string {
	return fmt.Sprintf("%s.%s", i.Network, i.ChainID)
}

func (i *ID) String() string {
	return i.Name()
}

// NewID returns an ID for the given network and chain id.
func NewID(n Network, c ChainID) ID {
	return ID{Network: n, ChainID: c}
}

// idRegex anchors on "<supported-network>." at the start of the string.
var idRegex = regexp.MustCompile(
	fmt.Sprintf("^((%s)|(%s)|(%s)|(%s))\\.", EVM, Cosmos, Solana, StarkNet),
)

// UnmarshalString parses "<network>.<chainID>" into i. i is only mutated on
// success; on failure it is left untouched and an error is returned.
func (i *ID) UnmarshalString(s string) error {
	idxs := idRegex.FindStringIndex(s)
	if idxs == nil {
		return fmt.Errorf("error unmarshaling Identifier. %q does not match expected pattern", s)
	}
	// ignore the `.` in the match by dropping last rune
	network := s[idxs[0] : idxs[1]-1]
	chainID := s[idxs[1]:]
	// Direct membership test instead of iterating the map; the regex already
	// restricts the network, so this is a belt-and-braces check.
	if _, ok := SupportedRelays[network]; !ok {
		return fmt.Errorf("error unmarshaling identifier: did not find network in supported list %q", network)
	}
	i.ChainID = chainID
	i.Network = network
	return nil
}
b/core/services/relay/relay_test.go @@ -0,0 +1,195 @@ +package relay + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/libocr/offchainreporting2/reportingplugin/median" + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestIdentifier_UnmarshalString(t *testing.T) { + type fields struct { + Network Network + ChainID ChainID + } + type args struct { + s string + } + tests := []struct { + name string + want fields + args args + wantErr bool + }{ + {name: "evm", + args: args{s: "evm.1"}, + wantErr: false, + want: fields{Network: EVM, ChainID: "1"}, + }, + {name: "bad network", + args: args{s: "notANetwork.1"}, + wantErr: true, + }, + {name: "bad pattern", + args: args{s: "evm_1"}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + i := &ID{} + err := i.UnmarshalString(tt.args.s) + if (err != nil) != tt.wantErr { + t.Errorf("Identifier.UnmarshalString() error = %v, wantErr %v", err, tt.wantErr) + } + assert.Equal(t, tt.want.Network, i.Network) + assert.Equal(t, tt.want.ChainID, i.ChainID) + }) + } +} + +func TestNewID(t *testing.T) { + rid := NewID(EVM, "chain id") + assert.Equal(t, EVM, rid.Network) + assert.Equal(t, "chain id", rid.ChainID) +} + +type staticMedianProvider struct { + types.MedianProvider +} + +func (s staticMedianProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + return nil +} + +func (s staticMedianProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker { + return nil +} + +func (s staticMedianProvider) ContractTransmitter() ocrtypes.ContractTransmitter { + return nil +} + +func (s staticMedianProvider) ReportCodec() median.ReportCodec { + return nil +} + +func (s staticMedianProvider) MedianContract() median.MedianContract { + return nil +} + +func (s 
staticMedianProvider) OnchainConfigCodec() median.OnchainConfigCodec { + return nil +} + +type staticFunctionsProvider struct { + types.FunctionsProvider +} + +type staticMercuryProvider struct { + types.MercuryProvider +} + +type staticAutomationProvider struct { + types.AutomationProvider +} + +type staticPluginProvider struct { + types.PluginProvider +} + +type mockRelayer struct { + types.Relayer +} + +func (m *mockRelayer) NewMedianProvider(rargs types.RelayArgs, pargs types.PluginArgs) (types.MedianProvider, error) { + return staticMedianProvider{}, nil +} + +func (m *mockRelayer) NewFunctionsProvider(rargs types.RelayArgs, pargs types.PluginArgs) (types.FunctionsProvider, error) { + return staticFunctionsProvider{}, nil +} + +func (m *mockRelayer) NewMercuryProvider(rargs types.RelayArgs, pargs types.PluginArgs) (types.MercuryProvider, error) { + return staticMercuryProvider{}, nil +} + +func (m *mockRelayer) NewAutomationProvider(rargs types.RelayArgs, pargs types.PluginArgs) (types.AutomationProvider, error) { + return staticAutomationProvider{}, nil +} + +func (m *mockRelayer) NewPluginProvider(rargs types.RelayArgs, pargs types.PluginArgs) (types.PluginProvider, error) { + return staticPluginProvider{}, nil +} + +type mockRelayerExt struct { + loop.RelayerExt +} + +func isType[T any](p any) bool { + _, ok := p.(T) + return ok +} + +func TestRelayerServerAdapter(t *testing.T) { + r := &mockRelayer{} + sa := NewServerAdapter(r, mockRelayerExt{}) + + testCases := []struct { + ProviderType string + Test func(p any) bool + Error string + }{ + { + ProviderType: string(types.Median), + Test: isType[types.MedianProvider], + }, + { + ProviderType: string(types.Functions), + Test: isType[types.FunctionsProvider], + }, + { + ProviderType: string(types.Mercury), + Test: isType[types.MercuryProvider], + }, + { + ProviderType: string(types.CCIPCommit), + Error: "provider type not supported", + }, + { + ProviderType: string(types.CCIPExecution), + Error: "provider type 
not supported", + }, + { + ProviderType: "unknown", + Error: "provider type not recognized", + }, + { + ProviderType: string(types.GenericPlugin), + Test: isType[types.PluginProvider], + }, + } + + ctx := testutils.Context(t) + for _, tc := range testCases { + pp, err := sa.NewPluginProvider( + ctx, + types.RelayArgs{ProviderType: tc.ProviderType}, + types.PluginArgs{}, + ) + + if tc.Error != "" { + assert.ErrorContains(t, err, tc.Error) + } else { + assert.NoError(t, err) + assert.True(t, tc.Test(pp)) + } + } +} diff --git a/core/services/s4/README.md b/core/services/s4/README.md new file mode 100644 index 00000000..cd129035 --- /dev/null +++ b/core/services/s4/README.md @@ -0,0 +1,3 @@ +# S4: Simple Shared Storage Service + +See the corresponding **CLIP** describing the proposal. \ No newline at end of file diff --git a/core/services/s4/address_range.go b/core/services/s4/address_range.go new file mode 100644 index 00000000..57f0a822 --- /dev/null +++ b/core/services/s4/address_range.go @@ -0,0 +1,105 @@ +package s4 + +import ( + "bytes" + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/common" + + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +// AddressRange represents a range of Ethereum addresses. +type AddressRange struct { + // MinAddress (inclusive). + MinAddress *ubig.Big + // MaxAddress (inclusive). + MaxAddress *ubig.Big +} + +var ( + ErrInvalidIntervals = errors.New("invalid intervals value") + MinAddress = ubig.New(common.BytesToAddress(bytes.Repeat([]byte{0x00}, common.AddressLength)).Big()) + MaxAddress = ubig.New(common.BytesToAddress(bytes.Repeat([]byte{0xff}, common.AddressLength)).Big()) +) + +// NewFullAddressRange creates AddressRange for all address space: 0x00..-0xFF.. +func NewFullAddressRange() *AddressRange { + return &AddressRange{ + MinAddress: MinAddress, + MaxAddress: MaxAddress, + } +} + +// NewSingleAddressRange creates AddressRange for a single address. 
func NewSingleAddressRange(address *ubig.Big) (*AddressRange, error) {
	// Reject nil and anything outside the valid 20-byte address space.
	if address == nil || address.Cmp(MinAddress) < 0 || address.Cmp(MaxAddress) > 0 {
		return nil, errors.New("invalid address")
	}
	return &AddressRange{
		MinAddress: address,
		MaxAddress: address,
	}, nil
}

// NewInitialAddressRangeForIntervals splits the full address space into the
// given number of equally-sized intervals and returns a range covering the
// first one. The number of intervals must be > 0 and a power of 2; otherwise
// ErrInvalidIntervals is returned.
func NewInitialAddressRangeForIntervals(intervals uint) (*AddressRange, error) {
	// Power-of-two check: a power of 2 has exactly one bit set,
	// so n & (n-1) must be zero.
	if intervals == 0 || (intervals&(intervals-1) != 0) {
		return nil, ErrInvalidIntervals
	}

	if intervals == 1 {
		return NewFullAddressRange(), nil
	}

	// interval = (MaxAddress + 1) / intervals. The division is exact because
	// intervals is a power of 2 and the address space size is a power of 2.
	divisor := big.NewInt(int64(intervals))
	maxPlusOne := MaxAddress.Add(ubig.NewI(1))
	interval := ubig.New(new(big.Int).Div(maxPlusOne.ToInt(), divisor))

	return &AddressRange{
		MinAddress: MinAddress,
		MaxAddress: MinAddress.Add(interval).Sub(ubig.NewI(1)),
	}, nil
}

// Advance moves the AddressRange forward by r.Interval(). It has no effect
// for NewFullAddressRange(). When the range reaches the end of the address
// space, it wraps around to the initial state, as returned by
// NewInitialAddressRangeForIntervals().
func (r *AddressRange) Advance() {
	if r == nil {
		return
	}

	interval := r.Interval()

	r.MinAddress = r.MinAddress.Add(interval)
	r.MaxAddress = r.MaxAddress.Add(interval)

	// Moved past the end of the address space: reset to the first interval.
	if r.MinAddress.Cmp(MaxAddress) >= 0 {
		r.MinAddress = MinAddress
		r.MaxAddress = MinAddress.Add(interval).Sub(ubig.NewI(1))
	}

	// Clamp the final interval so MaxAddress never exceeds the address space.
	if r.MaxAddress.Cmp(MaxAddress) > 0 {
		r.MaxAddress = MaxAddress
	}
}

// Contains returns true if the given address belongs to the range
// (inclusive on both ends).
func (r *AddressRange) Contains(address *ubig.Big) bool {
	if r == nil {
		return false
	}
	return r.MinAddress.Cmp(address) <= 0 && r.MaxAddress.Cmp(address) >= 0
}

// Interval returns the size of the range: MaxAddress - MinAddress + 1.
+func (r *AddressRange) Interval() *ubig.Big { + if r == nil { + return nil + } + return r.MaxAddress.Sub(r.MinAddress).Add(ubig.NewI(1)) +} diff --git a/core/services/s4/address_range_test.go b/core/services/s4/address_range_test.go new file mode 100644 index 00000000..1276e8a3 --- /dev/null +++ b/core/services/s4/address_range_test.go @@ -0,0 +1,103 @@ +package s4_test + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/s4" + + "github.com/stretchr/testify/assert" +) + +func TestAddressRange_NewFullAddressRange(t *testing.T) { + t.Parallel() + + full := s4.NewFullAddressRange() + assert.Equal(t, s4.MinAddress, full.MinAddress) + assert.Equal(t, s4.MaxAddress, full.MaxAddress) + + t.Run("advance has no effect", func(t *testing.T) { + full.Advance() + assert.Equal(t, s4.MinAddress, full.MinAddress) + assert.Equal(t, s4.MaxAddress, full.MaxAddress) + }) +} + +func TestAddressRange_NewSingleAddressRange(t *testing.T) { + t.Parallel() + + addr := big.NewI(0x123) + sar, err := s4.NewSingleAddressRange(addr) + assert.NoError(t, err) + assert.Equal(t, addr, sar.MinAddress) + assert.Equal(t, addr, sar.MaxAddress) + assert.True(t, sar.Contains(addr)) + assert.Equal(t, int64(1), sar.Interval().Int64()) + + sar.Advance() + assert.False(t, sar.Contains(addr)) +} + +func TestAddressRange_NewInitialAddressRangeForIntervals(t *testing.T) { + t.Parallel() + + t.Run("invalid intervals", func(t *testing.T) { + _, err := s4.NewInitialAddressRangeForIntervals(0) + assert.ErrorIs(t, err, s4.ErrInvalidIntervals) + + _, err = s4.NewInitialAddressRangeForIntervals(3) + assert.ErrorIs(t, err, s4.ErrInvalidIntervals) + }) + + t.Run("full range for one interval", func(t *testing.T) { + r, err := s4.NewInitialAddressRangeForIntervals(1) + assert.NoError(t, err) + assert.Equal(t, s4.NewFullAddressRange(), r) + }) + + t.Run("initial range for 256 intervals", func(t *testing.T) { + r, err := 
s4.NewInitialAddressRangeForIntervals(256) + assert.NoError(t, err) + assert.Equal(t, "0x0", r.MinAddress.Hex()) + assert.Equal(t, "0xffffffffffffffffffffffffffffffffffffff", r.MaxAddress.Hex()) + }) + + t.Run("advance for 256 intervals", func(t *testing.T) { + r, err := s4.NewInitialAddressRangeForIntervals(256) + assert.NoError(t, err) + + r.Advance() + assert.Equal(t, "0x100000000000000000000000000000000000000", r.MinAddress.Hex()) + assert.Equal(t, "0x1ffffffffffffffffffffffffffffffffffffff", r.MaxAddress.Hex()) + + r.Advance() + assert.Equal(t, "0x200000000000000000000000000000000000000", r.MinAddress.Hex()) + assert.Equal(t, "0x2ffffffffffffffffffffffffffffffffffffff", r.MaxAddress.Hex()) + + for i := 0; i < 253; i++ { + r.Advance() + } + assert.Equal(t, "0xff00000000000000000000000000000000000000", r.MinAddress.Hex()) + assert.Equal(t, "0xffffffffffffffffffffffffffffffffffffffff", r.MaxAddress.Hex()) + + // initial + r.Advance() + assert.Equal(t, s4.MinAddress, r.MinAddress) + assert.Equal(t, "0xffffffffffffffffffffffffffffffffffffff", r.MaxAddress.Hex()) + }) +} + +func TestAddressRange_Contains(t *testing.T) { + t.Parallel() + + r, err := s4.NewInitialAddressRangeForIntervals(256) + assert.NoError(t, err) + assert.True(t, r.Contains(r.MinAddress)) + assert.True(t, r.Contains(r.MaxAddress)) + assert.False(t, r.Contains(r.MaxAddress.Add(big.NewI(1)))) + + r.Advance() + assert.True(t, r.Contains(r.MinAddress)) + assert.True(t, r.Contains(r.MaxAddress)) + assert.False(t, r.Contains(r.MinAddress.Sub(big.NewI(1)))) +} diff --git a/core/services/s4/envelope.go b/core/services/s4/envelope.go new file mode 100644 index 00000000..fc332103 --- /dev/null +++ b/core/services/s4/envelope.go @@ -0,0 +1,77 @@ +package s4 + +import ( + "crypto/ecdsa" + "encoding/json" + "fmt" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// Envelope represents a JSON object that is signed for address verification. 
// All []byte values are encoded as base64 (default JSON behavior).
// Hex is not used to avoid confusion due to case-sensitivity and the 0x prefix.
// A signer is responsible for generating a JSON that has no whitespace and
// the keys appear in this exact order:
// {"address":base64,"slotid":int,"payload":base64,"version":int,"expiration":int}
type Envelope struct {
	// Address is the raw owner address bytes (expected to be common.AddressLength long).
	Address []byte `json:"address"`
	// SlotID is the slot number within the owner's address space.
	SlotID uint `json:"slotid"`
	// Payload is the user data stored in the slot.
	Payload []byte `json:"payload"`
	// Version is the record version used for ordering updates.
	Version uint64 `json:"version"`
	// Expiration is a UNIX timestamp in milliseconds (see time.Time.UnixMilli).
	Expiration int64 `json:"expiration"`
}

// NewEnvelopeFromRecord builds an Envelope from the given key and record,
// ready to be serialized and signed.
func NewEnvelopeFromRecord(key *Key, record *Record) *Envelope {
	return &Envelope{
		Address:    key.Address.Bytes(),
		SlotID:     key.SlotId,
		Payload:    record.Payload,
		Version:    key.Version,
		Expiration: record.Expiration,
	}
}

// Sign calculates an Ethereum signature over the serialized envelope data.
// It returns an error if e.Address is not exactly common.AddressLength bytes.
func (e Envelope) Sign(privateKey *ecdsa.PrivateKey) (signature []byte, err error) {
	if len(e.Address) != common.AddressLength {
		return nil, fmt.Errorf("invalid address length: %d", len(e.Address))
	}
	js, err := e.ToJson()
	if err != nil {
		return nil, err
	}
	return utils.GenerateEthSignature(privateKey, js)
}

// GetSignerAddress recovers and returns the address that produced the given
// signature over the serialized envelope data.
+func (e Envelope) GetSignerAddress(signature []byte) (address common.Address, err error) { + if len(e.Address) != common.AddressLength { + return common.Address{}, fmt.Errorf("invalid address length: %d", len(e.Address)) + } + js, err := e.ToJson() + if err != nil { + return common.Address{}, err + } + return utils.GetSignersEthAddress(js, signature) +} + +func (e Envelope) ToJson() ([]byte, error) { + address, err := json.Marshal(e.Address) + if err != nil { + return nil, err + } + nonNilPayload := e.Payload + if nonNilPayload == nil { + // prevent unwanted "null" values in JSON representation + nonNilPayload = []byte{} + } + payload, err := json.Marshal(nonNilPayload) + if err != nil { + return nil, err + } + js := fmt.Sprintf(`{"address":%s,"slotid":%d,"payload":%s,"version":%d,"expiration":%d}`, address, e.SlotID, payload, e.Version, e.Expiration) + return []byte(js), nil +} diff --git a/core/services/s4/envelope_test.go b/core/services/s4/envelope_test.go new file mode 100644 index 00000000..c386820e --- /dev/null +++ b/core/services/s4/envelope_test.go @@ -0,0 +1,61 @@ +package s4_test + +import ( + "crypto/ecdsa" + "encoding/json" + "testing" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/s4" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/assert" +) + +func TestEnvelope(t *testing.T) { + t.Parallel() + + payload := testutils.Random32Byte() + expiration := time.Now().Add(time.Hour).UnixMilli() + key := &s4.Key{ + Address: testutils.NewAddress(), + SlotId: 3, + Version: 5, + } + env := s4.NewEnvelopeFromRecord(key, &s4.Record{ + Payload: payload[:], + Expiration: expiration, + }) + + t.Run("signing", func(t *testing.T) { + privateKey, err := crypto.GenerateKey() + assert.NoError(t, err) + + sig, err := env.Sign(privateKey) + assert.NoError(t, err) + + addr, err := env.GetSignerAddress(sig) + assert.NoError(t, err) + + publicKey := privateKey.Public() + 
publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) + assert.True(t, ok) + assert.Equal(t, crypto.PubkeyToAddress(*publicKeyECDSA), addr) + }) + + t.Run("json", func(t *testing.T) { + js, err := env.ToJson() + assert.NoError(t, err) + + var decoded s4.Envelope + err = json.Unmarshal(js, &decoded) + assert.NoError(t, err) + + js2, err := decoded.ToJson() + assert.NoError(t, err) + assert.Equal(t, js, js2) + + assert.Equal(t, *env, decoded) + }) +} diff --git a/core/services/s4/errors.go b/core/services/s4/errors.go new file mode 100644 index 00000000..aa447f88 --- /dev/null +++ b/core/services/s4/errors.go @@ -0,0 +1,13 @@ +package s4 + +import "errors" + +var ( + ErrNotFound = errors.New("not found") + ErrWrongSignature = errors.New("wrong signature") + ErrSlotIdTooBig = errors.New("slot id is too big") + ErrPayloadTooBig = errors.New("payload is too big") + ErrPastExpiration = errors.New("past expiration") + ErrVersionTooLow = errors.New("version too low") + ErrExpirationTooLong = errors.New("expiration too long") +) diff --git a/core/services/s4/in_memory_orm.go b/core/services/s4/in_memory_orm.go new file mode 100644 index 00000000..aeb5dd8c --- /dev/null +++ b/core/services/s4/in_memory_orm.go @@ -0,0 +1,144 @@ +package s4 + +import ( + "sort" + "sync" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type key struct { + address string + slot uint +} + +type mrow struct { + Row *Row + UpdatedAt time.Time +} + +type inMemoryOrm struct { + rows map[key]*mrow + mu sync.RWMutex +} + +var _ ORM = (*inMemoryOrm)(nil) + +func NewInMemoryORM() ORM { + return &inMemoryOrm{ + rows: make(map[key]*mrow), + } +} + +func (o *inMemoryOrm) Get(address *big.Big, slotId uint, qopts ...pg.QOpt) (*Row, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + mkey := key{ + address: address.Hex(), + slot: slotId, + } + mrow, ok := o.rows[mkey] + if !ok { + return nil, ErrNotFound + } + return 
mrow.Row.Clone(), nil +} + +func (o *inMemoryOrm) Update(row *Row, qopts ...pg.QOpt) error { + o.mu.Lock() + defer o.mu.Unlock() + + mkey := key{ + address: row.Address.Hex(), + slot: row.SlotId, + } + existing, ok := o.rows[mkey] + versionOk := false + if ok && row.Confirmed { + versionOk = existing.Row.Version <= row.Version + } + if ok && !row.Confirmed { + versionOk = existing.Row.Version < row.Version + } + if ok && !versionOk { + return ErrVersionTooLow + } + + o.rows[mkey] = &mrow{ + Row: row.Clone(), + UpdatedAt: time.Now().UTC(), + } + return nil +} + +func (o *inMemoryOrm) DeleteExpired(limit uint, now time.Time, qopts ...pg.QOpt) (int64, error) { + o.mu.Lock() + defer o.mu.Unlock() + + queue := make([]key, 0) + for k, v := range o.rows { + if v.Row.Expiration < now.UnixMilli() { + queue = append(queue, k) + if len(queue) >= int(limit) { + break + } + } + } + for _, k := range queue { + delete(o.rows, k) + } + + return int64(len(queue)), nil +} + +func (o *inMemoryOrm) GetSnapshot(addressRange *AddressRange, qopts ...pg.QOpt) ([]*SnapshotRow, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + now := time.Now().UnixMilli() + var rows []*SnapshotRow + for _, mrow := range o.rows { + if mrow.Row.Expiration > now { + rows = append(rows, &SnapshotRow{ + Address: big.New(mrow.Row.Address.ToInt()), + SlotId: mrow.Row.SlotId, + Version: mrow.Row.Version, + Expiration: mrow.Row.Expiration, + Confirmed: mrow.Row.Confirmed, + }) + } + } + + return rows, nil +} + +func (o *inMemoryOrm) GetUnconfirmedRows(limit uint, qopts ...pg.QOpt) ([]*Row, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + now := time.Now().UnixMilli() + var mrows []*mrow + for _, mrow := range o.rows { + if mrow.Row.Expiration > now && !mrow.Row.Confirmed { + mrows = append(mrows, mrow) + } + } + + sort.Slice(mrows, func(i, j int) bool { + return mrows[i].UpdatedAt.Before(mrows[j].UpdatedAt) + }) + + if uint(len(mrows)) > limit { + mrows = mrows[:limit] + } + + rows := make([]*Row, len(mrows)) + 
for i, mrow := range mrows { + rows[i] = mrow.Row.Clone() + } + + return rows, nil +} diff --git a/core/services/s4/in_memory_orm_test.go b/core/services/s4/in_memory_orm_test.go new file mode 100644 index 00000000..0d46f8c7 --- /dev/null +++ b/core/services/s4/in_memory_orm_test.go @@ -0,0 +1,166 @@ +package s4_test + +import ( + "testing" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/s4" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" +) + +func TestInMemoryORM(t *testing.T) { + t.Parallel() + + address := testutils.NewAddress() + var slotId uint = 3 + payload := testutils.Random32Byte() + signature := testutils.Random32Byte() + expiration := time.Now().Add(time.Minute).UnixMilli() + row := &s4.Row{ + Address: big.New(address.Big()), + SlotId: slotId, + Payload: payload[:], + Version: 3, + Expiration: expiration, + Confirmed: false, + Signature: signature[:], + } + + orm := s4.NewInMemoryORM() + + t.Run("row not found", func(t *testing.T) { + _, err := orm.Get(big.New(address.Big()), slotId) + assert.ErrorIs(t, err, s4.ErrNotFound) + }) + + t.Run("insert and get", func(t *testing.T) { + err := orm.Update(row) + assert.NoError(t, err) + + e, err := orm.Get(big.New(address.Big()), slotId) + assert.NoError(t, err) + assert.Equal(t, row, e) + }) + + t.Run("update and get", func(t *testing.T) { + row.Version = 5 + err := orm.Update(row) + assert.NoError(t, err) + + // unconfirmed row requires greater version + err = orm.Update(row) + assert.ErrorIs(t, err, s4.ErrVersionTooLow) + + row.Confirmed = true + err = orm.Update(row) + assert.NoError(t, err) + + e, err := orm.Get(big.New(address.Big()), slotId) + assert.NoError(t, err) + assert.Equal(t, row, e) + }) +} + +func TestInMemoryORM_DeleteExpired(t *testing.T) { + t.Parallel() + + orm := s4.NewInMemoryORM() + baseTime := 
time.Now().Add(time.Minute).UTC() + + for i := 0; i < 256; i++ { + var thisAddress common.Address + thisAddress[0] = byte(i) + + row := &s4.Row{ + Address: big.New(thisAddress.Big()), + SlotId: 1, + Payload: []byte{}, + Version: 1, + Expiration: baseTime.Add(time.Duration(i) * time.Second).UnixMilli(), + Confirmed: false, + Signature: []byte{}, + } + err := orm.Update(row) + assert.NoError(t, err) + } + + deadline := baseTime.Add(100 * time.Second) + count, err := orm.DeleteExpired(200, deadline) + assert.NoError(t, err) + assert.Equal(t, int64(100), count) + + rows, err := orm.GetUnconfirmedRows(200) + assert.NoError(t, err) + assert.Len(t, rows, 156) +} + +func TestInMemoryORM_GetUnconfirmedRows(t *testing.T) { + t.Parallel() + + orm := s4.NewInMemoryORM() + expiration := time.Now().Add(100 * time.Second).UnixMilli() + + for i := 0; i < 256; i++ { + var thisAddress common.Address + thisAddress[0] = byte(i) + + row := &s4.Row{ + Address: big.New(thisAddress.Big()), + SlotId: 1, + Payload: []byte{}, + Version: 1, + Expiration: expiration, + Confirmed: i >= 100, + Signature: []byte{}, + } + err := orm.Update(row) + assert.NoError(t, err) + time.Sleep(time.Millisecond) + } + + rows, err := orm.GetUnconfirmedRows(100) + assert.NoError(t, err) + assert.Len(t, rows, 100) +} + +func TestInMemoryORM_GetSnapshot(t *testing.T) { + t.Parallel() + + orm := s4.NewInMemoryORM() + expiration := time.Now().Add(100 * time.Second).UnixMilli() + + const n = 256 + for i := 0; i < n; i++ { + var thisAddress common.Address + thisAddress[0] = byte(i) + + row := &s4.Row{ + Address: big.New(thisAddress.Big()), + SlotId: 1, + Payload: []byte{}, + Version: uint64(i), + Expiration: expiration, + Confirmed: i >= 100, + Signature: []byte{}, + } + err := orm.Update(row) + assert.NoError(t, err) + } + + rows, err := orm.GetSnapshot(s4.NewFullAddressRange()) + assert.NoError(t, err) + assert.Len(t, rows, n) + + testMap := make(map[uint64]int) + for i := 0; i < n; i++ { + 
testMap[rows[i].Version]++ + } + assert.Len(t, testMap, n) + for _, c := range testMap { + assert.Equal(t, 1, c) + } +} diff --git a/core/services/s4/mocks/orm.go b/core/services/s4/mocks/orm.go new file mode 100644 index 00000000..8c85d271 --- /dev/null +++ b/core/services/s4/mocks/orm.go @@ -0,0 +1,204 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + big "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + mock "github.com/stretchr/testify/mock" + + pg "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + s4 "github.com/goplugin/pluginv3.0/v2/core/services/s4" + + time "time" +) + +// ORM is an autogenerated mock type for the ORM type +type ORM struct { + mock.Mock +} + +// DeleteExpired provides a mock function with given fields: limit, utcNow, qopts +func (_m *ORM) DeleteExpired(limit uint, utcNow time.Time, qopts ...pg.QOpt) (int64, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, limit, utcNow) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for DeleteExpired") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(uint, time.Time, ...pg.QOpt) (int64, error)); ok { + return rf(limit, utcNow, qopts...) + } + if rf, ok := ret.Get(0).(func(uint, time.Time, ...pg.QOpt) int64); ok { + r0 = rf(limit, utcNow, qopts...) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(uint, time.Time, ...pg.QOpt) error); ok { + r1 = rf(limit, utcNow, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: address, slotId, qopts +func (_m *ORM) Get(address *big.Big, slotId uint, qopts ...pg.QOpt) (*s4.Row, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, address, slotId) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *s4.Row + var r1 error + if rf, ok := ret.Get(0).(func(*big.Big, uint, ...pg.QOpt) (*s4.Row, error)); ok { + return rf(address, slotId, qopts...) + } + if rf, ok := ret.Get(0).(func(*big.Big, uint, ...pg.QOpt) *s4.Row); ok { + r0 = rf(address, slotId, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*s4.Row) + } + } + + if rf, ok := ret.Get(1).(func(*big.Big, uint, ...pg.QOpt) error); ok { + r1 = rf(address, slotId, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSnapshot provides a mock function with given fields: addressRange, qopts +func (_m *ORM) GetSnapshot(addressRange *s4.AddressRange, qopts ...pg.QOpt) ([]*s4.SnapshotRow, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, addressRange) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetSnapshot") + } + + var r0 []*s4.SnapshotRow + var r1 error + if rf, ok := ret.Get(0).(func(*s4.AddressRange, ...pg.QOpt) ([]*s4.SnapshotRow, error)); ok { + return rf(addressRange, qopts...) + } + if rf, ok := ret.Get(0).(func(*s4.AddressRange, ...pg.QOpt) []*s4.SnapshotRow); ok { + r0 = rf(addressRange, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*s4.SnapshotRow) + } + } + + if rf, ok := ret.Get(1).(func(*s4.AddressRange, ...pg.QOpt) error); ok { + r1 = rf(addressRange, qopts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetUnconfirmedRows provides a mock function with given fields: limit, qopts +func (_m *ORM) GetUnconfirmedRows(limit uint, qopts ...pg.QOpt) ([]*s4.Row, error) { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, limit) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetUnconfirmedRows") + } + + var r0 []*s4.Row + var r1 error + if rf, ok := ret.Get(0).(func(uint, ...pg.QOpt) ([]*s4.Row, error)); ok { + return rf(limit, qopts...) + } + if rf, ok := ret.Get(0).(func(uint, ...pg.QOpt) []*s4.Row); ok { + r0 = rf(limit, qopts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*s4.Row) + } + } + + if rf, ok := ret.Get(1).(func(uint, ...pg.QOpt) error); ok { + r1 = rf(limit, qopts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Update provides a mock function with given fields: row, qopts +func (_m *ORM) Update(row *s4.Row, qopts ...pg.QOpt) error { + _va := make([]interface{}, len(qopts)) + for _i := range qopts { + _va[_i] = qopts[_i] + } + var _ca []interface{} + _ca = append(_ca, row) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Update") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*s4.Row, ...pg.QOpt) error); ok { + r0 = rf(row, qopts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewORM creates a new instance of ORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewORM(t interface { + mock.TestingT + Cleanup(func()) +}) *ORM { + mock := &ORM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/s4/mocks/storage.go b/core/services/s4/mocks/storage.go new file mode 100644 index 00000000..1fa07045 --- /dev/null +++ b/core/services/s4/mocks/storage.go @@ -0,0 +1,137 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + s4 "github.com/goplugin/pluginv3.0/v2/core/services/s4" +) + +// Storage is an autogenerated mock type for the Storage type +type Storage struct { + mock.Mock +} + +// Constraints provides a mock function with given fields: +func (_m *Storage) Constraints() s4.Constraints { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Constraints") + } + + var r0 s4.Constraints + if rf, ok := ret.Get(0).(func() s4.Constraints); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(s4.Constraints) + } + + return r0 +} + +// Get provides a mock function with given fields: ctx, key +func (_m *Storage) Get(ctx context.Context, key *s4.Key) (*s4.Record, *s4.Metadata, error) { + ret := _m.Called(ctx, key) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *s4.Record + var r1 *s4.Metadata + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, *s4.Key) (*s4.Record, *s4.Metadata, error)); ok { + return rf(ctx, key) + } + if rf, ok := ret.Get(0).(func(context.Context, *s4.Key) *s4.Record); ok { + r0 = rf(ctx, key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*s4.Record) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *s4.Key) *s4.Metadata); ok { + r1 = rf(ctx, key) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*s4.Metadata) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, *s4.Key) error); ok { + r2 = rf(ctx, 
key) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// List provides a mock function with given fields: ctx, address +func (_m *Storage) List(ctx context.Context, address common.Address) ([]*s4.SnapshotRow, error) { + ret := _m.Called(ctx, address) + + if len(ret) == 0 { + panic("no return value specified for List") + } + + var r0 []*s4.SnapshotRow + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) ([]*s4.SnapshotRow, error)); ok { + return rf(ctx, address) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) []*s4.SnapshotRow); ok { + r0 = rf(ctx, address) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*s4.SnapshotRow) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Put provides a mock function with given fields: ctx, key, record, signature +func (_m *Storage) Put(ctx context.Context, key *s4.Key, record *s4.Record, signature []byte) error { + ret := _m.Called(ctx, key, record, signature) + + if len(ret) == 0 { + panic("no return value specified for Put") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *s4.Key, *s4.Record, []byte) error); ok { + r0 = rf(ctx, key, record, signature) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewStorage creates a new instance of Storage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewStorage(t interface { + mock.TestingT + Cleanup(func()) +}) *Storage { + mock := &Storage{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/s4/orm.go b/core/services/s4/orm.go new file mode 100644 index 00000000..e13c03f8 --- /dev/null +++ b/core/services/s4/orm.go @@ -0,0 +1,74 @@ +package s4 + +import ( + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// Row represents a data row persisted by ORM. +type Row struct { + Address *big.Big + SlotId uint + Payload []byte + Version uint64 + Expiration int64 + Confirmed bool + Signature []byte +} + +// SnapshotRow(s) are returned by GetSnapshot function. +type SnapshotRow struct { + Address *big.Big + SlotId uint + Version uint64 + Expiration int64 + Confirmed bool + PayloadSize uint64 +} + +//go:generate mockery --quiet --name ORM --output ./mocks/ --case=underscore + +// ORM represents S4 persistence layer. +// All functions are thread-safe. +type ORM interface { + // Get reads a row for the given address and slotId combination. + // If such row does not exist, ErrNotFound is returned. + // There is no filter on Expiration. + Get(address *big.Big, slotId uint, qopts ...pg.QOpt) (*Row, error) + + // Update inserts or updates the row identified by (Address, SlotId) pair. + // When updating, the new row must have greater or equal version, + // otherwise ErrVersionTooLow is returned. + // UpdatedAt field value is ignored. + Update(row *Row, qopts ...pg.QOpt) error + + // DeleteExpired deletes any entries having Expiration < utcNow, + // up to the given limit. + // Returns the number of deleted rows. + DeleteExpired(limit uint, utcNow time.Time, qopts ...pg.QOpt) (int64, error) + + // GetSnapshot selects all non-expired row versions for the given addresses range. + // For the full address range, use NewFullAddressRange(). 
+ GetSnapshot(addressRange *AddressRange, qopts ...pg.QOpt) ([]*SnapshotRow, error) + + // GetUnconfirmedRows selects all non-expired, non-confirmed rows ordered by UpdatedAt. + // The number of returned rows is limited to the given limit. + GetUnconfirmedRows(limit uint, qopts ...pg.QOpt) ([]*Row, error) +} + +func (r Row) Clone() *Row { + clone := Row{ + Address: big.New(r.Address.ToInt()), + SlotId: r.SlotId, + Payload: make([]byte, len(r.Payload)), + Version: r.Version, + Expiration: r.Expiration, + Confirmed: r.Confirmed, + Signature: make([]byte, len(r.Signature)), + } + copy(clone.Payload, r.Payload) + copy(clone.Signature, r.Signature) + return &clone +} diff --git a/core/services/s4/postgres_orm.go b/core/services/s4/postgres_orm.go new file mode 100644 index 00000000..6c70c4f1 --- /dev/null +++ b/core/services/s4/postgres_orm.go @@ -0,0 +1,114 @@ +package s4 + +import ( + "database/sql" + "fmt" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" +) + +const ( + SharedTableName = "shared" + s4PostgresSchema = "s4" +) + +type orm struct { + q pg.Q + tableName string + namespace string +} + +var _ ORM = (*orm)(nil) + +func NewPostgresORM(db *sqlx.DB, lggr logger.Logger, cfg pg.QConfig, tableName, namespace string) ORM { + return &orm{ + q: pg.NewQ(db, lggr, cfg), + tableName: fmt.Sprintf(`"%s".%s`, s4PostgresSchema, tableName), + namespace: namespace, + } +} + +func (o orm) Get(address *big.Big, slotId uint, qopts ...pg.QOpt) (*Row, error) { + row := &Row{} + q := o.q.WithOpts(qopts...) 
+ + stmt := fmt.Sprintf(`SELECT address, slot_id, version, expiration, confirmed, payload, signature FROM %s +WHERE namespace=$1 AND address=$2 AND slot_id=$3;`, o.tableName) + if err := q.Get(row, stmt, o.namespace, address, slotId); err != nil { + if errors.Is(err, sql.ErrNoRows) { + err = ErrNotFound + } + return nil, err + } + return row, nil +} + +func (o orm) Update(row *Row, qopts ...pg.QOpt) error { + q := o.q.WithOpts(qopts...) + + // This query inserts or updates a row, depending on whether the version is higher than the existing one. + // We only allow the same version when the row is confirmed. + // We never transition back from unconfirmed to confirmed state. + stmt := fmt.Sprintf(`INSERT INTO %s as t (namespace, address, slot_id, version, expiration, confirmed, payload, signature, updated_at) +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW()) +ON CONFLICT (namespace, address, slot_id) +DO UPDATE SET version = EXCLUDED.version, +expiration = EXCLUDED.expiration, +confirmed = EXCLUDED.confirmed, +payload = EXCLUDED.payload, +signature = EXCLUDED.signature, +updated_at = NOW() +WHERE (t.version < EXCLUDED.version) OR (t.version <= EXCLUDED.version AND EXCLUDED.confirmed IS TRUE) +RETURNING id;`, o.tableName) + var id uint64 + err := q.Get(&id, stmt, o.namespace, row.Address, row.SlotId, row.Version, row.Expiration, row.Confirmed, row.Payload, row.Signature) + if errors.Is(err, sql.ErrNoRows) { + return ErrVersionTooLow + } + return err +} + +func (o orm) DeleteExpired(limit uint, utcNow time.Time, qopts ...pg.QOpt) (int64, error) { + q := o.q.WithOpts(qopts...) 

	// Postgres DELETE has no LIMIT clause, so collect up to `limit` expired
	// row ids in a CTE first, then delete them in a single statement.
	with := fmt.Sprintf(`WITH rows AS (SELECT id FROM %s WHERE namespace = $1 AND expiration < $2 LIMIT $3)`, o.tableName)
	stmt := fmt.Sprintf(`%s DELETE FROM %s WHERE id IN (SELECT id FROM rows);`, with, o.tableName)
	result, err := q.Exec(stmt, o.namespace, utcNow.UnixMilli(), limit)
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}

// GetSnapshot returns row metadata (payload size instead of the payload
// itself) for all rows within the given address range (inclusive).
// NOTE(review): this query does not exclude expired rows, unlike the
// in-memory implementation and the ORM interface documentation — confirm
// whether callers rely on expired entries being filtered out here.
func (o orm) GetSnapshot(addressRange *AddressRange, qopts ...pg.QOpt) ([]*SnapshotRow, error) {
	q := o.q.WithOpts(qopts...)
	rows := make([]*SnapshotRow, 0)

	stmt := fmt.Sprintf(`SELECT address, slot_id, version, expiration, confirmed, octet_length(payload) AS payload_size FROM %s WHERE namespace = $1 AND address >= $2 AND address <= $3;`, o.tableName)
	if err := q.Select(&rows, stmt, o.namespace, addressRange.MinAddress, addressRange.MaxAddress); err != nil {
		// An empty result set is not an error; propagate everything else.
		if !errors.Is(err, sql.ErrNoRows) {
			return nil, err
		}
	}
	return rows, nil
}

// GetUnconfirmedRows returns up to `limit` unconfirmed rows, oldest first
// (ordered by updated_at).
func (o orm) GetUnconfirmedRows(limit uint, qopts ...pg.QOpt) ([]*Row, error) {
	q := o.q.WithOpts(qopts...)
+ rows := make([]*Row, 0) + + stmt := fmt.Sprintf(`SELECT address, slot_id, version, expiration, confirmed, payload, signature FROM %s +WHERE namespace = $1 AND confirmed IS FALSE ORDER BY updated_at LIMIT $2;`, o.tableName) + if err := q.Select(&rows, stmt, o.namespace, limit); err != nil { + if !errors.Is(err, sql.ErrNoRows) { + return nil, err + } + } + return rows, nil +} diff --git a/core/services/s4/postgres_orm_test.go b/core/services/s4/postgres_orm_test.go new file mode 100644 index 00000000..8e954b6a --- /dev/null +++ b/core/services/s4/postgres_orm_test.go @@ -0,0 +1,280 @@ +package s4_test + +import ( + "errors" + "math" + "testing" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/s4" + + "github.com/stretchr/testify/assert" +) + +func setupORM(t *testing.T, namespace string) s4.ORM { + t.Helper() + + db := pgtest.NewSqlxDB(t) + lggr := logger.TestLogger(t) + orm := s4.NewPostgresORM(db, lggr, pgtest.NewQConfig(true), s4.SharedTableName, namespace) + + t.Cleanup(func() { + assert.NoError(t, db.Close()) + }) + + return orm +} + +func generateTestRows(t *testing.T, n int) []*s4.Row { + t.Helper() + + rows := make([]*s4.Row, n) + for i := 0; i < n; i++ { + row := &s4.Row{ + Address: big.New(testutils.NewAddress().Big()), + SlotId: 1, + Payload: cltest.MustRandomBytes(t, 32), + Version: 1 + uint64(i), + Expiration: time.Now().Add(time.Hour).UnixMilli(), + Confirmed: i%2 == 0, + Signature: cltest.MustRandomBytes(t, 32), + } + rows[i] = row + } + + return rows +} + +func TestNewPostgresOrm(t *testing.T) { + t.Parallel() + + orm := setupORM(t, "test") + assert.NotNil(t, orm) +} + +func TestPostgresORM_UpdateAndGet(t *testing.T) { + t.Parallel() + + 
orm := setupORM(t, "test") + rows := generateTestRows(t, 10) + + for _, row := range rows { + err := orm.Update(row) + assert.NoError(t, err) + + row.Version++ + err = orm.Update(row) + assert.NoError(t, err) + + err = orm.Update(row) + if !row.Confirmed { + assert.ErrorIs(t, err, s4.ErrVersionTooLow) + } + } + + for _, row := range rows { + gotRow, err := orm.Get(row.Address, row.SlotId) + assert.NoError(t, err) + assert.Equal(t, row, gotRow) + } + + rows = generateTestRows(t, 1) + _, err := orm.Get(rows[0].Address, rows[0].SlotId) + assert.ErrorIs(t, err, s4.ErrNotFound) +} + +func TestPostgresORM_UpdateSimpleFlow(t *testing.T) { + t.Parallel() + + orm := setupORM(t, "test") + row := generateTestRows(t, 1)[0] + + // user sends a new version + assert.NoError(t, orm.Update(row)) + + // OCR round confirms it + row.Confirmed = true + assert.NoError(t, orm.Update(row)) + + // user sends a higher version (unconfirmed) + row.Version++ + row.Confirmed = false + assert.NoError(t, orm.Update(row)) + + // and again, before OCR has a chance to confirm + row.Version++ + assert.NoError(t, orm.Update(row)) + + // user tries to send a lower version + row.Version-- + assert.Error(t, orm.Update(row)) +} + +func TestPostgresORM_DeleteExpired(t *testing.T) { + t.Parallel() + + orm := setupORM(t, "test") + + const total = 10 + const expired = 4 + rows := generateTestRows(t, total) + + for _, row := range rows { + err := orm.Update(row) + assert.NoError(t, err) + } + + deleted, err := orm.DeleteExpired(expired, time.Now().Add(2*time.Hour).UTC()) + assert.NoError(t, err) + assert.Equal(t, int64(expired), deleted) + + count := 0 + for _, row := range rows { + _, err := orm.Get(row.Address, row.SlotId) + if !errors.Is(err, s4.ErrNotFound) { + count++ + } + } + assert.Equal(t, total-expired, count) +} + +func TestPostgresORM_GetSnapshot(t *testing.T) { + t.Parallel() + + orm := setupORM(t, "test") + + t.Run("no rows", func(t *testing.T) { + rows, err := 
orm.GetSnapshot(s4.NewFullAddressRange()) + assert.NoError(t, err) + assert.Empty(t, rows) + }) + + t.Run("with rows", func(t *testing.T) { + rows := generateTestRows(t, 100) + + for _, row := range rows { + err := orm.Update(row) + assert.NoError(t, err) + } + + t.Run("full range", func(t *testing.T) { + snapshot, err := orm.GetSnapshot(s4.NewFullAddressRange()) + assert.NoError(t, err) + assert.Equal(t, len(rows), len(snapshot)) + + snapshotRowMap := make(map[string]*s4.SnapshotRow) + for i, sr := range snapshot { + // assuming unique addresses + snapshotRowMap[sr.Address.String()] = snapshot[i] + } + + for _, sr := range rows { + snapshotRow, ok := snapshotRowMap[sr.Address.String()] + assert.True(t, ok) + assert.Equal(t, snapshotRow.Address, sr.Address) + assert.Equal(t, snapshotRow.SlotId, sr.SlotId) + assert.Equal(t, snapshotRow.Version, sr.Version) + assert.Equal(t, snapshotRow.Expiration, sr.Expiration) + assert.Equal(t, snapshotRow.Confirmed, sr.Confirmed) + assert.Equal(t, snapshotRow.PayloadSize, uint64(len(sr.Payload))) + } + }) + + t.Run("half range", func(t *testing.T) { + ar, err := s4.NewInitialAddressRangeForIntervals(2) + assert.NoError(t, err) + snapshot, err := orm.GetSnapshot(ar) + assert.NoError(t, err) + for _, sr := range snapshot { + assert.True(t, ar.Contains(sr.Address)) + } + }) + }) +} + +func TestPostgresORM_GetUnconfirmedRows(t *testing.T) { + t.Parallel() + + orm := setupORM(t, "test") + + t.Run("no rows", func(t *testing.T) { + rows, err := orm.GetUnconfirmedRows(5) + assert.NoError(t, err) + assert.Empty(t, rows) + }) + + t.Run("with rows", func(t *testing.T) { + rows := generateTestRows(t, 10) + + for _, row := range rows { + err := orm.Update(row) + assert.NoError(t, err) + time.Sleep(testutils.TestInterval / 10) + } + + gotRows, err := orm.GetUnconfirmedRows(5) + assert.NoError(t, err) + assert.Len(t, gotRows, 5) + + for _, row := range gotRows { + assert.False(t, row.Confirmed) + } + }) +} + +func TestPostgresORM_Namespace(t 
*testing.T) { + t.Parallel() + + ormA := setupORM(t, "a") + ormB := setupORM(t, "b") + + const n = 10 + rowsA := generateTestRows(t, n) + rowsB := generateTestRows(t, n) + for i := 0; i < n; i++ { + err := ormA.Update(rowsA[i]) + assert.NoError(t, err) + + err = ormB.Update(rowsB[i]) + assert.NoError(t, err) + } + + urowsA, err := ormA.GetUnconfirmedRows(n) + assert.NoError(t, err) + assert.Len(t, urowsA, n/2) + + urowsB, err := ormB.GetUnconfirmedRows(n) + assert.NoError(t, err) + assert.Len(t, urowsB, n/2) + + _, err = ormB.DeleteExpired(n, time.Now().UTC()) + assert.NoError(t, err) + + snapshotA, err := ormA.GetSnapshot(s4.NewFullAddressRange()) + assert.NoError(t, err) + assert.Len(t, snapshotA, n) +} + +func TestPostgresORM_BigIntVersion(t *testing.T) { + t.Parallel() + + orm := setupORM(t, "test") + row := generateTestRows(t, 1)[0] + row.Version = math.MaxUint64 - 10 + + err := orm.Update(row) + assert.NoError(t, err) + + row.Version++ + err = orm.Update(row) + assert.NoError(t, err) + + gotRow, err := orm.Get(row.Address, row.SlotId) + assert.NoError(t, err) + assert.Equal(t, row, gotRow) +} diff --git a/core/services/s4/storage.go b/core/services/s4/storage.go new file mode 100644 index 00000000..1128f31f --- /dev/null +++ b/core/services/s4/storage.go @@ -0,0 +1,165 @@ +package s4 + +import ( + "context" + + "github.com/jonboulle/clockwork" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + "github.com/ethereum/go-ethereum/common" +) + +// Constraints specifies the global storage constraints. +type Constraints struct { + MaxPayloadSizeBytes uint `json:"maxPayloadSizeBytes"` + MaxSlotsPerUser uint `json:"maxSlotsPerUser"` + MaxExpirationLengthSec uint64 `json:"maxExpirationLengthSec"` +} + +// Key identifies a versioned user record. 
+type Key struct { + // Address is a user address + Address common.Address + // SlotId is a slot number + SlotId uint + // Version is a data version + Version uint64 +} + +// Record represents a user record persisted by S4. +type Record struct { + // Arbitrary user data + Payload []byte + // Expiration timestamp assigned by user (unix time in milliseconds) + Expiration int64 +} + +// Metadata is the internal S4 data associated with a Record +type Metadata struct { + // Confirmed turns true once consensus is reached. + Confirmed bool + // Signature contains the original user signature. + Signature []byte +} + +//go:generate mockery --quiet --name Storage --output ./mocks/ --case=underscore + +// Storage represents S4 storage access interface. +// All functions are thread-safe. +type Storage interface { + // Constraints returns a copy of Constraints struct specified during service creation. + // The implementation is thread-safe. + Constraints() Constraints + + // Get returns a copy of record (with metadata) associated with the specified key. + // The returned Record & Metadata are always a copy. + Get(ctx context.Context, key *Key) (*Record, *Metadata, error) + + // Put creates (or updates) a record identified by the specified key. + // For signature calculation see envelope.go + Put(ctx context.Context, key *Key, record *Record, signature []byte) error + + // List returns a snapshot for the specified address. + // Slots having no data are not returned. 
+ List(ctx context.Context, address common.Address) ([]*SnapshotRow, error) +} + +type storage struct { + lggr logger.Logger + contraints Constraints + orm ORM + clock clockwork.Clock +} + +var _ Storage = (*storage)(nil) + +func NewStorage(lggr logger.Logger, contraints Constraints, orm ORM, clock clockwork.Clock) Storage { + return &storage{ + lggr: lggr.Named("S4Storage"), + contraints: contraints, + orm: orm, + clock: clock, + } +} + +func (s *storage) Constraints() Constraints { + return s.contraints +} + +func (s *storage) Get(ctx context.Context, key *Key) (*Record, *Metadata, error) { + if key.SlotId >= s.contraints.MaxSlotsPerUser { + return nil, nil, ErrSlotIdTooBig + } + + bigAddress := big.New(key.Address.Big()) + row, err := s.orm.Get(bigAddress, key.SlotId, pg.WithParentCtx(ctx)) + if err != nil { + return nil, nil, err + } + + if row.Version != key.Version || row.Expiration <= s.clock.Now().UnixMilli() { + return nil, nil, ErrNotFound + } + + record := &Record{ + Payload: make([]byte, len(row.Payload)), + Expiration: row.Expiration, + } + copy(record.Payload, row.Payload) + + metadata := &Metadata{ + Confirmed: row.Confirmed, + Signature: make([]byte, len(row.Signature)), + } + copy(metadata.Signature, row.Signature) + + return record, metadata, nil +} + +func (s *storage) List(ctx context.Context, address common.Address) ([]*SnapshotRow, error) { + bigAddress := big.New(address.Big()) + sar, err := NewSingleAddressRange(bigAddress) + if err != nil { + return nil, err + } + return s.orm.GetSnapshot(sar, pg.WithParentCtx(ctx)) +} + +func (s *storage) Put(ctx context.Context, key *Key, record *Record, signature []byte) error { + if key.SlotId >= s.contraints.MaxSlotsPerUser { + return ErrSlotIdTooBig + } + if len(record.Payload) > int(s.contraints.MaxPayloadSizeBytes) { + return ErrPayloadTooBig + } + now := s.clock.Now().UnixMilli() + if now > record.Expiration { + return ErrPastExpiration + } + if record.Expiration-now > 
int64(s.contraints.MaxExpirationLengthSec)*1000 {
+		return ErrExpirationTooLong
+	}
+
+	envelope := NewEnvelopeFromRecord(key, record)
+	signer, err := envelope.GetSignerAddress(signature)
+	if err != nil || signer != key.Address {
+		return ErrWrongSignature
+	}
+
+	row := &Row{
+		Address:    big.New(key.Address.Big()),
+		SlotId:     key.SlotId,
+		Payload:    make([]byte, len(record.Payload)),
+		Version:    key.Version,
+		Expiration: record.Expiration,
+		Confirmed:  false,
+		Signature:  make([]byte, len(signature)),
+	}
+	copy(row.Payload, record.Payload)
+	copy(row.Signature, signature)
+
+	return s.orm.Update(row, pg.WithParentCtx(ctx))
+}
diff --git a/core/services/s4/storage_test.go b/core/services/s4/storage_test.go
new file mode 100644
index 00000000..1eb819dd
--- /dev/null
+++ b/core/services/s4/storage_test.go
@@ -0,0 +1,237 @@
+package s4_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/jonboulle/clockwork"
+
+	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big"
+	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
+	"github.com/goplugin/pluginv3.0/v2/core/logger"
+	"github.com/goplugin/pluginv3.0/v2/core/services/s4"
+	"github.com/goplugin/pluginv3.0/v2/core/services/s4/mocks"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	constraints = s4.Constraints{
+		MaxSlotsPerUser:        5,
+		MaxPayloadSizeBytes:    32,
+		MaxExpirationLengthSec: 3600,
+	}
+)
+
+func setupTestStorage(t *testing.T, now time.Time) (*mocks.ORM, s4.Storage) {
+	logger := logger.TestLogger(t)
+	orm := mocks.NewORM(t)
+	clock := clockwork.NewFakeClockAt(now) // pin the fake clock to the caller's reference time so expirations derived from `now` are deterministic
+	storage := s4.NewStorage(logger, constraints, orm, clock)
+	return orm, storage
+}
+
+func TestStorage_Constraints(t *testing.T) {
+	t.Parallel()
+
+	_, storage := setupTestStorage(t, time.Now())
+	c := storage.Constraints()
+	assert.Equal(t, constraints, c)
+}
+
+func TestStorage_Errors(t *testing.T) {
+	t.Parallel()
+
+	now := time.Now()
+	ormMock, 
storage := setupTestStorage(t, now) + + t.Run("ErrNotFound", func(t *testing.T) { + key := &s4.Key{ + Address: testutils.NewAddress(), + SlotId: 1, + Version: 0, + } + ormMock.On("Get", big.New(key.Address.Big()), key.SlotId, mock.Anything).Return(nil, s4.ErrNotFound) + _, _, err := storage.Get(testutils.Context(t), key) + assert.ErrorIs(t, err, s4.ErrNotFound) + }) + + t.Run("ErrSlotIdTooBig", func(t *testing.T) { + key := &s4.Key{ + Address: testutils.NewAddress(), + SlotId: constraints.MaxSlotsPerUser + 1, + Version: 0, + } + _, _, err := storage.Get(testutils.Context(t), key) + assert.ErrorIs(t, err, s4.ErrSlotIdTooBig) + + record := &s4.Record{ + Payload: make([]byte, 10), + Expiration: now.Add(time.Minute).UnixMilli(), + } + err = storage.Put(testutils.Context(t), key, record, []byte{}) + assert.ErrorIs(t, err, s4.ErrSlotIdTooBig) + }) + + t.Run("ErrPayloadTooBig", func(t *testing.T) { + key := &s4.Key{ + Address: testutils.NewAddress(), + SlotId: 1, + Version: 0, + } + record := &s4.Record{ + Payload: make([]byte, constraints.MaxPayloadSizeBytes+1), + Expiration: now.Add(time.Minute).UnixMilli(), + } + err := storage.Put(testutils.Context(t), key, record, []byte{}) + assert.ErrorIs(t, err, s4.ErrPayloadTooBig) + }) + + t.Run("ErrPastExpiration", func(t *testing.T) { + key := &s4.Key{ + Address: testutils.NewAddress(), + SlotId: 1, + Version: 0, + } + record := &s4.Record{ + Payload: make([]byte, 10), + Expiration: now.UnixMilli() - 1, + } + err := storage.Put(testutils.Context(t), key, record, []byte{}) + assert.ErrorIs(t, err, s4.ErrPastExpiration) + }) + + t.Run("ErrExpirationTooLong", func(t *testing.T) { + key := &s4.Key{ + Address: testutils.NewAddress(), + SlotId: 1, + Version: 0, + } + record := &s4.Record{ + Payload: make([]byte, 10), + Expiration: now.UnixMilli() + 10000000, + } + err := storage.Put(testutils.Context(t), key, record, []byte{}) + assert.ErrorIs(t, err, s4.ErrExpirationTooLong) + }) + + t.Run("ErrWrongSignature", func(t *testing.T) { 
+ privateKey, address := testutils.NewPrivateKeyAndAddress(t) + key := &s4.Key{ + Address: address, + SlotId: 2, + Version: 0, + } + record := &s4.Record{ + Payload: []byte("foobar"), + Expiration: now.Add(time.Minute).UnixMilli(), + } + env := s4.NewEnvelopeFromRecord(key, record) + signature, err := env.Sign(privateKey) + assert.NoError(t, err) + + signature[0]++ + err = storage.Put(testutils.Context(t), key, record, signature) + assert.ErrorIs(t, err, s4.ErrWrongSignature) + }) + + t.Run("ErrVersionTooLow", func(t *testing.T) { + privateKey, address := testutils.NewPrivateKeyAndAddress(t) + key := &s4.Key{ + Address: address, + SlotId: 2, + Version: 5, + } + record := &s4.Record{ + Payload: []byte("foobar"), + Expiration: now.Add(time.Hour).UnixMilli(), + } + env := s4.NewEnvelopeFromRecord(key, record) + signature, err := env.Sign(privateKey) + assert.NoError(t, err) + + ormMock.ExpectedCalls = make([]*mock.Call, 0) + ormMock.On("Update", mock.Anything, mock.Anything).Return(s4.ErrVersionTooLow).Once() + + err = storage.Put(testutils.Context(t), key, record, signature) + assert.ErrorIs(t, err, s4.ErrVersionTooLow) + }) +} + +func TestStorage_PutAndGet(t *testing.T) { + t.Parallel() + + now := time.Now() + ormMock, storage := setupTestStorage(t, now) + + privateKey, address := testutils.NewPrivateKeyAndAddress(t) + key := &s4.Key{ + Address: address, + SlotId: 2, + Version: 0, + } + record := &s4.Record{ + Payload: []byte("foobar"), + Expiration: now.Add(time.Hour).UnixMilli(), + } + env := s4.NewEnvelopeFromRecord(key, record) + signature, err := env.Sign(privateKey) + assert.NoError(t, err) + + ormMock.On("Update", mock.Anything, mock.Anything).Return(nil) + ormMock.On("Get", big.New(key.Address.Big()), uint(2), mock.Anything).Return(&s4.Row{ + Address: big.New(key.Address.Big()), + SlotId: key.SlotId, + Version: key.Version, + Payload: record.Payload, + Expiration: record.Expiration, + Signature: signature, + }, nil) + + err = 
storage.Put(testutils.Context(t), key, record, signature) + assert.NoError(t, err) + + rec, metadata, err := storage.Get(testutils.Context(t), key) + assert.NoError(t, err) + assert.Equal(t, false, metadata.Confirmed) + assert.Equal(t, signature, metadata.Signature) + assert.Equal(t, record.Expiration, rec.Expiration) + assert.Equal(t, record.Payload, rec.Payload) +} + +func TestStorage_List(t *testing.T) { + t.Parallel() + + ormMock, storage := setupTestStorage(t, time.Now()) + address := testutils.NewAddress() + ormRows := []*s4.SnapshotRow{ + { + SlotId: 1, + Version: 1, + Expiration: 1, + }, + { + SlotId: 5, + Version: 5, + Expiration: 5, + }, + } + + addressRange, err := s4.NewSingleAddressRange(big.New(address.Big())) + assert.NoError(t, err) + ormMock.On("GetSnapshot", addressRange, mock.Anything).Return(ormRows, nil) + + rows, err := storage.List(testutils.Context(t), address) + require.NoError(t, err) + assert.Len(t, rows, 2) + for _, row := range rows { + if row.SlotId == ormRows[0].SlotId { + assert.Equal(t, ormRows[0], row) + } + if row.SlotId == ormRows[1].SlotId { + assert.Equal(t, ormRows[1], row) + } + } +} diff --git a/core/services/service.go b/core/services/service.go new file mode 100644 index 00000000..033a1405 --- /dev/null +++ b/core/services/service.go @@ -0,0 +1,7 @@ +package services + +import ( + "github.com/goplugin/plugin-common/pkg/services" +) + +type ServiceCtx = services.Service diff --git a/core/services/signatures/cryptotest/cryptotest.go b/core/services/signatures/cryptotest/cryptotest.go new file mode 100644 index 00000000..d788af56 --- /dev/null +++ b/core/services/signatures/cryptotest/cryptotest.go @@ -0,0 +1,33 @@ +// Package cryptotest provides convenience functions for kyber-based APIs. +// +// It is separate from cltest to prevent an import cycle. +package cryptotest + +import ( + "math/rand" + "testing" +) + +// randomStream implements cipher.Stream, but with a deterministic output. 
+type randomStream rand.Rand + +// NewStream returns a randomStream seeded from seed, for deterministic +// randomness in tests of random outputs, and for small property-based tests. +// +// This API is deliberately awkward to prevent it from being used outside of +// tests. +// +// The testing framework runs the tests in a file in series, unless you +// explicitly request otherwise with testing.T.Parallel(). So one such stream +// per file is enough, most of the time. +func NewStream(t *testing.T, seed int64) *randomStream { + return (*randomStream)(rand.New(rand.NewSource(seed))) +} + +// XORKeyStream dumps the output from a math/rand PRNG on dst. +// +// It gives no consideration for the contents of src, and is named so +// misleadingly purely to satisfy the cipher.Stream interface. +func (s *randomStream) XORKeyStream(dst, src []byte) { + (*rand.Rand)(s).Read(dst) +} diff --git a/core/services/signatures/ethdss/ethdss.go b/core/services/signatures/ethdss/ethdss.go new file mode 100644 index 00000000..c3ba4f95 --- /dev/null +++ b/core/services/signatures/ethdss/ethdss.go @@ -0,0 +1,306 @@ +// Package ethdss implements the Distributed Schnorr Signature protocol from the +// ////////////////////////////////////////////////////////////////////////////// +// +// XXX: Do not use in production until this code has been audited. +// +// ////////////////////////////////////////////////////////////////////////////// +// paper "Provably Secure Distributed Schnorr Signatures and a (t, n) +// Threshold Scheme for Implicit Certificates". +// https://dl.acm.org/citation.cfm?id=678297 +// To generate a distributed signature from a group of participants, the group +// must first generate one longterm distributed secret with the share/dkg +// package, and then one random secret to be used only once. +// Each participant then creates a DSS struct, that can issue partial signatures +// with `dss.PartialSignature()`. 
These partial signatures can be broadcasted to +// the whole group or to a trusted combiner. Once one has collected enough +// partial signatures, it is possible to compute the distributed signature with +// the `Signature` method. +// +// This is mostly copied from the sign/dss package, with minor adjustments for +// use with ethschnorr. +package clientdss + +import ( + "bytes" + "errors" + "math/big" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/ethschnorr" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + + "go.dedis.ch/kyber/v3" + "go.dedis.ch/kyber/v3/share" +) + +// Suite represents the functionalities needed by the dss package +type Suite interface { + kyber.Group + kyber.HashFactory + kyber.Random +} + +var secp256k1Suite = secp256k1.NewBlakeKeccackSecp256k1() +var secp256k1Group kyber.Group = secp256k1Suite + +// DistKeyShare is an abstraction to allow one to use distributed key share +// from different schemes easily into this distributed threshold Schnorr +// signature framework. +type DistKeyShare interface { + PriShare() *share.PriShare + Commitments() []kyber.Point +} + +// DSS holds the information used to issue partial signatures as well as to +// compute the distributed schnorr signature. +type DSS struct { + // Keypair for this participant in the signing process (i.e., the one where + // this struct is stored.) This is not the keypair for full signing key; that + // would defeat the point. + secret kyber.Scalar + public kyber.Point + // Index value of this participant in the signing process. The index is shared + // across participants. 
+ index int + // Public keys of potential participants in the signing process + participants []kyber.Point + // Number of participants needed to construct a signature + T int + // Shares of the distributed long-term signing keypair + long DistKeyShare + // Shares of the distributed ephemeral nonce keypair + random DistKeyShare + // Pedersen commitments to the coefficients of the polynomial implicitly used + // to share the long-term signing public/private keypair. + longPoly *share.PubPoly + // Pedersen commitments to the coefficients of the polynomial implicitly used + // to share the ephemeral nonce keypair. + randomPoly *share.PubPoly + // Message to be signed + msg *big.Int + // The partial signatures collected so far. + partials []*share.PriShare + // Indices for the participants who have provided their partial signatures to + // this participant. + partialsIdx map[int]bool + // True iff the partial signature for this dss has been signed by its owner. + signed bool + // String which uniquely identifies this signature, shared by all + // participants. + sessionID []byte +} + +// DSSArgs is the arguments to NewDSS, as a struct. See NewDSS for details. +type DSSArgs = struct { + secret kyber.Scalar + participants []kyber.Point + long DistKeyShare + random DistKeyShare + msg *big.Int + T int +} + +// PartialSig is partial representation of the final distributed signature. It +// must be sent to each of the other participants. +type PartialSig struct { + Partial *share.PriShare + SessionID []byte + Signature ethschnorr.Signature +} + +// NewDSS returns a DSS struct out of the suite, the longterm secret of this +// node, the list of participants, the longterm and random distributed key +// (generated by the dkg package), the message to sign and finally the T +// threshold. It returns an error if the public key of the secret can't be found +// in the list of participants. 
+func NewDSS(args DSSArgs) (*DSS, error) { + public := secp256k1Group.Point().Mul(args.secret, nil) + var i int + var found bool + for j, p := range args.participants { + if p.Equal(public) { + found = true + i = j + break + } + } + if !found { + return nil, errors.New("dss: public key not found in list of participants") + } + return &DSS{ + secret: args.secret, + public: public, + index: i, + participants: args.participants, + long: args.long, + longPoly: share.NewPubPoly(secp256k1Suite, + secp256k1Group.Point().Base(), args.long.Commitments()), + random: args.random, + randomPoly: share.NewPubPoly(secp256k1Suite, + secp256k1Group.Point().Base(), args.random.Commitments()), + msg: args.msg, + T: args.T, + partialsIdx: make(map[int]bool), + sessionID: sessionID(secp256k1Suite, args.long, args.random), + }, nil +} + +// PartialSig generates the partial signature related to this DSS. This +// PartialSig can be broadcasted to every other participant or only to a +// trusted combiner as described in the paper. +// The signature format is compatible with EdDSA verification implementations. +// +// Corresponds to section 4.2, step 2 the Stinson 2001 paper. +func (d *DSS) PartialSig() (*PartialSig, error) { + secretPartialLongTermKey := d.long.PriShare().V // ɑᵢ, in the paper + secretPartialCommitmentKey := d.random.PriShare().V // βᵢ, in the paper + fullChallenge := d.hashSig() // h(m‖V), in the paper + secretChallengeMultiple := secp256k1Suite.Scalar().Mul( + fullChallenge, secretPartialLongTermKey) // ɑᵢh(m‖V)G, in the paper + // Corresponds to ɣᵢG=βᵢG+ɑᵢh(m‖V)G in the paper, but NB, in its notation, we + // use ɣᵢG=βᵢG-ɑᵢh(m‖V)G. (Subtract instead of add.) 
+ partialSignature := secp256k1Group.Scalar().Sub( + secretPartialCommitmentKey, secretChallengeMultiple) + ps := &PartialSig{ + Partial: &share.PriShare{V: partialSignature, I: d.index}, + SessionID: d.sessionID, + } + var err error + ps.Signature, err = ethschnorr.Sign(d.secret, ps.Hash()) // sign share + if !d.signed { + d.partialsIdx[d.index] = true + d.partials = append(d.partials, ps.Partial) + d.signed = true + } + return ps, err +} + +// ProcessPartialSig takes a PartialSig from another participant and stores it +// for generating the distributed signature. It returns an error if the index is +// wrong, or the signature is invalid or if a partial signature has already been +// received by the same peer. To know whether the distributed signature can be +// computed after this call, one can use the `EnoughPartialSigs` method. +// +// Corresponds to section 4.3, step 3 of the paper +func (d *DSS) ProcessPartialSig(ps *PartialSig) error { + var err error + public, ok := findPub(d.participants, ps.Partial.I) + if !ok { + err = errors.New("dss: partial signature with invalid index") + } + // nothing secret here + if err == nil && !bytes.Equal(ps.SessionID, d.sessionID) { + err = errors.New("dss: session id do not match") + } + if err == nil { + if vrr := ethschnorr.Verify(public, ps.Hash(), ps.Signature); vrr != nil { + err = vrr + } + } + if err == nil { + if _, ok := d.partialsIdx[ps.Partial.I]; ok { + err = errors.New("dss: partial signature already received from peer") + } + } + if err != nil { + return err + } + hash := d.hashSig() // h(m‖V), in the paper's notation + idx := ps.Partial.I + // βᵢG=sum(cₖi^kG), in the paper, defined as sᵢ in step 2 of section 2.4 + randShare := d.randomPoly.Eval(idx) + // ɑᵢG=sum(bₖi^kG), defined as sᵢ in step 2 of section 2.4 + longShare := d.longPoly.Eval(idx) + // h(m‖V)(Y+...) term from equation (3) of the paper. 
AKA h(m‖V)ɑᵢG + challengeSummand := secp256k1Group.Point().Mul(hash, longShare.V) + // RHS of equation (3), except we subtract the second term instead of adding. + // AKA (βᵢ-ɑᵢh(m‖V))G, which should equal ɣᵢG, according to equation (3) + maybePartialSigCommitment := secp256k1Group.Point().Sub(randShare.V, + challengeSummand) + // Check that equation (3) holds (ɣᵢ is represented as ps.Partial.V, here.) + partialSigCommitment := secp256k1Group.Point().Mul(ps.Partial.V, nil) + if !partialSigCommitment.Equal(maybePartialSigCommitment) { + return errors.New("dss: partial signature not valid") + } + d.partialsIdx[ps.Partial.I] = true + d.partials = append(d.partials, ps.Partial) + return nil +} + +// EnoughPartialSig returns true if there are enough partial signature to compute +// the distributed signature. It returns false otherwise. If there are enough +// partial signatures, one can issue the signature with `Signature()`. +func (d *DSS) EnoughPartialSig() bool { + return len(d.partials) >= d.T +} + +// Signature computes the distributed signature from the list of partial +// signatures received. It returns an error if there are not enough partial +// signatures. +// +// Corresponds to section 4.2, step 4 of Stinson, 2001 paper +func (d *DSS) Signature() (ethschnorr.Signature, error) { + if !d.EnoughPartialSig() { + return nil, errors.New("dkg: not enough partial signatures to sign") + } + // signature corresponds to σ in step 4 of section 4.2 + signature, err := share.RecoverSecret(secp256k1Suite, d.partials, d.T, + len(d.participants)) + if err != nil { + return nil, err + } + rv := ethschnorr.NewSignature() + rv.Signature = secp256k1.ToInt(signature) + // commitmentPublicKey corresponds to V in step 4 of section 4.2 + commitmentPublicKey := d.random.Commitments()[0] + rv.CommitmentPublicAddress = secp256k1.EthereumAddress(commitmentPublicKey) + return rv, nil +} + +// hashSig returns, in the paper's notation, h(m‖V). It is the challenge hash +// for the signature. 
(Actually, the hash also includes the public key, but that +// has no effect on the correctness or robustness arguments from the paper.) +func (d *DSS) hashSig() kyber.Scalar { + v := d.random.Commitments()[0] // Public-key commitment, in signature from d + vAddress := secp256k1.EthereumAddress(v) + publicKey := d.long.Commitments()[0] + rv, err := ethschnorr.ChallengeHash(publicKey, vAddress, d.msg) + if err != nil { + panic(err) + } + return rv +} + +// Verify takes a public key, a message and a signature and returns an error if +// the signature is invalid. +func Verify(public kyber.Point, msg *big.Int, sig ethschnorr.Signature) error { + return ethschnorr.Verify(public, msg, sig) +} + +// Hash returns the hash representation of this PartialSig to be used in a +// signature. +func (ps *PartialSig) Hash() *big.Int { + h := secp256k1Suite.Hash() + _, _ = h.Write(ps.Partial.Hash(secp256k1Suite)) + _, _ = h.Write(ps.SessionID) + return (&big.Int{}).SetBytes(h.Sum(nil)) +} + +func findPub(list []kyber.Point, i int) (kyber.Point, bool) { + if i >= len(list) { + return nil, false + } + return list[i], true +} + +func sessionID(s Suite, a, b DistKeyShare) []byte { + h := s.Hash() + for _, p := range a.Commitments() { + _, _ = p.MarshalTo(h) + } + + for _, p := range b.Commitments() { + _, _ = p.MarshalTo(h) + } + + return h.Sum(nil) +} diff --git a/core/services/signatures/ethdss/ethdss_test.go b/core/services/signatures/ethdss/ethdss_test.go new file mode 100644 index 00000000..aa14ffd9 --- /dev/null +++ b/core/services/signatures/ethdss/ethdss_test.go @@ -0,0 +1,289 @@ +package clientdss + +import ( + "crypto/rand" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/cryptotest" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/ethschnorr" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + + "go.dedis.ch/kyber/v3" + 
dkg "go.dedis.ch/kyber/v3/share/dkg/rabin" +) + +var suite = secp256k1.NewBlakeKeccackSecp256k1() + +var nbParticipants = 7 +var t = nbParticipants/2 + 1 + +var partPubs []kyber.Point +var partSec []kyber.Scalar + +var longterms []*dkg.DistKeyShare +var randoms []*dkg.DistKeyShare + +var msg *big.Int + +var randomStream = cryptotest.NewStream(&testing.T{}, 0) + +func init() { + partPubs = make([]kyber.Point, nbParticipants) + partSec = make([]kyber.Scalar, nbParticipants) + for i := 0; i < nbParticipants; i++ { + kp := secp256k1.Generate(randomStream) + partPubs[i] = kp.Public + partSec[i] = kp.Private + } + // Corresponds to section 4.2, step 1 of Stinson, 2001 paper + longterms = genDistSecret(true) // Keep trying until valid public key + randoms = genDistSecret(false) + + var err error + msg, err = rand.Int(rand.Reader, big.NewInt(0).Lsh(big.NewInt(1), 256)) + if err != nil { + panic(err) + } +} + +func TestDSSNew(t *testing.T) { + dssArgs := DSSArgs{secret: partSec[0], participants: partPubs, + long: longterms[0], random: randoms[0], msg: msg, T: 4} + dss, err := NewDSS(dssArgs) + assert.NotNil(t, dss) + assert.Nil(t, err) + dssArgs.secret = suite.Scalar().Zero() + dss, err = NewDSS(dssArgs) + assert.Nil(t, dss) + assert.Error(t, err) +} + +func TestDSSPartialSigs(t *testing.T) { + dss0 := getDSS(0) + dss1 := getDSS(1) + ps0, err := dss0.PartialSig() + assert.Nil(t, err) + assert.NotNil(t, ps0) + assert.Len(t, dss0.partials, 1) + // second time should not affect list + ps0, err = dss0.PartialSig() + assert.Nil(t, err) + assert.NotNil(t, ps0) + assert.Len(t, dss0.partials, 1) + + // wrong index + goodI := ps0.Partial.I + ps0.Partial.I = 100 + err = dss1.ProcessPartialSig(ps0) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid index") + ps0.Partial.I = goodI + + // wrong sessionID + goodSessionID := ps0.SessionID + ps0.SessionID = []byte("ahhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh") + err = dss1.ProcessPartialSig(ps0) + assert.Error(t, err) + 
assert.Contains(t, err.Error(), "dss: session id") + ps0.SessionID = goodSessionID + + // wrong Signature + goodSig := ps0.Signature + ps0.Signature = ethschnorr.NewSignature() + copy(ps0.Signature.CommitmentPublicAddress[:], randomBytes(20)) + badSig := secp256k1.ToInt(suite.Scalar().Pick(randomStream)) + ps0.Signature.Signature.Set(badSig) + assert.Error(t, dss1.ProcessPartialSig(ps0)) + ps0.Signature = goodSig + + // invalid partial sig + goodV := ps0.Partial.V + ps0.Partial.V = suite.Scalar().Zero() + ps0.Signature, err = ethschnorr.Sign(dss0.secret, ps0.Hash()) + require.Nil(t, err) + err = dss1.ProcessPartialSig(ps0) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not valid") + ps0.Partial.V = goodV + ps0.Signature = goodSig + + // fine + err = dss1.ProcessPartialSig(ps0) + assert.Nil(t, err) + + // already received + assert.Error(t, dss1.ProcessPartialSig(ps0)) + + // if not enough partial signatures, can't generate signature + sig, err := dss1.Signature() + assert.Nil(t, sig) // XXX: Should also check err is nil? + assert.Error(t, err) + assert.Contains(t, err.Error(), "not enough") + + // enough partial sigs ? 
+ for i := 2; i < nbParticipants; i++ { + dss := getDSS(i) + ps, e := dss.PartialSig() + require.Nil(t, e) + require.Nil(t, dss1.ProcessPartialSig(ps)) + } + assert.True(t, dss1.EnoughPartialSig()) + sig, err = dss1.Signature() + assert.NoError(t, err) + assert.NoError(t, Verify(dss1.long.Commitments()[0], msg, sig)) +} + +var printTests = false + +func printTest(t *testing.T, msg *big.Int, public kyber.Point, + signature ethschnorr.Signature) { + pX, pY := secp256k1.Coordinates(public) + t.Logf(" ['%064x',\n '%064x',\n '%064x',\n '%064x',\n '%040x'],\n", + msg, pX, pY, signature.Signature, + signature.CommitmentPublicAddress) +} + +func TestDSSSignature(t *testing.T) { + dsss := make([]*DSS, nbParticipants) + pss := make([]*PartialSig, nbParticipants) + for i := 0; i < nbParticipants; i++ { + dsss[i] = getDSS(i) + ps, err := dsss[i].PartialSig() + require.Nil(t, err) + require.NotNil(t, ps) + pss[i] = ps + } + for i, dss := range dsss { + for j, ps := range pss { + if i == j { + continue + } + require.Nil(t, dss.ProcessPartialSig(ps)) + } + } + // issue and verify signature + dss0 := dsss[0] + sig, err := dss0.Signature() + assert.NotNil(t, sig) + assert.Nil(t, err) + assert.NoError(t, ethschnorr.Verify(longterms[0].Public(), dss0.msg, sig)) + // Original contains this second check. Unclear why. 
+ assert.NoError(t, ethschnorr.Verify(longterms[0].Public(), dss0.msg, sig)) + if printTests { + printTest(t, dss0.msg, dss0.long.Commitments()[0], sig) + } +} + +func TestPartialSig_Hash(t *testing.T) { + observedHashes := make(map[*big.Int]bool) + for i := 0; i < nbParticipants; i++ { + psig, err := getDSS(i).PartialSig() + require.NoError(t, err) + hash := psig.Hash() + require.False(t, observedHashes[hash]) + observedHashes[hash] = true + } +} + +func getDSS(i int) *DSS { + dss, err := NewDSS(DSSArgs{secret: partSec[i], participants: partPubs, + long: longterms[i], random: randoms[i], msg: msg, T: t}) + if dss == nil || err != nil { + panic("nil dss") + } + return dss +} + +func _genDistSecret() []*dkg.DistKeyShare { + dkgs := make([]*dkg.DistKeyGenerator, nbParticipants) + for i := 0; i < nbParticipants; i++ { + dkg, err := dkg.NewDistKeyGenerator(suite, partSec[i], partPubs, nbParticipants/2+1) + if err != nil { + panic(err) + } + dkgs[i] = dkg + } + // full secret sharing exchange + // 1. broadcast deals + resps := make([]*dkg.Response, 0, nbParticipants*nbParticipants) + for _, dkg := range dkgs { + deals, err := dkg.Deals() + if err != nil { + panic(err) + } + for i, d := range deals { + resp, err := dkgs[i].ProcessDeal(d) + if err != nil { + panic(err) + } + if !resp.Response.Approved { + panic("wrong approval") + } + resps = append(resps, resp) + } + } + // 2. Broadcast responses + for _, resp := range resps { + for h, dkg := range dkgs { + // ignore all messages from ourself + if resp.Response.Index == uint32(h) { + continue + } + j, err := dkg.ProcessResponse(resp) + if err != nil || j != nil { + panic("wrongProcessResponse") + } + } + } + // 4. 
Broadcast secret commitment + for i, dkg := range dkgs { + scs, err := dkg.SecretCommits() + if err != nil { + panic("wrong SecretCommits") + } + for j, dkg2 := range dkgs { + if i == j { + continue + } + cc, err := dkg2.ProcessSecretCommits(scs) + if err != nil || cc != nil { + panic("wrong ProcessSecretCommits") + } + } + } + + // 5. reveal shares + dkss := make([]*dkg.DistKeyShare, len(dkgs)) + for i, dkg := range dkgs { + dks, err := dkg.DistKeyShare() + if err != nil { + panic(err) + } + dkss[i] = dks + } + return dkss + +} + +func genDistSecret(checkValidPublicKey bool) []*dkg.DistKeyShare { + rv := _genDistSecret() + if checkValidPublicKey { + // Because of the trick we're using to verify the signatures on-chain, we + // need to make sure that the ordinate of this distributed public key is + // in the lower half of {0,...,} + for !secp256k1.ValidPublicKey(rv[0].Public()) { + rv = _genDistSecret() // Keep trying until valid distributed public key. + } + } + return rv +} + +func randomBytes(n int) []byte { + var buff = make([]byte, n) + _, _ = rand.Read(buff[:]) + return buff +} diff --git a/core/services/signatures/ethschnorr/ethschnorr.go b/core/services/signatures/ethschnorr/ethschnorr.go new file mode 100644 index 00000000..21d251d3 --- /dev/null +++ b/core/services/signatures/ethschnorr/ethschnorr.go @@ -0,0 +1,154 @@ +// Package ethschnorr implements a version of the Schnorr signature which is +// ////////////////////////////////////////////////////////////////////////////// +// +// XXX: Do not use in production until this code has been audited. +// +// ////////////////////////////////////////////////////////////////////////////// +// cheap to verify on-chain. +// +// See https://en.wikipedia.org/wiki/Schnorr_signature For vanilla Schnorr. +// +// Since we are targeting ethereum specifically, there is no need to abstract +// away the group operations, as original kyber Schnorr code does. 
Thus, these +// functions only work with secp256k1 objects, even though they are expressed in +// terms of the abstract kyber Group interfaces. +// +// This code is largely based on EPFL-DEDIS's go.dedis.ch/kyber/sign/schnorr +package ethschnorr + +import ( + "bytes" + "fmt" + "math/big" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + + "go.dedis.ch/kyber/v3" +) + +var secp256k1Suite = secp256k1.NewBlakeKeccackSecp256k1() +var secp256k1Group kyber.Group = secp256k1Suite + +type signature = struct { + CommitmentPublicAddress [20]byte + Signature *big.Int +} + +// Signature is a representation of the Schnorr signature generated and verified +// by this library. +type Signature = *signature + +func i() *big.Int { return big.NewInt(0) } + +var one = big.NewInt(1) +var u256Cardinality = i().Lsh(one, 256) +var maxUint256 = i().Sub(u256Cardinality, one) + +// NewSignature allocates space for a Signature, and returns it +func NewSignature() Signature { return &signature{Signature: i()} } + +var zero = i() + +// ValidSignature(s) is true iff s.Signature represents an element of secp256k1 +func ValidSignature(s Signature) bool { + return s.Signature.Cmp(secp256k1.GroupOrder) == -1 && + s.Signature.Cmp(zero) != -1 +} + +// ChallengeHash returns the value the signer must use to demonstrate knowledge +// of the secret key +// +// NB: for parity with the on-chain hash, it's important that public and r +// marshall to the big-endian x ordinate, followed by a byte which is 0 if the y +// ordinate is even, 1 if it's odd. 
See evm/contracts/SchnorrSECP256K1.sol and +// evm/test/schnorr_test.js +func ChallengeHash(public kyber.Point, rAddress [20]byte, msg *big.Int) ( + kyber.Scalar, error) { + var err error + h := secp256k1Suite.Hash() + if _, herr := public.MarshalTo(h); herr != nil { + err = fmt.Errorf("failed to hash public key for signature: %s", herr) + } + if err != nil && (msg.BitLen() > 256 || msg.Cmp(zero) == -1) { + err = fmt.Errorf("msg must be a uint256") + } + if err == nil { + if _, herr := h.Write(msg.Bytes()); herr != nil { + err = fmt.Errorf("failed to hash message for signature: %s", herr) + } + } + if err == nil { + if _, herr := h.Write(rAddress[:]); herr != nil { + err = fmt.Errorf("failed to hash r for signature: %s", herr) + } + } + if err != nil { + return nil, err + } + return secp256k1Suite.Scalar().SetBytes(h.Sum(nil)), nil +} + +// Sign creates a signature from a msg and a private key. Verify with the +// function Verify, or on-chain with SchnorrSECP256K1.sol. +func Sign(private kyber.Scalar, msg *big.Int) (Signature, error) { + if !secp256k1.IsSecp256k1Scalar(private) { + return nil, fmt.Errorf("private key is not a secp256k1 scalar") + } + // create random secret and public commitment to it + commitmentSecretKey := secp256k1Group.Scalar().Pick( + secp256k1Suite.RandomStream()) + commitmentPublicKey := secp256k1Group.Point().Mul(commitmentSecretKey, nil) + commitmentPublicAddress := secp256k1.EthereumAddress(commitmentPublicKey) + + public := secp256k1Group.Point().Mul(private, nil) + challenge, err := ChallengeHash(public, commitmentPublicAddress, msg) + if err != nil { + return nil, err + } + // commitmentSecretKey-private*challenge + s := secp256k1Group.Scalar().Sub(commitmentSecretKey, + secp256k1Group.Scalar().Mul(private, challenge)) + rv := signature{commitmentPublicAddress, secp256k1.ToInt(s)} + return &rv, nil +} + +// Verify verifies the given Schnorr signature. It returns true iff the +// signature is valid. 
+func Verify(public kyber.Point, msg *big.Int, s Signature) error { + var err error + if !ValidSignature(s) { + err = fmt.Errorf("s is not a valid signature") + } + if err == nil && !secp256k1.IsSecp256k1Point(public) { + err = fmt.Errorf("public key is not a secp256k1 point") + } + if err == nil && !secp256k1.ValidPublicKey(public) { + err = fmt.Errorf("`public` is not a valid public key") + } + if err == nil && (msg.Cmp(zero) == -1 || msg.Cmp(maxUint256) == 1) { + err = fmt.Errorf("msg is not a uint256") + } + var challenge kyber.Scalar + var herr error + if err == nil { + challenge, herr = ChallengeHash(public, s.CommitmentPublicAddress, msg) + if herr != nil { + err = herr + } + } + if err != nil { + return err + } + sigScalar := secp256k1.IntToScalar(s.Signature) + // s*g + challenge*public = s*g + challenge*(secretKey*g) = + // commitmentSecretKey*g = commitmentPublicKey + maybeCommitmentPublicKey := secp256k1Group.Point().Add( + secp256k1Group.Point().Mul(sigScalar, nil), + secp256k1Group.Point().Mul(challenge, public)) + maybeCommitmentPublicAddress := secp256k1.EthereumAddress(maybeCommitmentPublicKey) + if !bytes.Equal(s.CommitmentPublicAddress[:], + maybeCommitmentPublicAddress[:]) { + return fmt.Errorf("signature mismatch") + } + return nil +} diff --git a/core/services/signatures/ethschnorr/ethschnorr_test.go b/core/services/signatures/ethschnorr/ethschnorr_test.go new file mode 100644 index 00000000..c6b16a48 --- /dev/null +++ b/core/services/signatures/ethschnorr/ethschnorr_test.go @@ -0,0 +1,113 @@ +package ethschnorr + +// This code is largely based on go.dedis.ch/kyber/sign/schnorr_test from +// EPFL's DEDIS + +import ( + crand "crypto/rand" + "math/big" + mrand "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "go.dedis.ch/kyber/v3" + "go.dedis.ch/kyber/v3/group/curve25519" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/cryptotest" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" +) + 
+var numSignatures = 5 + +var randomStream = cryptotest.NewStream(&testing.T{}, 0) + +var printTests = false + +func printTest(t *testing.T, msg *big.Int, private kyber.Scalar, + public kyber.Point, signature Signature) { + privateBytes, err := private.MarshalBinary() + require.Nil(t, err) + pX, pY := secp256k1.Coordinates(public) + t.Logf(" ['%064x',\n '%064x',\n '%064x',\n '%064x',\n "+ + "'%064x',\n '%040x'],\n", + msg, privateBytes, pX, pY, signature.Signature, + signature.CommitmentPublicAddress) +} + +func TestShortSchnorr_SignAndVerify(t *testing.T) { + if printTests { + t.Log("tests = [\n") + } + for i := 0; i < numSignatures; i++ { + rand := mrand.New(mrand.NewSource(0)) + msg, err := crand.Int(rand, maxUint256) + require.NoError(t, err) + kp := secp256k1.Generate(randomStream) + sig, err := Sign(kp.Private, msg) + require.NoError(t, err, "failed to sign message") + require.NoError(t, Verify(kp.Public, msg, sig), + "failed to validate own signature") + require.Error(t, Verify(kp.Public, u256Cardinality, sig), + "failed to abort on too large a message") + require.Error(t, Verify(kp.Public, big.NewInt(0).Neg(big.NewInt(1)), sig), + "failed to abort on negative message") + if printTests { + printTest(t, msg, kp.Private, kp.Public, sig) + } + wrongMsg := big.NewInt(0).Add(msg, big.NewInt(1)) + require.Error(t, Verify(kp.Public, wrongMsg, sig), + "failed to reject signature with bad message") + wrongPublic := secp256k1Group.Point().Add(kp.Public, kp.Public) + require.Error(t, Verify(wrongPublic, msg, sig), + "failed to reject signature with bad public key") + wrongSignature := &signature{ + CommitmentPublicAddress: sig.CommitmentPublicAddress, + Signature: big.NewInt(0).Add(sig.Signature, one), + } + require.Error(t, Verify(kp.Public, msg, wrongSignature), + "failed to reject bad signature") + badPublicCommitmentAddress := &signature{Signature: sig.Signature} + copy(badPublicCommitmentAddress.CommitmentPublicAddress[:], + sig.CommitmentPublicAddress[:]) + 
badPublicCommitmentAddress.CommitmentPublicAddress[0] ^= 1 // Corrupt it + require.Error(t, Verify(kp.Public, msg, badPublicCommitmentAddress), + "failed to reject signature with bad public commitment") + } + if printTests { + t.Log("]") + } + // Check other validations + edSuite := curve25519.NewBlakeSHA256Curve25519(false) + badScalar := edSuite.Scalar() + _, err := Sign(badScalar, i()) + require.Error(t, err) + require.Contains(t, err.Error(), "not a secp256k1 scalar") + err = Verify(edSuite.Point(), i(), NewSignature()) + require.Error(t, err) + require.Contains(t, err.Error(), "not a secp256k1 point") + err = Verify(secp256k1Suite.Point(), i(), &signature{Signature: big.NewInt(-1)}) + require.Error(t, err) + require.Contains(t, err.Error(), "not a valid signature") + err = Verify(secp256k1Suite.Point(), i(), &signature{Signature: u256Cardinality}) + require.Error(t, err) + require.Contains(t, err.Error(), "not a valid signature") +} + +func TestShortSchnorr_NewSignature(t *testing.T) { + s := NewSignature() + require.Equal(t, s.Signature, big.NewInt(0)) +} + +func TestShortSchnorr_ChallengeHash(t *testing.T) { + point := secp256k1Group.Point() + var hash [20]byte + h, err := ChallengeHash(point, hash, big.NewInt(-1)) + require.Nil(t, h) + require.Error(t, err) + require.Contains(t, err.Error(), "msg must be a uint256") + h, err = ChallengeHash(point, hash, u256Cardinality) + require.Nil(t, h) + require.Error(t, err) + require.Contains(t, err.Error(), "msg must be a uint256") +} diff --git a/core/services/signatures/secp256k1/curve.go b/core/services/signatures/secp256k1/curve.go new file mode 100644 index 00000000..70187e68 --- /dev/null +++ b/core/services/signatures/secp256k1/curve.go @@ -0,0 +1,46 @@ +// Package secp256k1 is an implementation of the kyber.{Group,Point,Scalar} +// ////////////////////////////////////////////////////////////////////////////// +// +// XXX: Do not use in production until this code has been audited. 
+// +// ////////////////////////////////////////////////////////////////////////////// +// interfaces, based on btcd/btcec and kyber/group/mod +// +// XXX: NOT CONSTANT TIME! +package secp256k1 + +import ( + "math/big" + + secp256k1BTCD "github.com/btcsuite/btcd/btcec/v2" + + "go.dedis.ch/kyber/v3" +) + +// Secp256k1 represents the secp256k1 group. +// There are no parameters and no initialization is required +// because it supports only this one specific curve. +type Secp256k1 struct{} + +// s256 is the btcec representation of secp256k1. +var s256 *secp256k1BTCD.KoblitzCurve = secp256k1BTCD.S256() + +// String returns the name of the curve +func (*Secp256k1) String() string { return "Secp256k1" } + +var egScalar kyber.Scalar = newScalar(big.NewInt(0)) +var egPoint kyber.Point = &secp256k1Point{newFieldZero(), newFieldZero()} + +// ScalarLen returns the length of a marshalled Scalar +func (*Secp256k1) ScalarLen() int { return egScalar.MarshalSize() } + +// Scalar creates a new Scalar for the prime-order group on the secp256k1 curve +func (*Secp256k1) Scalar() kyber.Scalar { return newScalar(big.NewInt(0)) } + +// PointLen returns the length of a marshalled Point +func (*Secp256k1) PointLen() int { return egPoint.MarshalSize() } + +// Point returns a new secp256k1 point +func (*Secp256k1) Point() kyber.Point { + return &secp256k1Point{newFieldZero(), newFieldZero()} +} diff --git a/core/services/signatures/secp256k1/curve_test.go b/core/services/signatures/secp256k1/curve_test.go new file mode 100644 index 00000000..e4802ea3 --- /dev/null +++ b/core/services/signatures/secp256k1/curve_test.go @@ -0,0 +1,20 @@ +package secp256k1 + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +var group = &Secp256k1{} + +func TestSecp256k1_String(t *testing.T) { + require.Equal(t, group.String(), "Secp256k1") +} + +func TestSecp256k1_Constructors(t *testing.T) { + require.Equal(t, group.ScalarLen(), 32) + require.Equal(t, ToInt(group.Scalar()), bigZero) + 
require.Equal(t, group.PointLen(), 33) + require.Equal(t, group.Point(), &secp256k1Point{fieldZero, fieldZero}) +} diff --git a/core/services/signatures/secp256k1/field.go b/core/services/signatures/secp256k1/field.go new file mode 100644 index 00000000..bf9652e5 --- /dev/null +++ b/core/services/signatures/secp256k1/field.go @@ -0,0 +1,169 @@ +// Package secp256k1 is an implementation of the kyber.{Group,Point,Scalar} +// ////////////////////////////////////////////////////////////////////////////// +// +// XXX: Do not use in production until this code has been audited. +// +// ////////////////////////////////////////////////////////////////////////////// +// interfaces, based on btcd/btcec and kyber/group/mod +// +// XXX: NOT CONSTANT TIME! +package secp256k1 + +// Arithmetic operations in the base field of secp256k1, i.e. ℤ/qℤ, where q is +// the base field characteristic. + +import ( + "crypto/cipher" + "fmt" + "math/big" + + "go.dedis.ch/kyber/v3/util/random" +) + +// q is the field characteristic (cardinality) of the secp256k1 base field. All +// arithmetic operations on the field are modulo this. +var q = s256.P + +type fieldElt big.Int + +// newFieldZero returns a newly allocated field element. +func newFieldZero() *fieldElt { return (*fieldElt)(big.NewInt(0)) } + +// Int returns f as a big.Int +func (f *fieldElt) int() *big.Int { return (*big.Int)(f) } + +// modQ reduces f's underlying big.Int modulo q, and returns it +func (f *fieldElt) modQ() *fieldElt { + if f.int().Cmp(q) != -1 || f.int().Cmp(bigZero) == -1 { + // f ∉ {0, ..., q-1}. Find the representative of f+qℤ in that set. + // + // Per Mod docstring, "Mod implements Euclidean modulus", meaning that after + // this, f will be the smallest non-negative representative of its + // equivalence class in ℤ/qℤ. TODO(alx): Make this faster + f.int().Mod(f.int(), q) + } + return f +} + +// This differs from SetInt below, in that it does not take a copy of v. 
+func fieldEltFromBigInt(v *big.Int) *fieldElt { return (*fieldElt)(v).modQ() } + +func fieldEltFromInt(v int64) *fieldElt { + return fieldEltFromBigInt(big.NewInt(v)).modQ() +} + +var fieldZero = fieldEltFromInt(0) +var bigZero = big.NewInt(0) + +// String returns the string representation of f +func (f *fieldElt) String() string { + return fmt.Sprintf("fieldElt{%x}", f.int()) +} + +// Equal returns true iff f=g, i.e. the backing big.Ints satisfy f ≡ g mod q +func (f *fieldElt) Equal(g *fieldElt) bool { + if f == (*fieldElt)(nil) && g == (*fieldElt)(nil) { + return true + } + if f == (*fieldElt)(nil) { // f is nil, g is not + return false + } + if g == (*fieldElt)(nil) { // g is nil, f is not + return false + } + return bigZero.Cmp(newFieldZero().Sub(f, g).modQ().int()) == 0 +} + +// Add sets f to the sum of a and b modulo q, and returns it. +func (f *fieldElt) Add(a, b *fieldElt) *fieldElt { + f.int().Add(a.int(), b.int()) + return f.modQ() +} + +// Sub sets f to a-b mod q, and returns it. +func (f *fieldElt) Sub(a, b *fieldElt) *fieldElt { + f.int().Sub(a.int(), b.int()) + return f.modQ() +} + +// Set sets f's value to v, and returns f. +func (f *fieldElt) Set(v *fieldElt) *fieldElt { + f.int().Set(v.int()) + return f.modQ() +} + +// SetInt sets f's value to v mod q, and returns f. 
+func (f *fieldElt) SetInt(v *big.Int) *fieldElt { + f.int().Set(v) + return f.modQ() +} + +// Pick samples uniformly from {0, ..., q-1}, assigns sample to f, and returns f +func (f *fieldElt) Pick(rand cipher.Stream) *fieldElt { + return f.SetInt(random.Int(q, rand)) // random.Int safe because q≅2²⁵⁶, q<2²⁵⁶ +} + +// Neg sets f to the negation of g modulo q, and returns it +func (f *fieldElt) Neg(g *fieldElt) *fieldElt { + f.int().Neg(g.int()) + return f.modQ() +} + +// Clone returns a new fieldElt, backed by a clone of f +func (f *fieldElt) Clone() *fieldElt { return newFieldZero().Set(f.modQ()) } + +// SetBytes sets f to the 32-byte big-endian value represented by buf, reduces +// it, and returns it. +func (f *fieldElt) SetBytes(buf [32]byte) *fieldElt { + f.int().SetBytes(buf[:]) + return f.modQ() +} + +// Bytes returns the 32-byte big-endian representation of f +func (f *fieldElt) Bytes() [32]byte { + bytes := f.modQ().int().Bytes() + if len(bytes) > 32 { + panic("field element longer than 256 bits") + } + var rv [32]byte + copy(rv[32-len(bytes):], bytes) // leftpad w zeros + return rv +} + +var two = big.NewInt(2) + +// square returns y² mod q +func fieldSquare(y *fieldElt) *fieldElt { + return fieldEltFromBigInt(newFieldZero().int().Exp(y.int(), two, q)) +} + +func i() *big.Int { return new(big.Int) } + +// sqrtPower is s.t. n^sqrtPower≡sqrt(n) mod q, if n has a root at all. 
See +// https://math.stackexchange.com/a/1816280, for instance +var sqrtPower = i().Rsh(i().Add(q, big.NewInt(1)), 2) // (q +1)/4 + +// maybeSqrtInField returns a square root of v, if it has any, else nil +func maybeSqrtInField(v *fieldElt) *fieldElt { + s := newFieldZero() + s.int().Exp(v.int(), sqrtPower, q) + if !fieldSquare(s).Equal(v) { + return nil + } + return s +} + +var three = big.NewInt(3) +var seven = fieldEltFromInt(7) + +// rightHandSide returns the RHS of the secp256k1 equation, x³+7 mod q, given x +func rightHandSide(x *fieldElt) *fieldElt { + xCubed := newFieldZero() + xCubed.int().Exp(x.int(), three, q) + return xCubed.Add(xCubed, seven) +} + +// isEven returns true if f is even, false otherwise +func (f *fieldElt) isEven() bool { + return big.NewInt(0).Mod(f.int(), two).Cmp(big.NewInt(0)) == 0 +} diff --git a/core/services/signatures/secp256k1/field_test.go b/core/services/signatures/secp256k1/field_test.go new file mode 100644 index 00000000..56bffda7 --- /dev/null +++ b/core/services/signatures/secp256k1/field_test.go @@ -0,0 +1,159 @@ +package secp256k1 + +import ( + "encoding/hex" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/cryptotest" +) + +var numFieldSamples = 10 + +var observedFieldElts map[string]bool + +func init() { + observedFieldElts = make(map[string]bool) +} + +// observedFieldElt ensures that novel scalars are being picked. 
+func observedFieldElt(t *testing.T, s *fieldElt) { + elt := s.Bytes() + data := hex.Dump(elt[:]) + require.False(t, observedFieldElts[data]) + observedFieldElts[data] = true +} + +var randomStream = cryptotest.NewStream(&testing.T{}, 0) + +func TestField_SetIntAndEqual(t *testing.T) { + tests := []int64{5, 67108864, 67108865, 4294967295} + g := newFieldZero() + for _, test := range tests { + f := fieldEltFromInt(test) + i := big.NewInt(test) + g.SetInt(i) + assert.Equal(t, f, g, + "different values obtained for same input, using "+ + "SetInt vs fieldEltFromInt") + i.Add(i, big.NewInt(1)) + assert.Equal(t, f, g, + "SetInt should take a copy of the backing big.Int") + } +} + +func TestField_String(t *testing.T) { + require.Equal(t, fieldZero.String(), "fieldElt{0}") +} + +func TestField_Equal(t *testing.T) { + require.True(t, (*fieldElt)(nil).Equal((*fieldElt)(nil))) + require.False(t, (*fieldElt)(nil).Equal(fieldZero)) + require.False(t, fieldZero.Equal((*fieldElt)(nil))) +} + +func TestField_Set(t *testing.T) { + f := fieldEltFromInt(1) + g := newFieldZero() + g.Set(f) + g.Add(g, fieldEltFromInt(1)) + assert.Equal(t, f, fieldEltFromInt(1), + "Set takes a copy of the backing big.Int") +} + +func TestFieldEltFromInt(t *testing.T) { + assert.Equal(t, fieldEltFromInt(1), // Also tests fieldElt.modQ + fieldEltFromBigInt(new(big.Int).Add(q, big.NewInt(1))), + "only one representation of a ℤ/qℤ element should be used") +} + +func TestField_SmokeTestPick(t *testing.T) { + f := newFieldZero() + f.Pick(randomStream) + observedFieldElt(t, f) + assert.True(t, f.int().Cmp(big.NewInt(1000000000)) == 1, + "should be greater than 1000000000, with very high probability") +} + +func TestField_Neg(t *testing.T) { + f := newFieldZero() + for i := 0; i < numFieldSamples; i++ { + f.Pick(randomStream) + observedFieldElt(t, f) + g := f.Clone() + g.Neg(g) + require.True(t, g.Add(f, g).Equal(fieldZero), + "adding something to its negative should give zero: "+ + "failed with %s", f) + } +} 
+ +func TestField_Sub(t *testing.T) { + f := newFieldZero() + for i := 0; i < numFieldSamples; i++ { + f.Pick(randomStream) + observedFieldElt(t, f) + require.True(t, f.Sub(f, f).Equal(fieldZero), + "subtracting something from itself should give zero: "+ + "failed with %s", f) + } +} + +func TestField_Clone(t *testing.T) { + f := fieldEltFromInt(1) + g := f.Clone() + h := f.Clone() + assert.Equal(t, f, g, "clone output does not equal original") + g.Add(f, f) + assert.Equal(t, f, h, "clone does not make a copy") + +} + +func TestField_SetBytesAndBytes(t *testing.T) { + f := newFieldZero() + g := newFieldZero() + for i := 0; i < numFieldSamples; i++ { + f.Pick(randomStream) + observedFieldElt(t, f) + g.SetBytes(f.Bytes()) + require.True(t, g.Equal(f), + "roundtrip through serialization should give same "+ + "result back: failed with %s", f) + } +} + +func TestField_MaybeSquareRootInField(t *testing.T) { + f := newFieldZero() + minusOne := fieldEltFromInt(-1) + assert.Nil(t, maybeSqrtInField(minusOne), "-1 is not a square, in this field") + for i := 0; i < numFieldSamples; i++ { + f.Pick(randomStream) + observedFieldElt(t, f) + require.True(t, f.int().Cmp(q) == -1, "picked larger value than q: %s", f) + require.True(t, f.int().Cmp(big.NewInt(-1)) != -1, + "backing int must be non-negative") + s := fieldSquare(f) + g := maybeSqrtInField(s) + require.NotEqual(t, g, (*fieldElt)(nil)) + ng := newFieldZero().Neg(g) + require.True(t, f.Equal(g) || f.Equal(ng), "squaring something and "+ + "taking the square root should give ± the original: failed with %s", f) + bigIntSqrt := newFieldZero() // Cross-check against big.ModSqrt + rv := bigIntSqrt.int().ModSqrt(s.int(), q) + require.NotNil(t, rv) + require.True(t, bigIntSqrt.Equal(g) || bigIntSqrt.Equal(ng)) + nonSquare := newFieldZero().Neg(s) + rv = bigIntSqrt.int().ModSqrt(nonSquare.int(), q) + require.Nil(t, rv, "ModSqrt indicates nonSquare is square") + require.Nil(t, maybeSqrtInField(nonSquare), "the negative of square "+ 
+ "should not be a square") + } +} + +func TestField_RightHandSide(t *testing.T) { + assert.Equal(t, rightHandSide(fieldEltFromInt(1)), fieldEltFromInt(8)) + assert.Equal(t, rightHandSide(fieldEltFromInt(2)), fieldEltFromInt(15)) +} diff --git a/core/services/signatures/secp256k1/point.go b/core/services/signatures/secp256k1/point.go new file mode 100644 index 00000000..06a5f467 --- /dev/null +++ b/core/services/signatures/secp256k1/point.go @@ -0,0 +1,381 @@ +// Package secp256k1 is an implementation of the kyber.{Group,Point,Scalar} +// ////////////////////////////////////////////////////////////////////////////// +// +// XXX: Do not use in production until this code has been audited. +// +// ////////////////////////////////////////////////////////////////////////////// +// interfaces, based on btcd/btcec and kyber/group/mod +// +// XXX: NOT CONSTANT TIME! +package secp256k1 + +// Implementation of kyber.Point interface for elliptic-curve arithmetic +// operations on secpk256k1. +// +// This is mostly a wrapper of the functionality provided by btcec + +import ( + "crypto/cipher" + "fmt" + "io" + "math/big" + + "go.dedis.ch/kyber/v3" + "go.dedis.ch/kyber/v3/util/key" + "golang.org/x/crypto/sha3" +) + +// btcec's public interface uses this affine representation for points on the +// curve. This does not naturally accommodate the point at infinity. btcec +// represents it as (0, 0), which is not a point on {y²=x³+7}. +type secp256k1Point struct { + X *fieldElt + Y *fieldElt +} + +func newPoint() *secp256k1Point { + return &secp256k1Point{newFieldZero(), newFieldZero()} +} + +// String returns a string representation of P +func (P *secp256k1Point) String() string { + return fmt.Sprintf("Secp256k1{X: %s, Y: %s}", P.X, P.Y) +} + +// Equal returns true if p and pPrime represent the same point, false otherwise. 
+func (P *secp256k1Point) Equal(pPrime kyber.Point) bool { + return P.X.Equal(pPrime.(*secp256k1Point).X) && + P.Y.Equal(pPrime.(*secp256k1Point).Y) +} + +// Null sets p to the group-identity value, and returns it. +func (P *secp256k1Point) Null() kyber.Point { + P.X = fieldEltFromInt(0) // btcec representation of null point is (0,0) + P.Y = fieldEltFromInt(0) + return P +} + +// Base sets p to a copy of the standard group generator, and returns it. +func (P *secp256k1Point) Base() kyber.Point { + P.X.SetInt(s256.Gx) + P.Y.SetInt(s256.Gy) + return P +} + +// Pick sets P to a random point sampled from rand, and returns it. +func (P *secp256k1Point) Pick(rand cipher.Stream) kyber.Point { + for { // Keep trying X's until one fits the curve (~50% probability of + // success each iteration + P.X.Set(newFieldZero().Pick(rand)) + maybeRHS := rightHandSide(P.X) + if maybeY := maybeSqrtInField(maybeRHS); maybeY != (*fieldElt)(nil) { + P.Y.Set(maybeY) + // Take the negative with 50% probability + b := make([]byte, 1) + rand.XORKeyStream(b, b) + if b[0]&1 == 0 { + P.Y.Neg(P.Y) + } + return P + } + } +} + +// Set sets P to copies of pPrime's values, and returns it. +func (P *secp256k1Point) Set(pPrime kyber.Point) kyber.Point { + P.X.Set(pPrime.(*secp256k1Point).X) + P.Y.Set(pPrime.(*secp256k1Point).Y) + return P +} + +// Clone returns a copy of P. +func (P *secp256k1Point) Clone() kyber.Point { + return &secp256k1Point{X: P.X.Clone(), Y: P.Y.Clone()} +} + +// EmbedLen returns the number of bytes of data which can be embedded in a point. +func (*secp256k1Point) EmbedLen() int { + // Reserve the most-significant 8 bits for pseudo-randomness. + // Reserve the least-significant 8 bits for embedded data length. + return (255 - 8 - 8) / 8 +} + +// Embed encodes a limited amount of specified data in the Point, using r as a +// source of cryptographically secure random data. Implementations only embed +// the first EmbedLen bytes of the given data. 
+func (P *secp256k1Point) Embed(data []byte, r cipher.Stream) kyber.Point { + numEmbedBytes := P.EmbedLen() + if len(data) > numEmbedBytes { + panic("too much data to embed in a point") + } + numEmbedBytes = len(data) + var x [32]byte + randStart := 1 // First byte to fill with random data + if data != nil { + x[0] = byte(numEmbedBytes) // Encode length in low 8 bits + copy(x[1:1+numEmbedBytes], data) // Copy in data to embed + randStart = 1 + numEmbedBytes + } + maxAttempts := 10000 + // Try random x ordinates satisfying the constraints, until one provides + // a point on secp256k1 + for numAttempts := 0; numAttempts < maxAttempts; numAttempts++ { + // Fill the rest of the x ordinate with random data + r.XORKeyStream(x[randStart:], x[randStart:]) + xOrdinate := newFieldZero().SetBytes(x) + // RHS of secp256k1 equation is x³+7 mod p. Success if square. + // We optimistically don't use btcec.IsOnCurve, here, because we + // hope to assign the intermediate result maybeY to P.Y + secp256k1RHS := rightHandSide(xOrdinate) + if maybeY := maybeSqrtInField(secp256k1RHS); maybeY != (*fieldElt)(nil) { + P.X = xOrdinate // success: found (x,y) s.t. y²=x³+7 + P.Y = maybeY + return P + } + } + // Probability 2^{-maxAttempts}, under correct operation. + panic("failed to find point satisfying all constraints") +} + +// Data returns data embedded in P, or an error if inconsistent with encoding +func (P *secp256k1Point) Data() ([]byte, error) { + b := P.X.Bytes() + dataLength := int(b[0]) + if dataLength > P.EmbedLen() { + return nil, fmt.Errorf("point specifies too much data") + } + return b[1 : dataLength+1], nil +} + +// Add sets P to a+b (secp256k1 group operation) and returns it. 
+func (P *secp256k1Point) Add(a, b kyber.Point) kyber.Point {
+	X, Y := s256.Add(
+		a.(*secp256k1Point).X.int(), a.(*secp256k1Point).Y.int(),
+		b.(*secp256k1Point).X.int(), b.(*secp256k1Point).Y.int())
+	P.X.SetInt(X)
+	P.Y.SetInt(Y)
+	return P
+}
+
+// Sub sets P to a-b (secp256k1 group operation), and returns it.
+func (P *secp256k1Point) Sub(a, b kyber.Point) kyber.Point {
+	X, Y := s256.Add(
+		a.(*secp256k1Point).X.int(), a.(*secp256k1Point).Y.int(),
+		b.(*secp256k1Point).X.int(),
+		newFieldZero().Neg(b.(*secp256k1Point).Y).int()) // -b_y
+	P.X.SetInt(X)
+	P.Y.SetInt(Y)
+	return P
+}
+
+// Neg sets P to -a (in the secp256k1 group), and returns it.
+func (P *secp256k1Point) Neg(a kyber.Point) kyber.Point {
+	P.X = a.(*secp256k1Point).X.Clone()
+	P.Y = newFieldZero().Neg(a.(*secp256k1Point).Y)
+	return P
+}
+
+// Mul sets P to s*a (in the secp256k1 group, i.e. adding a to itself s times),
+// and returns it. If a is nil, it is replaced by the secp256k1 generator.
+func (P *secp256k1Point) Mul(s kyber.Scalar, a kyber.Point) kyber.Point {
+	sBytes, err := s.(*secp256k1Scalar).MarshalBinary()
+	if err != nil {
+		panic(fmt.Errorf("failure while marshaling multiplier: %s",
+			err))
+	}
+	var X, Y *big.Int
+	if a == (*secp256k1Point)(nil) || a == nil {
+		X, Y = s256.ScalarBaseMult(sBytes)
+	} else {
+		X, Y = s256.ScalarMult(a.(*secp256k1Point).X.int(),
+			a.(*secp256k1Point).Y.int(), sBytes)
+	}
+	P.X.SetInt(X)
+	P.Y.SetInt(Y)
+	return P
+}
+
+// MarshalBinary returns the concatenated big-endian representation of the X
+// ordinate and a byte which is 0 if Y is even, 1 if it's odd. Or it returns an
+// error on failure.
+func (P *secp256k1Point) MarshalBinary() ([]byte, error) { + maybeSqrt := maybeSqrtInField(rightHandSide(P.X)) + if maybeSqrt == (*fieldElt)(nil) { + return nil, fmt.Errorf("x³+7 not a square") + } + minusMaybeSqrt := newFieldZero().Neg(maybeSqrt) + if !P.Y.Equal(maybeSqrt) && !P.Y.Equal(minusMaybeSqrt) { + return nil, fmt.Errorf( + "y ≠ ±maybeSqrt(x³+7), so not a point on the curve") + } + rv := make([]byte, P.MarshalSize()) + signByte := P.MarshalSize() - 1 // Last byte contains sign of Y. + xordinate := P.X.Bytes() + copyLen := copy(rv[:signByte], xordinate[:]) + if copyLen != P.MarshalSize()-1 { + return []byte{}, fmt.Errorf("marshal of x ordinate too short") + } + if P.Y.isEven() { + rv[signByte] = 0 + } else { + rv[signByte] = 1 + } + return rv, nil +} + +// MarshalSize returns the length of the byte representation of P +func (P *secp256k1Point) MarshalSize() int { return 33 } + +// MarshalID returns the ID for a secp256k1 point +func (P *secp256k1Point) MarshalID() [8]byte { + return [8]byte{'s', 'p', '2', '5', '6', '.', 'p', 'o'} +} + +// UnmarshalBinary sets P to the point represented by contents of buf, or +// returns an non-nil error +func (P *secp256k1Point) UnmarshalBinary(buf []byte) error { + var err error + if len(buf) != P.MarshalSize() { + err = fmt.Errorf("wrong length for marshaled point") + } + if err == nil && !(buf[32] == 0 || buf[32] == 1) { + err = fmt.Errorf("bad sign byte (the last one)") + } + if err != nil { + return err + } + var xordinate [32]byte + copy(xordinate[:], buf[:32]) + P.X = newFieldZero().SetBytes(xordinate) + secp256k1RHS := rightHandSide(P.X) + maybeY := maybeSqrtInField(secp256k1RHS) + if maybeY == (*fieldElt)(nil) { + return fmt.Errorf("x ordinate does not correspond to a curve point") + } + isEven := maybeY.isEven() + P.Y.Set(maybeY) + if (buf[32] == 0 && !isEven) || (buf[32] == 1 && isEven) { + P.Y.Neg(P.Y) + } else { + if buf[32] != 0 && buf[32] != 1 { + return fmt.Errorf("parity byte must be 0 or 1") + } + } + 
return nil +} + +// MarshalTo writes the serialized P to w, and returns the number of bytes +// written, or an error on failure. +func (P *secp256k1Point) MarshalTo(w io.Writer) (int, error) { + buf, err := P.MarshalBinary() + if err != nil { + return 0, err + } + return w.Write(buf) +} + +// UnmarshalFrom sets P to the secp256k1 point represented by bytes read from r, +// and returns the number of bytes read, or an error on failure. +func (P *secp256k1Point) UnmarshalFrom(r io.Reader) (int, error) { + buf := make([]byte, P.MarshalSize()) + n, err := io.ReadFull(r, buf) + if err != nil { + return 0, err + } + return n, P.UnmarshalBinary(buf) +} + +// EthereumAddress returns the 160-bit address corresponding to p as public key. +func EthereumAddress(p kyber.Point) (rv [20]byte) { + // The Ethereum address of P is the bottom 160 bits of keccak256(P.X‖P.Y), + // where P.X and P.Y are represented in 32 bytes as big-endian. See equations + // (277, 284) of Ethereum Yellow Paper version 3e36772, or go-ethereum's + // crypto.PubkeyToAddress. + h := sha3.NewLegacyKeccak256() + if _, err := h.Write(LongMarshal(p)); err != nil { + panic(err) + } + copy(rv[:], h.Sum(nil)[12:]) + return rv +} + +// IsSecp256k1Point returns true if p is a secp256k1Point +func IsSecp256k1Point(p kyber.Point) bool { + switch p.(type) { + case *secp256k1Point: + return true + default: + return false + } +} + +// Coordinates returns the coordinates of p +func Coordinates(p kyber.Point) (*big.Int, *big.Int) { + return p.(*secp256k1Point).X.int(), p.(*secp256k1Point).Y.int() +} + +// ValidPublicKey returns true iff p can be used in the optimized on-chain +// Schnorr-signature verification. See SchnorrSECP256K1.sol for details. 
+func ValidPublicKey(p kyber.Point) bool { + if p == (*secp256k1Point)(nil) || p == nil { + return false + } + P, ok := p.(*secp256k1Point) + if !ok { + return false + } + maybeY := maybeSqrtInField(rightHandSide(P.X)) + return maybeY != nil && (P.Y.Equal(maybeY) || P.Y.Equal(maybeY.Neg(maybeY))) +} + +// Generate generates a public/private key pair, which can be verified cheaply +// on-chain +func Generate(random cipher.Stream) *key.Pair { + p := key.Pair{} + for !ValidPublicKey(p.Public) { + p.Private = (&Secp256k1{}).Scalar().Pick(random) + p.Public = (&Secp256k1{}).Point().Mul(p.Private, nil) + } + return &p +} + +// LongMarshal returns the concatenated coordinates serialized as uint256's +func LongMarshal(p kyber.Point) []byte { + xMarshal := p.(*secp256k1Point).X.Bytes() + yMarshal := p.(*secp256k1Point).Y.Bytes() + return append(xMarshal[:], yMarshal[:]...) +} + +// LongUnmarshal returns the secp256k1 point represented by m, as a concatenated +// pair of uint256's +func LongUnmarshal(m []byte) (kyber.Point, error) { + if len(m) != 64 { + return nil, fmt.Errorf( + "0x%x does not represent an uncompressed secp256k1Point. 
Should be length 64, but is length %d", + m, len(m)) + } + p := newPoint() + p.X.SetInt(big.NewInt(0).SetBytes(m[:32])) + p.Y.SetInt(big.NewInt(0).SetBytes(m[32:])) + if !ValidPublicKey(p) { + return nil, fmt.Errorf("%s is not a valid secp256k1 point", p) + } + return p, nil +} + +// ScalarToPublicPoint returns the public secp256k1 point associated to s +func ScalarToPublicPoint(s kyber.Scalar) kyber.Point { + publicPoint := (&Secp256k1{}).Point() + return publicPoint.Mul(s, nil) +} + +// SetCoordinates returns the point (x,y), or panics if an invalid secp256k1Point +func SetCoordinates(x, y *big.Int) kyber.Point { + rv := newPoint() + rv.X.SetInt(x) + rv.Y.SetInt(y) + if !ValidPublicKey(rv) { + panic("point requested from invalid coordinates") + } + return rv +} diff --git a/core/services/signatures/secp256k1/point_test.go b/core/services/signatures/secp256k1/point_test.go new file mode 100644 index 00000000..b3ca68c0 --- /dev/null +++ b/core/services/signatures/secp256k1/point_test.go @@ -0,0 +1,232 @@ +package secp256k1 + +import ( + "bytes" + "crypto/rand" + "fmt" + "math/big" + "testing" + + "go.dedis.ch/kyber/v3/group/curve25519" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/cryptotest" +) + +var numPointSamples = 10 + +var randomStreamPoint = cryptotest.NewStream(&testing.T{}, 0) + +func TestPoint_String(t *testing.T) { + require.Equal(t, newPoint().String(), + "Secp256k1{X: fieldElt{0}, Y: fieldElt{0}}") +} + +func TestPoint_CloneAndEqual(t *testing.T) { + f := newPoint() + for i := 0; i < numPointSamples; i++ { + g := f.Clone() + f.Pick(randomStreamPoint) + assert.NotEqual(t, f, g, + "modifying original shouldn't change clone") + g, h := f.Clone(), f.Clone() + assert.Equal(t, f, g, "clones should be equal") + g.Add(g, f) + assert.Equal(t, h, f, + "modifying a clone shouldn't change original") + } +} + +func TestPoint_NullAndAdd(t *testing.T) { + f, g := 
newPoint(), newPoint() + for i := 0; i < numPointSamples; i++ { + g.Null() + f.Pick(randomStreamPoint) + g.Add(f, g) + assert.Equal(t, f, g, "adding zero should have no effect") + } +} + +func TestPoint_Set(t *testing.T) { + p := newPoint() + base := newPoint().Base() + assert.NotEqual(t, p, base, "generator should not be zero") + p.Set(base) + assert.Equal(t, p, base, "setting to generator should yield generator") +} + +func TestPoint_Embed(t *testing.T) { + p := newPoint() + for i := 0; i < numPointSamples; i++ { + data := make([]byte, p.EmbedLen()) + _, err := rand.Read(data) + require.Nil(t, err) + p.Embed(data, randomStreamPoint) + require.True(t, s256.IsOnCurve(p.X.int(), p.Y.int()), + "should embed to a secp256k1 point") + output, err := p.Data() + require.NoError(t, err) + require.True(t, bytes.Equal(data, output), + "should get same value back after round-trip "+ + "embedding, got %v, then %v", data, output) + } + var uint256Bytes [32]byte + uint256Bytes[0] = 30 + p.X.SetBytes(uint256Bytes) + _, err := p.Data() + require.Error(t, err) + require.Contains(t, err.Error(), "specifies too much data") + var b bytes.Buffer + p.Pick(randomStreamPoint) + _, err = p.MarshalTo(&b) + require.NoError(t, err) + _, err = p.UnmarshalFrom(&b) + require.NoError(t, err) + data := make([]byte, p.EmbedLen()+1) // Check length validation. 
This test + defer func() { // comes last, because it triggers panic + r := recover() + require.NotNil(t, r, "calling embed with too much data should panic") + require.Contains(t, r, "too much data to embed in a point") + }() + p.Embed(data, randomStreamPoint) +} + +func TestPoint_AddSubAndNeg(t *testing.T) { + zero := newPoint().Null() + p := newPoint() + for i := 0; i < numPointSamples; i++ { + p.Pick(randomStreamPoint) + q := p.Clone() + p.Sub(p, q) + require.True(t, p.Equal(zero), + "subtracting a point from itself should give zero, "+ + "got %v - %v = %v ≠ %v", q, q, p, zero) + p.Neg(q) + r := newPoint().Add(p, q) + require.True(t, r.Equal(zero), + "adding a point to its negative should give zero"+ + " got %v+%v=%v≠%v", q, p, r, zero) + r.Neg(q) + p.Sub(q, r) + s := newPoint().Add(q, q) + require.True(t, p.Equal(s), "q-(-q)=q+q?"+ + " got %v-%v=%v≠%v", q, r, p, s) + } +} + +func TestPoint_Mul(t *testing.T) { + zero := newPoint().Null() + multiplier := newScalar(bigZero) + one := newScalar(big.NewInt(int64(1))) + var p *secp256k1Point + for i := 0; i < numPointSamples/5; i++ { + if i%20 == 0 { + p = nil // Test default to generator point + } else { + p = newPoint() + p.Pick(randomStreamPoint) + } + multiplier.Pick(randomStreamPoint) + q := newPoint().Mul(one, p) + comparee := newPoint() + if p == (*secp256k1Point)(nil) { + comparee.Base() + } else { + comparee = p.Clone().(*secp256k1Point) + } + require.True(t, comparee.Equal(q), "1*p=p? %v * %v ≠ %v", one, + comparee, q) + q.Mul(multiplier, p) + negMultiplier := newScalar(bigZero).Neg(multiplier) + r := newPoint().Mul(negMultiplier, p) + s := newPoint().Add(q, r) + require.True(t, s.Equal(zero), "s*p+(-s)*p=0? 
got "+ + "%v*%v + %v*%v = %v + %v = %v ≠ %v", multiplier, p, + ) + } +} + +func TestPoint_Marshal(t *testing.T) { + p := newPoint() + for i := 0; i < numPointSamples; i++ { + p.Pick(randomStreamPoint) + serialized, err := p.MarshalBinary() + require.Nil(t, err) + q := newPoint() + err = q.UnmarshalBinary(serialized) + require.Nil(t, err) + require.True(t, p.Equal(q), "%v marshalled to %x, which "+ + "unmarshalled to %v", p, serialized, q) + } + p.X.SetInt(big.NewInt(0)) // 0³+7 is not a square in the base field. + _, err := p.MarshalBinary() + require.Error(t, err) + require.Contains(t, err.Error(), "not a square") + p.X.SetInt(big.NewInt(1)) + _, err = p.MarshalBinary() + require.Error(t, err) + require.Contains(t, err.Error(), "not a point on the curve") + id := p.MarshalID() + require.Equal(t, string(id[:]), "sp256.po") + data := make([]byte, 34) + err = p.UnmarshalBinary(data) + require.Error(t, err) + require.Contains(t, err.Error(), "wrong length for marshaled point") + require.Contains(t, p.UnmarshalBinary(data[:32]).Error(), + "wrong length for marshaled point") + data[32] = 2 + require.Contains(t, p.UnmarshalBinary(data[:33]).Error(), + "bad sign byte") + data[32] = 0 + data[31] = 5 // I.e., x-ordinate is now 5 + require.Contains(t, p.UnmarshalBinary(data[:33]).Error(), + "does not correspond to a curve point") +} + +func TestPoint_BaseTakesCopy(t *testing.T) { + p := newPoint().Base() + p.Add(p, p) + q := newPoint().Base() + assert.False(t, p.Equal(q), + "modifying output from Base changes S256.G{x,y}") +} + +func TestPoint_EthereumAddress(t *testing.T) { + // Example taken from + // https://theethereum.wiki/w/index.php/Accounts,_Addresses,_Public_And_Private_Keys,_And_Tokens + pString := "3a1076bf45ab87712ad64ccb3b10217737f7faacbf2872e88fdd9a537d8fe266" + pInt, ok := big.NewInt(0).SetString(pString, 16) + require.True(t, ok, "failed to parse private key") + private := newScalar(pInt) + public := newPoint().Mul(private, nil) + address := 
EthereumAddress(public) + assert.Equal(t, fmt.Sprintf("%x", address), + "c2d7cf95645d33006175b78989035c7c9061d3f9") +} + +func TestIsSecp256k1Point(t *testing.T) { + p := curve25519.NewBlakeSHA256Curve25519(false).Point() + require.False(t, IsSecp256k1Point(p)) + require.True(t, IsSecp256k1Point(newPoint())) +} + +func TestCoordinates(t *testing.T) { + x, y := Coordinates(newPoint()) + require.Equal(t, x, bigZero) + require.Equal(t, y, bigZero) +} + +func TestValidPublicKey(t *testing.T) { + require.False(t, ValidPublicKey(newPoint()), "zero is not a valid key") + require.True(t, ValidPublicKey(newPoint().Base())) +} + +func TestGenerate(t *testing.T) { + for { + if ValidPublicKey(Generate(randomStreamPoint).Public) { + break + } + } +} diff --git a/core/services/signatures/secp256k1/public_key.go b/core/services/signatures/secp256k1/public_key.go new file mode 100644 index 00000000..1596292b --- /dev/null +++ b/core/services/signatures/secp256k1/public_key.go @@ -0,0 +1,150 @@ +package secp256k1 + +import ( + "database/sql/driver" + "fmt" + + "github.com/pkg/errors" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "go.dedis.ch/kyber/v3" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" +) + +// PublicKey is a secp256k1 point in compressed format +type PublicKey [CompressedPublicKeyLength]byte + +// CompressedPublicKeyLength is the length of a secp256k1 public key's x +// coordinate as a uint256, concatenated with 00 if y is even, 01 if odd. 
+const CompressedPublicKeyLength = 33 + +func init() { + if CompressedPublicKeyLength != (&Secp256k1{}).Point().MarshalSize() { + panic("disparity in expected public key lengths") + } +} + +// Set sets k to the public key represented by l +func (k *PublicKey) Set(l PublicKey) { + if copy(k[:], l[:]) != CompressedPublicKeyLength { + panic(fmt.Errorf("failed to copy entire public key %x to %x", l, k)) + } +} + +// Point returns the secp256k1 point corresponding to k +func (k *PublicKey) Point() (kyber.Point, error) { + p := (&Secp256k1{}).Point() + return p, p.UnmarshalBinary(k[:]) +} + +// NewPublicKeyFromHex returns the PublicKey encoded by 0x-hex string hex, or errors +func NewPublicKeyFromHex(hex string) (PublicKey, error) { + rawKey, err := hexutil.Decode(hex) + if err != nil { + return PublicKey{}, err + } + return NewPublicKeyFromBytes(rawKey) +} + +// NewPublicKeyFromBytes returns the PublicKey built from the given bytes, or errors +func NewPublicKeyFromBytes(rawKey []byte) (PublicKey, error) { + if l := len(rawKey); l != CompressedPublicKeyLength { + return PublicKey{}, fmt.Errorf( + "wrong length for public key: %s of length %d", rawKey, l) + } + var k PublicKey + if c := copy(k[:], rawKey); c != CompressedPublicKeyLength { + panic(fmt.Errorf("failed to copy entire key to return value")) + } + return k, nil +} + +// SetFromHex sets k to the public key represented by hex, which must represent +// the compressed binary format +func (k *PublicKey) SetFromHex(hex string) error { + nk, err := NewPublicKeyFromHex(hex) + if err != nil { + return err + } + k.Set(nk) + return nil +} + +// String returns k's binary compressed representation, as 0x-hex +func (k PublicKey) String() string { + return hexutil.Encode(k[:]) +} + +// StringUncompressed returns k's binary uncompressed representation, as 0x-hex +func (k *PublicKey) StringUncompressed() (string, error) { + p, err := k.Point() + if err != nil { + return "", err + } + return hexutil.Encode(LongMarshal(p)), nil 
+} + +// Hash returns the solidity Keccak256 hash of k. Corresponds to hashOfKey on +// VRFCoordinator. +func (k *PublicKey) Hash() (common.Hash, error) { + p, err := k.Point() + if err != nil { + return common.Hash{}, err + } + return utils.MustHash(string(LongMarshal(p))), nil +} + +// MustHash is like Hash, but panics on error. Useful for testing. +func (k *PublicKey) MustHash() common.Hash { + hash, err := k.Hash() + if err != nil { + panic(fmt.Sprintf("Failed to compute hash of public vrf key %v", k)) + } + return hash +} + +// Address returns the Ethereum address of k or 0 if the key is invalid +func (k *PublicKey) Address() common.Address { + hash, err := k.Hash() + if err != nil { + return common.Address{} + } + return common.BytesToAddress(hash.Bytes()[12:]) +} + +// IsZero returns true iff k is the zero value for PublicKey +func (k *PublicKey) IsZero() bool { + return *k == PublicKey{} +} + +// MarshalText renders k as a text string +func (k PublicKey) MarshalText() ([]byte, error) { + return []byte(k.String()), nil +} + +// UnmarshalText reads a PublicKey into k from text, or errors +func (k *PublicKey) UnmarshalText(text []byte) error { + if err := k.SetFromHex(string(text)); err != nil { + return errors.Wrapf(err, "while parsing %s as public key", text) + } + return nil +} + +// Value marshals PublicKey to be saved in the DB +func (k PublicKey) Value() (driver.Value, error) { + return k.String(), nil +} + +// Scan reconstructs a PublicKey from a DB record of it. 
+func (k *PublicKey) Scan(value interface{}) error {
+	rawKey, ok := value.(string)
+	if !ok {
+		return errors.Wrap(fmt.Errorf("unable to convert %+v of type %T to PublicKey", value, value), "scan failure")
+	}
+	if err := k.SetFromHex(rawKey); err != nil {
+		return errors.Wrapf(err, "while scanning %s as PublicKey", rawKey)
+	}
+	return nil
+}
diff --git a/core/services/signatures/secp256k1/scalar.go b/core/services/signatures/secp256k1/scalar.go
new file mode 100644
index 00000000..d12826a8
--- /dev/null
+++ b/core/services/signatures/secp256k1/scalar.go
@@ -0,0 +1,231 @@
+// Package secp256k1 is an implementation of the kyber.{Group,Point,Scalar}
+// //////////////////////////////////////////////////////////////////////////////
+//
+// XXX: Do not use in production until this code has been audited.
+//
+// //////////////////////////////////////////////////////////////////////////////
+// interfaces, based on btcd/btcec and kyber/group/mod
+//
+// XXX: NOT CONSTANT TIME!
+package secp256k1
+
+// Implementation of kyber.Scalar interface for arithmetic operations mod the
+// order of the secp256k1 group (i.e. hex value
+// 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141.)
+
+import (
+	"crypto/cipher"
+	"fmt"
+	"io"
+	"math/big"
+
+	secp256k1BTCD "github.com/btcsuite/btcd/btcec/v2"
+	"github.com/ethereum/go-ethereum/common"
+
+	"go.dedis.ch/kyber/v3"
+	"go.dedis.ch/kyber/v3/util/random"
+)
+
+var GroupOrder = secp256k1BTCD.S256().N
+var FieldSize = secp256k1BTCD.S256().P
+
+type secp256k1Scalar big.Int
+
+// AllowVarTime, if passed true, indicates that variable-time operations may be
+// used on s.
+func (s *secp256k1Scalar) AllowVarTime(varTimeAllowed bool) {
+	// Since constant-time operations are unimplemented for secp256k1, a
+	// value of false panics.
+	if !varTimeAllowed {
+		panic("implementation is not constant-time!")
+	}
+}
+
+// newScalar returns a secp256k1 scalar, with value v modulo GroupOrder.
+func newScalar(v *big.Int) kyber.Scalar { + return (*secp256k1Scalar)(zero().Mod(v, GroupOrder)) +} + +func zero() *big.Int { return big.NewInt(0) } + +func ToInt(s kyber.Scalar) *big.Int { return (*big.Int)(s.(*secp256k1Scalar)) } + +func (s *secp256k1Scalar) int() *big.Int { return (*big.Int)(s) } + +func (s *secp256k1Scalar) modG() kyber.Scalar { + // TODO(alx): Make this faster + s.int().Mod(s.int(), GroupOrder) + return s +} + +func (s *secp256k1Scalar) String() string { + return fmt.Sprintf("scalar{%x}", (*big.Int)(s)) +} + +var scalarZero = zero() + +// Equal returns true if s and sPrime represent the same value modulo the group +// order, false otherwise +func (s *secp256k1Scalar) Equal(sPrime kyber.Scalar) bool { + difference := zero().Sub(s.int(), ToInt(sPrime)) + return scalarZero.Cmp(difference.Mod(difference, GroupOrder)) == 0 +} + +// Set copies sPrime's value (modulo GroupOrder) to s, and returns it +func (s *secp256k1Scalar) Set(sPrime kyber.Scalar) kyber.Scalar { + return (*secp256k1Scalar)(s.int().Mod(ToInt(sPrime), GroupOrder)) +} + +// Clone returns a copy of s mod GroupOrder +func (s *secp256k1Scalar) Clone() kyber.Scalar { + return (*secp256k1Scalar)(zero().Mod(s.int(), GroupOrder)) +} + +// SetInt64 returns s with value set to v modulo GroupOrder +func (s *secp256k1Scalar) SetInt64(v int64) kyber.Scalar { + return (*secp256k1Scalar)(s.int().SetInt64(v)).modG() +} + +// Zero sets s to 0 mod GroupOrder, and returns it +func (s *secp256k1Scalar) Zero() kyber.Scalar { + return s.SetInt64(0) +} + +// Add sets s to a+b mod GroupOrder, and returns it +func (s *secp256k1Scalar) Add(a, b kyber.Scalar) kyber.Scalar { + s.int().Add(ToInt(a), ToInt(b)) + return s.modG() +} + +// Sub sets s to a-b mod GroupOrder, and returns it +func (s *secp256k1Scalar) Sub(a, b kyber.Scalar) kyber.Scalar { + s.int().Sub(ToInt(a), ToInt(b)) + return s.modG() +} + +// Neg sets s to -a mod GroupOrder, and returns it +func (s *secp256k1Scalar) Neg(a kyber.Scalar) 
kyber.Scalar { + s.int().Neg(ToInt(a)) + return s.modG() +} + +// One sets s to 1 mod GroupOrder, and returns it +func (s *secp256k1Scalar) One() kyber.Scalar { + return s.SetInt64(1) +} + +// Mul sets s to a*b mod GroupOrder, and returns it +func (s *secp256k1Scalar) Mul(a, b kyber.Scalar) kyber.Scalar { + // TODO(alx): Make this faster + s.int().Mul(ToInt(a), ToInt(b)) + return s.modG() +} + +// Div sets s to a*b⁻¹ mod GroupOrder, and returns it +func (s *secp256k1Scalar) Div(a, b kyber.Scalar) kyber.Scalar { + if ToInt(b).Cmp(scalarZero) == 0 { + panic("attempt to divide by zero") + } + // TODO(alx): Make this faster + s.int().Mul(ToInt(a), zero().ModInverse(ToInt(b), GroupOrder)) + return s.modG() +} + +// Inv sets s to s⁻¹ mod GroupOrder, and returns it +func (s *secp256k1Scalar) Inv(a kyber.Scalar) kyber.Scalar { + if ToInt(a).Cmp(scalarZero) == 0 { + panic("attempt to divide by zero") + } + s.int().ModInverse(ToInt(a), GroupOrder) + return s +} + +// Pick sets s to a random value mod GroupOrder sampled from rand, and returns +// it +func (s *secp256k1Scalar) Pick(rand cipher.Stream) kyber.Scalar { + return s.Set((*secp256k1Scalar)(random.Int(GroupOrder, rand))) +} + +// MarshalBinary returns the big-endian byte representation of s, or an error on +// failure +func (s *secp256k1Scalar) MarshalBinary() ([]byte, error) { + b := ToInt(s.modG()).Bytes() + // leftpad with zeros + rv := append(make([]byte, s.MarshalSize()-len(b)), b...) + if len(rv) != s.MarshalSize() { + return nil, fmt.Errorf("marshalled scalar to wrong length") + } + return rv, nil +} + +// MarshalSize returns the length of the byte representation of s +func (s *secp256k1Scalar) MarshalSize() int { return 32 } + +// MarshalID returns the ID for a secp256k1 scalar +func (s *secp256k1Scalar) MarshalID() [8]byte { + return [8]byte{'s', 'p', '2', '5', '6', '.', 's', 'c'} +} + +// UnmarshalBinary sets s to the scalar represented by the contents of buf, +// returning error on failure. 
+func (s *secp256k1Scalar) UnmarshalBinary(buf []byte) error { + if len(buf) != s.MarshalSize() { + return fmt.Errorf("cannot unmarshal to scalar: wrong length") + } + s.int().Mod(s.int().SetBytes(buf), GroupOrder) + return nil +} + +// MarshalTo writes the serialized s to w, and returns the number of bytes +// written, or an error on failure. +func (s *secp256k1Scalar) MarshalTo(w io.Writer) (int, error) { + buf, err := s.MarshalBinary() + if err != nil { + return 0, fmt.Errorf("cannot marshal binary: %s", err) + } + return w.Write(buf) +} + +// UnmarshalFrom sets s to the scalar represented by bytes read from r, and +// returns the number of bytes read, or an error on failure. +func (s *secp256k1Scalar) UnmarshalFrom(r io.Reader) (int, error) { + buf := make([]byte, s.MarshalSize()) + n, err := io.ReadFull(r, buf) + if err != nil { + return n, err + } + return n, s.UnmarshalBinary(buf) +} + +// SetBytes sets s to the number with big-endian representation a mod +// GroupOrder, and returns it +func (s *secp256k1Scalar) SetBytes(a []byte) kyber.Scalar { + return ((*secp256k1Scalar)(s.int().SetBytes(a))).modG() +} + +// IsSecp256k1Scalar returns true if p is a secp256k1Scalar +func IsSecp256k1Scalar(s kyber.Scalar) bool { + switch s := s.(type) { + case *secp256k1Scalar: + s.modG() + return true + default: + return false + } +} + +// IntToScalar returns i wrapped as a big.Int. 
+// +// May modify i to reduce mod GroupOrder +func IntToScalar(i *big.Int) kyber.Scalar { + return ((*secp256k1Scalar)(i)).modG() +} + +func ScalarToHash(s kyber.Scalar) common.Hash { + return common.BigToHash(ToInt(s.(*secp256k1Scalar))) +} + +// RepresentsScalar returns true iff i is in the right range to be a scalar +func RepresentsScalar(i *big.Int) bool { + return i.Cmp(GroupOrder) == -1 +} diff --git a/core/services/signatures/secp256k1/scalar_test.go b/core/services/signatures/secp256k1/scalar_test.go new file mode 100644 index 00000000..1495efa9 --- /dev/null +++ b/core/services/signatures/secp256k1/scalar_test.go @@ -0,0 +1,189 @@ +package secp256k1 + +import ( + "bytes" + "encoding/hex" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.dedis.ch/kyber/v3" + "go.dedis.ch/kyber/v3/group/curve25519" + + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/cryptotest" +) + +var numScalarSamples = 10 + +var observedScalars map[string]bool + +func init() { + observedScalars = make(map[string]bool) +} + +// observedScalar ensures that novel scalars are being picked. 
+func observedScalar(t *testing.T, s kyber.Scalar) { + data, err := s.(*secp256k1Scalar).modG().MarshalBinary() + require.NoError(t, err) + scalar := hex.Dump(data) + require.False(t, observedScalars[scalar]) + observedScalars[scalar] = true +} + +var randomStreamScalar = cryptotest.NewStream(&testing.T{}, 0) + +func TestScalar_SetAndEqual(t *testing.T) { + tests := []int64{5, 67108864, 67108865, 4294967295} + g := newScalar(scalarZero) + for _, test := range tests { + f := newScalar(big.NewInt(test)) + g.Set(f) + assert.Equal(t, f, g, + "the method Set should give the same value to receiver") + f.Add(f, newScalar(big.NewInt(1))) + assert.NotEqual(t, f, g, + "SetInt should take a copy of the backing big.Int") + } +} + +func TestNewScalar(t *testing.T) { + one := newScalar(big.NewInt(1)) + assert.Equal(t, ToInt(one), + ToInt(newScalar(big.NewInt(0).Add(ToInt(one), GroupOrder))), + "equivalence classes mod GroupOrder not equal") +} + +func TestScalar_SmokeTestPick(t *testing.T) { + f := newScalar(scalarZero).Clone() + for i := 0; i < numScalarSamples; i++ { + f.Pick(randomStreamScalar) + observedScalar(t, f) + require.True(t, ToInt(f).Cmp(big.NewInt(1000000000)) == 1, + "implausibly low value returned from Pick: %v", f) + } +} + +func TestScalar_Neg(t *testing.T) { + f := newScalar(scalarZero).Clone() + for i := 0; i < numScalarSamples; i++ { + f.Pick(randomStreamScalar) + observedScalar(t, f) + g := f.Clone() + g.Neg(g) + require.True(t, g.Add(f, g).Equal(newScalar(scalarZero))) + } +} + +func TestScalar_Sub(t *testing.T) { + f := newScalar(scalarZero).Clone() + for i := 0; i < numScalarSamples; i++ { + f.Pick(randomStreamScalar) + observedScalar(t, f) + require.True(t, f.Sub(f, f).Equal(newScalar(scalarZero)), + "subtracting something from itself should give zero") + } +} + +func TestScalar_Clone(t *testing.T) { + f := newScalar(big.NewInt(1)) + g := f.Clone() + h := f.Clone() + assert.Equal(t, f, g, "clone output does not equal input") + g.Add(f, f) + 
assert.Equal(t, f, h, "clone does not make a copy") +} + +func TestScalar_Marshal(t *testing.T) { + f := newScalar(scalarZero) + g := newScalar(scalarZero) + for i := 0; i < numFieldSamples; i++ { + f.Pick(randomStreamScalar) + observedScalar(t, f) + data, err := f.MarshalBinary() + require.Nil(t, err) + err = g.UnmarshalBinary(data) + require.Nil(t, err) + require.True(t, g.Equal(f), + "roundtrip through serialization should give same "+ + "result back: failed with %s", f) + } + marshalID := f.(*secp256k1Scalar).MarshalID() + require.Equal(t, string(marshalID[:]), "sp256.sc") + data := make([]byte, 33) + require.Contains(t, f.UnmarshalBinary(data).Error(), "wrong length") + var buf bytes.Buffer + _, err := f.MarshalTo(&buf) + require.NoError(t, err) + _, err = f.UnmarshalFrom(&buf) + require.NoError(t, err) +} + +func TestScalar_MulDivInv(t *testing.T) { + f := newScalar(scalarZero) + g := newScalar(scalarZero) + h := newScalar(scalarZero) + j := newScalar(scalarZero) + k := newScalar(scalarZero) + for i := 0; i < numFieldSamples; i++ { + f.Pick(randomStreamScalar) + observedScalar(t, f) + g.Inv(f) + h.Mul(f, g) + require.True(t, h.Equal(newScalar(big.NewInt(1)))) + h.Div(f, f) + require.True(t, h.Equal(newScalar(big.NewInt(1)))) + h.Div(newScalar(big.NewInt(1)), f) + require.True(t, h.Equal(g)) + h.Pick(randomStreamScalar) + observedScalar(t, h) + j.Neg(j.Mul(h, f)) + k.Mul(h, k.Neg(f)) + require.True(t, j.Equal(k), "-(h*f) != h*(-f)") + } +} + +func TestScalar_AllowVarTime(t *testing.T) { + defer func() { require.Contains(t, recover(), "not constant-time!") }() + newScalar(bigZero).(*secp256k1Scalar).AllowVarTime(false) +} + +func TestScalar_String(t *testing.T) { + require.Equal(t, newScalar(bigZero).String(), "scalar{0}") +} + +func TestScalar_SetInt64(t *testing.T) { + require.Equal(t, newScalar(bigZero).SetInt64(1), newScalar(big.NewInt(1))) + require.True(t, newScalar(big.NewInt(1)).Zero().Equal(newScalar(bigZero))) + require.Equal(t, 
newScalar(bigZero).One(), newScalar(big.NewInt(1))) +} + +func TestScalar_DivPanicsOnZeroDivisor(t *testing.T) { + defer func() { require.Contains(t, recover(), "divide by zero") }() + newScalar(bigZero).Div(newScalar(bigZero).One(), newScalar(bigZero)) +} + +func TestScalar_InvPanicsOnZero(t *testing.T) { + defer func() { require.Contains(t, recover(), "divide by zero") }() + newScalar(bigZero).Inv(newScalar(bigZero)) +} + +func TestScalar_SetBytes(t *testing.T) { + u256Cardinality := zero().Lsh(big.NewInt(1), 256) + newScalar(bigZero).(*secp256k1Scalar).int().Cmp( + zero().Sub(u256Cardinality, GroupOrder)) +} + +func TestScalar_IsSecp256k1Scalar(t *testing.T) { + c := curve25519.NewBlakeSHA256Curve25519(true) + require.False(t, IsSecp256k1Scalar(c.Scalar())) + require.True(t, IsSecp256k1Scalar(newScalar(bigZero))) +} + +func TestScalar_IntToScalar(t *testing.T) { + u256Cardinality := zero().Lsh(big.NewInt(1), 256) + IntToScalar(u256Cardinality) + require.Equal(t, u256Cardinality, zero().Sub(zero().Lsh(big.NewInt(1), 256), + GroupOrder)) +} diff --git a/core/services/signatures/secp256k1/suite.go b/core/services/signatures/secp256k1/suite.go new file mode 100644 index 00000000..605f0828 --- /dev/null +++ b/core/services/signatures/secp256k1/suite.go @@ -0,0 +1,91 @@ +// Package secp256k1 is an implementation of the kyber.{Group,Point,Scalar} +// ////////////////////////////////////////////////////////////////////////////// +// +// XXX: Do not use in production until this code has been audited. +// +// ////////////////////////////////////////////////////////////////////////////// +// interfaces, based on btcd/btcec and kyber/group/mod +// +// XXX: NOT CONSTANT TIME! 
+package secp256k1 + +import ( + "crypto/cipher" + "hash" + "io" + "reflect" + + "golang.org/x/crypto/sha3" + + "go.dedis.ch/fixbuf" + "go.dedis.ch/kyber/v3" + "go.dedis.ch/kyber/v3/util/random" + "go.dedis.ch/kyber/v3/xof/blake2xb" +) + +// SuiteSecp256k1 implements some basic functionalities such as Group, HashFactory, +// and XOFFactory. +type SuiteSecp256k1 struct { + Secp256k1 + r cipher.Stream +} + +// Hash returns a newly instantiated keccak hash function. +func (s *SuiteSecp256k1) Hash() hash.Hash { + return sha3.NewLegacyKeccak256() +} + +// XOF returns an XOR function, implemented via the Blake2b hash. +// +// This should only be used for generating secrets, so there is no need to make +// it cheap to compute on-chain. +func (s *SuiteSecp256k1) XOF(key []byte) kyber.XOF { + return blake2xb.New(key) +} + +// Read implements the Encoding interface function, and reads a series of objs from r +// The objs must all be pointers +func (s *SuiteSecp256k1) Read(r io.Reader, objs ...interface{}) error { + return fixbuf.Read(r, s, objs...) +} + +// Write implements the Encoding interface, and writes the objs to r using their +// built-in binary serializations. Supports Points, Scalars, fixed-length data +// types supported by encoding/binary/Write(), and structs, arrays, and slices +// containing these types. +func (s *SuiteSecp256k1) Write(w io.Writer, objs ...interface{}) error { + return fixbuf.Write(w, objs) +} + +var aScalar kyber.Scalar +var tScalar = reflect.TypeOf(aScalar) +var aPoint kyber.Point +var tPoint = reflect.TypeOf(aPoint) + +// New implements the kyber.Encoding interface, and returns a new element of +// type t, which can be a Point or a Scalar +func (s *SuiteSecp256k1) New(t reflect.Type) interface{} { + switch t { + case tScalar: + return s.Scalar() + case tPoint: + return s.Point() + } + return nil +} + +// RandomStream returns a cipher.Stream that returns a key stream +// from crypto/rand. 
+func (s *SuiteSecp256k1) RandomStream() cipher.Stream { + if s.r != nil { + return s.r + } + return random.New() +} + +// NewBlakeKeccackSecp256k1 returns a cipher suite based on package +// go.dedis.ch/kyber/xof/blake2xb, SHA-256, and the secp256k1 curve. It +// produces cryptographically secure random numbers via package crypto/rand. +func NewBlakeKeccackSecp256k1() *SuiteSecp256k1 { + return new(SuiteSecp256k1) +} diff --git a/core/services/signatures/secp256k1/suite_test.go b/core/services/signatures/secp256k1/suite_test.go new file mode 100644 index 00000000..64dd2b07 --- /dev/null +++ b/core/services/signatures/secp256k1/suite_test.go @@ -0,0 +1,16 @@ +package secp256k1 + +import ( + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSuite(t *testing.T) { + s := NewBlakeKeccackSecp256k1() + emptyHashAsHex := hex.EncodeToString(s.Hash().Sum(nil)) + require.Equal(t, emptyHashAsHex, + "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") + _ = s.RandomStream() +} diff --git a/core/services/streams/delegate.go b/core/services/streams/delegate.go new file mode 100644 index 00000000..741f2abc --- /dev/null +++ b/core/services/streams/delegate.go @@ -0,0 +1,110 @@ +package streams + +import ( + "context" + "fmt" + "strings" + + "github.com/google/uuid" + "github.com/pelletier/go-toml/v2" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +type DelegateConfig interface { + MaxSuccessfulRuns() uint64 + ResultWriteQueueDepth() uint64 +} + +type Delegate struct { + lggr logger.Logger + registry Registry + runner ocrcommon.Runner + cfg DelegateConfig +} + +var _ job.Delegate = (*Delegate)(nil) + +func NewDelegate(lggr logger.Logger, registry Registry, runner 
ocrcommon.Runner, cfg DelegateConfig) *Delegate { + return &Delegate{lggr.Named("StreamsDelegate"), registry, runner, cfg} +} + +func (d *Delegate) JobType() job.Type { + return job.Stream +} + +func (d *Delegate) BeforeJobCreated(jb job.Job) {} +func (d *Delegate) AfterJobCreated(jb job.Job) {} +func (d *Delegate) BeforeJobDeleted(jb job.Job) {} +func (d *Delegate) OnDeleteJob(jb job.Job, q pg.Queryer) error { return nil } + +func (d *Delegate) ServicesForSpec(jb job.Job) (services []job.ServiceCtx, err error) { + if jb.StreamID == nil { + return nil, errors.New("streamID is required to be present for stream specs") + } + id := *jb.StreamID + lggr := d.lggr.Named(fmt.Sprintf("%d", id)).With("streamID", id) + + rrs := ocrcommon.NewResultRunSaver(d.runner, lggr, d.cfg.MaxSuccessfulRuns(), d.cfg.ResultWriteQueueDepth()) + services = append(services, rrs, &StreamService{ + d.registry, + id, + jb.PipelineSpec, + lggr, + rrs, + }) + return services, nil +} + +type ResultRunSaver interface { + Save(run *pipeline.Run) +} + +type StreamService struct { + registry Registry + id StreamID + spec *pipeline.Spec + lggr logger.Logger + rrs ResultRunSaver +} + +func (s *StreamService) Start(_ context.Context) error { + if s.spec == nil { + return fmt.Errorf("pipeline spec unexpectedly missing for stream %q", s.id) + } + s.lggr.Debugf("Starting stream %d", s.id) + return s.registry.Register(s.id, *s.spec, s.rrs) +} + +func (s *StreamService) Close() error { + s.lggr.Debugf("Stopping stream %d", s.id) + s.registry.Unregister(s.id) + return nil +} + +func ValidatedStreamSpec(tomlString string) (job.Job, error) { + var jb = job.Job{ExternalJobID: uuid.New()} + + r := strings.NewReader(tomlString) + d := toml.NewDecoder(r) + d.DisallowUnknownFields() + err := d.Decode(&jb) + if err != nil { + return jb, errors.Wrap(err, "toml unmarshal error on job") + } + + if jb.Type != job.Stream { + return jb, errors.Errorf("unsupported type: %q", jb.Type) + } + + if jb.StreamID == nil { + return 
jb, errors.New("jobs of type 'stream' require streamID to be specified") + } + + return jb, nil +} diff --git a/core/services/streams/delegate_test.go b/core/services/streams/delegate_test.go new file mode 100644 index 00000000..9215871c --- /dev/null +++ b/core/services/streams/delegate_test.go @@ -0,0 +1,182 @@ +package streams + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type mockRegistry struct{} + +func (m *mockRegistry) Get(streamID StreamID) (strm Stream, exists bool) { return } +func (m *mockRegistry) Register(streamID StreamID, spec pipeline.Spec, rrs ResultRunSaver) error { + return nil +} +func (m *mockRegistry) Unregister(streamID StreamID) {} + +type mockDelegateConfig struct{} + +func (m *mockDelegateConfig) MaxSuccessfulRuns() uint64 { return 0 } +func (m *mockDelegateConfig) ResultWriteQueueDepth() uint64 { return 0 } + +func Test_Delegate(t *testing.T) { + lggr := logger.TestLogger(t) + registry := &mockRegistry{} + runner := &mockRunner{} + cfg := &mockDelegateConfig{} + d := NewDelegate(lggr, registry, runner, cfg) + + t.Run("ServicesForSpec", func(t *testing.T) { + jb := job.Job{PipelineSpec: &pipeline.Spec{ID: 1}} + t.Run("errors if job is missing streamID", func(t *testing.T) { + _, err := d.ServicesForSpec(jb) + assert.EqualError(t, err, "streamID is required to be present for stream specs") + }) + jb.StreamID = ptr(uint32(42)) + t.Run("returns services", func(t *testing.T) { + srvs, err := d.ServicesForSpec(jb) + require.NoError(t, err) + + assert.Len(t, srvs, 2) + assert.IsType(t, &ocrcommon.RunResultSaver{}, srvs[0]) + + strmSrv := srvs[1].(*StreamService) + assert.Equal(t, registry, strmSrv.registry) + assert.Equal(t, StreamID(42), strmSrv.id) + 
assert.Equal(t, jb.PipelineSpec, strmSrv.spec) + assert.NotNil(t, strmSrv.lggr) + assert.Equal(t, srvs[0], strmSrv.rrs) + }) + }) +} + +func Test_ValidatedStreamSpec(t *testing.T) { + var tt = []struct { + name string + toml string + assertion func(t *testing.T, os job.Job, err error) + }{ + { + name: "minimal stream spec", + toml: ` +type = "stream" +streamID = 12345 +name = "voter-turnout" +schemaVersion = 1 +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +`, + assertion: func(t *testing.T, jb job.Job, err error) { + require.NoError(t, err) + assert.Equal(t, job.Type("stream"), jb.Type) + assert.Equal(t, uint32(1), jb.SchemaVersion) + assert.True(t, jb.Name.Valid) + require.NotNil(t, jb.StreamID) + assert.Equal(t, uint32(12345), *jb.StreamID) + assert.Equal(t, "voter-turnout", jb.Name.String) + }, + }, + { + name: "unparseable toml", + toml: `not toml`, + assertion: func(t *testing.T, jb job.Job, err error) { + assert.EqualError(t, err, "toml unmarshal error on job: toml: expected character =") + }, + }, + { + name: "invalid field type", + toml: ` +type = "stream" +name = "voter-turnout" +schemaVersion = "should be integer" +`, + assertion: func(t *testing.T, jb job.Job, err error) { + assert.EqualError(t, err, "toml unmarshal error on job: toml: cannot decode TOML string into struct field job.Job.SchemaVersion of type uint32") + }, + }, + { + name: "invalid fields", + toml: ` +type = "stream" +name = "voter-turnout" +notAValidField = "some value" +schemaVersion = 1 +`, + assertion: func(t *testing.T, jb job.Job, err error) { + assert.EqualError(t, err, "toml unmarshal error on job: strict mode: fields in the document are missing in the target struct") + }, + }, + { + name: "wrong type", + toml: ` +type = "not a valid type" +name = "voter-turnout" +schemaVersion = 1 
+observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +`, + assertion: func(t *testing.T, jb job.Job, err error) { + assert.EqualError(t, err, "unsupported type: \"not a valid type\"") + }, + }, + { + name: "no error if missing name", + toml: ` +type = "stream" +schemaVersion = 1 +streamID = 12345 +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +`, + assertion: func(t *testing.T, jb job.Job, err error) { + require.NoError(t, err) + }, + }, + { + name: "error if missing streamID", + toml: ` +type = "stream" +schemaVersion = 1 +observationSource = """ +ds1 [type=bridge name=voter_turnout]; +ds1_parse [type=jsonparse path="one,two"]; +ds1_multiply [type=multiply times=1.23]; +ds1 -> ds1_parse -> ds1_multiply -> answer1; +answer1 [type=median index=0]; +""" +`, + assertion: func(t *testing.T, jb job.Job, err error) { + assert.EqualError(t, err, "jobs of type 'stream' require streamID to be specified") + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + s, err := ValidatedStreamSpec(tc.toml) + tc.assertion(t, s, err) + }) + } +} +func ptr[T any](t T) *T { return &t } diff --git a/core/services/streams/stream.go b/core/services/streams/stream.go new file mode 100644 index 00000000..f691df68 --- /dev/null +++ b/core/services/streams/stream.go @@ -0,0 +1,128 @@ +package streams + +import ( + "context" + "fmt" + "math/big" + "sync" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type Runner interface { + ExecuteRun(ctx context.Context, spec pipeline.Spec, vars 
pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) + InitializePipeline(spec pipeline.Spec) (*pipeline.Pipeline, error) +} + +type RunResultSaver interface { + Save(run *pipeline.Run) +} + +type Stream interface { + Run(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) +} + +type stream struct { + sync.RWMutex + id StreamID + lggr logger.Logger + spec *pipeline.Spec + runner Runner + rrs RunResultSaver +} + +func NewStream(lggr logger.Logger, id StreamID, spec pipeline.Spec, runner Runner, rrs RunResultSaver) Stream { + return newStream(lggr, id, spec, runner, rrs) +} + +func newStream(lggr logger.Logger, id StreamID, spec pipeline.Spec, runner Runner, rrs RunResultSaver) *stream { + return &stream{sync.RWMutex{}, id, lggr.Named("Stream").With("streamID", id), &spec, runner, rrs} +} + +func (s *stream) Run(ctx context.Context) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) { + run, trrs, err = s.executeRun(ctx) + + if err != nil { + return nil, nil, fmt.Errorf("Run failed: %w", err) + } + if s.rrs != nil { + s.rrs.Save(run) + } + + return +} + +// The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod). +// Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod. 
+func (s *stream) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) {
+	// the hot path here is to avoid parsing and use the pre-parsed, cached, pipeline
+	s.RLock()
+	initialize := s.spec.Pipeline == nil
+	s.RUnlock()
+	if initialize {
+		pipeline, err := s.spec.ParsePipeline()
+		if err != nil {
+			return nil, nil, fmt.Errorf("Run failed due to unparseable pipeline: %w", err)
+		}
+
+		// Re-check under the write lock: another goroutine may have initialized
+		// the pipeline between RUnlock above and Lock here.
+		s.Lock()
+		if s.spec.Pipeline == nil {
+			s.spec.Pipeline = pipeline
+			// initialize it for the given runner
+			if _, err := s.runner.InitializePipeline(*s.spec); err != nil {
+				// Must release the write lock before returning; previously this
+				// error path returned while still holding the mutex, deadlocking
+				// every subsequent Run on this stream.
+				s.Unlock()
+				return nil, nil, fmt.Errorf("Run failed due to error while initializing pipeline: %w", err)
+			}
+		}
+		s.Unlock()
+	}
+
+	vars := pipeline.NewVarsFrom(map[string]interface{}{
+		"pipelineSpec": map[string]interface{}{
+			"id": s.spec.ID,
+		},
+		"stream": map[string]interface{}{
+			"id": s.id,
+		},
+	})
+
+	run, trrs, err := s.runner.ExecuteRun(ctx, *s.spec, vars, s.lggr)
+	if err != nil {
+		return nil, nil, fmt.Errorf("error executing run for spec ID %v: %w", s.spec.ID, err)
+	}
+
+	return run, trrs, err
+}
+
+// ExtractBigInt returns a result of a pipeline run that returns one single
+// decimal result, as a *big.Int.
+// This acts as a reference/example method, other methods can be implemented to
+// extract any desired type that matches a particular pipeline run output.
+// Returns error on parse errors: if results are wrong type +func ExtractBigInt(trrs pipeline.TaskRunResults) (*big.Int, error) { + // pipeline.TaskRunResults comes ordered asc by index, this is guaranteed + // by the pipeline executor + finaltrrs := trrs.Terminals() + + if len(finaltrrs) != 1 { + return nil, fmt.Errorf("invalid number of results, expected: 1, got: %d", len(finaltrrs)) + } + res := finaltrrs[0].Result + if res.Error != nil { + return nil, res.Error + } + val, err := toBigInt(res.Value) + if err != nil { + return nil, fmt.Errorf("failed to parse BenchmarkPrice: %w", err) + } + return val, nil +} + +func toBigInt(val interface{}) (*big.Int, error) { + dec, err := utils.ToDecimal(val) + if err != nil { + return nil, err + } + return dec.BigInt(), nil +} diff --git a/core/services/streams/stream_registry.go b/core/services/streams/stream_registry.go new file mode 100644 index 00000000..dc104951 --- /dev/null +++ b/core/services/streams/stream_registry.go @@ -0,0 +1,67 @@ +package streams + +import ( + "fmt" + "sync" + + commontypes "github.com/goplugin/plugin-common/pkg/types/llo" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +// alias for easier refactoring +type StreamID = commontypes.StreamID + +type Registry interface { + Getter + Register(streamID StreamID, spec pipeline.Spec, rrs ResultRunSaver) error + Unregister(streamID StreamID) +} + +type Getter interface { + Get(streamID StreamID) (strm Stream, exists bool) +} + +type streamRegistry struct { + sync.RWMutex + lggr logger.Logger + runner Runner + streams map[StreamID]Stream +} + +func NewRegistry(lggr logger.Logger, runner Runner) Registry { + return newRegistry(lggr, runner) +} + +func newRegistry(lggr logger.Logger, runner Runner) *streamRegistry { + return &streamRegistry{ + sync.RWMutex{}, + lggr.Named("Registry"), + runner, + make(map[StreamID]Stream), + } +} + +func (s *streamRegistry) Get(streamID StreamID) (strm 
Stream, exists bool) { + s.RLock() + defer s.RUnlock() + strm, exists = s.streams[streamID] + return +} + +func (s *streamRegistry) Register(streamID StreamID, spec pipeline.Spec, rrs ResultRunSaver) error { + s.Lock() + defer s.Unlock() + if _, exists := s.streams[streamID]; exists { + return fmt.Errorf("stream already registered for id: %d", streamID) + } + s.streams[streamID] = NewStream(s.lggr, streamID, spec, s.runner, rrs) + return nil +} + +func (s *streamRegistry) Unregister(streamID StreamID) { + s.Lock() + defer s.Unlock() + delete(s.streams, streamID) +} diff --git a/core/services/streams/stream_registry_test.go b/core/services/streams/stream_registry_test.go new file mode 100644 index 00000000..6feb8a31 --- /dev/null +++ b/core/services/streams/stream_registry_test.go @@ -0,0 +1,107 @@ +package streams + +import ( + "context" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type mockStream struct { + run *pipeline.Run + trrs pipeline.TaskRunResults + err error +} + +func (m *mockStream) Run(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) { + return m.run, m.trrs, m.err +} + +func Test_Registry(t *testing.T) { + lggr := logger.TestLogger(t) + runner := &mockRunner{} + + t.Run("Get", func(t *testing.T) { + sr := newRegistry(lggr, runner) + + sr.streams[1] = &mockStream{run: &pipeline.Run{ID: 1}} + sr.streams[2] = &mockStream{run: &pipeline.Run{ID: 2}} + sr.streams[3] = &mockStream{run: &pipeline.Run{ID: 3}} + + v, exists := sr.Get(1) + assert.True(t, exists) + assert.Equal(t, sr.streams[1], v) + + v, exists = sr.Get(2) + assert.True(t, exists) + assert.Equal(t, sr.streams[2], v) + + v, exists = sr.Get(3) + assert.True(t, exists) + assert.Equal(t, sr.streams[3], v) + + v, exists = sr.Get(4) + assert.Nil(t, v) + assert.False(t, exists) + }) + t.Run("Register", func(t 
*testing.T) { + sr := newRegistry(lggr, runner) + + t.Run("registers new stream", func(t *testing.T) { + assert.Len(t, sr.streams, 0) + err := sr.Register(1, pipeline.Spec{ID: 32, DotDagSource: "source"}, nil) + require.NoError(t, err) + assert.Len(t, sr.streams, 1) + + v, exists := sr.Get(1) + require.True(t, exists) + strm := v.(*stream) + assert.Equal(t, StreamID(1), strm.id) + assert.Equal(t, int32(32), strm.spec.ID) + }) + + t.Run("errors when attempt to re-register a stream with an existing ID", func(t *testing.T) { + assert.Len(t, sr.streams, 1) + err := sr.Register(1, pipeline.Spec{ID: 33, DotDagSource: "source"}, nil) + require.Error(t, err) + assert.Len(t, sr.streams, 1) + assert.EqualError(t, err, "stream already registered for id: 1") + + v, exists := sr.Get(1) + require.True(t, exists) + strm := v.(*stream) + assert.Equal(t, StreamID(1), strm.id) + assert.Equal(t, int32(32), strm.spec.ID) + }) + }) + t.Run("Unregister", func(t *testing.T) { + sr := newRegistry(lggr, runner) + + sr.streams[1] = &mockStream{run: &pipeline.Run{ID: 1}} + sr.streams[2] = &mockStream{run: &pipeline.Run{ID: 2}} + sr.streams[3] = &mockStream{run: &pipeline.Run{ID: 3}} + + t.Run("unregisters a stream", func(t *testing.T) { + assert.Len(t, sr.streams, 3) + + sr.Unregister(1) + + assert.Len(t, sr.streams, 2) + _, exists := sr.streams[1] + assert.False(t, exists) + }) + t.Run("no effect when unregistering a non-existent stream", func(t *testing.T) { + assert.Len(t, sr.streams, 2) + + sr.Unregister(1) + + assert.Len(t, sr.streams, 2) + _, exists := sr.streams[1] + assert.False(t, exists) + }) + }) +} diff --git a/core/services/streams/stream_test.go b/core/services/streams/stream_test.go new file mode 100644 index 00000000..80fddafa --- /dev/null +++ b/core/services/streams/stream_test.go @@ -0,0 +1,133 @@ +package streams + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +var UUID = uuid.New() + +type mockRunner struct { + p *pipeline.Pipeline + run *pipeline.Run + trrs pipeline.TaskRunResults + err error +} + +func (m *mockRunner) ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars, l logger.Logger) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) { + return m.run, m.trrs, m.err +} +func (m *mockRunner) InitializePipeline(spec pipeline.Spec) (p *pipeline.Pipeline, err error) { + return m.p, m.err +} +func (m *mockRunner) InsertFinishedRun(run *pipeline.Run, saveSuccessfulTaskRuns bool, qopts ...pg.QOpt) error { + return m.err +} + +type MockTask struct { + result pipeline.Result +} + +func (m *MockTask) Type() pipeline.TaskType { return "MockTask" } +func (m *MockTask) ID() int { return 0 } +func (m *MockTask) DotID() string { return "" } +func (m *MockTask) Run(ctx context.Context, lggr logger.Logger, vars pipeline.Vars, inputs []pipeline.Result) (pipeline.Result, pipeline.RunInfo) { + return m.result, pipeline.RunInfo{} +} +func (m *MockTask) Base() *pipeline.BaseTask { return nil } +func (m *MockTask) Outputs() []pipeline.Task { return nil } +func (m *MockTask) Inputs() []pipeline.TaskDependency { return nil } +func (m *MockTask) OutputIndex() int32 { return 0 } +func (m *MockTask) TaskTimeout() (time.Duration, bool) { return 0, false } +func (m *MockTask) TaskRetries() uint32 { return 0 } +func (m *MockTask) TaskMinBackoff() time.Duration { return 0 } +func (m *MockTask) TaskMaxBackoff() time.Duration { return 0 } + +func Test_Stream(t *testing.T) { + lggr := logger.TestLogger(t) + runner := &mockRunner{} + spec := pipeline.Spec{} + id := StreamID(123) + ctx := testutils.Context(t) + + t.Run("Run", func(t *testing.T) { + strm := 
newStream(lggr, id, spec, runner, nil) + + t.Run("errors with empty pipeline", func(t *testing.T) { + _, _, err := strm.Run(ctx) + assert.EqualError(t, err, "Run failed: Run failed due to unparseable pipeline: empty pipeline") + }) + + spec.DotDagSource = ` +succeed [type=memo value=42] +succeed; +` + + strm = newStream(lggr, id, spec, runner, nil) + + t.Run("executes the pipeline (success)", func(t *testing.T) { + runner.run = &pipeline.Run{ID: 42} + runner.trrs = []pipeline.TaskRunResult{pipeline.TaskRunResult{ID: UUID}} + runner.err = nil + + run, trrs, err := strm.Run(ctx) + assert.NoError(t, err) + + assert.Equal(t, int64(42), run.ID) + require.Len(t, trrs, 1) + assert.Equal(t, UUID, trrs[0].ID) + }) + t.Run("executes the pipeline (failure)", func(t *testing.T) { + runner.err = errors.New("something exploded") + + _, _, err := strm.Run(ctx) + require.Error(t, err) + + assert.EqualError(t, err, "Run failed: error executing run for spec ID 0: something exploded") + }) + }) +} + +func Test_ExtractBigInt(t *testing.T) { + t.Run("wrong number of inputs", func(t *testing.T) { + trrs := []pipeline.TaskRunResult{} + + _, err := ExtractBigInt(trrs) + assert.EqualError(t, err, "invalid number of results, expected: 1, got: 0") + }) + t.Run("wrong type", func(t *testing.T) { + trrs := []pipeline.TaskRunResult{ + { + Result: pipeline.Result{Value: []byte{1, 2, 3}}, + Task: &MockTask{}, + }, + } + + _, err := ExtractBigInt(trrs) + assert.EqualError(t, err, "failed to parse BenchmarkPrice: type []uint8 cannot be converted to decimal.Decimal ([1 2 3])") + }) + t.Run("correct inputs", func(t *testing.T) { + trrs := []pipeline.TaskRunResult{ + { + Result: pipeline.Result{Value: "122.345"}, + Task: &MockTask{}, + }, + } + + val, err := ExtractBigInt(trrs) + require.NoError(t, err) + assert.Equal(t, big.NewInt(122), val) + }) +} diff --git a/core/services/synchronization/common.go b/core/services/synchronization/common.go new file mode 100644 index 00000000..f0bb5e65 --- 
/dev/null +++ b/core/services/synchronization/common.go @@ -0,0 +1,41 @@ +package synchronization + +import ( + "context" + + "github.com/goplugin/pluginv3.0/v2/core/services" +) + +// TelemetryType defines supported telemetry types +type TelemetryType string + +const ( + EnhancedEA TelemetryType = "enhanced-ea" + FunctionsRequests TelemetryType = "functions-requests" + EnhancedEAMercury TelemetryType = "enhanced-ea-mercury" + OCR TelemetryType = "ocr" + OCR2Automation TelemetryType = "ocr2-automation" + OCR2Functions TelemetryType = "ocr2-functions" + OCR2Threshold TelemetryType = "ocr2-threshold" + OCR2S4 TelemetryType = "ocr2-s4" + OCR2Median TelemetryType = "ocr2-median" + OCR3Mercury TelemetryType = "ocr3-mercury" + OCR2VRF TelemetryType = "ocr2-vrf" + AutomationCustom TelemetryType = "automation-custom" + OCR3Automation TelemetryType = "ocr3-automation" +) + +type TelemPayload struct { + Telemetry []byte + TelemType TelemetryType + ContractID string +} + +// TelemetryService encapsulates all the functionality needed to +// send telemetry to the ingress server using wsrpc +// +//go:generate mockery --quiet --name TelemetryService --output ./mocks --case=underscore +type TelemetryService interface { + services.ServiceCtx + Send(ctx context.Context, telemetry []byte, contractID string, telemType TelemetryType) +} diff --git a/core/services/synchronization/helpers_test.go b/core/services/synchronization/helpers_test.go new file mode 100644 index 00000000..4aeaba20 --- /dev/null +++ b/core/services/synchronization/helpers_test.go @@ -0,0 +1,26 @@ +package synchronization + +import ( + "net/url" + "testing" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + telemPb "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem" +) + +// NewTestTelemetryIngressClient calls NewTelemetryIngressClient and injects telemClient. 
+func NewTestTelemetryIngressClient(t *testing.T, url *url.URL, serverPubKeyHex string, ks keystore.CSA, logging bool, telemClient telemPb.TelemClient) TelemetryService { + tc := NewTelemetryIngressClient(url, serverPubKeyHex, ks, logging, logger.TestLogger(t), 100, "test", "test") + tc.(*telemetryIngressClient).telemClient = telemClient + return tc +} + +// NewTestTelemetryIngressBatchClient calls NewTelemetryIngressBatchClient and injects telemClient. +func NewTestTelemetryIngressBatchClient(t *testing.T, url *url.URL, serverPubKeyHex string, ks keystore.CSA, logging bool, telemClient telemPb.TelemClient, sendInterval time.Duration, uniconn bool) TelemetryService { + tc := NewTelemetryIngressBatchClient(url, serverPubKeyHex, ks, logging, logger.TestLogger(t), 100, 50, sendInterval, time.Second, uniconn, "test", "test") + tc.(*telemetryIngressBatchClient).close = func() error { return nil } + tc.(*telemetryIngressBatchClient).telemClient = telemClient + return tc +} diff --git a/core/services/synchronization/mocks/telem_client.go b/core/services/synchronization/mocks/telem_client.go new file mode 100644 index 00000000..08a5f1b8 --- /dev/null +++ b/core/services/synchronization/mocks/telem_client.go @@ -0,0 +1,81 @@ +// Code generated by mockery v2.35.4. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + telem "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem" + mock "github.com/stretchr/testify/mock" +) + +// TelemClient is an autogenerated mock type for the TelemClient type +type TelemClient struct { + mock.Mock +} + +// Telem provides a mock function with given fields: ctx, in +func (_m *TelemClient) Telem(ctx context.Context, in *telem.TelemRequest) (*telem.TelemResponse, error) { + ret := _m.Called(ctx, in) + + var r0 *telem.TelemResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *telem.TelemRequest) (*telem.TelemResponse, error)); ok { + return rf(ctx, in) + } + if rf, ok := ret.Get(0).(func(context.Context, *telem.TelemRequest) *telem.TelemResponse); ok { + r0 = rf(ctx, in) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*telem.TelemResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *telem.TelemRequest) error); ok { + r1 = rf(ctx, in) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TelemBatch provides a mock function with given fields: ctx, in +func (_m *TelemClient) TelemBatch(ctx context.Context, in *telem.TelemBatchRequest) (*telem.TelemResponse, error) { + ret := _m.Called(ctx, in) + + var r0 *telem.TelemResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *telem.TelemBatchRequest) (*telem.TelemResponse, error)); ok { + return rf(ctx, in) + } + if rf, ok := ret.Get(0).(func(context.Context, *telem.TelemBatchRequest) *telem.TelemResponse); ok { + r0 = rf(ctx, in) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*telem.TelemResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *telem.TelemBatchRequest) error); ok { + r1 = rf(ctx, in) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTelemClient creates a new instance of TelemClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewTelemClient(t interface { + mock.TestingT + Cleanup(func()) +}) *TelemClient { + mock := &TelemClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/synchronization/mocks/telemetry_service.go b/core/services/synchronization/mocks/telemetry_service.go new file mode 100644 index 00000000..69177ffa --- /dev/null +++ b/core/services/synchronization/mocks/telemetry_service.go @@ -0,0 +1,126 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + synchronization "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" + mock "github.com/stretchr/testify/mock" +) + +// TelemetryService is an autogenerated mock type for the TelemetryService type +type TelemetryService struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *TelemetryService) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// HealthReport provides a mock function with given fields: +func (_m *TelemetryService) HealthReport() map[string]error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HealthReport") + } + + var r0 map[string]error + if rf, ok := ret.Get(0).(func() map[string]error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]error) + } + } + + return r0 +} + +// Name provides a mock function with given fields: +func (_m *TelemetryService) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Ready provides a mock 
function with given fields: +func (_m *TelemetryService) Ready() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Send provides a mock function with given fields: ctx, telemetry, contractID, telemType +func (_m *TelemetryService) Send(ctx context.Context, telemetry []byte, contractID string, telemType synchronization.TelemetryType) { + _m.Called(ctx, telemetry, contractID, telemType) +} + +// Start provides a mock function with given fields: _a0 +func (_m *TelemetryService) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTelemetryService creates a new instance of TelemetryService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTelemetryService(t interface { + mock.TestingT + Cleanup(func()) +}) *TelemetryService { + mock := &TelemetryService{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/synchronization/telem/telem.pb.go b/core/services/synchronization/telem/telem.pb.go new file mode 100644 index 00000000..e382823e --- /dev/null +++ b/core/services/synchronization/telem/telem.pb.go @@ -0,0 +1,345 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: core/services/synchronization/telem/telem.proto + +package telem + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TelemRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Telemetry []byte `protobuf:"bytes,1,opt,name=telemetry,proto3" json:"telemetry,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + TelemetryType string `protobuf:"bytes,3,opt,name=telemetry_type,json=telemetryType,proto3" json:"telemetry_type,omitempty"` + SentAt int64 `protobuf:"varint,4,opt,name=sent_at,json=sentAt,proto3" json:"sent_at,omitempty"` +} + +func (x *TelemRequest) Reset() { + *x = TelemRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_core_services_synchronization_telem_telem_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TelemRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TelemRequest) ProtoMessage() {} + +func (x *TelemRequest) ProtoReflect() protoreflect.Message { + mi := &file_core_services_synchronization_telem_telem_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TelemRequest.ProtoReflect.Descriptor instead. 
+func (*TelemRequest) Descriptor() ([]byte, []int) { + return file_core_services_synchronization_telem_telem_proto_rawDescGZIP(), []int{0} +} + +func (x *TelemRequest) GetTelemetry() []byte { + if x != nil { + return x.Telemetry + } + return nil +} + +func (x *TelemRequest) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *TelemRequest) GetTelemetryType() string { + if x != nil { + return x.TelemetryType + } + return "" +} + +func (x *TelemRequest) GetSentAt() int64 { + if x != nil { + return x.SentAt + } + return 0 +} + +type TelemBatchRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContractId string `protobuf:"bytes,1,opt,name=contract_id,json=contractId,proto3" json:"contract_id,omitempty"` + Telemetry [][]byte `protobuf:"bytes,2,rep,name=telemetry,proto3" json:"telemetry,omitempty"` + TelemetryType string `protobuf:"bytes,3,opt,name=telemetry_type,json=telemetryType,proto3" json:"telemetry_type,omitempty"` + SentAt int64 `protobuf:"varint,4,opt,name=sent_at,json=sentAt,proto3" json:"sent_at,omitempty"` +} + +func (x *TelemBatchRequest) Reset() { + *x = TelemBatchRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_core_services_synchronization_telem_telem_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TelemBatchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TelemBatchRequest) ProtoMessage() {} + +func (x *TelemBatchRequest) ProtoReflect() protoreflect.Message { + mi := &file_core_services_synchronization_telem_telem_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TelemBatchRequest.ProtoReflect.Descriptor instead. 
+func (*TelemBatchRequest) Descriptor() ([]byte, []int) { + return file_core_services_synchronization_telem_telem_proto_rawDescGZIP(), []int{1} +} + +func (x *TelemBatchRequest) GetContractId() string { + if x != nil { + return x.ContractId + } + return "" +} + +func (x *TelemBatchRequest) GetTelemetry() [][]byte { + if x != nil { + return x.Telemetry + } + return nil +} + +func (x *TelemBatchRequest) GetTelemetryType() string { + if x != nil { + return x.TelemetryType + } + return "" +} + +func (x *TelemBatchRequest) GetSentAt() int64 { + if x != nil { + return x.SentAt + } + return 0 +} + +type TelemResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Body string `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` +} + +func (x *TelemResponse) Reset() { + *x = TelemResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_core_services_synchronization_telem_telem_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TelemResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TelemResponse) ProtoMessage() {} + +func (x *TelemResponse) ProtoReflect() protoreflect.Message { + mi := &file_core_services_synchronization_telem_telem_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TelemResponse.ProtoReflect.Descriptor instead. 
+func (*TelemResponse) Descriptor() ([]byte, []int) { + return file_core_services_synchronization_telem_telem_proto_rawDescGZIP(), []int{2} +} + +func (x *TelemResponse) GetBody() string { + if x != nil { + return x.Body + } + return "" +} + +var File_core_services_synchronization_telem_telem_proto protoreflect.FileDescriptor + +var file_core_services_synchronization_telem_telem_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x05, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x22, 0x86, 0x01, 0x0a, 0x0c, 0x54, 0x65, 0x6c, + 0x65, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x65, 0x6e, 0x74, + 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x73, 0x65, 0x6e, 0x74, 0x41, + 0x74, 0x22, 0x92, 0x01, 0x0a, 0x11, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 
0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, + 0x07, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, + 0x73, 0x65, 0x6e, 0x74, 0x41, 0x74, 0x22, 0x23, 0x0a, 0x0d, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x32, 0x79, 0x0a, 0x05, 0x54, + 0x65, 0x6c, 0x65, 0x6d, 0x12, 0x32, 0x0a, 0x05, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x12, 0x13, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0a, 0x54, 0x65, 0x6c, 0x65, + 0x6d, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x18, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x2e, 0x54, + 0x65, 0x6c, 0x65, 0x6d, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x14, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x4e, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x6b, 0x69, 0x74, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x62, 0x06, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_core_services_synchronization_telem_telem_proto_rawDescOnce sync.Once + file_core_services_synchronization_telem_telem_proto_rawDescData = file_core_services_synchronization_telem_telem_proto_rawDesc +) + +func file_core_services_synchronization_telem_telem_proto_rawDescGZIP() []byte { + file_core_services_synchronization_telem_telem_proto_rawDescOnce.Do(func() { + file_core_services_synchronization_telem_telem_proto_rawDescData = protoimpl.X.CompressGZIP(file_core_services_synchronization_telem_telem_proto_rawDescData) + }) + return file_core_services_synchronization_telem_telem_proto_rawDescData +} + +var file_core_services_synchronization_telem_telem_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_core_services_synchronization_telem_telem_proto_goTypes = []interface{}{ + (*TelemRequest)(nil), // 0: telem.TelemRequest + (*TelemBatchRequest)(nil), // 1: telem.TelemBatchRequest + (*TelemResponse)(nil), // 2: telem.TelemResponse +} +var file_core_services_synchronization_telem_telem_proto_depIdxs = []int32{ + 0, // 0: telem.Telem.Telem:input_type -> telem.TelemRequest + 1, // 1: telem.Telem.TelemBatch:input_type -> telem.TelemBatchRequest + 2, // 2: telem.Telem.Telem:output_type -> telem.TelemResponse + 2, // 3: telem.Telem.TelemBatch:output_type -> telem.TelemResponse + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_core_services_synchronization_telem_telem_proto_init() } +func file_core_services_synchronization_telem_telem_proto_init() { + if File_core_services_synchronization_telem_telem_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_core_services_synchronization_telem_telem_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v 
:= v.(*TelemRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_core_services_synchronization_telem_telem_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TelemBatchRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_core_services_synchronization_telem_telem_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TelemResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_core_services_synchronization_telem_telem_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_core_services_synchronization_telem_telem_proto_goTypes, + DependencyIndexes: file_core_services_synchronization_telem_telem_proto_depIdxs, + MessageInfos: file_core_services_synchronization_telem_telem_proto_msgTypes, + }.Build() + File_core_services_synchronization_telem_telem_proto = out.File + file_core_services_synchronization_telem_telem_proto_rawDesc = nil + file_core_services_synchronization_telem_telem_proto_goTypes = nil + file_core_services_synchronization_telem_telem_proto_depIdxs = nil +} diff --git a/core/services/synchronization/telem/telem.proto b/core/services/synchronization/telem/telem.proto new file mode 100644 index 00000000..ff9ba4d9 --- /dev/null +++ b/core/services/synchronization/telem/telem.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +option go_package = "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem"; + +package telem; + +service Telem { + rpc Telem(TelemRequest) returns (TelemResponse); + rpc 
TelemBatch(TelemBatchRequest) returns (TelemResponse); +} + +message TelemRequest { + bytes telemetry = 1; + string address = 2; + string telemetry_type = 3; + int64 sent_at = 4; +} + +message TelemBatchRequest { + string contract_id = 1; + repeated bytes telemetry = 2; + string telemetry_type = 3; + int64 sent_at = 4; +} + +message TelemResponse { + string body = 1; +} diff --git a/core/services/synchronization/telem/telem_automation_custom.pb.go b/core/services/synchronization/telem/telem_automation_custom.pb.go new file mode 100644 index 00000000..e88cb2f9 --- /dev/null +++ b/core/services/synchronization/telem/telem_automation_custom.pb.go @@ -0,0 +1,370 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.23.4 +// source: telem_automation_custom.proto + +package telem + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type BlockNumber struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + BlockNumber uint64 `protobuf:"varint,2,opt,name=block_number,json=blockNumber,proto3" json:"block_number,omitempty"` + BlockHash string `protobuf:"bytes,3,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` + ConfigDigest []byte `protobuf:"bytes,4,opt,name=config_digest,json=configDigest,proto3" json:"config_digest,omitempty"` +} + +func (x *BlockNumber) Reset() { + *x = BlockNumber{} + if protoimpl.UnsafeEnabled { + mi := &file_telem_automation_custom_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockNumber) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockNumber) ProtoMessage() {} + +func (x *BlockNumber) ProtoReflect() protoreflect.Message { + mi := &file_telem_automation_custom_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockNumber.ProtoReflect.Descriptor instead. 
+func (*BlockNumber) Descriptor() ([]byte, []int) { + return file_telem_automation_custom_proto_rawDescGZIP(), []int{0} +} + +func (x *BlockNumber) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *BlockNumber) GetBlockNumber() uint64 { + if x != nil { + return x.BlockNumber + } + return 0 +} + +func (x *BlockNumber) GetBlockHash() string { + if x != nil { + return x.BlockHash + } + return "" +} + +func (x *BlockNumber) GetConfigDigest() []byte { + if x != nil { + return x.ConfigDigest + } + return nil +} + +type NodeVersion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + NodeVersion string `protobuf:"bytes,2,opt,name=node_version,json=nodeVersion,proto3" json:"node_version,omitempty"` + ConfigDigest []byte `protobuf:"bytes,3,opt,name=config_digest,json=configDigest,proto3" json:"config_digest,omitempty"` +} + +func (x *NodeVersion) Reset() { + *x = NodeVersion{} + if protoimpl.UnsafeEnabled { + mi := &file_telem_automation_custom_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeVersion) ProtoMessage() {} + +func (x *NodeVersion) ProtoReflect() protoreflect.Message { + mi := &file_telem_automation_custom_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeVersion.ProtoReflect.Descriptor instead. 
+func (*NodeVersion) Descriptor() ([]byte, []int) { + return file_telem_automation_custom_proto_rawDescGZIP(), []int{1} +} + +func (x *NodeVersion) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *NodeVersion) GetNodeVersion() string { + if x != nil { + return x.NodeVersion + } + return "" +} + +func (x *NodeVersion) GetConfigDigest() []byte { + if x != nil { + return x.ConfigDigest + } + return nil +} + +type AutomationTelemWrapper struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Msg: + // + // *AutomationTelemWrapper_BlockNumber + // *AutomationTelemWrapper_NodeVersion + Msg isAutomationTelemWrapper_Msg `protobuf_oneof:"msg"` +} + +func (x *AutomationTelemWrapper) Reset() { + *x = AutomationTelemWrapper{} + if protoimpl.UnsafeEnabled { + mi := &file_telem_automation_custom_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AutomationTelemWrapper) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AutomationTelemWrapper) ProtoMessage() {} + +func (x *AutomationTelemWrapper) ProtoReflect() protoreflect.Message { + mi := &file_telem_automation_custom_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AutomationTelemWrapper.ProtoReflect.Descriptor instead. 
+func (*AutomationTelemWrapper) Descriptor() ([]byte, []int) { + return file_telem_automation_custom_proto_rawDescGZIP(), []int{2} +} + +func (m *AutomationTelemWrapper) GetMsg() isAutomationTelemWrapper_Msg { + if m != nil { + return m.Msg + } + return nil +} + +func (x *AutomationTelemWrapper) GetBlockNumber() *BlockNumber { + if x, ok := x.GetMsg().(*AutomationTelemWrapper_BlockNumber); ok { + return x.BlockNumber + } + return nil +} + +func (x *AutomationTelemWrapper) GetNodeVersion() *NodeVersion { + if x, ok := x.GetMsg().(*AutomationTelemWrapper_NodeVersion); ok { + return x.NodeVersion + } + return nil +} + +type isAutomationTelemWrapper_Msg interface { + isAutomationTelemWrapper_Msg() +} + +type AutomationTelemWrapper_BlockNumber struct { + BlockNumber *BlockNumber `protobuf:"bytes,1,opt,name=block_number,json=blockNumber,proto3,oneof"` +} + +type AutomationTelemWrapper_NodeVersion struct { + NodeVersion *NodeVersion `protobuf:"bytes,2,opt,name=node_version,json=nodeVersion,proto3,oneof"` +} + +func (*AutomationTelemWrapper_BlockNumber) isAutomationTelemWrapper_Msg() {} + +func (*AutomationTelemWrapper_NodeVersion) isAutomationTelemWrapper_Msg() {} + +var File_telem_automation_custom_proto protoreflect.FileDescriptor + +var file_telem_automation_custom_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x05, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x22, 0x92, 0x01, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 
0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x73, 0x0a, 0x0b, 0x4e, + 0x6f, 0x64, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x6f, 0x64, 0x65, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6e, 0x6f, 0x64, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x22, 0x91, 0x01, 0x0a, 0x16, 0x41, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x65, 0x6c, 0x65, 0x6d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0c, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x00, + 0x52, 0x0b, 0x6e, 0x6f, 0x64, 0x65, 0x56, 
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x05, 0x0a, + 0x03, 0x6d, 0x73, 0x67, 0x42, 0x4e, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, + 0x6b, 0x69, 0x74, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, 0x76, 0x32, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, + 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_telem_automation_custom_proto_rawDescOnce sync.Once + file_telem_automation_custom_proto_rawDescData = file_telem_automation_custom_proto_rawDesc +) + +func file_telem_automation_custom_proto_rawDescGZIP() []byte { + file_telem_automation_custom_proto_rawDescOnce.Do(func() { + file_telem_automation_custom_proto_rawDescData = protoimpl.X.CompressGZIP(file_telem_automation_custom_proto_rawDescData) + }) + return file_telem_automation_custom_proto_rawDescData +} + +var file_telem_automation_custom_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_telem_automation_custom_proto_goTypes = []interface{}{ + (*BlockNumber)(nil), // 0: telem.BlockNumber + (*NodeVersion)(nil), // 1: telem.NodeVersion + (*AutomationTelemWrapper)(nil), // 2: telem.AutomationTelemWrapper +} +var file_telem_automation_custom_proto_depIdxs = []int32{ + 0, // 0: telem.AutomationTelemWrapper.block_number:type_name -> telem.BlockNumber + 1, // 1: telem.AutomationTelemWrapper.node_version:type_name -> telem.NodeVersion + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_telem_automation_custom_proto_init() } +func 
file_telem_automation_custom_proto_init() { + if File_telem_automation_custom_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_telem_automation_custom_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockNumber); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_telem_automation_custom_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeVersion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_telem_automation_custom_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AutomationTelemWrapper); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_telem_automation_custom_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*AutomationTelemWrapper_BlockNumber)(nil), + (*AutomationTelemWrapper_NodeVersion)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_telem_automation_custom_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_telem_automation_custom_proto_goTypes, + DependencyIndexes: file_telem_automation_custom_proto_depIdxs, + MessageInfos: file_telem_automation_custom_proto_msgTypes, + }.Build() + File_telem_automation_custom_proto = out.File + file_telem_automation_custom_proto_rawDesc = nil + file_telem_automation_custom_proto_goTypes = nil + file_telem_automation_custom_proto_depIdxs = nil +} diff --git a/core/services/synchronization/telem/telem_automation_custom.proto b/core/services/synchronization/telem/telem_automation_custom.proto new file mode 100644 index 00000000..b683d831 --- /dev/null +++ 
b/core/services/synchronization/telem/telem_automation_custom.proto @@ -0,0 +1,82 @@ +syntax = "proto3"; + +option go_package = "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem"; + +package telem; + +message BlockNumber { + uint64 timestamp = 1; + uint64 block_number = 2; + string block_hash = 3; + bytes config_digest = 4; +} + +message NodeVersion { + uint64 timestamp = 1; + string node_version = 2; + bytes config_digest = 3; +} + +message AutomationTelemWrapper{ + oneof msg { + BlockNumber block_number = 1; + NodeVersion node_version = 2; + } +} + +// // StreamsLookup contains the metadata about a mercury request +// message StreamsLookup { +// string upkeep_id = 1; +// uint64 block_number = 2; // block number provided by ocr2keepers plugin +// uint64 timestamp = 3; // current timestamp +// uint64 time_param = 4; // time param key is block number for v0.2 and timestamp for v0.3, time param is the corresponding value +// repeated string feeds = 5; // array of feed names +// } + +// // StreamsResponse contains the metadata about mercury response +// message StreamsResponse { +// string upkeep_id = 1; +// uint64 block_number = 2; // block number provided by ocr2keepers plugin +// uint64 timestamp = 3; // current timestamp +// repeated string feeds = 4; // array of feed names +// repeated uint32 http_status_codes = 5; // Mercury server response code +// bool success = 6; // True if all feeds gave successful response +// bool retryable = 7; // whether feedLookup should be retried if request fails +// uint32 failure_reason = 8; // failure enum defined in abi.go (UPKEEP_FAILURE_REASON_MERCURY_ACCESS_NOT_ALLOWED or some on chain reasons) +// } + +// // StreamsCheckCallback contains whether customer's checkCallBack returns true with mercury data as input +// message StreamsCheckCallback { +// string upkeep_id = 1; +// uint64 block_number = 2; // block number provided by ocr2keepers plugin +// uint64 timestamp = 3; // current timestamp +// uint32 
failure_reason = 4; // failure enum defined in abi.go (on chain reason) +// bool upkeep_needed = 5; // result of checkCallBack eth call, whether upkeep needs to be performed +// } + +// // LogTrigger contains log trigger upkeep's information +// message LogTrigger { +// string upkeep_id = 1; +// uint64 block_number = 2; // block number provided by ocr2keepers plugin +// uint64 timestamp = 3; // current timestamp +// uint64 log_block_number = 4; // block number of log we are checking in pipeline +// string log_block_hash = 5; // block has of log we are checking in pipeline +// } + +// // LogTriggerSuccess contains whether checkLog/checkUpkeep eth call returns true for a LogTriggered Upkeep +// message LogTriggerSimulateResult { +// string upkeep_id = 1; +// uint64 block_number = 2; // block number provided by ocr2keepers plugin +// uint64 timestamp = 3; // current timestamp +// bool success = 4; // result of checkLog/checkUpkeep eth call, whether upkeep needs to be performed +// } + +// message AutomationTelemWrapper { +// oneof msg { +// StreamsLookup streams_lookup = 1; +// StreamsResponse streams_response = 2; +// StreamsCheckCallback streams_checkcallback = 3; +// LogTrigger log_trigger = 4; +// LogTriggerSimulateResult log_trigger_simulate_result = 5; +// } +// } \ No newline at end of file diff --git a/core/services/synchronization/telem/telem_enhanced_ea.pb.go b/core/services/synchronization/telem/telem_enhanced_ea.pb.go new file mode 100644 index 00000000..94237396 --- /dev/null +++ b/core/services/synchronization/telem/telem_enhanced_ea.pb.go @@ -0,0 +1,290 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: core/services/synchronization/telem/telem_enhanced_ea.proto + +package telem + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EnhancedEA struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DataSource string `protobuf:"bytes,1,opt,name=data_source,json=dataSource,proto3" json:"data_source,omitempty"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + BridgeTaskRunStartedTimestamp int64 `protobuf:"varint,3,opt,name=bridge_task_run_started_timestamp,json=bridgeTaskRunStartedTimestamp,proto3" json:"bridge_task_run_started_timestamp,omitempty"` + BridgeTaskRunEndedTimestamp int64 `protobuf:"varint,4,opt,name=bridge_task_run_ended_timestamp,json=bridgeTaskRunEndedTimestamp,proto3" json:"bridge_task_run_ended_timestamp,omitempty"` + ProviderRequestedTimestamp int64 `protobuf:"varint,5,opt,name=provider_requested_timestamp,json=providerRequestedTimestamp,proto3" json:"provider_requested_timestamp,omitempty"` + ProviderReceivedTimestamp int64 `protobuf:"varint,6,opt,name=provider_received_timestamp,json=providerReceivedTimestamp,proto3" json:"provider_received_timestamp,omitempty"` + ProviderDataStreamEstablished int64 `protobuf:"varint,7,opt,name=provider_data_stream_established,json=providerDataStreamEstablished,proto3" json:"provider_data_stream_established,omitempty"` + ProviderIndicatedTime int64 `protobuf:"varint,8,opt,name=provider_indicated_time,json=providerIndicatedTime,proto3" 
json:"provider_indicated_time,omitempty"` + Feed string `protobuf:"bytes,9,opt,name=feed,proto3" json:"feed,omitempty"` + ChainId string `protobuf:"bytes,10,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + Observation int64 `protobuf:"varint,11,opt,name=observation,proto3" json:"observation,omitempty"` + ConfigDigest string `protobuf:"bytes,12,opt,name=config_digest,json=configDigest,proto3" json:"config_digest,omitempty"` + Round int64 `protobuf:"varint,13,opt,name=round,proto3" json:"round,omitempty"` + Epoch int64 `protobuf:"varint,14,opt,name=epoch,proto3" json:"epoch,omitempty"` +} + +func (x *EnhancedEA) Reset() { + *x = EnhancedEA{} + if protoimpl.UnsafeEnabled { + mi := &file_core_services_synchronization_telem_telem_enhanced_ea_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnhancedEA) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnhancedEA) ProtoMessage() {} + +func (x *EnhancedEA) ProtoReflect() protoreflect.Message { + mi := &file_core_services_synchronization_telem_telem_enhanced_ea_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnhancedEA.ProtoReflect.Descriptor instead. 
+func (*EnhancedEA) Descriptor() ([]byte, []int) { + return file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDescGZIP(), []int{0} +} + +func (x *EnhancedEA) GetDataSource() string { + if x != nil { + return x.DataSource + } + return "" +} + +func (x *EnhancedEA) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *EnhancedEA) GetBridgeTaskRunStartedTimestamp() int64 { + if x != nil { + return x.BridgeTaskRunStartedTimestamp + } + return 0 +} + +func (x *EnhancedEA) GetBridgeTaskRunEndedTimestamp() int64 { + if x != nil { + return x.BridgeTaskRunEndedTimestamp + } + return 0 +} + +func (x *EnhancedEA) GetProviderRequestedTimestamp() int64 { + if x != nil { + return x.ProviderRequestedTimestamp + } + return 0 +} + +func (x *EnhancedEA) GetProviderReceivedTimestamp() int64 { + if x != nil { + return x.ProviderReceivedTimestamp + } + return 0 +} + +func (x *EnhancedEA) GetProviderDataStreamEstablished() int64 { + if x != nil { + return x.ProviderDataStreamEstablished + } + return 0 +} + +func (x *EnhancedEA) GetProviderIndicatedTime() int64 { + if x != nil { + return x.ProviderIndicatedTime + } + return 0 +} + +func (x *EnhancedEA) GetFeed() string { + if x != nil { + return x.Feed + } + return "" +} + +func (x *EnhancedEA) GetChainId() string { + if x != nil { + return x.ChainId + } + return "" +} + +func (x *EnhancedEA) GetObservation() int64 { + if x != nil { + return x.Observation + } + return 0 +} + +func (x *EnhancedEA) GetConfigDigest() string { + if x != nil { + return x.ConfigDigest + } + return "" +} + +func (x *EnhancedEA) GetRound() int64 { + if x != nil { + return x.Round + } + return 0 +} + +func (x *EnhancedEA) GetEpoch() int64 { + if x != nil { + return x.Epoch + } + return 0 +} + +var File_core_services_synchronization_telem_telem_enhanced_ea_proto protoreflect.FileDescriptor + +var file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x63, 0x6f, 0x72, 0x65, 
0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x65, 0x6e, 0x68, 0x61, + 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x65, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x22, 0xf8, 0x04, 0x0a, 0x0a, 0x45, 0x6e, 0x68, 0x61, 0x6e, 0x63, 0x65, + 0x64, 0x45, 0x41, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x48, 0x0a, 0x21, 0x62, 0x72, + 0x69, 0x64, 0x67, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x62, 0x72, 0x69, 0x64, 0x67, 0x65, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x12, 0x44, 0x0a, 0x1f, 0x62, 0x72, 0x69, 0x64, 0x67, 0x65, 0x5f, 0x74, + 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1b, 0x62, + 0x72, 0x69, 0x64, 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x6e, 0x64, 0x65, + 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x40, 0x0a, 0x1c, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x1a, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, + 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3e, 0x0a, 0x1b, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, + 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x19, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x65, 0x63, 0x65, 0x69, + 0x76, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x47, 0x0a, 0x20, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x66, 0x65, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x65, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, + 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0b, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, + 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, 0x67, + 0x65, 0x73, 
0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, + 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x42, + 0x4e, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6d, + 0x61, 0x72, 0x74, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6b, 0x69, 0x74, 0x2f, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, + 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDescOnce sync.Once + file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDescData = file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDesc +) + +func file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDescGZIP() []byte { + file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDescOnce.Do(func() { + file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDescData = protoimpl.X.CompressGZIP(file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDescData) + }) + return file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDescData +} + +var file_core_services_synchronization_telem_telem_enhanced_ea_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_core_services_synchronization_telem_telem_enhanced_ea_proto_goTypes = []interface{}{ + (*EnhancedEA)(nil), // 0: telem.EnhancedEA +} +var file_core_services_synchronization_telem_telem_enhanced_ea_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // 
[0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_core_services_synchronization_telem_telem_enhanced_ea_proto_init() } +func file_core_services_synchronization_telem_telem_enhanced_ea_proto_init() { + if File_core_services_synchronization_telem_telem_enhanced_ea_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_core_services_synchronization_telem_telem_enhanced_ea_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnhancedEA); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_core_services_synchronization_telem_telem_enhanced_ea_proto_goTypes, + DependencyIndexes: file_core_services_synchronization_telem_telem_enhanced_ea_proto_depIdxs, + MessageInfos: file_core_services_synchronization_telem_telem_enhanced_ea_proto_msgTypes, + }.Build() + File_core_services_synchronization_telem_telem_enhanced_ea_proto = out.File + file_core_services_synchronization_telem_telem_enhanced_ea_proto_rawDesc = nil + file_core_services_synchronization_telem_telem_enhanced_ea_proto_goTypes = nil + file_core_services_synchronization_telem_telem_enhanced_ea_proto_depIdxs = nil +} diff --git a/core/services/synchronization/telem/telem_enhanced_ea.proto b/core/services/synchronization/telem/telem_enhanced_ea.proto new file mode 100644 index 00000000..5296c78c --- /dev/null +++ b/core/services/synchronization/telem/telem_enhanced_ea.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +option go_package = 
"github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem"; + +package telem; + +message EnhancedEA { + string data_source=1; + double value=2; + int64 bridge_task_run_started_timestamp=3; + int64 bridge_task_run_ended_timestamp=4; + int64 provider_requested_timestamp=5; + int64 provider_received_timestamp=6; + int64 provider_data_stream_established=7; + int64 provider_indicated_time=8; + string feed=9; + string chain_id=10; + int64 observation=11; + string config_digest = 12; + int64 round=13; + int64 epoch=14; +} diff --git a/core/services/synchronization/telem/telem_enhanced_ea_mercury.pb.go b/core/services/synchronization/telem/telem_enhanced_ea_mercury.pb.go new file mode 100644 index 00000000..9cda6ef9 --- /dev/null +++ b/core/services/synchronization/telem/telem_enhanced_ea_mercury.pb.go @@ -0,0 +1,493 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.24.3 +// source: core/services/synchronization/telem/telem_enhanced_ea_mercury.proto + +package telem + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EnhancedEAMercury struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint32 `protobuf:"varint,32,opt,name=version,proto3" json:"version,omitempty"` + DataSource string `protobuf:"bytes,1,opt,name=data_source,json=dataSource,proto3" json:"data_source,omitempty"` + DpBenchmarkPrice float64 `protobuf:"fixed64,2,opt,name=dp_benchmark_price,json=dpBenchmarkPrice,proto3" json:"dp_benchmark_price,omitempty"` + DpBid float64 `protobuf:"fixed64,3,opt,name=dp_bid,json=dpBid,proto3" json:"dp_bid,omitempty"` + DpAsk float64 `protobuf:"fixed64,4,opt,name=dp_ask,json=dpAsk,proto3" json:"dp_ask,omitempty"` + // v1 fields (block range) + CurrentBlockNumber int64 `protobuf:"varint,5,opt,name=current_block_number,json=currentBlockNumber,proto3" json:"current_block_number,omitempty"` + CurrentBlockHash string `protobuf:"bytes,6,opt,name=current_block_hash,json=currentBlockHash,proto3" json:"current_block_hash,omitempty"` + CurrentBlockTimestamp uint64 `protobuf:"varint,7,opt,name=current_block_timestamp,json=currentBlockTimestamp,proto3" json:"current_block_timestamp,omitempty"` + // v2+v3 fields (timestamp range) + FetchMaxFinalizedTimestamp bool `protobuf:"varint,25,opt,name=fetch_max_finalized_timestamp,json=fetchMaxFinalizedTimestamp,proto3" json:"fetch_max_finalized_timestamp,omitempty"` + MaxFinalizedTimestamp int64 `protobuf:"varint,26,opt,name=max_finalized_timestamp,json=maxFinalizedTimestamp,proto3" json:"max_finalized_timestamp,omitempty"` + ObservationTimestamp uint32 `protobuf:"varint,27,opt,name=observation_timestamp,json=observationTimestamp,proto3" json:"observation_timestamp,omitempty"` + IsLinkFeed bool `protobuf:"varint,28,opt,name=is_link_feed,json=isLinkFeed,proto3" json:"is_link_feed,omitempty"` + LinkPrice int64 `protobuf:"varint,29,opt,name=link_price,json=linkPrice,proto3" json:"link_price,omitempty"` + IsNativeFeed 
bool `protobuf:"varint,30,opt,name=is_native_feed,json=isNativeFeed,proto3" json:"is_native_feed,omitempty"` + NativePrice int64 `protobuf:"varint,31,opt,name=native_price,json=nativePrice,proto3" json:"native_price,omitempty"` + BridgeTaskRunStartedTimestamp int64 `protobuf:"varint,8,opt,name=bridge_task_run_started_timestamp,json=bridgeTaskRunStartedTimestamp,proto3" json:"bridge_task_run_started_timestamp,omitempty"` + BridgeTaskRunEndedTimestamp int64 `protobuf:"varint,9,opt,name=bridge_task_run_ended_timestamp,json=bridgeTaskRunEndedTimestamp,proto3" json:"bridge_task_run_ended_timestamp,omitempty"` + ProviderRequestedTimestamp int64 `protobuf:"varint,10,opt,name=provider_requested_timestamp,json=providerRequestedTimestamp,proto3" json:"provider_requested_timestamp,omitempty"` + ProviderReceivedTimestamp int64 `protobuf:"varint,11,opt,name=provider_received_timestamp,json=providerReceivedTimestamp,proto3" json:"provider_received_timestamp,omitempty"` + ProviderDataStreamEstablished int64 `protobuf:"varint,12,opt,name=provider_data_stream_established,json=providerDataStreamEstablished,proto3" json:"provider_data_stream_established,omitempty"` + ProviderIndicatedTime int64 `protobuf:"varint,13,opt,name=provider_indicated_time,json=providerIndicatedTime,proto3" json:"provider_indicated_time,omitempty"` + Feed string `protobuf:"bytes,14,opt,name=feed,proto3" json:"feed,omitempty"` + // v1+v2+v3 + ObservationBenchmarkPrice int64 `protobuf:"varint,15,opt,name=observation_benchmark_price,json=observationBenchmarkPrice,proto3" json:"observation_benchmark_price,omitempty"` // This value overflows, will be reserved and removed in future versions + ObservationBenchmarkPriceString string `protobuf:"bytes,22,opt,name=observation_benchmark_price_string,json=observationBenchmarkPriceString,proto3" json:"observation_benchmark_price_string,omitempty"` + // v1+v3 + ObservationBid int64 `protobuf:"varint,16,opt,name=observation_bid,json=observationBid,proto3" 
json:"observation_bid,omitempty"` // This value overflows, will be reserved and removed in future versions + ObservationAsk int64 `protobuf:"varint,17,opt,name=observation_ask,json=observationAsk,proto3" json:"observation_ask,omitempty"` // This value overflows, will be reserved and removed in future versions + ObservationBidString string `protobuf:"bytes,23,opt,name=observation_bid_string,json=observationBidString,proto3" json:"observation_bid_string,omitempty"` + ObservationAskString string `protobuf:"bytes,24,opt,name=observation_ask_string,json=observationAskString,proto3" json:"observation_ask_string,omitempty"` + ConfigDigest string `protobuf:"bytes,18,opt,name=config_digest,json=configDigest,proto3" json:"config_digest,omitempty"` + Round int64 `protobuf:"varint,19,opt,name=round,proto3" json:"round,omitempty"` + Epoch int64 `protobuf:"varint,20,opt,name=epoch,proto3" json:"epoch,omitempty"` + AssetSymbol string `protobuf:"bytes,21,opt,name=asset_symbol,json=assetSymbol,proto3" json:"asset_symbol,omitempty"` +} + +func (x *EnhancedEAMercury) Reset() { + *x = EnhancedEAMercury{} + if protoimpl.UnsafeEnabled { + mi := &file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnhancedEAMercury) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnhancedEAMercury) ProtoMessage() {} + +func (x *EnhancedEAMercury) ProtoReflect() protoreflect.Message { + mi := &file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnhancedEAMercury.ProtoReflect.Descriptor instead. 
+func (*EnhancedEAMercury) Descriptor() ([]byte, []int) { + return file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDescGZIP(), []int{0} +} + +func (x *EnhancedEAMercury) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *EnhancedEAMercury) GetDataSource() string { + if x != nil { + return x.DataSource + } + return "" +} + +func (x *EnhancedEAMercury) GetDpBenchmarkPrice() float64 { + if x != nil { + return x.DpBenchmarkPrice + } + return 0 +} + +func (x *EnhancedEAMercury) GetDpBid() float64 { + if x != nil { + return x.DpBid + } + return 0 +} + +func (x *EnhancedEAMercury) GetDpAsk() float64 { + if x != nil { + return x.DpAsk + } + return 0 +} + +func (x *EnhancedEAMercury) GetCurrentBlockNumber() int64 { + if x != nil { + return x.CurrentBlockNumber + } + return 0 +} + +func (x *EnhancedEAMercury) GetCurrentBlockHash() string { + if x != nil { + return x.CurrentBlockHash + } + return "" +} + +func (x *EnhancedEAMercury) GetCurrentBlockTimestamp() uint64 { + if x != nil { + return x.CurrentBlockTimestamp + } + return 0 +} + +func (x *EnhancedEAMercury) GetFetchMaxFinalizedTimestamp() bool { + if x != nil { + return x.FetchMaxFinalizedTimestamp + } + return false +} + +func (x *EnhancedEAMercury) GetMaxFinalizedTimestamp() int64 { + if x != nil { + return x.MaxFinalizedTimestamp + } + return 0 +} + +func (x *EnhancedEAMercury) GetObservationTimestamp() uint32 { + if x != nil { + return x.ObservationTimestamp + } + return 0 +} + +func (x *EnhancedEAMercury) GetIsLinkFeed() bool { + if x != nil { + return x.IsLinkFeed + } + return false +} + +func (x *EnhancedEAMercury) GetLinkPrice() int64 { + if x != nil { + return x.LinkPrice + } + return 0 +} + +func (x *EnhancedEAMercury) GetIsNativeFeed() bool { + if x != nil { + return x.IsNativeFeed + } + return false +} + +func (x *EnhancedEAMercury) GetNativePrice() int64 { + if x != nil { + return x.NativePrice + } + return 0 +} + +func (x *EnhancedEAMercury) 
GetBridgeTaskRunStartedTimestamp() int64 { + if x != nil { + return x.BridgeTaskRunStartedTimestamp + } + return 0 +} + +func (x *EnhancedEAMercury) GetBridgeTaskRunEndedTimestamp() int64 { + if x != nil { + return x.BridgeTaskRunEndedTimestamp + } + return 0 +} + +func (x *EnhancedEAMercury) GetProviderRequestedTimestamp() int64 { + if x != nil { + return x.ProviderRequestedTimestamp + } + return 0 +} + +func (x *EnhancedEAMercury) GetProviderReceivedTimestamp() int64 { + if x != nil { + return x.ProviderReceivedTimestamp + } + return 0 +} + +func (x *EnhancedEAMercury) GetProviderDataStreamEstablished() int64 { + if x != nil { + return x.ProviderDataStreamEstablished + } + return 0 +} + +func (x *EnhancedEAMercury) GetProviderIndicatedTime() int64 { + if x != nil { + return x.ProviderIndicatedTime + } + return 0 +} + +func (x *EnhancedEAMercury) GetFeed() string { + if x != nil { + return x.Feed + } + return "" +} + +func (x *EnhancedEAMercury) GetObservationBenchmarkPrice() int64 { + if x != nil { + return x.ObservationBenchmarkPrice + } + return 0 +} + +func (x *EnhancedEAMercury) GetObservationBenchmarkPriceString() string { + if x != nil { + return x.ObservationBenchmarkPriceString + } + return "" +} + +func (x *EnhancedEAMercury) GetObservationBid() int64 { + if x != nil { + return x.ObservationBid + } + return 0 +} + +func (x *EnhancedEAMercury) GetObservationAsk() int64 { + if x != nil { + return x.ObservationAsk + } + return 0 +} + +func (x *EnhancedEAMercury) GetObservationBidString() string { + if x != nil { + return x.ObservationBidString + } + return "" +} + +func (x *EnhancedEAMercury) GetObservationAskString() string { + if x != nil { + return x.ObservationAskString + } + return "" +} + +func (x *EnhancedEAMercury) GetConfigDigest() string { + if x != nil { + return x.ConfigDigest + } + return "" +} + +func (x *EnhancedEAMercury) GetRound() int64 { + if x != nil { + return x.Round + } + return 0 +} + +func (x *EnhancedEAMercury) GetEpoch() int64 { + 
if x != nil { + return x.Epoch + } + return 0 +} + +func (x *EnhancedEAMercury) GetAssetSymbol() string { + if x != nil { + return x.AssetSymbol + } + return "" +} + +var File_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto protoreflect.FileDescriptor + +var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDesc = []byte{ + 0x0a, 0x43, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x65, 0x6e, 0x68, 0x61, + 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x65, 0x61, 0x5f, 0x6d, 0x65, 0x72, 0x63, 0x75, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x22, 0xe2, 0x0b, 0x0a, + 0x11, 0x45, 0x6e, 0x68, 0x61, 0x6e, 0x63, 0x65, 0x64, 0x45, 0x41, 0x4d, 0x65, 0x72, 0x63, 0x75, + 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x20, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x64, 0x70, 0x5f, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x5f, 0x70, 0x72, + 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x64, 0x70, 0x42, 0x65, 0x6e, + 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x50, 0x72, 0x69, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x64, + 0x70, 0x5f, 0x62, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x64, 0x70, 0x42, + 0x69, 0x64, 0x12, 0x15, 0x0a, 0x06, 0x64, 0x70, 0x5f, 0x61, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x05, 0x64, 0x70, 0x41, 0x73, 0x6b, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 
0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x12, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x36, 0x0a, 0x17, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x15, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x12, 0x41, 0x0a, 0x1d, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x66, + 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x66, 0x65, 0x74, 0x63, 0x68, 0x4d, + 0x61, 0x78, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x12, 0x36, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x69, 0x6e, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x1a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, + 0x7a, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x33, 0x0a, 0x15, + 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x14, 0x6f, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x66, 0x65, 0x65, + 
0x64, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4c, 0x69, 0x6e, 0x6b, 0x46, + 0x65, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x70, 0x72, 0x69, 0x63, + 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6c, 0x69, 0x6e, 0x6b, 0x50, 0x72, 0x69, + 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x73, 0x5f, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x66, 0x65, 0x65, 0x64, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x4e, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x46, 0x65, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x63, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, + 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x50, 0x72, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x21, 0x62, + 0x72, 0x69, 0x64, 0x67, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x62, 0x72, 0x69, 0x64, 0x67, 0x65, 0x54, 0x61, + 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x44, 0x0a, 0x1f, 0x62, 0x72, 0x69, 0x64, 0x67, 0x65, 0x5f, + 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1b, + 0x62, 0x72, 0x69, 0x64, 0x67, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x75, 0x6e, 0x45, 0x6e, 0x64, + 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x40, 0x0a, 0x1c, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x1a, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x65, 0x64, 0x54, 0x69, 
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3e, 0x0a, + 0x1b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, + 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x19, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x65, 0x63, 0x65, + 0x69, 0x76, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x47, 0x0a, + 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, + 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x44, 0x61, 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x73, 0x74, 0x61, 0x62, + 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x66, 0x65, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x65, + 0x65, 0x64, 0x12, 0x3e, 0x0a, 0x1b, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x5f, 0x70, 0x72, 0x69, 0x63, + 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x19, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x50, 0x72, 0x69, + 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x22, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x5f, 0x70, 0x72, 0x69, 0x63, + 0x65, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x16, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x1f, + 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x65, 0x6e, 0x63, 0x68, + 0x6d, 0x61, 0x72, 0x6b, 0x50, 0x72, 0x69, 0x63, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, + 0x27, 0x0a, 0x0f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, + 0x69, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x6f, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x73, 0x6b, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x73, + 0x6b, 0x12, 0x34, 0x0a, 0x16, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x62, 0x69, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x14, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x69, + 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x34, 0x0a, 0x16, 0x6f, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x73, 0x6b, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x73, 0x6b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, + 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x13, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, + 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x21, + 0x0a, 0x0c, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x15, + 0x20, 0x01, 
0x28, 0x09, 0x52, 0x0b, 0x61, 0x73, 0x73, 0x65, 0x74, 0x53, 0x79, 0x6d, 0x62, 0x6f, + 0x6c, 0x42, 0x4e, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x73, 0x6d, 0x61, 0x72, 0x74, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6b, 0x69, 0x74, + 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x79, 0x6e, 0x63, + 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDescOnce sync.Once + file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDescData = file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDesc +) + +func file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDescGZIP() []byte { + file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDescOnce.Do(func() { + file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDescData = protoimpl.X.CompressGZIP(file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDescData) + }) + return file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDescData +} + +var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_goTypes = []interface{}{ + (*EnhancedEAMercury)(nil), // 0: telem.EnhancedEAMercury +} +var file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for 
extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_init() } +func file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_init() { + if File_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnhancedEAMercury); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_goTypes, + DependencyIndexes: file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_depIdxs, + MessageInfos: file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_msgTypes, + }.Build() + File_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto = out.File + file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_rawDesc = nil + file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_goTypes = nil + file_core_services_synchronization_telem_telem_enhanced_ea_mercury_proto_depIdxs = nil +} diff --git a/core/services/synchronization/telem/telem_enhanced_ea_mercury.proto b/core/services/synchronization/telem/telem_enhanced_ea_mercury.proto new file mode 100644 index 00000000..eac644cc --- /dev/null +++ b/core/services/synchronization/telem/telem_enhanced_ea_mercury.proto @@ -0,0 +1,53 @@ +syntax = "proto3"; + 
+option go_package = "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem"; + +package telem; + +message EnhancedEAMercury { + uint32 version = 32; + + string data_source=1; + double dp_benchmark_price=2; + double dp_bid=3; + double dp_ask=4; + + // v1 fields (block range) + int64 current_block_number=5; + string current_block_hash=6; + uint64 current_block_timestamp=7; + + // v2+v3 fields (timestamp range) + bool fetch_max_finalized_timestamp = 25; + int64 max_finalized_timestamp=26; + uint32 observation_timestamp=27; + bool is_link_feed=28; + int64 link_price=29; + bool is_native_feed=30; + int64 native_price=31; + + int64 bridge_task_run_started_timestamp=8; + int64 bridge_task_run_ended_timestamp=9; + int64 provider_requested_timestamp=10; + int64 provider_received_timestamp=11; + int64 provider_data_stream_established=12; + int64 provider_indicated_time=13; + + string feed=14; + + // v1+v2+v3 + int64 observation_benchmark_price=15; // This value overflows, will be reserved and removed in future versions + string observation_benchmark_price_string = 22; + // v1+v3 + int64 observation_bid=16; // This value overflows, will be reserved and removed in future versions + int64 observation_ask=17; // This value overflows, will be reserved and removed in future versions + string observation_bid_string = 23; + string observation_ask_string = 24; + + string config_digest = 18; + int64 round=19; + int64 epoch=20; + string asset_symbol=21; + + +} diff --git a/core/services/synchronization/telem/telem_functions_request.pb.go b/core/services/synchronization/telem/telem_functions_request.pb.go new file mode 100644 index 00000000..82690cb9 --- /dev/null +++ b/core/services/synchronization/telem/telem_functions_request.pb.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: telem_functions_request.proto + +package telem + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FunctionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + NodeAddress string `protobuf:"bytes,2,opt,name=node_address,json=nodeAddress,proto3" json:"node_address,omitempty"` + Domains []string `protobuf:"bytes,3,rep,name=domains,proto3" json:"domains,omitempty"` +} + +func (x *FunctionsRequest) Reset() { + *x = FunctionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_telem_functions_request_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FunctionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FunctionsRequest) ProtoMessage() {} + +func (x *FunctionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_telem_functions_request_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FunctionsRequest.ProtoReflect.Descriptor instead. 
+func (*FunctionsRequest) Descriptor() ([]byte, []int) { + return file_telem_functions_request_proto_rawDescGZIP(), []int{0} +} + +func (x *FunctionsRequest) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *FunctionsRequest) GetNodeAddress() string { + if x != nil { + return x.NodeAddress + } + return "" +} + +func (x *FunctionsRequest) GetDomains() []string { + if x != nil { + return x.Domains + } + return nil +} + +var File_telem_functions_request_proto protoreflect.FileDescriptor + +var file_telem_functions_request_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x05, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x22, 0x6e, 0x0a, 0x10, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x6f, 0x64, + 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x6e, 0x6f, 0x64, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x42, 0x4e, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x6b, 0x69, 0x74, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2f, 0x74, 0x65, 0x6c, 0x65, 
0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_telem_functions_request_proto_rawDescOnce sync.Once + file_telem_functions_request_proto_rawDescData = file_telem_functions_request_proto_rawDesc +) + +func file_telem_functions_request_proto_rawDescGZIP() []byte { + file_telem_functions_request_proto_rawDescOnce.Do(func() { + file_telem_functions_request_proto_rawDescData = protoimpl.X.CompressGZIP(file_telem_functions_request_proto_rawDescData) + }) + return file_telem_functions_request_proto_rawDescData +} + +var file_telem_functions_request_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_telem_functions_request_proto_goTypes = []interface{}{ + (*FunctionsRequest)(nil), // 0: telem.FunctionsRequest +} +var file_telem_functions_request_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_telem_functions_request_proto_init() } +func file_telem_functions_request_proto_init() { + if File_telem_functions_request_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_telem_functions_request_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FunctionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_telem_functions_request_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_telem_functions_request_proto_goTypes, + DependencyIndexes: file_telem_functions_request_proto_depIdxs, + MessageInfos: file_telem_functions_request_proto_msgTypes, + 
}.Build() + File_telem_functions_request_proto = out.File + file_telem_functions_request_proto_rawDesc = nil + file_telem_functions_request_proto_goTypes = nil + file_telem_functions_request_proto_depIdxs = nil +} diff --git a/core/services/synchronization/telem/telem_functions_request.proto b/core/services/synchronization/telem/telem_functions_request.proto new file mode 100644 index 00000000..3c9141b2 --- /dev/null +++ b/core/services/synchronization/telem/telem_functions_request.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +option go_package = "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem"; + +package telem; + +message FunctionsRequest { + string request_id = 1; + string node_address = 2; + repeated string domains = 3; +} diff --git a/core/services/synchronization/telem/telem_wsrpc.pb.go b/core/services/synchronization/telem/telem_wsrpc.pb.go new file mode 100644 index 00000000..ecf5899f --- /dev/null +++ b/core/services/synchronization/telem/telem_wsrpc.pb.go @@ -0,0 +1,88 @@ +// Code generated by protoc-gen-go-wsrpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-wsrpc v0.0.1 +// - protoc v3.21.12 + +package telem + +import ( + context "context" + + wsrpc "github.com/goplugin/wsrpc" +) + +// TelemClient is the client API for Telem service. 
+type TelemClient interface { + Telem(ctx context.Context, in *TelemRequest) (*TelemResponse, error) + TelemBatch(ctx context.Context, in *TelemBatchRequest) (*TelemResponse, error) +} + +type telemClient struct { + cc wsrpc.ClientInterface +} + +func NewTelemClient(cc wsrpc.ClientInterface) TelemClient { + return &telemClient{cc} +} + +func (c *telemClient) Telem(ctx context.Context, in *TelemRequest) (*TelemResponse, error) { + out := new(TelemResponse) + err := c.cc.Invoke(ctx, "Telem", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *telemClient) TelemBatch(ctx context.Context, in *TelemBatchRequest) (*TelemResponse, error) { + out := new(TelemResponse) + err := c.cc.Invoke(ctx, "TelemBatch", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +// TelemServer is the server API for Telem service. +type TelemServer interface { + Telem(context.Context, *TelemRequest) (*TelemResponse, error) + TelemBatch(context.Context, *TelemBatchRequest) (*TelemResponse, error) +} + +func RegisterTelemServer(s wsrpc.ServiceRegistrar, srv TelemServer) { + s.RegisterService(&Telem_ServiceDesc, srv) +} + +func _Telem_Telem_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(TelemRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(TelemServer).Telem(ctx, in) +} + +func _Telem_TelemBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(TelemBatchRequest) + if err := dec(in); err != nil { + return nil, err + } + return srv.(TelemServer).TelemBatch(ctx, in) +} + +// Telem_ServiceDesc is the wsrpc.ServiceDesc for Telem service. 
+// It's only intended for direct use with wsrpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Telem_ServiceDesc = wsrpc.ServiceDesc{ + ServiceName: "telem.Telem", + HandlerType: (*TelemServer)(nil), + Methods: []wsrpc.MethodDesc{ + { + MethodName: "Telem", + Handler: _Telem_Telem_Handler, + }, + { + MethodName: "TelemBatch", + Handler: _Telem_TelemBatch_Handler, + }, + }, +} diff --git a/core/services/synchronization/telemetry_ingress_batch_client.go b/core/services/synchronization/telemetry_ingress_batch_client.go new file mode 100644 index 00000000..d4d091df --- /dev/null +++ b/core/services/synchronization/telemetry_ingress_batch_client.go @@ -0,0 +1,225 @@ +package synchronization + +import ( + "context" + "errors" + "fmt" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/goplugin/wsrpc" + "github.com/goplugin/wsrpc/examples/simple/keys" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + telemPb "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem" +) + +// NoopTelemetryIngressBatchClient is a no-op interface for TelemetryIngressBatchClient +type NoopTelemetryIngressBatchClient struct{} + +// Start is a no-op +func (NoopTelemetryIngressBatchClient) Start(context.Context) error { return nil } + +// Close is a no-op +func (NoopTelemetryIngressBatchClient) Close() error { return nil } + +// Send is a no-op +func (NoopTelemetryIngressBatchClient) Send(TelemPayload) {} + +func (NoopTelemetryIngressBatchClient) HealthReport() map[string]error { return map[string]error{} } +func (NoopTelemetryIngressBatchClient) Name() string { return "NoopTelemetryIngressBatchClient" } + +// Ready is a no-op +func (NoopTelemetryIngressBatchClient) Ready() error { return nil } + +type telemetryIngressBatchClient struct { + services.StateMachine + url *url.URL + ks keystore.CSA + serverPubKeyHex string + + 
connected atomic.Bool + telemClient telemPb.TelemClient + close func() error + + globalLogger logger.Logger + logging bool + lggr logger.Logger + + wgDone sync.WaitGroup + chDone services.StopChan + + telemBufferSize uint + telemMaxBatchSize uint + telemSendInterval time.Duration + telemSendTimeout time.Duration + + workers map[string]*telemetryIngressBatchWorker + workersMutex sync.Mutex + + useUniConn bool +} + +// NewTelemetryIngressBatchClient returns a client backed by wsrpc that +// can send telemetry to the telemetry ingress server +func NewTelemetryIngressBatchClient(url *url.URL, serverPubKeyHex string, ks keystore.CSA, logging bool, lggr logger.Logger, telemBufferSize uint, telemMaxBatchSize uint, telemSendInterval time.Duration, telemSendTimeout time.Duration, useUniconn bool, network string, chainID string) TelemetryService { + return &telemetryIngressBatchClient{ + telemBufferSize: telemBufferSize, + telemMaxBatchSize: telemMaxBatchSize, + telemSendInterval: telemSendInterval, + telemSendTimeout: telemSendTimeout, + url: url, + ks: ks, + serverPubKeyHex: serverPubKeyHex, + globalLogger: lggr, + logging: logging, + lggr: lggr.Named("TelemetryIngressBatchClient").Named(network).Named(chainID), + chDone: make(services.StopChan), + workers: make(map[string]*telemetryIngressBatchWorker), + useUniConn: useUniconn, + } +} + +// Start connects the wsrpc client to the telemetry ingress server +// +// If a connection cannot be established with the ingress server, Dial will return without +// an error and wsrpc will continue to retry the connection. Eventually when the ingress +// server does come back up, wsrpc will establish the connection without any interaction +// on behalf of the node operator. 
+func (tc *telemetryIngressBatchClient) Start(ctx context.Context) error { + return tc.StartOnce("TelemetryIngressBatchClient", func() error { + clientPrivKey, err := tc.getCSAPrivateKey() + if err != nil { + return err + } + + serverPubKey := keys.FromHex(tc.serverPubKeyHex) + + // Initialize a new wsrpc client caller + // This is used to call RPC methods on the server + if tc.telemClient == nil { // only preset for tests + if tc.useUniConn { + tc.wgDone.Add(1) + go func() { + defer tc.wgDone.Done() + ctx2, cancel := tc.chDone.NewCtx() + defer cancel() + conn, err := wsrpc.DialUniWithContext(ctx2, tc.lggr, tc.url.String(), clientPrivKey, serverPubKey) + if err != nil { + if ctx2.Err() != nil { + tc.lggr.Warnw("gave up connecting to telemetry endpoint", "err", err) + } else { + tc.lggr.Criticalw("telemetry endpoint dial errored unexpectedly", "err", err) + tc.SvcErrBuffer.Append(err) + } + return + } + tc.telemClient = telemPb.NewTelemClient(conn) + tc.close = conn.Close + tc.connected.Store(true) + }() + } else { + // Spawns a goroutine that will eventually connect + conn, err := wsrpc.DialWithContext(ctx, tc.url.String(), wsrpc.WithTransportCreds(clientPrivKey, serverPubKey), wsrpc.WithLogger(tc.lggr)) + if err != nil { + return fmt.Errorf("could not start TelemIngressBatchClient, Dial returned error: %v", err) + } + tc.telemClient = telemPb.NewTelemClient(conn) + tc.close = func() error { conn.Close(); return nil } + } + } + + return nil + }) +} + +// Close disconnects the wsrpc client from the ingress server and waits for all workers to exit +func (tc *telemetryIngressBatchClient) Close() error { + return tc.StopOnce("TelemetryIngressBatchClient", func() error { + close(tc.chDone) + tc.wgDone.Wait() + if (tc.useUniConn && tc.connected.Load()) || !tc.useUniConn { + return tc.close() + } + return nil + }) +} + +func (tc *telemetryIngressBatchClient) Name() string { + return tc.lggr.Name() +} + +func (tc *telemetryIngressBatchClient) HealthReport() 
map[string]error { + return map[string]error{tc.Name(): tc.Healthy()} +} + +// getCSAPrivateKey gets the client's CSA private key +func (tc *telemetryIngressBatchClient) getCSAPrivateKey() (privkey []byte, err error) { + keys, err := tc.ks.GetAll() + if err != nil { + return privkey, err + } + if len(keys) < 1 { + return privkey, errors.New("CSA key does not exist") + } + + return keys[0].Raw(), nil +} + +// Send directs incoming telmetry messages to the worker responsible for pushing it to +// the ingress server. If the worker telemetry buffer is full, messages are dropped +// and a warning is logged. +func (tc *telemetryIngressBatchClient) Send(ctx context.Context, telemData []byte, contractID string, telemType TelemetryType) { + if tc.useUniConn && !tc.connected.Load() { + tc.lggr.Warnw("not connected to telemetry endpoint", "endpoint", tc.url.String()) + return + } + payload := TelemPayload{ + Telemetry: telemData, + TelemType: telemType, + ContractID: contractID, + } + worker := tc.findOrCreateWorker(payload) + + select { + case worker.chTelemetry <- payload: + worker.dropMessageCount.Store(0) + case <-ctx.Done(): + return + default: + worker.logBufferFullWithExpBackoff(payload) + } +} + +// findOrCreateWorker finds a worker by ContractID or creates a new one if none exists +func (tc *telemetryIngressBatchClient) findOrCreateWorker(payload TelemPayload) *telemetryIngressBatchWorker { + tc.workersMutex.Lock() + defer tc.workersMutex.Unlock() + + workerKey := fmt.Sprintf("%s_%s", payload.ContractID, payload.TelemType) + worker, found := tc.workers[workerKey] + + if !found { + worker = NewTelemetryIngressBatchWorker( + tc.telemMaxBatchSize, + tc.telemSendInterval, + tc.telemSendTimeout, + tc.telemClient, + &tc.wgDone, + tc.chDone, + make(chan TelemPayload, tc.telemBufferSize), + payload.ContractID, + payload.TelemType, + tc.globalLogger, + tc.logging, + ) + worker.Start() + tc.workers[workerKey] = worker + } + + return worker +} diff --git 
a/core/services/synchronization/telemetry_ingress_batch_client_test.go b/core/services/synchronization/telemetry_ingress_batch_client_test.go new file mode 100644 index 00000000..b3050456 --- /dev/null +++ b/core/services/synchronization/telemetry_ingress_batch_client_test.go @@ -0,0 +1,103 @@ +package synchronization_test + +import ( + "net/url" + "sync/atomic" + "testing" + "time" + + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + ksmocks "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/mocks" + telemPb "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem" +) + +func TestTelemetryIngressBatchClient_HappyPath(t *testing.T) { + g := gomega.NewWithT(t) + + // Create mocks + telemClient := mocks.NewTelemClient(t) + csaKeystore := new(ksmocks.CSA) + + // Set mock handlers for keystore + key := cltest.DefaultCSAKey + keyList := []csakey.KeyV2{key} + csaKeystore.On("GetAll").Return(keyList, nil) + + // Wire up the telem ingress client + url := &url.URL{} + serverPubKeyHex := "33333333333" + sendInterval := time.Millisecond * 5 + telemIngressClient := synchronization.NewTestTelemetryIngressBatchClient(t, url, serverPubKeyHex, csaKeystore, false, telemClient, sendInterval, false) + servicetest.Run(t, telemIngressClient) + + // Create telemetry payloads for different contracts + telemPayload1 := synchronization.TelemPayload{ + Telemetry: []byte("Mock telem 1"), + ContractID: "0x1", + TelemType: synchronization.OCR, + } + telemPayload2 := synchronization.TelemPayload{ + Telemetry: []byte("Mock telem 
2"), + ContractID: "0x2", + TelemType: synchronization.OCR2VRF, + } + telemPayload3 := synchronization.TelemPayload{ + Telemetry: []byte("Mock telem 3"), + ContractID: "0x3", + TelemType: synchronization.OCR2Functions, + } + + // Assert telemetry payloads for each contract are correctly sent to wsrpc + var contractCounter1 atomic.Uint32 + var contractCounter2 atomic.Uint32 + var contractCounter3 atomic.Uint32 + telemClient.On("TelemBatch", mock.Anything, mock.Anything).Return(nil, nil).Run(func(args mock.Arguments) { + telemBatchReq := args.Get(1).(*telemPb.TelemBatchRequest) + + if telemBatchReq.ContractId == "0x1" { + for _, telem := range telemBatchReq.Telemetry { + contractCounter1.Add(1) + assert.Equal(t, telemPayload1.Telemetry, telem) + assert.Equal(t, synchronization.OCR, telemPayload1.TelemType) + } + } + if telemBatchReq.ContractId == "0x2" { + for _, telem := range telemBatchReq.Telemetry { + contractCounter2.Add(1) + assert.Equal(t, telemPayload2.Telemetry, telem) + assert.Equal(t, synchronization.OCR2VRF, telemPayload2.TelemType) + } + } + if telemBatchReq.ContractId == "0x3" { + for _, telem := range telemBatchReq.Telemetry { + contractCounter3.Add(1) + assert.Equal(t, telemPayload3.Telemetry, telem) + assert.Equal(t, synchronization.OCR2Functions, telemPayload3.TelemType) + } + } + }) + + // Send telemetry + testCtx := testutils.Context(t) + telemIngressClient.Send(testCtx, telemPayload1.Telemetry, telemPayload1.ContractID, telemPayload1.TelemType) + telemIngressClient.Send(testCtx, telemPayload2.Telemetry, telemPayload2.ContractID, telemPayload2.TelemType) + telemIngressClient.Send(testCtx, telemPayload3.Telemetry, telemPayload3.ContractID, telemPayload3.TelemType) + time.Sleep(sendInterval * 2) + telemIngressClient.Send(testCtx, telemPayload1.Telemetry, telemPayload1.ContractID, telemPayload1.TelemType) + telemIngressClient.Send(testCtx, telemPayload1.Telemetry, telemPayload1.ContractID, telemPayload1.TelemType) + telemIngressClient.Send(testCtx, 
telemPayload2.Telemetry, telemPayload2.ContractID, telemPayload2.TelemType) + + // Wait for the telemetry to be handled + g.Eventually(func() []uint32 { + return []uint32{contractCounter1.Load(), contractCounter2.Load(), contractCounter3.Load()} + }).Should(gomega.Equal([]uint32{3, 2, 1})) +} diff --git a/core/services/synchronization/telemetry_ingress_batch_worker.go b/core/services/synchronization/telemetry_ingress_batch_worker.go new file mode 100644 index 00000000..e11df930 --- /dev/null +++ b/core/services/synchronization/telemetry_ingress_batch_worker.go @@ -0,0 +1,134 @@ +package synchronization + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "github.com/goplugin/plugin-common/pkg/services" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + telemPb "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem" +) + +// telemetryIngressBatchWorker pushes telemetry in batches to the ingress server via wsrpc. +// A worker is created per ContractID. +type telemetryIngressBatchWorker struct { + services.Service + + telemMaxBatchSize uint + telemSendInterval time.Duration + telemSendTimeout time.Duration + telemClient telemPb.TelemClient + wgDone *sync.WaitGroup + chDone services.StopChan + chTelemetry chan TelemPayload + contractID string + telemType TelemetryType + logging bool + lggr logger.Logger + dropMessageCount atomic.Uint32 +} + +// NewTelemetryIngressBatchWorker returns a worker for a given contractID that can send +// telemetry to the ingress server via WSRPC +func NewTelemetryIngressBatchWorker( + telemMaxBatchSize uint, + telemSendInterval time.Duration, + telemSendTimeout time.Duration, + telemClient telemPb.TelemClient, + wgDone *sync.WaitGroup, + chDone chan struct{}, + chTelemetry chan TelemPayload, + contractID string, + telemType TelemetryType, + globalLogger logger.Logger, + logging bool, +) *telemetryIngressBatchWorker { + return &telemetryIngressBatchWorker{ + telemSendInterval: telemSendInterval, + telemSendTimeout: 
telemSendTimeout, + telemMaxBatchSize: telemMaxBatchSize, + telemClient: telemClient, + wgDone: wgDone, + chDone: chDone, + chTelemetry: chTelemetry, + contractID: contractID, + telemType: telemType, + logging: logging, + lggr: globalLogger.Named("TelemetryIngressBatchWorker"), + } +} + +// Start sends batched telemetry to the ingress server on an interval +func (tw *telemetryIngressBatchWorker) Start() { + tw.wgDone.Add(1) + sendTicker := time.NewTicker(tw.telemSendInterval) + + go func() { + defer tw.wgDone.Done() + + for { + select { + case <-sendTicker.C: + if len(tw.chTelemetry) == 0 { + continue + } + + // Send batched telemetry to the ingress server, log any errors + telemBatchReq := tw.BuildTelemBatchReq() + ctx, cancel := tw.chDone.CtxCancel(context.WithTimeout(context.Background(), tw.telemSendTimeout)) + _, err := tw.telemClient.TelemBatch(ctx, telemBatchReq) + cancel() + + if err != nil { + tw.lggr.Warnf("Could not send telemetry: %v", err) + continue + } + if tw.logging { + tw.lggr.Debugw("Successfully sent telemetry to ingress server", "contractID", telemBatchReq.ContractId, "telemType", telemBatchReq.TelemetryType, "telemetry", telemBatchReq.Telemetry) + } + case <-tw.chDone: + return + } + } + }() +} + +// logBufferFullWithExpBackoff logs messages at +// 1 +// 2 +// 4 +// 8 +// 16 +// 32 +// 64 +// 100 +// 200 +// 300 +// etc... 
+func (tw *telemetryIngressBatchWorker) logBufferFullWithExpBackoff(payload TelemPayload) { + count := tw.dropMessageCount.Add(1) + if count > 0 && (count%100 == 0 || count&(count-1) == 0) { + tw.lggr.Warnw("telemetry ingress client buffer full, dropping message", "telemetry", payload.Telemetry, "droppedCount", count) + } +} + +// BuildTelemBatchReq reads telemetry off the worker channel and packages it into a batch request +func (tw *telemetryIngressBatchWorker) BuildTelemBatchReq() *telemPb.TelemBatchRequest { + var telemBatch [][]byte + + // Read telemetry off the channel up to the max batch size + for len(tw.chTelemetry) > 0 && len(telemBatch) < int(tw.telemMaxBatchSize) { + telemPayload := <-tw.chTelemetry + telemBatch = append(telemBatch, telemPayload.Telemetry) + } + + return &telemPb.TelemBatchRequest{ + ContractId: tw.contractID, + TelemetryType: string(tw.telemType), + Telemetry: telemBatch, + SentAt: time.Now().UnixNano(), + } +} diff --git a/core/services/synchronization/telemetry_ingress_batch_worker_test.go b/core/services/synchronization/telemetry_ingress_batch_worker_test.go new file mode 100644 index 00000000..4e432e8b --- /dev/null +++ b/core/services/synchronization/telemetry_ingress_batch_worker_test.go @@ -0,0 +1,58 @@ +package synchronization_test + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/mocks" +) + +func TestTelemetryIngressWorker_BuildTelemBatchReq(t *testing.T) { + telemPayload := synchronization.TelemPayload{ + Telemetry: []byte("Mock telemetry"), + ContractID: "0xa", + } + + maxTelemBatchSize := 3 + chTelemetry := make(chan synchronization.TelemPayload, 10) + worker := synchronization.NewTelemetryIngressBatchWorker( + uint(maxTelemBatchSize), + time.Millisecond*1, + time.Second, + mocks.NewTelemClient(t), + 
&sync.WaitGroup{}, + make(chan struct{}), + chTelemetry, + "0xa", + synchronization.OCR, + logger.TestLogger(t), + false, + ) + + chTelemetry <- telemPayload + chTelemetry <- telemPayload + chTelemetry <- telemPayload + chTelemetry <- telemPayload + chTelemetry <- telemPayload + + // Batch request should not exceed the max batch size + batchReq1 := worker.BuildTelemBatchReq() + assert.Equal(t, "0xa", batchReq1.ContractId) + assert.Equal(t, string(synchronization.OCR), batchReq1.TelemetryType) + assert.Len(t, batchReq1.Telemetry, maxTelemBatchSize) + assert.Len(t, chTelemetry, 2) + assert.Greater(t, batchReq1.SentAt, int64(0)) + + // Remainder of telemetry should be batched on next call + batchReq2 := worker.BuildTelemBatchReq() + assert.Equal(t, "0xa", batchReq2.ContractId) + assert.Equal(t, string(synchronization.OCR), batchReq2.TelemetryType) + assert.Len(t, batchReq2.Telemetry, 2) + assert.Len(t, chTelemetry, 0) + assert.Greater(t, batchReq2.SentAt, int64(0)) +} diff --git a/core/services/synchronization/telemetry_ingress_client.go b/core/services/synchronization/telemetry_ingress_client.go new file mode 100644 index 00000000..fed1ea77 --- /dev/null +++ b/core/services/synchronization/telemetry_ingress_client.go @@ -0,0 +1,216 @@ +package synchronization + +import ( + "context" + "errors" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/goplugin/wsrpc" + "github.com/goplugin/wsrpc/examples/simple/keys" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + telemPb "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem" +) + +type NoopTelemetryIngressClient struct{} + +// Start is a no-op +func (NoopTelemetryIngressClient) Start(context.Context) error { return nil } + +// Close is a no-op +func (NoopTelemetryIngressClient) Close() error { return nil } + +// Send is a no-op +func (NoopTelemetryIngressClient) 
Send(context.Context, TelemPayload) {} + +func (NoopTelemetryIngressClient) HealthReport() map[string]error { return map[string]error{} } +func (NoopTelemetryIngressClient) Name() string { return "NoopTelemetryIngressClient" } + +// Ready is a no-op +func (NoopTelemetryIngressClient) Ready() error { return nil } + +type telemetryIngressClient struct { + services.StateMachine + url *url.URL + ks keystore.CSA + serverPubKeyHex string + + telemClient telemPb.TelemClient + logging bool + lggr logger.Logger + + wgDone sync.WaitGroup + chDone services.StopChan + dropMessageCount atomic.Uint32 + chTelemetry chan TelemPayload +} + +// NewTelemetryIngressClient returns a client backed by wsrpc that +// can send telemetry to the telemetry ingress server +func NewTelemetryIngressClient(url *url.URL, serverPubKeyHex string, ks keystore.CSA, logging bool, lggr logger.Logger, telemBufferSize uint, network string, chainID string) TelemetryService { + return &telemetryIngressClient{ + url: url, + ks: ks, + serverPubKeyHex: serverPubKeyHex, + logging: logging, + lggr: lggr.Named("TelemetryIngressClient").Named(network).Named(chainID), + chTelemetry: make(chan TelemPayload, telemBufferSize), + chDone: make(services.StopChan), + } +} + +// Start connects the wsrpc client to the telemetry ingress server +func (tc *telemetryIngressClient) Start(context.Context) error { + return tc.StartOnce("TelemetryIngressClient", func() error { + privkey, err := tc.getCSAPrivateKey() + if err != nil { + return err + } + + tc.connect(privkey) + + return nil + }) +} + +// Close disconnects the wsrpc client from the ingress server +func (tc *telemetryIngressClient) Close() error { + return tc.StopOnce("TelemetryIngressClient", func() error { + close(tc.chDone) + tc.wgDone.Wait() + return nil + }) +} + +func (tc *telemetryIngressClient) Name() string { + return tc.lggr.Name() +} + +func (tc *telemetryIngressClient) HealthReport() map[string]error { + return map[string]error{tc.Name(): tc.Healthy()} +} + 
+func (tc *telemetryIngressClient) connect(clientPrivKey []byte) { + tc.wgDone.Add(1) + + go func() { + defer tc.wgDone.Done() + ctx, cancel := tc.chDone.NewCtx() + defer cancel() + + serverPubKey := keys.FromHex(tc.serverPubKeyHex) + conn, err := wsrpc.DialWithContext(ctx, tc.url.String(), wsrpc.WithTransportCreds(clientPrivKey, serverPubKey), wsrpc.WithLogger(tc.lggr)) + if err != nil { + if ctx.Err() != nil { + tc.lggr.Warnw("gave up connecting to telemetry endpoint", "err", err) + } else { + tc.lggr.Criticalw("telemetry endpoint dial errored unexpectedly", "err", err) + tc.SvcErrBuffer.Append(err) + } + return + } + defer conn.Close() + + // Initialize a new wsrpc client caller + // This is used to call RPC methods on the server + if tc.telemClient == nil { // only preset for tests + tc.telemClient = telemPb.NewTelemClient(conn) + } + + // Start handler for telemetry + tc.handleTelemetry() + + // Wait for close + <-tc.chDone + + }() +} + +func (tc *telemetryIngressClient) handleTelemetry() { + tc.wgDone.Add(1) + go func() { + defer tc.wgDone.Done() + ctx, cancel := tc.chDone.NewCtx() + defer cancel() + for { + select { + case p := <-tc.chTelemetry: + // Send telemetry to the ingress server, log any errors + telemReq := &telemPb.TelemRequest{ + Telemetry: p.Telemetry, + Address: p.ContractID, + TelemetryType: string(p.TelemType), + SentAt: time.Now().UnixNano(), + } + _, err := tc.telemClient.Telem(ctx, telemReq) + if err != nil { + tc.lggr.Errorf("Could not send telemetry: %v", err) + continue + } + if tc.logging { + tc.lggr.Debugw("successfully sent telemetry to ingress server", "contractID", p.ContractID, "telemetry", p.Telemetry) + } + case <-tc.chDone: + return + } + } + }() +} + +// logBufferFullWithExpBackoff logs messages at +// 1 +// 2 +// 4 +// 8 +// 16 +// 32 +// 64 +// 100 +// 200 +// 300 +// etc... 
+func (tc *telemetryIngressClient) logBufferFullWithExpBackoff(payload TelemPayload) { + count := tc.dropMessageCount.Add(1) + if count > 0 && (count%100 == 0 || count&(count-1) == 0) { + tc.lggr.Warnw("telemetry ingress client buffer full, dropping message", "telemetry", payload.Telemetry, "droppedCount", count) + } +} + +// getCSAPrivateKey gets the client's CSA private key +func (tc *telemetryIngressClient) getCSAPrivateKey() (privkey []byte, err error) { + // Fetch the client's public key + keys, err := tc.ks.GetAll() + if err != nil { + return privkey, err + } + if len(keys) < 1 { + return privkey, errors.New("CSA key does not exist") + } + + return keys[0].Raw(), nil +} + +// Send sends telemetry to the ingress server using wsrpc if the client is ready. +// Also stores telemetry in a small buffer in case of backpressure from wsrpc, +// throwing away messages once buffer is full +func (tc *telemetryIngressClient) Send(ctx context.Context, telemData []byte, contractID string, telemType TelemetryType) { + payload := TelemPayload{ + Telemetry: telemData, + TelemType: telemType, + ContractID: contractID, + } + + select { + case tc.chTelemetry <- payload: + tc.dropMessageCount.Store(0) + case <-ctx.Done(): + return + default: + tc.logBufferFullWithExpBackoff(payload) + } +} diff --git a/core/services/synchronization/telemetry_ingress_client_test.go b/core/services/synchronization/telemetry_ingress_client_test.go new file mode 100644 index 00000000..e11907c6 --- /dev/null +++ b/core/services/synchronization/telemetry_ingress_client_test.go @@ -0,0 +1,65 @@ +package synchronization_test + +import ( + "net/url" + "sync/atomic" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + ksmocks "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/mocks" + telemPb "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem" +) + +func TestTelemetryIngressClient_Send_HappyPath(t *testing.T) { + + // Create mocks + telemClient := mocks.NewTelemClient(t) + csaKeystore := new(ksmocks.CSA) + + // Set mock handlers for keystore + key := cltest.DefaultCSAKey + keyList := []csakey.KeyV2{key} + csaKeystore.On("GetAll").Return(keyList, nil) + + // Wire up the telem ingress client + url := &url.URL{} + serverPubKeyHex := "33333333333" + telemIngressClient := synchronization.NewTestTelemetryIngressClient(t, url, serverPubKeyHex, csaKeystore, false, telemClient) + servicetest.Run(t, telemIngressClient) + + // Create the telemetry payload + telemetry := []byte("101010") + address := common.HexToAddress("0xa") + telemPayload := synchronization.TelemPayload{ + Telemetry: telemetry, + ContractID: address.String(), + TelemType: synchronization.OCR, + } + + // Assert the telemetry payload is correctly sent to wsrpc + var called atomic.Bool + telemClient.On("Telem", mock.Anything, mock.Anything).Return(nil, nil).Run(func(args mock.Arguments) { + called.Store(true) + telemReq := args.Get(1).(*telemPb.TelemRequest) + assert.Equal(t, telemPayload.ContractID, telemReq.Address) + assert.Equal(t, telemPayload.Telemetry, telemReq.Telemetry) + assert.Equal(t, string(synchronization.OCR), telemReq.TelemetryType) + assert.Greater(t, telemReq.SentAt, int64(0)) + }) + + // Send telemetry + telemIngressClient.Send(testutils.Context(t), telemPayload.Telemetry, telemPayload.ContractID, telemPayload.TelemType) + + // Wait for the telemetry to be handled + gomega.NewWithT(t).Eventually(called.Load).Should(gomega.BeTrue()) +} diff --git 
a/core/services/synchronization/uni_client_integration_test.go b/core/services/synchronization/uni_client_integration_test.go new file mode 100644 index 00000000..f3b9798e --- /dev/null +++ b/core/services/synchronization/uni_client_integration_test.go @@ -0,0 +1,42 @@ +package synchronization + +import ( + "context" + "encoding/hex" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/wsrpc" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/telem" +) + +func TestUniClient(t *testing.T) { + t.Skip("Incomplete", "https://smartcontract-it.atlassian.net/browse/BCF-2729") + privKey, err := hex.DecodeString("TODO") + require.NoError(t, err) + pubKey, err := hex.DecodeString("TODO") + require.NoError(t, err) + t.Log(len(privKey), len(pubKey)) + lggr := logger.TestLogger(t) + c, err := wsrpc.DialUniWithContext(testutils.Context(t), + lggr, + "TODO", + privKey, + pubKey) + require.NoError(t, err) + t.Log(c) + client := telem.NewTelemClient(c) + ctx, cancel := context.WithTimeout(testutils.Context(t), 500*time.Millisecond) + resp, err := client.Telem(ctx, &telem.TelemRequest{ + Telemetry: []byte(`hello world`), + Address: "myaddress", + }) + cancel() + t.Log(resp, err) + require.NoError(t, c.Close()) +} diff --git a/core/services/telemetry/common.go b/core/services/telemetry/common.go new file mode 100644 index 00000000..e811c266 --- /dev/null +++ b/core/services/telemetry/common.go @@ -0,0 +1,11 @@ +package telemetry + +import ( + ocrtypes "github.com/goplugin/libocr/commontypes" + + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" +) + +type MonitoringEndpointGenerator interface { + GenMonitoringEndpoint(network string, chainID string, contractID string, telemType synchronization.TelemetryType) ocrtypes.MonitoringEndpoint +} diff --git a/core/services/telemetry/ingress.go 
b/core/services/telemetry/ingress.go new file mode 100644 index 00000000..840a48db --- /dev/null +++ b/core/services/telemetry/ingress.go @@ -0,0 +1,46 @@ +package telemetry + +import ( + "context" + + ocrtypes "github.com/goplugin/libocr/commontypes" + + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" +) + +var _ MonitoringEndpointGenerator = &IngressAgentWrapper{} + +type IngressAgentWrapper struct { + telemetryIngressClient synchronization.TelemetryService +} + +func NewIngressAgentWrapper(telemetryIngressClient synchronization.TelemetryService) *IngressAgentWrapper { + return &IngressAgentWrapper{telemetryIngressClient} +} + +func (t *IngressAgentWrapper) GenMonitoringEndpoint(network, chainID string, contractID string, telemType synchronization.TelemetryType) ocrtypes.MonitoringEndpoint { + return NewIngressAgent(t.telemetryIngressClient, network, chainID, contractID, telemType) +} + +type IngressAgent struct { + telemetryIngressClient synchronization.TelemetryService + network string + chainID string + contractID string + telemType synchronization.TelemetryType +} + +func NewIngressAgent(telemetryIngressClient synchronization.TelemetryService, network string, chainID string, contractID string, telemType synchronization.TelemetryType) *IngressAgent { + return &IngressAgent{ + telemetryIngressClient, + network, + chainID, + contractID, + telemType, + } +} + +// SendLog sends a telemetry log to the ingress server +func (t *IngressAgent) SendLog(telemetry []byte) { + t.telemetryIngressClient.Send(context.Background(), telemetry, t.contractID, t.telemType) +} diff --git a/core/services/telemetry/ingress_batch.go b/core/services/telemetry/ingress_batch.go new file mode 100644 index 00000000..e5087151 --- /dev/null +++ b/core/services/telemetry/ingress_batch.go @@ -0,0 +1,51 @@ +package telemetry + +import ( + "context" + + ocrtypes "github.com/goplugin/libocr/commontypes" + + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" +) + 
+var _ MonitoringEndpointGenerator = &IngressAgentBatchWrapper{} + +// IngressAgentBatchWrapper provides monitoring endpoint generation for the telemetry batch client +type IngressAgentBatchWrapper struct { + telemetryIngressBatchClient synchronization.TelemetryService +} + +// NewIngressAgentBatchWrapper creates a new IngressAgentBatchWrapper with the provided telemetry batch client +func NewIngressAgentBatchWrapper(telemetryIngressBatchClient synchronization.TelemetryService) *IngressAgentBatchWrapper { + return &IngressAgentBatchWrapper{telemetryIngressBatchClient} +} + +// GenMonitoringEndpoint returns a new ingress batch agent instantiated with the batch client and a contractID +func (t *IngressAgentBatchWrapper) GenMonitoringEndpoint(network string, chainID string, contractID string, telemType synchronization.TelemetryType) ocrtypes.MonitoringEndpoint { + return NewIngressAgentBatch(t.telemetryIngressBatchClient, network, chainID, contractID, telemType) +} + +// IngressAgentBatch allows for sending batch telemetry for a given contractID +type IngressAgentBatch struct { + telemetryIngressBatchClient synchronization.TelemetryService + network string + chainID string + contractID string + telemType synchronization.TelemetryType +} + +// NewIngressAgentBatch creates a new IngressAgentBatch with the given batch client and contractID +func NewIngressAgentBatch(telemetryIngressBatchClient synchronization.TelemetryService, network string, chainID string, contractID string, telemType synchronization.TelemetryType) *IngressAgentBatch { + return &IngressAgentBatch{ + telemetryIngressBatchClient, + network, + chainID, + contractID, + telemType, + } +} + +// SendLog sends a telemetry log to the ingress server +func (t *IngressAgentBatch) SendLog(telemetry []byte) { + t.telemetryIngressBatchClient.Send(context.Background(), telemetry, t.contractID, t.telemType) +} diff --git a/core/services/telemetry/ingress_batch_test.go b/core/services/telemetry/ingress_batch_test.go new 
file mode 100644 index 00000000..57dac04d --- /dev/null +++ b/core/services/telemetry/ingress_batch_test.go @@ -0,0 +1,37 @@ +package telemetry_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/telemetry" +) + +func TestIngressAgentBatch(t *testing.T) { + telemetryBatchClient := mocks.NewTelemetryService(t) + ingressAgentBatch := telemetry.NewIngressAgentWrapper(telemetryBatchClient) + monitoringEndpoint := ingressAgentBatch.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.OCR) + + // Handle the Send call and store the telem + var telemPayload synchronization.TelemPayload + telemetryBatchClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) { + telemPayload = synchronization.TelemPayload{ + Telemetry: args[1].([]byte), + ContractID: args[2].(string), + TelemType: args[3].(synchronization.TelemetryType), + } + }) + + // Send the log to the monitoring endpoint + log := []byte("test log") + monitoringEndpoint.SendLog(log) + + // Telemetry should be sent to the mock as expected + assert.Equal(t, log, telemPayload.Telemetry) + assert.Equal(t, synchronization.OCR, telemPayload.TelemType) + assert.Equal(t, "0xa", telemPayload.ContractID) +} diff --git a/core/services/telemetry/ingress_test.go b/core/services/telemetry/ingress_test.go new file mode 100644 index 00000000..f4f8ab37 --- /dev/null +++ b/core/services/telemetry/ingress_test.go @@ -0,0 +1,37 @@ +package telemetry_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" + 
"github.com/goplugin/pluginv3.0/v2/core/services/synchronization/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/telemetry" +) + +func TestIngressAgent(t *testing.T) { + telemetryClient := mocks.NewTelemetryService(t) + ingressAgent := telemetry.NewIngressAgentWrapper(telemetryClient) + monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.OCR) + + // Handle the Send call and store the telem + var telemPayload synchronization.TelemPayload + telemetryClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) { + telemPayload = synchronization.TelemPayload{ + Telemetry: args[1].([]byte), + ContractID: args[2].(string), + TelemType: args[3].(synchronization.TelemetryType), + } + }) + + // Send the log to the monitoring endpoint + log := []byte("test log") + monitoringEndpoint.SendLog(log) + + // Telemetry should be sent to the mock as expected + assert.Equal(t, log, telemPayload.Telemetry) + assert.Equal(t, synchronization.OCR, telemPayload.TelemType) + assert.Equal(t, "0xa", telemPayload.ContractID) +} diff --git a/core/services/telemetry/manager.go b/core/services/telemetry/manager.go new file mode 100644 index 00000000..4a236887 --- /dev/null +++ b/core/services/telemetry/manager.go @@ -0,0 +1,163 @@ +package telemetry + +import ( + "context" + "net/url" + "strings" + "time" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/libocr/commontypes" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" +) + +type Manager struct { + services.StateMachine + bufferSize uint + endpoints []*telemetryEndpoint + ks keystore.CSA + lggr logger.Logger 
+ logging bool + maxBatchSize uint + sendInterval time.Duration + sendTimeout time.Duration + uniConn bool + useBatchSend bool + MonitoringEndpointGenerator MonitoringEndpointGenerator +} + +type telemetryEndpoint struct { + ChainID string + Network string + URL *url.URL + client synchronization.TelemetryService + PubKey string +} + +// NewManager create a new telemetry manager that is responsible for configuring telemetry agents and generating the defined telemetry endpoints and monitoring endpoints +func NewManager(cfg config.TelemetryIngress, csaKeyStore keystore.CSA, lggr logger.Logger) *Manager { + m := &Manager{ + bufferSize: cfg.BufferSize(), + endpoints: nil, + ks: csaKeyStore, + lggr: lggr.Named("TelemetryManager"), + logging: cfg.Logging(), + maxBatchSize: cfg.MaxBatchSize(), + sendInterval: cfg.SendInterval(), + sendTimeout: cfg.SendTimeout(), + uniConn: cfg.UniConn(), + useBatchSend: cfg.UseBatchSend(), + } + for _, e := range cfg.Endpoints() { + if err := m.addEndpoint(e); err != nil { + m.lggr.Error(err) + } + } + return m +} + +func (m *Manager) Start(ctx context.Context) error { + return m.StartOnce("TelemetryManager", func() error { + var err error + for _, e := range m.endpoints { + err = multierr.Append(err, e.client.Start(ctx)) + } + return err + }) +} +func (m *Manager) Close() error { + return m.StopOnce("TelemetryManager", func() error { + var err error + for _, e := range m.endpoints { + err = multierr.Append(err, e.client.Close()) + } + return err + }) +} + +func (m *Manager) Name() string { + return m.lggr.Name() +} + +func (m *Manager) HealthReport() map[string]error { + hr := map[string]error{m.Name(): m.Healthy()} + + for _, e := range m.endpoints { + services.CopyHealth(hr, e.client.HealthReport()) + } + return hr +} + +// GenMonitoringEndpoint creates a new monitoring endpoints based on the existing available endpoints defined in the core config TOML, if no endpoint for the network and chainID exists, a NOOP agent will be used and the 
telemetry will not be sent +func (m *Manager) GenMonitoringEndpoint(network string, chainID string, contractID string, telemType synchronization.TelemetryType) commontypes.MonitoringEndpoint { + + e, found := m.getEndpoint(network, chainID) + + if !found { + m.lggr.Warnf("no telemetry endpoint found for network %q chainID %q, telemetry %q for contactID %q will NOT be sent", network, chainID, telemType, contractID) + return &NoopAgent{} + } + + if m.useBatchSend { + return NewIngressAgentBatch(e.client, network, chainID, contractID, telemType) + } + + return NewIngressAgent(e.client, network, chainID, contractID, telemType) + +} + +func (m *Manager) addEndpoint(e config.TelemetryIngressEndpoint) error { + if e.Network() == "" { + return errors.New("cannot add telemetry endpoint, network cannot be empty") + } + + if e.ChainID() == "" { + return errors.New("cannot add telemetry endpoint, chainID cannot be empty") + } + + if e.URL() == nil { + return errors.New("cannot add telemetry endpoint, URL cannot be empty") + } + + if e.ServerPubKey() == "" { + return errors.New("cannot add telemetry endpoint, ServerPubKey cannot be empty") + } + + if _, found := m.getEndpoint(e.Network(), e.ChainID()); found { + return errors.Errorf("cannot add telemetry endpoint for network %q and chainID %q, endpoint already exists", e.Network(), e.ChainID()) + } + + var tClient synchronization.TelemetryService + if m.useBatchSend { + tClient = synchronization.NewTelemetryIngressBatchClient(e.URL(), e.ServerPubKey(), m.ks, m.logging, m.lggr, m.bufferSize, m.maxBatchSize, m.sendInterval, m.sendTimeout, m.uniConn, e.Network(), e.ChainID()) + } else { + tClient = synchronization.NewTelemetryIngressClient(e.URL(), e.ServerPubKey(), m.ks, m.logging, m.lggr, m.bufferSize, e.Network(), e.ChainID()) + } + + te := telemetryEndpoint{ + Network: strings.ToUpper(e.Network()), + ChainID: strings.ToUpper(e.ChainID()), + URL: e.URL(), + PubKey: e.ServerPubKey(), + client: tClient, + } + + m.endpoints = 
append(m.endpoints, &te) + return nil +} + +func (m *Manager) getEndpoint(network string, chainID string) (*telemetryEndpoint, bool) { + for _, e := range m.endpoints { + if e.Network == strings.ToUpper(network) && e.ChainID == strings.ToUpper(chainID) { + return e, true + } + } + return nil, false +} diff --git a/core/services/telemetry/manager_test.go b/core/services/telemetry/manager_test.go new file mode 100644 index 00000000..a7d71ed2 --- /dev/null +++ b/core/services/telemetry/manager_test.go @@ -0,0 +1,286 @@ +package telemetry + +import ( + "fmt" + "math/big" + "net/url" + "reflect" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/config/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + mocks3 "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" + mocks2 "github.com/goplugin/pluginv3.0/v2/core/services/synchronization/mocks" +) + +func setupMockConfig(t *testing.T, useBatchSend bool) *mocks.TelemetryIngress { + tic := mocks.NewTelemetryIngress(t) + tic.On("BufferSize").Return(uint(123)) + tic.On("Logging").Return(true) + tic.On("MaxBatchSize").Return(uint(51)) + tic.On("SendInterval").Return(time.Millisecond * 512) + tic.On("SendTimeout").Return(time.Second * 7) + tic.On("UniConn").Return(true) + tic.On("UseBatchSend").Return(useBatchSend) + + return tic +} + +func TestManagerAgents(t *testing.T) { + tic := setupMockConfig(t, true) + te := mocks.NewTelemetryIngressEndpoint(t) + te.On("Network").Return("network-1") + te.On("ChainID").Return("network-1-chainID-1") + te.On("ServerPubKey").Return("some-pubkey") + u, _ := url.Parse("http://some-url.test") + 
te.On("URL").Return(u) + tic.On("Endpoints").Return([]config.TelemetryIngressEndpoint{te}) + + lggr, _ := logger.TestLoggerObserved(t, zapcore.InfoLevel) + + ks := mocks3.NewCSA(t) + + tm := NewManager(tic, ks, lggr) + require.Equal(t, "*synchronization.telemetryIngressBatchClient", reflect.TypeOf(tm.endpoints[0].client).String()) + me := tm.GenMonitoringEndpoint("network-1", "network-1-chainID-1", "", "") + require.Equal(t, "*telemetry.IngressAgentBatch", reflect.TypeOf(me).String()) + + tic = setupMockConfig(t, false) + tic.On("Endpoints").Return([]config.TelemetryIngressEndpoint{te}) + tm = NewManager(tic, ks, lggr) + require.Equal(t, "*synchronization.telemetryIngressClient", reflect.TypeOf(tm.endpoints[0].client).String()) + me = tm.GenMonitoringEndpoint("network-1", "network-1-chainID-1", "", "") + require.Equal(t, "*telemetry.IngressAgent", reflect.TypeOf(me).String()) +} + +func TestNewManager(t *testing.T) { + + type endpointTest struct { + network string + chainID string + url string + pubKey string + shouldError bool + expectedError string + } + + endpoints := []endpointTest{ + { + network: "NETWORK-1", + chainID: "NETWORK-1-CHAINID-1", + url: "http://network-1-chainID-1.test", + pubKey: "network-1-chainID-1-pub-key", + shouldError: false, + }, + { + network: "NETWORK-1", + chainID: "NETWORK-1-CHAINID-2", + url: "http://network-1-chainID-2.test", + pubKey: "network-1-chainID-2-pub-key", + shouldError: false, + }, + { + network: "NETWORK-2", + chainID: "NETWORK-2-CHAINID-1", + url: "http://network-2-chainID-1.test", + pubKey: "network-2-chainID-1-pub-key", + shouldError: false, + }, + { + shouldError: true, + expectedError: "network cannot be empty", + }, + { + network: "ERROR", + shouldError: true, + expectedError: "chainID cannot be empty", + }, + { + network: "ERROR", + chainID: "ERROR", + shouldError: true, + expectedError: "URL cannot be empty", + }, + { + network: "ERROR", + chainID: "ERROR", + url: "http://error.test", + shouldError: true, + 
expectedError: "cannot add telemetry endpoint, ServerPubKey cannot be empty", + }, + { + network: "NETWORK-1", + chainID: "NETWORK-1-CHAINID-1", + url: "http://network-1-chainID-1.test", + pubKey: "network-1-chainID-1-pub-key", + shouldError: true, + expectedError: "endpoint already exists", + }, + } + + var mockEndpoints []config.TelemetryIngressEndpoint + + for _, e := range endpoints { + te := mocks.NewTelemetryIngressEndpoint(t) + te.On("Network").Maybe().Return(e.network) + te.On("ChainID").Maybe().Return(e.chainID) + te.On("ServerPubKey").Maybe().Return(e.pubKey) + + u, _ := url.Parse(e.url) + if e.url == "" { + u = nil + } + te.On("URL").Maybe().Return(u) + mockEndpoints = append(mockEndpoints, te) + } + + tic := setupMockConfig(t, true) + tic.On("Endpoints").Return(mockEndpoints) + + lggr, logObs := logger.TestLoggerObserved(t, zapcore.InfoLevel) + + ks := mocks3.NewCSA(t) + + ks.On("GetAll").Return([]csakey.KeyV2{csakey.MustNewV2XXXTestingOnly(big.NewInt(0))}, nil) + + m := NewManager(tic, ks, lggr) + + require.Equal(t, uint(123), m.bufferSize) + require.Equal(t, ks, m.ks) + require.Equal(t, "TelemetryManager", m.lggr.Name()) + require.Equal(t, true, m.logging) + require.Equal(t, uint(51), m.maxBatchSize) + require.Equal(t, time.Millisecond*512, m.sendInterval) + require.Equal(t, time.Second*7, m.sendTimeout) + require.Equal(t, true, m.uniConn) + require.Equal(t, true, m.useBatchSend) + + logs := logObs.TakeAll() + for i, e := range endpoints { + if !e.shouldError { + require.Equal(t, e.network, m.endpoints[i].Network) + require.Equal(t, e.chainID, m.endpoints[i].ChainID) + require.Equal(t, e.pubKey, m.endpoints[i].PubKey) + require.Equal(t, e.url, m.endpoints[i].URL.String()) + } else { + found := false + for _, l := range logs { + if strings.Contains(l.Message, e.expectedError) { + found = true + } + } + require.Equal(t, true, found, "cannot find log: %s", e.expectedError) + } + + } + + require.Equal(t, "TelemetryManager", m.Name()) + + require.Nil(t, 
m.Start(testutils.Context(t))) + t.Cleanup(func() { + require.NoError(t, m.Close()) + }) + testutils.WaitForLogMessageCount(t, logObs, "error connecting error while dialing dial tcp", 3) + + hr := m.HealthReport() + require.Equal(t, 4, len(hr)) +} + +func TestCorrectEndpointRouting(t *testing.T) { + tic := setupMockConfig(t, true) + tic.On("Endpoints").Return(nil) + + lggr, obsLogs := logger.TestLoggerObserved(t, zapcore.InfoLevel) + ks := mocks3.NewCSA(t) + + tm := NewManager(tic, ks, lggr) + + type testEndpoint struct { + network string + chainID string + } + + testEndpoints := []testEndpoint{ + { + network: "NETWORK-1", + chainID: "NETWORK-1-CHAINID-1", + }, + { + network: "NETWORK-1", + chainID: "NETWORK-1-CHAINID-2", + }, + { + network: "NETWORK-2", + chainID: "NETWORK-2-CHAINID-1", + }, + { + network: "NETWORK-2", + chainID: "NETWORK-2-CHAINID-2", + }, + } + + tm.endpoints = make([]*telemetryEndpoint, len(testEndpoints)) + clientSent := make([]synchronization.TelemPayload, 0) + for i, e := range testEndpoints { + clientMock := mocks2.NewTelemetryService(t) + clientMock.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) { + clientSent = append(clientSent, synchronization.TelemPayload{ + Telemetry: args[1].([]byte), + ContractID: args[2].(string), + TelemType: args[3].(synchronization.TelemetryType), + }) + }) + + tm.endpoints[i] = &telemetryEndpoint{ + ChainID: e.chainID, + Network: e.network, + client: clientMock, + } + + } + //Unknown networks or chainID + noopEndpoint := tm.GenMonitoringEndpoint("unknown-network", "unknown-chainID", "some-contractID", "some-type") + require.Equal(t, "*telemetry.NoopAgent", reflect.TypeOf(noopEndpoint).String()) + require.Equal(t, 1, obsLogs.Len()) + require.Contains(t, obsLogs.TakeAll()[0].Message, "no telemetry endpoint found") + + noopEndpoint = tm.GenMonitoringEndpoint("network-1", "unknown-chainID", 
"some-contractID", "some-type") + require.Equal(t, "*telemetry.NoopAgent", reflect.TypeOf(noopEndpoint).String()) + require.Equal(t, 1, obsLogs.Len()) + require.Contains(t, obsLogs.TakeAll()[0].Message, "no telemetry endpoint found") + + noopEndpoint = tm.GenMonitoringEndpoint("network-2", "network-1-chainID-1", "some-contractID", "some-type") + require.Equal(t, "*telemetry.NoopAgent", reflect.TypeOf(noopEndpoint).String()) + require.Equal(t, 1, obsLogs.Len()) + require.Contains(t, obsLogs.TakeAll()[0].Message, "no telemetry endpoint found") + + //Known networks and chainID + for i, e := range testEndpoints { + telemType := fmt.Sprintf("TelemType_%s", e.chainID) + contractID := fmt.Sprintf("contractID_%s", e.chainID) + me := tm.GenMonitoringEndpoint( + e.network, + e.chainID, + contractID, + synchronization.TelemetryType(telemType), + ) + me.SendLog([]byte(e.chainID)) + require.Equal(t, 0, obsLogs.Len()) + + require.Equal(t, i+1, len(clientSent)) + require.Equal(t, contractID, clientSent[i].ContractID) + require.Equal(t, telemType, string(clientSent[i].TelemType)) + require.Equal(t, []byte(e.chainID), clientSent[i].Telemetry) + } + +} diff --git a/core/services/telemetry/noop.go b/core/services/telemetry/noop.go new file mode 100644 index 00000000..31d78590 --- /dev/null +++ b/core/services/telemetry/noop.go @@ -0,0 +1,21 @@ +package telemetry + +import ( + ocrtypes "github.com/goplugin/libocr/commontypes" + + "github.com/goplugin/pluginv3.0/v2/core/services/synchronization" +) + +var _ MonitoringEndpointGenerator = &NoopAgent{} + +type NoopAgent struct { +} + +// SendLog sends a telemetry log to the ingress service +func (t *NoopAgent) SendLog(log []byte) { +} + +// GenMonitoringEndpoint creates a monitoring endpoint for telemetry +func (t *NoopAgent) GenMonitoringEndpoint(network string, chainID string, contractID string, telemType synchronization.TelemetryType) ocrtypes.MonitoringEndpoint { + return t +} diff --git 
a/core/services/transmission/integration_test.go b/core/services/transmission/integration_test.go new file mode 100644 index 00000000..e000de9d --- /dev/null +++ b/core/services/transmission/integration_test.go @@ -0,0 +1,496 @@ +package transmission_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_v3_aggregator_contract" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_consumer_interface_v08" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_mock" + + "github.com/ethereum/go-ethereum/core/types" + + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/transmission/generated/entry_point" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/transmission/generated/greeter_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/transmission/generated/paymaster_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/transmission/generated/sca_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/transmission/generated/smart_contract_account_factory" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/transmission/generated/smart_contract_account_helper" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/transmission" +) + +var ( + 
greeterABI = evmtypes.MustGetABI(greeter_wrapper.GreeterABI) + consumerABI = evmtypes.MustGetABI(solidity_vrf_consumer_interface_v08.VRFConsumerABI) + entrypointABI = evmtypes.MustGetABI(entry_point.EntryPointABI) +) + +type EntryPointUniverse struct { + holder1 *bind.TransactOpts + holder1Key ethkey.KeyV2 + holder2 *bind.TransactOpts + backend *backends.SimulatedBackend + entryPointAddress common.Address + entryPoint *entry_point.EntryPoint + factoryAddress common.Address + helper *smart_contract_account_helper.SmartContractAccountHelper + greeterAddress common.Address + greeter *greeter_wrapper.Greeter + linkTokenAddress common.Address + linkToken *link_token_interface.LinkToken + linkEthFeedAddress common.Address + vrfCoordinatorAddress common.Address + vrfCoordinator *vrf_coordinator_mock.VRFCoordinatorMock + vrfConsumerAddress common.Address +} + +func deployTransmissionUniverse(t *testing.T) *EntryPointUniverse { + // Create a key for holder1 that we can use to sign + holder1Key := cltest.MustGenerateRandomKey(t) + t.Log("Holder key:", holder1Key.String()) + + // Construct simulated blockchain environment. + holder1Transactor, err := bind.NewKeyedTransactorWithChainID(holder1Key.ToEcdsaPrivKey(), testutils.SimulatedChainID) + require.NoError(t, err) + var ( + holder1 = holder1Transactor + holder2 = testutils.MustNewSimTransactor(t) + ) + genesisData := core.GenesisAlloc{ + holder1.From: {Balance: assets.Ether(1000).ToInt()}, + holder2.From: {Balance: assets.Ether(1000).ToInt()}, + } + gasLimit := uint32(30e6) + backend := cltest.NewSimulatedBackend(t, genesisData, gasLimit) + backend.Commit() + + // Setup all contracts and addresses used by tests. 
+	entryPointAddress, _, entryPoint, err := entry_point.DeployEntryPoint(holder1, backend)
+	require.NoError(t, err)
+	factoryAddress, _, _, err := smart_contract_account_factory.DeploySmartContractAccountFactory(holder1, backend)
+	require.NoError(t, err)
+	_, _, helper, err := smart_contract_account_helper.DeploySmartContractAccountHelper(holder1, backend)
+	require.NoError(t, err)
+	greeterAddress, _, greeter, err := greeter_wrapper.DeployGreeter(holder1, backend)
+	require.NoError(t, err)
+	linkTokenAddress, _, linkToken, err := link_token_interface.DeployLinkToken(holder1, backend)
+	require.NoError(t, err)
+	linkEthFeedAddress, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(
+		holder1,
+		backend,
+		18,
+		(*big.Int)(assets.GWei(5000000)), // .005 ETH
+	)
+	require.NoError(t, err)
+	vrfCoordinatorAddress, _, vrfCoordinator, err := vrf_coordinator_mock.DeployVRFCoordinatorMock(holder1, backend, linkTokenAddress)
+	require.NoError(t, err)
+	vrfConsumerAddress, _, _, err := solidity_vrf_consumer_interface_v08.DeployVRFConsumer(holder1, backend, vrfCoordinatorAddress, linkTokenAddress)
+	require.NoError(t, err)
+	backend.Commit()
+
+	return &EntryPointUniverse{
+		holder1:               holder1,
+		holder1Key:            holder1Key,
+		holder2:               holder2,
+		backend:               backend,
+		entryPointAddress:     entryPointAddress,
+		entryPoint:            entryPoint,
+		factoryAddress:        factoryAddress,
+		helper:                helper,
+		greeterAddress:        greeterAddress,
+		greeter:               greeter,
+		linkTokenAddress:      linkTokenAddress,
+		linkToken:             linkToken,
+		linkEthFeedAddress:    linkEthFeedAddress,
+		vrfCoordinatorAddress: vrfCoordinatorAddress,
+		vrfCoordinator:        vrfCoordinator,
+		vrfConsumerAddress:    vrfConsumerAddress,
+	}
+}
+
+func Test4337Basic(t *testing.T) {
+	// Deploy universe.
+	universe := deployTransmissionUniverse(t)
+	holder1 := universe.holder1
+	holder2 := universe.holder2
+	backend := universe.backend
+
+	// Ensure no greeting is already set.
+ initialGreeting, err := universe.greeter.GetGreeting(nil) + require.NoError(t, err) + require.Equal(t, "", initialGreeting) + + // Get the address at which the Smart Contract Account will be deployed. + toDeployAddress, err := universe.helper.CalculateSmartContractAccountAddress( + nil, + holder1.From, + universe.entryPointAddress, + universe.factoryAddress, + ) + require.NoError(t, err) + t.Log("Smart Contract Account Address:", toDeployAddress) + + // Get the initialization code for the Smart Contract Account. + fullInitializeCode, err := universe.helper.GetInitCode(nil, universe.factoryAddress, holder1.From, universe.entryPointAddress) + require.NoError(t, err) + t.Log("Full initialization code:", common.Bytes2Hex(fullInitializeCode)) + + // Construct calldata for setGreeting. + encodedGreetingCall, err := greeterABI.Pack("setGreeting", "bye") + require.NoError(t, err) + t.Log("Encoded greeting call:", common.Bytes2Hex(encodedGreetingCall)) + + // Construct the calldata to be passed in the user operation. + var ( + value = big.NewInt(0) + nonce = big.NewInt(0) + deadline = big.NewInt(1000) + ) + fullEncoding, err := universe.helper.GetFullEndTxEncoding(nil, universe.greeterAddress, value, deadline, encodedGreetingCall) + require.NoError(t, err) + t.Log("Full user operation calldata:", common.Bytes2Hex(fullEncoding)) + + // Construct and execute user operation. + userOp := entry_point.UserOperation{ + Sender: toDeployAddress, + Nonce: nonce, + InitCode: fullInitializeCode, + CallData: fullEncoding, + CallGasLimit: big.NewInt(10_000_000), + VerificationGasLimit: big.NewInt(10_000_000), + PreVerificationGas: big.NewInt(10_000_000), + MaxFeePerGas: big.NewInt(100), + MaxPriorityFeePerGas: big.NewInt(200), + PaymasterAndData: []byte(""), + Signature: []byte(""), + } + + // Generate hash from user operation, sign it, and include it in the user operation. 
+ userOpHash, err := universe.entryPoint.GetUserOpHash(nil, userOp) + require.NoError(t, err) + fullHash, err := universe.helper.GetFullHashForSigning(nil, userOpHash, toDeployAddress) + require.NoError(t, err) + t.Log("Full hash for signing:", common.Bytes2Hex(fullHash[:])) + sig, err := transmission.SignMessage(universe.holder1Key.ToEcdsaPrivKey(), fullHash[:]) + require.NoError(t, err) + t.Log("Signature:", common.Bytes2Hex(sig)) + userOp.Signature = sig + + // Deposit to the SCA's account to pay for this transaction. + holder1.Value = assets.Ether(10).ToInt() + tx, err := universe.entryPoint.DepositTo(holder1, toDeployAddress) + require.NoError(t, err) + backend.Commit() + _, err = bind.WaitMined(testutils.Context(t), backend, tx) + require.NoError(t, err) + holder1.Value = assets.Ether(0).ToInt() + balance, err := universe.entryPoint.BalanceOf(nil, toDeployAddress) + require.NoError(t, err) + require.Equal(t, assets.Ether(10).ToInt(), balance) + + // Run handleOps from holder2's account, to demonstrate that any account can execute this signed user operation. + tx, err = universe.entryPoint.HandleOps(holder2, []entry_point.UserOperation{userOp}, holder1.From) + require.NoError(t, err) + backend.Commit() + _, err = bind.WaitMined(testutils.Context(t), backend, tx) + require.NoError(t, err) + + // Ensure "bye" was successfully set as the greeting. + greetingResult, err := universe.greeter.GetGreeting(nil) + require.NoError(t, err) + require.Equal(t, "bye", greetingResult) + + // Assert smart contract account is created and nonce incremented. + sca, err := sca_wrapper.NewSCA(toDeployAddress, backend) + require.NoError(t, err) + onChainNonce, err := sca.SNonce(nil) + require.NoError(t, err) + require.Equal(t, big.NewInt(1), onChainNonce) +} + +func Test4337WithLinkTokenPaymaster(t *testing.T) { + // Deploy universe. 
+ universe := deployTransmissionUniverse(t) + holder1 := universe.holder1 + holder2 := universe.holder2 + backend := universe.backend + + // Ensure no greeting is already set. + initialGreeting, err := universe.greeter.GetGreeting(nil) + require.NoError(t, err) + require.Equal(t, "", initialGreeting) + + // Get the address at which the Smart Contract Account will be deployed. + toDeployAddress, err := universe.helper.CalculateSmartContractAccountAddress( + nil, + holder1.From, + universe.entryPointAddress, + universe.factoryAddress, + ) + require.NoError(t, err) + t.Log("Smart Contract Account Address:", toDeployAddress) + + // Get the initialization code for the Smart Contract Account. + fullInitializeCode, err := universe.helper.GetInitCode(nil, universe.factoryAddress, holder1.From, universe.entryPointAddress) + require.NoError(t, err) + t.Log("Full initialization code:", common.Bytes2Hex(fullInitializeCode)) + + // Construct calldata for setGreeting. + encodedGreetingCall, err := greeterABI.Pack("setGreeting", "bye") + require.NoError(t, err) + t.Log("Encoded greeting call:", common.Bytes2Hex(encodedGreetingCall)) + + // Construct the calldata to be passed in the user operation. + var ( + value = big.NewInt(0) + nonce = big.NewInt(0) + deadline = big.NewInt(1000) + ) + fullEncoding, err := universe.helper.GetFullEndTxEncoding(nil, universe.greeterAddress, value, deadline, encodedGreetingCall) + require.NoError(t, err) + t.Log("Full user operation calldata:", common.Bytes2Hex(fullEncoding)) + + // Deposit to PLI paymaster. 
+ linkTokenAddress, _, linkToken, err := link_token_interface.DeployLinkToken(holder1, backend) + require.NoError(t, err) + linkEthFeedAddress, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract( + holder1, + backend, + 18, + (*big.Int)(assets.GWei(5000000)), // .005 ETH + ) + require.NoError(t, err) + paymasterAddress, _, _, err := paymaster_wrapper.DeployPaymaster(holder1, backend, linkTokenAddress, linkEthFeedAddress, universe.entryPointAddress) + require.NoError(t, err) + backend.Commit() + tx, err := linkToken.TransferAndCall( + holder1, + paymasterAddress, + assets.Ether(1000).ToInt(), + common.LeftPadBytes(toDeployAddress.Bytes(), 32), + ) + require.NoError(t, err) + backend.Commit() + _, err = bind.WaitMined(testutils.Context(t), backend, tx) + require.NoError(t, err) + + // Construct and execute user operation. + userOp := entry_point.UserOperation{ + Sender: toDeployAddress, + Nonce: nonce, + InitCode: fullInitializeCode, + CallData: fullEncoding, + CallGasLimit: big.NewInt(10_000_000), + VerificationGasLimit: big.NewInt(10_000_000), + PreVerificationGas: big.NewInt(10_000_000), + MaxFeePerGas: big.NewInt(100), + MaxPriorityFeePerGas: big.NewInt(200), + PaymasterAndData: paymasterAddress.Bytes(), + Signature: []byte(""), + } + + // Generate hash from user operation, sign it, and include it in the user operation. + userOpHash, err := universe.entryPoint.GetUserOpHash(nil, userOp) + require.NoError(t, err) + fullHash, err := universe.helper.GetFullHashForSigning(nil, userOpHash, toDeployAddress) + require.NoError(t, err) + t.Log("Full hash for signing:", common.Bytes2Hex(fullHash[:])) + sig, err := transmission.SignMessage(universe.holder1Key.ToEcdsaPrivKey(), fullHash[:]) + require.NoError(t, err) + t.Log("Signature:", common.Bytes2Hex(sig)) + userOp.Signature = sig + + // Deposit to the Paymaster's account to pay for this transaction. 
+ holder1.Value = assets.Ether(10).ToInt() + tx, err = universe.entryPoint.DepositTo(holder1, paymasterAddress) + require.NoError(t, err) + backend.Commit() + _, err = bind.WaitMined(testutils.Context(t), backend, tx) + require.NoError(t, err) + holder1.Value = assets.Ether(0).ToInt() + balance, err := universe.entryPoint.BalanceOf(nil, paymasterAddress) + require.NoError(t, err) + require.Equal(t, assets.Ether(10).ToInt(), balance) + + // Run handleOps from holder2's account, to demonstrate that any account can execute this signed user operation. + tx, err = universe.entryPoint.HandleOps(holder2, []entry_point.UserOperation{userOp}, holder1.From) + require.NoError(t, err) + backend.Commit() + _, err = bind.WaitMined(testutils.Context(t), backend, tx) + require.NoError(t, err) + + // Ensure "bye" was successfully set as the greeting. + greetingResult, err := universe.greeter.GetGreeting(nil) + require.NoError(t, err) + require.Equal(t, "bye", greetingResult) + + // Assert smart contract account is created and nonce incremented. + sca, err := sca_wrapper.NewSCA(toDeployAddress, backend) + require.NoError(t, err) + onChainNonce, err := sca.SNonce(nil) + require.NoError(t, err) + require.Equal(t, big.NewInt(1), onChainNonce) +} + +func Test4337WithLinkTokenVRFRequestAndPaymaster(t *testing.T) { + // Deploy universe. + universe := deployTransmissionUniverse(t) + holder1 := universe.holder1 + holder2 := universe.holder2 + backend := universe.backend + + // Get the address at which the Smart Contract Account will be deployed. + toDeployAddress, err := universe.helper.CalculateSmartContractAccountAddress( + nil, + holder1.From, + universe.entryPointAddress, + universe.factoryAddress, + ) + require.NoError(t, err) + t.Log("Smart Contract Account Address:", toDeployAddress) + + // Get the initialization code for the Smart Contract Account. 
+ fullInitializeCode, err := universe.helper.GetInitCode(nil, universe.factoryAddress, holder1.From, universe.entryPointAddress) + require.NoError(t, err) + t.Log("Full initialization code:", common.Bytes2Hex(fullInitializeCode)) + + // Construct calldata for the vrf request. + var keyhash [32]byte + copy(keyhash[:], common.LeftPadBytes(big.NewInt(123).Bytes(), 32)) + var fee = assets.Ether(1).ToInt() + encodedVRFRequest, err := consumerABI.Pack("doRequestRandomness", keyhash, fee) + require.NoError(t, err) + t.Log("Encoded vrf request:", common.Bytes2Hex(encodedVRFRequest)) + + // Construct the calldata to be passed in the user operation. + var ( + value = big.NewInt(0) + nonce = big.NewInt(0) + deadline = big.NewInt(1000) + ) + fullEncoding, err := universe.helper.GetFullEndTxEncoding(nil, universe.vrfConsumerAddress, value, deadline, encodedVRFRequest) + require.NoError(t, err) + t.Log("Full user operation calldata:", common.Bytes2Hex(fullEncoding)) + + // Deposit to PLI paymaster. + paymasterAddress, _, _, err := paymaster_wrapper.DeployPaymaster(holder1, backend, universe.linkTokenAddress, universe.linkEthFeedAddress, universe.entryPointAddress) + require.NoError(t, err) + backend.Commit() + tx, err := universe.linkToken.TransferAndCall( + holder1, + paymasterAddress, + assets.Ether(1000).ToInt(), + common.LeftPadBytes(toDeployAddress.Bytes(), 32), + ) + require.NoError(t, err) + backend.Commit() + _, err = bind.WaitMined(testutils.Context(t), backend, tx) + require.NoError(t, err) + + // Generate encoded paymaster data to fund the VRF consumer. + encodedPaymasterData, err := universe.helper.GetAbiEncodedDirectRequestData(nil, universe.vrfConsumerAddress, fee, fee) + require.NoError(t, err) + + // Construct and execute user operation. 
+ userOp := entry_point.UserOperation{ + Sender: toDeployAddress, + Nonce: nonce, + InitCode: fullInitializeCode, + CallData: fullEncoding, + CallGasLimit: big.NewInt(10_000_000), + VerificationGasLimit: big.NewInt(10_000_000), + PreVerificationGas: big.NewInt(10_000_000), + MaxFeePerGas: big.NewInt(100), + MaxPriorityFeePerGas: big.NewInt(200), + PaymasterAndData: append(append(paymasterAddress.Bytes(), byte(0)), encodedPaymasterData...), + Signature: []byte(""), + } + + // Generate hash from user operation, sign it, and include it in the user operation. + userOpHash, err := universe.entryPoint.GetUserOpHash(nil, userOp) + require.NoError(t, err) + fullHash, err := universe.helper.GetFullHashForSigning(nil, userOpHash, toDeployAddress) + require.NoError(t, err) + t.Log("Full hash for signing:", common.Bytes2Hex(fullHash[:])) + sig, err := transmission.SignMessage(universe.holder1Key.ToEcdsaPrivKey(), fullHash[:]) + require.NoError(t, err) + t.Log("Signature:", common.Bytes2Hex(sig)) + userOp.Signature = sig + + // Deposit to the Paymaster's account to pay for this transaction. + holder1.Value = assets.Ether(10).ToInt() + tx, err = universe.entryPoint.DepositTo(holder1, paymasterAddress) + require.NoError(t, err) + backend.Commit() + _, err = bind.WaitMined(testutils.Context(t), backend, tx) + require.NoError(t, err) + holder1.Value = assets.Ether(0).ToInt() + balance, err := universe.entryPoint.BalanceOf(nil, paymasterAddress) + require.NoError(t, err) + require.Equal(t, assets.Ether(10).ToInt(), balance) + + // Run handleOps from holder2's account, to demonstrate that any account can execute this signed user operation. + // Manually execute transaction to test ABI packing. 
+	gasPrice, err := backend.SuggestGasPrice(testutils.Context(t))
+	require.NoError(t, err)
+	accountNonce, err := backend.PendingNonceAt(testutils.Context(t), holder2.From)
+	require.NoError(t, err)
+	payload, err := entrypointABI.Pack("handleOps", []entry_point.UserOperation{userOp}, holder1.From)
+	require.NoError(t, err)
+	gas, err := backend.EstimateGas(testutils.Context(t), ethereum.CallMsg{
+		From:     holder2.From,
+		To:       &universe.entryPointAddress,
+		Gas:      0,
+		Data:     payload,
+		GasPrice: gasPrice,
+	})
+	require.NoError(t, err)
+	unsigned := types.NewTx(&types.LegacyTx{
+		Nonce:    accountNonce,
+		Gas:      gas,
+		To:       &universe.entryPointAddress,
+		Value:    big.NewInt(0),
+		Data:     payload,
+		GasPrice: gasPrice,
+	})
+	signedtx, err := holder2.Signer(holder2.From, unsigned)
+	require.NoError(t, err)
+	err = backend.SendTransaction(testutils.Context(t), signedtx)
+	require.NoError(t, err)
+	backend.Commit()
+	receipt, err := bind.WaitMined(testutils.Context(t), backend, signedtx)
+	require.NoError(t, err)
+	t.Log("Receipt:", receipt.Status)
+
+	// Assert the VRF request was correctly made.
+	logs, err := backend.FilterLogs(testutils.Context(t), ethereum.FilterQuery{
+		Addresses: []common.Address{universe.vrfCoordinatorAddress},
+	})
+	require.NoError(t, err)
+	require.Equal(t, 1, len(logs))
+	randomnessRequestLog, err := universe.vrfCoordinator.ParseRandomnessRequest(logs[0])
+	require.NoError(t, err)
+	require.Equal(t, fee, randomnessRequestLog.Fee)
+	require.Equal(t, keyhash, randomnessRequestLog.KeyHash)
+	require.Equal(t, universe.vrfConsumerAddress, randomnessRequestLog.Sender)
+
+	// Assert smart contract account is created and nonce incremented.
+ sca, err := sca_wrapper.NewSCA(toDeployAddress, backend) + require.NoError(t, err) + onChainNonce, err := sca.SNonce(nil) + require.NoError(t, err) + require.Equal(t, big.NewInt(1), onChainNonce) +} diff --git a/core/services/transmission/signature.go b/core/services/transmission/signature.go new file mode 100644 index 00000000..50b7bc58 --- /dev/null +++ b/core/services/transmission/signature.go @@ -0,0 +1,19 @@ +package transmission + +import ( + "crypto/ecdsa" + + "github.com/ethereum/go-ethereum/crypto" +) + +func SignMessage( + ownerPrivateKey *ecdsa.PrivateKey, + message []byte, +) ([]byte, error) { + sig, err := crypto.Sign(message, ownerPrivateKey) + if err != nil { + return nil, err + } + + return sig, nil +} diff --git a/core/services/versioning/models.go b/core/services/versioning/models.go new file mode 100644 index 00000000..9a411187 --- /dev/null +++ b/core/services/versioning/models.go @@ -0,0 +1,17 @@ +package versioning + +import ( + "time" +) + +type NodeVersion struct { + Version string + CreatedAt time.Time +} + +func NewNodeVersion(version string) NodeVersion { + return NodeVersion{ + Version: version, + CreatedAt: time.Now(), + } +} diff --git a/core/services/versioning/orm.go b/core/services/versioning/orm.go new file mode 100644 index 00000000..bf4c8108 --- /dev/null +++ b/core/services/versioning/orm.go @@ -0,0 +1,122 @@ +package versioning + +import ( + "context" + "database/sql" + "time" + + "github.com/Masterminds/semver/v3" + "github.com/jackc/pgconn" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// Version ORM manages the node_versions table +// NOTE: If you just need the current application version, consider using static.Version instead +// The database version is ONLY useful for managing versioning specific to the database e.g. 
for backups or migrations + +type ORM interface { + FindLatestNodeVersion() (*NodeVersion, error) + UpsertNodeVersion(version NodeVersion) error +} + +type orm struct { + db *sqlx.DB + lggr logger.Logger + timeout time.Duration +} + +func NewORM(db *sqlx.DB, lggr logger.Logger, timeout time.Duration) *orm { + return &orm{ + db: db, + lggr: lggr.Named("VersioningORM"), + timeout: timeout, + } +} + +// UpsertNodeVersion inserts a new NodeVersion, returning error if the DB +// version is newer than the current one +// NOTE: If you just need the current application version, consider using static.Version instead +// The database version is ONLY useful for managing versioning specific to the database e.g. for backups or migrations +func (o *orm) UpsertNodeVersion(version NodeVersion) error { + now := time.Now() + + if _, err := semver.NewVersion(version.Version); err != nil { + return errors.Wrapf(err, "%q is not valid semver", version.Version) + } + + ctx, cancel := context.WithTimeout(context.Background(), o.timeout) + defer cancel() + return pg.SqlxTransaction(ctx, o.db, o.lggr, func(tx pg.Queryer) error { + if _, _, err := CheckVersion(tx, logger.NullLogger, version.Version); err != nil { + return err + } + + stmt := ` +INSERT INTO node_versions (version, created_at) +VALUES ($1, $2) +ON CONFLICT ((version IS NOT NULL)) DO UPDATE SET +version = EXCLUDED.version, +created_at = EXCLUDED.created_at +` + + _, err := tx.Exec(stmt, version.Version, now) + return err + }) +} + +// CheckVersion returns an error if there is a valid semver version in the +// node_versions table that is higher than the current app version +func CheckVersion(q pg.Queryer, lggr logger.Logger, appVersion string) (appv, dbv *semver.Version, err error) { + lggr = lggr.Named("Version") + var dbVersion string + err = q.Get(&dbVersion, `SELECT version FROM node_versions ORDER BY created_at DESC LIMIT 1 FOR UPDATE`) + if errors.Is(err, sql.ErrNoRows) { + lggr.Debugw("No previous version set", 
"appVersion", appVersion) + return nil, nil, nil + } else if err != nil { + var pqErr *pgconn.PgError + ok := errors.As(err, &pqErr) + if ok && pqErr.Code == "42P01" && pqErr.Message == `relation "node_versions" does not exist` { + lggr.Debugw("Previous version not set; node_versions table does not exist", "appVersion", appVersion) + return nil, nil, nil + } + return nil, nil, err + } + + dbv, dberr := semver.NewVersion(dbVersion) + appv, apperr := semver.NewVersion(appVersion) + if dberr != nil { + lggr.Warnf("Database version %q is not valid semver; skipping version check", dbVersion) + return nil, nil, nil + } + if apperr != nil { + return nil, nil, errors.Errorf("Application version %q is not valid semver", appVersion) + } + if dbv.GreaterThan(appv) { + return nil, nil, errors.Errorf("Application version (%s) is lower than database version (%s). Only Plugin %s or higher can be run on this database", appv, dbv, dbv) + } + return appv, dbv, nil +} + +// FindLatestNodeVersion looks up the latest node version +// NOTE: If you just need the current application version, consider using static.Version instead +// The database version is ONLY useful for managing versioning specific to the database e.g. 
for backups or migrations +func (o *orm) FindLatestNodeVersion() (*NodeVersion, error) { + stmt := ` +SELECT version, created_at +FROM node_versions +ORDER BY created_at DESC +` + + var nodeVersion NodeVersion + err := o.db.Get(&nodeVersion, stmt) + if err != nil { + return nil, err + } + + return &nodeVersion, err +} diff --git a/core/services/versioning/orm_test.go b/core/services/versioning/orm_test.go new file mode 100644 index 00000000..19f3fd05 --- /dev/null +++ b/core/services/versioning/orm_test.go @@ -0,0 +1,115 @@ +package versioning + +import ( + "testing" + + "github.com/Masterminds/semver/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/static" +) + +func TestORM_NodeVersion_UpsertNodeVersion(t *testing.T) { + db := pgtest.NewSqlxDB(t) + orm := NewORM(db, logger.TestLogger(t), pg.DefaultQueryTimeout) + + err := orm.UpsertNodeVersion(NewNodeVersion("9.9.8")) + require.NoError(t, err) + + ver, err := orm.FindLatestNodeVersion() + + require.NoError(t, err) + require.NotNil(t, ver) + require.Equal(t, "9.9.8", ver.Version) + require.NotZero(t, ver.CreatedAt) + + // Testing Upsert + require.NoError(t, orm.UpsertNodeVersion(NewNodeVersion("9.9.8"))) + + err = orm.UpsertNodeVersion(NewNodeVersion("9.9.7")) + require.Error(t, err) + assert.Contains(t, err.Error(), "Application version (9.9.7) is lower than database version (9.9.8). 
Only Plugin 9.9.8 or higher can be run on this database") + + require.NoError(t, orm.UpsertNodeVersion(NewNodeVersion("9.9.9"))) + + var count int + err = db.QueryRowx(`SELECT count(*) FROM node_versions`).Scan(&count) + require.NoError(t, err) + assert.Equal(t, 1, count) + + ver, err = orm.FindLatestNodeVersion() + + require.NoError(t, err) + require.NotNil(t, ver) + require.Equal(t, "9.9.9", ver.Version) + + // invalid semver returns error + err = orm.UpsertNodeVersion(NewNodeVersion("random_12345")) + require.Error(t, err) + assert.Contains(t, err.Error(), "\"random_12345\" is not valid semver: Invalid Semantic Version") + + ver, err = orm.FindLatestNodeVersion() + require.NoError(t, err) + require.NotNil(t, ver) + require.Equal(t, "9.9.9", ver.Version) +} + +func Test_Version_CheckVersion(t *testing.T) { + db := pgtest.NewSqlxDB(t) + + lggr := logger.TestLogger(t) + + orm := NewORM(db, lggr, pg.DefaultQueryTimeout) + + err := orm.UpsertNodeVersion(NewNodeVersion("9.9.8")) + require.NoError(t, err) + + // invalid app version semver returns error + _, _, err = CheckVersion(db, lggr, static.Unset) + require.Error(t, err) + assert.Contains(t, err.Error(), `Application version "unset" is not valid semver`) + _, _, err = CheckVersion(db, lggr, "some old bollocks") + require.Error(t, err) + assert.Contains(t, err.Error(), `Application version "some old bollocks" is not valid semver`) + + // lower version returns error + _, _, err = CheckVersion(db, lggr, "9.9.7") + require.Error(t, err) + assert.Contains(t, err.Error(), "Application version (9.9.7) is lower than database version (9.9.8). 
Only Plugin 9.9.8 or higher can be run on this database") + + // equal version is ok + var appv, dbv *semver.Version + appv, dbv, err = CheckVersion(db, lggr, "9.9.8") + require.NoError(t, err) + assert.Equal(t, "9.9.8", appv.String()) + assert.Equal(t, "9.9.8", dbv.String()) + + // greater version is ok + appv, dbv, err = CheckVersion(db, lggr, "9.9.9") + require.NoError(t, err) + assert.Equal(t, "9.9.9", appv.String()) + assert.Equal(t, "9.9.8", dbv.String()) +} + +func TestORM_NodeVersion_FindLatestNodeVersion(t *testing.T) { + db := pgtest.NewSqlxDB(t) + orm := NewORM(db, logger.TestLogger(t), pg.DefaultQueryTimeout) + + // Not Found + _, err := orm.FindLatestNodeVersion() + require.Error(t, err) + + err = orm.UpsertNodeVersion(NewNodeVersion("9.9.8")) + require.NoError(t, err) + + ver, err := orm.FindLatestNodeVersion() + + require.NoError(t, err) + require.NotNil(t, ver) + require.Equal(t, "9.9.8", ver.Version) + require.NotZero(t, ver.CreatedAt) +} diff --git a/core/services/vrf/delegate.go b/core/services/vrf/delegate.go new file mode 100644 index 00000000..f41ccdf9 --- /dev/null +++ b/core/services/vrf/delegate.go @@ -0,0 +1,311 @@ +package vrf + +import ( + "fmt" + "time" + + "github.com/avast/retry-go/v4" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/theodesp/go-heaps/pairing" + "go.uber.org/multierr" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/aggregator_v3_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2_5" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_owner" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + v1 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/v1" + v2 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/v2" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" +) + +type Delegate struct { + q pg.Q + pr pipeline.Runner + porm pipeline.ORM + ks keystore.Master + legacyChains legacyevm.LegacyChainContainer + lggr logger.Logger + mailMon *mailbox.Monitor +} + +func NewDelegate( + db *sqlx.DB, + ks keystore.Master, + pr pipeline.Runner, + porm pipeline.ORM, + legacyChains legacyevm.LegacyChainContainer, + lggr logger.Logger, + cfg pg.QConfig, + mailMon *mailbox.Monitor) *Delegate { + return &Delegate{ + q: pg.NewQ(db, lggr, cfg), + ks: ks, + pr: pr, + porm: porm, + legacyChains: legacyChains, + lggr: lggr.Named("VRF"), + mailMon: mailMon, + } +} + +func (d *Delegate) JobType() job.Type { + return job.VRF +} + +func (d *Delegate) BeforeJobCreated(job.Job) {} +func (d *Delegate) AfterJobCreated(job.Job) {} +func (d *Delegate) BeforeJobDeleted(job.Job) {} +func (d *Delegate) OnDeleteJob(job.Job, pg.Queryer) error { return nil } + +// ServicesForSpec satisfies the job.Delegate interface. 
+func (d *Delegate) ServicesForSpec(jb job.Job) ([]job.ServiceCtx, error) { + if jb.VRFSpec == nil || jb.PipelineSpec == nil { + return nil, errors.Errorf("vrf.Delegate expects a VRFSpec and PipelineSpec to be present, got %+v", jb) + } + pl, err := jb.PipelineSpec.ParsePipeline() + if err != nil { + return nil, err + } + chain, err := d.legacyChains.Get(jb.VRFSpec.EVMChainID.String()) + if err != nil { + return nil, err + } + coordinator, err := solidity_vrf_coordinator_interface.NewVRFCoordinator(jb.VRFSpec.CoordinatorAddress.Address(), chain.Client()) + if err != nil { + return nil, err + } + coordinatorV2, err := vrf_coordinator_v2.NewVRFCoordinatorV2(jb.VRFSpec.CoordinatorAddress.Address(), chain.Client()) + if err != nil { + return nil, err + } + coordinatorV2Plus, err := vrf_coordinator_v2_5.NewVRFCoordinatorV25(jb.VRFSpec.CoordinatorAddress.Address(), chain.Client()) + if err != nil { + return nil, err + } + + // If the batch coordinator address is not provided, we will fall back to non-batched + var batchCoordinatorV2 *batch_vrf_coordinator_v2.BatchVRFCoordinatorV2 + if jb.VRFSpec.BatchCoordinatorAddress != nil { + batchCoordinatorV2, err = batch_vrf_coordinator_v2.NewBatchVRFCoordinatorV2( + jb.VRFSpec.BatchCoordinatorAddress.Address(), chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "create batch coordinator wrapper") + } + } + + var vrfOwner *vrf_owner.VRFOwner + if jb.VRFSpec.VRFOwnerAddress != nil { + vrfOwner, err = vrf_owner.NewVRFOwner( + jb.VRFSpec.VRFOwnerAddress.Address(), chain.Client(), + ) + if err != nil { + return nil, errors.Wrap(err, "create vrf owner wrapper") + } + } + + l := d.lggr.Named(jb.ExternalJobID.String()).With( + "jobID", jb.ID, + "externalJobID", jb.ExternalJobID, + "coordinatorAddress", jb.VRFSpec.CoordinatorAddress, + ) + lV1 := l.Named("VRFListener") + lV2 := l.Named("VRFListenerV2") + lV2Plus := l.Named("VRFListenerV2Plus") + + for _, task := range pl.Tasks { + if _, ok := task.(*pipeline.VRFTaskV2Plus); 
ok { + if err2 := CheckFromAddressesExist(jb, d.ks.Eth()); err != nil { + return nil, err2 + } + + if !FromAddressMaxGasPricesAllEqual(jb, chain.Config().EVM().GasEstimator().PriceMaxKey) { + return nil, errors.New("key-specific max gas prices of all fromAddresses are not equal, please set them to equal values") + } + + if err2 := CheckFromAddressMaxGasPrices(jb, chain.Config().EVM().GasEstimator().PriceMaxKey); err != nil { + return nil, err2 + } + if vrfOwner != nil { + return nil, errors.New("VRF Owner is not supported for VRF V2 Plus") + } + if jb.VRFSpec.CustomRevertsPipelineEnabled { + return nil, errors.New("Custom Reverted Txns Pipeline is not supported for VRF V2 Plus") + } + + // Get the PLINATIVEFEED address with retries + // This is needed because the RPC endpoint may be down so we need to + // switch over to another one. + var linkNativeFeedAddress common.Address + err = retry.Do(func() error { + linkNativeFeedAddress, err = coordinatorV2Plus.PLINATIVEFEED(nil) + return err + }, retry.Attempts(10), retry.Delay(500*time.Millisecond)) + if err != nil { + return nil, errors.Wrap(err, "can't call PLINATIVEFEED") + } + + aggregator, err2 := aggregator_v3_interface.NewAggregatorV3Interface(linkNativeFeedAddress, chain.Client()) + if err2 != nil { + return nil, errors.Wrap(err2, "NewAggregatorV3Interface") + } + + return []job.ServiceCtx{ + v2.New( + chain.Config().EVM(), + chain.Config().EVM().GasEstimator(), + lV2Plus, + chain, + chain.ID(), + d.q, + v2.NewCoordinatorV2_5(coordinatorV2Plus), + batchCoordinatorV2, + vrfOwner, + aggregator, + d.pr, + d.ks.Eth(), + jb, + func() {}, + // the lookback in the deduper must be >= the lookback specified for the log poller + // otherwise we will end up re-delivering logs that were already delivered. 
+ vrfcommon.NewInflightCache(int(chain.Config().EVM().FinalityDepth())), + vrfcommon.NewLogDeduper(int(chain.Config().EVM().FinalityDepth())), + ), + }, nil + } + if _, ok := task.(*pipeline.VRFTaskV2); ok { + if err2 := CheckFromAddressesExist(jb, d.ks.Eth()); err != nil { + return nil, err2 + } + + if !FromAddressMaxGasPricesAllEqual(jb, chain.Config().EVM().GasEstimator().PriceMaxKey) { + return nil, errors.New("key-specific max gas prices of all fromAddresses are not equal, please set them to equal values") + } + + if err2 := CheckFromAddressMaxGasPrices(jb, chain.Config().EVM().GasEstimator().PriceMaxKey); err != nil { + return nil, err2 + } + + // Get the PLIETHFEED address with retries + // This is needed because the RPC endpoint may be down so we need to + // switch over to another one. + var linkEthFeedAddress common.Address + err = retry.Do(func() error { + linkEthFeedAddress, err = coordinatorV2.PLIETHFEED(nil) + return err + }, retry.Attempts(10), retry.Delay(500*time.Millisecond)) + if err != nil { + return nil, errors.Wrap(err, "PLIETHFEED") + } + aggregator, err := aggregator_v3_interface.NewAggregatorV3Interface(linkEthFeedAddress, chain.Client()) + if err != nil { + return nil, errors.Wrap(err, "NewAggregatorV3Interface") + } + if vrfOwner == nil { + lV2.Infow("Running without VRFOwnerAddress set on the spec") + } + + return []job.ServiceCtx{v2.New( + chain.Config().EVM(), + chain.Config().EVM().GasEstimator(), + lV2, + chain, + chain.ID(), + d.q, + v2.NewCoordinatorV2(coordinatorV2), + batchCoordinatorV2, + vrfOwner, + aggregator, + d.pr, + d.ks.Eth(), + jb, + func() {}, + // the lookback in the deduper must be >= the lookback specified for the log poller + // otherwise we will end up re-delivering logs that were already delivered. 
+ vrfcommon.NewInflightCache(int(chain.Config().EVM().FinalityDepth())), + vrfcommon.NewLogDeduper(int(chain.Config().EVM().FinalityDepth())), + ), + }, nil + } + if _, ok := task.(*pipeline.VRFTask); ok { + return []job.ServiceCtx{&v1.Listener{ + Cfg: chain.Config().EVM(), + FeeCfg: chain.Config().EVM().GasEstimator(), + L: logger.Sugared(lV1), + Q: d.q, + Coordinator: coordinator, + PipelineRunner: d.pr, + GethKs: d.ks.Eth(), + Job: jb, + MailMon: d.mailMon, + // Note the mailbox size effectively sets a limit on how many logs we can replay + // in the event of a VRF outage. + ReqLogs: mailbox.NewHighCapacity[log.Broadcast](), + ChStop: make(chan struct{}), + WaitOnStop: make(chan struct{}), + NewHead: make(chan struct{}, 1), + BlockNumberToReqID: pairing.New(), + ReqAdded: func() {}, + Deduper: vrfcommon.NewLogDeduper(int(chain.Config().EVM().FinalityDepth())), + Chain: chain, + }}, nil + } + } + return nil, errors.New("invalid job spec expected a vrf task") +} + +// CheckFromAddressesExist returns an error if and only if one of the addresses +// in the VRF spec's fromAddresses field does not exist in the keystore. +func CheckFromAddressesExist(jb job.Job, gethks keystore.Eth) (err error) { + for _, a := range jb.VRFSpec.FromAddresses { + _, err2 := gethks.Get(a.Hex()) + err = multierr.Append(err, err2) + } + return +} + +// CheckFromAddressMaxGasPrices checks if the provided gas price in the job spec gas lane parameter +// matches what is set for the provided from addresses. +// If they don't match, this is a configuration error. An error is returned with all the keys that do +// not match the provided gas lane price. 
+func CheckFromAddressMaxGasPrices(jb job.Job, keySpecificMaxGas keySpecificMaxGasFn) (err error) { + if jb.VRFSpec.GasLanePrice != nil { + for _, a := range jb.VRFSpec.FromAddresses { + if keySpecific := keySpecificMaxGas(a.Address()); !keySpecific.Equal(jb.VRFSpec.GasLanePrice) { + err = multierr.Append(err, + fmt.Errorf( + "key-specific max gas price of from address %s (%s) does not match gasLanePriceGWei (%s) specified in job spec", + a.Hex(), keySpecific.String(), jb.VRFSpec.GasLanePrice.String())) + } + } + } + return +} + +type keySpecificMaxGasFn func(common.Address) *assets.Wei + +// FromAddressMaxGasPricesAllEqual returns true if and only if all the specified from +// addresses in the fromAddresses field of the VRF v2 job have the same key-specific max +// gas price. +func FromAddressMaxGasPricesAllEqual(jb job.Job, keySpecificMaxGasPriceWei keySpecificMaxGasFn) (allEqual bool) { + allEqual = true + for i := range jb.VRFSpec.FromAddresses { + allEqual = allEqual && keySpecificMaxGasPriceWei(jb.VRFSpec.FromAddresses[i].Address()).Equal( + keySpecificMaxGasPriceWei(jb.VRFSpec.FromAddresses[0].Address()), + ) + } + return +} diff --git a/core/services/vrf/delegate_test.go b/core/services/vrf/delegate_test.go new file mode 100644 index 00000000..0d1f721f --- /dev/null +++ b/core/services/vrf/delegate_test.go @@ -0,0 +1,707 @@ +package vrf_test + +import ( + "bytes" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/services/servicetest" + "github.com/goplugin/plugin-common/pkg/utils/mailbox/mailboxtest" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclimocks 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker" + httypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/headtracker/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + log_mocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf" + vrf_mocks "github.com/goplugin/pluginv3.0/v2/core/services/vrf/mocks" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/solidity_cross_tests" + v1 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/v1" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type 
vrfUniverse struct { + jrm job.ORM + pr pipeline.Runner + prm pipeline.ORM + lb *log_mocks.Broadcaster + ec *evmclimocks.Client + ks keystore.Master + vrfkey vrfkey.KeyV2 + submitter common.Address + txm *txmgr.TxManager + hb httypes.HeadBroadcaster + legacyChains legacyevm.LegacyChainContainer + cid big.Int +} + +func buildVrfUni(t *testing.T, db *sqlx.DB, cfg plugin.GeneralConfig) vrfUniverse { + // Mock all chain interactions + lb := log_mocks.NewBroadcaster(t) + lb.On("AddDependents", 1).Maybe() + lb.On("Register", mock.Anything, mock.Anything).Return(func() {}).Maybe() + ec := evmclimocks.NewClient(t) + ec.On("ConfiguredChainID").Return(testutils.FixtureChainID) + ec.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(51), nil).Maybe() + lggr := logger.TestLogger(t) + hb := headtracker.NewHeadBroadcaster(lggr) + + // Don't mock db interactions + prm := pipeline.NewORM(db, lggr, cfg.Database(), cfg.JobPipeline().MaxSuccessfulRuns()) + btORM := bridges.NewORM(db, lggr, cfg.Database()) + ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg.Database()) + _, dbConfig, evmConfig := txmgr.MakeTestConfigs(t) + txm, err := txmgr.NewTxm(db, evmConfig, evmConfig.GasEstimator(), evmConfig.Transactions(), dbConfig, dbConfig.Listener(), ec, logger.TestLogger(t), nil, ks.Eth(), nil) + orm := headtracker.NewORM(db, lggr, cfg.Database(), *testutils.FixtureChainID) + require.NoError(t, orm.IdempotentInsertHead(testutils.Context(t), cltest.Head(51))) + jrm := job.NewORM(db, prm, btORM, ks, lggr, cfg.Database()) + t.Cleanup(func() { assert.NoError(t, jrm.Close()) }) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{LogBroadcaster: lb, KeyStore: ks.Eth(), Client: ec, DB: db, GeneralConfig: cfg, TxManager: txm}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + pr := pipeline.NewRunner(prm, btORM, cfg.JobPipeline(), cfg.WebServer(), legacyChains, ks.Eth(), ks.VRF(), lggr, nil, nil) + require.NoError(t, 
ks.Unlock(testutils.Password)) + k, err2 := ks.Eth().Create(testutils.FixtureChainID) + require.NoError(t, err2) + submitter := k.Address + require.NoError(t, err) + vrfkey, err3 := ks.VRF().Create() + require.NoError(t, err3) + + return vrfUniverse{ + jrm: jrm, + pr: pr, + prm: prm, + lb: lb, + ec: ec, + ks: ks, + vrfkey: vrfkey, + submitter: submitter, + txm: &txm, + hb: hb, + legacyChains: legacyChains, + cid: *ec.ConfiguredChainID(), + } +} + +func generateCallbackReturnValues(t *testing.T, fulfilled bool) []byte { + callback, err := abi.NewType("tuple", "", []abi.ArgumentMarshaling{ + {Name: "callback_contract", Type: "address"}, + {Name: "randomness_fee", Type: "int256"}, + {Name: "seed_and_block_num", Type: "bytes32"}}) + require.NoError(t, err) + var args abi.Arguments = []abi.Argument{{Type: callback}} + if fulfilled { + // Empty callback + b, err2 := args.Pack(solidity_vrf_coordinator_interface.Callbacks{ + RandomnessFee: big.NewInt(10), + SeedAndBlockNum: evmutils.EmptyHash, + }) + require.NoError(t, err2) + return b + } + b, err := args.Pack(solidity_vrf_coordinator_interface.Callbacks{ + RandomnessFee: big.NewInt(10), + SeedAndBlockNum: evmutils.NewHash(), + }) + require.NoError(t, err) + return b +} + +func waitForChannel(t *testing.T, c chan struct{}, timeout time.Duration, errMsg string) { + select { + case <-c: + case <-time.After(timeout): + t.Error(errMsg) + } +} + +func setup(t *testing.T) (vrfUniverse, *v1.Listener, job.Job) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + vuni := buildVrfUni(t, db, cfg) + + mailMon := servicetest.Run(t, mailboxtest.NewMonitor(t)) + + vd := vrf.NewDelegate( + db, + vuni.ks, + vuni.pr, + vuni.prm, + vuni.legacyChains, + logger.TestLogger(t), + cfg.Database(), + mailMon) + vs := testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{PublicKey: vuni.vrfkey.PublicKey.String(), EVMChainID: testutils.FixtureChainID.String()}) + jb, err := vrfcommon.ValidatedVRFSpec(vs.Toml()) + 
require.NoError(t, err) + err = vuni.jrm.CreateJob(&jb) + require.NoError(t, err) + vl, err := vd.ServicesForSpec(jb) + require.NoError(t, err) + require.Len(t, vl, 1) + listener := vl[0].(*v1.Listener) + // Start the listenerV1 + go func() { + listener.RunLogListener([]func(){}, 6) + }() + go func() { + listener.RunHeadListener(func() {}) + }() + servicetest.Run(t, listener) + return vuni, listener, jb +} + +func TestDelegate_ReorgAttackProtection(t *testing.T) { + vuni, listener, jb := setup(t) + + // Same request has already been fulfilled twice + reqID := evmutils.NewHash() + var reqIDBytes [32]byte + copy(reqIDBytes[:], reqID.Bytes()) + listener.SetRespCount(reqIDBytes, 2) + + // Send in the same request again + pk, err := secp256k1.NewPublicKeyFromHex(vuni.vrfkey.PublicKey.String()) + require.NoError(t, err) + added := make(chan struct{}) + listener.SetReqAdded(func() { + added <- struct{}{} + }) + preSeed := common.BigToHash(big.NewInt(42)).Bytes() + txHash := evmutils.NewHash() + vuni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil).Maybe() + vuni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Return(nil).Maybe() + vuni.ec.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(generateCallbackReturnValues(t, false), nil).Maybe() + listener.HandleLog(log.NewLogBroadcast(types.Log{ + // Data has all the NON-indexed parameters + Data: bytes.Join([][]byte{pk.MustHash().Bytes(), // key hash + preSeed, // preSeed + evmutils.NewHash().Bytes(), // sender + evmutils.NewHash().Bytes(), // fee + reqID.Bytes()}, []byte{}, // requestID + ), + // JobID is indexed, thats why it lives in the Topics. 
+ Topics: []common.Hash{ + solidity_cross_tests.VRFRandomnessRequestLogTopic(), + jb.ExternalIDEncodeStringToTopic(), // jobID + }, + BlockNumber: 10, + TxHash: txHash, + }, vuni.cid, nil)) + + // Wait until the log is present + waitForChannel(t, added, time.Second, "request not added to the queue") + reqs := listener.ReqsConfirmedAt() + if assert.Equal(t, 1, len(reqs)) { + // It should be confirmed at 10+6*(2^2) + assert.Equal(t, uint64(34), reqs[0]) + } +} + +func TestDelegate_ValidLog(t *testing.T) { + vuni, listener, jb := setup(t) + txHash := evmutils.NewHash() + reqID1 := evmutils.NewHash() + reqID2 := evmutils.NewHash() + keyID := vuni.vrfkey.PublicKey.String() + pk, err := secp256k1.NewPublicKeyFromHex(keyID) + require.NoError(t, err) + added := make(chan struct{}) + listener.SetReqAdded(func() { + added <- struct{}{} + }) + preSeed := common.BigToHash(big.NewInt(42)).Bytes() + bh := evmutils.NewHash() + var tt = []struct { + reqID [32]byte + log types.Log + }{ + { + reqID: reqID1, + log: types.Log{ + // Data has all the NON-indexed parameters + Data: bytes.Join([][]byte{ + pk.MustHash().Bytes(), // key hash + common.BigToHash(big.NewInt(42)).Bytes(), // seed + evmutils.NewHash().Bytes(), // sender + evmutils.NewHash().Bytes(), // fee + reqID1.Bytes()}, // requestID + []byte{}), + // JobID is indexed, thats why it lives in the Topics. 
+ Topics: []common.Hash{ + solidity_cross_tests.VRFRandomnessRequestLogTopic(), + jb.ExternalIDEncodeStringToTopic(), // jobID STRING + }, + TxHash: txHash, + BlockNumber: 10, + BlockHash: bh, + Index: 1, + }, + }, + { + + reqID: reqID2, + log: types.Log{ + Data: bytes.Join([][]byte{ + pk.MustHash().Bytes(), // key hash + common.BigToHash(big.NewInt(42)).Bytes(), // seed + evmutils.NewHash().Bytes(), // sender + evmutils.NewHash().Bytes(), // fee + reqID2.Bytes()}, // requestID + []byte{}), + Topics: []common.Hash{ + solidity_cross_tests.VRFRandomnessRequestLogTopic(), + jb.ExternalIDEncodeBytesToTopic(), // jobID BYTES + }, + TxHash: txHash, + BlockNumber: 10, + BlockHash: bh, + Index: 2, + }, + }, + } + + runComplete := make(chan struct{}) + vuni.pr.OnRunFinished(func(run *pipeline.Run) { + if run.State == pipeline.RunStatusCompleted { + runComplete <- struct{}{} + } + }) + + consumed := make(chan struct{}) + for i, tc := range tt { + tc := tc + vuni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + vuni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + consumed <- struct{}{} + }).Return(nil).Once() + // Expect a call to check if the req is already fulfilled. + vuni.ec.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(generateCallbackReturnValues(t, false), nil) + + listener.HandleLog(log.NewLogBroadcast(tc.log, vuni.cid, nil)) + // Wait until the log is present + waitForChannel(t, added, time.Second, "request not added to the queue") + // Feed it a head which confirms it. + listener.OnNewLongestChain(testutils.Context(t), &evmtypes.Head{Number: 16}) + waitForChannel(t, consumed, 2*time.Second, "did not mark consumed") + + // Ensure we created a successful run. 
+			waitForChannel(t, runComplete, 2*time.Second, "pipeline not complete")
+			runs, err := vuni.prm.GetAllRuns()
+			require.NoError(t, err)
+			require.Equal(t, i+1, len(runs))
+			assert.False(t, runs[0].FatalErrors.HasError())
+			// Should have 4 tasks all completed
+			assert.Len(t, runs[0].PipelineTaskRuns, 4)
+
+			p, err := vuni.ks.VRF().GenerateProof(keyID, evmutils.MustHash(string(bytes.Join([][]byte{preSeed, bh.Bytes()}, []byte{}))).Big())
+			require.NoError(t, err)
+			vuni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil)
+			vuni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
+				consumed <- struct{}{}
+			}).Return(nil).Once()
+			// If we send a completed log we should see the respCount increase.
+			// BUG FIX: previously this declared `var reqIDBytes []byte` and
+			// copied into it — copy into a nil slice transfers zero bytes, so
+			// the log Data was missing the 32-byte request ID. Slice the array
+			// directly instead.
+			reqIDBytes := tc.reqID[:]
+			listener.HandleLog(log.NewLogBroadcast(types.Log{
+				// Data has all the NON-indexed parameters
+				Data: bytes.Join([][]byte{reqIDBytes, // output
+					p.Output.Bytes(),
+				}, []byte{},
+				),
+				BlockNumber: 10,
+				TxHash:      txHash,
+				Index:       uint(i),
+			}, vuni.cid, &solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequestFulfilled{RequestId: tc.reqID}))
+			waitForChannel(t, consumed, 2*time.Second, "fulfillment log not marked consumed")
+			// Should record that we've responded to this request
+			assert.Equal(t, uint64(1), listener.RespCount(tc.reqID))
+		}
+	}
+}
+
+func TestDelegate_InvalidLog(t *testing.T) {
+	vuni, listener, jb := setup(t)
+	vuni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil)
+	done := make(chan struct{})
+	vuni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
+		done <- struct{}{}
+	}).Return(nil).Once()
+	// Expect a call to check if the req is already fulfilled.
+ vuni.ec.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(generateCallbackReturnValues(t, false), nil) + + added := make(chan struct{}) + listener.SetReqAdded(func() { + added <- struct{}{} + }) + // Send an invalid log (keyhash doesnt match) + listener.HandleLog(log.NewLogBroadcast(types.Log{ + // Data has all the NON-indexed parameters + Data: append(append(append(append( + evmutils.NewHash().Bytes(), // key hash + common.BigToHash(big.NewInt(42)).Bytes()...), // seed + evmutils.NewHash().Bytes()...), // sender + evmutils.NewHash().Bytes()...), // fee + evmutils.NewHash().Bytes()...), // requestID + // JobID is indexed, that's why it lives in the Topics. + Topics: []common.Hash{ + solidity_cross_tests.VRFRandomnessRequestLogTopic(), + jb.ExternalIDEncodeBytesToTopic(), // jobID + }, + Address: common.Address{}, + BlockNumber: 10, + TxHash: common.Hash{}, + TxIndex: 0, + BlockHash: common.Hash{}, + Index: 0, + Removed: false, + }, vuni.cid, nil)) + waitForChannel(t, added, time.Second, "request not queued") + // Feed it a head which confirms it. + listener.OnNewLongestChain(testutils.Context(t), &evmtypes.Head{Number: 16}) + waitForChannel(t, done, time.Second, "log not consumed") + + // Should create a run that errors in the vrf task + runs, err := vuni.prm.GetAllRuns() + require.NoError(t, err) + require.Equal(t, len(runs), 1) + for _, tr := range runs[0].PipelineTaskRuns { + if tr.Type == pipeline.TaskTypeVRF { + assert.Contains(t, tr.Error.String, "invalid key hash") + } + // Log parsing task itself should succeed. 
+ if tr.Type != pipeline.TaskTypeETHABIDecodeLog { + assert.False(t, tr.Output.Valid) + } + } + + db := pgtest.NewSqlxDB(t) + cfg := pgtest.NewQConfig(false) + txStore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg) + + txes, err := txStore.GetAllTxes(testutils.Context(t)) + require.NoError(t, err) + require.Len(t, txes, 0) +} + +func TestFulfilledCheck(t *testing.T) { + vuni, listener, jb := setup(t) + vuni.lb.On("WasAlreadyConsumed", mock.Anything, mock.Anything).Return(false, nil) + done := make(chan struct{}) + vuni.lb.On("MarkConsumed", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + done <- struct{}{} + }).Return(nil).Once() + // Expect a call to check if the req is already fulfilled. + // We return already fulfilled + vuni.ec.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(generateCallbackReturnValues(t, true), nil) + + added := make(chan struct{}) + listener.SetReqAdded(func() { + added <- struct{}{} + }) + // Send an invalid log (keyhash doesn't match) + listener.HandleLog(log.NewLogBroadcast( + types.Log{ + // Data has all the NON-indexed parameters + Data: bytes.Join([][]byte{ + vuni.vrfkey.PublicKey.MustHash().Bytes(), // key hash + common.BigToHash(big.NewInt(42)).Bytes(), // seed + evmutils.NewHash().Bytes(), // sender + evmutils.NewHash().Bytes(), // fee + evmutils.NewHash().Bytes()}, // requestID + []byte{}), + // JobID is indexed, that's why it lives in the Topics. 
+ Topics: []common.Hash{ + solidity_cross_tests.VRFRandomnessRequestLogTopic(), + jb.ExternalIDEncodeBytesToTopic(), // jobID STRING + }, + //TxHash: evmutils.NewHash().Bytes(), + BlockNumber: 10, + //BlockHash: evmutils.NewHash().Bytes(), + }, vuni.cid, nil)) + + // Should queue the request, even though its already fulfilled + waitForChannel(t, added, time.Second, "request not queued") + listener.OnNewLongestChain(testutils.Context(t), &evmtypes.Head{Number: 16}) + waitForChannel(t, done, time.Second, "log not consumed") + + // Should consume the log with no run + runs, err := vuni.prm.GetAllRuns() + require.NoError(t, err) + require.Equal(t, len(runs), 0) +} + +func Test_CheckFromAddressMaxGasPrices(t *testing.T) { + t.Run("returns nil error if gasLanePrice not set in job spec", func(tt *testing.T) { + spec := ` +type = "vrf" +schemaVersion = 1 +minIncomingConfirmations = 10 +publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" +coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" +requestTimeout = "168h" # 7 days +chunkSize = 25 +backoffInitialDelay = "1m" +backoffMaxDelay = "2h" +observationSource = """ +decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] +submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] +decode_log->vrf->encode_tx->submit_tx +""" +` + jb, err := vrfcommon.ValidatedVRFSpec(spec) + require.NoError(tt, err) + + cfg := 
vrf_mocks.NewFeeConfig(t) + require.NoError(tt, vrf.CheckFromAddressMaxGasPrices(jb, cfg.PriceMaxKey)) + }) + + t.Run("returns nil error on valid gas lane <=> key specific gas price setting", func(tt *testing.T) { + var fromAddresses []string + for i := 0; i < 3; i++ { + fromAddresses = append(fromAddresses, testutils.NewAddress().Hex()) + } + + cfg := vrf_mocks.NewFeeConfig(t) + for _, a := range fromAddresses { + cfg.On("PriceMaxKey", common.HexToAddress(a)).Return(assets.GWei(100)).Once() + } + defer cfg.AssertExpectations(tt) + + jb, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec( + testspecs.VRFSpecParams{ + RequestedConfsDelay: 10, + FromAddresses: fromAddresses, + ChunkSize: 25, + BackoffInitialDelay: time.Minute, + BackoffMaxDelay: time.Hour, + GasLanePrice: assets.GWei(100), + }). + Toml()) + require.NoError(t, err) + + require.NoError(tt, vrf.CheckFromAddressMaxGasPrices(jb, cfg.PriceMaxKey)) + }) + + t.Run("returns error on invalid setting", func(tt *testing.T) { + var fromAddresses []string + for i := 0; i < 3; i++ { + fromAddresses = append(fromAddresses, testutils.NewAddress().Hex()) + } + + cfg := vrf_mocks.NewFeeConfig(t) + cfg.On("PriceMaxKey", common.HexToAddress(fromAddresses[0])).Return(assets.GWei(100)).Once() + cfg.On("PriceMaxKey", common.HexToAddress(fromAddresses[1])).Return(assets.GWei(100)).Once() + // last from address has wrong key-specific max gas price + cfg.On("PriceMaxKey", common.HexToAddress(fromAddresses[2])).Return(assets.GWei(50)).Once() + defer cfg.AssertExpectations(tt) + + jb, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec( + testspecs.VRFSpecParams{ + RequestedConfsDelay: 10, + FromAddresses: fromAddresses, + ChunkSize: 25, + BackoffInitialDelay: time.Minute, + BackoffMaxDelay: time.Hour, + GasLanePrice: assets.GWei(100), + }). 
+ Toml()) + require.NoError(t, err) + + require.Error(tt, vrf.CheckFromAddressMaxGasPrices(jb, cfg.PriceMaxKey)) + }) +} + +func Test_CheckFromAddressesExist(t *testing.T) { + t.Run("from addresses exist", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + lggr := logger.TestLogger(t) + ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg.Database()) + require.NoError(t, ks.Unlock(testutils.Password)) + + var fromAddresses []string + for i := 0; i < 3; i++ { + k, err := ks.Eth().Create(big.NewInt(1337)) + assert.NoError(t, err) + fromAddresses = append(fromAddresses, k.Address.Hex()) + } + jb, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec( + testspecs.VRFSpecParams{ + RequestedConfsDelay: 10, + FromAddresses: fromAddresses, + ChunkSize: 25, + BackoffInitialDelay: time.Minute, + BackoffMaxDelay: time.Hour, + GasLanePrice: assets.GWei(100), + }). + Toml()) + assert.NoError(t, err) + + assert.NoError(t, vrf.CheckFromAddressesExist(jb, ks.Eth())) + }) + + t.Run("one of from addresses doesn't exist", func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + lggr := logger.TestLogger(t) + ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg.Database()) + require.NoError(t, ks.Unlock(testutils.Password)) + + var fromAddresses []string + for i := 0; i < 3; i++ { + k, err := ks.Eth().Create(big.NewInt(1337)) + assert.NoError(t, err) + fromAddresses = append(fromAddresses, k.Address.Hex()) + } + // add an address that isn't in the keystore + fromAddresses = append(fromAddresses, testutils.NewAddress().Hex()) + jb, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec( + testspecs.VRFSpecParams{ + RequestedConfsDelay: 10, + FromAddresses: fromAddresses, + ChunkSize: 25, + BackoffInitialDelay: time.Minute, + BackoffMaxDelay: time.Hour, + GasLanePrice: assets.GWei(100), + }). 
+ Toml()) + assert.NoError(t, err) + + assert.Error(t, vrf.CheckFromAddressesExist(jb, ks.Eth())) + }) +} + +func Test_FromAddressMaxGasPricesAllEqual(t *testing.T) { + t.Run("all max gas prices equal", func(tt *testing.T) { + fromAddresses := []string{ + "0x498C2Dce1d3aEDE31A8c808c511C38a809e67684", + "0x253b01b9CaAfbB9dC138d7D8c3ACBCDd47144b4B", + "0xD94E6AD557277c6E3e163cefF90F52AB51A95143", + } + + jb, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + RequestedConfsDelay: 10, + FromAddresses: fromAddresses, + ChunkSize: 25, + BackoffInitialDelay: time.Minute, + BackoffMaxDelay: time.Hour, + GasLanePrice: assets.GWei(100), + }).Toml()) + require.NoError(tt, err) + + cfg := vrf_mocks.NewFeeConfig(t) + for _, a := range fromAddresses { + cfg.On("PriceMaxKey", common.HexToAddress(a)).Return(assets.GWei(100)) + } + defer cfg.AssertExpectations(tt) + + assert.True(tt, vrf.FromAddressMaxGasPricesAllEqual(jb, cfg.PriceMaxKey)) + }) + + t.Run("one max gas price not equal to others", func(tt *testing.T) { + fromAddresses := []string{ + "0x498C2Dce1d3aEDE31A8c808c511C38a809e67684", + "0x253b01b9CaAfbB9dC138d7D8c3ACBCDd47144b4B", + "0xD94E6AD557277c6E3e163cefF90F52AB51A95143", + "0x86E7c45Bf013Bf1Df3C22c14d5fd6fc3051AC569", + } + + jb, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + RequestedConfsDelay: 10, + FromAddresses: fromAddresses, + ChunkSize: 25, + BackoffInitialDelay: time.Minute, + BackoffMaxDelay: time.Hour, + GasLanePrice: assets.GWei(100), + }).Toml()) + require.NoError(tt, err) + + cfg := vrf_mocks.NewFeeConfig(t) + for _, a := range fromAddresses[:3] { + cfg.On("PriceMaxKey", common.HexToAddress(a)).Return(assets.GWei(100)) + } + cfg.On("PriceMaxKey", common.HexToAddress(fromAddresses[len(fromAddresses)-1])). 
+ Return(assets.GWei(200)) // doesn't match the rest + defer cfg.AssertExpectations(tt) + + assert.False(tt, vrf.FromAddressMaxGasPricesAllEqual(jb, cfg.PriceMaxKey)) + }) +} + +func Test_VRFV2PlusServiceFailsWhenVRFOwnerProvided(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewTestGeneralConfig(t) + vuni := buildVrfUni(t, db, cfg) + + mailMon := servicetest.Run(t, mailboxtest.NewMonitor(t)) + + vd := vrf.NewDelegate( + db, + vuni.ks, + vuni.pr, + vuni.prm, + vuni.legacyChains, + logger.TestLogger(t), + cfg.Database(), + mailMon) + chain, err := vuni.legacyChains.Get(testutils.FixtureChainID.String()) + require.NoError(t, err) + vs := testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + VRFVersion: vrfcommon.V2Plus, + PublicKey: vuni.vrfkey.PublicKey.String(), + FromAddresses: []string{vuni.submitter.Hex()}, + GasLanePrice: chain.Config().EVM().GasEstimator().PriceMax(), + }) + toml := "vrfOwnerAddress=\"0xF62fEFb54a0af9D32CDF0Db21C52710844c7eddb\"\n" + vs.Toml() + jb, err := vrfcommon.ValidatedVRFSpec(toml) + require.NoError(t, err) + err = vuni.jrm.CreateJob(&jb) + require.NoError(t, err) + _, err = vd.ServicesForSpec(jb) + require.Error(t, err) + require.Equal(t, "VRF Owner is not supported for VRF V2 Plus", err.Error()) +} diff --git a/core/services/vrf/doc.go b/core/services/vrf/doc.go new file mode 100644 index 00000000..89e1f011 --- /dev/null +++ b/core/services/vrf/doc.go @@ -0,0 +1,32 @@ +// Package vrf provides a cryptographically secure pseudo-random number generator. + +// Numbers are deterministically generated from seeds and a secret key, and are +// statistically indistinguishable from uniform sampling from {0,...,2**256-1}, +// to computationally-bounded observers who know the seeds, don't know the key, +// and only see the generated numbers. But each number also comes with a proof +// that it was generated according to the procedure mandated by a public key +// associated with that secret key. +// +// See VRF.sol for design notes. 
+// +// Usage +// ----- +// +// You should probably not be using this directly. +// plugin/store/core/models/vrfkey.PrivateKey provides a simple, more +// misuse-resistant interface to the same functionality, via the CreateKey and +// MarshaledProof methods. +// +// Nonetheless, a secret key sk should be securely sampled uniformly from +// {0,...,Order-1}. Its public key can be calculated from it by +// +// secp256k1.Secp256k1{}.Point().Mul(secretKey, Generator) +// +// To generate random output from a big.Int seed, pass sk and the seed to +// GenerateProof, and use the Output field of the returned Proof object. +// +// To verify a Proof object p, run p.Verify(); or to verify it on-chain pass +// p.MarshalForSolidityVerifier() to randomValueFromVRFProof on the VRF solidity +// contract. + +package vrf diff --git a/core/services/vrf/extraargs/types.go b/core/services/vrf/extraargs/types.go new file mode 100644 index 00000000..3db5cecc --- /dev/null +++ b/core/services/vrf/extraargs/types.go @@ -0,0 +1,34 @@ +package extraargs + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/crypto" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" +) + +const functionSignatureLength = 4 +const boolAbiType = `[{ "type": "bool" }]` + +var extraArgsV1Tag = crypto.Keccak256([]byte("VRF ExtraArgsV1"))[:4] + +func FromExtraArgsV1(extraArgs []byte) (nativePayment bool, err error) { + decodedBool, err := utils.ABIDecode(boolAbiType, extraArgs[functionSignatureLength:]) + if err != nil { + return false, fmt.Errorf("failed to decode 0x%x to bool", extraArgs[functionSignatureLength:]) + } + nativePayment, ok := decodedBool[0].(bool) + if !ok { + return false, fmt.Errorf("failed to decode 0x%x to bool", extraArgs[functionSignatureLength:]) + } + return nativePayment, nil +} + +func ExtraArgsV1(nativePayment bool) ([]byte, error) { + encodedArgs, err := utils.ABIEncode(boolAbiType, nativePayment) + if err != nil { + return nil, err + } + return append(extraArgsV1Tag, 
encodedArgs...), nil +} diff --git a/core/services/vrf/mocks/aggregator_v3_interface.go b/core/services/vrf/mocks/aggregator_v3_interface.go new file mode 100644 index 00000000..3699fe18 --- /dev/null +++ b/core/services/vrf/mocks/aggregator_v3_interface.go @@ -0,0 +1,196 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + aggregator_v3_interface "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/aggregator_v3_interface" + + bind "github.com/ethereum/go-ethereum/accounts/abi/bind" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" +) + +// AggregatorV3Interface is an autogenerated mock type for the AggregatorV3InterfaceInterface type +type AggregatorV3Interface struct { + mock.Mock +} + +// Address provides a mock function with given fields: +func (_m *AggregatorV3Interface) Address() common.Address { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 common.Address + if rf, ok := ret.Get(0).(func() common.Address); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + return r0 +} + +// Decimals provides a mock function with given fields: opts +func (_m *AggregatorV3Interface) Decimals(opts *bind.CallOpts) (uint8, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Decimals") + } + + var r0 uint8 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint8, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint8); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint8) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Description provides a mock function with given fields: opts +func (_m *AggregatorV3Interface) Description(opts *bind.CallOpts) (string, error) { + ret := 
_m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Description") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (string, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) string); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRoundData provides a mock function with given fields: opts, _roundId +func (_m *AggregatorV3Interface) GetRoundData(opts *bind.CallOpts, _roundId *big.Int) (aggregator_v3_interface.GetRoundData, error) { + ret := _m.Called(opts, _roundId) + + if len(ret) == 0 { + panic("no return value specified for GetRoundData") + } + + var r0 aggregator_v3_interface.GetRoundData + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) (aggregator_v3_interface.GetRoundData, error)); ok { + return rf(opts, _roundId) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) aggregator_v3_interface.GetRoundData); ok { + r0 = rf(opts, _roundId) + } else { + r0 = ret.Get(0).(aggregator_v3_interface.GetRoundData) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); ok { + r1 = rf(opts, _roundId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestRoundData provides a mock function with given fields: opts +func (_m *AggregatorV3Interface) LatestRoundData(opts *bind.CallOpts) (aggregator_v3_interface.LatestRoundData, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for LatestRoundData") + } + + var r0 aggregator_v3_interface.LatestRoundData + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (aggregator_v3_interface.LatestRoundData, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) aggregator_v3_interface.LatestRoundData); ok { + r0 = rf(opts) + } else { + r0 = 
ret.Get(0).(aggregator_v3_interface.LatestRoundData) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Version provides a mock function with given fields: opts +func (_m *AggregatorV3Interface) Version(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Version") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewAggregatorV3Interface creates a new instance of AggregatorV3Interface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAggregatorV3Interface(t interface { + mock.TestingT + Cleanup(func()) +}) *AggregatorV3Interface { + mock := &AggregatorV3Interface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/vrf/mocks/config.go b/core/services/vrf/mocks/config.go new file mode 100644 index 00000000..b46a28ec --- /dev/null +++ b/core/services/vrf/mocks/config.go @@ -0,0 +1,60 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Config is an autogenerated mock type for the Config type +type Config struct { + mock.Mock +} + +// FinalityDepth provides a mock function with given fields: +func (_m *Config) FinalityDepth() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalityDepth") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// MinIncomingConfirmations provides a mock function with given fields: +func (_m *Config) MinIncomingConfirmations() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for MinIncomingConfirmations") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// NewConfig creates a new instance of Config. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConfig(t interface { + mock.TestingT + Cleanup(func()) +}) *Config { + mock := &Config{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/vrf/mocks/fee_config.go b/core/services/vrf/mocks/fee_config.go new file mode 100644 index 00000000..4fe92b7a --- /dev/null +++ b/core/services/vrf/mocks/fee_config.go @@ -0,0 +1,89 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + common "github.com/ethereum/go-ethereum/common" + assets "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + + config "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + + mock "github.com/stretchr/testify/mock" +) + +// FeeConfig is an autogenerated mock type for the FeeConfig type +type FeeConfig struct { + mock.Mock +} + +// LimitDefault provides a mock function with given fields: +func (_m *FeeConfig) LimitDefault() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LimitDefault") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// LimitJobType provides a mock function with given fields: +func (_m *FeeConfig) LimitJobType() config.LimitJobType { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LimitJobType") + } + + var r0 config.LimitJobType + if rf, ok := ret.Get(0).(func() config.LimitJobType); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(config.LimitJobType) + } + } + + return r0 +} + +// PriceMaxKey provides a mock function with given fields: addr +func (_m *FeeConfig) PriceMaxKey(addr common.Address) *assets.Wei { + ret := _m.Called(addr) + + if len(ret) == 0 { + panic("no return value specified for PriceMaxKey") + } + + var r0 *assets.Wei + if rf, ok := ret.Get(0).(func(common.Address) *assets.Wei); ok { + r0 = rf(addr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*assets.Wei) + } + } + + return r0 +} + +// NewFeeConfig creates a new instance of FeeConfig. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewFeeConfig(t interface { + mock.TestingT + Cleanup(func()) +}) *FeeConfig { + mock := &FeeConfig{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/vrf/mocks/vrf_coordinator_v2.go b/core/services/vrf/mocks/vrf_coordinator_v2.go new file mode 100644 index 00000000..040cbb5e --- /dev/null +++ b/core/services/vrf/mocks/vrf_coordinator_v2.go @@ -0,0 +1,2513 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + bind "github.com/ethereum/go-ethereum/accounts/abi/bind" + common "github.com/ethereum/go-ethereum/common" + + event "github.com/ethereum/go-ethereum/event" + + generated "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" + + vrf_coordinator_v2 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" +) + +// VRFCoordinatorV2Interface is an autogenerated mock type for the VRFCoordinatorV2Interface type +type VRFCoordinatorV2Interface struct { + mock.Mock +} + +// AcceptOwnership provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for AcceptOwnership") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) (*types.Transaction, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) *types.Transaction); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AcceptSubscriptionOwnerTransfer provides a mock function with given fields: opts, subId +func 
(_m *VRFCoordinatorV2Interface) AcceptSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for AcceptSubscriptionOwnerTransfer") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64) (*types.Transaction, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64) *types.Transaction); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, uint64) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AddConsumer provides a mock function with given fields: opts, subId, consumer +func (_m *VRFCoordinatorV2Interface) AddConsumer(opts *bind.TransactOpts, subId uint64, consumer common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, subId, consumer) + + if len(ret) == 0 { + panic("no return value specified for AddConsumer") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64, common.Address) (*types.Transaction, error)); ok { + return rf(opts, subId, consumer) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64, common.Address) *types.Transaction); ok { + r0 = rf(opts, subId, consumer) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, uint64, common.Address) error); ok { + r1 = rf(opts, subId, consumer) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Address provides a mock function with given fields: +func (_m *VRFCoordinatorV2Interface) Address() common.Address { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 common.Address + if rf, ok := 
ret.Get(0).(func() common.Address); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + return r0 +} + +// BLOCKHASHSTORE provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) BLOCKHASHSTORE(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for BLOCKHASHSTORE") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CancelSubscription provides a mock function with given fields: opts, subId, to +func (_m *VRFCoordinatorV2Interface) CancelSubscription(opts *bind.TransactOpts, subId uint64, to common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, subId, to) + + if len(ret) == 0 { + panic("no return value specified for CancelSubscription") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64, common.Address) (*types.Transaction, error)); ok { + return rf(opts, subId, to) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64, common.Address) *types.Transaction); ok { + r0 = rf(opts, subId, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, uint64, common.Address) error); ok { + r1 = rf(opts, subId, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateSubscription provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + ret := 
_m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for CreateSubscription") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) (*types.Transaction, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts) *types.Transaction); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeregisterProvingKey provides a mock function with given fields: opts, publicProvingKey +func (_m *VRFCoordinatorV2Interface) DeregisterProvingKey(opts *bind.TransactOpts, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + ret := _m.Called(opts, publicProvingKey) + + if len(ret) == 0 { + panic("no return value specified for DeregisterProvingKey") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, [2]*big.Int) (*types.Transaction, error)); ok { + return rf(opts, publicProvingKey) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, [2]*big.Int) *types.Transaction); ok { + r0 = rf(opts, publicProvingKey) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, [2]*big.Int) error); ok { + r1 = rf(opts, publicProvingKey) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterConfigSet provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) FilterConfigSet(opts *bind.FilterOpts) (*vrf_coordinator_v2.VRFCoordinatorV2ConfigSetIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterConfigSet") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2ConfigSetIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) 
(*vrf_coordinator_v2.VRFCoordinatorV2ConfigSetIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *vrf_coordinator_v2.VRFCoordinatorV2ConfigSetIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2ConfigSetIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterFundsRecovered provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) FilterFundsRecovered(opts *bind.FilterOpts) (*vrf_coordinator_v2.VRFCoordinatorV2FundsRecoveredIterator, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for FilterFundsRecovered") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2FundsRecoveredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) (*vrf_coordinator_v2.VRFCoordinatorV2FundsRecoveredIterator, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts) *vrf_coordinator_v2.VRFCoordinatorV2FundsRecoveredIterator); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2FundsRecoveredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOwnershipTransferRequested provides a mock function with given fields: opts, from, to +func (_m *VRFCoordinatorV2Interface) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequestedIterator, error) { + ret := _m.Called(opts, from, to) + + if len(ret) == 0 { + panic("no return value specified for FilterOwnershipTransferRequested") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequestedIterator + var r1 error + if rf, 
ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequestedIterator, error)); ok { + return rf(opts, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequestedIterator); ok { + r0 = rf(opts, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterOwnershipTransferred provides a mock function with given fields: opts, from, to +func (_m *VRFCoordinatorV2Interface) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferredIterator, error) { + ret := _m.Called(opts, from, to) + + if len(ret) == 0 { + panic("no return value specified for FilterOwnershipTransferred") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) (*vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferredIterator, error)); ok { + return rf(opts, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address, []common.Address) *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferredIterator); ok { + r0 = rf(opts, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address, []common.Address) error); ok { + r1 = rf(opts, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterProvingKeyDeregistered provides a mock function with given fields: 
opts, oracle +func (_m *VRFCoordinatorV2Interface) FilterProvingKeyDeregistered(opts *bind.FilterOpts, oracle []common.Address) (*vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregisteredIterator, error) { + ret := _m.Called(opts, oracle) + + if len(ret) == 0 { + panic("no return value specified for FilterProvingKeyDeregistered") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregisteredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address) (*vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregisteredIterator, error)); ok { + return rf(opts, oracle) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address) *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregisteredIterator); ok { + r0 = rf(opts, oracle) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregisteredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address) error); ok { + r1 = rf(opts, oracle) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterProvingKeyRegistered provides a mock function with given fields: opts, oracle +func (_m *VRFCoordinatorV2Interface) FilterProvingKeyRegistered(opts *bind.FilterOpts, oracle []common.Address) (*vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegisteredIterator, error) { + ret := _m.Called(opts, oracle) + + if len(ret) == 0 { + panic("no return value specified for FilterProvingKeyRegistered") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegisteredIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address) (*vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegisteredIterator, error)); ok { + return rf(opts, oracle) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []common.Address) *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegisteredIterator); ok { + r0 = rf(opts, oracle) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegisteredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []common.Address) error); ok { + r1 = rf(opts, oracle) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRandomWordsFulfilled provides a mock function with given fields: opts, requestId +func (_m *VRFCoordinatorV2Interface) FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestId []*big.Int) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilledIterator, error) { + ret := _m.Called(opts, requestId) + + if len(ret) == 0 { + panic("no return value specified for FilterRandomWordsFulfilled") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilledIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilledIterator, error)); ok { + return rf(opts, requestId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []*big.Int) *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilledIterator); ok { + r0 = rf(opts, requestId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilledIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []*big.Int) error); ok { + r1 = rf(opts, requestId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterRandomWordsRequested provides a mock function with given fields: opts, keyHash, subId, sender +func (_m *VRFCoordinatorV2Interface) FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subId []uint64, sender []common.Address) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequestedIterator, error) { + ret := _m.Called(opts, keyHash, subId, sender) + + if len(ret) == 0 { + panic("no return value specified for FilterRandomWordsRequested") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequestedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, 
[][32]byte, []uint64, []common.Address) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequestedIterator, error)); ok { + return rf(opts, keyHash, subId, sender) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, [][32]byte, []uint64, []common.Address) *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequestedIterator); ok { + r0 = rf(opts, keyHash, subId, sender) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, [][32]byte, []uint64, []common.Address) error); ok { + r1 = rf(opts, keyHash, subId, sender) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionCanceled provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorV2Interface) FilterSubscriptionCanceled(opts *bind.FilterOpts, subId []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceledIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionCanceled") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceledIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceledIterator, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceledIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceledIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []uint64) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionConsumerAdded provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorV2Interface) FilterSubscriptionConsumerAdded(opts *bind.FilterOpts, subId []uint64) 
(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAddedIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionConsumerAdded") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAddedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAddedIterator, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAddedIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAddedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []uint64) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionConsumerRemoved provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorV2Interface) FilterSubscriptionConsumerRemoved(opts *bind.FilterOpts, subId []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemovedIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionConsumerRemoved") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemovedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemovedIterator, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemovedIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemovedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []uint64) error); 
ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionCreated provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorV2Interface) FilterSubscriptionCreated(opts *bind.FilterOpts, subId []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreatedIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionCreated") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreatedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreatedIterator, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreatedIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreatedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []uint64) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionFunded provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorV2Interface) FilterSubscriptionFunded(opts *bind.FilterOpts, subId []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFundedIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionFunded") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFundedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFundedIterator, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFundedIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != 
nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFundedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []uint64) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionOwnerTransferRequested provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorV2Interface) FilterSubscriptionOwnerTransferRequested(opts *bind.FilterOpts, subId []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequestedIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionOwnerTransferRequested") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequestedIterator + var r1 error + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequestedIterator, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequestedIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequestedIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []uint64) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FilterSubscriptionOwnerTransferred provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorV2Interface) FilterSubscriptionOwnerTransferred(opts *bind.FilterOpts, subId []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferredIterator, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for FilterSubscriptionOwnerTransferred") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferredIterator + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.FilterOpts, []uint64) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferredIterator, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.FilterOpts, []uint64) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferredIterator); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferredIterator) + } + } + + if rf, ok := ret.Get(1).(func(*bind.FilterOpts, []uint64) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FulfillRandomWords provides a mock function with given fields: opts, proof, rc +func (_m *VRFCoordinatorV2Interface) FulfillRandomWords(opts *bind.TransactOpts, proof vrf_coordinator_v2.VRFProof, rc vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment) (*types.Transaction, error) { + ret := _m.Called(opts, proof, rc) + + if len(ret) == 0 { + panic("no return value specified for FulfillRandomWords") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, vrf_coordinator_v2.VRFProof, vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment) (*types.Transaction, error)); ok { + return rf(opts, proof, rc) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, vrf_coordinator_v2.VRFProof, vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment) *types.Transaction); ok { + r0 = rf(opts, proof, rc) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, vrf_coordinator_v2.VRFProof, vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment) error); ok { + r1 = rf(opts, proof, rc) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCommitment provides a mock function with given fields: opts, requestId +func (_m *VRFCoordinatorV2Interface) GetCommitment(opts *bind.CallOpts, requestId *big.Int) ([32]byte, error) { + ret := _m.Called(opts, 
requestId) + + if len(ret) == 0 { + panic("no return value specified for GetCommitment") + } + + var r0 [32]byte + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) ([32]byte, error)); ok { + return rf(opts, requestId) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, *big.Int) [32]byte); ok { + r0 = rf(opts, requestId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([32]byte) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, *big.Int) error); ok { + r1 = rf(opts, requestId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetConfig provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) GetConfig(opts *bind.CallOpts) (vrf_coordinator_v2.GetConfig, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for GetConfig") + } + + var r0 vrf_coordinator_v2.GetConfig + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (vrf_coordinator_v2.GetConfig, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) vrf_coordinator_v2.GetConfig); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(vrf_coordinator_v2.GetConfig) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCurrentSubId provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) GetCurrentSubId(opts *bind.CallOpts) (uint64, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for GetCurrentSubId") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint64, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint64); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
GetFallbackWeiPerUnitLink provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) GetFallbackWeiPerUnitLink(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for GetFallbackWeiPerUnitLink") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetFeeConfig provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) GetFeeConfig(opts *bind.CallOpts) (vrf_coordinator_v2.GetFeeConfig, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for GetFeeConfig") + } + + var r0 vrf_coordinator_v2.GetFeeConfig + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (vrf_coordinator_v2.GetFeeConfig, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) vrf_coordinator_v2.GetFeeConfig); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(vrf_coordinator_v2.GetFeeConfig) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetFeeTier provides a mock function with given fields: opts, reqCount +func (_m *VRFCoordinatorV2Interface) GetFeeTier(opts *bind.CallOpts, reqCount uint64) (uint32, error) { + ret := _m.Called(opts, reqCount) + + if len(ret) == 0 { + panic("no return value specified for GetFeeTier") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, uint64) (uint32, error)); ok { + return rf(opts, reqCount) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, uint64) uint32); ok { + 
r0 = rf(opts, reqCount) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, uint64) error); ok { + r1 = rf(opts, reqCount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRequestConfig provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) GetRequestConfig(opts *bind.CallOpts) (uint16, uint32, [][32]byte, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for GetRequestConfig") + } + + var r0 uint16 + var r1 uint32 + var r2 [][32]byte + var r3 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint16, uint32, [][32]byte, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint16); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint16) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) uint32); ok { + r1 = rf(opts) + } else { + r1 = ret.Get(1).(uint32) + } + + if rf, ok := ret.Get(2).(func(*bind.CallOpts) [][32]byte); ok { + r2 = rf(opts) + } else { + if ret.Get(2) != nil { + r2 = ret.Get(2).([][32]byte) + } + } + + if rf, ok := ret.Get(3).(func(*bind.CallOpts) error); ok { + r3 = rf(opts) + } else { + r3 = ret.Error(3) + } + + return r0, r1, r2, r3 +} + +// GetSubscription provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorV2Interface) GetSubscription(opts *bind.CallOpts, subId uint64) (vrf_coordinator_v2.GetSubscription, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for GetSubscription") + } + + var r0 vrf_coordinator_v2.GetSubscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, uint64) (vrf_coordinator_v2.GetSubscription, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, uint64) vrf_coordinator_v2.GetSubscription); ok { + r0 = rf(opts, subId) + } else { + r0 = ret.Get(0).(vrf_coordinator_v2.GetSubscription) + } + + if rf, ok := 
ret.Get(1).(func(*bind.CallOpts, uint64) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTotalBalance provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) GetTotalBalance(opts *bind.CallOpts) (*big.Int, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for GetTotalBalance") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (*big.Int, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) *big.Int); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HashOfKey provides a mock function with given fields: opts, publicKey +func (_m *VRFCoordinatorV2Interface) HashOfKey(opts *bind.CallOpts, publicKey [2]*big.Int) ([32]byte, error) { + ret := _m.Called(opts, publicKey) + + if len(ret) == 0 { + panic("no return value specified for HashOfKey") + } + + var r0 [32]byte + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts, [2]*big.Int) ([32]byte, error)); ok { + return rf(opts, publicKey) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, [2]*big.Int) [32]byte); ok { + r0 = rf(opts, publicKey) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([32]byte) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, [2]*big.Int) error); ok { + r1 = rf(opts, publicKey) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PLI provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) PLI(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for PLI") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return 
rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PLIETHFEED provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) PLIETHFEED(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for PLIETHFEED") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MAXCONSUMERS provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) MAXCONSUMERS(opts *bind.CallOpts) (uint16, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for MAXCONSUMERS") + } + + var r0 uint16 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint16, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint16); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint16) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MAXNUMWORDS provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) MAXNUMWORDS(opts *bind.CallOpts) (uint32, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for MAXNUMWORDS") + } + + var r0 uint32 + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.CallOpts) (uint32, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint32); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MAXREQUESTCONFIRMATIONS provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) MAXREQUESTCONFIRMATIONS(opts *bind.CallOpts) (uint16, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for MAXREQUESTCONFIRMATIONS") + } + + var r0 uint16 + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (uint16, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) uint16); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(uint16) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OnTokenTransfer provides a mock function with given fields: opts, arg0, amount, data +func (_m *VRFCoordinatorV2Interface) OnTokenTransfer(opts *bind.TransactOpts, arg0 common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + ret := _m.Called(opts, arg0, amount, data) + + if len(ret) == 0 { + panic("no return value specified for OnTokenTransfer") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int, []byte) (*types.Transaction, error)); ok { + return rf(opts, arg0, amount, data) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int, []byte) *types.Transaction); ok { + r0 = rf(opts, arg0, amount, data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, *big.Int, []byte) error); ok { + r1 = rf(opts, arg0, amount, data) + } else { + r1 = ret.Error(1) + 
} + + return r0, r1 +} + +// OracleWithdraw provides a mock function with given fields: opts, recipient, amount +func (_m *VRFCoordinatorV2Interface) OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + ret := _m.Called(opts, recipient, amount) + + if len(ret) == 0 { + panic("no return value specified for OracleWithdraw") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int) (*types.Transaction, error)); ok { + return rf(opts, recipient, amount) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, *big.Int) *types.Transaction); ok { + r0 = rf(opts, recipient, amount) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, *big.Int) error); ok { + r1 = rf(opts, recipient, amount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Owner provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) Owner(opts *bind.CallOpts) (common.Address, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for Owner") + } + + var r0 common.Address + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (common.Address, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) common.Address); ok { + r0 = rf(opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OwnerCancelSubscription provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorV2Interface) OwnerCancelSubscription(opts *bind.TransactOpts, subId uint64) (*types.Transaction, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for 
OwnerCancelSubscription") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64) (*types.Transaction, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64) *types.Transaction); ok { + r0 = rf(opts, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, uint64) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseConfigSet provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseConfigSet(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2ConfigSet, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseConfigSet") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2ConfigSet + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2ConfigSet, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2ConfigSet); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2ConfigSet) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseFundsRecovered provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseFundsRecovered(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2FundsRecovered, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseFundsRecovered") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2FundsRecovered + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2FundsRecovered, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) 
*vrf_coordinator_v2.VRFCoordinatorV2FundsRecovered); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2FundsRecovered) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseLog provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseLog(log types.Log) (generated.AbigenLog, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseLog") + } + + var r0 generated.AbigenLog + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (generated.AbigenLog, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) generated.AbigenLog); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(generated.AbigenLog) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseOwnershipTransferRequested provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseOwnershipTransferRequested(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOwnershipTransferRequested") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 
+} + +// ParseOwnershipTransferred provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseOwnershipTransferred(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferred, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseOwnershipTransferred") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferred + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferred, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferred); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferred) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseProvingKeyDeregistered provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseProvingKeyDeregistered(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregistered, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseProvingKeyDeregistered") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregistered + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregistered, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregistered); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregistered) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseProvingKeyRegistered provides a mock function with given fields: log +func (_m 
*VRFCoordinatorV2Interface) ParseProvingKeyRegistered(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegistered, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseProvingKeyRegistered") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegistered + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegistered, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegistered); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegistered) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRandomWordsFulfilled provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseRandomWordsFulfilled(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRandomWordsFulfilled") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseRandomWordsRequested provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseRandomWordsRequested(log types.Log) 
(*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseRandomWordsRequested") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionCanceled provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseSubscriptionCanceled(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceled, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubscriptionCanceled") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceled + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceled, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceled); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceled) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionConsumerAdded provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseSubscriptionConsumerAdded(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAdded, error) { + ret := _m.Called(log) + + if len(ret) == 
0 { + panic("no return value specified for ParseSubscriptionConsumerAdded") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAdded + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAdded, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAdded); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAdded) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionConsumerRemoved provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseSubscriptionConsumerRemoved(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemoved, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubscriptionConsumerRemoved") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemoved + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemoved, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemoved); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemoved) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionCreated provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseSubscriptionCreated(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for 
ParseSubscriptionCreated") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionFunded provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseSubscriptionFunded(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubscriptionFunded") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionOwnerTransferRequested provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseSubscriptionOwnerTransferRequested(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequested, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubscriptionOwnerTransferRequested") + } + + var r0 
*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequested + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequested, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequested); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequested) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ParseSubscriptionOwnerTransferred provides a mock function with given fields: log +func (_m *VRFCoordinatorV2Interface) ParseSubscriptionOwnerTransferred(log types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferred, error) { + ret := _m.Called(log) + + if len(ret) == 0 { + panic("no return value specified for ParseSubscriptionOwnerTransferred") + } + + var r0 *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferred + var r1 error + if rf, ok := ret.Get(0).(func(types.Log) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferred, error)); ok { + return rf(log) + } + if rf, ok := ret.Get(0).(func(types.Log) *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferred); ok { + r0 = rf(log) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferred) + } + } + + if rf, ok := ret.Get(1).(func(types.Log) error); ok { + r1 = rf(log) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PendingRequestExists provides a mock function with given fields: opts, subId +func (_m *VRFCoordinatorV2Interface) PendingRequestExists(opts *bind.CallOpts, subId uint64) (bool, error) { + ret := _m.Called(opts, subId) + + if len(ret) == 0 { + panic("no return value specified for PendingRequestExists") + } + + var r0 bool + var r1 error + if rf, ok := 
ret.Get(0).(func(*bind.CallOpts, uint64) (bool, error)); ok { + return rf(opts, subId) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts, uint64) bool); ok { + r0 = rf(opts, subId) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts, uint64) error); ok { + r1 = rf(opts, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RecoverFunds provides a mock function with given fields: opts, to +func (_m *VRFCoordinatorV2Interface) RecoverFunds(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, to) + + if len(ret) == 0 { + panic("no return value specified for RecoverFunds") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, to) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RegisterProvingKey provides a mock function with given fields: opts, oracle, publicProvingKey +func (_m *VRFCoordinatorV2Interface) RegisterProvingKey(opts *bind.TransactOpts, oracle common.Address, publicProvingKey [2]*big.Int) (*types.Transaction, error) { + ret := _m.Called(opts, oracle, publicProvingKey) + + if len(ret) == 0 { + panic("no return value specified for RegisterProvingKey") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, [2]*big.Int) (*types.Transaction, error)); ok { + return rf(opts, oracle, publicProvingKey) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address, [2]*big.Int) *types.Transaction); ok { + r0 = rf(opts, oracle, publicProvingKey) + } else { + if ret.Get(0) != 
nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address, [2]*big.Int) error); ok { + r1 = rf(opts, oracle, publicProvingKey) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoveConsumer provides a mock function with given fields: opts, subId, consumer +func (_m *VRFCoordinatorV2Interface) RemoveConsumer(opts *bind.TransactOpts, subId uint64, consumer common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, subId, consumer) + + if len(ret) == 0 { + panic("no return value specified for RemoveConsumer") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64, common.Address) (*types.Transaction, error)); ok { + return rf(opts, subId, consumer) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64, common.Address) *types.Transaction); ok { + r0 = rf(opts, subId, consumer) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, uint64, common.Address) error); ok { + r1 = rf(opts, subId, consumer) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RequestRandomWords provides a mock function with given fields: opts, keyHash, subId, requestConfirmations, callbackGasLimit, numWords +func (_m *VRFCoordinatorV2Interface) RequestRandomWords(opts *bind.TransactOpts, keyHash [32]byte, subId uint64, requestConfirmations uint16, callbackGasLimit uint32, numWords uint32) (*types.Transaction, error) { + ret := _m.Called(opts, keyHash, subId, requestConfirmations, callbackGasLimit, numWords) + + if len(ret) == 0 { + panic("no return value specified for RequestRandomWords") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, [32]byte, uint64, uint16, uint32, uint32) (*types.Transaction, error)); ok { + return rf(opts, keyHash, subId, requestConfirmations, callbackGasLimit, numWords) + } + if 
rf, ok := ret.Get(0).(func(*bind.TransactOpts, [32]byte, uint64, uint16, uint32, uint32) *types.Transaction); ok { + r0 = rf(opts, keyHash, subId, requestConfirmations, callbackGasLimit, numWords) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, [32]byte, uint64, uint16, uint32, uint32) error); ok { + r1 = rf(opts, keyHash, subId, requestConfirmations, callbackGasLimit, numWords) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RequestSubscriptionOwnerTransfer provides a mock function with given fields: opts, subId, newOwner +func (_m *VRFCoordinatorV2Interface) RequestSubscriptionOwnerTransfer(opts *bind.TransactOpts, subId uint64, newOwner common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, subId, newOwner) + + if len(ret) == 0 { + panic("no return value specified for RequestSubscriptionOwnerTransfer") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64, common.Address) (*types.Transaction, error)); ok { + return rf(opts, subId, newOwner) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint64, common.Address) *types.Transaction); ok { + r0 = rf(opts, subId, newOwner) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, uint64, common.Address) error); ok { + r1 = rf(opts, subId, newOwner) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetConfig provides a mock function with given fields: opts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig +func (_m *VRFCoordinatorV2Interface) SetConfig(opts *bind.TransactOpts, minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig vrf_coordinator_v2.VRFCoordinatorV2FeeConfig) 
(*types.Transaction, error) { + ret := _m.Called(opts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) + + if len(ret) == 0 { + panic("no return value specified for SetConfig") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint16, uint32, uint32, uint32, *big.Int, vrf_coordinator_v2.VRFCoordinatorV2FeeConfig) (*types.Transaction, error)); ok { + return rf(opts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, uint16, uint32, uint32, uint32, *big.Int, vrf_coordinator_v2.VRFCoordinatorV2FeeConfig) *types.Transaction); ok { + r0 = rf(opts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, uint16, uint32, uint32, uint32, *big.Int, vrf_coordinator_v2.VRFCoordinatorV2FeeConfig) error); ok { + r1 = rf(opts, minimumRequestConfirmations, maxGasLimit, stalenessSeconds, gasAfterPaymentCalculation, fallbackWeiPerUnitLink, feeConfig) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransferOwnership provides a mock function with given fields: opts, to +func (_m *VRFCoordinatorV2Interface) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + ret := _m.Called(opts, to) + + if len(ret) == 0 { + panic("no return value specified for TransferOwnership") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) (*types.Transaction, error)); ok { + return rf(opts, to) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, common.Address) *types.Transaction); ok { + r0 = rf(opts, to) + } else { + if ret.Get(0) != 
nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, common.Address) error); ok { + r1 = rf(opts, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TypeAndVersion provides a mock function with given fields: opts +func (_m *VRFCoordinatorV2Interface) TypeAndVersion(opts *bind.CallOpts) (string, error) { + ret := _m.Called(opts) + + if len(ret) == 0 { + panic("no return value specified for TypeAndVersion") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(*bind.CallOpts) (string, error)); ok { + return rf(opts) + } + if rf, ok := ret.Get(0).(func(*bind.CallOpts) string); ok { + r0 = rf(opts) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(*bind.CallOpts) error); ok { + r1 = rf(opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchConfigSet provides a mock function with given fields: opts, sink +func (_m *VRFCoordinatorV2Interface) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2ConfigSet) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchConfigSet") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2ConfigSet) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2ConfigSet) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2ConfigSet) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchFundsRecovered provides a mock function with given fields: opts, sink +func (_m *VRFCoordinatorV2Interface) 
WatchFundsRecovered(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2FundsRecovered) (event.Subscription, error) { + ret := _m.Called(opts, sink) + + if len(ret) == 0 { + panic("no return value specified for WatchFundsRecovered") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2FundsRecovered) (event.Subscription, error)); ok { + return rf(opts, sink) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2FundsRecovered) event.Subscription); ok { + r0 = rf(opts, sink) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2FundsRecovered) error); ok { + r1 = rf(opts, sink) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOwnershipTransferRequested provides a mock function with given fields: opts, sink, from, to +func (_m *VRFCoordinatorV2Interface) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, from, to) + + if len(ret) == 0 { + panic("no return value specified for WatchOwnershipTransferRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequested, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequested, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := 
ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferRequested, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchOwnershipTransferred provides a mock function with given fields: opts, sink, from, to +func (_m *VRFCoordinatorV2Interface) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, from, to) + + if len(ret) == 0 { + panic("no return value specified for WatchOwnershipTransferred") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferred, []common.Address, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, from, to) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferred, []common.Address, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, from, to) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2OwnershipTransferred, []common.Address, []common.Address) error); ok { + r1 = rf(opts, sink, from, to) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchProvingKeyDeregistered provides a mock function with given fields: opts, sink, oracle +func (_m *VRFCoordinatorV2Interface) WatchProvingKeyDeregistered(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregistered, oracle []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, oracle) + + if len(ret) == 0 { + panic("no return value specified for WatchProvingKeyDeregistered") + } + + var r0 event.Subscription + var 
r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregistered, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, oracle) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregistered, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, oracle) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyDeregistered, []common.Address) error); ok { + r1 = rf(opts, sink, oracle) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchProvingKeyRegistered provides a mock function with given fields: opts, sink, oracle +func (_m *VRFCoordinatorV2Interface) WatchProvingKeyRegistered(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegistered, oracle []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, oracle) + + if len(ret) == 0 { + panic("no return value specified for WatchProvingKeyRegistered") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegistered, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, oracle) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegistered, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, oracle) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2ProvingKeyRegistered, []common.Address) error); ok { + r1 = rf(opts, sink, oracle) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRandomWordsFulfilled provides a mock function with given fields: opts, 
sink, requestId +func (_m *VRFCoordinatorV2Interface) WatchRandomWordsFulfilled(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, requestId []*big.Int) (event.Subscription, error) { + ret := _m.Called(opts, sink, requestId) + + if len(ret) == 0 { + panic("no return value specified for WatchRandomWordsFulfilled") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, []*big.Int) (event.Subscription, error)); ok { + return rf(opts, sink, requestId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, []*big.Int) event.Subscription); ok { + r0 = rf(opts, sink, requestId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, []*big.Int) error); ok { + r1 = rf(opts, sink, requestId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchRandomWordsRequested provides a mock function with given fields: opts, sink, keyHash, subId, sender +func (_m *VRFCoordinatorV2Interface) WatchRandomWordsRequested(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested, keyHash [][32]byte, subId []uint64, sender []common.Address) (event.Subscription, error) { + ret := _m.Called(opts, sink, keyHash, subId, sender) + + if len(ret) == 0 { + panic("no return value specified for WatchRandomWordsRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested, [][32]byte, []uint64, []common.Address) (event.Subscription, error)); ok { + return rf(opts, sink, keyHash, subId, sender) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- 
*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested, [][32]byte, []uint64, []common.Address) event.Subscription); ok { + r0 = rf(opts, sink, keyHash, subId, sender) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested, [][32]byte, []uint64, []common.Address) error); ok { + r1 = rf(opts, sink, keyHash, subId, sender) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionCanceled provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorV2Interface) WatchSubscriptionCanceled(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceled, subId []uint64) (event.Subscription, error) { + ret := _m.Called(opts, sink, subId) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionCanceled") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceled, []uint64) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceled, []uint64) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceled, []uint64) error); ok { + r1 = rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionConsumerAdded provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorV2Interface) WatchSubscriptionConsumerAdded(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAdded, subId []uint64) (event.Subscription, error) { + ret := 
_m.Called(opts, sink, subId) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionConsumerAdded") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAdded, []uint64) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAdded, []uint64) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAdded, []uint64) error); ok { + r1 = rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionConsumerRemoved provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorV2Interface) WatchSubscriptionConsumerRemoved(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemoved, subId []uint64) (event.Subscription, error) { + ret := _m.Called(opts, sink, subId) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionConsumerRemoved") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemoved, []uint64) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemoved, []uint64) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemoved, []uint64) error); ok { + r1 = 
rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionCreated provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorV2Interface) WatchSubscriptionCreated(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated, subId []uint64) (event.Subscription, error) { + ret := _m.Called(opts, sink, subId) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionCreated") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated, []uint64) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated, []uint64) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated, []uint64) error); ok { + r1 = rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionFunded provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorV2Interface) WatchSubscriptionFunded(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded, subId []uint64) (event.Subscription, error) { + ret := _m.Called(opts, sink, subId) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionFunded") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded, []uint64) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded, 
[]uint64) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded, []uint64) error); ok { + r1 = rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionOwnerTransferRequested provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorV2Interface) WatchSubscriptionOwnerTransferRequested(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequested, subId []uint64) (event.Subscription, error) { + ret := _m.Called(opts, sink, subId) + + if len(ret) == 0 { + panic("no return value specified for WatchSubscriptionOwnerTransferRequested") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequested, []uint64) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequested, []uint64) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferRequested, []uint64) error); ok { + r1 = rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WatchSubscriptionOwnerTransferred provides a mock function with given fields: opts, sink, subId +func (_m *VRFCoordinatorV2Interface) WatchSubscriptionOwnerTransferred(opts *bind.WatchOpts, sink chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferred, subId []uint64) (event.Subscription, error) { + ret := _m.Called(opts, sink, subId) + + if len(ret) == 0 
{ + panic("no return value specified for WatchSubscriptionOwnerTransferred") + } + + var r0 event.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferred, []uint64) (event.Subscription, error)); ok { + return rf(opts, sink, subId) + } + if rf, ok := ret.Get(0).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferred, []uint64) event.Subscription); ok { + r0 = rf(opts, sink, subId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(event.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(*bind.WatchOpts, chan<- *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionOwnerTransferred, []uint64) error); ok { + r1 = rf(opts, sink, subId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewVRFCoordinatorV2Interface creates a new instance of VRFCoordinatorV2Interface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewVRFCoordinatorV2Interface(t interface { + mock.TestingT + Cleanup(func()) +}) *VRFCoordinatorV2Interface { + mock := &VRFCoordinatorV2Interface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/vrf/proof/proof_response.go b/core/services/vrf/proof/proof_response.go new file mode 100644 index 00000000..e220ab23 --- /dev/null +++ b/core/services/vrf/proof/proof_response.go @@ -0,0 +1,206 @@ +package proof + +// Contains logic/data for mandatorily mixing VRF seeds with the hash of the +// block in which a VRF request appeared + +import ( + "math/big" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" +) + +// ProofResponse is the data which is sent back to the VRFCoordinator, so that +// it can verify that the seed the oracle finally used is correct. +type ProofResponse struct { + // Approximately the proof which will be checked on-chain. Note that this + // contains the pre-seed in place of the final seed. That should be computed + // as in FinalSeed. + P vrfkey.Proof + PreSeed Seed // Seed received during VRF request + BlockNum uint64 // Height of the block in which this request was made +} + +// OnChainResponseLength is the length of the MarshaledOnChainResponse. The +// extra 32 bytes are for blocknumber (as a uint256), which goes at the end. The +// seed is rewritten with the preSeed. (See MarshalForVRFCoordinator and +// ProofResponse#ActualProof.) 
+const OnChainResponseLength = ProofLength + + 32 // blocknum + +// MarshaledOnChainResponse is the flat bytes which are sent back to the +// VRFCoordinator. +type MarshaledOnChainResponse [OnChainResponseLength]byte + +// MarshalForVRFCoordinator constructs the flat bytes which are sent to the +// VRFCoordinator. +func (p *ProofResponse) MarshalForVRFCoordinator() ( + response MarshaledOnChainResponse, err error) { + solidityProof, err := SolidityPrecalculations(&p.P) + if err != nil { + return MarshaledOnChainResponse{}, errors.Wrap(err, + "while marshaling proof for VRFCoordinator") + } + // Overwrite seed input to the VRF proof generator with the seed the + // VRFCoordinator originally requested, so that it can identify the request + // corresponding to this response, and compute the final seed itself using the + // blockhash it infers from the block number. + solidityProof.P.Seed = common.BytesToHash(p.PreSeed[:]).Big() + mProof := solidityProof.MarshalForSolidityVerifier() + wireBlockNum := utils.EVMWordUint64(p.BlockNum) + rl := copy(response[:], append(mProof[:], wireBlockNum...)) + if rl != OnChainResponseLength { + return MarshaledOnChainResponse{}, errors.Errorf( + "wrong length for response to VRFCoordinator") + } + return response, nil +} + +// UnmarshalProofResponse returns the ProofResponse represented by the bytes in m +func UnmarshalProofResponse(m MarshaledOnChainResponse) (*ProofResponse, error) { + blockNum := common.BytesToHash(m[ProofLength : ProofLength+32]).Big().Uint64() + proof, err := UnmarshalSolidityProof(m[:ProofLength]) + if err != nil { + return nil, errors.Wrap(err, "while parsing ProofResponse") + } + preSeed, err := BigToSeed(proof.Seed) + if err != nil { + return nil, errors.Wrap(err, "while converting seed to bytes representation") + } + return &ProofResponse{P: proof, PreSeed: preSeed, BlockNum: blockNum}, nil +} + +// CryptoProof returns the proof implied by p, with the correct seed +func (p ProofResponse) CryptoProof(s 
PreSeedData) (vrfkey.Proof, error) { + proof := p.P // Copy P, which has wrong seed value + proof.Seed = FinalSeed(s) + valid, err := proof.VerifyVRFProof() + if err != nil { + return vrfkey.Proof{}, errors.Wrap(err, + "could not validate proof implied by on-chain response") + } + if !valid { + return vrfkey.Proof{}, errors.Errorf( + "proof implied by on-chain response is invalid") + } + return proof, nil +} + +func GenerateProofResponseFromProof(proof vrfkey.Proof, s PreSeedData) (MarshaledOnChainResponse, error) { + p := ProofResponse{P: proof, PreSeed: s.PreSeed, BlockNum: s.BlockNum} + rv, err := p.MarshalForVRFCoordinator() + if err != nil { + return MarshaledOnChainResponse{}, err + } + return rv, nil +} + +func GenerateProofResponseFromProofV2(p vrfkey.Proof, s PreSeedDataV2) (vrf_coordinator_v2.VRFProof, vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment, error) { + var proof vrf_coordinator_v2.VRFProof + var rc vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment + solidityProof, err := SolidityPrecalculations(&p) + if err != nil { + return proof, rc, errors.Wrap(err, + "while marshaling proof for VRFCoordinatorV2") + } + solidityProof.P.Seed = common.BytesToHash(s.PreSeed[:]).Big() + x, y := secp256k1.Coordinates(solidityProof.P.PublicKey) + gx, gy := secp256k1.Coordinates(solidityProof.P.Gamma) + cgx, cgy := secp256k1.Coordinates(solidityProof.CGammaWitness) + shx, shy := secp256k1.Coordinates(solidityProof.SHashWitness) + return vrf_coordinator_v2.VRFProof{ + Pk: [2]*big.Int{x, y}, + Gamma: [2]*big.Int{gx, gy}, + C: solidityProof.P.C, + S: solidityProof.P.S, + Seed: common.BytesToHash(s.PreSeed[:]).Big(), + UWitness: solidityProof.UWitness, + CGammaWitness: [2]*big.Int{cgx, cgy}, + SHashWitness: [2]*big.Int{shx, shy}, + ZInv: solidityProof.ZInv, + }, vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment{ + BlockNum: s.BlockNum, + SubId: s.SubId, + CallbackGasLimit: s.CallbackGasLimit, + NumWords: s.NumWords, + Sender: s.Sender, + }, nil +} + +func 
GenerateProofResponseFromProofV2Plus( + p vrfkey.Proof, + s PreSeedDataV2Plus) ( + vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalProof, + vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRequestCommitment, + error) { + var proof vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalProof + var rc vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRequestCommitment + solidityProof, err := SolidityPrecalculations(&p) + if err != nil { + return proof, rc, errors.Wrap(err, + "while marshaling proof for VRFCoordinatorV2Plus") + } + solidityProof.P.Seed = common.BytesToHash(s.PreSeed[:]).Big() + x, y := secp256k1.Coordinates(solidityProof.P.PublicKey) + gx, gy := secp256k1.Coordinates(solidityProof.P.Gamma) + cgx, cgy := secp256k1.Coordinates(solidityProof.CGammaWitness) + shx, shy := secp256k1.Coordinates(solidityProof.SHashWitness) + return vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalProof{ + Pk: [2]*big.Int{x, y}, + Gamma: [2]*big.Int{gx, gy}, + C: solidityProof.P.C, + S: solidityProof.P.S, + Seed: common.BytesToHash(s.PreSeed[:]).Big(), + UWitness: solidityProof.UWitness, + CGammaWitness: [2]*big.Int{cgx, cgy}, + SHashWitness: [2]*big.Int{shx, shy}, + ZInv: solidityProof.ZInv, + }, vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRequestCommitment{ + BlockNum: s.BlockNum, + SubId: s.SubId, + CallbackGasLimit: s.CallbackGasLimit, + NumWords: s.NumWords, + Sender: s.Sender, + ExtraArgs: s.ExtraArgs, + }, nil +} + +func GenerateProofResponse(keystore keystore.VRF, id string, s PreSeedData) ( + MarshaledOnChainResponse, error) { + seed := FinalSeed(s) + proof, err := keystore.GenerateProof(id, seed) + if err != nil { + return MarshaledOnChainResponse{}, err + } + return GenerateProofResponseFromProof(proof, s) +} + +func GenerateProofResponseV2(keystore keystore.VRF, id string, s PreSeedDataV2) ( + vrf_coordinator_v2.VRFProof, vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment, error) { + 
seedHashMsg := append(s.PreSeed[:], s.BlockHash.Bytes()...) + seed := utils.MustHash(string(seedHashMsg)).Big() + proof, err := keystore.GenerateProof(id, seed) + if err != nil { + return vrf_coordinator_v2.VRFProof{}, vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment{}, err + } + return GenerateProofResponseFromProofV2(proof, s) +} + +func GenerateProofResponseV2Plus(keystore keystore.VRF, id string, s PreSeedDataV2Plus) ( + vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalProof, vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRequestCommitment, error) { + seedHashMsg := append(s.PreSeed[:], s.BlockHash.Bytes()...) + seed := utils.MustHash(string(seedHashMsg)).Big() + proof, err := keystore.GenerateProof(id, seed) + if err != nil { + return vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalProof{}, vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRequestCommitment{}, err + } + return GenerateProofResponseFromProofV2Plus(proof, s) +} diff --git a/core/services/vrf/proof/proof_response_test.go b/core/services/vrf/proof/proof_response_test.go new file mode 100644 index 00000000..67afff27 --- /dev/null +++ b/core/services/vrf/proof/proof_response_test.go @@ -0,0 +1,57 @@ +package proof_test + +import ( + "math/big" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_verifier_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + proof2 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" +) + +func TestMarshaledProof(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := configtest.NewGeneralConfig(t, nil) + keyStore := cltest.NewKeyStore(t, db, cfg.Database()) + key := cltest.DefaultVRFKey + require.NoError(t, keyStore.VRF().Add(key)) + blockHash := common.Hash{} + blockNum := 0 + preSeed := big.NewInt(1) + s := proof2.TestXXXSeedData(t, preSeed, blockHash, blockNum) + proofResponse, err := proof2.GenerateProofResponse(keyStore.VRF(), key.ID(), s) + require.NoError(t, err) + goProof, err := proof2.UnmarshalProofResponse(proofResponse) + require.NoError(t, err) + actualProof, err := goProof.CryptoProof(s) + require.NoError(t, err) + proof, err := proof2.MarshalForSolidityVerifier(&actualProof) + require.NoError(t, err) + // NB: For changes to the VRF solidity code to be reflected here, "go generate" + // must be run in core/services/vrf. + ethereumKey, _ := crypto.GenerateKey() + auth, err := bind.NewKeyedTransactorWithChainID(ethereumKey, big.NewInt(1337)) + require.NoError(t, err) + genesisData := core.GenesisAlloc{auth.From: {Balance: assets.Ether(100).ToInt()}} + gasLimit := uint32(ethconfig.Defaults.Miner.GasCeil) + backend := cltest.NewSimulatedBackend(t, genesisData, gasLimit) + _, _, verifier, err := solidity_vrf_verifier_wrapper.DeployVRFTestHelper(auth, backend) + if err != nil { + panic(errors.Wrapf(err, "while initializing EVM contract wrapper")) + } + backend.Commit() + _, err = verifier.RandomValueFromVRFProof(nil, proof[:]) + require.NoError(t, err) +} diff --git a/core/services/vrf/proof/seed.go b/core/services/vrf/proof/seed.go new file mode 100644 index 00000000..2fa99fea --- /dev/null +++ b/core/services/vrf/proof/seed.go @@ -0,0 +1,86 @@ +package proof + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" +) + +// Seed represents a VRF 
seed as a serialized uint256 +type Seed [32]byte + +// BigToSeed returns seed x represented as a Seed, or an error if x is too big +func BigToSeed(x *big.Int) (Seed, error) { + seed, err := utils.Uint256ToBytes(x) + if err != nil { + return Seed{}, err + } + return Seed(common.BytesToHash(seed)), nil +} + +// Big returns the uint256 seed represented by s +func (s *Seed) Big() *big.Int { + return common.Hash(*s).Big() +} + +// PreSeedData contains the data the VRF provider needs to compute the final VRF +// output and marshal the proof for transmission to the VRFCoordinator contract. +type PreSeedData struct { + PreSeed Seed // Seed to be mixed with hash of containing block + BlockHash common.Hash // Hash of block containing VRF request + BlockNum uint64 // Cardinal number of block containing VRF request +} + +type PreSeedDataV2 struct { + PreSeed Seed // Seed to be mixed with hash of containing block + BlockHash common.Hash // Hash of block containing VRF request + BlockNum uint64 // Cardinal number of block containing VRF request + SubId uint64 + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address +} + +type PreSeedDataV2Plus struct { + PreSeed Seed // Seed to be mixed with hash of containing block + BlockHash common.Hash // Hash of block containing VRF request + BlockNum uint64 // Cardinal number of block containing VRF request + SubId *big.Int + CallbackGasLimit uint32 + NumWords uint32 + Sender common.Address + ExtraArgs []byte +} + +// FinalSeed is the seed which is actually passed to the VRF proof generator, +// given the pre-seed and the hash of the block in which the VRFCoordinator +// emitted the log for the request this is responding to. +func FinalSeed(s PreSeedData) (finalSeed *big.Int) { + seedHashMsg := append(s.PreSeed[:], s.BlockHash.Bytes()...) + return utils.MustHash(string(seedHashMsg)).Big() +} + +func FinalSeedV2(s PreSeedDataV2) (finalSeed *big.Int) { + seedHashMsg := append(s.PreSeed[:], s.BlockHash.Bytes()...) 
+ return utils.MustHash(string(seedHashMsg)).Big() +} + +func FinalSeedV2Plus(s PreSeedDataV2Plus) (finalSeed *big.Int) { + seedHashMsg := append(s.PreSeed[:], s.BlockHash.Bytes()...) + return utils.MustHash(string(seedHashMsg)).Big() +} + +func TestXXXSeedData(t *testing.T, preSeed *big.Int, blockHash common.Hash, + blockNum int) PreSeedData { + seedAsSeed, err := BigToSeed(big.NewInt(0x10)) + require.NoError(t, err, "seed %x out of range", 0x10) + return PreSeedData{ + PreSeed: seedAsSeed, + BlockNum: uint64(blockNum), + BlockHash: blockHash, + } +} diff --git a/core/services/vrf/proof/solidity_proof.go b/core/services/vrf/proof/solidity_proof.go new file mode 100644 index 00000000..c3d70f60 --- /dev/null +++ b/core/services/vrf/proof/solidity_proof.go @@ -0,0 +1,133 @@ +package proof + +// Logic for providing the precomputed values required by the solidity verifier, +// in binary-blob format. + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "go.dedis.ch/kyber/v3" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + bm "github.com/goplugin/pluginv3.0/v2/core/utils/big_math" +) + +// SolidityProof contains precalculations which VRF.sol needs to verify proofs +type SolidityProof struct { + P *vrfkey.Proof // The core proof + UWitness common.Address // Address of P.C*P.PK+P.S*G + CGammaWitness, SHashWitness kyber.Point // P.C*P.Gamma, P.S*HashToCurve(P.Seed) + ZInv *big.Int // Inverse of Z coord from ProjectiveECAdd(CGammaWitness, SHashWitness) +} + +// String returns the values in p, in hexadecimal format +func (p *SolidityProof) String() string { + return fmt.Sprintf( + "SolidityProof{P: %s, UWitness: %x, CGammaWitness: %s, SHashWitness: %s, ZInv: %x}", + p.P, p.UWitness, p.CGammaWitness, p.SHashWitness, p.ZInv) +} + +func point() kyber.Point { + return 
vrfkey.Secp256k1Curve.Point() +} + +// SolidityPrecalculations returns the precomputed values needed by the solidity +// verifier, or an error on failure. +func SolidityPrecalculations(p *vrfkey.Proof) (*SolidityProof, error) { + var rv SolidityProof + rv.P = p + c := secp256k1.IntToScalar(p.C) + s := secp256k1.IntToScalar(p.S) + u := point().Add(point().Mul(c, p.PublicKey), point().Mul(s, vrfkey.Generator)) + var err error + rv.UWitness = secp256k1.EthereumAddress(u) + rv.CGammaWitness = point().Mul(c, p.Gamma) + hash, err := vrfkey.HashToCurve(p.PublicKey, p.Seed, func(*big.Int) {}) + if err != nil { + return nil, err + } + rv.SHashWitness = point().Mul(s, hash) + _, _, z := vrfkey.ProjectiveECAdd(rv.CGammaWitness, rv.SHashWitness) + rv.ZInv = z.ModInverse(z, vrfkey.FieldSize) + return &rv, nil +} + +// Length of marshaled proof, in bytes +const ProofLength = 64 + // PublicKey + 64 + // Gamma + 32 + // C + 32 + // S + 32 + // Seed + 32 + // uWitness (gets padded to 256 bits, even though it's only 160) + 64 + // cGammaWitness + 64 + // sHashWitness + 32 // zInv (Leave Output out, because that can be efficiently calculated) + +// MarshaledProof contains a VRF proof for randomValueFromVRFProof. +// +// NB: when passing one of these to randomValueFromVRFProof via the geth +// blockchain simulator, it must be passed as a slice ("proof[:]"). Passing it +// as-is sends hundreds of single bytes, each padded to their own 32-byte word. +type MarshaledProof [ProofLength]byte + +// String returns m as 0x-hex bytes +func (m MarshaledProof) String() string { + return fmt.Sprintf("0x%x", [ProofLength]byte(m)) +} + +// MarshalForSolidityVerifier renders p as required by randomValueFromVRFProof +func (p *SolidityProof) MarshalForSolidityVerifier() (proof MarshaledProof) { + cursor := proof[:0] + write := func(b []byte) { cursor = append(cursor, b...) 
} + write(secp256k1.LongMarshal(p.P.PublicKey)) + write(secp256k1.LongMarshal(p.P.Gamma)) + write(utils.Uint256ToBytes32(p.P.C)) + write(utils.Uint256ToBytes32(p.P.S)) + write(utils.Uint256ToBytes32(p.P.Seed)) + write(make([]byte, 12)) // Left-pad address to 32 bytes, with zeros + write(p.UWitness[:]) + write(secp256k1.LongMarshal(p.CGammaWitness)) + write(secp256k1.LongMarshal(p.SHashWitness)) + write(utils.Uint256ToBytes32(p.ZInv)) + if len(cursor) != ProofLength { + panic(fmt.Errorf("wrong proof length: %d", len(proof))) + } + return proof +} + +// MarshalForSolidityVerifier renders p as required by randomValueFromVRFProof +func MarshalForSolidityVerifier(p *vrfkey.Proof) (MarshaledProof, error) { + var rv MarshaledProof + solidityProof, err := SolidityPrecalculations(p) + if err != nil { + return rv, err + } + return solidityProof.MarshalForSolidityVerifier(), nil +} + +func UnmarshalSolidityProof(proof []byte) (rv vrfkey.Proof, err error) { + failedProof := vrfkey.Proof{} + if len(proof) != ProofLength { + return failedProof, fmt.Errorf( + "VRF proof is %d bytes long, should be %d: \"%x\"", len(proof), + ProofLength, proof) + } + if rv.PublicKey, err = secp256k1.LongUnmarshal(proof[:64]); err != nil { + return failedProof, errors.Wrapf(err, "while reading proof public key") + } + rawGamma := proof[64:128] + if rv.Gamma, err = secp256k1.LongUnmarshal(rawGamma); err != nil { + return failedProof, errors.Wrapf(err, "while reading proof gamma") + } + rv.C = bm.I().SetBytes(proof[128:160]) + rv.S = bm.I().SetBytes(proof[160:192]) + rv.Seed = bm.I().SetBytes(proof[192:224]) + rv.Output = utils.MustHash(string(vrfkey.RandomOutputHashPrefix) + + string(rawGamma)).Big() + return rv, nil +} diff --git a/core/services/vrf/solidity_cross_tests/vrf_consumer_base_test.go b/core/services/vrf/solidity_cross_tests/vrf_consumer_base_test.go new file mode 100644 index 00000000..fe0242d3 --- /dev/null +++ b/core/services/vrf/solidity_cross_tests/vrf_consumer_base_test.go @@ -0,0 
+1,25 @@ +package solidity_cross_tests_test + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrftesthelpers" +) + +func TestConsumerBaseRejectsBadVRFCoordinator(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + coordinator := vrftesthelpers.NewVRFCoordinatorUniverse(t, key) + keyHash, _ /* jobID */, fee := registerProvingKey(t, coordinator) + log := requestRandomness(t, coordinator, keyHash, fee) + // Ensure that VRFConsumerBase.rawFulfillRandomness's check, + // require(msg.sender==vrfCoordinator), by using the wrong sender address. + _, err := coordinator.ConsumerContract.RawFulfillRandomness(coordinator.Neil, + keyHash, big.NewInt(0).SetBytes([]byte("a bad random value"))) + require.Error(t, err) + // Verify that correct fulfilment is possible, in this setup + _ = fulfillRandomnessRequest(t, coordinator, *log) +} diff --git a/core/services/vrf/solidity_cross_tests/vrf_coordinator_abi_values.go b/core/services/vrf/solidity_cross_tests/vrf_coordinator_abi_values.go new file mode 100644 index 00000000..a4d11698 --- /dev/null +++ b/core/services/vrf/solidity_cross_tests/vrf_coordinator_abi_values.go @@ -0,0 +1,63 @@ +package solidity_cross_tests + +import ( + "fmt" + "strings" + "sync" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" +) + +// VRFRandomnessRequestLogTopic returns the signature of the RandomnessRequest log +// emitted by the VRFCoordinator contract +func VRFRandomnessRequestLogTopic() common.Hash { + return coordinatorABIValues().randomnessRequestLogTopic +} + +// randomnessRequestRawDataArgs returns a list of the arguments to the +// RandomnessRequest log emitted by the VRFCoordinator contract +func 
randomnessRequestRawDataArgs() abi.Arguments { + return coordinatorABIValues().randomnessRequestRawDataArgs +} + +var fulfillMethodName = "fulfillRandomnessRequest" + +// abiValues is a singleton carrying information parsed once from the +// VRFCoordinator abi string +type abiValues struct { + // CoordinatorABI is the ABI of the VRFCoordinator + coordinatorABI abi.ABI + fulfillSelector string + fulfillMethod abi.Method + // RandomnessRequestLogTopic is the signature of the RandomnessRequest log + randomnessRequestLogTopic common.Hash + randomnessRequestRawDataArgs abi.Arguments +} + +var coordinatorABIValues = sync.OnceValue(func() (v *abiValues) { + v = new(abiValues) + var err error + v.coordinatorABI, err = abi.JSON(strings.NewReader( + solidity_vrf_coordinator_interface.VRFCoordinatorABI)) + if err != nil { + panic(err) + } + var found bool + v.fulfillMethod, found = v.coordinatorABI.Methods[fulfillMethodName] + if !found { + panic(fmt.Errorf("could not find method %s in VRFCoordinator ABI", fulfillMethodName)) + } + v.fulfillSelector = hexutil.Encode(v.fulfillMethod.ID) + randomnessRequestABI := v.coordinatorABI.Events["RandomnessRequest"] + v.randomnessRequestLogTopic = randomnessRequestABI.ID + for _, arg := range randomnessRequestABI.Inputs { + if !arg.Indexed { + v.randomnessRequestRawDataArgs = append(v.randomnessRequestRawDataArgs, arg) + } + } + return +}) diff --git a/core/services/vrf/solidity_cross_tests/vrf_coordinator_interface.go b/core/services/vrf/solidity_cross_tests/vrf_coordinator_interface.go new file mode 100644 index 00000000..2ea20669 --- /dev/null +++ b/core/services/vrf/solidity_cross_tests/vrf_coordinator_interface.go @@ -0,0 +1,100 @@ +package solidity_cross_tests + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" +) + +// RawRandomnessRequestLog is used to parse a RandomnessRequest log into types +// go-ethereum knows about. +type RawRandomnessRequestLog solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest + +// RandomnessRequestLog contains the data for a RandomnessRequest log, +// represented as compatible golang types. +type RandomnessRequestLog struct { + KeyHash common.Hash + Seed *big.Int // uint256 + JobID common.Hash + Sender common.Address + Fee *assets.Link // uint256 + RequestID common.Hash + Raw RawRandomnessRequestLog +} + +var dummyCoordinator, _ = solidity_vrf_coordinator_interface.NewVRFCoordinator( + common.Address{}, nil) + +func toGethLog(log types.Log) types.Log { + return types.Log{ + Address: log.Address, + Topics: log.Topics, + Data: log.Data, + BlockNumber: log.BlockNumber, + TxHash: log.TxHash, + TxIndex: log.TxIndex, + BlockHash: log.BlockHash, + Index: log.Index, + Removed: log.Removed, + } +} + +// ParseRandomnessRequestLog returns the RandomnessRequestLog corresponding to +// the raw logData +func ParseRandomnessRequestLog(log types.Log) (*RandomnessRequestLog, error) { + rawLog, err := dummyCoordinator.ParseRandomnessRequest(toGethLog(log)) + if err != nil { + return nil, errors.Wrapf(err, + "while parsing %x as RandomnessRequestLog", log.Data) + } + return RawRandomnessRequestLogToRandomnessRequestLog( + (*RawRandomnessRequestLog)(rawLog)), nil +} + +// RawData returns the raw bytes corresponding to l in a solidity log +// +// This serialization does not include the JobID, because that's an indexed field. +func (l *RandomnessRequestLog) RawData() ([]byte, error) { + return randomnessRequestRawDataArgs().Pack(l.KeyHash, + l.Seed, l.Sender, (*big.Int)(l.Fee), l.RequestID) +} + +// Equal(ol) is true iff l is the same log as ol, and both represent valid +// RandomnessRequest logs. 
+func (l *RandomnessRequestLog) Equal(ol RandomnessRequestLog) bool { + return l.KeyHash == ol.KeyHash && + equal(l.Seed, ol.Seed) && + l.JobID == ol.JobID && + l.Sender == ol.Sender && + l.Fee.Cmp(ol.Fee) == 0 && + l.RequestID == ol.RequestID +} + +func (l *RandomnessRequestLog) ComputedRequestID() common.Hash { + soliditySeed, err := utils.Uint256ToBytes(l.Seed) + if err != nil { + panic(errors.Wrapf(err, "vrf seed out of bounds in %#+v", l)) + } + return utils.MustHash(string(append(l.KeyHash[:], soliditySeed...))) +} + +func RawRandomnessRequestLogToRandomnessRequestLog( + l *RawRandomnessRequestLog) *RandomnessRequestLog { + return &RandomnessRequestLog{ + KeyHash: l.KeyHash, + Seed: l.Seed, + JobID: l.JobID, + Sender: l.Sender, + Fee: (*assets.Link)(l.Fee), + RequestID: l.RequestID, + Raw: *l, + } +} + +func equal(left, right *big.Int) bool { return left.Cmp(right) == 0 } diff --git a/core/services/vrf/solidity_cross_tests/vrf_coordinator_interface_test.go b/core/services/vrf/solidity_cross_tests/vrf_coordinator_interface_test.go new file mode 100644 index 00000000..e2396610 --- /dev/null +++ b/core/services/vrf/solidity_cross_tests/vrf_coordinator_interface_test.go @@ -0,0 +1,55 @@ +package solidity_cross_tests_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/solidity_cross_tests" +) + +var ( + keyHash = secretKey.PublicKey.MustHash() + jobID = common.BytesToHash([]byte("1234567890abcdef1234567890abcdef")) + seed = big.NewInt(1) + sender = common.HexToAddress("0xecfcab0a285d3380e488a39b4bb21e777f8a4eac") + fee = big.NewInt(100) + requestID = common.HexToHash("0xcafe") + raw = solidity_cross_tests.RawRandomnessRequestLog{ + KeyHash: keyHash, + Seed: seed, + JobID: jobID, + Sender: sender, + Fee: fee, + RequestID: requestID, + Raw: types.Log{ 
+ // A raw, on-the-wire RandomnessRequestLog is the concat of fields as uint256's + Data: append(append(append(append( + keyHash.Bytes(), + common.BigToHash(seed).Bytes()...), + common.BytesToHash(sender.Bytes()).Bytes()...), + common.BigToHash(fee).Bytes()...), + requestID.Bytes()...), + Topics: []common.Hash{{}, jobID}, + }, + } +) + +func TestVRFParseRandomnessRequestLog(t *testing.T) { + r := solidity_cross_tests.RawRandomnessRequestLogToRandomnessRequestLog(&raw) + rawLog, err := r.RawData() + require.NoError(t, err) + assert.Equal(t, rawLog, raw.Raw.Data) + nR, err := solidity_cross_tests.ParseRandomnessRequestLog(types.Log{ + Data: rawLog, + Topics: []common.Hash{solidity_cross_tests.VRFRandomnessRequestLogTopic(), jobID}, + }) + require.NoError(t, err) + require.True(t, r.Equal(*nR), + "Round-tripping RandomnessRequestLog through serialization and parsing "+ + "resulted in a different log.") +} diff --git a/core/services/vrf/solidity_cross_tests/vrf_coordinator_solidity_crosscheck_test.go b/core/services/vrf/solidity_cross_tests/vrf_coordinator_solidity_crosscheck_test.go new file mode 100644 index 00000000..4647c2fb --- /dev/null +++ b/core/services/vrf/solidity_cross_tests/vrf_coordinator_solidity_crosscheck_test.go @@ -0,0 +1,259 @@ +package solidity_cross_tests_test + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + proof2 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/solidity_cross_tests" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrftesthelpers" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + + 
"github.com/goplugin/pluginv3.0/v2/core/internal/cltest" +) + +const defaultGasLimit uint32 = 500000 + +func TestRequestIDMatches(t *testing.T) { + keyHash := common.HexToHash("0x01") + key := cltest.MustGenerateRandomKey(t) + baseContract := vrftesthelpers.NewVRFCoordinatorUniverse(t, key).RequestIDBase + var seed = big.NewInt(1) + solidityRequestID, err := baseContract.MakeRequestId(nil, keyHash, seed) + require.NoError(t, err, "failed to calculate VRF requestID on simulated ethereum blockchain") + goRequestLog := &solidity_cross_tests.RandomnessRequestLog{KeyHash: keyHash, Seed: seed} + assert.Equal(t, common.Hash(solidityRequestID), goRequestLog.ComputedRequestID(), + "solidity VRF requestID differs from golang requestID!") +} + +var ( + rawSecretKey = big.NewInt(1) // never do this in production! + secretKey = vrfkey.MustNewV2XXXTestingOnly(rawSecretKey) + publicKey = (&secp256k1.Secp256k1{}).Point().Mul(secp256k1.IntToScalar( + rawSecretKey), nil) + hardcodedSeed = big.NewInt(0) + vrfFee = big.NewInt(7) +) + +// registerProvingKey registers keyHash to neil in the VRFCoordinator universe +// represented by coordinator, with the given jobID and fee. 
+func registerProvingKey(t *testing.T, coordinator vrftesthelpers.CoordinatorUniverse) ( + keyHash [32]byte, jobID [32]byte, fee *big.Int) { + copy(jobID[:], []byte("exactly 32 characters in length.")) + _, err := coordinator.RootContract.RegisterProvingKey( + coordinator.Neil, vrfFee, coordinator.Neil.From, pair(secp256k1.Coordinates(publicKey)), jobID) + require.NoError(t, err, "failed to register VRF proving key on VRFCoordinator contract") + coordinator.Backend.Commit() + keyHash = utils.MustHash(string(secp256k1.LongMarshal(publicKey))) + return keyHash, jobID, vrfFee +} + +func TestRegisterProvingKey(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + coord := vrftesthelpers.NewVRFCoordinatorUniverse(t, key) + keyHash, jobID, fee := registerProvingKey(t, coord) + log, err := coord.RootContract.FilterNewServiceAgreement(nil) + require.NoError(t, err, "failed to subscribe to NewServiceAgreement logs on simulated ethereum blockchain") + logCount := 0 + for log.Next() { + logCount++ + assert.Equal(t, log.Event.KeyHash, keyHash, "VRFCoordinator logged a different keyHash than was registered") + assert.True(t, fee.Cmp(log.Event.Fee) == 0, "VRFCoordinator logged a different fee than was registered") + } + require.Equal(t, 1, logCount, "unexpected NewServiceAgreement log generated by key VRF key registration") + serviceAgreement, err := coord.RootContract.ServiceAgreements(nil, keyHash) + require.NoError(t, err, "failed to retrieve previously registered VRF service agreement from VRFCoordinator") + assert.Equal(t, coord.Neil.From, serviceAgreement.VRFOracle, + "VRFCoordinator registered wrong provider, on service agreement!") + assert.Equal(t, jobID, serviceAgreement.JobID, + "VRFCoordinator registered wrong jobID, on service agreement!") + assert.True(t, fee.Cmp(serviceAgreement.Fee) == 0, + "VRFCoordinator registered wrong fee, on service agreement!") +} + +func TestFailToRegisterProvingKeyFromANonOwnerAddress(t *testing.T) { + key := 
cltest.MustGenerateRandomKey(t) + coordinator := vrftesthelpers.NewVRFCoordinatorUniverse(t, key) + + var jobID [32]byte + copy(jobID[:], []byte("exactly 32 characters in length.")) + _, err := coordinator.RootContract.RegisterProvingKey( + coordinator.Ned, vrfFee, coordinator.Neil.From, pair(secp256k1.Coordinates(publicKey)), jobID) + + require.Error(t, err, "expected an error") + require.Contains(t, err.Error(), "Ownable: caller is not the owner") +} + +// requestRandomness sends a randomness request via Carol's consuming contract, +// in the VRFCoordinator universe represented by coordinator, specifying the +// given keyHash and seed, and paying the given fee. It returns the log emitted +// from the VRFCoordinator in response to the request +func requestRandomness(t *testing.T, coordinator vrftesthelpers.CoordinatorUniverse, + keyHash common.Hash, fee *big.Int) *solidity_cross_tests.RandomnessRequestLog { + _, err := coordinator.ConsumerContract.TestRequestRandomness(coordinator.Carol, + keyHash, fee) + require.NoError(t, err, "problem during initial VRF randomness request") + coordinator.Backend.Commit() + log, err := coordinator.RootContract.FilterRandomnessRequest(nil, nil) + require.NoError(t, err, "failed to subscribe to RandomnessRequest logs") + logCount := 0 + for log.Next() { + logCount++ + } + require.Equal(t, 1, logCount, "unexpected log generated by randomness request to VRFCoordinator") + return solidity_cross_tests.RawRandomnessRequestLogToRandomnessRequestLog( + (*solidity_cross_tests.RawRandomnessRequestLog)(log.Event)) +} + +func requestRandomnessV08(t *testing.T, coordinator vrftesthelpers.CoordinatorUniverse, + keyHash common.Hash, fee *big.Int) *solidity_cross_tests.RandomnessRequestLog { + _, err := coordinator.ConsumerContractV08.DoRequestRandomness(coordinator.Carol, + keyHash, fee) + require.NoError(t, err, "problem during initial VRF randomness request") + coordinator.Backend.Commit() + log, err := 
coordinator.RootContract.FilterRandomnessRequest(nil, nil) + require.NoError(t, err, "failed to subscribe to RandomnessRequest logs") + logCount := 0 + for log.Next() { + if log.Event.Sender == coordinator.ConsumerContractAddressV08 { + logCount++ + } + } + require.Equal(t, 1, logCount, "unexpected log generated by randomness request to VRFCoordinator") + return solidity_cross_tests.RawRandomnessRequestLogToRandomnessRequestLog( + (*solidity_cross_tests.RawRandomnessRequestLog)(log.Event)) +} + +func TestRandomnessRequestLog(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + coord := vrftesthelpers.NewVRFCoordinatorUniverseWithV08Consumer(t, key) + keyHash_, jobID_, fee := registerProvingKey(t, coord) + keyHash := common.BytesToHash(keyHash_[:]) + jobID := common.BytesToHash(jobID_[:]) + var tt = []struct { + rr func(t *testing.T, coordinator vrftesthelpers.CoordinatorUniverse, + keyHash common.Hash, fee *big.Int) *solidity_cross_tests.RandomnessRequestLog + ms func() (*big.Int, error) + consumerAddress common.Address + }{ + { + rr: requestRandomness, + ms: func() (*big.Int, error) { + return coord.RequestIDBase.MakeVRFInputSeed(nil, keyHash, hardcodedSeed, coord.ConsumerContractAddress, big.NewInt(0)) + }, + consumerAddress: coord.ConsumerContractAddress, + }, + { + rr: requestRandomnessV08, + ms: func() (*big.Int, error) { + return coord.RequestIDBaseV08.MakeVRFInputSeed(nil, keyHash, hardcodedSeed, coord.ConsumerContractAddressV08, big.NewInt(0)) + }, + consumerAddress: coord.ConsumerContractAddressV08, + }, + } + for _, tc := range tt { + log := tc.rr(t, coord, keyHash, fee) + assert.Equal(t, keyHash, log.KeyHash, "VRFCoordinator logged wrong KeyHash for randomness request") + nonce := big.NewInt(0) + actualSeed, err := tc.ms() + require.NoError(t, err, "failure while using VRFCoordinator to calculate actual VRF input seed") + assert.True(t, actualSeed.Cmp(log.Seed) == 0, + "VRFCoordinator logged wrong actual input seed from randomness request") + 
golangSeed := utils.MustHash(string(append(append(append( + keyHash[:], + common.BigToHash(hardcodedSeed).Bytes()...), + common.BytesToHash(tc.consumerAddress.Bytes()).Bytes()...), + common.BigToHash(nonce).Bytes()...))) + assert.Equal(t, golangSeed, common.BigToHash((log.Seed)), "VRFCoordinator logged different actual input seed than expected by golang code!") + assert.Equal(t, jobID, log.JobID, "VRFCoordinator logged different JobID from randomness request!") + assert.Equal(t, tc.consumerAddress, log.Sender, "VRFCoordinator logged different requester address from randomness request!") + assert.True(t, fee.Cmp((*big.Int)(log.Fee)) == 0, "VRFCoordinator logged different fee from randomness request!") + parsedLog, err := solidity_cross_tests.ParseRandomnessRequestLog(log.Raw.Raw) + assert.NoError(t, err, "could not parse randomness request log generated by VRFCoordinator") + assert.True(t, parsedLog.Equal(*log), "got a different randomness request log by parsing the raw data than reported by simulated backend") + } +} + +// fulfillRandomnessRequest is neil fulfilling randomness requested by log. +func fulfillRandomnessRequest(t *testing.T, coordinator vrftesthelpers.CoordinatorUniverse, log solidity_cross_tests.RandomnessRequestLog) vrfkey.Proof { + preSeed, err := proof2.BigToSeed(log.Seed) + require.NoError(t, err, "pre-seed %x out of range", preSeed) + s := proof2.PreSeedData{ + PreSeed: preSeed, + BlockHash: log.Raw.Raw.BlockHash, + BlockNum: log.Raw.Raw.BlockNumber, + } + seed := proof2.FinalSeed(s) + proof, err := secretKey.GenerateProofWithNonce(seed, big.NewInt(1) /* nonce */) + require.NoError(t, err) + proofBlob, err := vrftesthelpers.GenerateProofResponseFromProof(proof, s) + require.NoError(t, err, "could not generate VRF proof!") + // Seems to be a bug in the simulated backend: without this extra Commit, the + // EVM seems to think it's still on the block in which the request was made, + // which means that the relevant blockhash is unavailable. 
+ coordinator.Backend.Commit() + // This is simulating a node response, so set the gas limit as plugin does + var neil bind.TransactOpts = *coordinator.Neil + neil.GasLimit = uint64(defaultGasLimit) + _, err = coordinator.RootContract.FulfillRandomnessRequest(&neil, proofBlob[:]) + require.NoError(t, err, "failed to fulfill randomness request!") + coordinator.Backend.Commit() + return proof +} + +func TestFulfillRandomness(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + coordinator := vrftesthelpers.NewVRFCoordinatorUniverse(t, key) + keyHash, _, fee := registerProvingKey(t, coordinator) + randomnessRequestLog := requestRandomness(t, coordinator, keyHash, fee) + proof := fulfillRandomnessRequest(t, coordinator, *randomnessRequestLog) + output, err := coordinator.ConsumerContract.RandomnessOutput(nil) + require.NoError(t, err, "failed to get VRF output from consuming contract, "+ + "after randomness request was fulfilled") + assert.True(t, proof.Output.Cmp(output) == 0, "VRF output from randomness "+ + "request fulfillment was different than provided! Expected %d, got %d. 
"+ + "This can happen if you update the VRFCoordinator wrapper without a "+ + "corresponding update to the VRFConsumer", proof.Output, output) + requestID, err := coordinator.ConsumerContract.RequestId(nil) + require.NoError(t, err, "failed to get requestId from VRFConsumer") + assert.Equal(t, randomnessRequestLog.RequestID, common.Hash(requestID), + "VRFConsumer has different request ID than logged from randomness request!") + neilBalance, err := coordinator.RootContract.WithdrawableTokens( + nil, coordinator.Neil.From) + require.NoError(t, err, "failed to get neil's token balance, after he "+ + "successfully fulfilled a randomness request") + assert.True(t, neilBalance.Cmp(fee) == 0, "neil's balance on VRFCoordinator "+ + "was not paid his fee, despite successful fulfillment of randomness request!") +} + +func TestWithdraw(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + coordinator := vrftesthelpers.NewVRFCoordinatorUniverse(t, key) + keyHash, _, fee := registerProvingKey(t, coordinator) + log := requestRandomness(t, coordinator, keyHash, fee) + fulfillRandomnessRequest(t, coordinator, *log) + payment := big.NewInt(4) + peteThePunter := common.HexToAddress("0xdeadfa11deadfa11deadfa11deadfa11deadfa11") + _, err := coordinator.RootContract.Withdraw(coordinator.Neil, peteThePunter, payment) + require.NoError(t, err, "failed to withdraw PLI from neil's balance") + coordinator.Backend.Commit() + peteBalance, err := coordinator.LinkContract.BalanceOf(nil, peteThePunter) + require.NoError(t, err, "failed to get balance of payee on PLI contract, after payment") + assert.True(t, payment.Cmp(peteBalance) == 0, + "PLI balance is wrong, following payment") + neilBalance, err := coordinator.RootContract.WithdrawableTokens( + nil, coordinator.Neil.From) + require.NoError(t, err, "failed to get neil's balance on VRFCoordinator") + assert.True(t, big.NewInt(0).Sub(fee, payment).Cmp(neilBalance) == 0, + "neil's VRFCoordinator balance is wrong, after he's made a 
withdrawal!") + _, err = coordinator.RootContract.Withdraw(coordinator.Neil, peteThePunter, fee) + assert.Error(t, err, "VRFcoordinator allowed overdraft") +} diff --git a/core/services/vrf/solidity_cross_tests/vrf_fulfillment_cost_test.go b/core/services/vrf/solidity_cross_tests/vrf_fulfillment_cost_test.go new file mode 100644 index 00000000..29595f78 --- /dev/null +++ b/core/services/vrf/solidity_cross_tests/vrf_fulfillment_cost_test.go @@ -0,0 +1,48 @@ +package solidity_cross_tests_test + +import ( + "math/big" + "testing" + + proof2 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrftesthelpers" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" +) + +// TestMeasureFulfillmentGasCost establishes rough bounds on the cost of +// providing a proof to the VRF coordinator. +func TestMeasureFulfillmentGasCost(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + coordinator := vrftesthelpers.NewVRFCoordinatorUniverse(t, key) + keyHash, _, fee := registerProvingKey(t, coordinator) + // Set up a request to fulfill + log := requestRandomness(t, coordinator, keyHash, fee) + preSeed, err := proof2.BigToSeed(log.Seed) + require.NoError(t, err, "pre-seed %x out of range", preSeed) + s := proof2.PreSeedData{ + PreSeed: preSeed, + BlockHash: log.Raw.Raw.BlockHash, + BlockNum: log.Raw.Raw.BlockNumber, + } + seed := proof2.FinalSeed(s) + proof, err := secretKey.GenerateProofWithNonce(seed, big.NewInt(1) /* nonce */) + require.NoError(t, err) + proofBlob, err := vrftesthelpers.GenerateProofResponseFromProof(proof, s) + require.NoError(t, err, "could not generate VRF proof!") + coordinator.Backend.Commit() // Work around simbackend/EVM block number bug + estimate := estimateGas(t, coordinator.Backend, coordinator.Neil.From, + coordinator.RootContractAddress, coordinator.CoordinatorABI, + "fulfillRandomnessRequest", 
proofBlob[:]) + + assert.Greater(t, estimate, uint64(108000), + "fulfillRandomness tx cost less gas than expected") + t.Log("estimate", estimate) + // Note that this is probably a very loose upper bound on gas usage. + // TODO:https://www.pivotaltracker.com/story/show/175040572 + assert.Less(t, estimate, uint64(500000), + "fulfillRandomness tx cost more gas than expected") +} diff --git a/core/services/vrf/solidity_cross_tests/vrf_hash_to_curve_cost_test.go b/core/services/vrf/solidity_cross_tests/vrf_hash_to_curve_cost_test.go new file mode 100644 index 00000000..1c5f3220 --- /dev/null +++ b/core/services/vrf/solidity_cross_tests/vrf_hash_to_curve_cost_test.go @@ -0,0 +1,106 @@ +package solidity_cross_tests_test + +import ( + "crypto/ecdsa" + "math/big" + "strings" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_verifier_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + + "github.com/ethereum/go-ethereum/eth/ethconfig" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type contract struct { + contract *bind.BoundContract + address common.Address + abi *abi.ABI + backend *backends.SimulatedBackend +} + +// deployVRFContract returns a deployed VRF contract, with some extra attributes +// which are useful for gas measurements. 
+func deployVRFContract(t *testing.T) (contract, common.Address) { + x, y := secp256k1.Coordinates(vrfkey.Generator) + key := ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{Curve: crypto.S256(), X: x, Y: y}, + D: big.NewInt(1), + } + auth, _ := bind.NewKeyedTransactorWithChainID(&key, testutils.SimulatedChainID) + genesisData := core.GenesisAlloc{auth.From: {Balance: assets.Ether(100).ToInt()}} + gasLimit := uint32(ethconfig.Defaults.Miner.GasCeil) + backend := cltest.NewSimulatedBackend(t, genesisData, gasLimit) + parsed, err := abi.JSON(strings.NewReader( + solidity_vrf_verifier_wrapper.VRFTestHelperABI)) + require.NoError(t, err, "could not parse VRF ABI") + address, _, vRFContract, err := bind.DeployContract(auth, parsed, + common.FromHex(solidity_vrf_verifier_wrapper.VRFTestHelperBin), backend) + require.NoError(t, err, "failed to deploy VRF contract to simulated blockchain") + backend.Commit() + return contract{vRFContract, address, &parsed, backend}, crypto.PubkeyToAddress( + key.PublicKey) +} + +// estimateGas returns the estimated gas cost of running the given method on the +// contract at address to, on the given backend, with the given args, and given +// that the transaction is sent from the from address. +func estimateGas(t *testing.T, backend *backends.SimulatedBackend, + from, to common.Address, abi *abi.ABI, method string, args ...interface{}, +) uint64 { + rawData, err := abi.Pack(method, args...) 
+ require.NoError(t, err, "failed to construct raw %s transaction with args %s", + method, args) + callMsg := ethereum.CallMsg{From: from, To: &to, Data: rawData} + estimate, err := backend.EstimateGas(testutils.Context(t), callMsg) + require.NoError(t, err, "failed to estimate gas from %s call with args %s", + method, args) + return estimate +} + +func measureHashToCurveGasCost(t *testing.T, contract contract, + owner common.Address, input int64) (gasCost, numOrdinates uint64) { + estimate := estimateGas(t, contract.backend, owner, contract.address, + contract.abi, "hashToCurve_", pair(secp256k1.Coordinates(vrfkey.Generator)), + big.NewInt(input)) + + _, err := vrfkey.HashToCurve(vrfkey.Generator, big.NewInt(input), + func(*big.Int) { numOrdinates++ }) + require.NoError(t, err, "corresponding golang HashToCurve calculation failed") + return estimate, numOrdinates +} + +var baseCost uint64 = 25000 +var marginalCost uint64 = 15555 + +func HashToCurveGasCostBound(numOrdinates uint64) uint64 { + return baseCost + marginalCost*numOrdinates +} + +func TestMeasureHashToCurveGasCost(t *testing.T) { + contract, owner := deployVRFContract(t) + numSamples := int64(numSamples()) + for i := int64(0); i < numSamples; i++ { + gasCost, numOrdinates := measureHashToCurveGasCost(t, contract, owner, i) + assert.Less(t, gasCost, HashToCurveGasCostBound(numOrdinates), + "on-chain hashToCurve gas cost exceeded estimate function") + } + require.Less(t, HashToCurveGasCostBound(128), uint64(2.017e6), + "estimate for on-chain hashToCurve gas cost with 128 iterations is greater "+ + "than stated in the VRF.sol documentation") +} diff --git a/core/services/vrf/solidity_cross_tests/vrf_randomness_output_cost_test.go b/core/services/vrf/solidity_cross_tests/vrf_randomness_output_cost_test.go new file mode 100644 index 00000000..c9487a59 --- /dev/null +++ b/core/services/vrf/solidity_cross_tests/vrf_randomness_output_cost_test.go @@ -0,0 +1,34 @@ +package solidity_cross_tests_test + +import ( + 
mrand "math/rand" + "testing" + + proof2 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" +) + +func TestMeasureRandomValueFromVRFProofGasCost(t *testing.T) { + r := mrand.New(mrand.NewSource(10)) + sk := randomScalar(t, r) + skNum := secp256k1.ToInt(sk) + pk := vrfkey.MustNewV2XXXTestingOnly(skNum) + nonce := randomScalar(t, r) + randomSeed := randomUint256(t, r) + proof, err := pk.GenerateProofWithNonce(randomSeed, secp256k1.ToInt(nonce)) + require.NoError(t, err, "failed to generate VRF proof") + mproof, err := proof2.MarshalForSolidityVerifier(&proof) + require.NoError(t, err, "failed to marshal VRF proof for on-chain verification") + contract, _ := deployVRFContract(t) + + estimate := estimateGas(t, contract.backend, common.Address{}, + contract.address, contract.abi, "randomValueFromVRFProof_", mproof[:]) + + require.NoError(t, err, "failed to estimate gas cost for VRF verification") + require.Less(t, estimate, uint64(100000)) +} diff --git a/core/services/vrf/solidity_cross_tests/vrf_request_cost_test.go b/core/services/vrf/solidity_cross_tests/vrf_request_cost_test.go new file mode 100644 index 00000000..308f205c --- /dev/null +++ b/core/services/vrf/solidity_cross_tests/vrf_request_cost_test.go @@ -0,0 +1,27 @@ +package solidity_cross_tests_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrftesthelpers" +) + +func TestMeasureRandomnessRequestGasCost(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + coordinator := vrftesthelpers.NewVRFCoordinatorUniverse(t, key) + keyHash_, _, fee := registerProvingKey(t, coordinator) + + estimate 
:= estimateGas(t, coordinator.Backend, common.Address{}, + coordinator.ConsumerContractAddress, coordinator.ConsumerABI, + "testRequestRandomness", common.BytesToHash(keyHash_[:]), fee) + + assert.Greater(t, estimate, uint64(134000), + "requestRandomness tx gas cost lower than expected") + // Note: changed from 160000 to 164079 in the Berlin hard fork (Geth 1.10) + assert.Less(t, estimate, uint64(164080), + "requestRandomness tx gas cost higher than expected") +} diff --git a/core/services/vrf/solidity_cross_tests/vrf_solidity_crosscheck_test.go b/core/services/vrf/solidity_cross_tests/vrf_solidity_crosscheck_test.go new file mode 100644 index 00000000..9e18f7d5 --- /dev/null +++ b/core/services/vrf/solidity_cross_tests/vrf_solidity_crosscheck_test.go @@ -0,0 +1,396 @@ +package solidity_cross_tests_test + +import ( + "crypto/ecdsa" + "math/big" + mrand "math/rand" + "strings" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_verifier_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + proof2 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" + + "github.com/ethereum/go-ethereum/eth/ethconfig" + + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.dedis.ch/kyber/v3" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" +) + +// Cross-checks of golang implementation details vs corresponding solidity +// details. +// +// It's worth automatically checking these implementation details because they +// can help to quickly locate any disparity between the solidity and golang +// implementations. 
+
+// deployVRFTestHelper returns the wrapper of the EVM verifier contract.
+//
+// NB: For changes to the VRF solidity code to be reflected here, "go generate"
+// must be run in core/services/vrf.
+//
+// TODO(alx): This suite used to be much faster, presumably because all tests
+// were sharing a common global verifier (which is fine, because all methods are
+// pure.) Revert to that, and see if it helps.
+func deployVRFTestHelper(t *testing.T) *solidity_vrf_verifier_wrapper.VRFTestHelper {
+	transactor := testutils.MustNewSimTransactor(t)
+	alloc := core.GenesisAlloc{transactor.From: {Balance: assets.Ether(100).ToInt()}}
+	sim := cltest.NewSimulatedBackend(t, alloc, uint32(ethconfig.Defaults.Miner.GasCeil))
+	_, _, verifier, err := solidity_vrf_verifier_wrapper.DeployVRFTestHelper(transactor, sim)
+	require.NoError(t, err, "failed to deploy VRF contract to simulated blockchain")
+	sim.Commit()
+	return verifier
+}
+
+// randomUint256 deterministically simulates a uniform sample of uint256's,
+// given r's seed
+//
+// Never use this if cryptographic security is required
+func randomUint256(t *testing.T, r *mrand.Rand) *big.Int {
+	buf := make([]byte, 32)
+	_, err := r.Read(buf)
+	require.NoError(t, err, "failed to read random sample") // deterministic, though
+	return new(big.Int).SetBytes(buf)
+}
+
+// numSamples returns the number of examples which should be checked, in
+// generative tests
+func numSamples() int { return 10 }
+
+func TestVRF_CompareProjectiveECAddToVerifier(t *testing.T) {
+	t.Parallel()
+	rng := mrand.New(mrand.NewSource(11))
+	for i := 0; i < numSamples(); i++ {
+		p := randomPoint(t, rng)
+		q := randomPoint(t, rng)
+		px, py := secp256k1.Coordinates(p)
+		qx, qy := secp256k1.Coordinates(q)
+		// Off-chain result first, then the on-chain answer from a fresh verifier.
+		gotX, gotY, gotZ := vrfkey.ProjectiveECAdd(p, q)
+		verifier := deployVRFTestHelper(t)
+		wantX, wantY, wantZ, err := verifier.ProjectiveECAdd(nil, px, py, qx, qy)
+		require.NoError(t, err, "failed to compute secp256k1 sum in projective coords")
+		assert.Equal(t, [3]*big.Int{wantX, wantY, wantZ},
+			[3]*big.Int{gotX, gotY, gotZ},
+			"got different answers on-chain vs off-chain, for ProjectiveECAdd")
+	}
+}
+
+func TestVRF_CompareBigModExpToVerifier(t *testing.T) {
+	t.Parallel()
+	rng := mrand.New(mrand.NewSource(0))
+	for i := 0; i < numSamples(); i++ {
+		base := randomUint256(t, rng)
+		exponent := randomUint256(t, rng)
+		actual, err := deployVRFTestHelper(t).BigModExp(nil, base, exponent)
+		require.NoError(t, err, "while computing bigmodexp on-chain")
+		expected := new(big.Int).Exp(base, exponent, vrfkey.FieldSize)
+		assert.Equal(t, expected, actual,
+			"%x ** %x %% %x = %x ≠ %x from solidity calculation",
+			base, exponent, vrfkey.FieldSize, expected, actual)
+	}
+}
+
+func TestVRF_CompareSquareRoot(t *testing.T) {
+	t.Parallel()
+	rng := mrand.New(mrand.NewSource(1))
+	for i := 0; i < numSamples(); i++ {
+		maybeSquare := randomUint256(t, rng) // Might not be square; should get same result anyway
+		onChainRoot, err := deployVRFTestHelper(t).SquareRoot(nil, maybeSquare)
+		require.NoError(t, err, "failed to compute square root on-chain")
+		offChainRoot := vrfkey.SquareRoot(maybeSquare)
+		assert.Equal(t, offChainRoot, onChainRoot,
+			"expected square root in GF(fieldSize) of %x to be %x, got %x on-chain",
+			maybeSquare, offChainRoot, onChainRoot)
+		// If the input was a square, squaring the root must recover it.
+		assert.True(t,
+			!vrfkey.IsSquare(maybeSquare) ||
+				new(big.Int).Exp(onChainRoot, big.NewInt(2), vrfkey.FieldSize).Cmp(maybeSquare) == 0,
+			"maybeSquare is a square, but failed to calculate its square root!")
+		// Exactly one of x and -x is a square in GF(fieldSize), since -1 is not.
+		assert.NotEqual(t, vrfkey.IsSquare(maybeSquare),
+			vrfkey.IsSquare(new(big.Int).Sub(vrfkey.FieldSize, maybeSquare)),
+			"negative of a non square should be square, and vice-versa, since -1 is not a square")
+	}
+}
+
+func TestVRF_CompareYSquared(t *testing.T) {
+	t.Parallel()
+	rng := mrand.New(mrand.NewSource(2))
+	for i := 0; i < numSamples(); i++ {
+		x := randomUint256(t, rng)
+		onChain, err := deployVRFTestHelper(t).YSquared(nil, x)
+		require.NoError(t, err, "failed to compute y² given x, on-chain")
+		assert.Equal(t, vrfkey.YSquared(x), onChain,
+			"different answers for y², on-chain vs off-chain")
+	}
+}
+
+func TestVRF_CompareFieldHash(t *testing.T) {
+	t.Parallel()
+	rng := mrand.New(mrand.NewSource(3))
+	msg := make([]byte, 32)
+	for i := 0; i < numSamples(); i++ {
+		_, err := rng.Read(msg)
+		require.NoError(t, err, "failed to randomize intended hash message")
+		onChain, err := deployVRFTestHelper(t).FieldHash(nil, msg)
+		require.NoError(t, err, "failed to compute fieldHash on-chain")
+		require.Equal(t, vrfkey.FieldHash(msg), onChain,
+			"fieldHash value on-chain differs from off-chain")
+	}
+}
+
+// randomKey deterministically generates a secp256k1 key.
+//
+// Never use this if cryptographic security is required
+func randomKey(t *testing.T, r *mrand.Rand) *ecdsa.PrivateKey {
+	sk := vrfkey.FieldSize
+	for sk.Cmp(vrfkey.FieldSize) >= 0 { // Keep picking until sk < fieldSize
+		sk = randomUint256(t, r)
+	}
+	return crypto.ToECDSAUnsafe(sk.Bytes())
+}
+
+// pair returns the inputs as a length-2 big.Int array. Useful for translating
+// coordinates to the uint256[2]'s VRF.sol uses to represent secp256k1 points.
+func pair(x, y *big.Int) [2]*big.Int { return [2]*big.Int{x, y} }
+
+// asPair converts a kyber point to the affine-coordinate pair VRF.sol expects.
+func asPair(p kyber.Point) [2]*big.Int { return pair(secp256k1.Coordinates(p)) }
+
+func TestVRF_CompareHashToCurve(t *testing.T) {
+	t.Parallel()
+	rng := mrand.New(mrand.NewSource(4))
+	for j := 0; j < numSamples(); j++ {
+		input := randomUint256(t, rng)
+		sk := randomKey(t, rng)
+		// On-chain hash-to-curve for the public key at (sk.X, sk.Y).
+		onChain, err := deployVRFTestHelper(t).HashToCurve(nil, pair(sk.X, sk.Y), input)
+		require.NoError(t, err, "failed to compute hashToCurve on-chain")
+		// Off-chain counterpart, with a no-op ordinate callback.
+		pubKeyPoint := secp256k1.SetCoordinates(sk.X, sk.Y)
+		offChain, err := vrfkey.HashToCurve(pubKeyPoint, input, func(*big.Int) {})
+		require.NoError(t, err, "failed to compute HashToCurve in golang")
+		require.Equal(t, asPair(offChain), onChain,
+			"on-chain and off-chain calculations of HashToCurve gave different secp256k1 points")
+	}
+}
+
+// randomPoint deterministically simulates a uniform sample of secp256k1 points,
+// given r's seed
+//
+// Never use this if cryptographic security is required
+func randomPoint(t *testing.T, r *mrand.Rand) kyber.Point {
+	p, err := vrfkey.HashToCurve(vrfkey.Generator, randomUint256(t, r), func(*big.Int) {})
+	require.NoError(t, err,
+		"failed to hash random value to secp256k1 while generating random point")
+	// Flip a fair coin so the sample is uniform over ±p.
+	if r.Int63n(2) == 1 {
+		p.Neg(p)
+	}
+	return p
+}
+
+// randomPointWithPair returns a random secp256k1, both as a kyber.Point and as
+// a pair of *big.Int's. Useful for translating between the types needed by the
+// golang contract wrappers.
+func randomPointWithPair(t *testing.T, r *mrand.Rand) (kyber.Point, [2]*big.Int) { + p := randomPoint(t, r) + return p, asPair(p) +} + +// randomScalar deterministically simulates a uniform sample of secp256k1 +// scalars, given r's seed +// +// Never use this if cryptographic security is required +func randomScalar(t *testing.T, r *mrand.Rand) kyber.Scalar { + s := randomUint256(t, r) + for s.Cmp(secp256k1.GroupOrder) >= 0 { + s = randomUint256(t, r) + } + return secp256k1.IntToScalar(s) +} + +func TestVRF_CheckSolidityPointAddition(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(5)) + for j := 0; j < numSamples(); j++ { + p1 := randomPoint(t, r) + p2 := randomPoint(t, r) + p1x, p1y := secp256k1.Coordinates(p1) + p2x, p2y := secp256k1.Coordinates(p2) + psx, psy, psz, err := deployVRFTestHelper(t).ProjectiveECAdd( + nil, p1x, p1y, p2x, p2y) + require.NoError(t, err, "failed to compute ProjectiveECAdd, on-chain") + apx, apy, apz := vrfkey.ProjectiveECAdd(p1, p2) + require.Equal(t, []*big.Int{apx, apy, apz}, []*big.Int{psx, psy, psz}, + "got different values on-chain and off-chain for ProjectiveECAdd") + zInv := big.NewInt(1).ModInverse(psz, vrfkey.FieldSize) + require.Equal(t, big.NewInt(1).Mod(big.NewInt(1).Mul(psz, zInv), + vrfkey.FieldSize), big.NewInt(1), + "failed to calculate correct inverse of z ordinate") + actualSum, err := deployVRFTestHelper(t).AffineECAdd( + nil, pair(p1x, p1y), pair(p2x, p2y), zInv) + require.NoError(t, err, + "failed to deploy VRF contract to simulated blockchain") + assert.Equal(t, asPair((&secp256k1.Secp256k1{}).Point().Add(p1, p2)), + actualSum, + "got different answers, on-chain vs off-chain, for secp256k1 sum in affine coordinates") + } +} + +func TestVRF_CheckSolidityECMulVerify(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(6)) + for j := 0; j < numSamples(); j++ { + p := randomPoint(t, r) + pxy := pair(secp256k1.Coordinates(p)) + s := randomScalar(t, r) + product := 
asPair((&secp256k1.Secp256k1{}).Point().Mul(s, p)) + actual, err := deployVRFTestHelper(t).EcmulVerify(nil, pxy, secp256k1.ToInt(s), + product) + require.NoError(t, err, "failed to check on-chain that s*p=product") + assert.True(t, actual, + "EcmulVerify rejected a valid secp256k1 scalar product relation") + shouldReject, err := deployVRFTestHelper(t).EcmulVerify(nil, pxy, + big.NewInt(0).Add(secp256k1.ToInt(s), big.NewInt(1)), product) + require.NoError(t, err, "failed to check on-chain that (s+1)*p≠product") + assert.False(t, shouldReject, + "failed to reject a false secp256k1 scalar product relation") + } +} + +func TestVRF_CheckSolidityVerifyLinearCombinationWithGenerator(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(7)) + for j := 0; j < numSamples(); j++ { + c := randomScalar(t, r) + s := randomScalar(t, r) + p := randomPoint(t, r) + expectedPoint := (&secp256k1.Secp256k1{}).Point().Add( + (&secp256k1.Secp256k1{}).Point().Mul(c, p), + (&secp256k1.Secp256k1{}).Point().Mul(s, vrfkey.Generator)) // cp+sg + expectedAddress := secp256k1.EthereumAddress(expectedPoint) + pPair := asPair(p) + actual, err := deployVRFTestHelper(t).VerifyLinearCombinationWithGenerator(nil, + secp256k1.ToInt(c), pPair, secp256k1.ToInt(s), expectedAddress) + require.NoError(t, err, + "failed to check on-chain that secp256k1 linear relationship holds") + assert.True(t, actual, + "VerifyLinearCombinationWithGenerator rejected a valid secp256k1 linear relationship") + shouldReject, err := deployVRFTestHelper(t).VerifyLinearCombinationWithGenerator(nil, + big.NewInt(0).Add(secp256k1.ToInt(c), big.NewInt(1)), pPair, + secp256k1.ToInt(s), expectedAddress) + require.NoError(t, err, + "failed to check on-chain that address((c+1)*p+s*g)≠expectedAddress") + assert.False(t, shouldReject, + "VerifyLinearCombinationWithGenerator accepted an invalid secp256k1 linear relationship!") + } +} + +func TestVRF_CheckSolidityLinearComination(t *testing.T) { + t.Parallel() + r := 
mrand.New(mrand.NewSource(8)) + for j := 0; j < numSamples(); j++ { + c := randomScalar(t, r) + cNum := secp256k1.ToInt(c) + p1, p1Pair := randomPointWithPair(t, r) + s := randomScalar(t, r) + sNum := secp256k1.ToInt(s) + p2, p2Pair := randomPointWithPair(t, r) + cp1 := (&secp256k1.Secp256k1{}).Point().Mul(c, p1) + cp1Pair := asPair(cp1) + sp2 := (&secp256k1.Secp256k1{}).Point().Mul(s, p2) + sp2Pair := asPair(sp2) + expected := asPair((&secp256k1.Secp256k1{}).Point().Add(cp1, sp2)) + _, _, z := vrfkey.ProjectiveECAdd(cp1, sp2) + zInv := big.NewInt(0).ModInverse(z, vrfkey.FieldSize) + actual, err := deployVRFTestHelper(t).LinearCombination(nil, cNum, p1Pair, + cp1Pair, sNum, p2Pair, sp2Pair, zInv) + require.NoError(t, err, "failed to compute c*p1+s*p2, on-chain") + assert.Equal(t, expected, actual, + "on-chain computation of c*p1+s*p2 gave wrong answer") + _, err = deployVRFTestHelper(t).LinearCombination(nil, big.NewInt(0).Add( + cNum, big.NewInt(1)), p1Pair, cp1Pair, sNum, p2Pair, sp2Pair, zInv) + assert.Error(t, err, + "on-chain LinearCombination accepted a bad product relation! ((c+1)*p1)") + assert.Contains(t, err.Error(), "First multiplication check failed", + "revert message wrong.") + _, err = deployVRFTestHelper(t).LinearCombination(nil, cNum, p1Pair, + cp1Pair, big.NewInt(0).Add(sNum, big.NewInt(1)), p2Pair, sp2Pair, zInv) + assert.Error(t, err, + "on-chain LinearCombination accepted a bad product relation! 
((s+1)*p2)") + assert.Contains(t, err.Error(), "Second multiplication check failed", + "revert message wrong.") + } +} + +func TestVRF_CompareSolidityScalarFromCurvePoints(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(9)) + for j := 0; j < numSamples(); j++ { + hash, hashPair := randomPointWithPair(t, r) + pk, pkPair := randomPointWithPair(t, r) + gamma, gammaPair := randomPointWithPair(t, r) + var uWitness [20]byte + require.NoError(t, utils.JustError(r.Read(uWitness[:])), + "failed to randomize uWitness") + v, vPair := randomPointWithPair(t, r) + expected := vrfkey.ScalarFromCurvePoints(hash, pk, gamma, uWitness, v) + actual, err := deployVRFTestHelper(t).ScalarFromCurvePoints(nil, hashPair, pkPair, + gammaPair, uWitness, vPair) + require.NoError(t, err, "on-chain ScalarFromCurvePoints calculation failed") + assert.Equal(t, expected, actual, + "on-chain ScalarFromCurvePoints output does not match off-chain output!") + } +} + +func TestVRF_MarshalProof(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(10)) + for j := 0; j < numSamples(); j++ { + sk := randomScalar(t, r) + skNum := secp256k1.ToInt(sk) + pk := vrfkey.MustNewV2XXXTestingOnly(skNum) + nonce := randomScalar(t, r) + randomSeed := randomUint256(t, r) + proof, err := pk.GenerateProofWithNonce(randomSeed, secp256k1.ToInt(nonce)) + require.NoError(t, err, "failed to generate VRF proof!") + mproof, err := proof2.MarshalForSolidityVerifier(&proof) + require.NoError(t, err, "failed to marshal VRF proof for on-chain verification") + response, err := deployVRFTestHelper(t).RandomValueFromVRFProof(nil, mproof[:]) + require.NoError(t, err, "failed on-chain to verify VRF proof / get its output") + require.True(t, response.Cmp(proof.Output) == 0, + "on-chain VRF output differs from off-chain!") + corruptionTargetByte := r.Int63n(int64(len(mproof))) + // Only the lower 160 bits of the word containing uWitness have any effect + inAddressZeroBytes := func(b int64) bool { return b >= 224 
&& b < 236 } + originalByte := mproof[corruptionTargetByte] + mproof[corruptionTargetByte]++ + _, err = deployVRFTestHelper(t).RandomValueFromVRFProof(nil, mproof[:]) + require.True(t, inAddressZeroBytes(corruptionTargetByte) || err != nil, + "VRF verification accepted a bad proof! Changed byte %d from %d to %d in %s, which is of length %d", + corruptionTargetByte, originalByte, mproof[corruptionTargetByte], + mproof.String(), len(mproof)) + require.True(t, + inAddressZeroBytes(corruptionTargetByte) || + strings.Contains(err.Error(), "invZ must be inverse of z") || + strings.Contains(err.Error(), "First multiplication check failed") || + strings.Contains(err.Error(), "Second multiplication check failed") || + strings.Contains(err.Error(), "cGammaWitness is not on curve") || + strings.Contains(err.Error(), "sHashWitness is not on curve") || + strings.Contains(err.Error(), "gamma is not on curve") || + strings.Contains(err.Error(), "addr(c*pk+s*g)≠_uWitness") || + strings.Contains(err.Error(), "public key is not on curve"), + "VRF verification returned an unknown error: %s", err, + ) + } +} diff --git a/core/services/vrf/solidity_cross_tests/vrf_v08_solidity_crosscheck_test.go b/core/services/vrf/solidity_cross_tests/vrf_v08_solidity_crosscheck_test.go new file mode 100644 index 00000000..4640b976 --- /dev/null +++ b/core/services/vrf/solidity_cross_tests/vrf_v08_solidity_crosscheck_test.go @@ -0,0 +1,377 @@ +package solidity_cross_tests_test + +import ( + "math/big" + mrand "math/rand" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_v08_verifier_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + proof2 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" + + "github.com/ethereum/go-ethereum/eth/ethconfig" + + "github.com/ethereum/go-ethereum/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/utils" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" +) + +// Note these tests are identical to the ones in vrf_solidity_crosscheck_test.go, +// (with the exception of TestVRFV08_InvalidPointCoordinates which is a new check in v0.8) +// except we are testing against the v0.8 implementation of VRF.sol. +func deployVRFV08TestHelper(t *testing.T) *solidity_vrf_v08_verifier_wrapper.VRFV08TestHelper { + auth := testutils.MustNewSimTransactor(t) + genesisData := core.GenesisAlloc{auth.From: {Balance: assets.Ether(100).ToInt()}} + gasLimit := uint32(ethconfig.Defaults.Miner.GasCeil) + backend := cltest.NewSimulatedBackend(t, genesisData, gasLimit) + _, _, verifier, err := solidity_vrf_v08_verifier_wrapper.DeployVRFV08TestHelper(auth, backend) + require.NoError(t, err, "failed to deploy VRF contract to simulated blockchain") + backend.Commit() + return verifier +} + +func TestVRFV08_InvalidPointCoordinates(t *testing.T) { + verifier := deployVRFV08TestHelper(t) + // A value outside [0, ..., FIELD_SIZE-1] should fail + _, err := verifier.IsOnCurve(nil, + [2]*big.Int{big.NewInt(10), secp256k1.FieldSize}) + require.Error(t, err) + assert.Equal(t, err.Error(), "execution reverted: invalid y-ordinate") + _, err = verifier.IsOnCurve(nil, + [2]*big.Int{secp256k1.FieldSize, big.NewInt(10)}) + require.Error(t, err) + assert.Equal(t, err.Error(), "execution reverted: invalid x-ordinate") + // Values inside should succeed + _, err = verifier.IsOnCurve(nil, + [2]*big.Int{big.NewInt(10), big.NewInt(0).Sub(secp256k1.FieldSize, big.NewInt(1))}) + require.NoError(t, err) + _, err = verifier.IsOnCurve(nil, + [2]*big.Int{big.NewInt(0).Sub(secp256k1.FieldSize, big.NewInt(1)), big.NewInt(10)}) + require.NoError(t, err) +} + +func TestVRFV08_CompareProjectiveECAddToVerifier(t 
*testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(11)) + for j := 0; j < numSamples(); j++ { + p := randomPoint(t, r) + q := randomPoint(t, r) + px, py := secp256k1.Coordinates(p) + qx, qy := secp256k1.Coordinates(q) + actualX, actualY, actualZ := vrfkey.ProjectiveECAdd(p, q) + verifier := deployVRFV08TestHelper(t) + expectedX, expectedY, expectedZ, err := verifier.ProjectiveECAdd( + nil, px, py, qx, qy) + require.NoError(t, err, "failed to compute secp256k1 sum in projective coords") + assert.Equal(t, [3]*big.Int{expectedX, expectedY, expectedZ}, + [3]*big.Int{actualX, actualY, actualZ}, + "got different answers on-chain vs off-chain, for ProjectiveECAdd") + } +} + +func TestVRFV08_CompareBigModExpToVerifier(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(0)) + for j := 0; j < numSamples(); j++ { + base := randomUint256(t, r) + exponent := randomUint256(t, r) + actual, err := deployVRFV08TestHelper(t).BigModExp(nil, base, exponent) + require.NoError(t, err, "while computing bigmodexp on-chain") + expected := big.NewInt(0).Exp(base, exponent, vrfkey.FieldSize) + assert.Equal(t, expected, actual, + "%x ** %x %% %x = %x ≠ %x from solidity calculation", + base, exponent, vrfkey.FieldSize, expected, actual) + } +} + +func TestVRFV08_CompareSquareRoot(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(1)) + for j := 0; j < numSamples(); j++ { + maybeSquare := randomUint256(t, r) // Might not be square; should get same result anyway + squareRoot, err := deployVRFV08TestHelper(t).SquareRoot(nil, maybeSquare) + require.NoError(t, err, "failed to compute square root on-chain") + golangSquareRoot := vrfkey.SquareRoot(maybeSquare) + assert.Equal(t, golangSquareRoot, squareRoot, + "expected square root in GF(fieldSize) of %x to be %x, got %x on-chain", + maybeSquare, golangSquareRoot, squareRoot) + assert.True(t, + (!vrfkey.IsSquare(maybeSquare)) || big.NewInt(1).Exp(squareRoot, + big.NewInt(2), vrfkey.FieldSize).Cmp(maybeSquare) == 0, 
+ "maybeSquare is a square, but failed to calculate its square root!") + assert.NotEqual(t, vrfkey.IsSquare(maybeSquare), vrfkey.IsSquare( + big.NewInt(1).Sub(vrfkey.FieldSize, maybeSquare)), + "negative of a non square should be square, and vice-versa, since -1 is not a square") + } +} + +func TestVRFV08_CompareYSquared(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(2)) + for i := 0; i < numSamples(); i++ { + x := randomUint256(t, r) + actual, err := deployVRFV08TestHelper(t).YSquared(nil, x) + require.NoError(t, err, "failed to compute y² given x, on-chain") + assert.Equal(t, vrfkey.YSquared(x), actual, + "different answers for y², on-chain vs off-chain") + } +} + +func TestVRFV08_CompareFieldHash(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(3)) + msg := make([]byte, 32) + for j := 0; j < numSamples(); j++ { + _, err := r.Read(msg) + require.NoError(t, err, "failed to randomize intended hash message") + actual, err := deployVRFV08TestHelper(t).FieldHash(nil, msg) + require.NoError(t, err, "failed to compute fieldHash on-chain") + expected := vrfkey.FieldHash(msg) + require.Equal(t, expected, actual, + "fieldHash value on-chain differs from off-chain") + } +} + +// randomKey deterministically generates a secp256k1 key. +// +// Never use this if cryptographic security is required +//func randomKey(t *testing.T, r *mrand.Rand) *ecdsa.PrivateKey { +// secretKey := vrfkey.FieldSize +// for secretKey.Cmp(vrfkey.FieldSize) >= 0 { // Keep picking until secretKey < fieldSize +// secretKey = randomUint256(t, r) +// } +// cKey := crypto.ToECDSAUnsafe(secretKey.Bytes()) +// return cKey +//} +// +// pair returns the inputs as a length-2 big.Int array. Useful for translating +// coordinates to the uint256[2]'s VRF.sol uses to represent secp256k1 points. 
+//func pair(x, y *big.Int) [2]*big.Int { return [2]*big.Int{x, y} } +//func asPair(p kyber.Point) [2]*big.Int { return pair(secp256k1.Coordinates(p)) } + +func TestVRFV08_CompareHashToCurve(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(4)) + for i := 0; i < numSamples(); i++ { + input := randomUint256(t, r) + cKey := randomKey(t, r) + pubKeyCoords := pair(cKey.X, cKey.Y) + actual, err := deployVRFV08TestHelper(t).HashToCurve(nil, pubKeyCoords, input) + require.NoError(t, err, "failed to compute hashToCurve on-chain") + pubKeyPoint := secp256k1.SetCoordinates(cKey.X, cKey.Y) + expected, err := vrfkey.HashToCurve(pubKeyPoint, input, func(*big.Int) {}) + require.NoError(t, err, "failed to compute HashToCurve in golang") + require.Equal(t, asPair(expected), actual, + "on-chain and off-chain calculations of HashToCurve gave different secp256k1 points") + } +} + +// randomPoint deterministically simulates a uniform sample of secp256k1 points, +// given r's seed +// +// Never use this if cryptographic security is required +//func randomPoint(t *testing.T, r *mrand.Rand) kyber.Point { +// p, err := vrfkey.HashToCurve(vrfkey.Generator, randomUint256(t, r), func(*big.Int) {}) +// require.NoError(t, err, +// "failed to hash random value to secp256k1 while generating random point") +// if r.Int63n(2) == 1 { // Uniform sample of ±p +// p.Neg(p) +// } +// return p +//} +// +//// randomPointWithPair returns a random secp256k1, both as a kyber.Point and as +//// a pair of *big.Int's. Useful for translating between the types needed by the +//// golang contract wrappers. 
+//func randomPointWithPair(t *testing.T, r *mrand.Rand) (kyber.Point, [2]*big.Int) { +// p := randomPoint(t, r) +// return p, asPair(p) +//} + +// randomScalar deterministically simulates a uniform sample of secp256k1 +// scalars, given r's seed +// +// Never use this if cryptographic security is required +//func randomScalar(t *testing.T, r *mrand.Rand) kyber.Scalar { +// s := randomUint256(t, r) +// for s.Cmp(secp256k1.GroupOrder) >= 0 { +// s = randomUint256(t, r) +// } +// return secp256k1.IntToScalar(s) +//} + +func TestVRFV08_CheckSolidityPointAddition(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(5)) + for j := 0; j < numSamples(); j++ { + p1 := randomPoint(t, r) + p2 := randomPoint(t, r) + p1x, p1y := secp256k1.Coordinates(p1) + p2x, p2y := secp256k1.Coordinates(p2) + psx, psy, psz, err := deployVRFV08TestHelper(t).ProjectiveECAdd( + nil, p1x, p1y, p2x, p2y) + require.NoError(t, err, "failed to compute ProjectiveECAdd, on-chain") + apx, apy, apz := vrfkey.ProjectiveECAdd(p1, p2) + require.Equal(t, []*big.Int{apx, apy, apz}, []*big.Int{psx, psy, psz}, + "got different values on-chain and off-chain for ProjectiveECAdd") + zInv := big.NewInt(1).ModInverse(psz, vrfkey.FieldSize) + require.Equal(t, big.NewInt(1).Mod(big.NewInt(1).Mul(psz, zInv), + vrfkey.FieldSize), big.NewInt(1), + "failed to calculate correct inverse of z ordinate") + actualSum, err := deployVRFV08TestHelper(t).AffineECAdd( + nil, pair(p1x, p1y), pair(p2x, p2y), zInv) + require.NoError(t, err, + "failed to deploy VRF contract to simulated blockchain") + assert.Equal(t, asPair((&secp256k1.Secp256k1{}).Point().Add(p1, p2)), + actualSum, + "got different answers, on-chain vs off-chain, for secp256k1 sum in affine coordinates") + } +} + +func TestVRFV08_CheckSolidityECMulVerify(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(6)) + for j := 0; j < numSamples(); j++ { + p := randomPoint(t, r) + pxy := pair(secp256k1.Coordinates(p)) + s := randomScalar(t, r) + 
product := asPair((&secp256k1.Secp256k1{}).Point().Mul(s, p)) + actual, err := deployVRFV08TestHelper(t).EcmulVerify(nil, pxy, secp256k1.ToInt(s), + product) + require.NoError(t, err, "failed to check on-chain that s*p=product") + assert.True(t, actual, + "EcmulVerify rejected a valid secp256k1 scalar product relation") + shouldReject, err := deployVRFV08TestHelper(t).EcmulVerify(nil, pxy, + big.NewInt(0).Add(secp256k1.ToInt(s), big.NewInt(1)), product) + require.NoError(t, err, "failed to check on-chain that (s+1)*p≠product") + assert.False(t, shouldReject, + "failed to reject a false secp256k1 scalar product relation") + } +} + +func TestVRFV08_CheckSolidityVerifyLinearCombinationWithGenerator(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(7)) + for j := 0; j < numSamples(); j++ { + c := randomScalar(t, r) + s := randomScalar(t, r) + p := randomPoint(t, r) + expectedPoint := (&secp256k1.Secp256k1{}).Point().Add( + (&secp256k1.Secp256k1{}).Point().Mul(c, p), + (&secp256k1.Secp256k1{}).Point().Mul(s, vrfkey.Generator)) // cp+sg + expectedAddress := secp256k1.EthereumAddress(expectedPoint) + pPair := asPair(p) + actual, err := deployVRFV08TestHelper(t).VerifyLinearCombinationWithGenerator(nil, + secp256k1.ToInt(c), pPair, secp256k1.ToInt(s), expectedAddress) + require.NoError(t, err, + "failed to check on-chain that secp256k1 linear relationship holds") + assert.True(t, actual, + "VerifyLinearCombinationWithGenerator rejected a valid secp256k1 linear relationship") + shouldReject, err := deployVRFV08TestHelper(t).VerifyLinearCombinationWithGenerator(nil, + big.NewInt(0).Add(secp256k1.ToInt(c), big.NewInt(1)), pPair, + secp256k1.ToInt(s), expectedAddress) + require.NoError(t, err, + "failed to check on-chain that address((c+1)*p+s*g)≠expectedAddress") + assert.False(t, shouldReject, + "VerifyLinearCombinationWithGenerator accepted an invalid secp256k1 linear relationship!") + } +} + +func TestVRFV08_CheckSolidityLinearComination(t *testing.T) { + 
t.Parallel() + r := mrand.New(mrand.NewSource(8)) + for j := 0; j < numSamples(); j++ { + c := randomScalar(t, r) + cNum := secp256k1.ToInt(c) + p1, p1Pair := randomPointWithPair(t, r) + s := randomScalar(t, r) + sNum := secp256k1.ToInt(s) + p2, p2Pair := randomPointWithPair(t, r) + cp1 := (&secp256k1.Secp256k1{}).Point().Mul(c, p1) + cp1Pair := asPair(cp1) + sp2 := (&secp256k1.Secp256k1{}).Point().Mul(s, p2) + sp2Pair := asPair(sp2) + expected := asPair((&secp256k1.Secp256k1{}).Point().Add(cp1, sp2)) + _, _, z := vrfkey.ProjectiveECAdd(cp1, sp2) + zInv := big.NewInt(0).ModInverse(z, vrfkey.FieldSize) + actual, err := deployVRFV08TestHelper(t).LinearCombination(nil, cNum, p1Pair, + cp1Pair, sNum, p2Pair, sp2Pair, zInv) + require.NoError(t, err, "failed to compute c*p1+s*p2, on-chain") + assert.Equal(t, expected, actual, + "on-chain computation of c*p1+s*p2 gave wrong answer") + _, err = deployVRFV08TestHelper(t).LinearCombination(nil, big.NewInt(0).Add( + cNum, big.NewInt(1)), p1Pair, cp1Pair, sNum, p2Pair, sp2Pair, zInv) + assert.Error(t, err, + "on-chain LinearCombination accepted a bad product relation! ((c+1)*p1)") + assert.Contains(t, err.Error(), "First mul check failed", + "revert message wrong.") + _, err = deployVRFV08TestHelper(t).LinearCombination(nil, cNum, p1Pair, + cp1Pair, big.NewInt(0).Add(sNum, big.NewInt(1)), p2Pair, sp2Pair, zInv) + assert.Error(t, err, + "on-chain LinearCombination accepted a bad product relation! 
((s+1)*p2)") + assert.Contains(t, err.Error(), "Second mul check failed", + "revert message wrong.") + } +} + +func TestVRFV08_CompareSolidityScalarFromCurvePoints(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(9)) + for j := 0; j < numSamples(); j++ { + hash, hashPair := randomPointWithPair(t, r) + pk, pkPair := randomPointWithPair(t, r) + gamma, gammaPair := randomPointWithPair(t, r) + var uWitness [20]byte + require.NoError(t, utils.JustError(r.Read(uWitness[:])), + "failed to randomize uWitness") + v, vPair := randomPointWithPair(t, r) + expected := vrfkey.ScalarFromCurvePoints(hash, pk, gamma, uWitness, v) + actual, err := deployVRFV08TestHelper(t).ScalarFromCurvePoints(nil, hashPair, pkPair, + gammaPair, uWitness, vPair) + require.NoError(t, err, "on-chain ScalarFromCurvePoints calculation failed") + assert.Equal(t, expected, actual, + "on-chain ScalarFromCurvePoints output does not match off-chain output!") + } +} + +func TestVRFV08_MarshalProof(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(10)) + for j := 0; j < numSamples(); j++ { + sk := randomScalar(t, r) + skNum := secp256k1.ToInt(sk) + pk := vrfkey.MustNewV2XXXTestingOnly(skNum) + nonce := randomScalar(t, r) + randomSeed := randomUint256(t, r) + proof, err := pk.GenerateProofWithNonce(randomSeed, secp256k1.ToInt(nonce)) + require.NoError(t, err, "failed to generate VRF proof!") + require.NoError(t, err, "failed to marshal VRF proof for on-chain verification") + seed, err := proof2.BigToSeed(randomSeed) + require.NoError(t, err) + // Don't care about the request commitment for this test. 
+ solProof, _, err := proof2.GenerateProofResponseFromProofV2(proof, proof2.PreSeedDataV2{ + PreSeed: seed, + }) + require.NoError(t, err) + response, err := deployVRFV08TestHelper(t).RandomValueFromVRFProof(nil, solidity_vrf_v08_verifier_wrapper.VRFProof{ + Pk: solProof.Pk, + Gamma: solProof.Gamma, + C: solProof.C, + S: solProof.S, + Seed: solProof.Seed, + UWitness: solProof.UWitness, + CGammaWitness: solProof.CGammaWitness, + SHashWitness: solProof.SHashWitness, + ZInv: solProof.ZInv, + }, randomSeed) + require.NoError(t, err, "failed on-chain to verify VRF proof / get its output") + require.True(t, response.Cmp(proof.Output) == 0, + "on-chain VRF output differs from off-chain!") + } +} diff --git a/core/services/vrf/solidity_ports.go b/core/services/vrf/solidity_ports.go new file mode 100644 index 00000000..16086c02 --- /dev/null +++ b/core/services/vrf/solidity_ports.go @@ -0,0 +1,74 @@ +package vrf + +// This file contains golang re-implementations of functions on the VRF solidity +// contract. They are used to verify correct operation of those functions, and +// also to efficiently compute zInv off-chain, which makes computing the linear +// combination of c*gamma+s*hash onchain much more efficient. + +// ////////////////////////////////////////// + +// FieldSize is number of elements in secp256k1's base field, i.e. GF(FieldSize) +// var FieldSize = utils.HexToBig( +// "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", +// ) + +// var bi = big.NewInt +// var zero, one, two, three, four, seven = bi(0), bi(1), bi(2), bi(3), bi(4), bi(7) + +// Compensate for awkward big.Int API. Can cause an extra allocation or two. 
+// func i() *big.Int { return new(big.Int) } +// func add(addend1, addend2 *big.Int) *big.Int { return i().Add(addend1, addend2) } +// func div(dividend, divisor *big.Int) *big.Int { return i().Div(dividend, divisor) } +// func equal(left, right *big.Int) bool { return left.Cmp(right) == 0 } +// func exp(base, exponent, modulus *big.Int) *big.Int { return i().Exp(base, exponent, modulus) } +// func mul(multiplicand, multiplier *big.Int) *big.Int { return i().Mul(multiplicand, multiplier) } +// func mod(dividend, divisor *big.Int) *big.Int { return i().Mod(dividend, divisor) } +// func sub(minuend, subtrahend *big.Int) *big.Int { return i().Sub(minuend, subtrahend) } + +// ////////////////////////////////////////// + +// type fieldElt = *big.Int + +// neg(f) is the negation of f in the base field +// func neg(f fieldElt) fieldElt { return sub(FieldSize, f) } + +// projectiveSub(x1, z1, x2, z2) is the projective coordinates of x1/z1 - x2/z2 +// func projectiveSub(x1, z1, x2, z2 fieldElt) (fieldElt, fieldElt) { +// num1 := mul(z2, x1) +// num2 := neg(mul(z1, x2)) +// return mod(add(num1, num2), FieldSize), mod(mul(z1, z2), FieldSize) +// } + +// projectiveMul(x1, z1, x2, z2) is projective coordinates of (x1/z1)×(x2/z2) +// func projectiveMul(x1, z1, x2, z2 fieldElt) (fieldElt, fieldElt) { +// return mul(x1, x2), mul(z1, z2) +// } + +// ProjectiveECAdd(px, py, qx, qy) duplicates the calculation in projective +// coordinates of VRF.sol#projectiveECAdd, so we can reliably get the +// denominator (i.e, z) +// func ProjectiveECAdd(p, q kyber.Point) (x, y, z fieldElt) { +// px, py := secp256k1.Coordinates(p) +// qx, qy := secp256k1.Coordinates(q) +// pz, qz := one, one +// lx := sub(qy, py) +// lz := sub(qx, px) + +// sx, dx := projectiveMul(lx, lz, lx, lz) +// sx, dx = projectiveSub(sx, dx, px, pz) +// sx, dx = projectiveSub(sx, dx, qx, qz) + +// sy, dy := projectiveSub(px, pz, sx, dx) +// sy, dy = projectiveMul(sy, dy, lx, lz) +// sy, dy = projectiveSub(sy, dy, py, pz) + 
+// var sz fieldElt +// if dx != dy { +// sx = mul(sx, dy) +// sy = mul(sy, dx) +// sz = mul(dx, dy) +// } else { +// sz = dx +// } +// return mod(sx, FieldSize), mod(sy, FieldSize), mod(sz, FieldSize) +// } diff --git a/core/services/vrf/v1/integration_test.go b/core/services/vrf/v1/integration_test.go new file mode 100644 index 00000000..958f8ac6 --- /dev/null +++ b/core/services/vrf/v1/integration_test.go @@ -0,0 +1,263 @@ +package v1_test + +import ( + "encoding/hex" + "math/big" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrftesthelpers" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" +) + +func TestIntegration_VRF_JPV2(t *testing.T) { + t.Parallel() + tests := []struct { + name string + eip1559 bool + }{ + {"legacy", false}, + {"eip1559", true}, + } + + for _, tt := range tests { + test := 
tt + t.Run(test.name, func(t *testing.T) { + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.EIP1559DynamicFees = &test.eip1559 + c.EVM[0].ChainID = (*ubig.Big)(testutils.SimulatedChainID) + }) + key1 := cltest.MustGenerateRandomKey(t) + key2 := cltest.MustGenerateRandomKey(t) + cu := vrftesthelpers.NewVRFCoordinatorUniverse(t, key1, key2) + incomingConfs := 2 + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, cu.Backend, key1, key2) + require.NoError(t, app.Start(testutils.Context(t))) + + jb, vrfKey := createVRFJobRegisterKey(t, cu, app, incomingConfs) + require.NoError(t, app.JobSpawner().CreateJob(&jb)) + + _, err := cu.ConsumerContract.TestRequestRandomness(cu.Carol, + vrfKey.PublicKey.MustHash(), big.NewInt(100)) + require.NoError(t, err) + + _, err = cu.ConsumerContract.TestRequestRandomness(cu.Carol, + vrfKey.PublicKey.MustHash(), big.NewInt(100)) + require.NoError(t, err) + cu.Backend.Commit() + t.Log("Sent 2 test requests") + // Mine the required number of blocks + // So our request gets confirmed. + for i := 0; i < incomingConfs; i++ { + cu.Backend.Commit() + } + var runs []pipeline.Run + gomega.NewWithT(t).Eventually(func() bool { + runs, err = app.PipelineORM().GetAllRuns() + require.NoError(t, err) + // It possible that we send the test request + // before the Job spawner has started the vrf services, which is fine + // the lb will backfill the logs. However we need to + // keep blocks coming in for the lb to send the backfilled logs. 
+ cu.Backend.Commit() + return len(runs) == 2 && runs[0].State == pipeline.RunStatusCompleted && runs[1].State == pipeline.RunStatusCompleted + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue()) + assert.Equal(t, pipeline.RunErrors([]null.String{{}}), runs[0].FatalErrors) + assert.Equal(t, 4, len(runs[0].PipelineTaskRuns)) + assert.Equal(t, 4, len(runs[1].PipelineTaskRuns)) + assert.NotNil(t, 0, runs[0].Outputs.Val) + assert.NotNil(t, 0, runs[1].Outputs.Val) + + // stop jobs as to not cause a race condition in geth simulated backend + // between job creating new tx and fulfillment logs polling below + require.NoError(t, app.JobSpawner().DeleteJob(jb.ID)) + + // Ensure the eth transaction gets confirmed on chain. + gomega.NewWithT(t).Eventually(func() bool { + orm := txmgr.NewTxStore(app.GetSqlxDB(), app.GetLogger(), app.GetConfig().Database()) + uc, err2 := orm.CountUnconfirmedTransactions(testutils.Context(t), key1.Address, testutils.SimulatedChainID) + require.NoError(t, err2) + return uc == 0 + }, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue()) + + // Assert the request was fulfilled on-chain. 
+ var rf []*solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequestFulfilled + gomega.NewWithT(t).Eventually(func() bool { + rfIterator, err2 := cu.RootContract.FilterRandomnessRequestFulfilled(nil) + require.NoError(t, err2, "failed to subscribe to RandomnessRequest logs") + rf = nil + for rfIterator.Next() { + rf = append(rf, rfIterator.Event) + } + return len(rf) == 2 + }, testutils.WaitTimeout(t), 500*time.Millisecond).Should(gomega.BeTrue()) + + // Check that each sending address sent one transaction + n1, err := cu.Backend.PendingNonceAt(testutils.Context(t), key1.Address) + require.NoError(t, err) + require.EqualValues(t, 1, n1) + + n2, err := cu.Backend.PendingNonceAt(testutils.Context(t), key2.Address) + require.NoError(t, err) + require.EqualValues(t, 1, n2) + }) + } +} + +func TestIntegration_VRF_WithBHS(t *testing.T) { + t.Parallel() + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true) + c.EVM[0].BlockBackfillDepth = ptr[uint32](500) + c.Feature.LogPoller = ptr(true) + c.EVM[0].FinalityDepth = ptr[uint32](2) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(time.Second) + c.EVM[0].ChainID = (*ubig.Big)(testutils.SimulatedChainID) + }) + key := cltest.MustGenerateRandomKey(t) + cu := vrftesthelpers.NewVRFCoordinatorUniverse(t, key) + incomingConfs := 2 + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, cu.Backend, key) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF Job but do not start it yet + jb, vrfKey := createVRFJobRegisterKey(t, cu, app, incomingConfs) + + sendingKeys := []string{key.Address.String()} + + // Create BHS Job and start it + bhsJob := vrftesthelpers.CreateAndStartBHSJob(t, sendingKeys, app, cu.BHSContractAddress.String(), + cu.RootContractAddress.String(), "", "", "", 0, 200, 0, 100) + + // Ensure log poller is ready and has all logs. 
+ require.NoError(t, app.GetRelayers().LegacyEVMChains().Slice()[0].LogPoller().Ready()) + require.NoError(t, app.GetRelayers().LegacyEVMChains().Slice()[0].LogPoller().Replay(testutils.Context(t), 1)) + + // Create a VRF request + _, err := cu.ConsumerContract.TestRequestRandomness(cu.Carol, + vrfKey.PublicKey.MustHash(), big.NewInt(100)) + require.NoError(t, err) + + cu.Backend.Commit() + requestBlock := cu.Backend.Blockchain().CurrentHeader().Number + + // Wait 101 blocks. + for i := 0; i < 100; i++ { + cu.Backend.Commit() + } + + // Wait for the blockhash to be stored + gomega.NewGomegaWithT(t).Eventually(func() bool { + cu.Backend.Commit() + _, err2 := cu.BHSContract.GetBlockhash(&bind.CallOpts{ + Pending: false, + From: common.Address{}, + BlockNumber: nil, + Context: nil, + }, requestBlock) + if err2 == nil { + return true + } else if strings.Contains(err2.Error(), "execution reverted") { + return false + } + t.Fatal(err2) + return false + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + // Wait another 160 blocks so that the request is outside the 256 block window + for i := 0; i < 160; i++ { + cu.Backend.Commit() + } + + // Start the VRF Job and wait until it's processed + require.NoError(t, app.JobSpawner().CreateJob(&jb)) + + var runs []pipeline.Run + gomega.NewWithT(t).Eventually(func() bool { + runs, err = app.PipelineORM().GetAllRuns() + require.NoError(t, err) + cu.Backend.Commit() + return len(runs) == 1 && runs[0].State == pipeline.RunStatusCompleted + }, 10*time.Second, 1*time.Second).Should(gomega.BeTrue()) + assert.Equal(t, pipeline.RunErrors([]null.String{{}}), runs[0].FatalErrors) + assert.Equal(t, 4, len(runs[0].PipelineTaskRuns)) + assert.NotNil(t, 0, runs[0].Outputs.Val) + + // stop jobs as to not cause a race condition in geth simulated backend + // between job creating new tx and fulfillment logs polling below + require.NoError(t, app.JobSpawner().DeleteJob(jb.ID)) + require.NoError(t, 
app.JobSpawner().DeleteJob(bhsJob.ID)) + + // Ensure the eth transaction gets confirmed on chain. + gomega.NewWithT(t).Eventually(func() bool { + orm := txmgr.NewTxStore(app.GetSqlxDB(), app.GetLogger(), app.GetConfig().Database()) + uc, err2 := orm.CountUnconfirmedTransactions(testutils.Context(t), key.Address, testutils.SimulatedChainID) + require.NoError(t, err2) + return uc == 0 + }, 5*time.Second, 100*time.Millisecond).Should(gomega.BeTrue()) + + // Assert the request was fulfilled on-chain. + gomega.NewWithT(t).Eventually(func() bool { + rfIterator, err := cu.RootContract.FilterRandomnessRequestFulfilled(nil) + require.NoError(t, err, "failed to subscribe to RandomnessRequest logs") + var rf []*solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequestFulfilled + for rfIterator.Next() { + rf = append(rf, rfIterator.Event) + } + return len(rf) == 1 + }, 5*time.Second, 500*time.Millisecond).Should(gomega.BeTrue()) +} + +func createVRFJobRegisterKey(t *testing.T, u vrftesthelpers.CoordinatorUniverse, app *cltest.TestApplication, incomingConfs int) (job.Job, vrfkey.KeyV2) { + vrfKey, err := app.KeyStore.VRF().Create() + require.NoError(t, err) + + jid := uuid.MustParse("96a8a26f-d426-4784-8d8f-fb387d4d8345") + expectedOnChainJobID, err := hex.DecodeString("3936613861323666643432363437383438643866666233383764346438333435") + require.NoError(t, err) + s := testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + JobID: jid.String(), + Name: "vrf-primary", + CoordinatorAddress: u.RootContractAddress.String(), + MinIncomingConfirmations: incomingConfs, + PublicKey: vrfKey.PublicKey.String(), + EVMChainID: testutils.SimulatedChainID.String(), + }).Toml() + jb, err := vrfcommon.ValidatedVRFSpec(s) + require.NoError(t, err) + assert.Equal(t, expectedOnChainJobID, jb.ExternalIDEncodeStringToTopic().Bytes()) + + p, err := vrfKey.PublicKey.Point() + require.NoError(t, err) + _, err = u.RootContract.RegisterProvingKey( + u.Neil, big.NewInt(7), u.Neil.From, 
pair(secp256k1.Coordinates(p)), jb.ExternalIDEncodeStringToTopic()) + require.NoError(t, err) + u.Backend.Commit() + return jb, vrfKey +} + +func ptr[T any](t T) *T { return &t } + +func pair(x, y *big.Int) [2]*big.Int { return [2]*big.Int{x, y} } diff --git a/core/services/vrf/v1/listener_v1.go b/core/services/vrf/v1/listener_v1.go new file mode 100644 index 00000000..970c6306 --- /dev/null +++ b/core/services/vrf/v1/listener_v1.go @@ -0,0 +1,556 @@ +package v1 + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "math/big" + "strings" + "sync" + "time" + + "github.com/avast/retry-go/v4" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + heaps "github.com/theodesp/go-heaps" + "github.com/theodesp/go-heaps/pairing" + + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/utils/mailbox" + "github.com/goplugin/plugin-common/pkg/utils/mathutil" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/recovery" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + _ log.Listener = &Listener{} + _ job.ServiceCtx = &Listener{} +) + +const callbacksTimeout = 10 * time.Second + +type request struct { + confirmedAtBlock uint64 + req *solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest + lb log.Broadcast + utcTimestamp time.Time +} + +type Listener struct { + services.StateMachine + + Cfg vrfcommon.Config + 
FeeCfg vrfcommon.FeeConfig + L logger.SugaredLogger + Coordinator *solidity_vrf_coordinator_interface.VRFCoordinator + PipelineRunner pipeline.Runner + Job job.Job + Q pg.Q + GethKs vrfcommon.GethKeyStore + MailMon *mailbox.Monitor + ReqLogs *mailbox.Mailbox[log.Broadcast] + ChStop services.StopChan + WaitOnStop chan struct{} + NewHead chan struct{} + LatestHead uint64 + LatestHeadMu sync.RWMutex + Chain legacyevm.Chain + + // We can keep these pending logs in memory because we + // only mark them confirmed once we send a corresponding fulfillment transaction. + // So on node restart in the middle of processing, the lb will resend them. + ReqsMu sync.Mutex // Both goroutines write to Reqs + Reqs []request + ReqAdded func() // A simple debug helper + + // Data structures for reorg attack protection + // We want a map so we can do an O(1) count update every fulfillment log we get. + RespCountMu sync.Mutex + ResponseCount map[[32]byte]uint64 + // This auxiliary heap is to used when we need to purge the + // ResponseCount map - we repeatedly want remove the minimum log. + // You could use a sorted list if the completed logs arrive in order, but they may not. + BlockNumberToReqID *pairing.PairHeap + + // Deduper prevents processing duplicate requests from the log broadcaster. 
+ Deduper *vrfcommon.LogDeduper +} + +// Note that we have 2 seconds to do this processing +func (lsn *Listener) OnNewLongestChain(_ context.Context, head *evmtypes.Head) { + lsn.setLatestHead(head) + select { + case lsn.NewHead <- struct{}{}: + default: + } +} + +func (lsn *Listener) setLatestHead(h *evmtypes.Head) { + lsn.LatestHeadMu.Lock() + defer lsn.LatestHeadMu.Unlock() + num := uint64(h.Number) + if num > lsn.LatestHead { + lsn.LatestHead = num + } +} + +func (lsn *Listener) getLatestHead() uint64 { + lsn.LatestHeadMu.RLock() + defer lsn.LatestHeadMu.RUnlock() + return lsn.LatestHead +} + +// Start complies with job.Service +func (lsn *Listener) Start(ctx context.Context) error { + return lsn.StartOnce("VRFListener", func() error { + spec := job.LoadDefaultVRFPollPeriod(*lsn.Job.VRFSpec) + + unsubscribeLogs := lsn.Chain.LogBroadcaster().Register(lsn, log.ListenerOpts{ + Contract: lsn.Coordinator.Address(), + ParseLog: lsn.Coordinator.ParseLog, + LogsWithTopics: map[common.Hash][][]log.Topic{ + solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest{}.Topic(): { + { + log.Topic(lsn.Job.ExternalIDEncodeStringToTopic()), + log.Topic(lsn.Job.ExternalIDEncodeBytesToTopic()), + }, + }, + solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequestFulfilled{}.Topic(): {}, + }, + // If we set this to MinIncomingConfirmations, since both the log broadcaster and head broadcaster get heads + // at the same time from the head tracker whether we process the log at MinIncomingConfirmations or + // MinIncomingConfirmations+1 would depend on the order in which their OnNewLongestChain callbacks got + // called. + // We listen one block early so that the log can be stored in pendingRequests to avoid this. + MinIncomingConfirmations: spec.MinIncomingConfirmations - 1, + ReplayStartedCallback: lsn.ReplayStartedCallback, + }) + // Subscribe to the head broadcaster for handling + // per request conf requirements. 
+ latestHead, unsubscribeHeadBroadcaster := lsn.Chain.HeadBroadcaster().Subscribe(lsn) + if latestHead != nil { + lsn.setLatestHead(latestHead) + } + + // Populate the response count map + lsn.RespCountMu.Lock() + defer lsn.RespCountMu.Unlock() + respCount, err := lsn.GetStartingResponseCountsV1(ctx) + if err != nil { + return err + } + lsn.ResponseCount = respCount + go lsn.RunLogListener([]func(){unsubscribeLogs}, spec.MinIncomingConfirmations) + go lsn.RunHeadListener(unsubscribeHeadBroadcaster) + + lsn.MailMon.Monitor(lsn.ReqLogs, "VRFListener", "RequestLogs", fmt.Sprint(lsn.Job.ID)) + return nil + }) +} + +func (lsn *Listener) GetStartingResponseCountsV1(ctx context.Context) (respCount map[[32]byte]uint64, err error) { + respCounts := make(map[[32]byte]uint64) + var latestBlockNum *big.Int + // Retry client call for LatestBlockHeight if fails + // Want to avoid failing startup due to potential faulty RPC call + err = retry.Do(func() error { + latestBlockNum, err = lsn.Chain.Client().LatestBlockHeight(ctx) + return err + }, retry.Attempts(10), retry.Delay(500*time.Millisecond)) + if err != nil { + return nil, err + } + if latestBlockNum == nil { + return nil, errors.New("LatestBlockHeight return nil block num") + } + confirmedBlockNum := latestBlockNum.Int64() - int64(lsn.Chain.Config().EVM().FinalityDepth()) + // Only check as far back as the evm finality depth for completed transactions. + var counts []vrfcommon.RespCountEntry + counts, err = vrfcommon.GetRespCounts(ctx, lsn.Chain.TxManager(), lsn.Chain.Client().ConfiguredChainID(), confirmedBlockNum) + if err != nil { + // Continue with an empty map, do not block job on this. 
+ lsn.L.Errorw("Unable to read previous confirmed fulfillments", "err", err) + return respCounts, nil + } + + for _, c := range counts { + // Remove the quotes from the json + req := strings.Replace(c.RequestID, `"`, ``, 2) + // Remove the 0x prefix + b, err := hex.DecodeString(req[2:]) + if err != nil { + lsn.L.Errorw("Unable to read fulfillment", "err", err, "reqID", c.RequestID) + continue + } + var reqID [32]byte + copy(reqID[:], b) + respCounts[reqID] = uint64(c.Count) + } + + return respCounts, nil +} + +// Removes and returns all the confirmed logs from +// the pending queue. +func (lsn *Listener) extractConfirmedLogs() []request { + lsn.ReqsMu.Lock() + defer lsn.ReqsMu.Unlock() + vrfcommon.UpdateQueueSize(lsn.Job.Name.ValueOrZero(), lsn.Job.ExternalJobID, vrfcommon.V1, len(lsn.Reqs)) + var toProcess, toKeep []request + for i := 0; i < len(lsn.Reqs); i++ { + if lsn.Reqs[i].confirmedAtBlock <= lsn.getLatestHead() { + toProcess = append(toProcess, lsn.Reqs[i]) + } else { + toKeep = append(toKeep, lsn.Reqs[i]) + } + } + lsn.Reqs = toKeep + return toProcess +} + +type fulfilledReq struct { + blockNumber uint64 + reqID [32]byte +} + +func (a fulfilledReq) Compare(b heaps.Item) int { + a1 := a + a2 := b.(fulfilledReq) + switch { + case a1.blockNumber > a2.blockNumber: + return 1 + case a1.blockNumber < a2.blockNumber: + return -1 + default: + return 0 + } +} + +// Remove all entries 10000 blocks or older +// to avoid a memory leak. 
+func (lsn *Listener) pruneConfirmedRequestCounts() { + lsn.RespCountMu.Lock() + defer lsn.RespCountMu.Unlock() + min := lsn.BlockNumberToReqID.FindMin() + for min != nil { + m := min.(fulfilledReq) + if m.blockNumber > (lsn.getLatestHead() - 10000) { + break + } + delete(lsn.ResponseCount, m.reqID) + lsn.BlockNumberToReqID.DeleteMin() + min = lsn.BlockNumberToReqID.FindMin() + } +} + +// Listen for new heads +func (lsn *Listener) RunHeadListener(unsubscribe func()) { + ctx, cancel := lsn.ChStop.NewCtx() + defer cancel() + + for { + select { + case <-ctx.Done(): + unsubscribe() + lsn.WaitOnStop <- struct{}{} + return + case <-lsn.NewHead: + recovery.WrapRecover(lsn.L, func() { + toProcess := lsn.extractConfirmedLogs() + var toRetry []request + for _, r := range toProcess { + if success := lsn.ProcessRequest(ctx, r); !success { + toRetry = append(toRetry, r) + } + } + lsn.ReqsMu.Lock() + defer lsn.ReqsMu.Unlock() + lsn.Reqs = append(lsn.Reqs, toRetry...) + lsn.pruneConfirmedRequestCounts() + }) + } + } +} + +func (lsn *Listener) RunLogListener(unsubscribes []func(), minConfs uint32) { + lsn.L.Infow("Listening for run requests", + "gasLimit", lsn.FeeCfg.LimitDefault(), + "minConfs", minConfs) + for { + select { + case <-lsn.ChStop: + for _, f := range unsubscribes { + f() + } + lsn.WaitOnStop <- struct{}{} + return + case <-lsn.ReqLogs.Notify(): + // Process all the logs in the queue if one is added + for { + lb, exists := lsn.ReqLogs.Retrieve() + if !exists { + break + } + recovery.WrapRecover(lsn.L, func() { + lsn.handleLog(lb, minConfs) + }) + } + } + } +} + +func (lsn *Listener) handleLog(lb log.Broadcast, minConfs uint32) { + lggr := lsn.L.With( + "log", lb.String(), + "decodedLog", lb.DecodedLog(), + "blockNumber", lb.RawLog().BlockNumber, + "blockHash", lb.RawLog().BlockHash, + "txHash", lb.RawLog().TxHash, + ) + + lggr.Infow("Log received") + if v, ok := lb.DecodedLog().(*solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequestFulfilled); ok { + 
lggr.Debugw("Got fulfillment log", + "requestID", hex.EncodeToString(v.RequestId[:])) + if !lsn.shouldProcessLog(lb) { + return + } + lsn.RespCountMu.Lock() + lsn.ResponseCount[v.RequestId]++ + lsn.BlockNumberToReqID.Insert(fulfilledReq{ + blockNumber: v.Raw.BlockNumber, + reqID: v.RequestId, + }) + lsn.RespCountMu.Unlock() + lsn.markLogAsConsumed(lb) + return + } + + req, err := lsn.Coordinator.ParseRandomnessRequest(lb.RawLog()) + if err != nil { + lggr.Errorw("Failed to parse RandomnessRequest log", "err", err) + if !lsn.shouldProcessLog(lb) { + return + } + lsn.markLogAsConsumed(lb) + return + } + + confirmedAt := lsn.getConfirmedAt(req, minConfs) + lsn.ReqsMu.Lock() + lsn.Reqs = append(lsn.Reqs, request{ + confirmedAtBlock: confirmedAt, + req: req, + lb: lb, + utcTimestamp: time.Now().UTC(), + }) + lsn.ReqAdded() + lsn.ReqsMu.Unlock() + lggr.Infow("Enqueued randomness request", + "requestID", hex.EncodeToString(req.RequestID[:]), + "requestJobID", hex.EncodeToString(req.JobID[:]), + "keyHash", hex.EncodeToString(req.KeyHash[:]), + "fee", req.Fee, + "sender", req.Sender.Hex(), + "txHash", lb.RawLog().TxHash) +} + +func (lsn *Listener) shouldProcessLog(lb log.Broadcast) bool { + consumed, err := lsn.Chain.LogBroadcaster().WasAlreadyConsumed(lb) + if err != nil { + lsn.L.Errorw("Could not determine if log was already consumed", "err", err, "txHash", lb.RawLog().TxHash) + // Do not process, let lb resend it as a retry mechanism. 
+ return false + } + return !consumed +} + +func (lsn *Listener) markLogAsConsumed(lb log.Broadcast) { + err := lsn.Chain.LogBroadcaster().MarkConsumed(lb) + lsn.L.ErrorIf(err, fmt.Sprintf("Unable to mark log %v as consumed", lb.String())) +} + +func (lsn *Listener) getConfirmedAt(req *solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest, minConfs uint32) uint64 { + lsn.RespCountMu.Lock() + defer lsn.RespCountMu.Unlock() + newConfs := uint64(minConfs) * (1 << lsn.ResponseCount[req.RequestID]) + // We cap this at 200 because solidity only supports the most recent 256 blocks + // in the contract so if it was older than that, fulfillments would start failing + // without the blockhash store feeder. We use 200 to give the node plenty of time + // to fulfill even on fast chains. + if newConfs > 200 { + newConfs = 200 + } + if lsn.ResponseCount[req.RequestID] > 0 { + lsn.L.Warnw("Duplicate request found after fulfillment, doubling incoming confirmations", + "txHash", req.Raw.TxHash, + "blockNumber", req.Raw.BlockNumber, + "blockHash", req.Raw.BlockHash, + "requestID", hex.EncodeToString(req.RequestID[:]), + "newConfs", newConfs) + vrfcommon.IncDupeReqs(lsn.Job.Name.ValueOrZero(), lsn.Job.ExternalJobID, vrfcommon.V1) + } + return req.Raw.BlockNumber + newConfs +} + +// ProcessRequest attempts to process the VRF request. Returns true if successful, false otherwise. +func (lsn *Listener) ProcessRequest(ctx context.Context, req request) bool { + // This check to see if the log was consumed needs to be in the same + // goroutine as the mark consumed to avoid processing duplicates. 
+ if !lsn.shouldProcessLog(req.lb) { + return true + } + + lggr := lsn.L.With( + "log", req.lb.String(), + "requestID", hex.EncodeToString(req.req.RequestID[:]), + "txHash", req.req.Raw.TxHash, + "keyHash", hex.EncodeToString(req.req.KeyHash[:]), + "jobID", hex.EncodeToString(req.req.JobID[:]), + "sender", req.req.Sender.Hex(), + "blockNumber", req.req.Raw.BlockNumber, + "blockHash", req.req.Raw.BlockHash, + "seed", req.req.Seed, + "fee", req.req.Fee, + ) + + // Check if the vrf req has already been fulfilled + // Note we have to do this after the log has been confirmed. + // If not, the following problematic (example) scenario can arise: + // 1. Request log comes in block 100 + // 2. Fulfill the request in block 110 + // 3. Reorg both request and fulfillment, now request lives at + // block 101 and fulfillment lives at block 115 + // 4. The eth node sees the request reorg and tells us about it. We do our fulfillment + // check and the node says its already fulfilled (hasn't seen the fulfillment reorged yet), + // so we don't process the request. + // Subtract 5 since the newest block likely isn't indexed yet and will cause "header not + // found" errors. + m := mathutil.Max(req.confirmedAtBlock, lsn.getLatestHead()-5) + ctx, cancel := context.WithTimeout(ctx, callbacksTimeout) + defer cancel() + callback, err := lsn.Coordinator.Callbacks(&bind.CallOpts{ + BlockNumber: big.NewInt(int64(m)), + Context: ctx, + }, req.req.RequestID) + if err != nil { + lggr.Errorw("Unable to check if already fulfilled, processing anyways", "err", err) + } else if utils.IsEmpty(callback.SeedAndBlockNum[:]) { + // If seedAndBlockNumber is zero then the response has been fulfilled + // and we should skip it + lggr.Infow("Request already fulfilled") + lsn.markLogAsConsumed(req.lb) + return true + } + + // Check if we can ignore the request due to its age. 
+ if time.Now().UTC().Sub(req.utcTimestamp) >= lsn.Job.VRFSpec.RequestTimeout { + lggr.Infow("Request too old, dropping it") + lsn.markLogAsConsumed(req.lb) + return true + } + + lggr.Infow("Processing log request") + + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "jobSpec": map[string]interface{}{ + "databaseID": lsn.Job.ID, + "externalJobID": lsn.Job.ExternalJobID, + "name": lsn.Job.Name.ValueOrZero(), + "publicKey": lsn.Job.VRFSpec.PublicKey[:], + "from": lsn.fromAddresses(), + "evmChainID": lsn.Job.VRFSpec.EVMChainID.String(), + }, + "jobRun": map[string]interface{}{ + "logBlockHash": req.req.Raw.BlockHash[:], + "logBlockNumber": req.req.Raw.BlockNumber, + "logTxHash": req.req.Raw.TxHash, + "logTopics": req.req.Raw.Topics, + "logData": req.req.Raw.Data, + }, + }) + + run := pipeline.NewRun(*lsn.Job.PipelineSpec, vars) + // The VRF pipeline has no async tasks, so we don't need to check for `incomplete` + if _, err = lsn.PipelineRunner.Run(ctx, run, lggr, true, func(tx pg.Queryer) error { + // Always mark consumed regardless of whether the proof failed or not. + if err = lsn.Chain.LogBroadcaster().MarkConsumed(req.lb, pg.WithQueryer(tx)); err != nil { + lggr.Errorw("Failed mark consumed", "err", err) + } + return nil + }); err != nil { + lggr.Errorw("Failed to execute VRFV1 pipeline run", + "err", err) + return false + } + + // At this point the pipeline runner has completed the run of the pipeline, + // but it may have errored out. + if run.HasErrors() || run.HasFatalErrors() { + lggr.Error("VRFV1 pipeline run failed with errors", + "runErrors", run.AllErrors.ToError(), + "runFatalErrors", run.FatalErrors.ToError(), + ) + return false + } + + // At this point, the pipeline run executed successfully, and we mark + // the request as processed. 
+ lggr.Infow("Executed VRFV1 fulfillment run") + vrfcommon.IncProcessedReqs(lsn.Job.Name.ValueOrZero(), lsn.Job.ExternalJobID, vrfcommon.V1) + return true +} + +// Close complies with job.Service +func (lsn *Listener) Close() error { + return lsn.StopOnce("VRFListener", func() error { + close(lsn.ChStop) + <-lsn.WaitOnStop // Log Listener + <-lsn.WaitOnStop // Head Listener + return lsn.ReqLogs.Close() + }) +} + +func (lsn *Listener) HandleLog(lb log.Broadcast) { + if !lsn.Deduper.ShouldDeliver(lb.RawLog()) { + lsn.L.Tracew("skipping duplicate log broadcast", "log", lb.RawLog()) + return + } + + wasOverCapacity := lsn.ReqLogs.Deliver(lb) + if wasOverCapacity { + lsn.L.Error("log mailbox is over capacity - dropped the oldest log") + vrfcommon.IncDroppedReqs(lsn.Job.Name.ValueOrZero(), lsn.Job.ExternalJobID, vrfcommon.V1, vrfcommon.ReasonMailboxSize) + } +} + +func (lsn *Listener) fromAddresses() []common.Address { + var addresses []common.Address + for _, a := range lsn.Job.VRFSpec.FromAddresses { + addresses = append(addresses, a.Address()) + } + return addresses +} + +// Job complies with log.Listener +func (lsn *Listener) JobID() int32 { + return lsn.Job.ID +} + +// ReplayStartedCallback is called by the log broadcaster when a replay is about to start. +func (lsn *Listener) ReplayStartedCallback() { + // Clear the log Deduper cache so that we don't incorrectly ignore logs that have been sent that + // are already in the cache. 
+ lsn.Deduper.Clear() +} diff --git a/core/services/vrf/v1/listener_v1_test.go b/core/services/vrf/v1/listener_v1_test.go new file mode 100644 index 00000000..826745a5 --- /dev/null +++ b/core/services/vrf/v1/listener_v1_test.go @@ -0,0 +1,76 @@ +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/theodesp/go-heaps/pairing" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" +) + +func TestConfirmedLogExtraction(t *testing.T) { + lsn := Listener{} + lsn.Reqs = []request{ + { + confirmedAtBlock: 2, + req: &solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest{ + RequestID: utils.PadByteToHash(0x02), + }, + }, + { + confirmedAtBlock: 1, + req: &solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest{ + RequestID: utils.PadByteToHash(0x01), + }, + }, + { + confirmedAtBlock: 3, + req: &solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest{ + RequestID: utils.PadByteToHash(0x03), + }, + }, + } + // None are confirmed + lsn.LatestHead = 0 + logs := lsn.extractConfirmedLogs() + assert.Equal(t, 0, len(logs)) // None ready + assert.Equal(t, 3, len(lsn.Reqs)) // All pending + lsn.LatestHead = 2 + logs = lsn.extractConfirmedLogs() + assert.Equal(t, 2, len(logs)) // 1 and 2 should be confirmed + assert.Equal(t, 1, len(lsn.Reqs)) // 3 is still pending + assert.Equal(t, uint64(3), lsn.Reqs[0].confirmedAtBlock) + // Another block way in the future should clear it + lsn.LatestHead = 10 + logs = lsn.extractConfirmedLogs() + assert.Equal(t, 1, len(logs)) // remaining log + assert.Equal(t, 0, len(lsn.Reqs)) // all processed +} + +func TestResponsePruning(t *testing.T) { + lsn := Listener{} + lsn.LatestHead = 10000 + lsn.ResponseCount = map[[32]byte]uint64{ + utils.PadByteToHash(0x00): 1, + utils.PadByteToHash(0x01): 1, + } + lsn.BlockNumberToReqID = pairing.New() + 
lsn.BlockNumberToReqID.Insert(fulfilledReq{ + blockNumber: 1, + reqID: utils.PadByteToHash(0x00), + }) + lsn.BlockNumberToReqID.Insert(fulfilledReq{ + blockNumber: 2, + reqID: utils.PadByteToHash(0x01), + }) + lsn.pruneConfirmedRequestCounts() + assert.Equal(t, 2, len(lsn.ResponseCount)) + lsn.LatestHead = 10001 + lsn.pruneConfirmedRequestCounts() + assert.Equal(t, 1, len(lsn.ResponseCount)) + lsn.LatestHead = 10002 + lsn.pruneConfirmedRequestCounts() + assert.Equal(t, 0, len(lsn.ResponseCount)) +} diff --git a/core/services/vrf/v1/listener_v1_test_helpers.go b/core/services/vrf/v1/listener_v1_test_helpers.go new file mode 100644 index 00000000..f9532bf8 --- /dev/null +++ b/core/services/vrf/v1/listener_v1_test_helpers.go @@ -0,0 +1,36 @@ +package v1 + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func (lsn *Listener) SetReqAdded(fn func()) { + lsn.ReqAdded = fn +} + +func (lsn *Listener) Stop(t *testing.T) { + assert.NoError(t, lsn.Close()) + select { + case <-lsn.WaitOnStop: + case <-time.After(time.Second): + t.Error("did not clean up properly") + } +} + +func (lsn *Listener) ReqsConfirmedAt() (us []uint64) { + for i := range lsn.Reqs { + us = append(us, lsn.Reqs[i].confirmedAtBlock) + } + return us +} + +func (lsn *Listener) RespCount(reqIDBytes [32]byte) uint64 { + return lsn.ResponseCount[reqIDBytes] +} + +func (lsn *Listener) SetRespCount(reqIDBytes [32]byte, c uint64) { + lsn.ResponseCount[reqIDBytes] = c +} diff --git a/core/services/vrf/v2/bhs_feeder_test.go b/core/services/vrf/v2/bhs_feeder_test.go new file mode 100644 index 00000000..cb449944 --- /dev/null +++ b/core/services/vrf/v2/bhs_feeder_test.go @@ -0,0 +1,91 @@ +package v2_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrftesthelpers" +) + +func TestStartHeartbeats(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 2) + + vrfKey := cltest.MustGenerateRandomKey(t) + sendEth(t, ownerKey, uni.backend, vrfKey.Address, 10) + gasLanePriceWei := assets.GWei(1) + gasLimit := 3_000_000 + + consumers := uni.vrfConsumers + + // generate n BHS keys to make sure BHS job rotates sending keys + var bhsKeyAddresses []string + var keySpecificOverrides []toml.KeySpecific + var keys []interface{} + for i := 0; i < len(consumers); i++ { + bhsKey := cltest.MustGenerateRandomKey(t) + bhsKeyAddresses = append(bhsKeyAddresses, bhsKey.Address.String()) + keys = append(keys, bhsKey) + keySpecificOverrides = append(keySpecificOverrides, toml.KeySpecific{ + Key: ptr(bhsKey.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + }) + sendEth(t, ownerKey, uni.backend, bhsKey.Address, 10) + } + keySpecificOverrides = append(keySpecificOverrides, toml.KeySpecific{ + // Gas lane. 
+ Key: ptr(vrfKey.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + }) + + keys = append(keys, ownerKey, vrfKey) + + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, gasLanePriceWei, keySpecificOverrides...)(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].FinalityDepth = ptr[uint32](2) + c.EVM[0].GasEstimator.LimitDefault = ptr(uint32(gasLimit)) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(time.Second) + }) + + heartbeatPeriod := 5 * time.Second + + t.Run("bhs_feeder_startheartbeats_happy_path", func(tt *testing.T) { + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, keys...) + require.NoError(t, app.Start(testutils.Context(t))) + + _ = vrftesthelpers.CreateAndStartBHSJob( + t, bhsKeyAddresses, app, uni.bhsContractAddress.String(), "", + uni.rootContractAddress.String(), "", "", 0, 200, heartbeatPeriod, 100) + + // Ensure log poller is ready and has all logs. + require.NoError(t, app.GetRelayers().LegacyEVMChains().Slice()[0].LogPoller().Ready()) + require.NoError(t, app.GetRelayers().LegacyEVMChains().Slice()[0].LogPoller().Replay(testutils.Context(t), 1)) + + initTxns := 260 + // Wait 260 blocks. + for i := 0; i < initTxns; i++ { + uni.backend.Commit() + } + diff := heartbeatPeriod + 1*time.Second + t.Logf("Sleeping %.2f seconds before checking blockhash in BHS added by BHS_Heartbeats_Service\n", diff.Seconds()) + time.Sleep(diff) + // storeEarliest in BHS contract stores blocktip - 256 in the Blockhash Store (BHS) + tipHeader, err := uni.backend.HeaderByNumber(testutils.Context(t), nil) + require.NoError(t, err) + // the storeEarliest transaction will end up in a new block, hence the + 1 below. 
+ blockNumberStored := tipHeader.Number.Uint64() - 256 + 1 + verifyBlockhashStored(t, uni.coordinatorV2UniverseCommon, blockNumberStored) + }) +} diff --git a/core/services/vrf/v2/coordinator_v2x_interface.go b/core/services/vrf/v2/coordinator_v2x_interface.go new file mode 100644 index 00000000..f8babf82 --- /dev/null +++ b/core/services/vrf/v2/coordinator_v2x_interface.go @@ -0,0 +1,1119 @@ +package v2 + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2_5" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/extraargs" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" +) + +var ( + _ CoordinatorV2_X = (*coordinatorV2)(nil) + _ CoordinatorV2_X = (*coordinatorV2_5)(nil) +) + +// CoordinatorV2_X is an interface that allows us to use the same code for +// both the V2 and V2Plus coordinators. 
+type CoordinatorV2_X interface { + Address() common.Address + ParseRandomWordsRequested(log types.Log) (RandomWordsRequested, error) + ParseRandomWordsFulfilled(log types.Log) (RandomWordsFulfilled, error) + RequestRandomWords(opts *bind.TransactOpts, keyHash [32]byte, subID *big.Int, requestConfirmations uint16, callbackGasLimit uint32, numWords uint32, payInEth bool) (*types.Transaction, error) + AddConsumer(opts *bind.TransactOpts, subID *big.Int, consumer common.Address) (*types.Transaction, error) + CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) + GetSubscription(opts *bind.CallOpts, subID *big.Int) (Subscription, error) + GetConfig(opts *bind.CallOpts) (Config, error) + ParseLog(log types.Log) (generated.AbigenLog, error) + OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) + Withdraw(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) + WithdrawNative(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) + LogsWithTopics(keyHash common.Hash) map[common.Hash][][]log.Topic + Version() vrfcommon.Version + RegisterProvingKey(opts *bind.TransactOpts, oracle *common.Address, publicProvingKey [2]*big.Int, maxGasPrice *uint64) (*types.Transaction, error) + FilterSubscriptionCreated(opts *bind.FilterOpts, subID []*big.Int) (SubscriptionCreatedIterator, error) + FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subID []*big.Int, sender []common.Address) (RandomWordsRequestedIterator, error) + FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestID []*big.Int, subID []*big.Int) (RandomWordsFulfilledIterator, error) + TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) + RemoveConsumer(opts *bind.TransactOpts, subID *big.Int, consumer common.Address) (*types.Transaction, error) + CancelSubscription(opts *bind.TransactOpts, subID *big.Int, to common.Address) 
(*types.Transaction, error) + GetCommitment(opts *bind.CallOpts, requestID *big.Int) ([32]byte, error) + Migrate(opts *bind.TransactOpts, subID *big.Int, newCoordinator common.Address) (*types.Transaction, error) + FundSubscriptionWithNative(opts *bind.TransactOpts, subID *big.Int, amount *big.Int) (*types.Transaction, error) + // RandomWordsRequestedTopic returns the log topic of the RandomWordsRequested log + RandomWordsRequestedTopic() common.Hash + // RandomWordsFulfilledTopic returns the log topic of the RandomWordsFulfilled log + RandomWordsFulfilledTopic() common.Hash +} + +type coordinatorV2 struct { + vrfVersion vrfcommon.Version + coordinator *vrf_coordinator_v2.VRFCoordinatorV2 +} + +func NewCoordinatorV2(c *vrf_coordinator_v2.VRFCoordinatorV2) CoordinatorV2_X { + return &coordinatorV2{ + vrfVersion: vrfcommon.V2, + coordinator: c, + } +} + +func (c *coordinatorV2) RandomWordsRequestedTopic() common.Hash { + return vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{}.Topic() +} + +func (c *coordinatorV2) RandomWordsFulfilledTopic() common.Hash { + return vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled{}.Topic() +} + +func (c *coordinatorV2) Address() common.Address { + return c.coordinator.Address() +} + +func (c *coordinatorV2) ParseRandomWordsRequested(log types.Log) (RandomWordsRequested, error) { + parsed, err := c.coordinator.ParseRandomWordsRequested(log) + if err != nil { + return nil, err + } + return NewV2RandomWordsRequested(parsed), nil +} + +func (c *coordinatorV2) ParseRandomWordsFulfilled(log types.Log) (RandomWordsFulfilled, error) { + parsed, err := c.coordinator.ParseRandomWordsFulfilled(log) + if err != nil { + return nil, err + } + return NewV2RandomWordsFulfilled(parsed), nil +} + +func (c *coordinatorV2) RequestRandomWords(opts *bind.TransactOpts, keyHash [32]byte, subID *big.Int, requestConfirmations uint16, callbackGasLimit uint32, numWords uint32, payInEth bool) (*types.Transaction, error) { + return 
c.coordinator.RequestRandomWords(opts, keyHash, subID.Uint64(), requestConfirmations, callbackGasLimit, numWords) +} + +func (c *coordinatorV2) AddConsumer(opts *bind.TransactOpts, subID *big.Int, consumer common.Address) (*types.Transaction, error) { + return c.coordinator.AddConsumer(opts, subID.Uint64(), consumer) +} + +func (c *coordinatorV2) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + return c.coordinator.CreateSubscription(opts) +} + +func (c *coordinatorV2) GetSubscription(opts *bind.CallOpts, subID *big.Int) (Subscription, error) { + sub, err := c.coordinator.GetSubscription(opts, subID.Uint64()) + if err != nil { + return nil, err + } + return NewV2Subscription(sub), nil +} + +func (c *coordinatorV2) GetConfig(opts *bind.CallOpts) (Config, error) { + config, err := c.coordinator.GetConfig(opts) + if err != nil { + return nil, err + } + return NewV2Config(config), nil +} + +func (c *coordinatorV2) ParseLog(log types.Log) (generated.AbigenLog, error) { + return c.coordinator.ParseLog(log) +} + +func (c *coordinatorV2) OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return c.coordinator.OracleWithdraw(opts, recipient, amount) +} + +func (c *coordinatorV2) Withdraw(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) { + return nil, errors.New("withdraw not implemented for v2") +} + +func (c *coordinatorV2) WithdrawNative(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) { + return nil, errors.New("withdrawNative not implemented for v2") +} + +func (c *coordinatorV2) LogsWithTopics(keyHash common.Hash) map[common.Hash][][]log.Topic { + return map[common.Hash][][]log.Topic{ + vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{}.Topic(): { + { + log.Topic(keyHash), + }, + }, + } +} + +func (c *coordinatorV2) Version() vrfcommon.Version { + return c.vrfVersion +} + +func (c *coordinatorV2) 
RegisterProvingKey(opts *bind.TransactOpts, oracle *common.Address, publicProvingKey [2]*big.Int, maxGasPrice *uint64) (*types.Transaction, error) { + if maxGasPrice != nil { + return nil, fmt.Errorf("max gas price not supported for registering proving key in v2") + + } + return c.coordinator.RegisterProvingKey(opts, *oracle, publicProvingKey) +} + +func (c *coordinatorV2) FilterSubscriptionCreated(opts *bind.FilterOpts, subID []*big.Int) (SubscriptionCreatedIterator, error) { + it, err := c.coordinator.FilterSubscriptionCreated(opts, toV2SubIDs(subID)) + if err != nil { + return nil, err + } + return NewV2SubscriptionCreatedIterator(it), nil +} + +func (c *coordinatorV2) FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subID []*big.Int, sender []common.Address) (RandomWordsRequestedIterator, error) { + it, err := c.coordinator.FilterRandomWordsRequested(opts, keyHash, toV2SubIDs(subID), sender) + if err != nil { + return nil, err + } + return NewV2RandomWordsRequestedIterator(it), nil +} + +func (c *coordinatorV2) FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestID []*big.Int, subID []*big.Int) (RandomWordsFulfilledIterator, error) { + it, err := c.coordinator.FilterRandomWordsFulfilled(opts, requestID) + if err != nil { + return nil, err + } + return NewV2RandomWordsFulfilledIterator(it), nil +} + +func (c *coordinatorV2) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return c.coordinator.TransferOwnership(opts, to) +} + +func (c *coordinatorV2) RemoveConsumer(opts *bind.TransactOpts, subID *big.Int, consumer common.Address) (*types.Transaction, error) { + return c.coordinator.RemoveConsumer(opts, subID.Uint64(), consumer) +} + +func (c *coordinatorV2) CancelSubscription(opts *bind.TransactOpts, subID *big.Int, to common.Address) (*types.Transaction, error) { + return c.coordinator.CancelSubscription(opts, subID.Uint64(), to) +} + +func (c *coordinatorV2) GetCommitment(opts 
*bind.CallOpts, requestID *big.Int) ([32]byte, error) { + return c.coordinator.GetCommitment(opts, requestID) +} + +func (c *coordinatorV2) Migrate(opts *bind.TransactOpts, subID *big.Int, newCoordinator common.Address) (*types.Transaction, error) { + panic("migrate not implemented for v2") +} + +func (c *coordinatorV2) FundSubscriptionWithNative(opts *bind.TransactOpts, subID *big.Int, amount *big.Int) (*types.Transaction, error) { + panic("fund subscription with Eth not implemented for v2") +} + +type coordinatorV2_5 struct { + vrfVersion vrfcommon.Version + coordinator vrf_coordinator_v2_5.VRFCoordinatorV25Interface +} + +func NewCoordinatorV2_5(c vrf_coordinator_v2_5.VRFCoordinatorV25Interface) CoordinatorV2_X { + return &coordinatorV2_5{ + vrfVersion: vrfcommon.V2Plus, + coordinator: c, + } +} + +func (c *coordinatorV2_5) RandomWordsRequestedTopic() common.Hash { + return vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRandomWordsRequested{}.Topic() +} + +func (c *coordinatorV2_5) RandomWordsFulfilledTopic() common.Hash { + return vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRandomWordsFulfilled{}.Topic() +} + +func (c *coordinatorV2_5) Address() common.Address { + return c.coordinator.Address() +} + +func (c *coordinatorV2_5) ParseRandomWordsRequested(log types.Log) (RandomWordsRequested, error) { + parsed, err := c.coordinator.ParseRandomWordsRequested(log) + if err != nil { + return nil, err + } + return NewV2_5RandomWordsRequested(parsed), nil +} + +func (c *coordinatorV2_5) ParseRandomWordsFulfilled(log types.Log) (RandomWordsFulfilled, error) { + parsed, err := c.coordinator.ParseRandomWordsFulfilled(log) + if err != nil { + return nil, err + } + return NewV2_5RandomWordsFulfilled(parsed), nil +} + +func (c *coordinatorV2_5) RequestRandomWords(opts *bind.TransactOpts, keyHash [32]byte, subID *big.Int, requestConfirmations uint16, callbackGasLimit uint32, numWords uint32, payInEth bool) (*types.Transaction, error) { + 
extraArgs, err := extraargs.ExtraArgsV1(payInEth) + if err != nil { + return nil, err + } + req := vrf_coordinator_v2_5.VRFV2PlusClientRandomWordsRequest{ + KeyHash: keyHash, + SubId: subID, + RequestConfirmations: requestConfirmations, + CallbackGasLimit: callbackGasLimit, + NumWords: numWords, + ExtraArgs: extraArgs, + } + return c.coordinator.RequestRandomWords(opts, req) +} + +func (c *coordinatorV2_5) AddConsumer(opts *bind.TransactOpts, subID *big.Int, consumer common.Address) (*types.Transaction, error) { + return c.coordinator.AddConsumer(opts, subID, consumer) +} + +func (c *coordinatorV2_5) CreateSubscription(opts *bind.TransactOpts) (*types.Transaction, error) { + return c.coordinator.CreateSubscription(opts) +} + +func (c *coordinatorV2_5) GetSubscription(opts *bind.CallOpts, subID *big.Int) (Subscription, error) { + sub, err := c.coordinator.GetSubscription(opts, subID) + if err != nil { + return nil, err + } + return NewV2_5Subscription(sub), nil +} + +func (c *coordinatorV2_5) GetConfig(opts *bind.CallOpts) (Config, error) { + config, err := c.coordinator.SConfig(opts) + if err != nil { + return nil, err + } + return NewV2_5Config(config), nil +} + +func (c *coordinatorV2_5) ParseLog(log types.Log) (generated.AbigenLog, error) { + return c.coordinator.ParseLog(log) +} + +func (c *coordinatorV2_5) OracleWithdraw(opts *bind.TransactOpts, recipient common.Address, amount *big.Int) (*types.Transaction, error) { + return nil, errors.New("oracle withdraw not implemented for v2.5") +} + +func (c *coordinatorV2_5) Withdraw(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) { + return c.coordinator.Withdraw(opts, recipient) +} + +func (c *coordinatorV2_5) WithdrawNative(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) { + return c.coordinator.WithdrawNative(opts, recipient) +} + +func (c *coordinatorV2_5) LogsWithTopics(keyHash common.Hash) map[common.Hash][][]log.Topic { + return 
map[common.Hash][][]log.Topic{ + vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested{}.Topic(): { + { + log.Topic(keyHash), + }, + }, + } +} + +func (c *coordinatorV2_5) Version() vrfcommon.Version { + return c.vrfVersion +} + +func (c *coordinatorV2_5) RegisterProvingKey(opts *bind.TransactOpts, oracle *common.Address, publicProvingKey [2]*big.Int, maxGasPrice *uint64) (*types.Transaction, error) { + if oracle != nil { + return nil, errors.New("oracle address not supported for registering proving key in v2.5") + } + if maxGasPrice == nil { + return nil, errors.New("max gas price is required for registering proving key in v2.5") + } + return c.coordinator.RegisterProvingKey(opts, publicProvingKey, *maxGasPrice) +} + +func (c *coordinatorV2_5) FilterSubscriptionCreated(opts *bind.FilterOpts, subID []*big.Int) (SubscriptionCreatedIterator, error) { + it, err := c.coordinator.FilterSubscriptionCreated(opts, subID) + if err != nil { + return nil, err + } + return NewV2_5SubscriptionCreatedIterator(it), nil +} + +func (c *coordinatorV2_5) FilterRandomWordsRequested(opts *bind.FilterOpts, keyHash [][32]byte, subID []*big.Int, sender []common.Address) (RandomWordsRequestedIterator, error) { + it, err := c.coordinator.FilterRandomWordsRequested(opts, keyHash, subID, sender) + if err != nil { + return nil, err + } + return NewV2_5RandomWordsRequestedIterator(it), nil +} + +func (c *coordinatorV2_5) FilterRandomWordsFulfilled(opts *bind.FilterOpts, requestID []*big.Int, subID []*big.Int) (RandomWordsFulfilledIterator, error) { + it, err := c.coordinator.FilterRandomWordsFulfilled(opts, requestID, subID) + if err != nil { + return nil, err + } + return NewV2_5RandomWordsFulfilledIterator(it), nil +} + +func (c *coordinatorV2_5) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return c.coordinator.TransferOwnership(opts, to) +} + +func (c *coordinatorV2_5) RemoveConsumer(opts *bind.TransactOpts, subID *big.Int, consumer 
common.Address) (*types.Transaction, error) { + return c.coordinator.RemoveConsumer(opts, subID, consumer) +} + +func (c *coordinatorV2_5) CancelSubscription(opts *bind.TransactOpts, subID *big.Int, to common.Address) (*types.Transaction, error) { + return c.coordinator.CancelSubscription(opts, subID, to) +} + +func (c *coordinatorV2_5) GetCommitment(opts *bind.CallOpts, requestID *big.Int) ([32]byte, error) { + return c.coordinator.SRequestCommitments(opts, requestID) +} + +func (c *coordinatorV2_5) Migrate(opts *bind.TransactOpts, subID *big.Int, newCoordinator common.Address) (*types.Transaction, error) { + return c.coordinator.Migrate(opts, subID, newCoordinator) +} + +func (c *coordinatorV2_5) FundSubscriptionWithNative(opts *bind.TransactOpts, subID *big.Int, amount *big.Int) (*types.Transaction, error) { + if opts == nil { + return nil, errors.New("*bind.TransactOpts cannot be nil") + } + o := *opts + o.Value = amount + return c.coordinator.FundSubscriptionWithNative(&o, subID) +} + +var ( + _ RandomWordsRequestedIterator = (*v2RandomWordsRequestedIterator)(nil) + _ RandomWordsRequestedIterator = (*v2_5RandomWordsRequestedIterator)(nil) +) + +type RandomWordsRequestedIterator interface { + Next() bool + Error() error + Close() error + Event() RandomWordsRequested +} + +type v2RandomWordsRequestedIterator struct { + vrfVersion vrfcommon.Version + iterator *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequestedIterator +} + +func NewV2RandomWordsRequestedIterator(it *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequestedIterator) RandomWordsRequestedIterator { + return &v2RandomWordsRequestedIterator{ + vrfVersion: vrfcommon.V2, + iterator: it, + } +} + +func (it *v2RandomWordsRequestedIterator) Next() bool { + return it.iterator.Next() +} + +func (it *v2RandomWordsRequestedIterator) Error() error { + return it.iterator.Error() +} + +func (it *v2RandomWordsRequestedIterator) Close() error { + return it.iterator.Close() +} + +func (it 
*v2RandomWordsRequestedIterator) Event() RandomWordsRequested { + return NewV2RandomWordsRequested(it.iterator.Event) +} + +type v2_5RandomWordsRequestedIterator struct { + vrfVersion vrfcommon.Version + iterator *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequestedIterator +} + +func NewV2_5RandomWordsRequestedIterator(it *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequestedIterator) RandomWordsRequestedIterator { + return &v2_5RandomWordsRequestedIterator{ + vrfVersion: vrfcommon.V2Plus, + iterator: it, + } +} + +func (it *v2_5RandomWordsRequestedIterator) Next() bool { + return it.iterator.Next() +} + +func (it *v2_5RandomWordsRequestedIterator) Error() error { + return it.iterator.Error() +} + +func (it *v2_5RandomWordsRequestedIterator) Close() error { + return it.iterator.Close() +} + +func (it *v2_5RandomWordsRequestedIterator) Event() RandomWordsRequested { + return NewV2_5RandomWordsRequested(it.iterator.Event) +} + +var ( + _ RandomWordsRequested = (*v2RandomWordsRequested)(nil) + _ RandomWordsRequested = (*v2_5RandomWordsRequested)(nil) +) + +type RandomWordsRequested interface { + Raw() types.Log + NumWords() uint32 + SubID() *big.Int + MinimumRequestConfirmations() uint16 + KeyHash() [32]byte + RequestID() *big.Int + PreSeed() *big.Int + Sender() common.Address + CallbackGasLimit() uint32 + NativePayment() bool +} + +type v2RandomWordsRequested struct { + vrfVersion vrfcommon.Version + event *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested +} + +func NewV2RandomWordsRequested(event *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested) RandomWordsRequested { + return &v2RandomWordsRequested{ + vrfVersion: vrfcommon.V2, + event: event, + } +} + +func (r *v2RandomWordsRequested) Raw() types.Log { + return r.event.Raw +} + +func (r *v2RandomWordsRequested) NumWords() uint32 { + return r.event.NumWords +} + +func (r *v2RandomWordsRequested) SubID() *big.Int { + return new(big.Int).SetUint64(r.event.SubId) +} + +func (r 
*v2RandomWordsRequested) MinimumRequestConfirmations() uint16 { + return r.event.MinimumRequestConfirmations +} + +func (r *v2RandomWordsRequested) KeyHash() [32]byte { + return r.event.KeyHash +} + +func (r *v2RandomWordsRequested) RequestID() *big.Int { + return r.event.RequestId +} + +func (r *v2RandomWordsRequested) PreSeed() *big.Int { + return r.event.PreSeed +} + +func (r *v2RandomWordsRequested) Sender() common.Address { + return r.event.Sender +} + +func (r *v2RandomWordsRequested) CallbackGasLimit() uint32 { + return r.event.CallbackGasLimit +} + +func (r *v2RandomWordsRequested) NativePayment() bool { + return false +} + +type v2_5RandomWordsRequested struct { + vrfVersion vrfcommon.Version + event *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested +} + +func NewV2_5RandomWordsRequested(event *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested) RandomWordsRequested { + return &v2_5RandomWordsRequested{ + vrfVersion: vrfcommon.V2Plus, + event: event, + } +} + +func (r *v2_5RandomWordsRequested) Raw() types.Log { + return r.event.Raw +} + +func (r *v2_5RandomWordsRequested) NumWords() uint32 { + return r.event.NumWords +} + +func (r *v2_5RandomWordsRequested) SubID() *big.Int { + return r.event.SubId +} + +func (r *v2_5RandomWordsRequested) MinimumRequestConfirmations() uint16 { + return r.event.MinimumRequestConfirmations +} + +func (r *v2_5RandomWordsRequested) KeyHash() [32]byte { + return r.event.KeyHash +} + +func (r *v2_5RandomWordsRequested) RequestID() *big.Int { + return r.event.RequestId +} + +func (r *v2_5RandomWordsRequested) PreSeed() *big.Int { + return r.event.PreSeed +} + +func (r *v2_5RandomWordsRequested) Sender() common.Address { + return r.event.Sender +} + +func (r *v2_5RandomWordsRequested) CallbackGasLimit() uint32 { + return r.event.CallbackGasLimit +} + +func (r *v2_5RandomWordsRequested) NativePayment() bool { + nativePayment, err := extraargs.FromExtraArgsV1(r.event.ExtraArgs) + if err != nil { + panic(err) + } + 
return nativePayment +} + +var ( + _ RandomWordsFulfilledIterator = (*v2RandomWordsFulfilledIterator)(nil) + _ RandomWordsFulfilledIterator = (*v2_5RandomWordsFulfilledIterator)(nil) +) + +type RandomWordsFulfilledIterator interface { + Next() bool + Error() error + Close() error + Event() RandomWordsFulfilled +} + +type v2RandomWordsFulfilledIterator struct { + vrfVersion vrfcommon.Version + iterator *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilledIterator +} + +func NewV2RandomWordsFulfilledIterator(it *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilledIterator) RandomWordsFulfilledIterator { + return &v2RandomWordsFulfilledIterator{ + vrfVersion: vrfcommon.V2, + iterator: it, + } +} + +func (it *v2RandomWordsFulfilledIterator) Next() bool { + return it.iterator.Next() +} + +func (it *v2RandomWordsFulfilledIterator) Error() error { + return it.iterator.Error() +} + +func (it *v2RandomWordsFulfilledIterator) Close() error { + return it.iterator.Close() +} + +func (it *v2RandomWordsFulfilledIterator) Event() RandomWordsFulfilled { + return NewV2RandomWordsFulfilled(it.iterator.Event) +} + +type v2_5RandomWordsFulfilledIterator struct { + vrfVersion vrfcommon.Version + iterator *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilledIterator +} + +func NewV2_5RandomWordsFulfilledIterator(it *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilledIterator) RandomWordsFulfilledIterator { + return &v2_5RandomWordsFulfilledIterator{ + vrfVersion: vrfcommon.V2Plus, + iterator: it, + } +} + +func (it *v2_5RandomWordsFulfilledIterator) Next() bool { + return it.iterator.Next() +} + +func (it *v2_5RandomWordsFulfilledIterator) Error() error { + return it.iterator.Error() +} + +func (it *v2_5RandomWordsFulfilledIterator) Close() error { + return it.iterator.Close() +} + +func (it *v2_5RandomWordsFulfilledIterator) Event() RandomWordsFulfilled { + return NewV2_5RandomWordsFulfilled(it.iterator.Event) +} + +var ( + _ RandomWordsFulfilled = 
(*v2RandomWordsFulfilled)(nil) + _ RandomWordsFulfilled = (*v2_5RandomWordsFulfilled)(nil) +) + +type RandomWordsFulfilled interface { + RequestID() *big.Int + Success() bool + SubID() *big.Int + Payment() *big.Int + Raw() types.Log +} + +func NewV2RandomWordsFulfilled(event *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled) RandomWordsFulfilled { + return &v2RandomWordsFulfilled{ + vrfVersion: vrfcommon.V2, + event: event, + } +} + +type v2RandomWordsFulfilled struct { + vrfVersion vrfcommon.Version + event *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled +} + +func (rwf *v2RandomWordsFulfilled) RequestID() *big.Int { + return rwf.event.RequestId +} + +func (rwf *v2RandomWordsFulfilled) Success() bool { + return rwf.event.Success +} + +func (rwf *v2RandomWordsFulfilled) NativePayment() bool { + return false +} + +func (rwf *v2RandomWordsFulfilled) SubID() *big.Int { + panic("VRF V2 RandomWordsFulfilled does not implement SubID") +} + +func (rwf *v2RandomWordsFulfilled) Payment() *big.Int { + return rwf.event.Payment +} + +func (rwf *v2RandomWordsFulfilled) Raw() types.Log { + return rwf.event.Raw +} + +type v2_5RandomWordsFulfilled struct { + vrfVersion vrfcommon.Version + event *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled +} + +func NewV2_5RandomWordsFulfilled(event *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled) RandomWordsFulfilled { + return &v2_5RandomWordsFulfilled{ + vrfVersion: vrfcommon.V2Plus, + event: event, + } +} + +func (rwf *v2_5RandomWordsFulfilled) RequestID() *big.Int { + return rwf.event.RequestId +} + +func (rwf *v2_5RandomWordsFulfilled) Success() bool { + return rwf.event.Success +} + +func (rwf *v2_5RandomWordsFulfilled) SubID() *big.Int { + return rwf.event.SubId +} + +func (rwf *v2_5RandomWordsFulfilled) Payment() *big.Int { + return rwf.event.Payment +} + +func (rwf *v2_5RandomWordsFulfilled) Raw() types.Log { + return rwf.event.Raw +} + +var ( + _ SubscriptionCreatedIterator = 
(*v2SubscriptionCreatedIterator)(nil) + _ SubscriptionCreatedIterator = (*v2_5SubscriptionCreatedIterator)(nil) +) + +type SubscriptionCreatedIterator interface { + Next() bool + Error() error + Close() error + Event() SubscriptionCreated +} + +type v2SubscriptionCreatedIterator struct { + vrfVersion vrfcommon.Version + iterator *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreatedIterator +} + +func NewV2SubscriptionCreatedIterator(it *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreatedIterator) SubscriptionCreatedIterator { + return &v2SubscriptionCreatedIterator{ + vrfVersion: vrfcommon.V2, + iterator: it, + } +} + +func (it *v2SubscriptionCreatedIterator) Next() bool { + return it.iterator.Next() +} + +func (it *v2SubscriptionCreatedIterator) Error() error { + return it.iterator.Error() +} + +func (it *v2SubscriptionCreatedIterator) Close() error { + return it.iterator.Close() +} + +func (it *v2SubscriptionCreatedIterator) Event() SubscriptionCreated { + return NewV2SubscriptionCreated(it.iterator.Event) +} + +type v2_5SubscriptionCreatedIterator struct { + vrfVersion vrfcommon.Version + iterator *vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCreatedIterator +} + +func NewV2_5SubscriptionCreatedIterator(it *vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCreatedIterator) SubscriptionCreatedIterator { + return &v2_5SubscriptionCreatedIterator{ + vrfVersion: vrfcommon.V2Plus, + iterator: it, + } +} + +func (it *v2_5SubscriptionCreatedIterator) Next() bool { + return it.iterator.Next() +} + +func (it *v2_5SubscriptionCreatedIterator) Error() error { + return it.iterator.Error() +} + +func (it *v2_5SubscriptionCreatedIterator) Close() error { + return it.iterator.Close() +} + +func (it *v2_5SubscriptionCreatedIterator) Event() SubscriptionCreated { + return NewV2_5SubscriptionCreated(it.iterator.Event) +} + +var ( + _ SubscriptionCreated = (*v2SubscriptionCreated)(nil) + _ SubscriptionCreated = (*v2_5SubscriptionCreated)(nil) +) + +type 
SubscriptionCreated interface { + Owner() common.Address + SubID() *big.Int +} + +type v2SubscriptionCreated struct { + vrfVersion vrfcommon.Version + event *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated +} + +func NewV2SubscriptionCreated(event *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated) SubscriptionCreated { + return &v2SubscriptionCreated{ + vrfVersion: vrfcommon.V2, + event: event, + } +} + +func (sc *v2SubscriptionCreated) Owner() common.Address { + return sc.event.Owner +} + +func (sc *v2SubscriptionCreated) SubID() *big.Int { + return new(big.Int).SetUint64(sc.event.SubId) +} + +type v2_5SubscriptionCreated struct { + vrfVersion vrfcommon.Version + event *vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCreated +} + +func NewV2_5SubscriptionCreated(event *vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCreated) SubscriptionCreated { + return &v2_5SubscriptionCreated{ + vrfVersion: vrfcommon.V2Plus, + event: event, + } +} + +func (sc *v2_5SubscriptionCreated) Owner() common.Address { + return sc.event.Owner +} + +func (sc *v2_5SubscriptionCreated) SubID() *big.Int { + return sc.event.SubId +} + +var ( + _ Subscription = (*v2Subscription)(nil) + _ Subscription = (*v2_5Subscription)(nil) +) + +type Subscription interface { + Balance() *big.Int + NativeBalance() *big.Int + Owner() common.Address + Consumers() []common.Address + Version() vrfcommon.Version +} + +type v2Subscription struct { + vrfVersion vrfcommon.Version + event vrf_coordinator_v2.GetSubscription +} + +func NewV2Subscription(event vrf_coordinator_v2.GetSubscription) Subscription { + return v2Subscription{ + vrfVersion: vrfcommon.V2, + event: event, + } +} + +func (s v2Subscription) Balance() *big.Int { + return s.event.Balance +} + +func (s v2Subscription) NativeBalance() *big.Int { + panic("EthBalance not supported on V2") +} + +func (s v2Subscription) Owner() common.Address { + return s.event.Owner +} + +func (s v2Subscription) Consumers() []common.Address { + return 
s.event.Consumers +} + +func (s v2Subscription) Version() vrfcommon.Version { + return s.vrfVersion +} + +type v2_5Subscription struct { + vrfVersion vrfcommon.Version + event vrf_coordinator_v2_5.GetSubscription +} + +func NewV2_5Subscription(event vrf_coordinator_v2_5.GetSubscription) Subscription { + return &v2_5Subscription{ + vrfVersion: vrfcommon.V2Plus, + event: event, + } +} + +func (s *v2_5Subscription) Balance() *big.Int { + return s.event.Balance +} + +func (s *v2_5Subscription) NativeBalance() *big.Int { + return s.event.NativeBalance +} + +func (s *v2_5Subscription) Owner() common.Address { + return s.event.Owner +} + +func (s *v2_5Subscription) Consumers() []common.Address { + return s.event.Consumers +} + +func (s *v2_5Subscription) Version() vrfcommon.Version { + return s.vrfVersion +} + +var ( + _ Config = (*v2Config)(nil) + _ Config = (*v2_5Config)(nil) +) + +type Config interface { + MinimumRequestConfirmations() uint16 + MaxGasLimit() uint32 + GasAfterPaymentCalculation() uint32 + StalenessSeconds() uint32 +} + +type v2Config struct { + vrfVersion vrfcommon.Version + config vrf_coordinator_v2.GetConfig +} + +func NewV2Config(config vrf_coordinator_v2.GetConfig) Config { + return &v2Config{ + vrfVersion: vrfcommon.V2, + config: config, + } +} + +func (c *v2Config) MinimumRequestConfirmations() uint16 { + return c.config.MinimumRequestConfirmations +} + +func (c *v2Config) MaxGasLimit() uint32 { + return c.config.MaxGasLimit +} + +func (c *v2Config) GasAfterPaymentCalculation() uint32 { + return c.config.GasAfterPaymentCalculation +} + +func (c *v2Config) StalenessSeconds() uint32 { + return c.config.StalenessSeconds +} + +type v2_5Config struct { + vrfVersion vrfcommon.Version + config vrf_coordinator_v2_5.SConfig +} + +func NewV2_5Config(config vrf_coordinator_v2_5.SConfig) Config { + return &v2_5Config{ + vrfVersion: vrfcommon.V2Plus, + config: config, + } +} + +func (c *v2_5Config) MinimumRequestConfirmations() uint16 { + return 
c.config.MinimumRequestConfirmations +} + +func (c *v2_5Config) MaxGasLimit() uint32 { + return c.config.MaxGasLimit +} + +func (c *v2_5Config) GasAfterPaymentCalculation() uint32 { + return c.config.GasAfterPaymentCalculation +} + +func (c *v2_5Config) StalenessSeconds() uint32 { + return c.config.StalenessSeconds +} + +type VRFProof struct { + VRFVersion vrfcommon.Version + V2 vrf_coordinator_v2.VRFProof + V2Plus vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalProof +} + +func FromV2Proof(proof vrf_coordinator_v2.VRFProof) VRFProof { + return VRFProof{ + VRFVersion: vrfcommon.V2, + V2: proof, + } +} + +func FromV2PlusProof(proof vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalProof) VRFProof { + return VRFProof{ + VRFVersion: vrfcommon.V2Plus, + V2Plus: proof, + } +} + +func ToV2Proofs(proofs []VRFProof) []vrf_coordinator_v2.VRFProof { + v2Proofs := make([]vrf_coordinator_v2.VRFProof, len(proofs)) + for i, proof := range proofs { + v2Proofs[i] = proof.V2 + } + return v2Proofs +} + +func ToV2PlusProofs(proofs []VRFProof) []vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalProof { + v2Proofs := make([]vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalProof, len(proofs)) + for i, proof := range proofs { + v2Proofs[i] = proof.V2Plus + } + return v2Proofs +} + +type RequestCommitment struct { + VRFVersion vrfcommon.Version + V2 vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment + V2Plus vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRequestCommitment +} + +func ToV2Commitments(commitments []RequestCommitment) []vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment { + v2Commitments := make([]vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment, len(commitments)) + for i, commitment := range commitments { + v2Commitments[i] = commitment.V2 + } + return v2Commitments +} + +func ToV2PlusCommitments(commitments []RequestCommitment) 
[]vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRequestCommitment { + v2PlusCommitments := make([]vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRequestCommitment, len(commitments)) + for i, commitment := range commitments { + v2PlusCommitments[i] = commitment.V2Plus + } + return v2PlusCommitments +} + +func NewRequestCommitment(val any) RequestCommitment { + switch val := val.(type) { + case vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment: + return RequestCommitment{VRFVersion: vrfcommon.V2, V2: val} + case vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalRequestCommitment: + return RequestCommitment{VRFVersion: vrfcommon.V2Plus, V2Plus: val} + default: + panic(fmt.Sprintf("NewRequestCommitment: unknown type %T", val)) + } +} + +func (r *RequestCommitment) Get() any { + if r.VRFVersion == vrfcommon.V2 { + return r.V2 + } + return r.V2Plus +} + +func (r *RequestCommitment) NativePayment() bool { + if r.VRFVersion == vrfcommon.V2 { + return false + } + nativePayment, err := extraargs.FromExtraArgsV1(r.V2Plus.ExtraArgs) + if err != nil { + panic(err) + } + return nativePayment +} + +func (r *RequestCommitment) NumWords() uint32 { + if r.VRFVersion == vrfcommon.V2 { + return r.V2.NumWords + } + return r.V2Plus.NumWords +} + +func (r *RequestCommitment) Sender() common.Address { + if r.VRFVersion == vrfcommon.V2 { + return r.V2.Sender + } + return r.V2Plus.Sender +} + +func (r *RequestCommitment) BlockNum() uint64 { + if r.VRFVersion == vrfcommon.V2 { + return r.V2.BlockNum + } + return r.V2Plus.BlockNum +} + +func (r *RequestCommitment) SubID() *big.Int { + if r.VRFVersion == vrfcommon.V2 { + return new(big.Int).SetUint64(r.V2.SubId) + } + return r.V2Plus.SubId +} + +func (r *RequestCommitment) CallbackGasLimit() uint32 { + if r.VRFVersion == vrfcommon.V2 { + return r.V2.CallbackGasLimit + } + return r.V2Plus.CallbackGasLimit +} + +func toV2SubIDs(subID []*big.Int) (v2SubIDs []uint64) { + for _, sID := range subID { + 
v2SubIDs = append(v2SubIDs, sID.Uint64()) + } + return +} diff --git a/core/services/vrf/v2/integration_helpers_test.go b/core/services/vrf/v2/integration_helpers_test.go new file mode 100644 index 00000000..45b6c504 --- /dev/null +++ b/core/services/vrf/v2/integration_helpers_test.go @@ -0,0 +1,1881 @@ +package v2_test + +import ( + "math/big" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/google/uuid" + "github.com/onsi/gomega" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + v2 "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_consumer_v2_upgradeable_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_external_sub_owner_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_transparent_upgradeable_proxy" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + v22 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/v2" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrftesthelpers" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func testSingleConsumerHappyPath( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + consumer *bind.TransactOpts, + consumerContract vrftesthelpers.VRFConsumerContract, + consumerContractAddress common.Address, + coordinator v22.CoordinatorV2_X, + coordinatorAddress common.Address, + batchCoordinatorAddress common.Address, + vrfOwnerAddress *common.Address, + vrfVersion vrfcommon.Version, + nativePayment bool, + assertions ...func( + t *testing.T, + coordinator v22.CoordinatorV2_X, + rwfe v22.RandomWordsFulfilled, + subID *big.Int), +) { + key1 := cltest.MustGenerateRandomKey(t) + key2 := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ + // Gas lane. + Key: ptr(key1.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + }, toml.KeySpecific{ + // Gas lane. + Key: ptr(key2.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1, key2) + + // Create a subscription and fund with 5 PLI. 
+ subID := subscribeAndAssertSubscriptionCreatedEvent(t, consumerContract, consumer, consumerContractAddress, big.NewInt(5e18), coordinator, uni.backend, nativePayment) + + // Fund gas lanes. + sendEth(t, ownerKey, uni.backend, key1.Address, 10) + sendEth(t, ownerKey, uni.backend, key2.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job using key1 and key2 on the same gas lane. + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key1, key2}}, + app, + coordinator, + coordinatorAddress, + batchCoordinatorAddress, + uni, + vrfOwnerAddress, + vrfVersion, + false, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Make the first randomness request. + numWords := uint32(20) + requestID1, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, coordinator, uni.backend, nativePayment) + + // Wait for fulfillment to be queued. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + // Mine the fulfillment that was queued. + mine(t, requestID1, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + // Assert correct state of RandomWordsFulfilled event. 
+ // In particular: + // * success should be true + // * payment should be exactly the amount specified as the premium in the coordinator fee config + rwfe := assertRandomWordsFulfilled(t, requestID1, true, coordinator, nativePayment) + if len(assertions) > 0 { + assertions[0](t, coordinator, rwfe, subID) + } + + // Make the second randomness request and assert fulfillment is successful + requestID2, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, coordinator, uni.backend, nativePayment) + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 2 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + mine(t, requestID2, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + // Assert correct state of RandomWordsFulfilled event. + // In particular: + // * success should be true + // * payment should be exactly the amount specified as the premium in the coordinator fee config + rwfe = assertRandomWordsFulfilled(t, requestID2, true, coordinator, nativePayment) + if len(assertions) > 0 { + assertions[0](t, coordinator, rwfe, subID) + } + + // Assert correct number of random words sent by coordinator. 
+ assertNumRandomWords(t, consumerContract, numWords) + + // Assert that both send addresses were used to fulfill the requests + n, err := uni.backend.PendingNonceAt(testutils.Context(t), key1.Address) + require.NoError(t, err) + require.EqualValues(t, 1, n) + + n, err = uni.backend.PendingNonceAt(testutils.Context(t), key2.Address) + require.NoError(t, err) + require.EqualValues(t, 1, n) + + t.Log("Done!") +} + +func testMultipleConsumersNeedBHS( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + consumers []*bind.TransactOpts, + consumerContracts []vrftesthelpers.VRFConsumerContract, + consumerContractAddresses []common.Address, + coordinator v22.CoordinatorV2_X, + coordinatorAddress common.Address, + batchCoordinatorAddress common.Address, + vrfOwnerAddress *common.Address, + vrfVersion vrfcommon.Version, + nativePayment bool, + assertions ...func( + t *testing.T, + coordinator v22.CoordinatorV2_X, + rwfe v22.RandomWordsFulfilled), +) { + nConsumers := len(consumers) + vrfKey := cltest.MustGenerateRandomKey(t) + sendEth(t, ownerKey, uni.backend, vrfKey.Address, 10) + + // generate n BHS keys to make sure BHS job rotates sending keys + var bhsKeyAddresses []string + var keySpecificOverrides []toml.KeySpecific + var keys []interface{} + gasLanePriceWei := assets.GWei(10) + for i := 0; i < nConsumers; i++ { + bhsKey := cltest.MustGenerateRandomKey(t) + bhsKeyAddresses = append(bhsKeyAddresses, bhsKey.Address.String()) + keys = append(keys, bhsKey) + keySpecificOverrides = append(keySpecificOverrides, toml.KeySpecific{ + Key: ptr(bhsKey.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + }) + sendEth(t, ownerKey, uni.backend, bhsKey.Address, 10) + } + keySpecificOverrides = append(keySpecificOverrides, toml.KeySpecific{ + // Gas lane. 
+ Key: ptr(vrfKey.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + }) + + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), keySpecificOverrides...)(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + c.EVM[0].FinalityDepth = ptr[uint32](2) + }) + keys = append(keys, ownerKey, vrfKey) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, keys...) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job. + vrfJobs := createVRFJobs( + t, + [][]ethkey.KeyV2{{vrfKey}}, + app, + coordinator, + coordinatorAddress, + batchCoordinatorAddress, + uni, + vrfOwnerAddress, + vrfVersion, + false, + gasLanePriceWei) + keyHash := vrfJobs[0].VRFSpec.PublicKey.MustHash() + + var ( + v2CoordinatorAddress string + v2PlusCoordinatorAddress string + ) + + if vrfVersion == vrfcommon.V2 { + v2CoordinatorAddress = coordinatorAddress.String() + } else if vrfVersion == vrfcommon.V2Plus { + v2PlusCoordinatorAddress = coordinatorAddress.String() + } + + _ = vrftesthelpers.CreateAndStartBHSJob( + t, bhsKeyAddresses, app, uni.bhsContractAddress.String(), "", + v2CoordinatorAddress, v2PlusCoordinatorAddress, "", 0, 200, 0, 100) + + // Ensure log poller is ready and has all logs. + require.NoError(t, app.GetRelayers().LegacyEVMChains().Slice()[0].LogPoller().Ready()) + require.NoError(t, app.GetRelayers().LegacyEVMChains().Slice()[0].LogPoller().Replay(testutils.Context(t), 1)) + + for i := 0; i < nConsumers; i++ { + consumer := consumers[i] + consumerContract := consumerContracts[i] + + // Create a subscription and fund with 0 PLI. 
+ _, subID := subscribeVRF(t, consumer, consumerContract, coordinator, uni.backend, new(big.Int), nativePayment) + if vrfVersion == vrfcommon.V2 { + require.Equal(t, uint64(i+1), subID.Uint64()) + } + + // Make the randomness request. It will not yet succeed since it is underfunded. + numWords := uint32(20) + + requestID, requestBlock := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, coordinator, uni.backend, nativePayment) + + // Wait 101 blocks. + for i := 0; i < 100; i++ { + uni.backend.Commit() + } + verifyBlockhashStored(t, uni, requestBlock) + + // Wait another 160 blocks so that the request is outside of the 256 block window + for i := 0; i < 160; i++ { + uni.backend.Commit() + } + + // Fund the subscription + topUpSubscription(t, consumer, consumerContract, uni.backend, big.NewInt(5e18 /* 5 PLI */), nativePayment) + + // Wait for fulfillment to be queued. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + rwfe := assertRandomWordsFulfilled(t, requestID, true, coordinator, nativePayment) + if len(assertions) > 0 { + assertions[0](t, coordinator, rwfe) + } + + // Assert correct number of random words sent by coordinator. 
+ assertNumRandomWords(t, consumerContract, numWords) + } +} + +func testMultipleConsumersNeedTrustedBHS( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2PlusUniverse, + consumers []*bind.TransactOpts, + consumerContracts []vrftesthelpers.VRFConsumerContract, + consumerContractAddresses []common.Address, + coordinator v22.CoordinatorV2_X, + coordinatorAddress common.Address, + batchCoordinatorAddress common.Address, + vrfVersion vrfcommon.Version, + nativePayment bool, + addedDelay bool, + assertions ...func( + t *testing.T, + coordinator v22.CoordinatorV2_X, + rwfe v22.RandomWordsFulfilled), +) { + nConsumers := len(consumers) + vrfKey := cltest.MustGenerateRandomKey(t) + sendEth(t, ownerKey, uni.backend, vrfKey.Address, 10) + + // generate n BHS keys to make sure BHS job rotates sending keys + var bhsKeyAddresses []common.Address + var bhsKeyAddressesStrings []string + var keySpecificOverrides []toml.KeySpecific + var keys []interface{} + gasLanePriceWei := assets.GWei(10) + for i := 0; i < nConsumers; i++ { + bhsKey := cltest.MustGenerateRandomKey(t) + bhsKeyAddressesStrings = append(bhsKeyAddressesStrings, bhsKey.Address.String()) + bhsKeyAddresses = append(bhsKeyAddresses, bhsKey.Address) + keys = append(keys, bhsKey) + keySpecificOverrides = append(keySpecificOverrides, toml.KeySpecific{ + Key: ptr(bhsKey.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + }) + sendEth(t, ownerKey, uni.backend, bhsKey.Address, 10) + } + keySpecificOverrides = append(keySpecificOverrides, toml.KeySpecific{ + // Gas lane. + Key: ptr(vrfKey.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + }) + + // Whitelist vrf key for trusted BHS. 
+ { + _, err := uni.trustedBhsContract.SetWhitelist(uni.neil, bhsKeyAddresses) + require.NoError(t, err) + uni.backend.Commit() + } + + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), keySpecificOverrides...)(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.EVM[0].GasEstimator.LimitDefault = ptr(uint32(5_000_000)) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + c.EVM[0].FinalityDepth = ptr[uint32](2) + }) + keys = append(keys, ownerKey, vrfKey) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, keys...) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job. + vrfJobs := createVRFJobs( + t, + [][]ethkey.KeyV2{{vrfKey}}, + app, + coordinator, + coordinatorAddress, + batchCoordinatorAddress, + uni.coordinatorV2UniverseCommon, + nil, + vrfVersion, + false, + gasLanePriceWei) + keyHash := vrfJobs[0].VRFSpec.PublicKey.MustHash() + + var ( + v2CoordinatorAddress string + v2PlusCoordinatorAddress string + ) + + if vrfVersion == vrfcommon.V2 { + v2CoordinatorAddress = coordinatorAddress.String() + } else if vrfVersion == vrfcommon.V2Plus { + v2PlusCoordinatorAddress = coordinatorAddress.String() + } + + waitBlocks := 100 + if addedDelay { + waitBlocks = 400 + } + _ = vrftesthelpers.CreateAndStartBHSJob( + t, bhsKeyAddressesStrings, app, "", "", + v2CoordinatorAddress, v2PlusCoordinatorAddress, uni.trustedBhsContractAddress.String(), 20, 1000, 0, waitBlocks) + + // Ensure log poller is ready and has all logs. + chain := app.GetRelayers().LegacyEVMChains().Slice()[0] + require.NoError(t, chain.LogPoller().Ready()) + require.NoError(t, chain.LogPoller().Replay(testutils.Context(t), 1)) + + for i := 0; i < nConsumers; i++ { + consumer := consumers[i] + consumerContract := consumerContracts[i] + + // Create a subscription and fund with 0 PLI. 
+ _, subID := subscribeVRF(t, consumer, consumerContract, coordinator, uni.backend, new(big.Int), nativePayment) + if vrfVersion == vrfcommon.V2 { + require.Equal(t, uint64(i+1), subID.Uint64()) + } + + // Make the randomness request. It will not yet succeed since it is underfunded. + numWords := uint32(20) + + requestID, requestBlock := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, coordinator, uni.backend, nativePayment) + + // Wait 101 blocks. + for i := 0; i < 100; i++ { + uni.backend.Commit() + } + + // For an added delay, we even go beyond the EVM lookback limit. This is not a problem in a trusted BHS setup. + if addedDelay { + for i := 0; i < 300; i++ { + uni.backend.Commit() + } + } + + verifyBlockhashStoredTrusted(t, uni, requestBlock) + + // Wait another 160 blocks so that the request is outside of the 256 block window + for i := 0; i < 160; i++ { + uni.backend.Commit() + } + + // Fund the subscription + topUpSubscription(t, consumer, consumerContract, uni.backend, big.NewInt(5e18 /* 5 PLI */), nativePayment) + + // Wait for fulfillment to be queued. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + rwfe := assertRandomWordsFulfilled(t, requestID, true, coordinator, nativePayment) + if len(assertions) > 0 { + assertions[0](t, coordinator, rwfe) + } + + // Assert correct number of random words sent by coordinator. 
+ assertNumRandomWords(t, consumerContract, numWords) + } +} + +func verifyBlockhashStored( + t *testing.T, + uni coordinatorV2UniverseCommon, + requestBlock uint64, +) { + // Wait for the blockhash to be stored + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + callOpts := &bind.CallOpts{ + Pending: false, + From: common.Address{}, + BlockNumber: nil, + Context: nil, + } + _, err := uni.bhsContract.GetBlockhash(callOpts, big.NewInt(int64(requestBlock))) + if err == nil { + return true + } else if strings.Contains(err.Error(), "execution reverted") { + return false + } + t.Fatal(err) + return false + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) +} + +func verifyBlockhashStoredTrusted( + t *testing.T, + uni coordinatorV2PlusUniverse, + requestBlock uint64, +) { + // Wait for the blockhash to be stored + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + callOpts := &bind.CallOpts{ + Pending: false, + From: common.Address{}, + BlockNumber: nil, + Context: nil, + } + _, err := uni.trustedBhsContract.GetBlockhash(callOpts, big.NewInt(int64(requestBlock))) + if err == nil { + return true + } else if strings.Contains(err.Error(), "execution reverted") { + return false + } + t.Fatal(err) + return false + }, time.Second*300, time.Second).Should(gomega.BeTrue()) +} + +func testSingleConsumerHappyPathBatchFulfillment( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + consumer *bind.TransactOpts, + consumerContract vrftesthelpers.VRFConsumerContract, + consumerContractAddress common.Address, + coordinator v22.CoordinatorV2_X, + coordinatorAddress common.Address, + batchCoordinatorAddress common.Address, + vrfOwnerAddress *common.Address, + numRequests int, + bigGasCallback bool, + vrfVersion vrfcommon.Version, + nativePayment bool, + assertions ...func( + t *testing.T, + coordinator v22.CoordinatorV2_X, + rwfe v22.RandomWordsFulfilled, + subID *big.Int), +) { + key1 := 
cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ + // Gas lane. + Key: ptr(key1.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].GasEstimator.LimitDefault = ptr[uint32](5_000_000) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.EVM[0].ChainID = (*ubig.Big)(testutils.SimulatedChainID) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) + + // Create a subscription and fund with 5 PLI. + subID := subscribeAndAssertSubscriptionCreatedEvent(t, consumerContract, consumer, consumerContractAddress, big.NewInt(5e18), coordinator, uni.backend, nativePayment) + + // Fund gas lane. + sendEth(t, ownerKey, uni.backend, key1.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job using key1 and key2 on the same gas lane. + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key1}}, + app, + coordinator, + coordinatorAddress, + batchCoordinatorAddress, + uni, + vrfOwnerAddress, + vrfVersion, + true, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Make some randomness requests. + numWords := uint32(2) + var reqIDs []*big.Int + for i := 0; i < numRequests; i++ { + requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, coordinator, uni.backend, nativePayment) + reqIDs = append(reqIDs, requestID) + } + + if bigGasCallback { + // Make one randomness request with the max callback gas limit. + // It should live in a batch on it's own. 
+ requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 2_500_000, coordinator, uni.backend, nativePayment) + reqIDs = append(reqIDs, requestID) + } + + // Wait for fulfillment to be queued. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + if bigGasCallback { + return len(runs) == (numRequests + 1) + } + return len(runs) == numRequests + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + mineBatch(t, reqIDs, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + for i, requestID := range reqIDs { + // Assert correct state of RandomWordsFulfilled event. + // The last request will be the successful one because of the way the example + // contract is written. + var rwfe v22.RandomWordsFulfilled + if i == (len(reqIDs) - 1) { + rwfe = assertRandomWordsFulfilled(t, requestID, true, coordinator, nativePayment) + } else { + rwfe = assertRandomWordsFulfilled(t, requestID, false, coordinator, nativePayment) + } + if len(assertions) > 0 { + assertions[0](t, coordinator, rwfe, subID) + } + } + + // Assert correct number of random words sent by coordinator. 
+ assertNumRandomWords(t, consumerContract, numWords) +} + +func testSingleConsumerNeedsTopUp( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + consumer *bind.TransactOpts, + consumerContract vrftesthelpers.VRFConsumerContract, + consumerContractAddress common.Address, + coordinator v22.CoordinatorV2_X, + coordinatorAddress common.Address, + batchCoordinatorAddress common.Address, + vrfOwnerAddress *common.Address, + initialFundingAmount *big.Int, + topUpAmount *big.Int, + vrfVersion vrfcommon.Version, + nativePayment bool, + assertions ...func( + t *testing.T, + coordinator v22.CoordinatorV2_X, + rwfe v22.RandomWordsFulfilled), +) { + key := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(1000) + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(1000), toml.KeySpecific{ + // Gas lane. + Key: ptr(key.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key) + + // Create and fund a subscription + subID := subscribeAndAssertSubscriptionCreatedEvent(t, consumerContract, consumer, consumerContractAddress, initialFundingAmount, coordinator, uni.backend, nativePayment) + + // Fund expensive gas lane. + sendEth(t, ownerKey, uni.backend, key.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job. 
+ jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key}}, + app, + coordinator, + coordinatorAddress, + batchCoordinatorAddress, + uni, + vrfOwnerAddress, + vrfVersion, + false, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + numWords := uint32(20) + requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, coordinator, uni.backend, nativePayment) + + // Fulfillment will not be enqueued because subscriber doesn't have enough PLI. + gomega.NewGomegaWithT(t).Consistently(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("assert 1", "runs", len(runs)) + return len(runs) == 0 + }, 5*time.Second, 1*time.Second).Should(gomega.BeTrue()) + + // Top up subscription with enough PLI to see the job through. + topUpSubscription(t, consumer, consumerContract, uni.backend, topUpAmount, nativePayment) + uni.backend.Commit() + + // Wait for fulfillment to go through. + gomega.NewWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("assert 2", "runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue()) + + // Mine the fulfillment. Need to wait for Txm to mark the tx as confirmed + // so that we can actually see the event on the simulated chain. + mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + // Assert the state of the RandomWordsFulfilled event. + rwfe := assertRandomWordsFulfilled(t, requestID, true, coordinator, nativePayment) + if len(assertions) > 0 { + assertions[0](t, coordinator, rwfe) + } + + // Assert correct number of random words sent by coordinator. 
+ assertNumRandomWords(t, consumerContract, numWords) +} + +// testBlockHeaderFeeder starts VRF and block header feeder jobs +// subscription is unfunded initially and funded after 256 blocks +// the function makes sure the block header feeder stored blockhash for +// a block older than 256 blocks +func testBlockHeaderFeeder( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + consumers []*bind.TransactOpts, + consumerContracts []vrftesthelpers.VRFConsumerContract, + consumerContractAddresses []common.Address, + coordinator v22.CoordinatorV2_X, + coordinatorAddress common.Address, + batchCoordinatorAddress common.Address, + vrfOwnerAddress *common.Address, + vrfVersion vrfcommon.Version, + nativePayment bool, + assertions ...func( + t *testing.T, + coordinator v22.CoordinatorV2_X, + rwfe v22.RandomWordsFulfilled), +) { + nConsumers := len(consumers) + + vrfKey := cltest.MustGenerateRandomKey(t) + bhfKey := cltest.MustGenerateRandomKey(t) + bhfKeys := []string{bhfKey.Address.String()} + + sendEth(t, ownerKey, uni.backend, bhfKey.Address, 10) + sendEth(t, ownerKey, uni.backend, vrfKey.Address, 10) + + gasLanePriceWei := assets.GWei(10) + + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, gasLanePriceWei, toml.KeySpecific{ + // Gas lane. + Key: ptr(vrfKey.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + c.EVM[0].FinalityDepth = ptr[uint32](2) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, vrfKey, bhfKey) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job. 
+ vrfJobs := createVRFJobs( + t, + [][]ethkey.KeyV2{{vrfKey}}, + app, + coordinator, + coordinatorAddress, + batchCoordinatorAddress, + uni, + vrfOwnerAddress, + vrfVersion, + false, + gasLanePriceWei) + keyHash := vrfJobs[0].VRFSpec.PublicKey.MustHash() + var ( + v2coordinatorAddress string + v2plusCoordinatorAddress string + ) + if vrfVersion == vrfcommon.V2 { + v2coordinatorAddress = coordinatorAddress.String() + } else if vrfVersion == vrfcommon.V2Plus { + v2plusCoordinatorAddress = coordinatorAddress.String() + } + + _ = vrftesthelpers.CreateAndStartBlockHeaderFeederJob( + t, bhfKeys, app, uni.bhsContractAddress.String(), uni.batchBHSContractAddress.String(), "", + v2coordinatorAddress, v2plusCoordinatorAddress) + + // Ensure log poller is ready and has all logs. + require.NoError(t, app.GetRelayers().LegacyEVMChains().Slice()[0].LogPoller().Ready()) + require.NoError(t, app.GetRelayers().LegacyEVMChains().Slice()[0].LogPoller().Replay(testutils.Context(t), 1)) + + for i := 0; i < nConsumers; i++ { + consumer := consumers[i] + consumerContract := consumerContracts[i] + + // Create a subscription and fund with 0 PLI. + _, subID := subscribeVRF(t, consumer, consumerContract, coordinator, uni.backend, new(big.Int), nativePayment) + if vrfVersion == vrfcommon.V2 { + require.Equal(t, uint64(i+1), subID.Uint64()) + } + + // Make the randomness request. It will not yet succeed since it is underfunded. + numWords := uint32(20) + + requestID, requestBlock := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, coordinator, uni.backend, nativePayment) + + // Wait 256 blocks. + for i := 0; i < 256; i++ { + uni.backend.Commit() + } + verifyBlockhashStored(t, uni, requestBlock) + + // Fund the subscription + topUpSubscription(t, consumer, consumerContract, uni.backend, big.NewInt(5e18), nativePayment) + + // Wait for fulfillment to be queued. 
+ gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + rwfe := assertRandomWordsFulfilled(t, requestID, true, coordinator, nativePayment) + if len(assertions) > 0 { + assertions[0](t, coordinator, rwfe) + } + + // Assert correct number of random words sent by coordinator. + assertNumRandomWords(t, consumerContract, numWords) + } +} + +func createSubscriptionAndGetSubscriptionCreatedEvent( + t *testing.T, + subOwner *bind.TransactOpts, + coordinator v22.CoordinatorV2_X, + backend *backends.SimulatedBackend, +) v22.SubscriptionCreated { + _, err := coordinator.CreateSubscription(subOwner) + require.NoError(t, err) + backend.Commit() + + iter, err := coordinator.FilterSubscriptionCreated(nil, nil) + require.NoError(t, err) + require.True(t, iter.Next(), "could not find SubscriptionCreated event for subID") + return iter.Event() +} + +func setupAndFundSubscriptionAndConsumer( + t *testing.T, + uni coordinatorV2UniverseCommon, + coordinator v22.CoordinatorV2_X, + coordinatorAddress common.Address, + subOwner *bind.TransactOpts, + consumerAddress common.Address, + vrfVersion vrfcommon.Version, + fundingAmount *big.Int, +) (subID *big.Int) { + event := createSubscriptionAndGetSubscriptionCreatedEvent(t, subOwner, coordinator, uni.backend) + subID = event.SubID() + + _, err := coordinator.AddConsumer(subOwner, subID, consumerAddress) + require.NoError(t, err, "failed to add consumer") + uni.backend.Commit() + + if vrfVersion == vrfcommon.V2Plus { + b, err2 := evmutils.ABIEncode(`[{"type":"uint256"}]`, subID) + require.NoError(t, err2) + _, err2 = uni.linkContract.TransferAndCall( + uni.sergey, coordinatorAddress, fundingAmount, b) + require.NoError(t, err2, "failed to fund 
sub") + uni.backend.Commit() + return + } + b, err := evmutils.ABIEncode(`[{"type":"uint64"}]`, subID.Uint64()) + require.NoError(t, err) + _, err = uni.linkContract.TransferAndCall( + uni.sergey, coordinatorAddress, fundingAmount, b) + require.NoError(t, err, "failed to fund sub") + uni.backend.Commit() + return +} + +func testSingleConsumerForcedFulfillment( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2Universe, + coordinator v22.CoordinatorV2_X, + coordinatorAddress common.Address, + batchCoordinatorAddress common.Address, + batchEnabled bool, + vrfVersion vrfcommon.Version, +) { + key1 := cltest.MustGenerateRandomKey(t) + key2 := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ + // Gas lane. + Key: ptr(key1.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + }, toml.KeySpecific{ + // Gas lane. + Key: ptr(key2.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1, key2) + + eoaConsumerAddr, _, eoaConsumer, err := vrf_external_sub_owner_example.DeployVRFExternalSubOwnerExample( + uni.neil, + uni.backend, + uni.oldRootContractAddress, + uni.linkContractAddress, + ) + require.NoError(t, err, "failed to deploy eoa consumer") + uni.backend.Commit() + + // Create a subscription and fund with 5 PLI. 
+ subID := setupAndFundSubscriptionAndConsumer( + t, + uni.coordinatorV2UniverseCommon, + uni.oldRootContract, + uni.oldRootContractAddress, + uni.neil, + eoaConsumerAddr, + vrfVersion, + assets.Ether(5).ToInt(), + ) + + // Check the subscription state + sub, err := uni.oldRootContract.GetSubscription(nil, subID) + require.NoError(t, err, "failed to get subscription with id %d", subID) + require.Equal(t, assets.Ether(5).ToInt(), sub.Balance()) + require.Equal(t, 1, len(sub.Consumers())) + require.Equal(t, eoaConsumerAddr, sub.Consumers()[0]) + require.Equal(t, uni.neil.From, sub.Owner()) + + // Fund gas lanes. + sendEth(t, ownerKey, uni.backend, key1.Address, 10) + sendEth(t, ownerKey, uni.backend, key2.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job using key1 and key2 on the same gas lane. + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key1, key2}}, + app, + coordinator, + coordinatorAddress, + batchCoordinatorAddress, + uni.coordinatorV2UniverseCommon, + ptr(uni.vrfOwnerAddress), + vrfVersion, + batchEnabled, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Transfer ownership of the VRF coordinator to the VRF owner, + // which is critical for this test. + _, err = uni.oldRootContract.TransferOwnership(uni.neil, uni.vrfOwnerAddress) + require.NoError(t, err, "unable to TransferOwnership of VRF coordinator to VRFOwner") + uni.backend.Commit() + + _, err = uni.vrfOwner.AcceptVRFOwnership(uni.neil) + require.NoError(t, err, "unable to Accept VRF Ownership") + uni.backend.Commit() + + actualCoordinatorAddr, err := uni.vrfOwner.GetVRFCoordinator(nil) + require.NoError(t, err) + require.Equal(t, uni.oldRootContractAddress, actualCoordinatorAddr) + + t.Log("vrf owner address:", uni.vrfOwnerAddress) + + // Add allowed callers so that the oracle can call fulfillRandomWords + // on VRFOwner. 
+ _, err = uni.vrfOwner.SetAuthorizedSenders(uni.neil, []common.Address{ + key1.EIP55Address.Address(), + key2.EIP55Address.Address(), + }) + require.NoError(t, err, "unable to update authorized senders in VRFOwner") + uni.backend.Commit() + + // Make the randomness request. + // Give it a larger number of confs so that we have enough time to remove the consumer + // and cause a 0 balance to the sub. + numWords := 3 + confs := 10 + _, err = eoaConsumer.RequestRandomWords(uni.neil, subID.Uint64(), 500_000, uint16(confs), uint32(numWords), keyHash) + require.NoError(t, err, "failed to request randomness from consumer") + uni.backend.Commit() + + requestID, err := eoaConsumer.SRequestId(nil) + require.NoError(t, err) + + // Remove consumer and cancel the sub before the request can be fulfilled + _, err = uni.oldRootContract.RemoveConsumer(uni.neil, subID, eoaConsumerAddr) + require.NoError(t, err, "RemoveConsumer tx failed") + _, err = uni.oldRootContract.CancelSubscription(uni.neil, subID, uni.neil.From) + require.NoError(t, err, "CancelSubscription tx failed") + uni.backend.Commit() + + // Wait for force-fulfillment to be queued. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + commitment, err2 := uni.oldRootContract.GetCommitment(nil, requestID) + require.NoError(t, err2) + t.Log("commitment is:", hexutil.Encode(commitment[:])) + it, err2 := uni.vrfOwner.FilterRandomWordsForced(nil, []*big.Int{requestID}, []uint64{subID.Uint64()}, []common.Address{eoaConsumerAddr}) + require.NoError(t, err2) + i := 0 + for it.Next() { + i++ + require.Equal(t, requestID.String(), it.Event.RequestId.String()) + require.Equal(t, subID.Uint64(), it.Event.SubId) + require.Equal(t, eoaConsumerAddr.String(), it.Event.Sender.String()) + } + t.Log("num RandomWordsForced logs:", i) + return utils.IsEmpty(commitment[:]) + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + // Mine the fulfillment that was queued. 
+ mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + // Assert correct state of RandomWordsFulfilled event. + // In this particular case: + // * success should be true + // * payment should be zero (forced fulfillment) + rwfe := assertRandomWordsFulfilled(t, requestID, true, coordinator, false) + require.Equal(t, "0", rwfe.Payment().String()) + + // Check that the RandomWordsForced event is emitted correctly. + it, err := uni.vrfOwner.FilterRandomWordsForced(nil, []*big.Int{requestID}, []uint64{subID.Uint64()}, []common.Address{eoaConsumerAddr}) + require.NoError(t, err) + i := 0 + for it.Next() { + i++ + require.Equal(t, requestID.String(), it.Event.RequestId.String()) + require.Equal(t, subID.Uint64(), it.Event.SubId) + require.Equal(t, eoaConsumerAddr.String(), it.Event.Sender.String()) + } + require.Greater(t, i, 0) + + t.Log("Done!") +} + +func testSingleConsumerEIP150( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + batchCoordinatorAddress common.Address, + batchEnabled bool, + vrfVersion vrfcommon.Version, + nativePayment bool, +) { + callBackGasLimit := int64(2_500_000) // base callback gas. + + key1 := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ + // Gas lane. 
+ Key: ptr(key1.EIP55Address), + GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].GasEstimator.LimitDefault = ptr(uint32(3.5e6)) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) + consumer := uni.vrfConsumers[0] + consumerContract := uni.consumerContracts[0] + consumerContractAddress := uni.consumerContractAddresses[0] + // Create a subscription and fund with 500 PLI. + subAmount := big.NewInt(1).Mul(big.NewInt(5e18), big.NewInt(100)) + subID := subscribeAndAssertSubscriptionCreatedEvent(t, consumerContract, consumer, consumerContractAddress, subAmount, uni.rootContract, uni.backend, nativePayment) + + // Fund gas lane. + sendEth(t, ownerKey, uni.backend, key1.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job. + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key1}}, + app, + uni.rootContract, + uni.rootContractAddress, + batchCoordinatorAddress, + uni, + nil, + vrfVersion, + false, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Make the first randomness request. + numWords := uint32(1) + requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, uint32(callBackGasLimit), uni.rootContract, uni.backend, nativePayment) + + // Wait for simulation to pass. 
+ gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + t.Log("Done!") +} + +func testSingleConsumerEIP150Revert( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + batchCoordinatorAddress common.Address, + batchEnabled bool, + vrfVersion vrfcommon.Version, + nativePayment bool, +) { + callBackGasLimit := int64(2_500_000) // base callback gas. + eip150Fee := int64(0) // no premium given for callWithExactGas + coordinatorFulfillmentOverhead := int64(90_000) // fixed gas used in coordinator fulfillment + gasLimit := callBackGasLimit + eip150Fee + coordinatorFulfillmentOverhead + + key1 := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ + // Gas lane. + Key: ptr(key1.EIP55Address), + GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].GasEstimator.LimitDefault = ptr(uint32(gasLimit)) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) + consumer := uni.vrfConsumers[0] + consumerContract := uni.consumerContracts[0] + consumerContractAddress := uni.consumerContractAddresses[0] + // Create a subscription and fund with 500 PLI. + subAmount := big.NewInt(1).Mul(big.NewInt(5e18), big.NewInt(100)) + subID := subscribeAndAssertSubscriptionCreatedEvent(t, consumerContract, consumer, consumerContractAddress, subAmount, uni.rootContract, uni.backend, nativePayment) + + // Fund gas lane. 
+ sendEth(t, ownerKey, uni.backend, key1.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job. + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key1}}, + app, + uni.rootContract, + uni.rootContractAddress, + batchCoordinatorAddress, + uni, + nil, + vrfVersion, + false, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Make the first randomness request. + numWords := uint32(1) + requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, uint32(callBackGasLimit), uni.rootContract, uni.backend, nativePayment) + + // Simulation should not pass. + gomega.NewGomegaWithT(t).Consistently(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 0 + }, 5*time.Second, time.Second).Should(gomega.BeTrue()) + + t.Log("Done!") +} + +func testSingleConsumerBigGasCallbackSandwich( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + batchCoordinatorAddress common.Address, + batchEnabled bool, + vrfVersion vrfcommon.Version, + nativePayment bool, +) { + key1 := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(100) + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(100), v2.KeySpecific{ + // Gas lane. 
+ Key: ptr(key1.EIP55Address), + GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].GasEstimator.LimitDefault = ptr[uint32](5_000_000) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) + consumer := uni.vrfConsumers[0] + consumerContract := uni.consumerContracts[0] + consumerContractAddress := uni.consumerContractAddresses[0] + + subID := subscribeAndAssertSubscriptionCreatedEvent(t, consumerContract, consumer, consumerContractAddress, assets.Ether(2).ToInt(), uni.rootContract, uni.backend, nativePayment) + + // Fund gas lane. + sendEth(t, ownerKey, uni.backend, key1.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job. + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key1}}, + app, + uni.rootContract, + uni.rootContractAddress, + batchCoordinatorAddress, + uni, + nil, + vrfVersion, + false, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Make some randomness requests, each one block apart, which contain a single low-gas request sandwiched between two high-gas requests. + numWords := uint32(2) + reqIDs := []*big.Int{} + callbackGasLimits := []uint32{2_500_000, 50_000, 1_500_000} + for _, limit := range callbackGasLimits { + requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, limit, uni.rootContract, uni.backend, nativePayment) + reqIDs = append(reqIDs, requestID) + uni.backend.Commit() + } + + // Assert that we've completed 0 runs before adding 3 new requests. + { + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + assert.Equal(t, 0, len(runs)) + assert.Equal(t, 3, len(reqIDs)) + } + + // Wait for the 50_000 gas randomness request to be enqueued. 
+ gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + // After the first successful request, no more will be enqueued. + gomega.NewGomegaWithT(t).Consistently(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("assert 1", "runs", len(runs)) + return len(runs) == 1 + }, 3*time.Second, 1*time.Second).Should(gomega.BeTrue()) + + // Mine the fulfillment that was queued. + mine(t, reqIDs[1], subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + // Assert the random word was fulfilled + assertRandomWordsFulfilled(t, reqIDs[1], false, uni.rootContract, nativePayment) + + // Assert that we've still only completed 1 run before adding new requests. + { + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + assert.Equal(t, 1, len(runs)) + } + + // Make some randomness requests, each one block apart, this time without a low-gas request present in the callbackGasLimit slice. + callbackGasLimits = []uint32{2_500_000, 2_500_000, 2_500_000} + for _, limit := range callbackGasLimits { + _, _ = requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, limit, uni.rootContract, uni.backend, nativePayment) + uni.backend.Commit() + } + + // Fulfillment will not be enqueued because subscriber doesn't have enough PLI for any of the requests. 
+ gomega.NewGomegaWithT(t).Consistently(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("assert 1", "runs", len(runs)) + return len(runs) == 1 + }, 5*time.Second, 1*time.Second).Should(gomega.BeTrue()) + + t.Log("Done!") +} + +func testSingleConsumerMultipleGasLanes( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + batchCoordinatorAddress common.Address, + batchEnabled bool, + vrfVersion vrfcommon.Version, + nativePayment bool, +) { + cheapKey := cltest.MustGenerateRandomKey(t) + expensiveKey := cltest.MustGenerateRandomKey(t) + cheapGasLane := assets.GWei(10) + expensiveGasLane := assets.GWei(1000) + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ + // Cheap gas lane. + Key: ptr(cheapKey.EIP55Address), + GasEstimator: v2.KeySpecificGasEstimator{PriceMax: cheapGasLane}, + }, v2.KeySpecific{ + // Expensive gas lane. + Key: ptr(expensiveKey.EIP55Address), + GasEstimator: v2.KeySpecificGasEstimator{PriceMax: expensiveGasLane}, + })(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.EVM[0].GasEstimator.LimitDefault = ptr[uint32](5_000_000) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, cheapKey, expensiveKey) + consumer := uni.vrfConsumers[0] + consumerContract := uni.consumerContracts[0] + consumerContractAddress := uni.consumerContractAddresses[0] + + // Create a subscription and fund with 5 PLI. + subID := subscribeAndAssertSubscriptionCreatedEvent(t, consumerContract, consumer, consumerContractAddress, big.NewInt(5e18), uni.rootContract, uni.backend, nativePayment) + + // Fund gas lanes. 
+ sendEth(t, ownerKey, uni.backend, cheapKey.Address, 10) + sendEth(t, ownerKey, uni.backend, expensiveKey.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF jobs. + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{cheapKey}, {expensiveKey}}, + app, + uni.rootContract, + uni.rootContractAddress, + batchCoordinatorAddress, + uni, + nil, + vrfVersion, + false, + cheapGasLane, expensiveGasLane) + cheapHash := jbs[0].VRFSpec.PublicKey.MustHash() + expensiveHash := jbs[1].VRFSpec.PublicKey.MustHash() + + numWords := uint32(20) + cheapRequestID, _ := + requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, cheapHash, subID, numWords, 500_000, uni.rootContract, uni.backend, nativePayment) + + // Wait for fulfillment to be queued for cheap key hash. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("assert 1", "runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue()) + + // Mine the fulfillment that was queued. + mine(t, cheapRequestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + // Assert correct state of RandomWordsFulfilled event. + assertRandomWordsFulfilled(t, cheapRequestID, true, uni.rootContract, nativePayment) + + // Assert correct number of random words sent by coordinator. + assertNumRandomWords(t, consumerContract, numWords) + + expensiveRequestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, expensiveHash, subID, numWords, 500_000, uni.rootContract, uni.backend, nativePayment) + + // We should not have any new fulfillments until a top up. 
+ gomega.NewWithT(t).Consistently(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("assert 2", "runs", len(runs)) + return len(runs) == 1 + }, 5*time.Second, 1*time.Second).Should(gomega.BeTrue()) + + // Top up subscription with enough PLI to see the job through. 100 PLI should do the trick. + topUpSubscription(t, consumer, consumerContract, uni.backend, decimal.RequireFromString("100e18").BigInt(), nativePayment) + + // Wait for fulfillment to be queued for expensive key hash. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("assert 1", "runs", len(runs)) + return len(runs) == 2 + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue()) + + // Mine the fulfillment that was queued. + mine(t, expensiveRequestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + // Assert correct state of RandomWordsFulfilled event. + assertRandomWordsFulfilled(t, expensiveRequestID, true, uni.rootContract, nativePayment) + + // Assert correct number of random words sent by coordinator. 
+ assertNumRandomWords(t, consumerContract, numWords) +} + +func topUpSubscription(t *testing.T, consumer *bind.TransactOpts, consumerContract vrftesthelpers.VRFConsumerContract, backend *backends.SimulatedBackend, fundingAmount *big.Int, nativePayment bool) { + if nativePayment { + _, err := consumerContract.TopUpSubscriptionNative(consumer, fundingAmount) + require.NoError(t, err) + } else { + _, err := consumerContract.TopUpSubscription(consumer, fundingAmount) + require.NoError(t, err) + } + backend.Commit() +} + +func testSingleConsumerAlwaysRevertingCallbackStillFulfilled( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + batchCoordinatorAddress common.Address, + batchEnabled bool, + vrfVersion vrfcommon.Version, + nativePayment bool, +) { + key := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ + // Gas lane. + Key: ptr(key.EIP55Address), + GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key) + consumer := uni.reverter + consumerContract := uni.revertingConsumerContract + consumerContractAddress := uni.revertingConsumerContractAddress + + // Create a subscription and fund with 5 PLI. + subID := subscribeAndAssertSubscriptionCreatedEvent(t, consumerContract, consumer, consumerContractAddress, big.NewInt(5e18), uni.rootContract, uni.backend, nativePayment) + + // Fund gas lane. + sendEth(t, ownerKey, uni.backend, key.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job. 
+ jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key}}, + app, + uni.rootContract, + uni.rootContractAddress, + batchCoordinatorAddress, + uni, + nil, + vrfVersion, + false, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Make the randomness request. + numWords := uint32(20) + requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, uni.rootContract, uni.backend, nativePayment) + + // Wait for fulfillment to be queued. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue()) + + // Mine the fulfillment that was queued. + mine(t, requestID, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + // Assert correct state of RandomWordsFulfilled event. + assertRandomWordsFulfilled(t, requestID, false, uni.rootContract, nativePayment) + t.Log("Done!") +} + +func testConsumerProxyHappyPath( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + batchCoordinatorAddress common.Address, + batchEnabled bool, + vrfVersion vrfcommon.Version, + nativePayment bool, +) { + key1 := cltest.MustGenerateRandomKey(t) + key2 := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), v2.KeySpecific{ + // Gas lane. 
+ Key: ptr(key1.EIP55Address), + GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + }, v2.KeySpecific{ + Key: ptr(key2.EIP55Address), + GasEstimator: v2.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1, key2) + consumerOwner := uni.neil + consumerContract := uni.consumerProxyContract + consumerContractAddress := uni.consumerProxyContractAddress + + // Create a subscription and fund with 5 PLI. + subID := subscribeAndAssertSubscriptionCreatedEvent( + t, consumerContract, consumerOwner, consumerContractAddress, + assets.Ether(5).ToInt(), uni.rootContract, uni.backend, nativePayment) + + // Create gas lane. + sendEth(t, ownerKey, uni.backend, key1.Address, 10) + sendEth(t, ownerKey, uni.backend, key2.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job using key1 and key2 on the same gas lane. + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key1, key2}}, + app, + uni.rootContract, + uni.rootContractAddress, + batchCoordinatorAddress, + uni, + nil, + vrfVersion, + false, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Make the first randomness request. + numWords := uint32(20) + requestID1, _ := requestRandomnessAndAssertRandomWordsRequestedEvent( + t, consumerContract, consumerOwner, keyHash, subID, numWords, 750_000, uni.rootContract, uni.backend, nativePayment) + + // Wait for fulfillment to be queued. 
+ gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + // Mine the fulfillment that was queued. + mine(t, requestID1, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + // Assert correct state of RandomWordsFulfilled event. + assertRandomWordsFulfilled(t, requestID1, true, uni.rootContract, nativePayment) + + // Gas available will be around 724,385, which means that 750,000 - 724,385 = 25,615 gas was used. + // This is ~20k more than what the non-proxied consumer uses. + // So to be safe, users should probably over-estimate their fulfillment gas by ~25k. + { + gasAvailable, err := consumerContract.SGasAvailable(nil) + require.NoError(t, err) + t.Log("gas available after proxied callback:", gasAvailable) + } + + // Make the second randomness request and assert fulfillment is successful + requestID2, _ := requestRandomnessAndAssertRandomWordsRequestedEvent( + t, consumerContract, consumerOwner, keyHash, subID, numWords, 750_000, uni.rootContract, uni.backend, nativePayment) + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 2 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + mine(t, requestID2, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + assertRandomWordsFulfilled(t, requestID2, true, uni.rootContract, nativePayment) + + // Assert correct number of random words sent by coordinator. 
+ assertNumRandomWords(t, consumerContract, numWords) + + // Assert that both send addresses were used to fulfill the requests + n, err := uni.backend.PendingNonceAt(testutils.Context(t), key1.Address) + require.NoError(t, err) + require.EqualValues(t, 1, n) + + n, err = uni.backend.PendingNonceAt(testutils.Context(t), key2.Address) + require.NoError(t, err) + require.EqualValues(t, 1, n) + + t.Log("Done!") +} + +func testConsumerProxyCoordinatorZeroAddress( + t *testing.T, + uni coordinatorV2UniverseCommon, +) { + // Deploy another upgradeable consumer, proxy, and proxy admin + // to test vrfCoordinator != 0x0 condition. + upgradeableConsumerAddress, _, _, err := vrf_consumer_v2_upgradeable_example.DeployVRFConsumerV2UpgradeableExample(uni.neil, uni.backend) + require.NoError(t, err, "failed to deploy upgradeable consumer to simulated ethereum blockchain") + uni.backend.Commit() + + // Deployment should revert if we give the 0x0 address for the coordinator. + upgradeableAbi, err := vrf_consumer_v2_upgradeable_example.VRFConsumerV2UpgradeableExampleMetaData.GetAbi() + require.NoError(t, err) + initializeCalldata, err := upgradeableAbi.Pack("initialize", + common.BytesToAddress(common.LeftPadBytes([]byte{}, 20)), // zero address for the coordinator + uni.linkContractAddress) + require.NoError(t, err) + _, _, _, err = vrfv2_transparent_upgradeable_proxy.DeployVRFV2TransparentUpgradeableProxy( + uni.neil, uni.backend, upgradeableConsumerAddress, uni.proxyAdminAddress, initializeCalldata) + require.Error(t, err) +} + +func testMaliciousConsumer( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + batchCoordinatorAddress common.Address, + batchEnabled bool, + vrfVersion vrfcommon.Version, +) { + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.LimitDefault = ptr[uint32](2_000_000) + c.EVM[0].GasEstimator.PriceMax = assets.GWei(1) + c.EVM[0].GasEstimator.PriceDefault = assets.GWei(1) + 
c.EVM[0].GasEstimator.FeeCapDefault = assets.GWei(1) + c.EVM[0].ChainID = (*ubig.Big)(testutils.SimulatedChainID) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + carol := uni.vrfConsumers[0] + + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey) + require.NoError(t, app.Start(testutils.Context(t))) + + err := app.GetKeyStore().Unlock(cltest.Password) + require.NoError(t, err) + vrfkey, err := app.GetKeyStore().VRF().Create() + require.NoError(t, err) + + jid := uuid.New() + incomingConfs := 2 + s := testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + JobID: jid.String(), + Name: "vrf-primary", + VRFVersion: vrfVersion, + FromAddresses: []string{ownerKey.Address.String()}, + CoordinatorAddress: uni.rootContractAddress.String(), + BatchCoordinatorAddress: batchCoordinatorAddress.String(), + MinIncomingConfirmations: incomingConfs, + GasLanePrice: assets.GWei(1), + PublicKey: vrfkey.PublicKey.String(), + V2: true, + EVMChainID: testutils.SimulatedChainID.String(), + }).Toml() + jb, err := vrfcommon.ValidatedVRFSpec(s) + require.NoError(t, err) + err = app.JobSpawner().CreateJob(&jb) + require.NoError(t, err) + time.Sleep(1 * time.Second) + + // Register a proving key associated with the VRF job. + registerProvingKeyHelper(t, uni, uni.rootContract, vrfkey, &defaultMaxGasPrice) + + subFunding := decimal.RequireFromString("1000000000000000000") + _, err = uni.maliciousConsumerContract.CreateSubscriptionAndFund(carol, + subFunding.BigInt()) + require.NoError(t, err) + uni.backend.Commit() + + // Send a re-entrant request + // subID, nConfs, callbackGas, numWords are hard-coded within the contract, so setting them to 0 here + _, err = uni.maliciousConsumerContract.RequestRandomness(carol, vrfkey.PublicKey.MustHash(), big.NewInt(0), 0, 0, 0, false) + require.NoError(t, err) + + // We expect the request to be serviced + // by the node. 
+	var attempts []txmgr.TxAttempt
+	gomega.NewWithT(t).Eventually(func() bool {
+		attempts, _, err = app.TxmStorageService().TxAttempts(0, 1000)
+		require.NoError(t, err)
+		// It is possible that we send the test request
+		// before the job spawner has started the vrf services, which is fine
+		// the lb will backfill the logs. However, we need to
+		// keep blocks coming in for the lb to send the backfilled logs.
+		t.Log("attempts", attempts)
+		uni.backend.Commit()
+		return len(attempts) == 1 && attempts[0].Tx.State == txmgrcommon.TxConfirmed
+	}, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue())
+
+	// The fulfillment tx should succeed
+	ch, err := app.GetRelayers().LegacyEVMChains().Get(evmtest.MustGetDefaultChainID(t, config.EVMConfigs()).String())
+	require.NoError(t, err)
+	r, err := ch.Client().TransactionReceipt(testutils.Context(t), attempts[0].Hash)
+	require.NoError(t, err)
+	require.Equal(t, uint64(1), r.Status)
+
+	// The user callback should have errored
+	it, err := uni.rootContract.FilterRandomWordsFulfilled(nil, nil, nil)
+	require.NoError(t, err)
+	var fulfillments []v22.RandomWordsFulfilled
+	for it.Next() {
+		fulfillments = append(fulfillments, it.Event())
+	}
+	require.Equal(t, 1, len(fulfillments))
+	require.Equal(t, false, fulfillments[0].Success())
+
+	// It should not have succeeded in placing another request.
+ it2, err2 := uni.rootContract.FilterRandomWordsRequested(nil, nil, nil, nil) + require.NoError(t, err2) + var requests []v22.RandomWordsRequested + for it2.Next() { + requests = append(requests, it2.Event()) + } + require.Equal(t, 1, len(requests)) +} + +func testReplayOldRequestsOnStartUp( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + consumer *bind.TransactOpts, + consumerContract vrftesthelpers.VRFConsumerContract, + consumerContractAddress common.Address, + coordinator v22.CoordinatorV2_X, + coordinatorAddress common.Address, + batchCoordinatorAddress common.Address, + vrfOwnerAddress *common.Address, + vrfVersion vrfcommon.Version, + nativePayment bool, + assertions ...func( + t *testing.T, + coordinator v22.CoordinatorV2_X, + rwfe v22.RandomWordsFulfilled, + subID *big.Int), +) { + sendingKey := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ + // Gas lane. + Key: ptr(sendingKey.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, sendingKey) + + // Create a subscription and fund with 5 PLI. + subID := subscribeAndAssertSubscriptionCreatedEvent(t, consumerContract, consumer, consumerContractAddress, big.NewInt(5e18), coordinator, uni.backend, nativePayment) + + // Fund gas lanes. 
+ sendEth(t, ownerKey, uni.backend, sendingKey.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF Key, register it to coordinator and export + vrfkey, err := app.GetKeyStore().VRF().Create() + require.NoError(t, err) + registerProvingKeyHelper(t, uni, coordinator, vrfkey, &defaultMaxGasPrice) + keyHash := vrfkey.PublicKey.MustHash() + + encodedVrfKey, err := app.GetKeyStore().VRF().Export(vrfkey.ID(), testutils.Password) + require.NoError(t, err) + + // Shut down the node before making the randomness request + require.NoError(t, app.Stop()) + + // Make the first randomness request. + numWords := uint32(20) + requestID1, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, coordinator, uni.backend, nativePayment) + + // number of blocks to mine before restarting the node + nBlocks := 100 + for i := 0; i < nBlocks; i++ { + uni.backend.Commit() + } + + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ + // Gas lane. 
+ Key: ptr(sendingKey.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + + // Start a new app and create VRF job using the same VRF key created above + app = cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, sendingKey) + + require.NoError(t, app.Start(testutils.Context(t))) + + vrfKey, err := app.GetKeyStore().VRF().Import(encodedVrfKey, testutils.Password) + require.NoError(t, err) + + incomingConfs := 2 + var vrfOwnerString string + if vrfOwnerAddress != nil { + vrfOwnerString = vrfOwnerAddress.Hex() + } + + spec := testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + Name: "vrf-primary", + VRFVersion: vrfVersion, + CoordinatorAddress: coordinatorAddress.Hex(), + BatchCoordinatorAddress: batchCoordinatorAddress.Hex(), + MinIncomingConfirmations: incomingConfs, + PublicKey: vrfKey.PublicKey.String(), + FromAddresses: []string{sendingKey.Address.String()}, + BackoffInitialDelay: 10 * time.Millisecond, + BackoffMaxDelay: time.Second, + V2: true, + GasLanePrice: gasLanePriceWei, + VRFOwnerAddress: vrfOwnerString, + EVMChainID: testutils.SimulatedChainID.String(), + }).Toml() + + jb, err := vrfcommon.ValidatedVRFSpec(spec) + require.NoError(t, err) + t.Log(jb.VRFSpec.PublicKey.MustHash(), vrfKey.PublicKey.MustHash()) + err = app.JobSpawner().CreateJob(&jb) + require.NoError(t, err) + + // Wait until all jobs are active and listening for logs + gomega.NewWithT(t).Eventually(func() bool { + jbs := app.JobSpawner().ActiveJobs() + for _, jb := range jbs { + if jb.Type == job.VRF { + return true + } + } + return false + }, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue()) + + // Wait for fulfillment to be queued. 
+ gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + // Mine the fulfillment that was queued. + mine(t, requestID1, subID, uni.backend, db, vrfVersion, testutils.SimulatedChainID) + + // Assert correct state of RandomWordsFulfilled event. + // In particular: + // * success should be true + // * payment should be exactly the amount specified as the premium in the coordinator fee config + rwfe := assertRandomWordsFulfilled(t, requestID1, true, coordinator, nativePayment) + if len(assertions) > 0 { + assertions[0](t, coordinator, rwfe, subID) + } +} diff --git a/core/services/vrf/v2/integration_v2_plus_test.go b/core/services/vrf/v2/integration_v2_plus_test.go new file mode 100644 index 00000000..e66b6508 --- /dev/null +++ b/core/services/vrf/v2/integration_v2_plus_test.go @@ -0,0 +1,1369 @@ +package v2_test + +import ( + "math/big" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_vrf_coordinator_v2plus" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_v3_aggregator_contract" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/trusted_blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_consumer_v2_plus_upgradeable_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_consumer_v2_upgradeable_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2_5" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2_plus_v2_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_malicious_consumer_v2_plus" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_single_consumer" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_sub_owner" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_proxy_admin" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_transparent_upgradeable_proxy" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2plus_consumer_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2plus_reverting_example" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + 
"github.com/goplugin/pluginv3.0/v2/core/services/vrf/extraargs" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" + v22 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/v2" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrftesthelpers" +) + +type coordinatorV2PlusUniverse struct { + coordinatorV2UniverseCommon + submanager *bind.TransactOpts // Subscription owner + batchCoordinatorContract *batch_vrf_coordinator_v2plus.BatchVRFCoordinatorV2Plus + batchCoordinatorContractAddress common.Address + migrationTestCoordinator *vrf_coordinator_v2_plus_v2_example.VRFCoordinatorV2PlusV2Example + migrationTestCoordinatorAddress common.Address + trustedBhsContract *trusted_blockhash_store.TrustedBlockhashStore + trustedBhsContractAddress common.Address +} + +func newVRFCoordinatorV2PlusUniverse(t *testing.T, key ethkey.KeyV2, numConsumers int, trusting bool) coordinatorV2PlusUniverse { + testutils.SkipShort(t, "VRFCoordinatorV2Universe") + oracleTransactor, err := bind.NewKeyedTransactorWithChainID(key.ToEcdsaPrivKey(), testutils.SimulatedChainID) + require.NoError(t, err) + var ( + sergey = testutils.MustNewSimTransactor(t) + neil = testutils.MustNewSimTransactor(t) + ned = testutils.MustNewSimTransactor(t) + evil = testutils.MustNewSimTransactor(t) + reverter = testutils.MustNewSimTransactor(t) + submanager = testutils.MustNewSimTransactor(t) + nallory = oracleTransactor + vrfConsumers []*bind.TransactOpts + ) + + // Create consumer contract deployer identities + for i := 0; i < numConsumers; i++ { + vrfConsumers = append(vrfConsumers, testutils.MustNewSimTransactor(t)) + } + + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(1000).ToInt()}, + neil.From: {Balance: assets.Ether(1000).ToInt()}, + ned.From: {Balance: assets.Ether(1000).ToInt()}, + nallory.From: {Balance: assets.Ether(1000).ToInt()}, + evil.From: {Balance: assets.Ether(1000).ToInt()}, + reverter.From: 
{Balance: assets.Ether(1000).ToInt()}, + submanager.From: {Balance: assets.Ether(1000).ToInt()}, + } + for _, consumer := range vrfConsumers { + genesisData[consumer.From] = core.GenesisAccount{ + Balance: assets.Ether(1000).ToInt(), + } + } + + gasLimit := uint32(ethconfig.Defaults.Miner.GasCeil) + consumerABI, err := abi.JSON(strings.NewReader( + vrfv2plus_consumer_example.VRFV2PlusConsumerExampleABI)) + require.NoError(t, err) + coordinatorABI, err := abi.JSON(strings.NewReader( + vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalABI)) + require.NoError(t, err) + backend := cltest.NewSimulatedBackend(t, genesisData, gasLimit) + blockTime := time.UnixMilli(int64(backend.Blockchain().CurrentHeader().Time)) + err = backend.AdjustTime(time.Since(blockTime) - 24*time.Hour) + require.NoError(t, err) + backend.Commit() + // Deploy link + linkAddress, _, linkContract, err := link_token_interface.DeployLinkToken( + sergey, backend) + require.NoError(t, err, "failed to deploy link contract to simulated ethereum blockchain") + + // Deploy feed + linkEthFeed, _, _, err := + mock_v3_aggregator_contract.DeployMockV3AggregatorContract( + evil, backend, 18, vrftesthelpers.WeiPerUnitLink.BigInt()) // 0.01 eth per link + require.NoError(t, err) + + // Deploy blockhash store + bhsAddress, _, bhsContract, err := blockhash_store.DeployBlockhashStore(neil, backend) + require.NoError(t, err, "failed to deploy BlockhashStore contract to simulated ethereum blockchain") + + // Deploy trusted BHS + trustedBHSAddress, _, trustedBhsContract, err := trusted_blockhash_store.DeployTrustedBlockhashStore(neil, backend, []common.Address{}) + require.NoError(t, err, "failed to deploy trusted BlockhashStore contract to simulated ethereum blockchain") + + // Deploy batch blockhash store + batchBHSAddress, _, batchBHSContract, err := batch_blockhash_store.DeployBatchBlockhashStore(neil, backend, bhsAddress) + require.NoError(t, err, "failed to deploy BatchBlockhashStore contract to 
simulated ethereum blockchain") + + // Deploy VRF V2plus coordinator + var bhsAddr = bhsAddress + if trusting { + bhsAddr = trustedBHSAddress + } + coordinatorAddress, _, coordinatorContract, err := + vrf_coordinator_v2_5.DeployVRFCoordinatorV25( + neil, backend, bhsAddr) + require.NoError(t, err, "failed to deploy VRFCoordinatorV2 contract to simulated ethereum blockchain") + backend.Commit() + + _, err = coordinatorContract.SetPLIAndPLINativeFeed(neil, linkAddress, linkEthFeed) + require.NoError(t, err) + backend.Commit() + + migrationTestCoordinatorAddress, _, migrationTestCoordinator, err := vrf_coordinator_v2_plus_v2_example.DeployVRFCoordinatorV2PlusV2Example( + neil, backend, linkAddress, coordinatorAddress) + require.NoError(t, err) + backend.Commit() + + _, err = coordinatorContract.RegisterMigratableCoordinator(neil, migrationTestCoordinatorAddress) + require.NoError(t, err) + backend.Commit() + + // Deploy batch VRF V2 coordinator + batchCoordinatorAddress, _, batchCoordinatorContract, err := + batch_vrf_coordinator_v2plus.DeployBatchVRFCoordinatorV2Plus( + neil, backend, coordinatorAddress, + ) + require.NoError(t, err, "failed to deploy BatchVRFCoordinatorV2 contract to simulated ethereum blockchain") + backend.Commit() + + // Create the VRF consumers. + var ( + consumerContracts []vrftesthelpers.VRFConsumerContract + consumerContractAddresses []common.Address + ) + for _, author := range vrfConsumers { + // Deploy a VRF consumer. It has a starting balance of 500 PLI. 
+ consumerContractAddress, _, consumerContract, err2 := + vrfv2plus_consumer_example.DeployVRFV2PlusConsumerExample( + author, backend, coordinatorAddress, linkAddress) + require.NoError(t, err2, "failed to deploy VRFConsumer contract to simulated ethereum blockchain") + _, err2 = linkContract.Transfer(sergey, consumerContractAddress, assets.Ether(500).ToInt()) // Actually, PLI + require.NoError(t, err2, "failed to send PLI to VRFConsumer contract on simulated ethereum blockchain") + + consumerContracts = append(consumerContracts, vrftesthelpers.NewVRFV2PlusConsumer(consumerContract)) + consumerContractAddresses = append(consumerContractAddresses, consumerContractAddress) + + backend.Commit() + } + + // Deploy malicious consumer with 1 pli + maliciousConsumerContractAddress, _, maliciousConsumerContract, err := + vrf_malicious_consumer_v2_plus.DeployVRFMaliciousConsumerV2Plus( + evil, backend, coordinatorAddress, linkAddress) + require.NoError(t, err, "failed to deploy VRFMaliciousConsumer contract to simulated ethereum blockchain") + _, err = linkContract.Transfer(sergey, maliciousConsumerContractAddress, assets.Ether(1).ToInt()) // Actually, PLI + require.NoError(t, err, "failed to send PLI to VRFMaliciousConsumer contract on simulated ethereum blockchain") + backend.Commit() + + // Deploy upgradeable consumer, proxy, and proxy admin + upgradeableConsumerAddress, _, _, err := vrf_consumer_v2_plus_upgradeable_example.DeployVRFConsumerV2PlusUpgradeableExample(neil, backend) + require.NoError(t, err, "failed to deploy upgradeable consumer to simulated ethereum blockchain") + backend.Commit() + + proxyAdminAddress, _, proxyAdmin, err := vrfv2_proxy_admin.DeployVRFV2ProxyAdmin(neil, backend) + require.NoError(t, err) + backend.Commit() + + // provide abi-encoded initialize function call on the implementation contract + // so that it's called upon the proxy construction, to initialize it. 
+ upgradeableAbi, err := vrf_consumer_v2_upgradeable_example.VRFConsumerV2UpgradeableExampleMetaData.GetAbi() + require.NoError(t, err) + initializeCalldata, err := upgradeableAbi.Pack("initialize", coordinatorAddress, linkAddress) + hexified := hexutil.Encode(initializeCalldata) + t.Log("initialize calldata:", hexified, "coordinator:", coordinatorAddress.String(), "link:", linkAddress) + require.NoError(t, err) + proxyAddress, _, _, err := vrfv2_transparent_upgradeable_proxy.DeployVRFV2TransparentUpgradeableProxy( + neil, backend, upgradeableConsumerAddress, proxyAdminAddress, initializeCalldata) + require.NoError(t, err) + + _, err = linkContract.Transfer(sergey, proxyAddress, assets.Ether(500).ToInt()) // Actually, PLI + require.NoError(t, err) + backend.Commit() + + implAddress, err := proxyAdmin.GetProxyImplementation(nil, proxyAddress) + require.NoError(t, err) + t.Log("impl address:", implAddress.String()) + require.Equal(t, upgradeableConsumerAddress, implAddress) + + proxiedConsumer, err := vrf_consumer_v2_plus_upgradeable_example.NewVRFConsumerV2PlusUpgradeableExample( + proxyAddress, backend) + require.NoError(t, err) + + cAddress, err := proxiedConsumer.COORDINATOR(nil) + require.NoError(t, err) + t.Log("coordinator address in proxy to upgradeable consumer:", cAddress.String()) + require.Equal(t, coordinatorAddress, cAddress) + + lAddress, err := proxiedConsumer.PLITOKEN(nil) + require.NoError(t, err) + t.Log("link address in proxy to upgradeable consumer:", lAddress.String()) + require.Equal(t, linkAddress, lAddress) + + // Deploy always reverting consumer + revertingConsumerContractAddress, _, revertingConsumerContract, err := vrfv2plus_reverting_example.DeployVRFV2PlusRevertingExample( + reverter, backend, coordinatorAddress, linkAddress, + ) + require.NoError(t, err, "failed to deploy VRFRevertingExample contract to simulated eth blockchain") + _, err = linkContract.Transfer(sergey, revertingConsumerContractAddress, assets.Ether(500).ToInt()) // 
Actually, PLI + require.NoError(t, err, "failed to send PLI to VRFRevertingExample contract on simulated eth blockchain") + backend.Commit() + + // Set the configuration on the coordinator. + _, err = coordinatorContract.SetConfig(neil, + uint16(1), // minRequestConfirmations + uint32(2.5e6), // gas limit + uint32(60*60*24), // stalenessSeconds + uint32(v22.GasAfterPaymentCalculation), // gasAfterPaymentCalculation + big.NewInt(1e16), // 0.01 eth per link fallbackLinkPrice + uint32(5), // 0.000005 ETH premium + uint32(1), // 0.000001 PLI premium discount denominated in ETH + uint8(10), // 10% native payment percentage + uint8(5), // 5% PLI payment percentage + ) + require.NoError(t, err, "failed to set coordinator configuration") + backend.Commit() + + for i := 0; i < 200; i++ { + backend.Commit() + } + + return coordinatorV2PlusUniverse{ + coordinatorV2UniverseCommon: coordinatorV2UniverseCommon{ + vrfConsumers: vrfConsumers, + consumerContracts: consumerContracts, + consumerContractAddresses: consumerContractAddresses, + + revertingConsumerContract: vrftesthelpers.NewRevertingConsumerPlus(revertingConsumerContract), + revertingConsumerContractAddress: revertingConsumerContractAddress, + + consumerProxyContract: vrftesthelpers.NewUpgradeableConsumerPlus(proxiedConsumer), + consumerProxyContractAddress: proxiedConsumer.Address(), + proxyAdminAddress: proxyAdminAddress, + + rootContract: v22.NewCoordinatorV2_5(coordinatorContract), + rootContractAddress: coordinatorAddress, + linkContract: linkContract, + linkContractAddress: linkAddress, + linkEthFeedAddress: linkEthFeed, + bhsContract: bhsContract, + bhsContractAddress: bhsAddress, + batchBHSContract: batchBHSContract, + batchBHSContractAddress: batchBHSAddress, + maliciousConsumerContract: vrftesthelpers.NewMaliciousConsumerPlus(maliciousConsumerContract), + maliciousConsumerContractAddress: maliciousConsumerContractAddress, + backend: backend, + coordinatorABI: &coordinatorABI, + consumerABI: &consumerABI, + 
sergey: sergey, + neil: neil, + ned: ned, + nallory: nallory, + evil: evil, + reverter: reverter, + }, + batchCoordinatorContract: batchCoordinatorContract, + batchCoordinatorContractAddress: batchCoordinatorAddress, + submanager: submanager, + migrationTestCoordinator: migrationTestCoordinator, + migrationTestCoordinatorAddress: migrationTestCoordinatorAddress, + trustedBhsContract: trustedBhsContract, + trustedBhsContractAddress: trustedBHSAddress, + } +} + +func TestVRFV2PlusIntegration_SingleConsumer_HappyPath_BatchFulfillment(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + t.Run("link payment", func(tt *testing.T) { + testSingleConsumerHappyPathBatchFulfillment( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + 5, // number of requests to send + false, // don't send big callback + vrfcommon.V2Plus, + false, + func(t *testing.T, coordinator v22.CoordinatorV2_X, rwfe v22.RandomWordsFulfilled, expectedSubID *big.Int) { + _, err := coordinator.GetSubscription(nil, rwfe.SubID()) + require.NoError(t, err) + require.Equal(t, expectedSubID, rwfe.SubID()) + }, + ) + }) + + t.Run("native payment", func(tt *testing.T) { + testSingleConsumerHappyPathBatchFulfillment( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + 5, // number of requests to send + false, // don't send big callback + vrfcommon.V2Plus, + true, + func(t *testing.T, coordinator v22.CoordinatorV2_X, rwfe v22.RandomWordsFulfilled, expectedSubID *big.Int) { + _, err := coordinator.GetSubscription(nil, rwfe.SubID()) + require.NoError(t, err) + 
require.Equal(t, expectedSubID, rwfe.SubID()) + }, + ) + }) +} + +func TestVRFV2PlusIntegration_SingleConsumer_HappyPath_BatchFulfillment_BigGasCallback(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + t.Run("link payment", func(tt *testing.T) { + testSingleConsumerHappyPathBatchFulfillment( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + 5, // number of requests to send + true, // send big callback + vrfcommon.V2Plus, + false, + func(t *testing.T, coordinator v22.CoordinatorV2_X, rwfe v22.RandomWordsFulfilled, expectedSubID *big.Int) { + _, err := coordinator.GetSubscription(nil, rwfe.SubID()) + require.NoError(t, err) + require.Equal(t, expectedSubID, rwfe.SubID()) + }, + ) + }) + + t.Run("native payment", func(tt *testing.T) { + testSingleConsumerHappyPathBatchFulfillment( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + 5, // number of requests to send + true, // send big callback + vrfcommon.V2Plus, + true, + func(t *testing.T, coordinator v22.CoordinatorV2_X, rwfe v22.RandomWordsFulfilled, expectedSubID *big.Int) { + _, err := coordinator.GetSubscription(nil, rwfe.SubID()) + require.NoError(t, err) + require.Equal(t, expectedSubID, rwfe.SubID()) + }, + ) + }) +} + +func TestVRFV2PlusIntegration_SingleConsumer_HappyPath(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + t.Run("link payment", func(tt *testing.T) { + testSingleConsumerHappyPath( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + 
uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + vrfcommon.V2Plus, + false, + func(t *testing.T, coordinator v22.CoordinatorV2_X, rwfe v22.RandomWordsFulfilled, expectedSubID *big.Int) { + _, err := coordinator.GetSubscription(nil, rwfe.SubID()) + require.NoError(t, err) + require.Equal(t, expectedSubID, rwfe.SubID()) + }) + }) + t.Run("native payment", func(tt *testing.T) { + testSingleConsumerHappyPath( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + vrfcommon.V2Plus, + true, + func(t *testing.T, coordinator v22.CoordinatorV2_X, rwfe v22.RandomWordsFulfilled, expectedSubID *big.Int) { + _, err := coordinator.GetSubscription(nil, rwfe.SubID()) + require.NoError(t, err) + require.Equal(t, expectedSubID, rwfe.SubID()) + }) + }) +} + +func TestVRFV2PlusIntegration_SingleConsumer_EOA_Request(t *testing.T) { + t.Skip("questionable value of this test") + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + testEoa( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + false, + uni.batchBHSContractAddress, + nil, + vrfcommon.V2Plus, + ) +} + +func TestVRFV2PlusIntegration_SingleConsumer_EOA_Request_Batching_Enabled(t *testing.T) { + t.Skip("questionable value of this test") + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + testEoa( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + true, + uni.batchBHSContractAddress, + nil, + vrfcommon.V2Plus, + ) +} + +func TestVRFV2PlusIntegration_SingleConsumer_EIP150_HappyPath(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := 
newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + testSingleConsumerEIP150( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2Plus, + false, + ) +} + +func TestVRFV2PlusIntegration_SingleConsumer_EIP150_Revert(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + testSingleConsumerEIP150Revert( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2Plus, + false, + ) +} + +func TestVRFV2PlusIntegration_SingleConsumer_NeedsBlockhashStore(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 2, false) + t.Run("link payment", func(tt *testing.T) { + testMultipleConsumersNeedBHS( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers, + uni.consumerContracts, + uni.consumerContractAddresses, + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + vrfcommon.V2Plus, + false, + ) + }) + t.Run("native payment", func(tt *testing.T) { + testMultipleConsumersNeedBHS( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers, + uni.consumerContracts, + uni.consumerContractAddresses, + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + vrfcommon.V2Plus, + true, + ) + }) +} + +func TestVRFV2PlusIntegration_SingleConsumer_BlockHeaderFeeder(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + t.Run("link payment", func(tt *testing.T) { + testBlockHeaderFeeder( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers, + uni.consumerContracts, + uni.consumerContractAddresses, + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + 
vrfcommon.V2Plus, + false, + ) + }) + t.Run("native payment", func(tt *testing.T) { + testBlockHeaderFeeder( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers, + uni.consumerContracts, + uni.consumerContractAddresses, + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + vrfcommon.V2Plus, + true, + ) + }) +} + +func TestVRFV2PlusIntegration_SingleConsumer_NeedsTopUp(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + t.Run("link payment", func(tt *testing.T) { + testSingleConsumerNeedsTopUp( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + assets.Ether(1).ToInt(), // initial funding of 1 PLI + assets.Ether(100).ToInt(), // top up of 100 PLI + vrfcommon.V2Plus, + false, + ) + }) + t.Run("native payment", func(tt *testing.T) { + testSingleConsumerNeedsTopUp( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + big.NewInt(1e17), // initial funding of 0.1 ETH + assets.Ether(100).ToInt(), // top up of 100 ETH + vrfcommon.V2Plus, + true, + ) + }) +} + +func TestVRFV2PlusIntegration_SingleConsumer_BigGasCallback_Sandwich(t *testing.T) { + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + testSingleConsumerBigGasCallbackSandwich( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2Plus, + false, + ) +} + +func TestVRFV2PlusIntegration_SingleConsumer_MultipleGasLanes(t *testing.T) { + ownerKey := cltest.MustGenerateRandomKey(t) + uni := 
newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + testSingleConsumerMultipleGasLanes( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2Plus, + false, + ) +} + +func TestVRFV2PlusIntegration_SingleConsumer_AlwaysRevertingCallback_StillFulfilled(t *testing.T) { + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 0, false) + testSingleConsumerAlwaysRevertingCallbackStillFulfilled( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2Plus, + false, + ) +} + +func TestVRFV2PlusIntegration_ConsumerProxy_HappyPath(t *testing.T) { + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 0, false) + testConsumerProxyHappyPath( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2Plus, + false, + ) +} + +func TestVRFV2PlusIntegration_ConsumerProxy_CoordinatorZeroAddress(t *testing.T) { + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 0, false) + testConsumerProxyCoordinatorZeroAddress(t, uni.coordinatorV2UniverseCommon) +} + +func TestVRFV2PlusIntegration_ExternalOwnerConsumerExample(t *testing.T) { + owner := testutils.MustNewSimTransactor(t) + random := testutils.MustNewSimTransactor(t) + genesisData := core.GenesisAlloc{ + owner.From: {Balance: assets.Ether(10).ToInt()}, + random.From: {Balance: assets.Ether(10).ToInt()}, + } + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + linkAddress, _, linkContract, err := link_token_interface.DeployLinkToken( + owner, backend) + require.NoError(t, err) + backend.Commit() + // Deploy feed + linkEthFeed, _, _, err := + mock_v3_aggregator_contract.DeployMockV3AggregatorContract( + owner, backend, 18, vrftesthelpers.WeiPerUnitLink.BigInt()) // 0.01 eth per 
link + require.NoError(t, err) + backend.Commit() + coordinatorAddress, _, coordinator, err := + vrf_coordinator_v2_5.DeployVRFCoordinatorV25( + owner, backend, common.Address{}) //bhs not needed for this test + require.NoError(t, err) + _, err = coordinator.SetConfig(owner, + uint16(1), // minimumRequestConfirmations + uint32(10000), // maxGasLimit + 1, // stalenessSeconds + 1, // gasAfterPaymentCalculation + big.NewInt(10), // fallbackWeiPerUnitLink + 0, // fulfillmentFlatFeeNativePPM + 0, // fulfillmentFlatFeeLinkDiscountPPM + 0, // nativePremiumPercentage + 0, // linkPremiumPercentage + ) + require.NoError(t, err) + backend.Commit() + _, err = coordinator.SetPLIAndPLINativeFeed(owner, linkAddress, linkEthFeed) + require.NoError(t, err) + backend.Commit() + consumerAddress, _, consumer, err := vrf_v2plus_sub_owner.DeployVRFV2PlusExternalSubOwnerExample(owner, backend, coordinatorAddress, linkAddress) + require.NoError(t, err) + backend.Commit() + _, err = linkContract.Transfer(owner, consumerAddress, assets.Ether(2).ToInt()) + require.NoError(t, err) + backend.Commit() + AssertLinkBalances(t, linkContract, []common.Address{owner.From, consumerAddress}, []*big.Int{assets.Ether(999_999_998).ToInt(), assets.Ether(2).ToInt()}) + + // Create sub, fund it and assign consumer + _, err = coordinator.CreateSubscription(owner) + require.NoError(t, err) + backend.Commit() + + iter, err := coordinator.FilterSubscriptionCreated(nil, nil) + require.NoError(t, err) + require.True(t, iter.Next(), "could not find SubscriptionCreated event for subID") + subID := iter.Event.SubId + + b, err := utils.ABIEncode(`[{"type":"uint256"}]`, subID) + require.NoError(t, err) + _, err = linkContract.TransferAndCall(owner, coordinatorAddress, big.NewInt(0), b) + require.NoError(t, err) + _, err = coordinator.AddConsumer(owner, subID, consumerAddress) + require.NoError(t, err) + _, err = consumer.RequestRandomWords(random, subID, 1, 1, 1, [32]byte{}, false) + require.Error(t, err) + _, err = 
consumer.RequestRandomWords(owner, subID, 1, 1, 1, [32]byte{}, false) + require.NoError(t, err) + + // Reassign ownership, check that only new owner can request + _, err = consumer.TransferOwnership(owner, random.From) + require.NoError(t, err) + _, err = consumer.AcceptOwnership(random) + require.NoError(t, err) + _, err = consumer.RequestRandomWords(owner, subID, 1, 1, 1, [32]byte{}, false) + require.Error(t, err) + _, err = consumer.RequestRandomWords(random, subID, 1, 1, 1, [32]byte{}, false) + require.NoError(t, err) +} + +func TestVRFV2PlusIntegration_SimpleConsumerExample(t *testing.T) { + owner := testutils.MustNewSimTransactor(t) + random := testutils.MustNewSimTransactor(t) + genesisData := core.GenesisAlloc{ + owner.From: {Balance: assets.Ether(10).ToInt()}, + } + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + linkAddress, _, linkContract, err := link_token_interface.DeployLinkToken( + owner, backend) + require.NoError(t, err) + backend.Commit() + // Deploy feed + linkEthFeed, _, _, err := + mock_v3_aggregator_contract.DeployMockV3AggregatorContract( + owner, backend, 18, vrftesthelpers.WeiPerUnitLink.BigInt()) // 0.01 eth per link + require.NoError(t, err) + backend.Commit() + coordinatorAddress, _, coordinator, err := + vrf_coordinator_v2_5.DeployVRFCoordinatorV25( + owner, backend, common.Address{}) // bhs not needed for this test + require.NoError(t, err) + backend.Commit() + _, err = coordinator.SetPLIAndPLINativeFeed(owner, linkAddress, linkEthFeed) + require.NoError(t, err) + backend.Commit() + consumerAddress, _, consumer, err := vrf_v2plus_single_consumer.DeployVRFV2PlusSingleConsumerExample(owner, backend, coordinatorAddress, linkAddress, 1, 1, 1, [32]byte{}, false) + require.NoError(t, err) + backend.Commit() + _, err = linkContract.Transfer(owner, consumerAddress, assets.Ether(2).ToInt()) + require.NoError(t, err) + backend.Commit() + AssertLinkBalances(t, linkContract, 
[]common.Address{owner.From, consumerAddress}, []*big.Int{assets.Ether(999_999_998).ToInt(), assets.Ether(2).ToInt()}) + _, err = consumer.TopUpSubscription(owner, assets.Ether(1).ToInt()) + require.NoError(t, err) + backend.Commit() + AssertLinkBalances(t, linkContract, []common.Address{owner.From, consumerAddress, coordinatorAddress}, []*big.Int{assets.Ether(999_999_998).ToInt(), assets.Ether(1).ToInt(), assets.Ether(1).ToInt()}) + // Non-owner cannot withdraw + _, err = consumer.Withdraw(random, assets.Ether(1).ToInt(), owner.From) + require.Error(t, err) + _, err = consumer.Withdraw(owner, assets.Ether(1).ToInt(), owner.From) + require.NoError(t, err) + backend.Commit() + AssertLinkBalances(t, linkContract, []common.Address{owner.From, consumerAddress, coordinatorAddress}, []*big.Int{assets.Ether(999_999_999).ToInt(), assets.Ether(0).ToInt(), assets.Ether(1).ToInt()}) + _, err = consumer.Unsubscribe(owner, owner.From) + require.NoError(t, err) + backend.Commit() + AssertLinkBalances(t, linkContract, []common.Address{owner.From, consumerAddress, coordinatorAddress}, []*big.Int{assets.Ether(1_000_000_000).ToInt(), assets.Ether(0).ToInt(), assets.Ether(0).ToInt()}) +} + +func TestVRFV2PlusIntegration_TestMaliciousConsumer(t *testing.T) { + t.Parallel() + key := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, key, 1, false) + testMaliciousConsumer( + t, + key, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2Plus, + ) +} + +func TestVRFV2PlusIntegration_RequestCost(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, key, 1, false) + + cfg := configtest.NewGeneralConfigSimulated(t, nil) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, cfg, uni.backend, key) + require.NoError(t, app.Start(testutils.Context(t))) + + vrfkey, err := app.GetKeyStore().VRF().Create() + require.NoError(t, err) + registerProvingKeyHelper(t, 
uni.coordinatorV2UniverseCommon, uni.rootContract, vrfkey, &defaultMaxGasPrice) + t.Run("non-proxied consumer", func(tt *testing.T) { + carol := uni.vrfConsumers[0] + carolContract := uni.consumerContracts[0] + carolContractAddress := uni.consumerContractAddresses[0] + + _, err = carolContract.CreateSubscriptionAndFund(carol, + big.NewInt(1000000000000000000)) // 0.1 PLI + require.NoError(tt, err) + uni.backend.Commit() + _, err = carolContract.TopUpSubscriptionNative(carol, + big.NewInt(2000000000000000000)) // 0.2 ETH + uni.backend.Commit() + // Ensure even with large number of consumers its still cheap + var addrs []common.Address + for i := 0; i < 99; i++ { + addrs = append(addrs, testutils.NewAddress()) + } + _, err = carolContract.UpdateSubscription(carol, addrs) + require.NoError(tt, err) + linkEstimate := estimateGas(tt, uni.backend, common.Address{}, + carolContractAddress, uni.consumerABI, + "requestRandomWords", uint32(10000), uint16(2), uint32(1), + vrfkey.PublicKey.MustHash(), false) + tt.Log("gas estimate of non-proxied requestRandomWords with PLI payment:", linkEstimate) + nativeEstimate := estimateGas(tt, uni.backend, common.Address{}, + carolContractAddress, uni.consumerABI, + "requestRandomWords", uint32(10000), uint16(2), uint32(1), + vrfkey.PublicKey.MustHash(), false) + tt.Log("gas estimate of non-proxied requestRandomWords with Native payment:", nativeEstimate) + assert.Less(tt, nativeEstimate, uint64(127_000), + "requestRandomWords tx gas cost more than expected") + }) + + t.Run("proxied consumer", func(tt *testing.T) { + consumerOwner := uni.neil + consumerContract := uni.consumerProxyContract + consumerContractAddress := uni.consumerProxyContractAddress + + // Create a subscription and fund with 5 PLI. 
+ tx, err := consumerContract.CreateSubscriptionAndFund(consumerOwner, assets.Ether(5).ToInt()) + require.NoError(tt, err) + uni.backend.Commit() + r, err := uni.backend.TransactionReceipt(testutils.Context(t), tx.Hash()) + require.NoError(tt, err) + t.Log("gas used by proxied CreateSubscriptionAndFund:", r.GasUsed) + + subId, err := consumerContract.SSubId(nil) + require.NoError(tt, err) + _, err = uni.rootContract.GetSubscription(nil, subId) + require.NoError(tt, err) + + theAbi := evmtypes.MustGetABI(vrf_consumer_v2_plus_upgradeable_example.VRFConsumerV2PlusUpgradeableExampleMetaData.ABI) + estimate := estimateGas(tt, uni.backend, common.Address{}, + consumerContractAddress, &theAbi, + "requestRandomness", vrfkey.PublicKey.MustHash(), subId, uint16(2), uint32(10000), uint32(1)) + tt.Log("gas estimate of proxied requestRandomness:", estimate) + // There is some gas overhead of the delegatecall that is made by the proxy + // to the logic contract. See https://www.evm.codes/#f4?fork=grayGlacier for a detailed + // breakdown of the gas costs of a delegatecall. 
+ assert.Less(tt, estimate, uint64(106_000), + "proxied testRequestRandomness tx gas cost more than expected") + }) +} + +func TestVRFV2PlusIntegration_MaxConsumersCost(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, key, 1, false) + carol := uni.vrfConsumers[0] + carolContract := uni.consumerContracts[0] + carolContractAddress := uni.consumerContractAddresses[0] + + cfg := configtest.NewGeneralConfigSimulated(t, nil) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, cfg, uni.backend, key) + require.NoError(t, app.Start(testutils.Context(t))) + _, err := carolContract.CreateSubscriptionAndFund(carol, + big.NewInt(1000000000000000000)) // 0.1 PLI + require.NoError(t, err) + uni.backend.Commit() + subId, err := carolContract.SSubId(nil) + require.NoError(t, err) + var addrs []common.Address + for i := 0; i < 98; i++ { + addrs = append(addrs, testutils.NewAddress()) + } + _, err = carolContract.UpdateSubscription(carol, addrs) + // Ensure even with max number of consumers its still reasonable gas costs. 
+ require.NoError(t, err) + estimate := estimateGas(t, uni.backend, carolContractAddress, + uni.rootContractAddress, uni.coordinatorABI, + "removeConsumer", subId, carolContractAddress) + t.Log(estimate) + assert.Less(t, estimate, uint64(320000)) + estimate = estimateGas(t, uni.backend, carolContractAddress, + uni.rootContractAddress, uni.coordinatorABI, + "addConsumer", subId, testutils.NewAddress()) + t.Log(estimate) + assert.Less(t, estimate, uint64(100000)) +} + +func requestAndEstimateFulfillmentCost( + t *testing.T, + subID *big.Int, + consumer *bind.TransactOpts, + vrfkey vrfkey.KeyV2, + minConfs uint16, + gas uint32, + numWords uint32, + consumerContract vrftesthelpers.VRFConsumerContract, + consumerContractAddress common.Address, + uni coordinatorV2UniverseCommon, + app *cltest.TestApplication, + nativePayment bool, + lowerBound, upperBound uint64, +) { + _, err := consumerContract.RequestRandomness(consumer, vrfkey.PublicKey.MustHash(), subID, minConfs, gas, numWords, nativePayment) + require.NoError(t, err) + for i := 0; i < int(minConfs); i++ { + uni.backend.Commit() + } + + requestLog := FindLatestRandomnessRequestedLog(t, uni.rootContract, vrfkey.PublicKey.MustHash(), nil) + s, err := proof.BigToSeed(requestLog.PreSeed()) + require.NoError(t, err) + extraArgs, err := extraargs.ExtraArgsV1(nativePayment) + require.NoError(t, err) + proof, rc, err := proof.GenerateProofResponseV2Plus(app.GetKeyStore().VRF(), vrfkey.ID(), proof.PreSeedDataV2Plus{ + PreSeed: s, + BlockHash: requestLog.Raw().BlockHash, + BlockNum: requestLog.Raw().BlockNumber, + SubId: subID, + CallbackGasLimit: gas, + NumWords: numWords, + Sender: consumerContractAddress, + ExtraArgs: extraArgs, + }) + require.NoError(t, err) + gasEstimate := estimateGas(t, uni.backend, common.Address{}, + uni.rootContractAddress, uni.coordinatorABI, + "fulfillRandomWords", proof, rc, false) + t.Log("consumer fulfillment gas estimate:", gasEstimate) + assert.Greater(t, gasEstimate, lowerBound) + 
assert.Less(t, gasEstimate, upperBound) +} + +func TestVRFV2PlusIntegration_FulfillmentCost(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, key, 1, false) + + cfg := configtest.NewGeneralConfigSimulated(t, nil) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, cfg, uni.backend, key) + require.NoError(t, app.Start(testutils.Context(t))) + + vrfkey, err := app.GetKeyStore().VRF().Create() + require.NoError(t, err) + registerProvingKeyHelper(t, uni.coordinatorV2UniverseCommon, uni.rootContract, vrfkey, &defaultMaxGasPrice) + + t.Run("non-proxied consumer", func(tt *testing.T) { + carol := uni.vrfConsumers[0] + carolContract := uni.consumerContracts[0] + carolContractAddress := uni.consumerContractAddresses[0] + + _, err = carolContract.CreateSubscriptionAndFund(carol, + big.NewInt(1000000000000000000)) // 0.1 PLI + require.NoError(tt, err) + uni.backend.Commit() + subID, err2 := carolContract.SSubId(nil) + require.NoError(tt, err2) + _, err2 = carolContract.TopUpSubscriptionNative(carol, + big.NewInt(2000000000000000000)) // 0.2 ETH + require.NoError(tt, err2) + gasRequested := 50_000 + nw := 1 + requestedIncomingConfs := 3 + t.Run("native payment", func(tt *testing.T) { + requestAndEstimateFulfillmentCost( + t, + subID, + carol, + vrfkey, + uint16(requestedIncomingConfs), + uint32(gasRequested), + uint32(nw), + carolContract, + carolContractAddress, + uni.coordinatorV2UniverseCommon, + app, + true, + 120_000, + 500_000, + ) + }) + + t.Run("link payment", func(tt *testing.T) { + requestAndEstimateFulfillmentCost( + t, + subID, + carol, + vrfkey, + uint16(requestedIncomingConfs), + uint32(gasRequested), + uint32(nw), + carolContract, + carolContractAddress, + uni.coordinatorV2UniverseCommon, + app, + false, + 120_000, + 500_000, + ) + }) + }) + + t.Run("proxied consumer", func(tt *testing.T) { + consumerOwner := uni.neil + consumerContract := uni.consumerProxyContract + consumerContractAddress 
:= uni.consumerProxyContractAddress + + _, err2 := consumerContract.CreateSubscriptionAndFund(consumerOwner, assets.Ether(5).ToInt()) + require.NoError(t, err2) + uni.backend.Commit() + subID, err2 := consumerContract.SSubId(nil) + require.NoError(t, err2) + gasRequested := 50_000 + nw := 1 + requestedIncomingConfs := 3 + requestAndEstimateFulfillmentCost( + t, + subID, + consumerOwner, + vrfkey, + uint16(requestedIncomingConfs), + uint32(gasRequested), + uint32(nw), + consumerContract, + consumerContractAddress, + uni.coordinatorV2UniverseCommon, + app, + false, + 120_000, + 500_000, + ) + }) +} + +func setupSubscriptionAndFund( + t *testing.T, + uni coordinatorV2UniverseCommon, + consumer *bind.TransactOpts, + consumerContract vrftesthelpers.VRFConsumerContract, + consumerAddress common.Address, + linkAmount *big.Int, + nativeAmount *big.Int) *big.Int { + _, err := uni.rootContract.CreateSubscription(consumer) + require.NoError(t, err) + uni.backend.Commit() + + iter, err := uni.rootContract.FilterSubscriptionCreated(nil, nil) + require.NoError(t, err) + require.True(t, iter.Next(), "could not find SubscriptionCreated event for subID") + subID := iter.Event().SubID() + + _, err = consumerContract.SetSubID(consumer, subID) + require.NoError(t, err) + + _, err = uni.rootContract.AddConsumer(consumer, subID, consumerAddress) + require.NoError(t, err, "failed to add consumer") + uni.backend.Commit() + + b, err := utils.ABIEncode(`[{"type":"uint256"}]`, subID) + require.NoError(t, err) + _, err = uni.linkContract.TransferAndCall( + uni.sergey, uni.rootContractAddress, linkAmount, b) + require.NoError(t, err, "failed to fund sub") + uni.backend.Commit() + + _, err = uni.rootContract.FundSubscriptionWithNative(consumer, subID, nativeAmount) + require.NoError(t, err, "failed to fund sub with native") + uni.backend.Commit() + + return subID +} + +func TestVRFV2PlusIntegration_Migration(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := 
newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + key1 := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ + // Gas lane. + Key: ptr(key1.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].GasEstimator.LimitDefault = ptr[uint32](5_000_000) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) + + // Create a subscription and fund with 5 PLI. + consumerContract := uni.consumerContracts[0] + consumer := uni.vrfConsumers[0] + consumerAddress := uni.consumerContractAddresses[0] + + subID := setupSubscriptionAndFund( + t, + uni.coordinatorV2UniverseCommon, + consumer, + consumerContract, + consumerAddress, + new(big.Int).SetUint64(5e18), + new(big.Int).SetUint64(3e18), + ) + + // Fund gas lane. + sendEth(t, ownerKey, uni.backend, key1.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job using key1 and key2 on the same gas lane. + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key1}}, + app, + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + uni.coordinatorV2UniverseCommon, + nil, + vrfcommon.V2Plus, + false, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Make some randomness requests. + numWords := uint32(2) + + requestID, _ := requestRandomnessAndAssertRandomWordsRequestedEvent(t, consumerContract, consumer, keyHash, subID, numWords, 500_000, uni.rootContract, uni.backend, false) + + // Wait for fulfillment to be queued. 
+ gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + mine(t, requestID, subID, uni.backend, db, vrfcommon.V2Plus, testutils.SimulatedChainID) + assertRandomWordsFulfilled(t, requestID, true, uni.rootContract, false) + + // Assert correct number of random words sent by coordinator. + assertNumRandomWords(t, consumerContract, numWords) + + rw, err := consumerContract.SRandomWords(nil, big.NewInt(0)) + require.NoError(t, err) + + subV1, err := uni.rootContract.GetSubscription(nil, subID) + require.NoError(t, err) + + _, err = uni.rootContract.Migrate(consumer, subID, uni.migrationTestCoordinatorAddress) + require.NoError(t, err) + uni.backend.Commit() + + subV2, err := uni.migrationTestCoordinator.GetSubscription(nil, subID) + require.NoError(t, err) + + totalLinkBalance, err := uni.migrationTestCoordinator.STotalLinkBalance(nil) + require.NoError(t, err) + totalNativeBalance, err := uni.migrationTestCoordinator.STotalNativeBalance(nil) + require.NoError(t, err) + linkContractBalance, err := uni.linkContract.BalanceOf(nil, uni.migrationTestCoordinatorAddress) + require.NoError(t, err) + balance, err := uni.backend.BalanceAt(testutils.Context(t), uni.migrationTestCoordinatorAddress, nil) + require.NoError(t, err) + + require.Equal(t, subV1.Balance(), totalLinkBalance) + require.Equal(t, subV1.NativeBalance(), totalNativeBalance) + require.Equal(t, subV1.Balance(), linkContractBalance) + require.Equal(t, subV1.NativeBalance(), balance) + + require.Equal(t, subV1.Balance(), subV2.LinkBalance) + require.Equal(t, subV1.NativeBalance(), subV2.NativeBalance) + require.Equal(t, subV1.Owner(), subV2.Owner) + require.Equal(t, len(subV1.Consumers()), len(subV2.Consumers)) + for i, c := range subV1.Consumers() { + require.Equal(t, c, subV2.Consumers[i]) + } + + 
minRequestConfirmations := uint16(2) + requestID2, rw2 := requestRandomnessAndValidate( + t, + consumer, + consumerContract, + keyHash, + subID, + minRequestConfirmations, + 50_000, + numWords, + uni, + true, + ) + require.NotEqual(t, requestID, requestID2) + require.NotEqual(t, rw, rw2) + requestID3, rw3 := requestRandomnessAndValidate( + t, + consumer, + consumerContract, + keyHash, + subID, + minRequestConfirmations, + 50_000, + numWords, + uni, + false, + ) + require.NotEqual(t, requestID2, requestID3) + require.NotEqual(t, rw2, rw3) +} + +func requestRandomnessAndValidate(t *testing.T, + consumer *bind.TransactOpts, + consumerContract vrftesthelpers.VRFConsumerContract, + keyHash common.Hash, + subID *big.Int, + minConfs uint16, + gas, numWords uint32, + uni coordinatorV2PlusUniverse, + nativePayment bool) (*big.Int, *big.Int) { + _, err := consumerContract.RequestRandomness( + consumer, + keyHash, + subID, + minConfs, + gas, + numWords, + nativePayment, // test link payment works after migration + ) + require.NoError(t, err) + uni.backend.Commit() + + requestID, err := consumerContract.SRequestId(nil) + require.NoError(t, err) + + _, err = uni.migrationTestCoordinator.FulfillRandomWords(uni.neil, requestID) + require.NoError(t, err) + uni.backend.Commit() + + rw, err := consumerContract.SRandomWords(nil, big.NewInt(0)) + require.NoError(t, err) + + return requestID, rw +} + +func TestVRFV2PlusIntegration_CancelSubscription(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, key, 1, false) + consumer := uni.vrfConsumers[0] + consumerContract := uni.consumerContracts[0] + consumerContractAddress := uni.consumerContractAddresses[0] + linkAmount := new(big.Int).SetUint64(5e18) + nativeAmount := new(big.Int).SetUint64(3e18) + subID := setupSubscriptionAndFund( + t, + uni.coordinatorV2UniverseCommon, + consumer, + consumerContract, + consumerContractAddress, + linkAmount, + nativeAmount, + ) + + 
linkBalanceBeforeCancel, err := uni.linkContract.BalanceOf(nil, uni.neil.From) + require.NoError(t, err) + nativeBalanceBeforeCancel, err := uni.backend.BalanceAt(testutils.Context(t), uni.neil.From, nil) + require.NoError(t, err) + + // non-owner cannot cancel subscription + _, err = uni.rootContract.CancelSubscription(uni.neil, subID, consumer.From) + require.Error(t, err) + + _, err = uni.rootContract.CancelSubscription(consumer, subID, uni.neil.From) + require.NoError(t, err) + uni.backend.Commit() + + AssertLinkBalance(t, uni.linkContract, uni.neil.From, linkBalanceBeforeCancel.Add(linkBalanceBeforeCancel, linkAmount)) + AssertNativeBalance(t, uni.backend, uni.neil.From, nativeBalanceBeforeCancel.Add(nativeBalanceBeforeCancel, nativeAmount)) +} + +func TestVRFV2PlusIntegration_ReplayOldRequestsOnStartUp(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 1, false) + + testReplayOldRequestsOnStartUp( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + nil, + vrfcommon.V2Plus, + false, + ) +} diff --git a/core/services/vrf/v2/integration_v2_reverted_txns_test.go b/core/services/vrf/v2/integration_v2_reverted_txns_test.go new file mode 100644 index 00000000..bf8068ba --- /dev/null +++ b/core/services/vrf/v2/integration_v2_reverted_txns_test.go @@ -0,0 +1,692 @@ +package v2_test + +import ( + "database/sql" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/google/uuid" + "github.com/onsi/gomega" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_external_sub_owner_example" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + prooflib "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" + v2 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/v2" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + coordinatorV2ABI = evmtypes.MustGetABI(vrf_coordinator_v2.VRFCoordinatorV2ABI) + batchCoordinatorV2ABI = evmtypes.MustGetABI(batch_vrf_coordinator_v2.BatchVRFCoordinatorV2ABI) +) + +func TestVRFV2Integration_SingleRevertedTxn_ForceFulfillment(t *testing.T) { + t.Parallel() + + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + th := newRevertTxnTH(t, &uni, ownerKey, false, []uint64{1}) + + // Make VRF request without sufficient balance and send fulfillment without simulation + req := makeVRFReq(t, th, th.subs[0]) + req = fulfillVRFReq(t, th, req, th.subs[0], false, nil) + + waitForForceFulfillment(t, th, req, th.subs[0], true, 1) + + 
t.Log("Done!") +} + +func TestVRFV2Integration_BatchRevertedTxn_ForceFulfillment(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + + th := newRevertTxnTH(t, &uni, ownerKey, true, []uint64{1}) + + numReqs := 2 + reqs := make([]*vrfReq, numReqs) + for i := 0; i < numReqs; i++ { + reqs[i] = makeVRFReq(t, th, th.subs[0]) + } + fulfilBatchVRFReq(t, th, reqs, th.subs[0]) + + for i := 0; i < numReqs; i++ { + // The last request will be the successful one because of the way the example + // contract is written. + success := false + if i == (numReqs - 1) { + success = true + } + waitForForceFulfillment(t, th, reqs[i], th.subs[0], success, 1) + } + t.Log("Done!") +} + +func TestVRFV2Integration_ForceFulfillmentRevertedTxn_Retry(t *testing.T) { + t.Parallel() + + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + th := newRevertTxnTH(t, &uni, ownerKey, false, []uint64{1}) + + // Make VRF request without sufficient balance and send fulfillment without simulation + req := makeVRFReq(t, th, th.subs[0]) + req = fulfillVRFReq(t, th, req, th.subs[0], true, ptr(uint64(7))) + + waitForForceFulfillment(t, th, req, th.subs[0], true, 2) + + receipts, err := getTxnReceiptDB(th.db, -1) + require.Nil(t, err) + require.Len(t, receipts, 2) + require.Equal(t, uint64(0), receipts[0].EVMReceipt.Status) + require.Equal(t, uint64(1), receipts[1].EVMReceipt.Status) + require.Equal(t, uint64(8), receipts[1].ForceFulfillmentAttempt) + + t.Log("Done!") +} +func TestVRFV2Integration_CanceledSubForceFulfillmentRevertedTxn_Retry(t *testing.T) { + t.Parallel() + + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + th := newRevertTxnTH(t, &uni, ownerKey, false, []uint64{1}) + + // Make VRF request without sufficient balance and send fulfillment without simulation + req := makeVRFReq(t, th, th.subs[0]) + req = fulfillVRFReq(t, th, 
req, th.subs[0], true, nil) + + waitForForceFulfillment(t, th, req, th.subs[0], true, 2) + + receipts, err := getTxnReceiptDB(th.db, -1) + require.Nil(t, err) + require.Len(t, receipts, 2) + require.Equal(t, uint64(0), receipts[0].EVMReceipt.Status) + require.Equal(t, uint64(1), receipts[1].EVMReceipt.Status) + require.Equal(t, uint64(1), receipts[1].ForceFulfillmentAttempt) + + t.Log("Done!") +} + +func TestUniqueReqById_NoPendingReceipts(t *testing.T) { + revertedForceTxns := []v2.TxnReceiptDB{ + {RequestID: common.BigToHash(big.NewInt(1)).Hex(), + ForceFulfillmentAttempt: 1, EVMReceipt: evmtypes.Receipt{Status: 0}}, + {RequestID: common.BigToHash(big.NewInt(1)).Hex(), + ForceFulfillmentAttempt: 2, EVMReceipt: evmtypes.Receipt{Status: 0}}, + {RequestID: common.BigToHash(big.NewInt(2)).Hex(), + ForceFulfillmentAttempt: 1, EVMReceipt: evmtypes.Receipt{Status: 0}}, + {RequestID: common.BigToHash(big.NewInt(2)).Hex(), + ForceFulfillmentAttempt: 2, EVMReceipt: evmtypes.Receipt{Status: 0}}, + {RequestID: common.BigToHash(big.NewInt(2)).Hex(), + ForceFulfillmentAttempt: 3, EVMReceipt: evmtypes.Receipt{Status: 0}}, + {RequestID: common.BigToHash(big.NewInt(2)).Hex(), + ForceFulfillmentAttempt: 4, EVMReceipt: evmtypes.Receipt{Status: 0}}, + } + allForceTxns := revertedForceTxns + res := v2.UniqueByReqID(revertedForceTxns, allForceTxns) + require.Len(t, res, 2) + for _, r := range res { + if r.RequestID == "1" { + require.Equal(t, r.ForceFulfillmentAttempt, 2) + } + if r.RequestID == "2" { + require.Equal(t, r.ForceFulfillmentAttempt, 4) + } + } +} + +func TestUniqueReqById_WithPendingReceipts(t *testing.T) { + revertedForceTxns := []v2.TxnReceiptDB{ + {RequestID: common.BigToHash(big.NewInt(1)).Hex(), + ForceFulfillmentAttempt: 1, EVMReceipt: evmtypes.Receipt{Status: 0}}, + {RequestID: common.BigToHash(big.NewInt(1)).Hex(), + ForceFulfillmentAttempt: 2, EVMReceipt: evmtypes.Receipt{Status: 0}}, + {RequestID: common.BigToHash(big.NewInt(2)).Hex(), + 
ForceFulfillmentAttempt: 1, EVMReceipt: evmtypes.Receipt{Status: 0}}, + {RequestID: common.BigToHash(big.NewInt(2)).Hex(), + ForceFulfillmentAttempt: 2, EVMReceipt: evmtypes.Receipt{Status: 0}}, + {RequestID: common.BigToHash(big.NewInt(2)).Hex(), + ForceFulfillmentAttempt: 3, EVMReceipt: evmtypes.Receipt{Status: 0}}, + {RequestID: common.BigToHash(big.NewInt(2)).Hex(), + ForceFulfillmentAttempt: 4, EVMReceipt: evmtypes.Receipt{Status: 0}}, + } + allForceTxns := []v2.TxnReceiptDB{} + allForceTxns = append(allForceTxns, revertedForceTxns...) + allForceTxns = append(allForceTxns, v2.TxnReceiptDB{RequestID: common.BigToHash(big.NewInt(2)).Hex(), + ForceFulfillmentAttempt: 5}) + res := v2.UniqueByReqID(revertedForceTxns, allForceTxns) + require.Len(t, res, 1) + for _, r := range res { + if r.RequestID == "1" { + require.Equal(t, r.ForceFulfillmentAttempt, 2) + } + } +} + +// Wait till force fulfillment event fired for the req passed in, till go test timeout +func waitForForceFulfillment(t *testing.T, + th *revertTxnTH, + req *vrfReq, + sub *vrfSub, + success bool, + forceFulfilledCount int64) { + uni := th.uni + coordinator := th.uni.rootContract + requestID := req.requestID + + // Wait for force-fulfillment to be queued. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + commitment, err := coordinator.GetCommitment(nil, requestID) + require.NoError(t, err) + t.Log("commitment is:", hexutil.Encode(commitment[:]), ", requestID: ", common.BigToHash(requestID).Hex()) + checkForForceFulfilledEvent(t, th, req, sub, -1) + return utils.IsEmpty(commitment[:]) + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + // Mine the fulfillment that was queued. + mineForceFulfilled(t, requestID, sub.subID, forceFulfilledCount, *uni, th.db) + + // Assert correct state of RandomWordsFulfilled event. 
+ // In this particular case: + // * success should be true + // * payment should be zero (forced fulfillment) + rwfe := assertRandomWordsFulfilled(t, requestID, success, coordinator, false) + require.Equal(t, "0", rwfe.Payment().String()) + + // Check that the RandomWordsForced event is emitted correctly. + checkForForceFulfilledEvent(t, th, req, sub, 0) +} + +// Check if force fulfillment event fired for the req passed in +func checkForForceFulfilledEvent(t *testing.T, + th *revertTxnTH, + req *vrfReq, + sub *vrfSub, + numForcedLogs int) { + requestID := req.requestID + it, err := th.uni.vrfOwnerNew.FilterRandomWordsForced(nil, []*big.Int{requestID}, + []uint64{sub.subID}, []common.Address{th.eoaConsumerAddr}) + require.NoError(t, err) + i := 0 + for it.Next() { + i++ + require.Equal(t, requestID.String(), it.Event.RequestId.String()) + require.Equal(t, sub.subID, it.Event.SubId) + require.Equal(t, th.eoaConsumerAddr.String(), it.Event.Sender.String()) + } + t.Log("Number of RandomWordsForced Logs:", i) + require.Greater(t, i, numForcedLogs) +} + +// Make VRF request without sufficient balance and send fulfillment without simulation +func makeVRFReq(t *testing.T, th *revertTxnTH, sub *vrfSub) (req *vrfReq) { + // Make the randomness request and send fulfillment without simulation + numWords := uint32(3) + confs := 10 + callbackGasLimit := uint32(600_000) + _, err := th.eoaConsumer.RequestRandomWords(th.uni.neil, sub.subID, + callbackGasLimit, uint16(confs), numWords, th.keyHash) + require.NoError(t, err, fmt.Sprintf("failed to request randomness from consumer: %v", err)) + th.uni.backend.Commit() + + // Generate VRF proof + requestID, err := th.eoaConsumer.SRequestId(nil) + require.NoError(t, err) + + return &vrfReq{requestID: requestID, callbackGasLimit: callbackGasLimit, numWords: numWords} +} + +// Fulfill VRF req without prior simulation, after computing req proof and commitment +func fulfillVRFReq(t *testing.T, + th *revertTxnTH, + req *vrfReq, + sub 
*vrfSub, + forceFulfill bool, + forceFulfilmentAttempt *uint64) *vrfReq { + // Generate VRF proof and commitment + reqUpdated := genReqProofNCommitment(t, th, *req, sub) + req = &reqUpdated + + // Send fulfillment TX w/ out simulation to txm, to revert on-chain + + // Construct data payload + b, err := coordinatorV2ABI.Pack("fulfillRandomWords", req.proof, req.reqCommitment) + require.NoError(t, err) + + ec := th.uni.backend + chainID := th.uni.backend.Blockchain().Config().ChainID + chain, err := th.app.GetRelayers().LegacyEVMChains().Get(chainID.String()) + require.NoError(t, err) + + metadata := &txmgr.TxMeta{ + RequestID: ptr(common.BytesToHash(req.requestID.Bytes())), + SubID: &sub.subID, + RequestTxHash: req.requestTxHash, + // No max link since simulation failed + } + if forceFulfill { + metadata.ForceFulfilled = ptr(true) + if forceFulfilmentAttempt != nil { + metadata.ForceFulfillmentAttempt = forceFulfilmentAttempt + } + } + etx, err := chain.TxManager().CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: th.key1.EIP55Address.Address(), + ToAddress: th.uni.rootContractAddress, + EncodedPayload: b, + FeeLimit: 1e6, + Strategy: txmgrcommon.NewSendEveryStrategy(), + Meta: metadata, + }) + require.NoError(t, err) + ec.Commit() + + // wait for above tx to mine (reach state confirmed) + mine(t, req.requestID, big.NewInt(int64(sub.subID)), th.uni.backend, th.db, vrfcommon.V2, th.chainID) + + receipts, err := getTxnReceiptDB(th.db, etx.ID) + require.Nil(t, err) + require.Len(t, receipts, 1) + require.Equal(t, uint64(0), receipts[0].EVMReceipt.Status) + req.txID = etx.ID + return req +} + +// Fulfill VRF req without prior simulation, after computing req proof and commitment +func fulfilBatchVRFReq(t *testing.T, + th *revertTxnTH, + reqs []*vrfReq, + sub *vrfSub) { + proofs := make([]vrf_coordinator_v2.VRFProof, 0) + reqCommitments := make([]vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment, 0) + requestIDs := make([]common.Hash, 0) + 
requestIDInts := make([]*big.Int, 0) + requestTxnHashes := make([]common.Hash, 0) + // Generate VRF proof and commitment + for i, req := range reqs { + reqUpdated := genReqProofNCommitment(t, th, *req, sub) + reqs[i] = &reqUpdated + proofs = append(proofs, *reqUpdated.proof) + reqCommitments = append(reqCommitments, *reqUpdated.reqCommitment) + requestIDs = append(requestIDs, common.BytesToHash(reqUpdated.requestID.Bytes())) + requestIDInts = append(requestIDInts, reqUpdated.requestID) + requestTxnHashes = append(requestTxnHashes, *reqUpdated.requestTxHash) + } + + // Send fulfillment TX w/ out simulation to txm, to revert on-chain + + // Construct data payload + b, err := batchCoordinatorV2ABI.Pack("fulfillRandomWords", proofs, reqCommitments) + require.NoError(t, err) + + ec := th.uni.backend + chainID := th.uni.backend.Blockchain().Config().ChainID + chain, err := th.app.GetRelayers().LegacyEVMChains().Get(chainID.String()) + require.NoError(t, err) + + etx, err := chain.TxManager().CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: th.key1.EIP55Address.Address(), + ToAddress: th.uni.batchCoordinatorContractAddress, + EncodedPayload: b, + FeeLimit: 1e6, + Strategy: txmgrcommon.NewSendEveryStrategy(), + Meta: &txmgr.TxMeta{ + RequestIDs: requestIDs, + RequestTxHashes: requestTxnHashes, + SubID: &sub.subID, + // No max link since simulation failed + }, + }) + require.NoError(t, err) + ec.Commit() + + // wait for above tx to mine (reach state confirmed) + mineBatch(t, requestIDInts, big.NewInt(int64(sub.subID)), th.uni.backend, th.db, vrfcommon.V2, chainID) + + receipts, err := getTxnReceiptDB(th.db, etx.ID) + require.Nil(t, err) + require.Len(t, receipts, 1) + require.Equal(t, uint64(1), receipts[0].EVMReceipt.Status) +} + +// Fulfill VRF req without prior simulation, after computing req proof and commitment +func genReqProofNCommitment(t *testing.T, + th *revertTxnTH, + req vrfReq, + sub *vrfSub) vrfReq { + // Generate VRF proof + requestLog 
:= FindLatestRandomnessRequestedLog(t, th.uni.rootContract, th.keyHash, req.requestID) + s, err := prooflib.BigToSeed(requestLog.PreSeed()) + require.NoError(t, err) + proof, rc, err := prooflib.GenerateProofResponseV2(th.app.GetKeyStore().VRF(), th.vrfKeyID, prooflib.PreSeedDataV2{ + PreSeed: s, + BlockHash: requestLog.Raw().BlockHash, + BlockNum: requestLog.Raw().BlockNumber, + SubId: sub.subID, + CallbackGasLimit: req.callbackGasLimit, + NumWords: req.numWords, + Sender: th.eoaConsumerAddr, + }) + require.NoError(t, err) + txHash := requestLog.Raw().TxHash + req.proof, req.reqCommitment, req.requestTxHash = &proof, &rc, &txHash + return req +} + +// Create VRF jobs in test CL node +func createVRFJobsNew( + t *testing.T, + fromKeys [][]ethkey.KeyV2, + app *cltest.TestApplication, + coordinator v2.CoordinatorV2_X, + coordinatorAddress common.Address, + batchCoordinatorAddress common.Address, + uni coordinatorV2Universe, + batchEnabled bool, + chainID *big.Int, + gasLanePrices ...*assets.Wei, +) (jobs []job.Job, vrfKeyIDs []string) { + if len(gasLanePrices) != len(fromKeys) { + t.Fatalf("must provide one gas lane price for each set of from addresses. 
len(gasLanePrices) != len(fromKeys) [%d != %d]", + len(gasLanePrices), len(fromKeys)) + } + // Create separate jobs for each gas lane and register their keys + for i, keys := range fromKeys { + var keyStrs []string + for _, k := range keys { + keyStrs = append(keyStrs, k.Address.String()) + } + + vrfkey, err := app.GetKeyStore().VRF().Create() + require.NoError(t, err) + + jid := uuid.New() + incomingConfs := 2 + s := testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + JobID: jid.String(), + Name: fmt.Sprintf("vrf-primary-%d", i), + CoordinatorAddress: coordinatorAddress.Hex(), + BatchCoordinatorAddress: batchCoordinatorAddress.Hex(), + BatchFulfillmentEnabled: batchEnabled, + MinIncomingConfirmations: incomingConfs, + PublicKey: vrfkey.PublicKey.String(), + FromAddresses: keyStrs, + BackoffInitialDelay: 10 * time.Millisecond, + BackoffMaxDelay: time.Second, + V2: true, + GasLanePrice: gasLanePrices[i], + VRFOwnerAddress: uni.vrfOwnerAddressNew.Hex(), + CustomRevertsPipelineEnabled: true, + EVMChainID: chainID.String(), + }).Toml() + jb, err := vrfcommon.ValidatedVRFSpec(s) + t.Log(jb.VRFSpec.PublicKey.MustHash(), vrfkey.PublicKey.MustHash()) + require.NoError(t, err) + err = app.JobSpawner().CreateJob(&jb) + require.NoError(t, err) + registerProvingKeyHelper(t, uni.coordinatorV2UniverseCommon, coordinator, vrfkey, ptr(gasLanePrices[i].ToInt().Uint64())) + jobs = append(jobs, jb) + vrfKeyIDs = append(vrfKeyIDs, vrfkey.ID()) + } + // Wait until all jobs are active and listening for logs + gomega.NewWithT(t).Eventually(func() bool { + jbs := app.JobSpawner().ActiveJobs() + var count int + for _, jb := range jbs { + if jb.Type == job.VRF { + count++ + } + } + return count == len(fromKeys) + }, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue()) + // Unfortunately the lb needs heads to be able to backfill logs to new subscribers. + // To avoid confirming + // TODO: it could just backfill immediately upon receiving a new subscriber? 
(though would + // only be useful for tests, probably a more robust way is to have the job spawner accept a signal that a + // job is fully up and running and not add it to the active jobs list before then) + time.Sleep(2 * time.Second) + return +} + +// Get txn receipt from txstore DB for a given txID. Useful to get status +// of a txn on chain, to check if it reverted or not +func getTxnReceiptDB(db *sqlx.DB, txesID int64) ([]v2.TxnReceiptDB, error) { + sqlQuery := ` + WITH txes AS ( + SELECT * + FROM evm.txes + WHERE (state = 'confirmed' OR state = 'unconfirmed') + AND id = $1 + ), attempts AS ( + SELECT * + FROM evm.tx_attempts + WHERE eth_tx_id IN (SELECT id FROM txes) + ), receipts AS ( + SELECT * + FROM evm.receipts + WHERE tx_hash IN (SELECT hash FROM attempts) + ) + SELECT r.tx_hash, + r.receipt, + t.from_address, + t.meta->>'SubId' as sub_id, + COALESCE(t.meta->>'RequestID', '') as request_id, + COALESCE(t.meta->>'RequestTxHash', '') as request_tx_hash, + COALESCE(t.meta->>'ForceFulfillmentAttempt', '0') as force_fulfillment_attempt + FROM receipts r + INNER JOIN attempts a ON r.tx_hash = a.hash + INNER JOIN txes t ON a.eth_tx_id = t.id + ` + var recentReceipts []v2.TxnReceiptDB + var err error + if txesID != -1 { + err = db.Select(&recentReceipts, sqlQuery, txesID) + } else { + sqlQuery = strings.Replace(sqlQuery, "AND id = $1", "AND meta->>'ForceFulfilled' IS NOT NULL", 1) + err = db.Select(&recentReceipts, sqlQuery) + } + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, errors.Wrap(err, "fetch_failed_receipts_txm") + } + + return recentReceipts, nil +} + +// Type to store VRF req details like requestID, proof, reqCommitment +type vrfReq struct { + requestID *big.Int + callbackGasLimit uint32 + numWords uint32 + txID int64 + requestTxHash *common.Hash + proof *vrf_coordinator_v2.VRFProof + reqCommitment *vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment +} + +// Type to store VRF sub details like subID, balance +type vrfSub struct { + 
subID uint64 + balance uint64 +} + +// Test harness for handling reverted txns functionality +type revertTxnTH struct { + // VRF Key Details + key1 ethkey.KeyV2 + key2 ethkey.KeyV2 + vrfKeyID string + keyHash [32]byte + + // CL Node Details + chainID *big.Int + app *cltest.TestApplication + db *sqlx.DB + + // Contract Details + uni *coordinatorV2Universe + eoaConsumer *vrf_external_sub_owner_example.VRFExternalSubOwnerExample + eoaConsumerAddr common.Address + + // VRF Req Details + subs []*vrfSub +} + +// Constructor for handling reverted txns test harness +func newRevertTxnTH(t *testing.T, + uni *coordinatorV2Universe, + ownerKey ethkey.KeyV2, + batchEnabled bool, + subBalances []uint64) (th *revertTxnTH) { + key1 := cltest.MustGenerateRandomKey(t) + key2 := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ + // Gas lane. + Key: ptr(key1.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + }, toml.KeySpecific{ + // Gas lane. 
+ Key: ptr(key2.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1, key2) + + th = &revertTxnTH{ + key1: key1, + key2: key2, + app: app, + db: db, + uni: uni, + subs: make([]*vrfSub, len(subBalances)), + } + coordinator := uni.rootContract + coordinatorAddress := uni.rootContractAddress + th.chainID = th.uni.backend.Blockchain().Config().ChainID + var err error + + th.eoaConsumerAddr, _, th.eoaConsumer, err = vrf_external_sub_owner_example.DeployVRFExternalSubOwnerExample( + uni.neil, + uni.backend, + coordinatorAddress, + uni.linkContractAddress, + ) + require.NoError(t, err, "failed to deploy eoa consumer") + uni.backend.Commit() + + for i := 0; i < len(subBalances); i++ { + subID := uint64(i + 1) + setupSub(t, th, subID, subBalances[i]) + th.subs[i] = &vrfSub{subID: subID, balance: subBalances[i]} + } + + // Fund gas lanes. + sendEth(t, ownerKey, uni.backend, key1.Address, 10) + sendEth(t, ownerKey, uni.backend, key2.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job using key1 and key2 on the same gas lane. + jbs, vrfKeyIDs := createVRFJobsNew( + t, + [][]ethkey.KeyV2{{key1, key2}}, + app, + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + *uni, + batchEnabled, + th.chainID, + gasLanePriceWei) + vrfKey := jbs[0].VRFSpec.PublicKey + + th.keyHash = vrfKey.MustHash() + th.vrfKeyID = vrfKeyIDs[0] + + // Transfer ownership of the VRF coordinator to the VRF owner, + // which is critical for this test. 
+ t.Log("vrf owner address:", uni.vrfOwnerAddressNew) + _, err = coordinator.TransferOwnership(uni.neil, uni.vrfOwnerAddressNew) + require.NoError(t, err, "unable to TransferOwnership of VRF coordinator to VRFOwner") + uni.backend.Commit() + + _, err = uni.vrfOwnerNew.AcceptVRFOwnership(uni.neil) + require.NoError(t, err, "unable to Accept VRF Ownership") + uni.backend.Commit() + + actualCoordinatorAddr, err := uni.vrfOwnerNew.GetVRFCoordinator(nil) + require.NoError(t, err) + require.Equal(t, coordinatorAddress, actualCoordinatorAddr) + + // Add allowed callers so that the oracle can call fulfillRandomWords + // on VRFOwner. + _, err = uni.vrfOwnerNew.SetAuthorizedSenders(uni.neil, []common.Address{ + key1.EIP55Address.Address(), + key2.EIP55Address.Address(), + }) + require.NoError(t, err, "unable to update authorized senders in VRFOwner") + uni.backend.Commit() + + return th +} + +func setupSub(t *testing.T, th *revertTxnTH, subID uint64, balance uint64) { + uni := th.uni + coordinator := uni.rootContract + coordinatorAddress := uni.rootContractAddress + var err error + + // Create a subscription and fund with amount specified + _, err = coordinator.CreateSubscription(uni.neil) + require.NoError(t, err, "failed to create eoa sub") + uni.backend.Commit() + + // Fund the sub + b, err := evmutils.ABIEncode(`[{"type":"uint64"}]`, subID) + require.NoError(t, err) + _, err = uni.linkContract.TransferAndCall( + uni.sergey, coordinatorAddress, big.NewInt(int64(balance)), b) + require.NoError(t, err, "failed to fund sub") + uni.backend.Commit() + + // Add the consumer to the sub + subIDBig := big.NewInt(int64(subID)) + _, err = coordinator.AddConsumer(uni.neil, subIDBig, th.eoaConsumerAddr) + require.NoError(t, err, "failed to add consumer") + uni.backend.Commit() + + // Check the subscription state + sub, err := coordinator.GetSubscription(nil, subIDBig) + consumers := sub.Consumers() + require.NoError(t, err, "failed to get subscription with id %d", subID) + 
require.Equal(t, big.NewInt(int64(balance)), sub.Balance()) + require.Equal(t, 1, len(consumers)) + require.Equal(t, th.eoaConsumerAddr, consumers[0]) + require.Equal(t, uni.neil.From, sub.Owner()) +} diff --git a/core/services/vrf/v2/integration_v2_test.go b/core/services/vrf/v2/integration_v2_test.go new file mode 100644 index 00000000..db926047 --- /dev/null +++ b/core/services/vrf/v2/integration_v2_test.go @@ -0,0 +1,2311 @@ +package v2_test + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + "strconv" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + gethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/google/uuid" + "github.com/onsi/gomega" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/jmoiron/sqlx" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/sqlutil" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + evmlogger "github.com/goplugin/pluginv3.0/v2/core/chains/evm/log" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_v3_aggregator_contract" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_consumer_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_consumer_v2_upgradeable_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_external_sub_owner_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_malicious_consumer_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_owner" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_single_consumer_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_proxy_admin" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_reverting_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_transparent_upgradeable_proxy" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_wrapper_consumer_example" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof" + v1 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/v1" + v22 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/v2" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrftesthelpers" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var defaultMaxGasPrice = uint64(1e12) + +type coordinatorV2UniverseCommon struct { + // Golang wrappers of solidity contracts + consumerContracts []vrftesthelpers.VRFConsumerContract + consumerContractAddresses []common.Address + rootContract v22.CoordinatorV2_X + rootContractAddress common.Address + linkContract *link_token_interface.LinkToken + linkContractAddress common.Address + linkEthFeedAddress common.Address + bhsContract *blockhash_store.BlockhashStore + bhsContractAddress common.Address + batchBHSContract *batch_blockhash_store.BatchBlockhashStore + batchBHSContractAddress common.Address + maliciousConsumerContract vrftesthelpers.VRFConsumerContract + maliciousConsumerContractAddress 
common.Address + revertingConsumerContract vrftesthelpers.VRFConsumerContract + revertingConsumerContractAddress common.Address + // This is a VRFConsumerV2Upgradeable wrapper that points to the proxy address. + consumerProxyContract vrftesthelpers.VRFConsumerContract + consumerProxyContractAddress common.Address + proxyAdminAddress common.Address + + // Abstract representation of the ethereum blockchain + backend *backends.SimulatedBackend + coordinatorABI *abi.ABI + consumerABI *abi.ABI + + // Cast of participants + vrfConsumers []*bind.TransactOpts // Authors of consuming contracts that request randomness + sergey *bind.TransactOpts // Owns all the PLI initially + neil *bind.TransactOpts // Node operator running VRF service + ned *bind.TransactOpts // Secondary node operator + nallory *bind.TransactOpts // Oracle transactor + evil *bind.TransactOpts // Author of a malicious consumer contract + reverter *bind.TransactOpts // Author of always reverting contract +} + +type coordinatorV2Universe struct { + coordinatorV2UniverseCommon + vrfOwner *vrf_owner.VRFOwner + vrfOwnerAddress common.Address + vrfOwnerNew *vrf_owner.VRFOwner + vrfOwnerAddressNew common.Address + oldRootContract v22.CoordinatorV2_X + oldRootContractAddress common.Address + oldBatchCoordinatorContract *batch_vrf_coordinator_v2.BatchVRFCoordinatorV2 + oldBatchCoordinatorContractAddress common.Address + batchCoordinatorContract *batch_vrf_coordinator_v2.BatchVRFCoordinatorV2 + batchCoordinatorContractAddress common.Address +} + +func makeTestTxm(t *testing.T, txStore txmgr.TestEvmTxStore, keyStore keystore.Master, ec *evmclimocks.Client) txmgrcommon.TxManager[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] { + _, _, evmConfig := txmgr.MakeTestConfigs(t) + txmConfig := txmgr.NewEvmTxmConfig(evmConfig) + txm := txmgr.NewEvmTxm(ec.ConfiguredChainID(), txmConfig, evmConfig.Transactions(), keyStore.Eth(), logger.TestLogger(t), nil, nil, + nil, txStore, 
nil, nil, nil, nil, nil) + + return txm +} + +func newVRFCoordinatorV2Universe(t *testing.T, key ethkey.KeyV2, numConsumers int) coordinatorV2Universe { + testutils.SkipShort(t, "VRFCoordinatorV2Universe") + oracleTransactor, err := bind.NewKeyedTransactorWithChainID(key.ToEcdsaPrivKey(), testutils.SimulatedChainID) + require.NoError(t, err) + var ( + sergey = testutils.MustNewSimTransactor(t) + neil = testutils.MustNewSimTransactor(t) + ned = testutils.MustNewSimTransactor(t) + evil = testutils.MustNewSimTransactor(t) + reverter = testutils.MustNewSimTransactor(t) + nallory = oracleTransactor + vrfConsumers []*bind.TransactOpts + ) + + // Create consumer contract deployer identities + for i := 0; i < numConsumers; i++ { + vrfConsumers = append(vrfConsumers, testutils.MustNewSimTransactor(t)) + } + + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(1000).ToInt()}, + neil.From: {Balance: assets.Ether(1000).ToInt()}, + ned.From: {Balance: assets.Ether(1000).ToInt()}, + nallory.From: {Balance: assets.Ether(1000).ToInt()}, + evil.From: {Balance: assets.Ether(1000).ToInt()}, + reverter.From: {Balance: assets.Ether(1000).ToInt()}, + } + for _, consumer := range vrfConsumers { + genesisData[consumer.From] = core.GenesisAccount{ + Balance: assets.Ether(1000).ToInt(), + } + } + + gasLimit := uint32(ethconfig.Defaults.Miner.GasCeil) + consumerABI, err := abi.JSON(strings.NewReader( + vrf_consumer_v2.VRFConsumerV2ABI)) + require.NoError(t, err) + coordinatorABI, err := abi.JSON(strings.NewReader( + vrf_coordinator_v2.VRFCoordinatorV2ABI)) + require.NoError(t, err) + backend := cltest.NewSimulatedBackend(t, genesisData, gasLimit) + blockTime := time.UnixMilli(int64(backend.Blockchain().CurrentHeader().Time)) + err = backend.AdjustTime(time.Since(blockTime) - 24*time.Hour) + require.NoError(t, err) + backend.Commit() + // Deploy link + linkAddress, _, linkContract, err := link_token_interface.DeployLinkToken( + sergey, backend) + require.NoError(t, err, 
"failed to deploy link contract to simulated ethereum blockchain") + + // Deploy feed + linkEthFeed, _, _, err := + mock_v3_aggregator_contract.DeployMockV3AggregatorContract( + evil, backend, 18, vrftesthelpers.WeiPerUnitLink.BigInt()) // 0.01 eth per link + require.NoError(t, err) + + // Deploy blockhash store + bhsAddress, _, bhsContract, err := blockhash_store.DeployBlockhashStore(neil, backend) + require.NoError(t, err, "failed to deploy BlockhashStore contract to simulated ethereum blockchain") + + // Deploy batch blockhash store + batchBHSAddress, _, batchBHSContract, err := batch_blockhash_store.DeployBatchBlockhashStore(neil, backend, bhsAddress) + require.NoError(t, err, "failed to deploy BatchBlockhashStore contract to simulated ethereum blockchain") + + // Deploy VRF V2 coordinator + coordinatorAddress, _, coordinatorContract, err := + vrf_coordinator_v2.DeployVRFCoordinatorV2( + neil, backend, linkAddress, bhsAddress, linkEthFeed /* linkEth*/) + require.NoError(t, err, "failed to deploy VRFCoordinatorV2 contract to simulated ethereum blockchain") + backend.Commit() + + // Deploy batch VRF V2 coordinator + batchCoordinatorAddress, _, batchCoordinatorContract, err := + batch_vrf_coordinator_v2.DeployBatchVRFCoordinatorV2( + neil, backend, coordinatorAddress, + ) + require.NoError(t, err, "failed to deploy BatchVRFCoordinatorV2 contract to simulated ethereum blockchain") + backend.Commit() + + // Deploy old VRF v2 coordinator from bytecode + oldRootContractAddress, oldRootContract := deployOldCoordinator( + t, linkAddress, bhsAddress, linkEthFeed, backend, neil) + + // Deploy the VRFOwner contract, which will own the VRF coordinator + // in some tests. + // Don't transfer ownership now because it'll unnecessarily complicate + // tests that don't really use this code path (which will be 99.9% of all + // real-world use cases). 
+ vrfOwnerAddress, _, vrfOwner, err := vrf_owner.DeployVRFOwner( + neil, backend, oldRootContractAddress, + ) + require.NoError(t, err, "failed to deploy VRFOwner contract to simulated ethereum blockchain") + backend.Commit() + + vrfOwnerAddressNew, _, vrfOwnerNew, err := vrf_owner.DeployVRFOwner( + neil, backend, coordinatorAddress, + ) + require.NoError(t, err, "failed to deploy VRFOwner contract for vrfOwnerNew to simulated ethereum blockchain") + backend.Commit() + + // Deploy batch VRF V2 coordinator + oldBatchCoordinatorAddress, _, oldBatchCoordinatorContract, err := + batch_vrf_coordinator_v2.DeployBatchVRFCoordinatorV2( + neil, backend, coordinatorAddress, + ) + require.NoError(t, err, "failed to deploy BatchVRFCoordinatorV2 contract wrapping old vrf coordinator v2 to simulated ethereum blockchain") + backend.Commit() + + // Create the VRF consumers. + var ( + consumerContracts []vrftesthelpers.VRFConsumerContract + consumerContractAddresses []common.Address + ) + for _, author := range vrfConsumers { + // Deploy a VRF consumer. It has a starting balance of 500 PLI. 
+ consumerContractAddress, _, consumerContract, err2 := + vrf_consumer_v2.DeployVRFConsumerV2( + author, backend, coordinatorAddress, linkAddress) + require.NoError(t, err2, "failed to deploy VRFConsumer contract to simulated ethereum blockchain") + _, err2 = linkContract.Transfer(sergey, consumerContractAddress, assets.Ether(500).ToInt()) // Actually, PLI + require.NoError(t, err2, "failed to send PLI to VRFConsumer contract on simulated ethereum blockchain") + + consumerContracts = append(consumerContracts, vrftesthelpers.NewVRFConsumerV2(consumerContract)) + consumerContractAddresses = append(consumerContractAddresses, consumerContractAddress) + + backend.Commit() + } + + // Deploy malicious consumer with 1 pli + maliciousConsumerContractAddress, _, maliciousConsumerContract, err := + vrf_malicious_consumer_v2.DeployVRFMaliciousConsumerV2( + evil, backend, coordinatorAddress, linkAddress) + require.NoError(t, err, "failed to deploy VRFMaliciousConsumer contract to simulated ethereum blockchain") + _, err = linkContract.Transfer(sergey, maliciousConsumerContractAddress, assets.Ether(1).ToInt()) // Actually, PLI + require.NoError(t, err, "failed to send PLI to VRFMaliciousConsumer contract on simulated ethereum blockchain") + backend.Commit() + + // Deploy upgradeable consumer, proxy, and proxy admin + upgradeableConsumerAddress, _, _, err := vrf_consumer_v2_upgradeable_example.DeployVRFConsumerV2UpgradeableExample(neil, backend) + require.NoError(t, err, "failed to deploy upgradeable consumer to simulated ethereum blockchain") + backend.Commit() + + proxyAdminAddress, _, proxyAdmin, err := vrfv2_proxy_admin.DeployVRFV2ProxyAdmin(neil, backend) + require.NoError(t, err) + backend.Commit() + + // provide abi-encoded initialize function call on the implementation contract + // so that it's called upon the proxy construction, to initialize it. 
+ upgradeableAbi, err := vrf_consumer_v2_upgradeable_example.VRFConsumerV2UpgradeableExampleMetaData.GetAbi() + require.NoError(t, err) + initializeCalldata, err := upgradeableAbi.Pack("initialize", coordinatorAddress, linkAddress) + hexified := hexutil.Encode(initializeCalldata) + t.Log("initialize calldata:", hexified, "coordinator:", coordinatorAddress.String(), "link:", linkAddress) + require.NoError(t, err) + proxyAddress, _, _, err := vrfv2_transparent_upgradeable_proxy.DeployVRFV2TransparentUpgradeableProxy( + neil, backend, upgradeableConsumerAddress, proxyAdminAddress, initializeCalldata) + require.NoError(t, err) + + _, err = linkContract.Transfer(sergey, proxyAddress, assets.Ether(500).ToInt()) // Actually, PLI + require.NoError(t, err) + backend.Commit() + + implAddress, err := proxyAdmin.GetProxyImplementation(nil, proxyAddress) + require.NoError(t, err) + t.Log("impl address:", implAddress.String()) + require.Equal(t, upgradeableConsumerAddress, implAddress) + + proxiedConsumer, err := vrf_consumer_v2_upgradeable_example.NewVRFConsumerV2UpgradeableExample( + proxyAddress, backend) + require.NoError(t, err) + + cAddress, err := proxiedConsumer.COORDINATOR(nil) + require.NoError(t, err) + t.Log("coordinator address in proxy to upgradeable consumer:", cAddress.String()) + require.Equal(t, coordinatorAddress, cAddress) + + lAddress, err := proxiedConsumer.PLITOKEN(nil) + require.NoError(t, err) + t.Log("link address in proxy to upgradeable consumer:", lAddress.String()) + require.Equal(t, linkAddress, lAddress) + + // Deploy always reverting consumer + revertingConsumerContractAddress, _, revertingConsumerContract, err := vrfv2_reverting_example.DeployVRFV2RevertingExample( + reverter, backend, coordinatorAddress, linkAddress, + ) + require.NoError(t, err, "failed to deploy VRFRevertingExample contract to simulated eth blockchain") + _, err = linkContract.Transfer(sergey, revertingConsumerContractAddress, assets.Ether(500).ToInt()) // Actually, PLI + 
require.NoError(t, err, "failed to send PLI to VRFRevertingExample contract on simulated eth blockchain") + backend.Commit() + + // Set the configuration on the coordinator. + _, err = coordinatorContract.SetConfig(neil, + uint16(1), // minRequestConfirmations + uint32(2.5e6), // gas limit + uint32(60*60*24), // stalenessSeconds + uint32(v22.GasAfterPaymentCalculation), // gasAfterPaymentCalculation + big.NewInt(1e16), // 0.01 eth per link fallbackLinkPrice + vrf_coordinator_v2.VRFCoordinatorV2FeeConfig{ + FulfillmentFlatFeeLinkPPMTier1: uint32(1000), + FulfillmentFlatFeeLinkPPMTier2: uint32(1000), + FulfillmentFlatFeeLinkPPMTier3: uint32(100), + FulfillmentFlatFeeLinkPPMTier4: uint32(10), + FulfillmentFlatFeeLinkPPMTier5: uint32(1), + ReqsForTier2: big.NewInt(10), + ReqsForTier3: big.NewInt(20), + ReqsForTier4: big.NewInt(30), + ReqsForTier5: big.NewInt(40), + }, + ) + require.NoError(t, err, "failed to set coordinator configuration") + backend.Commit() + + // Set the configuration on the old coordinator. 
+ _, err = oldRootContract.SetConfig(neil, + uint16(1), // minRequestConfirmations + uint32(2.5e6), // gas limit + uint32(60*60*24), // stalenessSeconds + uint32(v22.GasAfterPaymentCalculation), // gasAfterPaymentCalculation + big.NewInt(1e16), // 0.01 eth per link fallbackLinkPrice + vrf_coordinator_v2.VRFCoordinatorV2FeeConfig{ + FulfillmentFlatFeeLinkPPMTier1: uint32(1000), + FulfillmentFlatFeeLinkPPMTier2: uint32(1000), + FulfillmentFlatFeeLinkPPMTier3: uint32(100), + FulfillmentFlatFeeLinkPPMTier4: uint32(10), + FulfillmentFlatFeeLinkPPMTier5: uint32(1), + ReqsForTier2: big.NewInt(10), + ReqsForTier3: big.NewInt(20), + ReqsForTier4: big.NewInt(30), + ReqsForTier5: big.NewInt(40), + }, + ) + require.NoError(t, err, "failed to set old coordinator configuration") + backend.Commit() + + return coordinatorV2Universe{ + coordinatorV2UniverseCommon: coordinatorV2UniverseCommon{ + vrfConsumers: vrfConsumers, + consumerContracts: consumerContracts, + consumerContractAddresses: consumerContractAddresses, + revertingConsumerContract: vrftesthelpers.NewRevertingConsumer(revertingConsumerContract), + revertingConsumerContractAddress: revertingConsumerContractAddress, + + consumerProxyContract: vrftesthelpers.NewUpgradeableConsumer(proxiedConsumer), + consumerProxyContractAddress: proxiedConsumer.Address(), + proxyAdminAddress: proxyAdminAddress, + + rootContract: v22.NewCoordinatorV2(coordinatorContract), + rootContractAddress: coordinatorAddress, + linkContract: linkContract, + linkContractAddress: linkAddress, + linkEthFeedAddress: linkEthFeed, + bhsContract: bhsContract, + bhsContractAddress: bhsAddress, + batchBHSContract: batchBHSContract, + batchBHSContractAddress: batchBHSAddress, + maliciousConsumerContract: vrftesthelpers.NewMaliciousConsumer(maliciousConsumerContract), + maliciousConsumerContractAddress: maliciousConsumerContractAddress, + backend: backend, + coordinatorABI: &coordinatorABI, + consumerABI: &consumerABI, + sergey: sergey, + neil: neil, + ned: ned, 
+ nallory: nallory, + evil: evil, + reverter: reverter, + }, + vrfOwner: vrfOwner, + vrfOwnerAddress: vrfOwnerAddress, + vrfOwnerNew: vrfOwnerNew, + vrfOwnerAddressNew: vrfOwnerAddressNew, + oldRootContractAddress: oldRootContractAddress, + oldRootContract: v22.NewCoordinatorV2(oldRootContract), + oldBatchCoordinatorContract: oldBatchCoordinatorContract, + oldBatchCoordinatorContractAddress: oldBatchCoordinatorAddress, + batchCoordinatorContract: batchCoordinatorContract, + batchCoordinatorContractAddress: batchCoordinatorAddress, + } +} + +func deployOldCoordinator( + t *testing.T, + linkAddress common.Address, + bhsAddress common.Address, + linkEthFeed common.Address, + backend *backends.SimulatedBackend, + neil *bind.TransactOpts, +) ( + common.Address, + *vrf_coordinator_v2.VRFCoordinatorV2, +) { + ctx := testutils.Context(t) + bytecode := hexutil.MustDecode("0x60e06040523480156200001157600080fd5b506040516200608c3803806200608c8339810160408190526200003491620001b1565b33806000816200008b5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000be57620000be81620000e8565b5050506001600160601b0319606093841b811660805290831b811660a052911b1660c052620001fb565b6001600160a01b038116331415620001435760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000082565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b80516001600160a01b0381168114620001ac57600080fd5b919050565b600080600060608486031215620001c757600080fd5b620001d28462000194565b9250620001e26020850162000194565b9150620001f26040850162000194565b90509250925092565b60805160601c60a05160601c60c05160601c615e2762000265600039600081816105260152613bd901526000818161061d01526140
2401526000818161036d01528181611599015281816125960152818161302c0152818161318201526138360152615e276000f3fe608060405234801561001057600080fd5b506004361061025b5760003560e01c80636f64f03f11610145578063ad178361116100bd578063d2f9f9a71161008c578063e72f6e3011610071578063e72f6e30146106fa578063e82ad7d41461070d578063f2fde38b1461073057600080fd5b8063d2f9f9a7146106d4578063d7ae1d30146106e757600080fd5b8063ad17836114610618578063af198b971461063f578063c3f909d41461066f578063caf70c4a146106c157600080fd5b80638da5cb5b11610114578063a21a23e4116100f9578063a21a23e4146105da578063a47c7696146105e2578063a4c0ed361461060557600080fd5b80638da5cb5b146105a95780639f87fad7146105c757600080fd5b80636f64f03f146105685780637341c10c1461057b57806379ba50971461058e578063823597401461059657600080fd5b8063356dac71116101d85780635fbbc0d2116101a757806366316d8d1161018c57806366316d8d1461050e578063689c45171461052157806369bcdb7d1461054857600080fd5b80635fbbc0d21461040057806364d51a2a1461050657600080fd5b8063356dac71146103b457806340d6bb82146103bc5780634cb48a54146103da5780635d3b1d30146103ed57600080fd5b806308821d581161022f57806315c48b841161021457806315c48b841461030e578063181f5a77146103295780631b6b6d231461036857600080fd5b806308821d58146102cf57806312b58349146102e257600080fd5b80620122911461026057806302bcc5b61461028057806304c357cb1461029557806306bfa637146102a8575b600080fd5b610268610743565b60405161027793929190615964565b60405180910390f35b61029361028e366004615792565b6107bf565b005b6102936102a33660046157ad565b61086b565b60055467ffffffffffffffff165b60405167ffffffffffffffff9091168152602001610277565b6102936102dd3660046154a3565b610a60565b6005546801000000000000000090046bffffffffffffffffffffffff165b604051908152602001610277565b61031660c881565b60405161ffff9091168152602001610277565b604080518082018252601681527f565246436f6f7264696e61746f72563220312e302e30000000000000000000006020820152905161027791906158f1565b61038f7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161027756
5b600a54610300565b6103c56101f481565b60405163ffffffff9091168152602001610277565b6102936103e836600461563c565b610c3f565b6103006103fb366004615516565b611036565b600c546040805163ffffffff80841682526401000000008404811660208301526801000000000000000084048116928201929092526c010000000000000000000000008304821660608201527001000000000000000000000000000000008304909116608082015262ffffff740100000000000000000000000000000000000000008304811660a0830152770100000000000000000000000000000000000000000000008304811660c08301527a0100000000000000000000000000000000000000000000000000008304811660e08301527d01000000000000000000000000000000000000000000000000000000000090920490911661010082015261012001610277565b610316606481565b61029361051c36600461545b565b611444565b61038f7f000000000000000000000000000000000000000000000000000000000000000081565b610300610556366004615779565b60009081526009602052604090205490565b6102936105763660046153a0565b6116ad565b6102936105893660046157ad565b6117f7565b610293611a85565b6102936105a4366004615792565b611b82565b60005473ffffffffffffffffffffffffffffffffffffffff1661038f565b6102936105d53660046157ad565b611d7c565b6102b66121fd565b6105f56105f0366004615792565b6123ed565b6040516102779493929190615b02565b6102936106133660046153d4565b612537565b61038f7f000000000000000000000000000000000000000000000000000000000000000081565b61065261064d366004615574565b6127a8565b6040516bffffffffffffffffffffffff9091168152602001610277565b600b546040805161ffff8316815263ffffffff6201000084048116602083015267010000000000000084048116928201929092526b010000000000000000000000909204166060820152608001610277565b6103006106cf3660046154bf565b612c6d565b6103c56106e2366004615792565b612c9d565b6102936106f53660046157ad565b612e92565b610293610708366004615385565b612ff3565b61072061071b366004615792565b613257565b6040519015158152602001610277565b61029361073e366004615385565b6134ae565b600b546007805460408051602080840282018101909252828152600094859460609461ffff8316946201000090930463ffffffff169391928391908301828280156107ad57602002820191906000526020600020905b8154
81526020019060010190808311610799575b50505050509050925092509250909192565b6107c76134bf565b67ffffffffffffffff811660009081526003602052604090205473ffffffffffffffffffffffffffffffffffffffff1661082d576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff811660009081526003602052604090205461086890829073ffffffffffffffffffffffffffffffffffffffff16613542565b50565b67ffffffffffffffff8216600090815260036020526040902054829073ffffffffffffffffffffffffffffffffffffffff16806108d4576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff821614610940576040517fd8a3fb5200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff821660048201526024015b60405180910390fd5b600b546601000000000000900460ff1615610987576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff841660009081526003602052604090206001015473ffffffffffffffffffffffffffffffffffffffff848116911614610a5a5767ffffffffffffffff841660008181526003602090815260409182902060010180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff88169081179091558251338152918201527f69436ea6df009049404f564eff6622cd00522b0bd6a89efd9e52a355c4a879be91015b60405180910390a25b50505050565b610a686134bf565b604080518082018252600091610a97919084906002908390839080828437600092019190915250612c6d915050565b60008181526006602052604090205490915073ffffffffffffffffffffffffffffffffffffffff1680610af9576040517f77f5b84c00000000000000000000000000000000000000000000000000000000815260048101839052602401610937565b600082815260066020526040812080547fffffffffffffffffffffffff00000000000000000000000000000000000000001690555b600754811015610be9578260078281548110610b4c57610b4c615dbc565b90600052602060002001541415610bd7576007805460009190610b7190600190615c76565b81548110610b8157610b81615dbc565b9060
00526020600020015490508060078381548110610ba257610ba2615dbc565b6000918252602090912001556007805480610bbf57610bbf615d8d565b60019003818190600052602060002001600090559055505b80610be181615cba565b915050610b2e565b508073ffffffffffffffffffffffffffffffffffffffff167f72be339577868f868798bac2c93e52d6f034fef4689a9848996c14ebb7416c0d83604051610c3291815260200190565b60405180910390a2505050565b610c476134bf565b60c861ffff87161115610c9a576040517fa738697600000000000000000000000000000000000000000000000000000000815261ffff871660048201819052602482015260c86044820152606401610937565b60008213610cd7576040517f43d4cf6600000000000000000000000000000000000000000000000000000000815260048101839052602401610937565b6040805160a0808201835261ffff891680835263ffffffff89811660208086018290526000868801528a831660608088018290528b85166080988901819052600b80547fffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000001690971762010000909502949094177fffffffffffffffffffffffffffffffffff000000000000000000ffffffffffff166701000000000000009092027fffffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffff16919091176b010000000000000000000000909302929092179093558651600c80549489015189890151938a0151978a0151968a015160c08b015160e08c01516101008d01519588167fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000009099169890981764010000000093881693909302929092177fffffffffffffffffffffffffffffffff0000000000000000ffffffffffffffff1668010000000000000000958716959095027fffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffff16949094176c0100000000000000000000000098861698909802979097177fffffffffffffffffff00000000000000ffffffffffffffffffffffffffffffff1670010000000000000000000000000000000096909416959095027fffffffffffffffffff000000ffffffffffffffffffffffffffffffffffffffff16929092177401000000000000000000000000000000000000000062ffffff92831602177fffffff000000000000ffffffffffffffffffffffffffffffffffffffffffffff1677010000000000000000000000000000000000000000000000958216959095027fffffff000000ffffffffffffffffffffffffffffffffffffffff
ffffffffffff16949094177a01000000000000000000000000000000000000000000000000000092851692909202919091177cffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167d0100000000000000000000000000000000000000000000000000000000009390911692909202919091178155600a84905590517fc21e3bd2e0b339d2848f0dd956947a88966c242c0c0c582a33137a5c1ceb5cb2916110269189918991899189918991906159c3565b60405180910390a1505050505050565b600b546000906601000000000000900460ff1615611080576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff851660009081526003602052604090205473ffffffffffffffffffffffffffffffffffffffff166110e6576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b33600090815260026020908152604080832067ffffffffffffffff808a1685529252909120541680611156576040517ff0019fe600000000000000000000000000000000000000000000000000000000815267ffffffffffffffff87166004820152336024820152604401610937565b600b5461ffff9081169086161080611172575060c861ffff8616115b156111c257600b546040517fa738697600000000000000000000000000000000000000000000000000000000815261ffff8088166004830152909116602482015260c86044820152606401610937565b600b5463ffffffff620100009091048116908516111561122957600b546040517ff5d7e01e00000000000000000000000000000000000000000000000000000000815263ffffffff8087166004830152620100009092049091166024820152604401610937565b6101f463ffffffff8416111561127b576040517f47386bec00000000000000000000000000000000000000000000000000000000815263ffffffff841660048201526101f46024820152604401610937565b6000611288826001615bd2565b6040805160208082018c9052338284015267ffffffffffffffff808c16606084015284166080808401919091528351808403909101815260a08301845280519082012060c083018d905260e080840182905284518085039091018152610100909301909352815191012091925060009182916040805160208101849052439181019190915267ffffffffffffffff8c16606082015263ffffffff808b166080830152891660a08201523360c0820152919350915060e001604080518083037fffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffe0018152828252805160209182012060008681526009835283902055848352820183905261ffff8a169082015263ffffffff808916606083015287166080820152339067ffffffffffffffff8b16908c907f63373d1c4696214b898952999c9aaec57dac1ee2723cec59bea6888f489a97729060a00160405180910390a45033600090815260026020908152604080832067ffffffffffffffff808d16855292529091208054919093167fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000009091161790915591505095945050505050565b600b546601000000000000900460ff161561148b576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336000908152600860205260409020546bffffffffffffffffffffffff808316911610156114e5576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b33600090815260086020526040812080548392906115129084906bffffffffffffffffffffffff16615c8d565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555080600560088282829054906101000a90046bffffffffffffffffffffffff166115699190615c8d565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb83836040518363ffffffff1660e01b815260040161162192919073ffffffffffffffffffffffffffffffffffffffff9290921682526bffffffffffffffffffffffff16602082015260400190565b602060405180830381600087803b15801561163b57600080fd5b505af115801561164f573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061167391906154db565b6116a9576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5050565b6116b56134bf565b6040805180820182526000916116e4919084906002908390839080828437600092019190915250612c6d915050565b60008181526006602052604090205490915073ffffffffffffffffffffffffffffffffffffffff1615611746576040517f4a0b8fa700000000000000000000000000000000000000000000000000000000815260048101
829052602401610937565b600081815260066020908152604080832080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff88169081179091556007805460018101825594527fa66cc928b5edb82af9bd49922954155ab7b0942694bea4ce44661d9a8736c688909301849055518381527fe729ae16526293f74ade739043022254f1489f616295a25bf72dfb4511ed73b89101610c32565b67ffffffffffffffff8216600090815260036020526040902054829073ffffffffffffffffffffffffffffffffffffffff1680611860576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff8216146118c7576040517fd8a3fb5200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610937565b600b546601000000000000900460ff161561190e576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff841660009081526003602052604090206002015460641415611965576040517f05a48e0f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8316600090815260026020908152604080832067ffffffffffffffff808916855292529091205416156119ac57610a5a565b73ffffffffffffffffffffffffffffffffffffffff8316600081815260026020818152604080842067ffffffffffffffff8a1680865290835281852080547fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000166001908117909155600384528286209094018054948501815585529382902090920180547fffffffffffffffffffffffff00000000000000000000000000000000000000001685179055905192835290917f43dc749a04ac8fb825cbd514f7c0e13f13bc6f2ee66043b76629d51776cff8e09101610a51565b60015473ffffffffffffffffffffffffffffffffffffffff163314611b06576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4d7573742062652070726f706f736564206f776e6572000000000000000000006044820152606401610937565b60008054337fffffffffffffffffffffffff0000000000000000000000
0000000000000000008083168217845560018054909116905560405173ffffffffffffffffffffffffffffffffffffffff90921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b600b546601000000000000900460ff1615611bc9576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff811660009081526003602052604090205473ffffffffffffffffffffffffffffffffffffffff16611c2f576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff811660009081526003602052604090206001015473ffffffffffffffffffffffffffffffffffffffff163314611cd15767ffffffffffffffff8116600090815260036020526040908190206001015490517fd084e97500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff9091166004820152602401610937565b67ffffffffffffffff81166000818152600360209081526040918290208054337fffffffffffffffffffffffff00000000000000000000000000000000000000008083168217845560019093018054909316909255835173ffffffffffffffffffffffffffffffffffffffff909116808252928101919091529092917f6f1dc65165ffffedfd8e507b4a0f1fcfdada045ed11f6c26ba27cedfe87802f0910160405180910390a25050565b67ffffffffffffffff8216600090815260036020526040902054829073ffffffffffffffffffffffffffffffffffffffff1680611de5576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff821614611e4c576040517fd8a3fb5200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610937565b600b546601000000000000900460ff1615611e93576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff8316600090815260026020908152604080832067ffffffffffffffff808916855292529091205416611f2e576040517ff0019fe600000000000000000000000000000000000000000000000000000000815267ffffffffffffffff8516600482015273
ffffffffffffffffffffffffffffffffffffffff84166024820152604401610937565b67ffffffffffffffff8416600090815260036020908152604080832060020180548251818502810185019093528083529192909190830182828015611fa957602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311611f7e575b50505050509050600060018251611fc09190615c76565b905060005b825181101561215f578573ffffffffffffffffffffffffffffffffffffffff16838281518110611ff757611ff7615dbc565b602002602001015173ffffffffffffffffffffffffffffffffffffffff16141561214d57600083838151811061202f5761202f615dbc565b6020026020010151905080600360008a67ffffffffffffffff1667ffffffffffffffff168152602001908152602001600020600201838154811061207557612075615dbc565b600091825260208083209190910180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff949094169390931790925567ffffffffffffffff8a1681526003909152604090206002018054806120ef576120ef615d8d565b60008281526020902081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90810180547fffffffffffffffffffffffff00000000000000000000000000000000000000001690550190555061215f565b8061215781615cba565b915050611fc5565b5073ffffffffffffffffffffffffffffffffffffffff8516600081815260026020908152604080832067ffffffffffffffff8b168085529083529281902080547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001690555192835290917f182bff9831466789164ca77075fffd84916d35a8180ba73c27e45634549b445b91015b60405180910390a2505050505050565b600b546000906601000000000000900460ff1615612247576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6005805467ffffffffffffffff1690600061226183615cf3565b82546101009290920a67ffffffffffffffff8181021990931691831602179091556005541690506000806040519080825280602002602001820160405280156122b4578160200160208202803683370190505b506040805180820182526000808252602080830182815267ffffffffffffffff888116808552600484528685209551865493516bffffffffffffffffffffff
ff9091167fffffffffffffffffffffffff0000000000000000000000000000000000000000948516176c010000000000000000000000009190931602919091179094558451606081018652338152808301848152818701888152958552600384529590932083518154831673ffffffffffffffffffffffffffffffffffffffff918216178255955160018201805490931696169590951790559151805194955090936123a592600285019201906150c5565b505060405133815267ffffffffffffffff841691507f464722b4166576d3dcbba877b999bc35cf911f4eaf434b7eba68fa113951d0bf9060200160405180910390a250905090565b67ffffffffffffffff81166000908152600360205260408120548190819060609073ffffffffffffffffffffffffffffffffffffffff1661245a576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff80861660009081526004602090815260408083205460038352928190208054600290910180548351818602810186019094528084526bffffffffffffffffffffffff8616966c010000000000000000000000009096049095169473ffffffffffffffffffffffffffffffffffffffff90921693909291839183018282801561252157602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116124f6575b5050505050905093509350935093509193509193565b600b546601000000000000900460ff161561257e576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146125ed576040517f44b0e3c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60208114612627576040517f8129bbcd00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061263582840184615792565b67ffffffffffffffff811660009081526003602052604090205490915073ffffffffffffffffffffffffffffffffffffffff1661269e576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff8116600090815260046020526040812080546bffffffffffffffffffffffff16918691906126d58385615bfe565b92506101000a
8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff16021790555084600560088282829054906101000a90046bffffffffffffffffffffffff1661272c9190615bfe565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055508167ffffffffffffffff167fd39ec07f4e209f627a4c427971473820dc129761ba28de8906bd56f57101d4f88287846127939190615bba565b604080519283526020830191909152016121ed565b600b546000906601000000000000900460ff16156127f2576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005a9050600080600061280687876139b5565b9250925092506000866060015163ffffffff1667ffffffffffffffff81111561283157612831615deb565b60405190808252806020026020018201604052801561285a578160200160208202803683370190505b50905060005b876060015163ffffffff168110156128ce5760408051602081018590529081018290526060016040516020818303038152906040528051906020012060001c8282815181106128b1576128b1615dbc565b6020908102919091010152806128c681615cba565b915050612860565b506000838152600960205260408082208290555181907f1fe543e300000000000000000000000000000000000000000000000000000000906129169087908690602401615ab4565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff0000000000000000000000000000000000000000000000000000000090941693909317909252600b80547fffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff166601000000000000179055908a015160808b01519192506000916129e49163ffffffff169084613d04565b600b80547fffffffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffff1690556020808c01805167ffffffffffffffff9081166000908152600490935260408084205492518216845290922080549394506c01000000000000000000000000918290048316936001939192600c92612a68928692900416615bd2565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055506000612abf8a600b600001600b9054906101000a900463ffffffff1663ffffffff16612ab985612c9d565b3a613d52565b6020808e
015167ffffffffffffffff166000908152600490915260409020549091506bffffffffffffffffffffffff80831691161015612b2b576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6020808d015167ffffffffffffffff1660009081526004909152604081208054839290612b679084906bffffffffffffffffffffffff16615c8d565b82546101009290920a6bffffffffffffffffffffffff81810219909316918316021790915560008b81526006602090815260408083205473ffffffffffffffffffffffffffffffffffffffff1683526008909152812080548594509092612bd091859116615bfe565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff160217905550877f7dffc5ae5ee4e2e4df1651cf6ad329a73cebdb728f37ea0187b9b17e036756e4888386604051612c53939291909283526bffffffffffffffffffffffff9190911660208301521515604082015260600190565b60405180910390a299505050505050505050505b92915050565b600081604051602001612c8091906158e3565b604051602081830303815290604052805190602001209050919050565b6040805161012081018252600c5463ffffffff80821683526401000000008204811660208401526801000000000000000082048116938301939093526c010000000000000000000000008104831660608301527001000000000000000000000000000000008104909216608082015262ffffff740100000000000000000000000000000000000000008304811660a08301819052770100000000000000000000000000000000000000000000008404821660c08401527a0100000000000000000000000000000000000000000000000000008404821660e08401527d0100000000000000000000000000000000000000000000000000000000009093041661010082015260009167ffffffffffffffff841611612dbb575192915050565b8267ffffffffffffffff168160a0015162ffffff16108015612df057508060c0015162ffffff168367ffffffffffffffff1611155b15612dff576020015192915050565b8267ffffffffffffffff168160c0015162ffffff16108015612e3457508060e0015162ffffff168367ffffffffffffffff1611155b15612e43576040015192915050565b8267ffffffffffffffff168160e0015162ffffff16108015612e79575080610100015162ffffff168367ffffffffffffffff1611155b15612e88576060015192915050565b6080015192915050565b67ffffffffffffffff82166000908152600360205260409020
54829073ffffffffffffffffffffffffffffffffffffffff1680612efb576040517f1f6a65b600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff821614612f62576040517fd8a3fb5200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82166004820152602401610937565b600b546601000000000000900460ff1615612fa9576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b612fb284613257565b15612fe9576040517fb42f66e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610a5a8484613542565b612ffb6134bf565b6040517f70a082310000000000000000000000000000000000000000000000000000000081523060048201526000907f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16906370a082319060240160206040518083038186803b15801561308357600080fd5b505afa158015613097573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906130bb91906154fd565b6005549091506801000000000000000090046bffffffffffffffffffffffff168181111561311f576040517fa99da3020000000000000000000000000000000000000000000000000000000081526004810182905260248101839052604401610937565b818110156132525760006131338284615c76565b6040517fa9059cbb00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8681166004830152602482018390529192507f00000000000000000000000000000000000000000000000000000000000000009091169063a9059cbb90604401602060405180830381600087803b1580156131c857600080fd5b505af11580156131dc573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061320091906154db565b506040805173ffffffffffffffffffffffffffffffffffffffff86168152602081018390527f59bfc682b673f8cbf945f1e454df9334834abf7dfe7f92237ca29ecb9b436600910160405180910390a1505b505050565b67ffffffffffffffff811660009081526003602090815260408083208151606081018352815473ffffffffffffffffffffffffffffffffffffffff90
81168252600183015416818501526002820180548451818702810187018652818152879693958601939092919083018282801561330657602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116132db575b505050505081525050905060005b8160400151518110156134a45760005b60075481101561349157600061345a6007838154811061334657613346615dbc565b90600052602060002001548560400151858151811061336757613367615dbc565b602002602001015188600260008960400151898151811061338a5761338a615dbc565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040908101600090812067ffffffffffffffff808f168352935220541660408051602080820187905273ffffffffffffffffffffffffffffffffffffffff959095168183015267ffffffffffffffff9384166060820152919092166080808301919091528251808303909101815260a08201835280519084012060c082019490945260e080820185905282518083039091018152610100909101909152805191012091565b506000818152600960205260409020549091501561347e5750600195945050505050565b508061348981615cba565b915050613324565b508061349c81615cba565b915050613314565b5060009392505050565b6134b66134bf565b61086881613e5a565b60005473ffffffffffffffffffffffffffffffffffffffff163314613540576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4f6e6c792063616c6c61626c65206279206f776e6572000000000000000000006044820152606401610937565b565b600b546601000000000000900460ff1615613589576040517fed3ba6a600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b67ffffffffffffffff821660009081526003602090815260408083208151606081018352815473ffffffffffffffffffffffffffffffffffffffff90811682526001830154168185015260028201805484518187028101870186528181529295939486019383018282801561363457602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311613609575b5050509190925250505067ffffffffffffffff80851660009081526004602090815260408083208151808301909252546bffffffffffffffffffffffff81168083526c01000000
000000000000000000909104909416918101919091529293505b83604001515181101561373b5760026000856040015183815181106136bc576136bc615dbc565b60209081029190910181015173ffffffffffffffffffffffffffffffffffffffff168252818101929092526040908101600090812067ffffffffffffffff8a168252909252902080547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001690558061373381615cba565b915050613695565b5067ffffffffffffffff8516600090815260036020526040812080547fffffffffffffffffffffffff00000000000000000000000000000000000000009081168255600182018054909116905590613796600283018261514f565b505067ffffffffffffffff8516600090815260046020526040902080547fffffffffffffffffffffffff0000000000000000000000000000000000000000169055600580548291906008906138069084906801000000000000000090046bffffffffffffffffffffffff16615c8d565b92506101000a8154816bffffffffffffffffffffffff02191690836bffffffffffffffffffffffff1602179055507f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663a9059cbb85836bffffffffffffffffffffffff166040518363ffffffff1660e01b81526004016138be92919073ffffffffffffffffffffffffffffffffffffffff929092168252602082015260400190565b602060405180830381600087803b1580156138d857600080fd5b505af11580156138ec573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061391091906154db565b613946576040517ff4d678b800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805173ffffffffffffffffffffffffffffffffffffffff861681526bffffffffffffffffffffffff8316602082015267ffffffffffffffff8716917fe8ed5b475a5b5987aa9165e8731bb78043f39eee32ec5a1169a89e27fcd49815910160405180910390a25050505050565b60008060006139c78560000151612c6d565b60008181526006602052604090205490935073ffffffffffffffffffffffffffffffffffffffff1680613a29576040517f77f5b84c00000000000000000000000000000000000000000000000000000000815260048101859052602401610937565b6080860151604051613a48918691602001918252602082015260400190565b604080517fffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffe081840301815291815281516020928301206000818152600990935291205490935080613ac5576040517f3688124a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b85516020808801516040808a015160608b015160808c01519251613b3e968b96909594910195865267ffffffffffffffff948516602087015292909316604085015263ffffffff908116606085015291909116608083015273ffffffffffffffffffffffffffffffffffffffff1660a082015260c00190565b604051602081830303815290604052805190602001208114613b8c576040517fd529142c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b855167ffffffffffffffff164080613cb05786516040517fe9413d3800000000000000000000000000000000000000000000000000000000815267ffffffffffffffff90911660048201527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169063e9413d389060240160206040518083038186803b158015613c3057600080fd5b505afa158015613c44573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190613c6891906154fd565b905080613cb05786516040517f175dadad00000000000000000000000000000000000000000000000000000000815267ffffffffffffffff9091166004820152602401610937565b6000886080015182604051602001613cd2929190918252602082015260400190565b6040516020818303038152906040528051906020012060001c9050613cf78982613f50565b9450505050509250925092565b60005a611388811015613d1657600080fd5b611388810390508460408204820311613d2e57600080fd5b50823b613d3a57600080fd5b60008083516020850160008789f190505b9392505050565b600080613d5d613fd9565b905060008113613d9c576040517f43d4cf6600000000000000000000000000000000000000000000000000000000815260048101829052602401610937565b6000815a613daa8989615bba565b613db49190615c76565b613dc686670de0b6b3a7640000615c39565b613dd09190615c39565b613dda9190615c25565b90506000613df363ffffffff871664e8d4a51000615c39565b9050613e0b816b033b2e3c9fd0803ce8000000615c76565b821115613e44576040517fe80fa38100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b613e4e8183615b
ba565b98975050505050505050565b73ffffffffffffffffffffffffffffffffffffffff8116331415613eda576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c660000000000000000006044820152606401610937565b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6000613f848360000151846020015185604001518660600151868860a001518960c001518a60e001518b61010001516140ed565b60038360200151604051602001613f9c929190615aa0565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101209392505050565b600b54604080517ffeaf968c0000000000000000000000000000000000000000000000000000000081529051600092670100000000000000900463ffffffff169182151591849182917f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff169163feaf968c9160048083019260a0929190829003018186803b15801561407f57600080fd5b505afa158015614093573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906140b791906157d7565b5094509092508491505080156140db57506140d28242615c76565b8463ffffffff16105b156140e55750600a545b949350505050565b6140f6896143c4565b61415c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f7075626c6963206b6579206973206e6f74206f6e2063757276650000000000006044820152606401610937565b614165886143c4565b6141cb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f67616d6d61206973206e6f74206f6e20637572766500000000000000000000006044820152606401610937565b6141d4836143c4565b61423a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f6347616d6d615769746e657373206973206e6f74206f6e206375727665000000604482015260
6401610937565b614243826143c4565b6142a9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f73486173685769746e657373206973206e6f74206f6e206375727665000000006044820152606401610937565b6142b5878a888761451f565b61431b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f6164647228632a706b2b732a6729213d5f755769746e657373000000000000006044820152606401610937565b60006143278a876146c2565b9050600061433a898b878b868989614726565b9050600061434b838d8d8a866148ae565b9050808a146143b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600d60248201527f696e76616c69642070726f6f66000000000000000000000000000000000000006044820152606401610937565b505050505050505050505050565b80516000907ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f11614451576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f696e76616c696420782d6f7264696e61746500000000000000000000000000006044820152606401610937565b60208201517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f116144de576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f696e76616c696420792d6f7264696e61746500000000000000000000000000006044820152606401610937565b60208201517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f9080096145188360005b602002015161490c565b1492915050565b600073ffffffffffffffffffffffffffffffffffffffff821661459e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b60248201527f626164207769746e6573730000000000000000000000000000000000000000006044820152606401610937565b6020840151600090600116156145b557601c6145b8565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641418587600060200201510986517ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641419182039250600091908909875160408051
60008082526020820180845287905260ff88169282019290925260608101929092526080820183905291925060019060a0016020604051602081039080840390855afa15801561466f573d6000803e3d6000fd5b50506040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0015173ffffffffffffffffffffffffffffffffffffffff9081169088161495505050505050949350505050565b6146ca61516d565b6146f7600184846040516020016146e3939291906158c2565b604051602081830303815290604052614964565b90505b614703816143c4565b612c6757805160408051602081019290925261471f91016146e3565b90506146fa565b61472e61516d565b825186517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f90819006910614156147c1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f706f696e747320696e2073756d206d7573742062652064697374696e637400006044820152606401610937565b6147cc8789886149cd565b614832576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4669727374206d756c20636865636b206661696c6564000000000000000000006044820152606401610937565b61483d8486856149cd565b6148a3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f5365636f6e64206d756c20636865636b206661696c65640000000000000000006044820152606401610937565b613e4e868484614b5a565b6000600286868685876040516020016148cc96959493929190615850565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101209695505050505050565b6000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80848509840990507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f600782089392505050565b61496c61516d565b61497582614c89565b815261498a61498582600061450e565b614cde565b6020820181905260029006600114156149c8576020810180517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0390525b919050565b600082614a36576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600b
60248201527f7a65726f207363616c61720000000000000000000000000000000000000000006044820152606401610937565b83516020850151600090614a4c90600290615d1b565b15614a5857601c614a5b565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641418387096040805160008082526020820180845281905260ff86169282019290925260608101869052608081018390529192509060019060a0016020604051602081039080840390855afa158015614adb573d6000803e3d6000fd5b505050602060405103519050600086604051602001614afa919061583e565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052805160209091012073ffffffffffffffffffffffffffffffffffffffff92831692169190911498975050505050505050565b614b6261516d565b835160208086015185519186015160009384938493614b8393909190614d18565b919450925090507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f858209600114614c17576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f696e765a206d75737420626520696e7665727365206f66207a000000000000006044820152606401610937565b60405180604001604052807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80614c5057614c50615d5e565b87860981526020017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8785099052979650505050505050565b805160208201205b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f81106149c857604080516020808201939093528151808203840181529082019091528051910120614c91565b6000612c67826002614d117ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f6001615bba565b901c614eae565b60008080600180827ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f897ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f038808905060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f038a0890506000614dc083838585614fa2565b9098509050614dd188828e88614ffa565b9098509050614de288828c87614ffa565b90985090506000614d
f58d878b85614ffa565b9098509050614e0688828686614fa2565b9098509050614e1788828e89614ffa565b9098509050818114614e9a577ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f818a0998507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f82890997507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8183099650614e9e565b8196505b5050505050509450945094915050565b600080614eb961518b565b6020808252818101819052604082015260608101859052608081018490527ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f60a0820152614f056151a9565b60208160c08460057ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa925082614f98576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6269674d6f64457870206661696c7572652100000000000000000000000000006044820152606401610937565b5195945050505050565b6000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8487097ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8487099097909650945050505050565b600080807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f878509905060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f87877ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f030990507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8183087ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f86890990999098509650505050505050565b82805482825590600052602060002090810192821561513f579160200282015b8281111561513f57825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff9091161782556020909201916001909101906150e5565b5061514b9291506151c7565b5090565b508054600082559060005260206000209081019061086891906151c7565b60405180604001604052806002906020820280368337509192915050565b6040518060c001604052806006906020820280368337509192915050565b60405180602001604052806001906020820280368337509192915050565b5b8082111561514b57600081
556001016151c8565b803573ffffffffffffffffffffffffffffffffffffffff811681146149c857600080fd5b8060408101831015612c6757600080fd5b600082601f83011261522257600080fd5b6040516040810181811067ffffffffffffffff8211171561524557615245615deb565b806040525080838560408601111561525c57600080fd5b60005b600281101561527e57813583526020928301929091019060010161525f565b509195945050505050565b600060a0828403121561529b57600080fd5b60405160a0810181811067ffffffffffffffff821117156152be576152be615deb565b6040529050806152cd83615353565b81526152db60208401615353565b60208201526152ec6040840161533f565b60408201526152fd6060840161533f565b606082015261530e608084016151dc565b60808201525092915050565b803561ffff811681146149c857600080fd5b803562ffffff811681146149c857600080fd5b803563ffffffff811681146149c857600080fd5b803567ffffffffffffffff811681146149c857600080fd5b805169ffffffffffffffffffff811681146149c857600080fd5b60006020828403121561539757600080fd5b613d4b826151dc565b600080606083850312156153b357600080fd5b6153bc836151dc565b91506153cb8460208501615200565b90509250929050565b600080600080606085870312156153ea57600080fd5b6153f3856151dc565b935060208501359250604085013567ffffffffffffffff8082111561541757600080fd5b818701915087601f83011261542b57600080fd5b81358181111561543a57600080fd5b88602082850101111561544c57600080fd5b95989497505060200194505050565b6000806040838503121561546e57600080fd5b615477836151dc565b915060208301356bffffffffffffffffffffffff8116811461549857600080fd5b809150509250929050565b6000604082840312156154b557600080fd5b613d4b8383615200565b6000604082840312156154d157600080fd5b613d4b8383615211565b6000602082840312156154ed57600080fd5b81518015158114613d4b57600080fd5b60006020828403121561550f57600080fd5b5051919050565b600080600080600060a0868803121561552e57600080fd5b8535945061553e60208701615353565b935061554c6040870161531a565b925061555a6060870161533f565b91506155686080870161533f565b90509295509295909350565b60008082840361024081121561558957600080fd5b6101a08082121561559957600080fd5b6155a1615b90565b91506155ad8686615211565b82526155bc866040870161521156
5b60208301526080850135604083015260a0850135606083015260c085013560808301526155eb60e086016151dc565b60a08301526101006155ff87828801615211565b60c0840152615612876101408801615211565b60e0840152610180860135818401525081935061563186828701615289565b925050509250929050565b6000806000806000808688036101c081121561565757600080fd5b6156608861531a565b965061566e6020890161533f565b955061567c6040890161533f565b945061568a6060890161533f565b935060808801359250610120807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff60830112156156c557600080fd5b6156cd615b90565b91506156db60a08a0161533f565b82526156e960c08a0161533f565b60208301526156fa60e08a0161533f565b604083015261010061570d818b0161533f565b606084015261571d828b0161533f565b608084015261572f6101408b0161532c565b60a08401526157416101608b0161532c565b60c08401526157536101808b0161532c565b60e08401526157656101a08b0161532c565b818401525050809150509295509295509295565b60006020828403121561578b57600080fd5b5035919050565b6000602082840312156157a457600080fd5b613d4b82615353565b600080604083850312156157c057600080fd5b6157c983615353565b91506153cb602084016151dc565b600080600080600060a086880312156157ef57600080fd5b6157f88661536b565b94506020860151935060408601519250606086015191506155686080870161536b565b8060005b6002811015610a5a57815184526020938401939091019060010161581f565b615848818361581b565b604001919050565b868152615860602082018761581b565b61586d606082018661581b565b61587a60a082018561581b565b61588760e082018461581b565b60609190911b7fffffffffffffffffffffffffffffffffffffffff000000000000000000000000166101208201526101340195945050505050565b8381526158d2602082018461581b565b606081019190915260800192915050565b60408101612c67828461581b565b600060208083528351808285015260005b8181101561591e57858101830151858201604001528201615902565b81811115615930576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b60006060820161ffff86168352602063ffffffff86168185015260606040850152818551808452608086019150828701935060005b818110156159b5578451
83529383019391830191600101615999565b509098975050505050505050565b60006101c08201905061ffff8816825263ffffffff808816602084015280871660408401528086166060840152846080840152835481811660a0850152615a1760c08501838360201c1663ffffffff169052565b615a2e60e08501838360401c1663ffffffff169052565b615a466101008501838360601c1663ffffffff169052565b615a5e6101208501838360801c1663ffffffff169052565b62ffffff60a082901c811661014086015260b882901c811661016086015260d082901c1661018085015260e81c6101a090930192909252979650505050505050565b82815260608101613d4b602083018461581b565b6000604082018483526020604081850152818551808452606086019150828701935060005b81811015615af557845183529383019391830191600101615ad9565b5090979650505050505050565b6000608082016bffffffffffffffffffffffff87168352602067ffffffffffffffff87168185015273ffffffffffffffffffffffffffffffffffffffff80871660408601526080606086015282865180855260a087019150838801945060005b81811015615b80578551841683529484019491840191600101615b62565b50909a9950505050505050505050565b604051610120810167ffffffffffffffff81118282101715615bb457615bb4615deb565b60405290565b60008219821115615bcd57615bcd615d2f565b500190565b600067ffffffffffffffff808316818516808303821115615bf557615bf5615d2f565b01949350505050565b60006bffffffffffffffffffffffff808316818516808303821115615bf557615bf5615d2f565b600082615c3457615c34615d5e565b500490565b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615615c7157615c71615d2f565b500290565b600082821015615c8857615c88615d2f565b500390565b60006bffffffffffffffffffffffff83811690831681811015615cb257615cb2615d2f565b039392505050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415615cec57615cec615d2f565b5060010190565b600067ffffffffffffffff80831681811415615d1157615d11615d2f565b6001019392505050565b600082615d2a57615d2a615d5e565b500690565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000
fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a") + ctorArgs, err := evmutils.ABIEncode(`[{"type":"address"}, {"type":"address"}, {"type":"address"}]`, linkAddress, bhsAddress, linkEthFeed) + require.NoError(t, err) + bytecode = append(bytecode, ctorArgs...) + nonce, err := backend.PendingNonceAt(ctx, neil.From) + require.NoError(t, err) + gasPrice, err := backend.SuggestGasPrice(ctx) + require.NoError(t, err) + unsignedTx := gethtypes.NewContractCreation(nonce, big.NewInt(0), 15e6, gasPrice, bytecode) + signedTx, err := neil.Signer(neil.From, unsignedTx) + require.NoError(t, err) + err = backend.SendTransaction(ctx, signedTx) + require.NoError(t, err, "could not deploy old vrf coordinator to simulated blockchain") + backend.Commit() + receipt, err := backend.TransactionReceipt(ctx, signedTx.Hash()) + require.NoError(t, err) + oldRootContractAddress := receipt.ContractAddress + require.NotEqual(t, common.HexToAddress("0x0"), oldRootContractAddress, "old vrf coordinator address equal to zero address, deployment failed") + oldRootContract, err := vrf_coordinator_v2.NewVRFCoordinatorV2(oldRootContractAddress, backend) + require.NoError(t, err, "could not create wrapper object for old vrf coordinator v2") + return oldRootContractAddress, oldRootContract +} + +// Send eth from prefunded account. +// Amount is number of ETH not wei. 
+func sendEth(t *testing.T, key ethkey.KeyV2, ec *backends.SimulatedBackend, to common.Address, eth int) { + nonce, err := ec.PendingNonceAt(testutils.Context(t), key.Address) + require.NoError(t, err) + tx := gethtypes.NewTx(&gethtypes.DynamicFeeTx{ + ChainID: testutils.SimulatedChainID, + Nonce: nonce, + GasTipCap: big.NewInt(1), + GasFeeCap: assets.GWei(10).ToInt(), // block base fee in sim + Gas: uint64(21_000), + To: &to, + Value: big.NewInt(0).Mul(big.NewInt(int64(eth)), big.NewInt(1e18)), + Data: nil, + }) + signedTx, err := gethtypes.SignTx(tx, gethtypes.NewLondonSigner(testutils.SimulatedChainID), key.ToEcdsaPrivKey()) + require.NoError(t, err) + err = ec.SendTransaction(testutils.Context(t), signedTx) + require.NoError(t, err) + ec.Commit() +} + +func subscribeVRF( + t *testing.T, + author *bind.TransactOpts, + consumerContract vrftesthelpers.VRFConsumerContract, + coordinator v22.CoordinatorV2_X, + backend *backends.SimulatedBackend, + fundingAmount *big.Int, + nativePayment bool, +) (v22.Subscription, *big.Int) { + var err error + if nativePayment { + _, err = consumerContract.CreateSubscriptionAndFundNative(author, fundingAmount) + } else { + _, err = consumerContract.CreateSubscriptionAndFund(author, fundingAmount) + } + require.NoError(t, err) + backend.Commit() + + subID, err := consumerContract.SSubId(nil) + require.NoError(t, err) + + sub, err := coordinator.GetSubscription(nil, subID) + require.NoError(t, err) + + if nativePayment { + require.Equal(t, fundingAmount.String(), sub.NativeBalance().String()) + } else { + require.Equal(t, fundingAmount.String(), sub.Balance().String()) + } + + return sub, subID +} + +func createVRFJobs( + t *testing.T, + fromKeys [][]ethkey.KeyV2, + app *cltest.TestApplication, + coordinator v22.CoordinatorV2_X, + coordinatorAddress common.Address, + batchCoordinatorAddress common.Address, + uni coordinatorV2UniverseCommon, + vrfOwnerAddress *common.Address, + vrfVersion vrfcommon.Version, + batchEnabled bool, + 
gasLanePrices ...*assets.Wei, +) (jobs []job.Job) { + if len(gasLanePrices) != len(fromKeys) { + t.Fatalf("must provide one gas lane price for each set of from addresses. len(gasLanePrices) != len(fromKeys) [%d != %d]", + len(gasLanePrices), len(fromKeys)) + } + // Create separate jobs for each gas lane and register their keys + for i, keys := range fromKeys { + var keyStrs []string + for _, k := range keys { + keyStrs = append(keyStrs, k.Address.String()) + } + + vrfkey, err := app.GetKeyStore().VRF().Create() + require.NoError(t, err) + + jid := uuid.New() + incomingConfs := 2 + var vrfOwnerString string + if vrfOwnerAddress != nil { + vrfOwnerString = vrfOwnerAddress.Hex() + } + + spec := testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + JobID: jid.String(), + Name: fmt.Sprintf("vrf-primary-%d", i), + VRFVersion: vrfVersion, + CoordinatorAddress: coordinatorAddress.Hex(), + BatchCoordinatorAddress: batchCoordinatorAddress.Hex(), + BatchFulfillmentEnabled: batchEnabled, + MinIncomingConfirmations: incomingConfs, + PublicKey: vrfkey.PublicKey.String(), + FromAddresses: keyStrs, + BackoffInitialDelay: 10 * time.Millisecond, + BackoffMaxDelay: time.Second, + V2: true, + GasLanePrice: gasLanePrices[i], + VRFOwnerAddress: vrfOwnerString, + EVMChainID: testutils.SimulatedChainID.String(), + }).Toml() + + jb, err := vrfcommon.ValidatedVRFSpec(spec) + require.NoError(t, err) + t.Log(jb.VRFSpec.PublicKey.MustHash(), vrfkey.PublicKey.MustHash()) + err = app.JobSpawner().CreateJob(&jb) + require.NoError(t, err) + registerProvingKeyHelper(t, uni, coordinator, vrfkey, ptr(gasLanePrices[i].ToInt().Uint64())) + jobs = append(jobs, jb) + } + // Wait until all jobs are active and listening for logs + gomega.NewWithT(t).Eventually(func() bool { + jbs := app.JobSpawner().ActiveJobs() + var count int + for _, jb := range jbs { + if jb.Type == job.VRF { + count++ + } + } + return count == len(fromKeys) + }, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue()) + 
// Unfortunately the lb needs heads to be able to backfill logs to new subscribers. + // To avoid confirming + // TODO: it could just backfill immediately upon receiving a new subscriber? (though would + // only be useful for tests, probably a more robust way is to have the job spawner accept a signal that a + // job is fully up and running and not add it to the active jobs list before then) + time.Sleep(2 * time.Second) + + return +} + +func requestRandomnessForWrapper( + t *testing.T, + vrfWrapperConsumer vrfv2_wrapper_consumer_example.VRFV2WrapperConsumerExample, + consumerOwner *bind.TransactOpts, + keyHash common.Hash, + subID *big.Int, + numWords uint32, + cbGasLimit uint32, + coordinator v22.CoordinatorV2_X, + uni coordinatorV2UniverseCommon, + wrapperOverhead uint32, +) (*big.Int, uint64) { + minRequestConfirmations := uint16(3) + _, err := vrfWrapperConsumer.MakeRequest( + consumerOwner, + cbGasLimit, + minRequestConfirmations, + numWords, + ) + require.NoError(t, err) + uni.backend.Commit() + + iter, err := coordinator.FilterRandomWordsRequested(nil, nil, []*big.Int{subID}, nil) + require.NoError(t, err, "could not filter RandomWordsRequested events") + + var events []v22.RandomWordsRequested + for iter.Next() { + events = append(events, iter.Event()) + } + + wrapperIter, err := vrfWrapperConsumer.FilterWrapperRequestMade(nil, nil) + require.NoError(t, err, "could not filter WrapperRequestMade events") + + wrapperConsumerEvents := []*vrfv2_wrapper_consumer_example.VRFV2WrapperConsumerExampleWrapperRequestMade{} + for wrapperIter.Next() { + wrapperConsumerEvents = append(wrapperConsumerEvents, wrapperIter.Event) + } + + event := events[len(events)-1] + eventKeyHash := event.KeyHash() + wrapperConsumerEvent := wrapperConsumerEvents[len(wrapperConsumerEvents)-1] + require.Equal(t, event.RequestID(), wrapperConsumerEvent.RequestId, "request ID in consumer log does not match request ID in coordinator log") + require.Equal(t, keyHash.Bytes(), eventKeyHash[:], 
"key hash of event (%s) and of request not equal (%s)", hex.EncodeToString(eventKeyHash[:]), keyHash.String()) + require.Equal(t, cbGasLimit+(cbGasLimit/63+1)+wrapperOverhead, event.CallbackGasLimit(), "callback gas limit of event and of request not equal") + require.Equal(t, minRequestConfirmations, event.MinimumRequestConfirmations(), "min request confirmations of event and of request not equal") + require.Equal(t, numWords, event.NumWords(), "num words of event and of request not equal") + + return event.RequestID(), event.Raw().BlockNumber +} + +// requestRandomness requests randomness from the given vrf consumer contract +// and asserts that the request ID logged by the RandomWordsRequested event +// matches the request ID that is returned and set by the consumer contract. +// The request ID and request block number are then returned to the caller. +func requestRandomnessAndAssertRandomWordsRequestedEvent( + t *testing.T, + vrfConsumerHandle vrftesthelpers.VRFConsumerContract, + consumerOwner *bind.TransactOpts, + keyHash common.Hash, + subID *big.Int, + numWords uint32, + cbGasLimit uint32, + coordinator v22.CoordinatorV2_X, + backend *backends.SimulatedBackend, + nativePayment bool, +) (requestID *big.Int, requestBlockNumber uint64) { + minRequestConfirmations := uint16(2) + _, err := vrfConsumerHandle.RequestRandomness( + consumerOwner, + keyHash, + subID, + minRequestConfirmations, + cbGasLimit, + numWords, + nativePayment, + ) + require.NoError(t, err) + backend.Commit() + + iter, err := coordinator.FilterRandomWordsRequested(nil, nil, []*big.Int{subID}, nil) + require.NoError(t, err, "could not filter RandomWordsRequested events") + + var events []v22.RandomWordsRequested + for iter.Next() { + events = append(events, iter.Event()) + } + + requestID, err = vrfConsumerHandle.SRequestId(nil) + require.NoError(t, err) + + event := events[len(events)-1] + eventKeyHash := event.KeyHash() + require.Equal(t, event.RequestID(), requestID, "request ID in contract 
does not match request ID in log") + require.Equal(t, keyHash.Bytes(), eventKeyHash[:], "key hash of event (%s) and of request not equal (%s)", hex.EncodeToString(eventKeyHash[:]), keyHash.String()) + require.Equal(t, cbGasLimit, event.CallbackGasLimit(), "callback gas limit of event and of request not equal") + require.Equal(t, minRequestConfirmations, event.MinimumRequestConfirmations(), "min request confirmations of event and of request not equal") + require.Equal(t, numWords, event.NumWords(), "num words of event and of request not equal") + require.Equal(t, nativePayment, event.NativePayment()) + + return requestID, event.Raw().BlockNumber +} + +// subscribeAndAssertSubscriptionCreatedEvent subscribes the given consumer contract +// to VRF and funds the subscription with the given fundingJuels amount. It returns the +// subscription ID of the resulting subscription. +func subscribeAndAssertSubscriptionCreatedEvent( + t *testing.T, + vrfConsumerHandle vrftesthelpers.VRFConsumerContract, + consumerOwner *bind.TransactOpts, + consumerContractAddress common.Address, + fundingAmount *big.Int, + coordinator v22.CoordinatorV2_X, + backend *backends.SimulatedBackend, + nativePayment bool, +) *big.Int { + // Create a subscription and fund with PLI. + _, subID := subscribeVRF(t, consumerOwner, vrfConsumerHandle, coordinator, backend, fundingAmount, nativePayment) + + // Assert the subscription event in the coordinator contract. 
+ iter, err := coordinator.FilterSubscriptionCreated(nil, []*big.Int{subID}) + require.NoError(t, err) + found := false + for iter.Next() { + if iter.Event().Owner() != consumerContractAddress { + require.FailNowf(t, "SubscriptionCreated event contains wrong owner address", "expected: %+v, actual: %+v", consumerContractAddress, iter.Event().Owner()) + } else { + found = true + } + } + require.True(t, found, "could not find SubscriptionCreated event for subID %d", subID) + + return subID +} + +func assertRandomWordsFulfilled( + t *testing.T, + requestID *big.Int, + expectedSuccess bool, + coordinator v22.CoordinatorV2_X, + nativePayment bool, +) (rwfe v22.RandomWordsFulfilled) { + // Check many times in case there are delays processing the event + // this could happen occasionally and cause flaky tests. + numChecks := 3 + found := false + for i := 0; i < numChecks; i++ { + filter, err := coordinator.FilterRandomWordsFulfilled(nil, []*big.Int{requestID}, nil) + require.NoError(t, err) + for filter.Next() { + require.Equal(t, expectedSuccess, filter.Event().Success(), "fulfillment event success not correct, expected: %+v, actual: %+v", expectedSuccess, filter.Event().Success()) + require.Equal(t, requestID, filter.Event().RequestID()) + found = true + rwfe = filter.Event() + } + + if found { + break + } + + // Wait a bit and try again. 
+ time.Sleep(time.Second) + } + require.True(t, found, "RandomWordsFulfilled event not found") + return +} + +func assertNumRandomWords( + t *testing.T, + contract vrftesthelpers.VRFConsumerContract, + numWords uint32, +) { + var err error + for i := uint32(0); i < numWords; i++ { + _, err = contract.SRandomWords(nil, big.NewInt(int64(i))) + require.NoError(t, err) + } +} + +func mine(t *testing.T, requestID, subID *big.Int, backend *backends.SimulatedBackend, db *sqlx.DB, vrfVersion vrfcommon.Version, chainId *big.Int) bool { + cfg := pgtest.NewQConfig(false) + txstore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg) + var metaField string + if vrfVersion == vrfcommon.V2Plus { + metaField = "GlobalSubId" + } else if vrfVersion == vrfcommon.V2 { + metaField = "SubId" + } else { + t.Errorf("unsupported vrf version %s", vrfVersion) + } + + return gomega.NewWithT(t).Eventually(func() bool { + backend.Commit() + txes, err := txstore.FindTxesByMetaFieldAndStates(testutils.Context(t), metaField, subID.String(), []txmgrtypes.TxState{txmgrcommon.TxConfirmed}, chainId) + require.NoError(t, err) + for _, tx := range txes { + meta, err := tx.GetMeta() + require.NoError(t, err) + if meta.RequestID.String() == common.BytesToHash(requestID.Bytes()).String() { + return true + } + } + return false + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) +} + +func mineBatch(t *testing.T, requestIDs []*big.Int, subID *big.Int, backend *backends.SimulatedBackend, db *sqlx.DB, vrfVersion vrfcommon.Version, chainId *big.Int) bool { + requestIDMap := map[string]bool{} + cfg := pgtest.NewQConfig(false) + txstore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg) + var metaField string + if vrfVersion == vrfcommon.V2Plus { + metaField = "GlobalSubId" + } else if vrfVersion == vrfcommon.V2 { + metaField = "SubId" + } else { + t.Errorf("unsupported vrf version %s", vrfVersion) + } + for _, requestID := range requestIDs { + 
requestIDMap[common.BytesToHash(requestID.Bytes()).String()] = false + } + return gomega.NewWithT(t).Eventually(func() bool { + backend.Commit() + txes, err := txstore.FindTxesByMetaFieldAndStates(testutils.Context(t), metaField, subID.String(), []txmgrtypes.TxState{txmgrcommon.TxConfirmed}, chainId) + require.NoError(t, err) + for _, tx := range txes { + meta, err := tx.GetMeta() + require.NoError(t, err) + for _, requestID := range meta.RequestIDs { + if _, ok := requestIDMap[requestID.String()]; ok { + requestIDMap[requestID.String()] = true + } + } + } + foundAll := true + for _, found := range requestIDMap { + foundAll = foundAll && found + } + t.Log("requestIDMap:", requestIDMap) + return foundAll + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) +} + +func mineForceFulfilled(t *testing.T, requestID *big.Int, subID uint64, forceFulfilledCount int64, uni coordinatorV2Universe, db *sqlx.DB) bool { + return gomega.NewWithT(t).Eventually(func() bool { + uni.backend.Commit() + var txs []txmgr.DbEthTx + err := db.Select(&txs, ` + SELECT * FROM evm.txes + WHERE evm.txes.state = 'confirmed' + AND evm.txes.meta->>'RequestID' = $1 + AND CAST(evm.txes.meta->>'SubId' AS NUMERIC) = $2 ORDER BY created_at DESC + `, common.BytesToHash(requestID.Bytes()).String(), subID) + require.NoError(t, err) + t.Log("num txs", len(txs)) + return len(txs) == int(forceFulfilledCount) + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) +} + +func TestVRFV2Integration_SingleConsumer_ForceFulfillment(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testSingleConsumerForcedFulfillment( + t, + ownerKey, + uni, + uni.oldRootContract, + uni.oldRootContractAddress, + uni.oldBatchCoordinatorContractAddress, + false, // batchEnabled + vrfcommon.V2, + ) +} + +func TestVRFV2Integration_SingleConsumer_ForceFulfillment_BatchEnabled(t *testing.T) { + t.Parallel() + ownerKey := 
cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testSingleConsumerForcedFulfillment( + t, + ownerKey, + uni, + uni.oldRootContract, + uni.oldRootContractAddress, + uni.oldBatchCoordinatorContractAddress, + true, // batchEnabled + vrfcommon.V2, + ) +} + +func TestVRFV2Integration_SingleConsumer_HappyPath_BatchFulfillment(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testSingleConsumerHappyPathBatchFulfillment( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + ptr(uni.vrfOwnerAddress), + 5, // number of requests to send + false, // don't send big callback + vrfcommon.V2, + false, + ) +} + +func TestVRFV2Integration_SingleConsumer_HappyPath_BatchFulfillment_BigGasCallback(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testSingleConsumerHappyPathBatchFulfillment( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + ptr(uni.vrfOwnerAddress), + 5, // number of requests to send + true, // send big callback + vrfcommon.V2, + false, + ) +} + +func TestVRFV2Integration_SingleConsumer_HappyPath(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testSingleConsumerHappyPath( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + ptr(uni.vrfOwnerAddress), + vrfcommon.V2, + false, 
+ func(t *testing.T, coordinator v22.CoordinatorV2_X, rwfe v22.RandomWordsFulfilled, expectedSubID *big.Int) { + require.PanicsWithValue(t, "VRF V2 RandomWordsFulfilled does not implement SubID", func() { + rwfe.SubID() + }) + }, + ) +} + +func TestVRFV2Integration_SingleConsumer_EOA_Request(t *testing.T) { + t.Skip("questionable value of this test") + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testEoa( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + false, + uni.batchBHSContractAddress, + ptr(uni.vrfOwnerAddress), + vrfcommon.V2, + ) +} + +func TestVRFV2Integration_SingleConsumer_EOA_Request_Batching_Enabled(t *testing.T) { + t.Skip("questionable value of this test") + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testEoa( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + true, + uni.batchBHSContractAddress, + ptr(uni.vrfOwnerAddress), + vrfcommon.V2, + ) +} + +func testEoa( + t *testing.T, + ownerKey ethkey.KeyV2, + uni coordinatorV2UniverseCommon, + batchingEnabled bool, + batchCoordinatorAddress common.Address, + vrfOwnerAddress *common.Address, + vrfVersion vrfcommon.Version) { + gasLimit := int64(2_500_000) + + finalityDepth := uint32(50) + + key1 := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ + // Gas lane. 
+ Key: ptr(key1.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].GasEstimator.LimitDefault = ptr(uint32(gasLimit)) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.EVM[0].FinalityDepth = ptr(finalityDepth) + }) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) + consumer := uni.vrfConsumers[0] + + // Createa a new subscription. + subID := setupAndFundSubscriptionAndConsumer( + t, + uni, + uni.rootContract, + uni.rootContractAddress, + consumer, + consumer.From, + vrfVersion, + assets.Ether(1).ToInt(), + ) + + // Fund gas lane. + sendEth(t, ownerKey, uni.backend, key1.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job. + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key1}}, + app, + uni.rootContract, + uni.rootContractAddress, + batchCoordinatorAddress, + uni, + vrfOwnerAddress, + vrfVersion, + batchingEnabled, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Make a randomness request with the EOA. This request is impossible to fulfill. + numWords := uint32(1) + minRequestConfirmations := uint16(2) + { + _, err := uni.rootContract.RequestRandomWords(consumer, keyHash, subID, minRequestConfirmations, uint32(200_000), numWords, false) + require.NoError(t, err) + } + uni.backend.Commit() + + // Ensure request is not fulfilled. + gomega.NewGomegaWithT(t).Consistently(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 0 + }, 5*time.Second, time.Second).Should(gomega.BeTrue()) + + // Create query to fetch the application's log broadcasts. 
+ var broadcastsBeforeFinality []evmlogger.LogBroadcast + var broadcastsAfterFinality []evmlogger.LogBroadcast + query := `SELECT block_hash, consumed, log_index, job_id FROM log_broadcasts` + q := pg.NewQ(app.GetSqlxDB(), app.Logger, app.Config.Database()) + + // Execute the query. + require.NoError(t, q.Select(&broadcastsBeforeFinality, query)) + + // Ensure there is only one log broadcast (our EOA request), and that + // it hasn't been marked as consumed yet. + require.Equal(t, 1, len(broadcastsBeforeFinality)) + require.Equal(t, false, broadcastsBeforeFinality[0].Consumed) + + // Create new blocks until the finality depth has elapsed. + for i := 0; i < int(finalityDepth); i++ { + uni.backend.Commit() + } + + // Ensure the request is still not fulfilled. + gomega.NewGomegaWithT(t).Consistently(func() bool { + uni.backend.Commit() + runs, err := app.PipelineORM().GetAllRuns() + require.NoError(t, err) + t.Log("runs", len(runs)) + return len(runs) == 0 + }, 5*time.Second, time.Second).Should(gomega.BeTrue()) + + // Execute the query for log broadcasts again after finality depth has elapsed. + require.NoError(t, q.Select(&broadcastsAfterFinality, query)) + + // Ensure that there is still only one log broadcast (our EOA request), but that + // it has been marked as "consumed," such that it won't be retried. 
+ require.Equal(t, 1, len(broadcastsAfterFinality)) + require.Equal(t, true, broadcastsAfterFinality[0].Consumed) + + t.Log("Done!") +} + +func TestVRFV2Integration_SingleConsumer_EIP150_HappyPath(t *testing.T) { + t.Skip("TODO: VRF-617") + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testSingleConsumerEIP150( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2, + false, + ) +} + +func TestVRFV2Integration_SingleConsumer_EIP150_Revert(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testSingleConsumerEIP150Revert( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2, + false, + ) +} + +func deployWrapper(t *testing.T, uni coordinatorV2UniverseCommon, wrapperOverhead uint32, coordinatorOverhead uint32, keyHash common.Hash) ( + wrapper *vrfv2_wrapper.VRFV2Wrapper, + wrapperAddress common.Address, + wrapperConsumer *vrfv2_wrapper_consumer_example.VRFV2WrapperConsumerExample, + wrapperConsumerAddress common.Address, +) { + wrapperAddress, _, wrapper, err := vrfv2_wrapper.DeployVRFV2Wrapper(uni.neil, uni.backend, uni.linkContractAddress, uni.linkEthFeedAddress, uni.rootContractAddress) + require.NoError(t, err) + uni.backend.Commit() + + _, err = wrapper.SetConfig(uni.neil, wrapperOverhead, coordinatorOverhead, 0, keyHash, 10) + require.NoError(t, err) + uni.backend.Commit() + + wrapperConsumerAddress, _, wrapperConsumer, err = vrfv2_wrapper_consumer_example.DeployVRFV2WrapperConsumerExample(uni.neil, uni.backend, uni.linkContractAddress, wrapperAddress) + require.NoError(t, err) + uni.backend.Commit() + + return +} + +func TestVRFV2Integration_SingleConsumer_Wrapper(t *testing.T) { + t.Parallel() + wrapperOverhead := uint32(30_000) + coordinatorOverhead := uint32(90_000) + + callBackGasLimit := 
int64(100_000) // base callback gas. + key1 := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ + // Gas lane. + Key: ptr(key1.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].GasEstimator.LimitDefault = ptr[uint32](3_500_000) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + }) + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) + + // Fund gas lane. + sendEth(t, ownerKey, uni.backend, key1.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job. + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key1}}, + app, + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + uni.coordinatorV2UniverseCommon, + ptr(uni.vrfOwnerAddress), + vrfcommon.V2, + false, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + wrapper, _, consumer, consumerAddress := deployWrapper(t, uni.coordinatorV2UniverseCommon, wrapperOverhead, coordinatorOverhead, keyHash) + + // Fetch Subscription ID for Wrapper. + wrapperSubID, err := wrapper.SUBSCRIPTIONID(nil) + require.NoError(t, err) + + // Fund Subscription. + b, err := evmutils.ABIEncode(`[{"type":"uint64"}]`, wrapperSubID) + require.NoError(t, err) + _, err = uni.linkContract.TransferAndCall(uni.sergey, uni.rootContractAddress, assets.Ether(100).ToInt(), b) + require.NoError(t, err) + uni.backend.Commit() + + // Fund Consumer Contract. + _, err = uni.linkContract.Transfer(uni.sergey, consumerAddress, assets.Ether(100).ToInt()) + require.NoError(t, err) + uni.backend.Commit() + + // Make the first randomness request. 
+ numWords := uint32(1) + requestID, _ := requestRandomnessForWrapper(t, *consumer, uni.neil, keyHash, new(big.Int).SetUint64(wrapperSubID), numWords, uint32(callBackGasLimit), uni.rootContract, uni.coordinatorV2UniverseCommon, wrapperOverhead) + + // Wait for simulation to pass. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err2 := app.PipelineORM().GetAllRuns() + require.NoError(t, err2) + t.Log("runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + // Mine the fulfillment that was queued. + mine(t, requestID, new(big.Int).SetUint64(wrapperSubID), uni.backend, db, vrfcommon.V2, testutils.SimulatedChainID) + + // Assert correct state of RandomWordsFulfilled event. + assertRandomWordsFulfilled(t, requestID, true, uni.rootContract, false) + + t.Log("Done!") +} + +func TestVRFV2Integration_Wrapper_High_Gas(t *testing.T) { + t.Parallel() + wrapperOverhead := uint32(30_000) + coordinatorOverhead := uint32(90_000) + + key1 := cltest.MustGenerateRandomKey(t) + callBackGasLimit := int64(2_000_000) // base callback gas. + gasLanePriceWei := assets.GWei(10) + config, db := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, assets.GWei(10), toml.KeySpecific{ + // Gas lane. + Key: ptr(key1.EIP55Address), + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].GasEstimator.LimitDefault = ptr[uint32](3_500_000) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + }) + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, ownerKey, key1) + + // Fund gas lane. 
+ sendEth(t, ownerKey, uni.backend, key1.Address, 10) + require.NoError(t, app.Start(testutils.Context(t))) + + // Create VRF job. + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key1}}, + app, + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + uni.coordinatorV2UniverseCommon, + ptr(uni.vrfOwnerAddress), + vrfcommon.V2, + false, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + wrapper, _, consumer, consumerAddress := deployWrapper(t, uni.coordinatorV2UniverseCommon, wrapperOverhead, coordinatorOverhead, keyHash) + + // Fetch Subscription ID for Wrapper. + wrapperSubID, err := wrapper.SUBSCRIPTIONID(nil) + require.NoError(t, err) + + // Fund Subscription. + b, err := evmutils.ABIEncode(`[{"type":"uint64"}]`, wrapperSubID) + require.NoError(t, err) + _, err = uni.linkContract.TransferAndCall(uni.sergey, uni.rootContractAddress, assets.Ether(100).ToInt(), b) + require.NoError(t, err) + uni.backend.Commit() + + // Fund Consumer Contract. + _, err = uni.linkContract.Transfer(uni.sergey, consumerAddress, assets.Ether(100).ToInt()) + require.NoError(t, err) + uni.backend.Commit() + + // Make the first randomness request. + numWords := uint32(1) + requestID, _ := requestRandomnessForWrapper(t, *consumer, uni.neil, keyHash, new(big.Int).SetUint64(wrapperSubID), numWords, uint32(callBackGasLimit), uni.rootContract, uni.coordinatorV2UniverseCommon, wrapperOverhead) + + // Wait for simulation to pass. + gomega.NewGomegaWithT(t).Eventually(func() bool { + uni.backend.Commit() + runs, err2 := app.PipelineORM().GetAllRuns() + require.NoError(t, err2) + t.Log("runs", len(runs)) + return len(runs) == 1 + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue()) + + // Mine the fulfillment that was queued. + mine(t, requestID, new(big.Int).SetUint64(wrapperSubID), uni.backend, db, vrfcommon.V2, testutils.SimulatedChainID) + + // Assert correct state of RandomWordsFulfilled event. 
+ assertRandomWordsFulfilled(t, requestID, true, uni.rootContract, false) + + t.Log("Done!") +} + +func TestVRFV2Integration_SingleConsumer_NeedsBlockhashStore(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 2) + testMultipleConsumersNeedBHS( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers, + uni.consumerContracts, + uni.consumerContractAddresses, + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + ptr(uni.vrfOwnerAddress), + vrfcommon.V2, + false, + ) +} + +func TestVRFV2Integration_SingleConsumer_NeedsTrustedBlockhashStore(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 2, true) + testMultipleConsumersNeedTrustedBHS( + t, + ownerKey, + uni, + uni.vrfConsumers, + uni.consumerContracts, + uni.consumerContractAddresses, + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + vrfcommon.V2Plus, + false, + false, + ) +} + +func TestVRFV2Integration_SingleConsumer_NeedsTrustedBlockhashStore_AfterDelay(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2PlusUniverse(t, ownerKey, 2, true) + testMultipleConsumersNeedTrustedBHS( + t, + ownerKey, + uni, + uni.vrfConsumers, + uni.consumerContracts, + uni.consumerContractAddresses, + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + vrfcommon.V2Plus, + false, + true, + ) +} + +func TestVRFV2Integration_SingleConsumer_BlockHeaderFeeder(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testBlockHeaderFeeder( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers, + uni.consumerContracts, + uni.consumerContractAddresses, + uni.rootContract, + uni.rootContractAddress, + 
uni.batchCoordinatorContractAddress, + ptr(uni.vrfOwnerAddress), + vrfcommon.V2, + false, + ) +} + +func TestVRFV2Integration_SingleConsumer_NeedsTopUp(t *testing.T) { + t.Parallel() + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testSingleConsumerNeedsTopUp( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.vrfConsumers[0], + uni.consumerContracts[0], + uni.consumerContractAddresses[0], + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + ptr(uni.vrfOwnerAddress), + assets.Ether(1).ToInt(), // initial funding of 1 PLI + assets.Ether(100).ToInt(), // top up of 100 PLI + vrfcommon.V2, + false, + ) +} + +func TestVRFV2Integration_SingleConsumer_BigGasCallback_Sandwich(t *testing.T) { + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testSingleConsumerBigGasCallbackSandwich( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2, + false, + ) +} + +func TestVRFV2Integration_SingleConsumer_MultipleGasLanes(t *testing.T) { + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 1) + testSingleConsumerMultipleGasLanes( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2, + false, + ) +} + +func TestVRFV2Integration_SingleConsumer_AlwaysRevertingCallback_StillFulfilled(t *testing.T) { + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 0) + testSingleConsumerAlwaysRevertingCallbackStillFulfilled( + t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2, + false, + ) +} + +func TestVRFV2Integration_ConsumerProxy_HappyPath(t *testing.T) { + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 0) + testConsumerProxyHappyPath( + 
t, + ownerKey, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2, + false, + ) +} + +func TestVRFV2Integration_ConsumerProxy_CoordinatorZeroAddress(t *testing.T) { + ownerKey := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, ownerKey, 0) + testConsumerProxyCoordinatorZeroAddress(t, uni.coordinatorV2UniverseCommon) +} + +func simulatedOverrides(t *testing.T, defaultGasPrice *assets.Wei, ks ...toml.KeySpecific) func(*plugin.Config, *plugin.Secrets) { + return func(c *plugin.Config, s *plugin.Secrets) { + require.Zero(t, testutils.SimulatedChainID.Cmp(c.EVM[0].ChainID.ToInt())) + c.EVM[0].GasEstimator.Mode = ptr("FixedPrice") + if defaultGasPrice != nil { + c.EVM[0].GasEstimator.PriceDefault = defaultGasPrice + } + c.EVM[0].GasEstimator.LimitDefault = ptr[uint32](3_500_000) + + c.Feature.LogPoller = ptr(true) + c.EVM[0].LogPollInterval = commonconfig.MustNewDuration(1 * time.Second) + + c.EVM[0].HeadTracker.MaxBufferSize = ptr[uint32](100) + c.EVM[0].HeadTracker.SamplingInterval = commonconfig.MustNewDuration(0) // Head sampling disabled + + c.EVM[0].Transactions.ResendAfterThreshold = commonconfig.MustNewDuration(0) + c.EVM[0].Transactions.ReaperThreshold = commonconfig.MustNewDuration(100 * time.Millisecond) + + c.EVM[0].FinalityDepth = ptr[uint32](15) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](1) + c.EVM[0].MinContractPayment = commonassets.NewLinkFromJuels(100) + c.EVM[0].KeySpecific = ks + } +} + +func registerProvingKeyHelper(t *testing.T, uni coordinatorV2UniverseCommon, coordinator v22.CoordinatorV2_X, vrfkey vrfkey.KeyV2, gasLaneMaxGas *uint64) { + // Register a proving key associated with the VRF job. 
+ p, err := vrfkey.PublicKey.Point() + require.NoError(t, err) + if uni.rootContract.Version() == vrfcommon.V2Plus { + if gasLaneMaxGas == nil { + t.Error("gasLaneMaxGas must be non-nil for V2+") + } + _, err = coordinator.RegisterProvingKey( + uni.neil, nil, pair(secp256k1.Coordinates(p)), gasLaneMaxGas) + } else { + if gasLaneMaxGas != nil { + t.Log("gasLaneMaxGas is ignored for V2") + } + _, err = coordinator.RegisterProvingKey( + uni.neil, &uni.nallory.From, pair(secp256k1.Coordinates(p)), nil) + } + require.NoError(t, err) + uni.backend.Commit() +} + +func TestExternalOwnerConsumerExample(t *testing.T) { + owner := testutils.MustNewSimTransactor(t) + random := testutils.MustNewSimTransactor(t) + genesisData := core.GenesisAlloc{ + owner.From: {Balance: assets.Ether(10).ToInt()}, + random.From: {Balance: assets.Ether(10).ToInt()}, + } + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + linkAddress, _, linkContract, err := link_token_interface.DeployLinkToken( + owner, backend) + require.NoError(t, err) + backend.Commit() + coordinatorAddress, _, coordinator, err := + vrf_coordinator_v2.DeployVRFCoordinatorV2( + owner, backend, linkAddress, common.Address{}, common.Address{}) + require.NoError(t, err) + _, err = coordinator.SetConfig(owner, uint16(1), uint32(10000), 1, 1, big.NewInt(10), vrf_coordinator_v2.VRFCoordinatorV2FeeConfig{ + FulfillmentFlatFeeLinkPPMTier1: 0, + FulfillmentFlatFeeLinkPPMTier2: 0, + FulfillmentFlatFeeLinkPPMTier3: 0, + FulfillmentFlatFeeLinkPPMTier4: 0, + FulfillmentFlatFeeLinkPPMTier5: 0, + ReqsForTier2: big.NewInt(0), + ReqsForTier3: big.NewInt(0), + ReqsForTier4: big.NewInt(0), + ReqsForTier5: big.NewInt(0), + }) + require.NoError(t, err) + backend.Commit() + consumerAddress, _, consumer, err := vrf_external_sub_owner_example.DeployVRFExternalSubOwnerExample(owner, backend, coordinatorAddress, linkAddress) + require.NoError(t, err) + backend.Commit() + _, err = 
linkContract.Transfer(owner, consumerAddress, assets.Ether(2).ToInt()) + require.NoError(t, err) + backend.Commit() + AssertLinkBalances(t, linkContract, []common.Address{owner.From, consumerAddress}, []*big.Int{assets.Ether(999_999_998).ToInt(), assets.Ether(2).ToInt()}) + + // Create sub, fund it and assign consumer + _, err = coordinator.CreateSubscription(owner) + require.NoError(t, err) + backend.Commit() + b, err := evmutils.ABIEncode(`[{"type":"uint64"}]`, uint64(1)) + require.NoError(t, err) + _, err = linkContract.TransferAndCall(owner, coordinatorAddress, big.NewInt(0), b) + require.NoError(t, err) + _, err = coordinator.AddConsumer(owner, 1, consumerAddress) + require.NoError(t, err) + _, err = consumer.RequestRandomWords(random, 1, 1, 1, 1, [32]byte{}) + require.Error(t, err) + _, err = consumer.RequestRandomWords(owner, 1, 1, 1, 1, [32]byte{}) + require.NoError(t, err) + + // Reassign ownership, check that only new owner can request + _, err = consumer.TransferOwnership(owner, random.From) + require.NoError(t, err) + _, err = consumer.RequestRandomWords(owner, 1, 1, 1, 1, [32]byte{}) + require.Error(t, err) + _, err = consumer.RequestRandomWords(random, 1, 1, 1, 1, [32]byte{}) + require.NoError(t, err) +} + +func TestSimpleConsumerExample(t *testing.T) { + owner := testutils.MustNewSimTransactor(t) + random := testutils.MustNewSimTransactor(t) + genesisData := core.GenesisAlloc{ + owner.From: {Balance: assets.Ether(10).ToInt()}, + } + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + linkAddress, _, linkContract, err := link_token_interface.DeployLinkToken( + owner, backend) + require.NoError(t, err) + backend.Commit() + coordinatorAddress, _, _, err := + vrf_coordinator_v2.DeployVRFCoordinatorV2( + owner, backend, linkAddress, common.Address{}, common.Address{}) + require.NoError(t, err) + backend.Commit() + consumerAddress, _, consumer, err := 
vrf_single_consumer_example.DeployVRFSingleConsumerExample(owner, backend, coordinatorAddress, linkAddress, 1, 1, 1, [32]byte{}) + require.NoError(t, err) + backend.Commit() + _, err = linkContract.Transfer(owner, consumerAddress, assets.Ether(2).ToInt()) + require.NoError(t, err) + backend.Commit() + AssertLinkBalances(t, linkContract, []common.Address{owner.From, consumerAddress}, []*big.Int{assets.Ether(999_999_998).ToInt(), assets.Ether(2).ToInt()}) + _, err = consumer.TopUpSubscription(owner, assets.Ether(1).ToInt()) + require.NoError(t, err) + backend.Commit() + AssertLinkBalances(t, linkContract, []common.Address{owner.From, consumerAddress, coordinatorAddress}, []*big.Int{assets.Ether(999_999_998).ToInt(), assets.Ether(1).ToInt(), assets.Ether(1).ToInt()}) + // Non-owner cannot withdraw + _, err = consumer.Withdraw(random, assets.Ether(1).ToInt(), owner.From) + require.Error(t, err) + _, err = consumer.Withdraw(owner, assets.Ether(1).ToInt(), owner.From) + require.NoError(t, err) + backend.Commit() + AssertLinkBalances(t, linkContract, []common.Address{owner.From, consumerAddress, coordinatorAddress}, []*big.Int{assets.Ether(999_999_999).ToInt(), assets.Ether(0).ToInt(), assets.Ether(1).ToInt()}) + _, err = consumer.Unsubscribe(owner, owner.From) + require.NoError(t, err) + backend.Commit() + AssertLinkBalances(t, linkContract, []common.Address{owner.From, consumerAddress, coordinatorAddress}, []*big.Int{assets.Ether(1_000_000_000).ToInt(), assets.Ether(0).ToInt(), assets.Ether(0).ToInt()}) +} + +func TestIntegrationVRFV2(t *testing.T) { + t.Parallel() + // Reconfigure the sim chain with a default gas price of 1 gwei, + // max gas limit of 2M and a key specific max 10 gwei price. + // Keep the prices low so we can operate with small link balance subscriptions. 
+ gasPrice := assets.GWei(1) + key := cltest.MustGenerateRandomKey(t) + gasLanePriceWei := assets.GWei(10) + config, _ := heavyweight.FullTestDBV2(t, func(c *plugin.Config, s *plugin.Secrets) { + simulatedOverrides(t, gasPrice, toml.KeySpecific{ + Key: &key.EIP55Address, + GasEstimator: toml.KeySpecificGasEstimator{PriceMax: gasLanePriceWei}, + })(c, s) + c.EVM[0].MinIncomingConfirmations = ptr[uint32](2) + }) + uni := newVRFCoordinatorV2Universe(t, key, 1) + carol := uni.vrfConsumers[0] + carolContract := uni.consumerContracts[0] + carolContractAddress := uni.consumerContractAddresses[0] + + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, config, uni.backend, key) + keys, err := app.KeyStore.Eth().EnabledKeysForChain(testutils.SimulatedChainID) + require.NoError(t, err) + require.Zero(t, key.Cmp(keys[0])) + + require.NoError(t, app.Start(testutils.Context(t))) + var chain legacyevm.Chain + chain, err = app.GetRelayers().LegacyEVMChains().Get(testutils.SimulatedChainID.String()) + require.NoError(t, err) + listenerV2 := v22.MakeTestListenerV2(chain) + + jbs := createVRFJobs( + t, + [][]ethkey.KeyV2{{key}}, + app, + uni.rootContract, + uni.rootContractAddress, + uni.batchCoordinatorContractAddress, + uni.coordinatorV2UniverseCommon, + ptr(uni.vrfOwnerAddress), + vrfcommon.V2, + false, + gasLanePriceWei) + keyHash := jbs[0].VRFSpec.PublicKey.MustHash() + + // Create and fund a subscription. + // We should see that our subscription has 1 pli. 
+ AssertLinkBalances(t, uni.linkContract, []common.Address{ + carolContractAddress, + uni.rootContractAddress, + }, []*big.Int{ + assets.Ether(500).ToInt(), // 500 pli + big.NewInt(0), // 0 link + }) + subFunding := decimal.RequireFromString("1000000000000000000") + _, err = carolContract.CreateSubscriptionAndFund(carol, + subFunding.BigInt()) + require.NoError(t, err) + uni.backend.Commit() + AssertLinkBalances(t, uni.linkContract, []common.Address{ + carolContractAddress, + uni.rootContractAddress, + uni.nallory.From, // Oracle's own address should have nothing + }, []*big.Int{ + assets.Ether(499).ToInt(), + assets.Ether(1).ToInt(), + big.NewInt(0), + }) + subId, err := carolContract.SSubId(nil) + require.NoError(t, err) + subStart, err := uni.rootContract.GetSubscription(nil, subId) + require.NoError(t, err) + + // Make a request for random words. + // By requesting 500k callback with a configured eth gas limit default of 500k, + // we ensure that the job is indeed adjusting the gaslimit to suit the users request. + gasRequested := 500_000 + nw := 10 + requestedIncomingConfs := 3 + _, err = carolContract.RequestRandomness(carol, keyHash, subId, uint16(requestedIncomingConfs), uint32(gasRequested), uint32(nw), false) + require.NoError(t, err) + + // Oracle tries to withdraw before its fulfilled should fail + _, err = uni.rootContract.OracleWithdraw(uni.nallory, uni.nallory.From, big.NewInt(1000)) + require.Error(t, err) + + for i := 0; i < requestedIncomingConfs; i++ { + uni.backend.Commit() + } + + // We expect the request to be serviced + // by the node. + var runs []pipeline.Run + gomega.NewWithT(t).Eventually(func() bool { + runs, err = app.PipelineORM().GetAllRuns() + require.NoError(t, err) + // It is possible that we send the test request + // before the job spawner has started the vrf services, which is fine + // the lb will backfill the logs. However, we need to + // keep blocks coming in for the lb to send the backfilled logs. 
+ uni.backend.Commit() + return len(runs) == 1 && runs[0].State == pipeline.RunStatusCompleted + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue()) + + // Wait for the request to be fulfilled on-chain. + var rf []v22.RandomWordsFulfilled + gomega.NewWithT(t).Eventually(func() bool { + rfIterator, err2 := uni.rootContract.FilterRandomWordsFulfilled(nil, nil, nil) + require.NoError(t, err2, "failed to logs") + uni.backend.Commit() + for rfIterator.Next() { + rf = append(rf, rfIterator.Event()) + } + return len(rf) == 1 + }, testutils.WaitTimeout(t), 500*time.Millisecond).Should(gomega.BeTrue()) + assert.True(t, rf[0].Success(), "expected callback to succeed") + fulfillReceipt, err := uni.backend.TransactionReceipt(testutils.Context(t), rf[0].Raw().TxHash) + require.NoError(t, err) + + // Assert all the random words received by the consumer are different and non-zero. + seen := make(map[string]struct{}) + var rw *big.Int + for i := 0; i < nw; i++ { + rw, err = carolContract.SRandomWords(nil, big.NewInt(int64(i))) + require.NoError(t, err) + _, ok := seen[rw.String()] + assert.False(t, ok) + seen[rw.String()] = struct{}{} + } + + // We should have exactly as much gas as we requested + // after accounting for function look up code, argument decoding etc. + // which should be fixed in this test. + ga, err := carolContract.SGasAvailable(nil) + require.NoError(t, err) + gaDecoding := big.NewInt(0).Add(ga, big.NewInt(3701)) + assert.Equal(t, 0, gaDecoding.Cmp(big.NewInt(int64(gasRequested))), "expected gas available %v to exceed gas requested %v", gaDecoding, gasRequested) + t.Log("gas available", ga.String()) + + // Assert that we were only charged for how much gas we actually used. + // We should be charged for the verification + our callbacks execution in link. 
+ subEnd, err := uni.rootContract.GetSubscription(nil, subId) + require.NoError(t, err) + var ( + end = decimal.RequireFromString(subEnd.Balance().String()) + start = decimal.RequireFromString(subStart.Balance().String()) + wei = decimal.RequireFromString("1000000000000000000") + gwei = decimal.RequireFromString("1000000000") + ) + t.Log("end balance", end) + linkWeiCharged := start.Sub(end) + // Remove flat fee of 0.001 to get fee for just gas. + linkCharged := linkWeiCharged.Sub(decimal.RequireFromString("1000000000000000")).Div(wei) + gasPriceD := decimal.NewFromBigInt(gasPrice.ToInt(), 0) + t.Logf("subscription charged %s with gas prices of %s gwei and %s ETH per PLI\n", linkCharged, gasPriceD.Div(gwei), vrftesthelpers.WeiPerUnitLink.Div(wei)) + expected := decimal.RequireFromString(strconv.Itoa(int(fulfillReceipt.GasUsed))).Mul(gasPriceD).Div(vrftesthelpers.WeiPerUnitLink) + t.Logf("expected sub charge gas use %v %v off by %v", fulfillReceipt.GasUsed, expected, expected.Sub(linkCharged)) + // The expected sub charge should be within 200 gas of the actual gas usage. + // wei/link * link / wei/gas = wei / (wei/gas) = gas + gasDiff := linkCharged.Sub(expected).Mul(vrftesthelpers.WeiPerUnitLink).Div(gasPriceD).Abs().IntPart() + t.Log("gasDiff", gasDiff) + assert.Less(t, gasDiff, int64(200)) + + // If the oracle tries to withdraw more than it was paid it should fail. + _, err = uni.rootContract.OracleWithdraw(uni.nallory, uni.nallory.From, linkWeiCharged.Add(decimal.NewFromInt(1)).BigInt()) + require.Error(t, err) + + // Assert the oracle can withdraw its payment. 
+ _, err = uni.rootContract.OracleWithdraw(uni.nallory, uni.nallory.From, linkWeiCharged.BigInt()) + require.NoError(t, err) + uni.backend.Commit() + AssertLinkBalances(t, uni.linkContract, []common.Address{ + carolContractAddress, + uni.rootContractAddress, + uni.nallory.From, // Oracle's own address should have nothing + }, []*big.Int{ + assets.Ether(499).ToInt(), + subFunding.Sub(linkWeiCharged).BigInt(), + linkWeiCharged.BigInt(), + }) + + // We should see the response count present + require.NoError(t, err) + var counts map[string]uint64 + counts, err = listenerV2.GetStartingResponseCountsV2(testutils.Context(t)) + require.NoError(t, err) + t.Log(counts, rf[0].RequestID().String()) + assert.Equal(t, uint64(1), counts[rf[0].RequestID().String()]) +} + +func TestMaliciousConsumer(t *testing.T) { + t.Parallel() + key := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, key, 1) + testMaliciousConsumer( + t, + key, + uni.coordinatorV2UniverseCommon, + uni.batchCoordinatorContractAddress, + false, + vrfcommon.V2, + ) +} + +func TestRequestCost(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, key, 1) + + cfg := configtest.NewGeneralConfigSimulated(t, nil) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, cfg, uni.backend, key) + require.NoError(t, app.Start(testutils.Context(t))) + + vrfkey, err := app.GetKeyStore().VRF().Create() + require.NoError(t, err) + registerProvingKeyHelper(t, uni.coordinatorV2UniverseCommon, uni.rootContract, vrfkey, nil) + t.Run("non-proxied consumer", func(tt *testing.T) { + carol := uni.vrfConsumers[0] + carolContract := uni.consumerContracts[0] + carolContractAddress := uni.consumerContractAddresses[0] + + _, err = carolContract.CreateSubscriptionAndFund(carol, + big.NewInt(1000000000000000000)) // 0.1 PLI + require.NoError(tt, err) + uni.backend.Commit() + subId, err := carolContract.SSubId(nil) + require.NoError(tt, err) + // Ensure even with 
large number of consumers its still cheap + var addrs []common.Address + for i := 0; i < 99; i++ { + addrs = append(addrs, testutils.NewAddress()) + } + _, err = carolContract.UpdateSubscription(carol, addrs) + require.NoError(tt, err) + estimate := estimateGas(tt, uni.backend, common.Address{}, + carolContractAddress, uni.consumerABI, + "requestRandomness", vrfkey.PublicKey.MustHash(), subId.Uint64(), uint16(2), uint32(10000), uint32(1)) + tt.Log("gas estimate of non-proxied testRequestRandomness:", estimate) + // V2 should be at least (87000-134000)/134000 = 35% cheaper + // Note that a second call drops further to 68998 gas, but would also drop in V1. + assert.Less(tt, estimate, uint64(90_000), + "requestRandomness tx gas cost more than expected") + }) + + t.Run("proxied consumer", func(tt *testing.T) { + consumerOwner := uni.neil + consumerContract := uni.consumerProxyContract + consumerContractAddress := uni.consumerProxyContractAddress + + // Create a subscription and fund with 5 PLI. + tx, err := consumerContract.CreateSubscriptionAndFund(consumerOwner, assets.Ether(5).ToInt()) + require.NoError(tt, err) + uni.backend.Commit() + r, err := uni.backend.TransactionReceipt(testutils.Context(t), tx.Hash()) + require.NoError(tt, err) + t.Log("gas used by proxied CreateSubscriptionAndFund:", r.GasUsed) + + subId, err := consumerContract.SSubId(nil) + require.NoError(tt, err) + _, err = uni.rootContract.GetSubscription(nil, subId) + require.NoError(tt, err) + + theAbi := evmtypes.MustGetABI(vrf_consumer_v2_upgradeable_example.VRFConsumerV2UpgradeableExampleMetaData.ABI) + estimate := estimateGas(tt, uni.backend, common.Address{}, + consumerContractAddress, &theAbi, + "requestRandomness", vrfkey.PublicKey.MustHash(), subId.Uint64(), uint16(2), uint32(10000), uint32(1)) + tt.Log("gas estimate of proxied requestRandomness:", estimate) + // There is some gas overhead of the delegatecall that is made by the proxy + // to the logic contract. 
See https://www.evm.codes/#f4?fork=grayGlacier for a detailed + // breakdown of the gas costs of a delegatecall. + assert.Less(tt, estimate, uint64(96_000), + "proxied testRequestRandomness tx gas cost more than expected") + }) +} + +func TestMaxConsumersCost(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, key, 1) + carol := uni.vrfConsumers[0] + carolContract := uni.consumerContracts[0] + carolContractAddress := uni.consumerContractAddresses[0] + + cfg := configtest.NewGeneralConfigSimulated(t, nil) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, cfg, uni.backend, key) + require.NoError(t, app.Start(testutils.Context(t))) + _, err := carolContract.CreateSubscriptionAndFund(carol, + big.NewInt(1000000000000000000)) // 0.1 PLI + require.NoError(t, err) + uni.backend.Commit() + subId, err := carolContract.SSubId(nil) + require.NoError(t, err) + var addrs []common.Address + for i := 0; i < 98; i++ { + addrs = append(addrs, testutils.NewAddress()) + } + _, err = carolContract.UpdateSubscription(carol, addrs) + // Ensure even with max number of consumers its still reasonable gas costs. 
+ require.NoError(t, err) + estimate := estimateGas(t, uni.backend, carolContractAddress, + uni.rootContractAddress, uni.coordinatorABI, + "removeConsumer", subId.Uint64(), carolContractAddress) + t.Log(estimate) + assert.Less(t, estimate, uint64(310000)) + estimate = estimateGas(t, uni.backend, carolContractAddress, + uni.rootContractAddress, uni.coordinatorABI, + "addConsumer", subId.Uint64(), testutils.NewAddress()) + t.Log(estimate) + assert.Less(t, estimate, uint64(100000)) +} + +func TestFulfillmentCost(t *testing.T) { + key := cltest.MustGenerateRandomKey(t) + uni := newVRFCoordinatorV2Universe(t, key, 1) + + cfg := configtest.NewGeneralConfigSimulated(t, nil) + app := cltest.NewApplicationWithConfigV2AndKeyOnSimulatedBlockchain(t, cfg, uni.backend, key) + require.NoError(t, app.Start(testutils.Context(t))) + + vrfkey, err := app.GetKeyStore().VRF().Create() + require.NoError(t, err) + registerProvingKeyHelper(t, uni.coordinatorV2UniverseCommon, uni.rootContract, vrfkey, nil) + + var ( + nonProxiedConsumerGasEstimate uint64 + proxiedConsumerGasEstimate uint64 + ) + t.Run("non-proxied consumer", func(tt *testing.T) { + carol := uni.vrfConsumers[0] + carolContract := uni.consumerContracts[0] + carolContractAddress := uni.consumerContractAddresses[0] + + _, err := carolContract.CreateSubscriptionAndFund(carol, + big.NewInt(1000000000000000000)) // 0.1 PLI + require.NoError(tt, err) + uni.backend.Commit() + subId, err := carolContract.SSubId(nil) + require.NoError(tt, err) + + gasRequested := 50_000 + nw := 1 + requestedIncomingConfs := 3 + _, err = carolContract.RequestRandomness(carol, vrfkey.PublicKey.MustHash(), subId, uint16(requestedIncomingConfs), uint32(gasRequested), uint32(nw), false) + require.NoError(t, err) + for i := 0; i < requestedIncomingConfs; i++ { + uni.backend.Commit() + } + + requestLog := FindLatestRandomnessRequestedLog(tt, uni.rootContract, vrfkey.PublicKey.MustHash(), nil) + s, err := proof.BigToSeed(requestLog.PreSeed()) + 
require.NoError(t, err) + proof, rc, err := proof.GenerateProofResponseV2(app.GetKeyStore().VRF(), vrfkey.ID(), proof.PreSeedDataV2{ + PreSeed: s, + BlockHash: requestLog.Raw().BlockHash, + BlockNum: requestLog.Raw().BlockNumber, + SubId: subId.Uint64(), + CallbackGasLimit: uint32(gasRequested), + NumWords: uint32(nw), + Sender: carolContractAddress, + }) + require.NoError(tt, err) + nonProxiedConsumerGasEstimate = estimateGas(tt, uni.backend, common.Address{}, + uni.rootContractAddress, uni.coordinatorABI, + "fulfillRandomWords", proof, rc) + t.Log("non-proxied consumer fulfillment gas estimate:", nonProxiedConsumerGasEstimate) + // Establish very rough bounds on fulfillment cost + assert.Greater(tt, nonProxiedConsumerGasEstimate, uint64(120_000)) + assert.Less(tt, nonProxiedConsumerGasEstimate, uint64(500_000)) + }) + + t.Run("proxied consumer", func(tt *testing.T) { + consumerOwner := uni.neil + consumerContract := uni.consumerProxyContract + consumerContractAddress := uni.consumerProxyContractAddress + + _, err := consumerContract.CreateSubscriptionAndFund(consumerOwner, assets.Ether(5).ToInt()) + require.NoError(t, err) + uni.backend.Commit() + subId, err := consumerContract.SSubId(nil) + require.NoError(t, err) + gasRequested := 50_000 + nw := 1 + requestedIncomingConfs := 3 + _, err = consumerContract.RequestRandomness(consumerOwner, vrfkey.PublicKey.MustHash(), subId, uint16(requestedIncomingConfs), uint32(gasRequested), uint32(nw), false) + require.NoError(t, err) + for i := 0; i < requestedIncomingConfs; i++ { + uni.backend.Commit() + } + + requestLog := FindLatestRandomnessRequestedLog(t, uni.rootContract, vrfkey.PublicKey.MustHash(), nil) + require.Equal(tt, subId, requestLog.SubID()) + s, err := proof.BigToSeed(requestLog.PreSeed()) + require.NoError(t, err) + proof, rc, err := proof.GenerateProofResponseV2(app.GetKeyStore().VRF(), vrfkey.ID(), proof.PreSeedDataV2{ + PreSeed: s, + BlockHash: requestLog.Raw().BlockHash, + BlockNum: 
requestLog.Raw().BlockNumber, + SubId: subId.Uint64(), + CallbackGasLimit: uint32(gasRequested), + NumWords: uint32(nw), + Sender: consumerContractAddress, + }) + require.NoError(t, err) + proxiedConsumerGasEstimate = estimateGas(t, uni.backend, common.Address{}, + uni.rootContractAddress, uni.coordinatorABI, + "fulfillRandomWords", proof, rc) + t.Log("proxied consumer fulfillment gas estimate", proxiedConsumerGasEstimate) + // Establish very rough bounds on fulfillment cost + assert.Greater(t, proxiedConsumerGasEstimate, uint64(120_000)) + assert.Less(t, proxiedConsumerGasEstimate, uint64(500_000)) + }) +} + +func TestStartingCountsV1(t *testing.T) { + cfg, db := heavyweight.FullTestDBNoFixturesV2(t, nil) + + lggr := logger.TestLogger(t) + qCfg := pgtest.NewQConfig(false) + txStore := txmgr.NewTxStore(db, logger.TestLogger(t), qCfg) + ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg.Database()) + ec := evmclimocks.NewClient(t) + ec.On("ConfiguredChainID").Return(testutils.SimulatedChainID) + ec.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(2), nil).Maybe() + txm := makeTestTxm(t, txStore, ks, ec) + relayExtenders := evmtest.NewChainRelayExtenders(t, evmtest.TestChainOpts{KeyStore: ks.Eth(), Client: ec, DB: db, GeneralConfig: cfg, TxManager: txm}) + legacyChains := evmrelay.NewLegacyChainsFromRelayerExtenders(relayExtenders) + chain, err := legacyChains.Get(testutils.SimulatedChainID.String()) + require.NoError(t, err) + listenerV1 := &v1.Listener{ + Chain: chain, + } + listenerV2 := v22.MakeTestListenerV2(chain) + var counts map[[32]byte]uint64 + counts, err = listenerV1.GetStartingResponseCountsV1(testutils.Context(t)) + require.NoError(t, err) + assert.Equal(t, 0, len(counts)) + err = ks.Unlock(testutils.Password) + require.NoError(t, err) + k, err := ks.Eth().Create(testutils.SimulatedChainID) + require.NoError(t, err) + b := time.Now() + n1, n2, n3, n4 := evmtypes.Nonce(0), evmtypes.Nonce(1), evmtypes.Nonce(2), evmtypes.Nonce(3) + 
reqID := evmutils.PadByteToHash(0x10) + m1 := txmgr.TxMeta{ + RequestID: &reqID, + } + md1, err := json.Marshal(&m1) + require.NoError(t, err) + md1SQL := sqlutil.JSON(md1) + reqID2 := evmutils.PadByteToHash(0x11) + m2 := txmgr.TxMeta{ + RequestID: &reqID2, + } + md2, err := json.Marshal(&m2) + md2SQL := sqlutil.JSON(md2) + require.NoError(t, err) + chainID := ubig.New(testutils.SimulatedChainID) + confirmedTxes := []txmgr.Tx{ + { + Sequence: &n1, + FromAddress: k.Address, + Error: null.String{}, + BroadcastAt: &b, + InitialBroadcastAt: &b, + CreatedAt: b, + State: txmgrcommon.TxConfirmed, + Meta: &sqlutil.JSON{}, + EncodedPayload: []byte{}, + ChainID: chainID.ToInt(), + }, + { + Sequence: &n2, + FromAddress: k.Address, + Error: null.String{}, + BroadcastAt: &b, + InitialBroadcastAt: &b, + CreatedAt: b, + State: txmgrcommon.TxConfirmed, + Meta: &md1SQL, + EncodedPayload: []byte{}, + ChainID: chainID.ToInt(), + }, + { + Sequence: &n3, + FromAddress: k.Address, + Error: null.String{}, + BroadcastAt: &b, + InitialBroadcastAt: &b, + CreatedAt: b, + State: txmgrcommon.TxConfirmed, + Meta: &md2SQL, + EncodedPayload: []byte{}, + ChainID: chainID.ToInt(), + }, + { + Sequence: &n4, + FromAddress: k.Address, + Error: null.String{}, + BroadcastAt: &b, + InitialBroadcastAt: &b, + CreatedAt: b, + State: txmgrcommon.TxConfirmed, + Meta: &md2SQL, + EncodedPayload: []byte{}, + ChainID: chainID.ToInt(), + }, + } + // add unconfirmed txes + unconfirmedTxes := []txmgr.Tx{} + for i := int64(4); i < 6; i++ { + reqID3 := evmutils.PadByteToHash(0x12) + md, err2 := json.Marshal(&txmgr.TxMeta{ + RequestID: &reqID3, + }) + require.NoError(t, err2) + mdSQL := sqlutil.JSON(md) + newNonce := evmtypes.Nonce(i + 1) + unconfirmedTxes = append(unconfirmedTxes, txmgr.Tx{ + Sequence: &newNonce, + FromAddress: k.Address, + Error: null.String{}, + CreatedAt: b, + State: txmgrcommon.TxUnconfirmed, + BroadcastAt: &b, + InitialBroadcastAt: &b, + Meta: &mdSQL, + EncodedPayload: []byte{}, + ChainID: 
chainID.ToInt(), + }) + } + txList := append(confirmedTxes, unconfirmedTxes...) + for i := range txList { + err = txStore.InsertTx(&txList[i]) + require.NoError(t, err) + } + + // add tx attempt for confirmed + broadcastBlock := int64(1) + var txAttempts []txmgr.TxAttempt + for i := range confirmedTxes { + txAttempts = append(txAttempts, txmgr.TxAttempt{ + TxID: int64(i + 1), + TxFee: gas.EvmFee{Legacy: assets.NewWeiI(100)}, + SignedRawTx: []byte(`blah`), + Hash: evmutils.NewHash(), + BroadcastBeforeBlockNum: &broadcastBlock, + State: txmgrtypes.TxAttemptBroadcast, + CreatedAt: time.Now(), + ChainSpecificFeeLimit: uint32(100), + }) + } + // add tx attempt for unconfirmed + for i := range unconfirmedTxes { + txAttempts = append(txAttempts, txmgr.TxAttempt{ + TxID: int64(i + 1 + len(confirmedTxes)), + TxFee: gas.EvmFee{Legacy: assets.NewWeiI(100)}, + SignedRawTx: []byte(`blah`), + Hash: evmutils.NewHash(), + State: txmgrtypes.TxAttemptInProgress, + CreatedAt: time.Now(), + ChainSpecificFeeLimit: uint32(100), + }) + } + for _, txAttempt := range txAttempts { + t.Log("tx attempt eth tx id: ", txAttempt.TxID) + } + for i := range txAttempts { + err = txStore.InsertTxAttempt(&txAttempts[i]) + require.NoError(t, err) + } + + // add evm.receipts + receipts := []evmtypes.Receipt{} + for i := 0; i < 4; i++ { + receipts = append(receipts, evmtypes.Receipt{ + BlockHash: evmutils.NewHash(), + TxHash: txAttempts[i].Hash, + BlockNumber: big.NewInt(broadcastBlock), + TransactionIndex: 1, + }) + } + for i := range receipts { + _, err = txStore.InsertReceipt(&receipts[i]) + require.NoError(t, err) + } + + counts, err = listenerV1.GetStartingResponseCountsV1(testutils.Context(t)) + require.NoError(t, err) + assert.Equal(t, 3, len(counts)) + assert.Equal(t, uint64(1), counts[evmutils.PadByteToHash(0x10)]) + assert.Equal(t, uint64(2), counts[evmutils.PadByteToHash(0x11)]) + assert.Equal(t, uint64(2), counts[evmutils.PadByteToHash(0x12)]) + + countsV2, err := 
listenerV2.GetStartingResponseCountsV2(testutils.Context(t))
	require.NoError(t, err)
	t.Log(countsV2)
	// V2 keys counts by the decimal string form of the request ID.
	assert.Equal(t, 3, len(countsV2))
	assert.Equal(t, uint64(1), countsV2[big.NewInt(0x10).String()])
	assert.Equal(t, uint64(2), countsV2[big.NewInt(0x11).String()])
	assert.Equal(t, uint64(2), countsV2[big.NewInt(0x12).String()])
}

// TestVRFV2Integration_ReplayOldRequestsOnStartUp checks that requests made
// before the node starts are replayed and fulfilled on startup.
func TestVRFV2Integration_ReplayOldRequestsOnStartUp(t *testing.T) {
	t.Parallel()
	ownerKey := cltest.MustGenerateRandomKey(t)
	uni := newVRFCoordinatorV2Universe(t, ownerKey, 1)

	testReplayOldRequestsOnStartUp(
		t,
		ownerKey,
		uni.coordinatorV2UniverseCommon,
		uni.vrfConsumers[0],
		uni.consumerContracts[0],
		uni.consumerContractAddresses[0],
		uni.rootContract,
		uni.rootContractAddress,
		uni.batchCoordinatorContractAddress,
		nil,
		vrfcommon.V2,
		false,
	)
}

// FindLatestRandomnessRequestedLog polls the coordinator until at least one
// RandomWordsRequested log matching keyHash (and requestID, if non-nil) is
// found, and returns the most recent match.
func FindLatestRandomnessRequestedLog(t *testing.T,
	coordContract v22.CoordinatorV2_X,
	keyHash [32]byte,
	requestID *big.Int) v22.RandomWordsRequested {
	var rf []v22.RandomWordsRequested
	gomega.NewWithT(t).Eventually(func() bool {
		rfIterator, err2 := coordContract.FilterRandomWordsRequested(nil, [][32]byte{keyHash}, nil, []common.Address{})
		require.NoError(t, err2, "failed to logs")
		for rfIterator.Next() {
			// nil requestID matches any request
			if requestID == nil || requestID.Cmp(rfIterator.Event().RequestID()) == 0 {
				rf = append(rf, rfIterator.Event())
			}
		}
		return len(rf) >= 1
	}, testutils.WaitTimeout(t), 500*time.Millisecond).Should(gomega.BeTrue())
	latest := len(rf) - 1
	return rf[latest]
}

// AssertLinkBalance asserts the PLI token balance of address.
func AssertLinkBalance(t *testing.T, linkContract *link_token_interface.LinkToken, address common.Address, balance *big.Int) {
	b, err := linkContract.BalanceOf(nil, address)
	require.NoError(t, err)
	assert.Equal(t, balance.String(), b.String(), "invalid balance for %v", address)
}

// AssertNativeBalance asserts the native token balance of address on the
// simulated backend.
func AssertNativeBalance(t *testing.T, backend *backends.SimulatedBackend, address common.Address, balance *big.Int) {
	b, err := backend.BalanceAt(testutils.Context(t), address, nil)
	require.NoError(t, err)
	assert.Equal(t, balance.String(), b.String(), "invalid balance for %v", address)
}

// AssertLinkBalances asserts one expected balance per address; the two slices
// must have equal length.
func AssertLinkBalances(t *testing.T, linkContract *link_token_interface.LinkToken, addresses []common.Address, balances []*big.Int) {
	require.Equal(t, len(addresses), len(balances))
	for i, a := range addresses {
		AssertLinkBalance(t, linkContract, a, balances[i])
	}
}

// ptr returns a pointer to its argument.
func ptr[T any](t T) *T { return &t }

// pair packs two big.Ints into a fixed-size array (used for proof points).
func pair(x, y *big.Int) [2]*big.Int { return [2]*big.Int{x, y} }

// estimateGas returns the estimated gas cost of running the given method on the
// contract at address to, on the given backend, with the given args, and given
// that the transaction is sent from the from address.
func estimateGas(t *testing.T, backend *backends.SimulatedBackend,
	from, to common.Address, abi *abi.ABI, method string, args ...interface{},
) uint64 {
	rawData, err := abi.Pack(method, args...)
	require.NoError(t, err, "failed to construct raw %s transaction with args %s",
		method, args)
	callMsg := ethereum.CallMsg{From: from, To: &to, Data: rawData}
	estimate, err := backend.EstimateGas(testutils.Context(t), callMsg)
	require.NoError(t, err, "failed to estimate gas from %s call with args %s",
		method, args)
	return estimate
}
diff --git a/core/services/vrf/v2/listener_v2.go b/core/services/vrf/v2/listener_v2.go
new file mode 100644
index 00000000..a4268058
--- /dev/null
+++ b/core/services/vrf/v2/listener_v2.go
@@ -0,0 +1,277 @@
package v2

import (
	"context"
	"encoding/hex"
	"math/big"
	"strings"
	"sync"
	"time"

	"github.com/avast/retry-go/v4"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/pkg/errors"
	"github.com/theodesp/go-heaps/pairing"

	"github.com/goplugin/plugin-common/pkg/services"

	txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr"
	txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types"
	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller"
	evmtypes
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/types"
	"github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/aggregator_v3_interface"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_vrf_coordinator_v2"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_vrf_coordinator_v2plus"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_owner"
	"github.com/goplugin/pluginv3.0/v2/core/logger"
	"github.com/goplugin/pluginv3.0/v2/core/services/job"
	"github.com/goplugin/pluginv3.0/v2/core/services/keystore"
	"github.com/goplugin/pluginv3.0/v2/core/services/pg"
	"github.com/goplugin/pluginv3.0/v2/core/services/pipeline"
	"github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon"
)

var (
	_                         job.ServiceCtx = &listenerV2{}
	coordinatorV2ABI                         = evmtypes.MustGetABI(vrf_coordinator_v2.VRFCoordinatorV2ABI)
	coordinatorV2PlusABI                     = evmtypes.MustGetABI(vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalABI)
	batchCoordinatorV2ABI                    = evmtypes.MustGetABI(batch_vrf_coordinator_v2.BatchVRFCoordinatorV2ABI)
	batchCoordinatorV2PlusABI                = evmtypes.MustGetABI(batch_vrf_coordinator_v2plus.BatchVRFCoordinatorV2PlusABI)
	vrfOwnerABI                              = evmtypes.MustGetABI(vrf_owner.VRFOwnerMetaData.ABI)
	// These are the transaction states used when summing up already reserved subscription funds that are about to be used in in-flight transactions
	reserveEthLinkQueryStates = []txmgrtypes.TxState{txmgrcommon.TxUnconfirmed, txmgrcommon.TxUnstarted, txmgrcommon.TxInProgress}
)

const (
	// GasAfterPaymentCalculation is the gas used after computing the payment
	GasAfterPaymentCalculation = 21000 + // base cost of the transaction
		100 + 5000 + // warm subscription balance read and update. See https://eips.ethereum.org/EIPS/eip-2929
		2*2100 + 20000 - // cold read oracle address and oracle balance and first time oracle balance update, note first time will be 20k, but 5k subsequently
		4800 + // request delete refund (refunds happen after execution), note pre-london fork was 15k. See https://eips.ethereum.org/EIPS/eip-3529
		6685 // Positive static costs of argument encoding etc. note that it varies by +/- x*12 for every x bytes of non-zero data in the proof.

	// BatchFulfillmentIterationGasCost is the cost of a single iteration of the batch coordinator's
	// loop. This is used to determine the gas allowance for a batch fulfillment call.
	BatchFulfillmentIterationGasCost = 52_000

	// backoffFactor is the factor by which to increase the delay each time a request fails.
	backoffFactor = 1.3

	// tx meta field names under which the subscription ID is stored.
	txMetaFieldSubId  = "SubId"
	txMetaGlobalSubId = "GlobalSubId"
)

// New constructs a VRF v2/v2plus listener as a job.ServiceCtx. The aggregator
// may be nil for VRF V2 plus.
func New(
	cfg vrfcommon.Config,
	feeCfg vrfcommon.FeeConfig,
	l logger.Logger,
	chain legacyevm.Chain,
	chainID *big.Int,
	q pg.Q,
	coordinator CoordinatorV2_X,
	batchCoordinator batch_vrf_coordinator_v2.BatchVRFCoordinatorV2Interface,
	vrfOwner vrf_owner.VRFOwnerInterface,
	aggregator *aggregator_v3_interface.AggregatorV3Interface,
	pipelineRunner pipeline.Runner,
	gethks keystore.Eth,
	job job.Job,
	reqAdded func(),
	inflightCache vrfcommon.InflightCache,
	fulfillmentDeduper *vrfcommon.LogDeduper,
) job.ServiceCtx {
	return &listenerV2{
		cfg:                   cfg,
		feeCfg:                feeCfg,
		l:                     logger.Sugared(l),
		chain:                 chain,
		chainID:               chainID,
		coordinator:           coordinator,
		batchCoordinator:      batchCoordinator,
		vrfOwner:              vrfOwner,
		pipelineRunner:        pipelineRunner,
		job:                   job,
		q:                     q,
		gethks:                gethks,
		chStop:                make(chan struct{}),
		reqAdded:              reqAdded,
		blockNumberToReqID:    pairing.New(),
		latestHeadMu:          sync.RWMutex{},
		wg:                    &sync.WaitGroup{},
		aggregator:            aggregator,
		inflightCache:         inflightCache,
		fulfillmentLogDeduper: fulfillmentDeduper,
	}
}

// listenerV2 is the VRF v2/v2plus request listener and fulfiller.
type listenerV2 struct {
	services.StateMachine
	cfg     vrfcommon.Config
	feeCfg  vrfcommon.FeeConfig
	l       logger.SugaredLogger
	chain   legacyevm.Chain
	chainID *big.Int

	coordinator      CoordinatorV2_X
	batchCoordinator batch_vrf_coordinator_v2.BatchVRFCoordinatorV2Interface
	vrfOwner         vrf_owner.VRFOwnerInterface

	pipelineRunner pipeline.Runner
	job            job.Job
	q              pg.Q
	gethks         keystore.Eth
	chStop         services.StopChan

	reqAdded func() // A simple debug helper

	// Data structures for reorg attack protection
	// We want a map so we can do an O(1) count update every fulfillment log we get.
	respCount map[string]uint64
	// This auxiliary heap is used when we need to purge the
	// respCount map - we repeatedly want to remove the minimum log.
	// You could use a sorted list if the completed logs arrive in order, but they may not.
	blockNumberToReqID *pairing.PairHeap

	// head tracking data structures
	latestHeadMu     sync.RWMutex
	latestHeadNumber uint64

	// Wait group to wait on all goroutines to shut down.
	wg *sync.WaitGroup

	// aggregator client to get link/eth feed prices from chain. Can be nil for VRF V2 plus
	aggregator aggregator_v3_interface.AggregatorV3InterfaceInterface

	// fulfillmentLogDeduper prevents re-processing fulfillment logs.
	// fulfillment logs are used to increment counts in the respCount map
	// and to update the blockNumberToReqID heap.
	fulfillmentLogDeduper *vrfcommon.LogDeduper

	// inflightCache is a cache of in-flight requests, used to prevent
	// re-processing of requests that are in-flight or already fulfilled.
	inflightCache vrfcommon.InflightCache
}

// HealthReport reports the health of the listener keyed by its name.
func (lsn *listenerV2) HealthReport() map[string]error {
	return map[string]error{lsn.Name(): lsn.Healthy()}
}

// Name returns the logger name of this listener.
func (lsn *listenerV2) Name() string { return lsn.l.Name() }

// Start starts listenerV2.
func (lsn *listenerV2) Start(ctx context.Context) error {
	return lsn.StartOnce("VRFListenerV2", func() error {
		// Check gas limit configuration
		confCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
		defer cancel()
		conf, err := lsn.coordinator.GetConfig(&bind.CallOpts{Context: confCtx})
		gasLimit := lsn.feeCfg.LimitDefault()
		vrfLimit := lsn.feeCfg.LimitJobType().VRF()
		if vrfLimit != nil {
			gasLimit = *vrfLimit
		}
		if err != nil {
			// Best-effort check: a config read failure does not block startup.
			lsn.l.Criticalw("Error getting coordinator config for gas limit check, starting anyway.", "err", err)
		} else if conf.MaxGasLimit()+(GasProofVerification*2) > gasLimit {
			lsn.l.Criticalw("Node gas limit setting may not be high enough to fulfill all requests; it should be increased. Starting anyway.",
				"currentGasLimit", gasLimit,
				"neededGasLimit", conf.MaxGasLimit()+(GasProofVerification*2),
				"callbackGasLimit", conf.MaxGasLimit(),
				"proofVerificationGas", GasProofVerification)
		}

		spec := job.LoadDefaultVRFPollPeriod(*lsn.job.VRFSpec)

		// Seed the reorg-protection response counts from already-confirmed
		// fulfillment transactions before any goroutines start.
		var respCount map[string]uint64
		respCount, err = lsn.GetStartingResponseCountsV2(ctx)
		if err != nil {
			return err
		}
		lsn.respCount = respCount

		if lsn.job.VRFSpec.CustomRevertsPipelineEnabled && lsn.vrfOwner != nil && lsn.job.VRFSpec.VRFOwnerAddress != nil {
			// Start reverted txns handler in background
			lsn.wg.Add(1)
			go func() {
				defer lsn.wg.Done()
				lsn.runRevertedTxnsHandler(spec.PollPeriod)
			}()
		}

		// Log listener gathers request logs and processes them
		lsn.wg.Add(1)
		go func() {
			defer lsn.wg.Done()
			lsn.runLogListener(spec.PollPeriod, spec.MinIncomingConfirmations)
		}()

		return nil
	})
}

// GetStartingResponseCountsV2 returns fulfillment counts keyed by the decimal
// string form of the request ID, derived from confirmed fulfillment
// transactions within the chain's finality depth.
func (lsn *listenerV2) GetStartingResponseCountsV2(ctx context.Context) (respCount map[string]uint64, err error) {
	respCounts := map[string]uint64{}
	var latestBlockNum *big.Int
	// Retry client call for LatestBlockHeight if fails
	// Want to avoid failing startup due to potential faulty RPC call
	err = retry.Do(func() error {
		latestBlockNum, err = lsn.chain.Client().LatestBlockHeight(ctx)
		return err
	}, retry.Attempts(10), retry.Delay(500*time.Millisecond))
	if err != nil {
		return nil, err
	}
	if latestBlockNum == nil {
		return nil, errors.New("LatestBlockHeight return nil block num")
	}
	confirmedBlockNum := latestBlockNum.Int64() - int64(lsn.chain.Config().EVM().FinalityDepth())
	// Only check as far back as the evm finality depth for completed transactions.
	var counts []vrfcommon.RespCountEntry
	counts, err = vrfcommon.GetRespCounts(ctx, lsn.chain.TxManager(), lsn.chainID, confirmedBlockNum)
	if err != nil {
		// Continue with an empty map, do not block job on this.
		lsn.l.Errorw("Unable to read previous confirmed fulfillments", "err", err)
		return respCounts, nil
	}

	for _, c := range counts {
		// Remove the quotes from the json
		req := strings.Replace(c.RequestID, `"`, ``, 2)
		// Remove the 0x prefix
		// NOTE(review): assumes req always carries a "0x" prefix after
		// unquoting; a shorter/unprefixed value would mis-decode — confirm
		// against GetRespCounts' output format.
		b, err := hex.DecodeString(req[2:])
		if err != nil {
			lsn.l.Errorw("Unable to read fulfillment", "err", err, "reqID", c.RequestID)
			continue
		}
		bi := new(big.Int).SetBytes(b)
		respCounts[bi.String()] = uint64(c.Count)
	}
	return respCounts, nil
}

// setLatestHead records the highest observed head number (monotonic).
func (lsn *listenerV2) setLatestHead(head logpoller.LogPollerBlock) {
	lsn.latestHeadMu.Lock()
	defer lsn.latestHeadMu.Unlock()
	num := uint64(head.BlockNumber)
	if num > lsn.latestHeadNumber {
		lsn.latestHeadNumber = num
	}
}

// getLatestHead returns the highest head number seen so far.
func (lsn *listenerV2) getLatestHead() uint64 {
	lsn.latestHeadMu.RLock()
	defer lsn.latestHeadMu.RUnlock()
	return lsn.latestHeadNumber
}

// Close complies with job.Service
func (lsn *listenerV2) Close() error {
	return lsn.StopOnce("VRFListenerV2", func() error {
		close(lsn.chStop)
		// wait on the request handler, log listener
		lsn.wg.Wait()
		return nil
	})
}
diff --git a/core/services/vrf/v2/listener_v2_helpers.go b/core/services/vrf/v2/listener_v2_helpers.go
new file mode 100644
index 00000000..247aa385
--- /dev/null
+++
b/core/services/vrf/v2/listener_v2_helpers.go @@ -0,0 +1,103 @@ +package v2 + +import ( + "math/big" + "strings" + "time" + + "github.com/google/uuid" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" +) + +func uniqueReqs(reqs []pendingRequest) int { + s := map[string]struct{}{} + for _, r := range reqs { + s[r.req.RequestID().String()] = struct{}{} + } + return len(s) +} + +// GasProofVerification is an upper limit on the gas used for verifying the VRF proof on-chain. +// It can be used to estimate the amount of PLI or native needed to fulfill a request. +const GasProofVerification uint32 = 200_000 + +// EstimateFeeJuels estimates the amount of link needed to fulfill a request +// given the callback gas limit, the gas price, and the wei per unit link. +// An error is returned if the wei per unit link provided is zero. +func EstimateFeeJuels(callbackGasLimit uint32, maxGasPriceWei, weiPerUnitLink *big.Int) (*big.Int, error) { + if weiPerUnitLink.Cmp(big.NewInt(0)) == 0 { + return nil, errors.New("wei per unit link is zero") + } + maxGasUsed := big.NewInt(int64(callbackGasLimit + GasProofVerification)) + costWei := maxGasUsed.Mul(maxGasUsed, maxGasPriceWei) + // Multiply by 1e18 first so that we don't lose a ton of digits due to truncation when we divide + // by weiPerUnitLink + numerator := costWei.Mul(costWei, big.NewInt(1e18)) + costJuels := numerator.Quo(numerator, weiPerUnitLink) + return costJuels, nil +} + +// EstimateFeeWei estimates the amount of wei needed to fulfill a request +func EstimateFeeWei(callbackGasLimit uint32, maxGasPriceWei *big.Int) (*big.Int, error) { + maxGasUsed := big.NewInt(int64(callbackGasLimit + GasProofVerification)) + costWei := maxGasUsed.Mul(maxGasUsed, maxGasPriceWei) + return costWei, nil +} + +// observeRequestSimDuration records the time between the given requests simulations or +// the time until it's first simulation, whichever is applicable. +// Cases: +// 1. 
// Never simulated: in this case, we want to observe the time until simulated
// on the utcTimestamp field of the pending request.
// 2. Simulated before: in this case, lastTry will be set to a non-zero time value,
// in which case we'd want to use that as a relative point from when we last tried
// the request.
func observeRequestSimDuration(jobName string, extJobID uuid.UUID, vrfVersion vrfcommon.Version, pendingReqs []pendingRequest) {
	now := time.Now().UTC()
	for _, request := range pendingReqs {
		// First time around lastTry will be zero because the request has not been
		// simulated yet. It will be updated every time the request is simulated (in the event
		// the request is simulated multiple times, due to it being underfunded).
		if request.lastTry.IsZero() {
			vrfcommon.MetricTimeUntilInitialSim.
				WithLabelValues(jobName, extJobID.String(), string(vrfVersion)).
				Observe(float64(now.Sub(request.utcTimestamp)))
		} else {
			vrfcommon.MetricTimeBetweenSims.
				WithLabelValues(jobName, extJobID.String(), string(vrfVersion)).
				Observe(float64(now.Sub(request.lastTry)))
		}
	}
}

// ptr returns a pointer to its argument.
func ptr[T any](t T) *T { return &t }

// isProofVerificationError reports whether errMsg matches one of the known
// on-chain VRF proof verification revert messages.
func isProofVerificationError(errMsg string) bool {
	// See VRF.sol for all these messages
	// NOTE: it's unclear which of these errors are impossible and which
	// may actually happen, so including them all to be safe.
	errMessages := []string{
		"invalid x-ordinate",
		"invalid y-ordinate",
		"zero scalar",
		"invZ must be inverse of z",
		"bad witness",
		"points in sum must be distinct",
		"First mul check failed",
		"Second mul check failed",
		"public key is not on curve",
		"gamma is not on curve",
		"cGammaWitness is not on curve",
		"sHashWitness is not on curve",
		"addr(c*pk+s*g)!=_uWitness",
		"invalid proof",
	}
	for _, msg := range errMessages {
		if strings.Contains(errMsg, msg) {
			return true
		}
	}
	return false
}
diff --git a/core/services/vrf/v2/listener_v2_helpers_test.go b/core/services/vrf/v2/listener_v2_helpers_test.go
new file mode 100644
index 00000000..7e2e4b67
--- /dev/null
+++ b/core/services/vrf/v2/listener_v2_helpers_test.go
@@ -0,0 +1,53 @@
package v2_test

import (
	"math/big"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"
	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr"
	v2 "github.com/goplugin/pluginv3.0/v2/core/services/vrf/v2"
	"github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon"
)

// TestListener_EstimateFeeJuels pins known fee-estimation outputs and the
// zero-divisor error path.
func TestListener_EstimateFeeJuels(t *testing.T) {
	callbackGasLimit := uint32(150_000)
	maxGasPriceGwei := assets.GWei(30).ToInt()
	weiPerUnitLink := big.NewInt(5898160000000000)
	actual, err := v2.EstimateFeeJuels(callbackGasLimit, maxGasPriceGwei, weiPerUnitLink)
	expected := big.NewInt(1780216203019246680)
	require.True(t, actual.Cmp(expected) == 0, "expected:", expected.String(), "actual:", actual.String())
	require.NoError(t, err)

	weiPerUnitLink = big.NewInt(5898161234554321)
	actual, err = v2.EstimateFeeJuels(callbackGasLimit, maxGasPriceGwei, weiPerUnitLink)
	expected = big.NewInt(1780215830399116719)
	require.True(t, actual.Cmp(expected) == 0, "expected:", expected.String(), "actual:", actual.String())
	require.NoError(t, err)

	actual, err = v2.EstimateFeeJuels(callbackGasLimit, maxGasPriceGwei, big.NewInt(0))
	require.Nil(t, actual)
	require.Error(t, err)
}

// Test_TxListDeduper checks that txes with duplicate IDs are collapsed.
func Test_TxListDeduper(t *testing.T) {
	tx1 := &txmgr.Tx{
		ID:      1,
		Value:   *big.NewInt(0),
		ChainID: big.NewInt(0),
	}
	tx2 := &txmgr.Tx{
		ID:      1,
		Value:   *big.NewInt(1),
		ChainID: big.NewInt(0),
	}
	tx3 := &txmgr.Tx{
		ID:      2,
		Value:   *big.NewInt(1),
		ChainID: big.NewInt(0),
	}
	txList := vrfcommon.DedupeTxList([]*txmgr.Tx{tx1, tx2, tx3})
	require.Equal(t, len(txList), 2)
}
diff --git a/core/services/vrf/v2/listener_v2_log_listener.go b/core/services/vrf/v2/listener_v2_log_listener.go
new file mode 100644
index 00000000..50f01a7c
--- /dev/null
+++ b/core/services/vrf/v2/listener_v2_log_listener.go
@@ -0,0 +1,451 @@
package v2

import (
	"bytes"
	"context"
	"fmt"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"go.uber.org/multierr"

	"github.com/goplugin/plugin-common/pkg/utils/mathutil"
	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller"
	evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types"
	"github.com/goplugin/pluginv3.0/v2/core/logger"
	"github.com/goplugin/pluginv3.0/v2/core/services/pg"
	"github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon"
)

// runLogListener is the main poll loop: every pollPeriod it registers the log
// filter (idempotent), initializes the last processed block on first pass,
// polls for request/fulfillment logs, and processes pending requests. Runs
// until chStop closes.
func (lsn *listenerV2) runLogListener(
	pollPeriod time.Duration,
	minConfs uint32,
) {
	lsn.l.Infow("Listening for run requests via log poller",
		"minConfs", minConfs)
	ticker := time.NewTicker(pollPeriod)
	defer ticker.Stop()
	var (
		lastProcessedBlock int64
		startingUp         = true
	)
	ctx, cancel := lsn.chStop.NewCtx()
	defer cancel()
	for {
		select {
		case <-lsn.chStop:
			return
		case <-ticker.C:
			start := time.Now()
			lsn.l.Debugw("log listener loop")
			// Filter registration is idempotent, so we can just call it every time
			// and retry on errors using the ticker.
			err := lsn.chain.LogPoller().RegisterFilter(logpoller.Filter{
				Name: logpoller.FilterName(
					"VRFListener",
					"version", lsn.coordinator.Version(),
					"keyhash", lsn.job.VRFSpec.PublicKey.MustHash(),
					"coordinatorAddress", lsn.coordinator.Address()),
				EventSigs: evmtypes.HashArray{
					lsn.coordinator.RandomWordsFulfilledTopic(),
					lsn.coordinator.RandomWordsRequestedTopic(),
				},
				Addresses: evmtypes.AddressArray{
					lsn.coordinator.Address(),
				},
			})
			if err != nil {
				lsn.l.Errorw("error registering filter in log poller, retrying",
					"err", err,
					"elapsed", time.Since(start))
				continue
			}

			// on startup we want to initialize the last processed block
			if startingUp {
				lsn.l.Debugw("initializing last processed block on startup")
				lastProcessedBlock, err = lsn.initializeLastProcessedBlock(ctx)
				if err != nil {
					lsn.l.Errorw("error initializing last processed block, retrying",
						"err", err,
						"elapsed", time.Since(start))
					continue
				}
				startingUp = false
				lsn.l.Debugw("initialized last processed block", "lastProcessedBlock", lastProcessedBlock)
			}

			pending, err := lsn.pollLogs(ctx, minConfs, lastProcessedBlock)
			if err != nil {
				lsn.l.Errorw("error polling vrf logs, retrying",
					"err", err,
					"elapsed", time.Since(start))
				continue
			}

			// process pending requests and insert any fulfillments into the inflight cache
			lsn.processPendingVRFRequests(ctx, pending)

			lastProcessedBlock, err = lsn.updateLastProcessedBlock(ctx, lastProcessedBlock)
			if err != nil {
				// non-fatal: keep the previous watermark and continue
				lsn.l.Errorw("error updating last processed block, continuing anyway", "err", err)
			} else {
				lsn.l.Debugw("updated last processed block", "lastProcessedBlock", lastProcessedBlock)
			}
			lsn.l.Debugw("log listener loop done", "elapsed", time.Since(start))
		}
	}
}

// initializeLastProcessedBlock returns the earliest block number that we need to
// process requests for. This is the block number of the earliest unfulfilled request
// or the latest finalized block, if there are no unfulfilled requests.
// TODO: add tests
func (lsn *listenerV2) initializeLastProcessedBlock(ctx context.Context) (lastProcessedBlock int64, err error) {
	lp := lsn.chain.LogPoller()
	start := time.Now()

	// will retry on error in the runLogListener loop
	latestBlock, err := lp.LatestBlock()
	if err != nil {
		return 0, fmt.Errorf("LogPoller.LatestBlock(): %w", err)
	}
	fromTimestamp := time.Now().UTC().Add(-lsn.job.VRFSpec.RequestTimeout)
	ll := lsn.l.With(
		"latestFinalizedBlock", latestBlock.FinalizedBlockNumber,
		"latestBlock", latestBlock.BlockNumber,
		"fromTimestamp", fromTimestamp)
	ll.Debugw("Initializing last processed block")
	defer func() {
		ll.Debugw("Done initializing last processed block", "elapsed", time.Since(start))
	}()

	// Replay far enough back to cover the request timeout window; clamp at
	// block 1.
	numBlocksToReplay := numReplayBlocks(lsn.job.VRFSpec.RequestTimeout, lsn.chain.ID())
	ll.Debugw("running replay on log poller")
	err = lp.Replay(ctx, mathutil.Max(latestBlock.FinalizedBlockNumber-numBlocksToReplay, 1))
	if err != nil {
		return 0, fmt.Errorf("LogPoller.Replay: %w", err)
	}

	// get randomness requested logs with the appropriate keyhash
	// keyhash is specified in topic1
	requests, err := lp.IndexedLogsCreatedAfter(
		lsn.coordinator.RandomWordsRequestedTopic(), // event sig
		lsn.coordinator.Address(),                   // address
		1,                                           // topic index
		[]common.Hash{lsn.job.VRFSpec.PublicKey.MustHash()}, // topic values
		fromTimestamp,       // from time
		logpoller.Finalized, // confs
	)
	if err != nil {
		return 0, fmt.Errorf("LogPoller.LogsCreatedAfter RandomWordsRequested logs: %w", err)
	}

	// fulfillments don't have keyhash indexed, we'll have to get all of them
	// TODO: can we instead write a single query that joins on request id's somehow?
	fulfillments, err := lp.LogsCreatedAfter(
		lsn.coordinator.RandomWordsFulfilledTopic(), // event sig
		lsn.coordinator.Address(),                   // address
		fromTimestamp,                               // from time
		logpoller.Finalized,                         // confs
	)
	if err != nil {
		return 0, fmt.Errorf("LogPoller.LogsCreatedAfter RandomWordsFulfilled logs: %w", err)
	}

	unfulfilled, _, _ := lsn.getUnfulfilled(append(requests, fulfillments...), ll)
	// find request block of earliest unfulfilled request
	// even if this block is > latest finalized, we use latest finalized as earliest unprocessed
	// because re-orgs can occur on any unfinalized block.
	var earliestUnfulfilledBlock = latestBlock.FinalizedBlockNumber
	for _, req := range unfulfilled {
		if req.Raw().BlockNumber < uint64(earliestUnfulfilledBlock) {
			earliestUnfulfilledBlock = int64(req.Raw().BlockNumber)
		}
	}

	return earliestUnfulfilledBlock, nil
}

// updateLastProcessedBlock advances the watermark: it returns the block of the
// earliest still-unfulfilled, non-timed-out request, or the latest finalized
// block if none remain.
func (lsn *listenerV2) updateLastProcessedBlock(ctx context.Context, currLastProcessedBlock int64) (lastProcessedBlock int64, err error) {
	lp := lsn.chain.LogPoller()
	start := time.Now()

	latestBlock, err := lp.LatestBlock(pg.WithParentCtx(ctx))
	if err != nil {
		lsn.l.Errorw("error getting latest block", "err", err)
		return 0, fmt.Errorf("LogPoller.LatestBlock(): %w", err)
	}
	ll := lsn.l.With(
		"currLastProcessedBlock", currLastProcessedBlock,
		"latestBlock", latestBlock.BlockNumber,
		"latestFinalizedBlock", latestBlock.FinalizedBlockNumber)
	ll.Debugw("updating last processed block")
	defer func() {
		ll.Debugw("done updating last processed block", "elapsed", time.Since(start))
	}()

	logs, err := lp.LogsWithSigs(
		currLastProcessedBlock,
		latestBlock.FinalizedBlockNumber,
		[]common.Hash{lsn.coordinator.RandomWordsFulfilledTopic(), lsn.coordinator.RandomWordsRequestedTopic()},
		lsn.coordinator.Address(),
		pg.WithParentCtx(ctx),
	)
	if err != nil {
		return currLastProcessedBlock, fmt.Errorf("LogPoller.LogsWithSigs: %w", err)
	}

	unfulfilled, unfulfilledLP, _
	:= lsn.getUnfulfilled(logs, ll)
	// find request block of earliest unfulfilled request
	// even if this block is > latest finalized, we use latest finalized as earliest unprocessed
	// because re-orgs can occur on any unfinalized block.
	var earliestUnprocessedRequestBlock = latestBlock.FinalizedBlockNumber
	for i, req := range unfulfilled {
		// need to drop requests that have timed out otherwise the earliestUnprocessedRequestBlock
		// will be unnecessarily far back and our queries will be slower.
		if unfulfilledLP[i].CreatedAt.Before(time.Now().UTC().Add(-lsn.job.VRFSpec.RequestTimeout)) {
			// request timed out, don't process
			lsn.l.Debugw("request timed out, skipping",
				"reqID", req.RequestID(),
			)
			continue
		}
		if req.Raw().BlockNumber < uint64(earliestUnprocessedRequestBlock) {
			earliestUnprocessedRequestBlock = int64(req.Raw().BlockNumber)
		}
	}

	return earliestUnprocessedRequestBlock, nil
}

// pollLogs uses the log poller to poll for the latest VRF logs
func (lsn *listenerV2) pollLogs(ctx context.Context, minConfs uint32, lastProcessedBlock int64) (pending []pendingRequest, err error) {
	start := time.Now()
	lp := lsn.chain.LogPoller()

	// latest unfinalized block used on purpose to get bleeding edge logs
	// we don't really have the luxury to wait for finalization on most chains
	// if we want to fulfill on time.
	latestBlock, err := lp.LatestBlock()
	if err != nil {
		return nil, fmt.Errorf("LogPoller.LatestBlock(): %w", err)
	}
	lsn.setLatestHead(latestBlock)
	ll := lsn.l.With(
		"lastProcessedBlock", lastProcessedBlock,
		"minConfs", minConfs,
		"latestBlock", latestBlock.BlockNumber,
		"latestFinalizedBlock", latestBlock.FinalizedBlockNumber)
	ll.Debugw("polling for logs")
	defer func() {
		ll.Debugw("done polling for logs", "elapsed", time.Since(start))
	}()

	// We don't specify confs because each request can have a different conf above
	// the minimum. So we do all conf handling in getConfirmedAt.
	logs, err := lp.LogsWithSigs(
		lastProcessedBlock,
		latestBlock.BlockNumber,
		[]common.Hash{lsn.coordinator.RandomWordsFulfilledTopic(), lsn.coordinator.RandomWordsRequestedTopic()},
		lsn.coordinator.Address(),
		pg.WithParentCtx(ctx),
	)
	if err != nil {
		return nil, fmt.Errorf("LogPoller.LogsWithSigs: %w", err)
	}

	unfulfilled, unfulfilledLP, fulfilled := lsn.getUnfulfilled(logs, ll)
	if len(unfulfilled) > 0 {
		ll.Debugw("found unfulfilled logs", "unfulfilled", len(unfulfilled))
	} else {
		ll.Debugw("no unfulfilled logs found")
	}

	lsn.handleFulfilled(fulfilled)

	return lsn.handleRequested(unfulfilled, unfulfilledLP, minConfs), nil
}

// getUnfulfilled partitions raw poller logs into still-unfulfilled requests
// (with their poller records) and a map of fulfillments keyed by request ID.
// Logs with a key hash other than this job's are ignored.
func (lsn *listenerV2) getUnfulfilled(logs []logpoller.Log, ll logger.Logger) (unfulfilled []RandomWordsRequested, unfulfilledLP []logpoller.Log, fulfilled map[string]RandomWordsFulfilled) {
	var (
		requested       = make(map[string]RandomWordsRequested)
		requestedLP     = make(map[string]logpoller.Log)
		errs            error
		expectedKeyHash = lsn.job.VRFSpec.PublicKey.MustHash()
	)
	fulfilled = make(map[string]RandomWordsFulfilled)
	for _, l := range logs {
		if l.EventSig == lsn.coordinator.RandomWordsFulfilledTopic() {
			parsed, err2 := lsn.coordinator.ParseRandomWordsFulfilled(l.ToGethLog())
			if err2 != nil {
				// should never happen
				errs = multierr.Append(errs, err2)
				continue
			}
			fulfilled[parsed.RequestID().String()] = parsed
		} else if l.EventSig == lsn.coordinator.RandomWordsRequestedTopic() {
			parsed, err2 := lsn.coordinator.ParseRandomWordsRequested(l.ToGethLog())
			if err2 != nil {
				// should never happen
				errs = multierr.Append(errs, err2)
				continue
			}
			keyHash := parsed.KeyHash()
			if !bytes.Equal(keyHash[:], expectedKeyHash[:]) {
				// wrong keyhash, can ignore
				continue
			}
			requested[parsed.RequestID().String()] = parsed
			requestedLP[parsed.RequestID().String()] = l
		}
	}
	// should never happen, unsure if recoverable
	// may be worth a panic
	if errs != nil {
ll.Errorw("encountered parse errors", "err", errs) + } + + if len(fulfilled) > 0 || len(requested) > 0 { + ll.Infow("found logs", "fulfilled", len(fulfilled), "requested", len(requested)) + } else { + ll.Debugw("no logs found") + } + + // find unfulfilled requests by comparing requested events with the fulfilled events + for reqID, req := range requested { + if _, isFulfilled := fulfilled[reqID]; !isFulfilled { + unfulfilled = append(unfulfilled, req) + unfulfilledLP = append(unfulfilledLP, requestedLP[reqID]) + } + } + + return unfulfilled, unfulfilledLP, fulfilled +} + +func (lsn *listenerV2) getConfirmedAt(req RandomWordsRequested, nodeMinConfs uint32) uint64 { + // Take the max(nodeMinConfs, requestedConfs + requestedConfsDelay). + // Add the requested confs delay if provided in the jobspec so that we avoid an edge case + // where the primary and backup VRF v2 nodes submit a proof at the same time. + minConfs := nodeMinConfs + if uint32(req.MinimumRequestConfirmations())+uint32(lsn.job.VRFSpec.RequestedConfsDelay) > nodeMinConfs { + minConfs = uint32(req.MinimumRequestConfirmations()) + uint32(lsn.job.VRFSpec.RequestedConfsDelay) + } + newConfs := uint64(minConfs) * (1 << lsn.respCount[req.RequestID().String()]) + // We cap this at 200 because solidity only supports the most recent 256 blocks + // in the contract so if it was older than that, fulfillments would start failing + // without the blockhash store feeder. We use 200 to give the node plenty of time + // to fulfill even on fast chains. 
+	if newConfs > 200 {
+		newConfs = 200
+	}
+	if lsn.respCount[req.RequestID().String()] > 0 {
+		lsn.l.Warnw("Duplicate request found after fulfillment, doubling incoming confirmations",
+			"txHash", req.Raw().TxHash,
+			"blockNumber", req.Raw().BlockNumber,
+			"blockHash", req.Raw().BlockHash,
+			"reqID", req.RequestID().String(),
+			"newConfs", newConfs)
+		vrfcommon.IncDupeReqs(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, lsn.coordinator.Version())
+	}
+	return req.Raw().BlockNumber + newConfs
+}
+
+// handleFulfilled records every not-yet-seen fulfillment log: it bumps the
+// per-request response count (which getConfirmedAt uses to back off on
+// duplicates) and indexes the request ID by its fulfillment block number.
+func (lsn *listenerV2) handleFulfilled(fulfilled map[string]RandomWordsFulfilled) {
+	for _, v := range fulfilled {
+		// don't process same log over again
+		// log key includes block number and blockhash, so on re-orgs it would return true
+		// and we would re-process the re-orged request.
+		if !lsn.fulfillmentLogDeduper.ShouldDeliver(v.Raw()) {
+			continue
+		}
+		lsn.l.Debugw("Received fulfilled log", "reqID", v.RequestID(), "success", v.Success())
+		lsn.respCount[v.RequestID().String()]++
+		lsn.blockNumberToReqID.Insert(fulfilledReqV2{
+			blockNumber: v.Raw().BlockNumber,
+			reqID:       v.RequestID().String(),
+		})
+	}
+}
+
+// handleRequested converts not-yet-seen request logs into pendingRequests,
+// stamping each with the block at which it becomes eligible for fulfillment.
+// requested and requestedLP are index-aligned (see getUnfulfilled).
+func (lsn *listenerV2) handleRequested(requested []RandomWordsRequested, requestedLP []logpoller.Log, minConfs uint32) (pendingRequests []pendingRequest) {
+	for i, req := range requested {
+		// don't process same log over again
+		// log key includes block number and blockhash, so on re-orgs it would return true
+		// and we would re-process the re-orged request.
+ if lsn.inflightCache.Contains(req.Raw()) { + continue + } + + confirmedAt := lsn.getConfirmedAt(req, minConfs) + lsn.l.Debugw("VRFListenerV2: Received log request", + "reqID", req.RequestID(), + "reqBlockNumber", req.Raw().BlockNumber, + "reqBlockHash", req.Raw().BlockHash, + "reqTxHash", req.Raw().TxHash, + "confirmedAt", confirmedAt, + "subID", req.SubID(), + "sender", req.Sender()) + pendingRequests = append(pendingRequests, pendingRequest{ + confirmedAtBlock: confirmedAt, + req: req, + utcTimestamp: requestedLP[i].CreatedAt.UTC(), + }) + lsn.reqAdded() + } + + return pendingRequests +} + +// numReplayBlocks returns the number of blocks to replay on startup +// given the request timeout and the chain ID. +// if the chain ID is not recognized it assumes a block time of 1 second +// and returns the number of blocks in a day. +func numReplayBlocks(requestTimeout time.Duration, chainID *big.Int) int64 { + var timeoutSeconds = int64(requestTimeout.Seconds()) + switch chainID.String() { + case "1": // eth mainnet + case "3": // eth ropsten + case "4": // eth rinkeby + case "5": // eth goerli + case "11155111": // eth sepolia + // block time is 12s + return timeoutSeconds / 12 + case "137": // polygon mainnet + case "80001": // polygon mumbai + // block time is 2s + return timeoutSeconds / 2 + case "56": // bsc mainnet + case "97": // bsc testnet + // block time is 2s + return timeoutSeconds / 2 + case "43114": // avalanche mainnet + case "43113": // avalanche fuji + // block time is 1s + return timeoutSeconds + case "250": // fantom mainnet + case "4002": // fantom testnet + // block time is 1s + return timeoutSeconds + case "42161": // arbitrum mainnet + case "421613": // arbitrum goerli + case "421614": // arbitrum sepolia + // block time is 0.25s in the worst case + return timeoutSeconds * 4 + case "10": // optimism mainnet + case "69": // optimism kovan + case "420": // optimism goerli + case "11155420": // optimism sepolia + case "8453": // base mainnet + case 
"84531": // base goerli + case "84532": // base sepolia + // block time is 2s + return timeoutSeconds / 2 + default: + // assume block time of 1s + return timeoutSeconds + } + // assume block time of 1s + return timeoutSeconds +} diff --git a/core/services/vrf/v2/listener_v2_log_listener_test.go b/core/services/vrf/v2/listener_v2_log_listener_test.go new file mode 100644 index 00000000..cfcdcb06 --- /dev/null +++ b/core/services/vrf/v2/listener_v2_log_listener_test.go @@ -0,0 +1,1040 @@ +package v2 + +import ( + "context" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + evmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm/mocks" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_emitter" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_log_emitter" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + 
"github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +var ( + emitterABI, _ = abi.JSON(strings.NewReader(log_emitter.LogEmitterABI)) + vrfEmitterABI, _ = abi.JSON(strings.NewReader(vrf_log_emitter.VRFLogEmitterABI)) +) + +type vrfLogPollerListenerTH struct { + Lggr logger.Logger + ChainID *big.Int + ORM *logpoller.DbORM + LogPoller logpoller.LogPollerTest + Client *backends.SimulatedBackend + Emitter *log_emitter.LogEmitter + EmitterAddress common.Address + VRFLogEmitter *vrf_log_emitter.VRFLogEmitter + VRFEmitterAddress common.Address + Owner *bind.TransactOpts + EthDB ethdb.Database + Db *sqlx.DB + Listener *listenerV2 + Ctx context.Context +} + +func setupVRFLogPollerListenerTH(t *testing.T, + useFinalityTag bool, + finalityDepth, backfillBatchSize, + rpcBatchSize, keepFinalizedBlocksDepth int64, + mockChainUpdateFn func(*evmmocks.Chain, *vrfLogPollerListenerTH)) *vrfLogPollerListenerTH { + + lggr := logger.TestLogger(t) + chainID := testutils.NewRandomEVMChainID() + db := pgtest.NewSqlxDB(t) + + o := logpoller.NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + owner := testutils.MustNewSimTransactor(t) + ethDB := rawdb.NewMemoryDatabase() + ec := backends.NewSimulatedBackendWithDatabase(ethDB, map[common.Address]core.GenesisAccount{ + owner.From: { + Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)), + }, + }, 10e6) + // VRF Listener relies on block timestamps, but SimulatedBackend uses by default clock starting from 1970-01-01 + // This trick is used to move the clock closer to the current time. We set first block to be X hours ago. 
+ // FirstBlockAge is used to compute first block's timestamp in SimulatedBackend (time.Now() - FirstBlockAge) + const FirstBlockAge = 24 * time.Hour + blockTime := time.UnixMilli(int64(ec.Blockchain().CurrentHeader().Time)) + err := ec.AdjustTime(time.Since(blockTime) - FirstBlockAge) + require.NoError(t, err) + ec.Commit() + + esc := client.NewSimulatedBackendClient(t, ec, chainID) + // Mark genesis block as finalized to avoid any nulls in the tests + head := esc.Backend().Blockchain().CurrentHeader() + esc.Backend().Blockchain().SetFinalized(head) + + // Poll period doesn't matter, we intend to call poll and save logs directly in the test. + // Set it to some insanely high value to not interfere with any tests. + lp := logpoller.NewLogPoller(o, esc, lggr, 1*time.Hour, useFinalityTag, finalityDepth, backfillBatchSize, rpcBatchSize, keepFinalizedBlocksDepth) + + emitterAddress1, _, emitter1, err := log_emitter.DeployLogEmitter(owner, ec) + require.NoError(t, err) + vrfLogEmitterAddress, _, vrfLogEmitter, err := vrf_log_emitter.DeployVRFLogEmitter(owner, ec) + require.NoError(t, err) + ec.Commit() + + // Log Poller Listener + cfg := pgtest.NewQConfig(false) + ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg) + require.NoError(t, ks.Unlock("blah")) + j, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + RequestedConfsDelay: 10, + EVMChainID: chainID.String(), + }).Toml()) + require.NoError(t, err) + + coordinatorV2, err := vrf_coordinator_v2.NewVRFCoordinatorV2(vrfLogEmitter.Address(), ec) + require.Nil(t, err) + coordinator := NewCoordinatorV2(coordinatorV2) + + chain := evmmocks.NewChain(t) + listener := &listenerV2{ + respCount: map[string]uint64{}, + job: j, + chain: chain, + l: logger.Sugared(lggr), + coordinator: coordinator, + } + ctx := testutils.Context(t) + + // Filter registration is idempotent, so we can just call it every time + // and retry on errors using the ticker. 
+ err = lp.RegisterFilter(logpoller.Filter{ + Name: fmt.Sprintf("vrf_%s_keyhash_%s_job_%d", "v2", listener.job.VRFSpec.PublicKey.MustHash().String(), listener.job.ID), + EventSigs: evmtypes.HashArray{ + vrf_log_emitter.VRFLogEmitterRandomWordsRequested{}.Topic(), + vrf_log_emitter.VRFLogEmitterRandomWordsFulfilled{}.Topic(), + }, + Addresses: evmtypes.AddressArray{ + vrfLogEmitter.Address(), + // listener.job.VRFSpec.CoordinatorAddress.Address(), + }, + }) + require.Nil(t, err) + require.NoError(t, lp.RegisterFilter(logpoller.Filter{ + Name: "Integration test", + EventSigs: []common.Hash{emitterABI.Events["Log1"].ID}, + Addresses: []common.Address{emitterAddress1}, + Retention: 0})) + require.Nil(t, err) + require.Len(t, lp.Filter(nil, nil, nil).Addresses, 2) + require.Len(t, lp.Filter(nil, nil, nil).Topics, 1) + require.Len(t, lp.Filter(nil, nil, nil).Topics[0], 3) + + th := &vrfLogPollerListenerTH{ + Lggr: lggr, + ChainID: chainID, + ORM: o, + LogPoller: lp, + Emitter: emitter1, + EmitterAddress: emitterAddress1, + VRFLogEmitter: vrfLogEmitter, + VRFEmitterAddress: vrfLogEmitterAddress, + Client: ec, + Owner: owner, + EthDB: ethDB, + Db: db, + Listener: listener, + Ctx: ctx, + } + mockChainUpdateFn(chain, th) + return th +} + +/* Tests for initializeLastProcessedBlock: BEGIN + * TestInitProcessedBlock_NoVRFReqs + * TestInitProcessedBlock_NoUnfulfilledVRFReqs + * TestInitProcessedBlock_OneUnfulfilledVRFReq + * TestInitProcessedBlock_SomeUnfulfilledVRFReqs + * TestInitProcessedBlock_UnfulfilledNFulfilledVRFReqs + */ + +func TestInitProcessedBlock_NoVRFReqs(t *testing.T) { + t.Parallel() + + finalityDepth := int64(3) + th := setupVRFLogPollerListenerTH(t, false, finalityDepth, 3, 2, 1000, func(mockChain *evmmocks.Chain, th *vrfLogPollerListenerTH) { + mockChain.On("ID").Return(th.ChainID) + mockChain.On("LogPoller").Return(th.LogPoller) + }) + + // Block 3 to finalityDepth. 
Ensure we have finality number of blocks + for i := 1; i < int(finalityDepth); i++ { + th.Client.Commit() + } + + // Emit some logs from block 5 to 9 (Inclusive) + n := 5 + for i := 0; i < n; i++ { + _, err1 := th.Emitter.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + _, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + th.Client.Commit() + } + + // Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 5 (EmitLog blocks) = 9 + + // Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db. + require.NoError(t, th.LogPoller.Start(testutils.Context(t))) + + // The poller starts on a new chain at latest-finality (finalityDepth + 5 in this case), + // Replaying from block 4 should guarantee we have block 4 immediately. (We will also get + // block 3 once the backup poller runs, since it always starts 100 blocks behind.) + require.NoError(t, th.LogPoller.Replay(testutils.Context(t), 4)) + + // Should return logs from block 5 to 7 (inclusive) + logs, err := th.LogPoller.Logs(4, 7, emitterABI.Events["Log1"].ID, th.EmitterAddress, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + require.Equal(t, 3, len(logs)) + + lastProcessedBlock, err := th.Listener.initializeLastProcessedBlock(th.Ctx) + require.Nil(t, err) + require.Equal(t, int64(6), lastProcessedBlock) +} + +func TestInitProcessedBlock_NoUnfulfilledVRFReqs(t *testing.T) { + t.Parallel() + + finalityDepth := int64(3) + th := setupVRFLogPollerListenerTH(t, false, finalityDepth, 3, 2, 1000, func(mockChain *evmmocks.Chain, curTH *vrfLogPollerListenerTH) { + mockChain.On("ID").Return(curTH.ChainID) + mockChain.On("LogPoller").Return(curTH.LogPoller) + }) + + // Block 3 to finalityDepth. 
Ensure we have finality number of blocks + for i := 1; i < int(finalityDepth); i++ { + th.Client.Commit() + } + + // Create VRF request block and a fulfillment block + keyHash := [32]byte(th.Listener.job.VRFSpec.PublicKey.MustHash().Bytes()) + preSeed := big.NewInt(105) + subID := uint64(1) + reqID := big.NewInt(1) + _, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + th.Client.Commit() + _, err2 = th.VRFLogEmitter.EmitRandomWordsFulfilled(th.Owner, reqID, preSeed, big.NewInt(10), true) + require.NoError(t, err2) + th.Client.Commit() + + // Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock + n := 5 + for i := 0; i < n; i++ { + _, err1 := th.Emitter.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + _, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + th.Client.Commit() + } + + // Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db. 
+ require.NoError(t, th.LogPoller.Start(th.Ctx)) + + // Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 2 (VRF req/resp block) + 5 (EmitLog blocks) = 11 + latestBlock := int64(2 + 2 + 2 + 5) + + // A replay is needed so that log poller has a latest block + // Replay from block 11 (latest) onwards, so that log poller has a latest block + // Then test if log poller is able to replay from finalizedBlockNumber (8 --> onwards) + // since there are no pending VRF requests + // Blocks: 1 2 3 4 [5;Request] [6;Fulfilment] 7 8 9 10 11 + require.NoError(t, th.LogPoller.Replay(th.Ctx, latestBlock)) + + // initializeLastProcessedBlock must return the finalizedBlockNumber (8) instead of + // VRF request block number (5), since all VRF requests are fulfilled + lastProcessedBlock, err := th.Listener.initializeLastProcessedBlock(th.Ctx) + require.Nil(t, err) + require.Equal(t, int64(8), lastProcessedBlock) +} + +func TestInitProcessedBlock_OneUnfulfilledVRFReq(t *testing.T) { + t.Parallel() + + finalityDepth := int64(3) + th := setupVRFLogPollerListenerTH(t, false, finalityDepth, 3, 2, 1000, func(mockChain *evmmocks.Chain, curTH *vrfLogPollerListenerTH) { + mockChain.On("ID").Return(curTH.ChainID) + mockChain.On("LogPoller").Return(curTH.LogPoller) + }) + + // Block 3 to finalityDepth. 
Ensure we have finality number of blocks + for i := 1; i < int(finalityDepth); i++ { + th.Client.Commit() + } + + // Make a VRF request without fulfilling it + keyHash := [32]byte(th.Listener.job.VRFSpec.PublicKey.MustHash().Bytes()) + preSeed := big.NewInt(105) + subID := uint64(1) + reqID := big.NewInt(1) + _, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + th.Client.Commit() + + // Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock + n := 5 + th.Client.Commit() + for i := 0; i < n; i++ { + _, err1 := th.Emitter.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + _, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + th.Client.Commit() + } + + // Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db. 
+ require.NoError(t, th.LogPoller.Start(th.Ctx)) + + // Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 1 (VRF req block) + 5 (EmitLog blocks) = 10 + latestBlock := int64(2 + 2 + 1 + 5) + + // A replay is needed so that log poller has a latest block + // Replay from block 10 (latest) onwards, so that log poller has a latest block + // Then test if log poller is able to replay from earliestUnprocessedBlock (5 --> onwards) + // Blocks: 1 2 3 4 [5;Request] 6 7 8 9 10 + require.NoError(t, th.LogPoller.Replay(th.Ctx, latestBlock)) + + // initializeLastProcessedBlock must return the unfulfilled VRF + // request block number (5) instead of finalizedBlockNumber (8) + lastProcessedBlock, err := th.Listener.initializeLastProcessedBlock(th.Ctx) + require.Nil(t, err) + require.Equal(t, int64(5), lastProcessedBlock) +} + +func TestInitProcessedBlock_SomeUnfulfilledVRFReqs(t *testing.T) { + t.Parallel() + + finalityDepth := int64(3) + th := setupVRFLogPollerListenerTH(t, false, finalityDepth, 3, 2, 1000, func(mockChain *evmmocks.Chain, curTH *vrfLogPollerListenerTH) { + mockChain.On("ID").Return(curTH.ChainID) + mockChain.On("LogPoller").Return(curTH.LogPoller) + }) + + // Block 3 to finalityDepth. 
Ensure we have finality number of blocks + for i := 1; i < int(finalityDepth); i++ { + th.Client.Commit() + } + + // Emit some logs in blocks with VRF reqs interspersed + // No fulfillment for any VRF requests + n := 5 + for i := 0; i < n; i++ { + _, err1 := th.Emitter.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + _, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + th.Client.Commit() + + // Create 2 blocks with VRF requests in each iteration + keyHash := [32]byte(th.Listener.job.VRFSpec.PublicKey.MustHash().Bytes()) + preSeed := big.NewInt(105) + subID := uint64(1) + reqID1 := big.NewInt(int64(2 * i)) + _, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + th.Client.Commit() + + reqID2 := big.NewInt(int64(2*i + 1)) + _, err2 = th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID2, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + th.Client.Commit() + } + + // Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db. 
+ require.NoError(t, th.LogPoller.Start(th.Ctx)) + + // Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 3*5 (EmitLog + VRF req/resp blocks) = 19 + latestBlock := int64(2 + 2 + 3*5) + + // A replay is needed so that log poller has a latest block + // Replay from block 19 (latest) onwards, so that log poller has a latest block + // Then test if log poller is able to replay from earliestUnprocessedBlock (6 --> onwards) + // Blocks: 1 2 3 4 5 [6;Request] [7;Request] 8 [9;Request] [10;Request] + // 11 [12;Request] [13;Request] 14 [15;Request] [16;Request] + // 17 [18;Request] [19;Request] + require.NoError(t, th.LogPoller.Replay(th.Ctx, latestBlock)) + + // initializeLastProcessedBlock must return the earliest unfulfilled VRF request block + // number instead of finalizedBlockNumber + lastProcessedBlock, err := th.Listener.initializeLastProcessedBlock(th.Ctx) + require.Nil(t, err) + require.Equal(t, int64(6), lastProcessedBlock) +} + +func TestInitProcessedBlock_UnfulfilledNFulfilledVRFReqs(t *testing.T) { + t.Parallel() + + finalityDepth := int64(3) + th := setupVRFLogPollerListenerTH(t, false, finalityDepth, 3, 2, 1000, func(mockChain *evmmocks.Chain, curTH *vrfLogPollerListenerTH) { + mockChain.On("ID").Return(curTH.ChainID) + mockChain.On("LogPoller").Return(curTH.LogPoller) + }) + + // Block 3 to finalityDepth. Ensure we have finality number of blocks + for i := 1; i < int(finalityDepth); i++ { + th.Client.Commit() + } + + // Emit some logs in blocks with VRF reqs interspersed + // One VRF request in each iteration is fulfilled to imitate mixed workload + n := 5 + for i := 0; i < n; i++ { + _, err1 := th.Emitter.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + _, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + th.Client.Commit() + + // Create 2 blocks with VRF requests in each iteration and fulfill one + // of them. 
This creates a mixed workload of fulfilled and unfulfilled + // VRF requests for testing the VRF listener + keyHash := [32]byte(th.Listener.job.VRFSpec.PublicKey.MustHash().Bytes()) + preSeed := big.NewInt(105) + subID := uint64(1) + reqID1 := big.NewInt(int64(2 * i)) + _, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + th.Client.Commit() + + reqID2 := big.NewInt(int64(2*i + 1)) + _, err2 = th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID2, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + + _, err2 = th.VRFLogEmitter.EmitRandomWordsFulfilled(th.Owner, reqID1, preSeed, big.NewInt(10), true) + require.NoError(t, err2) + th.Client.Commit() + } + + // Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db. + require.NoError(t, th.LogPoller.Start(th.Ctx)) + + // Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 3*5 (EmitLog + VRF req/resp blocks) = 19 + latestBlock := int64(2 + 2 + 3*5) + // A replay is needed so that log poller has a latest block + // Replay from block 19 (latest) onwards, so that log poller has a latest block + // Then test if log poller is able to replay from earliestUnprocessedBlock (7 --> onwards) + // Blocks: 1 2 3 4 5 [6;Request] [7;Request;6-Fulfilment] 8 [9;Request] [10;Request;9-Fulfilment] + // 11 [12;Request] [13;Request;12-Fulfilment] 14 [15;Request] [16;Request;15-Fulfilment] + // 17 [18;Request] [19;Request;18-Fulfilment] + require.NoError(t, th.LogPoller.Replay(th.Ctx, latestBlock)) + + // initializeLastProcessedBlock must return the earliest unfulfilled VRF request block + // number instead of finalizedBlockNumber + lastProcessedBlock, err := th.Listener.initializeLastProcessedBlock(th.Ctx) + require.Nil(t, err) + require.Equal(t, int64(7), lastProcessedBlock) +} + +/* Tests for initializeLastProcessedBlock: END */ + +/* Tests 
for updateLastProcessedBlock: BEGIN + * TestUpdateLastProcessedBlock_NoVRFReqs + * TestUpdateLastProcessedBlock_NoUnfulfilledVRFReqs + * TestUpdateLastProcessedBlock_OneUnfulfilledVRFReq + * TestUpdateLastProcessedBlock_SomeUnfulfilledVRFReqs + * TestUpdateLastProcessedBlock_UnfulfilledNFulfilledVRFReqs + */ + +func TestUpdateLastProcessedBlock_NoVRFReqs(t *testing.T) { + t.Parallel() + + finalityDepth := int64(3) + th := setupVRFLogPollerListenerTH(t, false, finalityDepth, 3, 2, 1000, func(mockChain *evmmocks.Chain, curTH *vrfLogPollerListenerTH) { + mockChain.On("LogPoller").Return(curTH.LogPoller) + }) + + // Block 3 to finalityDepth. Ensure we have finality number of blocks + for i := 1; i < int(finalityDepth); i++ { + th.Client.Commit() + } + + // Create VRF request logs + keyHash := [32]byte(th.Listener.job.VRFSpec.PublicKey.MustHash().Bytes()) + preSeed := big.NewInt(105) + subID := uint64(1) + reqID1 := big.NewInt(int64(1)) + + _, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + th.Client.Commit() + + reqID2 := big.NewInt(int64(2)) + _, err2 = th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID2, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + th.Client.Commit() + + // Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock + n := 5 + for i := 0; i < n; i++ { + _, err1 := th.Emitter.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + _, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + th.Client.Commit() + } + + // Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 2 (VRF req blocks) + 5 (EmitLog blocks) = 11 + + // Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db. 
+ require.NoError(t, th.LogPoller.Start(th.Ctx)) + + // We've to replay from before VRF request log, since updateLastProcessedBlock + // does not internally call LogPoller.Replay + require.NoError(t, th.LogPoller.Replay(th.Ctx, 4)) + + // updateLastProcessedBlock must return the finalizedBlockNumber as there are + // no VRF requests, after currLastProcessedBlock (block 6). The VRF requests + // made above are before the currLastProcessedBlock (7) passed in below + lastProcessedBlock, err := th.Listener.updateLastProcessedBlock(th.Ctx, 7) + require.Nil(t, err) + require.Equal(t, int64(8), lastProcessedBlock) +} + +func TestUpdateLastProcessedBlock_NoUnfulfilledVRFReqs(t *testing.T) { + t.Parallel() + + finalityDepth := int64(3) + th := setupVRFLogPollerListenerTH(t, false, finalityDepth, 3, 2, 1000, func(mockChain *evmmocks.Chain, curTH *vrfLogPollerListenerTH) { + mockChain.On("LogPoller").Return(curTH.LogPoller) + }) + + // Block 3 to finalityDepth. Ensure we have finality number of blocks + for i := 1; i < int(finalityDepth); i++ { + th.Client.Commit() + } + + // Create VRF request log block with a fulfillment log block + keyHash := [32]byte(th.Listener.job.VRFSpec.PublicKey.MustHash().Bytes()) + preSeed := big.NewInt(105) + subID := uint64(1) + reqID1 := big.NewInt(int64(1)) + + _, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + th.Client.Commit() + + _, err2 = th.VRFLogEmitter.EmitRandomWordsFulfilled(th.Owner, reqID1, preSeed, big.NewInt(10), true) + require.NoError(t, err2) + th.Client.Commit() + + // Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock + n := 5 + for i := 0; i < n; i++ { + _, err1 := th.Emitter.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + _, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + 
th.Client.Commit() + } + + // Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 2 (VRF req/resp blocks) + 5 (EmitLog blocks) = 11 + + // Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db. + require.NoError(t, th.LogPoller.Start(th.Ctx)) + + // We've to replay from before VRF request log, since updateLastProcessedBlock + // does not internally call LogPoller.Replay + require.NoError(t, th.LogPoller.Replay(th.Ctx, 4)) + + // updateLastProcessedBlock must return the finalizedBlockNumber (8) though we have + // a VRF req at block (5) after currLastProcessedBlock (4) passed below, because + // the VRF request is fulfilled + lastProcessedBlock, err := th.Listener.updateLastProcessedBlock(th.Ctx, 4) + require.Nil(t, err) + require.Equal(t, int64(8), lastProcessedBlock) +} + +func TestUpdateLastProcessedBlock_OneUnfulfilledVRFReq(t *testing.T) { + t.Parallel() + + finalityDepth := int64(3) + th := setupVRFLogPollerListenerTH(t, false, finalityDepth, 3, 2, 1000, func(mockChain *evmmocks.Chain, curTH *vrfLogPollerListenerTH) { + mockChain.On("LogPoller").Return(curTH.LogPoller) + }) + + // Block 3 to finalityDepth. 
Ensure we have finality number of blocks + for i := 1; i < int(finalityDepth); i++ { + th.Client.Commit() + } + + // Create VRF request logs without a fulfillment log block + keyHash := [32]byte(th.Listener.job.VRFSpec.PublicKey.MustHash().Bytes()) + preSeed := big.NewInt(105) + subID := uint64(1) + reqID1 := big.NewInt(int64(1)) + + _, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + th.Client.Commit() + + // Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock + n := 5 + for i := 0; i < n; i++ { + _, err1 := th.Emitter.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + _, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + th.Client.Commit() + } + + // Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 1 (VRF req block) + 5 (EmitLog blocks) = 10 + + // Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db. 
+ require.NoError(t, th.LogPoller.Start(th.Ctx)) + + // We've to replay from before VRF request log, since updateLastProcessedBlock + // does not internally call LogPoller.Replay + require.NoError(t, th.LogPoller.Replay(th.Ctx, 4)) + + // updateLastProcessedBlock must return the VRF req at block (5) instead of + // finalizedBlockNumber (8) after currLastProcessedBlock (4) passed below, + // because the VRF request is unfulfilled + lastProcessedBlock, err := th.Listener.updateLastProcessedBlock(th.Ctx, 4) + require.Nil(t, err) + require.Equal(t, int64(5), lastProcessedBlock) +} + +func TestUpdateLastProcessedBlock_SomeUnfulfilledVRFReqs(t *testing.T) { + t.Parallel() + + finalityDepth := int64(3) + th := setupVRFLogPollerListenerTH(t, false, finalityDepth, 3, 2, 1000, func(mockChain *evmmocks.Chain, curTH *vrfLogPollerListenerTH) { + mockChain.On("LogPoller").Return(curTH.LogPoller) + }) + + // Block 3 to finalityDepth. Ensure we have finality number of blocks + for i := 1; i < int(finalityDepth); i++ { + th.Client.Commit() + } + + // Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock + n := 5 + for i := 0; i < n; i++ { + _, err1 := th.Emitter.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + _, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + th.Client.Commit() + + // Create 2 blocks with VRF requests in each iteration + keyHash := [32]byte(th.Listener.job.VRFSpec.PublicKey.MustHash().Bytes()) + preSeed := big.NewInt(105) + subID := uint64(1) + reqID1 := big.NewInt(int64(2 * i)) + + _, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + th.Client.Commit() + + reqID2 := big.NewInt(int64(2*i + 1)) + _, err2 = th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID2, preSeed, subID, 10, 10000, 2, th.Owner.From) + 
require.NoError(t, err2) + th.Client.Commit() + } + + // Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 3*5 (EmitLog + VRF req blocks) = 19 + + // Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db. + require.NoError(t, th.LogPoller.Start(th.Ctx)) + + // We've to replay from before VRF request log, since updateLastProcessedBlock + // does not internally call LogPoller.Replay + require.NoError(t, th.LogPoller.Replay(th.Ctx, 4)) + + // updateLastProcessedBlock must return the VRF req at block (6) instead of + // finalizedBlockNumber (16) after currLastProcessedBlock (4) passed below, + // as block 6 contains the earliest unfulfilled VRF request + lastProcessedBlock, err := th.Listener.updateLastProcessedBlock(th.Ctx, 4) + require.Nil(t, err) + require.Equal(t, int64(6), lastProcessedBlock) +} + +func TestUpdateLastProcessedBlock_UnfulfilledNFulfilledVRFReqs(t *testing.T) { + t.Parallel() + + finalityDepth := int64(3) + th := setupVRFLogPollerListenerTH(t, false, finalityDepth, 3, 2, 1000, func(mockChain *evmmocks.Chain, curTH *vrfLogPollerListenerTH) { + mockChain.On("LogPoller").Return(curTH.LogPoller) + }) + + // Block 3 to finalityDepth. Ensure we have finality number of blocks + for i := 1; i < int(finalityDepth); i++ { + th.Client.Commit() + } + + // Emit some logs in blocks to make the VRF req and fulfillment older than finalityDepth from latestBlock + n := 5 + for i := 0; i < n; i++ { + _, err1 := th.Emitter.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + _, err1 = th.Emitter.EmitLog2(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) + th.Client.Commit() + + // Create 2 blocks with VRF requests in each iteration and fulfill one + // of them. 
This creates a mixed workload of fulfilled and unfulfilled + // VRF requests for testing the VRF listener + keyHash := [32]byte(th.Listener.job.VRFSpec.PublicKey.MustHash().Bytes()) + preSeed := big.NewInt(105) + subID := uint64(1) + reqID1 := big.NewInt(int64(2 * i)) + + _, err2 := th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID1, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + th.Client.Commit() + + reqID2 := big.NewInt(int64(2*i + 1)) + _, err2 = th.VRFLogEmitter.EmitRandomWordsRequested(th.Owner, + keyHash, reqID2, preSeed, subID, 10, 10000, 2, th.Owner.From) + require.NoError(t, err2) + _, err2 = th.VRFLogEmitter.EmitRandomWordsFulfilled(th.Owner, reqID1, preSeed, big.NewInt(10), true) + require.NoError(t, err2) + th.Client.Commit() + } + + // Blocks till now: 2 (in SetupTH) + 2 (empty blocks) + 3*5 (EmitLog + VRF req blocks) = 19 + + // Calling Start() after RegisterFilter() simulates a node restart after job creation, should reload Filter from db. + require.NoError(t, th.LogPoller.Start(th.Ctx)) + + // We've to replay from before VRF request log, since updateLastProcessedBlock + // does not internally call LogPoller.Replay + require.NoError(t, th.LogPoller.Replay(th.Ctx, 4)) + + // updateLastProcessedBlock must return the VRF req at block (7) instead of + // finalizedBlockNumber (16) after currLastProcessedBlock (4) passed below, + // as block 7 contains the earliest unfulfilled VRF request. VRF request + // in block 6 has been fulfilled in block 7. 
+ lastProcessedBlock, err := th.Listener.updateLastProcessedBlock(th.Ctx, 4) + require.Nil(t, err) + require.Equal(t, int64(7), lastProcessedBlock) +} + +/* Tests for updateLastProcessedBlock: END */ + +/* Tests for getUnfulfilled: BEGIN + * TestGetUnfulfilled_NoVRFReqs + * TestGetUnfulfilled_NoUnfulfilledVRFReqs + * TestGetUnfulfilled_OneUnfulfilledVRFReq + * TestGetUnfulfilled_SomeUnfulfilledVRFReqs + * TestGetUnfulfilled_UnfulfilledNFulfilledVRFReqs + */ + +func SetupGetUnfulfilledTH(t *testing.T) (*listenerV2, *ubig.Big) { + chainID := ubig.New(big.NewInt(12345)) + lggr := logger.TestLogger(t) + j, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + RequestedConfsDelay: 10, + EVMChainID: chainID.String(), + }).Toml()) + require.NoError(t, err) + chain := evmmocks.NewChain(t) + + // Construct CoordinatorV2_X object for VRF listener + owner := testutils.MustNewSimTransactor(t) + ethDB := rawdb.NewMemoryDatabase() + ec := backends.NewSimulatedBackendWithDatabase(ethDB, map[common.Address]core.GenesisAccount{ + owner.From: { + Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)), + }, + }, 10e6) + _, _, vrfLogEmitter, err := vrf_log_emitter.DeployVRFLogEmitter(owner, ec) + require.NoError(t, err) + ec.Commit() + coordinatorV2, err := vrf_coordinator_v2.NewVRFCoordinatorV2(vrfLogEmitter.Address(), ec) + require.Nil(t, err) + coordinator := NewCoordinatorV2(coordinatorV2) + + listener := &listenerV2{ + respCount: map[string]uint64{}, + job: j, + chain: chain, + l: logger.Sugared(lggr), + coordinator: coordinator, + } + return listener, chainID +} + +func TestGetUnfulfilled_NoVRFReqs(t *testing.T) { + t.Parallel() + + listener, chainID := SetupGetUnfulfilledTH(t) + + logs := []logpoller.Log{} + for i := 0; i < 10; i++ { + logs = append(logs, logpoller.Log{ + EvmChainId: chainID, + LogIndex: 0, + BlockHash: common.BigToHash(big.NewInt(int64(i))), + BlockNumber: int64(i), + BlockTimestamp: time.Now(), + Topics: [][]byte{ + 
[]byte("0x46692c0e59ca9cd1ad8f984a9d11715ec83424398b7eed4e05c8ce84662415a8"), + }, + EventSig: emitterABI.Events["Log1"].ID, + Address: common.Address{}, + TxHash: common.BigToHash(big.NewInt(int64(i))), + Data: nil, + CreatedAt: time.Now(), + }) + } + + unfulfilled, _, fulfilled := listener.getUnfulfilled(logs, listener.l) + require.Empty(t, unfulfilled) + require.Empty(t, fulfilled) +} + +func TestGetUnfulfilled_NoUnfulfilledVRFReqs(t *testing.T) { + t.Parallel() + + listener, chainID := SetupGetUnfulfilledTH(t) + + logs := []logpoller.Log{} + for i := 0; i < 10; i++ { + eventSig := emitterABI.Events["Log1"].ID + topics := [][]byte{ + common.FromHex("0x46692c0e59ca9cd1ad8f984a9d11715ec83424398b7eed4e05c8ce84662415a8"), + } + if i%2 == 0 { + eventSig = vrfEmitterABI.Events["RandomWordsRequested"].ID + topics = [][]byte{ + common.FromHex("0x63373d1c4696214b898952999c9aaec57dac1ee2723cec59bea6888f489a9772"), + common.FromHex("0xc0a6c424ac7157ae408398df7e5f4552091a69125d5dfcb7b8c2659029395bdf"), + common.FromHex("0x0000000000000000000000000000000000000000000000000000000000000001"), + common.FromHex("0x0000000000000000000000005ee3b50502b5c4c9184dcb281471a0614d4b2ef9"), + } + } + logs = append(logs, logpoller.Log{ + EvmChainId: chainID, + LogIndex: 0, + BlockHash: common.BigToHash(big.NewInt(int64(2 * i))), + BlockNumber: int64(2 * i), + BlockTimestamp: time.Now(), + Topics: topics, + EventSig: eventSig, + Address: common.Address{}, + TxHash: common.BigToHash(big.NewInt(int64(2 * i))), + Data: common.FromHex("0x000000000000000000000000000000000000000000000000000000000000000" + fmt.Sprintf("%d", i) + "000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000002"), + CreatedAt: time.Now(), + }) + if i%2 == 0 { + logs = append(logs, logpoller.Log{ + EvmChainId: chainID, + 
LogIndex: 0, + BlockHash: common.BigToHash(big.NewInt(int64(2*i + 1))), + BlockNumber: int64(2*i + 1), + BlockTimestamp: time.Now(), + Topics: [][]byte{ + common.FromHex("0x7dffc5ae5ee4e2e4df1651cf6ad329a73cebdb728f37ea0187b9b17e036756e4"), + common.FromHex("0x000000000000000000000000000000000000000000000000000000000000000" + fmt.Sprintf("%d", i)), + }, + EventSig: vrfEmitterABI.Events["RandomWordsFulfilled"].ID, + Address: common.Address{}, + TxHash: common.BigToHash(big.NewInt(int64(2*i + 1))), + Data: common.FromHex("0x0000000000000000000000000000000000000000000000000000000000000069000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000001"), + CreatedAt: time.Now(), + }) + } + } + + unfulfilled, _, fulfilled := listener.getUnfulfilled(logs, listener.l) + require.Empty(t, unfulfilled) + require.Len(t, fulfilled, 5) +} + +func TestGetUnfulfilled_OneUnfulfilledVRFReq(t *testing.T) { + t.Parallel() + + listener, chainID := SetupGetUnfulfilledTH(t) + + logs := []logpoller.Log{} + for i := 0; i < 10; i++ { + eventSig := emitterABI.Events["Log1"].ID + topics := [][]byte{ + common.FromHex("0x46692c0e59ca9cd1ad8f984a9d11715ec83424398b7eed4e05c8ce84662415a8"), + } + if i == 4 { + eventSig = vrfEmitterABI.Events["RandomWordsRequested"].ID + topics = [][]byte{ + common.FromHex("0x63373d1c4696214b898952999c9aaec57dac1ee2723cec59bea6888f489a9772"), + common.FromHex("0xc0a6c424ac7157ae408398df7e5f4552091a69125d5dfcb7b8c2659029395bdf"), + common.FromHex("0x0000000000000000000000000000000000000000000000000000000000000001"), + common.FromHex("0x0000000000000000000000005ee3b50502b5c4c9184dcb281471a0614d4b2ef9"), + } + } + logs = append(logs, logpoller.Log{ + EvmChainId: chainID, + LogIndex: 0, + BlockHash: common.BigToHash(big.NewInt(int64(2 * i))), + BlockNumber: int64(2 * i), + BlockTimestamp: time.Now(), + Topics: topics, + EventSig: eventSig, + Address: common.Address{}, + TxHash: 
common.BigToHash(big.NewInt(int64(2 * i))), + Data: common.FromHex("0x000000000000000000000000000000000000000000000000000000000000000" + fmt.Sprintf("%d", i) + "000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000002"), + CreatedAt: time.Now(), + }) + } + + unfulfilled, _, fulfilled := listener.getUnfulfilled(logs, listener.l) + require.Equal(t, unfulfilled[0].RequestID().Int64(), big.NewInt(4).Int64()) + require.Len(t, unfulfilled, 1) + require.Empty(t, fulfilled) +} + +func TestGetUnfulfilled_SomeUnfulfilledVRFReq(t *testing.T) { + t.Parallel() + + listener, chainID := SetupGetUnfulfilledTH(t) + + logs := []logpoller.Log{} + for i := 0; i < 10; i++ { + eventSig := emitterABI.Events["Log1"].ID + topics := [][]byte{ + common.FromHex("0x46692c0e59ca9cd1ad8f984a9d11715ec83424398b7eed4e05c8ce84662415a8"), + } + if i%2 == 0 { + eventSig = vrfEmitterABI.Events["RandomWordsRequested"].ID + topics = [][]byte{ + common.FromHex("0x63373d1c4696214b898952999c9aaec57dac1ee2723cec59bea6888f489a9772"), + common.FromHex("0xc0a6c424ac7157ae408398df7e5f4552091a69125d5dfcb7b8c2659029395bdf"), + common.FromHex("0x0000000000000000000000000000000000000000000000000000000000000001"), + common.FromHex("0x0000000000000000000000005ee3b50502b5c4c9184dcb281471a0614d4b2ef9"), + } + } + logs = append(logs, logpoller.Log{ + EvmChainId: chainID, + LogIndex: 0, + BlockHash: common.BigToHash(big.NewInt(int64(2 * i))), + BlockNumber: int64(2 * i), + BlockTimestamp: time.Now(), + Topics: topics, + EventSig: eventSig, + Address: common.Address{}, + TxHash: common.BigToHash(big.NewInt(int64(2 * i))), + Data: common.FromHex("0x000000000000000000000000000000000000000000000000000000000000000" + fmt.Sprintf("%d", i) + 
"000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000002"), + CreatedAt: time.Now(), + }) + } + + unfulfilled, _, fulfilled := listener.getUnfulfilled(logs, listener.l) + require.Len(t, unfulfilled, 5) + require.Len(t, fulfilled, 0) + expected := map[int64]bool{0: true, 2: true, 4: true, 6: true, 8: true} + for _, u := range unfulfilled { + v, ok := expected[u.RequestID().Int64()] + require.Equal(t, ok, true) + require.Equal(t, v, true) + } + require.Equal(t, len(expected), len(unfulfilled)) +} + +func TestGetUnfulfilled_UnfulfilledNFulfilledVRFReqs(t *testing.T) { + t.Parallel() + + listener, chainID := SetupGetUnfulfilledTH(t) + + logs := []logpoller.Log{} + for i := 0; i < 10; i++ { + eventSig := emitterABI.Events["Log1"].ID + topics := [][]byte{ + common.FromHex("0x46692c0e59ca9cd1ad8f984a9d11715ec83424398b7eed4e05c8ce84662415a8"), + } + if i%2 == 0 { + eventSig = vrfEmitterABI.Events["RandomWordsRequested"].ID + topics = [][]byte{ + common.FromHex("0x63373d1c4696214b898952999c9aaec57dac1ee2723cec59bea6888f489a9772"), + common.FromHex("0xc0a6c424ac7157ae408398df7e5f4552091a69125d5dfcb7b8c2659029395bdf"), + common.FromHex("0x0000000000000000000000000000000000000000000000000000000000000001"), + common.FromHex("0x0000000000000000000000005ee3b50502b5c4c9184dcb281471a0614d4b2ef9"), + } + } + logs = append(logs, logpoller.Log{ + EvmChainId: chainID, + LogIndex: 0, + BlockHash: common.BigToHash(big.NewInt(int64(2 * i))), + BlockNumber: int64(2 * i), + BlockTimestamp: time.Now(), + Topics: topics, + EventSig: eventSig, + Address: common.Address{}, + TxHash: common.BigToHash(big.NewInt(int64(2 * i))), + Data: common.FromHex("0x000000000000000000000000000000000000000000000000000000000000000" + fmt.Sprintf("%d", i) + 
"000000000000000000000000000000000000000000000000000000000000006a000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000027100000000000000000000000000000000000000000000000000000000000000002"), + CreatedAt: time.Now(), + }) + if i%2 == 0 && i < 6 { + logs = append(logs, logpoller.Log{ + EvmChainId: chainID, + LogIndex: 0, + BlockHash: common.BigToHash(big.NewInt(int64(2*i + 1))), + BlockNumber: int64(2*i + 1), + BlockTimestamp: time.Now(), + Topics: [][]byte{ + common.FromHex("0x7dffc5ae5ee4e2e4df1651cf6ad329a73cebdb728f37ea0187b9b17e036756e4"), + common.FromHex("0x000000000000000000000000000000000000000000000000000000000000000" + fmt.Sprintf("%d", i)), + }, + EventSig: vrfEmitterABI.Events["RandomWordsFulfilled"].ID, + Address: common.Address{}, + TxHash: common.BigToHash(big.NewInt(int64(2*i + 1))), + Data: common.FromHex("0x0000000000000000000000000000000000000000000000000000000000000069000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000001"), + CreatedAt: time.Now(), + }) + } + } + + unfulfilled, _, fulfilled := listener.getUnfulfilled(logs, listener.l) + require.Len(t, unfulfilled, 2) + require.Len(t, fulfilled, 3) + expected := map[int64]bool{6: true, 8: true} + for _, u := range unfulfilled { + v, ok := expected[u.RequestID().Int64()] + require.Equal(t, ok, true) + require.Equal(t, v, true) + } + require.Equal(t, len(expected), len(unfulfilled)) +} + +/* Tests for getUnfulfilled: END */ diff --git a/core/services/vrf/v2/listener_v2_log_processor.go b/core/services/vrf/v2/listener_v2_log_processor.go new file mode 100644 index 00000000..7022ed47 --- /dev/null +++ b/core/services/vrf/v2/listener_v2_log_processor.go @@ -0,0 +1,1220 @@ +package v2 + +import ( + "cmp" + "context" + "database/sql" + "fmt" + "math" + "math/big" + "slices" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum" + 
"github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/plugin-common/pkg/utils/hex" + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2plus_interface" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + "github.com/goplugin/pluginv3.0/v2/core/utils" + bigmath "github.com/goplugin/pluginv3.0/v2/core/utils/big_math" +) + +// Returns all the confirmed logs from the provided pending queue by subscription +func (lsn *listenerV2) getConfirmedLogsBySub(latestHead uint64, pendingRequests []pendingRequest) map[string][]pendingRequest { + vrfcommon.UpdateQueueSize(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, lsn.coordinator.Version(), uniqueReqs(pendingRequests)) + var toProcess = make(map[string][]pendingRequest) + for _, request := range pendingRequests { + if lsn.ready(request, latestHead) { + toProcess[request.req.SubID().String()] = append(toProcess[request.req.SubID().String()], request) + } + } + return toProcess +} + +func (lsn *listenerV2) ready(req pendingRequest, latestHead uint64) bool { + // Request is not eligible for fulfillment yet + if req.confirmedAtBlock > latestHead { + return false + } + + if lsn.job.VRFSpec.BackoffInitialDelay == 0 || req.attempts == 0 { + // Backoff is disabled, or this is the first try 
+ return true + } + + return time.Now().UTC().After( + nextTry( + req.attempts, + lsn.job.VRFSpec.BackoffInitialDelay, + lsn.job.VRFSpec.BackoffMaxDelay, + req.lastTry)) +} + +func nextTry(retries int, initial, max time.Duration, last time.Time) time.Time { + expBackoffFactor := math.Pow(backoffFactor, float64(retries-1)) + + var delay time.Duration + if expBackoffFactor > float64(max/initial) { + delay = max + } else { + delay = time.Duration(float64(initial) * expBackoffFactor) + } + return last.Add(delay) +} + +// Remove all entries 10000 blocks or older +// to avoid a memory leak. +func (lsn *listenerV2) pruneConfirmedRequestCounts() { + min := lsn.blockNumberToReqID.FindMin() + for min != nil { + m := min.(fulfilledReqV2) + if m.blockNumber > (lsn.getLatestHead() - 10000) { + break + } + delete(lsn.respCount, m.reqID) + lsn.blockNumberToReqID.DeleteMin() + min = lsn.blockNumberToReqID.FindMin() + } +} + +// Determine a set of logs that are confirmed +// and the subscription has sufficient balance to fulfill, +// given a eth call with the max gas price. +// Note we have to consider the pending reqs already in the txm as already "spent" link or native, +// using a max link or max native consumed in their metadata. +// A user will need a minBalance capable of fulfilling a single req at the max gas price or nothing will happen. +// This is acceptable as users can choose different keyhashes which have different max gas prices. +// Other variables which can change the bill amount between our eth call simulation and tx execution: +// - Link/eth price fluctuation +// - Falling back to BHS +// However the likelihood is vanishingly small as +// 1) the window between simulation and tx execution is tiny. +// 2) the max gas price provides a very large buffer most of the time. +// Its easier to optimistically assume it will go though and in the rare case of a reversion +// we simply retry TODO: follow up where if we see a fulfillment revert, return log to the queue. 
+func (lsn *listenerV2) processPendingVRFRequests(ctx context.Context, pendingRequests []pendingRequest) { + confirmed := lsn.getConfirmedLogsBySub(lsn.getLatestHead(), pendingRequests) + var processedMu sync.Mutex + processed := make(map[string]struct{}) + start := time.Now() + + defer func() { + for _, subReqs := range confirmed { + for _, req := range subReqs { + if _, ok := processed[req.req.RequestID().String()]; ok { + // add to the inflight cache so that we don't re-process this request + lsn.inflightCache.Add(req.req.Raw()) + } + } + } + lsn.l.Infow("Finished processing pending requests", + "totalProcessed", len(processed), + "totalFailed", len(pendingRequests)-len(processed), + "total", len(pendingRequests), + "time", time.Since(start).String(), + "inflightCacheSize", lsn.inflightCache.Size()) + }() + + if len(confirmed) == 0 { + lsn.l.Infow("No pending requests ready for processing") + return + } + for subID, reqs := range confirmed { + l := lsn.l.With("subID", subID, "startTime", time.Now(), "numReqsForSub", len(reqs)) + // Get the balance of the subscription and also it's active status. + // The reason we need both is that we cannot determine if a subscription + // is active solely by it's balance, since an active subscription could legitimately + // have a zero balance. + var ( + startLinkBalance *big.Int + startEthBalance *big.Int + subIsActive bool + ) + sID, ok := new(big.Int).SetString(subID, 10) + if !ok { + l.Criticalw("Unable to convert %s to Int", subID) + return + } + sub, err := lsn.coordinator.GetSubscription(&bind.CallOpts{ + Context: ctx}, sID) + + if err != nil { + if !strings.Contains(err.Error(), "execution reverted") { + // Most likely this is an RPC error, so we re-try later. + l.Errorw("Unable to read subscription balance", "err", err) + return + } + // "execution reverted" indicates that the subscription no longer exists. 
+ // We can no longer just mark these as processed and continue, + // since it could be that the subscription was canceled while there + // were still unfulfilled requests. + // The simplest approach to handle this is to enter the processRequestsPerSub + // loop rather than create a bunch of largely duplicated code + // to handle this specific situation, since we need to run the pipeline to get + // the VRF proof, abi-encode it, etc. + l.Warnw("Subscription not found - setting start balance to zero", "subID", subID, "err", err) + startLinkBalance = big.NewInt(0) + } else { + // Happy path - sub is active. + startLinkBalance = sub.Balance() + if sub.Version() == vrfcommon.V2Plus { + startEthBalance = sub.NativeBalance() + } + subIsActive = true + } + + // Sort requests in ascending order by CallbackGasLimit + // so that we process the "cheapest" requests for each subscription + // first. This allows us to break out of the processing loop as early as possible + // in the event that a subscription is too underfunded to have it's + // requests processed. + slices.SortFunc(reqs, func(a, b pendingRequest) int { + return cmp.Compare(a.req.CallbackGasLimit(), b.req.CallbackGasLimit()) + }) + + p := lsn.processRequestsPerSub(ctx, sID, startLinkBalance, startEthBalance, reqs, subIsActive) + processedMu.Lock() + for reqID := range p { + processed[reqID] = struct{}{} + } + processedMu.Unlock() + } + lsn.pruneConfirmedRequestCounts() +} + +// MaybeSubtractReservedLink figures out how much PLI is reserved for other VRF requests that +// have not been fully confirmed yet on-chain, and subtracts that from the given startBalance, +// and returns that value if there are no errors. 
+func (lsn *listenerV2) MaybeSubtractReservedLink(ctx context.Context, startBalance *big.Int, chainID *big.Int, subID *big.Int, vrfVersion vrfcommon.Version) (*big.Int, error) { + var metaField string + if vrfVersion == vrfcommon.V2Plus { + metaField = txMetaGlobalSubId + } else if vrfVersion == vrfcommon.V2 { + metaField = txMetaFieldSubId + } else { + return nil, errors.Errorf("unsupported vrf version %s", vrfVersion) + } + + txes, err := lsn.chain.TxManager().FindTxesByMetaFieldAndStates(ctx, metaField, subID.String(), reserveEthLinkQueryStates, chainID) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("TXM FindTxesByMetaFieldAndStates failed: %w", err) + } + + reservedLinkSum := big.NewInt(0) + // Aggregate non-null MaxLink from all txes returned + for _, tx := range txes { + var meta *txmgrtypes.TxMeta[common.Address, common.Hash] + meta, err = tx.GetMeta() + if err != nil { + return nil, fmt.Errorf("GetMeta for Tx failed: %w", err) + } + if meta != nil && meta.MaxLink != nil { + txMaxLink, success := new(big.Int).SetString(*meta.MaxLink, 10) + if !success { + return nil, fmt.Errorf("converting reserved PLI %s", *meta.MaxLink) + } + + reservedLinkSum.Add(reservedLinkSum, txMaxLink) + } + } + + return new(big.Int).Sub(startBalance, reservedLinkSum), nil +} + +// MaybeSubtractReservedEth figures out how much ether is reserved for other VRF requests that +// have not been fully confirmed yet on-chain, and subtracts that from the given startBalance, +// and returns that value if there are no errors. 
+func (lsn *listenerV2) MaybeSubtractReservedEth(ctx context.Context, startBalance *big.Int, chainID *big.Int, subID *big.Int, vrfVersion vrfcommon.Version) (*big.Int, error) { + var metaField string + if vrfVersion == vrfcommon.V2Plus { + metaField = txMetaGlobalSubId + } else if vrfVersion == vrfcommon.V2 { + // native payment is not supported for v2, so returning 0 reserved ETH + return big.NewInt(0), nil + } else { + return nil, errors.Errorf("unsupported vrf version %s", vrfVersion) + } + txes, err := lsn.chain.TxManager().FindTxesByMetaFieldAndStates(ctx, metaField, subID.String(), reserveEthLinkQueryStates, chainID) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("TXM FindTxesByMetaFieldAndStates failed: %w", err) + } + + reservedEthSum := big.NewInt(0) + // Aggregate non-null MaxEth from all txes returned + for _, tx := range txes { + var meta *txmgrtypes.TxMeta[common.Address, common.Hash] + meta, err = tx.GetMeta() + if err != nil { + return nil, fmt.Errorf("GetMeta for Tx failed: %w", err) + } + if meta != nil && meta.MaxEth != nil { + txMaxEth, success := new(big.Int).SetString(*meta.MaxEth, 10) + if !success { + return nil, fmt.Errorf("converting reserved ETH %s", *meta.MaxEth) + } + + reservedEthSum.Add(reservedEthSum, txMaxEth) + } + } + + if startBalance != nil { + return new(big.Int).Sub(startBalance, reservedEthSum), nil + } + return big.NewInt(0), nil +} + +func (lsn *listenerV2) processRequestsPerSubBatchHelper( + ctx context.Context, + subID *big.Int, + startBalance *big.Int, + startBalanceNoReserved *big.Int, + reqs []pendingRequest, + subIsActive bool, + nativePayment bool, +) (processed map[string]struct{}) { + start := time.Now() + processed = make(map[string]struct{}) + + // Base the max gas for a batch on the max gas limit for a single callback. 
+ // Since the max gas limit for a single callback is usually quite large already, + // we probably don't want to exceed it too much so that we can reliably get + // batch fulfillments included, while also making sure that the biggest gas guzzler + // callbacks are included. + config, err := lsn.coordinator.GetConfig(&bind.CallOpts{ + Context: ctx, + }) + if err != nil { + lsn.l.Errorw("Couldn't get config from coordinator", "err", err) + return processed + } + + // Add very conservative upper bound estimate on verification costs. + batchMaxGas := config.MaxGasLimit() + 400_000 + + l := lsn.l.With( + "subID", subID, + "eligibleSubReqs", len(reqs), + "startBalance", startBalance.String(), + "startBalanceNoReserved", startBalanceNoReserved.String(), + "batchMaxGas", batchMaxGas, + "subIsActive", subIsActive, + "nativePayment", nativePayment, + ) + + defer func() { + l.Infow("Finished processing for sub", + "endBalance", startBalanceNoReserved.String(), + "totalProcessed", len(processed), + "totalUnique", uniqueReqs(reqs), + "time", time.Since(start).String()) + }() + + l.Infow("Processing requests for subscription with batching") + + ready, expired := lsn.getReadyAndExpired(l, reqs) + for _, reqID := range expired { + processed[reqID] = struct{}{} + } + + // Process requests in chunks in order to kick off as many jobs + // as configured in parallel. Then we can combine into fulfillment + // batches afterwards. 
+ for chunkStart := 0; chunkStart < len(ready); chunkStart += int(lsn.job.VRFSpec.ChunkSize) { + chunkEnd := chunkStart + int(lsn.job.VRFSpec.ChunkSize) + if chunkEnd > len(ready) { + chunkEnd = len(ready) + } + chunk := ready[chunkStart:chunkEnd] + + var unfulfilled []pendingRequest + alreadyFulfilled, err := lsn.checkReqsFulfilled(ctx, l, chunk) + if errors.Is(err, context.Canceled) { + l.Infow("Context canceled, stopping request processing", "err", err) + return processed + } else if err != nil { + l.Errorw("Error checking for already fulfilled requests, proceeding anyway", "err", err) + } + for i, a := range alreadyFulfilled { + if a { + processed[chunk[i].req.RequestID().String()] = struct{}{} + } else { + unfulfilled = append(unfulfilled, chunk[i]) + } + } + + // All fromAddresses passed to the VRFv2 job have the same KeySpecific-MaxPrice value. + fromAddresses := lsn.fromAddresses() + maxGasPriceWei := lsn.feeCfg.PriceMaxKey(fromAddresses[0]) + + // Cases: + // 1. Never simulated: in this case, we want to observe the time until simulated + // on the utcTimestamp field of the pending request. + // 2. Simulated before: in this case, lastTry will be set to a non-zero time value, + // in which case we'd want to use that as a relative point from when we last tried + // the request. 
+ observeRequestSimDuration(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, lsn.coordinator.Version(), unfulfilled) + + pipelines := lsn.runPipelines(ctx, l, maxGasPriceWei, unfulfilled) + batches := newBatchFulfillments(batchMaxGas, lsn.coordinator.Version()) + outOfBalance := false + for _, p := range pipelines { + ll := l.With("reqID", p.req.req.RequestID().String(), + "txHash", p.req.req.Raw().TxHash, + "maxGasPrice", maxGasPriceWei.String(), + "fundsNeeded", p.fundsNeeded.String(), + "maxFee", p.maxFee.String(), + "gasLimit", p.gasLimit, + "attempts", p.req.attempts, + "remainingBalance", startBalanceNoReserved.String(), + "consumerAddress", p.req.req.Sender(), + "blockNumber", p.req.req.Raw().BlockNumber, + "blockHash", p.req.req.Raw().BlockHash, + ) + fromAddresses := lsn.fromAddresses() + fromAddress, err := lsn.gethks.GetRoundRobinAddress(lsn.chainID, fromAddresses...) + if err != nil { + l.Errorw("Couldn't get next from address", "err", err) + continue + } + ll = ll.With("fromAddress", fromAddress) + + if p.err != nil { + if errors.Is(p.err, errBlockhashNotInStore{}) { + // Running the blockhash store feeder in backwards mode will be required to + // resolve this. 
+ ll.Criticalw("Pipeline error", "err", p.err) + } else if errors.Is(p.err, errProofVerificationFailed{}) { + // This occurs when the proof reverts in the simulation + // This is almost always (if not always) due to a proof generated with an out-of-date + // blockhash + // we can simply mark as processed and move on, since we will eventually + // process the request with the right blockhash + ll.Infow("proof reverted in simulation, likely stale blockhash") + processed[p.req.req.RequestID().String()] = struct{}{} + } else { + ll.Errorw("Pipeline error", "err", p.err) + if !subIsActive { + ll.Warnw("Force-fulfilling a request with insufficient funds on a cancelled sub") + etx, err := lsn.enqueueForceFulfillment(ctx, p, fromAddress) + if err != nil { + ll.Errorw("Error enqueuing force-fulfillment, re-queueing request", "err", err) + continue + } + ll.Infow("Successfully enqueued force-fulfillment", "ethTxID", etx.ID) + processed[p.req.req.RequestID().String()] = struct{}{} + + // Need to put a continue here, otherwise the next if statement will be hit + // and we'd break out of the loop prematurely. + // If a sub is canceled, we want to force-fulfill ALL of it's pending requests + // before saying we're done with it. + continue + } + + if startBalanceNoReserved.Cmp(p.fundsNeeded) < 0 && errors.Is(p.err, errPossiblyInsufficientFunds{}) { + ll.Infow("Insufficient balance to fulfill a request based on estimate, breaking", "err", p.err) + outOfBalance = true + + // break out of this inner loop to process the currently constructed batch + break + } + + // Ensure consumer is valid, otherwise drop the request. 
+ if !lsn.isConsumerValidAfterFinalityDepthElapsed(ctx, p.req) { + lsn.l.Infow( + "Dropping request that was made by an invalid consumer.", + "consumerAddress", p.req.req.Sender(), + "reqID", p.req.req.RequestID(), + "blockNumber", p.req.req.Raw().BlockNumber, + "blockHash", p.req.req.Raw().BlockHash, + ) + processed[p.req.req.RequestID().String()] = struct{}{} + continue + } + } + continue + } + + if startBalanceNoReserved.Cmp(p.maxFee) < 0 { + // Insufficient funds, have to wait for a user top up. + // Break out of the loop now and process what we are able to process + // in the constructed batches. + ll.Infow("Insufficient balance to fulfill a request, breaking") + break + } + + batches.addRun(p, fromAddress) + + startBalanceNoReserved.Sub(startBalanceNoReserved, p.maxFee) + } + + var processedRequestIDs []string + for _, batch := range batches.fulfillments { + l.Debugw("Processing batch", "batchSize", len(batch.proofs)) + p := lsn.processBatch(l, subID, startBalanceNoReserved, batchMaxGas, batch, batch.fromAddress) + processedRequestIDs = append(processedRequestIDs, p...) + } + + for _, reqID := range processedRequestIDs { + processed[reqID] = struct{}{} + } + + // outOfBalance is set to true if the current sub we are processing + // has run out of funds to process any remaining requests. After enqueueing + // this constructed batch, we break out of this outer loop in order to + // avoid unnecessarily processing the remaining requests. 
+ if outOfBalance { + break + } + } + + return +} + +func (lsn *listenerV2) processRequestsPerSubBatch( + ctx context.Context, + subID *big.Int, + startLinkBalance *big.Int, + startEthBalance *big.Int, + reqs []pendingRequest, + subIsActive bool, +) map[string]struct{} { + var processed = make(map[string]struct{}) + startBalanceNoReserveLink, err := lsn.MaybeSubtractReservedLink( + ctx, startLinkBalance, lsn.chainID, subID, lsn.coordinator.Version()) + if err != nil { + lsn.l.Errorw("Couldn't get reserved PLI for subscription", "sub", reqs[0].req.SubID(), "err", err) + return processed + } + startBalanceNoReserveEth, err := lsn.MaybeSubtractReservedEth( + ctx, startEthBalance, lsn.chainID, subID, lsn.coordinator.Version()) + if err != nil { + lsn.l.Errorw("Couldn't get reserved ether for subscription", "sub", reqs[0].req.SubID(), "err", err) + return processed + } + + // Split the requests into native and PLI requests. + var ( + nativeRequests []pendingRequest + linkRequests []pendingRequest + ) + for _, req := range reqs { + if req.req.NativePayment() { + nativeRequests = append(nativeRequests, req) + } else { + linkRequests = append(linkRequests, req) + } + } + // process the native and link requests in parallel + var wg sync.WaitGroup + var nativeProcessed, linkProcessed map[string]struct{} + wg.Add(2) + go func() { + defer wg.Done() + nativeProcessed = lsn.processRequestsPerSubBatchHelper(ctx, subID, startEthBalance, startBalanceNoReserveEth, nativeRequests, subIsActive, true) + }() + go func() { + defer wg.Done() + linkProcessed = lsn.processRequestsPerSubBatchHelper(ctx, subID, startLinkBalance, startBalanceNoReserveLink, linkRequests, subIsActive, false) + }() + wg.Wait() + // combine the processed link and native requests into the processed map + for k, v := range nativeProcessed { + processed[k] = v + } + for k, v := range linkProcessed { + processed[k] = v + } + + return processed +} + +// enqueueForceFulfillment enqueues a forced fulfillment through the 
+// VRFOwner contract. It estimates gas again on the transaction due
+// to the extra steps taken within VRFOwner.fulfillRandomWords.
+// It returns the enqueued transaction, or an error if the job spec has no
+// VRFOwner address, the pipeline produced no payload, or enqueueing failed.
+func (lsn *listenerV2) enqueueForceFulfillment(
+	ctx context.Context,
+	p vrfPipelineResult,
+	fromAddress common.Address,
+) (etx txmgr.Tx, err error) {
+	if lsn.job.VRFSpec.VRFOwnerAddress == nil {
+		err = errors.New("vrf owner address not set in job spec, recreate job and provide it to force-fulfill")
+		return
+	}
+
+	if p.payload == "" {
+		// should probably never happen
+		// a critical log will be logged if this is the case in simulateFulfillment
+		err = errors.New("empty payload in vrfPipelineResult")
+		return
+	}
+
+	// fulfill the request through the VRF owner
+	err = lsn.q.Transaction(func(tx pg.Queryer) error {
+		lsn.l.Infow("VRFOwner.fulfillRandomWords vs. VRFCoordinatorV2.fulfillRandomWords",
+			"vrf_owner.fulfillRandomWords", hexutil.Encode(vrfOwnerABI.Methods["fulfillRandomWords"].ID),
+			"vrf_coordinator_v2.fulfillRandomWords", hexutil.Encode(coordinatorV2ABI.Methods["fulfillRandomWords"].ID),
+		)
+
+		vrfOwnerAddress1 := lsn.vrfOwner.Address()
+		vrfOwnerAddressSpec := lsn.job.VRFSpec.VRFOwnerAddress.Address()
+		lsn.l.Infow("addresses diff", "wrapper_address", vrfOwnerAddress1, "spec_address", vrfOwnerAddressSpec)
+
+		lsn.l.Infow("fulfillRandomWords payload", "proof", p.proof, "commitment", p.reqCommitment.Get(), "payload", p.payload)
+		// Decode with an explicit error check instead of hexutil.MustDecode:
+		// the previous `if err != nil` here tested the (always-nil at this point)
+		// named return value, so a malformed payload would have panicked instead
+		// of returning an error.
+		txData, decodeErr := hexutil.Decode(p.payload)
+		if decodeErr != nil {
+			return fmt.Errorf("decoding VRFOwner.fulfillRandomWords payload: %w", decodeErr)
+		}
+		estimateGasLimit, err := lsn.chain.Client().EstimateGas(ctx, ethereum.CallMsg{
+			From: fromAddress,
+			To:   &vrfOwnerAddressSpec,
+			Data: txData,
+		})
+		if err != nil {
+			return fmt.Errorf("failed to estimate gas on VRFOwner.fulfillRandomWords: %w", err)
+		}
+
+		lsn.l.Infow("Estimated gas limit on force fulfillment",
+			"estimateGasLimit", estimateGasLimit, "pipelineGasLimit", p.gasLimit)
+		// Never send with less gas than the pipeline simulation required.
+		if estimateGasLimit < uint64(p.gasLimit) {
+			estimateGasLimit = uint64(p.gasLimit)
+		}
+
+		requestID := common.BytesToHash(p.req.req.RequestID().Bytes())
+		subID := p.req.req.SubID()
+		requestTxHash := p.req.req.Raw().TxHash
+		etx, err = lsn.chain.TxManager().CreateTransaction(ctx, txmgr.TxRequest{
+			FromAddress:    fromAddress,
+			ToAddress:      lsn.vrfOwner.Address(),
+			EncodedPayload: txData,
+			FeeLimit:       uint32(estimateGasLimit),
+			Strategy:       txmgrcommon.NewSendEveryStrategy(),
+			Meta: &txmgr.TxMeta{
+				RequestID:     &requestID,
+				SubID:         ptr(subID.Uint64()),
+				RequestTxHash: &requestTxHash,
+				// No max link since simulation failed
+			},
+		})
+		return err
+	})
+	return
+}
+
+// For an errored pipeline run, wait until the finality depth of the chain to have elapsed,
+// then check if the failing request is being called by an invalid sender. Return false if this is the case,
+// otherwise true.
+func (lsn *listenerV2) isConsumerValidAfterFinalityDepthElapsed(ctx context.Context, req pendingRequest) bool {
+	latestHead := lsn.getLatestHead()
+	// Guard the unsigned subtraction: if the request's block is ahead of the
+	// locally observed head (e.g. the head tracker is lagging), latestHead-blockNumber
+	// would underflow and wrongly look like finality depth has elapsed.
+	if latestHead >= req.req.Raw().BlockNumber && latestHead-req.req.Raw().BlockNumber > uint64(lsn.cfg.FinalityDepth()) {
+		code, err := lsn.chain.Client().CodeAt(ctx, req.req.Sender(), big.NewInt(int64(latestHead)))
+		if err != nil {
+			lsn.l.Warnw("Failed to fetch contract code", "err", err)
+			return true // error fetching code, give the benefit of doubt to the consumer
+		}
+		if len(code) == 0 {
+			return false // invalid consumer
+		}
+	}
+
+	return true // valid consumer, or finality depth has not elapsed
+}
+
+// processRequestsPerSubHelper processes a set of pending requests for the provided sub id.
+// It returns a set of request IDs that were processed.
+// Note that the provided startBalanceNoReserve is the balance of the subscription
+// minus any pending requests that have already been processed and not yet fulfilled onchain.
+func (lsn *listenerV2) processRequestsPerSubHelper( + ctx context.Context, + subID *big.Int, + startBalance *big.Int, + startBalanceNoReserved *big.Int, + reqs []pendingRequest, + subIsActive bool, + nativePayment bool, +) (processed map[string]struct{}) { + start := time.Now() + processed = make(map[string]struct{}) + + l := lsn.l.With( + "subID", subID, + "eligibleSubReqs", len(reqs), + "startBalance", startBalance.String(), + "startBalanceNoReserved", startBalanceNoReserved.String(), + "subIsActive", subIsActive, + "nativePayment", nativePayment, + ) + + defer func() { + l.Infow("Finished processing for sub", + "endBalance", startBalanceNoReserved.String(), + "totalProcessed", len(processed), + "totalUnique", uniqueReqs(reqs), + "time", time.Since(start).String()) + }() + + l.Infow("Processing requests for subscription") + + ready, expired := lsn.getReadyAndExpired(l, reqs) + for _, reqID := range expired { + processed[reqID] = struct{}{} + } + + // Process requests in chunks + for chunkStart := 0; chunkStart < len(ready); chunkStart += int(lsn.job.VRFSpec.ChunkSize) { + chunkEnd := chunkStart + int(lsn.job.VRFSpec.ChunkSize) + if chunkEnd > len(ready) { + chunkEnd = len(ready) + } + chunk := ready[chunkStart:chunkEnd] + + var unfulfilled []pendingRequest + alreadyFulfilled, err := lsn.checkReqsFulfilled(ctx, l, chunk) + if errors.Is(err, context.Canceled) { + l.Infow("Context canceled, stopping request processing", "err", err) + return processed + } else if err != nil { + l.Errorw("Error checking for already fulfilled requests, proceeding anyway", "err", err) + } + for i, a := range alreadyFulfilled { + if a { + processed[chunk[i].req.RequestID().String()] = struct{}{} + } else { + unfulfilled = append(unfulfilled, chunk[i]) + } + } + + // All fromAddresses passed to the VRFv2 job have the same KeySpecific-MaxPrice value. 
+ fromAddresses := lsn.fromAddresses() + maxGasPriceWei := lsn.feeCfg.PriceMaxKey(fromAddresses[0]) + observeRequestSimDuration(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, lsn.coordinator.Version(), unfulfilled) + pipelines := lsn.runPipelines(ctx, l, maxGasPriceWei, unfulfilled) + for _, p := range pipelines { + ll := l.With("reqID", p.req.req.RequestID().String(), + "txHash", p.req.req.Raw().TxHash, + "maxGasPrice", maxGasPriceWei.String(), + "fundsNeeded", p.fundsNeeded.String(), + "maxFee", p.maxFee.String(), + "gasLimit", p.gasLimit, + "attempts", p.req.attempts, + "remainingBalance", startBalanceNoReserved.String(), + "consumerAddress", p.req.req.Sender(), + "blockNumber", p.req.req.Raw().BlockNumber, + "blockHash", p.req.req.Raw().BlockHash, + ) + fromAddress, err := lsn.gethks.GetRoundRobinAddress(lsn.chainID, fromAddresses...) + if err != nil { + l.Errorw("Couldn't get next from address", "err", err) + continue + } + ll = ll.With("fromAddress", fromAddress) + + if p.err != nil { + if errors.Is(p.err, errBlockhashNotInStore{}) { + // Running the blockhash store feeder in backwards mode will be required to + // resolve this. 
+ ll.Criticalw("Pipeline error", "err", p.err) + } else if errors.Is(p.err, errProofVerificationFailed{}) { + // This occurs when the proof reverts in the simulation + // This is almost always (if not always) due to a proof generated with an out-of-date + // blockhash + // we can simply mark as processed and move on, since we will eventually + // process the request with the right blockhash + ll.Infow("proof reverted in simulation, likely stale blockhash") + processed[p.req.req.RequestID().String()] = struct{}{} + } else { + ll.Errorw("Pipeline error", "err", p.err) + + if !subIsActive { + lsn.l.Warnw("Force-fulfilling a request with insufficient funds on a cancelled sub") + etx, err2 := lsn.enqueueForceFulfillment(ctx, p, fromAddress) + if err2 != nil { + ll.Errorw("Error enqueuing force-fulfillment, re-queueing request", "err", err2) + continue + } + ll.Infow("Enqueued force-fulfillment", "ethTxID", etx.ID) + processed[p.req.req.RequestID().String()] = struct{}{} + + // Need to put a continue here, otherwise the next if statement will be hit + // and we'd break out of the loop prematurely. + // If a sub is canceled, we want to force-fulfill ALL of it's pending requests + // before saying we're done with it. + continue + } + + if startBalanceNoReserved.Cmp(p.fundsNeeded) < 0 { + ll.Infow("Insufficient balance to fulfill a request based on estimate, returning", "err", p.err) + return processed + } + + // Ensure consumer is valid, otherwise drop the request. + if !lsn.isConsumerValidAfterFinalityDepthElapsed(ctx, p.req) { + lsn.l.Infow( + "Dropping request that was made by an invalid consumer.", + "consumerAddress", p.req.req.Sender(), + "reqID", p.req.req.RequestID(), + "blockNumber", p.req.req.Raw().BlockNumber, + "blockHash", p.req.req.Raw().BlockHash, + ) + processed[p.req.req.RequestID().String()] = struct{}{} + continue + } + } + continue + } + + if startBalanceNoReserved.Cmp(p.maxFee) < 0 { + // Insufficient funds, have to wait for a user top up. 
Leave it unprocessed for now + ll.Infow("Insufficient balance to fulfill a request, returning") + return processed + } + + ll.Infow("Enqueuing fulfillment") + var transaction txmgr.Tx + err = lsn.q.Transaction(func(tx pg.Queryer) error { + if err = lsn.pipelineRunner.InsertFinishedRun(p.run, true, pg.WithQueryer(tx)); err != nil { + return err + } + + var maxLink, maxEth *string + tmp := p.maxFee.String() + if p.reqCommitment.NativePayment() { + maxEth = &tmp + } else { + maxLink = &tmp + } + var ( + txMetaSubID *uint64 + txMetaGlobalSubID *string + ) + if lsn.coordinator.Version() == vrfcommon.V2Plus { + txMetaGlobalSubID = ptr(p.req.req.SubID().String()) + } else if lsn.coordinator.Version() == vrfcommon.V2 { + txMetaSubID = ptr(p.req.req.SubID().Uint64()) + } + requestID := common.BytesToHash(p.req.req.RequestID().Bytes()) + coordinatorAddress := lsn.coordinator.Address() + requestTxHash := p.req.req.Raw().TxHash + transaction, err = lsn.chain.TxManager().CreateTransaction(ctx, txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: lsn.coordinator.Address(), + EncodedPayload: hexutil.MustDecode(p.payload), + FeeLimit: p.gasLimit, + Meta: &txmgr.TxMeta{ + RequestID: &requestID, + MaxLink: maxLink, + MaxEth: maxEth, + SubID: txMetaSubID, + GlobalSubID: txMetaGlobalSubID, + RequestTxHash: &requestTxHash, + }, + Strategy: txmgrcommon.NewSendEveryStrategy(), + Checker: txmgr.TransmitCheckerSpec{ + CheckerType: lsn.transmitCheckerType(), + VRFCoordinatorAddress: &coordinatorAddress, + VRFRequestBlockNumber: new(big.Int).SetUint64(p.req.req.Raw().BlockNumber), + }, + }) + return err + }) + if err != nil { + ll.Errorw("Error enqueuing fulfillment, requeuing request", "err", err) + continue + } + ll.Infow("Enqueued fulfillment", "ethTxID", transaction.GetID()) + + // If we successfully enqueued for the txm, subtract that balance + // And loop to attempt to enqueue another fulfillment + startBalanceNoReserved.Sub(startBalanceNoReserved, p.maxFee) + 
processed[p.req.req.RequestID().String()] = struct{}{} + vrfcommon.IncProcessedReqs(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, lsn.coordinator.Version()) + } + } + + return +} + +func (lsn *listenerV2) transmitCheckerType() txmgrtypes.TransmitCheckerType { + if lsn.coordinator.Version() == vrfcommon.V2 { + return txmgr.TransmitCheckerTypeVRFV2 + } + return txmgr.TransmitCheckerTypeVRFV2Plus +} + +func (lsn *listenerV2) processRequestsPerSub( + ctx context.Context, + subID *big.Int, + startLinkBalance *big.Int, + startEthBalance *big.Int, + reqs []pendingRequest, + subIsActive bool, +) map[string]struct{} { + if lsn.job.VRFSpec.BatchFulfillmentEnabled && lsn.batchCoordinator != nil { + return lsn.processRequestsPerSubBatch(ctx, subID, startLinkBalance, startEthBalance, reqs, subIsActive) + } + + var processed = make(map[string]struct{}) + chainId := lsn.chain.Client().ConfiguredChainID() + startBalanceNoReserveLink, err := lsn.MaybeSubtractReservedLink( + ctx, startLinkBalance, chainId, subID, lsn.coordinator.Version()) + if err != nil { + lsn.l.Errorw("Couldn't get reserved PLI for subscription", "sub", reqs[0].req.SubID(), "err", err) + return processed + } + startBalanceNoReserveEth, err := lsn.MaybeSubtractReservedEth( + ctx, startEthBalance, lsn.chainID, subID, lsn.coordinator.Version()) + if err != nil { + lsn.l.Errorw("Couldn't get reserved ETH for subscription", "sub", reqs[0].req.SubID(), "err", err) + return processed + } + + // Split the requests into native and PLI requests. 
+ var ( + nativeRequests []pendingRequest + linkRequests []pendingRequest + ) + for _, req := range reqs { + if req.req.NativePayment() { + if !lsn.inflightCache.Contains(req.req.Raw()) { + nativeRequests = append(nativeRequests, req) + } else { + lsn.l.Debugw("Skipping native request because it is already inflight", + "reqID", req.req.RequestID()) + } + } else { + if !lsn.inflightCache.Contains(req.req.Raw()) { + linkRequests = append(linkRequests, req) + } else { + lsn.l.Debugw("Skipping link request because it is already inflight", + "reqID", req.req.RequestID()) + } + } + } + // process the native and link requests in parallel + var ( + wg sync.WaitGroup + nativeProcessed, linkProcessed map[string]struct{} + ) + wg.Add(2) + go func() { + defer wg.Done() + nativeProcessed = lsn.processRequestsPerSubHelper( + ctx, + subID, + startEthBalance, + startBalanceNoReserveEth, + nativeRequests, + subIsActive, + true) + }() + go func() { + defer wg.Done() + linkProcessed = lsn.processRequestsPerSubHelper( + ctx, + subID, + startLinkBalance, + startBalanceNoReserveLink, + linkRequests, + subIsActive, + false) + }() + wg.Wait() + // combine the native and link processed requests into the processed map + for k, v := range nativeProcessed { + processed[k] = v + } + for k, v := range linkProcessed { + processed[k] = v + } + + return processed +} + +func (lsn *listenerV2) requestCommitmentPayload(requestID *big.Int) (payload []byte, err error) { + if lsn.coordinator.Version() == vrfcommon.V2Plus { + return coordinatorV2PlusABI.Pack("s_requestCommitments", requestID) + } else if lsn.coordinator.Version() == vrfcommon.V2 { + return coordinatorV2ABI.Pack("getCommitment", requestID) + } + return nil, errors.Errorf("unsupported coordinator version: %s", lsn.coordinator.Version()) +} + +// checkReqsFulfilled returns a bool slice the same size of the given reqs slice +// where each slice element indicates whether that request was already fulfilled +// or not. 
+func (lsn *listenerV2) checkReqsFulfilled(ctx context.Context, l logger.Logger, reqs []pendingRequest) ([]bool, error) { + var ( + start = time.Now() + calls = make([]rpc.BatchElem, len(reqs)) + fulfilled = make([]bool, len(reqs)) + ) + + for i, req := range reqs { + payload, err := lsn.requestCommitmentPayload(req.req.RequestID()) + if err != nil { + // This shouldn't happen + return fulfilled, fmt.Errorf("creating getCommitment payload: %w", err) + } + + reqBlockNumber := new(big.Int).SetUint64(req.req.Raw().BlockNumber) + + // Subtract 5 since the newest block likely isn't indexed yet and will cause "header not + // found" errors. + currBlock := new(big.Int).SetUint64(lsn.getLatestHead() - 5) + m := bigmath.Max(reqBlockNumber, currBlock) + + var result string + calls[i] = rpc.BatchElem{ + Method: "eth_call", + Args: []interface{}{ + map[string]interface{}{ + "to": lsn.coordinator.Address(), + "data": hexutil.Bytes(payload), + }, + // The block at which we want to make the call + hexutil.EncodeBig(m), + }, + Result: &result, + } + } + + err := lsn.chain.Client().BatchCallContext(ctx, calls) + if err != nil { + return fulfilled, fmt.Errorf("making batch call: %w", err) + } + + var errs error + for i, call := range calls { + if call.Error != nil { + errs = multierr.Append(errs, fmt.Errorf("checking request %s with hash %s: %w", + reqs[i].req.RequestID().String(), reqs[i].req.Raw().TxHash.String(), call.Error)) + continue + } + + rString, ok := call.Result.(*string) + if !ok { + errs = multierr.Append(errs, + fmt.Errorf("unexpected result %+v on request %s with hash %s", + call.Result, reqs[i].req.RequestID().String(), reqs[i].req.Raw().TxHash.String())) + continue + } + result, err := hexutil.Decode(*rString) + if err != nil { + errs = multierr.Append(errs, + fmt.Errorf("decoding batch call result %+v %s request %s with hash %s: %w", + call.Result, *rString, reqs[i].req.RequestID().String(), reqs[i].req.Raw().TxHash.String(), err)) + continue + } + + if 
utils.IsEmpty(result) { + l.Infow("Request already fulfilled", + "reqID", reqs[i].req.RequestID().String(), + "attempts", reqs[i].attempts, + "txHash", reqs[i].req.Raw().TxHash) + fulfilled[i] = true + } + } + + l.Debugw("Done checking fulfillment status", + "numChecked", len(reqs), "time", time.Since(start).String()) + return fulfilled, errs +} + +func (lsn *listenerV2) runPipelines( + ctx context.Context, + l logger.Logger, + maxGasPriceWei *assets.Wei, + reqs []pendingRequest, +) []vrfPipelineResult { + var ( + start = time.Now() + results = make([]vrfPipelineResult, len(reqs)) + wg = sync.WaitGroup{} + ) + + for i, req := range reqs { + wg.Add(1) + go func(i int, req pendingRequest) { + defer wg.Done() + results[i] = lsn.simulateFulfillment(ctx, maxGasPriceWei, req, l) + }(i, req) + } + wg.Wait() + + l.Debugw("Finished running pipelines", + "count", len(reqs), "time", time.Since(start).String()) + return results +} + +func (lsn *listenerV2) estimateFee( + ctx context.Context, + req RandomWordsRequested, + maxGasPriceWei *assets.Wei, +) (*big.Int, error) { + // NativePayment() returns true if and only if the version is V2+ and the + // request was made in ETH. + if req.NativePayment() { + return EstimateFeeWei(req.CallbackGasLimit(), maxGasPriceWei.ToInt()) + } + + // In the event we are using PLI we need to estimate the fee in juels + // Don't use up too much time to get this info, it's not critical for operating vrf. + callCtx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + roundData, err := lsn.aggregator.LatestRoundData(&bind.CallOpts{Context: callCtx}) + if err != nil { + return nil, fmt.Errorf("get aggregator latestAnswer: %w", err) + } + + return EstimateFeeJuels( + req.CallbackGasLimit(), + maxGasPriceWei.ToInt(), + roundData.Answer, + ) +} + +// Here we use the pipeline to parse the log, generate a vrf response +// then simulate the transaction at the max gas price to determine its maximum link cost. 
+func (lsn *listenerV2) simulateFulfillment( + ctx context.Context, + maxGasPriceWei *assets.Wei, + req pendingRequest, + lg logger.Logger, +) vrfPipelineResult { + var ( + res = vrfPipelineResult{req: req} + err error + ) + // estimate how much funds are needed so that we can log it if the simulation fails. + res.fundsNeeded, err = lsn.estimateFee(ctx, req.req, maxGasPriceWei) + if err != nil { + // not critical, just log and continue + lg.Warnw("unable to estimate funds needed for request, continuing anyway", + "reqID", req.req.RequestID(), + "err", err) + res.fundsNeeded = big.NewInt(0) + } + + vars := pipeline.NewVarsFrom(map[string]interface{}{ + "jobSpec": map[string]interface{}{ + "databaseID": lsn.job.ID, + "externalJobID": lsn.job.ExternalJobID, + "name": lsn.job.Name.ValueOrZero(), + "publicKey": lsn.job.VRFSpec.PublicKey[:], + "maxGasPrice": maxGasPriceWei.ToInt().String(), + "evmChainID": lsn.job.VRFSpec.EVMChainID.String(), + }, + "jobRun": map[string]interface{}{ + "logBlockHash": req.req.Raw().BlockHash.Bytes(), + "logBlockNumber": req.req.Raw().BlockNumber, + "logTxHash": req.req.Raw().TxHash, + "logTopics": req.req.Raw().Topics, + "logData": req.req.Raw().Data, + }, + }) + var trrs pipeline.TaskRunResults + res.run, trrs, err = lsn.pipelineRunner.ExecuteRun(ctx, *lsn.job.PipelineSpec, vars, lg) + if err != nil { + res.err = fmt.Errorf("executing run: %w", err) + return res + } + // The call task will fail if there are insufficient funds + if res.run.AllErrors.HasError() { + res.err = errors.WithStack(res.run.AllErrors.ToError()) + + if strings.Contains(res.err.Error(), "blockhash not found in store") { + res.err = multierr.Combine(res.err, errBlockhashNotInStore{}) + } else if isProofVerificationError(res.err.Error()) { + res.err = multierr.Combine(res.err, errProofVerificationFailed{}) + } else if strings.Contains(res.err.Error(), "execution reverted") { + // Even if the simulation fails, we want to get the + // txData for the fulfillRandomWords 
call, in case + // we need to force fulfill. + for _, trr := range trrs { + if trr.Task.Type() == pipeline.TaskTypeVRFV2 { + if trr.Result.Error != nil { + // error in VRF proof generation + // this means that we won't be able to force-fulfill in the event of a + // canceled sub and active requests. + // since this would be an extraordinary situation, + // we can log loudly here. + lg.Criticalw("failed to generate VRF proof", "err", trr.Result.Error) + break + } + + // extract the abi-encoded tx data to fulfillRandomWords from the VRF task. + // that's all we need in the event of a force-fulfillment. + m := trr.Result.Value.(map[string]any) + res.payload = m["output"].(string) + res.proof = FromV2Proof(m["proof"].(vrf_coordinator_v2.VRFProof)) + res.reqCommitment = NewRequestCommitment(m["requestCommitment"]) + } + } + res.err = multierr.Combine(res.err, errPossiblyInsufficientFunds{}) + } + + return res + } + finalResult := trrs.FinalResult(lg) + if len(finalResult.Values) != 1 { + res.err = errors.Errorf("unexpected number of outputs, expected 1, was %d", len(finalResult.Values)) + return res + } + + // Run succeeded, we expect a byte array representing the billing amount + b, ok := finalResult.Values[0].([]uint8) + if !ok { + res.err = errors.New("expected []uint8 final result") + return res + } + + res.maxFee, err = hex.ParseBig(hexutil.Encode(b)[2:]) + if err != nil { + res.err = err + return res + } + + for _, trr := range trrs { + if trr.Task.Type() == pipeline.TaskTypeVRFV2 { + m := trr.Result.Value.(map[string]interface{}) + res.payload = m["output"].(string) + res.proof = FromV2Proof(m["proof"].(vrf_coordinator_v2.VRFProof)) + res.reqCommitment = NewRequestCommitment(m["requestCommitment"]) + } + + if trr.Task.Type() == pipeline.TaskTypeVRFV2Plus { + m := trr.Result.Value.(map[string]interface{}) + res.payload = m["output"].(string) + res.proof = FromV2PlusProof(m["proof"].(vrf_coordinator_v2plus_interface.IVRFCoordinatorV2PlusInternalProof)) + 
res.reqCommitment = NewRequestCommitment(m["requestCommitment"]) + } + + if trr.Task.Type() == pipeline.TaskTypeEstimateGasLimit { + res.gasLimit = trr.Result.Value.(uint32) + } + } + return res +} + +func (lsn *listenerV2) fromAddresses() []common.Address { + var addresses []common.Address + for _, a := range lsn.job.VRFSpec.FromAddresses { + addresses = append(addresses, a.Address()) + } + return addresses +} diff --git a/core/services/vrf/v2/listener_v2_test.go b/core/services/vrf/v2/listener_v2_test.go new file mode 100644 index 00000000..70e69a2f --- /dev/null +++ b/core/services/vrf/v2/listener_v2_test.go @@ -0,0 +1,500 @@ +package v2 + +import ( + "encoding/json" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + + "github.com/goplugin/plugin-common/pkg/sqlutil" + clnull "github.com/goplugin/plugin-common/pkg/utils/null" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + evmmocks "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" 
+ "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func makeTestTxm(t *testing.T, txStore txmgr.TestEvmTxStore, keyStore keystore.Master) txmgrcommon.TxManager[*big.Int, *evmtypes.Head, common.Address, common.Hash, common.Hash, evmtypes.Nonce, gas.EvmFee] { + _, _, evmConfig := txmgr.MakeTestConfigs(t) + ec := evmtest.NewEthClientMockWithDefaultChain(t) + txmConfig := txmgr.NewEvmTxmConfig(evmConfig) + txm := txmgr.NewEvmTxm(ec.ConfiguredChainID(), txmConfig, evmConfig.Transactions(), keyStore.Eth(), logger.TestLogger(t), nil, nil, + nil, txStore, nil, nil, nil, nil, nil) + + return txm +} + +func MakeTestListenerV2(chain legacyevm.Chain) *listenerV2 { + return &listenerV2{chainID: chain.Client().ConfiguredChainID(), chain: chain} +} + +func txMetaSubIDs(t *testing.T, vrfVersion vrfcommon.Version, subID *big.Int) (*uint64, *string) { + var ( + txMetaSubID *uint64 + txMetaGlobalSubID *string + ) + if vrfVersion == vrfcommon.V2Plus { + txMetaGlobalSubID = ptr(subID.String()) + } else if vrfVersion == vrfcommon.V2 { + txMetaSubID = ptr(subID.Uint64()) + } else { + t.Errorf("unsupported vrf version: %s", vrfVersion) + } + return txMetaSubID, txMetaGlobalSubID +} + +func addEthTx(t *testing.T, txStore txmgr.TestEvmTxStore, from common.Address, state txmgrtypes.TxState, maxLink string, subID *big.Int, reqTxHash common.Hash, vrfVersion vrfcommon.Version) { + txMetaSubID, txMetaGlobalSubID := txMetaSubIDs(t, vrfVersion, subID) + b, err := json.Marshal(txmgr.TxMeta{ + MaxLink: &maxLink, + SubID: txMetaSubID, + GlobalSubID: txMetaGlobalSubID, + RequestTxHash: &reqTxHash, + }) + require.NoError(t, err) + meta := sqlutil.JSON(b) + tx := &txmgr.Tx{ + FromAddress: from, + ToAddress: from, + EncodedPayload: []byte(`blah`), + Value: *big.NewInt(0), + FeeLimit: 0, + State: state, + Meta: &meta, + Subject: uuid.NullUUID{}, + ChainID: testutils.SimulatedChainID, + MinConfirmations: clnull.Uint32{Uint32: 0}, + 
PipelineTaskRunID: uuid.NullUUID{}, + } + err = txStore.InsertTx(tx) + require.NoError(t, err) +} + +func addConfirmedEthTx(t *testing.T, txStore txmgr.TestEvmTxStore, from common.Address, maxLink string, subID *big.Int, nonce evmtypes.Nonce, vrfVersion vrfcommon.Version) { + txMetaSubID, txMetaGlobalSubID := txMetaSubIDs(t, vrfVersion, subID) + b, err := json.Marshal(txmgr.TxMeta{ + MaxLink: &maxLink, + SubID: txMetaSubID, + GlobalSubID: txMetaGlobalSubID, + }) + require.NoError(t, err) + meta := sqlutil.JSON(b) + now := time.Now() + + tx := &txmgr.Tx{ + Sequence: &nonce, + FromAddress: from, + ToAddress: from, + EncodedPayload: []byte(`blah`), + Value: *big.NewInt(0), + FeeLimit: 0, + State: txmgrcommon.TxConfirmed, + Meta: &meta, + Subject: uuid.NullUUID{}, + ChainID: testutils.SimulatedChainID, + MinConfirmations: clnull.Uint32{Uint32: 0}, + PipelineTaskRunID: uuid.NullUUID{}, + BroadcastAt: &now, + InitialBroadcastAt: &now, + } + err = txStore.InsertTx(tx) + require.NoError(t, err) +} + +func addEthTxNativePayment(t *testing.T, txStore txmgr.TestEvmTxStore, from common.Address, state txmgrtypes.TxState, maxNative string, subID *big.Int, reqTxHash common.Hash, vrfVersion vrfcommon.Version) { + txMetaSubID, txMetaGlobalSubID := txMetaSubIDs(t, vrfVersion, subID) + b, err := json.Marshal(txmgr.TxMeta{ + MaxEth: &maxNative, + SubID: txMetaSubID, + GlobalSubID: txMetaGlobalSubID, + RequestTxHash: &reqTxHash, + }) + require.NoError(t, err) + meta := sqlutil.JSON(b) + tx := &txmgr.Tx{ + FromAddress: from, + ToAddress: from, + EncodedPayload: []byte(`blah`), + Value: *big.NewInt(0), + FeeLimit: 0, + State: state, + Meta: &meta, + Subject: uuid.NullUUID{}, + ChainID: testutils.SimulatedChainID, + MinConfirmations: clnull.Uint32{Uint32: 0}, + PipelineTaskRunID: uuid.NullUUID{}, + } + err = txStore.InsertTx(tx) + require.NoError(t, err) +} + +func addConfirmedEthTxNativePayment(t *testing.T, txStore txmgr.TestEvmTxStore, from common.Address, maxNative string, subID 
*big.Int, nonce evmtypes.Nonce, vrfVersion vrfcommon.Version) { + txMetaSubID, txMetaGlobalSubID := txMetaSubIDs(t, vrfVersion, subID) + b, err := json.Marshal(txmgr.TxMeta{ + MaxEth: &maxNative, + SubID: txMetaSubID, + GlobalSubID: txMetaGlobalSubID, + }) + require.NoError(t, err) + meta := sqlutil.JSON(b) + now := time.Now() + tx := &txmgr.Tx{ + Sequence: &nonce, + FromAddress: from, + ToAddress: from, + EncodedPayload: []byte(`blah`), + Value: *big.NewInt(0), + FeeLimit: 0, + State: txmgrcommon.TxConfirmed, + Meta: &meta, + Subject: uuid.NullUUID{}, + ChainID: testutils.SimulatedChainID, + MinConfirmations: clnull.Uint32{Uint32: 0}, + PipelineTaskRunID: uuid.NullUUID{}, + BroadcastAt: &now, + InitialBroadcastAt: &now, + } + err = txStore.InsertTx(tx) + require.NoError(t, err) +} + +func testMaybeSubtractReservedLink(t *testing.T, vrfVersion vrfcommon.Version) { + db := pgtest.NewSqlxDB(t) + lggr := logger.TestLogger(t) + cfg := pgtest.NewQConfig(false) + ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg) + require.NoError(t, ks.Unlock("blah")) + chainID := testutils.SimulatedChainID + k, err := ks.Eth().Create(chainID) + require.NoError(t, err) + + subID := new(big.Int).SetUint64(1) + reqTxHash := common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8") + + j, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + RequestedConfsDelay: 10, + }).Toml()) + require.NoError(t, err) + txstore := txmgr.NewTxStore(db, lggr, cfg) + txm := makeTestTxm(t, txstore, ks) + chain := evmmocks.NewChain(t) + chain.On("TxManager").Return(txm) + listener := &listenerV2{ + respCount: map[string]uint64{}, + job: j, + chain: chain, + } + + ctx := testutils.Context(t) + + // Insert an unstarted eth tx with link metadata + addEthTx(t, txstore, k.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion) + start, err := listener.MaybeSubtractReservedLink(ctx, big.NewInt(100_000), chainID, subID, 
vrfVersion) + + require.NoError(t, err) + assert.Equal(t, "90000", start.String()) + + // A confirmed tx should not affect the starting balance + addConfirmedEthTx(t, txstore, k.Address, "10000", subID, 1, vrfVersion) + start, err = listener.MaybeSubtractReservedLink(ctx, big.NewInt(100_000), chainID, subID, vrfVersion) + require.NoError(t, err) + assert.Equal(t, "90000", start.String()) + + // An unconfirmed tx _should_ affect the starting balance. + addEthTx(t, txstore, k.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion) + start, err = listener.MaybeSubtractReservedLink(ctx, big.NewInt(100_000), chainID, subID, vrfVersion) + require.NoError(t, err) + assert.Equal(t, "80000", start.String()) + + // One subscriber's reserved link should not affect other subscribers prospective balance. + otherSubID := new(big.Int).SetUint64(2) + require.NoError(t, err) + addEthTx(t, txstore, k.Address, txmgrcommon.TxUnstarted, "10000", otherSubID, reqTxHash, vrfVersion) + start, err = listener.MaybeSubtractReservedLink(ctx, big.NewInt(100_000), chainID, subID, vrfVersion) + require.NoError(t, err) + require.Equal(t, "80000", start.String()) + + // One key's data should not affect other keys' data in the case of different subscribers. + k2, err := ks.Eth().Create(testutils.SimulatedChainID) + require.NoError(t, err) + + anotherSubID := new(big.Int).SetUint64(3) + addEthTx(t, txstore, k2.Address, txmgrcommon.TxUnstarted, "10000", anotherSubID, reqTxHash, vrfVersion) + start, err = listener.MaybeSubtractReservedLink(ctx, big.NewInt(100_000), chainID, subID, vrfVersion) + require.NoError(t, err) + require.Equal(t, "80000", start.String()) + + // A subscriber's balance is deducted with the link reserved across multiple keys, + // i.e, gas lanes. 
	// Reserved link for the same subID on a second key (gas lane) must also be
	// subtracted: 100000 - 3*10000 = 70000.
	addEthTx(t, txstore, k2.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
	start, err = listener.MaybeSubtractReservedLink(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)
	require.NoError(t, err)
	require.Equal(t, "70000", start.String())
}

func TestMaybeSubtractReservedLinkV2(t *testing.T) {
	testMaybeSubtractReservedLink(t, vrfcommon.V2)
}

func TestMaybeSubtractReservedLinkV2Plus(t *testing.T) {
	testMaybeSubtractReservedLink(t, vrfcommon.V2Plus)
}

// testMaybeSubtractReservedNative exercises MaybeSubtractReservedEth for the
// given VRF version: unstarted txs with native-payment metadata reduce the
// subscription's prospective balance, confirmed txs do not, and reservations
// are isolated per subscription but accumulated across sending keys.
func testMaybeSubtractReservedNative(t *testing.T, vrfVersion vrfcommon.Version) {
	db := pgtest.NewSqlxDB(t)
	lggr := logger.TestLogger(t)
	cfg := pgtest.NewQConfig(false)
	ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg)
	require.NoError(t, ks.Unlock("blah"))
	chainID := testutils.SimulatedChainID
	k, err := ks.Eth().Create(chainID)
	require.NoError(t, err)

	subID := new(big.Int).SetUint64(1)
	reqTxHash := common.HexToHash("0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8")

	j, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{
		RequestedConfsDelay: 10,
	}).Toml())
	require.NoError(t, err)
	txstore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg)
	txm := makeTestTxm(t, txstore, ks)
	// NOTE(review): this NoError re-checks the err from ValidatedVRFSpec above
	// (makeTestTxm returns no error) — looks redundant; confirm and drop.
	require.NoError(t, err)
	chain := evmmocks.NewChain(t)
	chain.On("TxManager").Return(txm)
	listener := &listenerV2{
		respCount: map[string]uint64{},
		job:       j,
		chain:     chain,
	}

	ctx := testutils.Context(t)

	// Insert an unstarted eth tx with native metadata
	addEthTxNativePayment(t, txstore, k.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion)
	start, err := listener.MaybeSubtractReservedEth(ctx, big.NewInt(100_000), chainID, subID, vrfVersion)

	require.NoError(t, err)
	assert.Equal(t, "90000", start.String())

	// A confirmed tx should not affect the starting balance
	addConfirmedEthTxNativePayment(t, txstore, k.Address, "10000", subID, 1, vrfVersion)

start, err = listener.MaybeSubtractReservedEth(ctx, big.NewInt(100_000), chainID, subID, vrfVersion) + require.NoError(t, err) + assert.Equal(t, "90000", start.String()) + + // An unconfirmed tx _should_ affect the starting balance. + addEthTxNativePayment(t, txstore, k.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion) + start, err = listener.MaybeSubtractReservedEth(ctx, big.NewInt(100_000), chainID, subID, vrfVersion) + require.NoError(t, err) + assert.Equal(t, "80000", start.String()) + + // One subscriber's reserved native should not affect other subscribers prospective balance. + otherSubID := new(big.Int).SetUint64(2) + require.NoError(t, err) + addEthTxNativePayment(t, txstore, k.Address, txmgrcommon.TxUnstarted, "10000", otherSubID, reqTxHash, vrfVersion) + start, err = listener.MaybeSubtractReservedEth(ctx, big.NewInt(100_000), chainID, subID, vrfVersion) + require.NoError(t, err) + require.Equal(t, "80000", start.String()) + + // One key's data should not affect other keys' data in the case of different subscribers. + k2, err := ks.Eth().Create(testutils.SimulatedChainID) + require.NoError(t, err) + + anotherSubID := new(big.Int).SetUint64(3) + addEthTxNativePayment(t, txstore, k2.Address, txmgrcommon.TxUnstarted, "10000", anotherSubID, reqTxHash, vrfVersion) + start, err = listener.MaybeSubtractReservedEth(ctx, big.NewInt(100_000), chainID, subID, vrfVersion) + require.NoError(t, err) + require.Equal(t, "80000", start.String()) + + // A subscriber's balance is deducted with the native reserved across multiple keys, + // i.e, gas lanes. 
+ addEthTxNativePayment(t, txstore, k2.Address, txmgrcommon.TxUnstarted, "10000", subID, reqTxHash, vrfVersion) + start, err = listener.MaybeSubtractReservedEth(ctx, big.NewInt(100_000), chainID, subID, vrfVersion) + require.NoError(t, err) + require.Equal(t, "70000", start.String()) +} + +func TestMaybeSubtractReservedNativeV2Plus(t *testing.T) { + testMaybeSubtractReservedNative(t, vrfcommon.V2Plus) +} + +func TestMaybeSubtractReservedNativeV2(t *testing.T) { + db := pgtest.NewSqlxDB(t) + lggr := logger.TestLogger(t) + cfg := pgtest.NewQConfig(false) + ks := keystore.NewInMemory(db, utils.FastScryptParams, lggr, cfg) + require.NoError(t, ks.Unlock("blah")) + chainID := testutils.SimulatedChainID + subID := new(big.Int).SetUint64(1) + + j, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + RequestedConfsDelay: 10, + }).Toml()) + require.NoError(t, err) + txstore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg) + txm := makeTestTxm(t, txstore, ks) + chain := evmmocks.NewChain(t) + chain.On("TxManager").Return(txm).Maybe() + listener := &listenerV2{ + respCount: map[string]uint64{}, + job: j, + chain: chain, + } + // returns error because native payment is not supported for V2 + start, err := listener.MaybeSubtractReservedEth(testutils.Context(t), big.NewInt(100_000), chainID, subID, vrfcommon.V2) + require.NoError(t, err) + assert.Equal(t, big.NewInt(0), start) +} + +func TestListener_GetConfirmedAt(t *testing.T) { + j, err := vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + RequestedConfsDelay: 10, + }).Toml()) + require.NoError(t, err) + + listener := &listenerV2{ + respCount: map[string]uint64{}, + job: j, + } + + // Requester asks for 100 confirmations, we have a delay of 10, + // so we should wait for max(nodeMinConfs, requestedConfs + requestedConfsDelay) = 110 confirmations + nodeMinConfs := 10 + confirmedAt := 
listener.getConfirmedAt(NewV2RandomWordsRequested(&vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{ + RequestId: big.NewInt(1), + MinimumRequestConfirmations: 100, + Raw: types.Log{ + BlockNumber: 100, + }, + }), uint32(nodeMinConfs)) + require.Equal(t, uint64(210), confirmedAt) // log block number + # of confirmations + + // Requester asks for 100 confirmations, we have a delay of 0, + // so we should wait for max(nodeMinConfs, requestedConfs + requestedConfsDelay) = 100 confirmations + j, err = vrfcommon.ValidatedVRFSpec(testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{ + RequestedConfsDelay: 0, + }).Toml()) + require.NoError(t, err) + listener.job = j + confirmedAt = listener.getConfirmedAt(NewV2RandomWordsRequested(&vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{ + RequestId: big.NewInt(1), + MinimumRequestConfirmations: 100, + Raw: types.Log{ + BlockNumber: 100, + }, + }), uint32(nodeMinConfs)) + require.Equal(t, uint64(200), confirmedAt) // log block number + # of confirmations +} + +func TestListener_Backoff(t *testing.T) { + var tests = []struct { + name string + initial time.Duration + max time.Duration + last time.Duration + retries int + expected bool + }{ + { + name: "Backoff disabled, ready", + expected: true, + }, + { + name: "First try, ready", + initial: time.Minute, + max: time.Hour, + last: 0, + retries: 0, + expected: true, + }, + { + name: "Second try, not ready", + initial: time.Minute, + max: time.Hour, + last: 59 * time.Second, + retries: 1, + expected: false, + }, + { + name: "Second try, ready", + initial: time.Minute, + max: time.Hour, + last: 61 * time.Second, // Last try was over a minute ago + retries: 1, + expected: true, + }, + { + name: "Third try, not ready", + initial: time.Minute, + max: time.Hour, + last: 77 * time.Second, // Slightly less than backoffFactor * initial + retries: 2, + expected: false, + }, + { + name: "Third try, ready", + initial: time.Minute, + max: time.Hour, + last: 79 * time.Second, // 
Slightly more than backoffFactor * initial + retries: 2, + expected: true, + }, + { + name: "Max, not ready", + initial: time.Minute, + max: time.Hour, + last: 59 * time.Minute, // Slightly less than max + retries: 900, + expected: false, + }, + { + name: "Max, ready", + initial: time.Minute, + max: time.Hour, + last: 61 * time.Minute, // Slightly more than max + retries: 900, + expected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + lsn := &listenerV2{job: job.Job{ + VRFSpec: &job.VRFSpec{ + BackoffInitialDelay: test.initial, + BackoffMaxDelay: test.max, + }, + }} + + req := pendingRequest{ + confirmedAtBlock: 5, + attempts: test.retries, + lastTry: time.Now().Add(-test.last), + } + + require.Equal(t, test.expected, lsn.ready(req, 10)) + }) + } +} diff --git a/core/services/vrf/v2/listener_v2_types.go b/core/services/vrf/v2/listener_v2_types.go new file mode 100644 index 00000000..3c8454b4 --- /dev/null +++ b/core/services/vrf/v2/listener_v2_types.go @@ -0,0 +1,324 @@ +package v2 + +import ( + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + heaps "github.com/theodesp/go-heaps" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" +) + +type errPossiblyInsufficientFunds struct{} + +func (errPossiblyInsufficientFunds) Error() string { + return "Simulation errored, possibly insufficient funds. 
Request will remain unprocessed until funds are available" +} + +type errBlockhashNotInStore struct{} + +func (errBlockhashNotInStore) Error() string { + return "Blockhash not in store" +} + +type errProofVerificationFailed struct{} + +func (errProofVerificationFailed) Error() string { + return "Proof verification failed" +} + +type fulfilledReqV2 struct { + blockNumber uint64 + reqID string +} + +func (a fulfilledReqV2) Compare(b heaps.Item) int { + a1 := a + a2 := b.(fulfilledReqV2) + switch { + case a1.blockNumber > a2.blockNumber: + return 1 + case a1.blockNumber < a2.blockNumber: + return -1 + default: + return 0 + } +} + +type pendingRequest struct { + confirmedAtBlock uint64 + req RandomWordsRequested + utcTimestamp time.Time + + // used for exponential backoff when retrying + attempts int + lastTry time.Time +} + +type vrfPipelineResult struct { + err error + // maxFee indicates how much juels (link) or wei (ether) would be paid for the VRF request + // if it were to be fulfilled at the maximum gas price (i.e gas lane gas price). + maxFee *big.Int + // fundsNeeded indicates a "minimum balance" in juels or wei that must be held in the + // subscription's account in order to fulfill the request. + fundsNeeded *big.Int + run *pipeline.Run + payload string + gasLimit uint32 + req pendingRequest + proof VRFProof + reqCommitment RequestCommitment +} + +// batchFulfillment contains all the information needed in order to +// perform a batch fulfillment operation on the batch VRF coordinator. 
type batchFulfillment struct {
	// proofs and commitments are parallel slices: one entry per request
	// included in the batch.
	proofs      []VRFProof
	commitments []RequestCommitment
	// totalGasLimit is the running sum of the per-request gas limits in
	// this batch; used by addRun to decide when to start a new batch.
	totalGasLimit uint32
	// runs holds the pipeline runs to persist alongside the batch tx.
	runs []*pipeline.Run
	// reqIDs, maxFees and txHashes are parallel to proofs: the request ID,
	// the maximum fee (juels or wei), and the request transaction hash.
	reqIDs   []*big.Int
	maxFees  []*big.Int
	txHashes []common.Hash
	// fromAddress is the sending key used for the batch fulfillment tx.
	fromAddress common.Address
	// version indicates which coordinator (V2 or V2Plus) the batch targets.
	version vrfcommon.Version
}

// newBatchFulfillment creates a batchFulfillment seeded with a single
// pipeline result; further results are appended via batchFulfillments.addRun.
func newBatchFulfillment(result vrfPipelineResult, fromAddress common.Address, version vrfcommon.Version) *batchFulfillment {
	return &batchFulfillment{
		proofs: []VRFProof{
			result.proof,
		},
		commitments: []RequestCommitment{
			result.reqCommitment,
		},
		totalGasLimit: result.gasLimit,
		runs: []*pipeline.Run{
			result.run,
		},
		reqIDs: []*big.Int{
			result.req.req.RequestID(),
		},
		maxFees: []*big.Int{
			result.maxFee,
		},
		txHashes: []common.Hash{
			result.req.req.Raw().TxHash,
		},
		fromAddress: fromAddress,
		version:     version,
	}
}

// batchFulfillments manages many batchFulfillment objects.
// It makes organizing many runs into batches that respect the
// batchGasLimit easy via the addRun method.
type batchFulfillments struct {
	fulfillments []*batchFulfillment
	// batchGasLimit is the configured per-batch gas budget.
	batchGasLimit uint32
	// currIndex points at the batch currently being filled.
	currIndex int
	version   vrfcommon.Version
}

// newBatchFulfillments returns an empty batchFulfillments collection for the
// given gas budget and coordinator version.
func newBatchFulfillments(batchGasLimit uint32, version vrfcommon.Version) *batchFulfillments {
	return &batchFulfillments{
		fulfillments: []*batchFulfillment{},
		batchGasLimit: batchGasLimit,
		currIndex:     0,
		version:       version,
	}
}

// addRun adds the given run to an existing batch, or creates a new
// batch if the batchGasLimit that has been configured was exceeded.
func (b *batchFulfillments) addRun(result vrfPipelineResult, fromAddress common.Address) {
	if len(b.fulfillments) == 0 {
		// First run ever: open the initial batch.
		b.fulfillments = append(b.fulfillments, newBatchFulfillment(result, fromAddress, b.version))
	} else {
		currBatch := b.fulfillments[b.currIndex]
		if (currBatch.totalGasLimit + result.gasLimit) >= b.batchGasLimit {
			// don't add to curr batch, add new batch and increment index.
			// NOTE(review): a single run whose gasLimit alone reaches
			// batchGasLimit still gets its own (over-budget) batch — confirm
			// that is intended.
			b.fulfillments = append(b.fulfillments, newBatchFulfillment(result, fromAddress, b.version))
			b.currIndex++
		} else {
			// we're okay on gas, add to current batch; all parallel slices
			// must be appended together to stay index-aligned.
			currBatch.proofs = append(currBatch.proofs, result.proof)
			currBatch.commitments = append(currBatch.commitments, result.reqCommitment)
			currBatch.totalGasLimit += result.gasLimit
			currBatch.runs = append(currBatch.runs, result.run)
			currBatch.reqIDs = append(currBatch.reqIDs, result.req.req.RequestID())
			currBatch.maxFees = append(currBatch.maxFees, result.maxFee)
			currBatch.txHashes = append(currBatch.txHashes, result.req.req.Raw().TxHash)
		}
	}
}

// processBatch packs the batch's proofs and commitments into a single
// fulfillRandomWords call on the batch coordinator, persists the pipeline
// runs, and enqueues one transaction with the txm. It returns the request IDs
// that were successfully enqueued so the caller can mark them processed.
func (lsn *listenerV2) processBatch(
	l logger.Logger,
	subID *big.Int,
	startBalanceNoReserveLink *big.Int,
	maxCallbackGasLimit uint32,
	batch *batchFulfillment,
	fromAddress common.Address,
) (processedRequestIDs []string) {
	start := time.Now()
	ctx, cancel := lsn.chStop.NewCtx()
	defer cancel()

	// Enqueue a single batch tx for requests that we're able to fulfill based on whether
	// they passed simulation or not.
	var (
		payload           []byte
		err               error
		txMetaSubID       *uint64
		txMetaGlobalSubID *string
	)

	// Pack the on-chain calldata for the version-specific batch coordinator.
	// V2 records the subscription as a numeric SubID in tx meta; V2Plus uses
	// the string GlobalSubID form.
	if batch.version == vrfcommon.V2 {
		payload, err = batchCoordinatorV2ABI.Pack("fulfillRandomWords", ToV2Proofs(batch.proofs), ToV2Commitments(batch.commitments))
		if err != nil {
			// should never happen
			l.Errorw("Failed to pack batch fulfillRandomWords payload",
				"err", err, "proofs", batch.proofs, "commitments", batch.commitments)
			return
		}
		txMetaSubID = ptr(subID.Uint64())
	} else if batch.version == vrfcommon.V2Plus {
		payload, err = batchCoordinatorV2PlusABI.Pack("fulfillRandomWords", ToV2PlusProofs(batch.proofs), ToV2PlusCommitments(batch.commitments))
		if err != nil {
			// should never happen
			l.Errorw("Failed to pack batch fulfillRandomWords payload",
				"err", err, "proofs", batch.proofs, "commitments", batch.commitments)
			return
		}
		txMetaGlobalSubID = ptr(subID.String())
	} else {
		// Unreachable if batches are only built via newBatchFulfillments with
		// a valid version; panic flags a programmer error.
		panic("batch version should be v2 or v2plus")
	}

	// Bump the total gas limit by a bit so that we account for the overhead of the batch
	// contract's calling.
+ totalGasLimitBumped := batchFulfillmentGasEstimate( + uint64(len(batch.proofs)), + maxCallbackGasLimit, + float64(lsn.job.VRFSpec.BatchFulfillmentGasMultiplier), + ) + + ll := l.With("numRequestsInBatch", len(batch.reqIDs), + "requestIDs", batch.reqIDs, + "batchSumGasLimit", batch.totalGasLimit, + "fromAddress", fromAddress, + "linkBalance", startBalanceNoReserveLink, + "totalGasLimitBumped", totalGasLimitBumped, + "gasMultiplier", lsn.job.VRFSpec.BatchFulfillmentGasMultiplier, + ) + ll.Info("Enqueuing batch fulfillment") + var ethTX txmgr.Tx + err = lsn.q.Transaction(func(tx pg.Queryer) error { + if err = lsn.pipelineRunner.InsertFinishedRuns(batch.runs, true, pg.WithQueryer(tx)); err != nil { + return fmt.Errorf("inserting finished pipeline runs: %w", err) + } + + maxLink, maxEth := accumulateMaxLinkAndMaxEth(batch) + var ( + txHashes []common.Hash + reqIDHashes []common.Hash + ) + copy(txHashes, batch.txHashes) + for _, reqID := range batch.reqIDs { + reqIDHashes = append(reqIDHashes, common.BytesToHash(reqID.Bytes())) + } + ethTX, err = lsn.chain.TxManager().CreateTransaction(ctx, txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: lsn.batchCoordinator.Address(), + EncodedPayload: payload, + FeeLimit: totalGasLimitBumped, + Strategy: txmgrcommon.NewSendEveryStrategy(), + Meta: &txmgr.TxMeta{ + RequestIDs: reqIDHashes, + MaxLink: &maxLink, + MaxEth: &maxEth, + SubID: txMetaSubID, + GlobalSubID: txMetaGlobalSubID, + RequestTxHashes: txHashes, + }, + }) + if err != nil { + return fmt.Errorf("create batch fulfillment eth transaction: %w", err) + } + + return nil + }) + if err != nil { + ll.Errorw("Error enqueuing batch fulfillments, requeuing requests", "err", err) + return + } + ll.Infow("Enqueued fulfillment", "ethTxID", ethTX.GetID()) + + // mark requests as processed since the fulfillment has been successfully enqueued + // to the txm. 
+ for _, reqID := range batch.reqIDs { + processedRequestIDs = append(processedRequestIDs, reqID.String()) + vrfcommon.IncProcessedReqs(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, vrfcommon.V2) + } + + ll.Infow("Successfully enqueued batch", "duration", time.Since(start)) + + return +} + +// getReadyAndExpired filters out requests that are expired from the given pendingRequest slice +// and returns requests that are ready for processing. +func (lsn *listenerV2) getReadyAndExpired(l logger.Logger, reqs []pendingRequest) (ready []pendingRequest, expired []string) { + for _, req := range reqs { + // Check if we can ignore the request due to its age. + if time.Now().UTC().Sub(req.utcTimestamp) >= lsn.job.VRFSpec.RequestTimeout { + l.Infow("Request too old, dropping it", + "reqID", req.req.RequestID().String(), + "txHash", req.req.Raw().TxHash) + expired = append(expired, req.req.RequestID().String()) + vrfcommon.IncDroppedReqs(lsn.job.Name.ValueOrZero(), lsn.job.ExternalJobID, vrfcommon.V2, vrfcommon.ReasonAge) + continue + } + // we always check if the requests are already fulfilled prior to trying to fulfill them again + ready = append(ready, req) + } + return +} + +func batchFulfillmentGasEstimate( + batchSize uint64, + maxCallbackGasLimit uint32, + gasMultiplier float64, +) uint32 { + return uint32( + gasMultiplier * float64((uint64(maxCallbackGasLimit)+400_000)+batchSize*BatchFulfillmentIterationGasCost), + ) +} + +func accumulateMaxLinkAndMaxEth(batch *batchFulfillment) (maxLinkStr string, maxEthStr string) { + maxLink := big.NewInt(0) + maxEth := big.NewInt(0) + for i := range batch.commitments { + if batch.commitments[i].VRFVersion == vrfcommon.V2 { + // v2 always bills in link + maxLink.Add(maxLink, batch.maxFees[i]) + } else { + // v2plus can bill in link or eth, depending on the commitment + if batch.commitments[i].NativePayment() { + maxEth.Add(maxEth, batch.maxFees[i]) + } else { + maxLink.Add(maxLink, batch.maxFees[i]) + } + } + } + return 
maxLink.String(), maxEth.String() +} diff --git a/core/services/vrf/v2/listener_v2_types_test.go b/core/services/vrf/v2/listener_v2_types_test.go new file mode 100644 index 00000000..539408dc --- /dev/null +++ b/core/services/vrf/v2/listener_v2_types_test.go @@ -0,0 +1,92 @@ +package v2 + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2_5" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" +) + +func Test_BatchFulfillments_AddRun(t *testing.T) { + batchLimit := uint32(2500) + bfs := newBatchFulfillments(batchLimit, vrfcommon.V2) + fromAddress := testutils.NewAddress() + for i := 0; i < 4; i++ { + bfs.addRun(vrfPipelineResult{ + gasLimit: 500, + req: pendingRequest{ + req: NewV2RandomWordsRequested(&vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{ + RequestId: big.NewInt(1), + Raw: types.Log{ + TxHash: common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65"), + }, + }), + }, + run: pipeline.NewRun(pipeline.Spec{}, pipeline.Vars{}), + }, fromAddress) + require.Len(t, bfs.fulfillments, 1) + } + + require.Equal(t, uint32(2000), bfs.fulfillments[0].totalGasLimit) + + // This addition should create and add a new batch + bfs.addRun(vrfPipelineResult{ + gasLimit: 500, + req: pendingRequest{ + req: NewV2RandomWordsRequested(&vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested{ + RequestId: big.NewInt(1), + Raw: types.Log{ + TxHash: common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65"), + }, + }), + }, + run: pipeline.NewRun(pipeline.Spec{}, pipeline.Vars{}), + }, fromAddress) + 
require.Len(t, bfs.fulfillments, 2) +} + +func Test_BatchFulfillments_AddRun_V2Plus(t *testing.T) { + batchLimit := uint32(2500) + bfs := newBatchFulfillments(batchLimit, vrfcommon.V2Plus) + fromAddress := testutils.NewAddress() + for i := 0; i < 4; i++ { + bfs.addRun(vrfPipelineResult{ + gasLimit: 500, + req: pendingRequest{ + req: NewV2_5RandomWordsRequested(&vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested{ + RequestId: big.NewInt(1), + Raw: types.Log{ + TxHash: common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65"), + }, + }), + }, + run: pipeline.NewRun(pipeline.Spec{}, pipeline.Vars{}), + }, fromAddress) + require.Len(t, bfs.fulfillments, 1) + } + + require.Equal(t, uint32(2000), bfs.fulfillments[0].totalGasLimit) + + // This addition should create and add a new batch + bfs.addRun(vrfPipelineResult{ + gasLimit: 500, + req: pendingRequest{ + req: NewV2_5RandomWordsRequested(&vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested{ + RequestId: big.NewInt(1), + Raw: types.Log{ + TxHash: common.HexToHash("0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65"), + }, + }), + }, + run: pipeline.NewRun(pipeline.Spec{}, pipeline.Vars{}), + }, fromAddress) + require.Len(t, bfs.fulfillments, 2) +} diff --git a/core/services/vrf/v2/reverted_txns.go b/core/services/vrf/v2/reverted_txns.go new file mode 100644 index 00000000..75613636 --- /dev/null +++ b/core/services/vrf/v2/reverted_txns.go @@ -0,0 +1,722 @@ +package v2 + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "strconv" + "strings" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/pkg/errors" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + evmclient 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/client" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type ( + TxnReceiptDB struct { + TxHash common.Hash `db:"tx_hash"` + EVMReceipt evmtypes.Receipt `db:"receipt"` + FromAddress common.Address `db:"from_address"` + ToAddress common.Address `db:"to_address"` + EncodedPayload hexutil.Bytes `db:"encoded_payload"` + GasLimit uint64 `db:"gas_limit"` + SubID uint64 `db:"sub_id"` + RequestID string `db:"request_id"` + RequestTxHash string `db:"request_tx_hash"` + ForceFulfillmentAttempt uint64 `db:"force_fulfillment_attempt"` + } + + RevertedVRFTxn struct { + DBReceipt TxnReceiptDB + IsBatchReq bool + Proof vrf_coordinator_v2.VRFProof + Commitment vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment + } +) + +var ReqScanTimeRangeInDB = "1 hour" + +func (lsn *listenerV2) runRevertedTxnsHandler(pollPeriod time.Duration) { + pollPeriod = pollPeriod + time.Second*3 + tick := time.NewTicker(pollPeriod) + defer tick.Stop() + ctx, cancel := lsn.chStop.NewCtx() + defer cancel() + for { + select { + case <-lsn.chStop: + return + case <-tick.C: + lsn.handleRevertedTxns(ctx, pollPeriod) + } + } +} + +func (lsn *listenerV2) handleRevertedTxns(ctx context.Context, pollPeriod time.Duration) { + lsn.l.Infow("Handling reverted txns") + + // Fetch recent single and batch txns, that have not been force-fulfilled + recentSingleTxns, err := lsn.fetchRecentSingleTxns(ctx, lsn.q, lsn.chainID.Uint64(), pollPeriod) + if err != nil { + lsn.l.Fatalw("Fetch recent txns", "err", err) + } + recentBatchTxns, err := lsn.fetchRecentBatchTxns(ctx, lsn.q, lsn.chainID.Uint64(), pollPeriod) + if err != nil { + 
lsn.l.Fatalw("Fetch recent batch txns", "err", err) + } + recentForceFulfillmentTxns, err := lsn.fetchRevertedForceFulfilmentTxns(ctx, lsn.q, lsn.chainID.Uint64(), pollPeriod) + if err != nil { + lsn.l.Fatalw("Fetch recent reverted force-fulfillment txns", "err", err) + } + recentTxns := make([]TxnReceiptDB, 0) + if len(recentSingleTxns) > 0 { + recentTxns = append(recentTxns, recentSingleTxns...) + } + if len(recentBatchTxns) > 0 { + recentTxns = append(recentTxns, recentBatchTxns...) + } + if len(recentForceFulfillmentTxns) > 0 { + recentTxns = append(recentTxns, recentForceFulfillmentTxns...) + } + + // Query RPC using TransactionByHash to get the transaction object + revertedTxns := lsn.filterRevertedTxns(ctx, recentTxns) + + // Extract calldata of function call from transaction object + for _, revertedTxn := range revertedTxns { + // Pass that to txm to create a new tx for force fulfillment + _, err := lsn.enqueueForceFulfillmentForRevertedTxn(ctx, revertedTxn) + if err != nil { + lsn.l.Errorw("Enqueue force fulfilment", "err", err) + } + } +} + +func (lsn *listenerV2) fetchRecentSingleTxns(ctx context.Context, + q pg.Q, + chainID uint64, + pollPeriod time.Duration) ([]TxnReceiptDB, error) { + + // (state = 'confirmed' OR state = 'unconfirmed') + sqlQuery := fmt.Sprintf(` + WITH already_ff as ( + SELECT meta->>'RequestID' as request_id + FROM evm.txes + WHERE created_at >= NOW() - interval '%s' + AND evm_chain_id = $1 + AND meta->>'ForceFulfilled' is NOT NULL + ), txes AS ( + SELECT * + FROM evm.txes + WHERE created_at >= NOW() - interval '%s' + AND evm_chain_id = $1 + AND meta->>'SubId' IS NOT NULL + AND meta->>'RequestID' IS NOT NULL + AND meta->>'ForceFulfilled' is NULL + AND meta->>'RequestID' NOT IN (SELECT request_id FROM already_ff) + ), attempts AS ( + SELECT * + FROM evm.tx_attempts + WHERE eth_tx_id IN (SELECT id FROM txes) + ), receipts AS ( + SELECT * + FROM evm.receipts + WHERE tx_hash IN (SELECT hash FROM attempts) + AND receipt->>'status' = 
'0x0' + ) + SELECT r.tx_hash, + r.receipt, + t.from_address, + t.to_address, + t.encoded_payload, + t.gas_limit, + t.meta->>'SubId' as sub_id, + t.meta->>'RequestID' as request_id, + t.meta->>'RequestTxHash' as request_tx_hash + FROM receipts r + INNER JOIN attempts a ON r.tx_hash = a.hash + INNER JOIN txes t ON a.eth_tx_id = t.id + `, ReqScanTimeRangeInDB, ReqScanTimeRangeInDB) + var recentReceipts []TxnReceiptDB + + before := time.Now() + err := q.Select(&recentReceipts, sqlQuery, chainID) + lsn.postSqlLog(ctx, before, pollPeriod, "FetchRecentSingleTxns") + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, errors.Wrap(err, "Error fetching recent non-force-fulfilled txns") + } + + recentReceipts = unique(recentReceipts) + lsn.l.Infow("finished querying for recently reverting single fulfillments", + "count", len(recentReceipts), + ) + for _, r := range recentReceipts { + lsn.l.Infow("found reverted fulfillment", "requestID", r.RequestID, "fulfillmentTxHash", r.TxHash.String()) + } + return recentReceipts, nil +} + +func (lsn *listenerV2) fetchRecentBatchTxns(ctx context.Context, + q pg.Q, + chainID uint64, + pollPeriod time.Duration) ([]TxnReceiptDB, error) { + sqlQuery := fmt.Sprintf(` + WITH already_ff as ( + SELECT meta->>'RequestID' as request_id + FROM evm.txes + WHERE created_at >= NOW() - interval '%s' + AND evm_chain_id = $1 + AND meta->>'ForceFulfilled' is NOT NULL + ), txes AS ( + SELECT * + FROM ( + SELECT * + FROM evm.txes + WHERE created_at >= NOW() - interval '%s' + AND evm_chain_id = $1 + AND meta->>'SubId' IS NOT NULL + AND meta->>'RequestIDs' IS NOT NULL + AND meta->>'ForceFulfilled' IS NULL + ) AS eth_txes1 + WHERE (meta->'RequestIDs' ?| (SELECT ARRAY_AGG(request_id) FROM already_ff)) IS NOT TRUE + ), attempts AS ( + SELECT * + FROM evm.tx_attempts + WHERE eth_tx_id IN (SELECT id FROM txes) + ), receipts AS ( + SELECT * + FROM evm.receipts + WHERE tx_hash IN (SELECT hash FROM attempts) + ) + SELECT r.tx_hash, + r.receipt, + 
t.from_address, + t.to_address, + t.encoded_payload, + t.gas_limit, + t.meta->>'SubId' as sub_id + FROM receipts r + INNER JOIN attempts a ON r.tx_hash = a.hash + INNER JOIN txes t ON a.eth_tx_id = t.id + `, ReqScanTimeRangeInDB, ReqScanTimeRangeInDB) + var recentReceipts []TxnReceiptDB + + before := time.Now() + err := q.Select(&recentReceipts, sqlQuery, chainID) + lsn.postSqlLog(ctx, before, pollPeriod, "FetchRecentBatchTxns") + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, errors.Wrap(err, "Error fetching recent non-force-fulfilled txns") + } + + recentReceipts = unique(recentReceipts) + lsn.l.Infow("finished querying for recent batch fulfillments", + "count", len(recentReceipts), + ) + return recentReceipts, nil +} + +func (lsn *listenerV2) fetchRevertedForceFulfilmentTxns(ctx context.Context, + q pg.Q, + chainID uint64, + pollPeriod time.Duration) ([]TxnReceiptDB, error) { + + sqlQuery := fmt.Sprintf(` + WITH txes AS ( + SELECT * + FROM evm.txes + WHERE created_at >= NOW() - interval '%s' + AND evm_chain_id = $1 + AND meta->>'SubId' IS NOT NULL + AND meta->>'RequestID' IS NOT NULL + AND meta->>'ForceFulfilled' is NOT NULL + ), attempts AS ( + SELECT * + FROM evm.tx_attempts + WHERE eth_tx_id IN (SELECT id FROM txes) + ), receipts AS ( + SELECT * + FROM evm.receipts + WHERE tx_hash IN (SELECT hash FROM attempts) + AND receipt->>'status' = '0x0' + ) + SELECT r.tx_hash, + r.receipt, + t.from_address, + t.to_address, + t.encoded_payload, + t.gas_limit, + t.meta->>'SubId' as sub_id, + t.meta->>'RequestID' as request_id, + t.meta->>'RequestTxHash' as request_tx_hash, + CAST(COALESCE(t.meta->>'ForceFulfillmentAttempt', '0') AS INT) as force_fulfillment_attempt + FROM receipts r + INNER JOIN attempts a ON r.tx_hash = a.hash + INNER JOIN txes t ON a.eth_tx_id = t.id + `, ReqScanTimeRangeInDB) + var recentReceipts []TxnReceiptDB + + before := time.Now() + err := q.Select(&recentReceipts, sqlQuery, chainID) + lsn.postSqlLog(ctx, before, pollPeriod, 
"FetchRevertedForceFulfilmentTxns") + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, errors.Wrap(err, "Error fetching recent reverted force-fulfilled txns") + } + + sqlQueryAll := fmt.Sprintf(` + WITH txes AS ( + SELECT * + FROM evm.txes + WHERE created_at >= NOW() - interval '%s' + AND evm_chain_id = $1 + AND meta->>'SubId' IS NOT NULL + AND meta->>'RequestID' IS NOT NULL + AND meta->>'ForceFulfilled' is NOT NULL + ), attempts AS ( + SELECT * + FROM evm.tx_attempts + WHERE eth_tx_id IN (SELECT id FROM txes) + ) + SELECT a.hash as tx_hash, + t.meta->>'SubId' as sub_id, + t.meta->>'RequestID' as request_id, + CAST(COALESCE(t.meta->>'ForceFulfillmentAttempt', '0') AS INT) as force_fulfillment_attempt + FROM attempts a + INNER JOIN txes t ON a.eth_tx_id = t.id + `, ReqScanTimeRangeInDB) + var allReceipts []TxnReceiptDB + before = time.Now() + err = q.Select(&allReceipts, sqlQueryAll, chainID) + lsn.postSqlLog(ctx, before, pollPeriod, "Fetch all ForceFulfilment Txns") + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, errors.Wrap(err, "Error fetching all recent force-fulfilled txns") + } + + recentReceipts = UniqueByReqID(recentReceipts, allReceipts) + + lsn.l.Infow("finished querying for recently reverting reverted force-fulfillment txns", + "count", len(recentReceipts), + ) + for _, r := range recentReceipts { + lsn.l.Infow("found reverted force-fulfillment txn", "requestID", r.RequestID, + "fulfillmentTxHash", r.TxHash.String(), + "ForceFulfillmentAttempt", r.ForceFulfillmentAttempt) + } + return unique(recentReceipts), nil +} + +func unique(rs []TxnReceiptDB) (res []TxnReceiptDB) { + if len(rs) == 0 { + return + } + exists := make(map[string]bool) + res = make([]TxnReceiptDB, 0) + for _, r := range rs { + if _, ok := exists[r.TxHash.Hex()]; ok { + continue + } + res = append(res, r) + exists[r.TxHash.Hex()] = true + } + return res +} + +func UniqueByReqID(revertedForceTxns []TxnReceiptDB, allForceTxns []TxnReceiptDB) (res 
[]TxnReceiptDB) { + if len(revertedForceTxns) == 0 { + return + } + + // Load all force fulfillment txns into a map + // allForceTxns would have successful, reverted and pending force fulfillment txns + allForceTxnsMap := make(map[string]TxnReceiptDB) + for _, r := range allForceTxns { + if existingReceipt, ok := allForceTxnsMap[r.RequestID]; ok { + // Get the latest force fulfillment attempt for a given RequestID + if existingReceipt.ForceFulfillmentAttempt < r.ForceFulfillmentAttempt { + allForceTxnsMap[r.RequestID] = r + } + continue + } + allForceTxnsMap[r.RequestID] = r + } + + // Deduplicate reverted force fulfillment txns and skip/ignore reverted + // force-fulfillment txns which have a pending force-fulfillment retry + revertedForceTxnsMap := make(map[string]TxnReceiptDB) + res = make([]TxnReceiptDB, 0) + for _, forceTxn := range revertedForceTxns { + // If there is a pending force fulfillment without a receipt yet, skip force-fulfilling it now again until a txn receipt is available + // This prevents a race between this Custom-VRF-Reverted-Txns-Pipeline and TransactionManager + if receipt, ok := allForceTxnsMap[forceTxn.RequestID]; ok && receipt.ForceFulfillmentAttempt > forceTxn.ForceFulfillmentAttempt { + continue + } + if existingReceipt, ok := revertedForceTxnsMap[forceTxn.RequestID]; ok { + // Get the latest force fulfillment attempt for a given RequestID + if existingReceipt.ForceFulfillmentAttempt < forceTxn.ForceFulfillmentAttempt { + revertedForceTxnsMap[forceTxn.RequestID] = forceTxn + } + continue + } + revertedForceTxnsMap[forceTxn.RequestID] = forceTxn + } + + // Load the deduplicated map into a list and return + for _, r := range revertedForceTxnsMap { + res = append(res, r) + } + return res +} + +// postSqlLog logs about context cancellation and timing after a query returns. +// Queries which use their full timeout log critical level. More than 20% log error, and more than 10% warn. 
+func (lsn *listenerV2) postSqlLog(ctx context.Context, begin time.Time, pollPeriod time.Duration, queryName string) { + elapsed := time.Since(begin) + if ctx.Err() != nil { + lsn.l.Debugw("SQL context canceled", "ms", elapsed.Milliseconds(), "err", ctx.Err(), "sql", queryName) + } + + timeout := lsn.q.QueryTimeout + if timeout <= 0 { + timeout = pollPeriod + } + + pct := float64(elapsed) / float64(timeout) + pct *= 100 + + kvs := []any{"ms", elapsed.Milliseconds(), + "timeout", timeout.Milliseconds(), + "percent", strconv.FormatFloat(pct, 'f', 1, 64), + "sql", queryName} + + if elapsed >= timeout { + lsn.l.Criticalw("ExtremelySlowSQLQuery", kvs...) + } else if errThreshold := timeout / 5; errThreshold > 0 && elapsed > errThreshold { + lsn.l.Errorw("VerySlowSQLQuery", kvs...) + } else if warnThreshold := timeout / 10; warnThreshold > 0 && elapsed > warnThreshold { + lsn.l.Warnw("SlowSQLQuery", kvs...) + } else { + lsn.l.Infow("SQLQueryLatency", kvs...) + } +} + +func (lsn *listenerV2) filterRevertedTxns(ctx context.Context, + recentReceipts []TxnReceiptDB) []RevertedVRFTxn { + + revertedVRFTxns := make([]RevertedVRFTxn, 0) + for _, txnReceipt := range recentReceipts { + switch txnReceipt.ToAddress.Hex() { + case lsn.vrfOwner.Address().Hex(): + fallthrough + case lsn.coordinator.Address().Hex(): + // Filter Single VRF Fulfilment + revertedVRFTxn, err := lsn.filterSingleRevertedTxn(ctx, txnReceipt) + if err != nil { + lsn.l.Errorw("Filter reverted single fulfillment txn", "Err", err) + continue + } + // Revert reason is not insufficient balance + if revertedVRFTxn == nil { + continue + } + revertedVRFTxns = append(revertedVRFTxns, *revertedVRFTxn) + case lsn.batchCoordinator.Address().Hex(): + // Filter Batch VRF Fulfilment + revertedBatchVRFTxns, err := lsn.filterBatchRevertedTxn(ctx, txnReceipt) + if err != nil { + lsn.l.Errorw("Filter batchfulfilment with reverted txns", "Err", err) + continue + } + // No req in the batch txn with insufficient balance revert 
reason + if len(revertedBatchVRFTxns) == 0 { + continue + } + revertedVRFTxns = append(revertedVRFTxns, revertedBatchVRFTxns...) + default: + // Unrecognised Txn + lsn.l.Warnw("Unrecognised txn in VRF-Reverted-Pipeline", + "ToAddress", txnReceipt.ToAddress.Hex(), + ) + } + } + + lsn.l.Infow("Reverted VRF fulfilment txns due to InsufficientBalance", + "count", len(revertedVRFTxns), + "reverted_txns", revertedVRFTxns, + ) + for _, r := range revertedVRFTxns { + lsn.l.Infow("Reverted VRF fulfilment txns due to InsufficientBalance", + "RequestID", r.DBReceipt.RequestID, + "TxnStoreEVMReceipt.BlockHash", r.DBReceipt.EVMReceipt.BlockHash.String(), + "TxnStoreEVMReceipt.BlockNumber", r.DBReceipt.EVMReceipt.BlockNumber.String(), + "VRFFulfillmentTxHash", r.DBReceipt.TxHash.String()) + } + return revertedVRFTxns +} + +func (lsn *listenerV2) filterSingleRevertedTxn(ctx context.Context, + txnReceiptDB TxnReceiptDB) ( + *RevertedVRFTxn, error) { + + requestID := common.HexToHash(txnReceiptDB.RequestID).Big() + commitment, err := lsn.coordinator.GetCommitment(&bind.CallOpts{Context: ctx}, requestID) + if err != nil { + // Not able to get commitment from chain RPC node, continue + lsn.l.Errorw("Force-fulfilment of single reverted txns: Not able to get commitment from chain RPC node", "err", err) + } else if utils.IsEmpty(commitment[:]) { + // VRF request already fulfilled, return + return nil, nil + } + lsn.l.Infow("Single reverted txn: Unfulfilled req", "req", requestID.String()) + + // Get txn object from RPC node + ethClient := lsn.chain.Client() + tx, err := ethClient.TransactionByHash(ctx, txnReceiptDB.TxHash) + if err != nil { + return nil, errors.Wrap(err, "get_txn_by_hash") + } + + // Simulate txn to get revert error + call := ethereum.CallMsg{ + From: txnReceiptDB.FromAddress, + To: &txnReceiptDB.ToAddress, + Data: tx.Data(), // txnReceiptDB.EncodedPayload, + Gas: txnReceiptDB.GasLimit, + GasPrice: tx.GasPrice(), + } + _, rpcError := ethClient.CallContract(ctx, call, 
txnReceiptDB.EVMReceipt.BlockNumber) + if rpcError == nil { + return nil, fmt.Errorf("error fetching revert reason %v: %v", txnReceiptDB.TxHash, err) + } + revertErr, err := evmclient.ExtractRPCError(rpcError) + lsn.l.Infow("InsufficientBalRevertedTxn", + "RawRevertData", rpcError, + "ParsedRevertData", revertErr.Data, + "ParsingErr", err, + ) + if err != nil { + return nil, fmt.Errorf("reverted_txn_reason_parse_err: %v", err) + } + revertErrDataStr := "" + revertErrDataBytes := []byte{} + if revertErr.Data != nil { + revertErrDataStr = revertErr.Data.(string) + revertErrDataStr = strings.Replace(revertErrDataStr, "Reverted ", "", 1) + // If force fulfillment txn reverts on chain due to getFeedData not falling back + // to MAXINT256 due to stalenessSeconds criteria not satisfying + revertErrDataBytes = common.FromHex(revertErrDataStr) + } + insufficientErr := coordinatorV2ABI.Errors["InsufficientBalance"].ID.Bytes()[0:4] + // Revert reason may not be accurately determined from all RPC nodes and may + // not work in some chains + if len(revertErrDataStr) > 0 && !bytes.Equal(revertErrDataBytes[0:4], insufficientErr) { + return nil, nil + } + // If reached maximum number of retries for force fulfillment + if txnReceiptDB.ForceFulfillmentAttempt >= 15 { + return nil, nil + } + + // Get VRF fulfillment proof and commitment from tx object + txData := txnReceiptDB.EncodedPayload + if len(txData) <= 4 { + return nil, fmt.Errorf("invalid_txn_data_for_tx: %s", tx.Hash().String()) + } + callData := txData[4:] // Remove first 4 bytes of function signature + unpacked, err := coordinatorV2ABI.Methods["fulfillRandomWords"].Inputs.Unpack(callData) + if err != nil { + return nil, fmt.Errorf("invalid_txn_data_for_tx_pack: %s, err %v", tx.Hash().String(), err) + } + proof := abi.ConvertType(unpacked[0], new(vrf_coordinator_v2.VRFProof)).(*vrf_coordinator_v2.VRFProof) + reqCommitment := abi.ConvertType(unpacked[1], 
new(vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment)).(*vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment) + return &RevertedVRFTxn{ + DBReceipt: txnReceiptDB, + IsBatchReq: false, + Proof: *proof, + Commitment: *reqCommitment}, nil +} + +func (lsn *listenerV2) filterBatchRevertedTxn(ctx context.Context, + txnReceiptDB TxnReceiptDB) ( + []RevertedVRFTxn, error) { + if len(txnReceiptDB.EncodedPayload) <= 4 { + return nil, fmt.Errorf("invalid encodedPayload: %v", hexutil.Encode(txnReceiptDB.EncodedPayload)) + } + unpackedInputs, err := batchCoordinatorV2ABI.Methods["fulfillRandomWords"].Inputs.Unpack(txnReceiptDB.EncodedPayload[4:]) + if err != nil { + return nil, errors.Wrap(err, "cannot_unpack_batch_txn") + } + proofs := abi.ConvertType(unpackedInputs[0], new([]vrf_coordinator_v2.VRFProof)).(*[]vrf_coordinator_v2.VRFProof) + reqCommitments := abi.ConvertType(unpackedInputs[1], new([]vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment)).(*[]vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment) + + proofReqIDs := make([]common.Hash, 0) + keyHash := lsn.job.VRFSpec.PublicKey.MustHash() + for _, proof := range *proofs { + payload, err := evmutils.ABIEncode(`[{"type":"bytes32"},{"type":"uint256"}]`, keyHash, proof.Seed) + if err != nil { + return nil, fmt.Errorf("ABI Encode Error: (err %v), (keyHash %v), (prood: %v)", err, keyHash, proof.Seed) + } + requestIDOfProof := common.BytesToHash(crypto.Keccak256(payload)) + proofReqIDs = append(proofReqIDs, requestIDOfProof) + } + + // BatchVRFCoordinatorV2 + revertedTxns := make([]RevertedVRFTxn, 0) + for _, log := range txnReceiptDB.EVMReceipt.Logs { + if log.Topics[0] != batchCoordinatorV2ABI.Events["RawErrorReturned"].ID { + continue + } + + // Extract revert reason for individual req in batch txn + unpacked, err := batchCoordinatorV2ABI.Events["RawErrorReturned"].Inputs.Unpack(log.Data) + if err != nil { + lsn.l.Errorw("cannot_unpack_batch_coordinator_log", "err", err) + continue + } + lowLevelData := 
unpacked[0].([]byte) + if !bytes.Equal(lowLevelData, coordinatorV2ABI.Errors["InsufficientBalance"].ID.Bytes()[0:4]) { + continue + } + + // Match current log to a (proof, commitment) pair from rawTxData using requestID + requestID := log.Topics[1] + var curProof vrf_coordinator_v2.VRFProof + var curReqCommitment vrf_coordinator_v2.VRFCoordinatorV2RequestCommitment + found := false + for i, proof := range *proofs { + requestIDOfProof := proofReqIDs[i] + if requestID == requestIDOfProof { + found = true + curProof = proof + curReqCommitment = (*reqCommitments)[i] + break + } + } + + if found { + commitment, err := lsn.coordinator.GetCommitment(&bind.CallOpts{Context: ctx}, requestID.Big()) + if err != nil { + // Not able to get commitment from chain RPC node, continue + lsn.l.Errorw("Force-fulfilment of batch reverted txns: Not able to get commitment from chain RPC node", + "err", err, + "requestID", requestID.Big()) + } else if utils.IsEmpty(commitment[:]) { + lsn.l.Infow("Batch fulfillment with initial reverted fulfillment txn and later successful fulfillment, Skipping", "req", requestID.String()) + continue + } + lsn.l.Infow("Batch fulfillment with reverted fulfillment txn", "req", requestID.String()) + revertedTxn := RevertedVRFTxn{ + DBReceipt: TxnReceiptDB{ + TxHash: txnReceiptDB.TxHash, + EVMReceipt: txnReceiptDB.EVMReceipt, + FromAddress: txnReceiptDB.FromAddress, + SubID: txnReceiptDB.SubID, + RequestID: requestID.Hex(), + }, + IsBatchReq: true, + Proof: curProof, + Commitment: curReqCommitment, + } + revertedTxns = append(revertedTxns, revertedTxn) + } else { + lsn.l.Criticalw("Reverted Batch fulfilment requestID from log does not have proof in req EncodedPayload", + "requestIDFromLog", requestID.Big().Int64(), + ) + } + } + return revertedTxns, nil +} + +// enqueueForceFulfillment enqueues a forced fulfillment through the +// VRFOwner contract. It estimates gas again on the transaction due +// to the extra steps taken within VRFOwner.fulfillRandomWords. 
+func (lsn *listenerV2) enqueueForceFulfillmentForRevertedTxn( + ctx context.Context, + revertedTxn RevertedVRFTxn, +) (etx txmgr.Tx, err error) { + if lsn.job.VRFSpec.VRFOwnerAddress == nil { + return txmgr.Tx{}, errors.New("vrf_owner_not_set_in_job_spec") + } + + proof := revertedTxn.Proof + reqCommitment := revertedTxn.Commitment + + fromAddresses := lsn.fromAddresses() + fromAddress, err := lsn.gethks.GetRoundRobinAddress(lsn.chainID, fromAddresses...) + if err != nil { + return txmgr.Tx{}, errors.Wrap(err, "failed_to_get_vrf_listener_from_address") + } + + // fulfill the request through the VRF owner + lsn.l.Infow("VRFOwner.fulfillRandomWords vs. VRFCoordinatorV2.fulfillRandomWords", + "vrf_owner.fulfillRandomWords", hexutil.Encode(vrfOwnerABI.Methods["fulfillRandomWords"].ID), + "vrf_coordinator_v2.fulfillRandomWords", hexutil.Encode(coordinatorV2ABI.Methods["fulfillRandomWords"].ID), + ) + + vrfOwnerAddress1 := lsn.vrfOwner.Address() + vrfOwnerAddressSpec := lsn.job.VRFSpec.VRFOwnerAddress.Address() + lsn.l.Infow("addresses diff", "wrapper_address", vrfOwnerAddress1, "spec_address", vrfOwnerAddressSpec) + + txData, err := vrfOwnerABI.Pack("fulfillRandomWords", proof, reqCommitment) + if err != nil { + return txmgr.Tx{}, errors.Wrap(err, "abi pack VRFOwner.fulfillRandomWords") + } + vrfOwnerCoordinator, _ := lsn.vrfOwner.GetVRFCoordinator(nil) + lsn.l.Infow("RevertedTxnForceFulfilment EstimatingGas", + "EncodedPayload", hexutil.Encode(txData), + "VRFOwnerCoordinator", vrfOwnerCoordinator.String(), + ) + ethClient := lsn.chain.Client() + estimateGasLimit, err := ethClient.EstimateGas(ctx, ethereum.CallMsg{ + From: fromAddress, + To: &vrfOwnerAddressSpec, + Data: txData, + }) + if err != nil { + return txmgr.Tx{}, errors.Wrap(err, "failed to estimate gas on VRFOwner.fulfillRandomWords") + } + estimateGasLimit = uint64(1.4 * float64(estimateGasLimit)) + + lsn.l.Infow("Estimated gas limit on force fulfillment", "estimateGasLimit", estimateGasLimit) + + reqID := 
common.BytesToHash(hexutil.MustDecode(revertedTxn.DBReceipt.RequestID)) + var reqTxHash common.Hash + if revertedTxn.DBReceipt.RequestTxHash != "" { + reqTxHash = common.BytesToHash(hexutil.MustDecode(revertedTxn.DBReceipt.RequestTxHash)) + } + lsn.l.Infow("RevertedTxnForceFulfilment CreateTransaction", + "RequestID", revertedTxn.DBReceipt.RequestID, + "RequestTxHash", revertedTxn.DBReceipt.RequestTxHash, + ) + forceFulfiled := true + forceFulfillmentAttempt := revertedTxn.DBReceipt.ForceFulfillmentAttempt + 1 + etx, err = lsn.chain.TxManager().CreateTransaction(ctx, txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: lsn.vrfOwner.Address(), + EncodedPayload: txData, + FeeLimit: uint32(estimateGasLimit), + Strategy: txmgrcommon.NewSendEveryStrategy(), + Meta: &txmgr.TxMeta{ + RequestID: &reqID, + SubID: &revertedTxn.DBReceipt.SubID, + RequestTxHash: &reqTxHash, + ForceFulfilled: &forceFulfiled, + ForceFulfillmentAttempt: &forceFulfillmentAttempt, + // No max link since simulation failed + }, + }) + return etx, err +} diff --git a/core/services/vrf/vrfcommon/inflight_cache.go b/core/services/vrf/vrfcommon/inflight_cache.go new file mode 100644 index 00000000..e5bd0e88 --- /dev/null +++ b/core/services/vrf/vrfcommon/inflight_cache.go @@ -0,0 +1,85 @@ +package vrfcommon + +import ( + "sync" + + "github.com/ethereum/go-ethereum/core/types" +) + +type InflightCache interface { + Add(lg types.Log) + Contains(lg types.Log) bool + Size() int +} + +var _ InflightCache = (*inflightCache)(nil) + +const cachePruneInterval = 1000 + +type inflightCache struct { + // cache stores the logs whose fulfillments are currently in flight or already fulfilled. + cache map[logKey]struct{} + + // lookback defines how long state should be kept for. Logs included in blocks older than + // lookback may or may not be redelivered. + lookback int + + // lastPruneHeight is the blockheight at which logs were last pruned. 
+ lastPruneHeight uint64 + + // mu synchronizes access to the cache map. + mu sync.RWMutex +} + +func NewInflightCache(lookback int) InflightCache { + return &inflightCache{ + cache: make(map[logKey]struct{}), + lookback: lookback, + mu: sync.RWMutex{}, + } +} + +func (c *inflightCache) Size() int { + c.mu.RLock() + defer c.mu.RUnlock() + return len(c.cache) +} + +func (c *inflightCache) Add(lg types.Log) { + c.mu.Lock() + defer c.mu.Unlock() // unlock in the last defer, so that we hold the lock when pruning. + defer c.prune(lg.BlockNumber) + + c.cache[logKey{ + blockHash: lg.BlockHash, + blockNumber: lg.BlockNumber, + logIndex: lg.Index, + }] = struct{}{} +} + +func (c *inflightCache) Contains(lg types.Log) bool { + c.mu.RLock() + defer c.mu.RUnlock() + + _, ok := c.cache[logKey{ + blockHash: lg.BlockHash, + blockNumber: lg.BlockNumber, + logIndex: lg.Index, + }] + return ok +} + +func (c *inflightCache) prune(logBlock uint64) { + // Only prune every cachePruneInterval blocks + if int(logBlock)-int(c.lastPruneHeight) < cachePruneInterval { + return + } + + for key := range c.cache { + if int(key.blockNumber) < int(logBlock)-c.lookback { + delete(c.cache, key) + } + } + + c.lastPruneHeight = logBlock +} diff --git a/core/services/vrf/vrfcommon/log_dedupe.go b/core/services/vrf/vrfcommon/log_dedupe.go new file mode 100644 index 00000000..3130f8f0 --- /dev/null +++ b/core/services/vrf/vrfcommon/log_dedupe.go @@ -0,0 +1,89 @@ +package vrfcommon + +import ( + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// pruneInterval is the interval in blocks at which to prune old data from the delivered set. +const pruneInterval = 100 + +func NewLogDeduper(lookback int) *LogDeduper { + return &LogDeduper{ + delivered: make(map[logKey]struct{}), + lookback: lookback, + } +} + +// LogDeduper prevents duplicate logs from being reprocessed. 
+type LogDeduper struct { + // delivered is the set of logs within the lookback that have already been delivered. + delivered map[logKey]struct{} + + // lookback defines how long state should be kept for. Logs included in blocks older than + // lookback may or may not be redelivered. + lookback int + + // lastPruneHeight is the blockheight at which logs were last pruned. + lastPruneHeight uint64 + + // mu synchronizes access to the delivered map. + mu sync.Mutex +} + +// logKey represents uniquely identifying information for a single log broadcast. +type logKey struct { + + // blockHash of the block the log was included in. + blockHash common.Hash + + // blockNumber of the block the log was included in. This is necessary to prune old logs. + blockNumber uint64 + + // logIndex of the log in the block. + logIndex uint +} + +func (l *LogDeduper) ShouldDeliver(log types.Log) bool { + l.mu.Lock() + defer l.mu.Unlock() // unlock in the last defer, so that we hold the lock when pruning. + defer l.Prune(log.BlockNumber) + + key := logKey{ + blockHash: log.BlockHash, + blockNumber: log.BlockNumber, + logIndex: log.Index, + } + + if _, ok := l.delivered[key]; ok { + return false + } + + l.delivered[key] = struct{}{} + return true +} + +func (l *LogDeduper) Prune(logBlock uint64) { + // Only prune every pruneInterval blocks + if int(logBlock)-int(l.lastPruneHeight) < pruneInterval { + return + } + + for key := range l.delivered { + if int(key.blockNumber) < int(logBlock)-l.lookback { + delete(l.delivered, key) + } + } + + l.lastPruneHeight = logBlock +} + +// Clear clears the log deduper's internal cache. 
+func (l *LogDeduper) Clear() { + l.mu.Lock() + defer l.mu.Unlock() + + l.delivered = make(map[logKey]struct{}) +} diff --git a/core/services/vrf/vrfcommon/log_dedupe_test.go b/core/services/vrf/vrfcommon/log_dedupe_test.go new file mode 100644 index 00000000..f606b95e --- /dev/null +++ b/core/services/vrf/vrfcommon/log_dedupe_test.go @@ -0,0 +1,183 @@ +package vrfcommon + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +func TestLogDeduper(t *testing.T) { + tests := []struct { + name string + logs []types.Log + results []bool + }{ + { + name: "dupe", + logs: []types.Log{ + { + BlockNumber: 10, + BlockHash: common.Hash{0x1}, + Index: 3, + }, + { + BlockNumber: 10, + BlockHash: common.Hash{0x1}, + Index: 3, + }, + }, + results: []bool{true, false}, + }, + { + name: "different block number", + logs: []types.Log{ + { + BlockNumber: 10, + BlockHash: common.Hash{0x1}, + Index: 3, + }, + { + BlockNumber: 11, + BlockHash: common.Hash{0x2}, + Index: 3, + }, + }, + results: []bool{true, true}, + }, + { + name: "same block number different hash", + logs: []types.Log{ + { + BlockNumber: 10, + BlockHash: common.Hash{0x1}, + Index: 3, + }, + { + BlockNumber: 10, + BlockHash: common.Hash{0x2}, + Index: 3, + }, + }, + results: []bool{true, true}, + }, + { + name: "same block number same hash different index", + logs: []types.Log{ + { + BlockNumber: 10, + BlockHash: common.Hash{0x1}, + Index: 3, + }, + { + BlockNumber: 10, + BlockHash: common.Hash{0x1}, + Index: 4, + }, + }, + results: []bool{true, true}, + }, + { + name: "same block number same hash different index", + logs: []types.Log{ + { + BlockNumber: 10, + BlockHash: common.Hash{0x1}, + Index: 3, + }, + { + BlockNumber: 10, + BlockHash: common.Hash{0x1}, + Index: 4, + }, + }, + results: []bool{true, true}, + }, + { + name: "multiple blocks with dupes", + logs: []types.Log{ + { + BlockNumber: 10, + BlockHash: 
common.Hash{0x10}, + Index: 3, + }, + { + BlockNumber: 10, + BlockHash: common.Hash{0x10}, + Index: 4, + }, + { + BlockNumber: 11, + BlockHash: common.Hash{0x11}, + Index: 0, + }, + { + BlockNumber: 10, + BlockHash: common.Hash{0x10}, + Index: 3, + }, + { + BlockNumber: 10, + BlockHash: common.Hash{0x10}, + Index: 4, + }, + { + BlockNumber: 12, + BlockHash: common.Hash{0x12}, + Index: 1, + }, + }, + results: []bool{true, true, true, false, false, true}, + }, + { + name: "prune", + logs: []types.Log{ + { + BlockNumber: 10, + BlockHash: common.Hash{0x10}, + Index: 3, + }, + { + BlockNumber: 11, + BlockHash: common.Hash{0x11}, + Index: 11, + }, + { + BlockNumber: 1015, + BlockHash: common.Hash{0x1, 0x1, 0x5}, + Index: 0, + }, + // Now the logs at blocks 10 and 11 should be pruned, and therefore redelivered. + // The log at block 115 should not be redelivered. + { + BlockNumber: 10, + BlockHash: common.Hash{0x10}, + Index: 3, + }, + { + BlockNumber: 11, + BlockHash: common.Hash{0x11}, + Index: 11, + }, + { + BlockNumber: 1015, + BlockHash: common.Hash{0x1, 0x1, 0x5}, + Index: 0, + }, + }, + results: []bool{true, true, true, true, true, false}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + deduper := NewLogDeduper(100) + + for i := range test.logs { + require.Equal(t, test.results[i], deduper.ShouldDeliver(test.logs[i]), + "expected shouldDeliver for log %d to be %t", i, test.results[i]) + } + }) + } +} diff --git a/core/services/vrf/vrfcommon/metrics.go b/core/services/vrf/vrfcommon/metrics.go new file mode 100644 index 00000000..cd64b5f1 --- /dev/null +++ b/core/services/vrf/vrfcommon/metrics.go @@ -0,0 +1,94 @@ +package vrfcommon + +import ( + "time" + + "github.com/google/uuid" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +// version describes a VRF version. 
+type Version string + +const ( + V1 Version = "V1" + V2 Version = "V2" + V2Plus Version = "V2Plus" +) + +// dropReason describes a reason why a VRF request is dropped from the queue. +type dropReason string + +const ( + // ReasonMailboxSize describes when a VRF request is dropped due to the log mailbox being + // over capacity. + ReasonMailboxSize dropReason = "mailbox_size" + + // ReasonAge describes when a VRF request is dropped due to its age. + ReasonAge dropReason = "age" +) + +var ( + MetricQueueSize = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "vrf_request_queue_size", + Help: "The number of VRF requests currently in the in-memory queue.", + }, []string{"job_name", "external_job_id", "vrf_version"}) + + MetricProcessedReqs = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "vrf_processed_request_count", + Help: "The number of VRF requests processed.", + }, []string{"job_name", "external_job_id", "vrf_version"}) + + MetricDroppedRequests = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "vrf_dropped_request_count", + Help: "The number of VRF requests dropped due to reasons such as expiry or mailbox size.", + }, []string{"job_name", "external_job_id", "vrf_version", "drop_reason"}) + + MetricDupeRequests = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "vrf_duplicate_requests", + Help: "The number of times the VRF listener receives duplicate requests, which could indicate a reorg.", + }, []string{"job_name", "external_job_id", "vrf_version"}) + + MetricTimeBetweenSims = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "vrf_request_time_between_sims", + Help: "How long a VRF request sits in the in-memory queue in between simulation attempts.", + Buckets: []float64{ + float64(time.Second), + float64(30 * time.Second), + float64(time.Minute), + float64(2 * time.Minute), + float64(5 * time.Minute), + }, + }, []string{"job_name", "external_job_id", "vrf_version"}) + + MetricTimeUntilInitialSim = 
promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "vrf_request_time_until_initial_sim", + Help: "How long a VRF request sits in the in-memory queue until it gets simulated for the first time.", + Buckets: []float64{ + float64(time.Second), + float64(30 * time.Second), + float64(time.Minute), + float64(2 * time.Minute), + float64(5 * time.Minute), + }, + }, []string{"job_name", "external_job_id", "vrf_version"}) +) + +func UpdateQueueSize(jobName string, extJobID uuid.UUID, vrfVersion Version, size int) { + MetricQueueSize.WithLabelValues(jobName, extJobID.String(), string(vrfVersion)). + Set(float64(size)) +} + +func IncProcessedReqs(jobName string, extJobID uuid.UUID, vrfVersion Version) { + MetricProcessedReqs.WithLabelValues(jobName, extJobID.String(), string(vrfVersion)).Inc() +} + +func IncDroppedReqs(jobName string, extJobID uuid.UUID, vrfVersion Version, reason dropReason) { + MetricDroppedRequests.WithLabelValues( + jobName, extJobID.String(), string(vrfVersion), string(reason)).Inc() +} + +func IncDupeReqs(jobName string, extJobID uuid.UUID, vrfVersion Version) { + MetricDupeRequests.WithLabelValues(jobName, extJobID.String(), string(vrfVersion)).Inc() +} diff --git a/core/services/vrf/vrfcommon/types.go b/core/services/vrf/vrfcommon/types.go new file mode 100644 index 00000000..00922d79 --- /dev/null +++ b/core/services/vrf/vrfcommon/types.go @@ -0,0 +1,27 @@ +package vrfcommon + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" +) + +type GethKeyStore interface { + GetRoundRobinAddress(chainID *big.Int, addresses ...common.Address) (common.Address, error) +} + +//go:generate mockery --quiet --name Config --output ../mocks/ --case=underscore +type Config interface { + FinalityDepth() uint32 + MinIncomingConfirmations() uint32 +} + +//go:generate mockery --quiet --name FeeConfig --output ../mocks/ 
--case=underscore +type FeeConfig interface { + LimitDefault() uint32 + LimitJobType() config.LimitJobType + PriceMaxKey(addr common.Address) *assets.Wei +} diff --git a/core/services/vrf/vrfcommon/utils.go b/core/services/vrf/vrfcommon/utils.go new file mode 100644 index 00000000..eeeb3e73 --- /dev/null +++ b/core/services/vrf/vrfcommon/utils.go @@ -0,0 +1,78 @@ +package vrfcommon + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" +) + +type RespCountEntry struct { + RequestID string + Count int +} + +func GetRespCounts(ctx context.Context, txm txmgr.TxManager, chainID *big.Int, confirmedBlockNum int64) ( + []RespCountEntry, + error, +) { + counts := []RespCountEntry{} + metaField := "RequestID" + states := []txmgrtypes.TxState{txmgrcommon.TxUnconfirmed, txmgrcommon.TxUnstarted, txmgrcommon.TxInProgress} + // Search for txes with a non-null meta field in the provided states + unconfirmedTxes, err := txm.FindTxesWithMetaFieldByStates(ctx, metaField, states, chainID) + if err != nil { + return nil, errors.Wrap(err, "getRespCounts failed due to error in FindTxesWithMetaFieldByStates") + } + // Fetch completed transactions only as far back as the given confirmedBlockNum. This avoids + // a table scan of the whole table, which could be large if it is unpruned. 
+ var confirmedTxes []*txmgr.Tx + confirmedTxes, err = txm.FindTxesWithMetaFieldByReceiptBlockNum(ctx, metaField, confirmedBlockNum, chainID) + if err != nil { + return nil, errors.Wrap(err, "getRespCounts failed due to error in FindTxesWithMetaFieldByReceiptBlockNum") + } + txes := DedupeTxList(append(unconfirmedTxes, confirmedTxes...)) + respCountMap := make(map[string]int) + // Consolidate the number of txes for each meta RequestID + for _, tx := range txes { + var meta *txmgrtypes.TxMeta[common.Address, common.Hash] + meta, err = tx.GetMeta() + if err != nil { + return nil, errors.Wrap(err, "getRespCounts failed parsing tx meta field") + } + if meta != nil && meta.RequestID != nil { + requestId := meta.RequestID.String() + if _, exists := respCountMap[requestId]; !exists { + respCountMap[requestId] = 0 + } + respCountMap[requestId]++ + } + } + + // Parse response count map into output + for key, value := range respCountMap { + respCountEntry := RespCountEntry{ + RequestID: key, + Count: value, + } + counts = append(counts, respCountEntry) + } + return counts, nil +} + +func DedupeTxList(txes []*txmgr.Tx) []*txmgr.Tx { + txIdMap := make(map[string]bool) + dedupedTxes := []*txmgr.Tx{} + for _, tx := range txes { + if _, found := txIdMap[tx.GetID()]; !found { + txIdMap[tx.GetID()] = true + dedupedTxes = append(dedupedTxes, tx) + } + } + return dedupedTxes +} diff --git a/core/services/vrf/vrfcommon/validate.go b/core/services/vrf/vrfcommon/validate.go new file mode 100644 index 00000000..3cc6b464 --- /dev/null +++ b/core/services/vrf/vrfcommon/validate.go @@ -0,0 +1,101 @@ +package vrfcommon + +import ( + "bytes" + "fmt" + "time" + + "github.com/google/uuid" + "github.com/pelletier/go-toml" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" 
+) + +var ( + ErrKeyNotSet = errors.New("key not set") +) + +func ValidatedVRFSpec(tomlString string) (job.Job, error) { + var jb = job.Job{ + ExternalJobID: uuid.New(), // Default to generating a uuid, can be overwritten by the specified one in tomlString. + } + + tree, err := toml.Load(tomlString) + if err != nil { + return jb, errors.Wrap(err, "toml error on load") + } + + err = tree.Unmarshal(&jb) + if err != nil { + return jb, errors.Wrap(err, "toml unmarshal error on spec") + } + if jb.Type != job.VRF { + return jb, errors.Errorf("unsupported type %s", jb.Type) + } + + var spec job.VRFSpec + err = tree.Unmarshal(&spec) + if err != nil { + return jb, errors.Wrap(err, "toml unmarshal error on job") + } + + var empty secp256k1.PublicKey + if bytes.Equal(spec.PublicKey[:], empty[:]) { + return jb, errors.Wrap(ErrKeyNotSet, "publicKey") + } + if spec.CoordinatorAddress.String() == "" { + return jb, errors.Wrap(ErrKeyNotSet, "coordinatorAddress") + } + if spec.RequestedConfsDelay < 0 { + return jb, errors.Wrap(ErrKeyNotSet, "requestedConfsDelay must be >= 0") + } + // If a request timeout is not provided set it to a reasonable default. 
+	if spec.RequestTimeout == 0 {
+		spec.RequestTimeout = 24 * time.Hour
+	}
+
+	if spec.BatchFulfillmentEnabled && spec.BatchCoordinatorAddress == nil {
+		return jb, errors.Wrap(ErrKeyNotSet, "batch coordinator address must be provided if batchFulfillmentEnabled = true")
+	}
+
+	if spec.BatchFulfillmentGasMultiplier <= 0 {
+		spec.BatchFulfillmentGasMultiplier = 1.15
+	}
+
+	if spec.ChunkSize == 0 {
+		spec.ChunkSize = 20
+	}
+
+	if spec.BackoffMaxDelay < spec.BackoffInitialDelay {
+		return jb, fmt.Errorf("backoff max delay (%s) cannot be less than backoff initial delay (%s)",
+			spec.BackoffMaxDelay.String(), spec.BackoffInitialDelay.String())
+	}
+
+	if spec.GasLanePrice != nil && spec.GasLanePrice.Cmp(assets.GWei(0)) <= 0 {
+		return jb, fmt.Errorf("gasLanePrice must be positive, given: %s", spec.GasLanePrice.String())
+	}
+
+	var foundVRFTask bool
+	for _, t := range jb.Pipeline.Tasks {
+		if t.Type() == pipeline.TaskTypeVRF || t.Type() == pipeline.TaskTypeVRFV2 || t.Type() == pipeline.TaskTypeVRFV2Plus {
+			foundVRFTask = true
+		}
+
+		if t.Type() == pipeline.TaskTypeVRFV2 || t.Type() == pipeline.TaskTypeVRFV2Plus {
+			if len(spec.FromAddresses) == 0 {
+				return jb, errors.Wrap(ErrKeyNotSet, "fromAddresses needs to have a non-zero length")
+			}
+		}
+	}
+	if !foundVRFTask {
+		// Wrap (not Wrapf): there are no format arguments.
+		return jb, errors.Wrap(ErrKeyNotSet, "invalid pipeline, expected a vrf task")
+	}
+
+	jb.VRFSpec = &spec
+
+	return jb, nil
+}
diff --git a/core/services/vrf/vrfcommon/validate_test.go b/core/services/vrf/vrfcommon/validate_test.go
new file mode 100644
index 00000000..ed21864e
--- /dev/null
+++ b/core/services/vrf/vrfcommon/validate_test.go
@@ -0,0 +1,595 @@
+package vrfcommon
+
+import (
+	"testing"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"
+	"github.com/goplugin/pluginv3.0/v2/core/services/job"
+)
+
+func TestValidateVRFJobSpec(t *testing.T) {
+	var tt = []struct 
{ + name string + toml string + assertion func(t *testing.T, os job.Job, err error) + }{ + { + name: "valid spec", + toml: ` +type = "vrf" +schemaVersion = 1 +minIncomingConfirmations = 10 +publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" +coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" +vrfOwnerAddress = "0x2a0d386f122851dc5AFBE45cb2E8411CE255b000" +requestTimeout = "168h" # 7 days +chunkSize = 25 +backoffInitialDelay = "1m" +backoffMaxDelay = "2h" +observationSource = """ +decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] +submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] +decode_log->vrf->encode_tx->submit_tx +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.NoError(t, err) + require.NotNil(t, s.VRFSpec) + assert.Equal(t, uint32(10), s.VRFSpec.MinIncomingConfirmations) + assert.Equal(t, "0xB3b7874F13387D44a3398D298B075B7A3505D8d4", s.VRFSpec.CoordinatorAddress.String()) + assert.Equal(t, "0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179800", s.VRFSpec.PublicKey.String()) + assert.Equal(t, "0x2a0d386f122851dc5AFBE45cb2E8411CE255b000", s.VRFSpec.VRFOwnerAddress.String()) + require.Equal(t, 168*time.Hour, s.VRFSpec.RequestTimeout) + require.Equal(t, time.Minute, s.VRFSpec.BackoffInitialDelay) + require.Equal(t, 2*time.Hour, s.VRFSpec.BackoffMaxDelay) + require.EqualValues(t, 25, s.VRFSpec.ChunkSize) + }, 
+ }, + { + name: "missing pubkey", + toml: ` +type = "vrf" +schemaVersion = 1 +minIncomingConfirmations = 10 +coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" +observationSource = """ +decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] +submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] +decode_log->vrf->encode_tx->submit_tx +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.Error(t, err) + require.True(t, errors.Is(ErrKeyNotSet, errors.Cause(err))) + }, + }, + { + name: "missing fromAddresses", + toml: ` +type = "vrf" +schemaVersion = 1 +minIncomingConfirmations = 10 +publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" +coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" +requestTimeout = "168h" # 7 days +chunkSize = 25 +backoffInitialDelay = "1m" +backoffMaxDelay = "2h" +observationSource = """ +decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrfv2 + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] +submit_tx [type=ethtx 
to="%s"
+             data="$(encode_tx)"
+             txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"]
+decode_log->vrf->encode_tx->submit_tx
+"""
+	`,
+			assertion: func(t *testing.T, s job.Job, err error) {
+				require.Error(t, err)
+				// errors.Is takes (err, target) in that order; the sentinel
+				// is the target, not the error under test.
+				require.True(t, errors.Is(err, ErrKeyNotSet))
+			},
+		},
+		{
+			name: "missing coordinator address",
+			toml: `
+type = "vrf"
+schemaVersion = 1
+minIncomingConfirmations = 10
+publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800"
+observationSource = """
+decode_log   [type=ethabidecodelog
+              abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)"
+              data="$(jobRun.logData)"
+              topics="$(jobRun.logTopics)"]
+vrf          [type=vrf
+              publicKey="$(jobSpec.publicKey)"
+              requestBlockHash="$(jobRun.logBlockHash)"
+              requestBlockNumber="$(jobRun.logBlockNumber)"
+              topics="$(jobRun.logTopics)"]
+encode_tx    [type=ethabiencode
+              abi="fulfillRandomnessRequest(bytes proof)"
+              data="{\\"proof\\": $(vrf)}"]
+submit_tx  [type=ethtx to="%s"
+            data="$(encode_tx)"
+            txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"]
+decode_log->vrf->encode_tx->submit_tx
+"""
+`,
+			assertion: func(t *testing.T, s job.Job, err error) {
+				require.Error(t, err)
+				// errors.Is takes (err, target) in that order; the sentinel
+				// is the target, not the error under test.
+				require.True(t, errors.Is(err, ErrKeyNotSet))
+			},
+		},
+		{
+			name: "jobID override default",
+			toml: `
+type = "vrf"
+schemaVersion = 1
+minIncomingConfirmations = 10
+publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800"
+coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4"
+externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46"
+observationSource = """
+decode_log   [type=ethabidecodelog
+              abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)"
+              data="$(jobRun.logData)"
+             
topics="$(jobRun.logTopics)"] +vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] +submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] +decode_log->vrf->encode_tx->submit_tx +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.NoError(t, err) + assert.Equal(t, s.ExternalJobID.String(), "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46") + }, + }, + { + name: "no requested confs delay", + toml: ` + type = "vrf" + schemaVersion = 1 + minIncomingConfirmations = 10 + publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" + coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" + externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46" + observationSource = """ + decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] + vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] + encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] + submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] + decode_log->vrf->encode_tx->submit_tx + """ + `, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + require.Equal(t, int64(0), os.VRFSpec.RequestedConfsDelay) + }, + }, + { + name: "with requested confs 
delay", + toml: ` + type = "vrf" + schemaVersion = 1 + minIncomingConfirmations = 10 + requestedConfsDelay = 10 + publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" + coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" + externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46" + observationSource = """ + decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] + vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] + encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] + submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] + decode_log->vrf->encode_tx->submit_tx + """ + `, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + require.Equal(t, int64(10), os.VRFSpec.RequestedConfsDelay) + }, + }, + { + name: "negative (illegal) requested confs delay", + toml: ` + type = "vrf" + schemaVersion = 1 + minIncomingConfirmations = 10 + requestedConfsDelay = -10 + publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" + coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" + externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46" + observationSource = """ + decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] + vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + 
topics="$(jobRun.logTopics)"] + encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] + submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] + decode_log->vrf->encode_tx->submit_tx + """ + `, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "no request timeout provided, sets default of 1 day", + toml: ` + type = "vrf" + schemaVersion = 1 + minIncomingConfirmations = 10 + requestedConfsDelay = 10 + publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" + coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" + externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46" + observationSource = """ + decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] + vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] + encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] + submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] + decode_log->vrf->encode_tx->submit_tx + """ + `, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + require.Equal(t, 24*time.Hour, os.VRFSpec.RequestTimeout) + }, + }, + { + name: "request timeout provided, uses that", + toml: ` + type = "vrf" + schemaVersion = 1 + minIncomingConfirmations = 10 + requestedConfsDelay = 10 + requestTimeout = "168h" # 7 days + publicKey = 
"0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" + coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" + externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46" + observationSource = """ + decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] + vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] + encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] + submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] + decode_log->vrf->encode_tx->submit_tx + """ + `, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + require.Equal(t, 7*24*time.Hour, os.VRFSpec.RequestTimeout) + }, + }, + { + name: "batch fulfillment enabled, no batch coordinator address", + toml: ` + type = "vrf" + schemaVersion = 1 + minIncomingConfirmations = 10 + requestedConfsDelay = 10 + batchFulfillmentEnabled = true + publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" + coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" + externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46" + observationSource = """ + decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] + vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] + encode_tx [type=ethabiencode + 
abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] + submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] + decode_log->vrf->encode_tx->submit_tx + """ + `, + assertion: func(t *testing.T, os job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "batch fulfillment enabled, batch coordinator address provided", + toml: ` + type = "vrf" + schemaVersion = 1 + minIncomingConfirmations = 10 + requestedConfsDelay = 10 + batchFulfillmentEnabled = true + batchCoordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" + publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" + coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" + externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46" + observationSource = """ + decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] + vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] + encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] + submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] + decode_log->vrf->encode_tx->submit_tx + """ + `, + assertion: func(t *testing.T, os job.Job, err error) { + require.NoError(t, err) + require.Equal(t, "0xB3b7874F13387D44a3398D298B075B7A3505D8d4", os.VRFSpec.BatchCoordinatorAddress.String()) + }, + }, + { + name: "initial delay must be <= max delay, invalid", + toml: ` +type = "vrf" +schemaVersion = 1 +minIncomingConfirmations = 10 
+publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" +coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" +requestTimeout = "168h" # 7 days +chunkSize = 25 +backoffInitialDelay = "1h" +backoffMaxDelay = "30m" +observationSource = """ +decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] +submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] +decode_log->vrf->encode_tx->submit_tx +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "gas lane price provided", + toml: ` +type = "vrf" +schemaVersion = 1 +minIncomingConfirmations = 10 +publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" +coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" +requestTimeout = "168h" # 7 days +chunkSize = 25 +backoffInitialDelay = "1m" +backoffMaxDelay = "2h" +gasLanePrice = "200 gwei" +observationSource = """ +decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + 
data="{\\"proof\\": $(vrf)}"] +submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] +decode_log->vrf->encode_tx->submit_tx +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.NoError(t, err) + require.NotNil(t, s.VRFSpec) + assert.Equal(t, uint32(10), s.VRFSpec.MinIncomingConfirmations) + assert.Equal(t, "0xB3b7874F13387D44a3398D298B075B7A3505D8d4", s.VRFSpec.CoordinatorAddress.String()) + assert.Equal(t, "0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179800", s.VRFSpec.PublicKey.String()) + require.Equal(t, 168*time.Hour, s.VRFSpec.RequestTimeout) + require.Equal(t, time.Minute, s.VRFSpec.BackoffInitialDelay) + require.Equal(t, 2*time.Hour, s.VRFSpec.BackoffMaxDelay) + require.EqualValues(t, 25, s.VRFSpec.ChunkSize) + require.Equal(t, assets.GWei(200), s.VRFSpec.GasLanePrice) + }, + }, + { + name: "invalid (negative) gas lane price provided", + toml: ` +type = "vrf" +schemaVersion = 1 +minIncomingConfirmations = 10 +publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" +coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" +requestTimeout = "168h" # 7 days +chunkSize = 25 +backoffInitialDelay = "1m" +backoffMaxDelay = "2h" +gasLanePrice = "-200" +observationSource = """ +decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] +submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": 
$(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] +decode_log->vrf->encode_tx->submit_tx +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.Error(t, err) + }, + }, + { + name: "invalid (zero) gas lane price gwei provided", + toml: ` +type = "vrf" +schemaVersion = 1 +minIncomingConfirmations = 10 +publicKey = "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" +coordinatorAddress = "0xB3b7874F13387D44a3398D298B075B7A3505D8d4" +requestTimeout = "168h" # 7 days +chunkSize = 25 +backoffInitialDelay = "1m" +backoffMaxDelay = "2h" +gasLanePrice = "0 gwei" +observationSource = """ +decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] +submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] +decode_log->vrf->encode_tx->submit_tx +""" +`, + assertion: func(t *testing.T, s job.Job, err error) { + require.Error(t, err) + }, + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + s, err := ValidatedVRFSpec(tc.toml) + tc.assertion(t, s, err) + }) + } +} diff --git a/core/services/vrf/vrftesthelpers/consumer_v2.go b/core/services/vrf/vrftesthelpers/consumer_v2.go new file mode 100644 index 00000000..85549a81 --- /dev/null +++ b/core/services/vrf/vrftesthelpers/consumer_v2.go @@ -0,0 +1,331 @@ +package vrftesthelpers + +import ( + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + 
"github.com/ethereum/go-ethereum/common" + gethtypes "github.com/ethereum/go-ethereum/core/types" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_consumer_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_consumer_v2_plus_upgradeable_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_consumer_v2_upgradeable_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_malicious_consumer_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_malicious_consumer_v2_plus" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_reverting_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2plus_consumer_example" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2plus_reverting_example" +) + +var ( + _ VRFConsumerContract = (*vrfConsumerContract)(nil) +) + +// VRFConsumerContract is the common interface implemented by +// the example contracts used for the integration tests. 
+type VRFConsumerContract interface { + CreateSubscriptionAndFund(opts *bind.TransactOpts, fundingJuels *big.Int) (*gethtypes.Transaction, error) + CreateSubscriptionAndFundNative(opts *bind.TransactOpts, fundingAmount *big.Int) (*gethtypes.Transaction, error) + SSubId(opts *bind.CallOpts) (*big.Int, error) + SRequestId(opts *bind.CallOpts) (*big.Int, error) + RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, subID *big.Int, minReqConfs uint16, callbackGasLimit uint32, numWords uint32, payInEth bool) (*gethtypes.Transaction, error) + SRandomWords(opts *bind.CallOpts, randomwordIdx *big.Int) (*big.Int, error) + TopUpSubscription(opts *bind.TransactOpts, amount *big.Int) (*gethtypes.Transaction, error) + TopUpSubscriptionNative(opts *bind.TransactOpts, amount *big.Int) (*gethtypes.Transaction, error) + SGasAvailable(opts *bind.CallOpts) (*big.Int, error) + UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*gethtypes.Transaction, error) + SetSubID(opts *bind.TransactOpts, subID *big.Int) (*gethtypes.Transaction, error) +} + +type ConsumerType string + +const ( + VRFConsumerV2 ConsumerType = "VRFConsumerV2" + VRFV2PlusConsumer ConsumerType = "VRFV2PlusConsumer" + MaliciousConsumer ConsumerType = "MaliciousConsumer" + MaliciousConsumerPlus ConsumerType = "MaliciousConsumerPlus" + RevertingConsumer ConsumerType = "RevertingConsumer" + RevertingConsumerPlus ConsumerType = "RevertingConsumerPlus" + UpgradeableConsumer ConsumerType = "UpgradeableConsumer" + UpgradeableConsumerPlus ConsumerType = "UpgradeableConsumerPlus" +) + +type vrfConsumerContract struct { + consumerType ConsumerType + vrfConsumerV2 *vrf_consumer_v2.VRFConsumerV2 + vrfV2PlusConsumer *vrfv2plus_consumer_example.VRFV2PlusConsumerExample + maliciousConsumer *vrf_malicious_consumer_v2.VRFMaliciousConsumerV2 + maliciousConsumerPlus *vrf_malicious_consumer_v2_plus.VRFMaliciousConsumerV2Plus + revertingConsumer *vrfv2_reverting_example.VRFV2RevertingExample + 
revertingConsumerPlus *vrfv2plus_reverting_example.VRFV2PlusRevertingExample + upgradeableConsumer *vrf_consumer_v2_upgradeable_example.VRFConsumerV2UpgradeableExample + upgradeableConsumerPlus *vrf_consumer_v2_plus_upgradeable_example.VRFConsumerV2PlusUpgradeableExample +} + +func NewVRFConsumerV2(consumer *vrf_consumer_v2.VRFConsumerV2) *vrfConsumerContract { + return &vrfConsumerContract{ + consumerType: VRFConsumerV2, + vrfConsumerV2: consumer, + } +} + +func NewVRFV2PlusConsumer(consumer *vrfv2plus_consumer_example.VRFV2PlusConsumerExample) *vrfConsumerContract { + return &vrfConsumerContract{ + consumerType: VRFV2PlusConsumer, + vrfV2PlusConsumer: consumer, + } +} + +func NewMaliciousConsumer(consumer *vrf_malicious_consumer_v2.VRFMaliciousConsumerV2) *vrfConsumerContract { + return &vrfConsumerContract{ + consumerType: MaliciousConsumer, + maliciousConsumer: consumer, + } +} + +func NewMaliciousConsumerPlus(consumer *vrf_malicious_consumer_v2_plus.VRFMaliciousConsumerV2Plus) *vrfConsumerContract { + return &vrfConsumerContract{ + consumerType: MaliciousConsumerPlus, + maliciousConsumerPlus: consumer, + } +} + +func NewRevertingConsumer(consumer *vrfv2_reverting_example.VRFV2RevertingExample) *vrfConsumerContract { + return &vrfConsumerContract{ + consumerType: RevertingConsumer, + revertingConsumer: consumer, + } +} + +func NewRevertingConsumerPlus(consumer *vrfv2plus_reverting_example.VRFV2PlusRevertingExample) *vrfConsumerContract { + return &vrfConsumerContract{ + consumerType: RevertingConsumerPlus, + revertingConsumerPlus: consumer, + } +} + +func NewUpgradeableConsumer(consumer *vrf_consumer_v2_upgradeable_example.VRFConsumerV2UpgradeableExample) *vrfConsumerContract { + return &vrfConsumerContract{ + consumerType: UpgradeableConsumer, + upgradeableConsumer: consumer, + } +} + +func NewUpgradeableConsumerPlus(consumer *vrf_consumer_v2_plus_upgradeable_example.VRFConsumerV2PlusUpgradeableExample) *vrfConsumerContract { + return &vrfConsumerContract{ + 
consumerType: UpgradeableConsumerPlus, + upgradeableConsumerPlus: consumer, + } +} + +func (c *vrfConsumerContract) CreateSubscriptionAndFund(opts *bind.TransactOpts, fundingJuels *big.Int) (*gethtypes.Transaction, error) { + if c.consumerType == VRFConsumerV2 { + return c.vrfConsumerV2.CreateSubscriptionAndFund(opts, fundingJuels) + } + if c.consumerType == VRFV2PlusConsumer { + return c.vrfV2PlusConsumer.CreateSubscriptionAndFund(opts, fundingJuels) + } + if c.consumerType == UpgradeableConsumer { + return c.upgradeableConsumer.CreateSubscriptionAndFund(opts, fundingJuels) + } + if c.consumerType == UpgradeableConsumerPlus { + return c.upgradeableConsumerPlus.CreateSubscriptionAndFund(opts, fundingJuels) + } + if c.consumerType == MaliciousConsumer { + return c.maliciousConsumer.CreateSubscriptionAndFund(opts, fundingJuels) + } + if c.consumerType == MaliciousConsumerPlus { + return c.maliciousConsumerPlus.CreateSubscriptionAndFund(opts, fundingJuels) + } + if c.consumerType == RevertingConsumer { + return c.revertingConsumer.CreateSubscriptionAndFund(opts, fundingJuels) + } + if c.consumerType == RevertingConsumerPlus { + return c.revertingConsumerPlus.CreateSubscriptionAndFund(opts, fundingJuels) + } + return nil, errors.New("CreateSubscriptionAndFund is not supported") +} + +func (c *vrfConsumerContract) SSubId(opts *bind.CallOpts) (*big.Int, error) { + if c.consumerType == VRFConsumerV2 { + subID, err := c.vrfConsumerV2.SSubId(opts) + if err != nil { + return nil, err + } + return new(big.Int).SetUint64(subID), nil + } + if c.consumerType == VRFV2PlusConsumer { + return c.vrfV2PlusConsumer.SSubId(opts) + } + if c.consumerType == UpgradeableConsumer { + subID, err := c.upgradeableConsumer.SSubId(opts) + if err != nil { + return nil, err + } + return new(big.Int).SetUint64(subID), nil + } + if c.consumerType == UpgradeableConsumerPlus { + return c.upgradeableConsumerPlus.SSubId(opts) + } + if c.consumerType == RevertingConsumer { + subID, err := 
c.revertingConsumer.SSubId(opts) + if err != nil { + return nil, err + } + return new(big.Int).SetUint64(subID), nil + } + if c.consumerType == RevertingConsumerPlus { + return c.revertingConsumerPlus.SSubId(opts) + } + return nil, errors.New("SSubId is not supported") +} + +func (c *vrfConsumerContract) SRequestId(opts *bind.CallOpts) (*big.Int, error) { + if c.consumerType == VRFConsumerV2 { + return c.vrfConsumerV2.SRequestId(opts) + } + if c.consumerType == VRFV2PlusConsumer { + return c.vrfV2PlusConsumer.SRecentRequestId(opts) + } + if c.consumerType == UpgradeableConsumer { + return c.upgradeableConsumer.SRequestId(opts) + } + if c.consumerType == UpgradeableConsumerPlus { + return c.upgradeableConsumerPlus.SRequestId(opts) + } + if c.consumerType == MaliciousConsumer { + return c.maliciousConsumer.SRequestId(opts) + } + if c.consumerType == MaliciousConsumerPlus { + return c.maliciousConsumerPlus.SRequestId(opts) + } + if c.consumerType == RevertingConsumer { + return c.revertingConsumer.SRequestId(opts) + } + if c.consumerType == RevertingConsumerPlus { + return c.revertingConsumerPlus.SRequestId(opts) + } + return nil, errors.New("SRequestId is not supported") +} + +func (c *vrfConsumerContract) RequestRandomness(opts *bind.TransactOpts, keyHash [32]byte, subID *big.Int, minReqConfs uint16, callbackGasLimit uint32, numWords uint32, payInEth bool) (*gethtypes.Transaction, error) { + if c.consumerType == VRFV2PlusConsumer { + return c.vrfV2PlusConsumer.RequestRandomWords(opts, callbackGasLimit, minReqConfs, numWords, keyHash, payInEth) + } + if payInEth { + return nil, errors.New("eth payment not supported") + } + if c.consumerType == VRFConsumerV2 { + return c.vrfConsumerV2.RequestRandomness(opts, keyHash, subID.Uint64(), minReqConfs, callbackGasLimit, numWords) + } + if c.consumerType == UpgradeableConsumer { + return c.upgradeableConsumer.RequestRandomness(opts, keyHash, subID.Uint64(), minReqConfs, callbackGasLimit, numWords) + } + if c.consumerType == 
UpgradeableConsumerPlus { + return c.upgradeableConsumerPlus.RequestRandomness(opts, keyHash, subID, minReqConfs, callbackGasLimit, numWords) + } + if c.consumerType == MaliciousConsumer { + return c.maliciousConsumer.RequestRandomness(opts, keyHash) + } + if c.consumerType == MaliciousConsumerPlus { + return c.maliciousConsumerPlus.RequestRandomness(opts, keyHash) + } + if c.consumerType == RevertingConsumer { + return c.revertingConsumer.RequestRandomness(opts, keyHash, subID.Uint64(), minReqConfs, callbackGasLimit, numWords) + } + if c.consumerType == RevertingConsumerPlus { + return c.revertingConsumerPlus.RequestRandomness(opts, keyHash, subID, minReqConfs, callbackGasLimit, numWords) + } + return nil, errors.New("RequestRandomness is not supported") +} + +func (c *vrfConsumerContract) SRandomWords(opts *bind.CallOpts, randomwordIdx *big.Int) (*big.Int, error) { + if c.consumerType == VRFConsumerV2 { + return c.vrfConsumerV2.SRandomWords(opts, randomwordIdx) + } + if c.consumerType == VRFV2PlusConsumer { + requestID, err := c.vrfV2PlusConsumer.SRecentRequestId(opts) + if err != nil { + return nil, err + } + randomWord, err := c.vrfV2PlusConsumer.GetRandomness(opts, requestID, randomwordIdx) + if err != nil { + return nil, err + } + return randomWord, nil + } + if c.consumerType == UpgradeableConsumer { + return c.upgradeableConsumer.SRandomWords(opts, randomwordIdx) + } + if c.consumerType == UpgradeableConsumerPlus { + return c.upgradeableConsumerPlus.SRandomWords(opts, randomwordIdx) + } + return nil, errors.New("SRandomWords is not supported") +} + +func (c *vrfConsumerContract) TopUpSubscription(opts *bind.TransactOpts, fundingJuels *big.Int) (*gethtypes.Transaction, error) { + if c.consumerType == VRFConsumerV2 { + return c.vrfConsumerV2.TopUpSubscription(opts, fundingJuels) + } + if c.consumerType == RevertingConsumer { + return c.revertingConsumer.TopUpSubscription(opts, fundingJuels) + } + if c.consumerType == RevertingConsumerPlus { + return 
c.revertingConsumerPlus.TopUpSubscription(opts, fundingJuels) + } + if c.consumerType == VRFV2PlusConsumer { + return c.vrfV2PlusConsumer.TopUpSubscription(opts, fundingJuels) + } + return nil, errors.New("TopUpSubscription is not supported") +} + +func (c *vrfConsumerContract) SGasAvailable(opts *bind.CallOpts) (*big.Int, error) { + if c.consumerType == VRFConsumerV2 { + return c.vrfConsumerV2.SGasAvailable(opts) + } + if c.consumerType == UpgradeableConsumer { + return c.upgradeableConsumer.SGasAvailable(opts) + } + if c.consumerType == UpgradeableConsumerPlus { + return c.upgradeableConsumerPlus.SGasAvailable(opts) + } + return nil, errors.New("SGasAvailable is not supported") +} + +func (c *vrfConsumerContract) UpdateSubscription(opts *bind.TransactOpts, consumers []common.Address) (*gethtypes.Transaction, error) { + if c.consumerType == VRFConsumerV2 { + return c.vrfConsumerV2.UpdateSubscription(opts, consumers) + } + if c.consumerType == VRFV2PlusConsumer { + return c.vrfV2PlusConsumer.UpdateSubscription(opts, consumers) + } + return nil, errors.New("UpdateSubscription is not supported") +} + +func (c *vrfConsumerContract) SetSubID(opts *bind.TransactOpts, subID *big.Int) (*gethtypes.Transaction, error) { + if c.consumerType == VRFV2PlusConsumer { + return c.vrfV2PlusConsumer.SetSubId(opts, subID) + } + return nil, errors.New("SetSubID is not supported") +} + +func (c *vrfConsumerContract) CreateSubscriptionAndFundNative(opts *bind.TransactOpts, fundingAmount *big.Int) (*gethtypes.Transaction, error) { + if c.consumerType == VRFV2PlusConsumer { + // copy object to not mutate original opts + o := *opts + o.Value = fundingAmount + return c.vrfV2PlusConsumer.CreateSubscriptionAndFundNative(&o) + } + return nil, errors.New("CreateSubscriptionAndFundNative is not supported") +} + +func (c *vrfConsumerContract) TopUpSubscriptionNative(opts *bind.TransactOpts, amount *big.Int) (*gethtypes.Transaction, error) { + if c.consumerType == VRFV2PlusConsumer { + // copy 
object to not mutate original opts + o := *opts + o.Value = amount + return c.vrfV2PlusConsumer.TopUpSubscriptionNative(&o) + } + return nil, errors.New("TopUpSubscriptionNative is not supported") +} diff --git a/core/services/vrf/vrftesthelpers/helpers.go b/core/services/vrf/vrftesthelpers/helpers.go new file mode 100644 index 00000000..fb7e126c --- /dev/null +++ b/core/services/vrf/vrftesthelpers/helpers.go @@ -0,0 +1,252 @@ +package vrftesthelpers + +import ( + "math/big" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/google/uuid" + "github.com/onsi/gomega" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_consumer_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_consumer_interface_v08" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_request_id" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_request_id_v08" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore" + "github.com/goplugin/pluginv3.0/v2/core/services/blockheaderfeeder" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + 
	"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey"
	"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey"
	"github.com/goplugin/pluginv3.0/v2/core/services/vrf/proof"
	"github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs"
)

var (
	// WeiPerUnitLink is the conversion rate used by these test helpers
	// (1e16, i.e. 0.01 ether-equivalent per unit).
	WeiPerUnitLink = decimal.RequireFromString("10000000000000000")
)

// GenerateProofResponseFromProof forwards to proof.GenerateProofResponseFromProof,
// giving test packages a single entry point for building marshaled on-chain
// proof responses.
func GenerateProofResponseFromProof(p vrfkey.Proof, s proof.PreSeedData) (
	proof.MarshaledOnChainResponse, error) {
	return proof.GenerateProofResponseFromProof(p, s)
}

// CreateAndStartBHSJob builds a blockhash-store job spec from the given
// addresses/parameters, submits it to the test application's job spawner, and
// blocks until a BlockhashStore job is reported active (or the test times out).
// It returns the validated job.
func CreateAndStartBHSJob(
	t *testing.T,
	fromAddresses []string,
	app *cltest.TestApplication,
	bhsAddress, coordinatorV1Address, coordinatorV2Address, coordinatorV2PlusAddress string,
	trustedBlockhashStoreAddress string, trustedBlockhashStoreBatchSize int32, lookback int,
	heartbeatPeriod time.Duration, waitBlocks int,
) job.Job {
	jid := uuid.New()
	s := testspecs.GenerateBlockhashStoreSpec(testspecs.BlockhashStoreSpecParams{
		JobID:                          jid.String(),
		Name:                           "blockhash-store",
		CoordinatorV1Address:           coordinatorV1Address,
		CoordinatorV2Address:           coordinatorV2Address,
		CoordinatorV2PlusAddress:       coordinatorV2PlusAddress,
		WaitBlocks:                     waitBlocks,
		LookbackBlocks:                 lookback,
		HeartbeatPeriod:                heartbeatPeriod,
		BlockhashStoreAddress:          bhsAddress,
		TrustedBlockhashStoreAddress:   trustedBlockhashStoreAddress,
		TrustedBlockhashStoreBatchSize: trustedBlockhashStoreBatchSize,
		PollPeriod:                     time.Second,
		RunTimeout:                     10 * time.Second,
		EVMChainID:                     1337, // chain ID of the simulated backend used by these tests
		FromAddresses:                  fromAddresses,
	})
	jb, err := blockhashstore.ValidatedSpec(s.Toml())
	require.NoError(t, err)

	require.NoError(t, app.JobSpawner().CreateJob(&jb))
	// Poll until the spawner lists an active BlockhashStore job so callers can
	// rely on the job actually running when this helper returns.
	gomega.NewWithT(t).Eventually(func() bool {
		jbs := app.JobSpawner().ActiveJobs()
		for _, jb := range jbs {
			if jb.Type == job.BlockhashStore {
				return true
			}
		}
		return false
	}, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue())

	return jb
}

// CreateAndStartBlockHeaderFeederJob builds a block-header-feeder job spec,
// submits it to the test application's job spawner, and blocks until a
// BlockHeaderFeeder job is reported active (or the test times out).
func CreateAndStartBlockHeaderFeederJob(
	t *testing.T,
	fromAddresses []string,
	app *cltest.TestApplication,
	bhsAddress, batchBHSAddress, coordinatorV1Address, coordinatorV2Address, coordinatorV2PlusAddress string,
) job.Job {
	jid := uuid.New()
	s := testspecs.GenerateBlockHeaderFeederSpec(testspecs.BlockHeaderFeederSpecParams{
		JobID:                      jid.String(),
		Name:                       "block-header-feeder",
		CoordinatorV1Address:       coordinatorV1Address,
		CoordinatorV2Address:       coordinatorV2Address,
		CoordinatorV2PlusAddress:   coordinatorV2PlusAddress,
		WaitBlocks:                 256,
		LookbackBlocks:             1000,
		BlockhashStoreAddress:      bhsAddress,
		BatchBlockhashStoreAddress: batchBHSAddress,
		PollPeriod:                 15 * time.Second,
		RunTimeout:                 15 * time.Second,
		EVMChainID:                 1337, // chain ID of the simulated backend used by these tests
		FromAddresses:              fromAddresses,
		GetBlockhashesBatchSize:    20,
		StoreBlockhashesBatchSize:  20,
	})
	jb, err := blockheaderfeeder.ValidatedSpec(s.Toml())
	require.NoError(t, err)

	require.NoError(t, app.JobSpawner().CreateJob(&jb))
	// Poll until the spawner lists an active BlockHeaderFeeder job.
	gomega.NewWithT(t).Eventually(func() bool {
		jbs := app.JobSpawner().ActiveJobs()
		for _, jb := range jbs {
			if jb.Type == job.BlockHeaderFeeder {
				return true
			}
		}
		return false
	}, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue())

	return jb
}

// CoordinatorUniverse represents the universe in which a randomness request occurs and
// is fulfilled.
type CoordinatorUniverse struct {
	// Golang wrappers of solidity contracts
	RootContract               *solidity_vrf_coordinator_interface.VRFCoordinator
	LinkContract               *link_token_interface.LinkToken
	BHSContract                *blockhash_store.BlockhashStore
	ConsumerContract           *solidity_vrf_consumer_interface.VRFConsumer
	RequestIDBase              *solidity_vrf_request_id.VRFRequestIDBaseTestHelper
	ConsumerContractV08        *solidity_vrf_consumer_interface_v08.VRFConsumer
	RequestIDBaseV08           *solidity_vrf_request_id_v08.VRFRequestIDBaseTestHelper
	RootContractAddress        common.Address
	ConsumerContractAddress    common.Address
	ConsumerContractAddressV08 common.Address
	LinkContractAddress        common.Address
	BHSContractAddress         common.Address

	// Abstraction representation of the ethereum blockchain
	Backend        *backends.SimulatedBackend
	CoordinatorABI *abi.ABI
	ConsumerABI    *abi.ABI
	// Cast of participants
	Sergey *bind.TransactOpts // Owns all the PLI initially
	Neil   *bind.TransactOpts // Node operator running VRF service
	Ned    *bind.TransactOpts // Secondary node operator
	Carol  *bind.TransactOpts // Author of consuming contract which requests randomness
}

var oneEth = big.NewInt(1000000000000000000) // 1e18 wei

// NewVRFCoordinatorUniverseWithV08Consumer builds a standard coordinator
// universe and additionally deploys the v0.8 consumer and request-ID helper
// contracts, funding the v0.8 consumer with 1e18 juels of PLI.
func NewVRFCoordinatorUniverseWithV08Consumer(t *testing.T, key ethkey.KeyV2) CoordinatorUniverse {
	cu := NewVRFCoordinatorUniverse(t, key)
	consumerContractAddress, _, consumerContract, err :=
		solidity_vrf_consumer_interface_v08.DeployVRFConsumer(
			cu.Carol, cu.Backend, cu.RootContractAddress, cu.LinkContractAddress)
	require.NoError(t, err, "failed to deploy v08 VRFConsumer contract to simulated ethereum blockchain")
	_, _, requestIDBase, err :=
		solidity_vrf_request_id_v08.DeployVRFRequestIDBaseTestHelper(cu.Neil, cu.Backend)
	require.NoError(t, err, "failed to deploy v08 VRFRequestIDBaseTestHelper contract to simulated ethereum blockchain")
	cu.ConsumerContractAddressV08 = consumerContractAddress
	cu.RequestIDBaseV08 = requestIDBase
	cu.ConsumerContractV08 = consumerContract
	_, err = cu.LinkContract.Transfer(cu.Sergey, consumerContractAddress, oneEth) // Actually, PLI
	require.NoError(t, err, "failed to send PLI to VRFConsumer contract on simulated ethereum blockchain")
	cu.Backend.Commit()
	return cu
}

// NewVRFCoordinatorUniverse sets up all identities and contracts associated with
// testing the solidity VRF contracts involved in randomness request workflow.
// Deployment order matters: the link token, blockhash store, coordinator, and
// consumer are deployed in sequence on the simulated backend.
func NewVRFCoordinatorUniverse(t *testing.T, keys ...ethkey.KeyV2) CoordinatorUniverse {
	var oracleTransactors []*bind.TransactOpts
	for _, key := range keys {
		oracleTransactor, err := bind.NewKeyedTransactorWithChainID(key.ToEcdsaPrivKey(), testutils.SimulatedChainID)
		require.NoError(t, err)
		oracleTransactors = append(oracleTransactors, oracleTransactor)
	}

	var (
		sergey = testutils.MustNewSimTransactor(t)
		neil   = testutils.MustNewSimTransactor(t)
		ned    = testutils.MustNewSimTransactor(t)
		carol  = testutils.MustNewSimTransactor(t)
	)
	genesisData := core.GenesisAlloc{
		sergey.From: {Balance: assets.Ether(1000).ToInt()},
		neil.From:   {Balance: assets.Ether(1000).ToInt()},
		ned.From:    {Balance: assets.Ether(1000).ToInt()},
		carol.From:  {Balance: assets.Ether(1000).ToInt()},
	}

	// NOTE(review): this loop variable t shadows the *testing.T parameter;
	// harmless here since only t.From is used, but worth renaming.
	for _, t := range oracleTransactors {
		genesisData[t.From] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()}
	}

	gasLimit := uint32(ethconfig.Defaults.Miner.GasCeil)
	consumerABI, err := abi.JSON(strings.NewReader(
		solidity_vrf_consumer_interface.VRFConsumerABI))
	require.NoError(t, err)
	coordinatorABI, err := abi.JSON(strings.NewReader(
		solidity_vrf_coordinator_interface.VRFCoordinatorABI))
	require.NoError(t, err)
	backend := cltest.NewSimulatedBackend(t, genesisData, gasLimit)
	linkAddress, _, linkContract, err := link_token_interface.DeployLinkToken(
		sergey, backend)
	require.NoError(t, err, "failed to deploy link contract to simulated ethereum blockchain")
	bhsAddress, _, bhsContract, err := blockhash_store.DeployBlockhashStore(neil, backend)
	require.NoError(t, err, "failed to deploy BlockhashStore contract to simulated ethereum blockchain")
	coordinatorAddress, _, coordinatorContract, err :=
		solidity_vrf_coordinator_interface.DeployVRFCoordinator(
			neil, backend, linkAddress, bhsAddress)
	require.NoError(t, err, "failed to deploy VRFCoordinator contract to simulated ethereum blockchain")
	consumerContractAddress, _, consumerContract, err :=
		solidity_vrf_consumer_interface.DeployVRFConsumer(
			carol, backend, coordinatorAddress, linkAddress)
	require.NoError(t, err, "failed to deploy VRFConsumer contract to simulated ethereum blockchain")
	_, _, requestIDBase, err :=
		solidity_vrf_request_id.DeployVRFRequestIDBaseTestHelper(neil, backend)
	require.NoError(t, err, "failed to deploy VRFRequestIDBaseTestHelper contract to simulated ethereum blockchain")
	_, err = linkContract.Transfer(sergey, consumerContractAddress, oneEth) // Actually, PLI
	require.NoError(t, err, "failed to send PLI to VRFConsumer contract on simulated ethereum blockchain")
	backend.Commit()
	return CoordinatorUniverse{
		RootContract:            coordinatorContract,
		RootContractAddress:     coordinatorAddress,
		LinkContract:            linkContract,
		LinkContractAddress:     linkAddress,
		BHSContract:             bhsContract,
		BHSContractAddress:      bhsAddress,
		ConsumerContract:        consumerContract,
		RequestIDBase:           requestIDBase,
		ConsumerContractAddress: consumerContractAddress,
		Backend:                 backend,
		CoordinatorABI:          &coordinatorABI,
		ConsumerABI:             &consumerABI,
		Sergey:                  sergey,
		Neil:                    neil,
		Ned:                     ned,
		Carol:                   carol,
	}
}
diff --git a/core/services/webhook/authorizer.go b/core/services/webhook/authorizer.go
new file mode 100644
index 00000000..5220a9d5
--- /dev/null
+++ b/core/services/webhook/authorizer.go
package webhook

import (
	"context"
	"database/sql"

	"github.com/google/uuid"

	"github.com/goplugin/pluginv3.0/v2/core/bridges"
"github.com/goplugin/pluginv3.0/v2/core/sessions" +) + +type AuthorizerConfig interface { + ExternalInitiatorsEnabled() bool +} + +type Authorizer interface { + CanRun(ctx context.Context, config AuthorizerConfig, jobUUID uuid.UUID) (bool, error) +} + +var ( + _ Authorizer = &eiAuthorizer{} + _ Authorizer = &alwaysAuthorizer{} + _ Authorizer = &neverAuthorizer{} +) + +func NewAuthorizer(db *sql.DB, user *sessions.User, ei *bridges.ExternalInitiator) Authorizer { + if user != nil { + return &alwaysAuthorizer{} + } else if ei != nil { + return NewEIAuthorizer(db, *ei) + } + return &neverAuthorizer{} +} + +type eiAuthorizer struct { + db *sql.DB + ei bridges.ExternalInitiator +} + +func NewEIAuthorizer(db *sql.DB, ei bridges.ExternalInitiator) *eiAuthorizer { + return &eiAuthorizer{db, ei} +} + +func (ea *eiAuthorizer) CanRun(ctx context.Context, config AuthorizerConfig, jobUUID uuid.UUID) (can bool, err error) { + if !config.ExternalInitiatorsEnabled() { + return false, nil + } + row := ea.db.QueryRowContext(ctx, ` +SELECT EXISTS ( + SELECT 1 FROM external_initiator_webhook_specs + JOIN jobs ON external_initiator_webhook_specs.webhook_spec_id = jobs.webhook_spec_id + AND jobs.external_job_id = $1 + AND external_initiator_webhook_specs.external_initiator_id = $2 +)`, jobUUID, ea.ei.ID) + + err = row.Scan(&can) + if err != nil { + return false, err + } + return can, nil +} + +type alwaysAuthorizer struct{} + +func (*alwaysAuthorizer) CanRun(context.Context, AuthorizerConfig, uuid.UUID) (bool, error) { + return true, nil +} + +type neverAuthorizer struct{} + +func (*neverAuthorizer) CanRun(context.Context, AuthorizerConfig, uuid.UUID) (bool, error) { + return false, nil +} diff --git a/core/services/webhook/authorizer_test.go b/core/services/webhook/authorizer_test.go new file mode 100644 index 00000000..3fa88bd0 --- /dev/null +++ b/core/services/webhook/authorizer_test.go @@ -0,0 +1,100 @@ +package webhook_test + +import ( + "testing" + + "github.com/jmoiron/sqlx" + + 
	"github.com/goplugin/pluginv3.0/v2/core/bridges"
	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
	"github.com/goplugin/pluginv3.0/v2/core/logger"
	"github.com/goplugin/pluginv3.0/v2/core/services/pg"

	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/goplugin/pluginv3.0/v2/core/internal/cltest"
	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest"
	"github.com/goplugin/pluginv3.0/v2/core/services/webhook"
	"github.com/goplugin/pluginv3.0/v2/core/sessions"
)

// newBridgeORM builds a bridges ORM over the given test database.
func newBridgeORM(t *testing.T, db *sqlx.DB, cfg pg.QConfig) bridges.ORM {
	return bridges.NewORM(db, logger.TestLogger(t), cfg)
}

// eiEnabledCfg is an AuthorizerConfig with external initiators enabled.
type eiEnabledCfg struct{}

func (eiEnabledCfg) ExternalInitiatorsEnabled() bool { return true }

// eiDisabledCfg is an AuthorizerConfig with external initiators disabled.
type eiDisabledCfg struct{}

func (eiDisabledCfg) ExternalInitiatorsEnabled() bool { return false }

// Test_Authorizer covers the three webhook authorizers: anonymous callers are
// always refused, sessioned users are always allowed, and an external
// initiator is allowed only for jobs linked to it (and only when EIs are
// enabled in config).
func Test_Authorizer(t *testing.T) {
	db := pgtest.NewSqlxDB(t)
	borm := newBridgeORM(t, db, pgtest.NewQConfig(true))

	eiFoo := cltest.MustInsertExternalInitiator(t, borm)
	eiBar := cltest.MustInsertExternalInitiator(t, borm)

	jobWithFooAndBarEI, webhookSpecWithFooAndBarEI := cltest.MustInsertWebhookSpec(t, db)
	jobWithBarEI, webhookSpecWithBarEI := cltest.MustInsertWebhookSpec(t, db)
	jobWithNoEI, _ := cltest.MustInsertWebhookSpec(t, db)

	// Link eiFoo and eiBar to the first spec, and eiBar alone to the second.
	_, err := db.Exec(`INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) VALUES ($1,$2,$3)`, eiFoo.ID, webhookSpecWithFooAndBarEI.ID, `{"ei": "foo", "name": "webhookSpecWithFooAndBarEI"}`)
	require.NoError(t, err)
	_, err = db.Exec(`INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) VALUES ($1,$2,$3)`, eiBar.ID, webhookSpecWithFooAndBarEI.ID, `{"ei": "bar", "name": "webhookSpecWithFooAndBarEI"}`)
	require.NoError(t, err)
	// NOTE(review): the "name" in this fixture says "webhookSpecTwoEIs" but it
	// is attached to webhookSpecWithBarEI; presumably a leftover label — the
	// name is not asserted on, so behavior is unaffected.
	_, err = db.Exec(`INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) VALUES ($1,$2,$3)`, eiBar.ID, webhookSpecWithBarEI.ID, `{"ei": "bar", "name": "webhookSpecTwoEIs"}`)
	require.NoError(t, err)

	t.Run("no user no ei never authorizes", func(t *testing.T) {
		a := webhook.NewAuthorizer(db.DB, nil, nil)

		can, err := a.CanRun(testutils.Context(t), nil, jobWithFooAndBarEI.ExternalJobID)
		require.NoError(t, err)
		assert.False(t, can)
		can, err = a.CanRun(testutils.Context(t), nil, jobWithNoEI.ExternalJobID)
		require.NoError(t, err)
		assert.False(t, can)
		can, err = a.CanRun(testutils.Context(t), nil, uuid.New())
		require.NoError(t, err)
		assert.False(t, can)
	})

	t.Run("with user no ei always authorizes", func(t *testing.T) {
		a := webhook.NewAuthorizer(db.DB, &sessions.User{}, nil)

		can, err := a.CanRun(testutils.Context(t), nil, jobWithFooAndBarEI.ExternalJobID)
		require.NoError(t, err)
		assert.True(t, can)
		can, err = a.CanRun(testutils.Context(t), nil, jobWithNoEI.ExternalJobID)
		require.NoError(t, err)
		assert.True(t, can)
		can, err = a.CanRun(testutils.Context(t), nil, uuid.New())
		require.NoError(t, err)
		assert.True(t, can)
	})

	t.Run("no user with ei authorizes conditionally", func(t *testing.T) {
		a := webhook.NewAuthorizer(db.DB, nil, &eiFoo)

		// Linked job, EIs enabled: allowed.
		can, err := a.CanRun(testutils.Context(t), eiEnabledCfg{}, jobWithFooAndBarEI.ExternalJobID)
		require.NoError(t, err)
		assert.True(t, can)
		// Linked job but EIs disabled in config: refused.
		can, err = a.CanRun(testutils.Context(t), eiDisabledCfg{}, jobWithFooAndBarEI.ExternalJobID)
		require.NoError(t, err)
		assert.False(t, can)
		// Job linked to a different EI: refused.
		can, err = a.CanRun(testutils.Context(t), eiEnabledCfg{}, jobWithBarEI.ExternalJobID)
		require.NoError(t, err)
		assert.False(t, can)
		// Job with no EI link: refused.
		can, err = a.CanRun(testutils.Context(t), eiEnabledCfg{}, jobWithNoEI.ExternalJobID)
		require.NoError(t, err)
		assert.False(t, can)
		// Unknown job UUID: refused.
		can, err = a.CanRun(testutils.Context(t), eiEnabledCfg{}, uuid.New())
		require.NoError(t, err)
		assert.False(t, can)
	})
}
diff --git a/core/services/webhook/delegate.go
b/core/services/webhook/delegate.go
new file mode 100644
index 00000000..6316d5e0
--- /dev/null
+++ b/core/services/webhook/delegate.go
package webhook

import (
	"context"
	"sync"

	"github.com/google/uuid"

	"github.com/pkg/errors"

	"github.com/goplugin/plugin-common/pkg/services"

	"github.com/goplugin/pluginv3.0/v2/core/logger"
	"github.com/goplugin/pluginv3.0/v2/core/services/job"
	"github.com/goplugin/pluginv3.0/v2/core/services/pg"
	"github.com/goplugin/pluginv3.0/v2/core/services/pipeline"
)

type (
	// Delegate is the job.Delegate for webhook jobs. It notifies external
	// initiators on job creation/deletion and owns the shared job runner.
	Delegate struct {
		webhookJobRunner         *webhookJobRunner
		externalInitiatorManager ExternalInitiatorManager
		lggr                     logger.Logger
		stopCh                   services.StopChan
	}

	// JobRunner runs a webhook job's pipeline from an incoming request.
	JobRunner interface {
		RunJob(ctx context.Context, jobUUID uuid.UUID, requestBody string, meta pipeline.JSONSerializable) (int64, error)
	}
)

var _ job.Delegate = (*Delegate)(nil)

// NewDelegate builds a webhook Delegate around the given pipeline runner and
// external initiator manager.
func NewDelegate(runner pipeline.Runner, externalInitiatorManager ExternalInitiatorManager, lggr logger.Logger) *Delegate {
	lggr = lggr.Named("Webhook")
	return &Delegate{
		externalInitiatorManager: externalInitiatorManager,
		webhookJobRunner:         newWebhookJobRunner(runner, lggr),
		lggr:                     lggr,
		stopCh:                   make(services.StopChan),
	}
}

// WebhookJobRunner exposes the shared runner so HTTP handlers can trigger jobs.
func (d *Delegate) WebhookJobRunner() JobRunner {
	return d.webhookJobRunner
}

// JobType satisfies job.Delegate.
func (d *Delegate) JobType() job.Type {
	return job.Webhook
}

func (d *Delegate) BeforeJobCreated(spec job.Job) {}

// AfterJobCreated notifies the external initiator(s) that a new webhook job
// exists; failures are logged, not propagated.
func (d *Delegate) AfterJobCreated(jb job.Job) {
	ctx, cancel := d.stopCh.NewCtx()
	defer cancel()
	err := d.externalInitiatorManager.Notify(ctx, *jb.WebhookSpecID)
	if err != nil {
		d.lggr.Errorw("Webhook delegate AfterJobCreated errored",
			"err", err,
			"jobID", jb.ID,
		)
	}
}

// BeforeJobDeleted tells the external initiator(s) to drop the job; failures
// are logged, not propagated.
// NOTE(review): the log message below says "OnDeleteJob" although it is
// emitted from BeforeJobDeleted — presumably a leftover from a rename.
func (d *Delegate) BeforeJobDeleted(spec job.Job) {
	ctx, cancel := d.stopCh.NewCtx()
	defer cancel()
	err := d.externalInitiatorManager.DeleteJob(ctx, *spec.WebhookSpecID)
	if err != nil {
		d.lggr.Errorw("Webhook delegate OnDeleteJob errored",
			"err", err,
			"jobID", spec.ID,
		)
	}
}
func (d *Delegate) OnDeleteJob(jb job.Job, q pg.Queryer) error { return nil }

// ServicesForSpec satisfies the job.Delegate interface.
func (d *Delegate) ServicesForSpec(spec job.Job) ([]job.ServiceCtx, error) {
	service := &pseudoService{
		spec:             spec,
		webhookJobRunner: d.webhookJobRunner,
	}
	return []job.ServiceCtx{service}, nil
}

// pseudoService registers/deregisters a job spec with the shared runner over
// the service lifecycle; it runs no goroutines of its own.
type pseudoService struct {
	spec             job.Job
	webhookJobRunner *webhookJobRunner
}

// Start starts PseudoService.
func (s pseudoService) Start(context.Context) error {
	// add the spec to the webhookJobRunner
	return s.webhookJobRunner.addSpec(s.spec)
}

func (s pseudoService) Close() error {
	// remove the spec from the webhookJobRunner
	s.webhookJobRunner.rmSpec(s.spec)
	return nil
}

// webhookJobRunner maps external job UUIDs to registered specs and runs their
// pipelines on demand. Access to specsByUUID is guarded by muSpecsByUUID.
type webhookJobRunner struct {
	specsByUUID   map[uuid.UUID]registeredJob
	muSpecsByUUID sync.RWMutex
	runner        pipeline.Runner
	lggr          logger.Logger
}

func newWebhookJobRunner(runner pipeline.Runner, lggr logger.Logger) *webhookJobRunner {
	return &webhookJobRunner{
		specsByUUID: make(map[uuid.UUID]registeredJob),
		runner:      runner,
		lggr:        lggr.Named("JobRunner"),
	}
}

// registeredJob pairs a job spec with a channel that is closed when the spec
// is removed, cancelling any in-flight runs.
type registeredJob struct {
	job.Job
	chRemove services.StopChan
}

// addSpec registers a spec under its external UUID; duplicate UUIDs error.
func (r *webhookJobRunner) addSpec(spec job.Job) error {
	r.muSpecsByUUID.Lock()
	defer r.muSpecsByUUID.Unlock()
	_, exists := r.specsByUUID[spec.ExternalJobID]
	if exists {
		return errors.Errorf("a webhook job with that UUID already exists (uuid: %v)", spec.ExternalJobID)
	}
	r.specsByUUID[spec.ExternalJobID] = registeredJob{spec, make(chan struct{})}
	return nil
}

// rmSpec deregisters a spec and closes its removal channel so running
// pipelines for it get cancelled.
func (r *webhookJobRunner) rmSpec(spec job.Job) {
	r.muSpecsByUUID.Lock()
	defer r.muSpecsByUUID.Unlock()
	j, exists := r.specsByUUID[spec.ExternalJobID]
	if exists {
		close(j.chRemove)
		delete(r.specsByUUID, spec.ExternalJobID)
	}
}

// spec looks up a registered job by external UUID under the read lock.
func (r *webhookJobRunner) spec(externalJobID uuid.UUID) (registeredJob, bool) {
	r.muSpecsByUUID.RLock()
	defer r.muSpecsByUUID.RUnlock()
	spec, exists := r.specsByUUID[externalJobID]
	return spec, exists
}

// ErrJobNotExists is returned by RunJob for UUIDs with no registered spec.
var ErrJobNotExists = errors.New("job does not exist")

// RunJob executes the registered job's pipeline with the request body and
// meta exposed as pipeline vars, returning the run's database ID.
func (r *webhookJobRunner) RunJob(ctx context.Context, jobUUID uuid.UUID, requestBody string, meta pipeline.JSONSerializable) (int64, error) {
	spec, exists := r.spec(jobUUID)
	if !exists {
		return 0, ErrJobNotExists
	}

	jobLggr := r.lggr.With(
		"jobID", spec.ID,
		"uuid", spec.ExternalJobID,
	)

	// Tie the run's context to spec removal so Close cancels in-flight runs.
	ctx, cancel := spec.chRemove.Ctx(ctx)
	defer cancel()

	vars := pipeline.NewVarsFrom(map[string]interface{}{
		"jobSpec": map[string]interface{}{
			"databaseID":    spec.ID,
			"externalJobID": spec.ExternalJobID,
			"name":          spec.Name.ValueOrZero(),
		},
		"jobRun": map[string]interface{}{
			"requestBody": requestBody,
			"meta":        meta.Val,
		},
	})

	run := pipeline.NewRun(*spec.PipelineSpec, vars)

	_, err := r.runner.Run(ctx, run, jobLggr, true, nil)
	if err != nil {
		jobLggr.Errorw("Error running pipeline for webhook job", "err", err)
		return 0, err
	}
	// Invariant: a successful saved run must have been assigned a DB ID.
	if run.ID == 0 {
		panic("expected run to have non-zero id")
	}
	return run.ID, nil
}
diff --git a/core/services/webhook/delegate_test.go b/core/services/webhook/delegate_test.go
new file mode 100644
index 00000000..922d2616
--- /dev/null
+++ b/core/services/webhook/delegate_test.go
package webhook_test

import (
	"testing"

	"github.com/google/uuid"
	"gopkg.in/guregu/null.v4"

	"github.com/pkg/errors"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
	"github.com/goplugin/pluginv3.0/v2/core/logger"
	"github.com/goplugin/pluginv3.0/v2/core/services/job"
	"github.com/goplugin/pluginv3.0/v2/core/services/pipeline"
	pipelinemocks "github.com/goplugin/pluginv3.0/v2/core/services/pipeline/mocks"
	"github.com/goplugin/pluginv3.0/v2/core/services/webhook"
	webhookmocks
	"github.com/goplugin/pluginv3.0/v2/core/services/webhook/mocks"
)

// TestWebhookDelegate exercises the webhook JobRunner lifecycle: runs fail
// with ErrJobNotExists before the service starts, succeed (and surface runner
// errors) while started, and fail with ErrJobNotExists again after Close.
func TestWebhookDelegate(t *testing.T) {
	var (
		spec = &job.Job{
			ID:            123,
			Type:          job.Webhook,
			Name:          null.StringFrom("sergtoshi stevemoto"),
			SchemaVersion: 1,
			ExternalJobID: uuid.New(),
			WebhookSpec:   &job.WebhookSpec{},
			PipelineSpec:  &pipeline.Spec{},
		}

		requestBody = "foo"
		meta        = pipeline.JSONSerializable{Val: "bar", Valid: true}
		// vars mirrors the pipeline vars RunJob is expected to construct.
		vars = map[string]interface{}{
			"jobSpec": map[string]interface{}{
				"databaseID":    spec.ID,
				"externalJobID": spec.ExternalJobID,
				"name":          spec.Name.ValueOrZero(),
			},
			"jobRun": map[string]interface{}{
				"requestBody": requestBody,
				"meta":        meta.Val,
			},
		}
		runner    = pipelinemocks.NewRunner(t)
		eiManager = new(webhookmocks.ExternalInitiatorManager)
		delegate  = webhook.NewDelegate(runner, eiManager, logger.TestLogger(t))
	)

	services, err := delegate.ServicesForSpec(*spec)
	require.NoError(t, err)
	require.Len(t, services, 1)
	service := services[0]

	// Should error before service is started
	_, err = delegate.WebhookJobRunner().RunJob(testutils.Context(t), spec.ExternalJobID, requestBody, meta)
	require.Error(t, err)
	require.Equal(t, webhook.ErrJobNotExists, errors.Cause(err))

	// Should succeed after service is started upon a successful run
	err = service.Start(testutils.Context(t))
	require.NoError(t, err)

	// The mocked runner assigns the run ID and asserts the vars passed in.
	runner.On("Run", mock.Anything, mock.AnythingOfType("*pipeline.Run"), mock.Anything, mock.Anything, mock.Anything).
		Return(false, nil).
		Run(func(args mock.Arguments) {
			run := args.Get(1).(*pipeline.Run)
			run.ID = int64(123)

			require.Equal(t, vars, run.Inputs.Val)
		}).Once()

	runID, err := delegate.WebhookJobRunner().RunJob(testutils.Context(t), spec.ExternalJobID, requestBody, meta)
	require.NoError(t, err)
	require.Equal(t, int64(123), runID)

	// Should error after service is started upon a failed run
	expectedErr := errors.New("foo bar")

	runner.On("Run", mock.Anything, mock.AnythingOfType("*pipeline.Run"), mock.Anything, mock.Anything, mock.Anything).
		Return(false, expectedErr).Once()

	_, err = delegate.WebhookJobRunner().RunJob(testutils.Context(t), spec.ExternalJobID, requestBody, meta)
	require.Equal(t, expectedErr, errors.Cause(err))

	// Should error after service is stopped
	err = service.Close()
	require.NoError(t, err)

	_, err = delegate.WebhookJobRunner().RunJob(testutils.Context(t), spec.ExternalJobID, requestBody, meta)
	require.Equal(t, webhook.ErrJobNotExists, errors.Cause(err))
}
diff --git a/core/services/webhook/external_initiator_manager.go b/core/services/webhook/external_initiator_manager.go
new file mode 100644
index 00000000..ad1d1c6e
--- /dev/null
+++ b/core/services/webhook/external_initiator_manager.go
package webhook

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/google/uuid"
	"github.com/lib/pq"
	"github.com/pkg/errors"

	"github.com/jmoiron/sqlx"

	"github.com/goplugin/pluginv3.0/v2/core/bridges"
	"github.com/goplugin/pluginv3.0/v2/core/logger"
	"github.com/goplugin/pluginv3.0/v2/core/services/job"
	"github.com/goplugin/pluginv3.0/v2/core/services/pg"
	"github.com/goplugin/pluginv3.0/v2/core/static"
	"github.com/goplugin/pluginv3.0/v2/core/store/models"
)

//go:generate mockery --quiet --name ExternalInitiatorManager --output ./mocks/ --case=underscore

// ExternalInitiatorManager manages HTTP requests to remote external initiators
type
ExternalInitiatorManager interface { + Notify(ctx context.Context, webhookSpecID int32) error + DeleteJob(ctx context.Context, webhookSpecID int32) error + FindExternalInitiatorByName(name string) (bridges.ExternalInitiator, error) +} + +//go:generate mockery --quiet --name HTTPClient --output ./mocks/ --case=underscore +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +type externalInitiatorManager struct { + q pg.Q + httpclient HTTPClient +} + +var _ ExternalInitiatorManager = (*externalInitiatorManager)(nil) + +// NewExternalInitiatorManager returns the concrete externalInitiatorManager +func NewExternalInitiatorManager(db *sqlx.DB, httpclient HTTPClient, lggr logger.Logger, cfg pg.QConfig) *externalInitiatorManager { + namedLogger := lggr.Named("ExternalInitiatorManager") + return &externalInitiatorManager{ + q: pg.NewQ(db, namedLogger, cfg), + httpclient: httpclient, + } +} + +// Notify sends a POST notification to the External Initiator +// responsible for initiating the Job Spec. 
+func (m externalInitiatorManager) Notify(ctx context.Context, webhookSpecID int32) error { + eiWebhookSpecs, jobID, err := m.Load(webhookSpecID) + if err != nil { + return err + } + for _, eiWebhookSpec := range eiWebhookSpecs { + ei := eiWebhookSpec.ExternalInitiator + if ei.URL == nil { + continue + } + notice := JobSpecNotice{ + JobID: jobID, + Type: ei.Name, + Params: eiWebhookSpec.Spec, + } + buf, err := json.Marshal(notice) + if err != nil { + return errors.Wrap(err, "new Job Spec notification") + } + req, err := newNotifyHTTPRequest(ctx, buf, ei) + if err != nil { + return errors.Wrap(err, "creating notify HTTP request") + } + resp, err := m.httpclient.Do(req) + if err != nil { + return errors.Wrap(err, "could not notify '%s' (%s)") + } + if err := resp.Body.Close(); err != nil { + return err + } + if !(resp.StatusCode >= 200 && resp.StatusCode < 300) { + return fmt.Errorf(" notify '%s' (%s) received bad response '%d: %s'", ei.Name, ei.URL, resp.StatusCode, resp.Status) + } + } + return nil +} + +func (m externalInitiatorManager) Load(webhookSpecID int32) (eiWebhookSpecs []job.ExternalInitiatorWebhookSpec, jobID uuid.UUID, err error) { + err = m.q.Transaction(func(tx pg.Queryer) error { + if err = tx.Get(&jobID, "SELECT external_job_id FROM jobs WHERE webhook_spec_id = $1", webhookSpecID); err != nil { + if err = errors.Wrapf(err, "failed to load job ID from job for webhook spec with ID %d", webhookSpecID); err != nil { + return err + } + } + if err = tx.Select(&eiWebhookSpecs, "SELECT * FROM external_initiator_webhook_specs WHERE external_initiator_webhook_specs.webhook_spec_id = $1", webhookSpecID); err != nil { + if err = errors.Wrapf(err, "failed to load external_initiator_webhook_specs for webhook_spec_id %d", webhookSpecID); err != nil { + return err + } + } + if err = m.eagerLoadExternalInitiator(tx, eiWebhookSpecs); err != nil { + if err = errors.Wrapf(err, "failed to preload ExternalInitiator for webhook_spec_id %d", webhookSpecID); err != nil { + 
return err + } + } + return nil + }) + + return +} + +func (m externalInitiatorManager) eagerLoadExternalInitiator(q pg.Queryer, txs []job.ExternalInitiatorWebhookSpec) error { + var ids []int64 + for _, tx := range txs { + ids = append(ids, tx.ExternalInitiatorID) + } + if len(ids) == 0 { + return nil + } + var externalInitiators []bridges.ExternalInitiator + if err := sqlx.Select(q, &externalInitiators, `SELECT * FROM external_initiators WHERE external_initiators.id = ANY($1);`, pq.Array(ids)); err != nil { + return err + } + + eiMap := make(map[int64]bridges.ExternalInitiator) + for _, externalInitiator := range externalInitiators { + eiMap[externalInitiator.ID] = externalInitiator + } + + for i := range txs { + txs[i].ExternalInitiator = eiMap[txs[i].ExternalInitiatorID] + } + return nil +} + +func (m externalInitiatorManager) DeleteJob(ctx context.Context, webhookSpecID int32) error { + eiWebhookSpecs, jobID, err := m.Load(webhookSpecID) + if err != nil { + return err + } + for _, eiWebhookSpec := range eiWebhookSpecs { + ei := eiWebhookSpec.ExternalInitiator + if ei.URL == nil { + continue + } + + req, err := newDeleteJobFromExternalInitiatorHTTPRequest(ctx, ei, jobID) + if err != nil { + return errors.Wrap(err, "creating delete HTTP request") + } + resp, err := m.httpclient.Do(req) + if err != nil { + return errors.Wrapf(err, "could not delete job from remote external initiator at %s", req.URL) + } + if err := resp.Body.Close(); err != nil { + return err + } + if !(resp.StatusCode >= 200 && resp.StatusCode < 300) { + return fmt.Errorf(" delete '%s' (%s) received bad response '%d: %s'", ei.Name, ei.URL, resp.StatusCode, resp.Status) + } + } + return nil +} + +func (m externalInitiatorManager) FindExternalInitiatorByName(name string) (bridges.ExternalInitiator, error) { + var exi bridges.ExternalInitiator + err := m.q.Get(&exi, "SELECT * FROM external_initiators WHERE lower(external_initiators.name) = lower($1)", name) + return exi, err +} + +// JobSpecNotice 
is sent to the External Initiator when JobSpecs are created. +type JobSpecNotice struct { + JobID uuid.UUID `json:"jobId"` + Type string `json:"type"` + Params models.JSON `json:"params,omitempty"` +} + +func newNotifyHTTPRequest(ctx context.Context, buf []byte, ei bridges.ExternalInitiator) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, ei.URL.String(), bytes.NewBuffer(buf)) + if err != nil { + return nil, err + } + setHeaders(req, ei) + return req, nil +} + +func newDeleteJobFromExternalInitiatorHTTPRequest(ctx context.Context, ei bridges.ExternalInitiator, jobID uuid.UUID) (*http.Request, error) { + url := fmt.Sprintf("%s/%s", ei.URL.String(), jobID) + + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil) + if err != nil { + return nil, err + } + setHeaders(req, ei) + return req, nil +} + +func setHeaders(req *http.Request, ei bridges.ExternalInitiator) { + req.Header.Set("Content-Type", "application/json") + req.Header.Set(static.ExternalInitiatorAccessKeyHeader, ei.OutgoingToken) + req.Header.Set(static.ExternalInitiatorSecretHeader, ei.OutgoingSecret) +} + +type NullExternalInitiatorManager struct{} + +var _ ExternalInitiatorManager = (*NullExternalInitiatorManager)(nil) + +func (NullExternalInitiatorManager) Notify(context.Context, int32) error { return nil } +func (NullExternalInitiatorManager) DeleteJob(context.Context, int32) error { return nil } +func (NullExternalInitiatorManager) FindExternalInitiatorByName(name string) (bridges.ExternalInitiator, error) { + return bridges.ExternalInitiator{}, nil +} diff --git a/core/services/webhook/external_initiator_manager_test.go b/core/services/webhook/external_initiator_manager_test.go new file mode 100644 index 00000000..4e8ba311 --- /dev/null +++ b/core/services/webhook/external_initiator_manager_test.go @@ -0,0 +1,131 @@ +package webhook_test + +import ( + "fmt" + "io" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + + "github.com/goplugin/plugin-common/pkg/utils/tests" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + _ "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + webhookmocks "github.com/goplugin/pluginv3.0/v2/core/services/webhook/mocks" +) + +func Test_ExternalInitiatorManager_Load(t *testing.T) { + db := pgtest.NewSqlxDB(t) + cfg := pgtest.NewQConfig(true) + borm := newBridgeORM(t, db, cfg) + + eiFoo := cltest.MustInsertExternalInitiator(t, borm) + eiBar := cltest.MustInsertExternalInitiator(t, borm) + + jb1, webhookSpecOneEI := cltest.MustInsertWebhookSpec(t, db) + jb2, webhookSpecTwoEIs := cltest.MustInsertWebhookSpec(t, db) + jb3, webhookSpecNoEIs := cltest.MustInsertWebhookSpec(t, db) + + pgtest.MustExec(t, db, `INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) VALUES ($1,$2,$3)`, eiFoo.ID, webhookSpecTwoEIs.ID, `{"ei": "foo", "name": "webhookSpecTwoEIs"}`) + pgtest.MustExec(t, db, `INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) VALUES ($1,$2,$3)`, eiBar.ID, webhookSpecTwoEIs.ID, `{"ei": "bar", "name": "webhookSpecTwoEIs"}`) + pgtest.MustExec(t, db, `INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) VALUES ($1,$2,$3)`, eiFoo.ID, webhookSpecOneEI.ID, `{"ei": "foo", "name": "webhookSpecOneEI"}`) + + eim := webhook.NewExternalInitiatorManager(db, nil, logger.TestLogger(t), cfg) + + eiWebhookSpecs, jobID, err := eim.Load(webhookSpecNoEIs.ID) + require.NoError(t, err) + assert.Len(t, eiWebhookSpecs, 0) + assert.Equal(t, jb3.ExternalJobID, jobID) + + eiWebhookSpecs, jobID, err = 
eim.Load(webhookSpecOneEI.ID) + require.NoError(t, err) + assert.Len(t, eiWebhookSpecs, 1) + assert.Equal(t, `{"ei": "foo", "name": "webhookSpecOneEI"}`, eiWebhookSpecs[0].Spec.Raw) + assert.Equal(t, eiFoo.ID, eiWebhookSpecs[0].ExternalInitiator.ID) + assert.Equal(t, jb1.ExternalJobID, jobID) + + eiWebhookSpecs, jobID, err = eim.Load(webhookSpecTwoEIs.ID) + require.NoError(t, err) + assert.Len(t, eiWebhookSpecs, 2) + assert.Equal(t, jb2.ExternalJobID, jobID) +} + +func Test_ExternalInitiatorManager_Notify(t *testing.T) { + ctx := tests.Context(t) + db := pgtest.NewSqlxDB(t) + cfg := pgtest.NewQConfig(true) + borm := newBridgeORM(t, db, cfg) + + eiWithURL := cltest.MustInsertExternalInitiatorWithOpts(t, borm, cltest.ExternalInitiatorOpts{ + URL: cltest.MustWebURL(t, "http://example.com/foo"), + OutgoingSecret: "secret", + OutgoingToken: "token", + }) + eiNoURL := cltest.MustInsertExternalInitiator(t, borm) + + jb, webhookSpecTwoEIs := cltest.MustInsertWebhookSpec(t, db) + _, webhookSpecNoEIs := cltest.MustInsertWebhookSpec(t, db) + + pgtest.MustExec(t, db, `INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) VALUES ($1,$2,$3)`, eiWithURL.ID, webhookSpecTwoEIs.ID, `{"ei": "foo", "name": "webhookSpecTwoEIs"}`) + pgtest.MustExec(t, db, `INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) VALUES ($1,$2,$3)`, eiNoURL.ID, webhookSpecTwoEIs.ID, `{"ei": "bar", "name": "webhookSpecTwoEIs"}`) + + client := webhookmocks.NewHTTPClient(t) + eim := webhook.NewExternalInitiatorManager(db, client, logger.TestLogger(t), cfg) + + // Does nothing with no EI + require.NoError(t, eim.Notify(ctx, webhookSpecNoEIs.ID)) + + client.On("Do", mock.MatchedBy(func(r *http.Request) bool { + body, err := r.GetBody() + require.NoError(t, err) + b, err := io.ReadAll(body) + require.NoError(t, err) + + assert.Equal(t, jb.ExternalJobID.String(), gjson.GetBytes(b, "jobId").Str) + assert.Equal(t, eiWithURL.Name, 
gjson.GetBytes(b, "type").Str) + assert.Equal(t, `{"ei":"foo","name":"webhookSpecTwoEIs"}`, gjson.GetBytes(b, "params").Raw) + + return r.Method == "POST" && r.URL.String() == eiWithURL.URL.String() && r.Header["Content-Type"][0] == "application/json" && r.Header["X-Plugin-Ea-Accesskey"][0] == "token" && r.Header["X-Plugin-Ea-Secret"][0] == "secret" + })).Once().Return(&http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(""))}, nil) + require.NoError(t, eim.Notify(ctx, webhookSpecTwoEIs.ID)) +} + +func Test_ExternalInitiatorManager_DeleteJob(t *testing.T) { + ctx := testutils.Context(t) + db := pgtest.NewSqlxDB(t) + cfg := pgtest.NewQConfig(true) + borm := newBridgeORM(t, db, cfg) + + eiWithURL := cltest.MustInsertExternalInitiatorWithOpts(t, borm, cltest.ExternalInitiatorOpts{ + URL: cltest.MustWebURL(t, "http://example.com/foo"), + OutgoingSecret: "secret", + OutgoingToken: "token", + }) + eiNoURL := cltest.MustInsertExternalInitiator(t, borm) + + jb, webhookSpecTwoEIs := cltest.MustInsertWebhookSpec(t, db) + _, webhookSpecNoEIs := cltest.MustInsertWebhookSpec(t, db) + + pgtest.MustExec(t, db, `INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) VALUES ($1,$2,$3)`, eiWithURL.ID, webhookSpecTwoEIs.ID, `{"ei": "foo", "name": "webhookSpecTwoEIs"}`) + pgtest.MustExec(t, db, `INSERT INTO external_initiator_webhook_specs (external_initiator_id, webhook_spec_id, spec) VALUES ($1,$2,$3)`, eiNoURL.ID, webhookSpecTwoEIs.ID, `{"ei": "bar", "name": "webhookSpecTwoEIs"}`) + + client := webhookmocks.NewHTTPClient(t) + eim := webhook.NewExternalInitiatorManager(db, client, logger.TestLogger(t), cfg) + + // Does nothing with no EI + require.NoError(t, eim.DeleteJob(ctx, webhookSpecNoEIs.ID)) + + client.On("Do", mock.MatchedBy(func(r *http.Request) bool { + expectedURL := fmt.Sprintf("%s/%s", eiWithURL.URL.String(), jb.ExternalJobID.String()) + return r.Method == "DELETE" && r.URL.String() == expectedURL && 
r.Header["Content-Type"][0] == "application/json" && r.Header["X-Plugin-Ea-Accesskey"][0] == "token" && r.Header["X-Plugin-Ea-Secret"][0] == "secret" + })).Once().Return(&http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(""))}, nil) + require.NoError(t, eim.DeleteJob(ctx, webhookSpecTwoEIs.ID)) +} diff --git a/core/services/webhook/mocks/external_initiator_manager.go b/core/services/webhook/mocks/external_initiator_manager.go new file mode 100644 index 00000000..fee390ac --- /dev/null +++ b/core/services/webhook/mocks/external_initiator_manager.go @@ -0,0 +1,94 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + bridges "github.com/goplugin/pluginv3.0/v2/core/bridges" + + mock "github.com/stretchr/testify/mock" +) + +// ExternalInitiatorManager is an autogenerated mock type for the ExternalInitiatorManager type +type ExternalInitiatorManager struct { + mock.Mock +} + +// DeleteJob provides a mock function with given fields: ctx, webhookSpecID +func (_m *ExternalInitiatorManager) DeleteJob(ctx context.Context, webhookSpecID int32) error { + ret := _m.Called(ctx, webhookSpecID) + + if len(ret) == 0 { + panic("no return value specified for DeleteJob") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int32) error); ok { + r0 = rf(ctx, webhookSpecID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FindExternalInitiatorByName provides a mock function with given fields: name +func (_m *ExternalInitiatorManager) FindExternalInitiatorByName(name string) (bridges.ExternalInitiator, error) { + ret := _m.Called(name) + + if len(ret) == 0 { + panic("no return value specified for FindExternalInitiatorByName") + } + + var r0 bridges.ExternalInitiator + var r1 error + if rf, ok := ret.Get(0).(func(string) (bridges.ExternalInitiator, error)); ok { + return rf(name) + } + if rf, ok := ret.Get(0).(func(string) bridges.ExternalInitiator); ok { + r0 = rf(name) + } else { + r0 = 
ret.Get(0).(bridges.ExternalInitiator) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Notify provides a mock function with given fields: ctx, webhookSpecID +func (_m *ExternalInitiatorManager) Notify(ctx context.Context, webhookSpecID int32) error { + ret := _m.Called(ctx, webhookSpecID) + + if len(ret) == 0 { + panic("no return value specified for Notify") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int32) error); ok { + r0 = rf(ctx, webhookSpecID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewExternalInitiatorManager creates a new instance of ExternalInitiatorManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExternalInitiatorManager(t interface { + mock.TestingT + Cleanup(func()) +}) *ExternalInitiatorManager { + mock := &ExternalInitiatorManager{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/webhook/mocks/http_client.go b/core/services/webhook/mocks/http_client.go new file mode 100644 index 00000000..fa4f597d --- /dev/null +++ b/core/services/webhook/mocks/http_client.go @@ -0,0 +1,58 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + http "net/http" + + mock "github.com/stretchr/testify/mock" +) + +// HTTPClient is an autogenerated mock type for the HTTPClient type +type HTTPClient struct { + mock.Mock +} + +// Do provides a mock function with given fields: req +func (_m *HTTPClient) Do(req *http.Request) (*http.Response, error) { + ret := _m.Called(req) + + if len(ret) == 0 { + panic("no return value specified for Do") + } + + var r0 *http.Response + var r1 error + if rf, ok := ret.Get(0).(func(*http.Request) (*http.Response, error)); ok { + return rf(req) + } + if rf, ok := ret.Get(0).(func(*http.Request) *http.Response); ok { + r0 = rf(req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*http.Response) + } + } + + if rf, ok := ret.Get(1).(func(*http.Request) error); ok { + r1 = rf(req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewHTTPClient creates a new instance of HTTPClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewHTTPClient(t interface { + mock.TestingT + Cleanup(func()) +}) *HTTPClient { + mock := &HTTPClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/webhook/validate.go b/core/services/webhook/validate.go new file mode 100644 index 00000000..f6867e8a --- /dev/null +++ b/core/services/webhook/validate.go @@ -0,0 +1,65 @@ +package webhook + +import ( + "github.com/pelletier/go-toml" + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +type TOMLWebhookSpecExternalInitiator struct { + Name string `toml:"name"` + Spec models.JSON `toml:"spec"` +} + +type TOMLWebhookSpec struct { + ExternalInitiators []TOMLWebhookSpecExternalInitiator `toml:"externalInitiators"` +} + +func ValidatedWebhookSpec(tomlString string, externalInitiatorManager ExternalInitiatorManager) (jb job.Job, err error) { + var tree *toml.Tree + tree, err = toml.Load(tomlString) + if err != nil { + return + } + err = tree.Unmarshal(&jb) + if err != nil { + return + } + if jb.Type != job.Webhook { + return jb, errors.Errorf("unsupported type %s", jb.Type) + } + + var tomlSpec TOMLWebhookSpec + err = tree.Unmarshal(&tomlSpec) + if err != nil { + return jb, err + } + + var externalInitiatorWebhookSpecs []job.ExternalInitiatorWebhookSpec + for _, eiSpec := range tomlSpec.ExternalInitiators { + ei, findErr := externalInitiatorManager.FindExternalInitiatorByName(eiSpec.Name) + if findErr != nil { + err = multierr.Combine(err, errors.Wrapf(findErr, "unable to find external initiator named %s", eiSpec.Name)) + continue + } + eiWS := job.ExternalInitiatorWebhookSpec{ + ExternalInitiatorID: ei.ID, + WebhookSpecID: 0, // It will be populated later, on save + Spec: eiSpec.Spec, + } + externalInitiatorWebhookSpecs = append(externalInitiatorWebhookSpecs, eiWS) + } + + if err != nil { + return jb, err + } + + jb.WebhookSpec = 
&job.WebhookSpec{ + ExternalInitiatorWebhookSpecs: externalInitiatorWebhookSpecs, + } + + return jb, nil +} diff --git a/core/services/webhook/validate_test.go b/core/services/webhook/validate_test.go new file mode 100644 index 00000000..20516613 --- /dev/null +++ b/core/services/webhook/validate_test.go @@ -0,0 +1,158 @@ +package webhook_test + +import ( + "testing" + + "github.com/manyminds/api2go/jsonapi" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + webhookmocks "github.com/goplugin/pluginv3.0/v2/core/services/webhook/mocks" +) + +func TestValidatedWebJobSpec(t *testing.T) { + t.Parallel() + var tt = []struct { + name string + toml string + mock func(t *testing.T, eim *webhookmocks.ExternalInitiatorManager) + assertion func(t *testing.T, spec job.Job, err error) + }{ + { + name: "valid spec", + toml: ` + type = "webhook" + schemaVersion = 1 + externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46" + observationSource = """ + ds [type=http method=GET url="https://chain.link/ETH-USD"]; + ds_parse [type=jsonparse path="data,price"]; + ds -> ds_parse; + """ + `, + assertion: func(t *testing.T, s job.Job, err error) { + require.NoError(t, err) + require.NotNil(t, s.WebhookSpec) + b, err := jsonapi.Marshal(s.WebhookSpec) + require.NoError(t, err) + var r job.WebhookSpec + err = jsonapi.Unmarshal(b, &r) + require.NoError(t, err) + require.Equal(t, "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46", s.ExternalJobID.String()) + }, + }, + { + name: "invalid job name", + toml: ` + type = "webhookjob" + schemaVersion = 1 + externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46" + observationSource = """ + ds [type=http method=GET url="https://chain.link/ETH-USD"]; + ds_parse [type=jsonparse path="data,price"]; + ds_multiply [type=multiply times=100]; + ds -> 
ds_parse -> ds_multiply; + """ + `, + assertion: func(t *testing.T, s job.Job, err error) { + require.Error(t, err) + assert.Equal(t, "unsupported type webhookjob", err.Error()) + }, + }, + { + name: "missing jobID is fine (it will be autogenerated later)", + toml: ` + type = "webhook" + schemaVersion = 1 + observationSource = """ + ds [type=http method=GET url="https://chain.link/ETH-USD"]; + ds_parse [type=jsonparse path="data,price"]; + ds_multiply [type=multiply times=100]; + ds -> ds_parse -> ds_multiply; + """ + `, + assertion: func(t *testing.T, s job.Job, err error) { + require.NoError(t, err) + }, + }, + { + name: "with multiple external initiators and externalJobID", + toml: ` + type = "webhook" + schemaVersion = 1 + externalJobID = "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46" + externalInitiators = [ + { name = "foo", spec = '{"foo": 42}' }, + { name = "bar", spec = '{"bar": 42}' } + ] + observationSource = """ + ds [type=http method=GET url="https://chain.link/ETH-USD"]; + ds_parse [type=jsonparse path="data,price"]; + ds -> ds_parse; + """ + `, + mock: func(t *testing.T, eim *webhookmocks.ExternalInitiatorManager) { + eim.On("FindExternalInitiatorByName", "foo").Return(bridges.ExternalInitiator{ID: 42}, nil).Once() + eim.On("FindExternalInitiatorByName", "bar").Return(bridges.ExternalInitiator{ID: 43}, nil).Once() + }, + assertion: func(t *testing.T, s job.Job, err error) { + require.NoError(t, err) + assert.Len(t, s.WebhookSpec.ExternalInitiatorWebhookSpecs, 2) + assert.Equal(t, int64(42), s.WebhookSpec.ExternalInitiatorWebhookSpecs[0].ExternalInitiatorID) + assert.Equal(t, `{"foo": 42}`, s.WebhookSpec.ExternalInitiatorWebhookSpecs[0].Spec.Raw) + + assert.Equal(t, int64(43), s.WebhookSpec.ExternalInitiatorWebhookSpecs[1].ExternalInitiatorID) + assert.Equal(t, `{"bar": 42}`, s.WebhookSpec.ExternalInitiatorWebhookSpecs[1].Spec.Raw) + + require.NotNil(t, s.WebhookSpec) + b, err := jsonapi.Marshal(s.WebhookSpec) + require.NoError(t, err) + require.Equal(t, 
"0eec7e1d-d0d2-476c-a1a8-72dfb6633f46", s.ExternalJobID.String()) + var r job.WebhookSpec + err = jsonapi.Unmarshal(b, &r) + require.NoError(t, err) + }, + }, + { + name: "with external initiators that do not exist", + toml: ` + type = "webhook" + schemaVersion = 1 + externalInitiators = [ + { name = "foo", spec = '{"foo": 42}' }, + { name = "bar", spec = '{"bar": 42}' }, + { name = "baz", spec = '{"baz": 42}' } + ] + observationSource = """ + ds [type=http method=GET url="https://chain.link/ETH-USD"]; + ds_parse [type=jsonparse path="data,price"]; + ds -> ds_parse; + """ + `, + mock: func(t *testing.T, eim *webhookmocks.ExternalInitiatorManager) { + eim.On("FindExternalInitiatorByName", "foo").Return(bridges.ExternalInitiator{ID: 42}, nil).Once() + eim.On("FindExternalInitiatorByName", "bar").Return(bridges.ExternalInitiator{}, errors.New("something exploded")).Once() + eim.On("FindExternalInitiatorByName", "baz").Return(bridges.ExternalInitiator{}, errors.New("something exploded")).Once() + }, + assertion: func(t *testing.T, s job.Job, err error) { + require.EqualError(t, err, "unable to find external initiator named bar: something exploded; unable to find external initiator named baz: something exploded") + }, + }, + } + for _, tc := range tt { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + eim := new(webhookmocks.ExternalInitiatorManager) + if tc.mock != nil { + tc.mock(t, eim) + } + s, err := webhook.ValidatedWebhookSpec(tc.toml, eim) + tc.assertion(t, s, err) + }) + } +} diff --git a/core/services/workflows/delegate.go b/core/services/workflows/delegate.go new file mode 100644 index 00000000..7254d544 --- /dev/null +++ b/core/services/workflows/delegate.go @@ -0,0 +1,45 @@ +package workflows + +import ( + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/capabilities/targets" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger" + 
"github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +type Delegate struct { + registry types.CapabilitiesRegistry + logger logger.Logger +} + +var _ job.Delegate = (*Delegate)(nil) + +func (d *Delegate) JobType() job.Type { + return job.Workflow +} + +func (d *Delegate) BeforeJobCreated(spec job.Job) {} + +func (d *Delegate) AfterJobCreated(jb job.Job) {} + +func (d *Delegate) BeforeJobDeleted(spec job.Job) {} + +func (d *Delegate) OnDeleteJob(jb job.Job, q pg.Queryer) error { return nil } + +// ServicesForSpec satisfies the job.Delegate interface. +func (d *Delegate) ServicesForSpec(spec job.Job) ([]job.ServiceCtx, error) { + engine, err := NewEngine(d.logger, d.registry) + if err != nil { + return nil, err + } + return []job.ServiceCtx{engine}, nil +} + +func NewDelegate(logger logger.Logger, registry types.CapabilitiesRegistry, legacyEVMChains legacyevm.LegacyChainContainer) *Delegate { + // NOTE: we temporarily do registration inside NewDelegate, this will be moved out of job specs in the future + _ = targets.InitializeWrite(registry, legacyEVMChains) + + return &Delegate{logger: logger, registry: registry} +} diff --git a/core/services/workflows/engine.go b/core/services/workflows/engine.go new file mode 100644 index 00000000..be35ee49 --- /dev/null +++ b/core/services/workflows/engine.go @@ -0,0 +1,216 @@ +package workflows + +import ( + "context" + "fmt" + + "github.com/goplugin/plugin-common/pkg/capabilities" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/plugin-common/pkg/values" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +const ( + mockedWorkflowID = "ef7c8168-f4d1-422f-a4b2-8ce0a1075f0a" + mockedTriggerID = "bd727a82-5cac-4071-be62-0152dd9adb0f" +) + +type Engine struct { + services.StateMachine + logger logger.Logger + registry types.CapabilitiesRegistry + trigger capabilities.TriggerCapability + consensus 
capabilities.ConsensusCapability + target capabilities.TargetCapability + callbackCh chan capabilities.CapabilityResponse + cancel func() +} + +func (e *Engine) Start(ctx context.Context) error { + return e.StartOnce("Engine", func() error { + err := e.registerTrigger(ctx) + if err != nil { + return err + } + + // create a new context, since the one passed in via Start is short-lived. + ctx, cancel := context.WithCancel(context.Background()) + e.cancel = cancel + go e.loop(ctx) + return nil + }) +} + +func (e *Engine) registerTrigger(ctx context.Context) error { + triggerConf, err := values.NewMap( + map[string]any{ + "feedlist": []any{ + // ETHUSD, PLIUSD, USDBTC + 123, 456, 789, + }, + }, + ) + if err != nil { + return err + } + + triggerInputs, err := values.NewMap( + map[string]any{ + "triggerId": mockedTriggerID, + }, + ) + if err != nil { + return err + } + + triggerRegRequest := capabilities.CapabilityRequest{ + Metadata: capabilities.RequestMetadata{ + WorkflowID: mockedWorkflowID, + }, + Config: triggerConf, + Inputs: triggerInputs, + } + err = e.trigger.RegisterTrigger(ctx, e.callbackCh, triggerRegRequest) + if err != nil { + return fmt.Errorf("failed to instantiate mercury_trigger, %s", err) + } + return nil +} + +func (e *Engine) loop(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case resp := <-e.callbackCh: + err := e.handleExecution(ctx, resp) + if err != nil { + e.logger.Error("error executing event %+v: %w", resp, err) + } + } + } +} + +func (e *Engine) handleExecution(ctx context.Context, resp capabilities.CapabilityResponse) error { + results, err := e.handleConsensus(ctx, resp) + if err != nil { + return err + } + + _, err = e.handleTarget(ctx, results) + return err +} + +func (e *Engine) handleTarget(ctx context.Context, resp *values.List) (*values.List, error) { + report, err := resp.Unwrap() + if err != nil { + return nil, err + } + inputs := map[string]values.Value{ + "report": resp, + } + config, err := 
values.NewMap(map[string]any{
		"address": "0xaabbcc",
		"method":  "updateFeedValues(report bytes, role uint8)",
		"params": []any{
			report, 1,
		},
	})
	if err != nil {
		return nil, err
	}

	tr := capabilities.CapabilityRequest{
		Inputs: &values.Map{Underlying: inputs},
		Config: config,
		Metadata: capabilities.RequestMetadata{
			WorkflowID: mockedWorkflowID,
		},
	}
	return capabilities.ExecuteSync(ctx, e.target, tr)
}

// handleConsensus feeds the trigger observations into the consensus
// capability using the hard-coded data_feeds_2_0 aggregation config and
// returns the aggregated report values.
func (e *Engine) handleConsensus(ctx context.Context, resp capabilities.CapabilityResponse) (*values.List, error) {
	inputs := map[string]values.Value{
		"observations": resp.Value,
	}
	config, err := values.NewMap(map[string]any{
		"aggregation_method": "data_feeds_2_0",
		"aggregation_config": map[string]any{
			// ETHUSD
			"123": map[string]any{
				"deviation": "0.005",
				"heartbeat": "24h",
			},
			// PLIUSD
			"456": map[string]any{
				"deviation": "0.001",
				"heartbeat": "24h",
			},
			// BTCUSD
			"789": map[string]any{
				"deviation": "0.002",
				"heartbeat": "6h",
			},
		},
		"encoder": "EVM",
	})
	if err != nil {
		// BUG FIX: previously returned (nil, nil) here, silently swallowing
		// the config construction error and handing a nil *values.List to
		// handleTarget (which would nil-deref in Unwrap). Propagate instead.
		return nil, err
	}
	cr := capabilities.CapabilityRequest{
		Metadata: capabilities.RequestMetadata{
			WorkflowID: mockedWorkflowID,
		},
		Inputs: &values.Map{Underlying: inputs},
		Config: config,
	}
	return capabilities.ExecuteSync(ctx, e.consensus, cr)
}

// Close deregisters the trigger and stops the engine's event loop.
func (e *Engine) Close() error {
	return e.StopOnce("Engine", func() error {
		defer e.cancel()

		triggerInputs, err := values.NewMap(
			map[string]any{
				"triggerId": mockedTriggerID,
			},
		)
		if err != nil {
			return err
		}
		deregRequest := capabilities.CapabilityRequest{
			Metadata: capabilities.RequestMetadata{
				WorkflowID: mockedWorkflowID,
			},
			Inputs: triggerInputs,
		}
		return e.trigger.UnregisterTrigger(context.Background(), deregRequest)
	})
}

// NewEngine wires up the trigger, consensus and target capabilities from the
// registry into a runnable workflow Engine.
func NewEngine(lggr logger.Logger, registry types.CapabilitiesRegistry) (*Engine, error) {
	ctx := context.Background()
	trigger, err := registry.GetTrigger(ctx, "on_mercury_report")
	if
err != nil { + return nil, err + } + consensus, err := registry.GetConsensus(ctx, "off-chain-reporting") + if err != nil { + return nil, err + } + target, err := registry.GetTarget(ctx, "write_polygon_mainnet") + if err != nil { + return nil, err + } + return &Engine{ + logger: lggr, + registry: registry, + trigger: trigger, + consensus: consensus, + target: target, + callbackCh: make(chan capabilities.CapabilityResponse), + }, nil +} diff --git a/core/services/workflows/engine_test.go b/core/services/workflows/engine_test.go new file mode 100644 index 00000000..94592a42 --- /dev/null +++ b/core/services/workflows/engine_test.go @@ -0,0 +1,125 @@ +package workflows + +import ( + "context" + "testing" + + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/capabilities" + "github.com/goplugin/plugin-common/pkg/values" + coreCap "github.com/goplugin/pluginv3.0/v2/core/capabilities" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +type mockCapability struct { + capabilities.CapabilityInfo + capabilities.CallbackExecutable + response chan capabilities.CapabilityResponse + transform func(capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) +} + +func newMockCapability(info capabilities.CapabilityInfo, transform func(capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error)) *mockCapability { + return &mockCapability{ + transform: transform, + CapabilityInfo: info, + response: make(chan capabilities.CapabilityResponse, 10), + } +} + +func (m *mockCapability) Execute(ctx context.Context, ch chan<- capabilities.CapabilityResponse, req capabilities.CapabilityRequest) error { + cr, err := m.transform(req) + if err != nil { + return err + } + + ch <- cr + close(ch) + m.response <- cr + return nil +} + +type mockTriggerCapability struct { + capabilities.CapabilityInfo + ch chan<- capabilities.CapabilityResponse +} + +var _ 
capabilities.TriggerCapability = (*mockTriggerCapability)(nil) + +func (m *mockTriggerCapability) RegisterTrigger(ctx context.Context, ch chan<- capabilities.CapabilityResponse, req capabilities.CapabilityRequest) error { + m.ch = ch + return nil +} + +func (m *mockTriggerCapability) UnregisterTrigger(ctx context.Context, req capabilities.CapabilityRequest) error { + return nil +} + +func TestEngineWithHardcodedWorkflow(t *testing.T) { + ctx := context.Background() + reg := coreCap.NewRegistry() + + trigger := &mockTriggerCapability{ + CapabilityInfo: capabilities.MustNewCapabilityInfo( + "on_mercury_report", + capabilities.CapabilityTypeTrigger, + "issues a trigger when a mercury report is received.", + "v1.0.0", + ), + } + require.NoError(t, reg.Add(ctx, trigger)) + + consensus := newMockCapability( + capabilities.MustNewCapabilityInfo( + "off-chain-reporting", + capabilities.CapabilityTypeConsensus, + "an ocr3 consensus capability", + "v3.0.0", + ), + func(req capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) { + return capabilities.CapabilityResponse{ + Value: req.Inputs.Underlying["observations"], + }, nil + }, + ) + require.NoError(t, reg.Add(ctx, consensus)) + + target := newMockCapability( + capabilities.MustNewCapabilityInfo( + "write_polygon_mainnet", + capabilities.CapabilityTypeTarget, + "a write capability targeting polygon mainnet", + "v1.0.0", + ), + func(req capabilities.CapabilityRequest) (capabilities.CapabilityResponse, error) { + + list := req.Inputs.Underlying["report"].(*values.List) + return capabilities.CapabilityResponse{ + Value: list.Underlying[0], + }, nil + }, + ) + require.NoError(t, reg.Add(ctx, target)) + + lggr := logger.TestLogger(t) + eng, err := NewEngine(lggr, reg) + require.NoError(t, err) + + err = eng.Start(ctx) + require.NoError(t, err) + defer eng.Close() + + resp, err := values.NewMap(map[string]any{ + "123": decimal.NewFromFloat(1.00), + "456": decimal.NewFromFloat(1.25), + "789": 
decimal.NewFromFloat(1.50), + }) + require.NoError(t, err) + cr := capabilities.CapabilityResponse{ + Value: resp, + } + trigger.ch <- cr + assert.Equal(t, cr, <-target.response) +} diff --git a/core/sessions/authentication.go b/core/sessions/authentication.go new file mode 100644 index 00000000..314760c5 --- /dev/null +++ b/core/sessions/authentication.go @@ -0,0 +1,66 @@ +package sessions + +import ( + "errors" + "fmt" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" +) + +// Application config constant options +type AuthenticationProviderName string + +const ( + LocalAuth AuthenticationProviderName = "local" + LDAPAuth AuthenticationProviderName = "ldap" +) + +// ErrUserSessionExpired defines the error triggered when the user session has expired +var ErrUserSessionExpired = errors.New("session missing or expired, please login again") + +// ErrNotSupported defines the error where interface functionality doesn't align with the underlying Auth Provider +var ErrNotSupported = fmt.Errorf("functionality not supported with current authentication provider: %w", errors.ErrUnsupported) + +// ErrEmptySessionID captures the empty case error message +var ErrEmptySessionID = errors.New("session ID cannot be empty") + +//go:generate mockery --quiet --name BasicAdminUsersORM --output ./mocks/ --case=underscore + +// BasicAdminUsersORM is the interface that defines the functionality required for supporting basic admin functionality +// adjacent to the identity provider authentication provider implementation. It is currently implemented by the local +// users/sessions ORM containing local admin CLI actions. This is separate from the AuthenticationProvider, +// as local admin management (ie initial core node setup, initial admin user creation), is always +// required no matter what the pluggable AuthenticationProvider implementation is. 
+type BasicAdminUsersORM interface { + ListUsers() ([]User, error) + CreateUser(user *User) error + FindUser(email string) (User, error) +} + +//go:generate mockery --quiet --name AuthenticationProvider --output ./mocks/ --case=underscore + +// AuthenticationProvider is an interface that abstracts the required application calls to a user management backend +// Currently localauth (users table DB) or LDAP server (readonly) +type AuthenticationProvider interface { + FindUser(email string) (User, error) + FindUserByAPIToken(apiToken string) (User, error) + ListUsers() ([]User, error) + AuthorizedUserWithSession(sessionID string) (User, error) + DeleteUser(email string) error + DeleteUserSession(sessionID string) error + CreateSession(sr SessionRequest) (string, error) + ClearNonCurrentSessions(sessionID string) error + CreateUser(user *User) error + UpdateRole(email, newRole string) (User, error) + SetAuthToken(user *User, token *auth.Token) error + CreateAndSetAuthToken(user *User) (*auth.Token, error) + DeleteAuthToken(user *User) error + SetPassword(user *User, newPassword string) error + TestPassword(email, password string) error + Sessions(offset, limit int) ([]Session, error) + GetUserWebAuthn(email string) ([]WebAuthn, error) + SaveWebAuthn(token *WebAuthn) error + + FindExternalInitiator(eia *auth.Token) (initiator *bridges.ExternalInitiator, err error) +} diff --git a/core/sessions/ldapauth/client.go b/core/sessions/ldapauth/client.go new file mode 100644 index 00000000..f8f1ce9c --- /dev/null +++ b/core/sessions/ldapauth/client.go @@ -0,0 +1,47 @@ +package ldapauth + +import ( + "fmt" + + "github.com/go-ldap/ldap/v3" + + "github.com/goplugin/pluginv3.0/v2/core/config" +) + +type ldapClient struct { + config config.LDAP +} + +//go:generate mockery --quiet --name LDAPClient --output ./mocks/ --case=underscore + +// Wrapper for creating a handle to a *ldap.Conn/LDAPConn interface +type LDAPClient interface { + CreateEphemeralConnection() (LDAPConn, error) +} + 
+//go:generate mockery --quiet --name LDAPConn --output ./mocks/ --case=underscore + +// Wrapper for ldap connection and mock testing, implemented by *ldap.Conn +type LDAPConn interface { + Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) + Bind(username string, password string) error + Close() (err error) +} + +func newLDAPClient(config config.LDAP) LDAPClient { + return &ldapClient{config} +} + +// CreateEphemeralConnection returns a valid, active LDAP connection for upstream Search and Bind queries +func (l *ldapClient) CreateEphemeralConnection() (LDAPConn, error) { + conn, err := ldap.DialURL(l.config.ServerAddress()) + if err != nil { + return nil, fmt.Errorf("failed to Dial LDAP Server: %w", err) + } + // Root level root user auth with credentials provided from config + bindStr := l.config.BaseUserAttr() + "=" + l.config.ReadOnlyUserLogin() + "," + l.config.BaseDN() + if err := conn.Bind(bindStr, l.config.ReadOnlyUserPass()); err != nil { + return nil, fmt.Errorf("unable to login as initial root LDAP user: %w", err) + } + return conn, nil +} diff --git a/core/sessions/ldapauth/helpers_test.go b/core/sessions/ldapauth/helpers_test.go new file mode 100644 index 00000000..1b5d7f0d --- /dev/null +++ b/core/sessions/ldapauth/helpers_test.go @@ -0,0 +1,131 @@ +package ldapauth + +import ( + "time" + + "github.com/jmoiron/sqlx" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" +) + +// Returns an instantiated ldapAuthenticator struct without validation for testing +func NewTestLDAPAuthenticator( + db *sqlx.DB, + pgCfg pg.QConfig, + ldapCfg config.LDAP, + dev bool, + lggr logger.Logger, + auditLogger audit.AuditLogger, +) (*ldapAuthenticator, error) { + namedLogger := lggr.Named("LDAPAuthenticationProvider") + ldapAuth := 
ldapAuthenticator{ + q: pg.NewQ(db, namedLogger, pgCfg), + ldapClient: newLDAPClient(ldapCfg), + config: ldapCfg, + lggr: lggr.Named("LDAPAuthenticationProvider"), + auditLogger: auditLogger, + } + + return &ldapAuth, nil +} + +// Default server group name mappings for test config and mocked ldap search results +const ( + NodeAdminsGroupCN = "NodeAdmins" + NodeEditorsGroupCN = "NodeEditors" + NodeRunnersGroupCN = "NodeRunners" + NodeReadOnlyGroupCN = "NodeReadOnly" +) + +// Implement a setter function within the _test file so that the ldapauth_test module can set the unexported field with a mock +func (l *ldapAuthenticator) SetLDAPClient(newClient LDAPClient) { + l.ldapClient = newClient +} + +// Implements config.LDAP +type TestConfig struct { +} + +func (t *TestConfig) ServerAddress() string { + return "ldaps://MOCK" +} + +func (t *TestConfig) ReadOnlyUserLogin() string { + return "mock-readonly" +} + +func (t *TestConfig) ReadOnlyUserPass() string { + return "mock-password" +} + +func (t *TestConfig) ServerTLS() bool { + return false +} + +func (t *TestConfig) SessionTimeout() commonconfig.Duration { + return *commonconfig.MustNewDuration(time.Duration(0)) +} + +func (t *TestConfig) QueryTimeout() time.Duration { + return time.Duration(0) +} + +func (t *TestConfig) UserAPITokenDuration() commonconfig.Duration { + return *commonconfig.MustNewDuration(time.Duration(0)) +} + +func (t *TestConfig) BaseUserAttr() string { + return "uid" +} + +func (t *TestConfig) BaseDN() string { + return "dc=custom,dc=example,dc=com" +} + +func (t *TestConfig) UsersDN() string { + return "ou=users" +} + +func (t *TestConfig) GroupsDN() string { + return "ou=groups" +} + +func (t *TestConfig) ActiveAttribute() string { + return "organizationalStatus" +} + +func (t *TestConfig) ActiveAttributeAllowedValue() string { + return "ACTIVE" +} + +func (t *TestConfig) AdminUserGroupCN() string { + return NodeAdminsGroupCN +} + +func (t *TestConfig) EditUserGroupCN() string { + return 
NodeEditorsGroupCN +} + +func (t *TestConfig) RunUserGroupCN() string { + return NodeRunnersGroupCN +} + +func (t *TestConfig) ReadUserGroupCN() string { + return NodeReadOnlyGroupCN +} + +func (t *TestConfig) UserApiTokenEnabled() bool { + return true +} + +func (t *TestConfig) UpstreamSyncInterval() commonconfig.Duration { + return *commonconfig.MustNewDuration(time.Duration(0)) +} + +func (t *TestConfig) UpstreamSyncRateLimit() commonconfig.Duration { + return *commonconfig.MustNewDuration(time.Duration(0)) +} diff --git a/core/sessions/ldapauth/ldap.go b/core/sessions/ldapauth/ldap.go new file mode 100644 index 00000000..c3f2dbb6 --- /dev/null +++ b/core/sessions/ldapauth/ldap.go @@ -0,0 +1,856 @@ +/* +The LDAP authentication package forwards the credentials in the user session request +for authentication with a configured upstream LDAP server + +This package relies on the two following local database tables: + + ldap_sessions: Upon successful LDAP response, creates a keyed local copy of the user email + ldap_user_api_tokens: User created API tokens, tied to the node, storing user email. + +Note: user can have only one API token at a time, and token expiration is enforced + +User session and roles are cached and revalidated with the upstream service at the interval defined in +the local LDAP config through the Application.sessionReaper implementation in reaper.go. + +Changes to the upstream identity server will propagate through and update local tables (web sessions, API tokens) +by either removing the entries or updating the roles. This sync happens for every auth endpoint hit, and +via the defined sync interval. One goroutine is created to coordinate the sync timing in the New function + +This implementation is read only; user mutation actions such as Delete are not supported. + +MFA is supported via the remote LDAP server implementation. 
Sufficient request time out should accommodate +for a blocking auth call while the user responds to a potential push notification callback. +*/ +package ldapauth + +import ( + "crypto/subtle" + "database/sql" + "errors" + "fmt" + "strings" + "time" + + "github.com/go-ldap/ldap/v3" + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/utils/mathutil" + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +const ( + UniqueMemberAttribute = "uniqueMember" +) + +var ErrUserNotInUpstream = errors.New("LDAP query returned no matching users") +var ErrUserNoLDAPGroups = errors.New("user present in directory, but matching no role groups assigned") + +type ldapAuthenticator struct { + q pg.Q + ldapClient LDAPClient + config config.LDAP + lggr logger.Logger + auditLogger audit.AuditLogger +} + +// ldapAuthenticator implements sessions.AuthenticationProvider interface +var _ sessions.AuthenticationProvider = (*ldapAuthenticator)(nil) + +func NewLDAPAuthenticator( + db *sqlx.DB, + pgCfg pg.QConfig, + ldapCfg config.LDAP, + dev bool, + lggr logger.Logger, + auditLogger audit.AuditLogger, +) (*ldapAuthenticator, error) { + namedLogger := lggr.Named("LDAPAuthenticationProvider") + + // If not plugin dev and not tls, error + if !dev && !ldapCfg.ServerTLS() { + return nil, errors.New("LDAP Authentication driver requires TLS when running in Production mode") + } + + // Ensure all RBAC role mappings to LDAP Groups are defined, and required fields populated, or error on startup + if ldapCfg.AdminUserGroupCN() == "" || ldapCfg.EditUserGroupCN() == "" || + ldapCfg.RunUserGroupCN() == "" || ldapCfg.ReadUserGroupCN() 
== "" { + return nil, errors.New("LDAP Group mapping from server group name for all local RBAC role required. Set group names for `_UserGroupCN` fields") + } + if ldapCfg.ServerAddress() == "" { + return nil, errors.New("LDAP ServerAddress config required") + } + if ldapCfg.ReadOnlyUserLogin() == "" { + return nil, errors.New("LDAP ReadOnlyUserLogin config required") + } + + ldapAuth := ldapAuthenticator{ + q: pg.NewQ(db, namedLogger, pgCfg), + ldapClient: newLDAPClient(ldapCfg), + config: ldapCfg, + lggr: lggr.Named("LDAPAuthenticationProvider"), + auditLogger: auditLogger, + } + + // Single override of library defined global + ldap.DefaultTimeout = ldapCfg.QueryTimeout() + + // Test initial connection and credentials + lggr.Infof("Attempting initial connection to configured LDAP server with bind as API user") + conn, err := ldapAuth.ldapClient.CreateEphemeralConnection() + if err != nil { + return nil, fmt.Errorf("unable to establish connection to LDAP server with provided URL and credentials: %w", err) + } + conn.Close() + + // Store LDAP connection config for auth/new connection per request instead of persisted connection with reconnect + return &ldapAuth, nil +} + +// FindUser will attempt to return an LDAP user with mapped role by email. 
+func (l *ldapAuthenticator) FindUser(email string) (sessions.User, error) { + email = strings.ToLower(email) + foundUser := sessions.User{} + + // First check for the supported local admin users table + var foundLocalAdminUser sessions.User + checkErr := l.q.Transaction(func(tx pg.Queryer) error { + sql := "SELECT * FROM users WHERE lower(email) = lower($1)" + return tx.Get(&foundLocalAdminUser, sql, email) + }) + if checkErr == nil { + return foundLocalAdminUser, nil + } + // If error is not nil, there was either an issue or no local users found + if !errors.Is(checkErr, sql.ErrNoRows) { + // If the error is not that no local user was found, log and exit + l.lggr.Errorf("error searching users table: %v", checkErr) + return sessions.User{}, errors.New("error Finding user") + } + + // First query for user "is active" property if defined + usersActive, err := l.validateUsersActive([]string{email}) + if err != nil { + if errors.Is(err, ErrUserNotInUpstream) { + return foundUser, ErrUserNotInUpstream + } + l.lggr.Errorf("error in validateUsers call: %v", err) + return foundUser, errors.New("error running query to validate user active") + } + if !usersActive[0] { + return foundUser, errors.New("user not active") + } + + conn, err := l.ldapClient.CreateEphemeralConnection() + if err != nil { + l.lggr.Errorf("error in LDAP dial: ", err) + return foundUser, errors.New("unable to establish connection to LDAP server with provided URL and credentials") + } + defer conn.Close() + + // User email and role are the only upstream data that needs queried for. 
+ // List query user groups using the provided email, on success is a list of group the uniquemember belongs to + // data is readily available + escapedEmail := ldap.EscapeFilter(email) + searchBaseDN := fmt.Sprintf("%s, %s", l.config.GroupsDN(), l.config.BaseDN()) + filterQuery := fmt.Sprintf("(&(uniquemember=%s=%s,%s,%s))", l.config.BaseUserAttr(), escapedEmail, l.config.UsersDN(), l.config.BaseDN()) + searchRequest := ldap.NewSearchRequest( + searchBaseDN, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, + 0, int(l.config.QueryTimeout().Seconds()), false, + filterQuery, + []string{"cn"}, + nil, + ) + + // Query the server + result, err := conn.Search(searchRequest) + if err != nil { + l.lggr.Errorf("error searching users in LDAP query: %v", err) + return foundUser, errors.New("error searching users in LDAP directory") + } + + if len(result.Entries) == 0 { + // Provided email is not present in upstream LDAP server, local admin CLI auth is supported + // So query and check the users table as well before failing + if err = l.q.Transaction(func(tx pg.Queryer) error { + var localUserRole sessions.UserRole + if err = tx.Get(&localUserRole, "SELECT role FROM users WHERE email = $1", email); err != nil { + return err + } + foundUser = sessions.User{ + Email: email, + Role: localUserRole, + } + return nil + }); err != nil { + // Above query for local user unsuccessful, return error + l.lggr.Warnf("No local users table user found with email %s", email) + return foundUser, errors.New("no users found with provided email") + } + + // If the above query to the local users table was successful, return that local user's role + return foundUser, nil + } + + // Populate found user by email and role based on matched group names + userRole, err := l.groupSearchResultsToUserRole(result.Entries) + if err != nil { + l.lggr.Warnf("User '%s' found but no matching assigned groups in LDAP to assume role", email) + return sessions.User{}, err + } + + // Convert search result to 
sessions.User type with required fields + foundUser = sessions.User{ + Email: email, + Role: userRole, + } + + return foundUser, nil +} + +// FindUserByAPIToken retrieves a possible stored user and role from the ldap_user_api_tokens table store +func (l *ldapAuthenticator) FindUserByAPIToken(apiToken string) (sessions.User, error) { + if !l.config.UserApiTokenEnabled() { + return sessions.User{}, errors.New("API token is not enabled ") + } + + var foundUser sessions.User + err := l.q.Transaction(func(tx pg.Queryer) error { + // Query the ldap user API token table for given token, user role and email are cached so + // no further upstream LDAP query is performed, sessions and tokens are synced against the upstream server + // via the UpstreamSyncInterval config and reaper.go sync implementation + var foundUserToken struct { + UserEmail string + UserRole sessions.UserRole + Valid bool + } + if err := tx.Get(&foundUserToken, + "SELECT user_email, user_role, created_at + $2 >= now() as valid FROM ldap_user_api_tokens WHERE token_key = $1", + apiToken, l.config.UserAPITokenDuration().Duration(), + ); err != nil { + return err + } + if !foundUserToken.Valid { + return sessions.ErrUserSessionExpired + } + foundUser = sessions.User{ + Email: foundUserToken.UserEmail, + Role: foundUserToken.UserRole, + } + return nil + }) + if err != nil { + if errors.Is(err, sessions.ErrUserSessionExpired) { + // API Token expired, purge + if _, execErr := l.q.Exec("DELETE FROM ldap_user_api_tokens WHERE token_key = $1", apiToken); err != nil { + l.lggr.Errorf("error purging stale ldap API token session: %v", execErr) + } + } + return sessions.User{}, err + } + return foundUser, nil +} + +// ListUsers will load and return all active users in applicable LDAP groups, extended with local admin users as well +func (l *ldapAuthenticator) ListUsers() ([]sessions.User, error) { + // For each defined role/group, query for the list of group members to gather the full list of possible users + users 
:= []sessions.User{} + var err error + + conn, err := l.ldapClient.CreateEphemeralConnection() + if err != nil { + l.lggr.Errorf("error in LDAP dial: ", err) + return users, errors.New("unable to establish connection to LDAP server with provided URL and credentials") + } + defer conn.Close() + + // Query for list of uniqueMember IDs present in Admin group + adminUsers, err := l.ldapGroupMembersListToUser(conn, l.config.AdminUserGroupCN(), sessions.UserRoleAdmin) + if err != nil { + l.lggr.Errorf("error in ldapGroupMembersListToUser: ", err) + return users, errors.New("unable to list group users") + } + // Query for list of uniqueMember IDs present in Edit group + editUsers, err := l.ldapGroupMembersListToUser(conn, l.config.EditUserGroupCN(), sessions.UserRoleEdit) + if err != nil { + l.lggr.Errorf("error in ldapGroupMembersListToUser: ", err) + return users, errors.New("unable to list group users") + } + // Query for list of uniqueMember IDs present in Run group + runUsers, err := l.ldapGroupMembersListToUser(conn, l.config.RunUserGroupCN(), sessions.UserRoleRun) + if err != nil { + l.lggr.Errorf("error in ldapGroupMembersListToUser: ", err) + return users, errors.New("unable to list group users") + } + // Query for list of uniqueMember IDs present in Read group + readUsers, err := l.ldapGroupMembersListToUser(conn, l.config.ReadUserGroupCN(), sessions.UserRoleView) + if err != nil { + l.lggr.Errorf("error in ldapGroupMembersListToUser: ", err) + return users, errors.New("unable to list group users") + } + + // Aggregate full list + users = append(users, adminUsers...) + users = append(users, editUsers...) + users = append(users, runUsers...) + users = append(users, readUsers...) 
+ + // Dedupe preserving order of highest role + uniqueRef := make(map[string]struct{}) + dedupedUsers := []sessions.User{} + for _, user := range users { + if _, ok := uniqueRef[user.Email]; !ok { + uniqueRef[user.Email] = struct{}{} + dedupedUsers = append(dedupedUsers, user) + } + } + + // If no active attribute to check is defined, user simple being assigned the group is enough, return full list + if l.config.ActiveAttribute() == "" { + return dedupedUsers, nil + } + + // Now optionally validate that all uniqueMembers are active in the org/LDAP server + emails := []string{} + for _, user := range dedupedUsers { + emails = append(emails, user.Email) + } + activeUsers, err := l.validateUsersActive(emails) + if err != nil { + l.lggr.Errorf("error validating supplied user list: ", err) + return users, errors.New("error validating supplied user list") + } + + // Filter non active users + returnUsers := []sessions.User{} + for i, active := range activeUsers { + if active { + returnUsers = append(returnUsers, dedupedUsers[i]) + } + } + + // Extend with local admin users + var localAdminUsers []sessions.User + if err := l.q.Transaction(func(tx pg.Queryer) error { + sql := "SELECT * FROM users ORDER BY email ASC;" + return tx.Select(&localAdminUsers, sql) + }); err != nil { + l.lggr.Errorf("error extending upstream LDAP users with local admin users in users table: ", err) + } else { + returnUsers = append(returnUsers, localAdminUsers...) 
+ } + + return returnUsers, nil +} + +// ldapGroupMembersListToUser queries the LDAP server given a conn for a list of uniqueMember who are part of the parameterized group +func (l *ldapAuthenticator) ldapGroupMembersListToUser(conn LDAPConn, groupNameCN string, roleToAssign sessions.UserRole) ([]sessions.User, error) { + users, err := ldapGroupMembersListToUser( + conn, groupNameCN, roleToAssign, l.config.GroupsDN(), + l.config.BaseDN(), l.config.QueryTimeout(), + l.lggr, + ) + if err != nil { + l.lggr.Errorf("error listing members of group (%s): %v", groupNameCN, err) + return users, errors.New("error searching group members in LDAP directory") + } + return users, nil +} + +// AuthorizedUserWithSession will return the API user associated with the Session ID if it +// exists and hasn't expired, and update session's LastUsed field. The state of the upstream LDAP server +// is polled and synced at the defined interval via a SleeperTask +func (l *ldapAuthenticator) AuthorizedUserWithSession(sessionID string) (sessions.User, error) { + if len(sessionID) == 0 { + return sessions.User{}, errors.New("session ID cannot be empty") + } + var foundUser sessions.User + err := l.q.Transaction(func(tx pg.Queryer) error { + // Query the ldap_sessions table for given session ID, user role and email are cached so + // no further upstream LDAP query is performed + var foundSession struct { + UserEmail string + UserRole sessions.UserRole + Valid bool + } + if err := tx.Get(&foundSession, + "SELECT user_email, user_role, created_at + $2 >= now() as valid FROM ldap_sessions WHERE id = $1", + sessionID, l.config.SessionTimeout().Duration(), + ); err != nil { + return sessions.ErrUserSessionExpired + } + if !foundSession.Valid { + // Sessions expired, purge + return sessions.ErrUserSessionExpired + } + foundUser = sessions.User{ + Email: foundSession.UserEmail, + Role: foundSession.UserRole, + } + return nil + }) + if err != nil { + if errors.Is(err, sessions.ErrUserSessionExpired) { + 
if _, execErr := l.q.Exec("DELETE FROM ldap_sessions WHERE id = $1", sessionID); err != nil { + l.lggr.Errorf("error purging stale ldap session: %v", execErr) + } + } + return sessions.User{}, err + } + return foundUser, nil +} + +// DeleteUser is not supported for read only LDAP +func (l *ldapAuthenticator) DeleteUser(email string) error { + return sessions.ErrNotSupported +} + +// DeleteUserSession removes an ldapSession table entry by ID +func (l *ldapAuthenticator) DeleteUserSession(sessionID string) error { + _, err := l.q.Exec("DELETE FROM ldap_sessions WHERE id = $1", sessionID) + return err +} + +// GetUserWebAuthn returns an empty stub, MFA token prompt is handled either by the upstream +// server blocking callback, or an error code to pass a OTP +func (l *ldapAuthenticator) GetUserWebAuthn(email string) ([]sessions.WebAuthn, error) { + return []sessions.WebAuthn{}, nil +} + +// CreateSession will forward the session request credentials to the +// LDAP server, querying for a user + role response if username and +// password match. The API call is blocking with timeout, so a sufficient timeout +// should allow the user to respond to potential MFA push notifications +func (l *ldapAuthenticator) CreateSession(sr sessions.SessionRequest) (string, error) { + conn, err := l.ldapClient.CreateEphemeralConnection() + if err != nil { + return "", errors.New("unable to establish connection to LDAP server with provided URL and credentials") + } + defer conn.Close() + + var returnErr error + + // Attempt to LDAP Bind with user provided credentials + escapedEmail := ldap.EscapeFilter(strings.ToLower(sr.Email)) + searchBaseDN := fmt.Sprintf("%s=%s,%s,%s", l.config.BaseUserAttr(), escapedEmail, l.config.UsersDN(), l.config.BaseDN()) + if err = conn.Bind(searchBaseDN, sr.Password); err != nil { + l.lggr.Infof("Error binding user authentication request in LDAP Bind: %v", err) + returnErr = errors.New("unable to log in with LDAP server. 
Check credentials") + } + + // Bind was successful meaning user and credentials are present in LDAP directory + // Reuse FindUser functionality to fetch user roles used to create ldap_session entry + // with cached user email and role + foundUser, err := l.FindUser(escapedEmail) + if err != nil { + l.lggr.Infof("Successful user login, but error querying for user groups: user: %s, error %v", escapedEmail, err) + returnErr = errors.New("log in successful, but no assigned groups to assume role") + } + + isLocalUser := false + if returnErr != nil { + // Unable to log in against LDAP server, attempt fallback local auth with credentials, case of local CLI Admin account + // Successful local user sessions can not be managed by the upstream server and have expiration handled by the reaper sync module + foundUser, returnErr = l.localLoginFallback(sr) + isLocalUser = true + } + + // If err is still populated, return + if returnErr != nil { + return "", returnErr + } + + l.lggr.Infof("Successful LDAP login request for user %s - %s", sr.Email, foundUser.Role) + + // Save session, user, and role to database. Given a session ID for future queries, the LDAP server will not be queried + // Sessions are set to expire after the duration + creation date elapsed, and are synced on an interval against the upstream + // LDAP server + session := sessions.NewSession() + _, err = l.q.Exec( + "INSERT INTO ldap_sessions (id, user_email, user_role, localauth_user, created_at) VALUES ($1, $2, $3, $4, now())", + session.ID, + strings.ToLower(sr.Email), + foundUser.Role, + isLocalUser, + ) + if err != nil { + l.lggr.Errorf("unable to create new session in ldap_sessions table %v", err) + return "", fmt.Errorf("error creating local LDAP session: %w", err) + } + + l.auditLogger.Audit(audit.AuthLoginSuccessNo2FA, map[string]interface{}{"email": sr.Email}) + + return session.ID, nil +} + +// ClearNonCurrentSessions removes all ldap_sessions but the id passed in. 
+func (l *ldapAuthenticator) ClearNonCurrentSessions(sessionID string) error { + _, err := l.q.Exec("DELETE FROM ldap_sessions where id != $1", sessionID) + return err +} + +// CreateUser is not supported for read only LDAP +func (l *ldapAuthenticator) CreateUser(user *sessions.User) error { + return sessions.ErrNotSupported +} + +// UpdateRole is not supported for read only LDAP +func (l *ldapAuthenticator) UpdateRole(email, newRole string) (sessions.User, error) { + return sessions.User{}, sessions.ErrNotSupported +} + +// SetPassword for remote users is not supported via the read only LDAP implementation, however change password +// in the context of updating a local admin user's password is required +func (l *ldapAuthenticator) SetPassword(user *sessions.User, newPassword string) error { + // Ensure specified user is part of the local admins user table + var localAdminUser sessions.User + if err := l.q.Transaction(func(tx pg.Queryer) error { + sql := "SELECT * FROM users WHERE lower(email) = lower($1)" + return tx.Get(&localAdminUser, sql, user.Email) + }); err != nil { + l.lggr.Infof("Can not change password, local user with email not found in users table: %s, err: %v", user.Email, err) + return sessions.ErrNotSupported + } + + // User is local admin, save new password + hashedPassword, err := utils.HashPassword(newPassword) + if err != nil { + return err + } + if err := l.q.Transaction(func(tx pg.Queryer) error { + sql := "UPDATE users SET hashed_password = $1, updated_at = now() WHERE email = $2 RETURNING *" + return tx.Get(user, sql, hashedPassword, user.Email) + }); err != nil { + l.lggr.Errorf("unable to set password for user: %s, err: %v", user.Email, err) + return errors.New("unable to save password") + } + return nil +} + +// TestPassword tests if an LDAP login bind can be performed with provided credentials, returns nil if success +func (l *ldapAuthenticator) TestPassword(email string, password string) error { + conn, err := 
l.ldapClient.CreateEphemeralConnection() + if err != nil { + return errors.New("unable to establish connection to LDAP server with provided URL and credentials") + } + defer conn.Close() + + // Attempt to LDAP Bind with user provided credentials + escapedEmail := ldap.EscapeFilter(strings.ToLower(email)) + searchBaseDN := fmt.Sprintf("%s=%s,%s,%s", l.config.BaseUserAttr(), escapedEmail, l.config.UsersDN(), l.config.BaseDN()) + err = conn.Bind(searchBaseDN, password) + if err == nil { + return nil + } + l.lggr.Infof("Error binding user authentication request in TestPassword call LDAP Bind: %v", err) + + // Fall back to test local users table in case of supported local CLI users as well + var hashedPassword string + if err := l.q.Get(&hashedPassword, "SELECT hashed_password FROM users WHERE lower(email) = lower($1)", email); err != nil { + return errors.New("invalid credentials") + } + if !utils.CheckPasswordHash(password, hashedPassword) { + return errors.New("invalid credentials") + } + + return nil +} + +// CreateAndSetAuthToken generates a new credential token with the user role +func (l *ldapAuthenticator) CreateAndSetAuthToken(user *sessions.User) (*auth.Token, error) { + newToken := auth.NewToken() + + err := l.SetAuthToken(user, newToken) + if err != nil { + return nil, err + } + + return newToken, nil +} + +// SetAuthToken updates the user to use the given Authentication Token. +func (l *ldapAuthenticator) SetAuthToken(user *sessions.User, token *auth.Token) error { + if !l.config.UserApiTokenEnabled() { + return errors.New("API token is not enabled ") + } + + salt := utils.NewSecret(utils.DefaultSecretSize) + hashedSecret, err := auth.HashedSecret(token, salt) + if err != nil { + return fmt.Errorf("LDAPAuth SetAuthToken hashed secret error: %w", err) + } + + err = l.q.Transaction(func(tx pg.Queryer) error { + // Is this user a local CLI Admin or upstream LDAP user? + // Check presence in local users table. Set localauth_user column true if present. 
+ // This flag omits the session/token from being purged by the sync daemon/reaper.go + isLocalCLIAdmin := false + err = l.q.QueryRow("SELECT EXISTS (SELECT 1 FROM users WHERE email = $1)", user.Email).Scan(&isLocalCLIAdmin) + if err != nil { + return fmt.Errorf("error checking user presence in users table: %w", err) + } + + // Remove any existing API tokens + if _, err = l.q.Exec("DELETE FROM ldap_user_api_tokens WHERE user_email = $1", user.Email); err != nil { + return fmt.Errorf("error executing DELETE FROM ldap_user_api_tokens: %w", err) + } + // Create new API token for user + _, err = l.q.Exec( + "INSERT INTO ldap_user_api_tokens (user_email, user_role, localauth_user, token_key, token_salt, token_hashed_secret, created_at) VALUES ($1, $2, $3, $4, $5, $6, now())", + user.Email, + user.Role, + isLocalCLIAdmin, + token.AccessKey, + salt, + hashedSecret, + ) + if err != nil { + return fmt.Errorf("failed insert into ldap_user_api_tokens: %w", err) + } + return nil + }) + if err != nil { + return errors.New("error creating API token") + } + + l.auditLogger.Audit(audit.APITokenCreated, map[string]interface{}{"user": user.Email}) + return nil +} + +// DeleteAuthToken clears and disables the users Authentication Token. +func (l *ldapAuthenticator) DeleteAuthToken(user *sessions.User) error { + _, err := l.q.Exec("DELETE FROM ldap_user_api_tokens WHERE email = $1") + return err +} + +// SaveWebAuthn is not supported for read only LDAP +func (l *ldapAuthenticator) SaveWebAuthn(token *sessions.WebAuthn) error { + return sessions.ErrNotSupported +} + +// Sessions returns all sessions limited by the parameters. 
+func (l *ldapAuthenticator) Sessions(offset, limit int) ([]sessions.Session, error) { + var sessions []sessions.Session + sql := `SELECT * FROM ldap_sessions ORDER BY created_at, id LIMIT $1 OFFSET $2;` + if err := l.q.Select(&sessions, sql, limit, offset); err != nil { + return sessions, nil + } + return sessions, nil +} + +// FindExternalInitiator supports the 'Run' role external intiator header auth functionality +func (l *ldapAuthenticator) FindExternalInitiator(eia *auth.Token) (*bridges.ExternalInitiator, error) { + exi := &bridges.ExternalInitiator{} + err := l.q.Get(exi, `SELECT * FROM external_initiators WHERE access_key = $1`, eia.AccessKey) + return exi, err +} + +// localLoginFallback tests the credentials provided against the 'local' authentication method +// This covers the case of local CLI API calls requiring local login separate from the LDAP server +func (l *ldapAuthenticator) localLoginFallback(sr sessions.SessionRequest) (sessions.User, error) { + var user sessions.User + sql := "SELECT * FROM users WHERE lower(email) = lower($1)" + err := l.q.Get(&user, sql, sr.Email) + if err != nil { + return user, err + } + if !constantTimeEmailCompare(strings.ToLower(sr.Email), strings.ToLower(user.Email)) { + l.auditLogger.Audit(audit.AuthLoginFailedEmail, map[string]interface{}{"email": sr.Email}) + return user, errors.New("invalid email") + } + + if !utils.CheckPasswordHash(sr.Password, user.HashedPassword) { + l.auditLogger.Audit(audit.AuthLoginFailedPassword, map[string]interface{}{"email": sr.Email}) + return user, errors.New("invalid password") + } + + return user, nil +} + +// validateUsersActive performs an additional LDAP server query for the supplied emails, checking the +// returned user data for an 'active' property defined optionally in the config. 
+// Returns same length bool 'valid' array, indexed by sorted email +func (l *ldapAuthenticator) validateUsersActive(emails []string) ([]bool, error) { + validUsers := make([]bool, len(emails)) + // If active attribute to check is not defined in config, skip + if l.config.ActiveAttribute() == "" { + // fill with valids + for i := range emails { + validUsers[i] = true + } + return validUsers, nil + } + + conn, err := l.ldapClient.CreateEphemeralConnection() + if err != nil { + l.lggr.Errorf("error in LDAP dial: ", err) + return validUsers, errors.New("unable to establish connection to LDAP server with provided URL and credentials") + } + defer conn.Close() + + // Build the full email list query to pull all 'isActive' information for each user specified in one query + filterQuery := "(|" + for _, email := range emails { + escapedEmail := ldap.EscapeFilter(email) + filterQuery = fmt.Sprintf("%s(%s=%s)", filterQuery, l.config.BaseUserAttr(), escapedEmail) + } + filterQuery = fmt.Sprintf("(&%s))", filterQuery) + searchBaseDN := fmt.Sprintf("%s,%s", l.config.UsersDN(), l.config.BaseDN()) + searchRequest := ldap.NewSearchRequest( + searchBaseDN, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, + 0, int(l.config.QueryTimeout().Seconds()), false, + filterQuery, + []string{l.config.BaseUserAttr(), l.config.ActiveAttribute()}, + nil, + ) + // Query LDAP server for the ActiveAttribute property of each specified user + results, err := conn.Search(searchRequest) + if err != nil { + l.lggr.Errorf("error searching user in LDAP query: %v", err) + return validUsers, errors.New("error searching users in LDAP directory") + } + + // Ensure user response entries + if len(results.Entries) == 0 { + return validUsers, ErrUserNotInUpstream + } + + // Pull expected ActiveAttribute value from list of string possible values + // keyed on email for final step to return flag bool list where order is preserved + emailToActiveMap := make(map[string]bool) + for _, result := range results.Entries { 
+ isActiveAttribute := result.GetAttributeValue(l.config.ActiveAttribute()) + uidAttribute := result.GetAttributeValue(l.config.BaseUserAttr()) + emailToActiveMap[uidAttribute] = isActiveAttribute == l.config.ActiveAttributeAllowedValue() + } + for i, email := range emails { + active, ok := emailToActiveMap[email] + if ok && active { + validUsers[i] = true + } + } + + return validUsers, nil +} + +// ldapGroupMembersListToUser queries the LDAP server given a conn for a list of uniqueMember who are part of the parameterized group. Reused by sync.go +func ldapGroupMembersListToUser( + conn LDAPConn, + groupNameCN string, + roleToAssign sessions.UserRole, + groupsDN string, + baseDN string, + queryTimeout time.Duration, + lggr logger.Logger, +) ([]sessions.User, error) { + users := []sessions.User{} + // Prepare and query the GroupsDN for the specified group name + searchBaseDN := fmt.Sprintf("%s, %s", groupsDN, baseDN) + filterQuery := fmt.Sprintf("(&(cn=%s))", groupNameCN) + searchRequest := ldap.NewSearchRequest( + searchBaseDN, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, + 0, int(queryTimeout.Seconds()), false, + filterQuery, + []string{UniqueMemberAttribute}, + nil, + ) + result, err := conn.Search(searchRequest) + if err != nil { + lggr.Errorf("error searching group members in LDAP query: %v", err) + return users, errors.New("error searching group members in LDAP directory") + } + + // The result.Entry query response here is for the 'group' type of LDAP resource. The result should be a single entry, containing + // a single Attribute named 'uniqueMember' containing a list of string Values. These Values are strings that should be returned in + // the format "uid=test.user@example.com,ou=users,dc=example,dc=com". 
The 'uid' is then manually parsed here as the library does + // not expose the functionality + if len(result.Entries) != 1 { + lggr.Errorf("unexpected length of query results for group user members, expected one got %d", len(result.Entries)) + return users, errors.New("error searching group members in LDAP directory") + } + + // Get string list of members from 'uniqueMember' attribute + uniqueMemberValues := result.Entries[0].GetAttributeValues(UniqueMemberAttribute) + for _, uniqueMemberEntry := range uniqueMemberValues { + parts := strings.Split(uniqueMemberEntry, ",") // Split attribute value on comma (uid, ou, dc parts) + uidComponent := "" + for _, part := range parts { // Iterate parts for "uid=" + if strings.HasPrefix(part, "uid=") { + uidComponent = part + break + } + } + if uidComponent == "" { + lggr.Errorf("unexpected LDAP group query response for unique members - expected list of LDAP Values for uniqueMember containing LDAP strings in format uid=test.user@example.com,ou=users,dc=example,dc=com. 
Got %s", uniqueMemberEntry) + continue + } + // Map each user email to the sessions.User struct + userEmail := strings.TrimPrefix(uidComponent, "uid=") + users = append(users, sessions.User{ + Email: userEmail, + Role: roleToAssign, + }) + } + return users, nil +} + +// groupSearchResultsToUserRole takes a list of LDAP group search result entries and returns the associated +// internal user role based on the group name mappings defined in the configuration +func (l *ldapAuthenticator) groupSearchResultsToUserRole(ldapGroups []*ldap.Entry) (sessions.UserRole, error) { + return GroupSearchResultsToUserRole( + ldapGroups, + l.config.AdminUserGroupCN(), + l.config.EditUserGroupCN(), + l.config.RunUserGroupCN(), + l.config.ReadUserGroupCN(), + ) +} + +func GroupSearchResultsToUserRole(ldapGroups []*ldap.Entry, adminCN string, editCN string, runCN string, readCN string) (sessions.UserRole, error) { + // If defined Admin group name is present in groups search result, return UserRoleAdmin + for _, group := range ldapGroups { + if group.GetAttributeValue("cn") == adminCN { + return sessions.UserRoleAdmin, nil + } + } + // Check edit role + for _, group := range ldapGroups { + if group.GetAttributeValue("cn") == editCN { + return sessions.UserRoleEdit, nil + } + } + // Check run role + for _, group := range ldapGroups { + if group.GetAttributeValue("cn") == runCN { + return sessions.UserRoleRun, nil + } + } + // Check view role + for _, group := range ldapGroups { + if group.GetAttributeValue("cn") == readCN { + return sessions.UserRoleView, nil + } + } + // No role group found, error + return sessions.UserRoleView, ErrUserNoLDAPGroups +} + +const constantTimeEmailLength = 256 + +func constantTimeEmailCompare(left, right string) bool { + length := mathutil.Max(constantTimeEmailLength, len(left), len(right)) + leftBytes := make([]byte, length) + rightBytes := make([]byte, length) + copy(leftBytes, left) + copy(rightBytes, right) + return subtle.ConstantTimeCompare(leftBytes, 
rightBytes) == 1 +} diff --git a/core/sessions/ldapauth/ldap_test.go b/core/sessions/ldapauth/ldap_test.go new file mode 100644 index 00000000..00cd651b --- /dev/null +++ b/core/sessions/ldapauth/ldap_test.go @@ -0,0 +1,639 @@ +package ldapauth_test + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/go-ldap/ldap/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/sessions/ldapauth" + "github.com/goplugin/pluginv3.0/v2/core/sessions/ldapauth/mocks" +) + +// Setup LDAP Auth authenticator +func setupAuthenticationProvider(t *testing.T, ldapClient ldapauth.LDAPClient) (*sqlx.DB, sessions.AuthenticationProvider) { + t.Helper() + + cfg := ldapauth.TestConfig{} + db := pgtest.NewSqlxDB(t) + ldapAuthProvider, err := ldapauth.NewTestLDAPAuthenticator(db, pgtest.NewQConfig(true), &cfg, true, logger.TestLogger(t), &audit.AuditLoggerService{}) + if err != nil { + t.Fatalf("Error constructing NewTestLDAPAuthenticator: %v\n", err) + } + + // Override the LDAPClient responsible for returning the *ldap.Conn struct with Mock + ldapAuthProvider.SetLDAPClient(ldapClient) + return db, ldapAuthProvider +} + +func TestORM_FindUser_Empty(t *testing.T) { + t.Parallel() + + mockLdapClient := mocks.NewLDAPClient(t) + mockLdapConnProvider := mocks.NewLDAPConn(t) + mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil) + mockLdapConnProvider.On("Close").Return(nil) + + // Initilaize LDAP Authentication Provider with mock client + _, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient) + + // User not in upstream, 
return no entry + expectedResults := ldap.SearchResult{} + + // On search performed for validateUsersActive + mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedResults, nil) + + // Not in upstream, no local admin users, expect error + _, err := ldapAuthProvider.FindUser("unknown-user") + require.ErrorContains(t, err, "LDAP query returned no matching users") +} + +func TestORM_FindUser_NoGroups(t *testing.T) { + t.Parallel() + + mockLdapClient := mocks.NewLDAPClient(t) + mockLdapConnProvider := mocks.NewLDAPConn(t) + mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil) + mockLdapConnProvider.On("Close").Return(nil) + + // Initilaize LDAP Authentication Provider with mock client + _, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient) + + // User present in Upstream but no groups assigned + user1 := cltest.MustRandomUser(t) + expectedResults := ldap.SearchResult{ + Entries: []*ldap.Entry{ + { + DN: "cn=User One,ou=Users,dc=example,dc=com", + Attributes: []*ldap.EntryAttribute{ + { + Name: "organizationalStatus", + Values: []string{"ACTIVE"}, + }, + { + Name: "uid", + Values: []string{user1.Email}, + }, + }, + }, + }, + } + + // On search performed for validateUsersActive + mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedResults, nil) + + // No Groups, expect error + _, err := ldapAuthProvider.FindUser(user1.Email) + require.ErrorContains(t, err, "user present in directory, but matching no role groups assigned") +} + +func TestORM_FindUser_NotActive(t *testing.T) { + t.Parallel() + + mockLdapClient := mocks.NewLDAPClient(t) + mockLdapConnProvider := mocks.NewLDAPConn(t) + mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil) + mockLdapConnProvider.On("Close").Return(nil) + + // Initilaize LDAP Authentication Provider with mock client + _, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient) + + // 
User present in Upstream but not active + user1 := cltest.MustRandomUser(t) + expectedResults := ldap.SearchResult{ + Entries: []*ldap.Entry{ + { + DN: "cn=User One,ou=Users,dc=example,dc=com", + Attributes: []*ldap.EntryAttribute{ + { + Name: "organizationalStatus", + Values: []string{"INACTIVE"}, + }, + { + Name: "uid", + Values: []string{user1.Email}, + }, + }, + }, + }, + } + + // On search performed for validateUsersActive + mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedResults, nil) + + // User not active, expect error + _, err := ldapAuthProvider.FindUser(user1.Email) + require.ErrorContains(t, err, "user not active") +} + +func TestORM_FindUser_Single(t *testing.T) { + t.Parallel() + + mockLdapClient := mocks.NewLDAPClient(t) + mockLdapConnProvider := mocks.NewLDAPConn(t) + mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil) + mockLdapConnProvider.On("Close").Return(nil) + + // Initilaize LDAP Authentication Provider with mock client + _, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient) + + // User present and valid + user1 := cltest.MustRandomUser(t) + expectedResults := ldap.SearchResult{ // Users query + Entries: []*ldap.Entry{ + { + DN: "cn=User One,ou=Users,dc=example,dc=com", + Attributes: []*ldap.EntryAttribute{ + { + Name: "organizationalStatus", + Values: []string{"ACTIVE"}, + }, + { + Name: "uid", + Values: []string{user1.Email}, + }, + }, + }, + }, + } + expectedGroupResults := ldap.SearchResult{ // Groups query + Entries: []*ldap.Entry{ + { + DN: "cn=NodeEditors,ou=Users,dc=example,dc=com", + Attributes: []*ldap.EntryAttribute{ + { + Name: "cn", + Values: []string{"NodeEditors"}, + }, + }, + }, + }, + } + + // On search performed for validateUsersActive + mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedResults, nil).Once() + + // Second call on user groups search + mockLdapConnProvider.On("Search", 
mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedGroupResults, nil).Once() + + // User active, and has editor group. Expect success + user, err := ldapAuthProvider.FindUser(user1.Email) + require.NoError(t, err) + require.Equal(t, user1.Email, user.Email) + require.Equal(t, sessions.UserRoleEdit, user.Role) +} + +func TestORM_FindUser_FallbackMatchLocalAdmin(t *testing.T) { + t.Parallel() + + // Initilaize LDAP Authentication Provider with mock client + mockLdapClient := mocks.NewLDAPClient(t) + _, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient) + + // Not in upstream, but utilize text fixture admin user presence in test DB. Succeed + user, err := ldapAuthProvider.FindUser(cltest.APIEmailAdmin) + require.NoError(t, err) + require.Equal(t, cltest.APIEmailAdmin, user.Email) + require.Equal(t, sessions.UserRoleAdmin, user.Role) +} + +func TestORM_FindUserByAPIToken_Success(t *testing.T) { + // Initilaize LDAP Authentication Provider with mock client + mockLdapClient := mocks.NewLDAPClient(t) + db, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient) + + // Ensure valid tokens return a user with role + testEmail := "test@test.com" + apiToken := "example" + _, err := db.Exec("INSERT INTO ldap_user_api_tokens values ($1, 'edit', false, $2, '', '', now())", testEmail, apiToken) + require.NoError(t, err) + + // Found user by API token in specific ldap_user_api_tokens table + user, err := ldapAuthProvider.FindUserByAPIToken(apiToken) + require.NoError(t, err) + require.Equal(t, testEmail, user.Email) + require.Equal(t, sessions.UserRoleEdit, user.Role) +} + +func TestORM_FindUserByAPIToken_Expired(t *testing.T) { + cfg := ldapauth.TestConfig{} + + // Initilaize LDAP Authentication Provider with mock client + mockLdapClient := mocks.NewLDAPClient(t) + db, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient) + + // Ensure valid tokens return a user with role + testEmail := "test@test.com" + apiToken := "example" + 
expiredTime := time.Now().Add(-cfg.UserAPITokenDuration().Duration()) + _, err := db.Exec("INSERT INTO ldap_user_api_tokens values ($1, 'edit', false, $2, '', '', $3)", testEmail, apiToken, expiredTime) + require.NoError(t, err) + + // Token found, but expired. Expect error + _, err = ldapAuthProvider.FindUserByAPIToken(apiToken) + require.Equal(t, sessions.ErrUserSessionExpired, err) +} + +func TestORM_ListUsers_Full(t *testing.T) { + t.Parallel() + + mockLdapClient := mocks.NewLDAPClient(t) + mockLdapConnProvider := mocks.NewLDAPConn(t) + mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil) + mockLdapConnProvider.On("Close").Return(nil) + + // Initilaize LDAP Authentication Provider with mock client + _, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient) + + user1 := cltest.MustRandomUser(t) + user2 := cltest.MustRandomUser(t) + user3 := cltest.MustRandomUser(t) + user4 := cltest.MustRandomUser(t) + user5 := cltest.MustRandomUser(t) + user6 := cltest.MustRandomUser(t) + + // LDAP Group queries per role - admin + mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&ldap.SearchResult{ + Entries: []*ldap.Entry{ + { + DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeAdminsGroupCN), + Attributes: []*ldap.EntryAttribute{ + { + Name: ldapauth.UniqueMemberAttribute, + Values: []string{ + fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user1.Email), + fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user2.Email), + }, + }, + }, + }, + }, + }, nil).Once() + // LDAP Group queries per role - edit + mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&ldap.SearchResult{ + Entries: []*ldap.Entry{ + { + DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeEditorsGroupCN), + Attributes: []*ldap.EntryAttribute{ + { + Name: ldapauth.UniqueMemberAttribute, + Values: []string{ + fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user3.Email), + 
}, + }, + }, + }, + }, + }, nil).Once() + // LDAP Group queries per role - run + mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&ldap.SearchResult{ + Entries: []*ldap.Entry{ + { + DN: "cn=NodeRunners,ou=Groups,dc=example,dc=com", + Attributes: []*ldap.EntryAttribute{ + { + Name: ldapauth.UniqueMemberAttribute, + Values: []string{ + fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user4.Email), + fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user4.Email), // Test deduped + fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user5.Email), + }, + }, + }, + }, + }, + }, nil).Once() + // LDAP Group queries per role - view + mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&ldap.SearchResult{ + Entries: []*ldap.Entry{ + { + DN: "cn=NodeReadOnly,ou=Groups,dc=example,dc=com", + Attributes: []*ldap.EntryAttribute{ + { + Name: ldapauth.UniqueMemberAttribute, + Values: []string{ + fmt.Sprintf("uid=%s,ou=users,dc=example,dc=com", user6.Email), + }, + }, + }, + }, + }, + }, nil).Once() + // Lastly followed by IsActive lookup + type userActivePair struct { + email string + active string + } + emailsActive := []userActivePair{ + {user1.Email, "ACTIVE"}, + {user2.Email, "INACTIVE"}, + {user3.Email, "ACTIVE"}, + {user4.Email, "ACTIVE"}, + {user5.Email, "INACTIVE"}, + {user6.Email, "ACTIVE"}, + } + listUpstreamUsersQuery := ldap.SearchResult{} + for _, upstreamUser := range emailsActive { + listUpstreamUsersQuery.Entries = append(listUpstreamUsersQuery.Entries, &ldap.Entry{ + DN: "cn=User,ou=Users,dc=example,dc=com", + Attributes: []*ldap.EntryAttribute{ + { + Name: "organizationalStatus", + Values: []string{upstreamUser.active}, + }, + { + Name: "uid", + Values: []string{upstreamUser.email}, + }, + }, + }, + ) + } + mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&listUpstreamUsersQuery, nil).Once() + + // Asserts 'uid=' parsing log in ldapGroupMembersListToUser + // 
Expected full list of users above, including local admin user, excluding 'inactive' and duplicate users + users, err := ldapAuthProvider.ListUsers() + require.NoError(t, err) + require.Equal(t, users[0].Email, user1.Email) + require.Equal(t, users[0].Role, sessions.UserRoleAdmin) + require.Equal(t, users[1].Email, user3.Email) // User 2 inactive + require.Equal(t, users[1].Role, sessions.UserRoleEdit) + require.Equal(t, users[2].Email, user4.Email) + require.Equal(t, users[2].Role, sessions.UserRoleRun) + require.Equal(t, users[3].Email, user6.Email) // User 5 inactive + require.Equal(t, users[3].Role, sessions.UserRoleView) + require.Equal(t, users[4].Email, cltest.APIEmailAdmin) // Text fixture user is local admin included as well + require.Equal(t, users[4].Role, sessions.UserRoleAdmin) +} + +func TestORM_CreateSession_UpstreamBind(t *testing.T) { + t.Parallel() + + mockLdapClient := mocks.NewLDAPClient(t) + mockLdapConnProvider := mocks.NewLDAPConn(t) + mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil) + mockLdapConnProvider.On("Close").Return(nil) + + // Initilaize LDAP Authentication Provider with mock client + _, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient) + + // Upsream user present + user1 := cltest.MustRandomUser(t) + expectedResults := ldap.SearchResult{ // Users query + Entries: []*ldap.Entry{ + { + DN: "cn=User One,ou=Users,dc=example,dc=com", + Attributes: []*ldap.EntryAttribute{ + { + Name: "organizationalStatus", + Values: []string{"ACTIVE"}, + }, + { + Name: "uid", + Values: []string{user1.Email}, + }, + }, + }, + }, + } + expectedGroupResults := ldap.SearchResult{ // Groups query + Entries: []*ldap.Entry{ + { + DN: "cn=NodeEditors,ou=Users,dc=example,dc=com", + Attributes: []*ldap.EntryAttribute{ + { + Name: "cn", + Values: []string{"NodeEditors"}, + }, + }, + }, + }, + } + + // On search performed for validateUsersActive + mockLdapConnProvider.On("Search", 
mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedResults, nil).Once() + + // Second call on user groups search + mockLdapConnProvider.On("Search", mock.AnythingOfType("*ldap.SearchRequest")).Return(&expectedGroupResults, nil).Once() + + // User active, and has editor group. Expect success + mockLdapConnProvider.On("Bind", mock.Anything, cltest.Password).Return(nil) + sessionRequest := sessions.SessionRequest{ + Email: user1.Email, + Password: cltest.Password, + } + + _, err := ldapAuthProvider.CreateSession(sessionRequest) + require.NoError(t, err) +} + +func TestORM_CreateSession_LocalAdminFallbackLogin(t *testing.T) { + t.Parallel() + + mockLdapClient := mocks.NewLDAPClient(t) + mockLdapConnProvider := mocks.NewLDAPConn(t) + mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil) + mockLdapConnProvider.On("Close").Return(nil) + + // Initilaize LDAP Authentication Provider with mock client + _, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient) + + // Fail the bind to trigger 'localLoginFallback' - local admin users should still be able to login + // regardless of whether the authentication provider is remote or not + mockLdapConnProvider.On("Bind", mock.Anything, cltest.Password).Return(errors.New("unable to login via LDAP server")).Once() + + // User active, and has editor group. Expect success + sessionRequest := sessions.SessionRequest{ + Email: cltest.APIEmailAdmin, + Password: cltest.Password, + } + + _, err := ldapAuthProvider.CreateSession(sessionRequest) + require.NoError(t, err) + + // Finally, assert login failing altogether + // User active, and has editor group. 
Expect success + mockLdapConnProvider.On("Bind", mock.Anything, "incorrect-password").Return(errors.New("unable to login via LDAP server")).Once() + sessionRequest = sessions.SessionRequest{ + Email: cltest.APIEmailAdmin, + Password: "incorrect-password", + } + + _, err = ldapAuthProvider.CreateSession(sessionRequest) + require.ErrorContains(t, err, "invalid password") +} + +func TestORM_SetPassword_LocalAdminFallbackLogin(t *testing.T) { + t.Parallel() + + mockLdapClient := mocks.NewLDAPClient(t) + mockLdapConnProvider := mocks.NewLDAPConn(t) + mockLdapClient.On("CreateEphemeralConnection").Return(mockLdapConnProvider, nil) + mockLdapConnProvider.On("Close").Return(nil) + + // Initilaize LDAP Authentication Provider with mock client + _, ldapAuthProvider := setupAuthenticationProvider(t, mockLdapClient) + + // Fail the bind to trigger 'localLoginFallback' - local admin users should still be able to login + // regardless of whether the authentication provider is remote or not + mockLdapConnProvider.On("Bind", mock.Anything, cltest.Password).Return(errors.New("unable to login via LDAP server")).Once() + + // User active, and has editor group. Expect success + sessionRequest := sessions.SessionRequest{ + Email: cltest.APIEmailAdmin, + Password: cltest.Password, + } + + _, err := ldapAuthProvider.CreateSession(sessionRequest) + require.NoError(t, err) + + // Finally, assert login failing altogether + // User active, and has editor group. 
Expect success + mockLdapConnProvider.On("Bind", mock.Anything, "incorrect-password").Return(errors.New("unable to login via LDAP server")).Once() + sessionRequest = sessions.SessionRequest{ + Email: cltest.APIEmailAdmin, + Password: "incorrect-password", + } + + _, err = ldapAuthProvider.CreateSession(sessionRequest) + require.ErrorContains(t, err, "invalid password") +} + +func TestORM_MapSearchGroups(t *testing.T) { + t.Parallel() + + cfg := ldapauth.TestConfig{} + + tests := []struct { + name string + groupsQuerySearchResult []*ldap.Entry + wantMappedRole sessions.UserRole + wantErr error + }{ + { + "user in admin group only", + []*ldap.Entry{ + { + DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeAdminsGroupCN), + Attributes: []*ldap.EntryAttribute{ + { + Name: "cn", + Values: []string{ldapauth.NodeAdminsGroupCN}, + }, + }, + }, + }, + sessions.UserRoleAdmin, + nil, + }, + { + "user in edit group", + []*ldap.Entry{ + { + DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeEditorsGroupCN), + Attributes: []*ldap.EntryAttribute{ + { + Name: "cn", + Values: []string{ldapauth.NodeEditorsGroupCN}, + }, + }, + }, + }, + sessions.UserRoleEdit, + nil, + }, + { + "user in run group", + []*ldap.Entry{ + { + DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeRunnersGroupCN), + Attributes: []*ldap.EntryAttribute{ + { + Name: "cn", + Values: []string{ldapauth.NodeRunnersGroupCN}, + }, + }, + }, + }, + sessions.UserRoleRun, + nil, + }, + { + "user in view role", + []*ldap.Entry{ + { + DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeReadOnlyGroupCN), + Attributes: []*ldap.EntryAttribute{ + { + Name: "cn", + Values: []string{ldapauth.NodeReadOnlyGroupCN}, + }, + }, + }, + }, + sessions.UserRoleView, + nil, + }, + { + "user in none", + []*ldap.Entry{}, + sessions.UserRole(""), // ignored, error case + ldapauth.ErrUserNoLDAPGroups, + }, + { + "user in run and view", + []*ldap.Entry{ + { + DN: 
fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeRunnersGroupCN), + Attributes: []*ldap.EntryAttribute{ + { + Name: "cn", + Values: []string{ldapauth.NodeRunnersGroupCN}, + }, + }, + }, + { + DN: fmt.Sprintf("cn=%s,ou=Groups,dc=example,dc=com", ldapauth.NodeReadOnlyGroupCN), + Attributes: []*ldap.EntryAttribute{ + { + Name: "cn", + Values: []string{ldapauth.NodeReadOnlyGroupCN}, + }, + }, + }, + }, + sessions.UserRoleRun, // Take highest role + nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + role, err := ldapauth.GroupSearchResultsToUserRole( + test.groupsQuerySearchResult, + cfg.AdminUserGroupCN(), + cfg.EditUserGroupCN(), + cfg.RunUserGroupCN(), + cfg.ReadUserGroupCN(), + ) + if test.wantErr != nil { + assert.Equal(t, test.wantErr, err) + } else { + assert.Equal(t, test.wantMappedRole, role) + } + }) + } +} diff --git a/core/sessions/ldapauth/mocks/ldap_client.go b/core/sessions/ldapauth/mocks/ldap_client.go new file mode 100644 index 00000000..6e5ea598 --- /dev/null +++ b/core/sessions/ldapauth/mocks/ldap_client.go @@ -0,0 +1,57 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + ldapauth "github.com/goplugin/pluginv3.0/v2/core/sessions/ldapauth" + mock "github.com/stretchr/testify/mock" +) + +// LDAPClient is an autogenerated mock type for the LDAPClient type +type LDAPClient struct { + mock.Mock +} + +// CreateEphemeralConnection provides a mock function with given fields: +func (_m *LDAPClient) CreateEphemeralConnection() (ldapauth.LDAPConn, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for CreateEphemeralConnection") + } + + var r0 ldapauth.LDAPConn + var r1 error + if rf, ok := ret.Get(0).(func() (ldapauth.LDAPConn, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() ldapauth.LDAPConn); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ldapauth.LDAPConn) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewLDAPClient creates a new instance of LDAPClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLDAPClient(t interface { + mock.TestingT + Cleanup(func()) +}) *LDAPClient { + mock := &LDAPClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/sessions/ldapauth/mocks/ldap_conn.go b/core/sessions/ldapauth/mocks/ldap_conn.go new file mode 100644 index 00000000..8b4fff82 --- /dev/null +++ b/core/sessions/ldapauth/mocks/ldap_conn.go @@ -0,0 +1,94 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + ldap "github.com/go-ldap/ldap/v3" + + mock "github.com/stretchr/testify/mock" +) + +// LDAPConn is an autogenerated mock type for the LDAPConn type +type LDAPConn struct { + mock.Mock +} + +// Bind provides a mock function with given fields: username, password +func (_m *LDAPConn) Bind(username string, password string) error { + ret := _m.Called(username, password) + + if len(ret) == 0 { + panic("no return value specified for Bind") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, string) error); ok { + r0 = rf(username, password) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Close provides a mock function with given fields: +func (_m *LDAPConn) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Search provides a mock function with given fields: searchRequest +func (_m *LDAPConn) Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) { + ret := _m.Called(searchRequest) + + if len(ret) == 0 { + panic("no return value specified for Search") + } + + var r0 *ldap.SearchResult + var r1 error + if rf, ok := ret.Get(0).(func(*ldap.SearchRequest) (*ldap.SearchResult, error)); ok { + return rf(searchRequest) + } + if rf, ok := ret.Get(0).(func(*ldap.SearchRequest) *ldap.SearchResult); ok { + r0 = rf(searchRequest) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ldap.SearchResult) + } + } + + if rf, ok := ret.Get(1).(func(*ldap.SearchRequest) error); ok { + r1 = rf(searchRequest) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewLDAPConn creates a new instance of LDAPConn. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewLDAPConn(t interface { + mock.TestingT + Cleanup(func()) +}) *LDAPConn { + mock := &LDAPConn{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/sessions/ldapauth/sync.go b/core/sessions/ldapauth/sync.go new file mode 100644 index 00000000..99190c14 --- /dev/null +++ b/core/sessions/ldapauth/sync.go @@ -0,0 +1,343 @@ +package ldapauth + +import ( + "errors" + "fmt" + "time" + + "github.com/go-ldap/ldap/v3" + "github.com/jmoiron/sqlx" + "github.com/lib/pq" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/config" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/sessions" +) + +type LDAPServerStateSyncer struct { + q pg.Q + ldapClient LDAPClient + config config.LDAP + lggr logger.Logger + nextSyncTime time.Time +} + +// NewLDAPServerStateSync creates a reaper that cleans stale sessions from the store. +func NewLDAPServerStateSync( + db *sqlx.DB, + pgCfg pg.QConfig, + config config.LDAP, + lggr logger.Logger, +) *utils.SleeperTask { + namedLogger := lggr.Named("LDAPServerStateSync") + serverSync := LDAPServerStateSyncer{ + q: pg.NewQ(db, namedLogger, pgCfg), + ldapClient: newLDAPClient(config), + config: config, + lggr: namedLogger, + nextSyncTime: time.Time{}, + } + // If enabled, start a background task that calls the Sync/Work function on an + // interval without needing an auth event to trigger it + // Use IsInstant to check 0 value to omit functionality. 
+ if !config.UpstreamSyncInterval().IsInstant() { + lggr.Info("LDAP Config UpstreamSyncInterval is non-zero, sync functionality will be called on a timer, respecting the UpstreamSyncRateLimit value") + serverSync.StartWorkOnTimer() + } else { + // Ensure upstream server state is synced on startup manually if interval check not set + serverSync.Work() + } + + // Start background Sync call task reactive to auth related events + serverSyncSleeperTask := utils.NewSleeperTask(&serverSync) + return serverSyncSleeperTask +} + +func (ldSync *LDAPServerStateSyncer) Name() string { + return "LDAPServerStateSync" +} + +func (ldSync *LDAPServerStateSyncer) StartWorkOnTimer() { + time.AfterFunc(ldSync.config.UpstreamSyncInterval().Duration(), ldSync.StartWorkOnTimer) + ldSync.Work() +} + +func (ldSync *LDAPServerStateSyncer) Work() { + // Purge expired ldap_sessions and ldap_user_api_tokens + recordCreationStaleThreshold := ldSync.config.SessionTimeout().Before(time.Now()) + err := ldSync.deleteStaleSessions(recordCreationStaleThreshold) + if err != nil { + ldSync.lggr.Error("unable to expire local LDAP sessions: ", err) + } + recordCreationStaleThreshold = ldSync.config.UserAPITokenDuration().Before(time.Now()) + err = ldSync.deleteStaleAPITokens(recordCreationStaleThreshold) + if err != nil { + ldSync.lggr.Error("unable to expire user API tokens: ", err) + } + + // Optional rate limiting check to limit the amount of upstream LDAP server queries performed + if !ldSync.config.UpstreamSyncRateLimit().IsInstant() { + if !time.Now().After(ldSync.nextSyncTime) { + return + } + + // Enough time has elapsed to sync again, store the time for when next sync is allowed and begin sync + ldSync.nextSyncTime = time.Now().Add(ldSync.config.UpstreamSyncRateLimit().Duration()) + } + + ldSync.lggr.Info("Begin Upstream LDAP provider state sync after checking time against config UpstreamSyncInterval and UpstreamSyncRateLimit") + + // For each defined role/group, query for the list of group 
 members to gather the full list of possible users
+	users := []sessions.User{}
+
+	conn, err := ldSync.ldapClient.CreateEphemeralConnection()
+	if err != nil {
+		ldSync.lggr.Errorf("Failed to Dial LDAP Server", err)
+		return
+	}
+	// Root level root user auth with credentials provided from config
+	bindStr := ldSync.config.BaseUserAttr() + "=" + ldSync.config.ReadOnlyUserLogin() + "," + ldSync.config.BaseDN()
+	if err = conn.Bind(bindStr, ldSync.config.ReadOnlyUserPass()); err != nil {
+		ldSync.lggr.Errorf("Unable to login as initial root LDAP user", err)
+	}
+	defer conn.Close()
+
+	// Query for list of uniqueMember IDs present in Admin group
+	adminUsers, err := ldSync.ldapGroupMembersListToUser(conn, ldSync.config.AdminUserGroupCN(), sessions.UserRoleAdmin)
+	if err != nil {
+		ldSync.lggr.Errorf("Error in ldapGroupMembersListToUser: ", err)
+		return
+	}
+	// Query for list of uniqueMember IDs present in Edit group
+	editUsers, err := ldSync.ldapGroupMembersListToUser(conn, ldSync.config.EditUserGroupCN(), sessions.UserRoleEdit)
+	if err != nil {
+		ldSync.lggr.Errorf("Error in ldapGroupMembersListToUser: ", err)
+		return
+	}
+	// Query for list of uniqueMember IDs present in Run group
+	runUsers, err := ldSync.ldapGroupMembersListToUser(conn, ldSync.config.RunUserGroupCN(), sessions.UserRoleRun)
+	if err != nil {
+		ldSync.lggr.Errorf("Error in ldapGroupMembersListToUser: ", err)
+		return
+	}
+	// Query for list of uniqueMember IDs present in Read group
+	readUsers, err := ldSync.ldapGroupMembersListToUser(conn, ldSync.config.ReadUserGroupCN(), sessions.UserRoleView)
+	if err != nil {
+		ldSync.lggr.Errorf("Error in ldapGroupMembersListToUser: ", err)
+		return
+	}
+
+	users = append(users, adminUsers...)
+	users = append(users, editUsers...)
+	users = append(users, runUsers...)
+	users = append(users, readUsers...)
+ + // Dedupe preserving order of highest role (sorted) + // Preserve members as a map for future lookup + upstreamUserStateMap := make(map[string]sessions.User) + dedupedEmails := []string{} + for _, user := range users { + if _, ok := upstreamUserStateMap[user.Email]; !ok { + upstreamUserStateMap[user.Email] = user + dedupedEmails = append(dedupedEmails, user.Email) + } + } + + // For each unique user in list of active sessions, check for 'Is Active' propery if defined in the config. Some LDAP providers + // list group members that are no longer marked as active + usersActiveFlags, err := ldSync.validateUsersActive(dedupedEmails, conn) + if err != nil { + ldSync.lggr.Errorf("Error validating supplied user list: ", err) + } + // Remove users in the upstreamUserStateMap source of truth who are part of groups but marked as deactivated/no-active + for i, active := range usersActiveFlags { + if !active { + delete(upstreamUserStateMap, dedupedEmails[i]) + } + } + + // upstreamUserStateMap is now the most up to date source of truth + // Now sync database sessions and roles with new data + err = ldSync.q.Transaction(func(tx pg.Queryer) error { + // First, purge users present in the local ldap_sessions table but not in the upstream server + type LDAPSession struct { + UserEmail string + UserRole sessions.UserRole + } + var existingSessions []LDAPSession + if err = tx.Select(&existingSessions, "SELECT user_email, user_role FROM ldap_sessions WHERE localauth_user = false"); err != nil { + return fmt.Errorf("unable to query ldap_sessions table: %w", err) + } + var existingAPITokens []LDAPSession + if err = tx.Select(&existingAPITokens, "SELECT user_email, user_role FROM ldap_user_api_tokens WHERE localauth_user = false"); err != nil { + return fmt.Errorf("unable to query ldap_user_api_tokens table: %w", err) + } + + // Create existing sessions and API tokens lookup map for later + existingSessionsMap := make(map[string]LDAPSession) + for _, sess := range existingSessions { + 
existingSessionsMap[sess.UserEmail] = sess + } + existingAPITokensMap := make(map[string]LDAPSession) + for _, sess := range existingAPITokens { + existingAPITokensMap[sess.UserEmail] = sess + } + + // Populate list of session emails present in the local session table but not in the upstream state + emailsToPurge := []interface{}{} + for _, ldapSession := range existingSessions { + if _, ok := upstreamUserStateMap[ldapSession.UserEmail]; !ok { + emailsToPurge = append(emailsToPurge, ldapSession.UserEmail) + } + } + // Likewise for API Tokens table + apiTokenEmailsToPurge := []interface{}{} + for _, ldapSession := range existingAPITokens { + if _, ok := upstreamUserStateMap[ldapSession.UserEmail]; !ok { + apiTokenEmailsToPurge = append(apiTokenEmailsToPurge, ldapSession.UserEmail) + } + } + + // Remove any active sessions this user may have + if len(emailsToPurge) > 0 { + _, err = ldSync.q.Exec("DELETE FROM ldap_sessions WHERE user_email = ANY($1)", pq.Array(emailsToPurge)) + if err != nil { + return err + } + } + + // Remove any active API tokens this user may have + if len(apiTokenEmailsToPurge) > 0 { + _, err = ldSync.q.Exec("DELETE FROM ldap_user_api_tokens WHERE user_email = ANY($1)", pq.Array(apiTokenEmailsToPurge)) + if err != nil { + return err + } + } + + // For each user session row, update role to match state of user map from upstream source + queryWhenClause := "" + emailValues := []interface{}{} + // Prepare CASE WHEN query statement with parameterized argument $n placeholders and matching role based on index + for email, user := range upstreamUserStateMap { + // Only build on SET CASE statement per local session and API token role, not for each upstream user value + _, sessionOk := existingSessionsMap[email] + _, tokenOk := existingAPITokensMap[email] + if !sessionOk && !tokenOk { + continue + } + emailValues = append(emailValues, email) + queryWhenClause += fmt.Sprintf("WHEN user_email = $%d THEN '%s' ", len(emailValues), user.Role) + } + + // If 
there are remaining user entries to update + if len(emailValues) != 0 { + // Set new role state for all rows in single Exec + query := fmt.Sprintf("UPDATE ldap_sessions SET user_role = CASE %s ELSE user_role END", queryWhenClause) + _, err = ldSync.q.Exec(query, emailValues...) + if err != nil { + return err + } + + // Update role of API tokens as well + query = fmt.Sprintf("UPDATE ldap_user_api_tokens SET user_role = CASE %s ELSE user_role END", queryWhenClause) + _, err = ldSync.q.Exec(query, emailValues...) + if err != nil { + return err + } + } + + ldSync.lggr.Info("local ldap_sessions and ldap_user_api_tokens table successfully synced with upstream LDAP state") + return nil + }) + if err != nil { + ldSync.lggr.Errorf("Error syncing local database state: ", err) + } + ldSync.lggr.Info("Upstream LDAP sync complete") +} + +// deleteStaleSessions deletes all ldap_sessions before the passed time. +func (ldSync *LDAPServerStateSyncer) deleteStaleSessions(before time.Time) error { + _, err := ldSync.q.Exec("DELETE FROM ldap_sessions WHERE created_at < $1", before) + return err +} + +// deleteStaleAPITokens deletes all ldap_user_api_tokens before the passed time. 
+func (ldSync *LDAPServerStateSyncer) deleteStaleAPITokens(before time.Time) error { + _, err := ldSync.q.Exec("DELETE FROM ldap_user_api_tokens WHERE created_at < $1", before) + return err +} + +// ldapGroupMembersListToUser queries the LDAP server given a conn for a list of uniqueMember who are part of the parameterized group +func (ldSync *LDAPServerStateSyncer) ldapGroupMembersListToUser(conn LDAPConn, groupNameCN string, roleToAssign sessions.UserRole) ([]sessions.User, error) { + users, err := ldapGroupMembersListToUser( + conn, groupNameCN, roleToAssign, ldSync.config.GroupsDN(), + ldSync.config.BaseDN(), ldSync.config.QueryTimeout(), + ldSync.lggr, + ) + if err != nil { + ldSync.lggr.Errorf("Error listing members of group (%s): %v", groupNameCN, err) + return users, errors.New("error searching group members in LDAP directory") + } + return users, nil +} + +// validateUsersActive performs an additional LDAP server query for the supplied emails, checking the +// returned user data for an 'active' property defined optionally in the config. 
+// Returns same length bool 'valid' array, order preserved +func (ldSync *LDAPServerStateSyncer) validateUsersActive(emails []string, conn LDAPConn) ([]bool, error) { + validUsers := make([]bool, len(emails)) + // If active attribute to check is not defined in config, skip + if ldSync.config.ActiveAttribute() == "" { + // pre fill with valids + for i := range emails { + validUsers[i] = true + } + return validUsers, nil + } + + // Build the full email list query to pull all 'isActive' information for each user specified in one query + filterQuery := "(|" + for _, email := range emails { + escapedEmail := ldap.EscapeFilter(email) + filterQuery = fmt.Sprintf("%s(%s=%s)", filterQuery, ldSync.config.BaseUserAttr(), escapedEmail) + } + filterQuery = fmt.Sprintf("(&%s))", filterQuery) + searchBaseDN := fmt.Sprintf("%s,%s", ldSync.config.UsersDN(), ldSync.config.BaseDN()) + searchRequest := ldap.NewSearchRequest( + searchBaseDN, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, + 0, int(ldSync.config.QueryTimeout().Seconds()), false, + filterQuery, + []string{ldSync.config.BaseUserAttr(), ldSync.config.ActiveAttribute()}, + nil, + ) + // Query LDAP server for the ActiveAttribute property of each specified user + results, err := conn.Search(searchRequest) + if err != nil { + ldSync.lggr.Errorf("Error searching user in LDAP query: %v", err) + return validUsers, errors.New("error searching users in LDAP directory") + } + // Ensure user response entries + if len(results.Entries) == 0 { + return validUsers, errors.New("no users matching email query") + } + + // Pull expected ActiveAttribute value from list of string possible values + // keyed on email for final step to return flag bool list where order is preserved + emailToActiveMap := make(map[string]bool) + for _, result := range results.Entries { + isActiveAttribute := result.GetAttributeValue(ldSync.config.ActiveAttribute()) + uidAttribute := result.GetAttributeValue(ldSync.config.BaseUserAttr()) + 
emailToActiveMap[uidAttribute] = isActiveAttribute == ldSync.config.ActiveAttributeAllowedValue() + } + for i, email := range emails { + active, ok := emailToActiveMap[email] + if ok && active { + validUsers[i] = true + } + } + + return validUsers, nil +} diff --git a/core/sessions/localauth/orm.go b/core/sessions/localauth/orm.go new file mode 100644 index 00000000..83af80d0 --- /dev/null +++ b/core/sessions/localauth/orm.go @@ -0,0 +1,366 @@ +package localauth + +import ( + "crypto/subtle" + "encoding/json" + "strings" + "time" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/utils/mathutil" + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type orm struct { + q pg.Q + sessionDuration time.Duration + lggr logger.Logger + auditLogger audit.AuditLogger +} + +// orm implements sessions.AuthenticationProvider and sessions.BasicAdminUsersORM interfaces +var _ sessions.AuthenticationProvider = (*orm)(nil) +var _ sessions.BasicAdminUsersORM = (*orm)(nil) + +func NewORM(db *sqlx.DB, sd time.Duration, lggr logger.Logger, cfg pg.QConfig, auditLogger audit.AuditLogger) sessions.AuthenticationProvider { + namedLogger := lggr.Named("LocalAuthAuthenticationProviderORM") + return &orm{ + q: pg.NewQ(db, namedLogger, cfg), + sessionDuration: sd, + lggr: lggr.Named("LocalAuthAuthenticationProviderORM"), + auditLogger: auditLogger, + } +} + +// FindUser will attempt to return an API user by email. +func (o *orm) FindUser(email string) (sessions.User, error) { + return o.findUser(email) +} + +// FindUserByAPIToken will attempt to return an API user via the user's table token_key column. 
+func (o *orm) FindUserByAPIToken(apiToken string) (user sessions.User, err error) {
+	sql := "SELECT * FROM users WHERE token_key = $1"
+	err = o.q.Get(&user, sql, apiToken)
+	return
+}
+
+func (o *orm) findUser(email string) (user sessions.User, err error) {
+	sql := "SELECT * FROM users WHERE lower(email) = lower($1)"
+	err = o.q.Get(&user, sql, email)
+	return
+}
+
+// ListUsers will load and return all user rows from the db.
+func (o *orm) ListUsers() (users []sessions.User, err error) {
+	sql := "SELECT * FROM users ORDER BY email ASC;"
+	err = o.q.Select(&users, sql)
+	return
+}
+
+// findValidSession finds an unexpired session by its ID and returns the associated email.
+func (o *orm) findValidSession(sessionID string) (email string, err error) {
+	if err := o.q.Get(&email, "SELECT email FROM sessions WHERE id = $1 AND last_used + $2 >= now() FOR UPDATE", sessionID, o.sessionDuration); err != nil {
+		o.lggr.Infof("query result: %v", email)
+		return email, errors.Wrap(err, "no matching user for provided session token")
+	}
+	return email, nil
+}
+
+// updateSessionLastUsed updates a session by its ID and sets the LastUsed field to now().
+func (o *orm) updateSessionLastUsed(sessionID string) error {
+	return o.q.ExecQ("UPDATE sessions SET last_used = now() WHERE id = $1", sessionID)
+}
+
+// AuthorizedUserWithSession will return the API user associated with the Session ID if it
+// exists and hasn't expired, and update session's LastUsed field.
+func (o *orm) AuthorizedUserWithSession(sessionID string) (user sessions.User, err error) { + if len(sessionID) == 0 { + return sessions.User{}, sessions.ErrEmptySessionID + } + + email, err := o.findValidSession(sessionID) + if err != nil { + return sessions.User{}, sessions.ErrUserSessionExpired + } + + user, err = o.findUser(email) + if err != nil { + return sessions.User{}, sessions.ErrUserSessionExpired + } + + if err := o.updateSessionLastUsed(sessionID); err != nil { + return sessions.User{}, err + } + + return user, nil +} + +// DeleteUser will delete an API User and sessions by email. +func (o *orm) DeleteUser(email string) error { + return o.q.Transaction(func(tx pg.Queryer) error { + // session table rows are deleted on cascade through the user email constraint + if _, err := tx.Exec("DELETE FROM users WHERE email = $1", email); err != nil { + return err + } + return nil + }) +} + +// DeleteUserSession will delete a session by ID. +func (o *orm) DeleteUserSession(sessionID string) error { + _, err := o.q.Exec("DELETE FROM sessions WHERE id = $1", sessionID) + return err +} + +// GetUserWebAuthn will return a list of structures representing all enrolled WebAuthn +// tokens for the user. This list must be used when logging in (for obvious reasons) but +// must also be used for registration to prevent the user from enrolling the same hardware +// token multiple times. +func (o *orm) GetUserWebAuthn(email string) ([]sessions.WebAuthn, error) { + var uwas []sessions.WebAuthn + err := o.q.Select(&uwas, "SELECT email, public_key_data FROM web_authns WHERE LOWER(email) = $1", strings.ToLower(email)) + if err != nil { + return uwas, err + } + // In the event of not found, there is no MFA on this account and it is not an error + // so this returns either an empty list or list of WebAuthn rows + return uwas, nil +} + +// CreateSession will check the password in the SessionRequest against +// the hashed API User password in the db. 
Also will check WebAuthn if it's +// enabled for that user. +func (o *orm) CreateSession(sr sessions.SessionRequest) (string, error) { + user, err := o.FindUser(sr.Email) + if err != nil { + return "", err + } + lggr := o.lggr.With("user", user.Email) + lggr.Debugw("Found user") + + // Do email and password check first to prevent extra database look up + // for MFA tokens leaking if an account has MFA tokens or not. + if !constantTimeEmailCompare(strings.ToLower(sr.Email), strings.ToLower(user.Email)) { + o.auditLogger.Audit(audit.AuthLoginFailedEmail, map[string]interface{}{"email": sr.Email}) + return "", errors.New("Invalid email") + } + + if !utils.CheckPasswordHash(sr.Password, user.HashedPassword) { + o.auditLogger.Audit(audit.AuthLoginFailedPassword, map[string]interface{}{"email": sr.Email}) + return "", errors.New("Invalid password") + } + + // Load all valid MFA tokens associated with user's email + uwas, err := o.GetUserWebAuthn(user.Email) + if err != nil { + // There was an error with the database query + lggr.Errorf("Could not fetch user's MFA data: %v", err) + return "", errors.New("MFA Error") + } + + // No webauthn tokens registered for the current user, so normal authentication is now complete + if len(uwas) == 0 { + lggr.Infof("No MFA for user. Creating Session") + session := sessions.NewSession() + _, err = o.q.Exec("INSERT INTO sessions (id, email, last_used, created_at) VALUES ($1, $2, now(), now())", session.ID, user.Email) + o.auditLogger.Audit(audit.AuthLoginSuccessNo2FA, map[string]interface{}{"email": sr.Email}) + return session.ID, err + } + + // Next check if this session request includes the required WebAuthn challenge data + // if not, return a 401 error for the frontend to prompt the user to provide this + // data in the next round trip request (tap key to include webauthn data on the login page) + if sr.WebAuthnData == "" { + lggr.Warnf("Attempted login to MFA user. 
Generating challenge for user.") + options, webauthnError := sessions.BeginWebAuthnLogin(user, uwas, sr) + if webauthnError != nil { + lggr.Errorf("Could not begin WebAuthn verification: %v", webauthnError) + return "", errors.New("MFA Error") + } + + j, jsonError := json.Marshal(options) + if jsonError != nil { + lggr.Errorf("Could not serialize WebAuthn challenge: %v", jsonError) + return "", errors.New("MFA Error") + } + + return "", errors.New(string(j)) + } + + // The user is at the final stage of logging in with MFA. We have an + // attestation back from the user, we now need to verify that it is + // correct. + err = sessions.FinishWebAuthnLogin(user, uwas, sr) + + if err != nil { + // The user does have WebAuthn enabled but failed the check + o.auditLogger.Audit(audit.AuthLoginFailed2FA, map[string]interface{}{"email": sr.Email, "error": err}) + lggr.Errorf("User sent an invalid attestation: %v", err) + return "", errors.New("MFA Error") + } + + lggr.Infof("User passed MFA authentication and login will proceed") + // This is a success so we can create the sessions + session := sessions.NewSession() + _, err = o.q.Exec("INSERT INTO sessions (id, email, last_used, created_at) VALUES ($1, $2, now(), now())", session.ID, user.Email) + if err != nil { + return "", err + } + + // Forward registered credentials for audit logs + uwasj, err := json.Marshal(uwas) + if err != nil { + lggr.Errorf("error in Marshal credentials: %s", err) + } else { + o.auditLogger.Audit(audit.AuthLoginSuccessWith2FA, map[string]interface{}{"email": sr.Email, "credential": string(uwasj)}) + } + + return session.ID, nil +} + +const constantTimeEmailLength = 256 + +func constantTimeEmailCompare(left, right string) bool { + length := mathutil.Max(constantTimeEmailLength, len(left), len(right)) + leftBytes := make([]byte, length) + rightBytes := make([]byte, length) + copy(leftBytes, left) + copy(rightBytes, right) + return subtle.ConstantTimeCompare(leftBytes, rightBytes) == 1 +} + +// 
 ClearNonCurrentSessions removes all sessions but the id passed in.
+func (o *orm) ClearNonCurrentSessions(sessionID string) error {
+	_, err := o.q.Exec("DELETE FROM sessions where id != $1", sessionID)
+	return err
+}
+
+// CreateUser creates a new API user
+func (o *orm) CreateUser(user *sessions.User) error {
+	sql := "INSERT INTO users (email, hashed_password, role, created_at, updated_at) VALUES ($1, $2, $3, now(), now()) RETURNING *"
+	return o.q.Get(user, sql, strings.ToLower(user.Email), user.HashedPassword, user.Role)
+}
+
+// UpdateRole overwrites role field of the user specified by email.
+func (o *orm) UpdateRole(email, newRole string) (sessions.User, error) {
+	var userToEdit sessions.User
+
+	if newRole == "" {
+		return userToEdit, errors.New("user role must be specified")
+	}
+
+	err := o.q.Transaction(func(tx pg.Queryer) error {
+		// First, attempt to load specified user by email
+		if err := tx.Get(&userToEdit, "SELECT * FROM users WHERE lower(email) = lower($1)", email); err != nil {
+			return errors.New("no matching user for provided email")
+		}
+
+		// Patch validated role
+		userRole, err := sessions.GetUserRole(newRole)
+		if err != nil {
+			return err
+		}
+		userToEdit.Role = userRole
+
+		_, err = tx.Exec("DELETE FROM sessions WHERE email = lower($1)", email)
+		if err != nil {
+			o.lggr.Errorf("Failed to purge user sessions for UpdateRole", "err", err)
+			return errors.New("error updating API user")
+		}
+
+		sql := "UPDATE users SET role = $1, updated_at = now() WHERE lower(email) = lower($2) RETURNING *"
+		if err := tx.Get(&userToEdit, sql, userToEdit.Role, email); err != nil {
+			o.lggr.Errorf("Error updating API user", "err", err)
+			return errors.New("error updating API user")
+		}
+
+		return nil
+	})
+
+	return userToEdit, err
+}
+
+// SetPassword hashes the given new password and updates the user's stored password hash.
+func (o *orm) SetPassword(user *sessions.User, newPassword string) error { + hashedPassword, err := utils.HashPassword(newPassword) + if err != nil { + return err + } + sql := "UPDATE users SET hashed_password = $1, updated_at = now() WHERE email = $2 RETURNING *" + return o.q.Get(user, sql, hashedPassword, user.Email) +} + +// TestPassword checks plaintext user provided password with hashed database password, returns nil if matched +func (o *orm) TestPassword(email string, password string) error { + var hashedPassword string + if err := o.q.Get(&hashedPassword, "SELECT hashed_password FROM users WHERE lower(email) = lower($1)", email); err != nil { + return errors.New("no matching user for provided email") + } + if !utils.CheckPasswordHash(password, hashedPassword) { + return errors.New("passwords don't match") + } + return nil +} + +func (o *orm) CreateAndSetAuthToken(user *sessions.User) (*auth.Token, error) { + newToken := auth.NewToken() + + err := o.SetAuthToken(user, newToken) + if err != nil { + return nil, err + } + + return newToken, nil +} + +// SetAuthToken updates the user to use the given Authentication Token. +func (o *orm) SetAuthToken(user *sessions.User, token *auth.Token) error { + salt := utils.NewSecret(utils.DefaultSecretSize) + hashedSecret, err := auth.HashedSecret(token, salt) + if err != nil { + return errors.Wrap(err, "user") + } + sql := "UPDATE users SET token_salt = $1, token_key = $2, token_hashed_secret = $3, updated_at = now() WHERE email = $4 RETURNING *" + return o.q.Get(user, sql, salt, token.AccessKey, hashedSecret, user.Email) +} + +// DeleteAuthToken clears and disables the users Authentication Token. +func (o *orm) DeleteAuthToken(user *sessions.User) error { + sql := "UPDATE users SET token_salt = '', token_key = '', token_hashed_secret = '', updated_at = now() WHERE email = $1 RETURNING *" + return o.q.Get(user, sql, user.Email) +} + +// SaveWebAuthn saves new WebAuthn token information. 
+func (o *orm) SaveWebAuthn(token *sessions.WebAuthn) error { + sql := "INSERT INTO web_authns (email, public_key_data) VALUES ($1, $2)" + _, err := o.q.Exec(sql, token.Email, token.PublicKeyData) + return err +} + +// Sessions returns all sessions limited by the parameters. +func (o *orm) Sessions(offset, limit int) (sessions []sessions.Session, err error) { + sql := `SELECT * FROM sessions ORDER BY created_at, id LIMIT $1 OFFSET $2;` + if err = o.q.Select(&sessions, sql, limit, offset); err != nil { + return + } + return +} + +// NOTE: this is duplicated from the bridges ORM to appease the AuthStorer interface +func (o *orm) FindExternalInitiator( + eia *auth.Token, +) (*bridges.ExternalInitiator, error) { + exi := &bridges.ExternalInitiator{} + err := o.q.Get(exi, `SELECT * FROM external_initiators WHERE access_key = $1`, eia.AccessKey) + return exi, err +} diff --git a/core/sessions/localauth/orm_test.go b/core/sessions/localauth/orm_test.go new file mode 100644 index 00000000..5024a5e2 --- /dev/null +++ b/core/sessions/localauth/orm_test.go @@ -0,0 +1,303 @@ +package localauth_test + +import ( + "encoding/json" + "testing" + "time" + + "github.com/go-webauthn/webauthn/protocol" + "github.com/go-webauthn/webauthn/webauthn" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/sessions/localauth" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func setupORM(t *testing.T) (*sqlx.DB, sessions.AuthenticationProvider) { + t.Helper() + + db := pgtest.NewSqlxDB(t) + orm := localauth.NewORM(db, time.Minute, logger.TestLogger(t), 
pgtest.NewQConfig(true), &audit.AuditLoggerService{}) + + return db, orm +} + +func TestORM_FindUser(t *testing.T) { + t.Parallel() + + db, orm := setupORM(t) + user1 := cltest.MustRandomUser(t) + user2 := cltest.MustRandomUser(t) + + require.NoError(t, orm.CreateUser(&user1)) + require.NoError(t, orm.CreateUser(&user2)) + _, err := db.Exec("UPDATE users SET created_at = now() - interval '1 day' WHERE email = $1", user2.Email) + require.NoError(t, err) + + actual, err := orm.FindUser(user1.Email) + require.NoError(t, err) + assert.Equal(t, user1.Email, actual.Email) + assert.Equal(t, user1.HashedPassword, actual.HashedPassword) +} + +func TestORM_AuthorizedUserWithSession(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + sessionID string + sessionDuration time.Duration + wantError string + }{ + {"authorized", "correctID", cltest.MustParseDuration(t, "3m"), ""}, + {"expired", "correctID", cltest.MustParseDuration(t, "0m"), sessions.ErrUserSessionExpired.Error()}, + {"incorrect", "wrong", cltest.MustParseDuration(t, "3m"), sessions.ErrUserSessionExpired.Error()}, + {"empty", "", cltest.MustParseDuration(t, "3m"), sessions.ErrEmptySessionID.Error()}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + db := pgtest.NewSqlxDB(t) + orm := localauth.NewORM(db, test.sessionDuration, logger.TestLogger(t), pgtest.NewQConfig(true), &audit.AuditLoggerService{}) + + user := cltest.MustRandomUser(t) + require.NoError(t, orm.CreateUser(&user)) + + prevSession := cltest.NewSession("correctID") + prevSession.LastUsed = time.Now().Add(-cltest.MustParseDuration(t, "2m")) + _, err := db.Exec("INSERT INTO sessions (id, email, last_used, created_at) VALUES ($1, $2, $3, now())", prevSession.ID, user.Email, prevSession.LastUsed) + require.NoError(t, err) + + expectedTime := utils.ISO8601UTC(time.Now()) + actual, err := orm.AuthorizedUserWithSession(test.sessionID) + if test.wantError != "" { + require.EqualError(t, err, test.wantError) + } 
else { + require.NoError(t, err) + assert.Equal(t, user.Email, actual.Email) + var bumpedSession sessions.Session + err = db.Get(&bumpedSession, "SELECT * FROM sessions WHERE ID = $1", prevSession.ID) + require.NoError(t, err) + assert.Equal(t, expectedTime[0:13], utils.ISO8601UTC(bumpedSession.LastUsed)[0:13]) // only compare up to the hour + } + }) + } +} + +func TestORM_DeleteUser(t *testing.T) { + t.Parallel() + _, orm := setupORM(t) + + u := cltest.MustRandomUser(t) + require.NoError(t, orm.CreateUser(&u)) + + err := orm.DeleteUser(u.Email) + require.NoError(t, err) + + _, err = orm.FindUser(u.Email) + require.Error(t, err) +} + +func TestORM_DeleteUserSession(t *testing.T) { + t.Parallel() + + db, orm := setupORM(t) + + u := cltest.MustRandomUser(t) + require.NoError(t, orm.CreateUser(&u)) + + session := sessions.NewSession() + _, err := db.Exec("INSERT INTO sessions (id, email, last_used, created_at) VALUES ($1, $2, now(), now())", session.ID, u.Email) + require.NoError(t, err) + + err = orm.DeleteUserSession(session.ID) + require.NoError(t, err) + + _, err = orm.FindUser(u.Email) + require.NoError(t, err) + + sessions, err := orm.Sessions(0, 10) + assert.NoError(t, err) + require.Empty(t, sessions) +} + +func TestORM_DeleteUserCascade(t *testing.T) { + db, orm := setupORM(t) + + u := cltest.MustRandomUser(t) + require.NoError(t, orm.CreateUser(&u)) + + session := sessions.NewSession() + _, err := db.Exec("INSERT INTO sessions (id, email, last_used, created_at) VALUES ($1, $2, now(), now())", session.ID, u.Email) + require.NoError(t, err) + + err = orm.DeleteUser(u.Email) + require.NoError(t, err) + + _, err = orm.FindUser(u.Email) + require.Error(t, err) + + sessions, err := orm.Sessions(0, 10) + assert.NoError(t, err) + require.Empty(t, sessions) +} + +func TestORM_CreateSession(t *testing.T) { + t.Parallel() + + _, orm := setupORM(t) + + initial := cltest.MustRandomUser(t) + require.NoError(t, orm.CreateUser(&initial)) + + tests := []struct { + name 
string + email string + password string + wantSession bool + }{ + {"correct", initial.Email, cltest.Password, true}, + {"incorrect email", "bogus@town.org", cltest.Password, false}, + {"incorrect pwd", initial.Email, "jamaicandundada", false}, + {"incorrect both", "dudus@coke.ja", "jamaicandundada", false}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sessionRequest := sessions.SessionRequest{ + Email: test.email, + Password: test.password, + } + + sessionID, err := orm.CreateSession(sessionRequest) + if test.wantSession { + require.NoError(t, err) + assert.NotEmpty(t, sessionID) + } else { + require.Error(t, err) + assert.Empty(t, sessionID) + } + }) + } +} + +func TestORM_WebAuthn(t *testing.T) { + t.Parallel() + + _, orm := setupORM(t) + + initial := cltest.MustRandomUser(t) + require.NoError(t, orm.CreateUser(&initial)) + + was, err := orm.GetUserWebAuthn(initial.Email) + require.NoError(t, err) + assert.Len(t, was, 0) + + cred := webauthn.Credential{ + ID: []byte("test-id"), + PublicKey: []byte("test-key"), + AttestationType: "test-attestation", + } + require.NoError(t, sessions.AddCredentialToUser(orm, initial.Email, &cred)) + + was, err = orm.GetUserWebAuthn(initial.Email) + require.NoError(t, err) + require.NotEmpty(t, was) + + _, err = orm.CreateSession(sessions.SessionRequest{ + Email: initial.Email, + Password: cltest.Password, + }) + require.Error(t, err) + require.ErrorContains(t, err, "MFA Error") + + ss := sessions.NewWebAuthnSessionStore() + _, err = orm.CreateSession(sessions.SessionRequest{ + Email: initial.Email, + Password: cltest.Password, + WebAuthnConfig: sessions.WebAuthnConfiguration{ + RPID: "test-rpid", + RPOrigin: "test-rporigin", + }, + SessionStore: ss, + }) + require.Error(t, err) + var ca protocol.CredentialAssertion + require.NoError(t, json.Unmarshal([]byte(err.Error()), &ca)) + require.Equal(t, "test-rpid", ca.Response.RelyingPartyID) + + _, err = orm.CreateSession(sessions.SessionRequest{ + 
Email: initial.Email, + Password: cltest.Password, + WebAuthnConfig: sessions.WebAuthnConfiguration{ + RPID: "test-rpid", + RPOrigin: "test-rporigin", + }, + SessionStore: ss, + WebAuthnData: "invalid-format", + }) + require.Error(t, err) + require.ErrorContains(t, err, "MFA Error") + + challengeResp, err := json.Marshal(protocol.CredentialAssertionResponse{ + PublicKeyCredential: protocol.PublicKeyCredential{ + Credential: protocol.Credential{ + ID: "test-id", + Type: "test-type", + }, + }, + }) + require.NoError(t, err) + _, err = orm.CreateSession(sessions.SessionRequest{ + Email: initial.Email, + Password: cltest.Password, + WebAuthnConfig: sessions.WebAuthnConfiguration{ + RPID: "test-rpid", + RPOrigin: "test-rporigin", + }, + WebAuthnData: string(challengeResp), + SessionStore: ss, + }) + require.Error(t, err) +} + +func TestOrm_GenerateAuthToken(t *testing.T) { + t.Parallel() + + _, orm := setupORM(t) + + initial := cltest.MustRandomUser(t) + require.NoError(t, orm.CreateUser(&initial)) + + token, err := orm.CreateAndSetAuthToken(&initial) + require.NoError(t, err) + + dbUser, err := orm.FindUser(initial.Email) + require.NoError(t, err) + + hashedSecret, err := auth.HashedSecret(token, dbUser.TokenSalt.String) + require.NoError(t, err) + + assert.NotNil(t, token) + assert.NotNil(t, token.Secret) + assert.NotEmpty(t, token.AccessKey) + assert.Equal(t, dbUser.TokenKey.String, token.AccessKey) + assert.Equal(t, dbUser.TokenHashedSecret.String, hashedSecret) + + require.NoError(t, orm.DeleteAuthToken(&initial)) + dbUser, err = orm.FindUser(initial.Email) + require.NoError(t, err) + assert.Empty(t, dbUser.TokenKey.ValueOrZero()) + assert.Empty(t, dbUser.TokenSalt.ValueOrZero()) + assert.Empty(t, dbUser.TokenHashedSecret.ValueOrZero()) +} diff --git a/core/sessions/localauth/reaper.go b/core/sessions/localauth/reaper.go new file mode 100644 index 00000000..c0cc3434 --- /dev/null +++ b/core/sessions/localauth/reaper.go @@ -0,0 +1,49 @@ +package localauth + +import 
( + "database/sql" + "time" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +type sessionReaper struct { + db *sql.DB + config SessionReaperConfig + lggr logger.Logger +} + +type SessionReaperConfig interface { + SessionTimeout() commonconfig.Duration + SessionReaperExpiration() commonconfig.Duration +} + +// NewSessionReaper creates a reaper that cleans stale sessions from the store. +func NewSessionReaper(db *sql.DB, config SessionReaperConfig, lggr logger.Logger) *utils.SleeperTask { + return utils.NewSleeperTask(&sessionReaper{ + db, + config, + lggr.Named("SessionReaper"), + }) +} + +func (sr *sessionReaper) Name() string { + return "SessionReaper" +} + +func (sr *sessionReaper) Work() { + recordCreationStaleThreshold := sr.config.SessionReaperExpiration().Before( + sr.config.SessionTimeout().Before(time.Now())) + err := sr.deleteStaleSessions(recordCreationStaleThreshold) + if err != nil { + sr.lggr.Error("unable to reap stale sessions: ", err) + } +} + +// DeleteStaleSessions deletes all sessions before the passed time. 
+func (sr *sessionReaper) deleteStaleSessions(before time.Time) error { + _, err := sr.db.Exec("DELETE FROM sessions WHERE last_used < $1", before) + return err +} diff --git a/core/sessions/localauth/reaper_test.go b/core/sessions/localauth/reaper_test.go new file mode 100644 index 00000000..65c5d650 --- /dev/null +++ b/core/sessions/localauth/reaper_test.go @@ -0,0 +1,83 @@ +package localauth_test + +import ( + "testing" + "time" + + "github.com/onsi/gomega" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/sessions/localauth" +) + +type sessionReaperConfig struct{} + +func (c sessionReaperConfig) SessionTimeout() commonconfig.Duration { + return *commonconfig.MustNewDuration(42 * time.Second) +} + +func (c sessionReaperConfig) SessionReaperExpiration() commonconfig.Duration { + return *commonconfig.MustNewDuration(142 * time.Second) +} + +func TestSessionReaper_ReapSessions(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + config := sessionReaperConfig{} + lggr := logger.TestLogger(t) + orm := localauth.NewORM(db, config.SessionTimeout().Duration(), lggr, pgtest.NewQConfig(true), audit.NoopLogger) + + r := localauth.NewSessionReaper(db.DB, config, lggr) + t.Cleanup(func() { + assert.NoError(t, r.Stop()) + }) + + tests := []struct { + name string + lastUsed time.Time + wantReap bool + }{ + {"current", time.Now(), false}, + {"expired", time.Now().Add(-config.SessionTimeout().Duration()), false}, + {"almost stale", time.Now().Add(-config.SessionReaperExpiration().Duration()), false}, + {"stale", 
time.Now().Add(-config.SessionReaperExpiration().Duration()). + Add(-config.SessionTimeout().Duration()), true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Cleanup(func() { + _, err2 := db.Exec("DELETE FROM sessions where email = $1", cltest.APIEmailAdmin) + require.NoError(t, err2) + }) + + _, err := db.Exec("INSERT INTO sessions (last_used, email, id, created_at) VALUES ($1, $2, $3, now())", test.lastUsed, cltest.APIEmailAdmin, test.name) + require.NoError(t, err) + + r.WakeUp() + + if test.wantReap { + gomega.NewWithT(t).Eventually(func() []sessions.Session { + sessions, err := orm.Sessions(0, 10) + assert.NoError(t, err) + return sessions + }).Should(gomega.HaveLen(0)) + } else { + gomega.NewWithT(t).Consistently(func() []sessions.Session { + sessions, err := orm.Sessions(0, 10) + assert.NoError(t, err) + return sessions + }).Should(gomega.HaveLen(1)) + } + }) + } +} diff --git a/core/sessions/mocks/authentication_provider.go b/core/sessions/mocks/authentication_provider.go new file mode 100644 index 00000000..31d139b1 --- /dev/null +++ b/core/sessions/mocks/authentication_provider.go @@ -0,0 +1,483 @@ +// Code generated by mockery v2.38.0. DO NOT EDIT. 
+ +package mocks + +import ( + auth "github.com/goplugin/pluginv3.0/v2/core/auth" + bridges "github.com/goplugin/pluginv3.0/v2/core/bridges" + + mock "github.com/stretchr/testify/mock" + + sessions "github.com/goplugin/pluginv3.0/v2/core/sessions" +) + +// AuthenticationProvider is an autogenerated mock type for the AuthenticationProvider type +type AuthenticationProvider struct { + mock.Mock +} + +// AuthorizedUserWithSession provides a mock function with given fields: sessionID +func (_m *AuthenticationProvider) AuthorizedUserWithSession(sessionID string) (sessions.User, error) { + ret := _m.Called(sessionID) + + if len(ret) == 0 { + panic("no return value specified for AuthorizedUserWithSession") + } + + var r0 sessions.User + var r1 error + if rf, ok := ret.Get(0).(func(string) (sessions.User, error)); ok { + return rf(sessionID) + } + if rf, ok := ret.Get(0).(func(string) sessions.User); ok { + r0 = rf(sessionID) + } else { + r0 = ret.Get(0).(sessions.User) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(sessionID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClearNonCurrentSessions provides a mock function with given fields: sessionID +func (_m *AuthenticationProvider) ClearNonCurrentSessions(sessionID string) error { + ret := _m.Called(sessionID) + + if len(ret) == 0 { + panic("no return value specified for ClearNonCurrentSessions") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(sessionID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CreateAndSetAuthToken provides a mock function with given fields: user +func (_m *AuthenticationProvider) CreateAndSetAuthToken(user *sessions.User) (*auth.Token, error) { + ret := _m.Called(user) + + if len(ret) == 0 { + panic("no return value specified for CreateAndSetAuthToken") + } + + var r0 *auth.Token + var r1 error + if rf, ok := ret.Get(0).(func(*sessions.User) (*auth.Token, error)); ok { + return rf(user) + } + if rf, ok := 
ret.Get(0).(func(*sessions.User) *auth.Token); ok { + r0 = rf(user) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*auth.Token) + } + } + + if rf, ok := ret.Get(1).(func(*sessions.User) error); ok { + r1 = rf(user) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateSession provides a mock function with given fields: sr +func (_m *AuthenticationProvider) CreateSession(sr sessions.SessionRequest) (string, error) { + ret := _m.Called(sr) + + if len(ret) == 0 { + panic("no return value specified for CreateSession") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(sessions.SessionRequest) (string, error)); ok { + return rf(sr) + } + if rf, ok := ret.Get(0).(func(sessions.SessionRequest) string); ok { + r0 = rf(sr) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(sessions.SessionRequest) error); ok { + r1 = rf(sr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateUser provides a mock function with given fields: user +func (_m *AuthenticationProvider) CreateUser(user *sessions.User) error { + ret := _m.Called(user) + + if len(ret) == 0 { + panic("no return value specified for CreateUser") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*sessions.User) error); ok { + r0 = rf(user) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteAuthToken provides a mock function with given fields: user +func (_m *AuthenticationProvider) DeleteAuthToken(user *sessions.User) error { + ret := _m.Called(user) + + if len(ret) == 0 { + panic("no return value specified for DeleteAuthToken") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*sessions.User) error); ok { + r0 = rf(user) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteUser provides a mock function with given fields: email +func (_m *AuthenticationProvider) DeleteUser(email string) error { + ret := _m.Called(email) + + if len(ret) == 0 { + panic("no return value specified for DeleteUser") + } + + var r0 
error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(email) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteUserSession provides a mock function with given fields: sessionID +func (_m *AuthenticationProvider) DeleteUserSession(sessionID string) error { + ret := _m.Called(sessionID) + + if len(ret) == 0 { + panic("no return value specified for DeleteUserSession") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(sessionID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FindExternalInitiator provides a mock function with given fields: eia +func (_m *AuthenticationProvider) FindExternalInitiator(eia *auth.Token) (*bridges.ExternalInitiator, error) { + ret := _m.Called(eia) + + if len(ret) == 0 { + panic("no return value specified for FindExternalInitiator") + } + + var r0 *bridges.ExternalInitiator + var r1 error + if rf, ok := ret.Get(0).(func(*auth.Token) (*bridges.ExternalInitiator, error)); ok { + return rf(eia) + } + if rf, ok := ret.Get(0).(func(*auth.Token) *bridges.ExternalInitiator); ok { + r0 = rf(eia) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*bridges.ExternalInitiator) + } + } + + if rf, ok := ret.Get(1).(func(*auth.Token) error); ok { + r1 = rf(eia) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindUser provides a mock function with given fields: email +func (_m *AuthenticationProvider) FindUser(email string) (sessions.User, error) { + ret := _m.Called(email) + + if len(ret) == 0 { + panic("no return value specified for FindUser") + } + + var r0 sessions.User + var r1 error + if rf, ok := ret.Get(0).(func(string) (sessions.User, error)); ok { + return rf(email) + } + if rf, ok := ret.Get(0).(func(string) sessions.User); ok { + r0 = rf(email) + } else { + r0 = ret.Get(0).(sessions.User) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(email) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FindUserByAPIToken provides a 
mock function with given fields: apiToken +func (_m *AuthenticationProvider) FindUserByAPIToken(apiToken string) (sessions.User, error) { + ret := _m.Called(apiToken) + + if len(ret) == 0 { + panic("no return value specified for FindUserByAPIToken") + } + + var r0 sessions.User + var r1 error + if rf, ok := ret.Get(0).(func(string) (sessions.User, error)); ok { + return rf(apiToken) + } + if rf, ok := ret.Get(0).(func(string) sessions.User); ok { + r0 = rf(apiToken) + } else { + r0 = ret.Get(0).(sessions.User) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(apiToken) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetUserWebAuthn provides a mock function with given fields: email +func (_m *AuthenticationProvider) GetUserWebAuthn(email string) ([]sessions.WebAuthn, error) { + ret := _m.Called(email) + + if len(ret) == 0 { + panic("no return value specified for GetUserWebAuthn") + } + + var r0 []sessions.WebAuthn + var r1 error + if rf, ok := ret.Get(0).(func(string) ([]sessions.WebAuthn, error)); ok { + return rf(email) + } + if rf, ok := ret.Get(0).(func(string) []sessions.WebAuthn); ok { + r0 = rf(email) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]sessions.WebAuthn) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(email) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListUsers provides a mock function with given fields: +func (_m *AuthenticationProvider) ListUsers() ([]sessions.User, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ListUsers") + } + + var r0 []sessions.User + var r1 error + if rf, ok := ret.Get(0).(func() ([]sessions.User, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []sessions.User); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]sessions.User) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + 
+// SaveWebAuthn provides a mock function with given fields: token +func (_m *AuthenticationProvider) SaveWebAuthn(token *sessions.WebAuthn) error { + ret := _m.Called(token) + + if len(ret) == 0 { + panic("no return value specified for SaveWebAuthn") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*sessions.WebAuthn) error); ok { + r0 = rf(token) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Sessions provides a mock function with given fields: offset, limit +func (_m *AuthenticationProvider) Sessions(offset int, limit int) ([]sessions.Session, error) { + ret := _m.Called(offset, limit) + + if len(ret) == 0 { + panic("no return value specified for Sessions") + } + + var r0 []sessions.Session + var r1 error + if rf, ok := ret.Get(0).(func(int, int) ([]sessions.Session, error)); ok { + return rf(offset, limit) + } + if rf, ok := ret.Get(0).(func(int, int) []sessions.Session); ok { + r0 = rf(offset, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]sessions.Session) + } + } + + if rf, ok := ret.Get(1).(func(int, int) error); ok { + r1 = rf(offset, limit) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetAuthToken provides a mock function with given fields: user, token +func (_m *AuthenticationProvider) SetAuthToken(user *sessions.User, token *auth.Token) error { + ret := _m.Called(user, token) + + if len(ret) == 0 { + panic("no return value specified for SetAuthToken") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*sessions.User, *auth.Token) error); ok { + r0 = rf(user, token) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetPassword provides a mock function with given fields: user, newPassword +func (_m *AuthenticationProvider) SetPassword(user *sessions.User, newPassword string) error { + ret := _m.Called(user, newPassword) + + if len(ret) == 0 { + panic("no return value specified for SetPassword") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*sessions.User, string) error); ok { + r0 = 
rf(user, newPassword) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TestPassword provides a mock function with given fields: email, password +func (_m *AuthenticationProvider) TestPassword(email string, password string) error { + ret := _m.Called(email, password) + + if len(ret) == 0 { + panic("no return value specified for TestPassword") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, string) error); ok { + r0 = rf(email, password) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateRole provides a mock function with given fields: email, newRole +func (_m *AuthenticationProvider) UpdateRole(email string, newRole string) (sessions.User, error) { + ret := _m.Called(email, newRole) + + if len(ret) == 0 { + panic("no return value specified for UpdateRole") + } + + var r0 sessions.User + var r1 error + if rf, ok := ret.Get(0).(func(string, string) (sessions.User, error)); ok { + return rf(email, newRole) + } + if rf, ok := ret.Get(0).(func(string, string) sessions.User); ok { + r0 = rf(email, newRole) + } else { + r0 = ret.Get(0).(sessions.User) + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(email, newRole) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewAuthenticationProvider creates a new instance of AuthenticationProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAuthenticationProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *AuthenticationProvider { + mock := &AuthenticationProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/sessions/mocks/basic_admin_users_orm.go b/core/sessions/mocks/basic_admin_users_orm.go new file mode 100644 index 00000000..8ed5e471 --- /dev/null +++ b/core/sessions/mocks/basic_admin_users_orm.go @@ -0,0 +1,103 @@ +// Code generated by mockery v2.38.0. 
DO NOT EDIT. + +package mocks + +import ( + sessions "github.com/goplugin/pluginv3.0/v2/core/sessions" + mock "github.com/stretchr/testify/mock" +) + +// BasicAdminUsersORM is an autogenerated mock type for the BasicAdminUsersORM type +type BasicAdminUsersORM struct { + mock.Mock +} + +// CreateUser provides a mock function with given fields: user +func (_m *BasicAdminUsersORM) CreateUser(user *sessions.User) error { + ret := _m.Called(user) + + if len(ret) == 0 { + panic("no return value specified for CreateUser") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*sessions.User) error); ok { + r0 = rf(user) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FindUser provides a mock function with given fields: email +func (_m *BasicAdminUsersORM) FindUser(email string) (sessions.User, error) { + ret := _m.Called(email) + + if len(ret) == 0 { + panic("no return value specified for FindUser") + } + + var r0 sessions.User + var r1 error + if rf, ok := ret.Get(0).(func(string) (sessions.User, error)); ok { + return rf(email) + } + if rf, ok := ret.Get(0).(func(string) sessions.User); ok { + r0 = rf(email) + } else { + r0 = ret.Get(0).(sessions.User) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(email) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListUsers provides a mock function with given fields: +func (_m *BasicAdminUsersORM) ListUsers() ([]sessions.User, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ListUsers") + } + + var r0 []sessions.User + var r1 error + if rf, ok := ret.Get(0).(func() ([]sessions.User, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []sessions.User); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]sessions.User) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewBasicAdminUsersORM creates a new instance of 
BasicAdminUsersORM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBasicAdminUsersORM(t interface { + mock.TestingT + Cleanup(func()) +}) *BasicAdminUsersORM { + mock := &BasicAdminUsersORM{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/sessions/session.go b/core/sessions/session.go new file mode 100644 index 00000000..f678f63f --- /dev/null +++ b/core/sessions/session.go @@ -0,0 +1,74 @@ +package sessions + +import ( + "crypto/subtle" + "time" + + "github.com/pkg/errors" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// SessionRequest encapsulates the fields needed to generate a new SessionID, +// including the hashed password. +type SessionRequest struct { + Email string `json:"email"` + Password string `json:"password"` + WebAuthnData string `json:"webauthndata"` + WebAuthnConfig WebAuthnConfiguration + SessionStore *WebAuthnSessionStore +} + +// Session holds the unique id for the authenticated session. +type Session struct { + ID string `json:"id"` + Email string `json:"email"` + LastUsed time.Time `json:"lastUsed"` + CreatedAt time.Time `json:"createdAt"` +} + +// NewSession returns a session instance with ID set to a random ID and +// LastUsed to now. +func NewSession() Session { + return Session{ + ID: utils.NewBytes32ID(), + LastUsed: time.Now(), + } +} + +// Changeauth.TokenRequest is sent when updating a User's authentication token. +type ChangeAuthTokenRequest struct { + Password string `json:"password"` +} + +// GenerateAuthToken randomly generates and sets the users Authentication +// Token. 
+func (u *User) GenerateAuthToken() (*auth.Token, error) { + token := auth.NewToken() + return token, u.SetAuthToken(token) +} + +// SetAuthToken updates the user to use the given Authentication Token. +func (u *User) SetAuthToken(token *auth.Token) error { + salt := utils.NewSecret(utils.DefaultSecretSize) + hashedSecret, err := auth.HashedSecret(token, salt) + if err != nil { + return errors.Wrap(err, "user") + } + u.TokenSalt = null.StringFrom(salt) + u.TokenKey = null.StringFrom(token.AccessKey) + u.TokenHashedSecret = null.StringFrom(hashedSecret) + return nil +} + +// AuthenticateUserByToken returns true on successful authentication of the +// user against the given Authentication Token. +func AuthenticateUserByToken(token *auth.Token, user *User) (bool, error) { + hashedSecret, err := auth.HashedSecret(token, user.TokenSalt.ValueOrZero()) + if err != nil { + return false, err + } + return subtle.ConstantTimeCompare([]byte(hashedSecret), []byte(user.TokenHashedSecret.ValueOrZero())) == 1, nil +} diff --git a/core/sessions/user.go b/core/sessions/user.go new file mode 100644 index 00000000..f96da164 --- /dev/null +++ b/core/sessions/user.go @@ -0,0 +1,108 @@ +package sessions + +import ( + "fmt" + "net/mail" + "time" + + "github.com/pkg/errors" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// User holds the credentials for API user. +type User struct { + Email string + HashedPassword string + Role UserRole + CreatedAt time.Time + TokenKey null.String + TokenSalt null.String + TokenHashedSecret null.String + UpdatedAt time.Time +} + +type UserRole string + +const ( + UserRoleAdmin UserRole = "admin" + UserRoleEdit UserRole = "edit" + UserRoleRun UserRole = "run" + UserRoleView UserRole = "view" +) + +// https://security.stackexchange.com/questions/39849/does-bcrypt-have-a-maximum-password-length +const ( + MaxBcryptPasswordLength = 50 +) + +// NewUser creates a new user by hashing the passed plainPwd with bcrypt. 
+func NewUser(email string, plainPwd string, role UserRole) (User, error) { + if err := ValidateEmail(email); err != nil { + return User{}, err + } + + pwd, err := ValidateAndHashPassword(plainPwd) + if err != nil { + return User{}, err + } + + return User{ + Email: email, + HashedPassword: pwd, + Role: role, + }, nil +} + +// ValidateEmail is the single point of logic for user email validations +func ValidateEmail(email string) error { + if len(email) == 0 { + return errors.New("Must enter an email") + } + _, err := mail.ParseAddress(email) + return err +} + +// ValidateAndHashPassword is the single point of logic for user password validations +func ValidateAndHashPassword(plainPwd string) (string, error) { + if err := utils.VerifyPasswordComplexity(plainPwd); err != nil { + return "", errors.Wrapf(err, "password insufficiently complex:\n%s", utils.PasswordComplexityRequirements) + } + if len(plainPwd) > MaxBcryptPasswordLength { + return "", errors.Errorf("must enter a password less than %v characters", MaxBcryptPasswordLength) + } + + pwd, err := utils.HashPassword(plainPwd) + if err != nil { + return "", err + } + + return pwd, nil +} + +// GetUserRole is the single point of logic for mapping role string to UserRole +func GetUserRole(role string) (UserRole, error) { + if role == string(UserRoleAdmin) { + return UserRoleAdmin, nil + } + if role == string(UserRoleEdit) { + return UserRoleEdit, nil + } + if role == string(UserRoleRun) { + return UserRoleRun, nil + } + if role == string(UserRoleView) { + return UserRoleView, nil + } + + errStr := fmt.Sprintf( + "Invalid role: %s. 
Allowed roles: '%s', '%s', '%s', '%s'.", + role, + UserRoleAdmin, + UserRoleEdit, + UserRoleRun, + UserRoleView, + ) + return UserRole(""), errors.New(errStr) +} diff --git a/core/sessions/user_test.go b/core/sessions/user_test.go new file mode 100644 index 00000000..96662d0f --- /dev/null +++ b/core/sessions/user_test.go @@ -0,0 +1,74 @@ +package sessions_test + +import ( + "testing" + + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/utils" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewUser(t *testing.T) { + t.Parallel() + + tests := []struct { + email, pwd string + role sessions.UserRole + wantError bool + }{ + {"good@email.com", cltest.Password, sessions.UserRoleAdmin, false}, + {"notld@email", cltest.Password, sessions.UserRoleEdit, false}, + {"view@email", cltest.Password, sessions.UserRoleView, false}, + {"good@email.com", "badpd", sessions.UserRoleAdmin, true}, + {"bademail", cltest.Password, sessions.UserRoleAdmin, true}, + {"bad@", cltest.Password, sessions.UserRoleAdmin, true}, + {"@email", cltest.Password, sessions.UserRoleAdmin, true}, + {"good@email.com", cltest.Password, sessions.UserRoleRun, false}, + {"good@email-pass-too-long.com", cltest.Password + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", sessions.UserRoleAdmin, true}, + } + + for _, test := range tests { + t.Run(test.email, func(t *testing.T) { + user, err := sessions.NewUser(test.email, test.pwd, test.role) + if test.wantError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.email, user.Email) + assert.Equal(t, test.role, user.Role) + assert.NotEmpty(t, user.HashedPassword) + newHash, _ := utils.HashPassword(test.pwd) + assert.NotEqual(t, newHash, user.HashedPassword, "Salt should prevent equality") + } + }) + } +} + +func TestUserGenerateAuthToken(t *testing.T) { + var user 
sessions.User + token, err := user.GenerateAuthToken() + require.NoError(t, err) + assert.Equal(t, null.StringFrom(token.AccessKey), user.TokenKey) + assert.NotEqual(t, null.StringFrom(token.Secret), user.TokenHashedSecret) +} + +func TestAuthenticateUserByToken(t *testing.T) { + var user sessions.User + + token, err := user.GenerateAuthToken() + assert.NoError(t, err, "failed when generate auth token") + ok, err := sessions.AuthenticateUserByToken(token, &user) + require.NoError(t, err) + assert.True(t, ok, "authentication must be successful") + + _, err = user.GenerateAuthToken() + assert.NoError(t, err, "failed to generate auth token") + ok, err = sessions.AuthenticateUserByToken(token, &user) + require.NoError(t, err) + assert.False(t, ok, "authentication must fail with past token") +} diff --git a/core/sessions/webauthn.go b/core/sessions/webauthn.go new file mode 100644 index 00000000..fe07aac1 --- /dev/null +++ b/core/sessions/webauthn.go @@ -0,0 +1,293 @@ +package sessions + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "sync" + + "github.com/go-webauthn/webauthn/protocol" + "github.com/go-webauthn/webauthn/webauthn" + sqlxTypes "github.com/jmoiron/sqlx/types" + "github.com/pkg/errors" +) + +// WebAuthn holds the credentials for API user. 
+type WebAuthn struct { + Email string + PublicKeyData sqlxTypes.JSONText +} + +// WebAuthnUser implements the required duo-labs/webauthn/ 'User' interface +// kept separate from our internal 'User' struct +type WebAuthnUser struct { + Email string + WACredentials []webauthn.Credential +} + +type WebAuthnConfiguration struct { + RPID string + RPOrigin string +} + +func (store *WebAuthnSessionStore) BeginWebAuthnRegistration(user User, uwas []WebAuthn, config WebAuthnConfiguration) (*protocol.CredentialCreation, error) { + webAuthn, err := webauthn.New(&webauthn.Config{ + RPDisplayName: "Plugin Operator", // Display Name + RPID: config.RPID, // Generally the domain name + RPOrigin: config.RPOrigin, // The origin URL for WebAuthn requests + }) + + if err != nil { + return nil, err + } + + waUser, err := duoWebAuthUserFromUser(user, uwas) + if err != nil { + return nil, err + } + + registerOptions := func(credCreationOpts *protocol.PublicKeyCredentialCreationOptions) { + credCreationOpts.CredentialExcludeList = waUser.CredentialExcludeList() + } + + // generate PublicKeyCredentialCreationOptions, session data + options, sessionData, err := webAuthn.BeginRegistration( + waUser, + registerOptions, + ) + + if err != nil { + return nil, err + } + + userRegistrationIndexKey := fmt.Sprintf("%s-registration", user.Email) + err = store.SaveWebauthnSession(userRegistrationIndexKey, sessionData) + if err != nil { + return nil, err + } + + return options, nil +} + +func (store *WebAuthnSessionStore) FinishWebAuthnRegistration(user User, uwas []WebAuthn, response *http.Request, config WebAuthnConfiguration) (*webauthn.Credential, error) { + webAuthn, err := webauthn.New(&webauthn.Config{ + RPDisplayName: "Plugin Operator", // Display Name + RPID: config.RPID, // Generally the domain name + RPOrigin: config.RPOrigin, // The origin URL for WebAuthn requests + }) + if err != nil { + return nil, err + } + + userRegistrationIndexKey := fmt.Sprintf("%s-registration", user.Email) + 
sessionData, err := store.GetWebauthnSession(userRegistrationIndexKey) + if err != nil { + return nil, err + } + + waUser, err := duoWebAuthUserFromUser(user, uwas) + if err != nil { + return nil, err + } + + credential, err := webAuthn.FinishRegistration(waUser, sessionData, response) + if err != nil { + return nil, errors.Wrap(err, "failed to FinishRegistration") + } + + return credential, nil +} + +func BeginWebAuthnLogin(user User, uwas []WebAuthn, sr SessionRequest) (*protocol.CredentialAssertion, error) { + webAuthn, err := webauthn.New(&webauthn.Config{ + RPDisplayName: "Plugin Operator", // Display Name + RPID: sr.WebAuthnConfig.RPID, // Generally the domain name + RPOrigin: sr.WebAuthnConfig.RPOrigin, // The origin URL for WebAuthn requests + }) + + if err != nil { + return nil, err + } + + waUser, err := duoWebAuthUserFromUser(user, uwas) + if err != nil { + return nil, err + } + + options, sessionData, err := webAuthn.BeginLogin(waUser) + if err != nil { + return nil, err + } + + userLoginIndexKey := fmt.Sprintf("%s-authentication", user.Email) + err = sr.SessionStore.SaveWebauthnSession(userLoginIndexKey, sessionData) + if err != nil { + return nil, err + } + + return options, nil +} + +func FinishWebAuthnLogin(user User, uwas []WebAuthn, sr SessionRequest) error { + webAuthn, err := webauthn.New(&webauthn.Config{ + RPDisplayName: "Plugin Operator", // Display Name + RPID: sr.WebAuthnConfig.RPID, // Generally the domain name + RPOrigin: sr.WebAuthnConfig.RPOrigin, // The origin URL for WebAuthn requests + }) + + if err != nil { + return errors.Wrapf(err, "failed to create webAuthn structure with RPID: %s and RPOrigin: %s", sr.WebAuthnConfig.RPID, sr.WebAuthnConfig.RPOrigin) + } + + credential, err := protocol.ParseCredentialRequestResponseBody(strings.NewReader(sr.WebAuthnData)) + if err != nil { + return err + } + + userLoginIndexKey := fmt.Sprintf("%s-authentication", user.Email) + sessionData, err := 
sr.SessionStore.GetWebauthnSession(userLoginIndexKey) + if err != nil { + return err + } + + waUser, err := duoWebAuthUserFromUser(user, uwas) + if err != nil { + return err + } + + _, err = webAuthn.ValidateLogin(waUser, sessionData, credential) + return err +} + +// WebAuthnID returns the user's ID +func (u WebAuthnUser) WebAuthnID() []byte { + return []byte(u.Email) +} + +// WebAuthnName returns the user's email +func (u WebAuthnUser) WebAuthnName() string { + return u.Email +} + +// WebAuthnDisplayName returns the user's display name. +// In this case we just return the email +func (u WebAuthnUser) WebAuthnDisplayName() string { + return u.Email +} + +// WebAuthnIcon should be the logo in some form. How it should +// be is currently unclear to me. +func (u WebAuthnUser) WebAuthnIcon() string { + return "" +} + +// WebAuthnCredentials returns credentials owned by the user +func (u WebAuthnUser) WebAuthnCredentials() []webauthn.Credential { + return u.WACredentials +} + +// CredentialExcludeList returns a CredentialDescriptor array filled +// with all the user's credentials to prevent them from re-registering +// keys +func (u WebAuthnUser) CredentialExcludeList() []protocol.CredentialDescriptor { + credentialExcludeList := []protocol.CredentialDescriptor{} + + for _, cred := range u.WACredentials { + descriptor := protocol.CredentialDescriptor{ + Type: protocol.PublicKeyCredentialType, + CredentialID: cred.ID, + } + credentialExcludeList = append(credentialExcludeList, descriptor) + } + + return credentialExcludeList +} + +func (u *WebAuthnUser) LoadWebAuthnCredentials(uwas []WebAuthn) error { + for _, v := range uwas { + var credential webauthn.Credential + err := v.PublicKeyData.Unmarshal(&credential) + if err != nil { + return fmt.Errorf("error unmarshalling provided PublicKeyData: %s", err) + } + u.WACredentials = append(u.WACredentials, credential) + } + return nil +} + +func duoWebAuthUserFromUser(user User, uwas []WebAuthn) (WebAuthnUser, error) { + 
waUser := WebAuthnUser{ + Email: user.Email, + } + err := waUser.LoadWebAuthnCredentials(uwas) + + return waUser, err +} + +// WebAuthnSessionStore is a wrapper around an in memory key value store which provides some helper +// methods related to webauthn operations. +type WebAuthnSessionStore struct { + inProgressRegistrations map[string]string + mu sync.Mutex +} + +// NewWebAuthnSessionStore returns a new session store. +func NewWebAuthnSessionStore() *WebAuthnSessionStore { + return &WebAuthnSessionStore{ + inProgressRegistrations: map[string]string{}, + } +} + +// SaveWebauthnSession marshals and saves the webauthn data to the provided +// key given the request and responsewriter +func (store *WebAuthnSessionStore) SaveWebauthnSession(key string, data *webauthn.SessionData) error { + marshaledData, err := json.Marshal(data) + if err != nil { + return err + } + store.put(key, string(marshaledData)) + return nil +} + +func (store *WebAuthnSessionStore) put(key, val string) { + store.mu.Lock() + defer store.mu.Unlock() + store.inProgressRegistrations[key] = val +} + +// take returns the val for key, as well as removing it. +func (store *WebAuthnSessionStore) take(key string) (val string, ok bool) { + store.mu.Lock() + defer store.mu.Unlock() + val, ok = store.inProgressRegistrations[key] + if ok { + delete(store.inProgressRegistrations, key) + } + return +} + +// GetWebauthnSession unmarshals and returns the webauthn session information +// from the session cookie, which is removed. 
+func (store *WebAuthnSessionStore) GetWebauthnSession(key string) (data webauthn.SessionData, err error) { + assertion, ok := store.take(key) + if !ok { + err = errors.New("assertion not in challenge store") + return + } + err = json.Unmarshal([]byte(assertion), &data) + return +} + +func AddCredentialToUser(ap AuthenticationProvider, email string, credential *webauthn.Credential) error { + credj, err := json.Marshal(credential) + if err != nil { + return err + } + + token := WebAuthn{ + Email: email, + PublicKeyData: sqlxTypes.JSONText(credj), + } + return ap.SaveWebAuthn(&token) +} diff --git a/core/sessions/webauthn_test.go b/core/sessions/webauthn_test.go new file mode 100644 index 00000000..32009b17 --- /dev/null +++ b/core/sessions/webauthn_test.go @@ -0,0 +1,81 @@ +package sessions + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/go-webauthn/webauthn/protocol" + "github.com/go-webauthn/webauthn/webauthn" + sqlxTypes "github.com/jmoiron/sqlx/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestWebAuthnSessionStore(t *testing.T) { + const key = "test-key" + data := webauthn.SessionData{ + Challenge: "challenge-string", + UserID: []byte("test-user-id"), + AllowedCredentialIDs: [][]byte{ + []byte("test"), + []byte("foo"), + []byte("bar"), + }, + UserVerification: protocol.UserVerificationRequirement("test-user-verification"), + } + s := NewWebAuthnSessionStore() + + val, ok := s.take(key) + assert.Equal(t, "", val) + require.False(t, ok) + + require.NoError(t, s.SaveWebauthnSession(key, &data)) + + got, err := s.GetWebauthnSession(key) + require.NoError(t, err) + assert.Equal(t, data, got) + + val, ok = s.take(key) + assert.Equal(t, "", val) + require.False(t, ok) + + _, err = s.GetWebauthnSession(key) + assert.ErrorContains(t, err, "assertion not in challenge store") + + user := mustRandomUser(t) + cred := webauthn.Credential{ + ID: 
[]byte("test-id"), + PublicKey: []byte("test-key"), + AttestationType: "test-attestation", + } + credj, err := json.Marshal(cred) + require.NoError(t, err) + + token := WebAuthn{ + Email: user.Email, + PublicKeyData: sqlxTypes.JSONText(credj), + } + uwas := []WebAuthn{token} + wcfg := WebAuthnConfiguration{RPID: "test-rpid", RPOrigin: "test-rporigin"} + cc, err := s.BeginWebAuthnRegistration(user, uwas, wcfg) + require.NoError(t, err) + require.Equal(t, "Plugin Operator", cc.Response.RelyingParty.CredentialEntity.Name) + require.Equal(t, "test-rpid", cc.Response.RelyingParty.ID) + require.Equal(t, user.Email, cc.Response.User.Name) + require.Equal(t, user.Email, cc.Response.User.DisplayName) + + _, err = s.FinishWebAuthnRegistration(user, uwas, nil, wcfg) + require.Error(t, err) +} + +func mustRandomUser(t testing.TB) User { + email := fmt.Sprintf("user-%v@plugin.test", testutils.NewRandomPositiveInt64()) + r, err := NewUser(email, testutils.Password, UserRoleAdmin) + if err != nil { + t.Fatal(err) + } + return r +} diff --git a/core/shutdown/shutdown.go b/core/shutdown/shutdown.go new file mode 100644 index 00000000..1a864d3a --- /dev/null +++ b/core/shutdown/shutdown.go @@ -0,0 +1,16 @@ +package shutdown + +import ( + "os" + ossignal "os/signal" + "syscall" +) + +// HandleShutdown waits for SIGINT/SIGTERM signals and calls handleFunc +func HandleShutdown(handleFunc func(sig string)) { + ch := make(chan os.Signal, 1) + ossignal.Notify(ch, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + sig := <-ch + handleFunc(sig.String()) +} diff --git a/core/shutdown/shutdown_test.go b/core/shutdown/shutdown_test.go new file mode 100644 index 00000000..1953c40b --- /dev/null +++ b/core/shutdown/shutdown_test.go @@ -0,0 +1,45 @@ +package shutdown + +import ( + "context" + "os" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestHandleShutdown(t *testing.T) { + proc, err := 
os.FindProcess(os.Getpid()) + require.NoError(t, err) + + tests := map[string]os.Signal{ + "SIGINT": syscall.SIGINT, + "SIGTERM": syscall.SIGTERM, + } + + for name, sig := range tests { + t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(testutils.Context(t)) + go HandleShutdown(func(string) { + cancel() + }) + + // have to wait for ossignal.Notify + time.Sleep(time.Second) + + err = proc.Signal(sig) + require.NoError(t, err) + + select { + case <-ctx.Done(): + // all good + case <-time.After(3 * time.Second): + require.Fail(t, "context is not cancelled within 3 seconds") + } + }) + } +} diff --git a/core/static/static.go b/core/static/static.go new file mode 100644 index 00000000..15778ace --- /dev/null +++ b/core/static/static.go @@ -0,0 +1,71 @@ +package static + +import ( + "fmt" + "net/url" + "time" + + "github.com/google/uuid" +) + +// Version and Sha are set at compile time via build arguments. +var ( + // Version is the semantic version of the build or Unset. + Version = Unset + // Sha is the commit hash of the build or Unset. + Sha = Unset +) + +// InitTime holds the initial start timestamp. +var InitTime = time.Now() + +const ( + // Unset is a sentinel value. 
+ Unset = "unset" + // ExternalInitiatorAccessKeyHeader is the header name for the access key + // used by external initiators to authenticate + ExternalInitiatorAccessKeyHeader = "X-Plugin-EA-AccessKey" + // ExternalInitiatorSecretHeader is the header name for the secret used by + // external initiators to authenticate + ExternalInitiatorSecretHeader = "X-Plugin-EA-Secret" +) + +func buildPrettyVersion() string { + if Version == Unset { + return " " + } + return fmt.Sprintf(" %s ", Version) +} + +// SetConsumerName sets a nicely formatted application_name on the +// database uri +func SetConsumerName(uri *url.URL, name string, id *uuid.UUID) { + q := uri.Query() + + applicationName := fmt.Sprintf("Plugin%s|%s", buildPrettyVersion(), name) + if id != nil { + applicationName += fmt.Sprintf("|%s", id.String()) + } + if len(applicationName) > 63 { + applicationName = applicationName[:63] + } + q.Set("application_name", applicationName) + uri.RawQuery = q.Encode() +} + +// Short returns a 7-character sha prefix and version, or Unset if blank. 
+func Short() (shaPre string, ver string) { + return short(Sha, Version) +} + +func short(sha, ver string) (string, string) { + if sha == "" { + sha = Unset + } else if len(sha) > 7 { + sha = sha[:7] + } + if ver == "" { + ver = Unset + } + return sha, ver +} diff --git a/core/static/static_test.go b/core/static/static_test.go new file mode 100644 index 00000000..24dc6d58 --- /dev/null +++ b/core/static/static_test.go @@ -0,0 +1,26 @@ +package static + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_short(t *testing.T) { + for _, tt := range []struct { + ver, sha string + expVer, expSha string + }{ + {"1.0", "1234567890", "1.0", "1234567"}, + {"1", "a", "1", "a"}, + {"", "", "unset", "unset"}, + {"1.0", "", "1.0", "unset"}, + {"", "1234567890", "unset", "1234567"}, + } { + t.Run(tt.ver+":"+tt.sha, func(t *testing.T) { + sha, ver := short(tt.sha, tt.ver) + assert.Equal(t, tt.expSha, sha) + assert.Equal(t, tt.expVer, ver) + }) + } +} diff --git a/core/store/dialects/dialects.go b/core/store/dialects/dialects.go new file mode 100644 index 00000000..d250fa1b --- /dev/null +++ b/core/store/dialects/dialects.go @@ -0,0 +1,18 @@ +package dialects + +import ( + // need to make sure pgx driver is registered before opening connection + _ "github.com/jackc/pgx/v4/stdlib" +) + +// DialectName is a compiler enforced type used that maps to database dialect names +type DialectName string + +const ( + // Postgres represents the postgres dialect. + Postgres DialectName = "pgx" + // TransactionWrappedPostgres is useful for tests. + // When the connection is opened, it starts a transaction and all + // operations performed on the DB will be within that transaction. 
+ TransactionWrappedPostgres DialectName = "txdb" +) diff --git a/core/store/doc.go b/core/store/doc.go new file mode 100644 index 00000000..e3032901 --- /dev/null +++ b/core/store/doc.go @@ -0,0 +1,34 @@ +// Package store is used to keep application events in sync between +// the database on the node and the blockchain. +// +// # Config +// +// Config contains the local configuration options that the application +// will adhere to. +// +// # CallerSubscriberClient +// +// This makes use of Go-Ethereum's functions to interact with the blockchain. +// The underlying functions can be viewed here: +// +// go-ethereum/rpc/client.go +// +// # KeyStore +// +// KeyStore also utilizes Go-Ethereum's functions to store encrypted keys +// on the local file system. +// The underlying functions can be viewed here: +// +// go-ethereum/accounts/keystore/keystore.go +// +// # Store +// +// The Store is the persistence layer for the application. It saves +// the application state and most interaction with the node needs to occur +// through the store. +// +// # Tx Manager +// +// The transaction manager is used to synchronize interactions on the +// Ethereum blockchain with the application and database. 
+package store diff --git a/core/store/fixtures/fixtures.sql b/core/store/fixtures/fixtures.sql new file mode 100644 index 00000000..f8c8e250 --- /dev/null +++ b/core/store/fixtures/fixtures.sql @@ -0,0 +1,9 @@ +INSERT INTO users (email, hashed_password, token_hashed_secret, role, created_at, updated_at) VALUES +( + 'apiuser@plugin.test', + '$2a$10$bUMgzjxp1Jtaq4nt5ICPB.fWsfVP6FpdxXB1ZOsI0t9je0JOIkpRW', -- hash of literal string '16charlengthp4SsW0rD1!@#_' + '1eCP/w0llVkchejFaoBpfIGaLRxZK54lTXBCT22YLW+pdzE4Fafy/XO5LoJ2uwHi', + 'admin', + '2019-01-01', + '2019-01-01' +); diff --git a/core/store/fixtures/users_only_fixture.sql b/core/store/fixtures/users_only_fixture.sql new file mode 100644 index 00000000..31918441 --- /dev/null +++ b/core/store/fixtures/users_only_fixture.sql @@ -0,0 +1,9 @@ +INSERT INTO users (email, hashed_password, token_hashed_secret, role, created_at, updated_at) VALUES +( + 'apiuser@plugin.test', + '$2a$10$bUMgzjxp1Jtaq4nt5ICPB.fWsfVP6FpdxXB1ZOsI0t9je0JOIkpRW', -- hash of literal string '16charlengthp4SsW0rD1!@#_' + '1eCP/w0llVkchejFaoBpfIGaLRxZK54lTXBCT22YLW+pdzE4Fafy/XO5LoJ2uwHi', + 'admin', + '2019-01-01', + '2019-01-01' +); \ No newline at end of file diff --git a/core/store/migrate/README.md b/core/store/migrate/README.md new file mode 100644 index 00000000..f70b63cf --- /dev/null +++ b/core/store/migrate/README.md @@ -0,0 +1,3 @@ +# Notes +- Node operators do not always run their migrations with +super user privileges so you cannot use ```CREATE EXTENSION``` \ No newline at end of file diff --git a/core/store/migrate/migrate.go b/core/store/migrate/migrate.go new file mode 100644 index 00000000..163cc45b --- /dev/null +++ b/core/store/migrate/migrate.go @@ -0,0 +1,151 @@ +package migrate + +import ( + "context" + "database/sql" + "embed" + "fmt" + "os" + "strconv" + "strings" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "github.com/pressly/goose/v3" + "gopkg.in/guregu/null.v4" + 
"github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/store/migrate/migrations" // Invoke init() functions within migrations pkg. +) + +//go:embed migrations/*.sql migrations/*.go +var embedMigrations embed.FS + +const MIGRATIONS_DIR string = "migrations" + +func init() { + goose.SetBaseFS(embedMigrations) + goose.SetSequential(true) + goose.SetTableName("goose_migrations") + logMigrations := os.Getenv("CL_LOG_SQL_MIGRATIONS") + verbose, _ := strconv.ParseBool(logMigrations) + goose.SetVerbose(verbose) +} + +// Ensure we migrated from v1 migrations to goose_migrations +func ensureMigrated(ctx context.Context, db *sql.DB, lggr logger.Logger) error { + sqlxDB := pg.WrapDbWithSqlx(db) + var names []string + err := sqlxDB.SelectContext(ctx, &names, `SELECT id FROM migrations`) + if err != nil { + // already migrated + return nil + } + err = pg.SqlTransaction(ctx, db, lggr, func(tx *sqlx.Tx) error { + // ensure that no legacy job specs are present: we _must_ bail out early if + // so because otherwise we run the risk of dropping working jobs if the + // user has not read the release notes + return migrations.CheckNoLegacyJobs(tx.Tx) + }) + if err != nil { + return err + } + + // Look for the squashed migration. If not present, the db needs to be migrated on an earlier release first + found := false + for _, name := range names { + if name == "1611847145" { + found = true + } + } + if !found { + return errors.New("database state is too old. Need to migrate to plugin version 0.9.10 first before upgrading to this version. 
This upgrade is NOT REVERSIBLE, so it is STRONGLY RECOMMENDED that you take a database backup before continuing") + } + + // ensure a goose migrations table exists with it's initial v0 + if _, err = goose.GetDBVersion(db); err != nil { + return err + } + + // insert records for existing migrations + //nolint + sql := fmt.Sprintf(`INSERT INTO %s (version_id, is_applied) VALUES ($1, true);`, goose.TableName()) + return pg.SqlTransaction(ctx, db, lggr, func(tx *sqlx.Tx) error { + for _, name := range names { + var id int64 + // the first migration doesn't follow the naming convention + if name == "1611847145" { + id = 1 + } else { + idx := strings.Index(name, "_") + if idx < 0 { + // old migration we don't care about + continue + } + + id, err = strconv.ParseInt(name[:idx], 10, 64) + if err == nil && id <= 0 { + return errors.New("migration IDs must be greater than zero") + } + } + + if _, err = db.Exec(sql, id); err != nil { + return err + } + } + + _, err = db.Exec("DROP TABLE migrations;") + return err + }) +} + +func Migrate(ctx context.Context, db *sql.DB, lggr logger.Logger) error { + if err := ensureMigrated(ctx, db, lggr); err != nil { + return err + } + // WithAllowMissing is necessary when upgrading from 0.10.14 since it + // includes out-of-order migrations + return goose.Up(db, MIGRATIONS_DIR, goose.WithAllowMissing()) +} + +func Rollback(ctx context.Context, db *sql.DB, lggr logger.Logger, version null.Int) error { + if err := ensureMigrated(ctx, db, lggr); err != nil { + return err + } + if version.Valid { + return goose.DownTo(db, MIGRATIONS_DIR, version.Int64) + } + return goose.Down(db, MIGRATIONS_DIR) +} + +func Current(ctx context.Context, db *sql.DB, lggr logger.Logger) (int64, error) { + if err := ensureMigrated(ctx, db, lggr); err != nil { + return -1, err + } + return goose.EnsureDBVersion(db) +} + +func Status(ctx context.Context, db *sql.DB, lggr logger.Logger) error { + if err := ensureMigrated(ctx, db, lggr); err != nil { + return err + } + 
return goose.Status(db, MIGRATIONS_DIR) +} + +func Create(db *sql.DB, name, migrationType string) error { + return goose.Create(db, "core/store/migrate/migrations", name, migrationType) +} + +// SetMigrationENVVars is used to inject values from config to goose migrations via env. +func SetMigrationENVVars(generalConfig plugin.GeneralConfig) error { + if generalConfig.EVMEnabled() { + err := os.Setenv(env.EVMChainIDNotNullMigration0195, generalConfig.EVMConfigs()[0].ChainID.String()) + if err != nil { + panic(errors.Wrap(err, "failed to set migrations env variables")) + } + } + return nil +} diff --git a/core/store/migrate/migrate_test.go b/core/store/migrate/migrate_test.go new file mode 100644 index 00000000..e61eb13b --- /dev/null +++ b/core/store/migrate/migrate_test.go @@ -0,0 +1,607 @@ +package migrate_test + +import ( + "math/big" + "os" + "testing" + "time" + + "github.com/google/uuid" + "github.com/lib/pq" + "github.com/pressly/goose/v3" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/types" + + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest/heavyweight" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + 
"github.com/goplugin/pluginv3.0/v2/core/store/migrate" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +var migrationDir = "migrations" + +type OffchainReporting2OracleSpec100 struct { + ID int32 `toml:"-"` + ContractID string `toml:"contractID"` + Relay relay.Network `toml:"relay"` + RelayConfig job.JSONConfig `toml:"relayConfig"` + P2PBootstrapPeers pq.StringArray `toml:"p2pBootstrapPeers"` + OCRKeyBundleID null.String `toml:"ocrKeyBundleID"` + MonitoringEndpoint null.String `toml:"monitoringEndpoint"` + TransmitterID null.String `toml:"transmitterID"` + BlockchainTimeout models.Interval `toml:"blockchainTimeout"` + ContractConfigTrackerPollInterval models.Interval `toml:"contractConfigTrackerPollInterval"` + ContractConfigConfirmations uint16 `toml:"contractConfigConfirmations"` + JuelsPerFeeCoinPipeline string `toml:"juelsPerFeeCoinSource"` + CreatedAt time.Time `toml:"-"` + UpdatedAt time.Time `toml:"-"` +} + +func getOCR2Spec100() OffchainReporting2OracleSpec100 { + return OffchainReporting2OracleSpec100{ + ID: 100, + ContractID: "terra_187246hr3781h9fd198fh391g8f924", + Relay: "terra", + RelayConfig: map[string]interface{}{"chainID": float64(1337)}, + P2PBootstrapPeers: pq.StringArray{""}, + OCRKeyBundleID: null.String{}, + MonitoringEndpoint: null.StringFrom("endpoint:plugin.monitor"), + TransmitterID: null.String{}, + BlockchainTimeout: 1337, + ContractConfigTrackerPollInterval: 16, + ContractConfigConfirmations: 32, + JuelsPerFeeCoinPipeline: `ds1 [type=bridge name=voter_turnout]; + ds1_parse [type=jsonparse path="one,two"]; + ds1_multiply [type=multiply times=1.23]; + ds1 -> ds1_parse -> ds1_multiply -> answer1; + answer1 [type=median index=0];`, + } +} + +func TestMigrate_0100_BootstrapConfigs(t *testing.T) { + cfg, db := heavyweight.FullTestDBEmptyV2(t, nil) + lggr := logger.TestLogger(t) + err := goose.UpTo(db.DB, migrationDir, 99) + require.NoError(t, err) + + pipelineORM := pipeline.NewORM(db, lggr, cfg.Database(), 
cfg.JobPipeline().MaxSuccessfulRuns()) + pipelineID, err := pipelineORM.CreateSpec(pipeline.Pipeline{}, 0) + require.NoError(t, err) + pipelineID2, err := pipelineORM.CreateSpec(pipeline.Pipeline{}, 0) + require.NoError(t, err) + nonBootstrapPipelineID, err := pipelineORM.CreateSpec(pipeline.Pipeline{}, 0) + require.NoError(t, err) + newFormatBoostrapPipelineID2, err := pipelineORM.CreateSpec(pipeline.Pipeline{}, 0) + require.NoError(t, err) + + // OCR2 struct at migration v0099 + type OffchainReporting2OracleSpec struct { + OffchainReporting2OracleSpec100 + IsBootstrapPeer bool + } + + // Job struct at migration v0099 + type Job struct { + job.Job + OffchainreportingOracleSpecID *int32 + Offchainreporting2OracleSpecID *int32 + Offchainreporting2OracleSpec *OffchainReporting2OracleSpec + } + + spec := OffchainReporting2OracleSpec{ + OffchainReporting2OracleSpec100: getOCR2Spec100(), + IsBootstrapPeer: true, + } + spec2 := OffchainReporting2OracleSpec{ + OffchainReporting2OracleSpec100: OffchainReporting2OracleSpec100{ + ID: 200, + ContractID: "sol_187246hr3781h9fd198fh391g8f924", + Relay: "sol", + RelayConfig: job.JSONConfig{}, + P2PBootstrapPeers: pq.StringArray{""}, + OCRKeyBundleID: null.String{}, + MonitoringEndpoint: null.StringFrom("endpoint:chain.link.monitor"), + TransmitterID: null.String{}, + BlockchainTimeout: 1338, + ContractConfigTrackerPollInterval: 17, + ContractConfigConfirmations: 33, + JuelsPerFeeCoinPipeline: "", + }, + IsBootstrapPeer: true, + } + + jb := Job{ + Job: job.Job{ + ID: 10, + ExternalJobID: uuid.New(), + Type: job.OffchainReporting2, + SchemaVersion: 1, + PipelineSpecID: pipelineID, + }, + Offchainreporting2OracleSpec: &spec, + Offchainreporting2OracleSpecID: &spec.ID, + } + + jb2 := Job{ + Job: job.Job{ + ID: 20, + ExternalJobID: uuid.New(), + Type: job.OffchainReporting2, + SchemaVersion: 1, + PipelineSpecID: pipelineID2, + }, + Offchainreporting2OracleSpec: &spec2, + Offchainreporting2OracleSpecID: &spec2.ID, + } + + 
nonBootstrapSpec := OffchainReporting2OracleSpec{ + OffchainReporting2OracleSpec100: OffchainReporting2OracleSpec100{ + ID: 101, + P2PBootstrapPeers: pq.StringArray{""}, + ContractID: "empty", + }, + IsBootstrapPeer: false, + } + nonBootstrapJob := Job{ + Job: job.Job{ + ID: 11, + ExternalJobID: uuid.New(), + Type: job.OffchainReporting2, + SchemaVersion: 1, + PipelineSpecID: nonBootstrapPipelineID, + }, + Offchainreporting2OracleSpec: &nonBootstrapSpec, + Offchainreporting2OracleSpecID: &nonBootstrapSpec.ID, + } + + newFormatBoostrapSpec := job.BootstrapSpec{ + ID: 1, + ContractID: "evm_187246hr3781h9fd198fh391g8f924", + Relay: "evm", + RelayConfig: job.JSONConfig{}, + MonitoringEndpoint: null.StringFrom("new:chain.link.monitor"), + BlockchainTimeout: 2448, + ContractConfigTrackerPollInterval: 18, + ContractConfigConfirmations: 34, + } + + newFormatBootstrapJob := Job{ + Job: job.Job{ + ID: 30, + ExternalJobID: uuid.New(), + Type: job.Bootstrap, + SchemaVersion: 1, + PipelineSpecID: newFormatBoostrapPipelineID2, + BootstrapSpecID: &newFormatBoostrapSpec.ID, + BootstrapSpec: &newFormatBoostrapSpec, + }, + } + + sql := `INSERT INTO offchainreporting2_oracle_specs (id, contract_id, relay, relay_config, p2p_bootstrap_peers, ocr_key_bundle_id, transmitter_id, + blockchain_timeout, contract_config_tracker_poll_interval, contract_config_confirmations, juels_per_fee_coin_pipeline, is_bootstrap_peer, + monitoring_endpoint, created_at, updated_at) + VALUES (:id, :contract_id, :relay, :relay_config, :p2p_bootstrap_peers, :ocr_key_bundle_id, :transmitter_id, + :blockchain_timeout, :contract_config_tracker_poll_interval, :contract_config_confirmations, :juels_per_fee_coin_pipeline, :is_bootstrap_peer, + :monitoring_endpoint, NOW(), NOW()) + RETURNING id;` + _, err = db.NamedExec(sql, jb.Offchainreporting2OracleSpec) + require.NoError(t, err) + _, err = db.NamedExec(sql, nonBootstrapJob.Offchainreporting2OracleSpec) + require.NoError(t, err) + _, err = db.NamedExec(sql, 
jb2.Offchainreporting2OracleSpec) + require.NoError(t, err) + + sql = `INSERT INTO bootstrap_specs (contract_id, relay, relay_config, monitoring_endpoint, + blockchain_timeout, contract_config_tracker_poll_interval, + contract_config_confirmations, created_at, updated_at) + VALUES ( :contract_id, :relay, :relay_config, :monitoring_endpoint, + :blockchain_timeout, :contract_config_tracker_poll_interval, + :contract_config_confirmations, NOW(), NOW()) + RETURNING id;` + + _, err = db.NamedExec(sql, newFormatBootstrapJob.BootstrapSpec) + require.NoError(t, err) + + sql = `INSERT INTO jobs (id, pipeline_spec_id, external_job_id, schema_version, type, offchainreporting2_oracle_spec_id, bootstrap_spec_id, created_at) + VALUES (:id, :pipeline_spec_id, :external_job_id, :schema_version, :type, :offchainreporting2_oracle_spec_id, :bootstrap_spec_id, NOW()) + RETURNING *;` + _, err = db.NamedExec(sql, jb) + require.NoError(t, err) + _, err = db.NamedExec(sql, nonBootstrapJob) + require.NoError(t, err) + _, err = db.NamedExec(sql, jb2) + require.NoError(t, err) + _, err = db.NamedExec(sql, newFormatBootstrapJob) + require.NoError(t, err) + + // Migrate up + err = goose.UpByOne(db.DB, migrationDir) + require.NoError(t, err) + + var bootstrapSpecs []job.BootstrapSpec + sql = `SELECT * FROM bootstrap_specs;` + err = db.Select(&bootstrapSpecs, sql) + require.NoError(t, err) + require.Len(t, bootstrapSpecs, 3) + t.Logf("bootstrap count %d\n", len(bootstrapSpecs)) + for _, bootstrapSpec := range bootstrapSpecs { + t.Logf("bootstrap id: %d\n", bootstrapSpec.ID) + } + + var jobs []Job + sql = `SELECT * FROM jobs ORDER BY created_at DESC, id DESC;` + err = db.Select(&jobs, sql) + + require.NoError(t, err) + require.Len(t, jobs, 4) + t.Logf("jobs count %d\n", len(jobs)) + for _, jb := range jobs { + t.Logf("job id: %d with BootstrapSpecID: %d\n", jb.ID, jb.BootstrapSpecID) + } + require.Nil(t, jobs[2].BootstrapSpecID) + + migratedJob := jobs[3] + require.Nil(t, 
migratedJob.Offchainreporting2OracleSpecID) + require.NotNil(t, migratedJob.BootstrapSpecID) + + var resultingBootstrapSpec job.BootstrapSpec + err = db.Get(&resultingBootstrapSpec, `SELECT * FROM bootstrap_specs WHERE id = $1`, *migratedJob.BootstrapSpecID) + migratedJob.BootstrapSpec = &resultingBootstrapSpec + require.NoError(t, err) + + require.Equal(t, &job.BootstrapSpec{ + ID: 2, + ContractID: spec.ContractID, + Relay: spec.Relay, + RelayConfig: spec.RelayConfig, + MonitoringEndpoint: spec.MonitoringEndpoint, + BlockchainTimeout: spec.BlockchainTimeout, + ContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval, + ContractConfigConfirmations: spec.ContractConfigConfirmations, + CreatedAt: migratedJob.BootstrapSpec.CreatedAt, + UpdatedAt: migratedJob.BootstrapSpec.UpdatedAt, + }, migratedJob.BootstrapSpec) + require.Equal(t, job.Bootstrap, migratedJob.Type) + + sql = `SELECT COUNT(*) FROM offchainreporting2_oracle_specs;` + var count int + err = db.Get(&count, sql) + require.NoError(t, err) + require.Equal(t, 1, count) + + // Migrate down + err = goose.Down(db.DB, migrationDir) + require.NoError(t, err) + + var oldJobs []Job + sql = `SELECT * FROM jobs;` + err = db.Select(&oldJobs, sql) + require.NoError(t, err) + require.Len(t, oldJobs, 4) + + revertedJob := oldJobs[0] + require.NotNil(t, revertedJob.Offchainreporting2OracleSpecID) + require.Nil(t, revertedJob.BootstrapSpecID) + + var oldOCR2Spec []OffchainReporting2OracleSpec + sql = `SELECT contract_id, relay, relay_config, p2p_bootstrap_peers, ocr_key_bundle_id, transmitter_id, + blockchain_timeout, contract_config_tracker_poll_interval, contract_config_confirmations, juels_per_fee_coin_pipeline, is_bootstrap_peer, + monitoring_endpoint, created_at, updated_at + FROM offchainreporting2_oracle_specs;` + err = db.Select(&oldOCR2Spec, sql) + require.NoError(t, err) + require.Len(t, oldOCR2Spec, 4) + bootSpec := oldOCR2Spec[2] + + require.Equal(t, spec.Relay, bootSpec.Relay) + require.Equal(t, 
spec.ContractID, bootSpec.ContractID) + require.Equal(t, spec.RelayConfig, bootSpec.RelayConfig) + require.Equal(t, spec.ContractConfigConfirmations, bootSpec.ContractConfigConfirmations) + require.Equal(t, spec.ContractConfigTrackerPollInterval, bootSpec.ContractConfigTrackerPollInterval) + require.Equal(t, spec.BlockchainTimeout, bootSpec.BlockchainTimeout) + require.True(t, bootSpec.IsBootstrapPeer) + + sql = `SELECT COUNT(*) FROM bootstrap_specs;` + err = db.Get(&count, sql) + require.NoError(t, err) + require.Equal(t, 0, count) + + type jobIdAndContractId struct { + ID int32 + ContractID string + } + + var jobsAndContracts []jobIdAndContractId + sql = `SELECT jobs.id, ocr2.contract_id +FROM jobs +INNER JOIN offchainreporting2_oracle_specs as ocr2 +ON jobs.offchainreporting2_oracle_spec_id = ocr2.id` + err = db.Select(&jobsAndContracts, sql) + require.NoError(t, err) + + require.Len(t, jobsAndContracts, 4) + require.Equal(t, jobIdAndContractId{ID: 11, ContractID: "empty"}, jobsAndContracts[0]) + require.Equal(t, jobIdAndContractId{ID: 30, ContractID: "evm_187246hr3781h9fd198fh391g8f924"}, jobsAndContracts[1]) + require.Equal(t, jobIdAndContractId{ID: 10, ContractID: "terra_187246hr3781h9fd198fh391g8f924"}, jobsAndContracts[2]) + require.Equal(t, jobIdAndContractId{ID: 20, ContractID: "sol_187246hr3781h9fd198fh391g8f924"}, jobsAndContracts[3]) + +} + +func TestMigrate_101_GenericOCR2(t *testing.T) { + _, db := heavyweight.FullTestDBEmptyV2(t, nil) + err := goose.UpTo(db.DB, migrationDir, 100) + require.NoError(t, err) + + sql := `INSERT INTO offchainreporting2_oracle_specs (id, contract_id, relay, relay_config, p2p_bootstrap_peers, ocr_key_bundle_id, transmitter_id, + blockchain_timeout, contract_config_tracker_poll_interval, contract_config_confirmations, juels_per_fee_coin_pipeline, + monitoring_endpoint, created_at, updated_at) + VALUES (:id, :contract_id, :relay, :relay_config, :p2p_bootstrap_peers, :ocr_key_bundle_id, :transmitter_id, + :blockchain_timeout, 
:contract_config_tracker_poll_interval, :contract_config_confirmations, :juels_per_fee_coin_pipeline, + :monitoring_endpoint, NOW(), NOW()) + RETURNING id;` + + spec := getOCR2Spec100() + + _, err = db.NamedExec(sql, spec) + require.NoError(t, err) + + err = goose.UpByOne(db.DB, migrationDir) + require.NoError(t, err) + + type PluginValues struct { + PluginType types.OCR2PluginType + PluginConfig job.JSONConfig + } + + var pluginValues PluginValues + + sql = `SELECT plugin_type, plugin_config FROM ocr2_oracle_specs` + err = db.Get(&pluginValues, sql) + require.NoError(t, err) + + require.Equal(t, types.Median, pluginValues.PluginType) + require.Equal(t, job.JSONConfig{"juelsPerFeeCoinSource": spec.JuelsPerFeeCoinPipeline}, pluginValues.PluginConfig) + + err = goose.Down(db.DB, migrationDir) + require.NoError(t, err) + + sql = `SELECT plugin_type, plugin_config FROM offchainreporting2_oracle_specs` + err = db.Get(&pluginValues, sql) + require.Error(t, err) + + var juels string + sql = `SELECT juels_per_fee_coin_pipeline FROM offchainreporting2_oracle_specs` + err = db.Get(&juels, sql) + require.NoError(t, err) + require.Equal(t, spec.JuelsPerFeeCoinPipeline, juels) +} + +func TestMigrate(t *testing.T) { + ctx := testutils.Context(t) + lggr := logger.TestLogger(t) + _, db := heavyweight.FullTestDBEmptyV2(t, nil) + err := goose.UpTo(db.DB, migrationDir, 100) + require.NoError(t, err) + + err = migrate.Status(ctx, db.DB, lggr) + require.NoError(t, err) + + ver, err := migrate.Current(ctx, db.DB, lggr) + require.NoError(t, err) + require.Equal(t, int64(100), ver) + + err = migrate.Migrate(ctx, db.DB, lggr) + require.NoError(t, err) + + err = migrate.Rollback(ctx, db.DB, lggr, null.IntFrom(99)) + require.NoError(t, err) + + ver, err = migrate.Current(ctx, db.DB, lggr) + require.NoError(t, err) + require.Equal(t, int64(99), ver) +} + +func TestSetMigrationENVVars(t *testing.T) { + t.Run("ValidEVMConfig", func(t *testing.T) { + chainID := ubig.New(big.NewInt(1337)) + 
testConfig := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + evmEnabled := true + c.EVM = evmcfg.EVMConfigs{&evmcfg.EVMConfig{ + ChainID: chainID, + Enabled: &evmEnabled, + }} + }) + + require.NoError(t, migrate.SetMigrationENVVars(testConfig)) + + actualChainID := os.Getenv(env.EVMChainIDNotNullMigration0195) + require.Equal(t, actualChainID, chainID.String()) + }) + + t.Run("EVMConfigMissing", func(t *testing.T) { + chainID := ubig.New(big.NewInt(1337)) + testConfig := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { c.EVM = nil }) + + require.NoError(t, migrate.SetMigrationENVVars(testConfig)) + + actualChainID := os.Getenv(env.EVMChainIDNotNullMigration0195) + require.Equal(t, actualChainID, chainID.String()) + }) +} + +func TestDatabaseBackFillWithMigration202(t *testing.T) { + _, db := heavyweight.FullTestDBEmptyV2(t, nil) + + err := goose.UpTo(db.DB, migrationDir, 201) + require.NoError(t, err) + + simulatedOrm := logpoller.NewORM(testutils.SimulatedChainID, db, logger.TestLogger(t), pgtest.NewQConfig(true)) + require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 10, time.Now(), 0), err) + require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 51, time.Now(), 0), err) + require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 90, time.Now(), 0), err) + require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 120, time.Now(), 23), err) + + baseOrm := logpoller.NewORM(big.NewInt(int64(84531)), db, logger.TestLogger(t), pgtest.NewQConfig(true)) + require.NoError(t, baseOrm.InsertBlock(testutils.Random32Byte(), 400, time.Now(), 0), err) + + klaytnOrm := logpoller.NewORM(big.NewInt(int64(1001)), db, logger.TestLogger(t), pgtest.NewQConfig(true)) + require.NoError(t, klaytnOrm.InsertBlock(testutils.Random32Byte(), 100, time.Now(), 0), err) + + err = goose.UpTo(db.DB, migrationDir, 202) + require.NoError(t, err) + + tests := []struct { + name string + 
blockNumber int64 + expectedFinalizedBlock int64 + orm *logpoller.DbORM + }{ + { + name: "last finalized block not changed if finality is too deep", + blockNumber: 10, + expectedFinalizedBlock: 0, + orm: simulatedOrm, + }, + { + name: "last finalized block is updated for first block", + blockNumber: 51, + expectedFinalizedBlock: 1, + orm: simulatedOrm, + }, + { + name: "last finalized block is updated", + blockNumber: 90, + expectedFinalizedBlock: 40, + orm: simulatedOrm, + }, + { + name: "last finalized block is not changed when finality is set", + blockNumber: 120, + expectedFinalizedBlock: 23, + orm: simulatedOrm, + }, + { + name: "use non default finality depth for chain 84531", + blockNumber: 400, + expectedFinalizedBlock: 200, + orm: baseOrm, + }, + { + name: "use default finality depth for chain 1001", + blockNumber: 100, + expectedFinalizedBlock: 99, + orm: klaytnOrm, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + block, err := tt.orm.SelectBlockByNumber(tt.blockNumber) + require.NoError(t, err) + require.Equal(t, tt.expectedFinalizedBlock, block.FinalizedBlockNumber) + }) + } +} + +func TestNoTriggers(t *testing.T) { + _, db := heavyweight.FullTestDBEmptyV2(t, nil) + + assert_num_triggers := func(expected int) { + + row := db.DB.QueryRow("select count(*) from information_schema.triggers") + var count int + err := row.Scan(&count) + + require.NoError(t, err) + require.Equal(t, expected, count) + } + + // if you find yourself here and are tempted to add a trigger, something has gone wrong + // and you should talk to the foundations team before proceeding + assert_num_triggers(0) + + // version prior to removal of all triggers + v := 217 + err := goose.UpTo(db.DB, migrationDir, int64(v)) + require.NoError(t, err) + assert_num_triggers(1) + +} + +func BenchmarkBackfillingRecordsWithMigration202(b *testing.B) { + previousMigration := int64(201) + backfillMigration := int64(202) + chainCount := 2 + // By default, log poller keeps 
up to 100_000 blocks in the database, this is the pessimistic case + maxLogsSize := 100_000 + // Disable Goose logging for benchmarking + goose.SetLogger(goose.NopLogger()) + _, db := heavyweight.FullTestDBEmptyV2(b, nil) + + err := goose.UpTo(db.DB, migrationDir, previousMigration) + require.NoError(b, err) + + q := pg.NewQ(db, logger.NullLogger, pgtest.NewQConfig(true)) + for j := 0; j < chainCount; j++ { + // Insert 100_000 block to database, can't do all at once, so batching by 10k + var blocks []logpoller.LogPollerBlock + for i := 0; i < maxLogsSize; i++ { + blocks = append(blocks, logpoller.LogPollerBlock{ + EvmChainId: ubig.NewI(int64(j + 1)), + BlockHash: testutils.Random32Byte(), + BlockNumber: int64(i + 1000), + FinalizedBlockNumber: 0, + }) + } + batchInsertSize := 10_000 + for i := 0; i < maxLogsSize; i += batchInsertSize { + start, end := i, i+batchInsertSize + if end > maxLogsSize { + end = maxLogsSize + } + + err = q.ExecQNamed(` + INSERT INTO evm.log_poller_blocks + (evm_chain_id, block_hash, block_number, finalized_block_number, block_timestamp, created_at) + VALUES + (:evm_chain_id, :block_hash, :block_number, :finalized_block_number, NOW(), NOW()) + ON CONFLICT DO NOTHING`, blocks[start:end]) + require.NoError(b, err) + } + } + + b.ResetTimer() + + // 1. Measure time of migration 200 + // 2. Goose down to 199 + // 3. 
Reset last_finalized_block_number to 0 + // Repeat 1-3 + for i := 0; i < b.N; i++ { + b.StartTimer() + err = goose.UpTo(db.DB, migrationDir, backfillMigration) + require.NoError(b, err) + b.StopTimer() + + // Cleanup + err = goose.DownTo(db.DB, migrationDir, previousMigration) + require.NoError(b, err) + + err = q.ExecQ(` + UPDATE evm.log_poller_blocks + SET finalized_block_number = 0`) + require.NoError(b, err) + } +} diff --git a/core/store/migrate/migrations/0001_initial.sql b/core/store/migrate/migrations/0001_initial.sql new file mode 100644 index 00000000..b7f1d9c1 --- /dev/null +++ b/core/store/migrate/migrations/0001_initial.sql @@ -0,0 +1,3046 @@ +-- +goose Up +-- +goose StatementBegin + +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 11.6 (Debian 11.6-1.pgdg90+1) +-- Dumped by pg_dump version 13.1 + +-- +-- Name: eth_tx_attempts_state; Type: TYPE; Schema: public; Owner: postgres +-- + +CREATE TYPE public.eth_tx_attempts_state AS ENUM ( + 'in_progress', + 'insufficient_eth', + 'broadcast' +); + + + + +-- +-- Name: eth_txes_state; Type: TYPE; Schema: public; Owner: postgres +-- + +CREATE TYPE public.eth_txes_state AS ENUM ( + 'unstarted', + 'in_progress', + 'fatal_error', + 'unconfirmed', + 'confirmed_missing_receipt', + 'confirmed' +); + + + + +-- +-- Name: run_status; Type: TYPE; Schema: public; Owner: postgres +-- + +CREATE TYPE public.run_status AS ENUM ( + 'unstarted', + 'in_progress', + 'pending_incoming_confirmations', + 'pending_outgoing_confirmations', + 'pending_connection', + 'pending_bridge', + 'pending_sleep', + 'errored', + 'completed', + 'cancelled' +); + + + + +-- +-- Name: notifyethtxinsertion(); Type: FUNCTION; Schema: public; Owner: postgres +-- + +CREATE FUNCTION public.notifyethtxinsertion() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + PERFORM pg_notify('insert_on_eth_txes'::text, NOW()::text); + RETURN NULL; + END + $$; + + + + +-- +-- Name: notifyjobcreated(); Type: FUNCTION; Schema: public; Owner: 
postgres +-- + +CREATE FUNCTION public.notifyjobcreated() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + PERFORM pg_notify('insert_on_jobs', NEW.id::text); + RETURN NEW; + END + $$; + + + + +-- +-- Name: notifyjobdeleted(); Type: FUNCTION; Schema: public; Owner: postgres +-- + +CREATE FUNCTION public.notifyjobdeleted() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + PERFORM pg_notify('delete_from_jobs', OLD.id::text); + RETURN OLD; + END + $$; + + + + +-- +-- Name: notifypipelinerunstarted(); Type: FUNCTION; Schema: public; Owner: postgres +-- + +CREATE FUNCTION public.notifypipelinerunstarted() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + IF NEW.finished_at IS NULL THEN + PERFORM pg_notify('pipeline_run_started', NEW.id::text); + END IF; + RETURN NEW; + END + $$; + + + + +SET default_tablespace = ''; + +-- +-- Name: bridge_types; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.bridge_types ( + name text NOT NULL, + url text NOT NULL, + confirmations bigint DEFAULT 0 NOT NULL, + incoming_token_hash text NOT NULL, + salt text NOT NULL, + outgoing_token text NOT NULL, + minimum_contract_payment character varying(255), + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); + + + + +-- +-- Name: configurations; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.configurations ( + id bigint NOT NULL, + name text NOT NULL, + value text NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + deleted_at timestamp with time zone +); + + + + +-- +-- Name: configurations_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.configurations_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: configurations_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.configurations_id_seq OWNED BY 
public.configurations.id; + + +-- +-- Name: direct_request_specs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.direct_request_specs ( + id integer NOT NULL, + contract_address bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + on_chain_job_spec_id bytea NOT NULL, + CONSTRAINT direct_request_specs_on_chain_job_spec_id_check CHECK ((octet_length(on_chain_job_spec_id) = 32)), + CONSTRAINT eth_request_event_specs_contract_address_check CHECK ((octet_length(contract_address) = 20)) +); + + + + +-- +-- Name: encrypted_ocr_key_bundles; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.encrypted_ocr_key_bundles ( + id bytea NOT NULL, + on_chain_signing_address bytea NOT NULL, + off_chain_public_key bytea NOT NULL, + encrypted_private_keys jsonb NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + config_public_key bytea NOT NULL, + deleted_at timestamp with time zone +); + + + + +-- +-- Name: encrypted_p2p_keys; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.encrypted_p2p_keys ( + id integer NOT NULL, + peer_id text NOT NULL, + pub_key bytea NOT NULL, + encrypted_priv_key jsonb NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + deleted_at timestamp with time zone, + CONSTRAINT chk_pub_key_length CHECK ((octet_length(pub_key) = 32)) +); + + + + +-- +-- Name: encrypted_p2p_keys_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.encrypted_p2p_keys_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: encrypted_p2p_keys_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.encrypted_p2p_keys_id_seq OWNED BY public.encrypted_p2p_keys.id; + + +-- +-- Name: encrypted_vrf_keys; Type: TABLE; Schema: 
public; Owner: postgres +-- + +CREATE TABLE public.encrypted_vrf_keys ( + public_key character varying(68) NOT NULL, + vrf_key text NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + deleted_at timestamp with time zone +); + + + + +-- +-- Name: encumbrances; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.encumbrances ( + id bigint NOT NULL, + payment numeric(78,0), + expiration bigint, + end_at timestamp with time zone, + oracles text, + aggregator bytea NOT NULL, + agg_initiate_job_selector bytea NOT NULL, + agg_fulfill_selector bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); + + + + +-- +-- Name: encumbrances_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.encumbrances_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: encumbrances_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.encumbrances_id_seq OWNED BY public.encumbrances.id; + + +-- +-- Name: eth_receipts; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.eth_receipts ( + id bigint NOT NULL, + tx_hash bytea NOT NULL, + block_hash bytea NOT NULL, + block_number bigint NOT NULL, + transaction_index bigint NOT NULL, + receipt jsonb NOT NULL, + created_at timestamp with time zone NOT NULL, + CONSTRAINT chk_hash_length CHECK (((octet_length(tx_hash) = 32) AND (octet_length(block_hash) = 32))) +); + + + + +-- +-- Name: eth_receipts_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.eth_receipts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: eth_receipts_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.eth_receipts_id_seq OWNED BY public.eth_receipts.id; + + +-- +-- Name: 
eth_request_event_specs_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.eth_request_event_specs_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: eth_request_event_specs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.eth_request_event_specs_id_seq OWNED BY public.direct_request_specs.id; + + +-- +-- Name: eth_task_run_txes; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.eth_task_run_txes ( + task_run_id uuid NOT NULL, + eth_tx_id bigint NOT NULL +); + + + + +-- +-- Name: eth_tx_attempts; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.eth_tx_attempts ( + id bigint NOT NULL, + eth_tx_id bigint NOT NULL, + gas_price numeric(78,0) NOT NULL, + signed_raw_tx bytea NOT NULL, + hash bytea NOT NULL, + broadcast_before_block_num bigint, + state public.eth_tx_attempts_state NOT NULL, + created_at timestamp with time zone NOT NULL, + CONSTRAINT chk_cannot_broadcast_before_block_zero CHECK (((broadcast_before_block_num IS NULL) OR (broadcast_before_block_num > 0))), + CONSTRAINT chk_eth_tx_attempts_fsm CHECK ((((state = ANY (ARRAY['in_progress'::public.eth_tx_attempts_state, 'insufficient_eth'::public.eth_tx_attempts_state])) AND (broadcast_before_block_num IS NULL)) OR (state = 'broadcast'::public.eth_tx_attempts_state))), + CONSTRAINT chk_hash_length CHECK ((octet_length(hash) = 32)), + CONSTRAINT chk_signed_raw_tx_present CHECK ((octet_length(signed_raw_tx) > 0)) +); + + + + +-- +-- Name: eth_tx_attempts_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.eth_tx_attempts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: eth_tx_attempts_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.eth_tx_attempts_id_seq OWNED BY public.eth_tx_attempts.id; + + +-- +-- 
Name: eth_txes; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.eth_txes ( + id bigint NOT NULL, + nonce bigint, + from_address bytea NOT NULL, + to_address bytea NOT NULL, + encoded_payload bytea NOT NULL, + value numeric(78,0) NOT NULL, + gas_limit bigint NOT NULL, + error text, + broadcast_at timestamp with time zone, + created_at timestamp with time zone NOT NULL, + state public.eth_txes_state DEFAULT 'unstarted'::public.eth_txes_state NOT NULL, + CONSTRAINT chk_broadcast_at_is_sane CHECK ((broadcast_at > '2019-01-01 00:00:00+00'::timestamp with time zone)), + CONSTRAINT chk_error_cannot_be_empty CHECK (((error IS NULL) OR (length(error) > 0))), + CONSTRAINT chk_eth_txes_fsm CHECK ((((state = 'unstarted'::public.eth_txes_state) AND (nonce IS NULL) AND (error IS NULL) AND (broadcast_at IS NULL)) OR ((state = 'in_progress'::public.eth_txes_state) AND (nonce IS NOT NULL) AND (error IS NULL) AND (broadcast_at IS NULL)) OR ((state = 'fatal_error'::public.eth_txes_state) AND (nonce IS NULL) AND (error IS NOT NULL) AND (broadcast_at IS NULL)) OR ((state = 'unconfirmed'::public.eth_txes_state) AND (nonce IS NOT NULL) AND (error IS NULL) AND (broadcast_at IS NOT NULL)) OR ((state = 'confirmed'::public.eth_txes_state) AND (nonce IS NOT NULL) AND (error IS NULL) AND (broadcast_at IS NOT NULL)) OR ((state = 'confirmed_missing_receipt'::public.eth_txes_state) AND (nonce IS NOT NULL) AND (error IS NULL) AND (broadcast_at IS NOT NULL)))), + CONSTRAINT chk_from_address_length CHECK ((octet_length(from_address) = 20)), + CONSTRAINT chk_to_address_length CHECK ((octet_length(to_address) = 20)) +); + + + + +-- +-- Name: eth_txes_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.eth_txes_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: eth_txes_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.eth_txes_id_seq OWNED BY 
public.eth_txes.id; + + +-- +-- Name: external_initiators; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.external_initiators ( + id bigint NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + deleted_at timestamp with time zone, + name text NOT NULL, + url text, + access_key text NOT NULL, + salt text NOT NULL, + hashed_secret text NOT NULL, + outgoing_secret text NOT NULL, + outgoing_token text NOT NULL +); + + + + +-- +-- Name: external_initiators_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.external_initiators_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: external_initiators_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.external_initiators_id_seq OWNED BY public.external_initiators.id; + + +-- +-- Name: flux_monitor_round_stats; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.flux_monitor_round_stats ( + id bigint NOT NULL, + aggregator bytea NOT NULL, + round_id integer NOT NULL, + num_new_round_logs integer DEFAULT 0 NOT NULL, + num_submissions integer DEFAULT 0 NOT NULL, + job_run_id uuid +); + + + + +-- +-- Name: flux_monitor_round_stats_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.flux_monitor_round_stats_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: flux_monitor_round_stats_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.flux_monitor_round_stats_id_seq OWNED BY public.flux_monitor_round_stats.id; + + +-- +-- Name: flux_monitor_specs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.flux_monitor_specs ( + id integer NOT NULL, + contract_address bytea NOT NULL, + "precision" integer, + threshold real, + absolute_threshold real, + 
poll_timer_period bigint, + poll_timer_disabled boolean, + idle_timer_period bigint, + idle_timer_disabled boolean, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT flux_monitor_specs_check CHECK ((poll_timer_disabled OR (poll_timer_period > 0))), + CONSTRAINT flux_monitor_specs_check1 CHECK ((idle_timer_disabled OR (idle_timer_period > 0))), + CONSTRAINT flux_monitor_specs_contract_address_check CHECK ((octet_length(contract_address) = 20)) +); + + + + +-- +-- Name: flux_monitor_specs_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.flux_monitor_specs_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: flux_monitor_specs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.flux_monitor_specs_id_seq OWNED BY public.flux_monitor_specs.id; + + +-- +-- Name: heads; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.heads ( + id bigint NOT NULL, + hash bytea NOT NULL, + number bigint NOT NULL, + parent_hash bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + "timestamp" timestamp with time zone NOT NULL, + CONSTRAINT chk_hash_size CHECK ((octet_length(hash) = 32)), + CONSTRAINT chk_parent_hash_size CHECK ((octet_length(parent_hash) = 32)) +); + + + + +-- +-- Name: heads_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.heads_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: heads_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.heads_id_seq OWNED BY public.heads.id; + + +-- +-- Name: initiators; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.initiators ( + id bigint NOT NULL, + job_spec_id uuid NOT NULL, + type text NOT NULL, + created_at timestamp with time zone NOT NULL, + deleted_at 
timestamp with time zone, + schedule text, + "time" timestamp with time zone, + ran boolean, + address bytea, + requesters text, + name character varying(255), + params jsonb, + from_block numeric(78,0), + to_block numeric(78,0), + topics jsonb, + request_data text, + feeds text, + threshold double precision, + "precision" smallint, + polling_interval bigint, + absolute_threshold double precision, + updated_at timestamp with time zone NOT NULL, + poll_timer jsonb, + idle_timer jsonb +); + + + + +-- +-- Name: initiators_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.initiators_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: initiators_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.initiators_id_seq OWNED BY public.initiators.id; + + +-- +-- Name: job_runs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.job_runs ( + result_id bigint, + run_request_id bigint, + status public.run_status DEFAULT 'unstarted'::public.run_status NOT NULL, + created_at timestamp with time zone NOT NULL, + finished_at timestamp with time zone, + updated_at timestamp with time zone NOT NULL, + initiator_id bigint NOT NULL, + deleted_at timestamp with time zone, + creation_height numeric(78,0), + observed_height numeric(78,0), + payment numeric(78,0), + job_spec_id uuid NOT NULL, + id uuid NOT NULL +); + + + + +-- +-- Name: job_spec_errors; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.job_spec_errors ( + id bigint NOT NULL, + job_spec_id uuid NOT NULL, + description text NOT NULL, + occurrences integer DEFAULT 1 NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); + + + + +-- +-- Name: job_spec_errors_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.job_spec_errors_id_seq + START WITH 1 + INCREMENT BY 1 
+ NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: job_spec_errors_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.job_spec_errors_id_seq OWNED BY public.job_spec_errors.id; + + +-- +-- Name: job_spec_errors_v2; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.job_spec_errors_v2 ( + id bigint NOT NULL, + job_id integer, + description text NOT NULL, + occurrences integer DEFAULT 1 NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); + + + + +-- +-- Name: job_spec_errors_v2_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.job_spec_errors_v2_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: job_spec_errors_v2_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.job_spec_errors_v2_id_seq OWNED BY public.job_spec_errors_v2.id; + + +-- +-- Name: job_specs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.job_specs ( + created_at timestamp with time zone NOT NULL, + start_at timestamp with time zone, + end_at timestamp with time zone, + deleted_at timestamp with time zone, + min_payment character varying(255), + id uuid NOT NULL, + updated_at timestamp with time zone NOT NULL, + name character varying(255) +); + + + + +-- +-- Name: jobs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.jobs ( + id integer NOT NULL, + pipeline_spec_id integer, + offchainreporting_oracle_spec_id integer, + name character varying(255), + schema_version integer NOT NULL, + type character varying(255) NOT NULL, + max_task_duration bigint, + direct_request_spec_id integer, + flux_monitor_spec_id integer, + CONSTRAINT chk_only_one_spec CHECK ((num_nonnulls(offchainreporting_oracle_spec_id, direct_request_spec_id, flux_monitor_spec_id) = 1)), + CONSTRAINT chk_schema_version CHECK 
((schema_version > 0)), + CONSTRAINT chk_type CHECK (((type)::text <> ''::text)) +); + + + + +-- +-- Name: jobs_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.jobs_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: jobs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.jobs_id_seq OWNED BY public.jobs.id; + + +-- +-- Name: keys; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.keys ( + address bytea NOT NULL, + json jsonb NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + next_nonce bigint, + id integer NOT NULL, + last_used timestamp with time zone, + is_funding boolean DEFAULT false NOT NULL, + deleted_at timestamp with time zone, + CONSTRAINT chk_address_length CHECK ((octet_length(address) = 20)) +); + + + + +-- +-- Name: keys_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.keys_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: keys_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.keys_id_seq OWNED BY public.keys.id; + + +-- +-- Name: log_consumptions; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.log_consumptions ( + id bigint NOT NULL, + block_hash bytea NOT NULL, + log_index bigint NOT NULL, + job_id uuid, + created_at timestamp without time zone NOT NULL, + block_number bigint, + job_id_v2 integer, + CONSTRAINT chk_log_consumptions_exactly_one_job_id CHECK ((((job_id IS NOT NULL) AND (job_id_v2 IS NULL)) OR ((job_id_v2 IS NOT NULL) AND (job_id IS NULL)))) +); + + + + +-- +-- Name: log_consumptions_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.log_consumptions_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + 
CACHE 1; + + + + +-- +-- Name: log_consumptions_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.log_consumptions_id_seq OWNED BY public.log_consumptions.id; + + + +-- +-- Name: offchainreporting_contract_configs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.offchainreporting_contract_configs ( + offchainreporting_oracle_spec_id integer NOT NULL, + config_digest bytea NOT NULL, + signers bytea[], + transmitters bytea[], + threshold integer, + encoded_config_version bigint, + encoded bytea, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT offchainreporting_contract_configs_config_digest_check CHECK ((octet_length(config_digest) = 16)) +); + + + + +-- +-- Name: offchainreporting_oracle_specs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.offchainreporting_oracle_specs ( + id integer NOT NULL, + contract_address bytea NOT NULL, + p2p_peer_id text, + p2p_bootstrap_peers text[], + is_bootstrap_peer boolean NOT NULL, + encrypted_ocr_key_bundle_id bytea, + monitoring_endpoint text, + transmitter_address bytea, + observation_timeout bigint, + blockchain_timeout bigint, + contract_config_tracker_subscribe_interval bigint, + contract_config_tracker_poll_interval bigint, + contract_config_confirmations integer, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT chk_contract_address_length CHECK ((octet_length(contract_address) = 20)) +); + + + + +-- +-- Name: offchainreporting_oracle_specs_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.offchainreporting_oracle_specs_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: offchainreporting_oracle_specs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE 
public.offchainreporting_oracle_specs_id_seq OWNED BY public.offchainreporting_oracle_specs.id; + + +-- +-- Name: offchainreporting_pending_transmissions; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.offchainreporting_pending_transmissions ( + offchainreporting_oracle_spec_id integer NOT NULL, + config_digest bytea NOT NULL, + epoch bigint NOT NULL, + round bigint NOT NULL, + "time" timestamp with time zone NOT NULL, + median numeric(78,0) NOT NULL, + serialized_report bytea NOT NULL, + rs bytea[] NOT NULL, + ss bytea[] NOT NULL, + vs bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT offchainreporting_pending_transmissions_config_digest_check CHECK ((octet_length(config_digest) = 16)) +); + + + + +-- +-- Name: offchainreporting_persistent_states; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.offchainreporting_persistent_states ( + offchainreporting_oracle_spec_id integer NOT NULL, + config_digest bytea NOT NULL, + epoch bigint NOT NULL, + highest_sent_epoch bigint NOT NULL, + highest_received_epoch bigint[] NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT offchainreporting_persistent_states_config_digest_check CHECK ((octet_length(config_digest) = 16)) +); + + + + +-- +-- Name: p2p_peers; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.p2p_peers ( + id text NOT NULL, + addr text NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + peer_id text NOT NULL +); + + + + +-- +-- Name: pipeline_runs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.pipeline_runs ( + id bigint NOT NULL, + pipeline_spec_id integer NOT NULL, + meta jsonb DEFAULT '{}'::jsonb NOT NULL, + created_at timestamp with time zone NOT NULL, + finished_at timestamp with time zone, + errors jsonb, + outputs 
jsonb, + CONSTRAINT pipeline_runs_check CHECK ((((outputs IS NULL) AND (errors IS NULL) AND (finished_at IS NULL)) OR ((outputs IS NOT NULL) AND (errors IS NOT NULL) AND (finished_at IS NOT NULL)))) +); + + + + +-- +-- Name: pipeline_runs_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.pipeline_runs_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: pipeline_runs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.pipeline_runs_id_seq OWNED BY public.pipeline_runs.id; + + +-- +-- Name: pipeline_specs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.pipeline_specs ( + id integer NOT NULL, + dot_dag_source text NOT NULL, + created_at timestamp with time zone NOT NULL, + max_task_duration bigint +); + + + + +-- +-- Name: pipeline_specs_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.pipeline_specs_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: pipeline_specs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.pipeline_specs_id_seq OWNED BY public.pipeline_specs.id; + + +-- +-- Name: pipeline_task_runs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.pipeline_task_runs ( + id bigint NOT NULL, + pipeline_run_id bigint NOT NULL, + type text NOT NULL, + index integer DEFAULT 0 NOT NULL, + output jsonb, + error text, + pipeline_task_spec_id integer NOT NULL, + created_at timestamp with time zone NOT NULL, + finished_at timestamp with time zone, + CONSTRAINT chk_pipeline_task_run_fsm CHECK ((((type <> 'result'::text) AND (((finished_at IS NULL) AND (error IS NULL) AND (output IS NULL)) OR ((finished_at IS NOT NULL) AND (NOT ((error IS NOT NULL) AND (output IS NOT NULL)))))) OR ((type = 'result'::text) AND (((output IS NULL) AND (error IS NULL) AND 
(finished_at IS NULL)) OR ((output IS NOT NULL) AND (error IS NOT NULL) AND (finished_at IS NOT NULL)))))) +); + + + + +-- +-- Name: pipeline_task_runs_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.pipeline_task_runs_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: pipeline_task_runs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.pipeline_task_runs_id_seq OWNED BY public.pipeline_task_runs.id; + + +-- +-- Name: pipeline_task_specs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.pipeline_task_specs ( + id integer NOT NULL, + dot_id text NOT NULL, + pipeline_spec_id integer NOT NULL, + type text NOT NULL, + json jsonb NOT NULL, + index integer DEFAULT 0 NOT NULL, + successor_id integer, + created_at timestamp with time zone NOT NULL +); + + + + +-- +-- Name: COLUMN pipeline_task_specs.dot_id; Type: COMMENT; Schema: public; Owner: postgres +-- + +COMMENT ON COLUMN public.pipeline_task_specs.dot_id IS 'Dot ID is included to help in debugging'; + + +-- +-- Name: pipeline_task_specs_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.pipeline_task_specs_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: pipeline_task_specs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.pipeline_task_specs_id_seq OWNED BY public.pipeline_task_specs.id; + + +-- +-- Name: run_requests; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.run_requests ( + id bigint NOT NULL, + request_id bytea, + tx_hash bytea, + requester bytea, + created_at timestamp with time zone NOT NULL, + block_hash bytea, + payment numeric(78,0), + request_params jsonb DEFAULT '{}'::jsonb NOT NULL +); + + + + +-- +-- Name: run_requests_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + 
+CREATE SEQUENCE public.run_requests_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: run_requests_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.run_requests_id_seq OWNED BY public.run_requests.id; + + +-- +-- Name: run_results; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.run_results ( + id bigint NOT NULL, + data jsonb, + error_message text, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); + + + + +-- +-- Name: run_results_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.run_results_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: run_results_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.run_results_id_seq OWNED BY public.run_results.id; + + +-- +-- Name: service_agreements; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.service_agreements ( + id text NOT NULL, + created_at timestamp with time zone NOT NULL, + encumbrance_id bigint, + request_body text, + signature character varying(255), + job_spec_id uuid, + updated_at timestamp with time zone NOT NULL +); + + + + +-- +-- Name: sessions; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.sessions ( + id text NOT NULL, + last_used timestamp with time zone, + created_at timestamp with time zone NOT NULL +); + + + + +-- +-- Name: sync_events; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.sync_events ( + id bigint NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + body text NOT NULL +); + + + + +-- +-- Name: sync_events_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.sync_events_id_seq + START WITH 1 + INCREMENT BY 1 + 
NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: sync_events_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.sync_events_id_seq OWNED BY public.sync_events.id; + + +-- +-- Name: task_runs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.task_runs ( + result_id bigint, + status public.run_status DEFAULT 'unstarted'::public.run_status NOT NULL, + task_spec_id bigint NOT NULL, + minimum_confirmations bigint, + created_at timestamp with time zone NOT NULL, + confirmations bigint, + job_run_id uuid NOT NULL, + id uuid NOT NULL, + updated_at timestamp with time zone NOT NULL +); + + + + +-- +-- Name: task_specs; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.task_specs ( + id bigint NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + deleted_at timestamp with time zone, + type text NOT NULL, + confirmations bigint, + params jsonb, + job_spec_id uuid NOT NULL +); + + + + +-- +-- Name: task_specs_id_seq; Type: SEQUENCE; Schema: public; Owner: postgres +-- + +CREATE SEQUENCE public.task_specs_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + + +-- +-- Name: task_specs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: postgres +-- + +ALTER SEQUENCE public.task_specs_id_seq OWNED BY public.task_specs.id; + + +-- +-- Name: users; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.users ( + email text NOT NULL, + hashed_password text, + created_at timestamp with time zone NOT NULL, + token_key text, + token_salt text, + token_hashed_secret text, + updated_at timestamp with time zone NOT NULL, + token_secret text +); + + + + +-- +-- Name: configurations id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.configurations ALTER COLUMN id SET DEFAULT nextval('public.configurations_id_seq'::regclass); + + +-- +-- Name: 
direct_request_specs id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.direct_request_specs ALTER COLUMN id SET DEFAULT nextval('public.eth_request_event_specs_id_seq'::regclass); + + +-- +-- Name: encrypted_p2p_keys id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.encrypted_p2p_keys ALTER COLUMN id SET DEFAULT nextval('public.encrypted_p2p_keys_id_seq'::regclass); + + +-- +-- Name: encumbrances id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.encumbrances ALTER COLUMN id SET DEFAULT nextval('public.encumbrances_id_seq'::regclass); + + +-- +-- Name: eth_receipts id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.eth_receipts ALTER COLUMN id SET DEFAULT nextval('public.eth_receipts_id_seq'::regclass); + + +-- +-- Name: eth_tx_attempts id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.eth_tx_attempts ALTER COLUMN id SET DEFAULT nextval('public.eth_tx_attempts_id_seq'::regclass); + + +-- +-- Name: eth_txes id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.eth_txes ALTER COLUMN id SET DEFAULT nextval('public.eth_txes_id_seq'::regclass); + + +-- +-- Name: external_initiators id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.external_initiators ALTER COLUMN id SET DEFAULT nextval('public.external_initiators_id_seq'::regclass); + + +-- +-- Name: flux_monitor_round_stats id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.flux_monitor_round_stats ALTER COLUMN id SET DEFAULT nextval('public.flux_monitor_round_stats_id_seq'::regclass); + + +-- +-- Name: flux_monitor_specs id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.flux_monitor_specs ALTER COLUMN id SET DEFAULT nextval('public.flux_monitor_specs_id_seq'::regclass); + + +-- +-- Name: heads id; Type: DEFAULT; Schema: public; Owner: 
postgres +-- + +ALTER TABLE ONLY public.heads ALTER COLUMN id SET DEFAULT nextval('public.heads_id_seq'::regclass); + + +-- +-- Name: initiators id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.initiators ALTER COLUMN id SET DEFAULT nextval('public.initiators_id_seq'::regclass); + + +-- +-- Name: job_spec_errors id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.job_spec_errors ALTER COLUMN id SET DEFAULT nextval('public.job_spec_errors_id_seq'::regclass); + + +-- +-- Name: job_spec_errors_v2 id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.job_spec_errors_v2 ALTER COLUMN id SET DEFAULT nextval('public.job_spec_errors_v2_id_seq'::regclass); + + +-- +-- Name: jobs id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.jobs ALTER COLUMN id SET DEFAULT nextval('public.jobs_id_seq'::regclass); + + +-- +-- Name: keys id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.keys ALTER COLUMN id SET DEFAULT nextval('public.keys_id_seq'::regclass); + + +-- +-- Name: log_consumptions id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.log_consumptions ALTER COLUMN id SET DEFAULT nextval('public.log_consumptions_id_seq'::regclass); + + +-- +-- Name: offchainreporting_oracle_specs id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.offchainreporting_oracle_specs ALTER COLUMN id SET DEFAULT nextval('public.offchainreporting_oracle_specs_id_seq'::regclass); + + +-- +-- Name: pipeline_runs id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_runs ALTER COLUMN id SET DEFAULT nextval('public.pipeline_runs_id_seq'::regclass); + + +-- +-- Name: pipeline_specs id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_specs ALTER COLUMN id SET DEFAULT nextval('public.pipeline_specs_id_seq'::regclass); + + +-- 
+-- Name: pipeline_task_runs id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_task_runs ALTER COLUMN id SET DEFAULT nextval('public.pipeline_task_runs_id_seq'::regclass); + + +-- +-- Name: pipeline_task_specs id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_task_specs ALTER COLUMN id SET DEFAULT nextval('public.pipeline_task_specs_id_seq'::regclass); + + +-- +-- Name: run_requests id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.run_requests ALTER COLUMN id SET DEFAULT nextval('public.run_requests_id_seq'::regclass); + + +-- +-- Name: run_results id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.run_results ALTER COLUMN id SET DEFAULT nextval('public.run_results_id_seq'::regclass); + + +-- +-- Name: sync_events id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.sync_events ALTER COLUMN id SET DEFAULT nextval('public.sync_events_id_seq'::regclass); + + +-- +-- Name: task_specs id; Type: DEFAULT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.task_specs ALTER COLUMN id SET DEFAULT nextval('public.task_specs_id_seq'::regclass); + + +-- +-- Name: bridge_types bridge_types_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.bridge_types + ADD CONSTRAINT bridge_types_pkey PRIMARY KEY (name); + + +-- +-- Name: configurations configurations_name_key; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.configurations + ADD CONSTRAINT configurations_name_key UNIQUE (name); + + +-- +-- Name: configurations configurations_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.configurations + ADD CONSTRAINT configurations_pkey PRIMARY KEY (id); + + +-- +-- Name: direct_request_specs direct_request_specs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY 
public.direct_request_specs + ADD CONSTRAINT direct_request_specs_pkey PRIMARY KEY (id); + + +-- +-- Name: encrypted_ocr_key_bundles encrypted_ocr_key_bundles_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.encrypted_ocr_key_bundles + ADD CONSTRAINT encrypted_ocr_key_bundles_pkey PRIMARY KEY (id); + + +-- +-- Name: encrypted_p2p_keys encrypted_p2p_keys_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.encrypted_p2p_keys + ADD CONSTRAINT encrypted_p2p_keys_pkey PRIMARY KEY (id); + + +-- +-- Name: encrypted_vrf_keys encrypted_secret_keys_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.encrypted_vrf_keys + ADD CONSTRAINT encrypted_secret_keys_pkey PRIMARY KEY (public_key); + + +-- +-- Name: encumbrances encumbrances_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.encumbrances + ADD CONSTRAINT encumbrances_pkey PRIMARY KEY (id); + + +-- +-- Name: eth_receipts eth_receipts_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.eth_receipts + ADD CONSTRAINT eth_receipts_pkey PRIMARY KEY (id); + + +-- +-- Name: eth_tx_attempts eth_tx_attempts_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.eth_tx_attempts + ADD CONSTRAINT eth_tx_attempts_pkey PRIMARY KEY (id); + + +-- +-- Name: eth_txes eth_txes_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.eth_txes + ADD CONSTRAINT eth_txes_pkey PRIMARY KEY (id); + + +-- +-- Name: external_initiators external_initiators_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.external_initiators + ADD CONSTRAINT external_initiators_pkey PRIMARY KEY (id); + + +-- +-- Name: flux_monitor_round_stats flux_monitor_round_stats_aggregator_round_id_key; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY 
public.flux_monitor_round_stats + ADD CONSTRAINT flux_monitor_round_stats_aggregator_round_id_key UNIQUE (aggregator, round_id); + + +-- +-- Name: flux_monitor_round_stats flux_monitor_round_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.flux_monitor_round_stats + ADD CONSTRAINT flux_monitor_round_stats_pkey PRIMARY KEY (id); + + +-- +-- Name: flux_monitor_specs flux_monitor_specs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.flux_monitor_specs + ADD CONSTRAINT flux_monitor_specs_pkey PRIMARY KEY (id); + + +-- +-- Name: heads heads_pkey1; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.heads + ADD CONSTRAINT heads_pkey1 PRIMARY KEY (id); + + +-- +-- Name: initiators initiators_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.initiators + ADD CONSTRAINT initiators_pkey PRIMARY KEY (id); + + +-- +-- Name: job_runs job_run_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.job_runs + ADD CONSTRAINT job_run_pkey PRIMARY KEY (id); + + +-- +-- Name: job_spec_errors job_spec_errors_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.job_spec_errors + ADD CONSTRAINT job_spec_errors_pkey PRIMARY KEY (id); + + +-- +-- Name: job_spec_errors_v2 job_spec_errors_v2_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.job_spec_errors_v2 + ADD CONSTRAINT job_spec_errors_v2_pkey PRIMARY KEY (id); + + +-- +-- Name: job_specs job_spec_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.job_specs + ADD CONSTRAINT job_spec_pkey PRIMARY KEY (id); + + +-- +-- Name: jobs jobs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.jobs + ADD CONSTRAINT jobs_pkey PRIMARY KEY (id); + + +-- +-- Name: keys keys_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- 
+ +ALTER TABLE ONLY public.keys + ADD CONSTRAINT keys_pkey PRIMARY KEY (id); + + +-- +-- Name: log_consumptions log_consumptions_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.log_consumptions + ADD CONSTRAINT log_consumptions_pkey PRIMARY KEY (id); + + +-- +-- Name: offchainreporting_contract_configs offchainreporting_contract_configs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.offchainreporting_contract_configs + ADD CONSTRAINT offchainreporting_contract_configs_pkey PRIMARY KEY (offchainreporting_oracle_spec_id); + + +-- +-- Name: offchainreporting_oracle_specs offchainreporting_oracle_specs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.offchainreporting_oracle_specs + ADD CONSTRAINT offchainreporting_oracle_specs_pkey PRIMARY KEY (id); + + +-- +-- Name: offchainreporting_pending_transmissions offchainreporting_pending_transmissions_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.offchainreporting_pending_transmissions + ADD CONSTRAINT offchainreporting_pending_transmissions_pkey PRIMARY KEY (offchainreporting_oracle_spec_id, config_digest, epoch, round); + + +-- +-- Name: offchainreporting_persistent_states offchainreporting_persistent_states_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.offchainreporting_persistent_states + ADD CONSTRAINT offchainreporting_persistent_states_pkey PRIMARY KEY (offchainreporting_oracle_spec_id, config_digest); + + +-- +-- Name: pipeline_runs pipeline_runs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_runs + ADD CONSTRAINT pipeline_runs_pkey PRIMARY KEY (id); + + +-- +-- Name: pipeline_specs pipeline_specs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_specs + ADD CONSTRAINT pipeline_specs_pkey PRIMARY KEY (id); + + +-- +-- Name: 
pipeline_task_runs pipeline_task_runs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_task_runs + ADD CONSTRAINT pipeline_task_runs_pkey PRIMARY KEY (id); + + +-- +-- Name: pipeline_task_specs pipeline_task_specs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_task_specs + ADD CONSTRAINT pipeline_task_specs_pkey PRIMARY KEY (id); + + +-- +-- Name: run_requests run_requests_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.run_requests + ADD CONSTRAINT run_requests_pkey PRIMARY KEY (id); + + +-- +-- Name: run_results run_results_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.run_results + ADD CONSTRAINT run_results_pkey PRIMARY KEY (id); + + +-- +-- Name: service_agreements service_agreements_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.service_agreements + ADD CONSTRAINT service_agreements_pkey PRIMARY KEY (id); + + +-- +-- Name: sessions sessions_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.sessions + ADD CONSTRAINT sessions_pkey PRIMARY KEY (id); + + +-- +-- Name: sync_events sync_events_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.sync_events + ADD CONSTRAINT sync_events_pkey PRIMARY KEY (id); + + +-- +-- Name: task_runs task_run_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.task_runs + ADD CONSTRAINT task_run_pkey PRIMARY KEY (id); + + +-- +-- Name: task_specs task_specs_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.task_specs + ADD CONSTRAINT task_specs_pkey PRIMARY KEY (id); + + +-- +-- Name: offchainreporting_oracle_specs unique_contract_addr; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.offchainreporting_oracle_specs + ADD CONSTRAINT 
unique_contract_addr UNIQUE (contract_address); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.users + ADD CONSTRAINT users_pkey PRIMARY KEY (email); + + +-- +-- Name: external_initiators_name_key; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX external_initiators_name_key ON public.external_initiators USING btree (lower(name)); + + +-- +-- Name: idx_bridge_types_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_bridge_types_created_at ON public.bridge_types USING brin (created_at); + + +-- +-- Name: idx_bridge_types_updated_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_bridge_types_updated_at ON public.bridge_types USING brin (updated_at); + + +-- +-- Name: idx_configurations_name; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_configurations_name ON public.configurations USING btree (name); + + +-- +-- Name: idx_direct_request_specs_unique_job_spec_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_direct_request_specs_unique_job_spec_id ON public.direct_request_specs USING btree (on_chain_job_spec_id); + + +-- +-- Name: idx_encumbrances_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_encumbrances_created_at ON public.encumbrances USING brin (created_at); + + +-- +-- Name: idx_encumbrances_updated_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_encumbrances_updated_at ON public.encumbrances USING brin (updated_at); + + +-- +-- Name: idx_eth_receipts_block_number; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_eth_receipts_block_number ON public.eth_receipts USING btree (block_number); + + +-- +-- Name: idx_eth_receipts_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_eth_receipts_created_at ON public.eth_receipts USING brin (created_at); + + +-- +-- 
Name: idx_eth_receipts_unique; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_eth_receipts_unique ON public.eth_receipts USING btree (tx_hash, block_hash); + + +-- +-- Name: idx_eth_task_run_txes_eth_tx_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_eth_task_run_txes_eth_tx_id ON public.eth_task_run_txes USING btree (eth_tx_id); + + +-- +-- Name: idx_eth_task_run_txes_task_run_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_eth_task_run_txes_task_run_id ON public.eth_task_run_txes USING btree (task_run_id); + + +-- +-- Name: idx_eth_tx_attempts_broadcast_before_block_num; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_eth_tx_attempts_broadcast_before_block_num ON public.eth_tx_attempts USING btree (broadcast_before_block_num); + + +-- +-- Name: idx_eth_tx_attempts_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_eth_tx_attempts_created_at ON public.eth_tx_attempts USING brin (created_at); + + +-- +-- Name: idx_eth_tx_attempts_hash; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_eth_tx_attempts_hash ON public.eth_tx_attempts USING btree (hash); + + +-- +-- Name: idx_eth_tx_attempts_in_progress; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_eth_tx_attempts_in_progress ON public.eth_tx_attempts USING btree (state) WHERE (state = 'in_progress'::public.eth_tx_attempts_state); + + +-- +-- Name: idx_eth_tx_attempts_unique_gas_prices; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_eth_tx_attempts_unique_gas_prices ON public.eth_tx_attempts USING btree (eth_tx_id, gas_price); + + +-- +-- Name: idx_eth_txes_broadcast_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_eth_txes_broadcast_at ON public.eth_txes USING brin (broadcast_at); + + +-- +-- Name: idx_eth_txes_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + 
+CREATE INDEX idx_eth_txes_created_at ON public.eth_txes USING brin (created_at); + + +-- +-- Name: idx_eth_txes_min_unconfirmed_nonce_for_key; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_eth_txes_min_unconfirmed_nonce_for_key ON public.eth_txes USING btree (nonce, from_address) WHERE (state = 'unconfirmed'::public.eth_txes_state); + + +-- +-- Name: idx_eth_txes_nonce_from_address; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_eth_txes_nonce_from_address ON public.eth_txes USING btree (nonce, from_address); + + +-- +-- Name: idx_eth_txes_state; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_eth_txes_state ON public.eth_txes USING btree (state) WHERE (state <> 'confirmed'::public.eth_txes_state); + + +-- +-- Name: idx_external_initiators_deleted_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_external_initiators_deleted_at ON public.external_initiators USING btree (deleted_at); + + +-- +-- Name: idx_heads_hash; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_heads_hash ON public.heads USING btree (hash); + + +-- +-- Name: idx_heads_number; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_heads_number ON public.heads USING btree (number); + + +-- +-- Name: idx_initiators_address; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_initiators_address ON public.initiators USING btree (address); + + +-- +-- Name: idx_initiators_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_initiators_created_at ON public.initiators USING btree (created_at); + + +-- +-- Name: idx_initiators_deleted_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_initiators_deleted_at ON public.initiators USING btree (deleted_at); + + +-- +-- Name: idx_initiators_job_spec_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_initiators_job_spec_id ON 
public.initiators USING btree (job_spec_id); + + +-- +-- Name: idx_initiators_type; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_initiators_type ON public.initiators USING btree (type); + + +-- +-- Name: idx_initiators_updated_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_initiators_updated_at ON public.initiators USING brin (updated_at); + + +-- +-- Name: idx_job_runs_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_runs_created_at ON public.job_runs USING brin (created_at); + + +-- +-- Name: idx_job_runs_deleted_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_runs_deleted_at ON public.job_runs USING btree (deleted_at); + + +-- +-- Name: idx_job_runs_finished_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_runs_finished_at ON public.job_runs USING brin (finished_at); + + +-- +-- Name: idx_job_runs_initiator_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_runs_initiator_id ON public.job_runs USING btree (initiator_id); + + +-- +-- Name: idx_job_runs_job_spec_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_runs_job_spec_id ON public.job_runs USING btree (job_spec_id); + + +-- +-- Name: idx_job_runs_result_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_runs_result_id ON public.job_runs USING btree (result_id); + + +-- +-- Name: idx_job_runs_run_request_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_runs_run_request_id ON public.job_runs USING btree (run_request_id); + + +-- +-- Name: idx_job_runs_status; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_runs_status ON public.job_runs USING btree (status) WHERE (status <> 'completed'::public.run_status); + + +-- +-- Name: idx_job_runs_updated_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_runs_updated_at ON 
public.job_runs USING brin (updated_at); + + +-- +-- Name: idx_job_spec_errors_v2_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_spec_errors_v2_created_at ON public.job_spec_errors_v2 USING brin (created_at); + + +-- +-- Name: idx_job_spec_errors_v2_finished_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_spec_errors_v2_finished_at ON public.job_spec_errors_v2 USING brin (updated_at); + + +-- +-- Name: idx_job_specs_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_specs_created_at ON public.job_specs USING btree (created_at); + + +-- +-- Name: idx_job_specs_deleted_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_specs_deleted_at ON public.job_specs USING btree (deleted_at); + + +-- +-- Name: idx_job_specs_end_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_specs_end_at ON public.job_specs USING btree (end_at); + + +-- +-- Name: idx_job_specs_start_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_specs_start_at ON public.job_specs USING btree (start_at); + + +-- +-- Name: idx_job_specs_updated_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_job_specs_updated_at ON public.job_specs USING brin (updated_at); + + +-- +-- Name: idx_jobs_unique_direct_request_spec_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_jobs_unique_direct_request_spec_id ON public.jobs USING btree (direct_request_spec_id); + + +-- +-- Name: idx_jobs_unique_offchain_reporting_oracle_spec_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_jobs_unique_offchain_reporting_oracle_spec_id ON public.jobs USING btree (offchainreporting_oracle_spec_id); + + +-- +-- Name: idx_jobs_unique_pipeline_spec_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_jobs_unique_pipeline_spec_id ON public.jobs USING btree 
(pipeline_spec_id); + + +-- +-- Name: idx_keys_only_one_funding; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_keys_only_one_funding ON public.keys USING btree (is_funding) WHERE (is_funding = true); + + +-- +-- Name: idx_offchainreporting_oracle_specs_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_offchainreporting_oracle_specs_created_at ON public.offchainreporting_oracle_specs USING brin (created_at); + + +-- +-- Name: idx_offchainreporting_oracle_specs_updated_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_offchainreporting_oracle_specs_updated_at ON public.offchainreporting_oracle_specs USING brin (updated_at); + + +-- +-- Name: idx_offchainreporting_pending_transmissions_time; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_offchainreporting_pending_transmissions_time ON public.offchainreporting_pending_transmissions USING btree ("time"); + + +-- +-- Name: idx_only_one_in_progress_attempt_per_eth_tx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_only_one_in_progress_attempt_per_eth_tx ON public.eth_tx_attempts USING btree (eth_tx_id) WHERE (state = 'in_progress'::public.eth_tx_attempts_state); + + +-- +-- Name: idx_only_one_in_progress_tx_per_account; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_only_one_in_progress_tx_per_account ON public.eth_txes USING btree (from_address) WHERE (state = 'in_progress'::public.eth_txes_state); + + +-- +-- Name: idx_pipeline_runs_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_pipeline_runs_created_at ON public.pipeline_runs USING brin (created_at); + + +-- +-- Name: idx_pipeline_runs_finished_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_pipeline_runs_finished_at ON public.pipeline_runs USING brin (finished_at); + + +-- +-- Name: idx_pipeline_runs_pipeline_spec_id; Type: INDEX; Schema: public; 
Owner: postgres +-- + +CREATE INDEX idx_pipeline_runs_pipeline_spec_id ON public.pipeline_runs USING btree (pipeline_spec_id); + + +-- +-- Name: idx_pipeline_runs_unfinished_runs; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_pipeline_runs_unfinished_runs ON public.pipeline_runs USING btree (id) WHERE (finished_at IS NULL); + + +-- +-- Name: idx_pipeline_specs_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_pipeline_specs_created_at ON public.pipeline_specs USING brin (created_at); + + +-- +-- Name: idx_pipeline_task_runs_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_pipeline_task_runs_created_at ON public.pipeline_task_runs USING brin (created_at); + + +-- +-- Name: idx_pipeline_task_runs_finished_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_pipeline_task_runs_finished_at ON public.pipeline_task_runs USING brin (finished_at); + + +-- +-- Name: idx_pipeline_task_runs_optimise_find_results; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_pipeline_task_runs_optimise_find_results ON public.pipeline_task_runs USING btree (pipeline_run_id); + + +-- +-- Name: idx_pipeline_task_specs_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_pipeline_task_specs_created_at ON public.pipeline_task_specs USING brin (created_at); + + +-- +-- Name: idx_pipeline_task_specs_pipeline_spec_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_pipeline_task_specs_pipeline_spec_id ON public.pipeline_task_specs USING btree (pipeline_spec_id); + + +-- +-- Name: idx_pipeline_task_specs_single_output; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_pipeline_task_specs_single_output ON public.pipeline_task_specs USING btree (pipeline_spec_id) WHERE (successor_id IS NULL); + + +-- +-- Name: idx_pipeline_task_specs_successor_id; Type: INDEX; Schema: public; Owner: postgres +-- + 
+CREATE INDEX idx_pipeline_task_specs_successor_id ON public.pipeline_task_specs USING btree (successor_id); + + +-- +-- Name: idx_run_requests_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_run_requests_created_at ON public.run_requests USING brin (created_at); + + +-- +-- Name: idx_run_results_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_run_results_created_at ON public.run_results USING brin (created_at); + + +-- +-- Name: idx_run_results_updated_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_run_results_updated_at ON public.run_results USING brin (updated_at); + + +-- +-- Name: idx_service_agreements_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_service_agreements_created_at ON public.service_agreements USING btree (created_at); + + +-- +-- Name: idx_service_agreements_updated_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_service_agreements_updated_at ON public.service_agreements USING brin (updated_at); + + +-- +-- Name: idx_sessions_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_sessions_created_at ON public.sessions USING brin (created_at); + + +-- +-- Name: idx_sessions_last_used; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_sessions_last_used ON public.sessions USING brin (last_used); + + +-- +-- Name: idx_task_runs_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_task_runs_created_at ON public.task_runs USING brin (created_at); + + +-- +-- Name: idx_task_runs_job_run_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_task_runs_job_run_id ON public.task_runs USING btree (job_run_id); + + +-- +-- Name: idx_task_runs_result_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_task_runs_result_id ON public.task_runs USING btree (result_id); + + +-- +-- Name: 
idx_task_runs_status; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_task_runs_status ON public.task_runs USING btree (status) WHERE (status <> 'completed'::public.run_status); + + +-- +-- Name: idx_task_runs_task_spec_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_task_runs_task_spec_id ON public.task_runs USING btree (task_spec_id); + + +-- +-- Name: idx_task_runs_updated_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_task_runs_updated_at ON public.task_runs USING brin (updated_at); + + +-- +-- Name: idx_task_specs_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_task_specs_created_at ON public.task_specs USING brin (created_at); + + +-- +-- Name: idx_task_specs_deleted_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_task_specs_deleted_at ON public.task_specs USING btree (deleted_at); + + +-- +-- Name: idx_task_specs_job_spec_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_task_specs_job_spec_id ON public.task_specs USING btree (job_spec_id); + + +-- +-- Name: idx_task_specs_type; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_task_specs_type ON public.task_specs USING btree (type); + + +-- +-- Name: idx_task_specs_updated_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_task_specs_updated_at ON public.task_specs USING brin (updated_at); + + +-- +-- Name: idx_unique_keys_address; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_unique_keys_address ON public.keys USING btree (address); + + +-- +-- Name: idx_unique_peer_ids; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_unique_peer_ids ON public.encrypted_p2p_keys USING btree (peer_id); + + +-- +-- Name: idx_unique_pub_keys; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX idx_unique_pub_keys ON public.encrypted_p2p_keys USING btree 
(pub_key); + + +-- +-- Name: idx_users_created_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_users_created_at ON public.users USING btree (created_at); + + +-- +-- Name: idx_users_updated_at; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_users_updated_at ON public.users USING brin (updated_at); + + +-- +-- Name: job_spec_errors_created_at_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX job_spec_errors_created_at_idx ON public.job_spec_errors USING brin (created_at); + + +-- +-- Name: job_spec_errors_occurrences_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX job_spec_errors_occurrences_idx ON public.job_spec_errors USING btree (occurrences); + + +-- +-- Name: job_spec_errors_unique_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX job_spec_errors_unique_idx ON public.job_spec_errors USING btree (job_spec_id, description); + + +-- +-- Name: job_spec_errors_updated_at_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX job_spec_errors_updated_at_idx ON public.job_spec_errors USING brin (updated_at); + + +-- +-- Name: job_spec_errors_v2_unique_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX job_spec_errors_v2_unique_idx ON public.job_spec_errors_v2 USING btree (job_id, description); + + +-- +-- Name: log_consumptions_created_at_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX log_consumptions_created_at_idx ON public.log_consumptions USING brin (created_at); + + +-- +-- Name: log_consumptions_unique_v1_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX log_consumptions_unique_v1_idx ON public.log_consumptions USING btree (job_id, block_hash, log_index); + + +-- +-- Name: log_consumptions_unique_v2_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE UNIQUE INDEX log_consumptions_unique_v2_idx ON public.log_consumptions USING btree (job_id_v2, 
block_hash, log_index); + + +-- +-- Name: p2p_peers_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX p2p_peers_id ON public.p2p_peers USING btree (id); + + +-- +-- Name: p2p_peers_peer_id; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX p2p_peers_peer_id ON public.p2p_peers USING btree (peer_id); + + +-- +-- Name: sync_events_id_created_at_idx; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX sync_events_id_created_at_idx ON public.sync_events USING btree (id, created_at); + + +-- +-- Name: eth_txes notify_eth_tx_insertion; Type: TRIGGER; Schema: public; Owner: postgres +-- + +CREATE TRIGGER notify_eth_tx_insertion AFTER INSERT ON public.eth_txes FOR EACH STATEMENT EXECUTE PROCEDURE public.notifyethtxinsertion(); + + +-- +-- Name: jobs notify_job_created; Type: TRIGGER; Schema: public; Owner: postgres +-- + +CREATE TRIGGER notify_job_created AFTER INSERT ON public.jobs FOR EACH ROW EXECUTE PROCEDURE public.notifyjobcreated(); + + +-- +-- Name: jobs notify_job_deleted; Type: TRIGGER; Schema: public; Owner: postgres +-- + +CREATE TRIGGER notify_job_deleted AFTER DELETE ON public.jobs FOR EACH ROW EXECUTE PROCEDURE public.notifyjobdeleted(); + + +-- +-- Name: pipeline_runs notify_pipeline_run_started; Type: TRIGGER; Schema: public; Owner: postgres +-- + +CREATE TRIGGER notify_pipeline_run_started AFTER INSERT ON public.pipeline_runs FOR EACH ROW EXECUTE PROCEDURE public.notifypipelinerunstarted(); + + +-- +-- Name: eth_receipts eth_receipts_tx_hash_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.eth_receipts + ADD CONSTRAINT eth_receipts_tx_hash_fkey FOREIGN KEY (tx_hash) REFERENCES public.eth_tx_attempts(hash) ON DELETE CASCADE; + + +-- +-- Name: eth_task_run_txes eth_task_run_txes_eth_tx_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.eth_task_run_txes + ADD CONSTRAINT eth_task_run_txes_eth_tx_id_fkey FOREIGN KEY (eth_tx_id) 
REFERENCES public.eth_txes(id) ON DELETE CASCADE; + + +-- +-- Name: eth_task_run_txes eth_task_run_txes_task_run_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.eth_task_run_txes + ADD CONSTRAINT eth_task_run_txes_task_run_id_fkey FOREIGN KEY (task_run_id) REFERENCES public.task_runs(id) ON DELETE CASCADE; + + +-- +-- Name: eth_tx_attempts eth_tx_attempts_eth_tx_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.eth_tx_attempts + ADD CONSTRAINT eth_tx_attempts_eth_tx_id_fkey FOREIGN KEY (eth_tx_id) REFERENCES public.eth_txes(id) ON DELETE CASCADE; + + +-- +-- Name: eth_txes eth_txes_from_address_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.eth_txes + ADD CONSTRAINT eth_txes_from_address_fkey FOREIGN KEY (from_address) REFERENCES public.keys(address); + + +-- +-- Name: initiators fk_initiators_job_spec_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.initiators + ADD CONSTRAINT fk_initiators_job_spec_id FOREIGN KEY (job_spec_id) REFERENCES public.job_specs(id) ON DELETE RESTRICT; + + +-- +-- Name: job_runs fk_job_runs_initiator_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.job_runs + ADD CONSTRAINT fk_job_runs_initiator_id FOREIGN KEY (initiator_id) REFERENCES public.initiators(id) ON DELETE CASCADE; + + +-- +-- Name: job_runs fk_job_runs_result_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.job_runs + ADD CONSTRAINT fk_job_runs_result_id FOREIGN KEY (result_id) REFERENCES public.run_results(id) ON DELETE CASCADE; + + +-- +-- Name: job_runs fk_job_runs_run_request_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.job_runs + ADD CONSTRAINT fk_job_runs_run_request_id FOREIGN KEY (run_request_id) REFERENCES public.run_requests(id) ON DELETE CASCADE; + + +-- +-- Name: 
service_agreements fk_service_agreements_encumbrance_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.service_agreements + ADD CONSTRAINT fk_service_agreements_encumbrance_id FOREIGN KEY (encumbrance_id) REFERENCES public.encumbrances(id) ON DELETE RESTRICT; + + +-- +-- Name: task_runs fk_task_runs_result_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.task_runs + ADD CONSTRAINT fk_task_runs_result_id FOREIGN KEY (result_id) REFERENCES public.run_results(id) ON DELETE CASCADE; + + +-- +-- Name: task_runs fk_task_runs_task_spec_id; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.task_runs + ADD CONSTRAINT fk_task_runs_task_spec_id FOREIGN KEY (task_spec_id) REFERENCES public.task_specs(id) ON DELETE CASCADE; + + +-- +-- Name: flux_monitor_round_stats flux_monitor_round_stats_job_run_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.flux_monitor_round_stats + ADD CONSTRAINT flux_monitor_round_stats_job_run_id_fkey FOREIGN KEY (job_run_id) REFERENCES public.job_runs(id) ON DELETE CASCADE; + + +-- +-- Name: job_runs job_runs_job_spec_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.job_runs + ADD CONSTRAINT job_runs_job_spec_id_fkey FOREIGN KEY (job_spec_id) REFERENCES public.job_specs(id) ON DELETE CASCADE; + + +-- +-- Name: job_spec_errors job_spec_errors_job_spec_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.job_spec_errors + ADD CONSTRAINT job_spec_errors_job_spec_id_fkey FOREIGN KEY (job_spec_id) REFERENCES public.job_specs(id) ON DELETE CASCADE; + + +-- +-- Name: job_spec_errors_v2 job_spec_errors_v2_job_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.job_spec_errors_v2 + ADD CONSTRAINT job_spec_errors_v2_job_id_fkey FOREIGN KEY (job_id) REFERENCES public.jobs(id) ON DELETE 
CASCADE; + + +-- +-- Name: jobs jobs_direct_request_spec_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.jobs + ADD CONSTRAINT jobs_direct_request_spec_id_fkey FOREIGN KEY (direct_request_spec_id) REFERENCES public.direct_request_specs(id); + + +-- +-- Name: jobs jobs_flux_monitor_spec_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.jobs + ADD CONSTRAINT jobs_flux_monitor_spec_id_fkey FOREIGN KEY (flux_monitor_spec_id) REFERENCES public.flux_monitor_specs(id); + + +-- +-- Name: jobs jobs_offchainreporting_oracle_spec_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.jobs + ADD CONSTRAINT jobs_offchainreporting_oracle_spec_id_fkey FOREIGN KEY (offchainreporting_oracle_spec_id) REFERENCES public.offchainreporting_oracle_specs(id) ON DELETE CASCADE; + + +-- +-- Name: jobs jobs_pipeline_spec_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.jobs + ADD CONSTRAINT jobs_pipeline_spec_id_fkey FOREIGN KEY (pipeline_spec_id) REFERENCES public.pipeline_specs(id) ON DELETE CASCADE; + + +-- +-- Name: log_consumptions log_consumptions_job_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.log_consumptions + ADD CONSTRAINT log_consumptions_job_id_fkey FOREIGN KEY (job_id) REFERENCES public.job_specs(id) ON DELETE CASCADE; + + +-- +-- Name: log_consumptions log_consumptions_job_id_v2_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.log_consumptions + ADD CONSTRAINT log_consumptions_job_id_v2_fkey FOREIGN KEY (job_id_v2) REFERENCES public.jobs(id) ON DELETE CASCADE; + + +-- +-- Name: offchainreporting_contract_configs offchainreporting_contract_co_offchainreporting_oracle_spe_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.offchainreporting_contract_configs + ADD CONSTRAINT 
offchainreporting_contract_co_offchainreporting_oracle_spe_fkey FOREIGN KEY (offchainreporting_oracle_spec_id) REFERENCES public.offchainreporting_oracle_specs(id) ON DELETE CASCADE; + + +-- +-- Name: offchainreporting_oracle_specs offchainreporting_oracle_specs_encrypted_ocr_key_bundle_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.offchainreporting_oracle_specs + ADD CONSTRAINT offchainreporting_oracle_specs_encrypted_ocr_key_bundle_id_fkey FOREIGN KEY (encrypted_ocr_key_bundle_id) REFERENCES public.encrypted_ocr_key_bundles(id); + + +-- +-- Name: offchainreporting_oracle_specs offchainreporting_oracle_specs_p2p_peer_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.offchainreporting_oracle_specs + ADD CONSTRAINT offchainreporting_oracle_specs_p2p_peer_id_fkey FOREIGN KEY (p2p_peer_id) REFERENCES public.encrypted_p2p_keys(peer_id); + + +-- +-- Name: offchainreporting_oracle_specs offchainreporting_oracle_specs_transmitter_address_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.offchainreporting_oracle_specs + ADD CONSTRAINT offchainreporting_oracle_specs_transmitter_address_fkey FOREIGN KEY (transmitter_address) REFERENCES public.keys(address); + + +-- +-- Name: offchainreporting_pending_transmissions offchainreporting_pending_tra_offchainreporting_oracle_spe_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.offchainreporting_pending_transmissions + ADD CONSTRAINT offchainreporting_pending_tra_offchainreporting_oracle_spe_fkey FOREIGN KEY (offchainreporting_oracle_spec_id) REFERENCES public.offchainreporting_oracle_specs(id) ON DELETE CASCADE; + + +-- +-- Name: offchainreporting_persistent_states offchainreporting_persistent__offchainreporting_oracle_spe_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.offchainreporting_persistent_states + ADD CONSTRAINT 
offchainreporting_persistent__offchainreporting_oracle_spe_fkey FOREIGN KEY (offchainreporting_oracle_spec_id) REFERENCES public.offchainreporting_oracle_specs(id) ON DELETE CASCADE; + + +-- +-- Name: p2p_peers p2p_peers_peer_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.p2p_peers + ADD CONSTRAINT p2p_peers_peer_id_fkey FOREIGN KEY (peer_id) REFERENCES public.encrypted_p2p_keys(peer_id) DEFERRABLE; + + +-- +-- Name: pipeline_runs pipeline_runs_pipeline_spec_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_runs + ADD CONSTRAINT pipeline_runs_pipeline_spec_id_fkey FOREIGN KEY (pipeline_spec_id) REFERENCES public.pipeline_specs(id) ON DELETE CASCADE DEFERRABLE; + + +-- +-- Name: pipeline_task_runs pipeline_task_runs_pipeline_run_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_task_runs + ADD CONSTRAINT pipeline_task_runs_pipeline_run_id_fkey FOREIGN KEY (pipeline_run_id) REFERENCES public.pipeline_runs(id) ON DELETE CASCADE DEFERRABLE; + + +-- +-- Name: pipeline_task_runs pipeline_task_runs_pipeline_task_spec_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_task_runs + ADD CONSTRAINT pipeline_task_runs_pipeline_task_spec_id_fkey FOREIGN KEY (pipeline_task_spec_id) REFERENCES public.pipeline_task_specs(id) ON DELETE CASCADE DEFERRABLE; + + +-- +-- Name: pipeline_task_specs pipeline_task_specs_pipeline_spec_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_task_specs + ADD CONSTRAINT pipeline_task_specs_pipeline_spec_id_fkey FOREIGN KEY (pipeline_spec_id) REFERENCES public.pipeline_specs(id) ON DELETE CASCADE DEFERRABLE; + + +-- +-- Name: pipeline_task_specs pipeline_task_specs_successor_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.pipeline_task_specs + ADD CONSTRAINT 
pipeline_task_specs_successor_id_fkey FOREIGN KEY (successor_id) REFERENCES public.pipeline_task_specs(id) DEFERRABLE; + + +-- +-- Name: service_agreements service_agreements_job_spec_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.service_agreements + ADD CONSTRAINT service_agreements_job_spec_id_fkey FOREIGN KEY (job_spec_id) REFERENCES public.job_specs(id) ON DELETE CASCADE; + + +-- +-- Name: task_runs task_runs_job_run_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.task_runs + ADD CONSTRAINT task_runs_job_run_id_fkey FOREIGN KEY (job_run_id) REFERENCES public.job_runs(id) ON DELETE CASCADE; + + +-- +-- Name: task_specs task_specs_job_spec_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.task_specs + ADD CONSTRAINT task_specs_job_spec_id_fkey FOREIGN KEY (job_spec_id) REFERENCES public.job_specs(id) ON DELETE CASCADE; + + +-- +-- PostgreSQL database dump complete +-- + +-- +goose StatementEnd + +-- +goose Down +-- Note we cannot just drop schema since +-- the migrations table is handled separately. 
+DROP TABLE bridge_types, + configurations, + direct_request_specs, + encrypted_ocr_key_bundles, + encrypted_p2p_keys, + encrypted_vrf_keys, + encumbrances, + eth_receipts, + eth_task_run_txes, + eth_tx_attempts, + eth_txes, + external_initiators, + flux_monitor_round_stats, + flux_monitor_specs, + heads, + initiators, + job_runs, + job_spec_errors, + job_spec_errors_v2, + job_specs, + jobs, + keys, + log_consumptions, + offchainreporting_contract_configs, + offchainreporting_oracle_specs, + offchainreporting_pending_transmissions, + offchainreporting_persistent_states, + p2p_peers, + pipeline_runs, + pipeline_specs, + pipeline_task_runs, + pipeline_task_specs, + run_requests, + run_results, + service_agreements, + sessions, + sync_events, + task_runs, + task_specs, + users CASCADE; +DROP FUNCTION notifyethtxinsertion, notifyjobcreated, notifyjobdeleted, notifypipelinerunstarted CASCADE; +DROP TYPE eth_tx_attempts_state, eth_txes_state, run_status CASCADE; diff --git a/core/store/migrate/migrations/0002_gormv2.sql b/core/store/migrate/migrations/0002_gormv2.sql new file mode 100644 index 00000000..fd1c60a9 --- /dev/null +++ b/core/store/migrate/migrations/0002_gormv2.sql @@ -0,0 +1,10 @@ +-- +goose Up +UPDATE offchainreporting_oracle_specs SET contract_config_confirmations = 0 where contract_config_confirmations is NULL; +ALTER TABLE offchainreporting_oracle_specs + ALTER COLUMN contract_config_confirmations SET NOT NULL; +ALTER TABLE external_initiators ADD CONSTRAINT "access_key_unique" UNIQUE ("access_key"); + +-- +goose Down +ALTER TABLE offchainreporting_oracle_specs + ALTER COLUMN contract_config_confirmations DROP NOT NULL; +ALTER TABLE external_initiators DROP CONSTRAINT "access_key_unique"; diff --git a/core/store/migrate/migrations/0003_eth_logs_table.sql b/core/store/migrate/migrations/0003_eth_logs_table.sql new file mode 100644 index 00000000..538ba9f2 --- /dev/null +++ b/core/store/migrate/migrations/0003_eth_logs_table.sql @@ -0,0 +1,51
@@ +-- +goose Up +CREATE TABLE "eth_logs" ( + "id" BIGSERIAL PRIMARY KEY, + "block_hash" bytea NOT NULL, + "block_number" bigint NOT NULL, + "index" bigint NOT NULL, + "address" bytea NOT NULL, + "topics" bytea[] NOT NULL, + "data" bytea NOT NULL, + "order_received" serial NOT NULL, + "created_at" timestamp without time zone NOT NULL +); + +CREATE UNIQUE INDEX idx_eth_logs_unique ON eth_logs (block_hash, index) INCLUDE (id); +CREATE INDEX IF NOT EXISTS idx_eth_logs_block_number ON eth_logs (block_number); +CREATE INDEX IF NOT EXISTS idx_eth_logs_address_block_number ON eth_logs (address, block_number); + +ALTER TABLE log_consumptions RENAME CONSTRAINT chk_log_consumptions_exactly_one_job_id TO chk_log_broadcasts_exactly_one_job_id; +ALTER TABLE log_consumptions RENAME CONSTRAINT log_consumptions_job_id_fkey TO log_broadcasts_job_id_fkey; +ALTER TABLE log_consumptions RENAME TO log_broadcasts; + +ALTER TABLE log_broadcasts + ADD COLUMN "consumed" BOOL NOT NULL DEFAULT FALSE, + ADD COLUMN "eth_log_id" BIGINT, -- NOTE: This ought to be not null in the final application of this migration + ADD CONSTRAINT log_broadcasts_eth_log_id_fkey FOREIGN KEY (eth_log_id) REFERENCES eth_logs (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE; + +CREATE INDEX idx_log_broadcasts_unconsumed_eth_log_id ON log_broadcasts (eth_log_id) WHERE consumed = false; +CREATE INDEX idx_log_broadcasts_unconsumed_job_id ON log_broadcasts (job_id) WHERE consumed = false AND job_id IS NOT NULL; +CREATE INDEX idx_log_broadcasts_unconsumed_job_id_v2 ON log_broadcasts (job_id_v2) WHERE consumed = false AND job_id_v2 IS NOT NULL; + +DROP INDEX IF EXISTS log_consumptions_unique_v1_idx; +DROP INDEX IF EXISTS log_consumptions_unique_v2_idx; + +CREATE UNIQUE INDEX log_consumptions_unique_v1_idx ON log_broadcasts(job_id, block_hash, log_index) INCLUDE (consumed) WHERE job_id IS NOT NULL; +CREATE UNIQUE INDEX log_consumptions_unique_v2_idx ON log_broadcasts(job_id_v2, block_hash, log_index) INCLUDE 
(consumed) WHERE job_id_v2 IS NOT NULL; + +-- +goose Down +DELETE FROM eth_logs; + +ALTER TABLE log_broadcasts + DROP COLUMN "eth_log_id", + DROP COLUMN "consumed"; + +ALTER TABLE log_broadcasts RENAME TO log_consumptions; +ALTER TABLE log_consumptions RENAME CONSTRAINT chk_log_broadcasts_exactly_one_job_id TO chk_log_consumptions_exactly_one_job_id; +ALTER TABLE log_consumptions RENAME CONSTRAINT log_broadcasts_job_id_fkey TO log_consumptions_job_id_fkey; + +CREATE UNIQUE INDEX log_consumptions_unique_v1_idx ON public.log_consumptions USING btree (job_id, block_hash, log_index); +CREATE UNIQUE INDEX log_consumptions_unique_v2_idx ON public.log_consumptions USING btree (job_id_v2, block_hash, log_index); + +DROP TABLE "eth_logs"; diff --git a/core/store/migrate/migrations/0004_cleanup_tx_attempt_state.sql b/core/store/migrate/migrations/0004_cleanup_tx_attempt_state.sql new file mode 100644 index 00000000..8ce19421 --- /dev/null +++ b/core/store/migrate/migrations/0004_cleanup_tx_attempt_state.sql @@ -0,0 +1,4 @@ +-- +goose Up +UPDATE eth_tx_attempts SET state = 'broadcast', broadcast_before_block_num = eth_receipts.block_number +FROM eth_receipts +WHERE eth_tx_attempts.state = 'in_progress' AND eth_tx_attempts.hash = eth_receipts.tx_hash; diff --git a/core/store/migrate/migrations/0005_tx_attempts_insufficient_eth_index.sql b/core/store/migrate/migrations/0005_tx_attempts_insufficient_eth_index.sql new file mode 100644 index 00000000..301d9982 --- /dev/null +++ b/core/store/migrate/migrations/0005_tx_attempts_insufficient_eth_index.sql @@ -0,0 +1,15 @@ +-- +goose Up +DROP INDEX IF EXISTS idx_eth_tx_attempts_in_progress; +CREATE INDEX idx_eth_tx_attempts_unbroadcast ON eth_tx_attempts (state enum_ops) WHERE state != 'broadcast'::eth_tx_attempts_state; +DROP INDEX IF EXISTS idx_only_one_in_progress_attempt_per_eth_tx; +CREATE UNIQUE INDEX idx_only_one_unbroadcast_attempt_per_eth_tx ON eth_tx_attempts(eth_tx_id int8_ops) WHERE state != 
'broadcast'::eth_tx_attempts_state; +DROP INDEX IF EXISTS idx_eth_txes_state; +CREATE INDEX idx_eth_txes_state_from_address ON eth_txes(state, from_address) WHERE state <> 'confirmed'::eth_txes_state; + +-- +goose Down +DROP INDEX IF EXISTS idx_eth_tx_attempts_unbroadcast; +CREATE INDEX idx_eth_tx_attempts_in_progress ON eth_tx_attempts(state enum_ops) WHERE state = 'in_progress'::eth_tx_attempts_state; +DROP INDEX IF EXISTS idx_only_one_unbroadcast_attempt_per_eth_tx; +CREATE UNIQUE INDEX idx_only_one_in_progress_attempt_per_eth_tx ON eth_tx_attempts(eth_tx_id int8_ops) WHERE state = 'in_progress'::eth_tx_attempts_state; +DROP INDEX IF EXISTS idx_eth_txes_state_from_address; +CREATE INDEX idx_eth_txes_state ON eth_txes(state enum_ops) WHERE state <> 'confirmed'::eth_txes_state; diff --git a/core/store/migrate/migrations/0006_unique_task_specs_per_pipeline_run.sql b/core/store/migrate/migrations/0006_unique_task_specs_per_pipeline_run.sql new file mode 100644 index 00000000..9ab85222 --- /dev/null +++ b/core/store/migrate/migrations/0006_unique_task_specs_per_pipeline_run.sql @@ -0,0 +1,5 @@ +-- +goose Up +CREATE UNIQUE INDEX idx_pipeline_task_runs_unique_task_spec_id_per_run ON pipeline_task_runs (pipeline_task_spec_id, pipeline_run_id); + +-- +goose Down +DROP INDEX IF EXISTS idx_pipeline_task_runs_unique_task_spec_id_per_run; diff --git a/core/store/migrate/migrations/0007_reverse_eth_logs_table.sql b/core/store/migrate/migrations/0007_reverse_eth_logs_table.sql new file mode 100644 index 00000000..a1c882d7 --- /dev/null +++ b/core/store/migrate/migrations/0007_reverse_eth_logs_table.sql @@ -0,0 +1,51 @@ +-- +goose Up +DELETE FROM eth_logs; + +ALTER TABLE log_broadcasts + DROP COLUMN "eth_log_id", + DROP COLUMN "consumed"; + +ALTER TABLE log_broadcasts RENAME TO log_consumptions; +ALTER TABLE log_consumptions RENAME CONSTRAINT chk_log_broadcasts_exactly_one_job_id TO chk_log_consumptions_exactly_one_job_id; +ALTER TABLE log_consumptions RENAME CONSTRAINT 
log_broadcasts_job_id_fkey TO log_consumptions_job_id_fkey; + +CREATE UNIQUE INDEX log_consumptions_unique_v1_idx ON public.log_consumptions USING btree (job_id, block_hash, log_index); +CREATE UNIQUE INDEX log_consumptions_unique_v2_idx ON public.log_consumptions USING btree (job_id_v2, block_hash, log_index); + +DROP TABLE "eth_logs"; + +-- +goose Down +CREATE TABLE "eth_logs" ( + "id" BIGSERIAL PRIMARY KEY, + "block_hash" bytea NOT NULL, + "block_number" bigint NOT NULL, + "index" bigint NOT NULL, + "address" bytea NOT NULL, + "topics" bytea[] NOT NULL, + "data" bytea NOT NULL, + "order_received" serial NOT NULL, + "created_at" timestamp without time zone NOT NULL +); + +CREATE UNIQUE INDEX idx_eth_logs_unique ON eth_logs (block_hash, index) INCLUDE (id); +CREATE INDEX IF NOT EXISTS idx_eth_logs_block_number ON eth_logs (block_number); +CREATE INDEX IF NOT EXISTS idx_eth_logs_address_block_number ON eth_logs (address, block_number); + +ALTER TABLE log_consumptions RENAME CONSTRAINT chk_log_consumptions_exactly_one_job_id TO chk_log_broadcasts_exactly_one_job_id; +ALTER TABLE log_consumptions RENAME CONSTRAINT log_consumptions_job_id_fkey TO log_broadcasts_job_id_fkey; +ALTER TABLE log_consumptions RENAME TO log_broadcasts; + +ALTER TABLE log_broadcasts + ADD COLUMN "consumed" BOOL NOT NULL DEFAULT FALSE, + ADD COLUMN "eth_log_id" BIGINT, -- NOTE: This ought to be not null in the final application of this reversal + ADD CONSTRAINT log_broadcasts_eth_log_id_fkey FOREIGN KEY (eth_log_id) REFERENCES eth_logs (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE; + +CREATE INDEX idx_log_broadcasts_unconsumed_eth_log_id ON log_broadcasts (eth_log_id) WHERE consumed = false; +CREATE INDEX idx_log_broadcasts_unconsumed_job_id ON log_broadcasts (job_id) WHERE consumed = false AND job_id IS NOT NULL; +CREATE INDEX idx_log_broadcasts_unconsumed_job_id_v2 ON log_broadcasts (job_id_v2) WHERE consumed = false AND job_id_v2 IS NOT NULL; + +DROP INDEX IF EXISTS 
log_consumptions_unique_v1_idx; +DROP INDEX IF EXISTS log_consumptions_unique_v2_idx; + +CREATE UNIQUE INDEX log_consumptions_unique_v1_idx ON log_broadcasts(job_id, block_hash, log_index) INCLUDE (consumed) WHERE job_id IS NOT NULL; +CREATE UNIQUE INDEX log_consumptions_unique_v2_idx ON log_broadcasts(job_id_v2, block_hash, log_index) INCLUDE (consumed) WHERE job_id_v2 IS NOT NULL; diff --git a/core/store/migrate/migrations/0008_reapply_eth_logs_table.sql b/core/store/migrate/migrations/0008_reapply_eth_logs_table.sql new file mode 100644 index 00000000..0162647e --- /dev/null +++ b/core/store/migrate/migrations/0008_reapply_eth_logs_table.sql @@ -0,0 +1,54 @@ +-- +goose Up +CREATE TABLE "eth_logs" ( + "id" BIGSERIAL PRIMARY KEY, + "block_hash" bytea NOT NULL, + "block_number" bigint NOT NULL, + "index" bigint NOT NULL, + "address" bytea NOT NULL, + "topics" bytea[] NOT NULL, + "data" bytea NOT NULL, + "order_received" serial NOT NULL, + "created_at" timestamp without time zone NOT NULL +); + +CREATE UNIQUE INDEX idx_eth_logs_unique ON eth_logs (block_hash, index) INCLUDE (id); +CREATE INDEX IF NOT EXISTS idx_eth_logs_block_number ON eth_logs (block_number); +CREATE INDEX IF NOT EXISTS idx_eth_logs_address_block_number ON eth_logs (address, block_number); + +ALTER TABLE log_consumptions RENAME CONSTRAINT chk_log_consumptions_exactly_one_job_id TO chk_log_broadcasts_exactly_one_job_id; +ALTER TABLE log_consumptions RENAME CONSTRAINT log_consumptions_job_id_fkey TO log_broadcasts_job_id_fkey; +ALTER TABLE log_consumptions RENAME TO log_broadcasts; + +-- NOTE: one-time deletion is necessary to maintain FK constraints, this probably won't hurt +DELETE FROM log_broadcasts; + +ALTER TABLE log_broadcasts + ADD COLUMN "consumed" BOOL NOT NULL DEFAULT FALSE, + ADD COLUMN "eth_log_id" BIGINT NOT NULL, + ADD CONSTRAINT log_broadcasts_eth_log_id_fkey FOREIGN KEY (eth_log_id) REFERENCES eth_logs (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE; + +CREATE INDEX 
idx_log_broadcasts_unconsumed_eth_log_id ON log_broadcasts (eth_log_id) WHERE consumed = false; +CREATE INDEX idx_log_broadcasts_unconsumed_job_id ON log_broadcasts (job_id) WHERE consumed = false AND job_id IS NOT NULL; +CREATE INDEX idx_log_broadcasts_unconsumed_job_id_v2 ON log_broadcasts (job_id_v2) WHERE consumed = false AND job_id_v2 IS NOT NULL; + +DROP INDEX IF EXISTS log_consumptions_unique_v1_idx; +DROP INDEX IF EXISTS log_consumptions_unique_v2_idx; + +CREATE UNIQUE INDEX log_consumptions_unique_v1_idx ON log_broadcasts(job_id, block_hash, log_index) INCLUDE (consumed) WHERE job_id IS NOT NULL; +CREATE UNIQUE INDEX log_consumptions_unique_v2_idx ON log_broadcasts(job_id_v2, block_hash, log_index) INCLUDE (consumed) WHERE job_id_v2 IS NOT NULL; + +-- +goose Down +DELETE FROM eth_logs; + +ALTER TABLE log_broadcasts + DROP COLUMN "eth_log_id", + DROP COLUMN "consumed"; + +ALTER TABLE log_broadcasts RENAME TO log_consumptions; +ALTER TABLE log_consumptions RENAME CONSTRAINT chk_log_broadcasts_exactly_one_job_id TO chk_log_consumptions_exactly_one_job_id; +ALTER TABLE log_consumptions RENAME CONSTRAINT log_broadcasts_job_id_fkey TO log_consumptions_job_id_fkey; + +CREATE UNIQUE INDEX log_consumptions_unique_v1_idx ON public.log_consumptions USING btree (job_id, block_hash, log_index); +CREATE UNIQUE INDEX log_consumptions_unique_v2_idx ON public.log_consumptions USING btree (job_id_v2, block_hash, log_index); + +DROP TABLE "eth_logs"; diff --git a/core/store/migrate/migrations/0009_add_min_payment_to_flux_monitor_spec.sql b/core/store/migrate/migrations/0009_add_min_payment_to_flux_monitor_spec.sql new file mode 100644 index 00000000..eb8f7aa7 --- /dev/null +++ b/core/store/migrate/migrations/0009_add_min_payment_to_flux_monitor_spec.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE flux_monitor_specs +ADD min_payment varchar(255); + +-- +goose Down +ALTER TABLE flux_monitor_specs +DROP COLUMN min_payment; diff --git 
a/core/store/migrate/migrations/0010_bridge_fk.sql b/core/store/migrate/migrations/0010_bridge_fk.sql new file mode 100644 index 00000000..0b6afcdc --- /dev/null +++ b/core/store/migrate/migrations/0010_bridge_fk.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE pipeline_task_specs ADD COLUMN bridge_name text; +ALTER TABLE pipeline_task_specs ADD CONSTRAINT fk_pipeline_task_specs_bridge_name FOREIGN KEY (bridge_name) REFERENCES bridge_types (name); +UPDATE pipeline_task_specs SET bridge_name = ts.json->>'name' FROM pipeline_task_specs ts WHERE pipeline_task_specs.id = ts.id AND ts.type = 'bridge'; + +-- +goose Down +ALTER TABLE pipeline_task_specs DROP CONSTRAINT fk_pipeline_task_specs_bridge_name, DROP COLUMN bridge_name; diff --git a/core/store/migrate/migrations/0011_latest_round_requested.sql b/core/store/migrate/migrations/0011_latest_round_requested.sql new file mode 100644 index 00000000..24f55828 --- /dev/null +++ b/core/store/migrate/migrations/0011_latest_round_requested.sql @@ -0,0 +1,12 @@ +-- +goose Up +CREATE TABLE offchainreporting_latest_round_requested ( + offchainreporting_oracle_spec_id integer PRIMARY KEY REFERENCES offchainreporting_oracle_specs (id) DEFERRABLE INITIALLY IMMEDIATE, + requester bytea not null CHECK (octet_length(requester) = 20), + config_digest bytea not null CHECK (octet_length(config_digest) = 16), + epoch bigint not null, + round bigint not null, + raw jsonb not null +); + +-- +goose Down +DROP TABLE offchainreporting_latest_round_requested; diff --git a/core/store/migrate/migrations/0012_change_jobs_to_numeric.sql b/core/store/migrate/migrations/0012_change_jobs_to_numeric.sql new file mode 100644 index 00000000..e7b61fd3 --- /dev/null +++ b/core/store/migrate/migrations/0012_change_jobs_to_numeric.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE job_specs ALTER COLUMN min_payment TYPE numeric(78, 0) USING min_payment::numeric; +ALTER TABLE flux_monitor_specs ALTER COLUMN min_payment TYPE numeric(78, 0) USING min_payment::numeric; + +-- +goose Down +ALTER TABLE
job_specs ALTER COLUMN min_payment TYPE varchar(255) USING min_payment::varchar; +ALTER TABLE flux_monitor_specs ALTER COLUMN min_payment TYPE varchar(255) USING min_payment::varchar; diff --git a/core/store/migrate/migrations/0013_create_flux_monitor_round_stats_v2.sql b/core/store/migrate/migrations/0013_create_flux_monitor_round_stats_v2.sql new file mode 100644 index 00000000..8c5ad5e3 --- /dev/null +++ b/core/store/migrate/migrations/0013_create_flux_monitor_round_stats_v2.sql @@ -0,0 +1,13 @@ +-- +goose Up +CREATE TABLE flux_monitor_round_stats_v2 ( + id BIGSERIAL PRIMARY KEY, + aggregator bytea NOT NULL, + round_id integer NOT NULL, + num_new_round_logs integer NOT NULL DEFAULT 0, + num_submissions integer NOT NULL DEFAULT 0, + pipeline_run_id bigint REFERENCES pipeline_runs(id) ON DELETE CASCADE, + CONSTRAINT flux_monitor_round_stats_v2_aggregator_round_id_key UNIQUE (aggregator, round_id) +); + +-- +goose Down +DROP TABLE flux_monitor_round_stats_v2; diff --git a/core/store/migrate/migrations/0014_add_keeper_tables.sql b/core/store/migrate/migrations/0014_add_keeper_tables.sql new file mode 100644 index 00000000..487d2419 --- /dev/null +++ b/core/store/migrate/migrations/0014_add_keeper_tables.sql @@ -0,0 +1,53 @@ +-- +goose Up + CREATE TABLE keeper_specs ( + id BIGSERIAL PRIMARY KEY, + contract_address bytea NOT NULL, + from_address bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT keeper_specs_contract_address_check CHECK ((octet_length(contract_address) = 20)), + CONSTRAINT keeper_specs_from_address_check CHECK ((octet_length(from_address) = 20)) + ); + + ALTER TABLE jobs ADD COLUMN keeper_spec_id INT REFERENCES keeper_specs(id), + DROP CONSTRAINT chk_only_one_spec, + ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls(offchainreporting_oracle_spec_id, direct_request_spec_id, flux_monitor_spec_id, keeper_spec_id) = 1 + ); + + CREATE TABLE keeper_registries ( + id BIGSERIAL 
PRIMARY KEY, + job_id int UNIQUE NOT NULL REFERENCES jobs(id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE, + keeper_index int NOT NULL, + contract_address bytea UNIQUE NOT NULL, + from_address bytea NOT NULL, + check_gas int NOT NULL, + block_count_per_turn int NOT NULL, + num_keepers int NOT NULL + CONSTRAINT keeper_registries_contract_address_check CHECK ((octet_length(contract_address) = 20)) + CONSTRAINT keeper_registries_from_address_check CHECK ((octet_length(from_address) = 20)) + ); + + CREATE INDEX idx_keeper_registries_keeper_index ON keeper_registries(keeper_index); + + CREATE TABLE upkeep_registrations ( + id BIGSERIAL PRIMARY KEY, + registry_id bigint NOT NULL REFERENCES keeper_registries(id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE, + execute_gas int NOT NULL, + check_data bytea NOT NULL, + upkeep_id bigint NOT NULL, + positioning_constant int NOT NULL + ); + + CREATE UNIQUE INDEX idx_upkeep_registrations_unique_upkeep_ids_per_keeper ON upkeep_registrations(registry_id, upkeep_id); + CREATE INDEX idx_upkeep_registrations_upkeep_id ON upkeep_registrations(upkeep_id); + +-- +goose Down + DROP TABLE IF EXISTS keeper_specs, keeper_registries, upkeep_registrations; + + ALTER TABLE jobs DROP CONSTRAINT chk_only_one_spec, + ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls(offchainreporting_oracle_spec_id, direct_request_spec_id, flux_monitor_spec_id) = 1 + ); + + ALTER TABLE jobs DROP COLUMN keeper_spec_id; diff --git a/core/store/migrate/migrations/0015_simplify_log_broadcaster.sql b/core/store/migrate/migrations/0015_simplify_log_broadcaster.sql new file mode 100644 index 00000000..53f34249 --- /dev/null +++ b/core/store/migrate/migrations/0015_simplify_log_broadcaster.sql @@ -0,0 +1,29 @@ +-- +goose Up + ALTER TABLE log_broadcasts DROP COLUMN "eth_log_id"; + DROP TABLE "eth_logs"; + +-- +goose Down + CREATE TABLE "eth_logs" ( + "id" BIGSERIAL PRIMARY KEY, + "block_hash" bytea NOT NULL, + "block_number" bigint NOT NULL, + "index" bigint 
NOT NULL, + "address" bytea NOT NULL, + "topics" bytea[] NOT NULL, + "data" bytea NOT NULL, + "order_received" serial NOT NULL, + "created_at" timestamp without time zone NOT NULL + ); + + CREATE UNIQUE INDEX idx_eth_logs_unique ON eth_logs (block_hash, index) INCLUDE (id); + CREATE INDEX IF NOT EXISTS idx_eth_logs_block_number ON eth_logs (block_number); + CREATE INDEX IF NOT EXISTS idx_eth_logs_address_block_number ON eth_logs (address, block_number); + + -- NOTE: one-time deletion is necessary to maintain FK constraints, this probably won't hurt + DELETE FROM log_broadcasts; + + ALTER TABLE log_broadcasts + ADD COLUMN "eth_log_id" BIGINT NOT NULL, + ADD CONSTRAINT log_broadcasts_eth_log_id_fkey FOREIGN KEY (eth_log_id) REFERENCES eth_logs (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE; + + CREATE INDEX idx_log_broadcasts_unconsumed_eth_log_id ON log_broadcasts (eth_log_id) WHERE consumed = false; diff --git a/core/store/migrate/migrations/0016_pipeline_task_run_dot_id.sql b/core/store/migrate/migrations/0016_pipeline_task_run_dot_id.sql new file mode 100644 index 00000000..ca67ce05 --- /dev/null +++ b/core/store/migrate/migrations/0016_pipeline_task_run_dot_id.sql @@ -0,0 +1,26 @@ +-- +goose Up +ALTER TABLE pipeline_task_runs ADD COLUMN dot_id text; +UPDATE pipeline_task_runs SET dot_id = ts.dot_id FROM pipeline_task_specs ts WHERE ts.id = pipeline_task_runs.pipeline_task_spec_id; +ALTER TABLE pipeline_task_runs ALTER COLUMN dot_id SET NOT NULL, DROP COLUMN pipeline_task_spec_id; +DROP TABLE pipeline_task_specs; + +CREATE UNIQUE INDEX ON pipeline_task_runs(pipeline_run_id, dot_id); +DROP INDEX idx_pipeline_task_runs_optimise_find_results; + +-- +goose Down +ALTER TABLE pipeline_task_runs DROP COLUMN dot_id; +CREATE TABLE public.pipeline_task_specs ( + id BIGSERIAL PRIMARY KEY, + dot_id text NOT NULL, + pipeline_spec_id integer NOT NULL, + type text NOT NULL, + json jsonb NOT NULL, + index integer DEFAULT 0 NOT NULL, + successor_id integer, + created_at 
timestamp with time zone NOT NULL +); +CREATE INDEX idx_pipeline_task_specs_created_at ON public.pipeline_task_specs USING brin (created_at); +CREATE INDEX idx_pipeline_task_specs_pipeline_spec_id ON public.pipeline_task_specs USING btree (pipeline_spec_id); +CREATE UNIQUE INDEX idx_pipeline_task_specs_single_output ON public.pipeline_task_specs USING btree (pipeline_spec_id) WHERE (successor_id IS NULL); +CREATE INDEX idx_pipeline_task_specs_successor_id ON public.pipeline_task_specs USING btree (successor_id); +CREATE INDEX idx_pipeline_task_runs_optimise_find_results ON public.pipeline_task_runs USING btree (pipeline_run_id); diff --git a/core/store/migrate/migrations/0017_bptxm_chain_nonce_fastforward.sql b/core/store/migrate/migrations/0017_bptxm_chain_nonce_fastforward.sql new file mode 100644 index 00000000..fbfe023c --- /dev/null +++ b/core/store/migrate/migrations/0017_bptxm_chain_nonce_fastforward.sql @@ -0,0 +1,6 @@ +-- +goose Up +UPDATE keys SET next_nonce = 0 WHERE next_nonce IS NULL; +ALTER TABLE keys ALTER COLUMN next_nonce SET NOT NULL, ALTER COLUMN next_nonce SET DEFAULT 0; + +-- +goose Down +ALTER TABLE keys ALTER COLUMN next_nonce DROP NOT NULL, ALTER COLUMN next_nonce SET DEFAULT NULL; diff --git a/core/store/migrate/migrations/0018_add_node_version_table.sql b/core/store/migrate/migrations/0018_add_node_version_table.sql new file mode 100644 index 00000000..0351cea9 --- /dev/null +++ b/core/store/migrate/migrations/0018_add_node_version_table.sql @@ -0,0 +1,8 @@ +-- +goose Up +CREATE TABLE IF NOT EXISTS "node_versions" ( + "version" TEXT PRIMARY KEY, + "created_at" timestamp without time zone NOT NULL +); + +-- +goose Down +DROP TABLE IF EXISTS "node_versions"; diff --git a/core/store/migrate/migrations/0019_last_run_height_column_to_keeper_table.sql b/core/store/migrate/migrations/0019_last_run_height_column_to_keeper_table.sql new file mode 100644 index 00000000..dd5c2443 --- /dev/null +++ b/core/store/migrate/migrations/0019_last_run_height_column_to_keeper_table.sql @@ -0,0 +1,5
@@ +-- +goose Up +ALTER TABLE upkeep_registrations ADD COLUMN last_run_block_height BIGINT NOT NULL DEFAULT 0; + +-- +goose Down +ALTER TABLE upkeep_registrations DROP COLUMN last_run_block_height; diff --git a/core/store/migrate/migrations/0020_remove_result_task.sql b/core/store/migrate/migrations/0020_remove_result_task.sql new file mode 100644 index 00000000..bc224f71 --- /dev/null +++ b/core/store/migrate/migrations/0020_remove_result_task.sql @@ -0,0 +1,21 @@ +-- +goose Up +ALTER TABLE pipeline_task_runs DROP CONSTRAINT chk_pipeline_task_run_fsm; +DELETE FROM pipeline_task_runs WHERE type = 'result'; +ALTER TABLE pipeline_task_runs + ADD CONSTRAINT chk_pipeline_task_run_fsm CHECK ( + ((finished_at IS NOT NULL) AND (num_nonnulls(output, error) != 2)) + OR + (num_nulls(finished_at, output, error) = 3) + ); + +-- +goose Down +ALTER TABLE pipeline_task_runs DROP CONSTRAINT chk_pipeline_task_run_fsm; +ALTER TABLE pipeline_task_runs + ADD CONSTRAINT chk_pipeline_task_run_fsm CHECK ( + (((type <> 'result'::text) AND (((finished_at IS NULL) AND (error IS NULL) AND (output IS NULL)) + OR + ((finished_at IS NOT NULL) AND (NOT ((error IS NOT NULL) AND (output IS NOT NULL)))))) + OR + ((type = 'result'::text) AND (((output IS NULL) AND (error IS NULL) AND (finished_at IS NULL)) + OR + ((output IS NOT NULL) AND (error IS NOT NULL) AND (finished_at IS NOT NULL)))))); diff --git a/core/store/migrate/migrations/0021_add_job_id_topic_filter.sql b/core/store/migrate/migrations/0021_add_job_id_topic_filter.sql new file mode 100644 index 00000000..8898ccee --- /dev/null +++ b/core/store/migrate/migrations/0021_add_job_id_topic_filter.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE initiators ADD COLUMN job_id_topic_filter uuid; +-- +goose Down +ALTER TABLE initiators DROP COLUMN job_id_topic_filter; diff --git a/core/store/migrate/migrations/0022_unfinished_pipeline_task_run_idx.sql b/core/store/migrate/migrations/0022_unfinished_pipeline_task_run_idx.sql new file mode 100644 
index 00000000..d08624ba --- /dev/null +++ b/core/store/migrate/migrations/0022_unfinished_pipeline_task_run_idx.sql @@ -0,0 +1,4 @@ +-- +goose Up +CREATE INDEX idx_unfinished_pipeline_task_runs ON pipeline_task_runs (pipeline_run_id) WHERE finished_at IS NULL; +-- +goose Down +DROP INDEX idx_unfinished_pipeline_task_runs; diff --git a/core/store/migrate/migrations/0023_add_confirmations_to_direct_request.sql b/core/store/migrate/migrations/0023_add_confirmations_to_direct_request.sql new file mode 100644 index 00000000..f309dafc --- /dev/null +++ b/core/store/migrate/migrations/0023_add_confirmations_to_direct_request.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE direct_request_specs ADD COLUMN num_confirmations bigint DEFAULT NULL; +-- +goose Down +ALTER TABLE direct_request_specs DROP COLUMN num_confirmations; diff --git a/core/store/migrate/migrations/0024_add_cron_spec_tables.sql b/core/store/migrate/migrations/0024_add_cron_spec_tables.sql new file mode 100644 index 00000000..043315fb --- /dev/null +++ b/core/store/migrate/migrations/0024_add_cron_spec_tables.sql @@ -0,0 +1,21 @@ +-- +goose Up +CREATE TABLE cron_specs ( + id SERIAL PRIMARY KEY, + cron_schedule text NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); +ALTER TABLE jobs ADD COLUMN cron_spec_id INT REFERENCES cron_specs(id), +DROP CONSTRAINT chk_only_one_spec, +ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls(offchainreporting_oracle_spec_id, direct_request_spec_id, flux_monitor_spec_id, keeper_spec_id, cron_spec_id) = 1 +); + +-- +goose Down +ALTER TABLE jobs DROP CONSTRAINT chk_only_one_spec, +ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls(offchainreporting_oracle_spec_id, direct_request_spec_id, flux_monitor_spec_id, keeper_spec_id) = 1 +); + +ALTER TABLE jobs DROP COLUMN cron_spec_id; +DROP TABLE IF EXISTS cron_specs; diff --git a/core/store/migrate/migrations/0025_create_log_config_table.sql 
b/core/store/migrate/migrations/0025_create_log_config_table.sql new file mode 100644 index 00000000..43a94649 --- /dev/null +++ b/core/store/migrate/migrations/0025_create_log_config_table.sql @@ -0,0 +1,20 @@ +-- +goose Up +CREATE TYPE log_level AS ENUM ( + 'debug', + 'info', + 'warn', + 'error', + 'panic' +); + +CREATE TABLE log_configs ( + "id" BIGSERIAL PRIMARY KEY, + "service_name" text NOT NULL UNIQUE, + "log_level" log_level NOT NULL, + "created_at" timestamp with time zone, + "updated_at" timestamp with time zone +); + +-- +goose Down +DROP TABLE IF EXISTS log_configs; +DROP TYPE IF EXISTS log_level; diff --git a/core/store/migrate/migrations/0026_eth_tx_meta.sql b/core/store/migrate/migrations/0026_eth_tx_meta.sql new file mode 100644 index 00000000..4bf9342b --- /dev/null +++ b/core/store/migrate/migrations/0026_eth_tx_meta.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE eth_txes ADD COLUMN meta jsonb; +-- +goose Down +ALTER TABLE eth_txes DROP COLUMN meta; diff --git a/core/store/migrate/migrations/0027_cascade_ocr_latest_round_request.sql b/core/store/migrate/migrations/0027_cascade_ocr_latest_round_request.sql new file mode 100644 index 00000000..aab0de3a --- /dev/null +++ b/core/store/migrate/migrations/0027_cascade_ocr_latest_round_request.sql @@ -0,0 +1,8 @@ +-- +goose Up +ALTER TABLE offchainreporting_latest_round_requested +DROP CONSTRAINT offchainreporting_latest_roun_offchainreporting_oracle_spe_fkey, +ADD CONSTRAINT offchainreporting_latest_roun_offchainreporting_oracle_spe_fkey + FOREIGN KEY (offchainreporting_oracle_spec_id) + REFERENCES offchainreporting_oracle_specs (id) + ON DELETE CASCADE + DEFERRABLE INITIALLY IMMEDIATE; diff --git a/core/store/migrate/migrations/0028_vrf_v2.sql b/core/store/migrate/migrations/0028_vrf_v2.sql new file mode 100644 index 00000000..282e79c5 --- /dev/null +++ b/core/store/migrate/migrations/0028_vrf_v2.sql @@ -0,0 +1,24 @@ +-- +goose Up +CREATE TABLE vrf_specs ( + id BIGSERIAL PRIMARY KEY, + public_key text 
NOT NULL, + coordinator_address bytea NOT NULL, + confirmations bigint NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL + CONSTRAINT coordinator_address_len_chk CHECK (octet_length(coordinator_address) = 20) +); +ALTER TABLE jobs ADD COLUMN vrf_spec_id INT REFERENCES vrf_specs(id), +DROP CONSTRAINT chk_only_one_spec, +ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls(offchainreporting_oracle_spec_id, direct_request_spec_id, flux_monitor_spec_id, keeper_spec_id, cron_spec_id, vrf_spec_id) = 1 +); + +-- +goose Down +ALTER TABLE jobs DROP CONSTRAINT chk_only_one_spec, +ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls(offchainreporting_oracle_spec_id, direct_request_spec_id, flux_monitor_spec_id, keeper_spec_id, cron_spec_id) = 1 +); + +ALTER TABLE jobs DROP COLUMN vrf_spec_id; +DROP TABLE IF EXISTS vrf_specs; \ No newline at end of file diff --git a/core/store/migrate/migrations/0029_add_webhook_spec_tables.sql b/core/store/migrate/migrations/0029_add_webhook_spec_tables.sql new file mode 100644 index 00000000..71b112f1 --- /dev/null +++ b/core/store/migrate/migrations/0029_add_webhook_spec_tables.sql @@ -0,0 +1,27 @@ +-- +goose Up +CREATE TABLE webhook_specs ( + id SERIAL PRIMARY KEY, + on_chain_job_spec_id uuid NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); + +ALTER TABLE jobs ADD COLUMN webhook_spec_id INT REFERENCES webhook_specs(id), +DROP CONSTRAINT chk_only_one_spec, +ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls(offchainreporting_oracle_spec_id, direct_request_spec_id, flux_monitor_spec_id, keeper_spec_id, cron_spec_id, vrf_spec_id, webhook_spec_id) = 1 +); + +ALTER TABLE direct_request_specs DROP CONSTRAINT direct_request_specs_on_chain_job_spec_id_check; +ALTER TABLE direct_request_specs DROP COLUMN on_chain_job_spec_id; +ALTER TABLE direct_request_specs ADD COLUMN on_chain_job_spec_id uuid NOT NULL; + +-- +goose Down 
+ALTER TABLE jobs DROP CONSTRAINT chk_only_one_spec, +ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls(offchainreporting_oracle_spec_id, direct_request_spec_id, flux_monitor_spec_id, keeper_spec_id, cron_spec_id, vrf_spec_id) = 1 +); + +ALTER TABLE jobs DROP COLUMN webhook_spec_id; + +DROP TABLE IF EXISTS webhook_specs; diff --git a/core/store/migrate/migrations/0030_drop_keys_last_used.sql b/core/store/migrate/migrations/0030_drop_keys_last_used.sql new file mode 100644 index 00000000..071a5367 --- /dev/null +++ b/core/store/migrate/migrations/0030_drop_keys_last_used.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE keys DROP COLUMN last_used; +-- +goose Down +ALTER TABLE keys ADD COLUMN last_used timestamptz; diff --git a/core/store/migrate/migrations/0031_eth_tx_trigger_with_key_address.sql b/core/store/migrate/migrations/0031_eth_tx_trigger_with_key_address.sql new file mode 100644 index 00000000..9727446b --- /dev/null +++ b/core/store/migrate/migrations/0031_eth_tx_trigger_with_key_address.sql @@ -0,0 +1,14 @@ +-- +goose Up +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION public.notifyethtxinsertion() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + PERFORM pg_notify('insert_on_eth_txes'::text, encode(NEW.from_address, 'hex')); + RETURN NULL; + END + $$; + +DROP TRIGGER IF EXISTS notify_eth_tx_insertion on public.eth_txes; +CREATE TRIGGER notify_eth_tx_insertion AFTER INSERT ON public.eth_txes FOR EACH ROW EXECUTE PROCEDURE public.notifyethtxinsertion(); +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0032_rename_direct_request_specs_num_confirmations.sql b/core/store/migrate/migrations/0032_rename_direct_request_specs_num_confirmations.sql new file mode 100644 index 00000000..9f75356f --- /dev/null +++ b/core/store/migrate/migrations/0032_rename_direct_request_specs_num_confirmations.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE direct_request_specs RENAME COLUMN num_confirmations TO min_incoming_confirmations; +-- +goose 
Down +ALTER TABLE direct_request_specs RENAME COLUMN min_incoming_confirmations TO num_confirmations; diff --git a/core/store/migrate/migrations/0033_flux_monitor_round_stats_fk_index.sql b/core/store/migrate/migrations/0033_flux_monitor_round_stats_fk_index.sql new file mode 100644 index 00000000..7428c42e --- /dev/null +++ b/core/store/migrate/migrations/0033_flux_monitor_round_stats_fk_index.sql @@ -0,0 +1,7 @@ +-- +goose Up +CREATE INDEX flux_monitor_round_stats_job_run_id_idx ON flux_monitor_round_stats (job_run_id); +CREATE INDEX flux_monitor_round_stats_v2_pipeline_run_id_idx ON flux_monitor_round_stats_v2 (pipeline_run_id); + +-- +goose Down +DROP INDEX flux_monitor_round_stats_job_run_id_idx; +DROP INDEX flux_monitor_round_stats_v2_pipeline_run_id_idx; diff --git a/core/store/migrate/migrations/0034_webhook_external_initiator.sql b/core/store/migrate/migrations/0034_webhook_external_initiator.sql new file mode 100644 index 00000000..5bf74e43 --- /dev/null +++ b/core/store/migrate/migrations/0034_webhook_external_initiator.sql @@ -0,0 +1,15 @@ +-- +goose Up +ALTER TABLE external_initiators ADD CONSTRAINT external_initiators_name_unique UNIQUE(name); +ALTER TABLE webhook_specs ADD COLUMN external_initiator_name TEXT REFERENCES external_initiators (name); +ALTER TABLE webhook_specs ADD COLUMN external_initiator_spec JSONB; +ALTER TABLE webhook_specs ADD CONSTRAINT external_initiator_null_not_null CHECK ( + external_initiator_name IS NULL AND external_initiator_spec IS NULL + OR + external_initiator_name IS NOT NULL AND external_initiator_spec IS NOT NULL +); + +-- +goose Down +ALTER TABLE external_initiators DROP CONSTRAINT external_initiators_name_unique; +ALTER TABLE webhook_specs DROP COLUMN external_initiator_name; +ALTER TABLE webhook_specs DROP COLUMN external_initiator_spec; +ALTER TABLE webhook_specs DROP CONSTRAINT external_initiator_null_not_null; diff --git a/core/store/migrate/migrations/0035_create_feeds_managers.sql
b/core/store/migrate/migrations/0035_create_feeds_managers.sql new file mode 100644 index 00000000..d82ccdfe --- /dev/null +++ b/core/store/migrate/migrations/0035_create_feeds_managers.sql @@ -0,0 +1,13 @@ +-- +goose Up +CREATE TABLE feeds_managers ( + id BIGSERIAL PRIMARY KEY, + name VARCHAR (255) NOT NULL, + uri VARCHAR (255) NOT NULL, + public_key bytea CHECK (octet_length(public_key) = 32) NOT NULL UNIQUE, + job_types TEXT [] NOT NULL, + network VARCHAR (100) NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); +-- +goose Down + DROP TABLE feeds_managers diff --git a/core/store/migrate/migrations/0036_external_job_id.go b/core/store/migrate/migrations/0036_external_job_id.go new file mode 100644 index 00000000..fc9ec08e --- /dev/null +++ b/core/store/migrate/migrations/0036_external_job_id.go @@ -0,0 +1,81 @@ +package migrations + +import ( + "context" + "database/sql" + "fmt" + + "github.com/google/uuid" + "github.com/jmoiron/sqlx" + "github.com/pressly/goose/v3" +) + +func init() { + goose.AddMigrationContext(Up36, Down36) +} + +const ( + up36_1 = ` + ALTER TABLE direct_request_specs DROP COLUMN on_chain_job_spec_id; + ALTER TABLE webhook_specs DROP COLUMN on_chain_job_spec_id; + ALTER TABLE vrf_specs ADD CONSTRAINT vrf_specs_public_key_fkey FOREIGN KEY (public_key) REFERENCES encrypted_vrf_keys(public_key) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE; + ALTER TABLE jobs ADD COLUMN external_job_id uuid; + ` + up36_2 = ` + ALTER TABLE jobs + ALTER COLUMN external_job_id SET NOT NULL, + ADD CONSTRAINT external_job_id_uniq UNIQUE(external_job_id), + ADD CONSTRAINT non_zero_uuid_check CHECK (external_job_id <> '00000000-0000-0000-0000-000000000000'); + ` + down36 = ` + ALTER TABLE direct_request_specs ADD COLUMN on_chain_job_spec_id bytea; + ALTER TABLE webhook_specs ADD COLUMN on_chain_job_spec_id bytea; + ALTER TABLE jobs DROP CONSTRAINT external_job_id_uniq; + ALTER TABLE vrf_specs DROP CONSTRAINT 
vrf_specs_public_key_fkey; + ` +) + +// nolint +func Up36(ctx context.Context, tx *sql.Tx) error { + // Add the external ID column and remove type specific ones. + if _, err := tx.ExecContext(ctx, up36_1); err != nil { + return err + } + + // Update all jobs to have an external_job_id. + // We do this to avoid using the uuid postgres extension. + var jobIDs []int32 + txx := sqlx.Tx{Tx: tx} + if err := txx.SelectContext(ctx, &jobIDs, "SELECT id FROM jobs"); err != nil { + return err + } + if len(jobIDs) != 0 { + stmt := `UPDATE jobs AS j SET external_job_id = vals.external_job_id FROM (values ` + for i := range jobIDs { + if i == len(jobIDs)-1 { + stmt += fmt.Sprintf("(uuid('%s'), %d))", uuid.New(), jobIDs[i]) + } else { + stmt += fmt.Sprintf("(uuid('%s'), %d),", uuid.New(), jobIDs[i]) + } + } + stmt += ` AS vals(external_job_id, id) WHERE vals.id = j.id` + if _, err := tx.ExecContext(ctx, stmt); err != nil { + return err + + } + } + + // Add constraints on the external_job_id. + if _, err := tx.ExecContext(ctx, up36_2); err != nil { + return err + } + return nil +} + +// nolint +func Down36(ctx context.Context, tx *sql.Tx) error { + if _, err := tx.ExecContext(ctx, down36); err != nil { + return err + } + return nil +} diff --git a/core/store/migrate/migrations/0037_cascade_deletes.sql b/core/store/migrate/migrations/0037_cascade_deletes.sql new file mode 100644 index 00000000..add270a0 --- /dev/null +++ b/core/store/migrate/migrations/0037_cascade_deletes.sql @@ -0,0 +1,28 @@ +-- +goose Up +ALTER TABLE jobs DROP CONSTRAINT jobs_cron_spec_id_fkey, + DROP CONSTRAINT jobs_direct_request_spec_id_fkey, + DROP CONSTRAINT jobs_vrf_spec_id_fkey, + DROP CONSTRAINT jobs_keeper_spec_id_fkey, + DROP CONSTRAINT jobs_webhook_spec_id_fkey, + DROP CONSTRAINT jobs_flux_monitor_spec_id_fkey; +ALTER TABLE jobs ADD CONSTRAINT jobs_cron_spec_id_fkey FOREIGN KEY (cron_spec_id) REFERENCES cron_specs(id) ON DELETE CASCADE, + ADD CONSTRAINT jobs_direct_request_spec_id_fkey FOREIGN KEY 
(direct_request_spec_id) REFERENCES direct_request_specs(id) ON DELETE CASCADE, + ADD CONSTRAINT jobs_vrf_spec_id_fkey FOREIGN KEY (vrf_spec_id) REFERENCES vrf_specs(id) ON DELETE CASCADE, + ADD CONSTRAINT jobs_keeper_spec_id_fkey FOREIGN KEY (keeper_spec_id) REFERENCES keeper_specs(id) ON DELETE CASCADE, + ADD CONSTRAINT jobs_webhook_spec_id_fkey FOREIGN KEY (webhook_spec_id) REFERENCES webhook_specs(id) ON DELETE CASCADE, + ADD CONSTRAINT jobs_flux_monitor_spec_id_fkey FOREIGN KEY (flux_monitor_spec_id) REFERENCES flux_monitor_specs(id) ON DELETE CASCADE; +-- +goose Down +ALTER TABLE jobs + DROP CONSTRAINT jobs_cron_spec_id_fkey, + DROP CONSTRAINT jobs_direct_request_spec_id_fkey, + DROP CONSTRAINT jobs_vrf_spec_id_fkey, + DROP CONSTRAINT jobs_keeper_spec_id_fkey, + DROP CONSTRAINT jobs_webhook_spec_id_fkey, + DROP CONSTRAINT jobs_flux_monitor_spec_id_fkey; +ALTER TABLE jobs + ADD CONSTRAINT jobs_cron_spec_id_fkey FOREIGN KEY (cron_spec_id) REFERENCES cron_specs(id), + ADD CONSTRAINT jobs_direct_request_spec_id_fkey FOREIGN KEY (direct_request_spec_id) REFERENCES direct_request_specs(id), + ADD CONSTRAINT jobs_vrf_spec_id_fkey FOREIGN KEY (vrf_spec_id) REFERENCES vrf_specs(id), + ADD CONSTRAINT jobs_keeper_spec_id_fkey FOREIGN KEY (keeper_spec_id) REFERENCES keeper_specs(id), + ADD CONSTRAINT jobs_webhook_spec_id_fkey FOREIGN KEY (webhook_spec_id) REFERENCES webhook_specs(id), + ADD CONSTRAINT jobs_flux_monitor_spec_id_fkey FOREIGN KEY (flux_monitor_spec_id) REFERENCES flux_monitor_specs(id); diff --git a/core/store/migrate/migrations/0038_create_csa_keys.sql b/core/store/migrate/migrations/0038_create_csa_keys.sql new file mode 100644 index 00000000..2e3cb225 --- /dev/null +++ b/core/store/migrate/migrations/0038_create_csa_keys.sql @@ -0,0 +1,10 @@ +-- +goose Up +CREATE TABLE csa_keys( + id BIGSERIAL PRIMARY KEY, + public_key bytea NOT NULL CHECK (octet_length(public_key) = 32) UNIQUE, + encrypted_private_key jsonb NOT NULL, + created_at timestamptz NOT NULL, + 
updated_at timestamptz NOT NULL +); +-- +goose Down +DROP TABLE csa_keys; diff --git a/core/store/migrate/migrations/0039_remove_fmv2_precision.sql b/core/store/migrate/migrations/0039_remove_fmv2_precision.sql new file mode 100644 index 00000000..a980c3d8 --- /dev/null +++ b/core/store/migrate/migrations/0039_remove_fmv2_precision.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE flux_monitor_specs DROP COLUMN precision; +-- +goose Down +ALTER TABLE flux_monitor_specs ADD COLUMN precision integer; diff --git a/core/store/migrate/migrations/0040_heads_l1_block_number.sql b/core/store/migrate/migrations/0040_heads_l1_block_number.sql new file mode 100644 index 00000000..fa03793b --- /dev/null +++ b/core/store/migrate/migrations/0040_heads_l1_block_number.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE heads ADD COLUMN l1_block_number bigint; +-- +goose Down +ALTER TABLE heads DROP COLUMN l1_block_number; diff --git a/core/store/migrate/migrations/0041_eth_tx_strategies.sql b/core/store/migrate/migrations/0041_eth_tx_strategies.sql new file mode 100644 index 00000000..ceae01fe --- /dev/null +++ b/core/store/migrate/migrations/0041_eth_tx_strategies.sql @@ -0,0 +1,5 @@ +-- +goose Up +ALTER TABLE eth_txes ADD COLUMN subject uuid; +CREATE INDEX idx_eth_txes_unstarted_subject_id ON eth_txes (subject, id) WHERE subject IS NOT NULL AND state = 'unstarted'; +-- +goose Down +ALTER TABLE eth_txes DROP COLUMN subject; diff --git a/core/store/migrate/migrations/0042_create_job_proposals.sql b/core/store/migrate/migrations/0042_create_job_proposals.sql new file mode 100644 index 00000000..f46d720b --- /dev/null +++ b/core/store/migrate/migrations/0042_create_job_proposals.sql @@ -0,0 +1,23 @@ +-- +goose Up +CREATE TYPE job_proposal_status AS ENUM ('pending', 'approved', 'rejected'); +CREATE TABLE job_proposals ( + id BIGSERIAL PRIMARY KEY, + spec TEXT NOT NULL, + status job_proposal_status NOT NULL, + job_id uuid REFERENCES jobs (external_job_id) DEFERRABLE INITIALLY IMMEDIATE, + 
feeds_manager_id int NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT fk_feeds_manager FOREIGN KEY(feeds_manager_id) REFERENCES feeds_managers(id) DEFERRABLE INITIALLY IMMEDIATE, + CONSTRAINT chk_job_proposals_status_fsm CHECK ( + (status = 'pending' AND job_id IS NULL) OR + (status = 'approved' AND job_id IS NOT NULL) OR + (status = 'rejected' AND job_id IS NULL) + ) +); +CREATE UNIQUE INDEX idx_job_proposals_job_id on job_proposals (job_id); +CREATE INDEX idx_job_proposals_feeds_manager_id on job_proposals (feeds_manager_id); + +-- +goose Down +DROP TABLE job_proposals; +DROP TYPE job_proposal_status; diff --git a/core/store/migrate/migrations/0043_gas_limit_on_eth_tx_attempts.sql b/core/store/migrate/migrations/0043_gas_limit_on_eth_tx_attempts.sql new file mode 100644 index 00000000..88e176c1 --- /dev/null +++ b/core/store/migrate/migrations/0043_gas_limit_on_eth_tx_attempts.sql @@ -0,0 +1,10 @@ +-- +goose Up +ALTER TABLE eth_tx_attempts ADD COLUMN chain_specific_gas_limit bigint; +UPDATE eth_tx_attempts +SET chain_specific_gas_limit = eth_txes.gas_limit +FROM eth_txes +WHERE eth_txes.id = eth_tx_attempts.eth_tx_id; +ALTER TABLE eth_tx_attempts ALTER COLUMN chain_specific_gas_limit SET NOT NULL; + +-- +goose Down +ALTER TABLE eth_tx_attempts DROP COLUMN chain_specific_gas_limit; diff --git a/core/store/migrate/migrations/0044_create_table_ocr_discoverer_database.sql b/core/store/migrate/migrations/0044_create_table_ocr_discoverer_database.sql new file mode 100644 index 00000000..f565399b --- /dev/null +++ b/core/store/migrate/migrations/0044_create_table_ocr_discoverer_database.sql @@ -0,0 +1,11 @@ +-- +goose Up +CREATE TABLE offchainreporting_discoverer_announcements ( + local_peer_id text NOT NULL REFERENCES encrypted_p2p_keys (peer_id) DEFERRABLE INITIALLY IMMEDIATE, + remote_peer_id text NOT NULL, + ann bytea NOT NULL, + created_at timestamptz not null, + updated_at timestamptz not null, 
+ PRIMARY KEY(local_peer_id, remote_peer_id) +); +-- +goose Down +DROP TABLE offchainreporting_discoverer_announcements; diff --git a/core/store/migrate/migrations/0045_add_uuid_to_pipeline_task_runs.sql b/core/store/migrate/migrations/0045_add_uuid_to_pipeline_task_runs.sql new file mode 100644 index 00000000..b87fa9e1 --- /dev/null +++ b/core/store/migrate/migrations/0045_add_uuid_to_pipeline_task_runs.sql @@ -0,0 +1,36 @@ +-- +goose Up +-- Truncate tables to ease bigint -> UUID migration + TRUNCATE TABLE pipeline_runs, pipeline_task_runs, flux_monitor_round_stats_v2; + + -- Migrate pipeline_task_runs to UUID + ALTER TABLE pipeline_task_runs DROP CONSTRAINT pipeline_task_runs_pkey; + ALTER TABLE pipeline_task_runs DROP COLUMN id; + ALTER TABLE pipeline_task_runs ADD COLUMN id uuid PRIMARY KEY; + + -- Add state & inputs to pipeline_runs + ALTER TABLE pipeline_runs ADD COLUMN inputs jsonb; + CREATE TYPE pipeline_runs_state AS ENUM ( + 'running', + 'suspended', + 'errored', + 'completed' + ); + ALTER TABLE pipeline_runs ADD COLUMN state pipeline_runs_state NOT NULL DEFAULT 'completed'; + + ALTER TABLE pipeline_runs DROP CONSTRAINT pipeline_runs_check; + ALTER TABLE pipeline_runs ADD CONSTRAINT pipeline_runs_check CHECK ( + ((state IN ('completed', 'errored')) AND (finished_at IS NOT NULL) AND (num_nulls(outputs, errors) = 0)) + OR + ((state IN ('running', 'suspended')) AND num_nulls(finished_at, outputs, errors) = 3) + ); + +-- +goose Down + ALTER TABLE pipeline_runs DROP CONSTRAINT pipeline_runs_check; + ALTER TABLE pipeline_runs ADD CONSTRAINT pipeline_runs_check CHECK ( + (((outputs IS NULL) AND (errors IS NULL) AND (finished_at IS NULL)) + OR ((outputs IS NOT NULL) AND (errors IS NOT NULL) AND (finished_at IS NOT NULL))) + ); + ALTER TABLE pipeline_task_runs DROP CONSTRAINT IF EXISTS pipeline_task_runs_run_id_key; + ALTER TABLE pipeline_task_runs DROP COLUMN run_id; + ALTER TABLE pipeline_runs DROP COLUMN inputs; + DROP TYPE pipeline_runs_state; diff --git 
a/core/store/migrate/migrations/0046_add_fmv2_drumbeat_ticker.sql b/core/store/migrate/migrations/0046_add_fmv2_drumbeat_ticker.sql new file mode 100644 index 00000000..080d773d --- /dev/null +++ b/core/store/migrate/migrations/0046_add_fmv2_drumbeat_ticker.sql @@ -0,0 +1,6 @@ +-- +goose Up +ALTER TABLE flux_monitor_specs ADD COLUMN drumbeat_enabled boolean NOT NULL DEFAULT false; +ALTER TABLE flux_monitor_specs ADD COLUMN drumbeat_schedule text; +-- +goose Down +ALTER TABLE flux_monitor_specs DROP COLUMN drumbeat_enabled; +ALTER TABLE flux_monitor_specs DROP COLUMN drumbeat_schedule; diff --git a/core/store/migrate/migrations/0047_add_uuid_to_job_proposals.sql b/core/store/migrate/migrations/0047_add_uuid_to_job_proposals.sql new file mode 100644 index 00000000..8c165b1e --- /dev/null +++ b/core/store/migrate/migrations/0047_add_uuid_to_job_proposals.sql @@ -0,0 +1,8 @@ +-- +goose Up +ALTER TABLE job_proposals +ADD COLUMN remote_uuid UUID NOT NULL; + +CREATE UNIQUE INDEX idx_job_proposals_remote_uuid ON job_proposals(remote_uuid); +-- +goose Down +ALTER TABLE job_proposals +DROP COLUMN remote_uuid; diff --git a/core/store/migrate/migrations/0048_add_ocr_bootstrap_node_to_feeds_manager.sql b/core/store/migrate/migrations/0048_add_ocr_bootstrap_node_to_feeds_manager.sql new file mode 100644 index 00000000..7859cbed --- /dev/null +++ b/core/store/migrate/migrations/0048_add_ocr_bootstrap_node_to_feeds_manager.sql @@ -0,0 +1,6 @@ +-- +goose Up +ALTER TABLE feeds_managers +ADD COLUMN is_ocr_bootstrap_peer boolean NOT NULL DEFAULT false; +-- +goose Down +ALTER TABLE feeds_managers +DROP COLUMN is_ocr_bootstrap_peer; diff --git a/core/store/migrate/migrations/0049_rename_job_id_to_external_job_id_in_job_proposals.sql b/core/store/migrate/migrations/0049_rename_job_id_to_external_job_id_in_job_proposals.sql new file mode 100644 index 00000000..0b717033 --- /dev/null +++ b/core/store/migrate/migrations/0049_rename_job_id_to_external_job_id_in_job_proposals.sql @@ -0,0 
+1,11 @@ +-- +goose Up +ALTER TABLE job_proposals +RENAME COLUMN job_id TO external_job_id; + +ALTER INDEX idx_job_proposals_job_id RENAME TO idx_job_proposals_external_job_id; + +-- +goose Down +ALTER TABLE job_proposals +RENAME COLUMN external_job_id TO job_id; + +ALTER INDEX idx_job_proposals_external_job_id RENAME TO idx_job_proposals_job_id; diff --git a/core/store/migrate/migrations/0050_add_ocr_bootstrap_fields_to_feeds_managers.sql b/core/store/migrate/migrations/0050_add_ocr_bootstrap_fields_to_feeds_managers.sql new file mode 100644 index 00000000..eb647c03 --- /dev/null +++ b/core/store/migrate/migrations/0050_add_ocr_bootstrap_fields_to_feeds_managers.sql @@ -0,0 +1,17 @@ +-- +goose Up +ALTER TABLE feeds_managers +DROP COLUMN network, +ADD COLUMN ocr_bootstrap_peer_multiaddr VARCHAR, +ADD CONSTRAINT chk_ocr_bootstrap_peer_multiaddr CHECK ( NOT ( + is_ocr_bootstrap_peer AND + ( + ocr_bootstrap_peer_multiaddr IS NULL OR + ocr_bootstrap_peer_multiaddr = '' + ) +)); + +-- +goose Down +ALTER TABLE feeds_managers +ADD COLUMN network VARCHAR (100), +DROP CONSTRAINT chk_ocr_bootstrap_peer_multiaddr, +DROP COLUMN ocr_bootstrap_peer_multiaddr; diff --git a/core/store/migrate/migrations/0051_webhook_specs_external_initiators_join.sql b/core/store/migrate/migrations/0051_webhook_specs_external_initiators_join.sql new file mode 100644 index 00000000..aa460b2b --- /dev/null +++ b/core/store/migrate/migrations/0051_webhook_specs_external_initiators_join.sql @@ -0,0 +1,20 @@ +-- +goose Up +ALTER TABLE webhook_specs DROP COLUMN external_initiator_name, DROP COLUMN external_initiator_spec; + +CREATE TABLE external_initiator_webhook_specs ( + external_initiator_id bigint NOT NULL references external_initiators (id) ON DELETE RESTRICT DEFERRABLE INITIALLY IMMEDIATE, + webhook_spec_id int NOT NULL references webhook_specs (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE, + spec jsonb NOT NULL, + PRIMARY KEY (external_initiator_id, webhook_spec_id) +); + +CREATE INDEX 
idx_external_initiator_webhook_specs_webhook_spec_id ON external_initiator_webhook_specs (webhook_spec_id); +CREATE UNIQUE INDEX idx_jobs_unique_flux_monitor_spec_id ON jobs (flux_monitor_spec_id); +CREATE UNIQUE INDEX idx_jobs_unique_keeper_spec_id ON jobs (keeper_spec_id); +CREATE UNIQUE INDEX idx_jobs_unique_cron_spec_id ON jobs (cron_spec_id); +CREATE UNIQUE INDEX idx_jobs_unique_vrf_spec_id ON jobs (vrf_spec_id); +CREATE UNIQUE INDEX idx_jobs_unique_webhook_spec_id ON jobs (webhook_spec_id); + +-- +goose Down +DROP TABLE external_initiator_webhook_specs; +ALTER TABLE webhook_specs ADD COLUMN external_initiator_name text, ADD COLUMN external_initiator_spec text; diff --git a/core/store/migrate/migrations/0052_not_null_job_pipeline_spec_id.sql b/core/store/migrate/migrations/0052_not_null_job_pipeline_spec_id.sql new file mode 100644 index 00000000..0f70bf65 --- /dev/null +++ b/core/store/migrate/migrations/0052_not_null_job_pipeline_spec_id.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE jobs ALTER COLUMN pipeline_spec_id SET NOT NULL; +-- +goose Down +ALTER TABLE jobs ALTER COLUMN pipeline_spec_id DROP NOT NULL; diff --git a/core/store/migrate/migrations/0053_add_fmv2_drumbeat_random_delay.sql b/core/store/migrate/migrations/0053_add_fmv2_drumbeat_random_delay.sql new file mode 100644 index 00000000..4b069187 --- /dev/null +++ b/core/store/migrate/migrations/0053_add_fmv2_drumbeat_random_delay.sql @@ -0,0 +1,11 @@ +-- +goose Up +ALTER TABLE flux_monitor_specs ADD COLUMN drumbeat_random_delay bigint NOT NULL DEFAULT 0; + + UPDATE flux_monitor_specs SET drumbeat_schedule = '' where drumbeat_schedule IS NULL; + ALTER TABLE flux_monitor_specs ALTER COLUMN drumbeat_schedule SET DEFAULT ''; + ALTER TABLE flux_monitor_specs ALTER COLUMN drumbeat_schedule SET NOT NULL; + +-- +goose Down +ALTER TABLE flux_monitor_specs ALTER COLUMN drumbeat_schedule DROP NOT NULL; +ALTER TABLE flux_monitor_specs ALTER COLUMN drumbeat_schedule DROP DEFAULT; +ALTER TABLE flux_monitor_specs DROP 
COLUMN drumbeat_random_delay; diff --git a/core/store/migrate/migrations/0054_remove_legacy_pipeline.go b/core/store/migrate/migrations/0054_remove_legacy_pipeline.go new file mode 100644 index 00000000..924d3230 --- /dev/null +++ b/core/store/migrate/migrations/0054_remove_legacy_pipeline.go @@ -0,0 +1,61 @@ +package migrations + +import ( + "context" + "database/sql" + + "github.com/pkg/errors" + "github.com/pressly/goose/v3" +) + +const up54 = ` +ALTER TABLE log_broadcasts DROP COLUMN job_id; +DROP TABLE service_agreements; +DROP TABLE eth_task_run_txes; +DROP TABLE task_runs; +DROP TABLE task_specs; +DROP TABLE flux_monitor_round_stats; +DROP TABLE job_runs; +DROP TABLE job_spec_errors; +DROP TABLE initiators; +DROP TABLE job_specs; + +DROP TABLE run_results; +DROP TABLE run_requests; +DROP TABLE sync_events; + +ALTER TABLE log_broadcasts RENAME COLUMN job_id_v2 TO job_id; +ALTER TABLE job_spec_errors_v2 RENAME TO job_spec_errors; +` + +func init() { + goose.AddMigrationContext(Up54, Down54) +} + +// nolint +func Up54(ctx context.Context, tx *sql.Tx) error { + if err := CheckNoLegacyJobs(tx); err != nil { + return err + } + if _, err := tx.ExecContext(ctx, up54); err != nil { + return err + } + return nil +} + +// nolint +func Down54(ctx context.Context, tx *sql.Tx) error { + return errors.New("irreversible migration") +} + +// CheckNoLegacyJobs ensures that there are no legacy job specs +func CheckNoLegacyJobs(tx *sql.Tx) error { + var count int + if err := tx.QueryRow(`SELECT COUNT(*) FROM job_specs WHERE deleted_at IS NULL`).Scan(&count); err != nil { + return err + } + if count > 0 { + return errors.Errorf("cannot migrate; this release removes support for legacy job specs but there are still %d in the database. Please migrate these jobs specs to the V2 pipeline (further details found here: https://docs.chain.link/docs/jobs/migration-v1-v2/) and make sure job_specs table is empty (run sql command: `TRUNCATE job_specs CASCADE;`), then run the migration again. 
These operations are NOT REVERSIBLE, so it is STRONGLY RECOMMENDED that you take a database backup before continuing", count) + } + return nil +} diff --git a/core/store/migrate/migrations/0055_add_multiaddrs_to_job_proposal.sql b/core/store/migrate/migrations/0055_add_multiaddrs_to_job_proposal.sql new file mode 100644 index 00000000..b339324e --- /dev/null +++ b/core/store/migrate/migrations/0055_add_multiaddrs_to_job_proposal.sql @@ -0,0 +1,6 @@ +-- +goose Up +ALTER TABLE job_proposals +ADD COLUMN multiaddrs TEXT[] DEFAULT NULL; +-- +goose Down +ALTER TABLE job_proposals +DROP COLUMN multiaddrs; diff --git a/core/store/migrate/migrations/0056_multichain.go b/core/store/migrate/migrations/0056_multichain.go new file mode 100644 index 00000000..128e7be5 --- /dev/null +++ b/core/store/migrate/migrations/0056_multichain.go @@ -0,0 +1,85 @@ +package migrations + +import ( + "context" + "database/sql" + "fmt" + "log" + "math/big" + "os" + "strings" + + "github.com/pressly/goose/v3" +) + +func init() { + goose.AddMigrationContext(Up56, Down56) +} + +const up56 = ` +CREATE TABLE evm_chains ( + id numeric(78,0) PRIMARY KEY, + cfg jsonb NOT NULL DEFAULT '{}', + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL +); + +CREATE TABLE nodes ( + id serial PRIMARY KEY, + name varchar(255) NOT NULL CHECK (name != ''), + evm_chain_id numeric(78,0) NOT NULL REFERENCES evm_chains (id), + ws_url text CHECK (ws_url != ''), + http_url text CHECK (http_url != ''), + send_only bool NOT NULL CONSTRAINT primary_or_sendonly CHECK ( + (send_only AND ws_url IS NULL AND http_url IS NOT NULL) + OR + (NOT send_only AND ws_url IS NOT NULL) + ), + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL +); + +CREATE INDEX idx_nodes_evm_chain_id ON nodes (evm_chain_id); +CREATE UNIQUE INDEX idx_nodes_unique_name ON nodes (lower(name)); +` + +const down56 = ` +DROP TABLE nodes; +DROP TABLE evm_chains; +` + +// nolint +func Up56(ctx context.Context, tx *sql.Tx) error { + 
if _, err := tx.ExecContext(ctx, up56); err != nil { + return err + } + evmDisabled := os.Getenv("EVM_ENABLED") == "false" + if evmDisabled { + dbURL := os.Getenv("DATABASE_URL") + if strings.Contains(dbURL, "_test") { + log.Println("Running on a database ending in _test; assume we are running in a test suite and skip creation of the default chain") + } else { + chainIDStr := os.Getenv("ETH_CHAIN_ID") + if chainIDStr == "" { + log.Println("ETH_CHAIN_ID was not specified, auto-creating chain with id 1") + chainIDStr = "1" + } + chainID, ok := new(big.Int).SetString(chainIDStr, 10) + if !ok { + panic(fmt.Sprintf("ETH_CHAIN_ID was invalid, expected a number, got: %s", chainIDStr)) + } + _, err := tx.ExecContext(ctx, "INSERT INTO evm_chains (id, created_at, updated_at) VALUES ($1, NOW(), NOW());", chainID.String()) + return err + } + } + return nil +} + +// nolint +func Down56(ctx context.Context, tx *sql.Tx) error { + _, err := tx.ExecContext(ctx, down56) + if err != nil { + return err + } + return nil +} diff --git a/core/store/migrate/migrations/0057_add_pipeline_task_runs_id_to_eth_txs.sql b/core/store/migrate/migrations/0057_add_pipeline_task_runs_id_to_eth_txs.sql new file mode 100644 index 00000000..9ab507d8 --- /dev/null +++ b/core/store/migrate/migrations/0057_add_pipeline_task_runs_id_to_eth_txs.sql @@ -0,0 +1,9 @@ +-- +goose Up +ALTER TABLE eth_txes ADD COLUMN pipeline_task_run_id uuid UNIQUE; +ALTER TABLE eth_txes ADD COLUMN min_confirmations integer; +CREATE INDEX pipeline_runs_suspended ON pipeline_runs (id) WHERE state = 'suspended' ; + +-- +goose Down +ALTER TABLE eth_txes DROP COLUMN pipeline_task_run_id; +ALTER TABLE eth_txes DROP COLUMN min_confirmations; +DROP INDEX pipeline_runs_suspended; diff --git a/core/store/migrate/migrations/0058_direct_request_whitelist.sql b/core/store/migrate/migrations/0058_direct_request_whitelist.sql new file mode 100644 index 00000000..7483b3b5 --- /dev/null +++ 
b/core/store/migrate/migrations/0058_direct_request_whitelist.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE direct_request_specs ADD COLUMN requesters TEXT; +-- +goose Down +ALTER TABLE direct_request_specs DROP COLUMN requesters; diff --git a/core/store/migrate/migrations/0059_direct_request_whitelist_min_contract_payment.sql b/core/store/migrate/migrations/0059_direct_request_whitelist_min_contract_payment.sql new file mode 100644 index 00000000..fae2df67 --- /dev/null +++ b/core/store/migrate/migrations/0059_direct_request_whitelist_min_contract_payment.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE direct_request_specs ADD COLUMN min_contract_payment numeric(78,0); +-- +goose Down +ALTER TABLE direct_request_specs DROP COLUMN min_contract_payment; diff --git a/core/store/migrate/migrations/0060_combine_keys_tables.sql b/core/store/migrate/migrations/0060_combine_keys_tables.sql new file mode 100644 index 00000000..4209c66b --- /dev/null +++ b/core/store/migrate/migrations/0060_combine_keys_tables.sql @@ -0,0 +1,35 @@ +-- +goose Up +CREATE TABLE encrypted_key_rings( + encrypted_keys jsonb, + updated_at timestamptz NOT NULL +); + +CREATE TABLE eth_key_states( + id SERIAL PRIMARY KEY, + address bytea UNIQUE NOT NULL, + next_nonce bigint NOT NULL DEFAULT 0, + is_funding boolean DEFAULT false NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT chk_address_length CHECK ((octet_length(address) = 20)) +); + +ALTER TABLE eth_txes DROP CONSTRAINT eth_txes_from_address_fkey; +-- Need the NOT VALID constraint here because the eth_key_states are not created yet; they will be created on application boot +ALTER TABLE eth_txes ADD CONSTRAINT eth_txes_from_address_fkey FOREIGN KEY (from_address) REFERENCES eth_key_states(address) NOT VALID; +ALTER TABLE vrf_specs DROP CONSTRAINT vrf_specs_public_key_fkey; +ALTER TABLE offchainreporting_oracle_specs DROP CONSTRAINT 
offchainreporting_oracle_specs_transmitter_address_fkey; +ALTER TABLE offchainreporting_oracle_specs DROP CONSTRAINT offchainreporting_oracle_specs_encrypted_ocr_key_bundle_id_fkey; +ALTER TABLE offchainreporting_oracle_specs DROP CONSTRAINT offchainreporting_oracle_specs_p2p_peer_id_fkey; +ALTER TABLE p2p_peers DROP CONSTRAINT p2p_peers_peer_id_fkey; + +-- +goose Down +DROP TABLE encrypted_key_rings; +ALTER TABLE eth_txes DROP CONSTRAINT eth_txes_from_address_fkey; +DROP TABLE eth_key_states; +ALTER TABLE eth_txes ADD CONSTRAINT eth_txes_from_address_fkey FOREIGN KEY (from_address) REFERENCES keys(address); +ALTER TABLE vrf_specs ADD CONSTRAINT vrf_specs_public_key_fkey FOREIGN KEY (public_key) REFERENCES encrypted_vrf_keys(public_key) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE; +ALTER TABLE offchainreporting_oracle_specs ADD CONSTRAINT offchainreporting_oracle_specs_transmitter_address_fkey FOREIGN KEY (transmitter_address) REFERENCES keys(address); +ALTER TABLE offchainreporting_oracle_specs ADD CONSTRAINT offchainreporting_oracle_specs_encrypted_ocr_key_bundle_id_fkey FOREIGN KEY (encrypted_ocr_key_bundle_id) REFERENCES encrypted_ocr_key_bundles(id); +ALTER TABLE offchainreporting_oracle_specs ADD CONSTRAINT offchainreporting_oracle_specs_p2p_peer_id_fkey FOREIGN KEY (p2p_peer_id) REFERENCES encrypted_p2p_keys(peer_id); +ALTER TABLE p2p_peers ADD CONSTRAINT p2p_peers_peer_id_fkey FOREIGN KEY (peer_id) REFERENCES encrypted_p2p_keys (peer_id); diff --git a/core/store/migrate/migrations/0061_multichain_relations.sql b/core/store/migrate/migrations/0061_multichain_relations.sql new file mode 100644 index 00000000..669184b6 --- /dev/null +++ b/core/store/migrate/migrations/0061_multichain_relations.sql @@ -0,0 +1,70 @@ +-- +goose Up +ALTER TABLE evm_chains ADD COLUMN enabled BOOL DEFAULT TRUE NOT NULL; + +ALTER TABLE eth_txes ADD COLUMN evm_chain_id numeric(78,0) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE; +ALTER TABLE log_broadcasts ADD COLUMN 
evm_chain_id numeric(78,0) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE; +ALTER TABLE heads ADD COLUMN evm_chain_id numeric(78,0) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE; +ALTER TABLE eth_key_states ADD COLUMN evm_chain_id numeric(78,0) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE; + +UPDATE eth_txes SET evm_chain_id = (SELECT id FROM evm_chains ORDER BY created_at, id ASC LIMIT 1); +UPDATE log_broadcasts SET evm_chain_id = (SELECT id FROM evm_chains ORDER BY created_at, id ASC LIMIT 1); +UPDATE heads SET evm_chain_id = (SELECT id FROM evm_chains ORDER BY created_at, id ASC LIMIT 1); +UPDATE eth_key_states SET evm_chain_id = (SELECT id FROM evm_chains ORDER BY created_at, id ASC LIMIT 1); + +DROP INDEX IF EXISTS idx_eth_txes_min_unconfirmed_nonce_for_key; +DROP INDEX IF EXISTS idx_eth_txes_nonce_from_address; +DROP INDEX IF EXISTS idx_only_one_in_progress_tx_per_account; +DROP INDEX IF EXISTS idx_eth_txes_state_from_address; +DROP INDEX IF EXISTS idx_eth_txes_unstarted_subject_id; +CREATE INDEX idx_eth_txes_min_unconfirmed_nonce_for_key_evm_chain_id ON eth_txes(evm_chain_id, from_address, nonce) WHERE state = 'unconfirmed'::eth_txes_state; +CREATE UNIQUE INDEX idx_eth_txes_nonce_from_address_per_evm_chain_id ON eth_txes(evm_chain_id, from_address, nonce); +CREATE UNIQUE INDEX idx_only_one_in_progress_tx_per_account_id_per_evm_chain_id ON eth_txes(evm_chain_id, from_address) WHERE state = 'in_progress'::eth_txes_state; +CREATE INDEX idx_eth_txes_state_from_address_evm_chain_id ON eth_txes(evm_chain_id, from_address, state) WHERE state <> 'confirmed'::eth_txes_state; +CREATE INDEX idx_eth_txes_unstarted_subject_id_evm_chain_id ON eth_txes(evm_chain_id, subject, id) WHERE subject IS NOT NULL AND state = 'unstarted'::eth_txes_state; + +DROP INDEX IF EXISTS idx_heads_hash; +DROP INDEX IF EXISTS idx_heads_number; +CREATE UNIQUE INDEX idx_heads_evm_chain_id_hash ON heads(evm_chain_id, hash); +CREATE INDEX 
idx_heads_evm_chain_id_number ON heads(evm_chain_id, number); + +DROP INDEX IF EXISTS idx_log_broadcasts_unconsumed_job_id_v2; +DROP INDEX IF EXISTS log_consumptions_unique_v2_idx; +CREATE INDEX idx_log_broadcasts_unconsumed_job_id_v2 ON log_broadcasts(job_id, evm_chain_id) WHERE consumed = false AND job_id IS NOT NULL; +CREATE UNIQUE INDEX log_consumptions_unique_v2_idx ON log_broadcasts(job_id, block_hash, log_index, consumed, evm_chain_id) WHERE job_id IS NOT NULL; + +ALTER TABLE eth_txes ALTER COLUMN evm_chain_id SET NOT NULL; +ALTER TABLE log_broadcasts ALTER COLUMN evm_chain_id SET NOT NULL; +ALTER TABLE heads ALTER COLUMN evm_chain_id SET NOT NULL; +ALTER TABLE eth_key_states ALTER COLUMN evm_chain_id SET NOT NULL; + +ALTER TABLE vrf_specs ADD COLUMN evm_chain_id numeric(78,0) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE; +ALTER TABLE direct_request_specs ADD COLUMN evm_chain_id numeric(78,0) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE; +ALTER TABLE keeper_specs ADD COLUMN evm_chain_id numeric(78,0) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE; +ALTER TABLE offchainreporting_oracle_specs ADD COLUMN evm_chain_id numeric(78,0) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE; +ALTER TABLE flux_monitor_specs ADD COLUMN evm_chain_id numeric(78,0) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE; + +-- +goose Down +ALTER TABLE evm_chains DROP COLUMN enabled; + +ALTER TABLE heads DROP COLUMN evm_chain_id; +ALTER TABLE log_broadcasts DROP COLUMN evm_chain_id; +ALTER TABLE eth_txes DROP COLUMN evm_chain_id; +ALTER TABLE eth_key_states DROP COLUMN evm_chain_id; + +CREATE UNIQUE INDEX idx_heads_hash ON heads(hash bytea_ops); +CREATE INDEX idx_heads_number ON heads(number int8_ops); + +CREATE INDEX idx_eth_txes_min_unconfirmed_nonce_for_key ON eth_txes(from_address bytea_ops,nonce int8_ops) WHERE state = 'unconfirmed'::eth_txes_state; +CREATE UNIQUE INDEX idx_eth_txes_nonce_from_address ON eth_txes(from_address 
bytea_ops,nonce int8_ops); +CREATE UNIQUE INDEX idx_only_one_in_progress_tx_per_account ON eth_txes(from_address bytea_ops) WHERE state = 'in_progress'::eth_txes_state; +CREATE INDEX idx_eth_txes_state_from_address ON eth_txes(from_address bytea_ops,state enum_ops) WHERE state <> 'confirmed'::eth_txes_state; +CREATE INDEX idx_eth_txes_unstarted_subject_id ON eth_txes(subject uuid_ops,id int8_ops) WHERE subject IS NOT NULL AND state = 'unstarted'::eth_txes_state; + +CREATE INDEX idx_log_broadcasts_unconsumed_job_id_v2 ON log_broadcasts(job_id int4_ops) WHERE consumed = false AND job_id IS NOT NULL; +CREATE UNIQUE INDEX log_consumptions_unique_v2_idx ON log_broadcasts(job_id int4_ops,block_hash bytea_ops,log_index int8_ops,consumed) WHERE job_id IS NOT NULL; + +ALTER TABLE vrf_specs DROP COLUMN evm_chain_id; +ALTER TABLE direct_request_specs DROP COLUMN evm_chain_id; +ALTER TABLE keeper_specs DROP COLUMN evm_chain_id; +ALTER TABLE offchainreporting_oracle_specs DROP COLUMN evm_chain_id; +ALTER TABLE flux_monitor_specs DROP COLUMN evm_chain_id; diff --git a/core/store/migrate/migrations/0062_upgrade_keepers.sql b/core/store/migrate/migrations/0062_upgrade_keepers.sql new file mode 100644 index 00000000..d639949d --- /dev/null +++ b/core/store/migrate/migrations/0062_upgrade_keepers.sql @@ -0,0 +1,45 @@ +-- +goose Up +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + data="$(encode_check_upkeep_tx)"] +decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + 
data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID)}"] +encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 'keeper' AND schema_version = 1 +); + +UPDATE jobs +SET schema_version = 2 +WHERE type = 'keeper' AND schema_version = 1; + +-- +goose Down +UPDATE jobs +SET schema_version = 1 +WHERE type = 'keeper' AND schema_version = 2; + +UPDATE pipeline_specs +SET dot_dag_source = '' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 'keeper' AND schema_version = 1 +); \ No newline at end of file diff --git a/core/store/migrate/migrations/0063_add_job_proposal_timestamp.sql b/core/store/migrate/migrations/0063_add_job_proposal_timestamp.sql new file mode 100644 index 00000000..1f41a5a9 --- /dev/null +++ b/core/store/migrate/migrations/0063_add_job_proposal_timestamp.sql @@ -0,0 +1,12 @@ +-- +goose Up +ALTER TABLE job_proposals + ADD COLUMN proposed_at TIMESTAMP WITH TIME ZONE; + +UPDATE job_proposals + SET proposed_at = created_at; + +ALTER TABLE job_proposals + ALTER COLUMN proposed_at SET NOT NULL; + +-- +goose Down +ALTER TABLE job_proposals DROP COLUMN proposed_at; diff --git a/core/store/migrate/migrations/0064_cascade_delete_chain_nodes.sql b/core/store/migrate/migrations/0064_cascade_delete_chain_nodes.sql new file mode 100644 index 00000000..7076845a --- /dev/null +++ b/core/store/migrate/migrations/0064_cascade_delete_chain_nodes.sql @@ -0,0 +1,13 @@ +-- +goose Up +ALTER TABLE nodes +DROP CONSTRAINT nodes_evm_chain_id_fkey; + +ALTER TABLE nodes +ADD FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE; + +-- +goose Down +ALTER TABLE nodes +DROP 
CONSTRAINT nodes_evm_chain_id_fkey; + +ALTER TABLE nodes +ADD FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id); diff --git a/core/store/migrate/migrations/0065_ensure_chain_specific_gas_limit.sql b/core/store/migrate/migrations/0065_ensure_chain_specific_gas_limit.sql new file mode 100644 index 00000000..7d6ffc50 --- /dev/null +++ b/core/store/migrate/migrations/0065_ensure_chain_specific_gas_limit.sql @@ -0,0 +1,16 @@ +-- +goose Up +-- +goose StatementBegin + +-- Grandfather in old attempts which inadvertently saved with a 0 value, and +-- enforce correctly writing data for future attempts +UPDATE eth_tx_attempts SET chain_specific_gas_limit=1 WHERE chain_specific_gas_limit=0; +ALTER TABLE eth_tx_attempts ADD CONSTRAINT chk_chain_specific_gas_limit_not_zero CHECK (chain_specific_gas_limit > 0); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +ALTER TABLE eth_tx_attempts DROP CONSTRAINT chk_chain_specific_gas_limit_not_zero; + +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0066_update_job_proposal_status.sql b/core/store/migrate/migrations/0066_update_job_proposal_status.sql new file mode 100644 index 00000000..362484e1 --- /dev/null +++ b/core/store/migrate/migrations/0066_update_job_proposal_status.sql @@ -0,0 +1,54 @@ +-- +goose Up +-- +goose StatementBegin + +-- We must remove the old constraint to add an enum value to support Postgres v11 +ALTER TABLE job_proposals +DROP CONSTRAINT chk_job_proposals_status_fsm; + +-- Recreate the type to add the 'cancelled' enum value, since postgres v11 does not +-- support adding a value to an enum inside a transaction. 
+ALTER TYPE job_proposal_status RENAME TO job_proposal_status_old; +CREATE TYPE job_proposal_status AS ENUM('pending', 'approved', 'rejected', 'cancelled'); + +ALTER TABLE job_proposals ALTER COLUMN status TYPE job_proposal_status USING status::text::job_proposal_status; + +DROP TYPE job_proposal_status_old; + +-- Add the constraint back +ALTER TABLE job_proposals +ADD CONSTRAINT chk_job_proposals_status_fsm CHECK ( + (status = 'pending' AND external_job_id IS NULL) OR + (status = 'approved' AND external_job_id IS NOT NULL) OR + (status = 'rejected' AND external_job_id IS NULL) +); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +-- We must remove the old constraint to remove an enum value +ALTER TABLE job_proposals +DROP CONSTRAINT chk_job_proposals_status_fsm; + +-- Drop the cancelled enum value. Unfortunately postgres does not support +-- a way to remove a value from an enum. +ALTER TYPE job_proposal_status RENAME TO job_proposal_status_old; +CREATE TYPE job_proposal_status AS ENUM('pending', 'approved', 'rejected'); + +-- This will fail if any records are using the 'cancelled' enum. +-- Manually update these as we cannot decide what you want to do with them. 
+-- +ALTER TABLE job_proposals ALTER COLUMN status TYPE job_proposal_status USING status::text::job_proposal_status; + +DROP TYPE job_proposal_status_old; + +-- Add the contraint back +ALTER TABLE job_proposals +ADD CONSTRAINT chk_job_proposals_status_fsm CHECK ( + (status = 'pending' AND external_job_id IS NULL) OR + (status = 'approved' AND external_job_id IS NOT NULL) OR + (status = 'rejected' AND external_job_id IS NULL) +); + +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0067_update_check_job_proposals_status_fsm.sql b/core/store/migrate/migrations/0067_update_check_job_proposals_status_fsm.sql new file mode 100644 index 00000000..15ac04c1 --- /dev/null +++ b/core/store/migrate/migrations/0067_update_check_job_proposals_status_fsm.sql @@ -0,0 +1,30 @@ +-- +goose Up +-- +goose StatementBegin + +ALTER TABLE job_proposals +DROP CONSTRAINT chk_job_proposals_status_fsm; + +ALTER TABLE job_proposals +ADD CONSTRAINT chk_job_proposals_status_fsm CHECK ( + (status = 'pending' AND external_job_id IS NULL) OR + (status = 'approved' AND external_job_id IS NOT NULL) OR + (status = 'rejected' AND external_job_id IS NULL) OR + (status = 'cancelled' AND external_job_id IS NULL) +); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +ALTER TABLE job_proposals +DROP CONSTRAINT chk_job_proposals_status_fsm; + +ALTER TABLE job_proposals +ADD CONSTRAINT chk_job_proposals_status_fsm CHECK ( + (status = 'pending' AND external_job_id IS NULL) OR + (status = 'approved' AND external_job_id IS NOT NULL) OR + (status = 'rejected' AND external_job_id IS NULL) +); + +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0068_eth_tx_from_address_idx.sql b/core/store/migrate/migrations/0068_eth_tx_from_address_idx.sql new file mode 100644 index 00000000..47494234 --- /dev/null +++ b/core/store/migrate/migrations/0068_eth_tx_from_address_idx.sql @@ -0,0 +1,15 @@ +-- +goose Up +-- +goose StatementBegin +-- Needed to speed up FK checks from 
eth_key_states +CREATE INDEX idx_eth_txes_from_address ON eth_txes (from_address); +-- Since almost all of them are null we can greatly reduce the size of this index by setting the condition +ALTER TABLE eth_txes DROP CONSTRAINT eth_txes_pipeline_task_run_id_key; +CREATE UNIQUE INDEX idx_eth_txes_pipeline_run_task_id ON eth_txes (pipeline_task_run_id) WHERE pipeline_task_run_id IS NOT NULL; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP INDEX idx_eth_txes_from_address; +ALTER TABLE eth_txes ADD CONSTRAINT eth_txes_pipeline_task_run_id_key UNIQUE (pipeline_task_run_id); +DROP INDEX idx_eth_txes_pipeline_run_task_id; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0069_remove_unused_columns.sql b/core/store/migrate/migrations/0069_remove_unused_columns.sql new file mode 100644 index 00000000..4f091f6e --- /dev/null +++ b/core/store/migrate/migrations/0069_remove_unused_columns.sql @@ -0,0 +1,3 @@ +-- +goose Up +ALTER TABLE external_initiators DROP COLUMN IF EXISTS deleted_at; +ALTER TABLE users DROP COLUMN IF EXISTS token_secret; diff --git a/core/store/migrate/migrations/0070_dynamic_fee_txes.sql b/core/store/migrate/migrations/0070_dynamic_fee_txes.sql new file mode 100644 index 00000000..9d70db8f --- /dev/null +++ b/core/store/migrate/migrations/0070_dynamic_fee_txes.sql @@ -0,0 +1,32 @@ +-- +goose Up + +ALTER TABLE eth_txes ADD COLUMN access_list jsonb; +ALTER TABLE eth_tx_attempts + ADD COLUMN tx_type smallint NOT NULL DEFAULT 0, + ADD COLUMN gas_tip_cap numeric(78,0), + ADD COLUMN gas_fee_cap numeric(78,0), + ADD CONSTRAINT chk_tx_type_is_byte CHECK ( + tx_type >= 0 AND tx_type <= 255 + ), + ADD CONSTRAINT chk_legacy_or_dynamic CHECK ( + (tx_type = 0 AND gas_price IS NOT NULL AND gas_tip_cap IS NULL AND gas_fee_cap IS NULL) + OR + (tx_type = 2 AND gas_price IS NULL AND gas_tip_cap IS NOT NULL AND gas_fee_cap IS NOT NULL) + ), + ALTER COLUMN gas_price DROP NOT NULL +; +ALTER TABLE heads ADD COLUMN base_fee_per_gas 
numeric(78,0); +ALTER TABLE eth_tx_attempts + ADD CONSTRAINT chk_sanity_fee_cap_tip_cap CHECK ( + gas_tip_cap IS NULL + OR + gas_fee_cap IS NULL + OR + (gas_tip_cap <= gas_fee_cap) + ); + + +-- +goose Down +ALTER TABLE eth_txes DROP COLUMN access_list; +ALTER TABLE eth_tx_attempts DROP COLUMN tx_type, DROP COLUMN gas_tip_cap, DROP COLUMN gas_fee_cap, ALTER COLUMN gas_price SET NOT NULL; +ALTER TABLE heads DROP COLUMN base_fee_per_gas; diff --git a/core/store/migrate/migrations/0071_allow_null_json_serializable.sql b/core/store/migrate/migrations/0071_allow_null_json_serializable.sql new file mode 100644 index 00000000..be77a83e --- /dev/null +++ b/core/store/migrate/migrations/0071_allow_null_json_serializable.sql @@ -0,0 +1,9 @@ +-- +goose Up + +ALTER TABLE pipeline_runs ALTER COLUMN meta DROP NOT NULL; + + +-- +goose Down + +ALTER TABLE pipeline_runs ALTER COLUMN meta SET NOT NULL; + diff --git a/core/store/migrate/migrations/0072_drop_unused_tables.sql b/core/store/migrate/migrations/0072_drop_unused_tables.sql new file mode 100644 index 00000000..5d069e3d --- /dev/null +++ b/core/store/migrate/migrations/0072_drop_unused_tables.sql @@ -0,0 +1,18 @@ +-- +goose Up + +DROP TABLE encumbrances; + +-- +goose Down + +CREATE TABLE encumbrances ( + id bigint NOT NULL, + payment numeric(78,0), + expiration bigint, + end_at timestamp with time zone, + oracles text, + aggregator bytea NOT NULL, + agg_initiate_job_selector bytea NOT NULL, + agg_fulfill_selector bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); diff --git a/core/store/migrate/migrations/0073_ocr_duplicate_contract_addresses_allowed_across_chains.sql b/core/store/migrate/migrations/0073_ocr_duplicate_contract_addresses_allowed_across_chains.sql new file mode 100644 index 00000000..1d1f53dd --- /dev/null +++ b/core/store/migrate/migrations/0073_ocr_duplicate_contract_addresses_allowed_across_chains.sql @@ -0,0 +1,11 @@ +-- +goose Up + +ALTER 
TABLE offchainreporting_oracle_specs DROP CONSTRAINT unique_contract_addr; +CREATE UNIQUE INDEX unique_contract_addr_per_chain ON offchainreporting_oracle_specs (contract_address, evm_chain_id) WHERE evm_chain_id IS NOT NULL; +CREATE UNIQUE INDEX unique_contract_addr ON offchainreporting_oracle_specs (contract_address) WHERE evm_chain_id IS NULL; + +-- +goose Down + +DROP INDEX unique_contract_addr; +DROP INDEX unique_contract_addr_per_chain; +ALTER TABLE offchainreporting_oracle_specs ADD CONSTRAINT unique_contract_addr UNIQUE (contract_address); diff --git a/core/store/migrate/migrations/0074_simulation_eth_tx.sql b/core/store/migrate/migrations/0074_simulation_eth_tx.sql new file mode 100644 index 00000000..ad0cfe93 --- /dev/null +++ b/core/store/migrate/migrations/0074_simulation_eth_tx.sql @@ -0,0 +1,9 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE eth_txes ADD COLUMN IF NOT EXISTS simulate bool NOT NULL DEFAULT FALSE; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE eth_txes DROP COLUMN simulate; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0075_unique_job_names.sql b/core/store/migrate/migrations/0075_unique_job_names.sql new file mode 100644 index 00000000..4acc1122 --- /dev/null +++ b/core/store/migrate/migrations/0075_unique_job_names.sql @@ -0,0 +1,12 @@ +-- +goose Up + +-- Rename duplicate jobs first +UPDATE jobs +SET name = jobs.name || ' (' || j.rank::text || ')' +FROM (SELECT id, row_number() OVER (PARTITION BY name ORDER BY id) AS rank FROM jobs) j +WHERE jobs.id = j.id AND j.rank > 1; + +CREATE UNIQUE INDEX idx_jobs_name ON jobs (name); + +-- +goose Down +DROP INDEX IF EXISTS idx_jobs_name; \ No newline at end of file diff --git a/core/store/migrate/migrations/0076_add_non_fatal_errors_to_runs.sql b/core/store/migrate/migrations/0076_add_non_fatal_errors_to_runs.sql new file mode 100644 index 00000000..8946f531 --- /dev/null +++ 
b/core/store/migrate/migrations/0076_add_non_fatal_errors_to_runs.sql @@ -0,0 +1,11 @@ +-- +goose Up +ALTER TABLE pipeline_runs + RENAME COLUMN errors TO fatal_errors; +ALTER TABLE pipeline_runs + ADD COLUMN all_errors jsonb; + +-- +goose Down +ALTER TABLE pipeline_runs + RENAME COLUMN fatal_errors TO errors; +ALTER TABLE pipeline_runs + DROP COLUMN all_errors; diff --git a/core/store/migrate/migrations/0077_add_webauthn_table.sql b/core/store/migrate/migrations/0077_add_webauthn_table.sql new file mode 100644 index 00000000..0b4e0c46 --- /dev/null +++ b/core/store/migrate/migrations/0077_add_webauthn_table.sql @@ -0,0 +1,14 @@ +-- +goose Up +CREATE TABLE web_authns ( + "id" BIGSERIAL PRIMARY KEY, + "email" text NOT NULL, + "public_key_data" jsonb NOT NULL, + CONSTRAINT fk_email + FOREIGN KEY(email) + REFERENCES users(email) +); + +CREATE UNIQUE INDEX web_authns_email_idx ON web_authns (lower(email)); + +-- +goose Down +DROP TABLE IF EXISTS web_authns; diff --git a/core/store/migrate/migrations/0078_only_one_version.sql b/core/store/migrate/migrations/0078_only_one_version.sql new file mode 100644 index 00000000..69110134 --- /dev/null +++ b/core/store/migrate/migrations/0078_only_one_version.sql @@ -0,0 +1,8 @@ +-- +goose Up +DELETE FROM node_versions WHERE version IN ( + SELECT version FROM node_versions ORDER BY created_at DESC OFFSET 1 +); +CREATE UNIQUE INDEX idx_only_one_node_version ON node_versions ((version IS NOT NULL)); + +-- +goose Down +DROP INDEX idx_only_one_node_version; diff --git a/core/store/migrate/migrations/0079_vrf_v2_fields.sql b/core/store/migrate/migrations/0079_vrf_v2_fields.sql new file mode 100644 index 00000000..1c1b9109 --- /dev/null +++ b/core/store/migrate/migrations/0079_vrf_v2_fields.sql @@ -0,0 +1,9 @@ +-- +goose Up +ALTER TABLE vrf_specs + ADD COLUMN from_address bytea, + ADD COLUMN poll_period bigint NOT NULL DEFAULT 0; + +-- +goose Down +ALTER TABLE vrf_specs + DROP COLUMN from_address, + DROP COLUMN poll_period; diff --git 
a/core/store/migrate/migrations/0080_drop_unused_cols.sql b/core/store/migrate/migrations/0080_drop_unused_cols.sql new file mode 100644 index 00000000..1d515ec6 --- /dev/null +++ b/core/store/migrate/migrations/0080_drop_unused_cols.sql @@ -0,0 +1,19 @@ +-- +goose Up +ALTER TABLE offchainreporting_oracle_specs DROP COLUMN monitoring_endpoint; +ALTER TABLE jobs ADD COLUMN created_at timestamptz; + +UPDATE jobs SET created_at=offchainreporting_oracle_specs.created_at FROM offchainreporting_oracle_specs WHERE jobs.offchainreporting_oracle_spec_id = offchainreporting_oracle_specs.id; +UPDATE jobs SET created_at=direct_request_specs.created_at FROM direct_request_specs WHERE jobs.direct_request_spec_id = direct_request_specs.id; +UPDATE jobs SET created_at=flux_monitor_specs.created_at FROM flux_monitor_specs WHERE jobs.flux_monitor_spec_id = flux_monitor_specs.id; +UPDATE jobs SET created_at=keeper_specs.created_at FROM keeper_specs WHERE jobs.keeper_spec_id = keeper_specs.id; +UPDATE jobs SET created_at=cron_specs.created_at FROM cron_specs WHERE jobs.cron_spec_id = cron_specs.id; +UPDATE jobs SET created_at=vrf_specs.created_at FROM vrf_specs WHERE jobs.vrf_spec_id = vrf_specs.id; +UPDATE jobs SET created_at=webhook_specs.created_at FROM webhook_specs WHERE jobs.webhook_spec_id = webhook_specs.id; + +UPDATE jobs SET created_at = NOW() WHERE created_at IS NULL; +CREATE INDEX idx_jobs_created_at ON jobs USING BRIN (created_at); +ALTER TABLE jobs ALTER COLUMN created_at SET NOT NULL; + +-- +goose Down +ALTER TABLE offchainreporting_oracle_specs ADD COLUMN monitoring_endpoint text; +ALTER TABLE jobs DROP COLUMN created_at; diff --git a/core/store/migrate/migrations/0081_unconsumed_log_broadcasts.sql b/core/store/migrate/migrations/0081_unconsumed_log_broadcasts.sql new file mode 100644 index 00000000..f7ca6146 --- /dev/null +++ b/core/store/migrate/migrations/0081_unconsumed_log_broadcasts.sql @@ -0,0 +1,17 @@ +-- +goose Up +ALTER TABLE log_broadcasts ADD COLUMN 
updated_at timestamp with time zone NOT NULL DEFAULT NOW(); +DROP INDEX IF EXISTS log_consumptions_unique_v2_idx; +CREATE UNIQUE INDEX log_broadcasts_unique_idx ON log_broadcasts(job_id, block_hash, log_index, evm_chain_id); +CREATE TABLE log_broadcasts_pending ( + evm_chain_id numeric(78,0) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE PRIMARY KEY, + block_number int8, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); +CREATE INDEX idx_log_broadcasts_unconsumed on log_broadcasts(evm_chain_id, block_number) WHERE consumed = false AND block_number IS NOT NULL; +-- +goose Down +DROP INDEX IF EXISTS idx_log_broadcasts_unconsumed; +DROP TABLE IF EXISTS log_broadcasts_pending; +ALTER TABLE log_broadcasts DROP COLUMN updated_at; +DROP INDEX IF EXISTS log_broadcasts_unique_idx; +CREATE UNIQUE INDEX log_consumptions_unique_v2_idx ON log_broadcasts(job_id, block_hash, log_index, consumed, evm_chain_id) WHERE job_id IS NOT NULL; diff --git a/core/store/migrate/migrations/0082_lease_lock.sql b/core/store/migrate/migrations/0082_lease_lock.sql new file mode 100644 index 00000000..29650a1a --- /dev/null +++ b/core/store/migrate/migrations/0082_lease_lock.sql @@ -0,0 +1,10 @@ +-- +goose Up +CREATE TABLE IF NOT EXISTS lease_lock ( + client_id uuid NOT NULL, + expires_at timestamptz NOT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS only_one_lease_lock ON lease_lock ((client_id IS NOT NULL)); + +-- +goose Down +DROP TABLE lease_lock; diff --git a/core/store/migrate/migrations/0083_add_keeper_confirmations_to_spec.sql b/core/store/migrate/migrations/0083_add_keeper_confirmations_to_spec.sql new file mode 100644 index 00000000..5bcc5ddf --- /dev/null +++ b/core/store/migrate/migrations/0083_add_keeper_confirmations_to_spec.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE keeper_specs + ADD COLUMN min_incoming_confirmations integer; + +-- +goose Down +ALTER TABLE keeper_specs + DROP COLUMN min_incoming_confirmations; diff 
--git a/core/store/migrate/migrations/0084_rename_vrf_min_incoming_confirmations.sql b/core/store/migrate/migrations/0084_rename_vrf_min_incoming_confirmations.sql new file mode 100644 index 00000000..4532d92c --- /dev/null +++ b/core/store/migrate/migrations/0084_rename_vrf_min_incoming_confirmations.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE vrf_specs + RENAME COLUMN confirmations TO min_incoming_confirmations; + +-- +goose Down +ALTER TABLE vrf_specs + RENAME COLUMN min_incoming_confirmations TO confirmations; \ No newline at end of file diff --git a/core/store/migrate/migrations/0085_requested_confs_delay.sql b/core/store/migrate/migrations/0085_requested_confs_delay.sql new file mode 100644 index 00000000..e460b336 --- /dev/null +++ b/core/store/migrate/migrations/0085_requested_confs_delay.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE vrf_specs + ADD COLUMN "requested_confs_delay" BIGINT CHECK (requested_confs_delay >= 0) DEFAULT 0 NOT NULL; + +-- +goose Down +ALTER TABLE vrf_specs + DROP COLUMN "requested_confs_delay"; diff --git a/core/store/migrate/migrations/0086_upgrade_keepers_observation_source.sql b/core/store/migrate/migrations/0086_upgrade_keepers_observation_source.sql new file mode 100644 index 00000000..32a8949f --- /dev/null +++ b/core/store/migrate/migrations/0086_upgrade_keepers_observation_source.sql @@ -0,0 +1,71 @@ +-- +goose Up +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + gasPrice="$(jobSpec.gasPrice)" + gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] +decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 
maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + from="[$(jobSpec.fromAddress)]" + evmChainID="$(jobSpec.evmChainID)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID)}"] +encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 'keeper' AND schema_version = 2 +); + +UPDATE jobs +SET schema_version = 3 +WHERE type = 'keeper' AND schema_version = 2; + +-- +goose Down +UPDATE jobs +SET schema_version = 2 +WHERE type = 'keeper' AND schema_version = 3; + +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + data="$(encode_check_upkeep_tx)"] +decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID)}"] +encode_check_upkeep_tx -> check_upkeep_tx -> 
decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 'keeper' AND schema_version = 2 +); diff --git a/core/store/migrate/migrations/0087_ocr2_tables.sql b/core/store/migrate/migrations/0087_ocr2_tables.sql new file mode 100644 index 00000000..a4b61add --- /dev/null +++ b/core/store/migrate/migrations/0087_ocr2_tables.sql @@ -0,0 +1,165 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE offchainreporting2_oracle_specs ( + id SERIAL PRIMARY KEY, + contract_address bytea NOT NULL, + p2p_peer_id text, + p2p_bootstrap_peers text[] NOT NULL DEFAULT '{}', + is_bootstrap_peer boolean NOT NULL, + encrypted_ocr_key_bundle_id bytea, + monitoring_endpoint text, + transmitter_address bytea, + blockchain_timeout bigint, + evm_chain_id numeric(78,0) REFERENCES evm_chains (id), + contract_config_tracker_subscribe_interval bigint, + contract_config_tracker_poll_interval bigint, + contract_config_confirmations integer NOT NULL, + juels_per_fee_coin_pipeline text NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT chk_contract_address_length CHECK ((octet_length(contract_address) = 20)) +); + +ALTER TABLE ONLY offchainreporting2_oracle_specs + ADD CONSTRAINT offchainreporting2_oracle_specs_unique_contract_addr + UNIQUE (contract_address); + +CREATE INDEX idx_offchainreporting2_oracle_specs_created_at + ON offchainreporting2_oracle_specs USING brin (created_at); +CREATE INDEX idx_offchainreporting2_oracle_specs_updated_at + ON offchainreporting2_oracle_specs USING brin (updated_at); + +ALTER TABLE jobs + ADD COLUMN offchainreporting2_oracle_spec_id integer, + ADD CONSTRAINT jobs_offchainreporting2_oracle_spec_id_fkey + FOREIGN KEY (offchainreporting2_oracle_spec_id) + REFERENCES offchainreporting2_oracle_specs(id) + ON DELETE CASCADE, + DROP CONSTRAINT chk_only_one_spec, + ADD CONSTRAINT chk_only_one_spec CHECK ( + 
num_nonnulls( + offchainreporting_oracle_spec_id, + offchainreporting2_oracle_spec_id, + direct_request_spec_id, + flux_monitor_spec_id, + keeper_spec_id, + cron_spec_id, + vrf_spec_id, + webhook_spec_id + ) = 1 + ); + +CREATE UNIQUE INDEX idx_jobs_unique_offchain2_reporting_oracle_spec_id + ON jobs + USING btree (offchainreporting2_oracle_spec_id); + +CREATE TABLE offchainreporting2_contract_configs ( + offchainreporting2_oracle_spec_id INTEGER PRIMARY KEY, + config_digest bytea NOT NULL, + config_count bigint NOT NULL, + signers bytea[], + transmitters text[], + f smallint NOT NULL, + onchain_config bytea, -- this field exists in ocr2 but not in ocr1 + offchain_config_version bigint NOT NULL, + offchain_config bytea, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT offchainreporting2_contract_configs_config_digest_check CHECK ((octet_length(config_digest) = 32)) +); + +ALTER TABLE ONLY offchainreporting2_contract_configs + ADD CONSTRAINT offchainreporting2_contract_configs_oracle_spec_fkey + FOREIGN KEY (offchainreporting2_oracle_spec_id) + REFERENCES offchainreporting2_oracle_specs(id) + ON DELETE CASCADE; + +CREATE TABLE offchainreporting2_latest_round_requested ( + offchainreporting2_oracle_spec_id INTEGER PRIMARY KEY, + requester bytea NOT NULL, + config_digest bytea NOT NULL, + epoch bigint NOT NULL, + round bigint NOT NULL, + raw jsonb NOT NULL, + CONSTRAINT offchainreporting2_latest_round_requested_config_digest_check + CHECK ((octet_length(config_digest) = 32)), + CONSTRAINT offchainreporting2_latest_round_requested_requester_check + CHECK ((octet_length(requester) = 20)) +); + +ALTER TABLE offchainreporting2_latest_round_requested + ADD CONSTRAINT offchainreporting2_latest_round_oracle_spec_fkey + FOREIGN KEY (offchainreporting2_oracle_spec_id) + REFERENCES offchainreporting2_oracle_specs(id) + ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE; + +CREATE TABLE offchainreporting2_persistent_states 
( + offchainreporting2_oracle_spec_id integer NOT NULL, + config_digest bytea NOT NULL, + epoch bigint NOT NULL, + highest_sent_epoch bigint NOT NULL, + highest_received_epoch bigint[] NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT offchainreporting2_persistent_states_config_digest_check + CHECK ((octet_length(config_digest) = 32)) +); + +ALTER TABLE ONLY offchainreporting2_persistent_states + ADD CONSTRAINT offchainreporting2_persistent_states_pkey + PRIMARY KEY (offchainreporting2_oracle_spec_id, config_digest), + ADD CONSTRAINT offchainreporting2_persistent_oracle_spec_fkey + FOREIGN KEY (offchainreporting2_oracle_spec_id) + REFERENCES offchainreporting2_oracle_specs(id) + ON DELETE CASCADE; + +CREATE TABLE offchainreporting2_pending_transmissions ( + offchainreporting2_oracle_spec_id integer NOT NULL, + config_digest bytea NOT NULL, + epoch bigint NOT NULL, + round bigint NOT NULL, + "time" timestamp with time zone NOT NULL, + extra_hash bytea NOT NULL, + report bytea NOT NULL, + attributed_signatures bytea[] NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT offchainreporting2_pending_transmissions_config_digest_check CHECK ((octet_length(config_digest) = 32)) +); + +ALTER TABLE ONLY offchainreporting2_pending_transmissions + ADD CONSTRAINT offchainreporting2_pending_transmissions_pkey + PRIMARY KEY (offchainreporting2_oracle_spec_id, config_digest, epoch, round), + ADD CONSTRAINT offchainreporting2_pending_transmission_oracle_spec_fkey + FOREIGN KEY (offchainreporting2_oracle_spec_id) REFERENCES offchainreporting2_oracle_specs(id) + ON DELETE CASCADE; + +CREATE INDEX idx_offchainreporting2_pending_transmissions_time ON offchainreporting2_pending_transmissions USING btree ("time"); + +-- After moving to the unified keystore the encrypted_p2p_keys table is no longer used +-- So we have to drop this FK to be able to use the 
discoverer (v2) networking stack +ALTER TABLE offchainreporting_discoverer_announcements DROP CONSTRAINT offchainreporting_discoverer_announcements_local_peer_id_fkey; +-- +goose StatementEnd + + +-- +goose Down +-- +goose StatementBegin +DROP TABLE offchainreporting2_pending_transmissions; +DROP TABLE offchainreporting2_persistent_states; +DROP TABLE offchainreporting2_latest_round_requested; +DROP TABLE offchainreporting2_contract_configs; +ALTER TABLE jobs DROP CONSTRAINT chk_only_one_spec, + ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + offchainreporting_oracle_spec_id, + direct_request_spec_id, + flux_monitor_spec_id, + keeper_spec_id, + cron_spec_id, + webhook_spec_id, + vrf_spec_id) = 1 + ); +ALTER TABLE jobs DROP COLUMN offchainreporting2_oracle_spec_id; +ALTER TABLE offchainreporting_discoverer_announcements ADD CONSTRAINT offchainreporting_discoverer_announcements_local_peer_id_fkey FOREIGN KEY (local_peer_id) REFERENCES encrypted_p2p_keys(peer_id) DEFERRABLE INITIALLY IMMEDIATE; +DROP TABLE offchainreporting2_oracle_specs; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0088_vrfv2_request_timeout.sql b/core/store/migrate/migrations/0088_vrfv2_request_timeout.sql new file mode 100644 index 00000000..a8581ba1 --- /dev/null +++ b/core/store/migrate/migrations/0088_vrfv2_request_timeout.sql @@ -0,0 +1,10 @@ +-- +goose Up +ALTER TABLE vrf_specs + ADD COLUMN "request_timeout" BIGINT + CHECK (request_timeout > 0) + DEFAULT 24 * 60 * 60 * 1e9 -- default of one day in nanoseconds + NOT NULL; + +-- +goose Down +ALTER TABLE vrf_specs + DROP COLUMN "request_timeout"; diff --git a/core/store/migrate/migrations/0089_ocr_spec_drop_p2p_peer_id.sql b/core/store/migrate/migrations/0089_ocr_spec_drop_p2p_peer_id.sql new file mode 100644 index 00000000..2e62d8ba --- /dev/null +++ b/core/store/migrate/migrations/0089_ocr_spec_drop_p2p_peer_id.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE offchainreporting_oracle_specs DROP COLUMN p2p_peer_id; 
+ALTER TABLE offchainreporting2_oracle_specs DROP COLUMN p2p_peer_id; + +-- +goose Down +ALTER TABLE offchainreporting_oracle_specs ADD COLUMN p2p_peer_id TEXT; +ALTER TABLE offchainreporting2_oracle_specs ADD COLUMN p2p_peer_id TEXT; diff --git a/core/store/migrate/migrations/0090_ocr_new_timeouts.sql b/core/store/migrate/migrations/0090_ocr_new_timeouts.sql new file mode 100644 index 00000000..056c65e6 --- /dev/null +++ b/core/store/migrate/migrations/0090_ocr_new_timeouts.sql @@ -0,0 +1,9 @@ +-- +goose Up +ALTER TABLE offchainreporting_oracle_specs ADD COLUMN database_timeout BIGINT; +ALTER TABLE offchainreporting_oracle_specs ADD COLUMN observation_grace_period BIGINT; +ALTER TABLE offchainreporting_oracle_specs ADD COLUMN contract_transmitter_transmit_timeout BIGINT; + +-- +goose Down +ALTER TABLE offchainreporting_oracle_specs DROP COLUMN database_timeout; +ALTER TABLE offchainreporting_oracle_specs DROP COLUMN observation_grace_period; +ALTER TABLE offchainreporting_oracle_specs DROP COLUMN contract_transmitter_transmit_timeout; diff --git a/core/store/migrate/migrations/0091_ocr2_relay.sql b/core/store/migrate/migrations/0091_ocr2_relay.sql new file mode 100644 index 00000000..f6083406 --- /dev/null +++ b/core/store/migrate/migrations/0091_ocr2_relay.sql @@ -0,0 +1,30 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE offchainreporting2_oracle_specs + ADD COLUMN relay text NOT NULL, + ADD COLUMN relay_config JSONB NOT NULL DEFAULT '{}', + ALTER COLUMN contract_address TYPE text, + ALTER COLUMN transmitter_address TYPE text, + DROP COLUMN evm_chain_id, + DROP CONSTRAINT chk_contract_address_length; +ALTER TABLE offchainreporting2_oracle_specs + RENAME COLUMN contract_address TO contract_id; +ALTER TABLE offchainreporting2_oracle_specs + RENAME COLUMN transmitter_address TO transmitter_id; +ALTER TABLE offchainreporting2_oracle_specs + RENAME COLUMN encrypted_ocr_key_bundle_id TO ocr_key_bundle_id; +-- +goose StatementEnd + +-- +goose Down +-- +goose 
StatementBegin +ALTER TABLE offchainreporting2_oracle_specs + DROP COLUMN relay, + DROP COLUMN relay_config, + ADD COLUMN evm_chain_id numeric(78,0) REFERENCES evm_chains(id); +ALTER TABLE offchainreporting2_oracle_specs + RENAME COLUMN contract_id TO contract_address; +ALTER TABLE offchainreporting2_oracle_specs + RENAME COLUMN transmitter_id TO transmitter_address; +ALTER TABLE offchainreporting2_oracle_specs + RENAME COLUMN ocr_key_bundle_id TO encrypted_ocr_key_bundle_id; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0092_bptxm_tx_checkers.sql b/core/store/migrate/migrations/0092_bptxm_tx_checkers.sql new file mode 100644 index 00000000..76c8eb8d --- /dev/null +++ b/core/store/migrate/migrations/0092_bptxm_tx_checkers.sql @@ -0,0 +1,24 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE eth_txes +ADD COLUMN IF NOT EXISTS transmit_checker jsonb DEFAULT NULL; + +UPDATE eth_txes +SET transmit_checker = '{"CheckerType": "simulate"}'::jsonb +WHERE simulate; + +ALTER TABLE eth_txes DROP COLUMN simulate; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE eth_txes +ADD COLUMN IF NOT EXISTS simulate bool NOT NULL DEFAULT FALSE; + +UPDATE eth_txes +SET simulate = true +WHERE transmit_checker::jsonb->>'CheckerType' = 'simulate'; + +ALTER TABLE eth_txes +DROP COLUMN transmit_checker; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0093_terra_txm.sql b/core/store/migrate/migrations/0093_terra_txm.sql new file mode 100644 index 00000000..26ead4f2 --- /dev/null +++ b/core/store/migrate/migrations/0093_terra_txm.sql @@ -0,0 +1,75 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE heads RENAME TO evm_heads; +ALTER TABLE nodes RENAME TO evm_nodes; +CREATE TABLE terra_chains ( + id text PRIMARY KEY, + cfg jsonb NOT NULL DEFAULT '{}', + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + enabled BOOL DEFAULT TRUE NOT NULL +); +CREATE TABLE terra_nodes ( + id serial PRIMARY KEY, + name 
varchar(255) NOT NULL CHECK (name != ''), + terra_chain_id text NOT NULL REFERENCES terra_chains (id), + tendermint_url text CHECK (tendermint_url != ''), + fcd_url text CHECK (fcd_url != ''), + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL +); +CREATE INDEX idx_nodes_terra_chain_id ON terra_nodes (terra_chain_id); +CREATE UNIQUE INDEX idx_terra_nodes_unique_name ON terra_nodes (lower(name)); +CREATE FUNCTION notify_terra_msg_insert() RETURNS trigger + LANGUAGE plpgsql +AS $$ +BEGIN + PERFORM pg_notify('insert_on_terra_msg'::text, NOW()::text); + RETURN NULL; +END +$$; +CREATE TABLE terra_msgs ( + id BIGSERIAL PRIMARY KEY, + terra_chain_id text NOT NULL REFERENCES terra_chains (id), + contract_id text NOT NULL, + raw bytea NOT NULL, + state text NOT NULL, + tx_hash text, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + CHECK (tx_hash IS NOT NULL OR (state<>'broadcasted' AND state<>'confirmed')) +); +CREATE TRIGGER notify_terra_msg_insertion AFTER INSERT ON terra_msgs FOR EACH STATEMENT EXECUTE PROCEDURE notify_terra_msg_insert(); +CREATE INDEX idx_terra_msgs_terra_chain_id_state ON terra_msgs (terra_chain_id, state); + +CREATE FUNCTION check_terra_msg_state_transition() RETURNS TRIGGER AS $$ +DECLARE + state_transition_map jsonb := json_build_object( + 'unstarted', json_build_object('errored', true, 'broadcasted', true), + 'broadcasted', json_build_object('errored', true, 'confirmed', true)); +BEGIN + IF NOT state_transition_map ? OLD.state THEN + RAISE EXCEPTION 'Invalid from state %. Valid from states %', OLD.state, state_transition_map; + END IF; + IF NOT state_transition_map->OLD.state ? NEW.state THEN + RAISE EXCEPTION 'Invalid state transition from % to %. 
Valid to states %', OLD.state, NEW.state, state_transition_map->OLD.state; + END IF; + RETURN NEW; +END +$$ LANGUAGE plpgsql; +CREATE TRIGGER validate_state_update BEFORE UPDATE ON terra_msgs + FOR EACH ROW EXECUTE PROCEDURE check_terra_msg_state_transition(); + + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE terra_msgs; +DROP FUNCTION notify_terra_msg_insert; +DROP FUNCTION check_terra_msg_state_transition; +DROP TABLE terra_nodes; +DROP TABLE terra_chains; +ALTER TABLE evm_nodes RENAME TO nodes; +ALTER TABLE evm_heads RENAME TO heads; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0094_blockhash_store_job.sql b/core/store/migrate/migrations/0094_blockhash_store_job.sql new file mode 100644 index 00000000..1db1fb2a --- /dev/null +++ b/core/store/migrate/migrations/0094_blockhash_store_job.sql @@ -0,0 +1,54 @@ +-- +goose Up +CREATE TABLE blockhash_store_specs +( + id BIGSERIAL PRIMARY KEY, + coordinator_v1_address bytea DEFAULT NULL, + coordinator_v2_address bytea DEFAULT NULL, + wait_blocks bigint NOT NULL, + lookback_blocks bigint NOT NULL, + blockhash_store_address bytea NOT NULL, + poll_period bigint NOT NULL, + run_timeout bigint NOT NULL, + evm_chain_id numeric(78) + REFERENCES evm_chains + DEFERRABLE, + from_address bytea DEFAULT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL + CONSTRAINT coordinator_v1_address_len_chk CHECK (octet_length(coordinator_v1_address) = 20) + CONSTRAINT coordinator_v2_address_len_chk CHECK (octet_length(coordinator_v2_address) = 20) + CONSTRAINT blockhash_store_address_len_chk CHECK (octet_length(blockhash_store_address) = 20) + CONSTRAINT at_least_one_coordinator_chk CHECK (coordinator_v1_address IS NOT NULL OR coordinator_v2_address IS NOT NULL) +); +ALTER TABLE jobs + ADD COLUMN blockhash_store_spec_id INT REFERENCES blockhash_store_specs (id), + DROP CONSTRAINT chk_only_one_spec, + ADD CONSTRAINT chk_only_one_spec CHECK 
( + num_nonnulls( + offchainreporting_oracle_spec_id, + offchainreporting2_oracle_spec_id, + direct_request_spec_id, + flux_monitor_spec_id, + keeper_spec_id, + cron_spec_id, + vrf_spec_id, + webhook_spec_id, + blockhash_store_spec_id) = 1); + +-- +goose Down +ALTER TABLE jobs + DROP CONSTRAINT chk_only_one_spec, + ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + offchainreporting_oracle_spec_id, + offchainreporting2_oracle_spec_id, + direct_request_spec_id, + flux_monitor_spec_id, + keeper_spec_id, + cron_spec_id, + vrf_spec_id, + webhook_spec_id) = 1); + +ALTER TABLE jobs + DROP COLUMN blockhash_store_spec_id; +DROP TABLE IF EXISTS blockhash_store_specs; diff --git a/core/store/migrate/migrations/0095_terra_fcd.sql b/core/store/migrate/migrations/0095_terra_fcd.sql new file mode 100644 index 00000000..45d76981 --- /dev/null +++ b/core/store/migrate/migrations/0095_terra_fcd.sql @@ -0,0 +1,11 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE terra_nodes + DROP COLUMN fcd_url; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE terra_nodes + ADD COLUMN fcd_url text CHECK (fcd_url != ''); +-- +goose StatementEnd \ No newline at end of file diff --git a/core/store/migrate/migrations/0096_create_job_proposal_specs.sql b/core/store/migrate/migrations/0096_create_job_proposal_specs.sql new file mode 100644 index 00000000..455316e2 --- /dev/null +++ b/core/store/migrate/migrations/0096_create_job_proposal_specs.sql @@ -0,0 +1,91 @@ +-- +goose Up +-- +goose StatementBegin + +-- Create a new enum type for the spec's status. 
+CREATE TYPE job_proposal_spec_status AS ENUM('pending', 'approved', 'rejected', 'cancelled'); + +-- Create a new table to store the versioned specs +CREATE TABLE job_proposal_specs ( + id SERIAL PRIMARY KEY, + definition TEXT NOT NULL, + version INTEGER NOT NULL, + status job_proposal_spec_status NOT NULL, + job_proposal_id INTEGER REFERENCES job_proposals(id), + status_updated_at timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); + +CREATE UNIQUE INDEX idx_job_proposals_job_proposal_id_and_version ON job_proposal_specs(job_proposal_id, version); +CREATE UNIQUE INDEX idx_job_proposal_specs_job_proposal_id_and_status ON job_proposal_specs(job_proposal_id) WHERE status = 'approved'; + +-- Seed existing data from job_proposals into the job proposal specs +INSERT INTO job_proposal_specs +( + definition, + version, + status, + job_proposal_id, + status_updated_at, + created_at, + updated_at +) +SELECT spec, + 1, + -- Cast to a string before casting to a job_proposal_spec_status because + -- you can't cast from enum to enum. This is safe because the enums + -- match exactly. 
status::varchar::job_proposal_spec_status, + id, + updated_at, + proposed_at, + updated_at +from job_proposals; + +-- Update job proposals table with new fields +-- * Drop columns now that we have moved the data +-- * Add a pending update column +ALTER TABLE job_proposals +DROP COLUMN spec, +DROP COLUMN proposed_at, +ADD COLUMN pending_update BOOLEAN NOT NULL DEFAULT FALSE; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +-- Add the columns back into the job proposals table and drop the pending update +ALTER TABLE job_proposals +ADD COLUMN spec TEXT, +ADD COLUMN proposed_at timestamp with time zone, +DROP COLUMN pending_update; + +-- Return the data back to the job_proposals table +UPDATE job_proposals +SET spec=jps.definition, + proposed_at=jps.created_at, + status=jps.status +FROM ( + SELECT a.definition, a.job_proposal_id, a.created_at, a.status::varchar::job_proposal_status + FROM job_proposal_specs a + INNER JOIN ( + SELECT job_proposal_id, MAX(version) ver + FROM job_proposal_specs + GROUP BY job_proposal_id + ) b ON a.job_proposal_id = b.job_proposal_id AND a.version = b.ver +) AS jps +WHERE job_proposals.id = jps.job_proposal_id; + +-- Add constraints to the new fields +ALTER TABLE job_proposals +ALTER COLUMN spec SET NOT NULL, +ALTER COLUMN proposed_at SET NOT NULL; + +-- Drop the job_proposal_specs table +DROP TABLE job_proposal_specs; + +-- Drop the enum +DROP TYPE job_proposal_spec_status; + +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0097_bootstrap_spec.sql b/core/store/migrate/migrations/0097_bootstrap_spec.sql new file mode 100644 index 00000000..da4e83a7 --- /dev/null +++ b/core/store/migrate/migrations/0097_bootstrap_spec.sql @@ -0,0 +1,61 @@ +-- +goose Up +-- +goose StatementBegin + +CREATE TABLE bootstrap_specs +( + id SERIAL PRIMARY KEY, + contract_id text NOT NULL, + relay text, + relay_config JSONB, + monitoring_endpoint text, + blockchain_timeout bigint, + contract_config_tracker_poll_interval 
bigint, + contract_config_confirmations integer NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); + +ALTER TABLE jobs + ADD COLUMN bootstrap_spec_id INT REFERENCES bootstrap_specs (id), + DROP CONSTRAINT chk_only_one_spec, + ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + offchainreporting_oracle_spec_id, + offchainreporting2_oracle_spec_id, + direct_request_spec_id, + flux_monitor_spec_id, + keeper_spec_id, + cron_spec_id, + webhook_spec_id, + vrf_spec_id, + blockhash_store_spec_id, + bootstrap_spec_id) = 1 + ); + +ALTER TABLE offchainreporting2_oracle_specs + DROP COLUMN contract_config_tracker_subscribe_interval; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE jobs + DROP CONSTRAINT chk_only_one_spec, + ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + offchainreporting_oracle_spec_id, + offchainreporting2_oracle_spec_id, + direct_request_spec_id, + flux_monitor_spec_id, + keeper_spec_id, + cron_spec_id, + webhook_spec_id, + vrf_spec_id, + blockhash_store_spec_id) = 1 + ); +ALTER TABLE jobs + DROP COLUMN bootstrap_spec_id; +DROP TABLE IF EXISTS bootstrap_specs; + +ALTER TABLE offchainreporting2_oracle_specs + ADD COLUMN contract_config_tracker_subscribe_interval bigint; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0098_fm_clear_job_types_from_bootstrap_nodes.sql b/core/store/migrate/migrations/0098_fm_clear_job_types_from_bootstrap_nodes.sql new file mode 100644 index 00000000..7de98d27 --- /dev/null +++ b/core/store/migrate/migrations/0098_fm_clear_job_types_from_bootstrap_nodes.sql @@ -0,0 +1,11 @@ +-- +goose Up +-- +goose StatementBegin +UPDATE feeds_managers +SET job_types = '{}' +WHERE is_ocr_bootstrap_peer AND array_length(job_types, 1) > 0; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0099_terra_msgs_created_at.sql 
b/core/store/migrate/migrations/0099_terra_msgs_created_at.sql new file mode 100644 index 00000000..3148960e --- /dev/null +++ b/core/store/migrate/migrations/0099_terra_msgs_created_at.sql @@ -0,0 +1,11 @@ +-- +goose Up +-- +goose StatementBegin +DROP INDEX IF EXISTS idx_terra_msgs_terra_chain_id_state; +CREATE INDEX idx_terra_msgs_terra_chain_id_state ON terra_msgs (terra_chain_id, state, created_at); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP INDEX IF EXISTS idx_terra_msgs_terra_chain_id_state; +CREATE INDEX idx_terra_msgs_terra_chain_id_state ON terra_msgs (terra_chain_id, state); +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0100_bootstrap_config.sql b/core/store/migrate/migrations/0100_bootstrap_config.sql new file mode 100644 index 00000000..78f404a8 --- /dev/null +++ b/core/store/migrate/migrations/0100_bootstrap_config.sql @@ -0,0 +1,118 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE bootstrap_contract_configs +( + bootstrap_spec_id INTEGER PRIMARY KEY, + config_digest bytea NOT NULL, + config_count bigint NOT NULL, + signers bytea[] NOT NULL, + transmitters text[] NOT NULL, + f smallint NOT NULL, + onchain_config bytea, + offchain_config_version bigint NOT NULL, + offchain_config bytea, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + CONSTRAINT bootstrap_contract_configs_config_digest_check CHECK ((octet_length(config_digest) = 32)) +); + +ALTER TABLE ONLY bootstrap_contract_configs + ADD CONSTRAINT bootstrap_contract_configs_oracle_spec_fkey + FOREIGN KEY (bootstrap_spec_id) + REFERENCES bootstrap_specs (id) + ON DELETE CASCADE; + +-- add missing unique constraint for bootstrap specs +CREATE UNIQUE INDEX idx_jobs_unique_bootstrap_spec_id ON jobs USING btree (bootstrap_spec_id); + +-- migrate existing OCR2 bootstrap jobs to the new bootstrap spec +-- create helper column +ALTER TABLE bootstrap_specs + ADD COLUMN job_id INTEGER; + +-- insert 
bootstrap specs +INSERT INTO bootstrap_specs (contract_id, relay, relay_config, monitoring_endpoint, blockchain_timeout, + contract_config_tracker_poll_interval, contract_config_confirmations, created_at, + updated_at, job_id) +SELECT ocr2.contract_id, + ocr2.relay, + ocr2.relay_config, + ocr2.monitoring_endpoint, + ocr2.blockchain_timeout, + ocr2.contract_config_tracker_poll_interval, + ocr2.contract_config_confirmations, + ocr2.created_at, + ocr2.updated_at, + jobs.id +FROM jobs + INNER JOIN offchainreporting2_oracle_specs AS ocr2 ON jobs.offchainreporting2_oracle_spec_id = ocr2.id +WHERE ocr2.is_bootstrap_peer IS true; + +-- point jobs to new bootstrap specs +UPDATE jobs +SET type = 'bootstrap', + offchainreporting2_oracle_spec_id = null, + bootstrap_spec_id = (SELECT id FROM bootstrap_specs WHERE jobs.id = bootstrap_specs.job_id) +WHERE (SELECT COUNT(*) FROM bootstrap_specs WHERE jobs.id = bootstrap_specs.job_id) > 0; + +-- cleanup +-- delete old ocr2 bootstrap specs +DELETE +FROM offchainreporting2_oracle_specs +WHERE is_bootstrap_peer IS true; + +ALTER TABLE offchainreporting2_oracle_specs + DROP COLUMN is_bootstrap_peer; +ALTER TABLE bootstrap_specs + DROP COLUMN job_id; +-- +goose StatementEnd + + +-- +goose Down +-- +goose StatementBegin +DROP TABLE bootstrap_contract_configs; + +-- create helper column +ALTER TABLE offchainreporting2_oracle_specs + ADD COLUMN is_bootstrap_peer bool not null default false, + ADD COLUMN job_id INTEGER; + +-- insert ocr2 specs +INSERT INTO offchainreporting2_oracle_specs (contract_id, is_bootstrap_peer, ocr_key_bundle_id, monitoring_endpoint, + transmitter_id, blockchain_timeout, contract_config_tracker_poll_interval, + contract_config_confirmations, juels_per_fee_coin_pipeline, created_at, + updated_at, relay, relay_config, job_id) +SELECT bootstrap_specs.contract_id, + true, + null, + bootstrap_specs.monitoring_endpoint, + '', + bootstrap_specs.blockchain_timeout, + bootstrap_specs.contract_config_tracker_poll_interval, + 
bootstrap_specs.contract_config_confirmations, + '', + bootstrap_specs.created_at, + bootstrap_specs.updated_at, + bootstrap_specs.relay, + bootstrap_specs.relay_config, + jobs.id +FROM jobs + INNER JOIN bootstrap_specs ON jobs.bootstrap_spec_id = bootstrap_specs.id +WHERE jobs.bootstrap_spec_id is not null; + +-- point jobs to new ocr2 specs +UPDATE jobs +SET type = 'offchainreporting2', + bootstrap_spec_id = null, + offchainreporting2_oracle_spec_id = (SELECT id + FROM offchainreporting2_oracle_specs + WHERE jobs.id = offchainreporting2_oracle_specs.job_id) +WHERE (SELECT COUNT(*) FROM offchainreporting2_oracle_specs WHERE jobs.id = offchainreporting2_oracle_specs.job_id) > 0; + +-- cleanup +DELETE +FROM bootstrap_specs; + +ALTER TABLE offchainreporting2_oracle_specs + DROP COLUMN job_id; +-- +goose StatementEnd \ No newline at end of file diff --git a/core/store/migrate/migrations/0101_generic_ocr2.sql b/core/store/migrate/migrations/0101_generic_ocr2.sql new file mode 100644 index 00000000..c0d78025 --- /dev/null +++ b/core/store/migrate/migrations/0101_generic_ocr2.sql @@ -0,0 +1,145 @@ +-- +goose Up +-- +goose StatementBegin + +ALTER TABLE offchainreporting2_oracle_specs + ADD COLUMN plugin_config JSONB NOT NULL DEFAULT '{}', + ADD COLUMN plugin_type text NOT NULL default ''; + +-- migrate existing juels_per_fee_coin_pipeline settings to json format and set plugin_type to median as the only +-- plugins that are supported before this version are median plugins. 
+UPDATE offchainreporting2_oracle_specs +SET plugin_type = 'median', + plugin_config = jsonb_build_object('juelsPerFeeCoinSource', juels_per_fee_coin_pipeline); + +ALTER TABLE offchainreporting2_oracle_specs + DROP COLUMN juels_per_fee_coin_pipeline; + +-- rename OCR2 tables +ALTER TABLE jobs + RENAME COLUMN offchainreporting2_oracle_spec_id TO ocr2_oracle_spec_id; +ALTER TABLE offchainreporting2_oracle_specs + RENAME TO ocr2_oracle_specs; + +ALTER TABLE offchainreporting2_contract_configs + RENAME TO ocr2_contract_configs; +ALTER TABLE ocr2_contract_configs + RENAME COLUMN offchainreporting2_oracle_spec_id TO ocr2_oracle_spec_id; + +ALTER TABLE offchainreporting2_latest_round_requested + RENAME TO ocr2_latest_round_requested; +ALTER TABLE ocr2_latest_round_requested + RENAME COLUMN offchainreporting2_oracle_spec_id TO ocr2_oracle_spec_id; + +ALTER TABLE offchainreporting2_pending_transmissions + RENAME TO ocr2_pending_transmissions; +ALTER TABLE ocr2_pending_transmissions + RENAME COLUMN offchainreporting2_oracle_spec_id TO ocr2_oracle_spec_id; + +ALTER TABLE offchainreporting2_persistent_states + RENAME TO ocr2_persistent_states; +ALTER TABLE ocr2_persistent_states + RENAME COLUMN offchainreporting2_oracle_spec_id TO ocr2_oracle_spec_id; + +-- rename OCR tables +ALTER TABLE jobs + RENAME COLUMN offchainreporting_oracle_spec_id TO ocr_oracle_spec_id; +ALTER TABLE offchainreporting_oracle_specs + RENAME TO ocr_oracle_specs; + +ALTER TABLE offchainreporting_contract_configs + RENAME TO ocr_contract_configs; +ALTER TABLE ocr_contract_configs + RENAME COLUMN offchainreporting_oracle_spec_id TO ocr_oracle_spec_id; + +-- this table does not have offchainreporting_oracle_spec_id +ALTER TABLE offchainreporting_discoverer_announcements + RENAME TO ocr_discoverer_announcements; + +ALTER TABLE offchainreporting_latest_round_requested + RENAME TO ocr_latest_round_requested; +ALTER TABLE ocr_latest_round_requested + RENAME COLUMN offchainreporting_oracle_spec_id TO 
ocr_oracle_spec_id; + +ALTER TABLE offchainreporting_pending_transmissions + RENAME TO ocr_pending_transmissions; +ALTER TABLE ocr_pending_transmissions + RENAME COLUMN offchainreporting_oracle_spec_id TO ocr_oracle_spec_id; + +ALTER TABLE offchainreporting_persistent_states + RENAME TO ocr_persistent_states; +ALTER TABLE ocr_persistent_states + RENAME COLUMN offchainreporting_oracle_spec_id TO ocr_oracle_spec_id; + +-- +goose StatementEnd + + +-- +goose Down +-- +goose StatementBegin + +ALTER TABLE ocr2_oracle_specs + ADD COLUMN juels_per_fee_coin_pipeline text NOT NULL default ''; + +UPDATE ocr2_oracle_specs +SET juels_per_fee_coin_pipeline = plugin_config ->> 'juelsPerFeeCoinSource'; + +ALTER TABLE ocr2_oracle_specs + DROP COLUMN plugin_config, + DROP COLUMN plugin_type; + +-- rename OCR2 tables +ALTER TABLE jobs + RENAME COLUMN ocr2_oracle_spec_id TO offchainreporting2_oracle_spec_id; + +ALTER TABLE ocr2_oracle_specs + RENAME TO offchainreporting2_oracle_specs; + +ALTER TABLE ocr2_contract_configs + RENAME TO offchainreporting2_contract_configs; +ALTER TABLE offchainreporting2_contract_configs + RENAME COLUMN ocr2_oracle_spec_id TO offchainreporting2_oracle_spec_id; + +ALTER TABLE ocr2_latest_round_requested + RENAME TO offchainreporting2_latest_round_requested; +ALTER TABLE offchainreporting2_latest_round_requested + RENAME COLUMN ocr2_oracle_spec_id TO offchainreporting2_oracle_spec_id; + +ALTER TABLE ocr2_pending_transmissions + RENAME TO offchainreporting2_pending_transmissions; +ALTER TABLE offchainreporting2_pending_transmissions + RENAME COLUMN ocr2_oracle_spec_id TO offchainreporting2_oracle_spec_id; + +ALTER TABLE ocr2_persistent_states + RENAME TO offchainreporting2_persistent_states; +ALTER TABLE offchainreporting2_persistent_states + RENAME COLUMN ocr2_oracle_spec_id TO offchainreporting2_oracle_spec_id; + +-- rename OCR tables +ALTER TABLE jobs + RENAME COLUMN ocr_oracle_spec_id TO offchainreporting_oracle_spec_id; +ALTER TABLE ocr_oracle_specs + 
RENAME TO offchainreporting_oracle_specs; + +ALTER TABLE ocr_contract_configs + RENAME TO offchainreporting_contract_configs; +ALTER TABLE offchainreporting_contract_configs + RENAME COLUMN ocr_oracle_spec_id TO offchainreporting_oracle_spec_id; + +ALTER TABLE ocr_discoverer_announcements + RENAME TO offchainreporting_discoverer_announcements; + +ALTER TABLE ocr_latest_round_requested + RENAME TO offchainreporting_latest_round_requested; +ALTER TABLE offchainreporting_latest_round_requested + RENAME COLUMN ocr_oracle_spec_id TO offchainreporting_oracle_spec_id; + +ALTER TABLE ocr_pending_transmissions + RENAME TO offchainreporting_pending_transmissions; +ALTER TABLE offchainreporting_pending_transmissions + RENAME COLUMN ocr_oracle_spec_id TO offchainreporting_oracle_spec_id; + +ALTER TABLE ocr_persistent_states + RENAME TO offchainreporting_persistent_states; +ALTER TABLE offchainreporting_persistent_states + RENAME COLUMN ocr_oracle_spec_id TO offchainreporting_oracle_spec_id; + +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0102_add_log_broadcasts_block_num_chain_idx.sql b/core/store/migrate/migrations/0102_add_log_broadcasts_block_num_chain_idx.sql new file mode 100644 index 00000000..e46b846d --- /dev/null +++ b/core/store/migrate/migrations/0102_add_log_broadcasts_block_num_chain_idx.sql @@ -0,0 +1,5 @@ +-- +goose Up +CREATE INDEX idx_log_broadcasts_block_number_evm_chain_id ON log_broadcasts (evm_chain_id, block_number); + +-- +goose Down +DROP INDEX IF EXISTS idx_log_broadcasts_block_number_evm_chain_id; diff --git a/core/store/migrate/migrations/0103_terra_msgs_type_url.sql b/core/store/migrate/migrations/0103_terra_msgs_type_url.sql new file mode 100644 index 00000000..ed4e8ba8 --- /dev/null +++ b/core/store/migrate/migrations/0103_terra_msgs_type_url.sql @@ -0,0 +1,9 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE terra_msgs ADD COLUMN type text NOT NULL DEFAULT '/terra.wasm.v1beta1.MsgExecuteContract'; +-- +goose StatementEnd 
+ +-- +goose Down +-- +goose StatementBegin +ALTER TABLE terra_msgs DROP COLUMN type; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0104_terra_cascade_delete.sql b/core/store/migrate/migrations/0104_terra_cascade_delete.sql new file mode 100644 index 00000000..7d3be9b1 --- /dev/null +++ b/core/store/migrate/migrations/0104_terra_cascade_delete.sql @@ -0,0 +1,25 @@ +-- +goose Up +ALTER TABLE terra_nodes +DROP CONSTRAINT terra_nodes_terra_chain_id_fkey; + +ALTER TABLE terra_nodes + ADD FOREIGN KEY (terra_chain_id) REFERENCES terra_chains(id) ON DELETE CASCADE; + +ALTER TABLE terra_msgs +DROP CONSTRAINT terra_msgs_terra_chain_id_fkey; + +ALTER TABLE terra_msgs + ADD FOREIGN KEY (terra_chain_id) REFERENCES terra_chains(id) ON DELETE CASCADE; + +-- +goose Down +ALTER TABLE terra_nodes +DROP CONSTRAINT terra_nodes_terra_chain_id_fkey; + +ALTER TABLE terra_nodes + ADD FOREIGN KEY (terra_chain_id) REFERENCES terra_chains(id); + +ALTER TABLE terra_msgs +DROP CONSTRAINT terra_msgs_terra_chain_id_fkey; + +ALTER TABLE terra_msgs + ADD FOREIGN KEY (terra_chain_id) REFERENCES terra_chains(id); diff --git a/core/store/migrate/migrations/0105_create_forwarder_addresses.sql b/core/store/migrate/migrations/0105_create_forwarder_addresses.sql new file mode 100644 index 00000000..1788c685 --- /dev/null +++ b/core/store/migrate/migrations/0105_create_forwarder_addresses.sql @@ -0,0 +1,17 @@ +-- +goose Up +CREATE TABLE evm_forwarders ( + id BIGSERIAL PRIMARY KEY, + address bytea NOT NULL UNIQUE, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + evm_chain_id numeric(78,0) NOT NULL REFERENCES evm_chains(id) ON DELETE CASCADE, + CONSTRAINT chk_address_length CHECK ((octet_length(address) = 20)) +); + +CREATE INDEX idx_forwarders_evm_chain_id ON evm_forwarders(evm_chain_id); +CREATE INDEX idx_forwarders_evm_address ON evm_forwarders(address); +CREATE INDEX idx_forwarders_created_at ON evm_forwarders USING brin (created_at); +CREATE INDEX 
idx_forwarders_updated_at ON evm_forwarders USING brin (updated_at); + +-- +goose Down +DROP TABLE evm_forwarders; diff --git a/core/store/migrate/migrations/0106_evm_node_uniqueness.sql b/core/store/migrate/migrations/0106_evm_node_uniqueness.sql new file mode 100644 index 00000000..61f23c81 --- /dev/null +++ b/core/store/migrate/migrations/0106_evm_node_uniqueness.sql @@ -0,0 +1,16 @@ +-- +goose Up +-- Delete sendonlys if they redundantly duplicate a primary +DELETE FROM + evm_nodes a + USING evm_nodes b +WHERE + a.http_url = b.http_url + AND a.id != b.id + AND a.send_only; + +CREATE UNIQUE INDEX idx_unique_ws_url ON evm_nodes (ws_url); +CREATE UNIQUE INDEX idx_unique_http_url ON evm_nodes (http_url); + +-- +goose Down +DROP INDEX idx_unique_ws_url; +DROP INDEX idx_unique_http_url; diff --git a/core/store/migrate/migrations/0107_vrf_multiple_from_addresses.sql b/core/store/migrate/migrations/0107_vrf_multiple_from_addresses.sql new file mode 100644 index 00000000..56adae87 --- /dev/null +++ b/core/store/migrate/migrations/0107_vrf_multiple_from_addresses.sql @@ -0,0 +1,15 @@ +-- +goose Up +ALTER TABLE vrf_specs ADD COLUMN from_addresses bytea[] DEFAULT '{}' NOT NULL ; + +UPDATE vrf_specs SET from_addresses = from_addresses || from_address +WHERE from_address IS NOT NULL; + +ALTER TABLE vrf_specs DROP COLUMN from_address; + +-- +goose Down +ALTER TABLE vrf_specs ADD COLUMN from_address bytea; + +UPDATE vrf_specs SET from_address = from_addresses[1] +WHERE array_length(from_addresses, 1) > 0; + +ALTER TABLE vrf_specs DROP COLUMN from_addresses; diff --git a/core/store/migrate/migrations/0108_upgrade_keepers_tx_meta.sql b/core/store/migrate/migrations/0108_upgrade_keepers_tx_meta.sql new file mode 100644 index 00000000..bfed0cfd --- /dev/null +++ b/core/store/migrate/migrations/0108_upgrade_keepers_tx_meta.sql @@ -0,0 +1,63 @@ +-- +goose Up +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address 
from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + gasPrice="$(jobSpec.gasPrice)" + gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] +decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + from="[$(jobSpec.fromAddress)]" + evmChainID="$(jobSpec.evmChainID)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID),\"upkeepID\":$(jobSpec.upkeepID)}"] +encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 'keeper' AND schema_version = 3 +); + +-- +goose Down +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + data="$(encode_check_upkeep_tx)"] +decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + 
data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID)}"] +encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 'keeper' AND schema_version = 3 +); diff --git a/core/store/migrate/migrations/0109_solana_chains_nodes.sql b/core/store/migrate/migrations/0109_solana_chains_nodes.sql new file mode 100644 index 00000000..6339f4e5 --- /dev/null +++ b/core/store/migrate/migrations/0109_solana_chains_nodes.sql @@ -0,0 +1,26 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE solana_chains ( + id text PRIMARY KEY, + cfg jsonb NOT NULL DEFAULT '{}', + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + enabled BOOL DEFAULT TRUE NOT NULL +); +CREATE TABLE solana_nodes ( + id serial PRIMARY KEY, + name varchar(255) NOT NULL CHECK (name != ''), + solana_chain_id text NOT NULL REFERENCES solana_chains (id) ON DELETE CASCADE, + solana_url text CHECK (solana_url != ''), + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL +); +CREATE INDEX idx_nodes_solana_chain_id ON solana_nodes (solana_chain_id); +CREATE UNIQUE INDEX idx_solana_nodes_unique_name ON solana_nodes (lower(name)); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE solana_nodes; +DROP TABLE solana_chains; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0110_add_vrf_chunk_size.sql b/core/store/migrate/migrations/0110_add_vrf_chunk_size.sql new file mode 100644 index 00000000..4458f51a --- /dev/null +++ b/core/store/migrate/migrations/0110_add_vrf_chunk_size.sql @@ -0,0 +1,5 @@ +-- +goose Up +ALTER TABLE vrf_specs ADD COLUMN chunk_size bigint NOT NULL DEFAULT 20; 
+ +-- +goose Down +ALTER TABLE vrf_specs DROP COLUMN chunk_size; diff --git a/core/store/migrate/migrations/0111_terra_msgs_state_started.sql b/core/store/migrate/migrations/0111_terra_msgs_state_started.sql new file mode 100644 index 00000000..4b92ccb4 --- /dev/null +++ b/core/store/migrate/migrations/0111_terra_msgs_state_started.sql @@ -0,0 +1,46 @@ +-- +goose Up +-- +goose StatementBegin + +CREATE INDEX idx_terra_msgs_terra_chain_id_contract_id_state ON terra_msgs (terra_chain_id, contract_id, state); + +CREATE OR REPLACE FUNCTION check_terra_msg_state_transition() RETURNS TRIGGER AS $$ +DECLARE +state_transition_map jsonb := json_build_object( + 'unstarted', json_build_object('errored', true, 'started', true), + 'started', json_build_object('errored', true, 'broadcasted', true), + 'broadcasted', json_build_object('errored', true, 'confirmed', true)); +BEGIN + IF NOT state_transition_map ? OLD.state THEN + RAISE EXCEPTION 'Invalid from state %. Valid from states %', OLD.state, state_transition_map; +END IF; + IF NOT state_transition_map->OLD.state ? NEW.state THEN + RAISE EXCEPTION 'Invalid state transition from % to %. Valid to states %', OLD.state, NEW.state, state_transition_map->OLD.state; +END IF; +RETURN NEW; +END +$$ LANGUAGE plpgsql; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +CREATE OR REPLACE FUNCTION check_terra_msg_state_transition() RETURNS TRIGGER AS $$ +DECLARE +state_transition_map jsonb := json_build_object( + 'unstarted', json_build_object('errored', true, 'broadcasted', true), + 'broadcasted', json_build_object('errored', true, 'confirmed', true)); +BEGIN + IF NOT state_transition_map ? OLD.state THEN + RAISE EXCEPTION 'Invalid from state %. Valid from states %', OLD.state, state_transition_map; +END IF; + IF NOT state_transition_map->OLD.state ? NEW.state THEN + RAISE EXCEPTION 'Invalid state transition from % to %. 
Valid to states %', OLD.state, NEW.state, state_transition_map->OLD.state; +END IF; +RETURN NEW; +END +$$ LANGUAGE plpgsql; + +DROP INDEX idx_terra_msgs_terra_chain_id_contract_id_state; + +-- +goose StatementEnd \ No newline at end of file diff --git a/core/store/migrate/migrations/0112_vrf_batch_coordinator_address.sql b/core/store/migrate/migrations/0112_vrf_batch_coordinator_address.sql new file mode 100644 index 00000000..c2c3f94e --- /dev/null +++ b/core/store/migrate/migrations/0112_vrf_batch_coordinator_address.sql @@ -0,0 +1,9 @@ +-- +goose Up +ALTER TABLE vrf_specs ADD COLUMN batch_coordinator_address bytea, + ADD COLUMN batch_fulfillment_enabled bool NOT NULL DEFAULT false, + ADD COLUMN batch_fulfillment_gas_multiplier double precision NOT NULL DEFAULT 1.15; + +-- +goose Down +ALTER TABLE vrf_specs DROP COLUMN batch_coordinator_address, + DROP COLUMN batch_fulfillment_enabled, + DROP COLUMN batch_fulfillment_gas_multiplier; diff --git a/core/store/migrate/migrations/0113_vrf_v2_backoff_columns.sql b/core/store/migrate/migrations/0113_vrf_v2_backoff_columns.sql new file mode 100644 index 00000000..5bc0813b --- /dev/null +++ b/core/store/migrate/migrations/0113_vrf_v2_backoff_columns.sql @@ -0,0 +1,17 @@ +-- +goose Up +ALTER TABLE vrf_specs + ADD COLUMN "backoff_initial_delay" BIGINT + CHECK (backoff_initial_delay >= 0) + DEFAULT 0 + NOT NULL; + +ALTER TABLE vrf_specs + ADD COLUMN "backoff_max_delay" BIGINT + CHECK (backoff_max_delay >= 0) + DEFAULT 0 + NOT NULL; + +-- +goose Down +ALTER TABLE vrf_specs DROP COLUMN "backoff_initial_delay"; + +ALTER TABLE vrf_specs DROP COLUMN "backoff_max_delay"; diff --git a/core/store/migrate/migrations/0114_add_last_keeper_id_to_upkeep_table.sql b/core/store/migrate/migrations/0114_add_last_keeper_id_to_upkeep_table.sql new file mode 100644 index 00000000..3f13ce98 --- /dev/null +++ b/core/store/migrate/migrations/0114_add_last_keeper_id_to_upkeep_table.sql @@ -0,0 +1,11 @@ +-- +goose Up +ALTER TABLE 
upkeep_registrations + ADD COLUMN IF NOT EXISTS last_keeper_index integer DEFAULT NULL; +ALTER TABLE keeper_registries + ADD COLUMN IF NOT EXISTS keeper_index_map jsonb DEFAULT NULL; + +-- +goose Down +ALTER TABLE upkeep_registrations + DROP COLUMN IF EXISTS last_keeper_index; +ALTER TABLE keeper_registries + DROP COLUMN IF EXISTS keeper_index_map; diff --git a/core/store/migrate/migrations/0115_log_poller.sql b/core/store/migrate/migrations/0115_log_poller.sql new file mode 100644 index 00000000..ff29310d --- /dev/null +++ b/core/store/migrate/migrations/0115_log_poller.sql @@ -0,0 +1,32 @@ +-- +goose Up +CREATE TABLE logs ( + evm_chain_id numeric(78,0) NOT NULL REFERENCES evm_chains (id) DEFERRABLE, + log_index bigint NOT NULL, + block_hash bytea NOT NULL, + block_number bigint NOT NULL CHECK (block_number > 0), + address bytea NOT NULL, + event_sig bytea NOT NULL, + topics bytea[] NOT NULL, + tx_hash bytea NOT NULL, + data bytea NOT NULL, + created_at timestamptz NOT NULL, + PRIMARY KEY (block_hash, log_index, evm_chain_id) +); + +-- Hot path query - clients searching for their logs. +CREATE INDEX logs_idx ON logs(evm_chain_id, block_number, address, event_sig); + +CREATE TABLE log_poller_blocks ( + evm_chain_id numeric(78,0) NOT NULL REFERENCES evm_chains (id) DEFERRABLE, + block_hash bytea NOT NULL, + block_number bigint NOT NULL CHECK (block_number > 0), + created_at timestamptz NOT NULL, + -- Only permit one block_number at a time + -- i.e. 
the poller is only ever aware of the canonical branch + PRIMARY KEY (block_number, evm_chain_id) +); + +-- +goose Down +DROP INDEX logs_idx; +DROP TABLE logs; +DROP TABLE log_poller_blocks; diff --git a/core/store/migrate/migrations/0116_migrate_upkeep_id_to_big_int.sql b/core/store/migrate/migrations/0116_migrate_upkeep_id_to_big_int.sql new file mode 100644 index 00000000..dbd97545 --- /dev/null +++ b/core/store/migrate/migrations/0116_migrate_upkeep_id_to_big_int.sql @@ -0,0 +1,121 @@ +-- +goose Up +-- +goose StatementBegin + +ALTER TABLE upkeep_registrations ALTER COLUMN upkeep_id TYPE numeric(78,0); + +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + gasPrice="$(jobSpec.gasPrice)" + gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] +decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + from="[$(jobSpec.fromAddress)]" + evmChainID="$(jobSpec.evmChainID)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID),\"upkeepID\":$(jobSpec.prettyID)}"] +encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + 
WHERE type = 'keeper' AND schema_version = 3 +); + +UPDATE jobs +SET schema_version = 4 +WHERE type = 'keeper' AND schema_version = 3; + +-- uint256_to_bit converts a uint256 to a bit string +CREATE OR REPLACE FUNCTION uint256_to_bit(num NUMERIC) + RETURNS BIT VARYING AS $$ +DECLARE + -- 1 + largest positive INT -- + max_int32 NUMERIC := '4294967296' :: NUMERIC(10); + result BIT VARYING; +BEGIN + ASSERT num <= 115792089237316195423570985008687907853269984665640564039457584007913129639935 AND num >= 0, 'num outside uint256 range'; + -- break num into 32 bit chunks + WITH chunks (exponent, chunk) AS ( + SELECT + exponent, + floor(num::NUMERIC(178,100) / (max_int32 ^ exponent) % max_int32)::BIGINT from generate_series(0,7) exponent + ) + -- concat 32 bit chunks together + SELECT bit_or(chunk::bit(256) << (32*(exponent))) + FROM chunks INTO result; + RETURN result; +END; +$$ LANGUAGE plpgsql; + +-- least_significant selects the least significant n bits of a bit string +CREATE OR REPLACE FUNCTION least_significant(bits BIT VARYING, n integer) + RETURNS BIT VARYING AS $$ +BEGIN + ASSERT length(bits) >= n, 'slice is larger than input'; + RETURN substring(bits from length(bits) - n + 1 for n); +END; +$$ LANGUAGE plpgsql; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +ALTER TABLE upkeep_registrations ALTER COLUMN upkeep_id TYPE bigint; + +UPDATE jobs +SET schema_version = 3 +WHERE type = 'keeper' AND schema_version = 4; + +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + gasPrice="$(jobSpec.gasPrice)" + gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] 
+decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + from="[$(jobSpec.fromAddress)]" + evmChainID="$(jobSpec.evmChainID)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID),\"upkeepID\":$(jobSpec.upkeepID)}"] +encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 'keeper' AND schema_version = 3 +); + + +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0117_add_log_poller_idx.sql b/core/store/migrate/migrations/0117_add_log_poller_idx.sql new file mode 100644 index 00000000..0fe84e41 --- /dev/null +++ b/core/store/migrate/migrations/0117_add_log_poller_idx.sql @@ -0,0 +1,7 @@ +-- +goose Up +CREATE INDEX logs_idx_block_number ON logs using brin(block_number); +CREATE INDEX logs_idx_evm_id_event_address_block ON logs using btree (evm_chain_id,event_sig,address,block_number); + +-- +goose Down +DROP INDEX IF EXISTS logs_idx_block_number; +DROP INDEX IF EXISTS logs_idx_evm_id_event_address_block; diff --git a/core/store/migrate/migrations/0118_create_feeds_manager_chain_config.sql b/core/store/migrate/migrations/0118_create_feeds_manager_chain_config.sql new file mode 100644 index 00000000..d22dd135 --- /dev/null +++ b/core/store/migrate/migrations/0118_create_feeds_manager_chain_config.sql @@ -0,0 +1,44 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE feeds_manager_chain_configs ( + id SERIAL PRIMARY KEY, + chain_id VARCHAR NOT NULL, + chain_type VARCHAR NOT 
NULL, + account_address VARCHAR NOT NULL, + admin_address VARCHAR NOT NULL, + feeds_manager_id INTEGER REFERENCES feeds_managers ON DELETE CASCADE, + flux_monitor_config JSONB, + ocr1_config JSONB, + ocr2_config JSONB, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL +); + +CREATE INDEX idx_feeds_manager_chain_configs_chain_id_chain_type ON feeds_manager_chain_configs(chain_id, chain_type); +CREATE UNIQUE INDEX idx_feeds_manager_chain_configs_chain_id_chain_type_feeds_manager_id ON feeds_manager_chain_configs(chain_id, chain_type, feeds_manager_id); + +-- Remove the old configuration columns +ALTER TABLE feeds_managers +DROP CONSTRAINT chk_ocr_bootstrap_peer_multiaddr, +DROP COLUMN job_types, +DROP COLUMN is_ocr_bootstrap_peer, +DROP COLUMN ocr_bootstrap_peer_multiaddr; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE feeds_managers +ADD COLUMN job_types TEXT[], +ADD COLUMN is_ocr_bootstrap_peer boolean NOT NULL DEFAULT false, +ADD COLUMN ocr_bootstrap_peer_multiaddr VARCHAR, +ADD CONSTRAINT chk_ocr_bootstrap_peer_multiaddr CHECK ( NOT ( + is_ocr_bootstrap_peer AND + ( + ocr_bootstrap_peer_multiaddr IS NULL OR + ocr_bootstrap_peer_multiaddr = '' + ) +)); + +DROP TABLE feeds_manager_chain_configs; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0119_initial_rebroadcast_columns.sql b/core/store/migrate/migrations/0119_initial_rebroadcast_columns.sql new file mode 100644 index 00000000..a4e94fbb --- /dev/null +++ b/core/store/migrate/migrations/0119_initial_rebroadcast_columns.sql @@ -0,0 +1,23 @@ +-- +goose Up +ALTER TABLE eth_txes ADD COLUMN initial_broadcast_at timestamptz; +UPDATE eth_txes SET initial_broadcast_at = broadcast_at; -- Not perfect but this mirrors the old behaviour and will sort itself out in time when the old eth_txes are reaped +ALTER TABLE eth_txes DROP CONSTRAINT chk_eth_txes_fsm; +ALTER TABLE eth_txes ADD CONSTRAINT chk_eth_txes_fsm CHECK ( + state = 
'unstarted'::eth_txes_state AND nonce IS NULL AND error IS NULL AND broadcast_at IS NULL AND initial_broadcast_at IS NULL + OR + state = 'in_progress'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NULL AND initial_broadcast_at IS NULL + OR + state = 'fatal_error'::eth_txes_state AND nonce IS NULL AND error IS NOT NULL AND broadcast_at IS NULL AND initial_broadcast_at IS NULL + OR + state = 'unconfirmed'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL + OR + state = 'confirmed'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL + OR + state = 'confirmed_missing_receipt'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL +); +CREATE INDEX idx_eth_txes_initial_broadcast_at ON eth_txes USING BRIN (initial_broadcast_at timestamptz_minmax_ops); + + +-- +goose Down +ALTER TABLE eth_txes DROP COLUMN initial_broadcast_at; +ALTER TABLE eth_txes ADD CONSTRAINT chk_eth_txes_fsm CHECK (state = 'unstarted'::eth_txes_state AND nonce IS NULL AND error IS NULL AND broadcast_at IS NULL OR state = 'in_progress'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NULL OR state = 'fatal_error'::eth_txes_state AND nonce IS NULL AND error IS NOT NULL AND broadcast_at IS NULL OR state = 'unconfirmed'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL OR state = 'confirmed'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL OR state = 'confirmed_missing_receipt'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL); diff --git a/core/store/migrate/migrations/0120_log_poller_data_idx.sql b/core/store/migrate/migrations/0120_log_poller_data_idx.sql new file mode 100644 index 00000000..7289e902 --- /dev/null +++ 
b/core/store/migrate/migrations/0120_log_poller_data_idx.sql @@ -0,0 +1,14 @@ +-- +goose Up +-- We only index the first 3 words for the log. Revisit should we actually have products that need more. +-- The word value range is only helpful for integer based event arguments. +CREATE INDEX logs_idx_data_word_one ON logs (encode(substring(data from 1 for 32), 'hex')); +CREATE INDEX logs_idx_data_word_two ON logs (encode(substring(data from 33 for 32), 'hex')); +CREATE INDEX logs_idx_data_word_three ON logs (encode(substring(data from 65 for 32), 'hex')); +-- You can only index 3 event arguments. First topic is the event sig which we already have indexed separately. +CREATE INDEX logs_idx_topic_two ON logs (encode(topics[2], 'hex')); +CREATE INDEX logs_idx_topic_three ON logs (encode(topics[3], 'hex')); +CREATE INDEX logs_idx_topic_four ON logs (encode(topics[4], 'hex')); + +-- +goose Down +DROP INDEX IF EXISTS logs_idx_data_word_one, logs_idx_data_word_two, logs_idx_data_word_three; +DROP INDEX IF EXISTS logs_idx_topic_two, logs_idx_topic_three, logs_idx_topic_four; diff --git a/core/store/migrate/migrations/0121_remove_log_configs.sql b/core/store/migrate/migrations/0121_remove_log_configs.sql new file mode 100644 index 00000000..ad79b5d7 --- /dev/null +++ b/core/store/migrate/migrations/0121_remove_log_configs.sql @@ -0,0 +1,20 @@ +-- +goose Up +DROP TABLE IF EXISTS log_configs; +DROP TYPE IF EXISTS log_level; + +-- +goose Down +CREATE TYPE log_level AS ENUM ( + 'debug', + 'info', + 'warn', + 'error', + 'panic' +); + +CREATE TABLE log_configs ( + "id" BIGSERIAL PRIMARY KEY, + "service_name" text NOT NULL UNIQUE, + "log_level" log_level NOT NULL, + "created_at" timestamp with time zone, + "updated_at" timestamp with time zone +); diff --git a/core/store/migrate/migrations/0122_upgrade_keepers_observation_source.sql b/core/store/migrate/migrations/0122_upgrade_keepers_observation_source.sql new file mode 100644 index 00000000..7a95018b --- /dev/null +++ 
b/core/store/migrate/migrations/0122_upgrade_keepers_observation_source.sql @@ -0,0 +1,89 @@ +-- +goose Up +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + gasPrice="$(jobSpec.gasPrice)" + gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] +decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +simulate_perform_upkeep_tx [type=ethcall + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + from="$(jobSpec.fromAddress)" + gas="$(jobSpec.performUpkeepGasLimit)" + data="$(encode_perform_upkeep_tx)"] +decode_check_perform_tx [type=ethabidecode + abi="bool success"] +check_success [type=conditional + failEarly=true + data="$(decode_check_perform_tx.success)"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + from="[$(jobSpec.fromAddress)]" + evmChainID="$(jobSpec.evmChainID)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID),\"upkeepID\":$(jobSpec.prettyID)}"] +encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> simulate_perform_upkeep_tx -> decode_check_perform_tx -> check_success -> perform_upkeep_tx' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 
'keeper' AND schema_version = 3 +); + +UPDATE jobs +SET schema_version = 4 +WHERE type = 'keeper' AND schema_version = 3; + +-- +goose Down +UPDATE jobs +SET schema_version = 3 +WHERE type = 'keeper' AND schema_version = 4; + +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + gasPrice="$(jobSpec.gasPrice)" + gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] +decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + from="[$(jobSpec.fromAddress)]" + evmChainID="$(jobSpec.evmChainID)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID)}"] +encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> perform_upkeep_tx' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 'keeper' AND schema_version = 4 +); diff --git a/core/store/migrate/migrations/0123_terra_idx_simplify.sql b/core/store/migrate/migrations/0123_terra_idx_simplify.sql new file mode 100644 index 00000000..05fa3580 --- /dev/null +++ b/core/store/migrate/migrations/0123_terra_idx_simplify.sql @@ -0,0 +1,16 @@ +-- +goose Up +-- +goose StatementBegin +-- Simplify into one index to improve write performance. 
+DROP INDEX IF EXISTS idx_terra_msgs_terra_chain_id_state; +DROP INDEX IF EXISTS idx_terra_msgs_terra_chain_id_contract_id_state; +-- We order by state first, then contract_id, to permit efficient queries when grouping unstarted txes +-- across contracts. +CREATE INDEX idx_terra_msgs_terra_chain_id_state_contract_id ON terra_msgs (terra_chain_id, state, contract_id); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP INDEX IF EXISTS idx_terra_msgs_terra_chain_id_state_contract_id; +CREATE INDEX idx_terra_msgs_terra_chain_id_state ON terra_msgs (terra_chain_id, state, created_at); +CREATE INDEX idx_terra_msgs_terra_chain_id_contract_id_state ON terra_msgs(terra_chain_id, contract_id, state); +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0124_rename_p2pBootstrapPeers_to_p2pv2Bootstrappers.sql b/core/store/migrate/migrations/0124_rename_p2pBootstrapPeers_to_p2pv2Bootstrappers.sql new file mode 100644 index 00000000..cb7832c7 --- /dev/null +++ b/core/store/migrate/migrations/0124_rename_p2pBootstrapPeers_to_p2pv2Bootstrappers.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE ocr2_oracle_specs + RENAME COLUMN p2p_bootstrap_peers to p2pv2_bootstrappers; + +-- +goose Down +ALTER TABLE ocr2_oracle_specs + RENAME COLUMN p2pv2_bootstrappers to p2p_bootstrap_peers; diff --git a/core/store/migrate/migrations/0125_keeper_flatten_schema_version.sql b/core/store/migrate/migrations/0125_keeper_flatten_schema_version.sql new file mode 100644 index 00000000..1fceb49f --- /dev/null +++ b/core/store/migrate/migrations/0125_keeper_flatten_schema_version.sql @@ -0,0 +1,55 @@ +-- +goose Up +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + 
gas="$(jobSpec.checkUpkeepGasLimit)" + gasPrice="$(jobSpec.gasPrice)" + gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] +decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +simulate_perform_upkeep_tx [type=ethcall + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + from="$(jobSpec.fromAddress)" + gas="$(jobSpec.performUpkeepGasLimit)" + data="$(encode_perform_upkeep_tx)"] +decode_check_perform_tx [type=ethabidecode + abi="bool success"] +check_success [type=conditional + failEarly=true + data="$(decode_check_perform_tx.success)"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + from="[$(jobSpec.fromAddress)]" + evmChainID="$(jobSpec.evmChainID)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID),\"upkeepID\":$(jobSpec.prettyID)}"] +encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> simulate_perform_upkeep_tx -> decode_check_perform_tx -> check_success -> perform_upkeep_tx' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 'keeper' +); + +UPDATE jobs +SET schema_version = 1 +WHERE type = 'keeper'; + +-- +goose Down +UPDATE jobs +SET schema_version = 4 +WHERE type = 'keeper'; diff --git a/core/store/migrate/migrations/0126_remove_observation_source.sql b/core/store/migrate/migrations/0126_remove_observation_source.sql new file mode 100644 index 00000000..ab71ec15 --- /dev/null +++ b/core/store/migrate/migrations/0126_remove_observation_source.sql @@ -0,0 +1,55 @@ +-- +goose Up +UPDATE 
pipeline_specs +SET dot_dag_source = '' +WHERE id IN ( + SELECT pipeline_spec_id + FROM jobs + WHERE type = 'keeper' +); + +-- +goose Down +UPDATE pipeline_specs +SET dot_dag_source = 'encode_check_upkeep_tx [type=ethabiencode + abi="checkUpkeep(uint256 id, address from)" + data="{\"id\":$(jobSpec.upkeepID),\"from\":$(jobSpec.fromAddress)}"] +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + gasPrice="$(jobSpec.gasPrice)" + gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] +decode_check_upkeep_tx [type=ethabidecode + abi="bytes memory performData, uint256 maxLinkPayment, uint256 gasLimit, uint256 adjustedGasWei, uint256 linkEth"] +encode_perform_upkeep_tx [type=ethabiencode + abi="performUpkeep(uint256 id, bytes calldata performData)" + data="{\"id\": $(jobSpec.upkeepID),\"performData\":$(decode_check_upkeep_tx.performData)}"] +simulate_perform_upkeep_tx [type=ethcall + extractRevertReason=true + evmChainID="$(jobSpec.evmChainID)" + contract="$(jobSpec.contractAddress)" + from="$(jobSpec.fromAddress)" + gas="$(jobSpec.performUpkeepGasLimit)" + data="$(encode_perform_upkeep_tx)"] +decode_check_perform_tx [type=ethabidecode + abi="bool success"] +check_success [type=conditional + failEarly=true + data="$(decode_check_perform_tx.success)"] +perform_upkeep_tx [type=ethtx + minConfirmations=0 + to="$(jobSpec.contractAddress)" + from="[$(jobSpec.fromAddress)]" + evmChainID="$(jobSpec.evmChainID)" + data="$(encode_perform_upkeep_tx)" + gasLimit="$(jobSpec.performUpkeepGasLimit)" + txMeta="{\"jobID\":$(jobSpec.jobID),\"upkeepID\":$(jobSpec.prettyID)}"] +encode_check_upkeep_tx -> check_upkeep_tx -> decode_check_upkeep_tx -> encode_perform_upkeep_tx -> simulate_perform_upkeep_tx -> decode_check_perform_tx -> check_success -> perform_upkeep_tx' +WHERE id IN ( + SELECT 
pipeline_spec_id + FROM jobs + WHERE type = 'keeper' +); diff --git a/core/store/migrate/migrations/0127_add_ocr1_p2pv2Bootstrappers.sql b/core/store/migrate/migrations/0127_add_ocr1_p2pv2Bootstrappers.sql new file mode 100644 index 00000000..d186da83 --- /dev/null +++ b/core/store/migrate/migrations/0127_add_ocr1_p2pv2Bootstrappers.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE ocr_oracle_specs + ADD COLUMN p2pv2_bootstrappers text[] NOT NULL DEFAULT '{}'; + +-- +goose Down +ALTER TABLE ocr_oracle_specs + DROP COLUMN p2pv2_bootstrappers; diff --git a/core/store/migrate/migrations/0128_maxgaspricegwei_vrf.sql b/core/store/migrate/migrations/0128_maxgaspricegwei_vrf.sql new file mode 100644 index 00000000..59d55590 --- /dev/null +++ b/core/store/migrate/migrations/0128_maxgaspricegwei_vrf.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE vrf_specs + ADD COLUMN "max_gas_price_gwei" BIGINT + CHECK (max_gas_price_gwei >= 0); + +-- +goose Down +ALTER TABLE vrf_specs DROP COLUMN "max_gas_price_gwei"; diff --git a/core/store/migrate/migrations/0129_add_job_gas_limit.sql b/core/store/migrate/migrations/0129_add_job_gas_limit.sql new file mode 100644 index 00000000..868dc6ea --- /dev/null +++ b/core/store/migrate/migrations/0129_add_job_gas_limit.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE jobs ADD COLUMN gas_limit BIGINT DEFAULT NULL; +-- +goose Down +ALTER TABLE jobs DROP COLUMN gas_limit; diff --git a/core/store/migrate/migrations/0130_starknet_chains_nodes.sql b/core/store/migrate/migrations/0130_starknet_chains_nodes.sql new file mode 100644 index 00000000..3b808da6 --- /dev/null +++ b/core/store/migrate/migrations/0130_starknet_chains_nodes.sql @@ -0,0 +1,26 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE starknet_chains ( + id text PRIMARY KEY, + cfg jsonb NOT NULL DEFAULT '{}', + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + enabled BOOL DEFAULT TRUE NOT NULL +); +CREATE TABLE starknet_nodes ( + id serial PRIMARY KEY, + name 
varchar(255) NOT NULL CHECK (name != ''), + chain_id text NOT NULL REFERENCES starknet_chains (id) ON DELETE CASCADE, + url text CHECK (url != ''), + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL +); +CREATE INDEX idx_starknet_nodes_chain_id ON starknet_nodes (chain_id); +CREATE UNIQUE INDEX idx_starknet_nodes_unique_name ON starknet_nodes (lower(name)); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE starknet_nodes; +DROP TABLE starknet_chains; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0131_add_multi_user.sql b/core/store/migrate/migrations/0131_add_multi_user.sql new file mode 100644 index 00000000..e178bfe5 --- /dev/null +++ b/core/store/migrate/migrations/0131_add_multi_user.sql @@ -0,0 +1,28 @@ +-- +goose Up + +-- Create new user roles enum for users table +CREATE TYPE user_roles AS ENUM ('admin', 'edit', 'run', 'view'); + +-- Add new role column to users table, type enum +ALTER TABLE users ADD role user_roles NOT NULL DEFAULT 'view'; + +-- We are migrating up from a single user full access user - this should be reflected as the admin +UPDATE users SET role = 'admin'; + +CREATE UNIQUE INDEX unique_users_lowercase_email ON users (lower(email)); + +-- Update sessions table include email column to key on user tied to session +DELETE FROM sessions; +ALTER TABLE sessions ADD email text NOT NULL; + +ALTER TABLE sessions ADD CONSTRAINT sessions_fk_email FOREIGN KEY(email) REFERENCES users(email) ON DELETE cascade; + +-- +goose Down + +ALTER TABLE users DROP COLUMN role; +DROP TYPE user_roles; + +ALTER TABLE sessions DROP CONSTRAINT sessions_fk_email; +ALTER TABLE sessions DROP COLUMN email; + +DROP INDEX unique_users_lowercase_email; diff --git a/core/store/migrate/migrations/0132_log_index_uniqueness.sql b/core/store/migrate/migrations/0132_log_index_uniqueness.sql new file mode 100644 index 00000000..908cc0ad --- /dev/null +++ b/core/store/migrate/migrations/0132_log_index_uniqueness.sql 
@@ -0,0 +1,13 @@ +-- +goose Up + +-- Add tx_index column to log_broadcasts +ALTER TABLE log_broadcasts ADD COLUMN tx_index BIGINT; + +DROP INDEX IF EXISTS log_broadcasts_unique_idx; +CREATE UNIQUE INDEX log_broadcasts_unique_idx ON log_broadcasts USING BTREE (job_id, block_hash, tx_index, log_index, evm_chain_id); + +-- +goose Down + +DROP INDEX IF EXISTS log_broadcasts_unique_idx; +ALTER TABLE log_broadcasts DROP COLUMN tx_index; +CREATE UNIQUE INDEX log_broadcasts_unique_idx ON log_broadcasts USING BTREE (job_id, block_hash, log_index, evm_chain_id); diff --git a/core/store/migrate/migrations/0133_fix_pipeline_runs_constraint.sql b/core/store/migrate/migrations/0133_fix_pipeline_runs_constraint.sql new file mode 100644 index 00000000..9d31d194 --- /dev/null +++ b/core/store/migrate/migrations/0133_fix_pipeline_runs_constraint.sql @@ -0,0 +1,21 @@ +-- +goose Up +-- errors column was renamed to fatal_errors, see 0076_add_non_fatal_errors_to_runs.sql +-- but the constraint pipeline_runs_check was not updated + ALTER TABLE pipeline_runs DROP CONSTRAINT pipeline_runs_check; + ALTER TABLE pipeline_runs ADD CONSTRAINT pipeline_runs_check CHECK ( + ((state IN ('completed')) AND (finished_at IS NOT NULL) AND (num_nulls(outputs) = 0)) + OR + ((state IN ('errored')) AND (finished_at IS NOT NULL) AND (num_nulls(fatal_errors, all_errors) = 0)) + OR + ((state IN ('running', 'suspended')) AND num_nulls(finished_at, outputs, fatal_errors) = 3) + ); + +-- +goose Down +-- we cannot make a precise rollback, due to a wrong column name (errors => fatal_errors) +-- therefore the rollback flow will fix it for pre-0132 state as well... 
+ ALTER TABLE pipeline_runs DROP CONSTRAINT pipeline_runs_check; + ALTER TABLE pipeline_runs ADD CONSTRAINT pipeline_runs_check CHECK ( + ((state IN ('completed', 'errored')) AND (finished_at IS NOT NULL) AND (num_nulls(outputs, fatal_errors) = 0)) + OR + ((state IN ('running', 'suspended')) AND num_nulls(finished_at, outputs, fatal_errors) = 3) + ); diff --git a/core/store/migrate/migrations/0134_starknet_nodes_chain_id.sql b/core/store/migrate/migrations/0134_starknet_nodes_chain_id.sql new file mode 100644 index 00000000..9b8afffe --- /dev/null +++ b/core/store/migrate/migrations/0134_starknet_nodes_chain_id.sql @@ -0,0 +1,9 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE starknet_nodes RENAME COLUMN chain_id TO starknet_chain_id; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE starknet_nodes RENAME COLUMN starknet_chain_id TO chain_id; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0135_tx_index_not_null.sql b/core/store/migrate/migrations/0135_tx_index_not_null.sql new file mode 100644 index 00000000..d579e1da --- /dev/null +++ b/core/store/migrate/migrations/0135_tx_index_not_null.sql @@ -0,0 +1,10 @@ +-- +goose Up +-- +goose StatementBegin +UPDATE log_broadcasts SET tx_index=-1 WHERE tx_index IS NULL; +ALTER TABLE log_broadcasts ALTER COLUMN tx_index SET NOT NULL; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE log_broadcasts ALTER COLUMN tx_index DROP NOT NULL; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0136_add_job_allow_forwarding.sql b/core/store/migrate/migrations/0136_add_job_allow_forwarding.sql new file mode 100644 index 00000000..b4eff21f --- /dev/null +++ b/core/store/migrate/migrations/0136_add_job_allow_forwarding.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE jobs ADD COLUMN allow_forwarding boolean DEFAULT FALSE; +-- +goose Down +ALTER TABLE jobs DROP COLUMN allow_forwarding; diff --git 
a/core/store/migrate/migrations/0137_remove_tx_index.sql b/core/store/migrate/migrations/0137_remove_tx_index.sql new file mode 100644 index 00000000..48f75a3a --- /dev/null +++ b/core/store/migrate/migrations/0137_remove_tx_index.sql @@ -0,0 +1,13 @@ +-- +goose Up +-- +goose StatementBegin +DROP INDEX IF EXISTS log_broadcasts_unique_idx; +ALTER TABLE log_broadcasts DROP COLUMN tx_index; +CREATE UNIQUE INDEX log_broadcasts_unique_idx ON log_broadcasts USING BTREE (job_id, block_hash, log_index, evm_chain_id); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP INDEX log_broadcasts_unique_idx; +ALTER TABLE log_broadcasts ADD COLUMN tx_index BIGINT NOT NULL DEFAULT -1; +CREATE UNIQUE INDEX log_broadcasts_unique_idx ON log_broadcasts USING BTREE (job_id, block_hash, tx_index, log_index, evm_chain_id); +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0138_rename_allowforwarding_field.sql b/core/store/migrate/migrations/0138_rename_allowforwarding_field.sql new file mode 100644 index 00000000..45df7528 --- /dev/null +++ b/core/store/migrate/migrations/0138_rename_allowforwarding_field.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE jobs RENAME COLUMN allow_forwarding TO forwarding_allowed; +-- +goose Down +ALTER TABLE jobs RENAME COLUMN forwarding_allowed TO allow_forwarding; \ No newline at end of file diff --git a/core/store/migrate/migrations/0139_multi_chain_keys.sql b/core/store/migrate/migrations/0139_multi_chain_keys.sql new file mode 100644 index 00000000..4d6b273b --- /dev/null +++ b/core/store/migrate/migrations/0139_multi_chain_keys.sql @@ -0,0 +1,18 @@ +-- +goose Up +ALTER TABLE eth_key_states RENAME TO evm_key_states; -- Might as well rename it while we are here +CREATE UNIQUE INDEX idx_evm_key_states_evm_chain_id_address ON evm_key_states (evm_chain_id, address); -- it is now only unique per-chain +ALTER TABLE eth_txes DROP CONSTRAINT eth_txes_from_address_fkey; -- foreign key is now composite of chain id/address 
+ALTER TABLE evm_key_states DROP CONSTRAINT eth_key_states_address_key; +ALTER TABLE evm_key_states RENAME is_funding TO disabled; -- little hack here, we are removing is_funding; to avoid accidentally sending from the wrong keys we disable the funding key +ALTER TABLE eth_txes ADD CONSTRAINT eth_txes_evm_chain_id_from_address_fkey FOREIGN KEY (evm_chain_id, from_address) REFERENCES evm_key_states (evm_chain_id, address) NOT VALID; -- not valid skips the check, this speeds things up and we know it's safe +CREATE INDEX idx_evm_key_states_address ON evm_key_states (address); + +-- +goose Down +DROP INDEX idx_evm_key_states_address; +ALTER TABLE eth_txes DROP CONSTRAINT eth_txes_evm_chain_id_from_address_fkey; +ALTER TABLE evm_key_states RENAME disabled TO is_funding; +ALTER TABLE evm_key_states ADD CONSTRAINT eth_key_states_address_key UNIQUE (address); +ALTER TABLE eth_txes ADD CONSTRAINT eth_txes_from_address_fkey FOREIGN KEY (from_address) REFERENCES evm_key_states (address); +DROP INDEX idx_evm_key_states_evm_chain_id_address; +ALTER TABLE evm_key_states RENAME TO eth_key_states; + diff --git a/core/store/migrate/migrations/0140_pipeline_runs_brin_to_btree.sql b/core/store/migrate/migrations/0140_pipeline_runs_brin_to_btree.sql new file mode 100644 index 00000000..9cfb9320 --- /dev/null +++ b/core/store/migrate/migrations/0140_pipeline_runs_brin_to_btree.sql @@ -0,0 +1,22 @@ +-- +goose Up + -- Migrate idx_pipeline_runs_created_at to BTREE + DROP INDEX IF EXISTS idx_pipeline_runs_created_at; + CREATE INDEX idx_pipeline_runs_created_at ON public.pipeline_runs USING BTREE (created_at); + + -- Migrate idx_pipeline_runs_finished_at to BTREE + DROP INDEX IF EXISTS idx_pipeline_runs_finished_at; + CREATE INDEX idx_pipeline_runs_finished_at ON public.pipeline_runs USING BTREE (finished_at); + + -- Migrate idx_pipeline_runs_pipeline_spec_id to HASH index + DROP INDEX IF EXISTS idx_pipeline_runs_pipeline_spec_id; + CREATE INDEX idx_pipeline_runs_pipeline_spec_id ON
public.pipeline_runs USING HASH (pipeline_spec_id); + +-- +goose Down + DROP INDEX IF EXISTS idx_pipeline_runs_created_at; + CREATE INDEX idx_pipeline_runs_created_at ON public.pipeline_runs USING BRIN (created_at); + + DROP INDEX IF EXISTS idx_pipeline_runs_finished_at; + CREATE INDEX idx_pipeline_runs_finished_at ON public.pipeline_runs USING BRIN (finished_at); + + DROP INDEX IF EXISTS idx_pipeline_runs_pipeline_spec_id; + CREATE INDEX idx_pipeline_runs_pipeline_spec_id ON public.pipeline_runs USING BTREE (pipeline_spec_id); diff --git a/core/store/migrate/migrations/0141_allow_broadcast_at_on_fatally_errored_tx.sql b/core/store/migrate/migrations/0141_allow_broadcast_at_on_fatally_errored_tx.sql new file mode 100644 index 00000000..a020859a --- /dev/null +++ b/core/store/migrate/migrations/0141_allow_broadcast_at_on_fatally_errored_tx.sql @@ -0,0 +1,32 @@ +-- +goose Up +ALTER TABLE eth_txes DROP CONSTRAINT chk_eth_txes_fsm; +ALTER TABLE eth_txes ADD CONSTRAINT chk_eth_txes_fsm CHECK ( + state = 'unstarted'::eth_txes_state AND nonce IS NULL AND error IS NULL AND broadcast_at IS NULL AND initial_broadcast_at IS NULL + OR + state = 'in_progress'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NULL AND initial_broadcast_at IS NULL + OR + state = 'fatal_error'::eth_txes_state AND nonce IS NULL AND error IS NOT NULL + OR + state = 'unconfirmed'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL + OR + state = 'confirmed'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL + OR + state = 'confirmed_missing_receipt'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL +) NOT VALID; -- NOT VALID gives large speedup and this is a relaxing of the constraint so its safe + +-- +goose Down +UPDATE eth_txes SET broadcast_at=NULL, 
initial_broadcast_at=NULL WHERE state='fatal_error'; +ALTER TABLE eth_txes DROP CONSTRAINT chk_eth_txes_fsm; +ALTER TABLE eth_txes ADD CONSTRAINT chk_eth_txes_fsm CHECK ( + state = 'unstarted'::eth_txes_state AND nonce IS NULL AND error IS NULL AND broadcast_at IS NULL AND initial_broadcast_at IS NULL + OR + state = 'in_progress'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NULL AND initial_broadcast_at IS NULL + OR + state = 'fatal_error'::eth_txes_state AND nonce IS NULL AND error IS NOT NULL AND broadcast_at IS NULL AND initial_broadcast_at IS NULL + OR + state = 'unconfirmed'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL + OR + state = 'confirmed'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL + OR + state = 'confirmed_missing_receipt'::eth_txes_state AND nonce IS NOT NULL AND error IS NULL AND broadcast_at IS NOT NULL AND initial_broadcast_at IS NOT NULL +) NOT VALID; -- NOT VALID gives large speedup and we know data is valid because of update above diff --git a/core/store/migrate/migrations/0142_delete_eth_key_state_cascades.sql b/core/store/migrate/migrations/0142_delete_eth_key_state_cascades.sql new file mode 100644 index 00000000..759ebe7e --- /dev/null +++ b/core/store/migrate/migrations/0142_delete_eth_key_state_cascades.sql @@ -0,0 +1,34 @@ +-- +goose Up +ALTER TABLE blockhash_store_specs DROP CONSTRAINT blockhash_store_specs_evm_chain_id_fkey, ADD CONSTRAINT blockhash_store_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE direct_request_specs DROP CONSTRAINT direct_request_specs_evm_chain_id_fkey, ADD CONSTRAINT direct_request_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER 
TABLE eth_txes DROP CONSTRAINT eth_txes_evm_chain_id_fkey, ADD CONSTRAINT eth_txes_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE evm_forwarders DROP CONSTRAINT evm_forwarders_evm_chain_id_fkey, ADD CONSTRAINT evm_forwarders_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE evm_heads DROP CONSTRAINT heads_evm_chain_id_fkey, ADD CONSTRAINT heads_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE evm_key_states DROP CONSTRAINT eth_key_states_evm_chain_id_fkey, ADD CONSTRAINT eth_key_states_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE evm_nodes DROP CONSTRAINT nodes_evm_chain_id_fkey, ADD CONSTRAINT nodes_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE flux_monitor_specs DROP CONSTRAINT flux_monitor_specs_evm_chain_id_fkey, ADD CONSTRAINT flux_monitor_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE keeper_specs DROP CONSTRAINT keeper_specs_evm_chain_id_fkey, ADD CONSTRAINT keeper_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE log_broadcasts DROP CONSTRAINT log_broadcasts_evm_chain_id_fkey, ADD CONSTRAINT log_broadcasts_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE log_broadcasts_pending DROP CONSTRAINT log_broadcasts_pending_evm_chain_id_fkey, ADD CONSTRAINT 
log_broadcasts_pending_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE log_poller_blocks DROP CONSTRAINT log_poller_blocks_evm_chain_id_fkey, ADD CONSTRAINT log_poller_blocks_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE logs DROP CONSTRAINT logs_evm_chain_id_fkey, ADD CONSTRAINT logs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE ocr_oracle_specs DROP CONSTRAINT offchainreporting_oracle_specs_evm_chain_id_fkey, ADD CONSTRAINT offchainreporting_oracle_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE vrf_specs DROP CONSTRAINT vrf_specs_evm_chain_id_fkey, ADD CONSTRAINT vrf_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; + +-- +goose Down +ALTER TABLE blockhash_store_specs DROP CONSTRAINT blockhash_store_specs_evm_chain_id_fkey, ADD CONSTRAINT blockhash_store_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE direct_request_specs DROP CONSTRAINT direct_request_specs_evm_chain_id_fkey, ADD CONSTRAINT direct_request_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE eth_txes DROP CONSTRAINT eth_txes_evm_chain_id_fkey, ADD CONSTRAINT eth_txes_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE evm_forwarders DROP CONSTRAINT evm_forwarders_evm_chain_id_fkey, ADD CONSTRAINT evm_forwarders_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) 
DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE evm_heads DROP CONSTRAINT heads_evm_chain_id_fkey, ADD CONSTRAINT heads_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE evm_key_states DROP CONSTRAINT eth_key_states_evm_chain_id_fkey, ADD CONSTRAINT eth_key_states_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE evm_nodes DROP CONSTRAINT nodes_evm_chain_id_fkey, ADD CONSTRAINT nodes_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE flux_monitor_specs DROP CONSTRAINT flux_monitor_specs_evm_chain_id_fkey, ADD CONSTRAINT flux_monitor_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE keeper_specs DROP CONSTRAINT keeper_specs_evm_chain_id_fkey, ADD CONSTRAINT keeper_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE log_broadcasts DROP CONSTRAINT log_broadcasts_evm_chain_id_fkey, ADD CONSTRAINT log_broadcasts_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE log_broadcasts_pending DROP CONSTRAINT log_broadcasts_pending_evm_chain_id_fkey, ADD CONSTRAINT log_broadcasts_pending_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE log_poller_blocks DROP CONSTRAINT log_poller_blocks_evm_chain_id_fkey, ADD CONSTRAINT log_poller_blocks_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE logs DROP CONSTRAINT logs_evm_chain_id_fkey, ADD CONSTRAINT logs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT 
VALID; +ALTER TABLE ocr_oracle_specs DROP CONSTRAINT offchainreporting_oracle_specs_evm_chain_id_fkey, ADD CONSTRAINT offchainreporting_oracle_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +ALTER TABLE vrf_specs DROP CONSTRAINT vrf_specs_evm_chain_id_fkey, ADD CONSTRAINT vrf_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; + diff --git a/core/store/migrate/migrations/0143_set_forwarding_allowed_not_null.sql b/core/store/migrate/migrations/0143_set_forwarding_allowed_not_null.sql new file mode 100644 index 00000000..4a29d055 --- /dev/null +++ b/core/store/migrate/migrations/0143_set_forwarding_allowed_not_null.sql @@ -0,0 +1,6 @@ +-- +goose Up +UPDATE jobs SET forwarding_allowed = false WHERE forwarding_allowed is NULL; +ALTER TABLE jobs ALTER COLUMN forwarding_allowed SET NOT NULL; + +-- +goose Down +ALTER TABLE jobs ALTER COLUMN forwarding_allowed DROP NOT NULL; diff --git a/core/store/migrate/migrations/0144_optimize_lp.sql b/core/store/migrate/migrations/0144_optimize_lp.sql new file mode 100644 index 00000000..4081d4e8 --- /dev/null +++ b/core/store/migrate/migrations/0144_optimize_lp.sql @@ -0,0 +1,21 @@ +-- +goose Up +-- Rebuild the indexes without the hex encoding. It's not required; postgres can handle bytea comparisons. +DROP INDEX logs_idx_data_word_one, logs_idx_data_word_two, logs_idx_data_word_three, logs_idx_topic_two, logs_idx_topic_three, logs_idx_topic_four; +CREATE INDEX logs_idx_data_word_one ON logs (substring(data from 1 for 32)); +CREATE INDEX logs_idx_data_word_two ON logs (substring(data from 33 for 32)); +CREATE INDEX logs_idx_data_word_three ON logs (substring(data from 65 for 32)); + +-- You can only index 3 event arguments. First topic is the event sig which we already have indexed separately.
+CREATE INDEX logs_idx_topic_two ON logs ((topics[2])); +CREATE INDEX logs_idx_topic_three ON logs ((topics[3])); +CREATE INDEX logs_idx_topic_four ON logs ((topics[4])); + +DROP INDEX logs_idx_evm_id_event_address_block; +DROP INDEX logs_idx_block_number; +ALTER TABLE log_poller_blocks ADD CONSTRAINT block_hash_uniq UNIQUE(evm_chain_id,block_hash); +-- +-- +goose Down +DROP INDEX IF EXISTS logs_idx_data_word_one, logs_idx_data_word_two, logs_idx_data_word_three, logs_idx_topic_two, logs_idx_topic_three, logs_idx_topic_four; +CREATE INDEX logs_idx_block_number ON logs using brin(block_number); +CREATE INDEX logs_idx_evm_id_event_address_block ON logs using btree (evm_chain_id,event_sig,address,block_number); +ALTER TABLE log_poller_blocks DROP CONSTRAINT IF EXISTS block_hash_uniq; diff --git a/core/store/migrate/migrations/0145_cascade_delete_of_evm_key_states.sql b/core/store/migrate/migrations/0145_cascade_delete_of_evm_key_states.sql new file mode 100644 index 00000000..107debd3 --- /dev/null +++ b/core/store/migrate/migrations/0145_cascade_delete_of_evm_key_states.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE eth_txes DROP CONSTRAINT eth_txes_evm_chain_id_from_address_fkey, ADD CONSTRAINT eth_txes_evm_chain_id_from_address_fkey FOREIGN KEY (evm_chain_id, from_address) REFERENCES public.evm_key_states(evm_chain_id, address) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; +-- +goose Down +ALTER TABLE eth_txes DROP CONSTRAINT eth_txes_evm_chain_id_from_address_fkey, ADD CONSTRAINT eth_txes_evm_chain_id_from_address_fkey FOREIGN KEY (evm_chain_id, from_address) REFERENCES public.evm_key_states(evm_chain_id, address) DEFERRABLE INITIALLY IMMEDIATE NOT VALID; diff --git a/core/store/migrate/migrations/0146_unique_contract_address_per_chain.sql b/core/store/migrate/migrations/0146_unique_contract_address_per_chain.sql new file mode 100644 index 00000000..098cc0f4 --- /dev/null +++ b/core/store/migrate/migrations/0146_unique_contract_address_per_chain.sql 
@@ -0,0 +1,10 @@ +-- +goose Up +--- Remove all but most recently added contract_address for each chain. We will no longer allow duplicates, but enforcing that with a db constraint requires CREATE OPERATOR (admin) privilege +DELETE FROM ocr_oracle_specs WHERE id IN (SELECT id FROM (SELECT id, MAX(id) OVER(PARTITION BY evm_chain_id, contract_address ORDER BY id) AS max FROM ocr_oracle_specs) x WHERE id != max); + +-- +goose Down +DROP INDEX IF EXISTS ocr_oracle_specs_unique_contract_addr; +DROP OPERATOR CLASS IF EXISTS wildcard_cmp USING BTREE CASCADE; +DROP FUNCTION IF EXISTS wildcard_cmp(INTEGER, INTEGER) CASCADE; +CREATE UNIQUE INDEX IF NOT EXISTS unique_contract_addr ON ocr_oracle_specs (contract_address) WHERE evm_chain_id IS NULL; +CREATE UNIQUE INDEX IF NOT EXISTS unique_contract_addr_per_chain ON ocr_oracle_specs (contract_address, evm_chain_id) WHERE evm_chain_id IS NOT NULL; diff --git a/core/store/migrate/migrations/0147_remove_maxgaspricegwei_vrf.sql b/core/store/migrate/migrations/0147_remove_maxgaspricegwei_vrf.sql new file mode 100644 index 00000000..c259a9e7 --- /dev/null +++ b/core/store/migrate/migrations/0147_remove_maxgaspricegwei_vrf.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE vrf_specs DROP COLUMN "max_gas_price_gwei"; + +-- +goose Down +ALTER TABLE vrf_specs + ADD COLUMN "max_gas_price_gwei" BIGINT + CHECK (max_gas_price_gwei >= 0); diff --git a/core/store/migrate/migrations/0148_dkg_shares.sql b/core/store/migrate/migrations/0148_dkg_shares.sql new file mode 100644 index 00000000..b913da63 --- /dev/null +++ b/core/store/migrate/migrations/0148_dkg_shares.sql @@ -0,0 +1,12 @@ +-- +goose Up +CREATE TABLE dkg_shares( + config_digest bytea NOT NULL CHECK ( length(config_digest) = 32 ), + key_id bytea NOT NULL CHECK ( length(key_id) = 32 ), + dealer bytea NOT NULL CHECK ( length(dealer) = 1), + marshaled_share_record bytea NOT NULL, + record_hash bytea NOT NULL CHECK ( length(record_hash) = 32 ), + PRIMARY KEY (config_digest, key_id, dealer) +); 
+ +-- +goose Down +DROP TABLE dkg_shares; diff --git a/core/store/migrate/migrations/0149_bridge_last_good_value.sql b/core/store/migrate/migrations/0149_bridge_last_good_value.sql new file mode 100644 index 00000000..8fd15441 --- /dev/null +++ b/core/store/migrate/migrations/0149_bridge_last_good_value.sql @@ -0,0 +1,16 @@ +-- +goose Up + +CREATE TABLE bridge_last_value ( + spec_id int NOT NULL REFERENCES public.pipeline_specs(id) ON DELETE CASCADE DEFERRABLE, + dot_id text NOT NULL, + value bytea NOT NULL, + finished_at timestamp NOT NULL, + CONSTRAINT bridge_last_value_pkey PRIMARY KEY (spec_id, dot_id) +); + +CREATE INDEX idx_bridge_last_value_optimise_finding_last_value ON bridge_last_value USING btree (finished_at); + + +-- +goose Down +DROP INDEX idx_bridge_last_value_optimise_finding_last_value; +DROP TABLE bridge_last_value; diff --git a/core/store/migrate/migrations/0150_gaslaneprice_vrf.sql b/core/store/migrate/migrations/0150_gaslaneprice_vrf.sql new file mode 100644 index 00000000..f87f3f37 --- /dev/null +++ b/core/store/migrate/migrations/0150_gaslaneprice_vrf.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE vrf_specs + ADD COLUMN "gas_lane_price" NUMERIC(78, 0) + CHECK (gas_lane_price IS NULL OR gas_lane_price > 0); + +-- +goose Down +ALTER TABLE vrf_specs DROP COLUMN "gas_lane_price"; diff --git a/core/store/migrate/migrations/0151_bridge_last_good_value_use_tz.sql b/core/store/migrate/migrations/0151_bridge_last_good_value_use_tz.sql new file mode 100644 index 00000000..78139277 --- /dev/null +++ b/core/store/migrate/migrations/0151_bridge_last_good_value_use_tz.sql @@ -0,0 +1,7 @@ +-- +goose Up + +ALTER TABLE bridge_last_value ALTER finished_at TYPE timestamptz; + + +-- +goose Down +ALTER TABLE bridge_last_value ALTER finished_at TYPE timestamp; diff --git a/core/store/migrate/migrations/0152_ocr2_multichain.sql b/core/store/migrate/migrations/0152_ocr2_multichain.sql new file mode 100644 index 00000000..3471a004 --- /dev/null +++ 
b/core/store/migrate/migrations/0152_ocr2_multichain.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE ocr2_oracle_specs DROP CONSTRAINT offchainreporting2_oracle_specs_unique_contract_addr; +-- +goose Down +ALTER TABLE ocr2_oracle_specs ADD CONSTRAINT offchainreporting2_oracle_specs_unique_contract_addr UNIQUE (contract_id); diff --git a/core/store/migrate/migrations/0153_ocr2_restore_global_constraint.sql b/core/store/migrate/migrations/0153_ocr2_restore_global_constraint.sql new file mode 100644 index 00000000..cf568258 --- /dev/null +++ b/core/store/migrate/migrations/0153_ocr2_restore_global_constraint.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE ocr2_oracle_specs ADD CONSTRAINT offchainreporting2_oracle_specs_unique_contract_addr UNIQUE (contract_id); +-- +goose Down +ALTER TABLE ocr2_oracle_specs DROP CONSTRAINT offchainreporting2_oracle_specs_unique_contract_addr; diff --git a/core/store/migrate/migrations/0154_ocr2dr_requests_table.sql b/core/store/migrate/migrations/0154_ocr2dr_requests_table.sql new file mode 100644 index 00000000..ebe4c74d --- /dev/null +++ b/core/store/migrate/migrations/0154_ocr2dr_requests_table.sql @@ -0,0 +1,24 @@ +-- +goose Up + +-- see 0183_functions_new_fields.sql for changes +CREATE TABLE ocr2dr_requests( + request_id bytea CHECK (octet_length(request_id) = 32) PRIMARY KEY, + contract_address bytea CHECK (octet_length(contract_address) = 20) NOT NULL, + run_id bigint, -- NOT NULL REFERENCES public.pipeline_runs(id) ON DELETE CASCADE DEFERRABLE + received_at timestamp with time zone NOT NULL, + request_tx_hash bytea CHECK (octet_length(request_tx_hash) = 32) NOT NULL, + state INTEGER, + result_ready_at timestamp with time zone, + result bytea, + error_type INTEGER, + error bytea, + transmitted_result bytea, + transmitted_error bytea +); + +CREATE INDEX idx_ocr2dr_requests ON ocr2dr_requests (contract_address); + +-- +goose Down + +DROP INDEX IF EXISTS idx_ocr2dr_requests; +DROP TABLE ocr2dr_requests; diff --git 
a/core/store/migrate/migrations/0155_remove_terra.sql b/core/store/migrate/migrations/0155_remove_terra.sql new file mode 100644 index 00000000..904ca422 --- /dev/null +++ b/core/store/migrate/migrations/0155_remove_terra.sql @@ -0,0 +1,54 @@ +-- +goose Up + +DROP TABLE terra_msgs; +DROP TABLE terra_nodes; +DROP TABLE terra_chains; + +-- +goose Down + +-- Table Definition ---------------------------------------------- + +CREATE TABLE terra_chains ( + id text PRIMARY KEY, + cfg jsonb NOT NULL DEFAULT '{}'::jsonb, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + enabled boolean NOT NULL DEFAULT true +); + +-- Table Definition ---------------------------------------------- + +CREATE TABLE terra_msgs ( + id BIGSERIAL PRIMARY KEY, + terra_chain_id text NOT NULL REFERENCES terra_chains(id) ON DELETE CASCADE, + contract_id text NOT NULL, + raw bytea NOT NULL, + state text NOT NULL, + tx_hash text, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + type text NOT NULL DEFAULT '/terra.wasm.v1beta1.MsgExecuteContract'::text, + CONSTRAINT terra_msgs_check CHECK (tx_hash <> NULL::text OR state <> 'broadcasted'::text AND state <> 'confirmed'::text) +); + +-- Indices ------------------------------------------------------- + +CREATE INDEX idx_terra_msgs_terra_chain_id_state_contract_id ON terra_msgs(terra_chain_id text_ops,state text_ops,contract_id text_ops); + +-- Table Definition ---------------------------------------------- + +CREATE TABLE terra_nodes ( + id SERIAL PRIMARY KEY, + name character varying(255) NOT NULL CHECK (name::text <> ''::text), + terra_chain_id text NOT NULL REFERENCES terra_chains(id) ON DELETE CASCADE, + tendermint_url text CHECK (tendermint_url <> ''::text), + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); + +-- Indices ------------------------------------------------------- + +CREATE INDEX 
idx_nodes_terra_chain_id ON terra_nodes(terra_chain_id text_ops); +CREATE UNIQUE INDEX idx_terra_nodes_unique_name ON terra_nodes((lower(name::text)) text_ops); + + diff --git a/core/store/migrate/migrations/0156_add_evm_prefix_to_log_poller.sql b/core/store/migrate/migrations/0156_add_evm_prefix_to_log_poller.sql new file mode 100644 index 00000000..a0842d87 --- /dev/null +++ b/core/store/migrate/migrations/0156_add_evm_prefix_to_log_poller.sql @@ -0,0 +1,43 @@ +-- +goose Up + +-- alter log_poller_blocks table, constraints and indices +ALTER TABLE log_poller_blocks RENAME TO evm_log_poller_blocks; + +ALTER TABLE evm_log_poller_blocks RENAME CONSTRAINT log_poller_blocks_evm_chain_id_fkey TO evm_log_poller_blocks_evm_chain_id_fkey; + +-- alter logs table, constraints and indices +ALTER TABLE logs RENAME TO evm_logs; + +ALTER INDEX logs_idx RENAME TO evm_logs_idx; + +ALTER INDEX logs_idx_data_word_one RENAME TO evm_logs_idx_data_word_one; +ALTER INDEX logs_idx_data_word_two RENAME TO evm_logs_idx_data_word_two; +ALTER INDEX logs_idx_data_word_three RENAME TO evm_logs_idx_data_word_three; + +ALTER INDEX logs_idx_topic_two RENAME TO evm_logs_idx_topic_two; +ALTER INDEX logs_idx_topic_three RENAME TO evm_logs_idx_topic_three; +ALTER INDEX logs_idx_topic_four RENAME TO evm_logs_idx_topic_four; + +ALTER TABLE evm_logs RENAME CONSTRAINT logs_evm_chain_id_fkey TO evm_logs_evm_chain_id_fkey; + +-- +goose Down + +-- alter log_poller_blocks table, constraints and indices +ALTER TABLE evm_log_poller_blocks RENAME TO log_poller_blocks; + +ALTER TABLE log_poller_blocks RENAME CONSTRAINT evm_log_poller_blocks_evm_chain_id_fkey TO log_poller_blocks_evm_chain_id_fkey; + +-- alter logs table, constraints and indices +ALTER TABLE evm_logs RENAME TO logs; + +ALTER INDEX evm_logs_idx RENAME TO logs_idx; + +ALTER INDEX evm_logs_idx_data_word_one RENAME TO logs_idx_data_word_one; +ALTER INDEX evm_logs_idx_data_word_two RENAME TO logs_idx_data_word_two; +ALTER INDEX 
evm_logs_idx_data_word_three RENAME TO logs_idx_data_word_three; + +ALTER INDEX evm_logs_idx_topic_two RENAME TO logs_idx_topic_two; +ALTER INDEX evm_logs_idx_topic_three RENAME TO logs_idx_topic_three; +ALTER INDEX evm_logs_idx_topic_four RENAME TO logs_idx_topic_four; + +ALTER TABLE logs RENAME CONSTRAINT evm_logs_evm_chain_id_fkey TO logs_evm_chain_id_fkey; diff --git a/core/store/migrate/migrations/0157_add_log_poller_filters_table.sql b/core/store/migrate/migrations/0157_add_log_poller_filters_table.sql new file mode 100644 index 00000000..fb038241 --- /dev/null +++ b/core/store/migrate/migrations/0157_add_log_poller_filters_table.sql @@ -0,0 +1,16 @@ +-- +goose Up + +CREATE TABLE evm_log_poller_filters( + id BIGSERIAL PRIMARY KEY, + name TEXT NOT NULL CHECK (length(name) > 0), + address BYTEA CHECK (octet_length(address) = 20) NOT NULL, + event BYTEA CHECK (octet_length(event) = 32) NOT NULL, + evm_chain_id numeric(78,0) REFERENCES evm_chains (id) DEFERRABLE INITIALLY IMMEDIATE, + created_at TIMESTAMPTZ NOT NULL, + UNIQUE (name, evm_chain_id, address, event) +); + +-- +goose Down + +DROP TABLE evm_log_poller_filters CASCADE; + diff --git a/core/store/migrate/migrations/0158_deferrable_job_spec_errors_v2_fkey.sql b/core/store/migrate/migrations/0158_deferrable_job_spec_errors_v2_fkey.sql new file mode 100644 index 00000000..6e7a2221 --- /dev/null +++ b/core/store/migrate/migrations/0158_deferrable_job_spec_errors_v2_fkey.sql @@ -0,0 +1,13 @@ +-- BCF-2095 +-- +goose Up +ALTER TABLE job_spec_errors +DROP CONSTRAINT job_spec_errors_v2_job_id_fkey, +ADD CONSTRAINT job_spec_errors_v2_job_id_fkey + FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE + DEFERRABLE INITIALLY IMMEDIATE; + +-- +goose Down +ALTER TABLE job_spec_errors +DROP CONSTRAINT job_spec_errors_v2_job_id_fkey, +ADD CONSTRAINT job_spec_errors_v2_job_id_fkey + FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE; \ No newline at end of file diff --git 
a/core/store/migrate/migrations/0159_add_name_to_job_proposals.sql b/core/store/migrate/migrations/0159_add_name_to_job_proposals.sql new file mode 100644 index 00000000..223164a1 --- /dev/null +++ b/core/store/migrate/migrations/0159_add_name_to_job_proposals.sql @@ -0,0 +1,29 @@ +-- +goose Up +-- +goose StatementBegin + +-- Add the name column to job proposals +ALTER TABLE job_proposals +ADD COLUMN name TEXT; + +-- Attempt to populate the name field from a proposal's job spec definition. +-- If it does not match the regex, it will continue to search through the +-- versions to find one that matches. If none match, the job proposal name is +-- left blank. +UPDATE job_proposals +SET name = specs.name +FROM ( + SELECT job_proposal_id, (regexp_matches(job_proposal_specs.definition, 'name = ''(.+?)\''\n'))[1] as name, MAX(version) + FROM job_proposal_specs + GROUP BY job_proposal_id, name +) AS specs +WHERE job_proposals.id = specs.job_proposal_id + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +ALTER TABLE job_proposals +DROP COLUMN name; + +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0160_bhs_multiple_from_addresses.sql b/core/store/migrate/migrations/0160_bhs_multiple_from_addresses.sql new file mode 100644 index 00000000..de708dbd --- /dev/null +++ b/core/store/migrate/migrations/0160_bhs_multiple_from_addresses.sql @@ -0,0 +1,15 @@ +-- +goose Up +ALTER TABLE blockhash_store_specs ADD COLUMN from_addresses bytea[] DEFAULT '{}' NOT NULL ; + +UPDATE blockhash_store_specs SET from_addresses = from_addresses || from_address +WHERE from_address IS NOT NULL; + +ALTER TABLE blockhash_store_specs DROP COLUMN from_address; + +-- +goose Down +ALTER TABLE blockhash_store_specs ADD COLUMN from_address bytea; + +UPDATE blockhash_store_specs SET from_address = from_addresses[1] +WHERE array_length(from_addresses, 1) > 0; + +ALTER TABLE blockhash_store_specs DROP COLUMN from_addresses; diff --git 
a/core/store/migrate/migrations/0161_update_job_proposal_status.sql b/core/store/migrate/migrations/0161_update_job_proposal_status.sql new file mode 100644 index 00000000..dea7a787 --- /dev/null +++ b/core/store/migrate/migrations/0161_update_job_proposal_status.sql @@ -0,0 +1,93 @@ +-- +goose Up +-- +goose StatementBegin +-- JobProposals Table +-- We must remove the old constraint to add an enum value to support Postgres v11 +ALTER TABLE job_proposals DROP CONSTRAINT chk_job_proposals_status_fsm; + +ALTER TYPE job_proposal_status +RENAME TO job_proposal_status_old; + +CREATE TYPE job_proposal_status AS ENUM( + 'pending', + 'approved', + 'rejected', + 'cancelled', + 'deleted', + 'revoked' +); + +ALTER TABLE job_proposals +ALTER COLUMN status TYPE job_proposal_status USING status::text::job_proposal_status; + +DROP TYPE job_proposal_status_old; + +ALTER TABLE job_proposals +ADD CONSTRAINT chk_job_proposals_status_fsm CHECK ( + ( + status = 'pending' + AND external_job_id IS NULL + ) + OR ( + status = 'approved' + AND external_job_id IS NOT NULL + ) + OR ( + status = 'rejected' + AND external_job_id IS NULL + ) + OR ( + status = 'cancelled' + AND external_job_id IS NULL + ) + OR ( + status = 'deleted' + AND external_job_id IS NULL + ) + OR ( + status = 'revoked' + AND external_job_id IS NULL + ) + ); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +-- We must remove the old constraint to remove an enum value +ALTER TABLE job_proposals DROP CONSTRAINT chk_job_proposals_status_fsm; + +-- Drop the 'deleted' and 'revoked' enum values. Unfortunately postgres does not support +-- a way to remove a value from an enum. +ALTER TYPE job_proposal_status +RENAME TO job_proposal_status_old; + +CREATE TYPE job_proposal_status AS ENUM('pending', 'approved', 'rejected', 'cancelled'); + +-- This will fail if any records are using the 'deleted' or 'revoked' enum. +-- Manually update these as we cannot decide what you want to do with them.
+ALTER TABLE job_proposals +ALTER COLUMN status TYPE job_proposal_status USING status::text::job_proposal_status; + +DROP TYPE job_proposal_status_old; + +-- Add the constraint back +ALTER TABLE job_proposals +ADD CONSTRAINT chk_job_proposals_status_fsm CHECK ( + ( + status = 'pending' + AND external_job_id IS NULL + ) + OR ( + status = 'approved' + AND external_job_id IS NOT NULL + ) + OR ( + status = 'rejected' + AND external_job_id IS NULL + ) + OR ( + status = 'cancelled' + AND external_job_id IS NULL + ) + ); + +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0162_logpoller_block_timestamps.sql b/core/store/migrate/migrations/0162_logpoller_block_timestamps.sql new file mode 100644 index 00000000..a7b05128 --- /dev/null +++ b/core/store/migrate/migrations/0162_logpoller_block_timestamps.sql @@ -0,0 +1,8 @@ +-- +goose Up +ALTER TABLE evm_logs ADD COLUMN block_timestamp timestamptz NOT NULL DEFAULT now(); +ALTER TABLE evm_logs ALTER COLUMN block_timestamp DROP DEFAULT; +ALTER TABLE evm_log_poller_blocks ADD COLUMN block_timestamp timestamptz NOT NULL DEFAULT now(); +ALTER TABLE evm_log_poller_blocks ALTER COLUMN block_timestamp DROP DEFAULT; +-- +goose Down +ALTER TABLE evm_log_poller_blocks DROP COLUMN block_timestamp; +ALTER TABLE evm_logs DROP COLUMN block_timestamp; diff --git a/core/store/migrate/migrations/0163_mercury_jobs_multiple_per_contract.sql b/core/store/migrate/migrations/0163_mercury_jobs_multiple_per_contract.sql new file mode 100644 index 00000000..37b8be74 --- /dev/null +++ b/core/store/migrate/migrations/0163_mercury_jobs_multiple_per_contract.sql @@ -0,0 +1,18 @@ +-- +goose Up +ALTER TABLE ocr2_oracle_specs + -- NOTE: The cleanest way to do this would be to allow NULL feed_id and use + -- postgres 15's NULLS NOT DISTINCT feature on the index.
+ -- However, it isn't reasonable to expect all users to upgrade to pg 15 at + -- this time, so we require all specs to have a feed ID and use the zero + -- value to indicate a missing feed ID. + ADD COLUMN feed_id bytea CHECK (octet_length(feed_id) = 32) NOT NULL DEFAULT '\x0000000000000000000000000000000000000000000000000000000000000000', + DROP CONSTRAINT offchainreporting2_oracle_specs_unique_contract_addr; +; +CREATE UNIQUE INDEX offchainreporting2_oracle_specs_unique_contract_addr ON ocr2_oracle_specs (contract_id, feed_id); + +-- NOTE: bootstrap_specs did not originally have a unique index, so we do not add one here +ALTER TABLE bootstrap_specs ADD COLUMN feed_id bytea CHECK (feed_id IS NULL OR octet_length(feed_id) = 32); + +-- +goose Down +ALTER TABLE ocr2_oracle_specs DROP COLUMN feed_id, ADD CONSTRAINT offchainreporting2_oracle_specs_unique_contract_addr UNIQUE (contract_id); +ALTER TABLE bootstrap_specs DROP COLUMN feed_id; diff --git a/core/store/migrate/migrations/0164_add_cosmos.sql b/core/store/migrate/migrations/0164_add_cosmos.sql new file mode 100644 index 00000000..11520fce --- /dev/null +++ b/core/store/migrate/migrations/0164_add_cosmos.sql @@ -0,0 +1,24 @@ +-- +goose Up + +-- Table Definition ---------------------------------------------- + +CREATE TABLE cosmos_msgs ( + id BIGSERIAL PRIMARY KEY, + cosmos_chain_id text NOT NULL, + contract_id text NOT NULL, + raw bytea NOT NULL, + state text NOT NULL, + tx_hash text, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + type text NOT NULL DEFAULT '/cosmwasm.wasm.v1.MsgExecuteContract'::text, + CONSTRAINT cosmos_msgs_check CHECK (tx_hash <> NULL::text OR state <> 'broadcasted'::text AND state <> 'confirmed'::text) +); + +-- Indices ------------------------------------------------------- + +CREATE INDEX idx_cosmos_msgs_cosmos_chain_id_state_contract_id ON cosmos_msgs(cosmos_chain_id text_ops,state text_ops,contract_id text_ops); + +-- +goose Down + 
+DROP TABLE cosmos_msgs; diff --git a/core/store/migrate/migrations/0165_update_job_proposal_constraints.sql b/core/store/migrate/migrations/0165_update_job_proposal_constraints.sql new file mode 100644 index 00000000..924d1dba --- /dev/null +++ b/core/store/migrate/migrations/0165_update_job_proposal_constraints.sql @@ -0,0 +1,65 @@ +-- +goose Up +-- +goose StatementBegin +-- JobProposals Table +ALTER TABLE job_proposals DROP CONSTRAINT chk_job_proposals_status_fsm; + +ALTER TABLE job_proposals +ADD CONSTRAINT chk_job_proposals_status_fsm CHECK ( + ( + status = 'pending' + AND external_job_id IS NULL + ) + OR ( + status = 'approved' + AND external_job_id IS NOT NULL + ) + OR ( + status = 'rejected' + AND external_job_id IS NULL + ) + OR ( + status = 'cancelled' + AND external_job_id IS NULL + ) + OR ( + status = 'deleted' + ) + OR ( + status = 'revoked' + AND external_job_id IS NULL + ) + ); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +ALTER TABLE job_proposals DROP CONSTRAINT chk_job_proposals_status_fsm; + +ALTER TABLE job_proposals +ADD CONSTRAINT chk_job_proposals_status_fsm CHECK ( + ( + status = 'pending' + AND external_job_id IS NULL + ) + OR ( + status = 'approved' + AND external_job_id IS NOT NULL + ) + OR ( + status = 'rejected' + AND external_job_id IS NULL + ) + OR ( + status = 'cancelled' + AND external_job_id IS NULL + ) + OR ( + status = 'deleted' + AND external_job_id IS NULL + ) + OR ( + status = 'revoked' + AND external_job_id IS NULL + ) + ); +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0166_block_header_feeder_job.sql b/core/store/migrate/migrations/0166_block_header_feeder_job.sql new file mode 100644 index 00000000..f667b28e --- /dev/null +++ b/core/store/migrate/migrations/0166_block_header_feeder_job.sql @@ -0,0 +1,63 @@ +-- +goose Up +CREATE TABLE block_header_feeder_specs +( + id BIGSERIAL PRIMARY KEY, + coordinator_v1_address bytea DEFAULT NULL, + coordinator_v2_address bytea DEFAULT NULL, + 
wait_blocks bigint NOT NULL, + lookback_blocks bigint NOT NULL, + blockhash_store_address bytea NOT NULL, + batch_blockhash_store_address bytea NOT NULL, + poll_period bigint NOT NULL, + run_timeout bigint NOT NULL, + evm_chain_id numeric(78) + REFERENCES evm_chains + DEFERRABLE, + from_addresses bytea[] DEFAULT '{}' NOT NULL, + get_blockhashes_batch_size integer NOT NULL, + store_blockhashes_batch_size integer NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL + CONSTRAINT coordinator_v1_address_len_chk CHECK (octet_length(coordinator_v1_address) = 20) + CONSTRAINT coordinator_v2_address_len_chk CHECK (octet_length(coordinator_v2_address) = 20) + CONSTRAINT blockhash_store_address_len_chk CHECK (octet_length(blockhash_store_address) = 20) + CONSTRAINT batch_blockhash_store_address_len_chk CHECK (octet_length(batch_blockhash_store_address) = 20) + CONSTRAINT at_least_one_coordinator_chk CHECK (coordinator_v1_address IS NOT NULL OR coordinator_v2_address IS NOT NULL) +); + +ALTER TABLE jobs + ADD COLUMN block_header_feeder_spec_id INT REFERENCES block_header_feeder_specs (id), + DROP CONSTRAINT chk_only_one_spec, + ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + ocr_oracle_spec_id, + ocr2_oracle_spec_id, + direct_request_spec_id, + flux_monitor_spec_id, + keeper_spec_id, + cron_spec_id, + webhook_spec_id, + vrf_spec_id, + blockhash_store_spec_id, + block_header_feeder_spec_id, + bootstrap_spec_id) = 1); + +-- +goose Down +ALTER TABLE jobs + DROP CONSTRAINT chk_only_one_spec, + ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + ocr_oracle_spec_id, + ocr2_oracle_spec_id, + direct_request_spec_id, + flux_monitor_spec_id, + keeper_spec_id, + cron_spec_id, + webhook_spec_id, + vrf_spec_id, + blockhash_store_spec_id, + bootstrap_spec_id) = 1); + +ALTER TABLE jobs + DROP COLUMN block_header_feeder_spec_id; +DROP TABLE IF EXISTS block_header_feeder_specs; \ No newline at end of file diff --git 
a/core/store/migrate/migrations/0167_update_job_proposal_specs_status.sql b/core/store/migrate/migrations/0167_update_job_proposal_specs_status.sql new file mode 100644 index 00000000..3ebb10ea --- /dev/null +++ b/core/store/migrate/migrations/0167_update_job_proposal_specs_status.sql @@ -0,0 +1,44 @@ +-- +goose Up +-- +goose StatementBegin +DROP INDEX idx_job_proposal_specs_job_proposal_id_and_status; + +ALTER TYPE job_proposal_spec_status +RENAME TO job_proposal_spec_status_old; + +CREATE TYPE job_proposal_spec_status AS ENUM( + 'pending', + 'approved', + 'rejected', + 'cancelled', + 'revoked' +); + +ALTER TABLE job_proposal_specs +ALTER COLUMN status TYPE job_proposal_spec_status USING status::TEXT::job_proposal_spec_status; + +DROP TYPE job_proposal_spec_status_old; + +CREATE UNIQUE INDEX idx_job_proposal_specs_job_proposal_id_and_status ON job_proposal_specs(job_proposal_id) +WHERE status = 'approved'; + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +DROP INDEX idx_job_proposal_specs_job_proposal_id_and_status; + +ALTER TYPE job_proposal_spec_status +RENAME TO job_proposal_spec_status_old; + +CREATE TYPE job_proposal_spec_status AS ENUM('pending', 'approved', 'rejected', 'cancelled'); + +-- This will fail if any records are using the 'revoked' enum. +-- Manually update these as we cannot decide what you want to do with them. 
+ALTER TABLE job_proposal_specs +ALTER COLUMN status TYPE job_proposal_spec_status USING status::TEXT::job_proposal_spec_status; + +DROP TYPE job_proposal_spec_status_old; + +CREATE UNIQUE INDEX idx_job_proposal_specs_job_proposal_id_and_status ON job_proposal_specs(job_proposal_id) +WHERE status = 'approved'; + +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0168_drop_node_tables.sql b/core/store/migrate/migrations/0168_drop_node_tables.sql new file mode 100644 index 00000000..9ebce6cf --- /dev/null +++ b/core/store/migrate/migrations/0168_drop_node_tables.sql @@ -0,0 +1,64 @@ +-- +goose Up +-- Drop nodes tables +DROP TABLE evm_nodes; +DROP TABLE solana_nodes; +DROP TABLE starknet_nodes; + + +-- +goose Down +-- evm_nodes definition +CREATE TABLE evm_nodes ( + id serial NOT NULL, + "name" varchar(255) NOT NULL, + evm_chain_id numeric(78) NOT NULL, + ws_url text NULL, + http_url text NULL, + send_only bool NOT NULL, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + CONSTRAINT nodes_http_url_check CHECK ((http_url <> ''::text)), + CONSTRAINT nodes_name_check CHECK (((name)::text <> ''::text)), + CONSTRAINT nodes_pkey PRIMARY KEY (id), + CONSTRAINT nodes_ws_url_check CHECK ((ws_url <> ''::text)), + CONSTRAINT primary_or_sendonly CHECK (((send_only AND (ws_url IS NULL) AND (http_url IS NOT NULL)) OR ((NOT send_only) AND (ws_url IS NOT NULL)))) +); +CREATE INDEX idx_nodes_evm_chain_id ON evm_nodes USING btree (evm_chain_id); +CREATE UNIQUE INDEX idx_nodes_unique_name ON evm_nodes USING btree (lower((name)::text)); +CREATE UNIQUE INDEX idx_unique_http_url ON evm_nodes USING btree (http_url); +CREATE UNIQUE INDEX idx_unique_ws_url ON evm_nodes USING btree (ws_url); +-- evm_nodes foreign keys. 
+ALTER TABLE evm_nodes ADD CONSTRAINT nodes_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE; + +-- solana_nodes definition +CREATE TABLE solana_nodes ( + id serial NOT NULL, + "name" varchar(255) NOT NULL, + solana_chain_id text NOT NULL, + solana_url text NULL, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + CONSTRAINT solana_nodes_name_check CHECK (((name)::text <> ''::text)), + CONSTRAINT solana_nodes_pkey PRIMARY KEY (id), + CONSTRAINT solana_nodes_solana_url_check CHECK ((solana_url <> ''::text)) +); +CREATE INDEX idx_nodes_solana_chain_id ON solana_nodes USING btree (solana_chain_id); +CREATE UNIQUE INDEX idx_solana_nodes_unique_name ON solana_nodes USING btree (lower((name)::text)); +-- solana_nodes foreign keys +ALTER TABLE solana_nodes ADD CONSTRAINT solana_nodes_solana_chain_id_fkey FOREIGN KEY (solana_chain_id) REFERENCES solana_chains(id) ON DELETE CASCADE; + +-- starknet_nodes definition +CREATE TABLE starknet_nodes ( + id serial NOT NULL, + "name" varchar(255) NOT NULL, + starknet_chain_id text NOT NULL, + url text NULL, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + CONSTRAINT starknet_nodes_name_check CHECK (((name)::text <> ''::text)), + CONSTRAINT starknet_nodes_pkey PRIMARY KEY (id), + CONSTRAINT starknet_nodes_url_check CHECK ((url <> ''::text)) +); +CREATE INDEX idx_starknet_nodes_chain_id ON starknet_nodes USING btree (starknet_chain_id); +CREATE UNIQUE INDEX idx_starknet_nodes_unique_name ON starknet_nodes USING btree (lower((name)::text)); +-- starknet_nodes foreign keys +ALTER TABLE starknet_nodes ADD CONSTRAINT starknet_nodes_chain_id_fkey FOREIGN KEY (starknet_chain_id) REFERENCES starknet_chains(id) ON DELETE CASCADE; diff --git a/core/store/migrate/migrations/0169_log_poller_pruning.sql b/core/store/migrate/migrations/0169_log_poller_pruning.sql new file mode 100644 index 00000000..67b62711 --- /dev/null +++ 
b/core/store/migrate/migrations/0169_log_poller_pruning.sql @@ -0,0 +1,12 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE evm_log_poller_filters ADD COLUMN retention BIGINT DEFAULT 0; +CREATE INDEX evm_logs_idx_created_at ON evm_logs (created_at); +-- +goose StatementEnd + +-- +goose Down + +-- +goose StatementBegin +DROP INDEX evm_logs_idx_created_at; +ALTER TABLE evm_log_poller_filters DROP COLUMN retention; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0170_gateway_job_spec.sql b/core/store/migrate/migrations/0170_gateway_job_spec.sql new file mode 100644 index 00000000..669d1248 --- /dev/null +++ b/core/store/migrate/migrations/0170_gateway_job_spec.sql @@ -0,0 +1,52 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE gateway_specs ( + id SERIAL PRIMARY KEY, + gateway_config JSONB NOT NULL DEFAULT '{}', + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL +); + +ALTER TABLE jobs + ADD COLUMN gateway_spec_id INT REFERENCES gateway_specs (id), + DROP CONSTRAINT chk_only_one_spec, + ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + ocr_oracle_spec_id, + ocr2_oracle_spec_id, + direct_request_spec_id, + flux_monitor_spec_id, + keeper_spec_id, + cron_spec_id, + webhook_spec_id, + vrf_spec_id, + blockhash_store_spec_id, + block_header_feeder_spec_id, + bootstrap_spec_id, + gateway_spec_id) = 1); +-- +goose StatementEnd + +-- +goose Down + +-- +goose StatementBegin +ALTER TABLE jobs + DROP CONSTRAINT chk_only_one_spec, + ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + ocr_oracle_spec_id, + ocr2_oracle_spec_id, + direct_request_spec_id, + flux_monitor_spec_id, + keeper_spec_id, + cron_spec_id, + webhook_spec_id, + vrf_spec_id, + blockhash_store_spec_id, + block_header_feeder_spec_id, + bootstrap_spec_id) = 1); + +ALTER TABLE jobs + DROP COLUMN gateway_spec_id; + +DROP TABLE gateway_specs; +-- +goose StatementEnd \ No newline at end of file diff --git 
a/core/store/migrate/migrations/0171_create_legacy_gas_station_resources.sql b/core/store/migrate/migrations/0171_create_legacy_gas_station_resources.sql new file mode 100644 index 00000000..47a96b9c --- /dev/null +++ b/core/store/migrate/migrations/0171_create_legacy_gas_station_resources.sql @@ -0,0 +1,114 @@ +-- +goose Up +CREATE TABLE legacy_gas_station_server_specs ( + id BIGSERIAL PRIMARY KEY, + forwarder_address BYTEA NOT NULL, + evm_chain_id numeric(78) NOT NULL REFERENCES evm_chains DEFERRABLE, + ccip_chain_selector numeric(78) NOT NULL, + from_addresses BYTEA[] DEFAULT '{}' NOT NULL, + created_at timestamp WITH TIME ZONE NOT NULL, + updated_at timestamp WITH TIME ZONE NOT NULL, + CONSTRAINT forwarder_address_len_chk CHECK ( + octet_length(forwarder_address) = 20 + ) +); +CREATE TABLE legacy_gas_station_sidecar_specs ( + id BIGSERIAL PRIMARY KEY, + forwarder_address BYTEA NOT NULL, + off_ramp_address BYTEA NOT NULL, + lookback_blocks bigint NOT NULL, + poll_period bigint NOT NULL, + run_timeout bigint NOT NULL, + evm_chain_id numeric(78) NOT NULL REFERENCES evm_chains DEFERRABLE, + ccip_chain_selector numeric(78) NOT NULL, + created_at timestamp WITH TIME ZONE NOT NULL, + updated_at timestamp WITH TIME ZONE NOT NULL, + CONSTRAINT forwarder_address_len_chk CHECK ( + octet_length(forwarder_address) = 20 + ), + CONSTRAINT off_ramp_address_len_chk CHECK ( + octet_length(off_ramp_address) = 20 + ) +); +CREATE TABLE legacy_gasless_txs ( + legacy_gasless_tx_id TEXT PRIMARY KEY, + forwarder_address BYTEA NOT NULL, + from_address BYTEA NOT NULL, + target_address BYTEA NOT NULL, + receiver_address BYTEA NOT NULL, + nonce numeric(78) NOT NULL, + amount numeric(78) NOT NULL, + source_chain_id numeric(78) NOT NULL, + destination_chain_id numeric(78) NOT NULL, + valid_until_time numeric(78) NOT NULL, + tx_signature BYTEA NOT NULL, + tx_status text NOT NULL, + token_name text NOT NULL, + token_version text NOT NULL, + eth_tx_id bigint REFERENCES eth_txes INITIALLY 
DEFERRED, + ccip_message_id BYTEA, + failure_reason text, + created_at timestamp WITH TIME ZONE NOT NULL, + updated_at timestamp WITH TIME ZONE NOT NULL, + CONSTRAINT forwarder_address_len_chk CHECK ( + octet_length(forwarder_address) = 20 + ), + CONSTRAINT target_address_len_chk CHECK ( + octet_length(target_address) = 20 + ), + CONSTRAINT receiver_address_len_chk CHECK ( + octet_length(receiver_address) = 20 + ), + CONSTRAINT ccip_message_id_len_chk CHECK ( + octet_length(ccip_message_id) = 32 + ) +); +CREATE INDEX idx_legacy_gasless_txs_source_chain_id_tx_status ON legacy_gasless_txs(source_chain_id, tx_status); +CREATE INDEX idx_legacy_gasless_txs_source_destination_id_tx_status ON legacy_gasless_txs(destination_chain_id, tx_status); +ALTER TABLE + jobs +ADD + COLUMN legacy_gas_station_server_spec_id INT REFERENCES legacy_gas_station_server_specs (id), +ADD + COLUMN legacy_gas_station_sidecar_spec_id INT REFERENCES legacy_gas_station_sidecar_specs (id), +DROP + CONSTRAINT chk_only_one_spec, +ADD + CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + ocr_oracle_spec_id, ocr2_oracle_spec_id, + direct_request_spec_id, flux_monitor_spec_id, + keeper_spec_id, cron_spec_id, webhook_spec_id, + vrf_spec_id, blockhash_store_spec_id, + block_header_feeder_spec_id, bootstrap_spec_id, + gateway_spec_id, + legacy_gas_station_server_spec_id, + legacy_gas_station_sidecar_spec_id + ) = 1 + ); +-- +goose Down +ALTER TABLE + jobs +DROP + CONSTRAINT chk_only_one_spec, +ADD + CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + ocr_oracle_spec_id, ocr2_oracle_spec_id, + direct_request_spec_id, flux_monitor_spec_id, + keeper_spec_id, cron_spec_id, webhook_spec_id, + vrf_spec_id, blockhash_store_spec_id, + block_header_feeder_spec_id, bootstrap_spec_id + ) = 1 + ); +ALTER TABLE + jobs +DROP + COLUMN legacy_gas_station_server_spec_id, +DROP + COLUMN legacy_gas_station_sidecar_spec_id; +DROP + TABLE IF EXISTS legacy_gas_station_server_specs; +DROP + TABLE IF EXISTS 
legacy_gas_station_sidecar_specs; +DROP + TABLE IF EXISTS legacy_gasless_txs; diff --git a/core/store/migrate/migrations/0172_add_tx_hash_legacy_gasless_tx.sql b/core/store/migrate/migrations/0172_add_tx_hash_legacy_gasless_tx.sql new file mode 100644 index 00000000..f5e38f4b --- /dev/null +++ b/core/store/migrate/migrations/0172_add_tx_hash_legacy_gasless_tx.sql @@ -0,0 +1,13 @@ +-- +goose Up +ALTER TABLE legacy_gasless_txs ADD tx_hash BYTEA; +ALTER TABLE legacy_gasless_txs ADD CONSTRAINT tx_hash_len_chk CHECK ( + octet_length(tx_hash) = 32 +); + +ALTER TABLE legacy_gas_station_sidecar_specs ADD status_update_url text NOT NULL; + +-- +goose Down +ALTER TABLE legacy_gasless_txs DROP CONSTRAINT tx_hash_len_chk; +ALTER TABLE legacy_gasless_txs DROP tx_hash; + +ALTER TABLE legacy_gas_station_sidecar_specs DROP status_update_url; diff --git a/core/store/migrate/migrations/0173_add_s4_functions_table.sql b/core/store/migrate/migrations/0173_add_s4_functions_table.sql new file mode 100644 index 00000000..bc66673a --- /dev/null +++ b/core/store/migrate/migrations/0173_add_s4_functions_table.sql @@ -0,0 +1,29 @@ +-- +goose Up + +CREATE SCHEMA "s4"; + +CREATE TABLE "s4".functions( + id BIGSERIAL PRIMARY KEY, + address NUMERIC(78,0) NOT NULL, + slot_id INT NOT NULL, + version INT NOT NULL, + expiration BIGINT NOT NULL, + confirmed BOOLEAN NOT NULL, + payload BYTEA NOT NULL, + signature BYTEA NOT NULL, + updated_at TIMESTAMPTZ NOT NULL +); + +CREATE UNIQUE INDEX functions_address_slot_id_idx ON "s4".functions(address, slot_id); +CREATE INDEX functions_expiration_idx ON "s4".functions(expiration); +CREATE INDEX functions_confirmed_idx ON "s4".functions(confirmed); + +-- +goose Down + +DROP INDEX IF EXISTS functions_address_slot_id_idx; +DROP INDEX IF EXISTS functions_expiration_idx; +DROP INDEX IF EXISTS functions_confirmed_idx; + +DROP TABLE "s4".functions; + +DROP SCHEMA "s4"; \ No newline at end of file diff --git a/core/store/migrate/migrations/0174_vrf_owner.sql 
b/core/store/migrate/migrations/0174_vrf_owner.sql new file mode 100644 index 00000000..cade12f2 --- /dev/null +++ b/core/store/migrate/migrations/0174_vrf_owner.sql @@ -0,0 +1,8 @@ +-- +goose Up + +ALTER TABLE vrf_specs + ADD COLUMN IF NOT EXISTS "vrf_owner_address" bytea + CHECK (octet_length(vrf_owner_address) = 20); + +-- +goose Down +ALTER TABLE vrf_specs DROP COLUMN "vrf_owner_address"; diff --git a/core/store/migrate/migrations/0175_logpoller_querying_improvements.sql b/core/store/migrate/migrations/0175_logpoller_querying_improvements.sql new file mode 100644 index 00000000..bfe5f1ea --- /dev/null +++ b/core/store/migrate/migrations/0175_logpoller_querying_improvements.sql @@ -0,0 +1,37 @@ +-- +goose Up +-- +goose StatementBegin + +-- This index should make the following queries work in almost const time, instead of doing sequential scan. +-- This subquery is heavily used by most of the logpoller's functions +-- +-- SELECT * FROM evm_log_poller_blocks +-- WHERE evm_chain_id = 420 +-- ORDER BY block_number DESC +-- LIMIT 1; +CREATE INDEX idx_evm_log_poller_blocks_order_by_block + ON evm_log_poller_blocks (evm_chain_id, block_number DESC); + +-- This index optimizes queries used in the following functions: +-- * logpoller.LogsCreatedAfter +-- * logpoller.LatestLogByEventSigWithConfs +-- +-- Example query: +-- SELECT * FROM evm_logs +-- WHERE evm_chain_id = 420 +-- AND address = '\xABC' +-- AND event_sig = '\xABC' +-- AND block_number <= (SELECT COALESCE(block_number, 0) FROM evm_log_poller_blocks WHERE evm_chain_id = 420) +-- AND created_at > '2023-05-31T07:29:11.29Z' +-- ORDER BY created_at ASC; +CREATE INDEX idx_evm_logs_ordered_by_block_and_created_at + ON evm_logs (evm_chain_id, address, event_sig, block_number, created_at); + +-- +goose StatementEnd + + +-- +goose Down + +-- +goose StatementBegin +DROP INDEX IF EXISTS idx_evm_logs_ordered_by_block_and_created_at; +DROP INDEX IF EXISTS idx_evm_log_poller_blocks_order_by_block; +-- +goose StatementEnd diff
--git a/core/store/migrate/migrations/0176_s4_shared_table.sql b/core/store/migrate/migrations/0176_s4_shared_table.sql new file mode 100644 index 00000000..392e3dee --- /dev/null +++ b/core/store/migrate/migrations/0176_s4_shared_table.sql @@ -0,0 +1,28 @@ +-- +goose Up + +ALTER TABLE "s4".functions RENAME TO shared; + +ALTER TABLE "s4".shared ADD COLUMN IF NOT EXISTS namespace TEXT NOT NULL DEFAULT ''; + +DROP INDEX IF EXISTS "s4".functions_address_slot_id_idx; +DROP INDEX IF EXISTS "s4".functions_expiration_idx; +DROP INDEX IF EXISTS "s4".functions_confirmed_idx; + +CREATE UNIQUE INDEX shared_namespace_address_slot_id_idx ON "s4".shared(namespace, address, slot_id); +CREATE INDEX shared_namespace_expiration_idx ON "s4".shared(namespace, expiration); +CREATE INDEX shared_namespace_confirmed_idx ON "s4".shared(namespace, confirmed); + +-- +goose Down + +DROP INDEX IF EXISTS "s4".shared_namespace_address_slot_id_idx; +DROP INDEX IF EXISTS "s4".shared_namespace_expiration_idx; +DROP INDEX IF EXISTS "s4".shared_namespace_confirmed_idx; + +ALTER TABLE "s4".shared DROP COLUMN IF EXISTS namespace; + +ALTER TABLE "s4".shared RENAME TO functions; + +CREATE UNIQUE INDEX functions_address_slot_id_idx ON "s4".functions(address, slot_id); +CREATE INDEX functions_expiration_idx ON "s4".functions(expiration); +CREATE INDEX functions_confirmed_idx ON "s4".functions(confirmed); + diff --git a/core/store/migrate/migrations/0177_add_ocr_protocol_state.sql b/core/store/migrate/migrations/0177_add_ocr_protocol_state.sql new file mode 100644 index 00000000..4193d876 --- /dev/null +++ b/core/store/migrate/migrations/0177_add_ocr_protocol_state.sql @@ -0,0 +1,12 @@ +-- +goose Up +CREATE TABLE IF NOT EXISTS ocr_protocol_states ( + config_digest bytea NOT NULL CHECK (octet_length(config_digest) = 32), + key text NOT NULL CHECK (key != ''), + value bytea NOT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_ocr_protocol_states ON ocr_protocol_states (config_digest, key); + + +-- +goose 
Down +DROP TABLE ocr_protocol_states; diff --git a/core/store/migrate/migrations/0178_drop_access_list.sql b/core/store/migrate/migrations/0178_drop_access_list.sql new file mode 100644 index 00000000..d70a5592 --- /dev/null +++ b/core/store/migrate/migrations/0178_drop_access_list.sql @@ -0,0 +1,6 @@ +-- +goose Up +ALTER TABLE eth_txes DROP COLUMN access_list; + + +-- +goose Down +ALTER TABLE eth_txes ADD COLUMN access_list jsonb; diff --git a/core/store/migrate/migrations/0179_notify_channel_on_log_inserts.sql b/core/store/migrate/migrations/0179_notify_channel_on_log_inserts.sql new file mode 100644 index 00000000..6c7e899e --- /dev/null +++ b/core/store/migrate/migrations/0179_notify_channel_on_log_inserts.sql @@ -0,0 +1,27 @@ +-- +goose Up + +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION PUBLIC.notifysavedlogtopics() RETURNS trigger + LANGUAGE plpgsql +AS $$ +BEGIN + PERFORM pg_notify( + 'insert_on_evm_logs'::text, + -- hex encoded address plus comma separated list of hex encoded topic values + -- e.g. "
:," + encode(NEW.address, 'hex') || ':' || array_to_string(array(SELECT encode(unnest(NEW.topics), 'hex')), ',') + ); + RETURN NULL; +END +$$; + +DROP TRIGGER IF EXISTS notify_insert_on_evm_logs_topics ON PUBLIC.evm_logs; +CREATE TRIGGER notify_insert_on_evm_logs_topics AFTER INSERT ON PUBLIC.evm_logs FOR EACH ROW EXECUTE PROCEDURE PUBLIC.notifysavedlogtopics(); +-- +goose StatementEnd + +-- +goose Down + +-- +goose StatementBegin +DROP TRIGGER IF EXISTS notify_insert_on_evm_logs_topics ON PUBLIC.evm_logs; +DROP FUNCTION IF EXISTS PUBLIC.notifysavedlogtopics; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0180_ocr2_multiple_configs_per_spec.sql b/core/store/migrate/migrations/0180_ocr2_multiple_configs_per_spec.sql new file mode 100644 index 00000000..60321073 --- /dev/null +++ b/core/store/migrate/migrations/0180_ocr2_multiple_configs_per_spec.sql @@ -0,0 +1,11 @@ +-- +goose Up +ALTER TABLE ocr2_contract_configs + ADD COLUMN plugin_id INTEGER NOT NULL DEFAULT 0, + DROP CONSTRAINT offchainreporting2_contract_configs_pkey, + ADD CONSTRAINT ocr2_contract_configs_unique_id_pair UNIQUE (ocr2_oracle_spec_id, plugin_id); + +-- +goose Down +ALTER TABLE ocr2_contract_configs + DROP CONSTRAINT ocr2_contract_configs_unique_id_pair, + ADD CONSTRAINT offchainreporting2_contract_configs_pkey PRIMARY KEY (ocr2_oracle_spec_id), + DROP COLUMN plugin_id; diff --git a/core/store/migrate/migrations/0181_bhs_vrfv2plus.sql b/core/store/migrate/migrations/0181_bhs_vrfv2plus.sql new file mode 100644 index 00000000..77a0414f --- /dev/null +++ b/core/store/migrate/migrations/0181_bhs_vrfv2plus.sql @@ -0,0 +1,12 @@ +-- +goose Up +ALTER TABLE blockhash_store_specs + ADD COLUMN IF NOT EXISTS "coordinator_v2_plus_address" bytea + CHECK (octet_length(coordinator_v2_plus_address) = 20); + +ALTER TABLE block_header_feeder_specs + ADD COLUMN IF NOT EXISTS "coordinator_v2_plus_address" bytea + CHECK (octet_length(coordinator_v2_plus_address) = 20); + +-- +goose Down +ALTER TABLE 
blockhash_store_specs DROP COLUMN "coordinator_v2_plus_address"; +ALTER TABLE block_header_feeder_specs DROP COLUMN "coordinator_v2_plus_address"; \ No newline at end of file diff --git a/core/store/migrate/migrations/0182_nullable_feed_id.sql b/core/store/migrate/migrations/0182_nullable_feed_id.sql new file mode 100644 index 00000000..94b2e201 --- /dev/null +++ b/core/store/migrate/migrations/0182_nullable_feed_id.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE ocr2_oracle_specs ALTER COLUMN feed_id DROP NOT NULL, ALTER COLUMN feed_id SET DEFAULT NULL; +UPDATE ocr2_oracle_specs SET feed_id=NULL WHERE feed_id='\x0000000000000000000000000000000000000000000000000000000000000000'; + +-- +goose Down +UPDATE ocr2_oracle_specs SET feed_id='\x0000000000000000000000000000000000000000000000000000000000000000' WHERE feed_id IS NULL; +ALTER TABLE ocr2_oracle_specs ALTER COLUMN feed_id SET NOT NULL, ALTER COLUMN feed_id SET DEFAULT '\x0000000000000000000000000000000000000000000000000000000000000000'; diff --git a/core/store/migrate/migrations/0183_functions_new_fields.sql b/core/store/migrate/migrations/0183_functions_new_fields.sql new file mode 100644 index 00000000..404d5bb8 --- /dev/null +++ b/core/store/migrate/migrations/0183_functions_new_fields.sql @@ -0,0 +1,30 @@ +-- +goose Up + +-- see 0154_ocr2dr_requests_table.sql for initial definition +ALTER TABLE ocr2dr_requests RENAME TO functions_requests; +ALTER INDEX idx_ocr2dr_requests RENAME TO idx_functions_requests; + +ALTER TABLE functions_requests DROP COLUMN run_id; + +ALTER TABLE functions_requests + ADD COLUMN flags bytea, + ADD COLUMN aggregation_method INTEGER, + ADD COLUMN callback_gas_limit INTEGER, + ADD COLUMN coordinator_contract_address bytea CHECK (octet_length(coordinator_contract_address) = 20), + ADD COLUMN onchain_metadata bytea, + ADD COLUMN processing_metadata bytea; + +-- +goose Down + +ALTER TABLE functions_requests + DROP COLUMN flags, + DROP COLUMN aggregation_method, + DROP COLUMN 
callback_gas_limit, + DROP COLUMN coordinator_contract_address, + DROP COLUMN onchain_metadata, + DROP COLUMN processing_metadata; + +ALTER TABLE functions_requests ADD COLUMN run_id bigint; + +ALTER INDEX idx_functions_requests RENAME TO idx_ocr2dr_requests; +ALTER TABLE functions_requests RENAME TO ocr2dr_requests; diff --git a/core/store/migrate/migrations/0184_chains_tables_removal.sql b/core/store/migrate/migrations/0184_chains_tables_removal.sql new file mode 100644 index 00000000..d928cc45 --- /dev/null +++ b/core/store/migrate/migrations/0184_chains_tables_removal.sql @@ -0,0 +1,53 @@ +-- +goose Up +DROP TABLE evm_chains CASCADE; +DROP TABLE solana_chains CASCADE; +DROP TABLE starknet_chains CASCADE; + +-- +goose Down +-- evm_chains definition +CREATE TABLE evm_chains ( + id numeric(78) NOT NULL, + cfg jsonb NOT NULL DEFAULT '{}'::jsonb, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + enabled bool NOT NULL DEFAULT true, + CONSTRAINT evm_chains_pkey PRIMARY KEY (id) +); + +-- evm_chains foreign keys +ALTER TABLE evm_log_poller_filters ADD CONSTRAINT evm_log_poller_filters_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) DEFERRABLE; +ALTER TABLE evm_log_poller_blocks ADD CONSTRAINT evm_log_poller_blocks_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE log_broadcasts ADD CONSTRAINT log_broadcasts_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE block_header_feeder_specs ADD CONSTRAINT block_header_feeder_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) DEFERRABLE; +ALTER TABLE direct_request_specs ADD CONSTRAINT direct_request_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE evm_logs ADD CONSTRAINT evm_logs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) 
REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE vrf_specs ADD CONSTRAINT vrf_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE evm_heads ADD CONSTRAINT heads_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE evm_forwarders ADD CONSTRAINT evm_forwarders_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE blockhash_store_specs ADD CONSTRAINT blockhash_store_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE evm_key_states ADD CONSTRAINT eth_key_states_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE log_broadcasts_pending ADD CONSTRAINT log_broadcasts_pending_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE eth_txes ADD CONSTRAINT eth_txes_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE keeper_specs ADD CONSTRAINT keeper_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE flux_monitor_specs ADD CONSTRAINT flux_monitor_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; +ALTER TABLE ocr_oracle_specs ADD CONSTRAINT offchainreporting_oracle_specs_evm_chain_id_fkey FOREIGN KEY (evm_chain_id) REFERENCES evm_chains(id) ON DELETE CASCADE DEFERRABLE NOT VALID; + +-- solana_chains definition +CREATE TABLE solana_chains ( + id text NOT NULL, + cfg jsonb NOT NULL DEFAULT '{}'::jsonb, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + enabled bool NOT NULL DEFAULT 
true, + CONSTRAINT solana_chains_pkey PRIMARY KEY (id) +); + +-- starknet_chains definition +CREATE TABLE starknet_chains ( + id text NOT NULL, + cfg jsonb NOT NULL DEFAULT '{}'::jsonb, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + enabled bool NOT NULL DEFAULT true, + CONSTRAINT starknet_chains_pkey PRIMARY KEY (id) +); \ No newline at end of file diff --git a/core/store/migrate/migrations/0185_create_mercury_transmit_requests.sql b/core/store/migrate/migrations/0185_create_mercury_transmit_requests.sql new file mode 100644 index 00000000..54a090f6 --- /dev/null +++ b/core/store/migrate/migrations/0185_create_mercury_transmit_requests.sql @@ -0,0 +1,16 @@ +-- +goose Up + +CREATE TABLE mercury_transmit_requests ( + payload_hash BYTEA PRIMARY KEY, + payload BYTEA NOT NULL, + config_digest BYTEA NOT NULL, + epoch INT NOT NULL, + round INT NOT NULL, + extra_hash BYTEA NOT NULL +); + +CREATE INDEX idx_mercury_transmission_requests_epoch_round ON mercury_transmit_requests (epoch DESC, round DESC); + +-- +goose Down + +DROP TABLE mercury_transmit_requests; diff --git a/core/store/migrate/migrations/0186_create_feed_latest_reports.sql b/core/store/migrate/migrations/0186_create_feed_latest_reports.sql new file mode 100644 index 00000000..9d90562f --- /dev/null +++ b/core/store/migrate/migrations/0186_create_feed_latest_reports.sql @@ -0,0 +1,13 @@ +-- +goose Up + +CREATE TABLE feed_latest_reports ( + feed_id BYTEA PRIMARY KEY CHECK (octet_length(feed_id) = 32), + report BYTEA NOT NULL, + epoch BIGINT NOT NULL, + round INT NOT NULL, + updated_at TIMESTAMPTZ NOT NULL +); + +-- +goose Down + +DROP TABLE feed_latest_reports; diff --git a/core/store/migrate/migrations/0187_trusted_bhs_vrfv2plus.sql b/core/store/migrate/migrations/0187_trusted_bhs_vrfv2plus.sql new file mode 100644 index 00000000..288e5e00 --- /dev/null +++ b/core/store/migrate/migrations/0187_trusted_bhs_vrfv2plus.sql @@ -0,0 +1,10 @@ +-- +goose Up +ALTER TABLE blockhash_store_specs + 
ADD COLUMN IF NOT EXISTS "trusted_blockhash_store_address" bytea + CHECK (octet_length(trusted_blockhash_store_address) = 20); + +ALTER TABLE blockhash_store_specs + ADD COLUMN IF NOT EXISTS "trusted_blockhash_store_batch_size" integer DEFAULT 0; +-- +goose Down +ALTER TABLE blockhash_store_specs DROP COLUMN "trusted_blockhash_store_address"; +ALTER TABLE blockhash_store_specs DROP COLUMN "trusted_blockhash_store_batch_size"; \ No newline at end of file diff --git a/core/store/migrate/migrations/0188_nullable_feed_id_fix.sql b/core/store/migrate/migrations/0188_nullable_feed_id_fix.sql new file mode 100644 index 00000000..572f8da6 --- /dev/null +++ b/core/store/migrate/migrations/0188_nullable_feed_id_fix.sql @@ -0,0 +1,9 @@ +-- +goose Up +UPDATE ocr2_oracle_specs +SET feed_id=NULL +WHERE feed_id = '\x0000000000000000000000000000000000000000000000000000000000000000'; + +-- +goose Down +UPDATE ocr2_oracle_specs +SET feed_id='\x0000000000000000000000000000000000000000000000000000000000000000' +WHERE feed_id IS NULL; diff --git a/core/store/migrate/migrations/0189_create_automation_upkeep_state.sql b/core/store/migrate/migrations/0189_create_automation_upkeep_state.sql new file mode 100644 index 00000000..ffd4efea --- /dev/null +++ b/core/store/migrate/migrations/0189_create_automation_upkeep_state.sql @@ -0,0 +1,25 @@ +-- +goose Up + +CREATE TABLE evm_upkeep_states ( + id SERIAL PRIMARY KEY, + work_id TEXT NOT NULL, + evm_chain_id NUMERIC(20) NOT NULL, + upkeep_id NUMERIC(78) NOT NULL, -- upkeep id is an evm word (uint256) which has a max size of precision 78 + completion_state SMALLINT NOT NULL, + ineligibility_reason SMALLINT NOT NULL, + block_number BIGINT NOT NULL, + inserted_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP NOT NULL, + CONSTRAINT work_id_len_chk CHECK ( + length(work_id) > 0 AND length(work_id) < 255 + ) +); + +CREATE UNIQUE INDEX idx_evm_upkeep_state_chainid_workid ON evm_upkeep_states (evm_chain_id, work_id); +CREATE INDEX 
idx_evm_upkeep_state_added_at_chain_id ON evm_upkeep_states (evm_chain_id, inserted_at); + +-- +goose Down + +DROP INDEX IF EXISTS idx_evm_upkeep_state_chainid_workid; +DROP INDEX IF EXISTS idx_evm_upkeep_state_added_at_chain_id; + +DROP TABLE evm_upkeep_states; \ No newline at end of file diff --git a/core/store/migrate/migrations/0190_add_job_id_to_mercury_tables.sql b/core/store/migrate/migrations/0190_add_job_id_to_mercury_tables.sql new file mode 100644 index 00000000..5f76142b --- /dev/null +++ b/core/store/migrate/migrations/0190_add_job_id_to_mercury_tables.sql @@ -0,0 +1,11 @@ +-- +goose Up + +DELETE FROM feed_latest_reports; +ALTER TABLE feed_latest_reports ADD COLUMN job_id INTEGER NOT NULL REFERENCES jobs(id) DEFERRABLE INITIALLY IMMEDIATE; +DELETE FROM mercury_transmit_requests; +ALTER TABLE mercury_transmit_requests ADD COLUMN job_id INTEGER NOT NULL REFERENCES jobs(id) DEFERRABLE INITIALLY IMMEDIATE; + +-- +goose Down + +ALTER TABLE feed_latest_reports DROP COLUMN job_id; +ALTER TABLE mercury_transmit_requests DROP COLUMN job_id; diff --git a/core/store/migrate/migrations/0191_mercury_tables_job_id_cascade.sql b/core/store/migrate/migrations/0191_mercury_tables_job_id_cascade.sql new file mode 100644 index 00000000..a407feb9 --- /dev/null +++ b/core/store/migrate/migrations/0191_mercury_tables_job_id_cascade.sql @@ -0,0 +1,11 @@ +-- +goose Up +ALTER TABLE mercury_transmit_requests DROP CONSTRAINT mercury_transmit_requests_job_id_fkey; +ALTER TABLE mercury_transmit_requests ADD CONSTRAINT mercury_transmit_requests_job_id_fkey FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE; +ALTER TABLE feed_latest_reports DROP CONSTRAINT feed_latest_reports_job_id_fkey; +ALTER TABLE feed_latest_reports ADD CONSTRAINT feed_latest_reports_job_id_fkey FOREIGN KEY (job_id) REFERENCES jobs(id) DEFERRABLE INITIALLY IMMEDIATE; + +-- +goose Down +ALTER TABLE mercury_transmit_requests DROP CONSTRAINT 
mercury_transmit_requests_job_id_fkey; +ALTER TABLE mercury_transmit_requests ADD CONSTRAINT mercury_transmit_requests_job_id_fkey FOREIGN KEY (job_id) REFERENCES jobs(id) DEFERRABLE INITIALLY IMMEDIATE; +ALTER TABLE feed_latest_reports DROP CONSTRAINT feed_latest_reports_job_id_fkey; +ALTER TABLE feed_latest_reports ADD CONSTRAINT feed_latest_reports_job_id_fkey FOREIGN KEY (job_id) REFERENCES jobs(id) DEFERRABLE INITIALLY IMMEDIATE; diff --git a/core/store/migrate/migrations/0192_add_request_id_column_eth_txes_table.sql b/core/store/migrate/migrations/0192_add_request_id_column_eth_txes_table.sql new file mode 100644 index 00000000..6a9ab784 --- /dev/null +++ b/core/store/migrate/migrations/0192_add_request_id_column_eth_txes_table.sql @@ -0,0 +1,7 @@ +-- +goose Up + +ALTER TABLE eth_txes ADD COLUMN idempotency_key varchar(2000) UNIQUE; + +-- +goose Down + +ALTER TABLE eth_txes DROP COLUMN idempotency_key; diff --git a/core/store/migrate/migrations/0193_s4_alter_version_type.sql b/core/store/migrate/migrations/0193_s4_alter_version_type.sql new file mode 100644 index 00000000..36179d5d --- /dev/null +++ b/core/store/migrate/migrations/0193_s4_alter_version_type.sql @@ -0,0 +1,7 @@ +-- +goose Up + +ALTER TABLE "s4".shared ALTER COLUMN version TYPE NUMERIC; + +-- +goose Down + +ALTER TABLE "s4".shared ALTER COLUMN version TYPE INT USING version::integer; diff --git a/core/store/migrate/migrations/0194_evm_schema.sql b/core/store/migrate/migrations/0194_evm_schema.sql new file mode 100644 index 00000000..65b9bddd --- /dev/null +++ b/core/store/migrate/migrations/0194_evm_schema.sql @@ -0,0 +1,165 @@ +-- +goose Up +CREATE SCHEMA evm; +SET search_path TO evm,public; + +ALTER TABLE public.evm_forwarders SET SCHEMA evm; +ALTER TABLE evm.evm_forwarders RENAME TO forwarders; + +ALTER TABLE public.evm_heads SET SCHEMA evm; +ALTER TABLE evm.evm_heads RENAME TO heads; + +ALTER TABLE public.evm_key_states SET SCHEMA evm; +ALTER TABLE evm.evm_key_states RENAME TO key_states; + 
+ALTER TABLE public.evm_log_poller_blocks SET SCHEMA evm; +ALTER TABLE evm.evm_log_poller_blocks RENAME TO log_poller_blocks; + +ALTER TABLE public.evm_log_poller_filters SET SCHEMA evm; +ALTER TABLE evm.evm_log_poller_filters RENAME TO log_poller_filters; + + + +ALTER TABLE public.evm_upkeep_states SET SCHEMA evm; +ALTER TABLE evm.evm_upkeep_states RENAME TO upkeep_states; + +ALTER TABLE public.eth_receipts SET SCHEMA evm; +ALTER TABLE evm.eth_receipts RENAME TO receipts; + +ALTER TABLE public.eth_tx_attempts SET SCHEMA evm; +ALTER TABLE evm.eth_tx_attempts RENAME TO tx_attempts; + +--------------------- +-- Handle log triggers +--------------------- +DROP TRIGGER IF EXISTS notify_insert_on_evm_logs_topics ON PUBLIC.evm_logs; +DROP FUNCTION IF EXISTS public.notifysavedlogtopics(); + +ALTER TABLE public.evm_logs SET SCHEMA evm; +ALTER TABLE evm.evm_logs RENAME TO logs; + +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION evm.notifysavedlogtopics() RETURNS trigger + LANGUAGE plpgsql +AS $$ +BEGIN + PERFORM pg_notify( + 'evm.insert_on_logs'::text, + -- hex encoded address plus comma separated list of hex encoded topic values + -- e.g. "
:," + encode(NEW.address, 'hex') || ':' || array_to_string(array(SELECT encode(unnest(NEW.topics), 'hex')), ',') + ); + RETURN NULL; +END +$$; + +DROP TRIGGER IF EXISTS notify_insert_on_logs_topics ON evm.logs; +CREATE TRIGGER notify_insert_on_logs_topics AFTER INSERT ON evm.logs FOR EACH ROW EXECUTE PROCEDURE evm.notifysavedlogtopics(); +-- +goose StatementEnd + +--------------------- +-- Handle tx triggers +--------------------- +DROP TRIGGER IF EXISTS notify_eth_tx_insertion on public.eth_txes; +DROP FUNCTION IF EXISTS public.notifyethtxinsertion(); + +ALTER TABLE public.eth_txes SET SCHEMA evm; +ALTER TABLE evm.eth_txes RENAME TO txes; + + +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION evm.notifytxinsertion() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + PERFORM pg_notify('evm.insert_on_txes'::text, encode(NEW.from_address, 'hex')); + RETURN NULL; + END + $$; + +DROP TRIGGER IF EXISTS notify_tx_insertion on evm.txes; +CREATE TRIGGER notify_tx_insertion AFTER INSERT ON evm.txes FOR EACH ROW EXECUTE PROCEDURE evm.notifytxinsertion(); +-- +goose StatementEnd + + +-- +goose Down +SET search_path TO evm,public; +ALTER TABLE evm.forwarders SET SCHEMA public; +ALTER TABLE public.forwarders RENAME TO evm_forwarders; + +ALTER TABLE evm.heads SET SCHEMA public; +ALTER TABLE public.heads RENAME TO evm_heads; + +ALTER TABLE evm.key_states SET SCHEMA public; +ALTER TABLE public.key_states RENAME TO evm_key_states; + +ALTER TABLE evm.log_poller_blocks SET SCHEMA public; +ALTER TABLE public.log_poller_blocks RENAME TO evm_log_poller_blocks; + +ALTER TABLE evm.log_poller_filters SET SCHEMA public; +ALTER TABLE public.log_poller_filters RENAME TO evm_log_poller_filters; + +ALTER TABLE evm.upkeep_states SET SCHEMA public; +ALTER table public.upkeep_states RENAME TO evm_upkeep_states; + +ALTER TABLE evm.receipts SET SCHEMA public; +ALTER TABLE public.receipts RENAME TO eth_receipts; + +ALTER TABLE evm.tx_attempts SET SCHEMA public; +ALTER TABLE public.tx_attempts 
RENAME TO eth_tx_attempts; + + +--------------------- +-- Handle log triggers +--------------------- + +DROP TRIGGER IF EXISTS notify_insert_on_logs_topics ON evm.logs; +DROP FUNCTION IF EXISTS evm.notifysavedlogtopics(); + +ALTER TABLE evm.logs SET SCHEMA public; +ALTER TABLE public.logs RENAME TO evm_logs; + +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION PUBLIC.notifysavedlogtopics() RETURNS trigger + LANGUAGE plpgsql +AS $$ +BEGIN + PERFORM pg_notify( + 'insert_on_evm_logs'::text, + -- hex encoded address plus comma separated list of hex encoded topic values + -- e.g. "
:," + encode(NEW.address, 'hex') || ':' || array_to_string(array(SELECT encode(unnest(NEW.topics), 'hex')), ',') + ); + RETURN NULL; +END +$$; + +DROP TRIGGER IF EXISTS notify_insert_on_evm_logs_topics ON PUBLIC.evm_logs; +CREATE TRIGGER notify_insert_on_evm_logs_topics AFTER INSERT ON PUBLIC.evm_logs FOR EACH ROW EXECUTE PROCEDURE PUBLIC.notifysavedlogtopics(); +-- +goose StatementEnd + +--------------------- +-- Handle tx triggers +--------------------- + +DROP TRIGGER IF EXISTS notify_tx_insertion on evm.txes; +DROP FUNCTION IF EXISTS evm.notifytxinsertion(); + +ALTER TABLE evm.txes SET SCHEMA public; +ALTER TABLE public.txes RENAME TO eth_txes; + +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION public.notifyethtxinsertion() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + PERFORM pg_notify('insert_on_eth_txes'::text, encode(NEW.from_address, 'hex')); + RETURN NULL; + END + $$; + +DROP TRIGGER IF EXISTS notify_eth_tx_insertion on public.eth_txes; +CREATE TRIGGER notify_eth_tx_insertion AFTER INSERT ON public.eth_txes FOR EACH ROW EXECUTE PROCEDURE public.notifyethtxinsertion(); +-- +goose StatementEnd + +DROP SCHEMA evm; +SET search_path TO public; \ No newline at end of file diff --git a/core/store/migrate/migrations/0195_add_not_null_to_evm_chain_id_in_job_specs.go b/core/store/migrate/migrations/0195_add_not_null_to_evm_chain_id_in_job_specs.go new file mode 100644 index 00000000..e9355688 --- /dev/null +++ b/core/store/migrate/migrations/0195_add_not_null_to_evm_chain_id_in_job_specs.go @@ -0,0 +1,71 @@ +package migrations + +import ( + "context" + "database/sql" + "os" + + "github.com/pkg/errors" + "github.com/pressly/goose/v3" + + "github.com/goplugin/pluginv3.0/v2/core/config/env" +) + +func init() { + goose.AddMigrationContext(Up195, Down195) +} + +const ( + addNullConstraintsToSpecs = ` + ALTER TABLE direct_request_specs ALTER COLUMN evm_chain_id SET NOT NULL; + ALTER TABLE flux_monitor_specs ALTER COLUMN evm_chain_id SET NOT NULL; + ALTER 
TABLE ocr_oracle_specs ALTER COLUMN evm_chain_id SET NOT NULL; + ALTER TABLE keeper_specs ALTER COLUMN evm_chain_id SET NOT NULL; + ALTER TABLE vrf_specs ALTER COLUMN evm_chain_id SET NOT NULL; + ALTER TABLE blockhash_store_specs ALTER COLUMN evm_chain_id SET NOT NULL; + ALTER TABLE block_header_feeder_specs ALTER COLUMN evm_chain_id SET NOT NULL; + ` + + dropNullConstraintsFromSpecs = ` + ALTER TABLE direct_request_specs ALTER COLUMN evm_chain_id DROP NOT NULL; + ALTER TABLE flux_monitor_specs ALTER COLUMN evm_chain_id DROP NOT NULL; + ALTER TABLE ocr_oracle_specs ALTER COLUMN evm_chain_id DROP NOT NULL; + ALTER TABLE keeper_specs ALTER COLUMN evm_chain_id DROP NOT NULL; + ALTER TABLE vrf_specs ALTER COLUMN evm_chain_id DROP NOT NULL; + ALTER TABLE blockhash_store_specs ALTER COLUMN evm_chain_id DROP NOT NULL; + ALTER TABLE block_header_feeder_specs ALTER COLUMN evm_chain_id DROP NOT NULL; + ` +) + +// nolint +func Up195(ctx context.Context, tx *sql.Tx) error { + chainID, set := os.LookupEnv(env.EVMChainIDNotNullMigration0195) + if set { + updateQueries := []string{ + `UPDATE direct_request_specs SET evm_chain_id = $1 WHERE evm_chain_id IS NULL;`, + `UPDATE flux_monitor_specs SET evm_chain_id = $1 WHERE evm_chain_id IS NULL;`, + `UPDATE ocr_oracle_specs SET evm_chain_id = $1 WHERE evm_chain_id IS NULL;`, + `UPDATE keeper_specs SET evm_chain_id = $1 WHERE evm_chain_id IS NULL;`, + `UPDATE vrf_specs SET evm_chain_id = $1 WHERE evm_chain_id IS NULL;`, + `UPDATE blockhash_store_specs SET evm_chain_id = $1 WHERE evm_chain_id IS NULL;`, + `UPDATE block_header_feeder_specs SET evm_chain_id = $1 WHERE evm_chain_id IS NULL;`, + } + for i := range updateQueries { + // Propagate ctx so cancellation/deadlines apply to the backfill updates too. + _, err := tx.ExecContext(ctx, updateQueries[i], chainID) + if err != nil { + return errors.Wrap(err, "failed to set missing evm chain ids") + } + } + } + + _, err := tx.ExecContext(ctx, addNullConstraintsToSpecs) + return errors.Wrap(err, "failed to add null constraints") +} + +// nolint +func Down195(ctx context.Context, tx 
*sql.Tx) error { + if _, err := tx.ExecContext(ctx, dropNullConstraintsFromSpecs); err != nil { + return err + } + return nil +} diff --git a/core/store/migrate/migrations/0196_add_txhash_index_evm_logs.sql b/core/store/migrate/migrations/0196_add_txhash_index_evm_logs.sql new file mode 100644 index 00000000..a0bfe31d --- /dev/null +++ b/core/store/migrate/migrations/0196_add_txhash_index_evm_logs.sql @@ -0,0 +1,5 @@ +-- +goose Up +create index evm_logs_idx_tx_hash on evm.logs (tx_hash); + +-- +goose Down +DROP INDEX IF EXISTS evm_logs_idx_tx_hash; \ No newline at end of file diff --git a/core/store/migrate/migrations/0197_add_heartbeat_to_bhs_feeder.sql b/core/store/migrate/migrations/0197_add_heartbeat_to_bhs_feeder.sql new file mode 100644 index 00000000..74a7a74c --- /dev/null +++ b/core/store/migrate/migrations/0197_add_heartbeat_to_bhs_feeder.sql @@ -0,0 +1,5 @@ +-- +goose Up +ALTER TABLE blockhash_store_specs ADD COLUMN heartbeat_period bigint DEFAULT 0 NOT NULL; + +-- +goose Down +ALTER TABLE blockhash_store_specs DROP COLUMN heartbeat_period; diff --git a/core/store/migrate/migrations/0198_add_block_timestamp_index.sql b/core/store/migrate/migrations/0198_add_block_timestamp_index.sql new file mode 100644 index 00000000..8f20f4d8 --- /dev/null +++ b/core/store/migrate/migrations/0198_add_block_timestamp_index.sql @@ -0,0 +1,5 @@ +-- +goose Up +create index log_poller_blocks_by_timestamp on evm.log_poller_blocks (evm_chain_id, block_timestamp); + +-- +goose Down +DROP INDEX IF EXISTS evm.log_poller_blocks_by_timestamp; \ No newline at end of file diff --git a/core/store/migrate/migrations/0199_remove_next_nonce_from_keystore.sql b/core/store/migrate/migrations/0199_remove_next_nonce_from_keystore.sql new file mode 100644 index 00000000..07cdfb02 --- /dev/null +++ b/core/store/migrate/migrations/0199_remove_next_nonce_from_keystore.sql @@ -0,0 +1,9 @@ +-- +goose Up + +ALTER TABLE evm.key_states DROP COLUMN next_nonce; +ALTER TABLE keys DROP COLUMN 
next_nonce; + +-- +goose Down + +ALTER TABLE evm.key_states ADD next_nonce bigint NOT NULL DEFAULT 0; +ALTER TABLE keys ADD next_nonce bigint NOT NULL DEFAULT 0; \ No newline at end of file diff --git a/core/store/migrate/migrations/0200_evm_logs_add_block_timestamp_index.sql b/core/store/migrate/migrations/0200_evm_logs_add_block_timestamp_index.sql new file mode 100644 index 00000000..544a81f2 --- /dev/null +++ b/core/store/migrate/migrations/0200_evm_logs_add_block_timestamp_index.sql @@ -0,0 +1,15 @@ +-- +goose Up + +-- Start with dropping the index introduced in a previous migration - we are not going to use it +DROP INDEX IF EXISTS evm.log_poller_blocks_by_timestamp; + +CREATE INDEX evm_logs_by_timestamp + ON evm.logs (evm_chain_id, address, event_sig, block_timestamp, block_number); + +-- +goose Down +create index log_poller_blocks_by_timestamp on evm.log_poller_blocks (evm_chain_id, block_timestamp); + +drop index if exists evm.evm_logs_by_timestamp; + + + diff --git a/core/store/migrate/migrations/0201_add_finalized_block_number.sql b/core/store/migrate/migrations/0201_add_finalized_block_number.sql new file mode 100644 index 00000000..db15ebbe --- /dev/null +++ b/core/store/migrate/migrations/0201_add_finalized_block_number.sql @@ -0,0 +1,11 @@ +-- +goose Up +ALTER TABLE evm.log_poller_blocks + ADD COLUMN finalized_block_number + bigint not null + default 0 + check (finalized_block_number >= 0); + + +-- +goose Down +ALTER TABLE evm.log_poller_blocks + DROP COLUMN finalized_block_number; diff --git a/core/store/migrate/migrations/0202_default_values_for_last_finalized_block.sql b/core/store/migrate/migrations/0202_default_values_for_last_finalized_block.sql new file mode 100644 index 00000000..0f93cd27 --- /dev/null +++ b/core/store/migrate/migrations/0202_default_values_for_last_finalized_block.sql @@ -0,0 +1,33 @@ +-- +goose Up + +WITH variables AS ( + SELECT + evm_chain_id, + CASE + WHEN evm_chain_id = 43113 then 1 -- Avax Fuji + WHEN evm_chain_id = 
43114 then 1 -- Avax Mainnet + WHEN evm_chain_id = 84531 THEN 200 -- Base Goerli + WHEN evm_chain_id = 8453 THEN 200 -- Base Mainnet + WHEN evm_chain_id = 42220 THEN 1 -- Celo Mainnet + WHEN evm_chain_id = 44787 THEN 1 -- Celo Testnet + WHEN evm_chain_id = 8217 THEN 1 -- Klaytn Mainnet + WHEN evm_chain_id = 1001 THEN 1 -- Klaytn Baobab (testnet) + WHEN evm_chain_id = 1088 THEN 1 -- Metis Mainnet + WHEN evm_chain_id = 588 THEN 1 -- Metis Rinkeby + WHEN evm_chain_id = 420 THEN 200 -- Optimism Goerli + WHEN evm_chain_id = 10 THEN 200 -- Optimism Mainnet + WHEN evm_chain_id = 137 THEN 500 -- Polygon Mainnet + WHEN evm_chain_id = 80001 THEN 500 -- Polygon Mumbai + WHEN evm_chain_id = 534352 THEN 1 -- Scroll Mainnet + WHEN evm_chain_id = 534351 THEN 1 -- Scroll Sepolia + ELSE 50 -- all other chains + END AS finality_depth + FROM evm.log_poller_blocks + GROUP BY evm_chain_id +) + +UPDATE evm.log_poller_blocks AS lpb +SET finalized_block_number = greatest(lpb.block_number - v.finality_depth, 0) +FROM variables v +WHERE lpb.evm_chain_id = v.evm_chain_id + AND lpb.finalized_block_number = 0; \ No newline at end of file diff --git a/core/store/migrate/migrations/0203_search_path.sql b/core/store/migrate/migrations/0203_search_path.sql new file mode 100644 index 00000000..06213067 --- /dev/null +++ b/core/store/migrate/migrations/0203_search_path.sql @@ -0,0 +1,4 @@ +-- +goose Up +-- BFC-2694 - fix search path so public takes precedence. No need for a downward migration. 
+SET search_path TO public,evm; + diff --git a/core/store/migrate/migrations/0204_create_eal_tables.sql b/core/store/migrate/migrations/0204_create_eal_tables.sql new file mode 100644 index 00000000..8a8d20a9 --- /dev/null +++ b/core/store/migrate/migrations/0204_create_eal_tables.sql @@ -0,0 +1,97 @@ +-- +goose Up +CREATE TABLE eal_specs ( + id BIGSERIAL PRIMARY KEY, + forwarder_address BYTEA NOT NULL, + evm_chain_id NUMERIC(78) NOT NULL, + from_addresses BYTEA[] DEFAULT '{}' NOT NULL, + lookback_blocks BIGINT NOT NULL, + poll_period BIGINT NOT NULL, + run_timeout BIGINT NOT NULL, + created_at TIMESTAMP WITH TIME ZONE NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL, + CONSTRAINT forwarder_address_len_chk CHECK ( + octet_length(forwarder_address) = 20 + ) +); + +ALTER TABLE + jobs +ADD + COLUMN eal_spec_id INT REFERENCES eal_specs (id), +DROP + CONSTRAINT chk_only_one_spec, +ADD + CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + ocr_oracle_spec_id, ocr2_oracle_spec_id, + direct_request_spec_id, flux_monitor_spec_id, + keeper_spec_id, cron_spec_id, webhook_spec_id, + vrf_spec_id, blockhash_store_spec_id, + block_header_feeder_spec_id, bootstrap_spec_id, + gateway_spec_id, + legacy_gas_station_server_spec_id, + legacy_gas_station_sidecar_spec_id, + eal_spec_id + ) = 1 + ); + +CREATE TABLE eal_txs ( + request_id TEXT PRIMARY KEY, + forwarder_address BYTEA NOT NULL, + from_address BYTEA NOT NULL, + target_address BYTEA NOT NULL, + evm_chain_id NUMERIC(78) NOT NULL, + payload BYTEA NOT NULL, + tx_status TEXT NOT NULL, + gas_limit BIGINT NOT NULL, + ccip_message_id BYTEA, + failure_reason TEXT, + status_update_url TEXT, + tx_hash BYTEA, + tx_id BIGINT REFERENCES txes INITIALLY DEFERRED, + created_at TIMESTAMP WITH TIME ZONE NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL, + CONSTRAINT forwarder_address_len_chk CHECK ( + octet_length(forwarder_address) = 20 + ), + CONSTRAINT target_address_len_chk CHECK ( + octet_length(target_address) = 20 + ), + 
CONSTRAINT from_address_len_chk CHECK ( + octet_length(from_address) = 20 + ), + CONSTRAINT ccip_message_id_len_chk CHECK ( + octet_length(ccip_message_id) = 32 + ), + CONSTRAINT tx_hash_len_chk CHECK ( + octet_length(tx_hash) = 32 + ) +); +CREATE INDEX idx_eal_txs_chain_id_tx_status ON eal_txs(evm_chain_id, tx_status); + +-- +goose Down +ALTER TABLE + jobs +DROP + CONSTRAINT chk_only_one_spec, +ADD + CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + ocr_oracle_spec_id, ocr2_oracle_spec_id, + direct_request_spec_id, flux_monitor_spec_id, + keeper_spec_id, cron_spec_id, webhook_spec_id, + vrf_spec_id, blockhash_store_spec_id, + block_header_feeder_spec_id, bootstrap_spec_id, + gateway_spec_id, + legacy_gas_station_server_spec_id, + legacy_gas_station_sidecar_spec_id + ) = 1 + ); +ALTER TABLE + jobs +DROP + COLUMN eal_spec_id; +DROP + TABLE IF EXISTS eal_specs; +DROP + TABLE IF EXISTS eal_txs; diff --git a/core/store/migrate/migrations/0205_add_feed_id_to_mercury_transmit_requests.sql b/core/store/migrate/migrations/0205_add_feed_id_to_mercury_transmit_requests.sql new file mode 100644 index 00000000..04cf5a25 --- /dev/null +++ b/core/store/migrate/migrations/0205_add_feed_id_to_mercury_transmit_requests.sql @@ -0,0 +1,14 @@ +-- +goose Up +ALTER TABLE mercury_transmit_requests ADD COLUMN feed_id BYTEA CHECK (feed_id IS NULL OR octet_length(feed_id) = 32); +DROP INDEX idx_mercury_transmission_requests_epoch_round; +CREATE INDEX idx_mercury_transmission_requests_job_id_epoch_round ON mercury_transmit_requests (job_id, epoch DESC, round DESC); +CREATE INDEX idx_mercury_transmit_requests_job_id ON mercury_transmit_requests (job_id); +CREATE INDEX idx_mercury_transmit_requests_feed_id ON mercury_transmit_requests (feed_id); +CREATE INDEX idx_mercury_feed_latest_reports_job_id ON feed_latest_reports (job_id); + +-- +goose Down +ALTER TABLE mercury_transmit_requests DROP COLUMN feed_id; +DROP INDEX idx_mercury_transmit_requests_job_id; +DROP INDEX 
idx_mercury_feed_latest_reports_job_id; +CREATE INDEX idx_mercury_transmission_requests_epoch_round ON mercury_transmit_requests (epoch DESC, round DESC); +DROP INDEX idx_mercury_transmission_requests_job_id_epoch_round; diff --git a/core/store/migrate/migrations/0206_remove_tx_insert_trigger.sql b/core/store/migrate/migrations/0206_remove_tx_insert_trigger.sql new file mode 100644 index 00000000..94b2e4aa --- /dev/null +++ b/core/store/migrate/migrations/0206_remove_tx_insert_trigger.sql @@ -0,0 +1,18 @@ +-- +goose Up +DROP TRIGGER IF EXISTS notify_tx_insertion on evm.txes; +DROP FUNCTION IF EXISTS evm.notifyethtxinsertion(); + + +-- +goose Down +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION evm.notifytxinsertion() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + PERFORM pg_notify('evm.insert_on_txes'::text, encode(NEW.from_address, 'hex')); + RETURN NULL; + END + $$; + +CREATE TRIGGER notify_tx_insertion AFTER INSERT ON evm.txes FOR EACH ROW EXECUTE PROCEDURE evm.notifytxinsertion(); +-- +goose StatementEnd \ No newline at end of file diff --git a/core/store/migrate/migrations/0207_drop_insert_on_terra_msg.sql b/core/store/migrate/migrations/0207_drop_insert_on_terra_msg.sql new file mode 100644 index 00000000..f4ae4b98 --- /dev/null +++ b/core/store/migrate/migrations/0207_drop_insert_on_terra_msg.sql @@ -0,0 +1,20 @@ +-- +goose Up + +-- +goose StatementBegin +DROP TRIGGER IF EXISTS insert_on_terra_msg ON PUBLIC.cosmos_msgs; +DROP FUNCTION IF EXISTS PUBLIC.notify_terra_msg_insert; +-- +goose StatementEnd + +-- +goose Down + +-- +goose StatementBegin +CREATE FUNCTION notify_terra_msg_insert() RETURNS trigger + LANGUAGE plpgsql +AS $$ +BEGIN + PERFORM pg_notify('insert_on_terra_msg'::text, NOW()::text); + RETURN NULL; +END +$$; +CREATE TRIGGER notify_terra_msg_insertion AFTER INSERT ON cosmos_msgs FOR EACH STATEMENT EXECUTE PROCEDURE notify_terra_msg_insert(); +-- +goose StatementEnd diff --git 
a/core/store/migrate/migrations/0208_create_ldap_sessions_table.sql b/core/store/migrate/migrations/0208_create_ldap_sessions_table.sql new file mode 100644 index 00000000..f788cdab --- /dev/null +++ b/core/store/migrate/migrations/0208_create_ldap_sessions_table.sql @@ -0,0 +1,22 @@ +-- +goose Up +CREATE TABLE IF NOT EXISTS ldap_sessions ( + id text PRIMARY KEY, + user_email text NOT NULL, + user_role user_roles, + localauth_user BOOLEAN, + created_at timestamp with time zone NOT NULL +); + +CREATE TABLE IF NOT EXISTS ldap_user_api_tokens ( + user_email text PRIMARY KEY, + user_role user_roles, + localauth_user BOOLEAN, + token_key text UNIQUE NOT NULL, + token_salt text NOT NULL, + token_hashed_secret text NOT NULL, + created_at timestamp with time zone NOT NULL +); + +-- +goose Down +DROP TABLE ldap_sessions; +DROP TABLE ldap_user_api_tokens; diff --git a/core/store/migrate/migrations/0209_add_resume_pipeline_task_flags_to_evm_txes.sql b/core/store/migrate/migrations/0209_add_resume_pipeline_task_flags_to_evm_txes.sql new file mode 100644 index 00000000..dbe7e91b --- /dev/null +++ b/core/store/migrate/migrations/0209_add_resume_pipeline_task_flags_to_evm_txes.sql @@ -0,0 +1,15 @@ +-- +goose Up +ALTER TABLE evm.txes ADD COLUMN "signal_callback" BOOL DEFAULT FALSE; +ALTER TABLE evm.txes ADD COLUMN "callback_completed" BOOL DEFAULT FALSE; + +UPDATE evm.txes +SET signal_callback = TRUE AND callback_completed = FALSE +WHERE evm.txes.pipeline_task_run_id IN ( + SELECT pipeline_task_runs.id FROM pipeline_task_runs + INNER JOIN pipeline_runs ON pipeline_runs.id = pipeline_task_runs.pipeline_run_id + WHERE pipeline_runs.state = 'suspended' +); + +-- +goose Down +ALTER TABLE evm.txes DROP COLUMN "signal_callback"; +ALTER TABLE evm.txes DROP COLUMN "callback_completed"; diff --git a/core/store/migrate/migrations/0210_remove_evm_key_states_fk_constraint.sql b/core/store/migrate/migrations/0210_remove_evm_key_states_fk_constraint.sql new file mode 100644 index 
00000000..119de9d2 --- /dev/null +++ b/core/store/migrate/migrations/0210_remove_evm_key_states_fk_constraint.sql @@ -0,0 +1,4 @@ +-- +goose Up +ALTER TABLE evm.txes DROP CONSTRAINT eth_txes_evm_chain_id_from_address_fkey; +-- +goose Down +ALTER TABLE evm.txes ADD CONSTRAINT eth_txes_evm_chain_id_from_address_fkey FOREIGN KEY (evm_chain_id, from_address) REFERENCES evm.key_states(evm_chain_id, address) ON DELETE CASCADE DEFERRABLE INITIALLY IMMEDIATE NOT VALID; \ No newline at end of file diff --git a/core/store/migrate/migrations/0211_log_poller_word_indexes.sql b/core/store/migrate/migrations/0211_log_poller_word_indexes.sql new file mode 100644 index 00000000..3d2e8bf8 --- /dev/null +++ b/core/store/migrate/migrations/0211_log_poller_word_indexes.sql @@ -0,0 +1,6 @@ +-- +goose Up +CREATE INDEX evm_logs_idx_data_word_four ON evm.logs (substring(data from 97 for 32)); + + +-- +goose Down +DROP INDEX IF EXISTS evm.evm_logs_idx_data_word_four; diff --git a/core/store/migrate/migrations/0212_ocr_oracle_specs_drop_p2p_bootstrap_peers.sql b/core/store/migrate/migrations/0212_ocr_oracle_specs_drop_p2p_bootstrap_peers.sql new file mode 100644 index 00000000..d38370e3 --- /dev/null +++ b/core/store/migrate/migrations/0212_ocr_oracle_specs_drop_p2p_bootstrap_peers.sql @@ -0,0 +1,5 @@ +-- +goose Up +ALTER TABLE ocr_oracle_specs DROP COLUMN p2p_bootstrap_peers; + +-- +goose Down +ALTER TABLE ocr_oracle_specs ADD COLUMN p2p_bootstrap_peers text[]; diff --git a/core/store/migrate/migrations/0213_liquidity_balancer_specs.sql b/core/store/migrate/migrations/0213_liquidity_balancer_specs.sql new file mode 100644 index 00000000..cd717181 --- /dev/null +++ b/core/store/migrate/migrations/0213_liquidity_balancer_specs.sql @@ -0,0 +1,49 @@ +-- +goose Up +CREATE TABLE liquidity_balancer_specs ( + id BIGSERIAL PRIMARY KEY, + liquidity_balancer_config JSONB NOT NULL +); + +ALTER TABLE + jobs +ADD COLUMN + liquidity_balancer_spec_id BIGINT REFERENCES liquidity_balancer_specs(id), +DROP 
CONSTRAINT chk_only_one_spec, +ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + ocr_oracle_spec_id, ocr2_oracle_spec_id, + direct_request_spec_id, flux_monitor_spec_id, + keeper_spec_id, cron_spec_id, webhook_spec_id, + vrf_spec_id, blockhash_store_spec_id, + block_header_feeder_spec_id, bootstrap_spec_id, + gateway_spec_id, + legacy_gas_station_server_spec_id, + legacy_gas_station_sidecar_spec_id, + eal_spec_id, + liquidity_balancer_spec_id + ) = 1 +); + +-- +goose Down +ALTER TABLE + jobs +DROP CONSTRAINT chk_only_one_spec, +ADD CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + ocr_oracle_spec_id, ocr2_oracle_spec_id, + direct_request_spec_id, flux_monitor_spec_id, + keeper_spec_id, cron_spec_id, webhook_spec_id, + vrf_spec_id, blockhash_store_spec_id, + block_header_feeder_spec_id, bootstrap_spec_id, + gateway_spec_id, + legacy_gas_station_server_spec_id, + legacy_gas_station_sidecar_spec_id, + eal_spec_id + ) = 1 +); +ALTER TABLE + jobs +DROP COLUMN + liquidity_balancer_spec_id; +DROP TABLE + liquidity_balancer_specs; diff --git a/core/store/migrate/migrations/0214_add_custom_reverts_vrf.sql b/core/store/migrate/migrations/0214_add_custom_reverts_vrf.sql new file mode 100644 index 00000000..a2865fce --- /dev/null +++ b/core/store/migrate/migrations/0214_add_custom_reverts_vrf.sql @@ -0,0 +1,5 @@ +-- +goose Up +ALTER TABLE vrf_specs ADD COLUMN custom_reverts_pipeline_enabled boolean DEFAULT FALSE NOT NULL; + +-- +goose Down +ALTER TABLE vrf_specs DROP COLUMN custom_reverts_pipeline_enabled; diff --git a/core/store/migrate/migrations/0215_functions_subscriptions.sql b/core/store/migrate/migrations/0215_functions_subscriptions.sql new file mode 100644 index 00000000..c3859d42 --- /dev/null +++ b/core/store/migrate/migrations/0215_functions_subscriptions.sql @@ -0,0 +1,19 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE functions_subscriptions( + router_contract_address bytea, + subscription_id bigint, + owner bytea CHECK (octet_length(owner) 
= 20) NOT NULL, + balance bigint, + blocked_balance bigint, + proposed_owner bytea, + consumers bytea[], + flags bytea, + PRIMARY KEY(router_contract_address, subscription_id) +); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE IF EXISTS functions_subscriptions; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0216_drop_terra_state_transition_trigger.sql b/core/store/migrate/migrations/0216_drop_terra_state_transition_trigger.sql new file mode 100644 index 00000000..77a7c04a --- /dev/null +++ b/core/store/migrate/migrations/0216_drop_terra_state_transition_trigger.sql @@ -0,0 +1,27 @@ +-- +goose Up + +-- +goose StatementBegin +DROP FUNCTION IF EXISTS PUBLIC.check_terra_msg_state_transition; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +CREATE OR REPLACE FUNCTION PUBLIC.check_terra_msg_state_transition() RETURNS TRIGGER AS $$ +DECLARE +state_transition_map jsonb := json_build_object( + 'unstarted', json_build_object('errored', true, 'started', true), + 'started', json_build_object('errored', true, 'broadcasted', true), + 'broadcasted', json_build_object('errored', true, 'confirmed', true)); +BEGIN + IF NOT state_transition_map ? OLD.state THEN + RAISE EXCEPTION 'Invalid from state %. Valid from states %', OLD.state, state_transition_map; +END IF; + IF NOT state_transition_map->OLD.state ? NEW.state THEN + RAISE EXCEPTION 'Invalid state transition from % to %. 
Valid to states %', OLD.state, NEW.state, state_transition_map->OLD.state; +END IF; +RETURN NEW; +END +$$ LANGUAGE plpgsql; + +-- +goose StatementEnd \ No newline at end of file diff --git a/core/store/migrate/migrations/0217_drop_unused_job_triggers.sql b/core/store/migrate/migrations/0217_drop_unused_job_triggers.sql new file mode 100644 index 00000000..a59e5d5b --- /dev/null +++ b/core/store/migrate/migrations/0217_drop_unused_job_triggers.sql @@ -0,0 +1,48 @@ +-- +goose Up +-- +goose StatementBegin +DROP TRIGGER IF EXISTS notify_job_created ON PUBLIC.jobs; +DROP FUNCTION IF EXISTS PUBLIC.notifyjobcreated(); + +DROP TRIGGER IF EXISTS notify_job_deleted ON PUBLIC.jobs; +DROP FUNCTION IF EXISTS PUBLIC.notifyjobdeleted(); + +DROP TRIGGER IF EXISTS notify_pipeline_run_started ON PUBLIC.pipeline_runs; +DROP FUNCTION IF EXISTS PUBLIC.notifypipelinerunstarted(); +-- +goose StatementEnd + + +-- +goose Down +-- +goose StatementBegin +CREATE FUNCTION PUBLIC.notifyjobcreated() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + PERFORM pg_notify('insert_on_jobs', NEW.id::text); + RETURN NEW; + END + $$; +CREATE TRIGGER notify_job_created AFTER INSERT ON PUBLIC.jobs FOR EACH ROW EXECUTE PROCEDURE PUBLIC.notifyjobcreated(); + +CREATE FUNCTION PUBLIC.notifyjobdeleted() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + PERFORM pg_notify('delete_from_jobs', OLD.id::text); + RETURN OLD; + END + $$; +CREATE TRIGGER notify_job_deleted AFTER DELETE ON PUBLIC.jobs FOR EACH ROW EXECUTE PROCEDURE PUBLIC.notifyjobdeleted(); + +CREATE FUNCTION PUBLIC.notifypipelinerunstarted() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + IF NEW.finished_at IS NULL THEN + PERFORM pg_notify('pipeline_run_started', NEW.id::text); + END IF; + RETURN NEW; + END + $$; +CREATE TRIGGER notify_pipeline_run_started AFTER INSERT ON PUBLIC.pipeline_runs FOR EACH ROW EXECUTE PROCEDURE PUBLIC.notifypipelinerunstarted(); + +-- +goose StatementEnd diff --git 
a/core/store/migrate/migrations/0218_drop_log_topic_trigger.sql b/core/store/migrate/migrations/0218_drop_log_topic_trigger.sql new file mode 100644 index 00000000..ea80cccd --- /dev/null +++ b/core/store/migrate/migrations/0218_drop_log_topic_trigger.sql @@ -0,0 +1,27 @@ +-- +goose Up +-- +goose StatementBegin +DROP TRIGGER IF EXISTS notify_insert_on_logs_topics ON EVM.logs; +DROP FUNCTION IF EXISTS evm.notifysavedlogtopics(); + +-- +goose StatementEnd + + +-- +goose Down +-- +goose StatementBegin + +CREATE FUNCTION evm.notifysavedlogtopics() RETURNS trigger + LANGUAGE plpgsql +AS $$ +BEGIN + PERFORM pg_notify( + 'evm.insert_on_logs'::text, + -- hex encoded address plus comma separated list of hex encoded topic values + -- e.g. "
:," + encode(NEW.address, 'hex') || ':' || array_to_string(array(SELECT encode(unnest(NEW.topics), 'hex')), ',') + ); + RETURN NULL; +END +$$; + +CREATE TRIGGER notify_insert_on_logs_topics AFTER INSERT ON evm.logs FOR EACH ROW EXECUTE PROCEDURE evm.notifysavedlogtopics(); +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0219_drop_notifytxinsertion.sql b/core/store/migrate/migrations/0219_drop_notifytxinsertion.sql new file mode 100644 index 00000000..9569fa48 --- /dev/null +++ b/core/store/migrate/migrations/0219_drop_notifytxinsertion.sql @@ -0,0 +1,16 @@ +-- +goose Up +-- +goose StatementBegin +DROP FUNCTION IF EXISTS evm.notifytxinsertion(); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +CREATE OR REPLACE FUNCTION evm.notifytxinsertion() RETURNS trigger + LANGUAGE plpgsql + AS $$ + BEGIN + PERFORM pg_notify('evm.insert_on_txes'::text, encode(NEW.from_address, 'hex')); + RETURN NULL; + END + $$; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0220_stream_specs.sql b/core/store/migrate/migrations/0220_stream_specs.sql new file mode 100644 index 00000000..f4469287 --- /dev/null +++ b/core/store/migrate/migrations/0220_stream_specs.sql @@ -0,0 +1,40 @@ +-- +goose Up +ALTER TABLE + jobs +DROP + CONSTRAINT chk_only_one_spec, +ADD + CONSTRAINT chk_specs CHECK ( + num_nonnulls( + ocr_oracle_spec_id, ocr2_oracle_spec_id, + direct_request_spec_id, flux_monitor_spec_id, + keeper_spec_id, cron_spec_id, webhook_spec_id, + vrf_spec_id, blockhash_store_spec_id, + block_header_feeder_spec_id, bootstrap_spec_id, + gateway_spec_id, + legacy_gas_station_server_spec_id, + legacy_gas_station_sidecar_spec_id, + eal_spec_id, + CASE "type" WHEN 'stream' THEN 1 ELSE NULL END -- 'stream' type lacks a spec but should not cause validation to fail + ) = 1 + ); + +-- +goose Down +ALTER TABLE + jobs +DROP + CONSTRAINT chk_specs, +ADD + CONSTRAINT chk_only_one_spec CHECK ( + num_nonnulls( + ocr_oracle_spec_id, ocr2_oracle_spec_id, + 
direct_request_spec_id, flux_monitor_spec_id, + keeper_spec_id, cron_spec_id, webhook_spec_id, + vrf_spec_id, blockhash_store_spec_id, + block_header_feeder_spec_id, bootstrap_spec_id, + gateway_spec_id, + legacy_gas_station_server_spec_id, + legacy_gas_station_sidecar_spec_id, + eal_spec_id + ) = 1 + ); diff --git a/core/store/migrate/migrations/0221_functions_allowlist.sql b/core/store/migrate/migrations/0221_functions_allowlist.sql new file mode 100644 index 00000000..e97b2fc4 --- /dev/null +++ b/core/store/migrate/migrations/0221_functions_allowlist.sql @@ -0,0 +1,22 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE functions_allowlist( + id BIGSERIAL, + router_contract_address bytea CHECK (octet_length(router_contract_address) = 20) NOT NULL, + allowed_address bytea CHECK (octet_length(allowed_address) = 20) NOT NULL, + PRIMARY KEY(router_contract_address, allowed_address) +); + +ALTER TABLE functions_subscriptions +ADD CONSTRAINT router_contract_address_octet_length CHECK (octet_length(router_contract_address) = 20); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +ALTER TABLE functions_subscriptions +DROP CONSTRAINT router_contract_address_octet_length; + +DROP TABLE IF EXISTS functions_allowlist; +-- +goose StatementEnd diff --git a/core/store/migrate/migrations/0222_jobs_stream_id.sql b/core/store/migrate/migrations/0222_jobs_stream_id.sql new file mode 100644 index 00000000..5732011c --- /dev/null +++ b/core/store/migrate/migrations/0222_jobs_stream_id.sql @@ -0,0 +1,7 @@ +-- +goose Up +ALTER TABLE jobs ADD COLUMN stream_id BIGINT; +CREATE UNIQUE INDEX idx_jobs_unique_stream_id ON jobs(stream_id) WHERE stream_id IS NOT NULL; + +-- +goose Down +ALTER TABLE jobs DROP COLUMN stream_id; + diff --git a/core/store/models/common.go b/core/store/models/common.go new file mode 100644 index 00000000..28c496cc --- /dev/null +++ b/core/store/models/common.go @@ -0,0 +1,497 @@ +package models + +import ( + "bytes" + "database/sql/driver" 
+ "encoding/hex" + "encoding/json" + "fmt" + "net/url" + "regexp" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/robfig/cron/v3" + "github.com/tidwall/gjson" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +// CronParser is the global parser for crontabs. +// It accepts the standard 5 field cron syntax as well as an optional 6th field +// at the front to represent seconds. +var CronParser cron.Parser + +func init() { + cronParserSpec := cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor + CronParser = cron.NewParser(cronParserSpec) +} + +// JSON stores the json types string, number, bool, and null. +// Arrays and Objects are returned as their raw json types. +type JSON struct { + gjson.Result +} + +// Value returns this instance serialized for database storage. +func (j JSON) Value() (driver.Value, error) { + s := j.Bytes() + if len(s) == 0 { + return nil, nil + } + return s, nil +} + +// Scan reads the database value and returns an instance. +func (j *JSON) Scan(value interface{}) error { + switch v := value.(type) { + case string: + *j = JSON{Result: gjson.Parse(v)} + case []byte: + *j = JSON{Result: gjson.ParseBytes(v)} + default: + return fmt.Errorf("unable to convert %v of %T to JSON", value, value) + } + return nil +} + +// ParseJSON attempts to coerce the input byte array into valid JSON +// and parse it into a JSON object. +func ParseJSON(b []byte) (JSON, error) { + var j JSON + str := string(b) + if len(str) == 0 { + return j, nil + } + return j, json.Unmarshal([]byte(str), &j) +} + +// UnmarshalJSON parses the JSON bytes and stores in the *JSON pointer. 
+func (j *JSON) UnmarshalJSON(b []byte) error { + str := string(b) + if !gjson.Valid(str) { + return fmt.Errorf("invalid JSON: %v", str) + } + *j = JSON{gjson.Parse(str)} + return nil +} + +// MarshalJSON returns the JSON data if it already exists, returns +// an empty JSON object as bytes if not. +func (j JSON) MarshalJSON() ([]byte, error) { + if j.Exists() { + return j.Bytes(), nil + } + return []byte("{}"), nil +} + +func (j *JSON) UnmarshalTOML(val interface{}) error { + var bs []byte + switch v := val.(type) { + case string: + bs = []byte(v) + case []byte: + bs = v + } + var err error + *j, err = ParseJSON(bs) + return err +} + +// Bytes returns the raw JSON. +func (j JSON) Bytes() []byte { + if len(j.String()) == 0 { + return nil + } + return []byte(j.String()) +} + +// WebURL contains the URL of the endpoint. +type WebURL url.URL + +// UnmarshalJSON parses the raw URL stored in JSON-encoded +// data to a URL structure and sets it to the URL field. +func (w *WebURL) UnmarshalJSON(j []byte) error { + var v string + err := json.Unmarshal(j, &v) + if err != nil { + return err + } + // handle no url case + if len(v) == 0 { + return nil + } + + u, err := url.ParseRequestURI(v) + if err != nil { + return err + } + *w = WebURL(*u) + return nil +} + +// MarshalJSON returns the JSON-encoded string of the given data. +func (w WebURL) MarshalJSON() ([]byte, error) { + return json.Marshal(w.String()) +} + +// String delegates to the wrapped URL struct or an empty string when it is nil +func (w WebURL) String() string { + url := url.URL(w) + return url.String() +} + +// Value returns this instance serialized for database storage. +func (w WebURL) Value() (driver.Value, error) { + return w.String(), nil +} + +// Scan reads the database value and returns an instance. 
+func (w *WebURL) Scan(value interface{}) error { + s, ok := value.(string) + if !ok { + return fmt.Errorf("unable to convert %v of %T to WebURL", value, value) + } + + u, err := url.ParseRequestURI(s) + if err != nil { + return err + } + *w = WebURL(*u) + return nil +} + +// Cron holds the string that will represent the spec of the cron-job. +type Cron string + +// UnmarshalJSON parses the raw spec stored in JSON-encoded +// data and stores it to the Cron string. +func (c *Cron) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return fmt.Errorf("Cron: %w", err) + } + if s == "" { + return nil + } + + if !strings.HasPrefix(s, "CRON_TZ=") { + return errors.New("Cron: specs must specify a time zone using CRON_TZ, e.g. 'CRON_TZ=UTC 5 * * * *'") + } + + _, err = CronParser.Parse(s) + if err != nil { + return fmt.Errorf("Cron: %w", err) + } + *c = Cron(s) + return nil +} + +// String returns the current Cron spec string. +func (c Cron) String() string { + return string(c) +} + +// Interval represents a time.Duration stored as a Postgres interval type +type Interval time.Duration + +// NewInterval creates Interval for specified duration +func NewInterval(d time.Duration) *Interval { + i := new(Interval) + *i = Interval(d) + return i +} + +func (i Interval) Duration() time.Duration { + return time.Duration(i) +} + +// MarshalText implements the text.Marshaler interface. +func (i Interval) MarshalText() ([]byte, error) { + return []byte(time.Duration(i).String()), nil +} + +// UnmarshalText implements the text.Unmarshaler interface. 
+func (i *Interval) UnmarshalText(input []byte) error { + v, err := time.ParseDuration(string(input)) + if err != nil { + return err + } + *i = Interval(v) + return nil +} + +func (i *Interval) Scan(v interface{}) error { + if v == nil { + *i = Interval(time.Duration(0)) + return nil + } + asInt64, is := v.(int64) + if !is { + return errors.Errorf("models.Interval#Scan() wanted int64, got %T", v) + } + *i = Interval(time.Duration(asInt64) * time.Nanosecond) + return nil +} + +func (i Interval) Value() (driver.Value, error) { + return time.Duration(i).Nanoseconds(), nil +} + +func (i Interval) IsZero() bool { + return time.Duration(i) == time.Duration(0) +} + +// SendEtherRequest represents a request to transfer ETH. +type SendEtherRequest struct { + DestinationAddress common.Address `json:"address"` + FromAddress common.Address `json:"from"` + Amount assets.Eth `json:"amount"` + EVMChainID *big.Big `json:"evmChainID"` + AllowHigherAmounts bool `json:"allowHigherAmounts"` + SkipWaitTxAttempt bool `json:"skipWaitTxAttempt"` + WaitAttemptTimeout *time.Duration `json:"waitAttemptTimeout"` +} + +// AddressCollection is an array of common.Address +// serializable to and from a database. +type AddressCollection []common.Address + +// ToStrings returns this address collection as an array of strings. +func (r AddressCollection) ToStrings() []string { + // Unable to convert copy-free without unsafe: + // https://stackoverflow.com/a/48554123/639773 + converted := make([]string, len(r)) + for i, e := range r { + converted[i] = e.Hex() + } + return converted +} + +// Value returns the string value to be written to the database. +func (r AddressCollection) Value() (driver.Value, error) { + return strings.Join(r.ToStrings(), ","), nil +} + +// Scan parses the database value as a string. 
+func (r *AddressCollection) Scan(value interface{}) error { + str, ok := value.(string) + if !ok { + return fmt.Errorf("unable to convert %v of %T to AddressCollection", value, value) + } + + if len(str) == 0 { + return nil + } + + arr := strings.Split(str, ",") + collection := make(AddressCollection, len(arr)) + for i, a := range arr { + collection[i] = common.HexToAddress(a) + } + *r = collection + return nil +} + +// Merge returns a new map with all keys merged from left to right +// On conflicting keys, rightmost inputs will clobber leftmost inputs +func Merge(inputs ...JSON) (JSON, error) { + output := make(map[string]interface{}) + + for _, input := range inputs { + switch v := input.Result.Value().(type) { + case map[string]interface{}: + for key, value := range v { + output[key] = value + } + case nil: + default: + return JSON{}, errors.New("can only merge JSON objects") + } + } + + bytes, err := json.Marshal(output) + if err != nil { + return JSON{}, err + } + + return JSON{Result: gjson.ParseBytes(bytes)}, nil +} + +// Explicit type indicating a 32-byte sha256 hash +type Sha256Hash [32]byte + +var EmptySha256Hash = new(Sha256Hash) + +// MarshalJSON converts a Sha256Hash to a JSON byte slice. +func (s Sha256Hash) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// UnmarshalJSON converts a bytes slice of JSON to a TaskType. 
+func (s *Sha256Hash) UnmarshalJSON(input []byte) error { + var shaHash string + if err := json.Unmarshal(input, &shaHash); err != nil { + return err + } + + sha, err := Sha256HashFromHex(shaHash) + if err != nil { + return err + } + + *s = sha + return nil +} + +func Sha256HashFromHex(x string) (Sha256Hash, error) { + bs, err := hex.DecodeString(x) + if err != nil { + return Sha256Hash{}, err + } + var hash Sha256Hash + copy(hash[:], bs) + return hash, nil +} + +func MustSha256HashFromHex(x string) Sha256Hash { + bs, err := hex.DecodeString(x) + if err != nil { + panic(err) + } + var hash Sha256Hash + copy(hash[:], bs) + return hash +} + +func (s Sha256Hash) String() string { + return hex.EncodeToString(s[:]) +} + +func (s *Sha256Hash) MarshalText() ([]byte, error) { + return []byte(s.String()), nil +} + +func (s *Sha256Hash) UnmarshalText(bs []byte) (err error) { + *s, err = Sha256HashFromHex(string(bs)) + return +} + +func (s *Sha256Hash) Scan(value interface{}) error { + bytes, ok := value.([]byte) + if !ok { + return errors.Errorf("Failed to unmarshal Sha256Hash value: %v", value) + } + if s == nil { + *s = Sha256Hash{} + } + copy((*s)[:], bytes) + return nil +} + +func (s Sha256Hash) Value() (driver.Value, error) { + b := make([]byte, 32) + copy(b, s[:]) + return b, nil +} + +// ServiceHeader is an HTTP header to include in POST to log service. 
+type ServiceHeader struct { + Header string + Value string +} + +func (h *ServiceHeader) UnmarshalText(input []byte) error { + parts := strings.SplitN(string(input), ":", 2) + h.Header = parts[0] + if len(parts) > 1 { + h.Value = strings.TrimSpace(parts[1]) + } + return h.Validate() +} + +func (h *ServiceHeader) MarshalText() ([]byte, error) { + var b bytes.Buffer + fmt.Fprintf(&b, "%s: %s", h.Header, h.Value) + return b.Bytes(), nil +} + +type ServiceHeaders []ServiceHeader + +func (sh *ServiceHeaders) UnmarshalText(input []byte) error { + if sh == nil { + return errors.New("Cannot unmarshal to a nil receiver") + } + + headers := string(input) + + var parsedHeaders []ServiceHeader + if headers != "" { + headerLines := strings.Split(headers, "\\") + for _, header := range headerLines { + keyValue := strings.Split(header, "||") + if len(keyValue) != 2 { + return errors.Errorf("invalid headers provided for the audit logger. Value, single pair split on || required, got: %s", keyValue) + } + h := ServiceHeader{ + Header: keyValue[0], + Value: keyValue[1], + } + + if err := h.Validate(); err != nil { + return err + } + parsedHeaders = append(parsedHeaders, h) + } + } + + *sh = parsedHeaders + return nil +} + +func (sh *ServiceHeaders) MarshalText() ([]byte, error) { + if sh == nil { + return nil, errors.New("Cannot marshal to a nil receiver") + } + + sb := strings.Builder{} + for _, header := range *sh { + sb.WriteString(header.Header) + sb.WriteString("||") + sb.WriteString(header.Value) + sb.WriteString("\\") + } + + serialized := sb.String() + + if len(serialized) > 0 { + serialized = serialized[:len(serialized)-1] + } + + return []byte(serialized), nil +} + +// We act slightly more strictly than the HTTP specifications +// technically allow instead following the guidelines of +// cloudflare transforms. 
+// https://developers.cloudflare.com/rules/transform/request-header-modification/reference/header-format +var ( + headerNameRegex = regexp.MustCompile(`^[A-Za-z\-]+$`) + headerValueRegex = regexp.MustCompile("^[A-Za-z_ :;.,\\/\"'?!(){}[\\]@<>=\\-+*#$&`|~^%]+$") +) + +func (h ServiceHeader) Validate() (err error) { + if !headerNameRegex.MatchString(h.Header) { + err = multierr.Append(err, errors.Errorf("invalid header name: %s", h.Header)) + } + + if !headerValueRegex.MatchString(h.Value) { + err = multierr.Append(err, errors.Errorf("invalid header value: %s", h.Value)) + } + return +} diff --git a/core/store/models/common_test.go b/core/store/models/common_test.go new file mode 100644 index 00000000..12dbb05c --- /dev/null +++ b/core/store/models/common_test.go @@ -0,0 +1,439 @@ +package models_test + +import ( + "encoding/json" + "net/url" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestJSON_Merge(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + original string + input string + want string + wantError bool + }{ + { + "new field", + `{"value":"OLD","other":1}`, + `{"extra":"fields"}`, + `{"value":"OLD","other":1,"extra":"fields"}`, + false, + }, + { + "overwritting fields", + `{"value":"OLD","other":1}`, + `{"value":["new","new"],"extra":2}`, + `{"value":["new","new"],"other":1,"extra":2}`, + false, + }, + { + "nested JSON", + `{"value":"OLD","other":1}`, + `{"extra":{"fields": ["more", 1]}}`, + `{"value":"OLD","other":1,"extra":{"fields":["more",1]}}`, + false, + }, + { + "empty JSON", + `{"value":"OLD","other":1}`, + `{}`, + `{"value":"OLD","other":1}`, + false, + }, + { + "null values", + `{"value":"OLD","other":1}`, + `{"value":null}`, + `{"value":null,"other":1}`, + false, + }, + { + "string", + 
`"string"`, + `{}`, + "", + true, + }, + { + "array", + `["a1"]`, + `{"value": null}`, + "", + true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + j1 := cltest.JSONFromString(t, test.original) + j2 := cltest.JSONFromString(t, test.input) + + merged, err := models.Merge(j1, j2) + if test.wantError { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.JSONEq(t, test.want, merged.String()) + assert.JSONEq(t, test.original, j1.String()) + } + }) + } +} + +func TestJSON_MergeNull(t *testing.T) { + merged, err := models.Merge(models.JSON{}, models.JSON{}) + require.NoError(t, err) + assert.Equal(t, `{}`, merged.String()) +} + +func TestJSON_UnmarshalJSON(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + json string + wantErrored bool + }{ + {"basic", `{"number": 100, "string": "100", "bool": true}`, false}, + {"invalid JSON", `{`, true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var j models.JSON + err := json.Unmarshal([]byte(test.json), &j) + assert.Equal(t, test.wantErrored, (err != nil)) + }) + } +} + +func TestJSON_ParseJSON(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + in string + want models.JSON + wantErrored bool + }{ + {"basic", `{"num": 100}`, cltest.JSONFromString(t, `{"num": 100}`), false}, + {"empty string", ``, models.JSON{}, false}, + {"invalid JSON", `{`, models.JSON{}, true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + json, err := models.ParseJSON([]byte(test.in)) + assert.Equal(t, test.want, json) + assert.Equal(t, test.wantErrored, (err != nil)) + }) + } +} + +func TestWebURL_UnmarshalJSON_Error(t *testing.T) { + t.Parallel() + j := []byte(`"NotAUrl"`) + wurl := &models.WebURL{} + err := json.Unmarshal(j, wurl) + assert.Error(t, err) +} + +func TestWebURL_UnmarshalJSON(t *testing.T) { + t.Parallel() + j := []byte(`"http://www.duckduckgo.com"`) + wurl := &models.WebURL{} + err := 
json.Unmarshal(j, wurl) + assert.NoError(t, err) +} + +func TestWebURL_MarshalJSON(t *testing.T) { + t.Parallel() + + str := "http://www.duckduckgo.com" + parsed, err := url.ParseRequestURI(str) + assert.NoError(t, err) + wurl := models.WebURL(*parsed) + b, err := json.Marshal(wurl) + assert.NoError(t, err) + assert.Equal(t, `"`+str+`"`, string(b)) +} + +func TestWebURL_String_HasURL(t *testing.T) { + t.Parallel() + + u, _ := url.Parse("http://www.duckduckgo.com") + w := models.WebURL(*u) + + assert.Equal(t, "http://www.duckduckgo.com", w.String()) +} + +func TestWebURL_String_HasNilURL(t *testing.T) { + t.Parallel() + + w := models.WebURL{} + + assert.Equal(t, "", w.String()) +} + +func TestCron_UnmarshalJSON_Success(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + }{ + {"valid 5-field cron", `"CRON_TZ=UTC 0 0/5 * * *"`}, + {"valid 6-field cron", `"CRON_TZ=UTC 30 0 0/5 * * *"`}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var actual models.Cron + err := json.Unmarshal([]byte(test.input), &actual) + assert.NoError(t, err) + }) + } +} + +func TestCron_UnmarshalJSON_Invalid(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + wantError string + }{ + {"5-field cron without time zone", `"0 0/5 * * *"`, "Cron: specs must specify a time zone using CRON_TZ, e.g. 
'CRON_TZ=UTC 5 * * * *'"}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var actual models.Cron + err := json.Unmarshal([]byte(test.input), &actual) + assert.EqualError(t, err, test.wantError) + }) + } +} + +func TestNewInterval(t *testing.T) { + t.Parallel() + + duration := 33 * time.Second + interval := models.NewInterval(duration) + + require.Equal(t, duration, interval.Duration()) +} + +func TestSha256Hash_MarshalJSON_UnmarshalJSON(t *testing.T) { + t.Parallel() + + hash := models.MustSha256HashFromHex("f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5") + json, err := hash.MarshalJSON() + require.NoError(t, err) + require.NotEmpty(t, json) + + var newHash models.Sha256Hash + err = newHash.UnmarshalJSON(json) + require.NoError(t, err) + + require.Equal(t, hash, newHash) +} + +func TestSha256Hash_Sha256HashFromHex(t *testing.T) { + t.Parallel() + + _, err := models.Sha256HashFromHex("abczzz") + require.Error(t, err) + + _, err = models.Sha256HashFromHex("f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5") + require.NoError(t, err) + + _, err = models.Sha256HashFromHex("f5bf259689b26f1374e6") + require.NoError(t, err) +} + +func TestSha256Hash_String(t *testing.T) { + t.Parallel() + + hash := models.MustSha256HashFromHex("f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5") + assert.Equal(t, "f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5", hash.String()) +} + +func TestSha256Hash_Scan_Value(t *testing.T) { + t.Parallel() + + hash := models.MustSha256HashFromHex("f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5") + val, err := hash.Value() + require.NoError(t, err) + + var newHash models.Sha256Hash + err = newHash.Scan(val) + require.NoError(t, err) + + require.Equal(t, hash, newHash) +} + +func TestAddressCollection_Scan_Value(t *testing.T) { + t.Parallel() + + ac := models.AddressCollection{ + common.HexToAddress(strings.Repeat("AA", 20)), + 
common.HexToAddress(strings.Repeat("BB", 20)), + } + + val, err := ac.Value() + require.NoError(t, err) + + var acNew models.AddressCollection + err = acNew.Scan(val) + require.NoError(t, err) + + require.Equal(t, ac, acNew) +} + +func TestAddressCollection_ToStrings(t *testing.T) { + t.Parallel() + + hex1 := "0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa" + hex2 := "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB" + + ac := models.AddressCollection{ + common.HexToAddress(hex1), + common.HexToAddress(hex2), + } + + acStrings := ac.ToStrings() + require.Len(t, acStrings, 2) + require.Equal(t, hex1, acStrings[0]) + require.Equal(t, hex2, acStrings[1]) +} + +func TestInterval_IsZero(t *testing.T) { + t.Parallel() + + i := models.NewInterval(0) + require.NotNil(t, i) + require.True(t, i.IsZero()) + + i = models.NewInterval(1) + require.NotNil(t, i) + require.False(t, i.IsZero()) +} + +func TestInterval_Scan_Value(t *testing.T) { + t.Parallel() + + i := models.NewInterval(100) + require.NotNil(t, i) + + val, err := i.Value() + require.NoError(t, err) + + iNew := models.NewInterval(0) + err = iNew.Scan(val) + require.NoError(t, err) + + require.Equal(t, i, iNew) +} + +func TestInterval_MarshalText_UnmarshalText(t *testing.T) { + t.Parallel() + + i := models.NewInterval(100) + require.NotNil(t, i) + + txt, err := i.MarshalText() + require.NoError(t, err) + + iNew := models.NewInterval(0) + err = iNew.UnmarshalText(txt) + require.NoError(t, err) + + require.Equal(t, i, iNew) +} + +func TestWebURL_Scan_Value(t *testing.T) { + t.Parallel() + + u, err := url.Parse("https://chain.link") + require.NoError(t, err) + + w := models.WebURL(*u) + + val, err := w.Value() + require.NoError(t, err) + + var wNew models.WebURL + err = wNew.Scan(val) + require.NoError(t, err) + + require.Equal(t, w, wNew) +} + +func TestJSON_Scan_Value(t *testing.T) { + t.Parallel() + + js, err := models.ParseJSON([]byte(`{"foo":123}`)) + require.NoError(t, err) + + val, err := js.Value() + require.NoError(t, 
err) + + var jsNew models.JSON + err = jsNew.Scan(val) + require.NoError(t, err) + + require.Equal(t, js, jsNew) +} + +func TestJSON_Bytes(t *testing.T) { + t.Parallel() + + jsBytes := []byte(`{"foo":123}`) + + js, err := models.ParseJSON(jsBytes) + require.NoError(t, err) + + require.Equal(t, jsBytes, js.Bytes()) +} + +func TestJSON_MarshalJSON(t *testing.T) { + t.Parallel() + + jsBytes := []byte(`{"foo":123}`) + + js, err := models.ParseJSON(jsBytes) + require.NoError(t, err) + + bs, err := js.MarshalJSON() + require.NoError(t, err) + + require.Equal(t, jsBytes, bs) +} + +func TestJSON_UnmarshalTOML(t *testing.T) { + t.Parallel() + + jsBytes := []byte(`{"foo":123}`) + + var js models.JSON + err := js.UnmarshalTOML(jsBytes) + require.NoError(t, err) + require.Equal(t, jsBytes, js.Bytes()) + + err = js.UnmarshalTOML(string(jsBytes)) + require.NoError(t, err) + require.Equal(t, jsBytes, js.Bytes()) +} diff --git a/core/store/models/cosmos/common.go b/core/store/models/cosmos/common.go new file mode 100644 index 00000000..1bf896eb --- /dev/null +++ b/core/store/models/cosmos/common.go @@ -0,0 +1,13 @@ +package cosmos + +import sdk "github.com/cosmos/cosmos-sdk/types" + +// SendRequest represents a request to transfer Cosmos coins. +type SendRequest struct { + DestinationAddress sdk.AccAddress `json:"address"` + FromAddress sdk.AccAddress `json:"from"` + Amount sdk.Dec `json:"amount"` + CosmosChainID string `json:"cosmosChainID"` + Token string `json:"token"` + AllowHigherAmounts bool `json:"allowHigherAmounts"` +} diff --git a/core/store/models/errors.go b/core/store/models/errors.go new file mode 100644 index 00000000..6feddd96 --- /dev/null +++ b/core/store/models/errors.go @@ -0,0 +1,68 @@ +package models + +import ( + "errors" + "strings" +) + +// JSONAPIErrors holds errors conforming to the JSONAPI spec. +type JSONAPIErrors struct { + Errors []JSONAPIError `json:"errors"` +} + +// JSONAPIError is an individual JSONAPI Error. 
+type JSONAPIError struct { + Detail string `json:"detail"` +} + +// NewJSONAPIErrors creates an instance of JSONAPIErrors, with the intention +// of managing a collection of them. +func NewJSONAPIErrors() *JSONAPIErrors { + fe := JSONAPIErrors{ + Errors: []JSONAPIError{}, + } + return &fe +} + +// NewJSONAPIErrorsWith creates an instance of JSONAPIErrors populated with this +// single detail. +func NewJSONAPIErrorsWith(detail string) *JSONAPIErrors { + fe := NewJSONAPIErrors() + fe.Errors = append(fe.Errors, JSONAPIError{Detail: detail}) + return fe +} + +// Error implements the error interface, collapsing the collected error +// details into a single comma-separated string. +func (jae *JSONAPIErrors) Error() string { + var messages []string + for _, e := range jae.Errors { + messages = append(messages, e.Detail) + } + return strings.Join(messages, ",") +} + +// Add adds a new error to JSONAPIErrors with the passed detail. +func (jae *JSONAPIErrors) Add(detail string) { + jae.Errors = append(jae.Errors, JSONAPIError{Detail: detail}) +} + +// Merge combines the arrays of the passed error if it is of type JSONAPIErrors, +// otherwise simply adds a single error with the error string as detail. +func (jae *JSONAPIErrors) Merge(e error) { + var jsonErr *JSONAPIErrors + if errors.As(e, &jsonErr) { + jae.Errors = append(jae.Errors, jsonErr.Errors...) + return + } + jae.Add(e.Error()) + +} + +// CoerceEmptyToNil will return nil if JSONAPIErrors has no errors. 
+func (jae *JSONAPIErrors) CoerceEmptyToNil() error { + if len(jae.Errors) == 0 { + return nil + } + return jae +} diff --git a/core/store/models/errors_test.go b/core/store/models/errors_test.go new file mode 100644 index 00000000..42be8997 --- /dev/null +++ b/core/store/models/errors_test.go @@ -0,0 +1,72 @@ +package models_test + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +func TestNewJSONAPIErrors(t *testing.T) { + t.Parallel() + + res := models.NewJSONAPIErrors() + require.NotNil(t, res) + require.NotNil(t, res.Errors) + require.Len(t, res.Errors, 0) +} + +func TestNewJSONAPIErrorsWith(t *testing.T) { + t.Parallel() + + res := models.NewJSONAPIErrorsWith("foo") + require.NotNil(t, res) + require.NotNil(t, res.Errors) + require.Len(t, res.Errors, 1) + require.Equal(t, "foo", res.Errors[0].Detail) +} + +func TestJSONAPIErrors_Error(t *testing.T) { + t.Parallel() + + res := models.NewJSONAPIErrorsWith("foo") + require.NotNil(t, res) + require.Equal(t, "foo", res.Error()) + + res.Add("bar") + require.Equal(t, "foo,bar", res.Error()) +} + +func TestJSONAPIErrors_CoerceEmptyToNil(t *testing.T) { + t.Parallel() + + res := models.NewJSONAPIErrors() + require.NotNil(t, res) + + err := res.CoerceEmptyToNil() + require.NoError(t, err) + + res = models.NewJSONAPIErrorsWith("foo") + require.NotNil(t, res) + + err = res.CoerceEmptyToNil() + require.Equal(t, res, err) +} + +func TestJSONAPIErrors_Merge(t *testing.T) { + t.Parallel() + + res1 := models.NewJSONAPIErrorsWith("foo") + require.NotNil(t, res1) + + res2 := models.NewJSONAPIErrorsWith("bar") + require.NotNil(t, res2) + + res1.Merge(res2) + require.Equal(t, "foo,bar", res1.Error()) + + res1.Merge(errors.New("zet")) + require.Equal(t, "foo,bar,zet", res1.Error()) +} diff --git a/core/store/models/secrets.go b/core/store/models/secrets.go new file mode 100644 index 00000000..fa825d14 --- /dev/null +++ 
b/core/store/models/secrets.go @@ -0,0 +1,58 @@ +package models + +import ( + "encoding" + "fmt" + "net/url" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" +) + +const redacted = "xxxxx" + +var ( + _ fmt.Stringer = (*Secret)(nil) + _ encoding.TextMarshaler = (*Secret)(nil) +) + +// Secret is a string that formats and encodes redacted, as "xxxxx". +// +// Use Value to get the actual secret. +type Secret string + +func NewSecret(s string) *Secret { return (*Secret)(&s) } + +func (s Secret) String() string { return redacted } + +func (s Secret) GoString() string { return redacted } + +func (s Secret) MarshalText() ([]byte, error) { return []byte(redacted), nil } + +var ( + _ fmt.Stringer = (*SecretURL)(nil) + _ encoding.TextMarshaler = (*SecretURL)(nil) + _ encoding.TextUnmarshaler = (*SecretURL)(nil) +) + +// SecretURL is a URL that formats and encodes redacted, as "xxxxx". +type SecretURL commonconfig.URL + +func NewSecretURL(u *commonconfig.URL) *SecretURL { return (*SecretURL)(u) } + +func MustSecretURL(u string) *SecretURL { return NewSecretURL(commonconfig.MustParseURL(u)) } + +func (s *SecretURL) String() string { return redacted } + +func (s *SecretURL) GoString() string { return redacted } + +func (s *SecretURL) URL() *url.URL { return (*commonconfig.URL)(s).URL() } + +func (s *SecretURL) MarshalText() ([]byte, error) { return []byte(redacted), nil } + +func (s *SecretURL) UnmarshalText(text []byte) error { + if err := (*commonconfig.URL)(s).UnmarshalText(text); err != nil { + //opt: if errors.Is(url.Error), just redact the err.URL field? 
+ return fmt.Errorf("failed to parse url: %s", redacted) + } + return nil +} diff --git a/core/store/models/secrets_test.go b/core/store/models/secrets_test.go new file mode 100644 index 00000000..421bd9c1 --- /dev/null +++ b/core/store/models/secrets_test.go @@ -0,0 +1,37 @@ +package models + +import ( + "encoding" + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSecret(t *testing.T) { + type secret interface { + fmt.Stringer + encoding.TextMarshaler + } + for _, v := range []secret{ + Secret("secret"), + MustSecretURL("http://secret.url"), + } { + t.Run(fmt.Sprintf("%T", v), func(t *testing.T) { + assert.Equal(t, redacted, v.String()) + got, err := v.MarshalText() + if assert.NoError(t, err) { + assert.Equal(t, redacted, string(got)) + } + assert.Equal(t, redacted, fmt.Sprint(v)) + assert.Equal(t, redacted, fmt.Sprintf("%s", v)) //nolint:gosimple + assert.Equal(t, redacted, fmt.Sprintf("%v", v)) + assert.Equal(t, redacted, fmt.Sprintf("%#v", v)) + got, err = json.Marshal(v) + if assert.NoError(t, err) { + assert.Equal(t, fmt.Sprintf(`"%s"`, redacted), string(got)) + } + }) + } +} diff --git a/core/store/models/solana/common.go b/core/store/models/solana/common.go new file mode 100644 index 00000000..2ef03792 --- /dev/null +++ b/core/store/models/solana/common.go @@ -0,0 +1,11 @@ +package solana + +import "github.com/gagliardetto/solana-go" + +type SendRequest struct { + From solana.PublicKey `json:"from"` + To solana.PublicKey `json:"to"` + Amount uint64 `json:"amount"` + SolanaChainID string `json:"solanaChainID"` + AllowHigherAmounts bool `json:"allowHigherAmounts"` +} diff --git a/core/testdata/apiresponses/bravenewcoin.json b/core/testdata/apiresponses/bravenewcoin.json new file mode 100644 index 00000000..6d88febb --- /dev/null +++ b/core/testdata/apiresponses/bravenewcoin.json @@ -0,0 +1,30 @@ +{ + "data": { + "success": true, + "source": "BraveNewCoin", + "time_stamp": 1561642181, + "utc_date": "2019-06-27 
13:29:41", + "coin_symbol": "ETH", + "coin_name": "Ethereum", + "market_symbol": "USD", + "market_name": "United States Dollar", + "column_names": [ + "timestamp", + "index", + "volume", + "index_usd", + "volume_usd" + ], + "data": [ + [ + "1561641900", + "306.52036004", + "1982770.5151919918", + "306.52036003822224", + "607759532.18982" + ] + ], + "result": 306.52036004 + }, + "statusCode": 200 +} diff --git a/core/testdata/apiresponses/coinmarketcap.error.json b/core/testdata/apiresponses/coinmarketcap.error.json new file mode 100644 index 00000000..80b4ca56 --- /dev/null +++ b/core/testdata/apiresponses/coinmarketcap.error.json @@ -0,0 +1,3 @@ +{ + "errorMessage": "RequestId: 593b595b-5777-435e-9a24-6e8d75b0a9c3 Process exited before completing request" +} diff --git a/core/testdata/apiresponses/coinmarketcap.json b/core/testdata/apiresponses/coinmarketcap.json new file mode 100644 index 00000000..8fa9cb6c --- /dev/null +++ b/core/testdata/apiresponses/coinmarketcap.json @@ -0,0 +1,43 @@ +{ + "data": { + "status": { + "timestamp": "2019-06-27T13:31:25.232Z", + "error_code": 0, + "error_message": null, + "elapsed": 10, + "credit_count": 1 + }, + "data": { + "ETH": { + "id": 1027, + "name": "Ethereum", + "symbol": "ETH", + "slug": "ethereum", + "circulating_supply": 106663534.9366, + "total_supply": 106663534.9366, + "max_supply": null, + "date_added": "2015-08-07T00:00:00.000Z", + "num_market_pairs": 5408, + "tags": [ + "mineable" + ], + "platform": null, + "cmc_rank": 2, + "last_updated": "2019-06-27T13:30:22.000Z", + "quote": { + "USD": { + "price": 305.5574615, + "volume_24h": 15533097883.4532, + "percent_change_1h": -1.74066, + "percent_change_24h": -11.6077, + "percent_change_7d": 13.8838, + "market_cap": 32591838969.84406, + "last_updated": "2019-06-27T13:30:22.000Z" + } + } + } + }, + "result": 305.5574615 + }, + "statusCode": 200 +} diff --git a/core/testdata/apiresponses/confirmedEthTxData.json b/core/testdata/apiresponses/confirmedEthTxData.json new 
file mode 100644 index 00000000..0fb70df0 --- /dev/null +++ b/core/testdata/apiresponses/confirmedEthTxData.json @@ -0,0 +1,3 @@ +{ + "latestOutgoingTxHash": "0x1111111111111111111111111111111111111111111111111111111111111111" +} diff --git a/core/testdata/apiresponses/create_random_number_bridge_type.json b/core/testdata/apiresponses/create_random_number_bridge_type.json new file mode 100644 index 00000000..0c3692ee --- /dev/null +++ b/core/testdata/apiresponses/create_random_number_bridge_type.json @@ -0,0 +1,6 @@ +{ + "name": "randomNumber", + "url": "https://example.com/randomNumber", + "confirmations": 10, + "minimumContractPayment": "100" +} diff --git a/core/testdata/apiresponses/cryptocompare.json b/core/testdata/apiresponses/cryptocompare.json new file mode 100644 index 00000000..182da7b4 --- /dev/null +++ b/core/testdata/apiresponses/cryptocompare.json @@ -0,0 +1,7 @@ +{ + "data": { + "USD": 305.76, + "result": 305.76 + }, + "statusCode": 200 +} diff --git a/core/testdata/apiresponses/existing_core_adapter.json b/core/testdata/apiresponses/existing_core_adapter.json new file mode 100644 index 00000000..5586e263 --- /dev/null +++ b/core/testdata/apiresponses/existing_core_adapter.json @@ -0,0 +1,3 @@ +{ + "name": "ethtx" +} diff --git a/core/testdata/apiresponses/fulfilledReceiptResponse.json b/core/testdata/apiresponses/fulfilledReceiptResponse.json new file mode 100644 index 00000000..f08e87b1 --- /dev/null +++ b/core/testdata/apiresponses/fulfilledReceiptResponse.json @@ -0,0 +1,24 @@ +{ + "ethereumReceipts": [{ + "transactionHash": "0x1111111111111111111111111111111111111111111111111111111111111111", + "logs": [ + { + "address": "0x9FBDa871d559710256a2502A2517b794B482Db40", + "blockHash": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", + "blockNumber": "0x9", + "data": "0x", + "logIndex": "0x0", + "transactionHash": "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c07736a935cba057e66b2b3bf", + "transactionIndex": "0x0", + "topics": [ + 
"0x7cc135e0cebb02c3480ae5d74d377283180a2601f8f644edf7987b009316c63a", + "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8" + ] + } + ], + "status": "0x1", + "gasUsed": "0x1", + "cumulativeGasUsed": "0x1", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }] +} diff --git a/core/testdata/apiresponses/notFulfilledReceiptResponse.json b/core/testdata/apiresponses/notFulfilledReceiptResponse.json new file mode 100644 index 00000000..305c505c --- /dev/null +++ b/core/testdata/apiresponses/notFulfilledReceiptResponse.json @@ -0,0 +1,38 @@ +{ + "ethereumReceipts": [{ + "transactionHash": "0x1111111111111111111111111111111111111111111111111111111111111111", + "logs": [ + { + "address": "0x9FBDa871d559710256a2502A2517b794B482Db40", + "blockHash": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", + "blockNumber": "0x9", + "data": "0x", + "logIndex": "0x0", + "transactionHash": "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c07736a935cba057e66b2b3bf", + "transactionIndex": "0x0", + "topics": [ + "0x312c3030302c3030302e30300000000000000000000000000000000000000000", + "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8" + ] + }, + { + "address": "0x9FBDa871d559710256a2502A2517b794B482Db40", + "blockHash": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", + "blockNumber": "0x9", + "data": "0x", + "logIndex": "0x1", + "transactionHash": "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c07736a935cba057e66b2b3bf", + "transactionIndex": "0x0", + 
"topics": [ + "0x0c2366233f634048c0f0458060d1228fab36d00f7c0ecf6bdf2d9c4585036311", + "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8", + "0x312c3030302c3030302e30300000000000000000000000000000000000000000" + ] + } + ], + "status": "0x1", + "gasUsed": "0x1", + "cumulativeGasUsed": "0x1", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }] +} diff --git a/core/testdata/cosmos/my_first_contract.wasm b/core/testdata/cosmos/my_first_contract.wasm new file mode 100644 index 00000000..e89b47b3 Binary files /dev/null and b/core/testdata/cosmos/my_first_contract.wasm differ diff --git a/core/testdata/jsonrpc/getTransactionReceipt.json b/core/testdata/jsonrpc/getTransactionReceipt.json new file mode 100644 index 00000000..45e6d0fb --- /dev/null +++ b/core/testdata/jsonrpc/getTransactionReceipt.json @@ -0,0 +1,16 @@ +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "transactionHash": "0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238", + "transactionIndex": "0x1", + "blockNumber": "0xb", + "blockHash": "0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", + "cumulativeGasUsed": "0x33bc", + "gasUsed": "0x4dc", + "contractAddress": "0xb60e8dd61c5d32be8058bb8eb970870f07233155", + "logs": [], + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x1" + } +} diff --git a/core/testdata/jsonrpc/getTransactionReceipt_notFound.json b/core/testdata/jsonrpc/getTransactionReceipt_notFound.json new file mode 100644 index 00000000..e1e4b818 --- /dev/null +++ b/core/testdata/jsonrpc/getTransactionReceipt_notFound.json @@ -0,0 +1,5 @@ +{ + "id": 1, + "jsonrpc": "2.0", + "result": null +} diff --git a/core/testdata/jsonrpc/new_round_log.json b/core/testdata/jsonrpc/new_round_log.json new file mode 100644 index 00000000..5f5e32de --- /dev/null +++ b/core/testdata/jsonrpc/new_round_log.json @@ -0,0 +1,22 @@ +{ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "logIndex": "0x0", + "transactionIndex": "0x0", + "transactionHash": "0x420de56323893bced814b83f16a94c8ef7f7b6f1e3920a11ec62733fcf82c730", + "blockHash": "0x5e3bd2cc97a68136cead922330e2ec27201420b3eff182875e388474079fcd9e", + "blockNumber": "0xa", + "address": "0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6", + "data": "0x000000000000000000000000000000000000000000000000000000000000000f", + "topics": [ + "0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271", + "0x0000000000000000000000000000000000000000000000000000000000000009", + "0x000000000000000000000000f17f52151ebef6c7334fad080c5704d77216b732" + ], + "type": "mined" + } + } +} diff --git a/core/testdata/jsonrpc/ocr2_round_requested_log_1_1.json 
b/core/testdata/jsonrpc/ocr2_round_requested_log_1_1.json new file mode 100644 index 00000000..2432e154 --- /dev/null +++ b/core/testdata/jsonrpc/ocr2_round_requested_log_1_1.json @@ -0,0 +1,24 @@ +{ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "address": "0x03bd0d5d39629423979f8a0e53dbce78c1791ebf", + "blockHash": "0x2aec6d67c79d333a07b4492aecb1672371f91e8b7f0e2121bdccdf51b30b68b6", + "blockNumber": "0x1676233", + "data": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001", + "logIndex": "0x2", + "removed": false, + "topics": [ + "0x41e3990591fd372502daa15842da15bc7f41c75309ab3ff4f56f1848c178825c", + "0x0000000000000000000000007e42eadd458e6d7beeaf188a5b058bb6fa3ef7a1" + ], + "transactionHash": "0x95b41d2a21d1a27844b6d1cd1ca25e6e2a0760b1e70a68c18aa564e5ca835915", + "transactionIndex": "0x3", + "transactionLogIndex": "0x0", + "type": "mined" + } + } +} + diff --git a/core/testdata/jsonrpc/ocr2_round_requested_log_1_9.json b/core/testdata/jsonrpc/ocr2_round_requested_log_1_9.json new file mode 100644 index 00000000..f19eeb82 --- /dev/null +++ b/core/testdata/jsonrpc/ocr2_round_requested_log_1_9.json @@ -0,0 +1,23 @@ +{ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "address": "0x03bd0d5d39629423979f8a0e53dbce78c1791ebf", + "blockHash": "0xee57202dad7045e0e7f3a1ec24cdb3cd56114c9798de69a1abfe2a2f52df1cec", + "blockNumber": "0x16862a6", + "data": "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000009", + "logIndex": "0x1", + "removed": false, + "topics": [ + 
"0x41e3990591fd372502daa15842da15bc7f41c75309ab3ff4f56f1848c178825c", + "0x0000000000000000000000007e42eadd458e6d7beeaf188a5b058bb6fa3ef7a1" + ], + "transactionHash": "0xef3304d88eb373c6d118c10ba3e5dc53ee27270ea5b5bd00cb9da345a1a7d625", + "transactionIndex": "0x1", + "transactionLogIndex": "0x0", + "type": "mined" + } + } +} diff --git a/core/testdata/jsonrpc/ocr2_round_requested_log_2_1.json b/core/testdata/jsonrpc/ocr2_round_requested_log_2_1.json new file mode 100644 index 00000000..00b52e2e --- /dev/null +++ b/core/testdata/jsonrpc/ocr2_round_requested_log_2_1.json @@ -0,0 +1,24 @@ +{ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "address": "0x03bd0d5d39629423979f8a0e53dbce78c1791ebf", + "blockHash": "0xee57202dad7045e0e7f3a1ec24cdb3cd56114c9798de69a1abfe2a2f52df1cec", + "blockNumber": "0x16862a6", + "data": "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001", + "logIndex": "0x1", + "removed": false, + "topics": [ + "0x41e3990591fd372502daa15842da15bc7f41c75309ab3ff4f56f1848c178825c", + "0x0000000000000000000000007e42eadd458e6d7beeaf188a5b058bb6fa3ef7a1" + ], + "transactionHash": "0xef3304d88eb373c6d118c10ba3e5dc53ee27270ea5b5bd00cb9da345a1a7d625", + "transactionIndex": "0x2", + "transactionLogIndex": "0x0", + "type": "mined" + } + } +} + diff --git a/core/testdata/jsonrpc/request20200212paddedCBOR.json b/core/testdata/jsonrpc/request20200212paddedCBOR.json new file mode 100644 index 00000000..e87dde48 --- /dev/null +++ b/core/testdata/jsonrpc/request20200212paddedCBOR.json @@ -0,0 +1,24 @@ +{ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "logIndex": "0x3", + "transactionIndex": "0x0", + "transactionHash": 
"0x765fa34cc39cf63f36a89ca765e55d35dd1c52f83680c6b056ef8a4ee4ea9d19", + "blockHash": "0xf578c33c9f3645355a0a698903ea33d0a06ec23b36809c26119f8d15d403f625", + "blockNumber": "0x8", + "address": "0xfeb35e1f7abe4ef198b7c8df895e19767f3ab8a5", + "data": "0x000000000000000000000000650c346f84248abc27e716ea3c6de20f7fbbdb79e947f54ec4d3cab0588684217b029cd9421ea25c59f3309bef6e8fb0d75ff5310000000000000000000000000000000000000000000000000de0b6b3a7640000000000000000000000000000650c346f84248abc27e716ea3c6de20f7fbbdb7992cdaaf300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005e1b7f6b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000005663676574783f68747470733a2f2f6d696e2d6170692e63727970746f636f6d706172652e636f6d2f646174612f70726963653f6673796d3d455448267473796d733d5553446470617468635553446574696d6573186400000000000000000000", + "removed": false, + "topics": [ + "0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65", + "0xe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16", + "0x000000000000000000000000650c346f84248abc27e716ea3c6de20f7fbbdb79", + "0x000000000000000000000000feb35e1f7abe4ef198b7c8df895e19767f3ab8a5" + ] + } + } +} + diff --git a/core/testdata/jsonrpc/requestLog0original.json b/core/testdata/jsonrpc/requestLog0original.json new file mode 100644 index 00000000..0c32ca0a --- /dev/null +++ b/core/testdata/jsonrpc/requestLog0original.json @@ -0,0 +1,23 @@ +{ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "address": "0x3cCad4715152693fE3BC4460591e3D3Fbd071b42", + "blockHash": "0xde3fb1df888c6c7f77f3a8e9c2582f87e7ad5277d98bd06cfd17cd2d7ea49f42", + "blockNumber": "0x5", + "data": 
"0x0000000000000000000000000000000000000000000000000000000000000017000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000034bf6375726c781a68747470733a2f2f657468657270726963652e636f6d2f61706964706174689f66726563656e7463757364ffff000000000000000000000000", + "logIndex": "0x0", + "removed": false, + "topics": [ + "0x6d6db1f8fe19d95b1d0fa6a4bce7bb24fbf84597b35a33ff95521fac453c1529", + "0x736f6d654a6f6249640000000000000000000000000000000000000000000000", + "0x000000000000000000000000d352677fcded6c358e03c73ea2a8a2832dffc0a4", + "0x0000000000000000000000000000000000000000000000000000000000000001" + ], + "transactionHash": "0xe05b171038320aca6634ce50de669bd0baa337130269c3ce3594ce4d45fc342a", + "transactionIndex": "0x0" + } + } +} diff --git a/core/testdata/jsonrpc/requestLog20190207withoutIndexes.json b/core/testdata/jsonrpc/requestLog20190207withoutIndexes.json new file mode 100644 index 00000000..d639c5c1 --- /dev/null +++ b/core/testdata/jsonrpc/requestLog20190207withoutIndexes.json @@ -0,0 +1,21 @@ +{ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "logIndex": "0x3", + "transactionIndex": "0x0", + "transactionHash": "0x04250548cd0b5d03b3bf1331aa83f32b35879440db31a6008d151260a5f3cc76", + "blockHash": "0x000c0d01ce8bd7100b73b1609ababc020e7f51dac75186bb799277c6b4b71e1c", + "blockNumber": "0x8", + "address": "0xf25186b5081ff5ce73482ad761db0eb0d25abfbf", + "data": 
"0x0000000000000000000000009fbda871d559710256a2502a2517b794b482db40c524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f80000000000000000000000000000000000000000000000000de0b6b3a76400010000000000000000000000009fbda871d559710256a2502a2517b794b482db40042f2b6500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005c4a73380000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000586375726c784768747470733a2f2f6d696e2d6170692e63727970746f636f6d706172652e636f6d2f646174612f70726963653f6673796d3d455448267473796d733d5553442c4555522c4a505964706174689f63555344ff", + "removed": false, + "topics": [ + "0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65", + "0x000000000000000000000000000000004c7b7ffb66b344fbaa64995af81e355a" + ] + } + } +} diff --git a/core/testdata/jsonrpc/responseReceipt.json b/core/testdata/jsonrpc/responseReceipt.json new file mode 100644 index 00000000..ac779de2 --- /dev/null +++ b/core/testdata/jsonrpc/responseReceipt.json @@ -0,0 +1,48 @@ +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "transactionHash": "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c07736a935cba057e66b2b3bf", + "transactionIndex": "0x0", + "blockHash": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", + "blockNumber": "0x9", + "from": "0xf17f52151ebef6c7334fad080c5704d77216b732", + "to": "0xf25186b5081ff5ce73482ad761db0eb0d25abfbf", + "gasUsed": "0x39522", + "cumulativeGasUsed": "0x39522", + "contractAddress": null, + "logs": [ + { + "logIndex": "0x0", + "transactionIndex": "0x0", + "transactionHash": "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c07736a935cba057e66b2b3bf", + "blockHash": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", + "blockNumber": "0x9", + "address": "0x9FBDa871d559710256a2502A2517b794B482Db40", + "data": "0x", + 
"topics": [ + "0x7cc135e0cebb02c3480ae5d74d377283180a2601f8f644edf7987b009316c63a", + "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8" + ], + "type": "mined" + }, + { + "logIndex": "0x1", + "transactionIndex": "0x0", + "transactionHash": "0x882969652440ccf14a5dbb9bd53eb21cb1e11e5c07736a935cba057e66b2b3bf", + "blockHash": "0x5198616554d738d9485d1a7cf53b2f33e09c3bbc8fe9ac0020bd672cd2bc15d2", + "blockNumber": "0x9", + "address": "0x9FBDa871d559710256a2502A2517b794B482Db40", + "data": "0x", + "topics": [ + "0x0c2366233f634048c0f0458060d1228fab36d00f7c0ecf6bdf2d9c4585036311", + "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8", + "0x312c3030302c3030302e30300000000000000000000000000000000000000000" + ], + "type": "mined" + } + ], + "status": "0x1", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000080800000020000000000100000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000004000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000004008000000000000000000000000000000000000000000000000000000000000000000000000000808000000000000400000000000000000000000000000000000000000001000000000000000000000000" + } +} diff --git a/core/testdata/jsonrpc/round_requested_log_1_1.json b/core/testdata/jsonrpc/round_requested_log_1_1.json new file mode 100644 index 00000000..07a45b4c --- /dev/null +++ b/core/testdata/jsonrpc/round_requested_log_1_1.json @@ -0,0 +1,23 @@ +{ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "address": "0x03bd0d5d39629423979f8a0e53dbce78c1791ebf", + "blockHash": "0x2aec6d67c79d333a07b4492aecb1672371f91e8b7f0e2121bdccdf51b30b68b6", + "blockNumber": "0x1676233", + "data": 
"0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001", + "logIndex": "0x2", + "removed": false, + "topics": [ + "0x3ea16a923ff4b1df6526e854c9e3a995c43385d70e73359e10623c74f0b52037", + "0x0000000000000000000000007e42eadd458e6d7beeaf188a5b058bb6fa3ef7a1" + ], + "transactionHash": "0x95b41d2a21d1a27844b6d1cd1ca25e6e2a0760b1e70a68c18aa564e5ca835915", + "transactionIndex": "0x3", + "transactionLogIndex": "0x0", + "type": "mined" + } + } +} diff --git a/core/testdata/jsonrpc/round_requested_log_1_9.json b/core/testdata/jsonrpc/round_requested_log_1_9.json new file mode 100644 index 00000000..f943f627 --- /dev/null +++ b/core/testdata/jsonrpc/round_requested_log_1_9.json @@ -0,0 +1,23 @@ +{ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "address": "0x03bd0d5d39629423979f8a0e53dbce78c1791ebf", + "blockHash": "0xee57202dad7045e0e7f3a1ec24cdb3cd56114c9798de69a1abfe2a2f52df1cec", + "blockNumber": "0x16862a6", + "data": "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000009", + "logIndex": "0x1", + "removed": false, + "topics": [ + "0x3ea16a923ff4b1df6526e854c9e3a995c43385d70e73359e10623c74f0b52037", + "0x0000000000000000000000007e42eadd458e6d7beeaf188a5b058bb6fa3ef7a1" + ], + "transactionHash": "0xef3304d88eb373c6d118c10ba3e5dc53ee27270ea5b5bd00cb9da345a1a7d625", + "transactionIndex": "0x1", + "transactionLogIndex": "0x0", + "type": "mined" + } + } +} diff --git a/core/testdata/jsonrpc/round_requested_log_2_1.json b/core/testdata/jsonrpc/round_requested_log_2_1.json new file mode 100644 index 00000000..3a4f2158 --- /dev/null +++ b/core/testdata/jsonrpc/round_requested_log_2_1.json @@ -0,0 +1,23 @@ +{ + 
"jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "address": "0x03bd0d5d39629423979f8a0e53dbce78c1791ebf", + "blockHash": "0xee57202dad7045e0e7f3a1ec24cdb3cd56114c9798de69a1abfe2a2f52df1cec", + "blockNumber": "0x16862a6", + "data": "0xcccccccccccccccccccccccccccccccc0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001", + "logIndex": "0x1", + "removed": false, + "topics": [ + "0x3ea16a923ff4b1df6526e854c9e3a995c43385d70e73359e10623c74f0b52037", + "0x0000000000000000000000007e42eadd458e6d7beeaf188a5b058bb6fa3ef7a1" + ], + "transactionHash": "0xef3304d88eb373c6d118c10ba3e5dc53ee27270ea5b5bd00cb9da345a1a7d625", + "transactionIndex": "0x2", + "transactionLogIndex": "0x0", + "type": "mined" + } + } +} diff --git a/core/testdata/jsonrpc/runlogReceipt.json b/core/testdata/jsonrpc/runlogReceipt.json new file mode 100644 index 00000000..b965e5ae --- /dev/null +++ b/core/testdata/jsonrpc/runlogReceipt.json @@ -0,0 +1,75 @@ +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "transactionHash": "0xe5f4977257d1fd01f943b9486a10633c42a65c2dd45edbe7af0ef26102e4568b", + "transactionIndex": "0x0", + "blockHash": "0x491ac4d840e4aace31cd76e55edfaf7b71ea3e0870c60cdbc887ade2f8c56494", + "blockNumber": "0x8", + "from": "0x627306090abab3a6e1400e9345bc60c78a8bef57", + "to": "0x9fbda871d559710256a2502a2517b794b482db40", + "gasUsed": "0x119324", + "cumulativeGasUsed": "0x119324", + "contractAddress": null, + "logs": [{ + "logIndex": "0x0", + "transactionIndex": "0x0", + "transactionHash": "0xe5f4977257d1fd01f943b9486a10633c42a65c2dd45edbe7af0ef26102e4568b", + "blockHash": "0x491ac4d840e4aace31cd76e55edfaf7b71ea3e0870c60cdbc887ade2f8c56494", + "blockNumber": "0x8", + "address": "0x9FBDa871d559710256a2502A2517b794B482Db40", + "data": "0x", + "topics": [ + 
"0xb5e6e01e79f91267dc17b4e6314d5d4d03593d2ceee0fbb452b750bd70ea5af9", + "0xc524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f8" + ], + "type": "mined" + }, + { + "logIndex": "0x1", + "transactionIndex": "0x0", + "transactionHash": "0xe5f4977257d1fd01f943b9486a10633c42a65c2dd45edbe7af0ef26102e4568b", + "blockHash": "0x491ac4d840e4aace31cd76e55edfaf7b71ea3e0870c60cdbc887ade2f8c56494", + "blockNumber": "0x8", + "address": "0x345cA3e014Aaf5dcA488057592ee47305D9B3e10", + "data": "0x0000000000000000000000000000000000000000000000000de0b6b3a7640000", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000009fbda871d559710256a2502a2517b794b482db40", + "0x000000000000000000000000f25186b5081ff5ce73482ad761db0eb0d25abfbf" + ], + "type": "mined" + }, + { + "logIndex": "0x2", + "transactionIndex": "0x0", + "transactionHash": "0xe5f4977257d1fd01f943b9486a10633c42a65c2dd45edbe7af0ef26102e4568b", + "blockHash": "0x491ac4d840e4aace31cd76e55edfaf7b71ea3e0870c60cdbc887ade2f8c56494", + "blockNumber": "0x8", + "address": "0x345cA3e014Aaf5dcA488057592ee47305D9B3e10", + "data": 
"0x0000000000000000000000000000000000000000000000000de0b6b3a7640000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000001844042994600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c7b7ffb66b344fbaa64995af81e355a0000000000000000000000009fbda871d559710256a2502a2517b794b482db40042f2b650000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000586375726c784768747470733a2f2f6d696e2d6170692e63727970746f636f6d706172652e636f6d2f646174612f70726963653f6673796d3d455448267473796d733d5553442c4555522c4a505964706174689f63555344ff000000000000000000000000000000000000000000000000000000000000000000000000", + "topics": [ + "0xe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16", + "0x0000000000000000000000009fbda871d559710256a2502a2517b794b482db40", + "0x000000000000000000000000f25186b5081ff5ce73482ad761db0eb0d25abfbf" + ], + "type": "mined" + }, + { + "logIndex": "0x3", + "transactionIndex": "0x0", + "transactionHash": "0xe5f4977257d1fd01f943b9486a10633c42a65c2dd45edbe7af0ef26102e4568b", + "blockHash": "0x491ac4d840e4aace31cd76e55edfaf7b71ea3e0870c60cdbc887ade2f8c56494", + "blockNumber": "0x8", + "address": "0xf25186B5081Ff5cE73482AD761DB0eB0d25abfBF", + "data": 
"0x0000000000000000000000009fbda871d559710256a2502a2517b794b482db40c524fafafcaec40652b1f84fca09c231185437d008d195fccf2f51e64b7062f80000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000009fbda871d559710256a2502a2517b794b482db40042f2b6500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005cd0f2a70000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000586375726c784768747470733a2f2f6d696e2d6170692e63727970746f636f6d706172652e636f6d2f646174612f70726963653f6673796d3d455448267473796d733d5553442c4555522c4a505964706174689f63555344ff", + "topics": [ + "0xd8d7ecc4800d25fa53ce0372f13a416d98907a7ef3d8d3bdd79cf4fe75529c65", + "0x000000000000000000000000000000004c7b7ffb66b344fbaa64995af81e355a" + ], + "type": "mined" + }], + "logsBloom": "0x00000000000000000000000000000000010000000001000000000010020000008000000000000000000000000000000000000020000000000100000000000000000000000000000000800008000000040000000000010000000020000000000000000000000000000000000000000000000000000000080000008010000004000000000000000000000000000000000000000620000000000000000000000000000000000000000000000000000000000000002000400000000000000000001000000002000000000000200000000000010000000000804000000000400400000000000000000000000000000010000000000001000000000010000000000002", + "status": "0x1" + } +} diff --git a/core/testdata/jsonrpc/subscription_logs.json b/core/testdata/jsonrpc/subscription_logs.json new file mode 100644 index 00000000..6db5dc42 --- /dev/null +++ b/core/testdata/jsonrpc/subscription_logs.json @@ -0,0 +1,18 @@ +{ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "address": "0x8320fe7702b96808f7bbc0d4a888ed1468216cfd", + "blockHash": 
"0x61cdb2a09ab99abf791d474f20c2ea89bf8de2923a2d42bb49944c8c993cbf04", + "blockNumber": "0x29e87", + "data": "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000003", + "logIndex": "0x42", + "removed": false, + "topics": ["0xd78a0cb8bb633d06981248b816e7bd33c2a35a6089241d099fa519e361cab902"], + "transactionHash": "0xe044554a0a55067caafd07f8020ab9f2af60bdfe337e395ecd84b4877a3d1ab4", + "transactionIndex": "0x17" + } + } +} diff --git a/core/testdata/secrets/vrf_key.json b/core/testdata/secrets/vrf_key.json new file mode 100644 index 00000000..c5a90081 --- /dev/null +++ b/core/testdata/secrets/vrf_key.json @@ -0,0 +1,23 @@ +{ + "PublicKey":"0xe2c659dd73ded1663c0caf02304aac5ccd247047b3993d273a8920bba0402f4d01", + "vrf_key":{ + "address":"3612f05cfc042966fcdd82ec6e78bf128d91695a", + "crypto":{ + "cipher":"aes-128-ctr", + "ciphertext":"31165462357ae277da8176ce4baacab49741d70be6d6be0c7dfebfe00a10d8e3", + "cipherparams":{ + "iv":"11811bae89fef0cf469ae0f785461726" + }, + "kdf":"scrypt", + "kdfparams":{ + "dklen":32, + "n":262144, + "p":1, + "r":8, + "salt":"565f974e2e0f95cbb7f03155b30f4ac5dd30db6b40028e9c2f03df33dd56ec25" + }, + "mac":"1b6b24cbc4520fe67e8241d801851a1e934866e4bd8cf4390e4a3ba1df30a05b" + }, + "version":3 + } + } \ No newline at end of file diff --git a/core/testdata/secrets/vrf_password.txt b/core/testdata/secrets/vrf_password.txt new file mode 100644 index 00000000..c1767cc3 --- /dev/null +++ b/core/testdata/secrets/vrf_password.txt @@ -0,0 +1 @@ +testingpassword \ No newline at end of file diff --git a/core/testdata/testspecs/v2_specs.go b/core/testdata/testspecs/v2_specs.go new file mode 100644 index 00000000..f4429ff2 --- /dev/null +++ b/core/testdata/testspecs/v2_specs.go @@ -0,0 +1,865 @@ +package testspecs + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/google/uuid" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" +) + +var ( + CronSpecTemplate = ` +type = "cron" +schemaVersion = 1 +schedule = "CRON_TZ=UTC * 0 0 1 1 *" +externalJobID = "%s" +observationSource = """ +ds [type=http method=GET url="https://chain.link/ETH-USD"]; +ds_parse [type=jsonparse path="data,price"]; +ds_multiply [type=multiply times=100]; +ds -> ds_parse -> ds_multiply; +""" +` + CronSpecDotSepTemplate = ` +type = "cron" +schemaVersion = 1 +schedule = "CRON_TZ=UTC * 0 0 1 1 *" +externalJobID = "%s" +observationSource = """ +ds [type=http method=GET url="https://chain.link/ETH-USD"]; +ds_parse [type=jsonparse path="data.price" separator="."]; +ds_multiply [type=multiply times=100]; +ds -> ds_parse -> ds_multiply; +""" +` + DirectRequestSpecNoExternalJobID = ` +type = "directrequest" +schemaVersion = 1 +name = "%s" +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +evmChainID = "0" +observationSource = """ + ds1 [type=http method=GET url="http://example.com" allowunrestrictednetworkaccess="true"]; + ds1_parse [type=jsonparse path="USD"]; + ds1_multiply [type=multiply times=100]; + ds1 -> ds1_parse -> ds1_multiply; +""" +` + DirectRequestSpecTemplate = ` +type = "directrequest" +schemaVersion = 1 +name = "%s" +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +externalJobID = "%s" +evmChainID = "0" +observationSource = """ + ds1 [type=http method=GET url="http://example.com" allowunrestrictednetworkaccess="true"]; + ds1_parse [type=jsonparse path="USD"]; + ds1_multiply [type=multiply times=100]; + ds1 -> ds1_parse -> ds1_multiply; +""" +` + DirectRequestSpecWithRequestersAndMinContractPaymentTemplate = ` +type = "directrequest" +schemaVersion = 1 +requesters = ["0xaaaa1F8ee20f5565510B84f9353F1E333E753B7a", "0xbbbb70F0e81C6F3430dfdC9fa02fB22BdD818C4e"] +minContractPaymentLinkJuels = 
"1000000000000000000000" +name = "%s" +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +externalJobID = "%s" +evmChainID = 0 +observationSource = """ + ds1 [type=http method=GET url="http://example.com" allowunrestrictednetworkaccess="true"]; + ds1_parse [type=jsonparse path="USD"]; + ds1_multiply [type=multiply times=100]; + ds1 -> ds1_parse -> ds1_multiply; +""" +` + FluxMonitorSpecTemplate = ` +type = "fluxmonitor" +schemaVersion = 1 +name = "%s" +contractAddress = "0x3cCad4715152693fE3BC4460591e3D3Fbd071b42" +externalJobID = "%s" +evmChainID = 0 +threshold = 0.5 +absoluteThreshold = 0.0 # optional + +idleTimerPeriod = "1s" +idleTimerDisabled = false + +pollTimerPeriod = "1m" +pollTimerDisabled = false + +observationSource = """ +// data source 1 +ds1 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": \\"ETH\\", \\"market\\": \\"USD\\"}"]; +ds1_parse [type=jsonparse path="latest"]; + +// data source 2 +ds2 [type=http method=GET url="https://pricesource1.com" requestData="{\\"coin\\": \\"ETH\\", \\"market\\": \\"USD\\"}"]; +ds2_parse [type=jsonparse path="latest"]; + +ds1 -> ds1_parse -> answer1; +ds2 -> ds2_parse -> answer1; + +answer1 [type=median index=0]; +""" +` + + OCR2EVMSpecMinimalTemplate = `type = "offchainreporting2" +schemaVersion = 1 +name = "%s" +relay = "evm" +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +p2pv2Bootstrappers = [] +transmitterID = "0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4" +pluginType = "median" +observationSource = """ + ds [type=http method=GET url="https://chain.link/ETH-USD"]; + ds_parse [type=jsonparse path="data.price" separator="."]; + ds_multiply [type=multiply times=100]; + ds -> ds_parse -> ds_multiply; +""" +[relayConfig] +chainID = 0 +[pluginConfig] +` + WebhookSpecNoBodyTemplate = ` +type = "webhook" +schemaVersion = 1 +externalJobID = "%s" +observationSource = """ + fetch [type=bridge name="%s"] + parse_request [type=jsonparse path="data,result"]; + multiply 
[type=multiply times="100"]; + submit [type=bridge name="%s" includeInputAtKey="result"]; + + fetch -> parse_request -> multiply -> submit; +""" +` + + WebhookSpecWithBodyTemplate = ` +type = "webhook" +schemaVersion = 1 +externalJobID = "%s" +observationSource = """ + parse_request [type=jsonparse path="data,result" data="$(jobRun.requestBody)"]; + multiply [type=multiply times="100"]; + send_to_bridge [type=bridge name="%s" includeInputAtKey="result" ]; + + parse_request -> multiply -> send_to_bridge; +""" +` + + OCRBootstrapSpec = ` +type = "bootstrap" +name = "%s" +relay = "evm" +schemaVersion = 1 +contractID = "0x613a38AC1659769640aaE063C651F48E0250454C" +[relayConfig] +chainID = 1337 +` +) + +func GetOCRBootstrapSpec() string { + return fmt.Sprintf(OCRBootstrapSpec, uuid.New()) +} + +func GetDirectRequestSpec() string { + uuid := uuid.New() + return GetDirectRequestSpecWithUUID(uuid) +} + +func GetDirectRequestSpecWithUUID(u uuid.UUID) string { + return fmt.Sprintf(DirectRequestSpecTemplate, u, u) +} + +func GetOCR2EVMSpecMinimal() string { + return fmt.Sprintf(OCR2EVMSpecMinimalTemplate, uuid.New()) +} + +func GetWebhookSpecNoBody(u uuid.UUID, fetchBridge, submitBridge string) string { + return fmt.Sprintf(WebhookSpecNoBodyTemplate, u, fetchBridge, submitBridge) +} + +type KeeperSpecParams struct { + Name string + ContractAddress string + FromAddress string + EvmChainID int + ObservationSource string +} + +type KeeperSpec struct { + KeeperSpecParams + toml string +} + +func (os KeeperSpec) Toml() string { + return os.toml +} + +func GenerateKeeperSpec(params KeeperSpecParams) KeeperSpec { + template := ` +type = "keeper" +schemaVersion = 1 +name = "%s" +contractAddress = "%s" +fromAddress = "%s" +evmChainID = %d +externalJobID = "123e4567-e89b-12d3-a456-426655440002" +observationSource = """%s""" +` + escapedObvSource := strings.ReplaceAll(params.ObservationSource, `\`, `\\`) + return KeeperSpec{ + KeeperSpecParams: params, + toml: fmt.Sprintf(template, 
params.Name, params.ContractAddress, params.FromAddress, params.EvmChainID, escapedObvSource), + } +} + +type VRFSpecParams struct { + JobID string + Name string + CoordinatorAddress string + VRFVersion vrfcommon.Version + BatchCoordinatorAddress string + VRFOwnerAddress string + BatchFulfillmentEnabled bool + CustomRevertsPipelineEnabled bool + BatchFulfillmentGasMultiplier float64 + MinIncomingConfirmations int + FromAddresses []string + PublicKey string + ObservationSource string + EVMChainID string + RequestedConfsDelay int + RequestTimeout time.Duration + V2 bool + ChunkSize int + BackoffInitialDelay time.Duration + BackoffMaxDelay time.Duration + GasLanePrice *assets.Wei + PollPeriod time.Duration +} + +type VRFSpec struct { + VRFSpecParams + toml string +} + +func (vs VRFSpec) Toml() string { + return vs.toml +} + +func GenerateVRFSpec(params VRFSpecParams) VRFSpec { + jobID := "123e4567-e89b-12d3-a456-426655440000" + if params.JobID != "" { + jobID = params.JobID + } + name := "vrf-primary" + if params.Name != "" { + name = params.Name + } + vrfVersion := vrfcommon.V2 + if params.VRFVersion != "" { + vrfVersion = params.VRFVersion + } + coordinatorAddress := "0xABA5eDc1a551E55b1A570c0e1f1055e5BE11eca7" + if params.CoordinatorAddress != "" { + coordinatorAddress = params.CoordinatorAddress + } + batchCoordinatorAddress := "0x5C7B1d96CA3132576A84423f624C2c492f668Fea" + if params.BatchCoordinatorAddress != "" { + batchCoordinatorAddress = params.BatchCoordinatorAddress + } + vrfOwnerAddress := "0x5383C25DA15b1253463626243215495a3718beE4" + if params.VRFOwnerAddress != "" && vrfVersion == vrfcommon.V2 { + vrfOwnerAddress = params.VRFOwnerAddress + } + pollPeriod := 5 * time.Second + if params.PollPeriod > 0 && (vrfVersion == vrfcommon.V2 || vrfVersion == vrfcommon.V2Plus) { + pollPeriod = params.PollPeriod + } + batchFulfillmentGasMultiplier := 1.0 + if params.BatchFulfillmentGasMultiplier >= 1.0 { + batchFulfillmentGasMultiplier = 
params.BatchFulfillmentGasMultiplier + } + confirmations := 6 + if params.MinIncomingConfirmations != 0 { + confirmations = params.MinIncomingConfirmations + } + gasLanePrice := assets.GWei(100) + if params.GasLanePrice != nil { + gasLanePrice = params.GasLanePrice + } + requestTimeout := 24 * time.Hour + if params.RequestTimeout != 0 { + requestTimeout = params.RequestTimeout + } + publicKey := "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800" + if params.PublicKey != "" { + publicKey = params.PublicKey + } + chunkSize := 20 + if params.ChunkSize != 0 { + chunkSize = params.ChunkSize + } + observationSource := fmt.Sprintf(` +decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] +submit_tx [type=ethtx to="%s" + data="$(encode_tx)" + minConfirmations="0" + from="$(jobSpec.from)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}" + transmitChecker="{\\"CheckerType\\": \\"vrf_v1\\", \\"VRFCoordinatorAddress\\": \\"%s\\"}"] +decode_log->vrf->encode_tx->submit_tx +`, coordinatorAddress, coordinatorAddress) + if params.V2 { + observationSource = fmt.Sprintf(` +decode_log [type=ethabidecodelog + abi="RandomWordsRequested(bytes32 indexed keyHash,uint256 requestId,uint256 preSeed,uint64 indexed subId,uint16 minimumRequestConfirmations,uint32 callbackGasLimit,uint32 numWords,address indexed sender)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrfv2 + publicKey="$(jobSpec.publicKey)" + 
requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +estimate_gas [type=estimategaslimit + to="%s" + multiplier="1.1" + data="$(vrf.output)" +] +simulate [type=ethcall + to="%s" + gas="$(estimate_gas)" + gasPrice="$(jobSpec.maxGasPrice)" + extractRevertReason=true + contract="%s" + data="$(vrf.output)" +] +decode_log->vrf->estimate_gas->simulate +`, coordinatorAddress, coordinatorAddress, coordinatorAddress) + } + if vrfVersion == vrfcommon.V2Plus { + observationSource = fmt.Sprintf(` +decode_log [type=ethabidecodelog + abi="RandomWordsRequested(bytes32 indexed keyHash,uint256 requestId,uint256 preSeed,uint256 indexed subId,uint16 minimumRequestConfirmations,uint32 callbackGasLimit,uint32 numWords,bytes extraArgs,address indexed sender)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +generate_proof [type=vrfv2plus + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +estimate_gas [type=estimategaslimit + to="%s" + multiplier="1.1" + data="$(generate_proof.output)" + block="latest" +] +simulate_fulfillment [type=ethcall + to="%s" + gas="$(estimate_gas)" + gasPrice="$(jobSpec.maxGasPrice)" + extractRevertReason=true + contract="%s" + data="$(generate_proof.output)" + block="latest" +] +decode_log->generate_proof->estimate_gas->simulate_fulfillment +`, coordinatorAddress, coordinatorAddress, coordinatorAddress) + } + if params.ObservationSource != "" { + observationSource = params.ObservationSource + } + if params.EVMChainID == "" { + params.EVMChainID = "0" + } + template := ` +externalJobID = "%s" +type = "vrf" +schemaVersion = 1 +name = "%s" +coordinatorAddress = "%s" +evmChainID = "%s" +batchCoordinatorAddress = "%s" +batchFulfillmentEnabled = %v +batchFulfillmentGasMultiplier = %s +customRevertsPipelineEnabled = %v +minIncomingConfirmations = %d +requestedConfsDelay = %d 
+requestTimeout = "%s" +publicKey = "%s" +chunkSize = %d +backoffInitialDelay = "%s" +backoffMaxDelay = "%s" +gasLanePrice = "%s" +pollPeriod = "%s" +observationSource = """ +%s +""" +` + toml := fmt.Sprintf(template, + jobID, name, coordinatorAddress, params.EVMChainID, batchCoordinatorAddress, + params.BatchFulfillmentEnabled, strconv.FormatFloat(batchFulfillmentGasMultiplier, 'f', 2, 64), + params.CustomRevertsPipelineEnabled, + confirmations, params.RequestedConfsDelay, requestTimeout.String(), publicKey, chunkSize, + params.BackoffInitialDelay.String(), params.BackoffMaxDelay.String(), gasLanePrice.String(), + pollPeriod.String(), observationSource) + if len(params.FromAddresses) != 0 { + var addresses []string + for _, address := range params.FromAddresses { + addresses = append(addresses, fmt.Sprintf("%q", address)) + } + toml = toml + "\n" + fmt.Sprintf(`fromAddresses = [%s]`, strings.Join(addresses, ", ")) + } + if vrfVersion == vrfcommon.V2 { + toml = toml + "\n" + fmt.Sprintf(`vrfOwnerAddress = "%s"`, vrfOwnerAddress) + } + + return VRFSpec{VRFSpecParams: VRFSpecParams{ + JobID: jobID, + Name: name, + CoordinatorAddress: coordinatorAddress, + BatchCoordinatorAddress: batchCoordinatorAddress, + BatchFulfillmentEnabled: params.BatchFulfillmentEnabled, + MinIncomingConfirmations: confirmations, + PublicKey: publicKey, + ObservationSource: observationSource, + EVMChainID: params.EVMChainID, + RequestedConfsDelay: params.RequestedConfsDelay, + RequestTimeout: requestTimeout, + ChunkSize: chunkSize, + BackoffInitialDelay: params.BackoffInitialDelay, + BackoffMaxDelay: params.BackoffMaxDelay, + VRFOwnerAddress: vrfOwnerAddress, + VRFVersion: vrfVersion, + PollPeriod: pollPeriod, + }, toml: toml} +} + +type OCRSpecParams struct { + JobID string + Name string + TransmitterAddress string + ContractAddress string + DS1BridgeName string + DS2BridgeName string + EVMChainID string +} + +type OCRSpec struct { + OCRSpecParams + toml string +} + +func (os OCRSpec) Toml() 
string { + return os.toml +} + +func GenerateOCRSpec(params OCRSpecParams) OCRSpec { + jobID := params.JobID + if jobID == "" { + jobID = uuid.New().String() + } + transmitterAddress := "0xF67D0290337bca0847005C7ffD1BC75BA9AAE6e4" + if params.TransmitterAddress != "" { + transmitterAddress = params.TransmitterAddress + } + contractAddress := "0x613a38AC1659769640aaE063C651F48E0250454C" + if params.ContractAddress != "" { + contractAddress = params.ContractAddress + } + name := params.Name + if params.Name == "" { + name = jobID + } + ds1BridgeName := fmt.Sprintf("automatically_generated_bridge_%s", uuid.New().String()) + if params.DS1BridgeName != "" { + ds1BridgeName = params.DS1BridgeName + } + ds2BridgeName := fmt.Sprintf("automatically_generated_bridge_%s", uuid.New().String()) + if params.DS2BridgeName != "" { + ds2BridgeName = params.DS2BridgeName + } + + evmChainID := "0" + if params.EVMChainID != "" { + evmChainID = params.EVMChainID + } + template := ` +type = "offchainreporting" +schemaVersion = 1 +name = "%s" +contractAddress = "%s" +evmChainID = %s +p2pPeerID = "12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X" +externalJobID = "%s" +p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] +isBootstrapPeer = false +keyBundleID = "f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5" +monitoringEndpoint = "chain.link:4321" +transmitterAddress = "%s" +observationTimeout = "10s" +blockchainTimeout = "20s" +contractConfigTrackerSubscribeInterval = "2m" +contractConfigTrackerPollInterval = "1m" +contractConfigConfirmations = 3 +observationSource = """ + // data source 1 + ds1 [type=bridge name="%s"]; + ds1_parse [type=jsonparse path="one,two"]; + ds1_multiply [type=multiply times=1.23]; + + // data source 2 + ds2 [type=http method=GET url="https://chain.link/voter_turnout/USA-2020" requestData="{\\"hi\\": \\"hello\\"}"]; + ds2_parse [type=jsonparse path="three,four"]; + ds2_multiply [type=multiply 
times=4.56]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0]; + answer2 [type=bridge name="%s" index=1]; +""" +` + return OCRSpec{OCRSpecParams: OCRSpecParams{ + JobID: jobID, + Name: name, + TransmitterAddress: transmitterAddress, + DS1BridgeName: ds1BridgeName, + DS2BridgeName: ds2BridgeName, + }, toml: fmt.Sprintf(template, name, contractAddress, evmChainID, jobID, transmitterAddress, ds1BridgeName, ds2BridgeName)} +} + +type WebhookSpecParams struct { + ExternalInitiators []webhook.TOMLWebhookSpecExternalInitiator +} + +type WebhookSpec struct { + WebhookSpecParams + toml string +} + +func (ws WebhookSpec) Toml() string { + return ws.toml +} + +func GenerateWebhookSpec(params WebhookSpecParams) (ws WebhookSpec) { + var externalInitiatorsTOMLs []string + for _, wsEI := range params.ExternalInitiators { + s := fmt.Sprintf(`{ name = "%s", spec = '%s' }`, wsEI.Name, wsEI.Spec) + externalInitiatorsTOMLs = append(externalInitiatorsTOMLs, s) + } + externalInitiatorsTOML := strings.Join(externalInitiatorsTOMLs, ",\n") + template := ` +type = "webhook" +schemaVersion = 1 +externalInitiators = [ + %s +] +observationSource = """ +ds [type=http method=GET url="https://chain.link/ETH-USD"]; +ds_parse [type=jsonparse path="data,price"]; +ds_multiply [type=multiply times=100]; +ds -> ds_parse -> ds_multiply; +""" +` + ws.toml = fmt.Sprintf(template, externalInitiatorsTOML) + ws.WebhookSpecParams = params + + return ws +} + +// BlockhashStoreSpecParams defines params for building a blockhash store job spec. 
+type BlockhashStoreSpecParams struct { + JobID string + Name string + CoordinatorV1Address string + CoordinatorV2Address string + CoordinatorV2PlusAddress string + WaitBlocks int + HeartbeatPeriod time.Duration + LookbackBlocks int + BlockhashStoreAddress string + TrustedBlockhashStoreAddress string + TrustedBlockhashStoreBatchSize int32 + PollPeriod time.Duration + RunTimeout time.Duration + EVMChainID int64 + FromAddresses []string +} + +// BlockhashStoreSpec defines a blockhash store job spec. +type BlockhashStoreSpec struct { + BlockhashStoreSpecParams + toml string +} + +// Toml returns the BlockhashStoreSpec in TOML string form. +func (bhs BlockhashStoreSpec) Toml() string { + return bhs.toml +} + +// GenerateBlockhashStoreSpec creates a BlockhashStoreSpec from the given params. +func GenerateBlockhashStoreSpec(params BlockhashStoreSpecParams) BlockhashStoreSpec { + if params.JobID == "" { + params.JobID = "123e4567-e89b-12d3-a456-426655442222" + } + + if params.Name == "" { + params.Name = "blockhash-store" + } + + if params.CoordinatorV1Address == "" { + params.CoordinatorV1Address = "0x19D20b4Ec0424A530C3C1cDe874445E37747eb18" + } + + if params.CoordinatorV2Address == "" { + params.CoordinatorV2Address = "0x2498e651Ae17C2d98417C4826F0816Ac6366A95E" + } + + if params.CoordinatorV2PlusAddress == "" { + params.CoordinatorV2PlusAddress = "0x92B5e28Ac583812874e4271380c7d070C5FB6E6b" + } + + if params.TrustedBlockhashStoreAddress == "" { + params.TrustedBlockhashStoreAddress = utils.ZeroAddress.Hex() + } + + if params.TrustedBlockhashStoreBatchSize == 0 { + params.TrustedBlockhashStoreBatchSize = 20 + } + + if params.WaitBlocks == 0 { + params.WaitBlocks = 100 + } + + if params.LookbackBlocks == 0 { + params.LookbackBlocks = 200 + } + + if params.BlockhashStoreAddress == "" { + params.BlockhashStoreAddress = "0x31Ca8bf590360B3198749f852D5c516c642846F6" + } + + if params.PollPeriod == 0 { + params.PollPeriod = 30 * time.Second + } + + if params.RunTimeout == 0 { 
+ params.RunTimeout = 15 * time.Second + } + + var formattedFromAddresses string + if params.FromAddresses == nil { + formattedFromAddresses = `["0x4bd43cb108Bc3742e484f47E69EBfa378cb6278B"]` + } else { + var addresses []string + for _, address := range params.FromAddresses { + addresses = append(addresses, fmt.Sprintf("%q", address)) + } + formattedFromAddresses = fmt.Sprintf("[%s]", strings.Join(addresses, ", ")) + } + + template := ` +type = "blockhashstore" +schemaVersion = 1 +name = "%s" +coordinatorV1Address = "%s" +coordinatorV2Address = "%s" +coordinatorV2PlusAddress = "%s" +waitBlocks = %d +lookbackBlocks = %d +blockhashStoreAddress = "%s" +trustedBlockhashStoreAddress = "%s" +trustedBlockhashStoreBatchSize = %d +pollPeriod = "%s" +runTimeout = "%s" +evmChainID = "%d" +fromAddresses = %s +heartbeatPeriod = "%s" +` + toml := fmt.Sprintf(template, params.Name, params.CoordinatorV1Address, + params.CoordinatorV2Address, params.CoordinatorV2PlusAddress, params.WaitBlocks, params.LookbackBlocks, + params.BlockhashStoreAddress, params.TrustedBlockhashStoreAddress, params.TrustedBlockhashStoreBatchSize, params.PollPeriod.String(), params.RunTimeout.String(), + params.EVMChainID, formattedFromAddresses, params.HeartbeatPeriod.String()) + + return BlockhashStoreSpec{BlockhashStoreSpecParams: params, toml: toml} +} + +// BlockHeaderFeederSpecParams defines params for building a block header feeder job spec. +type BlockHeaderFeederSpecParams struct { + JobID string + Name string + CoordinatorV1Address string + CoordinatorV2Address string + CoordinatorV2PlusAddress string + WaitBlocks int + LookbackBlocks int + BlockhashStoreAddress string + BatchBlockhashStoreAddress string + PollPeriod time.Duration + RunTimeout time.Duration + EVMChainID int64 + FromAddresses []string + GetBlockhashesBatchSize uint16 + StoreBlockhashesBatchSize uint16 +} + +// BlockHeaderFeederSpec defines a block header feeder job spec. 
+type BlockHeaderFeederSpec struct {
+	BlockHeaderFeederSpecParams
+	toml string
+}
+
+// Toml returns the BlockHeaderFeederSpec in TOML string form.
+func (b BlockHeaderFeederSpec) Toml() string {
+	return b.toml
+}
+
+// GenerateBlockHeaderFeederSpec creates a BlockHeaderFeederSpec from the given params.
+// Any zero-valued field is first filled with a fixed test default (fixture
+// addresses, block counts, timeouts, batch sizes) before the TOML is rendered.
+func GenerateBlockHeaderFeederSpec(params BlockHeaderFeederSpecParams) BlockHeaderFeederSpec {
+	// NOTE(review): JobID gets a default here but is never rendered into the
+	// TOML template below — confirm whether the spec should include it.
+	if params.JobID == "" {
+		params.JobID = "123e4567-e89b-12d3-a456-426655442211"
+	}
+
+	if params.Name == "" {
+		params.Name = "blockheaderfeeder"
+	}
+
+	if params.CoordinatorV1Address == "" {
+		params.CoordinatorV1Address = "0x2d7F888fE0dD469bd81A12f77e6291508f714d4B"
+	}
+
+	if params.CoordinatorV2Address == "" {
+		params.CoordinatorV2Address = "0x2d7F888fE0dD469bd81A12f77e6291508f714d4B"
+	}
+
+	if params.CoordinatorV2PlusAddress == "" {
+		params.CoordinatorV2PlusAddress = "0x2d7F888fE0dD469bd81A12f77e6291508f714d4B"
+	}
+
+	if params.WaitBlocks == 0 {
+		params.WaitBlocks = 256
+	}
+
+	if params.LookbackBlocks == 0 {
+		params.LookbackBlocks = 500
+	}
+
+	if params.BlockhashStoreAddress == "" {
+		params.BlockhashStoreAddress = "0x016D54091ee83D42aF46e4F2d7177D0A232D2bDa"
+	}
+
+	if params.BatchBlockhashStoreAddress == "" {
+		params.BatchBlockhashStoreAddress = "0xde08B57586839BfF5DB58Bdd7FdeB7142Bff3795"
+	}
+
+	if params.PollPeriod == 0 {
+		params.PollPeriod = 60 * time.Second
+	}
+
+	if params.RunTimeout == 0 {
+		params.RunTimeout = 30 * time.Second
+	}
+
+	if params.GetBlockhashesBatchSize == 0 {
+		params.GetBlockhashesBatchSize = 10
+	}
+
+	if params.StoreBlockhashesBatchSize == 0 {
+		params.StoreBlockhashesBatchSize = 5
+	}
+
+	// Render FromAddresses as a TOML array of quoted strings; nil falls back
+	// to a single fixture address.
+	var formattedFromAddresses string
+	if params.FromAddresses == nil {
+		formattedFromAddresses = `["0xBe0b739f841bC113D4F4e4CdD16086ffAbB5f39f"]`
+	} else {
+		var addresses []string
+		for _, address := range params.FromAddresses {
+			addresses = append(addresses, fmt.Sprintf("%q", address))
+		}
+		formattedFromAddresses = fmt.Sprintf("[%s]", strings.Join(addresses, ", "))
+	}
+
+	template := `
+type = "blockheaderfeeder"
+schemaVersion = 1
+name = "%s"
+coordinatorV1Address = "%s"
+coordinatorV2Address = "%s"
+coordinatorV2PlusAddress = "%s"
+waitBlocks = %d
+lookbackBlocks = %d
+blockhashStoreAddress = "%s"
+batchBlockhashStoreAddress = "%s"
+pollPeriod = "%s"
+runTimeout = "%s"
+evmChainID = "%d"
+fromAddresses = %s
+getBlockhashesBatchSize = %d
+storeBlockhashesBatchSize = %d
+`
+	toml := fmt.Sprintf(template, params.Name, params.CoordinatorV1Address,
+		params.CoordinatorV2Address, params.CoordinatorV2PlusAddress, params.WaitBlocks, params.LookbackBlocks,
+		params.BlockhashStoreAddress, params.BatchBlockhashStoreAddress, params.PollPeriod.String(),
+		params.RunTimeout.String(), params.EVMChainID, formattedFromAddresses, params.GetBlockhashesBatchSize,
+		params.StoreBlockhashesBatchSize)
+
+	return BlockHeaderFeederSpec{BlockHeaderFeederSpecParams: params, toml: toml}
+}
+
+type StreamSpecParams struct {
+	Name     string
+	StreamID uint64
+}
+
+type StreamSpec struct {
+	StreamSpecParams
+	toml string
+}
+
+// Toml returns the StreamSpec in TOML string form.
+func (b StreamSpec) Toml() string { + return b.toml +} + +func GenerateStreamSpec(params StreamSpecParams) StreamSpec { + template := ` +type = "stream" +schemaVersion = 1 +name = "%s" +streamID = %d +observationSource = """ +ds [type=http method=GET url="https://chain.link/ETH-USD"]; +ds_parse [type=jsonparse path="data,price"]; +ds_multiply [type=multiply times=100]; +ds -> ds_parse -> ds_multiply; +""" +` + + toml := fmt.Sprintf(template, params.Name, params.StreamID) + return StreamSpec{StreamSpecParams: params, toml: toml} +} diff --git a/core/utils/Mailboxes.md b/core/utils/Mailboxes.md new file mode 100644 index 00000000..a6c6615a --- /dev/null +++ b/core/utils/Mailboxes.md @@ -0,0 +1,192 @@ +# Mailboxes + +```mermaid +flowchart + subgraph Legend + style Legend fill:none + subgraph mailboxes [Types of Mailboxes] + style mailboxes fill:none + single>single] + custom{{"custom ('capacity')"}} + high[["high capacity (100,000)"]] + end + subgraph package + style package fill:none,stroke-dasharray: 6 + subgraph type + direction LR + from>from] -- "Retrieve()" --> method(["method()"]) -- "Deliver()" --> to[[to]] + end + end + end +``` + +```mermaid +flowchart TB + subgraph core/chains/evm + subgraph gas + subgraph BlockHistoryEstimator [BlockHistoryEstimator] + direction TB + BlockHistoryEstimator-mb>mb] + BlockHistoryEstimator-OnNewLongestChain(["OnNewLongestChain()"]) -- "Deliver()" --> BlockHistoryEstimator-mb + BlockHistoryEstimator-runLoop["runLoop()"] + BlockHistoryEstimator-mb -- "Notify()" --> BlockHistoryEstimator-runLoop + BlockHistoryEstimator-mb -- "Retrieve()" --> BlockHistoryEstimator-runLoop + end + end + subgraph headtracker-pkg [headtracker] + subgraph headBroadcaster + headBroadcaster-mailbox>mailbox] + headBroadcaster-BroadcastNewLongestChain(["BroadcastNewLongestChain()"]) -- "Deliver()" --> headBroadcaster-mailbox + headBroadcaster-mailbox -- "Notify()" --> headBroadcaster-run(["run()"]) + headBroadcaster-run --> 
headBroadcaster-executeCallbacks(["executeCallbacks()"]) + headBroadcaster-executeCallbacks -- "Retrieve()" ---> headBroadcaster-mailbox + end + subgraph HeadTrackable + trackable-OnNewLongestChain(["OnNewLongestChain()"]) + end + headBroadcaster-executeCallbacks --> HeadTrackable + subgraph headtracker + direction TB + headtracker-backfillMB>backfillMB] + headtracker-broadcastMB{{"broadcastMB (10)"}} + headtracker-handleNewHead(["handleNewHead()"]) -- "Deliver()" --> headtracker-backfillMB + headtracker-handleNewHead(["handleNewHead()"]) -- "Deliver()" --> headtracker-broadcastMB + headtracker-backfillLoop(["backfillLoop()"]) + headtracker-backfillMB -- "Notify()" --> headtracker-backfillLoop -- "Retrieve()" --> headtracker-backfillMB + headtracker-broadcastLoop(["broadcastLoop()"]) + headtracker-broadcastMB -- "Notify()" --> headtracker-broadcastLoop + headtracker-broadcastLoop -- "Retrieve()" ---> headtracker-broadcastMB + headtracker-broadcastLoop -- "RetrieveLatestAndClear()" --> headtracker-broadcastMB + end + headtracker-broadcastLoop --> headBroadcaster-BroadcastNewLongestChain + end + subgraph txmgr + direction TB + subgraph EthConfirmer + EthConfirmer-mb>mb] + EthConfirmer-mb -- "Notify()" --> EthConfirmer-runLoop(["runLoop"]) -- "Retrieve" --> EthConfirmer-mb + end + subgraph Txm [Txm] + Txm-OnNewLongestChain(["OnNewLongestChain"]) -- chHeads --> Txm-runLoop(["runLoop()"]) + Txm-runLoop -- "Deliver()" --> EthConfirmer-mb + end + end + subgraph log [log] + subgraph broadcaster [broadcaster] + subgraph boradcaster-subs [" "] + broadcaster-Register(["Register()"]) -- "Deliver()" --> broadcaster-changeSubscriberStatus[[changeSubscriberStatus]] + broadcaster-onChangeSubscriberStatus(["onChangeSubscriberStatus()"]) -- "Retrieve()" --> broadcaster-changeSubscriberStatus + end + broadcaster-eventLoop(["eventLoop()"]) + subgraph broadcaster-heads [" "] + broadcaster-OnNewLongestChain(["OnNewLongestChain()"]) -- "Deliver()" --> broadcaster-newHeads>newHeads] + 
broadcaster-onNewHeads(["onNewHeads()"]) -- "RetrieveLatestAndClear()" --> broadcaster-newHeads + end + broadcaster-changeSubscriberStatus -- "Notify()" --> broadcaster-eventLoop + broadcaster-newHeads -- "Notify()" --> broadcaster-eventLoop + broadcaster-eventLoop --> broadcaster-onChangeSubscriberStatus + broadcaster-eventLoop --> broadcaster-onNewHeads + end + broadcaster-onNewHeads(["onNewHeads()"]) ---> registrations-sendLogs(["sendLogs()"]) --> handler-sendLog(["sendLog()"]) + subgraph Listener [Listener] + listener-HandleLog(["HandleLog()"]) + end + handler-sendLog --> Listener + end + end + + subgraph services + subgraph directrequest [directrequest] + subgraph listener [listener] + direction TB + dr-mbOracleRequests[[mbOracleRequests]] + dr-mbOracleCancelRequests[[mbOracleCancelRequests]] + dr-HandleLog(["HandleLog()"]) + dr-HandleLog -- "Deliver()" --> dr-mbOracleRequests + dr-HandleLog -- "Deliver()" --> dr-mbOracleCancelRequests + dr-mbOracleRequests -- "Notify()" --> dr-processOracleRequests(["processOracleRequests()"]) + dr-mbOracleCancelRequests -- "Notify()" --> dr-processCancelOracleRequests(["processCancelOracleRequests()"]) + dr-handleReceivedLogs(["handleReceivedLogs()"]) + dr-processOracleRequests --> dr-handleReceivedLogs -- "Retrieve()" ---> dr-mbOracleRequests + dr-processCancelOracleRequests --> dr-handleReceivedLogs -- "Retrieve()" ---> dr-mbOracleCancelRequests + end + end + subgraph functions [functions] + subgraph FunctionsListener [FunctionsListener] + direction TB + functions-mbOracleEvents[[mbOracleEvents]] + functions-HandleLog(["HandleLog()"]) -- "Deliver()" --> functions-mbOracleEvents + functions-mbOracleEvents -- "Notify()" --> functions-processOracleEvents(["processOracleEvents"]) + functions-processOracleEvents -- "Retrieve()" --> functions-mbOracleEvents + end + end + subgraph keeper [keeper] + subgraph UpkeepExecuter [UpkeepExecuter] + direction TB + UpkeepExecuter-mailbox>mailbox] + UpkeepExecuter-Start(["Start()"]) -- 
"Deliver()" --> UpkeepExecuter-mailbox + UpkeepExecuter-OnNewLongestChain(["OnNewLongestChain()"]) -- "Deliver()" --> UpkeepExecuter-mailbox + UpkeepExecuter-mailbox -- "Notify()" --> UpkeepExecuter-run(["run()"]) + UpkeepExecuter-run --> UpkeepExecuter-processActiveUpkeeps(["processActiveUpkeeps()"]) -- "Retrieve()" ---> UpkeepExecuter-mailbox + end + subgraph RegistrySynchronizer [RegistrySynchronizer] + direction TB + RegistrySynchronizer-mbLogs{{"mbLogs (5000)"}} + RegistrySynchronizer-HandleLog(["HandleLog()"]) -- "Deliver()" --> RegistrySynchronizer-mbLogs + RegistrySynchronizer-mbLogs -- "Notify()" --> RegistrySynchronizer-run(["run()"]) + RegistrySynchronizer-run --> RegistrySynchronizer-processLogs(["processLogs()"]) -- "RetrieveAll()" ---> RegistrySynchronizer-mbLogs + end + end + subgraph ocr [ocr] + subgraph OCRContractTracker [OCRContractTracker] + direction TB + OCRContractTracker-configsMB{{"configsMB (100)"}} + OCRContractTracker-HandleLog(["HandleLog()"]) -- "Deliver()" --> OCRContractTracker-configsMB + OCRContractTracker-configsMB -- "Notify()" --> OCRContractTracker-processLogs(["processLogs()"]) + OCRContractTracker-processLogs -- "Retrieve()" --> OCRContractTracker-configsMB + end + end + subgraph promReporter [promReporter] + subgraph promreporter-type [promReporter] + direction TB + promReporter-newHeads>newHeads] + promReporter-OnNewLongestChain(["OnNewLongestChain()"]) -- "Deliver()" --> promReporter-newHeads + promReporter-newHeads -- "Notify()" --> promReporter-eventLoop(["eventLoop()"]) + promReporter-eventLoop -- "Retrieve()" --> promReporter-newHeads + end + end + subgraph vrf [vrf] + subgraph listenerV1 [listenerV1] + direction TB + vrfv1-reqLogs[[reqLogs]] + vrfv1-HandleLog(["HandleLog()"]) -- "Deliver()" --> vrfv1-reqLogs + vrfv1-reqLogs -- "Notify()" --> vrfv1-runLogListener(["runLogListener()"]) + vrfv1-runLogListener -- "Retrieve()" --> vrfv1-reqLogs + end + subgraph listenerV2 [listenerV2] + direction TB + 
vrfv2-reqLogs[[reqLogs]] + vrfv2-HandleLog(["HandleLog()"]) -- "Deliver()" --> vrfv2-reqLogs + vrfv2-reqLogs -- "Notify()" --> vrfv2-runLogListener(["runLogListener()"]) + vrfv2-runLogListener -- "Retrieve()" --> vrfv2-reqLogs + end + end + end + + HeadTrackable --> BlockHistoryEstimator + HeadTrackable --> broadcaster + HeadTrackable ---> Txm + HeadTrackable ---> UpkeepExecuter + HeadTrackable ---> promreporter-type + + Listener --> listener + Listener --> FunctionsListener + Listener --> RegistrySynchronizer + Listener --> OCRContractTracker + Listener --> listenerV1 + Listener --> listenerV2 + + + classDef package fill:none,stroke-dasharray: 10 + class core/chains/evm,gas,headtracker-pkg,txmgr,log,services,directrequest,functions,keeper,ocr,promReporter,vrf package +``` \ No newline at end of file diff --git a/core/utils/README.md b/core/utils/README.md new file mode 100644 index 00000000..1330d9a0 --- /dev/null +++ b/core/utils/README.md @@ -0,0 +1,17 @@ +# `package utils` + +## `StartStopOnce` + +```mermaid +stateDiagram-v2 + [*] --> Unstarted + Unstarted --> Starting : StartOnce() + Starting --> StartFailed + Starting --> Started + Started --> Stopping : StopOnce() + Stopping --> Stopped + Stopping --> StopFailed + StartFailed --> [*] + Stopped --> [*] + StopFailed --> [*] +``` \ No newline at end of file diff --git a/core/utils/backoff_ticker.go b/core/utils/backoff_ticker.go new file mode 100644 index 00000000..4a25f856 --- /dev/null +++ b/core/utils/backoff_ticker.go @@ -0,0 +1,103 @@ +package utils + +import ( + "sync" + "time" + + "github.com/jpillora/backoff" +) + +type timerFactory func(d time.Duration) *time.Timer + +func newBackoffTicker(tf timerFactory, min, max time.Duration) BackoffTicker { + c := make(chan time.Time, 1) + return BackoffTicker{ + createTimer: tf, + b: backoff.Backoff{ + Min: min, + Max: max, + }, + C: c, + chStop: make(chan struct{}), + } +} + +// BackoffTicker sends ticks with periods that increase over time, over a configured 
range.
+// Ticks are delivered on C, which is created with capacity 1 (see
+// newBackoffTicker), so at most one undelivered tick is buffered.
+type BackoffTicker struct {
+	createTimer timerFactory
+	b           backoff.Backoff
+	timer       *time.Timer
+	C           chan time.Time
+	chStop      chan struct{}
+	isRunning   bool
+	sync.Mutex
+}
+
+// NewBackoffTicker returns a new BackoffTicker for the given range.
+func NewBackoffTicker(min, max time.Duration) BackoffTicker {
+	return newBackoffTicker(time.NewTimer, min, max)
+}
+
+// Start - Starts the ticker
+// Returns true if the ticker was not running yet
+func (t *BackoffTicker) Start() bool {
+	t.Lock()
+	defer t.Unlock()
+
+	if t.isRunning {
+		return false
+	}
+
+	// Reset the backoff
+	t.b.Reset()
+	go t.run()
+	t.isRunning = true
+	return true
+}
+
+// Stop stops the ticker. A ticker can be restarted by calling Start on a
+// stopped ticker.
+// Returns true if the ticker was actually stopped at this invocation (was previously running)
+func (t *BackoffTicker) Stop() bool {
+	t.Lock()
+	defer t.Unlock()
+
+	if !t.isRunning {
+		return false
+	}
+
+	// The send on unbuffered chStop synchronizes with run(), which returns
+	// upon receiving it.
+	// NOTE(review): t.timer = nil races with run(), which reads/writes
+	// t.timer without holding the mutex — verify with -race; consider keeping
+	// the timer local to run() instead. Also note the underlying time.Timer
+	// is never Stop()ed here, so it may fire once more before being GC'd.
+	t.chStop <- struct{}{}
+	t.timer = nil
+	t.isRunning = false
+	return true
+}
+
+// run delivers ticks on C with gaps that grow per the backoff schedule, until
+// a signal on chStop ends the goroutine.
+func (t *BackoffTicker) run() {
+	d := t.b.Duration()
+
+	for {
+		// Set up initial tick
+		if t.timer == nil {
+			t.timer = t.createTimer(d)
+		}
+
+		select {
+		case tickTime := <-t.timer.C:
+			// NOTE(review): C has capacity 1; if no consumer drains Ticks(),
+			// this send blocks, and a concurrent Stop() would then block on
+			// the unbuffered chStop — potential deadlock to verify.
+			t.C <- tickTime
+			t.timer.Reset(t.b.Duration())
+
+			continue
+		case <-t.chStop:
+			return
+		}
+	}
+}
+
+// Ticks returns the underlying channel.
+func (t *BackoffTicker) Ticks() <-chan time.Time { + return t.C +} + +func (t *BackoffTicker) Bounds() (time.Duration, time.Duration) { + return t.b.Min, t.b.Max +} diff --git a/core/utils/backoff_ticker_test.go b/core/utils/backoff_ticker_test.go new file mode 100644 index 00000000..ee7c023a --- /dev/null +++ b/core/utils/backoff_ticker_test.go @@ -0,0 +1,154 @@ +package utils + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestBackoffTicker_Bounds(t *testing.T) { + t.Parallel() + + bt := NewBackoffTicker(1*time.Millisecond, 2*time.Second) + min, max := bt.Bounds() + assert.Equal(t, min, 1*time.Millisecond) + assert.Equal(t, max, 2*time.Second) +} + +func TestBackoffTicker_StartTwice(t *testing.T) { + t.Parallel() + + bt := NewBackoffTicker(1*time.Second, 10*time.Second) + defer bt.Stop() + + ok := bt.Start() + assert.True(t, ok) + + ok = bt.Start() + assert.False(t, ok) +} + +func TestBackoffTicker_StopTwice(t *testing.T) { + t.Parallel() + + bt := NewBackoffTicker(1*time.Second, 10*time.Second) + ok := bt.Start() + assert.True(t, ok) + + ok = bt.Stop() + assert.True(t, ok) + + ok = bt.Stop() + assert.False(t, ok) +} + +func TestBackoffTicker_NoTicksAfterStop(t *testing.T) { + t.Parallel() + + min := 100 * time.Millisecond + max := 5 * time.Second + + chTime := make(chan time.Time, 1) + defer close(chTime) + + newFakeTimer := func(d time.Duration) *time.Timer { + assert.Equal(t, min, d) + realTimer := time.NewTimer(max) + realTimer.C = chTime + return realTimer + } + + bt := newBackoffTicker(newFakeTimer, min, max) + + ok := bt.Start() + assert.True(t, ok) + + ok = bt.Stop() + assert.True(t, ok) + + chTime <- time.Now() + + select { + case <-time.After(2 * min): + case <-bt.Ticks(): + assert.FailNow(t, "received a tick after Stop()") + } +} + +func TestBackoffTicker_Ticks(t *testing.T) { + t.Parallel() + + min := 100 * time.Millisecond + max := 5 * time.Second + + chTime := make(chan time.Time) + defer 
close(chTime) + + newFakeTimer := func(d time.Duration) *time.Timer { + assert.Equal(t, min, d) + realTimer := time.NewTimer(max) + realTimer.C = chTime + return realTimer + } + + bt := newBackoffTicker(newFakeTimer, min, max) + + ok := bt.Start() + assert.True(t, ok) + defer bt.Stop() + + t1 := time.Now() + t2 := t1.Add(1 * time.Second) + t3 := t2.Add(1 * time.Second) + times := []time.Time{t1, t2, t3} + + go func() { + for _, tm := range times { + chTime <- tm + } + }() + + for _, tm := range times { + tick := <-bt.Ticks() + assert.Equal(t, tm, tick) + } + + select { + case <-time.After(2 * min): + case <-bt.Ticks(): + assert.FailNow(t, "received an unexpected tick") + } +} + +func TestBackoffTicker_Restart(t *testing.T) { + t.Parallel() + + min := 1 * time.Second + max := 10 * time.Second + + var newTimerCount atomic.Int32 + + newFakeTimer := func(d time.Duration) *time.Timer { + newTimerCount.Add(1) + assert.Equal(t, min, d) + return time.NewTimer(max) + } + + bt := newBackoffTicker(newFakeTimer, min, max) + + ok := bt.Start() + assert.True(t, ok) + + ok = bt.Stop() + assert.True(t, ok) + + ok = bt.Start() + assert.True(t, ok) + defer bt.Stop() + + assert.Eventually(t, func() bool { + return newTimerCount.Load() == 2 + }, min, min/100, "expected timer factory to be triggered twice") +} diff --git a/core/utils/big_math/big_math.go b/core/utils/big_math/big_math.go new file mode 100644 index 00000000..a82621b9 --- /dev/null +++ b/core/utils/big_math/big_math.go @@ -0,0 +1,69 @@ +// Package bigmath compensates for awkward big.Int API. Can cause an extra allocation or two. +package bigmath + +import ( + "math/big" +) + +// I returns a new big.Int. +func I() *big.Int { return new(big.Int) } + +// Add performs addition with the given values. +func Add(addend1, addend2 *big.Int) *big.Int { return I().Add(addend1, addend2) } + +// Div performs division with the given values. 
+func Div(dividend, divisor *big.Int) *big.Int { return I().Div(dividend, divisor) }
+
+// Equal compares the given values.
+func Equal(left, right *big.Int) bool { return left.Cmp(right) == 0 }
+
+// Exp performs modular exponentiation with the given values.
+func Exp(base, exponent, modulus *big.Int) *big.Int {
+	return I().Exp(base, exponent, modulus)
+}
+
+// Mul performs multiplication with the given values.
+func Mul(multiplicand, multiplier *big.Int) *big.Int {
+	return I().Mul(multiplicand, multiplier)
+}
+
+// Mod performs modulus with the given values.
+func Mod(dividend, divisor *big.Int) *big.Int { return I().Mod(dividend, divisor) }
+
+// Sub performs subtraction with the given values.
+func Sub(minuend, subtrahend *big.Int) *big.Int { return I().Sub(minuend, subtrahend) }
+
+// Max returns the maximum of the two given values.
+// Note: it returns one of the inputs, not a copy.
+func Max(x, y *big.Int) *big.Int {
+	if x.Cmp(y) == 1 {
+		return x
+	}
+	return y
+}
+
+// Min returns the min of the two given values.
+// Note: it returns one of the inputs, not a copy.
+func Min(x, y *big.Int) *big.Int {
+	if x.Cmp(y) == -1 {
+		return x
+	}
+	return y
+}
+
+// Accumulate returns the sum of the given slice.
+func Accumulate(s []*big.Int) (r *big.Int) {
+	r = big.NewInt(0)
+	for _, e := range s {
+		r.Add(r, e)
+	}
+	return
+}
+
+// nolint
+var (
+	Zero  = big.NewInt(0)
+	One   = big.NewInt(1)
+	Two   = big.NewInt(2)
+	Three = big.NewInt(3)
+	Four  = big.NewInt(4)
+	Seven = big.NewInt(7)
+)
diff --git a/core/utils/big_math/big_math_test.go b/core/utils/big_math/big_math_test.go
new file mode 100644
index 00000000..40cbd7bc
--- /dev/null
+++ b/core/utils/big_math/big_math_test.go
+package bigmath
+
+import (
+	"math/big"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestMax(t *testing.T) {
+	m := Max(big.NewInt(1), big.NewInt(2))
+	require.Equal(t, 0, big.NewInt(2).Cmp(m))
+}
+
+func TestMin(t *testing.T) {
+	m := Min(big.NewInt(1), big.NewInt(2))
+	require.Equal(t, 0, big.NewInt(1).Cmp(m))
+}
+
+func TestAccumulate(t *testing.T) {
+	s := []*big.Int{
+		big.NewInt(1),
+		big.NewInt(2),
+		big.NewInt(3),
+		big.NewInt(4),
+		big.NewInt(5),
+	}
+	expected := big.NewInt(15)
+	require.Equal(t, expected, Accumulate(s))
+	s = []*big.Int{}
+	expected = big.NewInt(0)
+	require.Equal(t, expected, Accumulate(s))
+}
diff --git a/core/utils/collection.go b/core/utils/collection.go
new file mode 100644
index 00000000..3ab7a806
--- /dev/null
+++ b/core/utils/collection.go
+package utils
+
+import (
+	"fmt"
+)
+
+// BatchSplit splits a slice into a slice of slices, each with at most max elements.
+// The order of the input elements is preserved across the returned batches.
+// NOTE(review): only max == 0 is rejected; a negative max will panic on
+// list[max:] below — confirm callers never pass negative values.
+func BatchSplit[T any](list []T, max int) (out [][]T, err error) {
+	if max == 0 {
+		return out, fmt.Errorf("max batch length cannot be 0")
+	}
+
+	// batch list into no more than max each
+	for len(list) > max {
+		// assign to list: remaining after taking slice from beginning
+		// append to out: max length slice from beginning of list
+		list, out = list[max:], append(out, list[:max])
+	}
+	out = append(out, list) // append remaining to list (slice len < max)
+	return out, nil
+}
diff --git a/core/utils/collection_test.go b/core/utils/collection_test.go
new
file mode 100644 index 00000000..20bfb2c6 --- /dev/null +++ b/core/utils/collection_test.go @@ -0,0 +1,60 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBatchSplit(t *testing.T) { + list := []int{} + for i := 0; i < 100; i++ { + list = append(list, i) + } + + runs := []struct { + name string + input []int + max int // max per batch + num int // expected number of batches + lastLen int // expected number in last batch + expectErr bool + }{ + {"max=1", list, 1, len(list), 1, false}, + {"max=25", list, 25, 4, 25, false}, + {"max=33", list, 33, 4, 1, false}, + {"max=87", list, 87, 2, 13, false}, + {"max=len", list, len(list), 1, 100, false}, + {"max=len+1", list, len(list) + 1, 1, len(list), false}, // max exceeds len of list + {"zero-list", []int{}, 1, 1, 0, false}, // zero length list + {"zero-max", list, 0, 0, 0, true}, // zero as max input + } + + for _, r := range runs { + t.Run(r.name, func(t *testing.T) { + batch, err := BatchSplit(r.input, r.max) + if r.expectErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, r.num, len(batch)) // check number of batches + + temp := []int{} + for i := 0; i < len(batch); i++ { + expectedLen := r.max + if i == len(batch)-1 { + expectedLen = r.lastLen // expect last batch to be less than max + } + assert.Equal(t, expectedLen, len(batch[i])) // check length of batch + + temp = append(temp, batch[i]...) 
+ } + // assert order has not changed when list is reconstructed + assert.Equal(t, r.input, temp) + + }) + } + +} diff --git a/core/utils/compare.go b/core/utils/compare.go new file mode 100644 index 00000000..ab42250b --- /dev/null +++ b/core/utils/compare.go @@ -0,0 +1,6 @@ +package utils + +func IsZero[C comparable](val C) bool { + var zero C + return zero == val +} diff --git a/core/utils/config/validate.go b/core/utils/config/validate.go new file mode 100644 index 00000000..ceea5fa2 --- /dev/null +++ b/core/utils/config/validate.go @@ -0,0 +1,162 @@ +package config + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/Masterminds/semver/v3" + "go.uber.org/multierr" + + "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +// Validated configurations impose constraints that must be checked. +type Validated interface { + // ValidateConfig returns nil if the config is valid, otherwise an error describing why it is invalid. + // + // For implementations: + // - Use package multierr to accumulate all errors, rather than returning the first encountered. + // - If an anonymous field also implements ValidateConfig(), it must be called explicitly! + ValidateConfig() error +} + +// Validate returns any errors from calling Validated.ValidateConfig on cfg and any nested types that implement Validated. 
+func Validate(cfg interface{}) (err error) {
+	_, err = utils.MultiErrorList(validate(reflect.ValueOf(cfg), true))
+	return
+}
+
+// validate checks v — and, recursively, its exported struct fields, map
+// values, and slice/array elements — against the Validated interface,
+// accumulating all errors via multierr. checkInterface is false when the
+// caller has already invoked ValidateConfig for this value (anonymous fields,
+// whose methods the parent struct inherits).
+func validate(v reflect.Value, checkInterface bool) (err error) {
+	if checkInterface {
+		i := v.Interface()
+		if vc, ok := i.(Validated); ok {
+			err = multierr.Append(err, vc.ValidateConfig())
+		} else if v.CanAddr() {
+			// ValidateConfig may be declared on the pointer receiver.
+			i = v.Addr().Interface()
+			if vc, ok := i.(Validated); ok {
+				err = multierr.Append(err, vc.ValidateConfig())
+			}
+		}
+	}
+
+	t := v.Type()
+	if t.Kind() == reflect.Ptr {
+		if v.IsNil() {
+			return
+		}
+		t = t.Elem()
+		v = v.Elem()
+	}
+	switch t.Kind() {
+	case reflect.Bool, reflect.Chan, reflect.Complex128, reflect.Complex64, reflect.Float32, reflect.Float64,
+		reflect.Func, reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8, reflect.Interface,
+		reflect.Invalid, reflect.Ptr, reflect.String, reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+		reflect.Uint8, reflect.Uintptr, reflect.UnsafePointer:
+		// Scalar and opaque kinds have nothing further to recurse into.
+		return
+	case reflect.Struct:
+		for i := 0; i < t.NumField(); i++ {
+			ft := t.Field(i)
+			if !ft.IsExported() {
+				continue
+			}
+			fv := v.Field(i)
+			if !fv.CanInterface() {
+				continue
+			}
+			if fv.Kind() == reflect.Ptr && fv.IsNil() {
+				continue
+			}
+			// skip the interface if Anonymous, since the parent struct inherits the methods
+			if fe := validate(fv, !ft.Anonymous); fe != nil {
+				if ft.Anonymous {
+					err = multierr.Append(err, fe)
+				} else {
+					err = multierr.Append(err, NamedMultiErrorList(fe, ft.Name))
+				}
+			}
+		}
+		return
+	case reflect.Map:
+		iter := v.MapRange()
+		for iter.Next() {
+			mk := iter.Key()
+			mv := iter.Value()
+			// Fixed: previously checked v.CanInterface() (the map itself)
+			// instead of the element, mirroring the struct case's fv check.
+			if !mv.CanInterface() {
+				continue
+			}
+			if mv.Kind() == reflect.Ptr && mv.IsNil() {
+				continue
+			}
+			if me := validate(mv, true); me != nil {
+				err = multierr.Append(err, NamedMultiErrorList(me, fmt.Sprintf("%s", mk.Interface())))
+			}
+		}
+		return
+	case reflect.Slice, reflect.Array:
+		for i := 0; i < v.Len(); i++ {
+			iv := v.Index(i)
+			// Fixed: previously checked v.CanInterface() (the slice itself)
+			// instead of the element.
+			if !iv.CanInterface() {
+				continue
+			}
+			if iv.Kind() == reflect.Ptr && iv.IsNil() {
+				continue
+			}
+			if me := validate(iv, true); me != nil {
+				err = multierr.Append(err, NamedMultiErrorList(me, strconv.Itoa(i)))
+			}
+		}
+		return
+	}
+
+	return fmt.Errorf("should be unreachable: switch missing case for kind: %s", t.Kind())
+}
+
+// NamedMultiErrorList prefixes err with name, indenting nested error lines so
+// grouped multi-error output stays readable. Returns nil if err holds no errors.
+func NamedMultiErrorList(err error, name string) error {
+	l, merr := utils.MultiErrorList(err)
+	if l == 0 {
+		return nil
+	}
+	msg := strings.ReplaceAll(merr.Error(), "\n", "\n\t")
+	if l == 1 {
+		return fmt.Errorf("%s.%s", name, msg)
+	}
+	return fmt.Errorf("%s: %s", name, msg)
+}
+
+type ErrInvalid = config.ErrInvalid
+
+// NewErrDuplicate returns an ErrInvalid with a standard duplicate message.
+func NewErrDuplicate(name string, value any) ErrInvalid {
+	return config.NewErrDuplicate(name, value)
+}
+
+type ErrMissing = config.ErrMissing
+
+type ErrEmpty = config.ErrEmpty
+
+// UniqueStrings is a helper for tracking unique values in string form.
+type UniqueStrings = config.UniqueStrings
+
+// ErrOverride indicates a key or list element illegally duplicated across
+// multiple secrets files.
+type ErrOverride struct {
+	Name string
+}
+
+func (e ErrOverride) Error() string {
+	return fmt.Sprintf("%s: overrides (duplicate keys or list elements) are not allowed for multiple secrets files", e.Name)
+}
+
+// ErrDeprecated flags a config field scheduled for removal; a zero Version
+// means the removal release is not yet decided.
+type ErrDeprecated struct {
+	Name    string
+	Version semver.Version
+}
+
+func (e ErrDeprecated) Error() string {
+	when := "a future version"
+	if e.Version != (semver.Version{}) {
+		when = fmt.Sprintf("version %s", e.Version)
+	}
+	return fmt.Sprintf("%s: is deprecated and will be removed in %s", e.Name, when)
+}
diff --git a/core/utils/crypto/private_key.go b/core/utils/crypto/private_key.go
new file mode 100644
index 00000000..3f56ea45
--- /dev/null
+++ b/core/utils/crypto/private_key.go
+package crypto
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/accounts/keystore"
+
+	"github.com/goplugin/pluginv3.0/v2/core/utils"
+)
+
+// EncryptedPrivateKey wraps a go-ethereum keystore CryptoJSON blob.
+type EncryptedPrivateKey struct {
+	keystore.CryptoJSON
+}
+
+func
NewEncryptedPrivateKey(data []byte, passphrase string, scryptParams utils.ScryptParams) (*EncryptedPrivateKey, error) { + cryptoJSON, err := keystore.EncryptDataV3(data, []byte(passphrase), scryptParams.N, scryptParams.P) + if err != nil { + return nil, fmt.Errorf("could not encrypt key: %w", err) + } + + return &EncryptedPrivateKey{CryptoJSON: cryptoJSON}, nil +} + +// Decrypt returns the PrivateKey decrypted via auth, or an error +func (k EncryptedPrivateKey) Decrypt(passphrase string) (privkey []byte, err error) { + privkey, err = keystore.DecryptDataV3(k.CryptoJSON, passphrase) + if err != nil { + return privkey, fmt.Errorf("could not decrypt private key: %w", err) + } + return privkey, nil +} + +func (k *EncryptedPrivateKey) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return errors.New("type assertion to []byte failed") + } + + return json.Unmarshal(b, &k) +} + +func (k EncryptedPrivateKey) Value() (driver.Value, error) { + return json.Marshal(k) +} diff --git a/core/utils/crypto/private_key_test.go b/core/utils/crypto/private_key_test.go new file mode 100644 index 00000000..01c11530 --- /dev/null +++ b/core/utils/crypto/private_key_test.go @@ -0,0 +1,95 @@ +package crypto + +import ( + "crypto/ed25519" + "encoding/json" + "testing" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func Test_EncryptedPrivateKey(t *testing.T) { + t.Parallel() + + privatekey := []byte("privatekey") + passphrase := "passphrase" + ecp, err := NewEncryptedPrivateKey(privatekey, passphrase, utils.FastScryptParams) + require.NoError(t, err) + + actual, err := ecp.Decrypt(passphrase) + require.NoError(t, err) + + assert.Equal(t, privatekey, actual) +} + +func Test_EncryptedPrivateKey_Decrypt(t *testing.T) { + t.Parallel() + + passphrase := []byte("passphrase") + _, privkey, err := ed25519.GenerateKey(nil) + 
require.NoError(t, err) + encprivkey, err := keystore.EncryptDataV3(privkey, passphrase, 2, 1) + require.NoError(t, err) + + ecp := EncryptedPrivateKey{CryptoJSON: encprivkey} + + actual, err := ecp.Decrypt(string(passphrase)) + require.NoError(t, err) + + assert.Equal(t, []byte(privkey), actual) +} + +func Test_EncryptedPrivateKey_Scan(t *testing.T) { + t.Parallel() + + _, privKey, err := ed25519.GenerateKey(nil) + require.NoError(t, err) + + encPrivkey, err := keystore.EncryptDataV3(privKey, []byte("passphrase"), 2, 1) + require.NoError(t, err) + b, err := json.Marshal(encPrivkey) + require.NoError(t, err) + + actual := &EncryptedPrivateKey{} + + // Error if not bytes + err = actual.Scan("not bytes") + assert.Error(t, err) + + // Bytes + err = actual.Scan(b) + require.NoError(t, err) + + // Unmarshaling bytes into a struct results in numbers being stored as a + // float64 which prevents us from asserting against the generated public key + // which uses ints. Instead we do a JSON string comparison + expPrivKey, err := json.Marshal(EncryptedPrivateKey{CryptoJSON: encPrivkey}) + require.NoError(t, err) + actPrivKey, err := json.Marshal(actual) + require.NoError(t, err) + assert.JSONEq(t, string(expPrivKey), string(actPrivKey)) +} + +func Test_EncryptedPrivateKey_Value(t *testing.T) { + t.Parallel() + + _, privKey, err := ed25519.GenerateKey(nil) + require.NoError(t, err) + + cryptoJSON, err := keystore.EncryptDataV3(privKey, []byte("passphrase"), 2, 1) + require.NoError(t, err) + + encPrivkey := EncryptedPrivateKey{CryptoJSON: cryptoJSON} + + dv, err := encPrivkey.Value() + require.NoError(t, err) + + expected, err := json.Marshal(EncryptedPrivateKey{CryptoJSON: cryptoJSON}) + require.NoError(t, err) + + assert.Equal(t, expected, dv) +} diff --git a/core/utils/crypto/public_key.go b/core/utils/crypto/public_key.go new file mode 100644 index 00000000..8dfc299d --- /dev/null +++ b/core/utils/crypto/public_key.go @@ -0,0 +1,63 @@ +package crypto + +import ( + 
"database/sql/driver" + "encoding/hex" + "encoding/json" + "fmt" +) + +// PublicKey defines a type which can be used for JSON and SQL. +type PublicKey []byte + +// PublicKeyFromHex generates a public key from a hex string +func PublicKeyFromHex(hexStr string) (*PublicKey, error) { + result, err := hex.DecodeString(hexStr) + if err != nil { + return nil, err + } + + pubKey := PublicKey(result) + + return &pubKey, err +} + +func (k PublicKey) String() string { + return hex.EncodeToString(k) +} + +func (k PublicKey) MarshalJSON() ([]byte, error) { + return json.Marshal(hex.EncodeToString(k)) +} + +func (k *PublicKey) UnmarshalJSON(in []byte) error { + var hexStr string + if err := json.Unmarshal(in, &hexStr); err != nil { + return err + } + + result, err := hex.DecodeString(hexStr) + if err != nil { + return err + } + + *k = PublicKey(result) + return nil +} + +func (k *PublicKey) Scan(value interface{}) error { + switch v := value.(type) { + case nil: + *k = nil + return nil + case []byte: + *k = v + return nil + default: + return fmt.Errorf("invalid public key bytes got %T wanted []byte", v) + } +} + +func (k PublicKey) Value() (driver.Value, error) { + return []byte(k), nil +} diff --git a/core/utils/crypto/public_key_test.go b/core/utils/crypto/public_key_test.go new file mode 100644 index 00000000..e835f289 --- /dev/null +++ b/core/utils/crypto/public_key_test.go @@ -0,0 +1,83 @@ +package crypto + +import ( + "crypto/ed25519" + "encoding/hex" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_PublicKey_String(t *testing.T) { + t.Parallel() + + pubKey, _, err := ed25519.GenerateKey(nil) + require.NoError(t, err) + + pk := PublicKey(pubKey) + expected := hex.EncodeToString(pubKey) + + assert.Equal(t, expected, pk.String()) +} + +func Test_PublicKey_MarshalJSON(t *testing.T) { + t.Parallel() + + pubKey, _, err := ed25519.GenerateKey(nil) + require.NoError(t, err) + hexKey := hex.EncodeToString(pubKey) 
+ + pk := PublicKey(pubKey) + actual, err := pk.MarshalJSON() + require.NoError(t, err) + + assert.Equal(t, fmt.Sprintf(`"%s"`, hexKey), string(actual)) +} + +func Test_PublicKey_UnmarshalJSON(t *testing.T) { + t.Parallel() + + pubKey, _, err := ed25519.GenerateKey(nil) + require.NoError(t, err) + hexKey := hex.EncodeToString(pubKey) + + actual := &PublicKey{} + err = actual.UnmarshalJSON([]byte(fmt.Sprintf(`"%s"`, hexKey))) + require.NoError(t, err) + + assert.Equal(t, PublicKey(pubKey), *actual) +} + +func Test_PublicKey_Scan(t *testing.T) { + pubKey, _, err := ed25519.GenerateKey(nil) + require.NoError(t, err) + + actual := &PublicKey{} + + // Error if not bytes + err = actual.Scan("not bytes") + assert.Error(t, err) + + // Nil + err = actual.Scan(nil) + require.NoError(t, err) + nilPk := PublicKey(nil) + assert.Equal(t, &nilPk, actual) + + // Bytes + err = actual.Scan([]byte(pubKey)) + require.NoError(t, err) + assert.Equal(t, PublicKey(pubKey), *actual) +} + +func Test_PublicKey_Value(t *testing.T) { + pubKey, _, err := ed25519.GenerateKey(nil) + require.NoError(t, err) + + pk := PublicKey(pubKey) + dv, err := pk.Value() + require.NoError(t, err) + assert.Equal(t, []byte(pubKey), dv) +} diff --git a/core/utils/decimal.go b/core/utils/decimal.go new file mode 100644 index 00000000..48343839 --- /dev/null +++ b/core/utils/decimal.go @@ -0,0 +1,61 @@ +package utils + +import ( + "math" + "math/big" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" +) + +// ToDecimal converts an input to a decimal +func ToDecimal(input interface{}) (decimal.Decimal, error) { + switch v := input.(type) { + case string: + return decimal.NewFromString(v) + case int: + return decimal.New(int64(v), 0), nil + case int8: + return decimal.New(int64(v), 0), nil + case int16: + return decimal.New(int64(v), 0), nil + case int32: + return decimal.New(int64(v), 0), nil + case int64: + return decimal.New(v, 0), nil + case uint: + return decimal.New(int64(v), 0), nil + case uint8: + 
return decimal.New(int64(v), 0), nil + case uint16: + return decimal.New(int64(v), 0), nil + case uint32: + return decimal.New(int64(v), 0), nil + case uint64: + return decimal.New(int64(v), 0), nil + case float64: + if !validFloat(v) { + return decimal.Decimal{}, errors.Errorf("invalid float %v, cannot convert to decimal", v) + } + return decimal.NewFromFloat(v), nil + case float32: + if !validFloat(float64(v)) { + return decimal.Decimal{}, errors.Errorf("invalid float %v, cannot convert to decimal", v) + } + return decimal.NewFromFloat32(v), nil + case big.Int: + return decimal.NewFromBigInt(&v, 0), nil + case *big.Int: + return decimal.NewFromBigInt(v, 0), nil + case decimal.Decimal: + return v, nil + case *decimal.Decimal: + return *v, nil + default: + return decimal.Decimal{}, errors.Errorf("type %T cannot be converted to decimal.Decimal (%v)", input, input) + } +} + +func validFloat(f float64) bool { + return !math.IsNaN(f) && !math.IsInf(f, 0) +} diff --git a/core/utils/decimal_test.go b/core/utils/decimal_test.go new file mode 100644 index 00000000..6a44f462 --- /dev/null +++ b/core/utils/decimal_test.go @@ -0,0 +1,58 @@ +package utils + +import ( + "math" + "math/big" + "testing" + + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" +) + +func TestDecimal(t *testing.T) { + t.Parallel() + + dec := decimal.New(1, 0) + big := big.NewInt(1) + + var tt = []struct { + v interface{} + expectedErr bool + }{ + {"1.1", false}, + {int(1), false}, + {int(-1), false}, + {int8(1), false}, + {int16(1), false}, + {int32(1), false}, + {int64(-1), false}, + {int32(-1), false}, + {uint(1), false}, + {uint8(1), false}, + {uint16(1), false}, + {uint32(1), false}, + {uint64(1), false}, + {float64(1.1), false}, + {float32(1.1), false}, + {float64(-1.1), false}, + {dec, false}, + {&dec, false}, + {big, false}, + {*big, false}, + {math.Inf(1), true}, + {math.Inf(-1), true}, + {float32(math.Inf(-1)), true}, + {float32(math.Inf(1)), true}, + {math.NaN(), true}, 
+ {float32(math.NaN()), true}, + {true, true}, + } + for _, tc := range tt { + _, err := ToDecimal(tc.v) + if tc.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + } +} diff --git a/core/utils/deferable_write_closer.go b/core/utils/deferable_write_closer.go new file mode 100644 index 00000000..e27b6d91 --- /dev/null +++ b/core/utils/deferable_write_closer.go @@ -0,0 +1,61 @@ +package utils + +import ( + "io" + "sync" +) + +// DeferableWriteCloser is to be used in leiu of defer'ing +// Close on an [io.WriteCloser] (// For more background see https://www.joeshaw.org/dont-defer-close-on-writable-files/) +// Callers should *both* +// explicitly call Close and check for errors when done with the underlying writerclose +// *and* defer the Close() to handle returns before the explicit close +// +// For example rather than +// +// import "os" +// f, err := os.Create("./foo") +// if err != nil { return err} +// defer f.Close() +// return f.Write([]bytes("hi")) +// +// do +// +// import "os" +// f, err := os.Create("./foo") +// if err != nil {return nil} +// wc := NewDeferableWriteCloser(f) +// defer wc.Close() +// err = wc.Write([]bytes("hi")) +// if err != nil {return err} +// return wc.Close() +type DeferableWriteCloser struct { + mu sync.Mutex + closed bool + closeErr error + io.WriteCloser +} + +// NewDeferableWriteCloser creates a deferable writercloser. Callers +// should explicit call and defer Close. See DeferabelWriterCloser for details. +func NewDeferableWriteCloser(wc io.WriteCloser) *DeferableWriteCloser { + return &DeferableWriteCloser{ + WriteCloser: wc, + } +} + +// Close closes the WriterCloser. The underlying Closer +// is Closed exactly once and resulting error is cached. 
+// Should be called explicitly AND defered +// Thread safe +func (wc *DeferableWriteCloser) Close() error { + + wc.mu.Lock() + defer wc.mu.Unlock() + if !wc.closed { + wc.closeErr = wc.WriteCloser.Close() + wc.closed = true + } + return wc.closeErr + +} diff --git a/core/utils/deferable_write_closer_test.go b/core/utils/deferable_write_closer_test.go new file mode 100644 index 00000000..d12ff1c4 --- /dev/null +++ b/core/utils/deferable_write_closer_test.go @@ -0,0 +1,40 @@ +package utils + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDeferableWriteCloser_Close(t *testing.T) { + + d := t.TempDir() + f, err := os.Create(filepath.Join(d, "test-file")) + require.NoError(t, err) + + wc := NewDeferableWriteCloser(f) + wantStr := "wanted" + _, err = wc.Write([]byte(wantStr)) + assert.NoError(t, err) + defer func() { + assert.NoError(t, wc.Close()) + }() + + assert.NoError(t, wc.Close()) + assert.True(t, wc.closed) + // safe to close multiple times + assert.NoError(t, wc.Close()) + + _, err = f.Write([]byte("after close")) + assert.ErrorIs(t, err, os.ErrClosed) + + _, err = wc.Write([]byte("write to wc after close")) + assert.ErrorIs(t, err, os.ErrClosed) + + r, err := os.ReadFile(f.Name()) + assert.NoError(t, err) + assert.Equal(t, wantStr, string(r)) +} diff --git a/core/utils/errors.go b/core/utils/errors.go new file mode 100644 index 00000000..3ed5e6b0 --- /dev/null +++ b/core/utils/errors.go @@ -0,0 +1,36 @@ +package utils + +import ( + "fmt" + "strings" + + "go.uber.org/multierr" +) + +type multiErrorList []error + +// MultiErrorList returns an error which formats underlying errors as a list, or nil if err is nil. 
+func MultiErrorList(err error) (int, error) { + if err == nil { + return 0, nil + } + errs := multierr.Errors(err) + return len(errs), multiErrorList(errs) +} + +func (m multiErrorList) Error() string { + l := len(m) + if l == 1 { + return m[0].Error() + } + var sb strings.Builder + fmt.Fprintf(&sb, "%d errors:", l) + for _, e := range m { + fmt.Fprintf(&sb, "\n\t- %v", e) + } + return sb.String() +} + +func (m multiErrorList) Unwrap() []error { + return m +} diff --git a/core/utils/eth_signatures.go b/core/utils/eth_signatures.go new file mode 100644 index 00000000..688d6710 --- /dev/null +++ b/core/utils/eth_signatures.go @@ -0,0 +1,47 @@ +package utils + +import ( + "crypto/ecdsa" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/pkg/errors" +) + +const EthSignedMessagePrefix = "\x19Ethereum Signed Message:\n" + +func GetSignersEthAddress(msg []byte, sig []byte) (recoveredAddr common.Address, err error) { + if len(sig) != 65 { + return recoveredAddr, errors.New("invalid signature: signature length must be 65 bytes") + } + + // Adjust the V component of the signature in case it uses 27 or 28 instead of 0 or 1 + if sig[64] == 27 || sig[64] == 28 { + sig[64] -= 27 + } + if sig[64] != 0 && sig[64] != 1 { + return recoveredAddr, errors.New("invalid signature: invalid V component") + } + + prefixedMsg := fmt.Sprintf("%s%d%s", EthSignedMessagePrefix, len(msg), msg) + hash := crypto.Keccak256Hash([]byte(prefixedMsg)) + + sigPublicKey, err := crypto.SigToPub(hash[:], sig) + if err != nil { + return recoveredAddr, err + } + + recoveredAddr = crypto.PubkeyToAddress(*sigPublicKey) + return recoveredAddr, nil +} + +func GenerateEthPrefixedMsgHash(msg []byte) (hash common.Hash) { + prefixedMsg := fmt.Sprintf("%s%d%s", EthSignedMessagePrefix, len(msg), msg) + return crypto.Keccak256Hash([]byte(prefixedMsg)) +} + +func GenerateEthSignature(privateKey *ecdsa.PrivateKey, msg []byte) (signature []byte, err error) { + 
hash := GenerateEthPrefixedMsgHash(msg) + return crypto.Sign(hash[:], privateKey) +} diff --git a/core/utils/eth_signatures_test.go b/core/utils/eth_signatures_test.go new file mode 100644 index 00000000..85b94500 --- /dev/null +++ b/core/utils/eth_signatures_test.go @@ -0,0 +1,53 @@ +package utils + +import ( + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetSignersEthAddress_Success(t *testing.T) { + privateKey, err := crypto.GenerateKey() + require.NoError(t, err) + address := crypto.PubkeyToAddress(privateKey.PublicKey) + + msg := []byte("test message") + sig, err := GenerateEthSignature(privateKey, msg) + assert.NoError(t, err) + + recoveredAddress, err := GetSignersEthAddress(msg, sig) + assert.NoError(t, err) + assert.Equal(t, address, recoveredAddress) +} + +func TestGetSignersEthAddress_InvalidSignatureLength(t *testing.T) { + msg := []byte("test message") + sig := []byte("invalid signature length") + _, err := GetSignersEthAddress(msg, sig) + assert.EqualError(t, err, "invalid signature: signature length must be 65 bytes") +} + +func TestGenerateEthPrefixedMsgHash(t *testing.T) { + msg := []byte("test message") + expectedPrefix := "\x19Ethereum Signed Message:\n" + expectedHash := crypto.Keccak256Hash([]byte(expectedPrefix + "12" + string(msg))) + + hash := GenerateEthPrefixedMsgHash(msg) + assert.Equal(t, expectedHash, hash) +} + +func TestGenerateEthSignature(t *testing.T) { + privateKey, err := crypto.GenerateKey() + assert.NoError(t, err) + + msg := []byte("test message") + signature, err := GenerateEthSignature(privateKey, msg) + assert.NoError(t, err) + assert.Len(t, signature, 65) + + recoveredPub, err := crypto.SigToPub(GenerateEthPrefixedMsgHash(msg).Bytes(), signature) + assert.NoError(t, err) + assert.Equal(t, privateKey.PublicKey, *recoveredPub) +} diff --git a/core/utils/files.go b/core/utils/files.go new file mode 100644 index 
00000000..a62e45bf --- /dev/null +++ b/core/utils/files.go @@ -0,0 +1,172 @@ +package utils + +import ( + "fmt" + "os" + "regexp" + "strconv" + "strings" + "syscall" + + "github.com/pkg/errors" + "go.uber.org/multierr" +) + +// FileExists returns true if a file at the passed string exists. +func FileExists(name string) (bool, error) { + if _, err := os.Stat(name); err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, errors.Wrapf(err, "failed to check if file exists %q", name) + } + return true, nil +} + +// TooPermissive checks if the file has more than the allowed permissions +func TooPermissive(fileMode, maxAllowedPerms os.FileMode) bool { + return fileMode&^maxAllowedPerms != 0 +} + +// IsFileOwnedByPlugin attempts to read fileInfo to verify file owner +func IsFileOwnedByPlugin(fileInfo os.FileInfo) (bool, error) { + stat, ok := fileInfo.Sys().(*syscall.Stat_t) + if !ok { + return false, errors.Errorf("Unable to determine file owner of %s", fileInfo.Name()) + } + return int(stat.Uid) == os.Getuid(), nil +} + +// EnsureDirAndMaxPerms ensures that the given path exists, that it's a directory, +// and that it has permissions that are no more permissive than the given ones. 
+// +// - If the path does not exist, it is created +// - If the path exists, but is not a directory, an error is returned +// - If the path exists, and is a directory, but has the wrong perms, it is chmod'ed +func EnsureDirAndMaxPerms(path string, perms os.FileMode) error { + stat, err := os.Stat(path) + if err != nil && !os.IsNotExist(err) { + // Regular error + return err + } else if os.IsNotExist(err) { + // Dir doesn't exist, create it with desired perms + return os.MkdirAll(path, perms) + } else if !stat.IsDir() { + // Path exists, but it's a file, so don't clobber + return errors.Errorf("%v already exists and is not a directory", path) + } else if stat.Mode() != perms { + // Dir exists, but wrong perms, so chmod + return os.Chmod(path, stat.Mode()&perms) + } + return nil +} + +// WriteFileWithMaxPerms writes `data` to `path` and ensures that +// the file has permissions that are no more permissive than the given ones. +func WriteFileWithMaxPerms(path string, data []byte, perms os.FileMode) (err error) { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, perms) + if err != nil { + return err + } + defer func() { err = multierr.Combine(err, f.Close()) }() + err = EnsureFileMaxPerms(f, perms) + if err != nil { + return + } + _, err = f.Write(data) + return +} + +// EnsureFileMaxPerms ensures that the given file has permissions +// that are no more permissive than the given ones. +func EnsureFileMaxPerms(file *os.File, perms os.FileMode) error { + stat, err := file.Stat() + if err != nil { + return err + } + if stat.Mode() == perms { + return nil + } + return file.Chmod(stat.Mode() & perms) +} + +// EnsureFilepathMaxPerms ensures that the file at the given filepath +// has permissions that are no more permissive than the given ones. 
+func EnsureFilepathMaxPerms(filepath string, perms os.FileMode) (err error) { + dst, err := os.OpenFile(filepath, os.O_RDWR, perms) + if err != nil { + return err + } + defer func() { err = multierr.Combine(err, dst.Close()) }() + return EnsureFileMaxPerms(dst, perms) +} + +// FileSize repesents a file size in bytes. +type FileSize uint64 + +// nolint +const ( + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB +) + +var ( + fsregex = regexp.MustCompile(`(\d+\.?\d*)(tb|gb|mb|kb|b)?`) + + fsUnitMap = map[string]int{ + "tb": TB, + "gb": GB, + "mb": MB, + "kb": KB, + "b": 1, + "": 1, + } +) + +// MarshalText encodes s as a human readable string. +func (s FileSize) MarshalText() ([]byte, error) { + if s >= TB { + return []byte(fmt.Sprintf("%.2ftb", float64(s)/TB)), nil + } else if s >= GB { + return []byte(fmt.Sprintf("%.2fgb", float64(s)/GB)), nil + } else if s >= MB { + return []byte(fmt.Sprintf("%.2fmb", float64(s)/MB)), nil + } else if s >= KB { + return []byte(fmt.Sprintf("%.2fkb", float64(s)/KB)), nil + } + return []byte(fmt.Sprintf("%db", s)), nil +} + +// UnmarshalText parses a file size from bs in to s. 
+func (s *FileSize) UnmarshalText(bs []byte) error { + lc := strings.ToLower(strings.TrimSpace(string(bs))) + matches := fsregex.FindAllStringSubmatch(lc, -1) + if len(matches) != 1 || len(matches[0]) != 3 || fmt.Sprintf("%s%s", matches[0][1], matches[0][2]) != lc { + return errors.Errorf(`bad filesize expression: "%v"`, string(bs)) + } + + var ( + num = matches[0][1] + unit = matches[0][2] + ) + + value, err := strconv.ParseFloat(num, 64) + if err != nil { + return errors.Errorf(`bad filesize value: "%v"`, string(bs)) + } + + u, ok := fsUnitMap[unit] + if !ok { + return errors.Errorf(`bad filesize unit: "%v"`, unit) + } + + *s = FileSize(value * float64(u)) + return nil +} + +func (s FileSize) String() string { + str, _ := s.MarshalText() + return string(str) +} diff --git a/core/utils/files_test.go b/core/utils/files_test.go new file mode 100644 index 00000000..8c47da22 --- /dev/null +++ b/core/utils/files_test.go @@ -0,0 +1,131 @@ +package utils + +import ( + "crypto/rand" + "encoding/hex" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func tempFileName() string { + randBytes := make([]byte, 16) + _, err := rand.Read(randBytes) + if err != nil { + panic(err) + } + return filepath.Join(os.TempDir(), hex.EncodeToString(randBytes)) +} + +func TestFileExists(t *testing.T) { + t.Parallel() + + exists, err := FileExists(tempFileName()) + require.NoError(t, err) + assert.False(t, exists) + + exists, err = FileExists(os.Args[0]) + require.NoError(t, err) + assert.True(t, exists) +} + +func TestTooPermissive(t *testing.T) { + t.Parallel() + + res := TooPermissive(os.FileMode(0700), os.FileMode(0600)) + assert.True(t, res) + + res = TooPermissive(os.FileMode(0600), os.FileMode(0600)) + assert.False(t, res) + + res = TooPermissive(os.FileMode(0600), os.FileMode(0700)) + assert.False(t, res) +} + +func TestFileSize_MarshalText_String(t *testing.T) { + t.Parallel() + + tests := []struct { + input 
FileSize + expected string + }{ + {FileSize(0), "0b"}, + {FileSize(1), "1b"}, + {FileSize(MB), "1.00mb"}, + {FileSize(KB), "1.00kb"}, + {FileSize(MB), "1.00mb"}, + {FileSize(GB), "1.00gb"}, + {FileSize(TB), "1.00tb"}, + {FileSize(5 * GB), "5.00gb"}, + {FileSize(0.5 * GB), "500.00mb"}, + } + + for _, test := range tests { + test := test + + t.Run(test.expected, func(t *testing.T) { + t.Parallel() + + bstr, err := test.input.MarshalText() + assert.NoError(t, err) + assert.Equal(t, test.expected, string(bstr)) + assert.Equal(t, test.expected, test.input.String()) + }) + } +} + +func TestFileSize_UnmarshalText(t *testing.T) { + t.Parallel() + + tests := []struct { + input string + expected FileSize + valid bool + }{ + // valid + {"0", FileSize(0), true}, + {"0.0", FileSize(0), true}, + {"1.12345", FileSize(1), true}, + {"123", FileSize(123), true}, + {"123", FileSize(123), true}, + {"123b", FileSize(123), true}, + {"123B", FileSize(123), true}, + {"123kb", FileSize(123 * KB), true}, + {"123KB", FileSize(123 * KB), true}, + {"123mb", FileSize(123 * MB), true}, + {"123gb", FileSize(123 * GB), true}, + {"123tb", FileSize(123 * TB), true}, + {"5.5mb", FileSize(5.5 * MB), true}, + {"0.5mb", FileSize(0.5 * MB), true}, + // invalid + {"", FileSize(0), false}, + {"xyz", FileSize(0), false}, + {"-1g", FileSize(0), false}, + {"+1g", FileSize(0), false}, + {"1g", FileSize(0), false}, + {"1t", FileSize(0), false}, + {"1a", FileSize(0), false}, + {"1tbtb", FileSize(0), false}, + {"1tb1tb", FileSize(0), false}, + } + + for _, test := range tests { + test := test + + t.Run(test.input, func(t *testing.T) { + t.Parallel() + + var fs FileSize + err := fs.UnmarshalText([]byte(test.input)) + if test.valid { + assert.NoError(t, err) + assert.Equal(t, test.expected, fs) + } else { + assert.Error(t, err) + } + }) + } +} diff --git a/core/utils/finite_ticker.go b/core/utils/finite_ticker.go new file mode 100644 index 00000000..d1e437f0 --- /dev/null +++ b/core/utils/finite_ticker.go @@ -0,0 
+1,34 @@ +package utils + +import ( + "sync" + "time" +) + +// FiniteTicker starts a goroutine to execute the given function periodically, until the returned function is called. +func FiniteTicker(period time.Duration, onTick func()) func() { + tick := time.NewTicker(period) + chStop := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer wg.Done() + for { + select { + case <-tick.C: + onTick() + case <-chStop: + return + } + } + }() + + // NOTE: tick.Stop does not close the ticker channel, + // so we still need another way of returning (chStop). + return func() { + tick.Stop() + close(chStop) + wg.Wait() + } +} diff --git a/core/utils/finite_ticker_test.go b/core/utils/finite_ticker_test.go new file mode 100644 index 00000000..e3182efc --- /dev/null +++ b/core/utils/finite_ticker_test.go @@ -0,0 +1,36 @@ +package utils_test + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestFiniteTicker(t *testing.T) { + t.Parallel() + + var counter atomic.Int32 + + onTick := func() { + counter.Add(1) + } + + now := time.Now() + stop := utils.FiniteTicker(testutils.TestInterval, onTick) + + assert.Eventually(t, func() bool { + return counter.Load() >= 10 + }, testutils.WaitTimeout(t), testutils.TestInterval) + + assert.Greater(t, time.Now().Add(10*testutils.TestInterval), now) + + stop() + last := counter.Load() + time.Sleep(2 * testutils.TestInterval) + assert.Equal(t, last, counter.Load()) +} diff --git a/core/utils/hash.go b/core/utils/hash.go new file mode 100644 index 00000000..b0a32454 --- /dev/null +++ b/core/utils/hash.go @@ -0,0 +1,60 @@ +package utils + +import ( + "encoding/hex" + "fmt" + "strings" + + "github.com/pkg/errors" +) + +const HashLength = 32 + +// Hash is a simplified version of go-ethereum's common.Hash to avoid +// go-ethereum dependency +// It represents a 32 byte 
fixed size array that marshals/unmarshals assuming a +// 0x prefix +type Hash [32]byte + +// BytesToHash sets b to hash. +// If b is larger than len(h), b will be cropped from the left. +func BytesToHash(b []byte) Hash { + var h Hash + h.SetBytes(b) + return h +} + +// SetBytes sets the hash to the value of b. +// If b is larger than len(h), b will be cropped from the left. +func (h *Hash) SetBytes(b []byte) { + if len(b) > len(h) { + b = b[len(b)-HashLength:] + } + + copy(h[HashLength-len(b):], b) +} + +// Hex converts a hash to a hex string. +func (h Hash) Hex() string { return fmt.Sprintf("0x%s", hex.EncodeToString(h[:])) } + +// String implements the stringer interface and is used also by the logger when +// doing full logging into a file. +func (h Hash) String() string { + return h.Hex() +} + +// UnmarshalText parses a hash in hex syntax. +func (h *Hash) UnmarshalText(input []byte) error { + if !strings.HasPrefix(string(input), "0x") { + return errors.New("hash: expected a hex string starting with '0x'") + } + phex := new(PlainHexBytes) + if err := phex.UnmarshalText(input[2:]); err != nil { + return fmt.Errorf("hash: %w", err) + } + if len(*phex) != 32 { + return fmt.Errorf("hash: expected 32-byte sequence, got %d bytes", len(*phex)) + } + copy((*h)[:], (*phex)) + return nil +} diff --git a/core/utils/hash_test.go b/core/utils/hash_test.go new file mode 100644 index 00000000..07ab11f9 --- /dev/null +++ b/core/utils/hash_test.go @@ -0,0 +1,65 @@ +package utils + +import ( + "encoding/json" + "strings" + "testing" +) + +func Test_Hash_UnmarshalText(t *testing.T) { + var tests = []struct { + Prefix string + Size int + Error string + }{ + {"", 62, "hash: expected a hex string starting with '0x'"}, + {"0x", 66, "hash: expected 32-byte sequence, got 33 bytes"}, + {"0x", 63, "hash: UnmarshalText failed: odd length"}, + {"0x", 0, "hash: expected 32-byte sequence, got 0 bytes"}, + {"0x", 64, ""}, + {"0X", 64, "hash: expected a hex string starting with '0x'"}, + } + 
for _, test := range tests { + input := test.Prefix + strings.Repeat("0", test.Size) + v := new(Hash) + err := v.UnmarshalText([]byte(input)) + if err == nil { + if test.Error != "" { + t.Errorf("%s: error mismatch: have nil, want %q", input, test.Error) + } + } else { + if err.Error() != test.Error { + t.Errorf("%s: error mismatch: have %q, want %q", input, err, test.Error) + } + } + } +} + +func Test_Hash_UnmarshalJSON(t *testing.T) { + var tests = []struct { + Prefix string + Size int + Error string + }{ + {"", 62, "hash: expected a hex string starting with '0x'"}, + {"0x", 66, "hash: expected 32-byte sequence, got 33 bytes"}, + {"0x", 63, "hash: UnmarshalText failed: odd length"}, + {"0x", 0, "hash: expected 32-byte sequence, got 0 bytes"}, + {"0x", 64, ""}, + {"0X", 64, "hash: expected a hex string starting with '0x'"}, + } + for _, test := range tests { + input := `"` + test.Prefix + strings.Repeat("0", test.Size) + `"` + var v Hash + err := json.Unmarshal([]byte(input), &v) + if err == nil { + if test.Error != "" { + t.Errorf("%s: error mismatch: have nil, want %q", input, test.Error) + } + } else { + if err.Error() != test.Error { + t.Errorf("%s: error mismatch: have %q, want %q", input, err, test.Error) + } + } + } +} diff --git a/core/utils/http/http.go b/core/utils/http/http.go new file mode 100644 index 00000000..22ca5116 --- /dev/null +++ b/core/utils/http/http.go @@ -0,0 +1,82 @@ +package http + +import ( + "fmt" + "io" + "net/http" + "net/url" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +type httpClientConfig interface { + URL() url.URL // DatabaseURL +} + +// NewRestrictedHTTPClient returns a secure HTTP Client (queries to certain +// local addresses are blocked) +func NewRestrictedHTTPClient(cfg httpClientConfig, lggr logger.Logger) *http.Client { + tr := newDefaultTransport() + tr.DialContext = makeRestrictedDialContext(cfg, lggr) + return &http.Client{Transport: tr} +} + +// NewUnrestrictedClient returns a HTTP Client with 
no Transport restrictions +func NewUnrestrictedHTTPClient() *http.Client { + unrestrictedTr := newDefaultTransport() + return &http.Client{Transport: unrestrictedTr} +} + +func newDefaultTransport() *http.Transport { + t := http.DefaultTransport.(*http.Transport).Clone() + // There are certain classes of vulnerabilities that open up when + // compression is enabled. For simplicity, we disable compression + // to cut off this class of attacks. + // https://www.cyberis.co.uk/2013/08/vulnerabilities-that-just-wont-die.html + t.DisableCompression = true + return t +} + +// HTTPRequest holds the request and config struct for a http request +type HTTPRequest struct { + Client *http.Client + Request *http.Request + Config HTTPRequestConfig + Logger logger.Logger +} + +// HTTPRequestConfig holds the configurable settings for a http request +type HTTPRequestConfig struct { + SizeLimit int64 +} + +// SendRequest sends a HTTPRequest, +// returns a body, status code, and error. +func (h *HTTPRequest) SendRequest() (responseBody []byte, statusCode int, headers http.Header, err error) { + start := time.Now() + + r, err := h.Client.Do(h.Request) + if err != nil { + h.Logger.Tracew("http adapter got error", "err", err) + return nil, 0, nil, err + } + defer logger.Sugared(h.Logger).ErrorIfFn(r.Body.Close, "Error closing SendRequest response body") + + statusCode = r.StatusCode + elapsed := time.Since(start) + h.Logger.Tracew(fmt.Sprintf("http adapter got %v in %s", statusCode, elapsed), "statusCode", statusCode, "timeElapsedSeconds", elapsed) + + source := http.MaxBytesReader(nil, r.Body, h.Config.SizeLimit) + bytes, err := io.ReadAll(source) + if err != nil { + h.Logger.Errorw("http adapter error reading body", "err", err) + return nil, statusCode, nil, err + } + elapsed = time.Since(start) + h.Logger.Tracew(fmt.Sprintf("http adapter finished after %s", elapsed), "statusCode", statusCode, "timeElapsedSeconds", elapsed) + + responseBody = bytes + + return responseBody, statusCode, 
r.Header, nil +} diff --git a/core/utils/http/http_allowed_ips.go b/core/utils/http/http_allowed_ips.go new file mode 100644 index 00000000..240189b3 --- /dev/null +++ b/core/utils/http/http_allowed_ips.go @@ -0,0 +1,111 @@ +package http + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +var privateIPBlocks []*net.IPNet + +func init() { + for _, cidr := range []string{ + "127.0.0.0/8", // IPv4 loopback + "10.0.0.0/8", // RFC1918 + "172.16.0.0/12", // RFC1918 + "192.168.0.0/16", // RFC1918 + "169.254.0.0/16", // RFC3927 link-local + "::1/128", // IPv6 loopback + "fe80::/10", // IPv6 link-local + "fc00::/7", // IPv6 unique local addr + } { + _, block, err := net.ParseCIDR(cidr) + if err != nil { + panic(fmt.Errorf("parse error on %q: %v", cidr, err)) + } + privateIPBlocks = append(privateIPBlocks, block) + } +} + +func isRestrictedIP(ip net.IP, cfg httpClientConfig) (bool, error) { + if !ip.IsGlobalUnicast() || + ip.IsLoopback() || + ip.IsLinkLocalUnicast() || + ip.IsLinkLocalMulticast() || + ip.IsInterfaceLocalMulticast() || + ip.IsUnspecified() || + ip.Equal(net.IPv4bcast) || + ip.Equal(net.IPv4allsys) || + ip.Equal(net.IPv4allrouter) || + ip.Equal(net.IPv4zero) || + ip.IsMulticast() { + return true, nil + } + + for _, block := range privateIPBlocks { + if block.Contains(ip) { + return true, nil + } + } + + blacklisted, err := isBlacklistedIP(ip, cfg) + if err != nil { + return false, errors.Wrapf(err, "failed to check IP blacklist status") + } + + return blacklisted, nil +} + +func isBlacklistedIP(ip net.IP, cfg httpClientConfig) (bool, error) { + dbURL := cfg.URL() + if dbURL.String() == "" { + return false, nil + } + ips, err := net.LookupIP(dbURL.Host) + if err != nil { + return true, errors.Wrapf(err, "failed to lookup IP for DB URL") + } + for _, dbIP := range ips { + if dbIP.Equal(ip) { + return true, nil + } + } + return false, nil +} + +var 
ErrDisallowedIP = errors.New("disallowed IP") + +// makeRestrictedDialContext returns a dialcontext function using the given arguments +func makeRestrictedDialContext(cfg httpClientConfig, lggr logger.Logger) func(context.Context, string, string) (net.Conn, error) { + // restrictedDialContext wraps the Dialer such that after successful connection, + // we check the IP. + // If the resolved IP is restricted, close the connection and return an error. + return func(ctx context.Context, network, address string) (net.Conn, error) { + con, err := (&net.Dialer{ + // Defaults from GoLang standard http package + // https://golang.org/pkg/net/http/#RoundTripper + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext(ctx, network, address) + if err == nil { + // If a connection could be established, ensure it's not local or private + a, _ := con.RemoteAddr().(*net.TCPAddr) + + if restrict, rerr := isRestrictedIP(a.IP, cfg); rerr != nil { + lggr.Errorw("Restricted IP check failed, this IP will be allowed", "ip", a.IP, "err", rerr) + } else if restrict { + return nil, multierr.Combine( + errors.Wrapf(ErrDisallowedIP, "disallowed IP %s. 
Connections to local/private and multicast networks are disabled by default for security reasons", a.IP.String()), + con.Close()) + } + } + return con, err + } +} diff --git a/core/utils/http/http_allowed_ips_test.go b/core/utils/http/http_allowed_ips_test.go new file mode 100644 index 00000000..289da486 --- /dev/null +++ b/core/utils/http/http_allowed_ips_test.go @@ -0,0 +1,82 @@ +package http + +import ( + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +type emptyDBURLcfg struct{} + +func (emptyDBURLcfg) URL() url.URL { + return url.URL{} +} + +type testDBURLcfg struct { + u url.URL +} + +func (c testDBURLcfg) URL() url.URL { + return c.u +} + +func TestHttpAllowedIPS_isRestrictedIP(t *testing.T) { + t.Parallel() + + tests := []struct { + ip net.IP + isRestricted bool + }{ + {net.ParseIP("1.1.1.1"), false}, + {net.ParseIP("216.239.32.10"), false}, + {net.ParseIP("2001:4860:4860::8888"), false}, + {net.ParseIP("127.0.0.1"), true}, + {net.ParseIP("255.255.255.255"), true}, + {net.ParseIP("224.0.0.1"), true}, + {net.ParseIP("224.0.0.2"), true}, + {net.ParseIP("224.1.1.1"), true}, + {net.ParseIP("0.0.0.0"), true}, + {net.ParseIP("192.168.0.1"), true}, + {net.ParseIP("192.168.1.255"), true}, + {net.ParseIP("255.255.255.255"), true}, + {net.ParseIP("10.0.0.1"), true}, + {net.ParseIP("::1"), true}, + {net.ParseIP("fd57:03f9:9ef5:8a81::1"), true}, + {net.ParseIP("FD00::1"), true}, + {net.ParseIP("FF02::1"), true}, + {net.ParseIP("FE80:0000:0000:0000:abcd:abcd:abcd:abcd"), true}, + {net.IP{0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01}, true}, + {net.IP{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01}, true}, + {net.IP{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x02}, true}, + } + + for _, test := range tests { + t.Run(test.ip.String(), func(t *testing.T) { + r, err := 
isRestrictedIP(test.ip, emptyDBURLcfg{}) + require.NoError(t, err) + assert.Equal(t, test.isRestricted, r) + }) + } + + t.Run("disallows queries to database IP", func(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {})) + t.Cleanup(s.Close) + u := testutils.MustParseURL(t, s.URL) + r, err := isRestrictedIP(net.ParseIP(u.Host), testDBURLcfg{*u}) + require.NoError(t, err) + assert.True(t, r) + }) + + t.Run("errors on failed lookup", func(t *testing.T) { + u := testutils.MustParseURL(t, "postgresql://postgres@1.2.3.4:5432/plugin_test?sslmode=disable") + _, err := isRestrictedIP(net.ParseIP("1.2.3.4"), testDBURLcfg{*u}) + require.Error(t, err) + }) +} diff --git a/core/utils/http/http_test.go b/core/utils/http/http_test.go new file mode 100644 index 00000000..dbb8ee47 --- /dev/null +++ b/core/utils/http/http_test.go @@ -0,0 +1,59 @@ +package http_test + +import ( + "bytes" + "io" + netHttp "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/utils/http" +) + +func TestUnrestrictedHTTPClient(t *testing.T) { + t.Parallel() + + client := http.NewUnrestrictedHTTPClient() + assert.True(t, client.Transport.(*netHttp.Transport).DisableCompression) + client.Transport = newMockTransport() + + netReq, err := netHttp.NewRequestWithContext(testutils.Context(t), "GET", "http://localhost", bytes.NewReader([]byte{})) + assert.NoError(t, err) + + req := &http.HTTPRequest{ + Client: client, + Request: netReq, + Config: http.HTTPRequestConfig{SizeLimit: 1000}, + Logger: logger.NullLogger, + } + + response, statusCode, headers, err := req.SendRequest() + assert.NoError(t, err) + assert.Equal(t, 200, statusCode) + assert.Equal(t, "application/json", headers.Get("Content-Type")) + assert.Equal(t, `{"foo":123}`, string(response)) +} + +type mockTransport 
struct{} + +func newMockTransport() netHttp.RoundTripper { + return &mockTransport{} +} + +func (t *mockTransport) RoundTrip(req *netHttp.Request) (*netHttp.Response, error) { + // Create mocked http.Response + response := &netHttp.Response{ + Header: make(netHttp.Header), + Request: req, + StatusCode: netHttp.StatusOK, + } + response.Header.Set("Content-Type", "application/json") + + responseBody := `{"foo":123}` + response.Body = io.NopCloser(strings.NewReader(responseBody)) + return response, nil +} diff --git a/core/utils/json_normalization.go b/core/utils/json_normalization.go new file mode 100644 index 00000000..53879463 --- /dev/null +++ b/core/utils/json_normalization.go @@ -0,0 +1,163 @@ +package utils + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "sort" + "strings" + + "golang.org/x/text/unicode/norm" +) + +// NormalizedJSON returns a JSON representation of an object that has been +// normalized to produce a consistent output for hashing. +// +// NOTE: If this string is unmarshalled again, there is no guarantee that the +// final representation will be consistent with the string produced by this +// function due to differences in JSON implementations and information loss. +// e.g: +// +// JSON does not have a requirement to respect object key ordering. 
+func NormalizedJSON(val []byte) (string, error) { + // Unmarshal into a generic interface{} + var data interface{} + var err error + if err = json.Unmarshal(val, &data); err != nil { + return "", err + } + + buffer := &strings.Builder{} + writer := bufio.NewWriter(buffer) + + // Wrap the buffer in a normalization writer + wc := norm.NFC.Writer(writer) + + // Now marshal the generic interface + if err = marshal(wc, data); err != nil { + return "", err + } + if err = wc.Close(); err != nil { + return "", err + } + if err = writer.Flush(); err != nil { + return "", err + } + return buffer.String(), nil +} + +// recursively write elements of the JSON to the hash, making sure to sort +// objects and to represent floats in exponent form +func marshal(writer io.Writer, data interface{}) error { + switch element := data.(type) { + case map[string]interface{}: + return marshalObject(writer, element) + case []interface{}: + return marshalArray(writer, element) + case float64: + return marshalFloat(writer, element) + case string: + return marshalPrimitive(writer, element) + case bool: + return marshalPrimitive(writer, element) + case nil: + return marshalPrimitive(writer, element) + default: + panic(fmt.Sprintf("type '%T' in JSON input not handled", data)) + } +} + +func marshalObject(writer io.Writer, data map[string]interface{}) error { + _, err := fmt.Fprintf(writer, "{") + if err != nil { + return err + } + + err = marshalMapOrderedKeys(writer, orderedKeys(data), data) + if err != nil { + return err + } + + _, err = fmt.Fprintf(writer, "}") + return err +} + +func orderedKeys(data map[string]interface{}) []string { + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func marshalMapOrderedKeys(writer io.Writer, orderedKeys []string, data map[string]interface{}) error { + for index, key := range orderedKeys { + err := marshal(writer, key) + if err != nil { + return err + } + + _, err = 
fmt.Fprintf(writer, ":") + if err != nil { + return err + } + + value := data[key] + err = marshal(writer, value) + if err != nil { + return err + } + + if index == len(orderedKeys)-1 { + break + } + + _, err = fmt.Fprintf(writer, ",") + if err != nil { + return err + } + } + return nil +} + +func marshalArray(writer io.Writer, data []interface{}) error { + _, err := fmt.Fprintf(writer, "[") + if err != nil { + return err + } + + for index, item := range data { + marErr := marshal(writer, item) + if marErr != nil { + return marErr + } + + if index == len(data)-1 { + break + } + + _, fmtErr := fmt.Fprintf(writer, ",") + if fmtErr != nil { + return fmtErr + } + } + + _, err = fmt.Fprintf(writer, "]") + return err +} + +func marshalPrimitive(writer io.Writer, data interface{}) error { + output, err := json.Marshal(data) + if err != nil { + return err + } + _, err = writer.Write(output) + return err +} + +func marshalFloat(writer io.Writer, data float64) error { + _, err := fmt.Fprintf(writer, "%e", data) + return err +} diff --git a/core/utils/json_normalization_test.go b/core/utils/json_normalization_test.go new file mode 100644 index 00000000..b1e3c20b --- /dev/null +++ b/core/utils/json_normalization_test.go @@ -0,0 +1,59 @@ +package utils_test + +import ( + "encoding/json" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/utils" + + "github.com/stretchr/testify/assert" +) + +func TestNormalizedJSON(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input interface{} + want string + wantError bool + }{ + {"empty object", struct{}{}, "{}", false}, + {"empty array", []string{}, "[]", false}, + {"null", nil, "null", false}, + {"float", 1510599740287532257480015872.0, "1.510600e+27", false}, + {"bool", true, "true", false}, + {"string", "string", "\"string\"", false}, + {"array with one item", []string{"item"}, "[\"item\"]", false}, + {"map with one item", map[string]string{"item": 
"value"}, "{\"item\":\"value\"}", false}, + // See https://en.wikipedia.org/wiki/Precomposed_character + {"string with decomposed characters", + "\u0041\u030a\u0073\u0074\u0072\u006f\u0308\u006d", + "\"\u00c5\u0073\u0074\u0072\u00f6\u006d\"", + false, + }, + {"reordering", + cltest.JSONFromString(t, `{"a": "!", "A": "1"}`), + `{"A":"1","a":"!"}`, + false, + }, + {"more key reordering", + cltest.JSONFromString(t, `{"a": "!", "A": "1", "B": "@", "b":"?", "c":"..."}`), + `{"A":"1","B":"@","a":"!","b":"?","c":"..."}`, + false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + jsonBytes, err := json.Marshal(test.input) + assert.NoError(t, err) + + str, err := utils.NormalizedJSON(jsonBytes) + + cltest.AssertError(t, test.wantError, err) + assert.Equal(t, test.want, str) + }) + } +} diff --git a/core/utils/password.go b/core/utils/password.go new file mode 100644 index 00000000..6dcc5cbc --- /dev/null +++ b/core/utils/password.go @@ -0,0 +1,90 @@ +package utils + +import ( + "errors" + "fmt" + "os" + "regexp" + "strings" +) + +var ( + ErrPasswordWhitespace = errors.New("leading/trailing whitespace detected in password") + ErrEmptyPasswordInFile = errors.New("detected empty password in password file") +) + +// PasswordComplexityRequirements defines the complexity requirements message +// Note that adding an entropy requirement wouldn't add much, since a 16 +// character password already has an entropy score of 75 even if it's all +// lowercase characters +const PasswordComplexityRequirements = ` +Must have a length of 16-50 characters +Must not comprise: + Leading or trailing whitespace (note that a trailing newline in the password file, if present, will be ignored) +` + +const MinRequiredLen = 16 + +var LeadingWhitespace = regexp.MustCompile(`^\s+`) +var TrailingWhitespace = regexp.MustCompile(`\s+$`) + +var ( + ErrMsgHeader = fmt.Sprintf(` +Expected password complexity: +Must be at least %d characters long +Must not comprise: + Leading 
or trailing whitespace + A user's API email + +Faults: +`, MinRequiredLen) + ErrWhitespace = errors.New("password contains a leading or trailing whitespace") +) + +func VerifyPasswordComplexity(password string, disallowedStrings ...string) (merr error) { + errMsg := ErrMsgHeader + var stringErrs []string + + if LeadingWhitespace.MatchString(password) || TrailingWhitespace.MatchString(password) { + stringErrs = append(stringErrs, ErrWhitespace.Error()) + } + + if len(password) < MinRequiredLen { + stringErrs = append(stringErrs, fmt.Sprintf("password is less than %d characters long", MinRequiredLen)) + } + + for _, s := range disallowedStrings { + if strings.Contains(strings.ToLower(password), strings.ToLower(s)) { + stringErrs = append(stringErrs, fmt.Sprintf("password may not contain: %q", s)) + } + } + + if len(stringErrs) > 0 { + for _, stringErr := range stringErrs { + errMsg = fmt.Sprintf("%s %s\n", errMsg, stringErr) + } + merr = errors.New(errMsg) + } + + return +} + +func PasswordFromFile(pwdFile string) (string, error) { + if len(pwdFile) == 0 { + return "", nil + } + dat, err := os.ReadFile(pwdFile) + // handle POSIX case, when text files may have a trailing \n + pwd := strings.TrimSuffix(string(dat), "\n") + + if err != nil { + return pwd, err + } + if len(pwd) == 0 { + return pwd, ErrEmptyPasswordInFile + } + if strings.TrimSpace(pwd) != pwd { + return pwd, ErrPasswordWhitespace + } + return pwd, err +} diff --git a/core/utils/password_test.go b/core/utils/password_test.go new file mode 100644 index 00000000..e7482bd4 --- /dev/null +++ b/core/utils/password_test.go @@ -0,0 +1,88 @@ +package utils_test + +import ( + "os" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestVerifyPasswordComplexity(t *testing.T) { + t.Parallel() + + tests := []struct { + password string + mustNotcontain string + errors []error + }{ + {"thispasswordislongenough", "", []error{}}, + 
{"exactlyrightlen1", "", []error{}}, + {"notlongenough", "", []error{errors.New("password is less than 16 characters long")}}, + {"whitespace in password is ok", "", []error{}}, + {"\t leading whitespace not ok", "", []error{utils.ErrWhitespace}}, + {"trailing whitespace not ok\n", "", []error{utils.ErrWhitespace}}, + {"contains bad string", "bad", []error{errors.New("password may not contain: \"bad\"")}}, + {"contains bAd string 2", "bad", []error{errors.New("password may not contain: \"bad\"")}}, + } + + for _, test := range tests { + test := test + + t.Run(test.password, func(t *testing.T) { + t.Parallel() + + var disallowedStrings []string + if test.mustNotcontain != "" { + disallowedStrings = []string{test.mustNotcontain} + } + err := utils.VerifyPasswordComplexity(test.password, disallowedStrings...) + if len(test.errors) == 0 { + assert.NoError(t, err) + } else { + assert.Error(t, err) + assert.ErrorContains(t, err, utils.ErrMsgHeader) + for _, subErr := range test.errors { + assert.ErrorContains(t, err, subErr.Error()) + } + } + }) + } +} + +func TestPasswordFromFile(t *testing.T) { + t.Parallel() + + tests := []struct { + password string + err error + }{ + {"", utils.ErrEmptyPasswordInFile}, + {" has whitespace ", utils.ErrPasswordWhitespace}, + {"reasonable_password", nil}, + } + + for _, test := range tests { + test := test + t.Run(test.password, func(t *testing.T) { + t.Parallel() + + pwdFile, err := os.CreateTemp("", "") + assert.NoError(t, err) + defer os.Remove(pwdFile.Name()) + _, err = pwdFile.WriteString(test.password) + assert.NoError(t, err) + + pwd, err := utils.PasswordFromFile(pwdFile.Name()) + if test.err != nil { + assert.Error(t, err) + assert.ErrorContains(t, err, test.err.Error()) + } else { + assert.NoError(t, err) + assert.Equal(t, pwd, test.password) + } + }) + } +} diff --git a/core/utils/plainhex.go b/core/utils/plainhex.go new file mode 100644 index 00000000..706b585d --- /dev/null +++ b/core/utils/plainhex.go @@ -0,0 +1,63 @@ 
+package utils + +import ( + "encoding/hex" + "encoding/json" + "reflect" + + "github.com/pkg/errors" +) + +// Similar to go-ethereum's hexutil.Bytes but does not assume a 0x prefix. + +// PlainHexBytes marshals/unmarshals as a JSON string without a 0x prefix. +// The empty slice marshals as "". +type PlainHexBytes []byte + +// MarshalText implements encoding.TextMarshaler +func (b PlainHexBytes) MarshalText() ([]byte, error) { + result := make([]byte, len(b)*2) + hex.Encode(result, b) + return result, nil +} + +func (b PlainHexBytes) String() string { + return hex.EncodeToString(b) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (b *PlainHexBytes) UnmarshalJSON(input []byte) (err error) { + if !isString(input) { + return &json.UnmarshalTypeError{Value: "non-string", Type: reflect.TypeOf((PlainHexBytes)(nil))} + } + err = b.UnmarshalText(input[1 : len(input)-1]) + return errors.Wrap(err, "UnmarshalJSON failed") +} + +func isString(input []byte) bool { + return len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"' +} + +// UnmarshalText implements encoding.TextUnmarshaler. 
+func (b *PlainHexBytes) UnmarshalText(input []byte) error { + raw, err := checkText(input, true) + if err != nil { + return errors.Wrap(err, "UnmarshalText failed") + } + dec := make([]byte, len(raw)/2) + if _, err = hex.Decode(dec, raw); err != nil { + return errors.Wrap(err, "UnmarshalText failed") + } + *b = dec + return nil +} + +func checkText(input []byte, wantPrefix bool) ([]byte, error) { + if len(input) == 0 { + return nil, nil // empty strings are allowed + } + if len(input)%2 != 0 { + return nil, errors.New("odd length") + } + return input, nil +} diff --git a/core/utils/plainhex_test.go b/core/utils/plainhex_test.go new file mode 100644 index 00000000..6ba08149 --- /dev/null +++ b/core/utils/plainhex_test.go @@ -0,0 +1,105 @@ +package utils + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "testing" +) + +type marshalTest struct { + input interface{} + want string +} + +type unmarshalTest struct { + input string + want interface{} + wantErr string +} + +var ( + unmarshalBytesTests = []unmarshalTest{ + // invalid encoding + {input: "", wantErr: "unexpected end of JSON input"}, + {input: "null", wantErr: "json: cannot unmarshal non-string into Go value of type utils.PlainHexBytes"}, + {input: `"null"`, wantErr: "UnmarshalJSON failed: UnmarshalText failed: encoding/hex: invalid byte: U+006E 'n'"}, + {input: `"0x"`, wantErr: "UnmarshalJSON failed: UnmarshalText failed: encoding/hex: invalid byte: U+0078 'x'"}, + {input: `"0X"`, wantErr: "UnmarshalJSON failed: UnmarshalText failed: encoding/hex: invalid byte: U+0058 'X'"}, + {input: `"0"`, wantErr: "UnmarshalJSON failed: UnmarshalText failed: odd length"}, + {input: `"xx"`, wantErr: "UnmarshalJSON failed: UnmarshalText failed: encoding/hex: invalid byte: U+0078 'x'"}, + {input: `"01zz01"`, wantErr: "UnmarshalJSON failed: UnmarshalText failed: encoding/hex: invalid byte: U+007A 'z'"}, + + // valid encoding + {input: `""`, want: referenceBytes("")}, + {input: `"02"`, want: referenceBytes("02")}, + 
{input: `"ffffffffff"`, want: referenceBytes("ffffffffff")}, + { + input: `"ffffffffffffffffffffffffffffffffffff"`, + want: referenceBytes("ffffffffffffffffffffffffffffffffffff"), + }, + } + + encodeBytesTests = []marshalTest{ + {[]byte{}, ""}, + {[]byte{0}, "00"}, + {[]byte{0, 0, 1, 2}, "00000102"}, + } +) + +func TestUnmarshalBytes(t *testing.T) { + for _, test := range unmarshalBytesTests { + var v PlainHexBytes + err := json.Unmarshal([]byte(test.input), &v) + if !checkError(t, test.input, err, test.wantErr) { + continue + } + if !bytes.Equal(test.want.([]byte), v) { + t.Errorf("input %s: value mismatch: got %x, want %x", test.input, &v, test.want) + continue + } + } +} + +func TestMarshalBytes(t *testing.T) { + for _, test := range encodeBytesTests { + in := test.input.([]byte) + out, err := json.Marshal(PlainHexBytes(in)) + if err != nil { + t.Errorf("%x: %v", in, err) + continue + } + if want := `"` + test.want + `"`; string(out) != want { + t.Errorf("%x: MarshalJSON output mismatch: got %q, want %q", in, out, want) + continue + } + if out := PlainHexBytes(in).String(); out != test.want { + t.Errorf("%x: String mismatch: got %q, want %q", in, out, test.want) + continue + } + } +} + +func referenceBytes(s string) []byte { + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +func checkError(t *testing.T, input string, got error, want string) bool { + if got == nil { + if want != "" { + t.Errorf("input %s: got no error, want %q", input, want) + return false + } + return true + } + if want == "" { + t.Errorf("input %s: unexpected error %q", input, got) + } else if got.Error() != want { + t.Errorf("input %s: got error %q, want %q", input, got, want) + } + return false +} diff --git a/core/utils/scrypt.go b/core/utils/scrypt.go new file mode 100644 index 00000000..5875a1f5 --- /dev/null +++ b/core/utils/scrypt.go @@ -0,0 +1,39 @@ +package utils + +import ( + "github.com/ethereum/go-ethereum/accounts/keystore" +) + +const ( + // FastN 
is a shorter N parameter for testing + FastN = 2 + // FastP is a shorter P parameter for testing + FastP = 1 +) + +type ( + // ScryptParams represents two integers, N and P. + ScryptParams struct{ N, P int } + // ScryptConfigReader can check for an insecure, fast flag + ScryptConfigReader interface { + InsecureFastScrypt() bool + } +) + +// DefaultScryptParams is for use in production. It used geth's standard level +// of encryption and is relatively expensive to decode. +// Avoid using this in tests. +var DefaultScryptParams = ScryptParams{N: keystore.StandardScryptN, P: keystore.StandardScryptP} + +// FastScryptParams is for use in tests, where you don't want to wear out your +// CPU with expensive key derivations, do not use it in production, or your +// encrypted keys will be easy to brute-force! +var FastScryptParams = ScryptParams{N: FastN, P: FastP} + +// GetScryptParams fetches ScryptParams from a ScryptConfigReader +func GetScryptParams(config ScryptConfigReader) ScryptParams { + if config.InsecureFastScrypt() { + return FastScryptParams + } + return DefaultScryptParams +} diff --git a/core/utils/stringutils/string_utils.go b/core/utils/stringutils/string_utils.go new file mode 100644 index 00000000..571f4acb --- /dev/null +++ b/core/utils/stringutils/string_utils.go @@ -0,0 +1,28 @@ +package stringutils + +import "strconv" + +// ToInt64 parses s as a base 10 int64. +func ToInt64(s string) (int64, error) { + return strconv.ParseInt(s, 10, 64) +} + +// FromInt64 formats n as a base 10 string. +func FromInt64(n int64) string { + return strconv.FormatInt(n, 10) +} + +// ToInt32 parses s as a base 10 int32. +func ToInt32(s string) (int32, error) { + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return 0, err + } + + return int32(n), nil +} + +// FromInt32 formats n as a base 10 string. 
+func FromInt32(n int32) string { + return FromInt64(int64(n)) +} diff --git a/core/utils/stringutils/string_utils_test.go b/core/utils/stringutils/string_utils_test.go new file mode 100644 index 00000000..f4206609 --- /dev/null +++ b/core/utils/stringutils/string_utils_test.go @@ -0,0 +1,50 @@ +package stringutils + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStringUtils_ToInt64(t *testing.T) { + t.Parallel() + + want := int64(12) + + got, err := ToInt64("12") + require.NoError(t, err) + + assert.Equal(t, want, got) +} + +func TestStringUtils_FromInt64(t *testing.T) { + t.Parallel() + + want := "12" + + got := FromInt64(int64(12)) + + assert.Equal(t, want, got) +} + +func TestStringUtils_ToInt32(t *testing.T) { + t.Parallel() + + want := int32(32) + + got, err := ToInt32("32") + require.NoError(t, err) + + assert.Equal(t, want, got) +} + +func TestStringUtils_FromInt32(t *testing.T) { + t.Parallel() + + want := "32" + + got := FromInt32(int32(32)) + + assert.Equal(t, want, got) +} diff --git a/core/utils/thread_control.go b/core/utils/thread_control.go new file mode 100644 index 00000000..e4ac89a6 --- /dev/null +++ b/core/utils/thread_control.go @@ -0,0 +1,46 @@ +package utils + +import ( + "context" + "sync" + + "github.com/goplugin/plugin-common/pkg/services" +) + +var _ ThreadControl = &threadControl{} + +// ThreadControl is a helper for managing a group of goroutines. +type ThreadControl interface { + // Go starts a goroutine and tracks the lifetime of the goroutine. + Go(fn func(context.Context)) + // Close cancels the goroutines and waits for all of them to exit. 
+ Close() +} + +func NewThreadControl() *threadControl { + tc := &threadControl{ + stop: make(chan struct{}), + } + + return tc +} + +type threadControl struct { + threadsWG sync.WaitGroup + stop services.StopChan +} + +func (tc *threadControl) Go(fn func(context.Context)) { + tc.threadsWG.Add(1) + go func() { + defer tc.threadsWG.Done() + ctx, cancel := tc.stop.NewCtx() + defer cancel() + fn(ctx) + }() +} + +func (tc *threadControl) Close() { + close(tc.stop) + tc.threadsWG.Wait() +} diff --git a/core/utils/thread_control_test.go b/core/utils/thread_control_test.go new file mode 100644 index 00000000..9001ca72 --- /dev/null +++ b/core/utils/thread_control_test.go @@ -0,0 +1,27 @@ +package utils + +import ( + "context" + "sync/atomic" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestThreadControl_Close(t *testing.T) { + n := 10 + tc := NewThreadControl() + + finished := atomic.Int32{} + + for i := 0; i < n; i++ { + tc.Go(func(ctx context.Context) { + <-ctx.Done() + finished.Add(1) + }) + } + + tc.Close() + + require.Equal(t, int32(n), finished.Load()) +} diff --git a/core/utils/tomlutils/toml.go b/core/utils/tomlutils/toml.go new file mode 100644 index 00000000..a55ad14e --- /dev/null +++ b/core/utils/tomlutils/toml.go @@ -0,0 +1,35 @@ +package tomlutils + +import ( + "strconv" +) + +// Float32 represents float32 values for TOML +type Float32 float32 + +// UnmarshalText parses the value as a proper float32 +func (t *Float32) UnmarshalText(text []byte) error { + f32, err := strconv.ParseFloat(string(text), 32) + if err != nil { + return err + } + + *t = Float32(f32) + + return nil +} + +// Float64 represents float64 values for TOML +type Float64 float64 + +// UnmarshalText parses the value as a proper float64 +func (t *Float64) UnmarshalText(text []byte) error { + f32, err := strconv.ParseFloat(string(text), 64) + if err != nil { + return err + } + + *t = Float64(f32) + + return nil +} diff --git a/core/utils/tomlutils/toml_test.go 
b/core/utils/tomlutils/toml_test.go new file mode 100644 index 00000000..ece71b03 --- /dev/null +++ b/core/utils/tomlutils/toml_test.go @@ -0,0 +1,71 @@ +package tomlutils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUtils_TomlFloat32_Success_Decimal(t *testing.T) { + t.Parallel() + + var tomlF32 Float32 + + err := tomlF32.UnmarshalText([]byte("0.23")) + + assert.Nil(t, err) + assert.Equal(t, tomlF32, Float32(0.23)) +} + +func TestUtils_TomlFloat32_Success_Integer(t *testing.T) { + t.Parallel() + + var tomlF32 Float32 + + err := tomlF32.UnmarshalText([]byte("13")) + + assert.Nil(t, err) + assert.Equal(t, tomlF32, Float32(13)) +} + +func TestUtils_TomlFloat32_Failure(t *testing.T) { + t.Parallel() + + var tomlF32 Float32 + + err := tomlF32.UnmarshalText([]byte("1s")) + + assert.NotNil(t, err) +} + +func TestUtils_TomlFloat64_Success_Decimal(t *testing.T) { + t.Parallel() + + var tomlF64 Float64 + + err := tomlF64.UnmarshalText([]byte("2.82")) + + assert.Nil(t, err) + assert.Equal(t, tomlF64, Float64(2.82)) +} + +func TestUtils_TomlFloat64_Success_Integer(t *testing.T) { + t.Parallel() + + var tomlF64 Float64 + + err := tomlF64.UnmarshalText([]byte("3")) + + assert.Nil(t, err) + assert.Equal(t, tomlF64, Float64(3)) +} + +func TestUtils_TomlFloat64_Failure(t *testing.T) { + t.Parallel() + + var tomlF64 Float64 + + err := tomlF64.UnmarshalText([]byte("1s")) + + assert.NotNil(t, err) +} diff --git a/core/utils/utils.go b/core/utils/utils.go new file mode 100644 index 00000000..879740d7 --- /dev/null +++ b/core/utils/utils.go @@ -0,0 +1,597 @@ +// Package utils is used for common functions and tools used across the codebase. 
package utils

import (
	"bytes"
	"context"
	"crypto/ed25519"
	"crypto/rand"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"math"
	mrand "math/rand"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/google/uuid"
	"github.com/jpillora/backoff"
	pkgerrors "github.com/pkg/errors"
	"github.com/robfig/cron/v3"
	"golang.org/x/crypto/bcrypt"
	"golang.org/x/crypto/sha3"

	ragep2ptypes "github.com/goplugin/libocr/ragep2p/types"

	"github.com/goplugin/plugin-common/pkg/services"
)

// DefaultSecretSize is the entropy in bytes to generate a base64 string of 64 characters.
const DefaultSecretSize = 48

// MustNewPeerID returns the string form of a ragep2p peer ID derived from a
// freshly generated ed25519 public key. It panics if key generation or
// peer-ID conversion fails, so it is intended for init/test-style use where
// failure is unrecoverable.
func MustNewPeerID() string {
	pubKey, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	peerID, err := ragep2ptypes.PeerIDFromPublicKey(pubKey)
	if err != nil {
		panic(err)
	}
	return peerID.String()
}

// ISO8601UTC formats given time to ISO8601 (RFC 3339) in UTC.
func ISO8601UTC(t time.Time) string {
	return t.UTC().Format(time.RFC3339)
}

// DurationFromNow returns the duration from now until t (it wraps
// time.Until, so the result is negative when t is in the past).
//
// NOTE(review): the previous comment claimed this returned the time *since*
// t, which is the opposite of what time.Until computes — callers should be
// audited against the actual semantics.
func DurationFromNow(t time.Time) time.Duration {
	return time.Until(t)
}

// FormatJSON applies indent to format a JSON response.
func FormatJSON(v interface{}) ([]byte, error) {
	return json.MarshalIndent(v, "", " ")
}

// NewBytes32ID returns a randomly generated UUID that conforms to
// Ethereum bytes32 (32 hex characters, dashes stripped).
func NewBytes32ID() string {
	return strings.ReplaceAll(uuid.New().String(), "-", "")
}

// NewSecret returns a new securely random sequence of n bytes of entropy. The
// result is a base64 encoded string.
//
// Panics on failed attempts to read from system's PRNG.
+func NewSecret(n int) string { + b := make([]byte, n) + _, err := rand.Read(b) + if err != nil { + panic(pkgerrors.Wrap(err, "generating secret failed")) + } + return base64.StdEncoding.EncodeToString(b) +} + +// StringToHex converts a standard string to a hex encoded string. +func StringToHex(in string) string { + return AddHexPrefix(hex.EncodeToString([]byte(in))) +} + +// AddHexPrefix adds the prefix (0x) to a given hex string. +func AddHexPrefix(str string) string { + if len(str) < 2 || len(str) > 1 && strings.ToLower(str[0:2]) != "0x" { + str = "0x" + str + } + return str +} + +// IsEmpty returns true if bytes contains only zero values, or has len 0. +func IsEmpty(bytes []byte) bool { + for _, b := range bytes { + if b != 0 { + return false + } + } + return true +} + +// UnmarshalToMap takes an input json string and returns a map[string]interface i.e. a raw object +func UnmarshalToMap(input string) (map[string]interface{}, error) { + var output map[string]interface{} + err := json.Unmarshal([]byte(input), &output) + return output, err +} + +// MustUnmarshalToMap performs UnmarshalToMap, panics upon failure +func MustUnmarshalToMap(input string) map[string]interface{} { + output, err := UnmarshalToMap(input) + if err != nil { + panic(err) + } + return output +} + +// HashPassword wraps around bcrypt.GenerateFromPassword for a friendlier API. +func HashPassword(password string) (string, error) { + bytes, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) + return string(bytes), err +} + +// CheckPasswordHash wraps around bcrypt.CompareHashAndPassword for a friendlier API. 
func CheckPasswordHash(password, hash string) bool {
	// bcrypt.CompareHashAndPassword returns nil only on a match; both a
	// mismatch and a malformed hash are reported as false here.
	err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password))
	return err == nil
}

// Sha256 returns a hexadecimal encoded string of a hashed input.
//
// NOTE(review): despite the name, this uses sha3.New256, i.e. SHA3-256,
// not SHA-256 — confirm callers expect SHA3 digests before renaming or
// swapping the hash function.
func Sha256(in string) (string, error) {
	hasher := sha3.New256()
	_, err := hasher.Write([]byte(in))
	if err != nil {
		return "", pkgerrors.Wrap(err, "sha256 write error")
	}
	return hex.EncodeToString(hasher.Sum(nil)), nil
}

// WithCloseChan wraps a context so that it is canceled if the passed in channel is closed.
// Deprecated: Call [services.StopChan.Ctx] directly
func WithCloseChan(parentCtx context.Context, chStop chan struct{}) (context.Context, context.CancelFunc) {
	return services.StopChan(chStop).Ctx(parentCtx)
}

// ContextFromChan creates a context that finishes when the provided channel receives or is closed.
// Deprecated: Call [services.StopChan.NewCtx] directly.
func ContextFromChan(chStop chan struct{}) (context.Context, context.CancelFunc) {
	return services.StopChan(chStop).NewCtx()
}

// ContextFromChanWithTimeout creates a context with a timeout that finishes when the provided channel receives or is closed.
// Deprecated: Call [services.StopChan.CtxCancel] directly
func ContextFromChanWithTimeout(chStop chan struct{}, timeout time.Duration) (context.Context, context.CancelFunc) {
	return services.StopChan(chStop).CtxCancel(context.WithTimeout(context.Background(), timeout))
}

// Deprecated: use services.StopChan
type StopChan = services.StopChan

// Deprecated: use services.StopRChan
type StopRChan = services.StopRChan

// BoundedQueue is a FIFO queue that discards older items when it reaches its capacity.
type BoundedQueue[T any] struct {
	capacity int          // maximum number of retained items
	items    []T          // oldest item first
	mu       sync.RWMutex // guards items
}

// NewBoundedQueue creates a new BoundedQueue with the given capacity.
func NewBoundedQueue[T any](capacity int) *BoundedQueue[T] {
	return &BoundedQueue[T]{capacity: capacity}
}

// Add appends x to the queue, discarding the oldest items if the capacity
// would otherwise be exceeded.
func (q *BoundedQueue[T]) Add(x T) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items = append(q.items, x)
	if overflow := len(q.items) - q.capacity; overflow > 0 {
		q.items = q.items[overflow:]
	}
}

// Take removes and returns the oldest item, or the zero value if the queue
// is empty.
func (q *BoundedQueue[T]) Take() (t T) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) > 0 {
		t, q.items = q.items[0], q.items[1:]
	}
	return
}

// Empty reports whether the queue holds no items.
func (q *BoundedQueue[T]) Empty() bool {
	q.mu.RLock()
	defer q.mu.RUnlock()
	return len(q.items) == 0
}

// Full reports whether the queue is at (or over) capacity.
func (q *BoundedQueue[T]) Full() bool {
	q.mu.RLock()
	defer q.mu.RUnlock()
	return len(q.items) >= q.capacity
}

// BoundedPriorityQueue stores one BoundedQueue per priority, with its own
// capacity; lower priority values are taken first.
type BoundedPriorityQueue[T any] struct {
	queues     map[uint]*BoundedQueue[T]
	priorities []uint // sorted ascending; iteration order for Take/Empty
	capacities map[uint]int
	mu         sync.RWMutex
}

// NewBoundedPriorityQueue creates a new BoundedPriorityQueue with one
// subqueue per entry of capacities.
func NewBoundedPriorityQueue[T any](capacities map[uint]int) *BoundedPriorityQueue[T] {
	bpq := BoundedPriorityQueue[T]{
		queues:     make(map[uint]*BoundedQueue[T], len(capacities)),
		capacities: capacities,
	}
	for priority, capacity := range capacities {
		bpq.priorities = append(bpq.priorities, priority)
		bpq.queues[priority] = NewBoundedQueue[T](capacity)
	}
	sort.Slice(bpq.priorities, func(i, j int) bool { return bpq.priorities[i] < bpq.priorities[j] })
	return &bpq
}

// Add pushes x onto the subqueue for the given priority.
// It panics if the priority was not configured at construction.
func (q *BoundedPriorityQueue[T]) Add(priority uint, x T) {
	q.mu.Lock()
	defer q.mu.Unlock()
	subqueue, exists := q.queues[priority]
	if !exists {
		panic(fmt.Sprintf("nonexistent priority: %v", priority))
	}
	subqueue.Add(x)
}

// Take removes and returns the oldest item from the lowest-priority-value
// non-empty subqueue, or the zero value if all subqueues are empty.
func (q *BoundedPriorityQueue[T]) Take() (t T) {
	q.mu.Lock()
	defer q.mu.Unlock()
	for _, priority := range q.priorities {
		if subqueue := q.queues[priority]; !subqueue.Empty() {
			return subqueue.Take()
		}
	}
	return
}

// Empty reports whether every subqueue is empty.
func (q *BoundedPriorityQueue[T]) Empty() bool {
	q.mu.RLock()
	defer q.mu.RUnlock()
	for _, priority := range q.priorities {
		if !q.queues[priority].Empty() {
			return false
		}
	}
	return true
}

// TickerBase is an interface for pausable tickers.
type TickerBase interface {
	Resume()
	Pause()
	Destroy()
	Ticks() <-chan time.Time
}

// PausableTicker is a time.Ticker that can be paused and resumed.
type PausableTicker struct {
	ticker   *time.Ticker  // nil while paused
	duration time.Duration // tick interval used by Resume
	mu       *sync.RWMutex // pointer so the struct may be copied by value
}

// NewPausableTicker creates a new PausableTicker, initially paused.
func NewPausableTicker(duration time.Duration) PausableTicker {
	return PausableTicker{
		duration: duration,
		mu:       &sync.RWMutex{},
	}
}

// Ticks returns the tick channel, or nil while paused.
func (t *PausableTicker) Ticks() <-chan time.Time {
	t.mu.RLock()
	defer t.mu.RUnlock()
	if t.ticker == nil {
		return nil
	}
	return t.ticker.C
}

// Pause stops the underlying ticker, if running.
func (t *PausableTicker) Pause() {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.ticker != nil {
		t.ticker.Stop()
		t.ticker = nil
	}
}

// Resume restarts the ticker with the configured duration, if paused.
func (t *PausableTicker) Resume() {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.ticker == nil {
		t.ticker = time.NewTicker(t.duration)
	}
}

// Destroy pauses the PausableTicker, releasing its timer resources.
func (t *PausableTicker) Destroy() {
	t.Pause()
}
+ +// CronTicker is like a time.Ticker but for a cron schedule. +type CronTicker struct { + *cron.Cron + ch chan time.Time + beenRun atomic.Bool +} + +// NewCronTicker returns a new CrontTicker for the given schedule. +func NewCronTicker(schedule string) (CronTicker, error) { + cron := cron.New(cron.WithSeconds()) + ch := make(chan time.Time, 1) + _, err := cron.AddFunc(schedule, func() { + select { + case ch <- time.Now(): + default: + } + }) + if err != nil { + return CronTicker{}, err + } + return CronTicker{Cron: cron, ch: ch}, nil +} + +// Start - returns true if the CronTicker was actually started, false otherwise +func (t *CronTicker) Start() bool { + if t.Cron != nil { + if t.beenRun.CompareAndSwap(false, true) { + t.Cron.Start() + return true + } + } + return false +} + +// Stop - returns true if the CronTicker was actually stopped, false otherwise +func (t *CronTicker) Stop() bool { + if t.Cron != nil { + if t.beenRun.CompareAndSwap(true, false) { + t.Cron.Stop() + return true + } + } + return false +} + +// Ticks returns the underlying chanel. +func (t *CronTicker) Ticks() <-chan time.Time { + return t.ch +} + +// ValidateCronSchedule returns an error if the given schedule is invalid. +func ValidateCronSchedule(schedule string) error { + if !(strings.HasPrefix(schedule, "CRON_TZ=") || strings.HasPrefix(schedule, "@every ")) { + return errors.New("cron schedule must specify a time zone using CRON_TZ, e.g. 'CRON_TZ=UTC 5 * * * *', or use the @every syntax, e.g. 
'@every 1h30m'") + } + parser := cron.NewParser(cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor) + _, err := parser.Parse(schedule) + return pkgerrors.Wrapf(err, "invalid cron schedule '%v'", schedule) +} + +// ResettableTimer stores a timer +type ResettableTimer struct { + timer *time.Timer + mu *sync.RWMutex +} + +// NewResettableTimer creates a new ResettableTimer +func NewResettableTimer() ResettableTimer { + return ResettableTimer{ + mu: &sync.RWMutex{}, + } +} + +// Ticks retrieves the ticks from a ResettableTimer +func (t *ResettableTimer) Ticks() <-chan time.Time { + t.mu.RLock() + defer t.mu.RUnlock() + if t.timer == nil { + return nil + } + return t.timer.C +} + +// Stop stops a ResettableTimer +func (t *ResettableTimer) Stop() { + t.mu.Lock() + defer t.mu.Unlock() + if t.timer != nil { + t.timer.Stop() + t.timer = nil + } +} + +// Reset stops a ResettableTimer +// and resets it with a new duration +func (t *ResettableTimer) Reset(duration time.Duration) { + t.mu.Lock() + defer t.mu.Unlock() + if t.timer != nil { + t.timer.Stop() + } + t.timer = time.NewTimer(duration) +} + +var ( + ErrAlreadyStopped = errors.New("already stopped") + ErrCannotStopUnstarted = errors.New("cannot stop unstarted service") +) + +// StartStopOnce contains a StartStopOnceState integer +// Deprecated: use services.StateMachine +type StartStopOnce = services.StateMachine + +// WithJitter adds +/- 10% to a duration +func WithJitter(d time.Duration) time.Duration { + // #nosec + if d == 0 { + return 0 + } + // ensure non-zero arg to Intn to avoid panic + max := math.Max(float64(d.Abs())/5.0, 1.) 
+ // #nosec - non critical randomness + jitter := mrand.Intn(int(max)) + jitter = jitter - (jitter / 2) + return time.Duration(int(d) + jitter) +} + +// NewRedialBackoff is a standard backoff to use for redialling or reconnecting to +// unreachable network endpoints +func NewRedialBackoff() backoff.Backoff { + return backoff.Backoff{ + Min: 1 * time.Second, + Max: 15 * time.Second, + Jitter: true, + } + +} + +// KeyedMutex allows to lock based on particular values +type KeyedMutex struct { + mutexes sync.Map +} + +// LockInt64 locks the value for read/write +func (m *KeyedMutex) LockInt64(key int64) func() { + value, _ := m.mutexes.LoadOrStore(key, new(sync.Mutex)) + mtx := value.(*sync.Mutex) + mtx.Lock() + + return mtx.Unlock +} + +// BoxOutput formats its arguments as fmt.Printf, and encloses them in a box of +// arrows pointing at their content, in order to better highlight it. See +// ExampleBoxOutput +func BoxOutput(errorMsgTemplate string, errorMsgValues ...interface{}) string { + errorMsgTemplate = fmt.Sprintf(errorMsgTemplate, errorMsgValues...) 
+ lines := strings.Split(errorMsgTemplate, "\n") + maxlen := 0 + for _, line := range lines { + if len(line) > maxlen { + maxlen = len(line) + } + } + internalLength := maxlen + 4 + output := "↘" + strings.Repeat("↓", internalLength) + "↙\n" // top line + output += "→ " + strings.Repeat(" ", maxlen) + " ←\n" + readme := strings.Repeat("README ", maxlen/7) + output += "→ " + readme + strings.Repeat(" ", maxlen-len(readme)) + " ←\n" + output += "→ " + strings.Repeat(" ", maxlen) + " ←\n" + for _, line := range lines { + output += "→ " + line + strings.Repeat(" ", maxlen-len(line)) + " ←\n" + } + output += "→ " + strings.Repeat(" ", maxlen) + " ←\n" + output += "→ " + readme + strings.Repeat(" ", maxlen-len(readme)) + " ←\n" + output += "→ " + strings.Repeat(" ", maxlen) + " ←\n" + return "\n" + output + "↗" + strings.Repeat("↑", internalLength) + "↖" + // bottom line + "\n\n" +} + +// ConcatBytes appends a bunch of byte arrays into a single byte array +func ConcatBytes(bufs ...[]byte) []byte { + return bytes.Join(bufs, []byte{}) +} + +func LeftPadBitString(input string, length int) string { + if len(input) >= length { + return input + } + return strings.Repeat("0", length-len(input)) + input +} + +// ErrorBuffer uses joinedErrors interface to join multiple errors into a single error. +// This is useful to track the most recent N errors in a service and flush them as a single error. +type ErrorBuffer struct { + // buffer is a slice of errors + buffer []error + + // cap is the maximum number of errors that the buffer can hold. + // Exceeding the cap results in discarding the oldest error + cap int + + mu sync.RWMutex +} + +func (eb *ErrorBuffer) Flush() (err error) { + eb.mu.RLock() + defer eb.mu.RUnlock() + err = errors.Join(eb.buffer...) 
+ eb.buffer = nil + return +} + +func (eb *ErrorBuffer) Append(incoming error) { + eb.mu.Lock() + defer eb.mu.Unlock() + + if len(eb.buffer) == eb.cap && eb.cap != 0 { + eb.buffer = append(eb.buffer[1:], incoming) + return + } + eb.buffer = append(eb.buffer, incoming) +} + +func (eb *ErrorBuffer) SetCap(cap int) { + eb.mu.Lock() + defer eb.mu.Unlock() + if len(eb.buffer) > cap { + eb.buffer = eb.buffer[len(eb.buffer)-cap:] + } + eb.cap = cap +} + +// UnwrapError returns a list of underlying errors if passed error implements joinedError or return the err in a single-element list otherwise. +// +//nolint:errorlint // error type checks will fail on wrapped errors. Disabled since we are not doing checks on error types. +func UnwrapError(err error) []error { + joined, ok := err.(interface{ Unwrap() []error }) + if !ok { + return []error{err} + } + return joined.Unwrap() +} + +// DeleteUnstable destructively removes slice element at index i +// It does no bounds checking and may re-order the slice +func DeleteUnstable[T any](s []T, i int) []T { + s[i] = s[len(s)-1] + s = s[:len(s)-1] + return s +} diff --git a/core/utils/utils_test.go b/core/utils/utils_test.go new file mode 100644 index 00000000..aeab6791 --- /dev/null +++ b/core/utils/utils_test.go @@ -0,0 +1,553 @@ +package utils_test + +import ( + "context" + "encoding/hex" + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/utils" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUtils_NewBytes32ID(t *testing.T) { + t.Parallel() + + id := utils.NewBytes32ID() + assert.NotContains(t, id, "-") +} + +func TestUtils_NewSecret(t *testing.T) { + t.Parallel() + + tests := []struct { + numOfBytes int + wantStrLen int + }{ + {12, 16}, {24, 32}, {48, 64}, {96, 128}, + } + for _, test := range tests 
{ + test := test + + t.Run(fmt.Sprintf("%d_%d", test.numOfBytes, test.wantStrLen), func(t *testing.T) { + t.Parallel() + + secret := utils.NewSecret(test.numOfBytes) + assert.Equal(t, test.wantStrLen, len(secret)) + }) + } +} + +func TestUtils_StringToHex(t *testing.T) { + t.Parallel() + + tests := []struct { + utf8 string + hex string + }{ + {"abc", "0x616263"}, + {"Hi Mom!", "0x4869204d6f6d21"}, + {"", "0x"}, + } + + for _, test := range tests { + test := test + + t.Run(test.utf8, func(t *testing.T) { + t.Parallel() + + assert.Equal(t, test.hex, utils.StringToHex(test.utf8)) + }) + } +} + +func TestUtils_DurationFromNow(t *testing.T) { + t.Parallel() + + future := time.Now().Add(time.Second) + duration := utils.DurationFromNow(future) + assert.True(t, 0 < duration) +} + +func TestBoundedQueue(t *testing.T) { + t.Parallel() + + q := utils.NewBoundedQueue[int](3) + require.True(t, q.Empty()) + require.False(t, q.Full()) + + q.Add(1) + require.False(t, q.Empty()) + require.False(t, q.Full()) + + x := q.Take() + require.Equal(t, 1, x) + + require.Zero(t, q.Take()) + require.True(t, q.Empty()) + require.False(t, q.Full()) + + q.Add(1) + q.Add(2) + q.Add(3) + q.Add(4) + require.True(t, q.Full()) + + x = q.Take() + require.Equal(t, 2, x) + require.False(t, q.Empty()) + require.False(t, q.Full()) + + x = q.Take() + require.Equal(t, 3, x) + require.False(t, q.Empty()) + require.False(t, q.Full()) + + x = q.Take() + require.Equal(t, 4, x) + require.True(t, q.Empty()) + require.False(t, q.Full()) +} + +func TestBoundedPriorityQueue(t *testing.T) { + t.Parallel() + + q := utils.NewBoundedPriorityQueue[int](map[uint]int{ + 1: 3, + 2: 1, + }) + require.True(t, q.Empty()) + + q.Add(1, 1) + require.False(t, q.Empty()) + + x := q.Take() + require.Equal(t, 1, x) + require.True(t, q.Empty()) + + require.Zero(t, q.Take()) + require.True(t, q.Empty()) + + q.Add(2, 1) + q.Add(1, 2) + q.Add(1, 3) + q.Add(1, 4) + + x = q.Take() + require.Equal(t, 2, x) + require.False(t, q.Empty()) + + 
x = q.Take() + require.Equal(t, 3, x) + require.False(t, q.Empty()) + + x = q.Take() + require.Equal(t, 4, x) + require.False(t, q.Empty()) + + x = q.Take() + require.Equal(t, 1, x) + require.True(t, q.Empty()) + + require.Zero(t, q.Take()) + + q.Add(2, 1) + q.Add(2, 2) + + x = q.Take() + require.Equal(t, 2, x) + require.True(t, q.Empty()) + + require.Zero(t, q.Take()) +} + +func Test_WithJitter(t *testing.T) { + t.Parallel() + + d := 10 * time.Second + + for i := 0; i < 32; i++ { + r := utils.WithJitter(d) + require.GreaterOrEqual(t, int(r), int(9*time.Second)) + require.LessOrEqual(t, int(r), int(11*time.Second)) + } +} + +func TestIsEmpty(t *testing.T) { + t.Parallel() + + b := make([]byte, 32) + assert.True(t, utils.IsEmpty(b)) + + b[10] = 1 + assert.False(t, utils.IsEmpty(b)) +} + +func TestHashPassword(t *testing.T) { + t.Parallel() + + h, err := utils.HashPassword("Qwerty123!") + assert.NoError(t, err) + assert.NotEmpty(t, h) + + ok := utils.CheckPasswordHash("Qwerty123!", h) + assert.True(t, ok) + + ok = utils.CheckPasswordHash("God", h) + assert.False(t, ok) +} + +func TestBoxOutput(t *testing.T) { + t.Parallel() + + output := utils.BoxOutput("some error %d %s", 123, "foo") + const expected = "\n" + + "↘↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↙\n" + + "→ ←\n" + + "→ README README ←\n" + + "→ ←\n" + + "→ some error 123 foo ←\n" + + "→ ←\n" + + "→ README README ←\n" + + "→ ←\n" + + "↗↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↖\n" + + "\n" + assert.Equal(t, expected, output) +} + +func TestISO8601UTC(t *testing.T) { + t.Parallel() + + ts := time.Unix(1651818206, 0) + str := utils.ISO8601UTC(ts) + assert.Equal(t, "2022-05-06T06:23:26Z", str) +} + +func TestFormatJSON(t *testing.T) { + t.Parallel() + + json := `{"foo":123}` + formatted, err := utils.FormatJSON(json) + assert.NoError(t, err) + assert.Equal(t, "\"{\\\"foo\\\":123}\"", string(formatted)) +} + +func TestMustUnmarshalToMap(t *testing.T) { + t.Parallel() + + json := `{"foo":123.45}` + expected := make(map[string]interface{}) + 
expected["foo"] = 123.45 + m := utils.MustUnmarshalToMap(json) + assert.Equal(t, expected, m) + + assert.Panics(t, func() { + utils.MustUnmarshalToMap("") + }) + + assert.Panics(t, func() { + utils.MustUnmarshalToMap("123") + }) +} + +func TestSha256(t *testing.T) { + t.Parallel() + + hexHash, err := utils.Sha256("test") + assert.NoError(t, err) + + hash, err := hex.DecodeString(hexHash) + assert.NoError(t, err) + assert.Len(t, hash, 32) +} + +func TestWithCloseChan(t *testing.T) { + t.Parallel() + + assertCtxCancelled := func(ctx context.Context, t *testing.T) { + select { + case <-ctx.Done(): + case <-time.After(testutils.WaitTimeout(t)): + assert.FailNow(t, "context was not cancelled") + } + } + + t.Run("closing channel", func(t *testing.T) { + t.Parallel() + + ch := make(chan struct{}) + ctx, cancel := utils.WithCloseChan(testutils.Context(t), ch) + defer cancel() + + close(ch) + + assertCtxCancelled(ctx, t) + }) + + t.Run("cancelling ctx", func(t *testing.T) { + t.Parallel() + + ch := make(chan struct{}) + defer close(ch) + ctx, cancel := utils.WithCloseChan(testutils.Context(t), ch) + cancel() + + assertCtxCancelled(ctx, t) + }) + + t.Run("cancelling parent ctx", func(t *testing.T) { + t.Parallel() + + ch := make(chan struct{}) + defer close(ch) + pctx, pcancel := context.WithCancel(testutils.Context(t)) + ctx, cancel := utils.WithCloseChan(pctx, ch) + defer cancel() + + pcancel() + + assertCtxCancelled(ctx, t) + }) +} + +func TestContextFromChan(t *testing.T) { + t.Parallel() + + ch := make(chan struct{}) + ctx, cancel := utils.ContextFromChan(ch) + defer cancel() + + close(ch) + + select { + case <-ctx.Done(): + case <-time.After(testutils.WaitTimeout(t)): + assert.FailNow(t, "context was not cancelled") + } +} + +func TestContextFromChanWithTimeout(t *testing.T) { + t.Parallel() + + assertCtxCancelled := func(ctx context.Context, t *testing.T) { + select { + case <-ctx.Done(): + case <-time.After(testutils.WaitTimeout(t)): + assert.FailNow(t, "context was 
not cancelled") + } + } + + t.Run("small deadline", func(t *testing.T) { + t.Parallel() + + ch := make(chan struct{}) + ctx, cancel := utils.ContextFromChanWithTimeout(ch, testutils.TestInterval) + defer cancel() + + assertCtxCancelled(ctx, t) + }) + + t.Run("stopped", func(t *testing.T) { + t.Parallel() + + ch := make(chan struct{}) + ctx, cancel := utils.ContextFromChanWithTimeout(ch, testutils.WaitTimeout(t)) + defer cancel() + + ch <- struct{}{} + + assertCtxCancelled(ctx, t) + }) +} + +func TestLeftPadBitString(t *testing.T) { + t.Parallel() + + for _, test := range []struct { + str string + len int + expected string + }{ + {"abc", 10, "0000000abc"}, + {"abc", 0, "abc"}, + {"abc", 2, "abc"}, + {"abc", 3, "abc"}, + {"abc", -10, "abc"}, + } { + s := utils.LeftPadBitString(test.str, test.len) + assert.Equal(t, test.expected, s) + } +} + +func TestKeyedMutex(t *testing.T) { + t.Parallel() + + var km utils.KeyedMutex + unlock1 := km.LockInt64(1) + unlock2 := km.LockInt64(2) + + awaiter := cltest.NewAwaiter() + go func() { + km.LockInt64(1)() + km.LockInt64(2)() + awaiter.ItHappened() + }() + + unlock2() + unlock1() + awaiter.AwaitOrFail(t) +} + +func TestValidateCronSchedule(t *testing.T) { + t.Parallel() + + err := utils.ValidateCronSchedule("") + assert.Error(t, err) + + err = utils.ValidateCronSchedule("CRON_TZ=UTC 5 * * * *") + assert.NoError(t, err) + + err = utils.ValidateCronSchedule("@every 1h30m") + assert.NoError(t, err) + + err = utils.ValidateCronSchedule("@every xyz") + assert.Error(t, err) +} + +func TestPausableTicker(t *testing.T) { + t.Parallel() + + var counter atomic.Int32 + + pt := utils.NewPausableTicker(testutils.TestInterval) + assert.Nil(t, pt.Ticks()) + defer pt.Destroy() + + followNTicks := func(n int32, awaiter cltest.Awaiter) { + for range pt.Ticks() { + if counter.Add(1) == n { + awaiter.ItHappened() + } + } + } + + pt.Resume() + + wait10 := cltest.NewAwaiter() + go followNTicks(10, wait10) + + wait10.AwaitOrFail(t) + + pt.Pause() + 
time.Sleep(10 * testutils.TestInterval) + assert.Less(t, counter.Load(), int32(20)) + pt.Resume() + + wait20 := cltest.NewAwaiter() + go followNTicks(20, wait20) + + wait20.AwaitOrFail(t) +} + +func TestCronTicker(t *testing.T) { + t.Parallel() + + var counter atomic.Int32 + + ct, err := utils.NewCronTicker("@every 100ms") + assert.NoError(t, err) + + awaiter := cltest.NewAwaiter() + + go func() { + for range ct.Ticks() { + if counter.Add(1) == 2 { + awaiter.ItHappened() + } + } + }() + + assert.True(t, ct.Start()) + assert.True(t, ct.Stop()) + assert.Zero(t, counter.Load()) + + assert.True(t, ct.Start()) + + awaiter.AwaitOrFail(t) + + assert.True(t, ct.Stop()) + c := counter.Load() + time.Sleep(1 * time.Second) + assert.Equal(t, c, counter.Load()) +} + +func TestErrorBuffer(t *testing.T) { + t.Parallel() + + err1 := errors.New("err1") + err2 := errors.New("err2") + err3 := errors.New("err3") + + t.Run("happy path", func(t *testing.T) { + t.Parallel() + buff := utils.ErrorBuffer{} + buff.Append(err1) + buff.Append(err2) + combined := buff.Flush() + errs := utils.UnwrapError(combined) + assert.Equal(t, 2, len(errs)) + assert.Equal(t, err1.Error(), errs[0].Error()) + assert.Equal(t, err2.Error(), errs[1].Error()) + }) + + t.Run("ovewrite oldest error when cap exceeded", func(t *testing.T) { + t.Parallel() + buff := utils.ErrorBuffer{} + buff.SetCap(2) + buff.Append(err1) + buff.Append(err2) + buff.Append(err3) + combined := buff.Flush() + errs := utils.UnwrapError(combined) + assert.Equal(t, 2, len(errs)) + assert.Equal(t, err2.Error(), errs[0].Error()) + assert.Equal(t, err3.Error(), errs[1].Error()) + }) + + t.Run("does not overwrite the buffer if cap == 0", func(t *testing.T) { + t.Parallel() + buff := utils.ErrorBuffer{} + for i := 1; i <= 20; i++ { + buff.Append(errors.Errorf("err#%d", i)) + } + + combined := buff.Flush() + errs := utils.UnwrapError(combined) + assert.Equal(t, 20, len(errs)) + assert.Equal(t, "err#20", errs[19].Error()) + }) + + 
t.Run("UnwrapError returns the a single element err array if passed err is not a joinedError", func(t *testing.T) { + t.Parallel() + errs := utils.UnwrapError(err1) + assert.Equal(t, 1, len(errs)) + assert.Equal(t, err1.Error(), errs[0].Error()) + }) + + t.Run("flushing an empty err buffer is a nil error", func(t *testing.T) { + t.Parallel() + buff := utils.ErrorBuffer{} + + combined := buff.Flush() + require.Nil(t, combined) + }) + +} diff --git a/core/web/api.go b/core/web/api.go new file mode 100644 index 00000000..1f97d59c --- /dev/null +++ b/core/web/api.go @@ -0,0 +1,144 @@ +package web + +import ( + "encoding/json" + "fmt" + "net/url" + "strconv" + + "github.com/manyminds/api2go/jsonapi" + "github.com/pkg/errors" +) + +const ( + // PaginationDefault is the number of records to supply from a paginated + // request when no size param is supplied. + PaginationDefault = 25 + + // MediaType is the response header for JSONAPI documents. + MediaType = "application/vnd.api+json" + + // KeyNextLink is the name of the key that contains the HREF for the next + // document in a paginated response. + KeyNextLink = "next" + // KeyPreviousLink is the name of the key that contains the HREF for the + // previous document in a paginated response. + KeyPreviousLink = "prev" +) + +// ParsePaginatedRequest parses the parameters that control pagination for a +// collection request, returning the size and offset if specified, or a +// sensible default. 
+func ParsePaginatedRequest(sizeParam, pageParam string) (int, int, int, error) { + var err error + page := 1 + size := PaginationDefault + + if sizeParam != "" { + if size, err = strconv.Atoi(sizeParam); err != nil || size < 1 { + return 0, 0, 0, fmt.Errorf("invalid size param, error: %+v", err) + } + } + + if pageParam != "" { + if page, err = strconv.Atoi(pageParam); err != nil || page < 1 { + return 0, 0, 0, fmt.Errorf("invalid page param, error: %+v", err) + } + } + + offset := (page - 1) * size + return size, page, offset, nil +} + +func paginationLink(url url.URL, size, page int) jsonapi.Link { + query := url.Query() + query.Set("size", strconv.Itoa(size)) + query.Set("page", strconv.Itoa(page)) + url.RawQuery = query.Encode() + return jsonapi.Link{Href: url.String()} +} + +func nextLink(url url.URL, size, page int) jsonapi.Link { + return paginationLink(url, size, page+1) +} + +func prevLink(url url.URL, size, page int) jsonapi.Link { + return paginationLink(url, size, page-1) +} + +// NewJSONAPIResponse returns a JSONAPI response for a single resource. 
+func NewJSONAPIResponse(resource interface{}) ([]byte, error) { + document, err := jsonapi.MarshalToStruct(resource, nil) + if err != nil { + return nil, fmt.Errorf("failed to marshal resource to struct: %+v", err) + } + + return json.Marshal(document) +} + +// NewPaginatedResponse returns a jsonapi.Document with links to next and previous collection pages +func NewPaginatedResponse(url url.URL, size, page, count int, resource interface{}) ([]byte, error) { + document, err := getPaginatedResponseDoc(url, size, page, count, resource) + if err != nil { + return nil, err + } + return json.Marshal(document) +} + +func getPaginatedResponseDoc(url url.URL, size, page, count int, resource interface{}) (*jsonapi.Document, error) { + document, err := jsonapi.MarshalToStruct(resource, nil) + if err != nil { + return nil, fmt.Errorf("failed to marshal resource to struct: %+v", err) + } + + document.Meta = make(jsonapi.Meta) + document.Meta["count"] = count + + document.Links = make(jsonapi.Links) + if count > size { + if page*size < count { + document.Links[KeyNextLink] = nextLink(url, size, page) + } + if page > 1 { + document.Links[KeyPreviousLink] = prevLink(url, size, page) + } + } + return document, nil +} + +// ParsePaginatedResponse parse a JSONAPI response for a document with links +func ParsePaginatedResponse(input []byte, resource interface{}, links *jsonapi.Links) error { + document := jsonapi.Document{} + err := parsePaginatedResponseToDocument(input, resource, &document) + if err != nil { + return err + } + *links = document.Links + return nil +} + +func parsePaginatedResponseToDocument(input []byte, resource interface{}, document *jsonapi.Document) error { + err := ParseJSONAPIResponse(input, resource) + if err != nil { + return errors.Wrap(err, "ParseJSONAPIResponse error") + } + + // Unmarshal using the stdlib Unmarshal to extract the links part of the document + err = json.Unmarshal(input, &document) + if err != nil { + return fmt.Errorf("unable to unmarshal 
links: %+v", err) + } + return nil +} + +// ParseJSONAPIResponse parses the bytes of the root document and unmarshals it +// into the given resource. +func ParseJSONAPIResponse(input []byte, resource interface{}) error { + // as is api2go will discard the links + err := jsonapi.Unmarshal(input, resource) + if err != nil { + return fmt.Errorf("web: unable to unmarshal data of type %T, %+v", resource, err) + } + + return nil +} diff --git a/core/web/api_test.go b/core/web/api_test.go new file mode 100644 index 00000000..1e414ccd --- /dev/null +++ b/core/web/api_test.go @@ -0,0 +1,163 @@ +package web + +import ( + "net/url" + "testing" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" +) + +func TestApi_ParsePaginatedRequest(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + sizeParam string + pageParam string + err bool + size int + page int + offset int + }{ + {"blank values", "", "", false, 25, 1, 0}, + {"valid sizeParam", "10", "", false, 10, 1, 0}, + {"valid pageParam", "", "3", false, 25, 3, 50}, + {"invalid sizeParam", "xhje", "", true, 0, 0, 0}, + {"invalid pageParam", "", "ewjh", true, 0, 0, 0}, + {"small sizeParam", "0", "", true, 0, 0, 0}, + {"negative pageParam", "", "-1", true, 0, 0, 0}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + size, page, offset, err := ParsePaginatedRequest(test.sizeParam, test.pageParam) + if test.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, test.size, size) + assert.Equal(t, test.page, page) + assert.Equal(t, test.offset, offset) + }) + } +} + +type TestResource struct { + Title string +} + +func (r TestResource) GetID() string { + return "1" +} + +func (r *TestResource) SetID(value string) error { + return nil +} + +func TestApi_NewPaginatedResponse(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + path string + size int + page int + count int + resource interface{} + err bool + 
output string + }{ + { + "a single resource", + "/v2/index", 1, 0, 0, TestResource{Title: "Item"}, + false, `{"data":{"type":"testResources","id":"1","attributes":{"Title":"Item"}},"meta":{"count":0}}`, + }, + { + "a resource collection", + "/v2/index", 1, 0, 0, []TestResource{{Title: "Item 1"}, {Title: "Item 2"}}, + false, `{"data":[{"type":"testResources","id":"1","attributes":{"Title":"Item 1"}},{"type":"testResources","id":"1","attributes":{"Title":"Item 2"}}],"meta":{"count":0}}`, + }, + { + "first page of collection results", + "/v2/index", 5, 1, 7, []TestResource{{Title: "Item 1"}}, + false, `{"links":{"next":"/v2/index?page=2\u0026size=5"},"data":[{"type":"testResources","id":"1","attributes":{"Title":"Item 1"}}],"meta":{"count":7}}`, + }, + { + "middle page of collection results", + "/v2/index", 5, 2, 13, []TestResource{{Title: "Item 2"}}, + false, `{"links":{"next":"/v2/index?page=3\u0026size=5","prev":"/v2/index?page=1\u0026size=5"},"data":[{"type":"testResources","id":"1","attributes":{"Title":"Item 2"}}],"meta":{"count":13}}`, + }, + { + "end page of collection results", + "/v2/index", 5, 3, 13, []TestResource{{Title: "Item 3"}}, + false, `{"links":{"prev":"/v2/index?page=2\u0026size=5"},"data":[{"type":"testResources","id":"1","attributes":{"Title":"Item 3"}}],"meta":{"count":13}}`, + }, + { + "path with existing query", + "/v2/index?authToken=3123", 1, 0, 2, []TestResource{{Title: "Item 1"}}, + false, `{"links":{"next":"/v2/index?authToken=3123\u0026page=1\u0026size=1"},"data":[{"type":"testResources","id":"1","attributes":{"Title":"Item 1"}}],"meta":{"count":2}}`, + }, + { + "json marshalling failure", + "/v2/index", 1, 0, 0, "", + true, ``, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + url, err := url.Parse(test.path) + assert.NoError(t, err) + buffer, err := NewPaginatedResponse(*url, test.size, test.page, test.count, test.resource) + if test.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } 
+ assert.Equal(t, test.output, string(buffer)) + }) + } +} + +func TestPagination_ParsePaginatedResponse(t *testing.T) { + t.Parallel() + + var docs []TestResource + var links jsonapi.Links + + err := ParsePaginatedResponse([]byte(`{"data":[{"type":"testResources","id":"1","attributes":{"Title":"album 1"}}]}`), &docs, &links) + assert.NoError(t, err) + assert.Equal(t, "album 1", docs[0].Title) + + // Typo in "type" + err = ParsePaginatedResponse([]byte(`{"data":[{"type":"testNotResources","id":"1","attributes":{}}]}`), &docs, &links) + assert.Error(t, err) + + // Typo in "links" + err = ParsePaginatedResponse([]byte(`{"links":[],"data":[{"type":"testResources","id":"1","attributes":{}}]}`), &docs, &links) + assert.Error(t, err) +} + +type DummyResource struct { + ID string +} + +// GetID returns the ID of this structure for jsonapi serialization. +func (d DummyResource) GetID() string { + return d.ID +} + +func TestNewJSONAPIResponse(t *testing.T) { + t.Parallel() + + buffer, err := NewJSONAPIResponse(12981) + assert.Error(t, err) + assert.Len(t, buffer, 0) + + r := DummyResource{ID: "782"} + buffer, err = NewJSONAPIResponse(&r) + assert.NoError(t, err) + assert.Equal(t, `{"data":{"type":"dummyResources","id":"782","attributes":{"ID":"782"}}}`, string(buffer)) +} diff --git a/core/web/assets/9f6d832ef97e8493764e.svg b/core/web/assets/9f6d832ef97e8493764e.svg new file mode 100644 index 00000000..e35f1df2 --- /dev/null +++ b/core/web/assets/9f6d832ef97e8493764e.svg @@ -0,0 +1 @@ + diff --git a/core/web/assets/9f6d832ef97e8493764e.svg.gz b/core/web/assets/9f6d832ef97e8493764e.svg.gz new file mode 100644 index 00000000..94a2aade Binary files /dev/null and b/core/web/assets/9f6d832ef97e8493764e.svg.gz differ diff --git a/core/web/assets/ba8bbf16ebf8e1d05bef.svg b/core/web/assets/ba8bbf16ebf8e1d05bef.svg new file mode 100644 index 00000000..3ca546a8 --- /dev/null +++ b/core/web/assets/ba8bbf16ebf8e1d05bef.svg @@ -0,0 +1 @@ +Artboard 1 \ No newline at end of file diff 
--git a/core/web/assets/ba8bbf16ebf8e1d05bef.svg.gz b/core/web/assets/ba8bbf16ebf8e1d05bef.svg.gz new file mode 100644 index 00000000..36ce61e5 Binary files /dev/null and b/core/web/assets/ba8bbf16ebf8e1d05bef.svg.gz differ diff --git a/core/web/assets/index.html b/core/web/assets/index.html new file mode 100644 index 00000000..32915b36 --- /dev/null +++ b/core/web/assets/index.html @@ -0,0 +1 @@ +Operator UIPlugin
\ No newline at end of file diff --git a/core/web/assets/index.html.gz b/core/web/assets/index.html.gz new file mode 100644 index 00000000..afbd1b1a Binary files /dev/null and b/core/web/assets/index.html.gz differ diff --git a/core/web/assets/main.74b124ef5d2ef3614139.js b/core/web/assets/main.74b124ef5d2ef3614139.js new file mode 100644 index 00000000..ceff6b6e --- /dev/null +++ b/core/web/assets/main.74b124ef5d2ef3614139.js @@ -0,0 +1,187 @@ +(()=>{var __webpack_modules__={23564(e,t,n){"use strict";n.d(t,{Jh:()=>u,ZT:()=>i,_T:()=>o,ev:()=>c,mG:()=>s,pi:()=>a});/*! ***************************************************************************** +Copyright (c) Microsoft Corporation. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. 
+***************************************************************************** */ var r=function(e,t){return(r=Object.setPrototypeOf||({__proto__:[]})instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var n in t)Object.prototype.hasOwnProperty.call(t,n)&&(e[n]=t[n])})(e,t)};function i(e,t){if("function"!=typeof t&&null!==t)throw TypeError("Class extends value "+String(t)+" is not a constructor or null");function n(){this.constructor=e}r(e,t),e.prototype=null===t?Object.create(t):(n.prototype=t.prototype,new n)}var a=function(){return(a=Object.assign||function(e){for(var t,n=1,r=arguments.length;nt.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var i=0,r=Object.getOwnPropertySymbols(e);it.indexOf(r[i])&&Object.prototype.propertyIsEnumerable.call(e,r[i])&&(n[r[i]]=e[r[i]]);return n}function s(e,t,n,r){function i(e){return e instanceof n?e:new n(function(t){t(e)})}return new(n||(n=Promise))(function(n,a){function o(e){try{u(r.next(e))}catch(t){a(t)}}function s(e){try{u(r.throw(e))}catch(t){a(t)}}function u(e){e.done?n(e.value):i(e.value).then(o,s)}u((r=r.apply(e,t||[])).next())})}function u(e,t){var n,r,i,a,o={label:0,sent:function(){if(1&i[0])throw i[1];return i[1]},trys:[],ops:[]};return a={next:s(0),throw:s(1),return:s(2)},"function"==typeof Symbol&&(a[Symbol.iterator]=function(){return this}),a;function s(e){return function(t){return u([e,t])}}function u(a){if(n)throw TypeError("Generator is already executing.");for(;o;)try{if(n=1,r&&(i=2&a[0]?r.return:a[0]?r.throw||((i=r.return)&&i.call(r),0):r.next)&&!(i=i.call(r,a[1])).done)return i;switch(r=0,i&&(a=[2&a[0],i.value]),a[0]){case 0:case 1:i=a;break;case 4:return o.label++,{value:a[1],done:!1};case 5:o.label++,r=a[1],a=[0];continue;case 7:a=o.ops.pop(),o.trys.pop();continue;default:if(!(i=(i=o.trys).length>0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var 
n=0,r=Array(t);n=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}e.exports=i,e.exports.default=e.exports,e.exports.__esModule=!0},37316(e){function t(e,t){if(null==e)return{};var n,r,i={},a=Object.keys(e);for(r=0;r=0||(i[n]=e[n]);return i}e.exports=t,e.exports.default=e.exports,e.exports.__esModule=!0},78585(e,t,n){var r=n(50008).default,i=n(81506);function a(e,t){return t&&("object"===r(t)||"function"==typeof t)?t:i(e)}e.exports=a,e.exports.default=e.exports,e.exports.__esModule=!0},99489(e){function t(n,r){return e.exports=t=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e},e.exports.default=e.exports,e.exports.__esModule=!0,t(n,r)}e.exports=t,e.exports.default=e.exports,e.exports.__esModule=!0},319(e,t,n){var r=n(23646),i=n(46860),a=n(60379),o=n(98206);function s(e){return r(e)||i(e)||a(e)||o()}e.exports=s,e.exports.default=e.exports,e.exports.__esModule=!0},50008(e){function t(n){return"function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?(e.exports=t=function(e){return typeof e},e.exports.default=e.exports,e.exports.__esModule=!0):(e.exports=t=function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},e.exports.default=e.exports,e.exports.__esModule=!0),t(n)}e.exports=t,e.exports.default=e.exports,e.exports.__esModule=!0},60379(e,t,n){var r=n(67228);function i(e,t){if(e){if("string"==typeof e)return r(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);if("Object"===n&&e.constructor&&(n=e.constructor.name),"Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return r(e,t)}}e.exports=i,e.exports.default=e.exports,e.exports.__esModule=!0},98925(e,t,n){"use strict";let r=n(98633),i=n.g.Date;class a extends i{constructor(e){super(e),this.isDate=!0}toISOString(){return`${this.getUTCFullYear()}-${r(2,this.getUTCMonth()+1)}-${r(2,this.getUTCDate())}`}}e.exports=e=>{let t=new a(e);if(!isNaN(t))return t;throw 
TypeError("Invalid Datetime")}},86595(e,t,n){"use strict";let r=n(98633);class i extends Date{constructor(e){super(e+"Z"),this.isFloating=!0}toISOString(){let e=`${this.getUTCFullYear()}-${r(2,this.getUTCMonth()+1)}-${r(2,this.getUTCDate())}`,t=`${r(2,this.getUTCHours())}:${r(2,this.getUTCMinutes())}:${r(2,this.getUTCSeconds())}.${r(3,this.getUTCMilliseconds())}`;return`${e}T${t}`}}e.exports=e=>{let t=new i(e);if(!isNaN(t))return t;throw TypeError("Invalid Datetime")}},76114(e){"use strict";e.exports=e=>{let t=new Date(e);if(!isNaN(t))return t;throw TypeError("Invalid Datetime")}},99439(e,t,n){"use strict";let r=n(98633);class i extends Date{constructor(e){super(`0000-01-01T${e}Z`),this.isTime=!0}toISOString(){return`${r(2,this.getUTCHours())}:${r(2,this.getUTCMinutes())}:${r(2,this.getUTCSeconds())}.${r(3,this.getUTCMilliseconds())}`}}e.exports=e=>{let t=new i(e);if(!isNaN(t))return t;throw TypeError("Invalid Datetime")}},98633(e){"use strict";e.exports=(e,t)=>{for(t=String(t);t.length{let t=new TomlError(e.message);return t.code=e.code,t.wrapped=e,t},module.exports.TomlError=TomlError;let createDateTime=__webpack_require__(76114),createDateTimeFloat=__webpack_require__(86595),createDate=__webpack_require__(98925),createTime=__webpack_require__(99439),CTRL_I=9,CTRL_J=10,CTRL_M=13,CTRL_CHAR_BOUNDARY=31,CHAR_SP=32,CHAR_QUOT=34,CHAR_NUM=35,CHAR_APOS=39,CHAR_PLUS=43,CHAR_COMMA=44,CHAR_HYPHEN=45,CHAR_PERIOD=46,CHAR_0=48,CHAR_1=49,CHAR_7=55,CHAR_9=57,CHAR_COLON=58,CHAR_EQUALS=61,CHAR_A=65,CHAR_E=69,CHAR_F=70,CHAR_T=84,CHAR_U=85,CHAR_Z=90,CHAR_LOWBAR=95,CHAR_a=97,CHAR_b=98,CHAR_e=101,CHAR_f=102,CHAR_i=105,CHAR_l=108,CHAR_n=110,CHAR_o=111,CHAR_r=114,CHAR_s=115,CHAR_t=116,CHAR_u=117,CHAR_x=120,CHAR_z=122,CHAR_LCUB=123,CHAR_RCUB=125,CHAR_LSQB=91,CHAR_BSOL=92,CHAR_RSQB=93,CHAR_DEL=127,SURROGATE_FIRST=55296,SURROGATE_LAST=57343,escapes={[CHAR_b]:"\b",[CHAR_t]:" ",[CHAR_n]:"\n",[CHAR_f]:"\f",[CHAR_r]:"\r",[CHAR_QUOT]:'"',[CHAR_BSOL]:"\\"};function isDigit(e){return 
e>=CHAR_0&&e<=CHAR_9}function isHexit(e){return e>=CHAR_A&&e<=CHAR_F||e>=CHAR_a&&e<=CHAR_f||e>=CHAR_0&&e<=CHAR_9}function isBit(e){return e===CHAR_1||e===CHAR_0}function isOctit(e){return e>=CHAR_0&&e<=CHAR_7}function isAlphaNumQuoteHyphen(e){return e>=CHAR_A&&e<=CHAR_Z||e>=CHAR_a&&e<=CHAR_z||e>=CHAR_0&&e<=CHAR_9||e===CHAR_APOS||e===CHAR_QUOT||e===CHAR_LOWBAR||e===CHAR_HYPHEN}function isAlphaNumHyphen(e){return e>=CHAR_A&&e<=CHAR_Z||e>=CHAR_a&&e<=CHAR_z||e>=CHAR_0&&e<=CHAR_9||e===CHAR_LOWBAR||e===CHAR_HYPHEN}let _type=Symbol("type"),_declared=Symbol("declared"),hasOwnProperty=Object.prototype.hasOwnProperty,defineProperty=Object.defineProperty,descriptor={configurable:!0,enumerable:!0,writable:!0,value:void 0};function hasKey(e,t){return!!hasOwnProperty.call(e,t)||("__proto__"===t&&defineProperty(e,"__proto__",descriptor),!1)}let INLINE_TABLE=Symbol("inline-table");function InlineTable(){return Object.defineProperties({},{[_type]:{value:INLINE_TABLE}})}function isInlineTable(e){return null!==e&&"object"==typeof e&&e[_type]===INLINE_TABLE}let TABLE=Symbol("table");function Table(){return Object.defineProperties({},{[_type]:{value:TABLE},[_declared]:{value:!1,writable:!0}})}function isTable(e){return null!==e&&"object"==typeof e&&e[_type]===TABLE}let _contentType=Symbol("content-type"),INLINE_LIST=Symbol("inline-list");function InlineList(e){return Object.defineProperties([],{[_type]:{value:INLINE_LIST},[_contentType]:{value:e}})}function isInlineList(e){return null!==e&&"object"==typeof e&&e[_type]===INLINE_LIST}let LIST=Symbol("list");function List(){return Object.defineProperties([],{[_type]:{value:LIST}})}function isList(e){return null!==e&&"object"==typeof e&&e[_type]===LIST}let _custom;try{let utilInspect=eval("require('util').inspect");_custom=utilInspect.custom}catch(_){}let _inspect=_custom||"inspect";class 
BoxedBigInt{constructor(e){try{this.value=__webpack_require__.g.BigInt.asIntN(64,e)}catch(t){this.value=null}Object.defineProperty(this,_type,{value:INTEGER})}isNaN(){return null===this.value}toString(){return String(this.value)}[_inspect](){return`[BigInt: ${this.toString()}]}`}valueOf(){return this.value}}let INTEGER=Symbol("integer");function Integer(e){let t=Number(e);return(Object.is(t,-0)&&(t=0),__webpack_require__.g.BigInt&&!Number.isSafeInteger(t))?new BoxedBigInt(e):Object.defineProperties(new Number(t),{isNaN:{value:function(){return isNaN(this)}},[_type]:{value:INTEGER},[_inspect]:{value:()=>`[Integer: ${e}]`}})}function isInteger(e){return null!==e&&"object"==typeof e&&e[_type]===INTEGER}let FLOAT=Symbol("float");function Float(e){return Object.defineProperties(new Number(e),{[_type]:{value:FLOAT},[_inspect]:{value:()=>`[Float: ${e}]`}})}function isFloat(e){return null!==e&&"object"==typeof e&&e[_type]===FLOAT}function tomlType(e){let t=typeof e;if("object"===t){if(null===e)return"null";if(e instanceof Date)return"datetime";if(_type in e)switch(e[_type]){case INLINE_TABLE:return"inline-table";case INLINE_LIST:return"inline-list";case TABLE:return"table";case LIST:return"list";case FLOAT:return"float";case INTEGER:return"integer"}}return t}function makeParserClass(e){class t extends e{constructor(){super(),this.ctx=this.obj=Table()}atEndOfWord(){return this.char===CHAR_NUM||this.char===CTRL_I||this.char===CHAR_SP||this.atEndOfLine()}atEndOfLine(){return this.char===e.END||this.char===CTRL_J||this.char===CTRL_M}parseStart(){if(this.char===e.END)return null;if(this.char===CHAR_LSQB)return this.call(this.parseTableOrList);if(this.char===CHAR_NUM)return this.call(this.parseComment);if(this.char===CTRL_J||this.char===CHAR_SP||this.char===CTRL_I||this.char===CTRL_M)return null;if(isAlphaNumQuoteHyphen(this.char))return this.callNow(this.parseAssignStatement);else throw this.error(new TomlError(`Unknown character 
"${this.char}"`))}parseWhitespaceToEOL(){if(this.char===CHAR_SP||this.char===CTRL_I||this.char===CTRL_M)return null;if(this.char===CHAR_NUM)return this.goto(this.parseComment);if(this.char===e.END||this.char===CTRL_J)return this.return();throw this.error(new TomlError("Unexpected character, expected only whitespace or comments till end of line"))}parseAssignStatement(){return this.callNow(this.parseAssign,this.recordAssignStatement)}recordAssignStatement(e){let t=this.ctx,n=e.key.pop();for(let r of e.key){if(hasKey(t,r)&&!isTable(t[r]))throw this.error(new TomlError("Can't redefine existing key"));t=t[r]=t[r]||Table()}if(hasKey(t,n))throw this.error(new TomlError("Can't redefine existing key"));return t[_declared]=!0,isInteger(e.value)||isFloat(e.value)?t[n]=e.value.valueOf():t[n]=e.value,this.goto(this.parseWhitespaceToEOL)}parseAssign(){return this.callNow(this.parseKeyword,this.recordAssignKeyword)}recordAssignKeyword(e){return this.state.resultTable?this.state.resultTable.push(e):this.state.resultTable=[e],this.goto(this.parseAssignKeywordPreDot)}parseAssignKeywordPreDot(){return this.char===CHAR_PERIOD?this.next(this.parseAssignKeywordPostDot):this.char!==CHAR_SP&&this.char!==CTRL_I?this.goto(this.parseAssignEqual):void 0}parseAssignKeywordPostDot(){if(this.char!==CHAR_SP&&this.char!==CTRL_I)return this.callNow(this.parseKeyword,this.recordAssignKeyword)}parseAssignEqual(){if(this.char===CHAR_EQUALS)return this.next(this.parseAssignPreValue);throw this.error(new TomlError('Invalid character, expected "="'))}parseAssignPreValue(){return this.char===CHAR_SP||this.char===CTRL_I?null:this.callNow(this.parseValue,this.recordAssignValue)}recordAssignValue(e){return this.returnNow({key:this.state.resultTable,value:e})}parseComment(){do{if(this.char===e.END||this.char===CTRL_J)return this.return();if(this.char===CHAR_DEL||this.char<=CTRL_CHAR_BOUNDARY&&this.char!==CTRL_I)throw 
this.errorControlCharIn("comments")}while(this.nextChar())}parseTableOrList(){if(this.char!==CHAR_LSQB)return this.goto(this.parseTable);this.next(this.parseList)}parseTable(){return this.ctx=this.obj,this.goto(this.parseTableNext)}parseTableNext(){return this.char===CHAR_SP||this.char===CTRL_I?null:this.callNow(this.parseKeyword,this.parseTableMore)}parseTableMore(e){if(this.char===CHAR_SP||this.char===CTRL_I)return null;if(this.char===CHAR_RSQB){if(hasKey(this.ctx,e)&&(!isTable(this.ctx[e])||this.ctx[e][_declared]))throw this.error(new TomlError("Can't redefine existing key"));return this.ctx=this.ctx[e]=this.ctx[e]||Table(),this.ctx[_declared]=!0,this.next(this.parseWhitespaceToEOL)}if(this.char===CHAR_PERIOD){if(hasKey(this.ctx,e)){if(isTable(this.ctx[e]))this.ctx=this.ctx[e];else if(isList(this.ctx[e]))this.ctx=this.ctx[e][this.ctx[e].length-1];else throw this.error(new TomlError("Can't redefine existing key"))}else this.ctx=this.ctx[e]=Table();return this.next(this.parseTableNext)}throw this.error(new TomlError("Unexpected character, expected whitespace, . 
or ]"))}parseList(){return this.ctx=this.obj,this.goto(this.parseListNext)}parseListNext(){return this.char===CHAR_SP||this.char===CTRL_I?null:this.callNow(this.parseKeyword,this.parseListMore)}parseListMore(e){if(this.char===CHAR_SP||this.char===CTRL_I)return null;if(this.char===CHAR_RSQB){if(hasKey(this.ctx,e)||(this.ctx[e]=List()),isInlineList(this.ctx[e]))throw this.error(new TomlError("Can't extend an inline array"));if(isList(this.ctx[e])){let t=Table();this.ctx[e].push(t),this.ctx=t}else throw this.error(new TomlError("Can't redefine an existing key"));return this.next(this.parseListEnd)}if(this.char===CHAR_PERIOD){if(hasKey(this.ctx,e)){if(isInlineList(this.ctx[e]))throw this.error(new TomlError("Can't extend an inline array"));if(isInlineTable(this.ctx[e]))throw this.error(new TomlError("Can't extend an inline table"));else if(isList(this.ctx[e]))this.ctx=this.ctx[e][this.ctx[e].length-1];else if(isTable(this.ctx[e]))this.ctx=this.ctx[e];else throw this.error(new TomlError("Can't redefine an existing key"))}else this.ctx=this.ctx[e]=Table();return this.next(this.parseListNext)}throw this.error(new TomlError("Unexpected character, expected whitespace, . or ]"))}parseListEnd(e){if(this.char===CHAR_RSQB)return this.next(this.parseWhitespaceToEOL);throw this.error(new TomlError("Unexpected character, expected whitespace, . 
or ]"))}parseValue(){if(this.char===e.END)throw this.error(new TomlError("Key without value"));if(this.char===CHAR_QUOT)return this.next(this.parseDoubleString);if(this.char===CHAR_APOS)return this.next(this.parseSingleString);if(this.char===CHAR_HYPHEN||this.char===CHAR_PLUS)return this.goto(this.parseNumberSign);if(this.char===CHAR_i)return this.next(this.parseInf);if(this.char===CHAR_n)return this.next(this.parseNan);if(isDigit(this.char))return this.goto(this.parseNumberOrDateTime);else if(this.char===CHAR_t||this.char===CHAR_f)return this.goto(this.parseBoolean);else if(this.char===CHAR_LSQB)return this.call(this.parseInlineList,this.recordValue);else if(this.char===CHAR_LCUB)return this.call(this.parseInlineTable,this.recordValue);else throw this.error(new TomlError("Unexpected character, expecting string, number, datetime, boolean, inline array or inline table"))}recordValue(e){return this.returnNow(e)}parseInf(){if(this.char===CHAR_n)return this.next(this.parseInf2);throw this.error(new TomlError('Unexpected character, expected "inf", "+inf" or "-inf"'))}parseInf2(){if(this.char===CHAR_f)return"-"===this.state.buf?this.return(-1/0):this.return(1/0);throw this.error(new TomlError('Unexpected character, expected "inf", "+inf" or "-inf"'))}parseNan(){if(this.char===CHAR_a)return this.next(this.parseNan2);throw this.error(new TomlError('Unexpected character, expected "nan"'))}parseNan2(){if(this.char===CHAR_n)return this.return(NaN);throw this.error(new TomlError('Unexpected character, expected "nan"'))}parseKeyword(){return this.char===CHAR_QUOT?this.next(this.parseBasicString):this.char===CHAR_APOS?this.next(this.parseLiteralString):this.goto(this.parseBareKey)}parseBareKey(){do{if(this.char===e.END)throw this.error(new TomlError("Key ended without value"));if(isAlphaNumHyphen(this.char))this.consume();else if(0!==this.state.buf.length)return this.returnNow();else throw this.error(new TomlError("Empty bare keys are not 
allowed"))}while(this.nextChar())}parseSingleString(){return this.char===CHAR_APOS?this.next(this.parseLiteralMultiStringMaybe):this.goto(this.parseLiteralString)}parseLiteralString(){do{if(this.char===CHAR_APOS)return this.return();if(this.atEndOfLine())throw this.error(new TomlError("Unterminated string"));if(this.char===CHAR_DEL||this.char<=CTRL_CHAR_BOUNDARY&&this.char!==CTRL_I)throw this.errorControlCharIn("strings");else this.consume()}while(this.nextChar())}parseLiteralMultiStringMaybe(){return this.char===CHAR_APOS?this.next(this.parseLiteralMultiString):this.returnNow()}parseLiteralMultiString(){return this.char===CTRL_M?null:this.char===CTRL_J?this.next(this.parseLiteralMultiStringContent):this.goto(this.parseLiteralMultiStringContent)}parseLiteralMultiStringContent(){do{if(this.char===CHAR_APOS)return this.next(this.parseLiteralMultiEnd);if(this.char===e.END)throw this.error(new TomlError("Unterminated multi-line string"));if(this.char===CHAR_DEL||this.char<=CTRL_CHAR_BOUNDARY&&this.char!==CTRL_I&&this.char!==CTRL_J&&this.char!==CTRL_M)throw this.errorControlCharIn("strings");else this.consume()}while(this.nextChar())}parseLiteralMultiEnd(){return this.char===CHAR_APOS?this.next(this.parseLiteralMultiEnd2):(this.state.buf+="'",this.goto(this.parseLiteralMultiStringContent))}parseLiteralMultiEnd2(){return this.char===CHAR_APOS?this.next(this.parseLiteralMultiEnd3):(this.state.buf+="''",this.goto(this.parseLiteralMultiStringContent))}parseLiteralMultiEnd3(){return this.char===CHAR_APOS?(this.state.buf+="'",this.next(this.parseLiteralMultiEnd4)):this.returnNow()}parseLiteralMultiEnd4(){return this.char===CHAR_APOS?(this.state.buf+="'",this.return()):this.returnNow()}parseDoubleString(){return this.char===CHAR_QUOT?this.next(this.parseMultiStringMaybe):this.goto(this.parseBasicString)}parseBasicString(){do{if(this.char===CHAR_BSOL)return this.call(this.parseEscape,this.recordEscapeReplacement);if(this.char===CHAR_QUOT)return 
this.return();if(this.atEndOfLine())throw this.error(new TomlError("Unterminated string"));else if(this.char===CHAR_DEL||this.char<=CTRL_CHAR_BOUNDARY&&this.char!==CTRL_I)throw this.errorControlCharIn("strings");else this.consume()}while(this.nextChar())}recordEscapeReplacement(e){return this.state.buf+=e,this.goto(this.parseBasicString)}parseMultiStringMaybe(){return this.char===CHAR_QUOT?this.next(this.parseMultiString):this.returnNow()}parseMultiString(){return this.char===CTRL_M?null:this.char===CTRL_J?this.next(this.parseMultiStringContent):this.goto(this.parseMultiStringContent)}parseMultiStringContent(){do{if(this.char===CHAR_BSOL)return this.call(this.parseMultiEscape,this.recordMultiEscapeReplacement);if(this.char===CHAR_QUOT)return this.next(this.parseMultiEnd);if(this.char===e.END)throw this.error(new TomlError("Unterminated multi-line string"));else if(this.char===CHAR_DEL||this.char<=CTRL_CHAR_BOUNDARY&&this.char!==CTRL_I&&this.char!==CTRL_J&&this.char!==CTRL_M)throw this.errorControlCharIn("strings");else this.consume()}while(this.nextChar())}errorControlCharIn(e){let t="\\u00";return this.char<16&&(t+="0"),t+=this.char.toString(16),this.error(new TomlError(`Control characters (codes < 0x1f and 0x7f) are not allowed in ${e}, use ${t} instead`))}recordMultiEscapeReplacement(e){return this.state.buf+=e,this.goto(this.parseMultiStringContent)}parseMultiEnd(){return this.char===CHAR_QUOT?this.next(this.parseMultiEnd2):(this.state.buf+='"',this.goto(this.parseMultiStringContent))}parseMultiEnd2(){return this.char===CHAR_QUOT?this.next(this.parseMultiEnd3):(this.state.buf+='""',this.goto(this.parseMultiStringContent))}parseMultiEnd3(){return this.char===CHAR_QUOT?(this.state.buf+='"',this.next(this.parseMultiEnd4)):this.returnNow()}parseMultiEnd4(){return this.char===CHAR_QUOT?(this.state.buf+='"',this.return()):this.returnNow()}parseMultiEscape(){return 
this.char===CTRL_M||this.char===CTRL_J?this.next(this.parseMultiTrim):this.char===CHAR_SP||this.char===CTRL_I?this.next(this.parsePreMultiTrim):this.goto(this.parseEscape)}parsePreMultiTrim(){if(this.char===CHAR_SP||this.char===CTRL_I)return null;if(this.char===CTRL_M||this.char===CTRL_J)return this.next(this.parseMultiTrim);throw this.error(new TomlError("Can't escape whitespace"))}parseMultiTrim(){return this.char===CTRL_J||this.char===CHAR_SP||this.char===CTRL_I||this.char===CTRL_M?null:this.returnNow()}parseEscape(){if(this.char in escapes)return this.return(escapes[this.char]);if(this.char===CHAR_u)return this.call(this.parseSmallUnicode,this.parseUnicodeReturn);if(this.char===CHAR_U)return this.call(this.parseLargeUnicode,this.parseUnicodeReturn);throw this.error(new TomlError("Unknown escape character: "+this.char))}parseUnicodeReturn(e){try{let t=parseInt(e,16);if(t>=SURROGATE_FIRST&&t<=SURROGATE_LAST)throw this.error(new TomlError("Invalid unicode, character in range 0xD800 - 0xDFFF is reserved"));return this.returnNow(String.fromCodePoint(t))}catch(n){throw this.error(TomlError.wrap(n))}}parseSmallUnicode(){if(isHexit(this.char)){if(this.consume(),this.state.buf.length>=4)return this.return()}else throw this.error(new TomlError("Invalid character in unicode sequence, expected hex"))}parseLargeUnicode(){if(isHexit(this.char)){if(this.consume(),this.state.buf.length>=8)return this.return()}else throw this.error(new TomlError("Invalid character in unicode sequence, expected hex"))}parseNumberSign(){return this.consume(),this.next(this.parseMaybeSignedInfOrNan)}parseMaybeSignedInfOrNan(){return this.char===CHAR_i?this.next(this.parseInf):this.char===CHAR_n?this.next(this.parseNan):this.callNow(this.parseNoUnder,this.parseNumberIntegerStart)}parseNumberIntegerStart(){return this.char===CHAR_0?(this.consume(),this.next(this.parseNumberIntegerExponentOrDecimal)):this.goto(this.parseNumberInteger)}parseNumberIntegerExponentOrDecimal(){return 
this.char===CHAR_PERIOD?(this.consume(),this.call(this.parseNoUnder,this.parseNumberFloat)):this.char===CHAR_E||this.char===CHAR_e?(this.consume(),this.next(this.parseNumberExponentSign)):this.returnNow(Integer(this.state.buf))}parseNumberInteger(){if(isDigit(this.char))this.consume();else{if(this.char===CHAR_LOWBAR)return this.call(this.parseNoUnder);if(this.char===CHAR_E||this.char===CHAR_e)return this.consume(),this.next(this.parseNumberExponentSign);if(this.char===CHAR_PERIOD)return this.consume(),this.call(this.parseNoUnder,this.parseNumberFloat);let e=Integer(this.state.buf);if(!e.isNaN())return this.returnNow(e);throw this.error(new TomlError("Invalid number"))}}parseNoUnder(){if(this.char===CHAR_LOWBAR||this.char===CHAR_PERIOD||this.char===CHAR_E||this.char===CHAR_e)throw this.error(new TomlError("Unexpected character, expected digit"));if(this.atEndOfWord())throw this.error(new TomlError("Incomplete number"));return this.returnNow()}parseNoUnderHexOctBinLiteral(){if(this.char===CHAR_LOWBAR||this.char===CHAR_PERIOD)throw this.error(new TomlError("Unexpected character, expected digit"));if(this.atEndOfWord())throw this.error(new TomlError("Incomplete number"));return this.returnNow()}parseNumberFloat(){if(this.char===CHAR_LOWBAR)return this.call(this.parseNoUnder,this.parseNumberFloat);if(isDigit(this.char))this.consume();else if(this.char===CHAR_E||this.char===CHAR_e)return this.consume(),this.next(this.parseNumberExponentSign);else return this.returnNow(Float(this.state.buf))}parseNumberExponentSign(){if(isDigit(this.char))return this.goto(this.parseNumberExponent);if(this.char===CHAR_HYPHEN||this.char===CHAR_PLUS)this.consume(),this.call(this.parseNoUnder,this.parseNumberExponent);else throw this.error(new TomlError("Unexpected character, expected -, + or digit"))}parseNumberExponent(){if(isDigit(this.char))this.consume();else if(this.char===CHAR_LOWBAR)return this.call(this.parseNoUnder);else return 
this.returnNow(Float(this.state.buf))}parseNumberOrDateTime(){return this.char===CHAR_0?(this.consume(),this.next(this.parseNumberBaseOrDateTime)):this.goto(this.parseNumberOrDateTimeOnly)}parseNumberOrDateTimeOnly(){if(this.char===CHAR_LOWBAR)return this.call(this.parseNoUnder,this.parseNumberInteger);if(isDigit(this.char))this.consume(),this.state.buf.length>4&&this.next(this.parseNumberInteger);else if(this.char===CHAR_E||this.char===CHAR_e)return this.consume(),this.next(this.parseNumberExponentSign);else if(this.char===CHAR_PERIOD)return this.consume(),this.call(this.parseNoUnder,this.parseNumberFloat);else if(this.char===CHAR_HYPHEN)return this.goto(this.parseDateTime);else if(this.char===CHAR_COLON)return this.goto(this.parseOnlyTimeHour);else return this.returnNow(Integer(this.state.buf))}parseDateTimeOnly(){if(this.state.buf.length<4){if(isDigit(this.char))return this.consume();if(this.char===CHAR_COLON)return this.goto(this.parseOnlyTimeHour);throw this.error(new TomlError("Expected digit while parsing year part of a date"))}if(this.char===CHAR_HYPHEN)return this.goto(this.parseDateTime);throw this.error(new TomlError("Expected hyphen (-) while parsing year part of date"))}parseNumberBaseOrDateTime(){if(this.char===CHAR_b)return this.consume(),this.call(this.parseNoUnderHexOctBinLiteral,this.parseIntegerBin);if(this.char===CHAR_o)return this.consume(),this.call(this.parseNoUnderHexOctBinLiteral,this.parseIntegerOct);if(this.char===CHAR_x)return this.consume(),this.call(this.parseNoUnderHexOctBinLiteral,this.parseIntegerHex);if(this.char===CHAR_PERIOD)return this.goto(this.parseNumberInteger);if(isDigit(this.char))return this.goto(this.parseDateTimeOnly);else return this.returnNow(Integer(this.state.buf))}parseIntegerHex(){if(isHexit(this.char))this.consume();else{if(this.char===CHAR_LOWBAR)return this.call(this.parseNoUnderHexOctBinLiteral);let e=Integer(this.state.buf);if(!e.isNaN())return this.returnNow(e);throw this.error(new TomlError("Invalid 
number"))}}parseIntegerOct(){if(isOctit(this.char))this.consume();else{if(this.char===CHAR_LOWBAR)return this.call(this.parseNoUnderHexOctBinLiteral);let e=Integer(this.state.buf);if(!e.isNaN())return this.returnNow(e);throw this.error(new TomlError("Invalid number"))}}parseIntegerBin(){if(isBit(this.char))this.consume();else{if(this.char===CHAR_LOWBAR)return this.call(this.parseNoUnderHexOctBinLiteral);let e=Integer(this.state.buf);if(!e.isNaN())return this.returnNow(e);throw this.error(new TomlError("Invalid number"))}}parseDateTime(){if(this.state.buf.length<4)throw this.error(new TomlError("Years less than 1000 must be zero padded to four characters"));return this.state.result=this.state.buf,this.state.buf="",this.next(this.parseDateMonth)}parseDateMonth(){if(this.char===CHAR_HYPHEN){if(this.state.buf.length<2)throw this.error(new TomlError("Months less than 10 must be zero padded to two characters"));return this.state.result+="-"+this.state.buf,this.state.buf="",this.next(this.parseDateDay)}if(isDigit(this.char))this.consume();else throw this.error(new TomlError("Incomplete datetime"))}parseDateDay(){if(this.char===CHAR_T||this.char===CHAR_SP){if(this.state.buf.length<2)throw this.error(new TomlError("Days less than 10 must be zero padded to two characters"));return this.state.result+="-"+this.state.buf,this.state.buf="",this.next(this.parseStartTimeHour)}if(this.atEndOfWord())return this.returnNow(createDate(this.state.result+"-"+this.state.buf));if(isDigit(this.char))this.consume();else throw this.error(new TomlError("Incomplete datetime"))}parseStartTimeHour(){return this.atEndOfWord()?this.returnNow(createDate(this.state.result)):this.goto(this.parseTimeHour)}parseTimeHour(){if(this.char===CHAR_COLON){if(this.state.buf.length<2)throw this.error(new TomlError("Hours less than 10 must be zero padded to two characters"));return this.state.result+="T"+this.state.buf,this.state.buf="",this.next(this.parseTimeMin)}if(isDigit(this.char))this.consume();else throw 
this.error(new TomlError("Incomplete datetime"))}parseTimeMin(){if(this.state.buf.length<2&&isDigit(this.char))this.consume();else if(2===this.state.buf.length&&this.char===CHAR_COLON)return this.state.result+=":"+this.state.buf,this.state.buf="",this.next(this.parseTimeSec);else throw this.error(new TomlError("Incomplete datetime"))}parseTimeSec(){if(isDigit(this.char)){if(this.consume(),2===this.state.buf.length)return this.state.result+=":"+this.state.buf,this.state.buf="",this.next(this.parseTimeZoneOrFraction)}else throw this.error(new TomlError("Incomplete datetime"))}parseOnlyTimeHour(){if(this.char===CHAR_COLON){if(this.state.buf.length<2)throw this.error(new TomlError("Hours less than 10 must be zero padded to two characters"));return this.state.result=this.state.buf,this.state.buf="",this.next(this.parseOnlyTimeMin)}throw this.error(new TomlError("Incomplete time"))}parseOnlyTimeMin(){if(this.state.buf.length<2&&isDigit(this.char))this.consume();else if(2===this.state.buf.length&&this.char===CHAR_COLON)return this.state.result+=":"+this.state.buf,this.state.buf="",this.next(this.parseOnlyTimeSec);else throw this.error(new TomlError("Incomplete time"))}parseOnlyTimeSec(){if(isDigit(this.char)){if(this.consume(),2===this.state.buf.length)return this.next(this.parseOnlyTimeFractionMaybe)}else throw this.error(new TomlError("Incomplete time"))}parseOnlyTimeFractionMaybe(){if(this.state.result+=":"+this.state.buf,this.char!==CHAR_PERIOD)return this.return(createTime(this.state.result));this.state.buf="",this.next(this.parseOnlyTimeFraction)}parseOnlyTimeFraction(){if(isDigit(this.char))this.consume();else if(this.atEndOfWord()){if(0===this.state.buf.length)throw this.error(new TomlError("Expected digit in milliseconds"));return this.returnNow(createTime(this.state.result+"."+this.state.buf))}else throw this.error(new TomlError("Unexpected character in datetime, expected period (.), minus (-), plus (+) or 
Z"))}parseTimeZoneOrFraction(){if(this.char===CHAR_PERIOD)this.consume(),this.next(this.parseDateTimeFraction);else if(this.char===CHAR_HYPHEN||this.char===CHAR_PLUS)this.consume(),this.next(this.parseTimeZoneHour);else if(this.char===CHAR_Z)return this.consume(),this.return(createDateTime(this.state.result+this.state.buf));else if(this.atEndOfWord())return this.returnNow(createDateTimeFloat(this.state.result+this.state.buf));else throw this.error(new TomlError("Unexpected character in datetime, expected period (.), minus (-), plus (+) or Z"))}parseDateTimeFraction(){if(isDigit(this.char))this.consume();else if(1===this.state.buf.length)throw this.error(new TomlError("Expected digit in milliseconds"));else if(this.char===CHAR_HYPHEN||this.char===CHAR_PLUS)this.consume(),this.next(this.parseTimeZoneHour);else if(this.char===CHAR_Z)return this.consume(),this.return(createDateTime(this.state.result+this.state.buf));else if(this.atEndOfWord())return this.returnNow(createDateTimeFloat(this.state.result+this.state.buf));else throw this.error(new TomlError("Unexpected character in datetime, expected period (.), minus (-), plus (+) or Z"))}parseTimeZoneHour(){if(isDigit(this.char)){if(this.consume(),/\d\d$/.test(this.state.buf))return this.next(this.parseTimeZoneSep)}else throw this.error(new TomlError("Unexpected character in datetime, expected digit"))}parseTimeZoneSep(){if(this.char===CHAR_COLON)this.consume(),this.next(this.parseTimeZoneMin);else throw this.error(new TomlError("Unexpected character in datetime, expected colon"))}parseTimeZoneMin(){if(isDigit(this.char)){if(this.consume(),/\d\d$/.test(this.state.buf))return this.return(createDateTime(this.state.result+this.state.buf))}else throw this.error(new TomlError("Unexpected character in datetime, expected digit"))}parseBoolean(){return this.char===CHAR_t?(this.consume(),this.next(this.parseTrue_r)):this.char===CHAR_f?(this.consume(),this.next(this.parseFalse_a)):void 0}parseTrue_r(){if(this.char===CHAR_r)return 
this.consume(),this.next(this.parseTrue_u);throw this.error(new TomlError("Invalid boolean, expected true or false"))}parseTrue_u(){if(this.char===CHAR_u)return this.consume(),this.next(this.parseTrue_e);throw this.error(new TomlError("Invalid boolean, expected true or false"))}parseTrue_e(){if(this.char===CHAR_e)return this.return(!0);throw this.error(new TomlError("Invalid boolean, expected true or false"))}parseFalse_a(){if(this.char===CHAR_a)return this.consume(),this.next(this.parseFalse_l);throw this.error(new TomlError("Invalid boolean, expected true or false"))}parseFalse_l(){if(this.char===CHAR_l)return this.consume(),this.next(this.parseFalse_s);throw this.error(new TomlError("Invalid boolean, expected true or false"))}parseFalse_s(){if(this.char===CHAR_s)return this.consume(),this.next(this.parseFalse_e);throw this.error(new TomlError("Invalid boolean, expected true or false"))}parseFalse_e(){if(this.char===CHAR_e)return this.return(!1);throw this.error(new TomlError("Invalid boolean, expected true or false"))}parseInlineList(){if(this.char===CHAR_SP||this.char===CTRL_I||this.char===CTRL_M||this.char===CTRL_J)return null;if(this.char===e.END)throw this.error(new TomlError("Unterminated inline array"));return this.char===CHAR_NUM?this.call(this.parseComment):this.char===CHAR_RSQB?this.return(this.state.resultArr||InlineList()):this.callNow(this.parseValue,this.recordInlineListValue)}recordInlineListValue(e){return this.state.resultArr||(this.state.resultArr=InlineList(tomlType(e))),isFloat(e)||isInteger(e)?this.state.resultArr.push(e.valueOf()):this.state.resultArr.push(e),this.goto(this.parseInlineListNext)}parseInlineListNext(){if(this.char===CHAR_SP||this.char===CTRL_I||this.char===CTRL_M||this.char===CTRL_J)return null;if(this.char===CHAR_NUM)return this.call(this.parseComment);if(this.char===CHAR_COMMA)return this.next(this.parseInlineList);if(this.char===CHAR_RSQB)return this.goto(this.parseInlineList);throw this.error(new TomlError("Invalid 
character, expected whitespace, comma (,) or close bracket (])"))}parseInlineTable(){if(this.char===CHAR_SP||this.char===CTRL_I)return null;if(this.char===e.END||this.char===CHAR_NUM||this.char===CTRL_J||this.char===CTRL_M)throw this.error(new TomlError("Unterminated inline array"));return this.char===CHAR_RCUB?this.return(this.state.resultTable||InlineTable()):(this.state.resultTable||(this.state.resultTable=InlineTable()),this.callNow(this.parseAssign,this.recordInlineTableValue))}recordInlineTableValue(e){let t=this.state.resultTable,n=e.key.pop();for(let r of e.key){if(hasKey(t,r)&&(!isTable(t[r])||t[r][_declared]))throw this.error(new TomlError("Can't redefine existing key"));t=t[r]=t[r]||Table()}if(hasKey(t,n))throw this.error(new TomlError("Can't redefine existing key"));return isInteger(e.value)||isFloat(e.value)?t[n]=e.value.valueOf():t[n]=e.value,this.goto(this.parseInlineTableNext)}parseInlineTableNext(){if(this.char===CHAR_SP||this.char===CTRL_I)return null;if(this.char===e.END||this.char===CHAR_NUM||this.char===CTRL_J||this.char===CTRL_M)throw this.error(new TomlError("Unterminated inline array"));if(this.char===CHAR_COMMA)return this.next(this.parseInlineTablePostComma);if(this.char===CHAR_RCUB)return this.goto(this.parseInlineTable);throw this.error(new TomlError("Invalid character, expected whitespace, comma (,) or close bracket (])"))}parseInlineTablePostComma(){if(this.char===CHAR_SP||this.char===CTRL_I)return null;if(this.char===e.END||this.char===CHAR_NUM||this.char===CTRL_J||this.char===CTRL_M)throw this.error(new TomlError("Unterminated inline array"));if(this.char===CHAR_COMMA)throw this.error(new TomlError("Empty elements in inline tables are not permitted"));if(this.char!==CHAR_RCUB)return this.goto(this.parseInlineTable);throw this.error(new TomlError("Trailing commas in inline tables are not permitted"))}}return t}},90560(e,t,n){"use strict";e.exports=a;let r=n(8676),i=n(22418);function a(e,t){t||(t={});let n=0,a=t.blocksize||40960,o=new 
r;return new Promise((e,t)=>{setImmediate(s,n,a,e,t)});function s(t,n,r,a){if(t>=e.length)try{return r(o.finish())}catch(u){return a(i(u,e))}try{o.parse(e.slice(t,t+n)),setImmediate(s,t+n,n,r,a)}catch(c){a(i(c,e))}}}},22418(e){"use strict";function t(e,t){if(null==e.pos||null==e.line)return e;let n=e.message;if(n+=` at row ${e.line+1}, col ${e.col+1}, pos ${e.pos}: +`,t&&t.split){let r=t.split(/\n/),i=String(Math.min(r.length,e.line+3)).length,a=" ";for(;a.length "+r[o]+"\n",n+=a+" ";for(let u=0;u{let i,a=!1,o=!1;function s(){if(a=!0,!i)try{n(t.finish())}catch(e){r(e)}}function u(e){o=!0,r(e)}function c(){i=!0;let n;for(;null!==(n=e.read());)try{t.parse(n)}catch(r){return u(r)}if(i=!1,a)return s();o||e.once("readable",c)}e.once("end",s),e.once("error",u),c()})}function s(){let e=new i;return new r.Transform({objectMode:!0,transform(t,n,r){try{e.parse(t.toString(n))}catch(i){this.emit("error",i)}r()},flush(t){try{this.push(e.finish())}catch(n){this.emit("error",n)}t()}})}},56530(e,t,n){"use strict";e.exports=a;let r=n(8676),i=n(22418);function a(e){n.g.Buffer&&n.g.Buffer.isBuffer(e)&&(e=e.toString("utf8"));let t=new r;try{return t.parse(e),t.finish()}catch(a){throw i(a,e)}}},83512(e,t,n){"use strict";e.exports=n(56530),e.exports.async=n(90560),e.exports.stream=n(6435),e.exports.prettyError=n(22418)},36921(e){"use strict";function t(e){if(null===e)throw n("null");if(void 0===e)throw n("undefined");if("object"!=typeof e)throw n(typeof e);if("function"==typeof e.toJSON&&(e=e.toJSON()),null==e)return null;let t=u(e);if("table"!==t)throw n(t);return o("","",e)}function n(e){return Error("Can only stringify objects, not "+e)}function r(e){return Object.keys(e).filter(t=>s(e[t]))}function i(e){return Object.keys(e).filter(t=>!s(e[t]))}function a(e){let t=Array.isArray(e)?[]:Object.prototype.hasOwnProperty.call(e,"__proto__")?{["__proto__"]:void 0}:{};for(let n of Object.keys(e))!e[n]||"function"!=typeof e[n].toJSON||"toISOString"in e[n]?t[n]=e[n]:t[n]=e[n].toJSON();return 
t}function o(e,t,n){let o,s;o=r(n=a(n)),s=i(n);let l=[],f=t||"";o.forEach(e=>{var t=u(n[e]);"undefined"!==t&&"null"!==t&&l.push(f+c(e)+" = "+b(n[e],!0))}),l.length>0&&l.push("");let d=e&&o.length>0?t+" ":"";return s.forEach(t=>{l.push(S(e,d,t,n[t]))}),l.join("\n")}function s(e){switch(u(e)){case"undefined":case"null":case"integer":case"nan":case"float":case"boolean":case"string":case"datetime":return!0;case"array":return 0===e.length||"table"!==u(e[0]);case"table":return 0===Object.keys(e).length;default:return!1}}function u(e){if(void 0===e)return"undefined";if(null===e)return"null";if("bigint"==typeof e||Number.isInteger(e)&&!Object.is(e,-0))return"integer";if("number"==typeof e)return"float";if("boolean"==typeof e)return"boolean";else if("string"==typeof e)return"string";else if("toISOString"in e)return isNaN(e)?"undefined":"datetime";else if(Array.isArray(e))return"array";else return"table"}function c(e){let t=String(e);return/^[-A-Za-z0-9_]+$/.test(t)?t:l(t)}function l(e){return'"'+h(e).replace(/"/g,'\\"')+'"'}function f(e){return"'"+e+"'"}function d(e,t){for(;t.length"\\u"+d(4,e.codePointAt(0).toString(16)))}function p(e){let t=e.split(/\n/).map(e=>h(e).replace(/"(?="")/g,'\\"')).join("\n");return'"'===t.slice(-1)&&(t+="\\\n"),'"""\n'+t+'"""'}function b(e,t){let n=u(e);return"string"===n&&(t&&/\n/.test(e)?n="string-multiline":!/[\b\t\n\f\r']/.test(e)&&/"/.test(e)&&(n="string-literal")),m(e,n)}function m(e,t){switch(t||(t=u(e)),t){case"string-multiline":return p(e);case"string":return l(e);case"string-literal":return f(e);case"integer":return g(e);case"float":return v(e);case"boolean":return y(e);case"datetime":return w(e);case"array":return _(e.filter(e=>"null"!==u(e)&&"undefined"!==u(e)&&"nan"!==u(e)));case"table":return E(e);default:throw n(t)}}function g(e){return String(e).replace(/\B(?=(\d{3})+(?!\d))/g,"_")}function 
v(e){if(e===1/0)return"inf";if(e===-1/0)return"-inf";if(Object.is(e,NaN))return"nan";if(Object.is(e,-0))return"-0.0";let[t,n]=String(e).split(".");return g(t)+"."+n}function y(e){return String(e)}function w(e){return e.toISOString()}function _(e){e=a(e);let t="[",n=e.map(e=>m(e));return n.join(", ").length>60||/\n/.test(n)?t+="\n "+n.join(",\n ")+"\n":t+=" "+n.join(", ")+(n.length>0?" ":""),t+"]"}function E(e){e=a(e);let t=[];return Object.keys(e).forEach(n=>{t.push(c(n)+" = "+b(e[n],!1))}),"{ "+t.join(", ")+(t.length>0?" ":"")+"}"}function S(e,t,r,i){let a=u(i);if("array"===a)return k(e,t,r,i);if("table"===a)return x(e,t,r,i);throw n(a)}function k(e,t,r,i){i=a(i);let s=u(i[0]);if("table"!==s)throw n(s);let l=e+c(r),f="";return i.forEach(e=>{f.length>0&&(f+="\n"),f+=t+"[["+l+"]]\n",f+=o(l+".",t,e)}),f}function x(e,t,n,i){let a=e+c(n),s="";return r(i).length>0&&(s+=t+"["+a+"]\n"),s+o(a+".",t,i)}e.exports=t,e.exports.value=m},5022(e,t,n){"use strict";t.parse=n(83512),t.stringify=n(36921)},46515(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184)),c=r(n(78252)),l=n(98741),f=r(n(68821)),d=function(e){var t="light"===e.palette.type?e.palette.grey[100]:e.palette.grey[900];return{root:{display:"flex",flexDirection:"column",width:"100%",boxSizing:"border-box",zIndex:e.zIndex.appBar,flexShrink:0},positionFixed:{position:"fixed",top:0,left:"auto",right:0},positionAbsolute:{position:"absolute",top:0,left:"auto",right:0},positionSticky:{position:"sticky",top:0,left:"auto",right:0},positionStatic:{position:"static"},positionRelative:{position:"relative"},colorDefault:{backgroundColor:t,color:e.palette.getContrastText(t)},colorPrimary:{backgroundColor:e.palette.primary.main,color:e.palette.primary.contrastText},colorSecondary:{backgroundColor:e.palette.secondary.main,color:e.palette.secondary.contrastText}}};function h(e){var 
t,n=e.children,r=e.classes,c=e.className,d=e.color,h=e.position,p=(0,o.default)(e,["children","classes","className","color","position"]),b=(0,u.default)(r.root,r["position".concat((0,l.capitalize)(h))],(t={},(0,a.default)(t,r["color".concat((0,l.capitalize)(d))],"inherit"!==d),(0,a.default)(t,"mui-fixed","fixed"===h),t),c);return s.default.createElement(f.default,(0,i.default)({square:!0,component:"header",elevation:4,className:b},p),n)}t.styles=d,h.defaultProps={color:"primary",position:"fixed"};var p=(0,c.default)(d,{name:"MuiAppBar"})(h);t.default=p},95880(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(46515))},68477(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(59713)),a=r(n(67154)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184));n(55252);var c=r(n(78252)),l=function(e){return{root:{position:"relative",display:"flex",alignItems:"center",justifyContent:"center",flexShrink:0,width:40,height:40,fontFamily:e.typography.fontFamily,fontSize:e.typography.pxToRem(20),borderRadius:"50%",overflow:"hidden",userSelect:"none"},colorDefault:{color:e.palette.background.default,backgroundColor:"light"===e.palette.type?e.palette.grey[400]:e.palette.grey[600]},img:{width:"100%",height:"100%",textAlign:"center",objectFit:"cover"}}};function f(e){var t=e.alt,n=e.children,r=e.childrenClassName,c=e.classes,l=e.className,f=e.component,d=e.imgProps,h=e.sizes,p=e.src,b=e.srcSet,m=(0,o.default)(e,["alt","children","childrenClassName","classes","className","component","imgProps","sizes","src","srcSet"]),g=null,v=p||b;return 
g=v?s.default.createElement("img",(0,a.default)({alt:t,src:p,srcSet:b,sizes:h,className:c.img},d)):r&&s.default.isValidElement(n)?s.default.cloneElement(n,{className:(0,u.default)(r,n.props.className)}):n,s.default.createElement(f,(0,a.default)({className:(0,u.default)(c.root,c.system,(0,i.default)({},c.colorDefault,!v),l)},m),g)}t.styles=l,f.defaultProps={component:"div"};var d=(0,c.default)(l,{name:"MuiAvatar"})(f);t.default=d},90338(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(68477))},9211(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184)),c=r(n(78252)),l=r(n(46408)),f={root:{zIndex:-1,position:"fixed",right:0,bottom:0,top:0,left:0,backgroundColor:"rgba(0, 0, 0, 0.5)",WebkitTapHighlightColor:"transparent",touchAction:"none"},invisible:{backgroundColor:"transparent"}};function d(e){var t=e.classes,n=e.className,r=e.invisible,c=e.open,f=e.transitionDuration,d=(0,o.default)(e,["classes","className","invisible","open","transitionDuration"]);return s.default.createElement(l.default,(0,i.default)({in:c,timeout:f},d),s.default.createElement("div",{className:(0,u.default)(t.root,(0,a.default)({},t.invisible,r),n),"aria-hidden":"true"}))}t.styles=f,d.defaultProps={invisible:!1};var h=(0,c.default)(f,{name:"MuiBackdrop"})(d);t.default=h},14983(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(9211))},84732(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184));n(55252);var 
c=r(n(78252)),l=n(98741),f=10,d=function(e){return{root:{position:"relative",display:"inline-flex",verticalAlign:"middle"},badge:{display:"flex",flexDirection:"row",flexWrap:"wrap",justifyContent:"center",alignContent:"center",alignItems:"center",position:"absolute",top:0,right:0,boxSizing:"border-box",fontFamily:e.typography.fontFamily,fontWeight:e.typography.fontWeightMedium,fontSize:e.typography.pxToRem(12),minWidth:2*f,padding:"0 4px",height:2*f,borderRadius:f,backgroundColor:e.palette.color,color:e.palette.textColor,zIndex:1,transform:"scale(1) translate(50%, -50%)",transformOrigin:"100% 0%",transition:e.transitions.create("transform",{easing:e.transitions.easing.easeInOut,duration:e.transitions.duration.enteringScreen})},colorPrimary:{backgroundColor:e.palette.primary.main,color:e.palette.primary.contrastText},colorSecondary:{backgroundColor:e.palette.secondary.main,color:e.palette.secondary.contrastText},colorError:{backgroundColor:e.palette.error.main,color:e.palette.error.contrastText},invisible:{transition:e.transitions.create("transform",{easing:e.transitions.easing.easeInOut,duration:e.transitions.duration.leavingScreen}),transform:"scale(0) translate(50%, -50%)",transformOrigin:"100% 0%"},dot:{height:6,minWidth:6,padding:0}}};function h(e){var t,n=e.badgeContent,r=e.children,c=e.classes,f=e.className,d=e.color,h=e.component,p=e.invisible,b=e.showZero,m=e.max,g=e.variant,v=(0,o.default)(e,["badgeContent","children","classes","className","color","component","invisible","showZero","max","variant"]),y=p;null!=p||0!==Number(n)||b||(y=!0);var 
w=(0,u.default)(c.badge,(t={},(0,a.default)(t,c["color".concat((0,l.capitalize)(d))],"default"!==d),(0,a.default)(t,c.invisible,y),(0,a.default)(t,c.dot,"dot"===g),t)),_="";return"dot"!==g&&(_=n>m?"".concat(m,"+"):n),s.default.createElement(h,(0,i.default)({className:(0,u.default)(c.root,f)},v),r,s.default.createElement("span",{className:w},_))}t.styles=d,h.defaultProps={color:"default",component:"span",max:99,showZero:!1,variant:"standard"};var p=(0,c.default)(d,{name:"MuiBadge"})(h);t.default=p},70398(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(84732))},21783(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(59713)),a=r(n(6479)),o=r(n(67154)),s=r(n(67294));r(n(45697));var u=r(n(94184));n(55252);var c=r(n(78252)),l=n(59114),f=r(n(16070)),d=n(98741),h=function(e){return{root:(0,o.default)({lineHeight:1.75},e.typography.button,{boxSizing:"border-box",minWidth:64,padding:"6px 16px",borderRadius:e.shape.borderRadius,color:e.palette.text.primary,transition:e.transitions.create(["background-color","box-shadow","border"],{duration:e.transitions.duration.short}),"&:hover":{textDecoration:"none",backgroundColor:(0,l.fade)(e.palette.text.primary,e.palette.action.hoverOpacity),"@media (hover: none)":{backgroundColor:"transparent"},"&$disabled":{backgroundColor:"transparent"}},"&$disabled":{color:e.palette.action.disabled}}),label:{width:"100%",display:"inherit",alignItems:"inherit",justifyContent:"inherit"},text:{padding:"6px 8px"},textPrimary:{color:e.palette.primary.main,"&:hover":{backgroundColor:(0,l.fade)(e.palette.primary.main,e.palette.action.hoverOpacity),"@media (hover: none)":{backgroundColor:"transparent"}}},textSecondary:{color:e.palette.secondary.main,"&:hover":{backgroundColor:(0,l.fade)(e.palette.secondary.main,e.palette.action.hoverOpacity),"@media (hover: 
none)":{backgroundColor:"transparent"}}},flat:{},flatPrimary:{},flatSecondary:{},outlined:{padding:"5px 16px",border:"1px solid ".concat("light"===e.palette.type?"rgba(0, 0, 0, 0.23)":"rgba(255, 255, 255, 0.23)"),"&$disabled":{border:"1px solid ".concat(e.palette.action.disabled)}},outlinedPrimary:{color:e.palette.primary.main,border:"1px solid ".concat((0,l.fade)(e.palette.primary.main,.5)),"&:hover":{border:"1px solid ".concat(e.palette.primary.main),backgroundColor:(0,l.fade)(e.palette.primary.main,e.palette.action.hoverOpacity),"@media (hover: none)":{backgroundColor:"transparent"}}},outlinedSecondary:{color:e.palette.secondary.main,border:"1px solid ".concat((0,l.fade)(e.palette.secondary.main,.5)),"&:hover":{border:"1px solid ".concat(e.palette.secondary.main),backgroundColor:(0,l.fade)(e.palette.secondary.main,e.palette.action.hoverOpacity),"@media (hover: none)":{backgroundColor:"transparent"}},"&$disabled":{border:"1px solid ".concat(e.palette.action.disabled)}},contained:{color:e.palette.getContrastText(e.palette.grey[300]),backgroundColor:e.palette.grey[300],boxShadow:e.shadows[2],"&$focusVisible":{boxShadow:e.shadows[6]},"&:active":{boxShadow:e.shadows[8]},"&$disabled":{color:e.palette.action.disabled,boxShadow:e.shadows[0],backgroundColor:e.palette.action.disabledBackground},"&:hover":{backgroundColor:e.palette.grey.A100,"@media (hover: none)":{backgroundColor:e.palette.grey[300]},"&$disabled":{backgroundColor:e.palette.action.disabledBackground}}},containedPrimary:{color:e.palette.primary.contrastText,backgroundColor:e.palette.primary.main,"&:hover":{backgroundColor:e.palette.primary.dark,"@media (hover: none)":{backgroundColor:e.palette.primary.main}}},containedSecondary:{color:e.palette.secondary.contrastText,backgroundColor:e.palette.secondary.main,"&:hover":{backgroundColor:e.palette.secondary.dark,"@media (hover: 
none)":{backgroundColor:e.palette.secondary.main}}},raised:{},raisedPrimary:{},raisedSecondary:{},fab:{borderRadius:"50%",padding:0,minWidth:0,width:56,height:56,boxShadow:e.shadows[6],"&:active":{boxShadow:e.shadows[12]}},extendedFab:{borderRadius:24,padding:"0 16px",width:"auto",minWidth:48,height:48},focusVisible:{},disabled:{},colorInherit:{color:"inherit",borderColor:"currentColor"},mini:{width:40,height:40},sizeSmall:{padding:"4px 8px",minWidth:64,fontSize:e.typography.pxToRem(13)},sizeLarge:{padding:"8px 24px",fontSize:e.typography.pxToRem(15)},fullWidth:{width:"100%"}}};function p(e){var t,n=e.children,r=e.classes,c=e.className,l=e.color,h=e.disabled,p=e.disableFocusRipple,b=e.focusVisibleClassName,m=e.fullWidth,g=e.mini,v=e.size,y=e.variant,w=(0,a.default)(e,["children","classes","className","color","disabled","disableFocusRipple","focusVisibleClassName","fullWidth","mini","size","variant"]),_="fab"===y||"extendedFab"===y,E="contained"===y||"raised"===y,S="text"===y||"flat"===y,k=(0,u.default)(r.root,(t={},(0,i.default)(t,r.fab,_),(0,i.default)(t,r.mini,_&&g),(0,i.default)(t,r.extendedFab,"extendedFab"===y),(0,i.default)(t,r.text,S),(0,i.default)(t,r.textPrimary,S&&"primary"===l),(0,i.default)(t,r.textSecondary,S&&"secondary"===l),(0,i.default)(t,r.flat,S),(0,i.default)(t,r.flatPrimary,S&&"primary"===l),(0,i.default)(t,r.flatSecondary,S&&"secondary"===l),(0,i.default)(t,r.contained,E||_),(0,i.default)(t,r.containedPrimary,(E||_)&&"primary"===l),(0,i.default)(t,r.containedSecondary,(E||_)&&"secondary"===l),(0,i.default)(t,r.raised,E||_),(0,i.default)(t,r.raisedPrimary,(E||_)&&"primary"===l),(0,i.default)(t,r.raisedSecondary,(E||_)&&"secondary"===l),(0,i.default)(t,r.outlined,"outlined"===y),(0,i.default)(t,r.outlinedPrimary,"outlined"===y&&"primary"===l),(0,i.default)(t,r.outlinedSecondary,"outlined"===y&&"secondary"===l),(0,i.default)(t,r["size".concat((0,d.capitalize)(v))],"medium"!==v),(0,i.default)(t,r.disabled,h),(0,i.default)(t,r.fullWidth,m),(0,i.defa
ult)(t,r.colorInherit,"inherit"===l),t),c);return s.default.createElement(f.default,(0,o.default)({className:k,disabled:h,focusRipple:!p,focusVisibleClassName:(0,u.default)(r.focusVisible,b)},w),s.default.createElement("span",{className:r.label},n))}t.styles=h,p.defaultProps={color:"default",component:"button",disabled:!1,disableFocusRipple:!1,fullWidth:!1,mini:!1,size:"medium",type:"button",variant:"text"};var b=(0,c.default)(h,{name:"MuiButton"})(p);t.default=b},83638(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(21783))},74610(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(34575)),u=r(n(93913)),c=r(n(78585)),l=r(n(29754)),f=r(n(2205)),d=r(n(81506)),h=r(n(67294));r(n(45697));var p=r(n(73935)),b=r(n(94184));n(55252);var m=r(n(62614)),g=r(n(78252)),v=r(n(78582)),y=n(32252),w=r(n(65406)),_=r(n(83673)),E={root:{display:"inline-flex",alignItems:"center",justifyContent:"center",position:"relative",WebkitTapHighlightColor:"transparent",backgroundColor:"transparent",outline:"none",border:0,margin:0,borderRadius:0,padding:0,cursor:"pointer",userSelect:"none",verticalAlign:"middle","-moz-appearance":"none","-webkit-appearance":"none",textDecoration:"none",color:"inherit","&::-moz-focus-inner":{borderStyle:"none"},"&$disabled":{pointerEvents:"none",cursor:"default"}},disabled:{},focusVisible:{}};t.styles=E;var S=function(e){function t(){(0,s.default)(this,t);for(var e,n,r=arguments.length,i=Array(r),a=0;a0&&void 0!==arguments[0]?arguments[0]:{},o=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},s=arguments.length>2?arguments[2]:void 0,u=o.pulsate,c=void 0!==u&&u,l=o.center,f=void 0===l?n.props.center||o.pulsate:l,h=o.fakeElement,b=void 
0!==h&&h;if("mousedown"===a.type&&n.ignoringMouseDown){n.ignoringMouseDown=!1;return}"touchstart"===a.type&&(n.ignoringMouseDown=!0);var m=b?null:p.default.findDOMNode((0,d.default)((0,d.default)(n))),g=m?m.getBoundingClientRect():{width:0,height:0,left:0,top:0};if(!f&&(0!==a.clientX||0!==a.clientY)&&(a.clientX||a.touches)){var v=a.clientX?a.clientX:a.touches[0].clientX,y=a.clientY?a.clientY:a.touches[0].clientY;t=Math.round(v-g.left),r=Math.round(y-g.top)}else t=Math.round(g.width/2),r=Math.round(g.height/2);if(f)(i=Math.sqrt((2*Math.pow(g.width,2)+Math.pow(g.height,2))/3))%2==0&&(i+=1);else{i=Math.sqrt(Math.pow(2*Math.max(Math.abs((m?m.clientWidth:0)-t),t)+2,2)+Math.pow(2*Math.max(Math.abs((m?m.clientHeight:0)-r),r)+2,2))}a.touches?(n.startTimerCommit=function(){n.startCommit({pulsate:c,rippleX:t,rippleY:r,rippleSize:i,cb:s})},n.startTimer=setTimeout(function(){n.startTimerCommit&&(n.startTimerCommit(),n.startTimerCommit=null)},w)):n.startCommit({pulsate:c,rippleX:t,rippleY:r,rippleSize:i,cb:s})},n.startCommit=function(e){var t=e.pulsate,r=e.rippleX,i=e.rippleY,a=e.rippleSize,s=e.cb;n.setState(function(e){return{nextKey:e.nextKey+1,ripples:[].concat((0,o.default)(e.ripples),[h.default.createElement(v.default,{key:e.nextKey,classes:n.props.classes,timeout:{exit:y,enter:y},pulsate:t,rippleX:r,rippleY:i,rippleSize:a})])}},s)},n.stop=function(e,t){clearTimeout(n.startTimer);var r=n.state.ripples;if("touchend"===e.type&&n.startTimerCommit){e.persist(),n.startTimerCommit(),n.startTimerCommit=null,n.startTimer=setTimeout(function(){n.stop(e,t)});return}n.startTimerCommit=null,r&&r.length&&n.setState({ripples:r.slice(1)},t)},n}return(0,f.default)(t,e),(0,u.default)(t,[{key:"componentWillUnmount",value:function(){clearTimeout(this.startTimer)}},{key:"render",value:function(){var e=this.props,t=(e.center,e.classes),n=e.className,r=(0,a.default)(e,["center","classes","className"]);return 
h.default.createElement(b.default,(0,i.default)({component:"span",enter:!0,exit:!0,className:(0,m.default)(t.root,n)},r),this.state.ripples)}}]),t}(h.default.PureComponent);E.defaultProps={center:!1};var S=(0,g.default)(_,{flip:!1,name:"MuiTouchRipple"})(E);t.default=S},83673(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n=function(e,t,n,r){return function(i){r&&r.call(e,i);var a=!1;return i.defaultPrevented&&(a=!0),e.props.disableTouchRipple&&"Blur"!==t&&(a=!0),!a&&e.ripple&&e.ripple[n](i),"function"==typeof e.props["on".concat(t)]&&e.props["on".concat(t)](i),!0}};"undefined"==typeof window&&(n=function(){return function(){}});var r=n;t.default=r},32252(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.detectFocusVisible=s,t.listenForFocusKeys=f,r(n(42473));var i=r(n(16143)),a={focusKeyPressed:!1,keyUpEventTimeout:-1};function o(e){for(var t=e.activeElement;t&&t.shadowRoot&&t.shadowRoot.activeElement;)t=t.shadowRoot.activeElement;return t}function s(e,t,n){var r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1;e.focusVisibleTimeout=setTimeout(function(){var u=(0,i.default)(t),c=o(u);a.focusKeyPressed&&(c===t||t.contains(c))?n():r-1}var l=function(e){c(e)&&(a.focusKeyPressed=!0,clearTimeout(a.keyUpEventTimeout),a.keyUpEventTimeout=setTimeout(function(){a.focusKeyPressed=!1},500))};function f(e){e.addEventListener("keyup",l)}},16070(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(74610))},46003(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(67294));r(n(45697));var s=r(n(94184)),u=r(n(68821)),c=r(n(78252)),l={root:{overflow:"hidden"}};function f(e){var t=e.classes,n=e.className,r=e.raised,c=(0,a.default)(e,["classes","className","raised"]);return 
o.default.createElement(u.default,(0,i.default)({className:(0,s.default)(t.root,n),elevation:r?8:1},c))}t.styles=l,f.defaultProps={raised:!1};var d=(0,c.default)(l,{name:"MuiCard"})(f);t.default=d},82204(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(46003))},5780(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(67294));r(n(45697));var s=r(n(94184));n(55252);var u=r(n(78252)),c={root:{padding:16,"&:last-child":{paddingBottom:24}}};function l(e){var t=e.classes,n=e.className,r=e.component,u=(0,a.default)(e,["classes","className","component"]);return o.default.createElement(r,(0,i.default)({className:(0,s.default)(t.root,n)},u))}t.styles=c,l.defaultProps={component:"div"};var f=(0,u.default)(c,{name:"MuiCardContent"})(l);t.default=f},30060(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(5780))},50704(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(67294));r(n(45697));var s=r(n(94184));n(55252);var u=r(n(78252)),c=r(n(71426)),l={root:{display:"flex",alignItems:"center",padding:16},avatar:{flex:"0 0 auto",marginRight:16},action:{flex:"0 0 auto",alignSelf:"flex-start",marginTop:-8,marginRight:-8},content:{flex:"1 1 auto"},title:{},subheader:{}};function f(e){var 
t=e.action,n=e.avatar,r=e.classes,u=e.className,l=e.component,f=e.disableTypography,d=e.subheader,h=e.subheaderTypographyProps,p=e.title,b=e.titleTypographyProps,m=(0,a.default)(e,["action","avatar","classes","className","component","disableTypography","subheader","subheaderTypographyProps","title","titleTypographyProps"]),g=p;null==g||g.type===c.default||f||(g=o.default.createElement(c.default,(0,i.default)({variant:n?"body2":"headline",internalDeprecatedVariant:!0,className:r.title,component:"span"},b),g));var v=d;return null==v||v.type===c.default||f||(v=o.default.createElement(c.default,(0,i.default)({variant:n?"body2":"body1",className:r.subheader,color:"textSecondary",component:"span"},h),v)),o.default.createElement(l,(0,i.default)({className:(0,s.default)(r.root,u)},m),n&&o.default.createElement("div",{className:r.avatar},n),o.default.createElement("div",{className:r.content},g,v),t&&o.default.createElement("div",{className:r.action},t))}t.styles=l,f.defaultProps={component:"div",disableTypography:!1};var d=(0,u.default)(l,{name:"MuiCardHeader"})(f);t.default=d},52658(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(50704))},82811(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184)),c=r(n(85609)),l=r(n(42159)),f=r(n(41549)),d=r(n(61486)),h=n(98741),p=r(n(78252)),b=function(e){return{root:{color:e.palette.text.secondary},checked:{},disabled:{},indeterminate:{},colorPrimary:{"&$checked":{color:e.palette.primary.main},"&$disabled":{color:e.palette.action.disabled}},colorSecondary:{"&$checked":{color:e.palette.secondary.main},"&$disabled":{color:e.palette.action.disabled}}}};function m(e){var 
t=e.checkedIcon,n=e.classes,r=e.className,l=e.color,f=e.icon,d=e.indeterminate,p=e.indeterminateIcon,b=e.inputProps,m=(0,o.default)(e,["checkedIcon","classes","className","color","icon","indeterminate","indeterminateIcon","inputProps"]);return s.default.createElement(c.default,(0,i.default)({type:"checkbox",checkedIcon:d?p:t,className:(0,u.default)((0,a.default)({},n.indeterminate,d),r),classes:{root:(0,u.default)(n.root,n["color".concat((0,h.capitalize)(l))]),checked:n.checked,disabled:n.disabled},inputProps:(0,i.default)({"data-indeterminate":d},b),icon:d?p:f},m))}t.styles=b,m.defaultProps={checkedIcon:s.default.createElement(f.default,null),color:"secondary",icon:s.default.createElement(l.default,null),indeterminate:!1,indeterminateIcon:s.default.createElement(d.default,null)};var g=(0,p.default)(b,{name:"MuiCheckbox"})(m);t.default=g},71209(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(82811))},16444(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(34575)),u=r(n(93913)),c=r(n(78585)),l=r(n(29754)),f=r(n(2205)),d=r(n(67294));r(n(45697));var h=r(n(94184));r(n(42473)),n(55252);var p=r(n(99781)),b=r(n(78252)),m=n(59114);r(n(21677));var g=n(98741);n(68477);var v=function(e){var 
t=32,n="light"===e.palette.type?e.palette.grey[300]:e.palette.grey[700],r=(0,m.fade)(e.palette.text.primary,.26);return{root:{fontFamily:e.typography.fontFamily,fontSize:e.typography.pxToRem(13),display:"inline-flex",alignItems:"center",justifyContent:"center",height:t,color:e.palette.getContrastText(n),backgroundColor:n,borderRadius:t/2,whiteSpace:"nowrap",transition:e.transitions.create(["background-color","box-shadow"]),cursor:"default",outline:"none",textDecoration:"none",border:"none",padding:0,verticalAlign:"middle",boxSizing:"border-box"},colorPrimary:{backgroundColor:e.palette.primary.main,color:e.palette.primary.contrastText},colorSecondary:{backgroundColor:e.palette.secondary.main,color:e.palette.secondary.contrastText},clickable:{WebkitTapHighlightColor:"transparent",cursor:"pointer","&:hover, &:focus":{backgroundColor:(0,m.emphasize)(n,.08)},"&:active":{boxShadow:e.shadows[1],backgroundColor:(0,m.emphasize)(n,.12)}},clickableColorPrimary:{"&:hover, &:focus":{backgroundColor:(0,m.emphasize)(e.palette.primary.main,.08)},"&:active":{backgroundColor:(0,m.emphasize)(e.palette.primary.main,.12)}},clickableColorSecondary:{"&:hover, &:focus":{backgroundColor:(0,m.emphasize)(e.palette.secondary.main,.08)},"&:active":{backgroundColor:(0,m.emphasize)(e.palette.secondary.main,.12)}},deletable:{"&:focus":{backgroundColor:(0,m.emphasize)(n,.08)}},deletableColorPrimary:{"&:focus":{backgroundColor:(0,m.emphasize)(e.palette.primary.main,.2)}},deletableColorSecondary:{"&:focus":{backgroundColor:(0,m.emphasize)(e.palette.secondary.main,.2)}},outlined:{backgroundColor:"transparent",border:"1px solid ".concat("light"===e.palette.type?"rgba(0, 0, 0, 0.23)":"rgba(255, 255, 255, 0.23)"),"$clickable&:hover, $clickable&:focus, $deletable&:focus":{backgroundColor:(0,m.fade)(e.palette.text.primary,e.palette.action.hoverOpacity)},"& $avatar":{marginLeft:-1}},outlinedPrimary:{color:e.palette.primary.main,border:"1px solid ".concat(e.palette.primary.main),"$clickable&:hover, 
$clickable&:focus, $deletable&:focus":{backgroundColor:(0,m.fade)(e.palette.primary.main,e.palette.action.hoverOpacity)}},outlinedSecondary:{color:e.palette.secondary.main,border:"1px solid ".concat(e.palette.secondary.main),"$clickable&:hover, $clickable&:focus, $deletable&:focus":{backgroundColor:(0,m.fade)(e.palette.secondary.main,e.palette.action.hoverOpacity)}},avatar:{marginRight:-4,width:t,height:t,color:"light"===e.palette.type?e.palette.grey[700]:e.palette.grey[300],fontSize:e.typography.pxToRem(16)},avatarColorPrimary:{color:e.palette.primary.contrastText,backgroundColor:e.palette.primary.dark},avatarColorSecondary:{color:e.palette.secondary.contrastText,backgroundColor:e.palette.secondary.dark},avatarChildren:{width:19,height:19},icon:{color:"light"===e.palette.type?e.palette.grey[700]:e.palette.grey[300],marginLeft:4,marginRight:-8},iconColorPrimary:{color:"inherit"},iconColorSecondary:{color:"inherit"},label:{display:"flex",alignItems:"center",paddingLeft:12,paddingRight:12,userSelect:"none",whiteSpace:"nowrap",cursor:"inherit"},deleteIcon:{WebkitTapHighlightColor:"transparent",color:r,cursor:"pointer",height:"auto",margin:"0 4px 0 -8px","&:hover":{color:(0,m.fade)(r,.4)}},deleteIconColorPrimary:{color:(0,m.fade)(e.palette.primary.contrastText,.7),"&:hover, &:active":{color:e.palette.primary.contrastText}},deleteIconColorSecondary:{color:(0,m.fade)(e.palette.secondary.contrastText,.7),"&:hover, &:active":{color:e.palette.secondary.contrastText}},deleteIconOutlinedColorPrimary:{color:(0,m.fade)(e.palette.primary.main,.7),"&:hover, &:active":{color:e.palette.primary.main}},deleteIconOutlinedColorSecondary:{color:(0,m.fade)(e.palette.secondary.main,.7),"&:hover, &:active":{color:e.palette.secondary.main}}}};t.styles=v;var y=function(e){function t(){(0,s.default)(this,t);for(var e,n,r=arguments.length,i=Array(r),a=0;a :last-child":{paddingRight:32},"&$expanded":{margin:"20px 0"}},expandIcon:{position:"absolute",top:"50%",right:8,transform:"translateY(-50%) 
rotate(0deg)",transition:e.transitions.create("transform",t),"&:hover":{backgroundColor:"transparent"},"&$expanded":{transform:"translateY(-50%) rotate(180deg)"}}}};t.styles=g;var v=function(e){function t(){(0,s.default)(this,t);for(var e,n,r=arguments.length,i=Array(r),a=0;a $item":{padding:e/2}})}),n}var b=function(e){return(0,o.default)({container:{boxSizing:"border-box",display:"flex",flexWrap:"wrap",width:"100%"},item:{boxSizing:"border-box",margin:"0"},zeroMinWidth:{minWidth:0},"direction-xs-column":{flexDirection:"column"},"direction-xs-column-reverse":{flexDirection:"column-reverse"},"direction-xs-row-reverse":{flexDirection:"row-reverse"},"wrap-xs-nowrap":{flexWrap:"nowrap"},"wrap-xs-wrap-reverse":{flexWrap:"wrap-reverse"},"align-items-xs-center":{alignItems:"center"},"align-items-xs-flex-start":{alignItems:"flex-start"},"align-items-xs-flex-end":{alignItems:"flex-end"},"align-items-xs-baseline":{alignItems:"baseline"},"align-content-xs-center":{alignContent:"center"},"align-content-xs-flex-start":{alignContent:"flex-start"},"align-content-xs-flex-end":{alignContent:"flex-end"},"align-content-xs-space-between":{alignContent:"space-between"},"align-content-xs-space-around":{alignContent:"space-around"},"justify-xs-center":{justifyContent:"center"},"justify-xs-flex-end":{justifyContent:"flex-end"},"justify-xs-space-between":{justifyContent:"space-between"},"justify-xs-space-around":{justifyContent:"space-around"},"justify-xs-space-evenly":{justifyContent:"space-evenly"}},p(e,"xs"),l.keys.reduce(function(t,n){return h(t,e,n),t},{}))};function m(e){var 
t,n=e.alignContent,r=e.alignItems,c=e.classes,l=e.className,f=e.component,d=e.container,h=e.direction,p=e.item,b=e.justify,g=e.lg,v=e.md,y=e.sm,w=e.spacing,_=e.wrap,E=e.xl,S=e.xs,k=e.zeroMinWidth,x=(0,a.default)(e,["alignContent","alignItems","classes","className","component","container","direction","item","justify","lg","md","sm","spacing","wrap","xl","xs","zeroMinWidth"]),T=(0,u.default)((t={},(0,i.default)(t,c.container,d),(0,i.default)(t,c.item,p),(0,i.default)(t,c.zeroMinWidth,k),(0,i.default)(t,c["spacing-xs-".concat(String(w))],d&&0!==w),(0,i.default)(t,c["direction-xs-".concat(String(h))],h!==m.defaultProps.direction),(0,i.default)(t,c["wrap-xs-".concat(String(_))],_!==m.defaultProps.wrap),(0,i.default)(t,c["align-items-xs-".concat(String(r))],r!==m.defaultProps.alignItems),(0,i.default)(t,c["align-content-xs-".concat(String(n))],n!==m.defaultProps.alignContent),(0,i.default)(t,c["justify-xs-".concat(String(b))],b!==m.defaultProps.justify),(0,i.default)(t,c["grid-xs-".concat(String(S))],!1!==S),(0,i.default)(t,c["grid-sm-".concat(String(y))],!1!==y),(0,i.default)(t,c["grid-md-".concat(String(v))],!1!==v),(0,i.default)(t,c["grid-lg-".concat(String(g))],!1!==g),(0,i.default)(t,c["grid-xl-".concat(String(E))],!1!==E),t),l);return s.default.createElement(f,(0,o.default)({className:T},x))}t.styles=b,m.defaultProps={alignContent:"stretch",alignItems:"stretch",component:"div",container:!1,direction:"row",item:!1,justify:"flex-start",lg:!1,md:!1,sm:!1,spacing:0,wrap:"wrap",xl:!1,xs:!1,zeroMinWidth:!1};var g,v=(0,c.default)(b,{name:"MuiGrid"})(m);t.default=v},97779(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(27973))},57205(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(34575)),s=r(n(93913)),u=r(n(78585)),c=r(n(29754)),l=r(n(2205)),f=r(n(67294));r(n(45697));var 
d=r(n(60644)),h=r(n(82313)),p=n(41929);function b(e){return"scale(".concat(e,", ").concat(Math.pow(e,2),")")}var m={entering:{opacity:1,transform:b(1)},entered:{opacity:1,transform:"".concat(b(1)," translateZ(0)")}},g=function(e){function t(){(0,o.default)(this,t);for(var e,n,r=arguments.length,i=Array(r),a=0;a=Number(e.rows)&&(n=Math.min(Number(e.rowsMax)*t,n)),n=Math.max(n,t),Math.abs(this.state.height-n)>1&&this.setState({height:n}))}}},{key:"render",value:function(){var e=this.props,t=e.classes,n=e.className,r=e.defaultValue,o=(e.onChange,e.rows),s=(e.rowsMax,e.style),u=(e.textareaRef,e.value),c=(0,a.default)(e,["classes","className","defaultValue","onChange","rows","rowsMax","style","textareaRef","value"]);return f.default.createElement("div",{className:t.root},f.default.createElement(p.default,{target:"window",onResize:this.handleResize}),f.default.createElement("textarea",{"aria-hidden":"true",className:(0,d.default)(t.textarea,t.shadow),readOnly:!0,ref:this.handleRefSinglelineShadow,rows:"1",tabIndex:-1,value:""}),f.default.createElement("textarea",{"aria-hidden":"true",className:(0,d.default)(t.textarea,t.shadow),defaultValue:r,readOnly:!0,ref:this.handleRefShadow,rows:o,tabIndex:-1,value:u}),f.default.createElement("textarea",(0,i.default)({rows:o,className:(0,d.default)(t.textarea,n),defaultValue:r,value:u,onChange:this.handleChange,ref:this.handleRefInput,style:(0,i.default)({height:this.state.height},s)},c)))}}]),t}(f.default.Component);y.defaultProps={rows:1};var w=(0,b.default)(v,{name:"MuiPrivateTextarea"})(y);t.default=w},67598(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(62010))},78586(e,t){"use strict";function n(e){return null!=e&&!(Array.isArray(e)&&0===e.length)}function r(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];return 
e&&(n(e.value)&&""!==e.value||t&&n(e.defaultValue)&&""!==e.defaultValue)}function i(e){return e.startAdornment}Object.defineProperty(t,"__esModule",{value:!0}),t.hasValue=n,t.isFilled=r,t.isAdornedStart=i},56030(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184)),c=r(n(58189)),l=r(n(52598)),f=r(n(78252)),d=r(n(69645)),h=function(e){return{root:{transformOrigin:"top left"},focused:{},disabled:{},error:{},required:{},formControl:{position:"absolute",left:0,top:0,transform:"translate(0, 24px) scale(1)"},marginDense:{transform:"translate(0, 21px) scale(1)"},shrink:{transform:"translate(0, 1.5px) scale(0.75)",transformOrigin:"top left"},animated:{transition:e.transitions.create(["color","transform"],{duration:e.transitions.duration.shorter,easing:e.transitions.easing.easeOut})},filled:{zIndex:1,pointerEvents:"none",transform:"translate(12px, 20px) scale(1)","&$marginDense":{transform:"translate(12px, 17px) scale(1)"},"&$shrink":{transform:"translate(12px, 10px) scale(0.75)","&$marginDense":{transform:"translate(12px, 7px) scale(0.75)"}}},outlined:{zIndex:1,pointerEvents:"none",transform:"translate(14px, 20px) scale(1)","&$marginDense":{transform:"translate(14px, 17px) scale(1)"},"&$shrink":{transform:"translate(14px, -6px) scale(0.75)"}}}};function p(e){var t,n=e.children,r=e.classes,l=e.className,f=e.disableAnimation,h=e.FormLabelClasses,p=(e.margin,e.muiFormControl),b=e.shrink,m=(e.variant,(0,o.default)(e,["children","classes","className","disableAnimation","FormLabelClasses","margin","muiFormControl","shrink","variant"])),g=b;void 0===g&&p&&(g=p.filled||p.focused||p.adornedStart);var 
v=(0,c.default)({props:e,muiFormControl:p,states:["margin","variant"]}),y=(0,u.default)(r.root,(t={},(0,a.default)(t,r.formControl,p),(0,a.default)(t,r.animated,!f),(0,a.default)(t,r.shrink,g),(0,a.default)(t,r.marginDense,"dense"===v.margin),(0,a.default)(t,r.filled,"filled"===v.variant),(0,a.default)(t,r.outlined,"outlined"===v.variant),t),l);return s.default.createElement(d.default,(0,i.default)({"data-shrink":g,className:y,classes:(0,i.default)({focused:r.focused,disabled:r.disabled,error:r.error,required:r.required},h)},m),n)}t.styles=h,p.defaultProps={disableAnimation:!1};var b=(0,f.default)(h,{name:"MuiInputLabel"})((0,l.default)(p));t.default=b},23153(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(56030))},46616(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184));r(n(42473));var c=r(n(78252)),l=n(59114),f=4,d=function(e){return{root:{position:"relative",overflow:"hidden",height:4},colorPrimary:{backgroundColor:(0,l.lighten)(e.palette.primary.light,.6)},colorSecondary:{backgroundColor:(0,l.lighten)(e.palette.secondary.light,.4)},determinate:{},indeterminate:{},buffer:{backgroundColor:"transparent"},query:{transform:"rotate(180deg)"},dashed:{position:"absolute",marginTop:0,height:"100%",width:"100%",animation:"buffer 3s infinite linear",animationName:"$buffer"},dashedColorPrimary:{backgroundImage:"radial-gradient(".concat((0,l.lighten)(e.palette.primary.light,.6)," 0%, ").concat((0,l.lighten)(e.palette.primary.light,.6)," 16%, transparent 42%)"),backgroundSize:"10px 10px",backgroundPosition:"0px -23px"},dashedColorSecondary:{backgroundImage:"radial-gradient(".concat((0,l.lighten)(e.palette.secondary.light,.4)," 0%, ").concat((0,l.lighten)(e.palette.secondary.light,.6)," 16%, transparent 
42%)"),backgroundSize:"10px 10px",backgroundPosition:"0px -23px"},bar:{width:"100%",position:"absolute",left:0,bottom:0,top:0,transition:"transform 0.2s linear",transformOrigin:"left"},barColorPrimary:{backgroundColor:e.palette.primary.main},barColorSecondary:{backgroundColor:e.palette.secondary.main},bar1Indeterminate:{width:"auto",animation:"mui-indeterminate1 2.1s cubic-bezier(0.65, 0.815, 0.735, 0.395) infinite",animationName:"$mui-indeterminate1"},bar1Determinate:{transition:"transform .".concat(f,"s linear")},bar1Buffer:{zIndex:1,transition:"transform .".concat(f,"s linear")},bar2Indeterminate:{width:"auto",animation:"mui-indeterminate2 2.1s cubic-bezier(0.165, 0.84, 0.44, 1) infinite",animationName:"$mui-indeterminate2",animationDelay:"1.15s"},bar2Buffer:{transition:"transform .".concat(f,"s linear")},"@keyframes mui-indeterminate1":{"0%":{left:"-35%",right:"100%"},"60%":{left:"100%",right:"-90%"},"100%":{left:"100%",right:"-90%"}},"@keyframes mui-indeterminate2":{"0%":{left:"-200%",right:"100%"},"60%":{left:"107%",right:"-8%"},"100%":{left:"107%",right:"-8%"}},"@keyframes buffer":{"0%":{opacity:1,backgroundPosition:"0px -23px"},"50%":{opacity:0,backgroundPosition:"0px -23px"},"100%":{opacity:1,backgroundPosition:"-200px -23px"}}}};function h(e){var 
t,n,r,c,l=e.classes,f=e.className,d=e.color,h=e.value,p=e.valueBuffer,b=e.variant,m=(0,o.default)(e,["classes","className","color","value","valueBuffer","variant"]),g=(0,u.default)(l.root,(t={},(0,a.default)(t,l.colorPrimary,"primary"===d),(0,a.default)(t,l.colorSecondary,"secondary"===d),(0,a.default)(t,l.determinate,"determinate"===b),(0,a.default)(t,l.indeterminate,"indeterminate"===b),(0,a.default)(t,l.buffer,"buffer"===b),(0,a.default)(t,l.query,"query"===b),t),f),v=(0,u.default)(l.dashed,(n={},(0,a.default)(n,l.dashedColorPrimary,"primary"===d),(0,a.default)(n,l.dashedColorSecondary,"secondary"===d),n)),y=(0,u.default)(l.bar,(r={},(0,a.default)(r,l.barColorPrimary,"primary"===d),(0,a.default)(r,l.barColorSecondary,"secondary"===d),(0,a.default)(r,l.bar1Indeterminate,"indeterminate"===b||"query"===b),(0,a.default)(r,l.bar1Determinate,"determinate"===b),(0,a.default)(r,l.bar1Buffer,"buffer"===b),r)),w=(0,u.default)(l.bar,(c={},(0,a.default)(c,l.barColorPrimary,"primary"===d&&"buffer"!==b),(0,a.default)(c,l.colorPrimary,"primary"===d&&"buffer"===b),(0,a.default)(c,l.barColorSecondary,"secondary"===d&&"buffer"!==b),(0,a.default)(c,l.colorSecondary,"secondary"===d&&"buffer"===b),(0,a.default)(c,l.bar2Indeterminate,"indeterminate"===b||"query"===b),(0,a.default)(c,l.bar2Buffer,"buffer"===b),c)),_={},E={bar1:{},bar2:{}};return("determinate"===b||"buffer"===b)&&void 0!==h&&(_["aria-valuenow"]=Math.round(h),E.bar1.transform="scaleX(".concat(h/100,")")),"buffer"===b&&void 0!==p&&(E.bar2.transform="scaleX(".concat((p||0)/100,")")),s.default.createElement("div",(0,i.default)({className:g,role:"progressbar"},_,m),"buffer"===b?s.default.createElement("div",{className:v}):null,s.default.createElement("div",{className:y,style:E.bar1}),"determinate"===b?null:s.default.createElement("div",{className:w,style:E.bar2}))}t.styles=d,h.defaultProps={color:"primary",variant:"indeterminate"};var p=(0,c.default)(d,{name:"MuiLinearProgress"})(h);t.default=p},79424(e,t,n){"use 
strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(46616))},74080(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184));n(55252);var c=r(n(78252)),l=r(n(47457)),f={root:{listStyle:"none",margin:0,padding:0,position:"relative"},padding:{paddingTop:8,paddingBottom:8},dense:{paddingTop:4,paddingBottom:4},subheader:{paddingTop:0}};function d(e){var t,n=e.children,r=e.classes,c=e.className,f=e.component,d=e.dense,h=e.disablePadding,p=e.subheader,b=(0,o.default)(e,["children","classes","className","component","dense","disablePadding","subheader"]);return s.default.createElement(f,(0,i.default)({className:(0,u.default)(r.root,(t={},(0,a.default)(t,r.dense,d&&!h),(0,a.default)(t,r.padding,!h),(0,a.default)(t,r.subheader,p),t),c)},b),s.default.createElement(l.default.Provider,{value:{dense:d}},p,n))}t.styles=f,d.defaultProps={component:"ul",dense:!1,disablePadding:!1};var h=(0,c.default)(f,{name:"MuiList"})(d);t.default=h},47457(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67294)).default.createContext({});t.default=i},3022(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(74080))},29936(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184));n(55252);var 
c=r(n(78252)),l=r(n(16070)),f=n(44370),d=r(n(671)),h=function(e){return{root:{display:"flex",justifyContent:"flex-start",alignItems:"center",position:"relative",textDecoration:"none",width:"100%",boxSizing:"border-box",textAlign:"left",paddingTop:11,paddingBottom:11,"&$selected, &$selected:hover, &$selected:focus":{backgroundColor:e.palette.action.selected}},container:{position:"relative"},focusVisible:{},default:{},dense:{paddingTop:8,paddingBottom:8},alignItemsFlexStart:{alignItems:"flex-start"},disabled:{opacity:.5},divider:{borderBottom:"1px solid ".concat(e.palette.divider),backgroundClip:"padding-box"},gutters:{paddingLeft:16,paddingRight:16},button:{transition:e.transitions.create("background-color",{duration:e.transitions.duration.shortest}),"&:hover":{textDecoration:"none",backgroundColor:e.palette.action.hover,"@media (hover: none)":{backgroundColor:"transparent"}},"&:focus":{backgroundColor:e.palette.action.hover}},secondaryAction:{paddingRight:32},selected:{}}};function p(e){var t=e.alignItems,n=e.button,r=e.children,c=e.classes,h=e.className,p=e.component,b=e.ContainerComponent,m=e.ContainerProps,g=(m=void 0===m?{}:m).className,v=(0,o.default)(m,["className"]),y=e.dense,w=e.disabled,_=e.disableGutters,E=e.divider,S=e.focusVisibleClassName,k=e.selected,x=(0,o.default)(e,["alignItems","button","children","classes","className","component","ContainerComponent","ContainerProps","dense","disabled","disableGutters","divider","focusVisibleClassName","selected"]);return s.default.createElement(d.default,{dense:y,alignItems:t},function(e){var 
o,d=e.dense,m=s.default.Children.toArray(r),y=m.some(function(e){return(0,f.isMuiElement)(e,["ListItemAvatar"])}),T=m.length&&(0,f.isMuiElement)(m[m.length-1],["ListItemSecondaryAction"]),M=(0,u.default)(c.root,c.default,(o={},(0,a.default)(o,c.dense,d||y),(0,a.default)(o,c.gutters,!_),(0,a.default)(o,c.divider,E),(0,a.default)(o,c.disabled,w),(0,a.default)(o,c.button,n),(0,a.default)(o,c.alignItemsFlexStart,"flex-start"===t),(0,a.default)(o,c.secondaryAction,T),(0,a.default)(o,c.selected,k),o),h),O=(0,i.default)({className:M,disabled:w},x),A=p||"li";return(n&&(O.component=p||"div",O.focusVisibleClassName=(0,u.default)(c.focusVisible,S),A=l.default),T)?(A=O.component||p?A:"div","li"===b&&("li"===A?A="div":"li"===O.component&&(O.component="div")),s.default.createElement(b,(0,i.default)({className:(0,u.default)(c.container,g)},v),s.default.createElement(A,O,m),m.pop())):s.default.createElement(A,O,m)})}t.styles=h,p.defaultProps={alignItems:"center",button:!1,ContainerComponent:"li",dense:!1,disabled:!1,disableGutters:!1,divider:!1,selected:!1};var b=(0,c.default)(h,{name:"MuiListItem"})(p);t.default=b},671(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67294));r(n(45697));var a=r(n(47457));function o(e){var t=e.alignItems,n=e.children,r=e.dense;return i.default.createElement(a.default.Consumer,null,function(e){var o={dense:r||e.dense||!1,alignItems:t};return i.default.createElement(a.default.Provider,{value:o},n(o))})}var s=o;t.default=s},60323(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(29936))},69394(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(67294));r(n(45697));var 
s=r(n(94184)),u=r(n(78252)),c=function(e){return{root:{marginRight:16,color:e.palette.action.active,flexShrink:0,display:"inline-flex"}}};function l(e){var t=e.children,n=e.classes,r=e.className,u=(0,a.default)(e,["children","classes","className"]);return o.default.createElement("div",(0,i.default)({className:(0,s.default)(n.root,r)},u),t)}t.styles=c;var f=(0,u.default)(c,{name:"MuiListItemIcon"})(l);t.default=f},11186(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(69394))},73390(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184)),c=r(n(78252)),l=r(n(71426)),f=r(n(47457)),d=function(e){return{root:{flex:"1 1 auto",minWidth:0,padding:"0 16px","&:first-child":{paddingLeft:0}},inset:{"&:first-child":{paddingLeft:56}},dense:{fontSize:e.typography.pxToRem(13)},primary:{"&$textDense":{fontSize:"inherit"}},secondary:{"&$textDense":{fontSize:"inherit"}},textDense:{}}};function h(e){var t=e.children,n=e.classes,r=e.className,c=e.disableTypography,d=e.inset,h=e.primary,p=e.primaryTypographyProps,b=e.secondary,m=e.secondaryTypographyProps,g=e.theme,v=(0,o.default)(e,["children","classes","className","disableTypography","inset","primary","primaryTypographyProps","secondary","secondaryTypographyProps","theme"]);return s.default.createElement(f.default.Consumer,null,function(e){var o,f=e.dense,y=null!=h?h:t;null==y||y.type===l.default||c||(y=s.default.createElement(l.default,(0,i.default)({variant:g.typography.useNextVariants?"body1":"subheading",className:(0,u.default)(n.primary,(0,a.default)({},n.textDense,f)),component:"span"},p),y));var w=b;return 
null==w||w.type===l.default||c||(w=s.default.createElement(l.default,(0,i.default)({className:(0,u.default)(n.secondary,(0,a.default)({},n.textDense,f)),color:"textSecondary"},m),w)),s.default.createElement("div",(0,i.default)({className:(0,u.default)(n.root,(o={},(0,a.default)(o,n.dense,f),(0,a.default)(o,n.inset,d),o),r)},v),y,w)})}t.styles=d,h.defaultProps={disableTypography:!1,inset:!1};var p=(0,c.default)(d,{name:"MuiListItemText",withTheme:!0})(h);t.default=p},87591(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(73390))},95890(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(34575)),s=r(n(93913)),u=r(n(78585)),c=r(n(29754)),l=r(n(2205)),f=r(n(67294));r(n(45697));var d=r(n(73935)),h=r(n(44825)),p=r(n(78252)),b=r(n(50810)),m=r(n(34980)),g={vertical:"top",horizontal:"right"},v={vertical:"top",horizontal:"left"},y={paper:{maxHeight:"calc(100% - 96px)",WebkitOverflowScrolling:"touch"}};t.styles=y;var w=function(e){function t(){(0,o.default)(this,t);for(var e,n,r=arguments.length,i=Array(r),a=0;a=0?t.children[e].focus():t.firstChild.focus())}},{key:"resetTabIndex",value:function(){for(var e=this.listRef,t=(0,h.default)(e).activeElement,n=[],r=0;r0&&void 0!==arguments[0]?arguments[0]:{};(0,i.default)(this,e);var n=t.hideSiblingNodes,r=void 0===n||n,a=t.handleContainerOverflow,o=void 0===a||a;this.hideSiblingNodes=r,this.handleContainerOverflow=o,this.modals=[],this.data=[]}return(0,a.default)(e,[{key:"add",value:function(e,t){var n=this.modals.indexOf(e);if(-1!==n)return n;n=this.modals.length,this.modals.push(e),e.modalRef&&(0,l.ariaHidden)(e.modalRef,!1),this.hideSiblingNodes&&(0,l.ariaHiddenSiblings)(t,e.mountNode,e.modalRef,!0);var r=f(this.data,function(e){return e.container===t});if(-1!==r)return this.data[r].modals.push(e),n;var 
i={modals:[e],container:t,overflowing:(0,c.default)(t),prevPaddings:[]};return this.data.push(i),n}},{key:"mount",value:function(e){var t=f(this.data,function(t){return -1!==t.modals.indexOf(e)}),n=this.data[t];!n.style&&this.handleContainerOverflow&&h(n)}},{key:"remove",value:function(e){var t=this.modals.indexOf(e);if(-1===t)return t;var n=f(this.data,function(t){return -1!==t.modals.indexOf(e)}),r=this.data[n];if(r.modals.splice(r.modals.indexOf(e),1),this.modals.splice(t,1),0===r.modals.length)this.handleContainerOverflow&&p(r),e.modalRef&&(0,l.ariaHidden)(e.modalRef,!0),this.hideSiblingNodes&&(0,l.ariaHiddenSiblings)(r.container,e.mountNode,e.modalRef,!1),this.data.splice(n,1);else if(this.hideSiblingNodes){var i=r.modals[r.modals.length-1];i.modalRef&&(0,l.ariaHidden)(i.modalRef,!1)}return t}},{key:"isTopModal",value:function(e){return!!this.modals.length&&this.modals[this.modals.length-1]===e}}]),e}();t.default=b},55536(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}}),Object.defineProperty(t,"ModalManager",{enumerable:!0,get:function(){return a.default}});var i=r(n(58228)),a=r(n(2158))},16575(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.isBody=s,t.default=u;var i=r(n(7624)),a=r(n(16143)),o=r(n(62614));function s(e){return e&&"body"===e.tagName.toLowerCase()}function u(e){var t=(0,a.default)(e),n=(0,o.default)(t);if(!(0,i.default)(t)&&!s(e))return e.scrollHeight>e.clientHeight;var r=n.getComputedStyle(t.body),u=parseInt(r.getPropertyValue("margin-left"),10),c=parseInt(r.getPropertyValue("margin-right"),10);return u+t.body.clientWidth+c0?.75*r+8:0;return 
s.default.createElement("fieldset",(0,a.default)({"aria-hidden":!0,style:(0,a.default)((0,i.default)({},"padding".concat((0,l.capitalize)(p)),8+(c?0:b/2)),f),className:(0,u.default)(t.root,n)},h),s.default.createElement("legend",{className:t.legend,style:{width:c?b:.01}},s.default.createElement("span",{dangerouslySetInnerHTML:{__html:"​"}})))}t.styles=f;var h=(0,c.withStyles)(f,{name:"MuiPrivateNotchedOutline",withTheme:!0})(d);t.default=h},96405(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(67294));r(n(45697));var s=r(n(94184));n(55252);var u=r(n(67598)),c=r(n(21142)),l=r(n(78252)),f=function(e){var t="light"===e.palette.type?"rgba(0, 0, 0, 0.23)":"rgba(255, 255, 255, 0.23)";return{root:{position:"relative","& $notchedOutline":{borderColor:t},"&:hover:not($disabled):not($focused):not($error) $notchedOutline":{borderColor:e.palette.text.primary,"@media (hover: none)":{borderColor:t}},"&$focused $notchedOutline":{borderColor:e.palette.primary.main,borderWidth:2},"&$error $notchedOutline":{borderColor:e.palette.error.main},"&$disabled $notchedOutline":{borderColor:e.palette.action.disabled}},focused:{},disabled:{},adornedStart:{paddingLeft:14},adornedEnd:{paddingRight:14},error:{},multiline:{padding:"18.5px 14px",boxSizing:"border-box"},notchedOutline:{},input:{padding:"18.5px 14px"},inputMarginDense:{paddingTop:15,paddingBottom:15},inputMultiline:{padding:0},inputAdornedStart:{paddingLeft:0},inputAdornedEnd:{paddingRight:0}}};function d(e){var t=e.classes,n=e.labelWidth,r=e.notched,l=(0,a.default)(e,["classes","labelWidth","notched"]);return o.default.createElement(u.default,(0,i.default)({renderPrefix:function(e){return o.default.createElement(c.default,{className:t.notchedOutline,labelWidth:n,notched:void 
0!==r?r:Boolean(e.startAdornment||e.filled||e.focused)})},classes:(0,i.default)({},t,{root:(0,s.default)(t.root,t.underline),notchedOutline:null})},l))}t.styles=f,u.default.defaultProps={fullWidth:!1,inputComponent:"input",multiline:!1,type:"text"},d.muiName="Input";var h=(0,l.default)(f,{name:"MuiOutlinedInput"})(d);t.default=h},59537(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(96405))},30083(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(59713)),a=r(n(6479)),o=r(n(67154)),s=r(n(67294));r(n(45697));var u=r(n(94184));r(n(42473)),n(55252);var c=r(n(78252)),l=function(e){var t={};return e.shadows.forEach(function(e,n){t["elevation".concat(n)]={boxShadow:e}}),(0,o.default)({root:{backgroundColor:e.palette.background.paper},rounded:{borderRadius:e.shape.borderRadius}},t)};function f(e){var t=e.classes,n=e.className,r=e.component,c=e.square,l=e.elevation,f=(0,a.default)(e,["classes","className","component","square","elevation"]),d=(0,u.default)(t.root,t["elevation".concat(l)],(0,i.default)({},t.rounded,!c),n);return s.default.createElement(r,(0,o.default)({className:d},f))}t.styles=l,f.defaultProps={component:"div",elevation:2,square:!1};var d=(0,c.default)(l,{name:"MuiPaper"})(f);t.default=d},68821(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(30083))},64224(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(34575)),s=r(n(93913)),u=r(n(78585)),c=r(n(29754)),l=r(n(2205)),f=r(n(67294));r(n(45697));var d=r(n(73935));r(n(42473));var h=r(n(20296)),p=r(n(96421));n(55252);var 
b=r(n(16143)),m=r(n(62614)),g=n(98741),v=r(n(78252)),y=r(n(55536)),w=r(n(261)),_=r(n(68821));function E(e,t){var n=0;return"number"==typeof t?n=t:"center"===t?n=e.height/2:"bottom"===t&&(n=e.height),n}function S(e,t){var n=0;return"number"==typeof t?n=t:"center"===t?n=e.width/2:"right"===t&&(n=e.width),n}function k(e){return[e.horizontal,e.vertical].map(function(e){return"number"==typeof e?"".concat(e,"px"):e}).join(" ")}function x(e,t){for(var n=t,r=0;n&&n!==e;)r+=(n=n.parentNode).scrollTop;return r}function T(e){return"function"==typeof e?e():e}var M={paper:{position:"absolute",overflowY:"auto",overflowX:"hidden",minWidth:16,minHeight:16,maxWidth:"calc(100% - 32px)",maxHeight:"calc(100% - 32px)",outline:"none"}};t.styles=M;var O=function(e){function t(){var e;return(0,o.default)(this,t),(e=(0,u.default)(this,(0,c.default)(t).call(this))).handleGetOffsetTop=E,e.handleGetOffsetLeft=S,e.componentWillUnmount=function(){e.handleResize.clear()},e.setPositioningStyles=function(t){var n=e.getPositioningStyle(t);null!==n.top&&(t.style.top=n.top),null!==n.left&&(t.style.left=n.left),t.style.transformOrigin=n.transformOrigin},e.getPositioningStyle=function(t){var n=e.props,r=n.anchorEl,i=n.anchorReference,a=n.marginThreshold,o=e.getContentAnchorOffset(t),s={width:t.offsetWidth,height:t.offsetHeight},u=e.getTransformOrigin(s,o);if("none"===i)return{top:null,left:null,transformOrigin:k(u)};var c=e.getAnchorOffset(o),l=c.top-u.vertical,f=c.left-u.horizontal,d=l+s.height,h=f+s.width,p=(0,m.default)(T(r)),b=p.innerHeight-a,g=p.innerWidth-a;if(lb){var y=d-b;l-=y,u.vertical+=y}if(fg){var _=h-g;f-=_,u.horizontal+=_}return{top:"".concat(l,"px"),left:"".concat(f,"px"),transformOrigin:k(u)}},e.handleEntering=function(t){e.props.onEntering&&e.props.onEntering(t),e.setPositioningStyles(t)},"undefined"!=typeof 
window&&(e.handleResize=(0,h.default)(function(){e.props.open&&e.setPositioningStyles(e.paperRef)},166)),e}return(0,l.default)(t,e),(0,s.default)(t,[{key:"componentDidMount",value:function(){this.props.action&&this.props.action({updatePosition:this.handleResize})}},{key:"getAnchorOffset",value:function(e){var t=this.props,n=t.anchorEl,r=t.anchorOrigin,i=t.anchorReference,a=t.anchorPosition;if("anchorPosition"===i)return a;var o=(T(n)||(0,b.default)(this.paperRef).body).getBoundingClientRect(),s=0===e?r.vertical:"center";return{top:o.top+this.handleGetOffsetTop(o,s),left:o.left+this.handleGetOffsetLeft(o,r.horizontal)}}},{key:"getContentAnchorOffset",value:function(e){var t=this.props,n=t.getContentAnchorEl,r=t.anchorReference,i=0;if(n&&"anchorEl"===r){var a=n(e);if(a&&e.contains(a)){var o=x(e,a);i=a.offsetTop+a.clientHeight/2-o||0}}return i}},{key:"getTransformOrigin",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0,n=this.props.transformOrigin;return{vertical:this.handleGetOffsetTop(e,n.vertical)+t,horizontal:this.handleGetOffsetLeft(e,n.horizontal)}}},{key:"render",value:function(){var e=this,t=this.props,n=(t.action,t.anchorEl),r=(t.anchorOrigin,t.anchorPosition,t.anchorReference,t.children),o=t.classes,s=t.container,u=t.elevation,c=(t.getContentAnchorEl,t.marginThreshold,t.ModalClasses),l=t.onEnter,h=t.onEntered,m=(t.onEntering,t.onExit),v=t.onExited,w=t.onExiting,E=t.open,S=t.PaperProps,k=t.role,x=(t.transformOrigin,t.TransitionComponent),M=t.transitionDuration,O=t.TransitionProps,A=void 0===O?{}:O,L=(0,a.default)(t,["action","anchorEl","anchorOrigin","anchorPosition","anchorReference","children","classes","container","elevation","getContentAnchorEl","marginThreshold","ModalClasses","onEnter","onEntered","onEntering","onExit","onExited","onExiting","open","PaperProps","role","transformOrigin","TransitionComponent","transitionDuration","TransitionProps"]),C=M;"auto"!==M||x.muiSupportAuto||(C=void 0);var 
I=s||(n?(0,b.default)(T(n)).body:void 0);return f.default.createElement(y.default,(0,i.default)({classes:c,container:I,open:E,BackdropProps:{invisible:!0}},L),f.default.createElement(x,(0,i.default)({appear:!0,in:E,onEnter:l,onEntered:h,onExit:m,onExited:v,onExiting:w,role:k,timeout:C},A,{onEntering:(0,g.createChainedFunction)(this.handleEntering,A.onEntering)}),f.default.createElement(_.default,(0,i.default)({className:o.paper,elevation:u,ref:function(t){e.paperRef=d.default.findDOMNode(t)}},S),f.default.createElement(p.default,{target:"window",onResize:this.handleResize}),r)))}}]),t}(f.default.Component);O.defaultProps={anchorReference:"anchorEl",anchorOrigin:{vertical:"top",horizontal:"left"},elevation:8,marginThreshold:16,transformOrigin:{vertical:"top",horizontal:"left"},TransitionComponent:w.default,transitionDuration:"auto"};var A=(0,v.default)(M,{name:"MuiPopover"})(O);t.default=A},50810(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(64224))},24693(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(6479)),a=r(n(67154)),o=r(n(34575)),s=r(n(93913)),u=r(n(78585)),c=r(n(29754)),l=r(n(2205)),f=r(n(81506)),d=r(n(67294)),h=r(n(73935));r(n(45697));var p=r(n(28981)),b=r(n(25649));function m(e){if("rtl"!==("undefined"!=typeof window&&document.body.getAttribute("dir")||"ltr"))return e;switch(e){case"bottom-end":return"bottom-start";case"bottom-start":return"bottom-end";case"top-end":return"top-start";case"top-start":return"top-end";default:return e}}function g(e){return"function"==typeof e?e():e}var v=function(e){function t(e){var n;return(0,o.default)(this,t),(n=(0,u.default)(this,(0,c.default)(t).call(this))).handleOpen=function(){var e=n.props,t=e.anchorEl,r=e.modifiers,i=e.open,o=e.placement,s=e.popperOptions,u=void 
0===s?{}:s,c=e.disablePortal,l=h.default.findDOMNode((0,f.default)((0,f.default)(n)));l&&t&&i&&(n.popper&&(n.popper.destroy(),n.popper=null),n.popper=new p.default(g(t),l,(0,a.default)({placement:m(o)},u,{modifiers:(0,a.default)({},c?{}:{preventOverflow:{boundariesElement:"window"}},r,u.modifiers),onCreate:n.handlePopperUpdate,onUpdate:n.handlePopperUpdate})))},n.handlePopperUpdate=function(e){e.placement!==n.state.placement&&n.setState({placement:e.placement})},n.handleExited=function(){n.setState({exited:!0}),n.handleClose()},n.handleClose=function(){n.popper&&(n.popper.destroy(),n.popper=null)},n.state={exited:!e.open},n}return(0,l.default)(t,e),(0,s.default)(t,[{key:"componentDidUpdate",value:function(e){e.open===this.props.open||this.props.open||this.props.transition||this.handleClose(),(e.open!==this.props.open||e.anchorEl!==this.props.anchorEl||e.popperOptions!==this.props.popperOptions||e.modifiers!==this.props.modifiers||e.disablePortal!==this.props.disablePortal||e.placement!==this.props.placement)&&this.handleOpen()}},{key:"componentWillUnmount",value:function(){this.handleClose()}},{key:"render",value:function(){var e=this.props,t=(e.anchorEl,e.children),n=e.container,r=e.disablePortal,o=e.keepMounted,s=(e.modifiers,e.open),u=e.placement,c=(e.popperOptions,e.transition),l=(0,i.default)(e,["anchorEl","children","container","disablePortal","keepMounted","modifiers","open","placement","popperOptions","transition"]),f=this.state,h=f.exited,p=f.placement;if(!o&&!s&&(!c||h))return null;var g={placement:p||m(u)};return c&&(g.TransitionProps={in:s,onExited:this.handleExited}),d.default.createElement(b.default,{onRendered:this.handleOpen,disablePortal:r,container:n},d.default.createElement("div",(0,a.default)({role:"tooltip",style:{position:"absolute"}},l),"function"==typeof t?t(g):t))}}],[{key:"getDerivedStateFromProps",value:function(e){return 
e.open?{exited:!1}:e.transition?null:{exited:!0}}}]),t}(d.default.Component);v.defaultProps={disablePortal:!1,placement:"bottom",transition:!1};var y=v;t.default=y},60111(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(24693))},92261(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(34575)),a=r(n(93913)),o=r(n(78585)),s=r(n(29754)),u=r(n(2205)),c=r(n(67294)),l=r(n(73935));r(n(45697));var f=r(n(16143));function d(e,t){return e="function"==typeof e?e():e,l.default.findDOMNode(e)||t}function h(e){return(0,f.default)(l.default.findDOMNode(e))}n(55252);var p=function(e){function t(){(0,i.default)(this,t);for(var e,n,r=arguments.length,a=Array(r),u=0;u1;n.state.labelWrapped!==e&&n.setState({labelWrapped:e})}},n}return(0,c.default)(t,e),(0,o.default)(t,[{key:"componentDidMount",value:function(){this.checkTextWrap()}},{key:"componentDidUpdate",value:function(e,t){this.state.labelWrapped===t.labelWrapped&&this.checkTextWrap()}},{key:"render",value:function(){var e,t,n=this,r=this.props,a=r.classes,o=r.className,s=r.disabled,u=r.fullWidth,c=r.icon,p=r.indicator,g=r.label,v=(r.onChange,r.selected),y=r.textColor,w=(r.value,(0,i.default)(r,["classes","className","disabled","fullWidth","icon","indicator","label","onChange","selected","textColor","value"]));return void 
0!==g&&(e=d.default.createElement("span",{className:a.labelContainer},d.default.createElement("span",{className:(0,h.default)(a.label,(0,l.default)({},a.labelWrapped,this.state.labelWrapped)),ref:function(e){n.labelRef=e}},g))),d.default.createElement(b.default,(0,f.default)({focusRipple:!0,className:(0,h.default)(a.root,a["textColor".concat((0,m.capitalize)(y))],(t={},(0,l.default)(t,a.disabled,s),(0,l.default)(t,a.selected,v),(0,l.default)(t,a.labelIcon,c&&e),(0,l.default)(t,a.fullWidth,u),t),o),role:"tab","aria-selected":v,disabled:s},w,{onClick:this.handleChange}),d.default.createElement("span",{className:a.wrapper},c,e),p)}}]),t}(d.default.Component);v.defaultProps={disabled:!1,textColor:"inherit"};var y=(0,p.default)(g,{name:"MuiTab"})(v);t.default=y},75759(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(70201))},7575(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(34575)),s=r(n(93913)),u=r(n(78585)),c=r(n(29754)),l=r(n(2205)),f=r(n(67294));r(n(45697));var d=r(n(94184));n(55252);var h=r(n(78252)),p=r(n(82577)),b=function(e){return{root:{display:"table",fontFamily:e.typography.fontFamily,width:"100%",borderCollapse:"collapse",borderSpacing:0}}};t.styles=b;var m=function(e){function t(){(0,o.default)(this,t);for(var e,n,r=arguments.length,i=Array(r),a=0;ai&&n(null,i)}},{key:"render",value:function(){var e,t=this.props,n=t.ActionsComponent,r=t.backIconButtonProps,o=t.classes,s=t.colSpan,u=t.component,c=t.count,l=t.labelDisplayedRows,d=t.labelRowsPerPage,y=t.nextIconButtonProps,w=t.onChangePage,_=t.onChangeRowsPerPage,E=t.page,S=t.rowsPerPage,k=t.rowsPerPageOptions,x=t.SelectProps,T=void 
0===x?{}:x,M=(0,a.default)(t,["ActionsComponent","backIconButtonProps","classes","colSpan","component","count","labelDisplayedRows","labelRowsPerPage","nextIconButtonProps","onChangePage","onChangeRowsPerPage","page","rowsPerPage","rowsPerPageOptions","SelectProps"]);(u===m.default||"td"===u)&&(e=s||1e3);var O=T.native?"option":p.default;return f.default.createElement(u,(0,i.default)({className:o.root,colSpan:e},M),f.default.createElement(g.default,{className:o.toolbar},f.default.createElement("div",{className:o.spacer}),k.length>1&&f.default.createElement(v.default,{color:"inherit",variant:"caption",className:o.caption},d),k.length>1&&f.default.createElement(b.default,(0,i.default)({classes:{root:o.selectRoot,select:o.select,icon:o.selectIcon},input:f.default.createElement(h.default,{className:o.input}),value:S,onChange:_},T),k.map(function(e){return f.default.createElement(O,{className:o.menuItem,key:e,value:e},e)})),f.default.createElement(v.default,{color:"inherit",variant:"caption",className:o.caption},l({from:0===c?0:E*S+1,to:Math.min(c,(E+1)*S),count:c,page:E})),f.default.createElement(n,{className:o.actions,backIconButtonProps:r,count:c,nextIconButtonProps:y,onChangePage:w,page:E,rowsPerPage:S})))}}]),t}(f.default.Component);_.defaultProps={ActionsComponent:y.default,component:m.default,labelDisplayedRows:function(e){var t=e.from,n=e.to,r=e.count;return"".concat(t,"-").concat(n," of ").concat(r)},labelRowsPerPage:"Rows per page:",rowsPerPageOptions:[10,25,50,100]};var E=(0,d.default)(w,{name:"MuiTablePagination"})(_);t.default=E},32844(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(34575)),s=r(n(93913)),u=r(n(78585)),c=r(n(29754)),l=r(n(2205)),f=r(n(67294));r(n(45697));var 
d=r(n(86861)),h=r(n(43836)),p=r(n(82313)),b=r(n(81701)),m=f.default.createElement(h.default,null),g=f.default.createElement(d.default,null),v=f.default.createElement(d.default,null),y=f.default.createElement(h.default,null),w=function(e){function t(){(0,o.default)(this,t);for(var e,n,r=arguments.length,i=Array(r),a=0;a=Math.ceil(n/s)-1,color:"inherit"},r),"rtl"===u.direction?v:y))}}]),t}(f.default.Component),_=(0,p.default)()(w);t.default=_},18217(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(71744))},86424(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184));n(55252);var c=r(n(78252)),l=r(n(27628)),f=function(e){return{root:{color:"inherit",display:"table-row",height:48,verticalAlign:"middle",outline:"none","&$selected":{backgroundColor:"light"===e.palette.type?"rgba(0, 0, 0, 0.04)":"rgba(255, 255, 255, 0.08)"},"&$hover:hover":{backgroundColor:"light"===e.palette.type?"rgba(0, 0, 0, 0.07)":"rgba(255, 255, 255, 0.14)"}},selected:{},hover:{},head:{height:56},footer:{height:56}}};function d(e){var t=e.classes,n=e.className,r=e.component,c=e.hover,f=e.selected,d=(0,o.default)(e,["classes","className","component","hover","selected"]);return s.default.createElement(l.default.Consumer,null,function(e){var o,l=(0,u.default)(t.root,(o={},(0,a.default)(o,t.head,e&&"head"===e.variant),(0,a.default)(o,t.footer,e&&"footer"===e.variant),(0,a.default)(o,t.hover,c),(0,a.default)(o,t.selected,f),o),n);return s.default.createElement(r,(0,i.default)({className:l},d))})}t.styles=f,d.defaultProps={component:"tr",hover:!1,selected:!1};var h=(0,c.default)(f,{name:"MuiTableRow"})(d);t.default=h},17175(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var 
a=i(n(86424))},28550(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(34575)),a=r(n(93913)),o=r(n(78585)),s=r(n(29754)),u=r(n(2205)),c=r(n(67294));r(n(45697));var l,f=r(n(96421)),d=r(n(20296)),h={width:90,height:90,position:"absolute",top:-9e3,overflow:"scroll",msOverflowStyle:"scrollbar"},p=function(e){function t(){var e;return(0,i.default)(this,t),(e=(0,o.default)(this,(0,s.default)(t).call(this))).handleRef=function(t){e.nodeRef=t},e.setMeasurements=function(){var t=e.nodeRef;t&&(e.scrollbarHeight=t.offsetHeight-t.clientHeight)},"undefined"!=typeof window&&(e.handleResize=(0,d.default)(function(){var t=e.scrollbarHeight;e.setMeasurements(),t!==e.scrollbarHeight&&e.props.onChange(e.scrollbarHeight)},166)),e}return(0,u.default)(t,e),(0,a.default)(t,[{key:"componentDidMount",value:function(){this.setMeasurements(),this.props.onChange(this.scrollbarHeight)}},{key:"componentWillUnmount",value:function(){this.handleResize.clear()}},{key:"render",value:function(){return c.default.createElement(c.default.Fragment,null,c.default.createElement(f.default,{target:"window",onResize:this.handleResize}),c.default.createElement("div",{style:h,ref:this.handleRef}))}}]),t}(c.default.Component);t.default=p},12417(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(67294));r(n(45697));var s=r(n(94184)),u=r(n(78252)),c=n(98741),l=function(e){return{root:{position:"absolute",height:2,bottom:0,width:"100%",transition:e.transitions.create()},colorPrimary:{backgroundColor:e.palette.primary.main},colorSecondary:{backgroundColor:e.palette.secondary.main}}};function f(e){var t=e.classes,n=e.className,r=e.color,u=(0,a.default)(e,["classes","className","color"]);return o.default.createElement("span",(0,i.default)({className:(0,s.default)(t.root,t["color".concat((0,c.capitalize)(r))],n)},u))}t.styles=l;var 
d=(0,u.default)(l,{name:"MuiPrivateTabIndicator"})(f);t.default=d},69583(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(67294));r(n(45697));var s=r(n(94184)),u=r(n(86861)),c=r(n(43836)),l=r(n(78252)),f=r(n(16070)),d={root:{color:"inherit",width:56,flexShrink:0}};t.styles=d;var h=o.default.createElement(u.default,null),p=o.default.createElement(c.default,null);function b(e){var t=e.classes,n=e.className,r=e.direction,u=e.onClick,c=e.visible,l=(0,a.default)(e,["classes","className","direction","onClick","visible"]),d=(0,s.default)(t.root,n);return c?o.default.createElement(f.default,(0,i.default)({className:d,onClick:u,tabIndex:-1},l),"left"===r?h:p):o.default.createElement("div",{className:d})}b.defaultProps={visible:!0};var m=(0,l.default)(d,{name:"MuiPrivateTabScrollButton"})(b);t.default=m},89172(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(34575)),s=r(n(93913)),u=r(n(78585)),c=r(n(29754)),l=r(n(2205)),f=r(n(59713)),d=r(n(67294));r(n(45697)),r(n(42473));var h=r(n(94184)),p=r(n(96421)),b=r(n(20296)),m=n(46417);n(55252);var g=r(n(13329)),v=r(n(28550)),y=r(n(78252)),w=r(n(12417)),_=r(n(69583));r(n(346));var E=function(e){return{root:{overflow:"hidden",minHeight:48,WebkitOverflowScrolling:"touch"},flexContainer:{display:"flex"},centered:{justifyContent:"center"},scroller:{position:"relative",display:"inline-block",flex:"1 1 auto",whiteSpace:"nowrap"},fixed:{overflowX:"hidden",width:"100%"},scrollable:{overflowX:"scroll"},scrollButtons:{},scrollButtonsAuto:(0,f.default)({},e.breakpoints.down("xs"),{display:"none"}),indicator:{}}};t.styles=E;var S=function(e){function t(){var 
e;return(0,o.default)(this,t),(e=(0,u.default)(this,(0,c.default)(t).call(this))).state={indicatorStyle:{},scrollerStyle:{marginBottom:0},showLeftScroll:!1,showRightScroll:!1,mounted:!1},e.getConditionalElements=function(){var t=e.props,n=t.classes,r=t.scrollable,i=t.ScrollButtonComponent,a=t.scrollButtons,o=t.theme,s=t.variant,u={},c="scrollable"===s||r;u.scrollbarSizeListener=c?d.default.createElement(v.default,{onChange:e.handleScrollbarSizeChange}):null;var l=c&&("auto"===a||"on"===a);return u.scrollButtonLeft=l?d.default.createElement(i,{direction:o&&"rtl"===o.direction?"right":"left",onClick:e.handleLeftScrollClick,visible:e.state.showLeftScroll,className:(0,h.default)(n.scrollButtons,(0,f.default)({},n.scrollButtonsAuto,"auto"===a))}):null,u.scrollButtonRight=l?d.default.createElement(i,{direction:o&&"rtl"===o.direction?"left":"right",onClick:e.handleRightScrollClick,visible:e.state.showRightScroll,className:(0,h.default)(n.scrollButtons,(0,f.default)({},n.scrollButtonsAuto,"auto"===a))}):null,u},e.getTabsMeta=function(t,n){if(e.tabsRef){var r,i,a=e.tabsRef.getBoundingClientRect();r={clientWidth:e.tabsRef.clientWidth,scrollLeft:e.tabsRef.scrollLeft,scrollLeftNormalized:(0,m.getNormalizedScrollLeft)(e.tabsRef,n),scrollWidth:e.tabsRef.scrollWidth,left:a.left,right:a.right}}if(e.tabsRef&&!1!==t){var o=e.tabsRef.children[0].children;if(o.length>0){var s=o[e.valueToIndex.get(t)];i=s?s.getBoundingClientRect():null}}return{tabsMeta:r,tabMeta:i}},e.handleLeftScrollClick=function(){e.moveTabsScroll(-e.tabsRef.clientWidth)},e.handleRightScrollClick=function(){e.moveTabsScroll(e.tabsRef.clientWidth)},e.handleScrollbarSizeChange=function(t){e.setState({scrollerStyle:{marginBottom:-t}})},e.moveTabsScroll=function(t){var n=e.props.theme,r="rtl"===n.direction?-1:1,i=e.tabsRef.scrollLeft+t*r,a="rtl"===n.direction&&"reverse"===(0,m.detectScrollType)()?-1:1;e.scroll(a*i)},e.scrollSelectedIntoView=function(){var 
t=e.props,n=t.theme,r=t.value,i=e.getTabsMeta(r,n.direction),a=i.tabsMeta,o=i.tabMeta;if(o&&a){if(o.lefta.right){var u=a.scrollLeft+(o.right-a.right);e.scroll(u)}}},e.scroll=function(t){(0,g.default)("scrollLeft",e.tabsRef,t)},e.updateScrollButtonState=function(){var t=e.props,n=t.scrollable,r=t.scrollButtons,i=t.theme;if(("scrollable"===t.variant||n)&&"off"!==r){var a=e.tabsRef,o=a.scrollWidth,s=a.clientWidth,u=(0,m.getNormalizedScrollLeft)(e.tabsRef,i.direction),c="rtl"===i.direction?o>s+u:u>0,l="rtl"===i.direction?u>0:o>s+u;(c!==e.state.showLeftScroll||l!==e.state.showRightScroll)&&e.setState({showLeftScroll:c,showRightScroll:l})}},"undefined"!=typeof window&&(e.handleResize=(0,b.default)(function(){e.updateIndicatorState(e.props),e.updateScrollButtonState()},166),e.handleTabsScroll=(0,b.default)(function(){e.updateScrollButtonState()},166)),e}return(0,l.default)(t,e),(0,s.default)(t,[{key:"componentDidMount",value:function(){this.setState({mounted:!0}),this.updateIndicatorState(this.props),this.updateScrollButtonState(),this.props.action&&this.props.action({updateIndicator:this.handleResize})}},{key:"componentDidUpdate",value:function(e,t){this.updateIndicatorState(this.props),this.updateScrollButtonState(),this.state.indicatorStyle!==t.indicatorStyle&&this.scrollSelectedIntoView()}},{key:"componentWillUnmount",value:function(){this.handleResize.clear(),this.handleTabsScroll.clear()}},{key:"updateIndicatorState",value:function(e){var t=e.theme,n=e.value,r=this.getTabsMeta(n,t.direction),i=r.tabsMeta,a=r.tabMeta,o=0;if(a&&i){var s="rtl"===t.direction?i.scrollLeftNormalized+i.clientWidth-i.scrollWidth:i.scrollLeft;o=Math.round(a.left-i.left+s)}var u={left:o,width:a?Math.round(a.width):0};u.left===this.state.indicatorStyle.left&&u.width===this.state.indicatorStyle.width||isNaN(u.left)||isNaN(u.width)||this.setState({indicatorStyle:u})}},{key:"render",value:function(){var 
e,t=this,n=this.props,r=(n.action,n.centered),o=n.children,s=n.classes,u=n.className,c=n.component,l=n.fullWidth,b=void 0!==l&&l,m=n.indicatorColor,g=n.onChange,v=n.scrollable,y=void 0!==v&&v,_=(n.ScrollButtonComponent,n.scrollButtons,n.TabIndicatorProps),E=void 0===_?{}:_,S=n.textColor,k=(n.theme,n.value),x=n.variant,T=(0,a.default)(n,["action","centered","children","classes","className","component","fullWidth","indicatorColor","onChange","scrollable","ScrollButtonComponent","scrollButtons","TabIndicatorProps","textColor","theme","value","variant"]),M="scrollable"===x||y,O=(0,h.default)(s.root,u),A=(0,h.default)(s.flexContainer,(0,f.default)({},s.centered,r&&!M)),L=(0,h.default)(s.scroller,(e={},(0,f.default)(e,s.fixed,!M),(0,f.default)(e,s.scrollable,M),e)),C=d.default.createElement(w.default,(0,i.default)({className:s.indicator,color:m},E,{style:(0,i.default)({},this.state.indicatorStyle,E.style)}));this.valueToIndex=new Map;var I=0,D=d.default.Children.map(o,function(e){if(!d.default.isValidElement(e))return null;var n=void 0===e.props.value?I:e.props.value;t.valueToIndex.set(n,I);var r=n===k;return I+=1,d.default.cloneElement(e,{fullWidth:"fullWidth"===x||b,indicator:r&&!t.state.mounted&&C,selected:r,onChange:g,textColor:S,value:n})}),N=this.getConditionalElements();return d.default.createElement(c,(0,i.default)({className:O},T),d.default.createElement(p.default,{target:"window",onResize:this.handleResize}),N.scrollbarSizeListener,d.default.createElement("div",{className:s.flexContainer},N.scrollButtonLeft,d.default.createElement("div",{className:L,style:this.state.scrollerStyle,ref:function(e){t.tabsRef=e},role:"tablist",onScroll:this.handleTabsScroll},d.default.createElement("div",{className:A},D),this.state.mounted&&C),N.scrollButtonRight))}}]),t}(d.default.Component);S.defaultProps={centered:!1,component:"div",indicatorColor:"secondary",ScrollButtonComponent:_.default,scrollButtons:"auto",textColor:"inherit",variant:"standard"};var 
k=(0,y.default)(E,{name:"MuiTabs",withTheme:!0})(S);t.default=k},12794(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(89172))},78592(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(34575)),s=r(n(93913)),u=r(n(78585)),c=r(n(29754)),l=r(n(2205)),f=r(n(67294)),d=r(n(73935));r(n(42473)),r(n(45697));var h=r(n(54846)),p=r(n(1402)),b=r(n(59537)),m=r(n(23153)),g=r(n(85461)),v=r(n(76023)),y=r(n(11970)),w={standard:h.default,filled:p.default,outlined:b.default},_=function(e){function t(e){var n;return(0,o.default)(this,t),(n=(0,u.default)(this,(0,c.default)(t).call(this,e))).labelRef=f.default.createRef(),n}return(0,l.default)(t,e),(0,s.default)(t,[{key:"componentDidMount",value:function(){"outlined"===this.props.variant&&(this.labelNode=d.default.findDOMNode(this.labelRef.current),this.forceUpdate())}},{key:"render",value:function(){var e=this.props,t=e.autoComplete,n=e.autoFocus,r=e.children,o=e.className,s=e.defaultValue,u=e.error,c=e.FormHelperTextProps,l=e.fullWidth,d=e.helperText,h=e.id,p=e.InputLabelProps,b=e.inputProps,_=e.InputProps,E=e.inputRef,S=e.label,k=e.multiline,x=e.name,T=e.onBlur,M=e.onChange,O=e.onFocus,A=e.placeholder,L=e.required,C=e.rows,I=e.rowsMax,D=e.select,N=e.SelectProps,P=e.type,R=e.value,j=e.variant,F=(0,a.default)(e,["autoComplete","autoFocus","children","className","defaultValue","error","FormHelperTextProps","fullWidth","helperText","id","InputLabelProps","inputProps","InputProps","inputRef","label","multiline","name","onBlur","onChange","onFocus","placeholder","required","rows","rowsMax","select","SelectProps","type","value","variant"]),Y={};"outlined"===j&&(p&&void 0!==p.shrink&&(Y.notched=p.shrink),Y.labelWidth=this.labelNode&&this.labelNode.offsetWidth||0);var B=d&&h?"".concat(h,"-helper-text"):void 
0,U=w[j],H=f.default.createElement(U,(0,i.default)({"aria-describedby":B,autoComplete:t,autoFocus:n,defaultValue:s,fullWidth:l,multiline:k,name:x,rows:C,rowsMax:I,type:P,value:R,id:h,inputRef:E,onBlur:T,onChange:M,onFocus:O,placeholder:A,inputProps:b},Y,_));return f.default.createElement(g.default,(0,i.default)({className:o,error:u,fullWidth:l,required:L,variant:j},F),S&&f.default.createElement(m.default,(0,i.default)({htmlFor:h,ref:this.labelRef},p),S),D?f.default.createElement(y.default,(0,i.default)({"aria-describedby":B,value:R,input:H},N),r):H,d&&f.default.createElement(v.default,(0,i.default)({id:B},c),d))}}]),t}(f.default.Component);_.defaultProps={required:!1,select:!1,variant:"standard"};var E=_;t.default=E},60520(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(78592))},48596(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184)),c=r(n(78252)),l=function(e){return{root:{position:"relative",display:"flex",alignItems:"center"},gutters:e.mixins.gutters(),regular:e.mixins.toolbar,dense:{minHeight:48}}};function f(e){var t=e.children,n=e.classes,r=e.className,c=e.disableGutters,l=e.variant,f=(0,o.default)(e,["children","classes","className","disableGutters","variant"]),d=(0,u.default)(n.root,n[l],(0,a.default)({},n.gutters,!c),r);return s.default.createElement("div",(0,i.default)({className:d},f),t)}t.styles=l,f.defaultProps={disableGutters:!1,variant:"regular"};var d=(0,c.default)(l,{name:"MuiToolbar"})(f);t.default=d},28902(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(48596))},83065(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var 
i=r(n(67154)),a=r(n(6479)),o=r(n(34575)),s=r(n(93913)),u=r(n(78585)),c=r(n(29754)),l=r(n(2205)),f=r(n(59713)),d=r(n(67294));r(n(45697)),r(n(42473));var h=r(n(94184));n(55252);var p=r(n(39737)),b=r(n(78252)),m=n(98741),g=r(n(261)),v=r(n(60111)),y=function(e){return{popper:{zIndex:e.zIndex.tooltip,opacity:.9,pointerEvents:"none"},popperInteractive:{pointerEvents:"auto"},tooltip:{backgroundColor:e.palette.grey[700],borderRadius:e.shape.borderRadius,color:e.palette.common.white,fontFamily:e.typography.fontFamily,padding:"4px 8px",fontSize:e.typography.pxToRem(10),lineHeight:"".concat(e.typography.round(1.4),"em"),maxWidth:300},touch:{padding:"8px 16px",fontSize:e.typography.pxToRem(14),lineHeight:"".concat(e.typography.round(16/14),"em")},tooltipPlacementLeft:(0,f.default)({transformOrigin:"right center",margin:"0 24px "},e.breakpoints.up("sm"),{margin:"0 14px"}),tooltipPlacementRight:(0,f.default)({transformOrigin:"left center",margin:"0 24px"},e.breakpoints.up("sm"),{margin:"0 14px"}),tooltipPlacementTop:(0,f.default)({transformOrigin:"center bottom",margin:"24px 0"},e.breakpoints.up("sm"),{margin:"14px 0"}),tooltipPlacementBottom:(0,f.default)({transformOrigin:"center top",margin:"24px 0"},e.breakpoints.up("sm"),{margin:"14px 0"})}};t.styles=y;var w=function(e){function t(e){var n;return(0,o.default)(this,t),(n=(0,u.default)(this,(0,c.default)(t).call(this))).ignoreNonTouchEvents=!1,n.onRootRef=function(e){n.childrenRef=e},n.handleFocus=function(e){n.childrenRef||(n.childrenRef=e.currentTarget),n.handleEnter(e);var t=n.props.children.props;t.onFocus&&t.onFocus(e)},n.handleEnter=function(e){var 
t=n.props,r=t.children,i=t.enterDelay,a=r.props;"mouseover"===e.type&&a.onMouseOver&&a.onMouseOver(e),(!n.ignoreNonTouchEvents||"touchstart"===e.type)&&(n.childrenRef.setAttribute("title",""),clearTimeout(n.enterTimer),clearTimeout(n.leaveTimer),i?(e.persist(),n.enterTimer=setTimeout(function(){n.handleOpen(e)},i)):n.handleOpen(e))},n.handleOpen=function(e){n.isControlled||n.state.open||n.setState({open:!0}),n.props.onOpen&&n.props.onOpen(e)},n.handleLeave=function(e){var t=n.props,r=t.children,i=t.leaveDelay,a=r.props;"blur"===e.type&&a.onBlur&&a.onBlur(e),"mouseleave"===e.type&&a.onMouseLeave&&a.onMouseLeave(e),clearTimeout(n.enterTimer),clearTimeout(n.leaveTimer),i?(e.persist(),n.leaveTimer=setTimeout(function(){n.handleClose(e)},i)):n.handleClose(e)},n.handleClose=function(e){n.isControlled||n.setState({open:!1}),n.props.onClose&&n.props.onClose(e),clearTimeout(n.closeTimer),n.closeTimer=setTimeout(function(){n.ignoreNonTouchEvents=!1},n.props.theme.transitions.duration.shortest)},n.handleTouchStart=function(e){n.ignoreNonTouchEvents=!0;var t=n.props,r=t.children,i=t.enterTouchDelay;r.props.onTouchStart&&r.props.onTouchStart(e),clearTimeout(n.leaveTimer),clearTimeout(n.closeTimer),clearTimeout(n.touchTimer),e.persist(),n.touchTimer=setTimeout(function(){n.handleEnter(e)},i)},n.handleTouchEnd=function(e){var 
t=n.props,r=t.children,i=t.leaveTouchDelay;r.props.onTouchEnd&&r.props.onTouchEnd(e),clearTimeout(n.touchTimer),clearTimeout(n.leaveTimer),e.persist(),n.leaveTimer=setTimeout(function(){n.handleClose(e)},i)},n.isControlled=null!=e.open,n.state={open:null},n.isControlled||(n.state.open=!1),n}return(0,l.default)(t,e),(0,s.default)(t,[{key:"componentDidMount",value:function(){this.defaultId="mui-tooltip-".concat(Math.round(1e5*Math.random())),this.props.open&&this.forceUpdate()}},{key:"componentWillUnmount",value:function(){clearTimeout(this.closeTimer),clearTimeout(this.enterTimer),clearTimeout(this.focusTimer),clearTimeout(this.leaveTimer),clearTimeout(this.touchTimer)}},{key:"render",value:function(){var e=this,t=this.props,n=t.children,r=t.classes,o=t.disableFocusListener,s=t.disableHoverListener,u=t.disableTouchListener,c=(t.enterDelay,t.enterTouchDelay,t.id),l=t.interactive,b=(t.leaveDelay,t.leaveTouchDelay,t.onClose,t.onOpen,t.open),g=t.placement,y=t.PopperProps,w=t.theme,_=t.title,E=t.TransitionComponent,S=t.TransitionProps,k=(0,a.default)(t,["children","classes","disableFocusListener","disableHoverListener","disableTouchListener","enterDelay","enterTouchDelay","id","interactive","leaveDelay","leaveTouchDelay","onClose","onOpen","open","placement","PopperProps","theme","title","TransitionComponent","TransitionProps"]),x=this.isControlled?b:this.state.open;""===_&&(x=!1);var T=!x&&!s,M=(0,i.default)({"aria-describedby":x?c||this.defaultId:null,title:T&&"string"==typeof _?_:null},k,n.props,{className:(0,h.default)(k.className,n.props.className)});u||(M.onTouchStart=this.handleTouchStart,M.onTouchEnd=this.handleTouchEnd),s||(M.onMouseOver=this.handleEnter,M.onMouseLeave=this.handleLeave),o||(M.onFocus=this.handleFocus,M.onBlur=this.handleLeave);var O=l?{onMouseOver:M.onMouseOver,onMouseLeave:M.onMouseLeave,onFocus:M.onFocus,onBlur:M.onBlur}:{};return 
d.default.createElement(d.default.Fragment,null,d.default.createElement(p.default,{rootRef:this.onRootRef},d.default.cloneElement(n,M)),d.default.createElement(v.default,(0,i.default)({className:(0,h.default)(r.popper,(0,f.default)({},r.popperInteractive,l)),placement:g,anchorEl:this.childrenRef,open:x,id:M["aria-describedby"],transition:!0},O,y),function(t){var n=t.placement,a=t.TransitionProps;return d.default.createElement(E,(0,i.default)({timeout:w.transitions.duration.shorter},a,S),d.default.createElement("div",{className:(0,h.default)(r.tooltip,(0,f.default)({},r.touch,e.ignoreNonTouchEvents),r["tooltipPlacement".concat((0,m.capitalize)(n.split("-")[0]))])},_))}))}}]),t}(d.default.Component);w.defaultProps={disableFocusListener:!1,disableHoverListener:!1,disableTouchListener:!1,enterDelay:0,enterTouchDelay:1e3,interactive:!1,leaveDelay:0,leaveTouchDelay:1500,placement:"bottom",TransitionComponent:g.default};var _=(0,b.default)(y,{name:"MuiTooltip",withTheme:!0})(w);t.default=_},31657(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"Z",{enumerable:!0,get:function(){return a.default}});var a=i(n(83065))},49476(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(67294));r(n(45697));var u=r(n(94184));n(55252);var 
c=r(n(78252)),l=n(98741),f=function(e){return{root:{display:"block",margin:0},display4:e.typography.display4,display3:e.typography.display3,display2:e.typography.display2,display1:e.typography.display1,headline:e.typography.headline,title:e.typography.title,subheading:e.typography.subheading,body2:e.typography.body2,body1:e.typography.body1,caption:e.typography.caption,button:e.typography.button,h1:e.typography.h1,h2:e.typography.h2,h3:e.typography.h3,h4:e.typography.h4,h5:e.typography.h5,h6:e.typography.h6,subtitle1:e.typography.subtitle1,subtitle2:e.typography.subtitle2,overline:e.typography.overline,srOnly:{position:"absolute",height:1,width:1,overflow:"hidden"},alignLeft:{textAlign:"left"},alignCenter:{textAlign:"center"},alignRight:{textAlign:"right"},alignJustify:{textAlign:"justify"},noWrap:{overflow:"hidden",textOverflow:"ellipsis",whiteSpace:"nowrap"},gutterBottom:{marginBottom:"0.35em"},paragraph:{marginBottom:16},colorInherit:{color:"inherit"},colorPrimary:{color:e.palette.primary.main},colorSecondary:{color:e.palette.secondary.main},colorTextPrimary:{color:e.palette.text.primary},colorTextSecondary:{color:e.palette.text.secondary},colorError:{color:e.palette.error.main},inline:{display:"inline"}}};t.styles=f;var d={display4:"h1",display3:"h2",display2:"h3",display1:"h4",headline:"h5",title:"h6",subheading:"subtitle1"};function h(e,t){var n=e.typography,r=t;return r||(r=n.useNextVariants?"body2":"body1"),n.useNextVariants&&(r=d[r]||r),r}var p={h1:"h1",h2:"h2",h3:"h3",h4:"h4",h5:"h5",h6:"h6",subtitle1:"h6",subtitle2:"h6",body1:"p",body2:"p",display4:"h1",display3:"h1",display2:"h1",display1:"h1",headline:"h1",title:"h2",subheading:"h3"};function b(e){var 
t,n=e.align,r=e.classes,c=e.className,f=e.color,d=e.component,b=e.gutterBottom,m=e.headlineMapping,g=e.inline,v=(e.internalDeprecatedVariant,e.noWrap),y=e.paragraph,w=e.theme,_=e.variant,E=(0,o.default)(e,["align","classes","className","color","component","gutterBottom","headlineMapping","inline","internalDeprecatedVariant","noWrap","paragraph","theme","variant"]),S=h(w,_),k=(0,u.default)(r.root,(t={},(0,a.default)(t,r[S],"inherit"!==S),(0,a.default)(t,r["color".concat((0,l.capitalize)(f))],"default"!==f),(0,a.default)(t,r.noWrap,v),(0,a.default)(t,r.gutterBottom,b),(0,a.default)(t,r.paragraph,y),(0,a.default)(t,r["align".concat((0,l.capitalize)(n))],"inherit"!==n),(0,a.default)(t,r.inline,g),t),c),x=d||(y?"p":m[S]||p[S])||"span";return s.default.createElement(x,(0,i.default)({className:k},E))}b.defaultProps={align:"inherit",color:"default",gutterBottom:!1,headlineMapping:p,inline:!1,noWrap:!1,paragraph:!1};var m=(0,c.default)(f,{name:"MuiTypography",withTheme:!0})(b);t.default=m},71426(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return i.default}});var i=r(n(49476))},8070(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#fff8e1",100:"#ffecb3",200:"#ffe082",300:"#ffd54f",400:"#ffca28",500:"#ffc107",600:"#ffb300",700:"#ffa000",800:"#ff8f00",900:"#ff6f00",A100:"#ffe57f",A200:"#ffd740",A400:"#ffc400",A700:"#ffab00"};t.default=n},63259(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#e3f2fd",100:"#bbdefb",200:"#90caf9",300:"#64b5f6",400:"#42a5f5",500:"#2196f3",600:"#1e88e5",700:"#1976d2",800:"#1565c0",900:"#0d47a1",A100:"#82b1ff",A200:"#448aff",A400:"#2979ff",A700:"#2962ff"};t.default=n},38236(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var 
n={50:"#eceff1",100:"#cfd8dc",200:"#b0bec5",300:"#90a4ae",400:"#78909c",500:"#607d8b",600:"#546e7a",700:"#455a64",800:"#37474f",900:"#263238",A100:"#cfd8dc",A200:"#b0bec5",A400:"#78909c",A700:"#455a64"};t.default=n},60169(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#efebe9",100:"#d7ccc8",200:"#bcaaa4",300:"#a1887f",400:"#8d6e63",500:"#795548",600:"#6d4c41",700:"#5d4037",800:"#4e342e",900:"#3e2723",A100:"#d7ccc8",A200:"#bcaaa4",A400:"#8d6e63",A700:"#5d4037"};t.default=n},515(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={black:"#000",white:"#fff"};t.default=n},57646(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#e0f7fa",100:"#b2ebf2",200:"#80deea",300:"#4dd0e1",400:"#26c6da",500:"#00bcd4",600:"#00acc1",700:"#0097a7",800:"#00838f",900:"#006064",A100:"#84ffff",A200:"#18ffff",A400:"#00e5ff",A700:"#00b8d4"};t.default=n},50173(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#fbe9e7",100:"#ffccbc",200:"#ffab91",300:"#ff8a65",400:"#ff7043",500:"#ff5722",600:"#f4511e",700:"#e64a19",800:"#d84315",900:"#bf360c",A100:"#ff9e80",A200:"#ff6e40",A400:"#ff3d00",A700:"#dd2c00"};t.default=n},45018(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#ede7f6",100:"#d1c4e9",200:"#b39ddb",300:"#9575cd",400:"#7e57c2",500:"#673ab7",600:"#5e35b1",700:"#512da8",800:"#4527a0",900:"#311b92",A100:"#b388ff",A200:"#7c4dff",A400:"#651fff",A700:"#6200ea"};t.default=n},47559(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#e8f5e9",100:"#c8e6c9",200:"#a5d6a7",300:"#81c784",400:"#66bb6a",500:"#4caf50",600:"#43a047",700:"#388e3c",800:"#2e7d32",900:"#1b5e20",A100:"#b9f6ca",A200:"#69f0ae",A400:"#00e676",A700:"#00c853"};t.default=n},70167(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var 
n={50:"#fafafa",100:"#f5f5f5",200:"#eeeeee",300:"#e0e0e0",400:"#bdbdbd",500:"#9e9e9e",600:"#757575",700:"#616161",800:"#424242",900:"#212121",A100:"#d5d5d5",A200:"#aaaaaa",A400:"#303030",A700:"#616161"};t.default=n},19350(e,t,n){"use strict";var r,i=n(95318);r={value:!0},Object.defineProperty(t,"y0",{enumerable:!0,get:function(){return a.default}}),r={enumerable:!0,get:function(){return o.default}},r={enumerable:!0,get:function(){return s.default}},r={enumerable:!0,get:function(){return u.default}},r={enumerable:!0,get:function(){return c.default}},r={enumerable:!0,get:function(){return l.default}},r={enumerable:!0,get:function(){return f.default}},r={enumerable:!0,get:function(){return d.default}},r={enumerable:!0,get:function(){return h.default}},r={enumerable:!0,get:function(){return p.default}},Object.defineProperty(t,"ek",{enumerable:!0,get:function(){return b.default}}),r={enumerable:!0,get:function(){return m.default}},r={enumerable:!0,get:function(){return g.default}},r={enumerable:!0,get:function(){return v.default}},r={enumerable:!0,get:function(){return y.default}},r={enumerable:!0,get:function(){return w.default}},r={enumerable:!0,get:function(){return _.default}},r={enumerable:!0,get:function(){return E.default}},Object.defineProperty(t,"BA",{enumerable:!0,get:function(){return S.default}}),r={enumerable:!0,get:function(){return k.default}};var a=i(n(515)),o=i(n(83165)),s=i(n(124)),u=i(n(18118)),c=i(n(45018)),l=i(n(78768)),f=i(n(63259)),d=i(n(4923)),h=i(n(57646)),p=i(n(91605)),b=i(n(47559)),m=i(n(40192)),g=i(n(98567)),v=i(n(74578)),y=i(n(8070)),w=i(n(36594)),_=i(n(50173)),E=i(n(60169)),S=i(n(70167)),k=i(n(38236))},78768(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#e8eaf6",100:"#c5cae9",200:"#9fa8da",300:"#7986cb",400:"#5c6bc0",500:"#3f51b5",600:"#3949ab",700:"#303f9f",800:"#283593",900:"#1a237e",A100:"#8c9eff",A200:"#536dfe",A400:"#3d5afe",A700:"#304ffe"};t.default=n},4923(e,t){"use 
strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#e1f5fe",100:"#b3e5fc",200:"#81d4fa",300:"#4fc3f7",400:"#29b6f6",500:"#03a9f4",600:"#039be5",700:"#0288d1",800:"#0277bd",900:"#01579b",A100:"#80d8ff",A200:"#40c4ff",A400:"#00b0ff",A700:"#0091ea"};t.default=n},40192(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#f1f8e9",100:"#dcedc8",200:"#c5e1a5",300:"#aed581",400:"#9ccc65",500:"#8bc34a",600:"#7cb342",700:"#689f38",800:"#558b2f",900:"#33691e",A100:"#ccff90",A200:"#b2ff59",A400:"#76ff03",A700:"#64dd17"};t.default=n},98567(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#f9fbe7",100:"#f0f4c3",200:"#e6ee9c",300:"#dce775",400:"#d4e157",500:"#cddc39",600:"#c0ca33",700:"#afb42b",800:"#9e9d24",900:"#827717",A100:"#f4ff81",A200:"#eeff41",A400:"#c6ff00",A700:"#aeea00"};t.default=n},36594(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#fff3e0",100:"#ffe0b2",200:"#ffcc80",300:"#ffb74d",400:"#ffa726",500:"#ff9800",600:"#fb8c00",700:"#f57c00",800:"#ef6c00",900:"#e65100",A100:"#ffd180",A200:"#ffab40",A400:"#ff9100",A700:"#ff6d00"};t.default=n},124(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#fce4ec",100:"#f8bbd0",200:"#f48fb1",300:"#f06292",400:"#ec407a",500:"#e91e63",600:"#d81b60",700:"#c2185b",800:"#ad1457",900:"#880e4f",A100:"#ff80ab",A200:"#ff4081",A400:"#f50057",A700:"#c51162"};t.default=n},18118(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#f3e5f5",100:"#e1bee7",200:"#ce93d8",300:"#ba68c8",400:"#ab47bc",500:"#9c27b0",600:"#8e24aa",700:"#7b1fa2",800:"#6a1b9a",900:"#4a148c",A100:"#ea80fc",A200:"#e040fb",A400:"#d500f9",A700:"#aa00ff"};t.default=n},83165(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var 
n={50:"#ffebee",100:"#ffcdd2",200:"#ef9a9a",300:"#e57373",400:"#ef5350",500:"#f44336",600:"#e53935",700:"#d32f2f",800:"#c62828",900:"#b71c1c",A100:"#ff8a80",A200:"#ff5252",A400:"#ff1744",A700:"#d50000"};t.default=n},91605(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#e0f2f1",100:"#b2dfdb",200:"#80cbc4",300:"#4db6ac",400:"#26a69a",500:"#009688",600:"#00897b",700:"#00796b",800:"#00695c",900:"#004d40",A100:"#a7ffeb",A200:"#64ffda",A400:"#1de9b6",A700:"#00bfa5"};t.default=n},74578(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={50:"#fffde7",100:"#fff9c4",200:"#fff59d",300:"#fff176",400:"#ffee58",500:"#ffeb3b",600:"#fdd835",700:"#fbc02d",800:"#f9a825",900:"#f57f17",A100:"#ffff8d",A200:"#ffff00",A400:"#ffea00",A700:"#ffd600"};t.default=n},85609(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.styles=void 0;var i=r(n(67154)),a=r(n(59713)),o=r(n(6479)),s=r(n(34575)),u=r(n(93913)),c=r(n(78585)),l=r(n(29754)),f=r(n(2205)),d=r(n(67294));r(n(45697));var h=r(n(94184)),p=r(n(52598)),b=r(n(78252)),m=r(n(81701)),g={root:{display:"inline-flex",alignItems:"center",transition:"none","&:hover":{backgroundColor:"transparent"}},checked:{},disabled:{},input:{cursor:"inherit",position:"absolute",opacity:0,width:"100%",height:"100%",top:0,left:0,margin:0,padding:0}};t.styles=g;var v=function(e){function t(e){var n;return(0,s.default)(this,t),(n=(0,c.default)(this,(0,l.default)(t).call(this))).handleFocus=function(e){n.props.onFocus&&n.props.onFocus(e);var t=n.props.muiFormControl;t&&t.onFocus&&t.onFocus(e)},n.handleBlur=function(e){n.props.onBlur&&n.props.onBlur(e);var t=n.props.muiFormControl;t&&t.onBlur&&t.onBlur(e)},n.handleInputChange=function(e){var t=e.target.checked;n.isControlled||n.setState({checked:t}),n.props.onChange&&n.props.onChange(e,t)},n.isControlled=null!=e.checked,n.state={},n.isControlled||(n.state.checked=void 
0!==e.defaultChecked&&e.defaultChecked),n}return(0,f.default)(t,e),(0,u.default)(t,[{key:"render",value:function(){var e,t=this.props,n=t.autoFocus,r=t.checked,s=t.checkedIcon,u=t.classes,c=t.className,l=t.defaultChecked,f=t.disabled,p=t.icon,b=t.id,g=t.inputProps,v=t.inputRef,y=t.muiFormControl,w=t.name,_=(t.onBlur,t.onChange,t.onFocus,t.readOnly),E=t.required,S=t.tabIndex,k=t.type,x=t.value,T=(0,o.default)(t,["autoFocus","checked","checkedIcon","classes","className","defaultChecked","disabled","icon","id","inputProps","inputRef","muiFormControl","name","onBlur","onChange","onFocus","readOnly","required","tabIndex","type","value"]),M=f;y&&void 0===M&&(M=y.disabled);var O=this.isControlled?r:this.state.checked,A="checkbox"===k||"radio"===k;return d.default.createElement(m.default,(0,i.default)({component:"span",className:(0,h.default)(u.root,(e={},(0,a.default)(e,u.checked,O),(0,a.default)(e,u.disabled,M),e),c),disabled:M,tabIndex:null,role:void 0,onFocus:this.handleFocus,onBlur:this.handleBlur},T),O?s:p,d.default.createElement("input",(0,i.default)({autoFocus:n,checked:r,defaultChecked:l,className:u.input,disabled:M,id:A&&b,name:w,onChange:this.handleInputChange,readOnly:_,ref:v,required:E,tabIndex:S,type:k,value:x},g)))}}]),t}(d.default.Component),y=(0,b.default)(g,{name:"MuiPrivateSwitchBase"})((0,p.default)(v));t.default=y},13329(e,t){"use strict";function n(e){return(1+Math.sin(Math.PI*e-Math.PI/2))/2}function r(e,t,r){var i=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},a=arguments.length>4&&void 0!==arguments[4]?arguments[4]:function(){},o=i.ease,s=void 0===o?n:o,u=i.duration,c=void 0===u?300:u,l=null,f=t[e],d=!1,h=function(){d=!0},p=function n(i){if(d){a(Error("Animation cancelled"));return}null===l&&(l=i);var o=Math.min(1,(i-l)/c);if(t[e]=s(o)*(r-f)+f,o>=1){requestAnimationFrame(function(){a(null)});return}requestAnimationFrame(n)};return f===r?(a(Error("Element already at target 
position")),h):(requestAnimationFrame(p),h)}Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r;t.default=i},74622(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67294)),a=r(n(46949)),o=r(n(40577)),s=i.default.createElement("path",{d:"M7 10l5 5 5-5z"}),u=function(e){return i.default.createElement(o.default,e,s)};(u=(0,a.default)(u)).muiName="SvgIcon";var c=u;t.default=c},99781(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67294)),a=r(n(46949)),o=r(n(40577)),s=i.default.createElement("path",{d:"M12 2C6.47 2 2 6.47 2 12s4.47 10 10 10 10-4.47 10-10S17.53 2 12 2zm5 13.59L15.59 17 12 13.41 8.41 17 7 15.59 10.59 12 7 8.41 8.41 7 12 10.59 15.59 7 17 8.41 13.41 12 17 15.59z"}),u=function(e){return i.default.createElement(o.default,e,s)};(u=(0,a.default)(u)).muiName="SvgIcon";var c=u;t.default=c},41549(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67294)),a=r(n(46949)),o=r(n(40577)),s=i.default.createElement("path",{d:"M19 3H5c-1.11 0-2 .9-2 2v14c0 1.1.89 2 2 2h14c1.11 0 2-.9 2-2V5c0-1.1-.89-2-2-2zm-9 14l-5-5 1.41-1.41L10 14.17l7.59-7.59L19 8l-9 9z"}),u=function(e){return i.default.createElement(o.default,e,s)};(u=(0,a.default)(u)).muiName="SvgIcon";var c=u;t.default=c},42159(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67294)),a=r(n(46949)),o=r(n(40577)),s=i.default.createElement("path",{d:"M19 5v14H5V5h14m0-2H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2z"}),u=function(e){return i.default.createElement(o.default,e,s)};(u=(0,a.default)(u)).muiName="SvgIcon";var c=u;t.default=c},61486(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67294)),a=r(n(46949)),o=r(n(40577)),s=i.default.createElement("path",{d:"M19 
3H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm-2 10H7v-2h10v2z"}),u=function(e){return i.default.createElement(o.default,e,s)};(u=(0,a.default)(u)).muiName="SvgIcon";var c=u;t.default=c},86861(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67294)),a=r(n(46949)),o=r(n(40577)),s=i.default.createElement("path",{d:"M15.41 16.09l-4.58-4.59 4.58-4.59L14 5.5l-6 6 6 6z"}),u=function(e){return i.default.createElement(o.default,e,s)};(u=(0,a.default)(u)).muiName="SvgIcon";var c=u;t.default=c},43836(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67294)),a=r(n(46949)),o=r(n(40577)),s=i.default.createElement("path",{d:"M8.59 16.34l4.58-4.59-4.58-4.59L10 5.75l6 6-6 6z"}),u=function(e){return i.default.createElement(o.default,e,s)};(u=(0,a.default)(u)).muiName="SvgIcon";var c=u;t.default=c},93078(e,t,n){"use strict";/*! + * is-plain-object + * + * Copyright (c) 2014-2017, Jon Schlinkert. + * Released under the MIT License. 
+ */ var r=n(47798);function i(e){return!0===r(e)&&"[object Object]"===Object.prototype.toString.call(e)}e.exports=function(e){var t,n;return!1!==i(e)&&"function"==typeof(t=e.constructor)&&!1!==i(n=t.prototype)&&!1!==n.hasOwnProperty("isPrototypeOf")}},72366(e,t,n){"use strict";var r=n(20862),i=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.MuiThemeProviderOld=void 0;var a=i(n(67154)),o=i(n(59713)),s=i(n(34575)),u=i(n(93913)),c=i(n(78585)),l=i(n(29754)),f=i(n(2205)),d=i(n(67294)),h=i(n(45697));i(n(42473));var p=i(n(43890)),b=n(55252),m=r(n(51067)),g=function(e){function t(e,n){var r;return(0,s.default)(this,t),(r=(0,c.default)(this,(0,l.default)(t).call(this))).broadcast=(0,p.default)(),r.outerTheme=m.default.initial(n),r.broadcast.setState(r.mergeOuterLocalTheme(e.theme)),r}return(0,f.default)(t,e),(0,u.default)(t,[{key:"getChildContext",value:function(){var e,t=this.props,n=t.disableStylesGeneration,r=t.sheetsCache,i=t.sheetsManager,a=this.context.muiThemeProviderOptions||{};return void 0!==n&&(a.disableStylesGeneration=n),void 0!==r&&(a.sheetsCache=r),void 0!==i&&(a.sheetsManager=i),e={},(0,o.default)(e,m.CHANNEL,this.broadcast),(0,o.default)(e,"muiThemeProviderOptions",a),e}},{key:"componentDidMount",value:function(){var e=this;this.unsubscribeId=m.default.subscribe(this.context,function(t){e.outerTheme=t,e.broadcast.setState(e.mergeOuterLocalTheme(e.props.theme))})}},{key:"componentDidUpdate",value:function(e){this.props.theme!==e.theme&&this.broadcast.setState(this.mergeOuterLocalTheme(this.props.theme))}},{key:"componentWillUnmount",value:function(){null!==this.unsubscribeId&&m.default.unsubscribe(this.context,this.unsubscribeId)}},{key:"mergeOuterLocalTheme",value:function(e){return"function"==typeof e?e(this.outerTheme):this.outerTheme?(0,a.default)({},this.outerTheme,e):e}},{key:"render",value:function(){return 
this.props.children}}]),t}(d.default.Component);t.MuiThemeProviderOld=g,g.childContextTypes=(0,a.default)({},m.default.contextTypes,{muiThemeProviderOptions:h.default.object}),g.contextTypes=(0,a.default)({},m.default.contextTypes,{muiThemeProviderOptions:h.default.object}),b.ponyfillGlobal.__MUI_STYLES__||(b.ponyfillGlobal.__MUI_STYLES__={}),b.ponyfillGlobal.__MUI_STYLES__.MuiThemeProvider||(b.ponyfillGlobal.__MUI_STYLES__.MuiThemeProvider=g);var v=b.ponyfillGlobal.__MUI_STYLES__.MuiThemeProvider;t.default=v},59114(e,t,n){"use strict";var r=n(95318);function i(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0,n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1;return en?n:e}function a(e){e=e.substr(1);var t=RegExp(".{1,".concat(e.length/3,"}"),"g"),n=e.match(t);return n&&1===n[0].length&&(n=n.map(function(e){return e+e})),n?"rgb(".concat(n.map(function(e){return parseInt(e,16)}).join(", "),")"):""}function o(e){if(0===e.indexOf("#"))return e;function t(e){var t=e.toString(16);return 1===t.length?"0".concat(t):t}var n=s(e).values;return n=n.map(function(e){return t(e)}),"#".concat(n.join(""))}function s(e){if("#"===e.charAt(0))return s(a(e));var t=e.indexOf("("),n=e.substring(0,t),r=e.substring(t+1,e.length-1).split(",");return r=r.map(function(e){return parseFloat(e)}),{type:n,values:r}}function u(e){var t=e.type,n=e.values;return -1!==t.indexOf("rgb")&&(n=n.map(function(e,t){return t<3?parseInt(e,10):e})),-1!==t.indexOf("hsl")&&(n[1]="".concat(n[1],"%"),n[2]="".concat(n[2],"%")),"".concat(e.type,"(").concat(n.join(", "),")")}function c(e,t){var n=l(e),r=l(t);return(Math.max(n,r)+.05)/(Math.min(n,r)+.05)}function l(e){var t=s(e);if(-1!==t.type.indexOf("rgb")){var n=t.values.map(function(e){return(e/=255)<=.03928?e/12.92:Math.pow((e+.055)/1.055,2.4)});return Number((.2126*n[0]+.7152*n[1]+.0722*n[2]).toFixed(3))}return t.values[2]/100}function f(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:.15;return 
l(e)>.5?h(e,t):p(e,t)}function d(e,t){return e?(e=s(e),t=i(t),("rgb"===e.type||"hsl"===e.type)&&(e.type+="a"),e.values[3]=t,u(e)):e}function h(e,t){if(!e)return e;if(e=s(e),t=i(t),-1!==e.type.indexOf("hsl"))e.values[2]*=1-t;else if(-1!==e.type.indexOf("rgb"))for(var n=0;n<3;n+=1)e.values[n]*=1-t;return u(e)}function p(e,t){if(!e)return e;if(e=s(e),t=i(t),-1!==e.type.indexOf("hsl"))e.values[2]+=(100-e.values[2])*t;else if(-1!==e.type.indexOf("rgb"))for(var n=0;n<3;n+=1)e.values[n]+=(255-e.values[n])*t;return u(e)}Object.defineProperty(t,"__esModule",{value:!0}),t.convertHexToRGB=a,t.rgbToHex=o,t.decomposeColor=s,t.recomposeColor=u,t.getContrastRatio=c,t.getLuminance=l,t.emphasize=f,t.fade=d,t.darken=h,t.lighten=p,r(n(42473))},94811(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=s,t.keys=void 0;var i=r(n(67154)),a=r(n(6479)),o=["xs","sm","md","lg","xl"];function s(e){var t=e.values,n=void 0===t?{xs:0,sm:600,md:960,lg:1280,xl:1920}:t,r=e.unit,s=void 0===r?"px":r,u=e.step,c=void 0===u?5:u,l=(0,a.default)(e,["values","unit","step"]);function f(e){var t="number"==typeof n[e]?n[e]:e;return"@media (min-width:".concat(t).concat(s,")")}function d(e){var t=o.indexOf(e)+1,r=n[o[t]];if(t===o.length)return f("xs");var i="number"==typeof r&&t>0?r:e;return"@media (max-width:".concat(i-c/100).concat(s,")")}function h(e,t){var r=o.indexOf(t)+1;return r===o.length?f(e):"@media (min-width:".concat(n[e]).concat(s,") and ")+"(max-width:".concat(n[o[r]]-c/100).concat(s,")")}function p(e){return h(e,e)}function b(e){return n[e]}return(0,i.default)({keys:o,values:n,up:f,down:d,between:h,only:p,width:b},l)}t.keys=o},20237(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=o,r(n(42473));var i=/([[\].#*$><+~=|^:(),"'`\s])/g;function a(e){var t;return String(e).replace(i,"-")}function o(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.dangerouslyUseGlobalCSS,n=void 
0!==t&&t,r=e.productionPrefix,i=void 0===r?"jss":r,o=e.seed,s=void 0===o?"":o,u=0;return function(e,t){return(u+=1,n&&t&&t.options.name)?"".concat(a(t.options.name),"-").concat(e.key):"".concat(i).concat(s).concat(u)}}},40226(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=o;var i=r(n(59713)),a=r(n(67154));function o(e,t,n){var r;return(0,a.default)({gutters:function(){var n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return(0,a.default)({paddingLeft:2*t.unit,paddingRight:2*t.unit},n,(0,i.default)({},e.up("sm"),(0,a.default)({paddingLeft:3*t.unit,paddingRight:3*t.unit},n[e.up("sm")])))},toolbar:(r={minHeight:56},(0,i.default)(r,"".concat(e.up("xs")," and (orientation: landscape)"),{minHeight:48}),(0,i.default)(r,e.up("sm"),{minHeight:64}),r)},n)}},71615(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0,r(n(59713));var i=r(n(67154)),a=r(n(6479)),o=r(n(94863)),s=r(n(93078));r(n(42473));var u=r(n(94811)),c=r(n(40226)),l=r(n(21091)),f=r(n(45184)),d=r(n(80743)),h=r(n(59591)),p=r(n(5324)),b=r(n(15406)),m=r(n(88676));function g(){var e,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},n=t.breakpoints,r=void 0===n?{}:n,g=t.mixins,v=void 0===g?{}:g,y=t.palette,w=void 0===y?{}:y,_=t.shadows,E=t.spacing,S=void 0===E?{}:E,k=t.typography,x=void 0===k?{}:k,T=(0,a.default)(t,["breakpoints","mixins","palette","shadows","spacing","typography"]),M=(0,l.default)(w),O=(0,u.default)(r),A=(0,i.default)({},p.default,S);return(0,i.default)({breakpoints:O,direction:"ltr",mixins:(0,c.default)(O,A,v),overrides:{},palette:M,props:{},shadows:_||d.default,typography:(0,f.default)(M,x)},(0,o.default)({shape:h.default,spacing:A,transitions:b.default,zIndex:m.default},T,{isMergeableObject:s.default}))}var v=g;t.default=v},21091(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=m,t.dark=t.light=void 0;var 
i=r(n(67154)),a=r(n(6479));r(n(42473));var o=r(n(94863)),s=r(n(78768)),u=r(n(124)),c=r(n(70167)),l=r(n(83165)),f=r(n(515)),d=n(59114),h={text:{primary:"rgba(0, 0, 0, 0.87)",secondary:"rgba(0, 0, 0, 0.54)",disabled:"rgba(0, 0, 0, 0.38)",hint:"rgba(0, 0, 0, 0.38)"},divider:"rgba(0, 0, 0, 0.12)",background:{paper:f.default.white,default:c.default[50]},action:{active:"rgba(0, 0, 0, 0.54)",hover:"rgba(0, 0, 0, 0.08)",hoverOpacity:.08,selected:"rgba(0, 0, 0, 0.14)",disabled:"rgba(0, 0, 0, 0.26)",disabledBackground:"rgba(0, 0, 0, 0.12)"}};t.light=h;var p={text:{primary:f.default.white,secondary:"rgba(255, 255, 255, 0.7)",disabled:"rgba(255, 255, 255, 0.5)",hint:"rgba(255, 255, 255, 0.5)",icon:"rgba(255, 255, 255, 0.5)"},divider:"rgba(255, 255, 255, 0.12)",background:{paper:c.default[800],default:"#303030"},action:{active:f.default.white,hover:"rgba(255, 255, 255, 0.1)",hoverOpacity:.1,selected:"rgba(255, 255, 255, 0.2)",disabled:"rgba(255, 255, 255, 0.3)",disabledBackground:"rgba(255, 255, 255, 0.12)"}};function b(e,t,n,r){e[t]||(e.hasOwnProperty(n)?e[t]=e[n]:"light"===t?e.light=(0,d.lighten)(e.main,r):"dark"===t&&(e.dark=(0,d.darken)(e.main,1.5*r)))}function m(e){var t=e.primary,n=void 0===t?{light:s.default[300],main:s.default[500],dark:s.default[700]}:t,r=e.secondary,m=void 0===r?{light:u.default.A200,main:u.default.A400,dark:u.default.A700}:r,g=e.error,v=void 0===g?{light:l.default[300],main:l.default[500],dark:l.default[700]}:g,y=e.type,w=void 0===y?"light":y,_=e.contrastThreshold,E=void 0===_?3:_,S=e.tonalOffset,k=void 0===S?.2:S,x=(0,a.default)(e,["primary","secondary","error","type","contrastThreshold","tonalOffset"]);function T(e){var t;return(0,d.getContrastRatio)(e,p.text.primary)>=E?p.text.primary:h.text.primary}function M(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:500,n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:300,r=arguments.length>3&&void 
0!==arguments[3]?arguments[3]:700;return!e.main&&e[t]&&(e.main=e[t]),b(e,"light",n,k),b(e,"dark",r,k),e.contrastText||(e.contrastText=T(e.main)),e}M(n),M(m,"A400","A200","A700"),M(v);var O={dark:p,light:h};return(0,o.default)((0,i.default)({common:f.default,type:w,primary:n,secondary:m,error:v,grey:c.default,contrastThreshold:E,getContrastText:T,augmentColor:M,tonalOffset:k},O[w]),x,{clone:!1})}t.dark=p},16059(e,t){"use strict";function n(e){return e}Object.defineProperty(t,"__esModule",{value:!0}),t.default=n},45184(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=f;var i=r(n(67154)),a=r(n(6479)),o=r(n(94863));r(n(42473));var s=n(55252);function u(e){return Math.round(1e5*e)/1e5}var c={textTransform:"uppercase"},l='"Roboto", "Helvetica", "Arial", sans-serif';function f(e,t){var n="function"==typeof t?t(e):t,r=n.fontFamily,f=void 0===r?l:r,d=n.fontSize,h=void 0===d?14:d,p=n.fontWeightLight,b=void 0===p?300:p,m=n.fontWeightRegular,g=void 0===m?400:m,v=n.fontWeightMedium,y=void 0===v?500:v,w=n.htmlFontSize,_=void 0===w?16:w,E=n.useNextVariants,S=void 
0===E?Boolean(s.ponyfillGlobal.__MUI_USE_NEXT_TYPOGRAPHY_VARIANTS__):E,k=(n.suppressWarning,n.allVariants),x=(0,a.default)(n,["fontFamily","fontSize","fontWeightLight","fontWeightRegular","fontWeightMedium","htmlFontSize","useNextVariants","suppressWarning","allVariants"]),T=h/14,M=function(e){return"".concat(e/_*T,"rem")},O=function(t,n,r,a,o){return(0,i.default)({color:e.text.primary,fontFamily:f,fontWeight:t,fontSize:M(n),lineHeight:r},f===l?{letterSpacing:"".concat(u(a/n),"em")}:{},o,k)},A={h1:O(b,96,1,-1.5),h2:O(b,60,1,-.5),h3:O(g,48,1.04,0),h4:O(g,34,1.17,.25),h5:O(g,24,1.33,0),h6:O(y,20,1.6,.15),subtitle1:O(g,16,1.75,.15),subtitle2:O(y,14,1.57,.1),body1Next:O(g,16,1.5,.15),body2Next:O(g,14,1.5,.15),buttonNext:O(y,14,1.75,.4,c),captionNext:O(g,12,1.66,.4),overline:O(g,12,2.66,1,c)},L={display4:(0,i.default)({fontSize:M(112),fontWeight:b,fontFamily:f,letterSpacing:"-.04em",lineHeight:"".concat(u(128/112),"em"),marginLeft:"-.04em",color:e.text.secondary},k),display3:(0,i.default)({fontSize:M(56),fontWeight:g,fontFamily:f,letterSpacing:"-.02em",lineHeight:"".concat(u(73/56),"em"),marginLeft:"-.02em",color:e.text.secondary},k),display2:(0,i.default)({fontSize:M(45),fontWeight:g,fontFamily:f,lineHeight:"".concat(u(51/45),"em"),marginLeft:"-.02em",color:e.text.secondary},k),display1:(0,i.default)({fontSize:M(34),fontWeight:g,fontFamily:f,lineHeight:"".concat(u(41/34),"em"),color:e.text.secondary},k),headline:(0,i.default)({fontSize:M(24),fontWeight:g,fontFamily:f,lineHeight:"".concat(u(32.5/24),"em"),color:e.text.primary},k),title:(0,i.default)({fontSize:M(21),fontWeight:y,fontFamily:f,lineHeight:"".concat(u(24.5/21),"em"),color:e.text.primary},k),subheading:(0,i.default)({fontSize:M(16),fontWeight:g,fontFamily:f,lineHeight:"".concat(u(1.5),"em"),color:e.text.primary},k),body2:(0,i.default)({fontSize:M(14),fontWeight:y,fontFamily:f,lineHeight:"".concat(u(24/14),"em"),color:e.text.primary},k),body1:(0,i.default)({fontSize:M(14),fontWeight:g,fontFamily:f,lineHeight:""
.concat(u(20.5/14),"em"),color:e.text.primary},k),caption:(0,i.default)({fontSize:M(12),fontWeight:g,fontFamily:f,lineHeight:"".concat(u(1.375),"em"),color:e.text.secondary},k),button:(0,i.default)({fontSize:M(14),textTransform:"uppercase",fontWeight:y,fontFamily:f,color:e.text.primary},k)};return(0,o.default)((0,i.default)({pxToRem:M,round:u,fontFamily:f,fontSize:h,fontWeightLight:b,fontWeightRegular:g,fontWeightMedium:y},L,A,S?{body1:A.body1Next,body2:A.body2Next,button:A.buttonNext,caption:A.captionNext}:{},{useNextVariants:S}),x,{clone:!1})}},42458(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67154));r(n(50008)),r(n(42473));var a=r(n(94863));function o(e,t){return t}function s(e){var t="function"==typeof e;function n(n,r){var s=t?e(n):e;if(!r||!n.overrides||!n.overrides[r])return s;var u=n.overrides[r],c=(0,i.default)({},s);return Object.keys(u).forEach(function(e){c[e]=(0,a.default)(c[e],u[e],{arrayMerge:o})}),c}return{create:n,options:{},themingEnabled:t}}var u=s;t.default=u},58057(e,t){"use strict";function n(e){var t,n=e.theme,r=e.name,i=e.props;if(!n.props||!r||!n.props[r])return i;var a=n.props[r];for(t in a)void 0===i[t]&&(i[t]=a[t]);return i}Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var r=n;t.default=r},32316(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"createGenerateClassName",{enumerable:!0,get:function(){return i.default}}),Object.defineProperty(t,"createMuiTheme",{enumerable:!0,get:function(){return a.default}}),Object.defineProperty(t,"jssPreset",{enumerable:!0,get:function(){return o.default}}),Object.defineProperty(t,"MuiThemeProvider",{enumerable:!0,get:function(){return s.default}}),Object.defineProperty(t,"createStyles",{enumerable:!0,get:function(){return u.default}}),Object.defineProperty(t,"withStyles",{enumerable:!0,get:function(){return 
c.default}}),Object.defineProperty(t,"withTheme",{enumerable:!0,get:function(){return l.default}});var i=r(n(20237)),a=r(n(71615)),o=r(n(9399)),s=r(n(72366)),u=r(n(16059)),c=r(n(78252)),l=r(n(82313))},9399(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(29059)),a=r(n(28752)),o=r(n(35828)),s=r(n(50462)),u=r(n(65926)),c=r(n(89347));function l(){return{plugins:[(0,i.default)(),(0,a.default)(),(0,o.default)(),(0,s.default)(),"undefined"==typeof window?null:(0,u.default)(),(0,c.default)()]}}var f=l;t.default=f},35199(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var i=r(n(67154));function a(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.baseClasses,n=e.newClasses;if(e.Component,!n)return t;var r=(0,i.default)({},t);return Object.keys(n).forEach(function(e){n[e]&&(r[e]="".concat(t[e]," ").concat(n[e]))}),r}r(n(42473)),n(55252);var o=a;t.default=o},88693(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={set:function(e,t,n,r){var i=e.get(t);i||(i=new Map,e.set(t,i)),i.set(n,r)},get:function(e,t,n){var r=e.get(t);return r?r.get(n):void 0},delete:function(e,t,n){e.get(t).delete(n)}};t.default=n},31898(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={jss:"64a55d578f856d258dc345b094a2a2b3",sheetsRegistry:"d4bd0baacbc52bbd48bbb9eb24344ecd",sheetOptions:"6fc570d6bd61383819d0f9e7407c452d"};t.default=n},80743(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n=.2,r=.14,i=.12;function a(){return["".concat(arguments.length<=0?void 0:arguments[0],"px ").concat(arguments.length<=1?void 0:arguments[1],"px ").concat(arguments.length<=2?void 0:arguments[2],"px ").concat(arguments.length<=3?void 0:arguments[3],"px rgba(0,0,0,").concat(n,")"),"".concat(arguments.length<=4?void 0:arguments[4],"px ").concat(arguments.length<=5?void 
0:arguments[5],"px ").concat(arguments.length<=6?void 0:arguments[6],"px ").concat(arguments.length<=7?void 0:arguments[7],"px rgba(0,0,0,").concat(r,")"),"".concat(arguments.length<=8?void 0:arguments[8],"px ").concat(arguments.length<=9?void 0:arguments[9],"px ").concat(arguments.length<=10?void 0:arguments[10],"px ").concat(arguments.length<=11?void 0:arguments[11],"px rgba(0,0,0,").concat(i,")")].join(",")}var o=["none",a(0,1,3,0,0,1,1,0,0,2,1,-1),a(0,1,5,0,0,2,2,0,0,3,1,-2),a(0,1,8,0,0,3,4,0,0,3,3,-2),a(0,2,4,-1,0,4,5,0,0,1,10,0),a(0,3,5,-1,0,5,8,0,0,1,14,0),a(0,3,5,-1,0,6,10,0,0,1,18,0),a(0,4,5,-2,0,7,10,1,0,2,16,1),a(0,5,5,-3,0,8,10,1,0,3,14,2),a(0,5,6,-3,0,9,12,1,0,3,16,2),a(0,6,6,-3,0,10,14,1,0,4,18,3),a(0,6,7,-4,0,11,15,1,0,4,20,3),a(0,7,8,-4,0,12,17,2,0,5,22,4),a(0,7,8,-4,0,13,19,2,0,5,24,4),a(0,7,9,-4,0,14,21,2,0,5,26,4),a(0,8,9,-5,0,15,22,2,0,6,28,5),a(0,8,10,-5,0,16,24,2,0,6,30,5),a(0,8,11,-5,0,17,26,2,0,6,32,5),a(0,9,11,-5,0,18,28,2,0,7,34,6),a(0,9,12,-6,0,19,29,2,0,7,36,6),a(0,10,13,-6,0,20,31,3,0,8,38,7),a(0,10,13,-6,0,21,33,3,0,8,40,7),a(0,10,14,-6,0,22,35,3,0,8,42,7),a(0,11,14,-7,0,23,36,3,0,9,44,8),a(0,11,15,-7,0,24,38,3,0,9,46,8)];t.default=o},59591(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={borderRadius:4};t.default=n},5324(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={unit:8};t.default=n},51067(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.CHANNEL=void 0;var i=r(n(59713)),a="__THEMING__";t.CHANNEL=a;var o={contextTypes:(0,i.default)({},a,function(){}),initial:function(e){return e[a]?e[a].getState():null},subscribe:function(e,t){return e[a]?e[a].subscribe(t):null},unsubscribe:function(e,t){e[a]&&e[a].unsubscribe(t)}};t.default=o},15406(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.isNumber=t.isString=t.formatMs=t.duration=t.easing=void 0;var 
i=r(n(6479));r(n(42473));var a={easeInOut:"cubic-bezier(0.4, 0, 0.2, 1)",easeOut:"cubic-bezier(0.0, 0, 0.2, 1)",easeIn:"cubic-bezier(0.4, 0, 1, 1)",sharp:"cubic-bezier(0.4, 0, 0.6, 1)"};t.easing=a;var o={shortest:150,shorter:200,short:250,standard:300,complex:375,enteringScreen:225,leavingScreen:195};t.duration=o;var s=function(e){return"".concat(Math.round(e),"ms")};t.formatMs=s;var u=function(e){return"string"==typeof e};t.isString=u;var c=function(e){return!isNaN(parseFloat(e))};t.isNumber=c;var l={easing:a,duration:o,create:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:["all"],t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.duration,r=void 0===n?o.standard:n,u=t.easing,c=void 0===u?a.easeInOut:u,l=t.delay,f=void 0===l?0:l;return(0,i.default)(t,["duration","easing","delay"]),(Array.isArray(e)?e:[e]).map(function(e){return"".concat(e," ").concat("string"==typeof r?r:s(r)," ").concat(c," ").concat("string"==typeof f?f:s(f))}).join(",")},getAutoHeightDuration:function(e){if(!e)return 0;var t=e/36;return Math.round((4+15*Math.pow(t,.25)+t/5)*10)}};t.default=l},78252(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.sheetsManager=void 0;var i=r(n(59713)),a=r(n(67154)),o=r(n(34575)),s=r(n(93913)),u=r(n(78585)),c=r(n(29754)),l=r(n(2205)),f=r(n(6479)),d=r(n(67294)),h=r(n(45697));r(n(42473));var p=r(n(8679)),b=n(55252),m=n(55690),g=r(n(31898)),v=r(n(9399)),y=r(n(35199)),w=r(n(88693)),_=r(n(71615)),E=r(n(51067)),S=r(n(20237)),k=r(n(42458)),x=r(n(58057)),T=(0,m.create)((0,v.default)()),M=(0,S.default)(),O=-1e11,A=new Map;t.sheetsManager=A;var L={},C=(0,_.default)({typography:{suppressWarning:!0}}),I=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return function(n){var r,b=t.withTheme,m=void 0!==b&&b,v=t.flip,_=void 0===v?null:v,S=t.name,I=(0,f.default)(t,["withTheme","flip","name"]),D=(0,k.default)(e),N=D.themingEnabled||"string"==typeof 
S||m;O+=1,D.options.index=O;var P=function(e){function t(e,n){(0,o.default)(this,t),(r=(0,u.default)(this,(0,c.default)(t).call(this,e,n))).jss=n[g.default.jss]||T,r.sheetsManager=A,r.unsubscribeId=null;var r,i=n.muiThemeProviderOptions;return i&&(i.sheetsManager&&(r.sheetsManager=i.sheetsManager),r.sheetsCache=i.sheetsCache,r.disableStylesGeneration=i.disableStylesGeneration),r.stylesCreatorSaved=D,r.sheetOptions=(0,a.default)({generateClassName:M},n[g.default.sheetOptions]),r.theme=N?E.default.initial(n)||C:L,r.attach(r.theme),r.cacheClasses={value:null,lastProp:null,lastJSS:{}},r}return(0,l.default)(t,e),(0,s.default)(t,[{key:"componentDidMount",value:function(){var e=this;N&&(this.unsubscribeId=E.default.subscribe(this.context,function(t){var n=e.theme;e.theme=t,e.attach(e.theme),e.setState({},function(){e.detach(n)})}))}},{key:"componentDidUpdate",value:function(){this.stylesCreatorSaved}},{key:"componentWillUnmount",value:function(){this.detach(this.theme),null!==this.unsubscribeId&&E.default.unsubscribe(this.context,this.unsubscribeId)}},{key:"getClasses",value:function(){if(this.disableStylesGeneration)return this.props.classes||{};var e=!1,t=w.default.get(this.sheetsManager,this.stylesCreatorSaved,this.theme);return t.sheet.classes!==this.cacheClasses.lastJSS&&(this.cacheClasses.lastJSS=t.sheet.classes,e=!0),this.props.classes!==this.cacheClasses.lastProp&&(this.cacheClasses.lastProp=this.props.classes,e=!0),e&&(this.cacheClasses.value=(0,y.default)({baseClasses:this.cacheClasses.lastJSS,newClasses:this.props.classes,Component:n})),this.cacheClasses.value}},{key:"attach",value:function(e){if(!this.disableStylesGeneration){var t=this.stylesCreatorSaved,n=w.default.get(this.sheetsManager,t,e);if(n||(n={refs:0,sheet:null},w.default.set(this.sheetsManager,t,e,n)),0===n.refs){this.sheetsCache&&(r=w.default.get(this.sheetsCache,t,e)),!r&&((r=this.createSheet(e)).attach(),this.sheetsCache&&w.default.set(this.sheetsCache,t,e,r)),n.sheet=r;var 
r,i=this.context[g.default.sheetsRegistry];i&&i.add(r)}n.refs+=1}}},{key:"createSheet",value:function(e){var t=this.stylesCreatorSaved.create(e,S),r=S;return this.jss.createStyleSheet(t,(0,a.default)({meta:r,classNamePrefix:r,flip:"boolean"==typeof _?_:"rtl"===e.direction,link:!1},this.sheetOptions,this.stylesCreatorSaved.options,{name:S||n.displayName},I))}},{key:"detach",value:function(e){if(!this.disableStylesGeneration){var t=w.default.get(this.sheetsManager,this.stylesCreatorSaved,e);if(t.refs-=1,0===t.refs){w.default.delete(this.sheetsManager,this.stylesCreatorSaved,e),this.jss.removeStyleSheet(t.sheet);var n=this.context[g.default.sheetsRegistry];n&&n.remove(t.sheet)}}}},{key:"render",value:function(){var e=this.props,t=(e.classes,e.innerRef),r=(0,f.default)(e,["classes","innerRef"]),i=(0,x.default)({theme:this.theme,name:S,props:r});return m&&!i.theme&&(i.theme=this.theme),d.default.createElement(n,(0,a.default)({},i,{classes:this.getClasses(),ref:t}))}}]),t}(d.default.Component);return P.contextTypes=(0,a.default)((r={muiThemeProviderOptions:h.default.object},(0,i.default)(r,g.default.jss,h.default.object),(0,i.default)(r,g.default.sheetOptions,h.default.object),(0,i.default)(r,g.default.sheetsRegistry,h.default.object),r),N?E.default.contextTypes:{}),(0,p.default)(P,n),P}};b.ponyfillGlobal.__MUI_STYLES__||(b.ponyfillGlobal.__MUI_STYLES__={}),b.ponyfillGlobal.__MUI_STYLES__.withStyles||(b.ponyfillGlobal.__MUI_STYLES__.withStyles=I);var D=function(e,t){return b.ponyfillGlobal.__MUI_STYLES__.withStyles(e,(0,a.default)({defaultTheme:C},t))};t.default=D},82313(e,t,n){"use strict";var r,i=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var a=i(n(67154)),o=i(n(6479)),s=i(n(34575)),u=i(n(93913)),c=i(n(78585)),l=i(n(29754)),f=i(n(2205)),d=i(n(67294));i(n(45697));var h=i(n(8679)),p=n(55252),b=i(n(71615)),m=i(n(51067));function g(){return r||(r=(0,b.default)({typography:{suppressWarning:!0}}))}var v=function(){return function(e){var 
t=function(t){function n(e,t){var r;return(0,s.default)(this,n),(r=(0,c.default)(this,(0,l.default)(n).call(this))).state={theme:m.default.initial(t)||g()},r}return(0,f.default)(n,t),(0,u.default)(n,[{key:"componentDidMount",value:function(){var e=this;this.unsubscribeId=m.default.subscribe(this.context,function(t){e.setState({theme:t})})}},{key:"componentWillUnmount",value:function(){null!==this.unsubscribeId&&m.default.unsubscribe(this.context,this.unsubscribeId)}},{key:"render",value:function(){var t=this.props,n=t.innerRef,r=(0,o.default)(t,["innerRef"]);return d.default.createElement(e,(0,a.default)({theme:this.state.theme,ref:n},r))}}]),n}(d.default.Component);return t.contextTypes=m.default.contextTypes,(0,h.default)(t,e),t}};p.ponyfillGlobal.__MUI_STYLES__||(p.ponyfillGlobal.__MUI_STYLES__={}),p.ponyfillGlobal.__MUI_STYLES__.withTheme||(p.ponyfillGlobal.__MUI_STYLES__.withTheme=v);var y=p.ponyfillGlobal.__MUI_STYLES__.withTheme;t.default=y},88676(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={mobileStepper:1e3,appBar:1100,drawer:1200,modal:1300,snackbar:1400,tooltip:1500};t.default=n},41929(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.getTransitionProps=r,t.reflow=void 0;var n=function(e){return e.scrollTop};function r(e,t){var n=e.timeout,r=e.style,i=void 0===r?{}:r;return{duration:i.transitionDuration||"number"==typeof n?n:n[t.mode],delay:i.transitionDelay}}t.reflow=n},346(e,t){"use strict";function n(e,t){return function(){return null}}Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var r=n;t.default=r},98741(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.capitalize=a,t.contains=o,t.findIndex=s,t.find=u,t.createChainedFunction=c;var i=r(n(50008));function a(e){return e.charAt(0).toUpperCase()+e.slice(1)}function o(e,t){return Object.keys(t).every(function(n){return e.hasOwnProperty(n)&&e[n]===t[n]})}function s(e,t){for(var 
n=(0,i.default)(t),r=0;r-1?e[n]:void 0}function c(){for(var e=arguments.length,t=Array(e),n=0;n1&&void 0!==arguments[1]?arguments[1]:window,n=(0,i.default)(e);return n.defaultView||n.parentView||t}var o=a;t.default=o},44370(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.cloneElementWithClassName=o,t.cloneChildrenWithClassName=s,t.isMuiElement=u,t.setRef=c;var i=r(n(67294)),a=r(n(94184));function o(e,t){return i.default.cloneElement(e,{className:(0,a.default)(e.props.className,t)})}function s(e,t){return i.default.Children.map(e,function(e){return i.default.isValidElement(e)&&o(e,t)})}function u(e,t){return i.default.isValidElement(e)&&-1!==t.indexOf(e.type.muiName)}function c(e,t){"function"==typeof e?e(t):e&&(e.current=t)}},47348(e,t){"use strict";function n(e){return function(){return null}}Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var r=n;t.default=r},21677(e,t){"use strict";function n(e,t,n,r,i){return null}Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var r=n;t.default=r},78290(e,t,n){"use strict";var r=n(20862);Object.defineProperty(t,"__esModule",{value:!0});var i={};Object.defineProperty(t,"default",{enumerable:!0,get:function(){return a.default}});var a=r(n(88446));Object.keys(a).forEach(function(e){"default"!==e&&"__esModule"!==e&&(Object.prototype.hasOwnProperty.call(i,e)||Object.defineProperty(t,e,{enumerable:!0,get:function(){return a[e]}}))})},88446(e,t,n){"use strict";var r=n(95318);Object.defineProperty(t,"__esModule",{value:!0}),t.default=t.isWidthDown=t.isWidthUp=void 0;var i=r(n(67154)),a=r(n(6479)),o=r(n(34575)),s=r(n(93913)),u=r(n(78585)),c=r(n(29754)),l=r(n(2205)),f=r(n(67294));r(n(45697));var d=r(n(96421)),h=r(n(20296));n(55252);var p=r(n(8679)),b=r(n(82313)),m=n(94811),g=r(n(58057)),v=function(e,t){var n=!(arguments.length>2)||void 0===arguments[2]||arguments[2];return n?m.keys.indexOf(e)<=m.keys.indexOf(t):m.keys.indexOf(e)2)||void 
0===arguments[2]||arguments[2];return n?m.keys.indexOf(t)<=m.keys.indexOf(e):m.keys.indexOf(t)0&&void 0!==arguments[0]?arguments[0]:{};return function(t){var n=e.withTheme,r=void 0!==n&&n,v=e.noSSR,y=void 0!==v&&v,w=e.initialWidth,_=e.resizeInterval,E=void 0===_?166:_,S=function(e){function n(e){var t;return(0,o.default)(this,n),(t=(0,u.default)(this,(0,c.default)(n).call(this,e))).state={width:y?t.getWidth():void 0},"undefined"!=typeof window&&(t.handleResize=(0,h.default)(function(){var e=t.getWidth();e!==t.state.width&&t.setState({width:e})},E)),t}return(0,l.default)(n,e),(0,s.default)(n,[{key:"componentDidMount",value:function(){var e=this.getWidth();e!==this.state.width&&this.setState({width:e})}},{key:"componentWillUnmount",value:function(){this.handleResize.clear()}},{key:"getWidth",value:function(){for(var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:window.innerWidth,t=this.props.theme.breakpoints,n=null,r=1;null===n&&ri.Z,componentPropType:()=>r.Z,exactProp:()=>a.ZP,getDisplayName:()=>o.ZP,ponyfillGlobal:()=>s.Z});var r=n(78728),i=n(5477),a=n(43781),o=n(25189),s=n(34712);/** @license Material-UI v3.0.0-alpha.3 + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ */ },34712(e,t){"use strict";n={value:!0},t.Z=void 0;var n,r="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();t.Z=r},82152(e,t,n){"use strict";n.d(t,{D:()=>u});var r=Object.prototype,i=r.toString,a=r.hasOwnProperty,o=Function.prototype.toString,s=new Map;function u(e,t){try{return c(e,t)}finally{s.clear()}}function c(e,t){if(e===t)return!0;var n=i.call(e),r=i.call(t);if(n!==r)return!1;switch(n){case"[object Array]":if(e.length!==t.length)break;case"[object Object]":if(p(e,t))return!0;var s=l(e),u=l(t),f=s.length;if(f!==u.length)break;for(var b=0;b=0&&e.indexOf(t,n)===n}function p(e,t){var n=s.get(e);if(n){if(n.has(t))return!0}else s.set(e,n=new Set);return n.add(t),!1}},79742(e,t){"use strict";t.byteLength=c,t.toByteArray=f,t.fromByteArray=p;for(var n=[],r=[],i="undefined"!=typeof Uint8Array?Uint8Array:Array,a="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",o=0,s=a.length;o0)throw Error("Invalid string. 
Length must be a multiple of 4");var n=e.indexOf("=");-1===n&&(n=t);var r=n===t?0:4-n%4;return[n,r]}function c(e){var t=u(e),n=t[0],r=t[1];return(n+r)*3/4-r}function l(e,t,n){return(t+n)*3/4-n}function f(e){var t,n,a=u(e),o=a[0],s=a[1],c=new i(l(e,o,s)),f=0,d=s>0?o-4:o;for(n=0;n>16&255,c[f++]=t>>8&255,c[f++]=255&t;return 2===s&&(t=r[e.charCodeAt(n)]<<2|r[e.charCodeAt(n+1)]>>4,c[f++]=255&t),1===s&&(t=r[e.charCodeAt(n)]<<10|r[e.charCodeAt(n+1)]<<4|r[e.charCodeAt(n+2)]>>2,c[f++]=t>>8&255,c[f++]=255&t),c}function d(e){return n[e>>18&63]+n[e>>12&63]+n[e>>6&63]+n[63&e]}function h(e,t,n){for(var r,i=[],a=t;au?u:s+o));return 1===i?a.push(n[(t=e[r-1])>>2]+n[t<<4&63]+"=="):2===i&&a.push(n[(t=(e[r-2]<<8)+e[r-1])>>10]+n[t>>4&63]+n[t<<2&63]+"="),a.join("")}r["-".charCodeAt(0)]=62,r["_".charCodeAt(0)]=63},44431:function(e,t,n){var r;!function(i){"use strict";var a,o=/^-?(?:\d+(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?$/i,s=Math.ceil,u=Math.floor,c="[BigNumber Error] ",l=c+"Number primitive has more than 15 significant digits: ",f=1e14,d=14,h=9007199254740991,p=[1,10,100,1e3,1e4,1e5,1e6,1e7,1e8,1e9,1e10,1e11,1e12,1e13],b=1e7,m=1e9;function g(e){var t,n,r,i,a,x,T,M,O,A,L=$.prototype={constructor:$,toString:null,valueOf:null},C=new $(1),I=20,D=4,N=-7,P=21,R=-1e7,j=1e7,F=!1,Y=1,B=0,U={prefix:"",groupSize:3,secondaryGroupSize:0,groupSeparator:",",decimalSeparator:".",fractionGroupSize:0,fractionGroupSeparator:"\xa0",suffix:""},H="0123456789abcdefghijklmnopqrstuvwxyz";function $(e,t){var n,r,i,a,s,c,f,p,b=this;if(!(b instanceof $))return new $(e,t);if(null==t){if(e&&!0===e._isBigNumber){b.s=e.s,!e.c||e.e>j?b.c=b.e=null:e.e=10;s/=10,a++);a>j?b.c=b.e=null:(b.e=a,b.c=[e]);return}p=String(e)}else{if(!o.test(p=String(e)))return A(b,p,c);b.s=45==p.charCodeAt(0)?(p=p.slice(1),-1):1}(a=p.indexOf("."))>-1&&(p=p.replace(".","")),(s=p.search(/e/i))>0?(a<0&&(a=s),a+=+p.slice(s+1),p=p.substring(0,s)):a<0&&(a=p.length)}else{if(_(t,2,H.length,"Base"),10==t)return b=new 
$(e),K(b,I+b.e+1,D);if(p=String(e),c="number"==typeof e){if(0*e!=0)return A(b,p,c,t);if(b.s=1/e<0?(p=p.slice(1),-1):1,$.DEBUG&&p.replace(/^0\.0*|\./,"").length>15)throw Error(l+e)}else b.s=45===p.charCodeAt(0)?(p=p.slice(1),-1):1;for(n=H.slice(0,t),a=s=0,f=p.length;sn.indexOf(r=p.charAt(s))){if("."==r){if(s>a){a=f;continue}}else if(!i&&(p==p.toUpperCase()&&(p=p.toLowerCase())||p==p.toLowerCase()&&(p=p.toUpperCase()))){i=!0,s=-1,a=0;continue}return A(b,String(e),c,t)}c=!1,(a=(p=O(p,t,10,b.s)).indexOf("."))>-1?p=p.replace(".",""):a=p.length}for(s=0;48===p.charCodeAt(s);s++);for(f=p.length;48===p.charCodeAt(--f););if(p=p.slice(s,++f)){if(f-=s,c&&$.DEBUG&&f>15&&(e>h||e!==u(e)))throw Error(l+b.s*e);if((a=a-s-1)>j)b.c=b.e=null;else if(a=P)?S(u,o):k(u,o,"0");else if(a=(e=K(new $(e),t,n)).e,s=(u=y(e.c)).length,1==r||2==r&&(t<=a||a<=N)){for(;ss){if(--t>0)for(u+=".";t--;u+="0");}else if((t+=a-s)>0)for(a+1==s&&(u+=".");t--;u+="0");return e.s<0&&i?"-"+u:u}function G(e,t){for(var n,r=1,i=new $(e[0]);r=10;i/=10,r++);return(n=r+n*d-1)>j?e.c=e.e=null:n=10;c/=10,i++);if((a=t-i)<0)a+=d,o=t,b=(l=m[h=0])/g[i-o-1]%10|0;else if((h=s((a+1)/d))>=m.length){if(r){for(;m.length<=h;m.push(0));l=b=0,i=1,a%=d,o=a-d+1}else break out}else{for(i=1,l=c=m[h];c>=10;c/=10,i++);a%=d,b=(o=a-d+i)<0?0:l/g[i-o-1]%10|0}if(r=r||t<0||null!=m[h+1]||(o<0?l:l%g[i-o-1]),r=n<4?(b||r)&&(0==n||n==(e.s<0?3:2)):b>5||5==b&&(4==n||r||6==n&&(a>0?o>0?l/g[i-o]:0:m[h-1])%10&1||n==(e.s<0?8:7)),t<1||!m[0])return m.length=0,r?(t-=e.e+1,m[0]=g[(d-t%d)%d],e.e=-t||0):m[0]=e.e=0,e;if(0==a?(m.length=h,c=1,h--):(m.length=h+1,c=g[d-a],m[h]=o>0?u(l/g[i-o]%g[o])*c:0),r)for(;;){if(0==h){for(a=1,o=m[0];o>=10;o/=10,a++);for(o=m[0]+=c,c=1;o>=10;o/=10,c++);a!=c&&(e.e++,m[0]==f&&(m[0]=1));break}if(m[h]+=c,m[h]!=f)break;m[h--]=0,c=1}for(a=m.length;0===m[--a];m.pop());}e.e>j?e.c=e.e=null:e.e=P?S(t,n):k(t,n,"0"),e.s<0?"-"+t:t)}return 
$.clone=g,$.ROUND_UP=0,$.ROUND_DOWN=1,$.ROUND_CEIL=2,$.ROUND_FLOOR=3,$.ROUND_HALF_UP=4,$.ROUND_HALF_DOWN=5,$.ROUND_HALF_EVEN=6,$.ROUND_HALF_CEIL=7,$.ROUND_HALF_FLOOR=8,$.EUCLID=9,$.config=$.set=function(e){var t,n;if(null!=e){if("object"==typeof e){if(e.hasOwnProperty(t="DECIMAL_PLACES")&&(_(n=e[t],0,m,t),I=n),e.hasOwnProperty(t="ROUNDING_MODE")&&(_(n=e[t],0,8,t),D=n),e.hasOwnProperty(t="EXPONENTIAL_AT")&&((n=e[t])&&n.pop?(_(n[0],-m,0,t),_(n[1],0,m,t),N=n[0],P=n[1]):(_(n,-m,m,t),N=-(P=n<0?-n:n))),e.hasOwnProperty(t="RANGE")){if((n=e[t])&&n.pop)_(n[0],-m,-1,t),_(n[1],1,m,t),R=n[0],j=n[1];else if(_(n,-m,m,t),n)R=-(j=n<0?-n:n);else throw Error(c+t+" cannot be zero: "+n)}if(e.hasOwnProperty(t="CRYPTO")){if(!!(n=e[t])===n){if(n){if("undefined"!=typeof crypto&&crypto&&(crypto.getRandomValues||crypto.randomBytes))F=n;else throw F=!n,Error(c+"crypto unavailable")}else F=n}else throw Error(c+t+" not true or false: "+n)}if(e.hasOwnProperty(t="MODULO_MODE")&&(_(n=e[t],0,9,t),Y=n),e.hasOwnProperty(t="POW_PRECISION")&&(_(n=e[t],0,m,t),B=n),e.hasOwnProperty(t="FORMAT")){if("object"==typeof(n=e[t]))U=n;else throw Error(c+t+" not an object: "+n)}if(e.hasOwnProperty(t="ALPHABET")){if("string"!=typeof(n=e[t])||/^.?$|[+\-.\s]|(.).*\1/.test(n))throw Error(c+t+" invalid: "+n);H=n}}else throw Error(c+"Object expected: "+e)}return{DECIMAL_PLACES:I,ROUNDING_MODE:D,EXPONENTIAL_AT:[N,P],RANGE:[R,j],CRYPTO:F,MODULO_MODE:Y,POW_PRECISION:B,FORMAT:U,ALPHABET:H}},$.isBigNumber=function(e){if(!e||!0!==e._isBigNumber)return!1;if(!$.DEBUG)return!0;var t,n,r=e.c,i=e.e,a=e.s;out:if("[object Array]"==({}).toString.call(r)){if((1===a||-1===a)&&i>=-m&&i<=m&&i===u(i)){if(0===r[0]){if(0===i&&1===r.length)return!0;break out}if((t=(i+1)%d)<1&&(t+=d),String(r[0]).length==t){for(t=0;t=f||n!==u(n))break out;if(0!==n)return!0}}}else if(null===r&&null===i&&(null===a||1===a||-1===a))return!0;throw Error(c+"Invalid BigNumber: "+e)},$.maximum=$.max=function(){return 
G(arguments,L.lt)},$.minimum=$.min=function(){return G(arguments,L.gt)},$.random=(n=Math.random()*(t=9007199254740992)&2097151?function(){return u(Math.random()*t)}:function(){return(1073741824*Math.random()|0)*8388608+(8388608*Math.random()|0)},function(e){var t,r,i,a,o,l=0,f=[],h=new $(C);if(null==e?e=I:_(e,0,m),a=s(e/d),F){if(crypto.getRandomValues){for(t=crypto.getRandomValues(new Uint32Array(a*=2));l>>11))>=9e15?(r=crypto.getRandomValues(new Uint32Array(2)),t[l]=r[0],t[l+1]=r[1]):(f.push(o%1e14),l+=2);l=a/2}else if(crypto.randomBytes){for(t=crypto.randomBytes(a*=7);l=9e15?crypto.randomBytes(7).copy(t,l):(f.push(o%1e14),l+=7);l=a/7}else throw F=!1,Error(c+"crypto unavailable")}if(!F)for(;l=10;o/=10,l++);ln-1&&(null==o[i+1]&&(o[i+1]=0),o[i+1]+=o[i]/n|0,o[i]%=n)}return o.reverse()}return function(n,r,i,a,o){var s,u,c,l,f,d,h,p,b=n.indexOf("."),m=I,g=D;for(b>=0&&(l=B,B=0,n=n.replace(".",""),d=(p=new $(r)).pow(n.length-b),B=l,p.c=t(k(y(d.c),d.e,"0"),10,i,e),p.e=p.c.length),c=l=(h=t(n,r,i,o?(s=H,e):(s=e,H))).length;0==h[--l];h.pop());if(!h[0])return s.charAt(0);if(b<0?--c:(d.c=h,d.e=c,d.s=a,h=(d=M(d,p,m,g,i)).c,f=d.r,c=d.e),b=h[u=c+m+1],l=i/2,f=f||u<0||null!=h[u+1],f=g<4?(null!=b||f)&&(0==g||g==(d.s<0?3:2)):b>l||b==l&&(4==g||f||6==g&&1&h[u-1]||g==(d.s<0?8:7)),u<1||!h[0])n=f?k(s.charAt(1),-m,s.charAt(0)):s.charAt(0);else{if(h.length=u,f)for(--i;++h[--u]>i;)h[u]=0,u||(++c,h=[1].concat(h));for(l=h.length;!h[--l];);for(b=0,n="";b<=l;n+=s.charAt(h[b++]));n=k(n,c,s.charAt(0))}return n}}(),M=function(){function e(e,t,n){var r,i,a,o,s=0,u=e.length,c=t%b,l=t/b|0;for(e=e.slice();u--;)r=l*(a=e[u]%b)+(o=e[u]/b|0)*c,s=((i=c*a+r%b*b+s)/n|0)+(r/b|0)+l*o,e[u]=i%n;return s&&(e=[s].concat(e)),e}function t(e,t,n,r){var i,a;if(n!=r)a=n>r?1:-1;else for(i=a=0;it[i]?1:-1;break}return a}function n(e,t,n,r){for(var i=0;n--;)e[n]-=i,i=e[n]1;e.splice(0,1));}return function(r,i,a,o,s){var c,l,h,p,b,m,g,y,w,_,E,S,k,x,T,M,O,A=r.s==i.s?1:-1,L=r.c,C=i.c;if(!L||!L[0]||!C||!C[0])return new 
$(r.s&&i.s&&(L?!C||L[0]!=C[0]:C)?L&&0==L[0]||!C?0*A:A/0:NaN);for(w=(y=new $(A)).c=[],A=a+(l=r.e-i.e)+1,s||(s=f,l=v(r.e/d)-v(i.e/d),A=A/d|0),h=0;C[h]==(L[h]||0);h++);if(C[h]>(L[h]||0)&&l--,A<0)w.push(1),p=!0;else{for(x=L.length,M=C.length,h=0,A+=2,(b=u(s/(C[0]+1)))>1&&(C=e(C,b,s),L=e(L,b,s),M=C.length,x=L.length),k=M,E=(_=L.slice(0,M)).length;E=s/2&&T++;do{if(b=0,(c=t(C,_,M,E))<0){if(S=_[0],M!=E&&(S=S*s+(_[1]||0)),(b=u(S/T))>1)for(b>=s&&(b=s-1),g=(m=e(C,b,s)).length,E=_.length;1==t(m,_,g,E);)b--,n(m,Mt(C,_,M,E);)b++,n(_,M=10;A/=10,h++);K(y,a+(y.e=h+l*d-1)+1,o,p)}else y.e=l,y.r=+p;return y}}(),A=(r=/^(-?)0([xbo])(?=\w[\w.]*$)/i,i=/^([^.]+)\.$/,a=/^\.([^.]+)$/,x=/^-?(Infinity|NaN)$/,T=/^\s*\+(?=[\w.])|^\s+|\s+$/g,function(e,t,n,o){var s,u=n?t:t.replace(T,"");if(x.test(u))e.s=isNaN(u)?null:u<0?-1:1;else{if(!n&&(u=u.replace(r,function(e,t,n){return s="x"==(n=n.toLowerCase())?16:"b"==n?2:8,o&&o!=s?e:t}),o&&(s=o,u=u.replace(i,"$1").replace(a,"0.$1")),t!=u))return new $(u,s);if($.DEBUG)throw Error(c+"Not a"+(o?" 
base "+o:"")+" number: "+t);e.s=null}e.c=e.e=null}),L.absoluteValue=L.abs=function(){var e=new $(this);return e.s<0&&(e.s=1),e},L.comparedTo=function(e,t){return w(this,new $(e,t))},L.decimalPlaces=L.dp=function(e,t){var n,r,i,a=this;if(null!=e)return _(e,0,m),null==t?t=D:_(t,0,8),K(new $(a),e+a.e+1,t);if(!(n=a.c))return null;if(r=((i=n.length-1)-v(this.e/d))*d,i=n[i])for(;i%10==0;i/=10,r--);return r<0&&(r=0),r},L.dividedBy=L.div=function(e,t){return M(this,new $(e,t),I,D)},L.dividedToIntegerBy=L.idiv=function(e,t){return M(this,new $(e,t),0,1)},L.exponentiatedBy=L.pow=function(e,t){var n,r,i,a,o,l,f,h,p,b=this;if((e=new $(e)).c&&!e.isInteger())throw Error(c+"Exponent not an integer: "+V(e));if(null!=t&&(t=new $(t)),l=e.e>14,!b.c||!b.c[0]||1==b.c[0]&&!b.e&&1==b.c.length||!e.c||!e.c[0])return p=new $(Math.pow(+V(b),l?2-E(e):+V(e))),t?p.mod(t):p;if(f=e.s<0,t){if(t.c?!t.c[0]:!t.s)return new $(NaN);(r=!f&&b.isInteger()&&t.isInteger())&&(b=b.mod(t))}else{if(e.e>9&&(b.e>0||b.e<-1||(0==b.e?b.c[0]>1||l&&b.c[1]>=24e7:b.c[0]<8e13||l&&b.c[0]<=9999975e7)))return a=(b.s<0&&E(e),-0),b.e>-1&&(a=1/a),new $(f?1/a:a);B&&(a=s(B/d+2))}for(l?(n=new $(.5),f&&(e.s=1),h=E(e)):h=(i=Math.abs(+V(e)))%2,p=new $(C);;){if(h){if(!(p=p.times(b)).c)break;a?p.c.length>a&&(p.c.length=a):r&&(p=p.mod(t))}if(i){if(0===(i=u(i/2)))break;h=i%2}else if(K(e=e.times(n),e.e+1,1),e.e>14)h=E(e);else{if(0==(i=+V(e)))break;h=i%2}b=b.times(b),a?b.c&&b.c.length>a&&(b.c.length=a):r&&(b=b.mod(t))}return r?p:(f&&(p=C.div(p)),t?p.mod(t):a?K(p,B,D,o):p)},L.integerValue=function(e){var t=new $(this);return null==e?e=D:_(e,0,8),K(t,t.e+1,e)},L.isEqualTo=L.eq=function(e,t){return 0===w(this,new $(e,t))},L.isFinite=function(){return!!this.c},L.isGreaterThan=L.gt=function(e,t){return w(this,new $(e,t))>0},L.isGreaterThanOrEqualTo=L.gte=function(e,t){return 1===(t=w(this,new $(e,t)))||0===t},L.isInteger=function(){return!!this.c&&v(this.e/d)>this.c.length-2},L.isLessThan=L.lt=function(e,t){return 0>w(this,new 
$(e,t))},L.isLessThanOrEqualTo=L.lte=function(e,t){return -1===(t=w(this,new $(e,t)))||0===t},L.isNaN=function(){return!this.s},L.isNegative=function(){return this.s<0},L.isPositive=function(){return this.s>0},L.isZero=function(){return!!this.c&&0==this.c[0]},L.minus=function(e,t){var n,r,i,a,o=this,s=o.s;if(t=(e=new $(e,t)).s,!s||!t)return new $(NaN);if(s!=t)return e.s=-t,o.plus(e);var u=o.e/d,c=e.e/d,l=o.c,h=e.c;if(!u||!c){if(!l||!h)return l?(e.s=-t,e):new $(h?o:NaN);if(!l[0]||!h[0])return h[0]?(e.s=-t,e):new $(l[0]?o:-0)}if(u=v(u),c=v(c),l=l.slice(),s=u-c){for((a=s<0)?(s=-s,i=l):(c=u,i=h),i.reverse(),t=s;t--;i.push(0));i.reverse()}else for(r=(a=(s=l.length)<(t=h.length))?s:t,s=t=0;t0)for(;t--;l[n++]=0);for(t=f-1;r>s;){if(l[--r]=0;){for(n=0,p=S[i]%w,m=S[i]/w|0,a=i+(o=u);a>i;)s=m*(c=E[--o]%w)+(l=E[o]/w|0)*p,n=((c=p*c+s%w*w+g[a]+n)/y|0)+(s/w|0)+m*l,g[a--]=c%y;g[a]=n}return n?++r:g.splice(0,1),W(e,g,r)},L.negated=function(){var e=new $(this);return e.s=-e.s||null,e},L.plus=function(e,t){var n,r=this,i=r.s;if(t=(e=new $(e,t)).s,!i||!t)return new $(NaN);if(i!=t)return e.s=-t,r.minus(e);var a=r.e/d,o=e.e/d,s=r.c,u=e.c;if(!a||!o){if(!s||!u)return new $(i/0);if(!s[0]||!u[0])return u[0]?e:new $(s[0]?r:0*i)}if(a=v(a),o=v(o),s=s.slice(),i=a-o){for(i>0?(o=a,n=u):(i=-i,n=s),n.reverse();i--;n.push(0));n.reverse()}for((i=s.length)-(t=u.length)<0&&(n=u,u=s,s=n,t=i),i=0;t;)i=(s[--t]=s[t]+u[t]+i)/f|0,s[t]=f===s[t]?0:s[t]%f;return i&&(s=[i].concat(s),++o),W(e,s,o)},L.precision=L.sd=function(e,t){var n,r,i,a=this;if(null!=e&&!!e!==e)return _(e,1,m),null==t?t=D:_(t,0,8),K(new $(a),e,t);if(!(n=a.c))return null;if(r=(i=n.length-1)*d+1,i=n[i]){for(;i%10==0;i/=10,r--);for(i=n[0];i>=10;i/=10,r++);}return e&&a.e+1>r&&(r=a.e+1),r},L.shiftedBy=function(e){return _(e,-h,h),this.times("1e"+e)},L.squareRoot=L.sqrt=function(){var e,t,n,r,i,a=this,o=a.c,s=a.s,u=a.e,c=I+4,l=new $("0.5");if(1!==s||!o||!o[0])return new 
$(!s||s<0&&(!o||o[0])?NaN:o?a:1/0);if(0==(s=Math.sqrt(+V(a)))||s==1/0?(((t=y(o)).length+u)%2==0&&(t+="0"),s=Math.sqrt(+t),u=v((u+1)/2)-(u<0||u%2),t=s==1/0?"5e"+u:(t=s.toExponential()).slice(0,t.indexOf("e")+1)+u,n=new $(t)):n=new $(s+""),n.c[0]){for((s=(u=n.e)+c)<3&&(s=0);;)if(i=n,n=l.times(i.plus(M(a,i,c,1))),y(i.c).slice(0,s)===(t=y(n.c)).slice(0,s)){if(n.e0&&b>0){for(a=b%s||s,f=p.substr(0,a);a0&&(f+=l+p.slice(a)),h&&(f="-"+f)}r=d?f+(n.decimalSeparator||"")+((u=+n.fractionGroupSize)?d.replace(RegExp("\\d{"+u+"}\\B","g"),"$&"+(n.fractionGroupSeparator||"")):d):f}return(n.prefix||"")+r+(n.suffix||"")},L.toFraction=function(e){var t,n,r,i,a,o,s,u,l,f,h,b,m=this,g=m.c;if(null!=e&&(!(s=new $(e)).isInteger()&&(s.c||1!==s.s)||s.lt(C)))throw Error(c+"Argument "+(s.isInteger()?"out of range: ":"not an integer: ")+V(s));if(!g)return new $(m);for(t=new $(C),l=n=new $(C),r=u=new $(C),b=y(g),a=t.e=b.length-m.e-1,t.c[0]=p[(o=a%d)<0?d+o:o],e=!e||s.comparedTo(t)>0?a>0?t:l:s,o=j,j=1/0,s=new $(b),u.c[0]=0;f=M(s,t,0,1),1!=(i=n.plus(f.times(r))).comparedTo(e);)n=r,r=i,l=u.plus(f.times(i=l)),u=i,t=s.minus(f.times(i=t)),s=i;return i=M(e.minus(n),r,0,1),u=u.plus(i.times(l)),n=n.plus(i.times(r)),u.s=l.s=m.s,a*=2,h=1>M(l,r,a,D).minus(m).abs().comparedTo(M(u,n,a,D).minus(m).abs())?[l,r]:[u,n],j=o,h},L.toNumber=function(){return+V(this)},L.toPrecision=function(e,t){return null!=e&&_(e,1,m),z(this,e,t,2)},L.toString=function(e){var t,n=this,r=n.s,i=n.e;return null===i?r?(t="Infinity",r<0&&(t="-"+t)):t="NaN":(null==e?t=i<=N||i>=P?S(y(n.c),i):k(y(n.c),i,"0"):10===e?(n=K(new $(n),I+i+1,D),t=k(y(n.c),n.e,"0")):(_(e,2,H.length,"Base"),t=O(k(y(n.c),i,"0"),10,e,r,!0)),r<0&&n.c[0]&&(t="-"+t)),t},L.valueOf=L.toJSON=function(){return V(this)},L._isBigNumber=!0,null!=e&&$.set(e),$}function v(e){var t=0|e;return e>0||e===t?t:t-1}function y(e){for(var t,n,r=1,i=e.length,a=e[0]+"";rc^n?1:-1;for(o=0,s=(u=i.length)<(c=a.length)?u:c;oa[o]^n?1:-1;return u==c?0:u>c^n?1:-1}function 
_(e,t,n,r){if(en||e!==u(e))throw Error(c+(r||"Argument")+("number"==typeof e?en?" out of range: ":" not an integer: ":" not a primitive number: ")+String(e))}function E(e){var t=e.c.length-1;return v(e.e/d)==t&&e.c[t]%2!=0}function S(e,t){return(e.length>1?e.charAt(0)+"."+e.slice(1):e)+(t<0?"e":"e+")+t}function k(e,t,n){var r,i;if(t<0){for(i=n+".";++t;i+=n);e=i+e}else if(r=e.length,++t>r){for(i=n,t-=r;--t;i+=n);e+=i}else ti});let i=r},48764(e,t,n){"use strict";/*! + * The buffer module from node.js, for the browser. + * + * @author Feross Aboukhadijeh + * @license MIT + */ var r=n(79742),i=n(80645),a="function"==typeof Symbol&&"function"==typeof Symbol.for?Symbol.for("nodejs.util.inspect.custom"):null;t.Buffer=c,t.SlowBuffer=w,t.INSPECT_MAX_BYTES=50;var o=2147483647;function s(){try{var e=new Uint8Array(1),t={foo:function(){return 42}};return Object.setPrototypeOf(t,Uint8Array.prototype),Object.setPrototypeOf(e,t),42===e.foo()}catch(n){return!1}}function u(e){if(e>o)throw RangeError('The value "'+e+'" is invalid for option "size"');var t=new Uint8Array(e);return Object.setPrototypeOf(t,c.prototype),t}function c(e,t,n){if("number"==typeof e){if("string"==typeof t)throw TypeError('The "string" argument must be of type string. Received type number');return h(e)}return l(e,t,n)}function l(e,t,n){if("string"==typeof e)return p(e,t);if(ArrayBuffer.isView(e))return m(e);if(null==e)throw TypeError("The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. Received type "+typeof e);if(X(e,ArrayBuffer)||e&&X(e.buffer,ArrayBuffer)||"undefined"!=typeof SharedArrayBuffer&&(X(e,SharedArrayBuffer)||e&&X(e.buffer,SharedArrayBuffer)))return g(e,t,n);if("number"==typeof e)throw TypeError('The "value" argument must not be of type number. 
Received type number');var r=e.valueOf&&e.valueOf();if(null!=r&&r!==e)return c.from(r,t,n);var i=v(e);if(i)return i;if("undefined"!=typeof Symbol&&null!=Symbol.toPrimitive&&"function"==typeof e[Symbol.toPrimitive])return c.from(e[Symbol.toPrimitive]("string"),t,n);throw TypeError("The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. Received type "+typeof e)}function f(e){if("number"!=typeof e)throw TypeError('"size" argument must be of type number');if(e<0)throw RangeError('The value "'+e+'" is invalid for option "size"')}function d(e,t,n){return(f(e),e<=0)?u(e):void 0!==t?"string"==typeof n?u(e).fill(t,n):u(e).fill(t):u(e)}function h(e){return f(e),u(e<0?0:0|y(e))}function p(e,t){if(("string"!=typeof t||""===t)&&(t="utf8"),!c.isEncoding(t))throw TypeError("Unknown encoding: "+t);var n=0|_(e,t),r=u(n),i=r.write(e,t);return i!==n&&(r=r.slice(0,i)),r}function b(e){for(var t=e.length<0?0:0|y(e.length),n=u(t),r=0;r=o)throw RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+o.toString(16)+" bytes");return 0|e}function w(e){return+e!=e&&(e=0),c.alloc(+e)}function _(e,t){if(c.isBuffer(e))return e.length;if(ArrayBuffer.isView(e)||X(e,ArrayBuffer))return e.byteLength;if("string"!=typeof e)throw TypeError('The "string" argument must be one of type string, Buffer, or ArrayBuffer. 
Received type '+typeof e);var n=e.length,r=arguments.length>2&&!0===arguments[2];if(!r&&0===n)return 0;for(var i=!1;;)switch(t){case"ascii":case"latin1":case"binary":return n;case"utf8":case"utf-8":return W(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*n;case"hex":return n>>>1;case"base64":return q(e).length;default:if(i)return r?-1:W(e).length;t=(""+t).toLowerCase(),i=!0}}function E(e,t,n){var r=!1;if((void 0===t||t<0)&&(t=0),t>this.length||((void 0===n||n>this.length)&&(n=this.length),n<=0||(n>>>=0)<=(t>>>=0)))return"";for(e||(e="utf8");;)switch(e){case"hex":return j(this,t,n);case"utf8":case"utf-8":return I(this,t,n);case"ascii":return P(this,t,n);case"latin1":case"binary":return R(this,t,n);case"base64":return C(this,t,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return F(this,t,n);default:if(r)throw TypeError("Unknown encoding: "+e);e=(e+"").toLowerCase(),r=!0}}function S(e,t,n){var r=e[t];e[t]=e[n],e[n]=r}function k(e,t,n,r,i){if(0===e.length)return -1;if("string"==typeof n?(r=n,n=0):n>2147483647?n=2147483647:n<-2147483648&&(n=-2147483648),J(n=+n)&&(n=i?0:e.length-1),n<0&&(n=e.length+n),n>=e.length){if(i)return -1;n=e.length-1}else if(n<0){if(!i)return -1;n=0}if("string"==typeof t&&(t=c.from(t,r)),c.isBuffer(t))return 0===t.length?-1:x(e,t,n,r,i);if("number"==typeof t)return(t&=255,"function"==typeof Uint8Array.prototype.indexOf)?i?Uint8Array.prototype.indexOf.call(e,t,n):Uint8Array.prototype.lastIndexOf.call(e,t,n):x(e,[t],n,r,i);throw TypeError("val must be string, number or Buffer")}function x(e,t,n,r,i){var a,o=1,s=e.length,u=t.length;if(void 0!==r&&("ucs2"===(r=String(r).toLowerCase())||"ucs-2"===r||"utf16le"===r||"utf-16le"===r)){if(e.length<2||t.length<2)return -1;o=2,s/=2,u/=2,n/=2}function c(e,t){return 1===o?e[t]:e.readUInt16BE(t*o)}if(i){var l=-1;for(a=n;as&&(n=s-u),a=n;a>=0;a--){for(var f=!0,d=0;di&&(r=i):r=i;var a=t.length;r>a/2&&(r=a/2);for(var o=0;o239?4:c>223?3:c>191?2:1;if(i+f<=n)switch(f){case 
1:c<128&&(l=c);break;case 2:(192&(a=e[i+1]))==128&&(u=(31&c)<<6|63&a)>127&&(l=u);break;case 3:a=e[i+1],o=e[i+2],(192&a)==128&&(192&o)==128&&(u=(15&c)<<12|(63&a)<<6|63&o)>2047&&(u<55296||u>57343)&&(l=u);break;case 4:a=e[i+1],o=e[i+2],s=e[i+3],(192&a)==128&&(192&o)==128&&(192&s)==128&&(u=(15&c)<<18|(63&a)<<12|(63&o)<<6|63&s)>65535&&u<1114112&&(l=u)}null===l?(l=65533,f=1):l>65535&&(l-=65536,r.push(l>>>10&1023|55296),l=56320|1023&l),r.push(l),i+=f}return N(r)}t.kMaxLength=o,c.TYPED_ARRAY_SUPPORT=s(),c.TYPED_ARRAY_SUPPORT||"undefined"==typeof console||"function"!=typeof console.error||console.error("This browser lacks typed array (Uint8Array) support which is required by `buffer` v5.x. Use `buffer` v4.x if you require old browser support."),Object.defineProperty(c.prototype,"parent",{enumerable:!0,get:function(){if(c.isBuffer(this))return this.buffer}}),Object.defineProperty(c.prototype,"offset",{enumerable:!0,get:function(){if(c.isBuffer(this))return this.byteOffset}}),c.poolSize=8192,c.from=function(e,t,n){return l(e,t,n)},Object.setPrototypeOf(c.prototype,Uint8Array.prototype),Object.setPrototypeOf(c,Uint8Array),c.alloc=function(e,t,n){return d(e,t,n)},c.allocUnsafe=function(e){return h(e)},c.allocUnsafeSlow=function(e){return h(e)},c.isBuffer=function(e){return null!=e&&!0===e._isBuffer&&e!==c.prototype},c.compare=function(e,t){if(X(e,Uint8Array)&&(e=c.from(e,e.offset,e.byteLength)),X(t,Uint8Array)&&(t=c.from(t,t.offset,t.byteLength)),!c.isBuffer(e)||!c.isBuffer(t))throw TypeError('The "buf1", "buf2" arguments must be one of type Buffer or Uint8Array');if(e===t)return 0;for(var n=e.length,r=t.length,i=0,a=Math.min(n,r);ir.length?c.from(a).copy(r,i):Uint8Array.prototype.set.call(r,a,i);else if(c.isBuffer(a))a.copy(r,i);else throw TypeError('"list" argument must be an Array of Buffers');i+=a.length}return r},c.byteLength=_,c.prototype._isBuffer=!0,c.prototype.swap16=function(){var e=this.length;if(e%2!=0)throw RangeError("Buffer size must be a multiple of 
16-bits");for(var t=0;tn&&(e+=" ... "),""},a&&(c.prototype[a]=c.prototype.inspect),c.prototype.compare=function(e,t,n,r,i){if(X(e,Uint8Array)&&(e=c.from(e,e.offset,e.byteLength)),!c.isBuffer(e))throw TypeError('The "target" argument must be one of type Buffer or Uint8Array. Received type '+typeof e);if(void 0===t&&(t=0),void 0===n&&(n=e?e.length:0),void 0===r&&(r=0),void 0===i&&(i=this.length),t<0||n>e.length||r<0||i>this.length)throw RangeError("out of range index");if(r>=i&&t>=n)return 0;if(r>=i)return -1;if(t>=n)return 1;if(t>>>=0,n>>>=0,r>>>=0,i>>>=0,this===e)return 0;for(var a=i-r,o=n-t,s=Math.min(a,o),u=this.slice(r,i),l=e.slice(t,n),f=0;f>>=0,isFinite(n)?(n>>>=0,void 0===r&&(r="utf8")):(r=n,n=void 0);else throw Error("Buffer.write(string, encoding, offset[, length]) is no longer supported");var i=this.length-t;if((void 0===n||n>i)&&(n=i),e.length>0&&(n<0||t<0)||t>this.length)throw RangeError("Attempt to write outside buffer bounds");r||(r="utf8");for(var a=!1;;)switch(r){case"hex":return T(this,e,t,n);case"utf8":case"utf-8":return M(this,e,t,n);case"ascii":case"latin1":case"binary":return O(this,e,t,n);case"base64":return A(this,e,t,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return L(this,e,t,n);default:if(a)throw TypeError("Unknown encoding: "+r);r=(""+r).toLowerCase(),a=!0}},c.prototype.toJSON=function(){return{type:"Buffer",data:Array.prototype.slice.call(this._arr||this,0)}};var D=4096;function N(e){var t=e.length;if(t<=D)return String.fromCharCode.apply(String,e);for(var n="",r=0;rr)&&(n=r);for(var i="",a=t;an)throw RangeError("Trying to access beyond buffer length")}function B(e,t,n,r,i,a){if(!c.isBuffer(e))throw TypeError('"buffer" argument must be a Buffer instance');if(t>i||te.length)throw RangeError("Index out of range")}function U(e,t,n,r,i,a){if(n+r>e.length||n<0)throw RangeError("Index out of range")}function H(e,t,n,r,a){return t=+t,n>>>=0,a||U(e,t,n,4,34028234663852886e22,-34028234663852886e22),i.write(e,t,n,r,23,4),n+4}function 
$(e,t,n,r,a){return t=+t,n>>>=0,a||U(e,t,n,8,17976931348623157e292,-17976931348623157e292),i.write(e,t,n,r,52,8),n+8}c.prototype.slice=function(e,t){var n=this.length;e=~~e,t=void 0===t?n:~~t,e<0?(e+=n)<0&&(e=0):e>n&&(e=n),t<0?(t+=n)<0&&(t=0):t>n&&(t=n),t>>=0,t>>>=0,n||Y(e,t,this.length);for(var r=this[e],i=1,a=0;++a>>=0,t>>>=0,n||Y(e,t,this.length);for(var r=this[e+--t],i=1;t>0&&(i*=256);)r+=this[e+--t]*i;return r},c.prototype.readUint8=c.prototype.readUInt8=function(e,t){return e>>>=0,t||Y(e,1,this.length),this[e]},c.prototype.readUint16LE=c.prototype.readUInt16LE=function(e,t){return e>>>=0,t||Y(e,2,this.length),this[e]|this[e+1]<<8},c.prototype.readUint16BE=c.prototype.readUInt16BE=function(e,t){return e>>>=0,t||Y(e,2,this.length),this[e]<<8|this[e+1]},c.prototype.readUint32LE=c.prototype.readUInt32LE=function(e,t){return e>>>=0,t||Y(e,4,this.length),(this[e]|this[e+1]<<8|this[e+2]<<16)+16777216*this[e+3]},c.prototype.readUint32BE=c.prototype.readUInt32BE=function(e,t){return e>>>=0,t||Y(e,4,this.length),16777216*this[e]+(this[e+1]<<16|this[e+2]<<8|this[e+3])},c.prototype.readIntLE=function(e,t,n){e>>>=0,t>>>=0,n||Y(e,t,this.length);for(var r=this[e],i=1,a=0;++a=(i*=128)&&(r-=Math.pow(2,8*t)),r},c.prototype.readIntBE=function(e,t,n){e>>>=0,t>>>=0,n||Y(e,t,this.length);for(var r=t,i=1,a=this[e+--r];r>0&&(i*=256);)a+=this[e+--r]*i;return a>=(i*=128)&&(a-=Math.pow(2,8*t)),a},c.prototype.readInt8=function(e,t){return(e>>>=0,t||Y(e,1,this.length),128&this[e])?-((255-this[e]+1)*1):this[e]},c.prototype.readInt16LE=function(e,t){e>>>=0,t||Y(e,2,this.length);var n=this[e]|this[e+1]<<8;return 32768&n?4294901760|n:n},c.prototype.readInt16BE=function(e,t){e>>>=0,t||Y(e,2,this.length);var n=this[e+1]|this[e]<<8;return 32768&n?4294901760|n:n},c.prototype.readInt32LE=function(e,t){return e>>>=0,t||Y(e,4,this.length),this[e]|this[e+1]<<8|this[e+2]<<16|this[e+3]<<24},c.prototype.readInt32BE=function(e,t){return 
e>>>=0,t||Y(e,4,this.length),this[e]<<24|this[e+1]<<16|this[e+2]<<8|this[e+3]},c.prototype.readFloatLE=function(e,t){return e>>>=0,t||Y(e,4,this.length),i.read(this,e,!0,23,4)},c.prototype.readFloatBE=function(e,t){return e>>>=0,t||Y(e,4,this.length),i.read(this,e,!1,23,4)},c.prototype.readDoubleLE=function(e,t){return e>>>=0,t||Y(e,8,this.length),i.read(this,e,!0,52,8)},c.prototype.readDoubleBE=function(e,t){return e>>>=0,t||Y(e,8,this.length),i.read(this,e,!1,52,8)},c.prototype.writeUintLE=c.prototype.writeUIntLE=function(e,t,n,r){if(e=+e,t>>>=0,n>>>=0,!r){var i=Math.pow(2,8*n)-1;B(this,e,t,n,i,0)}var a=1,o=0;for(this[t]=255&e;++o>>=0,n>>>=0,!r){var i=Math.pow(2,8*n)-1;B(this,e,t,n,i,0)}var a=n-1,o=1;for(this[t+a]=255&e;--a>=0&&(o*=256);)this[t+a]=e/o&255;return t+n},c.prototype.writeUint8=c.prototype.writeUInt8=function(e,t,n){return e=+e,t>>>=0,n||B(this,e,t,1,255,0),this[t]=255&e,t+1},c.prototype.writeUint16LE=c.prototype.writeUInt16LE=function(e,t,n){return e=+e,t>>>=0,n||B(this,e,t,2,65535,0),this[t]=255&e,this[t+1]=e>>>8,t+2},c.prototype.writeUint16BE=c.prototype.writeUInt16BE=function(e,t,n){return e=+e,t>>>=0,n||B(this,e,t,2,65535,0),this[t]=e>>>8,this[t+1]=255&e,t+2},c.prototype.writeUint32LE=c.prototype.writeUInt32LE=function(e,t,n){return e=+e,t>>>=0,n||B(this,e,t,4,4294967295,0),this[t+3]=e>>>24,this[t+2]=e>>>16,this[t+1]=e>>>8,this[t]=255&e,t+4},c.prototype.writeUint32BE=c.prototype.writeUInt32BE=function(e,t,n){return e=+e,t>>>=0,n||B(this,e,t,4,4294967295,0),this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e,t+4},c.prototype.writeIntLE=function(e,t,n,r){if(e=+e,t>>>=0,!r){var i=Math.pow(2,8*n-1);B(this,e,t,n,i-1,-i)}var a=0,o=1,s=0;for(this[t]=255&e;++a>0)-s&255;return t+n},c.prototype.writeIntBE=function(e,t,n,r){if(e=+e,t>>>=0,!r){var i=Math.pow(2,8*n-1);B(this,e,t,n,i-1,-i)}var a=n-1,o=1,s=0;for(this[t+a]=255&e;--a>=0&&(o*=256);)e<0&&0===s&&0!==this[t+a+1]&&(s=1),this[t+a]=(e/o>>0)-s&255;return 
t+n},c.prototype.writeInt8=function(e,t,n){return e=+e,t>>>=0,n||B(this,e,t,1,127,-128),e<0&&(e=255+e+1),this[t]=255&e,t+1},c.prototype.writeInt16LE=function(e,t,n){return e=+e,t>>>=0,n||B(this,e,t,2,32767,-32768),this[t]=255&e,this[t+1]=e>>>8,t+2},c.prototype.writeInt16BE=function(e,t,n){return e=+e,t>>>=0,n||B(this,e,t,2,32767,-32768),this[t]=e>>>8,this[t+1]=255&e,t+2},c.prototype.writeInt32LE=function(e,t,n){return e=+e,t>>>=0,n||B(this,e,t,4,2147483647,-2147483648),this[t]=255&e,this[t+1]=e>>>8,this[t+2]=e>>>16,this[t+3]=e>>>24,t+4},c.prototype.writeInt32BE=function(e,t,n){return e=+e,t>>>=0,n||B(this,e,t,4,2147483647,-2147483648),e<0&&(e=4294967295+e+1),this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e,t+4},c.prototype.writeFloatLE=function(e,t,n){return H(this,e,t,!0,n)},c.prototype.writeFloatBE=function(e,t,n){return H(this,e,t,!1,n)},c.prototype.writeDoubleLE=function(e,t,n){return $(this,e,t,!0,n)},c.prototype.writeDoubleBE=function(e,t,n){return $(this,e,t,!1,n)},c.prototype.copy=function(e,t,n,r){if(!c.isBuffer(e))throw TypeError("argument should be a Buffer");if(n||(n=0),r||0===r||(r=this.length),t>=e.length&&(t=e.length),t||(t=0),r>0&&r=this.length)throw RangeError("Index out of range");if(r<0)throw RangeError("sourceEnd out of bounds");r>this.length&&(r=this.length),e.length-t>>=0,n=void 0===n?this.length:n>>>0,e||(e=0),"number"==typeof e)for(i=t;i55295&&n<57344){if(!i){if(n>56319||o+1===r){(t-=3)>-1&&a.push(239,191,189);continue}i=n;continue}if(n<56320){(t-=3)>-1&&a.push(239,191,189),i=n;continue}n=(i-55296<<10|n-56320)+65536}else i&&(t-=3)>-1&&a.push(239,191,189);if(i=null,n<128){if((t-=1)<0)break;a.push(n)}else if(n<2048){if((t-=2)<0)break;a.push(n>>6|192,63&n|128)}else if(n<65536){if((t-=3)<0)break;a.push(n>>12|224,n>>6&63|128,63&n|128)}else if(n<1114112){if((t-=4)<0)break;a.push(n>>18|240,n>>12&63|128,n>>6&63|128,63&n|128)}else throw Error("Invalid code point")}return a}function K(e){for(var 
t=[],n=0;n>8,a.push(i=n%256),a.push(r);return a}function q(e){return r.toByteArray(G(e))}function Z(e,t,n,r){for(var i=0;i=t.length)&&!(i>=e.length);++i)t[i+n]=e[i];return i}function X(e,t){return e instanceof t||null!=e&&null!=e.constructor&&null!=e.constructor.name&&e.constructor.name===t.name}function J(e){return e!=e}var Q=function(){for(var e="0123456789abcdef",t=Array(256),n=0;n<16;++n)for(var r=16*n,i=0;i<16;++i)t[r+i]=e[n]+e[i];return t}()},94184(e,t){var n,r; /*! + Copyright (c) 2018 Jed Watson. + Licensed under the MIT License (MIT), see + http://jedwatson.github.io/classnames +*/ !function(){"use strict";var i={}.hasOwnProperty;function a(){for(var e=[],t=0;t>8&255]},F=function(e){return[255&e,e>>8&255,e>>16&255,e>>24&255]},Y=function(e){return e[3]<<24|e[2]<<16|e[1]<<8|e[0]},B=function(e){return N(e,23,4)},U=function(e){return N(e,52,8)},H=function(e,t){g(e[x],t,{get:function(){return _(this)[t]}})},$=function(e,t,n,r){var i=d(n),a=_(e);if(i+t>a.byteLength)throw D(M);var o=_(a.buffer).bytes,s=i+a.byteOffset,u=o.slice(s,s+t);return r?u:u.reverse()},z=function(e,t,n,r,i,a){var o=d(n),s=_(e);if(o+t>s.byteLength)throw D(M);for(var u=_(s.buffer).bytes,c=o+s.byteOffset,l=r(+i),f=0;fV;)(G=K[V++])in A||o(A,G,O[G]);W.constructor=A}b&&p(C)!==I&&b(C,I);var q=new L(new A(2)),Z=C.setInt8;q.setInt8(0,2147483648),q.setInt8(1,2147483649),(q.getInt8(0)||!q.getInt8(1))&&s(C,{setInt8:function(e,t){Z.call(this,e,t<<24>>24)},setUint8:function(e,t){Z.call(this,e,t<<24>>24)}},{unsafe:!0})}else A=function(e){c(this,A,S);var t=d(e);E(this,{bytes:v.call(Array(t),0),byteLength:t}),i||(this.byteLength=t)},L=function(e,t,n){c(this,L,k),c(e,A,k);var r=_(e).byteLength,a=l(t);if(a<0||a>r)throw D("Wrong offset");if(n=void 0===n?r-a:f(n),a+n>r)throw D(T);E(this,{buffer:e,byteLength:n,byteOffset:a}),i||(this.buffer=e,this.byteLength=n,this.byteOffset=a)},i&&(H(A,"byteLength"),H(L,"buffer"),H(L,"byteLength"),H(L,"byteOffset")),s(L[x],{getInt8:function(e){return 
$(this,1,e)[0]<<24>>24},getUint8:function(e){return $(this,1,e)[0]},getInt16:function(e){var t=$(this,2,e,arguments.length>1?arguments[1]:void 0);return(t[1]<<8|t[0])<<16>>16},getUint16:function(e){var t=$(this,2,e,arguments.length>1?arguments[1]:void 0);return t[1]<<8|t[0]},getInt32:function(e){return Y($(this,4,e,arguments.length>1?arguments[1]:void 0))},getUint32:function(e){return Y($(this,4,e,arguments.length>1?arguments[1]:void 0))>>>0},getFloat32:function(e){return P($(this,4,e,arguments.length>1?arguments[1]:void 0),23)},getFloat64:function(e){return P($(this,8,e,arguments.length>1?arguments[1]:void 0),52)},setInt8:function(e,t){z(this,1,e,R,t)},setUint8:function(e,t){z(this,1,e,R,t)},setInt16:function(e,t){z(this,2,e,j,t,arguments.length>2?arguments[2]:void 0)},setUint16:function(e,t){z(this,2,e,j,t,arguments.length>2?arguments[2]:void 0)},setInt32:function(e,t){z(this,4,e,F,t,arguments.length>2?arguments[2]:void 0)},setUint32:function(e,t){z(this,4,e,F,t,arguments.length>2?arguments[2]:void 0)},setFloat32:function(e,t){z(this,4,e,B,t,arguments.length>2?arguments[2]:void 0)},setFloat64:function(e,t){z(this,8,e,U,t,arguments.length>2?arguments[2]:void 0)}});y(A,S),y(L,k),e.exports={ArrayBuffer:A,DataView:L}},1048(e,t,n){"use strict";var r=n(47908),i=n(51400),a=n(17466),o=Math.min;e.exports=[].copyWithin||function(e,t){var n=r(this),s=a(n.length),u=i(e,s),c=i(t,s),l=arguments.length>2?arguments[2]:void 0,f=o((void 0===l?s:i(l,s))-c,s-u),d=1;for(c0;)c in n?n[u]=n[c]:delete n[u],u+=d,c+=d;return n}},21285(e,t,n){"use strict";var r=n(47908),i=n(51400),a=n(17466);e.exports=function(e){for(var t=r(this),n=a(t.length),o=arguments.length,s=i(o>1?arguments[1]:void 0,n),u=o>2?arguments[2]:void 0,c=void 0===u?n:i(u,n);c>s;)t[s++]=e;return t}},18533(e,t,n){"use strict";var r=n(42092).forEach,i=n(9341)("forEach");e.exports=i?[].forEach:function(e){return r(this,e,arguments.length>1?arguments[1]:void 0)}},97745(e){e.exports=function(e,t){for(var n=0,r=t.length,i=new 
e(r);r>n;)i[n]=t[n++];return i}},48457(e,t,n){"use strict";var r=n(49974),i=n(47908),a=n(53411),o=n(97659),s=n(17466),u=n(86135),c=n(18554),l=n(71246);e.exports=function(e){var t,n,f,d,h,p,b=i(e),m="function"==typeof this?this:Array,g=arguments.length,v=g>1?arguments[1]:void 0,y=void 0!==v,w=l(b),_=0;if(y&&(v=r(v,g>2?arguments[2]:void 0,2)),void 0==w||m==Array&&o(w))for(t=s(b.length),n=new m(t);t>_;_++)p=y?v(b[_],_):b[_],u(n,_,p);else for(h=(d=c(b,w)).next,n=new m;!(f=h.call(d)).done;_++)p=y?a(d,v,[f.value,_],!0):f.value,u(n,_,p);return n.length=_,n}},61386(e,t,n){var r=n(49974),i=n(68361),a=n(47908),o=n(17466),s=n(34948),u=n(70030),c=n(97745),l=[].push;e.exports=function(e,t,n,f){for(var d,h,p,b=a(e),m=i(b),g=r(t,n,3),v=u(null),y=o(m.length),w=0;y>w;w++)(h=s(g(p=m[w],w,b)))in v?l.call(v[h],p):v[h]=[p];if(f&&(d=f(b))!==Array)for(h in v)v[h]=c(d,v[h]);return v}},41318(e,t,n){var r=n(45656),i=n(17466),a=n(51400),o=function(e){return function(t,n,o){var s,u=r(t),c=i(u.length),l=a(o,c);if(e&&n!=n){for(;c>l;)if((s=u[l++])!=s)return!0}else for(;c>l;l++)if((e||l in u)&&u[l]===n)return e||l||0;return!e&&-1}};e.exports={includes:o(!0),indexOf:o(!1)}},9671(e,t,n){var r=n(49974),i=n(68361),a=n(47908),o=n(17466),s=function(e){var t=1==e;return function(n,s,u){for(var c,l,f=a(n),d=i(f),h=r(s,u,3),p=o(d.length);p-- >0;)if(l=h(c=d[p],p,f))switch(e){case 0:return c;case 1:return p}return t?-1:void 0}};e.exports={findLast:s(0),findLastIndex:s(1)}},42092(e,t,n){var r=n(49974),i=n(68361),a=n(47908),o=n(17466),s=n(65417),u=[].push,c=function(e){var t=1==e,n=2==e,c=3==e,l=4==e,f=6==e,d=7==e,h=5==e||f;return function(p,b,m,g){for(var v,y,w=a(p),_=i(w),E=r(b,m,3),S=o(_.length),k=0,x=g||s,T=t?x(p,S):n||d?x(p,0):void 0;S>k;k++)if((h||k in _)&&(y=E(v=_[k],k,w),e)){if(t)T[k]=y;else if(y)switch(e){case 3:return!0;case 5:return v;case 6:return k;case 2:u.call(T,v)}else switch(e){case 4:return!1;case 7:u.call(T,v)}}return 
f?-1:c||l?l:T}};e.exports={forEach:c(0),map:c(1),filter:c(2),some:c(3),every:c(4),find:c(5),findIndex:c(6),filterReject:c(7)}},86583(e,t,n){"use strict";var r=n(45656),i=n(99958),a=n(17466),o=n(9341),s=Math.min,u=[].lastIndexOf,c=!!u&&1/[1].lastIndexOf(1,-0)<0,l=o("lastIndexOf"),f=c||!l;e.exports=f?function(e){if(c)return u.apply(this,arguments)||0;var t=r(this),n=a(t.length),o=n-1;for(arguments.length>1&&(o=s(o,i(arguments[1]))),o<0&&(o=n+o);o>=0;o--)if(o in t&&t[o]===e)return o||0;return -1}:u},81194(e,t,n){var r=n(47293),i=n(5112),a=n(7392),o=i("species");e.exports=function(e){return a>=51||!r(function(){var t=[];return(t.constructor={})[o]=function(){return{foo:1}},1!==t[e](Boolean).foo})}},9341(e,t,n){"use strict";var r=n(47293);e.exports=function(e,t){var n=[][e];return!!n&&r(function(){n.call(null,t||function(){throw 1},1)})}},53671(e,t,n){var r=n(13099),i=n(47908),a=n(68361),o=n(17466),s=function(e){return function(t,n,s,u){r(n);var c=i(t),l=a(c),f=o(c.length),d=e?f-1:0,h=e?-1:1;if(s<2)for(;;){if(d in l){u=l[d],d+=h;break}if(d+=h,e?d<0:f<=d)throw TypeError("Reduce of empty array with no initial value")}for(;e?d>=0:f>d;d+=h)d in l&&(u=n(u,l[d],d,c));return u}};e.exports={left:s(!1),right:s(!0)}},94362(e){var t=Math.floor,n=function(e,a){var o=e.length,s=t(o/2);return o<8?r(e,a):i(n(e.slice(0,s),a),n(e.slice(s),a),a)},r=function(e,t){for(var n,r,i=e.length,a=1;a0;)e[r]=e[--r];r!==a++&&(e[r]=n)}return e},i=function(e,t,n){for(var r=e.length,i=t.length,a=0,o=0,s=[];a=n(e[a],t[o])?e[a++]:t[o++]):s.push(a1?arguments[1]:void 0;return(r(this),(t=void 0!==c)&&r(c),void 0==e)?new this:(n=[],t?(o=0,s=i(c,u>2?arguments[2]:void 0,2),a(e,function(e){n.push(s(e,o++))})):a(e,n.push,{that:n}),new this(n))}},82044(e){"use strict";e.exports=function(){for(var e=arguments.length,t=Array(e);e--;)t[e]=arguments[e];return new this(t)}},95631(e,t,n){"use strict";var 
r=n(3070).f,i=n(70030),a=n(12248),o=n(49974),s=n(25787),u=n(20408),c=n(70654),l=n(96340),f=n(19781),d=n(62423).fastKey,h=n(29909),p=h.set,b=h.getterFor;e.exports={getConstructor:function(e,t,n,c){var l=e(function(e,r){s(e,l,t),p(e,{type:t,index:i(null),first:void 0,last:void 0,size:0}),f||(e.size=0),void 0!=r&&u(r,e[c],{that:e,AS_ENTRIES:n})}),h=b(t),m=function(e,t,n){var r,i,a=h(e),o=g(e,t);return o?o.value=n:(a.last=o={index:i=d(t,!0),key:t,value:n,previous:r=a.last,next:void 0,removed:!1},a.first||(a.first=o),r&&(r.next=o),f?a.size++:e.size++,"F"!==i&&(a.index[i]=o)),e},g=function(e,t){var n,r=h(e),i=d(t);if("F"!==i)return r.index[i];for(n=r.first;n;n=n.next)if(n.key==t)return n};return a(l.prototype,{clear:function(){for(var e=this,t=h(e),n=t.index,r=t.first;r;)r.removed=!0,r.previous&&(r.previous=r.previous.next=void 0),delete n[r.index],r=r.next;t.first=t.last=void 0,f?t.size=0:e.size=0},delete:function(e){var t=this,n=h(t),r=g(t,e);if(r){var i=r.next,a=r.previous;delete n.index[r.index],r.removed=!0,a&&(a.next=i),i&&(i.previous=a),n.first==r&&(n.first=i),n.last==r&&(n.last=a),f?n.size--:t.size--}return!!r},forEach:function(e){for(var t,n=h(this),r=o(e,arguments.length>1?arguments[1]:void 0,3);t=t?t.next:n.first;)for(r(t.value,t.key,this);t&&t.removed;)t=t.previous},has:function(e){return!!g(this,e)}}),a(l.prototype,n?{get:function(e){var t=g(this,e);return t&&t.value},set:function(e,t){return m(this,0===e?0:e,t)}}:{add:function(e){return m(this,e=0===e?0:e,e)}}),f&&r(l.prototype,"size",{get:function(){return h(this).size}}),l},setStrong:function(e,t,n){var r=t+" Iterator",i=b(t),a=b(r);c(e,t,function(e,t){p(this,{type:r,target:e,state:i(e),kind:t,last:void 0})},function(){for(var e=a(this),t=e.kind,n=e.last;n&&n.removed;)n=n.previous;return e.target&&(e.last=n=n?n.next:e.state.first)?"keys"==t?{value:n.key,done:!1}:"values"==t?{value:n.value,done:!1}:{value:[n.key,n.value],done:!1}:(e.target=void 0,{value:void 
0,done:!0})},n?"entries":"values",!n,!0),l(t)}}},29320(e,t,n){"use strict";var r=n(12248),i=n(62423).getWeakData,a=n(19670),o=n(70111),s=n(25787),u=n(20408),c=n(42092),l=n(86656),f=n(29909),d=f.set,h=f.getterFor,p=c.find,b=c.findIndex,m=0,g=function(e){return e.frozen||(e.frozen=new v)},v=function(){this.entries=[]},y=function(e,t){return p(e.entries,function(e){return e[0]===t})};v.prototype={get:function(e){var t=y(this,e);if(t)return t[1]},has:function(e){return!!y(this,e)},set:function(e,t){var n=y(this,e);n?n[1]=t:this.entries.push([e,t])},delete:function(e){var t=b(this.entries,function(t){return t[0]===e});return~t&&this.entries.splice(t,1),!!~t}},e.exports={getConstructor:function(e,t,n,c){var f=e(function(e,r){s(e,f,t),d(e,{type:t,id:m++,frozen:void 0}),void 0!=r&&u(r,e[c],{that:e,AS_ENTRIES:n})}),p=h(t),b=function(e,t,n){var r=p(e),o=i(a(t),!0);return!0===o?g(r).set(t,n):o[r.id]=n,e};return r(f.prototype,{delete:function(e){var t=p(this);if(!o(e))return!1;var n=i(e);return!0===n?g(t).delete(e):n&&l(n,t.id)&&delete n[t.id]},has:function(e){var t=p(this);if(!o(e))return!1;var n=i(e);return!0===n?g(t).has(e):n&&l(n,t.id)}}),r(f.prototype,n?{get:function(e){var t=p(this);if(o(e)){var n=i(e);return!0===n?g(t).get(e):n?n[t.id]:void 0}},set:function(e,t){return b(this,e,t)}}:{add:function(e){return b(this,e,!0)}}),f}}},77710(e,t,n){"use strict";var r=n(82109),i=n(17854),a=n(54705),o=n(31320),s=n(62423),u=n(20408),c=n(25787),l=n(70111),f=n(47293),d=n(17072),h=n(58003),p=n(79587);e.exports=function(e,t,n){var b=-1!==e.indexOf("Map"),m=-1!==e.indexOf("Weak"),g=b?"set":"add",v=i[e],y=v&&v.prototype,w=v,_={},E=function(e){var t=y[e];o(y,e,"add"==e?function(e){return t.call(this,0===e?0:e),this}:"delete"==e?function(e){return(!m||!!l(e))&&t.call(this,0===e?0:e)}:"get"==e?function(e){return m&&!l(e)?void 0:t.call(this,0===e?0:e)}:"has"==e?function(e){return(!m||!!l(e))&&t.call(this,0===e?0:e)}:function(e,n){return 
t.call(this,0===e?0:e,n),this})};if(a(e,"function"!=typeof v||!(m||y.forEach&&!f(function(){new v().entries().next()}))))w=n.getConstructor(t,e,b,g),s.enable();else if(a(e,!0)){var S=new w,k=S[g](m?{}:-0,1)!=S,x=f(function(){S.has(1)}),T=d(function(e){new v(e)}),M=!m&&f(function(){for(var e=new v,t=5;t--;)e[g](t,t);return!e.has(-0)});T||((w=t(function(t,n){c(t,w,e);var r=p(new v,t,w);return void 0!=n&&u(n,r[g],{that:r,AS_ENTRIES:b}),r})).prototype=y,y.constructor=w),(x||M)&&(E("delete"),E("has"),b&&E("get")),(M||k)&&E(g),m&&y.clear&&delete y.clear}return _[e]=w,r({global:!0,forced:w!=v},_),h(w,e),m||n.setStrong(w,e,b),w}},10313(e,t,n){var r=n(51532),i=n(4129),a=n(70030),o=n(70111),s=function(){this.object=null,this.symbol=null,this.primitives=null,this.objectsByIndex=a(null)};s.prototype.get=function(e,t){return this[e]||(this[e]=t())},s.prototype.next=function(e,t,n){var a=n?this.objectsByIndex[e]||(this.objectsByIndex[e]=new i):this.primitives||(this.primitives=new r),o=a.get(t);return o||a.set(t,o=new s),o};var u=new s;e.exports=function(){var e,t,n=u,r=arguments.length;for(e=0;e"+s+""}},24994(e,t,n){"use strict";var r=n(13383).IteratorPrototype,i=n(70030),a=n(79114),o=n(58003),s=n(97497),u=function(){return this};e.exports=function(e,t,n){var c=t+" Iterator";return e.prototype=i(r,{next:a(1,n)}),o(e,c,!1,!0),s[c]=u,e}},68880(e,t,n){var r=n(19781),i=n(3070),a=n(79114);e.exports=r?function(e,t,n){return i.f(e,t,a(1,n))}:function(e,t,n){return e[t]=n,e}},79114(e){e.exports=function(e,t){return{enumerable:!(1&e),configurable:!(2&e),writable:!(4&e),value:t}}},86135(e,t,n){"use strict";var r=n(34948),i=n(3070),a=n(79114);e.exports=function(e,t,n){var o=r(t);o in e?i.f(e,o,a(0,n)):e[o]=n}},85573(e,t,n){"use strict";var r=n(47293),i=n(76650).start,a=Math.abs,o=Date.prototype,s=o.getTime,u=o.toISOString;e.exports=r(function(){return"0385-07-25T07:06:39.999Z"!=u.call(new Date(-5e13-1))})||!r(function(){u.call(new Date(NaN))})?function(){if(!isFinite(s.call(this)))throw 
RangeError("Invalid time value");var e=this,t=e.getUTCFullYear(),n=e.getUTCMilliseconds(),r=t<0?"-":t>9999?"+":"";return r+i(a(t),r?6:4,0)+"-"+i(e.getUTCMonth()+1,2,0)+"-"+i(e.getUTCDate(),2,0)+"T"+i(e.getUTCHours(),2,0)+":"+i(e.getUTCMinutes(),2,0)+":"+i(e.getUTCSeconds(),2,0)+"."+i(n,3,0)+"Z"}:u},38709(e,t,n){"use strict";var r=n(19670),i=n(92140);e.exports=function(e){if(r(this),"string"===e||"default"===e)e="string";else if("number"!==e)throw TypeError("Incorrect hint");return i(this,e)}},70654(e,t,n){"use strict";var r=n(82109),i=n(24994),a=n(79518),o=n(27674),s=n(58003),u=n(68880),c=n(31320),l=n(5112),f=n(31913),d=n(97497),h=n(13383),p=h.IteratorPrototype,b=h.BUGGY_SAFARI_ITERATORS,m=l("iterator"),g="keys",v="values",y="entries",w=function(){return this};e.exports=function(e,t,n,l,h,_,E){i(n,t,l);var S,k,x,T=function(e){if(e===h&&C)return C;if(!b&&e in A)return A[e];switch(e){case g:case v:case y:return function(){return new n(this,e)}}return function(){return new n(this)}},M=t+" Iterator",O=!1,A=e.prototype,L=A[m]||A["@@iterator"]||h&&A[h],C=!b&&L||T(h),I="Array"==t&&A.entries||L;if(I&&(S=a(I.call(new e)),p!==Object.prototype&&S.next&&(f||a(S)===p||(o?o(S,p):"function"!=typeof S[m]&&u(S,m,w)),s(S,M,!0,!0),f&&(d[M]=w))),h==v&&L&&L.name!==v&&(O=!0,C=function(){return L.call(this)}),(!f||E)&&A[m]!==C&&u(A,m,C),d[t]=C,h){if(k={values:T(v),keys:_?C:T(g),entries:T(y)},E)for(x in k)!b&&!O&&x in A||c(A,x,k[x]);else r({target:t,proto:!0,forced:b||O},k)}return k}},97235(e,t,n){var r=n(40857),i=n(86656),a=n(6061),o=n(3070).f;e.exports=function(e){var t=r.Symbol||(r.Symbol={});i(t,e)||o(t,e,{value:a.f(e)})}},19781(e,t,n){var r=n(47293);e.exports=!r(function(){return 7!=Object.defineProperty({},1,{get:function(){return 7}})[1]})},80317(e,t,n){var r=n(17854),i=n(70111),a=r.document,o=i(a)&&i(a.createElement);e.exports=function(e){return 
o?a.createElement(e):{}}},48324(e){e.exports={CSSRuleList:0,CSSStyleDeclaration:0,CSSValueList:0,ClientRectList:0,DOMRectList:0,DOMStringList:0,DOMTokenList:1,DataTransferItemList:0,FileList:0,HTMLAllCollection:0,HTMLCollection:0,HTMLFormElement:0,HTMLSelectElement:0,MediaList:0,MimeTypeArray:0,NamedNodeMap:0,NodeList:1,PaintRequestList:0,Plugin:0,PluginArray:0,SVGLengthList:0,SVGNumberList:0,SVGPathSegList:0,SVGPointList:0,SVGStringList:0,SVGTransformList:0,SourceBufferList:0,StyleSheetList:0,TextTrackCueList:0,TextTrackList:0,TouchList:0}},68886(e,t,n){var r=n(88113).match(/firefox\/(\d+)/i);e.exports=!!r&&+r[1]},7871(e){e.exports="object"==typeof window},30256(e,t,n){var r=n(88113);e.exports=/MSIE|Trident/.test(r)},71528(e,t,n){var r=n(88113),i=n(17854);e.exports=/ipad|iphone|ipod/i.test(r)&&void 0!==i.Pebble},6833(e,t,n){var r=n(88113);e.exports=/(?:ipad|iphone|ipod).*applewebkit/i.test(r)},35268(e,t,n){var r=n(84326),i=n(17854);e.exports="process"==r(i.process)},71036(e,t,n){var r=n(88113);e.exports=/web0s(?!.*chrome)/i.test(r)},88113(e,t,n){var r=n(35005);e.exports=r("navigator","userAgent")||""},7392(e,t,n){var r,i,a=n(17854),o=n(88113),s=a.process,u=a.Deno,c=s&&s.versions||u&&u.version,l=c&&c.v8;l?i=(r=l.split("."))[0]<4?1:r[0]+r[1]:o&&(!(r=o.match(/Edge\/(\d+)/))||r[1]>=74)&&(r=o.match(/Chrome\/(\d+)/))&&(i=r[1]),e.exports=i&&+i},98008(e,t,n){var r=n(88113).match(/AppleWebKit\/(\d+)\./);e.exports=!!r&&+r[1]},80748(e){e.exports=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"]},82109(e,t,n){var r=n(17854),i=n(31236).f,a=n(68880),o=n(31320),s=n(83505),u=n(99920),c=n(54705);e.exports=function(e,t){var n,l,f,d,h,p,b=e.target,m=e.global,g=e.stat;if(l=m?r:g?r[b]||s(b,{}):(r[b]||{}).prototype)for(f in t){if(h=t[f],d=e.noTargetGet?(p=i(l,f))&&p.value:l[f],!(n=c(m?f:b+(g?".":"#")+f,e.forced))&&void 0!==d){if(typeof h==typeof 
d)continue;u(h,d)}(e.sham||d&&d.sham)&&a(h,"sham",!0),o(l,f,h,e)}}},47293(e){e.exports=function(e){try{return!!e()}catch(t){return!0}}},27007(e,t,n){"use strict";n(74916);var r=n(31320),i=n(22261),a=n(47293),o=n(5112),s=n(68880),u=o("species"),c=RegExp.prototype;e.exports=function(e,t,n,l){var f=o(e),d=!a(function(){var t={};return t[f]=function(){return 7},7!=""[e](t)}),h=d&&!a(function(){var t=!1,n=/a/;return"split"===e&&((n={}).constructor={},n.constructor[u]=function(){return n},n.flags="",n[f]=/./[f]),n.exec=function(){return t=!0,null},n[f](""),!t});if(!d||!h||n){var p=/./[f],b=t(f,""[e],function(e,t,n,r,a){var o=t.exec;return o===i||o===c.exec?d&&!a?{done:!0,value:p.call(t,n,r)}:{done:!0,value:e.call(n,t,r)}:{done:!1}});r(String.prototype,e,b[0]),r(c,f,b[1])}l&&s(c[f],"sham",!0)}},6790(e,t,n){"use strict";var r=n(43157),i=n(17466),a=n(49974),o=function(e,t,n,s,u,c,l,f){for(var d,h=u,p=0,b=!!l&&a(l,f,3);p0&&r(d))h=o(e,t,d,i(d.length),h,c-1)-1;else{if(h>=9007199254740991)throw TypeError("Exceed the acceptable array length");e[h]=d}h++}p++}return h};e.exports=o},76677(e,t,n){var r=n(47293);e.exports=!r(function(){return Object.isExtensible(Object.preventExtensions({}))})},49974(e,t,n){var r=n(13099);e.exports=function(e,t,n){if(r(e),void 0===t)return e;switch(n){case 0:return function(){return e.call(t)};case 1:return function(n){return e.call(t,n)};case 2:return function(n,r){return e.call(t,n,r)};case 3:return function(n,r,i){return e.call(t,n,r,i)}}return function(){return e.apply(t,arguments)}}},27065(e,t,n){"use strict";var r=n(13099),i=n(70111),a=[].slice,o={},s=function(e,t,n){if(!(t in o)){for(var r=[],i=0;i]*>)/g,s=/\$([$&'`]|\d{1,2})/g;e.exports=function(e,t,n,u,c,l){var f=n+e.length,d=u.length,h=s;return void 0!==c&&(c=r(c),h=o),a.call(l,h,function(r,a){var o;switch(a.charAt(0)){case"$":return"$";case"&":return e;case"`":return t.slice(0,n);case"'":return t.slice(f);case"<":o=c[a.slice(1,-1)];break;default:var s=+a;if(0===s)return r;if(s>d){var 
l=i(s/10);if(0===l)return r;if(l<=d)return void 0===u[l-1]?a.charAt(1):u[l-1]+a.charAt(1);return r}o=u[s-1]}return void 0===o?"":o})}},17854(e,t,n){var r=function(e){return e&&e.Math==Math&&e};e.exports=r("object"==typeof globalThis&&globalThis)||r("object"==typeof window&&window)||r("object"==typeof self&&self)||r("object"==typeof n.g&&n.g)||function(){return this}()||Function("return this")()},86656(e,t,n){var r=n(47908),i={}.hasOwnProperty;e.exports=Object.hasOwn||function(e,t){return i.call(r(e),t)}},3501(e){e.exports={}},842(e,t,n){var r=n(17854);e.exports=function(e,t){var n=r.console;n&&n.error&&(1===arguments.length?n.error(e):n.error(e,t))}},60490(e,t,n){var r=n(35005);e.exports=r("document","documentElement")},64664(e,t,n){var r=n(19781),i=n(47293),a=n(80317);e.exports=!r&&!i(function(){return 7!=Object.defineProperty(a("div"),"a",{get:function(){return 7}}).a})},11179(e){var t=Math.abs,n=Math.pow,r=Math.floor,i=Math.log,a=Math.LN2,o=function(e,o,s){var u,c,l,f=Array(s),d=8*s-o-1,h=(1<>1,b=23===o?n(2,-24)-n(2,-77):0,m=e<0||0===e&&1/e<0?1:0,g=0;for((e=t(e))!=e||e===1/0?(c=e!=e?1:0,u=h):(u=r(i(e)/a),e*(l=n(2,-u))<1&&(u--,l*=2),u+p>=1?e+=b/l:e+=b*n(2,1-p),e*l>=2&&(u++,l/=2),u+p>=h?(c=0,u=h):u+p>=1?(c=(e*l-1)*n(2,o),u+=p):(c=e*n(2,p-1)*n(2,o),u=0));o>=8;f[g++]=255&c,c/=256,o-=8);for(u=u<0;f[g++]=255&u,u/=256,d-=8);return f[--g]|=128*m,f},s=function(e,t){var r,i=e.length,a=8*i-t-1,o=(1<>1,u=a-7,c=i-1,l=e[c--],f=127&l;for(l>>=7;u>0;f=256*f+e[c],c--,u-=8);for(r=f&(1<<-u)-1,f>>=-u,u+=t;u>0;r=256*r+e[c],c--,u-=8);if(0===f)f=1-s;else{if(f===o)return r?NaN:l?-1/0:1/0;r+=n(2,t),f-=s}return(l?-1:1)*r*n(2,f-t)};e.exports={pack:o,unpack:s}},68361(e,t,n){var r=n(47293),i=n(84326),a="".split;e.exports=r(function(){return!Object("z").propertyIsEnumerable(0)})?function(e){return"String"==i(e)?a.call(e,""):Object(e)}:Object},79587(e,t,n){var r=n(70111),i=n(27674);e.exports=function(e,t,n){var a,o;return 
i&&"function"==typeof(a=t.constructor)&&a!==n&&r(o=a.prototype)&&o!==n.prototype&&i(e,o),e}},42788(e,t,n){var r=n(5465),i=Function.toString;"function"!=typeof r.inspectSource&&(r.inspectSource=function(e){return i.call(e)}),e.exports=r.inspectSource},62423(e,t,n){var r=n(82109),i=n(3501),a=n(70111),o=n(86656),s=n(3070).f,u=n(8006),c=n(1156),l=n(69711),f=n(76677),d=!1,h=l("meta"),p=0,b=Object.isExtensible||function(){return!0},m=function(e){s(e,h,{value:{objectID:"O"+p++,weakData:{}}})},g=function(e,t){if(!a(e))return"symbol"==typeof e?e:("string"==typeof e?"S":"P")+e;if(!o(e,h)){if(!b(e))return"F";if(!t)return"E";m(e)}return e[h].objectID},v=function(e,t){if(!o(e,h)){if(!b(e))return!0;if(!t)return!1;m(e)}return e[h].weakData},y=function(e){return f&&d&&b(e)&&!o(e,h)&&m(e),e},w=function(){_.enable=function(){},d=!0;var e=u.f,t=[].splice,n={};n[h]=1,e(n).length&&(u.f=function(n){for(var r=e(n),i=0,a=r.length;ih;h++)if((b=k(e[h]))&&b instanceof l)return b;return new l(!1)}f=s(e,d)}for(m=f.next;!(g=m.call(f)).done;){try{b=k(g.value)}catch(x){c(f,"throw",x)}if("object"==typeof b&&b&&b instanceof l)return b}return new l(!1)}},99212(e,t,n){var r=n(19670);e.exports=function(e,t,n){var i,a;r(e);try{if(void 0===(i=e.return)){if("throw"===t)throw n;return n}i=i.call(e)}catch(o){a=!0,i=o}if("throw"===t)throw n;if(a)throw i;return r(i),n}},54956(e,t,n){"use strict";var r=n(40857),i=n(13099),a=n(19670),o=n(70030),s=n(68880),u=n(12248),c=n(5112),l=n(29909),f=l.set,d=l.get,h=c("toStringTag");e.exports=function(e,t){var n=function(e){e.next=i(e.iterator.next),e.done=!1,e.ignoreArg=!t,f(this,e)};return n.prototype=u(o(r.Iterator.prototype),{next:function(n){var r=d(this),i=arguments.length?[r.ignoreArg?void 0:n]:t?[]:[void 0];r.ignoreArg=!1;var a=r.done?void 0:e.call(r,i);return{done:r.done,value:a}},return:function(e){var t=d(this).iterator;t.done=!0;var n=t.return;return{done:!0,value:void 0===n?e:a(n.call(t,e)).value}},throw:function(e){var t=d(this).iterator;t.done=!0;var 
n=t.throw;if(void 0===n)throw e;return n.call(t,e)}}),t||s(n.prototype,h,"Generator"),n}},13383(e,t,n){"use strict";var r,i,a,o=n(47293),s=n(79518),u=n(68880),c=n(86656),l=n(5112),f=n(31913),d=l("iterator"),h=!1,p=function(){return this};[].keys&&("next"in(a=[].keys())?(i=s(s(a)))!==Object.prototype&&(r=i):h=!0);var b=void 0==r||o(function(){var e={};return r[d].call(e)!==e});b&&(r={}),f&&!b||c(r,d)||u(r,d,p),e.exports={IteratorPrototype:r,BUGGY_SAFARI_ITERATORS:h}},97497(e){e.exports={}},37502(e,t,n){"use strict";var r=n(19670);e.exports=function(e,t){var n=r(this),i=n.has(e)&&"update"in t?t.update(n.get(e),e,n):t.insert(e,n);return n.set(e,i),i}},8154(e,t,n){"use strict";var r=n(19670);e.exports=function(e,t){var n,i=r(this),a=arguments.length>2?arguments[2]:void 0;if("function"!=typeof t&&"function"!=typeof a)throw TypeError("At least one callback required");return i.has(e)?(n=i.get(e),"function"==typeof t&&(n=t(n),i.set(e,n))):"function"==typeof a&&(n=a(),i.set(e,n)),n}},66736(e){var t=Math.expm1,n=Math.exp;e.exports=!t||t(10)>22025.465794806718||22025.465794806718>t(10)||-.00000000000000002!=t(-.00000000000000002)?function(e){return 0==(e=+e)?e:e>-.000001&&e<1e-6?e+e*e/2:n(e)-1}:t},26130(e,t,n){var r=n(64310),i=Math.abs,a=Math.pow,o=a(2,-52),s=a(2,-23),u=a(2,127)*(2-s),c=a(2,-126),l=function(e){return e+1/o-1/o};e.exports=Math.fround||function(e){var t,n,a=i(e),f=r(e);return au||n!=n?f*(1/0):f*n}},26513(e){var t=Math.log;e.exports=Math.log1p||function(e){return(e=+e)>-.00000001&&e<1e-8?e-e*e/2:t(1+e)}},47103(e){e.exports=Math.scale||function(e,t,n,r,i){return 0===arguments.length||e!=e||t!=t||n!=n||r!=r||i!=i?NaN:e===1/0||e===-1/0?e:(e-t)*(i-r)/(n-t)+r}},64310(e){e.exports=Math.sign||function(e){return 0==(e=+e)||e!=e?e:e<0?-1:1}},95948(e,t,n){var 
r,i,a,o,s,u,c,l,f=n(17854),d=n(31236).f,h=n(20261).set,p=n(6833),b=n(71528),m=n(71036),g=n(35268),v=f.MutationObserver||f.WebKitMutationObserver,y=f.document,w=f.process,_=f.Promise,E=d(f,"queueMicrotask"),S=E&&E.value;S||(r=function(){var e,t;for(g&&(e=w.domain)&&e.exit();i;){t=i.fn,i=i.next;try{t()}catch(n){throw i?o():a=void 0,n}}a=void 0,e&&e.enter()},p||g||m||!v||!y?!b&&_&&_.resolve?((c=_.resolve(void 0)).constructor=_,l=c.then,o=function(){l.call(c,r)}):o=g?function(){w.nextTick(r)}:function(){h.call(f,r)}:(s=!0,u=y.createTextNode(""),new v(r).observe(u,{characterData:!0}),o=function(){u.data=s=!s})),e.exports=S||function(e){var t={fn:e,next:void 0};a&&(a.next=t),i||(i=t,o()),a=t}},13366(e,t,n){var r=n(17854);e.exports=r.Promise},30133(e,t,n){var r=n(7392),i=n(47293);e.exports=!!Object.getOwnPropertySymbols&&!i(function(){var e=Symbol();return!String(e)||!(Object(e) instanceof Symbol)||!Symbol.sham&&r&&r<41})},590(e,t,n){var r=n(47293),i=n(5112),a=n(31913),o=i("iterator");e.exports=!r(function(){var e=new URL("b?a=1&b=2&c=3","http://a"),t=e.searchParams,n="";return e.pathname="c%20d",t.forEach(function(e,r){t.delete("b"),n+=r+e}),a&&!e.toJSON||!t.sort||"http://a/c%20d?a=1&c=3"!==e.href||"3"!==t.get("c")||"a=1"!==String(new URLSearchParams("?a=1"))||!t[o]||"a"!==new URL("https://a@b").username||"b"!==new URLSearchParams(new URLSearchParams("a=b")).get("a")||"xn--e1aybc"!==new URL("http://тест").host||"#%D0%B1"!==new URL("http://a#б").hash||"a1c3"!==n||"x"!==new URL("http://x",void 0).host})},68536(e,t,n){var r=n(17854),i=n(42788),a=r.WeakMap;e.exports="function"==typeof a&&/native code/.test(i(a))},78523(e,t,n){"use strict";var r=n(13099),i=function(e){var t,n;this.promise=new e(function(e,r){if(void 0!==t||void 0!==n)throw TypeError("Bad Promise constructor");t=e,n=r}),this.resolve=r(t),this.reject=r(n)};e.exports.f=function(e){return new i(e)}},3929(e,t,n){var r=n(47850);e.exports=function(e){if(r(e))throw TypeError("The method doesn't accept regular 
expressions");return e}},77023(e,t,n){var r=n(17854).isFinite;e.exports=Number.isFinite||function(e){return"number"==typeof e&&r(e)}},2814(e,t,n){var r=n(17854),i=n(41340),a=n(53111).trim,o=n(81361),s=r.parseFloat,u=1/s(o+"-0")!=-1/0;e.exports=u?function(e){var t=a(i(e)),n=s(t);return 0===n&&"-"==t.charAt(0)?-0:n}:s},83009(e,t,n){var r=n(17854),i=n(41340),a=n(53111).trim,o=n(81361),s=r.parseInt,u=/^[+-]?0[Xx]/,c=8!==s(o+"08")||22!==s(o+"0x16");e.exports=c?function(e,t){var n=a(i(e));return s(n,t>>>0||(u.test(n)?16:10))}:s},80430(e,t,n){"use strict";var r=n(29909),i=n(24994),a=n(70111),o=n(36048),s=n(19781),u="Incorrect Number.range arguments",c="NumericRangeIterator",l=r.set,f=r.getterFor(c),d=i(function(e,t,n,r,i,o){if(typeof e!=r||t!==1/0&&t!==-1/0&&typeof t!=r)throw TypeError(u);if(e===1/0||e===-1/0)throw RangeError(u);var f,d=t>e,h=!1;if(void 0===n)f=void 0;else if(a(n))f=n.step,h=!!n.inclusive;else if(typeof n==r)f=n;else throw TypeError(u);if(null==f&&(f=d?o:-o),typeof f!=r)throw TypeError(u);if(f===1/0||f===-1/0||f===i&&e!==t)throw RangeError(u);var p=e!=e||t!=t||f!=f||t>e!=f>i;l(this,{type:c,start:e,end:t,step:f,inclusiveEnd:h,hitsEnd:p,currentCount:i,zero:i}),s||(this.start=e,this.end=t,this.step=f,this.inclusive=h)},c,function(){var e,t=f(this);if(t.hitsEnd)return{value:void 0,done:!0};var n=t.start,r=t.end,i=n+t.step*t.currentCount++;i===r&&(t.hitsEnd=!0);var a=t.inclusiveEnd;return(e=r>n?a?i>r:i>=r:a?r>i:r>=i)?{value:void 0,done:t.hitsEnd=!0}:{value:i,done:!1}}),h=function(e){return{get:e,set:function(){},configurable:!0,enumerable:!1}};s&&o(d.prototype,{start:h(function(){return f(this).start}),end:h(function(){return f(this).end}),inclusive:h(function(){return f(this).inclusiveEnd}),step:h(function(){return f(this).step})}),e.exports=d},21574(e,t,n){"use strict";var 
r=n(19781),i=n(47293),a=n(81956),o=n(25181),s=n(55296),u=n(47908),c=n(68361),l=Object.assign,f=Object.defineProperty;e.exports=!l||i(function(){if(r&&1!==l({b:1},l(f({},"a",{enumerable:!0,get:function(){f(this,"b",{value:3,enumerable:!1})}}),{b:2})).b)return!0;var e={},t={},n=Symbol(),i="abcdefghijklmnopqrst";return e[n]=7,i.split("").forEach(function(e){t[e]=e}),7!=l({},e)[n]||a(l({},t)).join("")!=i})?function(e,t){for(var n=u(e),i=arguments.length,l=1,f=o.f,d=s.f;i>l;)for(var h,p=c(arguments[l++]),b=f?a(p).concat(f(p)):a(p),m=b.length,g=0;m>g;)h=b[g++],(!r||d.call(p,h))&&(n[h]=p[h]);return n}:l},70030(e,t,n){var r,i=n(19670),a=n(36048),o=n(80748),s=n(3501),u=n(60490),c=n(80317),l=n(6200),f=">",d="<",h="prototype",p="script",b=l("IE_PROTO"),m=function(){},g=function(e){return d+p+f+e+d+"/"+p+f},v=function(e){e.write(g("")),e.close();var t=e.parentWindow.Object;return e=null,t},y=function(){var e,t=c("iframe"),n="java"+p+":";return t.style.display="none",u.appendChild(t),t.src=String(n),(e=t.contentWindow.document).open(),e.write(g("document.F=Object")),e.close(),e.F},w=function(){try{r=new ActiveXObject("htmlfile")}catch(e){}w="undefined"!=typeof document?document.domain&&r?v(r):y():v(r);for(var t=o.length;t--;)delete w[h][o[t]];return w()};s[b]=!0,e.exports=Object.create||function(e,t){var n;return null!==e?(m[h]=i(e),n=new m,m[h]=null,n[b]=e):n=w(),void 0===t?n:a(n,t)}},36048(e,t,n){var r=n(19781),i=n(3070),a=n(19670),o=n(81956);e.exports=r?Object.defineProperties:function(e,t){a(e);for(var n,r=o(t),s=r.length,u=0;s>u;)i.f(e,n=r[u++],t[n]);return e}},3070(e,t,n){var r=n(19781),i=n(64664),a=n(19670),o=n(34948),s=Object.defineProperty;t.f=r?s:function(e,t,n){if(a(e),t=o(t),a(n),i)try{return s(e,t,n)}catch(r){}if("get"in n||"set"in n)throw TypeError("Accessors not supported");return"value"in n&&(e[t]=n.value),e}},31236(e,t,n){var 
r=n(19781),i=n(55296),a=n(79114),o=n(45656),s=n(34948),u=n(86656),c=n(64664),l=Object.getOwnPropertyDescriptor;t.f=r?l:function(e,t){if(e=o(e),t=s(t),c)try{return l(e,t)}catch(n){}if(u(e,t))return a(!i.f.call(e,t),e[t])}},1156(e,t,n){var r=n(45656),i=n(8006).f,a={}.toString,o="object"==typeof window&&window&&Object.getOwnPropertyNames?Object.getOwnPropertyNames(window):[],s=function(e){try{return i(e)}catch(t){return o.slice()}};e.exports.f=function(e){return o&&"[object Window]"==a.call(e)?s(e):i(r(e))}},8006(e,t,n){var r=n(16324),i=n(80748).concat("length","prototype");t.f=Object.getOwnPropertyNames||function(e){return r(e,i)}},25181(e,t){t.f=Object.getOwnPropertySymbols},79518(e,t,n){var r=n(86656),i=n(47908),a=n(6200),o=n(49920),s=a("IE_PROTO"),u=Object.prototype;e.exports=o?Object.getPrototypeOf:function(e){return(e=i(e),r(e,s))?e[s]:"function"==typeof e.constructor&&e instanceof e.constructor?e.constructor.prototype:e instanceof Object?u:null}},60996(e,t,n){"use strict";var r=n(29909),i=n(24994),a=n(86656),o=n(81956),s=n(47908),u="Object Iterator",c=r.set,l=r.getterFor(u);e.exports=i(function(e,t){var n=s(e);c(this,{type:u,mode:t,object:n,keys:o(n),index:0})},"Object",function(){for(var e=l(this),t=e.keys;;){if(null===t||e.index>=t.length)return e.object=e.keys=null,{value:void 0,done:!0};var n=t[e.index++],r=e.object;if(a(r,n)){switch(e.mode){case"keys":return{value:n,done:!1};case"values":return{value:r[n],done:!1}}return{value:[n,r[n]],done:!1}}}})},16324(e,t,n){var r=n(86656),i=n(45656),a=n(41318).indexOf,o=n(3501);e.exports=function(e,t){var n,s=i(e),u=0,c=[];for(n in s)!r(o,n)&&r(s,n)&&c.push(n);for(;t.length>u;)r(s,n=t[u++])&&(~a(c,n)||c.push(n));return c}},81956(e,t,n){var r=n(16324),i=n(80748);e.exports=Object.keys||function(e){return r(e,i)}},55296(e,t){"use strict";var n={}.propertyIsEnumerable,r=Object.getOwnPropertyDescriptor,i=r&&!n.call({1:2},1);t.f=i?function(e){var t=r(this,e);return!!t&&t.enumerable}:n},69026(e,t,n){"use strict";var 
r=n(31913),i=n(17854),a=n(47293),o=n(98008);e.exports=r||!a(function(){if(!o||!(o<535)){var e=Math.random();__defineSetter__.call(null,e,function(){}),delete i[e]}})},27674(e,t,n){var r=n(19670),i=n(96077);e.exports=Object.setPrototypeOf||("__proto__"in{}?function(){var e,t=!1,n={};try{(e=Object.getOwnPropertyDescriptor(Object.prototype,"__proto__").set).call(n,[]),t=n instanceof Array}catch(a){}return function(n,a){return r(n),i(a),t?e.call(n,a):n.__proto__=a,n}}():void 0)},44699(e,t,n){var r=n(19781),i=n(81956),a=n(45656),o=n(55296).f,s=function(e){return function(t){for(var n,s=a(t),u=i(s),c=u.length,l=0,f=[];c>l;)n=u[l++],(!r||o.call(s,n))&&f.push(e?[n,s[n]]:s[n]);return f}};e.exports={entries:s(!0),values:s(!1)}},90288(e,t,n){"use strict";var r=n(51694),i=n(70648);e.exports=r?({}).toString:function(){return"[object "+i(this)+"]"}},92140(e,t,n){var r=n(70111);e.exports=function(e,t){var n,i;if("string"===t&&"function"==typeof(n=e.toString)&&!r(i=n.call(e))||"function"==typeof(n=e.valueOf)&&!r(i=n.call(e))||"string"!==t&&"function"==typeof(n=e.toString)&&!r(i=n.call(e)))return i;throw TypeError("Can't convert object to primitive value")}},53887(e,t,n){var r=n(35005),i=n(8006),a=n(25181),o=n(19670);e.exports=r("Reflect","ownKeys")||function(e){var t=i.f(o(e)),n=a.f;return n?t.concat(n(e)):t}},40857(e,t,n){var r=n(17854);e.exports=r},12534(e){e.exports=function(e){try{return{error:!1,value:e()}}catch(t){return{error:!0,value:t}}}},69478(e,t,n){var r=n(19670),i=n(70111),a=n(78523);e.exports=function(e,t){if(r(e),i(t)&&t.constructor===e)return t;var n=a.f(e);return(0,n.resolve)(t),n.promise}},12248(e,t,n){var r=n(31320);e.exports=function(e,t,n){for(var i in t)r(e,i,t[i],n);return e}},31320(e,t,n){var r=n(17854),i=n(68880),a=n(86656),o=n(83505),s=n(42788),u=n(29909),c=u.get,l=u.enforce,f=String(String).split("String");(e.exports=function(e,t,n,s){var u,c=!!s&&!!s.unsafe,d=!!s&&!!s.enumerable,h=!!s&&!!s.noTargetGet;if("function"!=typeof n||("string"!=typeof 
t||a(n,"name")||i(n,"name",t),(u=l(n)).source||(u.source=f.join("string"==typeof t?t:""))),e===r){d?e[t]=n:o(t,n);return}c?!h&&e[t]&&(d=!0):delete e[t],d?e[t]=n:i(e,t,n)})(Function.prototype,"toString",function(){return"function"==typeof this&&c(this).source||s(this)})},38845(e,t,n){var r=n(51532),i=n(4129),a=n(72309)("metadata"),o=a.store||(a.store=new i),s=function(e,t,n){var i=o.get(e);if(!i){if(!n)return;o.set(e,i=new r)}var a=i.get(t);if(!a){if(!n)return;i.set(t,a=new r)}return a},u=function(e,t,n){var r=s(t,n,!1);return void 0!==r&&r.has(e)},c=function(e,t,n){var r=s(t,n,!1);return void 0===r?void 0:r.get(e)},l=function(e,t,n,r){s(n,r,!0).set(e,t)},f=function(e,t){var n=s(e,t,!1),r=[];return n&&n.forEach(function(e,t){r.push(t)}),r},d=function(e){return void 0===e||"symbol"==typeof e?e:String(e)};e.exports={store:o,getMap:s,has:u,get:c,set:l,keys:f,toKey:d}},97651(e,t,n){var r=n(84326),i=n(22261);e.exports=function(e,t){var n=e.exec;if("function"==typeof n){var a=n.call(e,t);if("object"!=typeof a)throw TypeError("RegExp exec method returned something other than an Object or null");return a}if("RegExp"!==r(e))throw TypeError("RegExp#exec called on incompatible receiver");return i.call(e,t)}},22261(e,t,n){"use strict";var r,i,a=n(41340),o=n(67066),s=n(52999),u=n(72309),c=n(70030),l=n(29909).get,f=n(9441),d=n(38173),h=RegExp.prototype.exec,p=u("native-string-replace",String.prototype.replace),b=h,m=(r=/a/,i=/b*/g,h.call(r,"a"),h.call(i,"a"),0!==r.lastIndex||0!==i.lastIndex),g=s.UNSUPPORTED_Y||s.BROKEN_CARET,v=void 0!==/()??/.exec("")[1];(m||v||g||f||d)&&(b=function(e){var t,n,r,i,s,u,f,d=this,y=l(d),w=a(e),_=y.raw;if(_)return _.lastIndex=d.lastIndex,t=b.call(_,w),d.lastIndex=_.lastIndex,t;var E=y.groups,S=g&&d.sticky,k=o.call(d),x=d.source,T=0,M=w;if(S&&(-1===(k=k.replace("y","")).indexOf("g")&&(k+="g"),M=w.slice(d.lastIndex),d.lastIndex>0&&(!d.multiline||d.multiline&&"\n"!==w.charAt(d.lastIndex-1))&&(x="(?: "+x+")",M=" 
"+M,T++),n=RegExp("^(?:"+x+")",k)),v&&(n=RegExp("^"+x+"$(?!\\s)",k)),m&&(r=d.lastIndex),i=h.call(S?n:d,M),S?i?(i.input=i.input.slice(T),i[0]=i[0].slice(T),i.index=d.lastIndex,d.lastIndex+=i[0].length):d.lastIndex=0:m&&i&&(d.lastIndex=d.global?i.index+i[0].length:r),v&&i&&i.length>1&&p.call(i[0],n,function(){for(s=1;sb)","g");return"b"!==e.exec("b").groups.a||"bc"!=="b".replace(e,"$c")})},84488(e){e.exports=function(e){if(void 0==e)throw TypeError("Can't call method on "+e);return e}},46465(e){e.exports=function(e,t){return e===t||e!=e&&t!=t}},81150(e){e.exports=Object.is||function(e,t){return e===t?0!==e||1/e==1/t:e!=e&&t!=t}},83505(e,t,n){var r=n(17854);e.exports=function(e,t){try{Object.defineProperty(r,e,{value:t,configurable:!0,writable:!0})}catch(n){r[e]=t}return t}},96340(e,t,n){"use strict";var r=n(35005),i=n(3070),a=n(5112),o=n(19781),s=a("species");e.exports=function(e){var t=r(e),n=i.f;o&&t&&!t[s]&&n(t,s,{configurable:!0,get:function(){return this}})}},58003(e,t,n){var r=n(3070).f,i=n(86656),a=n(5112)("toStringTag");e.exports=function(e,t,n){e&&!i(e=n?e:e.prototype,a)&&r(e,a,{configurable:!0,value:t})}},6200(e,t,n){var r=n(72309),i=n(69711),a=r("keys");e.exports=function(e){return a[e]||(a[e]=i(e))}},5465(e,t,n){var r=n(17854),i=n(83505),a="__core-js_shared__",o=r[a]||i(a,{});e.exports=o},72309(e,t,n){var r=n(31913),i=n(5465);(e.exports=function(e,t){return i[e]||(i[e]=void 0!==t?t:{})})("versions",[]).push({version:"3.17.0",mode:r?"pure":"global",copyright:"\xa9 2021 Denis Pushkarev (zloirock.ru)"})},36707(e,t,n){var r=n(19670),i=n(13099),a=n(5112)("species");e.exports=function(e,t){var n,o=r(e).constructor;return void 0===o||void 0==(n=r(o)[a])?t:i(n)}},43429(e,t,n){var r=n(47293);e.exports=function(e){return r(function(){var t=""[e]('"');return t!==t.toLowerCase()||t.split('"').length>3})}},28710(e,t,n){var r=n(99958),i=n(41340),a=n(84488),o=function(e){return function(t,n){var o,s,u=i(a(t)),c=r(n),l=u.length;return c<0||c>=l?e?"":void 
0:(o=u.charCodeAt(c))<55296||o>56319||c+1===l||(s=u.charCodeAt(c+1))<56320||s>57343?e?u.charAt(c):o:e?u.slice(c,c+2):(o-55296<<10)+(s-56320)+65536}};e.exports={codeAt:o(!1),charAt:o(!0)}},54986(e,t,n){var r=n(88113);e.exports=/Version\/10(?:\.\d+){1,2}(?: [\w./]+)?(?: Mobile\/\w+)? Safari\//.test(r)},76650(e,t,n){var r=n(17466),i=n(41340),a=n(38415),o=n(84488),s=Math.ceil,u=function(e){return function(t,n,u){var c,l,f=i(o(t)),d=f.length,h=void 0===u?" ":i(u),p=r(n);return p<=d||""==h?f:(c=p-d,(l=a.call(h,s(c/h.length))).length>c&&(l=l.slice(0,c)),e?f+l:l+f)}};e.exports={start:u(!1),end:u(!0)}},33197(e){"use strict";var t=2147483647,n=36,r=1,i=26,a=38,o=700,s=72,u=128,c="-",l=/[^\0-\u007E]/,f=/[.\u3002\uFF0E\uFF61]/g,d="Overflow: input needs wider integers to process",h=n-r,p=Math.floor,b=String.fromCharCode,m=function(e){for(var t=[],n=0,r=e.length;n=55296&&i<=56319&&n>1,e+=p(e/t);e>h*i>>1;s+=n)e=p(e/h);return p(s+(h+1)*e/(e+a))},y=function(e){var a,o,l=[],f=(e=m(e)).length,h=u,y=0,w=s;for(a=0;a=h&&op((t-y)/k))throw RangeError(d);for(y+=(S-h)*k,h=S,a=0;at)throw RangeError(d);if(o==h){for(var x=y,T=n;;T+=n){var M=T<=w?r:T>=w+i?i:T-w;if(x0;(o>>>=1)&&(t+=t))1&o&&(n+=t);return n}},76091(e,t,n){var r=n(47293),i=n(81361),a="​\x85᠎";e.exports=function(e){return r(function(){return!!i[e]()||a[e]()!=a||i[e].name!==e})}},53111(e,t,n){var r=n(84488),i=n(41340),a="["+n(81361)+"]",o=RegExp("^"+a+a+"*"),s=RegExp(a+a+"*$"),u=function(e){return function(t){var n=i(r(t));return 1&e&&(n=n.replace(o,"")),2&e&&(n=n.replace(s,"")),n}};e.exports={start:u(1),end:u(2),trim:u(3)}},20261(e,t,n){var r,i,a,o,s=n(17854),u=n(47293),c=n(49974),l=n(60490),f=n(80317),d=n(6833),h=n(35268),p=s.setImmediate,b=s.clearImmediate,m=s.process,g=s.MessageChannel,v=s.Dispatch,y=0,w={},_="onreadystatechange";try{r=s.location}catch(E){}var S=function(e){if(w.hasOwnProperty(e)){var t=w[e];delete w[e],t()}},k=function(e){return 
function(){S(e)}},x=function(e){S(e.data)},T=function(e){s.postMessage(String(e),r.protocol+"//"+r.host)};p&&b||(p=function(e){for(var t=[],n=arguments.length,r=1;n>r;)t.push(arguments[r++]);return w[++y]=function(){("function"==typeof e?e:Function(e)).apply(void 0,t)},i(y),y},b=function(e){delete w[e]},h?i=function(e){m.nextTick(k(e))}:v&&v.now?i=function(e){v.now(k(e))}:g&&!d?(o=(a=new g).port2,a.port1.onmessage=x,i=c(o.postMessage,o,1)):s.addEventListener&&"function"==typeof postMessage&&!s.importScripts&&r&&"file:"!==r.protocol&&!u(T)?(i=T,s.addEventListener("message",x,!1)):i=_ in f("script")?function(e){l.appendChild(f("script"))[_]=function(){l.removeChild(this),S(e)}}:function(e){setTimeout(k(e),0)}),e.exports={set:p,clear:b}},50863(e,t,n){var r=n(84326);e.exports=function(e){if("number"!=typeof e&&"Number"!=r(e))throw TypeError("Incorrect invocation");return+e}},51400(e,t,n){var r=n(99958),i=Math.max,a=Math.min;e.exports=function(e,t){var n=r(e);return n<0?i(n+t,0):a(n,t)}},57067(e,t,n){var r=n(99958),i=n(17466);e.exports=function(e){if(void 0===e)return 0;var t=r(e),n=i(t);if(t!==n)throw RangeError("Wrong length or index");return n}},45656(e,t,n){var r=n(68361),i=n(84488);e.exports=function(e){return r(i(e))}},99958(e){var t=Math.ceil,n=Math.floor;e.exports=function(e){return isNaN(e=+e)?0:(e>0?n:t)(e)}},17466(e,t,n){var r=n(99958),i=Math.min;e.exports=function(e){return e>0?i(r(e),9007199254740991):0}},47908(e,t,n){var r=n(84488);e.exports=function(e){return Object(r(e))}},84590(e,t,n){var r=n(73002);e.exports=function(e,t){var n=r(e);if(n%t)throw RangeError("Wrong offset");return n}},73002(e,t,n){var r=n(99958);e.exports=function(e){var t=r(e);if(t<0)throw RangeError("The argument can't be less than 0");return t}},57593(e,t,n){var r=n(70111),i=n(52190),a=n(92140),o=n(5112)("toPrimitive");e.exports=function(e,t){if(!r(e)||i(e))return e;var n,s=e[o];if(void 0!==s){if(void 0===t&&(t="default"),!r(n=s.call(e,t))||i(n))return n;throw TypeError("Can't convert 
object to primitive value")}return void 0===t&&(t="number"),a(e,t)}},34948(e,t,n){var r=n(57593),i=n(52190);e.exports=function(e){var t=r(e,"string");return i(t)?t:String(t)}},51694(e,t,n){var r=n(5112)("toStringTag"),i={};i[r]="z",e.exports="[object z]"===String(i)},41340(e,t,n){var r=n(52190);e.exports=function(e){if(r(e))throw TypeError("Cannot convert a Symbol value to a string");return String(e)}},19843(e,t,n){"use strict";var r=n(82109),i=n(17854),a=n(19781),o=n(63832),s=n(90260),u=n(13331),c=n(25787),l=n(79114),f=n(68880),d=n(18730),h=n(17466),p=n(57067),b=n(84590),m=n(34948),g=n(86656),v=n(70648),y=n(70111),w=n(52190),_=n(70030),E=n(27674),S=n(8006).f,k=n(97321),x=n(42092).forEach,T=n(96340),M=n(3070),O=n(31236),A=n(29909),L=n(79587),C=A.get,I=A.set,D=M.f,N=O.f,P=Math.round,R=i.RangeError,j=u.ArrayBuffer,F=u.DataView,Y=s.NATIVE_ARRAY_BUFFER_VIEWS,B=s.TYPED_ARRAY_CONSTRUCTOR,U=s.TYPED_ARRAY_TAG,H=s.TypedArray,$=s.TypedArrayPrototype,z=s.aTypedArrayConstructor,G=s.isTypedArray,W="BYTES_PER_ELEMENT",K="Wrong length",V=function(e,t){for(var n=0,r=t.length,i=new(z(e))(r);r>n;)i[n]=t[n++];return i},q=function(e,t){D(e,t,{get:function(){return C(this)[t]}})},Z=function(e){var t;return e instanceof j||"ArrayBuffer"==(t=v(e))||"SharedArrayBuffer"==t},X=function(e,t){return G(e)&&!w(t)&&t in e&&d(+t)&&t>=0},J=function(e,t){return t=m(t),X(e,t)?l(2,e[t]):N(e,t)},Q=function(e,t,n){return(t=m(t),X(e,t)&&y(n)&&g(n,"value")&&!g(n,"get")&&!g(n,"set")&&!n.configurable&&(!g(n,"writable")||n.writable)&&(!g(n,"enumerable")||n.enumerable))?(e[t]=n.value,e):D(e,t,n)};a?(Y||(O.f=J,M.f=Q,q($,"buffer"),q($,"byteOffset"),q($,"byteLength"),q($,"length")),r({target:"Object",stat:!0,forced:!Y},{getOwnPropertyDescriptor:J,defineProperty:Q}),e.exports=function(e,t,n){var a=e.match(/\d+$/)[0]/8,s=e+(n?"Clamped":"")+"Array",u="get"+e,l="set"+e,d=i[s],m=d,g=m&&m.prototype,v={},w=function(e,t){var n=C(e);return n.view[u](t*a+n.byteOffset,!0)},M=function(e,t,r){var 
i=C(e);n&&(r=(r=P(r))<0?0:r>255?255:255&r),i.view[l](t*a+i.byteOffset,r,!0)},O=function(e,t){D(e,t,{get:function(){return w(this,t)},set:function(e){return M(this,t,e)},enumerable:!0})};Y?o&&(m=t(function(e,t,n,r){return c(e,m,s),L(y(t)?Z(t)?void 0!==r?new d(t,b(n,a),r):void 0!==n?new d(t,b(n,a)):new d(t):G(t)?V(m,t):k.call(m,t):new d(p(t)),e,m)}),E&&E(m,H),x(S(d),function(e){e in m||f(m,e,d[e])}),m.prototype=g):(m=t(function(e,t,n,r){c(e,m,s);var i,o,u,l=0,f=0;if(y(t)){if(Z(t)){i=t,f=b(n,a);var d=t.byteLength;if(void 0===r){if(d%a||(o=d-f)<0)throw R(K)}else if((o=h(r)*a)+f>d)throw R(K);u=o/a}else if(G(t))return V(m,t);else return k.call(m,t)}else o=(u=p(t))*a,i=new j(o);for(I(e,{buffer:i,byteOffset:f,byteLength:o,length:u,view:new F(i)});l1?arguments[1]:void 0,g=void 0!==m,v=o(p);if(void 0!=v&&!s(v))for(h=(d=a(p,v)).next,p=[];!(f=h.call(d)).done;)p.push(f.value);for(g&&b>2&&(m=u(m,arguments[2],2)),n=i(p.length),l=new(c(this))(n),t=0;n>t;t++)l[t]=g?m(p[t],t):p[t];return l}},66304(e,t,n){var r=n(90260),i=n(36707),a=r.TYPED_ARRAY_CONSTRUCTOR,o=r.aTypedArrayConstructor;e.exports=function(e){return o(i(e,e[a]))}},69711(e){var t=0,n=Math.random();e.exports=function(e){return"Symbol("+String(void 0===e?"":e)+")_"+(++t+n).toString(36)}},43307(e,t,n){var r=n(30133);e.exports=r&&!Symbol.sham&&"symbol"==typeof Symbol.iterator},6061(e,t,n){var r=n(5112);t.f=r},5112(e,t,n){var r=n(17854),i=n(72309),a=n(86656),o=n(69711),s=n(30133),u=n(43307),c=i("wks"),l=r.Symbol,f=u?l:l&&l.withoutSetter||o;e.exports=function(e){return a(c,e)&&(s||"string"==typeof c[e])||(s&&a(l,e)?c[e]=l[e]:c[e]=f("Symbol."+e)),c[e]}},81361(e){e.exports=" \n\v\f\r \xa0               \u2028\u2029\uFEFF"},9170(e,t,n){"use strict";var r=n(82109),i=n(79518),a=n(27674),o=n(70030),s=n(68880),u=n(79114),c=n(20408),l=n(41340),f=function(e,t){var n=this;if(!(n instanceof f))return new f(e,t);a&&(n=a(Error(void 0),i(n))),void 0!==t&&s(n,"message",l(t));var r=[];return 
c(e,r.push,{that:r}),s(n,"errors",r),n};f.prototype=o(Error.prototype,{constructor:u(5,f),message:u(5,""),name:u(5,"AggregateError")}),r({global:!0},{AggregateError:f})},18264(e,t,n){"use strict";var r=n(82109),i=n(17854),a=n(13331),o=n(96340),s="ArrayBuffer",u=a[s];r({global:!0,forced:i[s]!==u},{ArrayBuffer:u}),o(s)},76938(e,t,n){var r=n(82109),i=n(90260);r({target:"ArrayBuffer",stat:!0,forced:!i.NATIVE_ARRAY_BUFFER_VIEWS},{isView:i.isView})},39575(e,t,n){"use strict";var r=n(82109),i=n(47293),a=n(13331),o=n(19670),s=n(51400),u=n(17466),c=n(36707),l=a.ArrayBuffer,f=a.DataView,d=l.prototype.slice,h=i(function(){return!new l(2).slice(1,void 0).byteLength});r({target:"ArrayBuffer",proto:!0,unsafe:!0,forced:h},{slice:function(e,t){if(void 0!==d&&void 0===t)return d.call(o(this),e);for(var n=o(this).byteLength,r=s(e,n),i=s(void 0===t?n:t,n),a=new(c(this,l))(u(i-r)),h=new f(this),p=new f(a),b=0;r=0?r:n+r;return s<0||s>=n?void 0:t[s]}}),s("at")},92222(e,t,n){"use strict";var r=n(82109),i=n(47293),a=n(43157),o=n(70111),s=n(47908),u=n(17466),c=n(86135),l=n(65417),f=n(81194),d=n(5112),h=n(7392),p=d("isConcatSpreadable"),b=9007199254740991,m="Maximum allowed index exceeded",g=h>=51||!i(function(){var e=[];return e[p]=!1,e.concat()[0]!==e}),v=f("concat"),y=function(e){if(!o(e))return!1;var t=e[p];return void 0!==t?!!t:a(e)};r({target:"Array",proto:!0,forced:!g||!v},{concat:function(e){var t,n,r,i,a,o=s(this),f=l(o,0),d=0;for(t=-1,r=arguments.length;tb)throw TypeError(m);for(n=0;n=b)throw TypeError(m);c(f,d++,a)}return f.length=d,f}})},50545(e,t,n){var r=n(82109),i=n(1048),a=n(51223);r({target:"Array",proto:!0},{copyWithin:i}),a("copyWithin")},26541(e,t,n){"use strict";var r=n(82109),i=n(42092).every,a=n(9341)("every");r({target:"Array",proto:!0,forced:!a},{every:function(e){return i(this,e,arguments.length>1?arguments[1]:void 0)}})},43290(e,t,n){var r=n(82109),i=n(21285),a=n(51223);r({target:"Array",proto:!0},{fill:i}),a("fill")},57327(e,t,n){"use strict";var 
r=n(82109),i=n(42092).filter,a=n(81194)("filter");r({target:"Array",proto:!0,forced:!a},{filter:function(e){return i(this,e,arguments.length>1?arguments[1]:void 0)}})},34553(e,t,n){"use strict";var r=n(82109),i=n(42092).findIndex,a=n(51223),o="findIndex",s=!0;o in[]&&[,][o](function(){s=!1}),r({target:"Array",proto:!0,forced:s},{findIndex:function(e){return i(this,e,arguments.length>1?arguments[1]:void 0)}}),a(o)},69826(e,t,n){"use strict";var r=n(82109),i=n(42092).find,a=n(51223),o="find",s=!0;o in[]&&[,][o](function(){s=!1}),r({target:"Array",proto:!0,forced:s},{find:function(e){return i(this,e,arguments.length>1?arguments[1]:void 0)}}),a(o)},86535(e,t,n){"use strict";var r=n(82109),i=n(6790),a=n(47908),o=n(17466),s=n(13099),u=n(65417);r({target:"Array",proto:!0},{flatMap:function(e){var t,n=a(this),r=o(n.length);return s(e),(t=u(n,0)).length=i(t,n,n,r,0,1,e,arguments.length>1?arguments[1]:void 0),t}})},84944(e,t,n){"use strict";var r=n(82109),i=n(6790),a=n(47908),o=n(17466),s=n(99958),u=n(65417);r({target:"Array",proto:!0},{flat:function(){var e=arguments.length?arguments[0]:void 0,t=a(this),n=o(t.length),r=u(t,0);return r.length=i(r,t,t,n,0,void 0===e?1:s(e)),r}})},89554(e,t,n){"use strict";var r=n(82109),i=n(18533);r({target:"Array",proto:!0,forced:[].forEach!=i},{forEach:i})},91038(e,t,n){var r=n(82109),i=n(48457),a=!n(17072)(function(e){Array.from(e)});r({target:"Array",stat:!0,forced:a},{from:i})},26699(e,t,n){"use strict";var r=n(82109),i=n(41318).includes,a=n(51223);r({target:"Array",proto:!0},{includes:function(e){return i(this,e,arguments.length>1?arguments[1]:void 0)}}),a("includes")},82772(e,t,n){"use strict";var r=n(82109),i=n(41318).indexOf,a=n(9341),o=[].indexOf,s=!!o&&1/[1].indexOf(1,-0)<0,u=a("indexOf");r({target:"Array",proto:!0,forced:s||!u},{indexOf:function(e){return s?o.apply(this,arguments)||0:i(this,e,arguments.length>1?arguments[1]:void 0)}})},79753(e,t,n){var 
r=n(82109),i=n(43157);r({target:"Array",stat:!0},{isArray:i})},66992(e,t,n){"use strict";var r=n(45656),i=n(51223),a=n(97497),o=n(29909),s=n(70654),u="Array Iterator",c=o.set,l=o.getterFor(u);e.exports=s(Array,"Array",function(e,t){c(this,{type:u,target:r(e),index:0,kind:t})},function(){var e=l(this),t=e.target,n=e.kind,r=e.index++;return!t||r>=t.length?(e.target=void 0,{value:void 0,done:!0}):"keys"==n?{value:r,done:!1}:"values"==n?{value:t[r],done:!1}:{value:[r,t[r]],done:!1}},"values"),a.Arguments=a.Array,i("keys"),i("values"),i("entries")},69600(e,t,n){"use strict";var r=n(82109),i=n(68361),a=n(45656),o=n(9341),s=[].join,u=i!=Object,c=o("join",",");r({target:"Array",proto:!0,forced:u||!c},{join:function(e){return s.call(a(this),void 0===e?",":e)}})},94986(e,t,n){var r=n(82109),i=n(86583);r({target:"Array",proto:!0,forced:i!==[].lastIndexOf},{lastIndexOf:i})},21249(e,t,n){"use strict";var r=n(82109),i=n(42092).map,a=n(81194)("map");r({target:"Array",proto:!0,forced:!a},{map:function(e){return i(this,e,arguments.length>1?arguments[1]:void 0)}})},26572(e,t,n){"use strict";var r=n(82109),i=n(47293),a=n(86135),o=i(function(){function e(){}return!(Array.of.call(e) instanceof e)});r({target:"Array",stat:!0,forced:o},{of:function(){for(var e=0,t=arguments.length,n=new("function"==typeof this?this:Array)(t);t>e;)a(n,e,arguments[e++]);return n.length=t,n}})},96644(e,t,n){"use strict";var r=n(82109),i=n(53671).right,a=n(9341),o=n(7392),s=n(35268),u=a("reduceRight"),c=!s&&o>79&&o<83;r({target:"Array",proto:!0,forced:!u||c},{reduceRight:function(e){return i(this,e,arguments.length,arguments.length>1?arguments[1]:void 0)}})},85827(e,t,n){"use strict";var r=n(82109),i=n(53671).left,a=n(9341),o=n(7392),s=n(35268),u=a("reduce"),c=!s&&o>79&&o<83;r({target:"Array",proto:!0,forced:!u||c},{reduce:function(e){return i(this,e,arguments.length,arguments.length>1?arguments[1]:void 0)}})},65069(e,t,n){"use strict";var 
r=n(82109),i=n(43157),a=[].reverse,o=[1,2];r({target:"Array",proto:!0,forced:String(o)===String(o.reverse())},{reverse:function(){return i(this)&&(this.length=this.length),a.call(this)}})},47042(e,t,n){"use strict";var r=n(82109),i=n(70111),a=n(43157),o=n(51400),s=n(17466),u=n(45656),c=n(86135),l=n(5112),f=n(81194)("slice"),d=l("species"),h=[].slice,p=Math.max;r({target:"Array",proto:!0,forced:!f},{slice:function(e,t){var n,r,l,f=u(this),b=s(f.length),m=o(e,b),g=o(void 0===t?b:t,b);if(a(f)&&("function"==typeof(n=f.constructor)&&(n===Array||a(n.prototype))?n=void 0:i(n)&&null===(n=n[d])&&(n=void 0),n===Array||void 0===n))return h.call(f,m,g);for(l=0,r=new(void 0===n?Array:n)(p(g-m,0));m1?arguments[1]:void 0)}})},2707(e,t,n){"use strict";var r=n(82109),i=n(13099),a=n(47908),o=n(17466),s=n(41340),u=n(47293),c=n(94362),l=n(9341),f=n(68886),d=n(30256),h=n(7392),p=n(98008),b=[],m=b.sort,g=u(function(){b.sort(void 0)}),v=u(function(){b.sort(null)}),y=l("sort"),w=!u(function(){if(h)return h<70;if(!f||!(f>3)){if(d)return!0;if(p)return p<603;var e,t,n,r,i="";for(e=65;e<76;e++){switch(t=String.fromCharCode(e),e){case 66:case 69:case 70:case 72:n=3;break;case 68:case 71:n=4;break;default:n=2}for(r=0;r<47;r++)b.push({k:t+r,v:n})}for(b.sort(function(e,t){return t.v-e.v}),r=0;rs(n)?1:-1}};r({target:"Array",proto:!0,forced:_},{sort:function(e){void 0!==e&&i(e);var t,n,r=a(this);if(w)return void 0===e?m.call(r):m.call(r,e);var s=[],u=o(r.length);for(n=0;nh)throw TypeError(p);for(b=0,l=u(v,r);by-r+n;b--)delete v[b-1]}else if(n>r)for(b=y-r;b>w;b--)m=b+r-1,g=b+n-1,m in v?v[g]=v[m]:delete v[g];for(b=0;b94906265.62425156?s(e)+c:a(e-1+u(e-1)*u(e+1))}})},82376(e,t,n){var r=n(82109),i=Math.asinh,a=Math.log,o=Math.sqrt;function s(e){return isFinite(e=+e)&&0!=e?e<0?-s(-e):a(e+o(e*e+1)):e}r({target:"Math",stat:!0,forced:!(i&&1/i(0)>0)},{asinh:s})},73181(e,t,n){var r=n(82109),i=Math.atanh,a=Math.log;r({target:"Math",stat:!0,forced:!(i&&1/i(-0)<0)},{atanh:function(e){return 
0==(e=+e)?e:a((1+e)/(1-e))/2}})},23484(e,t,n){var r=n(82109),i=n(64310),a=Math.abs,o=Math.pow;r({target:"Math",stat:!0},{cbrt:function(e){return i(e=+e)*o(a(e),1/3)}})},2388(e,t,n){var r=n(82109),i=Math.floor,a=Math.log,o=Math.LOG2E;r({target:"Math",stat:!0},{clz32:function(e){return(e>>>=0)?31-i(a(e+.5)*o):32}})},88621(e,t,n){var r=n(82109),i=n(66736),a=Math.cosh,o=Math.abs,s=Math.E;r({target:"Math",stat:!0,forced:!a||a(710)===1/0},{cosh:function(e){var t=i(o(e)-1)+1;return(t+1/(t*s*s))*(s/2)}})},60403(e,t,n){var r=n(82109),i=n(66736);r({target:"Math",stat:!0,forced:i!=Math.expm1},{expm1:i})},84755(e,t,n){var r=n(82109),i=n(26130);r({target:"Math",stat:!0},{fround:i})},25438(e,t,n){var r=n(82109),i=Math.hypot,a=Math.abs,o=Math.sqrt,s=!!i&&i(1/0,NaN)!==1/0;r({target:"Math",stat:!0,forced:s},{hypot:function(e,t){for(var n,r,i=0,s=0,u=arguments.length,c=0;s0?i+=(r=n/c)*r:i+=n;return c===1/0?1/0:c*o(i)}})},90332(e,t,n){var r=n(82109),i=n(47293),a=Math.imul,o=i(function(){return -5!=a(4294967295,5)||2!=a.length});r({target:"Math",stat:!0,forced:o},{imul:function(e,t){var n=65535,r=+e,i=+t,a=n&r,o=n&i;return 0|a*o+((n&r>>>16)*o+a*(n&i>>>16)<<16>>>0)}})},40658(e,t,n){var r=n(82109),i=Math.log,a=Math.LOG10E;r({target:"Math",stat:!0},{log10:function(e){return i(e)*a}})},40197(e,t,n){var r=n(82109),i=n(26513);r({target:"Math",stat:!0},{log1p:i})},44914(e,t,n){var r=n(82109),i=Math.log,a=Math.LN2;r({target:"Math",stat:!0},{log2:function(e){return i(e)/a}})},52420(e,t,n){var r=n(82109),i=n(64310);r({target:"Math",stat:!0},{sign:i})},60160(e,t,n){var r=n(82109),i=n(47293),a=n(66736),o=Math.abs,s=Math.exp,u=Math.E,c=i(function(){return -.00000000000000002!=Math.sinh(-.00000000000000002)});r({target:"Math",stat:!0,forced:c},{sinh:function(e){return 1>o(e=+e)?(a(e)-a(-e))/2:(s(e-1)-s(-e-1))*(u/2)}})},60970(e,t,n){var r=n(82109),i=n(66736),a=Math.exp;r({target:"Math",stat:!0},{tanh:function(e){var t=i(e=+e),n=i(-e);return 
t==1/0?1:n==1/0?-1:(t-n)/(a(e)+a(-e))}})},10408(e,t,n){n(58003)(Math,"Math",!0)},73689(e,t,n){var r=n(82109),i=Math.ceil,a=Math.floor;r({target:"Math",stat:!0},{trunc:function(e){return(e>0?a:i)(e)}})},9653(e,t,n){"use strict";var r=n(19781),i=n(17854),a=n(54705),o=n(31320),s=n(86656),u=n(84326),c=n(79587),l=n(52190),f=n(57593),d=n(47293),h=n(70030),p=n(8006).f,b=n(31236).f,m=n(3070).f,g=n(53111).trim,v="Number",y=i[v],w=y.prototype,_=u(h(w))==v,E=function(e){if(l(e))throw TypeError("Cannot convert a Symbol value to a number");var t,n,r,i,a,o,s,u,c=f(e,"number");if("string"==typeof c&&c.length>2){if(43===(t=(c=g(c)).charCodeAt(0))||45===t){if(88===(n=c.charCodeAt(2))||120===n)return NaN}else if(48===t){switch(c.charCodeAt(1)){case 66:case 98:r=2,i=49;break;case 79:case 111:r=8,i=55;break;default:return+c}for(s=0,o=(a=c.slice(2)).length;si)return NaN;return parseInt(a,r)}}return+c};if(a(v,!y(" 0o1")||!y("0b1")||y("+0x1"))){for(var S,k=function(e){var t=arguments.length<1?0:e,n=this;return n instanceof k&&(_?d(function(){w.valueOf.call(n)}):u(n)!=v)?c(new y(E(t)),n,k):E(t)},x=r?p(y):"MAX_VALUE,MIN_VALUE,NaN,NEGATIVE_INFINITY,POSITIVE_INFINITY,EPSILON,isFinite,isInteger,isNaN,isSafeInteger,MAX_SAFE_INTEGER,MIN_SAFE_INTEGER,parseFloat,parseInt,isInteger,fromString,range".split(","),T=0;x.length>T;T++)s(y,S=x[T])&&!s(k,S)&&m(k,S,b(y,S));k.prototype=w,w.constructor=k,o(i,v,k)}},93299(e,t,n){n(82109)({target:"Number",stat:!0},{EPSILON:2220446049250313e-31})},35192(e,t,n){var r=n(82109),i=n(77023);r({target:"Number",stat:!0},{isFinite:i})},33161(e,t,n){var r=n(82109),i=n(18730);r({target:"Number",stat:!0},{isInteger:i})},44048(e,t,n){n(82109)({target:"Number",stat:!0},{isNaN:function(e){return e!=e}})},78285(e,t,n){var r=n(82109),i=n(18730),a=Math.abs;r({target:"Number",stat:!0},{isSafeInteger:function(e){return 
i(e)&&9007199254740991>=a(e)}})},44363(e,t,n){n(82109)({target:"Number",stat:!0},{MAX_SAFE_INTEGER:9007199254740991})},55994(e,t,n){n(82109)({target:"Number",stat:!0},{MIN_SAFE_INTEGER:-9007199254740991})},61874(e,t,n){var r=n(82109),i=n(2814);r({target:"Number",stat:!0,forced:Number.parseFloat!=i},{parseFloat:i})},9494(e,t,n){var r=n(82109),i=n(83009);r({target:"Number",stat:!0,forced:Number.parseInt!=i},{parseInt:i})},56977(e,t,n){"use strict";var r=n(82109),i=n(99958),a=n(50863),o=n(38415),s=n(47293),u=1..toFixed,c=Math.floor,l=function(e,t,n){return 0===t?n:t%2==1?l(e,t-1,n*e):l(e*e,t/2,n)},f=function(e){for(var t=0,n=e;n>=4096;)t+=12,n/=4096;for(;n>=2;)t+=1,n/=2;return t},d=function(e,t,n){for(var r=-1,i=n;++r<6;)i+=t*e[r],e[r]=i%1e7,i=c(i/1e7)},h=function(e,t){for(var n=6,r=0;--n>=0;)r+=e[n],e[n]=c(r/t),r=r%t*1e7},p=function(e){for(var t=6,n="";--t>=0;)if(""!==n||0===t||0!==e[t]){var r=String(e[t]);n=""===n?r:n+o.call("0",7-r.length)+r}return n},b=!!u||!s(function(){u.call({})});r({target:"Number",proto:!0,forced:b},{toFixed:function(e){var t,n,r,s,u=a(this),c=i(e),b=[0,0,0,0,0,0],m="",g="0";if(c<0||c>20)throw RangeError("Incorrect fraction digits");if(u!=u)return"NaN";if(u<=-1e21||u>=1e21)return String(u);if(u<0&&(m="-",u=-u),u>1e-21){if(n=(t=f(u*l(2,69,1))-69)<0?u*l(2,-t,1):u/l(2,t,1),n*=4503599627370496,(t=52-t)>0){for(d(b,0,n),r=c;r>=7;)d(b,1e7,0),r-=7;for(d(b,l(10,r,1),0),r=t-1;r>=23;)h(b,8388608),r-=23;h(b,1<0?m+((s=g.length)<=c?"0."+o.call("0",c-s)+g:g.slice(0,s-c)+"."+g.slice(s-c)):m+g}})},55147(e,t,n){"use strict";var r=n(82109),i=n(47293),a=n(50863),o=1..toPrecision,s=i(function(){return"1"!==o.call(1,void 0)})||!i(function(){o.call({})});r({target:"Number",proto:!0,forced:s},{toPrecision:function(e){return void 0===e?o.call(a(this)):o.call(a(this),e)}})},19601(e,t,n){var r=n(82109),i=n(21574);r({target:"Object",stat:!0,forced:Object.assign!==i},{assign:i})},78011(e,t,n){var 
r=n(82109),i=n(19781),a=n(70030);r({target:"Object",stat:!0,sham:!i},{create:a})},59595(e,t,n){"use strict";var r=n(82109),i=n(19781),a=n(69026),o=n(47908),s=n(13099),u=n(3070);i&&r({target:"Object",proto:!0,forced:a},{__defineGetter__:function(e,t){u.f(o(this),e,{get:s(t),enumerable:!0,configurable:!0})}})},33321(e,t,n){var r=n(82109),i=n(19781),a=n(36048);r({target:"Object",stat:!0,forced:!i,sham:!i},{defineProperties:a})},69070(e,t,n){var r=n(82109),i=n(19781),a=n(3070);r({target:"Object",stat:!0,forced:!i,sham:!i},{defineProperty:a.f})},35500(e,t,n){"use strict";var r=n(82109),i=n(19781),a=n(69026),o=n(47908),s=n(13099),u=n(3070);i&&r({target:"Object",proto:!0,forced:a},{__defineSetter__:function(e,t){u.f(o(this),e,{set:s(t),enumerable:!0,configurable:!0})}})},69720(e,t,n){var r=n(82109),i=n(44699).entries;r({target:"Object",stat:!0},{entries:function(e){return i(e)}})},43371(e,t,n){var r=n(82109),i=n(76677),a=n(47293),o=n(70111),s=n(62423).onFreeze,u=Object.freeze,c=a(function(){u(1)});r({target:"Object",stat:!0,forced:c,sham:!i},{freeze:function(e){return u&&o(e)?u(s(e)):e}})},38559(e,t,n){var r=n(82109),i=n(20408),a=n(86135);r({target:"Object",stat:!0},{fromEntries:function(e){var t={};return i(e,function(e,n){a(t,e,n)},{AS_ENTRIES:!0}),t}})},38880(e,t,n){var r=n(82109),i=n(47293),a=n(45656),o=n(31236).f,s=n(19781),u=i(function(){o(1)}),c=!s||u;r({target:"Object",stat:!0,forced:c,sham:!s},{getOwnPropertyDescriptor:function(e,t){return o(a(e),t)}})},49337(e,t,n){var r=n(82109),i=n(19781),a=n(53887),o=n(45656),s=n(31236),u=n(86135);r({target:"Object",stat:!0,sham:!i},{getOwnPropertyDescriptors:function(e){for(var t,n,r=o(e),i=s.f,c=a(r),l={},f=0;c.length>f;)void 0!==(n=i(r,t=c[f++]))&&u(l,t,n);return l}})},36210(e,t,n){var r=n(82109),i=n(47293),a=n(1156).f,o=i(function(){return!Object.getOwnPropertyNames(1)});r({target:"Object",stat:!0,forced:o},{getOwnPropertyNames:a})},30489(e,t,n){var 
r=n(82109),i=n(47293),a=n(47908),o=n(79518),s=n(49920),u=i(function(){o(1)});r({target:"Object",stat:!0,forced:u,sham:!s},{getPrototypeOf:function(e){return o(a(e))}})},46314(e,t,n){var r=n(82109),i=n(86656);r({target:"Object",stat:!0},{hasOwn:i})},41825(e,t,n){var r=n(82109),i=n(47293),a=n(70111),o=Object.isExtensible,s=i(function(){o(1)});r({target:"Object",stat:!0,forced:s},{isExtensible:function(e){return!!a(e)&&(!o||o(e))}})},98410(e,t,n){var r=n(82109),i=n(47293),a=n(70111),o=Object.isFrozen,s=i(function(){o(1)});r({target:"Object",stat:!0,forced:s},{isFrozen:function(e){return!a(e)||!!o&&o(e)}})},72200(e,t,n){var r=n(82109),i=n(47293),a=n(70111),o=Object.isSealed,s=i(function(){o(1)});r({target:"Object",stat:!0,forced:s},{isSealed:function(e){return!a(e)||!!o&&o(e)}})},43304(e,t,n){var r=n(82109),i=n(81150);r({target:"Object",stat:!0},{is:i})},47941(e,t,n){var r=n(82109),i=n(47908),a=n(81956),o=n(47293)(function(){a(1)});r({target:"Object",stat:!0,forced:o},{keys:function(e){return a(i(e))}})},94869(e,t,n){"use strict";var r=n(82109),i=n(19781),a=n(69026),o=n(47908),s=n(34948),u=n(79518),c=n(31236).f;i&&r({target:"Object",proto:!0,forced:a},{__lookupGetter__:function(e){var t,n=o(this),r=s(e);do if(t=c(n,r))return t.get;while(n=u(n))}})},33952(e,t,n){"use strict";var r=n(82109),i=n(19781),a=n(69026),o=n(47908),s=n(34948),u=n(79518),c=n(31236).f;i&&r({target:"Object",proto:!0,forced:a},{__lookupSetter__:function(e){var t,n=o(this),r=s(e);do if(t=c(n,r))return t.set;while(n=u(n))}})},57227(e,t,n){var r=n(82109),i=n(70111),a=n(62423).onFreeze,o=n(76677),s=n(47293),u=Object.preventExtensions,c=s(function(){u(1)});r({target:"Object",stat:!0,forced:c,sham:!o},{preventExtensions:function(e){return u&&i(e)?u(a(e)):e}})},60514(e,t,n){var r=n(82109),i=n(70111),a=n(62423).onFreeze,o=n(76677),s=n(47293),u=Object.seal,c=s(function(){u(1)});r({target:"Object",stat:!0,forced:c,sham:!o},{seal:function(e){return u&&i(e)?u(a(e)):e}})},68304(e,t,n){var 
r=n(82109),i=n(27674);r({target:"Object",stat:!0},{setPrototypeOf:i})},41539(e,t,n){var r=n(51694),i=n(31320),a=n(90288);r||i(Object.prototype,"toString",a,{unsafe:!0})},26833(e,t,n){var r=n(82109),i=n(44699).values;r({target:"Object",stat:!0},{values:function(e){return i(e)}})},54678(e,t,n){var r=n(82109),i=n(2814);r({global:!0,forced:parseFloat!=i},{parseFloat:i})},91058(e,t,n){var r=n(82109),i=n(83009);r({global:!0,forced:parseInt!=i},{parseInt:i})},17922(e,t,n){"use strict";var r=n(82109),i=n(13099),a=n(78523),o=n(12534),s=n(20408);r({target:"Promise",stat:!0},{allSettled:function(e){var t=this,n=a.f(t),r=n.resolve,u=n.reject,c=o(function(){var n=i(t.resolve),a=[],o=0,u=1;s(e,function(e){var i=o++,s=!1;a.push(void 0),u++,n.call(t,e).then(function(e){!s&&(s=!0,a[i]={status:"fulfilled",value:e},--u||r(a))},function(e){!s&&(s=!0,a[i]={status:"rejected",reason:e},--u||r(a))})}),--u||r(a)});return c.error&&u(c.value),n.promise}})},34668(e,t,n){"use strict";var r=n(82109),i=n(13099),a=n(35005),o=n(78523),s=n(12534),u=n(20408),c="No one promise resolved";r({target:"Promise",stat:!0},{any:function(e){var t=this,n=o.f(t),r=n.resolve,l=n.reject,f=s(function(){var n=i(t.resolve),o=[],s=0,f=1,d=!1;u(e,function(e){var i=s++,u=!1;o.push(void 0),f++,n.call(t,e).then(function(e){u||d||(d=!0,r(e))},function(e){!u&&!d&&(u=!0,o[i]=e,--f||l(new(a("AggregateError"))(o,c)))})}),--f||l(new(a("AggregateError"))(o,c))});return f.error&&l(f.value),n.promise}})},17727(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(13366),o=n(47293),s=n(35005),u=n(36707),c=n(69478),l=n(31320),f=!!a&&o(function(){a.prototype.finally.call({then:function(){}},function(){})});if(r({target:"Promise",proto:!0,real:!0,forced:f},{finally:function(e){var t=u(this,s("Promise")),n="function"==typeof e;return this.then(n?function(n){return c(t,e()).then(function(){return n})}:e,n?function(n){return c(t,e()).then(function(){throw n})}:e)}}),!i&&"function"==typeof a){var 
d=s("Promise").prototype.finally;a.prototype.finally!==d&&l(a.prototype,"finally",d,{unsafe:!0})}},88674(e,t,n){"use strict";var r,i,a,o,s=n(82109),u=n(31913),c=n(17854),l=n(35005),f=n(13366),d=n(31320),h=n(12248),p=n(27674),b=n(58003),m=n(96340),g=n(70111),v=n(13099),y=n(25787),w=n(42788),_=n(20408),E=n(17072),S=n(36707),k=n(20261).set,x=n(95948),T=n(69478),M=n(842),O=n(78523),A=n(12534),L=n(29909),C=n(54705),I=n(5112),D=n(7871),N=n(35268),P=n(7392),R=I("species"),j="Promise",F=L.get,Y=L.set,B=L.getterFor(j),U=f&&f.prototype,H=f,$=U,z=c.TypeError,G=c.document,W=c.process,K=O.f,V=K,q=!!(G&&G.createEvent&&c.dispatchEvent),Z="function"==typeof PromiseRejectionEvent,X="unhandledrejection",J="rejectionhandled",Q=0,ee=1,et=2,en=1,er=2,ei=!1,ea=C(j,function(){var e=w(H),t=e!==String(H);if(!t&&66===P||u&&!$.finally)return!0;if(P>=51&&/native code/.test(e))return!1;var n=new H(function(e){e(1)}),r=function(e){e(function(){},function(){})};return(n.constructor={})[R]=r,!(ei=n.then(function(){}) instanceof r)||!t&&D&&!Z}),eo=ea||!E(function(e){H.all(e).catch(function(){})}),es=function(e){var t;return!!g(e)&&"function"==typeof(t=e.then)&&t},eu=function(e,t){if(!e.notified){e.notified=!0;var n=e.reactions;x(function(){for(var r=e.value,i=e.state==ee,a=0;n.length>a;){var o,s,u,c=n[a++],l=i?c.ok:c.fail,f=c.resolve,d=c.reject,h=c.domain;try{l?(i||(e.rejection===er&&ed(e),e.rejection=en),!0===l?o=r:(h&&h.enter(),o=l(r),h&&(h.exit(),u=!0)),o===c.promise?d(z("Promise-chain cycle")):(s=es(o))?s.call(o,f,d):f(o)):d(r)}catch(p){h&&!u&&h.exit(),d(p)}}e.reactions=[],e.notified=!1,t&&!e.rejection&&el(e)})}},ec=function(e,t,n){var r,i;q?((r=G.createEvent("Event")).promise=t,r.reason=n,r.initEvent(e,!1,!0),c.dispatchEvent(r)):r={promise:t,reason:n},!Z&&(i=c["on"+e])?i(r):e===X&&M("Unhandled promise rejection",n)},el=function(e){k.call(c,function(){var t,n=e.facade,r=e.value;if(ef(e)&&(t=A(function(){N?W.emit("unhandledRejection",r,n):ec(X,n,r)}),e.rejection=N||ef(e)?er:en,t.error))throw 
t.value})},ef=function(e){return e.rejection!==en&&!e.parent},ed=function(e){k.call(c,function(){var t=e.facade;N?W.emit("rejectionHandled",t):ec(J,t,e.value)})},eh=function(e,t,n){return function(r){e(t,r,n)}},ep=function(e,t,n){e.done||(e.done=!0,n&&(e=n),e.value=t,e.state=et,eu(e,!0))},eb=function(e,t,n){if(!e.done){e.done=!0,n&&(e=n);try{if(e.facade===t)throw z("Promise can't be resolved itself");var r=es(t);r?x(function(){var n={done:!1};try{r.call(t,eh(eb,n,e),eh(ep,n,e))}catch(i){ep(n,i,e)}}):(e.value=t,e.state=ee,eu(e,!1))}catch(i){ep({done:!1},i,e)}}};if(ea&&($=(H=function(e){y(this,H,j),v(e),r.call(this);var t=F(this);try{e(eh(eb,t),eh(ep,t))}catch(n){ep(t,n)}}).prototype,(r=function(e){Y(this,{type:j,done:!1,notified:!1,parent:!1,reactions:[],rejection:!1,state:Q,value:void 0})}).prototype=h($,{then:function(e,t){var n=B(this),r=K(S(this,H));return r.ok="function"!=typeof e||e,r.fail="function"==typeof t&&t,r.domain=N?W.domain:void 0,n.parent=!0,n.reactions.push(r),n.state!=Q&&eu(n,!1),r.promise},catch:function(e){return this.then(void 0,e)}}),i=function(){var e=new r,t=F(e);this.promise=e,this.resolve=eh(eb,t),this.reject=eh(ep,t)},O.f=K=function(e){return e===H||e===a?new i(e):V(e)},!u&&"function"==typeof f&&U!==Object.prototype)){o=U.then,ei||(d(U,"then",function(e,t){var n=this;return new H(function(e,t){o.call(n,e,t)}).then(e,t)},{unsafe:!0}),d(U,"catch",$.catch,{unsafe:!0}));try{delete U.constructor}catch(em){}p&&p(U,$)}s({global:!0,wrap:!0,forced:ea},{Promise:H}),b(H,j,!1,!0),m(j),a=l(j),s({target:j,stat:!0,forced:ea},{reject:function(e){var t=K(this);return t.reject.call(void 0,e),t.promise}}),s({target:j,stat:!0,forced:u||ea},{resolve:function(e){return T(u&&this===a?H:this,e)}}),s({target:j,stat:!0,forced:eo},{all:function(e){var t=this,n=K(t),r=n.resolve,i=n.reject,a=A(function(){var n=v(t.resolve),a=[],o=0,s=1;_(e,function(e){var u=o++,c=!1;a.push(void 0),s++,n.call(t,e).then(function(e){!c&&(c=!0,a[u]=e,--s||r(a))},i)}),--s||r(a)});return 
a.error&&i(a.value),n.promise},race:function(e){var t=this,n=K(t),r=n.reject,i=A(function(){var i=v(t.resolve);_(e,function(e){i.call(t,e).then(n.resolve,r)})});return i.error&&r(i.value),n.promise}})},36535(e,t,n){var r=n(82109),i=n(35005),a=n(13099),o=n(19670),s=n(47293),u=i("Reflect","apply"),c=Function.apply,l=!s(function(){u(function(){})});r({target:"Reflect",stat:!0,forced:l},{apply:function(e,t,n){return a(e),o(n),u?u(e,t,n):c.call(e,t,n)}})},12419(e,t,n){var r=n(82109),i=n(35005),a=n(13099),o=n(19670),s=n(70111),u=n(70030),c=n(27065),l=n(47293),f=i("Reflect","construct"),d=l(function(){function e(){}return!(f(function(){},[],e) instanceof e)}),h=!l(function(){f(function(){})}),p=d||h;r({target:"Reflect",stat:!0,forced:p,sham:p},{construct:function(e,t){a(e),o(t);var n=arguments.length<3?e:a(arguments[2]);if(h&&!d)return f(e,t,n);if(e==n){switch(t.length){case 0:return new e;case 1:return new e(t[0]);case 2:return new e(t[0],t[1]);case 3:return new e(t[0],t[1],t[2]);case 4:return new e(t[0],t[1],t[2],t[3])}var r=[null];return r.push.apply(r,t),new(c.apply(e,r))}var i=n.prototype,l=u(s(i)?i:Object.prototype),p=Function.apply.call(e,l,t);return s(p)?p:l}})},69596(e,t,n){var r=n(82109),i=n(19781),a=n(19670),o=n(34948),s=n(3070),u=n(47293)(function(){Reflect.defineProperty(s.f({},1,{value:1}),1,{value:2})});r({target:"Reflect",stat:!0,forced:u,sham:!i},{defineProperty:function(e,t,n){a(e);var r=o(t);a(n);try{return s.f(e,r,n),!0}catch(i){return!1}}})},52586(e,t,n){var r=n(82109),i=n(19670),a=n(31236).f;r({target:"Reflect",stat:!0},{deleteProperty:function(e,t){var n=a(i(e),t);return(!n||!!n.configurable)&&delete e[t]}})},95683(e,t,n){var r=n(82109),i=n(19781),a=n(19670),o=n(31236);r({target:"Reflect",stat:!0,sham:!i},{getOwnPropertyDescriptor:function(e,t){return o.f(a(e),t)}})},39361(e,t,n){var r=n(82109),i=n(19670),a=n(79518),o=n(49920);r({target:"Reflect",stat:!0,sham:!o},{getPrototypeOf:function(e){return a(i(e))}})},74819(e,t,n){var 
r=n(82109),i=n(70111),a=n(19670),o=n(45032),s=n(31236),u=n(79518);function c(e,t){var n,r,l=arguments.length<3?e:arguments[2];return a(e)===l?e[t]:(n=s.f(e,t))?o(n)?n.value:void 0===n.get?void 0:n.get.call(l):i(r=u(e))?c(r,t,l):void 0}r({target:"Reflect",stat:!0},{get:c})},51037(e,t,n){n(82109)({target:"Reflect",stat:!0},{has:function(e,t){return t in e}})},5898(e,t,n){var r=n(82109),i=n(19670),a=Object.isExtensible;r({target:"Reflect",stat:!0},{isExtensible:function(e){return i(e),!a||a(e)}})},67556(e,t,n){var r=n(82109),i=n(53887);r({target:"Reflect",stat:!0},{ownKeys:i})},14361(e,t,n){var r=n(82109),i=n(35005),a=n(19670),o=n(76677);r({target:"Reflect",stat:!0,sham:!o},{preventExtensions:function(e){a(e);try{var t=i("Object","preventExtensions");return t&&t(e),!0}catch(n){return!1}}})},39532(e,t,n){var r=n(82109),i=n(19670),a=n(96077),o=n(27674);o&&r({target:"Reflect",stat:!0},{setPrototypeOf:function(e,t){i(e),a(t);try{return o(e,t),!0}catch(n){return!1}}})},83593(e,t,n){var r=n(82109),i=n(19670),a=n(70111),o=n(45032),s=n(47293),u=n(3070),c=n(31236),l=n(79518),f=n(79114);function d(e,t,n){var r,s,h,p=arguments.length<4?e:arguments[3],b=c.f(i(e),t);if(!b){if(a(s=l(e)))return d(s,t,n,p);b=f(0)}if(o(b)){if(!1===b.writable||!a(p))return!1;if(r=c.f(p,t)){if(r.get||r.set||!1===r.writable)return!1;r.value=n,u.f(p,t,r)}else u.f(p,t,f(0,n))}else{if(void 0===(h=b.set))return!1;h.call(p,n)}return!0}var h=s(function(){var e=function(){},t=u.f(new e,"a",{configurable:!0});return!1!==Reflect.set(e.prototype,"a",1,t)});r({target:"Reflect",stat:!0,forced:h},{set:d})},81299(e,t,n){var r=n(82109),i=n(17854),a=n(58003);r({global:!0},{Reflect:{}}),a(i.Reflect,"Reflect",!0)},24603(e,t,n){var 
r=n(19781),i=n(17854),a=n(54705),o=n(79587),s=n(68880),u=n(3070).f,c=n(8006).f,l=n(47850),f=n(41340),d=n(67066),h=n(52999),p=n(31320),b=n(47293),m=n(86656),g=n(29909).enforce,v=n(96340),y=n(5112),w=n(9441),_=n(38173),E=y("match"),S=i.RegExp,k=S.prototype,x=/^\?<[^\s\d!#%&*+<=>@^][^\s!#%&*+<=>@^]*>/,T=/a/g,M=/a/g,O=new S(T)!==T,A=h.UNSUPPORTED_Y,L=r&&(!O||A||w||_||b(function(){return M[E]=!1,S(T)!=T||S(M)==M||"/a/i"!=S(T,"i")})),C=function(e){for(var t,n=e.length,r=0,i="",a=!1;r<=n;r++){if("\\"===(t=e.charAt(r))){i+=t+e.charAt(++r);continue}a||"."!==t?("["===t?a=!0:"]"===t&&(a=!1),i+=t):i+="[\\s\\S]"}return i},I=function(e){for(var t,n=e.length,r=0,i="",a=[],o={},s=!1,u=!1,c=0,l="";r<=n;r++){if("\\"===(t=e.charAt(r)))t+=e.charAt(++r);else if("]"===t)s=!1;else if(!s)switch(!0){case"["===t:s=!0;break;case"("===t:x.test(e.slice(r+1))&&(r+=2,u=!0),i+=t,c++;continue;case">"===t&&u:if(""===l||m(o,l))throw SyntaxError("Invalid capture group name");o[l]=!0,a.push([l,c]),u=!1,l="";continue}u?l+=t:i+=t}return[i,a]};if(a("RegExp",L)){for(var D=function(e,t){var n,r,i,a,u,c,h=this instanceof D,p=l(e),b=void 0===t,m=[],v=e;if(!h&&p&&b&&e.constructor===D)return e;if((p||e instanceof D)&&(e=e.source,b&&(t=("flags"in v)?v.flags:d.call(v))),e=void 0===e?"":f(e),t=void 0===t?"":f(t),v=e,w&&("dotAll"in T)&&(r=!!t&&t.indexOf("s")>-1)&&(t=t.replace(/s/g,"")),n=t,A&&("sticky"in T)&&(i=!!t&&t.indexOf("y")>-1)&&(t=t.replace(/y/g,"")),_&&(e=(a=I(e))[0],m=a[1]),u=o(S(e,t),h?this:k,D),(r||i||m.length)&&(c=g(u),r&&(c.dotAll=!0,c.raw=D(C(e),n)),i&&(c.sticky=!0),m.length&&(c.groups=m)),e!==v)try{s(u,"source",""===v?"(?:)":v)}catch(y){}return u},N=function(e){(e in D)||u(D,e,{configurable:!0,get:function(){return S[e]},set:function(t){S[e]=t}})},P=c(S),R=0;P.length>R;)N(P[R++]);k.constructor=D,D.prototype=k,p(i,"RegExp",D)}v("RegExp")},28450(e,t,n){var r=n(19781),i=n(9441),a=n(3070).f,o=n(29909).get,s=RegExp.prototype;r&&i&&a(s,"dotAll",{configurable:!0,get:function(){if(this!==s){if(this 
instanceof RegExp)return!!o(this).dotAll;throw TypeError("Incompatible receiver, RegExp required")}}})},74916(e,t,n){"use strict";var r=n(82109),i=n(22261);r({target:"RegExp",proto:!0,forced:/./.exec!==i},{exec:i})},92087(e,t,n){var r=n(19781),i=n(3070),a=n(67066),o=n(47293);r&&o(function(){return"sy"!==Object.getOwnPropertyDescriptor(RegExp.prototype,"flags").get.call({dotAll:!0,sticky:!0})})&&i.f(RegExp.prototype,"flags",{configurable:!0,get:a})},88386(e,t,n){var r=n(19781),i=n(52999).UNSUPPORTED_Y,a=n(3070).f,o=n(29909).get,s=RegExp.prototype;r&&i&&a(s,"sticky",{configurable:!0,get:function(){if(this!==s){if(this instanceof RegExp)return!!o(this).sticky;throw TypeError("Incompatible receiver, RegExp required")}}})},77601(e,t,n){"use strict";n(74916);var r,i,a=n(82109),o=n(70111),s=(r=!1,(i=/[ac]/).exec=function(){return r=!0,/./.exec.apply(this,arguments)},!0===i.test("abc")&&r),u=/./.test;a({target:"RegExp",proto:!0,forced:!s},{test:function(e){if("function"!=typeof this.exec)return u.call(this,e);var t=this.exec(e);if(null!==t&&!o(t))throw Error("RegExp exec method returned something other than an Object or null");return!!t}})},39714(e,t,n){"use strict";var r=n(31320),i=n(19670),a=n(41340),o=n(47293),s=n(67066),u="toString",c=RegExp.prototype,l=c[u],f=o(function(){return"/a/b"!=l.call({source:"a",flags:"b"})}),d=l.name!=u;(f||d)&&r(RegExp.prototype,u,function(){var e=i(this),t=a(e.source),n=e.flags,r=a(void 0===n&&e instanceof RegExp&&!("flags"in c)?s.call(e):n);return"/"+t+"/"+r},{unsafe:!0})},70189(e,t,n){"use strict";var r=n(77710),i=n(95631);e.exports=r("Set",function(e){return function(){return e(this,arguments.length?arguments[0]:void 0)}},i)},15218(e,t,n){"use strict";var r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("anchor")},{anchor:function(e){return i(this,"a","name",e)}})},24506(e,t,n){"use strict";var 
r=n(82109),i=n(84488),a=n(99958),o=n(17466),s=n(41340),u=n(47293)(function(){return"\uD842"!=="𠮷".at(0)});r({target:"String",proto:!0,forced:u},{at:function(e){var t=s(i(this)),n=o(t.length),r=a(e),u=r>=0?r:n+r;return u<0||u>=n?void 0:t.charAt(u)}})},74475(e,t,n){"use strict";var r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("big")},{big:function(){return i(this,"big","","")}})},57929(e,t,n){"use strict";var r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("blink")},{blink:function(){return i(this,"blink","","")}})},50915(e,t,n){"use strict";var r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("bold")},{bold:function(){return i(this,"b","","")}})},79841(e,t,n){"use strict";var r=n(82109),i=n(28710).codeAt;r({target:"String",proto:!0},{codePointAt:function(e){return i(this,e)}})},27852(e,t,n){"use strict";var r,i=n(82109),a=n(31236).f,o=n(17466),s=n(41340),u=n(3929),c=n(84488),l=n(84964),f=n(31913),d="".endsWith,h=Math.min,p=l("endsWith"),b=!f&&!p&&!!(r=a(String.prototype,"endsWith"))&&!r.writable;i({target:"String",proto:!0,forced:!b&&!p},{endsWith:function(e){var t=s(c(this));u(e);var n=arguments.length>1?arguments[1]:void 0,r=o(t.length),i=void 0===n?r:h(o(n),r),a=s(e);return d?d.call(t,a,i):t.slice(i-a.length,i)===a}})},29253(e,t,n){"use strict";var r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("fixed")},{fixed:function(){return i(this,"tt","","")}})},42125(e,t,n){"use strict";var r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("fontcolor")},{fontcolor:function(e){return i(this,"font","color",e)}})},78830(e,t,n){"use strict";var r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("fontsize")},{fontsize:function(e){return i(this,"font","size",e)}})},94953(e,t,n){var r=n(82109),i=n(51400),a=String.fromCharCode,o=String.fromCodePoint;r({target:"String",stat:!0,forced:!!o&&1!=o.length},{fromCodePoint:function(e){for(var 
t,n=[],r=arguments.length,o=0;r>o;){if(t=+arguments[o++],i(t,1114111)!==t)throw RangeError(t+" is not a valid code point");n.push(t<65536?a(t):a(((t-=65536)>>10)+55296,t%1024+56320))}return n.join("")}})},32023(e,t,n){"use strict";var r=n(82109),i=n(3929),a=n(84488),o=n(41340),s=n(84964);r({target:"String",proto:!0,forced:!s("includes")},{includes:function(e){return!!~o(a(this)).indexOf(o(i(e)),arguments.length>1?arguments[1]:void 0)}})},58734(e,t,n){"use strict";var r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("italics")},{italics:function(){return i(this,"i","","")}})},78783(e,t,n){"use strict";var r=n(28710).charAt,i=n(41340),a=n(29909),o=n(70654),s="String Iterator",u=a.set,c=a.getterFor(s);o(String,"String",function(e){u(this,{type:s,string:i(e),index:0})},function(){var e,t=c(this),n=t.string,i=t.index;return i>=n.length?{value:void 0,done:!0}:(e=r(n,i),t.index+=e.length,{value:e,done:!1})})},29254(e,t,n){"use strict";var r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("link")},{link:function(e){return i(this,"a","href",e)}})},76373(e,t,n){"use strict";var r=n(82109),i=n(24994),a=n(84488),o=n(17466),s=n(41340),u=n(13099),c=n(19670),l=n(84326),f=n(47850),d=n(67066),h=n(68880),p=n(47293),b=n(5112),m=n(36707),g=n(31530),v=n(29909),y=n(31913),w=b("matchAll"),_="RegExp String",E=_+" Iterator",S=v.set,k=v.getterFor(E),x=RegExp.prototype,T=x.exec,M="".matchAll,O=!!M&&!p(function(){"a".matchAll(/./)}),A=function(e,t){var n,r=e.exec;if("function"==typeof r){if("object"!=typeof(n=r.call(e,t)))throw TypeError("Incorrect exec result");return n}return T.call(e,t)},L=i(function(e,t,n,r){S(this,{type:E,regexp:e,string:t,global:n,unicode:r,done:!1})},_,function(){var e=k(this);if(e.done)return{value:void 0,done:!0};var t=e.regexp,n=e.string,r=A(t,n);return null===r?{value:void 0,done:e.done=!0}:e.global?(""===s(r[0])&&(t.lastIndex=g(n,o(t.lastIndex),e.unicode)),{value:r,done:!1}):(e.done=!0,{value:r,done:!1})}),C=function(e){var 
t,n,r,i,a,u,l=c(this),f=s(e);return t=m(l,RegExp),void 0===(n=l.flags)&&l instanceof RegExp&&!("flags"in x)&&(n=d.call(l)),r=void 0===n?"":s(n),i=new t(t===RegExp?l.source:l,r),a=!!~r.indexOf("g"),u=!!~r.indexOf("u"),i.lastIndex=o(l.lastIndex),new L(i,f,a,u)};r({target:"String",proto:!0,forced:O},{matchAll:function(e){var t,n,r,i,o=a(this);if(null!=e){if(f(e)&&!~(t=s(a("flags"in x?e.flags:d.call(e)))).indexOf("g"))throw TypeError("`.matchAll` does not allow non-global regexes");if(O)return M.apply(o,arguments);if(void 0===(r=e[w])&&y&&"RegExp"==l(e)&&(r=C),null!=r)return u(r).call(e,o)}else if(O)return M.apply(o,arguments);return n=s(o),i=RegExp(e,"g"),y?C.call(i,n):i[w](n)}}),y||w in x||h(x,w,C)},4723(e,t,n){"use strict";var r=n(27007),i=n(19670),a=n(17466),o=n(41340),s=n(84488),u=n(31530),c=n(97651);r("match",function(e,t,n){return[function(t){var n=s(this),r=void 0==t?void 0:t[e];return void 0!==r?r.call(t,n):RegExp(t)[e](o(n))},function(e){var r,s=i(this),l=o(e),f=n(t,s,l);if(f.done)return f.value;if(!s.global)return c(s,l);var d=s.unicode;s.lastIndex=0;for(var h=[],p=0;null!==(r=c(s,l));){var b=o(r[0]);h[p]=b,""===b&&(s.lastIndex=u(l,a(s.lastIndex),d)),p++}return 0===p?null:h}]})},66528(e,t,n){"use strict";var r=n(82109),i=n(76650).end,a=n(54986);r({target:"String",proto:!0,forced:a},{padEnd:function(e){return i(this,e,arguments.length>1?arguments[1]:void 0)}})},83112(e,t,n){"use strict";var r=n(82109),i=n(76650).start,a=n(54986);r({target:"String",proto:!0,forced:a},{padStart:function(e){return i(this,e,arguments.length>1?arguments[1]:void 0)}})},38992(e,t,n){var r=n(82109),i=n(45656),a=n(17466),o=n(41340);r({target:"String",stat:!0},{raw:function(e){for(var t=i(e.raw),n=a(t.length),r=arguments.length,s=[],u=0;n>u;)s.push(o(t[u++])),ue.length?-1:""===t?n:e.indexOf(t,n)};r({target:"String",proto:!0},{replaceAll:function(e,t){var n,r,c,b,m,g,v,y,w,_=i(this),E=0,S=0,k="";if(null!=e){if((n=a(e))&&!~(r=o(i("flags"in d?e.flags:s.call(e)))).indexOf("g"))throw 
TypeError("`.replaceAll` does not allow non-global regexes");if(void 0!==(c=e[f]))return c.call(e,_,t);if(l&&n)return o(_).replace(e,t)}for(b=o(_),m=o(e),(g="function"==typeof t)||(t=o(t)),y=h(1,v=m.length),E=p(b,m,0);-1!==E;)w=g?o(t(m,E,b)):u(m,b,E,[],void 0,t),k+=b.slice(S,E)+w,S=E+v,E=p(b,m,E+y);return S")});r("replace",function(e,t,n){var r=v?"$":"$0";return[function(e,n){var r=c(this),i=void 0==e?void 0:e[h];return void 0!==i?i.call(e,r,n):t.call(u(r),e,n)},function(e,i){var c=a(this),h=u(e);if("string"==typeof i&&-1===i.indexOf(r)&&-1===i.indexOf("$<")){var g=n(t,c,h,i);if(g.done)return g.value}var v="function"==typeof i;v||(i=u(i));var y=c.global;if(y){var w=c.unicode;c.lastIndex=0}for(var _=[];;){var E=d(c,h);if(null===E||(_.push(E),!y))break;""===u(E[0])&&(c.lastIndex=l(h,s(c.lastIndex),w))}for(var S="",k=0,x=0;x<_.length;x++){for(var T=u((E=_[x])[0]),M=p(b(o(E.index),h.length),0),O=[],A=1;A=k&&(S+=h.slice(k,M)+I,k=M+T.length)}return S+h.slice(k)}]},!y||!g||v)},64765(e,t,n){"use strict";var r=n(27007),i=n(19670),a=n(84488),o=n(81150),s=n(41340),u=n(97651);r("search",function(e,t,n){return[function(t){var n=a(this),r=void 0==t?void 0:t[e];return void 0!==r?r.call(t,n):RegExp(t)[e](s(n))},function(e){var r=i(this),a=s(e),c=n(t,r,a);if(c.done)return c.value;var l=r.lastIndex;o(l,0)||(r.lastIndex=0);var f=u(r,a);return o(r.lastIndex,l)||(r.lastIndex=l),null===f?-1:f.index}]})},37268(e,t,n){"use strict";var r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("small")},{small:function(){return i(this,"small","","")}})},23123(e,t,n){"use strict";var r=n(27007),i=n(47850),a=n(19670),o=n(84488),s=n(36707),u=n(31530),c=n(17466),l=n(41340),f=n(97651),d=n(22261),h=n(52999),p=n(47293),b=h.UNSUPPORTED_Y,m=[].push,g=Math.min,v=4294967295,y=!p(function(){var e=/(?:)/,t=e.exec;e.exec=function(){return t.apply(this,arguments)};var n="ab".split(e);return 2!==n.length||"a"!==n[0]||"b"!==n[1]});r("split",function(e,t,n){var r;return 
r="c"=="abbc".split(/(b)*/)[1]||4!="test".split(/(?:)/,-1).length||2!="ab".split(/(?:ab)*/).length||4!=".".split(/(.?)(.?)/).length||".".split(/()()/).length>1||"".split(/.?/).length?function(e,n){var r,a,s,u=l(o(this)),c=void 0===n?v:n>>>0;if(0===c)return[];if(void 0===e)return[u];if(!i(e))return t.call(u,e,c);for(var f=[],h=(e.ignoreCase?"i":"")+(e.multiline?"m":"")+(e.unicode?"u":"")+(e.sticky?"y":""),p=0,b=RegExp(e.source,h+"g");(r=d.call(b,u))&&(!((a=b.lastIndex)>p)||(f.push(u.slice(p,r.index)),r.length>1&&r.index=c)));)b.lastIndex===r.index&&b.lastIndex++;return p===u.length?(s||!b.test(""))&&f.push(""):f.push(u.slice(p)),f.length>c?f.slice(0,c):f}:"0".split(void 0,0).length?function(e,n){return void 0===e&&0===n?[]:t.call(this,e,n)}:t,[function(t,n){var i=o(this),a=void 0==t?void 0:t[e];return void 0!==a?a.call(t,i,n):r.call(l(i),t,n)},function(e,i){var o=a(this),d=l(e),h=n(r,o,d,i,r!==t);if(h.done)return h.value;var p=s(o,RegExp),m=o.unicode,y=(o.ignoreCase?"i":"")+(o.multiline?"m":"")+(o.unicode?"u":"")+(b?"g":"y"),w=new p(b?"^(?:"+o.source+")":o,y),_=void 0===i?v:i>>>0;if(0===_)return[];if(0===d.length)return null===f(w,d)?[d]:[];for(var E=0,S=0,k=[];S1?arguments[1]:void 0,t.length)),r=s(e);return d?d.call(t,r,n):t.slice(n,n+r.length)===r}})},7397(e,t,n){"use strict";var r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("strike")},{strike:function(){return i(this,"strike","","")}})},60086(e,t,n){"use strict";var r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("sub")},{sub:function(){return i(this,"sub","","")}})},83650(e,t,n){"use strict";var r=n(82109),i=n(84488),a=n(99958),o=n(41340),s="".slice,u=Math.max,c=Math.min;r({target:"String",proto:!0},{substr:function(e,t){var n,r,l=o(i(this)),f=l.length,d=a(e);return(d===1/0&&(d=0),d<0&&(d=u(f+d,0)),(n=void 0===t?f:a(t))<=0||n===1/0)?"":(r=c(d+n,f),d>=r?"":s.call(l,d,r))}})},80623(e,t,n){"use strict";var 
r=n(82109),i=n(14230),a=n(43429);r({target:"String",proto:!0,forced:a("sup")},{sup:function(){return i(this,"sup","","")}})},48702(e,t,n){"use strict";var r=n(82109),i=n(53111).end,a=n(76091)("trimEnd"),o=a?function(){return i(this)}:"".trimEnd;r({target:"String",proto:!0,forced:a},{trimEnd:o,trimRight:o})},55674(e,t,n){"use strict";var r=n(82109),i=n(53111).start,a=n(76091)("trimStart"),o=a?function(){return i(this)}:"".trimStart;r({target:"String",proto:!0,forced:a},{trimStart:o,trimLeft:o})},73210(e,t,n){"use strict";var r=n(82109),i=n(53111).trim,a=n(76091);r({target:"String",proto:!0,forced:a("trim")},{trim:function(){return i(this)}})},72443(e,t,n){n(97235)("asyncIterator")},41817(e,t,n){"use strict";var r=n(82109),i=n(19781),a=n(17854),o=n(86656),s=n(70111),u=n(3070).f,c=n(99920),l=a.Symbol;if(i&&"function"==typeof l&&(!("description"in l.prototype)||void 0!==l().description)){var f={},d=function(){var e=arguments.length<1||void 0===arguments[0]?void 0:String(arguments[0]),t=this instanceof d?new l(e):void 0===e?l():l(e);return""===e&&(f[t]=!0),t};c(d,l);var h=d.prototype=l.prototype;h.constructor=d;var p=h.toString,b="Symbol(test)"==String(l("test")),m=/^Symbol\((.*)\)[^)]+$/;u(h,"description",{configurable:!0,get:function(){var e=s(this)?this.valueOf():this,t=p.call(e);if(o(f,e))return"";var n=b?t.slice(7,-1):t.replace(m,"$1");return""===n?void 0:n}}),r({global:!0,forced:!0},{Symbol:d})}},92401(e,t,n){n(97235)("hasInstance")},8722(e,t,n){n(97235)("isConcatSpreadable")},32165(e,t,n){n(97235)("iterator")},82526(e,t,n){"use strict";var 
r=n(82109),i=n(17854),a=n(35005),o=n(31913),s=n(19781),u=n(30133),c=n(47293),l=n(86656),f=n(43157),d=n(70111),h=n(52190),p=n(19670),b=n(47908),m=n(45656),g=n(34948),v=n(41340),y=n(79114),w=n(70030),_=n(81956),E=n(8006),S=n(1156),k=n(25181),x=n(31236),T=n(3070),M=n(55296),O=n(68880),A=n(31320),L=n(72309),C=n(6200),I=n(3501),D=n(69711),N=n(5112),P=n(6061),R=n(97235),j=n(58003),F=n(29909),Y=n(42092).forEach,B=C("hidden"),U="Symbol",H="prototype",$=N("toPrimitive"),z=F.set,G=F.getterFor(U),W=Object[H],K=i.Symbol,V=a("JSON","stringify"),q=x.f,Z=T.f,X=S.f,J=M.f,Q=L("symbols"),ee=L("op-symbols"),et=L("string-to-symbol-registry"),en=L("symbol-to-string-registry"),er=L("wks"),ei=i.QObject,ea=!ei||!ei[H]||!ei[H].findChild,eo=s&&c(function(){return 7!=w(Z({},"a",{get:function(){return Z(this,"a",{value:7}).a}})).a})?function(e,t,n){var r=q(W,t);r&&delete W[t],Z(e,t,n),r&&e!==W&&Z(W,t,r)}:Z,es=function(e,t){var n=Q[e]=w(K[H]);return z(n,{type:U,tag:e,description:t}),s||(n.description=t),n},eu=function(e,t,n){e===W&&eu(ee,t,n),p(e);var r=g(t);return(p(n),l(Q,r))?(n.enumerable?(l(e,B)&&e[B][r]&&(e[B][r]=!1),n=w(n,{enumerable:y(0,!1)})):(l(e,B)||Z(e,B,y(1,{})),e[B][r]=!0),eo(e,r,n)):Z(e,r,n)},ec=function(e,t){p(e);var n=m(t),r=_(n).concat(ep(n));return Y(r,function(t){(!s||ef.call(n,t))&&eu(e,t,n[t])}),e},el=function(e,t){return void 0===t?w(e):ec(w(e),t)},ef=function(e){var t=g(e),n=J.call(this,t);return(!(this===W&&l(Q,t))||!!l(ee,t))&&(!(n||!l(this,t)||!l(Q,t)||l(this,B)&&this[B][t])||n)},ed=function(e,t){var n=m(e),r=g(t);if(!(n===W&&l(Q,r))||l(ee,r)){var i=q(n,r);return i&&l(Q,r)&&!(l(n,B)&&n[B][r])&&(i.enumerable=!0),i}},eh=function(e){var t=X(m(e)),n=[];return Y(t,function(e){l(Q,e)||l(I,e)||n.push(e)}),n},ep=function(e){var t=e===W,n=X(t?ee:m(e)),r=[];return Y(n,function(e){l(Q,e)&&(!t||l(W,e))&&r.push(Q[e])}),r};if(u||(A((K=function(){if(this instanceof K)throw TypeError("Symbol is not a constructor");var e=arguments.length&&void 0!==arguments[0]?v(arguments[0]):void 
0,t=D(e),n=function(e){this===W&&n.call(ee,e),l(this,B)&&l(this[B],t)&&(this[B][t]=!1),eo(this,t,y(1,e))};return s&&ea&&eo(W,t,{configurable:!0,set:n}),es(t,e)})[H],"toString",function(){return G(this).tag}),A(K,"withoutSetter",function(e){return es(D(e),e)}),M.f=ef,T.f=eu,x.f=ed,E.f=S.f=eh,k.f=ep,P.f=function(e){return es(N(e),e)},s&&(Z(K[H],"description",{configurable:!0,get:function(){return G(this).description}}),o||A(W,"propertyIsEnumerable",ef,{unsafe:!0}))),r({global:!0,wrap:!0,forced:!u,sham:!u},{Symbol:K}),Y(_(er),function(e){R(e)}),r({target:U,stat:!0,forced:!u},{for:function(e){var t=v(e);if(l(et,t))return et[t];var n=K(t);return et[t]=n,en[n]=t,n},keyFor:function(e){if(!h(e))throw TypeError(e+" is not a symbol");if(l(en,e))return en[e]},useSetter:function(){ea=!0},useSimple:function(){ea=!1}}),r({target:"Object",stat:!0,forced:!u,sham:!s},{create:el,defineProperty:eu,defineProperties:ec,getOwnPropertyDescriptor:ed}),r({target:"Object",stat:!0,forced:!u},{getOwnPropertyNames:eh,getOwnPropertySymbols:ep}),r({target:"Object",stat:!0,forced:c(function(){k.f(1)})},{getOwnPropertySymbols:function(e){return k.f(b(e))}}),V){var eb=!u||c(function(){var e=K();return"[null]"!=V([e])||"{}"!=V({a:e})||"{}"!=V(Object(e))});r({target:"JSON",stat:!0,forced:eb},{stringify:function(e,t,n){for(var r,i=[e],a=1;arguments.length>a;)i.push(arguments[a++]);if(r=t,!(!d(t)&&void 0===e||h(e)))return f(t)||(t=function(e,t){if("function"==typeof r&&(t=r.call(this,e,t)),!h(t))return t}),i[1]=t,V.apply(null,i)}})}K[H][$]||O(K[H],$,K[H].valueOf),j(K,U),I[B]=!0},16066(e,t,n){n(97235)("matchAll")},69007(e,t,n){n(97235)("match")},83510(e,t,n){n(97235)("replace")},41840(e,t,n){n(97235)("search")},6982(e,t,n){n(97235)("species")},32159(e,t,n){n(97235)("split")},96649(e,t,n){n(97235)("toPrimitive")},39341(e,t,n){n(97235)("toStringTag")},60543(e,t,n){n(97235)("unscopables")},48675(e,t,n){"use strict";var 
r=n(90260),i=n(17466),a=n(99958),o=r.aTypedArray;(0,r.exportTypedArrayMethod)("at",function(e){var t=o(this),n=i(t.length),r=a(e),s=r>=0?r:n+r;return s<0||s>=n?void 0:t[s]})},92990(e,t,n){"use strict";var r=n(90260),i=n(1048),a=r.aTypedArray;(0,r.exportTypedArrayMethod)("copyWithin",function(e,t){return i.call(a(this),e,t,arguments.length>2?arguments[2]:void 0)})},18927(e,t,n){"use strict";var r=n(90260),i=n(42092).every,a=r.aTypedArray;(0,r.exportTypedArrayMethod)("every",function(e){return i(a(this),e,arguments.length>1?arguments[1]:void 0)})},33105(e,t,n){"use strict";var r=n(90260),i=n(21285),a=r.aTypedArray;(0,r.exportTypedArrayMethod)("fill",function(e){return i.apply(a(this),arguments)})},35035(e,t,n){"use strict";var r=n(90260),i=n(42092).filter,a=n(43074),o=r.aTypedArray;(0,r.exportTypedArrayMethod)("filter",function(e){var t=i(o(this),e,arguments.length>1?arguments[1]:void 0);return a(this,t)})},7174(e,t,n){"use strict";var r=n(90260),i=n(42092).findIndex,a=r.aTypedArray;(0,r.exportTypedArrayMethod)("findIndex",function(e){return i(a(this),e,arguments.length>1?arguments[1]:void 0)})},74345(e,t,n){"use strict";var r=n(90260),i=n(42092).find,a=r.aTypedArray;(0,r.exportTypedArrayMethod)("find",function(e){return i(a(this),e,arguments.length>1?arguments[1]:void 0)})},44197(e,t,n){n(19843)("Float32",function(e){return function(t,n,r){return e(this,t,n,r)}})},76495(e,t,n){n(19843)("Float64",function(e){return function(t,n,r){return e(this,t,n,r)}})},32846(e,t,n){"use strict";var r=n(90260),i=n(42092).forEach,a=r.aTypedArray;(0,r.exportTypedArrayMethod)("forEach",function(e){i(a(this),e,arguments.length>1?arguments[1]:void 0)})},98145(e,t,n){"use strict";var r=n(63832),i=n(90260).exportTypedArrayStaticMethod,a=n(97321);i("from",a,r)},44731(e,t,n){"use strict";var r=n(90260),i=n(41318).includes,a=r.aTypedArray;(0,r.exportTypedArrayMethod)("includes",function(e){return i(a(this),e,arguments.length>1?arguments[1]:void 0)})},77209(e,t,n){"use strict";var 
r=n(90260),i=n(41318).indexOf,a=r.aTypedArray;(0,r.exportTypedArrayMethod)("indexOf",function(e){return i(a(this),e,arguments.length>1?arguments[1]:void 0)})},35109(e,t,n){n(19843)("Int16",function(e){return function(t,n,r){return e(this,t,n,r)}})},65125(e,t,n){n(19843)("Int32",function(e){return function(t,n,r){return e(this,t,n,r)}})},87145(e,t,n){n(19843)("Int8",function(e){return function(t,n,r){return e(this,t,n,r)}})},96319(e,t,n){"use strict";var r=n(17854),i=n(90260),a=n(66992),o=n(5112)("iterator"),s=r.Uint8Array,u=a.values,c=a.keys,l=a.entries,f=i.aTypedArray,d=i.exportTypedArrayMethod,h=s&&s.prototype[o],p=!!h&&("values"==h.name||void 0==h.name),b=function(){return u.call(f(this))};d("entries",function(){return l.call(f(this))}),d("keys",function(){return c.call(f(this))}),d("values",b,!p),d(o,b,!p)},58867(e,t,n){"use strict";var r=n(90260),i=r.aTypedArray,a=r.exportTypedArrayMethod,o=[].join;a("join",function(e){return o.apply(i(this),arguments)})},37789(e,t,n){"use strict";var r=n(90260),i=n(86583),a=r.aTypedArray;(0,r.exportTypedArrayMethod)("lastIndexOf",function(e){return i.apply(a(this),arguments)})},33739(e,t,n){"use strict";var r=n(90260),i=n(42092).map,a=n(66304),o=r.aTypedArray;(0,r.exportTypedArrayMethod)("map",function(e){return i(o(this),e,arguments.length>1?arguments[1]:void 0,function(e,t){return new(a(e))(t)})})},95206(e,t,n){"use strict";var r=n(90260),i=n(63832),a=r.aTypedArrayConstructor;(0,r.exportTypedArrayStaticMethod)("of",function(){for(var e=0,t=arguments.length,n=new(a(this))(t);t>e;)n[e]=arguments[e++];return n},i)},14483(e,t,n){"use strict";var r=n(90260),i=n(53671).right,a=r.aTypedArray;(0,r.exportTypedArrayMethod)("reduceRight",function(e){return i(a(this),e,arguments.length,arguments.length>1?arguments[1]:void 0)})},29368(e,t,n){"use strict";var r=n(90260),i=n(53671).left,a=r.aTypedArray;(0,r.exportTypedArrayMethod)("reduce",function(e){return i(a(this),e,arguments.length,arguments.length>1?arguments[1]:void 
0)})},12056(e,t,n){"use strict";var r=n(90260),i=r.aTypedArray,a=r.exportTypedArrayMethod,o=Math.floor;a("reverse",function(){for(var e,t=this,n=i(t).length,r=o(n/2),a=0;a1?arguments[1]:void 0,1),n=this.length,r=o(e),s=i(r.length),c=0;if(s+t>n)throw RangeError("Wrong length");for(;ca;)c[a]=n[a++];return c},c)},27462(e,t,n){"use strict";var r=n(90260),i=n(42092).some,a=r.aTypedArray;(0,r.exportTypedArrayMethod)("some",function(e){return i(a(this),e,arguments.length>1?arguments[1]:void 0)})},33824(e,t,n){"use strict";var r=n(90260),i=n(17854),a=n(47293),o=n(13099),s=n(17466),u=n(94362),c=n(68886),l=n(30256),f=n(7392),d=n(98008),h=r.aTypedArray,p=r.exportTypedArrayMethod,b=i.Uint16Array,m=b&&b.prototype.sort,g=!!m&&!a(function(){var e=new b(2);e.sort(null),e.sort({})}),v=!!m&&!a(function(){if(f)return f<74;if(c)return c<67;if(l)return!0;if(d)return d<602;var e,t,n=new b(516),r=Array(516);for(e=0;e<516;e++)t=e%4,n[e]=515-e,r[e]=e-2*t+3;for(n.sort(function(e,t){return(e/4|0)-(t/4|0)}),e=0;e<516;e++)if(n[e]!==r[e])return!0}),y=function(e){return function(t,n){return void 0!==e?+e(t,n)||0:n!=n?-1:t!=t?1:0===t&&0===n?1/t>0&&1/n<0?1:-1:t>n}};p("sort",function(e){var t,n=this;if(void 0!==e&&o(e),v)return m.call(n,e);h(n);var r=s(n.length),i=Array(r);for(t=0;t1?arguments[1]:void 0)}}),a("filterOut")},34286(e,t,n){"use strict";var r=n(82109),i=n(42092).filterReject,a=n(51223);r({target:"Array",proto:!0},{filterReject:function(e){return i(this,e,arguments.length>1?arguments[1]:void 0)}}),a("filterReject")},77461(e,t,n){"use strict";var r=n(82109),i=n(9671).findLastIndex,a=n(51223);r({target:"Array",proto:!0},{findLastIndex:function(e){return i(this,e,arguments.length>1?arguments[1]:void 0)}}),a("findLastIndex")},3048(e,t,n){"use strict";var r=n(82109),i=n(9671).findLast,a=n(51223);r({target:"Array",proto:!0},{findLast:function(e){return i(this,e,arguments.length>1?arguments[1]:void 0)}}),a("findLast")},1999(e,t,n){"use strict";var 
r=n(82109),i=n(61386),a=n(77475),o=n(51223);r({target:"Array",proto:!0},{groupBy:function(e){var t=arguments.length>1?arguments[1]:void 0;return i(this,e,t,a)}}),o("groupBy")},8e4(e,t,n){var r=n(82109),i=n(43157),a=Object.isFrozen,o=function(e,t){if(!a||!i(e)||!a(e))return!1;for(var n,r=0,o=e.length;r1?arguments[1]:void 0,3);return!u(n,function(e,n,i){if(!r(n,e,t))return i()},{AS_ENTRIES:!0,IS_ITERATOR:!0,INTERRUPTED:!0}).stopped}})},71957(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(35005),o=n(19670),s=n(13099),u=n(49974),c=n(36707),l=n(54647),f=n(20408);r({target:"Map",proto:!0,real:!0,forced:i},{filter:function(e){var t=o(this),n=l(t),r=u(e,arguments.length>1?arguments[1]:void 0,3),i=new(c(t,a("Map"))),d=s(i.set);return f(n,function(e,n){r(n,e,t)&&d.call(i,e,n)},{AS_ENTRIES:!0,IS_ITERATOR:!0}),i}})},103(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(19670),o=n(49974),s=n(54647),u=n(20408);r({target:"Map",proto:!0,real:!0,forced:i},{findKey:function(e){var t=a(this),n=s(t),r=o(e,arguments.length>1?arguments[1]:void 0,3);return u(n,function(e,n,i){if(r(n,e,t))return i(e)},{AS_ENTRIES:!0,IS_ITERATOR:!0,INTERRUPTED:!0}).result}})},96306(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(19670),o=n(49974),s=n(54647),u=n(20408);r({target:"Map",proto:!0,real:!0,forced:i},{find:function(e){var t=a(this),n=s(t),r=o(e,arguments.length>1?arguments[1]:void 0,3);return u(n,function(e,n,i){if(r(n,e,t))return i(n)},{AS_ENTRIES:!0,IS_ITERATOR:!0,INTERRUPTED:!0}).result}})},8582(e,t,n){var r=n(82109),i=n(27296);r({target:"Map",stat:!0},{from:i})},90618(e,t,n){"use strict";var r=n(82109),i=n(20408),a=n(13099);r({target:"Map",stat:!0},{groupBy:function(e,t){var n=new this;a(t);var r=a(n.has),o=a(n.get),s=a(n.set);return i(e,function(e){var i=t(e);r.call(n,i)?o.call(n,i).push(e):s.call(n,i,[e])}),n}})},74592(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(19670),o=n(54647),s=n(46465),u=n(20408);r({target:"Map",proto:!0,real:!0,forced:i},{includes:function(e){return 
u(o(a(this)),function(t,n,r){if(s(n,e))return r()},{AS_ENTRIES:!0,IS_ITERATOR:!0,INTERRUPTED:!0}).stopped}})},88440(e,t,n){"use strict";var r=n(82109),i=n(20408),a=n(13099);r({target:"Map",stat:!0},{keyBy:function(e,t){var n=new this;a(t);var r=a(n.set);return i(e,function(e){r.call(n,t(e),e)}),n}})},58276(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(19670),o=n(54647),s=n(20408);r({target:"Map",proto:!0,real:!0,forced:i},{keyOf:function(e){return s(o(a(this)),function(t,n,r){if(n===e)return r(t)},{AS_ENTRIES:!0,IS_ITERATOR:!0,INTERRUPTED:!0}).result}})},35082(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(35005),o=n(19670),s=n(13099),u=n(49974),c=n(36707),l=n(54647),f=n(20408);r({target:"Map",proto:!0,real:!0,forced:i},{mapKeys:function(e){var t=o(this),n=l(t),r=u(e,arguments.length>1?arguments[1]:void 0,3),i=new(c(t,a("Map"))),d=s(i.set);return f(n,function(e,n){d.call(i,r(n,e,t),n)},{AS_ENTRIES:!0,IS_ITERATOR:!0}),i}})},12813(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(35005),o=n(19670),s=n(13099),u=n(49974),c=n(36707),l=n(54647),f=n(20408);r({target:"Map",proto:!0,real:!0,forced:i},{mapValues:function(e){var t=o(this),n=l(t),r=u(e,arguments.length>1?arguments[1]:void 0,3),i=new(c(t,a("Map"))),d=s(i.set);return f(n,function(e,n){d.call(i,e,r(n,e,t))},{AS_ENTRIES:!0,IS_ITERATOR:!0}),i}})},18222(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(19670),o=n(13099),s=n(20408);r({target:"Map",proto:!0,real:!0,forced:i},{merge:function(e){for(var t=a(this),n=o(t.set),r=arguments.length,i=0;i1?arguments[1]:void 0,3);return u(n,function(e,n,i){if(r(n,e,t))return i()},{AS_ENTRIES:!0,IS_ITERATOR:!0,INTERRUPTED:!0}).stopped}})},74442(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(8154);r({target:"Map",proto:!0,real:!0,forced:i},{updateOrInsert:a})},7512(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(19670),o=n(13099);r({target:"Map",proto:!0,real:!0,forced:i},{update:function(e,t){var n=a(this),r=arguments.length;o(t);var i=n.has(e);if(!i&&r<3)throw 
TypeError("Updating absent value");var s=i?n.get(e):o(r>2?arguments[2]:void 0)(e,n);return n.set(e,t(s,e,n)),n}})},87713(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(8154);r({target:"Map",proto:!0,real:!0,forced:i},{upsert:a})},46603(e,t,n){var r=n(82109),i=Math.min,a=Math.max;r({target:"Math",stat:!0},{clamp:function(e,t,n){return i(n,a(t,e))}})},70100(e,t,n){n(82109)({target:"Math",stat:!0},{DEG_PER_RAD:Math.PI/180})},26429(e,t,n){var r=n(82109),i=180/Math.PI;r({target:"Math",stat:!0},{degrees:function(e){return e*i}})},13187(e,t,n){var r=n(82109),i=n(47103),a=n(26130);r({target:"Math",stat:!0},{fscale:function(e,t,n,r,o){return a(i(e,t,n,r,o))}})},60092(e,t,n){n(82109)({target:"Math",stat:!0},{iaddh:function(e,t,n,r){var i=e>>>0,a=n>>>0;return(t>>>0)+(r>>>0)+((i&a|(i|a)&~(i+a>>>0))>>>31)|0}})},19041(e,t,n){n(82109)({target:"Math",stat:!0},{imulh:function(e,t){var n=65535,r=+e,i=+t,a=r&n,o=i&n,s=r>>16,u=i>>16,c=(s*o>>>0)+(a*o>>>16);return s*u+(c>>16)+((a*u>>>0)+(c&n)>>16)}})},30666(e,t,n){n(82109)({target:"Math",stat:!0},{isubh:function(e,t,n,r){var i=e>>>0,a=n>>>0;return(t>>>0)-(r>>>0)-((~i&a|~(i^a)&i-a>>>0)>>>31)|0}})},51638(e,t,n){n(82109)({target:"Math",stat:!0},{RAD_PER_DEG:180/Math.PI})},62975(e,t,n){var r=n(82109),i=Math.PI/180;r({target:"Math",stat:!0},{radians:function(e){return e*i}})},15728(e,t,n){var r=n(82109),i=n(47103);r({target:"Math",stat:!0},{scale:i})},46056(e,t,n){var r=n(82109),i=n(19670),a=n(77023),o=n(24994),s=n(29909),u="Seeded Random",c=u+" Generator",l=s.set,f=s.getterFor(c),d='Math.seededPRNG() argument should have a "seed" field with a finite value.',h=o(function(e){l(this,{type:c,seed:e%2147483647})},u,function(){var e=f(this);return{value:(1073741823&(e.seed=(1103515245*e.seed+12345)%2147483647))/1073741823,done:!1}});r({target:"Math",stat:!0,forced:!0},{seededPRNG:function(e){var t=i(e).seed;if(!a(t))throw TypeError(d);return new 
h(t)}})},44299(e,t,n){n(82109)({target:"Math",stat:!0},{signbit:function(e){return(e=+e)==e&&0==e?1/e==-1/0:e<0}})},5162(e,t,n){n(82109)({target:"Math",stat:!0},{umulh:function(e,t){var n=65535,r=+e,i=+t,a=r&n,o=i&n,s=r>>>16,u=i>>>16,c=(s*o>>>0)+(a*o>>>16);return s*u+(c>>>16)+((a*u>>>0)+(c&n)>>>16)}})},50292(e,t,n){"use strict";var r=n(82109),i=n(99958),a=n(83009),o="Invalid number representation",s="Invalid radix",u=/^[\da-z]+$/;r({target:"Number",stat:!0},{fromString:function(e,t){var n,r,c=1;if("string"!=typeof e)throw TypeError(o);if(!e.length||"-"==e.charAt(0)&&(c=-1,!(e=e.slice(1)).length))throw SyntaxError(o);if((n=void 0===t?10:i(t))<2||n>36)throw RangeError(s);if(!u.test(e)||(r=a(e,n)).toString(n)!==e)throw SyntaxError(o);return c*r}})},29427(e,t,n){"use strict";var r=n(82109),i=n(80430);r({target:"Number",stat:!0},{range:function(e,t,n){return new i(e,t,n,"number",0,1)}})},96936(e,t,n){n(46314)},99964(e,t,n){"use strict";var r=n(82109),i=n(60996);r({target:"Object",stat:!0},{iterateEntries:function(e){return new i(e,"entries")}})},75238(e,t,n){"use strict";var r=n(82109),i=n(60996);r({target:"Object",stat:!0},{iterateKeys:function(e){return new i(e,"keys")}})},4987(e,t,n){"use strict";var r=n(82109),i=n(60996);r({target:"Object",stat:!0},{iterateValues:function(e){return new i(e,"values")}})},1025(e,t,n){"use strict";var r=n(82109),i=n(19781),a=n(96340),o=n(13099),s=n(19670),u=n(70111),c=n(25787),l=n(3070).f,f=n(68880),d=n(12248),h=n(18554),p=n(58173),b=n(20408),m=n(842),g=n(5112),v=n(29909),y=g("observable"),w=v.get,_=v.set,E=function(e){var t=e.cleanup;if(t){e.cleanup=void 0;try{t()}catch(n){m(n)}}},S=function(e){return void 0===e.observer},k=function(e){var t=e.facade;if(!i){t.closed=!0;var n=e.subscriptionObserver;n&&(n.closed=!0)}e.observer=void 0},x=function(e,t){var n,r=_(this,{cleanup:void 0,observer:s(e),subscriptionObserver:void 0});i||(this.closed=!1);try{(n=p(e.start))&&n.call(e,this)}catch(a){m(a)}if(!S(r)){var u=r.subscriptionObserver=new 
T(this);try{var c=t(u),l=c;null!=c&&(r.cleanup="function"==typeof c.unsubscribe?function(){l.unsubscribe()}:o(c))}catch(f){u.error(f);return}S(r)&&E(r)}};x.prototype=d({},{unsubscribe:function(){var e=w(this);S(e)||(k(e),E(e))}}),i&&l(x.prototype,"closed",{configurable:!0,get:function(){return S(w(this))}});var T=function(e){_(this,{subscription:e}),i||(this.closed=!1)};T.prototype=d({},{next:function(e){var t=w(w(this).subscription);if(!S(t)){var n=t.observer;try{var r=p(n.next);r&&r.call(n,e)}catch(i){m(i)}}},error:function(e){var t=w(w(this).subscription);if(!S(t)){var n=t.observer;k(t);try{var r=p(n.error);r?r.call(n,e):m(e)}catch(i){m(i)}E(t)}},complete:function(){var e=w(w(this).subscription);if(!S(e)){var t=e.observer;k(e);try{var n=p(t.complete);n&&n.call(t)}catch(r){m(r)}E(e)}}}),i&&l(T.prototype,"closed",{configurable:!0,get:function(){return S(w(w(this).subscription))}});var M=function(e){c(this,M,"Observable"),_(this,{subscriber:o(e)})};d(M.prototype,{subscribe:function(e){var t=arguments.length;return new x("function"==typeof e?{next:e,error:t>1?arguments[1]:void 0,complete:t>2?arguments[2]:void 0}:u(e)?e:{},w(this).subscriber)}}),d(M,{from:function(e){var t="function"==typeof this?this:M,n=p(s(e)[y]);if(n){var r=s(n.call(e));return r.constructor===t?r:new t(function(e){return r.subscribe(e)})}var i=h(e);return new t(function(e){b(i,function(t,n){if(e.next(t),e.closed)return n()},{IS_ITERATOR:!0,INTERRUPTED:!0}),e.complete()})},of:function(){for(var e="function"==typeof this?this:M,t=arguments.length,n=Array(t),r=0;r1?arguments[1]:void 0,3);return!u(n,function(e,n){if(!r(e,e,t))return n()},{IS_ITERATOR:!0,INTERRUPTED:!0}).stopped}})},64362(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(35005),o=n(19670),s=n(13099),u=n(49974),c=n(36707),l=n(96767),f=n(20408);r({target:"Set",proto:!0,real:!0,forced:i},{filter:function(e){var t=o(this),n=l(t),r=u(e,arguments.length>1?arguments[1]:void 0,3),i=new(c(t,a("Set"))),d=s(i.add);return 
f(n,function(e){r(e,e,t)&&d.call(i,e)},{IS_ITERATOR:!0}),i}})},15389(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(19670),o=n(49974),s=n(96767),u=n(20408);r({target:"Set",proto:!0,real:!0,forced:i},{find:function(e){var t=a(this),n=s(t),r=o(e,arguments.length>1?arguments[1]:void 0,3);return u(n,function(e,n){if(r(e,e,t))return n(e)},{IS_ITERATOR:!0,INTERRUPTED:!0}).result}})},46006(e,t,n){var r=n(82109),i=n(27296);r({target:"Set",stat:!0},{from:i})},90401(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(35005),o=n(19670),s=n(13099),u=n(36707),c=n(20408);r({target:"Set",proto:!0,real:!0,forced:i},{intersection:function(e){var t=o(this),n=new(u(t,a("Set"))),r=s(t.has),i=s(n.add);return c(e,function(e){r.call(t,e)&&i.call(n,e)}),n}})},45164(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(19670),o=n(13099),s=n(20408);r({target:"Set",proto:!0,real:!0,forced:i},{isDisjointFrom:function(e){var t=a(this),n=o(t.has);return!s(e,function(e,r){if(!0===n.call(t,e))return r()},{INTERRUPTED:!0}).stopped}})},91238(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(35005),o=n(19670),s=n(13099),u=n(18554),c=n(20408);r({target:"Set",proto:!0,real:!0,forced:i},{isSubsetOf:function(e){var t=u(this),n=o(e),r=n.has;return"function"!=typeof r&&(n=new(a("Set"))(e),r=s(n.has)),!c(t,function(e,t){if(!1===r.call(n,e))return t()},{IS_ITERATOR:!0,INTERRUPTED:!0}).stopped}})},54837(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(19670),o=n(13099),s=n(20408);r({target:"Set",proto:!0,real:!0,forced:i},{isSupersetOf:function(e){var t=a(this),n=o(t.has);return!s(e,function(e,r){if(!1===n.call(t,e))return r()},{INTERRUPTED:!0}).stopped}})},87485(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(19670),o=n(96767),s=n(20408);r({target:"Set",proto:!0,real:!0,forced:i},{join:function(e){var t=a(this),n=o(t),r=void 0===e?",":String(e),i=[];return s(n,i.push,{that:i,IS_ITERATOR:!0}),i.join(r)}})},56767(e,t,n){"use strict";var 
r=n(82109),i=n(31913),a=n(35005),o=n(19670),s=n(13099),u=n(49974),c=n(36707),l=n(96767),f=n(20408);r({target:"Set",proto:!0,real:!0,forced:i},{map:function(e){var t=o(this),n=l(t),r=u(e,arguments.length>1?arguments[1]:void 0,3),i=new(c(t,a("Set"))),d=s(i.add);return f(n,function(e){d.call(i,r(e,e,t))},{IS_ITERATOR:!0}),i}})},69916(e,t,n){var r=n(82109),i=n(82044);r({target:"Set",stat:!0},{of:i})},76651(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(19670),o=n(13099),s=n(96767),u=n(20408);r({target:"Set",proto:!0,real:!0,forced:i},{reduce:function(e){var t=a(this),n=s(t),r=arguments.length<2,i=r?void 0:arguments[1];if(o(e),u(n,function(n){r?(r=!1,i=n):i=e(i,n,n,t)},{IS_ITERATOR:!0}),r)throw TypeError("Reduce of empty set with no initial value");return i}})},61437(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(19670),o=n(49974),s=n(96767),u=n(20408);r({target:"Set",proto:!0,real:!0,forced:i},{some:function(e){var t=a(this),n=s(t),r=o(e,arguments.length>1?arguments[1]:void 0,3);return u(n,function(e,n){if(r(e,e,t))return n()},{IS_ITERATOR:!0,INTERRUPTED:!0}).stopped}})},35285(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(35005),o=n(19670),s=n(13099),u=n(36707),c=n(20408);r({target:"Set",proto:!0,real:!0,forced:i},{symmetricDifference:function(e){var t=o(this),n=new(u(t,a("Set")))(t),r=s(n.delete),i=s(n.add);return c(e,function(e){r.call(n,e)||i.call(n,e)}),n}})},39865(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(35005),o=n(19670),s=n(13099),u=n(36707),c=n(20408);r({target:"Set",proto:!0,real:!0,forced:i},{union:function(e){var t=o(this),n=new(u(t,a("Set")))(t);return c(e,s(n.add),{that:n}),n}})},86035(e,t,n){"use strict";var r=n(82109),i=n(28710).charAt,a=n(47293)(function(){return"𠮷"!=="𠮷".at(0)});r({target:"String",proto:!0,forced:a},{at:function(e){return i(this,e)}})},67501(e,t,n){"use strict";var r=n(82109),i=n(24994),a=n(84488),o=n(41340),s=n(29909),u=n(28710),c=u.codeAt,l=u.charAt,f="String 
Iterator",d=s.set,h=s.getterFor(f),p=i(function(e){d(this,{type:f,string:e,index:0})},"String",function(){var e,t=h(this),n=t.string,r=t.index;return r>=n.length?{value:void 0,done:!0}:(e=l(n,r),t.index+=e.length,{value:{codePoint:c(e,0),position:r},done:!1})});r({target:"String",proto:!0},{codePoints:function(){return new p(o(a(this)))}})},13728(e,t,n){n(76373)},27207(e,t,n){n(68757)},609(e,t,n){n(97235)("asyncDispose")},21568(e,t,n){n(97235)("dispose")},54534(e,t,n){n(97235)("matcher")},95090(e,t,n){n(97235)("metadata")},48824(e,t,n){n(97235)("observable")},44130(e,t,n){n(97235)("patternMatch")},35954(e,t,n){n(97235)("replaceAll")},38012(e,t,n){n(48675)},26182(e,t,n){"use strict";var r=n(90260),i=n(42092).filterReject,a=n(43074),o=r.aTypedArray;(0,r.exportTypedArrayMethod)("filterOut",function(e){var t=i(o(this),e,arguments.length>1?arguments[1]:void 0);return a(this,t)})},8922(e,t,n){"use strict";var r=n(90260),i=n(42092).filterReject,a=n(43074),o=r.aTypedArray;(0,r.exportTypedArrayMethod)("filterReject",function(e){var t=i(o(this),e,arguments.length>1?arguments[1]:void 0);return a(this,t)})},1118(e,t,n){"use strict";var r=n(90260),i=n(9671).findLastIndex,a=r.aTypedArray;(0,r.exportTypedArrayMethod)("findLastIndex",function(e){return i(a(this),e,arguments.length>1?arguments[1]:void 0)})},37380(e,t,n){"use strict";var r=n(90260),i=n(9671).findLast,a=r.aTypedArray;(0,r.exportTypedArrayMethod)("findLast",function(e){return i(a(this),e,arguments.length>1?arguments[1]:void 0)})},5835(e,t,n){"use strict";var r=n(90260),i=n(61386),a=n(66304),o=r.aTypedArray;(0,r.exportTypedArrayMethod)("groupBy",function(e){var t=arguments.length>1?arguments[1]:void 0;return i(o(this),e,t,a)})},84444(e,t,n){"use strict";var r=n(90260),i=n(60956),a=n(43074),o=r.aTypedArray;(0,r.exportTypedArrayMethod)("uniqueBy",function(e){return a(this,i.call(o(this),e))})},78206(e,t,n){"use strict";var 
r=n(82109),i=n(31913),a=n(34092);r({target:"WeakMap",proto:!0,real:!0,forced:i},{deleteAll:function(){return a.apply(this,arguments)}})},12714(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(37502);r({target:"WeakMap",proto:!0,real:!0,forced:i},{emplace:a})},76478(e,t,n){var r=n(82109),i=n(27296);r({target:"WeakMap",stat:!0},{from:i})},79715(e,t,n){var r=n(82109),i=n(82044);r({target:"WeakMap",stat:!0},{of:i})},5964(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(8154);r({target:"WeakMap",proto:!0,real:!0,forced:i},{upsert:a})},43561(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(31501);r({target:"WeakSet",proto:!0,real:!0,forced:i},{addAll:function(){return a.apply(this,arguments)}})},32049(e,t,n){"use strict";var r=n(82109),i=n(31913),a=n(34092);r({target:"WeakSet",proto:!0,real:!0,forced:i},{deleteAll:function(){return a.apply(this,arguments)}})},86020(e,t,n){var r=n(82109),i=n(27296);r({target:"WeakSet",stat:!0},{from:i})},56585(e,t,n){var r=n(82109),i=n(82044);r({target:"WeakSet",stat:!0},{of:i})},54747(e,t,n){var r=n(17854),i=n(48324),a=n(18533),o=n(68880);for(var s in i){var u=r[s],c=u&&u.prototype;if(c&&c.forEach!==a)try{o(c,"forEach",a)}catch(l){c.forEach=a}}},33948(e,t,n){var r=n(17854),i=n(48324),a=n(66992),o=n(68880),s=n(5112),u=s("iterator"),c=s("toStringTag"),l=a.values;for(var f in i){var d=r[f],h=d&&d.prototype;if(h){if(h[u]!==l)try{o(h,u,l)}catch(p){h[u]=l}if(h[c]||o(h,c,f),i[f]){for(var b in a)if(h[b]!==a[b])try{o(h,b,a[b])}catch(m){h[b]=a[b]}}}}},84633(e,t,n){var r=n(82109),i=n(17854),a=n(20261);r({global:!0,bind:!0,enumerable:!0,forced:!i.setImmediate||!i.clearImmediate},{setImmediate:a.set,clearImmediate:a.clear})},85844(e,t,n){var r=n(82109),i=n(17854),a=n(95948),o=n(35268),s=i.process;r({global:!0,enumerable:!0,noTargetGet:!0},{queueMicrotask:function(e){var t=o&&s.domain;a(t?t.bind(e):e)}})},32564(e,t,n){var r=n(82109),i=n(17854),a=n(88113),o=[].slice,s=/MSIE .\./.test(a),u=function(e){return function(t,n){var 
r=arguments.length>2,i=r?o.call(arguments,2):void 0;return e(r?function(){("function"==typeof t?t:Function(t)).apply(this,i)}:t,n)}};r({global:!0,bind:!0,forced:s},{setTimeout:u(i.setTimeout),setInterval:u(i.setInterval)})},41637(e,t,n){"use strict";n(66992);var r=n(82109),i=n(35005),a=n(590),o=n(31320),s=n(12248),u=n(58003),c=n(24994),l=n(29909),f=n(25787),d=n(86656),h=n(49974),p=n(70648),b=n(19670),m=n(70111),g=n(41340),v=n(70030),y=n(79114),w=n(18554),_=n(71246),E=n(5112),S=i("fetch"),k=i("Request"),x=k&&k.prototype,T=i("Headers"),M=E("iterator"),O="URLSearchParams",A=O+"Iterator",L=l.set,C=l.getterFor(O),I=l.getterFor(A),D=/\+/g,N=[,,,,],P=function(e){return N[e-1]||(N[e-1]=RegExp("((?:%[\\da-f]{2}){"+e+"})","gi"))},R=function(e){try{return decodeURIComponent(e)}catch(t){return e}},j=function(e){var t=e.replace(D," "),n=4;try{return decodeURIComponent(t)}catch(r){for(;n;)t=t.replace(P(n--),R);return t}},F=/[!'()~]|%20/g,Y={"!":"%21","'":"%27","(":"%28",")":"%29","~":"%7E","%20":"+"},B=function(e){return Y[e]},U=function(e){return encodeURIComponent(e).replace(F,B)},H=function(e,t){if(t)for(var n,r,i=t.split("&"),a=0;a0?arguments[0]:void 0,l=this,h=[];if(L(l,{type:O,entries:h,updateURL:function(){},updateSearchParams:$}),void 0!==c){if(m(c)){if("function"==typeof(e=_(c)))for(n=(t=w(c,e)).next;!(r=n.call(t)).done;){if((o=(a=(i=w(b(r.value))).next).call(i)).done||(s=a.call(i)).done||!a.call(i).done)throw TypeError("Expected sequence with length 2");h.push({key:g(o.value),value:g(s.value)})}else for(u in c)d(c,u)&&h.push({key:u,value:g(c[u])})}else H(h,"string"==typeof c?"?"===c.charAt(0)?c.slice(1):c:g(c))}},K=W.prototype;if(s(K,{append:function(e,t){z(arguments.length,2);var n=C(this);n.entries.push({key:g(e),value:g(t)}),n.updateURL()},delete:function(e){z(arguments.length,1);for(var t=C(this),n=t.entries,r=g(e),i=0;ie.key){i.splice(t,0,e);break}t===n&&i.push(e)}r.updateURL()},forEach:function(e){for(var 
t,n=C(this).entries,r=h(e,arguments.length>1?arguments[1]:void 0,3),i=0;i1?V(arguments[1]):{})}}),"function"==typeof k){var q=function(e){return f(this,q,"Request"),new k(e,arguments.length>1?V(arguments[1]):{})};x.constructor=q,q.prototype=x,r({global:!0,forced:!0},{Request:q})}}e.exports={URLSearchParams:W,getState:C}},60285(e,t,n){"use strict";n(78783);var r,i=n(82109),a=n(19781),o=n(590),s=n(17854),u=n(36048),c=n(31320),l=n(25787),f=n(86656),d=n(21574),h=n(48457),p=n(28710).codeAt,b=n(33197),m=n(41340),g=n(58003),v=n(41637),y=n(29909),w=s.URL,_=v.URLSearchParams,E=v.getState,S=y.set,k=y.getterFor("URL"),x=Math.floor,T=Math.pow,M="Invalid authority",O="Invalid scheme",A="Invalid host",L="Invalid port",C=/[A-Za-z]/,I=/[\d+-.A-Za-z]/,D=/\d/,N=/^0x/i,P=/^[0-7]+$/,R=/^\d+$/,j=/^[\dA-Fa-f]+$/,F=/[\0\t\n\r #%/:<>?@[\\\]^|]/,Y=/[\0\t\n\r #/:<>?@[\\\]^|]/,B=/^[\u0000-\u0020]+|[\u0000-\u0020]+$/g,U=/[\t\n\r]/g,H=function(e,t){var n,r,i;if("["==t.charAt(0)){if("]"!=t.charAt(t.length-1)||!(n=z(t.slice(1,-1))))return A;e.host=n}else if(Q(e)){if(t=b(t),F.test(t)||null===(n=$(t)))return A;e.host=n}else{if(Y.test(t))return A;for(i=0,n="",r=h(t);i4)return e;for(r=0,n=[];r1&&"0"==i.charAt(0)&&(a=N.test(i)?16:8,i=i.slice(8==a?1:2)),""===i)o=0;else{if(!(10==a?R:8==a?P:j).test(i))return e;o=parseInt(i,a)}n.push(o)}for(r=0;r=T(256,5-t))return null}else if(o>255)return null;for(r=0,s=n.pop();r6))return;for(r=0;d();){if(i=null,r>0){if("."!=d()||!(r<4))return;f++}if(!D.test(d()))return;for(;D.test(d());){if(a=parseInt(d(),10),null===i)i=a;else{if(0==i)return;i=10*i+a}if(i>255)return;f++}u[c]=256*u[c]+i,(2==++r||4==r)&&c++}if(4!=r)return;break}if(":"==d()){if(f++,!d())return}else if(d())return;u[c++]=t}if(null!==l)for(o=c-l,c=7;0!=c&&o>0;)s=u[c],u[c--]=u[l+o-1],u[l+--o]=s;else if(8!=c)return;return u},G=function(e){for(var t=null,n=1,r=null,i=0,a=0;a<8;a++)0!==e[a]?(i>n&&(t=r,n=i),r=null,i=0):(null===r&&(r=a),++i);return i>n&&(t=r,n=i),t},W=function(e){var t,n,r,i;if("number"==typeof 
e){for(n=0,t=[];n<4;n++)t.unshift(e%256),e=x(e/256);return t.join(".")}if("object"==typeof e){for(n=0,t="",r=G(e);n<8;n++)(!i||0!==e[n])&&(i&&(i=!1),r===n?(t+=n?":":"::",i=!0):(t+=e[n].toString(16),n<7&&(t+=":")));return"["+t+"]"}return e},K={},V=d({},K,{" ":1,'"':1,"<":1,">":1,"`":1}),q=d({},V,{"#":1,"?":1,"{":1,"}":1}),Z=d({},q,{"/":1,":":1,";":1,"=":1,"@":1,"[":1,"\\":1,"]":1,"^":1,"|":1}),X=function(e,t){var n=p(e,0);return n>32&&n<127&&!f(t,e)?e:encodeURIComponent(e)},J={ftp:21,file:null,http:80,https:443,ws:80,wss:443},Q=function(e){return f(J,e.scheme)},ee=function(e){return""!=e.username||""!=e.password},et=function(e){return!e.host||e.cannotBeABaseURL||"file"==e.scheme},en=function(e,t){var n;return 2==e.length&&C.test(e.charAt(0))&&(":"==(n=e.charAt(1))||!t&&"|"==n)},er=function(e){var t;return e.length>1&&en(e.slice(0,2))&&(2==e.length||"/"===(t=e.charAt(2))||"\\"===t||"?"===t||"#"===t)},ei=function(e){var t=e.path,n=t.length;n&&("file"!=e.scheme||1!=n||!en(t[0],!0))&&t.pop()},ea=function(e){return"."===e||"%2e"===e.toLowerCase()},eo=function(e){return".."===(e=e.toLowerCase())||"%2e."===e||".%2e"===e||"%2e%2e"===e},es={},eu={},ec={},el={},ef={},ed={},eh={},ep={},eb={},em={},eg={},ev={},ey={},ew={},e_={},eE={},eS={},ek={},ex={},eT={},eM={},eO=function(e,t,n,i){var a,o,s,u,c=n||es,l=0,d="",p=!1,b=!1,m=!1;for(n||(e.scheme="",e.username="",e.password="",e.host=null,e.port=null,e.path=[],e.query=null,e.fragment=null,e.cannotBeABaseURL=!1,t=t.replace(B,"")),t=t.replace(U,""),a=h(t);l<=a.length;){switch(o=a[l],c){case es:if(o&&C.test(o))d+=o.toLowerCase(),c=eu;else{if(n)return O;c=ec;continue}break;case eu:if(o&&(I.test(o)||"+"==o||"-"==o||"."==o))d+=o.toLowerCase();else 
if(":"==o){if(n&&(Q(e)!=f(J,d)||"file"==d&&(ee(e)||null!==e.port)||"file"==e.scheme&&!e.host))return;if(e.scheme=d,n){Q(e)&&J[e.scheme]==e.port&&(e.port=null);return}d="","file"==e.scheme?c=ew:Q(e)&&i&&i.scheme==e.scheme?c=el:Q(e)?c=ep:"/"==a[l+1]?(c=ef,l++):(e.cannotBeABaseURL=!0,e.path.push(""),c=ex)}else{if(n)return O;d="",c=ec,l=0;continue}break;case ec:if(!i||i.cannotBeABaseURL&&"#"!=o)return O;if(i.cannotBeABaseURL&&"#"==o){e.scheme=i.scheme,e.path=i.path.slice(),e.query=i.query,e.fragment="",e.cannotBeABaseURL=!0,c=eM;break}c="file"==i.scheme?ew:ed;continue;case el:if("/"==o&&"/"==a[l+1])c=eb,l++;else{c=ed;continue}break;case ef:if("/"==o){c=em;break}c=ek;continue;case ed:if(e.scheme=i.scheme,o==r)e.username=i.username,e.password=i.password,e.host=i.host,e.port=i.port,e.path=i.path.slice(),e.query=i.query;else if("/"==o||"\\"==o&&Q(e))c=eh;else if("?"==o)e.username=i.username,e.password=i.password,e.host=i.host,e.port=i.port,e.path=i.path.slice(),e.query="",c=eT;else if("#"==o)e.username=i.username,e.password=i.password,e.host=i.host,e.port=i.port,e.path=i.path.slice(),e.query=i.query,e.fragment="",c=eM;else{e.username=i.username,e.password=i.password,e.host=i.host,e.port=i.port,e.path=i.path.slice(),e.path.pop(),c=ek;continue}break;case eh:if(Q(e)&&("/"==o||"\\"==o))c=eb;else if("/"==o)c=em;else{e.username=i.username,e.password=i.password,e.host=i.host,e.port=i.port,c=ek;continue}break;case ep:if(c=eb,"/"!=o||"/"!=d.charAt(l+1))continue;l++;break;case eb:if("/"!=o&&"\\"!=o){c=em;continue}break;case em:if("@"==o){p&&(d="%40"+d),p=!0,s=h(d);for(var g=0;g65535)return L;e.port=Q(e)&&w===J[e.scheme]?null:w,d=""}if(n)return;c=eS;continue}break;case ew:if(e.scheme="file","/"==o||"\\"==o)c=e_;else if(i&&"file"==i.scheme){if(o==r)e.host=i.host,e.path=i.path.slice(),e.query=i.query;else if("?"==o)e.host=i.host,e.path=i.path.slice(),e.query="",c=eT;else 
if("#"==o)e.host=i.host,e.path=i.path.slice(),e.query=i.query,e.fragment="",c=eM;else{er(a.slice(l).join(""))||(e.host=i.host,e.path=i.path.slice(),ei(e)),c=ek;continue}}else{c=ek;continue}break;case e_:if("/"==o||"\\"==o){c=eE;break}i&&"file"==i.scheme&&!er(a.slice(l).join(""))&&(en(i.path[0],!0)?e.path.push(i.path[0]):e.host=i.host),c=ek;continue;case eE:if(o==r||"/"==o||"\\"==o||"?"==o||"#"==o){if(!n&&en(d))c=ek;else if(""==d){if(e.host="",n)return;c=eS}else{if(u=H(e,d))return u;if("localhost"==e.host&&(e.host=""),n)return;d="",c=eS}continue}d+=o;break;case eS:if(Q(e)){if(c=ek,"/"!=o&&"\\"!=o)continue}else if(n||"?"!=o){if(n||"#"!=o){if(o!=r&&(c=ek,"/"!=o))continue}else e.fragment="",c=eM}else e.query="",c=eT;break;case ek:if(o==r||"/"==o||"\\"==o&&Q(e)||!n&&("?"==o||"#"==o)){if(eo(d)?(ei(e),"/"==o||"\\"==o&&Q(e)||e.path.push("")):ea(d)?"/"==o||"\\"==o&&Q(e)||e.path.push(""):("file"==e.scheme&&!e.path.length&&en(d)&&(e.host&&(e.host=""),d=d.charAt(0)+":"),e.path.push(d)),d="","file"==e.scheme&&(o==r||"?"==o||"#"==o))for(;e.path.length>1&&""===e.path[0];)e.path.shift();"?"==o?(e.query="",c=eT):"#"==o&&(e.fragment="",c=eM)}else d+=X(o,q);break;case ex:"?"==o?(e.query="",c=eT):"#"==o?(e.fragment="",c=eM):o!=r&&(e.path[0]+=X(o,K));break;case eT:n||"#"!=o?o!=r&&("'"==o&&Q(e)?e.query+="%27":"#"==o?e.query+="%23":e.query+=X(o,K)):(e.fragment="",c=eM);break;case eM:o!=r&&(e.fragment+=X(o,V))}l++}},eA=function(e){var t,n,r=l(this,eA,"URL"),i=arguments.length>1?arguments[1]:void 0,o=m(e),s=S(r,{type:"URL"});if(void 0!==i){if(i instanceof eA)t=k(i);else if(n=eO(t={},m(i)))throw TypeError(n)}if(n=eO(s,o,null,t))throw TypeError(n);var u=s.searchParams=new 
_,c=E(u);c.updateSearchParams(s.query),c.updateURL=function(){s.query=String(u)||null},a||(r.href=eC.call(r),r.origin=eI.call(r),r.protocol=eD.call(r),r.username=eN.call(r),r.password=eP.call(r),r.host=eR.call(r),r.hostname=ej.call(r),r.port=eF.call(r),r.pathname=eY.call(r),r.search=eB.call(r),r.searchParams=eU.call(r),r.hash=eH.call(r))},eL=eA.prototype,eC=function(){var e=k(this),t=e.scheme,n=e.username,r=e.password,i=e.host,a=e.port,o=e.path,s=e.query,u=e.fragment,c=t+":";return null!==i?(c+="//",ee(e)&&(c+=n+(r?":"+r:"")+"@"),c+=W(i),null!==a&&(c+=":"+a)):"file"==t&&(c+="//"),c+=e.cannotBeABaseURL?o[0]:o.length?"/"+o.join("/"):"",null!==s&&(c+="?"+s),null!==u&&(c+="#"+u),c},eI=function(){var e=k(this),t=e.scheme,n=e.port;if("blob"==t)try{return new eA(t.path[0]).origin}catch(r){return"null"}return"file"!=t&&Q(e)?t+"://"+W(e.host)+(null!==n?":"+n:""):"null"},eD=function(){return k(this).scheme+":"},eN=function(){return k(this).username},eP=function(){return k(this).password},eR=function(){var e=k(this),t=e.host,n=e.port;return null===t?"":null===n?W(t):W(t)+":"+n},ej=function(){var e=k(this).host;return null===e?"":W(e)},eF=function(){var e=k(this).port;return null===e?"":String(e)},eY=function(){var e=k(this),t=e.path;return e.cannotBeABaseURL?t[0]:t.length?"/"+t.join("/"):""},eB=function(){var e=k(this).query;return e?"?"+e:""},eU=function(){return k(this).searchParams},eH=function(){var e=k(this).fragment;return e?"#"+e:""},e$=function(e,t){return{get:e,set:t,configurable:!0,enumerable:!0}};if(a&&u(eL,{href:e$(eC,function(e){var t=k(this),n=m(e),r=eO(t,n);if(r)throw TypeError(r);E(t.searchParams).updateSearchParams(t.query)}),origin:e$(eI),protocol:e$(eD,function(e){var t=k(this);eO(t,m(e)+":",es)}),username:e$(eN,function(e){var t=k(this),n=h(m(e));if(!et(t)){t.username="";for(var r=0;rc});var r={value:function(){}};function i(){for(var e,t=0,n=arguments.length,r={};t=0&&(n=e.slice(r+1),e=e.slice(0,r)),e&&!t.hasOwnProperty(e))throw Error("unknown type: 
"+e);return{type:e,name:n}})}function s(e,t){for(var n,r=0,i=e.length;r0)for(var n,r,i=Array(n),a=0;am,dragDisable:()=>u.Z,dragEnable:()=>u.D});var r=n(92626),i=n(25109),a=n(43095),o=n(94017),s=n(24793),u=n(44266),c=n(34299);function l(e){return function(){return e}}function f(e,t,n,r,i,a,o,s,u,c){this.target=e,this.type=t,this.subject=n,this.identifier=r,this.active=i,this.x=a,this.y=o,this.dx=s,this.dy=u,this._=c}function d(){return!i.B.ctrlKey&&!i.B.button}function h(){return this.parentNode}function p(e){return null==e?{x:i.B.x,y:i.B.y}:e}function b(){return navigator.maxTouchPoints||"ontouchstart"in this}function m(){var e,t,n,m,g=d,v=h,y=p,w=b,_={},E=(0,r.Z)("start","drag","end"),S=0,k=0;function x(e){e.on("mousedown.drag",T).filter(w).on("touchstart.drag",A).on("touchmove.drag",L).on("touchend.drag touchcancel.drag",C).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function T(){if(!m&&g.apply(this,arguments)){var r=I("mouse",v.apply(this,arguments),a.Z,this,arguments);r&&((0,o.Z)(i.B.view).on("mousemove.drag",M,!0).on("mouseup.drag",O,!0),(0,u.Z)(i.B.view),(0,c.r)(),n=!1,e=i.B.clientX,t=i.B.clientY,r("start"))}}function M(){if((0,c.Z)(),!n){var r=i.B.clientX-e,a=i.B.clientY-t;n=r*r+a*a>k}_.mouse("drag")}function O(){(0,o.Z)(i.B.view).on("mousemove.drag mouseup.drag",null),(0,u.D)(i.B.view,n),(0,c.Z)(),_.mouse("end")}function A(){if(g.apply(this,arguments)){var e,t,n=i.B.changedTouches,r=v.apply(this,arguments),a=n.length;for(e=0;eo,Z:()=>a});var r=n(94017),i=n(34299);function a(e){var t=e.document.documentElement,n=(0,r.Z)(e).on("dragstart.drag",i.Z,!0);"onselectstart"in t?n.on("selectstart.drag",i.Z,!0):(t.__noselect=t.style.MozUserSelect,t.style.MozUserSelect="none")}function o(e,t){var n=e.document.documentElement,a=(0,r.Z)(e).on("dragstart.drag",null);t&&(a.on("click.drag",i.Z,!0),setTimeout(function(){a.on("click.drag",null)},0)),"onselectstart"in 
n?a.on("selectstart.drag",null):(n.style.MozUserSelect=n.__noselect,delete n.__noselect)}},34299(e,t,n){"use strict";n.d(t,{Z:()=>a,r:()=>i});var r=n(25109);function i(){r.B.stopImmediatePropagation()}function a(){r.B.preventDefault(),r.B.stopImmediatePropagation()}},9893(e,t,n){"use strict";function r(e,t){var n;function r(){var r,i,a=n.length,o=0,s=0;for(r=0;r=(a=(b+g)/2))?b=a:g=a,(l=n>=(o=(m+v)/2))?m=o:v=o,i=h,!(h=h[f=l<<1|c]))return i[f]=p,e;if(s=+e._x.call(null,h.data),u=+e._y.call(null,h.data),t===s&&n===u)return p.next=h,i?i[f]=p:e._root=p,e;do i=i?i[f]=[,,,,]:e._root=[,,,,],(c=t>=(a=(b+g)/2))?b=a:g=a,(l=n>=(o=(m+v)/2))?m=o:v=o;while((f=l<<1|c)==(d=(u>=o)<<1|s>=a))return i[d]=h,i[f]=p,e}function u(e){var t,n,r,i,a=e.length,o=Array(a),u=Array(a),c=1/0,l=1/0,f=-1/0,d=-1/0;for(n=0;nf&&(f=r),id&&(d=i));if(c>f||l>d)return this;for(this.cover(c,l).cover(f,d),n=0;ne||e>=i||r>t||t>=a;)switch(s=(th)&&!((a=u.y0)>p)&&!((o=u.x1)=v)<<1|e>=g)&&(u=b[b.length-1],b[b.length-1]=b[b.length-1-c],b[b.length-1-c]=u)}else{var y=e-+this._x.call(null,m.data),w=t-+this._y.call(null,m.data),_=y*y+w*w;if(_=(s=(p+m)/2))?p=s:m=s,(l=o>=(u=(b+g)/2))?b=u:g=u,t=h,!(h=h[f=l<<1|c]))return this;if(!h.length)break;(t[f+1&3]||t[f+2&3]||t[f+3&3])&&(n=t,d=f)}for(;h.data!==e;)if(r=h,!(h=h.next))return this;return((i=h.next)&&delete h.next,r)?(i?r.next=i:delete r.next,this):t?(i?t[f]=i:delete t[f],(h=t[0]||t[1]||t[2]||t[3])&&h===(t[3]||t[2]||t[1]||t[0])&&!h.length&&(n?n[d]=h:this._root=h),this):(this._root=i,this)}function b(e){for(var t=0,n=e.length;tr,forceCollide:()=>L,forceLink:()=>B,forceManyBody:()=>V,forceRadial:()=>q,forceSimulation:()=>K,forceX:()=>Z,forceY:()=>X});var M=k.prototype=x.prototype;function O(e){return e.x+e.vx}function A(e){return e.y+e.vy}function L(e){var t,n,r=1,o=1;function s(){for(var e,i,s,c,l,f,d,h=t.length,p=0;ps.index){var b=c-u.x-u.vx,m=l-u.y-u.vy,g=b*b+m*m;gc+p||il+p||oe.r&&(e.r=e[t].r)}function c(){if(t){var 
r,i,a=t.length;for(r=0,n=Array(a);r1?(null==n?s.remove(e):s.set(e,h(n)),t):s.get(e)},find:function(t,n,r){var i,a,o,s,u,c=0,l=e.length;for(null==r?r=1/0:r*=r,c=0;c1?(c.on(e,n),t):c.on(e)}}}function V(){var e,t,n,r,o=i(-30),s=1,u=1/0,c=.81;function l(r){var i,a=e.length,o=k(e,$,z).visitAfter(d);for(n=r,i=0;i=u)){(e.data!==t||e.next)&&(0===f&&(p+=(f=a())*f),0===d&&(p+=(d=a())*d),ps});var r=n(73888),i=n(31986);function a(e){return function(){var t=this.ownerDocument,n=this.namespaceURI;return n===i.P&&t.documentElement.namespaceURI===i.P?t.createElement(e):t.createElementNS(n,e)}}function o(e){return function(){return this.ownerDocument.createElementNS(e.space,e.local)}}function s(e){var t=(0,r.Z)(e);return(t.local?o:a)(t)}},58556(e,t,n){"use strict";n.r(t),n.d(t,{clientPoint:()=>h.Z,create:()=>a,creator:()=>r.Z,customEvent:()=>S._H,event:()=>S.B,local:()=>s,matcher:()=>c.Z,mouse:()=>l.Z,namespace:()=>f.Z,namespaces:()=>d.Z,select:()=>i.Z,selectAll:()=>b,selection:()=>p.ZP,selector:()=>m.Z,selectorAll:()=>g.Z,style:()=>v.S,touch:()=>y.Z,touches:()=>_,window:()=>E.Z});var r=n(789),i=n(94017);function a(e){return(0,i.Z)((0,r.Z)(e).call(document.documentElement))}var o=0;function s(){return new u}function u(){this._="@"+(++o).toString(36)}u.prototype=s.prototype={constructor:u,get:function(e){for(var t=this._;!(t in e);)if(!(e=e.parentNode))return;return e[t]},set:function(e,t){return e[this._]=t},remove:function(e){return this._ in e&&delete e[this._]},toString:function(){return this._}};var c=n(3083),l=n(43095),f=n(73888),d=n(31986),h=n(42115),p=n(23817);function b(e){return"string"==typeof e?new p.Y1([document.querySelectorAll(e)],[document.documentElement]):new p.Y1([null==e?[]:e],p.Jz)}var m=n(82634),g=n(3545),v=n(49986),y=n(24793),w=n(45553);function _(e,t){null==t&&(t=(0,w.Z)().touches);for(var n=0,r=t?t.length:0,i=Array(r);nr})},43095(e,t,n){"use strict";n.d(t,{Z:()=>a});var r=n(45553),i=n(42115);function a(e){var t=(0,r.Z)();return 
t.changedTouches&&(t=t.changedTouches[0]),(0,i.Z)(e,t)}},73888(e,t,n){"use strict";n.d(t,{Z:()=>i});var r=n(31986);function i(e){var t=e+="",n=t.indexOf(":");return n>=0&&"xmlns"!==(t=e.slice(0,n))&&(e=e.slice(n+1)),r.Z.hasOwnProperty(t)?{space:r.Z[t],local:e}:e}},31986(e,t,n){"use strict";n.d(t,{P:()=>r,Z:()=>i});var r="http://www.w3.org/1999/xhtml";let i={svg:"http://www.w3.org/2000/svg",xhtml:r,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"}},42115(e,t,n){"use strict";function r(e,t){var n=e.ownerSVGElement||e;if(n.createSVGPoint){var r=n.createSVGPoint();return r.x=t.clientX,r.y=t.clientY,[(r=r.matrixTransform(e.getScreenCTM().inverse())).x,r.y]}var i=e.getBoundingClientRect();return[t.clientX-i.left-e.clientLeft,t.clientY-i.top-e.clientTop]}n.d(t,{Z:()=>r})},94017(e,t,n){"use strict";n.d(t,{Z:()=>i});var r=n(23817);function i(e){return"string"==typeof e?new r.Y1([[document.querySelector(e)]],[document.documentElement]):new r.Y1([[e]],r.Jz)}},23817(e,t,n){"use strict";n.d(t,{Y1:()=>eT,ZP:()=>eO,Jz:()=>ex});var r=n(82634);function i(e){"function"!=typeof e&&(e=(0,r.Z)(e));for(var t=this._groups,n=t.length,i=Array(n),a=0;a=k&&(k=S+1);!(E=y[k])&&++k=0;)(r=i[a])&&(o&&4^r.compareDocumentPosition(o)&&o.parentNode.insertBefore(r,o),o=r);return this}function _(e){function t(t,n){return t&&n?e(t.__data__,n.__data__):!t-!n}e||(e=E);for(var n=this._groups,r=n.length,i=Array(r),a=0;at?1:e>=t?0:NaN}function S(){var e=arguments[0];return arguments[0]=this,e.apply(null,arguments),this}function k(){var e=Array(this.size()),t=-1;return this.each(function(){e[++t]=this}),e}function x(){for(var e=this._groups,t=0,n=e.length;t1?this.each((null==t?F:"function"==typeof t?B:Y)(e,t)):this.node()[e]}function H(e){return e.trim().split(/^|\s+/)}function $(e){return e.classList||new z(e)}function z(e){this._node=e,this._names=H(e.getAttribute("class")||"")}function G(e,t){for(var 
n=$(e),r=-1,i=t.length;++rthis._names.indexOf(e)&&(this._names.push(e),this._node.setAttribute("class",this._names.join(" ")))},remove:function(e){var t=this._names.indexOf(e);t>=0&&(this._names.splice(t,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(e){return this._names.indexOf(e)>=0}};var ec=n(789);function el(e){var t="function"==typeof e?e:(0,ec.Z)(e);return this.select(function(){return this.appendChild(t.apply(this,arguments))})}function ef(){return null}function ed(e,t){var n="function"==typeof e?e:(0,ec.Z)(e),i=null==t?ef:"function"==typeof t?t:(0,r.Z)(t);return this.select(function(){return this.insertBefore(n.apply(this,arguments),i.apply(this,arguments)||null)})}function eh(){var e=this.parentNode;e&&e.removeChild(this)}function ep(){return this.each(eh)}function eb(){var e=this.cloneNode(!1),t=this.parentNode;return t?t.insertBefore(e,this.nextSibling):e}function em(){var e=this.cloneNode(!0),t=this.parentNode;return t?t.insertBefore(e,this.nextSibling):e}function eg(e){return this.select(e?em:eb)}function ev(e){return arguments.length?this.property("__data__",e):this.node().__data__}var ey=n(25109),ew=n(85021);function e_(e,t,n){var r=(0,ew.Z)(e),i=r.CustomEvent;"function"==typeof i?i=new i(t,n):(i=r.document.createEvent("Event"),n?(i.initEvent(t,n.bubbles,n.cancelable),i.detail=n.detail):i.initEvent(t,!1,!1)),e.dispatchEvent(i)}function eE(e,t){return function(){return e_(this,e,t)}}function eS(e,t){return function(){return e_(this,e,t.apply(this,arguments))}}function ek(e,t){return this.each(("function"==typeof t?eS:eE)(e,t))}var ex=[null];function eT(e,t){this._groups=e,this._parents=t}function eM(){return new 
eT([[document.documentElement]],ex)}eT.prototype=eM.prototype={constructor:eT,select:i,selectAll:o,filter:u,data:m,enter:l,exit:g,join:v,merge:y,order:w,sort:_,call:S,nodes:k,node:x,size:T,empty:M,each:O,attr:R,style:j.Z,property:U,classed:Z,text:ee,html:ei,raise:eo,lower:eu,append:el,insert:ed,remove:ep,clone:eg,datum:ev,on:ey.ZP,dispatch:ek};let eO=eM},25109(e,t,n){"use strict";n.d(t,{B:()=>i,ZP:()=>l,_H:()=>f});var r={},i=null;function a(e,t,n){return e=o(e,t,n),function(t){var n=t.relatedTarget;n&&(n===this||8&n.compareDocumentPosition(this))||e.call(this,t)}}function o(e,t,n){return function(r){var a=i;i=r;try{e.call(this,this.__data__,t,n)}finally{i=a}}}function s(e){return e.trim().split(/^|\s+/).map(function(e){var t="",n=e.indexOf(".");return n>=0&&(t=e.slice(n+1),e=e.slice(0,n)),{type:e,name:t}})}function u(e){return function(){var t=this.__on;if(t){for(var n,r=0,i=-1,a=t.length;ru,Z:()=>s});var r=n(85021);function i(e){return function(){this.style.removeProperty(e)}}function a(e,t,n){return function(){this.style.setProperty(e,t,n)}}function o(e,t,n){return function(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(e):this.style.setProperty(e,r,n)}}function s(e,t,n){return arguments.length>1?this.each((null==t?i:"function"==typeof t?o:a)(e,t,null==n?"":n)):u(this.node(),e)}function u(e,t){return e.style.getPropertyValue(t)||(0,r.Z)(e).getComputedStyle(e,null).getPropertyValue(t)}},82634(e,t,n){"use strict";function r(){}function i(e){return null==e?r:function(){return this.querySelector(e)}}n.d(t,{Z:()=>i})},3545(e,t,n){"use strict";function r(){return[]}function i(e){return null==e?r:function(){return this.querySelectorAll(e)}}n.d(t,{Z:()=>i})},45553(e,t,n){"use strict";n.d(t,{Z:()=>i});var r=n(25109);function i(){for(var e,t=r.B;e=t.sourceEvent;)t=e;return t}},24793(e,t,n){"use strict";n.d(t,{Z:()=>a});var r=n(45553),i=n(42115);function a(e,t,n){arguments.length<3&&(n=t,t=(0,r.Z)().changedTouches);for(var 
a,o=0,s=t?t.length:0;or})},71098(e,t,n){"use strict";n.r(t),n.d(t,{arc:()=>C,area:()=>j,areaRadial:()=>W,curveBasis:()=>eM,curveBasisClosed:()=>eA,curveBasisOpen:()=>eC,curveBundle:()=>eD,curveCardinal:()=>eR,curveCardinalClosed:()=>eF,curveCardinalOpen:()=>eB,curveCatmullRom:()=>e$,curveCatmullRomClosed:()=>eG,curveCatmullRomOpen:()=>eK,curveLinear:()=>D,curveLinearClosed:()=>eq,curveMonotoneX:()=>e3,curveMonotoneY:()=>e4,curveNatural:()=>e9,curveStep:()=>e7,curveStepAfter:()=>tt,curveStepBefore:()=>te,line:()=>R,lineRadial:()=>G,linkHorizontal:()=>et,linkRadial:()=>er,linkVertical:()=>en,pie:()=>B,pointRadial:()=>K,radialArea:()=>W,radialLine:()=>G,stack:()=>ta,stackOffsetDiverging:()=>ts,stackOffsetExpand:()=>to,stackOffsetNone:()=>tn,stackOffsetSilhouette:()=>tu,stackOffsetWiggle:()=>tc,stackOrderAppearance:()=>tl,stackOrderAscending:()=>td,stackOrderDescending:()=>tp,stackOrderInsideOut:()=>tb,stackOrderNone:()=>tr,stackOrderReverse:()=>tm,symbol:()=>eS,symbolCircle:()=>ei,symbolCross:()=>ea,symbolDiamond:()=>eu,symbolSquare:()=>ep,symbolStar:()=>eh,symbolTriangle:()=>em,symbolWye:()=>e_,symbols:()=>eE});var r=Math.PI,i=2*r,a=1e-6,o=i-a;function s(){this._x0=this._y0=this._x1=this._y1=null,this._=""}function u(){return new s}s.prototype=u.prototype={constructor:s,moveTo:function(e,t){this._+="M"+(this._x0=this._x1=+e)+","+(this._y0=this._y1=+t)},closePath:function(){null!==this._x1&&(this._x1=this._x0,this._y1=this._y0,this._+="Z")},lineTo:function(e,t){this._+="L"+(this._x1=+e)+","+(this._y1=+t)},quadraticCurveTo:function(e,t,n,r){this._+="Q"+ +e+","+ +t+","+(this._x1=+n)+","+(this._y1=+r)},bezierCurveTo:function(e,t,n,r,i,a){this._+="C"+ +e+","+ +t+","+ +n+","+ +r+","+(this._x1=+i)+","+(this._y1=+a)},arcTo:function(e,t,n,i,o){e=+e,t=+t,n=+n,i=+i,o=+o;var s=this._x1,u=this._y1,c=n-e,l=i-t,f=s-e,d=u-t,h=f*f+d*d;if(o<0)throw Error("negative radius: "+o);if(null===this._x1)this._+="M"+(this._x1=e)+","+(this._y1=t);else if(h>a){if(Math.abs(d*c-l*f)>a&&o){var 
p=n-s,b=i-u,m=c*c+l*l,g=Math.sqrt(m),v=Math.sqrt(h),y=o*Math.tan((r-Math.acos((m+h-(p*p+b*b))/(2*g*v)))/2),w=y/v,_=y/g;Math.abs(w-1)>a&&(this._+="L"+(e+w*f)+","+(t+w*d)),this._+="A"+o+","+o+",0,0,"+ +(d*p>f*b)+","+(this._x1=e+_*c)+","+(this._y1=t+_*l)}else this._+="L"+(this._x1=e)+","+(this._y1=t)}},arc:function(e,t,n,s,u,c){e=+e,t=+t,n=+n,c=!!c;var l=n*Math.cos(s),f=n*Math.sin(s),d=e+l,h=t+f,p=1^c,b=c?s-u:u-s;if(n<0)throw Error("negative radius: "+n);null===this._x1?this._+="M"+d+","+h:(Math.abs(this._x1-d)>a||Math.abs(this._y1-h)>a)&&(this._+="L"+d+","+h),n&&(b<0&&(b=b%i+i),b>o?this._+="A"+n+","+n+",0,1,"+p+","+(e-l)+","+(t-f)+"A"+n+","+n+",0,1,"+p+","+(this._x1=d)+","+(this._y1=h):b>a&&(this._+="A"+n+","+n+",0,"+ +(b>=r)+","+p+","+(this._x1=e+n*Math.cos(u))+","+(this._y1=t+n*Math.sin(u))))},rect:function(e,t,n,r){this._+="M"+(this._x0=this._x1=+e)+","+(this._y0=this._y1=+t)+"h"+ +n+"v"+ +r+"h"+-n+"Z"},toString:function(){return this._}};let c=u;function l(e){return function(){return e}}var f=Math.abs,d=Math.atan2,h=Math.cos,p=Math.max,b=Math.min,m=Math.sin,g=Math.sqrt,v=1e-12,y=Math.PI,w=y/2,_=2*y;function E(e){return e>1?0:e<-1?y:Math.acos(e)}function S(e){return e>=1?w:e<=-1?-w:Math.asin(e)}function k(e){return e.innerRadius}function x(e){return e.outerRadius}function T(e){return e.startAngle}function M(e){return e.endAngle}function O(e){return e&&e.padAngle}function A(e,t,n,r,i,a,o,s){var u=n-e,c=r-t,l=o-i,f=s-a,d=f*u-l*c;if(!(d*dI*I+D*D&&(T=O,M=A),{cx:T,cy:M,x01:-l,y01:-f,x11:T*(i/S-1),y11:M*(i/S-1)}}function C(){var e=k,t=x,n=l(0),r=null,i=T,a=M,o=O,s=null;function u(){var u,l,p=+e.apply(this,arguments),k=+t.apply(this,arguments),x=i.apply(this,arguments)-w,T=a.apply(this,arguments)-w,M=f(T-x),O=T>x;if(s||(s=u=c()),kv){if(M>_-v)s.moveTo(k*h(x),k*m(x)),s.arc(0,0,k,x,T,!O),p>v&&(s.moveTo(p*h(T),p*m(T)),s.arc(0,0,p,T,x,O));else{var 
C,I,D=x,N=T,P=x,R=T,j=M,F=M,Y=o.apply(this,arguments)/2,B=Y>v&&(r?+r.apply(this,arguments):g(p*p+k*k)),U=b(f(k-p)/2,+n.apply(this,arguments)),H=U,$=U;if(B>v){var z=S(B/p*m(Y)),G=S(B/k*m(Y));(j-=2*z)>v?(z*=O?1:-1,P+=z,R-=z):(j=0,P=R=(x+T)/2),(F-=2*G)>v?(G*=O?1:-1,D+=G,N-=G):(F=0,D=N=(x+T)/2)}var W=k*h(D),K=k*m(D),V=p*h(R),q=p*m(R);if(U>v){var Z,X=k*h(N),J=k*m(N),Q=p*h(P),ee=p*m(P);if(Mv?$>v?(C=L(Q,ee,W,K,k,$,O),I=L(X,J,V,q,k,$,O),s.moveTo(C.cx+C.x01,C.cy+C.y01),$v&&j>v?H>v?(C=L(V,q,X,J,p,-H,O),I=L(W,K,Q,ee,p,-H,O),s.lineTo(C.cx+C.x01,C.cy+C.y01),H=f;--d)s.point(g[d],v[d]);s.lineEnd(),s.areaEnd()}}m&&(g[l]=+e(h,l,u),v[l]=+n(h,l,u),s.point(t?+t(h,l,u):g[l],r?+r(h,l,u):v[l]))}if(p)return s=null,p+""||null}function f(){return R().defined(i).curve(o).context(a)}return u.x=function(n){return arguments.length?(e="function"==typeof n?n:l(+n),t=null,u):e},u.x0=function(t){return arguments.length?(e="function"==typeof t?t:l(+t),u):e},u.x1=function(e){return arguments.length?(t=null==e?null:"function"==typeof e?e:l(+e),u):t},u.y=function(e){return arguments.length?(n="function"==typeof e?e:l(+e),r=null,u):n},u.y0=function(e){return arguments.length?(n="function"==typeof e?e:l(+e),u):n},u.y1=function(e){return arguments.length?(r=null==e?null:"function"==typeof e?e:l(+e),u):r},u.lineX0=u.lineY0=function(){return f().x(e).y(n)},u.lineY1=function(){return f().x(e).y(r)},u.lineX1=function(){return f().x(t).y(n)},u.defined=function(e){return arguments.length?(i="function"==typeof e?e:l(!!e),u):i},u.curve=function(e){return arguments.length?(o=e,null!=a&&(s=o(a)),u):o},u.context=function(e){return arguments.length?(null==e?a=s=null:s=o(a=e),u):a},u}function F(e,t){return te?1:t>=e?0:NaN}function Y(e){return e}function B(){var e=Y,t=F,n=null,r=l(0),i=l(_),a=l(0);function o(o){var 
s,u,c,l,f,d=o.length,h=0,p=Array(d),b=Array(d),m=+r.apply(this,arguments),g=Math.min(_,Math.max(-_,i.apply(this,arguments)-m)),v=Math.min(Math.abs(g)/d,a.apply(this,arguments)),y=v*(g<0?-1:1);for(s=0;s0&&(h+=f);for(null!=t?p.sort(function(e,n){return t(b[e],b[n])}):null!=n&&p.sort(function(e,t){return n(o[e],o[t])}),s=0,c=h?(g-d*y)/h:0;s0?f*c:0)+y,b[u]={data:o[u],index:s,value:f,startAngle:m,endAngle:l,padAngle:v};return b}return o.value=function(t){return arguments.length?(e="function"==typeof t?t:l(+t),o):e},o.sortValues=function(e){return arguments.length?(t=e,n=null,o):t},o.sort=function(e){return arguments.length?(n=e,t=null,o):n},o.startAngle=function(e){return arguments.length?(r="function"==typeof e?e:l(+e),o):r},o.endAngle=function(e){return arguments.length?(i="function"==typeof e?e:l(+e),o):i},o.padAngle=function(e){return arguments.length?(a="function"==typeof e?e:l(+e),o):a},o}I.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(e,t):this._context.moveTo(e,t);break;case 1:this._point=2;default:this._context.lineTo(e,t)}}};var U=$(D);function H(e){this._curve=e}function $(e){function t(t){return new H(e(t))}return t._curve=e,t}function z(e){var t=e.curve;return e.angle=e.x,delete e.x,e.radius=e.y,delete e.y,e.curve=function(e){return arguments.length?t($(e)):t()._curve},e}function G(){return z(R().curve(U))}function W(){var e=j().curve(U),t=e.curve,n=e.lineX0,r=e.lineX1,i=e.lineY0,a=e.lineY1;return e.angle=e.x,delete e.x,e.startAngle=e.x0,delete e.x0,e.endAngle=e.x1,delete e.x1,e.radius=e.y,delete e.y,e.innerRadius=e.y0,delete e.y0,e.outerRadius=e.y1,delete e.y1,e.lineStartAngle=function(){return z(n())},delete e.lineX0,e.lineEndAngle=function(){return z(r())},delete 
e.lineX1,e.lineInnerRadius=function(){return z(i())},delete e.lineY0,e.lineOuterRadius=function(){return z(a())},delete e.lineY1,e.curve=function(e){return arguments.length?t($(e)):t()._curve},e}function K(e,t){return[(t=+t)*Math.cos(e-=Math.PI/2),t*Math.sin(e)]}H.prototype={areaStart:function(){this._curve.areaStart()},areaEnd:function(){this._curve.areaEnd()},lineStart:function(){this._curve.lineStart()},lineEnd:function(){this._curve.lineEnd()},point:function(e,t){this._curve.point(t*Math.sin(e),-(t*Math.cos(e)))}};var V=Array.prototype.slice;function q(e){return e.source}function Z(e){return e.target}function X(e){var t=q,n=Z,r=N,i=P,a=null;function o(){var o,s=V.call(arguments),u=t.apply(this,s),l=n.apply(this,s);if(a||(a=o=c()),e(a,+r.apply(this,(s[0]=u,s)),+i.apply(this,s),+r.apply(this,(s[0]=l,s)),+i.apply(this,s)),o)return a=null,o+""||null}return o.source=function(e){return arguments.length?(t=e,o):t},o.target=function(e){return arguments.length?(n=e,o):n},o.x=function(e){return arguments.length?(r="function"==typeof e?e:l(+e),o):r},o.y=function(e){return arguments.length?(i="function"==typeof e?e:l(+e),o):i},o.context=function(e){return arguments.length?(a=null==e?null:e,o):a},o}function J(e,t,n,r,i){e.moveTo(t,n),e.bezierCurveTo(t=(t+r)/2,n,t,i,r,i)}function Q(e,t,n,r,i){e.moveTo(t,n),e.bezierCurveTo(t,n=(n+i)/2,r,n,r,i)}function ee(e,t,n,r,i){var a=K(t,n),o=K(t,n=(n+i)/2),s=K(r,n),u=K(r,i);e.moveTo(a[0],a[1]),e.bezierCurveTo(o[0],o[1],s[0],s[1],u[0],u[1])}function et(){return X(J)}function en(){return X(Q)}function er(){var e=X(ee);return e.angle=e.x,delete e.x,e.radius=e.y,delete e.y,e}let ei={draw:function(e,t){var n=Math.sqrt(t/y);e.moveTo(n,0),e.arc(0,0,n,0,_)}},ea={draw:function(e,t){var n=Math.sqrt(t/5)/2;e.moveTo(-3*n,-n),e.lineTo(-n,-n),e.lineTo(-n,-3*n),e.lineTo(n,-3*n),e.lineTo(n,-n),e.lineTo(3*n,-n),e.lineTo(3*n,n),e.lineTo(n,n),e.lineTo(n,3*n),e.lineTo(-n,3*n),e.lineTo(-n,n),e.lineTo(-3*n,n),e.closePath()}};var eo=Math.sqrt(1/3),es=2*eo;let 
eu={draw:function(e,t){var n=Math.sqrt(t/es),r=n*eo;e.moveTo(0,-n),e.lineTo(r,0),e.lineTo(0,n),e.lineTo(-r,0),e.closePath()}};var ec=.8908130915292852,el=Math.sin(y/10)/Math.sin(7*y/10),ef=Math.sin(_/10)*el,ed=-Math.cos(_/10)*el;let eh={draw:function(e,t){var n=Math.sqrt(t*ec),r=ef*n,i=ed*n;e.moveTo(0,-n),e.lineTo(r,i);for(var a=1;a<5;++a){var o=_*a/5,s=Math.cos(o),u=Math.sin(o);e.lineTo(u*n,-s*n),e.lineTo(s*r-u*i,u*r+s*i)}e.closePath()}},ep={draw:function(e,t){var n=Math.sqrt(t),r=-n/2;e.rect(r,r,n,n)}};var eb=Math.sqrt(3);let em={draw:function(e,t){var n=-Math.sqrt(t/(3*eb));e.moveTo(0,2*n),e.lineTo(-eb*n,-n),e.lineTo(eb*n,-n),e.closePath()}};var eg=-.5,ev=Math.sqrt(3)/2,ey=1/Math.sqrt(12),ew=(ey/2+1)*3;let e_={draw:function(e,t){var n=Math.sqrt(t/ew),r=n/2,i=n*ey,a=r,o=n*ey+n,s=-a,u=o;e.moveTo(r,i),e.lineTo(a,o),e.lineTo(s,u),e.lineTo(eg*r-ev*i,ev*r+eg*i),e.lineTo(eg*a-ev*o,ev*a+eg*o),e.lineTo(eg*s-ev*u,ev*s+eg*u),e.lineTo(eg*r+ev*i,eg*i-ev*r),e.lineTo(eg*a+ev*o,eg*o-ev*a),e.lineTo(eg*s+ev*u,eg*u-ev*s),e.closePath()}};var eE=[ei,ea,eu,ep,eh,em,e_];function eS(){var e=l(ei),t=l(64),n=null;function r(){var r;if(n||(n=r=c()),e.apply(this,arguments).draw(n,+t.apply(this,arguments)),r)return n=null,r+""||null}return r.type=function(t){return arguments.length?(e="function"==typeof t?t:l(t),r):e},r.size=function(e){return arguments.length?(t="function"==typeof e?e:l(+e),r):t},r.context=function(e){return arguments.length?(n=null==e?null:e,r):n},r}function ek(){}function ex(e,t,n){e._context.bezierCurveTo((2*e._x0+e._x1)/3,(2*e._y0+e._y1)/3,(e._x0+2*e._x1)/3,(e._y0+2*e._y1)/3,(e._x0+4*e._x1+t)/6,(e._y0+4*e._y1+n)/6)}function eT(e){this._context=e}function eM(e){return new eT(e)}function eO(e){this._context=e}function eA(e){return new eO(e)}function eL(e){this._context=e}function eC(e){return new eL(e)}function eI(e,t){this._basis=new 
eT(e),this._beta=t}eT.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){switch(this._point){case 3:ex(this,this._x1,this._y1);case 2:this._context.lineTo(this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(e,t):this._context.moveTo(e,t);break;case 1:this._point=2;break;case 2:this._point=3,this._context.lineTo((5*this._x0+this._x1)/6,(5*this._y0+this._y1)/6);default:ex(this,e,t)}this._x0=this._x1,this._x1=e,this._y0=this._y1,this._y1=t}},eO.prototype={areaStart:ek,areaEnd:ek,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._y0=this._y1=this._y2=this._y3=this._y4=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x2,this._y2),this._context.closePath();break;case 2:this._context.moveTo((this._x2+2*this._x3)/3,(this._y2+2*this._y3)/3),this._context.lineTo((this._x3+2*this._x2)/3,(this._y3+2*this._y2)/3),this._context.closePath();break;case 3:this.point(this._x2,this._y2),this.point(this._x3,this._y3),this.point(this._x4,this._y4)}},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._x2=e,this._y2=t;break;case 1:this._point=2,this._x3=e,this._y3=t;break;case 2:this._point=3,this._x4=e,this._y4=t,this._context.moveTo((this._x0+4*this._x1+e)/6,(this._y0+4*this._y1+t)/6);break;default:ex(this,e,t)}this._x0=this._x1,this._x1=e,this._y0=this._y1,this._y1=t}},eL.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){switch(e=+e,t=+t,this._point){case 
0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3;var n=(this._x0+4*this._x1+e)/6,r=(this._y0+4*this._y1+t)/6;this._line?this._context.lineTo(n,r):this._context.moveTo(n,r);break;case 3:this._point=4;default:ex(this,e,t)}this._x0=this._x1,this._x1=e,this._y0=this._y1,this._y1=t}},eI.prototype={lineStart:function(){this._x=[],this._y=[],this._basis.lineStart()},lineEnd:function(){var e=this._x,t=this._y,n=e.length-1;if(n>0)for(var r,i=e[0],a=t[0],o=e[n]-i,s=t[n]-a,u=-1;++u<=n;)r=u/n,this._basis.point(this._beta*e[u]+(1-this._beta)*(i+r*o),this._beta*t[u]+(1-this._beta)*(a+r*s));this._x=this._y=null,this._basis.lineEnd()},point:function(e,t){this._x.push(+e),this._y.push(+t)}};let eD=function e(t){function n(e){return 1===t?new eT(e):new eI(e,t)}return n.beta=function(t){return e(+t)},n}(.85);function eN(e,t,n){e._context.bezierCurveTo(e._x1+e._k*(e._x2-e._x0),e._y1+e._k*(e._y2-e._y0),e._x2+e._k*(e._x1-t),e._y2+e._k*(e._y1-n),e._x2,e._y2)}function eP(e,t){this._context=e,this._k=(1-t)/6}eP.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:eN(this,this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(e,t):this._context.moveTo(e,t);break;case 1:this._point=2,this._x1=e,this._y1=t;break;case 2:this._point=3;default:eN(this,e,t)}this._x0=this._x1,this._x1=this._x2,this._x2=e,this._y0=this._y1,this._y1=this._y2,this._y2=t}};let eR=function e(t){function n(e){return new eP(e,0)}return n.tension=function(t){return e(+t)},n}(0);function 
ej(e,t){this._context=e,this._k=(1-t)/6}ej.prototype={areaStart:ek,areaEnd:ek,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._x3=e,this._y3=t;break;case 1:this._point=2,this._context.moveTo(this._x4=e,this._y4=t);break;case 2:this._point=3,this._x5=e,this._y5=t;break;default:eN(this,e,t)}this._x0=this._x1,this._x1=this._x2,this._x2=e,this._y0=this._y1,this._y1=this._y2,this._y2=t}};let eF=function e(t){function n(e){return new ej(e,0)}return n.tension=function(t){return e(+t)},n}(0);function eY(e,t){this._context=e,this._k=(1-t)/6}eY.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:eN(this,e,t)}this._x0=this._x1,this._x1=this._x2,this._x2=e,this._y0=this._y1,this._y1=this._y2,this._y2=t}};let eB=function e(t){function n(e){return new eY(e,0)}return n.tension=function(t){return e(+t)},n}(0);function eU(e,t,n){var r=e._x1,i=e._y1,a=e._x2,o=e._y2;if(e._l01_a>v){var 
s=2*e._l01_2a+3*e._l01_a*e._l12_a+e._l12_2a,u=3*e._l01_a*(e._l01_a+e._l12_a);r=(r*s-e._x0*e._l12_2a+e._x2*e._l01_2a)/u,i=(i*s-e._y0*e._l12_2a+e._y2*e._l01_2a)/u}if(e._l23_a>v){var c=2*e._l23_2a+3*e._l23_a*e._l12_a+e._l12_2a,l=3*e._l23_a*(e._l23_a+e._l12_a);a=(a*c+e._x1*e._l23_2a-t*e._l12_2a)/l,o=(o*c+e._y1*e._l23_2a-n*e._l12_2a)/l}e._context.bezierCurveTo(r,i,a,o,e._x2,e._y2)}function eH(e,t){this._context=e,this._alpha=t}eH.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:this.point(this._x2,this._y2)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){if(e=+e,t=+t,this._point){var n=this._x2-e,r=this._y2-t;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._line?this._context.lineTo(e,t):this._context.moveTo(e,t);break;case 1:this._point=2;break;case 2:this._point=3;default:eU(this,e,t)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=e,this._y0=this._y1,this._y1=this._y2,this._y2=t}};let e$=function e(t){function n(e){return t?new eH(e,t):new eP(e,0)}return n.alpha=function(t){return e(+t)},n}(.5);function ez(e,t){this._context=e,this._alpha=t}ez.prototype={areaStart:ek,areaEnd:ek,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 
2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(e,t){if(e=+e,t=+t,this._point){var n=this._x2-e,r=this._y2-t;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._x3=e,this._y3=t;break;case 1:this._point=2,this._context.moveTo(this._x4=e,this._y4=t);break;case 2:this._point=3,this._x5=e,this._y5=t;break;default:eU(this,e,t)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=e,this._y0=this._y1,this._y1=this._y2,this._y2=t}};let eG=function e(t){function n(e){return t?new ez(e,t):new ej(e,0)}return n.alpha=function(t){return e(+t)},n}(.5);function eW(e,t){this._context=e,this._alpha=t}eW.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){if(e=+e,t=+t,this._point){var n=this._x2-e,r=this._y2-t;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:eU(this,e,t)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=e,this._y0=this._y1,this._y1=this._y2,this._y2=t}};let eK=function e(t){function n(e){return t?new eW(e,t):new eY(e,0)}return n.alpha=function(t){return e(+t)},n}(.5);function eV(e){this._context=e}function eq(e){return new 
eV(e)}function eZ(e){return e<0?-1:1}function eX(e,t,n){var r=e._x1-e._x0,i=t-e._x1,a=(e._y1-e._y0)/(r||i<0&&-0),o=(n-e._y1)/(i||r<0&&-0),s=(a*i+o*r)/(r+i);return(eZ(a)+eZ(o))*Math.min(Math.abs(a),Math.abs(o),.5*Math.abs(s))||0}function eJ(e,t){var n=e._x1-e._x0;return n?(3*(e._y1-e._y0)/n-t)/2:t}function eQ(e,t,n){var r=e._x0,i=e._y0,a=e._x1,o=e._y1,s=(a-r)/3;e._context.bezierCurveTo(r+s,i+s*t,a-s,o-s*n,a,o)}function e1(e){this._context=e}function e0(e){this._context=new e2(e)}function e2(e){this._context=e}function e3(e){return new e1(e)}function e4(e){return new e0(e)}function e5(e){this._context=e}function e6(e){var t,n,r=e.length-1,i=Array(r),a=Array(r),o=Array(r);for(i[0]=0,a[0]=2,o[0]=e[0]+2*e[1],t=1;t=0;--t)i[t]=(o[t]-i[t+1])/a[t];for(t=0,a[r-1]=(e[r]+i[r-1])/2;t1)for(var n,r,i,a=1,o=e[t[0]],s=o.length;a=0;)n[t]=t;return n}function ti(e,t){return e[t]}function ta(){var e=l([]),t=tr,n=tn,r=ti;function i(i){var a,o,s=e.apply(this,arguments),u=i.length,c=s.length,l=Array(c);for(a=0;a0){for(var n,r,i,a=0,o=e[0].length;a0)for(var n,r,i,a,o,s,u=0,c=e[t[0]].length;u0?(r[0]=a,r[1]=a+=i):i<0?(r[1]=o,r[0]=o+=i):(r[0]=0,r[1]=i)}function tu(e,t){if((n=e.length)>0){for(var n,r=0,i=e[t[0]],a=i.length;r0&&(r=(n=e[t[0]]).length)>0){for(var n,r,i,a=0,o=1;oa&&(a=t,r=n);return r}function td(e){var t=e.map(th);return tr(e).sort(function(e,n){return t[e]-t[n]})}function th(e){for(var t,n=0,r=-1,i=e.length;++r=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(e,t):this._context.moveTo(e,t);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,t),this._context.lineTo(e,t);else{var n=this._x*(1-this._t)+e*this._t;this._context.lineTo(n,this._y),this._context.lineTo(n,t)}}this._x=e,this._y=t}}},35374(e,t,n){"use strict";n.d(t,{B7:()=>m,HT:()=>g,zO:()=>p});var r,i,a=0,o=0,s=0,u=1e3,c=0,l=0,f=0,d="object"==typeof 
performance&&performance.now?performance:Date,h="object"==typeof window&&window.requestAnimationFrame?window.requestAnimationFrame.bind(window):function(e){setTimeout(e,17)};function p(){return l||(h(b),l=d.now()+f)}function b(){l=0}function m(){this._call=this._time=this._next=null}function g(e,t,n){var r=new m;return r.restart(e,t,n),r}function v(){p(),++a;for(var e,t=r;t;)(e=l-t._time)>=0&&t._call.call(null,e),t=t._next;--a}function y(){l=(c=d.now())+f,a=o=0;try{v()}finally{a=0,_(),l=0}}function w(){var e=d.now(),t=e-c;t>u&&(f-=t,c=e)}function _(){for(var e,t,n=r,a=1/0;n;)n._call?(a>n._time&&(a=n._time),e=n,n=n._next):(t=n._next,n._next=null,n=e?e._next=t:r=t);i=e,E(a)}function E(e){if(!a){var t;o&&(o=clearTimeout(o)),e-l>24?(e<1/0&&(o=setTimeout(y,e-d.now()-f)),s&&(s=clearInterval(s))):(s||(c=d.now(),s=setInterval(w,u)),a=1,h(y))}}m.prototype=g.prototype={constructor:m,restart:function(e,t,n){if("function"!=typeof e)throw TypeError("callback is not a function");n=(null==n?p():+n)+(null==t?0:+t),this._next||i===this||(i?i._next=this:r=this,i=this),this._call=e,this._time=n,E()},stop:function(){this._call&&(this._call=null,this._time=1/0,E())}}},76626(e,t,n){"use strict";n.r(t),n.d(t,{zoom:()=>t5,zoomIdentity:()=>tq,zoomTransform:()=>tZ});var r,i,a,o,s=n(92626),u=n(44266),c=Math.SQRT2,l=2,f=4,d=1e-12;function h(e){return((e=Math.exp(e))+1/e)/2}function p(e){return((e=Math.exp(e))-1/e)/2}function b(e){return((e=Math.exp(2*e))-1)/(e+1)}function m(e,t){var n,r,i=e[0],a=e[1],o=e[2],s=t[0],u=t[1],m=t[2],g=s-i,v=u-a,y=g*g+v*v;if(yT)throw Error("too late; already scheduled");return n}function P(e,t){var n=R(e,t);if(n.state>A)throw Error("too late; already running");return n}function R(e,t){var n=e.__transition;if(!n||!(n=n[t]))throw Error("transition not found");return n}function j(e,t,n){var r,i=e.__transition;function a(e){n.state=M,n.timer.restart(o,n.delay,n.time),n.delay<=e&&o(e-n.delay)}function o(a){var c,l,f,d;if(n.state!==M)return u();for(c in 
i)if((d=i[c]).name===n.name){if(d.state===A)return S(o);d.state===L?(d.state=I,d.timer.stop(),d.on.call("interrupt",e,e.__data__,d.index,d.group),delete i[c]):+cO&&n.state180?t+=360:t-e>180&&(e+=360),a.push({i:n.push(i(n)+"rotate(",null,r)-2,x:B(e,t)})):t&&n.push(i(n)+"rotate("+t+r)}function s(e,t,n,a){e!==t?a.push({i:n.push(i(n)+"skewX(",null,r)-2,x:B(e,t)}):t&&n.push(i(n)+"skewX("+t+r)}function u(e,t,n,r,a,o){if(e!==n||t!==r){var s=a.push(i(a)+"scale(",null,",",null,")");o.push({i:s-4,x:B(e,n)},{i:s-2,x:B(t,r)})}else(1!==n||1!==r)&&a.push(i(a)+"scale("+n+","+r+")")}return function(t,n){var r=[],i=[];return t=e(t),n=e(n),a(t.translateX,t.translateY,n.translateX,n.translateY,r,i),o(t.rotate,n.rotate,r,i),s(t.skewX,n.skewX,r,i),u(t.scaleX,t.scaleY,n.scaleX,n.scaleY,r,i),t=n=null,function(e){for(var t,n=-1,a=i.length;++n>8&15|t>>4&240,t>>4&15|240&t,(15&t)<<4|15&t,1):8===n?e_(t>>24&255,t>>16&255,t>>8&255,(255&t)/255):4===n?e_(t>>12&15|t>>8&240,t>>8&15|t>>4&240,t>>4&15|240&t,((15&t)<<4|15&t)/255):null):(t=ec.exec(e))?new ek(t[1],t[2],t[3],1):(t=el.exec(e))?new ek(255*t[1]/100,255*t[2]/100,255*t[3]/100,1):(t=ef.exec(e))?e_(t[1],t[2],t[3],t[4]):(t=ed.exec(e))?e_(255*t[1]/100,255*t[2]/100,255*t[3]/100,t[4]):(t=eh.exec(e))?eO(t[1],t[2]/100,t[3]/100,1):(t=ep.exec(e))?eO(t[1],t[2]/100,t[3]/100,t[4]):eb.hasOwnProperty(e)?ew(eb[e]):"transparent"===e?new ek(NaN,NaN,NaN,0):null}function ew(e){return new ek(e>>16&255,e>>8&255,255&e,1)}function e_(e,t,n,r){return r<=0&&(e=t=n=NaN),new ek(e,t,n,r)}function eE(e){return(e instanceof en||(e=ey(e)),e)?(e=e.rgb(),new ek(e.r,e.g,e.b,e.opacity)):new ek}function eS(e,t,n,r){return 1===arguments.length?eE(e):new ek(e,t,n,null==r?1:r)}function ek(e,t,n,r){this.r=+e,this.g=+t,this.b=+n,this.opacity=+r}function ex(){return"#"+eM(this.r)+eM(this.g)+eM(this.b)}function eT(){var e=this.opacity;return(1===(e=isNaN(e)?1:Math.max(0,Math.min(1,e)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", 
"+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===e?")":", "+e+")")}function eM(e){return((e=Math.max(0,Math.min(255,Math.round(e)||0)))<16?"0":"")+e.toString(16)}function eO(e,t,n,r){return r<=0?e=t=n=NaN:n<=0||n>=1?e=t=NaN:t<=0&&(e=NaN),new eC(e,t,n,r)}function eA(e){if(e instanceof eC)return new eC(e.h,e.s,e.l,e.opacity);if(e instanceof en||(e=ey(e)),!e)return new eC;if(e instanceof eC)return e;var t=(e=e.rgb()).r/255,n=e.g/255,r=e.b/255,i=Math.min(t,n,r),a=Math.max(t,n,r),o=NaN,s=a-i,u=(a+i)/2;return s?(o=t===a?(n-r)/s+(n0&&u<1?0:o,new eC(o,s,u,e.opacity)}function eL(e,t,n,r){return 1===arguments.length?eA(e):new eC(e,t,n,null==r?1:r)}function eC(e,t,n,r){this.h=+e,this.s=+t,this.l=+n,this.opacity=+r}function eI(e,t,n){return(e<60?t+(n-t)*e/60:e<180?n:e<240?t+(n-t)*(240-e)/60:t)*255}function eD(e,t,n,r,i){var a=e*e,o=a*e;return((1-3*e+3*a-o)*t+(4-6*a+3*o)*n+(1+3*e+3*a-3*o)*r+o*i)/6}function eN(e){var t=e.length-1;return function(n){var r=n<=0?n=0:n>=1?(n=1,t-1):Math.floor(n*t),i=e[r],a=e[r+1],o=r>0?e[r-1]:2*i-a,s=r=240?e-240:e+120,i,r),eI(e,i,r),eI(e<120?e+240:e-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var e=this.opacity;return(1===(e=isNaN(e)?1:Math.max(0,Math.min(1,e)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===e?")":", "+e+")")}}));let eU=function e(t){var n=eY(1);function r(e,t){var r=n((e=eS(e)).r,(t=eS(t)).r),i=n(e.g,t.g),a=n(e.b,t.b),o=eB(e.opacity,t.opacity);return function(t){return e.r=r(t),e.g=i(t),e.b=a(t),e.opacity=o(t),e+""}}return r.gamma=e,r}(1);function eH(e){return function(t){var n,r,i=t.length,a=Array(i),o=Array(i),s=Array(i);for(n=0;na&&(i=t.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(n=n[0])===(r=r[0])?s[o]?s[o]+=r:s[++o]=r:(s[++o]=null,u.push({i:o,x:B(n,r)})),a=ez.lastIndex;return 
a=0&&(e=e.slice(0,t)),!e||"start"===e})}function tc(e,t,n){var r,i,a=tu(t)?N:P;return function(){var o=a(this,e),s=o.on;s!==r&&(i=(r=s).copy()).on(t,n),o.on=i}}function tl(e,t){var n=this._id;return arguments.length<2?R(this.node(),n).on.on(e):this.each(tc(n,e,t))}function tf(e){return function(){var t=this.parentNode;for(var n in this.__transition)if(+n!==e)return;t&&t.removeChild(this)}}function td(){return this.on("end.remove",tf(this._id))}var th=n(82634);function tp(e){var t=this._name,n=this._id;"function"!=typeof e&&(e=(0,th.Z)(e));for(var r=this._groups,i=r.length,a=Array(i),o=0;or?(r+i)/2:Math.min(0,r)||Math.max(0,i),o>a?(a+o)/2:Math.min(0,a)||Math.max(0,o))}function t5(){var e,t,n=tQ,r=t1,i=t4,a=t2,o=t3,c=[0,1/0],l=[[-1/0,-1/0],[1/0,1/0]],f=250,d=m,h=(0,s.Z)("start","zoom","end"),p=500,b=150,_=0;function E(e){e.property("__zoom",t0).on("wheel.zoom",A).on("mousedown.zoom",L).on("dblclick.zoom",C).filter(o).on("touchstart.zoom",I).on("touchmove.zoom",D).on("touchend.zoom touchcancel.zoom",N).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function S(e,t){return(t=Math.max(c[0],Math.min(c[1],t)))===e.k?e:new tV(t,e.x,e.y)}function k(e,t,n){var r=t[0]-n[0]*e.k,i=t[1]-n[1]*e.k;return r===e.x&&i===e.y?e:new tV(e.k,r,i)}function x(e){return[(+e[0][0]+ +e[1][0])/2,(+e[0][1]+ +e[1][1])/2]}function T(e,t,n){e.on("start.zoom",function(){M(this,arguments).start()}).on("interrupt.zoom end.zoom",function(){M(this,arguments).end()}).tween("zoom",function(){var e=this,i=arguments,a=M(e,i),o=r.apply(e,i),s=null==n?x(o):"function"==typeof n?n.apply(e,i):n,u=Math.max(o[1][0]-o[0][0],o[1][1]-o[0][1]),c=e.__zoom,l="function"==typeof t?t.apply(e,i):t,f=d(c.invert(s).concat(u/c.k),l.invert(s).concat(u/l.k));return function(e){if(1===e)e=l;else{var t=f(e),n=u/t[2];e=new tV(n,s[0]-t[0]*n,s[1]-t[1]*n)}a.zoom(null,e)}})}function M(e,t,n){return!n&&e.__zooming||new O(e,t)}function 
O(e,t){this.that=e,this.args=t,this.active=0,this.extent=r.apply(e,t),this.taps=0}function A(){if(n.apply(this,arguments)){var e=M(this,arguments),t=this.__zoom,r=Math.max(c[0],Math.min(c[1],t.k*Math.pow(2,a.apply(this,arguments)))),o=(0,v.Z)(this);if(e.wheel)(e.mouse[0][0]!==o[0]||e.mouse[0][1]!==o[1])&&(e.mouse[1]=t.invert(e.mouse[0]=o)),clearTimeout(e.wheel);else{if(t.k===r)return;e.mouse=[o,t.invert(o)],F(this),e.start()}tJ(),e.wheel=setTimeout(s,b),e.zoom("mouse",i(k(S(t,r),e.mouse[0],e.mouse[1]),e.extent,l))}function s(){e.wheel=null,e.end()}}function L(){if(!t&&n.apply(this,arguments)){var e=M(this,arguments,!0),r=(0,y.Z)(g.B.view).on("mousemove.zoom",c,!0).on("mouseup.zoom",f,!0),a=(0,v.Z)(this),o=g.B.clientX,s=g.B.clientY;(0,u.Z)(g.B.view),tX(),e.mouse=[a,this.__zoom.invert(a)],F(this),e.start()}function c(){if(tJ(),!e.moved){var t=g.B.clientX-o,n=g.B.clientY-s;e.moved=t*t+n*n>_}e.zoom("mouse",i(k(e.that.__zoom,e.mouse[0]=(0,v.Z)(e.that),e.mouse[1]),e.extent,l))}function f(){r.on("mousemove.zoom mouseup.zoom",null),(0,u.D)(g.B.view,e.moved),tJ(),e.end()}}function C(){if(n.apply(this,arguments)){var e=this.__zoom,t=(0,v.Z)(this),a=e.invert(t),o=e.k*(g.B.shiftKey?.5:2),s=i(k(S(e,o),t,a),r.apply(this,arguments),l);tJ(),f>0?(0,y.Z)(this).transition().duration(f).call(T,s,t):(0,y.Z)(this).call(E.transform,s)}}function I(){if(n.apply(this,arguments)){var t,r,i,a,o=g.B.touches,s=o.length,u=M(this,arguments,g.B.changedTouches.length===s);for(tX(),r=0;r=0?i=setTimeout(r,t-c):(i=null,n||(u=e.apply(o,a),o=a=null))}null==t&&(t=100);var i,a,o,s,u,c=function(){o=this,a=arguments,s=Date.now();var c=n&&!i;return i||(i=setTimeout(r,t)),c&&(u=e.apply(o,a),o=a=null),u};return c.clear=function(){i&&(clearTimeout(i),i=null)},c.flush=function(){i&&(u=e.apply(o,a),o=a=null,clearTimeout(i),i=null)},c}t.debounce=t,e.exports=t},94863:function(e){var t,n;t=this,n=function(){"use strict";var e=function(e){return t(e)&&!n(e)};function t(e){return!!e&&"object"==typeof e}function 
n(e){var t=Object.prototype.toString.call(e);return"[object RegExp]"===t||"[object Date]"===t||i(e)}var r="function"==typeof Symbol&&Symbol.for?Symbol.for("react.element"):60103;function i(e){return e.$$typeof===r}function a(e){return Array.isArray(e)?[]:{}}function o(e,t){return!1!==t.clone&&t.isMergeableObject(e)?d(a(e),e,t):e}function s(e,t,n){return e.concat(t).map(function(e){return o(e,n)})}function u(e,t){if(!t.customMerge)return d;var n=t.customMerge(e);return"function"==typeof n?n:d}function c(e){return Object.getOwnPropertySymbols?Object.getOwnPropertySymbols(e).filter(function(t){return e.propertyIsEnumerable(t)}):[]}function l(e){return Object.keys(e).concat(c(e))}function f(e,t,n){var r={};return n.isMergeableObject(e)&&l(e).forEach(function(t){r[t]=o(e[t],n)}),l(t).forEach(function(i){n.isMergeableObject(t[i])&&e[i]?r[i]=u(i,n)(e[i],t[i],n):r[i]=o(t[i],n)}),r}function d(t,n,r){(r=r||{}).arrayMerge=r.arrayMerge||s,r.isMergeableObject=r.isMergeableObject||e;var i=Array.isArray(n);return i!==Array.isArray(t)?o(n,r):i?r.arrayMerge(t,n,r):f(t,n,r)}return d.all=function(e,t){if(!Array.isArray(e))throw Error("first argument should be an array");return e.reduce(function(e,n){return d(e,n,t)},{})},d},e.exports=n()},7624(e,t){"use strict";function n(e){return e===e.window?e:9===e.nodeType&&(e.defaultView||e.parentWindow)}t.__esModule=!0,t.default=n,e.exports=t.default},87797(e,t,n){"use strict";var r=n(95318);t.__esModule=!0,t.default=s;var i=r(n(53497)),a=/^(top|right|bottom|left)$/,o=/^([+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|))(?!px)[a-z%]+$/i;function s(e){if(!e)throw TypeError("No Element passed to `getComputedStyle()`");var t=e.ownerDocument;return"defaultView"in t?t.defaultView.opener?e.ownerDocument.defaultView.getComputedStyle(e,null):window.getComputedStyle(e,null):{getPropertyValue:function(t){var n=e.style;"float"==(t=(0,i.default)(t))&&(t="styleFloat");var r=e.currentStyle[t]||null;if(null==r&&n&&n[t]&&(r=n[t]),o.test(r)&&!a.test(t)){var 
s=n.left,u=e.runtimeStyle,c=u&&u.left;c&&(u.left=e.currentStyle.left),n.left="fontSize"===t?"1em":r,r=n.pixelLeft+"px",n.left=s,c&&(u.left=c)}return r}}}e.exports=t.default},10162(e,t,n){"use strict";var r=n(95318);t.__esModule=!0,t.default=l;var i=r(n(53497)),a=r(n(24403)),o=r(n(87797)),s=r(n(91760)),u=n(20702),c=r(n(43293));function l(e,t,n){var r="",l="",f=t;if("string"==typeof t){if(void 0===n)return e.style[(0,i.default)(t)]||(0,o.default)(e).getPropertyValue((0,a.default)(t));(f={})[t]=n}Object.keys(f).forEach(function(t){var n=f[t];n||0===n?(0,c.default)(t)?l+=t+"("+n+") ":r+=(0,a.default)(t)+": "+n+";":(0,s.default)(e,(0,a.default)(t))}),l&&(r+=u.transform+": "+l+";"),e.style.cssText+=";"+r}e.exports=t.default},91760(e,t){"use strict";function n(e,t){return"removeProperty"in e.style?e.style.removeProperty(t):e.style.removeAttribute(t)}t.__esModule=!0,t.default=n,e.exports=t.default},43293(e,t){"use strict";t.__esModule=!0,t.default=r;var n=/^((translate|rotate|scale)(X|Y|Z|3d)?|matrix(3d)?|perspective|skew(X|Y)?)$/i;function r(e){return!!(e&&n.test(e))}e.exports=t.default},20702(e,t,n){"use strict";var r,i,a,o,s,u,c,l,f,d,h,p=n(95318);t.__esModule=!0,t.default=t.animationEnd=t.animationDelay=t.animationTiming=t.animationDuration=t.animationName=t.transitionEnd=t.transitionDuration=t.transitionDelay=t.transitionTiming=t.transitionProperty=t.transform=void 0;var b=p(n(50139)),m="transform";if(t.transform=m,t.animationEnd=a,t.transitionEnd=i,t.transitionDelay=c,t.transitionTiming=u,t.transitionDuration=s,t.transitionProperty=o,t.animationDelay=h,t.animationTiming=d,t.animationDuration=f,t.animationName=l,b.default){var 
g=y();r=g.prefix,t.transitionEnd=i=g.transitionEnd,t.animationEnd=a=g.animationEnd,t.transform=m=r+"-"+m,t.transitionProperty=o=r+"-transition-property",t.transitionDuration=s=r+"-transition-duration",t.transitionDelay=c=r+"-transition-delay",t.transitionTiming=u=r+"-transition-timing-function",t.animationName=l=r+"-animation-name",t.animationDuration=f=r+"-animation-duration",t.animationTiming=d=r+"-animation-delay",t.animationDelay=h=r+"-animation-timing-function"}var v={transform:m,end:i,property:o,timing:u,delay:c,duration:s};function y(){for(var e,t,n=document.createElement("div").style,r={O:function(e){return"o"+e.toLowerCase()},Moz:function(e){return e.toLowerCase()},Webkit:function(e){return"webkit"+e},ms:function(e){return"MS"+e}},i=Object.keys(r),a="",o=0;o0&&void 0!==arguments[0]?arguments[0]:{},r=n.defaultLayoutOptions,a=void 0===r?{}:r,s=n.algorithms,u=void 0===s?["layered","stress","mrtree","radial","force","disco","sporeOverlap","sporeCompaction","rectpacking"]:s,c=n.workerFactory,l=n.workerUrl;if(i(this,e),this.defaultLayoutOptions=a,this.initialized=!1,void 0===l&&void 0===c)throw Error("Cannot construct an ELK without both 'workerUrl' and 'workerFactory'.");var f=c;void 0!==l&&void 0===c&&(f=function(e){return new Worker(e)});var d=f(l);if("function"!=typeof d.postMessage)throw TypeError("Created worker does not provide the required 'postMessage' function.");this.worker=new o(d),this.worker.postMessage({cmd:"register",algorithms:u}).then(function(e){return t.initialized=!0}).catch(console.err)}return r(e,[{key:"layout",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.layoutOptions,r=void 0===n?this.defaultLayoutOptions:n,i=t.logging,a=void 0!==i&&i,o=t.measureExecutionTime,s=void 0!==o&&o;return e?this.worker.postMessage({cmd:"layout",graph:e,layoutOptions:r,options:{logging:a,measureExecutionTime:s}}):Promise.reject(Error("Missing mandatory parameter 
'graph'."))}},{key:"knownLayoutAlgorithms",value:function(){return this.worker.postMessage({cmd:"algorithms"})}},{key:"knownLayoutOptions",value:function(){return this.worker.postMessage({cmd:"options"})}},{key:"knownLayoutCategories",value:function(){return this.worker.postMessage({cmd:"categories"})}},{key:"terminateWorker",value:function(){this.worker.terminate()}}]),e}();n.default=a;var o=function(){function e(t){var n=this;if(i(this,e),void 0===t)throw Error("Missing mandatory parameter 'worker'.");this.resolvers={},this.worker=t,this.worker.onmessage=function(e){setTimeout(function(){n.receive(n,e)},0)}}return r(e,[{key:"postMessage",value:function(e){var t=this.id||0;this.id=t+1,e.id=t;var n=this;return new Promise(function(r,i){n.resolvers[t]=function(e,t){e?(n.convertGwtStyleError(e),i(e)):r(t)},n.worker.postMessage(e)})}},{key:"receive",value:function(e,t){var n=t.data,r=e.resolvers[n.id];r&&(delete e.resolvers[n.id],n.error?r(n.error):r(null,n.data))}},{key:"terminate",value:function(){this.worker.terminate&&this.worker.terminate()}},{key:"convertGwtStyleError",value:function(e){if(e){var t=e.__java$exception;t&&(t.cause&&t.cause.backingJsObject&&(e.cause=t.cause.backingJsObject,this.convertGwtStyleError(e.cause)),delete e.__java$exception)}}}]),e}()},{}],2:[function(e,t,n){"use strict";var r=e("./elk-api.js").default;Object.defineProperty(t.exports,"__esModule",{value:!0}),t.exports=r,r.default=r},{"./elk-api.js":1}]},{},[2])(2)},e.exports=t()},55273(e,t,n){"use strict";function r(){}function i(){}function a(){}function o(){}function s(){}function u(){}function c(){}function l(){}function f(){}function d(){}function h(){}function p(){}function b(){}function m(){}function g(){}function v(){}function y(){}function w(){}function _(){}function E(){}function S(){}function k(){}function x(){}function T(){}function M(){}function O(){}function A(){}function L(){}function C(){}function I(){}function D(){}function N(){}function P(){}function R(){}function 
j(){}function F(){}function Y(){}function B(){}function U(){}function H(){}function $(){}function z(){}function G(){}function W(){}function K(){}function V(){}function q(){}function Z(){}function X(){}function J(){}function Q(){}function ee(){}function et(){}function en(){}function er(){}function ei(){}function ea(){}function eo(){}function es(){}function eu(){}function ec(){}function el(){}function ef(){}function ed(){}function eh(){}function ep(){}function eb(){}function em(){}function eg(){}function ev(){}function ey(){}function ew(){}function e_(){}function eE(){}function eS(){}function ek(){}function ex(){}function eT(){}function eM(){}function eO(){}function eA(){}function eL(){}function eC(){}function eI(){}function eD(){}function eN(){}function eP(){}function eR(){}function ej(){}function eF(){}function eY(){}function eB(){}function eU(){}function eH(){}function e$(){}function ez(){}function eG(){}function eW(){}function eK(){}function eV(){}function eq(){}function eZ(){}function eX(){}function eJ(){}function eQ(){}function e1(){}function e0(){}function e2(){}function e3(){}function e4(){}function e5(){}function e6(){}function e9(){}function e8(){}function e7(){}function te(){}function tt(){}function tn(){}function tr(){}function ti(){}function ta(){}function to(){}function ts(){}function tu(){}function tc(){}function tl(){}function tf(){}function td(){}function th(){}function tp(){}function tb(){}function tm(){}function tg(){}function tv(){}function ty(){}function tw(){}function t_(){}function tE(){}function tS(){}function tk(){}function tx(){}function tT(){}function tM(){}function tO(){}function tA(){}function tL(){}function tC(){}function tI(){}function tD(){}function tN(){}function tP(){}function tR(){}function tj(){}function tF(){}function tY(){}function tB(){}function tU(){}function tH(){}function t$(){}function tz(){}function tG(){}function tW(){}function tK(){}function tV(){}function tq(){}function tZ(){}function tX(){}function tJ(){}function 
tQ(){}function t1(){}function t0(){}function t2(){}function t3(){}function t4(){}function t5(){}function t6(){}function t9(){}function t8(){}function t7(){}function ne(){}function nt(){}function nn(){}function nr(){}function ni(){}function na(){}function no(){}function ns(){}function nu(){}function nc(){}function nl(){}function nf(){}function nd(){}function nh(){}function np(){}function nb(){}function nm(){}function ng(){}function nv(){}function ny(){}function nw(){}function n_(){}function nE(){}function nS(){}function nk(){}function nx(){}function nT(){}function nM(){}function nO(){}function nA(){}function nL(){}function nC(){}function nI(){}function nD(){}function nN(){}function nP(){}function nR(){}function nj(){}function nF(){}function nY(){}function nB(){}function nU(){}function nH(){}function n$(){}function nz(){}function nG(){}function nW(){}function nK(){}function nV(){}function nq(){}function nZ(){}function nX(){}function nJ(){}function nQ(){}function n1(){}function n0(){}function n2(){}function n3(){}function n4(){}function n5(){}function n6(){}function n9(){}function n8(){}function n7(){}function re(){}function rt(){}function rn(){}function rr(){}function ri(){}function ra(){}function ro(){}function rs(){}function ru(){}function rc(){}function rl(){}function rf(){}function rd(){}function rh(){}function rp(){}function rb(){}function rm(){}function rg(){}function rv(){}function ry(){}function rw(){}function r_(){}function rE(){}function rS(){}function rk(){}function rx(){}function rT(){}function rM(){}function rO(){}function rA(){}function rL(){}function rC(){}function rI(){}function rD(){}function rN(){}function rP(){}function rR(){}function rj(){}function rF(){}function rY(){}function rB(){}function rU(){}function rH(){}function r$(){}function rz(){}function rG(){}function rW(){}function rK(){}function rV(){}function rq(){}function rZ(){}function rX(){}function rJ(){}function rQ(){}function r1(){}function r0(){}function r2(){}function r3(){}function 
r4(){}function r5(){}function r6(){}function r9(){}function r8(){}function r7(){}function ie(){}function it(){}function ir(){}function ii(){}function ia(){}function io(){}function is(){}function iu(){}function ic(){}function il(){}function id(){}function ih(){}function ip(){}function ib(){}function im(){}function ig(){}function iv(){}function iy(){}function iw(){}function i_(){}function iE(){}function iS(){}function ik(){}function ix(){}function iT(){}function iM(){}function iO(){}function iA(){}function iL(){}function iC(){}function iI(){}function iD(){}function iN(){}function iP(){}function iR(){}function ij(){}function iF(){}function iY(){}function iB(){}function iU(){}function iH(){}function i$(){}function iz(){}function iG(){}function iW(){}function iK(){}function iV(){}function iq(){}function iZ(){}function iX(){}function iJ(){}function iQ(){}function i1(){}function i0(){}function i2(){}function i3(){}function i4(){}function i5(){}function i6(){}function i9(){}function i8(){}function i7(){}function ae(){}function at(){}function an(){}function ar(){}function ai(){}function aa(){}function ao(){}function as(){}function au(){}function ac(){}function al(){}function af(){}function ad(){}function ah(){}function ap(){}function ab(){}function am(){}function ag(){}function av(){}function ay(){}function aw(){}function a_(){}function aE(){}function aS(){}function ak(){}function ax(){}function aT(){}function aM(){}function aO(){}function aA(){}function aL(){}function aC(){}function aI(){}function aD(){}function aN(){}function aP(){}function aR(){}function aj(){}function aF(){}function aY(){}function aB(){}function aU(){}function aH(){}function a$(){}function az(){}function aG(){}function aW(){}function aK(){}function aV(){}function aq(){}function aZ(){}function aX(){}function aJ(){}function aQ(){}function a1(){}function a0(){}function a2(){}function a3(){}function a4(){}function a5(){}function a6(){}function a9(){}function a8(){}function a7(){}function oe(){}function 
ot(){}function on(){}function or(){}function oi(){}function oa(){}function oo(){}function os(){}function ou(){}function oc(){}function ol(){}function of(){}function od(){}function oh(){}function op(){}function ob(){}function om(){}function og(){}function ov(){}function oy(){}function ow(){}function o_(){}function oE(){}function oS(){}function ok(){}function ox(){}function oT(){}function oM(){}function oO(){}function oA(){}function oL(){}function oC(){}function oI(){}function oD(){}function oN(){}function oP(){}function oR(){}function oj(){}function oF(){}function oY(){}function oB(){}function oU(){}function oH(){}function o$(){}function oz(){}function oG(){}function oW(){}function oK(){}function oV(){}function oq(){}function oZ(){}function oX(){}function oJ(){}function oQ(){}function o1(){}function o0(){}function o2(){}function o3(){}function o4(){}function o5(){}function o6(){}function o9(){}function o8(){}function o7(){}function se(){}function st(){}function sn(){}function sr(){}function si(){}function sa(){}function so(){}function ss(){}function su(){}function sc(){}function sl(){}function sf(){}function sd(){}function sh(){}function sp(){}function sb(){}function sm(){}function sg(){}function sv(){}function sy(){}function sw(){}function s_(){}function sE(){}function sS(){}function sk(){}function sx(){}function sT(){}function sM(){}function sO(){}function sA(){}function sL(){}function sC(){}function sI(){}function sD(){}function sN(){}function sP(){}function sR(){}function sj(){}function sF(){}function sY(){}function sB(){}function sU(){}function sH(){}function s$(){}function sz(){}function sG(){}function sW(){}function sK(){}function sV(){}function sq(){}function sZ(){}function sX(){}function sJ(){}function sQ(){}function s1(){}function s0(){}function s2(){}function s3(){}function s4(){}function s5(){}function s6(){}function s9(){}function s8(){}function s7(){}function ue(){}function ut(){}function un(){}function ur(){}function ui(){}function ua(){}function 
uo(){}function us(){}function uu(){}function uc(){}function ul(){}function uf(){}function ud(){}function uh(){}function up(){}function ub(){}function um(){}function ug(){}function uv(){}function uy(){}function uw(){}function u_(){}function uE(){}function uS(){}function uk(){}function ux(){}function uT(){}function uM(){}function uO(){}function uA(){}function uL(){}function uC(){}function uI(){}function uD(){}function uN(){}function uP(){}function uR(){}function uj(){}function uF(){}function uY(){}function uB(){}function uU(){}function uH(){}function u$(){}function uz(){}function uG(){}function uW(){}function uK(){}function uV(){}function uq(){}function uZ(){}function uX(){}function uJ(){}function uQ(){}function u1(){}function u0(){}function u2(){}function u3(){}function u4(){}function u5(){}function u6(){}function u9(){}function u8(){}function u7(){}function ce(){}function ct(){}function cn(e){}function cr(e){}function ci(){m4()}function ca(){eug()}function co(){epz()}function cs(){evw()}function cu(){eEg()}function cc(){eCk()}function cl(){egA()}function cf(){egq()}function cd(){_O()}function ch(){_k()}function cp(){DR()}function cb(){_A()}function cm(){erJ()}function cg(){_C()}function cv(){Xi()}function cy(){en6()}function cw(){Jb()}function c_(){Gw()}function cE(){euv()}function cS(){e_z()}function ck(){en9()}function cx(){K9()}function cT(){eBH()}function cM(){egP()}function cO(){G_()}function cA(){eBy()}function cL(){Gv()}function cC(){en8()}function cI(){eoz()}function cD(){Gx()}function cN(){JK()}function cP(){_I()}function cR(){eTK()}function cj(){egj()}function cF(){eiQ()}function cY(){e_L()}function cB(){eCT()}function cU(){ebJ()}function cH(){eTj()}function c$(){eaB()}function cz(){GS()}function cG(){eDn()}function cW(){eTU()}function cK(){eMK()}function cV(){J1()}function cq(){e_C()}function cZ(){eBB()}function cX(){euw()}function cJ(){ed5()}function cQ(){ePm()}function c1(){De()}function c0(){eiM()}function c2(){eD4()}function c3(e){BJ(e)}function 
c4(e){this.a=e}function c5(e){this.a=e}function c6(e){this.a=e}function c9(e){this.a=e}function c8(e){this.a=e}function c7(e){this.a=e}function le(e){this.a=e}function lt(e){this.a=e}function ln(e){this.a=e}function lr(e){this.a=e}function li(e){this.a=e}function la(e){this.a=e}function lo(e){this.a=e}function ls(e){this.a=e}function lu(e){this.a=e}function lc(e){this.a=e}function ll(e){this.a=e}function lf(e){this.a=e}function ld(e){this.a=e}function lh(e){this.a=e}function lp(e){this.a=e}function lb(e){this.b=e}function lm(e){this.c=e}function lg(e){this.a=e}function lv(e){this.a=e}function ly(e){this.a=e}function lw(e){this.a=e}function l_(e){this.a=e}function lE(e){this.a=e}function lS(e){this.a=e}function lk(e){this.a=e}function lx(e){this.a=e}function lT(e){this.a=e}function lM(e){this.a=e}function lO(e){this.a=e}function lA(e){this.a=e}function lL(e){this.a=e}function lC(e){this.a=e}function lI(e){this.a=e}function lD(e){this.a=e}function lN(){this.a=[]}function lP(e,t){e.a=t}function lR(e,t){e.a=t}function lj(e,t){e.b=t}function lF(e,t){e.b=t}function lY(e,t){e.b=t}function lB(e,t){e.j=t}function lU(e,t){e.g=t}function lH(e,t){e.i=t}function l$(e,t){e.c=t}function lz(e,t){e.d=t}function lG(e,t){e.d=t}function lW(e,t){e.c=t}function lK(e,t){e.k=t}function lV(e,t){e.c=t}function lq(e,t){e.c=t}function lZ(e,t){e.a=t}function lX(e,t){e.a=t}function lJ(e,t){e.f=t}function lQ(e,t){e.a=t}function l1(e,t){e.b=t}function l0(e,t){e.d=t}function l2(e,t){e.i=t}function l3(e,t){e.o=t}function l4(e,t){e.r=t}function l5(e,t){e.a=t}function l6(e,t){e.b=t}function l9(e,t){e.e=t}function l8(e,t){e.f=t}function l7(e,t){e.g=t}function fe(e,t){e.e=t}function ft(e,t){e.f=t}function fn(e,t){e.f=t}function fr(e,t){e.n=t}function fi(e,t){e.a=t}function fa(e,t){e.a=t}function fo(e,t){e.c=t}function fs(e,t){e.c=t}function fu(e,t){e.d=t}function fc(e,t){e.e=t}function fl(e,t){e.g=t}function ff(e,t){e.a=t}function fd(e,t){e.c=t}function fh(e,t){e.d=t}function fp(e,t){e.e=t}function 
fb(e,t){e.f=t}function fm(e,t){e.j=t}function fg(e,t){e.a=t}function fv(e,t){e.b=t}function fy(e,t){e.a=t}function fw(e){e.b=e.a}function f_(e){e.c=e.d.d}function fE(e){this.d=e}function fS(e){this.a=e}function fk(e){this.a=e}function fx(e){this.a=e}function fT(e){this.a=e}function fM(e){this.a=e}function fO(e){this.a=e}function fA(e){this.a=e}function fL(e){this.a=e}function fC(e){this.a=e}function fI(e){this.a=e}function fD(e){this.a=e}function fN(e){this.a=e}function fP(e){this.a=e}function fR(e){this.a=e}function fj(e){this.b=e}function fF(e){this.b=e}function fY(e){this.b=e}function fB(e){this.a=e}function fU(e){this.a=e}function fH(e){this.a=e}function f$(e){this.c=e}function fz(e){this.c=e}function fG(e){this.c=e}function fW(e){this.a=e}function fK(e){this.a=e}function fV(e){this.a=e}function fq(e){this.a=e}function fZ(e){this.a=e}function fX(e){this.a=e}function fJ(e){this.a=e}function fQ(e){this.a=e}function f1(e){this.a=e}function f0(e){this.a=e}function f2(e){this.a=e}function f3(e){this.a=e}function f4(e){this.a=e}function f5(e){this.a=e}function f6(e){this.a=e}function f9(e){this.a=e}function f8(e){this.a=e}function f7(e){this.a=e}function de(e){this.a=e}function dt(e){this.a=e}function dn(e){this.a=e}function dr(e){this.a=e}function di(e){this.a=e}function da(e){this.a=e}function ds(e){this.a=e}function du(e){this.a=e}function dc(e){this.a=e}function dl(e){this.a=e}function df(e){this.a=e}function dd(e){this.a=e}function dh(e){this.a=e}function dp(e){this.a=e}function db(e){this.a=e}function dm(e){this.a=e}function dg(e){this.a=e}function dv(e){this.a=e}function dy(e){this.a=e}function dw(e){this.a=e}function d_(e){this.a=e}function dE(e){this.a=e}function dS(e){this.a=e}function dk(e){this.a=e}function dx(e){this.a=e}function dT(e){this.a=e}function dM(e){this.a=e}function dO(e){this.e=e}function dA(e){this.a=e}function dL(e){this.a=e}function dC(e){this.a=e}function dI(e){this.a=e}function dD(e){this.a=e}function dN(e){this.a=e}function 
dP(e){this.a=e}function dR(e){this.a=e}function dj(e){this.a=e}function dF(e){this.a=e}function dY(e){this.a=e}function dB(e){this.a=e}function dU(e){this.a=e}function dH(e){this.a=e}function d$(e){this.a=e}function dz(e){this.a=e}function dG(e){this.a=e}function dW(e){this.a=e}function dK(e){this.a=e}function dV(e){this.a=e}function dq(e){this.a=e}function dZ(e){this.a=e}function dX(e){this.a=e}function dJ(e){this.a=e}function dQ(e){this.a=e}function d1(e){this.a=e}function d0(e){this.a=e}function d2(e){this.a=e}function d3(e){this.a=e}function d4(e){this.a=e}function d5(e){this.a=e}function d6(e){this.a=e}function d9(e){this.a=e}function d8(e){this.a=e}function d7(e){this.a=e}function he(e){this.a=e}function ht(e){this.a=e}function hn(e){this.a=e}function hr(e){this.a=e}function hi(e){this.a=e}function ha(e){this.a=e}function ho(e){this.a=e}function hs(e){this.a=e}function hu(e){this.a=e}function hc(e){this.a=e}function hl(e){this.a=e}function hf(e){this.a=e}function hd(e){this.a=e}function hh(e){this.a=e}function hp(e){this.a=e}function hb(e){this.a=e}function hm(e){this.a=e}function hg(e){this.a=e}function hv(e){this.c=e}function hy(e){this.b=e}function hw(e){this.a=e}function h_(e){this.a=e}function hE(e){this.a=e}function hS(e){this.a=e}function hk(e){this.a=e}function hx(e){this.a=e}function hT(e){this.a=e}function hM(e){this.a=e}function hO(e){this.a=e}function hA(e){this.a=e}function hL(e){this.a=e}function hC(e){this.a=e}function hI(e){this.a=e}function hD(e){this.a=e}function hN(e){this.a=e}function hP(e){this.a=e}function hR(e){this.a=e}function hj(e){this.a=e}function hF(e){this.a=e}function hY(e){this.a=e}function hB(e){this.a=e}function hU(e){this.a=e}function hH(e){this.a=e}function h$(e){this.a=e}function hz(e){this.a=e}function hG(e){this.a=e}function hW(e){this.a=e}function hK(e){this.a=e}function hV(e){this.a=e}function hq(e){this.a=e}function hZ(e){this.a=e}function hX(e){this.a=e}function hJ(e){this.a=e}function hQ(e){this.a=e}function 
h1(e){this.a=e}function h0(e){this.a=e}function h2(e){this.a=e}function h3(e){this.a=e}function h4(e){this.a=e}function h5(e){this.a=e}function h6(e){this.a=e}function h9(e){this.a=e}function h8(e){this.a=e}function h7(e){this.a=e}function pe(e){this.a=e}function pt(e){this.a=e}function pn(e){this.a=e}function pr(e){this.a=e}function pi(e){this.a=e}function pa(e){this.a=e}function po(e){this.a=e}function ps(e){this.a=e}function pu(e){this.a=e}function pc(e){this.a=e}function pl(e){this.a=e}function pf(e){this.a=e}function pd(e){this.a=e}function ph(e){this.a=e}function pp(e){this.a=e}function pb(e){this.a=e}function pm(e){this.a=e}function pg(e){this.a=e}function pv(e){this.a=e}function py(e){this.a=e}function pw(e){this.a=e}function p_(e){this.a=e}function pE(e){this.a=e}function pS(e){this.a=e}function pk(e){this.a=e}function px(e){this.a=e}function pT(e){this.a=e}function pM(e){this.a=e}function pO(e){this.b=e}function pA(e){this.f=e}function pL(e){this.a=e}function pC(e){this.a=e}function pI(e){this.a=e}function pD(e){this.a=e}function pN(e){this.a=e}function pP(e){this.a=e}function pR(e){this.a=e}function pj(e){this.a=e}function pF(e){this.a=e}function pY(e){this.a=e}function pB(e){this.a=e}function pU(e){this.b=e}function pH(e){this.c=e}function p$(e){this.e=e}function pz(e){this.a=e}function pG(e){this.a=e}function pW(e){this.a=e}function pK(e){this.a=e}function pV(e){this.a=e}function pq(e){this.d=e}function pZ(e){this.a=e}function pX(e){this.a=e}function pJ(e){this.e=e}function pQ(){this.a=0}function p1(){TG(this)}function p0(){Tz(this)}function p2(){Yy(this)}function p3(){UP(this)}function p4(){cn(this)}function p5(){this.c=tgK}function p6(e,t){t.Wb(e)}function p9(e,t){e.b+=t}function p8(e){e.b=new gQ}function p7(e){return e.e}function be(e){return e.a}function bt(e){return e.a}function bn(e){return e.a}function br(e){return e.a}function bi(e){return e.a}function ba(){return null}function bo(){return null}function bs(){yC(),eY2()}function 
bu(e){e.b.tf(e.e)}function bc(e,t){e.b=t-e.b}function bl(e,t){e.a=t-e.a}function bf(e,t){t.ad(e.a)}function bd(e,t){ekv(t,e)}function bh(e,t,n){e.Od(n,t)}function bp(e,t){e.e=t,t.b=e}function bb(e){Dn(),this.a=e}function bm(e){Dn(),this.a=e}function bg(e){Dn(),this.a=e}function bv(e){Bx(),this.a=e}function by(e){$O(),e0E.be(e)}function bw(){O5.call(this)}function b_(){O5.call(this)}function bE(){bw.call(this)}function bS(){bw.call(this)}function bk(){bw.call(this)}function bx(){bw.call(this)}function bT(){bw.call(this)}function bM(){bw.call(this)}function bO(){bw.call(this)}function bA(){bw.call(this)}function bL(){bw.call(this)}function bC(){bw.call(this)}function bI(){bw.call(this)}function bD(){this.a=this}function bN(){this.Bb|=256}function bP(){this.b=new xW}function bR(){bR=A,new p2}function bj(){bE.call(this)}function bF(e,t){e.length=t}function bY(e,t){P_(e.a,t)}function bB(e,t){eEU(e.c,t)}function bU(e,t){Yf(e.b,t)}function bH(e,t){ebB(e.a,t)}function b$(e,t){elj(e.a,t)}function bz(e,t){eam(e.e,t)}function bG(e){exZ(e.c,e.b)}function bW(e,t){e.kc().Nb(t)}function bK(e){this.a=efh(e)}function bV(){this.a=new p2}function bq(){this.a=new p2}function bZ(){this.a=new p0}function bX(){this.a=new p0}function bJ(){this.a=new p0}function bQ(){this.a=new ey}function b1(){this.a=new Z6}function b0(){this.a=new tt}function b2(){this.a=new w7}function b3(){this.a=new W9}function b4(){this.a=new zZ}function b5(){this.a=new Cz}function b6(){this.a=new p0}function b9(){this.a=new p0}function b8(){this.a=new p0}function b7(){this.a=new p0}function me(){this.d=new p0}function mt(){this.a=new bV}function mn(){this.a=new p2}function mr(){this.b=new p2}function mi(){this.b=new p0}function ma(){this.e=new p0}function mo(){this.d=new p0}function ms(){this.a=new cS}function mu(){p0.call(this)}function mc(){bZ.call(this)}function ml(){CK.call(this)}function mf(){b9.call(this)}function md(){mh.call(this)}function mh(){p4.call(this)}function mp(){p4.call(this)}function 
mb(){mp.call(this)}function mm(){$m.call(this)}function mg(){$m.call(this)}function mv(){mq.call(this)}function my(){mq.call(this)}function mw(){mq.call(this)}function m_(){mZ.call(this)}function mE(){_n.call(this)}function mS(){oZ.call(this)}function mk(){oZ.call(this)}function mx(){m0.call(this)}function mT(){m0.call(this)}function mM(){p2.call(this)}function mO(){p2.call(this)}function mA(){p2.call(this)}function mL(){bV.call(this)}function mC(){en0.call(this)}function mI(){bN.call(this)}function mD(){Oy.call(this)}function mN(){Oy.call(this)}function mP(){p2.call(this)}function mR(){p2.call(this)}function mj(){p2.call(this)}function mF(){sr.call(this)}function mY(){sr.call(this)}function mB(){mF.call(this)}function mU(){u7.call(this)}function mH(e){eti.call(this,e)}function m$(e){eti.call(this,e)}function mz(e){ln.call(this,e)}function mG(e){wB.call(this,e)}function mW(e){mG.call(this,e)}function mK(e){wB.call(this,e)}function mV(){this.a=new _n}function mq(){this.a=new bV}function mZ(){this.a=new p2}function mX(){this.a=new p0}function mJ(){this.j=new p0}function mQ(){this.a=new aX}function m1(){this.a=new y4}function m0(){this.a=new sn}function m2(){m2=A,e0d=new vm}function m3(){m3=A,e0f=new vb}function m4(){m4=A,e0l=new i}function m5(){m5=A,e0m=new OV}function m6(e){mG.call(this,e)}function m9(e){mG.call(this,e)}function m8(e){ql.call(this,e)}function m7(e){ql.call(this,e)}function ge(e){IJ.call(this,e)}function gt(e){eEb.call(this,e)}function gn(e){w$.call(this,e)}function gr(e){wG.call(this,e)}function gi(e){wG.call(this,e)}function ga(e){wG.call(this,e)}function go(e){Fu.call(this,e)}function gs(e){go.call(this,e)}function gu(){lD.call(this,{})}function gc(e){Og(),this.a=e}function gl(e){e.b=null,e.c=0}function gf(e,t){e.e=t,eA9(e,t)}function gd(e,t){e.a=t,eSG(e)}function gh(e,t,n){e.a[t.g]=n}function gp(e,t,n){evq(n,e,t)}function gb(e,t){In(t.i,e.n)}function gm(e,t){esW(e).td(t)}function gg(e,t){return e*e/t}function gv(e,t){return e.g-t.g}function 
gy(e){return new lI(e)}function gw(e){return new B_(e)}function g_(e){go.call(this,e)}function gE(e){go.call(this,e)}function gS(e){go.call(this,e)}function gk(e){Fu.call(this,e)}function gx(e){eiJ(),this.a=e}function gT(e){I7(),this.a=e}function gM(e){jK(),this.f=e}function gO(e){jK(),this.f=e}function gA(e){go.call(this,e)}function gL(e){go.call(this,e)}function gC(e){go.call(this,e)}function gI(e){go.call(this,e)}function gD(e){go.call(this,e)}function gN(e){return BJ(e),e}function gP(e){return BJ(e),e}function gR(e){return BJ(e),e}function gj(e){return BJ(e),e}function gF(e){return BJ(e),e}function gY(e){return e.b==e.c}function gB(e){return!!e&&e.b}function gU(e){return!!e&&e.k}function gH(e){return!!e&&e.j}function g$(e){BJ(e),this.a=e}function gz(e){return esR(e),e}function gG(e){Ya(e,e.length)}function gW(e){go.call(this,e)}function gK(e){go.call(this,e)}function gV(e){go.call(this,e)}function gq(e){go.call(this,e)}function gZ(e){go.call(this,e)}function gX(e){go.call(this,e)}function gJ(e){AI.call(this,e,0)}function gQ(){G$.call(this,12,3)}function g1(){g1=A,e0_=new _}function g0(){g0=A,e0y=new r}function g2(){g2=A,e0k=new b}function g3(){g3=A,e0M=new g}function g4(){throw p7(new bO)}function g5(){throw p7(new bO)}function g6(){throw p7(new bO)}function g9(){throw p7(new bO)}function g8(){throw p7(new bO)}function g7(){throw p7(new bO)}function ve(){this.a=Lq(Y9(eUd))}function vt(e){Dn(),this.a=Y9(e)}function vn(e,t){e.Td(t),t.Sd(e)}function vr(e,t){e.a.ec().Mc(t)}function vi(e,t,n){e.c.lf(t,n)}function va(e){gE.call(this,e)}function vo(e){gL.call(this,e)}function vs(){fM.call(this,"")}function vu(){fM.call(this,"")}function vc(){fM.call(this,"")}function vl(){fM.call(this,"")}function vf(e){gE.call(this,e)}function vd(e){fF.call(this,e)}function vh(e){O2.call(this,e)}function vp(e){vd.call(this,e)}function vb(){ls.call(this,null)}function vm(){ls.call(this,null)}function vg(){vg=A,$O()}function vv(){vv=A,e2d=eyz()}function vy(e){return e.a?e.b:0}function 
vw(e){return e.a?e.b:0}function v_(e,t){return e.a-t.a}function vE(e,t){return e.a-t.a}function vS(e,t){return e.a-t.a}function vk(e,t){return QO(e,t)}function vx(e,t){return z9(e,t)}function vT(e,t){return t in e.a}function vM(e,t){return e.f=t,e}function vO(e,t){return e.b=t,e}function vA(e,t){return e.c=t,e}function vL(e,t){return e.g=t,e}function vC(e,t){return e.a=t,e}function vI(e,t){return e.f=t,e}function vD(e,t){return e.k=t,e}function vN(e,t){return e.a=t,e}function vP(e,t){return e.e=t,e}function vR(e,t){return e.e=t,e}function vj(e,t){return e.f=t,e}function vF(e,t){e.b=!0,e.d=t}function vY(e,t){e.b=new TS(t)}function vB(e,t,n){t.td(e.a[n])}function vU(e,t,n){t.we(e.a[n])}function vH(e,t){return e.b-t.b}function v$(e,t){return e.g-t.g}function vz(e,t){return e.s-t.s}function vG(e,t){return e?0:t-1}function vW(e,t){return e?0:t-1}function vK(e,t){return e?t-1:0}function vV(e,t){return t.Yf(e)}function vq(e,t){return e.b=t,e}function vZ(e,t){return e.a=t,e}function vX(e,t){return e.c=t,e}function vJ(e,t){return e.d=t,e}function vQ(e,t){return e.e=t,e}function v1(e,t){return e.f=t,e}function v0(e,t){return e.a=t,e}function v2(e,t){return e.b=t,e}function v3(e,t){return e.c=t,e}function v4(e,t){return e.c=t,e}function v5(e,t){return e.b=t,e}function v6(e,t){return e.d=t,e}function v9(e,t){return e.e=t,e}function v8(e,t){return e.f=t,e}function v7(e,t){return e.g=t,e}function ye(e,t){return e.a=t,e}function yt(e,t){return e.i=t,e}function yn(e,t){return e.j=t,e}function yr(e,t){return e.k=t,e}function yi(e,t){return e.j=t,e}function ya(e,t){e_z(),Gc(t,e)}function yo(e,t,n){jX(e.a,t,n)}function ys(e){U8.call(this,e)}function yu(e){U8.call(this,e)}function yc(e){I3.call(this,e)}function yl(e){efB.call(this,e)}function yf(e){eta.call(this,e)}function yd(e){HO.call(this,e)}function yh(e){HO.call(this,e)}function yp(){MA.call(this,"")}function yb(){this.a=0,this.b=0}function ym(){this.b=0,this.a=0}function yg(e,t){e.b=0,enh(e,t)}function 
yv(e,t){e.c=t,e.b=!0}function yy(e,t){return e.c._b(t)}function yw(e){return e.e&&e.e()}function y_(e){return e?e.d:null}function yE(e,t){return ecD(e.b,t)}function yS(e){return e?e.g:null}function yk(e){return e?e.i:null}function yx(e){return LW(e),e.o}function yT(){yT=A,tmc=evO()}function yM(){yM=A,tml=ewS()}function yO(){yO=A,tgg=evL()}function yA(){yA=A,tvE=evA()}function yL(){yL=A,tvS=eSH()}function yC(){yC=A,tmF=enF()}function yI(){throw p7(new bO)}function yD(){throw p7(new bO)}function yN(){throw p7(new bO)}function yP(){throw p7(new bO)}function yR(){throw p7(new bO)}function yj(){throw p7(new bO)}function yF(e){this.a=new w8(e)}function yY(e){eF7(),eBh(this,e)}function yB(e){this.a=new FG(e)}function yU(e,t){for(;e.ye(t););}function yH(e,t){for(;e.sd(t););}function y$(e,t){return e.a+=t,e}function yz(e,t){return e.a+=t,e}function yG(e,t){return e.a+=t,e}function yW(e,t){return e.a+=t,e}function yK(e){return B1(e),e.a}function yV(e){return e.b!=e.d.c}function yq(e){return e.l|e.m<<22}function yZ(e,t){return e.d[t.p]}function yX(e,t){return eA5(e,t)}function yJ(e,t,n){e.splice(t,n)}function yQ(e){e.c?eL3(e):eL4(e)}function y1(e){this.a=0,this.b=e}function y0(){this.a=new eAs(e5I)}function y2(){this.b=new eAs(e5T)}function y3(){this.b=new eAs(e5H)}function y4(){this.b=new eAs(e5H)}function y5(){throw p7(new bO)}function y6(){throw p7(new bO)}function y9(){throw p7(new bO)}function y8(){throw p7(new bO)}function y7(){throw p7(new bO)}function we(){throw p7(new bO)}function wt(){throw p7(new bO)}function wn(){throw p7(new bO)}function wr(){throw p7(new bO)}function wi(){throw p7(new bO)}function wa(){throw p7(new bC)}function wo(){throw p7(new bC)}function ws(e){this.a=new wu(e)}function wu(e){erh(this,e,ey0())}function wc(e){return!e||BV(e)}function wl(e){return -1!=tvJ[e]}function wf(){0!=e1Z&&(e1Z=0),e1J=-1}function wd(){null==eUn&&(eUn=[])}function wh(e,t){eTl(H9(e.a),t)}function wp(e,t){eTl(H9(e.a),t)}function wb(e,t){OC.call(this,e,t)}function 
wm(e,t){wb.call(this,e,t)}function wg(e,t){this.b=e,this.c=t}function wv(e,t){this.b=e,this.a=t}function wy(e,t){this.a=e,this.b=t}function ww(e,t){this.a=e,this.b=t}function w_(e,t){this.a=e,this.b=t}function wE(e,t){this.a=e,this.b=t}function wS(e,t){this.a=e,this.b=t}function wk(e,t){this.a=e,this.b=t}function wx(e,t){this.a=e,this.b=t}function wT(e,t){this.a=e,this.b=t}function wM(e,t){this.b=e,this.a=t}function wO(e,t){this.b=e,this.a=t}function wA(e,t){this.b=e,this.a=t}function wL(e,t){this.b=e,this.a=t}function wC(e,t){this.f=e,this.g=t}function wI(e,t){this.e=e,this.d=t}function wD(e,t){this.g=e,this.i=t}function wN(e,t){this.a=e,this.b=t}function wP(e,t){this.a=e,this.f=t}function wR(e,t){this.b=e,this.c=t}function wj(e,t){this.a=e,this.b=t}function wF(e,t){this.a=e,this.b=t}function wY(e,t){this.a=e,this.b=t}function wB(e){Oq(e.dc()),this.c=e}function wU(e){this.b=Pp(Y9(e),83)}function wH(e){this.a=Pp(Y9(e),83)}function w$(e){this.a=Pp(Y9(e),15)}function wz(e){this.a=Pp(Y9(e),15)}function wG(e){this.b=Pp(Y9(e),47)}function wW(){this.q=new eB4.Date}function wK(){wK=A,e0V=new L}function wV(){wV=A,e2o=new T}function wq(e){return e.f.c+e.g.c}function wZ(e,t){return e.b.Hc(t)}function wX(e,t){return e.b.Ic(t)}function wJ(e,t){return e.b.Qc(t)}function wQ(e,t){return e.b.Hc(t)}function w1(e,t){return e.c.uc(t)}function w0(e,t){return e.a._b(t)}function w2(e,t){return ecX(e.c,t)}function w3(e,t){return F9(e.b,t)}function w4(e,t){return e>t&&t0}function Ei(e,t){return 0>ecd(e,t)}function Ea(e,t){return e.a.get(t)}function Eo(e,t){return t.split(e)}function Es(e,t){return F9(e.e,t)}function Eu(e){return BJ(e),!1}function Ec(e){Gq.call(this,e,21)}function El(e,t){zL.call(this,e,t)}function Ef(e,t){wC.call(this,e,t)}function Ed(e,t){wC.call(this,e,t)}function Eh(e){BT(),IJ.call(this,e)}function Ep(e,t){jA(e,e.length,t)}function Eb(e,t){Yj(e,e.length,t)}function Em(e,t,n){t.ud(e.a.Ge(n))}function Eg(e,t,n){t.we(e.a.Fe(n))}function Ev(e,t,n){t.td(e.a.Kb(n))}function 
Ey(e,t,n){e.Mb(n)&&t.td(n)}function Ew(e,t,n){e.splice(t,0,n)}function E_(e,t){return Aa(e.e,t)}function EE(e,t){this.d=e,this.e=t}function ES(e,t){this.b=e,this.a=t}function Ek(e,t){this.b=e,this.a=t}function Ex(e,t){this.b=e,this.a=t}function ET(e,t){this.a=e,this.b=t}function EM(e,t){this.a=e,this.b=t}function EO(e,t){this.a=e,this.b=t}function EA(e,t){this.a=e,this.b=t}function EL(e,t){this.a=e,this.b=t}function EC(e,t){this.b=e,this.a=t}function EI(e,t){this.b=e,this.a=t}function ED(e,t){wC.call(this,e,t)}function EN(e,t){wC.call(this,e,t)}function EP(e,t){wC.call(this,e,t)}function ER(e,t){wC.call(this,e,t)}function Ej(e,t){wC.call(this,e,t)}function EF(e,t){wC.call(this,e,t)}function EY(e,t){wC.call(this,e,t)}function EB(e,t){wC.call(this,e,t)}function EU(e,t){wC.call(this,e,t)}function EH(e,t){wC.call(this,e,t)}function E$(e,t){wC.call(this,e,t)}function Ez(e,t){wC.call(this,e,t)}function EG(e,t){wC.call(this,e,t)}function EW(e,t){wC.call(this,e,t)}function EK(e,t){wC.call(this,e,t)}function EV(e,t){wC.call(this,e,t)}function Eq(e,t){wC.call(this,e,t)}function EZ(e,t){wC.call(this,e,t)}function EX(e,t){this.a=e,this.b=t}function EJ(e,t){this.a=e,this.b=t}function EQ(e,t){this.a=e,this.b=t}function E1(e,t){this.a=e,this.b=t}function E0(e,t){this.a=e,this.b=t}function E2(e,t){this.a=e,this.b=t}function E3(e,t){this.a=e,this.b=t}function E4(e,t){this.a=e,this.b=t}function E5(e,t){this.a=e,this.b=t}function E6(e,t){this.b=e,this.a=t}function E9(e,t){this.b=e,this.a=t}function E8(e,t){this.b=e,this.a=t}function E7(e,t){this.b=e,this.a=t}function Se(e,t){this.c=e,this.d=t}function St(e,t){this.e=e,this.d=t}function Sn(e,t){this.a=e,this.b=t}function Sr(e,t){this.b=t,this.c=e}function Si(e,t){wC.call(this,e,t)}function Sa(e,t){wC.call(this,e,t)}function So(e,t){wC.call(this,e,t)}function Ss(e,t){wC.call(this,e,t)}function Su(e,t){wC.call(this,e,t)}function Sc(e,t){wC.call(this,e,t)}function Sl(e,t){wC.call(this,e,t)}function Sf(e,t){wC.call(this,e,t)}function 
Sd(e,t){wC.call(this,e,t)}function Sh(e,t){wC.call(this,e,t)}function Sp(e,t){wC.call(this,e,t)}function Sb(e,t){wC.call(this,e,t)}function Sm(e,t){wC.call(this,e,t)}function Sg(e,t){wC.call(this,e,t)}function Sv(e,t){wC.call(this,e,t)}function Sy(e,t){wC.call(this,e,t)}function Sw(e,t){wC.call(this,e,t)}function S_(e,t){wC.call(this,e,t)}function SE(e,t){wC.call(this,e,t)}function SS(e,t){wC.call(this,e,t)}function Sk(e,t){wC.call(this,e,t)}function Sx(e,t){wC.call(this,e,t)}function ST(e,t){wC.call(this,e,t)}function SM(e,t){wC.call(this,e,t)}function SO(e,t){wC.call(this,e,t)}function SA(e,t){wC.call(this,e,t)}function SL(e,t){wC.call(this,e,t)}function SC(e,t){wC.call(this,e,t)}function SI(e,t){wC.call(this,e,t)}function SD(e,t){wC.call(this,e,t)}function SN(e,t){wC.call(this,e,t)}function SP(e,t){wC.call(this,e,t)}function SR(e,t){wC.call(this,e,t)}function Sj(e,t){wC.call(this,e,t)}function SF(e,t){this.b=e,this.a=t}function SY(e,t){this.a=e,this.b=t}function SB(e,t){this.a=e,this.b=t}function SU(e,t){this.a=e,this.b=t}function SH(e,t){this.a=e,this.b=t}function S$(e,t){wC.call(this,e,t)}function Sz(e,t){wC.call(this,e,t)}function SG(e,t){this.b=e,this.d=t}function SW(e,t){wC.call(this,e,t)}function SK(e,t){wC.call(this,e,t)}function SV(e,t){this.a=e,this.b=t}function Sq(e,t){this.a=e,this.b=t}function SZ(e,t){wC.call(this,e,t)}function SX(e,t){wC.call(this,e,t)}function SJ(e,t){wC.call(this,e,t)}function SQ(e,t){wC.call(this,e,t)}function S1(e,t){wC.call(this,e,t)}function S0(e,t){wC.call(this,e,t)}function S2(e,t){wC.call(this,e,t)}function S3(e,t){wC.call(this,e,t)}function S4(e,t){wC.call(this,e,t)}function S5(e,t){wC.call(this,e,t)}function S6(e,t){wC.call(this,e,t)}function S9(e,t){wC.call(this,e,t)}function S8(e,t){wC.call(this,e,t)}function S7(e,t){wC.call(this,e,t)}function ke(e,t){wC.call(this,e,t)}function kt(e,t){wC.call(this,e,t)}function kn(e,t){return Aa(e.c,t)}function kr(e,t){return Aa(t.b,e)}function ki(e,t){return-e.b.Je(t)}function 
ka(e,t){return Aa(e.g,t)}function ko(e,t){wC.call(this,e,t)}function ks(e,t){wC.call(this,e,t)}function ku(e,t){this.a=e,this.b=t}function kc(e,t){this.a=e,this.b=t}function kl(e,t){this.a=e,this.b=t}function kf(e,t){wC.call(this,e,t)}function kd(e,t){wC.call(this,e,t)}function kh(e,t){wC.call(this,e,t)}function kp(e,t){wC.call(this,e,t)}function kb(e,t){wC.call(this,e,t)}function km(e,t){wC.call(this,e,t)}function kg(e,t){wC.call(this,e,t)}function kv(e,t){wC.call(this,e,t)}function ky(e,t){wC.call(this,e,t)}function kw(e,t){wC.call(this,e,t)}function k_(e,t){wC.call(this,e,t)}function kE(e,t){wC.call(this,e,t)}function kS(e,t){wC.call(this,e,t)}function kk(e,t){wC.call(this,e,t)}function kx(e,t){wC.call(this,e,t)}function kT(e,t){wC.call(this,e,t)}function kM(e,t){this.a=e,this.b=t}function kO(e,t){this.a=e,this.b=t}function kA(e,t){this.a=e,this.b=t}function kL(e,t){this.a=e,this.b=t}function kC(e,t){this.a=e,this.b=t}function kI(e,t){this.a=e,this.b=t}function kD(e,t){this.a=e,this.b=t}function kN(e,t){wC.call(this,e,t)}function kP(e,t){this.a=e,this.b=t}function kR(e,t){this.a=e,this.b=t}function kj(e,t){this.a=e,this.b=t}function kF(e,t){this.a=e,this.b=t}function kY(e,t){this.a=e,this.b=t}function kB(e,t){this.a=e,this.b=t}function kU(e,t){this.b=e,this.a=t}function kH(e,t){this.b=e,this.a=t}function k$(e,t){this.b=e,this.a=t}function kz(e,t){this.b=e,this.a=t}function kG(e,t){this.a=e,this.b=t}function kW(e,t){this.a=e,this.b=t}function kK(e,t){eOU(e.a,Pp(t,56))}function kV(e,t){QM(e.a,Pp(t,11))}function kq(e,t){return Pj(),t!=e}function kZ(){return vv(),new e2d}function kX(){Gk(),this.b=new bV}function kJ(){eAV(),this.a=new bV}function kQ(){Gy(),jG.call(this)}function k1(e,t){wC.call(this,e,t)}function k0(e,t){this.a=e,this.b=t}function k2(e,t){this.a=e,this.b=t}function k3(e,t){this.a=e,this.b=t}function k4(e,t){this.a=e,this.b=t}function k5(e,t){this.a=e,this.b=t}function k6(e,t){this.a=e,this.b=t}function k9(e,t){this.d=e,this.b=t}function 
k8(e,t){this.d=e,this.e=t}function k7(e,t){this.f=e,this.c=t}function xe(e,t){this.b=e,this.c=t}function xt(e,t){this.i=e,this.g=t}function xn(e,t){this.e=e,this.a=t}function xr(e,t){this.a=e,this.b=t}function xi(e,t){e.i=null,erA(e,t)}function xa(e,t){e&&Um(tmR,e,t)}function xo(e,t){return edG(e.a,t)}function xs(e){return edK(e.c,e.b)}function xu(e){return e?e.dd():null}function xc(e){return null==e?null:e}function xl(e){return typeof e===eUi}function xf(e){return typeof e===eUa}function xd(e){return typeof e===eUo}function xh(e,t){return e.Hd().Xb(t)}function xp(e,t){return ei7(e.Kc(),t)}function xb(e,t){return 0==ecd(e,t)}function xm(e,t){return ecd(e,t)>=0}function xg(e,t){return 0!=ecd(e,t)}function xv(e){return""+(BJ(e),e)}function xy(e,t){return e.substr(t)}function xw(e){return efH(e),e.d.gc()}function x_(e){return eTe(e,e.c),e}function xE(e){return Rb(null==e),e}function xS(e,t){return e.a+=""+t,e}function xk(e,t){return e.a+=""+t,e}function xx(e,t){return e.a+=""+t,e}function xT(e,t){return e.a+=""+t,e}function xM(e,t){return e.a+=""+t,e}function xO(e,t){return e.a+=""+t,e}function xA(e,t){qQ(e,t,e.a,e.a.a)}function xL(e,t){qQ(e,t,e.c.b,e.c)}function xC(e,t,n){eyc(t,eSE(e,n))}function xI(e,t,n){eyc(t,eSE(e,n))}function xD(e,t){eeS(new Ow(e),t)}function xN(e,t){e.q.setTime(Kj(t))}function xP(e,t){FH.call(this,e,t)}function xR(e,t){FH.call(this,e,t)}function xj(e,t){FH.call(this,e,t)}function xF(e){Yy(this),eij(this,e)}function xY(e){return GK(e,0),null}function xB(e){return e.a=0,e.b=0,e}function xU(e,t){return e.a=t.g+1,e}function xH(e,t){return 2==e.j[t.p]}function x$(e){return YZ(Pp(e,79))}function xz(){xz=A,e4r=euY(epE())}function xG(){xG=A,e7$=euY(eAn())}function xW(){this.b=new w8(ee0(12))}function xK(){this.b=0,this.a=!1}function xV(){this.b=0,this.a=!1}function xq(e){this.a=e,ci.call(this)}function xZ(e){this.a=e,ci.call(this)}function xX(e,t){Cm.call(this,e,t)}function xJ(e,t){Ii.call(this,e,t)}function xQ(e,t){xt.call(this,e,t)}function 
x1(e,t){eaN.call(this,e,t)}function x0(e,t){AA.call(this,e,t)}function x2(e,t){_5(),Um(tmU,e,t)}function x3(e,t){return Az(e.a,0,t)}function x4(e,t){return e.a.a.a.cc(t)}function x5(e,t){return xc(e)===xc(t)}function x6(e,t){return elN(e.a,t.a)}function x9(e,t){return ME(e.a,t.a)}function x8(e,t){return YM(e.a,t.a)}function x7(e,t){return e.indexOf(t)}function Te(e,t){return e==t?0:e?1:-1}function Tt(e){return e<10?"0"+e:""+e}function Tn(e){return Y9(e),new xq(e)}function Tr(e){return Mk(e.l,e.m,e.h)}function Ti(e){return zy((BJ(e),e))}function Ta(e){return zy((BJ(e),e))}function To(e,t){return ME(e.g,t.g)}function Ts(e){return typeof e===eUa}function Tu(e){return e==e8f||e==e8p}function Tc(e){return e==e8f||e==e8d}function Tl(e){return QI(e.b.b,e,0)}function Tf(e){this.a=kZ(),this.b=e}function Td(e){this.a=kZ(),this.b=e}function Th(e,t){return P_(e.a,t),t}function Tp(e,t){return P_(e.c,t),e}function Tb(e,t){return eat(e.a,t),e}function Tm(e,t){return Dj(),t.a+=e}function Tg(e,t){return Dj(),t.a+=e}function Tv(e,t){return Dj(),t.c+=e}function Ty(e,t){Qe(e,0,e.length,t)}function Tw(){fJ.call(this,new qh)}function T_(){jp.call(this,0,0,0,0)}function TE(){Hr.call(this,0,0,0,0)}function TS(e){this.a=e.a,this.b=e.b}function Tk(e){return e==tpm||e==tpg}function Tx(e){return e==tpy||e==tpb}function TT(e){return e==tss||e==tso}function TM(e){return e!=tbc&&e!=tbl}function TO(e){return e.Lg()&&e.Mg()}function TA(e){return UB(Pp(e,118))}function TL(e){return eat(new K2,e)}function TC(e,t){return new eaN(t,e)}function TI(e,t){return new eaN(t,e)}function TD(e,t,n){ent(e,t),enn(e,n)}function TN(e,t,n){ena(e,t),eni(e,n)}function TP(e,t,n){eno(e,t),ens(e,n)}function TR(e,t,n){enr(e,t),enc(e,n)}function Tj(e,t,n){enu(e,t),enl(e,n)}function TF(e,t){euc(e,t),enp(e,e.D)}function TY(e){k7.call(this,e,!0)}function TB(e,t,n){L3.call(this,e,t,n)}function TU(e){eLQ(),ead.call(this,e)}function TH(){Ef.call(this,"Head",1)}function T$(){Ef.call(this,"Tail",3)}function 
Tz(e){e.c=Je(e1R,eUp,1,0,5,1)}function TG(e){e.a=Je(e1R,eUp,1,8,5,1)}function TW(e){ety(e.xf(),new dh(e))}function TK(e){return null!=e?esj(e):0}function TV(e,t){return etg(t,zY(e))}function Tq(e,t){return etg(t,zY(e))}function TZ(e,t){return e[e.length]=t}function TX(e,t){return e[e.length]=t}function TJ(e){return Ph(e.b.Kc(),e.a)}function TQ(e,t){return erb(Bi(e.d),t)}function T1(e,t){return erb(Bi(e.g),t)}function T0(e,t){return erb(Bi(e.j),t)}function T2(e,t){Cm.call(this,e.b,t)}function T3(e){jp.call(this,e,e,e,e)}function T4(e){return e.b&&ePE(e),e.a}function T5(e){return e.b&&ePE(e),e.c}function T6(e,t){!e2M&&(e.b=t)}function T9(e,t,n){return Bc(e,t,n),n}function T8(e,t,n){Bc(e.c[t.g],t.g,n)}function T7(e,t,n){Pp(e.c,69).Xh(t,n)}function Me(e,t,n){TP(n,n.i+e,n.j+t)}function Mt(e,t){JL(qt(e.a),Gj(t))}function Mn(e,t){JL(QX(e.a),GF(t))}function Mr(e){eBG(),pJ.call(this,e)}function Mi(e){return null==e?0:esj(e)}function Ma(){Ma=A,tuT=new efY(e59)}function Mo(){Mo=A,new Ms,new p0}function Ms(){new p2,new p2,new p2}function Mu(){Mu=A,bR(),e0S=new p2}function Mc(){Mc=A,eB4.Math.log(2)}function Ml(){Ml=A,tgZ=(_Z(),tmE)}function Mf(){throw p7(new gW(e1O))}function Md(){throw p7(new gW(e1O))}function Mh(){throw p7(new gW(e1A))}function Mp(){throw p7(new gW(e1A))}function Mb(e){this.a=e,PS.call(this,e)}function Mm(e){this.a=e,wU.call(this,e)}function Mg(e){this.a=e,wU.call(this,e)}function Mv(e,t){jM(e.c,e.c.length,t)}function My(e){return e.at?1:0}function MS(e,t){return ecd(e,t)>0?e:t}function Mk(e,t,n){return{l:e,m:t,h:n}}function Mx(e,t){null!=e.a&&kV(t,e.a)}function MT(e){e.a=new C,e.c=new C}function MM(e){this.b=e,this.a=new p0}function MO(e){this.b=new e1,this.a=e}function MA(e){CW.call(this),this.a=e}function ML(){Ef.call(this,"Range",2)}function MC(){evR(),this.a=new eAs(e4k)}function MI(e,t){Y9(t),Uz(e).Jc(new d)}function MD(e,t){return GE(),t.n.b+=e}function MN(e,t,n){return Um(e.g,n,t)}function MP(e,t,n){return Um(e.k,n,t)}function MR(e,t){return 
Um(e.a,t.a,t)}function Mj(e,t,n){return eho(t,n,e.c)}function MF(e){return new kl(e.c,e.d)}function MY(e){return new kl(e.c,e.d)}function MB(e){return new kl(e.a,e.b)}function MU(e,t){return ej8(e.a,t,null)}function MH(e){Gs(e,null),Go(e,null)}function M$(e){GA(e,null),GL(e,null)}function Mz(){AA.call(this,null,null)}function MG(){AL.call(this,null,null)}function MW(e){this.a=e,p2.call(this)}function MK(e){this.b=(Hj(),new f$(e))}function MV(e){e.j=Je(e18,eUP,310,0,0,1)}function Mq(e,t,n){e.c.Vc(t,Pp(n,133))}function MZ(e,t,n){e.c.ji(t,Pp(n,133))}function MX(e,t){eRT(e),e.Gc(Pp(t,15))}function MJ(e,t){return eR4(e.c,e.b,t)}function MQ(e,t){return new O6(e.Kc(),t)}function M1(e,t){return -1!=eoD(e.Kc(),t)}function M0(e,t){return null!=e.a.Bc(t)}function M2(e){return e.Ob()?e.Pb():null}function M3(e){return ehv(e,0,e.length)}function M4(e,t){return null!=e&&ebs(e,t)}function M5(e,t){e.q.setHours(t),eNq(e,t)}function M6(e,t){e.c&&(Re(t),zd(t))}function M9(e,t,n){Pp(e.Kb(n),164).Nb(t)}function M8(e,t,n){return ejq(e,t,n),n}function M7(e,t,n){e.a=1502^t,e.b=n^e$d}function Oe(e,t,n){return e.a[t.g][n.g]}function Ot(e,t){return e.a[t.c.p][t.p]}function On(e,t){return e.e[t.c.p][t.p]}function Or(e,t){return e.c[t.c.p][t.p]}function Oi(e,t){return e.j[t.p]=eOo(t)}function Oa(e,t){return ZZ(e.f,t.tg())}function Oo(e,t){return ZZ(e.b,t.tg())}function Os(e,t){return e.a0?t*t/e:t*t*100}function Li(e,t){return e>0?t/(e*e):100*t}function La(e,t,n){return P_(t,ef5(e,n))}function Lo(e,t,n){J1(),e.Xe(t)&&n.td(e)}function Ls(e,t,n){var r;(r=e.Zc(t)).Rb(n)}function Lu(e,t,n){return e.a+=t,e.b+=n,e}function Lc(e,t,n){return e.a*=t,e.b*=n,e}function Ll(e,t,n){return e.a-=t,e.b-=n,e}function Lf(e,t){return e.a=t.a,e.b=t.b,e}function Ld(e){return e.a=-e.a,e.b=-e.b,e}function Lh(e){this.c=e,this.a=1,this.b=1}function Lp(e){this.c=e,eno(e,0),ens(e,0)}function Lb(e){_n.call(this),enD(this,e)}function Lm(e){eBp(),p8(this),this.mf(e)}function Lg(e,t){_0(),AA.call(this,e,t)}function 
Lv(e,t){_2(),AL.call(this,e,t)}function Ly(e,t){_2(),AL.call(this,e,t)}function Lw(e,t){_2(),Lv.call(this,e,t)}function L_(e,t,n){JY.call(this,e,t,n,2)}function LE(e,t){Ml(),jd.call(this,e,t)}function LS(e,t){Ml(),LE.call(this,e,t)}function Lk(e,t){Ml(),LE.call(this,e,t)}function Lx(e,t){Ml(),Lk.call(this,e,t)}function LT(e,t){Ml(),jd.call(this,e,t)}function LM(e,t){Ml(),LT.call(this,e,t)}function LO(e,t){Ml(),jd.call(this,e,t)}function LA(e,t){return e.c.Fc(Pp(t,133))}function LL(e,t,n){return eP9(Qq(e,t),n)}function LC(e,t,n){return t.Qk(e.e,e.c,n)}function LI(e,t,n){return t.Rk(e.e,e.c,n)}function LD(e,t){return ecv(e.e,Pp(t,49))}function LN(e,t,n){elm(QX(e.a),t,GF(n))}function LP(e,t,n){elm(qt(e.a),t,Gj(n))}function LR(e,t){t.$modCount=e.$modCount}function Lj(){Lj=A,tcV=new pO("root")}function LF(){LF=A,tmB=new mx,new mT}function LY(){this.a=new zu,this.b=new zu}function LB(){en0.call(this),this.Bb|=eH3}function LU(){wC.call(this,"GROW_TREE",0)}function LH(e){return null==e?null:eYt(e)}function L$(e){return null==e?null:eEO(e)}function Lz(e){return null==e?null:efF(e)}function LG(e){return null==e?null:efF(e)}function LW(e){null==e.o&&eMb(e)}function LK(e){return Rb(null==e||xl(e)),e}function LV(e){return Rb(null==e||xf(e)),e}function Lq(e){return Rb(null==e||xd(e)),e}function LZ(e){this.q=new eB4.Date(Kj(e))}function LX(e,t){this.c=e,wI.call(this,e,t)}function LJ(e,t){this.a=e,LX.call(this,e,t)}function LQ(e,t){this.d=e,f_(this),this.b=t}function L1(e,t){Jo.call(this,e),this.a=t}function L0(e,t){Jo.call(this,e),this.a=t}function L2(e){edL.call(this,0,0),this.f=e}function L3(e,t,n){XS.call(this,e,t,n,null)}function L4(e,t,n){XS.call(this,e,t,n,null)}function L5(e,t,n){return 0>=e.ue(t,n)?n:t}function L6(e,t,n){return 0>=e.ue(t,n)?t:n}function L9(e,t){return Pp(eef(e.b,t),149)}function L8(e,t){return Pp(eef(e.c,t),229)}function L7(e){return Pp(RJ(e.a,e.b),287)}function Ce(e){return new kl(e.c,e.d+e.a)}function Ct(e){return GE(),TT(Pp(e,197))}function 
Cn(){Cn=A,e4i=el9((ed6(),tbq))}function Cr(e,t){t.a?eLc(e,t):Ai(e.a,t.b)}function Ci(e,t){!e2M&&P_(e.a,t)}function Ca(e,t){return _k(),eag(t.d.i,e)}function Co(e,t){return erJ(),new eIu(t,e)}function Cs(e,t){return $C(t,ezr),e.f=t,e}function Cu(e,t,n){return n=eDg(e,t,3,n)}function Cc(e,t,n){return n=eDg(e,t,6,n)}function Cl(e,t,n){return n=eDg(e,t,9,n)}function Cf(e,t,n){++e.j,e.Ki(),X8(e,t,n)}function Cd(e,t,n){++e.j,e.Hi(t,e.oi(t,n))}function Ch(e,t,n){var r;(r=e.Zc(t)).Rb(n)}function Cp(e,t,n){return ePT(e.c,e.b,t,n)}function Cb(e,t){return(t&eUu)%e.d.length}function Cm(e,t){pO.call(this,e),this.a=t}function Cg(e,t){pH.call(this,e),this.a=t}function Cv(e,t){pH.call(this,e),this.a=t}function Cy(e,t){this.c=e,eta.call(this,t)}function Cw(e,t){this.a=e,pU.call(this,t)}function C_(e,t){this.a=e,pU.call(this,t)}function CE(e){this.a=(enG(e,eU3),new XM(e))}function CS(e){this.a=(enG(e,eU3),new XM(e))}function Ck(e){return e.a||(e.a=new h),e.a}function Cx(e){return e>8?0:e+1}function CT(e,t){return OQ(),e==t?0:e?1:-1}function CM(e,t,n){return jT(e,Pp(t,22),n)}function CO(e,t,n){return e.apply(t,n)}function CA(e,t,n){return e.a+=ehv(t,0,n),e}function CL(e,t){var n;return n=e.e,e.e=t,n}function CC(e,t){var n;(n=e[e$c]).call(e,t)}function CI(e,t){var n;(n=e[e$c]).call(e,t)}function CD(e,t){e.a.Vc(e.b,t),++e.b,e.c=-1}function CN(e){Yy(e.e),e.d.b=e.d,e.d.a=e.d}function CP(e){e.b?CP(e.b):e.f.c.zc(e.e,e.d)}function CR(e,t,n){_w(),lP(e,t.Ce(e.a,n))}function Cj(e,t){return y_(ehn(e.a,t,!0))}function CF(e,t){return y_(ehr(e.a,t,!0))}function CY(e,t){return vk(Array(t),e)}function CB(e){return String.fromCharCode(e)}function CU(e){return null==e?null:e.message}function CH(){this.a=new p0,this.b=new p0}function C$(){this.a=new tt,this.b=new bP}function Cz(){this.b=new yb,this.c=new p0}function CG(){this.d=new yb,this.e=new yb}function CW(){this.n=new yb,this.o=new yb}function CK(){this.n=new mp,this.i=new TE}function CV(){this.a=new cg,this.b=new i_}function Cq(){this.a=new 
p0,this.d=new p0}function CZ(){this.b=new bV,this.a=new bV}function CX(){this.b=new p2,this.a=new p2}function CJ(){this.b=new y2,this.a=new ay}function CQ(){CK.call(this),this.a=new yb}function C1(e){eaD.call(this,e,(Qu(),e2D))}function C0(e,t,n,r){jp.call(this,e,t,n,r)}function C2(e,t,n){null!=n&&ern(t,emI(e,n))}function C3(e,t,n){null!=n&&err(t,emI(e,n))}function C4(e,t,n){return n=eDg(e,t,11,n)}function C5(e,t){return e.a+=t.a,e.b+=t.b,e}function C6(e,t){return e.a-=t.a,e.b-=t.b,e}function C9(e,t){return e.n.a=(BJ(t),t+10)}function C8(e,t){return e.n.a=(BJ(t),t+10)}function C7(e,t){return t==e||ev9(eOg(t),e)}function Ie(e,t){return null==Um(e.a,t,"")}function It(e,t){return _k(),!eag(t.d.i,e)}function In(e,t){Tk(e.f)?eMi(e,t):ewz(e,t)}function Ir(e,t){var n;return t.Hh(e.a)}function Ii(e,t){gE.call(this,eJT+e+eXH+t)}function Ia(e,t,n,r){FQ.call(this,e,t,n,r)}function Io(e,t,n,r){FQ.call(this,e,t,n,r)}function Is(e,t,n,r){Io.call(this,e,t,n,r)}function Iu(e,t,n,r){F1.call(this,e,t,n,r)}function Ic(e,t,n,r){F1.call(this,e,t,n,r)}function Il(e,t,n,r){F1.call(this,e,t,n,r)}function If(e,t,n,r){Ic.call(this,e,t,n,r)}function Id(e,t,n,r){Ic.call(this,e,t,n,r)}function Ih(e,t,n,r){Il.call(this,e,t,n,r)}function Ip(e,t,n,r){Id.call(this,e,t,n,r)}function Ib(e,t,n,r){FZ.call(this,e,t,n,r)}function Im(e,t,n){this.a=e,AI.call(this,t,n)}function Ig(e,t,n){this.c=t,this.b=n,this.a=e}function Iv(e,t,n){return e.d=Pp(t.Kb(n),164)}function Iy(e,t){return e.Aj().Nh().Kh(e,t)}function Iw(e,t){return e.Aj().Nh().Ih(e,t)}function I_(e,t){return BJ(e),xc(e)===xc(t)}function IE(e,t){return BJ(e),xc(e)===xc(t)}function IS(e,t){return y_(ehn(e.a,t,!1))}function Ik(e,t){return y_(ehr(e.a,t,!1))}function Ix(e,t){return e.b.sd(new EM(e,t))}function IT(e,t){return e.b.sd(new EO(e,t))}function IM(e,t){return e.b.sd(new EA(e,t))}function IO(e,t,n){return e.lastIndexOf(t,n)}function IA(e,t,n){return elN(e[t.b],e[n.b])}function IL(e,t){return eo3(t,(eBy(),tat),e)}function IC(e,t){return 
ME(t.a.d.p,e.a.d.p)}function II(e,t){return ME(e.a.d.p,t.a.d.p)}function ID(e,t){return elN(e.c-e.s,t.c-t.s)}function IN(e){return e.c?QI(e.c.a,e,0):-1}function IP(e){return e<100?null:new yf(e)}function IR(e){return e==tba||e==tbs||e==tbo}function Ij(e,t){return M4(t,15)&&eCc(e.c,t)}function IF(e,t){!e2M&&t&&(e.d=t)}function IY(e,t){var n;return!!esq(e,n=t)}function IB(e,t){this.c=e,YC.call(this,e,t)}function IU(e){this.c=e,xj.call(this,eUY,0)}function IH(e,t){Px.call(this,e,e.length,t)}function I$(e,t,n){return Pp(e.c,69).lk(t,n)}function Iz(e,t,n){return Pp(e.c,69).mk(t,n)}function IG(e,t,n){return LC(e,Pp(t,332),n)}function IW(e,t,n){return LI(e,Pp(t,332),n)}function IK(e,t,n){return ey1(e,Pp(t,332),n)}function IV(e,t,n){return e_t(e,Pp(t,332),n)}function Iq(e,t){return null==t?null:ecA(e.b,t)}function IZ(e){return xf(e)?(BJ(e),e):e.ke()}function IX(e){return!isNaN(e)&&!isFinite(e)}function IJ(e){Dn(),this.a=(Hj(),new vd(e))}function IQ(e){Pj(),this.d=e,this.a=new p1}function I1(e,t,n){this.a=e,this.b=t,this.c=n}function I0(e,t,n){this.a=e,this.b=t,this.c=n}function I2(e,t,n){this.d=e,this.b=n,this.a=t}function I3(e){MT(this),HC(this),er7(this,e)}function I4(e){Tz(this),PO(this.c,0,e.Pc())}function I5(e){BH(e.a),Jl(e.c,e.b),e.b=null}function I6(e){this.a=e,wK(),eap(Date.now())}function I9(){I9=A,e2G=new r,e2W=new r}function I8(){I8=A,e2h=new I,e2p=new D}function I7(){I7=A,tmY=Je(e1R,eUp,1,0,5,1)}function De(){De=A,tgH=Je(e1R,eUp,1,0,5,1)}function Dt(){Dt=A,tg$=Je(e1R,eUp,1,0,5,1)}function Dn(){Dn=A,new bb((Hj(),Hj(),e2r))}function Dr(e){return Qu(),eeM((Qc(),e2j),e)}function Di(e){return eum(),eeM((XC(),e2$),e)}function Da(e){return epC(),eeM((qk(),e3d),e)}function Do(e){return eeR(),eeM((qx(),e3b),e)}function Ds(e){return eCp(),eeM((eaF(),e3I),e)}function Du(e){return etx(),eeM((XO(),e3R),e)}function Dc(e){return Qs(),eeM((XA(),e3B),e)}function Dl(e){return QQ(),eeM((XL(),e3z),e)}function Df(e){return eBW(),eeM((xz(),e4r),e)}function Dd(e){return 
eaY(),eeM((Qf(),e4l),e)}function Dh(e){return ep7(),eeM((Qd(),e4b),e)}function Dp(e){return ebe(),eeM((Qh(),e6z),e)}function Db(e){return _y(),eeM((Vt(),e6W),e)}function Dm(e){return eej(),eeM((qT(),e9h),e)}function Dg(e){return QJ(),eeM((XI(),e96),e)}function Dv(e){return e_x(),eeM((eeW(),e8a),e)}function Dy(e){return eok(),eeM((Ql(),e8b),e)}function Dw(e){return ec4(),eeM((XD(),e8T),e)}function D_(e,t){if(!e)throw p7(new gL(t))}function DE(e){return eEn(),eeM((etQ(),e8R),e)}function DS(e){jp.call(this,e.d,e.c,e.a,e.b)}function Dk(e){jp.call(this,e.d,e.c,e.a,e.b)}function Dx(e,t,n){this.b=e,this.c=t,this.a=n}function DT(e,t,n){this.b=e,this.a=t,this.c=n}function DM(e,t,n){this.a=e,this.b=t,this.c=n}function DO(e,t,n){this.a=e,this.b=t,this.c=n}function DA(e,t,n){this.a=e,this.b=t,this.c=n}function DL(e,t,n){this.a=e,this.b=t,this.c=n}function DC(e,t,n){this.b=e,this.a=t,this.c=n}function DI(e,t,n){this.e=t,this.b=e,this.d=n}function DD(e,t,n){return _w(),e.a.Od(t,n),t}function DN(e){var t;return(t=new ew).e=e,t}function DP(e){var t;return(t=new me).b=e,t}function DR(){DR=A,e8V=new nd,e8q=new nh}function Dj(){Dj=A,e75=new rB,e76=new rU}function DF(e){return eoE(),eeM((Qb(),e7X),e)}function DY(e){return eoS(),eeM((Qg(),tet),e)}function DB(e){return eLz(),eeM((ei3(),tek),e)}function DU(e){return eSg(),eeM((et2(),teI),e)}function DH(e){return Jp(),eeM((qI(),teP),e)}function D$(e){return en7(),eeM((XN(),teY),e)}function Dz(e){return ey4(),eeM((eeU(),tes),e)}function DG(e){return erX(),eeM((Xj(),teb),e)}function DW(e){return enB(),eeM((XP(),te$),e)}function DK(e){return eb6(),eeM((eeY(),teq),e)}function DV(e){return eeF(),eeM((qO(),teJ),e)}function Dq(e){return eoG(),eeM((XR(),te2),e)}function DZ(e){return eEf(),eeM((et6(),te7),e)}function DX(e){return Qx(),eeM((qA(),ttn),e)}function DJ(e){return eyd(),eeM((et4(),ttc),e)}function DQ(e){return e_3(),eeM((et3(),ttm),e)}function D1(e){return eLR(),eeM((eoH(),ttM),e)}function D0(e){return eaU(),eeM((XY(),ttC),e)}function 
D2(e){return Q1(),eeM((XF(),ttP),e)}function D3(e){return K6(),eeM((qD(),ttF),e)}function D4(e){return ef_(),eeM((eeH(),tnF),e)}function D5(e){return ewY(),eeM((et5(),tst),e)}function D6(e){return euJ(),eeM((XB(),tsa),e)}function D9(e){return ebk(),eeM((Qv(),tsl),e)}function D8(e){return enY(),eeM((X$(),tsR),e)}function D7(e){return eOJ(),eeM((ei2(),tsx),e)}function Ne(e){return esn(),eeM((XH(),tsA),e)}function Nt(e){return Q0(),eeM((qC(),tsI),e)}function Nn(e){return ei0(),eeM((XU(),tsB),e)}function Nr(e){return ebG(),eeM((eeB(),tsm),e)}function Ni(e){return Xo(),eeM((qL(),ts$),e)}function Na(e){return euy(),eeM((XG(),tsK),e)}function No(e){return eiO(),eeM((XW(),tsX),e)}function Ns(e){return eox(),eeM((Xz(),ts0),e)}function Nu(e){return enU(),eeM((XK(),tuo),e)}function Nc(e){return qG(),eeM((qP(),tud),e)}function Nl(e){return zs(),eeM((qR(),tu_),e)}function Nf(e){return zQ(),eeM((qj(),tuk),e)}function Nd(e){return Xa(),eeM((qN(),tu$),e)}function Nh(e){return zo(),eeM((qF(),tuX),e)}function Np(e){return egR(),eeM((Qp(),tu2),e)}function Nb(e){return eS_(),eeM((et9(),tu7),e)}function Nm(e){return z1(),eeM((qU(),tcB),e)}function Ng(e){return erZ(),eeM((qB(),tcX),e)}function Nv(e){return Kn(),eeM((qY(),tc$),e)}function Ny(e){return efx(),eeM((XV(),tc0),e)}function Nw(e){return J0(),eeM((qH(),tc4),e)}function N_(e){return eub(),eeM((Xq(),tc8),e)}function NE(e){return emC(),eeM((Qm(),tlA),e)}function NS(e){return ei1(),eeM((XX(),tlD),e)}function Nk(e){return efS(),eeM((XZ(),tlj),e)}function Nx(e){return eOB(),eeM((eeG(),tfl),e)}function NT(e){return efk(),eeM((XJ(),tfp),e)}function NM(e){return _D(),eeM((K7(),tfm),e)}function NO(e){return _N(),eeM((K8(),tfv),e)}function NA(e){return Xs(),eeM((qz(),tf_),e)}function NL(e){return eEM(),eeM((ee$(),tfM),e)}function NC(e){return _P(),eeM((Ve(),tf7),e)}function NI(e){return eoT(),eeM((q$(),tdn),e)}function ND(e){return epx(),eeM((eez(),tdb),e)}function NN(e){return eSd(),eeM((ei4(),tdk),e)}function NP(e){return 
ebx(),eeM((et0(),tdD),e)}function NR(e){return eyY(),eeM((et1(),tdJ),e)}function Nj(e){return eB$(),eeM((xG(),e7$),e)}function NF(e){return erq(),eeM((qM(),e8K),e)}function NY(e){return ec3(),eeM((eeK(),tpw),e)}function NB(e){return etT(),eeM((X1(),tpk),e)}function NU(e){return efE(),eeM((Q_(),tpA),e)}function NH(e){return e_a(),eeM((et7(),tpR),e)}function N$(e){return eck(),eeM((XQ(),tpK),e)}function Nz(e){return egF(),eeM((Qw(),tpJ),e)}function NG(e){return eT7(),eeM((eaj(),tp8),e)}function NW(e){return epT(),eeM((eeV(),tbi),e)}function NK(e){return ewf(),eeM((etC(),tbf),e)}function NV(e){return ekU(),eeM((et8(),tbv),e)}function Nq(e){return ed6(),eeM((QS(),tbZ),e)}function NZ(e){return eI3(),eeM((eo$(),tb6),e)}function NX(e){return eYu(),eeM((eeq(),tbB),e)}function NJ(e){return edM(),eeM((QE(),tmt),e)}function NQ(e){return eup(),eeM((Qy(),tmo),e)}function N1(e){return eTy(),eeM((ei5(),tmP),e)}function N0(e,t){return BJ(e),e+(BJ(t),t)}function N2(e,t){return wK(),JL(H9(e.a),t)}function N3(e,t){return wK(),JL(H9(e.a),t)}function N4(e,t){this.c=e,this.a=t,this.b=t-e}function N5(e,t,n){this.a=e,this.b=t,this.c=n}function N6(e,t,n){this.a=e,this.b=t,this.c=n}function N9(e,t,n){this.a=e,this.b=t,this.c=n}function N8(e,t,n){this.a=e,this.b=t,this.c=n}function N7(e,t,n){this.a=e,this.b=t,this.c=n}function Pe(e,t,n){this.e=e,this.a=t,this.c=n}function Pt(e,t,n){Ml(),zl.call(this,e,t,n)}function Pn(e,t,n){Ml(),BP.call(this,e,t,n)}function Pr(e,t,n){Ml(),BP.call(this,e,t,n)}function Pi(e,t,n){Ml(),BP.call(this,e,t,n)}function Pa(e,t,n){Ml(),Pn.call(this,e,t,n)}function Po(e,t,n){Ml(),Pn.call(this,e,t,n)}function Ps(e,t,n){Ml(),Po.call(this,e,t,n)}function Pu(e,t,n){Ml(),Pr.call(this,e,t,n)}function Pc(e,t,n){Ml(),Pi.call(this,e,t,n)}function Pl(e,t){return Y9(e),Y9(t),new wx(e,t)}function Pf(e,t){return Y9(e),Y9(t),new Rn(e,t)}function Pd(e,t){return Y9(e),Y9(t),new Rr(e,t)}function Ph(e,t){return Y9(e),Y9(t),new wM(e,t)}function Pp(e,t){return 
Rb(null==e||ebs(e,t)),e}function Pb(e){var t;return t=new p0,eel(t,e),t}function Pm(e){var t;return t=new bV,eel(t,e),t}function Pg(e){var t;return ein(t=new b2,e),t}function Pv(e){var t;return ein(t=new _n,e),t}function Py(e){return e.e||(e.e=new p0),e.e}function Pw(e){return e.c||(e.c=new sk),e.c}function P_(e,t){return e.c[e.c.length]=t,!0}function PE(e,t){this.c=e,this.b=t,this.a=!1}function PS(e){this.d=e,f_(this),this.b=Ft(e.d)}function Pk(){this.a=";,;",this.b="",this.c=""}function Px(e,t,n){F$.call(this,t,n),this.a=e}function PT(e,t,n){this.b=e,xP.call(this,t,n)}function PM(e,t,n){this.c=e,EE.call(this,t,n)}function PO(e,t,n){ekp(n,0,e,t,n.length,!1)}function PA(e,t,n,r,i){e.b=t,e.c=n,e.d=r,e.a=i}function PL(e,t){t&&(e.b=t,e.a=(B1(t),t.a))}function PC(e,t,n,r,i){e.d=t,e.c=n,e.a=r,e.b=i}function PI(e){var t,n;t=e.b,n=e.c,e.b=n,e.c=t}function PD(e){var t,n;n=e.d,t=e.a,e.d=t,e.a=n}function PN(e){return eal(YE(Ts(e)?eaL(e):e))}function PP(e,t){return ME(Rx(e.d),Rx(t.d))}function PR(e,t){return t==(eYu(),tbY)?e.c:e.d}function Pj(){Pj=A,tuu=(eYu(),tbY),tuc=tby}function PF(){this.b=gP(LV(epB((eCk(),e9N))))}function PY(e){return _w(),Je(e1R,eUp,1,e,5,1)}function PB(e){return new kl(e.c+e.b,e.d+e.a)}function PU(e,t){return _C(),ME(e.d.p,t.d.p)}function PH(e){return A6(0!=e.b),etw(e,e.a.a)}function P$(e){return A6(0!=e.b),etw(e,e.c.b)}function Pz(e,t){if(!e)throw p7(new gS(t))}function PG(e,t){if(!e)throw p7(new gL(t))}function PW(e,t,n){Se.call(this,e,t),this.b=n}function PK(e,t,n){k8.call(this,e,t),this.c=n}function PV(e,t,n){etn.call(this,t,n),this.d=e}function Pq(e){Dt(),sr.call(this),this.th(e)}function PZ(e,t,n){this.a=e,xQ.call(this,t,n)}function PX(e,t,n){this.a=e,xQ.call(this,t,n)}function PJ(e,t,n){k8.call(this,e,t),this.c=n}function PQ(){ZE(),BY.call(this,(_Q(),tgp))}function P1(e){return null!=e&&!efz(e,tm1,tm0)}function P0(e,t){return(elt(e)<<4|elt(t))&eHd}function P2(e,t){return U_(),eb2(e,t),new Uf(e,t)}function P3(e,t){var 
n;e.n&&(n=t,P_(e.f,n))}function P4(e,t,n){var r;ee3(e,t,r=new B_(n))}function P5(e,t){var n;return n=e.c,ers(e,t),n}function P6(e,t){return t<0?e.g=-1:e.g=t,e}function P9(e,t){return etN(e),e.a*=t,e.b*=t,e}function P8(e,t,n,r,i){e.c=t,e.d=n,e.b=r,e.a=i}function P7(e,t){return qQ(e,t,e.c.b,e.c),!0}function Re(e){e.a.b=e.b,e.b.a=e.a,e.a=e.b=null}function Rt(e){this.b=e,this.a=Fc(this.b.a).Ed()}function Rn(e,t){this.b=e,this.a=t,ci.call(this)}function Rr(e,t){this.a=e,this.b=t,ci.call(this)}function Ri(e,t){F$.call(this,t,1040),this.a=e}function Ra(e){return 0==e||isNaN(e)?e:e<0?-1:1}function Ro(e){return HR(),e_I(e)==z$(e_P(e))}function Rs(e){return HR(),e_P(e)==z$(e_I(e))}function Ru(e,t){return eyE(e,new Se(t.a,t.b))}function Rc(e){return!q8(e)&&e.c.i.c==e.d.i.c}function Rl(e){var t;return t=e.n,e.a.b+t.d+t.a}function Rf(e){var t;return t=e.n,e.e.b+t.d+t.a}function Rd(e){var t;return t=e.n,e.e.a+t.b+t.c}function Rh(e){return eBG(),++tyv,new jb(0,e)}function Rp(e){return e.a?e.a:Hh(e)}function Rb(e){if(!e)throw p7(new gA(null))}function Rm(){Rm=A,tvm=(Hj(),new fB(eQU))}function Rg(){Rg=A,new ebw((m2(),e0d),(m3(),e0f))}function Rv(){Rv=A,e0B=Je(e15,eUP,19,256,0,1)}function Ry(e,t,n,r){ef3.call(this,e,t,n,r,0,0)}function Rw(e,t,n){return Um(e.b,Pp(n.b,17),t)}function R_(e,t,n){return Um(e.b,Pp(n.b,17),t)}function RE(e,t){return P_(e,new kl(t.a,t.b))}function RS(e,t){return e.c=t)throw p7(new bj)}function FR(e,t,n){return Bc(t,0,R5(t[0],n[0])),t}function Fj(e,t,n){t.Ye(n,gP(LV(Bp(e.b,n)))*e.a)}function FF(e,t,n){return eLG(),eiq(e,t)&&eiq(e,n)}function FY(e){return ekU(),!e.Hc(tbp)&&!e.Hc(tbm)}function FB(e){return new kl(e.c+e.b/2,e.d+e.a/2)}function FU(e,t){return t.kh()?ecv(e.b,Pp(t,49)):t}function FH(e,t){this.e=e,this.d=(64&t)!=0?t|eUR:t}function F$(e,t){this.c=0,this.d=e,this.b=64|t|eUR}function Fz(e){this.b=new XM(11),this.a=(HF(),e)}function FG(e){this.b=null,this.a=(HF(),e||e2s)}function FW(e){this.a=ebb(e.a),this.b=new I4(e.b)}function 
FK(e){this.b=e,AF.call(this,e),Op(this)}function FV(e){this.b=e,AB.call(this,e),Ob(this)}function Fq(e,t,n){this.a=e,Ia.call(this,t,n,5,6)}function FZ(e,t,n,r){this.b=e,O_.call(this,t,n,r)}function FX(e,t,n,r,i){JB.call(this,e,t,n,r,i,-1)}function FJ(e,t,n,r,i){JU.call(this,e,t,n,r,i,-1)}function FQ(e,t,n,r){O_.call(this,e,t,n),this.b=r}function F1(e,t,n,r){PK.call(this,e,t,n),this.b=r}function F0(e){k7.call(this,e,!1),this.a=!1}function F2(e,t){this.b=e,lm.call(this,e.b),this.a=t}function F3(e,t){Bx(),wj.call(this,e,ecT(new g$(t)))}function F4(e,t){return eBG(),++tyv,new BR(e,t,0)}function F5(e,t){return eBG(),++tyv,new BR(6,e,t)}function F6(e,t){return IE(e.substr(0,t.length),t)}function F9(e,t){return xd(t)?$r(e,t):!!$I(e.f,t)}function F8(e,t){for(BJ(t);e.Ob();)t.td(e.Pb())}function F7(e,t,n){eLQ(),this.e=e,this.d=t,this.a=n}function Ye(e,t,n,r){var i;(i=e.i).i=t,i.a=n,i.b=r}function Yt(e){var t;for(t=e;t.f;)t=t.f;return t}function Yn(e){var t;return A6(null!=(t=eso(e))),t}function Yr(e){var t;return A6(null!=(t=elT(e))),t}function Yi(e,t){var n;return ZQ(t,n=e.a.gc()),n-t}function Ya(e,t){var n;for(n=0;n0?eB4.Math.log(e/t):-100}function YM(e,t){return 0>ecd(e,t)?-1:ecd(e,t)>0?1:0}function YO(e,t,n){return ePQ(e,Pp(t,46),Pp(n,167))}function YA(e,t){return Pp(Ff(Fc(e.a)).Xb(t),42).cd()}function YL(e,t){return eto(t,e.length),new Ri(e,t)}function YC(e,t){this.d=e,Ow.call(this,e),this.e=t}function YI(e){this.d=(BJ(e),e),this.a=0,this.c=eUY}function YD(e,t){pJ.call(this,1),this.a=e,this.b=t}function YN(e,t){return e.c?YN(e.c,t):P_(e.b,t),e}function YP(e,t,n){var r;return r=eep(e,t),V7(e,t,n),r}function YR(e,t){var n;return QO(n=e.slice(0,t),e)}function Yj(e,t,n){var r;for(r=0;r=e.g}function BL(e,t,n){var r;return r=er$(e,t,n),eCK(e,r)}function BC(e,t){var n;n=e.a.length,eep(e,n),V7(e,n,t)}function BI(e,t){var n;(n=console[e]).call(console,t)}function BD(e,t){var n;++e.j,n=e.Vi(),e.Ii(e.oi(n,t))}function BN(e,t,n){Pp(t.b,65),ety(t.a,new N6(e,n,t))}function 
BP(e,t,n){p$.call(this,t),this.a=e,this.b=n}function BR(e,t,n){pJ.call(this,e),this.a=t,this.b=n}function Bj(e,t,n){this.a=e,pH.call(this,t),this.b=n}function BF(e,t,n){this.a=e,K3.call(this,8,t,null,n)}function BY(e){this.a=(BJ(eJ7),eJ7),this.b=e,new mP}function BB(e){this.c=e,this.b=this.c.a,this.a=this.c.e}function BU(e){this.c=e,this.b=e.a.d.a,LR(e.a.e,this)}function BH(e){A4(-1!=e.c),e.d.$c(e.c),e.b=e.c,e.c=-1}function B$(e){return eB4.Math.sqrt(e.a*e.a+e.b*e.b)}function Bz(e,t){return FP(t,e.a.c.length),RJ(e.a,t)}function BG(e,t){return xc(e)===xc(t)||null!=e&&ecX(e,t)}function BW(e){return 0>=e?new _e:erg(e-1)}function BK(e){return!!tyb&&$r(tyb,e)}function BV(e){return e?e.dc():!e.Kc().Ob()}function Bq(e){return!e.a&&e.c?e.c.b:e.a}function BZ(e){return e.a||(e.a=new O_(e6f,e,4)),e.a}function BX(e){return e.d||(e.d=new O_(tgr,e,1)),e.d}function BJ(e){if(null==e)throw p7(new bM);return e}function BQ(e){e.c?e.c.He():(e.d=!0,eAA(e))}function B1(e){e.c?B1(e.c):(el3(e),e.d=!0)}function B0(e){UG(e.a),e.b=Je(e1R,eUp,1,e.b.length,5,1)}function B2(e,t){return ME(t.j.c.length,e.j.c.length)}function B3(e,t){e.c<0||e.b.b=0?e.Bh(n):ekN(e,t)}function B5(e){var t,n;return(t=e.c.i.c)==(n=e.d.i.c)}function B6(e){if(4!=e.p)throw p7(new bT);return e.e}function B9(e){if(3!=e.p)throw p7(new bT);return e.e}function B8(e){if(6!=e.p)throw p7(new bT);return e.f}function B7(e){if(6!=e.p)throw p7(new bT);return e.k}function Ue(e){if(3!=e.p)throw p7(new bT);return e.j}function Ut(e){if(4!=e.p)throw p7(new bT);return e.j}function Un(e){return e.b||(e.b=new pG(new mR)),e.b}function Ur(e){return -2==e.c&&fd(e,e_d(e.g,e.b)),e.c}function Ui(e,t){var n;return(n=Y6("",e)).n=t,n.i=1,n}function Ua(e,t){jB(Pp(t.b,65),e),ety(t.a,new dv(e))}function Uo(e,t){JL((e.a||(e.a=new C_(e,e)),e.a),t)}function Us(e,t){this.b=e,YC.call(this,e,t),Op(this)}function Uu(e,t){this.b=e,IB.call(this,e,t),Ob(this)}function Uc(e,t,n,r){wD.call(this,e,t),this.d=n,this.a=r}function 
Ul(e,t,n,r){wD.call(this,e,n),this.a=t,this.f=r}function Uf(e,t){MK.call(this,erv(Y9(e),Y9(t))),this.a=t}function Ud(){e_w.call(this,eQB,(yA(),tvE)),ejt(this)}function Uh(){e_w.call(this,eQc,(yO(),tgg)),eP3(this)}function Up(){wC.call(this,"DELAUNAY_TRIANGULATION",0)}function Ub(e){return String.fromCharCode.apply(null,e)}function Um(e,t,n){return xd(t)?Ge(e,t,n):eS9(e.f,t,n)}function Ug(e){return Hj(),e?e.ve():(HF(),HF(),e2c)}function Uv(e,t,n){return eoM(),n.pg(e,Pp(t.cd(),146))}function Uy(e,t){return Rg(),new ebw(new OK(e),new OW(t))}function Uw(e){return enG(e,eU6),ee1(eft(eft(5,e),e/10|0))}function U_(){U_=A,e0p=new gt(eow(vx(e1$,1),eUK,42,0,[]))}function UE(e){return e.d||(e.d=new fF(e.c.Cc())),e.d}function US(e){return e.a||(e.a=new vp(e.c.vc())),e.a}function Uk(e){return e.b||(e.b=new vd(e.c.ec())),e.b}function Ux(e,t){for(;t-- >0;)e=e<<1|(e<0?1:0);return e}function UT(e,t){return xc(e)===xc(t)||null!=e&&ecX(e,t)}function UM(e,t){return OQ(),Pp(t.b,19).ar&&++r,r}function Hl(e){var t,n;return etV(n=t=new p5,e),n}function Hf(e){var t,n;return e_U(n=t=new p5,e),n}function Hd(e,t){var n;return n=Bp(e.f,t),eiX(t,n),null}function Hh(e){var t;return(t=erw(e))?t:null}function Hp(e){return e.b||(e.b=new FQ(e6g,e,12,3)),e.b}function Hb(e){return null!=e&&wZ(tm$,e.toLowerCase())}function Hm(e,t){return elN(jl(e)*jc(e),jl(t)*jc(t))}function Hg(e,t){return elN(jl(e)*jc(e),jl(t)*jc(t))}function Hv(e,t){return elN(e.d.c+e.d.b/2,t.d.c+t.d.b/2)}function Hy(e,t){return elN(e.g.c+e.g.b/2,t.g.c+t.g.b/2)}function Hw(e,t,n){n.a?ens(e,t.b-e.f/2):eno(e,t.a-e.g/2)}function H_(e,t,n,r){this.a=e,this.b=t,this.c=n,this.d=r}function HE(e,t,n,r){this.a=e,this.b=t,this.c=n,this.d=r}function HS(e,t,n,r){this.e=e,this.a=t,this.c=n,this.d=r}function Hk(e,t,n,r){this.a=e,this.c=t,this.d=n,this.b=r}function Hx(e,t,n,r){Ml(),ZU.call(this,t,n,r),this.a=e}function HT(e,t,n,r){Ml(),ZU.call(this,t,n,r),this.a=e}function HM(e,t){this.a=e,LQ.call(this,e,Pp(e.d,15).Zc(t))}function 
HO(e){this.f=e,this.c=this.f.e,e.f>0&&evH(this)}function HA(e,t,n,r){this.b=e,this.c=r,xj.call(this,t,n)}function HL(e){return A6(e.b=0&&IE(e.substr(n,t.length),t)}function $N(e,t,n,r,i,a,o){return new qu(e.e,t,n,r,i,a,o)}function $P(e,t,n,r,i,a){this.a=e,en1.call(this,t,n,r,i,a)}function $R(e,t,n,r,i,a){this.a=e,en1.call(this,t,n,r,i,a)}function $j(e,t){this.g=e,this.d=eow(vx(e4N,1),eGW,10,0,[t])}function $F(e,t){this.e=e,this.a=e1R,this.b=eCz(t),this.c=t}function $Y(e,t){CK.call(this),etk(this),this.a=e,this.c=t}function $B(e,t,n,r){Bc(e.c[t.g],n.g,r),Bc(e.c[n.g],t.g,r)}function $U(e,t,n,r){Bc(e.c[t.g],t.g,n),Bc(e.b[t.g],t.g,r)}function $H(){return Xo(),eow(vx(e5u,1),eU4,376,0,[tsH,tsU])}function $$(){return Qx(),eow(vx(e40,1),eU4,479,0,[ttt,tte])}function $z(){return eeF(),eow(vx(e4J,1),eU4,419,0,[teZ,teX])}function $G(){return Jp(),eow(vx(e4V,1),eU4,422,0,[teD,teN])}function $W(){return K6(),eow(vx(e49,1),eU4,420,0,[ttR,ttj])}function $K(){return Q0(),eow(vx(e5a,1),eU4,421,0,[tsL,tsC])}function $V(){return qG(),eow(vx(e5v,1),eU4,523,0,[tuf,tul])}function $q(){return Xa(),eow(vx(e5k,1),eU4,520,0,[tuH,tuU])}function $Z(){return zs(),eow(vx(e5E,1),eU4,516,0,[tuw,tuy])}function $X(){return zQ(),eow(vx(e5S,1),eU4,515,0,[tuE,tuS])}function $J(){return zo(),eow(vx(e5x,1),eU4,455,0,[tuq,tuZ])}function $Q(){return Kn(),eow(vx(e5C,1),eU4,425,0,[tcH,tcU])}function $1(){return z1(),eow(vx(e5L,1),eU4,480,0,[tcF,tcY])}function $0(){return erZ(),eow(vx(e5I,1),eU4,495,0,[tcq,tcZ])}function $2(){return J0(),eow(vx(e5N,1),eU4,426,0,[tc2,tc3])}function $3(){return eoT(),eow(vx(e5V,1),eU4,429,0,[tdt,tde])}function $4(){return Xs(),eow(vx(e5G,1),eU4,430,0,[tfw,tfy])}function $5(){return epC(),eow(vx(e2Q,1),eU4,428,0,[e3f,e3l])}function $6(){return eeR(),eow(vx(e21,1),eU4,427,0,[e3h,e3p])}function $9(){return eej(),eow(vx(e4E,1),eU4,424,0,[e9f,e9d])}function $8(){return erq(),eow(vx(e4F,1),eU4,511,0,[e8W,e8G])}function $7(e,t,n,r){return n>=0?e.jh(t,n,r):e.Sg(null,n,r)}function 
ze(e){return 0==e.b.b?e.a.$e():PH(e.b)}function zt(e){if(5!=e.p)throw p7(new bT);return jE(e.f)}function zn(e){if(5!=e.p)throw p7(new bT);return jE(e.k)}function zr(e){return xc(e.a)===xc((eiM(),tgW))&&eR1(e),e.a}function zi(e){this.a=Pp(Y9(e),271),this.b=(Hj(),new O4(e))}function za(e,t){l5(this,new kl(e.a,e.b)),l6(this,Pv(t))}function zo(){zo=A,tuq=new SK(ezt,0),tuZ=new SK(ezn,1)}function zs(){zs=A,tuw=new Sz(ezn,0),tuy=new Sz(ezt,1)}function zu(){m9.call(this,new w8(ee0(12))),Oq(!0),this.a=2}function zc(e,t,n){eBG(),pJ.call(this,e),this.b=t,this.a=n}function zl(e,t,n){Ml(),p$.call(this,t),this.a=e,this.b=n}function zf(e){CK.call(this),etk(this),this.a=e,this.c=!0}function zd(e){var t;t=e.c.d.b,e.b=t,e.a=e.c.d,t.a=e.c.d.b=e}function zh(e){var t;enZ(e.a),TW(e.a),efJ(t=new dp(e.a))}function zp(e,t){eC_(e,!0),ety(e.e.wf(),new Dx(e,!0,t))}function zb(e,t){return qe(t),enL(e,Je(ty_,eHT,25,t,15,1),t)}function zm(e,t){return HR(),e==z$(e_I(t))||e==z$(e_P(t))}function zg(e,t){return null==t?xu($I(e.f,null)):Ea(e.g,t)}function zv(e){return 0==e.b?null:(A6(0!=e.b),etw(e,e.a.a))}function zy(e){return 0|Math.max(Math.min(e,eUu),-2147483648)}function zw(e,t){var n=e0w[e.charCodeAt(0)];return null==n?e:n}function z_(e,t){return H5(e,"set1"),H5(t,"set2"),new wF(e,t)}function zE(e,t){var n;return C5(Ld(n=et$(e.f,t)),e.f.d)}function zS(e,t){var n,r;return ej4(e,n=t,r=new H),r.d}function zk(e,t,n,r){var i;i=new CQ,t.a[n.g]=i,jT(e.b,r,i)}function zx(e,t,n){var r;(r=e.Yg(t))>=0?e.sh(r,n):eOh(e,t,n)}function zT(e,t,n){z0(),e&&Um(tmj,e,t),e&&Um(tmR,e,n)}function zM(e,t,n){this.i=new p0,this.b=e,this.g=t,this.a=n}function zO(e,t,n){this.c=new p0,this.e=e,this.f=t,this.b=n}function zA(e,t,n){this.a=new p0,this.e=e,this.f=t,this.c=n}function zL(e,t){MV(this),this.f=t,this.g=e,HD(this),this._d()}function zC(e,t){var n;n=e.q.getHours(),e.q.setDate(t),eNq(e,n)}function zI(e,t){var n;for(Y9(t),n=e.a;n;n=n.c)t.Od(n.g,n.i)}function zD(e){var t;return esb(t=new yF(ee0(e.length)),e),t}function 
zN(e){function t(){}return t.prototype=e||{},new t}function zP(e,t){return!!eos(e,t)&&(enP(e),!0)}function zR(e,t){if(null==t)throw p7(new bM);return ehF(e,t)}function zj(e){return e.qe()?null:(0,eUt[e.n])}function zF(e){return e.Db>>16!=3?null:Pp(e.Cb,33)}function zY(e){return e.Db>>16!=9?null:Pp(e.Cb,33)}function zB(e){return e.Db>>16!=6?null:Pp(e.Cb,79)}function zU(e){return e.Db>>16!=7?null:Pp(e.Cb,235)}function zH(e){return e.Db>>16!=7?null:Pp(e.Cb,160)}function z$(e){return e.Db>>16!=11?null:Pp(e.Cb,33)}function zz(e,t){var n;return(n=e.Yg(t))>=0?e.lh(n):exu(e,t)}function zG(e,t){var n;return n=new RZ(t),e_h(n,e),new I4(n)}function zW(e){var t;return t=e.d,t=e.si(e.f),JL(e,t),t.Ob()}function zK(e,t){return e.b+=t.b,e.c+=t.c,e.d+=t.d,e.a+=t.a,e}function zV(e,t){return eB4.Math.abs(e)0}function zZ(){this.a=new Tw,this.e=new bV,this.g=0,this.i=0}function zX(e){this.a=e,this.b=Je(e5b,eUP,1944,e.e.length,0,2)}function zJ(e,t,n){var r;r=esg(e,t,n),e.b=new erH(r.c.length)}function zQ(){zQ=A,tuE=new S$(ezh,0),tuS=new S$("UP",1)}function z1(){z1=A,tcF=new SJ(eV2,0),tcY=new SJ("FAN",1)}function z0(){z0=A,tmj=new p2,tmR=new p2,xa(e0r,new o8)}function z2(e){if(0!=e.p)throw p7(new bT);return xg(e.f,0)}function z3(e){if(0!=e.p)throw p7(new bT);return xg(e.k,0)}function z4(e){return e.Db>>16!=3?null:Pp(e.Cb,147)}function z5(e){return e.Db>>16!=6?null:Pp(e.Cb,235)}function z6(e){return e.Db>>16!=17?null:Pp(e.Cb,26)}function z9(e,t){var n=e.a=e.a||[];return n[t]||(n[t]=e.le(t))}function z8(e,t){var n;return null==(n=e.a.get(t))?[]:n}function z7(e,t){var n;n=e.q.getHours(),e.q.setMonth(t),eNq(e,n)}function Ge(e,t,n){return null==t?eS9(e.f,null,n):efi(e.g,t,n)}function Gt(e,t,n,r,i,a){return new Q$(e.e,t,e.aj(),n,r,i,a)}function Gn(e,t,n){return e.a=Az(e.a,0,t)+""+n+xy(e.a,t),e}function Gr(e,t,n){return P_(e.a,(U_(),eb2(t,n),new wD(t,n))),e}function Gi(e){return OX(e.c),e.e=e.a=e.c,e.c=e.c.c,++e.d,e.a.f}function Ga(e){return OX(e.e),e.c=e.a=e.e,e.e=e.e.e,--e.d,e.a.f}function 
Go(e,t){e.d&&QA(e.d.e,e),e.d=t,e.d&&P_(e.d.e,e)}function Gs(e,t){e.c&&QA(e.c.g,e),e.c=t,e.c&&P_(e.c.g,e)}function Gu(e,t){e.c&&QA(e.c.a,e),e.c=t,e.c&&P_(e.c.a,e)}function Gc(e,t){e.i&&QA(e.i.j,e),e.i=t,e.i&&P_(e.i.j,e)}function Gl(e,t,n){this.a=t,this.c=e,this.b=(Y9(n),new I4(n))}function Gf(e,t,n){this.a=t,this.c=e,this.b=(Y9(n),new I4(n))}function Gd(e,t){this.a=e,this.c=MB(this.a),this.b=new $g(t)}function Gh(e){var t;return el3(e),t=new bV,UJ(e,new di(t))}function Gp(e,t){if(e<0||e>t)throw p7(new gE(e$O+e+e$A+t))}function Gb(e,t){return jR(e.a,t)?Yl(e,Pp(t,22).g,null):null}function Gm(e){return euQ(),OQ(),0!=Pp(e.a,81).d.e}function Gg(){Gg=A,e0g=euY((m5(),eow(vx(e1W,1),eU4,538,0,[e0m])))}function Gv(){Gv=A,ts2=j0(new K2,(e_x(),e8i),(eB$(),e7N))}function Gy(){Gy=A,ts3=j0(new K2,(e_x(),e8i),(eB$(),e7N))}function Gw(){Gw=A,ts5=j0(new K2,(e_x(),e8i),(eB$(),e7N))}function G_(){G_=A,tuh=RI(new K2,(e_x(),e8i),(eB$(),e7o))}function GE(){GE=A,tug=RI(new K2,(e_x(),e8i),(eB$(),e7o))}function GS(){GS=A,tuv=RI(new K2,(e_x(),e8i),(eB$(),e7o))}function Gk(){Gk=A,tux=RI(new K2,(e_x(),e8i),(eB$(),e7o))}function Gx(){Gx=A,tcz=j0(new K2,(egR(),tu0),(eS_(),tu3))}function GT(e,t,n,r){this.c=e,this.d=r,GA(this,t),GL(this,n)}function GM(e){this.c=new _n,this.b=e.b,this.d=e.c,this.a=e.a}function GO(e){this.a=eB4.Math.cos(e),this.b=eB4.Math.sin(e)}function GA(e,t){e.a&&QA(e.a.k,e),e.a=t,e.a&&P_(e.a.k,e)}function GL(e,t){e.b&&QA(e.b.f,e),e.b=t,e.b&&P_(e.b.f,e)}function GC(e,t){BN(e,e.b,e.c),Pp(e.b.b,65),t&&Pp(t.b,65).b}function GI(e,t){elJ(e,t),M4(e.Cb,88)&&eko(Zd(Pp(e.Cb,88)),2)}function GD(e,t){M4(e.Cb,88)&&eko(Zd(Pp(e.Cb,88)),4),er3(e,t)}function GN(e,t){M4(e.Cb,179)&&(Pp(e.Cb,179).tb=null),er3(e,t)}function GP(e,t){return _4(),eec(t)?new RA(t,e):new xe(t,e)}function GR(e,t){var n,r;(r=null!=(n=t.c))&&BC(e,new B_(t.c))}function Gj(e){var t,n;return n=(yO(),t=new p5),etV(n,e),n}function GF(e){var t,n;return n=(yO(),t=new p5),etV(n,e),n}function GY(e,t){var n;return n=new 
By(e),t.c[t.c.length]=n,n}function GB(e,t){var n;return(n=Pp(ecA(HU(e.a),t),14))?n.gc():0}function GU(e){var t;return el3(e),etc(e,t=(HF(),HF(),e2u))}function GH(e){for(var t;;)if(t=e.Pb(),!e.Ob())return t}function G$(e,t){mK.call(this,new w8(ee0(e))),enG(t,eUN),this.a=t}function Gz(e,t,n){ec5(t,n,e.gc()),this.c=e,this.a=t,this.b=n-t}function GG(e,t,n){var r;ec5(t,n,e.c.length),r=n-t,yJ(e.c,t,r)}function GW(e,t){M7(e,jE(WM(Fv(t,24),e$b)),jE(WM(t,e$b)))}function GK(e,t){if(e<0||e>=t)throw p7(new gE(e$O+e+e$A+t))}function GV(e,t){if(e<0||e>=t)throw p7(new vf(e$O+e+e$A+t))}function Gq(e,t){this.b=(BJ(e),e),this.a=(t&eH0)==0?64|t|eUR:t}function GZ(e){TG(this),bF(this.a,esi(eB4.Math.max(8,e))<<1)}function GX(e){return esp(eow(vx(e50,1),eUP,8,0,[e.i.n,e.n,e.a]))}function GJ(){return eum(),eow(vx(e2L,1),eU4,132,0,[e2B,e2U,e2H])}function GQ(){return etx(),eow(vx(e26,1),eU4,232,0,[e3D,e3N,e3P])}function G1(){return Qs(),eow(vx(e27,1),eU4,461,0,[e3F,e3j,e3Y])}function G0(){return QQ(),eow(vx(e3t,1),eU4,462,0,[e3$,e3H,e3U])}function G2(){return ec4(),eow(vx(e4L,1),eU4,423,0,[e8x,e8k,e8S])}function G3(){return QJ(),eow(vx(e4S,1),eU4,379,0,[e94,e93,e95])}function G4(){return euJ(),eow(vx(e5e,1),eU4,378,0,[tsn,tsr,tsi])}function G5(){return en7(),eow(vx(e4q,1),eU4,314,0,[tej,teR,teF])}function G6(){return enB(),eow(vx(e4Z,1),eU4,337,0,[teB,teH,teU])}function G9(){return eoG(),eow(vx(e4Q,1),eU4,450,0,[te1,teQ,te0])}function G8(){return erX(),eow(vx(e4G,1),eU4,361,0,[tep,teh,ted])}function G7(){return Q1(),eow(vx(e46,1),eU4,303,0,[ttD,ttN,ttI])}function We(){return eaU(),eow(vx(e45,1),eU4,292,0,[ttA,ttL,ttO])}function Wt(){return enY(),eow(vx(e5o,1),eU4,452,0,[tsP,tsD,tsN])}function Wn(){return esn(),eow(vx(e5i,1),eU4,339,0,[tsM,tsT,tsO])}function Wr(){return ei0(),eow(vx(e5s,1),eU4,375,0,[tsj,tsF,tsY])}function Wi(){return eox(),eow(vx(e5f,1),eU4,377,0,[tsQ,ts1,tsJ])}function Wa(){return euy(),eow(vx(e5c,1),eU4,336,0,[tsz,tsG,tsW])}function Wo(){return 
eiO(),eow(vx(e5l,1),eU4,338,0,[tsZ,tsV,tsq])}function Ws(){return enU(),eow(vx(e5p,1),eU4,454,0,[tur,tui,tua])}function Wu(){return efx(),eow(vx(e5D,1),eU4,442,0,[tc1,tcJ,tcQ])}function Wc(){return eub(),eow(vx(e5P,1),eU4,380,0,[tc5,tc6,tc9])}function Wl(){return efS(),eow(vx(e5Y,1),eU4,381,0,[tlP,tlR,tlN])}function Wf(){return ei1(),eow(vx(e5j,1),eU4,293,0,[tlC,tlI,tlL])}function Wd(){return efk(),eow(vx(e5H,1),eU4,437,0,[tff,tfd,tfh])}function Wh(){return eck(),eow(vx(e57,1),eU4,334,0,[tpG,tpz,tpW])}function Wp(){return etT(),eow(vx(e56,1),eU4,272,0,[tp_,tpE,tpS])}function Wb(e,t){return eMw(e,t,M4(t,99)&&(Pp(t,18).Bb&eH3)!=0)}function Wm(e,t,n){var r;return(r=ePI(e,t,!1)).b<=t&&r.a<=n}function Wg(e,t,n){var r;(r=new ac).b=t,r.a=n,++t.b,P_(e.d,r)}function Wv(e,t){var n;return A3(!!(n=(BJ(e),e).g)),BJ(t),n(t)}function Wy(e,t){var n,r;return r=Yi(e,t),n=e.a.Zc(r),new wR(e,n)}function Ww(e){return e.Db>>16!=6?null:Pp(eTp(e),235)}function W_(e){if(2!=e.p)throw p7(new bT);return jE(e.f)&eHd}function WE(e){if(2!=e.p)throw p7(new bT);return jE(e.k)&eHd}function WS(e){return e.a==(ZE(),tvd)&&ff(e,eM0(e.g,e.b)),e.a}function Wk(e){return e.d==(ZE(),tvd)&&fh(e,eIj(e.g,e.b)),e.d}function Wx(e){return A6(e.ar?1:0}function WY(e,t){var n,r;return r=n=QP(t),Pp(Bp(e.c,r),19).a}function WB(e,t){var n;for(n=e+"";n.length0&&0==e.a[--e.d];);0==e.a[e.d++]&&(e.e=0)}function Kc(e){return e.a?0==e.e.length?e.a.a:e.a.a+""+e.e:e.c}function Kl(e){return!!e.a&&0!=QX(e.a.a).i&&!(e.b&&ebq(e.b))}function Kf(e){return!!e.u&&0!=qt(e.u.a).i&&!(e.n&&ebV(e.n))}function Kd(e){return Rj(e.e.Hd().gc()*e.c.Hd().gc(),16,new c9(e))}function Kh(e,t){return YM(eap(e.q.getTime()),eap(t.q.getTime()))}function Kp(e){return Pp(epg(e,Je(e4C,eGG,17,e.c.length,0,1)),474)}function Kb(e){return Pp(epg(e,Je(e4N,eGW,10,e.c.length,0,1)),193)}function Km(e){return GE(),!q8(e)&&!(!q8(e)&&e.c.i.c==e.d.i.c)}function Kg(e,t,n){var r;r=(Y9(e),new I4(e)),egT(new Gl(r,t,n))}function Kv(e,t,n){var r;r=(Y9(e),new I4(e)),egM(new 
Gf(r,t,n))}function Ky(e,t){var n;return n=1-t,e.a[n]=erj(e.a[n],n),erj(e,t)}function Kw(e,t){var n;e.e=new mQ,n=eLj(t),Mv(n,e.c),eLJ(e,n,0)}function K_(e,t,n,r){var i;(i=new od).a=t,i.b=n,i.c=r,P7(e.a,i)}function KE(e,t,n,r){var i;(i=new od).a=t,i.b=n,i.c=r,P7(e.b,i)}function KS(e){var t,n,r;return n=eI4(t=new YQ,e),eFg(t),r=n}function Kk(){var e,t,n;return P_(tg6,t=n=e=new p5),t}function Kx(e){return e.j.c=Je(e1R,eUp,1,0,5,1),UG(e.c),Uj(e.a),e}function KT(e){return(_L(),M4(e.g,10))?Pp(e.g,10):null}function KM(e){return!Uz(e).dc()&&(MI(e,new v),!0)}function KO(e){if(!("stack"in e))try{throw e}catch(t){}return e}function KA(e,t){if(e<0||e>=t)throw p7(new gE(eku(e,t)));return e}function KL(e,t,n){if(e<0||tn)throw p7(new gE(eE3(e,t,n)))}function KC(e,t){if(Yf(e.a,t),t.d)throw p7(new go(e$P));t.d=e}function KI(e,t){if(t.$modCount!=e.$modCount)throw p7(new bA)}function KD(e,t){return!!M4(t,42)&&emT(e.a,Pp(t,42))}function KN(e,t){return!!M4(t,42)&&emT(e.a,Pp(t,42))}function KP(e,t){return!!M4(t,42)&&emT(e.a,Pp(t,42))}function KR(e,t){return e.a<=e.b&&(t.ud(e.a++),!0)}function Kj(e){var t;return Ts(e)?-0==(t=e)?0:t:eem(e)}function KF(e){var t;return B1(e),t=new Y,yU(e.a,new dn(t)),t}function KY(e){var t;return B1(e),t=new F,yU(e.a,new dt(t)),t}function KB(e,t){this.a=e,fE.call(this,e),Gp(t,e.gc()),this.b=t}function KU(e){this.e=e,this.b=this.e.a.entries(),this.a=[]}function KH(e){return Rj(e.e.Hd().gc()*e.c.Hd().gc(),273,new c6(e))}function K$(e){return new XM((enG(e,eU6),ee1(eft(eft(5,e),e/10|0))))}function Kz(e){return Pp(epg(e,Je(e4j,eGK,11,e.c.length,0,1)),1943)}function KG(e,t,n){return n.f.c.length>0?YO(e.a,t,n):YO(e.b,t,n)}function KW(e,t,n){e.d&&QA(e.d.e,e),e.d=t,e.d&&jO(e.d.e,n,e)}function KK(e,t){eY5(t,e),PD(e.d),PD(Pp(e_k(e,(eBy(),taq)),207))}function KV(e,t){eY4(t,e),PI(e.d),PI(Pp(e_k(e,(eBy(),taq)),207))}function Kq(e,t){var n,r;return n=zR(e,t),r=null,n&&(r=n.fe()),r}function KZ(e,t){var n,r;return n=eep(e,t),r=null,n&&(r=n.ie()),r}function KX(e,t){var 
n,r;return n=zR(e,t),r=null,n&&(r=n.ie()),r}function KJ(e,t){var n,r;return n=zR(e,t),r=null,n&&(r=eSa(n)),r}function KQ(e,t,n){var r;return r=ehM(n),eIg(e.g,r,t),eIg(e.i,t,n),t}function K1(e,t,n){var r;r=ehl();try{return CO(e,t,n)}finally{Vx(r)}}function K0(e){var t;t=e.Wg(),this.a=M4(t,69)?Pp(t,69).Zh():t.Kc()}function K2(){mJ.call(this),this.j.c=Je(e1R,eUp,1,0,5,1),this.a=-1}function K3(e,t,n,r){this.d=e,this.n=t,this.g=n,this.o=r,this.p=-1}function K4(e,t,n,r){this.e=r,this.d=null,this.c=e,this.a=t,this.b=n}function K5(e,t,n){this.d=new hg(this),this.e=e,this.i=t,this.f=n}function K6(){K6=A,ttR=new S_(e$8,0),ttj=new S_("TOP_LEFT",1)}function K9(){K9=A,ts7=Uy(ell(1),ell(4)),ts8=Uy(ell(1),ell(2))}function K8(){K8=A,tfv=euY((_N(),eow(vx(e5z,1),eU4,551,0,[tfg])))}function K7(){K7=A,tfm=euY((_D(),eow(vx(e5$,1),eU4,482,0,[tfb])))}function Ve(){Ve=A,tf7=euY((_P(),eow(vx(e5K,1),eU4,530,0,[tf8])))}function Vt(){Vt=A,e6W=euY((_y(),eow(vx(e4w,1),eU4,481,0,[e6G])))}function Vn(){return eaY(),eow(vx(e3r,1),eU4,406,0,[e4c,e4o,e4s,e4u])}function Vr(){return Qu(),eow(vx(e2E,1),eU4,297,0,[e2D,e2N,e2P,e2R])}function Vi(){return ebe(),eow(vx(e4y,1),eU4,394,0,[e6U,e6B,e6H,e6$])}function Va(){return ep7(),eow(vx(e3i,1),eU4,323,0,[e4d,e4f,e4h,e4p])}function Vo(){return eok(),eow(vx(e4A,1),eU4,405,0,[e8f,e8p,e8d,e8h])}function Vs(){return eoE(),eow(vx(e4U,1),eU4,360,0,[e7Z,e7V,e7q,e7K])}function Vu(e,t,n,r){return M4(n,54)?new A7(e,t,n,r):new Fo(e,t,n,r)}function Vc(){return eoS(),eow(vx(e4$,1),eU4,411,0,[e79,e78,e77,tee])}function Vl(e){var t;return e.j==(eYu(),tbj)&&(t=eTt(e),Aa(t,tby))}function Vf(e,t){var n;Gs(n=t.a,t.c.d),Go(n,t.d.d),etH(n.a,e.n)}function Vd(e,t){return Pp(Af(FT(Pp(Zq(e.k,t),15).Oc(),tex)),113)}function Vh(e,t){return Pp(Af(FM(Pp(Zq(e.k,t),15).Oc(),tex)),113)}function Vp(e){return new Gq(eip(Pp(e.a.dd(),14).gc(),e.a.cd()),16)}function Vb(e){return M4(e,14)?Pp(e,14).dc():!e.Kc().Ob()}function Vm(e){return(_L(),M4(e.g,145))?Pp(e.g,145):null}function 
Vg(e){if(e.e.g!=e.b)throw p7(new bA);return!!e.c&&e.d>0}function Vv(e){return A6(e.b!=e.d.c),e.c=e.b,e.b=e.b.a,++e.a,e.c.c}function Vy(e,t){BJ(t),Bc(e.a,e.c,t),e.c=e.c+1&e.a.length-1,ega(e)}function Vw(e,t){BJ(t),e.b=e.b-1&e.a.length-1,Bc(e.a,e.b,t),ega(e)}function V_(e,t){var n;for(n=e.j.c.length;n0&&ePD(e.g,0,t,0,e.i),t}function VB(e,t){var n;return _5(),!(n=Pp(Bp(tmU,e),55))||n.wj(t)}function VU(e){if(1!=e.p)throw p7(new bT);return jE(e.f)<<24>>24}function VH(e){if(1!=e.p)throw p7(new bT);return jE(e.k)<<24>>24}function V$(e){if(7!=e.p)throw p7(new bT);return jE(e.k)<<16>>16}function Vz(e){if(7!=e.p)throw p7(new bT);return jE(e.f)<<16>>16}function VG(e){var t;for(t=0;e.Ob();)e.Pb(),t=eft(t,1);return ee1(t)}function VW(e,t){var n;return n=new vl,e.xd(n),n.a+="..",t.yd(n),n.a}function VK(e,t,n){var r;r=Pp(Bp(e.g,n),57),P_(e.a.c,new kD(t,r))}function VV(e,t,n){return F_(LV(xu($I(e.f,t))),LV(xu($I(e.f,n))))}function Vq(e,t,n){return eNA(e,t,n,M4(t,99)&&(Pp(t,18).Bb&eH3)!=0)}function VZ(e,t,n){return eN1(e,t,n,M4(t,99)&&(Pp(t,18).Bb&eH3)!=0)}function VX(e,t,n){return eMN(e,t,n,M4(t,99)&&(Pp(t,18).Bb&eH3)!=0)}function VJ(e,t){return e==(eEn(),e8N)&&t==e8N?4:e==e8N||t==e8N?8:32}function VQ(e,t){return xc(t)===xc(e)?"(this Map)":null==t?eUg:efF(t)}function V1(e,t){return Pp(null==t?xu($I(e.f,null)):Ea(e.g,t),281)}function V0(e,t,n){var r;return r=ehM(n),Um(e.b,r,t),Um(e.c,t,n),t}function V2(e,t){var n;for(n=t;n;)Lu(e,n.i,n.j),n=z$(n);return e}function V3(e,t){var n;return n=$a(Pb(new Qj(e,t))),RG(new Qj(e,t)),n}function V4(e,t){var n;return _4(),eEy(n=Pp(e,66).Mj(),t),n.Ok(t)}function V5(e,t,n,r,i){var a;a=eMW(i,n,r),P_(t,eS4(i,a)),e_X(e,i,t)}function V6(e,t,n){e.i=0,e.e=0,t!=n&&(esC(e,t,n),esL(e,t,n))}function V9(e,t){var n;n=e.q.getHours(),e.q.setFullYear(t+eHx),eNq(e,n)}function V8(e,t,n){if(n){var r=n.ee();e.a[t]=r(n)}else delete e.a[t]}function V7(e,t,n){n=n?n.ee()(n):void 0,e.a[t]=n}function qe(e){if(e<0)throw p7(new gI("Negative array size: "+e))}function 
qt(e){return e.n||(Zd(e),e.n=new j4(e,tgr,e),$E(e)),e.n}function qn(e){return A6(e.a=0&&e.a[n]===t[n];n--);return n<0}function qy(e,t){var n;return(euv(),0!=(n=e.j.g-t.j.g))?n:0}function qw(e,t){return(BJ(t),null!=e.a)?jN(t.Kb(e.a)):e2b}function q_(e){var t;return e?new RZ(e):(t=new Tw,ein(t,e),t)}function qE(e,t){var n;return t.b.Kb(QD(e,t.c.Ee(),n=new ds(t)))}function qS(e){ewP(),M7(this,jE(WM(Fv(e,24),e$b)),jE(WM(e,e$b)))}function qk(){qk=A,e3d=euY((epC(),eow(vx(e2Q,1),eU4,428,0,[e3f,e3l])))}function qx(){qx=A,e3b=euY((eeR(),eow(vx(e21,1),eU4,427,0,[e3h,e3p])))}function qT(){qT=A,e9h=euY((eej(),eow(vx(e4E,1),eU4,424,0,[e9f,e9d])))}function qM(){qM=A,e8K=euY((erq(),eow(vx(e4F,1),eU4,511,0,[e8W,e8G])))}function qO(){qO=A,teJ=euY((eeF(),eow(vx(e4J,1),eU4,419,0,[teZ,teX])))}function qA(){qA=A,ttn=euY((Qx(),eow(vx(e40,1),eU4,479,0,[ttt,tte])))}function qL(){qL=A,ts$=euY((Xo(),eow(vx(e5u,1),eU4,376,0,[tsH,tsU])))}function qC(){qC=A,tsI=euY((Q0(),eow(vx(e5a,1),eU4,421,0,[tsL,tsC])))}function qI(){qI=A,teP=euY((Jp(),eow(vx(e4V,1),eU4,422,0,[teD,teN])))}function qD(){qD=A,ttF=euY((K6(),eow(vx(e49,1),eU4,420,0,[ttR,ttj])))}function qN(){qN=A,tu$=euY((Xa(),eow(vx(e5k,1),eU4,520,0,[tuH,tuU])))}function qP(){qP=A,tud=euY((qG(),eow(vx(e5v,1),eU4,523,0,[tuf,tul])))}function qR(){qR=A,tu_=euY((zs(),eow(vx(e5E,1),eU4,516,0,[tuw,tuy])))}function qj(){qj=A,tuk=euY((zQ(),eow(vx(e5S,1),eU4,515,0,[tuE,tuS])))}function qF(){qF=A,tuX=euY((zo(),eow(vx(e5x,1),eU4,455,0,[tuq,tuZ])))}function qY(){qY=A,tc$=euY((Kn(),eow(vx(e5C,1),eU4,425,0,[tcH,tcU])))}function qB(){qB=A,tcX=euY((erZ(),eow(vx(e5I,1),eU4,495,0,[tcq,tcZ])))}function qU(){qU=A,tcB=euY((z1(),eow(vx(e5L,1),eU4,480,0,[tcF,tcY])))}function qH(){qH=A,tc4=euY((J0(),eow(vx(e5N,1),eU4,426,0,[tc2,tc3])))}function q$(){q$=A,tdn=euY((eoT(),eow(vx(e5V,1),eU4,429,0,[tdt,tde])))}function qz(){qz=A,tf_=euY((Xs(),eow(vx(e5G,1),eU4,430,0,[tfw,tfy])))}function qG(){qG=A,tuf=new Sj("UPPER",0),tul=new Sj("LOWER",1)}function qW(e,t){var 
n;H1(n=new gu,"x",t.a),H1(n,"y",t.b),BC(e,n)}function qK(e,t){var n;H1(n=new gu,"x",t.a),H1(n,"y",t.b),BC(e,n)}function qV(e,t){var n,r;r=!1;do n=eo6(e,t),r|=n;while(n)return r}function qq(e,t){var n,r;for(n=t,r=0;n>0;)r+=e.a[n],n-=n&-n;return r}function qZ(e,t){var n;for(n=t;n;)Lu(e,-n.i,-n.j),n=z$(n);return e}function qX(e,t){var n,r;for(BJ(t),r=e.Kc();r.Ob();)n=r.Pb(),t.td(n)}function qJ(e,t){var n;return n=t.cd(),new wD(n,e.e.pc(n,Pp(t.dd(),14)))}function qQ(e,t,n,r){var i;(i=new C).c=t,i.b=n,i.a=r,r.b=n.a=i,++e.b}function q1(e,t,n){var r;return r=(GK(t,e.c.length),e.c[t]),e.c[t]=n,r}function q0(e,t,n){return Pp(null==t?eS9(e.f,null,n):efi(e.g,t,n),281)}function q2(e){return e.c&&e.d?WH(e.c)+"->"+WH(e.d):"e_"+Ao(e)}function q3(e,t){return(el3(e),yK(new R1(e,new Qa(t,e.a)))).sd(e2z)}function q4(){return e_x(),eow(vx(e4k,1),eU4,356,0,[e8e,e8t,e8n,e8r,e8i])}function q5(){return eYu(),eow(vx(e6a,1),eGj,61,0,[tbF,tbw,tby,tbj,tbY])}function q6(e){return vg(),function(){return K1(e,this,arguments)}}function q9(){return Date.now?Date.now():(new Date).getTime()}function q8(e){return!!e.c&&!!e.d&&!!e.c.i&&e.c.i==e.d.i}function q7(e){if(!e.c.Sb())throw p7(new bC);return e.a=!0,e.c.Ub()}function Ze(e){e.i=0,Eb(e.b,null),Eb(e.c,null),e.a=null,e.e=null,++e.g}function Zt(e){El.call(this,null==e?eUg:efF(e),M4(e,78)?Pp(e,78):null)}function Zn(e){eBD(),p8(this),this.a=new _n,esJ(this,e),P7(this.a,e)}function Zr(){Tz(this),this.b=new kl(eHQ,eHQ),this.a=new kl(eH1,eH1)}function Zi(e,t){this.c=0,this.b=t,xR.call(this,e,17493),this.a=this.c}function Za(e){Zo(),!e2M&&(this.c=e,this.e=!0,this.a=new p0)}function Zo(){Zo=A,e2M=!0,e2x=!1,e2T=!1,e2A=!1,e2O=!1}function Zs(e,t){return!!M4(t,149)&&IE(e.c,Pp(t,149).c)}function Zu(e,t){var n;return n=0,e&&(n+=e.f.a/2),t&&(n+=t.f.a/2),n}function Zc(e,t){var n;return(n=Pp(eef(e.d,t),23))||Pp(eef(e.e,t),23)}function Zl(e){this.b=e,Ow.call(this,e),this.a=Pp(eaS(this.b.a,4),126)}function 
Zf(e){this.b=e,AY.call(this,e),this.a=Pp(eaS(this.b.a,4),126)}function Zd(e){return e.t||(e.t=new pR(e),elm(new gT(e),0,e.t)),e.t}function Zh(){return ec3(),eow(vx(e55,1),eU4,103,0,[tpv,tpg,tpm,tpb,tpy])}function Zp(){return epT(),eow(vx(e6n,1),eU4,249,0,[tbt,tbr,tp7,tbe,tbn])}function Zb(){return epx(),eow(vx(e5Q,1),eU4,175,0,[tdh,tdd,tdl,tdp,tdf])}function Zm(){return eEM(),eow(vx(e5W,1),eU4,316,0,[tfE,tfS,tfT,tfk,tfx])}function Zg(){return ebG(),eow(vx(e5n,1),eU4,315,0,[tsb,tsd,tsh,tsf,tsp])}function Zv(){return eb6(),eow(vx(e4X,1),eU4,335,0,[teG,tez,teK,teV,teW])}function Zy(){return eOB(),eow(vx(e5U,1),eU4,355,0,[tfo,tfa,tfu,tfs,tfc])}function Zw(){return ey4(),eow(vx(e4z,1),eU4,363,0,[ter,tea,teo,tei,ten])}function Z_(){return ef_(),eow(vx(e48,1),eU4,163,0,[tnj,tnD,tnN,tnP,tnR])}function ZE(){var e,t;ZE=A,tvf=(yO(),t=new bN),tvd=e=new mC}function ZS(e){var t;return!e.c&&M4(t=e.r,88)&&(e.c=Pp(t,26)),e.c}function Zk(e){return e.e=3,e.d=e.Yb(),2!=e.e&&(e.e=0,!0)}function Zx(e){var t,n,r;return t=e&eHH,Mk(t,n=e>>22&eHH,r=e<0?eH$:0)}function ZT(e){var t,n,r,i;for(r=0,i=(n=e).length;r0?ehe(e,t):eA8(e,-t)}function ZL(e,t){return 0==t||0==e.e?e:t>0?eA8(e,t):ehe(e,-t)}function ZC(e){if(eTk(e))return e.c=e.a,e.a.Pb();throw p7(new bC)}function ZI(e){var t,n;return t=e.c.i,n=e.d.i,t.k==(eEn(),e8C)&&n.k==e8C}function ZD(e){var t;return t=new $b,eaW(t,e),eo3(t,(eBy(),taR),null),t}function ZN(e,t,n){var r;return(r=e.Yg(t))>=0?e._g(r,n,!0):exk(e,t,n)}function ZP(e,t,n,r){var i;for(i=0;it)throw p7(new gE(eS1(e,t,"index")));return e}function Z1(e,t,n,r){var i;return i=Je(ty_,eHT,25,t,15,1),ewD(i,e,t,n,r),i}function Z0(e,t){var n;n=e.q.getHours()+(t/60|0),e.q.setMinutes(t),eNq(e,n)}function Z2(e,t){return eB4.Math.min(Jh(t.a,e.d.d.c),Jh(t.b,e.d.d.c))}function Z3(e,t){return xd(t)?null==t?eTx(e.f,null):eaK(e.g,t):eTx(e.f,t)}function Z4(e){this.c=e,this.a=new fz(this.c.a),this.b=new fz(this.c.b)}function Z5(){this.e=new p0,this.c=new p0,this.d=new p0,this.b=new p0}function 
Z6(){this.g=new bJ,this.b=new bJ,this.a=new p0,this.k=new p0}function Z9(e,t,n){this.a=e,this.c=t,this.d=n,P_(t.e,this),P_(n.b,this)}function Z8(e,t){xP.call(this,t.rd(),-6&t.qd()),BJ(e),this.a=e,this.b=t}function Z7(e,t){xR.call(this,t.rd(),-6&t.qd()),BJ(e),this.a=e,this.b=t}function Xe(e,t){xj.call(this,t.rd(),-6&t.qd()),BJ(e),this.a=e,this.b=t}function Xt(e,t,n){this.a=e,this.b=t,this.c=n,P_(e.t,this),P_(t.i,this)}function Xn(){this.b=new _n,this.a=new _n,this.b=new _n,this.a=new _n}function Xr(){Xr=A,tdx=new pO("org.eclipse.elk.labels.labelManager")}function Xi(){Xi=A,e7W=new Cm("separateLayerConnections",(eoE(),e7Z))}function Xa(){Xa=A,tuH=new SW("REGULAR",0),tuU=new SW("CRITICAL",1)}function Xo(){Xo=A,tsH=new SI("STACKED",0),tsU=new SI("SEQUENCED",1)}function Xs(){Xs=A,tfw=new S7("FIXED",0),tfy=new S7("CENTER_NODE",1)}function Xu(e,t){var n;return n=ejH(e,t),e.b=new erH(n.c.length),eRj(e,n)}function Xc(e,t,n){var r;return++e.e,--e.f,(r=Pp(e.d[t].$c(n),133)).dd()}function Xl(e){var t;return!e.a&&M4(t=e.r,148)&&(e.a=Pp(t,148)),e.a}function Xf(e){return e.a?e.e?Xf(e.e):null:e}function Xd(e,t){return e.pt.p?-1:0}function Xh(e,t){return BJ(t),e.c=0,"Initial capacity must not be negative")}function XO(){XO=A,e3R=euY((etx(),eow(vx(e26,1),eU4,232,0,[e3D,e3N,e3P])))}function XA(){XA=A,e3B=euY((Qs(),eow(vx(e27,1),eU4,461,0,[e3F,e3j,e3Y])))}function XL(){XL=A,e3z=euY((QQ(),eow(vx(e3t,1),eU4,462,0,[e3$,e3H,e3U])))}function XC(){XC=A,e2$=euY((eum(),eow(vx(e2L,1),eU4,132,0,[e2B,e2U,e2H])))}function XI(){XI=A,e96=euY((QJ(),eow(vx(e4S,1),eU4,379,0,[e94,e93,e95])))}function XD(){XD=A,e8T=euY((ec4(),eow(vx(e4L,1),eU4,423,0,[e8x,e8k,e8S])))}function XN(){XN=A,teY=euY((en7(),eow(vx(e4q,1),eU4,314,0,[tej,teR,teF])))}function XP(){XP=A,te$=euY((enB(),eow(vx(e4Z,1),eU4,337,0,[teB,teH,teU])))}function XR(){XR=A,te2=euY((eoG(),eow(vx(e4Q,1),eU4,450,0,[te1,teQ,te0])))}function Xj(){Xj=A,teb=euY((erX(),eow(vx(e4G,1),eU4,361,0,[tep,teh,ted])))}function 
XF(){XF=A,ttP=euY((Q1(),eow(vx(e46,1),eU4,303,0,[ttD,ttN,ttI])))}function XY(){XY=A,ttC=euY((eaU(),eow(vx(e45,1),eU4,292,0,[ttA,ttL,ttO])))}function XB(){XB=A,tsa=euY((euJ(),eow(vx(e5e,1),eU4,378,0,[tsn,tsr,tsi])))}function XU(){XU=A,tsB=euY((ei0(),eow(vx(e5s,1),eU4,375,0,[tsj,tsF,tsY])))}function XH(){XH=A,tsA=euY((esn(),eow(vx(e5i,1),eU4,339,0,[tsM,tsT,tsO])))}function X$(){X$=A,tsR=euY((enY(),eow(vx(e5o,1),eU4,452,0,[tsP,tsD,tsN])))}function Xz(){Xz=A,ts0=euY((eox(),eow(vx(e5f,1),eU4,377,0,[tsQ,ts1,tsJ])))}function XG(){XG=A,tsK=euY((euy(),eow(vx(e5c,1),eU4,336,0,[tsz,tsG,tsW])))}function XW(){XW=A,tsX=euY((eiO(),eow(vx(e5l,1),eU4,338,0,[tsZ,tsV,tsq])))}function XK(){XK=A,tuo=euY((enU(),eow(vx(e5p,1),eU4,454,0,[tur,tui,tua])))}function XV(){XV=A,tc0=euY((efx(),eow(vx(e5D,1),eU4,442,0,[tc1,tcJ,tcQ])))}function Xq(){Xq=A,tc8=euY((eub(),eow(vx(e5P,1),eU4,380,0,[tc5,tc6,tc9])))}function XZ(){XZ=A,tlj=euY((efS(),eow(vx(e5Y,1),eU4,381,0,[tlP,tlR,tlN])))}function XX(){XX=A,tlD=euY((ei1(),eow(vx(e5j,1),eU4,293,0,[tlC,tlI,tlL])))}function XJ(){XJ=A,tfp=euY((efk(),eow(vx(e5H,1),eU4,437,0,[tff,tfd,tfh])))}function XQ(){XQ=A,tpK=euY((eck(),eow(vx(e57,1),eU4,334,0,[tpG,tpz,tpW])))}function X1(){X1=A,tpk=euY((etT(),eow(vx(e56,1),eU4,272,0,[tp_,tpE,tpS])))}function X0(){return ewf(),eow(vx(e6r,1),eU4,98,0,[tbl,tbc,tbu,tba,tbs,tbo])}function X2(e,t){return e.o||(e.o=new JY((eBa(),tmy),e6O,e,0)),edG(e.o,t)}function X3(e){return e.g||(e.g=new o2),e.g.d||(e.g.d=new pD(e)),e.g.d}function X4(e){return e.g||(e.g=new o2),e.g.a||(e.g.a=new pN(e)),e.g.a}function X5(e){return e.g||(e.g=new o2),e.g.b||(e.g.b=new pI(e)),e.g.b}function X6(e){return e.g||(e.g=new o2),e.g.c||(e.g.c=new pP(e)),e.g.c}function X9(e,t,n){var r,i;for(r=0,i=new eaN(t,e);rn||t=0?e._g(n,!0,!0):exk(e,t,!0)}function JW(e,t){return elN(gP(LV(e_k(e,(eBU(),tnv)))),gP(LV(e_k(t,tnv))))}function JK(){JK=A,tcG=ehY(ehY(_G(new K2,(egR(),tuQ)),(eS_(),tu8)),tu4)}function JV(e,t,n){var r;return r=esg(e,t,n),e.b=new 
erH(r.c.length),eLI(e,r)}function Jq(e){if(e.b<=0)throw p7(new bC);return--e.b,e.a-=e.c.c,ell(e.a)}function JZ(e){var t;if(!e.a)throw p7(new UD);return t=e.a,e.a=z$(e.a),t}function JX(e){for(;!e.a;)if(!IM(e.c,new dr(e)))return!1;return!0}function JJ(e){var t;return(Y9(e),M4(e,198))?t=Pp(e,198):new lp(e)}function JQ(e){J1(),Pp(e.We((eBB(),thJ)),174).Fc((ekU(),tbb)),e.Ye(thX,null)}function J1(){J1=A,tdo=new os,tdu=new ou,tds=es0((eBB(),thX),tdo,thL,tdu)}function J0(){J0=A,tc2=new S2("LEAF_NUMBER",0),tc3=new S2("NODE_SIZE",1)}function J2(e,t,n){e.a=t,e.c=n,e.b.a.$b(),HC(e.d),e.e.a.c=Je(e1R,eUp,1,0,5,1)}function J3(e){e.a=Je(ty_,eHT,25,e.b+1,15,1),e.c=Je(ty_,eHT,25,e.b,15,1),e.d=0}function J4(e,t){e.a.ue(t.d,e.b)>0&&(P_(e.c,new PW(t.c,t.d,e.d)),e.b=t.d)}function J5(e,t){if(null==e.g||t>=e.i)throw p7(new xJ(t,e.i));return e.g[t]}function J6(e,t,n){if(euu(e,n),null!=n&&!e.wj(n))throw p7(new bS);return n}function J9(e){var t;if(e.Ek())for(t=e.i-1;t>=0;--t)etj(e,t);return VY(e)}function J8(e){var t,n;if(!e.b)return null;for(n=e.b;t=n.a[0];)n=t;return n}function J7(e,t){var n,r;return qe(t),(n=QO(r=e.slice(0,t),e)).length=t,n}function Qe(e,t,n,r){var i;r=(HF(),r||e2s),eS0(i=e.slice(t,n),e,t,n,-t,r)}function Qt(e,t,n,r,i){return t<0?exk(e,n,r):Pp(n,66).Nj().Pj(e,e.yh(),t,r,i)}function Qn(e){return M4(e,172)?""+Pp(e,172).a:null==e?null:efF(e)}function Qr(e){return M4(e,172)?""+Pp(e,172).a:null==e?null:efF(e)}function Qi(e,t){if(t.a)throw p7(new go(e$P));Yf(e.a,t),t.a=e,e.j||(e.j=t)}function Qa(e,t){xj.call(this,t.rd(),-16449&t.qd()),BJ(e),this.a=e,this.c=t}function Qo(e,t){var n,r;return r=t/e.c.Hd().gc()|0,n=t%e.c.Hd().gc(),X_(e,r,n)}function Qs(){Qs=A,e3F=new EY(ezt,0),e3j=new EY(e$8,1),e3Y=new EY(ezn,2)}function Qu(){Qu=A,e2D=new Ef("All",0),e2N=new TH,e2P=new ML,e2R=new T$}function Qc(){Qc=A,e2j=euY((Qu(),eow(vx(e2E,1),eU4,297,0,[e2D,e2N,e2P,e2R])))}function Ql(){Ql=A,e8b=euY((eok(),eow(vx(e4A,1),eU4,405,0,[e8f,e8p,e8d,e8h])))}function 
Qf(){Qf=A,e4l=euY((eaY(),eow(vx(e3r,1),eU4,406,0,[e4c,e4o,e4s,e4u])))}function Qd(){Qd=A,e4b=euY((ep7(),eow(vx(e3i,1),eU4,323,0,[e4d,e4f,e4h,e4p])))}function Qh(){Qh=A,e6z=euY((ebe(),eow(vx(e4y,1),eU4,394,0,[e6U,e6B,e6H,e6$])))}function Qp(){Qp=A,tu2=euY((egR(),eow(vx(e5T,1),eU4,393,0,[tuJ,tuQ,tu1,tu0])))}function Qb(){Qb=A,e7X=euY((eoE(),eow(vx(e4U,1),eU4,360,0,[e7Z,e7V,e7q,e7K])))}function Qm(){Qm=A,tlA=euY((emC(),eow(vx(e5R,1),eU4,340,0,[tlO,tlT,tlM,tlx])))}function Qg(){Qg=A,tet=euY((eoS(),eow(vx(e4$,1),eU4,411,0,[e79,e78,e77,tee])))}function Qv(){Qv=A,tsl=euY((ebk(),eow(vx(e5t,1),eU4,197,0,[tsu,tsc,tss,tso])))}function Qy(){Qy=A,tmo=euY((eup(),eow(vx(e6l,1),eU4,396,0,[tmr,tmi,tmn,tma])))}function Qw(){Qw=A,tpJ=euY((egF(),eow(vx(e6e,1),eU4,285,0,[tpX,tpV,tpq,tpZ])))}function Q_(){Q_=A,tpA=euY((efE(),eow(vx(e59,1),eU4,218,0,[tpO,tpT,tpx,tpM])))}function QE(){QE=A,tmt=euY((edM(),eow(vx(e6u,1),eU4,311,0,[tme,tb9,tb7,tb8])))}function QS(){QS=A,tbZ=euY((ed6(),eow(vx(e6o,1),eU4,374,0,[tbV,tbq,tbK,tbW])))}function Qk(){Qk=A,ePm(),tvq=eHQ,tvV=eH1,tvX=new fL(eHQ),tvZ=new fL(eH1)}function Qx(){Qx=A,ttt=new Sb(eGR,0),tte=new Sb("IMPROVE_STRAIGHTNESS",1)}function QT(e,t){return Pj(),P_(e,new kD(t,ell(t.e.c.length+t.g.c.length)))}function QM(e,t){return Pj(),P_(e,new kD(t,ell(t.e.c.length+t.g.c.length)))}function QO(e,t){return 10!=eeg(t)&&eow(esF(t),t.hm,t.__elementTypeId$,eeg(t),e),e}function QA(e,t){var n;return -1!=(n=QI(e,t,0))&&(ZV(e,n),!0)}function QL(e,t){var n;return(n=Pp(Z3(e.e,t),387))?(Re(n),n.e):null}function QC(e){var t;return Ts(e)&&!isNaN(t=0-e)?t:eal(eoQ(e))}function QI(e,t,n){for(;n=0?ebl(e,n,!0,!0):exk(e,t,!0)}function Q8(e,t){var n,r;return _L(),n=Vm(e),r=Vm(t),!!n&&!!r&&!ep5(n.k,r.k)}function Q7(e,t){eno(e,null==t||IX((BJ(t),t))||isNaN((BJ(t),t))?0:(BJ(t),t))}function eee(e,t){ens(e,null==t||IX((BJ(t),t))||isNaN((BJ(t),t))?0:(BJ(t),t))}function eet(e,t){ena(e,null==t||IX((BJ(t),t))||isNaN((BJ(t),t))?0:(BJ(t),t))}function 
een(e,t){eni(e,null==t||IX((BJ(t),t))||isNaN((BJ(t),t))?0:(BJ(t),t))}function eer(e){(this.q?this.q:(Hj(),Hj(),e2i)).Ac(e.q?e.q:(Hj(),Hj(),e2i))}function eei(e,t){return M4(t,99)&&(Pp(t,18).Bb&eH3)!=0?new x1(t,e):new eaN(t,e)}function eea(e,t){return M4(t,99)&&(Pp(t,18).Bb&eH3)!=0?new x1(t,e):new eaN(t,e)}function eeo(e,t){e4g=new e0,e4v=t,Pp((e4m=e).b,65),Jr(e4m,e4g,null),eRk(e4m)}function ees(e,t,n){var r;return r=e.g[t],Of(e,t,e.oi(t,n)),e.gi(t,n,r),e.ci(),r}function eeu(e,t){var n;return(n=e.Xc(t))>=0&&(e.$c(n),!0)}function eec(e){var t;return e.d!=e.r&&(t=evl(e),e.e=!!t&&t.Cj()==eJK,e.d=t),e.e}function eel(e,t){var n;for(Y9(e),Y9(t),n=!1;t.Ob();)n|=e.Fc(t.Pb());return n}function eef(e,t){var n;return(n=Pp(Bp(e.e,t),387))?(M6(e,n),n.e):null}function eed(e){var t,n;return(t=e/60|0,0==(n=e%60))?""+t:""+t+":"+n}function eeh(e,t){var n,r;return el3(e),r=new Xe(t,e.a),n=new IU(r),new R1(e,n)}function eep(e,t){var n=e.a[t],r=(eoW(),e0O)[typeof n];return r?r(n):euV(typeof n)}function eeb(e){switch(e.g){case 0:return eUu;case 1:return -1;default:return 0}}function eem(e){return 0>evy(e,(Q2(),e0D))?-As(eoQ(e)):e.l+e.m*eHG+e.h*eHW}function eeg(e){return null==e.__elementTypeCategory$?10:e.__elementTypeCategory$}function eev(e){var t;return null!=(t=0==e.b.c.length?null:RJ(e.b,0))&&erD(e,0),t}function eey(e,t){for(;t[0]=0;)++t[0]}function eew(e,t){this.e=t,this.a=eaJ(e),this.a<54?this.f=Kj(e):this.c=ep_(e)}function ee_(e,t,n,r){eBG(),pJ.call(this,26),this.c=e,this.a=t,this.d=n,this.b=r}function eeE(e,t,n){var r,i;for(i=0,r=10;ie.a[r]&&(r=n);return r}function eeI(e,t){var n;return 0==(n=efT(e.e.c,t.e.c))?elN(e.e.d,t.e.d):n}function eeD(e,t){return 0==t.e||0==e.e?e08:(exX(),eAl(e,t))}function eeN(e,t){if(!e)throw p7(new gL(eAL("Enum constant undefined: %s",t)))}function eeP(){eeP=A,e8v=new tp,e8y=new td,e8m=new ty,e8g=new tw,e8w=new t_}function eeR(){eeR=A,e3h=new ER("BY_SIZE",0),e3p=new ER("BY_SIZE_AND_SHAPE",1)}function eej(){eej=A,e9f=new EH("EADES",0),e9d=new 
EH("FRUCHTERMAN_REINGOLD",1)}function eeF(){eeF=A,teZ=new Sd("READING_DIRECTION",0),teX=new Sd("ROTATION",1)}function eeY(){eeY=A,teq=euY((eb6(),eow(vx(e4X,1),eU4,335,0,[teG,tez,teK,teV,teW])))}function eeB(){eeB=A,tsm=euY((ebG(),eow(vx(e5n,1),eU4,315,0,[tsb,tsd,tsh,tsf,tsp])))}function eeU(){eeU=A,tes=euY((ey4(),eow(vx(e4z,1),eU4,363,0,[ter,tea,teo,tei,ten])))}function eeH(){eeH=A,tnF=euY((ef_(),eow(vx(e48,1),eU4,163,0,[tnj,tnD,tnN,tnP,tnR])))}function ee$(){ee$=A,tfM=euY((eEM(),eow(vx(e5W,1),eU4,316,0,[tfE,tfS,tfT,tfk,tfx])))}function eez(){eez=A,tdb=euY((epx(),eow(vx(e5Q,1),eU4,175,0,[tdh,tdd,tdl,tdp,tdf])))}function eeG(){eeG=A,tfl=euY((eOB(),eow(vx(e5U,1),eU4,355,0,[tfo,tfa,tfu,tfs,tfc])))}function eeW(){eeW=A,e8a=euY((e_x(),eow(vx(e4k,1),eU4,356,0,[e8e,e8t,e8n,e8r,e8i])))}function eeK(){eeK=A,tpw=euY((ec3(),eow(vx(e55,1),eU4,103,0,[tpv,tpg,tpm,tpb,tpy])))}function eeV(){eeV=A,tbi=euY((epT(),eow(vx(e6n,1),eU4,249,0,[tbt,tbr,tp7,tbe,tbn])))}function eeq(){eeq=A,tbB=euY((eYu(),eow(vx(e6a,1),eGj,61,0,[tbF,tbw,tby,tbj,tbY])))}function eeZ(e,t){var n;return(n=Pp(Bp(e.a,t),134))||(n=new eX,Um(e.a,t,n)),n}function eeX(e){var t;return!!(t=Pp(e_k(e,(eBU(),ttU)),305))&&t.a==e}function eeJ(e){var t;return!!(t=Pp(e_k(e,(eBU(),ttU)),305))&&t.i==e}function eeQ(e,t){return BJ(t),FD(e),!!e.d.Ob()&&(t.td(e.d.Pb()),!0)}function ee1(e){return ecd(e,eUu)>0?eUu:0>ecd(e,eHt)?eHt:jE(e)}function ee0(e){return e<3?(enG(e,eU0),e+1):e=0&&t=-.01&&e.a<=ezs&&(e.a=0),e.b>=-.01&&e.b<=ezs&&(e.b=0),e}function ee5(e,t){return t==(I8(),I8(),e2p)?e.toLocaleLowerCase():e.toLowerCase()}function ee6(e){return((2&e.i)!=0?"interface ":(1&e.i)!=0?"":"class ")+(LW(e),e.o)}function ee9(e){var t,n;n=t=new mD,JL((e.q||(e.q=new FQ(tgi,e,11,10)),e.q),n)}function ee8(e,t){var n;return n=t>0?t-1:t,yr(yi(eny(P6(new mV,n),e.n),e.j),e.k)}function ee7(e,t,n,r){var i;e.j=-1,ex8(e,eSu(e,t,n),(_4(),(i=Pp(t,66).Mj()).Ok(r)))}function ete(e){this.g=e,this.f=new p0,this.a=eB4.Math.min(this.g.c.c,this.g.d.c)}function 
ett(e){this.b=new p0,this.a=new p0,this.c=new p0,this.d=new p0,this.e=e}function etn(e,t){this.a=new p2,this.e=new p2,this.b=(euJ(),tsi),this.c=e,this.b=t}function etr(e,t,n){CK.call(this),etk(this),this.a=e,this.c=n,this.b=t.d,this.f=t.e}function eti(e){this.d=e,this.c=e.c.vc().Kc(),this.b=null,this.a=null,this.e=(m5(),e0m)}function eta(e){if(e<0)throw p7(new gL("Illegal Capacity: "+e));this.g=this.ri(e)}function eto(e,t){if(0>e||e>t)throw p7(new va("fromIndex: 0, toIndex: "+e+e$m+t))}function ets(e){var t;if(e.a==e.b.a)throw p7(new bC);return t=e.a,e.c=t,e.a=e.a.e,t}function etu(e){var t;A4(!!e.c),t=e.c.a,etw(e.d,e.c),e.b==e.c?e.b=t:--e.a,e.c=null}function etc(e,t){var n;return el3(e),n=new HA(e,e.a.rd(),4|e.a.qd(),t),new R1(e,n)}function etl(e,t){var n,r;return(n=Pp(ecA(e.d,t),14))?(r=t,e.e.pc(r,n)):null}function etf(e,t){var n,r;for(r=e.Kc();r.Ob();)eo3(n=Pp(r.Pb(),70),(eBU(),tnt),t)}function etd(e){var t;return(t=gP(LV(e_k(e,(eBy(),tak)))))<0&&eo3(e,tak,t=0),t}function eth(e,t,n){var r;ev_(n,r=eB4.Math.max(0,e.b/2-.5),1),P_(t,new EJ(n,r))}function etp(e,t,n){var r;return zy(Ra(r=e.a.e[Pp(t.a,10).p]-e.a.e[Pp(n.a,10).p]))}function etb(e,t,n,r,i,a){var o;o=ZD(r),Gs(o,i),Go(o,a),exg(e.a,r,new DT(o,t,n.f))}function etm(e,t){var n;if(!(n=eAh(e.Tg(),t)))throw p7(new gL(eZV+t+eZX));return n}function etg(e,t){var n;for(n=e;z$(n);)if((n=z$(n))==t)return!0;return!1}function etv(e,t){var n,r,i;for(i=0,r=t.a.cd(),n=Pp(t.a.dd(),14).gc();i0&&(e.a/=t,e.b/=t),e}function etP(e){var t;return e.w?e.w:((t=Ww(e))&&!t.kh()&&(e.w=t),t)}function etR(e){var t;return null==e?null:e_e(t=Pp(e,190),t.length)}function etj(e,t){if(null==e.g||t>=e.i)throw p7(new xJ(t,e.i));return e.li(t,e.g[t])}function etF(e){var t,n;for(t=e.a.d.j,n=e.c.d.j;t!=n;)erC(e.b,t),t=elI(t);erC(e.b,t)}function etY(e){var t;for(t=0;t=14&&t<=16)),e}function etW(e,t,n){var r=function(){return e.apply(r,arguments)};return t.apply(r,n),r}function etK(e,t,n){var r,i;r=t;do 
i=gP(e.p[r.p])+n,e.p[r.p]=i,r=e.a[r.p];while(r!=t)}function etV(e,t){var n,r;r=e.a,n=elr(e,t,null),r==t||e.e||(n=eFr(e,t,n)),n&&n.Fi()}function etq(e,t){return Mc(),enj(eHe),eB4.Math.abs(e-t)<=eHe||e==t||isNaN(e)&&isNaN(t)}function etZ(e,t){return Mc(),enj(eHe),eB4.Math.abs(e-t)<=eHe||e==t||isNaN(e)&&isNaN(t)}function etX(e,t){return e_z(),ME(e.b.c.length-e.e.c.length,t.b.c.length-t.e.c.length)}function etJ(e,t){return yk(eif(e,t,jE(efn(eUJ,Ux(jE(efn(null==t?0:esj(t),eUQ)),15)))))}function etQ(){etQ=A,e8R=euY((eEn(),eow(vx(e4P,1),eU4,267,0,[e8N,e8D,e8C,e8P,e8I,e8L])))}function et1(){et1=A,tdJ=euY((eyY(),eow(vx(e54,1),eU4,291,0,[tdX,tdZ,tdq,tdK,tdW,tdV])))}function et0(){et0=A,tdD=euY((ebx(),eow(vx(e53,1),eU4,248,0,[tdM,tdL,tdC,tdI,tdO,tdA])))}function et2(){et2=A,teI=euY((eSg(),eow(vx(e4K,1),eU4,227,0,[teO,teL,teM,teA,teC,teT])))}function et3(){et3=A,ttm=euY((e_3(),eow(vx(e43,1),eU4,275,0,[ttp,ttf,ttb,tth,ttd,ttl])))}function et4(){et4=A,ttc=euY((eyd(),eow(vx(e42,1),eU4,274,0,[tto,tta,ttu,tti,tts,ttr])))}function et5(){et5=A,tst=euY((ewY(),eow(vx(e47,1),eU4,313,0,[to7,to9,to5,to6,tse,to8])))}function et6(){et6=A,te7=euY((eEf(),eow(vx(e41,1),eU4,276,0,[te4,te3,te6,te5,te8,te9])))}function et9(){et9=A,tu7=euY((eS_(),eow(vx(e5A,1),eU4,327,0,[tu8,tu4,tu6,tu5,tu9,tu3])))}function et8(){et8=A,tbv=euY((ekU(),eow(vx(e6i,1),eU4,273,0,[tbm,tbp,tbb,tbh,tbd,tbg])))}function et7(){et7=A,tpR=euY((e_a(),eow(vx(e58,1),eU4,312,0,[tpN,tpI,tpP,tpL,tpD,tpC])))}function ene(){return eT7(),eow(vx(e6t,1),eU4,93,0,[tp1,tpQ,tp2,tp9,tp6,tp5,tp3,tp4,tp0])}function ent(e,t){var n;n=e.a,e.a=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qo(e,0,n,e.a))}function enn(e,t){var n;n=e.b,e.b=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qo(e,1,n,e.b))}function enr(e,t){var n;n=e.b,e.b=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qo(e,3,n,e.b))}function eni(e,t){var n;n=e.f,e.f=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qo(e,3,n,e.f))}function ena(e,t){var n;n=e.g,e.g=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qo(e,4,n,e.g))}function 
eno(e,t){var n;n=e.i,e.i=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qo(e,5,n,e.i))}function ens(e,t){var n;n=e.j,e.j=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qo(e,6,n,e.j))}function enu(e,t){var n;n=e.j,e.j=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qo(e,1,n,e.j))}function enc(e,t){var n;n=e.c,e.c=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qo(e,4,n,e.c))}function enl(e,t){var n;n=e.k,e.k=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qo(e,2,n,e.k))}function enf(e,t){var n;n=e.d,e.d=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qs(e,2,n,e.d))}function end(e,t){var n;n=e.s,e.s=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qs(e,4,n,e.s))}function enh(e,t){var n;n=e.t,e.t=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new qs(e,5,n,e.t))}function enp(e,t){var n;n=e.F,e.F=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,5,n,t))}function enb(e,t){var n;return(n=Pp(Bp((_5(),tmU),e),55))?n.xj(t):Je(e1R,eUp,1,t,5,1)}function enm(e,t){var n,r;return(n=t in e.a)&&(r=zR(e,t).he())?r.a:null}function eng(e,t){var n,r,i;return n=(r=(yT(),i=new o0),t&&eAu(r,t),r),eri(n,e),n}function env(e,t,n){if(euu(e,n),!e.Bk()&&null!=n&&!e.wj(n))throw p7(new bS);return n}function eny(e,t){return e.n=t,e.n?(e.f=new p0,e.e=new p0):(e.f=null,e.e=null),e}function enw(e,t,n,r,i,a){var o;return enA(n,o=Y6(e,t)),o.i=i?8:0,o.f=r,o.e=i,o.g=a,o}function en_(e,t,n,r,i){this.d=t,this.k=r,this.f=i,this.o=-1,this.p=1,this.c=e,this.a=n}function enE(e,t,n,r,i){this.d=t,this.k=r,this.f=i,this.o=-1,this.p=2,this.c=e,this.a=n}function enS(e,t,n,r,i){this.d=t,this.k=r,this.f=i,this.o=-1,this.p=6,this.c=e,this.a=n}function enk(e,t,n,r,i){this.d=t,this.k=r,this.f=i,this.o=-1,this.p=7,this.c=e,this.a=n}function enx(e,t,n,r,i){this.d=t,this.j=r,this.e=i,this.o=-1,this.p=4,this.c=e,this.a=n}function enT(e,t){var n,r,i,a;for(i=0,a=(r=t).length;i=0),0>ehP(e.d,e.c)&&(e.a=e.a-1&e.d.a.length-1,e.b=e.d.c),e.c=-1}function enR(e){return e.a<54?e.f<0?-1:e.f>0?1:0:(e.c||(e.c=euK(e.f)),e.c).e}function enj(e){if(!(e>=0))throw p7(new gL("tolerance ("+e+") must be >= 0"));return e}function 
enF(){return tdc||(tdc=new eC$,es4(tdc,eow(vx(e20,1),eUp,130,0,[new cZ]))),tdc}function enY(){enY=A,tsP=new SL(ezo,0),tsD=new SL("INPUT",1),tsN=new SL("OUTPUT",2)}function enB(){enB=A,teB=new Sl("ARD",0),teH=new Sl("MSD",1),teU=new Sl("MANUAL",2)}function enU(){enU=A,tur=new SR("BARYCENTER",0),tui=new SR(eG7,1),tua=new SR(eWe,2)}function enH(e,t){var n;if(n=e.gc(),t<0||t>n)throw p7(new Ii(t,n));return new IB(e,t)}function en$(e,t){var n;return M4(t,42)?e.c.Mc(t):(n=edG(e,t),ehx(e,t),n)}function enz(e,t,n){return eu2(e,t),er3(e,n),end(e,0),enh(e,1),els(e,!0),eli(e,!0),e}function enG(e,t){if(e<0)throw p7(new gL(t+" cannot be negative but was: "+e));return e}function enW(e,t){var n,r;for(n=0,r=e.gc();n0)?Pp(RJ(n.a,r-1),10):null}function ert(e,t){var n;n=e.k,e.k=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,2,n,e.k))}function ern(e,t){var n;n=e.f,e.f=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,8,n,e.f))}function err(e,t){var n;n=e.i,e.i=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,7,n,e.i))}function eri(e,t){var n;n=e.a,e.a=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,8,n,e.a))}function era(e,t){var n;n=e.b,e.b=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,0,n,e.b))}function ero(e,t){var n;n=e.b,e.b=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,0,n,e.b))}function ers(e,t){var n;n=e.c,e.c=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,1,n,e.c))}function eru(e,t){var n;n=e.c,e.c=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,1,n,e.c))}function erc(e,t){var n;n=e.c,e.c=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,4,n,e.c))}function erl(e,t){var n;n=e.d,e.d=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,1,n,e.d))}function erf(e,t){var n;n=e.D,e.D=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,2,n,e.D))}function erd(e,t){e.r>0&&e.c0&&0!=e.g&&erd(e.i,t/e.r*e.i.d))}function erh(e,t,n){var r;e.b=t,e.a=n,r=(512&e.a)==512?new mU:new u7,e.c=eLV(r,e.b,e.a)}function erp(e,t){return eLt(e.e,t)?(_4(),eec(t)?new RA(t,e):new xe(t,e)):new xr(t,e)}function erb(e,t){return 
yS(eid(e.a,t,jE(efn(eUJ,Ux(jE(efn(null==t?0:esj(t),eUQ)),15)))))}function erm(e,t,n){return Qz(e,new f9(t),new ea,new f8(n),eow(vx(e2L,1),eU4,132,0,[]))}function erg(e){var t,n;return 0>e?new _e:(t=e+1,n=new Zi(t,e),new L0(null,n))}function erv(e,t){var n;return Hj(),n=new w8(1),xd(e)?Ge(n,e,t):eS9(n.f,e,t),new f$(n)}function ery(e,t){var n,r;return(n=e.o+e.p)<(r=t.o+t.p)?-1:n==r?0:1}function erw(e){var t;return(t=e_k(e,(eBU(),tnc)),M4(t,160))?edo(Pp(t,160)):null}function er_(e){var t;return(t=esi(e=eB4.Math.max(e,2)),e>t)?(t<<=1)>0?t:eU2:t}function erE(e){switch(OZ(3!=e.e),e.e){case 2:return!1;case 0:return!0}return Zk(e)}function erS(e,t){var n;return!!M4(t,8)&&(n=Pp(t,8),e.a==n.a&&e.b==n.b)}function erk(e,t,n){var r,i,a;return a=t>>5,i=31&t,r=WM(Fy(e.n[n][a],jE(Fg(i,1))),3)}function erx(e,t){var n,r;for(r=t.vc().Kc();r.Ob();)evQ(e,(n=Pp(r.Pb(),42)).cd(),n.dd())}function erT(e,t){var n;n=new e0,Pp(t.b,65),Pp(t.b,65),Pp(t.b,65),ety(t.a,new N9(e,n,t))}function erM(e,t){var n;n=e.b,e.b=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,21,n,e.b))}function erO(e,t){var n;n=e.d,e.d=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,11,n,e.d))}function erA(e,t){var n;n=e.j,e.j=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,13,n,e.j))}function erL(e,t,n){var r,i,a;for(a=e.a.length-1,i=e.b,r=0;r>>31;0!=r&&(e[n]=r)}function eip(e,t){var n,r;for(Hj(),r=new p0,n=0;n0&&(this.g=this.ri(this.i+(this.i/8|0)+1),e.Qc(this.g))}function eiR(e,t){PJ.call(this,tgd,e,t),this.b=this,this.a=eAY(e.Tg(),ee2(this.e.Tg(),this.c))}function eij(e,t){var n,r;for(BJ(t),r=t.vc().Kc();r.Ob();)n=Pp(r.Pb(),42),e.zc(n.cd(),n.dd())}function eiF(e,t,n){var r;for(r=n.Kc();r.Ob();)if(!Vq(e,t,r.Pb()))return!1;return!0}function eiY(e,t,n,r,i){var a;return n&&(a=edv(t.Tg(),e.c),i=n.gh(t,-1-(-1==a?r:a),null,i)),i}function eiB(e,t,n,r,i){var a;return n&&(a=edv(t.Tg(),e.c),i=n.ih(t,-1-(-1==a?r:a),null,i)),i}function eiU(e){var t;if(-2==e.b){if(0==e.e)t=-1;else for(t=0;0==e.a[t];t++);e.b=t}return e.b}function 
eiH(e){switch(e.g){case 2:return eYu(),tbY;case 4:return eYu(),tby;default:return e}}function ei$(e){switch(e.g){case 1:return eYu(),tbj;case 3:return eYu(),tbw;default:return e}}function eiz(e){var t,n,r;return e.j==(eYu(),tbw)&&(t=eTt(e),n=Aa(t,tby),(r=Aa(t,tbY))||r&&n)}function eiG(e){var t,n;return t=Pp(e.e&&e.e(),9),n=Pp(YR(t,t.length),9),new I1(t,n,t.length)}function eiW(e,t){ewG(t,eG9,1),efJ(_p(new dp((__(),new U7(e,!1,!1,new tO))))),eEj(t)}function eiK(e,t){return OQ(),xd(e)?ZZ(e,Lq(t)):xf(e)?F_(e,LV(t)):xl(e)?Fw(e,LK(t)):e.wd(t)}function eiV(e,t){t.q=e,e.d=eB4.Math.max(e.d,t.r),e.b+=t.d+(0==e.a.c.length?0:e.c),P_(e.a,t)}function eiq(e,t){var n,r,i,a;return i=e.c,n=e.c+e.b,a=e.d,r=e.d+e.a,t.a>i&&t.aa&&t.b1||e.Ob())return++e.a,e.g=0,t=e.i,e.Ob(),t;throw p7(new bC)}function eaA(e){var t;return Ma(),En(tuT,e)||((t=new af).a=e,CM(tuT,e,t)),Pp(UA(tuT,e),635)}function eaL(e){var t,n,r,i;return r=0,(i=e)<0&&(i+=eHW,r=eH$),n=zy(i/eHG),Mk(t=zy(i-n*eHG),n,r)}function eaC(e){var t,n,r;for(r=0,n=new _t(e.a);n.aecd(e,0)&&(e=PN(e)),64-(0!=(t=jE(Fv(e,32)))?exv(t):exv(jE(e))+32)}function eaQ(e){var t;return t=Pp(e_k(e,(eBU(),tt1)),61),e.k==(eEn(),e8C)&&(t==(eYu(),tbY)||t==tby)}function ea1(e,t,n){var r,i;(i=Pp(e_k(e,(eBy(),taR)),74))&&(eu_(r=new mE,0,i),etH(r,n),er7(t,r))}function ea0(e,t,n){var r,i,a,o;r=(o=Bq(e)).d,i=o.c,a=e.n,t&&(a.a=a.a-r.b-i.a),n&&(a.b=a.b-r.d-i.b)}function ea2(e,t){var n,r;return(n=e.j)!=(r=t.j)?n.g-r.g:e.p==t.p?0:n==(eYu(),tbw)?e.p-t.p:t.p-e.p}function ea3(e){var t,n;for(eYp(e),n=new fz(e.d);n.a>22),i=e.h+t.h+(r>>22),Mk(n&eHH,r&eHH,i&eH$)}function eor(e,t){var n,r,i;return n=e.l-t.l,r=e.m-t.m+(n>>22),i=e.h-t.h+(r>>22),Mk(n&eHH,r&eHH,i&eH$)}function eoi(e){var t;return e<128?((t=(RH(),e0Y)[e])||(t=e0Y[e]=new fA(e)),t):new fA(e)}function eoa(e){var t;return M4(e,78)?e:((t=e&&e.__java$exception)||(t=new euq(e),by(t)),t)}function eoo(e){if(M4(e,186))return Pp(e,118);if(e)return null;throw p7(new gD(eXR))}function 
eos(e,t){if(null==t)return!1;for(;e.a!=e.b;)if(ecX(t,ecn(e)))return!0;return!1}function eou(e){return!!e.a.Ob()||e.a==e.d&&(e.a=new KU(e.e.f),e.a.Ob())}function eoc(e,t){var n,r;return 0!=(r=(n=t.Pc()).length)&&(PO(e.c,e.c.length,n),!0)}function eol(e,t,n){var r,i;for(i=t.vc().Kc();i.Ob();)r=Pp(i.Pb(),42),e.yc(r.cd(),r.dd(),n);return e}function eof(e,t){var n,r;for(r=new fz(e.b);r.a=0,"Negative initial capacity"),PG(t>=0,"Non-positive load factor"),Yy(this)}function eoV(e,t,n){return!(e>=128)&&(e<64?xg(WM(Fg(1,e),n),0):xg(WM(Fg(1,e-64),t),0))}function eoq(e,t){return!!e&&!!t&&e!=t&&0>efT(e.b.c,t.b.c+t.b.b)&&0>efT(t.b.c,e.b.c+e.b.b)}function eoZ(e){var t,n,r;return n=e.n,r=e.o,t=e.d,new Hr(n.a-t.b,n.b-t.d,r.a+(t.b+t.c),r.b+(t.d+t.a))}function eoX(e){var t,n,r,i;for(n=e.a,r=0,i=n.length;r(r=e.gc()))throw p7(new Ii(t,r));return e.hi()&&(n=zG(e,n)),e.Vh(t,n)}function eo2(e,t,n){return null==n?(e.q||(e.q=new p2),Z3(e.q,t)):(e.q||(e.q=new p2),Um(e.q,t,n)),e}function eo3(e,t,n){return null==n?(e.q||(e.q=new p2),Z3(e.q,t)):(e.q||(e.q=new p2),Um(e.q,t,n)),e}function eo4(e){var t,n;return n=new Z5,eaW(n,e),eo3(n,(erV(),e9j),e),t=new p2,eNY(e,n,t),eFS(e,n,t),n}function eo5(e){var t,n,r;for(eLG(),n=Je(e50,eUP,8,2,0,1),r=0,t=0;t<2;t++)r+=.5,n[t]=emh(r,e);return n}function eo6(e,t){var n,r,i,a;for(a=0,n=!1,r=e.a[t].length;a>=1);return t}function esa(e){var t,n;return 32==(n=exv(e.h))?32==(t=exv(e.m))?exv(e.l)+32:t+20-10:n-12}function eso(e){var t;return null==(t=e.a[e.b])?null:(Bc(e.a,e.b,null),e.b=e.b+1&e.a.length-1,t)}function ess(e){var t,n;return t=e.t-e.k[e.o.p]*e.d+e.j[e.o.p]>e.f,n=e.u+e.e[e.o.p]*e.d>e.f*e.s*e.d,t||n}function esu(e,t,n){var r,i;return r=new Js(t,n),i=new H,e.b=eLg(e,e.b,r,i),i.b||++e.c,e.b.b=!1,i.d}function esc(e,t,n){var r,i,a,o;for(o=ecZ(t,n),a=0,i=o.Kc();i.Ob();)r=Pp(i.Pb(),11),Um(e.c,r,ell(a++))}function esl(e){var t,n;for(n=new fz(e.a.b);n.an&&(n=e[t]);return n}function esg(e,t,n){var r;return r=new 
p0,eA0(e,t,r,(eYu(),tby),!0,!1),eA0(e,n,r,tbY,!1,!1),r}function esv(e,t,n){var r,i,a,o;return a=null,i=Kq(o=t,"labels"),a=(eT2((r=new kG(e,n)).a,r.b,i),i)}function esy(e,t,n,r){var i;return!(!(i=eMv(e,t,n,r))&&(i=elh(e,n,r)))||eR3(e,t,i)?i:null}function esw(e,t,n,r){var i;return!(!(i=eMy(e,t,n,r))&&(i=elp(e,n,r)))||eR3(e,t,i)?i:null}function es_(e,t){var n;for(n=0;n1||t>=0&&e.b<3)}function esP(e){var t,n,r;for(t=new mE,r=epL(e,0);r.b!=r.d.c;)n=Pp(Vv(r),8),Ls(t,0,new TS(n));return t}function esR(e){var t,n;for(n=new fz(e.a.b);n.ar?1:0}function esJ(e,t){return!!eO2(e,t)&&(exg(e.b,Pp(e_k(t,(eBU(),ttX)),21),t),P7(e.a,t),!0)}function esQ(e){var t,n;(t=Pp(e_k(e,(eBU(),tng)),10))&&(QA((n=t.c).a,t),0==n.a.c.length&&QA(Bq(t).b,n))}function es1(e){return e2M?Je(e2k,e$_,572,0,0,1):Pp(epg(e.a,Je(e2k,e$_,572,e.a.c.length,0,1)),842)}function es0(e,t,n,r){return U_(),new gt(eow(vx(e1$,1),eUK,42,0,[(eb2(e,t),new wD(e,t)),(eb2(n,r),new wD(n,r))]))}function es2(e,t,n){var r,i;return enz(i=r=new mD,t,n),JL((e.q||(e.q=new FQ(tgi,e,11,10)),e.q),i),i}function es3(e){var t,n,r,i;for(t=0,r=Je(e17,eUP,2,n=(i=Eo(tmx,e)).length,6,1);t=e.b.c.length)&&(es6(e,2*t+1),(n=2*t+2)=0&&e[r]===t[r];r--);return r<0?0:Ei(WM(e[r],eH8),WM(t[r],eH8))?-1:1}function es7(e,t){var n,r;for(r=epL(e,0);r.b!=r.d.c;)(n=Pp(Vv(r),214)).e.length>0&&(t.td(n),n.i&&elk(n))}function eue(e,t){var n,r;return r=Pp(eaS(e.a,4),126),n=Je(e6N,eJM,415,t,0,1),null!=r&&ePD(r,0,n,0,r.length),n}function eut(e,t){var n;return n=new eCg((256&e.f)!=0,e.i,e.a,e.d,(16&e.f)!=0,e.j,e.g,t),null!=e.e||(n.c=e),n}function eun(e,t){var n,r;for(r=e.Zb().Cc().Kc();r.Ob();)if((n=Pp(r.Pb(),14)).Hc(t))return!0;return!1}function eur(e,t,n,r,i){var a,o;for(o=n;o<=i;o++)for(a=t;a<=r;a++)if(emy(e,a,o))return!0;return!1}function eui(e,t,n){var r,i,a,o;for(BJ(n),o=!1,a=e.Zc(t),i=n.Kc();i.Ob();)r=i.Pb(),a.Rb(r),o=!0;return o}function eua(e,t){var n;return e===t||!!M4(t,83)&&(n=Pp(t,83),eEB(Fc(e),n.vc()))}function euo(e,t,n){var 
r,i;for(i=n.Kc();i.Ob();)if(r=Pp(i.Pb(),42),e.re(t,r.dd()))return!0;return!1}function eus(e,t,n){return e.d[t.p][n.p]||(ebp(e,t,n),e.d[t.p][n.p]=!0,e.d[n.p][t.p]=!0),e.a[t.p][n.p]}function euu(e,t){if(!e.ai()&&null==t)throw p7(new gL("The 'no null' constraint is violated"));return t}function euc(e,t){null==e.D&&null!=e.B&&(e.D=e.B,e.B=null),erf(e,null==t?null:(BJ(t),t)),e.C&&e.yk(null)}function eul(e,t){var n;return!!(e&&e!=t&&Ln(t,(eBU(),tt8)))&&(n=Pp(e_k(t,(eBU(),tt8)),10))!=e}function euf(e){switch(e.i){case 2:return!0;case 1:return!1;case -1:++e.c;default:return e.pl()}}function eud(e){switch(e.i){case -2:return!0;case -1:return!1;case 1:--e.c;default:return e.ql()}}function euh(e){zL.call(this,"The given string does not match the expected format for individual spacings.",e)}function eup(){eup=A,tmr=new kN("ELK",0),tmi=new kN("JSON",1),tmn=new kN("DOT",2),tma=new kN("SVG",3)}function eub(){eub=A,tc5=new S3(eGR,0),tc6=new S3("RADIAL_COMPACTION",1),tc9=new S3("WEDGE_COMPACTION",2)}function eum(){eum=A,e2B=new Ed("CONCURRENT",0),e2U=new Ed("IDENTITY_FINISH",1),e2H=new Ed("UNORDERED",2)}function eug(){eug=A,e6q=(_y(),e6G),e6V=new xX(ezj,e6q),e6K=new pO(ezF),e6Z=new pO(ezY),e6X=new pO(ezB)}function euv(){euv=A,e72=new n1,e73=new n0,e70=new n2,e71=new n3,e7J=(BJ(e7Q=new n4),new P)}function euy(){euy=A,tsz=new SD("CONSERVATIVE",0),tsG=new SD("CONSERVATIVE_SOFT",1),tsW=new SD("SLOPPY",2)}function euw(){euw=A,tpH=new T3(15),tpU=new T2((eBB(),thN),tpH),tp$=th3,tpj=td3,tpF=thx,tpB=thO,tpY=thM}function eu_(e,t,n){var r,i,a;for(r=new _n,a=epL(n,0);a.b!=a.d.c;)i=Pp(Vv(a),8),P7(r,new TS(i));eui(e,t,r)}function euE(e){var t,n,r;for(t=0,r=Je(e50,eUP,8,e.b,0,1),n=epL(e,0);n.b!=n.d.c;)r[t++]=Pp(Vv(n),8);return r}function euS(e){var t;return 0!=(t=(e.a||(e.a=new FQ(tgn,e,9,5)),e.a)).i?_K(Pp(etj(t,0),678)):null}function euk(e,t){var n;return(n=eft(e,t),Ei(WA(e,t),0)|xm(WA(e,n),0))?n:eft(eUY,WA(Fy(n,63),1))}function eux(e,t){var 
n;n=null!=epB((edk(),to3))&&null!=t.wg()?gP(LV(t.wg()))/gP(LV(epB(to3))):1,Um(e.b,t,n)}function euT(e,t){var n,r;return(n=Pp(e.d.Bc(t),14))?((r=e.e.hc()).Gc(n),e.e.d-=n.gc(),n.$b(),r):null}function euM(e,t){var n,r;if(0!=(r=e.c[t]))for(e.c[t]=0,e.d-=r,n=t+1;n0)return FP(t-1,e.a.c.length),ZV(e.a,t-1);throw p7(new bL)}function euA(e,t,n){if(t<0)throw p7(new gE(eq1+t));tt)throw p7(new gL(e$x+e+e$T+t));if(e<0||t>n)throw p7(new va(e$x+e+e$M+t+e$m+n))}function euC(e){if(!e.a||(8&e.a.i)==0)throw p7(new gC("Enumeration class expected for layout option "+e.f))}function euI(e){var t;++e.j,0==e.i?e.g=null:e.ieVq?e-n>eVq:n-e>eVq)}function euG(e,t){return!e||t&&!e.j||M4(e,124)&&0==Pp(e,124).a.b?0:e.Re()}function euW(e,t){return!e||t&&!e.k||M4(e,124)&&0==Pp(e,124).a.a?0:e.Se()}function euK(e){return(eLQ(),e<0)?-1!=e?new ep4(-1,-e):e03:e<=10?e05[zy(e)]:new ep4(1,e)}function euV(e){throw eoW(),p7(new gs("Unexpected typeof result '"+e+"'; please report this bug to the GWT team"))}function euq(e){g0(),MV(this),HD(this),this.e=e,eA9(this,e),this.g=null==e?eUg:efF(e),this.a="",this.b=e,this.a=""}function euZ(){this.a=new a4,this.f=new hW(this),this.b=new hK(this),this.i=new hV(this),this.e=new hq(this)}function euX(){m6.call(this,new Ju(ee0(16))),enG(2,eUN),this.b=2,this.a=new Uc(null,null,0,null),bp(this.a,this.a)}function euJ(){euJ=A,tsn=new SS("DUMMY_NODE_OVER",0),tsr=new SS("DUMMY_NODE_UNDER",1),tsi=new SS("EQUAL",2)}function euQ(){euQ=A,e8u=zD(eow(vx(e55,1),eU4,103,0,[(ec3(),tpm),tpg])),e8c=zD(eow(vx(e55,1),eU4,103,0,[tpy,tpb]))}function eu1(e){return(eYu(),tbC).Hc(e.j)?gP(LV(e_k(e,(eBU(),tnM)))):esp(eow(vx(e50,1),eUP,8,0,[e.i.n,e.n,e.a])).b}function eu0(e){var t,n,r,i;for(n=(r=e.b.a).a.ec().Kc();n.Ob();)t=Pp(n.Pb(),561),i=new eMq(t,e.e,e.f),P_(e.g,i)}function eu2(e,t){var n,r,i;r=e.nk(t,null),i=null,t&&(i=(yO(),n=new p5),etV(i,e.r)),(r=ew3(e,i,r))&&r.Fi()}function eu3(e,t){var n,r;for(r=0!=eMU(e.d,1),n=!0;n;)n=!1,n=t.c.Tf(t.e,r),n|=eAb(e,t,r,!1),r=!r;er0(e)}function eu4(e,t){var 
n,r,i;return r=!1,n=t.q.d,t.di&&(eyC(t.q,i),r=n!=t.q.d)),r}function eu5(e,t){var n,r,i,a,o,s,u,c;return u=t.i,c=t.j,i=(r=e.f).i,a=r.j,o=u-i,s=c-a,n=eB4.Math.sqrt(o*o+s*s)}function eu6(e,t){var n,r;return(r=ehO(e))||(tmT||(tmT=new sh),n=(eRe(),eSR(t)),JL((r=new pq(n)).Vk(),e)),r}function eu9(e,t){var n,r;return(n=Pp(e.c.Bc(t),14))?((r=e.hc()).Gc(n),e.d-=n.gc(),n.$b(),e.mc(r)):e.jc()}function eu8(e,t){var n;for(n=0;n=e.c.b:e.a<=e.c.b))throw p7(new bC);return t=e.a,e.a+=e.c.c,++e.b,ell(t)}function eci(e){var t;return t=new ete(e),Kv(e.a,e8w,new g$(eow(vx(e4M,1),eUp,369,0,[t]))),t.d&&P_(t.f,t.d),t.f}function eca(e){var t;return eaW(t=new MA(e.a),e),eo3(t,(eBU(),tnc),e),t.o.a=e.g,t.o.b=e.f,t.n.a=e.i,t.n.b=e.j,t}function eco(e,t,n,r){var i,a;for(a=e.Kc();a.Ob();)(i=Pp(a.Pb(),70)).n.a=t.a+(r.a-i.o.a)/2,i.n.b=t.b,t.b+=i.o.b+n}function ecs(e,t,n){var r,i;for(i=t.a.a.ec().Kc();i.Ob();)if($o(e,r=Pp(i.Pb(),57),n))return!0;return!1}function ecu(e){var t,n;for(n=new fz(e.r);n.a=0?t:-t;r>0;)r%2==0?(n*=n,r=r/2|0):(i*=n,r-=1);return t<0?1/i:i}function ecw(e,t){var n,r,i;for(i=1,n=e,r=t>=0?t:-t;r>0;)r%2==0?(n*=n,r=r/2|0):(i*=n,r-=1);return t<0?1/i:i}function ec_(e){var t,n,r,i;if(null!=e){for(n=0;n0&&esJ(n=Pp(RJ(e.a,e.a.c.length-1),570),t))&&P_(e.a,new Zn(t))}function ecP(e){var t,n;Dj(),t=e.d.c-e.e.c,ety((n=Pp(e.g,145)).b,new d7(t)),ety(n.c,new he(t)),qX(n.i,new ht(t))}function ecR(e){var t;return t=new vc,t.a+="VerticalSegment ",xT(t,e.e),t.a+=" ",xM(t,OU(new ve,new fz(e.k))),t.a}function ecj(e){var t;return(t=Pp(eef(e.c.c,""),229))||(t=new GM(v3(v2(new of,""),"Other")),epy(e.c.c,"",t)),t}function ecF(e){var t;return(64&e.Db)!=0?eMT(e):(t=new O1(eMT(e)),t.a+=" (name: ",xk(t,e.zb),t.a+=")",t.a)}function ecY(e,t,n){var r,i;return i=e.sb,e.sb=t,(4&e.Db)!=0&&(1&e.Db)==0&&(r=new FX(e,1,4,i,t),n?n.Ei(r):n=r),n}function ecB(e,t){var n,r,i;for(n=0,i=efr(e,t).Kc();i.Ob();)n+=null!=e_k(r=Pp(i.Pb(),11),(eBU(),tng))?1:0;return n}function ecU(e,t,n){var 
r,i,a;for(r=0,a=epL(e,0);a.b!=a.d.c&&!((i=gP(LV(Vv(a))))>n);)i>=t&&++r;return r}function ecH(e,t,n){var r,i;return r=new Q$(e.e,3,13,null,(i=t.c)||(eBK(),tgA),ebv(e,t),!1),n?n.Ei(r):n=r,n}function ec$(e,t,n){var r,i;return r=new Q$(e.e,4,13,(i=t.c)||(eBK(),tgA),null,ebv(e,t),!1),n?n.Ei(r):n=r,n}function ecz(e,t,n){var r,i;return i=e.r,e.r=t,(4&e.Db)!=0&&(1&e.Db)==0&&(r=new FX(e,1,8,i,e.r),n?n.Ei(r):n=r),n}function ecG(e,t){var n,r;return(r=(n=Pp(t,676)).vk())||n.wk(r=M4(t,88)?new k9(e,Pp(t,26)):new Ke(e,Pp(t,148))),r}function ecW(e,t,n){var r;e.qi(e.i+1),r=e.oi(t,n),t!=e.i&&ePD(e.g,t,e.g,t+1,e.i-t),Bc(e.g,t,r),++e.i,e.bi(t,n),e.ci()}function ecK(e,t){var n;return t.a&&(n=t.a.a.length,e.a?xM(e.a,e.b):e.a=new O0(e.d),Ka(e.a,t.a,t.d.length,n)),e}function ecV(e,t){var n,r,i,a;if(t.vi(e.a),null!=(a=Pp(eaS(e.a,8),1936)))for(r=0,i=(n=a).length;rn)throw p7(new gE(e$x+e+e$M+t+", size: "+n));if(e>t)throw p7(new gL(e$x+e+e$T+t))}function ec6(e,t,n){if(t<0)ekN(e,n);else{if(!n.Ij())throw p7(new gL(eZV+n.ne()+eZq));Pp(n,66).Nj().Vj(e,e.yh(),t)}}function ec9(e,t,n,r,i,a,o,s){var u;for(u=n;a=r||t=s.ue(e[t],e[u])?Bc(i,a++,e[t++]):Bc(i,a++,e[u++])}function ec8(e,t,n,r,i,a){this.e=new p0,this.f=(enY(),tsP),P_(this.e,e),this.d=t,this.a=n,this.b=r,this.f=i,this.c=a}function ec7(e,t){var n,r;for(r=new Ow(e);r.e!=r.i.gc();)if(n=Pp(epH(r),26),xc(t)===xc(n))return!0;return!1}function ele(e){var t,n,r,i;for(eBW(),n=epE(),r=0,i=n.length;r=65&&e<=70?e-65+10:e>=97&&e<=102?e-97+10:e>=48&&e<=57?e-48:0}function eln(e){var t;return(64&e.Db)!=0?eMT(e):(t=new O1(eMT(e)),t.a+=" (source: ",xk(t,e.d),t.a+=")",t.a)}function elr(e,t,n){var r,i;return i=e.a,e.a=t,(4&e.Db)!=0&&(1&e.Db)==0&&(r=new FX(e,1,5,i,e.a),n?ey7(n,r):n=r),n}function eli(e,t){var n;n=(256&e.Bb)!=0,t?e.Bb|=256:e.Bb&=-257,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new ZB(e,1,2,n,t))}function ela(e,t){var n;n=(256&e.Bb)!=0,t?e.Bb|=256:e.Bb&=-257,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new ZB(e,1,8,n,t))}function elo(e,t){var 
n;n=(256&e.Bb)!=0,t?e.Bb|=256:e.Bb&=-257,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new ZB(e,1,8,n,t))}function els(e,t){var n;n=(512&e.Bb)!=0,t?e.Bb|=512:e.Bb&=-513,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new ZB(e,1,3,n,t))}function elu(e,t){var n;n=(512&e.Bb)!=0,t?e.Bb|=512:e.Bb&=-513,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new ZB(e,1,9,n,t))}function elc(e,t){var n;return -1==e.b&&e.a&&(n=e.a.Gj(),e.b=n?e.c.Xg(e.a.aj(),n):edv(e.c.Tg(),e.a)),e.c.Og(e.b,t)}function ell(e){var t,n;return e>-129&&e<128?(t=e+128,(n=(Rv(),e0B)[t])||(n=e0B[t]=new fC(e)),n):new fC(e)}function elf(e){var t,n;return e>-129&&e<128?(t=e+128,(n=(RU(),e0K)[t])||(n=e0K[t]=new fD(e)),n):new fD(e)}function eld(e){var t,n;return(t=e.k)==(eEn(),e8C)&&((n=Pp(e_k(e,(eBU(),tt1)),61))==(eYu(),tbw)||n==tbj)}function elh(e,t,n){var r,i,a;return(a=i=eMC(e.b,t))&&(r=Pp(eP9(Qq(e,a),""),26))?eMv(e,r,t,n):null}function elp(e,t,n){var r,i,a;return(a=i=eMC(e.b,t))&&(r=Pp(eP9(Qq(e,a),""),26))?eMy(e,r,t,n):null}function elb(e,t){var n,r;for(r=new Ow(e);r.e!=r.i.gc();)if(n=Pp(epH(r),138),xc(t)===xc(n))return!0;return!1}function elm(e,t,n){var r;if(t>(r=e.gc()))throw p7(new Ii(t,r));if(e.hi()&&e.Hc(n))throw p7(new gL(eXB));e.Xh(t,n)}function elg(e,t){var n;if(null==(n=etJ(e.i,t)))throw p7(new gK("Node did not exist in input."));return eiX(t,n),null}function elv(e,t){var n;if(n=eAh(e,t),M4(n,322))return Pp(n,34);throw p7(new gL(eZV+t+"' is not a valid attribute"))}function ely(e,t,n){var r,i;for(r=0,i=M4(t,99)&&(Pp(t,18).Bb&eH3)!=0?new x1(t,e):new eaN(t,e);rt?1:e==t?0==e?elN(1/e,1/t):0:isNaN(e)?isNaN(t)?0:1:-1}function elP(e,t){ewG(t,"Sort end labels",1),_r(UJ(eeh(new R1(null,new Gq(e.b,16)),new t2),new t3),new t4),eEj(t)}function elR(e,t,n){var r,i;return e.ej()?(i=e.fj(),r=exm(e,t,n),e.$i(e.Zi(7,ell(n),r,t,i)),r):exm(e,t,n)}function elj(e,t){var n,r,i;null==e.d?(++e.e,--e.f):(i=t.cd(),r=((n=t.Sh())&eUu)%e.d.length,Xc(e,r,eML(e,r,n,i)))}function elF(e,t){var n;n=(e.Bb&eXt)!=0,t?e.Bb|=eXt:e.Bb&=-1025,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new 
ZB(e,1,10,n,t))}function elY(e,t){var n;n=(e.Bb&eH0)!=0,t?e.Bb|=eH0:e.Bb&=-4097,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new ZB(e,1,12,n,t))}function elB(e,t){var n;n=(e.Bb&eJV)!=0,t?e.Bb|=eJV:e.Bb&=-8193,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new ZB(e,1,15,n,t))}function elU(e,t){var n;n=(e.Bb&eJq)!=0,t?e.Bb|=eJq:e.Bb&=-2049,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new ZB(e,1,11,n,t))}function elH(e,t){var n;return 0!=(n=elN(e.b.c,t.b.c))||0!=(n=elN(e.a.a,t.a.a))?n:elN(e.a.b,t.a.b)}function el$(e,t){var n;if(null==(n=Bp(e.k,t)))throw p7(new gK("Port did not exist in input."));return eiX(t,n),null}function elz(e){var t,n;for(n=eM$(etP(e)).Kc();n.Ob();)if(eDM(e,t=Lq(n.Pb())))return qb((_X(),tgh),t);return null}function elG(e,t){var n,r,i,a,o;for(i=0,o=eAY(e.e.Tg(),t),a=0,n=Pp(e.g,119);i>10)+eH4&eHd,t[1]=(1023&e)+56320&eHd,ehv(t,0,t.length)}function el0(e){var t,n;return(n=Pp(e_k(e,(eBy(),tal)),103))==(ec3(),tpv)?(t=gP(LV(e_k(e,tiX))))>=1?tpg:tpb:n}function el2(e){switch(Pp(e_k(e,(eBy(),tag)),218).g){case 1:return new ig;case 3:return new iE;default:return new im}}function el3(e){if(e.c)el3(e.c);else if(e.d)throw p7(new gC("Stream already terminated, can't be modified or used"))}function el4(e){var t;return(64&e.Db)!=0?eMT(e):(t=new O1(eMT(e)),t.a+=" (identifier: ",xk(t,e.k),t.a+=")",t.a)}function el5(e,t,n){var r,i;return r=(yT(),i=new oJ),ent(r,t),enn(r,n),e&&JL((e.a||(e.a=new O_(e6h,e,5)),e.a),r),r}function el6(e,t,n,r){var i,a;return BJ(r),BJ(n),null==(a=null==(i=e.xc(t))?n:_i(Pp(i,15),Pp(n,14)))?e.Bc(t):e.zc(t,a),a}function el9(e){var t,n,r,i;return n=(t=Pp(yw((i=(r=e.gm).f)==e1G?r:i),9),new I1(t,Pp(CY(t,t.length),9),0)),erC(n,e),n}function el8(e,t,n){var r,i;for(i=e.a.ec().Kc();i.Ob();)if(r=Pp(i.Pb(),10),eot(n,Pp(RJ(t,r.p),14)))return r;return null}function el7(e,t,n){var r;try{esE(e,t,n)}catch(i){if(i=eoa(i),M4(i,597))throw r=i,p7(new Zt(r));throw p7(i)}return t}function efe(e,t){var n;return Ts(e)&&Ts(t)&&eHV<(n=e-t)&&n>1,e.k=n-1>>1}function efo(){var 
e,t,n;ewP(),n=e2w+++Date.now(),e=zy(eB4.Math.floor(n*e$h))&e$b,t=zy(n-e*e$p),this.a=1502^e,this.b=t^e$d}function efs(e){var t,n,r;for(t=new p0,r=new fz(e.j);r.a34028234663852886e22?eHQ:t<-34028234663852886e22?eH1:t}function efp(e){return e-=e>>1&1431655765,e=((e=(e>>2&858993459)+(858993459&e))>>4)+e&252645135,e+=e>>8,63&(e+=e>>16)}function efb(e){var t,n,r,i;for(t=new CS(e.Hd().gc()),i=0,r=JJ(e.Hd().Kc());r.Ob();)Gr(t,n=r.Pb(),ell(i++));return eEA(t.a)}function efm(e,t){var n,r,i;for(i=new p2,r=t.vc().Kc();r.Ob();)Um(i,(n=Pp(r.Pb(),42)).cd(),eab(e,Pp(n.dd(),15)));return i}function efg(e,t){0==e.n.c.length&&P_(e.n,new zO(e.s,e.t,e.i)),P_(e.b,t),eml(Pp(RJ(e.n,e.n.c.length-1),211),t),eNk(e,t)}function efv(e){return(e.c!=e.b.b||e.i!=e.g.b)&&(e.a.c=Je(e1R,eUp,1,0,5,1),eoc(e.a,e.b),eoc(e.a,e.g),e.c=e.b.b,e.i=e.g.b),e.a}function efy(e,t){var n,r,i;for(i=0,r=Pp(t.Kb(e),20).Kc();r.Ob();)gN(LK(e_k(n=Pp(r.Pb(),17),(eBU(),tnE))))||++i;return i}function efw(e,t){var n,r,i;i=gP(LV(ed$(r=KT(t),(eBy(),toO)))),ev_(t,n=eB4.Math.max(0,i/2-.5),1),P_(e,new E9(t,n))}function ef_(){ef_=A,tnj=new ST(eGR,0),tnD=new ST("FIRST",1),tnN=new ST(eWi,2),tnP=new ST("LAST",3),tnR=new ST(eWa,4)}function efE(){efE=A,tpO=new kb(ezo,0),tpT=new kb("POLYLINE",1),tpx=new kb("ORTHOGONAL",2),tpM=new kb("SPLINES",3)}function efS(){efS=A,tlP=new S6("ASPECT_RATIO_DRIVEN",0),tlR=new S6("MAX_SCALE_DRIVEN",1),tlN=new S6("AREA_DRIVEN",2)}function efk(){efk=A,tff=new S8("P1_STRUCTURE",0),tfd=new S8("P2_PROCESSING_ORDER",1),tfh=new S8("P3_EXECUTION",2)}function efx(){efx=A,tc1=new S0("OVERLAP_REMOVAL",0),tcJ=new S0("COMPACTION",1),tcQ=new S0("GRAPH_SIZE_CALCULATION",2)}function efT(e,t){return Mc(),enj(eHe),eB4.Math.abs(e-t)<=eHe||e==t||isNaN(e)&&isNaN(t)?0:et?1:Te(isNaN(e),isNaN(t))}function efM(e,t){var n,r;for(n=epL(e,0);n.b!=n.d.c;){if((r=gR(LV(Vv(n))))==t)return;if(r>t){Ks(n);break}}YU(n,t)}function efO(e,t){var 
n,r,i,a,o;if(n=t.f,epy(e.c.d,n,t),null!=t.g)for(i=t.g,a=0,o=i.length;at&&r.ue(e[a-1],e[a])>0;--a)o=e[a],Bc(e,a,e[a-1]),Bc(e,a-1,o)}function efL(e,t,n,r){if(t<0)eOh(e,n,r);else{if(!n.Ij())throw p7(new gL(eZV+n.ne()+eZq));Pp(n,66).Nj().Tj(e,e.yh(),t,r)}}function efC(e,t){if(t==e.d)return e.e;if(t==e.e)return e.d;throw p7(new gL("Node "+t+" not part of edge "+e))}function efI(e,t){switch(t.g){case 2:return e.b;case 1:return e.c;case 4:return e.d;case 3:return e.a;default:return!1}}function efD(e,t){switch(t.g){case 2:return e.b;case 1:return e.c;case 4:return e.d;case 3:return e.a;default:return!1}}function efN(e,t,n,r){switch(t){case 3:return e.f;case 4:return e.g;case 5:return e.i;case 6:return e.j}return ec2(e,t,n,r)}function efP(e){return e.k==(eEn(),e8N)&&q3(new R1(null,new YI(new Fa(OH(efc(e).a.Kc(),new c)))),new it)}function efR(e){return null==e.e?e:(e.c||(e.c=new eCg((256&e.f)!=0,e.i,e.a,e.d,(16&e.f)!=0,e.j,e.g,null)),e.c)}function efj(e,t){return e.h==eHz&&0==e.m&&0==e.l?(t&&(e0A=Mk(0,0,0)),Tr((Q2(),e0I))):(t&&(e0A=Mk(e.l,e.m,e.h)),Mk(0,0,0))}function efF(e){var t;return Array.isArray(e)&&e.im===O?yx(esF(e))+"@"+(t=esj(e)>>>0).toString(16):e.toString()}function efY(e){var t;this.a=(t=Pp(e.e&&e.e(),9),new I1(t,Pp(CY(t,t.length),9),0)),this.b=Je(e1R,eUp,1,this.a.a.length,5,1)}function efB(e){var t,n,r;for(this.a=new Tw,r=new fz(e);r.a0&&(GV(t-1,e.length),58==e.charCodeAt(t-1))&&!efz(e,tm1,tm0)}function efz(e,t,n){var r,i;for(r=0,i=e.length;r=i)return t.c+n;return t.c+t.b.gc()}function efK(e,t){var n,r,i,a;for(LF(),r=J9(e),i=t,Qe(r,0,r.length,i),n=0;n0&&(r+=i,++n);return n>1&&(r+=e.d*(n-1)),r}function efq(e){var t,n,r;for(r=new vs,r.a+="[",t=0,n=e.gc();t0&&this.b>0&&ji(this.c,this.b,this.a)}function ef4(e){edk(),this.c=ZW(eow(vx(e5Z,1),eUp,831,0,[to2])),this.b=new p2,this.a=e,Um(this.b,to3,1),ety(to4,new h4(this))}function ef5(e,t){var n;return e.d?F9(e.b,t)?Pp(Bp(e.b,t),51):(n=t.Kf(),Um(e.b,t,n),n):t.Kf()}function ef6(e,t){var n;return 
xc(e)===xc(t)||!!M4(t,91)&&(n=Pp(t,91),e.e==n.e&&e.d==n.d&&qv(e,n.a))}function ef9(e){switch(eYu(),e.g){case 4:return tbw;case 1:return tby;case 3:return tbj;case 2:return tbY;default:return tbF}}function ef8(e,t){switch(t){case 3:return 0!=e.f;case 4:return 0!=e.g;case 5:return 0!=e.i;case 6:return 0!=e.j}return eaT(e,t)}function ef7(e){switch(e.g){case 0:return new aV;case 1:return new aq;default:throw p7(new gL(eqa+(null!=e.f?e.f:""+e.g)))}}function ede(e){switch(e.g){case 0:return new aK;case 1:return new aZ;default:throw p7(new gL(eWt+(null!=e.f?e.f:""+e.g)))}}function edt(e){switch(e.g){case 0:return new mZ;case 1:return new m_;default:throw p7(new gL(eqN+(null!=e.f?e.f:""+e.g)))}}function edn(e){switch(e.g){case 1:return new aU;case 2:return new LY;default:throw p7(new gL(eqa+(null!=e.f?e.f:""+e.g)))}}function edr(e){var t,n;if(e.b)return e.b;for(n=e2M?null:e.d;n;){if(t=e2M?null:n.b)return t;n=e2M?null:n.d}return _g(),e2F}function edi(e){var t,n,r;return 0==e.e?0:(t=e.d<<5,n=e.a[e.d-1],e.e<0&&(r=eiU(e))==e.d-1&&(--n,n|=0),t-=exv(n))}function eda(e){var t,n,r;return e>5,t=31&e,(r=Je(ty_,eHT,25,n+1,15,1))[n]=1<3;)i*=10,--a;e=(e+(i>>1))/i|0}return r.i=e,!0}function edl(e){return euQ(),OQ(),!!(efD(Pp(e.a,81).j,Pp(e.b,103))||0!=Pp(e.a,81).d.e&&efD(Pp(e.a,81).j,Pp(e.b,103)))}function edf(e){J1(),Pp(e.We((eBB(),thL)),174).Hc((eI3(),tb4))&&(Pp(e.We(thJ),174).Fc((ekU(),tbg)),Pp(e.We(thL),174).Mc(tb4))}function edd(e,t){var n,r;if(!t)return!1;for(n=0;n=0;--r)for(i=0,t=n[r];i>1,this.k=t-1>>1}function edC(e,t){ewG(t,"End label post-processing",1),_r(UJ(eeh(new R1(null,new Gq(e.b,16)),new tV),new tq),new tZ),eEj(t)}function edI(e,t,n){var r,i;return r=gP(e.p[t.i.p])+gP(e.d[t.i.p])+t.n.b+t.a.b,(i=gP(e.p[n.i.p])+gP(e.d[n.i.p])+n.n.b+n.a.b)-r}function edD(e,t,n){var r,i;for(i=0,r=WM(n,eH8);0!=ecd(r,0)&&i0&&(GV(0,t.length),43==t.charCodeAt(0))?t.substr(1):t)}function edR(e){var t;return null==e?null:new 
TU((t=ePh(e,!0)).length>0&&(GV(0,t.length),43==t.charCodeAt(0))?t.substr(1):t)}function edj(e,t){var n;return e.i>0&&(t.lengthe.i&&Bc(t,e.i,null),t}function edF(e,t,n){var r,i,a;return e.ej()?(r=e.i,a=e.fj(),ecW(e,r,t),i=e.Zi(3,null,t,r,a),n?n.Ei(i):n=i):ecW(e,e.i,t),n}function edY(e,t,n){var r,i;return r=new Q$(e.e,4,10,M4(i=t.c,88)?Pp(i,26):(eBK(),tgI),null,ebv(e,t),!1),n?n.Ei(r):n=r,n}function edB(e,t,n){var r,i;return r=new Q$(e.e,3,10,null,M4(i=t.c,88)?Pp(i,26):(eBK(),tgI),ebv(e,t),!1),n?n.Ei(r):n=r,n}function edU(e){var t;return Cn(),t=new TS(Pp(e.e.We((eBB(),thO)),8)),e.B.Hc((eI3(),tbQ))&&(t.a<=0&&(t.a=20),t.b<=0&&(t.b=20)),t}function edH(e){var t;return ebk(),t=(e.q?e.q:(Hj(),Hj(),e2i))._b((eBy(),ta0))?Pp(e_k(e,ta0),197):Pp(e_k(Bq(e),ta2),197)}function ed$(e,t){var n,r;return r=null,Ln(e,(eBy(),toD))&&(n=Pp(e_k(e,toD),94)).Xe(t)&&(r=n.We(t)),null==r&&(r=e_k(Bq(e),t)),r}function edz(e,t){var n,r,i;return!!M4(t,42)&&(r=(n=Pp(t,42)).cd(),i=ecA(e.Rc(),r),BG(i,n.dd())&&(null!=i||e.Rc()._b(r)))}function edG(e,t){var n,r,i;return e.f>0&&(e.qj(),i=((r=null==t?0:esj(t))&eUu)%e.d.length,-1!=(n=eML(e,i,r,t)))}function edW(e,t){var n,r,i;return e.f>0&&(e.qj(),i=((r=null==t?0:esj(t))&eUu)%e.d.length,n=exx(e,i,r,t))?n.dd():null}function edK(e,t){var n,r,i,a;for(i=0,a=eAY(e.e.Tg(),t),n=Pp(e.g,119);i1?WO(Fg(t.a[1],32),WM(t.a[0],eH8)):WM(t.a[0],eH8),Kj(efn(t.e,n))))}function edQ(e,t){var n;return Ts(e)&&Ts(t)&&eHV<(n=e%t)&&n>5,t&=31,r=Je(ty_,eHT,25,i=e.d+n+(0==t?0:1),15,1),ewZ(r,e.a,n,t),a=new F7(e.e,i,r),Ku(a),a}function eht(e,t,n){var r,i;r=Pp(zg(tv4,t),117),i=Pp(zg(tv5,t),117),n?(Ge(tv4,e,r),Ge(tv5,e,i)):(Ge(tv5,e,r),Ge(tv4,e,i))}function ehn(e,t,n){var r,i,a;for(i=null,a=e.b;a;){if(r=e.a.ue(t,a.d),n&&0==r)return a;r>=0?a=a.a[1]:(i=a,a=a.a[0])}return i}function ehr(e,t,n){var r,i,a;for(i=null,a=e.b;a;){if(r=e.a.ue(t,a.d),n&&0==r)return a;r<=0?a=a.a[0]:(i=a,a=a.a[1])}return i}function ehi(e,t,n,r){var i,a,o;return 
i=!1,ejB(e.f,n,r)&&(epn(e.f,e.a[t][n],e.a[t][r]),o=(a=e.a[t])[r],a[r]=a[n],a[n]=o,i=!0),i}function eha(e,t,n,r,i){var a,o,s;for(o=i;t.b!=t.c;)a=Pp(Yn(t),10),s=Pp(efr(a,r).Xb(0),11),e.d[s.p]=o++,n.c[n.c.length]=s;return o}function eho(e,t,n){var r,i,a,o,s;return o=e.k,s=t.k,i=LV(ed$(e,r=n[o.g][s.g])),a=LV(ed$(t,r)),eB4.Math.max((BJ(i),i),(BJ(a),a))}function ehs(e,t,n){var r,i,a,o;for(r=n/e.c.length,i=0,o=new fz(e);o.a2e3&&(e1X=e,e1J=eB4.setTimeout(wf,10)),0==e1Z++&&(eeA((g1(),e0_)),!0)}function ehf(e,t){var n,r,i;for(r=new Fa(OH(efc(e).a.Kc(),new c));eTk(r);)if((i=(n=Pp(ZC(r),17)).d.i).c==t)return!1;return!0}function ehd(e,t){var n,r;if(M4(t,245)){r=Pp(t,245);try{return n=e.vd(r),0==n}catch(i){if(i=eoa(i),!M4(i,205))throw p7(i)}}return!1}function ehh(){return Error.stackTraceLimit>0?(eB4.Error.stackTraceLimit=Error.stackTraceLimit=64,!0):"stack"in Error()}function ehp(e,t){return Mc(),Mc(),enj(eHe),(eB4.Math.abs(e-t)<=eHe||e==t||isNaN(e)&&isNaN(t)?0:et?1:Te(isNaN(e),isNaN(t)))>0}function ehb(e,t){return Mc(),Mc(),enj(eHe),(eB4.Math.abs(e-t)<=eHe||e==t||isNaN(e)&&isNaN(t)?0:et?1:Te(isNaN(e),isNaN(t)))<0}function ehm(e,t){return Mc(),Mc(),enj(eHe),(eB4.Math.abs(e-t)<=eHe||e==t||isNaN(e)&&isNaN(t)?0:et?1:Te(isNaN(e),isNaN(t)))<=0}function ehg(e,t){for(var n=0;!t[n]||""==t[n];)n++;for(var r=t[n++];neH6)return n.fh();if((r=n.Zg())||n==e)break}return r}function ehA(e){return(z0(),M4(e,156))?Pp(Bp(tmR,e0r),288).vg(e):F9(tmR,esF(e))?Pp(Bp(tmR,esF(e)),288).vg(e):null}function ehL(e){if(ehZ(eq6,e))return OQ(),e0P;if(ehZ(eq9,e))return OQ(),e0N;throw p7(new gL("Expecting true or false"))}function ehC(e,t){if(t.c==e)return t.d;if(t.d==e)return t.c;throw p7(new gL("Input edge is not connected to the input port."))}function ehI(e,t){return e.e>t.e?1:e.et.d?e.e:e.d=48&&e<48+eB4.Math.min(10,10)?e-48:e>=97&&e<97?e-97+10:e>=65&&e<65?e-65+10:-1}function ehN(e,t){var n;return xc(t)===xc(e)||!!M4(t,21)&&(n=Pp(t,21)).gc()==e.gc()&&e.Ic(n)}function ehP(e,t){var 
n,r,i,a;return(r=e.a.length-1,n=t-e.b&r,a=e.c-t&r,A2(n<(i=e.c-e.b&r)),n>=a)?(euD(e,t),-1):(euN(e,t),1)}function ehR(e,t){var n,r;for(n=(GV(t,e.length),e.charCodeAt(t)),r=t+1;rt.e?1:e.ft.f?1:esj(e)-esj(t)}function ehZ(e,t){return BJ(e),null!=t&&(!!IE(e,t)||e.length==t.length&&IE(e.toLowerCase(),t.toLowerCase()))}function ehX(e,t){var n,r,i,a;for(r=0,i=t.gc();r0&&0>ecd(e,128)?(t=jE(e)+128,(n=(RB(),e0H)[t])||(n=e0H[t]=new fI(e)),n):new fI(e)}function eh1(e,t){var n,r;return(n=t.Hh(e.a))&&null!=(r=Lq(edW((n.b||(n.b=new L_((eBK(),tgF),tgf,n)),n.b),eXP)))?r:t.ne()}function eh0(e,t){var n,r;return(n=t.Hh(e.a))&&null!=(r=Lq(edW((n.b||(n.b=new L_((eBK(),tgF),tgf,n)),n.b),eXP)))?r:t.ne()}function eh2(e,t){var n,r;for(Gk(),r=new Fa(OH(efs(e).a.Kc(),new c));eTk(r);)if((n=Pp(ZC(r),17)).d.i==t||n.c.i==t)return n;return null}function eh3(e,t,n){this.c=e,this.f=new p0,this.e=new yb,this.j=new R$,this.n=new R$,this.b=t,this.g=new Hr(t.c,t.d,t.b,t.a),this.a=n}function eh4(e){var t,n,r,i;for(r=0,this.a=new Tw,this.d=new bV,this.e=0,i=(n=e).length;r0)}function ept(e){var t;xc(eT8(e,(eBB(),thl)))===xc((eck(),tpG))&&(z$(e)?(t=Pp(eT8(z$(e),thl),334),ebu(e,thl,t)):ebu(e,thl,tpW))}function epn(e,t,n){var r,i;e_m(e.e,t,n,(eYu(),tbY)),e_m(e.i,t,n,tby),e.a&&(i=Pp(e_k(t,(eBU(),tnc)),11),r=Pp(e_k(n,tnc),11),WW(e.g,i,r))}function epr(e,t,n){var r,i,a;r=t.c.p,a=t.p,e.b[r][a]=new $j(e,t),n&&(e.a[r][a]=new hv(t),(i=Pp(e_k(t,(eBU(),tt8)),10))&&exg(e.d,i,t))}function epi(e,t){var n,r,i;if(P_(e9n,e),t.Fc(e),n=Pp(Bp(e9t,e),21))for(i=n.Kc();i.Ob();)-1!=QI(e9n,r=Pp(i.Pb(),33),0)||epi(r,t)}function epa(e,t,n){var r;(e2x?(edr(e),0):e2T?(_g(),0):e2A?(_g(),0):!e2O||(_g(),1))||((r=new I6(t)).b=n,eEt(e,r))}function epo(e,t){var n;n=!e.A.Hc((ed6(),tbq))||e.q==(ewf(),tbo),e.u.Hc((ekU(),tbp))?n?eY_(e,t):eF3(e,t):e.u.Hc(tbm)&&(n?eFO(e,t):eYY(e,t))}function eps(e,t){var n,r;if(++e.j,null!=t&&exM(t,n=M4(r=e.a.Cb,97)?Pp(r,97).Jg():null)){ehU(e.a,4,n);return}ehU(e.a,4,Pp(t,126))}function epu(e,t,n){return new 
Hr(eB4.Math.min(e.a,t.a)-n/2,eB4.Math.min(e.b,t.b)-n/2,eB4.Math.abs(e.a-t.a)+n,eB4.Math.abs(e.b-t.b)+n)}function epc(e,t){var n,r;return 0!=(n=ME(e.a.c.p,t.a.c.p))?n:0!=(r=ME(e.a.d.i.p,t.a.d.i.p))?r:ME(t.a.d.p,e.a.d.p)}function epl(e,t,n){var r,i,a,o;return(a=t.j)!=(o=n.j)?a.g-o.g:(r=e.f[t.p],i=e.f[n.p],0==r&&0==i?0:0==r?-1:0==i?1:elN(r,i))}function epf(e,t,n){var r,i,a;if(!n[t.d])for(n[t.d]=!0,i=new fz(efv(t));i.a=(i=e.length))return i;for(t=t>0?t:0;tr&&Bc(t,r,null),t}function epv(e,t){var n,r;for(r=e.a.length,t.lengthr&&Bc(t,r,null),t}function epy(e,t,n){var r,i,a;return(i=Pp(Bp(e.e,t),387))?(a=CL(i,n),M6(e,i),a):(r=new PM(e,t,n),Um(e.e,t,r),zd(r),null)}function epw(e){var t;if(null==e)return null;if(null==(t=eMI(ePh(e,!0))))throw p7(new gV("Invalid hexBinary value: '"+e+"'"));return t}function ep_(e){return(eLQ(),0>ecd(e,0))?0!=ecd(e,-1)?new ey$(-1,QC(e)):e03:0>=ecd(e,10)?e05[jE(e)]:new ey$(1,e)}function epE(){return eBW(),eow(vx(e3n,1),eU4,159,0,[e4e,e37,e4t,e30,e31,e32,e35,e34,e33,e38,e39,e36,e3J,e3X,e3Q,e3q,e3V,e3Z,e3W,e3G,e3K,e4n])}function epS(e){var t;this.d=new p0,this.j=new yb,this.g=new yb,t=e.g.b,this.f=Pp(e_k(Bq(t),(eBy(),tal)),103),this.e=gP(LV(epj(t,toN)))}function epk(e){this.b=new p0,this.e=new p0,this.d=e,this.a=!yK(UJ(new R1(null,new YI(new Z4(e.b))),new f2(new ir))).sd((_w(),e2z))}function epx(){epx=A,tdh=new ko("PARENTS",0),tdd=new ko("NODES",1),tdl=new ko("EDGES",2),tdp=new ko("PORTS",3),tdf=new ko("LABELS",4)}function epT(){epT=A,tbt=new kw("DISTRIBUTED",0),tbr=new kw("JUSTIFIED",1),tp7=new kw("BEGIN",2),tbe=new kw(e$8,3),tbn=new kw("END",4)}function epM(e){var t;switch(t=e.yi(null)){case 10:return 0;case 15:return 1;case 14:return 2;case 11:return 3;case 21:return 4}return -1}function epO(e){switch(e.g){case 1:return ec3(),tpy;case 4:return ec3(),tpm;case 2:return ec3(),tpg;case 3:return ec3(),tpb}return ec3(),tpv}function epA(e,t,n){var r;switch((r=n.q.getFullYear()-eHx+eHx)<0&&(r=-r),t){case 1:e.a+=r;break;case 
2:eeE(e,r%100,2);break;default:eeE(e,r,t)}}function epL(e,t){var n,r;if(Gp(t,e.b),t>=e.b>>1)for(r=e.c,n=e.b;n>t;--n)r=r.b;else for(n=0,r=e.a.a;n=64&&t<128&&(i=WO(i,Fg(1,t-64)));return i}function epj(e,t){var n,r;return r=null,Ln(e,(eBB(),tpa))&&(n=Pp(e_k(e,tpa),94)).Xe(t)&&(r=n.We(t)),null==r&&Bq(e)&&(r=e_k(Bq(e),t)),r}function epF(e,t){var n,r,i;(r=(i=t.d.i).k)!=(eEn(),e8N)&&r!=e8L&&(n=new Fa(OH(efc(i).a.Kc(),new c)),eTk(n)&&Um(e.k,t,Pp(ZC(n),17)))}function epY(e,t){var n,r,i;return r=ee2(e.Tg(),t),(n=t-e.Ah())<0?(i=e.Yg(r))>=0?e.lh(i):exu(e,r):n<0?exu(e,r):Pp(r,66).Nj().Sj(e,e.yh(),n)}function epB(e){var t;if(!M4(e.a,4))return e.a;if(null==(t=ehA(e.a)))throw p7(new gC(eq8+e.b+"'. "+eq4+(LW(e6D),e6D.k)+eq5));return t}function epU(e){var t;if(null==e)return null;if(null==(t=eYD(ePh(e,!0))))throw p7(new gV("Invalid base64Binary value: '"+e+"'"));return t}function epH(e){var t;try{return t=e.i.Xb(e.e),e.mj(),e.g=e.e++,t}catch(n){if(n=eoa(n),M4(n,73))throw e.mj(),p7(new bC);throw p7(n)}}function ep$(e){var t;try{return t=e.c.ki(e.e),e.mj(),e.g=e.e++,t}catch(n){if(n=eoa(n),M4(n,73))throw e.mj(),p7(new bC);throw p7(n)}}function epz(){epz=A,e67=(eBB(),tpt),e63=ths,e6J=td2,e64=thN,e69=(evw(),e3y),e66=e3g,e68=e3_,e65=e3m,e61=(eug(),e6V),e6Q=e6K,e60=e6Z,e62=e6X}function epG(e){switch(_M(),this.c=new p0,this.d=e,e.g){case 0:case 2:this.a=Ug(e8_),this.b=eHQ;break;case 3:case 1:this.a=e8_,this.b=eH1}}function epW(e,t,n){var r,i;if(e.c)eno(e.c,e.c.i+t),ens(e.c,e.c.j+n);else for(i=new fz(e.b);i.a0&&(P_(e.b,new PE(t.a,n)),0<(r=t.a.length)?t.a=t.a.substr(0,0):0>r&&(t.a+=M3(Je(tyw,eHl,25,-r,15,1))))}function epq(e,t){var n,r,i;for(n=e.o,i=Pp(Pp(Zq(e.r,t),21),84).Kc();i.Ob();)(r=Pp(i.Pb(),111)).e.a=ego(r,n.a),r.e.b=n.b*gP(LV(r.b.We(e4a)))}function epZ(e,t){var n,r,i,a;return i=e.k,n=gP(LV(e_k(e,(eBU(),tnv)))),a=t.k,r=gP(LV(e_k(t,tnv))),a!=(eEn(),e8C)?-1:i!=e8C?1:n==r?0:n=0?e.hh(t,n,r):(e.eh()&&(r=(i=e.Vg())>=0?e.Qg(r):e.eh().ih(e,-1-i,null,r)),e.Sg(t,n,r))}function 
ep2(e,t){switch(t){case 7:e.e||(e.e=new Ih(e6g,e,7,4)),eRT(e.e);return;case 8:e.d||(e.d=new Ih(e6g,e,8,5)),eRT(e.d);return}edS(e,t)}function ep3(e,t){var n;n=e.Zc(t);try{return n.Pb()}catch(r){if(r=eoa(r),M4(r,109))throw p7(new gE("Can't get element "+t));throw p7(r)}}function ep4(e,t){this.e=e,t=0&&(n.d=e.t);break;case 3:e.t>=0&&(n.a=e.t)}e.C&&(n.b=e.C.b,n.c=e.C.c)}function ep7(){ep7=A,e4d=new EN(ezb,0),e4f=new EN(ezm,1),e4h=new EN(ezg,2),e4p=new EN(ezv,3),e4d.a=!1,e4f.a=!0,e4h.a=!1,e4p.a=!0}function ebe(){ebe=A,e6U=new ED(ezb,0),e6B=new ED(ezm,1),e6H=new ED(ezg,2),e6$=new ED(ezv,3),e6U.a=!1,e6B.a=!0,e6H.a=!1,e6$.a=!0}function ebt(e){var t;t=e.a;do(t=Pp(ZC(new Fa(OH(efu(t).a.Kc(),new c))),17).c.i).k==(eEn(),e8D)&&e.b.Fc(t);while(t.k==(eEn(),e8D))e.b=eaa(e.b)}function ebn(e){var t,n,r;for(r=e.c.a,e.p=(Y9(r),new I4(r)),n=new fz(r);n.an.b))}function ebs(e,t){return xd(e)?!!e0c[t]:e.hm?!!e.hm[t]:xf(e)?!!e0u[t]:!!xl(e)&&!!e0s[t]}function ebu(e,t,n){return null==n?(e.o||(e.o=new JY((eBa(),tmy),e6O,e,0)),ehx(e.o,t)):(e.o||(e.o=new JY((eBa(),tmy),e6O,e,0)),evQ(e.o,t,n)),e}function ebc(e,t,n,r){var i,a;a=t.Xe((eBB(),thS))?Pp(t.We(thS),21):e.j,(i=ele(a))!=(eBW(),e4n)&&(!n||ehj(i))&&eEU(eMD(e,i,r),t)}function ebl(e,t,n,r){var i,a,o;return a=ee2(e.Tg(),t),(i=t-e.Ah())<0?(o=e.Yg(a))>=0?e._g(o,n,!0):exk(e,a,n):Pp(a,66).Nj().Pj(e,e.yh(),i,n,r)}function ebf(e,t,n,r){var i,a,o;n.mh(t)&&(_4(),eec(t)?ehX(e,i=Pp(n.ah(t),153)):(a=(o=t)?Pp(r,49).xh(o):null)&&p6(n.ah(t),a))}function ebd(e){switch(e.g){case 1:return eaY(),e4c;case 3:return eaY(),e4o;case 2:return eaY(),e4u;case 4:return eaY(),e4s;default:return null}}function ebh(e){switch(typeof e){case eUo:return ebA(e);case eUa:return zy(e);case eUi:return OQ(),e?1231:1237;default:return null==e?0:Ao(e)}}function ebp(e,t,n){if(e.e)switch(e.b){case 1:HJ(e.c,t,n);break;case 0:HQ(e.c,t,n)}else V6(e.c,t,n);e.a[t.p][n.p]=e.c.i,e.a[n.p][t.p]=e.c.e}function ebb(e){var t,n;if(null==e)return 
null;for(t=0,n=Je(e4N,eUP,193,e.length,0,2);t=0)return i;if(e.Fk()){for(r=0;r=(i=e.gc()))throw p7(new Ii(t,i));if(e.hi()&&(r=e.Xc(n))>=0&&r!=t)throw p7(new gL(eXB));return e.mi(t,n)}function ebw(e,t){if(this.a=Pp(Y9(e),245),this.b=Pp(Y9(t),245),e.vd(t)>0||e==(m3(),e0f)||t==(m2(),e0d))throw p7(new gL("Invalid range: "+VW(e,t)))}function eb_(e){var t,n;for(this.b=new p0,this.c=e,this.a=!1,n=new fz(e.a);n.a0),(t&-t)==t)return zy(t*eMU(e,31)*4656612873077393e-25);do r=(n=eMU(e,31))%t;while(n-r+(t-1)<0)return zy(r)}function ebA(e){var t,n,r;return(I9(),null!=(r=e2W[n=":"+e]))?zy((BJ(r),r)):(t=null==(r=e2G[n])?eAC(e):zy((BJ(r),r)),HB(),e2W[n]=t,t)}function ebL(e,t,n){ewG(n,"Compound graph preprocessor",1),e.a=new zu,eFC(e,t,null),eRs(e,t),eOz(e),eo3(t,(eBU(),ttW),e.a),e.a=null,Yy(e.b),eEj(n)}function ebC(e,t,n){switch(n.g){case 1:e.a=t.a/2,e.b=0;break;case 2:e.a=t.a,e.b=t.b/2;break;case 3:e.a=t.a/2,e.b=t.b;break;case 4:e.a=0,e.b=t.b/2}}function ebI(e){var t,n,r;for(r=Pp(Zq(e.a,(ey4(),tea)),15).Kc();r.Ob();)t=egD(n=Pp(r.Pb(),101)),Yz(e,n,t[0],(erX(),ted),0),Yz(e,n,t[1],tep,1)}function ebD(e){var t,n,r;for(r=Pp(Zq(e.a,(ey4(),teo)),15).Kc();r.Ob();)t=egD(n=Pp(r.Pb(),101)),Yz(e,n,t[0],(erX(),ted),0),Yz(e,n,t[1],tep,1)}function ebN(e){switch(e.g){case 0:return null;case 1:return new er1;case 2:return new mQ;default:throw p7(new gL(eqa+(null!=e.f?e.f:""+e.g)))}}function ebP(e,t,n){var r,i;for(eod(e,t-e.s,n-e.t),i=new fz(e.n);i.a1&&(a=ebE(e,t)),a}function ebj(e){var t;return e.f&&e.f.kh()&&(t=Pp(e.f,49),e.f=Pp(ecv(e,t),82),e.f!=t&&(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,9,8,t,e.f))),e.f}function ebF(e){var t;return e.i&&e.i.kh()&&(t=Pp(e.i,49),e.i=Pp(ecv(e,t),82),e.i!=t&&(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,9,7,t,e.i))),e.i}function ebY(e){var t;return e.b&&(64&e.b.Db)!=0&&(t=e.b,e.b=Pp(ecv(e,t),18),e.b!=t&&(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,9,21,t,e.b))),e.b}function ebB(e,t){var 
n,r,i;null==e.d?(++e.e,++e.f):(r=t.Sh(),eO1(e,e.f+1),i=(r&eUu)%e.d.length,(n=e.d[i])||(n=e.d[i]=e.uj()),n.Fc(t),++e.f)}function ebU(e,t,n){var r;return!t.Kj()&&(-2!=t.Zj()?null==(r=t.zj())?null==n:ecX(r,n):t.Hj()==e.e.Tg()&&null==n)}function ebH(){var e;enG(16,eU0),e=er_(16),this.b=Je(e1z,eU1,317,e,0,1),this.c=Je(e1z,eU1,317,e,0,1),this.a=null,this.e=null,this.i=0,this.f=e-1,this.g=0}function eb$(e){CW.call(this),this.k=(eEn(),e8N),this.j=(enG(6,eU3),new XM(6)),this.b=(enG(2,eU3),new XM(2)),this.d=new md,this.f=new mb,this.a=e}function ebz(e){var t,n;!(e.c.length<=1)&&(t=eLW(e,(eYu(),tbj)),eSe(e,Pp(t.a,19).a,Pp(t.b,19).a),n=eLW(e,tbY),eSe(e,Pp(n.a,19).a,Pp(n.b,19).a))}function ebG(){ebG=A,tsb=new Sx("SIMPLE",0),tsd=new Sx(eWg,1),tsh=new Sx("LINEAR_SEGMENTS",2),tsf=new Sx("BRANDES_KOEPF",3),tsp=new Sx(eVI,4)}function ebW(e,t,n){IR(Pp(e_k(t,(eBy(),tol)),98))||(Q3(e,t,eEC(t,n)),Q3(e,t,eEC(t,(eYu(),tbj))),Q3(e,t,eEC(t,tbw)),Hj(),Mv(t.j,new hm(e)))}function ebK(e,t,n,r){var i,a,o;for(o=(i=r?Pp(Zq(e.a,t),21):Pp(Zq(e.b,t),21)).Kc();o.Ob();)if(eL8(e,n,a=Pp(o.Pb(),33)))return!0;return!1}function ebV(e){var t,n;for(n=new Ow(e);n.e!=n.i.gc();)if((t=Pp(epH(n),87)).e||0!=(t.d||(t.d=new O_(tgr,t,1)),t.d).i)return!0;return!1}function ebq(e){var t,n;for(n=new Ow(e);n.e!=n.i.gc();)if((t=Pp(epH(n),87)).e||0!=(t.d||(t.d=new O_(tgr,t,1)),t.d).i)return!0;return!1}function ebZ(e){var t,n,r;for(t=0,r=new fz(e.c.a);r.a102?-1:e<=57?e-48:e<65?-1:e<=70?e-65+10:e<97?-1:e-97+10}function eb2(e,t){if(null==e)throw p7(new gD("null key in entry: null="+t));if(null==t)throw p7(new gD("null value in entry: "+e+"=null"))}function eb3(e,t){for(var n,r;e.Ob();)if(!t.Ob()||(n=e.Pb(),r=t.Pb(),!(xc(n)===xc(r)||null!=n&&ecX(n,r))))return!1;return!t.Ob()}function eb4(e,t){var n;return n=eow(vx(tyx,1),eH5,25,15,[euG(e.a[0],t),euG(e.a[1],t),euG(e.a[2],t)]),e.d&&(n[0]=eB4.Math.max(n[0],n[2]),n[2]=n[0]),n}function eb5(e,t){var n;return 
n=eow(vx(tyx,1),eH5,25,15,[euW(e.a[0],t),euW(e.a[1],t),euW(e.a[2],t)]),e.d&&(n[0]=eB4.Math.max(n[0],n[2]),n[2]=n[0]),n}function eb6(){eb6=A,teG=new Sf("GREEDY",0),tez=new Sf(eWv,1),teK=new Sf(eWg,2),teV=new Sf("MODEL_ORDER",3),teW=new Sf("GREEDY_MODEL_ORDER",4)}function eb9(e,t){var n,r,i;for(e.b[t.g]=1,r=epL(t.d,0);r.b!=r.d.c;)i=(n=Pp(Vv(r),188)).c,1==e.b[i.g]?P7(e.a,n):2==e.b[i.g]?e.b[i.g]=1:eb9(e,i)}function eb8(e,t){var n,r,i;for(i=new XM(t.gc()),r=t.Kc();r.Ob();)(n=Pp(r.Pb(),286)).c==n.f?eE5(e,n,n.c):eEQ(e,n)||(i.c[i.c.length]=n);return i}function eb7(e,t,n){var r,i,a,o,s;for(s=e.r+t,e.r+=t,e.d+=n,r=n/e.n.c.length,i=0,o=new fz(e.n);o.aa&&Bc(t,a,null),t}function emx(e,t){var n,r;if(r=e.gc(),null==t){for(n=0;n0&&(u+=i),c[l]=o,o+=s*(u+r)}function emj(e){var t,n,r;for(t=0,r=e.f,e.n=Je(tyx,eH5,25,r,15,1),e.d=Je(tyx,eH5,25,r,15,1);t0?e.c:0),++i;e.b=r,e.d=a}function emW(e,t){var n,r,i,a,o;for(r=0,i=0,n=0,o=new fz(t);o.a0?e.g:0),++n;e.c=i,e.d=r}function emK(e,t){var n;return n=eow(vx(tyx,1),eH5,25,15,[ebM(e,(etx(),e3D),t),ebM(e,e3N,t),ebM(e,e3P,t)]),e.f&&(n[0]=eB4.Math.max(n[0],n[2]),n[2]=n[0]),n}function emV(e,t,n){var r;try{eCQ(e,t+e.j,n+e.k,!1,!0)}catch(i){if(i=eoa(i),M4(i,73))throw r=i,p7(new gE(r.g+ezk+t+eUd+n+")."));throw p7(i)}}function emq(e,t,n){var r;try{eCQ(e,t+e.j,n+e.k,!0,!1)}catch(i){if(i=eoa(i),M4(i,73))throw r=i,p7(new gE(r.g+ezk+t+eUd+n+")."));throw p7(i)}}function emZ(e){var t;Ln(e,(eBy(),taZ))&&((t=Pp(e_k(e,taZ),21)).Hc((eT7(),tp1))?(t.Mc(tp1),t.Fc(tp2)):t.Hc(tp2)&&(t.Mc(tp2),t.Fc(tp1)))}function emX(e){var t;Ln(e,(eBy(),taZ))&&((t=Pp(e_k(e,taZ),21)).Hc((eT7(),tp9))?(t.Mc(tp9),t.Fc(tp5)):t.Hc(tp5)&&(t.Mc(tp5),t.Fc(tp9)))}function emJ(e,t,n){ewG(n,"Self-Loop ordering",1),_r(UQ(UJ(UJ(eeh(new R1(null,new Gq(t.b,16)),new n9),new n8),new n7),new re),new d1(e)),eEj(n)}function emQ(e,t,n,r){var i,a;for(i=t;i0&&(i.b+=t),i}function em8(e,t){var n,r,i;for(i=new yb,r=e.Kc();r.Ob();)eIn(n=Pp(r.Pb(),37),0,i.b),i.b+=n.f.b+t,i.a=eB4.Math.max(i.a,n.f.a);return 
i.a>0&&(i.a+=t),i}function em7(e){var t,n,r;for(r=eUu,n=new fz(e.a);n.a>16==6?e.Cb.ih(e,5,e6E,t):(r=ebY(Pp(ee2((n=Pp(eaS(e,16),26))||e.zh(),e.Db>>16),18)),e.Cb.ih(e,r.n,r.f,t))}function egr(e){$O();var t=e.e;if(t&&t.stack){var n=t.stack,r=t+"\n";return n.substring(0,r.length)==r&&(n=n.substring(r.length)),n.split("\n")}return[]}function egi(e){var t;return(t=(en4(),e0U))[e>>>28]|t[e>>24&15]<<4|t[e>>20&15]<<8|t[e>>16&15]<<12|t[e>>12&15]<<16|t[e>>8&15]<<20|t[e>>4&15]<<24|t[15&e]<<28}function ega(e){var t,n,r;e.b==e.c&&(r=e.a.length,n=esi(eB4.Math.max(8,r))<<1,0!=e.b?(t=CY(e.a,n),erL(e,t,r),e.a=t,e.b=0):bF(e.a,n),e.c=r)}function ego(e,t){var n;return(n=e.b).Xe((eBB(),thK))?n.Hf()==(eYu(),tbY)?-n.rf().a-gP(LV(n.We(thK))):t+gP(LV(n.We(thK))):n.Hf()==(eYu(),tbY)?-n.rf().a:t}function egs(e){var t;return 0!=e.b.c.length&&Pp(RJ(e.b,0),70).a?Pp(RJ(e.b,0),70).a:null!=(t=Hh(e))?t:""+(e.c?QI(e.c.a,e,0):-1)}function egu(e){var t;return 0!=e.f.c.length&&Pp(RJ(e.f,0),70).a?Pp(RJ(e.f,0),70).a:null!=(t=Hh(e))?t:""+(e.i?QI(e.i.j,e,0):-1)}function egc(e,t){var n,r;if(t<0||t>=e.gc())return null;for(n=t;n0?e.c:0),i=eB4.Math.max(i,t.d),++r;e.e=a,e.b=i}function egd(e){var t,n;if(!e.b)for(e.b=K$(Pp(e.f,118).Ag().i),n=new Ow(Pp(e.f,118).Ag());n.e!=n.i.gc();)t=Pp(epH(n),137),P_(e.b,new gO(t));return e.b}function egh(e,t){var n,r,i;if(t.dc())return LF(),LF(),tmB;for(n=new Cy(e,t.gc()),i=new Ow(e);i.e!=i.i.gc();)r=epH(i),t.Hc(r)&&JL(n,r);return n}function egp(e,t,n,r){return 0==t?r?(e.o||(e.o=new JY((eBa(),tmy),e6O,e,0)),e.o):(e.o||(e.o=new JY((eBa(),tmy),e6O,e,0)),X6(e.o)):ebl(e,t,n,r)}function egb(e){var t,n;if(e.rb)for(t=0,n=e.rb.i;t>22))>>22)<0)&&(e.l=n&eHH,e.m=r&eHH,e.h=i&eH$,!0))}function egw(e,t,n,r,i,a,o){var s,u;return!(t.Ae()&&((u=e.a.ue(n,r))<0||!i&&0==u)||t.Be()&&((s=e.a.ue(n,a))>0||!o&&0==s))}function eg_(e,t){var n;if(euv(),0!=(n=e.j.g-t.j.g))return 0;switch(e.j.g){case 2:return efy(t,e73)-efy(e,e73);case 4:return efy(e,e72)-efy(t,e72)}return 0}function egE(e){switch(e.g){case 
0:return te3;case 1:return te4;case 2:return te5;case 3:return te6;case 4:return te9;case 5:return te8;default:return null}}function egS(e,t,n){var r,i;return r=(eu2(i=new mN,t),er3(i,n),JL((e.c||(e.c=new FQ(tga,e,12,10)),e.c),i),i),end(r,0),enh(r,1),els(r,!0),eli(r,!0),r}function egk(e,t){var n,r;if(t>=e.i)throw p7(new xJ(t,e.i));return++e.j,n=e.g[t],(r=e.i-t-1)>0&&ePD(e.g,t+1,e.g,t,r),Bc(e.g,--e.i,null),e.fi(t,n),e.ci(),n}function egx(e,t){var n,r;return e.Db>>16==17?e.Cb.ih(e,21,tm7,t):(r=ebY(Pp(ee2((n=Pp(eaS(e,16),26))||e.zh(),e.Db>>16),18)),e.Cb.ih(e,r.n,r.f,t))}function egT(e){var t,n,r,i;for(Hj(),Mv(e.c,e.a),i=new fz(e.c);i.an.a.c.length))throw p7(new gL("index must be >= 0 and <= layer node count"));e.c&&QA(e.c.a,e),e.c=n,n&&jO(n.a,t,e)}function egH(e,t){var n,r,i;for(r=new Fa(OH(efs(e).a.Kc(),new c));eTk(r);)return n=Pp(ZC(r),17),i=Pp(t.Kb(n),10),new c5(Y9(i.n.b+i.o.b/2));return m4(),m4(),e0l}function eg$(e,t){this.c=new p2,this.a=e,this.b=t,this.d=Pp(e_k(e,(eBU(),tnx)),304),xc(e_k(e,(eBy(),taX)))===xc((Qx(),tte))?this.e=new mg:this.e=new mm}function egz(e,t){var n,r,i,a;for(a=0,r=new fz(e);r.a>16==6?e.Cb.ih(e,6,e6g,t):(r=ebY(Pp(ee2((n=Pp(eaS(e,16),26))||(eBa(),tmp),e.Db>>16),18)),e.Cb.ih(e,r.n,r.f,t))}function eg0(e,t){var n,r;return e.Db>>16==7?e.Cb.ih(e,1,e6p,t):(r=ebY(Pp(ee2((n=Pp(eaS(e,16),26))||(eBa(),tmm),e.Db>>16),18)),e.Cb.ih(e,r.n,r.f,t))}function eg2(e,t){var n,r;return e.Db>>16==9?e.Cb.ih(e,9,e6k,t):(r=ebY(Pp(ee2((n=Pp(eaS(e,16),26))||(eBa(),tmv),e.Db>>16),18)),e.Cb.ih(e,r.n,r.f,t))}function eg3(e,t){var n,r;return e.Db>>16==5?e.Cb.ih(e,9,tgt,t):(r=ebY(Pp(ee2((n=Pp(eaS(e,16),26))||(eBK(),tgT),e.Db>>16),18)),e.Cb.ih(e,r.n,r.f,t))}function eg4(e,t){var n,r;return e.Db>>16==3?e.Cb.ih(e,0,e6y,t):(r=ebY(Pp(ee2((n=Pp(eaS(e,16),26))||(eBK(),tgy),e.Db>>16),18)),e.Cb.ih(e,r.n,r.f,t))}function eg5(e,t){var n,r;return e.Db>>16==7?e.Cb.ih(e,6,e6E,t):(r=ebY(Pp(ee2((n=Pp(eaS(e,16),26))||(eBK(),tgP),e.Db>>16),18)),e.Cb.ih(e,r.n,r.f,t))}function 
eg6(){this.a=new o6,this.g=new ebH,this.j=new ebH,this.b=new p2,this.d=new ebH,this.i=new ebH,this.k=new p2,this.c=new p2,this.e=new p2,this.f=new p2}function eg9(e,t,n){var r,i,a;for(n<0&&(n=0),a=e.i,i=n;ieH6)return eg7(e,r);if(r==e)return!0}}return!1}function eve(e){switch(Ab(),e.q.g){case 5:ekK(e,(eYu(),tbw)),ekK(e,tbj);break;case 4:eMz(e,(eYu(),tbw)),eMz(e,tbj);break;default:eYa(e,(eYu(),tbw)),eYa(e,tbj)}}function evt(e){switch(Ab(),e.q.g){case 5:exG(e,(eYu(),tby)),exG(e,tbY);break;case 4:epq(e,(eYu(),tby)),epq(e,tbY);break;default:eYo(e,(eYu(),tby)),eYo(e,tbY)}}function evn(e){var t,n;(t=Pp(e_k(e,(eCk(),e9O)),19))?0==(n=t.a)?eo3(e,(erV(),e9F),new efo):eo3(e,(erV(),e9F),new qS(n)):eo3(e,(erV(),e9F),new qS(1))}function evr(e,t){var n;switch(n=e.i,t.g){case 1:return-(e.n.b+e.o.b);case 2:return e.n.a-n.o.a;case 3:return e.n.b-n.o.b;case 4:return-(e.n.a+e.o.a)}return 0}function evi(e,t){switch(e.g){case 0:return t==(ef_(),tnN)?e7V:e7q;case 1:return t==(ef_(),tnN)?e7V:e7K;case 2:return t==(ef_(),tnN)?e7K:e7q;default:return e7K}}function eva(e,t){var n,r,i;for(QA(e.a,t),e.e-=t.r+(0==e.a.c.length?0:e.c),i=eqe,r=new fz(e.a);r.a>16==3?e.Cb.ih(e,12,e6k,t):(r=ebY(Pp(ee2((n=Pp(eaS(e,16),26))||(eBa(),tmh),e.Db>>16),18)),e.Cb.ih(e,r.n,r.f,t))}function evs(e,t){var n,r;return e.Db>>16==11?e.Cb.ih(e,10,e6k,t):(r=ebY(Pp(ee2((n=Pp(eaS(e,16),26))||(eBa(),tmg),e.Db>>16),18)),e.Cb.ih(e,r.n,r.f,t))}function evu(e,t){var n,r;return e.Db>>16==10?e.Cb.ih(e,11,tm7,t):(r=ebY(Pp(ee2((n=Pp(eaS(e,16),26))||(eBK(),tgD),e.Db>>16),18)),e.Cb.ih(e,r.n,r.f,t))}function evc(e,t){var n,r;return e.Db>>16==10?e.Cb.ih(e,12,tgi,t):(r=ebY(Pp(ee2((n=Pp(eaS(e,16),26))||(eBK(),tgR),e.Db>>16),18)),e.Cb.ih(e,r.n,r.f,t))}function evl(e){var t;return(1&e.Bb)==0&&e.r&&e.r.kh()&&(t=Pp(e.r,49),e.r=Pp(ecv(e,t),138),e.r!=t&&(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,9,8,t,e.r))),e.r}function evf(e,t,n){var r;return 
r=eow(vx(tyx,1),eH5,25,15,[e_u(e,(etx(),e3D),t,n),e_u(e,e3N,t,n),e_u(e,e3P,t,n)]),e.f&&(r[0]=eB4.Math.max(r[0],r[2]),r[2]=r[0]),r}function evd(e,t){var n,r,i;if(0!=(i=eb8(e,t)).c.length)for(Mv(i,new nD),n=i.c.length,r=0;r>19)!=(c=t.h>>19)?c-u:(i=e.h)!=(s=t.h)?i-s:(r=e.m)!=(o=t.m)?r-o:(n=e.l)-(a=t.l)}function evw(){evw=A,e3E=(eCp(),e3A),e3_=new xX(e$J,e3E),e3w=(eeR(),e3p),e3y=new xX(e$Q,e3w),e3v=(epC(),e3f),e3g=new xX(e$1,e3v),e3m=new xX(e$0,(OQ(),!0))}function ev_(e,t,n){var r,i;r=t*n,M4(e.g,145)?(i=Vm(e)).f.d?i.f.a||(e.d.a+=r+ezs):(e.d.d-=r+ezs,e.d.a+=r+ezs):M4(e.g,10)&&(e.d.d-=r,e.d.a+=2*r)}function evE(e,t,n){var r,i,a,o,s;for(i=e[n.g],s=new fz(t.d);s.a0?e.g:0),++n;t.b=r,t.e=i}function evk(e){var t,n,r;if(r=e.b,w4(e.i,r.length)){for(n=2*r.length,e.b=Je(e1z,eU1,317,n,0,1),e.c=Je(e1z,eU1,317,n,0,1),e.f=n-1,e.i=0,t=e.a;t;t=t.c)ekT(e,t,t);++e.g}}function evx(e,t,n,r){var i,a,o,s;for(i=0;io&&(s=o/r),i>a&&(u=a/i),Ol(e,eB4.Math.min(s,u)),e}function evO(){var e,t;ePm();try{if(t=Pp(eyv((_Q(),tgp),eXe),2014))return t}catch(n){if(n=eoa(n),M4(n,102))e=n,Fi((Mo(),e));else throw p7(n)}return new o1}function evA(){var e,t;Qk();try{if(t=Pp(eyv((_Q(),tgp),eQB),2024))return t}catch(n){if(n=eoa(n),M4(n,102))e=n,Fi((Mo(),e));else throw p7(n)}return new uc}function evL(){var e,t;ePm();try{if(t=Pp(eyv((_Q(),tgp),eQc),1941))return t}catch(n){if(n=eoa(n),M4(n,102))e=n,Fi((Mo(),e));else throw p7(n)}return new sT}function evC(e,t,n){var r,i;return i=e.e,e.e=t,(4&e.Db)!=0&&(1&e.Db)==0&&(r=new FX(e,1,4,i,t),n?n.Ei(r):n=r),i!=t&&(n=t?eFr(e,eOl(e,t),n):eFr(e,e.a,n)),n}function evI(){wW.call(this),this.e=-1,this.a=!1,this.p=eHt,this.k=-1,this.c=-1,this.b=-1,this.g=!1,this.f=-1,this.j=-1,this.n=-1,this.i=-1,this.d=-1,this.o=eHt}function evD(e,t){var n,r,i;if(r=e.b.d.d,e.a||(r+=e.b.d.a),i=t.b.d.d,t.a||(i+=t.b.d.a),0==(n=elN(r,i))){if(!e.a&&t.a)return -1;if(!t.a&&e.a)return 1}return n}function evN(e,t){var 
n,r,i;if(r=e.b.b.d,e.a||(r+=e.b.b.a),i=t.b.b.d,t.a||(i+=t.b.b.a),0==(n=elN(r,i))){if(!e.a&&t.a)return -1;if(!t.a&&e.a)return 1}return n}function evP(e,t){var n,r,i;if(r=e.b.g.d,e.a||(r+=e.b.g.a),i=t.b.g.d,t.a||(i+=t.b.g.a),0==(n=elN(r,i))){if(!e.a&&t.a)return -1;if(!t.a&&e.a)return 1}return n}function evR(){evR=A,e99=j0(RI(RI(RI(new K2,(e_x(),e8r),(eB$(),e7f)),e8r,e7b),e8i,e7E),e8i,e87),e97=RI(RI(new K2,e8r,e8Q),e8r,e7e),e98=j0(new K2,e8i,e7n)}function evj(e){var t,n,r,i,a;for(t=Pp(e_k(e,(eBU(),ttq)),83),a=e.n,r=t.Cc().Kc();r.Ob();)i=(n=Pp(r.Pb(),306)).i,i.c+=a.a,i.d+=a.b,n.c?eL3(n):eL4(n);eo3(e,ttq,null)}function evF(e,t,n){var r,i;switch(r=(i=e.b).d,t.g){case 1:return-r.d-n;case 2:return i.o.a+r.c+n;case 3:return i.o.b+r.a+n;case 4:return-r.b-n;default:return -1}}function evY(e){var t,n,r,i,a;if(r=0,i=ezq,e.b)for(t=0;t<360;t++)n=.017453292519943295*t,eIq(e,e.d,0,0,eV7,n),(a=e.b.ig(e.d))0&&(o=(a&eUu)%e.d.length,i=exx(e,o,a,t)))?s=i.ed(n):(r=e.tj(a,t,n),e.c.Fc(r),null)}function ev1(e,t){var n,r,i,a;switch(ecG(e,t)._k()){case 3:case 2:for(i=0,a=(n=ePk(t)).i;i=0;r--)if(IE(e[r].d,t)||IE(e[r].d,n)){e.length>=r+1&&e.splice(0,r+1);break}return e}function eyt(e,t){var n;return Ts(e)&&Ts(t)&&eHV<(n=e/t)&&n0&&(e.b+=2,e.a+=r):(e.b+=1,e.a+=eB4.Math.min(r,i))}function eyc(e,t){var n,r;if(r=!1,xd(t)&&(r=!0,BC(e,new B_(Lq(t)))),!r&&M4(t,236)&&(r=!0,BC(e,(n=IZ(Pp(t,236)),new lI(n)))),!r)throw p7(new gk(eXE))}function eyl(e,t,n,r){var i,a,o;return i=new Q$(e.e,1,10,M4(o=t.c,88)?Pp(o,26):(eBK(),tgI),M4(a=n.c,88)?Pp(a,26):(eBK(),tgI),ebv(e,t),!1),r?r.Ei(i):r=i,r}function eyf(e){var t,n;switch(Pp(e_k(Bq(e),(eBy(),taP)),420).g){case 0:return t=e.n,n=e.o,new kl(t.a+n.a/2,t.b+n.b/2);case 1:return new TS(e.n);default:return null}}function eyd(){eyd=A,tto=new Sm(eGR,0),tta=new Sm("LEFTUP",1),ttu=new Sm("RIGHTUP",2),tti=new Sm("LEFTDOWN",3),tts=new Sm("RIGHTDOWN",4),ttr=new Sm("BALANCED",5)}function eyh(e,t,n){var 
r,i,a;if(0==(r=elN(e.a[t.p],e.a[n.p]))){if(i=Pp(e_k(t,(eBU(),tt7)),15),a=Pp(e_k(n,tt7),15),i.Hc(n))return -1;if(a.Hc(t))return 1}return r}function eyp(e){switch(e.g){case 1:return new a$;case 2:return new az;case 3:return new aH;case 0:return null;default:throw p7(new gL(eqa+(null!=e.f?e.f:""+e.g)))}}function eyb(e,t,n){switch(t){case 1:e.n||(e.n=new FQ(e6S,e,1,7)),eRT(e.n),e.n||(e.n=new FQ(e6S,e,1,7)),Y4(e.n,Pp(n,14));return;case 2:ert(e,Lq(n));return}esU(e,t,n)}function eym(e,t,n){switch(t){case 3:eni(e,gP(LV(n)));return;case 4:ena(e,gP(LV(n)));return;case 5:eno(e,gP(LV(n)));return;case 6:ens(e,gP(LV(n)));return}eyb(e,t,n)}function eyg(e,t,n){var r,i,a;(i=ew3(a=r=new mN,t,null))&&i.Fi(),er3(a,n),JL((e.c||(e.c=new FQ(tga,e,12,10)),e.c),a),end(a,0),enh(a,1),els(a,!0),eli(a,!0)}function eyv(e,t){var n,r,i;return M4(n=Ea(e.g,t),235)?((i=Pp(n,235)).Qh(),i.Nh()):M4(n,498)?i=(r=Pp(n,1938)).b:null}function eyy(e,t,n,r){var i,a;return Y9(t),Y9(n),a=Pp(Iq(e.d,t),19),QW(!!a,"Row %s not in %s",t,e.e),i=Pp(Iq(e.b,n),19),QW(!!i,"Column %s not in %s",n,e.c),eoy(e,a.a,i.a,r)}function eyw(e,t,n,r,i,a,o){var s,u,c,l,f;if(l=i[a],f=emH(s=(c=a==o-1)?r:0,l),10!=r&&eow(vx(e,o-a),t[a],n[a],s,f),!c)for(++a,u=0;u1||-1==s?(a=Pp(u,15),i.Wb(ehk(e,a))):i.Wb(eI4(e,Pp(u,56))))}function eyP(e,t,n,r){wd();var i=eUn;function a(){for(var e=0;eeVW);)i>-.000001&&++n;return n}function eyW(e,t){var n;t!=e.b?(n=null,e.b&&(n=$7(e.b,e,-4,n)),t&&(n=ep0(t,e,-4,n)),(n=ecm(e,t,n))&&n.Fi()):(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,3,t,t))}function eyK(e,t){var n;t!=e.f?(n=null,e.f&&(n=$7(e.f,e,-1,n)),t&&(n=ep0(t,e,-1,n)),(n=ecg(e,t,n))&&n.Fi()):(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,0,t,t))}function eyV(e){var t,n,r;if(null==e)return null;if((n=Pp(e,15)).dc())return"";for(r=new vs,t=n.Kc();t.Ob();)xk(r,(eR7(),Lq(t.Pb()))),r.a+=" ";return x3(r,r.a.length-1)}function eyq(e){var t,n,r;if(null==e)return null;if((n=Pp(e,15)).dc())return"";for(r=new vs,t=n.Kc();t.Ob();)xk(r,(eR7(),Lq(t.Pb()))),r.a+=" ";return 
x3(r,r.a.length-1)}function eyZ(e,t,n){var r,i;return(r=e.c[t.c.p][t.p],i=e.c[n.c.p][n.p],null!=r.a&&null!=i.a)?F_(r.a,i.a):null!=r.a?-1:null!=i.a?1:0}function eyX(e,t){var n,r,i,a,o,s;if(t)for(a=t.a.length,s=((n=new Fs(a)).b-n.a)*n.c<0?(_9(),eB3):new OR(n);s.Ob();)i=KZ(t,(o=Pp(s.Pb(),19)).a),UX((r=new pu(e)).a,i)}function eyJ(e,t){var n,r,i,a,o,s;if(t)for(a=t.a.length,s=((n=new Fs(a)).b-n.a)*n.c<0?(_9(),eB3):new OR(n);s.Ob();)i=KZ(t,(o=Pp(s.Pb(),19)).a),UZ((r=new h7(e)).a,i)}function eyQ(e){var t;if(null!=e&&e.length>0&&33==UI(e,e.length-1))try{return t=eSR(Az(e,0,e.length-1)),null==t.e}catch(n){if(n=eoa(n),!M4(n,32))throw p7(n)}return!1}function ey1(e,t,n){var r,i,a;return r=t.ak(),a=t.dd(),i=r.$j()?$N(e,3,r,null,a,eN1(e,r,a,M4(r,99)&&(Pp(r,18).Bb&eH3)!=0),!0):$N(e,1,r,r.zj(),a,-1,!0),n?n.Ei(i):n=i,n}function ey0(){var e,t,n;for(e=0,t=0;e<1;e++){if(0==(n=eTa((GV(e,1),"X".charCodeAt(e)))))throw p7(new gX("Unknown Option: "+"X".substr(e)));t|=n}return t}function ey2(e,t,n){var r,i,a;switch(i=el0(r=Bq(t)),a=new eES,Gc(a,t),n.g){case 1:ekv(a,elC(ef9(i)));break;case 2:ekv(a,ef9(i))}return eo3(a,(eBy(),toc),LV(e_k(e,toc))),a}function ey3(e){var t,n;return t=Pp(ZC(new Fa(OH(efu(e.a).a.Kc(),new c))),17),n=Pp(ZC(new Fa(OH(efc(e.a).a.Kc(),new c))),17),gN(LK(e_k(t,(eBU(),tnE))))||gN(LK(e_k(n,tnE)))}function ey4(){ey4=A,ter=new Sa("ONE_SIDE",0),tea=new Sa("TWO_SIDES_CORNER",1),teo=new Sa("TWO_SIDES_OPPOSING",2),tei=new Sa("THREE_SIDES",3),ten=new Sa("FOUR_SIDES",4)}function ey5(e,t,n,r,i){var a,o;a=Pp(qE(UJ(t.Oc(),new ih),JF(new U,new B,new en,eow(vx(e2L,1),eU4,132,0,[(eum(),e2U)]))),15),o=Pp(eay(e.b,n,r),15),0==i?o.Wc(0,a):o.Gc(a)}function ey6(e,t){var n,r,i,a,o;for(a=new fz(t.a);a.a0&&egL(this,this.c-1,(eYu(),tby)),this.c0&&e[0].length>0&&(this.c=gN(LK(e_k(Bq(e[0][0]),(eBU(),tne))))),this.a=Je(e5d,eUP,2018,e.length,0,2),this.b=Je(e5h,eUP,2019,e.length,0,2),this.d=new euX}function ewu(e){return 0!=e.c.length&&((GK(0,e.c.length),Pp(e.c[0],17)).c.i.k==(eEn(),e8D)||q3(UQ(new 
R1(null,new Gq(e,16)),new iJ),new iQ))}function ewc(e,t,n){return ewG(n,"Tree layout",1),Kx(e.b),Yb(e.b,(egR(),tuJ),tuJ),Yb(e.b,tuQ,tuQ),Yb(e.b,tu1,tu1),Yb(e.b,tu0,tu0),e.a=eRq(e.b,t),eAG(e,t,eiI(n,1)),eEj(n),t}function ewl(e,t){var n,r,i,a,o,s,u;for(s=eLj(t),a=t.f,u=t.g,o=eB4.Math.sqrt(a*a+u*u),i=0,r=new fz(s);r.a=0?(n=eyt(e,eHK),r=edQ(e,eHK)):(n=eyt(t=Fy(e,1),5e8),r=eft(Fg(r=edQ(t,5e8),1),WM(e,1))),WO(Fg(r,32),WM(n,eH8))}function ewM(e,t,n){var r,i;switch(r=(A6(0!=t.b),Pp(etw(t,t.a.a),8)),n.g){case 0:r.b=0;break;case 2:r.b=e.f;break;case 3:r.a=0;break;default:r.a=e.g}return YU(i=epL(t,0),r),t}function ewO(e,t,n,r){var i,a,o,s,u;switch(u=e.b,s=epd(o=(a=t.d).j,u.d[o.g],n),i=C5(MB(a.n),a.a),a.j.g){case 1:case 3:s.a+=i.a;break;case 2:case 4:s.b+=i.b}qQ(r,s,r.c.b,r.c)}function ewA(e,t,n){var r,i,a,o;for(o=QI(e.e,t,0),(a=new ma).b=n,r=new KB(e.e,o);r.b1;t>>=1)(1&t)!=0&&(r=eeD(r,n)),n=1==n.d?eeD(n,n):new eh5(eDE(n.a,n.d,Je(ty_,eHT,25,n.d<<1,15,1)));return eeD(r,n)}function ewP(){var e,t,n,r;for(t=32,ewP=A,e2v=Je(tyx,eH5,25,25,15,1),e2y=Je(tyx,eH5,25,33,15,1),r=152587890625e-16;t>=0;t--)e2y[t]=r,r*=.5;for(e=24,n=1;e>=0;e--)e2v[e]=n,n*=.5}function ewR(e){var t,n;if(gN(LK(eT8(e,(eBy(),taI))))){for(n=new Fa(OH(eOi(e).a.Kc(),new c));eTk(n);)if(t=Pp(ZC(n),79),exb(t)&&gN(LK(eT8(t,taD))))return!0}return!1}function ewj(e,t){var n,r,i;Yf(e.f,t)&&(t.b=e,r=t.c,-1!=QI(e.j,r,0)||P_(e.j,r),i=t.d,-1!=QI(e.j,i,0)||P_(e.j,i),0!=(n=t.a.b).c.length&&(e.i||(e.i=new epS(e)),ea_(e.i,n)))}function ewF(e){var t,n,r,i,a;return(r=(n=e.c.d).j)==(a=(i=e.d.d).j)?n.p=0&&IE(e.substr(t,3),"GMT")?(n[0]=t+3,eDh(e,n,r)):(t>=0&&IE(e.substr(t,3),"UTC")&&(n[0]=t+3),eDh(e,n,r))}function ewz(e,t){var n,r,i,a,o;for(a=e.g.a,o=e.g.b,r=new fz(e.d);r.an;a--)e[a]|=t[a-n-1]>>>o,e[a-1]=t[a-n-1]<=e.f)break;a.c[a.c.length]=n}return a}function ew1(e){var t,n,r,i;for(t=null,i=new fz(e.wf());i.a0&&ePD(e.g,t,e.g,t+r,s),o=n.Kc(),e.i+=r,i=0;ia&&F6(c,ee5(n[s],e2h))&&(i=s,a=u);return i>=0&&(r[0]=t+a),i}function ew9(e,t){var 
n;if(0!=(n=To(e.b.Hf(),t.b.Hf())))return n;switch(e.b.Hf().g){case 1:case 2:return ME(e.b.sf(),t.b.sf());case 3:case 4:return ME(t.b.sf(),e.b.sf())}return 0}function ew8(e){var t,n,r;for(r=e.e.c.length,e.a=RF(ty_,[eUP,eHT],[48,25],15,[r,r],2),n=new fz(e.c);n.a>4&15,a=15&e[r],o[i++]=tmk[n],o[i++]=tmk[a];return ehv(o,0,o.length)}function e_t(e,t,n){var r,i,a;return r=t.ak(),a=t.dd(),i=r.$j()?$N(e,4,r,a,null,eN1(e,r,a,M4(r,99)&&(Pp(r,18).Bb&eH3)!=0),!0):$N(e,r.Kj()?2:1,r,a,r.zj(),-1,!0),n?n.Ei(i):n=i,n}function e_n(e){var t,n;return e>=eH3?(t=eH4+(e-eH3>>10&1023)&eHd,n=56320+(e-eH3&1023)&eHd,String.fromCharCode(t)+""+String.fromCharCode(n)):String.fromCharCode(e&eHd)}function e_r(e,t){var n,r,i,a;return Cn(),(i=Pp(Pp(Zq(e.r,t),21),84)).gc()>=2&&(r=Pp(i.Kc().Pb(),111),n=e.u.Hc((ekU(),tbh)),a=e.u.Hc(tbg),!r.a&&!n&&(2==i.gc()||a))}function e_i(e,t,n,r,i){var a,o,s;for(a=eLx(e,t,n,r,i),s=!1;!a;)eME(e,i,!0),s=!0,a=eLx(e,t,n,r,i);s&&eME(e,i,!1),0!=(o=eoA(i)).c.length&&(e.d&&e.d.lg(o),e_i(e,i,n,r,o))}function e_a(){e_a=A,tpN=new km(eGR,0),tpI=new km("DIRECTED",1),tpP=new km("UNDIRECTED",2),tpL=new km("ASSOCIATION",3),tpD=new km("GENERALIZATION",4),tpC=new km("DEPENDENCY",5)}function e_o(e,t){var n;if(!zY(e))throw p7(new gC(eZL));switch(n=zY(e),t.g){case 1:return-(e.j+e.f);case 2:return e.i-n.g;case 3:return e.j-n.f;case 4:return-(e.i+e.g)}return 0}function e_s(e,t){var n,r;for(BJ(t),r=e.b.c.length,P_(e.b,t);r>0;){if(n=r,r=(r-1)/2|0,0>=e.a.ue(RJ(e.b,r),t))return q1(e.b,n,t),!0;q1(e.b,n,RJ(e.b,r))}return q1(e.b,r,t),!0}function e_u(e,t,n,r){var i,a;if(i=0,n)i=euW(e.a[n.g][t.g],r);else for(a=0;a=s)}function e_l(e,t,n,r){var i;if(i=!1,xd(r)&&(i=!0,P4(t,n,Lq(r))),!i&&xl(r)&&(i=!0,e_l(e,t,n,r)),!i&&M4(r,236)&&(i=!0,H1(t,n,Pp(r,236))),!i)throw p7(new gk(eXE))}function e_f(e,t){var n,r,i;if((n=t.Hh(e.a))&&null!=(i=edW((n.b||(n.b=new L_((eBK(),tgF),tgf,n)),n.b),eQe))){for(r=1;r<(eSp(),tvs).length;++r)if(IE(tvs[r],i))return r}return 0}function e_d(e,t){var 
n,r,i;if((n=t.Hh(e.a))&&null!=(i=edW((n.b||(n.b=new L_((eBK(),tgF),tgf,n)),n.b),eQe))){for(r=1;r<(eSp(),tvu).length;++r)if(IE(tvu[r],i))return r}return 0}function e_h(e,t){var n,r,i,a;if(BJ(t),(a=e.a.gc())0?1:0;a.a[i]!=n;)a=a.a[i],i=e.a.ue(n.d,a.d)>0?1:0;a.a[i]=r,r.b=n.b,r.a[0]=n.a[0],r.a[1]=n.a[1],n.a[0]=null,n.a[1]=null}function e_y(e){var t,n;return ekU(),t=jL(tbp,eow(vx(e6i,1),eU4,273,0,[tbm])),!(eaC(z_(t,e))>1)&&(n=jL(tbh,eow(vx(e6i,1),eU4,273,0,[tbd,tbg])),!(eaC(z_(n,e))>1))}function e_w(e,t){var n;M4(n=zg((_Q(),tgp),e),498)?Ge(tgp,e,new k5(this,t)):Ge(tgp,e,this),e_8(this,t),t==(yO(),tgg)?(this.wb=Pp(this,1939),Pp(t,1941)):this.wb=(BM(),tgv)}function e__(e){var t,n,r;if(null==e)return null;for(n=0,t=null;n=eHf?"error":r>=900?"warn":r>=800?"info":"log",e.a),e.b&&eAp(t,n,e.b,"Exception: ",!0))}function e_k(e,t){var n,r;return null!=(r=(e.q||(e.q=new p2),Bp(e.q,t)))?r:(M4(n=t.wg(),4)&&(null==n?(e.q||(e.q=new p2),Z3(e.q,t)):(e.q||(e.q=new p2),Um(e.q,t,n))),n)}function e_x(){e_x=A,e8e=new Ez("P1_CYCLE_BREAKING",0),e8t=new Ez("P2_LAYERING",1),e8n=new Ez("P3_NODE_ORDERING",2),e8r=new Ez("P4_NODE_PLACEMENT",3),e8i=new Ez("P5_EDGE_ROUTING",4)}function e_T(e,t){var n,r,i,a,o;for(r=(i=1==t?e8c:e8u).a.ec().Kc();r.Ob();)for(n=Pp(r.Pb(),103),o=Pp(Zq(e.f.c,n),21).Kc();o.Ob();)a=Pp(o.Pb(),46),QA(e.b.b,a.b),QA(e.b.a,Pp(a.b,81).d)}function e_M(e,t){var n;if(eeP(),e.c!=t.c)return elN(e.c,t.c);if(e.b==t.b||eiS(e.b,t.b)){if(n=Tu(e.b)?1:-1,e.a&&!t.a)return n;if(!e.a&&t.a)return-n}return ME(e.b.g,t.b.g)}function e_O(e,t){var n;ewG(t,"Hierarchical port position processing",1),(n=e.b).c.length>0&&eI6((GK(0,n.c.length),Pp(n.c[0],29)),e),n.c.length>1&&eI6(Pp(RJ(n,n.c.length-1),29),e),eEj(t)}function e_A(e,t){var n,r,i;if(e_Y(e,t))return!0;for(r=new fz(t);r.a=(i=e.Vi())||t<0)throw p7(new gE(eXU+t+eXH+i));if(n>=i||n<0)throw p7(new gE(eX$+n+eXH+i));return t!=n?(a=e.Ti(n),e.Hi(t,a),a):e.Oi(n)}function e_j(e){var t,n,r;if(r=e,e)for(t=0,n=e.Ug();n;n=n.Ug()){if(++t>eH6)return 
e_j(n);if(r=n,n==e)throw p7(new gC("There is a cycle in the containment hierarchy of "+e))}return r}function e_F(e){var t,n,r;for(r=new eaP(eUd,"[","]"),n=e.Kc();n.Ob();)ZJ(r,xc(t=n.Pb())===xc(e)?"(this Collection)":null==t?eUg:efF(t));return r.a?0==r.e.length?r.a.a:r.a.a+""+r.e:r.c}function e_Y(e,t){var n,r;if(r=!1,2>t.gc())return!1;for(n=0;n=e.charCodeAt(r));)++r;for(t=n;t>r&&(GV(t-1,e.length),32>=e.charCodeAt(t-1));)--t;return r>0||t1&&(e.j.b+=e.e)):(e.j.a+=n.a,e.j.b=eB4.Math.max(e.j.b,n.b),e.d.c.length>1&&(e.j.a+=e.e))}function e_z(){e_z=A,tec=eow(vx(e6a,1),eGj,61,0,[(eYu(),tbw),tby,tbj]),teu=eow(vx(e6a,1),eGj,61,0,[tby,tbj,tbY]),tel=eow(vx(e6a,1),eGj,61,0,[tbj,tbY,tbw]),tef=eow(vx(e6a,1),eGj,61,0,[tbY,tbw,tby])}function e_G(e,t,n,r){var i,a,o,s,u,c,l;if(o=e.c.d,s=e.d.d,o.j!=s.j)for(l=e.b,i=o.j,u=null;i!=s.j;)u=0==t?elI(i):elL(i),P7(r,C5(a=epd(i,l.d[i.g],n),c=epd(u,l.d[u.g],n))),i=u}function e_W(e,t,n,r){var i,a,o,s,u;return o=egN(e.a,t,n),s=Pp(o.a,19).a,a=Pp(o.b,19).a,r&&(u=Pp(e_k(t,(eBU(),tng)),10),i=Pp(e_k(n,tng),10),u&&i&&(V6(e.b,u,i),s+=e.b.i,a+=e.b.e)),s>a}function e_K(e){var t,n,r,i,a,o,s,u,c;for(r=0,this.a=ebb(e),this.b=new p0,i=(n=e).length;rL7(e.d).c?(e.i+=e.g.c,ed3(e.d)):L7(e.d).c>L7(e.g).c?(e.e+=e.d.c,ed3(e.g)):(e.i+=R6(e.g),e.e+=R6(e.d),ed3(e.g),ed3(e.d))}function e_X(e,t,n){var r,i,a,o;for(a=t.q,o=t.r,new GT((Xa(),tuU),t,a,1),new GT(tuU,a,o,1),i=new fz(n);i.as&&(u=s/r),i>a&&(c=a/i),o=eB4.Math.min(u,c),e.a+=o*(t.a-e.a),e.b+=o*(t.b-e.b)}function e_5(e,t,n,r,i){var a,o;for(o=!1,a=Pp(RJ(n.b,0),33);eNK(e,t,a,r,i)&&(o=!0,eyL(n,a),0!=n.b.c.length);)a=Pp(RJ(n.b,0),33);return 0==n.b.c.length&&eva(n.j,n),o&&emG(t.q),o}function e_6(e,t){var n,r,i,a;if(eLG(),t.b<2)return!1;for(r=n=Pp(Vv(a=epL(t,0)),8);a.b!=a.d.c;){if(eOV(e,r,i=Pp(Vv(a),8)))return!0;r=i}return!!eOV(e,r,n)}function e_9(e,t,n,r){var i,a;return 0==n?(e.o||(e.o=new JY((eBa(),tmy),e6O,e,0)),Iz(e.o,t,r)):(a=Pp(ee2((i=Pp(eaS(e,16),26))||e.zh(),n),66)).Nj().Rj(e,ehH(e),n-Y1(e.zh()),t,r)}function 
e_8(e,t){var n;t!=e.sb?(n=null,e.sb&&(n=Pp(e.sb,49).ih(e,1,e6w,n)),t&&(n=Pp(t,49).gh(e,1,e6w,n)),(n=ecY(e,t,n))&&n.Fi()):(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,4,t,t))}function e_7(e,t){var n,r,i,a;if(t)i=enm(t,"x"),enr((n=new pa(e)).a,(BJ(i),i)),a=enm(t,"y"),enc((r=new po(e)).a,(BJ(a),a));else throw p7(new gK("All edge sections need an end point."))}function eEe(e,t){var n,r,i,a;if(t)i=enm(t,"x"),enu((n=new pn(e)).a,(BJ(i),i)),a=enm(t,"y"),enl((r=new pr(e)).a,(BJ(a),a));else throw p7(new gK("All edge sections need a start point."))}function eEt(e,t){var n,r,i,a,o,s,u;for(r=es1(e),a=0,s=r.length;a>22-t,i=e.h<>22-t):t<44?(n=0,r=e.l<>44-t):(n=0,r=0,i=e.l<e))return 0==t||t==e?1:0==e?0:ev6(e)/(ev6(t)*ev6(e-t));throw p7(new gL("k must be smaller than n"))}function eEh(e,t){var n,r,i,a;for(n=new TY(e);null!=n.g||n.c?null==n.g||0!=n.i&&Pp(n.g[n.i-1],47).Ob():zW(n);)if(M4(a=Pp(eM5(n),56),160))for(i=0,r=Pp(a,160);i>4],t[2*n+1]=tv0[15&a];return ehv(t,0,t.length)}function eEA(e){var t,n,r;switch(U_(),r=e.c.length){case 0:return e0p;case 1:return P2((t=Pp(ekM(new fz(e)),42)).cd(),t.dd());default:return n=Pp(epg(e,Je(e1$,eUK,42,e.c.length,0,1)),165),new gt(n)}}function eEL(e){var t,n,r,i,a,o;for(t=new p1,n=new p1,Vw(t,e),Vw(n,e);n.b!=n.c;)for(i=Pp(Yn(n),37),o=new fz(i.a);o.a0&&eIl(e,n,t),i):exV(e,t,n)}function eEN(e,t,n){var r,i,a,o;if(0!=t.b){for(r=new _n,o=epL(t,0);o.b!=o.d.c;)er7(r,eoO(a=Pp(Vv(o),86))),(i=a.e).a=Pp(e_k(a,(eR6(),tcg)),19).a,i.b=Pp(e_k(a,tcv),19).a;eEN(e,r,eiI(n,r.b/e.a|0))}}function eEP(e,t){var n,r,i,a,o;if(e.e<=t||Wm(e,e.g,t))return e.g;for(a=e.r,r=e.g,o=e.r,i=(a-r)/2+r;r+11&&(e.e.b+=e.a)):(e.e.a+=n.a,e.e.b=eB4.Math.max(e.e.b,n.b),e.d.c.length>1&&(e.e.a+=e.a))}function eEH(e){var t,n,r,i;switch(t=(i=e.i).b,r=i.j,n=i.g,i.a.g){case 0:n.a=(e.g.b.o.a-r.a)/2;break;case 1:n.a=t.d.n.a+t.d.a.a;break;case 2:n.a=t.d.n.a+t.d.a.a-r.a;break;case 3:n.b=t.d.n.b+t.d.a.b}}function eE$(e,t,n,r,i){if(rr&&(e.a=r),e.bi&&(e.b=i),e}function eEz(e){if(M4(e,149))return 
eAi(Pp(e,149));if(M4(e,229))return efZ(Pp(e,229));if(M4(e,23))return eEa(Pp(e,23));throw p7(new gL(eXx+e_F(new g$(eow(vx(e1R,1),eUp,1,5,[e])))))}function eEG(e,t,n,r,i){var a,o,s;for(o=0,a=!0;o>>i|n[o+r+1]<>>i,++o}return a}function eEW(e,t,n,r){var i,a,o;if(t.k==(eEn(),e8D)){for(a=new Fa(OH(efu(t).a.Kc(),new c));eTk(a);)if((o=(i=Pp(ZC(a),17)).c.i.k)==e8D&&e.c.a[i.c.i.c.p]==r&&e.c.a[t.c.p]==n)return!0}return!1}function eEK(e,t){var n,r,i,a;return t&=63,n=e.h&eH$,t<22?(a=n>>>t,i=e.m>>t|n<<22-t,r=e.l>>t|e.m<<22-t):t<44?(a=0,i=n>>>t-22,r=e.m>>t-22|e.h<<44-t):(a=0,i=0,r=n>>>t-44),Mk(r&eHH,i&eHH,a&eH$)}function eEV(e,t,n,r){var i;this.b=r,this.e=e==(enU(),tui),i=t[n],this.d=RF(tyE,[eUP,e$5],[177,25],16,[i.length,i.length],2),this.a=RF(ty_,[eUP,eHT],[48,25],15,[i.length,i.length],2),this.c=new ewo(t,n)}function eEq(e){var t,n,r;for(e.k=new G$((eYu(),eow(vx(e6a,1),eGj,61,0,[tbF,tbw,tby,tbj,tbY])).length,e.j.c.length),r=new fz(e.j);r.a=n)return eE5(e,t,r.p),!0;return!1}function eE1(e){var t;return(64&e.Db)!=0?eEp(e):(t=new O0(eZ$),e.a&&xM(xM((t.a+=' "',t),e.a),'"'),xM(yW(xM(yW(xM(yW(xM(yW((t.a+=" (",t),e.i),","),e.j)," | "),e.g),","),e.f),")"),t.a)}function eE0(e,t,n){var r,i,a,o,s;for(o=0,s=eAY(e.e.Tg(),t),i=Pp(e.g,119),r=0;on?eS1(e,n,"start index"):t<0||t>n?eS1(t,n,"end index"):eCG("end index (%s) must not be less than start index (%s)",eow(vx(e1R,1),eUp,1,5,[ell(t),ell(e)]))}function eE4(e,t){var n,r,i,a;for(r=0,i=e.length;r0&&eE9(e,a,n));t.p=0}function eE8(e){var t;this.c=new _n,this.f=e.e,this.e=e.d,this.i=e.g,this.d=e.c,this.b=e.b,this.k=e.j,this.a=e.a,e.i?this.j=e.i:this.j=(t=Pp(yw(e5Q),9),new I1(t,Pp(CY(t,t.length),9),0)),this.g=e.f}function eE7(e){var t,n,r,i;for(t=Bd(xM(new O0("Predicates."),"and"),40),n=!0,i=new fE(e);i.b0?s[o-1]:Je(e4N,eGW,10,0,0,1),i=s[o],c=o=0?e.Bh(i):ekN(e,r);else throw p7(new gL(eZV+r.ne()+eZq))}else throw p7(new gL(eZJ+t+eZQ))}else ec6(e,n,r)}function eSa(e){var 
t,n;if(n=null,t=!1,M4(e,204)&&(t=!0,n=Pp(e,204).a),!t&&M4(e,258)&&(t=!0,n=""+Pp(e,258).a),!t&&M4(e,483)&&(t=!0,n=""+Pp(e,483).a),!t)throw p7(new gk(eXE));return n}function eSo(e,t){var n,r;if(!e.f)return t.Ob();for(;t.Ob();)if(M4(r=(n=Pp(t.Pb(),72)).ak(),99)&&(Pp(r,18).Bb&eZ1)!=0&&(!e.e||r.Gj()!=e6d||0!=r.aj())&&null!=n.dd())return t.Ub(),!0;return!1}function eSs(e,t){var n,r;if(!e.f)return t.Sb();for(;t.Sb();)if(M4(r=(n=Pp(t.Ub(),72)).ak(),99)&&(Pp(r,18).Bb&eZ1)!=0&&(!e.e||r.Gj()!=e6d||0!=r.aj())&&null!=n.dd())return t.Pb(),!0;return!1}function eSu(e,t,n){var r,i,a,o,s,u;for(o=0,u=eAY(e.e.Tg(),t),r=0,s=e.i,i=Pp(e.g,119);o1&&(t.c[t.c.length]=a)}function eSf(e){var t,n,r,i;for(er7(n=new _n,e.o),r=new mc;0!=n.b;)(i=eYP(e,t=Pp(0==n.b?null:(A6(0!=n.b),etw(n,n.a.a)),508),!0))&&P_(r.a,t);for(;0!=r.a.c.length;)eYP(e,t=Pp(euO(r),508),!1)}function eSd(){eSd=A,tdS=new ks(ezo,0),tdm=new ks("BOOLEAN",1),tdw=new ks("INT",2),tdE=new ks("STRING",3),tdg=new ks("DOUBLE",4),tdv=new ks("ENUM",5),tdy=new ks("ENUMSET",6),td_=new ks("OBJECT",7)}function eSh(e,t){var n,r,i,a,o;r=eB4.Math.min(e.c,t.c),a=eB4.Math.min(e.d,t.d),i=eB4.Math.max(e.c+e.b,t.c+t.b),o=eB4.Math.max(e.d+e.a,t.d+t.a),i=(i/2|0))for(this.e=r?r.c:null,this.d=i;n++0;)Gi(this);this.b=t,this.a=null}function eSk(e,t){var n,r;t.a?eAk(e,t):((n=Pp(Ik(e.b,t.b),57))&&n==e.a[t.b.f]&&n.a&&n.a!=t.b.a&&n.c.Fc(t.b),(r=Pp(IS(e.b,t.b),57))&&e.a[r.f]==t.b&&r.a&&r.a!=t.b.a&&t.b.c.Fc(r),Ai(e.b,t.b))}function eSx(e,t){var n,r;if(n=Pp(UA(e.b,t),124),Pp(Pp(Zq(e.r,t),21),84).dc()){n.n.b=0,n.n.c=0;return}n.n.b=e.C.b,n.n.c=e.C.c,e.A.Hc((ed6(),tbq))&&eCD(e,t),r=ebi(e,t),eLZ(e,t)==(epT(),tbt)&&(r+=2*e.w),n.a.a=r}function eST(e,t){var n,r;if(n=Pp(UA(e.b,t),124),Pp(Pp(Zq(e.r,t),21),84).dc()){n.n.d=0,n.n.a=0;return}n.n.d=e.C.d,n.n.a=e.C.a,e.A.Hc((ed6(),tbq))&&eCN(e,t),r=eba(e,t),eLZ(e,t)==(epT(),tbt)&&(r+=2*e.w),n.a.b=r}function eSM(e,t){var n,r,i,a;for(a=new p0,r=new fz(t);r.aeB4.Math.abs(r-i))}function eSU(e,t,n){var 
r,i,a,o,s,u;if(null!=(s=Pp(eaS(e.a,8),1936)))for(a=0,o=(i=s).length;an.a&&(r.Hc((eyY(),tdW))?i=(t.a-n.a)/2:r.Hc(tdV)&&(i=t.a-n.a)),t.b>n.b&&(r.Hc((eyY(),tdZ))?a=(t.b-n.b)/2:r.Hc(tdq)&&(a=t.b-n.b)),e_g(e,i,a)}function eSJ(e,t,n,r,i,a,o,s,u,c,l,f,d){M4(e.Cb,88)&&eko(Zd(Pp(e.Cb,88)),4),er3(e,n),e.f=o,elY(e,s),elU(e,u),elF(e,c),elB(e,l),els(e,f),elZ(e,d),eli(e,!0),end(e,i),e.ok(a),eu2(e,t),null!=r&&(e.i=null,erA(e,r))}function eSQ(e){var t,n;if(!e.f)return e.n>0;for(;e.n>0;){if(M4(n=(t=Pp(e.k.Xb(e.n-1),72)).ak(),99)&&(Pp(n,18).Bb&eZ1)!=0&&(!e.e||n.Gj()!=e6d||0!=n.aj())&&null!=t.dd())return!0;--e.n}return!1}function eS1(e,t,n){if(e<0)return eCG(eUh,eow(vx(e1R,1),eUp,1,5,[n,ell(e)]));if(!(t<0))return eCG("%s (%s) must not be greater than size (%s)",eow(vx(e1R,1),eUp,1,5,[n,ell(e),ell(t)]));throw p7(new gL(eUb+t))}function eS0(e,t,n,r,i,a){var o,s,u,c;if((o=r-n)<7){efA(t,n,r,a);return}if(c=(u=n+i)+((s=r+i)-u>>1),eS0(t,e,u,c,-i,a),eS0(t,e,c,s,-i,a),0>=a.ue(e[c-1],e[c])){for(;n=0?e.sh(a,n):eOh(e,i,n);else throw p7(new gL(eZV+i.ne()+eZq))}else throw p7(new gL(eZJ+t+eZQ))}else efL(e,r,i,n)}function eS6(e){var t,n,r,i;if(n=Pp(e,49).qh())try{if(r=null,(t=eMC((_Q(),tgp),eDv(efR(n))))&&(i=t.rh())&&(r=i.Wk(gF(n.e))),r&&r!=e)return eS6(r)}catch(a){if(a=eoa(a),!M4(a,60))throw p7(a)}return e}function eS9(e,t,n){var r,i,a,o;if(o=null==t?0:e.b.se(t),0==(i=null==(r=e.a.get(o))?[]:r).length)e.a.set(o,i);else if(a=euj(e,t,i))return a.ed(n);return Bc(i,i.length,new EE(t,n)),++e.c,$c(e.b),null}function eS8(e,t){var n,r;return Kx(e.a),Yb(e.a,(erZ(),tcq),tcq),Yb(e.a,tcZ,tcZ),r=new K2,RI(r,tcZ,(efx(),tc1)),xc(eT8(t,(egj(),tlf)))!==xc((eub(),tc5))&&RI(r,tcZ,tcJ),RI(r,tcZ,tcQ),Tb(e.a,r),n=eRq(e.a,t)}function eS7(e){if(!e)return g3(),e0M;var t=e.valueOf?e.valueOf():e;if(t!==e){var n=e0O[typeof t];return n?n(t):euV(typeof t)}return e instanceof Array||e instanceof eB4.Array?new lL(e):new lD(e)}function eke(e,t,n){var 
r,i,a;switch(a=e.o,(i=(r=Pp(UA(e.p,n),244)).i).b=ek0(r),i.a=ek1(r),i.b=eB4.Math.max(i.b,a.a),i.b>a.a&&!t&&(i.b=a.a),i.c=-(i.b-a.a)/2,n.g){case 1:i.d=-i.a;break;case 3:i.d=a.b}eNE(r),eNM(r)}function ekt(e,t,n){var r,i,a;switch(a=e.o,(i=(r=Pp(UA(e.p,n),244)).i).b=ek0(r),i.a=ek1(r),i.a=eB4.Math.max(i.a,a.b),i.a>a.b&&!t&&(i.a=a.b),i.d=-(i.a-a.b)/2,n.g){case 4:i.c=-i.b;break;case 2:i.c=a.a}eNE(r),eNM(r)}function ekn(e,t){var n,r,i,a,o;if(!t.dc()){if(i=Pp(t.Xb(0),128),1==t.gc()){eA1(e,i,i,1,0,t);return}for(n=1;n0)try{i=eDa(t,eHt,eUu)}catch(a){if(a=eoa(a),M4(a,127))throw r=a,p7(new QH(r));throw p7(a)}return i<(n=(e.a||(e.a=new pK(e)),e.a)).i&&i>=0?Pp(etj(n,i),56):null}function eku(e,t){if(e<0)return eCG(eUh,eow(vx(e1R,1),eUp,1,5,["index",ell(e)]));if(!(t<0))return eCG("%s (%s) must be less than size (%s)",eow(vx(e1R,1),eUp,1,5,["index",ell(e),ell(t)]));throw p7(new gL(eUb+t))}function ekc(e){var t,n,r,i,a;if(null==e)return eUg;for(r=0,a=new eaP(eUd,"[","]"),i=(n=e).length;re.a.ue(RJ(e.b,o),RJ(e.b,a))&&(s=o),s),!(0>e.a.ue(i,RJ(e.b,r))));)q1(e.b,t,RJ(e.b,r)),t=r;q1(e.b,t,i)}function ekp(e,t,n,r,i,a){var o,s,u,c,l;for(xc(e)===xc(n)&&(e=e.slice(t,t+i),t=0),u=n,s=t,c=t+i;s0)for(o=e.c.d,s=e.d.d,i=Ol(C6(new kl(s.a,s.b),o),1/(r+1)),a=new kl(o.a,o.b),n=new fz(e.a);n.a=0?e._g(n,!0,!0):exk(e,i,!0),153),Pp(r,215).ol(t);else throw p7(new gL(eZV+t.ne()+eZq))}function ekP(e){var t,n;return e>-140737488355328&&e<0x800000000000?0==e?0:((t=e<0)&&(e=-e),n=zy(eB4.Math.floor(eB4.Math.log(e)/.6931471805599453)),(!t||e!=eB4.Math.pow(2,n))&&++n,n):eaJ(eap(e))}function ekR(e){var t,n,r,i,a,o,s;for(a=new Tw,n=new fz(e);n.a2&&s.e.b+s.j.b<=2&&(i=s,r=o),a.a.zc(i,a),i.q=r);return a}function ekj(e,t){var n,r,i;return r=new eb$(e),eaW(r,t),eo3(r,(eBU(),ttQ),t),eo3(r,(eBy(),tol),(ewf(),tbo)),eo3(r,tiq,(ebx(),tdA)),lK(r,(eEn(),e8C)),n=new eES,Gc(n,r),ekv(n,(eYu(),tbY)),i=new eES,Gc(i,r),ekv(i,tby),r}function ekF(e){switch(e.g){case 0:return new gx((enU(),tur));case 1:return new cC;case 2:return new 
cF;default:throw p7(new gL("No implementation is available for the crossing minimizer "+(null!=e.f?e.f:""+e.g)))}}function ekY(e,t){var n,r,i,a,o;for(e.c[t.p]=!0,P_(e.a,t),o=new fz(t.j);o.a=(a=o.gc()))o.$b();else for(r=0,i=o.Kc();r0?g5():o<0&&ekJ(e,t,-o),!0)}function ek1(e){var t,n,r,i,a,o,s;if(s=0,0==e.b){for(i=0,o=eb4(e,!0),t=0,a=(r=o).length;i0&&(s+=n,++t);t>1&&(s+=e.c*(t-1))}else s=vy(eib(U1(UJ(Yw(e.a),new eS),new ek)));return s>0?s+e.n.d+e.n.a:0}function ek0(e){var t,n,r,i,a,o,s;if(s=0,0==e.b)s=vy(eib(U1(UJ(Yw(e.a),new e_),new eE)));else{for(i=0,o=eb5(e,!0),t=0,a=(r=o).length;i0&&(s+=n,++t);t>1&&(s+=e.c*(t-1))}return s>0?s+e.n.b+e.n.c:0}function ek2(e,t){var n,r,i,a;for(n=(a=Pp(UA(e.b,t),124)).a,i=Pp(Pp(Zq(e.r,t),21),84).Kc();i.Ob();)(r=Pp(i.Pb(),111)).c&&(n.a=eB4.Math.max(n.a,Rd(r.c)));if(n.a>0)switch(t.g){case 2:a.n.c=e.s;break;case 4:a.n.b=e.s}}function ek3(e,t){var n,r,i;return 0==(n=Pp(e_k(t,(eCk(),e9M)),19).a-Pp(e_k(e,e9M),19).a)?(r=C6(MB(Pp(e_k(e,(erV(),e9P)),8)),Pp(e_k(e,e9R),8)),i=C6(MB(Pp(e_k(t,e9P),8)),Pp(e_k(t,e9R),8)),elN(r.a*r.b,i.a*i.b)):n}function ek4(e,t){var n,r,i;return 0==(n=Pp(e_k(t,(eTj(),tcD)),19).a-Pp(e_k(e,tcD),19).a)?(r=C6(MB(Pp(e_k(e,(eR6(),tce)),8)),Pp(e_k(e,tct),8)),i=C6(MB(Pp(e_k(t,tce),8)),Pp(e_k(t,tct),8)),elN(r.a*r.b,i.a*i.b)):n}function ek5(e){var t,n;return n=new vc,n.a+="e_",null!=(t=eaZ(e))&&(n.a+=""+t),e.c&&e.d&&(xM((n.a+=" ",n),egu(e.c)),xM(xT((n.a+="[",n),e.c.i),"]"),xM((n.a+=eGH,n),egu(e.d)),xM(xT((n.a+="[",n),e.d.i),"]")),n.a}function ek6(e){switch(e.g){case 0:return new cD;case 1:return new cN;case 2:return new cI;case 3:return new cP;default:throw p7(new gL("No implementation is available for the layout phase "+(null!=e.f?e.f:""+e.g)))}}function ek9(e,t,n,r,i){var a;switch(a=0,i.g){case 1:a=eB4.Math.max(0,t.b+e.b-(n.b+r));break;case 3:a=eB4.Math.max(0,-e.b-r);break;case 2:a=eB4.Math.max(0,-e.a-r);break;case 4:a=eB4.Math.max(0,t.a+e.a-(n.a+r))}return a}function ek8(e,t,n){var r,i,a,o,s;if(n)for(i=n.a.length,s=((r=new 
Fs(i)).b-r.a)*r.c<0?(_9(),eB3):new OR(r);s.Ob();)eXh in(a=KZ(n,(o=Pp(s.Pb(),19)).a)).a||eXp in a.a?eId(e,a,t):eBe(e,a,t),Om(Pp(Bp(e.b,ehM(a)),79))}function ek7(e){var t,n;switch(e.b){case -1:return!0;case 0:if((n=e.t)>1||-1==n||(t=evl(e))&&(_4(),t.Cj()==eJK))return e.b=-1,!0;return e.b=1,!1;default:return!1}}function exe(e,t){var n,r,i,a,o;for(i=0,r=(t.s||(t.s=new FQ(tm6,t,21,17)),t.s),a=null,o=r.i;i=0&&r=0?e._g(n,!0,!0):exk(e,i,!0),153),Pp(r,215).ll(t);throw p7(new gL(eZV+t.ne()+eZX))}function exc(){var e;return(_6(),tg9)?Pp(eMC((_Q(),tgp),eQc),1939):(x2(e1$,new ut),ej9(),e=Pp(M4(zg((_Q(),tgp),eQc),547)?zg(tgp,eQc):new Uh,547),tg9=!0,eBY(e),eB0(e),Um((_1(),tgm),e,new sM),Ge(tgp,eQc,e),e)}function exl(e,t){var n,r,i,a;e.j=-1,TO(e.e)?(n=e.i,a=0!=e.i,Zz(e,t),r=new Q$(e.e,3,e.c,null,t,n,a),i=t.Qk(e.e,e.c,null),(i=ey1(e,t,i))?(i.Ei(r),i.Fi()):eam(e.e,r)):(Zz(e,t),(i=t.Qk(e.e,e.c,null))&&i.Fi())}function exf(e,t){var n,r,i;if(i=0,(r=t[0])>=e.length)return -1;for(n=(GV(r,e.length),e.charCodeAt(r));n>=48&&n<=57&&(i=10*i+(n-48),!(++r>=e.length));)n=(GV(r,e.length),e.charCodeAt(r));return r>t[0]?t[0]=r:i=-1,i}function exd(e){var t,n,r,i,a;return i=Pp(e.a,19).a,a=Pp(e.b,19).a,n=i,r=a,t=eB4.Math.max(eB4.Math.abs(i),eB4.Math.abs(a)),i<=0&&i==a?(n=0,r=a-1):i==-t&&a!=t?(n=a,r=i,a>=0&&++n):(n=-a,r=i),new kD(ell(n),ell(r))}function exh(e,t,n,r){var i,a,o,s,u,c;for(i=0;i=0&&c>=0&&u=e.i)throw p7(new gE(eXU+t+eXH+e.i));if(n>=e.i)throw p7(new gE(eX$+n+eXH+e.i));return r=e.g[n],t!=n&&(t>16))>>16&16),e>>=t,n+=t=(r=e-256)>>16&8,e<<=t,n+=t=(r=e-eH0)>>16&4,e<<=t,n+=t=(r=e-eUR)>>16&2,e<<=t,n+2-(t=(r=e>>14)&~(r>>1)))}function exy(e){var t,n,r,i;for(HR(),e9n=new p0,e9t=new p2,e9e=new p0,t=(e.a||(e.a=new FQ(e6k,e,10,11)),e.a),eYE(t),i=new Ow(t);i.e!=i.i.gc();)r=Pp(epH(i),33),-1==QI(e9n,r,0)&&(n=new p0,P_(e9e,n),epi(r,n));return e9e}function exw(e,t,n){var r,i,a,o;e.a=n.b.d,M4(t,352)?(i=eLO(Pp(t,79),!1,!1),a=eEF(i),qX(a,r=new 
d_(e)),eNI(a,i),null!=t.We((eBB(),thg))&&qX(Pp(t.We(thg),74),r)):((o=Pp(t,470)).Hg(o.Dg()+e.a.a),o.Ig(o.Eg()+e.a.b))}function ex_(e,t){var n,r,i,a,o,s,u,c;for(s=1,c=gP(LV(e_k(t,(eBy(),toH)))),u=e[0].n.a+e[0].o.a+e[0].d.c+c;s=0)?n:(s=B$(C6(new kl(o.c+o.b/2,o.d+o.a/2),new kl(a.c+a.b/2,a.d+a.a/2))),-(eDz(a,o)-1)*s)}function exS(e,t,n){var r;_r(new R1(null,(n.a||(n.a=new FQ(e6v,n,6,6)),new Gq(n.a,16))),new kC(e,t)),_r(new R1(null,(n.n||(n.n=new FQ(e6S,n,1,7)),new Gq(n.n,16))),new kI(e,t)),(r=Pp(eT8(n,(eBB(),thg)),74))&&eil(r,e,t)}function exk(e,t,n){var r,i,a;if(a=eR3((eSp(),tvc),e.Tg(),t))return _4(),Pp(a,66).Oj()||(a=Wk(QZ(tvc,a))),i=Pp((r=e.Yg(a))>=0?e._g(r,!0,!0):exk(e,a,!0),153),Pp(i,215).hl(t,n);throw p7(new gL(eZV+t.ne()+eZX))}function exx(e,t,n,r){var i,a,o,s,u;if(i=e.d[t]){if(a=i.g,u=i.i,null!=r){for(s=0;s=n&&(r=t,o=(c=(u.c+u.a)/2)-n,u.c<=c-n&&(i=new N4(u.c,o),jO(e,r++,i)),(s=c+n)<=u.a&&(a=new N4(s,u.a),Gp(r,e.c.length),Ew(e.c,r,a)))}function exI(e){var t;if(e.c||null!=e.g){if(null==e.g)return!0;if(0==e.i)return!1;t=Pp(e.g[e.i-1],47)}else e.d=e.si(e.f),JL(e,e.d),t=e.d;return t==e.b&&null.km>=null.jm()?(eM5(e),exI(e)):t.Ob()}function exD(e,t,n){var r,i,a,o,s;if((s=n)||(s=P6(new mV,0)),ewG(s,eGA,1),ejY(e.c,t),1==(o=ejz(e.a,t)).gc())eRd(Pp(o.Xb(0),37),s);else for(a=1/o.gc(),i=o.Kc();i.Ob();)eRd(r=Pp(i.Pb(),37),eiI(s,a));vi(e.a,o,t),eL7(t),eEj(s)}function exN(e){if(this.a=e,e.c.i.k==(eEn(),e8C))this.c=e.c,this.d=Pp(e_k(e.c.i,(eBU(),tt1)),61);else if(e.d.i.k==e8C)this.c=e.d,this.d=Pp(e_k(e.d.i,(eBU(),tt1)),61);else throw p7(new gL("Edge "+e+" is not an external edge."))}function exP(e,t){var n,r,i;i=e.b,e.b=t,(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,3,i,e.b)),t?t!=e&&(er3(e,t.zb),enf(e,t.d),erc(e,null==(n=null==(r=t.c)?t.zb:r)||IE(n,t.zb)?null:n)):(er3(e,null),enf(e,0),erc(e,null))}function exR(e){var t,n;if(!e.f)return e.n=(o=null==(n=Pp(eaS(e.a,4),126))?0:n.length))throw p7(new Ii(t,o));return 
i=n[t],1==o?r=null:(r=Je(e6N,eJM,415,o-1,0,1),ePD(n,0,r,0,t),(a=o-t-1)>0&&ePD(n,t+1,r,t,a)),eps(e,r),eSU(e,t,i),i}function ex$(){ex$=A,tvw=Pp(etj(H9((yL(),tvS).qb),6),34),tvg=Pp(etj(H9(tvS.qb),3),34),tvv=Pp(etj(H9(tvS.qb),4),34),tvy=Pp(etj(H9(tvS.qb),5),18),eyD(tvw),eyD(tvg),eyD(tvv),eyD(tvy),tv_=new g$(eow(vx(tm6,1),eJ4,170,0,[tvw,tvg]))}function exz(e,t){var n;this.d=new mh,this.b=t,this.e=new TS(t.qf()),n=e.u.Hc((ekU(),tbb)),e.u.Hc(tbp)?e.D?this.a=n&&!t.If():this.a=!0:e.u.Hc(tbm)&&n?this.a=!(t.zf().Kc().Ob()||t.Bf().Kc().Ob()):this.a=!1}function exG(e,t){var n,r,i,a;for(n=e.o.a,a=Pp(Pp(Zq(e.r,t),21),84).Kc();a.Ob();)(i=Pp(a.Pb(),111)).e.a=(r=i.b).Xe((eBB(),thK))?r.Hf()==(eYu(),tbY)?-r.rf().a-gP(LV(r.We(thK))):n+gP(LV(r.We(thK))):r.Hf()==(eYu(),tbY)?-r.rf().a:n}function exW(e,t){var n,r,i,a;n=Pp(e_k(e,(eBy(),tal)),103),a=Pp(eT8(t,tob),61),(i=Pp(e_k(e,tol),98))!=(ewf(),tbc)&&i!=tbl?a==(eYu(),tbF)&&(a=eNh(t,n))==tbF&&(a=ef9(n)):a=(r=eRl(t))>0?ef9(n):elC(ef9(n)),ebu(t,tob,a)}function exK(e,t){var n,r,i,a,o;for(o=e.j,t.a!=t.b&&Mv(o,new ia),i=o.c.length/2|0,r=0;r0&&eIl(e,n,t),a):null!=r.a?(eIl(e,t,n),-1):null!=i.a?(eIl(e,n,t),1):0}function exq(e,t){var n,r,i,a;e.ej()?(n=e.Vi(),a=e.fj(),++e.j,e.Hi(n,e.oi(n,t)),r=e.Zi(3,null,t,n,a),e.bj()&&(i=e.cj(t,null))?(i.Ei(r),i.Fi()):e.$i(r)):(BD(e,t),e.bj()&&(i=e.cj(t,null))&&i.Fi())}function exZ(e,t){var n,r,i,a,o;for(o=eAY(e.e.Tg(),t),i=new o7,n=Pp(e.g,119),a=e.i;--a>=0;)r=n[a],o.rl(r.ak())&&JL(i,r);!eYK(e,i)&&TO(e.e)&&bz(e,t.$j()?$N(e,6,t,(Hj(),e2r),null,-1,!1):$N(e,t.Kj()?2:1,t,null,null,-1,!1))}function exX(){var e,t;for(t=0,exX=A,e2t=Je(e0t,eUP,91,32,0,1),e2n=Je(e0t,eUP,91,32,0,1),e=1;t<=18;t++)e2t[t]=ep_(e),e2n[t]=ep_(Fg(e,t)),e=efn(e,5);for(;to)))&&(!t.q||(o=(r=t.C).c.c.a-r.o.a/2,!((i=r.n.a-n)>o))))}function exQ(e,t){var n;ewG(t,"Partition preprocessing",1),n=Pp(qE(UJ(eeh(UJ(new R1(null,new Gq(e.a,16)),new nZ),new nX),new nJ),JF(new U,new B,new en,eow(vx(e2L,1),eU4,132,0,[(eum(),e2U)]))),15),_r(n.Oc(),new 
nQ),eEj(t)}function ex1(e){var t,n,r,i,a,o,s;for(Gk(),n=new qh,i=new fz(e.e.b);i.a1?e.e*=gP(e.a):e.f/=gP(e.a),eu0(e),ehK(e),eCj(e),eo3(e.b,(epz(),e62),e.g)}function ex9(e,t,n){var r,i,a,o,s,u;for(r=0,u=n,t||(r=n*(e.c.length-1),u*=-1),a=new fz(e);a.a=0?(!t&&(t=new vu,r>0&&xk(t,e.substr(0,r))),t.a+="\\",Bf(t,n&eHd)):t&&Bf(t,n&eHd);return t?t.a:e}function eTh(e){var t;if(!e.a)throw p7(new gC("IDataType class expected for layout option "+e.f));if(null==(t=VN(e.a)))throw p7(new gC("Couldn't create new instance of property '"+e.f+"'. "+eq4+(LW(e6D),e6D.k)+eq5));return Pp(t,414)}function eTp(e){var t,n,r,i,a;return(a=e.eh())&&a.kh()&&(i=ecv(e,a))!=a?(n=e.Vg(),r=(t=e.Vg())>=0?e.Qg(null):e.eh().ih(e,-1-t,null,null),e.Rg(Pp(i,49),n),r&&r.Fi(),e.Lg()&&e.Mg()&&n>-1&&eam(e,new FX(e,9,n,a,i)),i):a}function eTb(e){var t,n,r,i,a,o,s,u;for(r=0,o=0,a=e.f.e;r>5)>=e.d)return e.e<0;if(n=e.a[i],t=1<<(31&t),e.e<0){if(i<(r=eiU(e)))return!1;n=r==i?-n:~n}return(n&t)!=0}function eT_(e,t,n,r){var i;Pp(n.b,65),Pp(n.b,65),Pp(r.b,65),Pp(r.b,65),P9(i=C6(MB(Pp(n.b,65).c),Pp(r.b,65).c),ekg(Pp(n.b,65),Pp(r.b,65),i)),Pp(r.b,65),Pp(r.b,65),Pp(r.b,65).c.a,i.a,Pp(r.b,65).c.b,i.b,Pp(r.b,65),ety(r.a,new N9(e,t,r))}function eTE(e,t){var n,r,i,a,o,s,u;if(a=t.e){for(o=0,n=eTp(a),r=Pp(e.g,674);o>16)),15).Xc(a))0&&(Tk(e.a.c)&&t.n.d||Tx(e.a.c)&&t.n.b||(t.g.d+=eB4.Math.max(0,r/2-.5)),Tk(e.a.c)&&t.n.a||Tx(e.a.c)&&t.n.c||(t.g.a-=r-1))}function eTO(e){var t,n,r,i,a;if(i=new p0,a=eDC(e,i),t=Pp(e_k(e,(eBU(),tng)),10))for(r=new fz(t.j);r.a>t,a=e.m>>t|n<<22-t,i=e.l>>t|e.m<<22-t):t<44?(o=r?eH$:0,a=n>>t-22,i=e.m>>t-22|n<<44-t):(o=r?eH$:0,a=r?eHH:0,i=n>>t-44),Mk(i&eHH,a&eHH,o&eH$)}function eTI(e){var t,n,r,i,a,o;for(this.c=new p0,this.d=e,r=eHQ,i=eHQ,t=eH1,n=eH1,o=epL(e,0);o.b!=o.d.c;)a=Pp(Vv(o),8),r=eB4.Math.min(r,a.a),i=eB4.Math.min(i,a.b),t=eB4.Math.max(t,a.a),n=eB4.Math.max(n,a.b);this.a=new Hr(r,i,t-r,n-i)}function eTD(e,t){var n,r,i,a,o,s;for(a=new 
fz(e.b);a.a0&&M4(t,42)&&(e.a.qj(),a=null==(u=(c=Pp(t,42)).cd())?0:esj(u),o=Cb(e.a,a),n=e.a.d[o])){for(s=0,r=Pp(n.g,367),l=n.i;s=2)for(t=LV((n=i.Kc()).Pb());n.Ob();)a=t,t=LV(n.Pb()),r=eB4.Math.min(r,(BJ(t),t-(BJ(a),a)));return r}function eTX(e,t){var n,r,i,a,o;qQ(r=new _n,t,r.c.b,r.c);do for(n=(A6(0!=r.b),Pp(etw(r,r.a.a),86)),e.b[n.g]=1,a=epL(n.d,0);a.b!=a.d.c;)o=(i=Pp(Vv(a),188)).c,1==e.b[o.g]?P7(e.a,i):2==e.b[o.g]?e.b[o.g]=1:qQ(r,o,r.c.b,r.c);while(0!=r.b)}function eTJ(e,t){var n,r,i;if(xc(t)===xc(Y9(e)))return!0;if(!M4(t,15)||(r=Pp(t,15),(i=e.gc())!=r.gc()))return!1;if(!M4(r,54))return eb3(e.Kc(),r.Kc());for(n=0;n0&&(i=n),o=new fz(e.f.e);o.a0?(t-=1,n-=1):r>=0&&i<0?(t+=1,n+=1):r>0&&i>=0?(t-=1,n+=1):(t+=1,n-=1),new kD(ell(t),ell(n))}function eMf(e,t){if(e.ct.c)return 1;if(e.bt.b)return 1;if(e.a!=t.a)return esj(e.a)-esj(t.a);else if(e.d==(qG(),tuf)&&t.d==tul)return -1;else if(e.d==tul&&t.d==tuf)return 1;return 0}function eMd(e,t){var n,r,i,a,o;return(o=(a=t.a).c.i==t.b?a.d:a.c,r=a.c.i==t.b?a.c:a.d,(i=edI(e.a,o,r))>0&&i0):i<0&&-i0)}function eMh(e,t,n,r){var i,a,o,s,u,c,l,f;for(i=(t-e.d)/e.c.c.length,a=0,e.a+=n,e.d=t,f=new fz(e.c);f.a>24;return o}function eMb(e){if(e.pe()){var t=e.c;t.qe()?e.o="["+t.n:t.pe()?e.o="["+t.ne():e.o="[L"+t.ne()+";",e.b=t.me()+"[]",e.k=t.oe()+"[]";return}var n=e.j,r=e.d;r=r.split("/"),e.o=ehg(".",[n,ehg("$",r)]),e.b=ehg(".",[n,ehg(".",r)]),e.k=r[r.length-1]}function eMm(e,t){var n,r,i,a,o;for(o=null,a=new fz(e.e.a);a.a=0;t-=2)for(n=0;n<=t;n+=2)(e.b[n]>e.b[n+2]||e.b[n]===e.b[n+2]&&e.b[n+1]>e.b[n+3])&&(r=e.b[n+2],e.b[n+2]=e.b[n],e.b[n]=r,r=e.b[n+3],e.b[n+3]=e.b[n+1],e.b[n+1]=r);e.c=!0}}function eMk(e,t){var n,r,i,a,o,s,u,c;for(a=(o=1==t?e8c:e8u).a.ec().Kc();a.Ob();)for(i=Pp(a.Pb(),103),u=Pp(Zq(e.f.c,i),21).Kc();u.Ob();)switch(s=Pp(u.Pb(),46),r=Pp(s.b,81),n=(c=Pp(s.a,189)).c,i.g){case 2:case 1:r.g.d+=n;break;case 4:case 3:r.g.c+=n}}function eMx(e,t){var n,r,i,a,o,s,u,c,l;for(s=0,c=-1,l=0,u=(o=e).length;s0&&++l;++c}return l}function eMT(e){var 
t,n;return n=new O0(yx(e.gm)),n.a+="@",xM(n,(t=esj(e)>>>0).toString(16)),e.kh()?(n.a+=" (eProxyURI: ",xT(n,e.qh()),e.$g()&&(n.a+=" eClass: ",xT(n,e.$g())),n.a+=")"):e.$g()&&(n.a+=" (eClass: ",xT(n,e.$g()),n.a+=")"),n.a}function eMM(e){var t,n,r,i;if(e.e)throw p7(new gC((LW(e2J),e$j+e2J.k+e$F)));for(e.d==(ec3(),tpv)&&eF_(e,tpm),n=new fz(e.a.a);n.a>24}return n}function eMD(e,t,n){var r,i,a;if(!(i=Pp(UA(e.i,t),306))){if(i=new etr(e.d,t,n),jT(e.i,t,i),ehj(t))Od(e.a,t.c,t.b,i);else switch(a=eSv(t),r=Pp(UA(e.p,a),244),a.g){case 1:case 3:i.j=!0,gh(r,t.b,i);break;case 4:case 2:i.k=!0,gh(r,t.c,i)}}return i}function eMN(e,t,n,r){var i,a,o,s,u,c;if(s=new o7,u=eAY(e.e.Tg(),t),i=Pp(e.g,119),_4(),Pp(t,66).Oj())for(o=0;o=0)return i;for(a=1,s=new fz(t.j);s.a0&&t.ue((GK(i-1,e.c.length),Pp(e.c[i-1],10)),a)>0;)q1(e,i,(GK(i-1,e.c.length),Pp(e.c[i-1],10))),--i;GK(i,e.c.length),e.c[i]=a}n.a=new p2,n.b=new p2}function eMj(e,t,n){var r,i,a,o,s,u,c,l;for(o=0,l=(r=Pp(t.e&&t.e(),9),new I1(r,Pp(CY(r,r.length),9),0)),s=(a=u=eIk(n,"[\\[\\]\\s,]+")).length;o0&&(Tk(e.a.c)&&t.n.d||Tx(e.a.c)&&t.n.b||(t.g.d-=eB4.Math.max(0,r/2-.5)),Tk(e.a.c)&&t.n.a||Tx(e.a.c)&&t.n.c||(t.g.a+=eB4.Math.max(0,r-1)))}function eMY(e,t,n){var r,i;if((e.c-e.b&e.a.length-1)==2)t==(eYu(),tbw)||t==tby?(etf(Pp(eso(e),15),(egF(),tpV)),etf(Pp(eso(e),15),tpq)):(etf(Pp(eso(e),15),(egF(),tpq)),etf(Pp(eso(e),15),tpV));else for(i=new UN(e);i.a!=i.b;)etf(r=Pp(ecn(i),15),n)}function eMB(e,t){var n,r,i,a,o,s,u;for(i=Pb(new pL(e)),s=new KB(i,i.c.length),a=Pb(new pL(t)),u=new KB(a,a.c.length),o=null;s.b>0&&u.b>0;)if((n=(A6(s.b>0),Pp(s.a.Xb(s.c=--s.b),33)))==(r=(A6(u.b>0),Pp(u.a.Xb(u.c=--u.b),33))))o=n;else break;return o}function eMU(e,t){var n,r,i,a,o,s;return(a=e.a*e$d+1502*e.b,s=e.b*e$d+11,a+=n=eB4.Math.floor(s*e$h),s-=n*e$p,a%=e$p,e.a=a,e.b=s,t<=24)?eB4.Math.floor(e.a*e2v[t]):((r=(i=e.a*(1<=2147483648&&(r-=eH7),r)}function eMH(e,t,n){var 
r,i,a,o;WY(e,t)>WY(e,n)?(r=efr(n,(eYu(),tby)),e.d=r.dc()?0:Rk(Pp(r.Xb(0),11)),o=efr(t,tbY),e.b=o.dc()?0:Rk(Pp(o.Xb(0),11))):(i=efr(n,(eYu(),tbY)),e.d=i.dc()?0:Rk(Pp(i.Xb(0),11)),a=efr(t,tby),e.b=a.dc()?0:Rk(Pp(a.Xb(0),11)))}function eM$(e){var t,n,r,i,a,o,s;if(e&&(t=e.Hh(eQc))&&null!=(o=Lq(edW((t.b||(t.b=new L_((eBK(),tgF),tgf,t)),t.b),"conversionDelegates")))){for(s=new p0,r=eIk(o,"\\w+"),i=0,a=r.length;ie.c);o++)i.a>=e.s&&(a<0&&(a=o),s=o);return u=(e.s+e.c)/2,a>=0&&(r=eIe(e,t,a,s),u=_V((GK(r,t.c.length),Pp(t.c[r],329))),exC(t,r,n)),u}function eMK(){eMK=A,tlK=new T2((eBB(),td2),1.3),tlX=thc,tfe=new T3(15),tl7=new T2(thN,tfe),tfr=new T2(tpl,15),tlV=td9,tl3=thx,tl4=thO,tl5=thL,tl2=thS,tl6=thD,tft=thJ,tl8=(eTU(),tl$),tl0=tlU,tl9=tlH,tfn=tlG,tlJ=tlB,tlQ=thb,tl1=thm,tlZ=tlY,tlq=tlF,tfi=tlW}function eMV(e,t,n){var r,i,a,o,s,u,c;for(erl(o=a=new sa,(BJ(t),t)),c=(o.b||(o.b=new L_((eBK(),tgF),tgf,o)),o.b),u=1;u0&&eRJ(this,i)}function eMZ(e,t,n,r,i,a){var o,s,u;if(!i[t.b]){for(i[t.b]=!0,(o=r)||(o=new Z5),P_(o.e,t),u=a[t.b].Kc();u.Ob();)(s=Pp(u.Pb(),282)).d!=n&&s.c!=n&&(s.c!=t&&eMZ(e,s.c,t,o,i,a),s.d!=t&&eMZ(e,s.d,t,o,i,a),P_(o.c,s),eoc(o.d,s.b));return o}return null}function eMX(e){var t,n,r,i,a,o,s;for(t=0,i=new fz(e.e);i.a=2}function eMJ(e,t){var n,r,i,a;for(ewG(t,"Self-Loop pre-processing",1),r=new fz(e.a);r.a1)&&(t=jL(tp1,eow(vx(e6t,1),eU4,93,0,[tpQ,tp2])),!(eaC(z_(t,e))>1)&&(r=jL(tp9,eow(vx(e6t,1),eU4,93,0,[tp6,tp5])),!(eaC(z_(r,e))>1)))}function eM0(e,t){var n,r,i;return(n=t.Hh(e.a))&&null!=(i=Lq(edW((n.b||(n.b=new L_((eBK(),tgF),tgf,n)),n.b),"affiliation")))?-1==(r=O8(i,e_n(35)))?elp(e,Fr(e,etP(t.Hj())),i):0==r?elp(e,null,i.substr(1)):elp(e,i.substr(0,r),i.substr(r+1)):null}function eM2(e){var t,n,r;try{return null==e?eUg:efF(e)}catch(i){if(i=eoa(i),M4(i,102))return t=i,r=yx(esF(e))+"@"+(n=(wK(),ebh(e)>>>0)).toString(16),epa(eob(),(_g(),"Exception during lenientFormat for "+r),t),"<"+r+" threw "+yx(t.gm)+">";throw p7(i)}}function eM3(e){switch(e.g){case 0:return new 
ck;case 1:return new cy;case 2:return new _j;case 3:return new i$;case 4:return new CZ;case 5:return new cx;default:throw p7(new gL("No implementation is available for the layerer "+(null!=e.f?e.f:""+e.g)))}}function eM4(e,t,n){var r,i,a;for(a=new fz(e.t);a.a0&&(r.b.n-=r.c,r.b.n<=0&&r.b.u>0&&P7(t,r.b));for(i=new fz(e.i);i.a0&&(r.a.u-=r.c,r.a.u<=0&&r.a.n>0&&P7(n,r.a))}function eM5(e){var t,n,r,i,a;if(null==e.g&&(e.d=e.si(e.f),JL(e,e.d),e.c))return e.f;if(i=(t=Pp(e.g[e.i-1],47)).Pb(),e.e=t,(n=e.si(i)).Ob())e.d=n,JL(e,n);else for(e.d=null;!t.Ob()&&(Bc(e.g,--e.i,null),0!=e.i);)t=r=Pp(e.g[e.i-1],47);return i}function eM6(e,t){var n,r,i,a,o,s;if(i=(r=t).ak(),eLt(e.e,i)){if(i.hi()&&Vq(e,i,r.dd()))return!1}else for(a=0,s=eAY(e.e.Tg(),i),n=Pp(e.g,119);a1||n>1)return 2;return t+n==1?2:0}function eOs(e,t,n){var r,i,a,o,s;for(ewG(n,"ELK Force",1),gN(LK(eT8(t,(eCk(),e9E))))||zh(r=new df((_q(),new gM(t)))),s=eo4(t),evn(s),esO(e,Pp(e_k(s,e9v),424)),a=(o=eNx(e.a,s)).Kc();a.Ob();)i=Pp(a.Pb(),231),eIL(e.b,i,eiI(n,1/o.gc()));s=eYC(o),eYh(s),eEj(n)}function eOu(e,t){var n,r,i,a,o;if(ewG(t,"Breaking Point Processor",1),eFM(e),gN(LK(e_k(e,(eBy(),toJ))))){for(i=new fz(e.b);i.a=0?e._g(r,!0,!0):exk(e,a,!0),153),Pp(i,215).ml(t,n)}else throw p7(new gL(eZV+t.ne()+eZq))}function eOp(e,t){var n,r,i,a,o;for(r=1,n=new p0,i=eeh(new R1(null,new Gq(e,16)),new aM),a=eeh(new R1(null,new Gq(e,16)),new aO),o=QN(Xg(U1(eAa(eow(vx(e2C,1),eUp,833,0,[i,a])),new aA)));r=2*t&&P_(n,new N4(o[r-1]+t,o[r]-t));return n}function eOb(e,t,n){ewG(n,"Eades radial",1),n.n&&t&&WG(n,KS(t),(eup(),tmr)),e.d=Pp(eT8(t,(Lj(),tcV)),33),e.c=gP(LV(eT8(t,(egj(),tl_)))),e.e=ebN(Pp(eT8(t,tlE),293)),e.a=ef7(Pp(eT8(t,tlk),426)),e.b=eyp(Pp(eT8(t,tlg),340)),evY(e),n.n&&t&&WG(n,KS(t),(eup(),tmr))}function eOm(e,t,n){var r,i,a,o,s,u,c,l;if(n)for(a=n.a.length,s=((r=new Fs(a)).b-r.a)*r.c<0?(_9(),eB3):new OR(r);s.Ob();)(i=KZ(n,(o=Pp(s.Pb(),19)).a))&&(eB8=null,u=Vj(e,(c=(yT(),l=new 
mk),t&&eOL(c,t),c),i),ert(u,KJ(i,eXS)),ewU(i,u),eka(i,u),esv(e,i,u))}function eOg(e){var t,n,r,i,a,o;if(!e.j){if(o=new sd,null==(a=(t=tgz).a.zc(e,t))){for(r=new Ow($E(e));r.e!=r.i.gc();)n=Pp(epH(r),26),i=eOg(n),Y4(o,i),JL(o,n);t.a.Bc(e)}euI(o),e.j=new xQ((Pp(etj(H9((BM(),tgv).o),11),18),o.i),o.g),Zd(e).b&=-33}return e.j}function eOv(e){var t,n,r,i;if(null==e)return null;if(r=ePh(e,!0),i=eQq.length,IE(r.substr(r.length-i,i),eQq)){if(4==(n=r.length)){if(43==(t=(GV(0,r.length),r.charCodeAt(0))))return tvX;if(45==t)return tvZ}else if(3==n)return tvX}return new bK(r)}function eOy(e){var t,n,r;return((n=e.l)&n-1)!=0||((r=e.m)&r-1)!=0||((t=e.h)&t-1)!=0||0==t&&0==r&&0==n?-1:0==t&&0==r&&0!=n?enq(n):0==t&&0!=r&&0==n?enq(r)+22:0!=t&&0==r&&0==n?enq(t)+44:-1}function eOw(e,t){var n,r,i,a,o;for(ewG(t,"Edge joining",1),n=gN(LK(e_k(e,(eBy(),toz)))),i=new fz(e.b);i.a1)for(i=new fz(e.a);i.a0),a.a.Xb(a.c=--a.b),CD(a,i),A6(a.becd(r,0)?(i=eHf-jE(edQ(QC(r),eHf)))==eHf&&(i=0):i=jE(edQ(r,eHf)),1==t?Bd(e,48+(i=eB4.Math.min((i+50)/100|0,9))&eHd):2==t?eeE(e,i=eB4.Math.min((i+5)/10|0,99),2):(eeE(e,i,3),t>3&&eeE(e,0,t-3))}function eOM(e){var t,n,r,i;return xc(e_k(e,(eBy(),taM)))===xc((eck(),tpz))?!e.e&&xc(e_k(e,tat))!==xc((eaU(),ttO)):(r=Pp(e_k(e,tan),292),i=gN(LK(e_k(e,tao)))||xc(e_k(e,tas))===xc((en7(),teR)),t=Pp(e_k(e,tae),19).a,n=e.a.c.length,!i&&r!=(eaU(),ttO)&&(0==t||t>n))}function eOO(e){var t,n;for(n=0;n0);n++);if(n>0&&n0);t++);return t>0&&n>16!=6&&t){if(eg7(e,t))throw p7(new gL(eZ4+ex2(e)));r=null,e.Cb&&(r=(n=e.Db>>16)>=0?eg1(e,r):e.Cb.ih(e,-1-n,null,r)),t&&(r=ep0(t,e,6,r)),(r=Cc(e,t,r))&&r.Fi()}else(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,6,t,t))}function eOL(e,t){var n,r;if(t!=e.Cb||e.Db>>16!=9&&t){if(eg7(e,t))throw p7(new gL(eZ4+eC5(e)));r=null,e.Cb&&(r=(n=e.Db>>16)>=0?eg2(e,r):e.Cb.ih(e,-1-n,null,r)),t&&(r=ep0(t,e,9,r)),(r=Cl(e,t,r))&&r.Fi()}else(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,9,t,t))}function eOC(e,t){var n,r;if(t!=e.Cb||e.Db>>16!=3&&t){if(eg7(e,t))throw p7(new 
gL(eZ4+ePY(e)));r=null,e.Cb&&(r=(n=e.Db>>16)>=0?evo(e,r):e.Cb.ih(e,-1-n,null,r)),t&&(r=ep0(t,e,12,r)),(r=Cu(e,t,r))&&r.Fi()}else(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,3,t,t))}function eOI(e){var t,n,r,i,a;if(r=evl(e),null==(a=e.j)&&r)return e.$j()?null:r.zj();if(M4(r,148)){if((n=r.Aj())&&(i=n.Nh())!=e.i){if((t=Pp(r,148)).Ej())try{e.g=i.Kh(t,a)}catch(o){if(o=eoa(o),M4(o,78))e.g=null;else throw p7(o)}e.i=i}return e.g}return null}function eOD(e){var t;return t=new p0,P_(t,new EL(new kl(e.c,e.d),new kl(e.c+e.b,e.d))),P_(t,new EL(new kl(e.c,e.d),new kl(e.c,e.d+e.a))),P_(t,new EL(new kl(e.c+e.b,e.d+e.a),new kl(e.c+e.b,e.d))),P_(t,new EL(new kl(e.c+e.b,e.d+e.a),new kl(e.c,e.d+e.a))),t}function eON(e,t,n,r){var i,a,o;if(o=eyn(t,n),r.c[r.c.length]=t,-1==e.j[o.p]||2==e.j[o.p]||e.a[t.p])return r;for(e.j[o.p]=-1,a=new Fa(OH(efs(o).a.Kc(),new c));eTk(a);)if(i=Pp(ZC(a),17),!q8(i)&&!(!q8(i)&&i.c.i.c==i.d.i.c)&&i!=t)return eON(e,i,o,r);return r}function eOP(e,t,n){var r,i,a;for(a=t.a.ec().Kc();a.Ob();)i=Pp(a.Pb(),79),(r=Pp(Bp(e.b,i),266))||(z$(e_I(i))==z$(e_P(i))?eLk(e,i,n):e_I(i)==z$(e_P(i))?null==Bp(e.c,i)&&null!=Bp(e.b,e_P(i))&&eFt(e,i,n,!1):null==Bp(e.d,i)&&null!=Bp(e.b,e_I(i))&&eFt(e,i,n,!0))}function eOR(e,t){var n,r,i,a,o,s,u;for(i=e.Kc();i.Ob();)for(r=Pp(i.Pb(),10),s=new eES,Gc(s,r),ekv(s,(eYu(),tby)),eo3(s,(eBU(),tnm),(OQ(),!0)),o=t.Kc();o.Ob();)a=Pp(o.Pb(),10),u=new eES,Gc(u,a),ekv(u,tbY),eo3(u,tnm,!0),n=new $b,eo3(n,tnm,!0),Gs(n,s),Go(n,u)}function eOj(e,t,n,r){var i,a,o,s;i=ehu(e,t,n),a=ehu(e,n,t),o=Pp(Bp(e.c,t),112),s=Pp(Bp(e.c,n),112),ir.b.g&&(a.c[a.c.length]=r);return a}function eOB(){eOB=A,tfo=new S9("CANDIDATE_POSITION_LAST_PLACED_RIGHT",0),tfa=new S9("CANDIDATE_POSITION_LAST_PLACED_BELOW",1),tfu=new S9("CANDIDATE_POSITION_WHOLE_DRAWING_RIGHT",2),tfs=new S9("CANDIDATE_POSITION_WHOLE_DRAWING_BELOW",3),tfc=new S9("WHOLE_DRAWING",4)}function eOU(e,t){if(M4(t,239))return elg(e,Pp(t,33));if(M4(t,186))return el$(e,Pp(t,118));if(M4(t,354))return 
Hd(e,Pp(t,137));if(M4(t,352))return eNP(e,Pp(t,79));if(t)return null;else throw p7(new gL(eXx+e_F(new g$(eow(vx(e1R,1),eUp,1,5,[t])))))}function eOH(e){var t,n,r,i,a,o,s;for(a=new _n,i=new fz(e.d.a);i.a1)for(t=Al((n=new b1,++e.b,n),e.d),s=epL(a,0);s.b!=s.d.c;)o=Pp(Vv(s),121),eAx(_f(_l(_d(_c(new bQ,1),0),t),o))}function eO$(e,t){var n,r;if(t!=e.Cb||e.Db>>16!=11&&t){if(eg7(e,t))throw p7(new gL(eZ4+eC4(e)));r=null,e.Cb&&(r=(n=e.Db>>16)>=0?evs(e,r):e.Cb.ih(e,-1-n,null,r)),t&&(r=ep0(t,e,10,r)),(r=C4(e,t,r))&&r.Fi()}else(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,11,t,t))}function eOz(e){var t,n,r,i;for(r=new esz(new fS(e.b).a);r.b;)n=etz(r),i=Pp(n.cd(),11),eo3(t=Pp(n.dd(),10),(eBU(),tnc),i),eo3(i,tng,t),eo3(i,tt6,(OQ(),!0)),ekv(i,Pp(e_k(t,tt1),61)),e_k(t,tt1),eo3(i.i,(eBy(),tol),(ewf(),tbu)),Pp(e_k(Bq(i.i),tt3),21).Fc((eLR(),ttS))}function eOG(e,t,n){var r,i,a,o,s,u;if(a=0,o=0,e.c)for(u=new fz(e.d.i.j);u.aa.a)?-1:i.a(u=null==e.d?0:e.d.length)))return!1;for(a=0,l=e.d,e.d=Je(e6C,eJA,63,2*u+4,0,1);a=0x7fffffffffffffff?(Q2(),e0L):(i=!1,e<0&&(i=!0,e=-e),r=0,e>=eHW&&(r=zy(e/eHW),e-=r*eHW),n=0,e>=eHG&&(n=zy(e/eHG),e-=n*eHG),a=Mk(t=zy(e),n,r),i&&esh(a),a)}function eO6(e,t){var n,r,i,a;for(n=!t||!e.u.Hc((ekU(),tbp)),a=0,i=new fz(e.e.Cf());i.a=-t&&r==t?new kD(ell(n-1),ell(r)):new kD(ell(n),ell(r-1))}function eAn(){return eB$(),eow(vx(e4B,1),eU4,77,0,[e85,e82,e86,e7d,e7C,e7m,e7j,e7_,e7A,e7s,e7x,e7w,e7L,e7r,e7Y,e8Z,e7k,e7D,e7h,e7I,e7U,e7M,e8X,e7O,e7H,e7P,e7B,e7p,e7e,e7b,e7f,e7F,e81,e88,e7v,e8Q,e7y,e7c,e7i,e7E,e7o,e83,e80,e7l,e7a,e7S,e7R,e8J,e7T,e7u,e7g,e7t,e87,e7N,e89,e7n,e84])}function eAr(e,t,n){e.d=0,e.b=0,t.k==(eEn(),e8P)&&n.k==e8P&&Pp(e_k(t,(eBU(),tnc)),10)==Pp(e_k(n,tnc),10)&&(QP(t).j==(eYu(),tbw)?eMH(e,t,n):eMH(e,n,t)),t.k==e8P&&n.k==e8D?QP(t).j==(eYu(),tbw)?e.d=1:e.b=1:n.k==e8P&&t.k==e8D&&(QP(n).j==(eYu(),tbw)?e.b=1:e.d=1),emu(e,t,n)}function eAi(e){var t,n,r,i,a,o,s,u,c,l,f;return f=ewW(e),(u=null!=(t=e.a))&&P4(f,"category",e.a),(o=!(i=wc(new 
fk(e.d))))&&(ee3(f,"knownOptions",c=new lN),n=new pS(c),qX(new fk(e.d),n)),(s=!(a=wc(e.g)))&&(ee3(f,"supportedFeatures",l=new lN),r=new pk(l),qX(e.g,r)),f}function eAa(e){var t,n,r,i,a,o,s,u,c;for(u=0,r=!1,t=336,n=0,a=new CE(e.length),c=(s=e).length;u>16!=7&&t){if(eg7(e,t))throw p7(new gL(eZ4+eE1(e)));r=null,e.Cb&&(r=(n=e.Db>>16)>=0?eg0(e,r):e.Cb.ih(e,-1-n,null,r)),t&&(r=Pp(t,49).gh(e,1,e6p,r)),(r=j2(e,t,r))&&r.Fi()}else(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,7,t,t))}function eAc(e,t){var n,r;if(t!=e.Cb||e.Db>>16!=3&&t){if(eg7(e,t))throw p7(new gL(eZ4+eln(e)));r=null,e.Cb&&(r=(n=e.Db>>16)>=0?eg4(e,r):e.Cb.ih(e,-1-n,null,r)),t&&(r=Pp(t,49).gh(e,0,e6y,r)),(r=j3(e,t,r))&&r.Fi()}else(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,3,t,t))}function eAl(e,t){var n,r,i,a,o,s,u,c,l;return(exX(),t.d>e.d&&(s=e,e=t,t=s),t.d<63)?eLm(e,t):(o=(-2&e.d)<<4,c=ZL(e,o),l=ZL(t,o),r=eNz(e,ZA(c,o)),i=eNz(t,ZA(l,o)),u=eAl(c,l),n=eAl(r,i),a=eAl(eNz(c,r),eNz(i,l)),a=eP5(eP5(a,u),n),a=ZA(a,o),u=ZA(u,o<<1),eP5(eP5(u,a),n))}function eAf(e,t,n){var r,i,a,o,s;for(o=ecZ(e,n),s=Je(e4N,eGW,10,t.length,0,1),r=0,a=o.Kc();a.Ob();)gN(LK(e_k(i=Pp(a.Pb(),11),(eBU(),tt6))))&&(s[r++]=Pp(e_k(i,tng),10));if(r=0;a+=n?1:-1)o|=t.c.Sf(u,a,n,r&&!gN(LK(e_k(t.j,(eBU(),tt2))))&&!gN(LK(e_k(t.j,(eBU(),tnS))))),o|=t.q._f(u,a,n),o|=eCA(e,u[a],n,r);return Yf(e.c,t),o}function eAm(e,t,n){var r,i,a,o,s,u,c,l,f,d;for(l=Kz(e.j),f=0,d=l.length;f1&&(e.a=!0),jU(Pp(n.b,65),C5(MB(Pp(t.b,65).c),Ol(C6(MB(Pp(n.b,65).a),Pp(t.b,65).a),i))),GC(e,t),eAy(e,n)}function eAw(e){var t,n,r,i,a,o,s;for(a=new fz(e.a.a);a.a0&&a>0?o.p=t++:r>0?o.p=n++:a>0?o.p=i++:o.p=n++}Hj(),Mv(e.j,new nG)}function eAE(e){var t,n;n=null,t=Pp(RJ(e.g,0),17);do{if(Ln(n=t.d.i,(eBU(),tna)))return Pp(e_k(n,tna),11).i;if(n.k!=(eEn(),e8N)&&eTk(new Fa(OH(efc(n).a.Kc(),new c))))t=Pp(ZC(new Fa(OH(efc(n).a.Kc(),new c))),17);else if(n.k!=e8N)return null}while(!!n&&n.k!=(eEn(),e8N))return n}function eAS(e,t){var 
n,r,i,a,o,s,u,c,l;for(a=1,s=t.j,o=t.g,c=em1(e,o,u=Pp(RJ(s,s.c.length-1),113),l=(GK(0,s.c.length),Pp(s.c[0],113)));ac&&(u=n,l=i,c=r);t.a=l,t.c=u}function eAk(e,t){var n,r;if(!(r=YB(e.b,t.b)))throw p7(new gC("Invalid hitboxes for scanline constraint calculation."));(eop(t.b,Pp(CF(e.b,t.b),57))||eop(t.b,Pp(Cj(e.b,t.b),57)))&&(wK(),t.b),e.a[t.b.f]=Pp(Ik(e.b,t.b),57),(n=Pp(IS(e.b,t.b),57))&&(e.a[n.f]=t.b)}function eAx(e){if(!e.a.d||!e.a.e)throw p7(new gC((LW(e23),e23.k+" must have a source and target "+(LW(e24),e24.k)+" specified.")));if(e.a.d==e.a.e)throw p7(new gC("Network simplex does not support self-loops: "+e.a+" "+e.a.d+" "+e.a.e));return Am(e.a.d.g,e.a),Am(e.a.e.b,e.a),e.a}function eAT(e,t,n){var r,i,a,o,s,u,c;for(c=new yB(new hA(e)),o=eow(vx(e4j,1),eGK,11,0,[t,n]),s=0,u=o.length;su-e.b&&su-e.a&&s0&&++h;++d}return h}function eAF(e,t){var n,r,i,a,o;for(o=Pp(e_k(t,(eTj(),tcN)),425),a=epL(t.b,0);a.b!=a.d.c;)if(i=Pp(Vv(a),86),0==e.b[i.g]){switch(o.g){case 0:eb9(e,i);break;case 1:eTX(e,i)}e.b[i.g]=2}for(r=epL(e.a,0);r.b!=r.d.c;)eds((n=Pp(Vv(r),188)).b.d,n,!0),eds(n.c.b,n,!0);eo3(t,(eR6(),tch),e.a)}function eAY(e,t){var n,r,i,a;return(_4(),t)?t==(eR7(),tvG)||(t==tvM||t==tvx||t==tvT)&&e!=tvk?new eF2(e,t):((n=(r=Pp(t,677)).pk())||(UH(QZ((eSp(),tvc),t)),n=r.pk()),a=(n.i||(n.i=new p2),n.i),(i=Pp(xu($I(a.f,e)),1942))||Um(a,e,i=new eF2(e,t)),i):tvb}function eAB(e,t){var n,r,i,a,o,s,u,c,l;for(a=0,u=Pp(e_k(e,(eBU(),tnc)),11),c=esp(eow(vx(e50,1),eUP,8,0,[u.i.n,u.n,u.a])).a,l=e.i.n.b,o=(i=n=Kp(e.e)).length;a0?a.a?n>(s=a.b.rf().a)&&(i=(n-s)/2,a.d.b=i,a.d.c=i):a.d.c=e.s+n:FY(e.u)&&((r=ew1(a.b)).c<0&&(a.d.b=-r.c),r.c+r.b>a.b.rf().a&&(a.d.c=r.c+r.b-a.b.rf().a))}function eAz(e,t){var n,r,i,a;for(ewG(t,"Semi-Interactive Crossing Minimization Processor",1),n=!1,i=new fz(e.b);i.a=0){if(t==n)return new kD(ell(-t-1),ell(-t-1));if(t==-n)return new kD(ell(-t),ell(n+1))}return eB4.Math.abs(t)>eB4.Math.abs(n)?t<0?new kD(ell(-t),ell(n)):new kD(ell(-t),ell(n+1)):new 
kD(ell(t+1),ell(n))}function eAK(e){var t,n;n=Pp(e_k(e,(eBy(),taY)),163),t=Pp(e_k(e,(eBU(),tt9)),303),n==(ef_(),tnN)?(eo3(e,taY,tnj),eo3(e,tt9,(Q1(),ttN))):n==tnR?(eo3(e,taY,tnj),eo3(e,tt9,(Q1(),ttI))):t==(Q1(),ttN)?(eo3(e,taY,tnN),eo3(e,tt9,ttD)):t==ttI&&(eo3(e,taY,tnR),eo3(e,tt9,ttD))}function eAV(){eAV=A,tuY=new ad,tuP=RI(new K2,(e_x(),e8n),(eB$(),e7h)),tuF=j0(RI(new K2,e8n,e7M),e8i,e7T),tuB=ehY(ehY(_G(j0(RI(new K2,e8e,e7j),e8i,e7R),e8r),e7P),e7F),tuR=j0(RI(RI(RI(new K2,e8t,e7m),e8r,e7v),e8r,e7y),e8i,e7g),tuj=j0(RI(RI(new K2,e8r,e7y),e8r,e88),e8i,e89)}function eAq(){eAq=A,tuz=RI(j0(new K2,(e_x(),e8i),(eB$(),e7t)),e8n,e7h),tuV=ehY(ehY(_G(j0(RI(new K2,e8e,e7j),e8i,e7R),e8r),e7P),e7F),tuG=j0(RI(RI(RI(new K2,e8t,e7m),e8r,e7v),e8r,e7y),e8i,e7g),tuK=RI(RI(new K2,e8n,e7M),e8i,e7T),tuW=j0(RI(RI(new K2,e8r,e7y),e8r,e88),e8i,e89)}function eAZ(e,t,n,r,i){var a,o;(q8(t)||t.c.i.c!=t.d.i.c)&&erS(esp(eow(vx(e50,1),eUP,8,0,[i.i.n,i.n,i.a])),n)||q8(t)||(t.c==i?Ls(t.a,0,new TS(n)):P7(t.a,new TS(n)),r&&!w0(e.a,n)&&((o=Pp(e_k(t,(eBy(),taR)),74))||eo3(t,taR,o=new mE),qQ(o,a=new TS(n),o.c.b,o.c),Yf(e.a,a)))}function eAX(e){var t,n;for(n=new Fa(OH(efu(e).a.Kc(),new c));eTk(n);)if((t=Pp(ZC(n),17)).c.i.k!=(eEn(),e8I))throw p7(new gq(eWr+egs(e)+"' has its layer constraint set to FIRST, but has at least one incoming edge that does not come from a FIRST_SEPARATE node. 
That must not happen."))}function eAJ(e,t,n){var r,i,a,o,s,u,c;if(0==(i=efp(254&e.Db)))e.Eb=n;else{if(1==i)s=Je(e1R,eUp,1,2,5,1),0==(a=emF(e,t))?(s[0]=n,s[1]=e.Eb):(s[0]=e.Eb,s[1]=n);else for(r=2,s=Je(e1R,eUp,1,i+1,5,1),o=etG(e.Eb),u=0,c=0;r<=128;r<<=1)r==t?s[c++]=n:(e.Db&r)!=0&&(s[c++]=o[u++]);e.Eb=s}e.Db|=t}function eAQ(e,t,n){var r,i,a,o;for(this.b=new p0,i=0,r=0,o=new fz(e);o.a0&&(i+=(a=Pp(RJ(this.b,0),167)).o,r+=a.p),i*=2,r*=2,t>1?i=zy(eB4.Math.ceil(i*t)):r=zy(eB4.Math.ceil(r/t)),this.a=new edL(i,r)}function eA1(e,t,n,r,i,a){var o,s,u,c,l,f,d,h,p,b,m,g;for(l=r,t.j&&t.o?(b=(h=Pp(Bp(e.f,t.A),57)).d.c+h.d.b,--l):b=t.a.c+t.a.b,f=i,n.q&&n.o?(c=(h=Pp(Bp(e.f,n.C),57)).d.c,++f):c=n.a.c,m=c-b,p=b+(s=m/(u=eB4.Math.max(2,f-l))),d=l;d=0;o+=i?1:-1){for(s=t[o],u=r==(eYu(),tby)?i?efr(s,r):eaa(efr(s,r)):i?eaa(efr(s,r)):efr(s,r),a&&(e.c[s.p]=u.gc()),f=u.Kc();f.Ob();)l=Pp(f.Pb(),11),e.d[l.p]=c++;eoc(n,u)}}function eA2(e,t,n){var r,i,a,o,s,u,c,l;for(a=gP(LV(e.b.Kc().Pb())),c=gP(LV(eaX(t.b))),l=C5(r=Ol(MB(e.a),c-n),i=Ol(MB(t.a),n-a)),Ol(l,1/(c-a)),this.a=l,this.b=new p0,s=!0,(o=e.b.Kc()).Pb();o.Ob();)u=gP(LV(o.Pb())),s&&u-n>eVW&&(this.b.Fc(n),s=!1),this.b.Fc(u);s&&this.b.Fc(n)}function eA3(e){var t,n,r,i;if(eIh(e,e.n),e.d.c.length>0){for(gG(e.c);eTT(e,Pp(Wx(new fz(e.e.a)),121))>5,t&=31,r>=e.d)return e.e<0?(eLQ(),e03):(eLQ(),e08);if(i=Je(ty_,eHT,25,(a=e.d-r)+1,15,1),eEG(i,a,e.a,r,t),e.e<0){for(n=0;n0&&e.a[n]<<32-t!=0){for(n=0;n=0)&&(!(n=eR3((eSp(),tvc),i,t))||((r=n.Zj())>1||-1==r)&&3!=Ur(QZ(tvc,n))))}function eLn(e,t,n,r){var i,a,o,s,u;return(s=ewH(Pp(etj((t.b||(t.b=new Ih(e6m,t,4,7)),t.b),0),82)),u=ewH(Pp(etj((t.c||(t.c=new Ih(e6m,t,5,8)),t.c),0),82)),z$(s)==z$(u)||etg(u,s))?null:(o=zF(t))==n?r:(a=Pp(Bp(e.a,o),10))&&(i=a.e)?i:null}function eLr(e,t){var n;switch(n=Pp(e_k(e,(eBy(),tam)),276),ewG(t,"Label side selection ("+n+")",1),n.g){case 0:eTD(e,(egF(),tpV));break;case 1:eTD(e,(egF(),tpq));break;case 2:eNW(e,(egF(),tpV));break;case 3:eNW(e,(egF(),tpq));break;case 
4:eLL(e,(egF(),tpV));break;case 5:eLL(e,(egF(),tpq))}eEj(t)}function eLi(e,t,n){var r,i,a,o,s,u;if((o=e[r=vK(n,e.length)])[0].k==(eEn(),e8C))for(i=0,a=vW(n,o.length),u=t.j;i0&&(n[0]+=e.d,o-=n[0]),n[2]>0&&(n[2]+=e.d,o-=n[2]),a=eB4.Math.max(0,o),n[1]=eB4.Math.max(n[1],o),ZR(e,e3N,i.c+r.b+n[0]-(n[1]-o)/2,n),t==e3N&&(e.c.b=a,e.c.c=i.c+r.b+(a-o)/2)}function eLy(){this.c=Je(tyx,eH5,25,(eYu(),eow(vx(e6a,1),eGj,61,0,[tbF,tbw,tby,tbj,tbY])).length,15,1),this.b=Je(tyx,eH5,25,eow(vx(e6a,1),eGj,61,0,[tbF,tbw,tby,tbj,tbY]).length,15,1),this.a=Je(tyx,eH5,25,eow(vx(e6a,1),eGj,61,0,[tbF,tbw,tby,tbj,tbY]).length,15,1),Ep(this.c,eHQ),Ep(this.b,eH1),Ep(this.a,eH1)}function eLw(e,t,n){var r,i,a,o;if(t<=n?(i=t,a=n):(i=n,a=t),r=0,null==e.b)e.b=Je(ty_,eHT,25,2,15,1),e.b[0]=i,e.b[1]=a,e.c=!0;else{if(r=e.b.length,e.b[r-1]+1==i){e.b[r-1]=a;return}o=Je(ty_,eHT,25,r+2,15,1),ePD(e.b,0,o,0,r),e.b=o,e.b[r-1]>=i&&(e.c=!1,e.a=!1),e.b[r++]=i,e.b[r]=a,e.c||eMS(e)}}function eL_(e,t,n){var r,i,a,o,s,u,c;for(c=t.d,e.a=new XM(c.c.length),e.c=new p2,s=new fz(c);s.a=0?e._g(c,!1,!0):exk(e,n,!1),58);n:for(a=f.Kc();a.Ob();){for(l=0,i=Pp(a.Pb(),56);l1;)eLN(i,i.i-1);return r}function eLA(e,t){var n,r,i,a,o,s,u;for(ewG(t,"Comment post-processing",1),a=new fz(e.b);a.ae.d[o.p]&&(n+=qq(e.b,a),Vw(e.a,ell(a)));for(;!gY(e.a);)eek(e.b,Pp(Yn(e.a),19).a)}return n}function eLD(e,t,n){var r,i,a,o;for(a=(t.a||(t.a=new FQ(e6k,t,10,11)),t.a).i,i=new Ow((t.a||(t.a=new FQ(e6k,t,10,11)),t.a));i.e!=i.i.gc();)0==((r=Pp(epH(i),33)).a||(r.a=new FQ(e6k,r,10,11)),r.a).i||(a+=eLD(e,r,!1));if(n)for(o=z$(t);o;)a+=(o.a||(o.a=new FQ(e6k,o,10,11)),o.a).i,o=z$(o);return a}function eLN(e,t){var n,r,i,a;return e.ej()?(r=null,i=e.fj(),e.ij()&&(r=e.kj(e.pi(t),null)),n=e.Zi(4,a=egk(e,t),null,t,i),e.bj()&&null!=a?(r=e.dj(a,r))?(r.Ei(n),r.Fi()):e.$i(n):r?(r.Ei(n),r.Fi()):e.$i(n),a):(a=egk(e,t),e.bj()&&null!=a&&(r=e.dj(a,null))&&r.Fi(),a)}function eLP(e){var t,n,r,i,a,o,s,u,c,l;for(c=e.a,t=new bV,u=0,r=new 
fz(e.d);r.as.d&&(l=s.d+s.a+c));n.c.d=l,t.a.zc(n,t),u=eB4.Math.max(u,n.c.d+n.c.a)}return u}function eLR(){eLR=A,ttv=new Sv("COMMENTS",0),ttw=new Sv("EXTERNAL_PORTS",1),tt_=new Sv("HYPEREDGES",2),ttE=new Sv("HYPERNODES",3),ttS=new Sv("NON_FREE_PORTS",4),ttk=new Sv("NORTH_SOUTH_PORTS",5),ttT=new Sv(eWw,6),ttg=new Sv("CENTER_LABELS",7),tty=new Sv("END_LABELS",8),ttx=new Sv("PARTITIONS",9)}function eLj(e){var t,n,r,i,a;for(i=new p0,t=new Rq((e.a||(e.a=new FQ(e6k,e,10,11)),e.a)),r=new Fa(OH(eOi(e).a.Kc(),new c));eTk(r);)n=Pp(ZC(r),79),!M4(etj((n.b||(n.b=new Ih(e6m,n,4,7)),n.b),0),186)&&(a=ewH(Pp(etj((n.c||(n.c=new Ih(e6m,n,5,8)),n.c),0),82)),t.a._b(a)||(i.c[i.c.length]=a));return i}function eLF(e){var t,n,r,i,a,o;for(a=new bV,t=new Rq((e.a||(e.a=new FQ(e6k,e,10,11)),e.a)),i=new Fa(OH(eOi(e).a.Kc(),new c));eTk(i);)r=Pp(ZC(i),79),!M4(etj((r.b||(r.b=new Ih(e6m,r,4,7)),r.b),0),186)&&(o=ewH(Pp(etj((r.c||(r.c=new Ih(e6m,r,5,8)),r.c),0),82)),t.a._b(o)||(n=a.a.zc(o,a)));return a}function eLY(e,t,n,r,i){return r<0?((r=ew6(e,i,eow(vx(e17,1),eUP,2,6,[eHh,eHp,eHb,eHm,eHg,eHv,eHy,eHw,eH_,eHE,eHS,eHk]),t))<0&&(r=ew6(e,i,eow(vx(e17,1),eUP,2,6,["Jan","Feb","Mar","Apr",eHg,"Jun","Jul","Aug","Sep","Oct","Nov","Dec"]),t)),!(r<0)&&(n.k=r,!0)):r>0&&(n.k=r-1,!0)}function eLB(e,t,n,r,i){return r<0?((r=ew6(e,i,eow(vx(e17,1),eUP,2,6,[eHh,eHp,eHb,eHm,eHg,eHv,eHy,eHw,eH_,eHE,eHS,eHk]),t))<0&&(r=ew6(e,i,eow(vx(e17,1),eUP,2,6,["Jan","Feb","Mar","Apr",eHg,"Jun","Jul","Aug","Sep","Oct","Nov","Dec"]),t)),!(r<0)&&(n.k=r,!0)):r>0&&(n.k=r-1,!0)}function eLU(e,t,n,r,i,a){var o,s,u,c;if(s=32,r<0){if(t[0]>=e.length||43!=(s=UI(e,t[0]))&&45!=s||(++t[0],(r=exf(e,t))<0))return!1;45==s&&(r=-r)}return 32==s&&t[0]-n==2&&2==i.b&&(o=(c=(u=new wW).q.getFullYear()-eHx+eHx-80)%100,a.a=r==o,r+=(c/100|0)*100+(r=c&&(u=r);u&&(l=eB4.Math.max(l,u.a.o.a)),l>d&&(f=c,d=l)}return f}function eLV(e,t,n){var r,i,a;if(e.e=n,e.d=0,e.b=0,e.f=1,e.i=t,(16&e.e)==16&&(e.i=eIw(e.i)),e.j=e.i.length,eBM(e),a=ehT(e),e.d!=e.j)throw p7(new 
gX(eBJ((Mo(),eXV))));if(e.g){for(r=0;reqg?Mv(u,e.b):r<=eqg&&r>eqv?Mv(u,e.d):r<=eqv&&r>eqy?Mv(u,e.c):r<=eqy&&Mv(u,e.a),a=eLJ(e,u,a);return i}function eLQ(){var e;for(e=0,eLQ=A,e04=new XE(1,1),e06=new XE(1,10),e08=new XE(0,0),e03=new XE(-1,1),e05=eow(vx(e0t,1),eUP,91,0,[e08,e04,new XE(1,2),new XE(1,3),new XE(1,4),new XE(1,5),new XE(1,6),new XE(1,7),new XE(1,8),new XE(1,9),e06]),e09=Je(e0t,eUP,91,32,0,1);e1)&&(r=new kl(i,n.b),P7(t.a,r)),enD(t.a,eow(vx(e50,1),eUP,8,0,[d,f]))}function eL6(e){_Y(e,new ewB(vQ(vq(vJ(vX(new oc,eZA),"ELK Randomizer"),'Distributes the nodes randomly on the plane, leading to very obfuscating layouts. Can be useful to demonstrate the power of "real" layout algorithms.'),new oz))),KE(e,eZA,ezW,tb$),KE(e,eZA,eGi,15),KE(e,eZA,eGo,ell(0)),KE(e,eZA,ezG,eGt)}function eL9(){var e,t,n,r,i,a;for(t=0,eL9=A,tv1=Je(tyk,eZ8,25,255,15,1),tv0=Je(tyw,eHl,25,16,15,1);t<255;t++)tv1[t]=-1;for(n=57;n>=48;n--)tv1[n]=n-48<<24>>24;for(r=70;r>=65;r--)tv1[r]=r-65+10<<24>>24;for(i=102;i>=97;i--)tv1[i]=i-97+10<<24>>24;for(a=0;a<10;a++)tv0[a]=48+a&eHd;for(e=10;e<=15;e++)tv0[e]=65+e-10&eHd}function eL8(e,t,n){var r,i,a,o,s,u,c,l;return s=t.i-e.g/2,u=n.i-e.g/2,c=t.j-e.g/2,l=n.j-e.g/2,a=t.g+e.g/2,o=n.g+e.g/2,r=t.f+e.g/2,i=n.f+e.g/2,!!(s>19!=0)return"-"+eCr(eoQ(e));for(n=e,r="";!(0==n.l&&0==n.m&&0==n.h);){if(n=eRV(n,i=Zx(eHK),!0),t=""+yq(e0A),!(0==n.l&&0==n.m&&0==n.h))for(a=9-t.length;a>0;a--)t="0"+t;r=t+r}return r}function eCi(){if(!Object.create||!Object.getOwnPropertyNames)return!1;var e="__proto__",t=Object.create(null);return void 0===t[e]&&0==Object.getOwnPropertyNames(t).length&&(t[e]=42,42===t[e]&&0!=Object.getOwnPropertyNames(t).length)}function eCa(e){var t,n,r,i,a,o,s;for(t=!1,n=0,i=new fz(e.d.b);i.a=e.a||!ewg(t,n))return -1;if(Vb(Pp(r.Kb(t),20)))return 1;for(i=0,o=Pp(r.Kb(t),20).Kc();o.Ob();)if(-1==(s=eCu(e,u=(a=Pp(o.Pb(),17)).c.i==t?a.d.i:a.c.i,n,r))||(i=eB4.Math.max(i,s))>e.c-1)return -1;return i+1}function eCc(e,t){var 
n,r,i,a,o,s;if(xc(t)===xc(e))return!0;if(!M4(t,15)||(r=Pp(t,15),s=e.gc(),r.gc()!=s))return!1;if(o=r.Kc(),e.ni()){for(n=0;n0){if(e.qj(),null!=t){for(a=0;a>24;case 97:case 98:case 99:case 100:case 101:case 102:return e-97+10<<24>>24;case 65:case 66:case 67:case 68:case 69:case 70:return e-65+10<<24>>24;default:throw p7(new vo("Invalid hexadecimal"))}}function eCh(e,t,n){var r,i,a,o;for(ewG(n,"Processor order nodes",2),e.a=gP(LV(e_k(t,(eTj(),tcR)))),i=new _n,o=epL(t.b,0);o.b!=o.d.c;)gN(LK(e_k(a=Pp(Vv(o),86),(eR6(),tcm))))&&qQ(i,a,i.c.b,i.c);eRt(e,r=(A6(0!=i.b),Pp(i.a.a.c,86))),n.b||erd(n,1),eC1(e,r,0-gP(LV(e_k(r,(eR6(),tcu))))/2,0),n.b||erd(n,1),eEj(n)}function eCp(){eCp=A,e3C=new Ej("SPIRAL",0),e3T=new Ej("LINE_BY_LINE",1),e3M=new Ej("MANHATTAN",2),e3x=new Ej("JITTER",3),e3A=new Ej("QUADRANTS_LINE_BY_LINE",4),e3L=new Ej("QUADRANTS_MANHATTAN",5),e3O=new Ej("QUADRANTS_JITTER",6),e3k=new Ej("COMBINE_LINE_BY_LINE_MANHATTAN",7),e3S=new Ej("COMBINE_JITTER_MANHATTAN",8)}function eCb(e,t,n,r){var i,a,o,s,u,c;for(u=eya(e,n),c=eya(t,n),i=!1;u&&c;)if(r||egl(u,c,n))o=eya(u,n),s=eya(c,n),QB(t),QB(e),a=u.c,ejf(u,!1),ejf(c,!1),n?(egU(t,c.p,a),t.p=c.p,egU(e,u.p+1,a),e.p=u.p):(egU(e,u.p,a),e.p=u.p,egU(t,c.p+1,a),t.p=c.p),Gu(u,null),Gu(c,null),u=o,c=s,i=!0;else break;return i}function eCm(e,t,n,r){var i,a,o,s,u;for(i=!1,a=!1,s=new fz(r.j);s.a=t.length)throw p7(new gE("Greedy SwitchDecider: Free layer not in graph."));this.c=t[e],this.e=new IQ(r),er$(this.e,this.c,(eYu(),tbY)),this.i=new IQ(r),er$(this.i,this.c,tby),this.f=new jy(this.c),this.a=!a&&i.i&&!i.s&&this.c[0].k==(eEn(),e8C),this.a&&eSt(this,e,t.length)}function eC_(e,t){var n,r,i,a,o,s;a=!e.B.Hc((eI3(),tbX)),o=e.B.Hc(tb1),e.a=new edA(o,a,e.c),e.n&&HI(e.a.n,e.n),gh(e.g,(etx(),e3N),e.a),t||((r=new eh6(1,a,e.c)).n.a=e.k,jT(e.p,(eYu(),tbw),r),(i=new eh6(1,a,e.c)).n.d=e.k,jT(e.p,tbj,i),(s=new eh6(0,a,e.c)).n.c=e.k,jT(e.p,tbY,s),(n=new eh6(0,a,e.c)).n.b=e.k,jT(e.p,tby,n))}function eCE(e){var 
t,n,r;switch((t=Pp(e_k(e.d,(eBy(),tag)),218)).g){case 2:n=eBn(e);break;case 3:n=(r=new p0,_r(UJ(UQ(eeh(eeh(new R1(null,new Gq(e.d.b,16)),new rJ),new rQ),new r1),new rY),new ha(r)),r);break;default:throw p7(new gC("Compaction not supported for "+t+" edges."))}eRD(e,n),qX(new fk(e.g),new hr(e))}function eCS(e,t){var n;return(n=new eX,t&&eaW(n,Pp(Bp(e.a,e6p),94)),M4(t,470)&&eaW(n,Pp(Bp(e.a,e6b),94)),M4(t,354))?(eaW(n,Pp(Bp(e.a,e6S),94)),n):(M4(t,82)&&eaW(n,Pp(Bp(e.a,e6m),94)),M4(t,239))?(eaW(n,Pp(Bp(e.a,e6k),94)),n):M4(t,186)?(eaW(n,Pp(Bp(e.a,e6x),94)),n):(M4(t,352)&&eaW(n,Pp(Bp(e.a,e6g),94)),n)}function eCk(){eCk=A,e9M=new T2((eBB(),th4),ell(1)),e9D=new T2(tpl,80),e9I=new T2(tpr,5),e9p=new T2(td2,eGt),e9O=new T2(th5,ell(1)),e9C=new T2(th8,(OQ(),!0)),e9k=new T3(50),e9S=new T2(thN,e9k),e9m=thb,e9x=thV,e9b=new T2(thn,!1),e9E=thD,e9_=thL,e9w=thx,e9y=thS,e9T=thJ,e9v=(eEg(),e9i),e9N=e9c,e9g=e9r,e9A=e9o,e9L=e9u}function eCx(e){var t,n,r,i,a,o,s,u;for(u=new Zr,s=new fz(e.a);s.a0&&t=0)return!1;if(t.p=n.b,P_(n.e,t),i==(eEn(),e8D)||i==e8P){for(o=new fz(t.j);o.a1||-1==o)&&(a|=16),(i.Bb&eZ1)!=0&&(a|=64)),(n.Bb&eH3)!=0&&(a|=eJq),a|=eXt):M4(t,457)?a|=512:(r=t.Bj())&&(1&r.i)!=0&&(a|=256),(512&e.Bb)!=0&&(a|=128),a}function eCG(e,t){var n,r,i,a,o;for(i=0,e=null==e?eUg:(BJ(e),e);ie.d[s.p]&&(n+=qq(e.b,a),Vw(e.a,ell(a))):++o;for(n+=e.b.d*o;!gY(e.a);)eek(e.b,Pp(Yn(e.a),19).a)}return n}function eCV(e,t){var n;return e.f==tvm?(n=Ur(QZ((eSp(),tvc),t)),e.e?4==n&&t!=(ex$(),tvw)&&t!=(ex$(),tvg)&&t!=(ex$(),tvv)&&t!=(ex$(),tvy):2==n):!!(e.d&&(e.d.Hc(t)||e.d.Hc(Wk(QZ((eSp(),tvc),t)))||e.d.Hc(eR3((eSp(),tvc),e.b,t))))||!!(e.f&&eOq((eSp(),e.f),U$(QZ(tvc,t))))&&(n=Ur(QZ(tvc,t)),e.e?4==n:2==n)}function eCq(e,t,n,r){var i,a,o,s,u,c,l,f;return 
u=(o=Pp(eT8(n,(eBB(),th3)),8)).a,l=o.b+e,(i=eB4.Math.atan2(l,u))<0&&(i+=eV7),(i+=t)>eV7&&(i-=eV7),c=(s=Pp(eT8(r,th3),8)).a,f=s.b+e,(a=eB4.Math.atan2(f,c))<0&&(a+=eV7),(a+=t)>eV7&&(a-=eV7),Mc(),enj(1e-10),1e-10>=eB4.Math.abs(i-a)||i==a||isNaN(i)&&isNaN(a)?0:ia?1:Te(isNaN(i),isNaN(a))}function eCZ(e){var t,n,r,i,a,o,s;for(s=new p2,r=new fz(e.a.b);r.a=e.o)throw p7(new bj);s=t>>5,o=31&t,a=Fg(1,jE(Fg(o,1))),i?e.n[n][s]=WO(e.n[n][s],a):e.n[n][s]=WM(e.n[n][s],PN(a)),a=Fg(a,1),r?e.n[n][s]=WO(e.n[n][s],a):e.n[n][s]=WM(e.n[n][s],PN(a))}catch(u){if(u=eoa(u),M4(u,320))throw p7(new gE(ez_+e.o+"*"+e.p+ezE+t+eUd+n+ezS));throw p7(u)}}function eC1(e,t,n,r){var i,a,o;t&&(a=gP(LV(e_k(t,(eR6(),tcd))))+r,o=n+gP(LV(e_k(t,tcu)))/2,eo3(t,tcg,ell(jE(eap(eB4.Math.round(a))))),eo3(t,tcv,ell(jE(eap(eB4.Math.round(o))))),0==t.d.b||eC1(e,Pp(M2((i=epL(new hz(t).a.d,0),new hG(i))),86),n+gP(LV(e_k(t,tcu)))+e.a,r+gP(LV(e_k(t,tcc)))),null!=e_k(t,tcb)&&eC1(e,Pp(e_k(t,tcb),86),n,r))}function eC0(e,t){var n,r,i,a,o,s,u,c,l,f,d;for(i=2*gP(LV(e_k(u=Bq(t.a),(eBy(),toI)))),l=gP(LV(e_k(u,toY))),c=eB4.Math.max(i,l),a=Je(tyx,eH5,25,t.f-t.c+1,15,1),r=-c,n=0,s=t.b.Kc();s.Ob();)o=Pp(s.Pb(),10),r+=e.a[o.c.p]+c,a[n++]=r;for(r+=e.a[t.a.c.p]+c,a[n++]=r,d=new fz(t.e);d.a0&&(r=(e.n||(e.n=new FQ(e6S,e,1,7)),Pp(etj(e.n,0),137)).a)&&xM(xM((t.a+=' "',t),r),'"')),xM(yW(xM(yW(xM(yW(xM(yW((t.a+=" (",t),e.i),","),e.j)," | "),e.g),","),e.f),")"),t.a)}function eC5(e){var t,n,r;return(64&e.Db)!=0?eEp(e):(t=new O0(eZG),(n=e.k)?xM(xM((t.a+=' "',t),n),'"'):(e.n||(e.n=new FQ(e6S,e,1,7)),e.n.i>0&&(r=(e.n||(e.n=new FQ(e6S,e,1,7)),Pp(etj(e.n,0),137)).a)&&xM(xM((t.a+=' "',t),r),'"')),xM(yW(xM(yW(xM(yW(xM(yW((t.a+=" (",t),e.i),","),e.j)," | "),e.g),","),e.f),")"),t.a)}function eC6(e,t){var n,r,i,a,o,s,u;if(null==t||0==t.length)return null;if(!(i=Pp(zg(e.a,t),149))){for(r=(s=new fT(e.b).a.vc().Kc(),new 
fN(s));r.a.Ob();)if(o=(n=(a=Pp(r.a.Pb(),42),Pp(a.dd(),149))).c,u=t.length,IE(o.substr(o.length-u,u),t)&&(t.length==o.length||46==UI(o,o.length-t.length-1))){if(i)return null;i=n}i&&Ge(e.a,t,i)}return i}function eC9(e,t){var n,r,i,a;return(n=new eD,i=(r=Pp(qE(UQ(new R1(null,new Gq(e.f,16)),n),Qz(new q,new Z,new er,new ei,eow(vx(e2L,1),eU4,132,0,[(eum(),e2H),e2U]))),21)).gc(),a=(r=Pp(qE(UQ(new R1(null,new Gq(t.f,16)),n),Qz(new q,new Z,new er,new ei,eow(vx(e2L,1),eU4,132,0,[e2H,e2U]))),21)).gc(),ii.p?(ekv(a,tbj),a.d&&(s=a.o.b,t=a.a.b,a.a.b=s-t)):a.j==tbj&&i.p>e.p&&(ekv(a,tbw),a.d&&(s=a.o.b,t=a.a.b,a.a.b=-(s-t)));break}return i}function eIe(e,t,n,r){var i,a,o,s,u,c,l,f,d,h,p;if(a=n,n1)&&(r=new kl(i,n.b),P7(t.a,r)),enD(t.a,eow(vx(e50,1),eUP,8,0,[d,f]))}function eIy(e,t,n){var r,i,a,o,s,u;if(!t)return null;if(!(n<=-1))return ebY(Pp(ee2(e.Tg(),n),18));if(r=ee2(t.Tg(),-1-n),M4(r,99))return Pp(r,18);for(s=0,u=(o=Pp(t.ah(r),153)).gc();s0){for(i=u.length;i>0&&""==u[i-1];)--i;i=t.d.a.gc()){o=t.a.c,s=t.a.c+t.a.b,u=new kl(o+(s-o)/2,t.b),P7(Pp(t.d.a.ec().Kc().Pb(),17).a,u);continue}if((i=Pp(Bp(t.c,n),459)).b||i.c){eIv(e,n,t);continue}(a=e.d==(euy(),tsW)&&(i.d||i.e)&&exJ(e,t)&&1>=t.d.a.gc())?eFd(n,t):eL5(e,n,t)}t.k&&qX(t.d,new nn)}}function eIq(e,t,n,r,i,a){var o,s,u,c,l,f,d,h,p,b,m,g,v,y;for(s=(r+i)/2+(d=a),m=n*eB4.Math.cos(s),g=n*eB4.Math.sin(s),v=m-t.g/2,y=g-t.f/2,eno(t,v),ens(t,y),f=e.a.jg(t),(b=2*eB4.Math.acos(n/n+e.c))=40)&&eNo(e),eRi(e),eA3(e),n=elM(e),r=0;n&&r0&&P7(e.f,a)):(e.c[o]-=c+1,e.c[o]<=0&&e.a[o]>0&&P7(e.e,a))))}function eI1(e){var t,n,r,i,a,o,s,u,c;for(s=new yB(Pp(Y9(new eP),62)),c=eH1,n=new fz(e.d);n.a=0&&un?t:n;c<=f;++c)c==n?s=r++:(a=i[c],l=p.rl(a.ak()),c==t&&(u=c!=f||l?r:r-1),l&&++r);return d=Pp(elR(e,t,n),72),s!=u&&bz(e,new JU(e.e,7,o,ell(s),h.dd(),u)),d}return Pp(elR(e,t,n),72)}function eDe(e,t){var n,r,i,a,o,s,u;for(ewG(t,"Port order processing",1),u=Pp(e_k(e,(eBy(),tom)),421),r=new 
fz(e.b);r.a=0&&(!(s=egy(e,o))||(c<22?u.l|=1<>>1,o.m=l>>>1|(1&f)<<21,o.l=d>>>1|(1&l)<<21,--c;return n&&esh(u),a&&(r?(e0A=eoQ(e),i&&(e0A=eor(e0A,(Q2(),e0I)))):e0A=Mk(e.l,e.m,e.h)),u}function eDi(e,t){var n,r,i,a,o,s,u,c,l,f;for(c=e.e[t.c.p][t.p]+1,u=t.c.a.c.length+1,s=new fz(e.a);s.a0&&(GV(0,e.length),45==e.charCodeAt(0)||(GV(0,e.length),43==e.charCodeAt(0)))?1:0;rn)throw p7(new vo(eHJ+e+'"'));return s}function eDo(e){var t,n,r,i,a,o,s;for(o=new _n,a=new fz(e.a);a.a1)&&1==t&&Pp(e.a[e.b],10).k==(eEn(),e8I)?eD3(Pp(e.a[e.b],10),(egF(),tpV)):r&&(!n||(e.c-e.b&e.a.length-1)>1)&&1==t&&Pp(e.a[e.c-1&e.a.length-1],10).k==(eEn(),e8I)?eD3(Pp(e.a[e.c-1&e.a.length-1],10),(egF(),tpq)):(e.c-e.b&e.a.length-1)==2?(eD3(Pp(eso(e),10),(egF(),tpV)),eD3(Pp(eso(e),10),tpq)):eM8(e,i),qr(e)}function eDf(e,t,n){var r,i,a,o,s;for(a=0,i=new Ow((e.a||(e.a=new FQ(e6k,e,10,11)),e.a));i.e!=i.i.gc();)r=Pp(epH(i),33),o="",0==(r.n||(r.n=new FQ(e6S,r,1,7)),r.n).i||(o=Pp(etj((r.n||(r.n=new FQ(e6S,r,1,7)),r.n),0),137).a),eaW(s=new esH(a++,t,o),r),eo3(s,(eR6(),tcl),r),s.e.b=r.j+r.f/2,s.f.a=eB4.Math.max(r.g,1),s.e.a=r.i+r.g/2,s.f.b=eB4.Math.max(r.f,1),P7(t.b,s),eS9(n.f,r,s)}function eDd(e){var t,n,r,i,a;r=Pp(e_k(e,(eBU(),tnc)),33),a=Pp(eT8(r,(eBy(),ta4)),174).Hc((ed6(),tbq)),!e.e&&(i=Pp(e_k(e,tt3),21),t=new kl(e.f.a+e.d.b+e.d.c,e.f.b+e.d.d+e.d.a),i.Hc((eLR(),ttw))?(ebu(r,tol,(ewf(),tbo)),eYx(r,t.a,t.b,!1,!0)):gN(LK(eT8(r,ta5)))||eYx(r,t.a,t.b,!0,!0)),a?ebu(r,ta4,el9(tbq)):ebu(r,ta4,(n=Pp(yw(e6o),9),new I1(n,Pp(CY(n,n.length),9),0)))}function eDh(e,t,n){var r,i,a,o;if(t[0]>=e.length)return n.o=0,!0;switch(UI(e,t[0])){case 43:i=1;break;case 45:i=-1;break;default:return n.o=0,!0}if(++t[0],a=t[0],0==(o=exf(e,t))&&t[0]==a)return!1;if(t[0]=0&&s!=n&&(a=new FX(e,1,s,o,null),r?r.Ei(a):r=a),n>=0&&(a=new FX(e,1,n,s==n?o:null,t),r?r.Ei(a):r=a)),r}function eDv(e){var t,n,r;if(null==e.b){if(r=new 
vs,null!=e.i&&(xk(r,e.i),r.a+=":"),(256&e.f)!=0){for((256&e.f)!=0&&null!=e.a&&(Hb(e.i)||(r.a+="//"),xk(r,e.a)),null!=e.d&&(r.a+="/",xk(r,e.d)),(16&e.f)!=0&&(r.a+="/"),t=0,n=e.j.length;td)&&(f=(u=ePI(r,d,!1)).a,l+s+f<=t.b&&(JR(n,a-n.s),n.c=!0,JR(r,a-n.s),ebP(r,n.s,n.t+n.d+s),r.k=!0,eiV(n.q,r),h=!0,i&&(enN(t,r),r.j=t,e.c.length>o&&(eva((GK(o,e.c.length),Pp(e.c[o],200)),r),0==(GK(o,e.c.length),Pp(e.c[o],200)).a.c.length&&ZV(e,o)))),h)}function eDx(e,t){var n,r,i,a,o,s;if(ewG(t,"Partition midprocessing",1),i=new zu,_r(UJ(new R1(null,new Gq(e.a,16)),new nK),new dQ(i)),0!=i.d){for(r=(s=Pp(qE(GU((a=i.i,new R1(null,(a||(i.i=new OC(i,i.c))).Nc()))),JF(new U,new B,new en,eow(vx(e2L,1),eU4,132,0,[(eum(),e2U)]))),15)).Kc(),n=Pp(r.Pb(),19);r.Ob();)o=Pp(r.Pb(),19),eOR(Pp(Zq(i,n),21),Pp(Zq(i,o),21)),n=o;eEj(t)}}function eDT(e,t,n){var r,i,a,o,s,u,c,l;if(0==t.p){for(t.p=1,(o=n)||(i=new p0,a=(r=Pp(yw(e6a),9),new I1(r,Pp(CY(r,r.length),9),0)),o=new kD(i,a)),Pp(o.a,15).Fc(t),t.k==(eEn(),e8C)&&Pp(o.b,21).Fc(Pp(e_k(t,(eBU(),tt1)),61)),u=new fz(t.j);u.a0){if(i=Pp(e.Ab.g,1934),null==t){for(a=0;a1)for(r=new fz(i);r.an.s&&ss&&(s=i,f.c=Je(e1R,eUp,1,0,5,1)),i==s&&P_(f,new kD(n.c.i,n)));Hj(),Mv(f,e.c),jO(e.b,u.p,f)}}function eDR(e,t){var n,r,i,a,o,s,u,l,f;for(o=new fz(t.b);o.as&&(s=i,f.c=Je(e1R,eUp,1,0,5,1)),i==s&&P_(f,new kD(n.d.i,n)));Hj(),Mv(f,e.c),jO(e.f,u.p,f)}}function eDj(e){_Y(e,new ewB(vQ(vq(vJ(vX(new oc,eZn),"ELK Box"),"Algorithm for packing of unconnected boxes, i.e. 
graphs without edges."),new oA))),KE(e,eZn,ezW,td$),KE(e,eZn,eGi,15),KE(e,eZn,eGr,ell(0)),KE(e,eZn,eqC,epB(tdj)),KE(e,eZn,eGh,epB(tdY)),KE(e,eZn,eGd,epB(tdU)),KE(e,eZn,ezG,eZt),KE(e,eZn,eGu,epB(tdF)),KE(e,eZn,eGM,epB(tdB)),KE(e,eZn,eZr,epB(tdP)),KE(e,eZn,eVg,epB(tdR))}function eDF(e,t){var n,r,i,a,o,s,u,c,l;if(o=(i=e.i).o.a,a=i.o.b,o<=0&&a<=0)return eYu(),tbF;switch(c=e.n.a,l=e.n.b,s=e.o.a,n=e.o.b,t.g){case 2:case 1:if(c<0)return eYu(),tbY;if(c+s>o)return eYu(),tby;break;case 4:case 3:if(l<0)return eYu(),tbw;if(l+n>a)return eYu(),tbj}return(u=(c+s/2)/o)+(r=(l+n/2)/a)<=1&&u-r<=0?(eYu(),tbY):u+r>=1&&u-r>=0?(eYu(),tby):r<.5?(eYu(),tbw):(eYu(),tbj)}function eDY(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b;for(n=!1,l=gP(LV(e_k(t,(eBy(),toF)))),p=eHe*l,i=new fz(t.b);i.a(u=s.n.b-s.d.d+d.a)+p&&(b=f.g+d.g,d.a=(d.g*d.a+f.g*f.a)/b,d.g=b,f.f=d,n=!0)),a=s,f=d;return n}function eDB(e,t,n,r,i,a,o){var s,u,c,l,f,d;for(d=new TE,c=t.Kc();c.Ob();)for(s=Pp(c.Pb(),839),f=new fz(s.wf());f.a0?s.a?i>(c=s.b.rf().b)&&(e.v||1==s.c.d.c.length?(o=(i-c)/2,s.d.d=o,s.d.a=o):(r=((n=Pp(RJ(s.c.d,0),181).rf().b)-c)/2,s.d.d=eB4.Math.max(0,r),s.d.a=i-r-c)):s.d.a=e.t+i:FY(e.u)&&((a=ew1(s.b)).d<0&&(s.d.d=-a.d),a.d+a.a>s.b.rf().b&&(s.d.a=a.d+a.a-s.b.rf().b))}function eD$(e,t){var n;switch(eeg(e)){case 6:return xd(t);case 7:return xf(t);case 8:return xl(t);case 3:return Array.isArray(t)&&!((n=eeg(t))>=14&&n<=16);case 11:return null!=t&&typeof t===eUs;case 12:return null!=t&&(typeof t===eUr||typeof t==eUs);case 0:return ebs(t,e.__elementTypeId$);case 2:return YS(t)&&t.im!==O;case 1:return YS(t)&&t.im!==O||ebs(t,e.__elementTypeId$);default:return!0}}function eDz(e,t){var n,r,i,a;return(r=eB4.Math.min(eB4.Math.abs(e.c-(t.c+t.b)),eB4.Math.abs(e.c+e.b-t.c)),a=eB4.Math.min(eB4.Math.abs(e.d-(t.d+t.a)),eB4.Math.abs(e.d+e.a-t.d)),(n=eB4.Math.abs(e.c+e.b/2-(t.c+t.b/2)))>e.b/2+t.b/2||(i=eB4.Math.abs(e.d+e.a/2-(t.d+t.a/2)))>e.a/2+t.a/2)?1:0==n&&0==i?0:0==n?a/i+1:0==i?r/n+1:eB4.Math.min(r/n,a/i)+1}function eDG(e,t){var 
n,r,i,a,o,s;return(i=enR(e),s=enR(t),i!=s)?it.f?1:0:(r=e.e-t.e,(n=(e.d>0?e.d:eB4.Math.floor((e.a-1)*eH9)+1)-(t.d>0?t.d:eB4.Math.floor((t.a-1)*eH9)+1))>r+1)?i:n0&&(o=eeD(o,eN4(r))),ehI(a,o))}function eDW(e,t){var n,r,i,a,o,s,u;for(a=0,s=0,u=0,i=new fz(e.f.e);i.a0&&e.d!=(QJ(),e95)&&(s+=o*(r.d.a+e.a[t.b][r.b]*(t.d.a-r.d.a)/n)),n>0&&e.d!=(QJ(),e93)&&(u+=o*(r.d.b+e.a[t.b][r.b]*(t.d.b-r.d.b)/n)));switch(e.d.g){case 1:return new kl(s/a,t.d.b);case 2:return new kl(t.d.a,u/a);default:return new kl(s/a,u/a)}}function eDK(e,t){var n,r,i,a,o;if(euv(),o=Pp(e_k(e.i,(eBy(),tol)),98),0!=(a=e.j.g-t.j.g)||!(o==(ewf(),tba)||o==tbs||o==tbo))return 0;if(o==(ewf(),tba)&&(n=Pp(e_k(e,tof),19),r=Pp(e_k(t,tof),19),n&&r&&0!=(i=n.a-r.a)))return i;switch(e.j.g){case 1:return elN(e.n.a,t.n.a);case 2:return elN(e.n.b,t.n.b);case 3:return elN(t.n.a,e.n.a);case 4:return elN(t.n.b,e.n.b);default:throw p7(new gC(eGz))}}function eDV(e){var t,n,r,i,a,o;for(n=(e.a||(e.a=new O_(e6h,e,5)),e.a).i+2,o=new XM(n),P_(o,new kl(e.j,e.k)),_r(new R1(null,(e.a||(e.a=new O_(e6h,e,5)),new Gq(e.a,16))),new h6(o)),P_(o,new kl(e.b,e.c)),t=1;t0&&(eoY(u,!1,(ec3(),tpm)),eoY(u,!0,tpg)),ety(t.g,new E4(e,n)),Um(e.g,t,n)}function eDZ(){var e;for(e=2,eDZ=A,e0$=eow(vx(ty_,1),eHT,25,15,[-1,-1,30,19,15,13,11,11,10,9,9,8,8,8,8,7,7,7,7,7,7,7,6,6,6,6,6,6,6,6,6,6,6,6,6,6,5]),e0z=Je(ty_,eHT,25,37,15,1),e0G=eow(vx(ty_,1),eHT,25,15,[-1,-1,63,40,32,28,25,23,21,20,19,19,18,18,17,17,16,16,16,15,15,15,15,14,14,14,14,14,14,13,13,13,13,13,13,13,13]),e0W=Je(tyS,eH2,25,37,14,1);e<=36;e++)e0z[e]=zy(eB4.Math.pow(e,e0$[e])),e0W[e]=eyt(eUY,e0z[e])}function eDX(e){var t;if(1!=(e.a||(e.a=new FQ(e6v,e,6,6)),e.a).i)throw p7(new gL(eZC+(e.a||(e.a=new FQ(e6v,e,6,6)),e.a).i));return t=new mE,eoo(Pp(etj((e.b||(e.b=new Ih(e6m,e,4,7)),e.b),0),82))&&er7(t,eBE(e,eoo(Pp(etj((e.b||(e.b=new Ih(e6m,e,4,7)),e.b),0),82)),!1)),eoo(Pp(etj((e.c||(e.c=new Ih(e6m,e,5,8)),e.c),0),82))&&er7(t,eBE(e,eoo(Pp(etj((e.c||(e.c=new Ih(e6m,e,5,8)),e.c),0),82)),!0)),t}function 
eDJ(e,t){var n,r,i,a,o;for(i=t.d?e.a.c==(zs(),tuw)?efu(t.b):efc(t.b):e.a.c==(zs(),tuy)?efu(t.b):efc(t.b),a=!1,r=new Fa(OH(i.a.Kc(),new c));eTk(r);)if(n=Pp(ZC(r),17),!(!(o=gN(e.a.f[e.a.g[t.b.p].p]))&&!q8(n)&&n.c.i.c==n.d.i.c||gN(e.a.n[e.a.g[t.b.p].p])||gN(e.a.n[e.a.g[t.b.p].p]))&&(a=!0,w0(e.b,e.a.g[emN(n,t.b).p])))return t.c=!0,t.a=n,t;return t.c=a,t.a=null,t}function eDQ(e,t,n,r,i){var a,o,s,u,c,l,f;for(Hj(),Mv(e,new oU),s=new KB(e,0),f=new p0,a=0;s.b2*a?(l=new etD(f),c=jl(o)/jc(o),u=eY9(l,t,new mp,n,r,i,c),C5(xB(l.e),u),f.c=Je(e1R,eUp,1,0,5,1),a=0,f.c[f.c.length]=l,f.c[f.c.length]=o,a=jl(l)*jc(l)+jl(o)*jc(o)):(f.c[f.c.length]=o,a+=jl(o)*jc(o));return f}function eD1(e,t,n){var r,i,a,o,s,u,c;if(0==(r=n.gc()))return!1;if(e.ej()){if(u=e.fj(),edu(e,t,n),o=1==r?e.Zi(3,null,n.Kc().Pb(),t,u):e.Zi(5,null,n,t,u),e.bj()){for(s=r<100?null:new yf(r),a=t+r,i=t;i0){for(o=0;o>16==-15&&e.Cb.nh()&&QU(new JB(e.Cb,9,13,n,e.c,ebv(QX(Pp(e.Cb,59)),e))):M4(e.Cb,88)&&e.Db>>16==-23&&e.Cb.nh()&&(M4(t=e.c,88)||(t=(eBK(),tgI)),M4(n,88)||(n=(eBK(),tgI)),QU(new JB(e.Cb,9,10,n,t,ebv(qt(Pp(e.Cb,26)),e)))))),e.c}function eD6(e,t){var n,r,i,a,o,s,u,c,l,f;for(ewG(t,"Hypernodes processing",1),i=new fz(e.b);i.an)return i}function eNe(e,t){var n,r,i;r=0!=eMU(e.d,1),(gN(LK(e_k(t.j,(eBU(),tt2))))||gN(LK(e_k(t.j,tnS))))&&xc(e_k(t.j,(eBy(),ti9)))!==xc((esn(),tsM))?r=gN(LK(e_k(t.j,tt2))):t.c.Tf(t.e,r),eAb(e,t,r,!0),gN(LK(e_k(t.j,tnS)))&&eo3(t.j,tnS,(OQ(),!1)),gN(LK(e_k(t.j,tt2)))&&(eo3(t.j,tt2,(OQ(),!1)),eo3(t.j,tnS,!0)),n=eSY(e,t);do{if(er0(e),0==n)return 0;r=!r,i=n,eAb(e,t,r,!1),n=eSY(e,t)}while(i>n)return i}function eNt(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p;if(t==n)return!0;if(t=eTE(e,t),n=eTE(e,n),!(r=eb1(t)))return(s=t.e)==(h=n.e);if((l=eb1(n))!=r)return!!l&&(u=r.Dj())==(p=l.Dj())&&null!=u;if(a=(o=(t.d||(t.d=new O_(tgr,t,1)),t.d)).i,d=(n.d||(n.d=new O_(tgr,n,1)),n.d),a==d.i){for(c=0;c0,s=efC(t,a),n?Ag(s.b,t):Ag(s.g,t),1==efv(s).c.length&&qQ(r,s,r.c.b,r.c),i=new kD(a,t),Vw(e.o,i),QA(e.e.a,a))}function 
eNs(e,t){var n,r,i,a,o,s,u;return r=eB4.Math.abs(FB(e.b).a-FB(t.b).a),s=eB4.Math.abs(FB(e.b).b-FB(t.b).b),i=0,u=0,n=1,o=1,r>e.b.b/2+t.b.b/2&&(n=1-(i=eB4.Math.min(eB4.Math.abs(e.b.c-(t.b.c+t.b.b)),eB4.Math.abs(e.b.c+e.b.b-t.b.c)))/r),s>e.b.a/2+t.b.a/2&&(o=1-(u=eB4.Math.min(eB4.Math.abs(e.b.d-(t.b.d+t.b.a)),eB4.Math.abs(e.b.d+e.b.a-t.b.d)))/s),(1-(a=eB4.Math.min(n,o)))*eB4.Math.sqrt(r*r+s*s)}function eNu(e){var t,n,r,i;for(eFX(e,e.e,e.f,(zo(),tuq),!0,e.c,e.i),eFX(e,e.e,e.f,tuq,!1,e.c,e.i),eFX(e,e.e,e.f,tuZ,!0,e.c,e.i),eFX(e,e.e,e.f,tuZ,!1,e.c,e.i),eNd(e,e.c,e.e,e.f,e.i),r=new KB(e.i,0);r.b=65;n--)tvJ[n]=n-65<<24>>24;for(r=122;r>=97;r--)tvJ[r]=r-97+26<<24>>24;for(i=57;i>=48;i--)tvJ[i]=i-48+52<<24>>24;for(a=0,tvJ[43]=62,tvJ[47]=63;a<=25;a++)tvQ[a]=65+a&eHd;for(o=26,u=0;o<=51;++o,u++)tvQ[o]=97+u&eHd;for(e=52,s=0;e<=61;++e,s++)tvQ[e]=48+s&eHd;tvQ[62]=43,tvQ[63]=47}function eNf(e,t){var n,r,i,a,o,s,u,c,l,f,d,h;if(e.dc())return new yb;for(c=0,f=0,i=e.Kc();i.Ob();)a=(r=Pp(i.Pb(),37)).f,c=eB4.Math.max(c,a.a),f+=a.a*a.b;for(c=eB4.Math.max(c,eB4.Math.sqrt(f)*gP(LV(e_k(Pp(e.Kc().Pb(),37),(eBy(),tiX))))),d=0,h=0,u=0,n=t,s=e.Kc();s.Ob();)d+(l=(o=Pp(s.Pb(),37)).f).a>c&&(d=0,h+=u+t,u=0),eIn(o,d,h),n=eB4.Math.max(n,d+l.a),u=eB4.Math.max(u,l.b),d+=l.a+t;return new kl(n+t,h+u+t)}function eNd(e,t,n,r,i){var a,o,s,u,c,l,f;for(o=new fz(t);o.aa)return eYu(),tby;break;case 4:case 3:if(u<0)return eYu(),tbw;if(u+e.f>i)return eYu(),tbj}return(o=(s+e.g/2)/a)+(n=(u+e.f/2)/i)<=1&&o-n<=0?(eYu(),tbY):o+n>=1&&o-n>=0?(eYu(),tby):n<.5?(eYu(),tbw):(eYu(),tbj)}function eNp(e,t,n,r,i){var a,o;if(a=eft(WM(t[0],eH8),WM(r[0],eH8)),e[0]=jE(a),a=Fv(a,32),n>=i){for(o=1;o0&&(i.b[o++]=0,i.b[o++]=a.b[0]-1),t=1;t0&&(l0(u,u.d-i.d),i.c==(Xa(),tuU)&&lQ(u,u.a-i.d),u.d<=0&&u.i>0&&qQ(t,u,t.c.b,t.c));for(a=new fz(e.f);a.a0&&(l2(s,s.i-i.d),i.c==(Xa(),tuU)&&l1(s,s.b-i.d),s.i<=0&&s.d>0&&qQ(n,s,n.c.b,n.c))}function eNv(e,t,n){var r,i,a,o,s,u,c,l;for(ewG(n,"Processor compute 
fanout",1),Yy(e.b),Yy(e.a),s=null,a=epL(t.b,0);!s&&a.b!=a.d.c;)gN(LK(e_k(c=Pp(Vv(a),86),(eR6(),tcm))))&&(s=c);for(qQ(u=new _n,s,u.c.b,u.c),eYc(e,u),l=epL(t.b,0);l.b!=l.d.c;)o=Lq(e_k(c=Pp(Vv(l),86),(eR6(),tca))),eo3(c,tci,ell(i=null!=zg(e.b,o)?Pp(zg(e.b,o),19).a:0)),eo3(c,tcn,ell(r=1+(null!=zg(e.a,o)?Pp(zg(e.a,o),19).a:0)));eEj(n)}function eNy(e,t,n,r,i){var a,o,s,u,c,l,f,d,h,p;for(u=0,d=eyG(e,n);u0),r.a.Xb(r.c=--r.b),f>d+u&&BH(r);for(o=new fz(h);o.a0),r.a.Xb(r.c=--r.b)}}function eNw(){var e,t,n,r,i,a;if(eBG(),tyg)return tyg;for(e=(++tyv,new WZ(4)),ePR(e,eYB(e1_,!0)),ej0(e,eYB("M",!0)),ej0(e,eYB("C",!0)),a=(++tyv,new WZ(4)),r=0;r<11;r++)eLw(a,r,r);return t=(++tyv,new WZ(4)),ePR(t,eYB("M",!0)),eLw(t,4448,4607),eLw(t,65438,65439),i=(++tyv,new Mr(2)),eRv(i,e),eRv(i,tye),(n=(++tyv,new Mr(2))).$l(jS(a,eYB("L",!0))),n.$l(t),n=(++tyv,new qa(3,n)),tyg=n=(++tyv,new YD(i,n))}function eN_(e){var t,n;if(t=Lq(eT8(e,(eBB(),tdQ))),!eae(t,e)&&!X2(e,th6)&&(0!=(e.a||(e.a=new FQ(e6k,e,10,11)),e.a).i||gN(LK(eT8(e,thh))))){if(null==t||0==e_H(t).length){if(!eae(eG1,e))throw eFh(e,n=xM(xM(new O0("Unable to load default layout algorithm "),eG1)," for unconfigured node ")),p7(new gq(n.a))}else throw eFh(e,n=xM(xM(new O0("Layout algorithm '"),t),"' not found for ")),p7(new gq(n.a))}}function eNE(e){var t,n,r,i,a,o,s,u,c,l,f,d,h;if(n=e.i,t=e.n,0==e.b)for(h=n.c+t.b,d=n.b-t.b-t.c,o=e.a,u=0,l=o.length;u0&&(f-=r[0]+e.c,r[0]+=e.c),r[2]>0&&(f-=r[2]+e.c),r[1]=eB4.Math.max(r[1],f),jQ(e.a[1],n.c+t.b+r[0]-(r[1]-f)/2,r[1]);for(a=e.a,s=0,c=a.length;s0?(e.n.c.length-1)*e.i:0,r=new fz(e.n);r.a1)for(r=epL(i,0);r.b!=r.d.c;)for(n=Pp(Vv(r),231),a=0,u=new fz(n.e);u.a0&&(t[0]+=e.c,f-=t[0]),t[2]>0&&(f-=t[2]+e.c),t[1]=eB4.Math.max(t[1],f),j1(e.a[1],r.d+n.d+t[0]-(t[1]-f)/2,t[1]);else for(p=r.d+n.d,h=r.a-n.d-n.a,o=e.a,u=0,l=o.length;u=0&&a!=n)throw p7(new 
gL(eXB));for(u=0,i=0;u=efT(e.b.c,i.b.c+i.b.b)&&0>=efT(i.b.c,e.b.c+e.b.b)&&0>=efT(e.b.d,i.b.d+i.b.a)&&0>=efT(i.b.d,e.b.d+e.b.a)){if(0==efT(i.b.c,e.b.c+e.b.b)&&r.a<0||0==efT(i.b.c+i.b.b,e.b.c)&&r.a>0||0==efT(i.b.d,e.b.d+e.b.a)&&r.b<0||0==efT(i.b.d+i.b.a,e.b.d)&&r.b>0){s=0;break}}else s=eB4.Math.min(s,ekg(e,i,r));s=eB4.Math.min(s,eNC(e,a,s,r))}return s}function eNI(e,t){var n,r,i,a,o,s,u;if(e.b<2)throw p7(new gL("The vector chain must contain at least a source and a target point."));for(Tj(t,(i=(A6(0!=e.b),Pp(e.a.a.c,8))).a,i.b),u=new AF((t.a||(t.a=new O_(e6h,t,5)),t.a)),o=epL(e,1);o.agP(Ot(o.g,o.d[0]).a)?(A6(u.b>0),u.a.Xb(u.c=--u.b),CD(u,o),i=!0):s.e&&s.e.gc()>0&&(a=(s.e||(s.e=new p0),s.e).Mc(t),c=(s.e||(s.e=new p0),s.e).Mc(n),(a||c)&&((s.e||(s.e=new p0),s.e).Fc(o),++o.c));i||(r.c[r.c.length]=o)}function eNH(e){var t,n,r;if(TM(Pp(e_k(e,(eBy(),tol)),98)))for(n=new fz(e.j);n.a>>0).toString(16),n.length-2,n.length):e>=eH3?"\\v"+Az(n="0"+(t=e>>>0).toString(16),n.length-6,n.length):""+String.fromCharCode(e&eHd)}return r}function eNz(e,t){var n,r,i,a,o,s,u,c,l,f;if(o=e.e,0==(u=t.e))return e;if(0==o)return 0==t.e?t:new F7(-t.e,t.d,t.a);if((a=e.d)+(s=t.d)==2)return n=WM(e.a[0],eH8),r=WM(t.a[0],eH8),o<0&&(n=QC(n)),u<0&&(r=QC(r)),ep_(efe(n,r));if(-1==(i=a!=s?a>s?1:-1:es8(e.a,t.a,a)))f=-u,l=o==u?Z1(t.a,s,e.a,a):X7(t.a,s,e.a,a);else if(f=o,o==u){if(0==i)return eLQ(),e08;l=Z1(e.a,a,t.a,s)}else l=X7(e.a,a,t.a,s);return c=new F7(f,l.length,l),Ku(c),c}function eNG(e){var t,n,r,i,a,o;for(this.e=new p0,this.a=new p0,n=e.b-1;n<3;n++)Ls(e,0,Pp(ep3(e,0),8));if(e.b<4)throw p7(new gL("At (least dimension + 1) control points are necessary!"));for(this.b=3,this.d=!0,this.c=!1,eMO(this,e.b+this.b-1),o=new p0,a=new fz(this.e),t=0;t=t.o&&n.f<=t.f||.5*t.a<=n.f&&1.5*t.a>=n.f){if((o=Pp(RJ(t.n,t.n.c.length-1),211)).e+o.d+n.g+i<=r&&((a=Pp(RJ(t.n,t.n.c.length-1),211)).f-e.f+n.f<=e.b||1==e.a.c.length))return efg(t,n),!0;if(t.s+n.g<=r&&(t.t+t.d+n.f+i<=e.b||1==e.a.c.length))return 
P_(t.b,n),s=Pp(RJ(t.n,t.n.c.length-1),211),P_(t.n,new zO(t.s,s.f+s.a+t.i,t.i)),eml(Pp(RJ(t.n,t.n.c.length-1),211),n),eNk(t,n),!0}return!1}function eNV(e,t,n){var r,i,a,o;return e.ej()?(i=null,a=e.fj(),r=e.Zi(1,o=ees(e,t,n),n,t,a),e.bj()&&!(e.ni()&&null!=o?ecX(o,n):xc(o)===xc(n))?(null!=o&&(i=e.dj(o,i)),i=e.cj(n,i),e.ij()&&(i=e.lj(o,n,i)),i?(i.Ei(r),i.Fi()):e.$i(r)):(e.ij()&&(i=e.lj(o,n,i)),i?(i.Ei(r),i.Fi()):e.$i(r)),o):(o=ees(e,t,n),e.bj()&&!(e.ni()&&null!=o?ecX(o,n):xc(o)===xc(n))&&(i=null,null!=o&&(i=e.dj(o,null)),(i=e.cj(n,i))&&i.Fi()),o)}function eNq(e,t){var n,r,i,a,o,s,u,c;t%=24,e.q.getHours()!=t&&((r=new eB4.Date(e.q.getTime())).setDate(r.getDate()+1),(s=e.q.getTimezoneOffset()-r.getTimezoneOffset())>0&&(u=s/60|0,c=s%60,i=e.q.getDate(),(n=e.q.getHours())+u>=24&&++i,a=new eB4.Date(e.q.getFullYear(),e.q.getMonth(),i,t+u,e.q.getMinutes()+c,e.q.getSeconds(),e.q.getMilliseconds()),e.q.setTime(a.getTime()))),o=e.q.getTime(),e.q.setTime(o+36e5),e.q.getHours()!=t&&e.q.setTime(o)}function eNZ(e,t){var n,r,i,a,o;if(ewG(t,"Path-Like Graph Wrapping",1),0==e.b.c.length||(n=(o=(null==(i=new eTN(e)).i&&(i.i=eis(i,new iP)),gP(i.i)*i.f))/(null==i.i&&(i.i=eis(i,new iP)),gP(i.i)),i.b>n)){eEj(t);return}switch(Pp(e_k(e,(eBy(),toq)),337).g){case 2:a=new iF;break;case 0:a=new iO;break;default:a=new iY}if(r=a.Vf(e,i),!a.Wf())switch(Pp(e_k(e,to0),338).g){case 2:r=ekE(i,r);break;case 1:r=ewQ(i,r)}eRw(e,i,r),eEj(t)}function eNX(e,t){var n,r,i,a;if(GW(e.d,e.e),e.c.a.$b(),0!=gP(LV(e_k(t.j,(eBy(),ti3))))||0!=gP(LV(e_k(t.j,ti3))))for(n=ezq,xc(e_k(t.j,ti9))!==xc((esn(),tsM))&&eo3(t.j,(eBU(),tt2),(OQ(),!0)),a=Pp(e_k(t.j,to$),19).a,i=0;i(i=(GK(s+1,t.c.length),Pp(t.c[s+1],19)).a-r)&&++c,P_(o,(GK(s+c,t.c.length),Pp(t.c[s+c],19))),u+=(GK(s+c,t.c.length),Pp(t.c[s+c],19)).a-r,++n;n1&&(u>jl(s)*jc(s)/2||0==o.b)&&(f=new etD(d),l=jl(s)/jc(s),c=eY9(f,t,new mp,n,r,i,l),C5(xB(f.e),c),s=f,h.c[h.c.length]=f,u=0,d.c=Je(e1R,eUp,1,0,5,1)));return eoc(h,d),h}function eN2(e,t,n,r){var 
i,a,o,s,u,c,l,f,d,h,p,b;if(n.mh(t)&&(l=(h=t)?Pp(r,49).xh(h):null)){if(b=n.bh(t,e.a),(p=t.t)>1||-1==p){if(f=Pp(b,69),d=Pp(l,69),f.dc())d.$b();else for(o=!!ebY(t),a=0,s=e.a?f.Kc():f.Zh();s.Ob();)c=Pp(s.Pb(),56),(i=Pp(eef(e,c),56))?(o?-1==(u=d.Xc(i))?d.Xh(a,i):a!=u&&d.ji(a,i):d.Xh(a,i),++a):e.b&&!o&&(d.Xh(a,c),++a)}else null==b?l.Wb(null):null==(i=eef(e,b))?e.b&&!ebY(t)&&l.Wb(b):l.Wb(i)}}function eN3(e,t){var n,r,i,a,o,s,u,l;for(n=new nf,i=new Fa(OH(efu(t).a.Kc(),new c));eTk(i);)if(r=Pp(ZC(i),17),!q8(r)&&ewg(s=r.c.i,e8q)){if(-1==(l=eCu(e,s,e8q,e8V)))continue;n.b=eB4.Math.max(n.b,l),n.a||(n.a=new p0),P_(n.a,s)}for(o=new Fa(OH(efc(t).a.Kc(),new c));eTk(o);)if(a=Pp(ZC(o),17),!q8(a)&&ewg(u=a.d.i,e8V)){if(-1==(l=eCu(e,u,e8V,e8q)))continue;n.d=eB4.Math.max(n.d,l),n.c||(n.c=new p0),P_(n.c,u)}return n}function eN4(e){var t,n,r,i;if(exX(),t=zy(e),e1e6)throw p7(new g_("power of ten too big"));if(e<=eUu)return ZA(exT(e2t[1],t),t);for(i=r=exT(e2t[1],eUu),n=eap(e-eUu),t=zy(e%eUu);ecd(n,eUu)>0;)i=eeD(i,r),n=efe(n,eUu);for(i=eeD(i,exT(e2t[1],t)),i=ZA(i,eUu),n=eap(e-eUu);ecd(n,eUu)>0;)i=ZA(i,eUu),n=efe(n,eUu);return ZA(i,t)}function eN5(e,t){var n,r,i,a,o,s,u,c,l;for(ewG(t,"Hierarchical port dummy size processing",1),u=new p0,l=new p0,n=2*(r=gP(LV(e_k(e,(eBy(),toA))))),a=new fz(e.b);a.ac&&r>c)l=s,c=gP(t.p[s.p])+gP(t.d[s.p])+s.o.b+s.d.a;else{i=!1,n.n&&P3(n,"bk node placement breaks on "+s+" which should have been after "+l);break}if(!i)break}return n.n&&P3(n,t+" is feasible: "+i),i}function ePr(e,t,n,r){var i,a,o,s,u,c,l;for(s=-1,l=new fz(e);l.a=m&&e.e[u.p]>p*e.b||y>=n*m)&&(d.c[d.c.length]=s,s=new p0,er7(o,a),a.a.$b(),c-=l,h=eB4.Math.max(h,c*e.b+b),c+=y,v=y,y=0,l=0,b=0);return new kD(h,d)}function ePs(e){var t,n,r,i,a,o,s,u,c,l,f,d,h;for(n=(c=new fT(e.c.b).a.vc().Kc(),new 
fN(c));n.a.Ob();)null==(i=(t=(s=Pp(n.a.Pb(),42),Pp(s.dd(),149))).a)&&(i=""),(r=L8(e.c,i))||0!=i.length||(r=ecj(e)),r&&!eds(r.c,t,!1)&&P7(r.c,t);for(o=epL(e.a,0);o.b!=o.d.c;)a=Pp(Vv(o),478),l=Zc(e.c,a.a),h=Zc(e.c,a.b),l&&h&&P7(l.c,new kD(h,a.c));for(HC(e.a),d=epL(e.b,0);d.b!=d.d.c;)f=Pp(Vv(d),478),t=L9(e.c,f.a),u=Zc(e.c,f.b),t&&u&&_U(t,u,f.c);HC(e.b)}function ePu(e,t,n){var r,i,a,o,s,u,c,l,f,d,h;a=new lD(e),o=new eg6,i=(Ze(o.g),Ze(o.j),Yy(o.b),Ze(o.d),Ze(o.i),Yy(o.k),Yy(o.c),Yy(o.e),h=ekH(o,a,null),eMA(o,a),h),t&&(s=ePA(c=new lD(t)),eEh(i,eow(vx(e5q,1),eUp,527,0,[s]))),d=!1,f=!1,n&&(eXW in(c=new lD(n)).a&&(d=zR(c,eXW).ge().a),eXK in c.a&&(f=zR(c,eXK).ge().a)),l=yr(eny(new mV,d),f),eER(new or,i,l),eXW in a.a&&ee3(a,eXW,null),(d||f)&&(eNj(l,u=new gu,d,f),ee3(a,eXW,u)),r=new pp(o),esA(new TY(i),r)}function ePc(e,t,n){var r,i,a,o,s,u,c,l,f;for(u=0,o=new evI,c=eow(vx(ty_,1),eHT,25,15,[0]),i=-1,a=0,r=0;u0){if(i<0&&l.a&&(i=u,a=c[0],r=0),i>=0){if(s=l.b,u==i&&0==(s-=r++))return 0;if(!eYw(t,c,l,s,o)){u=i-1,c[0]=a;continue}}else if(i=-1,!eYw(t,c,l,0,o))return 0}else{if(i=-1,32==UI(l.c,0)){if(f=c[0],eey(t,c),c[0]>f)continue}else if($D(t,l.c,c[0])){c[0]+=l.c.length;continue}return 0}return eYn(o,n)?c[0]:0}function ePl(e){var t,n,r,i,a,o,s,u;if(!e.f){if(u=new su,s=new su,null==(o=(t=tgz).a.zc(e,t))){for(a=new Ow($E(e));a.e!=a.i.gc();)i=Pp(epH(a),26),Y4(u,ePl(i));t.a.Bc(e),t.a.gc()}for(r=(e.s||(e.s=new FQ(tm6,e,21,17)),new Ow(e.s));r.e!=r.i.gc();)n=Pp(epH(r),170),M4(n,99)&&JL(s,Pp(n,18));euI(s),e.r=new PX(e,(Pp(etj(H9((BM(),tgv).o),6),18),s.i),s.g),Y4(u,e.r),euI(u),e.f=new xQ((Pp(etj(H9(tgv.o),5),18),u.i),u.g),Zd(e).b&=-3}return e.f}function ePf(e){var t,n,r,i,a,o,s,u,c,l,f,d,h,p;for(c=0,r=Je(ty_,eHT,25,o=e.o,15,1),i=Je(ty_,eHT,25,o,15,1),t=Je(ty_,eHT,25,n=e.p,15,1),a=Je(ty_,eHT,25,n,15,1);c=0&&!emy(e,l,f);)--f;i[l]=f}for(h=0;h=0&&!emy(e,s,p);)--s;a[p]=s}for(u=0;ut[d]&&dr[u]&&eCQ(e,u,d,!1,!0)}function ePd(e){var 
t,n,r,i,a,o,s,u;n=gN(LK(e_k(e,(eCk(),e9b)))),a=e.a.c.d,s=e.a.d.d,n?(o=Ol(C6(new kl(s.a,s.b),a),.5),u=Ol(MB(e.e),.5),t=C6(C5(new kl(a.a,a.b),o),u),Lf(e.d,t)):(i=gP(LV(e_k(e.a,e9I))),r=e.d,a.a>=s.a?a.b>=s.b?(r.a=s.a+(a.a-s.a)/2+i,r.b=s.b+(a.b-s.b)/2-i-e.e.b):(r.a=s.a+(a.a-s.a)/2+i,r.b=a.b+(s.b-a.b)/2+i):a.b>=s.b?(r.a=a.a+(s.a-a.a)/2+i,r.b=s.b+(a.b-s.b)/2+i):(r.a=a.a+(s.a-a.a)/2+i,r.b=a.b+(s.b-a.b)/2-i-e.e.b))}function ePh(e,t){var n,r,i,a,o,s,u;if(null==e)return null;if(0==(a=e.length))return"";for(u=Je(tyw,eHl,25,a,15,1),Ji(0,a,e.length),Ji(0,a,u.length),YF(e,0,a,u,0),n=null,s=t,i=0,o=0;i0?Az(n.a,0,a-1):"":e.substr(0,a-1):n?n.a:e}function ePp(e){_Y(e,new ewB(vQ(vq(vJ(vX(new oc,ezH),"ELK DisCo"),"Layouter for arranging unconnected subgraphs. The subgraphs themselves are, by default, not laid out."),new e4))),KE(e,ezH,ez$,epB(e67)),KE(e,ezH,ezz,epB(e63)),KE(e,ezH,ezG,epB(e6J)),KE(e,ezH,ezW,epB(e64)),KE(e,ezH,e$Q,epB(e69)),KE(e,ezH,e$1,epB(e66)),KE(e,ezH,e$J,epB(e68)),KE(e,ezH,e$0,epB(e65)),KE(e,ezH,ezj,epB(e61)),KE(e,ezH,ezF,epB(e6Q)),KE(e,ezH,ezY,epB(e60)),KE(e,ezH,ezB,epB(e62))}function ePb(e,t,n,r){var i,a,o,s,u,c,l,f,d;if(a=new eb$(e),lK(a,(eEn(),e8P)),eo3(a,(eBy(),tol),(ewf(),tbo)),i=0,t){for(o=new eES,eo3(o,(eBU(),tnc),t),eo3(a,tnc,t.i),ekv(o,(eYu(),tbY)),Gc(o,a),d=Kp(t.e),l=0,f=(c=d).length;lenR(e)?1:0,n=e.e,i=(r.length,eB4.Math.abs(zy(e.e)),new vl),1==t&&(i.a+="-"),e.e>0){if((n-=r.length-t)>=0){for(i.a+="0.";n>e0Z.length;n-=e0Z.length)RX(i,e0Z);CA(i,e0Z,zy(n)),xM(i,r.substr(t))}else n=t-n,xM(i,Az(r,t,zy(n))),i.a+=".",xM(i,xy(r,zy(n)))}else{for(xM(i,r.substr(t));n<-e0Z.length;n+=e0Z.length)RX(i,e0Z);CA(i,e0Z,zy(-n))}return i.a}function ePv(e,t,n,r){var i,a,o,s,u,c,l,f,d;return(c=(u=C6(new kl(n.a,n.b),e)).a*t.b-u.b*t.a,l=t.a*r.b-t.b*r.a,f=(u.a*r.b-u.b*r.a)/l,d=c/l,0!=l)?f>=0&&f<=1&&d>=0&&d<=1?C5(new kl(e.a,e.b),Ol(new kl(t.a,t.b),f)):null:0!=c?null:(a=Jh(e,i=C5(new kl(n.a,n.b),Ol(new kl(r.a,r.b),.5))),o=Jh(C5(new 
kl(e.a,e.b),t),i),s=.5*eB4.Math.sqrt(r.a*r.a+r.b*r.b),at.a&&(r.Hc((eyY(),tdW))?e.c.a+=(n.a-t.a)/2:r.Hc(tdV)&&(e.c.a+=n.a-t.a)),n.b>t.b&&(r.Hc((eyY(),tdZ))?e.c.b+=(n.b-t.b)/2:r.Hc(tdq)&&(e.c.b+=n.b-t.b)),Pp(e_k(e,(eBU(),tt3)),21).Hc((eLR(),ttw))&&(n.a>t.a||n.b>t.b))for(s=new fz(e.a);s.at.a&&(r.Hc((eyY(),tdW))?e.c.a+=(n.a-t.a)/2:r.Hc(tdV)&&(e.c.a+=n.a-t.a)),n.b>t.b&&(r.Hc((eyY(),tdZ))?e.c.b+=(n.b-t.b)/2:r.Hc(tdq)&&(e.c.b+=n.b-t.b)),Pp(e_k(e,(eBU(),tt3)),21).Hc((eLR(),ttw))&&(n.a>t.a||n.b>t.b))for(o=new fz(e.a);o.at&&(i=0,a+=l.b+n,f.c[f.c.length]=l,l=new W6(a,n),r=new es$(0,l.f,l,n),enN(l,r),i=0),0==r.b.c.length||u.f>=r.o&&u.f<=r.f||.5*r.a<=u.f&&1.5*r.a>=u.f?efg(r,u):(o=new es$(r.s+r.r+n,l.f,l,n),enN(l,o),efg(o,u)),i=u.i+u.g;return f.c[f.c.length]=l,f}function ePk(e){var t,n,r,i,a,o,s,u;if(!e.a){if(e.o=null,u=new pj(e),t=new sc,null==(s=(n=tgz).a.zc(e,n))){for(o=new Ow($E(e));o.e!=o.i.gc();)a=Pp(epH(o),26),Y4(u,ePk(a));n.a.Bc(e),n.a.gc()}for(i=(e.s||(e.s=new FQ(tm6,e,21,17)),new Ow(e.s));i.e!=i.i.gc();)r=Pp(epH(i),170),M4(r,322)&&JL(t,Pp(r,34));euI(t),e.k=new PZ(e,(Pp(etj(H9((BM(),tgv).o),7),18),t.i),t.g),Y4(u,e.k),euI(u),e.a=new xQ((Pp(etj(H9(tgv.o),4),18),u.i),u.g),Zd(e).b&=-2}return e.a}function ePx(e,t,n,r,i,a,o){var s,u,c,l,f,d;return f=!1,u=eO4(n.q,t.f+t.b-n.q.f),!((d=i-(n.q.e+u-o))=(GK(a,e.c.length),Pp(e.c[a],200)).e,(!((l=(s=ePI(r,d,!1)).a)>t.b)||!!c)&&((c||l<=t.b)&&(c&&l>t.b?(n.d=l,JR(n,eEP(n,l))):(eyC(n.q,u),n.c=!0),JR(r,i-(n.s+n.r)),ebP(r,n.q.e+n.q.d,t.f),enN(t,r),e.c.length>a&&(eva((GK(a,e.c.length),Pp(e.c[a],200)),r),0==(GK(a,e.c.length),Pp(e.c[a],200)).a.c.length&&ZV(e,a)),f=!0),f))}function ePT(e,t,n,r){var i,a,o,s,u,c,l;if(l=eAY(e.e.Tg(),t),i=0,a=Pp(e.g,119),u=null,_4(),Pp(t,66).Oj()){for(s=0;se.o.a&&(l=(u-e.o.a)/2,s.b=eB4.Math.max(s.b,l),s.c=eB4.Math.max(s.c,l))}}function ePA(e){var t,n,r,i,a,o,s,u;for(a=new W8,Tp(a,(eoM(),tdr)),r=(i=erG(e,Je(e17,eUP,2,0,6,1)),new fE(new g$(new 
wY(e,i).b)));r.b0?e.i:0)>t&&u>0&&(a=0,o+=u+e.i,i=eB4.Math.max(i,d),r+=u+e.i,u=0,d=0,n&&(++f,P_(e.n,new zO(e.s,o,e.i))),s=0),d+=c.g+(s>0?e.i:0),u=eB4.Math.max(u,c.f),n&&eml(Pp(RJ(e.n,f),211),c),a+=c.g+(s>0?e.i:0),++s;return i=eB4.Math.max(i,d),r+=u,n&&(e.r=i,e.d=r,egf(e.j)),new Hr(e.s,e.t,i,r)}function ePD(e,t,n,r,i){var a,o,s,u,c,l,f,d,h;if(wK(),Yh(e,"src"),Yh(n,"dest"),d=esF(e),u=esF(n),Pz((4&d.i)!=0,"srcType is not an array"),Pz((4&u.i)!=0,"destType is not an array"),f=d.c,o=u.c,Pz((1&f.i)!=0?f==o:(1&o.i)==0,"Array types don't match"),h=e.length,c=n.length,t<0||r<0||i<0||t+i>h||r+i>c)throw p7(new bE);if((1&f.i)==0&&d!=u){if(l=etG(e),a=etG(n),xc(e)===xc(n)&&tr;)Bc(a,s,l[--t]);else for(s=r+i;r0&&ekp(e,t,n,r,i,!0)}function ePN(){ePN=A,e07=eow(vx(ty_,1),eHT,25,15,[eHt,1162261467,eU2,1220703125,362797056,1977326743,eU2,387420489,eHK,214358881,429981696,815730721,1475789056,170859375,268435456,410338673,612220032,893871739,128e7,1801088541,113379904,148035889,191102976,244140625,308915776,387420489,481890304,594823321,729e6,887503681,eU2,1291467969,1544804416,1838265625,60466176]),e2e=eow(vx(ty_,1),eHT,25,15,[-1,-1,31,19,15,13,11,11,10,9,9,8,8,8,8,7,7,7,7,7,7,7,6,6,6,6,6,6,6,6,6,6,6,6,6,6,5])}function ePP(e){var t,n,r,i,a,o,s,u;for(i=new fz(e.b);i.a=e.b.length?(a[i++]=o.b[r++],a[i++]=o.b[r++]):r>=o.b.length?(a[i++]=e.b[n++],a[i++]=e.b[n++]):o.b[r]0?e.i:0)),++t;for(efX(e.n,u),e.d=n,e.r=r,e.g=0,e.f=0,e.e=0,e.o=eHQ,e.p=eHQ,a=new fz(e.b);a.a0&&(i=(e.n||(e.n=new FQ(e6S,e,1,7)),Pp(etj(e.n,0),137)).a)&&xM(xM((t.a+=' "',t),i),'"')),(n=(e.b||(e.b=new Ih(e6m,e,4,7)),!(e.b.i<=1&&(e.c||(e.c=new Ih(e6m,e,5,8)),e.c.i<=1))))?(t.a+=" [",t):(t.a+=" ",t),xM(t,OU(new ve,new Ow(e.b))),n&&(t.a+="]"),t.a+=eGH,n&&(t.a+="["),xM(t,OU(new ve,new Ow(e.c))),n&&(t.a+="]"),t.a)}function ePB(e,t){var 
n,r,i,a,o,s,u;if(e.a){if(s=e.a.ne(),u=null,null!=s?t.a+=""+s:null!=(o=e.a.Dj())&&(-1!=(a=x7(o,e_n(91)))?(u=o.substr(a),t.a+=""+Az(null==o?eUg:(BJ(o),o),0,a)):t.a+=""+o),e.d&&0!=e.d.i){for(i=!0,t.a+="<",r=new Ow(e.d);r.e!=r.i.gc();)n=Pp(epH(r),87),i?i=!1:(t.a+=eUd,t),ePB(n,t);t.a+=">"}null!=u&&(t.a+=""+u)}else e.e?null!=(s=e.e.zb)&&(t.a+=""+s):(t.a+="?",e.b?(t.a+=" super ",ePB(e.b,t)):e.f&&(t.a+=" extends ",ePB(e.f,t)))}function ePU(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T;for(_=e.c,E=t.c,n=QI(_.a,e,0),r=QI(E.a,t,0),y=Pp(edE(e,(enY(),tsD)).Kc().Pb(),11),x=Pp(edE(e,tsN).Kc().Pb(),11),w=Pp(edE(t,tsD).Kc().Pb(),11),T=Pp(edE(t,tsN).Kc().Pb(),11),g=Kp(y.e),S=Kp(x.g),v=Kp(w.e),k=Kp(T.g),egU(e,r,E),l=0,p=(o=v).length;ll?new GT((Xa(),tuH),n,t,c-l):c>0&&l>0&&(new GT((Xa(),tuH),t,n,0),new GT(tuH,n,t,0))),o)}function ePz(e,t){var n,r,i,a,o,s;for(o=new esz(new fS(e.f.b).a);o.b;){if(a=etz(o),i=Pp(a.cd(),594),1==t){if(i.gf()!=(ec3(),tpy)&&i.gf()!=tpb)continue}else if(i.gf()!=(ec3(),tpm)&&i.gf()!=tpg)continue;switch(r=Pp(Pp(a.dd(),46).b,81),n=(s=Pp(Pp(a.dd(),46).a,189)).c,i.gf().g){case 2:r.g.c=e.e.a,r.g.b=eB4.Math.max(1,r.g.b+n);break;case 1:r.g.c=r.g.c+n,r.g.b=eB4.Math.max(1,r.g.b-n);break;case 4:r.g.d=e.e.b,r.g.a=eB4.Math.max(1,r.g.a+n);break;case 3:r.g.d=r.g.d+n,r.g.a=eB4.Math.max(1,r.g.a-n)}}}function ePG(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b;for(s=Je(ty_,eHT,25,t.b.c.length,15,1),c=Je(e4P,eU4,267,t.b.c.length,0,1),u=Je(e4N,eGW,10,t.b.c.length,0,1),f=e.a,d=0,h=f.length;d0&&u[r]&&(p=Mj(e.b,u[r],i)),b=eB4.Math.max(b,i.c.c.b+p);for(a=new fz(l.e);a.a1)throw p7(new gL(eQ$));u||(a=V4(t,r.Kc().Pb()),o.Fc(a))}return eo0(e,eSu(e,t,n),o)}function ePZ(e,t){var n,r,i,a;for(etY(t.b.j),_r(UQ(new R1(null,new Gq(t.d,16)),new iy),new iw),a=new fz(t.d);a.ae.o.b||(n=efr(e,tby),(s=t.d+t.a+(n.gc()-1)*o)>e.o.b)))}function eP5(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p;if(o=e.e,u=t.e,0==o)return t;if(0==u)return 
e;if((a=e.d)+(s=t.d)==2)return(n=WM(e.a[0],eH8),r=WM(t.a[0],eH8),o==u)?(p=jE(l=eft(n,r)),0==(h=jE(Fy(l,32)))?new XE(o,p):new F7(o,2,eow(vx(ty_,1),eHT,25,15,[p,h]))):ep_(o<0?efe(r,n):efe(n,r));if(o==u)d=o,f=a>=s?X7(e.a,a,t.a,s):X7(t.a,s,e.a,a);else{if(0==(i=a!=s?a>s?1:-1:es8(e.a,t.a,a)))return eLQ(),e08;1==i?(d=o,f=Z1(e.a,a,t.a,s)):(d=u,f=Z1(t.a,s,e.a,a))}return c=new F7(d,f.length,f),Ku(c),c}function eP6(e,t,n,r,i,a,o){var s,u,c,l,f,d,h;return f=gN(LK(e_k(t,(eBy(),taV)))),d=null,a==(enY(),tsD)&&r.c.i==n?d=r.c:a==tsN&&r.d.i==n&&(d=r.d),(c=o)&&f&&!d?(P_(c.e,r),h=eB4.Math.max(gP(LV(e_k(c.d,tak))),gP(LV(e_k(r,tak)))),eo3(c.d,tak,h)):(l=(eYu(),tbF),d?l=d.j:TM(Pp(e_k(n,tol),98))&&(l=a==tsD?tbY:tby),u=eP8(e,t,n,a,l,r),s=ZD((Bq(n),r)),a==tsD?(Gs(s,Pp(RJ(u.j,0),11)),Go(s,i)):(Gs(s,i),Go(s,Pp(RJ(u.j,0),11))),c=new ec8(r,s,u,Pp(e_k(u,(eBU(),tnc)),11),a,!d)),exg(e.a,r,new DT(c.d,t,a)),c}function eP9(e,t){var n,r,i,a,o,s,u,c,l,f;if(l=null,e.d&&(l=Pp(zg(e.d,t),138)),!l){if(f=(a=e.a.Mh()).i,!e.d||wq(e.d)!=f){for(u=new p2,e.d&&eij(u,e.d),s=c=u.f.c+u.g.c;s0?(h=(p-1)*n,s&&(h+=r),l&&(h+=r),!(h=e.b[i+1])i+=2;else if(n0)for(r=new I4(Pp(Zq(e.a,a),21)),Hj(),Mv(r,new dT(t)),i=new KB(a.b,0);i.b_)?(u=2,o=eUu):0==u?(u=1,o=S):(u=0,o=S):(h=S>=o||o-S0?1:Te(isNaN(r),isNaN(0)))>=0^(enj(eVU),(eB4.Math.abs(s)<=eVU||0==s||isNaN(s)&&isNaN(0)?0:s<0?-1:s>0?1:Te(isNaN(s),isNaN(0)))>=0))?eB4.Math.max(s,r):(enj(eVU),(eB4.Math.abs(r)<=eVU||0==r||isNaN(r)&&isNaN(0)?0:r<0?-1:r>0?1:Te(isNaN(r),isNaN(0)))>0)?eB4.Math.sqrt(s*s+r*r):-eB4.Math.sqrt(s*s+r*r)}function eRv(e,t){var n,r,i,a,o,s;if(t){if(e.a||(e.a=new bZ),2==e.e){bY(e.a,t);return}if(1==t.e){for(i=0;i=eH3?xk(n,el1(r)):Bf(n,r&eHd),o=(++tyv,new zc(10,null,0)),Yu(e.a,o,s-1)):xk(n=(o.bm().length,new vu),o.bm()),0==t.e?(r=t._l())>=eH3?xk(n,el1(r)):Bf(n,r&eHd):xk(n,t.bm()),Pp(o,521).b=n.a}}function eRy(e){var t,n,r,i,a;return 
null!=e.g?e.g:e.a<32?(e.g=eYS(eap(e.f),zy(e.e)),e.g):(i=eBw((e.c||(e.c=euK(e.f)),e.c),0),0==e.e)?i:(t=(e.c||(e.c=euK(e.f)),e.c).e<0?2:1,n=i.length,r=-e.e+n-t,a=new vc,a.a+=""+i,e.e>0&&r>=-6?r>=0?Gn(a,n-zy(e.e),"."):(a.a=Az(a.a,0,t-1)+"0."+xy(a.a,t-1),Gn(a,t+1,ehv(e0Z,0,-zy(r)-1))):(n-t>=1&&(Gn(a,t,"."),++n),Gn(a,n,"E"),r>0&&Gn(a,++n,"+"),Gn(a,++n,""+Fb(eap(r)))),e.g=a.a,e.g)}function eRw(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p,b,m;if(!n.dc()){for(s=0,d=0,p=Pp((r=n.Kc()).Pb(),19).a;s1&&(u=c.mg(u,e.a,s));return 1==u.c.length?Pp(RJ(u,u.c.length-1),220):2==u.c.length?eRr((GK(0,u.c.length),Pp(u.c[0],220)),(GK(1,u.c.length),Pp(u.c[1],220)),o,a):null}function eRk(e){var t,n,r,i,a,o;for(ety(e.a,new eJ),n=new fz(e.a);n.a=eB4.Math.abs(r.b)?(r.b=0,a.d+a.a>o.d&&a.do.c&&a.c0){if(t=new xt(e.i,e.g),a=(n=e.i)<100?null:new yf(n),e.ij())for(r=0;r0){for(s=e.g,c=e.i,ZG(e),a=c<100?null:new yf(c),r=0;r>13|(15&e.m)<<9,i=e.m>>4&8191,a=e.m>>17|(255&e.h)<<5,o=(1048320&e.h)>>8,s=8191&t.l,u=t.l>>13|(15&t.m)<<9,c=t.m>>4&8191,l=t.m>>17|(255&t.h)<<5,f=(1048320&t.h)>>8,k=n*s,x=r*s,T=i*s,M=a*s,O=o*s,0!=u&&(x+=n*u,T+=r*u,M+=i*u,O+=a*u),0!=c&&(T+=n*c,M+=r*c,O+=i*c),0!=l&&(M+=n*l,O+=r*l),0!=f&&(O+=n*f),d=(h=k&eHH)+(p=(511&x)<<13),m=k>>22,g=x>>9,b=m+g+(v=(262143&T)<<4)+(y=(31&M)<<17),_=T>>18,w=_+(E=M>>5)+(S=(4095&O)<<8),b+=d>>22,d&=eHH,w+=b>>22,Mk(d,b&=eHH,w&=eH$)}function eRA(e){var t,n,r,i,a,o,s;if(0!=(s=Pp(RJ(e.j,0),11)).g.c.length&&0!=s.e.c.length)throw p7(new gC("Interactive layout does not support NORTH/SOUTH ports with incoming _and_ outgoing edges."));if(0!=s.g.c.length){for(a=eHQ,n=new fz(s.g);n.a4){if(!e.wj(t))return!1;if(e.rk()){if(u=(r=(i=Pp(t,49)).Ug())==e.e&&(e.Dk()?i.Og(i.Vg(),e.zk())==e.Ak():-1-i.Vg()==e.aj()),e.Ek()&&!u&&!r&&i.Zg()){for(a=0;a0&&(c=e.n.a/a);break;case 2:case 4:(i=e.i.o.b)>0&&(c=e.n.b/i)}eo3(e,(eBU(),tnv),c)}if(u=e.o,o=e.a,r)o.a=r.a,o.b=r.b,e.d=!0;else if(t!=tbc&&t!=tbl&&s!=tbF)switch(s.g){case 1:o.a=u.a/2;break;case 2:o.a=u.a,o.b=u.b/2;break;case 
3:o.a=u.a/2,o.b=u.b;break;case 4:o.b=u.b/2}else o.a=u.a/2,o.b=u.b/2}function eRP(e){var t,n,r,i,a,o,s,u,c,l;if(e.ej()){if(l=e.Vi(),u=e.fj(),l>0){if(t=new eiP(e.Gi()),a=(n=l)<100?null:new yf(n),Cf(e,n,t.g),i=1==n?e.Zi(4,etj(t,0),null,0,u):e.Zi(6,t,null,-1,u),e.bj()){for(r=new Ow(t);r.e!=r.i.gc();)a=e.dj(epH(r),a);a?(a.Ei(i),a.Fi()):e.$i(i)}else a?(a.Ei(i),a.Fi()):e.$i(i)}else Cf(e,e.Vi(),e.Wi()),e.$i(e.Zi(6,(Hj(),e2r),null,-1,u))}else if(e.bj()){if((l=e.Vi())>0){for(s=e.Wi(),c=l,Cf(e,l,s),a=c<100?null:new yf(c),r=0;re.d[o.p]&&(n+=qq(e.b,a)*Pp(u.b,19).a,Vw(e.a,ell(a)));for(;!gY(e.a);)eek(e.b,Pp(Yn(e.a),19).a)}return n}function eRF(e,t,n,r){var i,a,o,s,u,c,l,f,d,h,p,b,m;for((f=new TS(Pp(eT8(e,(e_C(),tdB)),8))).a=eB4.Math.max(f.a-n.b-n.c,0),f.b=eB4.Math.max(f.b-n.d-n.a,0),(null==(i=LV(eT8(e,tdN)))||(BJ(i),i<=0))&&(i=1.3),s=new p0,p=new Ow((e.a||(e.a=new FQ(e6k,e,10,11)),e.a));p.e!=p.i.gc();)h=Pp(epH(p),33),o=new Lp(h),s.c[s.c.length]=o;switch((d=Pp(eT8(e,tdP),311)).g){case 3:m=eDQ(s,t,f.a,f.b,(c=r,BJ(i),c));break;case 1:m=eN0(s,t,f.a,f.b,(l=r,BJ(i),l));break;default:m=eRH(s,t,f.a,f.b,(u=r,BJ(i),u))}a=new etD(m),b=eY9(a,t,n,f.a,f.b,r,(BJ(i),i)),eYx(e,b.a,b.b,!1,!0)}function eRY(e,t){var n,r,i,a;n=t.b,a=new I4(n.j),i=0,(r=n.j).c=Je(e1R,eUp,1,0,5,1),Y$(Pp(eay(e.b,(eYu(),tbw),(erX(),tep)),15),n),i=emQ(a,i,new r3,r),Y$(Pp(eay(e.b,tbw,teh),15),n),i=emQ(a,i,new r2,r),Y$(Pp(eay(e.b,tbw,ted),15),n),Y$(Pp(eay(e.b,tby,tep),15),n),Y$(Pp(eay(e.b,tby,teh),15),n),i=emQ(a,i,new r4,r),Y$(Pp(eay(e.b,tby,ted),15),n),Y$(Pp(eay(e.b,tbj,tep),15),n),i=emQ(a,i,new r5,r),Y$(Pp(eay(e.b,tbj,teh),15),n),i=emQ(a,i,new r6,r),Y$(Pp(eay(e.b,tbj,ted),15),n),Y$(Pp(eay(e.b,tbY,tep),15),n),i=emQ(a,i,new ic,r),Y$(Pp(eay(e.b,tbY,teh),15),n),Y$(Pp(eay(e.b,tbY,ted),15),n)}function eRB(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b;for(ewG(t,"Layer size calculation",1),l=eHQ,c=eH1,i=!1,s=new fz(e.b);s.a.5?g-=2*o*(p-.5):p<.5&&(g+=2*a*(.5-p)),g<(i=s.d.b)&&(g=i),b=s.d.c,g>m.a-b-l&&(g=m.a-b-l),s.n.a=t+g}}function 
eRH(e,t,n,r,i){var a,o,s,u,c,l,f,d,h,p,b,m;for(s=Je(tyx,eH5,25,e.c.length,15,1),d=new Fz(new oB),egV(d,e),c=0,b=new p0;0!=d.b.c.length;)if(o=Pp(0==d.b.c.length?null:RJ(d.b,0),157),c>1&&jl(o)*jc(o)/2>s[0]){for(a=0;as[a];)++a;p=new Gz(b,0,a+1),f=new etD(p),l=jl(o)/jc(o),u=eY9(f,t,new mp,n,r,i,l),C5(xB(f.e),u),Ja(e_s(d,f)),egV(d,h=new Gz(b,a+1,b.c.length)),b.c=Je(e1R,eUp,1,0,5,1),c=0,jA(s,s.length,0)}else null!=(m=0==d.b.c.length?null:RJ(d.b,0))&&erD(d,0),c>0&&(s[c]=s[c-1]),s[c]+=jl(o)*jc(o),++c,b.c[b.c.length]=o;return b}function eR$(e){var t,n,r,i,a;if((r=Pp(e_k(e,(eBy(),taY)),163))==(ef_(),tnN)){for(n=new Fa(OH(efu(e).a.Kc(),new c));eTk(n);)if(t=Pp(ZC(n),17),!ZI(t))throw p7(new gq(eWr+egs(e)+"' has its layer constraint set to FIRST_SEPARATE, but has at least one incoming edge. FIRST_SEPARATE nodes must not have incoming edges."))}else if(r==tnR){for(a=new Fa(OH(efc(e).a.Kc(),new c));eTk(a);)if(i=Pp(ZC(a),17),!ZI(i))throw p7(new gq(eWr+egs(e)+"' has its layer constraint set to LAST_SEPARATE, but has at least one outgoing edge. 
LAST_SEPARATE nodes must not have outgoing edges."))}}function eRz(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p;for(ewG(t,"Label dummy removal",1),r=gP(LV(e_k(e,(eBy(),toL)))),i=gP(LV(e_k(e,toN))),c=Pp(e_k(e,tal),103),u=new fz(e.b);u.a0&&eE9(e,s,f);for(i=new fz(f);i.a>19!=0&&(t=eoQ(t),u=!u),o=eOy(t),a=!1,i=!1,r=!1,e.h==eHz&&0==e.m&&0==e.l){if(i=!0,a=!0,-1!=o)return s=eTC(e,o),u&&esh(s),n&&(e0A=Mk(0,0,0)),s;e=Tr((Q2(),e0L)),r=!0,u=!u}else e.h>>19!=0&&(a=!0,e=eoQ(e),r=!0,u=!u);return -1!=o?esk(e,o,u,a,n):0>evy(e,t)?(n&&(e0A=a?eoQ(e):Mk(e.l,e.m,e.h)),Mk(0,0,0)):eDr(r?e:Mk(e.l,e.m,e.h),t,u,a,i,n)}function eRq(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p;if(e.e&&e.c.ct.f)&&!(t.g>e.f)){for(n=0,r=0,o=e.w.a.ec().Kc();o.Ob();)i=Pp(o.Pb(),11),euz(esp(eow(vx(e50,1),eUP,8,0,[i.i.n,i.n,i.a])).b,t.g,t.f)&&++n;for(s=e.r.a.ec().Kc();s.Ob();)i=Pp(s.Pb(),11),euz(esp(eow(vx(e50,1),eUP,8,0,[i.i.n,i.n,i.a])).b,t.g,t.f)&&--n;for(u=t.w.a.ec().Kc();u.Ob();)i=Pp(u.Pb(),11),euz(esp(eow(vx(e50,1),eUP,8,0,[i.i.n,i.n,i.a])).b,e.g,e.f)&&++r;for(a=t.r.a.ec().Kc();a.Ob();)i=Pp(a.Pb(),11),euz(esp(eow(vx(e50,1),eUP,8,0,[i.i.n,i.n,i.a])).b,e.g,e.f)&&--r;n=0)return i=efd(e,t.substr(1,o-1)),eYF(e,l=t.substr(o+1,u-(o+1)),i)}else{if(n=-1,null==e0F&&(e0F=RegExp("\\d")),e0F.test(String.fromCharCode(s))&&(n=IO(t,e_n(46),u-1))>=0){r=Pp(ZN(e,etm(e,t.substr(1,n-1)),!1),58),c=0;try{c=eDa(t.substr(n+1),eHt,eUu)}catch(d){if(d=eoa(d),M4(d,127))throw a=d,p7(new QH(a));throw p7(d)}if(c=0)return n;switch(Ur(QZ(e,n))){case 2:if(IE("",ecG(e,n.Hj()).ne())){if(u=U$(QZ(e,n)),s=UH(QZ(e,n)),l=eMv(e,t,u,s))return l;for(o=0,f=(i=eIx(e,t)).gc();o1)throw p7(new gL(eQ$));for(o=0,l=eAY(e.e.Tg(),t),r=Pp(e.g,119);o1,c=new Z4(d.b);My(c.a)||My(c.b);)f=(u=Pp(My(c.a)?Wx(c.a):Wx(c.b),17)).c==d?u.d:u.c,eB4.Math.abs(esp(eow(vx(e50,1),eUP,8,0,[f.i.n,f.n,f.a])).b-o.b)>1&&eAZ(e,u,o,a,d)}}function eR8(e){var t,n,r,i,a,o;if(i=new KB(e.e,0),r=new 
KB(e.a,0),e.d)for(n=0;neVW;){for(a=t,o=0;eB4.Math.abs(t-a)0),i.a.Xb(i.c=--i.b),eNy(e,e.b-o,a,r,i),A6(i.b0),r.a.Xb(r.c=--r.b)}if(!e.d)for(n=0;n0?(e.f[l.p]=h/(l.e.c.length+l.g.c.length),e.c=eB4.Math.min(e.c,e.f[l.p]),e.b=eB4.Math.max(e.b,e.f[l.p])):s&&(e.f[l.p]=h)}}function ejt(e){e.b=null,e.bb=null,e.fb=null,e.qb=null,e.a=null,e.c=null,e.d=null,e.e=null,e.f=null,e.n=null,e.M=null,e.L=null,e.Q=null,e.R=null,e.K=null,e.db=null,e.eb=null,e.g=null,e.i=null,e.j=null,e.k=null,e.gb=null,e.o=null,e.p=null,e.q=null,e.r=null,e.$=null,e.ib=null,e.S=null,e.T=null,e.t=null,e.s=null,e.u=null,e.v=null,e.w=null,e.B=null,e.A=null,e.C=null,e.D=null,e.F=null,e.G=null,e.H=null,e.I=null,e.J=null,e.P=null,e.Z=null,e.U=null,e.V=null,e.W=null,e.X=null,e.Y=null,e._=null,e.ab=null,e.cb=null,e.hb=null,e.nb=null,e.lb=null,e.mb=null,e.ob=null,e.pb=null,e.jb=null,e.kb=null,e.N=!1,e.O=!1}function ejn(e,t,n){var r,i,a,o;for(ewG(n,"Graph transformation ("+e.a+")",1),o=WC(t.a),a=new fz(t.b);a.a0&&(e.a=u+(p-1)*a,t.c.b+=e.a,t.f.b+=e.a),0!=b.a.gc()&&(p=ejF(h=new YJ(1,a),t,b,m,t.f.b+u-t.c.b))>0&&(t.f.b+=u+(p-1)*a)}function eji(e,t){var n,r,i,a;a=e.F,null==t?(e.F=null,euc(e,null)):(e.F=(BJ(t),t),-1!=(r=x7(t,e_n(60)))?(i=t.substr(0,r),-1!=x7(t,e_n(46))||IE(i,eUi)||IE(i,eJZ)||IE(i,eJX)||IE(i,eJJ)||IE(i,eJQ)||IE(i,eJ1)||IE(i,eJ0)||IE(i,eJ2)||(i=eJ3),-1!=(n=O8(t,e_n(62)))&&(i+=""+t.substr(n+1)),euc(e,i)):(i=t,-1==x7(t,e_n(46))&&(-1!=(r=x7(t,e_n(91)))&&(i=t.substr(0,r)),IE(i,eUi)||IE(i,eJZ)||IE(i,eJX)||IE(i,eJJ)||IE(i,eJQ)||IE(i,eJ1)||IE(i,eJ0)||IE(i,eJ2)?i=t:(i=eJ3,-1!=r&&(i+=""+t.substr(r)))),euc(e,i),i==t&&(e.F=e.D))),(4&e.Db)!=0&&(1&e.Db)==0&&eam(e,new FX(e,1,5,a,t))}function eja(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y;if(!((b=t.b.c.length)<3)){for(h=Je(ty_,eHT,25,b,15,1),f=0,l=new fz(t.b);l.ao)&&Yf(e.b,Pp(m.b,17));++s}a=o}}}function ejo(e,t){var n;if(null==t||IE(t,eUg)||0==t.length&&e.k!=(eSd(),tdy))return null;switch(e.k.g){case 1:return ehZ(t,eq6)?(OQ(),e0P):ehZ(t,eq9)?(OQ(),e0N):null;case 
2:try{return ell(eDa(t,eHt,eUu))}catch(r){if(r=eoa(r),M4(r,127))return null;throw p7(r)}case 4:try{return eEu(t)}catch(i){if(i=eoa(i),M4(i,127))return null;throw p7(i)}case 3:return t;case 5:return euC(e),exs(e,t);case 6:return euC(e),eMj(e,e.a,t);case 7:try{return(n=eTh(e)).Jf(t),n}catch(a){if(a=eoa(a),M4(a,32))return null;throw p7(a)}default:throw p7(new gC("Invalid type set for this layout option."))}}function ejs(e){var t,n,r,i,a,o,s;for(eeP(),s=new b6,n=new fz(e);n.a=s.b.c)&&(s.b=t),(!s.c||t.c<=s.c.c)&&(s.d=s.c,s.c=t),(!s.e||t.d>=s.e.d)&&(s.e=t),(!s.f||t.d<=s.f.d)&&(s.f=t);return r=new epG((eok(),e8f)),Kv(e,e8y,new g$(eow(vx(e4M,1),eUp,369,0,[r]))),o=new epG(e8p),Kv(e,e8v,new g$(eow(vx(e4M,1),eUp,369,0,[o]))),i=new epG(e8d),Kv(e,e8g,new g$(eow(vx(e4M,1),eUp,369,0,[i]))),a=new epG(e8h),Kv(e,e8m,new g$(eow(vx(e4M,1),eUp,369,0,[a]))),eOk(r.c,e8f),eOk(i.c,e8d),eOk(a.c,e8h),eOk(o.c,e8p),s.a.c=Je(e1R,eUp,1,0,5,1),eoc(s.a,r.c),eoc(s.a,eaa(i.c)),eoc(s.a,a.c),eoc(s.a,eaa(o.c)),s}function eju(e){var t;switch(e.d){case 1:if(e.hj())return -2!=e.o;break;case 2:if(e.hj())return -2==e.o;break;case 3:case 5:case 4:case 6:case 7:return e.o>-2;default:return!1}switch(t=e.gj(),e.p){case 0:return null!=t&&gN(LK(t))!=xg(e.k,0);case 1:return null!=t&&Pp(t,217).a!=jE(e.k)<<24>>24;case 2:return null!=t&&Pp(t,172).a!=(jE(e.k)&eHd);case 6:return null!=t&&xg(Pp(t,162).a,e.k);case 5:return null!=t&&Pp(t,19).a!=jE(e.k);case 7:return null!=t&&Pp(t,184).a!=jE(e.k)<<16>>16;case 3:return null!=t&&gP(LV(t))!=e.j;case 4:return null!=t&&Pp(t,155).a!=e.j;default:return null==t?null!=e.n:!ecX(t,e.n)}}function ejc(e,t,n){var r,i,a,o;return 
e.Fk()&&e.Ek()&&(o=FU(e,Pp(n,56)),xc(o)!==xc(n))?(e.Oi(t),e.Ui(t,J6(e,t,o)),e.rk()&&(a=(i=Pp(n,49),e.Dk()?e.Bk()?i.ih(e.b,ebY(Pp(ee2($S(e.b),e.aj()),18)).n,Pp(ee2($S(e.b),e.aj()).Yj(),26).Bj(),null):i.ih(e.b,edv(i.Tg(),ebY(Pp(ee2($S(e.b),e.aj()),18))),null,null):i.ih(e.b,-1-e.aj(),null,null)),Pp(o,49).eh()||(a=(r=Pp(o,49),e.Dk()?e.Bk()?r.gh(e.b,ebY(Pp(ee2($S(e.b),e.aj()),18)).n,Pp(ee2($S(e.b),e.aj()).Yj(),26).Bj(),a):r.gh(e.b,edv(r.Tg(),ebY(Pp(ee2($S(e.b),e.aj()),18))),null,a):r.gh(e.b,-1-e.aj(),null,a))),a&&a.Fi()),TO(e.b)&&e.$i(e.Zi(9,n,o,t,!1)),o):n}function ejl(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w;for(l=gP(LV(e_k(e,(eBy(),toC)))),r=gP(LV(e_k(e,toG))),eo3(d=new oG,toC,l+r),g=(c=t).d,b=c.c.i,v=c.d.i,m=Tl(b.c),y=Tl(v.c),i=new p0,f=m;f<=y;f++)s=new eb$(e),lK(s,(eEn(),e8D)),eo3(s,(eBU(),tnc),c),eo3(s,tol,(ewf(),tbo)),eo3(s,toD,d),h=Pp(RJ(e.b,f),29),f==m?egU(s,h.a.c.length-n,h):Gu(s,h),(w=gP(LV(e_k(c,tak))))<0&&eo3(c,tak,w=0),s.o.b=w,p=eB4.Math.floor(w/2),o=new eES,ekv(o,(eYu(),tbY)),Gc(o,s),o.n.b=p,u=new eES,ekv(u,tby),Gc(u,s),u.n.b=p,Go(c,o),a=new $b,eaW(a,c),eo3(a,taR,null),Gs(a,u),Go(a,g),evT(s,c,a),i.c[i.c.length]=a,c=a;return i}function ejf(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y;for(u=Pp(eEC(e,(eYu(),tbY)).Kc().Pb(),11).e,h=Pp(eEC(e,tby).Kc().Pb(),11).g,s=u.c.length,y=GX(Pp(RJ(e.j,0),11));s-- >0;){for(b=(GK(0,u.c.length),Pp(u.c[0],17)),a=QI(v=(i=(GK(0,h.c.length),Pp(h.c[0],17))).d.e,i,0),KW(b,i.d,a),Gs(i,null),Go(i,null),p=b.a,t&&P7(p,new TS(y)),r=epL(i.a,0);r.b!=r.d.c;)n=Pp(Vv(r),8),P7(p,new TS(n));for(g=b.b,d=new fz(i.b);d.a0&&(o=eB4.Math.max(o,eix(e.C.b+r.d.b,i))),l=r,f=i,d=a;e.C&&e.C.c>0&&(h=d+e.C.c,c&&(h+=l.d.c),o=eB4.Math.max(o,(Mc(),enj(ezs),eB4.Math.abs(f-1)<=ezs||1==f||isNaN(f)&&isNaN(1)?0:h/(1-f)))),n.n.b=0,n.a.a=o}function ejh(e,t){var 
n,r,i,a,o,s,u,c,l,f,d,h;if(n=Pp(UA(e.b,t),124),(u=Pp(Pp(Zq(e.r,t),21),84)).dc()){n.n.d=0,n.n.a=0;return}for(c=e.u.Hc((ekU(),tbp)),o=0,e.A.Hc((ed6(),tbq))&&eCN(e,t),s=u.Kc(),l=null,d=0,f=0;s.Ob();)a=gP(LV((r=Pp(s.Pb(),111)).b.We((Ab(),e4a)))),i=r.b.rf().b,l?(h=f+l.d.a+e.w+r.d.d,o=eB4.Math.max(o,(Mc(),enj(ezs),eB4.Math.abs(d-a)<=ezs||d==a||isNaN(d)&&isNaN(a)?0:h/(a-d)))):e.C&&e.C.d>0&&(o=eB4.Math.max(o,eix(e.C.d+r.d.d,a))),l=r,d=a,f=i;e.C&&e.C.a>0&&(h=f+e.C.a,c&&(h+=l.d.a),o=eB4.Math.max(o,(Mc(),enj(ezs),eB4.Math.abs(d-1)<=ezs||1==d||isNaN(d)&&isNaN(1)?0:h/(1-d)))),n.n.d=0,n.a.b=o}function ejp(e,t,n){var r,i,a,o,s,u;for(o=0,this.g=e,s=t.d.length,u=n.d.length,this.d=Je(e4N,eGW,10,s+u,0,1);o0?etU(this,this.f/this.a):null!=Ot(t.g,t.d[0]).a&&null!=Ot(n.g,n.d[0]).a?etU(this,(gP(Ot(t.g,t.d[0]).a)+gP(Ot(n.g,n.d[0]).a))/2):null!=Ot(t.g,t.d[0]).a?etU(this,Ot(t.g,t.d[0]).a):null!=Ot(n.g,n.d[0]).a&&etU(this,Ot(n.g,n.d[0]).a)}function ejb(e,t){var n,r,i,a,o,s,u,c,l,f;for(e.a=new Bv(eiG(e55)),r=new fz(t.a);r.a=1&&(m-o>0&&f>=0?(u.n.a+=b,u.n.b+=a*o):m-o<0&&l>=0&&(u.n.a+=b*m,u.n.b+=a));e.o.a=t.a,e.o.b=t.b,eo3(e,(eBy(),ta4),(ed6(),r=Pp(yw(e6o),9),new I1(r,Pp(CY(r,r.length),9),0)))}function ej_(e,t,n,r,i,a){var o;if(!(null==t||!efz(t,tmJ,tmQ)))throw p7(new gL("invalid scheme: "+t));if(!e&&!(null!=n&&-1==x7(n,e_n(35))&&n.length>0&&(GV(0,n.length),47!=n.charCodeAt(0))))throw p7(new gL("invalid opaquePart: "+n));if(e&&!(null!=t&&wZ(tm$,t.toLowerCase()))&&!(null==n||!efz(n,tm1,tm0))||e&&null!=t&&wZ(tm$,t.toLowerCase())&&!eyQ(n))throw p7(new gL(eJI+n));if(!ef$(r))throw p7(new gL("invalid device: "+r));if(!ece(i))throw o=null==i?"invalid segments: null":"invalid segment: "+euR(i),p7(new gL(o));if(!(null==a||-1==x7(a,e_n(35))))throw p7(new gL("invalid query: "+a))}function ejE(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g;for(ewG(t,"Calculate Graph Size",1),t.n&&e&&WG(t,KS(e),(eup(),tmr)),s=ezq,u=ezq,a=eqe,o=eqe,f=new Ow((e.a||(e.a=new 
FQ(e6k,e,10,11)),e.a));f.e!=f.i.gc();)p=(c=Pp(epH(f),33)).i,b=c.j,g=c.g,r=c.f,i=Pp(eT8(c,(eBB(),thy)),142),s=eB4.Math.min(s,p-i.b),u=eB4.Math.min(u,b-i.d),a=eB4.Math.max(a,p+g+i.c),o=eB4.Math.max(o,b+r+i.a);for(h=Pp(eT8(e,(eBB(),thN)),116),d=new kl(s-h.b,u-h.d),l=new Ow((e.a||(e.a=new FQ(e6k,e,10,11)),e.a));l.e!=l.i.gc();)c=Pp(epH(l),33),eno(c,c.i-d.a),ens(c,c.j-d.b);m=a-s+(h.b+h.c),n=o-u+(h.d+h.a),ena(e,m),eni(e,n),t.n&&e&&WG(t,KS(e),(eup(),tmr))}function ejS(e){var t,n,r,i,a,o,s,u,c,l;for(r=new p0,o=new fz(e.e.a);o.a0){epV(e,n,0),n.a+=String.fromCharCode(r),epV(e,n,i=ehR(t,a)),a+=i-1;continue}39==r?a+11)for(b=Je(ty_,eHT,25,e.b.b.c.length,15,1),f=0,c=new fz(e.b.b);c.a=s&&i<=u)s<=i&&a<=u?(n[l++]=i,n[l++]=a,r+=2):s<=i?(n[l++]=i,n[l++]=u,e.b[r]=u+1,o+=2):a<=u?(n[l++]=s,n[l++]=a,r+=2):(n[l++]=s,n[l++]=u,e.b[r]=u+1);else if(ueHe)&&s<10)vR(e.c,new tf),ejM(e),Ym(e.c),ejv(e.f)}function ejL(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p,b,m;if(gN(LK(e_k(n,(eBy(),taI)))))for(s=new fz(n.j);s.a=2){for(o=Pp(Vv(u=epL(n,0)),8),s=Pp(Vv(u),8);s.a0&&eoY(l,!0,(ec3(),tpg)),s.k==(eEn(),e8C)&&UP(l),Um(e.f,s,t)}}function ejN(e,t,n){var r,i,a,o,s,u,c,l,f,d;switch(ewG(n,"Node promotion heuristic",1),e.g=t,eYs(e),e.q=Pp(e_k(t,(eBy(),taz)),260),l=Pp(e_k(e.g,ta$),19).a,a=new nH,e.q.g){case 2:case 1:default:eRn(e,a);break;case 3:for(e.q=(eOJ(),tsk),eRn(e,a),u=0,s=new fz(e.a);s.ae.j&&(e.q=tsv,eRn(e,a));break;case 4:for(e.q=(eOJ(),tsk),eRn(e,a),c=0,i=new fz(e.b);i.ae.k&&(e.q=ts_,eRn(e,a));break;case 6:d=zy(eB4.Math.ceil(e.f.length*l/100)),eRn(e,new dq(d));break;case 5:f=zy(eB4.Math.ceil(e.d*l/100)),eRn(e,new dZ(f))}eLC(e,t),eEj(n)}function ejP(e,t,n){var r,i,a,o;this.j=e,this.e=ewi(e),this.o=this.j.e,this.i=!!this.o,this.p=this.i?Pp(RJ(n,Bq(this.o).p),214):null,i=Pp(e_k(e,(eBU(),tt3)),21),this.g=i.Hc((eLR(),ttw)),this.b=new p0,this.d=new ed0(this.e),o=Pp(e_k(this.j,tnw),230),this.q=eaG(t,o,this.e),this.k=new 
zX(this),a=ZW(eow(vx(e4H,1),eUp,225,0,[this,this.d,this.k,this.q])),t!=(enU(),tur)||gN(LK(e_k(e,(eBy(),ti7))))?t==tur&&gN(LK(e_k(e,(eBy(),ti7))))?(r=new ews(this.e),a.c[a.c.length]=r,this.c=new erB(r,o,Pp(this.q,402))):this.c=new Sr(t,this):(r=new ews(this.e),a.c[a.c.length]=r,this.c=new K5(r,o,Pp(this.q,402))),P_(a,this.c),eP0(a,this.e),this.s=eY0(this.k)}function ejR(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w;for(p=(f=Pp(M2((o=epL(new hz(t).a.d,0),new hG(o))),86))?Pp(e_k(f,(eR6(),tco)),86):null,i=1;f&&p;){for(s=0,u=0,w=0,n=f,r=p;s=e.i?(++e.i,P_(e.a,ell(1)),P_(e.b,f)):(r=e.c[t.p][1],q1(e.a,l,ell(Pp(RJ(e.a,l),19).a+1-r)),q1(e.b,l,gP(LV(RJ(e.b,l)))+f-r*e.e)),(e.q==(eOJ(),tsv)&&(Pp(RJ(e.a,l),19).a>e.j||Pp(RJ(e.a,l-1),19).a>e.j)||e.q==ts_&&(gP(LV(RJ(e.b,l)))>e.k||gP(LV(RJ(e.b,l-1)))>e.k))&&(u=!1),o=new Fa(OH(efu(t).a.Kc(),new c));eTk(o);)s=(a=Pp(ZC(o),17)).c.i,e.f[s.p]==l&&(d=ejj(e,s),i+=Pp(d.a,19).a,u=u&&gN(LK(d.b)));return e.f[t.p]=l,i+=e.c[t.p][0],new kD(ell(i),(OQ(),!!u))}function ejF(e,t,n,r,i){var a,o,s,u,c,l,f,d,h,p,b,m,g;for(f=new p2,o=new p0,ekD(e,n,e.d.fg(),o,f),ekD(e,r,e.d.gg(),o,f),e.b=.2*(b=eTZ(eeh(new R1(null,new Gq(o,16)),new aL)),m=eTZ(eeh(new R1(null,new Gq(o,16)),new aC)),eB4.Math.min(b,m)),a=0,s=0;s=2&&(g=eOY(o,!0,d),e.e||(e.e=new h$(e)),ehB(e.e,g,o,e.b)),ewv(o,d),eFn(o),h=-1,l=new fz(o);l.as)}function ejU(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b;for(n=Pp(e_k(e,(eBy(),tol)),98),o=e.f,a=e.d,s=o.a+a.b+a.c,u=0-a.d-e.c.b,l=o.b+a.d+a.a-e.c.b,c=new p0,f=new p0,i=new fz(t);i.a0),Pp(l.a.Xb(l.c=--l.b),17));a!=r&&l.b>0;)e.a[a.p]=!0,e.a[r.p]=!0,a=(A6(l.b>0),Pp(l.a.Xb(l.c=--l.b),17));l.b>0&&BH(l)}}function ejZ(e,t,n){var r,i,a,o,s,u,c,l,f;if(e.a!=t.Aj())throw p7(new gL(eZ5+t.ne()+eZ6));if(r=ecG((eSp(),tvc),t).$k())return r.Aj().Nh().Ih(r,n);if(o=ecG(tvc,t).al()){if(null==n)return null;if((s=Pp(n,15)).dc())return"";for(f=new vs,a=s.Kc();a.Ob();)i=a.Pb(),xk(f,o.Aj().Nh().Ih(o,i)),f.a+=" ";return 
x3(f,f.a.length-1)}if(!(l=ecG(tvc,t).bl()).dc()){for(c=l.Kc();c.Ob();)if((u=Pp(c.Pb(),148)).wj(n))try{if(f=u.Aj().Nh().Ih(u,n),null!=f)return f}catch(d){if(d=eoa(d),!M4(d,102))throw p7(d)}throw p7(new gL("Invalid value: '"+n+"' for datatype :"+t.ne()))}return Pp(t,834).Fj(),null==n?null:M4(n,172)?""+Pp(n,172).a:esF(n)==e1Q?MU(tmS[0],Pp(n,199)):efF(n)}function ejX(e){var t,n,r,i,a,o,s,u,c,l;for(c=new _n,s=new _n,a=new fz(e);a.a-1){for(i=epL(s,0);i.b!=i.d.c;)(r=Pp(Vv(i),128)).v=o;for(;0!=s.b;)for(r=Pp(egW(s,0),128),n=new fz(r.i);n.a0&&(n+=u.n.a+u.o.a/2,++f),p=new fz(u.j);p.a0&&(n/=f),g=Je(tyx,eH5,25,r.a.c.length,15,1),s=0,c=new fz(r.a);c.a=s&&i<=u)s<=i&&a<=u?r+=2:s<=i?(e.b[r]=u+1,o+=2):a<=u?(n[l++]=i,n[l++]=s-1,r+=2):(n[l++]=i,n[l++]=s-1,e.b[r]=u+1,o+=2);else if(u0?i-=864e5:i+=864e5,u=new LZ(eft(eap(t.q.getTime()),i))),l=new vl,c=e.a.length,a=0;a=97&&r<=122||r>=65&&r<=90){for(o=a+1;o=c)throw p7(new gL("Missing trailing '"));o+10&&0==n.c&&(t||(t=new p0),t.c[t.c.length]=n);if(t)for(;0!=t.c.length;){if((n=Pp(ZV(t,0),233)).b&&n.b.c.length>0){for(a=(n.b||(n.b=new p0),new fz(n.b));a.aQI(e,n,0))return new kD(i,n)}else if(gP(Ot(i.g,i.d[0]).a)>gP(Ot(n.g,n.d[0]).a))return new kD(i,n)}for(s=(n.e||(n.e=new p0),n.e).Kc();s.Ob();)u=((o=Pp(s.Pb(),233)).b||(o.b=new p0),o.b),Gp(0,u.c.length),Ew(u.c,0,n),o.c==u.c.length&&(t.c[t.c.length]=o)}return null}function eFe(e,t){var n,r,i,a,o,s,u,c,l;if(null==e)return eUg;if(null!=(u=t.a.zc(e,t)))return"[...]";for(a=0,n=new eaP(eUd,"[","]"),o=(i=e).length;a=14&&l<=16)?t.a._b(r)?(n.a?xM(n.a,n.b):n.a=new O0(n.d),xx(n.a,"[...]")):ZJ(n,eFe(s=etG(r),c=new Rq(t))):M4(r,177)?ZJ(n,ekd(Pp(r,177))):M4(r,190)?ZJ(n,ewh(Pp(r,190))):M4(r,195)?ZJ(n,eEm(Pp(r,195))):M4(r,2012)?ZJ(n,ewp(Pp(r,2012))):M4(r,48)?ZJ(n,ekf(Pp(r,48))):M4(r,364)?ZJ(n,ekG(Pp(r,364))):M4(r,832)?ZJ(n,ekl(Pp(r,832))):M4(r,104)&&ZJ(n,ekc(Pp(r,104))):ZJ(n,null==r?eUg:efF(r));return n.a?0==n.e.length?n.a.a:n.a.a+""+n.e:n.c}function eFt(e,t,n,r){var 
i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y;for(s=eLO(t,!1,!1),g=eEF(s),r&&(g=esP(g)),y=gP(LV(eT8(t,(epz(),e63)))),m=(A6(0!=g.b),Pp(g.a.a.c,8)),f=Pp(ep3(g,1),8),g.b>2?(l=new p0,eoc(l,new Gz(g,1,g.b)),a=eBk(l,y+e.a),v=new eTI(a),eaW(v,t),n.c[n.c.length]=v):v=r?Pp(Bp(e.b,e_I(t)),266):Pp(Bp(e.b,e_P(t)),266),u=e_I(t),r&&(u=e_P(t)),o=eEJ(m,u),c=y+e.a,o.a?(c+=eB4.Math.abs(m.b-f.b),b=new kl(f.a,(f.b+m.b)/2)):(c+=eB4.Math.abs(m.a-f.a),b=new kl((f.a+m.a)/2,f.b)),r?Um(e.d,t,new emL(v,o,b,c)):Um(e.c,t,new emL(v,o,b,c)),Um(e.b,t,v),p=(t.n||(t.n=new FQ(e6S,t,1,7)),t.n),h=new Ow(p);h.e!=h.i.gc();)d=Pp(epH(h),137),i=eIt(e,d,!0,0,0),n.c[n.c.length]=i}function eFn(e){var t,n,r,i,a,o,s,u,c,l;for(c=new p0,s=new p0,o=new fz(e);o.a-1){for(a=new fz(s);a.a0)&&(l3(u,eB4.Math.min(u.o,i.o-1)),l2(u,u.i-1),0==u.i&&(s.c[s.c.length]=u))}}function eFr(e,t,n){var r,i,a,o,s,u,c;if(c=e.c,t||(t=tgK),e.c=t,(4&e.Db)!=0&&(1&e.Db)==0&&(u=new FX(e,1,2,c,e.c),n?n.Ei(u):n=u),c!=t){if(M4(e.Cb,284))e.Db>>16==-10?n=Pp(e.Cb,284).nk(t,n):e.Db>>16==-15&&(t||(t=(eBK(),tgA)),c||(c=(eBK(),tgA)),e.Cb.nh()&&(u=new Q$(e.Cb,1,13,c,t,ebv(QX(Pp(e.Cb,59)),e),!1),n?n.Ei(u):n=u));else if(M4(e.Cb,88))e.Db>>16==-23&&(M4(t,88)||(t=(eBK(),tgI)),M4(c,88)||(c=(eBK(),tgI)),e.Cb.nh()&&(u=new Q$(e.Cb,1,10,c,t,ebv(qt(Pp(e.Cb,26)),e),!1),n?n.Ei(u):n=u));else if(M4(e.Cb,444))for(o=((s=Pp(e.Cb,836)).b||(s.b=new pG(new mR)),s.b),a=(r=new esz(new fS(o.a).a),new pW(r));a.a.b;)n=eFr(i=Pp(etz(a.a).cd(),87),eOl(i,s),n)}return n}function eFi(e,t){var n,r,i,a,o,s,u,c,l,f,d;for(o=gN(LK(eT8(e,(eBy(),taI)))),d=Pp(eT8(e,toh),21),u=!1,c=!1,f=new Ow((e.c||(e.c=new FQ(e6x,e,9,9)),e.c));f.e!=f.i.gc()&&(!u||!c);){for(a=Pp(epH(f),118),s=0,i=Y_(enM(eow(vx(e1B,1),eUp,20,0,[(a.d||(a.d=new Ih(e6g,a,8,5)),a.d),(a.e||(a.e=new Ih(e6g,a,7,4)),a.e)])));eTk(i)&&(r=Pp(ZC(i),79),l=o&&exb(r)&&gN(LK(eT8(r,taD))),n=eRL((r.b||(r.b=new Ih(e6m,r,4,7)),r.b),a)?e==z$(ewH(Pp(etj((r.c||(r.c=new Ih(e6m,r,5,8)),r.c),0),82))):e==z$(ewH(Pp(etj((r.b||(r.b=new 
Ih(e6m,r,4,7)),r.b),0),82))),!((l||n)&&++s>1)););s>0?u=!0:d.Hc((ekU(),tbp))&&(a.n||(a.n=new FQ(e6S,a,1,7)),a.n).i>0&&(u=!0),s>1&&(c=!0)}u&&t.Fc((eLR(),ttw)),c&&t.Fc((eLR(),tt_))}function eFa(e){var t,n,r,i,a,o,s,u,c,l,f,d;if((d=Pp(eT8(e,(eBB(),thx)),21)).dc())return null;if(s=0,o=0,d.Hc((ed6(),tbV))){for(l=Pp(eT8(e,thV),98),r=2,n=2,i=2,a=2,t=z$(e)?Pp(eT8(z$(e),the),103):Pp(eT8(e,the),103),c=new Ow((e.c||(e.c=new FQ(e6x,e,9,9)),e.c));c.e!=c.i.gc();)if(u=Pp(epH(c),118),(f=Pp(eT8(u,th0),61))==(eYu(),tbF)&&(f=eNh(u,t),ebu(u,th0,f)),l==(ewf(),tbo))switch(f.g){case 1:r=eB4.Math.max(r,u.i+u.g);break;case 2:n=eB4.Math.max(n,u.j+u.f);break;case 3:i=eB4.Math.max(i,u.i+u.g);break;case 4:a=eB4.Math.max(a,u.j+u.f)}else switch(f.g){case 1:r+=u.g+2;break;case 2:n+=u.f+2;break;case 3:i+=u.g+2;break;case 4:a+=u.f+2}s=eB4.Math.max(r,i),o=eB4.Math.max(n,a)}return eYx(e,s,o,!0,!0)}function eFo(e,t,n,r,i){var a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w;for(v=Pp(qE(etc(UJ(new R1(null,new Gq(t.d,16)),new hc(n)),new hl(n)),JF(new U,new B,new en,eow(vx(e2L,1),eU4,132,0,[(eum(),e2U)]))),15),f=eUu,l=eHt,u=new fz(t.b.j);u.a0)?c&&(d=g.p,o?++d:--d,h=!(eOV(r=eoZ(f=Pp(RJ(g.c.a,d),10)),E,n[0])||FF(r,E,n[0]))):h=!0),p=!1,(_=t.D.i)&&_.c&&s.e&&((l=o&&_.p>0||!o&&_.p<_.c.a.c.length-1)?(d=_.p,o?--d:++d,p=!(eOV(r=eoZ(f=Pp(RJ(_.c.a,d),10)),n[0],k)||FF(r,n[0],k))):p=!0),h&&p&&P7(e.a,S),h||enD(e.a,eow(vx(e50,1),eUP,8,0,[b,m])),p||enD(e.a,eow(vx(e50,1),eUP,8,0,[w,y]))}function eFh(e,t){var n,r,i,a,o,s,u,c;if(M4(e.Ug(),160)?(eFh(Pp(e.Ug(),160),t),t.a+=" > "):t.a+="Root ",IE((n=e.Tg().zb).substr(0,3),"Elk")?xM(t,n.substr(3)):(t.a+=""+n,t),i=e.zg()){xM((t.a+=" ",t),i);return}if(M4(e,354)&&(c=Pp(e,137).a)){xM((t.a+=" ",t),c);return}for(o=new Ow(e.Ag());o.e!=o.i.gc();)if(c=(a=Pp(epH(o),137)).a){xM((t.a+=" ",t),c);return}if(M4(e,352)&&((r=Pp(e,79)).b||(r.b=new Ih(e6m,r,4,7)),0!=r.b.i&&(r.c||(r.c=new Ih(e6m,r,5,8)),0!=r.c.i))){for(t.a+=" (",s=new AF((r.b||(r.b=new 
Ih(e6m,r,4,7)),r.b));s.e!=s.i.gc();)s.e>0&&(t.a+=eUd),eFh(Pp(epH(s),160),t);for(t.a+=eGH,u=new AF((r.c||(r.c=new Ih(e6m,r,5,8)),r.c));u.e!=u.i.gc();)u.e>0&&(t.a+=eUd),eFh(Pp(epH(u),160),t);t.a+=")"}}function eFp(e,t,n){var r,i,a,o,s,u,c,l,f,d,h;if(a=Pp(e_k(e,(eBU(),tnc)),79)){for(r=e.a,C5(i=new TS(n),eyr(e)),eag(e.d.i,e.c.i)?(d=e.c,f=esp(eow(vx(e50,1),eUP,8,0,[d.n,d.a])),C6(f,n)):f=GX(e.c),qQ(r,f,r.a,r.a.a),h=GX(e.d),null!=e_k(e,tnC)&&C5(h,Pp(e_k(e,tnC),8)),qQ(r,h,r.c.b,r.c),etH(r,i),o=eLO(a,!0,!0),ern(o,Pp(etj((a.b||(a.b=new Ih(e6m,a,4,7)),a.b),0),82)),err(o,Pp(etj((a.c||(a.c=new Ih(e6m,a,5,8)),a.c),0),82)),eNI(r,o),l=new fz(e.b);l.a=0){for(u=null,s=new KB(l.a,c+1);s.bo?1:Te(isNaN(0),isNaN(o)))<0&&(enj(eVU),(eB4.Math.abs(o-1)<=eVU||1==o||isNaN(o)&&isNaN(1)?0:o<1?-1:o>1?1:Te(isNaN(o),isNaN(1)))<0)&&(enj(eVU),(eB4.Math.abs(0-s)<=eVU||0==s||isNaN(0)&&isNaN(s)?0:0s?1:Te(isNaN(0),isNaN(s)))<0)&&(enj(eVU),(eB4.Math.abs(s-1)<=eVU||1==s||isNaN(s)&&isNaN(1)?0:s<1?-1:s>1?1:Te(isNaN(s),isNaN(1)))<0)))}function eFg(e){var t,n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E;for(f=new BU(new fQ(e));f.b!=f.c.a.d;)for(b=0,s=Pp((l=JO(f)).d,56),t=Pp(l.e,56),w=(null==(o=s.Tg()).i&&eNT(o),o.i).length;b=0&&b=c.c.c.length?VJ((eEn(),e8N),e8D):VJ((eEn(),e8D),e8D),l*=2,a=n.a.g,n.a.g=eB4.Math.max(a,a+(l-a)),o=n.b.g,n.b.g=eB4.Math.max(o,o+(l-o)),i=t}}}function eFw(e,t,n,r,i){var a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_;for(_=Pg(e),l=new p0,f=(s=e.c.length)-1,d=s+1;0!=_.a.c;){for(;0!=n.b;)y=(A6(0!=n.b),Pp(etw(n,n.a.a),112)),zS(_.a,y),y.g=f--,eNg(y,t,n,r);for(;0!=t.b;)w=(A6(0!=t.b),Pp(etw(t,t.a.a),112)),zS(_.a,w),w.g=d++,eNg(w,t,n,r);for(c=eHt,g=(o=new C1(new Ap(new fP(_.a).a).b),new fR(o));Et(g.a.a);){if(m=(a=AJ(g.a),Pp(a.cd(),112)),!r&&m.b>0&&m.a<=0){l.c=Je(e1R,eUp,1,0,5,1),l.c[l.c.length]=m;break}(b=m.i-m.d)>=c&&(b>c&&(l.c=Je(e1R,eUp,1,0,5,1),c=b),l.c[l.c.length]=m)}0!=l.c.length&&(u=Pp(RJ(l,ebO(i,l.c.length)),112),zS(_.a,u),u.g=d++,eNg(u,t,n,r),l.c=Je(e1R,eUp,1,0,5,1))}for(v=e.c.length+1,p=new 
fz(e);p.a0&&(d.d+=l.n.d,d.d+=l.d),d.a>0&&(d.a+=l.n.a,d.a+=l.d),d.b>0&&(d.b+=l.n.b,d.b+=l.d),d.c>0&&(d.c+=l.n.c,d.c+=l.d),d}function eFx(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p;for(d=n.d,f=n.c,o=(a=new kl(n.f.a+n.d.b+n.d.c,n.f.b+n.d.d+n.d.a)).b,c=new fz(e.a);c.a=(l=Pp(Pp(Zq(e.r,t),21),84)).gc()||t==(eYu(),tby)||t==(eYu(),tbY)){eYY(e,t);return}for(b=e.u.Hc((ekU(),tbg)),n=t==(eYu(),tbw)?(eaY(),e4c):(eaY(),e4o),g=t==tbw?(QQ(),e3U):(QQ(),e3$),r=vN(DP(n),e.s),m=t==tbw?eHQ:eH1,c=l.Kc();c.Ob();)(s=Pp(c.Pb(),111)).c&&!(s.c.d.c.length<=0)&&(p=s.b.rf(),h=s.e,(d=(f=s.c).i).b=(a=f.n,f.e.a+a.b+a.c),d.a=(o=f.n,f.e.b+o.d+o.a),b?(d.c=h.a-(i=f.n,f.e.a+i.b+i.c)-e.s,b=!1):d.c=h.a+p.a+e.s,$C(g,ezr),f.f=g,JC(f,(Qs(),e3Y)),P_(r.d,new jH(d,elO(r,d))),m=t==tbw?eB4.Math.min(m,h.b):eB4.Math.max(m,h.b+s.b.rf().b));for(m+=t==tbw?-e.t:e.t,edp((r.e=m,r)),u=l.Kc();u.Ob();)(s=Pp(u.Pb(),111)).c&&!(s.c.d.c.length<=0)&&(d=s.c.i,d.c-=s.e.a,d.d-=s.e.b)}function eFA(e,t,n){var r;if(ewG(n,"StretchWidth layering",1),0==t.a.c.length){eEj(n);return}for(e.c=t,e.t=0,e.u=0,e.i=eHQ,e.g=eH1,e.d=gP(LV(e_k(t,(eBy(),toO)))),ebn(e),eTR(e),eTP(e),eyo(e),ed2(e),e.i=eB4.Math.max(1,e.i),e.g=eB4.Math.max(1,e.g),e.d=e.d/e.i,e.f=e.g/e.i,e.s=ebZ(e),r=new By(e.c),P_(e.c.b,r),e.r=WC(e.p),e.n=zb(e.k,e.k.length);0!=e.r.c.length;)e.o=ecu(e),!e.o||ess(e)&&0!=e.b.a.gc()?(ey6(e,r),r=new By(e.c),P_(e.c.b,r),er7(e.a,e.b),e.b.a.$b(),e.t=e.u,e.u=0):ess(e)?(e.c.b.c=Je(e1R,eUp,1,0,5,1),r=new By(e.c),P_(e.c.b,r),e.t=0,e.u=0,e.b.a.$b(),e.a.a.$b(),++e.f,e.r=WC(e.p),e.n=zb(e.k,e.k.length)):(Gu(e.o,r),QA(e.r,e.o),Yf(e.b,e.o),e.t=e.t-e.k[e.o.p]*e.d+e.j[e.o.p],e.u+=e.e[e.o.p]*e.d);t.a.c=Je(e1R,eUp,1,0,5,1),eSj(t.b),eEj(n)}function eFL(e){var t,n,r,i;for(_r(UJ(new R1(null,new Gq(e.a.b,16)),new rH),new r$),eyR(e),_r(UJ(new R1(null,new Gq(e.a.b,16)),new rz),new rG),e.c==(efE(),tpM)&&(_r(UJ(eeh(new R1(null,new Gq(new fk(e.f),1)),new rW),new rK),new hn(e)),_r(UJ(UQ(eeh(eeh(new R1(null,new Gq(e.d.b,16)),new rV),new rq),new rZ),new rX),new hi(e))),i=new 
kl(eHQ,eHQ),t=new kl(eH1,eH1),r=new fz(e.a.b);r.a0&&(e.c[t.c.p][t.p].d+=eMU(e.i,24)*e$h*.07000000029802322-.03500000014901161,e.c[t.c.p][t.p].a=e.c[t.c.p][t.p].d/e.c[t.c.p][t.p].b)}}function eFD(e){var t,n,r,i,a,o,s,u,c,l,f,d,h,p,b,m;for(p=new fz(e);p.ar.d,r.d=eB4.Math.max(r.d,t),s&&n&&(r.d=eB4.Math.max(r.d,r.a),r.a=r.d+i);break;case 3:n=t>r.a,r.a=eB4.Math.max(r.a,t),s&&n&&(r.a=eB4.Math.max(r.a,r.d),r.d=r.a+i);break;case 2:n=t>r.c,r.c=eB4.Math.max(r.c,t),s&&n&&(r.c=eB4.Math.max(r.b,r.c),r.b=r.c+i);break;case 4:n=t>r.b,r.b=eB4.Math.max(r.b,t),s&&n&&(r.b=eB4.Math.max(r.b,r.c),r.c=r.b+i)}}}function eFj(e){var t,n,r,i,a,o,s,u,c,l,f;for(c=new fz(e);c.a0||l.j==tbY&&l.e.c.length-l.g.c.length<0)){t=!1;break}for(i=new fz(l.g);i.a=c&&_>=m&&(d+=p.n.b+b.n.b+b.a.b-w,++s));if(n)for(o=new fz(v.e);o.a=c&&_>=m&&(d+=p.n.b+b.n.b+b.a.b-w,++s))}s>0&&(E+=d/s,++h)}h>0?(t.a=i*E/h,t.g=h):(t.a=0,t.g=0)}function eFY(e,t){var n,r,i,a,o,s,u,c,l,f,d;for(i=new fz(e.a.b);i.aeH1||t.o==tuE&&l0&&eno(g,w*E),_>0&&ens(g,_*S);for(ear(e.b,new te),t=new p0,s=new esz(new fS(e.c).a);s.b;)o=etz(s),r=Pp(o.cd(),79),n=Pp(o.dd(),395).a,i=eLO(r,!1,!1),f=ewM(e_I(r),eEF(i),n),eNI(f,i),(y=e_D(r))&&-1==QI(t,y,0)&&(t.c[t.c.length]=y,Hw(y,(A6(0!=f.b),Pp(f.a.a.c,8)),n));for(m=new esz(new fS(e.d).a);m.b;)b=etz(m),r=Pp(b.cd(),79),n=Pp(b.dd(),395).a,i=eLO(r,!1,!1),f=ewM(e_P(r),esP(eEF(i)),n),eNI(f=esP(f),i),(y=e_N(r))&&-1==QI(t,y,0)&&(t.c[t.c.length]=y,Hw(y,(A6(0!=f.b),Pp(f.c.b.c,8)),n))}function eFz(e,t,n,r){var i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k;if(0!=n.c.length){for(p=new p0,h=new fz(n);h.aeB4.Math.abs(v-m))continue;v1)for(h=new eRM(p,y,r),qX(y,new SV(e,h)),o.c[o.c.length]=h,f=y.a.ec().Kc();f.Ob();)QA(a,(l=Pp(f.Pb(),46)).b);if(s.a.gc()>1)for(h=new eRM(p,s,r),qX(s,new Sq(e,h)),o.c[o.c.length]=h,f=s.a.ec().Kc();f.Ob();)QA(a,(l=Pp(f.Pb(),46)).b)}}function eFJ(e){_Y(e,new ewB(vZ(vQ(vq(vJ(vX(new oc,eqp),"ELK Radial"),'A radial layout provider which is based on the algorithm of Peter Eades published in "Drawing free 
trees.", published by International Institute for Advanced Study of Social Information Science, Fujitsu Limited in 1991. The radial layouter takes a tree and places the nodes in radial order around the root. The nodes of the same tree level are placed on the same radius.'),new aW),eqp))),KE(e,eqp,eVT,epB(tlw)),KE(e,eqp,eGi,epB(tlS)),KE(e,eqp,eGh,epB(tlh)),KE(e,eqp,eGM,epB(tlp)),KE(e,eqp,eGd,epB(tlb)),KE(e,eqp,eGp,epB(tld)),KE(e,eqp,eGf,epB(tlm)),KE(e,eqp,eGb,epB(tly)),KE(e,eqp,eql,epB(tll)),KE(e,eqp,eqc,epB(tlf)),KE(e,eqp,eqh,epB(tlg)),KE(e,eqp,eqs,epB(tlv)),KE(e,eqp,equ,epB(tl_)),KE(e,eqp,eqf,epB(tlE)),KE(e,eqp,eqd,epB(tlk))}function eFQ(e){var t;if(this.r=U2(new ex,new eT),this.b=new efY(Pp(Y9(e6a),290)),this.p=new efY(Pp(Y9(e6a),290)),this.i=new efY(Pp(Y9(e3n),290)),this.e=e,this.o=new TS(e.rf()),this.D=e.Df()||gN(LK(e.We((eBB(),thh)))),this.A=Pp(e.We((eBB(),thx)),21),this.B=Pp(e.We(thL),21),this.q=Pp(e.We(thV),98),this.u=Pp(e.We(thJ),21),!e_y(this.u))throw p7(new gq("Invalid port label placement: "+this.u));if(this.v=gN(LK(e.We(th1))),this.j=Pp(e.We(thS),21),!eM1(this.j))throw p7(new gq("Invalid node label placement: "+this.j));this.n=Pp(egG(e,th_),116),this.k=gP(LV(egG(e,tps))),this.d=gP(LV(egG(e,tpo))),this.w=gP(LV(egG(e,tpp))),this.s=gP(LV(egG(e,tpu))),this.t=gP(LV(egG(e,tpc))),this.C=Pp(egG(e,tpd),142),this.c=2*this.d,t=!this.B.Hc((eI3(),tbX)),this.f=new eh6(0,t,0),this.g=new eh6(1,t,0),gh(this.f,(etx(),e3N),this.g)}function eF1(e,t,n,r,i){var a,o,s,u,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M;for(w=0,b=0,p=0,h=1,y=new Ow((e.a||(e.a=new FQ(e6k,e,10,11)),e.a));y.e!=y.i.gc();)g=Pp(epH(y),33),h+=VG(new Fa(OH(eOi(g).a.Kc(),new c))),x=g.g,b=eB4.Math.max(b,x),d=g.f,p=eB4.Math.max(p,d),w+=x*d;for(m=(e.a||(e.a=new FQ(e6k,e,10,11)),e.a).i,o=w+2*r*r*h*m,a=eB4.Math.sqrt(o),u=eB4.Math.max(a*n,b),s=eB4.Math.max(a/n,p),v=new Ow((e.a||(e.a=new 
FQ(e6k,e,10,11)),e.a));v.e!=v.i.gc();)g=Pp(epH(v),33),T=i.b+(eMU(t,26)*e$l+eMU(t,27)*e$f)*(u-g.g),M=i.b+(eMU(t,26)*e$l+eMU(t,27)*e$f)*(s-g.f),eno(g,T),ens(g,M);for(k=u+(i.b+i.c),S=s+(i.d+i.a),E=new Ow((e.a||(e.a=new FQ(e6k,e,10,11)),e.a));E.e!=E.i.gc();)for(_=Pp(epH(E),33),f=new Fa(OH(eOi(_).a.Kc(),new c));eTk(f);)l=Pp(ZC(f),79),eTc(l)||eBv(l,t,k,S);eYx(e,k+=i.b+i.c,S+=i.d+i.a,!1,!0)}function eF0(e){var t,n,r,i,a,o,s,u,c,l,f;if(null==e)throw p7(new vo(eUg));if(c=e,a=e.length,u=!1,a>0&&(45==(t=(GV(0,e.length),e.charCodeAt(0)))||43==t)&&(e=e.substr(1),--a,u=45==t),0==a)throw p7(new vo(eHJ+c+'"'));for(;e.length>0&&(GV(0,e.length),48==e.charCodeAt(0));)e=e.substr(1),--a;if(a>(eDZ(),e0G)[10])throw p7(new vo(eHJ+c+'"'));for(i=0;i0&&(f=-parseInt(e.substr(0,r),10),e=e.substr(r),a-=r,n=!1);a>=o;){if(r=parseInt(e.substr(0,o),10),e=e.substr(o),a-=o,n)n=!1;else{if(0>ecd(f,s))throw p7(new vo(eHJ+c+'"'));f=efn(f,l)}f=efe(f,r)}if(ecd(f,0)>0||!u&&(f=QC(f),0>ecd(f,0)))throw p7(new vo(eHJ+c+'"'));return f}function eF2(e,t){var n,r,i,a,o,s,u;if(Rm(),this.a=new MW(this),this.b=e,this.c=t,this.f=Yg(QZ((eSp(),tvc),t)),this.f.dc()){if((s=ev1(tvc,e))==t)for(this.e=!0,this.d=new p0,this.f=new o5,this.f.Fc(eQB),Pp(eP9(Qq(tvc,etP(e)),""),26)==e&&this.f.Fc(Fr(tvc,etP(e))),i=eIT(tvc,e).Kc();i.Ob();)switch(Ur(QZ(tvc,r=Pp(i.Pb(),170)))){case 4:this.d.Fc(r);break;case 5:this.f.Gc(Yg(QZ(tvc,r)))}else if(_4(),Pp(t,66).Oj())for(o=0,this.e=!0,this.f=null,this.d=new p0,u=(null==e.i&&eNT(e),e.i).length;o=0&&o0&&(Pp(UA(e.b,t),124).a.b=n)}function eF4(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g;for(ewG(t,"Comment pre-processing",1),n=0,u=new fz(e.a);u.a0&&64!=(u=(GV(0,t.length),t.charCodeAt(0)))){if(37==u&&(f=t.lastIndexOf("%"),c=!1,0!=f&&(f==d-1||(c=(GV(f+1,t.length),46==t.charCodeAt(f+1)))))){if(y=IE("%",o=t.substr(1,f-1))?null:eYy(o),r=0,c)try{r=eDa(t.substr(f+2),eHt,eUu)}catch(w){if(w=eoa(w),M4(w,127))throw s=w,p7(new QH(s));throw 
p7(w)}for(m=erW(e.Wg());m.Ob();)if(M4(p=eaO(m),510)&&(v=(i=Pp(p,590)).d,(null==y?null==v:IE(y,v))&&0==r--))return i;return null}if(h=-1==(l=t.lastIndexOf("."))?t:t.substr(0,l),n=0,-1!=l)try{n=eDa(t.substr(l+1),eHt,eUu)}catch(_){if(_=eoa(_),M4(_,127))h=t;else throw p7(_)}for(h=IE("%",h)?null:eYy(h),b=erW(e.Wg());b.Ob();)if(M4(p=eaO(b),191)&&(g=(a=Pp(p,191)).ne(),(null==h?null==g:IE(h,g))&&0==n--))return a;return null}return eR2(e,t)}function eF8(e){var t,n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M;for(E=new p0,p=new fz(e.b);p.a=e.length)return{done:!0};var r=e[n++];return{value:[r,t.get(r)],done:!1}}}},eCi()||(e.prototype.createObject=function(){return{}},e.prototype.get=function(e){return this.obj[":"+e]},e.prototype.set=function(e,t){this.obj[":"+e]=t},e.prototype[e$c]=function(e){delete this.obj[":"+e]},e.prototype.keys=function(){var e=[];for(var t in this.obj)58==t.charCodeAt(0)&&e.push(t.substring(1));return e}),e}function eYt(e){var t,n,r,i,a,o,s,u,c,l,f,d,h,p,b,m;if(eNl(),null==e)return null;if(0==(f=8*e.length))return"";for(u=0,s=f%24,h=f/24|0,a=null,a=Je(tyw,eHl,25,4*(d=0!=s?h+1:h),15,1),c=0,l=0,t=0,n=0,r=0,o=0,i=0;u>24,c=(3&t)<<24>>24,p=(-128&t)==0?t>>2<<24>>24:(t>>2^192)<<24>>24,b=(-128&n)==0?n>>4<<24>>24:(n>>4^240)<<24>>24,m=(-128&r)==0?r>>6<<24>>24:(r>>6^252)<<24>>24,a[o++]=tvQ[p],a[o++]=tvQ[b|c<<4],a[o++]=tvQ[l<<2|m],a[o++]=tvQ[63&r];return 8==s?(c=(3&(t=e[i]))<<24>>24,p=(-128&t)==0?t>>2<<24>>24:(t>>2^192)<<24>>24,a[o++]=tvQ[p],a[o++]=tvQ[c<<4],a[o++]=61,a[o++]=61):16==s&&(t=e[i],l=(15&(n=e[i+1]))<<24>>24,c=(3&t)<<24>>24,p=(-128&t)==0?t>>2<<24>>24:(t>>2^192)<<24>>24,b=(-128&n)==0?n>>4<<24>>24:(n>>4^240)<<24>>24,a[o++]=tvQ[p],a[o++]=tvQ[b|c<<4],a[o++]=tvQ[l<<2],a[o++]=61),ehv(a,0,a.length)}function eYn(e,t){var n,r,i,a,o,s,u;if(0==e.e&&e.p>0&&(e.p=-(e.p-1)),e.p>eHt&&V9(t,e.p-eHx),o=t.q.getDate(),zC(t,1),e.k>=0&&z7(t,e.k),e.c>=0?zC(t,e.c):e.k>=0?(r=35-(u=new 
est(t.q.getFullYear()-eHx,t.q.getMonth(),35)).q.getDate(),zC(t,eB4.Math.min(r,o))):zC(t,o),e.f<0&&(e.f=t.q.getHours()),e.b>0&&e.f<12&&(e.f+=12),M5(t,24==e.f&&e.g?0:e.f),e.j>=0&&Z0(t,e.j),e.n>=0&&Jf(t,e.n),e.i>=0&&xN(t,eft(efn(eyt(eap(t.q.getTime()),eHf),eHf),e.i)),e.a&&(V9(i=new wW,i.q.getFullYear()-eHx-80),Ei(eap(t.q.getTime()),eap(i.q.getTime()))&&V9(t,i.q.getFullYear()-eHx+100)),e.d>=0){if(-1==e.c)(n=(7+e.d-t.q.getDay())%7)>3&&(n-=7),s=t.q.getMonth(),zC(t,t.q.getDate()+n),t.q.getMonth()!=s&&zC(t,t.q.getDate()+(n>0?-7:7));else if(t.q.getDay()!=e.d)return!1}return e.o>eHt&&(a=t.q.getTimezoneOffset(),xN(t,eft(eap(t.q.getTime()),(e.o-a)*60*eHf))),!0}function eYr(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w;if(i=e_k(t,(eBU(),tnc)),M4(i,239)){for(p=Pp(i,33),b=t.e,d=new TS(t.c),a=t.d,d.a+=a.b,d.b+=a.d,w=Pp(eT8(p,(eBy(),ta9)),174),Aa(w,(eI3(),tbJ))&&(h=Pp(eT8(p,ta7),116),lR(h,a.a),lG(h,a.d),lj(h,a.b),lW(h,a.c)),n=new p0,l=new fz(t.a);l.a0&&P_(e.p,f),P_(e.o,f);t-=r,p=u+t,l+=t*e.e,q1(e.a,s,ell(p)),q1(e.b,s,l),e.j=eB4.Math.max(e.j,p),e.k=eB4.Math.max(e.k,l),e.d+=t,t+=m}}function eYu(){var e;eYu=A,tbF=new kS(ezo,0),tbw=new kS(ezb,1),tby=new kS(ezm,2),tbj=new kS(ezg,3),tbY=new kS(ezv,4),tbx=(Hj(),new vd((e=Pp(yw(e6a),9),new I1(e,Pp(CY(e,e.length),9),0)))),tbT=ecO(jL(tbw,eow(vx(e6a,1),eGj,61,0,[]))),tb_=ecO(jL(tby,eow(vx(e6a,1),eGj,61,0,[]))),tbN=ecO(jL(tbj,eow(vx(e6a,1),eGj,61,0,[]))),tbR=ecO(jL(tbY,eow(vx(e6a,1),eGj,61,0,[]))),tbC=ecO(jL(tbw,eow(vx(e6a,1),eGj,61,0,[tbj]))),tbk=ecO(jL(tby,eow(vx(e6a,1),eGj,61,0,[tbY]))),tbD=ecO(jL(tbw,eow(vx(e6a,1),eGj,61,0,[tbY]))),tbM=ecO(jL(tbw,eow(vx(e6a,1),eGj,61,0,[tby]))),tbP=ecO(jL(tbj,eow(vx(e6a,1),eGj,61,0,[tbY]))),tbE=ecO(jL(tby,eow(vx(e6a,1),eGj,61,0,[tbj]))),tbL=ecO(jL(tbw,eow(vx(e6a,1),eGj,61,0,[tby,tbY]))),tbS=ecO(jL(tby,eow(vx(e6a,1),eGj,61,0,[tbj,tbY]))),tbI=ecO(jL(tbw,eow(vx(e6a,1),eGj,61,0,[tbj,tbY]))),tbO=ecO(jL(tbw,eow(vx(e6a,1),eGj,61,0,[tby,tbj]))),tbA=ecO(jL(tbw,eow(vx(e6a,1),eGj,61,0,[tby,tbj,tbY])))}function 
eYc(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y;if(0!=t.b){for(h=new _n,s=null,p=null,r=zy(eB4.Math.floor(eB4.Math.log(t.b)*eB4.Math.LOG10E)+1),u=0,y=epL(t,0);y.b!=y.d.c;)for(g=Pp(Vv(y),86),xc(p)!==xc(e_k(g,(eR6(),tca)))&&(p=Lq(e_k(g,tca)),u=0),eo3(g,tca,s=null!=p?p+WB(u++,r):WB(u++,r)),m=(i=epL(new hz(g).a.d,0),new hG(i));yV(m.a);)qQ(h,b=Pp(Vv(m.a),188).c,h.c.b,h.c),eo3(b,tca,s);for(o=0,d=new p2;o=u){A6(g.b>0),g.a.Xb(g.c=--g.b);break}b.a>c&&(i?(eoc(i.b,b.b),i.a=eB4.Math.max(i.a,b.a),BH(g)):(P_(b.b,f),b.c=eB4.Math.min(b.c,c),b.a=eB4.Math.max(b.a,u),i=b))}i||((i=new mi).c=c,i.a=u,CD(g,i),P_(i.b,f))}for(s=t.b,l=0,m=new fz(r);m.as?1:0:(e.b&&(e.b._b(a)&&(i=Pp(e.b.xc(a),19).a),e.b._b(u)&&(s=Pp(e.b.xc(u),19).a)),is?1:0);return 0!=t.e.c.length&&0!=n.g.c.length?1:-1}function eYd(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S;for(ewG(t,eWo,1),b=new p0,E=new p0,c=new fz(e.b);c.a0&&(w-=p),eRU(o,w),f=0,h=new fz(o.a);h.a0),s.a.Xb(s.c=--s.b)),u=.4*r*f,!a&&s.bt.d.c){if((p=e.c[t.a.d])==(g=e.c[d.a.d]))continue;eAx(_f(_l(_d(_c(new bQ,1),100),p),g))}}}}}}function eYy(e){var t,n,r,i,a,o,s,u;if(eRe(),null==e)return null;if((i=x7(e,e_n(37)))<0)return e;for(u=new O0(e.substr(0,i)),t=Je(tyk,eZ8,25,4,15,1),s=0,r=0,o=e.length;ii+2&&eoV((GV(i+1,e.length),e.charCodeAt(i+1)),tmZ,tmX)&&eoV((GV(i+2,e.length),e.charCodeAt(i+2)),tmZ,tmX)){if(n=P0((GV(i+1,e.length),e.charCodeAt(i+1)),(GV(i+2,e.length),e.charCodeAt(i+2))),i+=2,r>0?(192&n)==128?t[s++]=n<<24>>24:r=0:n>=128&&((224&n)==192?(t[s++]=n<<24>>24,r=2):(240&n)==224?(t[s++]=n<<24>>24,r=3):(248&n)==240&&(t[s++]=n<<24>>24,r=4)),r>0){if(s==r){switch(s){case 2:Bd(u,((31&t[0])<<6|63&t[1])&eHd);break;case 3:Bd(u,((15&t[0])<<12|(63&t[1])<<6|63&t[2])&eHd)}s=0,r=0}}else{for(a=0;a0){if(o+r>e.length)return!1;s=exf(e.substr(0,o+r),t)}else s=exf(e,t)}switch(a){case 71:return s=ew6(e,o,eow(vx(e17,1),eUP,2,6,[eHM,eHO]),t),i.e=s,!0;case 77:return eLY(e,t,i,s,o);case 76:return eLB(e,t,i,s,o);case 69:return eS$(e,t,o,i);case 99:return eSz(e,t,o,i);case 
97:return s=ew6(e,o,eow(vx(e17,1),eUP,2,6,["AM","PM"]),t),i.b=s,!0;case 121:return eLU(e,t,o,s,n,i);case 100:if(s<=0)return!1;return i.c=s,!0;case 83:if(s<0)return!1;return edc(s,o,t[0],i);case 104:12==s&&(s=0);case 75:case 72:if(s<0)return!1;return i.f=s,i.g=!1,!0;case 107:if(s<0)return!1;return i.f=s,i.g=!0,!0;case 109:if(s<0)return!1;return i.j=s,!0;case 115:if(s<0)return!1;return i.n=s,!0;case 90:if(oE&&(p.c=E-p.b),P_(o.d,new jH(p,elO(o,p))),v=t==tbw?eB4.Math.max(v,b.b+c.b.rf().b):eB4.Math.min(v,b.b));for(v+=t==tbw?e.t:-e.t,(y=edp((o.e=v,o)))>0&&(Pp(UA(e.b,t),124).a.b=y),l=d.Kc();l.Ob();)(c=Pp(l.Pb(),111)).c&&!(c.c.d.c.length<=0)&&(p=c.c.i,p.c-=c.e.a,p.d-=c.e.b)}function eYE(e){var t,n,r,i,a,o,s,u,l,f,d,h,p;for(t=new p2,u=new Ow(e);u.e!=u.i.gc();){for(s=Pp(epH(u),33),n=new bV,Um(e9t,s,n),p=new e5,i=Pp(qE(new R1(null,new YI(new Fa(OH(eOr(s).a.Kc(),new c)))),jD(p,JF(new U,new B,new en,eow(vx(e2L,1),eU4,132,0,[(eum(),e2U)])))),83),enC(n,Pp(i.xc((OQ(),!0)),14),new e6),o=(r=Pp(qE(UJ(Pp(i.xc(!1),15).Lc(),new e9),JF(new U,new B,new en,eow(vx(e2L,1),eU4,132,0,[e2U]))),15)).Kc();o.Ob();)(h=e_D(a=Pp(o.Pb(),79)))&&((l=Pp(xu($I(t.f,h)),21))||(l=eA7(h),eS9(t.f,h,l)),er7(n,l));for(i=Pp(qE(new R1(null,new YI(new Fa(OH(eOi(s).a.Kc(),new c)))),jD(p,JF(new U,new B,new en,eow(vx(e2L,1),eU4,132,0,[e2U])))),83),enC(n,Pp(i.xc(!0),14),new e8),d=(r=Pp(qE(UJ(Pp(i.xc(!1),15).Lc(),new e7),JF(new U,new B,new en,eow(vx(e2L,1),eU4,132,0,[e2U]))),15)).Kc();d.Ob();)(h=e_N(f=Pp(d.Pb(),79)))&&((l=Pp(xu($I(t.f,h)),21))||(l=eA7(h),eS9(t.f,h,l)),er7(n,l))}}function eYS(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b;if(ePN(),(u=0>ecd(e,0))&&(e=QC(e)),0==ecd(e,0))switch(t){case 0:return"0";case 1:return e$e;case 2:return"0.00";case 3:return"0.000";case 4:return"0.0000";case 5:return"0.00000";case 6:return"0.000000";default:return h=new vc,t<0?(h.a+="0E+",h):(h.a+="0E",h),h.a+=t==eHt?"2147483648":""+-t,h.a}f=Je(tyw,eHl,25,(l=18)+1,15,1),n=l,b=e;do 
c=b,b=eyt(b,10),f[--n]=jE(eft(48,efe(c,efn(b,10))))&eHd;while(0!=ecd(b,0))if(i=efe(efe(efe(l,n),t),1),0==t)return u&&(f[--n]=45),ehv(f,n,l-n);if(t>0&&ecd(i,-6)>=0){if(ecd(i,0)>=0){for(a=n+jE(i),s=l-1;s>=a;s--)f[s+1]=f[s];return f[++a]=46,u&&(f[--n]=45),ehv(f,n,l-n+1)}for(o=2;Ei(o,eft(QC(i),1));o++)f[--n]=48;return f[--n]=46,f[--n]=48,u&&(f[--n]=45),ehv(f,n,l-n)}return p=n+1,r=l,d=new vl,u&&(d.a+="-"),r-p>=1?(Bd(d,f[n]),d.a+=".",d.a+=ehv(f,n+1,l-n-1)):d.a+=ehv(f,n,l-n),d.a+="E",ecd(i,0)>0&&(d.a+="+"),d.a+=""+Fb(i),d.a}function eYk(e,t,n){var r,i,a,o,s,u,c,l,f,d,h;if(e.e.a.$b(),e.f.a.$b(),e.c.c=Je(e1R,eUp,1,0,5,1),e.i.c=Je(e1R,eUp,1,0,5,1),e.g.a.$b(),t)for(o=new fz(t.a);o.a=1&&(_-c>0&&p>=0?(eno(f,f.i+w),ens(f,f.j+u*c)):_-c<0&&h>=0&&(eno(f,f.i+w*_),ens(f,f.j+u)));return ebu(e,(eBB(),thx),(ed6(),a=Pp(yw(e6o),9),new I1(a,Pp(CY(a,a.length),9),0))),new kl(E,l)}function eYT(e){var t,n,r,i,a,o,s,u,c,l,f,d,h,p;if(h=z$(ewH(Pp(etj((e.b||(e.b=new Ih(e6m,e,4,7)),e.b),0),82))),p=z$(ewH(Pp(etj((e.c||(e.c=new Ih(e6m,e,5,8)),e.c),0),82))),f=h==p,s=new yb,(t=Pp(eT8(e,(euw(),tpj)),74))&&t.b>=2){if(0==(e.a||(e.a=new FQ(e6v,e,6,6)),e.a).i)n=(yT(),i=new oQ),JL((e.a||(e.a=new FQ(e6v,e,6,6)),e.a),n);else if((e.a||(e.a=new FQ(e6v,e,6,6)),e.a).i>1)for(d=new AF((e.a||(e.a=new FQ(e6v,e,6,6)),e.a));d.e!=d.i.gc();)ey_(d);eNI(t,Pp(etj((e.a||(e.a=new FQ(e6v,e,6,6)),e.a),0),202))}if(f)for(r=new Ow((e.a||(e.a=new FQ(e6v,e,6,6)),e.a));r.e!=r.i.gc();)for(n=Pp(epH(r),202),c=new Ow((n.a||(n.a=new O_(e6h,n,5)),n.a));c.e!=c.i.gc();)u=Pp(epH(c),469),s.a=eB4.Math.max(s.a,u.a),s.b=eB4.Math.max(s.b,u.b);for(o=new Ow((e.n||(e.n=new FQ(e6S,e,1,7)),e.n));o.e!=o.i.gc();)a=Pp(epH(o),137),(l=Pp(eT8(a,tp$),8))&&TP(a,l.a,l.b),f&&(s.a=eB4.Math.max(s.a,a.i+a.g),s.b=eB4.Math.max(s.b,a.j+a.f));return s}function eYM(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,k,x;for(s=0,y=t.c.length,i=new eIW(e.a,n,null,null),x=Je(tyx,eH5,25,y,15,1),b=Je(tyx,eH5,25,y,15,1),p=Je(tyx,eH5,25,y,15,1),m=0;sx[u]&&(m=u),f=new 
fz(e.a.b);f.ah&&(a&&(xL(E,d),xL(k,ell(c.b-1))),A=n.b,L+=d+t,d=0,l=eB4.Math.max(l,n.b+n.c+O)),eno(s,A),ens(s,L),l=eB4.Math.max(l,A+O+n.c),d=eB4.Math.max(d,f),A+=O+t;if(l=eB4.Math.max(l,r),(M=L+d+n.a)ez8,x=eB4.Math.abs(d.b-p.b)>ez8,(!n&&k&&x||n&&(k||x))&&P7(m.a,w)),er7(m.a,r),d=0==r.b?w:(A6(0!=r.b),Pp(r.c.b.c,8)),ea1(h,f,b),eiy(i)==S&&(Bq(S.i)!=i.a&&eSb(b=new yb,Bq(S.i),v),eo3(m,tnC,b)),eEw(h,m,v),l.a.zc(h,l);Gs(m,_),Go(m,S)}for(c=l.a.ec().Kc();c.Ob();)Gs(u=Pp(c.Pb(),17),null),Go(u,null);eEj(t)}function eYC(e){var t,n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w;if(1==e.gc())return Pp(e.Xb(0),231);if(0>=e.gc())return new Z5;for(i=e.Kc();i.Ob();){for(n=Pp(i.Pb(),231),p=0,l=eUu,f=eUu,u=eHt,c=eHt,h=new fz(n.e);h.as&&(y=0,w+=o+g,o=0),eIJ(b,n,y,w),t=eB4.Math.max(t,y+m.a),o=eB4.Math.max(o,m.b),y+=m.a+g;return b}function eYI(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p;switch(l=new mE,e.a.g){case 3:d=Pp(e_k(t.e,(eBU(),tnO)),15),h=Pp(e_k(t.j,tnO),15),p=Pp(e_k(t.f,tnO),15),n=Pp(e_k(t.e,tnT),15),r=Pp(e_k(t.j,tnT),15),i=Pp(e_k(t.f,tnT),15),o=new p0,eoc(o,d),h.Jc(new iN),eoc(o,M4(h,152)?ZK(Pp(h,152)):M4(h,131)?Pp(h,131).a:M4(h,54)?new gn(h):new w$(h)),eoc(o,p),a=new p0,eoc(a,n),eoc(a,M4(r,152)?ZK(Pp(r,152)):M4(r,131)?Pp(r,131).a:M4(r,54)?new gn(r):new w$(r)),eoc(a,i),eo3(t.f,tnO,o),eo3(t.f,tnT,a),eo3(t.f,tnA,t.f),eo3(t.e,tnO,null),eo3(t.e,tnT,null),eo3(t.j,tnO,null),eo3(t.j,tnT,null);break;case 1:er7(l,t.e.a),P7(l,t.i.n),er7(l,eaa(t.j.a)),P7(l,t.a.n),er7(l,t.f.a);break;default:er7(l,t.e.a),er7(l,eaa(t.j.a)),er7(l,t.f.a)}HC(t.f.a),er7(t.f.a,l),Gs(t.f,t.e.c),s=Pp(e_k(t.e,(eBy(),taR)),74),c=Pp(e_k(t.j,taR),74),u=Pp(e_k(t.f,taR),74),(s||c||u)&&(Yp(f=new mE,u),Yp(f,c),Yp(f,s),eo3(t.f,taR,f)),Gs(t.j,null),Go(t.j,null),Gs(t.e,null),Go(t.e,null),Gu(t.a,null),Gu(t.i,null),t.g&&eYI(e,t.g)}function eYD(e){var t,n,r,i,a,o,s,u,c,l,f,d,h,p,b,m;if(eNl(),null==e||(a=Q4(e),(p=elw(a))%4!=0))return null;if(0==(b=p/4|0))return 
Je(tyk,eZ8,25,0,15,1);for(f=null,t=0,n=0,r=0,i=0,o=0,s=0,u=0,c=0,h=0,d=0,l=0,f=Je(tyk,eZ8,25,3*b,15,1);h>4)<<24>>24,f[d++]=((15&n)<<4|r>>2&15)<<24>>24,f[d++]=(r<<6|i)<<24>>24}if(!wl(o=a[l++])||!wl(s=a[l++]))return null;if(t=tvJ[o],n=tvJ[s],u=a[l++],c=a[l++],-1==tvJ[u]||-1==tvJ[c])return 61==u&&61==c?(15&n)!=0?null:(m=Je(tyk,eZ8,25,3*h+1,15,1),ePD(f,0,m,0,3*h),m[d]=(t<<2|n>>4)<<24>>24,m):61==u||61!=c?null:(3&(r=tvJ[u]))!=0?null:(m=Je(tyk,eZ8,25,3*h+2,15,1),ePD(f,0,m,0,3*h),m[d++]=(t<<2|n>>4)<<24>>24,m[d]=((15&n)<<4|r>>2&15)<<24>>24,m);return r=tvJ[u],i=tvJ[c],f[d++]=(t<<2|n>>4)<<24>>24,f[d++]=((15&n)<<4|r>>2&15)<<24>>24,f[d++]=(r<<6|i)<<24>>24,f}function eYN(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_;for(ewG(t,eWo,1),p=Pp(e_k(e,(eBy(),tag)),218),i=new fz(e.b);i.a=2){for(b=!0,n=Pp(Wx(d=new fz(a.j)),11),h=null;d.a0&&(i=Pp(RJ(m.c.a,E-1),10),o=e.i[i.p],k=eB4.Math.ceil(Mj(e.n,i,m)),a=_.a.e-m.d.d-(o.a.e+i.o.b+i.d.a)-k),c=eHQ,E0&&S.a.e.e-S.a.a-(S.b.e.e-S.b.a)<0,p=y.a.e.e-y.a.a-(y.b.e.e-y.b.a)<0&&S.a.e.e-S.a.a-(S.b.e.e-S.b.a)>0,h=y.a.e.e+y.b.aS.b.e.e+S.a.a,w=0,!b&&!p&&(d?a+f>0?w=f:c-r>0&&(w=r):h&&(a+s>0?w=s:c-v>0&&(w=v))),_.a.e+=w,_.b&&(_.d.e+=w),!1))}function eYR(e,t,n){var r,i,a,o,s,u,c,l,f,d;if(r=new Hr(t.qf().a,t.qf().b,t.rf().a,t.rf().b),i=new TE,e.c)for(o=new fz(t.wf());o.ac&&(r.a+=M3(Je(tyw,eHl,25,-c,15,1))),r.a+="Is",x7(u,e_n(32))>=0)for(i=0;i=r.o.b/2}v?(g=Pp(e_k(r,(eBU(),tnI)),15))?d?a=g:(i=Pp(e_k(r,ttB),15))?a=g.gc()<=i.gc()?g:i:(a=new p0,eo3(r,ttB,a)):(a=new p0,eo3(r,tnI,a)):(i=Pp(e_k(r,(eBU(),ttB)),15))?f?a=i:(g=Pp(e_k(r,tnI),15))?a=i.gc()<=g.gc()?i:g:(a=new p0,eo3(r,tnI,a)):(a=new p0,eo3(r,ttB,a)),a.Fc(e),eo3(e,(eBU(),ttH),n),t.d==n?(Go(t,null),n.e.c.length+n.g.c.length==0&&Gc(n,null),esQ(n)):(Gs(t,null),n.e.c.length+n.g.c.length==0&&Gc(n,null)),HC(t.a)}function eYH(e,t){var n,r,i,a,o,s,u,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M,O,A,L;for(y=new KB(e.b,0),f=t.Kc(),b=0,l=Pp(f.Pb(),19).a,E=0,n=new bV,k=new 
Tw;y.b=e.a&&(r=eN3(e,y),f=eB4.Math.max(f,r.b),_=eB4.Math.max(_,r.d),P_(s,new kD(y,r)));for(l=0,x=new p0;l0),g.a.Xb(g.c=--g.b),T=new By(e.b),CD(g,T),A6(g.b0?(c=0,m&&(c+=s),c+=(x-1)*o,y&&(c+=s),k&&y&&(c=eB4.Math.max(c,eAD(y,o,v,S))),!(c0){for(i=0,d=l<100?null:new yf(l),p=(c=new eiP(t)).g,g=Je(ty_,eHT,25,l,15,1),r=0,w=new eta(l);i=0;)if(null!=h?ecX(h,p[u]):xc(h)===xc(p[u])){g.length<=r&&(m=g,g=Je(ty_,eHT,25,2*g.length,15,1),ePD(m,0,g,0,r)),g[r++]=i,JL(w,p[u]);break v}if(xc(h)===xc(s))break}}if(c=w,p=w.g,l=r,r>g.length&&(m=g,g=Je(ty_,eHT,25,r,15,1),ePD(m,0,g,0,r)),r>0){for(a=0,y=!0;a=0;)egk(e,g[o]);if(r!=l){for(i=l;--i>=r;)egk(c,i);m=g,g=Je(ty_,eHT,25,r,15,1),ePD(m,0,g,0,r)}t=c}}}else for(t=egh(e,t),i=e.i;--i>=0;)t.Hc(e.g[i])&&(egk(e,i),y=!0);if(!y)return!1;if(null!=g){for(f=1==(n=t.gc())?Gt(e,4,t.Kc().Pb(),null,g[0],b):Gt(e,6,t,g,g[0],b),d=n<100?null:new yf(n),i=t.Kc();i.Ob();)d=IW(e,Pp(h=i.Pb(),72),d);d?(d.Ei(f),d.Fi()):eam(e.e,f)}else{for(d=IP(t.gc()),i=t.Kc();i.Ob();)d=IW(e,Pp(h=i.Pb(),72),d);d&&d.Fi()}return!0}function eYV(e,t){var n,r,i,a,o,s,u,l,f,d,h,p,b,m,g,v,y,w;for((n=new eb_(t)).a||eDc(t),l=eCx(t),u=new zu,g=new eLy,m=new fz(t.a);m.a0||n.o==tuS&&i0?(f=Pp(RJ(d.c.a,o-1),10),k=Mj(e.b,d,f),m=d.n.b-d.d.d-(f.n.b+f.o.b+f.d.a+k)):m=d.n.b-d.d.d,c=eB4.Math.min(m,c),oo?eIc(e,t,n):eIc(e,n,t),io?1:0}return r=Pp(e_k(t,(eBU(),tnu)),19).a,a=Pp(e_k(n,tnu),19).a,r>a?eIc(e,t,n):eIc(e,n,t),ra?1:0}function eYQ(e,t,n,r){var i,a,o,s,u,c,l,f,d,h,p,b,m,g,v;if(gN(LK(eT8(t,(eBB(),thI))))||(c=0!=(t.a||(t.a=new FQ(e6k,t,10,11)),t.a).i,l=!(f=ekq(t)).dc(),!c&&!l))return Hj(),Hj(),e2r;if(!(i=Pp(eT8(t,th6),149)))throw p7(new gq("Resolved algorithm is not set; apply a LayoutAlgorithmResolver before computing layout."));if(v=ka(i,(eTy(),tmC)),ept(t),!c&&l&&!v)return Hj(),Hj(),e2r;if(u=new p0,xc(eT8(t,thl))===xc((eck(),tpz))&&(ka(i,tmO)||ka(i,tmM)))for(h=eCL(e,t),er7(p=new _n,(t.a||(t.a=new 
FQ(e6k,t,10,11)),t.a));0!=p.b;)ept(d=Pp(0==p.b?null:(A6(0!=p.b),etw(p,p.a.a)),33)),(g=xc(eT8(d,thl))===xc(tpW))||X2(d,tdQ)&&!Zs(i,eT8(d,th6))?(s=eYQ(e,d,n,r),eoc(u,s),ebu(d,thl,tpW),eIU(d)):er7(p,(d.a||(d.a=new FQ(e6k,d,10,11)),d.a));else for(h=(t.a||(t.a=new FQ(e6k,t,10,11)),t.a).i,o=new Ow((t.a||(t.a=new FQ(e6k,t,10,11)),t.a));o.e!=o.i.gc();)a=Pp(epH(o),33),s=eYQ(e,a,n,r),eoc(u,s),eIU(a);for(m=new fz(u);m.a=0?ef9(s):elC(ef9(s)),e.Ye(tob,h)),c=new yb,d=!1,e.Xe(tou)?(Lf(c,Pp(e.We(tou),8)),d=!0):Oc(c,o.a/2,o.b/2),h.g){case 4:eo3(l,taY,(ef_(),tnN)),eo3(l,ttV,(eoG(),te0)),l.o.b=o.b,b<0&&(l.o.a=-b),ekv(f,(eYu(),tby)),d||(c.a=o.a),c.a-=o.a;break;case 2:eo3(l,taY,(ef_(),tnR)),eo3(l,ttV,(eoG(),teQ)),l.o.b=o.b,b<0&&(l.o.a=-b),ekv(f,(eYu(),tbY)),d||(c.a=0);break;case 1:eo3(l,tt9,(Q1(),ttN)),l.o.a=o.a,b<0&&(l.o.b=-b),ekv(f,(eYu(),tbj)),d||(c.b=o.b),c.b-=o.b;break;case 3:eo3(l,tt9,(Q1(),ttI)),l.o.a=o.a,b<0&&(l.o.b=-b),ekv(f,(eYu(),tbw)),d||(c.b=0)}if(Lf(f.n,c),eo3(l,tou,c),t==tba||t==tbs||t==tbo){if(p=0,t==tba&&e.Xe(tof))switch(h.g){case 1:case 2:p=Pp(e.We(tof),19).a;break;case 3:case 4:p=-Pp(e.We(tof),19).a}else switch(h.g){case 4:case 2:p=a.b,t==tbs&&(p/=i.b);break;case 1:case 3:p=a.a,t==tbs&&(p/=i.a)}eo3(l,tnv,p)}return eo3(l,tt1,h),l}function eY0(e){var t,n,r,i,a,o,s,u,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T;if((n=gP(LV(e_k(e.a.j,(eBy(),tar)))))<-1||!e.a.i||IR(Pp(e_k(e.a.o,tol),98))||2>efr(e.a.o,(eYu(),tby)).gc()&&2>efr(e.a.o,tbY).gc())return!0;if(e.a.c.Rf())return!1;for(E=0,_=0,w=new p0,u=e.a.e,l=0,f=u.length;l=n}function eY2(){function n(e){var t=this;this.dispatch=function(t){var n=t.data;switch(n.cmd){case"algorithms":var r=edh((Hj(),new fF(new fT(tmF.b))));e.postMessage({id:n.id,data:r});break;case"categories":var i=edh((Hj(),new fF(new fT(tmF.c))));e.postMessage({id:n.id,data:i});break;case"options":var a=edh((Hj(),new fF(new 
fT(tmF.d))));e.postMessage({id:n.id,data:a});break;case"register":ejy(n.algorithms),e.postMessage({id:n.id});break;case"layout":ePu(n.graph,n.layoutOptions||{},n.options||{}),e.postMessage({id:n.id,data:n.graph})}},this.saveDispatch=function(n){try{t.dispatch(n)}catch(r){e.postMessage({id:n.data.id,error:r})}}}function r(e){var t=this;this.dispatcher=new n({postMessage:function(e){t.onmessage({data:e})}}),this.postMessage=function(e){setTimeout(function(){t.dispatcher.saveDispatch({data:e})},0)}}if(yC(),typeof document===e$E&&typeof self!==e$E){var i=new n(self);self.onmessage=i.saveDispatch}else"object"!==e$E&&e.exports&&(Object.defineProperty(t,"__esModule",{value:!0}),e.exports={default:r,Worker:r})}function eY3(e){e.N||(e.N=!0,e.b=eak(e,0),er6(e.b,0),er6(e.b,1),er6(e.b,2),e.bb=eak(e,1),er6(e.bb,0),er6(e.bb,1),e.fb=eak(e,2),er6(e.fb,3),er6(e.fb,4),er9(e.fb,5),e.qb=eak(e,3),er6(e.qb,0),er9(e.qb,1),er9(e.qb,2),er6(e.qb,3),er6(e.qb,4),er9(e.qb,5),er6(e.qb,6),e.a=eax(e,4),e.c=eax(e,5),e.d=eax(e,6),e.e=eax(e,7),e.f=eax(e,8),e.g=eax(e,9),e.i=eax(e,10),e.j=eax(e,11),e.k=eax(e,12),e.n=eax(e,13),e.o=eax(e,14),e.p=eax(e,15),e.q=eax(e,16),e.s=eax(e,17),e.r=eax(e,18),e.t=eax(e,19),e.u=eax(e,20),e.v=eax(e,21),e.w=eax(e,22),e.B=eax(e,23),e.A=eax(e,24),e.C=eax(e,25),e.D=eax(e,26),e.F=eax(e,27),e.G=eax(e,28),e.H=eax(e,29),e.J=eax(e,30),e.I=eax(e,31),e.K=eax(e,32),e.M=eax(e,33),e.L=eax(e,34),e.P=eax(e,35),e.Q=eax(e,36),e.R=eax(e,37),e.S=eax(e,38),e.T=eax(e,39),e.U=eax(e,40),e.V=eax(e,41),e.X=eax(e,42),e.W=eax(e,43),e.Y=eax(e,44),e.Z=eax(e,45),e.$=eax(e,46),e._=eax(e,47),e.ab=eax(e,48),e.cb=eax(e,49),e.db=eax(e,50),e.eb=eax(e,51),e.gb=eax(e,52),e.hb=eax(e,53),e.ib=eax(e,54),e.jb=eax(e,55),e.kb=eax(e,56),e.lb=eax(e,57),e.mb=eax(e,58),e.nb=eax(e,59),e.ob=eax(e,60),e.pb=eax(e,61))}function eY4(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w;if(v=0,0==t.f.a)for(m=new 
fz(e);m.ac&&0==(GK(c,t.c.length),Pp(t.c[c],200)).a.c.length;)QA(t,(GK(c,t.c.length),t.c[c]));if(!u){--a;continue}if(eDk(t,l,i,u,d,n,c,r)){f=!0;continue}if(d){if(ePx(t,l,i,u,n,c,r)){f=!0;continue}if(eu4(l,i)){i.c=!0,f=!0;continue}}else if(eu4(l,i)){i.c=!0,f=!0;continue}if(f)continue}if(eu4(l,i)){i.c=!0,f=!0,u&&(u.k=!1);continue}emG(i.q)}return f}function eY9(e,t,n,r,i,a,o){var s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M,O,A,L;for(b=0,T=0,c=new fz(e.b);c.ab&&(a&&(xL(E,h),xL(k,ell(l.b-1)),P_(e.d,p),s.c=Je(e1R,eUp,1,0,5,1)),A=n.b,L+=h+t,h=0,f=eB4.Math.max(f,n.b+n.c+O)),s.c[s.c.length]=u,epW(u,A,L),f=eB4.Math.max(f,A+O+n.c),h=eB4.Math.max(h,d),A+=O+t,p=u;if(eoc(e.a,s),P_(e.d,Pp(RJ(s,s.c.length-1),157)),f=eB4.Math.max(f,r),(M=L+h+n.a)1&&(o=eB4.Math.min(o,eB4.Math.abs(Pp(ep3(s.a,1),8).b-l.b)))));else for(b=new fz(t.j);b.ai&&(a=d.a-i,o=eUu,r.c=Je(e1R,eUp,1,0,5,1),i=d.a),d.a>=i&&(r.c[r.c.length]=s,s.a.b>1&&(o=eB4.Math.min(o,eB4.Math.abs(Pp(ep3(s.a,s.a.b-2),8).b-d.b)))));if(0!=r.c.length&&a>t.o.a/2&&o>t.o.b/2){for(h=new eES,Gc(h,t),ekv(h,(eYu(),tbw)),h.n.a=t.o.a/2,g=new eES,Gc(g,t),ekv(g,tbj),g.n.a=t.o.a/2,g.n.b=t.o.b,u=new fz(r);u.a=c.b?Gs(s,g):Gs(s,h)):(c=Pp(P$(s.a),8),(m=0==s.a.b?GX(s.c):Pp(AZ(s.a),8)).b>=c.b?Go(s,g):Go(s,h)),(f=Pp(e_k(s,(eBy(),taR)),74))&&eds(f,c,!0);t.n.a=i-t.o.a/2}}function eBe(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M,O,A,L,C,I;if(T=null,O=t,M=V0(e,VF(n),O),ert(M,KJ(O,eXS)),A=Pp(etJ(e.g,ekZ(zR(O,eXi))),33),d=zR(O,"sourcePort"),r=null,d&&(r=ekZ(d)),L=Pp(etJ(e.j,r),118),!A)throw b=(p="An edge must have a source node (edge id: '"+(s=ehM(O)))+eXO,p7(new gK(b));if(L&&!BG(zY(L),A))throw g=(m="The source port of an edge must be a port of the edge's source node (edge id: '"+(u=KJ(O,eXS)))+eXO,p7(new gK(g));if(k=(M.b||(M.b=new Ih(e6m,M,4,7)),M.b),a=null,JL(k,a=L||A),C=Pp(etJ(e.g,ekZ(zR(O,eXC))),33),h=zR(O,"targetPort"),i=null,h&&(i=ekZ(h)),I=Pp(etJ(e.j,i),118),!C)throw y=(v="An edge must have a target node (edge id: '"+(f=ehM(O)))+eXO,p7(new 
gK(y));if(I&&!BG(zY(I),C))throw _=(w="The target port of an edge must be a port of the edge's target node (edge id: '"+(c=KJ(O,eXS)))+eXO,p7(new gK(_));if(x=(M.c||(M.c=new Ih(e6m,M,5,8)),M.c),o=null,JL(x,o=I||C),0==(M.b||(M.b=new Ih(e6m,M,4,7)),M.b).i||0==(M.c||(M.c=new Ih(e6m,M,5,8)),M.c).i)throw S=(E=eXM+(l=KJ(O,eXS)))+eXO,p7(new gK(S));return ewU(O,M),eMu(O,M),T=esv(e,O,M)}function eBt(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T;return f=eNf(A_(e,(eYu(),tbx)),t),p=em9(A_(e,tbT),t),w=em9(A_(e,tbN),t),k=em8(A_(e,tbR),t),d=em8(A_(e,tb_),t),v=em9(A_(e,tbD),t),b=em9(A_(e,tbM),t),E=em9(A_(e,tbP),t),_=em9(A_(e,tbE),t),x=em8(A_(e,tbk),t),g=em9(A_(e,tbC),t),y=em9(A_(e,tbL),t),S=em9(A_(e,tbS),t),T=em8(A_(e,tbI),t),h=em8(A_(e,tbO),t),m=em9(A_(e,tbA),t),n=esm(eow(vx(tyx,1),eH5,25,15,[v.a,k.a,E.a,T.a])),r=esm(eow(vx(tyx,1),eH5,25,15,[p.a,f.a,w.a,m.a])),i=g.a,a=esm(eow(vx(tyx,1),eH5,25,15,[b.a,d.a,_.a,h.a])),c=esm(eow(vx(tyx,1),eH5,25,15,[v.b,p.b,b.b,y.b])),u=esm(eow(vx(tyx,1),eH5,25,15,[k.b,f.b,d.b,m.b])),l=x.b,s=esm(eow(vx(tyx,1),eH5,25,15,[E.b,w.b,_.b,S.b])),JD(A_(e,tbx),n+i,c+l),JD(A_(e,tbA),n+i,c+l),JD(A_(e,tbT),n+i,0),JD(A_(e,tbN),n+i,c+l+u),JD(A_(e,tbR),0,c+l),JD(A_(e,tb_),n+i+r,c+l),JD(A_(e,tbM),n+i+r,0),JD(A_(e,tbP),0,c+l+u),JD(A_(e,tbE),n+i+r,c+l+u),JD(A_(e,tbk),0,c),JD(A_(e,tbC),n,0),JD(A_(e,tbS),0,c+l+u),JD(A_(e,tbO),n+i+r,0),(o=new yb).a=esm(eow(vx(tyx,1),eH5,25,15,[n+r+i+a,x.a,y.a,S.a])),o.b=esm(eow(vx(tyx,1),eH5,25,15,[c+u+l+s,g.b,T.b,h.b])),o}function eBn(e){var t,n,r,i,a,o,s,u,l,f,d,h,p,b,m,g;for(m=new p0,h=new fz(e.d.b);h.ai.d.d+i.d.a?f.f.d=!0:(f.f.d=!0,f.f.a=!0))),r.b!=r.d.c&&(t=n);f&&(a=Pp(Bp(e.f,o.d.i),57),t.ba.d.d+a.d.a?f.f.d=!0:(f.f.d=!0,f.f.a=!0))}for(s=new Fa(OH(efu(p).a.Kc(),new c));eTk(s);)0!=(o=Pp(ZC(s),17)).a.b&&(t=Pp(AZ(o.a),8),o.d.j==(eYu(),tbw)&&((g=new ePe(t,new kl(t.a,i.d.d),i,o)).f.a=!0,g.a=o.d,m.c[m.c.length]=g),o.d.j==tbj&&((g=new ePe(t,new kl(t.a,i.d.d+i.d.a),i,o)).f.d=!0,g.a=o.d,m.c[m.c.length]=g))}return m}function 
eBr(e,t,n){var r,i,a,o,s,u,c,l,f;if(ewG(n,"Network simplex node placement",1),e.e=t,e.n=Pp(e_k(t,(eBU(),tnx)),304),eRx(e),ey8(e),_r(eeh(new R1(null,new Gq(e.e.b,16)),new i2),new hR(e)),_r(UJ(eeh(UJ(eeh(new R1(null,new Gq(e.e.b,16)),new aa),new ao),new as),new au),new hP(e)),gN(LK(e_k(e.e,(eBy(),taQ))))&&(o=eiI(n,1),ewG(o,"Straight Edges Pre-Processing",1),eFy(e),eEj(o)),ebR(e.f),a=Pp(e_k(t,to$),19).a*e.f.a.c.length,eIX(vC(vI(DN(e.f),a),!1),eiI(n,1)),0!=e.d.a.gc()){for(o=eiI(n,1),ewG(o,"Flexible Where Space Processing",1),s=Pp(Af(FM(UQ(new R1(null,new Gq(e.f.a,16)),new i3),new iZ)),19).a,c=(u=Pp(Af(FT(UQ(new R1(null,new Gq(e.f.a,16)),new i4),new iX)),19).a)-s,l=Al(new b1,e.f),f=Al(new b1,e.f),eAx(_f(_l(_c(_d(new bQ,2e4),c),l),f)),_r(UJ(UJ(Yw(e.i),new i5),new i6),new Hn(s,l,c,f)),i=e.d.a.ec().Kc();i.Ob();)(r=Pp(i.Pb(),213)).g=1;eIX(vC(vI(DN(e.f),a),!1),eiI(o,1)),eEj(o)}gN(LK(e_k(t,taQ)))&&(o=eiI(n,1),ewG(o,"Straight Edges Post-Processing",1),eSf(e),eEj(o)),ej3(e),e.e=null,e.f=null,e.i=null,e.c=null,Yy(e.k),e.j=null,e.a=null,e.o=null,e.d.a.$b(),eEj(n)}function eBi(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_;for(s=new fz(e.a.b);s.a0){if(r=f.gc(),c=zy(eB4.Math.floor((r+1)/2))-1,i=zy(eB4.Math.ceil((r+1)/2))-1,t.o==tuS)for(l=i;l>=c;l--)t.a[w.p]==w&&(b=Pp(f.Xb(l),46),p=Pp(b.a,10),!w0(n,b.b)&&h>e.b.e[p.p]&&(t.a[p.p]=w,t.g[w.p]=t.g[p.p],t.a[w.p]=t.g[w.p],t.f[t.g[w.p].p]=(OQ(),!!(gN(t.f[t.g[w.p].p])&w.k==(eEn(),e8D))),h=e.b.e[p.p]));else for(l=c;l<=i;l++)t.a[w.p]==w&&(g=Pp(f.Xb(l),46),m=Pp(g.a,10),!w0(n,g.b)&&h=p&&(v>p&&(h.c=Je(e1R,eUp,1,0,5,1),p=v),h.c[h.c.length]=o);0!=h.c.length&&(d=Pp(RJ(h,ebO(t,h.c.length)),128),M.a.Bc(d),d.s=b++,eM4(d,x,E),h.c=Je(e1R,eUp,1,0,5,1))}for(w=e.c.length+1,s=new fz(e);s.aT.s&&(BH(n),QA(T.i,r),r.c>0&&(r.a=T,P_(T.t,r),r.b=S,P_(S.i,r)))}function eBs(e){var t,n,r,i,a;switch(t=e.c){case 11:return e.Ml();case 12:return e.Ol();case 14:return e.Ql();case 15:return e.Tl();case 16:return e.Rl();case 17:return e.Ul();case 21:return 
eBM(e),eBG(),eBG(),tye;case 10:switch(e.a){case 65:return e.yl();case 90:return e.Dl();case 122:return e.Kl();case 98:return e.El();case 66:return e.zl();case 60:return e.Jl();case 62:return e.Hl()}}switch(a=eY8(e),t=e.c){case 3:return e.Zl(a);case 4:return e.Xl(a);case 5:return e.Yl(a);case 0:if(123==e.a&&e.d=48&&t<=57){for(r=t-48;i=48&&t<=57;)if((r=10*r+t-48)<0)throw p7(new gX(eBJ((Mo(),eJ_))))}else throw p7(new gX(eBJ((Mo(),eJg))));if(n=r,44==t){if(i>=e.j)throw p7(new gX(eBJ((Mo(),eJy))));if((t=UI(e.i,i++))>=48&&t<=57){for(n=t-48;i=48&&t<=57;)if((n=10*n+t-48)<0)throw p7(new gX(eBJ((Mo(),eJ_))));if(r>n)throw p7(new gX(eBJ((Mo(),eJw))))}else n=-1}if(125!=t)throw p7(new gX(eBJ((Mo(),eJv))));e.sl(i)?(a=(eBG(),eBG(),++tyv,new qa(9,a)),e.d=i+1):(a=(eBG(),eBG(),++tyv,new qa(3,a)),e.d=i),a.dm(r),a.cm(n),eBM(e)}}return a}function eBu(e,t,n,r,i){var a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M;for(b=new XM(t.b),w=new XM(t.b),d=new XM(t.b),k=new XM(t.b),m=new XM(t.b),S=epL(t,0);S.b!=S.d.c;)for(_=Pp(Vv(S),11),s=new fz(_.g);s.a0,g=_.g.c.length>0,c&&g?d.c[d.c.length]=_:c?b.c[b.c.length]=_:g&&(w.c[w.c.length]=_);for(p=new fz(b);p.aefT(Jh(y.d,x),Jh(y.d,y.a))&&(a.c[a.c.length]=y);for(n.c=Je(e1R,eUp,1,0,5,1),w=new fz(a);w.a1)for(p=new AF((e.a||(e.a=new FQ(e6v,e,6,6)),e.a));p.e!=p.i.gc();)ey_(p);for(o=Pp(etj((e.a||(e.a=new FQ(e6v,e,6,6)),e.a),0),202),m=A,A>_+w?m=_+w:A<_-w&&(m=_-w),g=L,L>E+b?g=E+b:L_-w&&m<_+w&&g>E-b&&gA+O?k=A+O:_L+S?x=L+S:EA-O&&kL-S&&xn&&(d=n-1),(h=P+eMU(t,24)*e$h*f-f/2)<0?h=1:h>r&&(h=r-1),i=(yT(),u=new oJ),ent(i,d),enn(i,h),JL((o.a||(o.a=new O_(e6h,o,5)),o.a),i)}function eBy(){eBy=A,tox=(eBB(),th7),toT=tpe,toM=tpt,toO=tpn,toL=tpr,toC=tpi,toN=tpo,toR=tpu,toj=tpc,toP=tps,toF=tpl,toB=tpf,toH=tpp,toD=tpa,tok=(eBH(),tih),toA=tip,toI=tib,toY=tim,tov=new T2(th4,ell(0)),toy=til,tow=tif,to_=tid,toQ=tiB,toG=tiy,toW=tiE,toq=tiL,toK=tix,toV=tiM,to0=tiG,to1=tiH,toX=tiR,toZ=tiN,toJ=tiF,ta0=tit,ta2=tin,taE=trE,taS=trx,toe=new T3(12),ta7=new T2(thN,toe),tav=(efE(),tpx),tag=new 
T2(tha,tav),toc=new T2(thK,0),toE=new T2(th5,ell(1)),tiX=new T2(td2,eGt),ta8=thI,tol=thV,tob=th0,tac=td7,tiq=td1,taM=thl,toS=new T2(th8,(OQ(),!0)),taI=thh,taD=thp,ta4=thx,ta9=thL,ta5=thM,tad=(ec3(),tpv),tal=new T2(the,tad),taZ=thS,taq=th_,toh=thJ,tod=thX,top=th1,tor=(epT(),tbr),new T2(thB,tor),toa=th$,too=thz,tos=thG,toi=thH,toz=tiv,taG=trZ,taz=trV,to$=tig,taY=trB,tau=trs,tas=tra,ti7=tn1,tae=tn0,tan=tn6,tat=tn2,tao=trr,taK=trJ,taV=trQ,taP=trD,ta3=tio,taJ=tr3,tax=trO,ta1=tr7,taw=trg,ta_=trw,ti8=td9,taX=tr1,ti0=tn$,ti1=tnU,tiQ=tnB,taA=trC,taO=trL,taL=trI,ta6=thO,taR=thg,tak=ths,tab=thr,tap=thn,tar=tn7,tof=thZ,tiJ=td6,taC=thd,tou=thW,tot=thR,ton=thF,taU=tr$,taH=trG,tog=th3,tiZ=tnY,ta$=trK,tam=trh,tah=trf,taW=thy,taj=trj,taQ=tr6,toU=tpd,taf=trc,tom=tiu,tay=trb,taF=trY,tai=trt,taN=thm,taB=trH,taa=trn,ti9=tnJ,ti5=tnq,ti3=tnK,ti4=tnV,ti6=tnX,ti2=tnG,taT=trA}function eBw(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M,O,A;if(ePN(),k=e.e,p=e.d,i=e.a,0==k)switch(t){case 0:return"0";case 1:return e$e;case 2:return"0.00";case 3:return"0.000";case 4:return"0.0000";case 5:return"0.00000";case 6:return"0.000000";default:return E=new vc,t<0?(E.a+="0E+",E):(E.a+="0E",E),E.a+=-t,E.a}if(w=Je(tyw,eHl,25,(y=10*p+1+7)+1,15,1),n=y,1==p){if((s=i[0])<0){A=WM(s,eH8);do b=A,A=eyt(A,10),w[--n]=48+jE(efe(b,efn(A,10)))&eHd;while(0!=ecd(A,0))}else{A=s;do b=A,A=A/10|0,w[--n]=48+(b-10*A)&eHd;while(0!=A)}}else{T=Je(ty_,eHT,25,p,15,1),ePD(i,0,T,0,O=p);I:for(;;){for(S=0,c=O-1;c>=0;c--)g=ewT(M=eft(Fg(S,32),WM(T[c],eH8))),T[c]=jE(g),S=jE(Fv(g,32));v=jE(S),m=n;do w[--n]=48+v%10&eHd;while(0!=(v=v/10|0)&&0!=n)for(u=0,r=9-m+n;u0;u++)w[--n]=48;for(f=O-1;0==T[f];f--)if(0==f)break I;O=f+1}for(;48==w[n];)++n}if(h=k<0,o=y-n-t-1,0==t)return h&&(w[--n]=45),ehv(w,n,y-n);if(t>0&&o>=-6){if(o>=0){for(l=n+o,d=y-1;d>=l;d--)w[d+1]=w[d];return w[++l]=46,h&&(w[--n]=45),ehv(w,n,y-n+1)}for(f=2;f<-o+1;f++)w[--n]=48;return w[--n]=46,w[--n]=48,h&&(w[--n]=45),ehv(w,n,y-n)}return x=n+1,a=y,_=new 
vl,h&&(_.a+="-"),a-x>=1?(Bd(_,w[n]),_.a+=".",_.a+=ehv(w,n+1,y-n-1)):_.a+=ehv(w,n,y-n),_.a+="E",o>0&&(_.a+="+"),_.a+=""+o,_.a}function eB_(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E;switch(e.c=t,e.g=new p2,n=(_q(),new gM(e.c)),efJ(r=new dp(n)),y=Lq(eT8(e.c,(e_L(),tfD))),u=Pp(eT8(e.c,tfP),316),_=Pp(eT8(e.c,tfR),429),o=Pp(eT8(e.c,tfO),482),w=Pp(eT8(e.c,tfN),430),e.j=gP(LV(eT8(e.c,tfj))),s=e.a,u.g){case 0:s=e.a;break;case 1:s=e.b;break;case 2:s=e.i;break;case 3:s=e.e;break;case 4:s=e.f;break;default:throw p7(new gL(eqN+(null!=u.f?u.f:""+u.g)))}if(e.d=new zM(s,_,o),eo3(e.d,(ei6(),e6F),LK(eT8(e.c,tfL))),e.d.c=gN(LK(eT8(e.c,tfA))),0==H8(e.c).i)return e.d;for(f=new Ow(H8(e.c));f.e!=f.i.gc();){for(h=(l=Pp(epH(f),33)).g/2,d=l.f/2,E=new kl(l.i+h,l.j+d);F9(e.g,E);)Lu(E,(eB4.Math.random()-.5)*ez8,(eB4.Math.random()-.5)*ez8);b=Pp(eT8(l,(eBB(),thy)),142),m=new Gd(E,new Hr(E.a-h-e.j/2-b.b,E.b-d-e.j/2-b.d,l.g+e.j+(b.b+b.c),l.f+e.j+(b.d+b.a))),P_(e.d.i,m),Um(e.g,E,new kD(m,l))}switch(w.g){case 0:if(null==y)e.d.d=Pp(RJ(e.d.i,0),65);else for(v=new fz(e.d.i);v.a1&&qQ(l,g,l.c.b,l.c),etu(i)));g=v}return l}function eBS(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M,O,A,L,C,I,D;for(ewG(n,"Greedy cycle removal",1),D=(y=t.a).c.length,e.a=Je(ty_,eHT,25,D,15,1),e.c=Je(ty_,eHT,25,D,15,1),e.b=Je(ty_,eHT,25,D,15,1),c=0,g=new fz(y);g.a0?O+1:1);for(o=new fz(E.g);o.a0?O+1:1)}0==e.c[c]?P7(e.e,b):0==e.a[c]&&P7(e.f,b),++c}for(p=-1,h=1,f=new p0,e.d=Pp(e_k(t,(eBU(),tnw)),230);D>0;){for(;0!=e.e.b;)L=Pp(PH(e.e),10),e.b[L.p]=p--,eIQ(e,L),--D;for(;0!=e.f.b;)C=Pp(PH(e.f),10),e.b[C.p]=h++,eIQ(e,C),--D;if(D>0){for(d=eHt,v=new fz(y);v.a=d&&(w>d&&(f.c=Je(e1R,eUp,1,0,5,1),d=w),f.c[f.c.length]=b);l=e.Zf(f),e.b[l.p]=h++,eIQ(e,l),--D}}for(c=0,A=y.c.length+1;ce.b[I]&&(eNF(r,!0),eo3(t,ttK,(OQ(),!0)));e.a=null,e.c=null,e.b=null,HC(e.f),HC(e.e),eEj(n)}function eBk(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g;for(r=new p0,s=new 
p0,m=t/2,h=e.gc(),i=Pp(e.Xb(0),8),g=Pp(e.Xb(1),8),p=eT5(i.a,i.b,g.a,g.b,m),P_(r,(GK(0,p.c.length),Pp(p.c[0],8))),P_(s,(GK(1,p.c.length),Pp(p.c[1],8))),c=2;c=0;u--)P7(n,(GK(u,o.c.length),Pp(o.c[u],8)));return n}function eBx(e){var t,n,r,i,a,o,s,u,c,l,f,d,h;if(o=!0,f=null,r=null,i=null,t=!1,h=tmH,c=null,a=null,(u=epm(e,s=0,tmJ,tmQ))=0&&IE(e.substr(s,2),"//")?(s+=2,u=epm(e,s,tm1,tm0),r=e.substr(s,u-s),s=u):null!=f&&(s==e.length||(GV(s,e.length),47!=e.charCodeAt(s)))&&(o=!1,-1==(u=O7(e,e_n(35),s))&&(u=e.length),r=e.substr(s,u-s),s=u);if(!n&&s0&&58==UI(l,l.length-1)&&(i=l,s=u)),s=e.j){e.a=-1,e.c=1;return}if(t=UI(e.i,e.d++),e.a=t,1==e.b){switch(t){case 92:if(r=10,e.d>=e.j)throw p7(new gX(eBJ((Mo(),eXZ))));e.a=UI(e.i,e.d++);break;case 45:(512&e.e)==512&&e.d=e.j||63!=UI(e.i,e.d))break;if(++e.d>=e.j)throw p7(new gX(eBJ((Mo(),eXX))));switch(t=UI(e.i,e.d++)){case 58:r=13;break;case 61:r=14;break;case 33:r=15;break;case 91:r=19;break;case 62:r=18;break;case 60:if(e.d>=e.j)throw p7(new gX(eBJ((Mo(),eXX))));if(61==(t=UI(e.i,e.d++)))r=16;else if(33==t)r=17;else throw p7(new gX(eBJ((Mo(),eXJ))));break;case 35:for(;e.d=e.j)throw p7(new gX(eBJ((Mo(),eXZ))));e.a=UI(e.i,e.d++);break;default:r=0}e.c=r}function eBO(e){var t,n,r,i,a,o,s,u,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M,O,A;if((k=Pp(e_k(e,(eBy(),tol)),98))!=(ewf(),tbc)&&k!=tbl){for(p=(b=e.b).c.length,f=new XM((enG(p+2,eU6),ee1(eft(eft(5,p+2),(p+2)/10|0)))),m=new XM((enG(p+2,eU6),ee1(eft(eft(5,p+2),(p+2)/10|0)))),P_(f,new p2),P_(f,new p2),P_(m,new p0),P_(m,new p0),S=new p0,t=0;t=E||!ehf(v,r))&&(r=GY(t,f)),Gu(v,r),a=new Fa(OH(efu(v).a.Kc(),new c));eTk(a);)i=Pp(ZC(a),17),!e.a[i.p]&&(m=i.c.i,--e.e[m.p],0==e.e[m.p]&&Ja(e_s(p,m)));for(l=f.c.length-1;l>=0;--l)P_(t.b,(GK(l,f.c.length),Pp(f.c[l],29)));t.a.c=Je(e1R,eUp,1,0,5,1),eEj(n)}function eBL(e){var t,n,r,i,a,o,s,u,c;for(e.b=1,eBM(e),t=null,0==e.c&&94==e.a?(eBM(e),t=(eBG(),eBG(),++tyv,new WZ(4)),eLw(t,0,e1f),s=(++tyv,new WZ(4))):s=(eBG(),eBG(),++tyv,new 
WZ(4)),i=!0;1!=(c=e.c);){if(0==c&&93==e.a&&!i){t&&(ej0(t,s),s=t);break}if(n=e.a,r=!1,10==c)switch(n){case 100:case 68:case 119:case 87:case 115:case 83:ePR(s,eDu(n)),r=!0;break;case 105:case 73:case 99:case 67:(n=(ePR(s,eDu(n)),-1))<0&&(r=!0);break;case 112:case 80:if(!(u=ext(e,n)))throw p7(new gX(eBJ((Mo(),eJe))));ePR(s,u),r=!0;break;default:n=eCn(e)}else if(24==c&&!i){if(t&&(ej0(t,s),s=t),a=eBL(e),ej0(s,a),0!=e.c||93!=e.a)throw p7(new gX(eBJ((Mo(),eJi))));break}if(eBM(e),!r){if(0==c){if(91==n)throw p7(new gX(eBJ((Mo(),eJa))));if(93==n)throw p7(new gX(eBJ((Mo(),eJo))));if(45==n&&!i&&93!=e.a)throw p7(new gX(eBJ((Mo(),eJs))))}if(0!=e.c||45!=e.a||45==n&&i)eLw(s,n,n);else{if(eBM(e),1==(c=e.c))throw p7(new gX(eBJ((Mo(),eJn))));if(0==c&&93==e.a)eLw(s,n,n),eLw(s,45,45);else if(0==c&&93==e.a||24==c)throw p7(new gX(eBJ((Mo(),eJs))));else{if(o=e.a,0==c){if(91==o)throw p7(new gX(eBJ((Mo(),eJa))));if(93==o)throw p7(new gX(eBJ((Mo(),eJo))));if(45==o)throw p7(new gX(eBJ((Mo(),eJs))))}else 10==c&&(o=eCn(e));if(eBM(e),n>o)throw p7(new gX(eBJ((Mo(),eJl))));eLw(s,n,o)}}}i=!1}if(1==e.c)throw p7(new gX(eBJ((Mo(),eJn))));return eMS(s),eRo(s),e.b=0,eBM(e),s}function 
eBC(e){eMV(e.c,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"http://www.w3.org/2001/XMLSchema#decimal"])),eMV(e.d,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"http://www.w3.org/2001/XMLSchema#integer"])),eMV(e.e,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"http://www.w3.org/2001/XMLSchema#boolean"])),eMV(e.f,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"EBoolean",eXP,"EBoolean:Object"])),eMV(e.i,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"http://www.w3.org/2001/XMLSchema#byte"])),eMV(e.g,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"http://www.w3.org/2001/XMLSchema#hexBinary"])),eMV(e.j,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"EByte",eXP,"EByte:Object"])),eMV(e.n,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"EChar",eXP,"EChar:Object"])),eMV(e.t,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"http://www.w3.org/2001/XMLSchema#double"])),eMV(e.u,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"EDouble",eXP,"EDouble:Object"])),eMV(e.F,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"http://www.w3.org/2001/XMLSchema#float"])),eMV(e.G,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"EFloat",eXP,"EFloat:Object"])),eMV(e.I,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"http://www.w3.org/2001/XMLSchema#int"])),eMV(e.J,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"EInt",eXP,"EInt:Object"])),eMV(e.N,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"http://www.w3.org/2001/XMLSchema#long"])),eMV(e.O,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"ELong",eXP,"ELong:Object"])),eMV(e.Z,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"http://www.w3.org/2001/XMLSchema#short"])),eMV(e.$,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"EShort",eXP,"EShort:Object"])),eMV(e._,eJ7,eow(vx(e17,1),eUP,2,6,[eQd,"http://www.w3.org/2001/XMLSchema#string"]))}function eBI(e){var t,n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M,O;if(1==e.c.length)return GK(0,e.c.length),Pp(e.c[0],135);if(e.c.length<=0)return new Xn;for(u=new fz(e);u.af&&(M=0,O+=l+S,l=0),eOd(_,o,M,O),t=eB4.Math.max(t,M+E.a),l=eB4.Math.max(l,E.b),M+=E.a+S;for(w=new p2,n=new p2,x=new fz(e);x.aeMg(a))&&(f=a);for(f||(f=(GK(0,m.c.length),Pp(m.c[0],180))),b=new fz(t.b);b.a=-1900?1:0,n>=4?xM(e,eow(vx(e17,1),eUP,2,6,[eHM,eHO])[s]):xM(e,eow(vx(e17,1),eUP,2,6,["BC","AD"])[s]);break;case 
121:epA(e,n,r);break;case 77:eIZ(e,n,r);break;case 107:0==(u=i.q.getHours())?eeE(e,24,n):eeE(e,u,n);break;case 83:eOT(e,n,i);break;case 69:l=r.q.getDay(),5==n?xM(e,eow(vx(e17,1),eUP,2,6,["S","M","T","W","T","F","S"])[l]):4==n?xM(e,eow(vx(e17,1),eUP,2,6,[eHA,eHL,eHC,eHI,eHD,eHN,eHP])[l]):xM(e,eow(vx(e17,1),eUP,2,6,["Sun","Mon","Tue","Wed","Thu","Fri","Sat"])[l]);break;case 97:i.q.getHours()>=12&&24>i.q.getHours()?xM(e,eow(vx(e17,1),eUP,2,6,["AM","PM"])[1]):xM(e,eow(vx(e17,1),eUP,2,6,["AM","PM"])[0]);break;case 104:0==(f=i.q.getHours()%12)?eeE(e,12,n):eeE(e,f,n);break;case 75:eeE(e,d=i.q.getHours()%12,n);break;case 72:eeE(e,h=i.q.getHours(),n);break;case 99:p=r.q.getDay(),5==n?xM(e,eow(vx(e17,1),eUP,2,6,["S","M","T","W","T","F","S"])[p]):4==n?xM(e,eow(vx(e17,1),eUP,2,6,[eHA,eHL,eHC,eHI,eHD,eHN,eHP])[p]):3==n?xM(e,eow(vx(e17,1),eUP,2,6,["Sun","Mon","Tue","Wed","Thu","Fri","Sat"])[p]):eeE(e,p,1);break;case 76:b=r.q.getMonth(),5==n?xM(e,eow(vx(e17,1),eUP,2,6,["J","F","M","A","M","J","J","A","S","O","N","D"])[b]):4==n?xM(e,eow(vx(e17,1),eUP,2,6,[eHh,eHp,eHb,eHm,eHg,eHv,eHy,eHw,eH_,eHE,eHS,eHk])[b]):3==n?xM(e,eow(vx(e17,1),eUP,2,6,["Jan","Feb","Mar","Apr",eHg,"Jun","Jul","Aug","Sep","Oct","Nov","Dec"])[b]):eeE(e,b+1,n);break;case 81:m=r.q.getMonth()/3|0,n<4?xM(e,eow(vx(e17,1),eUP,2,6,["Q1","Q2","Q3","Q4"])[m]):xM(e,eow(vx(e17,1),eUP,2,6,["1st quarter","2nd quarter","3rd quarter","4th quarter"])[m]);break;case 100:eeE(e,g=r.q.getDate(),n);break;case 109:eeE(e,c=i.q.getMinutes(),n);break;case 115:eeE(e,o=i.q.getSeconds(),n);break;case 122:n<4?xM(e,a.c[0]):xM(e,a.c[1]);break;case 118:xM(e,a.b);break;case 90:n<3?xM(e,ekA(a)):3==n?xM(e,ek$(a)):xM(e,ekz(a.a));break;default:return!1}return!0}function eBF(e,t,n,r){var i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M,O,A;if(eIi(t),u=Pp(etj((t.b||(t.b=new Ih(e6m,t,4,7)),t.b),0),82),l=Pp(etj((t.c||(t.c=new Ih(e6m,t,5,8)),t.c),0),82),s=ewH(u),c=ewH(l),o=0==(t.a||(t.a=new FQ(e6v,t,6,6)),t.a).i?null:Pp(etj((t.a||(t.a=new 
FQ(e6v,t,6,6)),t.a),0),202),S=Pp(Bp(e.a,s),10),M=Pp(Bp(e.a,c),10),k=null,O=null,M4(u,186)&&(M4(E=Pp(Bp(e.a,u),299),11)?k=Pp(E,11):M4(E,10)&&(S=Pp(E,10),k=Pp(RJ(S.j,0),11))),M4(l,186)&&(M4(T=Pp(Bp(e.a,l),299),11)?O=Pp(T,11):M4(T,10)&&(M=Pp(T,10),O=Pp(RJ(M.j,0),11))),!S||!M)throw p7(new gZ("The source or the target of edge "+t+" could not be found. This usually happens when an edge connects a node laid out by ELK Layered to a node in another level of hierarchy laid out by either another instance of ELK Layered or another layout algorithm alltogether. The former can be solved by setting the hierarchyHandling option to INCLUDE_CHILDREN."));for(b=new $b,eaW(b,t),eo3(b,(eBU(),tnc),t),eo3(b,(eBy(),taR),null),h=Pp(e_k(r,tt3),21),S==M&&h.Fc((eLR(),ttT)),k||(_=(enY(),tsN),x=null,o&&TM(Pp(e_k(S,tol),98))&&(V2(x=new kl(o.j,o.k),zF(t)),qZ(x,n),etg(c,s)&&(_=tsD,C5(x,S.n))),k=ePH(S,x,_,r)),O||(_=(enY(),tsD),A=null,o&&TM(Pp(e_k(M,tol),98))&&(V2(A=new kl(o.b,o.c),zF(t)),qZ(A,n)),O=ePH(M,A,_,Bq(M))),Gs(b,k),Go(b,O),(k.e.c.length>1||k.g.c.length>1||O.e.c.length>1||O.g.c.length>1)&&h.Fc((eLR(),tt_)),d=new Ow((t.n||(t.n=new FQ(e6S,t,1,7)),t.n));d.e!=d.i.gc();)if(f=Pp(epH(d),137),!gN(LK(eT8(f,ta8)))&&f.a)switch(m=eca(f),P_(b.b,m),Pp(e_k(m,tab),272).g){case 1:case 2:h.Fc((eLR(),tty));break;case 0:h.Fc((eLR(),ttg)),eo3(m,tab,(etT(),tp_))}if(a=Pp(e_k(r,tas),314),g=Pp(e_k(r,ta3),315),i=a==(en7(),teR)||g==(ebG(),tsd),o&&0!=(o.a||(o.a=new O_(e6h,o,5)),o.a).i&&i){for(v=eEF(o),p=new mE,w=epL(v,0);w.b!=w.d.c;)y=Pp(Vv(w),8),P7(p,new TS(y));eo3(b,tnl,p)}return b}function 
eBY(e){e.gb||(e.gb=!0,e.b=eak(e,0),er6(e.b,18),er9(e.b,19),e.a=eak(e,1),er6(e.a,1),er9(e.a,2),er9(e.a,3),er9(e.a,4),er9(e.a,5),e.o=eak(e,2),er6(e.o,8),er6(e.o,9),er9(e.o,10),er9(e.o,11),er9(e.o,12),er9(e.o,13),er9(e.o,14),er9(e.o,15),er9(e.o,16),er9(e.o,17),er9(e.o,18),er9(e.o,19),er9(e.o,20),er9(e.o,21),er9(e.o,22),er9(e.o,23),ee9(e.o),ee9(e.o),ee9(e.o),ee9(e.o),ee9(e.o),ee9(e.o),ee9(e.o),ee9(e.o),ee9(e.o),ee9(e.o),e.p=eak(e,3),er6(e.p,2),er6(e.p,3),er6(e.p,4),er6(e.p,5),er9(e.p,6),er9(e.p,7),ee9(e.p),ee9(e.p),e.q=eak(e,4),er6(e.q,8),e.v=eak(e,5),er9(e.v,9),ee9(e.v),ee9(e.v),ee9(e.v),e.w=eak(e,6),er6(e.w,2),er6(e.w,3),er6(e.w,4),er9(e.w,5),e.B=eak(e,7),er9(e.B,1),ee9(e.B),ee9(e.B),ee9(e.B),e.Q=eak(e,8),er9(e.Q,0),ee9(e.Q),e.R=eak(e,9),er6(e.R,1),e.S=eak(e,10),ee9(e.S),ee9(e.S),ee9(e.S),ee9(e.S),ee9(e.S),ee9(e.S),ee9(e.S),ee9(e.S),ee9(e.S),ee9(e.S),ee9(e.S),ee9(e.S),ee9(e.S),ee9(e.S),ee9(e.S),e.T=eak(e,11),er9(e.T,10),er9(e.T,11),er9(e.T,12),er9(e.T,13),er9(e.T,14),ee9(e.T),ee9(e.T),e.U=eak(e,12),er6(e.U,2),er6(e.U,3),er9(e.U,4),er9(e.U,5),er9(e.U,6),er9(e.U,7),ee9(e.U),e.V=eak(e,13),er9(e.V,10),e.W=eak(e,14),er6(e.W,18),er6(e.W,19),er6(e.W,20),er9(e.W,21),er9(e.W,22),er9(e.W,23),e.bb=eak(e,15),er6(e.bb,10),er6(e.bb,11),er6(e.bb,12),er6(e.bb,13),er6(e.bb,14),er6(e.bb,15),er6(e.bb,16),er9(e.bb,17),ee9(e.bb),ee9(e.bb),e.eb=eak(e,16),er6(e.eb,2),er6(e.eb,3),er6(e.eb,4),er6(e.eb,5),er6(e.eb,6),er6(e.eb,7),er9(e.eb,8),er9(e.eb,9),e.ab=eak(e,17),er6(e.ab,0),er6(e.ab,1),e.H=eak(e,18),er9(e.H,0),er9(e.H,1),er9(e.H,2),er9(e.H,3),er9(e.H,4),er9(e.H,5),ee9(e.H),e.db=eak(e,19),er9(e.db,2),e.c=eax(e,20),e.d=eax(e,21),e.e=eax(e,22),e.f=eax(e,23),e.i=eax(e,24),e.g=eax(e,25),e.j=eax(e,26),e.k=eax(e,27),e.n=eax(e,28),e.r=eax(e,29),e.s=eax(e,30),e.t=eax(e,31),e.u=eax(e,32),e.fb=eax(e,33),e.A=eax(e,34),e.C=eax(e,35),e.D=eax(e,36),e.F=eax(e,37),e.G=eax(e,38),e.I=eax(e,39),e.J=eax(e,40),e.L=eax(e,41),e.M=eax(e,42),e.N=eax(e,43),e.O=eax(e,44),e.P=eax(e,45),e.X=eax(e,46),e.Y=eax(e,47),e.Z
=eax(e,48),e.$=eax(e,49),e._=eax(e,50),e.cb=eax(e,51),e.K=eax(e,52))}function eBB(){var e,t;eBB=A,tdQ=new pO(eZi),th6=new pO(eZa),td0=(ebx(),tdM),td1=new xX(eVi,td0),new pQ,td2=new xX(ezG,null),td3=new pO(eZo),td8=(eyY(),jL(tdX,eow(vx(e54,1),eU4,291,0,[tdK]))),td9=new xX(eVg,td8),td7=new xX(eVr,(OQ(),!1)),tht=(ec3(),tpv),the=new xX(eVu,tht),tho=(efE(),tpO),tha=new xX(eKB,tho),thc=new xX(eqC,!1),thf=(eck(),tpG),thl=new xX(eKP,thf),thP=new T3(12),thN=new xX(ezW,thP),thb=new xX(eGu,!1),thm=new xX(eVA,!1),thD=new xX(eGf,!1),thq=(ewf(),tbl),thV=new xX(eGc,thq),th3=new pO(eVT),th4=new pO(eGr),th5=new pO(eGo),th8=new pO(eGs),thv=new mE,thg=new xX(eVv,thv),td6=new xX(eV_,!1),thd=new xX(eVE,!1),new pO(eZs),thw=new mh,thy=new xX(eVM,thw),thI=new xX(eVt,!1),new pQ,th9=new xX(eZu,1),new xX(eZc,!0),ell(0),new xX(eZl,ell(100)),new xX(eZf,!1),ell(0),new xX(eZd,ell(4e3)),ell(0),new xX(eZh,ell(400)),new xX(eZp,!1),new xX(eZb,!1),new xX(eZm,!0),new xX(eZg,!1),td5=(edM(),tme),td4=new xX(eZr,td5),th7=new xX(eKQ,10),tpe=new xX(eK1,10),tpt=new xX(ez$,20),tpn=new xX(eK0,10),tpr=new xX(eGa,2),tpi=new xX(eK2,10),tpo=new xX(eK3,0),tps=new xX(eK6,5),tpu=new xX(eK4,1),tpc=new xX(eK5,1),tpl=new xX(eGi,20),tpf=new xX(eK9,10),tpp=new xX(eK8,10),tpa=new pO(eK7),tph=new T_,tpd=new xX(eVO,tph),thF=new pO(eVx),thj=!1,thR=new xX(eVk,thj),thE=new T3(5),th_=new xX(eVc,thE),thk=(eT7(),t=Pp(yw(e6t),9),new I1(t,Pp(CY(t,t.length),9),0)),thS=new xX(eGp,thk),thU=(epT(),tbt),thB=new xX(eVd,thU),th$=new pO(eVh),thz=new pO(eVp),thG=new pO(eVb),thH=new pO(eVm),thT=(e=Pp(yw(e6o),9),new I1(e,Pp(CY(e,e.length),9),0)),thx=new xX(eGh,thT),thC=el9((eI3(),tbQ)),thL=new xX(eGd,thC),thA=new kl(0,0),thO=new xX(eGM,thA),thM=new xX(eVs,!1),thi=(etT(),tp_),thr=new xX(eVy,thi),thn=new xX(eGl,!1),new pO(eZv),ell(1),new xX(eZy,null),thW=new pO(eVS),thZ=new pO(eVw),th2=(eYu(),tbF),th0=new xX(eVn,th2),thK=new pO(eVe),thQ=(ekU(),el9(tbm)),thJ=new xX(eGb,thQ),thX=new xX(eVl,!1),th1=new xX(eVf,!0),thh=new xX(eVa,!1),thp=new 
xX(eVo,!1),ths=new xX(ezz,1),thu=(e_a(),tpN),new xX(eZw,thu),thY=!0}function eBU(){var e,t;eBU=A,tnc=new pO(eGm),ttz=new pO("coordinateOrigin"),tny=new pO("processors"),tt$=new Cm("compoundNode",(OQ(),!1)),tt6=new Cm("insideConnections",!1),tnl=new pO("originalBendpoints"),tnf=new pO("originalDummyNodePosition"),tnd=new pO("originalLabelEdge"),tn_=new pO("representedLabels"),ttq=new pO("endLabels"),ttZ=new pO("endLabel.origin"),tnt=new Cm("labelSide",(egF(),tpX)),tns=new Cm("maxEdgeThickness",0),tnE=new Cm("reversed",!1),tnw=new pO(eGg),tni=new Cm("longEdgeSource",null),tna=new Cm("longEdgeTarget",null),tnr=new Cm("longEdgeHasLabelDummies",!1),tnn=new Cm("longEdgeBeforeLabelDummy",!1),ttV=new Cm("edgeConstraint",(eoG(),te1)),tt8=new pO("inLayerLayoutUnit"),tt9=new Cm("inLayerConstraint",(Q1(),ttD)),tt7=new Cm("inLayerSuccessorConstraint",new p0),tne=new Cm("inLayerSuccessorConstraintBetweenNonDummies",!1),tng=new pO("portDummy"),ttG=new Cm("crossingHint",ell(0)),tt3=new Cm("graphProperties",(t=Pp(yw(e44),9),new I1(t,Pp(CY(t,t.length),9),0))),tt1=new Cm("externalPortSide",(eYu(),tbF)),tt0=new Cm("externalPortSize",new yb),ttJ=new pO("externalPortReplacedDummies"),ttQ=new pO("externalPortReplacedDummy"),ttX=new Cm("externalPortConnections",(e=Pp(yw(e6a),9),new I1(e,Pp(CY(e,e.length),9),0))),tnv=new Cm(ezf,0),ttY=new pO("barycenterAssociates"),tnI=new pO("TopSideComments"),ttB=new pO("BottomSideComments"),ttH=new pO("CommentConnectionPort"),tt5=new Cm("inputCollect",!1),tnb=new Cm("outputCollect",!1),ttK=new Cm("cyclic",!1),ttW=new pO("crossHierarchyMap"),tnC=new pO("targetOffset"),new Cm("splineLabelSize",new yb),tnx=new pO("spacings"),tnm=new Cm("partitionConstraint",!1),ttU=new pO("breakingPoint.info"),tnA=new pO("splines.survivingEdge"),tnO=new pO("splines.route.start"),tnT=new pO("splines.edgeChain"),tnp=new pO("originalPortConstraints"),tnk=new pO("selfLoopHolder"),tnM=new pO("splines.nsPortY"),tnu=new pO("modelOrder"),tno=new pO("longEdgeTargetNode"),tt2=new 
Cm(eW_,!1),tnS=new Cm(eW_,!1),tt4=new pO("layerConstraints.hiddenNodes"),tnh=new pO("layerConstraints.opposidePort"),tnL=new pO("targetNode.modelOrder")}function eBH(){eBH=A,trl=(eeF(),teZ),trc=new xX(eWE,trl),trO=new xX(eWS,(OQ(),!1)),trN=(K6(),ttR),trD=new xX(eWk,trN),trJ=new xX(eWx,!1),trQ=new xX(eWT,!0),tnY=new xX(eWM,!1),tic=(Q0(),tsL),tiu=new xX(eWO,tic),ell(1),tig=new xX(eWA,ell(7)),tiv=new xX(eWL,!1),trA=new xX(eWC,!1),tru=(eb6(),teG),trs=new xX(eWI,tru),trX=(ewY(),to7),trZ=new xX(eWD,trX),trU=(ef_(),tnj),trB=new xX(eWN,trU),ell(-1),trY=new xX(eWP,ell(-1)),ell(-1),trH=new xX(eWR,ell(-1)),ell(-1),tr$=new xX(eWj,ell(4)),ell(-1),trG=new xX(eWF,ell(2)),trq=(eOJ(),tsS),trV=new xX(eWY,trq),ell(0),trK=new xX(eWB,ell(0)),trj=new xX(eWU,ell(eUu)),tro=(en7(),tej),tra=new xX(eWH,tro),tn1=new xX(eW$,!1),tn7=new xX(eWz,.1),trr=new xX(eWG,!1),ell(-1),trt=new xX(eWW,ell(-1)),ell(-1),trn=new xX(eWK,ell(-1)),ell(0),tn0=new xX(eWV,ell(40)),tn9=(eaU(),ttL),tn6=new xX(eWq,tn9),tn3=ttO,tn2=new xX(eWZ,tn3),tis=(ebG(),tsf),tio=new xX(eWX,tis),tr6=new pO(eWJ),tr0=(Qx(),tte),tr1=new xX(eWQ,tr0),tr4=(eyd(),tto),tr3=new xX(eW1,tr4),new pQ,tr7=new xX(eW0,.3),tit=new pO(eW2),tir=(ebk(),tsu),tin=new xX(eW3,tir),trv=(ei0(),tsF),trg=new xX(eW4,trv),tr_=(Xo(),tsH),trw=new xX(eW5,tr_),trS=(euy(),tsW),trE=new xX(eW6,trS),trx=new xX(eW9,.2),trb=new xX(eW8,2),tih=new xX(eW7,null),tib=new xX(eKe,10),tip=new xX(eKt,10),tim=new xX(eKn,20),ell(0),til=new xX(eKr,ell(0)),ell(0),tif=new xX(eKi,ell(0)),ell(0),tid=new xX(eKa,ell(0)),tnB=new xX(eKo,!1),tnz=(e_3(),ttp),tn$=new xX(eKs,tnz),tnH=(Jp(),teN),tnU=new xX(eKu,tnH),trC=new xX(eKc,!1),ell(0),trL=new xX(eKl,ell(16)),ell(0),trI=new xX(eKf,ell(5)),tiU=(eox(),tsQ),tiB=new xX(eKd,tiU),tiy=new xX(eKh,10),tiE=new xX(eKp,1),tiC=(enB(),teH),tiL=new xX(eKb,tiC),tix=new pO(eKm),tiO=ell(1),ell(0),tiM=new xX(eKg,tiO),tiW=(eiO(),tsV),tiG=new xX(eKv,tiW),tiH=new pO(eKy),tiR=new xX(eKw,!0),tiN=new xX(eK_,2),tiF=new xX(eKE,!0),trp=(eEf(),te9),trh=new 
xX(eKS,trp),trd=(eSg(),teO),trf=new xX(eKk,trd),tnQ=(esn(),tsM),tnJ=new xX(eKx,tnQ),tnX=new xX(eKT,!1),tnW=(ec4(),e8x),tnG=new xX(eKM,tnW),tnZ=(euJ(),tsn),tnq=new xX(eKO,tnZ),tnK=new xX(eKA,0),tnV=new xX(eKL,0),trR=teK,trP=teR,trz=to8,trW=to8,trF=to5,tre=(eck(),tpz),tri=tej,tn8=tej,tn4=tej,tn5=tpz,tr9=tsp,tr8=tsf,tr2=tsf,tr5=tsf,tie=tsh,tia=tsp,tii=tsp,trk=(efE(),tpM),trT=tpM,trM=tsW,trm=tpT,tiw=ts1,ti_=tsJ,tiS=ts1,tik=tsJ,tiI=ts1,tiD=tsJ,tiT=teU,tiA=teH,tiK=ts1,tiV=tsJ,ti$=ts1,tiz=tsJ,tij=tsJ,tiP=tsJ,tiY=tsJ}function eB$(){eB$=A,e85=new Eq("DIRECTION_PREPROCESSOR",0),e82=new Eq("COMMENT_PREPROCESSOR",1),e86=new Eq("EDGE_AND_LAYER_CONSTRAINT_EDGE_REVERSER",2),e7d=new Eq("INTERACTIVE_EXTERNAL_PORT_POSITIONER",3),e7C=new Eq("PARTITION_PREPROCESSOR",4),e7m=new Eq("LABEL_DUMMY_INSERTER",5),e7j=new Eq("SELF_LOOP_PREPROCESSOR",6),e7_=new Eq("LAYER_CONSTRAINT_PREPROCESSOR",7),e7A=new Eq("PARTITION_MIDPROCESSOR",8),e7s=new Eq("HIGH_DEGREE_NODE_LAYER_PROCESSOR",9),e7x=new Eq("NODE_PROMOTION",10),e7w=new Eq("LAYER_CONSTRAINT_POSTPROCESSOR",11),e7L=new Eq("PARTITION_POSTPROCESSOR",12),e7r=new Eq("HIERARCHICAL_PORT_CONSTRAINT_PROCESSOR",13),e7Y=new Eq("SEMI_INTERACTIVE_CROSSMIN_PROCESSOR",14),e8Z=new Eq("BREAKING_POINT_INSERTER",15),e7k=new Eq("LONG_EDGE_SPLITTER",16),e7D=new Eq("PORT_SIDE_PROCESSOR",17),e7h=new Eq("INVERTED_PORT_PROCESSOR",18),e7I=new Eq("PORT_LIST_SORTER",19),e7U=new Eq("SORT_BY_INPUT_ORDER_OF_MODEL",20),e7M=new Eq("NORTH_SOUTH_PORT_PREPROCESSOR",21),e8X=new Eq("BREAKING_POINT_PROCESSOR",22),e7O=new Eq(eG7,23),e7H=new Eq(eWe,24),e7P=new Eq("SELF_LOOP_PORT_RESTORER",25),e7B=new Eq("SINGLE_EDGE_GRAPH_WRAPPER",26),e7p=new Eq("IN_LAYER_CONSTRAINT_PROCESSOR",27),e7e=new Eq("END_NODE_PORT_LABEL_MANAGEMENT_PROCESSOR",28),e7b=new Eq("LABEL_AND_NODE_SIZE_PROCESSOR",29),e7f=new Eq("INNERMOST_NODE_MARGIN_CALCULATOR",30),e7F=new Eq("SELF_LOOP_ROUTER",31),e81=new Eq("COMMENT_NODE_MARGIN_CALCULATOR",32),e88=new Eq("END_LABEL_PREPROCESSOR",33),e7v=new 
Eq("LABEL_DUMMY_SWITCHER",34),e8Q=new Eq("CENTER_LABEL_MANAGEMENT_PROCESSOR",35),e7y=new Eq("LABEL_SIDE_SELECTOR",36),e7c=new Eq("HYPEREDGE_DUMMY_MERGER",37),e7i=new Eq("HIERARCHICAL_PORT_DUMMY_SIZE_PROCESSOR",38),e7E=new Eq("LAYER_SIZE_AND_GRAPH_HEIGHT_CALCULATOR",39),e7o=new Eq("HIERARCHICAL_PORT_POSITION_PROCESSOR",40),e83=new Eq("CONSTRAINTS_POSTPROCESSOR",41),e80=new Eq("COMMENT_POSTPROCESSOR",42),e7l=new Eq("HYPERNODE_PROCESSOR",43),e7a=new Eq("HIERARCHICAL_PORT_ORTHOGONAL_EDGE_ROUTER",44),e7S=new Eq("LONG_EDGE_JOINER",45),e7R=new Eq("SELF_LOOP_POSTPROCESSOR",46),e8J=new Eq("BREAKING_POINT_REMOVER",47),e7T=new Eq("NORTH_SOUTH_PORT_POSTPROCESSOR",48),e7u=new Eq("HORIZONTAL_COMPACTOR",49),e7g=new Eq("LABEL_DUMMY_REMOVER",50),e7t=new Eq("FINAL_SPLINE_BENDPOINTS_CALCULATOR",51),e87=new Eq("END_LABEL_SORTER",52),e7N=new Eq("REVERSED_EDGE_RESTORER",53),e89=new Eq("END_LABEL_POSTPROCESSOR",54),e7n=new Eq("HIERARCHICAL_NODE_RESIZER",55),e84=new Eq("DIRECTION_POSTPROCESSOR",56)}function eBz(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M,O,A,L,C,I,D,N,P,R,j,F,Y,B,U,H,$,z,G,W,K,V,q,Z,X,J,Q,ee,et,en,er,ei,ea,eo;for(I=0,X=0,P=(A=t).length;I0&&(e.a[H.p]=X++)}for(D=0,en=0,R=(L=n).length;D0;){for(H=(A6(W.b>0),Pp(W.a.Xb(W.c=--W.b),11)),G=0,s=new fz(H.e);s.a0&&(H.j==(eYu(),tbw)?(e.a[H.p]=en,++en):(e.a[H.p]=en+j+Y,++Y))}en+=Y}for(C=0,z=new p2,p=new Tw,N=(O=t).length;Cc.b&&(c.b=K)):H.i.c==Z&&(Kc.c&&(c.c=K));for(Qe(b,0,b.length,null),et=Je(ty_,eHT,25,b.length,15,1),r=Je(ty_,eHT,25,en+1,15,1),g=0;g0;)S%2>0&&(i+=ea[S+1]),S=(S-1)/2|0,++ea[S];for(w=0,x=Je(e5g,eUp,362,2*b.length,0,1);w'?":IE(eXJ,e)?"'(?<' or '(? 
toIndex: ",e$M=", toIndex: ",e$O="Index: ",e$A=", Size: ",e$L="org.eclipse.elk.alg.common",e$C={62:1},e$I="org.eclipse.elk.alg.common.compaction",e$D="Scanline/EventHandler",e$N="org.eclipse.elk.alg.common.compaction.oned",e$P="CNode belongs to another CGroup.",e$R="ISpacingsHandler/1",e$j="The ",e$F=" instance has been finished already.",e$Y="The direction ",e$B=" is not supported by the CGraph instance.",e$U="OneDimensionalCompactor",e$H="OneDimensionalCompactor/lambda$0$Type",e$$="Quadruplet",e$z="ScanlineConstraintCalculator",e$G="ScanlineConstraintCalculator/ConstraintsScanlineHandler",e$W="ScanlineConstraintCalculator/ConstraintsScanlineHandler/lambda$0$Type",e$K="ScanlineConstraintCalculator/Timestamp",e$V="ScanlineConstraintCalculator/lambda$0$Type",e$q={169:1,45:1},e$Z="org.eclipse.elk.alg.common.compaction.options",e$X="org.eclipse.elk.core.data",e$J="org.eclipse.elk.polyomino.traversalStrategy",e$Q="org.eclipse.elk.polyomino.lowLevelSort",e$1="org.eclipse.elk.polyomino.highLevelSort",e$0="org.eclipse.elk.polyomino.fill",e$2={130:1},e$3="polyomino",e$4="org.eclipse.elk.alg.common.networksimplex",e$5={177:1,3:1,4:1},e$6="org.eclipse.elk.alg.common.nodespacing",e$9="org.eclipse.elk.alg.common.nodespacing.cellsystem",e$8="CENTER",e$7={212:1,326:1},eze={3:1,4:1,5:1,595:1},ezt="LEFT",ezn="RIGHT",ezr="Vertical alignment cannot be null",ezi="BOTTOM",eza="org.eclipse.elk.alg.common.nodespacing.internal",ezo="UNDEFINED",ezs=.01,ezu="org.eclipse.elk.alg.common.nodespacing.internal.algorithm",ezc="LabelPlacer/lambda$0$Type",ezl="LabelPlacer/lambda$1$Type",ezf="portRatioOrPosition",ezd="org.eclipse.elk.alg.common.overlaps",ezh="DOWN",ezp="org.eclipse.elk.alg.common.polyomino",ezb="NORTH",ezm="EAST",ezg="SOUTH",ezv="WEST",ezy="org.eclipse.elk.alg.common.polyomino.structures",ezw="Direction",ez_="Grid is only of size ",ezE=". 
Requested point (",ezS=") is out of bounds.",ezk=" Given center based coordinates were (",ezx="org.eclipse.elk.graph.properties",ezT="IPropertyHolder",ezM={3:1,94:1,134:1},ezO="org.eclipse.elk.alg.common.spore",ezA="org.eclipse.elk.alg.common.utils",ezL={209:1},ezC="org.eclipse.elk.core",ezI="Connected Components Compaction",ezD="org.eclipse.elk.alg.disco",ezN="org.eclipse.elk.alg.disco.graph",ezP="org.eclipse.elk.alg.disco.options",ezR="CompactionStrategy",ezj="org.eclipse.elk.disco.componentCompaction.strategy",ezF="org.eclipse.elk.disco.componentCompaction.componentLayoutAlgorithm",ezY="org.eclipse.elk.disco.debug.discoGraph",ezB="org.eclipse.elk.disco.debug.discoPolys",ezU="componentCompaction",ezH="org.eclipse.elk.disco",ez$="org.eclipse.elk.spacing.componentComponent",ezz="org.eclipse.elk.edge.thickness",ezG="org.eclipse.elk.aspectRatio",ezW="org.eclipse.elk.padding",ezK="org.eclipse.elk.alg.disco.transform",ezV=1.5707963267948966,ezq=17976931348623157e292,ezZ={3:1,4:1,5:1,192:1},ezX={3:1,6:1,4:1,5:1,106:1,120:1},ezJ="org.eclipse.elk.alg.force",ezQ="ComponentsProcessor",ez1="ComponentsProcessor/1",ez0="org.eclipse.elk.alg.force.graph",ez2="Component 
Layout",ez3="org.eclipse.elk.alg.force.model",ez4="org.eclipse.elk.force.model",ez5="org.eclipse.elk.force.iterations",ez6="org.eclipse.elk.force.repulsivePower",ez9="org.eclipse.elk.force.temperature",ez8=.001,ez7="org.eclipse.elk.force.repulsion",eGe="org.eclipse.elk.alg.force.options",eGt=1.600000023841858,eGn="org.eclipse.elk.force",eGr="org.eclipse.elk.priority",eGi="org.eclipse.elk.spacing.nodeNode",eGa="org.eclipse.elk.spacing.edgeLabel",eGo="org.eclipse.elk.randomSeed",eGs="org.eclipse.elk.separateConnectedComponents",eGu="org.eclipse.elk.interactive",eGc="org.eclipse.elk.portConstraints",eGl="org.eclipse.elk.edgeLabels.inline",eGf="org.eclipse.elk.omitNodeMicroLayout",eGd="org.eclipse.elk.nodeSize.options",eGh="org.eclipse.elk.nodeSize.constraints",eGp="org.eclipse.elk.nodeLabels.placement",eGb="org.eclipse.elk.portLabels.placement",eGm="origin",eGg="random",eGv="boundingBox.upLeft",eGy="boundingBox.lowRight",eGw="org.eclipse.elk.stress.fixed",eG_="org.eclipse.elk.stress.desiredEdgeLength",eGE="org.eclipse.elk.stress.dimension",eGS="org.eclipse.elk.stress.epsilon",eGk="org.eclipse.elk.stress.iterationLimit",eGx="org.eclipse.elk.stress",eGT="ELK Stress",eGM="org.eclipse.elk.nodeSize.minimum",eGO="org.eclipse.elk.alg.force.stress",eGA="Layered layout",eGL="org.eclipse.elk.alg.layered",eGC="org.eclipse.elk.alg.layered.compaction.components",eGI="org.eclipse.elk.alg.layered.compaction.oned",eGD="org.eclipse.elk.alg.layered.compaction.oned.algs",eGN="org.eclipse.elk.alg.layered.compaction.recthull",eGP="org.eclipse.elk.alg.layered.components",eGR="NONE",eGj={3:1,6:1,4:1,9:1,5:1,122:1},eGF={3:1,6:1,4:1,5:1,141:1,106:1,120:1},eGY="org.eclipse.elk.alg.layered.compound",eGB={51:1},eGU="org.eclipse.elk.alg.layered.graph",eGH=" -> ",eG$="Not supported by LGraph",eGz="Port side is undefined",eGG={3:1,6:1,4:1,5:1,474:1,141:1,106:1,120:1},eGW={3:1,6:1,4:1,5:1,141:1,193:1,203:1,106:1,120:1},eGK={3:1,6:1,4:1,5:1,141:1,1943:1,203:1,106:1,120:1},eGV="([{\"' 
\r\n",eGq=")]}\"' \r\n",eGZ="The given string contains parts that cannot be parsed as numbers.",eGX="org.eclipse.elk.core.math",eGJ={3:1,4:1,142:1,207:1,414:1},eGQ={3:1,4:1,116:1,207:1,414:1},eG1="org.eclipse.elk.layered",eG0="org.eclipse.elk.alg.layered.graph.transform",eG2="ElkGraphImporter",eG3="ElkGraphImporter/lambda$0$Type",eG4="ElkGraphImporter/lambda$1$Type",eG5="ElkGraphImporter/lambda$2$Type",eG6="ElkGraphImporter/lambda$4$Type",eG9="Node margin calculation",eG8="org.eclipse.elk.alg.layered.intermediate",eG7="ONE_SIDED_GREEDY_SWITCH",eWe="TWO_SIDED_GREEDY_SWITCH",eWt="No implementation is available for the layout processor ",eWn="IntermediateProcessorStrategy",eWr="Node '",eWi="FIRST_SEPARATE",eWa="LAST_SEPARATE",eWo="Odd port side processing",eWs="org.eclipse.elk.alg.layered.intermediate.compaction",eWu="org.eclipse.elk.alg.layered.intermediate.greedyswitch",eWc="org.eclipse.elk.alg.layered.p3order.counting",eWl={225:1},eWf="org.eclipse.elk.alg.layered.intermediate.loops",eWd="org.eclipse.elk.alg.layered.intermediate.loops.ordering",eWh="org.eclipse.elk.alg.layered.intermediate.loops.routing",eWp="org.eclipse.elk.alg.layered.intermediate.preserveorder",eWb="org.eclipse.elk.alg.layered.intermediate.wrapping",eWm="org.eclipse.elk.alg.layered.options",eWg="INTERACTIVE",eWv="DEPTH_FIRST",eWy="EDGE_LENGTH",eWw="SELF_LOOPS",eW_="firstTryWithInitialOrder",eWE="org.eclipse.elk.layered.directionCongruency",eWS="org.eclipse.elk.layered.feedbackEdges",eWk="org.eclipse.elk.layered.interactiveReferencePoint",eWx="org.eclipse.elk.layered.mergeEdges",eWT="org.eclipse.elk.layered.mergeHierarchyEdges",eWM="org.eclipse.elk.layered.allowNonFlowPortsToSwitchSides",eWO="org.eclipse.elk.layered.portSortingStrategy",eWA="org.eclipse.elk.layered.thoroughness",eWL="org.eclipse.elk.layered.unnecessaryBendpoints",eWC="org.eclipse.elk.layered.generatePositionAndLayerIds",eWI="org.eclipse.elk.layered.cycleBreaking.strategy",eWD="org.eclipse.elk.layered.layering.strategy",eWN="org.ecl
ipse.elk.layered.layering.layerConstraint",eWP="org.eclipse.elk.layered.layering.layerChoiceConstraint",eWR="org.eclipse.elk.layered.layering.layerId",eWj="org.eclipse.elk.layered.layering.minWidth.upperBoundOnWidth",eWF="org.eclipse.elk.layered.layering.minWidth.upperLayerEstimationScalingFactor",eWY="org.eclipse.elk.layered.layering.nodePromotion.strategy",eWB="org.eclipse.elk.layered.layering.nodePromotion.maxIterations",eWU="org.eclipse.elk.layered.layering.coffmanGraham.layerBound",eWH="org.eclipse.elk.layered.crossingMinimization.strategy",eW$="org.eclipse.elk.layered.crossingMinimization.forceNodeModelOrder",eWz="org.eclipse.elk.layered.crossingMinimization.hierarchicalSweepiness",eWG="org.eclipse.elk.layered.crossingMinimization.semiInteractive",eWW="org.eclipse.elk.layered.crossingMinimization.positionChoiceConstraint",eWK="org.eclipse.elk.layered.crossingMinimization.positionId",eWV="org.eclipse.elk.layered.crossingMinimization.greedySwitch.activationThreshold",eWq="org.eclipse.elk.layered.crossingMinimization.greedySwitch.type",eWZ="org.eclipse.elk.layered.crossingMinimization.greedySwitchHierarchical.type",eWX="org.eclipse.elk.layered.nodePlacement.strategy",eWJ="org.eclipse.elk.layered.nodePlacement.favorStraightEdges",eWQ="org.eclipse.elk.layered.nodePlacement.bk.edgeStraightening",eW1="org.eclipse.elk.layered.nodePlacement.bk.fixedAlignment",eW0="org.eclipse.elk.layered.nodePlacement.linearSegments.deflectionDampening",eW2="org.eclipse.elk.layered.nodePlacement.networkSimplex.nodeFlexibility",eW3="org.eclipse.elk.layered.nodePlacement.networkSimplex.nodeFlexibility.default",eW4="org.eclipse.elk.layered.edgeRouting.selfLoopDistribution",eW5="org.eclipse.elk.layered.edgeRouting.selfLoopOrdering",eW6="org.eclipse.elk.layered.edgeRouting.splines.mode",eW9="org.eclipse.elk.layered.edgeRouting.splines.sloppy.layerSpacingFactor",eW8="org.eclipse.elk.layered.edgeRouting.polyline.slopedEdgeZoneWidth",eW7="org.eclipse.elk.layered.spacing.baseValue",eKe="org.ecl
ipse.elk.layered.spacing.edgeNodeBetweenLayers",eKt="org.eclipse.elk.layered.spacing.edgeEdgeBetweenLayers",eKn="org.eclipse.elk.layered.spacing.nodeNodeBetweenLayers",eKr="org.eclipse.elk.layered.priority.direction",eKi="org.eclipse.elk.layered.priority.shortness",eKa="org.eclipse.elk.layered.priority.straightness",eKo="org.eclipse.elk.layered.compaction.connectedComponents",eKs="org.eclipse.elk.layered.compaction.postCompaction.strategy",eKu="org.eclipse.elk.layered.compaction.postCompaction.constraints",eKc="org.eclipse.elk.layered.highDegreeNodes.treatment",eKl="org.eclipse.elk.layered.highDegreeNodes.threshold",eKf="org.eclipse.elk.layered.highDegreeNodes.treeHeight",eKd="org.eclipse.elk.layered.wrapping.strategy",eKh="org.eclipse.elk.layered.wrapping.additionalEdgeSpacing",eKp="org.eclipse.elk.layered.wrapping.correctionFactor",eKb="org.eclipse.elk.layered.wrapping.cutting.strategy",eKm="org.eclipse.elk.layered.wrapping.cutting.cuts",eKg="org.eclipse.elk.layered.wrapping.cutting.msd.freedom",eKv="org.eclipse.elk.layered.wrapping.validify.strategy",eKy="org.eclipse.elk.layered.wrapping.validify.forbiddenIndices",eKw="org.eclipse.elk.layered.wrapping.multiEdge.improveCuts",eK_="org.eclipse.elk.layered.wrapping.multiEdge.distancePenalty",eKE="org.eclipse.elk.layered.wrapping.multiEdge.improveWrappedEdges",eKS="org.eclipse.elk.layered.edgeLabels.sideSelection",eKk="org.eclipse.elk.layered.edgeLabels.centerLabelPlacementStrategy",eKx="org.eclipse.elk.layered.considerModelOrder.strategy",eKT="org.eclipse.elk.layered.considerModelOrder.noModelOrder",eKM="org.eclipse.elk.layered.considerModelOrder.components",eKO="org.eclipse.elk.layered.considerModelOrder.longEdgeStrategy",eKA="org.eclipse.elk.layered.considerModelOrder.crossingCounterNodeInfluence",eKL="org.eclipse.elk.layered.considerModelOrder.crossingCounterPortInfluence",eKC="layering",eKI="layering.minWidth",eKD="layering.nodePromotion",eKN="crossingMinimization",eKP="org.eclipse.elk.hierarchyHandling",eKR="cro
ssingMinimization.greedySwitch",eKj="nodePlacement",eKF="nodePlacement.bk",eKY="edgeRouting",eKB="org.eclipse.elk.edgeRouting",eKU="spacing",eKH="priority",eK$="compaction",eKz="compaction.postCompaction",eKG="Specifies whether and how post-process compaction is applied.",eKW="highDegreeNodes",eKK="wrapping",eKV="wrapping.cutting",eKq="wrapping.validify",eKZ="wrapping.multiEdge",eKX="edgeLabels",eKJ="considerModelOrder",eKQ="org.eclipse.elk.spacing.commentComment",eK1="org.eclipse.elk.spacing.commentNode",eK0="org.eclipse.elk.spacing.edgeEdge",eK2="org.eclipse.elk.spacing.edgeNode",eK3="org.eclipse.elk.spacing.labelLabel",eK4="org.eclipse.elk.spacing.labelPortHorizontal",eK5="org.eclipse.elk.spacing.labelPortVertical",eK6="org.eclipse.elk.spacing.labelNode",eK9="org.eclipse.elk.spacing.nodeSelfLoop",eK8="org.eclipse.elk.spacing.portPort",eK7="org.eclipse.elk.spacing.individual",eVe="org.eclipse.elk.port.borderOffset",eVt="org.eclipse.elk.noLayout",eVn="org.eclipse.elk.port.side",eVr="org.eclipse.elk.debugMode",eVi="org.eclipse.elk.alignment",eVa="org.eclipse.elk.insideSelfLoops.activate",eVo="org.eclipse.elk.insideSelfLoops.yo",eVs="org.eclipse.elk.nodeSize.fixedGraphSize",eVu="org.eclipse.elk.direction",eVc="org.eclipse.elk.nodeLabels.padding",eVl="org.eclipse.elk.portLabels.nextToPortIfPossible",eVf="org.eclipse.elk.portLabels.treatAsGroup",eVd="org.eclipse.elk.portAlignment.default",eVh="org.eclipse.elk.portAlignment.north",eVp="org.eclipse.elk.portAlignment.south",eVb="org.eclipse.elk.portAlignment.west",eVm="org.eclipse.elk.portAlignment.east",eVg="org.eclipse.elk.contentAlignment",eVv="org.eclipse.elk.junctionPoints",eVy="org.eclipse.elk.edgeLabels.placement",eVw="org.eclipse.elk.port.index",eV_="org.eclipse.elk.commentBox",eVE="org.eclipse.elk.hypernode",eVS="org.eclipse.elk.port.anchor",eVk="org.eclipse.elk.partitioning.activate",eVx="org.eclipse.elk.partitioning.partition",eVT="org.eclipse.elk.position",eVM="org.eclipse.elk.margins",eVO="org.eclipse.elk.spa
cing.portsSurrounding",eVA="org.eclipse.elk.interactiveLayout",eVL="org.eclipse.elk.core.util",eVC={3:1,4:1,5:1,593:1},eVI="NETWORK_SIMPLEX",eVD={123:1,51:1},eVN="org.eclipse.elk.alg.layered.p1cycles",eVP="org.eclipse.elk.alg.layered.p2layers",eVR={402:1,225:1},eVj={832:1,3:1,4:1},eVF="org.eclipse.elk.alg.layered.p3order",eVY="org.eclipse.elk.alg.layered.p4nodes",eVB={3:1,4:1,5:1,840:1},eVU=1e-5,eVH="org.eclipse.elk.alg.layered.p4nodes.bk",eV$="org.eclipse.elk.alg.layered.p5edges",eVz="org.eclipse.elk.alg.layered.p5edges.orthogonal",eVG="org.eclipse.elk.alg.layered.p5edges.orthogonal.direction",eVW=1e-6,eVK="org.eclipse.elk.alg.layered.p5edges.splines",eVV=.09999999999999998,eVq=1e-8,eVZ=4.71238898038469,eVX=3.141592653589793,eVJ="org.eclipse.elk.alg.mrtree",eVQ="org.eclipse.elk.alg.mrtree.graph",eV1="org.eclipse.elk.alg.mrtree.intermediate",eV0="Set neighbors in level",eV2="DESCENDANTS",eV3="org.eclipse.elk.mrtree.weighting",eV4="org.eclipse.elk.mrtree.searchOrder",eV5="org.eclipse.elk.alg.mrtree.options",eV6="org.eclipse.elk.mrtree",eV9="org.eclipse.elk.tree",eV8="org.eclipse.elk.alg.radial",eV7=6.283185307179586,eqe=5e-324,eqt="org.eclipse.elk.alg.radial.intermediate",eqn="org.eclipse.elk.alg.radial.intermediate.compaction",eqr={3:1,4:1,5:1,106:1},eqi="org.eclipse.elk.alg.radial.intermediate.optimization",eqa="No implementation is available for the layout option 
",eqo="org.eclipse.elk.alg.radial.options",eqs="org.eclipse.elk.radial.orderId",equ="org.eclipse.elk.radial.radius",eqc="org.eclipse.elk.radial.compactor",eql="org.eclipse.elk.radial.compactionStepSize",eqf="org.eclipse.elk.radial.sorter",eqd="org.eclipse.elk.radial.wedgeCriteria",eqh="org.eclipse.elk.radial.optimizationCriteria",eqp="org.eclipse.elk.radial",eqb="org.eclipse.elk.alg.radial.p1position.wedge",eqm="org.eclipse.elk.alg.radial.sorting",eqg=5.497787143782138,eqv=3.9269908169872414,eqy=2.356194490192345,eqw="org.eclipse.elk.alg.rectpacking",eq_="org.eclipse.elk.alg.rectpacking.firstiteration",eqE="org.eclipse.elk.alg.rectpacking.options",eqS="org.eclipse.elk.rectpacking.optimizationGoal",eqk="org.eclipse.elk.rectpacking.lastPlaceShift",eqx="org.eclipse.elk.rectpacking.currentPosition",eqT="org.eclipse.elk.rectpacking.desiredPosition",eqM="org.eclipse.elk.rectpacking.onlyFirstIteration",eqO="org.eclipse.elk.rectpacking.rowCompaction",eqA="org.eclipse.elk.rectpacking.expandToAspectRatio",eqL="org.eclipse.elk.rectpacking.targetWidth",eqC="org.eclipse.elk.expandNodes",eqI="org.eclipse.elk.rectpacking",eqD="org.eclipse.elk.alg.rectpacking.util",eqN="No implementation available for 
",eqP="org.eclipse.elk.alg.spore",eqR="org.eclipse.elk.alg.spore.options",eqj="org.eclipse.elk.sporeCompaction",eqF="org.eclipse.elk.underlyingLayoutAlgorithm",eqY="org.eclipse.elk.processingOrder.treeConstruction",eqB="org.eclipse.elk.processingOrder.spanningTreeCostFunction",eqU="org.eclipse.elk.processingOrder.preferredRoot",eqH="org.eclipse.elk.processingOrder.rootSelection",eq$="org.eclipse.elk.structure.structureExtractionStrategy",eqz="org.eclipse.elk.compaction.compactionStrategy",eqG="org.eclipse.elk.compaction.orthogonal",eqW="org.eclipse.elk.overlapRemoval.maxIterations",eqK="org.eclipse.elk.overlapRemoval.runScanline",eqV="processingOrder",eqq="overlapRemoval",eqZ="org.eclipse.elk.sporeOverlap",eqX="org.eclipse.elk.alg.spore.p1structure",eqJ="org.eclipse.elk.alg.spore.p2processingorder",eqQ="org.eclipse.elk.alg.spore.p3execution",eq1="Invalid index: ",eq0="org.eclipse.elk.core.alg",eq2={331:1},eq3={288:1},eq4="Make sure its type is registered with the ",eq5=" utility class.",eq6="true",eq9="false",eq8="Couldn't clone property '",eq7=.05,eZe="org.eclipse.elk.core.options",eZt=1.2999999523162842,eZn="org.eclipse.elk.box",eZr="org.eclipse.elk.box.packingMode",eZi="org.eclipse.elk.algorithm",eZa="org.eclipse.elk.resolvedAlgorithm",eZo="org.eclipse.elk.bendPoints",eZs="org.eclipse.elk.labelManager",eZu="org.eclipse.elk.scaleFactor",eZc="org.eclipse.elk.animate",eZl="org.eclipse.elk.animTimeFactor",eZf="org.eclipse.elk.layoutAncestors",eZd="org.eclipse.elk.maxAnimTime",eZh="org.eclipse.elk.minAnimTime",eZp="org.eclipse.elk.progressBar",eZb="org.eclipse.elk.validateGraph",eZm="org.eclipse.elk.validateOptions",eZg="org.eclipse.elk.zoomToFit",eZv="org.eclipse.elk.font.name",eZy="org.eclipse.elk.font.size",eZw="org.eclipse.elk.edge.type",eZ_="partitioning",eZE="nodeLabels",eZS="portAlignment",eZk="nodeSize",eZx="port",eZT="portLabels",eZM="insideSelfLoops",eZO="org.eclipse.elk.fixed",eZA="org.eclipse.elk.random",eZL="port must have a parent node to calculate the 
port side",eZC="The edge needs to have exactly one edge section. Found: ",eZI="org.eclipse.elk.core.util.adapters",eZD="org.eclipse.emf.ecore",eZN="org.eclipse.elk.graph",eZP="EMapPropertyHolder",eZR="ElkBendPoint",eZj="ElkGraphElement",eZF="ElkConnectableShape",eZY="ElkEdge",eZB="ElkEdgeSection",eZU="EModelElement",eZH="ENamedElement",eZ$="ElkLabel",eZz="ElkNode",eZG="ElkPort",eZW={92:1,90:1},eZK="org.eclipse.emf.common.notify.impl",eZV="The feature '",eZq="' is not a valid changeable feature",eZZ="Expecting null",eZX="' is not a valid feature",eZJ="The feature ID",eZQ=" is not a valid feature ID",eZ1=32768,eZ0={105:1,92:1,90:1,56:1,49:1,97:1},eZ2="org.eclipse.emf.ecore.impl",eZ3="org.eclipse.elk.graph.impl",eZ4="Recursive containment not allowed for ",eZ5="The datatype '",eZ6="' is not a valid classifier",eZ9="The value '",eZ8={190:1,3:1,4:1},eZ7="The class '",eXe="http://www.eclipse.org/elk/ElkGraph",eXt=1024,eXn="property",eXr="value",eXi="source",eXa="properties",eXo="identifier",eXs="height",eXu="width",eXc="parent",eXl="text",eXf="children",eXd="hierarchical",eXh="sources",eXp="targets",eXb="sections",eXm="bendPoints",eXg="outgoingShape",eXv="incomingShape",eXy="outgoingSections",eXw="incomingSections",eX_="org.eclipse.emf.common.util",eXE="Severe implementation error in the Json to ElkGraph importer.",eXS="id",eXk="org.eclipse.elk.graph.json",eXx="Unhandled parameter types: ",eXT="startPoint",eXM="An edge must have at least one source and one target (edge id: '",eXO="').",eXA="Referenced edge section does not exist: ",eXL=" (edge id: '",eXC="target",eXI="sourcePoint",eXD="targetPoint",eXN="group",eXP="name",eXR="connectableShape cannot be null",eXj="edge cannot be null",eXF="Passed edge is not 'simple'.",eXY="org.eclipse.elk.graph.util",eXB="The 'no duplicates' constraint is violated",eXU="targetIndex=",eXH=", 
size=",eX$="sourceIndex=",eXz={3:1,4:1,20:1,28:1,52:1,14:1,15:1,54:1,67:1,63:1,58:1},eXG={3:1,4:1,20:1,28:1,52:1,14:1,47:1,15:1,54:1,67:1,63:1,58:1,588:1},eXW="logging",eXK="measureExecutionTime",eXV="parser.parse.1",eXq="parser.parse.2",eXZ="parser.next.1",eXX="parser.next.2",eXJ="parser.next.3",eXQ="parser.next.4",eX1="parser.factor.1",eX0="parser.factor.2",eX2="parser.factor.3",eX3="parser.factor.4",eX4="parser.factor.5",eX5="parser.factor.6",eX6="parser.atom.1",eX9="parser.atom.2",eX8="parser.atom.3",eX7="parser.atom.4",eJe="parser.atom.5",eJt="parser.cc.1",eJn="parser.cc.2",eJr="parser.cc.3",eJi="parser.cc.5",eJa="parser.cc.6",eJo="parser.cc.7",eJs="parser.cc.8",eJu="parser.ope.1",eJc="parser.ope.2",eJl="parser.ope.3",eJf="parser.descape.1",eJd="parser.descape.2",eJh="parser.descape.3",eJp="parser.descape.4",eJb="parser.descape.5",eJm="parser.process.1",eJg="parser.quantifier.1",eJv="parser.quantifier.2",eJy="parser.quantifier.3",eJw="parser.quantifier.4",eJ_="parser.quantifier.5",eJE="org.eclipse.emf.common.notify",eJS={415:1,672:1},eJk={3:1,4:1,20:1,28:1,52:1,14:1,15:1,67:1,58:1},eJx={366:1,143:1},eJT="index=",eJM={3:1,4:1,5:1,126:1},eJO={3:1,4:1,20:1,28:1,52:1,14:1,15:1,54:1,67:1,58:1},eJA={3:1,6:1,4:1,5:1,192:1},eJL={3:1,4:1,5:1,165:1,367:1},eJC=";/?:@&=+$,",eJI="invalid authority: 
",eJD="EAnnotation",eJN="ETypedElement",eJP="EStructuralFeature",eJR="EAttribute",eJj="EClassifier",eJF="EEnumLiteral",eJY="EGenericType",eJB="EOperation",eJU="EParameter",eJH="EReference",eJ$="ETypeParameter",eJz="org.eclipse.emf.ecore.util",eJG={76:1},eJW={3:1,20:1,14:1,15:1,58:1,589:1,76:1,69:1,95:1},eJK="org.eclipse.emf.ecore.util.FeatureMap$Entry",eJV=8192,eJq=2048,eJZ="byte",eJX="char",eJJ="double",eJQ="float",eJ1="int",eJ0="long",eJ2="short",eJ3="java.lang.Object",eJ4={3:1,4:1,5:1,247:1},eJ5={3:1,4:1,5:1,673:1},eJ6={3:1,4:1,20:1,28:1,52:1,14:1,15:1,54:1,67:1,63:1,58:1,69:1},eJ9={3:1,4:1,20:1,28:1,52:1,14:1,15:1,54:1,67:1,63:1,58:1,76:1,69:1,95:1},eJ8="mixed",eJ7="http:///org/eclipse/emf/ecore/util/ExtendedMetaData",eQe="kind",eQt={3:1,4:1,5:1,674:1},eQn={3:1,4:1,20:1,28:1,52:1,14:1,15:1,67:1,58:1,76:1,69:1,95:1},eQr={20:1,28:1,52:1,14:1,15:1,58:1,69:1},eQi={47:1,125:1,279:1},eQa={72:1,332:1},eQo="The value of type '",eQs="' must be of type '",eQu=1316,eQc="http://www.eclipse.org/emf/2002/Ecore",eQl=-32768,eQf="constraints",eQd="baseType",eQh="getEStructuralFeature",eQp="getFeatureID",eQb="feature",eQm="getOperationID",eQg="operation",eQv="defaultValue",eQy="eTypeParameters",eQw="isInstance",eQ_="getEEnumLiteral",eQE="eContainingClass",eQS={55:1},eQk={3:1,4:1,5:1,119:1},eQx="org.eclipse.emf.ecore.resource",eQT={92:1,90:1,591:1,1935:1},eQM="org.eclipse.emf.ecore.resource.impl",eQO="unspecified",eQA="simple",eQL="attribute",eQC="attributeWildcard",eQI="element",eQD="elementWildcard",eQN="collapse",eQP="itemType",eQR="namespace",eQj="##targetNamespace",eQF="whiteSpace",eQY="wildcards",eQB="http://www.eclipse.org/emf/2003/XMLType",eQU="##any",eQH="uninitialized",eQ$="The multiplicity constraint is 
violated",eQz="org.eclipse.emf.ecore.xml.type",eQG="ProcessingInstruction",eQW="SimpleAnyType",eQK="XMLTypeDocumentRoot",eQV="org.eclipse.emf.ecore.xml.type.impl",eQq="INF",eQZ="processing",eQX="ENTITIES_._base",eQJ="minLength",eQQ="ENTITY",eQ1="NCName",eQ0="IDREFS_._base",eQ2="integer",eQ3="token",eQ4="pattern",eQ5="[a-zA-Z]{1,8}(-[a-zA-Z0-9]{1,8})*",eQ6="\\i\\c*",eQ9="[\\i-[:]][\\c-[:]]*",eQ8="nonPositiveInteger",eQ7="maxInclusive",e1e="NMTOKEN",e1t="NMTOKENS_._base",e1n="nonNegativeInteger",e1r="minInclusive",e1i="normalizedString",e1a="unsignedByte",e1o="unsignedInt",e1s="18446744073709551615",e1u="unsignedShort",e1c="processingInstruction",e1l="org.eclipse.emf.ecore.xml.type.internal",e1f=1114111,e1d="Internal Error: shorthands: \\u",e1h="xml:isDigit",e1p="xml:isWord",e1b="xml:isSpace",e1m="xml:isNameChar",e1g="xml:isInitialNameChar",e1v="09٠٩۰۹०९০৯੦੯૦૯୦୯௧௯౦౯೦೯൦൯๐๙໐໙༠༩",e1y="AZaz\xc0\xd6\xd8\xf6\xf8ıĴľŁňŊžƀǃǍǰǴǵǺȗɐʨʻˁΆΆΈΊΌΌΎΡΣώϐϖϚϚϜϜϞϞϠϠϢϳЁЌЎяёќўҁҐӄӇӈӋӌӐӫӮӵӸӹԱՖՙՙաֆאתװײءغفيٱڷںھۀێېۓەەۥۦअहऽऽक़ॡঅঌএঐওনপরললশহড়ঢ়য়ৡৰৱਅਊਏਐਓਨਪਰਲਲ਼ਵਸ਼ਸਹਖ਼ੜਫ਼ਫ਼ੲੴઅઋઍઍએઑઓનપરલળવહઽઽૠૠଅଌଏଐଓନପରଲଳଶହଽଽଡ଼ଢ଼ୟୡஅஊஎஐஒகஙசஜஜஞடணதநபமவஷஹఅఌఎఐఒనపళవహౠౡಅಌಎಐಒನಪಳವಹೞೞೠೡഅഌഎഐഒനപഹൠൡกฮะะาำเๅກຂຄຄງຈຊຊຍຍດທນຟມຣລລວວສຫອຮະະາຳຽຽເໄཀཇཉཀྵႠჅაჶᄀᄀᄂᄃᄅᄇᄉᄉᄋᄌᄎᄒᄼᄼᄾᄾᅀᅀᅌᅌᅎᅎᅐᅐᅔᅕᅙᅙᅟᅡᅣᅣᅥᅥᅧᅧᅩᅩᅭᅮᅲᅳᅵᅵᆞᆞᆨᆨᆫᆫᆮᆯᆷᆸᆺᆺᆼᇂᇫᇫᇰᇰᇹᇹḀẛẠỹἀἕἘἝἠὅὈὍὐὗὙὙὛὛὝὝὟώᾀᾴᾶᾼιιῂῄῆῌῐΐῖΊῠῬῲῴῶῼΩΩKÅ℮℮ↀↂ〇〇〡〩ぁゔァヺㄅㄬ一龥가힣",e1w="Private Use",e1_="ASSIGNED",e1E="\0\x7f\x80\xffĀſƀɏɐʯʰ˿̀ͯͰϿЀӿ԰֏֐׿؀ۿ܀ݏހ޿ऀॿঀ৿਀੿઀૿଀୿஀௿ఀ౿ಀ೿ഀൿ඀෿฀๿຀໿ༀ࿿က႟Ⴀჿᄀᇿሀ፿Ꭰ᏿᐀ᙿ ᚟ᚠ᛿ក៿᠀᢯Ḁỿἀ῿ ⁰₟₠⃏⃐⃿℀⅏⅐↏←⇿∀⋿⌀⏿␀␿⑀⑟①⓿─╿▀▟■◿☀⛿✀➿⠀⣿⺀⻿⼀⿟⿰⿿ 〿぀ゟ゠ヿ㄀ㄯ㄰㆏㆐㆟ㆠㆿ㈀㋿㌀㏿㐀䶵一鿿ꀀ꒏꒐꓏가힣豈﫿ffﭏﭐ﷿︠︯︰﹏﹐﹯ﹰ﻾\uFEFF\uFEFF＀￯",e1S="UNASSIGNED",e1k={3:1,117:1},e1x="org.eclipse.emf.ecore.xml.type.util",e1T={3:1,4:1,5:1,368:1},e1M="org.eclipse.xtext.xbase.lib",e1O="Cannot add elements to a Range",e1A="Cannot set elements in a Range",e1L="Cannot remove elements from a Range",e1C="locale",e1I="default",e1D="user.agent",e1N=null;eB4.goog=eB4.goog||{},eB4.goog.global=eB4.goog.global||eB4,e_Q(),eTS(1,null,{},r),eUe.Fb=function(e){return 
x5(this,e)},eUe.Gb=function(){return this.gm},eUe.Hb=function(){return Ao(this)},eUe.Ib=function(){var e;return yx(esF(this))+"@"+(e=esj(this)>>>0).toString(16)},eUe.equals=function(e){return this.Fb(e)},eUe.hashCode=function(){return this.Hb()},eUe.toString=function(){return this.Ib()},eTS(290,1,{290:1,2026:1},ese),eUe.le=function(e){var t;return(t=new ese).i=4,e>1?t.c=z9(this,e-1):t.c=this,t},eUe.me=function(){return LW(this),this.b},eUe.ne=function(){return yx(this)},eUe.oe=function(){return LW(this),this.k},eUe.pe=function(){return(4&this.i)!=0},eUe.qe=function(){return(1&this.i)!=0},eUe.Ib=function(){return ee6(this)},eUe.i=0;var e1P=1,e1R=Y5(eUc,"Object",1),e1j=Y5(eUc,"Class",290);eTS(1998,1,eUl),Y5(eUf,"Optional",1998),eTS(1170,1998,eUl,i),eUe.Fb=function(e){return e===this},eUe.Hb=function(){return 2040732332},eUe.Ib=function(){return"Optional.absent()"},eUe.Jb=function(e){return Y9(e),m4(),e0l},Y5(eUf,"Absent",1170),eTS(628,1,{},ve),Y5(eUf,"Joiner",628);var e1F=RL(eUf,"Predicate");eTS(582,1,{169:1,582:1,3:1,45:1},c4),eUe.Mb=function(e){return es_(this,e)},eUe.Lb=function(e){return es_(this,e)},eUe.Fb=function(e){var t;return!!M4(e,582)&&(t=Pp(e,582),eT$(this.a,t.a))},eUe.Hb=function(){return esS(this.a)+306654252},eUe.Ib=function(){return eE7(this.a)},Y5(eUf,"Predicates/AndPredicate",582),eTS(408,1998,{408:1,3:1},c5),eUe.Fb=function(e){var t;return!!M4(e,408)&&(t=Pp(e,408),ecX(this.a,t.a))},eUe.Hb=function(){return 1502476572+esj(this.a)},eUe.Ib=function(){return eUm+this.a+")"},eUe.Jb=function(e){return new c5(H5(e.Kb(this.a),"the Function passed to Optional.transform() must not return null."))},Y5(eUf,"Present",408),eTS(198,1,eUv),eUe.Nb=function(e){F8(this,e)},eUe.Qb=function(){g4()},Y5(eUy,"UnmodifiableIterator",198),eTS(1978,198,eUw),eUe.Qb=function(){g4()},eUe.Rb=function(e){throw p7(new bO)},eUe.Wb=function(e){throw p7(new bO)},Y5(eUy,"UnmodifiableListIterator",1978),eTS(386,1978,eUw),eUe.Ob=function(){return 
this.c0},eUe.Pb=function(){if(this.c>=this.d)throw p7(new bC);return this.Xb(this.c++)},eUe.Tb=function(){return this.c},eUe.Ub=function(){if(this.c<=0)throw p7(new bC);return this.Xb(--this.c)},eUe.Vb=function(){return this.c-1},eUe.c=0,eUe.d=0,Y5(eUy,"AbstractIndexedListIterator",386),eTS(699,198,eUv),eUe.Ob=function(){return erE(this)},eUe.Pb=function(){return QR(this)},eUe.e=1,Y5(eUy,"AbstractIterator",699),eTS(1986,1,{224:1}),eUe.Zb=function(){var e;return(e=this.f)||(this.f=this.ac())},eUe.Fb=function(e){return es5(this,e)},eUe.Hb=function(){return esj(this.Zb())},eUe.dc=function(){return 0==this.gc()},eUe.ec=function(){return Fh(this)},eUe.Ib=function(){return efF(this.Zb())},Y5(eUy,"AbstractMultimap",1986),eTS(726,1986,eU_),eUe.$b=function(){enK(this)},eUe._b=function(e){return yy(this,e)},eUe.ac=function(){return new wI(this,this.c)},eUe.ic=function(e){return this.hc()},eUe.bc=function(){return new OC(this,this.c)},eUe.jc=function(){return this.mc(this.hc())},eUe.kc=function(){return new m$(this)},eUe.lc=function(){return ew4(this.c.vc().Nc(),new o,64,this.d)},eUe.cc=function(e){return Zq(this,e)},eUe.fc=function(e){return eu9(this,e)},eUe.gc=function(){return this.d},eUe.mc=function(e){return Hj(),new fF(e)},eUe.nc=function(){return new mH(this)},eUe.oc=function(){return ew4(this.c.Cc().Nc(),new a,64,this.d)},eUe.pc=function(e,t){return new XS(this,e,t,null)},eUe.d=0,Y5(eUy,"AbstractMapBasedMultimap",726),eTS(1631,726,eU_),eUe.hc=function(){return new XM(this.a)},eUe.jc=function(){return Hj(),Hj(),e2r},eUe.cc=function(e){return Pp(Zq(this,e),15)},eUe.fc=function(e){return Pp(eu9(this,e),15)},eUe.Zb=function(){return HU(this)},eUe.Fb=function(e){return es5(this,e)},eUe.qc=function(e){return Pp(Zq(this,e),15)},eUe.rc=function(e){return Pp(eu9(this,e),15)},eUe.mc=function(e){return $a(Pp(e,15))},eUe.pc=function(e,t){return Vu(this,e,Pp(t,15),null)},Y5(eUy,"AbstractListMultimap",1631),eTS(732,1,eUE),eUe.Nb=function(e){F8(this,e)},eUe.Ob=function(){return 
this.c.Ob()||this.e.Ob()},eUe.Pb=function(){var e;return this.e.Ob()||(e=Pp(this.c.Pb(),42),this.b=e.cd(),this.a=Pp(e.dd(),14),this.e=this.a.Kc()),this.sc(this.b,this.e.Pb())},eUe.Qb=function(){this.e.Qb(),this.a.dc()&&this.c.Qb(),--this.d.d},Y5(eUy,"AbstractMapBasedMultimap/Itr",732),eTS(1099,732,eUE,mH),eUe.sc=function(e,t){return t},Y5(eUy,"AbstractMapBasedMultimap/1",1099),eTS(1100,1,{},a),eUe.Kb=function(e){return Pp(e,14).Nc()},Y5(eUy,"AbstractMapBasedMultimap/1methodref$spliterator$Type",1100),eTS(1101,732,eUE,m$),eUe.sc=function(e,t){return new wD(e,t)},Y5(eUy,"AbstractMapBasedMultimap/2",1101);var e1Y=RL(eUS,"Map");eTS(1967,1,eUk),eUe.wc=function(e){ear(this,e)},eUe.yc=function(e,t,n){return el6(this,e,t,n)},eUe.$b=function(){this.vc().$b()},eUe.tc=function(e){return emT(this,e)},eUe._b=function(e){return!!ewt(this,e,!1)},eUe.uc=function(e){var t,n,r;for(n=this.vc().Kc();n.Ob();)if(r=(t=Pp(n.Pb(),42)).dd(),xc(e)===xc(r)||null!=e&&ecX(e,r))return!0;return!1},eUe.Fb=function(e){var t,n,r;if(e===this)return!0;if(!M4(e,83)||(r=Pp(e,83),this.gc()!=r.gc()))return!1;for(n=r.vc().Kc();n.Ob();)if(t=Pp(n.Pb(),42),!this.tc(t))return!1;return!0},eUe.xc=function(e){return xu(ewt(this,e,!1))},eUe.Hb=function(){return eoP(this.vc())},eUe.dc=function(){return 0==this.gc()},eUe.ec=function(){return new fk(this)},eUe.zc=function(e,t){throw p7(new gW("Put not supported on this map"))},eUe.Ac=function(e){eij(this,e)},eUe.Bc=function(e){return xu(ewt(this,e,!0))},eUe.gc=function(){return this.vc().gc()},eUe.Ib=function(){return ewb(this)},eUe.Cc=function(){return new fT(this)},Y5(eUS,"AbstractMap",1967),eTS(1987,1967,eUk),eUe.bc=function(){return new wU(this)},eUe.vc=function(){return Fd(this)},eUe.ec=function(){var e;return(e=this.g)||(this.g=this.bc())},eUe.Cc=function(){var e;return(e=this.i)||(this.i=new wH(this))},Y5(eUy,"Maps/ViewCachingAbstractMap",1987),eTS(389,1987,eUk,wI),eUe.xc=function(e){return etl(this,e)},eUe.Bc=function(e){return 
euT(this,e)},eUe.$b=function(){this.d==this.e.c?this.e.$b():RG(new RK(this))},eUe._b=function(e){return ecD(this.d,e)},eUe.Ec=function(){return new c7(this)},eUe.Dc=function(){return this.Ec()},eUe.Fb=function(e){return this===e||ecX(this.d,e)},eUe.Hb=function(){return esj(this.d)},eUe.ec=function(){return this.e.ec()},eUe.gc=function(){return this.d.gc()},eUe.Ib=function(){return efF(this.d)},Y5(eUy,"AbstractMapBasedMultimap/AsMap",389);var e1B=RL(eUc,"Iterable");eTS(28,1,eUx),eUe.Jc=function(e){qX(this,e)},eUe.Lc=function(){return this.Oc()},eUe.Nc=function(){return new Gq(this,0)},eUe.Oc=function(){return new R1(null,this.Nc())},eUe.Fc=function(e){throw p7(new gW("Add not supported on this collection"))},eUe.Gc=function(e){return er7(this,e)},eUe.$b=function(){UG(this)},eUe.Hc=function(e){return eds(this,e,!1)},eUe.Ic=function(e){return eot(this,e)},eUe.dc=function(){return 0==this.gc()},eUe.Mc=function(e){return eds(this,e,!0)},eUe.Pc=function(){return Fn(this)},eUe.Qc=function(e){return emk(this,e)},eUe.Ib=function(){return e_F(this)},Y5(eUS,"AbstractCollection",28);var e1U=RL(eUS,"Set");eTS(eUT,28,eUM),eUe.Nc=function(){return new Gq(this,1)},eUe.Fb=function(e){return ehN(this,e)},eUe.Hb=function(){return eoP(this)},Y5(eUS,"AbstractSet",eUT),eTS(1970,eUT,eUM),Y5(eUy,"Sets/ImprovedAbstractSet",1970),eTS(1971,1970,eUM),eUe.$b=function(){this.Rc().$b()},eUe.Hc=function(e){return edz(this,e)},eUe.dc=function(){return this.Rc().dc()},eUe.Mc=function(e){var t;return!!this.Hc(e)&&(t=Pp(e,42),this.Rc().ec().Mc(t.cd()))},eUe.gc=function(){return this.Rc().gc()},Y5(eUy,"Maps/EntrySet",1971),eTS(1097,1971,eUM,c7),eUe.Hc=function(e){return ecC(this.a.d.vc(),e)},eUe.Kc=function(){return new RK(this.a)},eUe.Rc=function(){return this.a},eUe.Mc=function(e){var t;return!!ecC(this.a.d.vc(),e)&&(t=Pp(e,42),ZM(this.a.e,t.cd()),!0)},eUe.Nc=function(){return Pl(this.a.d.vc().Nc(),new 
le(this.a))},Y5(eUy,"AbstractMapBasedMultimap/AsMap/AsMapEntries",1097),eTS(1098,1,{},le),eUe.Kb=function(e){return qJ(this.a,Pp(e,42))},Y5(eUy,"AbstractMapBasedMultimap/AsMap/AsMapEntries/0methodref$wrapEntry$Type",1098),eTS(730,1,eUE,RK),eUe.Nb=function(e){F8(this,e)},eUe.Pb=function(){var e;return e=Pp(this.b.Pb(),42),this.a=Pp(e.dd(),14),qJ(this.c,e)},eUe.Ob=function(){return this.b.Ob()},eUe.Qb=function(){eah(!!this.a),this.b.Qb(),this.c.e.d-=this.a.gc(),this.a.$b(),this.a=null},Y5(eUy,"AbstractMapBasedMultimap/AsMap/AsMapIterator",730),eTS(532,1970,eUM,wU),eUe.$b=function(){this.b.$b()},eUe.Hc=function(e){return this.b._b(e)},eUe.Jc=function(e){Y9(e),this.b.wc(new lk(e))},eUe.dc=function(){return this.b.dc()},eUe.Kc=function(){return new gr(this.b.vc().Kc())},eUe.Mc=function(e){return!!this.b._b(e)&&(this.b.Bc(e),!0)},eUe.gc=function(){return this.b.gc()},Y5(eUy,"Maps/KeySet",532),eTS(318,532,eUM,OC),eUe.$b=function(){var e;RG((e=this.b.vc().Kc(),new wg(this,e)))},eUe.Ic=function(e){return this.b.ec().Ic(e)},eUe.Fb=function(e){return this===e||ecX(this.b.ec(),e)},eUe.Hb=function(){return esj(this.b.ec())},eUe.Kc=function(){var e;return e=this.b.vc().Kc(),new wg(this,e)},eUe.Mc=function(e){var t,n;return n=0,(t=Pp(this.b.Bc(e),14))&&(n=t.gc(),t.$b(),this.a.d-=n),n>0},eUe.Nc=function(){return this.b.ec().Nc()},Y5(eUy,"AbstractMapBasedMultimap/KeySet",318),eTS(731,1,eUE,wg),eUe.Nb=function(e){F8(this,e)},eUe.Ob=function(){return this.c.Ob()},eUe.Pb=function(){return this.a=Pp(this.c.Pb(),42),this.a.cd()},eUe.Qb=function(){var e;eah(!!this.a),e=Pp(this.a.dd(),14),this.c.Qb(),this.b.a.d-=e.gc(),e.$b(),this.a=null},Y5(eUy,"AbstractMapBasedMultimap/KeySet/1",731),eTS(491,389,{83:1,161:1},LX),eUe.bc=function(){return this.Sc()},eUe.ec=function(){return this.Tc()},eUe.Sc=function(){return new wb(this.c,this.Uc())},eUe.Tc=function(){var e;return(e=this.b)||(this.b=this.Sc())},eUe.Uc=function(){return 
Pp(this.d,161)},Y5(eUy,"AbstractMapBasedMultimap/SortedAsMap",491),eTS(542,491,eUO,LJ),eUe.bc=function(){return new wm(this.a,Pp(Pp(this.d,161),171))},eUe.Sc=function(){return new wm(this.a,Pp(Pp(this.d,161),171))},eUe.ec=function(){var e;return Pp((e=this.b)||(this.b=new wm(this.a,Pp(Pp(this.d,161),171))),271)},eUe.Tc=function(){var e;return Pp((e=this.b)||(this.b=new wm(this.a,Pp(Pp(this.d,161),171))),271)},eUe.Uc=function(){return Pp(Pp(this.d,161),171)},Y5(eUy,"AbstractMapBasedMultimap/NavigableAsMap",542),eTS(490,318,eUA,wb),eUe.Nc=function(){return this.b.ec().Nc()},Y5(eUy,"AbstractMapBasedMultimap/SortedKeySet",490),eTS(388,490,eUL,wm),Y5(eUy,"AbstractMapBasedMultimap/NavigableKeySet",388),eTS(541,28,eUx,XS),eUe.Fc=function(e){var t,n;return efH(this),n=this.d.dc(),(t=this.d.Fc(e))&&(++this.f.d,n&&CP(this)),t},eUe.Gc=function(e){var t,n,r;return!e.dc()&&(r=(efH(this),this.d.gc()),(t=this.d.Gc(e))&&(n=this.d.gc(),this.f.d+=n-r,0==r&&CP(this)),t)},eUe.$b=function(){var e;0!=(e=(efH(this),this.d.gc()))&&(this.d.$b(),this.f.d-=e,jY(this))},eUe.Hc=function(e){return efH(this),this.d.Hc(e)},eUe.Ic=function(e){return efH(this),this.d.Ic(e)},eUe.Fb=function(e){return e===this||(efH(this),ecX(this.d,e))},eUe.Hb=function(){return efH(this),esj(this.d)},eUe.Kc=function(){return efH(this),new PS(this)},eUe.Mc=function(e){var t;return efH(this),(t=this.d.Mc(e))&&(--this.f.d,jY(this)),t},eUe.gc=function(){return xw(this)},eUe.Nc=function(){return efH(this),this.d.Nc()},eUe.Ib=function(){return efH(this),efF(this.d)},Y5(eUy,"AbstractMapBasedMultimap/WrappedCollection",541);var e1H=RL(eUS,"List");eTS(728,541,{20:1,28:1,14:1,15:1},Fo),eUe.ad=function(e){er8(this,e)},eUe.Nc=function(){return efH(this),this.d.Nc()},eUe.Vc=function(e,t){var n;efH(this),n=this.d.dc(),Pp(this.d,15).Vc(e,t),++this.a.d,n&&CP(this)},eUe.Wc=function(e,t){var 
n,r,i;return!t.dc()&&(i=(efH(this),this.d.gc()),(n=Pp(this.d,15).Wc(e,t))&&(r=this.d.gc(),this.a.d+=r-i,0==i&&CP(this)),n)},eUe.Xb=function(e){return efH(this),Pp(this.d,15).Xb(e)},eUe.Xc=function(e){return efH(this),Pp(this.d,15).Xc(e)},eUe.Yc=function(){return efH(this),new Mb(this)},eUe.Zc=function(e){return efH(this),new HM(this,e)},eUe.$c=function(e){var t;return efH(this),t=Pp(this.d,15).$c(e),--this.a.d,jY(this),t},eUe._c=function(e,t){return efH(this),Pp(this.d,15)._c(e,t)},eUe.bd=function(e,t){return efH(this),Vu(this.a,this.e,Pp(this.d,15).bd(e,t),this.b?this.b:this)},Y5(eUy,"AbstractMapBasedMultimap/WrappedList",728),eTS(1096,728,{20:1,28:1,14:1,15:1,54:1},A7),Y5(eUy,"AbstractMapBasedMultimap/RandomAccessWrappedList",1096),eTS(620,1,eUE,PS),eUe.Nb=function(e){F8(this,e)},eUe.Ob=function(){return UW(this),this.b.Ob()},eUe.Pb=function(){return UW(this),this.b.Pb()},eUe.Qb=function(){OG(this)},Y5(eUy,"AbstractMapBasedMultimap/WrappedCollection/WrappedIterator",620),eTS(729,620,eUC,Mb,HM),eUe.Qb=function(){OG(this)},eUe.Rb=function(e){var t;t=0==xw(this.a),(UW(this),Pp(this.b,125)).Rb(e),++this.a.a.d,t&&CP(this.a)},eUe.Sb=function(){return(UW(this),Pp(this.b,125)).Sb()},eUe.Tb=function(){return(UW(this),Pp(this.b,125)).Tb()},eUe.Ub=function(){return(UW(this),Pp(this.b,125)).Ub()},eUe.Vb=function(){return(UW(this),Pp(this.b,125)).Vb()},eUe.Wb=function(e){(UW(this),Pp(this.b,125)).Wb(e)},Y5(eUy,"AbstractMapBasedMultimap/WrappedList/WrappedListIterator",729),eTS(727,541,eUA,L3),eUe.Nc=function(){return efH(this),this.d.Nc()},Y5(eUy,"AbstractMapBasedMultimap/WrappedSortedSet",727),eTS(1095,727,eUL,TB),Y5(eUy,"AbstractMapBasedMultimap/WrappedNavigableSet",1095),eTS(1094,541,eUM,L4),eUe.Nc=function(){return efH(this),this.d.Nc()},Y5(eUy,"AbstractMapBasedMultimap/WrappedSet",1094),eTS(1103,1,{},o),eUe.Kb=function(e){return Xb(Pp(e,42))},Y5(eUy,"AbstractMapBasedMultimap/lambda$1$Type",1103),eTS(1102,1,{},lt),eUe.Kb=function(e){return new 
wD(this.a,e)},Y5(eUy,"AbstractMapBasedMultimap/lambda$2$Type",1102);var e1$=RL(eUS,"Map/Entry");eTS(345,1,eUI),eUe.Fb=function(e){var t;return!!M4(e,42)&&(t=Pp(e,42),BG(this.cd(),t.cd())&&BG(this.dd(),t.dd()))},eUe.Hb=function(){var e,t;return e=this.cd(),t=this.dd(),(null==e?0:esj(e))^(null==t?0:esj(t))},eUe.ed=function(e){throw p7(new bO)},eUe.Ib=function(){return this.cd()+"="+this.dd()},Y5(eUy,eUD,345),eTS(1988,28,eUx),eUe.$b=function(){this.fd().$b()},eUe.Hc=function(e){var t;return!!M4(e,42)&&(t=Pp(e,42),Kr(this.fd(),t.cd(),t.dd()))},eUe.Mc=function(e){var t;return!!M4(e,42)&&(t=Pp(e,42),Ki(this.fd(),t.cd(),t.dd()))},eUe.gc=function(){return this.fd().d},Y5(eUy,"Multimaps/Entries",1988),eTS(733,1988,eUx,ln),eUe.Kc=function(){return this.a.kc()},eUe.fd=function(){return this.a},eUe.Nc=function(){return this.a.lc()},Y5(eUy,"AbstractMultimap/Entries",733),eTS(734,733,eUM,mz),eUe.Nc=function(){return this.a.lc()},eUe.Fb=function(e){return eEB(this,e)},eUe.Hb=function(){return eie(this)},Y5(eUy,"AbstractMultimap/EntrySet",734),eTS(735,28,eUx,lr),eUe.$b=function(){this.a.$b()},eUe.Hc=function(e){return eun(this.a,e)},eUe.Kc=function(){return this.a.nc()},eUe.gc=function(){return this.a.d},eUe.Nc=function(){return this.a.oc()},Y5(eUy,"AbstractMultimap/Values",735),eTS(1989,28,{835:1,20:1,28:1,14:1}),eUe.Jc=function(e){Y9(e),Uz(this).Jc(new lS(e))},eUe.Nc=function(){var e;return ew4(e=Uz(this).Nc(),new y,64|1296&e.qd(),this.a.d)},eUe.Fc=function(e){return g5(),!0},eUe.Gc=function(e){return Y9(this),Y9(e),M4(e,543)?KM(Pp(e,835)):!e.dc()&&eel(this,e.Kc())},eUe.Hc=function(e){var t;return((t=Pp(ecA(HU(this.a),e),14))?t.gc():0)>0},eUe.Fb=function(e){return eMc(this,e)},eUe.Hb=function(){return esj(Uz(this))},eUe.dc=function(){return Uz(this).dc()},eUe.Mc=function(e){return ekJ(this,e,1)>0},eUe.Ib=function(){return efF(Uz(this))},Y5(eUy,"AbstractMultiset",1989),eTS(1991,1970,eUM),eUe.$b=function(){enK(this.a.a)},eUe.Hc=function(e){var 
t,n;return!!M4(e,492)&&(n=Pp(e,416),!(0>=Pp(n.a.dd(),14).gc())&&(t=GB(this.a,n.a.cd()))==Pp(n.a.dd(),14).gc())},eUe.Mc=function(e){var t,n,r,i;return!!M4(e,492)&&(t=(n=Pp(e,416)).a.cd(),0!=(r=Pp(n.a.dd(),14).gc()))&&ekQ(i=this.a,t,r)},Y5(eUy,"Multisets/EntrySet",1991),eTS(1109,1991,eUM,li),eUe.Kc=function(){return new ga(Fd(HU(this.a.a)).Kc())},eUe.gc=function(){return HU(this.a.a).gc()},Y5(eUy,"AbstractMultiset/EntrySet",1109),eTS(619,726,eU_),eUe.hc=function(){return this.gd()},eUe.jc=function(){return this.hd()},eUe.cc=function(e){return this.jd(e)},eUe.fc=function(e){return this.kd(e)},eUe.Zb=function(){var e;return(e=this.f)||(this.f=this.ac())},eUe.hd=function(){return Hj(),Hj(),e2a},eUe.Fb=function(e){return es5(this,e)},eUe.jd=function(e){return Pp(Zq(this,e),21)},eUe.kd=function(e){return Pp(eu9(this,e),21)},eUe.mc=function(e){return Hj(),new vd(Pp(e,21))},eUe.pc=function(e,t){return new L4(this,e,Pp(t,21))},Y5(eUy,"AbstractSetMultimap",619),eTS(1657,619,eU_),eUe.hc=function(){return new yB(this.b)},eUe.gd=function(){return new yB(this.b)},eUe.jc=function(){return Bo(new yB(this.b))},eUe.hd=function(){return Bo(new yB(this.b))},eUe.cc=function(e){return Pp(Pp(Zq(this,e),21),84)},eUe.jd=function(e){return Pp(Pp(Zq(this,e),21),84)},eUe.fc=function(e){return Pp(Pp(eu9(this,e),21),84)},eUe.kd=function(e){return Pp(Pp(eu9(this,e),21),84)},eUe.mc=function(e){return M4(e,271)?Bo(Pp(e,271)):(Hj(),new O4(Pp(e,84)))},eUe.Zb=function(){var e;return(e=this.f)||(this.f=M4(this.c,171)?new LJ(this,Pp(this.c,171)):M4(this.c,161)?new LX(this,Pp(this.c,161)):new wI(this,this.c))},eUe.pc=function(e,t){return M4(t,271)?new TB(this,e,Pp(t,271)):new L3(this,e,Pp(t,84))},Y5(eUy,"AbstractSortedSetMultimap",1657),eTS(1658,1657,eU_),eUe.Zb=function(){var e;return Pp(Pp((e=this.f)||(this.f=M4(this.c,171)?new LJ(this,Pp(this.c,171)):M4(this.c,161)?new LX(this,Pp(this.c,161)):new wI(this,this.c)),161),171)},eUe.ec=function(){var e;return Pp(Pp((e=this.i)||(this.i=M4(this.c,171)?new 
wm(this,Pp(this.c,171)):M4(this.c,161)?new wb(this,Pp(this.c,161)):new OC(this,this.c)),84),271)},eUe.bc=function(){return M4(this.c,171)?new wm(this,Pp(this.c,171)):M4(this.c,161)?new wb(this,Pp(this.c,161)):new OC(this,this.c)},Y5(eUy,"AbstractSortedKeySortedSetMultimap",1658),eTS(2010,1,{1947:1}),eUe.Fb=function(e){return ev7(this,e)},eUe.Hb=function(){var e;return eoP((e=this.g)||(this.g=new la(this)))},eUe.Ib=function(){var e;return ewb((e=this.f)||(this.f=new OP(this)))},Y5(eUy,"AbstractTable",2010),eTS(665,eUT,eUM,la),eUe.$b=function(){g6()},eUe.Hc=function(e){var t,n;return!!M4(e,468)&&(t=Pp(e,682),!!(n=Pp(ecA(Y7(this.a),xh(t.c.e,t.b)),83))&&ecC(n.vc(),new wD(xh(t.c.c,t.a),X_(t.c,t.b,t.a))))},eUe.Kc=function(){return $e(this.a)},eUe.Mc=function(e){var t,n;return!!M4(e,468)&&(t=Pp(e,682),!!(n=Pp(ecA(Y7(this.a),xh(t.c.e,t.b)),83))&&ecI(n.vc(),new wD(xh(t.c.c,t.a),X_(t.c,t.b,t.a))))},eUe.gc=function(){return R8(this.a)},eUe.Nc=function(){return KH(this.a)},Y5(eUy,"AbstractTable/CellSet",665),eTS(1928,28,eUx,lo),eUe.$b=function(){g6()},eUe.Hc=function(e){return ewx(this.a,e)},eUe.Kc=function(){return $t(this.a)},eUe.gc=function(){return R8(this.a)},eUe.Nc=function(){return Kd(this.a)},Y5(eUy,"AbstractTable/Values",1928),eTS(1632,1631,eU_),Y5(eUy,"ArrayListMultimapGwtSerializationDependencies",1632),eTS(513,1632,eU_,gQ,G$),eUe.hc=function(){return new XM(this.a)},eUe.a=0,Y5(eUy,"ArrayListMultimap",513),eTS(664,2010,{664:1,1947:1,3:1},exj),Y5(eUy,"ArrayTable",664),eTS(1924,386,eUw,OI),eUe.Xb=function(e){return new eo7(this.a,e)},Y5(eUy,"ArrayTable/1",1924),eTS(1925,1,{},c6),eUe.ld=function(e){return new eo7(this.a,e)},Y5(eUy,"ArrayTable/1methodref$getCell$Type",1925),eTS(2011,1,{682:1}),eUe.Fb=function(e){var t;return e===this||!!M4(e,468)&&(t=Pp(e,682),BG(xh(this.c.e,this.b),xh(t.c.e,t.b))&&BG(xh(this.c.c,this.a),xh(t.c.c,t.a))&&BG(X_(this.c,this.b,this.a),X_(t.c,t.b,t.a)))},eUe.Hb=function(){return 
euF(eow(vx(e1R,1),eUp,1,5,[xh(this.c.e,this.b),xh(this.c.c,this.a),X_(this.c,this.b,this.a)]))},eUe.Ib=function(){return"("+xh(this.c.e,this.b)+","+xh(this.c.c,this.a)+")="+X_(this.c,this.b,this.a)},Y5(eUy,"Tables/AbstractCell",2011),eTS(468,2011,{468:1,682:1},eo7),eUe.a=0,eUe.b=0,eUe.d=0,Y5(eUy,"ArrayTable/2",468),eTS(1927,1,{},c9),eUe.ld=function(e){return Qo(this.a,e)},Y5(eUy,"ArrayTable/2methodref$getValue$Type",1927),eTS(1926,386,eUw,OD),eUe.Xb=function(e){return Qo(this.a,e)},Y5(eUy,"ArrayTable/3",1926),eTS(1979,1967,eUk),eUe.$b=function(){RG(this.kc())},eUe.vc=function(){return new lx(this)},eUe.lc=function(){return new Uq(this.kc(),this.gc())},Y5(eUy,"Maps/IteratorBasedAbstractMap",1979),eTS(828,1979,eUk),eUe.$b=function(){throw p7(new bO)},eUe._b=function(e){return yE(this.c,e)},eUe.kc=function(){return new ON(this,this.c.b.c.gc())},eUe.lc=function(){return Rj(this.c.b.c.gc(),16,new c8(this))},eUe.xc=function(e){var t;return(t=Pp(Iq(this.c,e),19))?this.nd(t.a):null},eUe.dc=function(){return this.c.b.c.dc()},eUe.ec=function(){return Fl(this.c)},eUe.zc=function(e,t){var n;if(!(n=Pp(Iq(this.c,e),19)))throw p7(new gL(this.md()+" "+e+" not in "+Fl(this.c)));return this.od(n.a,t)},eUe.Bc=function(e){throw p7(new bO)},eUe.gc=function(){return this.c.b.c.gc()},Y5(eUy,"ArrayTable/ArrayMap",828),eTS(1923,1,{},c8),eUe.ld=function(e){return Bs(this.a,e)},Y5(eUy,"ArrayTable/ArrayMap/0methodref$getEntry$Type",1923),eTS(1921,345,eUI,wk),eUe.cd=function(){return OB(this.a,this.b)},eUe.dd=function(){return this.a.nd(this.b)},eUe.ed=function(e){return this.a.od(this.b,e)},eUe.b=0,Y5(eUy,"ArrayTable/ArrayMap/1",1921),eTS(1922,386,eUw,ON),eUe.Xb=function(e){return Bs(this.a,e)},Y5(eUy,"ArrayTable/ArrayMap/2",1922),eTS(1920,828,eUk,F2),eUe.md=function(){return"Column"},eUe.nd=function(e){return X_(this.b,this.a,e)},eUe.od=function(e,t){return eoy(this.b,this.a,e,t)},eUe.a=0,Y5(eUy,"ArrayTable/Row",1920),eTS(829,828,eUk,OP),eUe.nd=function(e){return new 
F2(this.a,e)},eUe.zc=function(e,t){return Pp(t,83),g9()},eUe.od=function(e,t){return Pp(t,83),g8()},eUe.md=function(){return"Row"},Y5(eUy,"ArrayTable/RowMap",829),eTS(1120,1,eUj,wx),eUe.qd=function(){return -262&this.a.qd()},eUe.rd=function(){return this.a.rd()},eUe.Nb=function(e){this.a.Nb(new ww(e,this.b))},eUe.sd=function(e){return this.a.sd(new wy(e,this.b))},Y5(eUy,"CollectSpliterators/1",1120),eTS(1121,1,eUF,wy),eUe.td=function(e){this.a.td(this.b.Kb(e))},Y5(eUy,"CollectSpliterators/1/lambda$0$Type",1121),eTS(1122,1,eUF,ww),eUe.td=function(e){this.a.td(this.b.Kb(e))},Y5(eUy,"CollectSpliterators/1/lambda$1$Type",1122),eTS(1123,1,eUj,K4),eUe.qd=function(){return this.a},eUe.rd=function(){return this.d&&(this.b=MS(this.b,this.d.rd())),MS(this.b,0)},eUe.Nb=function(e){this.d&&(this.d.Nb(e),this.d=null),this.c.Nb(new wv(this.e,e)),this.b=0},eUe.sd=function(e){for(;;){if(this.d&&this.d.sd(e))return xg(this.b,eUY)&&(this.b=efe(this.b,1)),!0;if(this.d=null,!this.c.sd(new w_(this,this.e)))return!1}},eUe.a=0,eUe.b=0,Y5(eUy,"CollectSpliterators/1FlatMapSpliterator",1123),eTS(1124,1,eUF,w_),eUe.td=function(e){Iv(this.a,this.b,e)},Y5(eUy,"CollectSpliterators/1FlatMapSpliterator/lambda$0$Type",1124),eTS(1125,1,eUF,wv),eUe.td=function(e){M9(this.b,this.a,e)},Y5(eUy,"CollectSpliterators/1FlatMapSpliterator/lambda$1$Type",1125),eTS(1117,1,eUj,Ig),eUe.qd=function(){return 16464|this.b},eUe.rd=function(){return this.a.rd()},eUe.Nb=function(e){this.a.xe(new wS(e,this.c))},eUe.sd=function(e){return this.a.ye(new wE(e,this.c))},eUe.b=0,Y5(eUy,"CollectSpliterators/1WithCharacteristics",1117),eTS(1118,1,eUB,wE),eUe.ud=function(e){this.a.td(this.b.ld(e))},Y5(eUy,"CollectSpliterators/1WithCharacteristics/lambda$0$Type",1118),eTS(1119,1,eUB,wS),eUe.ud=function(e){this.a.td(this.b.ld(e))},Y5(eUy,"CollectSpliterators/1WithCharacteristics/lambda$1$Type",1119),eTS(245,1,eUU),eUe.wd=function(e){return this.vd(Pp(e,245))},eUe.vd=function(e){var t;return 
e==(m2(),e0d)?1:e==(m3(),e0f)?-1:0!=(t=(Rg(),eiK(this.a,e.a)))?t:M4(this,519)==M4(e,519)?0:M4(this,519)?1:-1},eUe.zd=function(){return this.a},eUe.Fb=function(e){return ehd(this,e)},Y5(eUy,"Cut",245),eTS(1761,245,eUU,vb),eUe.vd=function(e){return e==this?0:1},eUe.xd=function(e){throw p7(new b_)},eUe.yd=function(e){e.a+="+∞)"},eUe.zd=function(){throw p7(new gC(eUH))},eUe.Hb=function(){return wK(),ebh(this)},eUe.Ad=function(e){return!1},eUe.Ib=function(){return"+∞"},Y5(eUy,"Cut/AboveAll",1761),eTS(519,245,{245:1,519:1,3:1,35:1},OW),eUe.xd=function(e){xT((e.a+="(",e),this.a)},eUe.yd=function(e){Bd(xT(e,this.a),93)},eUe.Hb=function(){return~esj(this.a)},eUe.Ad=function(e){return Rg(),0>eiK(this.a,e)},eUe.Ib=function(){return"/"+this.a+"\\"},Y5(eUy,"Cut/AboveValue",519),eTS(1760,245,eUU,vm),eUe.vd=function(e){return e==this?0:-1},eUe.xd=function(e){e.a+="(-∞"},eUe.yd=function(e){throw p7(new b_)},eUe.zd=function(){throw p7(new gC(eUH))},eUe.Hb=function(){return wK(),ebh(this)},eUe.Ad=function(e){return!0},eUe.Ib=function(){return"-∞"},Y5(eUy,"Cut/BelowAll",1760),eTS(1762,245,eUU,OK),eUe.xd=function(e){xT((e.a+="[",e),this.a)},eUe.yd=function(e){Bd(xT(e,this.a),41)},eUe.Hb=function(){return esj(this.a)},eUe.Ad=function(e){return Rg(),0>=eiK(this.a,e)},eUe.Ib=function(){return"\\"+this.a+"/"},Y5(eUy,"Cut/BelowValue",1762),eTS(537,1,eU$),eUe.Jc=function(e){qX(this,e)},eUe.Ib=function(){return elq(Pp(H5(this,"use Optional.orNull() instead of Optional.or(null)"),20).Kc())},Y5(eUy,"FluentIterable",537),eTS(433,537,eU$,xq),eUe.Kc=function(){return new Fa(OH(this.a.Kc(),new c))},Y5(eUy,"FluentIterable/2",433),eTS(1046,537,eU$,xZ),eUe.Kc=function(){return Y_(this)},Y5(eUy,"FluentIterable/3",1046),eTS(708,386,eUw,Oj),eUe.Xb=function(e){return this.a[e].Kc()},Y5(eUy,"FluentIterable/3/1",708),eTS(1972,1,{}),eUe.Ib=function(){return efF(this.Bd().b)},Y5(eUy,"ForwardingObject",1972),eTS(1973,1972,eUz),eUe.Bd=function(){return 
this.Cd()},eUe.Jc=function(e){qX(this,e)},eUe.Lc=function(){return this.Oc()},eUe.Nc=function(){return new Gq(this,0)},eUe.Oc=function(){return new R1(null,this.Nc())},eUe.Fc=function(e){return this.Cd(),yD()},eUe.Gc=function(e){return this.Cd(),yN()},eUe.$b=function(){this.Cd(),yP()},eUe.Hc=function(e){return this.Cd().Hc(e)},eUe.Ic=function(e){return this.Cd().Ic(e)},eUe.dc=function(){return this.Cd().b.dc()},eUe.Kc=function(){return this.Cd().Kc()},eUe.Mc=function(e){return this.Cd(),yR()},eUe.gc=function(){return this.Cd().b.gc()},eUe.Pc=function(){return this.Cd().Pc()},eUe.Qc=function(e){return this.Cd().Qc(e)},Y5(eUy,"ForwardingCollection",1973),eTS(1980,28,eUG),eUe.Kc=function(){return this.Ed()},eUe.Fc=function(e){throw p7(new bO)},eUe.Gc=function(e){throw p7(new bO)},eUe.$b=function(){throw p7(new bO)},eUe.Hc=function(e){return null!=e&&eds(this,e,!1)},eUe.Dd=function(){switch(this.gc()){case 0:return Bx(),Bx(),e0h;case 1:return Bx(),new Rz(Y9(this.Ed().Pb()));default:return new F3(this,this.Pc())}},eUe.Mc=function(e){throw p7(new bO)},Y5(eUy,"ImmutableCollection",1980),eTS(712,1980,eUG,bb),eUe.Kc=function(){return JJ(this.a.Kc())},eUe.Hc=function(e){return null!=e&&this.a.Hc(e)},eUe.Ic=function(e){return this.a.Ic(e)},eUe.dc=function(){return this.a.dc()},eUe.Ed=function(){return JJ(this.a.Kc())},eUe.gc=function(){return this.a.gc()},eUe.Pc=function(){return this.a.Pc()},eUe.Qc=function(e){return this.a.Qc(e)},eUe.Ib=function(){return efF(this.a)},Y5(eUy,"ForwardingImmutableCollection",712),eTS(152,1980,eUW),eUe.Kc=function(){return this.Ed()},eUe.Yc=function(){return this.Fd(0)},eUe.Zc=function(e){return this.Fd(e)},eUe.ad=function(e){er8(this,e)},eUe.Nc=function(){return new Gq(this,16)},eUe.bd=function(e,t){return this.Gd(e,t)},eUe.Vc=function(e,t){throw p7(new bO)},eUe.Wc=function(e,t){throw p7(new bO)},eUe.Fb=function(e){return eTJ(this,e)},eUe.Hb=function(){return eaI(this)},eUe.Xc=function(e){return null==e?-1:emx(this,e)},eUe.Ed=function(){return 
this.Fd(0)},eUe.Fd=function(e){return AR(this,e)},eUe.$c=function(e){throw p7(new bO)},eUe._c=function(e,t){throw p7(new bO)},eUe.Gd=function(e,t){var n;return ecT((n=new wz(this),new Gz(n,e,t)))},Y5(eUy,"ImmutableList",152),eTS(2006,152,eUW),eUe.Kc=function(){return JJ(this.Hd().Kc())},eUe.bd=function(e,t){return ecT(this.Hd().bd(e,t))},eUe.Hc=function(e){return null!=e&&this.Hd().Hc(e)},eUe.Ic=function(e){return this.Hd().Ic(e)},eUe.Fb=function(e){return ecX(this.Hd(),e)},eUe.Xb=function(e){return xh(this,e)},eUe.Hb=function(){return esj(this.Hd())},eUe.Xc=function(e){return this.Hd().Xc(e)},eUe.dc=function(){return this.Hd().dc()},eUe.Ed=function(){return JJ(this.Hd().Kc())},eUe.gc=function(){return this.Hd().gc()},eUe.Gd=function(e,t){return ecT(this.Hd().bd(e,t))},eUe.Pc=function(){return this.Hd().Qc(Je(e1R,eUp,1,this.Hd().gc(),5,1))},eUe.Qc=function(e){return this.Hd().Qc(e)},eUe.Ib=function(){return efF(this.Hd())},Y5(eUy,"ForwardingImmutableList",2006),eTS(714,1,eUV),eUe.vc=function(){return Fc(this)},eUe.wc=function(e){ear(this,e)},eUe.ec=function(){return Fl(this)},eUe.yc=function(e,t,n){return el6(this,e,t,n)},eUe.Cc=function(){return this.Ld()},eUe.$b=function(){throw p7(new bO)},eUe._b=function(e){return null!=this.xc(e)},eUe.uc=function(e){return this.Ld().Hc(e)},eUe.Jd=function(){return new bm(this)},eUe.Kd=function(){return new bg(this)},eUe.Fb=function(e){return eua(this,e)},eUe.Hb=function(){return Fc(this).Hb()},eUe.dc=function(){return 0==this.gc()},eUe.zc=function(e,t){return g7()},eUe.Bc=function(e){throw p7(new bO)},eUe.Ib=function(){return eEo(this)},eUe.Ld=function(){return this.e?this.e:this.e=this.Kd()},eUe.c=null,eUe.d=null,eUe.e=null,Y5(eUy,"ImmutableMap",714),eTS(715,714,eUV),eUe._b=function(e){return yE(this,e)},eUe.uc=function(e){return w1(this.b,e)},eUe.Id=function(){return ecM(new lu(this))},eUe.Jd=function(){return ecM(Uk(this.b))},eUe.Kd=function(){return Dn(),new bb(UE(this.b))},eUe.Fb=function(e){return 
w2(this.b,e)},eUe.xc=function(e){return Iq(this,e)},eUe.Hb=function(){return esj(this.b.c)},eUe.dc=function(){return this.b.c.dc()},eUe.gc=function(){return this.b.c.gc()},eUe.Ib=function(){return efF(this.b.c)},Y5(eUy,"ForwardingImmutableMap",715),eTS(1974,1973,eUq),eUe.Bd=function(){return this.Md()},eUe.Cd=function(){return this.Md()},eUe.Nc=function(){return new Gq(this,1)},eUe.Fb=function(e){return e===this||this.Md().Fb(e)},eUe.Hb=function(){return this.Md().Hb()},Y5(eUy,"ForwardingSet",1974),eTS(1069,1974,eUq,lu),eUe.Bd=function(){return US(this.a.b)},eUe.Cd=function(){return US(this.a.b)},eUe.Hc=function(e){if(M4(e,42)&&null==Pp(e,42).cd())return!1;try{return wQ(US(this.a.b),e)}catch(t){if(t=eoa(t),M4(t,205))return!1;throw p7(t)}},eUe.Md=function(){return US(this.a.b)},eUe.Qc=function(e){var t;return t=$L(US(this.a.b),e),US(this.a.b).b.gc()=0?"+":"")+(n/60|0),t=Tt(eB4.Math.abs(n)%60),(e_E(),e2l)[this.q.getDay()]+" "+e2f[this.q.getMonth()]+" "+Tt(this.q.getDate())+" "+Tt(this.q.getHours())+":"+Tt(this.q.getMinutes())+":"+Tt(this.q.getSeconds())+" GMT"+e+t+" "+this.q.getFullYear()};var e1Q=Y5(eUS,"Date",199);eTS(1915,199,eHB,evI),eUe.a=!1,eUe.b=0,eUe.c=0,eUe.d=0,eUe.e=0,eUe.f=0,eUe.g=!1,eUe.i=0,eUe.j=0,eUe.k=0,eUe.n=0,eUe.o=0,eUe.p=0,Y5("com.google.gwt.i18n.shared.impl","DateRecord",1915),eTS(1966,1,{}),eUe.fe=function(){return null},eUe.ge=function(){return null},eUe.he=function(){return null},eUe.ie=function(){return null},eUe.je=function(){return null},Y5(eHU,"JSONValue",1966),eTS(216,1966,{216:1},lN,lL),eUe.Fb=function(e){return!!M4(e,216)&&W$(this.a,Pp(e,216).a)},eUe.ee=function(){return be},eUe.Hb=function(){return $n(this.a)},eUe.fe=function(){return this},eUe.Ib=function(){var e,t,n;for(t=0,n=new O0("["),e=this.a.length;t0&&(n.a+=","),xT(n,eep(this,t));return n.a+="]",n.a},Y5(eHU,"JSONArray",216),eTS(483,1966,{483:1},lC),eUe.ee=function(){return bt},eUe.ge=function(){return this},eUe.Ib=function(){return 
OQ(),""+this.a},eUe.a=!1,Y5(eHU,"JSONBoolean",483),eTS(985,60,eHr,gs),Y5(eHU,"JSONException",985),eTS(1023,1966,{},g),eUe.ee=function(){return bo},eUe.Ib=function(){return eUg},Y5(eHU,"JSONNull",1023),eTS(258,1966,{258:1},lI),eUe.Fb=function(e){return!!M4(e,258)&&this.a==Pp(e,258).a},eUe.ee=function(){return bn},eUe.Hb=function(){return Ti(this.a)},eUe.he=function(){return this},eUe.Ib=function(){return this.a+""},eUe.a=0,Y5(eHU,"JSONNumber",258),eTS(183,1966,{183:1},gu,lD),eUe.Fb=function(e){return!!M4(e,183)&&W$(this.a,Pp(e,183).a)},eUe.ee=function(){return br},eUe.Hb=function(){return $n(this.a)},eUe.ie=function(){return this},eUe.Ib=function(){var e,t,n,r,i,a,o;for(r=0,o=new O0("{"),e=!0,i=(n=a=erG(this,Je(e17,eUP,2,0,6,1))).length;r=0?":"+this.c:"")+")"},eUe.c=0;var e18=Y5(eUc,"StackTraceElement",310);e0c={3:1,475:1,35:1,2:1};var e17=Y5(eUc,eHa,2);eTS(107,418,{475:1},vs,vu,O1),Y5(eUc,"StringBuffer",107),eTS(100,418,{475:1},vc,vl,O0),Y5(eUc,"StringBuilder",100),eTS(687,73,eHZ,vf),Y5(eUc,"StringIndexOutOfBoundsException",687),eTS(2043,1,{}),eTS(844,1,{},N),eUe.Kb=function(e){return Pp(e,78).e},Y5(eUc,"Throwable/lambda$0$Type",844),eTS(41,60,{3:1,102:1,60:1,78:1,41:1},bO,gW),Y5(eUc,"UnsupportedOperationException",41),eTS(240,236,{3:1,35:1,236:1,240:1},eew,yY),eUe.wd=function(e){return eDG(this,Pp(e,240))},eUe.ke=function(){return eEu(eRy(this))},eUe.Fb=function(e){var t;return this===e||!!M4(e,240)&&(t=Pp(e,240),this.e==t.e&&0==eDG(this,t))},eUe.Hb=function(){var e;return 0!=this.b?this.b:this.a<54?(e=eap(this.f),this.b=jE(WM(e,-1)),this.b=33*this.b+jE(WM(Fv(e,32),-1)),this.b=17*this.b+zy(this.e),this.b):(this.b=17*ect(this.c)+zy(this.e),this.b)},eUe.Ib=function(){return eRy(this)},eUe.a=0,eUe.b=0,eUe.d=0,eUe.e=0,eUe.f=0;var e0e=Y5("java.math","BigDecimal",240);eTS(91,236,{3:1,35:1,236:1,91:1},ep4,XE,F7,ey$,eh5,TU),eUe.wd=function(e){return ehI(this,Pp(e,91))},eUe.ke=function(){return eEu(eBw(this,0))},eUe.Fb=function(e){return 
ef6(this,e)},eUe.Hb=function(){return ect(this)},eUe.Ib=function(){return eBw(this,0)},eUe.b=-2,eUe.c=0,eUe.d=0,eUe.e=0;var e0t=Y5("java.math","BigInteger",91);eTS(488,1967,eUk),eUe.$b=function(){Yy(this)},eUe._b=function(e){return F9(this,e)},eUe.uc=function(e){return euo(this,e,this.g)||euo(this,e,this.f)},eUe.vc=function(){return new fS(this)},eUe.xc=function(e){return Bp(this,e)},eUe.zc=function(e,t){return Um(this,e,t)},eUe.Bc=function(e){return Z3(this,e)},eUe.gc=function(){return wq(this)},Y5(eUS,"AbstractHashMap",488),eTS(261,eUT,eUM,fS),eUe.$b=function(){this.a.$b()},eUe.Hc=function(e){return KN(this,e)},eUe.Kc=function(){return new esz(this.a)},eUe.Mc=function(e){var t;return!!KN(this,e)&&(t=Pp(e,42).cd(),this.a.Bc(t),!0)},eUe.gc=function(){return this.a.gc()},Y5(eUS,"AbstractHashMap/EntrySet",261),eTS(262,1,eUE,esz),eUe.Nb=function(e){F8(this,e)},eUe.Pb=function(){return etz(this)},eUe.Ob=function(){return this.b},eUe.Qb=function(){JM(this)},eUe.b=!1,Y5(eUS,"AbstractHashMap/EntrySetIterator",262),eTS(417,1,eUE,fE),eUe.Nb=function(e){F8(this,e)},eUe.Ob=function(){return Et(this)},eUe.Pb=function(){return HL(this)},eUe.Qb=function(){BH(this)},eUe.b=0,eUe.c=-1,Y5(eUS,"AbstractList/IteratorImpl",417),eTS(96,417,eUC,KB),eUe.Qb=function(){BH(this)},eUe.Rb=function(e){CD(this,e)},eUe.Sb=function(){return this.b>0},eUe.Tb=function(){return this.b},eUe.Ub=function(){return A6(this.b>0),this.a.Xb(this.c=--this.b)},eUe.Vb=function(){return this.b-1},eUe.Wb=function(e){A4(-1!=this.c),this.a._c(this.c,e)},Y5(eUS,"AbstractList/ListIteratorImpl",96),eTS(219,52,eU5,Gz),eUe.Vc=function(e,t){Gp(e,this.b),this.c.Vc(this.a+e,t),++this.b},eUe.Xb=function(e){return GK(e,this.b),this.c.Xb(this.a+e)},eUe.$c=function(e){var t;return GK(e,this.b),t=this.c.$c(this.a+e),--this.b,t},eUe._c=function(e,t){return GK(e,this.b),this.c._c(this.a+e,t)},eUe.gc=function(){return 
this.b},eUe.a=0,eUe.b=0,Y5(eUS,"AbstractList/SubList",219),eTS(384,eUT,eUM,fk),eUe.$b=function(){this.a.$b()},eUe.Hc=function(e){return this.a._b(e)},eUe.Kc=function(){var e;return e=this.a.vc().Kc(),new fx(e)},eUe.Mc=function(e){return!!this.a._b(e)&&(this.a.Bc(e),!0)},eUe.gc=function(){return this.a.gc()},Y5(eUS,"AbstractMap/1",384),eTS(691,1,eUE,fx),eUe.Nb=function(e){F8(this,e)},eUe.Ob=function(){return this.a.Ob()},eUe.Pb=function(){var e;return(e=Pp(this.a.Pb(),42)).cd()},eUe.Qb=function(){this.a.Qb()},Y5(eUS,"AbstractMap/1/1",691),eTS(226,28,eUx,fT),eUe.$b=function(){this.a.$b()},eUe.Hc=function(e){return this.a.uc(e)},eUe.Kc=function(){var e;return e=this.a.vc().Kc(),new fN(e)},eUe.gc=function(){return this.a.gc()},Y5(eUS,"AbstractMap/2",226),eTS(294,1,eUE,fN),eUe.Nb=function(e){F8(this,e)},eUe.Ob=function(){return this.a.Ob()},eUe.Pb=function(){var e;return(e=Pp(this.a.Pb(),42)).dd()},eUe.Qb=function(){this.a.Qb()},Y5(eUS,"AbstractMap/2/1",294),eTS(484,1,{484:1,42:1}),eUe.Fb=function(e){var t;return!!M4(e,42)&&(t=Pp(e,42),UT(this.d,t.cd())&&UT(this.e,t.dd()))},eUe.cd=function(){return this.d},eUe.dd=function(){return this.e},eUe.Hb=function(){return TK(this.d)^TK(this.e)},eUe.ed=function(e){return CL(this,e)},eUe.Ib=function(){return this.d+"="+this.e},Y5(eUS,"AbstractMap/AbstractEntry",484),eTS(383,484,{484:1,383:1,42:1},EE),Y5(eUS,"AbstractMap/SimpleEntry",383),eTS(1984,1,e$t),eUe.Fb=function(e){var t;return!!M4(e,42)&&(t=Pp(e,42),UT(this.cd(),t.cd())&&UT(this.dd(),t.dd()))},eUe.Hb=function(){return TK(this.cd())^TK(this.dd())},eUe.Ib=function(){return this.cd()+"="+this.dd()},Y5(eUS,eUD,1984),eTS(1992,1967,eUO),eUe.tc=function(e){return ZO(this,e)},eUe._b=function(e){return IY(this,e)},eUe.vc=function(){return new fj(this)},eUe.xc=function(e){var t;return xu(esq(this,t=e))},eUe.ec=function(){return new fP(this)},Y5(eUS,"AbstractNavigableMap",1992),eTS(739,eUT,eUM,fj),eUe.Hc=function(e){return M4(e,42)&&ZO(this.b,Pp(e,42))},eUe.Kc=function(){return new 
C1(this.b)},eUe.Mc=function(e){var t;return!!M4(e,42)&&(t=Pp(e,42),Jl(this.b,t))},eUe.gc=function(){return this.b.c},Y5(eUS,"AbstractNavigableMap/EntrySet",739),eTS(493,eUT,eUL,fP),eUe.Nc=function(){return new Ec(this)},eUe.$b=function(){gl(this.a)},eUe.Hc=function(e){return IY(this.a,e)},eUe.Kc=function(){var e;return e=new C1(new Ap(this.a).b),new fR(e)},eUe.Mc=function(e){return!!IY(this.a,e)&&(zS(this.a,e),!0)},eUe.gc=function(){return this.a.c},Y5(eUS,"AbstractNavigableMap/NavigableKeySet",493),eTS(494,1,eUE,fR),eUe.Nb=function(e){F8(this,e)},eUe.Ob=function(){return Et(this.a.a)},eUe.Pb=function(){var e;return(e=AJ(this.a)).cd()},eUe.Qb=function(){I5(this.a)},Y5(eUS,"AbstractNavigableMap/NavigableKeySet/1",494),eTS(2004,28,eUx),eUe.Fc=function(e){return Ja(e_s(this,e)),!0},eUe.Gc=function(e){return BJ(e),PG(e!=this,"Can't add a queue to itself"),er7(this,e)},eUe.$b=function(){for(;null!=eev(this););},Y5(eUS,"AbstractQueue",2004),eTS(302,28,{4:1,20:1,28:1,14:1},p1,GZ),eUe.Fc=function(e){return Vy(this,e),!0},eUe.$b=function(){qr(this)},eUe.Hc=function(e){return eos(new UN(this),e)},eUe.dc=function(){return gY(this)},eUe.Kc=function(){return new UN(this)},eUe.Mc=function(e){return zP(new UN(this),e)},eUe.gc=function(){return this.c-this.b&this.a.length-1},eUe.Nc=function(){return new Gq(this,272)},eUe.Qc=function(e){var t;return t=this.c-this.b&this.a.length-1,e.lengtht&&Bc(e,t,null),e},eUe.b=0,eUe.c=0,Y5(eUS,"ArrayDeque",302),eTS(446,1,eUE,UN),eUe.Nb=function(e){F8(this,e)},eUe.Ob=function(){return this.a!=this.b},eUe.Pb=function(){return ecn(this)},eUe.Qb=function(){enP(this)},eUe.a=0,eUe.b=0,eUe.c=-1,Y5(eUS,"ArrayDeque/IteratorImpl",446),eTS(12,52,e$n,p0,XM,I4),eUe.Vc=function(e,t){jO(this,e,t)},eUe.Fc=function(e){return P_(this,e)},eUe.Wc=function(e,t){return euP(this,e,t)},eUe.Gc=function(e){return eoc(this,e)},eUe.$b=function(){this.c=Je(e1R,eUp,1,0,5,1)},eUe.Hc=function(e){return -1!=QI(this,e,0)},eUe.Jc=function(e){ety(this,e)},eUe.Xb=function(e){return 
RJ(this,e)},eUe.Xc=function(e){return QI(this,e,0)},eUe.dc=function(){return 0==this.c.length},eUe.Kc=function(){return new fz(this)},eUe.$c=function(e){return ZV(this,e)},eUe.Mc=function(e){return QA(this,e)},eUe.Ud=function(e,t){GG(this,e,t)},eUe._c=function(e,t){return q1(this,e,t)},eUe.gc=function(){return this.c.length},eUe.ad=function(e){Mv(this,e)},eUe.Pc=function(){return AW(this)},eUe.Qc=function(e){return epg(this,e)};var e0n=Y5(eUS,"ArrayList",12);eTS(7,1,eUE,fz),eUe.Nb=function(e){F8(this,e)},eUe.Ob=function(){return My(this)},eUe.Pb=function(){return Wx(this)},eUe.Qb=function(){Yv(this)},eUe.a=0,eUe.b=-1,Y5(eUS,"ArrayList/1",7),eTS(2013,eB4.Function,{},S),eUe.te=function(e,t){return elN(e,t)},eTS(154,52,e$r,g$),eUe.Hc=function(e){return -1!=enW(this,e)},eUe.Jc=function(e){var t,n,r,i;for(BJ(e),n=this.a,r=0,i=n.length;r>>0).toString(16))},eUe.f=0,eUe.i=eH1;var e2X=Y5(e$N,"CNode",57);eTS(814,1,{},b5),Y5(e$N,"CNode/CNodeBuilder",814),eTS(1525,1,{},eh),eUe.Oe=function(e,t){return 0},eUe.Pe=function(e,t){return 0},Y5(e$N,e$R,1525),eTS(1790,1,{},ep),eUe.Le=function(e){var t,n,r,i,a,o,s,u,c,l,f,d,h,p,b;for(c=eHQ,r=new fz(e.a.b);r.ar.d.c||r.d.c==a.d.c&&r.d.b0?e+this.n.d+this.n.a:0},eUe.Se=function(){var e,t,n,r,i;if(i=0,this.e)this.b?i=this.b.a:this.a[1][1]&&(i=this.a[1][1].Se());else if(this.g)i=efV(this,evf(this,null,!0));else for(t=(etx(),eow(vx(e26,1),eU4,232,0,[e3D,e3N,e3P])),n=0,r=t.length;n0?i+this.n.b+this.n.c:0},eUe.Te=function(){var e,t,n,r,i;if(this.g)for(e=evf(this,null,!1),n=(etx(),eow(vx(e26,1),eU4,232,0,[e3D,e3N,e3P])),r=0,i=n.length;r0&&(r[0]+=this.d,n-=r[0]),r[2]>0&&(r[2]+=this.d,n-=r[2]),this.c.a=eB4.Math.max(0,n),this.c.d=t.d+e.d+(this.c.a-n)/2,r[1]=eB4.Math.max(r[1],n),ZP(this,e3N,t.d+e.d+r[0]-(r[1]-n)/2,r)},eUe.b=null,eUe.d=0,eUe.e=!1,eUe.f=!1,eUe.g=!1;var e29=0,e28=0;Y5(e$9,"GridContainerCell",1473),eTS(461,22,{3:1,35:1,22:1,461:1},EY);var 
e27=enw(e$9,"HorizontalLabelAlignment",461,e1G,G1,Dc);eTS(306,212,{212:1,306:1},zf,etr,$Y),eUe.Re=function(){return Rf(this)},eUe.Se=function(){return Rd(this)},eUe.a=0,eUe.c=!1;var e3e=Y5(e$9,"LabelCell",306);eTS(244,326,{212:1,326:1,244:1},eh6),eUe.Re=function(){return ek1(this)},eUe.Se=function(){return ek0(this)},eUe.Te=function(){eNE(this)},eUe.Ue=function(){eNM(this)},eUe.b=0,eUe.c=0,eUe.d=!1,Y5(e$9,"StripContainerCell",244),eTS(1626,1,eU8,e_),eUe.Mb=function(e){return gU(Pp(e,212))},Y5(e$9,"StripContainerCell/lambda$0$Type",1626),eTS(1627,1,{},eE),eUe.Fe=function(e){return Pp(e,212).Se()},Y5(e$9,"StripContainerCell/lambda$1$Type",1627),eTS(1628,1,eU8,eS),eUe.Mb=function(e){return gH(Pp(e,212))},Y5(e$9,"StripContainerCell/lambda$2$Type",1628),eTS(1629,1,{},ek),eUe.Fe=function(e){return Pp(e,212).Re()},Y5(e$9,"StripContainerCell/lambda$3$Type",1629),eTS(462,22,{3:1,35:1,22:1,462:1},EB);var e3t=enw(e$9,"VerticalLabelAlignment",462,e1G,G0,Dl);eTS(789,1,{},eFQ),eUe.c=0,eUe.d=0,eUe.k=0,eUe.s=0,eUe.t=0,eUe.v=!1,eUe.w=0,eUe.D=!1,Y5(eza,"NodeContext",789),eTS(1471,1,e$C,ex),eUe.ue=function(e,t){return To(Pp(e,61),Pp(t,61))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eza,"NodeContext/0methodref$comparePortSides$Type",1471),eTS(1472,1,e$C,eT),eUe.ue=function(e,t){return ew9(Pp(e,111),Pp(t,111))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eza,"NodeContext/1methodref$comparePortContexts$Type",1472),eTS(159,22,{3:1,35:1,22:1,159:1},ei_);var 
e3n=enw(eza,"NodeLabelLocation",159,e1G,epE,Df);eTS(111,1,{111:1},exz),eUe.a=!1,Y5(eza,"PortContext",111),eTS(1476,1,eUF,eM),eUe.td=function(e){yQ(Pp(e,306))},Y5(ezu,ezc,1476),eTS(1477,1,eU8,eO),eUe.Mb=function(e){return!!Pp(e,111).c},Y5(ezu,ezl,1477),eTS(1478,1,eUF,eA),eUe.td=function(e){yQ(Pp(e,111).c)},Y5(ezu,"LabelPlacer/lambda$2$Type",1478),eTS(1475,1,eUF,eC),eUe.td=function(e){Cn(),bu(Pp(e,111))},Y5(ezu,"NodeLabelAndSizeUtilities/lambda$0$Type",1475),eTS(790,1,eUF,Dx),eUe.td=function(e){_H(this.b,this.c,this.a,Pp(e,181))},eUe.a=!1,eUe.c=!1,Y5(ezu,"NodeLabelCellCreator/lambda$0$Type",790),eTS(1474,1,eUF,db),eUe.td=function(e){bB(this.a,Pp(e,181))},Y5(ezu,"PortContextCreator/lambda$0$Type",1474),eTS(1829,1,{},eI),Y5(ezd,"GreedyRectangleStripOverlapRemover",1829),eTS(1830,1,e$C,eL),eUe.ue=function(e,t){return Ay(Pp(e,222),Pp(t,222))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(ezd,"GreedyRectangleStripOverlapRemover/0methodref$compareByYCoordinate$Type",1830),eTS(1786,1,{},me),eUe.a=5,eUe.e=0,Y5(ezd,"RectangleStripOverlapRemover",1786),eTS(1787,1,e$C,eN),eUe.ue=function(e,t){return Aw(Pp(e,222),Pp(t,222))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(ezd,"RectangleStripOverlapRemover/0methodref$compareLeftRectangleBorders$Type",1787),eTS(1789,1,e$C,eP),eUe.ue=function(e,t){return YY(Pp(e,222),Pp(t,222))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(ezd,"RectangleStripOverlapRemover/1methodref$compareRightRectangleBorders$Type",1789),eTS(406,22,{3:1,35:1,22:1,406:1},EU);var e3r=enw(ezd,"RectangleStripOverlapRemover/OverlapRemovalDirection",406,e1G,Vn,Dd);eTS(222,1,{222:1},jH),Y5(ezd,"RectangleStripOverlapRemover/RectangleNode",222),eTS(1788,1,eUF,dm),eUe.td=function(e){emA(this.a,Pp(e,222))},Y5(ezd,"RectangleStripOverlapRemover/lambda$1$Type",1788),eTS(1304,1,e$C,eR),eUe.ue=function(e,t){return eRu(Pp(e,167),Pp(t,167))},eUe.Fb=function(e){return 
this===e},eUe.ve=function(){return new fZ(this)},Y5(ezp,"PolyominoCompactor/CornerCasesGreaterThanRestComparator",1304),eTS(1307,1,{},ej),eUe.Kb=function(e){return Pp(e,324).a},Y5(ezp,"PolyominoCompactor/CornerCasesGreaterThanRestComparator/lambda$0$Type",1307),eTS(1308,1,eU8,eF),eUe.Mb=function(e){return Pp(e,323).a},Y5(ezp,"PolyominoCompactor/CornerCasesGreaterThanRestComparator/lambda$1$Type",1308),eTS(1309,1,eU8,eY),eUe.Mb=function(e){return Pp(e,323).a},Y5(ezp,"PolyominoCompactor/CornerCasesGreaterThanRestComparator/lambda$2$Type",1309),eTS(1302,1,e$C,eB),eUe.ue=function(e,t){return eC9(Pp(e,167),Pp(t,167))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(ezp,"PolyominoCompactor/MinNumOfExtensionDirectionsComparator",1302),eTS(1305,1,{},eD),eUe.Kb=function(e){return Pp(e,324).a},Y5(ezp,"PolyominoCompactor/MinNumOfExtensionDirectionsComparator/lambda$0$Type",1305),eTS(767,1,e$C,eU),eUe.ue=function(e,t){return eaq(Pp(e,167),Pp(t,167))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(ezp,"PolyominoCompactor/MinNumOfExtensionsComparator",767),eTS(1300,1,e$C,eH),eUe.ue=function(e,t){return ery(Pp(e,321),Pp(t,321))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(ezp,"PolyominoCompactor/MinPerimeterComparator",1300),eTS(1301,1,e$C,e$),eUe.ue=function(e,t){return ebg(Pp(e,321),Pp(t,321))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(ezp,"PolyominoCompactor/MinPerimeterComparatorWithShape",1301),eTS(1303,1,e$C,ez),eUe.ue=function(e,t){return eIz(Pp(e,167),Pp(t,167))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(ezp,"PolyominoCompactor/SingleExtensionSideGreaterThanRestComparator",1303),eTS(1306,1,{},eG),eUe.Kb=function(e){return Pp(e,324).a},Y5(ezp,"PolyominoCompactor/SingleExtensionSideGreaterThanRestComparator/lambda$0$Type",1306),eTS(777,1,{},EC),eUe.Ce=function(e,t){return 
KG(this,Pp(e,46),Pp(t,167))},Y5(ezp,"SuccessorCombination",777),eTS(644,1,{},eW),eUe.Ce=function(e,t){var n;return exd((n=Pp(e,46),Pp(t,167),n))},Y5(ezp,"SuccessorJitter",644),eTS(643,1,{},eK),eUe.Ce=function(e,t){var n;return eAW((n=Pp(e,46),Pp(t,167),n))},Y5(ezp,"SuccessorLineByLine",643),eTS(568,1,{},eV),eUe.Ce=function(e,t){var n;return eMl((n=Pp(e,46),Pp(t,167),n))},Y5(ezp,"SuccessorManhattan",568),eTS(1356,1,{},eq),eUe.Ce=function(e,t){var n;return eAt((n=Pp(e,46),Pp(t,167),n))},Y5(ezp,"SuccessorMaxNormWindingInMathPosSense",1356),eTS(400,1,{},dg),eUe.Ce=function(e,t){return YO(this,e,t)},eUe.c=!1,eUe.d=!1,eUe.e=!1,eUe.f=!1,Y5(ezp,"SuccessorQuadrantsGeneric",400),eTS(1357,1,{},eZ),eUe.Kb=function(e){return Pp(e,324).a},Y5(ezp,"SuccessorQuadrantsGeneric/lambda$0$Type",1357),eTS(323,22,{3:1,35:1,22:1,323:1},EN),eUe.a=!1;var e3i=enw(ezy,ezw,323,e1G,Va,Dh);eTS(1298,1,{}),eUe.Ib=function(){var e,t,n,r,i,a;for(i=0,n=" ",e=ell(0);i=0?"b"+e+"["+q2(this.a)+"]":"b["+q2(this.a)+"]":"b_"+Ao(this)},Y5(ez0,"FBendpoint",559),eTS(282,134,{3:1,282:1,94:1,134:1},CH),eUe.Ib=function(){return q2(this)},Y5(ez0,"FEdge",282),eTS(231,134,{3:1,231:1,94:1,134:1},Z5);var e4_=Y5(ez0,"FGraph",231);eTS(447,357,{3:1,447:1,357:1,94:1,134:1},qp),eUe.Ib=function(){return null==this.b||0==this.b.length?"l["+q2(this.a)+"]":"l_"+this.b},Y5(ez0,"FLabel",447),eTS(144,357,{3:1,144:1,357:1,94:1,134:1},Bw),eUe.Ib=function(){return WH(this)},eUe.b=0,Y5(ez0,"FNode",144),eTS(2003,1,{}),eUe.bf=function(e){eD2(this,e)},eUe.cf=function(){emz(this)},eUe.d=0,Y5(ez3,"AbstractForceModel",2003),eTS(631,2003,{631:1},eaR),eUe.af=function(e,t){var n,r,i,a,o;return ekL(this.f,e,t),i=C6(MB(t.d),e.d),o=eB4.Math.sqrt(i.a*i.a+i.b*i.b),r=eB4.Math.max(0,o-B$(e.e)/2-B$(t.e)/2),a=(n=esT(this.e,e,t))>0?-YT(r,this.c)*n:Li(r,this.b)*Pp(e_k(e,(eCk(),e9M)),19).a,Ol(i,a/o),i},eUe.bf=function(e){eD2(this,e),this.a=Pp(e_k(e,(eCk(),e9g)),19).a,this.c=gP(LV(e_k(e,e9D))),this.b=gP(LV(e_k(e,e9A)))},eUe.df=function(e){return 
e0&&(a-=gg(r,this.a)*n),Ol(i,a*this.b/o),i},eUe.bf=function(e){var t,n,r,i,a,o,s;for(eD2(this,e),this.b=gP(LV(e_k(e,(eCk(),e9N)))),this.c=this.b/Pp(e_k(e,e9g),19).a,r=e.e.c.length,a=0,i=0,s=new fz(e.e);s.a0},eUe.a=0,eUe.b=0,eUe.c=0,Y5(ez3,"FruchtermanReingoldModel",632),eTS(849,1,e$2,cu),eUe.Qe=function(e){efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,ez4),""),"Force Model"),"Determines the model for force calculation."),e9a),(eSd(),tdv)),e4E),el9((epx(),tdh))))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,ez5),""),"Iterations"),"The number of iterations on the force model."),ell(300)),tdw),e15),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,ez6),""),"Repulsive Power"),"Determines how many bend points are added to the edge; such bend points are regarded as repelling particles in the force model"),ell(0)),tdw),e15),el9(tdl)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,ez9),""),"FR Temperature"),"The temperature is used as a scaling factor for particle displacements."),ez8),tdg),e13),el9(tdh)))),K_(e,ez9,ez4,e9l),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,ez7),""),"Eades Repulsion"),"Factor for repulsive forces in Eades' model."),5),tdg),e13),el9(tdh)))),K_(e,ez7,ez4,e9s),eYi((new cc,e))},Y5(eGe,"ForceMetaDataProvider",849),eTS(424,22,{3:1,35:1,22:1,424:1},EH);var e4E=enw(eGe,"ForceModelStrategy",424,e1G,$9,Dm);eTS(988,1,e$2,cc),eUe.Qe=function(e){eYi(e)},Y5(eGe,"ForceOptions",988),eTS(989,1,{},tr),eUe.$e=function(){return new b0},eUe._e=function(e){},Y5(eGe,"ForceOptions/ForceFactory",989),eTS(850,1,e$2,cl),eUe.Qe=function(e){efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGw),""),"Fixed Position"),"Prevent that the node is moved by the layout algorithm."),(OQ(),!1)),(eSd(),tdm)),e11),el9((epx(),tdd))))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eG_),""),"Desired Edge Length"),"Either specified for parent nodes or for individual edges, where the latter takes higher precedence."),100),tdg),e13),jL(tdh,eow(vx(e5Q,1),eU4,175,0,[tdl]))))),efO(e,new 
eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGE),""),"Layout Dimension"),"Dimensions that are permitted to be altered during layout."),e9U),tdv),e4S),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGS),""),"Stress Epsilon"),"Termination criterion for the iterative process."),ez8),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGk),""),"Iteration Limit"),"Maximum number of performed iterations. Takes higher precedence than 'epsilon'."),ell(eUu)),tdw),e15),el9(tdh)))),ejQ((new cf,e))},Y5(eGe,"StressMetaDataProvider",850),eTS(992,1,e$2,cf),eUe.Qe=function(e){ejQ(e)},Y5(eGe,"StressOptions",992),eTS(993,1,{},ti),eUe.$e=function(){return new C$},eUe._e=function(e){},Y5(eGe,"StressOptions/StressFactory",993),eTS(1128,209,ezL,C$),eUe.Ze=function(e,t){var n,r,i,a,o;for(ewG(t,eGT,1),gN(LK(eT8(e,(egq(),e9q))))?gN(LK(eT8(e,e90)))||zh(n=new df((_q(),new gM(e)))):eOs(new b0,e,eiI(t,1)),i=eo4(e),o=(r=eNx(this.a,i)).Kc();o.Ob();)!((a=Pp(o.Pb(),231)).e.c.length<=1)&&(eRa(this.b,a),eMn(this.b),ety(a.d,new ta));i=eYC(r),eYh(i),eEj(t)},Y5(eGO,"StressLayoutProvider",1128),eTS(1129,1,eUF,ta),eUe.td=function(e){ePd(Pp(e,447))},Y5(eGO,"StressLayoutProvider/lambda$0$Type",1129),eTS(990,1,{},bP),eUe.c=0,eUe.e=0,eUe.g=0,Y5(eGO,"StressMajorization",990),eTS(379,22,{3:1,35:1,22:1,379:1},E$);var e4S=enw(eGO,"StressMajorization/Dimension",379,e1G,G3,Dg);eTS(991,1,e$C,dE),eUe.ue=function(e,t){return IA(this.a,Pp(e,144),Pp(t,144))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new 
fZ(this)},Y5(eGO,"StressMajorization/lambda$0$Type",991),eTS(1229,1,{},W9),Y5(eGL,"ElkLayered",1229),eTS(1230,1,eUF,to),eUe.td=function(e){exn(Pp(e,37))},Y5(eGL,"ElkLayered/lambda$0$Type",1230),eTS(1231,1,eUF,dS),eUe.td=function(e){IL(this.a,Pp(e,37))},Y5(eGL,"ElkLayered/lambda$1$Type",1231),eTS(1263,1,{},MC),Y5(eGL,"GraphConfigurator",1263),eTS(759,1,eUF,dk),eUe.td=function(e){e_1(this.a,Pp(e,10))},Y5(eGL,"GraphConfigurator/lambda$0$Type",759),eTS(760,1,{},ts),eUe.Kb=function(e){return evR(),new R1(null,new Gq(Pp(e,29).a,16))},Y5(eGL,"GraphConfigurator/lambda$1$Type",760),eTS(761,1,eUF,dx),eUe.td=function(e){e_1(this.a,Pp(e,10))},Y5(eGL,"GraphConfigurator/lambda$2$Type",761),eTS(1127,209,ezL,b3),eUe.Ze=function(e,t){var n;n=eN7(new mn,e),xc(eT8(e,(eBy(),taM)))===xc((eck(),tpz))?ef0(this.a,n,t):exD(this.a,n,t),eYr(new ch,n)},Y5(eGL,"LayeredLayoutProvider",1127),eTS(356,22,{3:1,35:1,22:1,356:1},Ez);var e4k=enw(eGL,"LayeredPhases",356,e1G,q4,Dv);eTS(1651,1,{},enX),eUe.i=0,Y5(eGC,"ComponentsToCGraphTransformer",1651),eTS(1652,1,{},tu),eUe.ef=function(e,t){return eB4.Math.min(null!=e.a?gP(e.a):e.c.i,null!=t.a?gP(t.a):t.c.i)},eUe.ff=function(e,t){return eB4.Math.min(null!=e.a?gP(e.a):e.c.i,null!=t.a?gP(t.a):t.c.i)},Y5(eGC,"ComponentsToCGraphTransformer/1",1652),eTS(81,1,{81:1}),eUe.i=0,eUe.k=!0,eUe.o=eH1;var e4x=Y5(eGI,"CNode",81);eTS(460,81,{460:1,81:1},Ah,eh3),eUe.Ib=function(){return""},Y5(eGC,"ComponentsToCGraphTransformer/CRectNode",460),eTS(1623,1,{},tc),Y5(eGC,"OneDimensionalComponentsCompaction",1623),eTS(1624,1,{},tl),eUe.Kb=function(e){return Gm(Pp(e,46))},eUe.Fb=function(e){return this===e},Y5(eGC,"OneDimensionalComponentsCompaction/lambda$0$Type",1624),eTS(1625,1,{},tf),eUe.Kb=function(e){return edl(Pp(e,46))},eUe.Fb=function(e){return 
this===e},Y5(eGC,"OneDimensionalComponentsCompaction/lambda$1$Type",1625),eTS(1654,1,{},Bv),Y5(eGI,"CGraph",1654),eTS(189,1,{189:1},eh4),eUe.b=0,eUe.c=0,eUe.e=0,eUe.g=!0,eUe.i=eH1,Y5(eGI,"CGroup",189),eTS(1653,1,{},tb),eUe.ef=function(e,t){return eB4.Math.max(null!=e.a?gP(e.a):e.c.i,null!=t.a?gP(t.a):t.c.i)},eUe.ff=function(e,t){return eB4.Math.max(null!=e.a?gP(e.a):e.c.i,null!=t.a?gP(t.a):t.c.i)},Y5(eGI,e$R,1653),eTS(1655,1,{},exO),eUe.d=!1;var e4T=Y5(eGI,e$U,1655);eTS(1656,1,{},tm),eUe.Kb=function(e){return _T(),OQ(),0!=Pp(Pp(e,46).a,81).d.e},eUe.Fb=function(e){return this===e},Y5(eGI,e$H,1656),eTS(823,1,{},R$),eUe.a=!1,eUe.b=!1,eUe.c=!1,eUe.d=!1,Y5(eGI,e$$,823),eTS(1825,1,{},j$),Y5(eGD,e$z,1825);var e4M=RL(eGN,e$D);eTS(1826,1,{369:1},$h),eUe.Ke=function(e){eLh(this,Pp(e,466))},Y5(eGD,e$G,1826),eTS(1827,1,e$C,tg),eUe.ue=function(e,t){return Hy(Pp(e,81),Pp(t,81))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eGD,e$W,1827),eTS(466,1,{466:1},E6),eUe.a=!1,Y5(eGD,e$K,466),eTS(1828,1,e$C,tv),eUe.ue=function(e,t){return evP(Pp(e,466),Pp(t,466))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eGD,e$V,1828),eTS(140,1,{140:1},Se,PW),eUe.Fb=function(e){var t;return null!=e&&e4O==esF(e)&&(t=Pp(e,140),UT(this.c,t.c)&&UT(this.d,t.d))},eUe.Hb=function(){return euF(eow(vx(e1R,1),eUp,1,5,[this.c,this.d]))},eUe.Ib=function(){return"("+this.c+eUd+this.d+(this.a?"cx":"")+this.b+")"},eUe.a=!0,eUe.c=0,eUe.d=0;var e4O=Y5(eGN,"Point",140);eTS(405,22,{3:1,35:1,22:1,405:1},EG);var e4A=enw(eGN,"Point/Quadrant",405,e1G,Vo,Dy);eTS(1642,1,{},b6),eUe.b=null,eUe.c=null,eUe.d=null,eUe.e=null,eUe.f=null,Y5(eGN,"RectilinearConvexHull",1642),eTS(574,1,{369:1},epG),eUe.Ke=function(e){J4(this,Pp(e,140))},eUe.b=0,Y5(eGN,"RectilinearConvexHull/MaximalElementsEventHandler",574),eTS(1644,1,e$C,th),eUe.ue=function(e,t){return U3(LV(e),LV(t))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new 
fZ(this)},Y5(eGN,"RectilinearConvexHull/MaximalElementsEventHandler/lambda$0$Type",1644),eTS(1643,1,{369:1},ete),eUe.Ke=function(e){eAo(this,Pp(e,140))},eUe.a=0,eUe.b=null,eUe.c=null,eUe.d=null,eUe.e=null,Y5(eGN,"RectilinearConvexHull/RectangleEventHandler",1643),eTS(1645,1,e$C,tp),eUe.ue=function(e,t){return WI(Pp(e,140),Pp(t,140))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eGN,"RectilinearConvexHull/lambda$0$Type",1645),eTS(1646,1,e$C,td),eUe.ue=function(e,t){return WD(Pp(e,140),Pp(t,140))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eGN,"RectilinearConvexHull/lambda$1$Type",1646),eTS(1647,1,e$C,ty),eUe.ue=function(e,t){return WP(Pp(e,140),Pp(t,140))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eGN,"RectilinearConvexHull/lambda$2$Type",1647),eTS(1648,1,e$C,tw),eUe.ue=function(e,t){return WN(Pp(e,140),Pp(t,140))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eGN,"RectilinearConvexHull/lambda$3$Type",1648),eTS(1649,1,e$C,t_),eUe.ue=function(e,t){return e_M(Pp(e,140),Pp(t,140))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eGN,"RectilinearConvexHull/lambda$4$Type",1649),eTS(1650,1,{},Gf),Y5(eGN,"Scanline",1650),eTS(2005,1,{}),Y5(eGP,"AbstractGraphPlacer",2005),eTS(325,1,{325:1},Lm),eUe.mf=function(e){return!!this.nf(e)&&(exg(this.b,Pp(e_k(e,(eBU(),ttX)),21),e),!0)},eUe.nf=function(e){var t,n,r,i;for(t=Pp(e_k(e,(eBU(),ttX)),21),r=(i=Pp(Zq(e8E,t),21)).Kc();r.Ob();)if(n=Pp(r.Pb(),21),!Pp(Zq(this.b,n),15).dc())return!1;return!0},Y5(eGP,"ComponentGroup",325),eTS(765,2005,{},b9),eUe.of=function(e){var t,n;for(n=new fz(this.a);n.ah&&(_=0,E+=d+i,d=0),m=o.c,eIn(o,_+m.a,E+m.b),xB(m),n=eB4.Math.max(n,_+v.a),d=eB4.Math.max(d,v.b),_+=v.a+i;if(t.f.a=n,t.f.b=E+d,gN(LK(e_k(a,tiQ)))){for(eBb(r=new 
tE,e,i),f=e.Kc();f.Ob();)C5(xB((l=Pp(f.Pb(),37)).c),r.e);C5(xB(t.f),r.a)}JN(t,e)},Y5(eGP,"SimpleRowGraphPlacer",1291),eTS(1292,1,e$C,tx),eUe.ue=function(e,t){return eaV(Pp(e,37),Pp(t,37))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eGP,"SimpleRowGraphPlacer/1",1292),eTS(1262,1,e$q,tT),eUe.Lb=function(e){var t;return!!(t=Pp(e_k(Pp(e,243).b,(eBy(),taR)),74))&&0!=t.b},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){var t;return!!(t=Pp(e_k(Pp(e,243).b,(eBy(),taR)),74))&&0!=t.b},Y5(eGY,"CompoundGraphPostprocessor/1",1262),eTS(1261,1,eGB,mr),eUe.pf=function(e,t){ebL(this,Pp(e,37),t)},Y5(eGY,"CompoundGraphPreprocessor",1261),eTS(441,1,{441:1},ec8),eUe.c=!1,Y5(eGY,"CompoundGraphPreprocessor/ExternalPort",441),eTS(243,1,{243:1},DT),eUe.Ib=function(){return AV(this.c)+":"+ek5(this.b)},Y5(eGY,"CrossHierarchyEdge",243),eTS(763,1,e$C,dT),eUe.ue=function(e,t){return egB(this,Pp(e,243),Pp(t,243))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eGY,"CrossHierarchyEdgeComparator",763),eTS(299,134,{3:1,299:1,94:1,134:1}),eUe.p=0,Y5(eGU,"LGraphElement",299),eTS(17,299,{3:1,17:1,299:1,94:1,134:1},$b),eUe.Ib=function(){return ek5(this)};var e4C=Y5(eGU,"LEdge",17);eTS(37,299,{3:1,20:1,37:1,299:1,94:1,134:1},enJ),eUe.Jc=function(e){qX(this,e)},eUe.Kc=function(){return new fz(this.b)},eUe.Ib=function(){return 0==this.b.c.length?"G-unlayered"+e_F(this.a):0==this.a.c.length?"G-layered"+e_F(this.b):"G[layerless"+e_F(this.a)+", layers"+e_F(this.b)+"]"};var e4I=Y5(eGU,"LGraph",37);eTS(657,1,{}),eUe.qf=function(){return this.e.n},eUe.We=function(e){return e_k(this.e,e)},eUe.rf=function(){return this.e.o},eUe.sf=function(){return this.e.p},eUe.Xe=function(e){return Ln(this.e,e)},eUe.tf=function(e){this.e.n.a=e.a,this.e.n.b=e.b},eUe.uf=function(e){this.e.o.a=e.a,this.e.o.b=e.b},eUe.vf=function(e){this.e.p=e},Y5(eGU,"LGraphAdapters/AbstractLShapeAdapter",657),eTS(577,1,{839:1},dM),eUe.wf=function(){var 
e,t;if(!this.b)for(this.b=AH(this.a.b.c.length),t=new fz(this.a.b);t.a0&&eu7((GV(t-1,e.length),e.charCodeAt(t-1)),eGq);)--t;if(a> ",e),egu(n)),xM(xT((e.a+="[",e),n.i),"]")),e.a},eUe.c=!0,eUe.d=!1;var e4j=Y5(eGU,"LPort",11);eTS(397,1,eU$,dA),eUe.Jc=function(e){qX(this,e)},eUe.Kc=function(){var e;return e=new fz(this.a.e),new dL(e)},Y5(eGU,"LPort/1",397),eTS(1290,1,eUE,dL),eUe.Nb=function(e){F8(this,e)},eUe.Pb=function(){return Pp(Wx(this.a),17).c},eUe.Ob=function(){return My(this.a)},eUe.Qb=function(){Yv(this.a)},Y5(eGU,"LPort/1/1",1290),eTS(359,1,eU$,dC),eUe.Jc=function(e){qX(this,e)},eUe.Kc=function(){var e;return e=new fz(this.a.g),new dI(e)},Y5(eGU,"LPort/2",359),eTS(762,1,eUE,dI),eUe.Nb=function(e){F8(this,e)},eUe.Pb=function(){return Pp(Wx(this.a),17).d},eUe.Ob=function(){return My(this.a)},eUe.Qb=function(){Yv(this.a)},Y5(eGU,"LPort/2/1",762),eTS(1283,1,eU$,E5),eUe.Jc=function(e){qX(this,e)},eUe.Kc=function(){return new Z4(this)},Y5(eGU,"LPort/CombineIter",1283),eTS(201,1,eUE,Z4),eUe.Nb=function(e){F8(this,e)},eUe.Qb=function(){yI()},eUe.Ob=function(){return Ak(this)},eUe.Pb=function(){return My(this.a)?Wx(this.a):Wx(this.b)},Y5(eGU,"LPort/CombineIter/1",201),eTS(1285,1,e$q,tA),eUe.Lb=function(e){return FO(e)},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return eiA(),0!=Pp(e,11).e.c.length},Y5(eGU,"LPort/lambda$0$Type",1285),eTS(1284,1,e$q,tL),eUe.Lb=function(e){return FA(e)},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return eiA(),0!=Pp(e,11).g.c.length},Y5(eGU,"LPort/lambda$1$Type",1284),eTS(1286,1,e$q,tC),eUe.Lb=function(e){return eiA(),Pp(e,11).j==(eYu(),tbw)},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return eiA(),Pp(e,11).j==(eYu(),tbw)},Y5(eGU,"LPort/lambda$2$Type",1286),eTS(1287,1,e$q,tI),eUe.Lb=function(e){return eiA(),Pp(e,11).j==(eYu(),tby)},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return eiA(),Pp(e,11).j==(eYu(),tby)},Y5(eGU,"LPort/lambda$3$Type",1287),eTS(1288,1,e$q,tD),eUe.Lb=function(e){return 
eiA(),Pp(e,11).j==(eYu(),tbj)},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return eiA(),Pp(e,11).j==(eYu(),tbj)},Y5(eGU,"LPort/lambda$4$Type",1288),eTS(1289,1,e$q,tN),eUe.Lb=function(e){return eiA(),Pp(e,11).j==(eYu(),tbY)},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return eiA(),Pp(e,11).j==(eYu(),tbY)},Y5(eGU,"LPort/lambda$5$Type",1289),eTS(29,299,{3:1,20:1,299:1,29:1,94:1,134:1},By),eUe.Jc=function(e){qX(this,e)},eUe.Kc=function(){return new fz(this.a)},eUe.Ib=function(){return"L_"+QI(this.b.b,this,0)+e_F(this.a)},Y5(eGU,"Layer",29),eTS(1342,1,{},mn),Y5(eG0,eG2,1342),eTS(1346,1,{},tP),eUe.Kb=function(e){return ewH(Pp(e,82))},Y5(eG0,"ElkGraphImporter/0methodref$connectableShapeToNode$Type",1346),eTS(1349,1,{},tR),eUe.Kb=function(e){return ewH(Pp(e,82))},Y5(eG0,"ElkGraphImporter/1methodref$connectableShapeToNode$Type",1349),eTS(1343,1,eUF,dD),eUe.td=function(e){exW(this.a,Pp(e,118))},Y5(eG0,eG3,1343),eTS(1344,1,eUF,dN),eUe.td=function(e){exW(this.a,Pp(e,118))},Y5(eG0,eG4,1344),eTS(1345,1,{},tj),eUe.Kb=function(e){return new R1(null,new Gq(UF(Pp(e,79)),16))},Y5(eG0,eG5,1345),eTS(1347,1,eU8,dP),eUe.Mb=function(e){return TV(this.a,Pp(e,33))},Y5(eG0,eG6,1347),eTS(1348,1,{},tF),eUe.Kb=function(e){return new R1(null,new Gq(UY(Pp(e,79)),16))},Y5(eG0,"ElkGraphImporter/lambda$5$Type",1348),eTS(1350,1,eU8,dR),eUe.Mb=function(e){return Tq(this.a,Pp(e,33))},Y5(eG0,"ElkGraphImporter/lambda$7$Type",1350),eTS(1351,1,eU8,tY),eUe.Mb=function(e){return HH(Pp(e,79))},Y5(eG0,"ElkGraphImporter/lambda$8$Type",1351),eTS(1278,1,{},ch),Y5(eG0,"ElkGraphLayoutTransferrer",1278),eTS(1279,1,eU8,dj),eUe.Mb=function(e){return It(this.a,Pp(e,17))},Y5(eG0,"ElkGraphLayoutTransferrer/lambda$0$Type",1279),eTS(1280,1,eUF,dF),eUe.td=function(e){_k(),P_(this.a,Pp(e,17))},Y5(eG0,"ElkGraphLayoutTransferrer/lambda$1$Type",1280),eTS(1281,1,eU8,dY),eUe.Mb=function(e){return 
Ca(this.a,Pp(e,17))},Y5(eG0,"ElkGraphLayoutTransferrer/lambda$2$Type",1281),eTS(1282,1,eUF,dB),eUe.td=function(e){_k(),P_(this.a,Pp(e,17))},Y5(eG0,"ElkGraphLayoutTransferrer/lambda$3$Type",1282),eTS(1485,1,eGB,tB),eUe.pf=function(e,t){eiu(Pp(e,37),t)},Y5(eG8,"CommentNodeMarginCalculator",1485),eTS(1486,1,{},tU),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,29).a,16))},Y5(eG8,"CommentNodeMarginCalculator/lambda$0$Type",1486),eTS(1487,1,eUF,tH),eUe.td=function(e){ePO(Pp(e,10))},Y5(eG8,"CommentNodeMarginCalculator/lambda$1$Type",1487),eTS(1488,1,eGB,t$),eUe.pf=function(e,t){eLA(Pp(e,37),t)},Y5(eG8,"CommentPostprocessor",1488),eTS(1489,1,eGB,tz),eUe.pf=function(e,t){eF4(Pp(e,37),t)},Y5(eG8,"CommentPreprocessor",1489),eTS(1490,1,eGB,tG),eUe.pf=function(e,t){eOf(Pp(e,37),t)},Y5(eG8,"ConstraintsPostprocessor",1490),eTS(1491,1,eGB,tW),eUe.pf=function(e,t){eau(Pp(e,37),t)},Y5(eG8,"EdgeAndLayerConstraintEdgeReverser",1491),eTS(1492,1,eGB,tK),eUe.pf=function(e,t){edC(Pp(e,37),t)},Y5(eG8,"EndLabelPostprocessor",1492),eTS(1493,1,{},tV),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,29).a,16))},Y5(eG8,"EndLabelPostprocessor/lambda$0$Type",1493),eTS(1494,1,eU8,tq),eUe.Mb=function(e){return $T(Pp(e,10))},Y5(eG8,"EndLabelPostprocessor/lambda$1$Type",1494),eTS(1495,1,eUF,tZ),eUe.td=function(e){evj(Pp(e,10))},Y5(eG8,"EndLabelPostprocessor/lambda$2$Type",1495),eTS(1496,1,eGB,tX),eUe.pf=function(e,t){eSF(Pp(e,37),t)},Y5(eG8,"EndLabelPreprocessor",1496),eTS(1497,1,{},tJ),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,29).a,16))},Y5(eG8,"EndLabelPreprocessor/lambda$0$Type",1497),eTS(1498,1,eUF,DA),eUe.td=function(e){_$(this.a,this.b,this.c,Pp(e,10))},eUe.a=0,eUe.b=0,eUe.c=!1,Y5(eG8,"EndLabelPreprocessor/lambda$1$Type",1498),eTS(1499,1,eU8,tQ),eUe.Mb=function(e){return 
xc(e_k(Pp(e,70),(eBy(),tab)))===xc((etT(),tpS))},Y5(eG8,"EndLabelPreprocessor/lambda$2$Type",1499),eTS(1500,1,eUF,dU),eUe.td=function(e){P7(this.a,Pp(e,70))},Y5(eG8,"EndLabelPreprocessor/lambda$3$Type",1500),eTS(1501,1,eU8,t1),eUe.Mb=function(e){return xc(e_k(Pp(e,70),(eBy(),tab)))===xc((etT(),tpE))},Y5(eG8,"EndLabelPreprocessor/lambda$4$Type",1501),eTS(1502,1,eUF,dH),eUe.td=function(e){P7(this.a,Pp(e,70))},Y5(eG8,"EndLabelPreprocessor/lambda$5$Type",1502),eTS(1551,1,eGB,cd),eUe.pf=function(e,t){elP(Pp(e,37),t)},Y5(eG8,"EndLabelSorter",1551),eTS(1552,1,e$C,t0),eUe.ue=function(e,t){return epc(Pp(e,456),Pp(t,456))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eG8,"EndLabelSorter/1",1552),eTS(456,1,{456:1},HP),Y5(eG8,"EndLabelSorter/LabelGroup",456),eTS(1553,1,{},t2),eUe.Kb=function(e){return _O(),new R1(null,new Gq(Pp(e,29).a,16))},Y5(eG8,"EndLabelSorter/lambda$0$Type",1553),eTS(1554,1,eU8,t3),eUe.Mb=function(e){return _O(),Pp(e,10).k==(eEn(),e8N)},Y5(eG8,"EndLabelSorter/lambda$1$Type",1554),eTS(1555,1,eUF,t4),eUe.td=function(e){eEr(Pp(e,10))},Y5(eG8,"EndLabelSorter/lambda$2$Type",1555),eTS(1556,1,eU8,t5),eUe.Mb=function(e){return _O(),xc(e_k(Pp(e,70),(eBy(),tab)))===xc((etT(),tpE))},Y5(eG8,"EndLabelSorter/lambda$3$Type",1556),eTS(1557,1,eU8,t6),eUe.Mb=function(e){return _O(),xc(e_k(Pp(e,70),(eBy(),tab)))===xc((etT(),tpS))},Y5(eG8,"EndLabelSorter/lambda$4$Type",1557),eTS(1503,1,eGB,t9),eUe.pf=function(e,t){eP2(this,Pp(e,37))},eUe.b=0,eUe.c=0,Y5(eG8,"FinalSplineBendpointsCalculator",1503),eTS(1504,1,{},t8),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,29).a,16))},Y5(eG8,"FinalSplineBendpointsCalculator/lambda$0$Type",1504),eTS(1505,1,{},t7),eUe.Kb=function(e){return new R1(null,new YI(new Fa(OH(efc(Pp(e,10)).a.Kc(),new 
c))))},Y5(eG8,"FinalSplineBendpointsCalculator/lambda$1$Type",1505),eTS(1506,1,eU8,ne),eUe.Mb=function(e){return!q8(Pp(e,17))},Y5(eG8,"FinalSplineBendpointsCalculator/lambda$2$Type",1506),eTS(1507,1,eU8,nt),eUe.Mb=function(e){return Ln(Pp(e,17),(eBU(),tnO))},Y5(eG8,"FinalSplineBendpointsCalculator/lambda$3$Type",1507),eTS(1508,1,eUF,d$),eUe.td=function(e){eIV(this.a,Pp(e,128))},Y5(eG8,"FinalSplineBendpointsCalculator/lambda$4$Type",1508),eTS(1509,1,eUF,nn),eUe.td=function(e){eSj(Pp(e,17).a)},Y5(eG8,"FinalSplineBendpointsCalculator/lambda$5$Type",1509),eTS(792,1,eGB,dz),eUe.pf=function(e,t){ejn(this,Pp(e,37),t)},Y5(eG8,"GraphTransformer",792),eTS(511,22,{3:1,35:1,22:1,511:1},EV);var e4F=enw(eG8,"GraphTransformer/Mode",511,e1G,$8,NF);eTS(1510,1,eGB,nr),eUe.pf=function(e,t){eAP(Pp(e,37),t)},Y5(eG8,"HierarchicalNodeResizingProcessor",1510),eTS(1511,1,eGB,ni),eUe.pf=function(e,t){erP(Pp(e,37),t)},Y5(eG8,"HierarchicalPortConstraintProcessor",1511),eTS(1512,1,e$C,na),eUe.ue=function(e,t){return epZ(Pp(e,10),Pp(t,10))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eG8,"HierarchicalPortConstraintProcessor/NodeComparator",1512),eTS(1513,1,eGB,no),eUe.pf=function(e,t){eN5(Pp(e,37),t)},Y5(eG8,"HierarchicalPortDummySizeProcessor",1513),eTS(1514,1,eGB,ns),eUe.pf=function(e,t){eCf(this,Pp(e,37),t)},eUe.a=0,Y5(eG8,"HierarchicalPortOrthogonalEdgeRouter",1514),eTS(1515,1,e$C,nu),eUe.ue=function(e,t){return Av(Pp(e,10),Pp(t,10))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eG8,"HierarchicalPortOrthogonalEdgeRouter/1",1515),eTS(1516,1,e$C,nc),eUe.ue=function(e,t){return JW(Pp(e,10),Pp(t,10))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new 
fZ(this)},Y5(eG8,"HierarchicalPortOrthogonalEdgeRouter/2",1516),eTS(1517,1,eGB,nl),eUe.pf=function(e,t){e_O(Pp(e,37),t)},Y5(eG8,"HierarchicalPortPositionProcessor",1517),eTS(1518,1,eGB,cp),eUe.pf=function(e,t){eYG(this,Pp(e,37))},eUe.a=0,eUe.c=0,Y5(eG8,"HighDegreeNodeLayeringProcessor",1518),eTS(571,1,{571:1},nf),eUe.b=-1,eUe.d=-1,Y5(eG8,"HighDegreeNodeLayeringProcessor/HighDegreeNodeInformation",571),eTS(1519,1,{},nd),eUe.Kb=function(e){return DR(),efu(Pp(e,10))},eUe.Fb=function(e){return this===e},Y5(eG8,"HighDegreeNodeLayeringProcessor/lambda$0$Type",1519),eTS(1520,1,{},nh),eUe.Kb=function(e){return DR(),efc(Pp(e,10))},eUe.Fb=function(e){return this===e},Y5(eG8,"HighDegreeNodeLayeringProcessor/lambda$1$Type",1520),eTS(1526,1,eGB,np),eUe.pf=function(e,t){eD8(this,Pp(e,37),t)},Y5(eG8,"HyperedgeDummyMerger",1526),eTS(793,1,{},DL),eUe.a=!1,eUe.b=!1,eUe.c=!1,Y5(eG8,"HyperedgeDummyMerger/MergeState",793),eTS(1527,1,{},nb),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,29).a,16))},Y5(eG8,"HyperedgeDummyMerger/lambda$0$Type",1527),eTS(1528,1,{},nm),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,10).j,16))},Y5(eG8,"HyperedgeDummyMerger/lambda$1$Type",1528),eTS(1529,1,eUF,ng),eUe.td=function(e){Pp(e,11).p=-1},Y5(eG8,"HyperedgeDummyMerger/lambda$2$Type",1529),eTS(1530,1,eGB,nv),eUe.pf=function(e,t){eD6(Pp(e,37),t)},Y5(eG8,"HypernodesProcessor",1530),eTS(1531,1,eGB,ny),eUe.pf=function(e,t){eD9(Pp(e,37),t)},Y5(eG8,"InLayerConstraintProcessor",1531),eTS(1532,1,eGB,nw),eUe.pf=function(e,t){eiW(Pp(e,37),t)},Y5(eG8,"InnermostNodeMarginCalculator",1532),eTS(1533,1,eGB,n_),eUe.pf=function(e,t){eFW(this,Pp(e,37))},eUe.a=eH1,eUe.b=eH1,eUe.c=eHQ,eUe.d=eHQ;var e4Y=Y5(eG8,"InteractiveExternalPortPositioner",1533);eTS(1534,1,{},nE),eUe.Kb=function(e){return Pp(e,17).d.i},eUe.Fb=function(e){return this===e},Y5(eG8,"InteractiveExternalPortPositioner/lambda$0$Type",1534),eTS(1535,1,{},dG),eUe.Kb=function(e){return AE(this.a,LV(e))},eUe.Fb=function(e){return 
this===e},Y5(eG8,"InteractiveExternalPortPositioner/lambda$1$Type",1535),eTS(1536,1,{},nS),eUe.Kb=function(e){return Pp(e,17).c.i},eUe.Fb=function(e){return this===e},Y5(eG8,"InteractiveExternalPortPositioner/lambda$2$Type",1536),eTS(1537,1,{},dW),eUe.Kb=function(e){return AS(this.a,LV(e))},eUe.Fb=function(e){return this===e},Y5(eG8,"InteractiveExternalPortPositioner/lambda$3$Type",1537),eTS(1538,1,{},dK),eUe.Kb=function(e){return C9(this.a,LV(e))},eUe.Fb=function(e){return this===e},Y5(eG8,"InteractiveExternalPortPositioner/lambda$4$Type",1538),eTS(1539,1,{},dV),eUe.Kb=function(e){return C8(this.a,LV(e))},eUe.Fb=function(e){return this===e},Y5(eG8,"InteractiveExternalPortPositioner/lambda$5$Type",1539),eTS(77,22,{3:1,35:1,22:1,77:1,234:1},Eq),eUe.Kf=function(){switch(this.g){case 15:return new iA;case 22:return new iL;case 47:return new iD;case 28:case 35:return new nN;case 32:return new tB;case 42:return new t$;case 1:return new tz;case 41:return new tG;case 56:return new dz((erq(),e8W));case 0:return new dz((erq(),e8G));case 2:return new tW;case 54:return new tK;case 33:return new tX;case 51:return new t9;case 55:return new nr;case 13:return new ni;case 38:return new no;case 44:return new ns;case 40:return new nl;case 9:return new cp;case 49:return new AU;case 37:return new np;case 43:return new nv;case 27:return new ny;case 30:return new nw;case 3:return new n_;case 18:return new nx;case 29:return new nT;case 5:return new cb;case 50:return new nk;case 34:return new cm;case 36:return new nP;case 52:return new cd;case 11:return new nj;case 7:return new cv;case 39:return new nF;case 45:return new nY;case 16:return new nB;case 10:return new nU;case 48:return new n$;case 21:return new nz;case 23:return new gx((enU(),tui));case 8:return new nW;case 12:return new nV;case 4:return new nq;case 19:return new cE;case 17:return new n5;case 53:return new n6;case 6:return new rc;case 25:return new ms;case 46:return new rn;case 31:return new CV;case 14:return new rg;case 
26:return new iB;case 20:return new rE;case 24:return new gx((enU(),tua));default:throw p7(new gL(eWt+(null!=this.f?this.f:""+this.g)))}};var e4B=enw(eG8,eWn,77,e1G,eAn,Nj);eTS(1540,1,eGB,nx),eUe.pf=function(e,t){eFq(Pp(e,37),t)},Y5(eG8,"InvertedPortProcessor",1540),eTS(1541,1,eGB,nT),eUe.pf=function(e,t){eIR(Pp(e,37),t)},Y5(eG8,"LabelAndNodeSizeProcessor",1541),eTS(1542,1,eU8,nM),eUe.Mb=function(e){return Pp(e,10).k==(eEn(),e8N)},Y5(eG8,"LabelAndNodeSizeProcessor/lambda$0$Type",1542),eTS(1543,1,eU8,nO),eUe.Mb=function(e){return Pp(e,10).k==(eEn(),e8C)},Y5(eG8,"LabelAndNodeSizeProcessor/lambda$1$Type",1543),eTS(1544,1,eUF,DC),eUe.td=function(e){_z(this.b,this.a,this.c,Pp(e,10))},eUe.a=!1,eUe.c=!1,Y5(eG8,"LabelAndNodeSizeProcessor/lambda$2$Type",1544),eTS(1545,1,eGB,cb),eUe.pf=function(e,t){eFu(Pp(e,37),t)},Y5(eG8,"LabelDummyInserter",1545),eTS(1546,1,e$q,nA),eUe.Lb=function(e){return xc(e_k(Pp(e,70),(eBy(),tab)))===xc((etT(),tp_))},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return xc(e_k(Pp(e,70),(eBy(),tab)))===xc((etT(),tp_))},Y5(eG8,"LabelDummyInserter/1",1546),eTS(1547,1,eGB,nk),eUe.pf=function(e,t){eRz(Pp(e,37),t)},Y5(eG8,"LabelDummyRemover",1547),eTS(1548,1,eU8,nL),eUe.Mb=function(e){return gN(LK(e_k(Pp(e,70),(eBy(),tap))))},Y5(eG8,"LabelDummyRemover/lambda$0$Type",1548),eTS(1359,1,eGB,cm),eUe.pf=function(e,t){ejC(this,Pp(e,37),t)},eUe.a=null,Y5(eG8,"LabelDummySwitcher",1359),eTS(286,1,{286:1},eIu),eUe.c=0,eUe.d=null,eUe.f=0,Y5(eG8,"LabelDummySwitcher/LabelDummyInfo",286),eTS(1360,1,{},nC),eUe.Kb=function(e){return erJ(),new R1(null,new Gq(Pp(e,29).a,16))},Y5(eG8,"LabelDummySwitcher/lambda$0$Type",1360),eTS(1361,1,eU8,nI),eUe.Mb=function(e){return erJ(),Pp(e,10).k==(eEn(),e8I)},Y5(eG8,"LabelDummySwitcher/lambda$1$Type",1361),eTS(1362,1,{},dX),eUe.Kb=function(e){return 
Co(this.a,Pp(e,10))},Y5(eG8,"LabelDummySwitcher/lambda$2$Type",1362),eTS(1363,1,eUF,dJ),eUe.td=function(e){BO(this.a,Pp(e,286))},Y5(eG8,"LabelDummySwitcher/lambda$3$Type",1363),eTS(1364,1,e$C,nD),eUe.ue=function(e,t){return FL(Pp(e,286),Pp(t,286))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eG8,"LabelDummySwitcher/lambda$4$Type",1364),eTS(791,1,eGB,nN),eUe.pf=function(e,t){XT(Pp(e,37),t)},Y5(eG8,"LabelManagementProcessor",791),eTS(1549,1,eGB,nP),eUe.pf=function(e,t){eLr(Pp(e,37),t)},Y5(eG8,"LabelSideSelector",1549),eTS(1550,1,eU8,nR),eUe.Mb=function(e){return gN(LK(e_k(Pp(e,70),(eBy(),tap))))},Y5(eG8,"LabelSideSelector/lambda$0$Type",1550),eTS(1558,1,eGB,nj),eUe.pf=function(e,t){eN6(Pp(e,37),t)},Y5(eG8,"LayerConstraintPostprocessor",1558),eTS(1559,1,eGB,cv),eUe.pf=function(e,t){eMr(Pp(e,37),t)},Y5(eG8,"LayerConstraintPreprocessor",1559),eTS(360,22,{3:1,35:1,22:1,360:1},EZ);var e4U=enw(eG8,"LayerConstraintPreprocessor/HiddenNodeConnections",360,e1G,Vs,DF);eTS(1560,1,eGB,nF),eUe.pf=function(e,t){eRB(Pp(e,37),t)},Y5(eG8,"LayerSizeAndGraphHeightCalculator",1560),eTS(1561,1,eGB,nY),eUe.pf=function(e,t){eOw(Pp(e,37),t)},Y5(eG8,"LongEdgeJoiner",1561),eTS(1562,1,eGB,nB),eUe.pf=function(e,t){eRf(Pp(e,37),t)},Y5(eG8,"LongEdgeSplitter",1562),eTS(1563,1,eGB,nU),eUe.pf=function(e,t){ejN(this,Pp(e,37),t)},eUe.d=0,eUe.e=0,eUe.i=0,eUe.j=0,eUe.k=0,eUe.n=0,Y5(eG8,"NodePromotion",1563),eTS(1564,1,{},nH),eUe.Kb=function(e){return Pp(e,46),OQ(),!0},eUe.Fb=function(e){return this===e},Y5(eG8,"NodePromotion/lambda$0$Type",1564),eTS(1565,1,{},dq),eUe.Kb=function(e){return UM(this.a,Pp(e,46))},eUe.Fb=function(e){return this===e},eUe.a=0,Y5(eG8,"NodePromotion/lambda$1$Type",1565),eTS(1566,1,{},dZ),eUe.Kb=function(e){return UO(this.a,Pp(e,46))},eUe.Fb=function(e){return 
this===e},eUe.a=0,Y5(eG8,"NodePromotion/lambda$2$Type",1566),eTS(1567,1,eGB,n$),eUe.pf=function(e,t){eYN(Pp(e,37),t)},Y5(eG8,"NorthSouthPortPostprocessor",1567),eTS(1568,1,eGB,nz),eUe.pf=function(e,t){eYd(Pp(e,37),t)},Y5(eG8,"NorthSouthPortPreprocessor",1568),eTS(1569,1,e$C,nG),eUe.ue=function(e,t){return ea2(Pp(e,11),Pp(t,11))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eG8,"NorthSouthPortPreprocessor/lambda$0$Type",1569),eTS(1570,1,eGB,nW),eUe.pf=function(e,t){eDx(Pp(e,37),t)},Y5(eG8,"PartitionMidprocessor",1570),eTS(1571,1,eU8,nK),eUe.Mb=function(e){return Ln(Pp(e,10),(eBy(),ton))},Y5(eG8,"PartitionMidprocessor/lambda$0$Type",1571),eTS(1572,1,eUF,dQ),eUe.td=function(e){H$(this.a,Pp(e,10))},Y5(eG8,"PartitionMidprocessor/lambda$1$Type",1572),eTS(1573,1,eGB,nV),eUe.pf=function(e,t){eO3(Pp(e,37),t)},Y5(eG8,"PartitionPostprocessor",1573),eTS(1574,1,eGB,nq),eUe.pf=function(e,t){exQ(Pp(e,37),t)},Y5(eG8,"PartitionPreprocessor",1574),eTS(1575,1,eU8,nZ),eUe.Mb=function(e){return Ln(Pp(e,10),(eBy(),ton))},Y5(eG8,"PartitionPreprocessor/lambda$0$Type",1575),eTS(1576,1,{},nX),eUe.Kb=function(e){return new R1(null,new YI(new Fa(OH(efc(Pp(e,10)).a.Kc(),new c))))},Y5(eG8,"PartitionPreprocessor/lambda$1$Type",1576),eTS(1577,1,eU8,nJ),eUe.Mb=function(e){return epe(Pp(e,17))},Y5(eG8,"PartitionPreprocessor/lambda$2$Type",1577),eTS(1578,1,eUF,nQ),eUe.td=function(e){eoL(Pp(e,17))},Y5(eG8,"PartitionPreprocessor/lambda$3$Type",1578),eTS(1579,1,eGB,cE),eUe.pf=function(e,t){eDe(Pp(e,37),t)},Y5(eG8,"PortListSorter",1579),eTS(1580,1,{},n1),eUe.Kb=function(e){return euv(),Pp(e,11).e},Y5(eG8,"PortListSorter/lambda$0$Type",1580),eTS(1581,1,{},n0),eUe.Kb=function(e){return euv(),Pp(e,11).g},Y5(eG8,"PortListSorter/lambda$1$Type",1581),eTS(1582,1,e$C,n2),eUe.ue=function(e,t){return qy(Pp(e,11),Pp(t,11))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new 
fZ(this)},Y5(eG8,"PortListSorter/lambda$2$Type",1582),eTS(1583,1,e$C,n3),eUe.ue=function(e,t){return eg_(Pp(e,11),Pp(t,11))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eG8,"PortListSorter/lambda$3$Type",1583),eTS(1584,1,e$C,n4),eUe.ue=function(e,t){return eDK(Pp(e,11),Pp(t,11))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eG8,"PortListSorter/lambda$4$Type",1584),eTS(1585,1,eGB,n5),eUe.pf=function(e,t){eT3(Pp(e,37),t)},Y5(eG8,"PortSideProcessor",1585),eTS(1586,1,eGB,n6),eUe.pf=function(e,t){eCH(Pp(e,37),t)},Y5(eG8,"ReversedEdgeRestorer",1586),eTS(1591,1,eGB,ms),eUe.pf=function(e,t){emJ(this,Pp(e,37),t)},Y5(eG8,"SelfLoopPortRestorer",1591),eTS(1592,1,{},n9),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,29).a,16))},Y5(eG8,"SelfLoopPortRestorer/lambda$0$Type",1592),eTS(1593,1,eU8,n8),eUe.Mb=function(e){return Pp(e,10).k==(eEn(),e8N)},Y5(eG8,"SelfLoopPortRestorer/lambda$1$Type",1593),eTS(1594,1,eU8,n7),eUe.Mb=function(e){return Ln(Pp(e,10),(eBU(),tnk))},Y5(eG8,"SelfLoopPortRestorer/lambda$2$Type",1594),eTS(1595,1,{},re),eUe.Kb=function(e){return Pp(e_k(Pp(e,10),(eBU(),tnk)),403)},Y5(eG8,"SelfLoopPortRestorer/lambda$3$Type",1595),eTS(1596,1,eUF,d1),eUe.td=function(e){eE_(this.a,Pp(e,403))},Y5(eG8,"SelfLoopPortRestorer/lambda$4$Type",1596),eTS(794,1,eUF,rt),eUe.td=function(e){eEq(Pp(e,101))},Y5(eG8,"SelfLoopPortRestorer/lambda$5$Type",794),eTS(1597,1,eGB,rn),eUe.pf=function(e,t){ep1(Pp(e,37),t)},Y5(eG8,"SelfLoopPostProcessor",1597),eTS(1598,1,{},rr),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,29).a,16))},Y5(eG8,"SelfLoopPostProcessor/lambda$0$Type",1598),eTS(1599,1,eU8,ri),eUe.Mb=function(e){return Pp(e,10).k==(eEn(),e8N)},Y5(eG8,"SelfLoopPostProcessor/lambda$1$Type",1599),eTS(1600,1,eU8,ra),eUe.Mb=function(e){return 
Ln(Pp(e,10),(eBU(),tnk))},Y5(eG8,"SelfLoopPostProcessor/lambda$2$Type",1600),eTS(1601,1,eUF,ro),eUe.td=function(e){eyi(Pp(e,10))},Y5(eG8,"SelfLoopPostProcessor/lambda$3$Type",1601),eTS(1602,1,{},rs),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,101).f,1))},Y5(eG8,"SelfLoopPostProcessor/lambda$4$Type",1602),eTS(1603,1,eUF,d0),eUe.td=function(e){Vf(this.a,Pp(e,409))},Y5(eG8,"SelfLoopPostProcessor/lambda$5$Type",1603),eTS(1604,1,eU8,ru),eUe.Mb=function(e){return!!Pp(e,101).i},Y5(eG8,"SelfLoopPostProcessor/lambda$6$Type",1604),eTS(1605,1,eUF,d2),eUe.td=function(e){gb(this.a,Pp(e,101))},Y5(eG8,"SelfLoopPostProcessor/lambda$7$Type",1605),eTS(1587,1,eGB,rc),eUe.pf=function(e,t){eMJ(Pp(e,37),t)},Y5(eG8,"SelfLoopPreProcessor",1587),eTS(1588,1,{},rl),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,101).f,1))},Y5(eG8,"SelfLoopPreProcessor/lambda$0$Type",1588),eTS(1589,1,{},rf),eUe.Kb=function(e){return Pp(e,409).a},Y5(eG8,"SelfLoopPreProcessor/lambda$1$Type",1589),eTS(1590,1,eUF,rd),eUe.td=function(e){MH(Pp(e,17))},Y5(eG8,"SelfLoopPreProcessor/lambda$2$Type",1590),eTS(1606,1,eGB,CV),eUe.pf=function(e,t){eEi(this,Pp(e,37),t)},Y5(eG8,"SelfLoopRouter",1606),eTS(1607,1,{},rh),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,29).a,16))},Y5(eG8,"SelfLoopRouter/lambda$0$Type",1607),eTS(1608,1,eU8,rp),eUe.Mb=function(e){return Pp(e,10).k==(eEn(),e8N)},Y5(eG8,"SelfLoopRouter/lambda$1$Type",1608),eTS(1609,1,eU8,rb),eUe.Mb=function(e){return Ln(Pp(e,10),(eBU(),tnk))},Y5(eG8,"SelfLoopRouter/lambda$2$Type",1609),eTS(1610,1,{},rm),eUe.Kb=function(e){return Pp(e_k(Pp(e,10),(eBU(),tnk)),403)},Y5(eG8,"SelfLoopRouter/lambda$3$Type",1610),eTS(1611,1,eUF,EX),eUe.td=function(e){Hs(this.a,this.b,Pp(e,403))},Y5(eG8,"SelfLoopRouter/lambda$4$Type",1611),eTS(1612,1,eGB,rg),eUe.pf=function(e,t){eAz(Pp(e,37),t)},Y5(eG8,"SemiInteractiveCrossMinProcessor",1612),eTS(1613,1,eU8,rv),eUe.Mb=function(e){return 
Pp(e,10).k==(eEn(),e8N)},Y5(eG8,"SemiInteractiveCrossMinProcessor/lambda$0$Type",1613),eTS(1614,1,eU8,ry),eUe.Mb=function(e){return R9(Pp(e,10))._b((eBy(),tog))},Y5(eG8,"SemiInteractiveCrossMinProcessor/lambda$1$Type",1614),eTS(1615,1,e$C,rw),eUe.ue=function(e,t){return erF(Pp(e,10),Pp(t,10))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eG8,"SemiInteractiveCrossMinProcessor/lambda$2$Type",1615),eTS(1616,1,{},r_),eUe.Ce=function(e,t){return H4(Pp(e,10),Pp(t,10))},Y5(eG8,"SemiInteractiveCrossMinProcessor/lambda$3$Type",1616),eTS(1618,1,eGB,rE),eUe.pf=function(e,t){eN8(Pp(e,37),t)},Y5(eG8,"SortByInputModelProcessor",1618),eTS(1619,1,eU8,rS),eUe.Mb=function(e){return 0!=Pp(e,11).g.c.length},Y5(eG8,"SortByInputModelProcessor/lambda$0$Type",1619),eTS(1620,1,eUF,d3),eUe.td=function(e){eE6(this.a,Pp(e,11))},Y5(eG8,"SortByInputModelProcessor/lambda$1$Type",1620),eTS(1693,803,{},erY),eUe.Me=function(e){var t,n,r,i;switch(this.c=e,this.a.g){case 2:t=new p0,_r(UJ(new R1(null,new Gq(this.c.a.b,16)),new rj),new E2(this,t)),eS2(this,new rT),ety(t,new rM),t.c=Je(e1R,eUp,1,0,5,1),_r(UJ(new R1(null,new Gq(this.c.a.b,16)),new rO),new d5(t)),eS2(this,new rA),ety(t,new rL),t.c=Je(e1R,eUp,1,0,5,1),n=M_(eim(U1(new R1(null,new Gq(this.c.a.b,16)),new d6(this))),new rC),_r(new R1(null,new Gq(this.c.a.a,16)),new EQ(n,t)),eS2(this,new rD),ety(t,new rk),t.c=Je(e1R,eUp,1,0,5,1);break;case 3:r=new p0,eS2(this,new rx),i=M_(eim(U1(new R1(null,new Gq(this.c.a.b,16)),new d4(this))),new rI),_r(UJ(new R1(null,new Gq(this.c.a.b,16)),new rN),new E0(i,r)),eS2(this,new rP),ety(r,new rR),r.c=Je(e1R,eUp,1,0,5,1);break;default:throw p7(new bI)}},eUe.b=0,Y5(eWs,"EdgeAwareScanlineConstraintCalculation",1693),eTS(1694,1,e$q,rx),eUe.Lb=function(e){return M4(Pp(e,57).g,145)},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return M4(Pp(e,57).g,145)},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$0$Type",1694),eTS(1695,1,{},d4),eUe.Fe=function(e){return 
eky(this.a,Pp(e,57))},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$1$Type",1695),eTS(1703,1,eU7,EJ),eUe.Vd=function(){ev_(this.a,this.b,-1)},eUe.b=0,Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$10$Type",1703),eTS(1705,1,e$q,rT),eUe.Lb=function(e){return M4(Pp(e,57).g,145)},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return M4(Pp(e,57).g,145)},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$11$Type",1705),eTS(1706,1,eUF,rM),eUe.td=function(e){Pp(e,365).Vd()},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$12$Type",1706),eTS(1707,1,eU8,rO),eUe.Mb=function(e){return M4(Pp(e,57).g,10)},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$13$Type",1707),eTS(1709,1,eUF,d5),eUe.td=function(e){efw(this.a,Pp(e,57))},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$14$Type",1709),eTS(1708,1,eU7,E9),eUe.Vd=function(){ev_(this.b,this.a,-1)},eUe.a=0,Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$15$Type",1708),eTS(1710,1,e$q,rA),eUe.Lb=function(e){return M4(Pp(e,57).g,10)},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return M4(Pp(e,57).g,10)},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$16$Type",1710),eTS(1711,1,eUF,rL),eUe.td=function(e){Pp(e,365).Vd()},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$17$Type",1711),eTS(1712,1,{},d6),eUe.Fe=function(e){return ekw(this.a,Pp(e,57))},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$18$Type",1712),eTS(1713,1,{},rC),eUe.De=function(){return 0},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$19$Type",1713),eTS(1696,1,{},rI),eUe.De=function(){return 0},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$2$Type",1696),eTS(1715,1,eUF,EQ),eUe.td=function(e){jq(this.a,this.b,Pp(e,307))},eUe.a=0,Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$20$Type",1715),eTS(1714,1,eU7,E1),eUe.Vd=function(){eT4(this.a,this.b,-1)},eUe.b=0,Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$21$Type",1714),eTS(1716,1,e$q,rD),eUe.Lb=function(e){return 
Pp(e,57),!0},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return Pp(e,57),!0},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$22$Type",1716),eTS(1717,1,eUF,rk),eUe.td=function(e){Pp(e,365).Vd()},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$23$Type",1717),eTS(1697,1,eU8,rN),eUe.Mb=function(e){return M4(Pp(e,57).g,10)},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$3$Type",1697),eTS(1699,1,eUF,E0),eUe.td=function(e){jZ(this.a,this.b,Pp(e,57))},eUe.a=0,Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$4$Type",1699),eTS(1698,1,eU7,E8),eUe.Vd=function(){ev_(this.b,this.a,-1)},eUe.a=0,Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$5$Type",1698),eTS(1700,1,e$q,rP),eUe.Lb=function(e){return Pp(e,57),!0},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return Pp(e,57),!0},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$6$Type",1700),eTS(1701,1,eUF,rR),eUe.td=function(e){Pp(e,365).Vd()},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$7$Type",1701),eTS(1702,1,eU8,rj),eUe.Mb=function(e){return M4(Pp(e,57).g,145)},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$8$Type",1702),eTS(1704,1,eUF,E2),eUe.td=function(e){eth(this.a,this.b,Pp(e,57))},Y5(eWs,"EdgeAwareScanlineConstraintCalculation/lambda$9$Type",1704),eTS(1521,1,eGB,AU),eUe.pf=function(e,t){eRE(this,Pp(e,37),t)},Y5(eWs,"HorizontalGraphCompactor",1521),eTS(1522,1,{},d9),eUe.Oe=function(e,t){var n,r,i;return Q8(e,t)?0:(n=KT(e),r=KT(t),n&&n.k==(eEn(),e8C)||r&&r.k==(eEn(),e8C))?0:(i=Pp(e_k(this.a.a,(eBU(),tnx)),304),Ax(i,n?n.k:(eEn(),e8D),r?r.k:(eEn(),e8D)))},eUe.Pe=function(e,t){var n,r,i;return Q8(e,t)?1:(n=KT(e),r=KT(t),i=Pp(e_k(this.a.a,(eBU(),tnx)),304),AT(i,n?n.k:(eEn(),e8D),r?r.k:(eEn(),e8D)))},Y5(eWs,"HorizontalGraphCompactor/1",1522),eTS(1523,1,{},rF),eUe.Ne=function(e,t){return _L(),0==e.a.i},Y5(eWs,"HorizontalGraphCompactor/lambda$0$Type",1523),eTS(1524,1,{},d8),eUe.Ne=function(e,t){return 
HZ(this.a,e,t)},Y5(eWs,"HorizontalGraphCompactor/lambda$1$Type",1524),eTS(1664,1,{},QF),Y5(eWs,"LGraphToCGraphTransformer",1664),eTS(1672,1,eU8,rY),eUe.Mb=function(e){return null!=e},Y5(eWs,"LGraphToCGraphTransformer/0methodref$nonNull$Type",1672),eTS(1665,1,{},rB),eUe.Kb=function(e){return Dj(),efF(e_k(Pp(Pp(e,57).g,10),(eBU(),tnc)))},Y5(eWs,"LGraphToCGraphTransformer/lambda$0$Type",1665),eTS(1666,1,{},rU),eUe.Kb=function(e){return Dj(),ecR(Pp(Pp(e,57).g,145))},Y5(eWs,"LGraphToCGraphTransformer/lambda$1$Type",1666),eTS(1675,1,eU8,rH),eUe.Mb=function(e){return Dj(),M4(Pp(e,57).g,10)},Y5(eWs,"LGraphToCGraphTransformer/lambda$10$Type",1675),eTS(1676,1,eUF,r$),eUe.td=function(e){Hq(Pp(e,57))},Y5(eWs,"LGraphToCGraphTransformer/lambda$11$Type",1676),eTS(1677,1,eU8,rz),eUe.Mb=function(e){return Dj(),M4(Pp(e,57).g,145)},Y5(eWs,"LGraphToCGraphTransformer/lambda$12$Type",1677),eTS(1681,1,eUF,rG),eUe.td=function(e){ecP(Pp(e,57))},Y5(eWs,"LGraphToCGraphTransformer/lambda$13$Type",1681),eTS(1678,1,eUF,d7),eUe.td=function(e){Tm(this.a,Pp(e,8))},eUe.a=0,Y5(eWs,"LGraphToCGraphTransformer/lambda$14$Type",1678),eTS(1679,1,eUF,he),eUe.td=function(e){Tv(this.a,Pp(e,110))},eUe.a=0,Y5(eWs,"LGraphToCGraphTransformer/lambda$15$Type",1679),eTS(1680,1,eUF,ht),eUe.td=function(e){Tg(this.a,Pp(e,8))},eUe.a=0,Y5(eWs,"LGraphToCGraphTransformer/lambda$16$Type",1680),eTS(1682,1,{},rW),eUe.Kb=function(e){return Dj(),new R1(null,new YI(new Fa(OH(efc(Pp(e,10)).a.Kc(),new c))))},Y5(eWs,"LGraphToCGraphTransformer/lambda$17$Type",1682),eTS(1683,1,eU8,rK),eUe.Mb=function(e){return Dj(),q8(Pp(e,17))},Y5(eWs,"LGraphToCGraphTransformer/lambda$18$Type",1683),eTS(1684,1,eUF,hn),eUe.td=function(e){eex(this.a,Pp(e,17))},Y5(eWs,"LGraphToCGraphTransformer/lambda$19$Type",1684),eTS(1668,1,eUF,hr),eUe.td=function(e){Wj(this.a,Pp(e,145))},Y5(eWs,"LGraphToCGraphTransformer/lambda$2$Type",1668),eTS(1685,1,{},rV),eUe.Kb=function(e){return Dj(),new R1(null,new 
Gq(Pp(e,29).a,16))},Y5(eWs,"LGraphToCGraphTransformer/lambda$20$Type",1685),eTS(1686,1,{},rq),eUe.Kb=function(e){return Dj(),new R1(null,new YI(new Fa(OH(efc(Pp(e,10)).a.Kc(),new c))))},Y5(eWs,"LGraphToCGraphTransformer/lambda$21$Type",1686),eTS(1687,1,{},rZ),eUe.Kb=function(e){return Dj(),Pp(e_k(Pp(e,17),(eBU(),tnO)),15)},Y5(eWs,"LGraphToCGraphTransformer/lambda$22$Type",1687),eTS(1688,1,eU8,rX),eUe.Mb=function(e){return AN(Pp(e,15))},Y5(eWs,"LGraphToCGraphTransformer/lambda$23$Type",1688),eTS(1689,1,eUF,hi),eUe.td=function(e){ekn(this.a,Pp(e,15))},Y5(eWs,"LGraphToCGraphTransformer/lambda$24$Type",1689),eTS(1667,1,eUF,E3),eUe.td=function(e){VK(this.a,this.b,Pp(e,145))},Y5(eWs,"LGraphToCGraphTransformer/lambda$3$Type",1667),eTS(1669,1,{},rJ),eUe.Kb=function(e){return Dj(),new R1(null,new Gq(Pp(e,29).a,16))},Y5(eWs,"LGraphToCGraphTransformer/lambda$4$Type",1669),eTS(1670,1,{},rQ),eUe.Kb=function(e){return Dj(),new R1(null,new YI(new Fa(OH(efc(Pp(e,10)).a.Kc(),new c))))},Y5(eWs,"LGraphToCGraphTransformer/lambda$5$Type",1670),eTS(1671,1,{},r1),eUe.Kb=function(e){return Dj(),Pp(e_k(Pp(e,17),(eBU(),tnO)),15)},Y5(eWs,"LGraphToCGraphTransformer/lambda$6$Type",1671),eTS(1673,1,eUF,ha),eUe.td=function(e){exr(this.a,Pp(e,15))},Y5(eWs,"LGraphToCGraphTransformer/lambda$8$Type",1673),eTS(1674,1,eUF,E4),eUe.td=function(e){MN(this.a,this.b,Pp(e,145))},Y5(eWs,"LGraphToCGraphTransformer/lambda$9$Type",1674),eTS(1663,1,{},r0),eUe.Le=function(e){var t,n,r,i,a;for(this.a=e,this.d=new bX,this.c=Je(e24,eUp,121,this.a.a.a.c.length,0,1),this.b=0,n=new fz(this.a.a.a);n.a=b&&(P_(a,ell(l)),v=eB4.Math.max(v,y[l-1]-f),s+=p,m+=y[l-1]-m,f=y[l-1],p=u[l]),p=eB4.Math.max(p,u[l]),++l;s+=p}(h=eB4.Math.min(1/v,1/t.b/s))>r&&(r=h,n=a)}return n},eUe.Wf=function(){return!1},Y5(eWb,"MSDCutIndexHeuristic",802),eTS(1617,1,eGB,iB),eUe.pf=function(e,t){eNZ(Pp(e,37),t)},Y5(eWb,"SingleEdgeGraphWrapper",1617),eTS(227,22,{3:1,35:1,22:1,227:1},Ss);var 
e4K=enw(eWm,"CenterEdgeLabelPlacementStrategy",227,e1G,Jv,DU);eTS(422,22,{3:1,35:1,22:1,422:1},Su);var e4V=enw(eWm,"ConstraintCalculationStrategy",422,e1G,$G,DH);eTS(314,22,{3:1,35:1,22:1,314:1,246:1,234:1},Sc),eUe.Kf=function(){return ekF(this)},eUe.Xf=function(){return ekF(this)};var e4q=enw(eWm,"CrossingMinimizationStrategy",314,e1G,G5,D$);eTS(337,22,{3:1,35:1,22:1,337:1},Sl);var e4Z=enw(eWm,"CuttingStrategy",337,e1G,G6,DW);eTS(335,22,{3:1,35:1,22:1,335:1,246:1,234:1},Sf),eUe.Kf=function(){return eTW(this)},eUe.Xf=function(){return eTW(this)};var e4X=enw(eWm,"CycleBreakingStrategy",335,e1G,Zv,DK);eTS(419,22,{3:1,35:1,22:1,419:1},Sd);var e4J=enw(eWm,"DirectionCongruency",419,e1G,$z,DV);eTS(450,22,{3:1,35:1,22:1,450:1},Sh);var e4Q=enw(eWm,"EdgeConstraint",450,e1G,G9,Dq);eTS(276,22,{3:1,35:1,22:1,276:1},Sp);var e41=enw(eWm,"EdgeLabelSideSelection",276,e1G,JE,DZ);eTS(479,22,{3:1,35:1,22:1,479:1},Sb);var e40=enw(eWm,"EdgeStraighteningStrategy",479,e1G,$$,DX);eTS(274,22,{3:1,35:1,22:1,274:1},Sm);var e42=enw(eWm,"FixedAlignment",274,e1G,Jw,DJ);eTS(275,22,{3:1,35:1,22:1,275:1},Sg);var e43=enw(eWm,"GraphCompactionStrategy",275,e1G,Jy,DQ);eTS(256,22,{3:1,35:1,22:1,256:1},Sv);var e44=enw(eWm,"GraphProperties",256,e1G,eiT,D1);eTS(292,22,{3:1,35:1,22:1,292:1},Sy);var e45=enw(eWm,"GreedySwitchType",292,e1G,We,D0);eTS(303,22,{3:1,35:1,22:1,303:1},Sw);var e46=enw(eWm,"InLayerConstraint",303,e1G,G7,D2);eTS(420,22,{3:1,35:1,22:1,420:1},S_);var e49=enw(eWm,"InteractiveReferencePoint",420,e1G,$W,D3);eTS(163,22,{3:1,35:1,22:1,163:1},ST);var e48=enw(eWm,"LayerConstraint",163,e1G,Z_,D4);eTS(848,1,e$2,cT),eUe.Qe=function(e){efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWE),""),"Direction Congruency"),"Specifies how drawings of the same graph with different layout directions compare to each other: either a natural reading direction is preserved or the drawings are rotated versions of each other."),trl),(eSd(),tdv)),e4J),el9((epx(),tdh))))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new 
oN,eWS),""),"Feedback Edges"),"Whether feedback edges should be highlighted by routing around the nodes."),(OQ(),!1)),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWk),""),"Interactive Reference Point"),"Determines which point of a node is considered by interactive layout phases."),trN),tdv),e49),el9(tdh)))),K_(e,eWk,eWI,trR),K_(e,eWk,eWH,trP),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWx),""),"Merge Edges"),"Edges that have no ports are merged so they touch the connected nodes at the same points. When this option is disabled, one port is created for each edge directly connected to a node. When it is enabled, all such incoming edges share an input port, and all outgoing edges share an output port."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWT),""),"Merge Hierarchy-Crossing Edges"),"If hierarchical layout is active, hierarchy-crossing edges use as few hierarchical ports as possible. They are broken by the algorithm, with hierarchical ports inserted as required. Usually, one such port is created for each edge at each hierarchy crossing point. With this option set to true, we try to create as few hierarchical ports as possible in the process. In particular, all edges that form a hyperedge can share a port."),!0),tdm),e11),el9(tdh)))),efO(e,new eE8(v8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWM),""),"Allow Non-Flow Ports To Switch Sides"),"Specifies whether non-flow ports may switch sides if their node's port constraints are either FIXED_SIDE or FIXED_ORDER. A non-flow port is a port on a side that is not part of the currently configured layout flow. For instance, given a left-to-right layout direction, north and south ports would be considered non-flow ports. Further note that the underlying criterium whether to switch sides or not solely relies on the minimization of edge crossings. 
Hence, edge length and other aesthetics criteria are not addressed."),!1),tdm),e11),el9(tdp)),eow(vx(e17,1),eUP,2,6,["org.eclipse.elk.layered.northOrSouthPort"])))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWO),""),"Port Sorting Strategy"),"Only relevant for nodes with FIXED_SIDE port constraints. Determines the way a node's ports are distributed on the sides of a node if their order is not prescribed. The option is set on parent nodes."),tic),tdv),e5a),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWA),""),"Thoroughness"),"How much effort should be spent to produce a nice layout."),ell(7)),tdw),e15),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWL),""),"Add Unnecessary Bendpoints"),"Adds bend points even if an edge does not change direction. If true, each long edge dummy will contribute a bend point to its edges and hierarchy-crossing edges will always get a bend point where they cross hierarchy boundaries. By default, bend points are only added where an edge changes direction."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWC),""),"Generate Position and Layer IDs"),"If enabled position id and layer id are generated, which are usually only used internally when setting the interactiveLayout option. This option should be specified on the root node."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWI),"cycleBreaking"),"Cycle Breaking Strategy"),"Strategy for cycle breaking. Cycle breaking looks for cycles in the graph and determines which edges to reverse to break the cycles. 
Reversed edges will end up pointing to the opposite direction of regular edges (that is, reversed edges will point left if edges usually point right)."),tru),tdv),e4X),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWD),eKC),"Node Layering Strategy"),"Strategy for node layering."),trX),tdv),e47),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWN),eKC),"Layer Constraint"),"Determines a constraint on the placement of the node regarding the layering."),trU),tdv),e48),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWP),eKC),"Layer Choice Constraint"),"Allows to set a constraint regarding the layer placement of a node. Let i be the value of teh constraint. Assumed the drawing has n layers and i < n. If set to i, it expresses that the node should be placed in i-th layer. Should i>=n be true then the node is placed in the last layer of the drawing. Note that this option is not part of any of ELK Layered's default configurations but is only evaluated as part of the `InteractiveLayeredGraphVisitor`, which must be applied manually or used via the `DiagramLayoutEngine."),ell(-1)),tdw),e15),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWR),eKC),"Layer ID"),"Layer identifier that was calculated by ELK Layered for a node. This is only generated if interactiveLayot or generatePositionAndLayerIds is set."),ell(-1)),tdw),e15),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWj),eKI),"Upper Bound On Width [MinWidth Layerer]"),"Defines a loose upper bound on the width of the MinWidth layerer. If set to '-1' multiple values are tested and the best result is selected."),ell(4)),tdw),e15),el9(tdh)))),K_(e,eWj,eWD,trz),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWF),eKI),"Upper Layer Estimation Scaling Factor [MinWidth Layerer]"),"Multiplied with Upper Bound On Width for defining an upper bound on the width of layers which haven't been determined yet, but whose maximum width had been (roughly) estimated by the MinWidth algorithm. 
Compensates for too high estimations. If set to '-1' multiple values are tested and the best result is selected."),ell(2)),tdw),e15),el9(tdh)))),K_(e,eWF,eWD,trW),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWY),eKD),"Node Promotion Strategy"),"Reduces number of dummy nodes after layering phase (if possible)."),trq),tdv),e5r),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWB),eKD),"Max Node Promotion Iterations"),"Limits the number of iterations for node promotion."),ell(0)),tdw),e15),el9(tdh)))),K_(e,eWB,eWY,null),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWU),"layering.coffmanGraham"),"Layer Bound"),"The maximum number of nodes allowed per layer."),ell(eUu)),tdw),e15),el9(tdh)))),K_(e,eWU,eWD,trF),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWH),eKN),"Crossing Minimization Strategy"),"Strategy for crossing minimization."),tro),tdv),e4q),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eW$),eKN),"Force Node Model Order"),"The node order given by the model does not change to produce a better layout. E.g. if node A is before node B in the model this is not changed during crossing minimization. This assumes that the node model order is already respected before crossing minimization. This can be achieved by setting considerModelOrder.strategy to NODES_AND_EDGES."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWz),eKN),"Hierarchical Sweepiness"),"How likely it is to use cross-hierarchy (1) vs bottom-up (-1)."),.1),tdg),e13),el9(tdh)))),K_(e,eWz,eKP,tre),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWG),eKN),"Semi-Interactive Crossing Minimization"),"Preserves the order of nodes within a layer but still minimizes crossings between edges connecting long edge dummies. Derives the desired order from positions specified by the 'org.eclipse.elk.position' layout option. 
Requires a crossing minimization strategy that is able to process 'in-layer' constraints."),!1),tdm),e11),el9(tdh)))),K_(e,eWG,eWH,tri),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWW),eKN),"Position Choice Constraint"),"Allows to set a constraint regarding the position placement of a node in a layer. Assumed the layer in which the node placed includes n other nodes and i < n. If set to i, it expresses that the node should be placed at the i-th position. Should i>=n be true then the node is placed at the last position in the layer. Note that this option is not part of any of ELK Layered's default configurations but is only evaluated as part of the `InteractiveLayeredGraphVisitor`, which must be applied manually or used via the `DiagramLayoutEngine."),ell(-1)),tdw),e15),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWK),eKN),"Position ID"),"Position within a layer that was determined by ELK Layered for a node. This is only generated if interactiveLayot or generatePositionAndLayerIds is set."),ell(-1)),tdw),e15),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWV),eKR),"Greedy Switch Activation Threshold"),"By default it is decided automatically if the greedy switch is activated or not. The decision is based on whether the size of the input graph (without dummy nodes) is smaller than the value of this option. A '0' enforces the activation."),ell(40)),tdw),e15),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWq),eKR),"Greedy Switch Crossing Minimization"),"Greedy Switch strategy for crossing minimization. The greedy switch heuristic is executed after the regular crossing minimization as a post-processor. 
Note that if 'hierarchyHandling' is set to 'INCLUDE_CHILDREN', the 'greedySwitchHierarchical.type' option must be used."),tn9),tdv),e45),el9(tdh)))),K_(e,eWq,eWH,tn8),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWZ),"crossingMinimization.greedySwitchHierarchical"),"Greedy Switch Crossing Minimization (hierarchical)"),"Activates the greedy switch heuristic in case hierarchical layout is used. The differences to the non-hierarchical case (see 'greedySwitch.type') are: 1) greedy switch is inactive by default, 3) only the option value set on the node at which hierarchical layout starts is relevant, and 2) if it's activated by the user, it properly addresses hierarchy-crossing edges."),tn3),tdv),e45),el9(tdh)))),K_(e,eWZ,eWH,tn4),K_(e,eWZ,eKP,tn5),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWX),eKj),"Node Placement Strategy"),"Strategy for node placement."),tis),tdv),e5n),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eWJ),eKj),"Favor Straight Edges Over Balancing"),"Favor straight edges over a balanced node placement. The default behavior is determined automatically based on the used 'edgeRouting'. For an orthogonal style it is set to true, for all other styles to false."),tdm),e11),el9(tdh)))),K_(e,eWJ,eWX,tr9),K_(e,eWJ,eWX,tr8),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eWQ),eKF),"BK Edge Straightening"),"Specifies whether the Brandes Koepf node placer tries to increase the number of straight edges at the expense of diagram size. There is a subtle difference to the 'favorStraightEdges' option, which decides whether a balanced placement of the nodes is desired, or not. In bk terms this means combining the four alignments into a single balanced one, or not. 
This option on the other hand tries to straighten additional edges during the creation of each of the four alignments."),tr0),tdv),e40),el9(tdh)))),K_(e,eWQ,eWX,tr2),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eW1),eKF),"BK Fixed Alignment"),"Tells the BK node placer to use a certain alignment (out of its four) instead of the one producing the smallest height, or the combination of all four."),tr4),tdv),e42),el9(tdh)))),K_(e,eW1,eWX,tr5),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eW0),"nodePlacement.linearSegments"),"Linear Segments Deflection Dampening"),"Dampens the movement of nodes to keep the diagram from getting too large."),.3),tdg),e13),el9(tdh)))),K_(e,eW0,eWX,tie),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eW2),"nodePlacement.networkSimplex"),"Node Flexibility"),"Aims at shorter and straighter edges. Two configurations are possible: (a) allow ports to move freely on the side they are assigned to (the order is always defined beforehand), (b) additionally allow to enlarge a node wherever it helps. If this option is not configured for a node, the 'nodeFlexibility.default' value is used, which is specified for the node's parent."),tdv),e5t),el9(tdd)))),K_(e,eW2,eWX,tia),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eW3),"nodePlacement.networkSimplex.nodeFlexibility"),"Node Flexibility Default"),"Default value of the 'nodeFlexibility' option for the children of a hierarchical node."),tir),tdv),e5t),el9(tdh)))),K_(e,eW3,eWX,tii),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eW4),eKY),"Self-Loop Distribution"),"Alter the distribution of the loops around the node. It only takes effect for PortConstraints.FREE."),trv),tdv),e5s),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eW5),eKY),"Self-Loop Ordering"),"Alter the ordering of the loops they can either be stacked or sequenced. 
It only takes effect for PortConstraints.FREE."),tr_),tdv),e5u),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eW6),"edgeRouting.splines"),"Spline Routing Mode"),"Specifies the way control points are assembled for each individual edge. CONSERVATIVE ensures that edges are properly routed around the nodes but feels rather orthogonal at times. SLOPPY uses fewer control points to obtain curvier edge routes but may result in edges overlapping nodes."),trS),tdv),e5c),el9(tdh)))),K_(e,eW6,eKB,trk),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eW9),"edgeRouting.splines.sloppy"),"Sloppy Spline Layer Spacing Factor"),"Spacing factor for routing area between layers when using sloppy spline routing."),.2),tdg),e13),el9(tdh)))),K_(e,eW9,eKB,trT),K_(e,eW9,eW6,trM),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eW8),"edgeRouting.polyline"),"Sloped Edge Zone Width"),"Width of the strip to the left and to the right of each layer where the polyline edge router is allowed to refrain from ensuring that edges are routed horizontally. This prevents awkward bend points for nodes that extent almost to the edge of their layer."),2),tdg),e13),el9(tdh)))),K_(e,eW8,eKB,trm),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eW7),eKU),"Spacing Base Value"),"An optional base value for all other layout options of the 'spacing' group. It can be used to conveniently alter the overall 'spaciousness' of the drawing. Whenever an explicit value is set for the other layout options, this base value will have no effect. The base value is not inherited, i.e. it must be set for each hierarchical node."),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKe),eKU),"Edge Node Between Layers Spacing"),"The spacing to be preserved between nodes and edges that are routed next to the node's layer. 
For the spacing between nodes and edges that cross the node's layer 'spacing.edgeNode' is used."),10),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKt),eKU),"Edge Edge Between Layer Spacing"),"Spacing to be preserved between pairs of edges that are routed between the same pair of layers. Note that 'spacing.edgeEdge' is used for the spacing between pairs of edges crossing the same layer."),10),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKn),eKU),"Node Node Between Layers Spacing"),"The spacing to be preserved between any pair of nodes of two adjacent layers. Note that 'spacing.nodeNode' is used for the spacing between nodes within the layer itself."),20),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKr),eKH),"Direction Priority"),"Defines how important it is to have a certain edge point into the direction of the overall layout. This option is evaluated during the cycle breaking phase."),ell(0)),tdw),e15),el9(tdl)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKi),eKH),"Shortness Priority"),"Defines how important it is to keep an edge as short as possible. This option is evaluated during the layering phase."),ell(0)),tdw),e15),el9(tdl)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKa),eKH),"Straightness Priority"),"Defines how important it is to keep an edge straight, i.e. aligned with one of the two axes. 
This option is evaluated during node placement."),ell(0)),tdw),e15),el9(tdl)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKo),eK$),ezI),"Tries to further compact components (disconnected sub-graphs)."),!1),tdm),e11),el9(tdh)))),K_(e,eKo,eGs,!0),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKs),eKz),"Post Compaction Strategy"),eKG),tnz),tdv),e43),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKu),eKz),"Post Compaction Constraint Calculation"),eKG),tnH),tdv),e4V),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKc),eKW),"High Degree Node Treatment"),"Makes room around high degree nodes to place leafs and trees."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKl),eKW),"High Degree Node Threshold"),"Whether a node is considered to have a high degree."),ell(16)),tdw),e15),el9(tdh)))),K_(e,eKl,eKc,!0),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKf),eKW),"High Degree Node Maximum Tree Height"),"Maximum height of a subtree connected to a high degree node to be moved to separate layers."),ell(5)),tdw),e15),el9(tdh)))),K_(e,eKf,eKc,!0),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKd),eKK),"Graph Wrapping Strategy"),"For certain graphs and certain prescribed drawing areas it may be desirable to split the laid out graph into chunks that are placed side by side. The edges that connect different chunks are 'wrapped' around from the end of one chunk to the start of the other chunk. The points between the chunks are referred to as 'cuts'."),tiU),tdv),e5f),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKh),eKK),"Additional Wrapped Edges Spacing"),"To visually separate edges that are wrapped from regularly routed edges an additional spacing value can be specified in form of this layout option. 
The spacing is added to the regular edgeNode spacing."),10),tdg),e13),el9(tdh)))),K_(e,eKh,eKd,tiw),K_(e,eKh,eKd,ti_),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKp),eKK),"Correction Factor for Wrapping"),"At times and for certain types of graphs the executed wrapping may produce results that are consistently biased in the same fashion: either wrapping to often or to rarely. This factor can be used to correct the bias. Internally, it is simply multiplied with the 'aspect ratio' layout option."),1),tdg),e13),el9(tdh)))),K_(e,eKp,eKd,tiS),K_(e,eKp,eKd,tik),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKb),eKV),"Cutting Strategy"),"The strategy by which the layer indexes are determined at which the layering crumbles into chunks."),tiC),tdv),e4Z),el9(tdh)))),K_(e,eKb,eKd,tiI),K_(e,eKb,eKd,tiD),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eKm),eKV),"Manually Specified Cuts"),"Allows the user to specify her own cuts for a certain graph."),td_),e1H),el9(tdh)))),K_(e,eKm,eKb,tiT),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKg),"wrapping.cutting.msd"),"MSD Freedom"),"The MSD cutting strategy starts with an initial guess on the number of chunks the graph should be split into. The freedom specifies how much the strategy may deviate from this guess. E.g. if an initial number of 3 is computed, a freedom of 1 allows 2, 3, and 4 cuts."),tiO),tdw),e15),el9(tdh)))),K_(e,eKg,eKb,tiA),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKv),eKq),"Validification Strategy"),"When wrapping graphs, one can specify indices that are not allowed as split points. The validification strategy makes sure every computed split point is allowed."),tiW),tdv),e5l),el9(tdh)))),K_(e,eKv,eKd,tiK),K_(e,eKv,eKd,tiV),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eKy),eKq),"Valid Indices for Wrapping"),null),td_),e1H),el9(tdh)))),K_(e,eKy,eKd,ti$),K_(e,eKy,eKd,tiz),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKw),eKZ),"Improve Cuts"),"For general graphs it is important that not too many edges wrap backwards. 
Thus a compromise between evenly-distributed cuts and the total number of cut edges is sought."),!0),tdm),e11),el9(tdh)))),K_(e,eKw,eKd,tij),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eK_),eKZ),"Distance Penalty When Improving Cuts"),null),2),tdg),e13),el9(tdh)))),K_(e,eK_,eKd,tiP),K_(e,eK_,eKw,!0),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKE),eKZ),"Improve Wrapped Edges"),"The initial wrapping is performed in a very simple way. As a consequence, edges that wrap from one chunk to another may be unnecessarily long. Activating this option tries to shorten such edges."),!0),tdm),e11),el9(tdh)))),K_(e,eKE,eKd,tiY),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKS),eKX),"Edge Label Side Selection"),"Method to decide on edge label sides."),trp),tdv),e41),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKk),eKX),"Edge Center Label Placement Strategy"),"Determines in which layer center labels of long edges should be placed."),trd),tdv),e4K),jL(tdh,eow(vx(e5Q,1),eU4,175,0,[tdf]))))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKx),eKJ),"Consider Model Order"),"Preserves the order of nodes and edges in the model file if this does not lead to additional edge crossings. Depending on the strategy this is not always possible since the node and edge order might be conflicting."),tnQ),tdv),e5i),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKT),eKJ),"No Model Order"),"Set on a node to not set a model order for this node even though it is a real node."),!1),tdm),e11),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKM),eKJ),"Consider Model Order for Components"),"If set to NONE the usual ordering strategy (by cumulative node priority and size of nodes) is used. INSIDE_PORT_SIDES orders the components with external ports only inside the groups with the same port side. FORCE_MODEL_ORDER enforces the mode order on components. 
This option might produce bad alignments and sub optimal drawings in terms of used area since the ordering should be respected."),tnW),tdv),e4L),el9(tdh)))),K_(e,eKM,eGs,null),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKO),eKJ),"Long Edge Ordering Strategy"),"Indicates whether long edges are sorted under, over, or equal to nodes that have no connection to a previous layer in a left-to-right or right-to-left layout. Under and over changes to right and left in a vertical layout."),tnZ),tdv),e5e),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKA),eKJ),"Crossing Counter Node Order Influence"),"Indicates with what percentage (1 for 100%) violations of the node model order are weighted against the crossings e.g. a value of 0.5 means two model order violations are as important as on edge crossing. This allows some edge crossings in favor of preserving the model order. It is advised to set this value to a very small positive value (e.g. 0.001) to have minimal crossing and a optimal node order. Defaults to no influence (0)."),0),tdg),e13),el9(tdh)))),K_(e,eKA,eKx,null),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKL),eKJ),"Crossing Counter Port Order Influence"),"Indicates with what percentage (1 for 100%) violations of the port model order are weighted against the crossings e.g. a value of 0.5 means two model order violations are as important as on edge crossing. This allows some edge crossings in favor of preserving the model order. It is advised to set this value to a very small positive value (e.g. 0.001) to have minimal crossing and a optimal port order. 
Defaults to no influence (0)."),0),tdg),e13),el9(tdh)))),K_(e,eKL,eKx,null),eBq((new cA,e))},Y5(eWm,"LayeredMetaDataProvider",848),eTS(986,1,e$2,cA),eUe.Qe=function(e){eBq(e)},Y5(eWm,"LayeredOptions",986),eTS(987,1,{},iH),eUe.$e=function(){return new b3},eUe._e=function(e){},Y5(eWm,"LayeredOptions/LayeredFactory",987),eTS(1372,1,{}),eUe.a=0,Y5(eVL,"ElkSpacings/AbstractSpacingsBuilder",1372),eTS(779,1372,{},ef4),Y5(eWm,"LayeredSpacings/LayeredSpacingsBuilder",779),eTS(313,22,{3:1,35:1,22:1,313:1,246:1,234:1},SE),eUe.Kf=function(){return eM3(this)},eUe.Xf=function(){return eM3(this)};var e47=enw(eWm,"LayeringStrategy",313,e1G,J_,D5);eTS(378,22,{3:1,35:1,22:1,378:1},SS);var e5e=enw(eWm,"LongEdgeOrderingStrategy",378,e1G,G4,D6);eTS(197,22,{3:1,35:1,22:1,197:1},Sk);var e5t=enw(eWm,"NodeFlexibility",197,e1G,VT,D9);eTS(315,22,{3:1,35:1,22:1,315:1,246:1,234:1},Sx),eUe.Kf=function(){return eTG(this)},eUe.Xf=function(){return eTG(this)};var e5n=enw(eWm,"NodePlacementStrategy",315,e1G,Zg,Nr);eTS(260,22,{3:1,35:1,22:1,260:1},SM);var e5r=enw(eWm,"NodePromotionStrategy",260,e1G,etL,D7);eTS(339,22,{3:1,35:1,22:1,339:1},SO);var e5i=enw(eWm,"OrderingStrategy",339,e1G,Wn,Ne);eTS(421,22,{3:1,35:1,22:1,421:1},SA);var e5a=enw(eWm,"PortSortingStrategy",421,e1G,$K,Nt);eTS(452,22,{3:1,35:1,22:1,452:1},SL);var e5o=enw(eWm,"PortType",452,e1G,Wt,D8);eTS(375,22,{3:1,35:1,22:1,375:1},SC);var e5s=enw(eWm,"SelfLoopDistributionStrategy",375,e1G,Wr,Nn);eTS(376,22,{3:1,35:1,22:1,376:1},SI);var e5u=enw(eWm,"SelfLoopOrderingStrategy",376,e1G,$H,Ni);eTS(304,1,{304:1},ejm),Y5(eWm,"Spacings",304),eTS(336,22,{3:1,35:1,22:1,336:1},SD);var e5c=enw(eWm,"SplineRoutingMode",336,e1G,Wa,Na);eTS(338,22,{3:1,35:1,22:1,338:1},SN);var e5l=enw(eWm,"ValidifyStrategy",338,e1G,Wo,No);eTS(377,22,{3:1,35:1,22:1,377:1},SP);var e5f=enw(eWm,"WrappingStrategy",377,e1G,Wi,Ns);eTS(1383,1,eVD,cL),eUe.Yf=function(e){return 
Pp(e,37),ts2},eUe.pf=function(e,t){eRb(this,Pp(e,37),t)},Y5(eVN,"DepthFirstCycleBreaker",1383),eTS(782,1,eVD,jG),eUe.Yf=function(e){return Pp(e,37),ts3},eUe.pf=function(e,t){eBS(this,Pp(e,37),t)},eUe.Zf=function(e){return Pp(RJ(e,ebO(this.d,e.c.length)),10)},Y5(eVN,"GreedyCycleBreaker",782),eTS(1386,782,eVD,kQ),eUe.Zf=function(e){var t,n,r,i;for(i=null,t=eUu,r=new fz(e);r.a1&&(gN(LK(e_k(Bq((GK(0,e.c.length),Pp(e.c[0],10))),(eBy(),ti7))))?eMR(e,this.d,Pp(this,660)):(Hj(),Mv(e,this.d)),eaz(this.e,e))},eUe.Sf=function(e,t,n,r){var i,a,o,s,u,c,l;for(t!=ja(n,e.length)&&(a=e[t-(n?1:-1)],Xy(this.f,a,n?(enY(),tsN):(enY(),tsD))),i=e[t][0],l=!r||i.k==(eEn(),e8C),c=ZW(e[t]),this.ag(c,l,!1,n),o=0,u=new fz(c);u.a"),e0?zJ(this.a,e[t-1],e[t]):!n&&t1&&(gN(LK(e_k(Bq((GK(0,e.c.length),Pp(e.c[0],10))),(eBy(),ti7))))?eMR(e,this.d,this):(Hj(),Mv(e,this.d)),gN(LK(e_k(Bq((GK(0,e.c.length),Pp(e.c[0],10))),ti7)))||eaz(this.e,e))},Y5(eVF,"ModelOrderBarycenterHeuristic",660),eTS(1803,1,e$C,hx),eUe.ue=function(e,t){return eED(this.a,Pp(e,10),Pp(t,10))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eVF,"ModelOrderBarycenterHeuristic/lambda$0$Type",1803),eTS(1403,1,eVD,cF),eUe.Yf=function(e){var t;return Pp(e,37),t=TL(tus),RI(t,(e_x(),e8n),(eB$(),e7I)),t},eUe.pf=function(e,t){$w((Pp(e,37),t))},Y5(eVF,"NoCrossingMinimizer",1403),eTS(796,402,eVR,yu),eUe.$f=function(e,t,n){var r,i,a,o,s,u,c,l,f,d,h;switch(f=this.g,n.g){case 1:for(i=0,a=0,l=new fz(e.j);l.a1&&(i.j==(eYu(),tby)?this.b[e]=!0:i.j==tbY&&e>0&&(this.b[e-1]=!0))},eUe.f=0,Y5(eWc,"AllCrossingsCounter",1798),eTS(587,1,{},erH),eUe.b=0,eUe.d=0,Y5(eWc,"BinaryIndexedTree",587),eTS(524,1,{},IQ),Y5(eWc,"CrossingsCounter",524),eTS(1906,1,e$C,hT),eUe.ue=function(e,t){return je(this.a,Pp(e,11),Pp(t,11))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eWc,"CrossingsCounter/lambda$0$Type",1906),eTS(1907,1,e$C,hM),eUe.ue=function(e,t){return 
jt(this.a,Pp(e,11),Pp(t,11))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eWc,"CrossingsCounter/lambda$1$Type",1907),eTS(1908,1,e$C,hO),eUe.ue=function(e,t){return jn(this.a,Pp(e,11),Pp(t,11))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eWc,"CrossingsCounter/lambda$2$Type",1908),eTS(1909,1,e$C,hA),eUe.ue=function(e,t){return jr(this.a,Pp(e,11),Pp(t,11))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eWc,"CrossingsCounter/lambda$3$Type",1909),eTS(1910,1,eUF,hL),eUe.td=function(e){QT(this.a,Pp(e,11))},Y5(eWc,"CrossingsCounter/lambda$4$Type",1910),eTS(1911,1,eU8,hC),eUe.Mb=function(e){return kq(this.a,Pp(e,11))},Y5(eWc,"CrossingsCounter/lambda$5$Type",1911),eTS(1912,1,eUF,hI),eUe.td=function(e){kV(this,e)},Y5(eWc,"CrossingsCounter/lambda$6$Type",1912),eTS(1913,1,eUF,SF),eUe.td=function(e){var t;Pj(),Vw(this.b,(t=this.a,Pp(e,11),t))},Y5(eWc,"CrossingsCounter/lambda$7$Type",1913),eTS(826,1,e$q,iq),eUe.Lb=function(e){return Pj(),Ln(Pp(e,11),(eBU(),tng))},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return Pj(),Ln(Pp(e,11),(eBU(),tng))},Y5(eWc,"CrossingsCounter/lambda$8$Type",826),eTS(1905,1,{},hD),Y5(eWc,"HyperedgeCrossingsCounter",1905),eTS(467,1,{35:1,467:1},Cq),eUe.wd=function(e){return ehq(this,Pp(e,467))},eUe.b=0,eUe.c=0,eUe.e=0,eUe.f=0;var e5m=Y5(eWc,"HyperedgeCrossingsCounter/Hyperedge",467);eTS(362,1,{35:1,362:1},He),eUe.wd=function(e){return eMf(this,Pp(e,362))},eUe.b=0,eUe.c=0;var e5g=Y5(eWc,"HyperedgeCrossingsCounter/HyperedgeCorner",362);eTS(523,22,{3:1,35:1,22:1,523:1},Sj);var e5v=enw(eWc,"HyperedgeCrossingsCounter/HyperedgeCorner/Type",523,e1G,$V,Nc);eTS(1405,1,eVD,cO),eUe.Yf=function(e){return Pp(e_k(Pp(e,37),(eBU(),tt3)),21).Hc((eLR(),ttw))?tuh:null},eUe.pf=function(e,t){evK(this,Pp(e,37),t)},Y5(eVY,"InteractiveNodePlacer",1405),eTS(1406,1,eVD,cM),eUe.Yf=function(e){return 
Pp(e_k(Pp(e,37),(eBU(),tt3)),21).Hc((eLR(),ttw))?tup:null},eUe.pf=function(e,t){emS(this,Pp(e,37),t)},Y5(eVY,"LinearSegmentsNodePlacer",1406),eTS(257,1,{35:1,257:1},ma),eUe.wd=function(e){return vH(this,Pp(e,257))},eUe.Fb=function(e){var t;return!!M4(e,257)&&(t=Pp(e,257),this.b==t.b)},eUe.Hb=function(){return this.b},eUe.Ib=function(){return"ls"+e_F(this.e)},eUe.a=0,eUe.b=0,eUe.c=-1,eUe.d=-1,eUe.g=0;var e5y=Y5(eVY,"LinearSegmentsNodePlacer/LinearSegment",257);eTS(1408,1,eVD,jW),eUe.Yf=function(e){return Pp(e_k(Pp(e,37),(eBU(),tt3)),21).Hc((eLR(),ttw))?tug:null},eUe.pf=function(e,t){eBr(this,Pp(e,37),t)},eUe.b=0,eUe.g=0,Y5(eVY,"NetworkSimplexPlacer",1408),eTS(1427,1,e$C,iZ),eUe.ue=function(e,t){return ME(Pp(e,19).a,Pp(t,19).a)},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eVY,"NetworkSimplexPlacer/0methodref$compare$Type",1427),eTS(1429,1,e$C,iX),eUe.ue=function(e,t){return ME(Pp(e,19).a,Pp(t,19).a)},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eVY,"NetworkSimplexPlacer/1methodref$compare$Type",1429),eTS(649,1,{649:1},SY);var e5w=Y5(eVY,"NetworkSimplexPlacer/EdgeRep",649);eTS(401,1,{401:1},Ht),eUe.b=!1;var e5_=Y5(eVY,"NetworkSimplexPlacer/NodeRep",401);eTS(508,12,{3:1,4:1,20:1,28:1,52:1,12:1,14:1,15:1,54:1,508:1},mu),Y5(eVY,"NetworkSimplexPlacer/Path",508),eTS(1409,1,{},iJ),eUe.Kb=function(e){return Pp(e,17).d.i.k},Y5(eVY,"NetworkSimplexPlacer/Path/lambda$0$Type",1409),eTS(1410,1,eU8,iQ),eUe.Mb=function(e){return Pp(e,267)==(eEn(),e8D)},Y5(eVY,"NetworkSimplexPlacer/Path/lambda$1$Type",1410),eTS(1411,1,{},i1),eUe.Kb=function(e){return Pp(e,17).d.i},Y5(eVY,"NetworkSimplexPlacer/Path/lambda$2$Type",1411),eTS(1412,1,eU8,hN),eUe.Mb=function(e){return Ct(edH(Pp(e,10)))},Y5(eVY,"NetworkSimplexPlacer/Path/lambda$3$Type",1412),eTS(1413,1,eU8,i0),eUe.Mb=function(e){return 
RM(Pp(e,11))},Y5(eVY,"NetworkSimplexPlacer/lambda$0$Type",1413),eTS(1414,1,eUF,SB),eUe.td=function(e){MP(this.a,this.b,Pp(e,11))},Y5(eVY,"NetworkSimplexPlacer/lambda$1$Type",1414),eTS(1423,1,eUF,hP),eUe.td=function(e){ekS(this.a,Pp(e,17))},Y5(eVY,"NetworkSimplexPlacer/lambda$10$Type",1423),eTS(1424,1,{},i2),eUe.Kb=function(e){return GE(),new R1(null,new Gq(Pp(e,29).a,16))},Y5(eVY,"NetworkSimplexPlacer/lambda$11$Type",1424),eTS(1425,1,eUF,hR),eUe.td=function(e){eCe(this.a,Pp(e,10))},Y5(eVY,"NetworkSimplexPlacer/lambda$12$Type",1425),eTS(1426,1,{},i3),eUe.Kb=function(e){return GE(),ell(Pp(e,121).e)},Y5(eVY,"NetworkSimplexPlacer/lambda$13$Type",1426),eTS(1428,1,{},i4),eUe.Kb=function(e){return GE(),ell(Pp(e,121).e)},Y5(eVY,"NetworkSimplexPlacer/lambda$15$Type",1428),eTS(1430,1,eU8,i5),eUe.Mb=function(e){return GE(),Pp(e,401).c.k==(eEn(),e8N)},Y5(eVY,"NetworkSimplexPlacer/lambda$17$Type",1430),eTS(1431,1,eU8,i6),eUe.Mb=function(e){return GE(),Pp(e,401).c.j.c.length>1},Y5(eVY,"NetworkSimplexPlacer/lambda$18$Type",1431),eTS(1432,1,eUF,Hn),eUe.td=function(e){ef2(this.c,this.b,this.d,this.a,Pp(e,401))},eUe.c=0,eUe.d=0,Y5(eVY,"NetworkSimplexPlacer/lambda$19$Type",1432),eTS(1415,1,{},i9),eUe.Kb=function(e){return GE(),new R1(null,new Gq(Pp(e,29).a,16))},Y5(eVY,"NetworkSimplexPlacer/lambda$2$Type",1415),eTS(1433,1,eUF,hj),eUe.td=function(e){MD(this.a,Pp(e,11))},eUe.a=0,Y5(eVY,"NetworkSimplexPlacer/lambda$20$Type",1433),eTS(1434,1,{},i8),eUe.Kb=function(e){return GE(),new R1(null,new Gq(Pp(e,29).a,16))},Y5(eVY,"NetworkSimplexPlacer/lambda$21$Type",1434),eTS(1435,1,eUF,hF),eUe.td=function(e){Oi(this.a,Pp(e,10))},Y5(eVY,"NetworkSimplexPlacer/lambda$22$Type",1435),eTS(1436,1,eU8,i7),eUe.Mb=function(e){return Ct(e)},Y5(eVY,"NetworkSimplexPlacer/lambda$23$Type",1436),eTS(1437,1,{},ae),eUe.Kb=function(e){return GE(),new R1(null,new Gq(Pp(e,29).a,16))},Y5(eVY,"NetworkSimplexPlacer/lambda$24$Type",1437),eTS(1438,1,eU8,hY),eUe.Mb=function(e){return 
xH(this.a,Pp(e,10))},Y5(eVY,"NetworkSimplexPlacer/lambda$25$Type",1438),eTS(1439,1,eUF,SU),eUe.td=function(e){eSl(this.a,this.b,Pp(e,10))},Y5(eVY,"NetworkSimplexPlacer/lambda$26$Type",1439),eTS(1440,1,eU8,at),eUe.Mb=function(e){return GE(),!q8(Pp(e,17))},Y5(eVY,"NetworkSimplexPlacer/lambda$27$Type",1440),eTS(1441,1,eU8,an),eUe.Mb=function(e){return GE(),!q8(Pp(e,17))},Y5(eVY,"NetworkSimplexPlacer/lambda$28$Type",1441),eTS(1442,1,{},hB),eUe.Ce=function(e,t){return M8(this.a,Pp(e,29),Pp(t,29))},Y5(eVY,"NetworkSimplexPlacer/lambda$29$Type",1442),eTS(1416,1,{},ar),eUe.Kb=function(e){return GE(),new R1(null,new YI(new Fa(OH(efc(Pp(e,10)).a.Kc(),new c))))},Y5(eVY,"NetworkSimplexPlacer/lambda$3$Type",1416),eTS(1417,1,eU8,ai),eUe.Mb=function(e){return GE(),Km(Pp(e,17))},Y5(eVY,"NetworkSimplexPlacer/lambda$4$Type",1417),eTS(1418,1,eUF,hU),eUe.td=function(e){eNB(this.a,Pp(e,17))},Y5(eVY,"NetworkSimplexPlacer/lambda$5$Type",1418),eTS(1419,1,{},aa),eUe.Kb=function(e){return GE(),new R1(null,new Gq(Pp(e,29).a,16))},Y5(eVY,"NetworkSimplexPlacer/lambda$6$Type",1419),eTS(1420,1,eU8,ao),eUe.Mb=function(e){return GE(),Pp(e,10).k==(eEn(),e8N)},Y5(eVY,"NetworkSimplexPlacer/lambda$7$Type",1420),eTS(1421,1,{},as),eUe.Kb=function(e){return GE(),new R1(null,new YI(new Fa(OH(efs(Pp(e,10)).a.Kc(),new c))))},Y5(eVY,"NetworkSimplexPlacer/lambda$8$Type",1421),eTS(1422,1,eU8,au),eUe.Mb=function(e){return GE(),Rc(Pp(e,17))},Y5(eVY,"NetworkSimplexPlacer/lambda$9$Type",1422),eTS(1404,1,eVD,cz),eUe.Yf=function(e){return Pp(e_k(Pp(e,37),(eBU(),tt3)),21).Hc((eLR(),ttw))?tuv:null},eUe.pf=function(e,t){ePV(Pp(e,37),t)},Y5(eVY,"SimpleNodePlacer",1404),eTS(180,1,{180:1},eIW),eUe.Ib=function(){var e;return e="",this.c==(zs(),tuw)?e+=ezn:this.c==tuy&&(e+=ezt),this.o==(zQ(),tuE)?e+=ezh:this.o==tuS?e+="UP":e+="BALANCED",e},Y5(eVH,"BKAlignedLayout",180),eTS(516,22,{3:1,35:1,22:1,516:1},Sz);var e5E=enw(eVH,"BKAlignedLayout/HDirection",516,e1G,$Z,Nl);eTS(515,22,{3:1,35:1,22:1,515:1},S$);var 
e5S=enw(eVH,"BKAlignedLayout/VDirection",515,e1G,$X,Nf);eTS(1634,1,{},SH),Y5(eVH,"BKAligner",1634),eTS(1637,1,{},eg$),Y5(eVH,"BKCompactor",1637),eTS(654,1,{654:1},ac),eUe.a=0,Y5(eVH,"BKCompactor/ClassEdge",654),eTS(458,1,{458:1},mo),eUe.a=null,eUe.b=0,Y5(eVH,"BKCompactor/ClassNode",458),eTS(1407,1,eVD,kX),eUe.Yf=function(e){return Pp(e_k(Pp(e,37),(eBU(),tt3)),21).Hc((eLR(),ttw))?tux:null},eUe.pf=function(e,t){eBP(this,Pp(e,37),t)},eUe.d=!1,Y5(eVH,"BKNodePlacer",1407),eTS(1635,1,{},al),eUe.d=0,Y5(eVH,"NeighborhoodInformation",1635),eTS(1636,1,e$C,hH),eUe.ue=function(e,t){return etp(this,Pp(e,46),Pp(t,46))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eVH,"NeighborhoodInformation/NeighborComparator",1636),eTS(808,1,{}),Y5(eVH,"ThresholdStrategy",808),eTS(1763,808,{},mm),eUe.bg=function(e,t,n){return this.a.o==(zQ(),tuS)?eHQ:eH1},eUe.cg=function(){},Y5(eVH,"ThresholdStrategy/NullThresholdStrategy",1763),eTS(579,1,{579:1},SG),eUe.c=!1,eUe.d=!1,Y5(eVH,"ThresholdStrategy/Postprocessable",579),eTS(1764,808,{},mg),eUe.bg=function(e,t,n){var r,i,a;return(i=t==n,r=this.a.a[n.p]==t,i||r)?(a=e,this.a.c,zs(),i&&(a=ePX(this,t,!0)),isNaN(a)||isFinite(a)||!r||(a=ePX(this,n,!1)),a):e},eUe.cg=function(){for(var e,t,n,r,i;0!=this.d.b;){if((r=eDJ(this,i=Pp(zv(this.d),579))).a)e=r.a,((n=gN(this.a.f[this.a.g[i.b.p].p]))||q8(e)||e.c.i.c!=e.d.i.c)&&((t=eMd(this,i))||Th(this.e,i))}for(;0!=this.e.a.c.length;)eMd(this,Pp(euO(this.e),579))},Y5(eVH,"ThresholdStrategy/SimpleThresholdStrategy",1764),eTS(635,1,{635:1,246:1,234:1},af),eUe.Kf=function(){return eaM(this)},eUe.Xf=function(){return eaM(this)},Y5(eV$,"EdgeRouterFactory",635),eTS(1458,1,eVD,cG),eUe.Yf=function(e){return eLb(Pp(e,37))},eUe.pf=function(e,t){eP7(Pp(e,37),t)},Y5(eV$,"OrthogonalEdgeRouter",1458),eTS(1451,1,eVD,kJ),eUe.Yf=function(e){return ev4(Pp(e,37))},eUe.pf=function(e,t){eYg(this,Pp(e,37),t)},Y5(eV$,"PolylineEdgeRouter",1451),eTS(1452,1,e$q,ad),eUe.Lb=function(e){return 
eaQ(Pp(e,10))},eUe.Fb=function(e){return this===e},eUe.Mb=function(e){return eaQ(Pp(e,10))},Y5(eV$,"PolylineEdgeRouter/1",1452),eTS(1809,1,eU8,ah),eUe.Mb=function(e){return Pp(e,129).c==(Xa(),tuU)},Y5(eVz,"HyperEdgeCycleDetector/lambda$0$Type",1809),eTS(1810,1,{},ap),eUe.Ge=function(e){return Pp(e,129).d},Y5(eVz,"HyperEdgeCycleDetector/lambda$1$Type",1810),eTS(1811,1,eU8,ab),eUe.Mb=function(e){return Pp(e,129).c==(Xa(),tuU)},Y5(eVz,"HyperEdgeCycleDetector/lambda$2$Type",1811),eTS(1812,1,{},am),eUe.Ge=function(e){return Pp(e,129).d},Y5(eVz,"HyperEdgeCycleDetector/lambda$3$Type",1812),eTS(1813,1,{},ag),eUe.Ge=function(e){return Pp(e,129).d},Y5(eVz,"HyperEdgeCycleDetector/lambda$4$Type",1813),eTS(1814,1,{},av),eUe.Ge=function(e){return Pp(e,129).d},Y5(eVz,"HyperEdgeCycleDetector/lambda$5$Type",1814),eTS(112,1,{35:1,112:1},ea$),eUe.wd=function(e){return v$(this,Pp(e,112))},eUe.Fb=function(e){var t;return!!M4(e,112)&&(t=Pp(e,112),this.g==t.g)},eUe.Hb=function(){return this.g},eUe.Ib=function(){var e,t,n,r;for(e=new O0("{"),r=new fz(this.n);r.a"+this.b+" ("+AK(this.c)+")"},eUe.d=0,Y5(eVz,"HyperEdgeSegmentDependency",129),eTS(520,22,{3:1,35:1,22:1,520:1},SW);var e5k=enw(eVz,"HyperEdgeSegmentDependency/DependencyType",520,e1G,$q,Nd);eTS(1815,1,{},h$),Y5(eVz,"HyperEdgeSegmentSplitter",1815),eTS(1816,1,{},ym),eUe.a=0,eUe.b=0,Y5(eVz,"HyperEdgeSegmentSplitter/AreaRating",1816),eTS(329,1,{329:1},N4),eUe.a=0,eUe.b=0,eUe.c=0,Y5(eVz,"HyperEdgeSegmentSplitter/FreeArea",329),eTS(1817,1,e$C,aT),eUe.ue=function(e,t){return ID(Pp(e,112),Pp(t,112))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eVz,"HyperEdgeSegmentSplitter/lambda$0$Type",1817),eTS(1818,1,eUF,Hi),eUe.td=function(e){V5(this.a,this.d,this.c,this.b,Pp(e,112))},eUe.b=0,Y5(eVz,"HyperEdgeSegmentSplitter/lambda$1$Type",1818),eTS(1819,1,{},aM),eUe.Kb=function(e){return new R1(null,new 
Gq(Pp(e,112).e,16))},Y5(eVz,"HyperEdgeSegmentSplitter/lambda$2$Type",1819),eTS(1820,1,{},aO),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,112).j,16))},Y5(eVz,"HyperEdgeSegmentSplitter/lambda$3$Type",1820),eTS(1821,1,{},aA),eUe.Fe=function(e){return gP(LV(e))},Y5(eVz,"HyperEdgeSegmentSplitter/lambda$4$Type",1821),eTS(655,1,{},YJ),eUe.a=0,eUe.b=0,eUe.c=0,Y5(eVz,"OrthogonalRoutingGenerator",655),eTS(1638,1,{},aL),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,112).e,16))},Y5(eVz,"OrthogonalRoutingGenerator/lambda$0$Type",1638),eTS(1639,1,{},aC),eUe.Kb=function(e){return new R1(null,new Gq(Pp(e,112).j,16))},Y5(eVz,"OrthogonalRoutingGenerator/lambda$1$Type",1639),eTS(661,1,{}),Y5(eVG,"BaseRoutingDirectionStrategy",661),eTS(1807,661,{},mv),eUe.dg=function(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p,b;if(!e.r||e.q)for(l=t+e.o*n,c=new fz(e.n);c.aez8&&(a=l,i=e,r=new kl(f,a),P7(o.a,r),eDD(this,o,i,r,!1),(d=e.r)&&(h=gP(LV(ep3(d.e,0))),r=new kl(h,a),P7(o.a,r),eDD(this,o,i,r,!1),a=t+d.o*n,i=d,r=new kl(h,a),P7(o.a,r),eDD(this,o,i,r,!1)),r=new kl(b,a),P7(o.a,r),eDD(this,o,i,r,!1)))},eUe.eg=function(e){return e.i.n.a+e.n.a+e.a.a},eUe.fg=function(){return eYu(),tbj},eUe.gg=function(){return eYu(),tbw},Y5(eVG,"NorthToSouthRoutingStrategy",1807),eTS(1808,661,{},my),eUe.dg=function(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p,b;if(!e.r||e.q)for(l=t-e.o*n,c=new fz(e.n);c.aez8&&(a=l,i=e,r=new kl(f,a),P7(o.a,r),eDD(this,o,i,r,!1),(d=e.r)&&(h=gP(LV(ep3(d.e,0))),r=new kl(h,a),P7(o.a,r),eDD(this,o,i,r,!1),a=t-d.o*n,i=d,r=new kl(h,a),P7(o.a,r),eDD(this,o,i,r,!1)),r=new kl(b,a),P7(o.a,r),eDD(this,o,i,r,!1)))},eUe.eg=function(e){return e.i.n.a+e.n.a+e.a.a},eUe.fg=function(){return eYu(),tbw},eUe.gg=function(){return eYu(),tbj},Y5(eVG,"SouthToNorthRoutingStrategy",1808),eTS(1806,661,{},mw),eUe.dg=function(e,t,n){var r,i,a,o,s,u,c,l,f,d,h,p,b;if(!e.r||e.q)for(l=t+e.o*n,c=new fz(e.n);c.aez8&&(a=l,i=e,r=new kl(a,f),P7(o.a,r),eDD(this,o,i,r,!0),(d=e.r)&&(h=gP(LV(ep3(d.e,0))),r=new 
kl(a,h),P7(o.a,r),eDD(this,o,i,r,!0),a=t+d.o*n,i=d,r=new kl(a,h),P7(o.a,r),eDD(this,o,i,r,!0)),r=new kl(a,b),P7(o.a,r),eDD(this,o,i,r,!0)))},eUe.eg=function(e){return e.i.n.b+e.n.b+e.a.b},eUe.fg=function(){return eYu(),tby},eUe.gg=function(){return eYu(),tbY},Y5(eVG,"WestToEastRoutingStrategy",1806),eTS(813,1,{},eNG),eUe.Ib=function(){return e_F(this.a)},eUe.b=0,eUe.c=!1,eUe.d=!1,eUe.f=0,Y5(eVK,"NubSpline",813),eTS(407,1,{407:1},eA2,za),Y5(eVK,"NubSpline/PolarCP",407),eTS(1453,1,eVD,egt),eUe.Yf=function(e){return ewy(Pp(e,37))},eUe.pf=function(e,t){eYW(this,Pp(e,37),t)},Y5(eVK,"SplineEdgeRouter",1453),eTS(268,1,{268:1},Xt),eUe.Ib=function(){return this.a+" ->("+this.c+") "+this.b},eUe.c=0,Y5(eVK,"SplineEdgeRouter/Dependency",268),eTS(455,22,{3:1,35:1,22:1,455:1},SK);var e5x=enw(eVK,"SplineEdgeRouter/SideToProcess",455,e1G,$J,Nh);eTS(1454,1,eU8,ak),eUe.Mb=function(e){return eAq(),!Pp(e,128).o},Y5(eVK,"SplineEdgeRouter/lambda$0$Type",1454),eTS(1455,1,{},aS),eUe.Ge=function(e){return eAq(),Pp(e,128).v+1},Y5(eVK,"SplineEdgeRouter/lambda$1$Type",1455),eTS(1456,1,eUF,SV),eUe.td=function(e){Rw(this.a,this.b,Pp(e,46))},Y5(eVK,"SplineEdgeRouter/lambda$2$Type",1456),eTS(1457,1,eUF,Sq),eUe.td=function(e){R_(this.a,this.b,Pp(e,46))},Y5(eVK,"SplineEdgeRouter/lambda$3$Type",1457),eTS(128,1,{35:1,128:1},eSB,eRM),eUe.wd=function(e){return vz(this,Pp(e,128))},eUe.b=0,eUe.e=!1,eUe.f=0,eUe.g=0,eUe.j=!1,eUe.k=!1,eUe.n=0,eUe.o=!1,eUe.p=!1,eUe.q=!1,eUe.s=0,eUe.u=0,eUe.v=0,eUe.F=0,Y5(eVK,"SplineSegment",128),eTS(459,1,{459:1},ax),eUe.a=0,eUe.b=!1,eUe.c=!1,eUe.d=!1,eUe.e=!1,eUe.f=0,Y5(eVK,"SplineSegment/EdgeInformation",459),eTS(1234,1,{},ay),Y5(eVJ,ezQ,1234),eTS(1235,1,e$C,aw),eUe.ue=function(e,t){return ek4(Pp(e,135),Pp(t,135))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eVJ,ez1,1235),eTS(1233,1,{},y2),Y5(eVJ,"MrTree",1233),eTS(393,22,{3:1,35:1,22:1,393:1,246:1,234:1},SZ),eUe.Kf=function(){return ek6(this)},eUe.Xf=function(){return ek6(this)};var 
e5T=enw(eVJ,"TreeLayoutPhases",393,e1G,VM,Np);eTS(1130,209,ezL,CJ),eUe.Ze=function(e,t){var n,r,i,a,o,s,u;for(gN(LK(eT8(e,(eTj(),tcA))))||zh(n=new df((_q(),new gM(e)))),o=(eaW(s=new Xn,e),eo3(s,(eR6(),tcl),e),u=new p2,eDf(e,s,u),eDU(e,s,u),s),a=eDO(this.a,o),i=new fz(a);i.a"+WU(this.c):"e_"+esj(this)},Y5(eVQ,"TEdge",188),eTS(135,134,{3:1,135:1,94:1,134:1},Xn),eUe.Ib=function(){var e,t,n,r,i;for(i=null,r=epL(this.b,0);r.b!=r.d.c;)i+=(null==(n=Pp(Vv(r),86)).c||0==n.c.length?"n_"+n.g:"n_"+n.c)+"\n";for(t=epL(this.a,0);t.b!=t.d.c;)i+=((e=Pp(Vv(t),188)).b&&e.c?WU(e.b)+"->"+WU(e.c):"e_"+esj(e))+"\n";return i};var e5M=Y5(eVQ,"TGraph",135);eTS(633,502,{3:1,502:1,633:1,94:1,134:1}),Y5(eVQ,"TShape",633),eTS(86,633,{3:1,502:1,86:1,633:1,94:1,134:1},esH),eUe.Ib=function(){return WU(this)};var e5O=Y5(eVQ,"TNode",86);eTS(255,1,eU$,hz),eUe.Jc=function(e){qX(this,e)},eUe.Kc=function(){var e;return e=epL(this.a.d,0),new hG(e)},Y5(eVQ,"TNode/2",255),eTS(358,1,eUE,hG),eUe.Nb=function(e){F8(this,e)},eUe.Pb=function(){return Pp(Vv(this.a),188).c},eUe.Ob=function(){return yV(this.a)},eUe.Qb=function(){etu(this.a)},Y5(eVQ,"TNode/2/1",358),eTS(1840,1,eGB,CX),eUe.pf=function(e,t){eNv(this,Pp(e,135),t)},Y5(eV1,"FanProcessor",1840),eTS(327,22,{3:1,35:1,22:1,327:1,234:1},SX),eUe.Kf=function(){switch(this.g){case 0:return new mX;case 1:return new CX;case 2:return new aN;case 3:return new aI;case 4:return new aR;case 5:return new aj;default:throw p7(new gL(eWt+(null!=this.f?this.f:""+this.g)))}};var e5A=enw(eV1,eWn,327,e1G,JS,Nb);eTS(1843,1,eGB,aI),eUe.pf=function(e,t){eMo(this,Pp(e,135),t)},eUe.a=0,Y5(eV1,"LevelHeightProcessor",1843),eTS(1844,1,eU$,aD),eUe.Jc=function(e){qX(this,e)},eUe.Kc=function(){return Hj(),wV(),e2o},Y5(eV1,"LevelHeightProcessor/1",1844),eTS(1841,1,eGB,aN),eUe.pf=function(e,t){eSP(this,Pp(e,135),t)},eUe.a=0,Y5(eV1,"NeighborsProcessor",1841),eTS(1842,1,eU$,aP),eUe.Jc=function(e){qX(this,e)},eUe.Kc=function(){return 
Hj(),wV(),e2o},Y5(eV1,"NeighborsProcessor/1",1842),eTS(1845,1,eGB,aR),eUe.pf=function(e,t){eMa(this,Pp(e,135),t)},eUe.a=0,Y5(eV1,"NodePositionProcessor",1845),eTS(1839,1,eGB,mX),eUe.pf=function(e,t){eRm(this,Pp(e,135))},Y5(eV1,"RootProcessor",1839),eTS(1846,1,eGB,aj),eUe.pf=function(e,t){elE(Pp(e,135))},Y5(eV1,"Untreeifyer",1846),eTS(851,1,e$2,c$),eUe.Qe=function(e){efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eV3),""),"Weighting of Nodes"),"Which weighting to use when computing a node order."),tcE),(eSd(),tdv)),e5L),el9((epx(),tdh))))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eV4),""),"Search Order"),"Which search order to use when computing a spanning tree."),tcw),tdv),e5C),el9(tdh)))),ejG((new cH,e))},Y5(eV5,"MrTreeMetaDataProvider",851),eTS(994,1,e$2,cH),eUe.Qe=function(e){ejG(e)},Y5(eV5,"MrTreeOptions",994),eTS(995,1,{},aF),eUe.$e=function(){return new CJ},eUe._e=function(e){},Y5(eV5,"MrTreeOptions/MrtreeFactory",995),eTS(480,22,{3:1,35:1,22:1,480:1},SJ);var e5L=enw(eV5,"OrderWeighting",480,e1G,$1,Nm);eTS(425,22,{3:1,35:1,22:1,425:1},SQ);var e5C=enw(eV5,"TreeifyingOrder",425,e1G,$Q,Nv);eTS(1459,1,eVD,cD),eUe.Yf=function(e){return Pp(e,135),tcz},eUe.pf=function(e,t){eiD(this,Pp(e,135),t)},Y5("org.eclipse.elk.alg.mrtree.p1treeify","DFSTreeifyer",1459),eTS(1460,1,eVD,cN),eUe.Yf=function(e){return Pp(e,135),tcG},eUe.pf=function(e,t){eSZ(this,Pp(e,135),t)},Y5("org.eclipse.elk.alg.mrtree.p2order","NodeOrderer",1460),eTS(1461,1,eVD,cI),eUe.Yf=function(e){return Pp(e,135),tcW},eUe.pf=function(e,t){eCh(this,Pp(e,135),t)},eUe.a=0,Y5("org.eclipse.elk.alg.mrtree.p3place","NodePlacer",1461),eTS(1462,1,eVD,cP),eUe.Yf=function(e){return Pp(e,135),tcK},eUe.pf=function(e,t){evm(Pp(e,135),t)},Y5("org.eclipse.elk.alg.mrtree.p4route","EdgeRouter",1462),eTS(495,22,{3:1,35:1,22:1,495:1,246:1,234:1},S1),eUe.Kf=function(){return ede(this)},eUe.Xf=function(){return ede(this)};var e5I=enw(eV8,"RadialLayoutPhases",495,e1G,$0,Ng);eTS(1131,209,ezL,y0),eUe.Ze=function(e,t){var 
n,r,i,a,o,s;if(n=eS8(this,e),ewG(t,"Radial layout",n.c.length),gN(LK(eT8(e,(egj(),tlm))))||zh(r=new df((_q(),new gM(e)))),s=ewE(e),ebu(e,(Lj(),tcV),s),!s)throw p7(new gL("The given graph is not a tree!"));for(0==(i=gP(LV(eT8(e,tl_))))&&(i=ekB(e)),ebu(e,tl_,i),o=new fz(eS8(this,e));o.a0&&eu8((GV(t-1,e.length),e.charCodeAt(t-1)),eGq);)--t;if(r>=t)throw p7(new gL("The given string does not contain any numbers."));if(2!=(i=eIk(e.substr(r,t-r),",|;|\r|\n")).length)throw p7(new gL("Exactly two numbers are expected, "+i.length+" were found."));try{this.a=eEu(e_H(i[0])),this.b=eEu(e_H(i[1]))}catch(a){if(a=eoa(a),M4(a,127))throw n=a,p7(new gL(eGZ+n));throw p7(a)}},eUe.Ib=function(){return"("+this.a+","+this.b+")"},eUe.a=0,eUe.b=0;var e50=Y5(eGX,"KVector",8);eTS(74,68,{3:1,4:1,20:1,28:1,52:1,14:1,68:1,15:1,74:1,414:1},mE,yc,Lb),eUe.Pc=function(){return euE(this)},eUe.Jf=function(e){var t,n,r,i,a,o;r=eIk(e,",|;|\\(|\\)|\\[|\\]|\\{|\\}| | |\n"),HC(this);try{for(n=0,a=0,i=0,o=0;n0&&(a%2==0?i=eEu(r[n]):o=eEu(r[n]),a>0&&a%2!=0&&P7(this,new kl(i,o)),++a),++n}catch(s){if(s=eoa(s),M4(s,127))throw t=s,p7(new gL("The given string does not match the expected format for vectors."+t));throw p7(s)}},eUe.Ib=function(){var e,t,n;for(e=new O0("("),t=epL(this,0);t.b!=t.d.c;)xM(e,(n=Pp(Vv(t),8)).a+","+n.b),t.b!=t.d.c&&(e.a+="; ");return(e.a+=")",e).a};var e52=Y5(eGX,"KVectorChain",74);eTS(248,22,{3:1,35:1,22:1,248:1},kf);var e53=enw(eZe,"Alignment",248,e1G,Jg,NP);eTS(979,1,e$2,cq),eUe.Qe=function(e){eDj(e)},Y5(eZe,"BoxLayouterOptions",979),eTS(980,1,{},oA),eUe.$e=function(){return new oF},eUe._e=function(e){},Y5(eZe,"BoxLayouterOptions/BoxFactory",980),eTS(291,22,{3:1,35:1,22:1,291:1},kd);var e54=enw(eZe,"ContentAlignment",291,e1G,Jm,NR);eTS(684,1,e$2,cZ),eUe.Qe=function(e){efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eZi),""),"Layout Algorithm"),"Select a specific layout algorithm."),(eSd(),tdE)),e17),el9((epx(),tdh))))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eZa),""),"Resolved Layout 
Algorithm"),"Meta data associated with the selected algorithm."),td_),e5X),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVi),""),"Alignment"),"Alignment of the selected node relative to other nodes; the exact meaning depends on the used algorithm."),td0),tdv),e53),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,ezG),""),"Aspect Ratio"),"The desired aspect ratio of the drawing, that is the quotient of width by height."),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eZo),""),"Bend Points"),"A fixed list of bend points for the edge. This is used by the 'Fixed Layout' algorithm to specify a pre-defined routing for an edge. The vector chain must include the source point, any bend points, and the target point, so it must have at least two points."),td_),e52),el9(tdl)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVg),""),"Content Alignment"),"Specifies how the content of a node are aligned. Each node can individually control the alignment of its contents. I.e. if a node should be aligned top left in its parent node, the parent node should specify that option."),td8),tdy),e54),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVr),""),"Debug Mode"),"Whether additional debug information shall be generated."),(OQ(),!1)),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVu),""),ezw),"Overall direction of edges: horizontal (right / left) or vertical (down / up)."),tht),tdv),e55),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKB),""),"Edge Routing"),"What kind of edge routing style should be applied for the content of a parent node. Algorithms may also set this option to single edges in order to mark them as splines. 
The bend point list of edges with this option set to SPLINES must be interpreted as control points for a piecewise cubic spline."),tho),tdv),e59),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eqC),""),"Expand Nodes"),"If active, nodes are expanded to fill the area of their parent."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKP),""),"Hierarchy Handling"),"Determines whether separate layout runs are triggered for different compound nodes in a hierarchical graph. Setting a node's hierarchy handling to `INCLUDE_CHILDREN` will lay out that node and all of its descendants in a single layout run, until a descendant is encountered which has its hierarchy handling set to `SEPARATE_CHILDREN`. In general, `SEPARATE_CHILDREN` will ensure that a new layout run is triggered for a node with that setting. Including multiple levels of hierarchy in a single layout run may allow cross-hierarchical edges to be laid out properly. If the root node is set to `INHERIT` (or not set at all), the default behavior is `SEPARATE_CHILDREN`."),thf),tdv),e57),jL(tdh,eow(vx(e5Q,1),eU4,175,0,[tdd]))))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,ezW),""),"Padding"),"The padding to be left to a parent element's border when placing child elements. This can also serve as an output option of a layout algorithm if node size calculation is setup appropriately."),thP),td_),e4R),jL(tdh,eow(vx(e5Q,1),eU4,175,0,[tdd]))))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGu),""),"Interactive"),"Whether the algorithm should be run in interactive mode for the content of a parent node. What this means exactly depends on how the specific algorithm interprets this option. 
Usually in the interactive mode algorithms try to modify the current layout as little as possible."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVA),""),"interactive Layout"),"Whether the graph should be changeable interactively and by setting constraints"),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGf),""),"Omit Node Micro Layout"),"Node micro layout comprises the computation of node dimensions (if requested), the placement of ports and their labels, and the placement of node labels. The functionality is implemented independent of any specific layout algorithm and shouldn't have any negative impact on the layout algorithm's performance itself. Yet, if any unforeseen behavior occurs, this option allows to deactivate the micro layout."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGc),""),"Port Constraints"),"Defines constraints of the position of the ports of a node."),thq),tdv),e6r),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eVT),""),"Position"),"The position of a node, port, or label. This is used by the 'Fixed Layout' algorithm to specify a pre-defined position."),td_),e50),jL(tdd,eow(vx(e5Q,1),eU4,175,0,[tdp,tdf]))))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eGr),""),"Priority"),"Defines the priority of an object; its meaning depends on the specific layout algorithm and the context where it is used."),tdw),e15),jL(tdd,eow(vx(e5Q,1),eU4,175,0,[tdl]))))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eGo),""),"Randomization Seed"),"Seed used for pseudo-random number generators to control the layout algorithm. If the value is 0, the seed shall be determined pseudo-randomly (e.g. 
from the system time)."),tdw),e15),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eGs),""),"Separate Connected Components"),"Whether each connected component should be processed separately."),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVv),""),"Junction Points"),"This option is not used as option, but as output of the layout algorithms. It is attached to edges and determines the points where junction symbols should be drawn in order to represent hyperedges with orthogonal routing. Whether such points are computed depends on the chosen layout algorithm and edge routing style. The points are put into the vector chain with no specific order."),thv),td_),e52),el9(tdl)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eV_),""),"Comment Box"),"Whether the node should be regarded as a comment box instead of a regular node. In that case its placement should be similar to how labels are handled. Any edges incident to a comment box specify to which graph elements the comment is related."),!1),tdm),e11),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVE),""),"Hypernode"),"Whether the node should be handled as a hypernode."),!1),tdm),e11),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eZs),""),"Label Manager"),"Label managers can shorten labels upon a layout algorithm's request."),td_),tyO),jL(tdh,eow(vx(e5Q,1),eU4,175,0,[tdf]))))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVM),""),"Margins"),"Margins define additional space around the actual bounds of a graph element. For instance, ports or labels being placed on the outside of a node's border might introduce such a margin. The margin is used to guarantee non-overlap of other graph elements with those ports or labels."),thw),td_),e4D),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVt),""),"No Layout"),"No layout is done for the associated element. 
This is used to mark parts of a diagram to avoid their inclusion in the layout graph, or to mark parts of the layout graph to prevent layout engines from processing them. If you wish to exclude the contents of a compound node from automatic layout, while the node itself is still considered on its own layer, use the 'Fixed Layout' algorithm for that node."),!1),tdm),e11),jL(tdd,eow(vx(e5Q,1),eU4,175,0,[tdl,tdp,tdf]))))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eZu),""),"Scale Factor"),"The scaling factor to be applied to the corresponding node in recursive layout. It causes the corresponding node's size to be adjusted, and its ports and labels to be sized and placed accordingly after the layout of that node has been determined (and before the node itself and its siblings are arranged). The scaling is not reverted afterwards, so the resulting layout graph contains the adjusted size and position data. This option is currently not supported if 'Layout Hierarchy' is set."),1),tdg),e13),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eZc),""),"Animate"),"Whether the shift from the old layout to the new computed layout shall be animated."),!0),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eZl),""),"Animation Time Factor"),"Factor for computation of animation time. The higher the value, the longer the animation time. 
If the value is 0, the resulting time is always equal to the minimum defined by 'Minimal Animation Time'."),ell(100)),tdw),e15),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eZf),""),"Layout Ancestors"),"Whether the hierarchy levels on the path from the selected element to the root of the diagram shall be included in the layout process."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eZd),""),"Maximal Animation Time"),"The maximal time for animations, in milliseconds."),ell(4e3)),tdw),e15),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eZh),""),"Minimal Animation Time"),"The minimal time for animations, in milliseconds."),ell(400)),tdw),e15),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eZp),""),"Progress Bar"),"Whether a progress bar shall be displayed during layout computations."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eZb),""),"Validate Graph"),"Whether the graph shall be validated before any layout algorithm is applied. If this option is enabled and at least one error is found, the layout process is aborted and a message is shown to the user."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eZm),""),"Validate Options"),"Whether layout options shall be validated before any layout algorithm is applied. If this option is enabled and at least one error is found, the layout process is aborted and a message is shown to the user."),!0),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eZg),""),"Zoom to Fit"),"Whether the zoom level shall be set to view the whole diagram after layout."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eZr),"box"),"Box Layout Mode"),"Configures the packing mode used by the {@link BoxLayoutProvider}. If SIMPLE is not required (neither priorities are used nor the interactive mode), GROUP_DEC can improve the packing and decrease the area. 
GROUP_MIXED and GROUP_INC may, in very specific scenarios, work better."),td5),tdv),e6u),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eKQ),eKU),"Comment Comment Spacing"),"Spacing to be preserved between a comment box and other comment boxes connected to the same node. The space left between comment boxes of different nodes is controlled by the node-node spacing."),10),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eK1),eKU),"Comment Node Spacing"),"Spacing to be preserved between a node and its connected comment boxes. The space left between a node and the comments of another node is controlled by the node-node spacing."),10),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,ez$),eKU),"Components Spacing"),"Spacing to be preserved between pairs of connected components. This option is only relevant if 'separateConnectedComponents' is activated."),20),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eK0),eKU),"Edge Spacing"),"Spacing to be preserved between any two edges. Note that while this can somewhat easily be satisfied for the segments of orthogonally drawn edges, it is harder for general polylines or splines."),10),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGa),eKU),"Edge Label Spacing"),"The minimal distance to be preserved between a label and the edge it is associated with. 
Note that the placement of a label is influenced by the 'edgelabels.placement' option."),2),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eK2),eKU),"Edge Node Spacing"),"Spacing to be preserved between nodes and edges."),10),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eK3),eKU),"Label Spacing"),"Determines the amount of space to be left between two labels of the same graph element."),0),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eK6),eKU),"Label Node Spacing"),"Spacing to be preserved between labels and the border of node they are associated with. Note that the placement of a label is influenced by the 'nodelabels.placement' option."),5),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eK4),eKU),"Horizontal spacing between Label and Port"),"Horizontal spacing to be preserved between labels and the ports they are associated with. Note that the placement of a label is influenced by the 'portlabels.placement' option."),1),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eK5),eKU),"Vertical spacing between Label and Port"),"Vertical spacing to be preserved between labels and the ports they are associated with. 
Note that the placement of a label is influenced by the 'portlabels.placement' option."),1),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGi),eKU),"Node Spacing"),"The minimal distance to be preserved between each two nodes."),20),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eK9),eKU),"Node Self Loop Spacing"),"Spacing to be preserved between a node and its self loops."),10),tdg),e13),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eK8),eKU),"Port Spacing"),"Spacing between pairs of ports of the same node."),10),tdg),e13),jL(tdh,eow(vx(e5Q,1),eU4,175,0,[tdd]))))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eK7),eKU),"Individual Spacing"),"Allows to specify individual spacing values for graph elements that shall be different from the value specified for the element's parent."),td_),e6c),jL(tdd,eow(vx(e5Q,1),eU4,175,0,[tdl,tdp,tdf]))))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVO),eKU),"Additional Port Space"),"Additional space around the sets of ports on each node side. For each side of a node, this option can reserve additional space before and after the ports on each side. For example, a top spacing of 20 makes sure that the first port on the western and eastern side is 20 units away from the northern border."),tph),td_),e4D),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eVx),eZ_),"Layout Partition"),"Partition to which the node belongs. This requires Layout Partitioning to be active. Nodes with lower partition IDs will appear to the left of nodes with higher partition IDs (assuming a left-to-right layout direction)."),tdw),e15),jL(tdh,eow(vx(e5Q,1),eU4,175,0,[tdd]))))),K_(e,eVx,eVk,thY),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVk),eZ_),"Layout Partitioning"),"Whether to activate partitioned layout. This will allow to group nodes through the Layout Partition option. 
a pair of nodes with different partition indices is then placed such that the node with lower index is placed to the left of the other node (with left-to-right layout direction). Depending on the layout algorithm, this may only be guaranteed to work if all nodes have a layout partition configured, or at least if edges that cross partitions are not part of a partition-crossing cycle."),thj),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVc),eZE),"Node Label Padding"),"Define padding for node labels that are placed inside of a node."),thE),td_),e4R),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGp),eZE),"Node Label Placement"),"Hints for where node labels are to be placed; if empty, the node label's position is not modified."),thk),tdy),e6t),jL(tdd,eow(vx(e5Q,1),eU4,175,0,[tdf]))))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVd),eZS),"Port Alignment"),"Defines the default port distribution for a node. May be overridden for each side individually."),thU),tdv),e6n),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eVh),eZS),"Port Alignment (North)"),"Defines how ports on the northern side are placed, overriding the node's general port alignment."),tdv),e6n),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eVp),eZS),"Port Alignment (South)"),"Defines how ports on the southern side are placed, overriding the node's general port alignment."),tdv),e6n),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eVb),eZS),"Port Alignment (West)"),"Defines how ports on the western side are placed, overriding the node's general port alignment."),tdv),e6n),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eVm),eZS),"Port Alignment (East)"),"Defines how ports on the eastern side are placed, overriding the node's general port alignment."),tdv),e6n),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGh),eZk),"Node Size Constraints"),"What should be taken into account when calculating a node's size. 
Empty size constraints specify that a node's size is already fixed and should not be changed."),thT),tdy),e6o),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGd),eZk),"Node Size Options"),"Options modifying the behavior of the size constraints set on a node. Each member of the set specifies something that should be taken into account when calculating node sizes. The empty set corresponds to no further modifications."),thC),tdy),e6s),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGM),eZk),"Node Size Minimum"),"The minimal size to which a node can be reduced."),thA),td_),e50),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVs),eZk),"Fixed Graph Size"),"By default, the fixed layout provider will enlarge a graph until it is large enough to contain its children. If this option is set, it won't do so."),!1),tdm),e11),el9(tdh)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVy),eKX),"Edge Label Placement"),"Gives a hint on where to put edge labels."),thi),tdv),e56),el9(tdf)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGl),eKX),"Inline Edge Labels"),"If true, an edge label is placed directly on its edge. May only apply to center edge labels. This kind of label placement is only advisable if the label's rendering is such that it is not crossed by its edge and thus stays legible."),!1),tdm),e11),el9(tdf)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eZv),"font"),"Font Name"),"Font name used for a label."),tdE),e17),el9(tdf)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eZy),"font"),"Font Size"),"Font size used for a label."),tdw),e15),el9(tdf)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eVS),eZx),"Port Anchor Offset"),"The offset to the port position where connections shall be attached."),td_),e50),el9(tdp)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eVw),eZx),"Port Index"),"The index of a port in the fixed order around a node. The order is assumed as clockwise, starting with the leftmost port on the top side. 
This option must be set if 'Port Constraints' is set to FIXED_ORDER and no specific positions are given for the ports. Additionally, the option 'Port Side' must be defined in this case."),tdw),e15),el9(tdp)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVn),eZx),"Port Side"),"The side of a node on which a port is situated. This option must be set if 'Port Constraints' is set to FIXED_SIDE or FIXED_ORDER and no specific positions are given for the ports."),th2),tdv),e6a),el9(tdp)))),efO(e,new eE8(yt(ye(yn(v4(v7(v6(v9(new oN,eVe),eZx),"Port Border Offset"),"The offset of ports on the node border. With a positive offset the port is moved outside of the node, while with a negative offset the port is moved towards the inside. An offset of 0 means that the port is placed directly on the node border, i.e. if the port side is north, the port's south border touches the nodes's north border; if the port side is east, the port's west border touches the nodes's east border; if the port side is south, the port's north border touches the node's south border; if the port side is west, the port's east border touches the node's west border."),tdg),e13),el9(tdp)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eGb),eZT),"Port Label Placement"),"Decides on a placement method for port labels; if empty, the node label's position is not modified."),thQ),tdy),e6i),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVl),eZT),"Port Labels Next to Port"),"Use 'portLabels.placement': NEXT_TO_PORT_OF_POSSIBLE."),!1),tdm),e11),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVf),eZT),"Treat Port Labels as Group"),"If this option is true (default), the labels of a port will be treated as a group when it comes to centering them next to their port. If this option is false, only the first label will be centered next to the port, with the others being placed below. 
This only applies to labels of eastern and western ports and will have no effect if labels are not placed next to their port."),!0),tdm),e11),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVa),eZM),"Activate Inside Self Loops"),"Whether this node allows to route self loops inside of it instead of around it. If set to true, this will make the node a compound node if it isn't already, and will require the layout algorithm to support compound nodes with hierarchical ports."),!1),tdm),e11),el9(tdd)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eVo),eZM),"Inside Self Loop"),"Whether a self loop should be routed inside a node instead of around that node."),!1),tdm),e11),el9(tdl)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,ezz),"edge"),"Edge Thickness"),"The thickness of an edge. This is a hint on the line width used to draw an edge, possibly requiring more space to be reserved for it."),1),tdg),e13),el9(tdl)))),efO(e,new eE8(yt(ye(yn(v5(v4(v7(v6(v9(new oN,eZw),"edge"),"Edge Type"),"The type of an edge. This is usually used for UML class diagrams, where associations must be handled differently from generalizations."),thu),tdv),e58),el9(tdl)))),_B(e,new GM(v0(v3(v2(new of,eG1),"Layered"),'The layer-based method was introduced by Sugiyama, Tagawa and Toda in 1981. It emphasizes the direction of edges by pointing as many edges as possible into the same direction. The nodes are arranged in layers, which are sometimes called "hierarchies", and then reordered such that the number of edge crossings is minimized. Afterwards, concrete coordinates are computed for the nodes and edge bend points.'))),_B(e,new GM(v0(v3(v2(new of,"org.eclipse.elk.orthogonal"),"Orthogonal"),'Orthogonal methods that follow the "topology-shape-metrics" approach by Batini, Nardelli and Tamassia \'86. The first phase determines the topology of the drawing by applying a planarization technique, which results in a planar representation of the graph. 
The orthogonal shape is computed in the second phase, which aims at minimizing the number of edge bends, and is called orthogonalization. The third phase leads to concrete coordinates for nodes and edge bend points by applying a compaction method, thus defining the metrics.'))),_B(e,new GM(v0(v3(v2(new of,eGn),"Force"),"Layout algorithms that follow physical analogies by simulating a system of attractive and repulsive forces. The first successful method of this kind was proposed by Eades in 1984."))),_B(e,new GM(v0(v3(v2(new of,"org.eclipse.elk.circle"),"Circle"),"Circular layout algorithms emphasize cycles or biconnected components of a graph by arranging them in circles. This is useful if a drawing is desired where such components are clearly grouped, or where cycles are shown as prominent OPTIONS of the graph."))),_B(e,new GM(v0(v3(v2(new of,eV9),"Tree"),"Specialized layout methods for trees, i.e. acyclic graphs. The regular structure of graphs that have no undirected cycles can be emphasized using an algorithm of this type."))),_B(e,new GM(v0(v3(v2(new of,"org.eclipse.elk.planar"),"Planar"),"Algorithms that require a planar or upward planar graph. 
Most of these algorithms are theoretically interesting, but not practically usable."))),_B(e,new GM(v0(v3(v2(new of,eqp),"Radial"),"Radial layout algorithms usually position the nodes of the graph on concentric circles."))),eIm((new cX,e)),eDj((new cq,e)),eL6((new cJ,e))},Y5(eZe,"CoreOptions",684),eTS(103,22,{3:1,35:1,22:1,103:1},kh);var e55=enw(eZe,ezw,103,e1G,Zh,NY);eTS(272,22,{3:1,35:1,22:1,272:1},kp);var e56=enw(eZe,"EdgeLabelPlacement",272,e1G,Wp,NB);eTS(218,22,{3:1,35:1,22:1,218:1},kb);var e59=enw(eZe,"EdgeRouting",218,e1G,VC,NU);eTS(312,22,{3:1,35:1,22:1,312:1},km);var e58=enw(eZe,"EdgeType",312,e1G,Jx,NH);eTS(977,1,e$2,cX),eUe.Qe=function(e){eIm(e)},Y5(eZe,"FixedLayouterOptions",977),eTS(978,1,{},o$),eUe.$e=function(){return new oR},eUe._e=function(e){},Y5(eZe,"FixedLayouterOptions/FixedFactory",978),eTS(334,22,{3:1,35:1,22:1,334:1},kg);var e57=enw(eZe,"HierarchyHandling",334,e1G,Wh,N$);eTS(285,22,{3:1,35:1,22:1,285:1},kv);var e6e=enw(eZe,"LabelSide",285,e1G,VL,Nz);eTS(93,22,{3:1,35:1,22:1,93:1},ky);var e6t=enw(eZe,"NodeLabelPlacement",93,e1G,ene,NG);eTS(249,22,{3:1,35:1,22:1,249:1},kw);var e6n=enw(eZe,"PortAlignment",249,e1G,Zp,NW);eTS(98,22,{3:1,35:1,22:1,98:1},k_);var e6r=enw(eZe,"PortConstraints",98,e1G,X0,NK);eTS(273,22,{3:1,35:1,22:1,273:1},kE);var e6i=enw(eZe,"PortLabelPlacement",273,e1G,Jk,NV);eTS(61,22,{3:1,35:1,22:1,61:1},kS);var e6a=enw(eZe,"PortSide",61,e1G,q5,NX);eTS(981,1,e$2,cJ),eUe.Qe=function(e){eL6(e)},Y5(eZe,"RandomLayouterOptions",981),eTS(982,1,{},oz),eUe.$e=function(){return new oV},eUe._e=function(e){},Y5(eZe,"RandomLayouterOptions/RandomFactory",982),eTS(374,22,{3:1,35:1,22:1,374:1},kk);var e6o=enw(eZe,"SizeConstraint",374,e1G,VA,Nq);eTS(259,22,{3:1,35:1,22:1,259:1},kx);var e6s=enw(eZe,"SizeOptions",259,e1G,en2,NZ);eTS(370,1,{1949:1},mV),eUe.b=!1,eUe.c=0,eUe.d=-1,eUe.e=null,eUe.f=null,eUe.g=-1,eUe.j=!1,eUe.k=!1,eUe.n=!1,eUe.o=0,eUe.q=0,eUe.r=0,Y5(eVL,"BasicProgressMonitor",370),eTS(972,209,ezL,oF),eUe.Ze=function(e,t){var 
n,r,i,a,o,s,u,c,l;(ewG(t,"Box layout",2),i=gR(LV(eT8(e,(e_C(),tdG)))),a=Pp(eT8(e,tdH),116),n=gN(LK(eT8(e,tdj))),r=gN(LK(eT8(e,tdF))),0===Pp(eT8(e,tdP),311).g)?(o=(s=new I4((e.a||(e.a=new FQ(e6k,e,10,11)),e.a)),Hj(),Mv(s,new h3(r)),s),u=eSI(e),(null==(c=LV(eT8(e,tdN)))||(BJ(c),c<=0))&&(c=1.3),l=eYA(o,i,a,u.a,u.b,n,(BJ(c),c)),eYx(e,l.a,l.b,!1,!0)):eRF(e,i,a,n),eEj(t)},Y5(eVL,"BoxLayoutProvider",972),eTS(973,1,e$C,h3),eUe.ue=function(e,t){return eOQ(this,Pp(e,33),Pp(t,33))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},eUe.a=!1,Y5(eVL,"BoxLayoutProvider/1",973),eTS(157,1,{157:1},etD,Lp),eUe.Ib=function(){return this.c?eC4(this.c):e_F(this.b)},Y5(eVL,"BoxLayoutProvider/Group",157),eTS(311,22,{3:1,35:1,22:1,311:1},kT);var e6u=enw(eVL,"BoxLayoutProvider/PackingMode",311,e1G,VI,NJ);eTS(974,1,e$C,oY),eUe.ue=function(e,t){return HK(Pp(e,157),Pp(t,157))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eVL,"BoxLayoutProvider/lambda$0$Type",974),eTS(975,1,e$C,oB),eUe.ue=function(e,t){return Hm(Pp(e,157),Pp(t,157))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eVL,"BoxLayoutProvider/lambda$1$Type",975),eTS(976,1,e$C,oU),eUe.ue=function(e,t){return Hg(Pp(e,157),Pp(t,157))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eVL,"BoxLayoutProvider/lambda$2$Type",976),eTS(1365,1,{831:1},oH),eUe.qg=function(e,t){return _R(),!M4(t,160)||yX((eoM(),Pp(e,160)),t)},Y5(eVL,"ElkSpacings/AbstractSpacingsBuilder/lambda$0$Type",1365),eTS(1366,1,eUF,h4),eUe.td=function(e){eux(this.a,Pp(e,146))},Y5(eVL,"ElkSpacings/AbstractSpacingsBuilder/lambda$1$Type",1366),eTS(1367,1,eUF,oj),eUe.td=function(e){Pp(e,94),_R()},Y5(eVL,"ElkSpacings/AbstractSpacingsBuilder/lambda$2$Type",1367),eTS(1371,1,eUF,h5),eUe.td=function(e){erQ(this.a,Pp(e,94))},Y5(eVL,"ElkSpacings/AbstractSpacingsBuilder/lambda$3$Type",1371),eTS(1369,1,eU8,kM),eUe.Mb=function(e){return 
esI(this.a,this.b,Pp(e,146))},Y5(eVL,"ElkSpacings/AbstractSpacingsBuilder/lambda$4$Type",1369),eTS(1368,1,eU8,kO),eUe.Mb=function(e){return Lt(this.a,this.b,Pp(e,831))},Y5(eVL,"ElkSpacings/AbstractSpacingsBuilder/lambda$5$Type",1368),eTS(1370,1,eUF,kA),eUe.td=function(e){Fj(this.a,this.b,Pp(e,146))},Y5(eVL,"ElkSpacings/AbstractSpacingsBuilder/lambda$6$Type",1370),eTS(935,1,{},oP),eUe.Kb=function(e){return TA(e)},eUe.Fb=function(e){return this===e},Y5(eVL,"ElkUtil/lambda$0$Type",935),eTS(936,1,eUF,kL),eUe.td=function(e){exS(this.a,this.b,Pp(e,79))},eUe.a=0,eUe.b=0,Y5(eVL,"ElkUtil/lambda$1$Type",936),eTS(937,1,eUF,kC),eUe.td=function(e){gp(this.a,this.b,Pp(e,202))},eUe.a=0,eUe.b=0,Y5(eVL,"ElkUtil/lambda$2$Type",937),eTS(938,1,eUF,kI),eUe.td=function(e){Me(this.a,this.b,Pp(e,137))},eUe.a=0,eUe.b=0,Y5(eVL,"ElkUtil/lambda$3$Type",938),eTS(939,1,eUF,h6),eUe.td=function(e){RE(this.a,Pp(e,469))},Y5(eVL,"ElkUtil/lambda$4$Type",939),eTS(342,1,{35:1,342:1},pQ),eUe.wd=function(e){return Os(this,Pp(e,236))},eUe.Fb=function(e){var t;return!!M4(e,342)&&(t=Pp(e,342),this.a==t.a)},eUe.Hb=function(){return zy(this.a)},eUe.Ib=function(){return this.a+" (exclusive)"},eUe.a=0,Y5(eVL,"ExclusiveBounds/ExclusiveLowerBound",342),eTS(1138,209,ezL,oR),eUe.Ze=function(e,t){var n,r,i,a,o,s,u,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x;for(ewG(t,"Fixed Layout",1),a=Pp(eT8(e,(eBB(),tha)),218),d=0,h=0,y=new Ow((e.a||(e.a=new FQ(e6k,e,10,11)),e.a));y.e!=y.i.gc();){for(g=Pp(epH(y),33),(x=Pp(eT8(g,(euw(),tp$)),8))&&(TP(g,x.a,x.b),Pp(eT8(g,tpF),174).Hc((ed6(),tbW))&&(p=Pp(eT8(g,tpB),8)).a>0&&p.b>0&&eYx(g,p.a,p.b,!0,!0)),d=eB4.Math.max(d,g.i+g.g),h=eB4.Math.max(h,g.j+g.f),l=new Ow((g.n||(g.n=new FQ(e6S,g,1,7)),g.n));l.e!=l.i.gc();)s=Pp(epH(l),137),(x=Pp(eT8(s,tp$),8))&&TP(s,x.a,x.b),d=eB4.Math.max(d,g.i+s.i+s.g),h=eB4.Math.max(h,g.j+s.j+s.f);for(E=new Ow((g.c||(g.c=new 
FQ(e6x,g,9,9)),g.c));E.e!=E.i.gc();)for(_=Pp(epH(E),118),(x=Pp(eT8(_,tp$),8))&&TP(_,x.a,x.b),S=g.i+_.i,k=g.j+_.j,d=eB4.Math.max(d,S+_.g),h=eB4.Math.max(h,k+_.f),u=new Ow((_.n||(_.n=new FQ(e6S,_,1,7)),_.n));u.e!=u.i.gc();)s=Pp(epH(u),137),(x=Pp(eT8(s,tp$),8))&&TP(s,x.a,x.b),d=eB4.Math.max(d,S+s.i+s.g),h=eB4.Math.max(h,k+s.j+s.f);for(i=new Fa(OH(eOi(g).a.Kc(),new c));eTk(i);)n=Pp(ZC(i),79),f=eYT(n),d=eB4.Math.max(d,f.a),h=eB4.Math.max(h,f.b);for(r=new Fa(OH(eOr(g).a.Kc(),new c));eTk(r);)n=Pp(ZC(r),79),z$(e_I(n))!=e&&(f=eYT(n),d=eB4.Math.max(d,f.a),h=eB4.Math.max(h,f.b))}if(a==(efE(),tpx))for(v=new Ow((e.a||(e.a=new FQ(e6k,e,10,11)),e.a));v.e!=v.i.gc();)for(g=Pp(epH(v),33),r=new Fa(OH(eOi(g).a.Kc(),new c));eTk(r);)n=Pp(ZC(r),79),0==(o=eDX(n)).b?ebu(n,thg,null):ebu(n,thg,o);gN(LK(eT8(e,(euw(),tpY))))||(w=Pp(eT8(e,tpU),116),eYx(e,m=d+w.b+w.c,b=h+w.d+w.a,!0,!0)),eEj(t)},Y5(eVL,"FixedLayoutProvider",1138),eTS(373,134,{3:1,414:1,373:1,94:1,134:1},oG,eer),eUe.Jf=function(e){var t,n,r,i,a,o,s,u,c;if(e)try{for(a=u=eIk(e,";,;"),o=0,s=a.length;o>16&eHd|t^r<<16},eUe.Kc=function(){return new h9(this)},eUe.Ib=function(){return null==this.a&&null==this.b?"pair(null,null)":null==this.a?"pair(null,"+efF(this.b)+")":null==this.b?"pair("+efF(this.a)+",null)":"pair("+efF(this.a)+","+efF(this.b)+")"},Y5(eVL,"Pair",46),eTS(983,1,eUE,h9),eUe.Nb=function(e){F8(this,e)},eUe.Ob=function(){return!this.c&&(!this.b&&null!=this.a.a||null!=this.a.b)},eUe.Pb=function(){if(!this.c&&!this.b&&null!=this.a.a)return this.b=!0,this.a.a;if(!this.c&&null!=this.a.b)return this.c=!0,this.a.b;throw p7(new bC)},eUe.Qb=function(){throw this.c&&null!=this.a.b?this.a.b=null:this.b&&null!=this.a.a&&(this.a.a=null),p7(new bT)},eUe.b=!1,eUe.c=!1,Y5(eVL,"Pair/1",983),eTS(448,1,{448:1},Ho),eUe.Fb=function(e){return UT(this.a,Pp(e,448).a)&&UT(this.c,Pp(e,448).c)&&UT(this.d,Pp(e,448).d)&&UT(this.b,Pp(e,448).b)},eUe.Hb=function(){return 
euF(eow(vx(e1R,1),eUp,1,5,[this.a,this.c,this.d,this.b]))},eUe.Ib=function(){return"("+this.a+eUd+this.c+eUd+this.d+eUd+this.b+")"},Y5(eVL,"Quadruple",448),eTS(1126,209,ezL,oV),eUe.Ze=function(e,t){var n,r,i,a,o;if(ewG(t,"Random Layout",1),0==(e.a||(e.a=new FQ(e6k,e,10,11)),e.a).i){eEj(t);return}i=(a=Pp(eT8(e,(ed5(),tbz)),19))&&0!=a.a?new qS(a.a):new efo,n=gR(LV(eT8(e,tbU))),o=gR(LV(eT8(e,tbG))),r=Pp(eT8(e,tbH),116),eF1(e,i,n,o,r),eEj(t)},Y5(eVL,"RandomLayoutProvider",1126),eTS(553,1,{}),eUe.qf=function(){return new kl(this.f.i,this.f.j)},eUe.We=function(e){return $k(e,(eBB(),thK))?eT8(this.f,tmu):eT8(this.f,e)},eUe.rf=function(){return new kl(this.f.g,this.f.f)},eUe.sf=function(){return this.g},eUe.Xe=function(e){return X2(this.f,e)},eUe.tf=function(e){eno(this.f,e.a),ens(this.f,e.b)},eUe.uf=function(e){ena(this.f,e.a),eni(this.f,e.b)},eUe.vf=function(e){this.g=e},eUe.g=0,Y5(eZI,"ElkGraphAdapters/AbstractElkGraphElementAdapter",553),eTS(554,1,{839:1},h8),eUe.wf=function(){var e,t;if(!this.b)for(this.b=K$(UB(this.a).i),t=new Ow(UB(this.a));t.e!=t.i.gc();)e=Pp(epH(t),137),P_(this.b,new gO(e));return this.b},eUe.b=null,Y5(eZI,"ElkGraphAdapters/ElkEdgeAdapter",554),eTS(301,553,{},gM),eUe.xf=function(){return em3(this)},eUe.a=null,Y5(eZI,"ElkGraphAdapters/ElkGraphAdapter",301),eTS(630,553,{181:1},gO),Y5(eZI,"ElkGraphAdapters/ElkLabelAdapter",630),eTS(629,553,{680:1},AC),eUe.wf=function(){return em0(this)},eUe.Af=function(){var e;return(e=Pp(eT8(this.f,(eBB(),thy)),142))||(e=new mh),e},eUe.Cf=function(){return em2(this)},eUe.Ef=function(e){var t;t=new Dk(e),ebu(this.f,(eBB(),thy),t)},eUe.Ff=function(e){ebu(this.f,(eBB(),thN),new DS(e))},eUe.yf=function(){return this.d},eUe.zf=function(){var e,t;if(!this.a)for(this.a=new p0,t=new Fa(OH(eOr(Pp(this.f,33)).a.Kc(),new c));eTk(t);)e=Pp(ZC(t),79),P_(this.a,new h8(e));return this.a},eUe.Bf=function(){var e,t;if(!this.c)for(this.c=new p0,t=new Fa(OH(eOi(Pp(this.f,33)).a.Kc(),new c));eTk(t);)e=Pp(ZC(t),79),P_(this.c,new 
h8(e));return this.c},eUe.Df=function(){return 0!=H8(Pp(this.f,33)).i||gN(LK(Pp(this.f,33).We((eBB(),thh))))},eUe.Gf=function(){QV(this,(_q(),tms))},eUe.a=null,eUe.b=null,eUe.c=null,eUe.d=null,eUe.e=null,Y5(eZI,"ElkGraphAdapters/ElkNodeAdapter",629),eTS(1266,553,{838:1},pA),eUe.wf=function(){return egd(this)},eUe.zf=function(){var e,t;if(!this.a)for(this.a=AH(Pp(this.f,118).xg().i),t=new Ow(Pp(this.f,118).xg());t.e!=t.i.gc();)e=Pp(epH(t),79),P_(this.a,new h8(e));return this.a},eUe.Bf=function(){var e,t;if(!this.c)for(this.c=AH(Pp(this.f,118).yg().i),t=new Ow(Pp(this.f,118).yg());t.e!=t.i.gc();)e=Pp(epH(t),79),P_(this.c,new h8(e));return this.c},eUe.Hf=function(){return Pp(Pp(this.f,118).We((eBB(),th0)),61)},eUe.If=function(){var e,t,n,r,i,a,o,s;for(r=zY(Pp(this.f,118)),n=new Ow(Pp(this.f,118).yg());n.e!=n.i.gc();)for(e=Pp(epH(n),79),s=new Ow((e.c||(e.c=new Ih(e6m,e,5,8)),e.c));s.e!=s.i.gc();)if(o=Pp(epH(s),82),etg(ewH(o),r)||ewH(o)==r&&gN(LK(eT8(e,(eBB(),thp)))))return!0;for(t=new Ow(Pp(this.f,118).xg());t.e!=t.i.gc();)for(e=Pp(epH(t),79),a=new Ow((e.b||(e.b=new Ih(e6m,e,4,7)),e.b));a.e!=a.i.gc();)if(i=Pp(epH(a),82),etg(ewH(i),r))return!0;return!1},eUe.a=null,eUe.b=null,eUe.c=null,Y5(eZI,"ElkGraphAdapters/ElkPortAdapter",1266),eTS(1267,1,e$C,oq),eUe.ue=function(e,t){return eC3(Pp(e,118),Pp(t,118))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(eZI,"ElkGraphAdapters/PortComparator",1267);var e6f=RL(eZD,"EObject"),e6d=RL(eZN,eZP),e6h=RL(eZN,eZR),e6p=RL(eZN,eZj),e6b=RL(eZN,"ElkShape"),e6m=RL(eZN,eZF),e6g=RL(eZN,eZY),e6v=RL(eZN,eZB),e6y=RL(eZD,eZU),e6w=RL(eZD,"EFactory"),e6_=RL(eZD,eZH),e6E=RL(eZD,"EPackage"),e6S=RL(eZN,eZ$),e6k=RL(eZN,eZz),e6x=RL(eZN,eZG);eTS(90,1,eZW),eUe.Jg=function(){return this.Kg(),null},eUe.Kg=function(){return null},eUe.Lg=function(){return this.Kg(),!1},eUe.Mg=function(){return!1},eUe.Ng=function(e){eam(this,e)},Y5(eZK,"BasicNotifierImpl",90),eTS(97,90,eZ0),eUe.nh=function(){return 
TO(this)},eUe.Og=function(e,t){return e},eUe.Pg=function(){throw p7(new bO)},eUe.Qg=function(e){var t;return t=ebY(Pp(ee2(this.Tg(),this.Vg()),18)),this.eh().ih(this,t.n,t.f,e)},eUe.Rg=function(e,t){throw p7(new bO)},eUe.Sg=function(e,t,n){return eDg(this,e,t,n)},eUe.Tg=function(){var e;return this.Pg()&&(e=this.Pg().ck())?e:this.zh()},eUe.Ug=function(){return eTp(this)},eUe.Vg=function(){throw p7(new bO)},eUe.Wg=function(){var e,t;return(t=this.ph().dk())||this.Pg().ik(t=(_0(),null==(e=zr(eNT(this.Tg())))?tgV:new AA(this,e))),t},eUe.Xg=function(e,t){return e},eUe.Yg=function(e){var t;return(t=e.Gj())?e.aj():edv(this.Tg(),e)},eUe.Zg=function(){var e;return(e=this.Pg())?e.fk():null},eUe.$g=function(){return this.Pg()?this.Pg().ck():null},eUe._g=function(e,t,n){return ebl(this,e,t,n)},eUe.ah=function(e){return JG(this,e)},eUe.bh=function(e,t){return ZN(this,e,t)},eUe.dh=function(){var e;return!!(e=this.Pg())&&e.gk()},eUe.eh=function(){throw p7(new bO)},eUe.fh=function(){return ehO(this)},eUe.gh=function(e,t,n,r){return ep0(this,e,t,r)},eUe.hh=function(e,t,n){var r;return(r=Pp(ee2(this.Tg(),t),66)).Nj().Qj(this,this.yh(),t-this.Ah(),e,n)},eUe.ih=function(e,t,n,r){return $7(this,e,t,r)},eUe.jh=function(e,t,n){var r;return(r=Pp(ee2(this.Tg(),t),66)).Nj().Rj(this,this.yh(),t-this.Ah(),e,n)},eUe.kh=function(){return!!this.Pg()&&!!this.Pg().ek()},eUe.lh=function(e){return epY(this,e)},eUe.mh=function(e){return zz(this,e)},eUe.oh=function(e){return eR2(this,e)},eUe.ph=function(){throw p7(new bO)},eUe.qh=function(){return this.Pg()?this.Pg().ek():null},eUe.rh=function(){return ehO(this)},eUe.sh=function(e,t){eS5(this,e,t)},eUe.th=function(e){this.ph().hk(e)},eUe.uh=function(e){this.ph().kk(e)},eUe.vh=function(e){this.ph().jk(e)},eUe.wh=function(e,t){var 
n,r,i,a;return(a=this.Zg())&&e&&(t=ep6(a.Vk(),this,t),a.Zk(this)),(r=this.eh())&&((eIy(this,this.eh(),this.Vg()).Bb&eH3)!=0?(i=r.fh())&&(e?a||i.Zk(this):i.Yk(this)):(t=(n=this.Vg())>=0?this.Qg(t):this.eh().ih(this,-1-n,null,t),t=this.Sg(null,-1,t))),this.uh(e),t},eUe.xh=function(e){var t,n,r,i,a,o,s,u;if((a=edv(n=this.Tg(),e))>=(t=this.Ah()))return Pp(e,66).Nj().Uj(this,this.yh(),a-t);if(a<=-1){if(o=eR3((eSp(),tvc),n,e)){if(_4(),Pp(o,66).Oj()||(o=Wk(QZ(tvc,o))),i=Pp((r=this.Yg(o))>=0?this._g(r,!0,!0):exk(this,o,!0),153),(u=o.Zj())>1||-1==u)return Pp(Pp(i,215).hl(e,!1),76)}else throw p7(new gL(eZV+e.ne()+eZX))}else if(e.$j())return Pp((r=this.Yg(e))>=0?this._g(r,!1,!0):exk(this,e,!1),76);return new k4(this,e)},eUe.yh=function(){return Q5(this)},eUe.zh=function(){return(BM(),tgv).S},eUe.Ah=function(){return Y1(this.zh())},eUe.Bh=function(e){eSi(this,e)},eUe.Ib=function(){return eMT(this)},Y5(eZ2,"BasicEObjectImpl",97),eTS(114,97,{105:1,92:1,90:1,56:1,108:1,49:1,97:1,114:1}),eUe.Ch=function(e){var t;return(t=Q6(this))[e]},eUe.Dh=function(e,t){var n;n=Q6(this),Bc(n,e,t)},eUe.Eh=function(e){var t;t=Q6(this),Bc(t,e,null)},eUe.Jg=function(){return Pp(eaS(this,4),126)},eUe.Kg=function(){throw p7(new bO)},eUe.Lg=function(){return(4&this.Db)!=0},eUe.Pg=function(){throw p7(new bO)},eUe.Fh=function(e){ehU(this,2,e)},eUe.Rg=function(e,t){this.Db=t<<16|255&this.Db,this.Fh(e)},eUe.Tg=function(){return $S(this)},eUe.Vg=function(){return this.Db>>16},eUe.Wg=function(){var e,t;return _0(),null==(t=zr(eNT((e=Pp(eaS(this,16),26))||this.zh())))?tgV:new AA(this,t)},eUe.Mg=function(){return(1&this.Db)==0},eUe.Zg=function(){return Pp(eaS(this,128),1935)},eUe.$g=function(){return Pp(eaS(this,16),26)},eUe.dh=function(){return(32&this.Db)!=0},eUe.eh=function(){return Pp(eaS(this,2),49)},eUe.kh=function(){return(64&this.Db)!=0},eUe.ph=function(){throw p7(new bO)},eUe.qh=function(){return 
Pp(eaS(this,64),281)},eUe.th=function(e){ehU(this,16,e)},eUe.uh=function(e){ehU(this,128,e)},eUe.vh=function(e){ehU(this,64,e)},eUe.yh=function(){return ehH(this)},eUe.Db=0,Y5(eZ2,"MinimalEObjectImpl",114),eTS(115,114,{105:1,92:1,90:1,56:1,108:1,49:1,97:1,114:1,115:1}),eUe.Fh=function(e){this.Cb=e},eUe.eh=function(){return this.Cb},Y5(eZ2,"MinimalEObjectImpl/Container",115),eTS(1985,115,{105:1,413:1,94:1,92:1,90:1,56:1,108:1,49:1,97:1,114:1,115:1}),eUe._g=function(e,t,n){return egp(this,e,t,n)},eUe.jh=function(e,t,n){return e_9(this,e,t,n)},eUe.lh=function(e){return Wz(this,e)},eUe.sh=function(e,t){esU(this,e,t)},eUe.zh=function(){return eBa(),tm_},eUe.Bh=function(e){eoF(this,e)},eUe.Ve=function(){return epD(this)},eUe.We=function(e){return eT8(this,e)},eUe.Xe=function(e){return X2(this,e)},eUe.Ye=function(e,t){return ebu(this,e,t)},Y5(eZ3,"EMapPropertyHolderImpl",1985),eTS(567,115,{105:1,469:1,92:1,90:1,56:1,108:1,49:1,97:1,114:1,115:1},oJ),eUe._g=function(e,t,n){switch(e){case 0:return this.a;case 1:return this.b}return ebl(this,e,t,n)},eUe.lh=function(e){switch(e){case 0:return 0!=this.a;case 1:return 0!=this.b}return epY(this,e)},eUe.sh=function(e,t){switch(e){case 0:ent(this,gP(LV(t)));return;case 1:enn(this,gP(LV(t)));return}eS5(this,e,t)},eUe.zh=function(){return eBa(),tmf},eUe.Bh=function(e){switch(e){case 0:ent(this,0);return;case 1:enn(this,0);return}eSi(this,e)},eUe.Ib=function(){var e;return(64&this.Db)!=0?eMT(this):(e=new O1(eMT(this)),e.a+=" (x: ",y$(e,this.a),e.a+=", y: ",y$(e,this.b),e.a+=")",e.a)},eUe.a=0,eUe.b=0,Y5(eZ3,"ElkBendPointImpl",567),eTS(723,1985,{105:1,413:1,160:1,94:1,92:1,90:1,56:1,108:1,49:1,97:1,114:1,115:1}),eUe._g=function(e,t,n){return ec2(this,e,t,n)},eUe.hh=function(e,t,n){return ew0(this,e,t,n)},eUe.jh=function(e,t,n){return ea9(this,e,t,n)},eUe.lh=function(e){return eaT(this,e)},eUe.sh=function(e,t){eyb(this,e,t)},eUe.zh=function(){return eBa(),tmb},eUe.Bh=function(e){ecx(this,e)},eUe.zg=function(){return 
this.k},eUe.Ag=function(){return UB(this)},eUe.Ib=function(){return el4(this)},eUe.k=null,Y5(eZ3,"ElkGraphElementImpl",723),eTS(724,723,{105:1,413:1,160:1,470:1,94:1,92:1,90:1,56:1,108:1,49:1,97:1,114:1,115:1}),eUe._g=function(e,t,n){return efN(this,e,t,n)},eUe.lh=function(e){return ef8(this,e)},eUe.sh=function(e,t){eym(this,e,t)},eUe.zh=function(){return eBa(),tmw},eUe.Bh=function(e){edS(this,e)},eUe.Bg=function(){return this.f},eUe.Cg=function(){return this.g},eUe.Dg=function(){return this.i},eUe.Eg=function(){return this.j},eUe.Fg=function(e,t){TN(this,e,t)},eUe.Gg=function(e,t){TP(this,e,t)},eUe.Hg=function(e){eno(this,e)},eUe.Ig=function(e){ens(this,e)},eUe.Ib=function(){return eEp(this)},eUe.f=0,eUe.g=0,eUe.i=0,eUe.j=0,Y5(eZ3,"ElkShapeImpl",724),eTS(725,724,{105:1,413:1,82:1,160:1,470:1,94:1,92:1,90:1,56:1,108:1,49:1,97:1,114:1,115:1}),eUe._g=function(e,t,n){return ebQ(this,e,t,n)},eUe.hh=function(e,t,n){return evZ(this,e,t,n)},eUe.jh=function(e,t,n){return evX(this,e,t,n)},eUe.lh=function(e){return esM(this,e)},eUe.sh=function(e,t){eTH(this,e,t)},eUe.zh=function(){return eBa(),tmd},eUe.Bh=function(e){ep2(this,e)},eUe.xg=function(){return this.d||(this.d=new Ih(e6g,this,8,5)),this.d},eUe.yg=function(){return this.e||(this.e=new Ih(e6g,this,7,4)),this.e},Y5(eZ3,"ElkConnectableShapeImpl",725),eTS(352,723,{105:1,413:1,79:1,160:1,352:1,94:1,92:1,90:1,56:1,108:1,49:1,97:1,114:1,115:1},oX),eUe.Qg=function(e){return evo(this,e)},eUe._g=function(e,t,n){switch(e){case 3:return zF(this);case 4:return this.b||(this.b=new Ih(e6m,this,4,7)),this.b;case 5:return this.c||(this.c=new Ih(e6m,this,5,8)),this.c;case 6:return this.a||(this.a=new FQ(e6v,this,6,6)),this.a;case 7:return OQ(),this.b||(this.b=new Ih(e6m,this,4,7)),!(this.b.i<=1)||(this.c||(this.c=new Ih(e6m,this,5,8)),!(this.c.i<=1));case 8:return OQ(),!!eTc(this);case 9:return OQ(),!!exb(this);case 10:return OQ(),this.b||(this.b=new Ih(e6m,this,4,7)),0!=this.b.i&&(this.c||(this.c=new 
Ih(e6m,this,5,8)),0!=this.c.i)}return ec2(this,e,t,n)},eUe.hh=function(e,t,n){var r;switch(t){case 3:return this.Cb&&(n=(r=this.Db>>16)>=0?evo(this,n):this.Cb.ih(this,-1-r,null,n)),Cu(this,Pp(e,33),n);case 4:return this.b||(this.b=new Ih(e6m,this,4,7)),edF(this.b,e,n);case 5:return this.c||(this.c=new Ih(e6m,this,5,8)),edF(this.c,e,n);case 6:return this.a||(this.a=new FQ(e6v,this,6,6)),edF(this.a,e,n)}return ew0(this,e,t,n)},eUe.jh=function(e,t,n){switch(t){case 3:return Cu(this,null,n);case 4:return this.b||(this.b=new Ih(e6m,this,4,7)),ep6(this.b,e,n);case 5:return this.c||(this.c=new Ih(e6m,this,5,8)),ep6(this.c,e,n);case 6:return this.a||(this.a=new FQ(e6v,this,6,6)),ep6(this.a,e,n)}return ea9(this,e,t,n)},eUe.lh=function(e){switch(e){case 3:return!!zF(this);case 4:return!!this.b&&0!=this.b.i;case 5:return!!this.c&&0!=this.c.i;case 6:return!!this.a&&0!=this.a.i;case 7:return this.b||(this.b=new Ih(e6m,this,4,7)),!(this.b.i<=1&&(this.c||(this.c=new Ih(e6m,this,5,8)),this.c.i<=1));case 8:return eTc(this);case 9:return exb(this);case 10:return this.b||(this.b=new Ih(e6m,this,4,7)),0!=this.b.i&&(this.c||(this.c=new Ih(e6m,this,5,8)),0!=this.c.i)}return eaT(this,e)},eUe.sh=function(e,t){switch(e){case 3:eOC(this,Pp(t,33));return;case 4:this.b||(this.b=new Ih(e6m,this,4,7)),eRT(this.b),this.b||(this.b=new Ih(e6m,this,4,7)),Y4(this.b,Pp(t,14));return;case 5:this.c||(this.c=new Ih(e6m,this,5,8)),eRT(this.c),this.c||(this.c=new Ih(e6m,this,5,8)),Y4(this.c,Pp(t,14));return;case 6:this.a||(this.a=new FQ(e6v,this,6,6)),eRT(this.a),this.a||(this.a=new FQ(e6v,this,6,6)),Y4(this.a,Pp(t,14));return}eyb(this,e,t)},eUe.zh=function(){return eBa(),tmh},eUe.Bh=function(e){switch(e){case 3:eOC(this,null);return;case 4:this.b||(this.b=new Ih(e6m,this,4,7)),eRT(this.b);return;case 5:this.c||(this.c=new Ih(e6m,this,5,8)),eRT(this.c);return;case 6:this.a||(this.a=new FQ(e6v,this,6,6)),eRT(this.a);return}ecx(this,e)},eUe.Ib=function(){return 
ePY(this)},Y5(eZ3,"ElkEdgeImpl",352),eTS(439,1985,{105:1,413:1,202:1,439:1,94:1,92:1,90:1,56:1,108:1,49:1,97:1,114:1,115:1},oQ),eUe.Qg=function(e){return eg1(this,e)},eUe._g=function(e,t,n){switch(e){case 1:return this.j;case 2:return this.k;case 3:return this.b;case 4:return this.c;case 5:return this.a||(this.a=new O_(e6h,this,5)),this.a;case 6:return zB(this);case 7:if(t)return ebF(this);return this.i;case 8:if(t)return ebj(this);return this.f;case 9:return this.g||(this.g=new Ih(e6v,this,9,10)),this.g;case 10:return this.e||(this.e=new Ih(e6v,this,10,9)),this.e;case 11:return this.d}return egp(this,e,t,n)},eUe.hh=function(e,t,n){var r,i,a;switch(t){case 6:return this.Cb&&(n=(i=this.Db>>16)>=0?eg1(this,n):this.Cb.ih(this,-1-i,null,n)),Cc(this,Pp(e,79),n);case 9:return this.g||(this.g=new Ih(e6v,this,9,10)),edF(this.g,e,n);case 10:return this.e||(this.e=new Ih(e6v,this,10,9)),edF(this.e,e,n)}return(a=Pp(ee2((r=Pp(eaS(this,16),26))||(eBa(),tmp),t),66)).Nj().Qj(this,ehH(this),t-Y1((eBa(),tmp)),e,n)},eUe.jh=function(e,t,n){switch(t){case 5:return this.a||(this.a=new O_(e6h,this,5)),ep6(this.a,e,n);case 6:return Cc(this,null,n);case 9:return this.g||(this.g=new Ih(e6v,this,9,10)),ep6(this.g,e,n);case 10:return this.e||(this.e=new Ih(e6v,this,10,9)),ep6(this.e,e,n)}return e_9(this,e,t,n)},eUe.lh=function(e){switch(e){case 1:return 0!=this.j;case 2:return 0!=this.k;case 3:return 0!=this.b;case 4:return 0!=this.c;case 5:return!!this.a&&0!=this.a.i;case 6:return!!zB(this);case 7:return!!this.i;case 8:return!!this.f;case 9:return!!this.g&&0!=this.g.i;case 10:return!!this.e&&0!=this.e.i;case 11:return null!=this.d}return Wz(this,e)},eUe.sh=function(e,t){switch(e){case 1:enu(this,gP(LV(t)));return;case 2:enl(this,gP(LV(t)));return;case 3:enr(this,gP(LV(t)));return;case 4:enc(this,gP(LV(t)));return;case 5:this.a||(this.a=new O_(e6h,this,5)),eRT(this.a),this.a||(this.a=new O_(e6h,this,5)),Y4(this.a,Pp(t,14));return;case 6:eOA(this,Pp(t,79));return;case 
7:err(this,Pp(t,82));return;case 8:ern(this,Pp(t,82));return;case 9:this.g||(this.g=new Ih(e6v,this,9,10)),eRT(this.g),this.g||(this.g=new Ih(e6v,this,9,10)),Y4(this.g,Pp(t,14));return;case 10:this.e||(this.e=new Ih(e6v,this,10,9)),eRT(this.e),this.e||(this.e=new Ih(e6v,this,10,9)),Y4(this.e,Pp(t,14));return;case 11:erO(this,Lq(t));return}esU(this,e,t)},eUe.zh=function(){return eBa(),tmp},eUe.Bh=function(e){switch(e){case 1:enu(this,0);return;case 2:enl(this,0);return;case 3:enr(this,0);return;case 4:enc(this,0);return;case 5:this.a||(this.a=new O_(e6h,this,5)),eRT(this.a);return;case 6:eOA(this,null);return;case 7:err(this,null);return;case 8:ern(this,null);return;case 9:this.g||(this.g=new Ih(e6v,this,9,10)),eRT(this.g);return;case 10:this.e||(this.e=new Ih(e6v,this,10,9)),eRT(this.e);return;case 11:erO(this,null);return}eoF(this,e)},eUe.Ib=function(){return ex2(this)},eUe.b=0,eUe.c=0,eUe.d=null,eUe.j=0,eUe.k=0,Y5(eZ3,"ElkEdgeSectionImpl",439),eTS(150,115,{105:1,92:1,90:1,147:1,56:1,108:1,49:1,97:1,150:1,114:1,115:1}),eUe._g=function(e,t,n){var r;return 0==e?(this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab):Qt(this,e-Y1(this.zh()),ee2((r=Pp(eaS(this,16),26))||this.zh(),e),t,n)},eUe.hh=function(e,t,n){var r,i;return 0==t?(this.Ab||(this.Ab=new FQ(tm4,this,0,3)),edF(this.Ab,e,n)):(i=Pp(ee2((r=Pp(eaS(this,16),26))||this.zh(),t),66)).Nj().Qj(this,ehH(this),t-Y1(this.zh()),e,n)},eUe.jh=function(e,t,n){var r,i;return 0==t?(this.Ab||(this.Ab=new FQ(tm4,this,0,3)),ep6(this.Ab,e,n)):(i=Pp(ee2((r=Pp(eaS(this,16),26))||this.zh(),t),66)).Nj().Rj(this,ehH(this),t-Y1(this.zh()),e,n)},eUe.lh=function(e){var t;return 0==e?!!this.Ab&&0!=this.Ab.i:VP(this,e-Y1(this.zh()),ee2((t=Pp(eaS(this,16),26))||this.zh(),e))},eUe.oh=function(e){return eF9(this,e)},eUe.sh=function(e,t){var n;if(0===e){this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new 
FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return}efL(this,e-Y1(this.zh()),ee2((n=Pp(eaS(this,16),26))||this.zh(),e),t)},eUe.uh=function(e){ehU(this,128,e)},eUe.zh=function(){return eBK(),tgL},eUe.Bh=function(e){var t;if(0===e){this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return}ec6(this,e-Y1(this.zh()),ee2((t=Pp(eaS(this,16),26))||this.zh(),e))},eUe.Gh=function(){this.Bb|=1},eUe.Hh=function(e){return eDM(this,e)},eUe.Bb=0,Y5(eZ2,"EModelElementImpl",150),eTS(704,150,{105:1,92:1,90:1,471:1,147:1,56:1,108:1,49:1,97:1,150:1,114:1,115:1},cQ),eUe.Ih=function(e,t){return ejZ(this,e,t)},eUe.Jh=function(e){var t,n,r,i,a;if(this.a!=etP(e)||(256&e.Bb)!=0)throw p7(new gL(eZ7+e.zb+eZ6));for(r=$E(e);0!=qt(r.a).i;){if(n=Pp(ejc(r,0,(a=(t=Pp(etj(qt(r.a),0),87)).c,M4(a,88)?Pp(a,26):(eBK(),tgI))),26),em4(n))return i=etP(n).Nh().Jh(n),Pp(i,49).th(e),i;r=$E(n)}return(null!=e.D?e.D:e.B)=="java.util.Map$Entry"?new RO(e):new Pq(e)},eUe.Kh=function(e,t){return eBd(this,e,t)},eUe._g=function(e,t,n){var r;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.a}return Qt(this,e-Y1((eBK(),tgM)),ee2((r=Pp(eaS(this,16),26))||tgM,e),t,n)},eUe.hh=function(e,t,n){var r,i;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),edF(this.Ab,e,n);case 1:return this.a&&(n=Pp(this.a,49).ih(this,4,e6E,n)),ecb(this,Pp(e,235),n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgM),t),66)).Nj().Qj(this,ehH(this),t-Y1((eBK(),tgM)),e,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),ep6(this.Ab,e,n);case 1:return ecb(this,null,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgM),t),66)).Nj().Rj(this,ehH(this),t-Y1((eBK(),tgM)),e,n)},eUe.lh=function(e){var t;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return!!this.a}return VP(this,e-Y1((eBK(),tgM)),ee2((t=Pp(eaS(this,16),26))||tgM,e))},eUe.sh=function(e,t){var n;switch(e){case 0:this.Ab||(this.Ab=new 
FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:e_B(this,Pp(t,235));return}efL(this,e-Y1((eBK(),tgM)),ee2((n=Pp(eaS(this,16),26))||tgM,e),t)},eUe.zh=function(){return eBK(),tgM},eUe.Bh=function(e){var t;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:e_B(this,null);return}ec6(this,e-Y1((eBK(),tgM)),ee2((t=Pp(eaS(this,16),26))||tgM,e))},Y5(eZ2,"EFactoryImpl",704),eTS(eXt,704,{105:1,2014:1,92:1,90:1,471:1,147:1,56:1,108:1,49:1,97:1,150:1,114:1,115:1},o1),eUe.Ih=function(e,t){switch(e.yj()){case 12:return Pp(t,146).tg();case 13:return efF(t);default:throw p7(new gL(eZ5+e.ne()+eZ6))}},eUe.Jh=function(e){var t;switch(-1==e.G&&(e.G=(t=etP(e))?ebv(t.Mh(),e):-1),e.G){case 4:return new o0;case 6:return new mS;case 7:return new mk;case 8:return new oX;case 9:return new oJ;case 10:return new oQ;case 11:return new o3;default:throw p7(new gL(eZ7+e.zb+eZ6))}},eUe.Kh=function(e,t){switch(e.yj()){case 13:case 12:return null;default:throw p7(new gL(eZ5+e.ne()+eZ6))}},Y5(eZ3,"ElkGraphFactoryImpl",eXt),eTS(438,150,{105:1,92:1,90:1,147:1,191:1,56:1,108:1,49:1,97:1,150:1,114:1,115:1}),eUe.Wg=function(){var e,t;return null==(t=zr(eNT((e=Pp(eaS(this,16),26))||this.zh())))?(_0(),_0(),tgV):new Lg(this,t)},eUe._g=function(e,t,n){var r;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.ne()}return Qt(this,e-Y1(this.zh()),ee2((r=Pp(eaS(this,16),26))||this.zh(),e),t,n)},eUe.lh=function(e){var t;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return null!=this.zb}return VP(this,e-Y1(this.zh()),ee2((t=Pp(eaS(this,16),26))||this.zh(),e))},eUe.sh=function(e,t){var n;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:this.Lh(Lq(t));return}efL(this,e-Y1(this.zh()),ee2((n=Pp(eaS(this,16),26))||this.zh(),e),t)},eUe.zh=function(){return eBK(),tgC},eUe.Bh=function(e){var 
t;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:this.Lh(null);return}ec6(this,e-Y1(this.zh()),ee2((t=Pp(eaS(this,16),26))||this.zh(),e))},eUe.ne=function(){return this.zb},eUe.Lh=function(e){er3(this,e)},eUe.Ib=function(){return ecF(this)},eUe.zb=null,Y5(eZ2,"ENamedElementImpl",438),eTS(179,438,{105:1,92:1,90:1,147:1,191:1,56:1,235:1,108:1,49:1,97:1,150:1,179:1,114:1,115:1,675:1},$y),eUe.Qg=function(e){return eg5(this,e)},eUe._g=function(e,t,n){var r;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.zb;case 2:return this.yb;case 3:return this.xb;case 4:return this.sb;case 5:return this.rb||(this.rb=new Fq(this,tm8,this)),this.rb;case 6:return this.vb||(this.vb=new Ia(e6E,this,6,7)),this.vb;case 7:if(t)return this.Db>>16==7?Pp(this.Cb,235):null;return zU(this)}return Qt(this,e-Y1((eBK(),tgP)),ee2((r=Pp(eaS(this,16),26))||tgP,e),t,n)},eUe.hh=function(e,t,n){var r,i,a;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),edF(this.Ab,e,n);case 4:return this.sb&&(n=Pp(this.sb,49).ih(this,1,e6w,n)),ecY(this,Pp(e,471),n);case 5:return this.rb||(this.rb=new Fq(this,tm8,this)),edF(this.rb,e,n);case 6:return this.vb||(this.vb=new Ia(e6E,this,6,7)),edF(this.vb,e,n);case 7:return this.Cb&&(n=(i=this.Db>>16)>=0?eg5(this,n):this.Cb.ih(this,-1-i,null,n)),eDg(this,e,7,n)}return(a=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgP),t),66)).Nj().Qj(this,ehH(this),t-Y1((eBK(),tgP)),e,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),ep6(this.Ab,e,n);case 4:return ecY(this,null,n);case 5:return this.rb||(this.rb=new Fq(this,tm8,this)),ep6(this.rb,e,n);case 6:return this.vb||(this.vb=new Ia(e6E,this,6,7)),ep6(this.vb,e,n);case 7:return eDg(this,null,7,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgP),t),66)).Nj().Rj(this,ehH(this),t-Y1((eBK(),tgP)),e,n)},eUe.lh=function(e){var t;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return 
null!=this.zb;case 2:return null!=this.yb;case 3:return null!=this.xb;case 4:return!!this.sb;case 5:return!!this.rb&&0!=this.rb.i;case 6:return!!this.vb&&0!=this.vb.i;case 7:return!!zU(this)}return VP(this,e-Y1((eBK(),tgP)),ee2((t=Pp(eaS(this,16),26))||tgP,e))},eUe.oh=function(e){var t;return(t=eAd(this,e))||eF9(this,e)},eUe.sh=function(e,t){var n;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:er3(this,Lq(t));return;case 2:er5(this,Lq(t));return;case 3:er4(this,Lq(t));return;case 4:e_8(this,Pp(t,471));return;case 5:this.rb||(this.rb=new Fq(this,tm8,this)),eRT(this.rb),this.rb||(this.rb=new Fq(this,tm8,this)),Y4(this.rb,Pp(t,14));return;case 6:this.vb||(this.vb=new Ia(e6E,this,6,7)),eRT(this.vb),this.vb||(this.vb=new Ia(e6E,this,6,7)),Y4(this.vb,Pp(t,14));return}efL(this,e-Y1((eBK(),tgP)),ee2((n=Pp(eaS(this,16),26))||tgP,e),t)},eUe.vh=function(e){var t,n;if(e&&this.rb)for(n=new Ow(this.rb);n.e!=n.i.gc();)t=epH(n),M4(t,351)&&(Pp(t,351).w=null);ehU(this,64,e)},eUe.zh=function(){return eBK(),tgP},eUe.Bh=function(e){var t;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:er3(this,null);return;case 2:er5(this,null);return;case 3:er4(this,null);return;case 4:e_8(this,null);return;case 5:this.rb||(this.rb=new Fq(this,tm8,this)),eRT(this.rb);return;case 6:this.vb||(this.vb=new Ia(e6E,this,6,7)),eRT(this.vb);return}ec6(this,e-Y1((eBK(),tgP)),ee2((t=Pp(eaS(this,16),26))||tgP,e))},eUe.Gh=function(){egb(this)},eUe.Mh=function(){return this.rb||(this.rb=new Fq(this,tm8,this)),this.rb},eUe.Nh=function(){return this.sb},eUe.Oh=function(){return this.ub},eUe.Ph=function(){return this.xb},eUe.Qh=function(){return this.yb},eUe.Rh=function(e){this.ub=e},eUe.Ib=function(){var e;return(64&this.Db)!=0?ecF(this):(e=new O1(ecF(this)),e.a+=" (nsURI: ",xk(e,this.yb),e.a+=", nsPrefix: 
",xk(e,this.xb),e.a+=")",e.a)},eUe.xb=null,eUe.yb=null,Y5(eZ2,"EPackageImpl",179),eTS(555,179,{105:1,2016:1,555:1,92:1,90:1,147:1,191:1,56:1,235:1,108:1,49:1,97:1,150:1,179:1,114:1,115:1,675:1},eTv),eUe.q=!1,eUe.r=!1;var e6T=!1;Y5(eZ3,"ElkGraphPackageImpl",555),eTS(354,724,{105:1,413:1,160:1,137:1,470:1,354:1,94:1,92:1,90:1,56:1,108:1,49:1,97:1,114:1,115:1},o0),eUe.Qg=function(e){return eg0(this,e)},eUe._g=function(e,t,n){switch(e){case 7:return zH(this);case 8:return this.a}return efN(this,e,t,n)},eUe.hh=function(e,t,n){var r;return 7===t?(this.Cb&&(n=(r=this.Db>>16)>=0?eg0(this,n):this.Cb.ih(this,-1-r,null,n)),j2(this,Pp(e,160),n)):ew0(this,e,t,n)},eUe.jh=function(e,t,n){return 7==t?j2(this,null,n):ea9(this,e,t,n)},eUe.lh=function(e){switch(e){case 7:return!!zH(this);case 8:return!IE("",this.a)}return ef8(this,e)},eUe.sh=function(e,t){switch(e){case 7:eAu(this,Pp(t,160));return;case 8:eri(this,Lq(t));return}eym(this,e,t)},eUe.zh=function(){return eBa(),tmm},eUe.Bh=function(e){switch(e){case 7:eAu(this,null);return;case 8:eri(this,"");return}edS(this,e)},eUe.Ib=function(){return eE1(this)},eUe.a="",Y5(eZ3,"ElkLabelImpl",354),eTS(239,725,{105:1,413:1,82:1,160:1,33:1,470:1,239:1,94:1,92:1,90:1,56:1,108:1,49:1,97:1,114:1,115:1},mS),eUe.Qg=function(e){return evs(this,e)},eUe._g=function(e,t,n){switch(e){case 9:return this.c||(this.c=new FQ(e6x,this,9,9)),this.c;case 10:return this.a||(this.a=new FQ(e6k,this,10,11)),this.a;case 11:return z$(this);case 12:return this.b||(this.b=new FQ(e6g,this,12,3)),this.b;case 13:return OQ(),this.a||(this.a=new FQ(e6k,this,10,11)),this.a.i>0}return ebQ(this,e,t,n)},eUe.hh=function(e,t,n){var r;switch(t){case 9:return this.c||(this.c=new FQ(e6x,this,9,9)),edF(this.c,e,n);case 10:return this.a||(this.a=new FQ(e6k,this,10,11)),edF(this.a,e,n);case 11:return this.Cb&&(n=(r=this.Db>>16)>=0?evs(this,n):this.Cb.ih(this,-1-r,null,n)),C4(this,Pp(e,33),n);case 12:return this.b||(this.b=new FQ(e6g,this,12,3)),edF(this.b,e,n)}return 
evZ(this,e,t,n)},eUe.jh=function(e,t,n){switch(t){case 9:return this.c||(this.c=new FQ(e6x,this,9,9)),ep6(this.c,e,n);case 10:return this.a||(this.a=new FQ(e6k,this,10,11)),ep6(this.a,e,n);case 11:return C4(this,null,n);case 12:return this.b||(this.b=new FQ(e6g,this,12,3)),ep6(this.b,e,n)}return evX(this,e,t,n)},eUe.lh=function(e){switch(e){case 9:return!!this.c&&0!=this.c.i;case 10:return!!this.a&&0!=this.a.i;case 11:return!!z$(this);case 12:return!!this.b&&0!=this.b.i;case 13:return this.a||(this.a=new FQ(e6k,this,10,11)),this.a.i>0}return esM(this,e)},eUe.sh=function(e,t){switch(e){case 9:this.c||(this.c=new FQ(e6x,this,9,9)),eRT(this.c),this.c||(this.c=new FQ(e6x,this,9,9)),Y4(this.c,Pp(t,14));return;case 10:this.a||(this.a=new FQ(e6k,this,10,11)),eRT(this.a),this.a||(this.a=new FQ(e6k,this,10,11)),Y4(this.a,Pp(t,14));return;case 11:eO$(this,Pp(t,33));return;case 12:this.b||(this.b=new FQ(e6g,this,12,3)),eRT(this.b),this.b||(this.b=new FQ(e6g,this,12,3)),Y4(this.b,Pp(t,14));return}eTH(this,e,t)},eUe.zh=function(){return eBa(),tmg},eUe.Bh=function(e){switch(e){case 9:this.c||(this.c=new FQ(e6x,this,9,9)),eRT(this.c);return;case 10:this.a||(this.a=new FQ(e6k,this,10,11)),eRT(this.a);return;case 11:eO$(this,null);return;case 12:this.b||(this.b=new FQ(e6g,this,12,3)),eRT(this.b);return}ep2(this,e)},eUe.Ib=function(){return eC4(this)},Y5(eZ3,"ElkNodeImpl",239),eTS(186,725,{105:1,413:1,82:1,160:1,118:1,470:1,186:1,94:1,92:1,90:1,56:1,108:1,49:1,97:1,114:1,115:1},mk),eUe.Qg=function(e){return eg2(this,e)},eUe._g=function(e,t,n){return 9==e?zY(this):ebQ(this,e,t,n)},eUe.hh=function(e,t,n){var r;return 9===t?(this.Cb&&(n=(r=this.Db>>16)>=0?eg2(this,n):this.Cb.ih(this,-1-r,null,n)),Cl(this,Pp(e,33),n)):evZ(this,e,t,n)},eUe.jh=function(e,t,n){return 9==t?Cl(this,null,n):evX(this,e,t,n)},eUe.lh=function(e){return 9==e?!!zY(this):esM(this,e)},eUe.sh=function(e,t){if(9===e){eOL(this,Pp(t,33));return}eTH(this,e,t)},eUe.zh=function(){return 
eBa(),tmv},eUe.Bh=function(e){if(9===e){eOL(this,null);return}ep2(this,e)},eUe.Ib=function(){return eC5(this)},Y5(eZ3,"ElkPortImpl",186);var e6M=RL(eX_,"BasicEMap/Entry");eTS(1092,115,{105:1,42:1,92:1,90:1,133:1,56:1,108:1,49:1,97:1,114:1,115:1},o3),eUe.Fb=function(e){return this===e},eUe.cd=function(){return this.b},eUe.Hb=function(){return Ao(this)},eUe.Uh=function(e){era(this,Pp(e,146))},eUe._g=function(e,t,n){switch(e){case 0:return this.b;case 1:return this.c}return ebl(this,e,t,n)},eUe.lh=function(e){switch(e){case 0:return!!this.b;case 1:return null!=this.c}return epY(this,e)},eUe.sh=function(e,t){switch(e){case 0:era(this,Pp(t,146));return;case 1:eru(this,t);return}eS5(this,e,t)},eUe.zh=function(){return eBa(),tmy},eUe.Bh=function(e){switch(e){case 0:era(this,null);return;case 1:eru(this,null);return}eSi(this,e)},eUe.Sh=function(){var e;return -1==this.a&&(e=this.b,this.a=e?esj(e):0),this.a},eUe.dd=function(){return this.c},eUe.Th=function(e){this.a=e},eUe.ed=function(e){var t;return t=this.c,eru(this,e),t},eUe.Ib=function(){var e;return(64&this.Db)!=0?eMT(this):(xM(xM(xM(e=new vc,this.b?this.b.tg():eUg),eGH),Ae(this.c)),e.a)},eUe.a=-1,eUe.c=null;var 
e6O=Y5(eZ3,"ElkPropertyToValueMapEntryImpl",1092);eTS(984,1,{},o6),Y5(eXk,"JsonAdapter",984),eTS(210,60,eHr,gK),Y5(eXk,"JsonImportException",210),eTS(857,1,{},eg6),Y5(eXk,"JsonImporter",857),eTS(891,1,{},kP),Y5(eXk,"JsonImporter/lambda$0$Type",891),eTS(892,1,{},kR),Y5(eXk,"JsonImporter/lambda$1$Type",892),eTS(900,1,{},h7),Y5(eXk,"JsonImporter/lambda$10$Type",900),eTS(902,1,{},kj),Y5(eXk,"JsonImporter/lambda$11$Type",902),eTS(903,1,{},kF),Y5(eXk,"JsonImporter/lambda$12$Type",903),eTS(909,1,{},HE),Y5(eXk,"JsonImporter/lambda$13$Type",909),eTS(908,1,{},H_),Y5(eXk,"JsonImporter/lambda$14$Type",908),eTS(904,1,{},kY),Y5(eXk,"JsonImporter/lambda$15$Type",904),eTS(905,1,{},kB),Y5(eXk,"JsonImporter/lambda$16$Type",905),eTS(906,1,{},kU),Y5(eXk,"JsonImporter/lambda$17$Type",906),eTS(907,1,{},kH),Y5(eXk,"JsonImporter/lambda$18$Type",907),eTS(912,1,{},pe),Y5(eXk,"JsonImporter/lambda$19$Type",912),eTS(893,1,{},pt),Y5(eXk,"JsonImporter/lambda$2$Type",893),eTS(910,1,{},pn),Y5(eXk,"JsonImporter/lambda$20$Type",910),eTS(911,1,{},pr),Y5(eXk,"JsonImporter/lambda$21$Type",911),eTS(915,1,{},pi),Y5(eXk,"JsonImporter/lambda$22$Type",915),eTS(913,1,{},pa),Y5(eXk,"JsonImporter/lambda$23$Type",913),eTS(914,1,{},po),Y5(eXk,"JsonImporter/lambda$24$Type",914),eTS(917,1,{},ps),Y5(eXk,"JsonImporter/lambda$25$Type",917),eTS(916,1,{},pu),Y5(eXk,"JsonImporter/lambda$26$Type",916),eTS(918,1,eUF,k$),eUe.td=function(e){JH(this.b,this.a,Lq(e))},Y5(eXk,"JsonImporter/lambda$27$Type",918),eTS(919,1,eUF,kz),eUe.td=function(e){J$(this.b,this.a,Lq(e))},Y5(eXk,"JsonImporter/lambda$28$Type",919),eTS(920,1,{},kG),Y5(eXk,"JsonImporter/lambda$29$Type",920),eTS(896,1,{},pc),Y5(eXk,"JsonImporter/lambda$3$Type",896),eTS(921,1,{},kW),Y5(eXk,"JsonImporter/lambda$30$Type",921),eTS(922,1,{},pl),Y5(eXk,"JsonImporter/lambda$31$Type",922),eTS(923,1,{},pf),Y5(eXk,"JsonImporter/lambda$32$Type",923),eTS(924,1,{},pd),Y5(eXk,"JsonImporter/lambda$33$Type",924),eTS(925,1,{},ph),Y5(eXk,"JsonImporter/lambda$34$Type",925),eTS(859,1,{}
,pp),Y5(eXk,"JsonImporter/lambda$35$Type",859),eTS(929,1,{},N8),Y5(eXk,"JsonImporter/lambda$36$Type",929),eTS(926,1,eUF,pb),eUe.td=function(e){qW(this.a,Pp(e,469))},Y5(eXk,"JsonImporter/lambda$37$Type",926),eTS(927,1,eUF,k0),eUe.td=function(e){xC(this.a,this.b,Pp(e,202))},Y5(eXk,"JsonImporter/lambda$38$Type",927),eTS(928,1,eUF,k2),eUe.td=function(e){xI(this.a,this.b,Pp(e,202))},Y5(eXk,"JsonImporter/lambda$39$Type",928),eTS(894,1,{},pm),Y5(eXk,"JsonImporter/lambda$4$Type",894),eTS(930,1,eUF,pg),eUe.td=function(e){qK(this.a,Pp(e,8))},Y5(eXk,"JsonImporter/lambda$40$Type",930),eTS(895,1,{},pv),Y5(eXk,"JsonImporter/lambda$5$Type",895),eTS(899,1,{},py),Y5(eXk,"JsonImporter/lambda$6$Type",899),eTS(897,1,{},pw),Y5(eXk,"JsonImporter/lambda$7$Type",897),eTS(898,1,{},p_),Y5(eXk,"JsonImporter/lambda$8$Type",898),eTS(901,1,{},pE),Y5(eXk,"JsonImporter/lambda$9$Type",901),eTS(948,1,eUF,pS),eUe.td=function(e){BC(this.a,new B_(Lq(e)))},Y5(eXk,"JsonMetaDataConverter/lambda$0$Type",948),eTS(949,1,eUF,pk),eUe.td=function(e){Bm(this.a,Pp(e,237))},Y5(eXk,"JsonMetaDataConverter/lambda$1$Type",949),eTS(950,1,eUF,px),eUe.td=function(e){GR(this.a,Pp(e,149))},Y5(eXk,"JsonMetaDataConverter/lambda$2$Type",950),eTS(951,1,eUF,pT),eUe.td=function(e){Bg(this.a,Pp(e,175))},Y5(eXk,"JsonMetaDataConverter/lambda$3$Type",951),eTS(237,22,{3:1,35:1,22:1,237:1},k1);var e6A=enw(ezx,"GraphFeature",237,e1G,etM,N1);eTS(13,1,{35:1,146:1},pO,Cm,xX,T2),eUe.wd=function(e){return Oo(this,Pp(e,146))},eUe.Fb=function(e){return $k(this,e)},eUe.wg=function(){return epB(this)},eUe.tg=function(){return this.b},eUe.Hb=function(){return ebA(this.b)},eUe.Ib=function(){return this.b},Y5(ezx,"Property",13),eTS(818,1,e$C,pM),eUe.ue=function(e,t){return elW(this,Pp(e,94),Pp(t,94))},eUe.Fb=function(e){return this===e},eUe.ve=function(){return new fZ(this)},Y5(ezx,"PropertyHolderComparator",818),eTS(695,1,eUE,pL),eUe.Nb=function(e){F8(this,e)},eUe.Pb=function(){return 
JZ(this)},eUe.Qb=function(){yI()},eUe.Ob=function(){return!!this.a},Y5(eXY,"ElkGraphUtil/AncestorIterator",695);var e6L=RL(eX_,"EList");eTS(67,52,{20:1,28:1,52:1,14:1,15:1,67:1,58:1}),eUe.Vc=function(e,t){elm(this,e,t)},eUe.Fc=function(e){return JL(this,e)},eUe.Wc=function(e,t){return eo0(this,e,t)},eUe.Gc=function(e){return Y4(this,e)},eUe.Zh=function(){return new AY(this)},eUe.$h=function(){return new AB(this)},eUe._h=function(e){return enH(this,e)},eUe.ai=function(){return!0},eUe.bi=function(e,t){},eUe.ci=function(){},eUe.di=function(e,t){X8(this,e,t)},eUe.ei=function(e,t,n){},eUe.fi=function(e,t){},eUe.gi=function(e,t,n){},eUe.Fb=function(e){return eCc(this,e)},eUe.Hb=function(){return eov(this)},eUe.hi=function(){return!1},eUe.Kc=function(){return new Ow(this)},eUe.Yc=function(){return new AF(this)},eUe.Zc=function(e){var t;if(t=this.gc(),e<0||e>t)throw p7(new Ii(e,t));return new YC(this,e)},eUe.ji=function(e,t){this.ii(e,this.Xc(t))},eUe.Mc=function(e){return eeu(this,e)},eUe.li=function(e,t){return t},eUe._c=function(e,t){return eby(this,e,t)},eUe.Ib=function(){return efq(this)},eUe.ni=function(){return!0},eUe.oi=function(e,t){return euu(this,t)},Y5(eX_,"AbstractEList",67),eTS(63,67,eXz,o7,eta,eiP),eUe.Vh=function(e,t){return ew2(this,e,t)},eUe.Wh=function(e){return emp(this,e)},eUe.Xh=function(e,t){ecW(this,e,t)},eUe.Yh=function(e){Zz(this,e)},eUe.pi=function(e){return J5(this,e)},eUe.$b=function(){ZG(this)},eUe.Hc=function(e){return ev9(this,e)},eUe.Xb=function(e){return etj(this,e)},eUe.qi=function(e){var t,n,r;++this.j,e>(n=null==this.g?0:this.g.length)&&(r=this.g,(t=n+(n/2|0)+4)=0&&(this.$c(t),!0)},eUe.mi=function(e,t){return this.Ui(e,this.oi(e,t))},eUe.gc=function(){return this.Vi()},eUe.Pc=function(){return this.Wi()},eUe.Qc=function(e){return this.Xi(e)},eUe.Ib=function(){return this.Yi()},Y5(eX_,"DelegatingEList",1995),eTS(1996,1995,eJk),eUe.Vh=function(e,t){return eD1(this,e,t)},eUe.Wh=function(e){return 
this.Vh(this.Vi(),e)},eUe.Xh=function(e,t){eTf(this,e,t)},eUe.Yh=function(e){exq(this,e)},eUe.ai=function(){return!this.bj()},eUe.$b=function(){eRP(this)},eUe.Zi=function(e,t,n,r,i){return new $P(this,e,t,n,r,i)},eUe.$i=function(e){eam(this.Ai(),e)},eUe._i=function(){return null},eUe.aj=function(){return -1},eUe.Ai=function(){return null},eUe.bj=function(){return!1},eUe.cj=function(e,t){return t},eUe.dj=function(e,t){return t},eUe.ej=function(){return!1},eUe.fj=function(){return!this.Ri()},eUe.ii=function(e,t){var n,r;return this.ej()?(r=this.fj(),n=e_R(this,e,t),this.$i(this.Zi(7,ell(t),n,e,r)),n):e_R(this,e,t)},eUe.$c=function(e){var t,n,r,i;return this.ej()?(n=null,r=this.fj(),t=this.Zi(4,i=RC(this,e),null,e,r),this.bj()&&i?(n=this.dj(i,n))?(n.Ei(t),n.Fi()):this.$i(t):n?(n.Ei(t),n.Fi()):this.$i(t),i):(i=RC(this,e),this.bj()&&i&&(n=this.dj(i,null))&&n.Fi(),i)},eUe.mi=function(e,t){return eD0(this,e,t)},Y5(eZK,"DelegatingNotifyingListImpl",1996),eTS(143,1,eJx),eUe.Ei=function(e){return ey7(this,e)},eUe.Fi=function(){QU(this)},eUe.xi=function(){return this.d},eUe._i=function(){return null},eUe.gj=function(){return null},eUe.yi=function(e){return -1},eUe.zi=function(){return eLo(this)},eUe.Ai=function(){return null},eUe.Bi=function(){return eLs(this)},eUe.Ci=function(){return this.o<0?this.o<-2?-2-this.o-1:-1:this.o},eUe.hj=function(){return!1},eUe.Di=function(e){var t,n,r,i,a,o,s,u,c,l,f;switch(this.d){case 1:case 2:switch(i=e.xi()){case 1:case 2:if(xc(a=e.Ai())===xc(this.Ai())&&this.yi(null)==e.yi(null))return this.g=e.zi(),1==e.xi()&&(this.d=1),!0}case 4:if(4===(i=e.xi())&&xc(a=e.Ai())===xc(this.Ai())&&this.yi(null)==e.yi(null))return c=eju(this),u=this.o<0?this.o<-2?-2-this.o-1:-1:this.o,o=e.Ci(),this.d=6,f=new eta(2),u<=o?(JL(f,this.n),JL(f,e.Bi()),this.g=eow(vx(ty_,1),eHT,25,15,[this.o=u,o+1])):(JL(f,e.Bi()),JL(f,this.n),this.g=eow(vx(ty_,1),eHT,25,15,[this.o=o,u])),this.n=f,c||(this.o=-2-this.o-1),!0;break;case 
6:if(4===(i=e.xi())&&xc(a=e.Ai())===xc(this.Ai())&&this.yi(null)==e.yi(null)){for(c=eju(this),o=e.Ci(),r=Je(ty_,eHT,25,(l=Pp(this.g,48)).length+1,15,1),t=0;t>>0).toString(16)),r.a+=" (eventType: ",this.d){case 1:r.a+="SET";break;case 2:r.a+="UNSET";break;case 3:r.a+="ADD";break;case 5:r.a+="ADD_MANY";break;case 4:r.a+="REMOVE";break;case 6:r.a+="REMOVE_MANY";break;case 7:r.a+="MOVE";break;case 8:r.a+="REMOVING_ADAPTER";break;case 9:r.a+="RESOLVE";break;default:yz(r,this.d)}if(eIb(this)&&(r.a+=", touch: true"),r.a+=", position: ",yz(r,this.o<0?this.o<-2?-2-this.o-1:-1:this.o),r.a+=", notifier: ",xS(r,this.Ai()),r.a+=", feature: ",xS(r,this._i()),r.a+=", oldValue: ",xS(r,eLs(this)),r.a+=", newValue: ",6==this.d&&M4(this.g,48)){for(n=Pp(this.g,48),r.a+="[",e=0;e10?(this.b&&this.c.j==this.a||(this.b=new Rq(this),this.a=this.j),w0(this.b,e)):ev9(this,e)},eUe.ni=function(){return!0},eUe.a=0,Y5(eX_,"AbstractEList/1",953),eTS(295,73,eHZ,Ii),Y5(eX_,"AbstractEList/BasicIndexOutOfBoundsException",295),eTS(40,1,eUE,Ow),eUe.Nb=function(e){F8(this,e)},eUe.mj=function(){if(this.i.j!=this.f)throw p7(new bA)},eUe.nj=function(){return epH(this)},eUe.Ob=function(){return this.e!=this.i.gc()},eUe.Pb=function(){return this.nj()},eUe.Qb=function(){ey_(this)},eUe.e=0,eUe.f=0,eUe.g=-1,Y5(eX_,"AbstractEList/EIterator",40),eTS(278,40,eUC,AF,YC),eUe.Qb=function(){ey_(this)},eUe.Rb=function(e){edq(this,e)},eUe.oj=function(){var e;try{return e=this.d.Xb(--this.e),this.mj(),this.g=this.e,e}catch(t){if(t=eoa(t),M4(t,73))throw this.mj(),p7(new bC);throw p7(t)}},eUe.pj=function(e){emE(this,e)},eUe.Sb=function(){return 0!=this.e},eUe.Tb=function(){return this.e},eUe.Ub=function(){return this.oj()},eUe.Vb=function(){return this.e-1},eUe.Wb=function(e){this.pj(e)},Y5(eX_,"AbstractEList/EListIterator",278),eTS(341,40,eUE,AY),eUe.nj=function(){return ep$(this)},eUe.Qb=function(){throw p7(new bO)},Y5(eX_,"AbstractEList/NonResolvingEIterator",341),eTS(385,278,eUC,AB,IB),eUe.Rb=function(e){throw p7(new 
bO)},eUe.nj=function(){var e;try{return e=this.c.ki(this.e),this.mj(),this.g=this.e++,e}catch(t){if(t=eoa(t),M4(t,73))throw this.mj(),p7(new bC);throw p7(t)}},eUe.oj=function(){var e;try{return e=this.c.ki(--this.e),this.mj(),this.g=this.e,e}catch(t){if(t=eoa(t),M4(t,73))throw this.mj(),p7(new bC);throw p7(t)}},eUe.Qb=function(){throw p7(new bO)},eUe.Wb=function(e){throw p7(new bO)},Y5(eX_,"AbstractEList/NonResolvingEListIterator",385),eTS(1982,67,eJO),eUe.Vh=function(e,t){var n,r,i,a,o,s,u,c,l,f,d;if(0==(i=t.gc()))return++this.j,!1;for(r=eue(this,d=(l=null==(c=Pp(eaS(this.a,4),126))?0:c.length)+i),(f=l-e)>0&&ePD(c,e,r,e+i,f),u=t.Kc(),o=0;on)throw p7(new Ii(e,n));return new Uu(this,e)},eUe.$b=function(){var e,t;++this.j,t=null==(e=Pp(eaS(this.a,4),126))?0:e.length,eps(this,null),X8(this,t,e)},eUe.Hc=function(e){var t,n,r,i,a;if(null!=(t=Pp(eaS(this.a,4),126))){if(null!=e){for(i=0,a=(r=t).length;i=(n=null==(t=Pp(eaS(this.a,4),126))?0:t.length))throw p7(new Ii(e,n));return t[e]},eUe.Xc=function(e){var t,n,r;if(null!=(t=Pp(eaS(this.a,4),126))){if(null!=e){for(n=0,r=t.length;nn)throw p7(new Ii(e,n));return new Us(this,e)},eUe.ii=function(e,t){var n,r,i;if(i=null==(n=ehc(this))?0:n.length,e>=i)throw p7(new gE(eXU+e+eXH+i));if(t>=i)throw p7(new gE(eX$+t+eXH+i));return r=n[t],e!=t&&(e0&&ePD(e,0,t,0,n),t},eUe.Qc=function(e){var t,n,r;return(r=null==(t=Pp(eaS(this.a,4),126))?0:t.length)>0&&(e.lengthr&&Bc(e,r,null),e},Y5(eX_,"ArrayDelegatingEList",1982),eTS(1038,40,eUE,Zl),eUe.mj=function(){if(this.b.j!=this.f||xc(Pp(eaS(this.b.a,4),126))!==xc(this.a))throw p7(new bA)},eUe.Qb=function(){ey_(this),this.a=Pp(eaS(this.b.a,4),126)},Y5(eX_,"ArrayDelegatingEList/EIterator",1038),eTS(706,278,eUC,FK,Us),eUe.mj=function(){if(this.b.j!=this.f||xc(Pp(eaS(this.b.a,4),126))!==xc(this.a))throw p7(new 
bA)},eUe.pj=function(e){emE(this,e),this.a=Pp(eaS(this.b.a,4),126)},eUe.Qb=function(){ey_(this),this.a=Pp(eaS(this.b.a,4),126)},Y5(eX_,"ArrayDelegatingEList/EListIterator",706),eTS(1039,341,eUE,Zf),eUe.mj=function(){if(this.b.j!=this.f||xc(Pp(eaS(this.b.a,4),126))!==xc(this.a))throw p7(new bA)},Y5(eX_,"ArrayDelegatingEList/NonResolvingEIterator",1039),eTS(707,385,eUC,FV,Uu),eUe.mj=function(){if(this.b.j!=this.f||xc(Pp(eaS(this.b.a,4),126))!==xc(this.a))throw p7(new bA)},Y5(eX_,"ArrayDelegatingEList/NonResolvingEListIterator",707),eTS(606,295,eHZ,xJ),Y5(eX_,"BasicEList/BasicIndexOutOfBoundsException",606),eTS(696,63,eXz,xt),eUe.Vc=function(e,t){throw p7(new bO)},eUe.Fc=function(e){throw p7(new bO)},eUe.Wc=function(e,t){throw p7(new bO)},eUe.Gc=function(e){throw p7(new bO)},eUe.$b=function(){throw p7(new bO)},eUe.qi=function(e){throw p7(new bO)},eUe.Kc=function(){return this.Zh()},eUe.Yc=function(){return this.$h()},eUe.Zc=function(e){return this._h(e)},eUe.ii=function(e,t){throw p7(new bO)},eUe.ji=function(e,t){throw p7(new bO)},eUe.$c=function(e){throw p7(new bO)},eUe.Mc=function(e){throw p7(new bO)},eUe._c=function(e,t){throw p7(new bO)},Y5(eX_,"BasicEList/UnmodifiableEList",696),eTS(705,1,{3:1,20:1,14:1,15:1,58:1,589:1}),eUe.Vc=function(e,t){Mq(this,e,Pp(t,42))},eUe.Fc=function(e){return LA(this,Pp(e,42))},eUe.Jc=function(e){qX(this,e)},eUe.Xb=function(e){return Pp(etj(this.c,e),133)},eUe.ii=function(e,t){return Pp(this.c.ii(e,t),42)},eUe.ji=function(e,t){MZ(this,e,Pp(t,42))},eUe.Lc=function(){return new R1(null,new Gq(this,16))},eUe.$c=function(e){return Pp(this.c.$c(e),42)},eUe._c=function(e,t){return YV(this,e,Pp(t,42))},eUe.ad=function(e){er8(this,e)},eUe.Nc=function(){return new Gq(this,16)},eUe.Oc=function(){return new R1(null,new Gq(this,16))},eUe.Wc=function(e,t){return this.c.Wc(e,t)},eUe.Gc=function(e){return this.c.Gc(e)},eUe.$b=function(){this.c.$b()},eUe.Hc=function(e){return this.c.Hc(e)},eUe.Ic=function(e){return 
eot(this.c,e)},eUe.qj=function(){var e,t,n;if(null==this.d){for(this.d=Je(e6C,eJA,63,2*this.f+1,0,1),n=this.e,this.f=0,t=this.c.Kc();t.e!=t.i.gc();)ebB(this,e=Pp(t.nj(),133));this.e=n}},eUe.Fb=function(e){return Ij(this,e)},eUe.Hb=function(){return eov(this.c)},eUe.Xc=function(e){return this.c.Xc(e)},eUe.rj=function(){this.c=new pC(this)},eUe.dc=function(){return 0==this.f},eUe.Kc=function(){return this.c.Kc()},eUe.Yc=function(){return this.c.Yc()},eUe.Zc=function(e){return this.c.Zc(e)},eUe.sj=function(){return X6(this)},eUe.tj=function(e,t,n){return new N7(e,t,n)},eUe.uj=function(){return new st},eUe.Mc=function(e){return en$(this,e)},eUe.gc=function(){return this.f},eUe.bd=function(e,t){return new Gz(this.c,e,t)},eUe.Pc=function(){return this.c.Pc()},eUe.Qc=function(e){return this.c.Qc(e)},eUe.Ib=function(){return efq(this.c)},eUe.e=0,eUe.f=0,Y5(eX_,"BasicEMap",705),eTS(1033,63,eXz,pC),eUe.bi=function(e,t){bH(this,Pp(t,133))},eUe.ei=function(e,t,n){var r;++(r=this,Pp(t,133),r).a.e},eUe.fi=function(e,t){b$(this,Pp(t,133))},eUe.gi=function(e,t,n){AO(this,Pp(t,133),Pp(n,133))},eUe.di=function(e,t){eac(this.a)},Y5(eX_,"BasicEMap/1",1033),eTS(1034,63,eXz,st),eUe.ri=function(e){return Je(e6R,eJL,612,e,0,1)},Y5(eX_,"BasicEMap/2",1034),eTS(1035,eUT,eUM,pI),eUe.$b=function(){this.a.c.$b()},eUe.Hc=function(e){return edG(this.a,e)},eUe.Kc=function(){return 0==this.a.f?(LF(),tmB.a):new yd(this.a)},eUe.Mc=function(e){var t;return t=this.a.f,ehx(this.a,e),this.a.f!=t},eUe.gc=function(){return this.a.f},Y5(eX_,"BasicEMap/3",1035),eTS(1036,28,eUx,pD),eUe.$b=function(){this.a.c.$b()},eUe.Hc=function(e){return eCl(this.a,e)},eUe.Kc=function(){return 0==this.a.f?(LF(),tmB.a):new yh(this.a)},eUe.gc=function(){return this.a.f},Y5(eX_,"BasicEMap/4",1036),eTS(1037,eUT,eUM,pN),eUe.$b=function(){this.a.c.$b()},eUe.Hc=function(e){var 
t,n,r,i,a,o,s,u,c;if(this.a.f>0&&M4(e,42)&&(this.a.qj(),i=null==(s=(u=Pp(e,42)).cd())?0:esj(s),a=Cb(this.a,i),t=this.a.d[a])){for(o=0,n=Pp(t.g,367),c=t.i;o"+this.c},eUe.a=0;var e6R=Y5(eX_,"BasicEMap/EntryImpl",612);eTS(536,1,{},o2),Y5(eX_,"BasicEMap/View",536),eTS(768,1,{}),eUe.Fb=function(e){return eT$((Hj(),e2r),e)},eUe.Hb=function(){return esS((Hj(),e2r))},eUe.Ib=function(){return e_F((Hj(),e2r))},Y5(eX_,"ECollections/BasicEmptyUnmodifiableEList",768),eTS(1312,1,eUC,sn),eUe.Nb=function(e){F8(this,e)},eUe.Rb=function(e){throw p7(new bO)},eUe.Ob=function(){return!1},eUe.Sb=function(){return!1},eUe.Pb=function(){throw p7(new bC)},eUe.Tb=function(){return 0},eUe.Ub=function(){throw p7(new bC)},eUe.Vb=function(){return -1},eUe.Qb=function(){throw p7(new bO)},eUe.Wb=function(e){throw p7(new bO)},Y5(eX_,"ECollections/BasicEmptyUnmodifiableEList/1",1312),eTS(1310,768,{20:1,14:1,15:1,58:1},mx),eUe.Vc=function(e,t){y5()},eUe.Fc=function(e){return y6()},eUe.Wc=function(e,t){return y9()},eUe.Gc=function(e){return y8()},eUe.$b=function(){y7()},eUe.Hc=function(e){return!1},eUe.Ic=function(e){return!1},eUe.Jc=function(e){qX(this,e)},eUe.Xb=function(e){return xY((Hj(),e)),null},eUe.Xc=function(e){return -1},eUe.dc=function(){return!0},eUe.Kc=function(){return this.a},eUe.Yc=function(){return this.a},eUe.Zc=function(e){return this.a},eUe.ii=function(e,t){return we()},eUe.ji=function(e,t){wt()},eUe.Lc=function(){return new R1(null,new Gq(this,16))},eUe.$c=function(e){return wn()},eUe.Mc=function(e){return wr()},eUe._c=function(e,t){return wi()},eUe.gc=function(){return 0},eUe.ad=function(e){er8(this,e)},eUe.Nc=function(){return new Gq(this,16)},eUe.Oc=function(){return new R1(null,new Gq(this,16))},eUe.bd=function(e,t){return Hj(),new Gz(e2r,e,t)},eUe.Pc=function(){return Fn((Hj(),e2r))},eUe.Qc=function(e){return Hj(),emk(e2r,e)},Y5(eX_,"ECollections/EmptyUnmodifiableEList",1310),eTS(1311,768,{20:1,14:1,15:1,58:1,589:1},mT),eUe.Vc=function(e,t){y5()},eUe.Fc=function(e){return 
y6()},eUe.Wc=function(e,t){return y9()},eUe.Gc=function(e){return y8()},eUe.$b=function(){y7()},eUe.Hc=function(e){return!1},eUe.Ic=function(e){return!1},eUe.Jc=function(e){qX(this,e)},eUe.Xb=function(e){return xY((Hj(),e)),null},eUe.Xc=function(e){return -1},eUe.dc=function(){return!0},eUe.Kc=function(){return this.a},eUe.Yc=function(){return this.a},eUe.Zc=function(e){return this.a},eUe.ii=function(e,t){return we()},eUe.ji=function(e,t){wt()},eUe.Lc=function(){return new R1(null,new Gq(this,16))},eUe.$c=function(e){return wn()},eUe.Mc=function(e){return wr()},eUe._c=function(e,t){return wi()},eUe.gc=function(){return 0},eUe.ad=function(e){er8(this,e)},eUe.Nc=function(){return new Gq(this,16)},eUe.Oc=function(){return new R1(null,new Gq(this,16))},eUe.bd=function(e,t){return Hj(),new Gz(e2r,e,t)},eUe.Pc=function(){return Fn((Hj(),e2r))},eUe.Qc=function(e){return Hj(),emk(e2r,e)},eUe.sj=function(){return Hj(),Hj(),e2i},Y5(eX_,"ECollections/EmptyUnmodifiableEMap",1311);var e6j=RL(eX_,"Enumerator");eTS(281,1,{281:1},eCg),eUe.Fb=function(e){var t;return this===e||!!M4(e,281)&&(t=Pp(e,281),this.f==t.f&&jx(this.i,t.i)&&jk(this.a,(256&this.f)!=0?(256&t.f)!=0?t.a:null:(256&t.f)!=0?null:t.a)&&jk(this.d,t.d)&&jk(this.g,t.g)&&jk(this.e,t.e)&&epK(this,t))},eUe.Hb=function(){return this.f},eUe.Ib=function(){return eDv(this)},eUe.f=0;var 
e6F,e6Y,e6B,e6U,e6H,e6$,e6z,e6G,e6W,e6K,e6V,e6q,e6Z,e6X,e6J,e6Q,e61,e60,e62,e63,e64,e65,e66,e69,e68,e67,e9e,e9t,e9n,e9r,e9i,e9a,e9o,e9s,e9u,e9c,e9l,e9f,e9d,e9h,e9p,e9b,e9m,e9g,e9v,e9y,e9w,e9_,e9E,e9S,e9k,e9x,e9T,e9M,e9O,e9A,e9L,e9C,e9I,e9D,e9N,e9P,e9R,e9j,e9F,e9Y,e9B,e9U,e9H,e9$,e9z,e9G,e9W,e9K,e9V,e9q,e9Z,e9X,e9J,e9Q,e91,e90,e92,e93,e94,e95,e96,e99,e98,e97,e8e,e8t,e8n,e8r,e8i,e8a,e8o,e8s,e8u,e8c,e8l,e8f,e8d,e8h,e8p,e8b,e8m,e8g,e8v,e8y,e8w,e8_,e8E,e8S,e8k,e8x,e8T,e8M,e8O,e8A,e8L,e8C,e8I,e8D,e8N,e8P,e8R,e8j,e8F,e8Y,e8B,e8U,e8H,e8$,e8z,e8G,e8W,e8K,e8V,e8q,e8Z,e8X,e8J,e8Q,e81,e80,e82,e83,e84,e85,e86,e89,e88,e87,e7e,e7t,e7n,e7r,e7i,e7a,e7o,e7s,e7u,e7c,e7l,e7f,e7d,e7h,e7p,e7b,e7m,e7g,e7v,e7y,e7w,e7_,e7E,e7S,e7k,e7x,e7T,e7M,e7O,e7A,e7L,e7C,e7I,e7D,e7N,e7P,e7R,e7j,e7F,e7Y,e7B,e7U,e7H,e7$,e7z,e7G,e7W,e7K,e7V,e7q,e7Z,e7X,e7J,e7Q,e71,e70,e72,e73,e74,e75,e76,e79,e78,e77,tee,tet,ten,ter,tei,tea,teo,tes,teu,tec,tel,tef,ted,teh,tep,teb,tem,teg,tev,tey,tew,te_,teE,teS,tek,tex,teT,teM,teO,teA,teL,teC,teI,teD,teN,teP,teR,tej,teF,teY,teB,teU,teH,te$,tez,teG,teW,teK,teV,teq,teZ,teX,teJ,teQ,te1,te0,te2,te3,te4,te5,te6,te9,te8,te7,tte,ttt,ttn,ttr,tti,tta,tto,tts,ttu,ttc,ttl,ttf,ttd,tth,ttp,ttb,ttm,ttg,ttv,tty,ttw,tt_,ttE,ttS,ttk,ttx,ttT,ttM,ttO,ttA,ttL,ttC,ttI,ttD,ttN,ttP,ttR,ttj,ttF,ttY,ttB,ttU,ttH,tt$,ttz,ttG,ttW,ttK,ttV,ttq,ttZ,ttX,ttJ,ttQ,tt1,tt0,tt2,tt3,tt4,tt5,tt6,tt9,tt8,tt7,tne,tnt,tnn,tnr,tni,tna,tno,tns,tnu,tnc,tnl,tnf,tnd,tnh,tnp,tnb,tnm,tng,tnv,tny,tnw,tn_,tnE,tnS,tnk,tnx,tnT,tnM,tnO,tnA,tnL,tnC,tnI,tnD,tnN,tnP,tnR,tnj,tnF,tnY,tnB,tnU,tnH,tn$,tnz,tnG,tnW,tnK,tnV,tnq,tnZ,tnX,tnJ,tnQ,tn1,tn0,tn2,tn3,tn4,tn5,tn6,tn9,tn8,tn7,tre,trt,trn,trr,tri,tra,tro,trs,tru,trc,trl,trf,trd,trh,trp,trb,trm,trg,trv,trw,tr_,trE,trS,trk,trx,trT,trM,trO,trA,trL,trC,trI,trD,trN,trP,trR,trj,trF,trY,trB,trU,trH,tr$,trz,trG,trW,trK,trV,trq,trZ,trX,trJ,trQ,tr1,tr0,tr2,tr3,tr4,tr5,tr6,tr9,tr8,tr7,tie,tit,tin,tir,tii,tia,tio,tis,tiu,tic,til,tif,tid,tih,tip,tib,tim,tig,tiv,tiy,tiw,ti_,tiE,tiS,tik,tix,tiT,
tiM,tiO,tiA,tiL,tiC,tiI,tiD,tiN,tiP,tiR,tij,tiF,tiY,tiB,tiU,tiH,ti$,tiz,tiG,tiW,tiK,tiV,tiq,tiZ,tiX,tiJ,tiQ,ti1,ti0,ti2,ti3,ti4,ti5,ti6,ti9,ti8,ti7,tae,tat,tan,tar,tai,taa,tao,tas,tau,tac,tal,taf,tad,tah,tap,tab,tam,tag,tav,tay,taw,ta_,taE,taS,tak,tax,taT,taM,taO,taA,taL,taC,taI,taD,taN,taP,taR,taj,taF,taY,taB,taU,taH,ta$,taz,taG,taW,taK,taV,taq,taZ,taX,taJ,taQ,ta1,ta0,ta2,ta3,ta4,ta5,ta6,ta9,ta8,ta7,toe,tot,ton,tor,toi,toa,too,tos,tou,toc,tol,tof,tod,toh,top,tob,tom,tog,tov,toy,tow,to_,toE,toS,tok,tox,toT,toM,toO,toA,toL,toC,toI,toD,toN,toP,toR,toj,toF,toY,toB,toU,toH,to$,toz,toG,toW,toK,toV,toq,toZ,toX,toJ,toQ,to1,to0,to2,to3,to4,to5,to6,to9,to8,to7,tse,tst,tsn,tsr,tsi,tsa,tso,tss,tsu,tsc,tsl,tsf,tsd,tsh,tsp,tsb,tsm,tsg,tsv,tsy,tsw,ts_,tsE,tsS,tsk,tsx,tsT,tsM,tsO,tsA,tsL,tsC,tsI,tsD,tsN,tsP,tsR,tsj,tsF,tsY,tsB,tsU,tsH,ts$,tsz,tsG,tsW,tsK,tsV,tsq,tsZ,tsX,tsJ,tsQ,ts1,ts0,ts2,ts3,ts4,ts5,ts6,ts9,ts8,ts7,tue,tut,tun,tur,tui,tua,tuo,tus,tuu,tuc,tul,tuf,tud,tuh,tup,tub,tum,tug,tuv,tuy,tuw,tu_,tuE,tuS,tuk,tux,tuT,tuM,tuO,tuA,tuL,tuC,tuI,tuD,tuN,tuP,tuR,tuj,tuF,tuY,tuB,tuU,tuH,tu$,tuz,tuG,tuW,tuK,tuV,tuq,tuZ,tuX,tuJ,tuQ,tu1,tu0,tu2,tu3,tu4,tu5,tu6,tu9,tu8,tu7,tce,tct,tcn,tcr,tci,tca,tco,tcs,tcu,tcc,tcl,tcf,tcd,tch,tcp,tcb,tcm,tcg,tcv,tcy,tcw,tc_,tcE,tcS,tck,tcx,tcT,tcM,tcO,tcA,tcL,tcC,tcI,tcD,tcN,tcP,tcR,tcj,tcF,tcY,tcB,tcU,tcH,tc$,tcz,tcG,tcW,tcK,tcV,tcq,tcZ,tcX,tcJ,tcQ,tc1,tc0,tc2,tc3,tc4,tc5,tc6,tc9,tc8,tc7,tle,tlt,tln,tlr,tli,tla,tlo,tls,tlu,tlc,tll,tlf,tld,tlh,tlp,tlb,tlm,tlg,tlv,tly,tlw,tl_,tlE,tlS,tlk,tlx,tlT,tlM,tlO,tlA,tlL,tlC,tlI,tlD,tlN,tlP,tlR,tlj,tlF,tlY,tlB,tlU,tlH,tl$,tlz,tlG,tlW,tlK,tlV,tlq,tlZ,tlX,tlJ,tlQ,tl1,tl0,tl2,tl3,tl4,tl5,tl6,tl9,tl8,tl7,tfe,tft,tfn,tfr,tfi,tfa,tfo,tfs,tfu,tfc,tfl,tff,tfd,tfh,tfp,tfb,tfm,tfg,tfv,tfy,tfw,tf_,tfE,tfS,tfk,tfx,tfT,tfM,tfO,tfA,tfL,tfC,tfI,tfD,tfN,tfP,tfR,tfj,tfF,tfY,tfB,tfU,tfH,tf$,tfz,tfG,tfW,tfK,tfV,tfq,tfZ,tfX,tfJ,tfQ,tf1,tf0,tf2,tf3,tf4,tf5,tf6,tf9,tf8,tf7,tde,tdt,tdn,tdr,tdi,tda,tdo,tds,tdu,tdc,tdl,tdf,tdd,tdh,tdp,
tdb,tdm,tdg,tdv,tdy,tdw,td_,tdE,tdS,tdk,tdx,tdT,tdM,tdO,tdA,tdL,tdC,tdI,tdD,tdN,tdP,tdR,tdj,tdF,tdY,tdB,tdU,tdH,td$,tdz,tdG,tdW,tdK,tdV,tdq,tdZ,tdX,tdJ,tdQ,td1,td0,td2,td3,td4,td5,td6,td9,td8,td7,the,tht,thn,thr,thi,tha,tho,ths,thu,thc,thl,thf,thd,thh,thp,thb,thm,thg,thv,thy,thw,th_,thE,thS,thk,thx,thT,thM,thO,thA,thL,thC,thI,thD,thN,thP,thR,thj,thF,thY,thB,thU,thH,th$,thz,thG,thW,thK,thV,thq,thZ,thX,thJ,thQ,th1,th0,th2,th3,th4,th5,th6,th9,th8,th7,tpe,tpt,tpn,tpr,tpi,tpa,tpo,tps,tpu,tpc,tpl,tpf,tpd,tph,tpp,tpb,tpm,tpg,tpv,tpy,tpw,tp_,tpE,tpS,tpk,tpx,tpT,tpM,tpO,tpA,tpL,tpC,tpI,tpD,tpN,tpP,tpR,tpj,tpF,tpY,tpB,tpU,tpH,tp$,tpz,tpG,tpW,tpK,tpV,tpq,tpZ,tpX,tpJ,tpQ,tp1,tp0,tp2,tp3,tp4,tp5,tp6,tp9,tp8,tp7,tbe,tbt,tbn,tbr,tbi,tba,tbo,tbs,tbu,tbc,tbl,tbf,tbd,tbh,tbp,tbb,tbm,tbg,tbv,tby,tbw,tb_,tbE,tbS,tbk,tbx,tbT,tbM,tbO,tbA,tbL,tbC,tbI,tbD,tbN,tbP,tbR,tbj,tbF,tbY,tbB,tbU,tbH,tb$,tbz,tbG,tbW,tbK,tbV,tbq,tbZ,tbX,tbJ,tbQ,tb1,tb0,tb2,tb3,tb4,tb5,tb6,tb9,tb8,tb7,tme,tmt,tmn,tmr,tmi,tma,tmo,tms,tmu,tmc,tml,tmf,tmd,tmh,tmp,tmb,tmm,tmg,tmv,tmy,tmw,tm_,tmE,tmS,tmk,tmx,tmT,tmM,tmO,tmA,tmL,tmC,tmI,tmD,tmN,tmP,tmR,tmj,tmF,tmY,tmB,tmU,tmH,tm$,tmz,tmG=0,tmW=0,tmK=0,tmV=0,tmq=0,tmZ=0,tmX=0,tmJ=0,tmQ=0,tm1=0,tm0=0,tm2=0,tm3=0;Y5(eX_,"URI",281),eTS(1091,43,e$s,mM),eUe.zc=function(e,t){return Pp(Ge(this,Lq(e),Pp(t,281)),281)},Y5(eX_,"URI/URICache",1091),eTS(497,63,eXz,o5,jf),eUe.hi=function(){return!0},Y5(eX_,"UniqueEList",497),eTS(581,60,eHr,QH),Y5(eX_,"WrappedException",581);var tm4=RL(eZD,eJD),tm5=RL(eZD,eJN),tm6=RL(eZD,eJP),tm9=RL(eZD,eJR),tm8=RL(eZD,eJj),tm7=RL(eZD,"EClass"),tge=RL(eZD,"EDataType");eTS(1183,43,e$s,mO),eUe.xc=function(e){return xd(e)?zg(this,e):xu($I(this.f,e))},Y5(eZD,"EDataType/Internal/ConversionDelegate/Factory/Registry/Impl",1183);var 
tgt=RL(eZD,"EEnum"),tgn=RL(eZD,eJF),tgr=RL(eZD,eJY),tgi=RL(eZD,eJB),tga=RL(eZD,eJU),tgo=RL(eZD,eJH);eTS(1029,1,{},o4),eUe.Ib=function(){return"NIL"},Y5(eZD,"EStructuralFeature/Internal/DynamicValueHolder/1",1029),eTS(1028,43,e$s,mA),eUe.xc=function(e){return xd(e)?zg(this,e):xu($I(this.f,e))},Y5(eZD,"EStructuralFeature/Internal/SettingDelegate/Factory/Registry/Impl",1028);var tgs=RL(eZD,eJ$),tgu=RL(eZD,"EValidator/PatternMatcher"),tgc=RL(eJz,"FeatureMap/Entry");eTS(535,1,{72:1},k3),eUe.ak=function(){return this.a},eUe.dd=function(){return this.b},Y5(eZ2,"BasicEObjectImpl/1",535),eTS(1027,1,eJG,k4),eUe.Wj=function(e){return ZN(this.a,this.b,e)},eUe.fj=function(){return zz(this.a,this.b)},eUe.Wb=function(e){zx(this.a,this.b,e)},eUe.Xj=function(){B4(this.a,this.b)},Y5(eZ2,"BasicEObjectImpl/4",1027),eTS(1983,1,{108:1}),eUe.bk=function(e){this.e=0==e?tgH:Je(e1R,eUp,1,e,5,1)},eUe.Ch=function(e){return this.e[e]},eUe.Dh=function(e,t){this.e[e]=t},eUe.Eh=function(e){this.e[e]=null},eUe.ck=function(){return this.c},eUe.dk=function(){throw p7(new bO)},eUe.ek=function(){throw p7(new bO)},eUe.fk=function(){return this.d},eUe.gk=function(){return null!=this.e},eUe.hk=function(e){this.c=e},eUe.ik=function(e){throw p7(new bO)},eUe.jk=function(e){throw p7(new bO)},eUe.kk=function(e){this.d=e},Y5(eZ2,"BasicEObjectImpl/EPropertiesHolderBaseImpl",1983),eTS(185,1983,{108:1},c1),eUe.dk=function(){return this.a},eUe.ek=function(){return this.b},eUe.ik=function(e){this.a=e},eUe.jk=function(e){this.b=e},Y5(eZ2,"BasicEObjectImpl/EPropertiesHolderImpl",185),eTS(506,97,eZ0,sr),eUe.Kg=function(){return this.f},eUe.Pg=function(){return this.k},eUe.Rg=function(e,t){this.g=e,this.i=t},eUe.Tg=function(){return(2&this.j)==0?this.zh():this.ph().ck()},eUe.Vg=function(){return this.i},eUe.Mg=function(){return(1&this.j)!=0},eUe.eh=function(){return this.g},eUe.kh=function(){return(4&this.j)!=0},eUe.ph=function(){return this.k||(this.k=new 
c1),this.k},eUe.th=function(e){this.ph().hk(e),e?this.j|=2:this.j&=-3},eUe.vh=function(e){this.ph().jk(e),e?this.j|=4:this.j&=-5},eUe.zh=function(){return(BM(),tgv).S},eUe.i=0,eUe.j=1,Y5(eZ2,"EObjectImpl",506),eTS(780,506,{105:1,92:1,90:1,56:1,108:1,49:1,97:1},Pq),eUe.Ch=function(e){return this.e[e]},eUe.Dh=function(e,t){this.e[e]=t},eUe.Eh=function(e){this.e[e]=null},eUe.Tg=function(){return this.d},eUe.Yg=function(e){return edv(this.d,e)},eUe.$g=function(){return this.d},eUe.dh=function(){return null!=this.e},eUe.ph=function(){return this.k||(this.k=new si),this.k},eUe.th=function(e){this.d=e},eUe.yh=function(){var e;return null==this.e&&(e=Y1(this.d),this.e=0==e?tg$:Je(e1R,eUp,1,e,5,1)),this},eUe.Ah=function(){return 0},Y5(eZ2,"DynamicEObjectImpl",780),eTS(1376,780,{105:1,42:1,92:1,90:1,133:1,56:1,108:1,49:1,97:1},RO),eUe.Fb=function(e){return this===e},eUe.Hb=function(){return Ao(this)},eUe.th=function(e){this.d=e,this.b=eAh(e,"key"),this.c=eAh(e,eXr)},eUe.Sh=function(){var e;return -1==this.a&&(e=Q9(this,this.b),this.a=null==e?0:esj(e)),this.a},eUe.cd=function(){return Q9(this,this.b)},eUe.dd=function(){return Q9(this,this.c)},eUe.Th=function(e){this.a=e},eUe.Uh=function(e){zx(this,this.b,e)},eUe.ed=function(e){var t;return t=Q9(this,this.c),zx(this,this.c,e),t},eUe.a=0,Y5(eZ2,"DynamicEObjectImpl/BasicEMapEntry",1376),eTS(1377,1,{108:1},si),eUe.bk=function(e){throw p7(new bO)},eUe.Ch=function(e){throw p7(new bO)},eUe.Dh=function(e,t){throw p7(new bO)},eUe.Eh=function(e){throw p7(new bO)},eUe.ck=function(){throw p7(new bO)},eUe.dk=function(){return this.a},eUe.ek=function(){return this.b},eUe.fk=function(){return this.c},eUe.gk=function(){throw p7(new bO)},eUe.hk=function(e){throw p7(new bO)},eUe.ik=function(e){this.a=e},eUe.jk=function(e){this.b=e},eUe.kk=function(e){this.c=e},Y5(eZ2,"DynamicEObjectImpl/DynamicEPropertiesHolderImpl",1377),eTS(510,150,{105:1,92:1,90:1,590:1,147:1,56:1,108:1,49:1,97:1,510:1,150:1,114:1,115:1},sa),eUe.Qg=function(e){return 
eg4(this,e)},eUe._g=function(e,t,n){var r;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.d;case 2:return n?(this.b||(this.b=new L_((eBK(),tgF),tgf,this)),this.b):(this.b||(this.b=new L_((eBK(),tgF),tgf,this)),X6(this.b));case 3:return z4(this);case 4:return this.a||(this.a=new O_(e6f,this,4)),this.a;case 5:return this.c||(this.c=new OT(e6f,this,5)),this.c}return Qt(this,e-Y1((eBK(),tgy)),ee2((r=Pp(eaS(this,16),26))||tgy,e),t,n)},eUe.hh=function(e,t,n){var r,i,a;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),edF(this.Ab,e,n);case 3:return this.Cb&&(n=(i=this.Db>>16)>=0?eg4(this,n):this.Cb.ih(this,-1-i,null,n)),j3(this,Pp(e,147),n)}return(a=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgy),t),66)).Nj().Qj(this,ehH(this),t-Y1((eBK(),tgy)),e,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),ep6(this.Ab,e,n);case 2:return this.b||(this.b=new L_((eBK(),tgF),tgf,this)),Iz(this.b,e,n);case 3:return j3(this,null,n);case 4:return this.a||(this.a=new O_(e6f,this,4)),ep6(this.a,e,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgy),t),66)).Nj().Rj(this,ehH(this),t-Y1((eBK(),tgy)),e,n)},eUe.lh=function(e){var t;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return null!=this.d;case 2:return!!this.b&&0!=this.b.f;case 3:return!!z4(this);case 4:return!!this.a&&0!=this.a.i;case 5:return!!this.c&&0!=this.c.i}return VP(this,e-Y1((eBK(),tgy)),ee2((t=Pp(eaS(this,16),26))||tgy,e))},eUe.sh=function(e,t){var n;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:RN(this,Lq(t));return;case 2:this.b||(this.b=new L_((eBK(),tgF),tgf,this)),eai(this.b,t);return;case 3:eAc(this,Pp(t,147));return;case 4:this.a||(this.a=new O_(e6f,this,4)),eRT(this.a),this.a||(this.a=new O_(e6f,this,4)),Y4(this.a,Pp(t,14));return;case 5:this.c||(this.c=new OT(e6f,this,5)),eRT(this.c),this.c||(this.c=new 
OT(e6f,this,5)),Y4(this.c,Pp(t,14));return}efL(this,e-Y1((eBK(),tgy)),ee2((n=Pp(eaS(this,16),26))||tgy,e),t)},eUe.zh=function(){return eBK(),tgy},eUe.Bh=function(e){var t;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:erl(this,null);return;case 2:this.b||(this.b=new L_((eBK(),tgF),tgf,this)),this.b.c.$b();return;case 3:eAc(this,null);return;case 4:this.a||(this.a=new O_(e6f,this,4)),eRT(this.a);return;case 5:this.c||(this.c=new OT(e6f,this,5)),eRT(this.c);return}ec6(this,e-Y1((eBK(),tgy)),ee2((t=Pp(eaS(this,16),26))||tgy,e))},eUe.Ib=function(){return eln(this)},eUe.d=null,Y5(eZ2,"EAnnotationImpl",510),eTS(151,705,eJW,JY),eUe.Xh=function(e,t){T7(this,e,Pp(t,42))},eUe.lk=function(e,t){return I$(this,Pp(e,42),t)},eUe.pi=function(e){return Pp(Pp(this.c,69).pi(e),133)},eUe.Zh=function(){return Pp(this.c,69).Zh()},eUe.$h=function(){return Pp(this.c,69).$h()},eUe._h=function(e){return Pp(this.c,69)._h(e)},eUe.mk=function(e,t){return Iz(this,e,t)},eUe.Wj=function(e){return Pp(this.c,76).Wj(e)},eUe.rj=function(){},eUe.fj=function(){return Pp(this.c,76).fj()},eUe.tj=function(e,t,n){var r;return(r=Pp(etP(this.b).Nh().Jh(this.b),133)).Th(e),r.Uh(t),r.ed(n),r},eUe.uj=function(){return new pZ(this)},eUe.Wb=function(e){eai(this,e)},eUe.Xj=function(){Pp(this.c,76).Xj()},Y5(eJz,"EcoreEMap",151),eTS(158,151,eJW,L_),eUe.qj=function(){var e,t,n,r,i,a;if(null==this.d){for(a=Je(e6C,eJA,63,2*this.f+1,0,1),n=this.c.Kc();n.e!=n.i.gc();)(e=a[i=((r=(t=Pp(n.nj(),133)).Sh())&eUu)%a.length])||(e=a[i]=new pZ(this)),e.Fc(t);this.d=a}},Y5(eZ2,"EAnnotationImpl/1",158),eTS(284,438,{105:1,92:1,90:1,147:1,191:1,56:1,108:1,472:1,49:1,97:1,150:1,284:1,114:1,115:1}),eUe._g=function(e,t,n){var r,i;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.zb;case 2:return OQ(),(256&this.Bb)!=0;case 3:return OQ(),(512&this.Bb)!=0;case 4:return ell(this.s);case 5:return ell(this.t);case 6:return OQ(),!!this.$j();case 7:return 
OQ(),(i=this.s)>=1;case 8:if(t)return evl(this);return this.r;case 9:return this.q}return Qt(this,e-Y1(this.zh()),ee2((r=Pp(eaS(this,16),26))||this.zh(),e),t,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),ep6(this.Ab,e,n);case 9:return Y3(this,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||this.zh(),t),66)).Nj().Rj(this,ehH(this),t-Y1(this.zh()),e,n)},eUe.lh=function(e){var t,n;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return null!=this.zb;case 2:return(256&this.Bb)==0;case 3:return(512&this.Bb)==0;case 4:return 0!=this.s;case 5:return 1!=this.t;case 6:return this.$j();case 7:return(n=this.s)>=1;case 8:return!!this.r&&!this.q.e&&0==BX(this.q).i;case 9:return!!this.q&&!(this.r&&!this.q.e&&0==BX(this.q).i)}return VP(this,e-Y1(this.zh()),ee2((t=Pp(eaS(this,16),26))||this.zh(),e))},eUe.sh=function(e,t){var n,r;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:this.Lh(Lq(t));return;case 2:eli(this,gN(LK(t)));return;case 3:els(this,gN(LK(t)));return;case 4:end(this,Pp(t,19).a);return;case 5:this.ok(Pp(t,19).a);return;case 8:eu2(this,Pp(t,138));return;case 9:(r=ew3(this,Pp(t,87),null))&&r.Fi();return}efL(this,e-Y1(this.zh()),ee2((n=Pp(eaS(this,16),26))||this.zh(),e),t)},eUe.zh=function(){return eBK(),tgB},eUe.Bh=function(e){var t,n;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:this.Lh(null);return;case 2:eli(this,!0);return;case 3:els(this,!0);return;case 4:end(this,0);return;case 5:this.ok(1);return;case 8:eu2(this,null);return;case 9:(n=ew3(this,null,null))&&n.Fi();return}ec6(this,e-Y1(this.zh()),ee2((t=Pp(eaS(this,16),26))||this.zh(),e))},eUe.Gh=function(){evl(this),this.Bb|=1},eUe.Yj=function(){return evl(this)},eUe.Zj=function(){return this.t},eUe.$j=function(){var e;return(e=this.t)>1||-1==e},eUe.hi=function(){return(512&this.Bb)!=0},eUe.nk=function(e,t){return 
ecz(this,e,t)},eUe.ok=function(e){enh(this,e)},eUe.Ib=function(){return ex3(this)},eUe.s=0,eUe.t=1,Y5(eZ2,"ETypedElementImpl",284),eTS(449,284,{105:1,92:1,90:1,147:1,191:1,56:1,170:1,66:1,108:1,472:1,49:1,97:1,150:1,449:1,284:1,114:1,115:1,677:1}),eUe.Qg=function(e){return egx(this,e)},eUe._g=function(e,t,n){var r,i;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.zb;case 2:return OQ(),(256&this.Bb)!=0;case 3:return OQ(),(512&this.Bb)!=0;case 4:return ell(this.s);case 5:return ell(this.t);case 6:return OQ(),!!this.$j();case 7:return OQ(),(i=this.s)>=1;case 8:if(t)return evl(this);return this.r;case 9:return this.q;case 10:return OQ(),(this.Bb&eXt)!=0;case 11:return OQ(),(this.Bb&eJq)!=0;case 12:return OQ(),(this.Bb&eH0)!=0;case 13:return this.j;case 14:return eOI(this);case 15:return OQ(),(this.Bb&eJV)!=0;case 16:return OQ(),(this.Bb&eUR)!=0;case 17:return z6(this)}return Qt(this,e-Y1(this.zh()),ee2((r=Pp(eaS(this,16),26))||this.zh(),e),t,n)},eUe.hh=function(e,t,n){var r,i,a;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),edF(this.Ab,e,n);case 17:return this.Cb&&(n=(i=this.Db>>16)>=0?egx(this,n):this.Cb.ih(this,-1-i,null,n)),eDg(this,e,17,n)}return(a=Pp(ee2((r=Pp(eaS(this,16),26))||this.zh(),t),66)).Nj().Qj(this,ehH(this),t-Y1(this.zh()),e,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),ep6(this.Ab,e,n);case 9:return Y3(this,n);case 17:return eDg(this,null,17,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||this.zh(),t),66)).Nj().Rj(this,ehH(this),t-Y1(this.zh()),e,n)},eUe.lh=function(e){var t,n;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return null!=this.zb;case 2:return(256&this.Bb)==0;case 3:return(512&this.Bb)==0;case 4:return 0!=this.s;case 5:return 1!=this.t;case 6:return this.$j();case 7:return(n=this.s)>=1;case 8:return!!this.r&&!this.q.e&&0==BX(this.q).i;case 9:return!!this.q&&!(this.r&&!this.q.e&&0==BX(this.q).i);case 
10:return(this.Bb&eXt)==0;case 11:return(this.Bb&eJq)!=0;case 12:return(this.Bb&eH0)!=0;case 13:return null!=this.j;case 14:return null!=eOI(this);case 15:return(this.Bb&eJV)!=0;case 16:return(this.Bb&eUR)!=0;case 17:return!!z6(this)}return VP(this,e-Y1(this.zh()),ee2((t=Pp(eaS(this,16),26))||this.zh(),e))},eUe.sh=function(e,t){var n,r;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:GD(this,Lq(t));return;case 2:eli(this,gN(LK(t)));return;case 3:els(this,gN(LK(t)));return;case 4:end(this,Pp(t,19).a);return;case 5:this.ok(Pp(t,19).a);return;case 8:eu2(this,Pp(t,138));return;case 9:(r=ew3(this,Pp(t,87),null))&&r.Fi();return;case 10:elF(this,gN(LK(t)));return;case 11:elU(this,gN(LK(t)));return;case 12:elY(this,gN(LK(t)));return;case 13:xi(this,Lq(t));return;case 15:elB(this,gN(LK(t)));return;case 16:elZ(this,gN(LK(t)));return}efL(this,e-Y1(this.zh()),ee2((n=Pp(eaS(this,16),26))||this.zh(),e),t)},eUe.zh=function(){return eBK(),tgY},eUe.Bh=function(e){var t,n;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:M4(this.Cb,88)&&eko(Zd(Pp(this.Cb,88)),4),er3(this,null);return;case 2:eli(this,!0);return;case 3:els(this,!0);return;case 4:end(this,0);return;case 5:this.ok(1);return;case 8:eu2(this,null);return;case 9:(n=ew3(this,null,null))&&n.Fi();return;case 10:elF(this,!0);return;case 11:elU(this,!1);return;case 12:elY(this,!1);return;case 13:this.i=null,erA(this,null);return;case 15:elB(this,!1);return;case 16:elZ(this,!1);return}ec6(this,e-Y1(this.zh()),ee2((t=Pp(eaS(this,16),26))||this.zh(),e))},eUe.Gh=function(){UH(QZ((eSp(),tvc),this)),evl(this),this.Bb|=1},eUe.Gj=function(){return this.f},eUe.zj=function(){return eOI(this)},eUe.Hj=function(){return z6(this)},eUe.Lj=function(){return null},eUe.pk=function(){return this.k},eUe.aj=function(){return this.n},eUe.Mj=function(){return eyD(this)},eUe.Nj=function(){var e,t,n,r,i,a,o,s,u;return 
this.p||((null==(n=z6(this)).i&&eNT(n),n.i).length,(r=this.Lj())&&Y1(z6(r)),e=(o=(i=evl(this)).Bj())?(1&o.i)!=0?o==tyE?e11:o==ty_?e15:o==tyT?e14:o==tyx?e13:o==tyS?e16:o==tyM?e19:o==tyk?e10:e12:o:null,t=eOI(this),s=i.zj(),efl(this),(this.Bb&eUR)!=0&&((a=ev1((eSp(),tvc),n))&&a!=this||(a=Wk(QZ(tvc,this))))?this.p=new k6(this,a):this.$j()?this.rk()?r?(this.Bb&eJV)!=0?e?this.sk()?this.p=new HS(47,e,this,r):this.p=new HS(5,e,this,r):this.sk()?this.p=new qc(46,this,r):this.p=new qc(4,this,r):e?this.sk()?this.p=new HS(49,e,this,r):this.p=new HS(7,e,this,r):this.sk()?this.p=new qc(48,this,r):this.p=new qc(6,this,r):(this.Bb&eJV)!=0?e?e==e1$?this.p=new Pe(50,e6M,this):this.sk()?this.p=new Pe(43,e,this):this.p=new Pe(1,e,this):this.sk()?this.p=new $F(42,this):this.p=new $F(0,this):e?e==e1$?this.p=new Pe(41,e6M,this):this.sk()?this.p=new Pe(45,e,this):this.p=new Pe(3,e,this):this.sk()?this.p=new $F(44,this):this.p=new $F(2,this):M4(i,148)?e==tgc?this.p=new $F(40,this):(512&this.Bb)!=0?(this.Bb&eJV)!=0?e?this.p=new Pe(9,e,this):this.p=new $F(8,this):e?this.p=new Pe(11,e,this):this.p=new $F(10,this):(this.Bb&eJV)!=0?e?this.p=new Pe(13,e,this):this.p=new $F(12,this):e?this.p=new Pe(15,e,this):this.p=new $F(14,this):r?(u=r.t)>1||-1==u?this.sk()?(this.Bb&eJV)!=0?e?this.p=new HS(25,e,this,r):this.p=new qc(24,this,r):e?this.p=new HS(27,e,this,r):this.p=new qc(26,this,r):(this.Bb&eJV)!=0?e?this.p=new HS(29,e,this,r):this.p=new qc(28,this,r):e?this.p=new HS(31,e,this,r):this.p=new qc(30,this,r):this.sk()?(this.Bb&eJV)!=0?e?this.p=new HS(33,e,this,r):this.p=new qc(32,this,r):e?this.p=new HS(35,e,this,r):this.p=new qc(34,this,r):(this.Bb&eJV)!=0?e?this.p=new HS(37,e,this,r):this.p=new qc(36,this,r):e?this.p=new HS(39,e,this,r):this.p=new qc(38,this,r):this.sk()?(this.Bb&eJV)!=0?e?this.p=new Pe(17,e,this):this.p=new $F(16,this):e?this.p=new Pe(19,e,this):this.p=new $F(18,this):(this.Bb&eJV)!=0?e?this.p=new Pe(21,e,this):this.p=new $F(20,this):e?this.p=new Pe(23,e,this):this.p=new 
$F(22,this):this.qk()?this.sk()?this.p=new Pt(Pp(i,26),this,r):this.p=new zl(Pp(i,26),this,r):M4(i,148)?e==tgc?this.p=new $F(40,this):(this.Bb&eJV)!=0?e?this.p=new j9(t,s,this,(edO(),o==ty_?tg2:o==tyE?tgX:o==tyS?tg3:o==tyT?tg0:o==tyx?tg1:o==tyM?tg5:o==tyk?tgJ:o==tyw?tgQ:tg4)):this.p=new HT(Pp(i,148),t,s,this):e?this.p=new j6(t,s,this,(edO(),o==ty_?tg2:o==tyE?tgX:o==tyS?tg3:o==tyT?tg0:o==tyx?tg1:o==tyM?tg5:o==tyk?tgJ:o==tyw?tgQ:tg4)):this.p=new Hx(Pp(i,148),t,s,this):this.rk()?r?(this.Bb&eJV)!=0?this.sk()?this.p=new Ps(Pp(i,26),this,r):this.p=new Po(Pp(i,26),this,r):this.sk()?this.p=new Pa(Pp(i,26),this,r):this.p=new Pn(Pp(i,26),this,r):(this.Bb&eJV)!=0?this.sk()?this.p=new Lx(Pp(i,26),this):this.p=new Lk(Pp(i,26),this):this.sk()?this.p=new LS(Pp(i,26),this):this.p=new LE(Pp(i,26),this):this.sk()?r?(this.Bb&eJV)!=0?this.p=new Pu(Pp(i,26),this,r):this.p=new Pr(Pp(i,26),this,r):(this.Bb&eJV)!=0?this.p=new LM(Pp(i,26),this):this.p=new LT(Pp(i,26),this):r?(this.Bb&eJV)!=0?this.p=new Pc(Pp(i,26),this,r):this.p=new Pi(Pp(i,26),this,r):(this.Bb&eJV)!=0?this.p=new LO(Pp(i,26),this):this.p=new jd(Pp(i,26),this)),this.p},eUe.Ij=function(){return(this.Bb&eXt)!=0},eUe.qk=function(){return!1},eUe.rk=function(){return!1},eUe.Jj=function(){return(this.Bb&eUR)!=0},eUe.Oj=function(){return eec(this)},eUe.sk=function(){return!1},eUe.Kj=function(){return(this.Bb&eJV)!=0},eUe.tk=function(e){this.k=e},eUe.Lh=function(e){GD(this,e)},eUe.Ib=function(){return eCR(this)},eUe.e=!1,eUe.n=0,Y5(eZ2,"EStructuralFeatureImpl",449),eTS(322,449,{105:1,92:1,90:1,34:1,147:1,191:1,56:1,170:1,66:1,108:1,472:1,49:1,97:1,322:1,150:1,449:1,284:1,114:1,115:1,677:1},mC),eUe._g=function(e,t,n){var r,i;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.zb;case 2:return OQ(),(256&this.Bb)!=0;case 3:return OQ(),(512&this.Bb)!=0;case 4:return ell(this.s);case 5:return ell(this.t);case 6:return OQ(),!!ek7(this);case 7:return OQ(),(i=this.s)>=1;case 8:if(t)return 
evl(this);return this.r;case 9:return this.q;case 10:return OQ(),(this.Bb&eXt)!=0;case 11:return OQ(),(this.Bb&eJq)!=0;case 12:return OQ(),(this.Bb&eH0)!=0;case 13:return this.j;case 14:return eOI(this);case 15:return OQ(),(this.Bb&eJV)!=0;case 16:return OQ(),(this.Bb&eUR)!=0;case 17:return z6(this);case 18:return OQ(),(this.Bb&eZ1)!=0;case 19:if(t)return eoe(this);return Xl(this)}return Qt(this,e-Y1((eBK(),tgw)),ee2((r=Pp(eaS(this,16),26))||tgw,e),t,n)},eUe.lh=function(e){var t,n;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return null!=this.zb;case 2:return(256&this.Bb)==0;case 3:return(512&this.Bb)==0;case 4:return 0!=this.s;case 5:return 1!=this.t;case 6:return ek7(this);case 7:return(n=this.s)>=1;case 8:return!!this.r&&!this.q.e&&0==BX(this.q).i;case 9:return!!this.q&&!(this.r&&!this.q.e&&0==BX(this.q).i);case 10:return(this.Bb&eXt)==0;case 11:return(this.Bb&eJq)!=0;case 12:return(this.Bb&eH0)!=0;case 13:return null!=this.j;case 14:return null!=eOI(this);case 15:return(this.Bb&eJV)!=0;case 16:return(this.Bb&eUR)!=0;case 17:return!!z6(this);case 18:return(this.Bb&eZ1)!=0;case 19:return!!Xl(this)}return VP(this,e-Y1((eBK(),tgw)),ee2((t=Pp(eaS(this,16),26))||tgw,e))},eUe.sh=function(e,t){var n,r;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:GD(this,Lq(t));return;case 2:eli(this,gN(LK(t)));return;case 3:els(this,gN(LK(t)));return;case 4:end(this,Pp(t,19).a);return;case 5:yg(this,Pp(t,19).a);return;case 8:eu2(this,Pp(t,138));return;case 9:(r=ew3(this,Pp(t,87),null))&&r.Fi();return;case 10:elF(this,gN(LK(t)));return;case 11:elU(this,gN(LK(t)));return;case 12:elY(this,gN(LK(t)));return;case 13:xi(this,Lq(t));return;case 15:elB(this,gN(LK(t)));return;case 16:elZ(this,gN(LK(t)));return;case 18:elX(this,gN(LK(t)));return}efL(this,e-Y1((eBK(),tgw)),ee2((n=Pp(eaS(this,16),26))||tgw,e),t)},eUe.zh=function(){return eBK(),tgw},eUe.Bh=function(e){var t,n;switch(e){case 
0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:M4(this.Cb,88)&&eko(Zd(Pp(this.Cb,88)),4),er3(this,null);return;case 2:eli(this,!0);return;case 3:els(this,!0);return;case 4:end(this,0);return;case 5:this.b=0,enh(this,1);return;case 8:eu2(this,null);return;case 9:(n=ew3(this,null,null))&&n.Fi();return;case 10:elF(this,!0);return;case 11:elU(this,!1);return;case 12:elY(this,!1);return;case 13:this.i=null,erA(this,null);return;case 15:elB(this,!1);return;case 16:elZ(this,!1);return;case 18:elX(this,!1);return}ec6(this,e-Y1((eBK(),tgw)),ee2((t=Pp(eaS(this,16),26))||tgw,e))},eUe.Gh=function(){eoe(this),UH(QZ((eSp(),tvc),this)),evl(this),this.Bb|=1},eUe.$j=function(){return ek7(this)},eUe.nk=function(e,t){return this.b=0,this.a=null,ecz(this,e,t)},eUe.ok=function(e){yg(this,e)},eUe.Ib=function(){var e;return(64&this.Db)!=0?eCR(this):(e=new O1(eCR(this)),e.a+=" (iD: ",yG(e,(this.Bb&eZ1)!=0),e.a+=")",e.a)},eUe.b=0,Y5(eZ2,"EAttributeImpl",322),eTS(351,438,{105:1,92:1,90:1,138:1,147:1,191:1,56:1,108:1,49:1,97:1,351:1,150:1,114:1,115:1,676:1}),eUe.uk=function(e){return e.Tg()==this},eUe.Qg=function(e){return egn(this,e)},eUe.Rg=function(e,t){this.w=null,this.Db=t<<16|255&this.Db,this.Cb=e},eUe._g=function(e,t,n){var r;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.zb;case 2:return null!=this.D?this.D:this.B;case 3:return em4(this);case 4:return this.zj();case 5:return this.F;case 6:if(t)return etP(this);return z5(this);case 7:return this.A||(this.A=new OS(tgs,this,7)),this.A}return Qt(this,e-Y1(this.zh()),ee2((r=Pp(eaS(this,16),26))||this.zh(),e),t,n)},eUe.hh=function(e,t,n){var r,i,a;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),edF(this.Ab,e,n);case 6:return this.Cb&&(n=(i=this.Db>>16)>=0?egn(this,n):this.Cb.ih(this,-1-i,null,n)),eDg(this,e,6,n)}return(a=Pp(ee2((r=Pp(eaS(this,16),26))||this.zh(),t),66)).Nj().Qj(this,ehH(this),t-Y1(this.zh()),e,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 
0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),ep6(this.Ab,e,n);case 6:return eDg(this,null,6,n);case 7:return this.A||(this.A=new OS(tgs,this,7)),ep6(this.A,e,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||this.zh(),t),66)).Nj().Rj(this,ehH(this),t-Y1(this.zh()),e,n)},eUe.lh=function(e){var t;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return null!=this.zb;case 2:return null!=this.D&&this.D==this.F;case 3:return!!em4(this);case 4:return null!=this.zj();case 5:return null!=this.F&&this.F!=this.D&&this.F!=this.B;case 6:return!!z5(this);case 7:return!!this.A&&0!=this.A.i}return VP(this,e-Y1(this.zh()),ee2((t=Pp(eaS(this,16),26))||this.zh(),e))},eUe.sh=function(e,t){var n;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:GN(this,Lq(t));return;case 2:TF(this,Lq(t));return;case 5:eji(this,Lq(t));return;case 7:this.A||(this.A=new OS(tgs,this,7)),eRT(this.A),this.A||(this.A=new OS(tgs,this,7)),Y4(this.A,Pp(t,14));return}efL(this,e-Y1(this.zh()),ee2((n=Pp(eaS(this,16),26))||this.zh(),e),t)},eUe.zh=function(){return eBK(),tgE},eUe.Bh=function(e){var t;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:M4(this.Cb,179)&&(Pp(this.Cb,179).tb=null),er3(this,null);return;case 2:euc(this,null),enp(this,this.D);return;case 5:eji(this,null);return;case 7:this.A||(this.A=new OS(tgs,this,7)),eRT(this.A);return}ec6(this,e-Y1(this.zh()),ee2((t=Pp(eaS(this,16),26))||this.zh(),e))},eUe.yj=function(){var e;return -1==this.G&&(this.G=(e=etP(this))?ebv(e.Mh(),this):-1),this.G},eUe.zj=function(){return null},eUe.Aj=function(){return etP(this)},eUe.vk=function(){return this.v},eUe.Bj=function(){return em4(this)},eUe.Cj=function(){return null!=this.D?this.D:this.B},eUe.Dj=function(){return this.F},eUe.wj=function(e){return 
eNc(this,e)},eUe.wk=function(e){this.v=e},eUe.xk=function(e){eia(this,e)},eUe.yk=function(e){this.C=e},eUe.Lh=function(e){GN(this,e)},eUe.Ib=function(){return edb(this)},eUe.C=null,eUe.D=null,eUe.G=-1,Y5(eZ2,"EClassifierImpl",351),eTS(88,351,{105:1,92:1,90:1,26:1,138:1,147:1,191:1,56:1,108:1,49:1,97:1,88:1,351:1,150:1,473:1,114:1,115:1,676:1},c0),eUe.uk=function(e){return C7(this,e.Tg())},eUe._g=function(e,t,n){var r;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.zb;case 2:return null!=this.D?this.D:this.B;case 3:return em4(this);case 4:return null;case 5:return this.F;case 6:if(t)return etP(this);return z5(this);case 7:return this.A||(this.A=new OS(tgs,this,7)),this.A;case 8:return OQ(),(256&this.Bb)!=0;case 9:return OQ(),(512&this.Bb)!=0;case 10:return $E(this);case 11:return this.q||(this.q=new FQ(tgi,this,11,10)),this.q;case 12:return ePk(this);case 13:return ePl(this);case 14:return ePl(this),this.r;case 15:return ePk(this),this.k;case 16:return eSD(this);case 17:return eNQ(this);case 18:return eNT(this);case 19:return eOg(this);case 20:return ePk(this),this.o;case 21:return this.s||(this.s=new FQ(tm6,this,21,17)),this.s;case 22:return qt(this);case 23:return eCt(this)}return Qt(this,e-Y1((eBK(),tg_)),ee2((r=Pp(eaS(this,16),26))||tg_,e),t,n)},eUe.hh=function(e,t,n){var r,i,a;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),edF(this.Ab,e,n);case 6:return this.Cb&&(n=(i=this.Db>>16)>=0?egn(this,n):this.Cb.ih(this,-1-i,null,n)),eDg(this,e,6,n);case 11:return this.q||(this.q=new FQ(tgi,this,11,10)),edF(this.q,e,n);case 21:return this.s||(this.s=new FQ(tm6,this,21,17)),edF(this.s,e,n)}return(a=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tg_),t),66)).Nj().Qj(this,ehH(this),t-Y1((eBK(),tg_)),e,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),ep6(this.Ab,e,n);case 6:return eDg(this,null,6,n);case 7:return this.A||(this.A=new OS(tgs,this,7)),ep6(this.A,e,n);case 
11:return this.q||(this.q=new FQ(tgi,this,11,10)),ep6(this.q,e,n);case 21:return this.s||(this.s=new FQ(tm6,this,21,17)),ep6(this.s,e,n);case 22:return ep6(qt(this),e,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tg_),t),66)).Nj().Rj(this,ehH(this),t-Y1((eBK(),tg_)),e,n)},eUe.lh=function(e){var t;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return null!=this.zb;case 2:return null!=this.D&&this.D==this.F;case 3:return!!em4(this);case 4:return!1;case 5:return null!=this.F&&this.F!=this.D&&this.F!=this.B;case 6:return!!z5(this);case 7:return!!this.A&&0!=this.A.i;case 8:return(256&this.Bb)!=0;case 9:return(512&this.Bb)!=0;case 10:return!!this.u&&0!=qt(this.u.a).i&&!(this.n&&ebV(this.n));case 11:return!!this.q&&0!=this.q.i;case 12:return 0!=ePk(this).i;case 13:return 0!=ePl(this).i;case 14:return ePl(this),0!=this.r.i;case 15:return ePk(this),0!=this.k.i;case 16:return 0!=eSD(this).i;case 17:return 0!=eNQ(this).i;case 18:return 0!=eNT(this).i;case 19:return 0!=eOg(this).i;case 20:return ePk(this),!!this.o;case 21:return!!this.s&&0!=this.s.i;case 22:return!!this.n&&ebV(this.n);case 23:return 0!=eCt(this).i}return VP(this,e-Y1((eBK(),tg_)),ee2((t=Pp(eaS(this,16),26))||tg_,e))},eUe.oh=function(e){var t;return(t=null==this.i||this.q&&0!=this.q.i?null:eAh(this,e))||eF9(this,e)},eUe.sh=function(e,t){var n;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:GN(this,Lq(t));return;case 2:TF(this,Lq(t));return;case 5:eji(this,Lq(t));return;case 7:this.A||(this.A=new OS(tgs,this,7)),eRT(this.A),this.A||(this.A=new OS(tgs,this,7)),Y4(this.A,Pp(t,14));return;case 8:ela(this,gN(LK(t)));return;case 9:elu(this,gN(LK(t)));return;case 10:eRP($E(this)),Y4($E(this),Pp(t,14));return;case 11:this.q||(this.q=new FQ(tgi,this,11,10)),eRT(this.q),this.q||(this.q=new FQ(tgi,this,11,10)),Y4(this.q,Pp(t,14));return;case 21:this.s||(this.s=new FQ(tm6,this,21,17)),eRT(this.s),this.s||(this.s=new 
FQ(tm6,this,21,17)),Y4(this.s,Pp(t,14));return;case 22:eRT(qt(this)),Y4(qt(this),Pp(t,14));return}efL(this,e-Y1((eBK(),tg_)),ee2((n=Pp(eaS(this,16),26))||tg_,e),t)},eUe.zh=function(){return eBK(),tg_},eUe.Bh=function(e){var t;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:M4(this.Cb,179)&&(Pp(this.Cb,179).tb=null),er3(this,null);return;case 2:euc(this,null),enp(this,this.D);return;case 5:eji(this,null);return;case 7:this.A||(this.A=new OS(tgs,this,7)),eRT(this.A);return;case 8:ela(this,!1);return;case 9:elu(this,!1);return;case 10:this.u&&eRP(this.u);return;case 11:this.q||(this.q=new FQ(tgi,this,11,10)),eRT(this.q);return;case 21:this.s||(this.s=new FQ(tm6,this,21,17)),eRT(this.s);return;case 22:this.n&&eRT(this.n);return}ec6(this,e-Y1((eBK(),tg_)),ee2((t=Pp(eaS(this,16),26))||tg_,e))},eUe.Gh=function(){var e,t;if(ePk(this),ePl(this),eSD(this),eNQ(this),eNT(this),eOg(this),eCt(this),ZG(Pw(Zd(this))),this.s)for(e=0,t=this.s.i;e=0;--t)etj(this,t);return edj(this,e)},eUe.Xj=function(){eRT(this)},eUe.oi=function(e,t){return env(this,e,t)},Y5(eJz,"EcoreEList",622),eTS(496,622,eJ9,PK),eUe.ai=function(){return!1},eUe.aj=function(){return this.c},eUe.bj=function(){return!1},eUe.Fk=function(){return!0},eUe.hi=function(){return!0},eUe.li=function(e,t){return t},eUe.ni=function(){return!1},eUe.c=0,Y5(eJz,"EObjectEList",496),eTS(85,496,eJ9,O_),eUe.bj=function(){return!0},eUe.Dk=function(){return!1},eUe.rk=function(){return!0},Y5(eJz,"EObjectContainmentEList",85),eTS(545,85,eJ9,OE),eUe.ci=function(){this.b=!0},eUe.fj=function(){return this.b},eUe.Xj=function(){var e;eRT(this),TO(this.e)?(e=this.b,this.b=!1,eam(this.e,new ZB(this.e,2,this.c,e,!1))):this.b=!1},eUe.b=!1,Y5(eJz,"EObjectContainmentEList/Unsettable",545),eTS(1140,545,eJ9,j4),eUe.ii=function(e,t){var n,r;return n=Pp(elR(this,e,t),87),TO(this.e)&&bz(this,new JU(this.a,7,(eBK(),tgS),ell(t),M4(r=n.c,88)?Pp(r,26):tgI,e)),n},eUe.jj=function(e,t){return 
edB(this,Pp(e,87),t)},eUe.kj=function(e,t){return edY(this,Pp(e,87),t)},eUe.lj=function(e,t,n){return eyl(this,Pp(e,87),Pp(t,87),n)},eUe.Zi=function(e,t,n,r,i){switch(e){case 3:return Gt(this,e,t,n,r,this.i>1);case 5:return Gt(this,e,t,n,r,this.i-Pp(n,15).gc()>0);default:return new Q$(this.e,e,this.c,t,n,r,!0)}},eUe.ij=function(){return!0},eUe.fj=function(){return ebV(this)},eUe.Xj=function(){eRT(this)},Y5(eZ2,"EClassImpl/1",1140),eTS(1154,1153,eJS),eUe.ui=function(e){var t,n,r,i,a,o,s;if(8!=(n=e.xi())){if(0==(r=epM(e)))switch(n){case 1:case 9:null!=(s=e.Bi())&&((t=Zd(Pp(s,473))).c||(t.c=new sk),eeu(t.c,e.Ai())),null!=(o=e.zi())&&(1&(i=Pp(o,473)).Bb)==0&&((t=Zd(i)).c||(t.c=new sk),JL(t.c,Pp(e.Ai(),26)));break;case 3:null!=(o=e.zi())&&(1&(i=Pp(o,473)).Bb)==0&&((t=Zd(i)).c||(t.c=new sk),JL(t.c,Pp(e.Ai(),26)));break;case 5:if(null!=(o=e.zi()))for(a=Pp(o,14).Kc();a.Ob();)(1&(i=Pp(a.Pb(),473)).Bb)==0&&((t=Zd(i)).c||(t.c=new sk),JL(t.c,Pp(e.Ai(),26)));break;case 4:null!=(s=e.Bi())&&(1&(i=Pp(s,473)).Bb)==0&&((t=Zd(i)).c||(t.c=new sk),eeu(t.c,e.Ai()));break;case 6:if(null!=(s=e.Bi()))for(a=Pp(s,14).Kc();a.Ob();)(1&(i=Pp(a.Pb(),473)).Bb)==0&&((t=Zd(i)).c||(t.c=new sk),eeu(t.c,e.Ai()))}this.Hk(r)}},eUe.Hk=function(e){eCO(this,e)},eUe.b=63,Y5(eZ2,"ESuperAdapter",1154),eTS(1155,1154,eJS,pR),eUe.Hk=function(e){eko(this,e)},Y5(eZ2,"EClassImpl/10",1155),eTS(1144,696,eJ9),eUe.Vh=function(e,t){return ew2(this,e,t)},eUe.Wh=function(e){return emp(this,e)},eUe.Xh=function(e,t){ecW(this,e,t)},eUe.Yh=function(e){Zz(this,e)},eUe.pi=function(e){return J5(this,e)},eUe.mi=function(e,t){return ees(this,e,t)},eUe.lk=function(e,t){throw p7(new bO)},eUe.Zh=function(){return new AY(this)},eUe.$h=function(){return new AB(this)},eUe._h=function(e){return enH(this,e)},eUe.mk=function(e,t){throw p7(new bO)},eUe.Wj=function(e){return this},eUe.fj=function(){return 0!=this.i},eUe.Wb=function(e){throw p7(new bO)},eUe.Xj=function(){throw p7(new 
bO)},Y5(eJz,"EcoreEList/UnmodifiableEList",1144),eTS(319,1144,eJ9,xQ),eUe.ni=function(){return!1},Y5(eJz,"EcoreEList/UnmodifiableEList/FastCompare",319),eTS(1147,319,eJ9,eo8),eUe.Xc=function(e){var t,n,r;if(M4(e,170)&&-1!=(n=(t=Pp(e,170)).aj())){for(r=this.i;n4){if(!this.wj(e))return!1;if(this.rk()){if(s=(n=(r=Pp(e,49)).Ug())==this.b&&(this.Dk()?r.Og(r.Vg(),Pp(ee2($S(this.b),this.aj()).Yj(),26).Bj())==ebY(Pp(ee2($S(this.b),this.aj()),18)).n:-1-r.Vg()==this.aj()),this.Ek()&&!s&&!n&&r.Zg()){for(i=0;i1||-1==r)},eUe.Dk=function(){var e,t,n;return t=ee2($S(this.b),this.aj()),!!M4(t,99)&&!!(n=ebY(e=Pp(t,18)))},eUe.Ek=function(){var e,t;return t=ee2($S(this.b),this.aj()),!!M4(t,99)&&((e=Pp(t,18)).Bb&eH3)!=0},eUe.Xc=function(e){var t,n,r,i;if((r=this.Qi(e))>=0)return r;if(this.Fk()){for(n=0,i=this.Vi();n=0;--e)ejc(this,e,this.Oi(e));return this.Wi()},eUe.Qc=function(e){var t;if(this.Ek())for(t=this.Vi()-1;t>=0;--t)ejc(this,t,this.Oi(t));return this.Xi(e)},eUe.Xj=function(){eRP(this)},eUe.oi=function(e,t){return J6(this,e,t)},Y5(eJz,"DelegatingEcoreEList",742),eTS(1150,742,eQn,Cw),eUe.Hi=function(e,t){LP(this,e,Pp(t,26))},eUe.Ii=function(e){Mt(this,Pp(e,26))},eUe.Oi=function(e){var t,n;return n=(t=Pp(etj(qt(this.a),e),87)).c,M4(n,88)?Pp(n,26):(eBK(),tgI)},eUe.Ti=function(e){var t,n;return n=(t=Pp(eLN(qt(this.a),e),87)).c,M4(n,88)?Pp(n,26):(eBK(),tgI)},eUe.Ui=function(e,t){return emm(this,e,Pp(t,26))},eUe.ai=function(){return!1},eUe.Zi=function(e,t,n,r,i){return null},eUe.Ji=function(){return new pF(this)},eUe.Ki=function(){eRT(qt(this.a))},eUe.Li=function(e){return ec7(this,e)},eUe.Mi=function(e){var t,n;for(n=e.Kc();n.Ob();)if(!ec7(this,t=n.Pb()))return!1;return!0},eUe.Ni=function(e){var t,n,r;if(M4(e,15)&&(r=Pp(e,15)).gc()==qt(this.a).i){for(t=r.Kc(),n=new Ow(this);t.Ob();)if(xc(t.Pb())!==xc(epH(n)))return!1;return!0}return!1},eUe.Pi=function(){var e,t,n,r,i;for(n=1,t=new 
Ow(qt(this.a));t.e!=t.i.gc();)e=Pp(epH(t),87),r=M4(i=e.c,88)?Pp(i,26):(eBK(),tgI),n=31*n+(r?Ao(r):0);return n},eUe.Qi=function(e){var t,n,r,i;for(r=0,n=new Ow(qt(this.a));n.e!=n.i.gc();){if(t=Pp(epH(n),87),xc(e)===xc(M4(i=t.c,88)?Pp(i,26):(eBK(),tgI)))return r;++r}return -1},eUe.Ri=function(){return 0==qt(this.a).i},eUe.Si=function(){return null},eUe.Vi=function(){return qt(this.a).i},eUe.Wi=function(){var e,t,n,r,i,a;for(a=qt(this.a).i,i=Je(e1R,eUp,1,a,5,1),n=0,t=new Ow(qt(this.a));t.e!=t.i.gc();)e=Pp(epH(t),87),i[n++]=M4(r=e.c,88)?Pp(r,26):(eBK(),tgI);return i},eUe.Xi=function(e){var t,n,r,i,a,o,s;for(s=qt(this.a).i,e.lengths&&Bc(e,s,null),r=0,n=new Ow(qt(this.a));n.e!=n.i.gc();)t=Pp(epH(n),87),a=M4(o=t.c,88)?Pp(o,26):(eBK(),tgI),Bc(e,r++,a);return e},eUe.Yi=function(){var e,t,n,r,i;for(i=new vs,i.a+="[",e=qt(this.a),t=0,r=qt(this.a).i;t>16)>=0?egn(this,n):this.Cb.ih(this,-1-i,null,n)),eDg(this,e,6,n);case 9:return this.a||(this.a=new FQ(tgn,this,9,5)),edF(this.a,e,n)}return(a=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgx),t),66)).Nj().Qj(this,ehH(this),t-Y1((eBK(),tgx)),e,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),ep6(this.Ab,e,n);case 6:return eDg(this,null,6,n);case 7:return this.A||(this.A=new OS(tgs,this,7)),ep6(this.A,e,n);case 9:return this.a||(this.a=new FQ(tgn,this,9,5)),ep6(this.a,e,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgx),t),66)).Nj().Rj(this,ehH(this),t-Y1((eBK(),tgx)),e,n)},eUe.lh=function(e){var t;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return null!=this.zb;case 2:return null!=this.D&&this.D==this.F;case 3:return!!em4(this);case 4:return!!euS(this);case 5:return null!=this.F&&this.F!=this.D&&this.F!=this.B;case 6:return!!z5(this);case 7:return!!this.A&&0!=this.A.i;case 8:return(256&this.Bb)==0;case 9:return!!this.a&&0!=this.a.i}return VP(this,e-Y1((eBK(),tgx)),ee2((t=Pp(eaS(this,16),26))||tgx,e))},eUe.sh=function(e,t){var n;switch(e){case 0:this.Ab||(this.Ab=new 
FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:GN(this,Lq(t));return;case 2:TF(this,Lq(t));return;case 5:eji(this,Lq(t));return;case 7:this.A||(this.A=new OS(tgs,this,7)),eRT(this.A),this.A||(this.A=new OS(tgs,this,7)),Y4(this.A,Pp(t,14));return;case 8:elo(this,gN(LK(t)));return;case 9:this.a||(this.a=new FQ(tgn,this,9,5)),eRT(this.a),this.a||(this.a=new FQ(tgn,this,9,5)),Y4(this.a,Pp(t,14));return}efL(this,e-Y1((eBK(),tgx)),ee2((n=Pp(eaS(this,16),26))||tgx,e),t)},eUe.zh=function(){return eBK(),tgx},eUe.Bh=function(e){var t;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:M4(this.Cb,179)&&(Pp(this.Cb,179).tb=null),er3(this,null);return;case 2:euc(this,null),enp(this,this.D);return;case 5:eji(this,null);return;case 7:this.A||(this.A=new OS(tgs,this,7)),eRT(this.A);return;case 8:elo(this,!0);return;case 9:this.a||(this.a=new FQ(tgn,this,9,5)),eRT(this.a);return}ec6(this,e-Y1((eBK(),tgx)),ee2((t=Pp(eaS(this,16),26))||tgx,e))},eUe.Gh=function(){var e,t;if(this.a)for(e=0,t=this.a.i;e>16==5?Pp(this.Cb,671):null}return Qt(this,e-Y1((eBK(),tgT)),ee2((r=Pp(eaS(this,16),26))||tgT,e),t,n)},eUe.hh=function(e,t,n){var r,i,a;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),edF(this.Ab,e,n);case 5:return this.Cb&&(n=(i=this.Db>>16)>=0?eg3(this,n):this.Cb.ih(this,-1-i,null,n)),eDg(this,e,5,n)}return(a=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgT),t),66)).Nj().Qj(this,ehH(this),t-Y1((eBK(),tgT)),e,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),ep6(this.Ab,e,n);case 5:return eDg(this,null,5,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgT),t),66)).Nj().Rj(this,ehH(this),t-Y1((eBK(),tgT)),e,n)},eUe.lh=function(e){var t;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return null!=this.zb;case 2:return 0!=this.d;case 3:return!!this.b;case 4:return null!=this.c;case 
5:return!!(this.Db>>16==5?Pp(this.Cb,671):null)}return VP(this,e-Y1((eBK(),tgT)),ee2((t=Pp(eaS(this,16),26))||tgT,e))},eUe.sh=function(e,t){var n;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:er3(this,Lq(t));return;case 2:enf(this,Pp(t,19).a);return;case 3:exP(this,Pp(t,1940));return;case 4:erc(this,Lq(t));return}efL(this,e-Y1((eBK(),tgT)),ee2((n=Pp(eaS(this,16),26))||tgT,e),t)},eUe.zh=function(){return eBK(),tgT},eUe.Bh=function(e){var t;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:er3(this,null);return;case 2:enf(this,0);return;case 3:exP(this,null);return;case 4:erc(this,null);return}ec6(this,e-Y1((eBK(),tgT)),ee2((t=Pp(eaS(this,16),26))||tgT,e))},eUe.Ib=function(){var e;return null==(e=this.c)?this.zb:e},eUe.b=null,eUe.c=null,eUe.d=0,Y5(eZ2,"EEnumLiteralImpl",573);var tgl=RL(eZ2,"EFactoryImpl/InternalEDateTimeFormat");eTS(489,1,{2015:1},pY),Y5(eZ2,"EFactoryImpl/1ClientInternalEDateTimeFormat",489),eTS(241,115,{105:1,92:1,90:1,87:1,56:1,108:1,49:1,97:1,241:1,114:1,115:1},p5),eUe.Sg=function(e,t,n){var r;return n=eDg(this,e,t,n),this.e&&M4(e,170)&&(r=eOl(this,this.e))!=this.c&&(n=eFr(this,r,n)),n},eUe._g=function(e,t,n){var r;switch(e){case 0:return this.f;case 1:return this.d||(this.d=new O_(tgr,this,1)),this.d;case 2:if(t)return eD5(this);return this.c;case 3:return this.b;case 4:return this.e;case 5:if(t)return eb1(this);return this.a}return Qt(this,e-Y1((eBK(),tgO)),ee2((r=Pp(eaS(this,16),26))||tgO,e),t,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 0:return ecg(this,null,n);case 1:return this.d||(this.d=new O_(tgr,this,1)),ep6(this.d,e,n);case 3:return ecm(this,null,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgO),t),66)).Nj().Rj(this,ehH(this),t-Y1((eBK(),tgO)),e,n)},eUe.lh=function(e){var t;switch(e){case 0:return!!this.f;case 1:return!!this.d&&0!=this.d.i;case 2:return!!this.c;case 3:return!!this.b;case 
4:return!!this.e;case 5:return!!this.a}return VP(this,e-Y1((eBK(),tgO)),ee2((t=Pp(eaS(this,16),26))||tgO,e))},eUe.sh=function(e,t){var n;switch(e){case 0:eyK(this,Pp(t,87));return;case 1:this.d||(this.d=new O_(tgr,this,1)),eRT(this.d),this.d||(this.d=new O_(tgr,this,1)),Y4(this.d,Pp(t,14));return;case 3:eyW(this,Pp(t,87));return;case 4:e_U(this,Pp(t,836));return;case 5:etV(this,Pp(t,138));return}efL(this,e-Y1((eBK(),tgO)),ee2((n=Pp(eaS(this,16),26))||tgO,e),t)},eUe.zh=function(){return eBK(),tgO},eUe.Bh=function(e){var t;switch(e){case 0:eyK(this,null);return;case 1:this.d||(this.d=new O_(tgr,this,1)),eRT(this.d);return;case 3:eyW(this,null);return;case 4:e_U(this,null);return;case 5:etV(this,null);return}ec6(this,e-Y1((eBK(),tgO)),ee2((t=Pp(eaS(this,16),26))||tgO,e))},eUe.Ib=function(){var e;return e=new O0(eMT(this)),e.a+=" (expression: ",ePB(this,e),e.a+=")",e.a},Y5(eZ2,"EGenericTypeImpl",241),eTS(1969,1964,eQr),eUe.Xh=function(e,t){Ch(this,e,t)},eUe.lk=function(e,t){return Ch(this,this.gc(),e),t},eUe.pi=function(e){return ep3(this.Gi(),e)},eUe.Zh=function(){return this.$h()},eUe.Gi=function(){return new pV(this)},eUe.$h=function(){return this._h(0)},eUe._h=function(e){return this.Gi().Zc(e)},eUe.mk=function(e,t){return eds(this,e,!0),t},eUe.ii=function(e,t){var n,r;return r=egW(this,t),(n=this.Zc(e)).Rb(r),r},eUe.ji=function(e,t){var n;eds(this,t,!0),(n=this.Zc(e)).Rb(t)},Y5(eJz,"AbstractSequentialInternalEList",1969),eTS(486,1969,eQr,AA),eUe.pi=function(e){return ep3(this.Gi(),e)},eUe.Zh=function(){return null==this.b?(_2(),_2(),tgq):this.Jk()},eUe.Gi=function(){return new x0(this.a,this.b)},eUe.$h=function(){return null==this.b?(_2(),_2(),tgq):this.Jk()},eUe._h=function(e){var t,n;if(null==this.b){if(e<0||e>1)throw p7(new gE(eJT+e+", size=0"));return 
_2(),_2(),tgq}for(t=0,n=this.Jk();t0;)if(t=this.c[--this.d],(!this.e||t.Gj()!=e6d||0!=t.aj())&&(!this.Mk()||this.b.mh(t))){if(a=this.b.bh(t,this.Lk()),this.f=(_4(),Pp(t,66).Oj()),this.f||t.$j()){if(this.Lk()?(r=Pp(a,15),this.k=r):(r=Pp(a,69),this.k=this.j=r),M4(this.k,54)?(this.o=this.k.gc(),this.n=this.o):this.p=this.j?this.j._h(this.k.gc()):this.k.Zc(this.k.gc()),this.p?eSs(this,this.p):eSQ(this))return i=this.p?this.p.Ub():this.j?this.j.pi(--this.n):this.k.Xb(--this.n),this.f?((e=Pp(i,72)).ak(),n=e.dd(),this.i=n):(n=i,this.i=n),this.g=-3,!0}else if(null!=a)return this.k=null,this.p=null,n=a,this.i=n,this.g=-2,!0}return this.k=null,this.p=null,this.g=-1,!1}},eUe.Pb=function(){return eaO(this)},eUe.Tb=function(){return this.a},eUe.Ub=function(){var e;if(this.g<-1||this.Sb())return--this.a,this.g=0,e=this.i,this.Sb(),e;throw p7(new bC)},eUe.Vb=function(){return this.a-1},eUe.Qb=function(){throw p7(new bO)},eUe.Lk=function(){return!1},eUe.Wb=function(e){throw p7(new bO)},eUe.Mk=function(){return!0},eUe.a=0,eUe.d=0,eUe.f=!1,eUe.g=0,eUe.n=0,eUe.o=0,Y5(eJz,"EContentsEList/FeatureIteratorImpl",279),eTS(697,279,eQi,Lv),eUe.Lk=function(){return!0},Y5(eJz,"EContentsEList/ResolvingFeatureIteratorImpl",697),eTS(1157,697,eQi,Lw),eUe.Mk=function(){return!1},Y5(eZ2,"ENamedElementImpl/1/1",1157),eTS(1158,279,eQi,Ly),eUe.Mk=function(){return!1},Y5(eZ2,"ENamedElementImpl/1/2",1158),eTS(36,143,eJx,qo,qs,FX,JB,Q$,ZB,en_,WX,enE,WJ,Zj,WQ,enx,W1,ZF,W0,enS,W2,FJ,JU,H0,enk,W3,ZY,W4),eUe._i=function(){return JA(this)},eUe.gj=function(){var e;return(e=JA(this))?e.zj():null},eUe.yi=function(e){return -1==this.b&&this.a&&(this.b=this.c.Xg(this.a.aj(),this.a.Gj())),this.c.Og(this.b,e)},eUe.Ai=function(){return this.c},eUe.hj=function(){var e;return!!(e=JA(this))&&e.Kj()},eUe.b=-1,Y5(eZ2,"ENotificationImpl",36),eTS(399,284,{105:1,92:1,90:1,147:1,191:1,56:1,59:1,108:1,472:1,49:1,97:1,150:1,399:1,284:1,114:1,115:1},mD),eUe.Qg=function(e){return evu(this,e)},eUe._g=function(e,t,n){var 
r,i,a;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.zb;case 2:return OQ(),(256&this.Bb)!=0;case 3:return OQ(),(512&this.Bb)!=0;case 4:return ell(this.s);case 5:return ell(this.t);case 6:return OQ(),(a=this.t)>1||-1==a;case 7:return OQ(),(i=this.s)>=1;case 8:if(t)return evl(this);return this.r;case 9:return this.q;case 10:return this.Db>>16==10?Pp(this.Cb,26):null;case 11:return this.d||(this.d=new OS(tgs,this,11)),this.d;case 12:return this.c||(this.c=new FQ(tga,this,12,10)),this.c;case 13:return this.a||(this.a=new C_(this,this)),this.a;case 14:return QX(this)}return Qt(this,e-Y1((eBK(),tgD)),ee2((r=Pp(eaS(this,16),26))||tgD,e),t,n)},eUe.hh=function(e,t,n){var r,i,a;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),edF(this.Ab,e,n);case 10:return this.Cb&&(n=(i=this.Db>>16)>=0?evu(this,n):this.Cb.ih(this,-1-i,null,n)),eDg(this,e,10,n);case 12:return this.c||(this.c=new FQ(tga,this,12,10)),edF(this.c,e,n)}return(a=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgD),t),66)).Nj().Qj(this,ehH(this),t-Y1((eBK(),tgD)),e,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),ep6(this.Ab,e,n);case 9:return Y3(this,n);case 10:return eDg(this,null,10,n);case 11:return this.d||(this.d=new OS(tgs,this,11)),ep6(this.d,e,n);case 12:return this.c||(this.c=new FQ(tga,this,12,10)),ep6(this.c,e,n);case 14:return ep6(QX(this),e,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgD),t),66)).Nj().Rj(this,ehH(this),t-Y1((eBK(),tgD)),e,n)},eUe.lh=function(e){var t,n,r;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return null!=this.zb;case 2:return(256&this.Bb)==0;case 3:return(512&this.Bb)==0;case 4:return 0!=this.s;case 5:return 1!=this.t;case 6:return(r=this.t)>1||-1==r;case 7:return(n=this.s)>=1;case 8:return!!this.r&&!this.q.e&&0==BX(this.q).i;case 9:return!!this.q&&!(this.r&&!this.q.e&&0==BX(this.q).i);case 10:return!!(this.Db>>16==10?Pp(this.Cb,26):null);case 
11:return!!this.d&&0!=this.d.i;case 12:return!!this.c&&0!=this.c.i;case 13:return!!this.a&&0!=QX(this.a.a).i&&!(this.b&&ebq(this.b));case 14:return!!this.b&&ebq(this.b)}return VP(this,e-Y1((eBK(),tgD)),ee2((t=Pp(eaS(this,16),26))||tgD,e))},eUe.sh=function(e,t){var n,r;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:er3(this,Lq(t));return;case 2:eli(this,gN(LK(t)));return;case 3:els(this,gN(LK(t)));return;case 4:end(this,Pp(t,19).a);return;case 5:enh(this,Pp(t,19).a);return;case 8:eu2(this,Pp(t,138));return;case 9:(r=ew3(this,Pp(t,87),null))&&r.Fi();return;case 11:this.d||(this.d=new OS(tgs,this,11)),eRT(this.d),this.d||(this.d=new OS(tgs,this,11)),Y4(this.d,Pp(t,14));return;case 12:this.c||(this.c=new FQ(tga,this,12,10)),eRT(this.c),this.c||(this.c=new FQ(tga,this,12,10)),Y4(this.c,Pp(t,14));return;case 13:this.a||(this.a=new C_(this,this)),eRP(this.a),this.a||(this.a=new C_(this,this)),Y4(this.a,Pp(t,14));return;case 14:eRT(QX(this)),Y4(QX(this),Pp(t,14));return}efL(this,e-Y1((eBK(),tgD)),ee2((n=Pp(eaS(this,16),26))||tgD,e),t)},eUe.zh=function(){return eBK(),tgD},eUe.Bh=function(e){var t,n;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:er3(this,null);return;case 2:eli(this,!0);return;case 3:els(this,!0);return;case 4:end(this,0);return;case 5:enh(this,1);return;case 8:eu2(this,null);return;case 9:(n=ew3(this,null,null))&&n.Fi();return;case 11:this.d||(this.d=new OS(tgs,this,11)),eRT(this.d);return;case 12:this.c||(this.c=new FQ(tga,this,12,10)),eRT(this.c);return;case 13:this.a&&eRP(this.a);return;case 14:this.b&&eRT(this.b);return}ec6(this,e-Y1((eBK(),tgD)),ee2((t=Pp(eaS(this,16),26))||tgD,e))},eUe.Gh=function(){var e,t;if(this.c)for(e=0,t=this.c.i;es&&Bc(e,s,null),r=0,n=new Ow(QX(this.a));n.e!=n.i.gc();)a=(o=(t=Pp(epH(n),87)).c)||(eBK(),tgA),Bc(e,r++,a);return e},eUe.Yi=function(){var e,t,n,r,i;for(i=new 
vs,i.a+="[",e=QX(this.a),t=0,r=QX(this.a).i;t1);case 5:return Gt(this,e,t,n,r,this.i-Pp(n,15).gc()>0);default:return new Q$(this.e,e,this.c,t,n,r,!0)}},eUe.ij=function(){return!0},eUe.fj=function(){return ebq(this)},eUe.Xj=function(){eRT(this)},Y5(eZ2,"EOperationImpl/2",1341),eTS(498,1,{1938:1,498:1},k5),Y5(eZ2,"EPackageImpl/1",498),eTS(16,85,eJ9,FQ),eUe.zk=function(){return this.d},eUe.Ak=function(){return this.b},eUe.Dk=function(){return!0},eUe.b=0,Y5(eJz,"EObjectContainmentWithInverseEList",16),eTS(353,16,eJ9,Ia),eUe.Ek=function(){return!0},eUe.li=function(e,t){return ex7(this,e,Pp(t,56))},Y5(eJz,"EObjectContainmentWithInverseEList/Resolving",353),eTS(298,353,eJ9,Fq),eUe.ci=function(){this.a.tb=null},Y5(eZ2,"EPackageImpl/2",298),eTS(1228,1,{},sh),Y5(eZ2,"EPackageImpl/3",1228),eTS(718,43,e$s,mP),eUe._b=function(e){return xd(e)?$r(this,e):!!$I(this.f,e)},Y5(eZ2,"EPackageRegistryImpl",718),eTS(509,284,{105:1,92:1,90:1,147:1,191:1,56:1,2017:1,108:1,472:1,49:1,97:1,150:1,509:1,284:1,114:1,115:1},mN),eUe.Qg=function(e){return evc(this,e)},eUe._g=function(e,t,n){var r,i,a;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.zb;case 2:return OQ(),(256&this.Bb)!=0;case 3:return OQ(),(512&this.Bb)!=0;case 4:return ell(this.s);case 5:return ell(this.t);case 6:return OQ(),(a=this.t)>1||-1==a;case 7:return OQ(),(i=this.s)>=1;case 8:if(t)return evl(this);return this.r;case 9:return this.q;case 10:return this.Db>>16==10?Pp(this.Cb,59):null}return Qt(this,e-Y1((eBK(),tgR)),ee2((r=Pp(eaS(this,16),26))||tgR,e),t,n)},eUe.hh=function(e,t,n){var r,i,a;switch(t){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),edF(this.Ab,e,n);case 10:return this.Cb&&(n=(i=this.Db>>16)>=0?evc(this,n):this.Cb.ih(this,-1-i,null,n)),eDg(this,e,10,n)}return(a=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgR),t),66)).Nj().Qj(this,ehH(this),t-Y1((eBK(),tgR)),e,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 0:return this.Ab||(this.Ab=new 
FQ(tm4,this,0,3)),ep6(this.Ab,e,n);case 9:return Y3(this,n);case 10:return eDg(this,null,10,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgR),t),66)).Nj().Rj(this,ehH(this),t-Y1((eBK(),tgR)),e,n)},eUe.lh=function(e){var t,n,r;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return null!=this.zb;case 2:return(256&this.Bb)==0;case 3:return(512&this.Bb)==0;case 4:return 0!=this.s;case 5:return 1!=this.t;case 6:return(r=this.t)>1||-1==r;case 7:return(n=this.s)>=1;case 8:return!!this.r&&!this.q.e&&0==BX(this.q).i;case 9:return!!this.q&&!(this.r&&!this.q.e&&0==BX(this.q).i);case 10:return!!(this.Db>>16==10?Pp(this.Cb,59):null)}return VP(this,e-Y1((eBK(),tgR)),ee2((t=Pp(eaS(this,16),26))||tgR,e))},eUe.zh=function(){return eBK(),tgR},Y5(eZ2,"EParameterImpl",509),eTS(99,449,{105:1,92:1,90:1,147:1,191:1,56:1,18:1,170:1,66:1,108:1,472:1,49:1,97:1,150:1,99:1,449:1,284:1,114:1,115:1,677:1},LB),eUe._g=function(e,t,n){var r,i,a,o;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.zb;case 2:return OQ(),(256&this.Bb)!=0;case 3:return OQ(),(512&this.Bb)!=0;case 4:return ell(this.s);case 5:return ell(this.t);case 6:return OQ(),(o=this.t)>1||-1==o;case 7:return OQ(),(i=this.s)>=1;case 8:if(t)return evl(this);return this.r;case 9:return this.q;case 10:return OQ(),(this.Bb&eXt)!=0;case 11:return OQ(),(this.Bb&eJq)!=0;case 12:return OQ(),(this.Bb&eH0)!=0;case 13:return this.j;case 14:return eOI(this);case 15:return OQ(),(this.Bb&eJV)!=0;case 16:return OQ(),(this.Bb&eUR)!=0;case 17:return z6(this);case 18:return OQ(),(this.Bb&eZ1)!=0;case 19:return OQ(),!!(a=ebY(this))&&(a.Bb&eZ1)!=0;case 20:return OQ(),(this.Bb&eH3)!=0;case 21:if(t)return ebY(this);return this.b;case 22:if(t)return esd(this);return ZS(this);case 23:return this.a||(this.a=new OT(tm9,this,23)),this.a}return Qt(this,e-Y1((eBK(),tgj)),ee2((r=Pp(eaS(this,16),26))||tgj,e),t,n)},eUe.lh=function(e){var t,n,r,i;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return 
null!=this.zb;case 2:return(256&this.Bb)==0;case 3:return(512&this.Bb)==0;case 4:return 0!=this.s;case 5:return 1!=this.t;case 6:return(i=this.t)>1||-1==i;case 7:return(n=this.s)>=1;case 8:return!!this.r&&!this.q.e&&0==BX(this.q).i;case 9:return!!this.q&&!(this.r&&!this.q.e&&0==BX(this.q).i);case 10:return(this.Bb&eXt)==0;case 11:return(this.Bb&eJq)!=0;case 12:return(this.Bb&eH0)!=0;case 13:return null!=this.j;case 14:return null!=eOI(this);case 15:return(this.Bb&eJV)!=0;case 16:return(this.Bb&eUR)!=0;case 17:return!!z6(this);case 18:return(this.Bb&eZ1)!=0;case 19:return!!(r=ebY(this))&&(r.Bb&eZ1)!=0;case 20:return(this.Bb&eH3)==0;case 21:return!!this.b;case 22:return!!ZS(this);case 23:return!!this.a&&0!=this.a.i}return VP(this,e-Y1((eBK(),tgj)),ee2((t=Pp(eaS(this,16),26))||tgj,e))},eUe.sh=function(e,t){var n,r;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:GD(this,Lq(t));return;case 2:eli(this,gN(LK(t)));return;case 3:els(this,gN(LK(t)));return;case 4:end(this,Pp(t,19).a);return;case 5:enh(this,Pp(t,19).a);return;case 8:eu2(this,Pp(t,138));return;case 9:(r=ew3(this,Pp(t,87),null))&&r.Fi();return;case 10:elF(this,gN(LK(t)));return;case 11:elU(this,gN(LK(t)));return;case 12:elY(this,gN(LK(t)));return;case 13:xi(this,Lq(t));return;case 15:elB(this,gN(LK(t)));return;case 16:elZ(this,gN(LK(t)));return;case 18:GI(this,gN(LK(t)));return;case 20:elQ(this,gN(LK(t)));return;case 21:erM(this,Pp(t,18));return;case 23:this.a||(this.a=new OT(tm9,this,23)),eRT(this.a),this.a||(this.a=new OT(tm9,this,23)),Y4(this.a,Pp(t,14));return}efL(this,e-Y1((eBK(),tgj)),ee2((n=Pp(eaS(this,16),26))||tgj,e),t)},eUe.zh=function(){return eBK(),tgj},eUe.Bh=function(e){var t,n;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:M4(this.Cb,88)&&eko(Zd(Pp(this.Cb,88)),4),er3(this,null);return;case 2:eli(this,!0);return;case 3:els(this,!0);return;case 
4:end(this,0);return;case 5:enh(this,1);return;case 8:eu2(this,null);return;case 9:(n=ew3(this,null,null))&&n.Fi();return;case 10:elF(this,!0);return;case 11:elU(this,!1);return;case 12:elY(this,!1);return;case 13:this.i=null,erA(this,null);return;case 15:elB(this,!1);return;case 16:elZ(this,!1);return;case 18:elJ(this,!1),M4(this.Cb,88)&&eko(Zd(Pp(this.Cb,88)),2);return;case 20:elQ(this,!0);return;case 21:erM(this,null);return;case 23:this.a||(this.a=new OT(tm9,this,23)),eRT(this.a);return}ec6(this,e-Y1((eBK(),tgj)),ee2((t=Pp(eaS(this,16),26))||tgj,e))},eUe.Gh=function(){esd(this),UH(QZ((eSp(),tvc),this)),evl(this),this.Bb|=1},eUe.Lj=function(){return ebY(this)},eUe.qk=function(){var e;return!!(e=ebY(this))&&(e.Bb&eZ1)!=0},eUe.rk=function(){return(this.Bb&eZ1)!=0},eUe.sk=function(){return(this.Bb&eH3)!=0},eUe.nk=function(e,t){return this.c=null,ecz(this,e,t)},eUe.Ib=function(){var e;return(64&this.Db)!=0?eCR(this):(e=new O1(eCR(this)),e.a+=" (containment: ",yG(e,(this.Bb&eZ1)!=0),e.a+=", resolveProxies: ",yG(e,(this.Bb&eH3)!=0),e.a+=")",e.a)},Y5(eZ2,"EReferenceImpl",99),eTS(548,115,{105:1,42:1,92:1,90:1,133:1,56:1,108:1,49:1,97:1,548:1,114:1,115:1},sp),eUe.Fb=function(e){return this===e},eUe.cd=function(){return this.b},eUe.dd=function(){return this.c},eUe.Hb=function(){return Ao(this)},eUe.Uh=function(e){RP(this,Lq(e))},eUe.ed=function(e){return P5(this,Lq(e))},eUe._g=function(e,t,n){var r;switch(e){case 0:return this.b;case 1:return this.c}return Qt(this,e-Y1((eBK(),tgF)),ee2((r=Pp(eaS(this,16),26))||tgF,e),t,n)},eUe.lh=function(e){var t;switch(e){case 0:return null!=this.b;case 1:return null!=this.c}return VP(this,e-Y1((eBK(),tgF)),ee2((t=Pp(eaS(this,16),26))||tgF,e))},eUe.sh=function(e,t){var n;switch(e){case 0:RR(this,Lq(t));return;case 1:ers(this,Lq(t));return}efL(this,e-Y1((eBK(),tgF)),ee2((n=Pp(eaS(this,16),26))||tgF,e),t)},eUe.zh=function(){return eBK(),tgF},eUe.Bh=function(e){var t;switch(e){case 0:ero(this,null);return;case 
1:ers(this,null);return}ec6(this,e-Y1((eBK(),tgF)),ee2((t=Pp(eaS(this,16),26))||tgF,e))},eUe.Sh=function(){var e;return -1==this.a&&(e=this.b,this.a=null==e?0:ebA(e)),this.a},eUe.Th=function(e){this.a=e},eUe.Ib=function(){var e;return(64&this.Db)!=0?eMT(this):(e=new O1(eMT(this)),e.a+=" (key: ",xk(e,this.b),e.a+=", value: ",xk(e,this.c),e.a+=")",e.a)},eUe.a=-1,eUe.b=null,eUe.c=null;var tgf=Y5(eZ2,"EStringToStringMapEntryImpl",548),tgd=RL(eJz,"FeatureMap/Entry/Internal");eTS(565,1,eQa),eUe.Ok=function(e){return this.Pk(Pp(e,49))},eUe.Pk=function(e){return this.Ok(e)},eUe.Fb=function(e){var t,n;return this===e||!!M4(e,72)&&(t=Pp(e,72)).ak()==this.c&&(null==(n=this.dd())?null==t.dd():ecX(n,t.dd()))},eUe.ak=function(){return this.c},eUe.Hb=function(){var e;return e=this.dd(),esj(this.c)^(null==e?0:esj(e))},eUe.Ib=function(){var e,t;return t=etP((e=this.c).Hj()).Ph(),e.ne(),(null!=t&&0!=t.length?t+":"+e.ne():e.ne())+"="+this.dd()},Y5(eZ2,"EStructuralFeatureImpl/BasicFeatureMapEntry",565),eTS(776,565,eQa,Cg),eUe.Pk=function(e){return new Cg(this.c,e)},eUe.dd=function(){return this.a},eUe.Qk=function(e,t,n){return eiY(this,e,this.a,t,n)},eUe.Rk=function(e,t,n){return eiB(this,e,this.a,t,n)},Y5(eZ2,"EStructuralFeatureImpl/ContainmentUpdatingFeatureMapEntry",776),eTS(1314,1,{},k6),eUe.Pj=function(e,t,n,r,i){var a;return(a=Pp(JG(e,this.b),215)).nl(this.a).Wj(r)},eUe.Qj=function(e,t,n,r,i){var a;return(a=Pp(JG(e,this.b),215)).el(this.a,r,i)},eUe.Rj=function(e,t,n,r,i){var a;return(a=Pp(JG(e,this.b),215)).fl(this.a,r,i)},eUe.Sj=function(e,t,n){var r;return(r=Pp(JG(e,this.b),215)).nl(this.a).fj()},eUe.Tj=function(e,t,n,r){var i;(i=Pp(JG(e,this.b),215)).nl(this.a).Wb(r)},eUe.Uj=function(e,t,n){return Pp(JG(e,this.b),215).nl(this.a)},eUe.Vj=function(e,t,n){var r;(r=Pp(JG(e,this.b),215)).nl(this.a).Xj()},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateFeatureMapDelegator",1314),eTS(89,1,{},Pe,HS,$F,qc),eUe.Pj=function(e,t,n,r,i){var 
a;if(null==(a=t.Ch(n))&&t.Dh(n,a=eBN(this,e)),!i)switch(this.e){case 50:case 41:return Pp(a,589).sj();case 40:return Pp(a,215).kl()}return a},eUe.Qj=function(e,t,n,r,i){var a,o;return null==(o=t.Ch(n))&&t.Dh(n,o=eBN(this,e)),a=Pp(o,69).lk(r,i)},eUe.Rj=function(e,t,n,r,i){var a;return null!=(a=t.Ch(n))&&(i=Pp(a,69).mk(r,i)),i},eUe.Sj=function(e,t,n){var r;return null!=(r=t.Ch(n))&&Pp(r,76).fj()},eUe.Tj=function(e,t,n,r){var i;(i=Pp(t.Ch(n),76))||t.Dh(n,i=eBN(this,e)),i.Wb(r)},eUe.Uj=function(e,t,n){var r,i;return(null==(i=t.Ch(n))&&t.Dh(n,i=eBN(this,e)),M4(i,76))?Pp(i,76):(r=Pp(t.Ch(n),15),new pz(r))},eUe.Vj=function(e,t,n){var r;(r=Pp(t.Ch(n),76))||t.Dh(n,r=eBN(this,e)),r.Xj()},eUe.b=0,eUe.e=0,Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateMany",89),eTS(504,1,{}),eUe.Qj=function(e,t,n,r,i){throw p7(new bO)},eUe.Rj=function(e,t,n,r,i){throw p7(new bO)},eUe.Uj=function(e,t,n){return new Hk(this,e,t,n)},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingle",504),eTS(1331,1,eJG,Hk),eUe.Wj=function(e){return this.a.Pj(this.c,this.d,this.b,e,!0)},eUe.fj=function(){return this.a.Sj(this.c,this.d,this.b)},eUe.Wb=function(e){this.a.Tj(this.c,this.d,this.b,e)},eUe.Xj=function(){this.a.Vj(this.c,this.d,this.b)},eUe.b=0,Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingle/1",1331),eTS(769,504,{},zl),eUe.Pj=function(e,t,n,r,i){return eIy(e,e.eh(),e.Vg())==this.b?this.sk()&&r?eTp(e):e.eh():null},eUe.Qj=function(e,t,n,r,i){var a,o;return e.eh()&&(i=(a=e.Vg())>=0?e.Qg(i):e.eh().ih(e,-1-a,null,i)),o=edv(e.Tg(),this.e),e.Sg(r,o,i)},eUe.Rj=function(e,t,n,r,i){var a;return a=edv(e.Tg(),this.e),e.Sg(null,a,i)},eUe.Sj=function(e,t,n){var r;return r=edv(e.Tg(),this.e),!!e.eh()&&e.Vg()==r},eUe.Tj=function(e,t,n,r){var i,a,o,s,u;if(null!=r&&!eNc(this.a,r))throw p7(new gA(eQo+(M4(r,56)?eyB(Pp(r,56).Tg()):ee6(esF(r)))+eQs+this.a+"'"));if(i=e.eh(),o=edv(e.Tg(),this.e),xc(r)!==xc(i)||e.Vg()!=o&&null!=r){if(eg7(e,Pp(r,56)))throw p7(new 
gL(eZ4+e.Ib()));u=null,i&&(u=(a=e.Vg())>=0?e.Qg(u):e.eh().ih(e,-1-a,null,u)),(s=Pp(r,49))&&(u=s.gh(e,edv(s.Tg(),this.b),null,u)),(u=e.Sg(s,o,u))&&u.Fi()}else e.Lg()&&e.Mg()&&eam(e,new FX(e,1,o,r,r))},eUe.Vj=function(e,t,n){var r,i,a,o;(r=e.eh())?(o=(i=e.Vg())>=0?e.Qg(null):e.eh().ih(e,-1-i,null,null),a=edv(e.Tg(),this.e),(o=e.Sg(null,a,o))&&o.Fi()):e.Lg()&&e.Mg()&&eam(e,new FJ(e,1,this.e,null,null))},eUe.sk=function(){return!1},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleContainer",769),eTS(1315,769,{},Pt),eUe.sk=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleContainerResolving",1315),eTS(563,504,{}),eUe.Pj=function(e,t,n,r,i){var a;return null==(a=t.Ch(n))?this.b:xc(a)===xc(tgZ)?null:a},eUe.Sj=function(e,t,n){var r;return null!=(r=t.Ch(n))&&(xc(r)===xc(tgZ)||!ecX(r,this.b))},eUe.Tj=function(e,t,n,r){var i,a;e.Lg()&&e.Mg()?(i=null==(a=t.Ch(n))?this.b:xc(a)===xc(tgZ)?null:a,null==r?null!=this.c?(t.Dh(n,null),r=this.b):null!=this.b?t.Dh(n,tgZ):t.Dh(n,null):(this.Sk(r),t.Dh(n,r)),eam(e,this.d.Tk(e,1,this.e,i,r))):null==r?null!=this.c?t.Dh(n,null):null!=this.b?t.Dh(n,tgZ):t.Dh(n,null):(this.Sk(r),t.Dh(n,r))},eUe.Vj=function(e,t,n){var r,i;e.Lg()&&e.Mg()?(r=null==(i=t.Ch(n))?this.b:xc(i)===xc(tgZ)?null:i,t.Eh(n),eam(e,this.d.Tk(e,1,this.e,r,this.b))):t.Eh(n)},eUe.Sk=function(e){throw p7(new bk)},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleData",563),eTS(eQu,1,{},sb),eUe.Tk=function(e,t,n,r,i){return new FJ(e,t,n,r,i)},eUe.Uk=function(e,t,n,r,i,a){return new H0(e,t,n,r,i,a)},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleData/NotificationCreator",eQu),eTS(1332,eQu,{},sm),eUe.Tk=function(e,t,n,r,i){return new ZY(e,t,n,gN(LK(r)),gN(LK(i)))},eUe.Uk=function(e,t,n,r,i,a){return new W4(e,t,n,gN(LK(r)),gN(LK(i)),a)},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleData/NotificationCreator/1",1332),eTS(1333,eQu,{},sg),eUe.Tk=function(e,t,n,r,i){return new 
en_(e,t,n,Pp(r,217).a,Pp(i,217).a)},eUe.Uk=function(e,t,n,r,i,a){return new WX(e,t,n,Pp(r,217).a,Pp(i,217).a,a)},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleData/NotificationCreator/2",1333),eTS(1334,eQu,{},sv),eUe.Tk=function(e,t,n,r,i){return new enE(e,t,n,Pp(r,172).a,Pp(i,172).a)},eUe.Uk=function(e,t,n,r,i,a){return new WJ(e,t,n,Pp(r,172).a,Pp(i,172).a,a)},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleData/NotificationCreator/3",1334),eTS(1335,eQu,{},sy),eUe.Tk=function(e,t,n,r,i){return new Zj(e,t,n,gP(LV(r)),gP(LV(i)))},eUe.Uk=function(e,t,n,r,i,a){return new WQ(e,t,n,gP(LV(r)),gP(LV(i)),a)},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleData/NotificationCreator/4",1335),eTS(1336,eQu,{},sw),eUe.Tk=function(e,t,n,r,i){return new enx(e,t,n,Pp(r,155).a,Pp(i,155).a)},eUe.Uk=function(e,t,n,r,i,a){return new W1(e,t,n,Pp(r,155).a,Pp(i,155).a,a)},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleData/NotificationCreator/5",1336),eTS(1337,eQu,{},s_),eUe.Tk=function(e,t,n,r,i){return new ZF(e,t,n,Pp(r,19).a,Pp(i,19).a)},eUe.Uk=function(e,t,n,r,i,a){return new W0(e,t,n,Pp(r,19).a,Pp(i,19).a,a)},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleData/NotificationCreator/6",1337),eTS(1338,eQu,{},sE),eUe.Tk=function(e,t,n,r,i){return new enS(e,t,n,Pp(r,162).a,Pp(i,162).a)},eUe.Uk=function(e,t,n,r,i,a){return new W2(e,t,n,Pp(r,162).a,Pp(i,162).a,a)},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleData/NotificationCreator/7",1338),eTS(1339,eQu,{},sS),eUe.Tk=function(e,t,n,r,i){return new enk(e,t,n,Pp(r,184).a,Pp(i,184).a)},eUe.Uk=function(e,t,n,r,i,a){return new W3(e,t,n,Pp(r,184).a,Pp(i,184).a,a)},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleData/NotificationCreator/8",1339),eTS(1317,563,{},Hx),eUe.Sk=function(e){if(!this.a.wj(e))throw p7(new 
gA(eQo+esF(e)+eQs+this.a+"'"))},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleDataDynamic",1317),eTS(1318,563,{},j6),eUe.Sk=function(e){},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleDataStatic",1318),eTS(770,563,{}),eUe.Sj=function(e,t,n){var r;return null!=(r=t.Ch(n))},eUe.Tj=function(e,t,n,r){var i,a;e.Lg()&&e.Mg()?(i=!0,null==(a=t.Ch(n))?(i=!1,a=this.b):xc(a)===xc(tgZ)&&(a=null),null==r?null!=this.c?(t.Dh(n,null),r=this.b):t.Dh(n,tgZ):(this.Sk(r),t.Dh(n,r)),eam(e,this.d.Uk(e,1,this.e,a,r,!i))):null==r?null!=this.c?t.Dh(n,null):t.Dh(n,tgZ):(this.Sk(r),t.Dh(n,r))},eUe.Vj=function(e,t,n){var r,i;e.Lg()&&e.Mg()?(r=!0,null==(i=t.Ch(n))?(r=!1,i=this.b):xc(i)===xc(tgZ)&&(i=null),t.Eh(n),eam(e,this.d.Uk(e,2,this.e,i,this.b,r))):t.Eh(n)},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleDataUnsettable",770),eTS(1319,770,{},HT),eUe.Sk=function(e){if(!this.a.wj(e))throw p7(new gA(eQo+esF(e)+eQs+this.a+"'"))},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleDataUnsettableDynamic",1319),eTS(1320,770,{},j9),eUe.Sk=function(e){},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleDataUnsettableStatic",1320),eTS(398,504,{},jd),eUe.Pj=function(e,t,n,r,i){var a,o,s,u,c;if(c=t.Ch(n),this.Kj()&&xc(c)===xc(tgZ))return null;if(!this.sk()||!r||null==c)return c;if((s=Pp(c,49)).kh()&&(u=ecv(e,s),s!=u)){if(!eNc(this.a,u))throw p7(new gA(eQo+esF(u)+eQs+this.a+"'"));t.Dh(n,c=u),this.rk()&&(a=Pp(u,49),o=s.ih(e,this.b?edv(s.Tg(),this.b):-1-edv(e.Tg(),this.e),null,null),a.eh()||(o=a.gh(e,this.b?edv(a.Tg(),this.b):-1-edv(e.Tg(),this.e),null,o)),o&&o.Fi()),e.Lg()&&e.Mg()&&eam(e,new FJ(e,9,this.e,s,u))}return c},eUe.Qj=function(e,t,n,r,i){var a,o;return xc(o=t.Ch(n))===xc(tgZ)&&(o=null),t.Dh(n,r),this.bj()?xc(o)!==xc(r)&&null!=o&&(i=(a=Pp(o,49)).ih(e,edv(a.Tg(),this.b),null,i)):this.rk()&&null!=o&&(i=Pp(o,49).ih(e,-1-edv(e.Tg(),this.e),null,i)),e.Lg()&&e.Mg()&&(i||(i=new yf(4)),i.Ei(new 
FJ(e,1,this.e,o,r))),i},eUe.Rj=function(e,t,n,r,i){var a;return xc(a=t.Ch(n))===xc(tgZ)&&(a=null),t.Eh(n),e.Lg()&&e.Mg()&&(i||(i=new yf(4)),this.Kj()?i.Ei(new FJ(e,2,this.e,a,null)):i.Ei(new FJ(e,1,this.e,a,null))),i},eUe.Sj=function(e,t,n){var r;return null!=(r=t.Ch(n))},eUe.Tj=function(e,t,n,r){var i,a,o,s,u;if(null!=r&&!eNc(this.a,r))throw p7(new gA(eQo+(M4(r,56)?eyB(Pp(r,56).Tg()):ee6(esF(r)))+eQs+this.a+"'"));s=null!=(u=t.Ch(n)),this.Kj()&&xc(u)===xc(tgZ)&&(u=null),o=null,this.bj()?xc(u)!==xc(r)&&(null!=u&&(o=(i=Pp(u,49)).ih(e,edv(i.Tg(),this.b),null,o)),null!=r&&(o=(i=Pp(r,49)).gh(e,edv(i.Tg(),this.b),null,o))):this.rk()&&xc(u)!==xc(r)&&(null!=u&&(o=Pp(u,49).ih(e,-1-edv(e.Tg(),this.e),null,o)),null!=r&&(o=Pp(r,49).gh(e,-1-edv(e.Tg(),this.e),null,o))),null==r&&this.Kj()?t.Dh(n,tgZ):t.Dh(n,r),e.Lg()&&e.Mg()?(a=new H0(e,1,this.e,u,r,this.Kj()&&!s),o?(o.Ei(a),o.Fi()):eam(e,a)):o&&o.Fi()},eUe.Vj=function(e,t,n){var r,i,a,o,s;o=null!=(s=t.Ch(n)),this.Kj()&&xc(s)===xc(tgZ)&&(s=null),a=null,null!=s&&(this.bj()?a=(r=Pp(s,49)).ih(e,edv(r.Tg(),this.b),null,a):this.rk()&&(a=Pp(s,49).ih(e,-1-edv(e.Tg(),this.e),null,a))),t.Eh(n),e.Lg()&&e.Mg()?(i=new 
H0(e,this.Kj()?2:1,this.e,s,null,o),a?(a.Ei(i),a.Fi()):eam(e,i)):a&&a.Fi()},eUe.bj=function(){return!1},eUe.rk=function(){return!1},eUe.sk=function(){return!1},eUe.Kj=function(){return!1},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObject",398),eTS(564,398,{},LE),eUe.rk=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectContainment",564),eTS(1323,564,{},LS),eUe.sk=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectContainmentResolving",1323),eTS(772,564,{},Lk),eUe.Kj=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectContainmentUnsettable",772),eTS(1325,772,{},Lx),eUe.sk=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectContainmentUnsettableResolving",1325),eTS(640,564,{},Pn),eUe.bj=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectContainmentWithInverse",640),eTS(1324,640,{},Pa),eUe.sk=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectContainmentWithInverseResolving",1324),eTS(773,640,{},Po),eUe.Kj=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectContainmentWithInverseUnsettable",773),eTS(1326,773,{},Ps),eUe.sk=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectContainmentWithInverseUnsettableResolving",1326),eTS(641,398,{},LT),eUe.sk=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectResolving",641),eTS(1327,641,{},LM),eUe.Kj=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectResolvingUnsettable",1327),eTS(774,641,{},Pr),eUe.bj=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectResolvingWithInverse",774),eTS(1328,774,{},Pu),eUe.Kj=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectResolvingWithInverseUnsett
able",1328),eTS(1321,398,{},LO),eUe.Kj=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectUnsettable",1321),eTS(771,398,{},Pi),eUe.bj=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectWithInverse",771),eTS(1322,771,{},Pc),eUe.Kj=function(){return!0},Y5(eZ2,"EStructuralFeatureImpl/InternalSettingDelegateSingleEObjectWithInverseUnsettable",1322),eTS(775,565,eQa,Bj),eUe.Pk=function(e){return new Bj(this.a,this.c,e)},eUe.dd=function(){return this.b},eUe.Qk=function(e,t,n){return Jt(this,e,this.b,n)},eUe.Rk=function(e,t,n){return Jn(this,e,this.b,n)},Y5(eZ2,"EStructuralFeatureImpl/InverseUpdatingFeatureMapEntry",775),eTS(1329,1,eJG,pz),eUe.Wj=function(e){return this.a},eUe.fj=function(){return M4(this.a,95)?Pp(this.a,95).fj():!this.a.dc()},eUe.Wb=function(e){this.a.$b(),this.a.Gc(Pp(e,15))},eUe.Xj=function(){M4(this.a,95)?Pp(this.a,95).Xj():this.a.$b()},Y5(eZ2,"EStructuralFeatureImpl/SettingMany",1329),eTS(1330,565,eQa,qf),eUe.Ok=function(e){return new Cv((eR7(),tvK),this.b.Ih(this.a,e))},eUe.dd=function(){return null},eUe.Qk=function(e,t,n){return n},eUe.Rk=function(e,t,n){return n},Y5(eZ2,"EStructuralFeatureImpl/SimpleContentFeatureMapEntry",1330),eTS(642,565,eQa,Cv),eUe.Ok=function(e){return new Cv(this.c,e)},eUe.dd=function(){return this.a},eUe.Qk=function(e,t,n){return n},eUe.Rk=function(e,t,n){return n},Y5(eZ2,"EStructuralFeatureImpl/SimpleFeatureMapEntry",642),eTS(391,497,eXz,sk),eUe.ri=function(e){return Je(tm7,eUp,26,e,0,1)},eUe.ni=function(){return!1},Y5(eZ2,"ESuperAdapter/1",391),eTS(444,438,{105:1,92:1,90:1,147:1,191:1,56:1,108:1,836:1,49:1,97:1,150:1,444:1,114:1,115:1},sx),eUe._g=function(e,t,n){var r;switch(e){case 0:return this.Ab||(this.Ab=new FQ(tm4,this,0,3)),this.Ab;case 1:return this.zb;case 2:return this.a||(this.a=new jh(this,tgr,this)),this.a}return Qt(this,e-Y1((eBK(),tgU)),ee2((r=Pp(eaS(this,16),26))||tgU,e),t,n)},eUe.jh=function(e,t,n){var r,i;switch(t){case 0:return 
this.Ab||(this.Ab=new FQ(tm4,this,0,3)),ep6(this.Ab,e,n);case 2:return this.a||(this.a=new jh(this,tgr,this)),ep6(this.a,e,n)}return(i=Pp(ee2((r=Pp(eaS(this,16),26))||(eBK(),tgU),t),66)).Nj().Rj(this,ehH(this),t-Y1((eBK(),tgU)),e,n)},eUe.lh=function(e){var t;switch(e){case 0:return!!this.Ab&&0!=this.Ab.i;case 1:return null!=this.zb;case 2:return!!this.a&&0!=this.a.i}return VP(this,e-Y1((eBK(),tgU)),ee2((t=Pp(eaS(this,16),26))||tgU,e))},eUe.sh=function(e,t){var n;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab),this.Ab||(this.Ab=new FQ(tm4,this,0,3)),Y4(this.Ab,Pp(t,14));return;case 1:er3(this,Lq(t));return;case 2:this.a||(this.a=new jh(this,tgr,this)),eRT(this.a),this.a||(this.a=new jh(this,tgr,this)),Y4(this.a,Pp(t,14));return}efL(this,e-Y1((eBK(),tgU)),ee2((n=Pp(eaS(this,16),26))||tgU,e),t)},eUe.zh=function(){return eBK(),tgU},eUe.Bh=function(e){var t;switch(e){case 0:this.Ab||(this.Ab=new FQ(tm4,this,0,3)),eRT(this.Ab);return;case 1:er3(this,null);return;case 2:this.a||(this.a=new jh(this,tgr,this)),eRT(this.a);return}ec6(this,e-Y1((eBK(),tgU)),ee2((t=Pp(eaS(this,16),26))||tgU,e))},Y5(eZ2,"ETypeParameterImpl",444),eTS(445,85,eJ9,jh),eUe.cj=function(e,t){return ewV(this,Pp(e,87),t)},eUe.dj=function(e,t){return ewq(this,Pp(e,87),t)},Y5(eZ2,"ETypeParameterImpl/1",445),eTS(634,43,e$s,mR),eUe.ec=function(){return new pG(this)},Y5(eZ2,"ETypeParameterImpl/2",634),eTS(556,eUT,eUM,pG),eUe.Fc=function(e){return Ie(this,Pp(e,87))},eUe.Gc=function(e){var t,n,r;for(r=!1,n=e.Kc();n.Ob();)t=Pp(n.Pb(),87),null==Um(this.a,t,"")&&(r=!0);return r},eUe.$b=function(){Yy(this.a)},eUe.Hc=function(e){return F9(this.a,e)},eUe.Kc=function(){var e;return e=new esz(new fS(this.a).a),new pW(e)},eUe.Mc=function(e){return Xp(this,e)},eUe.gc=function(){return wq(this.a)},Y5(eZ2,"ETypeParameterImpl/2/1",556),eTS(557,1,eUE,pW),eUe.Nb=function(e){F8(this,e)},eUe.Pb=function(){return Pp(etz(this.a).cd(),87)},eUe.Ob=function(){return 
this.a.b},eUe.Qb=function(){JM(this.a)},Y5(eZ2,"ETypeParameterImpl/2/1/1",557),eTS(1276,43,e$s,mj),eUe._b=function(e){return xd(e)?$r(this,e):!!$I(this.f,e)},eUe.xc=function(e){var t,n;return M4(t=xd(e)?zg(this,e):xu($I(this.f,e)),837)?(t=(n=Pp(t,837))._j(),Um(this,Pp(e,235),t),t):null!=t?t:null==e?(_3(),tvh):null},Y5(eZ2,"EValidatorRegistryImpl",1276),eTS(1313,704,{105:1,92:1,90:1,471:1,147:1,56:1,108:1,1941:1,49:1,97:1,150:1,114:1,115:1},sT),eUe.Ih=function(e,t){switch(e.yj()){case 21:case 22:case 23:case 24:case 26:case 31:case 32:case 37:case 38:case 39:case 40:case 43:case 44:case 48:case 49:case 20:return null==t?null:efF(t);case 25:return etR(t);case 27:return Qn(t);case 28:return Qr(t);case 29:return null==t?null:MU(tmS[0],Pp(t,199));case 41:return null==t?"":yx(Pp(t,290));case 42:return efF(t);case 50:return Lq(t);default:throw p7(new gL(eZ5+e.ne()+eZ6))}},eUe.Jh=function(e){var t;switch(-1==e.G&&(e.G=(t=etP(e))?ebv(t.Mh(),e):-1),e.G){case 0:return new mC;case 1:return new sa;case 2:return new c0;case 4:return new bN;case 5:return new mI;case 6:return new bD;case 7:return new cQ;case 10:return new sr;case 11:return new mD;case 12:return new $y;case 13:return new mN;case 14:return new LB;case 17:return new sp;case 18:return new p5;case 19:return new sx;default:throw p7(new gL(eZ7+e.zb+eZ6))}},eUe.Kh=function(e,t){switch(e.yj()){case 20:return null==t?null:new yY(t);case 21:return null==t?null:new TU(t);case 23:case 22:return null==t?null:ehL(t);case 26:case 24:return null==t?null:eeT(eDa(t,-128,127)<<24>>24);case 25:return eMp(t);case 27:return egg(t);case 28:return egv(t);case 29:return e__(t);case 32:case 31:return null==t?null:eEu(t);case 38:case 37:return null==t?null:new bK(t);case 40:case 39:return null==t?null:ell(eDa(t,eHt,eUu));case 41:case 42:return null;case 44:case 43:return null==t?null:ehQ(eF0(t));case 49:case 48:return null==t?null:elf(eDa(t,eQl,32767)<<16>>16);case 50:return t;default:throw p7(new 
gL(eZ5+e.ne()+eZ6))}},Y5(eZ2,"EcoreFactoryImpl",1313),eTS(547,179,{105:1,92:1,90:1,147:1,191:1,56:1,235:1,108:1,1939:1,49:1,97:1,150:1,179:1,547:1,114:1,115:1,675:1},Uh),eUe.gb=!1,eUe.hb=!1;var tgh,tgp,tgb,tgm,tgg,tgv,tgy,tgw,tg_,tgE,tgS,tgk,tgx,tgT,tgM,tgO,tgA,tgL,tgC,tgI,tgD,tgN,tgP,tgR,tgj,tgF,tgY,tgB,tgU,tgH,tg$,tgz,tgG,tgW,tgK,tgV,tgq,tgZ,tgX,tgJ,tgQ,tg1,tg0,tg2,tg3,tg4,tg5,tg6,tg9=!1;Y5(eZ2,"EcorePackageImpl",547),eTS(1184,1,{837:1},sM),eUe._j=function(){return OJ(),tvp},Y5(eZ2,"EcorePackageImpl/1",1184),eTS(1193,1,eQS,sO),eUe.wj=function(e){return M4(e,147)},eUe.xj=function(e){return Je(e6y,eUp,147,e,0,1)},Y5(eZ2,"EcorePackageImpl/10",1193),eTS(1194,1,eQS,sA),eUe.wj=function(e){return M4(e,191)},eUe.xj=function(e){return Je(e6_,eUp,191,e,0,1)},Y5(eZ2,"EcorePackageImpl/11",1194),eTS(1195,1,eQS,sL),eUe.wj=function(e){return M4(e,56)},eUe.xj=function(e){return Je(e6f,eUp,56,e,0,1)},Y5(eZ2,"EcorePackageImpl/12",1195),eTS(1196,1,eQS,sC),eUe.wj=function(e){return M4(e,399)},eUe.xj=function(e){return Je(tgi,eJ5,59,e,0,1)},Y5(eZ2,"EcorePackageImpl/13",1196),eTS(1197,1,eQS,sI),eUe.wj=function(e){return M4(e,235)},eUe.xj=function(e){return Je(e6E,eUp,235,e,0,1)},Y5(eZ2,"EcorePackageImpl/14",1197),eTS(1198,1,eQS,sD),eUe.wj=function(e){return M4(e,509)},eUe.xj=function(e){return Je(tga,eUp,2017,e,0,1)},Y5(eZ2,"EcorePackageImpl/15",1198),eTS(1199,1,eQS,sN),eUe.wj=function(e){return M4(e,99)},eUe.xj=function(e){return Je(tgo,eJ4,18,e,0,1)},Y5(eZ2,"EcorePackageImpl/16",1199),eTS(1200,1,eQS,sP),eUe.wj=function(e){return M4(e,170)},eUe.xj=function(e){return Je(tm6,eJ4,170,e,0,1)},Y5(eZ2,"EcorePackageImpl/17",1200),eTS(1201,1,eQS,sR),eUe.wj=function(e){return M4(e,472)},eUe.xj=function(e){return Je(tm5,eUp,472,e,0,1)},Y5(eZ2,"EcorePackageImpl/18",1201),eTS(1202,1,eQS,sj),eUe.wj=function(e){return M4(e,548)},eUe.xj=function(e){return Je(tgf,eJL,548,e,0,1)},Y5(eZ2,"EcorePackageImpl/19",1202),eTS(1185,1,eQS,sF),eUe.wj=function(e){return M4(e,322)},eUe.xj=function(e){return 
Je(tm9,eJ4,34,e,0,1)},Y5(eZ2,"EcorePackageImpl/2",1185),eTS(1203,1,eQS,sY),eUe.wj=function(e){return M4(e,241)},eUe.xj=function(e){return Je(tgr,eQt,87,e,0,1)},Y5(eZ2,"EcorePackageImpl/20",1203),eTS(1204,1,eQS,sB),eUe.wj=function(e){return M4(e,444)},eUe.xj=function(e){return Je(tgs,eUp,836,e,0,1)},Y5(eZ2,"EcorePackageImpl/21",1204),eTS(1205,1,eQS,sU),eUe.wj=function(e){return xl(e)},eUe.xj=function(e){return Je(e11,eUP,476,e,8,1)},Y5(eZ2,"EcorePackageImpl/22",1205),eTS(1206,1,eQS,sH),eUe.wj=function(e){return M4(e,190)},eUe.xj=function(e){return Je(tyk,eUP,190,e,0,2)},Y5(eZ2,"EcorePackageImpl/23",1206),eTS(1207,1,eQS,s$),eUe.wj=function(e){return M4(e,217)},eUe.xj=function(e){return Je(e10,eUP,217,e,0,1)},Y5(eZ2,"EcorePackageImpl/24",1207),eTS(1208,1,eQS,sz),eUe.wj=function(e){return M4(e,172)},eUe.xj=function(e){return Je(e12,eUP,172,e,0,1)},Y5(eZ2,"EcorePackageImpl/25",1208),eTS(1209,1,eQS,sG),eUe.wj=function(e){return M4(e,199)},eUe.xj=function(e){return Je(e1Q,eUP,199,e,0,1)},Y5(eZ2,"EcorePackageImpl/26",1209),eTS(1210,1,eQS,sW),eUe.wj=function(e){return!1},eUe.xj=function(e){return Je(tyA,eUp,2110,e,0,1)},Y5(eZ2,"EcorePackageImpl/27",1210),eTS(1211,1,eQS,sK),eUe.wj=function(e){return xf(e)},eUe.xj=function(e){return Je(e13,eUP,333,e,7,1)},Y5(eZ2,"EcorePackageImpl/28",1211),eTS(1212,1,eQS,sV),eUe.wj=function(e){return M4(e,58)},eUe.xj=function(e){return Je(e6L,ezZ,58,e,0,1)},Y5(eZ2,"EcorePackageImpl/29",1212),eTS(1186,1,eQS,sq),eUe.wj=function(e){return M4(e,510)},eUe.xj=function(e){return Je(tm4,{3:1,4:1,5:1,1934:1},590,e,0,1)},Y5(eZ2,"EcorePackageImpl/3",1186),eTS(1213,1,eQS,sZ),eUe.wj=function(e){return M4(e,573)},eUe.xj=function(e){return Je(e6j,eUp,1940,e,0,1)},Y5(eZ2,"EcorePackageImpl/30",1213),eTS(1214,1,eQS,sX),eUe.wj=function(e){return M4(e,153)},eUe.xj=function(e){return Je(tg7,ezZ,153,e,0,1)},Y5(eZ2,"EcorePackageImpl/31",1214),eTS(1215,1,eQS,sJ),eUe.wj=function(e){return M4(e,72)},eUe.xj=function(e){return 
Je(tgc,eQk,72,e,0,1)},Y5(eZ2,"EcorePackageImpl/32",1215),eTS(1216,1,eQS,sQ),eUe.wj=function(e){return M4(e,155)},eUe.xj=function(e){return Je(e14,eUP,155,e,0,1)},Y5(eZ2,"EcorePackageImpl/33",1216),eTS(1217,1,eQS,s1),eUe.wj=function(e){return M4(e,19)},eUe.xj=function(e){return Je(e15,eUP,19,e,0,1)},Y5(eZ2,"EcorePackageImpl/34",1217),eTS(1218,1,eQS,s0),eUe.wj=function(e){return M4(e,290)},eUe.xj=function(e){return Je(e1j,eUp,290,e,0,1)},Y5(eZ2,"EcorePackageImpl/35",1218),eTS(1219,1,eQS,s2),eUe.wj=function(e){return M4(e,162)},eUe.xj=function(e){return Je(e16,eUP,162,e,0,1)},Y5(eZ2,"EcorePackageImpl/36",1219),eTS(1220,1,eQS,s3),eUe.wj=function(e){return M4(e,83)},eUe.xj=function(e){return Je(e1Y,eUp,83,e,0,1)},Y5(eZ2,"EcorePackageImpl/37",1220),eTS(1221,1,eQS,s4),eUe.wj=function(e){return M4(e,591)},eUe.xj=function(e){return Je(tg8,eUp,591,e,0,1)},Y5(eZ2,"EcorePackageImpl/38",1221),eTS(1222,1,eQS,s5),eUe.wj=function(e){return!1},eUe.xj=function(e){return Je(tyL,eUp,2111,e,0,1)},Y5(eZ2,"EcorePackageImpl/39",1222),eTS(1187,1,eQS,s6),eUe.wj=function(e){return M4(e,88)},eUe.xj=function(e){return Je(tm7,eUp,26,e,0,1)},Y5(eZ2,"EcorePackageImpl/4",1187),eTS(1223,1,eQS,s9),eUe.wj=function(e){return M4(e,184)},eUe.xj=function(e){return Je(e19,eUP,184,e,0,1)},Y5(eZ2,"EcorePackageImpl/40",1223),eTS(1224,1,eQS,s8),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eZ2,"EcorePackageImpl/41",1224),eTS(1225,1,eQS,s7),eUe.wj=function(e){return M4(e,588)},eUe.xj=function(e){return Je(e6I,eUp,588,e,0,1)},Y5(eZ2,"EcorePackageImpl/42",1225),eTS(1226,1,eQS,ue),eUe.wj=function(e){return!1},eUe.xj=function(e){return Je(tyC,eUP,2112,e,0,1)},Y5(eZ2,"EcorePackageImpl/43",1226),eTS(1227,1,eQS,ut),eUe.wj=function(e){return M4(e,42)},eUe.xj=function(e){return Je(e1$,eUK,42,e,0,1)},Y5(eZ2,"EcorePackageImpl/44",1227),eTS(1188,1,eQS,un),eUe.wj=function(e){return M4(e,138)},eUe.xj=function(e){return 
Je(tm8,eUp,138,e,0,1)},Y5(eZ2,"EcorePackageImpl/5",1188),eTS(1189,1,eQS,ur),eUe.wj=function(e){return M4(e,148)},eUe.xj=function(e){return Je(tge,eUp,148,e,0,1)},Y5(eZ2,"EcorePackageImpl/6",1189),eTS(1190,1,eQS,ui),eUe.wj=function(e){return M4(e,457)},eUe.xj=function(e){return Je(tgt,eUp,671,e,0,1)},Y5(eZ2,"EcorePackageImpl/7",1190),eTS(1191,1,eQS,ua),eUe.wj=function(e){return M4(e,573)},eUe.xj=function(e){return Je(tgn,eUp,678,e,0,1)},Y5(eZ2,"EcorePackageImpl/8",1191),eTS(1192,1,eQS,uo),eUe.wj=function(e){return M4(e,471)},eUe.xj=function(e){return Je(e6w,eUp,471,e,0,1)},Y5(eZ2,"EcorePackageImpl/9",1192),eTS(1025,1982,eJO,gT),eUe.bi=function(e,t){ecV(this,Pp(t,415))},eUe.fi=function(e,t){eSU(this,e,Pp(t,415))},Y5(eZ2,"MinimalEObjectImpl/1ArrayDelegatingAdapterList",1025),eTS(1026,143,eJx,BF),eUe.Ai=function(){return this.a.a},Y5(eZ2,"MinimalEObjectImpl/1ArrayDelegatingAdapterList/1",1026),eTS(1053,1052,{},Ms),Y5("org.eclipse.emf.ecore.plugin","EcorePlugin",1053);var tg8=RL(eQx,"Resource");eTS(781,1378,eQT),eUe.Yk=function(e){},eUe.Zk=function(e){},eUe.Vk=function(){return this.a||(this.a=new pK(this)),this.a},eUe.Wk=function(e){var t,n,r,i,a;if((r=e.length)>0){if(GV(0,e.length),47==e.charCodeAt(0)){for(t=1,a=new XM(4),i=1;t0&&(e=e.substr(0,n))}return ekX(this,e)},eUe.Xk=function(){return this.c},eUe.Ib=function(){var e;return yx(this.gm)+"@"+(e=esj(this)>>>0).toString(16)+" uri='"+this.d+"'"},eUe.b=!1,Y5(eQM,"ResourceImpl",781),eTS(1379,781,eQT,pq),Y5(eQM,"BinaryResourceImpl",1379),eTS(1169,694,eXG),eUe.si=function(e){return M4(e,56)?$x(this,Pp(e,56)):M4(e,591)?new Ow(Pp(e,591).Vk()):xc(e)===xc(this.f)?Pp(e,14).Kc():(LF(),tmB.a)},eUe.Ob=function(){return exI(this)},eUe.a=!1,Y5(eJz,"EcoreUtil/ContentTreeIterator",1169),eTS(1380,1169,eXG,F0),eUe.si=function(e){return xc(e)===xc(this.f)?Pp(e,15).Kc():new K0(Pp(e,56))},Y5(eQM,"ResourceImpl/5",1380),eTS(648,1994,eJ6,pK),eUe.Hc=function(e){return 
this.i<=4?ev9(this,e):M4(e,49)&&Pp(e,49).Zg()==this.a},eUe.bi=function(e,t){e==this.i-1&&(this.a.b||(this.a.b=!0))},eUe.di=function(e,t){0==e?this.a.b||(this.a.b=!0):X8(this,e,t)},eUe.fi=function(e,t){},eUe.gi=function(e,t,n){},eUe.aj=function(){return 2},eUe.Ai=function(){return this.a},eUe.bj=function(){return!0},eUe.cj=function(e,t){var n;return t=(n=Pp(e,49)).wh(this.a,t)},eUe.dj=function(e,t){var n;return(n=Pp(e,49)).wh(null,t)},eUe.ej=function(){return!1},eUe.hi=function(){return!0},eUe.ri=function(e){return Je(e6f,eUp,56,e,0,1)},eUe.ni=function(){return!1},Y5(eQM,"ResourceImpl/ContentsEList",648),eTS(957,1964,eU5,pV),eUe.Zc=function(e){return this.a._h(e)},eUe.gc=function(){return this.a.gc()},Y5(eJz,"AbstractSequentialInternalEList/1",957),eTS(624,1,{},PQ),Y5(eJz,"BasicExtendedMetaData",624),eTS(1160,1,{},k9),eUe.$k=function(){return null},eUe._k=function(){return -2==this.a&&fi(this,e_f(this.d,this.b)),this.a},eUe.al=function(){return null},eUe.bl=function(){return Hj(),Hj(),e2r},eUe.ne=function(){return this.c==eQH&&fo(this,eh1(this.d,this.b)),this.c},eUe.cl=function(){return 0},eUe.a=-2,eUe.c=eQH,Y5(eJz,"BasicExtendedMetaData/EClassExtendedMetaDataImpl",1160),eTS(1161,1,{},Ke),eUe.$k=function(){return this.a==(ZE(),tvf)&&fa(this,eO9(this.f,this.b)),this.a},eUe._k=function(){return 0},eUe.al=function(){return this.c==(ZE(),tvf)&&fs(this,eO8(this.f,this.b)),this.c},eUe.bl=function(){return this.d||fu(this,eIA(this.f,this.b)),this.d},eUe.ne=function(){return this.e==eQH&&fc(this,eh1(this.f,this.b)),this.e},eUe.cl=function(){return -2==this.g&&fl(this,ewd(this.f,this.b)),this.g},eUe.e=eQH,eUe.g=-2,Y5(eJz,"BasicExtendedMetaData/EDataTypeExtendedMetaDataImpl",1161),eTS(1159,1,{},xn),eUe.b=!1,eUe.c=!1,Y5(eJz,"BasicExtendedMetaData/EPackageExtendedMetaDataImpl",1159),eTS(1162,1,{},W7),eUe.c=-2,eUe.e=eQH,eUe.f=eQH,Y5(eJz,"BasicExtendedMetaData/EStructuralFeatureExtendedMetaDataImpl",1162),eTS(585,622,eJ9,PJ),eUe.aj=function(){return 
this.c},eUe.Fk=function(){return!1},eUe.li=function(e,t){return t},eUe.c=0,Y5(eJz,"EDataTypeEList",585);var tg7=RL(eJz,"FeatureMap");eTS(75,585,{3:1,4:1,20:1,28:1,52:1,14:1,15:1,54:1,67:1,63:1,58:1,76:1,153:1,215:1,1937:1,69:1,95:1},eiR),eUe.Vc=function(e,t){eO0(this,e,Pp(t,72))},eUe.Fc=function(e){return eM6(this,Pp(e,72))},eUe.Yh=function(e){Y2(this,Pp(e,72))},eUe.cj=function(e,t){return IG(this,Pp(e,72),t)},eUe.dj=function(e,t){return IW(this,Pp(e,72),t)},eUe.ii=function(e,t){return eI7(this,e,t)},eUe.li=function(e,t){return ejg(this,e,Pp(t,72))},eUe._c=function(e,t){return eA6(this,e,Pp(t,72))},eUe.jj=function(e,t){return IK(this,Pp(e,72),t)},eUe.kj=function(e,t){return IV(this,Pp(e,72),t)},eUe.lj=function(e,t,n){return eyU(this,Pp(e,72),Pp(t,72),n)},eUe.oi=function(e,t){return ewk(this,e,Pp(t,72))},eUe.dl=function(e,t){return eIF(this,e,t)},eUe.Wc=function(e,t){var n,r,i,a,o,s,u,c,l;for(c=new eta(t.gc()),i=t.Kc();i.Ob();)if(a=(r=Pp(i.Pb(),72)).ak(),eLt(this.e,a))a.hi()&&(Vq(this,a,r.dd())||ev9(c,r))||JL(c,r);else{for(s=0,l=eAY(this.e.Tg(),a),n=Pp(this.g,119),o=!0;s=0;)if(t=e[this.c],this.k.rl(t.ak()))return this.j=this.f?t:t.dd(),this.i=-2,!0;return this.i=-1,this.g=-1,!1},Y5(eJz,"BasicFeatureMap/FeatureEIterator",410),eTS(662,410,eUC,x1),eUe.Lk=function(){return!0},Y5(eJz,"BasicFeatureMap/ResolvingFeatureEIterator",662),eTS(955,486,eQr,Mz),eUe.Gi=function(){return this},Y5(eJz,"EContentsEList/1",955),eTS(956,486,eQr,x0),eUe.Lk=function(){return!1},Y5(eJz,"EContentsEList/2",956),eTS(954,279,eQi,MG),eUe.Nk=function(e){},eUe.Ob=function(){return!1},eUe.Sb=function(){return!1},Y5(eJz,"EContentsEList/FeatureIteratorImpl/1",954),eTS(825,585,eJ9,OM),eUe.ci=function(){this.a=!0},eUe.fj=function(){return this.a},eUe.Xj=function(){var e;eRT(this),TO(this.e)?(e=this.a,this.a=!1,eam(this.e,new 
ZB(this.e,2,this.c,e,!1))):this.a=!1},eUe.a=!1,Y5(eJz,"EDataTypeEList/Unsettable",825),eTS(1849,585,eJ9,OO),eUe.hi=function(){return!0},Y5(eJz,"EDataTypeUniqueEList",1849),eTS(1850,825,eJ9,OA),eUe.hi=function(){return!0},Y5(eJz,"EDataTypeUniqueEList/Unsettable",1850),eTS(139,85,eJ9,OS),eUe.Ek=function(){return!0},eUe.li=function(e,t){return ex7(this,e,Pp(t,56))},Y5(eJz,"EObjectContainmentEList/Resolving",139),eTS(1163,545,eJ9,Ok),eUe.Ek=function(){return!0},eUe.li=function(e,t){return ex7(this,e,Pp(t,56))},Y5(eJz,"EObjectContainmentEList/Unsettable/Resolving",1163),eTS(748,16,eJ9,Io),eUe.ci=function(){this.a=!0},eUe.fj=function(){return this.a},eUe.Xj=function(){var e;eRT(this),TO(this.e)?(e=this.a,this.a=!1,eam(this.e,new ZB(this.e,2,this.c,e,!1))):this.a=!1},eUe.a=!1,Y5(eJz,"EObjectContainmentWithInverseEList/Unsettable",748),eTS(1173,748,eJ9,Is),eUe.Ek=function(){return!0},eUe.li=function(e,t){return ex7(this,e,Pp(t,56))},Y5(eJz,"EObjectContainmentWithInverseEList/Unsettable/Resolving",1173),eTS(743,496,eJ9,Ox),eUe.ci=function(){this.a=!0},eUe.fj=function(){return this.a},eUe.Xj=function(){var e;eRT(this),TO(this.e)?(e=this.a,this.a=!1,eam(this.e,new ZB(this.e,2,this.c,e,!1))):this.a=!1},eUe.a=!1,Y5(eJz,"EObjectEList/Unsettable",743),eTS(328,496,eJ9,OT),eUe.Ek=function(){return!0},eUe.li=function(e,t){return ex7(this,e,Pp(t,56))},Y5(eJz,"EObjectResolvingEList",328),eTS(1641,743,eJ9,OL),eUe.Ek=function(){return!0},eUe.li=function(e,t){return ex7(this,e,Pp(t,56))},Y5(eJz,"EObjectResolvingEList/Unsettable",1641),eTS(1381,1,{},us),Y5(eJz,"EObjectValidator",1381),eTS(546,496,eJ9,F1),eUe.zk=function(){return this.d},eUe.Ak=function(){return this.b},eUe.bj=function(){return!0},eUe.Dk=function(){return!0},eUe.b=0,Y5(eJz,"EObjectWithInverseEList",546),eTS(1176,546,eJ9,Iu),eUe.Ck=function(){return!0},Y5(eJz,"EObjectWithInverseEList/ManyInverse",1176),eTS(625,546,eJ9,Ic),eUe.ci=function(){this.a=!0},eUe.fj=function(){return this.a},eUe.Xj=function(){var 
e;eRT(this),TO(this.e)?(e=this.a,this.a=!1,eam(this.e,new ZB(this.e,2,this.c,e,!1))):this.a=!1},eUe.a=!1,Y5(eJz,"EObjectWithInverseEList/Unsettable",625),eTS(1175,625,eJ9,If),eUe.Ck=function(){return!0},Y5(eJz,"EObjectWithInverseEList/Unsettable/ManyInverse",1175),eTS(749,546,eJ9,Il),eUe.Ek=function(){return!0},eUe.li=function(e,t){return ex7(this,e,Pp(t,56))},Y5(eJz,"EObjectWithInverseResolvingEList",749),eTS(31,749,eJ9,Ih),eUe.Ck=function(){return!0},Y5(eJz,"EObjectWithInverseResolvingEList/ManyInverse",31),eTS(750,625,eJ9,Id),eUe.Ek=function(){return!0},eUe.li=function(e,t){return ex7(this,e,Pp(t,56))},Y5(eJz,"EObjectWithInverseResolvingEList/Unsettable",750),eTS(1174,750,eJ9,Ip),eUe.Ck=function(){return!0},Y5(eJz,"EObjectWithInverseResolvingEList/Unsettable/ManyInverse",1174),eTS(1164,622,eJ9),eUe.ai=function(){return(1792&this.b)==0},eUe.ci=function(){this.b|=1},eUe.Bk=function(){return(4&this.b)!=0},eUe.bj=function(){return(40&this.b)!=0},eUe.Ck=function(){return(16&this.b)!=0},eUe.Dk=function(){return(8&this.b)!=0},eUe.Ek=function(){return(this.b&eJq)!=0},eUe.rk=function(){return(32&this.b)!=0},eUe.Fk=function(){return(this.b&eXt)!=0},eUe.wj=function(e){return this.d?VB(this.d,e):this.ak().Yj().wj(e)},eUe.fj=function(){return(2&this.b)!=0?(1&this.b)!=0:0!=this.i},eUe.hi=function(){return(128&this.b)!=0},eUe.Xj=function(){var e;eRT(this),(2&this.b)!=0&&(TO(this.e)?(e=(1&this.b)!=0,this.b&=-2,bz(this,new ZB(this.e,2,edv(this.e.Tg(),this.ak()),e,!1))):this.b&=-2)},eUe.ni=function(){return(1536&this.b)==0},eUe.b=0,Y5(eJz,"EcoreEList/Generic",1164),eTS(1165,1164,eJ9,H2),eUe.ak=function(){return this.a},Y5(eJz,"EcoreEList/Dynamic",1165),eTS(747,63,eXz,pZ),eUe.ri=function(e){return enb(this.a.a,e)},Y5(eJz,"EcoreEMap/1",747),eTS(746,85,eJ9,FZ),eUe.bi=function(e,t){ebB(this.b,Pp(t,133))},eUe.di=function(e,t){eac(this.b)},eUe.ei=function(e,t,n){var 
r;++(r=this.b,Pp(t,133),r).e},eUe.fi=function(e,t){elj(this.b,Pp(t,133))},eUe.gi=function(e,t,n){elj(this.b,Pp(n,133)),xc(n)===xc(t)&&Pp(n,133).Th(Mi(Pp(t,133).cd())),ebB(this.b,Pp(t,133))},Y5(eJz,"EcoreEMap/DelegateEObjectContainmentEList",746),eTS(1171,151,eJW,enQ),Y5(eJz,"EcoreEMap/Unsettable",1171),eTS(1172,746,eJ9,Ib),eUe.ci=function(){this.a=!0},eUe.fj=function(){return this.a},eUe.Xj=function(){var e;eRT(this),TO(this.e)?(e=this.a,this.a=!1,eam(this.e,new ZB(this.e,2,this.c,e,!1))):this.a=!1},eUe.a=!1,Y5(eJz,"EcoreEMap/Unsettable/UnsettableDelegateEObjectContainmentEList",1172),eTS(1168,228,e$s,YQ),eUe.a=!1,eUe.b=!1,Y5(eJz,"EcoreUtil/Copier",1168),eTS(745,1,eUE,K0),eUe.Nb=function(e){F8(this,e)},eUe.Ob=function(){return edV(this)},eUe.Pb=function(){var e;return edV(this),e=this.b,this.b=null,e},eUe.Qb=function(){this.a.Qb()},Y5(eJz,"EcoreUtil/ProperContentIterator",745),eTS(1382,1381,{},c2),Y5(eJz,"EcoreValidator",1382),RL(eJz,"FeatureMapUtil/Validator"),eTS(1260,1,{1942:1},uu),eUe.rl=function(e){return!0},Y5(eJz,"FeatureMapUtil/1",1260),eTS(757,1,{1942:1},eF2),eUe.rl=function(e){var t;return this.c==e||(null!=(t=LK(Bp(this.a,e)))?t==(OQ(),e0P):eCV(this,e)?(Z$(this.a,e,(OQ(),e0P)),!0):(Z$(this.a,e,(OQ(),e0N)),!1))},eUe.e=!1,Y5(eJz,"FeatureMapUtil/BasicValidator",757),eTS(758,43,e$s,MW),Y5(eJz,"FeatureMapUtil/BasicValidator/Cache",758),eTS(501,52,{20:1,28:1,52:1,14:1,15:1,58:1,76:1,69:1,95:1},xe),eUe.Vc=function(e,t){eLe(this.c,this.b,e,t)},eUe.Fc=function(e){return eIF(this.c,this.b,e)},eUe.Wc=function(e,t){return ePq(this.c,this.b,e,t)},eUe.Gc=function(e){return MJ(this,e)},eUe.Xh=function(e,t){ee7(this.c,this.b,e,t)},eUe.lk=function(e,t){return eCB(this.c,this.b,e,t)},eUe.pi=function(e){return ePL(this.c,this.b,e,!1)},eUe.Zh=function(){return TC(this.c,this.b)},eUe.$h=function(){return TI(this.c,this.b)},eUe._h=function(e){return X9(this.c,this.b,e)},eUe.mk=function(e,t){return Cp(this,e,t)},eUe.$b=function(){bG(this)},eUe.Hc=function(e){return 
Vq(this.c,this.b,e)},eUe.Ic=function(e){return eiF(this.c,this.b,e)},eUe.Xb=function(e){return ePL(this.c,this.b,e,!0)},eUe.Wj=function(e){return this},eUe.Xc=function(e){return VZ(this.c,this.b,e)},eUe.dc=function(){return xs(this)},eUe.fj=function(){return!edK(this.c,this.b)},eUe.Kc=function(){return eei(this.c,this.b)},eUe.Yc=function(){return eea(this.c,this.b)},eUe.Zc=function(e){return ely(this.c,this.b,e)},eUe.ii=function(e,t){return eNn(this.c,this.b,e,t)},eUe.ji=function(e,t){Xx(this.c,this.b,e,t)},eUe.$c=function(e){return eE0(this.c,this.b,e)},eUe.Mc=function(e){return eIC(this.c,this.b,e)},eUe._c=function(e,t){return eNL(this.c,this.b,e,t)},eUe.Wb=function(e){exZ(this.c,this.b),MJ(this,Pp(e,15))},eUe.gc=function(){return elG(this.c,this.b)},eUe.Pc=function(){return Wb(this.c,this.b)},eUe.Qc=function(e){return VX(this.c,this.b,e)},eUe.Ib=function(){var e,t;for(t=new vs,t.a+="[",e=TC(this.c,this.b);euf(e);)xk(t,Ae(ebm(e))),euf(e)&&(t.a+=eUd);return t.a+="]",t.a},eUe.Xj=function(){exZ(this.c,this.b)},Y5(eJz,"FeatureMapUtil/FeatureEList",501),eTS(627,36,eJx,qu),eUe.yi=function(e){return elc(this,e)},eUe.Di=function(e){var t,n,r,i,a,o,s;switch(this.d){case 1:case 2:if(xc(a=e.Ai())===xc(this.c)&&elc(this,null)==e.yi(null))return this.g=e.zi(),1==e.xi()&&(this.d=1),!0;break;case 3:if(3===(i=e.xi())&&xc(a=e.Ai())===xc(this.c)&&elc(this,null)==e.yi(null))return this.d=5,JL(t=new eta(2),this.g),JL(t,e.zi()),this.g=t,!0;break;case 5:if(3===(i=e.xi())&&xc(a=e.Ai())===xc(this.c)&&elc(this,null)==e.yi(null))return(n=Pp(this.g,14)).Fc(e.zi()),!0;break;case 4:switch(i=e.xi()){case 3:if(xc(a=e.Ai())===xc(this.c)&&elc(this,null)==e.yi(null))return this.d=1,this.g=e.zi(),!0;break;case 4:if(xc(a=e.Ai())===xc(this.c)&&elc(this,null)==e.yi(null))return this.d=6,JL(s=new eta(2),this.n),JL(s,e.Bi()),this.n=s,o=eow(vx(ty_,1),eHT,25,15,[this.o,e.Ci()]),this.g=o,!0}break;case 
6:if(4===(i=e.xi())&&xc(a=e.Ai())===xc(this.c)&&elc(this,null)==e.yi(null))return(n=Pp(this.n,14)).Fc(e.Bi()),r=Je(ty_,eHT,25,(o=Pp(this.g,48)).length+1,15,1),ePD(o,0,r,0,o.length),r[o.length]=e.Ci(),this.g=r,!0}return!1},Y5(eJz,"FeatureMapUtil/FeatureENotificationImpl",627),eTS(552,501,{20:1,28:1,52:1,14:1,15:1,58:1,76:1,153:1,215:1,1937:1,69:1,95:1},RA),eUe.dl=function(e,t){return eIF(this.c,e,t)},eUe.el=function(e,t,n){return eCB(this.c,e,t,n)},eUe.fl=function(e,t,n){return ePT(this.c,e,t,n)},eUe.gl=function(){return this},eUe.hl=function(e,t){return ePC(this.c,e,t)},eUe.il=function(e){return Pp(ePL(this.c,this.b,e,!1),72).ak()},eUe.jl=function(e){return Pp(ePL(this.c,this.b,e,!1),72).dd()},eUe.kl=function(){return this.a},eUe.ll=function(e){return!edK(this.c,e)},eUe.ml=function(e,t){ePJ(this.c,e,t)},eUe.nl=function(e){return erp(this.c,e)},eUe.ol=function(e){emY(this.c,e)},Y5(eJz,"FeatureMapUtil/FeatureFeatureMap",552),eTS(1259,1,eJG,xr),eUe.Wj=function(e){return ePL(this.b,this.a,-1,e)},eUe.fj=function(){return!edK(this.b,this.a)},eUe.Wb=function(e){ePJ(this.b,this.a,e)},eUe.Xj=function(){exZ(this.b,this.a)},Y5(eJz,"FeatureMapUtil/FeatureValue",1259);var tve=RL(eQz,"AnyType");eTS(666,60,eHr,gV),Y5(eQz,"InvalidDatatypeValueException",666);var tvt=RL(eQz,eQG),tvn=RL(eQz,eQW),tvr=RL(eQz,eQK);eTS(830,506,{105:1,92:1,90:1,56:1,49:1,97:1,843:1},mF),eUe._g=function(e,t,n){switch(e){case 0:if(n)return this.c||(this.c=new eiR(this,0)),this.c;return this.c||(this.c=new eiR(this,0)),this.c.b;case 1:if(n)return this.c||(this.c=new eiR(this,0)),Pp(GP(this.c,(eR7(),tvx)),153);return(this.c||(this.c=new eiR(this,0)),Pp(Pp(GP(this.c,(eR7(),tvx)),153),215)).kl();case 2:if(n)return this.b||(this.b=new eiR(this,2)),this.b;return this.b||(this.b=new eiR(this,2)),this.b.b}return Qt(this,e-Y1(this.zh()),ee2((2&this.j)==0?this.zh():(this.k||(this.k=new c1),this.k).ck(),e),t,n)},eUe.jh=function(e,t,n){var r;switch(t){case 0:return this.c||(this.c=new eiR(this,0)),eIM(this.c,e,n);case 
1:return(this.c||(this.c=new eiR(this,0)),Pp(Pp(GP(this.c,(eR7(),tvx)),153),69)).mk(e,n);case 2:return this.b||(this.b=new eiR(this,2)),eIM(this.b,e,n)}return(r=Pp(ee2((2&this.j)==0?this.zh():(this.k||(this.k=new c1),this.k).ck(),t),66)).Nj().Rj(this,Q5(this),t-Y1(this.zh()),e,n)},eUe.lh=function(e){switch(e){case 0:return!!this.c&&0!=this.c.i;case 1:return!(this.c||(this.c=new eiR(this,0)),Pp(GP(this.c,(eR7(),tvx)),153)).dc();case 2:return!!this.b&&0!=this.b.i}return VP(this,e-Y1(this.zh()),ee2((2&this.j)==0?this.zh():(this.k||(this.k=new c1),this.k).ck(),e))},eUe.sh=function(e,t){switch(e){case 0:this.c||(this.c=new eiR(this,0)),YH(this.c,t);return;case 1:(this.c||(this.c=new eiR(this,0)),Pp(Pp(GP(this.c,(eR7(),tvx)),153),215)).Wb(t);return;case 2:this.b||(this.b=new eiR(this,2)),YH(this.b,t);return}efL(this,e-Y1(this.zh()),ee2((2&this.j)==0?this.zh():(this.k||(this.k=new c1),this.k).ck(),e),t)},eUe.zh=function(){return eR7(),tvk},eUe.Bh=function(e){switch(e){case 0:this.c||(this.c=new eiR(this,0)),eRT(this.c);return;case 1:(this.c||(this.c=new eiR(this,0)),Pp(GP(this.c,(eR7(),tvx)),153)).$b();return;case 2:this.b||(this.b=new eiR(this,2)),eRT(this.b);return}ec6(this,e-Y1(this.zh()),ee2((2&this.j)==0?this.zh():(this.k||(this.k=new c1),this.k).ck(),e))},eUe.Ib=function(){var e;return(4&this.j)!=0?eMT(this):(e=new O1(eMT(this)),e.a+=" (mixed: ",xS(e,this.c),e.a+=", anyAttribute: ",xS(e,this.b),e.a+=")",e.a)},Y5(eQV,"AnyTypeImpl",830),eTS(667,506,{105:1,92:1,90:1,56:1,49:1,97:1,2021:1,667:1},ul),eUe._g=function(e,t,n){switch(e){case 0:return this.a;case 1:return this.b}return Qt(this,e-Y1((eR7(),tvj)),ee2((2&this.j)==0?tvj:(this.k||(this.k=new c1),this.k).ck(),e),t,n)},eUe.lh=function(e){switch(e){case 0:return null!=this.a;case 1:return null!=this.b}return VP(this,e-Y1((eR7(),tvj)),ee2((2&this.j)==0?tvj:(this.k||(this.k=new c1),this.k).ck(),e))},eUe.sh=function(e,t){switch(e){case 0:fg(this,Lq(t));return;case 
1:fv(this,Lq(t));return}efL(this,e-Y1((eR7(),tvj)),ee2((2&this.j)==0?tvj:(this.k||(this.k=new c1),this.k).ck(),e),t)},eUe.zh=function(){return eR7(),tvj},eUe.Bh=function(e){switch(e){case 0:this.a=null;return;case 1:this.b=null;return}ec6(this,e-Y1((eR7(),tvj)),ee2((2&this.j)==0?tvj:(this.k||(this.k=new c1),this.k).ck(),e))},eUe.Ib=function(){var e;return(4&this.j)!=0?eMT(this):(e=new O1(eMT(this)),e.a+=" (data: ",xk(e,this.a),e.a+=", target: ",xk(e,this.b),e.a+=")",e.a)},eUe.a=null,eUe.b=null,Y5(eQV,"ProcessingInstructionImpl",667),eTS(668,830,{105:1,92:1,90:1,56:1,49:1,97:1,843:1,2022:1,668:1},mB),eUe._g=function(e,t,n){switch(e){case 0:if(n)return this.c||(this.c=new eiR(this,0)),this.c;return this.c||(this.c=new eiR(this,0)),this.c.b;case 1:if(n)return this.c||(this.c=new eiR(this,0)),Pp(GP(this.c,(eR7(),tvx)),153);return(this.c||(this.c=new eiR(this,0)),Pp(Pp(GP(this.c,(eR7(),tvx)),153),215)).kl();case 2:if(n)return this.b||(this.b=new eiR(this,2)),this.b;return this.b||(this.b=new eiR(this,2)),this.b.b;case 3:return this.c||(this.c=new eiR(this,0)),Lq(ePC(this.c,(eR7(),tvB),!0));case 4:return Iy(this.a,(this.c||(this.c=new eiR(this,0)),Lq(ePC(this.c,(eR7(),tvB),!0))));case 5:return this.a}return Qt(this,e-Y1((eR7(),tvY)),ee2((2&this.j)==0?tvY:(this.k||(this.k=new c1),this.k).ck(),e),t,n)},eUe.lh=function(e){switch(e){case 0:return!!this.c&&0!=this.c.i;case 1:return!(this.c||(this.c=new eiR(this,0)),Pp(GP(this.c,(eR7(),tvx)),153)).dc();case 2:return!!this.b&&0!=this.b.i;case 3:return this.c||(this.c=new eiR(this,0)),null!=Lq(ePC(this.c,(eR7(),tvB),!0));case 4:return null!=Iy(this.a,(this.c||(this.c=new eiR(this,0)),Lq(ePC(this.c,(eR7(),tvB),!0))));case 5:return!!this.a}return VP(this,e-Y1((eR7(),tvY)),ee2((2&this.j)==0?tvY:(this.k||(this.k=new c1),this.k).ck(),e))},eUe.sh=function(e,t){switch(e){case 0:this.c||(this.c=new eiR(this,0)),YH(this.c,t);return;case 1:(this.c||(this.c=new eiR(this,0)),Pp(Pp(GP(this.c,(eR7(),tvx)),153),215)).Wb(t);return;case 
2:this.b||(this.b=new eiR(this,2)),YH(this.b,t);return;case 3:Kt(this,Lq(t));return;case 4:Kt(this,Iw(this.a,t));return;case 5:fy(this,Pp(t,148));return}efL(this,e-Y1((eR7(),tvY)),ee2((2&this.j)==0?tvY:(this.k||(this.k=new c1),this.k).ck(),e),t)},eUe.zh=function(){return eR7(),tvY},eUe.Bh=function(e){switch(e){case 0:this.c||(this.c=new eiR(this,0)),eRT(this.c);return;case 1:(this.c||(this.c=new eiR(this,0)),Pp(GP(this.c,(eR7(),tvx)),153)).$b();return;case 2:this.b||(this.b=new eiR(this,2)),eRT(this.b);return;case 3:this.c||(this.c=new eiR(this,0)),ePJ(this.c,(eR7(),tvB),null);return;case 4:Kt(this,Iw(this.a,null));return;case 5:this.a=null;return}ec6(this,e-Y1((eR7(),tvY)),ee2((2&this.j)==0?tvY:(this.k||(this.k=new c1),this.k).ck(),e))},Y5(eQV,"SimpleAnyTypeImpl",668),eTS(669,506,{105:1,92:1,90:1,56:1,49:1,97:1,2023:1,669:1},mY),eUe._g=function(e,t,n){switch(e){case 0:if(n)return this.a||(this.a=new eiR(this,0)),this.a;return this.a||(this.a=new eiR(this,0)),this.a.b;case 1:return n?(this.b||(this.b=new JY((eBK(),tgF),tgf,this,1)),this.b):(this.b||(this.b=new JY((eBK(),tgF),tgf,this,1)),X6(this.b));case 2:return n?(this.c||(this.c=new JY((eBK(),tgF),tgf,this,2)),this.c):(this.c||(this.c=new JY((eBK(),tgF),tgf,this,2)),X6(this.c));case 3:return this.a||(this.a=new eiR(this,0)),GP(this.a,(eR7(),tv$));case 4:return this.a||(this.a=new eiR(this,0)),GP(this.a,(eR7(),tvz));case 5:return this.a||(this.a=new eiR(this,0)),GP(this.a,(eR7(),tvW));case 6:return this.a||(this.a=new eiR(this,0)),GP(this.a,(eR7(),tvK))}return Qt(this,e-Y1((eR7(),tvH)),ee2((2&this.j)==0?tvH:(this.k||(this.k=new c1),this.k).ck(),e),t,n)},eUe.jh=function(e,t,n){var r;switch(t){case 0:return this.a||(this.a=new eiR(this,0)),eIM(this.a,e,n);case 1:return this.b||(this.b=new JY((eBK(),tgF),tgf,this,1)),Iz(this.b,e,n);case 2:return this.c||(this.c=new JY((eBK(),tgF),tgf,this,2)),Iz(this.c,e,n);case 5:return this.a||(this.a=new 
eiR(this,0)),Cp(GP(this.a,(eR7(),tvW)),e,n)}return(r=Pp(ee2((2&this.j)==0?(eR7(),tvH):(this.k||(this.k=new c1),this.k).ck(),t),66)).Nj().Rj(this,Q5(this),t-Y1((eR7(),tvH)),e,n)},eUe.lh=function(e){switch(e){case 0:return!!this.a&&0!=this.a.i;case 1:return!!this.b&&0!=this.b.f;case 2:return!!this.c&&0!=this.c.f;case 3:return this.a||(this.a=new eiR(this,0)),!xs(GP(this.a,(eR7(),tv$)));case 4:return this.a||(this.a=new eiR(this,0)),!xs(GP(this.a,(eR7(),tvz)));case 5:return this.a||(this.a=new eiR(this,0)),!xs(GP(this.a,(eR7(),tvW)));case 6:return this.a||(this.a=new eiR(this,0)),!xs(GP(this.a,(eR7(),tvK)))}return VP(this,e-Y1((eR7(),tvH)),ee2((2&this.j)==0?tvH:(this.k||(this.k=new c1),this.k).ck(),e))},eUe.sh=function(e,t){switch(e){case 0:this.a||(this.a=new eiR(this,0)),YH(this.a,t);return;case 1:this.b||(this.b=new JY((eBK(),tgF),tgf,this,1)),eai(this.b,t);return;case 2:this.c||(this.c=new JY((eBK(),tgF),tgf,this,2)),eai(this.c,t);return;case 3:this.a||(this.a=new eiR(this,0)),bG(GP(this.a,(eR7(),tv$))),this.a||(this.a=new eiR(this,0)),MJ(GP(this.a,tv$),Pp(t,14));return;case 4:this.a||(this.a=new eiR(this,0)),bG(GP(this.a,(eR7(),tvz))),this.a||(this.a=new eiR(this,0)),MJ(GP(this.a,tvz),Pp(t,14));return;case 5:this.a||(this.a=new eiR(this,0)),bG(GP(this.a,(eR7(),tvW))),this.a||(this.a=new eiR(this,0)),MJ(GP(this.a,tvW),Pp(t,14));return;case 6:this.a||(this.a=new eiR(this,0)),bG(GP(this.a,(eR7(),tvK))),this.a||(this.a=new eiR(this,0)),MJ(GP(this.a,tvK),Pp(t,14));return}efL(this,e-Y1((eR7(),tvH)),ee2((2&this.j)==0?tvH:(this.k||(this.k=new c1),this.k).ck(),e),t)},eUe.zh=function(){return eR7(),tvH},eUe.Bh=function(e){switch(e){case 0:this.a||(this.a=new eiR(this,0)),eRT(this.a);return;case 1:this.b||(this.b=new JY((eBK(),tgF),tgf,this,1)),this.b.c.$b();return;case 2:this.c||(this.c=new JY((eBK(),tgF),tgf,this,2)),this.c.c.$b();return;case 3:this.a||(this.a=new eiR(this,0)),bG(GP(this.a,(eR7(),tv$)));return;case 4:this.a||(this.a=new 
eiR(this,0)),bG(GP(this.a,(eR7(),tvz)));return;case 5:this.a||(this.a=new eiR(this,0)),bG(GP(this.a,(eR7(),tvW)));return;case 6:this.a||(this.a=new eiR(this,0)),bG(GP(this.a,(eR7(),tvK)));return}ec6(this,e-Y1((eR7(),tvH)),ee2((2&this.j)==0?tvH:(this.k||(this.k=new c1),this.k).ck(),e))},eUe.Ib=function(){var e;return(4&this.j)!=0?eMT(this):(e=new O1(eMT(this)),e.a+=" (mixed: ",xS(e,this.a),e.a+=")",e.a)},Y5(eQV,"XMLTypeDocumentRootImpl",669),eTS(1919,704,{105:1,92:1,90:1,471:1,147:1,56:1,108:1,49:1,97:1,150:1,114:1,115:1,2024:1},uc),eUe.Ih=function(e,t){switch(e.yj()){case 7:case 8:case 9:case 10:case 16:case 22:case 23:case 24:case 25:case 26:case 32:case 33:case 34:case 36:case 37:case 44:case 45:case 50:case 51:case 53:case 55:case 56:case 57:case 58:case 60:case 61:case 4:return null==t?null:efF(t);case 19:case 28:case 29:case 35:case 38:case 39:case 41:case 46:case 52:case 54:case 5:return Lq(t);case 6:return LH(Pp(t,190));case 12:case 47:case 49:case 11:return ejZ(this,e,t);case 13:return null==t?null:ePg(Pp(t,240));case 15:case 14:return null==t?null:Yk(gP(LV(t)));case 17:return eyV((eR7(),t));case 18:return eyV(t);case 21:case 20:return null==t?null:Yx(Pp(t,155).a);case 27:return L$(Pp(t,190));case 30:return emB((eR7(),Pp(t,15)));case 31:return emB(Pp(t,15));case 40:return LG((eR7(),t));case 42:return eyq((eR7(),t));case 43:return eyq(t);case 59:case 48:return Lz((eR7(),t));default:throw p7(new gL(eZ5+e.ne()+eZ6))}},eUe.Jh=function(e){var t;switch(-1==e.G&&(e.G=(t=etP(e))?ebv(t.Mh(),e):-1),e.G){case 0:return new mF;case 1:return new ul;case 2:return new mB;case 3:return new mY;default:throw p7(new gL(eZ7+e.zb+eZ6))}},eUe.Kh=function(e,t){var n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g;switch(e.yj()){case 5:case 52:case 4:return t;case 6:return epU(t);case 8:case 7:return null==t?null:ewe(t);case 9:return null==t?null:eeT(eDa((r=ePh(t,!0)).length>0&&(GV(0,r.length),43==r.charCodeAt(0))?r.substr(1):r,-128,127)<<24>>24);case 10:return 
null==t?null:eeT(eDa((i=ePh(t,!0)).length>0&&(GV(0,i.length),43==i.charCodeAt(0))?i.substr(1):i,-128,127)<<24>>24);case 11:return Lq(eBd(this,(eR7(),tvO),t));case 12:return Lq(eBd(this,(eR7(),tvA),t));case 13:return null==t?null:new yY(ePh(t,!0));case 15:case 14:return eOa(t);case 16:return Lq(eBd(this,(eR7(),tvL),t));case 17:return ehy((eR7(),t));case 18:return ehy(t);case 28:case 29:case 35:case 38:case 39:case 41:case 54:case 19:return ePh(t,!0);case 21:case 20:return eOv(t);case 22:return Lq(eBd(this,(eR7(),tvC),t));case 23:return Lq(eBd(this,(eR7(),tvI),t));case 24:return Lq(eBd(this,(eR7(),tvD),t));case 25:return Lq(eBd(this,(eR7(),tvN),t));case 26:return Lq(eBd(this,(eR7(),tvP),t));case 27:return epw(t);case 30:return ehw((eR7(),t));case 31:return ehw(t);case 32:return null==t?null:ell(eDa((l=ePh(t,!0)).length>0&&(GV(0,l.length),43==l.charCodeAt(0))?l.substr(1):l,eHt,eUu));case 33:return null==t?null:new TU((f=ePh(t,!0)).length>0&&(GV(0,f.length),43==f.charCodeAt(0))?f.substr(1):f);case 34:return null==t?null:ell(eDa((d=ePh(t,!0)).length>0&&(GV(0,d.length),43==d.charCodeAt(0))?d.substr(1):d,eHt,eUu));case 36:return null==t?null:ehQ(eF0((h=ePh(t,!0)).length>0&&(GV(0,h.length),43==h.charCodeAt(0))?h.substr(1):h));case 37:return null==t?null:ehQ(eF0((p=ePh(t,!0)).length>0&&(GV(0,p.length),43==p.charCodeAt(0))?p.substr(1):p));case 40:return edR((eR7(),t));case 42:return eh_((eR7(),t));case 43:return eh_(t);case 44:return null==t?null:new TU((b=ePh(t,!0)).length>0&&(GV(0,b.length),43==b.charCodeAt(0))?b.substr(1):b);case 45:return null==t?null:new TU((m=ePh(t,!0)).length>0&&(GV(0,m.length),43==m.charCodeAt(0))?m.substr(1):m);case 46:return ePh(t,!1);case 47:return Lq(eBd(this,(eR7(),tvR),t));case 59:case 48:return edP((eR7(),t));case 49:return Lq(eBd(this,(eR7(),tvF),t));case 50:return null==t?null:elf(eDa((g=ePh(t,!0)).length>0&&(GV(0,g.length),43==g.charCodeAt(0))?g.substr(1):g,eQl,32767)<<16>>16);case 51:return 
null==t?null:elf(eDa((a=ePh(t,!0)).length>0&&(GV(0,a.length),43==a.charCodeAt(0))?a.substr(1):a,eQl,32767)<<16>>16);case 53:return Lq(eBd(this,(eR7(),tvU),t));case 55:return null==t?null:elf(eDa((o=ePh(t,!0)).length>0&&(GV(0,o.length),43==o.charCodeAt(0))?o.substr(1):o,eQl,32767)<<16>>16);case 56:return null==t?null:elf(eDa((s=ePh(t,!0)).length>0&&(GV(0,s.length),43==s.charCodeAt(0))?s.substr(1):s,eQl,32767)<<16>>16);case 57:return null==t?null:ehQ(eF0((u=ePh(t,!0)).length>0&&(GV(0,u.length),43==u.charCodeAt(0))?u.substr(1):u));case 58:return null==t?null:ehQ(eF0((c=ePh(t,!0)).length>0&&(GV(0,c.length),43==c.charCodeAt(0))?c.substr(1):c));case 60:return null==t?null:ell(eDa((n=ePh(t,!0)).length>0&&(GV(0,n.length),43==n.charCodeAt(0))?n.substr(1):n,eHt,eUu));case 61:return null==t?null:ell(eDa(ePh(t,!0),eHt,eUu));default:throw p7(new gL(eZ5+e.ne()+eZ6))}},Y5(eQV,"XMLTypeFactoryImpl",1919),eTS(586,179,{105:1,92:1,90:1,147:1,191:1,56:1,235:1,108:1,49:1,97:1,150:1,179:1,114:1,115:1,675:1,1945:1,586:1},Ud),eUe.N=!1,eUe.O=!1;var tvi=!1;Y5(eQV,"XMLTypePackageImpl",586),eTS(1852,1,{837:1},uf),eUe._j=function(){return eD4(),eB2},Y5(eQV,"XMLTypePackageImpl/1",1852),eTS(1861,1,eQS,ud),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/10",1861),eTS(1862,1,eQS,uh),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/11",1862),eTS(1863,1,eQS,up),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/12",1863),eTS(1864,1,eQS,ub),eUe.wj=function(e){return xf(e)},eUe.xj=function(e){return Je(e13,eUP,333,e,7,1)},Y5(eQV,"XMLTypePackageImpl/13",1864),eTS(1865,1,eQS,um),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/14",1865),eTS(1866,1,eQS,ug),eUe.wj=function(e){return M4(e,15)},eUe.xj=function(e){return 
Je(e1H,ezZ,15,e,0,1)},Y5(eQV,"XMLTypePackageImpl/15",1866),eTS(1867,1,eQS,uv),eUe.wj=function(e){return M4(e,15)},eUe.xj=function(e){return Je(e1H,ezZ,15,e,0,1)},Y5(eQV,"XMLTypePackageImpl/16",1867),eTS(1868,1,eQS,uy),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/17",1868),eTS(1869,1,eQS,uw),eUe.wj=function(e){return M4(e,155)},eUe.xj=function(e){return Je(e14,eUP,155,e,0,1)},Y5(eQV,"XMLTypePackageImpl/18",1869),eTS(1870,1,eQS,u_),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/19",1870),eTS(1853,1,eQS,uE),eUe.wj=function(e){return M4(e,843)},eUe.xj=function(e){return Je(tve,eUp,843,e,0,1)},Y5(eQV,"XMLTypePackageImpl/2",1853),eTS(1871,1,eQS,uS),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/20",1871),eTS(1872,1,eQS,uk),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/21",1872),eTS(1873,1,eQS,ux),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/22",1873),eTS(1874,1,eQS,uT),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/23",1874),eTS(1875,1,eQS,uM),eUe.wj=function(e){return M4(e,190)},eUe.xj=function(e){return Je(tyk,eUP,190,e,0,2)},Y5(eQV,"XMLTypePackageImpl/24",1875),eTS(1876,1,eQS,uO),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/25",1876),eTS(1877,1,eQS,uA),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/26",1877),eTS(1878,1,eQS,uL),eUe.wj=function(e){return M4(e,15)},eUe.xj=function(e){return Je(e1H,ezZ,15,e,0,1)},Y5(eQV,"XMLTypePackageImpl/27",1878),eTS(1879,1,eQS,uC),eUe.wj=function(e){return M4(e,15)},eUe.xj=function(e){return 
Je(e1H,ezZ,15,e,0,1)},Y5(eQV,"XMLTypePackageImpl/28",1879),eTS(1880,1,eQS,uI),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/29",1880),eTS(1854,1,eQS,uD),eUe.wj=function(e){return M4(e,667)},eUe.xj=function(e){return Je(tvt,eUp,2021,e,0,1)},Y5(eQV,"XMLTypePackageImpl/3",1854),eTS(1881,1,eQS,uN),eUe.wj=function(e){return M4(e,19)},eUe.xj=function(e){return Je(e15,eUP,19,e,0,1)},Y5(eQV,"XMLTypePackageImpl/30",1881),eTS(1882,1,eQS,uP),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/31",1882),eTS(1883,1,eQS,uR),eUe.wj=function(e){return M4(e,162)},eUe.xj=function(e){return Je(e16,eUP,162,e,0,1)},Y5(eQV,"XMLTypePackageImpl/32",1883),eTS(1884,1,eQS,uj),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/33",1884),eTS(1885,1,eQS,uF),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/34",1885),eTS(1886,1,eQS,uY),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/35",1886),eTS(1887,1,eQS,uB),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/36",1887),eTS(1888,1,eQS,uU),eUe.wj=function(e){return M4(e,15)},eUe.xj=function(e){return Je(e1H,ezZ,15,e,0,1)},Y5(eQV,"XMLTypePackageImpl/37",1888),eTS(1889,1,eQS,uH),eUe.wj=function(e){return M4(e,15)},eUe.xj=function(e){return Je(e1H,ezZ,15,e,0,1)},Y5(eQV,"XMLTypePackageImpl/38",1889),eTS(1890,1,eQS,u$),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/39",1890),eTS(1855,1,eQS,uz),eUe.wj=function(e){return M4(e,668)},eUe.xj=function(e){return Je(tvn,eUp,2022,e,0,1)},Y5(eQV,"XMLTypePackageImpl/4",1855),eTS(1891,1,eQS,uG),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return 
Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/40",1891),eTS(1892,1,eQS,uW),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/41",1892),eTS(1893,1,eQS,uK),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/42",1893),eTS(1894,1,eQS,uV),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/43",1894),eTS(1895,1,eQS,uq),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/44",1895),eTS(1896,1,eQS,uZ),eUe.wj=function(e){return M4(e,184)},eUe.xj=function(e){return Je(e19,eUP,184,e,0,1)},Y5(eQV,"XMLTypePackageImpl/45",1896),eTS(1897,1,eQS,uX),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/46",1897),eTS(1898,1,eQS,uJ),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/47",1898),eTS(1899,1,eQS,uQ),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/48",1899),eTS(eHx,1,eQS,u1),eUe.wj=function(e){return M4(e,184)},eUe.xj=function(e){return Je(e19,eUP,184,e,0,1)},Y5(eQV,"XMLTypePackageImpl/49",eHx),eTS(1856,1,eQS,u0),eUe.wj=function(e){return M4(e,669)},eUe.xj=function(e){return Je(tvr,eUp,2023,e,0,1)},Y5(eQV,"XMLTypePackageImpl/5",1856),eTS(1901,1,eQS,u2),eUe.wj=function(e){return M4(e,162)},eUe.xj=function(e){return Je(e16,eUP,162,e,0,1)},Y5(eQV,"XMLTypePackageImpl/50",1901),eTS(1902,1,eQS,u3),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/51",1902),eTS(1903,1,eQS,u4),eUe.wj=function(e){return M4(e,19)},eUe.xj=function(e){return Je(e15,eUP,19,e,0,1)},Y5(eQV,"XMLTypePackageImpl/52",1903),eTS(1857,1,eQS,u5),eUe.wj=function(e){return xd(e)},eUe.xj=function(e){return 
Je(e17,eUP,2,e,6,1)},Y5(eQV,"XMLTypePackageImpl/6",1857),eTS(1858,1,eQS,u6),eUe.wj=function(e){return M4(e,190)},eUe.xj=function(e){return Je(tyk,eUP,190,e,0,2)},Y5(eQV,"XMLTypePackageImpl/7",1858),eTS(1859,1,eQS,u9),eUe.wj=function(e){return xl(e)},eUe.xj=function(e){return Je(e11,eUP,476,e,8,1)},Y5(eQV,"XMLTypePackageImpl/8",1859),eTS(1860,1,eQS,u8),eUe.wj=function(e){return M4(e,217)},eUe.xj=function(e){return Je(e10,eUP,217,e,0,1)},Y5(eQV,"XMLTypePackageImpl/9",1860),eTS(50,60,eHr,gX),Y5(e1l,"RegEx/ParseException",50),eTS(820,1,{},u7),eUe.sl=function(e){return e16*n)throw p7(new gX(eBJ((Mo(),eJd))));n=16*n+i}if(125!=this.a)throw p7(new gX(eBJ((Mo(),eJh))));if(n>e1f)throw p7(new gX(eBJ((Mo(),eJp))));e=n}else{if(i=0,0!=this.c||(i=eb0(this.a))<0||(n=i,eBM(this),0!=this.c||(i=eb0(this.a))<0))throw p7(new gX(eBJ((Mo(),eJf))));e=n=16*n+i}break;case 117:if(r=0,eBM(this),0!=this.c||(r=eb0(this.a))<0||(t=r,eBM(this),0!=this.c||(r=eb0(this.a))<0)||(t=16*t+r,eBM(this),0!=this.c||(r=eb0(this.a))<0)||(t=16*t+r,eBM(this),0!=this.c||(r=eb0(this.a))<0))throw p7(new gX(eBJ((Mo(),eJf))));e=t=16*t+r;break;case 118:if(eBM(this),0!=this.c||(r=eb0(this.a))<0||(t=r,eBM(this),0!=this.c||(r=eb0(this.a))<0)||(t=16*t+r,eBM(this),0!=this.c||(r=eb0(this.a))<0)||(t=16*t+r,eBM(this),0!=this.c||(r=eb0(this.a))<0)||(t=16*t+r,eBM(this),0!=this.c||(r=eb0(this.a))<0)||(t=16*t+r,eBM(this),0!=this.c||(r=eb0(this.a))<0))throw p7(new gX(eBJ((Mo(),eJf))));if((t=16*t+r)>e1f)throw p7(new gX(eBJ((Mo(),"parser.descappe.4"))));e=t;break;case 65:case 90:case 122:throw p7(new gX(eBJ((Mo(),eJb))))}return e},eUe.ul=function(e){var t,n;switch(e){case 100:n=(32&this.e)==32?eYB("Nd",!0):(eBG(),tv8);break;case 68:n=(32&this.e)==32?eYB("Nd",!1):(eBG(),tyr);break;case 119:n=(32&this.e)==32?eYB("IsWord",!0):(eBG(),tyd);break;case 87:n=(32&this.e)==32?eYB("IsWord",!1):(eBG(),tya);break;case 115:n=(32&this.e)==32?eYB("IsSpace",!0):(eBG(),tys);break;case 
83:n=(32&this.e)==32?eYB("IsSpace",!1):(eBG(),tyi);break;default:throw p7(new go(e1d+(t=e).toString(16)))}return n},eUe.vl=function(e){var t,n,r,i,a,o,s,u,c,l,f,d;for(this.b=1,eBM(this),t=null,0==this.c&&94==this.a?(eBM(this),e?l=(eBG(),eBG(),++tyv,new WZ(5)):(t=(eBG(),eBG(),++tyv,new WZ(4)),eLw(t,0,e1f),l=(++tyv,new WZ(4)))):l=(eBG(),eBG(),++tyv,new WZ(4)),i=!0;1!=(d=this.c)&&(0!=d||93!=this.a||i);){if(i=!1,n=this.a,r=!1,10==d)switch(n){case 100:case 68:case 119:case 87:case 115:case 83:ePR(l,this.ul(n)),r=!0;break;case 105:case 73:case 99:case 67:(n=this.Ll(l,n))<0&&(r=!0);break;case 112:case 80:if(!(f=ext(this,n)))throw p7(new gX(eBJ((Mo(),eJe))));ePR(l,f),r=!0;break;default:n=this.tl()}else if(20==d){if((o=AG(this.i,58,this.d))<0)throw p7(new gX(eBJ((Mo(),eJt))));if(s=!0,94==UI(this.i,this.d)&&(++this.d,s=!1),!(u=JI(a=Az(this.i,this.d,o),s,(512&this.e)==512)))throw p7(new gX(eBJ((Mo(),eJr))));if(ePR(l,u),r=!0,o+1>=this.j||93!=UI(this.i,o+1))throw p7(new gX(eBJ((Mo(),eJt))));this.d=o+2}if(eBM(this),!r){if(0!=this.c||45!=this.a)eLw(l,n,n);else{if(eBM(this),1==(d=this.c))throw p7(new gX(eBJ((Mo(),eJn))));0==d&&93==this.a?(eLw(l,n,n),eLw(l,45,45)):(c=this.a,10==d&&(c=this.tl()),eBM(this),eLw(l,n,c))}}(this.e&eXt)==eXt&&0==this.c&&44==this.a&&eBM(this)}if(1==this.c)throw p7(new gX(eBJ((Mo(),eJn))));return t&&(ej0(t,l),l=t),eMS(l),eRo(l),this.b=0,eBM(this),l},eUe.wl=function(){var e,t,n,r;for(n=this.vl(!1);7!=(r=this.c);)if(e=this.a,0==r&&(45==e||38==e)||4==r){if(eBM(this),9!=this.c)throw p7(new gX(eBJ((Mo(),eJu))));if(t=this.vl(!1),4==r)ePR(n,t);else if(45==e)ej0(n,t);else if(38==e)ejO(n,t);else throw p7(new go("ASSERT"))}else throw p7(new gX(eBJ((Mo(),eJc))));return eBM(this),n},eUe.xl=function(){var e,t;return e=this.a-48,t=(eBG(),eBG(),++tyv,new zc(12,null,e)),this.g||(this.g=new bZ),bY(this.g,new pX(e)),eBM(this),t},eUe.yl=function(){return eBM(this),eBG(),tyu},eUe.zl=function(){return eBM(this),eBG(),tyo},eUe.Al=function(){throw p7(new 
gX(eBJ((Mo(),eJm))))},eUe.Bl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Cl=function(){return eBM(this),esV()},eUe.Dl=function(){return eBM(this),eBG(),tyl},eUe.El=function(){return eBM(this),eBG(),tyh},eUe.Fl=function(){var e;if(this.d>=this.j||(65504&(e=UI(this.i,this.d++)))!=64)throw p7(new gX(eBJ((Mo(),eX6))));return eBM(this),eBG(),eBG(),++tyv,new jb(0,e-64)},eUe.Gl=function(){return eBM(this),eNw()},eUe.Hl=function(){return eBM(this),eBG(),typ},eUe.Il=function(){var e;return e=(eBG(),eBG(),++tyv,new jb(0,105)),eBM(this),e},eUe.Jl=function(){return eBM(this),eBG(),tyf},eUe.Kl=function(){return eBM(this),eBG(),tyc},eUe.Ll=function(e,t){return this.tl()},eUe.Ml=function(){return eBM(this),eBG(),tyt},eUe.Nl=function(){var e,t,n,r,i;if(this.d+1>=this.j)throw p7(new gX(eBJ((Mo(),eX3))));if(r=-1,t=null,49<=(e=UI(this.i,this.d))&&e<=57){if(r=e-48,this.g||(this.g=new bZ),bY(this.g,new pX(r)),++this.d,41!=UI(this.i,this.d))throw p7(new gX(eBJ((Mo(),eX1))));++this.d}else switch(63==e&&--this.d,eBM(this),(t=eBs(this)).e){case 20:case 21:case 22:case 23:break;case 8:if(7!=this.c)throw p7(new gX(eBJ((Mo(),eX1))));break;default:throw p7(new gX(eBJ((Mo(),eX4))))}if(eBM(this),i=ehT(this),n=null,2==i.e){if(2!=i.em())throw p7(new gX(eBJ((Mo(),eX5))));n=i.am(1),i=i.am(0)}if(7!=this.c)throw p7(new gX(eBJ((Mo(),eX1))));return eBM(this),eBG(),eBG(),++tyv,new ee_(r,t,i,n)},eUe.Ol=function(){return eBM(this),eBG(),tyn},eUe.Pl=function(){var e;if(eBM(this),e=F4(24,ehT(this)),7!=this.c)throw p7(new gX(eBJ((Mo(),eX1))));return eBM(this),e},eUe.Ql=function(){var e;if(eBM(this),e=F4(20,ehT(this)),7!=this.c)throw p7(new gX(eBJ((Mo(),eX1))));return eBM(this),e},eUe.Rl=function(){var e;if(eBM(this),e=F4(22,ehT(this)),7!=this.c)throw p7(new gX(eBJ((Mo(),eX1))));return eBM(this),e},eUe.Sl=function(){var e,t,n,r,i;for(e=0,n=0,t=-1;this.d=this.j)throw p7(new gX(eBJ((Mo(),eX0))));if(45==t){for(++this.d;this.d=this.j)throw p7(new 
gX(eBJ((Mo(),eX0))))}if(58==t){if(++this.d,eBM(this),r=Bu(ehT(this),e,n),7!=this.c)throw p7(new gX(eBJ((Mo(),eX1))));eBM(this)}else if(41==t)++this.d,eBM(this),r=Bu(ehT(this),e,n);else throw p7(new gX(eBJ((Mo(),eX2))));return r},eUe.Tl=function(){var e;if(eBM(this),e=F4(21,ehT(this)),7!=this.c)throw p7(new gX(eBJ((Mo(),eX1))));return eBM(this),e},eUe.Ul=function(){var e;if(eBM(this),e=F4(23,ehT(this)),7!=this.c)throw p7(new gX(eBJ((Mo(),eX1))));return eBM(this),e},eUe.Vl=function(){var e,t;if(eBM(this),e=this.f++,t=F5(ehT(this),e),7!=this.c)throw p7(new gX(eBJ((Mo(),eX1))));return eBM(this),t},eUe.Wl=function(){var e;if(eBM(this),e=F5(ehT(this),0),7!=this.c)throw p7(new gX(eBJ((Mo(),eX1))));return eBM(this),e},eUe.Xl=function(e){return(eBM(this),5==this.c)?(eBM(this),jS(e,(eBG(),eBG(),++tyv,new qa(9,e)))):jS(e,(eBG(),eBG(),++tyv,new qa(3,e)))},eUe.Yl=function(e){var t;return eBM(this),t=(eBG(),eBG(),++tyv,new Mr(2)),5==this.c?(eBM(this),eRv(t,tye),eRv(t,e)):(eRv(t,e),eRv(t,tye)),t},eUe.Zl=function(e){return(eBM(this),5==this.c)?(eBM(this),eBG(),eBG(),++tyv,new qa(9,e)):(eBG(),eBG(),++tyv,new qa(3,e))},eUe.a=0,eUe.b=0,eUe.c=0,eUe.d=0,eUe.e=0,eUe.f=1,eUe.g=null,eUe.j=0,Y5(e1l,"RegEx/RegexParser",820),eTS(1824,820,{},mU),eUe.sl=function(e){return!1},eUe.tl=function(){return eCn(this)},eUe.ul=function(e){return eDu(e)},eUe.vl=function(e){return eBL(this)},eUe.wl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.xl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.yl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.zl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Al=function(){return eBM(this),eDu(67)},eUe.Bl=function(){return eBM(this),eDu(73)},eUe.Cl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Dl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.El=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Fl=function(){return eBM(this),eDu(99)},eUe.Gl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Hl=function(){throw p7(new 
gX(eBJ((Mo(),eJm))))},eUe.Il=function(){return eBM(this),eDu(105)},eUe.Jl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Kl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Ll=function(e,t){return ePR(e,eDu(t)),-1},eUe.Ml=function(){return eBM(this),eBG(),eBG(),++tyv,new jb(0,94)},eUe.Nl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Ol=function(){return eBM(this),eBG(),eBG(),++tyv,new jb(0,36)},eUe.Pl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Ql=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Rl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Sl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Tl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Ul=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Vl=function(){var e;if(eBM(this),e=F5(ehT(this),0),7!=this.c)throw p7(new gX(eBJ((Mo(),eX1))));return eBM(this),e},eUe.Wl=function(){throw p7(new gX(eBJ((Mo(),eJm))))},eUe.Xl=function(e){return eBM(this),jS(e,(eBG(),eBG(),++tyv,new qa(3,e)))},eUe.Yl=function(e){var t;return eBM(this),t=(eBG(),eBG(),++tyv,new Mr(2)),eRv(t,e),eRv(t,tye),t},eUe.Zl=function(e){return eBM(this),eBG(),eBG(),++tyv,new qa(3,e)};var tva=null,tvo=null;Y5(e1l,"RegEx/ParserForXMLSchema",1824),eTS(117,1,e1k,pJ),eUe.$l=function(e){throw p7(new go("Not supported."))},eUe._l=function(){return -1},eUe.am=function(e){return null},eUe.bm=function(){return null},eUe.cm=function(e){},eUe.dm=function(e){},eUe.em=function(){return 0},eUe.Ib=function(){return this.fm(0)},eUe.fm=function(e){return 11==this.e?".":""},eUe.e=0;var tvs,tvu,tvc,tvl,tvf,tvd,tvh,tvp,tvb,tvm,tvg,tvv,tvy,tvw,tv_,tvE,tvS,tvk,tvx,tvT,tvM,tvO,tvA,tvL,tvC,tvI,tvD,tvN,tvP,tvR,tvj,tvF,tvY,tvB,tvU,tvH,tv$,tvz,tvG,tvW,tvK,tvV,tvq,tvZ,tvX,tvJ,tvQ,tv1,tv0,tv2,tv3,tv4,tv5,tv6,tv9,tv8,tv7,tye,tyt,tyn,tyr,tyi,tya,tyo,tys,tyu,tyc,tyl,tyf,tyd,tyh,typ,tyb=null,tym=null,tyg=null,tyv=0,tyy=Y5(e1l,"RegEx/Token",117);eTS(136,117,{3:1,136:1,117:1},WZ),eUe.fm=function(e){var t,n,r;if(4==this.e){if(this==tv7)n=".";else 
if(this==tv8)n="\\d";else if(this==tyd)n="\\w";else if(this==tys)n="\\s";else{for(r=new vs,r.a+="[",t=0;t0&&(r.a+=","),this.b[t]===this.b[t+1]?xk(r,eN$(this.b[t])):(xk(r,eN$(this.b[t])),r.a+="-",xk(r,eN$(this.b[t+1])));r.a+="]",n=r.a}}else if(this==tyr)n="\\D";else if(this==tya)n="\\W";else if(this==tyi)n="\\S";else{for(r=new vs,r.a+="[^",t=0;t0&&(r.a+=","),this.b[t]===this.b[t+1]?xk(r,eN$(this.b[t])):(xk(r,eN$(this.b[t])),r.a+="-",xk(r,eN$(this.b[t+1])));r.a+="]",n=r.a}return n},eUe.a=!1,eUe.c=!1,Y5(e1l,"RegEx/RangeToken",136),eTS(584,1,{584:1},pX),eUe.a=0,Y5(e1l,"RegEx/RegexParser/ReferencePosition",584),eTS(583,1,{3:1,583:1},wu),eUe.Fb=function(e){var t;return!!(null!=e&&M4(e,583))&&(t=Pp(e,583),IE(this.b,t.b)&&this.a==t.a)},eUe.Hb=function(){return ebA(this.b+"/"+eAN(this.a))},eUe.Ib=function(){return this.c.fm(this.a)},eUe.a=0,Y5(e1l,"RegEx/RegularExpression",583),eTS(223,117,e1k,jb),eUe._l=function(){return this.a},eUe.fm=function(e){var t,n,r;switch(this.e){case 0:switch(this.a){case 124:case 42:case 43:case 63:case 40:case 41:case 46:case 91:case 123:case 92:r="\\"+CB(this.a&eHd);break;case 12:r="\\f";break;case 10:r="\\n";break;case 13:r="\\r";break;case 9:r="\\t";break;case 27:r="\\e";break;default:r=this.a>=eH3?"\\v"+Az(n="0"+(t=this.a>>>0).toString(16),n.length-6,n.length):""+CB(this.a&eHd)}break;case 8:r=this==tyt||this==tyn?""+CB(this.a&eHd):"\\"+CB(this.a&eHd);break;default:r=null}return r},eUe.a=0,Y5(e1l,"RegEx/Token/CharToken",223),eTS(309,117,e1k,qa),eUe.am=function(e){return this.a},eUe.cm=function(e){this.b=e},eUe.dm=function(e){this.c=e},eUe.em=function(){return 1},eUe.fm=function(e){var t;if(3==this.e){if(this.c<0&&this.b<0)t=this.a.fm(e)+"*";else if(this.c==this.b)t=this.a.fm(e)+"{"+this.c+"}";else if(this.c>=0&&this.b>=0)t=this.a.fm(e)+"{"+this.c+","+this.b+"}";else if(this.c>=0&&this.b<0)t=this.a.fm(e)+"{"+this.c+",}";else throw p7(new go("Token#toString(): CLOSURE "+this.c+eUd+this.b))}else if(this.c<0&&this.b<0)t=this.a.fm(e)+"*?";else 
if(this.c==this.b)t=this.a.fm(e)+"{"+this.c+"}?";else if(this.c>=0&&this.b>=0)t=this.a.fm(e)+"{"+this.c+","+this.b+"}?";else if(this.c>=0&&this.b<0)t=this.a.fm(e)+"{"+this.c+",}?";else throw p7(new go("Token#toString(): NONGREEDYCLOSURE "+this.c+eUd+this.b));return t},eUe.b=0,eUe.c=0,Y5(e1l,"RegEx/Token/ClosureToken",309),eTS(821,117,e1k,YD),eUe.am=function(e){return 0==e?this.a:this.b},eUe.em=function(){return 2},eUe.fm=function(e){var t;return 3==this.b.e&&this.b.am(0)==this.a?this.a.fm(e)+"+":9==this.b.e&&this.b.am(0)==this.a?this.a.fm(e)+"+?":this.a.fm(e)+""+this.b.fm(e)},Y5(e1l,"RegEx/Token/ConcatToken",821),eTS(1822,117,e1k,ee_),eUe.am=function(e){if(0==e)return this.d;if(1==e)return this.b;throw p7(new go("Internal Error: "+e))},eUe.em=function(){return this.b?2:1},eUe.fm=function(e){var t;return t=this.c>0?"(?("+this.c+")":8==this.a.e?"(?("+this.a+")":"(?"+this.a,this.b?t+=this.d+"|"+this.b+")":t+=this.d+")",t},eUe.c=0,Y5(e1l,"RegEx/Token/ConditionToken",1822),eTS(1823,117,e1k,Wq),eUe.am=function(e){return this.b},eUe.em=function(){return 1},eUe.fm=function(e){return"(?"+(0==this.a?"":eAN(this.a))+(0==this.c?"":eAN(this.c))+":"+this.b.fm(e)+")"},eUe.a=0,eUe.c=0,Y5(e1l,"RegEx/Token/ModifierToken",1823),eTS(822,117,e1k,BR),eUe.am=function(e){return this.a},eUe.em=function(){return 1},eUe.fm=function(e){var t;switch(t=null,this.e){case 6:t=0==this.b?"(?:"+this.a.fm(e)+")":"("+this.a.fm(e)+")";break;case 20:t="(?="+this.a.fm(e)+")";break;case 21:t="(?!"+this.a.fm(e)+")";break;case 22:t="(?<="+this.a.fm(e)+")";break;case 23:t="(?"+this.a.fm(e)+")"}return t},eUe.b=0,Y5(e1l,"RegEx/Token/ParenToken",822),eTS(521,117,{3:1,117:1,521:1},zc),eUe.bm=function(){return this.b},eUe.fm=function(e){return 12==this.e?"\\"+this.a:eTd(this.b)},eUe.a=0,Y5(e1l,"RegEx/Token/StringToken",521),eTS(465,117,e1k,Mr),eUe.$l=function(e){eRv(this,e)},eUe.am=function(e){return Pp(Bz(this.a,e),117)},eUe.em=function(){return this.a?this.a.a.c.length:0},eUe.fm=function(e){var 
t,n,r,i,a;if(1==this.e){if(2==this.a.a.c.length)t=Pp(Bz(this.a,0),117),i=3==(n=Pp(Bz(this.a,1),117)).e&&n.am(0)==t?t.fm(e)+"+":9==n.e&&n.am(0)==t?t.fm(e)+"+?":t.fm(e)+""+n.fm(e);else{for(r=0,a=new vs;r=this.c.b:this.a<=this.c.b},eUe.Sb=function(){return this.b>0},eUe.Tb=function(){return this.b},eUe.Vb=function(){return this.b-1},eUe.Qb=function(){throw p7(new gW(e1L))},eUe.a=0,eUe.b=0,Y5(e1M,"ExclusiveRange/RangeIterator",254);var tyw=Ui(eJX,"C"),ty_=Ui(eJ1,"I"),tyE=Ui(eUi,"Z"),tyS=Ui(eJ0,"J"),tyk=Ui(eJZ,"B"),tyx=Ui(eJJ,"D"),tyT=Ui(eJQ,"F"),tyM=Ui(eJ2,"S"),tyO=RL("org.eclipse.elk.core.labels","ILabelManager"),tyA=RL(eX_,"DiagnosticChain"),tyL=RL(eQx,"ResourceSet"),tyC=Y5(eX_,"InvocationTargetException",null),tyI=(vg(),q6),tyD=tyD=eyP;enI(bs),eiE("permProps",[[[e1C,e1I],[e1D,"gecko1_8"]],[[e1C,e1I],[e1D,"ie10"]],[[e1C,e1I],[e1D,"ie8"]],[[e1C,e1I],[e1D,"ie9"]],[[e1C,e1I],[e1D,"safari"]]]),tyD(null,"elk",null)},3379(e,t,n){"use strict";function r(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}function i(e,t){if(!e)throw ReferenceError("this hasn't been initialised - super() hasn't been called");return t&&("object"==typeof t||"function"==typeof t)?t:e}function a(e,t){if("function"!=typeof t&&null!==t)throw TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}var o=function(e){function t(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};r(this,t);var a=Object.assign({},e),o=!1;try{o=!0}catch(s){}if(e.workerUrl){if(o){var u=n(84763);a.workerFactory=function(e){return new u(e)}}else console.warn("Web worker requested but 'web-worker' package not installed. \nConsider installing the package or pass your own 'workerFactory' to ELK's constructor.\n... 
Falling back to non-web worker version.")}if(!a.workerFactory){var c=n(55273).Worker;a.workerFactory=function(e){return new c(e)}}return i(this,(t.__proto__||Object.getPrototypeOf(t)).call(this,a))}return a(t,e),t}(n(4005).default);Object.defineProperty(e.exports,"__esModule",{value:!0}),e.exports=o,o.default=o},17187(e){"use strict";var t,n="object"==typeof Reflect?Reflect:null,r=n&&"function"==typeof n.apply?n.apply:function(e,t,n){return Function.prototype.apply.call(e,t,n)};function i(e){console&&console.warn&&console.warn(e)}t=n&&"function"==typeof n.ownKeys?n.ownKeys:Object.getOwnPropertySymbols?function(e){return Object.getOwnPropertyNames(e).concat(Object.getOwnPropertySymbols(e))}:function(e){return Object.getOwnPropertyNames(e)};var a=Number.isNaN||function(e){return e!=e};function o(){o.init.call(this)}e.exports=o,e.exports.once=v,o.EventEmitter=o,o.prototype._events=void 0,o.prototype._eventsCount=0,o.prototype._maxListeners=void 0;var s=10;function u(e){if("function"!=typeof e)throw TypeError('The "listener" argument must be of type Function. Received type '+typeof e)}function c(e){return void 0===e._maxListeners?o.defaultMaxListeners:e._maxListeners}function l(e,t,n,r){if(u(n),void 0===(o=e._events)?(o=e._events=Object.create(null),e._eventsCount=0):(void 0!==o.newListener&&(e.emit("newListener",t,n.listener?n.listener:n),o=e._events),s=o[t]),void 0===s)s=o[t]=n,++e._eventsCount;else if("function"==typeof s?s=o[t]=r?[n,s]:[s,n]:r?s.unshift(n):s.push(n),(a=c(e))>0&&s.length>a&&!s.warned){s.warned=!0;var a,o,s,l=Error("Possible EventEmitter memory leak detected. "+s.length+" "+String(t)+" listeners added. 
Use emitter.setMaxListeners() to increase limit");l.name="MaxListenersExceededWarning",l.emitter=e,l.type=t,l.count=s.length,i(l)}return e}function f(){if(!this.fired)return(this.target.removeListener(this.type,this.wrapFn),this.fired=!0,0===arguments.length)?this.listener.call(this.target):this.listener.apply(this.target,arguments)}function d(e,t,n){var r={fired:!1,wrapFn:void 0,target:e,type:t,listener:n},i=f.bind(r);return i.listener=n,r.wrapFn=i,i}function h(e,t,n){var r=e._events;if(void 0===r)return[];var i=r[t];return void 0===i?[]:"function"==typeof i?n?[i.listener||i]:[i]:n?g(i):b(i,i.length)}function p(e){var t=this._events;if(void 0!==t){var n=t[e];if("function"==typeof n)return 1;if(void 0!==n)return n.length}return 0}function b(e,t){for(var n=Array(t),r=0;r0&&(o=t[0]),o instanceof Error)throw o;var o,s=Error("Unhandled error."+(o?" ("+o.message+")":""));throw s.context=o,s}var u=a[e];if(void 0===u)return!1;if("function"==typeof u)r(u,this,t);else for(var c=u.length,l=b(u,c),n=0;n=0;a--)if(n[a]===t||n[a].listener===t){o=n[a].listener,i=a;break}if(i<0)return this;0===i?n.shift():m(n,i),1===n.length&&(r[e]=n[0]),void 0!==r.removeListener&&this.emit("removeListener",e,o||t)}return this},o.prototype.off=o.prototype.removeListener,o.prototype.removeAllListeners=function(e){var t,n,r;if(void 0===(n=this._events))return this;if(void 0===n.removeListener)return 0===arguments.length?(this._events=Object.create(null),this._eventsCount=0):void 0!==n[e]&&(0==--this._eventsCount?this._events=Object.create(null):delete n[e]),this;if(0===arguments.length){var i,a=Object.keys(n);for(r=0;r=0;r--)this.removeListener(e,t[r]);return this},o.prototype.listeners=function(e){return h(this,e,!0)},o.prototype.rawListeners=function(e){return h(this,e,!1)},o.listenerCount=function(e,t){return"function"==typeof e.listenerCount?e.listenerCount(t):p.call(e,t)},o.prototype.listenerCount=p,o.prototype.eventNames=function(){return 
this._eventsCount>0?t(this._events):[]}},16839(e,t,n){var r=n(25323),i=n(31744),a=n(98361),o=n(4514);e.exports={graphlib:n(32478),read:r,readMany:i,write:a,version:o,type:"dot",buffer:!1}},11100(e,t,n){"use strict";var r=n(47755),i=n(32478).Graph;function a(e){var t="graph"!==e.type,n=!e.strict,a=[{node:{},edge:{}}],s=e.id,u=new i({directed:t,multigraph:n,compound:!0});return u.setGraph(null===s?{}:{id:s}),r.each(e.stmts,function(e){o(u,e,a)}),u}function o(e,t,n,r){switch(t.type){case"node":s(e,t,n,r);break;case"edge":u(e,t,n,r);break;case"subgraph":c(e,t,n,r);break;case"attr":l(e,t,n);break;case"inlineAttr":f(e,t,n,r)}}function s(e,t,n,i){var a=t.id,o=t.attrs;h(e,a,n,i),r.merge(e.node(a),o)}function u(e,t,n,i){var a,s,u=t.attrs;r.each(t.elems,function(t){switch(o(e,t,n,i),t.type){case"node":s=[t.id];break;case"subgraph":s=p(t)}r.each(a,function(t){r.each(s,function(i){var a;e.hasEdge(t,i)&&e.isMultigraph()&&(a=r.uniqueId("edge")),e.hasEdge(t,i,a)||e.setEdge(t,i,r.clone(r.last(n).edge),a),r.merge(e.edge(t,i,a),u)})}),a=s})}function c(e,t,n,i){var a=t.id;void 0===a&&(a=d(e)),n.push(r.clone(r.last(n))),h(e,a,n,i),r.each(t.stmts,function(t){o(e,t,n,a)}),e.children(a).length||e.removeNode(a),n.pop()}function l(e,t,n){r.merge(r.last(n)[t.attrType],t.attrs)}function f(e,t,n,i){r.merge(i?e.node(i):e.graph(),t.attrs)}function d(e){var t;do t=r.uniqueId("sg");while(e.hasNode(t))return t}function h(e,t,n,i){e.hasNode(t)||(e.setNode(t,r.clone(r.last(n).node)),e.setParent(t,i))}function p(e){var t,n={},i=[],a=i.push.bind(i);for(a(e);i.length;)switch((t=i.pop()).type){case"node":n[t.id]=!0;break;case"edge":r.each(t.elems,a);break;case"subgraph":r.each(t.stmts,a)}return r.keys(n)}e.exports=a},4644(e,t,n){e.exports=function(){function e(e,t){function n(){this.constructor=e}n.prototype=t.prototype,e.prototype=new n}function t(e,t,n,r,i,a){this.message=e,this.expected=t,this.found=n,this.offset=r,this.line=i,this.column=a,this.name="SyntaxError"}function r(e){var 
r,i,a=arguments.length>1?arguments[1]:{},o={},s={start:tf,graphStmt:td},u=tf,c=o,l=null,f="{",d={type:"literal",value:"{",description:'"{"'},h="}",p={type:"literal",value:"}",description:'"}"'},b=function(e,t,n,r){return{type:t,id:n,strict:null!==e,stmts:r}},m=";",g={type:"literal",value:";",description:'";"'},v=function(e,t){for(var n=[e],r=0;r",description:'"->"'},U=function(e,t){var n=[e];if(t)for(var r=0;rt&&(tr=0,ti={line:1,column:1,seenCR:!1}),n(ti,tr,t),tr=t),ti}function tc(e){!(ttta&&(ta=tt,to=[]),to.push(e))}function tl(n,r,i){function a(e){var t=1;for(e.sort(function(e,t){return e.descriptiont.description?1:0});t1?o.slice(0,-1).join(", ")+" or "+o[e.length-1]:o[0])+" but "+(i=t?'"'+n(t)+'"':"end of input")+" found."}var s=tu(i),u=itt?(s=e.charAt(tt),tt++):(s=o,0===ts&&tc(te)),s!==o?i=a=[a,s]:(tt=i,i=c)):(tt=i,i=c);i!==o;)r.push(i),i=tt,a=tt,ts++,e.substr(tt,2)===e8?(s=e8,tt+=2):(s=o,0===ts&&tc(e7)),ts--,s===o?a=F:(tt=a,a=c),a!==o?(e.length>tt?(s=e.charAt(tt),tt++):(s=o,0===ts&&tc(te)),s!==o?i=a=[a,s]:(tt=i,i=c)):(tt=i,i=c);r!==o?(e.substr(tt,2)===e8?(i=e8,tt+=2):(i=o,0===ts&&tc(e7)),i!==o?t=n=[n,r,i]:(tt=t,t=c)):(tt=t,t=c)}else tt=t,t=c}return ts--,t===o&&(n=o,0===ts&&tc(e0)),t}function tY(){var e;return(e=tj())===o&&(e=tF()),e}var tB=n(47755);if((i=u())!==o&&tt===e.length)return i;throw i!==o&&tt":"--",n=new f;e.isMultigraph()||n.write("strict "),n.writeLine((e.isDirected()?"digraph":"graph")+" {"),n.indent();var i=e.graph();return r.isObject(i)&&r.each(i,function(e,t){n.writeLine(l(t)+"="+l(e)+";")}),o(e,void 0,n),e.edges().forEach(function(r){u(e,r,t,n)}),n.unindent(),n.writeLine("}"),n.toString()}function o(e,t,n){var i=e.isCompound()?e.children(t):e.nodes();r.each(i,function(t){e.isCompound()&&e.children(t).length?(n.writeLine("subgraph "+l(t)+" {"),n.indent(),r.isObject(e.node(t))&&r.map(e.node(t),function(e,t){n.writeLine(l(t)+"="+l(e)+";")}),o(e,t,n),n.unindent(),n.writeLine("}")):s(e,t,n)})}function 
s(e,t,n){n.write(l(t)),c(e.node(t),n),n.writeLine()}function u(e,t,n,r){var i=t.v,a=t.w,o=e.edge(t);r.write(l(i)+" "+n+" "+l(a)),c(o,r),r.writeLine()}function c(e,t){if(r.isObject(e)){var n=r.map(e,function(e,t){return l(t)+"="+l(e)});n.length&&t.write(" ["+n.join(",")+"]")}}function l(e){return"number"==typeof e||e.toString().match(i)?e:'"'+e.toString().replace(/"/g,'\\"')+'"'}function f(){this._indent="",this._content="",this._shouldIndent=!0}f.prototype.INDENT=" ",f.prototype.indent=function(){this._indent+=this.INDENT},f.prototype.unindent=function(){this._indent=this._indent.slice(this.INDENT.length)},f.prototype.writeLine=function(e){this.write((e||"")+"\n"),this._shouldIndent=!0},f.prototype.write=function(e){this._shouldIndent&&(this._shouldIndent=!1,this._content+=this._indent),this._content+=e},f.prototype.toString=function(){return this._content}},28282(e,t,n){var r=n(82354);e.exports={Graph:r.Graph,json:n(28974),alg:n(12440),version:r.version}},2842(e,t,n){var r=n(89126);function i(e){var t,n={},i=[];function a(i){r.has(n,i)||(n[i]=!0,t.push(i),r.each(e.successors(i),a),r.each(e.predecessors(i),a))}return r.each(e.nodes(),function(e){t=[],a(e),t.length&&i.push(t)}),i}e.exports=i},53984(e,t,n){var r=n(89126);function i(e,t,n){r.isArray(t)||(t=[t]);var i=(e.isDirected()?e.successors:e.neighbors).bind(e),o=[],s={};return r.each(t,function(t){if(!e.hasNode(t))throw Error("Graph does not have node: "+t);a(e,t,"post"===n,s,i,o)}),o}function a(e,t,n,i,o,s){!r.has(i,t)&&(i[t]=!0,n||s.push(t),r.each(o(t),function(t){a(e,t,n,i,o,s)}),n&&s.push(t))}e.exports=i},84847(e,t,n){var r=n(63763),i=n(89126);function a(e,t,n){return i.transform(e.nodes(),function(i,a){i[a]=r(e,a,t,n)},{})}e.exports=a},63763(e,t,n){var r=n(89126),i=n(75639);e.exports=o;var a=r.constant(1);function o(e,t,n,r){return s(e,String(t),n||a,r||function(t){return e.outEdges(t)})}function s(e,t,n,r){var a,o,s={},u=new i,c=function(e){var t=e.v!==a?e.v:e.w,r=s[t],i=n(e),c=o.distance+i;if(i<0)throw 
Error("dijkstra does not allow negative edge weights. Bad edge: "+e+" Weight: "+i);c0&&(o=s[a=u.removeMin()]).distance!==Number.POSITIVE_INFINITY;)r(a).forEach(c);return s}},9096(e,t,n){var r=n(89126),i=n(5023);function a(e){return r.filter(i(e),function(t){return t.length>1||1===t.length&&e.hasEdge(t[0],t[0])})}e.exports=a},38924(e,t,n){var r=n(89126);e.exports=a;var i=r.constant(1);function a(e,t,n){return o(e,t||i,n||function(t){return e.outEdges(t)})}function o(e,t,n){var r={},i=e.nodes();return i.forEach(function(e){r[e]={},r[e][e]={distance:0},i.forEach(function(t){e!==t&&(r[e][t]={distance:Number.POSITIVE_INFINITY})}),n(e).forEach(function(n){var i=n.v===e?n.w:n.v,a=t(n);r[e][i]={distance:a,predecessor:e}})}),i.forEach(function(e){var t=r[e];i.forEach(function(n){var a=r[n];i.forEach(function(n){var r=a[e],i=t[n],o=a[n],s=r.distance+i.distance;s0;){if(n=u.removeMin(),r.has(s,n))o.setEdge(n,s[n]);else if(l)throw Error("Input graph is not connected: "+e);else l=!0;e.nodeEdges(n).forEach(c)}return o}e.exports=o},5023(e,t,n){var r=n(89126);function i(e){var t=0,n=[],i={},a=[];function o(s){var u=i[s]={onStack:!0,lowlink:t,index:t++};if(n.push(s),e.successors(s).forEach(function(e){r.has(i,e)?i[e].onStack&&(u.lowlink=Math.min(u.lowlink,i[e].index)):(o(e),u.lowlink=Math.min(u.lowlink,i[e].lowlink))}),u.lowlink===u.index){var c,l=[];do i[c=n.pop()].onStack=!1,l.push(c);while(s!==c)a.push(l)}}return e.nodes().forEach(function(e){r.has(i,e)||o(e)}),a}e.exports=i},2166(e,t,n){var r=n(89126);function i(e){var t={},n={},i=[];function o(s){if(r.has(n,s))throw new a;r.has(t,s)||(n[s]=!0,t[s]=!0,r.each(e.predecessors(s),o),delete n[s],i.push(s))}if(r.each(e.sinks(),o),r.size(t)!==e.nodeCount())throw new a;return i}function a(){}e.exports=i,i.CycleException=a,a.prototype=Error()},75639(e,t,n){var r=n(89126);function i(){this._arr=[],this._keyIndices={}}e.exports=i,i.prototype.size=function(){return this._arr.length},i.prototype.keys=function(){return 
this._arr.map(function(e){return e.key})},i.prototype.has=function(e){return r.has(this._keyIndices,e)},i.prototype.priority=function(e){var t=this._keyIndices[e];if(void 0!==t)return this._arr[t].priority},i.prototype.min=function(){if(0===this.size())throw Error("Queue underflow");return this._arr[0].key},i.prototype.add=function(e,t){var n=this._keyIndices;if(e=String(e),!r.has(n,e)){var i=this._arr,a=i.length;return n[e]=a,i.push({key:e,priority:t}),this._decrease(a),!0}return!1},i.prototype.removeMin=function(){this._swap(0,this._arr.length-1);var e=this._arr.pop();return delete this._keyIndices[e.key],this._heapify(0),e.key},i.prototype.decrease=function(e,t){var n=this._keyIndices[e];if(t>this._arr[n].priority)throw Error("New priority is greater than current priority. Key: "+e+" Old: "+this._arr[n].priority+" New: "+t);this._arr[n].priority=t,this._decrease(n)},i.prototype._heapify=function(e){var t=this._arr,n=2*e,r=n+1,i=e;n>1].priorityu){var c=s;s=u,u=c}return s+o+u+o+(r.isUndefined(a)?i:a)}function f(e,t,n,r){var i=""+t,a=""+n;if(!e&&i>a){var o=i;i=a,a=o}var s={v:i,w:a};return r&&(s.name=r),s}function d(e,t){return l(e,t.v,t.w,t.name)}s.prototype._nodeCount=0,s.prototype._edgeCount=0,s.prototype.isDirected=function(){return this._isDirected},s.prototype.isMultigraph=function(){return this._isMultigraph},s.prototype.isCompound=function(){return this._isCompound},s.prototype.setGraph=function(e){return this._label=e,this},s.prototype.graph=function(){return this._label},s.prototype.setDefaultNodeLabel=function(e){return r.isFunction(e)||(e=r.constant(e)),this._defaultNodeLabelFn=e,this},s.prototype.nodeCount=function(){return this._nodeCount},s.prototype.nodes=function(){return r.keys(this._nodes)},s.prototype.sources=function(){var e=this;return r.filter(this.nodes(),function(t){return r.isEmpty(e._in[t])})},s.prototype.sinks=function(){var e=this;return r.filter(this.nodes(),function(t){return 
r.isEmpty(e._out[t])})},s.prototype.setNodes=function(e,t){var n=arguments,i=this;return r.each(e,function(e){n.length>1?i.setNode(e,t):i.setNode(e)}),this},s.prototype.setNode=function(e,t){return r.has(this._nodes,e)?(arguments.length>1&&(this._nodes[e]=t),this):(this._nodes[e]=arguments.length>1?t:this._defaultNodeLabelFn(e),this._isCompound&&(this._parent[e]=a,this._children[e]={},this._children[a][e]=!0),this._in[e]={},this._preds[e]={},this._out[e]={},this._sucs[e]={},++this._nodeCount,this)},s.prototype.node=function(e){return this._nodes[e]},s.prototype.hasNode=function(e){return r.has(this._nodes,e)},s.prototype.removeNode=function(e){var t=this;if(r.has(this._nodes,e)){var n=function(e){t.removeEdge(t._edgeObjs[e])};delete this._nodes[e],this._isCompound&&(this._removeFromParentsChildList(e),delete this._parent[e],r.each(this.children(e),function(e){t.setParent(e)}),delete this._children[e]),r.each(r.keys(this._in[e]),n),delete this._in[e],delete this._preds[e],r.each(r.keys(this._out[e]),n),delete this._out[e],delete this._sucs[e],--this._nodeCount}return this},s.prototype.setParent=function(e,t){if(!this._isCompound)throw Error("Cannot set parent in a non-compound graph");if(r.isUndefined(t))t=a;else{t+="";for(var n=t;!r.isUndefined(n);n=this.parent(n))if(n===e)throw Error("Setting "+t+" as parent of "+e+" would create a cycle");this.setNode(t)}return this.setNode(e),this._removeFromParentsChildList(e),this._parent[e]=t,this._children[t][e]=!0,this},s.prototype._removeFromParentsChildList=function(e){delete this._children[this._parent[e]][e]},s.prototype.parent=function(e){if(this._isCompound){var t=this._parent[e];if(t!==a)return t}},s.prototype.children=function(e){if(r.isUndefined(e)&&(e=a),this._isCompound){var t=this._children[e];if(t)return r.keys(t)}else if(e===a)return this.nodes();else if(this.hasNode(e))return[]},s.prototype.predecessors=function(e){var t=this._preds[e];if(t)return r.keys(t)},s.prototype.successors=function(e){var 
t=this._sucs[e];if(t)return r.keys(t)},s.prototype.neighbors=function(e){var t=this.predecessors(e);if(t)return r.union(t,this.successors(e))},s.prototype.isLeaf=function(e){var t;return 0===(t=this.isDirected()?this.successors(e):this.neighbors(e)).length},s.prototype.filterNodes=function(e){var t=new this.constructor({directed:this._isDirected,multigraph:this._isMultigraph,compound:this._isCompound});t.setGraph(this.graph());var n=this;r.each(this._nodes,function(n,r){e(r)&&t.setNode(r,n)}),r.each(this._edgeObjs,function(e){t.hasNode(e.v)&&t.hasNode(e.w)&&t.setEdge(e,n.edge(e))});var i={};function a(e){var r=n.parent(e);return void 0===r||t.hasNode(r)?(i[e]=r,r):r in i?i[r]:a(r)}return this._isCompound&&r.each(t.nodes(),function(e){t.setParent(e,a(e))}),t},s.prototype.setDefaultEdgeLabel=function(e){return r.isFunction(e)||(e=r.constant(e)),this._defaultEdgeLabelFn=e,this},s.prototype.edgeCount=function(){return this._edgeCount},s.prototype.edges=function(){return r.values(this._edgeObjs)},s.prototype.setPath=function(e,t){var n=this,i=arguments;return r.reduce(e,function(e,r){return i.length>1?n.setEdge(e,r,t):n.setEdge(e,r),r}),this},s.prototype.setEdge=function(){var e,t,n,i,a=!1,o=arguments[0];"object"==typeof o&&null!==o&&"v"in o?(e=o.v,t=o.w,n=o.name,2===arguments.length&&(i=arguments[1],a=!0)):(e=o,t=arguments[1],n=arguments[3],arguments.length>2&&(i=arguments[2],a=!0)),e=""+e,t=""+t,r.isUndefined(n)||(n=""+n);var s=l(this._isDirected,e,t,n);if(r.has(this._edgeLabels,s))return a&&(this._edgeLabels[s]=i),this;if(!r.isUndefined(n)&&!this._isMultigraph)throw Error("Cannot set a named edge when isMultigraph = false");this.setNode(e),this.setNode(t),this._edgeLabels[s]=a?i:this._defaultEdgeLabelFn(e,t,n);var c=f(this._isDirected,e,t,n);return e=c.v,t=c.w,Object.freeze(c),this._edgeObjs[s]=c,u(this._preds[t],e),u(this._sucs[e],t),this._in[t][s]=c,this._out[e][s]=c,this._edgeCount++,this},s.prototype.edge=function(e,t,n){var 
r=1===arguments.length?d(this._isDirected,arguments[0]):l(this._isDirected,e,t,n);return this._edgeLabels[r]},s.prototype.hasEdge=function(e,t,n){var i=1===arguments.length?d(this._isDirected,arguments[0]):l(this._isDirected,e,t,n);return r.has(this._edgeLabels,i)},s.prototype.removeEdge=function(e,t,n){var r=1===arguments.length?d(this._isDirected,arguments[0]):l(this._isDirected,e,t,n),i=this._edgeObjs[r];return i&&(e=i.v,t=i.w,delete this._edgeLabels[r],delete this._edgeObjs[r],c(this._preds[t],e),c(this._sucs[e],t),delete this._in[t][r],delete this._out[e][r],this._edgeCount--),this},s.prototype.inEdges=function(e,t){var n=this._in[e];if(n){var i=r.values(n);return t?r.filter(i,function(e){return e.v===t}):i}},s.prototype.outEdges=function(e,t){var n=this._out[e];if(n){var i=r.values(n);return t?r.filter(i,function(e){return e.w===t}):i}},s.prototype.nodeEdges=function(e,t){var n=this.inEdges(e,t);if(n)return n.concat(this.outEdges(e,t))}},82354(e,t,n){e.exports={Graph:n(30771),version:n(49631)}},28974(e,t,n){var r=n(89126),i=n(30771);function a(e){var t={options:{directed:e.isDirected(),multigraph:e.isMultigraph(),compound:e.isCompound()},nodes:o(e),edges:s(e)};return r.isUndefined(e.graph())||(t.value=r.clone(e.graph())),t}function o(e){return r.map(e.nodes(),function(t){var n=e.node(t),i=e.parent(t),a={v:t};return r.isUndefined(n)||(a.value=n),r.isUndefined(i)||(a.parent=i),a})}function s(e){return r.map(e.edges(),function(t){var n=e.edge(t),i={v:t.v,w:t.w};return r.isUndefined(t.name)||(i.name=t.name),r.isUndefined(n)||(i.value=n),i})}function u(e){var t=new i(e.options).setGraph(e.value);return r.each(e.nodes,function(e){t.setNode(e.v,e.value),e.parent&&t.setParent(e.v,e.parent)}),r.each(e.edges,function(e){t.setEdge({v:e.v,w:e.w,name:e.name},e.value)}),t}e.exports={write:a,read:u}},89126(e,t,n){var 
r;try{r={clone:n(66678),constant:n(75703),each:n(66073),filter:n(63105),has:n(18721),isArray:n(1469),isEmpty:n(41609),isFunction:n(23560),isUndefined:n(52353),keys:n(3674),map:n(35161),reduce:n(54061),size:n(84238),transform:n(68718),union:n(93386),values:n(52628)}}catch(i){}r||(r=window._),e.exports=r},49631(e){e.exports="2.1.8"},78892(e){"use strict";e.exports=n;var t=/[#.]/g;function n(e,n){for(var r,i,a,o=e||"",s=n||"div",u={},c=0;cC,q_:()=>F,ob:()=>y,PP:()=>B,Ep:()=>v,Hp:()=>w});var r=n(87462);function i(e){return"/"===e.charAt(0)}function a(e,t){for(var n=t,r=n+1,i=e.length;r=0;d--){var h=o[d];"."===h?a(o,d):".."===h?(a(o,d),f++):f&&(a(o,d),f--)}if(!c)for(;f--;f)o.unshift("..");!c||""===o[0]||o[0]&&i(o[0])||o.unshift("");var p=o.join("/");return n&&"/"!==p.substr(-1)&&(p+="/"),p}let s=o;function u(e){return e.valueOf?e.valueOf():Object.prototype.valueOf.call(e)}function c(e,t){if(e===t)return!0;if(null==e||null==t)return!1;if(Array.isArray(e))return Array.isArray(t)&&e.length===t.length&&e.every(function(e,n){return c(e,t[n])});if("object"==typeof e||"object"==typeof t){var n=u(e),r=u(t);return n!==e||r!==t?c(n,r):Object.keys(Object.assign({},e,t)).every(function(n){return c(e[n],t[n])})}return!1}let l=c;var f=n(2177);function d(e){return"/"===e.charAt(0)?e:"/"+e}function h(e){return"/"===e.charAt(0)?e.substr(1):e}function p(e,t){return 0===e.toLowerCase().indexOf(t.toLowerCase())&&-1!=="/?#".indexOf(e.charAt(t.length))}function b(e,t){return p(e,t)?e.substr(t.length):e}function m(e){return"/"===e.charAt(e.length-1)?e.slice(0,-1):e}function g(e){var t=e||"/",n="",r="",i=t.indexOf("#");-1!==i&&(r=t.substr(i),t=t.substr(0,i));var a=t.indexOf("?");return -1!==a&&(n=t.substr(a),t=t.substr(0,a)),{pathname:t,search:"?"===n?"":n,hash:"#"===r?"":r}}function v(e){var t=e.pathname,n=e.search,r=e.hash,i=t||"/";return n&&"?"!==n&&(i+="?"===n.charAt(0)?n:"?"+n),r&&"#"!==r&&(i+="#"===r.charAt(0)?r:"#"+r),i}function y(e,t,n,i){var a;"string"==typeof e?(a=g(e)).state=t:(void 
0===(a=(0,r.Z)({},e)).pathname&&(a.pathname=""),a.search?"?"!==a.search.charAt(0)&&(a.search="?"+a.search):a.search="",a.hash?"#"!==a.hash.charAt(0)&&(a.hash="#"+a.hash):a.hash="",void 0!==t&&void 0===a.state&&(a.state=t));try{a.pathname=decodeURI(a.pathname)}catch(o){if(o instanceof URIError)throw URIError('Pathname "'+a.pathname+'" could not be decoded. This is likely caused by an invalid percent-encoding.');throw o}return n&&(a.key=n),i?a.pathname?"/"!==a.pathname.charAt(0)&&(a.pathname=s(a.pathname,i.pathname)):a.pathname=i.pathname:a.pathname||(a.pathname="/"),a}function w(e,t){return e.pathname===t.pathname&&e.search===t.search&&e.hash===t.hash&&e.key===t.key&&l(e.state,t.state)}function _(){var e=null;function t(t){return e=t,function(){e===t&&(e=null)}}function n(t,n,r,i){if(null!=e){var a="function"==typeof e?e(t,n):e;"string"==typeof a?"function"==typeof r?r(a,i):i(!0):i(!1!==a)}else i(!0)}var r=[];function i(e){var t=!0;function n(){t&&e.apply(void 0,arguments)}return r.push(n),function(){t=!1,r=r.filter(function(e){return e!==n})}}function a(){for(var e=arguments.length,t=Array(e),n=0;nn?a.splice(n,a.length-n,i):a.push(i),f({action:r,location:i,index:n,entries:a})}})}function g(e,t){var r="REPLACE",i=y(e,t,d(),M.location);l.confirmTransitionTo(i,r,n,function(e){e&&(M.entries[M.index]=i,f({action:r,location:i}))})}function w(e){var t=Y(M.index+e,0,M.entries.length-1),r="POP",i=M.entries[t];l.confirmTransitionTo(i,r,n,function(e){e?f({action:r,location:i,index:t}):f()})}function E(){w(-1)}function S(){w(1)}function k(e){var t=M.index+e;return t>=0&&tu});var r=/[A-Z]/g,i=/^ms-/,a={};function o(e){return"-"+e.toLowerCase()}function s(e){if(a.hasOwnProperty(e))return a[e];var t=e.replace(r,o);return a[e]=i.test(t)?"-"+t:t}let u=s},80645(e,t){/*! ieee754. BSD-3-Clause License. 
Feross Aboukhadijeh */ t.read=function(e,t,n,r,i){var a,o,s=8*i-r-1,u=(1<>1,l=-7,f=n?i-1:0,d=n?-1:1,h=e[t+f];for(f+=d,a=h&(1<<-l)-1,h>>=-l,l+=s;l>0;a=256*a+e[t+f],f+=d,l-=8);for(o=a&(1<<-l)-1,a>>=-l,l+=r;l>0;o=256*o+e[t+f],f+=d,l-=8);if(0===a)a=1-c;else{if(a===u)return o?NaN:(h?-1:1)*(1/0);o+=Math.pow(2,r),a-=c}return(h?-1:1)*o*Math.pow(2,a-r)},t.write=function(e,t,n,r,i,a){var o,s,u,c=8*a-i-1,l=(1<>1,d=23===i?5960464477539062e-23:0,h=r?0:a-1,p=r?1:-1,b=t<0||0===t&&1/t<0?1:0;for(isNaN(t=Math.abs(t))||t===1/0?(s=isNaN(t)?1:0,o=l):(o=Math.floor(Math.log(t)/Math.LN2),t*(u=Math.pow(2,-o))<1&&(o--,u*=2),o+f>=1?t+=d/u:t+=d*Math.pow(2,1-f),t*u>=2&&(o++,u/=2),o+f>=l?(s=0,o=l):o+f>=1?(s=(t*u-1)*Math.pow(2,i),o+=f):(s=t*Math.pow(2,f-1)*Math.pow(2,i),o=0));i>=8;e[n+h]=255&s,h+=p,s/=256,i-=8);for(o=o<0;e[n+h]=255&o,h+=p,o/=256,c-=8);e[n+h-p]|=128*b}},35717(e){"function"==typeof Object.create?e.exports=function(e,t){t&&(e.super_=t,e.prototype=Object.create(t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}))}:e.exports=function(e,t){if(t){e.super_=t;var n=function(){};n.prototype=t.prototype,e.prototype=new n,e.prototype.constructor=e}}},46260(e){"use strict";function t(e){var t="string"==typeof e?e.charCodeAt(0):e;return t>=97&&t<=122||t>=65&&t<=90}e.exports=t},7961(e,t,n){"use strict";var r=n(46260),i=n(46195);function a(e){return r(e)||i(e)}e.exports=a},46195(e){"use strict";function t(e){var t="string"==typeof e?e.charCodeAt(0):e;return t>=48&&t<=57}e.exports=t},79480(e){"use strict";function t(e){var t="string"==typeof e?e.charCodeAt(0):e;return t>=97&&t<=102||t>=65&&t<=70||t>=48&&t<=57}e.exports=t},33827(e,t,n){"use strict";n.r(t),n.d(t,{default:()=>a,isBrowser:()=>i});var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},i=("undefined"==typeof 
window?"undefined":r(window))==="object"&&("undefined"==typeof document?"undefined":r(document))==="object"&&9===document.nodeType;let a=i},5826(e){e.exports=Array.isArray||function(e){return"[object Array]"==Object.prototype.toString.call(e)}},47798(e){"use strict";/*! + * isobject + * + * Copyright (c) 2014-2017, Jon Schlinkert. + * Released under the MIT License. + */ e.exports=function(e){return null!=e&&"object"==typeof e&&!1===Array.isArray(e)}},80204(e,t,n){e.exports=self.fetch||(self.fetch=n(25869).default||n(25869))},5690(e,t,n){e.exports=n(67946)},8126(e,t,n){"use strict";n.d(t,{Z:()=>tl});var r,i="en",a={},o={};function s(){return i}function u(e){i=e}function c(e){return a[e]}function l(e){if(!e)throw Error("No locale data passed");a[e.locale]=e,o[e.locale.toLowerCase()]=e.locale}function f(e){return a[e]?e:o[e.toLowerCase()]?o[e.toLowerCase()]:void 0}function d(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.localeMatcher||"lookup";switch(n){case"lookup":case"best fit":return h(e);default:throw RangeError('Invalid "localeMatcher" option: '.concat(n))}}function h(e){var t=f(e);if(t)return t;for(var n=e.split("-");e.length>1;){n.pop();var r=f(e=n.join("-"));if(r)return r}}var p={af:function(e){return 1==e?"one":"other"},am:function(e){return e>=0&&e<=1?"one":"other"},ar:function(e){var t=String(e).split("."),n=Number(t[0])==e&&t[0].slice(-2);return 0==e?"zero":1==e?"one":2==e?"two":n>=3&&n<=10?"few":n>=11&&n<=99?"many":"other"},ast:function(e){var t=!String(e).split(".")[1];return 1==e&&t?"one":"other"},be:function(e){var t=String(e).split("."),n=Number(t[0])==e,r=n&&t[0].slice(-1),i=n&&t[0].slice(-2);return 1==r&&11!=i?"one":r>=2&&r<=4&&(i<12||i>14)?"few":n&&0==r||r>=5&&r<=9||i>=11&&i<=14?"many":"other"},br:function(e){var t=String(e).split("."),n=Number(t[0])==e,r=n&&t[0].slice(-1),i=n&&t[0].slice(-2),a=n&&t[0].slice(-6);return 
1==r&&11!=i&&71!=i&&91!=i?"one":2==r&&12!=i&&72!=i&&92!=i?"two":(3==r||4==r||9==r)&&(i<10||i>19)&&(i<70||i>79)&&(i<90||i>99)?"few":0!=e&&n&&0==a?"many":"other"},bs:function(e){var t=String(e).split("."),n=t[0],r=t[1]||"",i=!t[1],a=n.slice(-1),o=n.slice(-2),s=r.slice(-1),u=r.slice(-2);return i&&1==a&&11!=o||1==s&&11!=u?"one":i&&a>=2&&a<=4&&(o<12||o>14)||s>=2&&s<=4&&(u<12||u>14)?"few":"other"},cs:function(e){var t=String(e).split("."),n=t[0],r=!t[1];return 1==e&&r?"one":n>=2&&n<=4&&r?"few":r?"other":"many"},cy:function(e){return 0==e?"zero":1==e?"one":2==e?"two":3==e?"few":6==e?"many":"other"},da:function(e){var t=String(e).split("."),n=t[0],r=Number(t[0])==e;return 1!=e&&(r||0!=n&&1!=n)?"other":"one"},dsb:function(e){var t=String(e).split("."),n=t[0],r=t[1]||"",i=!t[1],a=n.slice(-2),o=r.slice(-2);return i&&1==a||1==o?"one":i&&2==a||2==o?"two":i&&(3==a||4==a)||3==o||4==o?"few":"other"},dz:function(e){return"other"},fil:function(e){var t=String(e).split("."),n=t[0],r=t[1]||"",i=!t[1],a=n.slice(-1),o=r.slice(-1);return i&&(1==n||2==n||3==n)||i&&4!=a&&6!=a&&9!=a||!i&&4!=o&&6!=o&&9!=o?"one":"other"},fr:function(e){return e>=0&&e<2?"one":"other"},ga:function(e){var t=Number(String(e).split(".")[0])==e;return 1==e?"one":2==e?"two":t&&e>=3&&e<=6?"few":t&&e>=7&&e<=10?"many":"other"},gd:function(e){var t=Number(String(e).split(".")[0])==e;return 1==e||11==e?"one":2==e||12==e?"two":t&&e>=3&&e<=10||t&&e>=13&&e<=19?"few":"other"},he:function(e){var t=String(e).split("."),n=t[0],r=!t[1],i=Number(t[0])==e,a=i&&t[0].slice(-1);return 1==e&&r?"one":2==n&&r?"two":r&&(e<0||e>10)&&i&&0==a?"many":"other"},is:function(e){var t=String(e).split("."),n=t[0],r=Number(t[0])==e,i=n.slice(-1),a=n.slice(-2);return r&&1==i&&11!=a||!r?"one":"other"},ksh:function(e){return 0==e?"zero":1==e?"one":"other"},lt:function(e){var t=String(e).split("."),n=t[1]||"",r=Number(t[0])==e,i=r&&t[0].slice(-1),a=r&&t[0].slice(-2);return 
1==i&&(a<11||a>19)?"one":i>=2&&i<=9&&(a<11||a>19)?"few":0!=n?"many":"other"},lv:function(e){var t=String(e).split("."),n=t[1]||"",r=n.length,i=Number(t[0])==e,a=i&&t[0].slice(-1),o=i&&t[0].slice(-2),s=n.slice(-2),u=n.slice(-1);return i&&0==a||o>=11&&o<=19||2==r&&s>=11&&s<=19?"zero":1==a&&11!=o||2==r&&1==u&&11!=s||2!=r&&1==u?"one":"other"},mk:function(e){var t=String(e).split("."),n=t[0],r=t[1]||"",i=!t[1],a=n.slice(-1),o=n.slice(-2),s=r.slice(-1),u=r.slice(-2);return i&&1==a&&11!=o||1==s&&11!=u?"one":"other"},mt:function(e){var t=String(e).split("."),n=Number(t[0])==e&&t[0].slice(-2);return 1==e?"one":0==e||n>=2&&n<=10?"few":n>=11&&n<=19?"many":"other"},pa:function(e){return 0==e||1==e?"one":"other"},pl:function(e){var t=String(e).split("."),n=t[0],r=!t[1],i=n.slice(-1),a=n.slice(-2);return 1==e&&r?"one":r&&i>=2&&i<=4&&(a<12||a>14)?"few":r&&1!=n&&(0==i||1==i)||r&&i>=5&&i<=9||r&&a>=12&&a<=14?"many":"other"},pt:function(e){var t=String(e).split(".")[0];return 0==t||1==t?"one":"other"},ro:function(e){var t=String(e).split("."),n=!t[1],r=Number(t[0])==e&&t[0].slice(-2);return 1==e&&n?"one":!n||0==e||1!=e&&r>=1&&r<=19?"few":"other"},ru:function(e){var t=String(e).split("."),n=t[0],r=!t[1],i=n.slice(-1),a=n.slice(-2);return r&&1==i&&11!=a?"one":r&&i>=2&&i<=4&&(a<12||a>14)?"few":r&&0==i||r&&i>=5&&i<=9||r&&a>=11&&a<=14?"many":"other"},se:function(e){return 1==e?"one":2==e?"two":"other"},si:function(e){var t=String(e).split("."),n=t[0],r=t[1]||"";return 0==e||1==e||0==n&&1==r?"one":"other"},sl:function(e){var t=String(e).split("."),n=t[0],r=!t[1],i=n.slice(-2);return 
r&&1==i?"one":r&&2==i?"two":r&&(3==i||4==i)||!r?"few":"other"}};p.as=p.am,p.az=p.af,p.bg=p.af,p.bn=p.am,p.ca=p.ast,p.ce=p.af,p.chr=p.af,p.de=p.ast,p.ee=p.af,p.el=p.af,p.en=p.ast,p.es=p.af,p.et=p.ast,p.eu=p.af,p.fa=p.am,p.fi=p.ast,p.fo=p.af,p.fur=p.af,p.fy=p.ast,p.gl=p.ast,p.gu=p.am,p.hi=p.am,p.hr=p.bs,p.hsb=p.dsb,p.hu=p.af,p.hy=p.fr,p.ia=p.ast,p.id=p.dz,p.it=p.ast,p.ja=p.dz,p.jgo=p.af,p.jv=p.dz,p.ka=p.af,p.kea=p.dz,p.kk=p.af,p.kl=p.af,p.km=p.dz,p.kn=p.am,p.ko=p.dz,p.ku=p.af,p.ky=p.af,p.lb=p.af,p.lkt=p.dz,p.lo=p.dz,p.ml=p.af,p.mn=p.af,p.mr=p.am,p.ms=p.dz,p.my=p.dz,p.nb=p.af,p.ne=p.af,p.nl=p.ast,p.nn=p.af,p.or=p.af,p.ps=p.af,p["pt-PT"]=p.ast,p.sah=p.dz,p.sd=p.af,p.sk=p.cs,p.so=p.af,p.sq=p.af,p.sr=p.bs,p.sv=p.ast,p.sw=p.ast,p.ta=p.af,p.te=p.af,p.th=p.dz,p.ti=p.pa,p.tk=p.af,p.to=p.dz,p.tr=p.af,p.ug=p.af,p.uk=p.ru,p.ur=p.ast,p.uz=p.af,p.vi=p.dz,p.wae=p.af,p.yi=p.ast,p.yue=p.dz,p.zh=p.dz,p.zu=p.am;let b=p;function m(e){return"pt-PT"===e?e:v(e)}var g=/^([a-z0-9]+)/i;function v(e){var t=e.match(g);if(!t)throw TypeError("Invalid locale: ".concat(e));return t[1]}function y(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}function w(e,t){for(var n=0;n0&&void 0!==arguments[0]?arguments[0]:[],n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};A(this,e),I(this,"numeric","always"),I(this,"style","long"),I(this,"localeMatcher","lookup");var r=n.numeric,i=n.style,a=n.localeMatcher;if(void 0!==r){if(0>N.indexOf(r))throw RangeError('Invalid "numeric" option: '.concat(r));this.numeric=r}if(void 0!==i){if(0>P.indexOf(i))throw RangeError('Invalid "style" option: '.concat(i));this.style=i}if(void 0!==a){if(0>R.indexOf(a))throw RangeError('Invalid "localeMatcher" option: '.concat(a));this.localeMatcher=a}if("string"==typeof t&&(t=[t]),t.push(s()),this.locale=e.supportedLocalesOf(t,{localeMatcher:this.localeMatcher})[0],!this.locale)throw Error("No supported locale was found");E.supportedLocalesOf(this.locale).length>0?this.pluralRules=new 
E(this.locale):console.warn('"'.concat(this.locale,'" locale is not supported')),"undefined"!=typeof Intl&&Intl.NumberFormat?(this.numberFormat=new Intl.NumberFormat(this.locale),this.numberingSystem=this.numberFormat.resolvedOptions().numberingSystem):this.numberingSystem="latn",this.locale=d(this.locale,{localeMatcher:this.localeMatcher})}return C(e,[{key:"format",value:function(){var e=z(arguments),t=x(e,2),n=t[0],r=t[1];return this.getRule(n,r).replace("{0}",this.formatNumber(Math.abs(n)))}},{key:"formatToParts",value:function(){var e=z(arguments),t=x(e,2),n=t[0],r=t[1],i=this.getRule(n,r),a=i.indexOf("{0}");if(a<0)return[{type:"literal",value:i}];var o=[];return a>0&&o.push({type:"literal",value:i.slice(0,a)}),o=o.concat(this.formatNumberToParts(Math.abs(n)).map(function(e){return k({},e,{unit:r})})),a+31&&void 0!==arguments[1]?arguments[1]:{};if("string"==typeof e)e=[e];else if(!Array.isArray(e))throw TypeError('Invalid "locales" argument');return e.filter(function(e){return d(e,t)})},j.addLocale=l,j.setDefaultLocale=u,j.getDefaultLocale=s,j.PluralRules=E;var F='Invalid "unit" argument';function Y(e){if("symbol"===S(e))throw TypeError(F);if("string"!=typeof e||("s"===e[e.length-1]&&(e=e.slice(0,e.length-1)),0>D.indexOf(e)))throw RangeError("".concat(F,": ").concat(e));return e}var B='Invalid "number" argument';function U(e){if(e=Number(e),Number.isFinite&&!Number.isFinite(e))throw RangeError("".concat(B,": ").concat(e));return e}function H(e){return 1/e==-1/0}function $(e){return e<0||0===e&&H(e)}function z(e){if(e.length<2)throw TypeError('"unit" argument is required');return[U(e[0]),Y(e[1])]}function G(e){return(G="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function W(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}function K(e,t){for(var 
n=0;n=n.length)break;a=n[i++]}else{if((i=n.next()).done)break;a=i.value}var a,o=a;if(t(o))return o;for(var s=o.split("-");s.length>1;)if(s.pop(),t(o=s.join("-")))return o}throw Error("No locale data has been registered for any of the locales: ".concat(e.join(", ")))}function Q(){return("undefined"==typeof Intl?"undefined":X(Intl))==="object"&&"function"==typeof Intl.DateTimeFormat}var ee=60,et=60*ee,en=24*et,er=7*en,ei=30.44*en,ea=365.2425*en;function eo(e){switch(e){case"second":return 1;case"minute":return ee;case"hour":return et;case"day":return en;case"week":return er;case"month":return ei;case"year":return ea}}function es(e){return void 0!==e.factor?e.factor:eo(e.unit||e.formatAs)||1}function eu(e){return"floor"===e?Math.floor:(0,Math.round)}function ec(e){return"floor"===e?1:.5}function el(e){return(el="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function ef(e,t){var n,r=t.prevStep,i=t.timestamp,a=t.now,o=t.future,s=t.round;return r&&(r.id||r.unit)&&(n=e["threshold_for_".concat(r.id||r.unit)]),void 0===n&&void 0!==e.threshold&&"function"==typeof(n=e.threshold)&&(n=n(a,o)),void 0===n&&(n=e.minTime),"object"===el(n)&&(n=r&&r.id&&void 0!==n[r.id]?n[r.id]:n.default),"function"==typeof n&&(n=n(i,{future:o,getMinTimeForUnit:function(e,t){return ed(e,t||r&&r.formatAs,{round:s})}})),void 0===n&&e.test&&(n=e.test(i,{now:a,future:o})?0:9007199254740991),void 0===n&&(r?e.formatAs&&r.formatAs&&(n=ed(e.formatAs,r.formatAs,{round:s})):n=0),void 0===n&&console.warn("[javascript-time-ago] A step should specify `minTime`:\n"+JSON.stringify(e,null,2)),n}function ed(e,t,n){var r,i=n.round,a=eo(e);if(r="now"===t?eo(e):eo(t),void 0!==a&&void 0!==r)return a-r*(1-ec(i))}function eh(e){for(var t=1;t0?e[o-1]:s}}}function eg(e,t,n){var r=arguments.length>3&&void 
0!==arguments[3]?arguments[3]:0,i=ef(e[r],eh({prevStep:e[r-1],timestamp:n.now-1e3*t},n));return void 0===i||Math.abs(t)=0})}function ey(e,t,n){var r=n.now,i=n.round;if(eo(e)){var a=1e3*eo(e),o=t>r,s=Math.abs(t-r),u=eu(i)(s/a)*a;return o?u>0?s-u+e_(i,a):s-u+1:-(s-u)+ew(i,a)}}function ew(e,t){return ec(e)*t}function e_(e,t){return(1-ec(e))*t+1}var eE=31536e9;function eS(e,t,n){var r,i=n.prevStep,a=n.nextStep,o=n.now,s=n.future,u=n.round,c=e.getTime?e.getTime():e,l=function(e){return ey(e,c,{now:o,round:u})},f=ex(s?t:a,c,{future:s,now:o,round:u,prevStep:s?i:t});if(void 0!==f){if(t&&(t.getTimeToNextUpdate&&(r=t.getTimeToNextUpdate(c,{getTimeToNextUpdateForUnit:l,getRoundFunction:eu,now:o,future:s,round:u})),void 0===r)){var d=t.unit||t.formatAs;d&&(r=l(d))}return void 0===r?f:Math.min(r,f)}}function ek(e,t,n){var r,i=n.now,a=n.future,o=ef(e,{timestamp:t,now:i,future:a,round:n.round,prevStep:n.prevStep});return void 0===o?void 0:a?t-1e3*o+1:0===o&&t===i?eE:t+1e3*o}function ex(e,t,n){var r=n.now,i=n.future,a=n.round,o=n.prevStep;if(e){var s=ek(e,t,{now:r,future:i,round:a,prevStep:o});if(void 0===s)return;return s-r}return i?t-r+1:eE}var eT={};function eM(e){return eT[e]}function eO(e){if(!e)throw Error("[javascript-time-ago] No locale data passed.");eT[e.locale]=e}let eA=[{formatAs:"now"},{formatAs:"second"},{formatAs:"minute"},{formatAs:"hour"},{formatAs:"day"},{formatAs:"week"},{formatAs:"month"},{formatAs:"year"}],eL={steps:eA,labels:"long"};function eC(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:[],n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=n.polyfill;ts(this,e),"string"==typeof t&&(t=[t]),this.locale=J(t.concat(e.getDefaultLocale()),eM),"undefined"!=typeof Intl&&Intl.NumberFormat&&(this.numberFormat=new Intl.NumberFormat(this.locale)),!1===r?(this.IntlRelativeTimeFormat=Intl.RelativeTimeFormat,this.IntlPluralRules=Intl.PluralRules):(this.IntlRelativeTimeFormat=j,this.IntlPluralRules=j.PluralRules),this.relativeTimeFormatCache=new 
Z,this.pluralRulesCache=new Z}return tc(e,[{key:"format",value:function(e,t,n){n||(t&&!tv(t)?(n=t,t=void 0):n={}),t||(t=eD),"string"==typeof t&&(t=tt(t));var r,i=td(e),a=this.getLabels(t.flavour||t.labels),o=a.labels,s=a.labelsType;void 0!==t.now&&(r=t.now),void 0===r&&void 0!==n.now&&(r=n.now),void 0===r&&(r=Date.now());var u=(r-i)/1e3,c=n.future||u<0,l=tb(o,eM(this.locale).now,eM(this.locale).long,c);if(t.custom){var f=t.custom({now:r,date:new Date(i),time:i,elapsed:u,locale:this.locale});if(void 0!==f)return f}var d=tp(t.units,o,l),h=n.round||t.round,p=eb(t.gradation||t.steps||eD.steps,u,{now:r,units:d,round:h,future:c,getNextStep:!0}),b=tr(p,3),m=b[0],g=b[1],v=b[2],y=this.formatDateForStep(i,g,u,{labels:o,labelsType:s,nowLabel:l,now:r,future:c,round:h})||"";if(n.getTimeToNextUpdate){var w=eS(i,g,{nextStep:v,prevStep:m,now:r,future:c,round:h});return[y,w]}return y}},{key:"formatDateForStep",value:function(e,t,n,r){var i=this,a=r.labels,o=r.labelsType,s=r.nowLabel,u=r.now,c=r.future,l=r.round;if(t){if(t.format)return t.format(e,this.locale,{formatAs:function(e,t){return i.formatValue(t,e,{labels:a,future:c})},now:u,future:c});var f=t.unit||t.formatAs;if(!f)throw Error("[javascript-time-ago] Each step must define either `formatAs` or `format()`. 
Step: ".concat(JSON.stringify(t)));if("now"===f)return s;var d=Math.abs(n)/es(t);t.granularity&&(d=eu(l)(d/t.granularity)*t.granularity);var h=-1*Math.sign(n)*eu(l)(d);switch(0===h&&(h=0),o){case"long":case"short":case"narrow":return this.getFormatter(o).format(h,f);default:return this.formatValue(h,f,{labels:a,future:c})}}}},{key:"formatValue",value:function(e,t,n){var r=n.labels,i=n.future;return this.getFormattingRule(r,t,e,{future:i}).replace("{0}",this.formatNumber(Math.abs(e)))}},{key:"getFormattingRule",value:function(e,t,n,r){var i=r.future;if(this.locale,"string"==typeof(e=e[t]))return e;var a=e[0===n?i?"future":"past":n<0?"past":"future"]||e;return"string"==typeof a?a:a[this.getPluralRules().select(Math.abs(n))]||a.other}},{key:"formatNumber",value:function(e){return this.numberFormat?this.numberFormat.format(e):String(e)}},{key:"getFormatter",value:function(e){return this.relativeTimeFormatCache.get(this.locale,e)||this.relativeTimeFormatCache.put(this.locale,e,new this.IntlRelativeTimeFormat(this.locale,{style:e}))}},{key:"getPluralRules",value:function(){return this.pluralRulesCache.get(this.locale)||this.pluralRulesCache.put(this.locale,new this.IntlPluralRules(this.locale))}},{key:"getLabels",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:[];"string"==typeof e&&(e=[e]),e=(e=e.map(function(e){switch(e){case"tiny":case"mini-time":return"mini";default:return e}})).concat("long");for(var t=eM(this.locale),n=e,r=Array.isArray(n),i=0,n=r?n:n[Symbol.iterator]();;){if(r){if(i>=n.length)break;a=n[i++]}else{if((i=n.next()).done)break;a=i.value}var a,o=a;if(t[o])return{labelsType:o,labels:t[o]}}}}]),e}(),tf="en";function td(e){if(e.constructor===Date||th(e))return e.getTime();if("number"==typeof e)return e;throw Error("Unsupported relative time formatter input: ".concat(tn(e),", ").concat(e))}function th(e){return"object"===tn(e)&&"function"==typeof e.getTime}function tp(e,t,n){var r=Object.keys(t);return 
n&&r.push("now"),e&&(r=e.filter(function(e){return"now"===e||r.indexOf(e)>=0})),r}function tb(e,t,n,r){var i=e.now||t&&t.now;return i?"string"==typeof i?i:r?i.future:i.past:n&&n.second&&n.second.current?n.second.current:void 0}tl.getDefaultLocale=function(){return tf},tl.setDefaultLocale=function(e){return tf=e},tl.addDefaultLocale=function(e){if(r)throw Error("[javascript-time-ago] `TimeAgo.addDefaultLocale()` can only be called once. To add other locales, use `TimeAgo.addLocale()`.");r=!0,tl.setDefaultLocale(e.locale),tl.addLocale(e)},tl.addLocale=function(e){eO(e),j.addLocale(e)},tl.locale=tl.addLocale,tl.addLabels=function(e,t,n){var r=eM(e);r||(eO({locale:e}),r=eM(e)),r[t]=n};var tm={}.constructor;function tg(e){return void 0!==tn(e)&&null!==e&&e.constructor===tm}function tv(e){return"string"==typeof e||ty(e)}function ty(e){return tg(e)&&(Array.isArray(e.steps)||Array.isArray(e.gradation)||Array.isArray(e.flavour)||"string"==typeof e.flavour||Array.isArray(e.labels)||"string"==typeof e.labels||Array.isArray(e.units)||"function"==typeof e.custom)}},41800(e,t,n){e.exports=function(){"use strict";var e={121:function(e,t,r){r.r(t),r.d(t,{default:function(){return E}}),n(41539),n(21249),n(54747),n(15306),n(74916),n(47042),n(82526),n(41817),n(32165),n(78783),n(66992),n(33948),n(81486);var i=n(68929),a=r.n(i),o=n(1469),s=r.n(o),u=n(45220),c=r.n(u),l=n(3674),f=r.n(l),d=n(82492),h=r.n(d);function p(e){return(p="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function b(e){return s()(e)?e:[e]}function m(e){if(null===e||"object"!==p(e)||(t=e,"[object Date]"===Object.prototype.toString.call(t)))return e;if(s()(e))return e.map(m);var t,n={};return f()(e).forEach(function(t){n[a()(t)]=m(e[t])}),n}function g(e,t){var n=t.camelizeKeys,r=t.camelizeTypeValues,i={};return f()(e).forEach(function(t){var 
o=e[t],u=n?a()(t):t;i[u]={},void 0!==o.data&&(s()(o.data)?i[u].data=o.data.map(function(e){return{id:e.id,type:r?a()(e.type):e.type}}):c()(o.data)?i[u].data=o.data:i[u].data={id:o.data.id,type:r?a()(o.data.type):o.data.type}),o.links&&(i[u].links=n?m(o.links):o.links),o.meta&&(i[u].meta=n?m(o.meta):o.meta)}),i}function v(e,t){if(t.camelizeKeys){var n={};return f()(e).forEach(function(t){n[a()(t)]=m(e[t])}),n}return e}function y(e,t){var n=t.camelizeKeys,r=t.camelizeTypeValues,i={};return b(e).forEach(function(e){var t=n?a()(e.type):e.type;i[t]=i[t]||{},i[t][e.id]=i[t][e.id]||{id:e.id},i[t][e.id].type=r?a()(e.type):e.type,n?(i[t][e.id].attributes={},f()(e.attributes).forEach(function(n){i[t][e.id].attributes[a()(n)]=m(e.attributes[n])})):i[t][e.id].attributes=e.attributes,e.links&&(i[t][e.id].links={},f()(e.links).forEach(function(r){var o=n?a()(r):r;i[t][e.id].links[o]=e.links[r]})),e.relationships&&(i[t][e.id].relationships=g(e.relationships,{camelizeKeys:n,camelizeTypeValues:r})),e.meta&&(i[t][e.id].meta=v(e.meta,{camelizeKeys:n}))}),i}function w(e){return e.replace(/\?.*$/,"")}function _(e,t,n){var r,i=n.camelizeKeys,o=n.camelizeTypeValues,s={meta:{}};if(n.filterEndpoint)s.meta[t]={},r=s.meta[t];else{var u=w(t);s.meta[u]={},s.meta[u][t.slice(u.length)]={},r=s.meta[u][t.slice(u.length)]}if(r.data={},e.data){var c=[];b(e.data).forEach(function(e){var t={id:e.id,type:o?a()(e.type):e.type};e.relationships&&(t.relationships=g(e.relationships,{camelizeKeys:i,camelizeTypeValues:o})),c.push(t)}),r.data=c}return e.links&&(r.links=e.links,s.meta[w(t)].links=e.links),e.meta&&(r.meta=v(e.meta,{camelizeKeys:i})),s}function E(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.filterEndpoint,r=void 0===n||n,i=t.camelizeKeys,a=void 0===i||i,o=t.camelizeTypeValues,s=void 0===o||o,u=t.endpoint,c={};if(e.data&&h()(c,y(e.data,{camelizeKeys:a,camelizeTypeValues:s})),e.included&&h()(c,y(e.included,{camelizeKeys:a,camelizeTypeValues:s})),u){var 
l=r?w(u):u;h()(c,_(e,l,{camelizeKeys:a,camelizeTypeValues:s,filterEndpoint:r}))}return c}}},t={};function r(n){if(t[n])return t[n].exports;var i=t[n]={exports:{}};return e[n](i,i.exports,r),i.exports}return r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,{a:t}),t},r.d=function(e,t){for(var n in t)r.o(t,n)&&!r.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:t[n]})},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r(121)}()},63731:function(e){var t,n;t="undefined"!=typeof self?self:this,n=function(){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var i=t[r]={i:r,l:!1,exports:{}};return e[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t||4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var i in e)n.d(r,i,(function(t){return e[t]}).bind(null,i));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=0)}([function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(1),i=/["'&<>]/,a=function(e){var t=i.exec(e);if(null!==t){var n,r="",a=void 0,o=0;for(a=t.index;a")},e.prototype.space=function(){this.buffer.push(" ")},e.prototype.indent=function(e){if(e>0){for(var 
t="",n=0;n'+a(e)+""),this.buffer.push('"')},e.prototype.printString=function(e){this.buffer.push('"'),this.buffer.push(''+a(e)+""),this.buffer.push('"')},e.prototype.printBoolean=function(e){this.buffer.push(''+e+"")},e.prototype.printNumber=function(e){this.buffer.push(''+e+"")},e.prototype.printSelectionStart=function(){this.buffer.push(""),this.buffer.push('
')},e.prototype.printSelectionEnd=function(){this.buffer.push("
"),this.buffer.push('
')},Object.defineProperty(e.prototype,"printSelectionEndAtNewLine",{set:function(e){this._printSelectionEndAtNewLine=e},enumerable:!0,configurable:!0}),e.prototype.toString=function(){return this.buffer.join("")},e}(),s=function(e,t,n,r,i){t.checkCircular(e),t.print("{"),t.newLine();for(var a=Object.keys(e),o=0;o'):a.print('
'),Array.isArray(e)?u(e,a,0,t,i):s(e,a,0,t,i),a.print("
"),a.toString()}return""}},function(e,t,n){"use strict";n.r(t),n.d(t,"__extends",function(){return i}),n.d(t,"__assign",function(){return a}),n.d(t,"__rest",function(){return o}),n.d(t,"__decorate",function(){return s}),n.d(t,"__param",function(){return u}),n.d(t,"__metadata",function(){return c}),n.d(t,"__awaiter",function(){return l}),n.d(t,"__generator",function(){return f}),n.d(t,"__exportStar",function(){return d}),n.d(t,"__values",function(){return h}),n.d(t,"__read",function(){return p}),n.d(t,"__spread",function(){return b}),n.d(t,"__await",function(){return m}),n.d(t,"__asyncGenerator",function(){return g}),n.d(t,"__asyncDelegator",function(){return v}),n.d(t,"__asyncValues",function(){return y}),n.d(t,"__makeTemplateObject",function(){return w}),n.d(t,"__importStar",function(){return _}),n.d(t,"__importDefault",function(){return E});/*! ***************************************************************************** +Copyright (c) Microsoft Corporation. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at http://www.apache.org/licenses/LICENSE-2.0 + +THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +MERCHANTABLITY OR NON-INFRINGEMENT. + +See the Apache Version 2.0 License for specific language governing permissions +and limitations under the License. 
+***************************************************************************** */ var r=Object.setPrototypeOf||({__proto__:[]})instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var n in t)t.hasOwnProperty(n)&&(e[n]=t[n])};function i(e,t){function n(){this.constructor=e}r(e,t),e.prototype=null===t?Object.create(t):(n.prototype=t.prototype,new n)}var a=Object.assign||function(e){for(var t,n=1,r=arguments.length;nt.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols){var i=0;for(r=Object.getOwnPropertySymbols(e);it.indexOf(r[i])&&(n[r[i]]=e[r[i]])}return n}function s(e,t,n,r){var i,a=arguments.length,o=a<3?t:null===r?r=Object.getOwnPropertyDescriptor(t,n):r;if("object"==typeof Reflect&&"function"==typeof Reflect.decorate)o=Reflect.decorate(e,t,n,r);else for(var s=e.length-1;s>=0;s--)(i=e[s])&&(o=(a<3?i(o):a>3?i(t,n,o):i(t,n))||o);return a>3&&o&&Object.defineProperty(t,n,o),o}function u(e,t){return function(n,r){t(n,r,e)}}function c(e,t){if("object"==typeof Reflect&&"function"==typeof Reflect.metadata)return Reflect.metadata(e,t)}function l(e,t,n,r){return new(n||(n=Promise))(function(i,a){function o(e){try{u(r.next(e))}catch(t){a(t)}}function s(e){try{u(r.throw(e))}catch(t){a(t)}}function u(e){e.done?i(e.value):new n(function(t){t(e.value)}).then(o,s)}u((r=r.apply(e,t||[])).next())})}function f(e,t){var n,r,i,a,o={label:0,sent:function(){if(1&i[0])throw i[1];return i[1]},trys:[],ops:[]};return a={next:s(0),throw:s(1),return:s(2)},"function"==typeof Symbol&&(a[Symbol.iterator]=function(){return this}),a;function s(a){return function(s){return function(a){if(n)throw TypeError("Generator is already executing.");for(;o;)try{if(n=1,r&&(i=r[2&a[0]?"return":a[0]?"throw":"next"])&&!(i=i.call(r,a[1])).done)return i;switch(r=0,i&&(a=[0,i.value]),a[0]){case 0:case 1:i=a;break;case 4:return o.label++,{value:a[1],done:!1};case 5:o.label++,r=a[1],a=[0];continue;case 
7:a=o.ops.pop(),o.trys.pop();continue;default:if(!(i=(i=o.trys).length>0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]=e.length&&(e=void 0),{value:e&&e[n++],done:!e}}}}function p(e,t){var n="function"==typeof Symbol&&e[Symbol.iterator];if(!n)return e;var r,i,a=n.call(e),o=[];try{for(;(void 0===t||t-- >0)&&!(r=a.next()).done;)o.push(r.value)}catch(s){i={error:s}}finally{try{r&&!r.done&&(n=a.return)&&n.call(a)}finally{if(i)throw i.error}}return o}function b(){for(var e=[],t=0;t1||s(e,t)})})}function s(e,t){try{var n;(n=i[e](t)).value instanceof m?Promise.resolve(n.value.v).then(u,c):l(a[0][2],n)}catch(r){l(a[0][3],r)}}function u(e){s("next",e)}function c(e){s("throw",e)}function l(e,t){e(t),a.shift(),a.length&&s(a[0][0],a[0][1])}}function v(e){var t,n;return t={},r("next"),r("throw",function(e){throw e}),r("return"),t[Symbol.iterator]=function(){return this},t;function r(r,i){e[r]&&(t[r]=function(t){return(n=!n)?{value:m(e[r](t)),done:"return"===r}:i?i(t):t})}}function y(e){if(!Symbol.asyncIterator)throw TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator];return t?t.call(e):h(e)}function w(e,t){return Object.defineProperty?Object.defineProperty(e,"raw",{value:t}):e.raw=t,e}function _(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var n in e)Object.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t.default=e,t}function E(e){return e&&e.__esModule?e:{default:e}}}])},e.exports=n()},35828(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=s;var r=n(25477),i=a(r);function a(e){return e&&e.__esModule?e:{default:e}}function o(e){var t={};for(var n in e)t[(0,i.default)(n)]=e[n];return e.fallbacks&&(Array.isArray(e.fallbacks)?t.fallbacks=e.fallbacks.map(o):t.fallbacks=o(e.fallbacks)),t}function s(){function e(e){if(Array.isArray(e)){for(var t=0;t0&&void 0!==arguments[0]?arguments[0]:{},t=s(e);function n(e,n){if("style"!==n.type)return e;for(var r in e)e[r]=c(r,e[r],t);return 
e}function r(e,n){return c(n,e,t)}return{onProcessStyle:n,onChangeValue:r}}},29059(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=Object.assign||function(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{};return e.createGenerateClassName&&(this.options.createGenerateClassName=e.createGenerateClassName,this.generateClassName=e.createGenerateClassName()),null!=e.insertionPoint&&(this.options.insertionPoint=e.insertionPoint),(e.virtual||e.Renderer)&&(this.options.Renderer=e.Renderer||(e.virtual?A.default:M.default)),e.plugins&&this.use.apply(this,e.plugins),this}},{key:"createStyleSheet",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.index;"number"!=typeof n&&(n=0===y.default.index?0:y.default.index+1);var r=new c.default(e,i({},t,{jss:this,generateClassName:t.generateClassName||this.generateClassName,insertionPoint:this.options.insertionPoint,Renderer:this.options.Renderer,index:n}));return this.plugins.onProcessSheet(r),r}},{key:"removeStyleSheet",value:function(e){return e.detach(),y.default.remove(e),this}},{key:"createRule",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};(void 0===e?"undefined":r(e))==="object"&&(n=t,t=e,e=void 0);var i=n;i.jss=this,i.Renderer=this.options.Renderer,i.generateClassName||(i.generateClassName=this.generateClassName),i.classes||(i.classes={});var a=(0,x.default)(e,t,i);return!i.selector&&a instanceof _.default&&(a.selector="."+i.generateClassName(a)),this.plugins.onProcessRule(a),a}},{key:"use",value:function(){for(var e=this,t=arguments.length,n=Array(t),r=0;r0&&(this.refs[t]--,0===this.refs[t]&&this.sheets[t].detach())}},{key:"size",get:function(){return this.keys.length}}]),e}();t.default=u},92122(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=function(){function e(e,t){for(var n=0;n=this.index){t.push(e);return}for(var 
r=0;rn){t.splice(r,0,e);return}}}},{key:"reset",value:function(){this.registry=[]}},{key:"remove",value:function(e){var t=this.registry.indexOf(e);this.registry.splice(t,1)}},{key:"toString",value:function(e){return this.registry.filter(function(e){return e.attached}).map(function(t){return t.toString(e)}).join("\n")}},{key:"index",get:function(){return 0===this.registry.length?0:this.registry[this.registry.length-1].options.index}}]),e}();t.default=i},26899(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=Object.assign||function(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:0;return e.substr(t,e.indexOf("{")-1)},function(e){if(e.type===y.STYLE_RULE)return e.selectorText;if(e.type===y.KEYFRAMES_RULE){var t=e.name;if(t)return"@keyframes "+t;var n=e.cssText;return"@"+v(n,n.indexOf("keyframes"))}return v(e.cssText)});function _(e,t){return e.selectorText=t,e.selectorText===t}var E,S,k=p(function(){return document.head||document.getElementsByTagName("head")[0]}),x=(E=void 0,S=!1,function(e){var t={};E||(E=document.createElement("style"));for(var n=0;nt.index&&r.options.insertionPoint===t.insertionPoint)return r}return null}function M(e,t){for(var n=e.length-1;n>=0;n--){var r=e[n];if(r.attached&&r.options.insertionPoint===t.insertionPoint)return r}return null}function O(e){for(var t=k(),n=0;n0){var n=T(t,e);if(n)return n.renderer.element;if(n=M(t,e))return n.renderer.element.nextElementSibling}var r=e.insertionPoint;if(r&&"string"==typeof r){var i=O(r);if(i)return i.nextSibling;(0,a.default)("jss"===r,'[JSS] Insertion point "%s" not found.',r)}return null}function L(e,t){var n=t.insertionPoint,r=A(t);if(r){var i=r.parentNode;i&&i.insertBefore(e,r);return}if(n&&"number"==typeof n.nodeType){var o=n,s=o.parentNode;s?s.insertBefore(e,o.nextSibling):(0,a.default)(!1,"[JSS] Insertion point is not in the DOM.");return}k().insertBefore(e,r)}var C=p(function(){var e=document.querySelector('meta[property="csp-nonce"]');return 
e?e.getAttribute("content"):null}),I=function(){function e(t){h(this,e),this.getPropertyValue=b,this.setProperty=m,this.removeProperty=g,this.setSelector=_,this.getKey=w,this.getUnescapedKeysMap=x,this.hasInsertedRules=!1,t&&s.default.add(t),this.sheet=t;var n=this.sheet?this.sheet.options:{},r=n.media,i=n.meta,a=n.element;this.element=a||document.createElement("style"),this.element.setAttribute("data-jss",""),r&&this.element.setAttribute("media",r),i&&this.element.setAttribute("data-meta",i);var o=C();o&&this.element.setAttribute("nonce",o)}return r(e,[{key:"attach",value:function(){!this.element.parentNode&&this.sheet&&(this.hasInsertedRules&&(this.deploy(),this.hasInsertedRules=!1),L(this.element,this.sheet.options))}},{key:"detach",value:function(){this.element.parentNode.removeChild(this.element)}},{key:"deploy",value:function(){this.sheet&&(this.element.textContent="\n"+this.sheet.toString()+"\n")}},{key:"insertRule",value:function(e,t){var n=this.element.sheet,r=n.cssRules,i=e.toString();if(t||(t=r.length),!i)return!1;try{n.insertRule(i,t)}catch(o){return(0,a.default)(!1,"[JSS] Can not insert an unsupported rule \n\r%s",e),!1}return this.hasInsertedRules=!0,r[t]}},{key:"deleteRule",value:function(e){var t=this.element.sheet,n=this.indexOf(e);return -1!==n&&(t.deleteRule(n),!0)}},{key:"indexOf",value:function(e){for(var t=this.element.sheet.cssRules,n=0;n0&&void 0!==arguments[0]?arguments[0]:{indent:1},t=this.rules.toString(e);return t?this.key+" {\n"+t+"\n}":""}}]),e}();t.default=c},12398(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=function(){function e(e,t){for(var n=0;n0&&void 0!==arguments[0]?arguments[0]:{indent:1},t=this.rules.toString(e);return t&&(t+="\n"),this.key+" {\n"+t+"}"}}]),e}();t.default=c},3486(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=function(){function e(e,t){for(var n=0;nc&&(0,i.default)(!1,"[JSS] You might have a memory leak. 
Rule counter is at %s.",e);var a=t,o="";return(r&&(a=r.options.classNamePrefix||t,null!=r.options.jss.id&&(o+=r.options.jss.id)),"production"===l)?""+a+s.default+o+e:a+n.key+"-"+s.default+(o&&"-"+o)+"-"+e}}},89380(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=l;var r=n(63189),i=c(r),a=n(15803),o=c(a),s=n(2808),u=c(s);function c(e){return e&&e.__esModule?e:{default:e}}function l(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"unnamed",t=arguments[1],n=arguments[2],r=n.jss,a=(0,u.default)(t),s=r.plugins.onCreateRule(e,a,n);return s||("@"===e[0]&&(0,i.default)(!1,"[JSS] Unknown at-rule %s",e),new o.default(e,a,n))}},55878(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n.g.CSS,i="production",a=/([[\].#*$><+~=|^:(),"'`])/g;t.default=function(e){return"production"===i?e:r&&r.escape?r.escape(e):e.replace(a,"\\$1")}},27343(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e};function r(e){var t=null;for(var i in e){var a=e[i],o=void 0===a?"undefined":n(a);if("function"===o)t||(t={}),t[i]=a;else if("object"===o&&null!==a&&!Array.isArray(a)){var s=r(a);s&&(t||(t={}),t[i]=s)}}return t}t.default=r},97628(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r=n(67121),i=a(r);function a(e){return e&&e.__esModule?e:{default:e}}t.default=function(e){return e&&e[i.default]&&e===e[i.default]()}},94229(e,t){"use strict";function n(e,t){e.renderable=t,e.rules&&t.cssRules&&e.rules.link(t.cssRules)}Object.defineProperty(t,"__esModule",{value:!0}),t.default=n},141(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r="2f1acc6c3a606b082e5eef5e54414ffb";null==n.g[r]&&(n.g[r]=0),t.default=n.g[r]++},70084(e,t,n){"use 
strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=s;var r=n(16229),i=a(r);function a(e){return e&&e.__esModule?e:{default:e}}function o(e,t){for(var n="",r=0;r2&&void 0!==arguments[2]?arguments[2]:{},r="";if(!t)return r;var a=n.indent,s=void 0===a?0:a,u=t.fallbacks;if(s++,u){if(Array.isArray(u))for(var c=0;c1&&void 0!==arguments[1]&&arguments[1];if(!Array.isArray(e))return e;var r="";if(Array.isArray(e[0]))for(var i=0;i0&&void 0!==arguments[0]?arguments[0]:{};s(this,e),this.cookieOptions=Object.assign({path:"/"},t),u=void 0===t.prefix?u:t.prefix}return r(e,[{key:"getItem",value:function(e){var t=a.default.parse(document.cookie);return t&&t.hasOwnProperty(u+e)?t[u+e]:null}},{key:"setItem",value:function(e,t){return document.cookie=a.default.serialize(u+e,t,this.cookieOptions),t}},{key:"removeItem",value:function(e){var t=Object.assign({},this.cookieOptions,{maxAge:-1});return document.cookie=a.default.serialize(u+e,"",t),null}},{key:"clear",value:function(){var e=a.default.parse(document.cookie);for(var t in e)0===t.indexOf(u)&&this.removeItem(t.substr(u.length));return null}}]),e}();function l(){var e=new c;try{var t="__test";e.setItem(t,"1");var n=e.getItem(t);return e.removeItem(t),"1"===n}catch(r){return!1}}t.default=c},90145(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n=function(){function e(e,t){for(var n=0;n0&&void 0!==arguments[0]?arguments[0]:"localStorage",t=String(e).replace(/storage$/i,"").toLowerCase();if("local"===t)return a("localStorage");if("session"===t)return a("sessionStorage");if("cookie"===t)return(0,r.hasCookies)();if("memory"===t)return!0;throw Error("Storage method `"+e+"` is not available.\n Please use one of the following: localStorage, sessionStorage, cookieStorage, memoryStorage.")}},72426(e,t){"use strict";/*! 
+ * cookie + * Copyright(c) 2012-2014 Roman Shtylman + * Copyright(c) 2015 Douglas Christopher Wilson + * MIT Licensed + */ t.parse=o,t.serialize=s;var n=decodeURIComponent,r=encodeURIComponent,i=/; */,a=/^[\u0009\u0020-\u007e\u0080-\u00ff]+$/;function o(e,t){if("string"!=typeof e)throw TypeError("argument str must be a string");for(var r={},a=t||{},o=e.split(i),s=a.decode||n,c=0;cc});var r=n(56169);e=n.hmd(e);var i="object"==typeof exports&&exports&&!exports.nodeType&&exports,a=i&&e&&!e.nodeType&&e,o=a&&a.exports===i?r.Z.Buffer:void 0,s=o?o.allocUnsafe:void 0;function u(e,t){if(t)return e.slice();var n=e.length,r=s?s(n):new e.constructor(n);return e.copy(r),r}let c=u},48277(e,t,n){"use strict";n.d(t,{Z:()=>i});var r="object"==typeof n.g&&n.g&&n.g.Object===Object&&n.g;let i=r},79730(e,t,n){"use strict";n.d(t,{Z:()=>u});var r=n(48277);e=n.hmd(e);var i="object"==typeof exports&&exports&&!exports.nodeType&&exports,a=i&&e&&!e.nodeType&&e,o=a&&a.exports===i&&r.Z.process,s=function(){try{var e=a&&a.require&&a.require("util").types;if(e)return e;return o&&o.binding&&o.binding("util")}catch(t){}}();let u=s},56169(e,t,n){"use strict";n.d(t,{Z:()=>o});var r=n(48277),i="object"==typeof self&&self&&self.Object===Object&&self,a=r.Z||i||Function("return this")();let o=a},29710(e,t,n){"use strict";n.d(t,{Z:()=>l});var r=n(56169);function i(){return!1}let a=i;e=n.hmd(e);var o="object"==typeof exports&&exports&&!exports.nodeType&&exports,s=o&&e&&!e.nodeType&&e,u=s&&s.exports===o?r.Z.Buffer:void 0,c=(u?u.isBuffer:void 0)||a;let l=c},18552(e,t,n){var r=n(10852),i=n(55639),a=r(i,"DataView");e.exports=a},1989(e,t,n){var r=n(51789),i=n(80401),a=n(57667),o=n(21327),s=n(81866);function u(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t-1}e.exports=i},1196(e){function t(e,t,n){for(var r=-1,i=null==e?0:e.length;++r0&&n(l)?t>1?a(l,t-1,n,o,s):r(s,l):o||(s[s.length]=l)}return s}e.exports=a},28483(e,t,n){var r=n(25063)();e.exports=r},47816(e,t,n){var r=n(28483),i=n(3674);function 
a(e,t){return e&&r(e,t,i)}e.exports=a},97786(e,t,n){var r=n(71811),i=n(40327);function a(e,t){t=r(t,e);for(var n=0,a=t.length;null!=e&&ni?0:i+t),(n=n>i?i:n)<0&&(n+=i),i=t>n?0:n-t>>>0,t>>>=0;for(var a=Array(i);++r=c){var m=t?null:s(e);if(m)return u(m);h=!1,f=o,b=new r}else b=t?[]:p;outer:for(;++l=i?e:r(e,t,n)}e.exports=i},74318(e,t,n){var r=n(11149);function i(e){var t=new e.constructor(e.byteLength);return new r(t).set(new r(e)),t}e.exports=i},64626(e,t,n){e=n.nmd(e);var r=n(55639),i=t&&!t.nodeType&&t,a=i&&e&&!e.nodeType&&e,o=a&&a.exports===i?r.Buffer:void 0,s=o?o.allocUnsafe:void 0;function u(e,t){if(t)return e.slice();var n=e.length,r=s?s(n):new e.constructor(n);return e.copy(r),r}e.exports=u},57157(e,t,n){var r=n(74318);function i(e,t){var n=t?r(e.buffer):e.buffer;return new e.constructor(n,e.byteOffset,e.byteLength)}e.exports=i},93147(e){var t=/\w*$/;function n(e){var n=new e.constructor(e.source,t.exec(e));return n.lastIndex=e.lastIndex,n}e.exports=n},40419(e,t,n){var r=n(62705),i=r?r.prototype:void 0,a=i?i.valueOf:void 0;function o(e){return a?Object(a.call(e)):{}}e.exports=o},77133(e,t,n){var r=n(74318);function i(e,t){var n=t?r(e.buffer):e.buffer;return new e.constructor(n,e.byteOffset,e.length)}e.exports=i},278(e){function t(e,t){var n=-1,r=e.length;for(t||(t=Array(r));++n1?n[a-1]:void 0,s=a>2?n[2]:void 0;for(o=e.length>3&&"function"==typeof o?(a--,o):void 0,s&&i(n[0],n[1],s)&&(o=a<3?void 0:o,a=1),t=Object(t);++rd))return!1;var p=l.get(e),b=l.get(t);if(p&&b)return p==t&&b==e;var m=-1,g=!0,v=n&s?new r:void 0;for(l.set(e,t),l.set(t,e);++m-1&&e%1==0&&e-1}e.exports=i},13399(e,t,n){var r=n(18470);function i(e,t){var n=this.__data__,i=r(n,e);return i<0?(++this.size,n.push([e,t])):n[i][1]=t,this}e.exports=i},24785(e,t,n){var r=n(1989),i=n(38407),a=n(57071);function o(){this.size=0,this.__data__={hash:new r,map:new(a||i),string:new r}}e.exports=o},11285(e,t,n){var r=n(45050);function i(e){var t=r(this,e).delete(e);return 
this.size-=t?1:0,t}e.exports=i},96e3(e,t,n){var r=n(45050);function i(e){return r(this,e).get(e)}e.exports=i},49916(e,t,n){var r=n(45050);function i(e){return r(this,e).has(e)}e.exports=i},95265(e,t,n){var r=n(45050);function i(e,t){var n=r(this,e),i=n.size;return n.set(e,t),this.size+=n.size==i?0:1,this}e.exports=i},68776(e){function t(e){var t=-1,n=Array(e.size);return e.forEach(function(e,r){n[++t]=[r,e]}),n}e.exports=t},42634(e){function t(e,t){return function(n){return null!=n&&n[e]===t&&(void 0!==t||e in Object(n))}}e.exports=t},24523(e,t,n){var r=n(88306),i=500;function a(e){var t=r(e,function(e){return n.size===i&&n.clear(),e}),n=t.cache;return t}e.exports=a},94536(e,t,n){var r=n(10852)(Object,"create");e.exports=r},86916(e,t,n){var r=n(5569)(Object.keys,Object);e.exports=r},33498(e){function t(e){var t=[];if(null!=e)for(var n in Object(e))t.push(n);return t}e.exports=t},31167(e,t,n){e=n.nmd(e);var r=n(31957),i=t&&!t.nodeType&&t,a=i&&e&&!e.nodeType&&e,o=a&&a.exports===i&&r.process,s=function(){try{var e=a&&a.require&&a.require("util").types;if(e)return e;return o&&o.binding&&o.binding("util")}catch(t){}}();e.exports=s},2333(e){var t=Object.prototype.toString;function n(e){return t.call(e)}e.exports=n},5569(e){function t(e,t){return function(n){return e(t(n))}}e.exports=t},45357(e,t,n){var r=n(96874),i=Math.max;function a(e,t,n){return t=i(void 0===t?e.length-1:t,0),function(){for(var a=arguments,o=-1,s=i(a.length-t,0),u=Array(s);++o0){if(++i>=t)return arguments[0]}else i=0;return e.apply(void 0,arguments)}}e.exports=i},37465(e,t,n){var r=n(38407);function i(){this.__data__=new r,this.size=0}e.exports=i},63779(e){function t(e){var t=this.__data__,n=t.delete(e);return this.size=t.size,n}e.exports=t},67599(e){function t(e){return this.__data__.get(e)}e.exports=t},44758(e){function t(e){return this.__data__.has(e)}e.exports=t},34309(e,t,n){var r=n(38407),i=n(57071),a=n(83369),o=200;function s(e,t){var n=this.__data__;if(n instanceof r){var 
s=n.__data__;if(!i||s.length=t||n<0||g&&r>=f}function S(){var e=i();if(E(e))return k(e);h=setTimeout(S,_(e))}function k(e){return(h=void 0,v&&c)?y(e):(c=l=void 0,d)}function x(){void 0!==h&&clearTimeout(h),b=0,c=p=l=h=void 0}function T(){return void 0===h?d:k(i())}function M(){var e=i(),n=E(e);if(c=arguments,l=this,p=e,n){if(void 0===h)return w(p);if(g)return clearTimeout(h),h=setTimeout(S,t),y(p)}return void 0===h&&(h=setTimeout(S,t)),d}return t=a(t)||0,r(n)&&(m=!!n.leading,f=(g="maxWait"in n)?s(a(n.maxWait)||0,t):f,v="trailing"in n?!!n.trailing:v),M.cancel=x,M.flush=T,M}e.exports=c},53816(e,t,n){var r=n(69389),i=n(79833),a=/[\xc0-\xd6\xd8-\xf6\xf8-\xff\u0100-\u017f]/g,o=RegExp("[\\u0300-\\u036f\\ufe20-\\ufe2f\\u20d0-\\u20ff]","g");function s(e){return(e=i(e))&&e.replace(a,r).replace(o,"")}e.exports=s},66073(e,t,n){e.exports=n(84486)},77813(e){function t(e,t){return e===t||e!=e&&t!=t}e.exports=t},63105(e,t,n){var r=n(34963),i=n(80760),a=n(67206),o=n(1469);function s(e,t){return(o(e)?r:i)(e,a(t,3))}e.exports=s},85564(e,t,n){var r=n(21078);function i(e){return(null==e?0:e.length)?r(e,1):[]}e.exports=i},84486(e,t,n){var r=n(77412),i=n(89881),a=n(54290),o=n(1469);function s(e,t){return(o(e)?r:i)(e,a(t))}e.exports=s},27361(e,t,n){var r=n(97786);function i(e,t,n){var i=null==e?void 0:r(e,t);return void 0===i?n:i}e.exports=i},18721(e,t,n){var r=n(78565),i=n(222);function a(e,t){return null!=e&&i(e,t,r)}e.exports=a},79095(e,t,n){var r=n(13),i=n(222);function a(e,t){return null!=e&&i(e,t,r)}e.exports=a},6557(e){function t(e){return e}e.exports=t},35694(e,t,n){var r=n(9454),i=n(37005),a=Object.prototype,o=a.hasOwnProperty,s=a.propertyIsEnumerable,u=r(function(){return arguments}())?r:function(e){return i(e)&&o.call(e,"callee")&&!s.call(e,"callee")};e.exports=u},1469(e){var t=Array.isArray;e.exports=t},98612(e,t,n){var r=n(23560),i=n(41780);function a(e){return null!=e&&i(e.length)&&!r(e)}e.exports=a},29246(e,t,n){var r=n(98612),i=n(37005);function a(e){return 
i(e)&&r(e)}e.exports=a},44144(e,t,n){e=n.nmd(e);var r=n(55639),i=n(95062),a=t&&!t.nodeType&&t,o=a&&e&&!e.nodeType&&e,s=o&&o.exports===a?r.Buffer:void 0,u=(s?s.isBuffer:void 0)||i;e.exports=u},41609(e,t,n){var r=n(280),i=n(64160),a=n(35694),o=n(1469),s=n(98612),u=n(44144),c=n(25726),l=n(36719),f="[object Map]",d="[object Set]",h=Object.prototype.hasOwnProperty;function p(e){if(null==e)return!0;if(s(e)&&(o(e)||"string"==typeof e||"function"==typeof e.splice||u(e)||l(e)||a(e)))return!e.length;var t=i(e);if(t==f||t==d)return!e.size;if(c(e))return!r(e).length;for(var n in e)if(h.call(e,n))return!1;return!0}e.exports=p},23560(e,t,n){var r=n(44239),i=n(13218),a="[object AsyncFunction]",o="[object Function]",s="[object GeneratorFunction]",u="[object Proxy]";function c(e){if(!i(e))return!1;var t=r(e);return t==o||t==s||t==a||t==u}e.exports=c},41780(e){var t=9007199254740991;function n(e){return"number"==typeof e&&e>-1&&e%1==0&&e<=t}e.exports=n},56688(e,t,n){var r=n(25588),i=n(7518),a=n(31167),o=a&&a.isMap,s=o?i(o):r;e.exports=s},45220(e){function t(e){return null===e}e.exports=t},13218(e){function t(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)}e.exports=t},37005(e){function t(e){return null!=e&&"object"==typeof e}e.exports=t},68630(e,t,n){var r=n(44239),i=n(85924),a=n(37005),o="[object Object]",s=Function.prototype,u=Object.prototype,c=s.toString,l=u.hasOwnProperty,f=c.call(Object);function d(e){if(!a(e)||r(e)!=o)return!1;var t=i(e);if(null===t)return!0;var n=l.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&c.call(n)==f}e.exports=d},72928(e,t,n){var r=n(29221),i=n(7518),a=n(31167),o=a&&a.isSet,s=o?i(o):r;e.exports=s},47037(e,t,n){var r=n(44239),i=n(1469),a=n(37005),o="[object String]";function s(e){return"string"==typeof e||!i(e)&&a(e)&&r(e)==o}e.exports=s},33448(e,t,n){var r=n(44239),i=n(37005),a="[object Symbol]";function o(e){return"symbol"==typeof e||i(e)&&r(e)==a}e.exports=o},36719(e,t,n){var 
r=n(38749),i=n(7518),a=n(31167),o=a&&a.isTypedArray,s=o?i(o):r;e.exports=s},52353(e){function t(e){return void 0===e}e.exports=t},3674(e,t,n){var r=n(14636),i=n(280),a=n(98612);function o(e){return a(e)?r(e):i(e)}e.exports=o},81704(e,t,n){var r=n(14636),i=n(35014),a=n(98612);function o(e){return a(e)?r(e,!0):i(e)}e.exports=o},96486:function(e,t,n){var r;e=n.nmd(e),(function(){var i,a="4.17.21",o=200,s="Unsupported core-js use. Try https://npms.io/search?q=ponyfill.",u="Expected a function",c="Invalid `variable` option passed into `_.template`",l="__lodash_hash_undefined__",f=500,d="__lodash_placeholder__",h=1,p=2,b=4,m=1,g=2,v=1,y=2,w=4,_=8,E=16,S=32,k=64,x=128,T=256,M=512,O=30,A="...",L=800,C=16,I=1,D=2,N=3,P=1/0,R=9007199254740991,j=17976931348623157e292,F=0/0,Y=4294967295,B=Y-1,U=Y>>>1,H=[["ary",x],["bind",v],["bindKey",y],["curry",_],["curryRight",E],["flip",M],["partial",S],["partialRight",k],["rearg",T]],$="[object Arguments]",z="[object Array]",G="[object AsyncFunction]",W="[object Boolean]",K="[object Date]",V="[object DOMException]",q="[object Error]",Z="[object Function]",X="[object GeneratorFunction]",J="[object Map]",Q="[object Number]",ee="[object Null]",et="[object Object]",en="[object Promise]",er="[object Proxy]",ei="[object RegExp]",ea="[object Set]",eo="[object String]",es="[object Symbol]",eu="[object Undefined]",ec="[object WeakMap]",el="[object WeakSet]",ef="[object ArrayBuffer]",ed="[object DataView]",eh="[object Float32Array]",ep="[object Float64Array]",eb="[object Int8Array]",em="[object Int16Array]",eg="[object Int32Array]",ev="[object Uint8Array]",ey="[object Uint8ClampedArray]",ew="[object Uint16Array]",e_="[object Uint32Array]",eE=/\b__p \+= '';/g,eS=/\b(__p \+=) '' \+/g,ek=/(__e\(.*?\)|\b__t\)) 
\+\n'';/g,ex=/&(?:amp|lt|gt|quot|#39);/g,eT=/[&<>"']/g,eM=RegExp(ex.source),eO=RegExp(eT.source),eA=/<%-([\s\S]+?)%>/g,eL=/<%([\s\S]+?)%>/g,eC=/<%=([\s\S]+?)%>/g,eI=/\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/,eD=/^\w*$/,eN=/[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g,eP=/[\\^$.*+?()[\]{}|]/g,eR=RegExp(eP.source),ej=/^\s+/,eF=/\s/,eY=/\{(?:\n\/\* \[wrapped with .+\] \*\/)?\n?/,eB=/\{\n\/\* \[wrapped with (.+)\] \*/,eU=/,? & /,eH=/[^\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\x7f]+/g,e$=/[()=,{}\[\]\/\s]/,ez=/\\(\\)?/g,eG=/\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g,eW=/\w*$/,eK=/^[-+]0x[0-9a-f]+$/i,eV=/^0b[01]+$/i,eq=/^\[object .+?Constructor\]$/,eZ=/^0o[0-7]+$/i,eX=/^(?:0|[1-9]\d*)$/,eJ=/[\xc0-\xd6\xd8-\xf6\xf8-\xff\u0100-\u017f]/g,eQ=/($^)/,e1=/['\n\r\u2028\u2029\\]/g,e0="\ud800-\udfff",e2="\\u0300-\\u036f\\ufe20-\\ufe2f\\u20d0-\\u20ff",e3="\\u2700-\\u27bf",e4="a-z\\xdf-\\xf6\\xf8-\\xff",e5="A-Z\\xc0-\\xd6\\xd8-\\xde",e6="\\ufe0e\\ufe0f",e9="\\xac\\xb1\\xd7\\xf7\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf\\u2000-\\u206f 
\\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000",e8="['’]",e7="["+e0+"]",te="["+e9+"]",tt="["+e2+"]",tn="\\d+",tr="["+e3+"]",ti="["+e4+"]",ta="[^"+e0+e9+tn+e3+e4+e5+"]",to="\ud83c[\udffb-\udfff]",ts="[^"+e0+"]",tu="(?:\ud83c[\udde6-\uddff]){2}",tc="[\ud800-\udbff][\udc00-\udfff]",tl="["+e5+"]",tf="\\u200d",td="(?:"+ti+"|"+ta+")",th="(?:"+tl+"|"+ta+")",tp="(?:"+e8+"(?:d|ll|m|re|s|t|ve))?",tb="(?:"+e8+"(?:D|LL|M|RE|S|T|VE))?",tm="(?:"+tt+"|"+to+")?",tg="["+e6+"]?",tv="(?:"+tf+"(?:"+[ts,tu,tc].join("|")+")"+tg+tm+")*",ty="\\d*(?:1st|2nd|3rd|(?![123])\\dth)(?=\\b|[A-Z_])",tw="\\d*(?:1ST|2ND|3RD|(?![123])\\dTH)(?=\\b|[a-z_])",t_=tg+tm+tv,tE="(?:"+[tr,tu,tc].join("|")+")"+t_,tS="(?:"+[ts+tt+"?",tt,tu,tc,e7].join("|")+")",tk=RegExp(e8,"g"),tx=RegExp(tt,"g"),tT=RegExp(to+"(?="+to+")|"+tS+t_,"g"),tM=RegExp([tl+"?"+ti+"+"+tp+"(?="+[te,tl,"$"].join("|")+")",th+"+"+tb+"(?="+[te,tl+td,"$"].join("|")+")",tl+"?"+td+"+"+tp,tl+"+"+tb,tw,ty,tn,tE].join("|"),"g"),tO=RegExp("["+tf+e0+e2+e6+"]"),tA=/[a-z][A-Z]|[A-Z]{2}[a-z]|[0-9][a-zA-Z]|[a-zA-Z][0-9]|[^a-zA-Z0-9 ]/,tL=["Array","Buffer","DataView","Date","Error","Float32Array","Float64Array","Function","Int8Array","Int16Array","Int32Array","Map","Math","Object","Promise","RegExp","Set","String","Symbol","TypeError","Uint8Array","Uint8ClampedArray","Uint16Array","Uint32Array","WeakMap","_","clearTimeout","isFinite","parseInt","setTimeout"],tC=-1,tI={};tI[eh]=tI[ep]=tI[eb]=tI[em]=tI[eg]=tI[ev]=tI[ey]=tI[ew]=tI[e_]=!0,tI[$]=tI[z]=tI[ef]=tI[W]=tI[ed]=tI[K]=tI[q]=tI[Z]=tI[J]=tI[Q]=tI[et]=tI[ei]=tI[ea]=tI[eo]=tI[ec]=!1;var tD={};tD[$]=tD[z]=tD[ef]=tD[ed]=tD[W]=tD[K]=tD[eh]=tD[ep]=tD[eb]=tD[em]=tD[eg]=tD[J]=tD[Q]=tD[et]=tD[ei]=tD[ea]=tD[eo]=tD[es]=tD[ev]=tD[ey]=tD[ew]=tD[e_]=!0,tD[q]=tD[Z]=tD[ec]=!1;var 
tN={À:"A",Á:"A",Â:"A",Ã:"A",Ä:"A",Å:"A",à:"a",á:"a",â:"a",ã:"a",ä:"a",å:"a",Ç:"C",ç:"c",Ð:"D",ð:"d",È:"E",É:"E",Ê:"E",Ë:"E",è:"e",é:"e",ê:"e",ë:"e",Ì:"I",Í:"I",Î:"I",Ï:"I",ì:"i",í:"i",î:"i",ï:"i",Ñ:"N",ñ:"n",Ò:"O",Ó:"O",Ô:"O",Õ:"O",Ö:"O",Ø:"O",ò:"o",ó:"o",ô:"o",õ:"o",ö:"o",ø:"o",Ù:"U",Ú:"U",Û:"U",Ü:"U",ù:"u",ú:"u",û:"u",ü:"u",Ý:"Y",ý:"y",ÿ:"y",Æ:"Ae",æ:"ae",Þ:"Th",þ:"th",ß:"ss",Ā:"A",Ă:"A",Ą:"A",ā:"a",ă:"a",ą:"a",Ć:"C",Ĉ:"C",Ċ:"C",Č:"C",ć:"c",ĉ:"c",ċ:"c",č:"c",Ď:"D",Đ:"D",ď:"d",đ:"d",Ē:"E",Ĕ:"E",Ė:"E",Ę:"E",Ě:"E",ē:"e",ĕ:"e",ė:"e",ę:"e",ě:"e",Ĝ:"G",Ğ:"G",Ġ:"G",Ģ:"G",ĝ:"g",ğ:"g",ġ:"g",ģ:"g",Ĥ:"H",Ħ:"H",ĥ:"h",ħ:"h",Ĩ:"I",Ī:"I",Ĭ:"I",Į:"I",İ:"I",ĩ:"i",ī:"i",ĭ:"i",į:"i",ı:"i",Ĵ:"J",ĵ:"j",Ķ:"K",ķ:"k",ĸ:"k",Ĺ:"L",Ļ:"L",Ľ:"L",Ŀ:"L",Ł:"L",ĺ:"l",ļ:"l",ľ:"l",ŀ:"l",ł:"l",Ń:"N",Ņ:"N",Ň:"N",Ŋ:"N",ń:"n",ņ:"n",ň:"n",ŋ:"n",Ō:"O",Ŏ:"O",Ő:"O",ō:"o",ŏ:"o",ő:"o",Ŕ:"R",Ŗ:"R",Ř:"R",ŕ:"r",ŗ:"r",ř:"r",Ś:"S",Ŝ:"S",Ş:"S",Š:"S",ś:"s",ŝ:"s",ş:"s",š:"s",Ţ:"T",Ť:"T",Ŧ:"T",ţ:"t",ť:"t",ŧ:"t",Ũ:"U",Ū:"U",Ŭ:"U",Ů:"U",Ű:"U",Ų:"U",ũ:"u",ū:"u",ŭ:"u",ů:"u",ű:"u",ų:"u",Ŵ:"W",ŵ:"w",Ŷ:"Y",ŷ:"y",Ÿ:"Y",Ź:"Z",Ż:"Z",Ž:"Z",ź:"z",ż:"z",ž:"z",IJ:"IJ",ij:"ij",Œ:"Oe",œ:"oe",ʼn:"'n",ſ:"s"},tP={"&":"&","<":"<",">":">",'"':""","'":"'"},tR={"&":"&","<":"<",">":">",""":'"',"'":"'"},tj={"\\":"\\","'":"'","\n":"n","\r":"r","\u2028":"u2028","\u2029":"u2029"},tF=parseFloat,tY=parseInt,tB="object"==typeof n.g&&n.g&&n.g.Object===Object&&n.g,tU="object"==typeof self&&self&&self.Object===Object&&self,tH=tB||tU||Function("return this")(),t$=t&&!t.nodeType&&t,tz=t$&&e&&!e.nodeType&&e,tG=tz&&tz.exports===t$,tW=tG&&tB.process,tK=function(){try{var e=tz&&tz.require&&tz.require("util").types;if(e)return e;return tW&&tW.binding&&tW.binding("util")}catch(t){}}(),tV=tK&&tK.isArrayBuffer,tq=tK&&tK.isDate,tZ=tK&&tK.isMap,tX=tK&&tK.isRegExp,tJ=tK&&tK.isSet,tQ=tK&&tK.isTypedArray;function t1(e,t,n){switch(n.length){case 0:return e.call(t);case 1:return e.call(t,n[0]);case 2:return e.call(t,n[0],n[1]);case 3:return 
e.call(t,n[0],n[1],n[2])}return e.apply(t,n)}function t0(e,t,n,r){for(var i=-1,a=null==e?0:e.length;++i-1}function t9(e,t,n){for(var r=-1,i=null==e?0:e.length;++r-1;);return n}function nk(e,t){for(var n=e.length;n--&&nu(t,e[n],0)>-1;);return n}function nx(e,t){for(var n=e.length,r=0;n--;)e[n]===t&&++r;return r}var nT=nh(tN),nM=nh(tP);function nO(e){return"\\"+tj[e]}function nA(e,t){return null==e?i:e[t]}function nL(e){return tO.test(e)}function nC(e){return tA.test(e)}function nI(e){for(var t,n=[];!(t=e.next()).done;)n.push(t.value);return n}function nD(e){var t=-1,n=Array(e.size);return e.forEach(function(e,r){n[++t]=[r,e]}),n}function nN(e,t){return function(n){return e(t(n))}}function nP(e,t){for(var n=-1,r=e.length,i=0,a=[];++n-1}function rh(e,t){var n=this.__data__,r=rP(n,e);return r<0?(++this.size,n.push([e,t])):n[r][1]=t,this}function rp(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t=t?e:t)),e}function rH(e,t,n,r,a,o){var s,u=t&h,c=t&p,l=t&b;if(n&&(s=a?n(e,r,a,o):n(e)),s!==i)return s;if(!u1(e))return e;var f=uF(e);if(f){if(s=a9(e),!u)return al(e,s)}else{var d=a3(e),m=d==Z||d==X;if(u$(e))return ae(e,u);if(d==et||d==$||m&&!a){if(s=c||m?{}:a8(e),!u)return c?ah(e,rF(s,e)):ad(e,rj(s,e))}else{if(!tD[d])return a?e:{};s=a7(e,d,u)}}o||(o=new rS);var g=o.get(e);if(g)return g;o.set(e,s),cr(e)?e.forEach(function(r){s.add(rH(r,t,n,r,e,o))}):u2(e)&&e.forEach(function(r,i){s.set(i,rH(r,t,n,i,e,o))});var v=l?c?aW:aG:c?c$:cH,y=f?i:v(e);return t2(y||e,function(r,i){y&&(r=e[i=r]),rN(s,i,rH(r,t,n,i,e,o))}),s}function r$(e){var t=cH(e);return function(n){return rz(n,e,t)}}function rz(e,t,n){var r=n.length;if(null==e)return!r;for(e=e4(e);r--;){var a=n[r],o=t[a],s=e[a];if(s===i&&!(a in e)||!o(s))return!1}return!0}function rG(e,t,n){if("function"!=typeof e)throw new e9(u);return o_(function(){e.apply(i,n)},t)}function rW(e,t,n,r){var i=-1,a=t6,s=!0,u=e.length,c=[],l=t.length;if(!u)return c;n&&(t=t8(t,nw(n))),r?(a=t9,s=!1):t.length>=o&&(a=nE,s=!1,t=new 
rw(t));outer:for(;++ia?0:a+n),(r=r===i||r>a?a:cp(r))<0&&(r+=a),r=n>r?0:cb(r);n0&&n(s)?t>1?rQ(s,t-1,n,r,i):t7(i,s):r||(i[i.length]=s)}return i}var r1=ag(),r0=ag(!0);function r2(e,t){return e&&r1(e,t,cH)}function r3(e,t){return e&&r0(e,t,cH)}function r4(e,t){return t5(t,function(t){return uX(e[t])})}function r5(e,t){t=i6(t,e);for(var n=0,r=t.length;null!=e&&nt}function r7(e,t){return null!=e&&tr.call(e,t)}function ie(e,t){return null!=e&&t in e4(e)}function it(e,t,n){return e>=tU(t,n)&&e=120&&f.length>=120)?new rw(s&&f):i}f=e[0];var d=-1,h=u[0];outer:for(;++d-1;)s!==e&&tg.call(s,u,1),tg.call(e,u,1);return e}function iD(e,t){for(var n=e?t.length:0,r=n-1;n--;){var i=t[n];if(n==r||i!==a){var a=i;on(i)?tg.call(e,i,1):iJ(e,i)}}return e}function iN(e,t){return e+tO(tW()*(t-e+1))}function iP(e,t,n,r){for(var i=-1,a=tB(tM((t-e)/(n||1)),0),o=eF(a);a--;)o[r?a:++i]=e,e+=n;return o}function iR(e,t){var n="";if(!e||t<1||t>R)return n;do t%2&&(n+=e),(t=tO(t/2))&&(e+=e);while(t)return n}function ij(e,t){return oE(om(e,t,lB),e+"")}function iF(e){return rL(c9(e))}function iY(e,t){var n=c9(e);return ox(n,rU(t,0,n.length))}function iB(e,t,n,r){if(!u1(e))return e;t=i6(t,e);for(var a=-1,o=t.length,s=o-1,u=e;null!=u&&++ai?0:i+t),(n=n>i?i:n)<0&&(n+=i),i=t>n?0:n-t>>>0,t>>>=0;for(var a=eF(i);++r>>1,o=e[a];null!==o&&!ca(o)&&(n?o<=t:o=o){var l=t?null:aP(e);if(l)return nR(l);s=!1,i=nE,c=new rw}else c=t?[]:u;outer:for(;++r=r?e:iz(e,t,n)}var i7=tE||function(e){return tH.clearTimeout(e)};function ae(e,t){if(t)return e.slice();var n=e.length,r=th?th(n):new e.constructor(n);return e.copy(r),r}function at(e){var t=new e.constructor(e.byteLength);return new td(t).set(new td(e)),t}function an(e,t){var n=t?at(e.buffer):e.buffer;return new e.constructor(n,e.byteOffset,e.byteLength)}function ar(e){var t=new e.constructor(e.source,eW.exec(e));return t.lastIndex=e.lastIndex,t}function ai(e){return n2?e4(n2.call(e)):{}}function aa(e,t){var n=t?at(e.buffer):e.buffer;return new 
e.constructor(n,e.byteOffset,e.length)}function ao(e,t){if(e!==t){var n=e!==i,r=null===e,a=e==e,o=ca(e),s=t!==i,u=null===t,c=t==t,l=ca(t);if(!u&&!l&&!o&&e>t||o&&s&&c&&!u&&!l||r&&s&&c||!n&&c||!a)return 1;if(!r&&!o&&!l&&e=s)return u;return u*("desc"==n[r]?-1:1)}}return e.index-t.index}function au(e,t,n,r){for(var i=-1,a=e.length,o=n.length,s=-1,u=t.length,c=tB(a-o,0),l=eF(u+c),f=!r;++s1?n[a-1]:i,s=a>2?n[2]:i;for(o=e.length>3&&"function"==typeof o?(a--,o):i,s&&or(n[0],n[1],s)&&(o=a<3?i:o,a=1),t=e4(t);++r-1?a[o?t[s]:s]:i}}function ak(e){return az(function(t){var n=t.length,r=n,a=n9.prototype.thru;for(e&&t.reverse();r--;){var o=t[r];if("function"!=typeof o)throw new e9(u);if(a&&!s&&"wrapper"==aV(o))var s=new n9([],!0)}for(r=s?r:n;++r1&&v.reverse(),f&&cu))return!1;var l=o.get(e),f=o.get(t);if(l&&f)return l==t&&f==e;var d=-1,h=!0,p=n&g?new rw:i;for(o.set(e,t),o.set(t,e);++d1?"& ":"")+t[r],t=t.join(n>2?", ":" "),e.replace(eY,"{\n/* [wrapped with "+t+"] */\n")}function ot(e){return uF(e)||uj(e)||!!(tv&&e&&e[tv])}function on(e,t){var n=typeof e;return!!(t=null==t?R:t)&&("number"==n||"symbol"!=n&&eX.test(e))&&e>-1&&e%1==0&&e0){if(++t>=L)return arguments[0]}else t=0;return e.apply(i,arguments)}}function ox(e,t){var n=-1,r=e.length,a=r-1;for(t=t===i?r:t;++n1?e[t-1]:i;return n="function"==typeof n?(e.pop(),n):i,sE(e,n)});function sC(e){var t=n4(e);return t.__chain__=!0,t}function sI(e,t){return t(e),e}function sD(e,t){return t(e)}var sN=az(function(e){var t=e.length,n=t?e[0]:0,r=this.__wrapped__,a=function(t){return rB(t,e)};return!(t>1)&&!this.__actions__.length&&r instanceof n8&&on(n)?((r=r.slice(n,+n+(t?1:0))).__actions__.push({func:sD,args:[a],thisArg:i}),new n9(r,this.__chain__).thru(function(e){return t&&!e.length&&e.push(i),e})):this.thru(a)});function sP(){return sC(this)}function sR(){return new n9(this.value(),this.__chain__)}function sj(){i===this.__values__&&(this.__values__=cd(this.value()));var 
e=this.__index__>=this.__values__.length,t=e?i:this.__values__[this.__index__++];return{done:e,value:t}}function sF(){return this}function sY(e){for(var t,n=this;n instanceof n6;){var r=oL(n);r.__index__=0,r.__values__=i,t?a.__wrapped__=r:t=r;var a=r;n=n.__wrapped__}return a.__wrapped__=e,t}function sB(){var e=this.__wrapped__;if(e instanceof n8){var t=e;return this.__actions__.length&&(t=new n8(this)),(t=t.reverse()).__actions__.push({func:sD,args:[se],thisArg:i}),new n9(t,this.__chain__)}return this.thru(se)}function sU(){return i0(this.__wrapped__,this.__actions__)}var sH=ap(function(e,t,n){tr.call(e,n)?++e[n]:rY(e,n,1)});function s$(e,t,n){var r=uF(e)?t4:rq;return n&&or(e,t,n)&&(t=i),r(e,aZ(t,3))}function sz(e,t){return(uF(e)?t5:rJ)(e,aZ(t,3))}var sG=aS(oH),sW=aS(o$);function sK(e,t){return rQ(s2(e,t),1)}function sV(e,t){return rQ(s2(e,t),P)}function sq(e,t,n){return n=n===i?1:cp(n),rQ(s2(e,t),n)}function sZ(e,t){return(uF(e)?t2:rK)(e,aZ(t,3))}function sX(e,t){return(uF(e)?t3:rV)(e,aZ(t,3))}var sJ=ap(function(e,t,n){tr.call(e,n)?e[n].push(t):rY(e,n,[t])});function sQ(e,t,n,r){e=uB(e)?e:c9(e),n=n&&!r?cp(n):0;var i=e.length;return n<0&&(n=tB(i+n,0)),ci(e)?n<=i&&e.indexOf(t,n)>-1:!!i&&nu(e,t,n)>-1}var s1=ij(function(e,t,n){var r=-1,i="function"==typeof t,a=uB(e)?eF(e.length):[];return rK(e,function(e){a[++r]=i?t1(t,e,n):ia(e,t,n)}),a}),s0=ap(function(e,t,n){rY(e,n,t)});function s2(e,t){return(uF(e)?t8:iE)(e,aZ(t,3))}function s3(e,t,n,r){return null==e?[]:(uF(t)||(t=null==t?[]:[t]),n=r?i:n,uF(n)||(n=null==n?[]:[n]),iO(e,t,n))}var s4=ap(function(e,t,n){e[n?0:1].push(t)},function(){return[[],[]]});function s5(e,t,n){var r=uF(e)?ne:np,i=arguments.length<3;return r(e,aZ(t,4),n,i,rK)}function s6(e,t,n){var r=uF(e)?nt:np,i=arguments.length<3;return r(e,aZ(t,4),n,i,rV)}function s9(e,t){return(uF(e)?t5:rJ)(e,ug(aZ(t,3)))}function s8(e){return(uF(e)?rL:iF)(e)}function s7(e,t,n){return t=(n?or(e,t,n):t===i)?1:cp(t),(uF(e)?rC:iY)(e,t)}function 
ue(e){return(uF(e)?rI:i$)(e)}function ut(e){if(null==e)return 0;if(uB(e))return ci(e)?nB(e):e.length;var t=a3(e);return t==J||t==ea?e.size:iy(e).length}function un(e,t,n){var r=uF(e)?nn:iG;return n&&or(e,t,n)&&(t=i),r(e,aZ(t,3))}var ur=ij(function(e,t){if(null==e)return[];var n=t.length;return n>1&&or(e,t[0],t[1])?t=[]:n>2&&or(t[0],t[1],t[2])&&(t=[t[0]]),iO(e,rQ(t,1),[])}),ui=tS||function(){return tH.Date.now()};function ua(e,t){if("function"!=typeof t)throw new e9(u);return e=cp(e),function(){if(--e<1)return t.apply(this,arguments)}}function uo(e,t,n){return t=n?i:t,t=e&&null==t?e.length:t,aj(e,x,i,i,i,i,t)}function us(e,t){var n;if("function"!=typeof t)throw new e9(u);return e=cp(e),function(){return--e>0&&(n=t.apply(this,arguments)),e<=1&&(t=i),n}}var uu=ij(function(e,t,n){var r=v;if(n.length){var i=nP(n,aq(uu));r|=S}return aj(e,r,t,n,i)}),uc=ij(function(e,t,n){var r=v|y;if(n.length){var i=nP(n,aq(uc));r|=S}return aj(t,r,e,n,i)});function ul(e,t,n){t=n?i:t;var r=aj(e,_,i,i,i,i,i,t);return r.placeholder=ul.placeholder,r}function uf(e,t,n){t=n?i:t;var r=aj(e,E,i,i,i,i,i,t);return r.placeholder=uf.placeholder,r}function ud(e,t,n){var r,a,o,s,c,l,f=0,d=!1,h=!1,p=!0;if("function"!=typeof e)throw new e9(u);function b(t){var n=r,o=a;return r=a=i,f=t,s=e.apply(o,n)}function m(e){return f=e,c=o_(y,t),d?b(e):s}function g(e){var n=e-l,r=e-f,i=t-n;return h?tU(i,o-r):i}function v(e){var n=e-l,r=e-f;return l===i||n>=t||n<0||h&&r>=o}function y(){var e=ui();if(v(e))return w(e);c=o_(y,g(e))}function w(e){return(c=i,p&&r)?b(e):(r=a=i,s)}function _(){c!==i&&i7(c),f=0,r=l=a=c=i}function E(){return c===i?s:w(ui())}function S(){var e=ui(),n=v(e);if(r=arguments,a=this,l=e,n){if(c===i)return m(l);if(h)return i7(c),c=o_(y,t),b(l)}return c===i&&(c=o_(y,t)),s}return t=cm(t)||0,u1(n)&&(d=!!n.leading,o=(h="maxWait"in n)?tB(cm(n.maxWait)||0,t):o,p="trailing"in n?!!n.trailing:p),S.cancel=_,S.flush=E,S}var uh=ij(function(e,t){return rG(e,1,t)}),up=ij(function(e,t,n){return 
rG(e,cm(t)||0,n)});function ub(e){return aj(e,M)}function um(e,t){if("function"!=typeof e||null!=t&&"function"!=typeof t)throw new e9(u);var n=function(){var r=arguments,i=t?t.apply(this,r):r[0],a=n.cache;if(a.has(i))return a.get(i);var o=e.apply(this,r);return n.cache=a.set(i,o)||a,o};return n.cache=new(um.Cache||rp),n}function ug(e){if("function"!=typeof e)throw new e9(u);return function(){var t=arguments;switch(t.length){case 0:return!e.call(this);case 1:return!e.call(this,t[0]);case 2:return!e.call(this,t[0],t[1]);case 3:return!e.call(this,t[0],t[1],t[2])}return!e.apply(this,t)}}function uv(e){return us(2,e)}um.Cache=rp;var uy=i9(function(e,t){var n=(t=1==t.length&&uF(t[0])?t8(t[0],nw(aZ())):t8(rQ(t,1),nw(aZ()))).length;return ij(function(r){for(var i=-1,a=tU(r.length,n);++i=t}),uj=io(function(){return arguments}())?io:function(e){return u0(e)&&tr.call(e,"callee")&&!tm.call(e,"callee")},uF=eF.isArray,uY=tV?nw(tV):is;function uB(e){return null!=e&&uQ(e.length)&&!uX(e)}function uU(e){return u0(e)&&uB(e)}function uH(e){return!0===e||!1===e||u0(e)&&r9(e)==W}var u$=tN||l4,uz=tq?nw(tq):iu;function uG(e){return u0(e)&&1===e.nodeType&&!ce(e)}function uW(e){if(null==e)return!0;if(uB(e)&&(uF(e)||"string"==typeof e||"function"==typeof e.splice||u$(e)||co(e)||uj(e)))return!e.length;var t=a3(e);if(t==J||t==ea)return!e.size;if(oc(e))return!iy(e).length;for(var n in e)if(tr.call(e,n))return!1;return!0}function uK(e,t){return ic(e,t)}function uV(e,t,n){var r=(n="function"==typeof n?n:i)?n(e,t):i;return r===i?ic(e,t,i,n):!!r}function uq(e){if(!u0(e))return!1;var t=r9(e);return t==q||t==V||"string"==typeof e.message&&"string"==typeof e.name&&!ce(e)}function uZ(e){return"number"==typeof e&&tP(e)}function uX(e){if(!u1(e))return!1;var t=r9(e);return t==Z||t==X||t==G||t==er}function uJ(e){return"number"==typeof e&&e==cp(e)}function uQ(e){return"number"==typeof e&&e>-1&&e%1==0&&e<=R}function u1(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)}function u0(e){return 
null!=e&&"object"==typeof e}var u2=tZ?nw(tZ):id;function u3(e,t){return e===t||ih(e,t,aJ(t))}function u4(e,t,n){return n="function"==typeof n?n:i,ih(e,t,aJ(t),n)}function u5(e){return u7(e)&&e!=+e}function u6(e){if(ou(e))throw new e0(s);return ip(e)}function u9(e){return null===e}function u8(e){return null==e}function u7(e){return"number"==typeof e||u0(e)&&r9(e)==Q}function ce(e){if(!u0(e)||r9(e)!=et)return!1;var t=tp(e);if(null===t)return!0;var n=tr.call(t,"constructor")&&t.constructor;return"function"==typeof n&&n instanceof n&&tn.call(n)==ts}var ct=tX?nw(tX):ib;function cn(e){return uJ(e)&&e>=-R&&e<=R}var cr=tJ?nw(tJ):im;function ci(e){return"string"==typeof e||!uF(e)&&u0(e)&&r9(e)==eo}function ca(e){return"symbol"==typeof e||u0(e)&&r9(e)==es}var co=tQ?nw(tQ):ig;function cs(e){return e===i}function cu(e){return u0(e)&&a3(e)==ec}function cc(e){return u0(e)&&r9(e)==el}var cl=aI(i_),cf=aI(function(e,t){return e<=t});function cd(e){if(!e)return[];if(uB(e))return ci(e)?nU(e):al(e);if(ty&&e[ty])return nI(e[ty]());var t=a3(e);return(t==J?nD:t==ea?nR:c9)(e)}function ch(e){return e?(e=cm(e))===P||e===-P?(e<0?-1:1)*j:e==e?e:0:0===e?e:0}function cp(e){var t=ch(e),n=t%1;return t==t?n?t-n:t:0}function cb(e){return e?rU(cp(e),0,Y):0}function cm(e){if("number"==typeof e)return e;if(ca(e))return F;if(u1(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=u1(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=ny(e);var n=eV.test(e);return n||eZ.test(e)?tY(e.slice(2),n?2:8):eK.test(e)?F:+e}function cg(e){return af(e,c$(e))}function cv(e){return e?rU(cp(e),-R,R):0===e?e:0}function cy(e){return null==e?"":iZ(e)}var cw=ab(function(e,t){if(oc(t)||uB(t)){af(t,cH(t),e);return}for(var n in t)tr.call(t,n)&&rN(e,n,t[n])}),c_=ab(function(e,t){af(t,c$(t),e)}),cE=ab(function(e,t,n,r){af(t,c$(t),e,r)}),cS=ab(function(e,t,n,r){af(t,cH(t),e,r)}),ck=az(rB);function cx(e,t){var n=n5(e);return null==t?n:rj(n,t)}var cT=ij(function(e,t){e=e4(e);var 
n=-1,r=t.length,a=r>2?t[2]:i;for(a&&or(t[0],t[1],a)&&(r=1);++n1),t}),af(e,aW(e),n),r&&(n=rH(n,h|p|b,aB));for(var i=t.length;i--;)iJ(n,t[i]);return n});function cq(e,t){return cX(e,ug(aZ(t)))}var cZ=az(function(e,t){return null==e?{}:iA(e,t)});function cX(e,t){if(null==e)return{};var n=t8(aW(e),function(e){return[e]});return t=aZ(t),iL(e,n,function(e,n){return t(e,n[0])})}function cJ(e,t,n){t=i6(t,e);var r=-1,a=t.length;for(a||(a=1,e=i);++rt){var r=e;e=t,t=r}if(n||e%1||t%1){var a=tW();return tU(e+a*(t-e+tF("1e-"+((a+"").length-1))),t)}return iN(e,t)}var ln=aw(function(e,t,n){return t=t.toLowerCase(),e+(n?lr(t):t)});function lr(e){return lL(cy(e).toLowerCase())}function li(e){return(e=cy(e))&&e.replace(eJ,nT).replace(tx,"")}function la(e,t,n){e=cy(e),t=iZ(t);var r=e.length,a=n=n===i?r:rU(cp(n),0,r);return(n-=t.length)>=0&&e.slice(n,a)==t}function lo(e){return(e=cy(e))&&eO.test(e)?e.replace(eT,nM):e}function ls(e){return(e=cy(e))&&eR.test(e)?e.replace(eP,"\\$&"):e}var lu=aw(function(e,t,n){return e+(n?"-":"")+t.toLowerCase()}),lc=aw(function(e,t,n){return e+(n?" ":"")+t.toLowerCase()}),ll=ay("toLowerCase");function lf(e,t,n){e=cy(e);var r=(t=cp(t))?nB(e):0;if(!t||r>=t)return e;var i=(t-r)/2;return aA(tO(i),n)+e+aA(tM(i),n)}function ld(e,t,n){e=cy(e);var r=(t=cp(t))?nB(e):0;return t&&r>>0)?(e=cy(e))&&("string"==typeof t||null!=t&&!ct(t))&&!(t=iZ(t))&&nL(e)?i8(nU(e),0,n):e.split(t,n):[]}var ly=aw(function(e,t,n){return e+(n?" 
":"")+lL(t)});function lw(e,t,n){return e=cy(e),n=null==n?0:rU(cp(n),0,e.length),t=iZ(t),e.slice(n,n+t.length)==t}function l_(e,t,n){var r=n4.templateSettings;n&&or(e,t,n)&&(t=i),e=cy(e),t=cE({},t,r,aF);var a,o,s=cE({},t.imports,r.imports,aF),u=cH(s),l=n_(s,u),f=0,d=t.interpolate||eQ,h="__p += '",p=e5((t.escape||eQ).source+"|"+d.source+"|"+(d===eC?eG:eQ).source+"|"+(t.evaluate||eQ).source+"|$","g"),b="//# sourceURL="+(tr.call(t,"sourceURL")?(t.sourceURL+"").replace(/\s/g," "):"lodash.templateSources["+ ++tC+"]")+"\n";e.replace(p,function(t,n,r,i,s,u){return r||(r=i),h+=e.slice(f,u).replace(e1,nO),n&&(a=!0,h+="' +\n__e("+n+") +\n'"),s&&(o=!0,h+="';\n"+s+";\n__p += '"),r&&(h+="' +\n((__t = ("+r+")) == null ? '' : __t) +\n'"),f=u+t.length,t}),h+="';\n";var m=tr.call(t,"variable")&&t.variable;if(m){if(e$.test(m))throw new e0(c)}else h="with (obj) {\n"+h+"\n}\n";h=(o?h.replace(eE,""):h).replace(eS,"$1").replace(ek,"$1;"),h="function("+(m||"obj")+") {\n"+(m?"":"obj || (obj = {});\n")+"var __t, __p = ''"+(a?", __e = _.escape":"")+(o?", __j = Array.prototype.join;\nfunction print() { __p += __j.call(arguments, '') }\n":";\n")+h+"return __p\n}";var g=lI(function(){return e2(u,b+"return "+h).apply(i,l)});if(g.source=h,uq(g))throw g;return g}function lE(e){return cy(e).toLowerCase()}function lS(e){return cy(e).toUpperCase()}function lk(e,t,n){if((e=cy(e))&&(n||t===i))return ny(e);if(!e||!(t=iZ(t)))return e;var r=nU(e),a=nU(t),o=nS(r,a),s=nk(r,a)+1;return i8(r,o,s).join("")}function lx(e,t,n){if((e=cy(e))&&(n||t===i))return e.slice(0,nH(e)+1);if(!e||!(t=iZ(t)))return e;var r=nU(e),a=nk(r,nU(t))+1;return i8(r,0,a).join("")}function lT(e,t,n){if((e=cy(e))&&(n||t===i))return e.replace(ej,"");if(!e||!(t=iZ(t)))return e;var r=nU(e),a=nS(r,nU(t));return i8(r,a).join("")}function lM(e,t){var n=O,r=A;if(u1(t)){var a="separator"in t?t.separator:a;n="length"in t?cp(t.length):n,r="omission"in t?iZ(t.omission):r}var o=(e=cy(e)).length;if(nL(e)){var s=nU(e);o=s.length}if(n>=o)return e;var 
u=n-nB(r);if(u<1)return r;var c=s?i8(s,0,u).join(""):e.slice(0,u);if(a===i)return c+r;if(s&&(u+=c.length-u),ct(a)){if(e.slice(u).search(a)){var l,f=c;for(a.global||(a=e5(a.source,cy(eW.exec(a))+"g")),a.lastIndex=0;l=a.exec(f);)var d=l.index;c=c.slice(0,d===i?u:d)}}else if(e.indexOf(iZ(a),u)!=u){var h=c.lastIndexOf(a);h>-1&&(c=c.slice(0,h))}return c+r}function lO(e){return(e=cy(e))&&eM.test(e)?e.replace(ex,n$):e}var lA=aw(function(e,t,n){return e+(n?" ":"")+t.toUpperCase()}),lL=ay("toUpperCase");function lC(e,t,n){return(e=cy(e),i===(t=n?i:t))?nC(e)?nW(e):na(e):e.match(t)||[]}var lI=ij(function(e,t){try{return t1(e,i,t)}catch(n){return uq(n)?n:new e0(n)}}),lD=az(function(e,t){return t2(t,function(t){t=oM(t),rY(e,t,uu(e[t],e))}),e});function lN(e){var t=null==e?0:e.length,n=aZ();return e=t?t8(e,function(e){if("function"!=typeof e[1])throw new e9(u);return[n(e[0]),e[1]]}):[],ij(function(n){for(var r=-1;++rR)return[];var n=Y,r=tU(e,Y);t=aZ(t),e-=Y;for(var i=ng(r,t);++n0||t<0)?new n8(n):(e<0?n=n.takeRight(-e):e&&(n=n.drop(e)),t!==i&&(n=(t=cp(t))<0?n.dropRight(-t):n.take(t-e)),n)},n8.prototype.takeRightWhile=function(e){return this.reverse().takeWhile(e).reverse()},n8.prototype.toArray=function(){return this.take(Y)},r2(n8.prototype,function(e,t){var n=/^(?:filter|find|map|reject)|While$/.test(t),r=/^(?:head|last)$/.test(t),a=n4[r?"take"+("last"==t?"Right":""):t],o=r||/^find/.test(t);a&&(n4.prototype[t]=function(){var t=this.__wrapped__,s=r?[1]:arguments,u=t instanceof n8,c=s[0],l=u||uF(t),f=function(e){var t=a.apply(n4,t7([e],s));return r&&d?t[0]:t};l&&n&&"function"==typeof c&&1!=c.length&&(u=l=!1);var d=this.__chain__,h=!!this.__actions__.length,p=o&&!d,b=u&&!h;if(!o&&l){t=b?t:new n8(this);var m=e.apply(t,s);return m.__actions__.push({func:sD,args:[f],thisArg:i}),new n9(m,d)}return p&&b?e.apply(this,s):(m=this.thru(f),p?r?m.value()[0]:m.value():m)})}),t2(["pop","push","shift","sort","splice","unshift"],function(e){var 
t=e8[e],n=/^(?:push|sort|unshift)$/.test(e)?"tap":"thru",r=/^(?:pop|shift)$/.test(e);n4.prototype[e]=function(){var e=arguments;if(r&&!this.__chain__){var i=this.value();return t.apply(uF(i)?i:[],e)}return this[n](function(n){return t.apply(uF(n)?n:[],e)})}}),r2(n8.prototype,function(e,t){var n=n4[t];if(n){var r=n.name+"";tr.call(nq,r)||(nq[r]=[]),nq[r].push({name:t,func:n})}}),nq[ax(i,y).name]=[{name:"wrapper",func:i}],n8.prototype.clone=n7,n8.prototype.reverse=re,n8.prototype.value=rt,n4.prototype.at=sN,n4.prototype.chain=sP,n4.prototype.commit=sR,n4.prototype.next=sj,n4.prototype.plant=sY,n4.prototype.reverse=sB,n4.prototype.toJSON=n4.prototype.valueOf=n4.prototype.value=sU,n4.prototype.first=n4.prototype.head,ty&&(n4.prototype[ty]=sF),n4}();tH._=nK,i!==(r=(function(){return nK}).call(t,n,t,e))&&(e.exports=r)}).call(this)},35161(e,t,n){var r=n(29932),i=n(67206),a=n(69199),o=n(1469);function s(e,t){return(o(e)?r:a)(e,i(t,3))}e.exports=s},67523(e,t,n){var r=n(89465),i=n(47816),a=n(67206);function o(e,t){var n={};return t=a(t,3),i(e,function(e,i,a){r(n,t(e,i,a),e)}),n}e.exports=o},66604(e,t,n){var r=n(89465),i=n(47816),a=n(67206);function o(e,t){var n={};return t=a(t,3),i(e,function(e,i,a){r(n,i,t(e,i,a))}),n}e.exports=o},88306(e,t,n){var r=n(83369),i="Expected a function";function a(e,t){if("function"!=typeof e||null!=t&&"function"!=typeof t)throw TypeError(i);var n=function(){var r=arguments,i=t?t.apply(this,r):r[0],a=n.cache;if(a.has(i))return a.get(i);var o=e.apply(this,r);return n.cache=a.set(i,o)||a,o};return n.cache=new(a.Cache||r),n}a.Cache=r,e.exports=a},82492(e,t,n){var r=n(42980),i=n(21463)(function(e,t,n){r(e,t,n)});e.exports=i},50308(e){function t(){}e.exports=t},7771(e,t,n){var r=n(55639),i=function(){return r.Date.now()};e.exports=i},78718(e,t,n){var r=n(25970),i=n(99021)(function(e,t){return null==e?{}:r(e,t)});e.exports=i},39601(e,t,n){var r=n(40371),i=n(79152),a=n(15403),o=n(40327);function s(e){return 
a(e)?r(o(e)):i(e)}e.exports=s},54061(e,t,n){var r=n(62663),i=n(89881),a=n(67206),o=n(10107),s=n(1469);function u(e,t,n){var u=s(e)?r:o,c=arguments.length<3;return u(e,a(t,4),n,c,i)}e.exports=u},84238(e,t,n){var r=n(280),i=n(64160),a=n(98612),o=n(47037),s=n(88016),u="[object Map]",c="[object Set]";function l(e){if(null==e)return 0;if(a(e))return o(e)?s(e):e.length;var t=i(e);return t==u||t==c?e.size:r(e).length}e.exports=l},11865(e,t,n){var r=n(35393)(function(e,t,n){return e+(n?"_":"")+t.toLowerCase()});e.exports=r},70479(e){function t(){return[]}e.exports=t},95062(e){function t(){return!1}e.exports=t},14841(e,t,n){var r=n(27561),i=n(13218),a=n(33448),o=0/0,s=/^[-+]0x[0-9a-f]+$/i,u=/^0b[01]+$/i,c=/^0o[0-7]+$/i,l=parseInt;function f(e){if("number"==typeof e)return e;if(a(e))return o;if(i(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=i(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=r(e);var n=u.test(e);return n||c.test(e)?l(e.slice(2),n?2:8):s.test(e)?o:+e}e.exports=f},59881(e,t,n){var r=n(98363),i=n(81704);function a(e){return r(e,i(e))}e.exports=a},79833(e,t,n){var r=n(80531);function i(e){return null==e?"":r(e)}e.exports=i},68718(e,t,n){var r=n(77412),i=n(3118),a=n(47816),o=n(67206),s=n(85924),u=n(1469),c=n(44144),l=n(23560),f=n(13218),d=n(36719);function h(e,t,n){var h=u(e),p=h||c(e)||d(e);if(t=o(t,4),null==n){var b=e&&e.constructor;n=p?h?new b:[]:f(e)&&l(b)?i(s(e)):{}}return(p?r:a)(e,function(e,r,i){return t(n,e,r,i)}),n}e.exports=h},93386(e,t,n){var r=n(21078),i=n(5976),a=n(45652),o=n(29246),s=i(function(e){return a(r(e,1,o,!0))});e.exports=s},11700(e,t,n){var r=n(98805)("toUpperCase");e.exports=r},52628(e,t,n){var r=n(47415),i=n(3674);function a(e){return null==e?[]:r(e,i(e))}e.exports=a},58748(e,t,n){var r=n(49029),i=n(93157),a=n(79833),o=n(2757);function s(e,t,n){return(e=a(e),void 0===(t=n?void 0:t))?i(e)?o(e):r(e):e.match(t)||[]}e.exports=s},42786:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("af",{months:"Januarie_Februarie_Maart_April_Mei_Junie_Julie_Augustus_September_Oktober_November_Desember".split("_"),monthsShort:"Jan_Feb_Mrt_Apr_Mei_Jun_Jul_Aug_Sep_Okt_Nov_Des".split("_"),weekdays:"Sondag_Maandag_Dinsdag_Woensdag_Donderdag_Vrydag_Saterdag".split("_"),weekdaysShort:"Son_Maa_Din_Woe_Don_Vry_Sat".split("_"),weekdaysMin:"So_Ma_Di_Wo_Do_Vr_Sa".split("_"),meridiemParse:/vm|nm/i,isPM:function(e){return/^nm$/i.test(e)},meridiem:function(e,t,n){return e<12?n?"vm":"VM":n?"nm":"NM"},longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Vandag om] LT",nextDay:"[M\xf4re om] LT",nextWeek:"dddd [om] LT",lastDay:"[Gister om] LT",lastWeek:"[Laas] dddd [om] LT",sameElse:"L"},relativeTime:{future:"oor %s",past:"%s gelede",s:"'n paar sekondes",ss:"%d sekondes",m:"'n minuut",mm:"%d minute",h:"'n uur",hh:"%d ure",d:"'n dag",dd:"%d dae",M:"'n maand",MM:"%d maande",y:"'n jaar",yy:"%d jaar"},dayOfMonthOrdinalParse:/\d{1,2}(ste|de)/,ordinal:function(e){return e+(1===e||8===e||e>=20?"ste":"de")},week:{dow:1,doy:4}})})(n(30381))},14130:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t=function(e){return 0===e?0:1===e?1:2===e?2:e%100>=3&&e%100<=10?3:e%100>=11?4:5},n={s:["أقل من ثانية","ثانية واحدة",["ثانيتان","ثانيتين"],"%d ثوان","%d ثانية","%d ثانية",],m:["أقل من دقيقة","دقيقة واحدة",["دقيقتان","دقيقتين"],"%d دقائق","%d دقيقة","%d دقيقة",],h:["أقل من ساعة","ساعة واحدة",["ساعتان","ساعتين"],"%d ساعات","%d ساعة","%d ساعة",],d:["أقل من يوم","يوم واحد",["يومان","يومين"],"%d أيام","%d يومًا","%d يوم",],M:["أقل من شهر","شهر واحد",["شهران","شهرين"],"%d أشهر","%d شهرا","%d شهر",],y:["أقل من عام","عام واحد",["عامان","عامين"],"%d أعوام","%d عامًا","%d عام",]},r=function(e){return function(r,i,a,o){var s=t(r),u=n[e][t(r)];return 2===s&&(u=u[i?0:1]),u.replace(/%d/i,r)}},i=["جانفي","فيفري","مارس","أفريل","ماي","جوان","جويلية","أوت","سبتمبر","أكتوبر","نوفمبر","ديسمبر",];return e.defineLocale("ar-dz",{months:i,monthsShort:i,weekdays:"الأحد_الإثنين_الثلاثاء_الأربعاء_الخميس_الجمعة_السبت".split("_"),weekdaysShort:"أحد_إثنين_ثلاثاء_أربعاء_خميس_جمعة_سبت".split("_"),weekdaysMin:"ح_ن_ث_ر_خ_ج_س".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"D/‏M/‏YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},meridiemParse:/ص|م/,isPM:function(e){return"م"===e},meridiem:function(e,t,n){return e<12?"ص":"م"},calendar:{sameDay:"[اليوم عند الساعة] LT",nextDay:"[غدًا عند الساعة] LT",nextWeek:"dddd [عند الساعة] LT",lastDay:"[أمس عند الساعة] LT",lastWeek:"dddd [عند الساعة] LT",sameElse:"L"},relativeTime:{future:"بعد %s",past:"منذ %s",s:r("s"),ss:r("s"),m:r("m"),mm:r("m"),h:r("h"),hh:r("h"),d:r("d"),dd:r("d"),M:r("M"),MM:r("M"),y:r("y"),yy:r("y")},postformat:function(e){return e.replace(/,/g,"،")},week:{dow:0,doy:4}})})(n(30381))},96135:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("ar-kw",{months:"يناير_فبراير_مارس_أبريل_ماي_يونيو_يوليوز_غشت_شتنبر_أكتوبر_نونبر_دجنبر".split("_"),monthsShort:"يناير_فبراير_مارس_أبريل_ماي_يونيو_يوليوز_غشت_شتنبر_أكتوبر_نونبر_دجنبر".split("_"),weekdays:"الأحد_الإتنين_الثلاثاء_الأربعاء_الخميس_الجمعة_السبت".split("_"),weekdaysShort:"احد_اتنين_ثلاثاء_اربعاء_خميس_جمعة_سبت".split("_"),weekdaysMin:"ح_ن_ث_ر_خ_ج_س".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[اليوم على الساعة] LT",nextDay:"[غدا على الساعة] LT",nextWeek:"dddd [على الساعة] LT",lastDay:"[أمس على الساعة] LT",lastWeek:"dddd [على الساعة] LT",sameElse:"L"},relativeTime:{future:"في %s",past:"منذ %s",s:"ثوان",ss:"%d ثانية",m:"دقيقة",mm:"%d دقائق",h:"ساعة",hh:"%d ساعات",d:"يوم",dd:"%d أيام",M:"شهر",MM:"%d أشهر",y:"سنة",yy:"%d سنوات"},week:{dow:0,doy:12}})})(n(30381))},56440:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t={1:"1",2:"2",3:"3",4:"4",5:"5",6:"6",7:"7",8:"8",9:"9",0:"0"},n=function(e){return 0===e?0:1===e?1:2===e?2:e%100>=3&&e%100<=10?3:e%100>=11?4:5},r={s:["أقل من ثانية","ثانية واحدة",["ثانيتان","ثانيتين"],"%d ثوان","%d ثانية","%d ثانية",],m:["أقل من دقيقة","دقيقة واحدة",["دقيقتان","دقيقتين"],"%d دقائق","%d دقيقة","%d دقيقة",],h:["أقل من ساعة","ساعة واحدة",["ساعتان","ساعتين"],"%d ساعات","%d ساعة","%d ساعة",],d:["أقل من يوم","يوم واحد",["يومان","يومين"],"%d أيام","%d يومًا","%d يوم",],M:["أقل من شهر","شهر واحد",["شهران","شهرين"],"%d أشهر","%d شهرا","%d شهر",],y:["أقل من عام","عام واحد",["عامان","عامين"],"%d أعوام","%d عامًا","%d عام",]},i=function(e){return function(t,i,a,o){var s=n(t),u=r[e][n(t)];return 2===s&&(u=u[i?0:1]),u.replace(/%d/i,t)}},a=["يناير","فبراير","مارس","أبريل","مايو","يونيو","يوليو","أغسطس","سبتمبر","أكتوبر","نوفمبر","ديسمبر",];return 
e.defineLocale("ar-ly",{months:a,monthsShort:a,weekdays:"الأحد_الإثنين_الثلاثاء_الأربعاء_الخميس_الجمعة_السبت".split("_"),weekdaysShort:"أحد_إثنين_ثلاثاء_أربعاء_خميس_جمعة_سبت".split("_"),weekdaysMin:"ح_ن_ث_ر_خ_ج_س".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"D/‏M/‏YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},meridiemParse:/ص|م/,isPM:function(e){return"م"===e},meridiem:function(e,t,n){return e<12?"ص":"م"},calendar:{sameDay:"[اليوم عند الساعة] LT",nextDay:"[غدًا عند الساعة] LT",nextWeek:"dddd [عند الساعة] LT",lastDay:"[أمس عند الساعة] LT",lastWeek:"dddd [عند الساعة] LT",sameElse:"L"},relativeTime:{future:"بعد %s",past:"منذ %s",s:i("s"),ss:i("s"),m:i("m"),mm:i("m"),h:i("h"),hh:i("h"),d:i("d"),dd:i("d"),M:i("M"),MM:i("M"),y:i("y"),yy:i("y")},preparse:function(e){return e.replace(/،/g,",")},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]}).replace(/,/g,"،")},week:{dow:6,doy:12}})})(n(30381))},47702:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("ar-ma",{months:"يناير_فبراير_مارس_أبريل_ماي_يونيو_يوليوز_غشت_شتنبر_أكتوبر_نونبر_دجنبر".split("_"),monthsShort:"يناير_فبراير_مارس_أبريل_ماي_يونيو_يوليوز_غشت_شتنبر_أكتوبر_نونبر_دجنبر".split("_"),weekdays:"الأحد_الإثنين_الثلاثاء_الأربعاء_الخميس_الجمعة_السبت".split("_"),weekdaysShort:"احد_اثنين_ثلاثاء_اربعاء_خميس_جمعة_سبت".split("_"),weekdaysMin:"ح_ن_ث_ر_خ_ج_س".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[اليوم على الساعة] LT",nextDay:"[غدا على الساعة] LT",nextWeek:"dddd [على الساعة] LT",lastDay:"[أمس على الساعة] LT",lastWeek:"dddd [على الساعة] LT",sameElse:"L"},relativeTime:{future:"في %s",past:"منذ %s",s:"ثوان",ss:"%d ثانية",m:"دقيقة",mm:"%d دقائق",h:"ساعة",hh:"%d ساعات",d:"يوم",dd:"%d أيام",M:"شهر",MM:"%d أشهر",y:"سنة",yy:"%d 
سنوات"},week:{dow:1,doy:4}})})(n(30381))},16040:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t={1:"١",2:"٢",3:"٣",4:"٤",5:"٥",6:"٦",7:"٧",8:"٨",9:"٩",0:"٠"},n={"١":"1","٢":"2","٣":"3","٤":"4","٥":"5","٦":"6","٧":"7","٨":"8","٩":"9","٠":"0"};return e.defineLocale("ar-sa",{months:"يناير_فبراير_مارس_أبريل_مايو_يونيو_يوليو_أغسطس_سبتمبر_أكتوبر_نوفمبر_ديسمبر".split("_"),monthsShort:"يناير_فبراير_مارس_أبريل_مايو_يونيو_يوليو_أغسطس_سبتمبر_أكتوبر_نوفمبر_ديسمبر".split("_"),weekdays:"الأحد_الإثنين_الثلاثاء_الأربعاء_الخميس_الجمعة_السبت".split("_"),weekdaysShort:"أحد_إثنين_ثلاثاء_أربعاء_خميس_جمعة_سبت".split("_"),weekdaysMin:"ح_ن_ث_ر_خ_ج_س".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},meridiemParse:/ص|م/,isPM:function(e){return"م"===e},meridiem:function(e,t,n){return e<12?"ص":"م"},calendar:{sameDay:"[اليوم على الساعة] LT",nextDay:"[غدا على الساعة] LT",nextWeek:"dddd [على الساعة] LT",lastDay:"[أمس على الساعة] LT",lastWeek:"dddd [على الساعة] LT",sameElse:"L"},relativeTime:{future:"في %s",past:"منذ %s",s:"ثوان",ss:"%d ثانية",m:"دقيقة",mm:"%d دقائق",h:"ساعة",hh:"%d ساعات",d:"يوم",dd:"%d أيام",M:"شهر",MM:"%d أشهر",y:"سنة",yy:"%d سنوات"},preparse:function(e){return e.replace(/[١٢٣٤٥٦٧٨٩٠]/g,function(e){return n[e]}).replace(/،/g,",")},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]}).replace(/,/g,"،")},week:{dow:0,doy:6}})})(n(30381))},37100:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("ar-tn",{months:"جانفي_فيفري_مارس_أفريل_ماي_جوان_جويلية_أوت_سبتمبر_أكتوبر_نوفمبر_ديسمبر".split("_"),monthsShort:"جانفي_فيفري_مارس_أفريل_ماي_جوان_جويلية_أوت_سبتمبر_أكتوبر_نوفمبر_ديسمبر".split("_"),weekdays:"الأحد_الإثنين_الثلاثاء_الأربعاء_الخميس_الجمعة_السبت".split("_"),weekdaysShort:"أحد_إثنين_ثلاثاء_أربعاء_خميس_جمعة_سبت".split("_"),weekdaysMin:"ح_ن_ث_ر_خ_ج_س".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[اليوم على الساعة] LT",nextDay:"[غدا على الساعة] LT",nextWeek:"dddd [على الساعة] LT",lastDay:"[أمس على الساعة] LT",lastWeek:"dddd [على الساعة] LT",sameElse:"L"},relativeTime:{future:"في %s",past:"منذ %s",s:"ثوان",ss:"%d ثانية",m:"دقيقة",mm:"%d دقائق",h:"ساعة",hh:"%d ساعات",d:"يوم",dd:"%d أيام",M:"شهر",MM:"%d أشهر",y:"سنة",yy:"%d سنوات"},week:{dow:1,doy:4}})})(n(30381))},30867:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t={1:"١",2:"٢",3:"٣",4:"٤",5:"٥",6:"٦",7:"٧",8:"٨",9:"٩",0:"٠"},n={"١":"1","٢":"2","٣":"3","٤":"4","٥":"5","٦":"6","٧":"7","٨":"8","٩":"9","٠":"0"},r=function(e){return 0===e?0:1===e?1:2===e?2:e%100>=3&&e%100<=10?3:e%100>=11?4:5},i={s:["أقل من ثانية","ثانية واحدة",["ثانيتان","ثانيتين"],"%d ثوان","%d ثانية","%d ثانية",],m:["أقل من دقيقة","دقيقة واحدة",["دقيقتان","دقيقتين"],"%d دقائق","%d دقيقة","%d دقيقة",],h:["أقل من ساعة","ساعة واحدة",["ساعتان","ساعتين"],"%d ساعات","%d ساعة","%d ساعة",],d:["أقل من يوم","يوم واحد",["يومان","يومين"],"%d أيام","%d يومًا","%d يوم",],M:["أقل من شهر","شهر واحد",["شهران","شهرين"],"%d أشهر","%d شهرا","%d شهر",],y:["أقل من عام","عام واحد",["عامان","عامين"],"%d أعوام","%d عامًا","%d عام",]},a=function(e){return function(t,n,a,o){var s=r(t),u=i[e][r(t)];return 2===s&&(u=u[n?0:1]),u.replace(/%d/i,t)}},o=["يناير","فبراير","مارس","أبريل","مايو","يونيو","يوليو","أغسطس","سبتمبر","أكتوبر","نوفمبر","ديسمبر",];return 
e.defineLocale("ar",{months:o,monthsShort:o,weekdays:"الأحد_الإثنين_الثلاثاء_الأربعاء_الخميس_الجمعة_السبت".split("_"),weekdaysShort:"أحد_إثنين_ثلاثاء_أربعاء_خميس_جمعة_سبت".split("_"),weekdaysMin:"ح_ن_ث_ر_خ_ج_س".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"D/‏M/‏YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},meridiemParse:/ص|م/,isPM:function(e){return"م"===e},meridiem:function(e,t,n){return e<12?"ص":"م"},calendar:{sameDay:"[اليوم عند الساعة] LT",nextDay:"[غدًا عند الساعة] LT",nextWeek:"dddd [عند الساعة] LT",lastDay:"[أمس عند الساعة] LT",lastWeek:"dddd [عند الساعة] LT",sameElse:"L"},relativeTime:{future:"بعد %s",past:"منذ %s",s:a("s"),ss:a("s"),m:a("m"),mm:a("m"),h:a("h"),hh:a("h"),d:a("d"),dd:a("d"),M:a("M"),MM:a("M"),y:a("y"),yy:a("y")},preparse:function(e){return e.replace(/[١٢٣٤٥٦٧٨٩٠]/g,function(e){return n[e]}).replace(/،/g,",")},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]}).replace(/,/g,"،")},week:{dow:6,doy:12}})})(n(30381))},31083:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"-inci",5:"-inci",8:"-inci",70:"-inci",80:"-inci",2:"-nci",7:"-nci",20:"-nci",50:"-nci",3:"-\xfcnc\xfc",4:"-\xfcnc\xfc",100:"-\xfcnc\xfc",6:"-ncı",9:"-uncu",10:"-uncu",30:"-uncu",60:"-ıncı",90:"-ıncı"};return e.defineLocale("az",{months:"yanvar_fevral_mart_aprel_may_iyun_iyul_avqust_sentyabr_oktyabr_noyabr_dekabr".split("_"),monthsShort:"yan_fev_mar_apr_may_iyn_iyl_avq_sen_okt_noy_dek".split("_"),weekdays:"Bazar_Bazar ertəsi_\xc7ərşənbə axşamı_\xc7ərşənbə_C\xfcmə axşamı_C\xfcmə_Şənbə".split("_"),weekdaysShort:"Baz_BzE_\xc7Ax_\xc7ər_CAx_C\xfcm_Şən".split("_"),weekdaysMin:"Bz_BE_\xc7A_\xc7ə_CA_C\xfc_Şə".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[bug\xfcn saat] LT",nextDay:"[sabah saat] LT",nextWeek:"[gələn həftə] dddd [saat] LT",lastDay:"[d\xfcnən] LT",lastWeek:"[ke\xe7ən həftə] dddd [saat] LT",sameElse:"L"},relativeTime:{future:"%s sonra",past:"%s əvvəl",s:"bir ne\xe7ə saniyə",ss:"%d saniyə",m:"bir dəqiqə",mm:"%d dəqiqə",h:"bir saat",hh:"%d saat",d:"bir g\xfcn",dd:"%d g\xfcn",M:"bir ay",MM:"%d ay",y:"bir il",yy:"%d il"},meridiemParse:/gecə|səhər|gündüz|axşam/,isPM:function(e){return/^(gündüz|axşam)$/.test(e)},meridiem:function(e,t,n){return e<4?"gecə":e<12?"səhər":e<17?"g\xfcnd\xfcz":"axşam"},dayOfMonthOrdinalParse:/\d{1,2}-(ıncı|inci|nci|üncü|ncı|uncu)/,ordinal:function(e){if(0===e)return e+"-ıncı";var n=e%10,r=e%100-n,i=e>=100?100:null;return e+(t[n]||t[r]||t[i])},week:{dow:1,doy:7}})})(n(30381))},9808:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +function t(e,t){var n=e.split("_");return t%10==1&&t%100!=11?n[0]:t%10>=2&&t%10<=4&&(t%100<10||t%100>=20)?n[1]:n[2]}function n(e,n,r){var i={ss:n?"секунда_секунды_секунд":"секунду_секунды_секунд",mm:n?"хвіліна_хвіліны_хвілін":"хвіліну_хвіліны_хвілін",hh:n?"гадзіна_гадзіны_гадзін":"гадзіну_гадзіны_гадзін",dd:"дзень_дні_дзён",MM:"месяц_месяцы_месяцаў",yy:"год_гады_гадоў"};return"m"===r?n?"хвіліна":"хвіліну":"h"===r?n?"гадзіна":"гадзіну":e+" "+t(i[r],+e)}return e.defineLocale("be",{months:{format:"студзеня_лютага_сакавіка_красавіка_траўня_чэрвеня_ліпеня_жніўня_верасня_кастрычніка_лістапада_снежня".split("_"),standalone:"студзень_люты_сакавік_красавік_травень_чэрвень_ліпень_жнівень_верасень_кастрычнік_лістапад_снежань".split("_")},monthsShort:"студ_лют_сак_крас_трав_чэрв_ліп_жнів_вер_каст_ліст_снеж".split("_"),weekdays:{format:"нядзелю_панядзелак_аўторак_сераду_чацвер_пятніцу_суботу".split("_"),standalone:"нядзеля_панядзелак_аўторак_серада_чацвер_пятніца_субота".split("_"),isFormat:/\[ ?[Ууў] ?(?:мінулую|наступную)? 
?\] ?dddd/},weekdaysShort:"нд_пн_ат_ср_чц_пт_сб".split("_"),weekdaysMin:"нд_пн_ат_ср_чц_пт_сб".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY г.",LLL:"D MMMM YYYY г., HH:mm",LLLL:"dddd, D MMMM YYYY г., HH:mm"},calendar:{sameDay:"[Сёння ў] LT",nextDay:"[Заўтра ў] LT",lastDay:"[Учора ў] LT",nextWeek:function(){return"[У] dddd [ў] LT"},lastWeek:function(){switch(this.day()){case 0:case 3:case 5:case 6:return"[У мінулую] dddd [ў] LT";case 1:case 2:case 4:return"[У мінулы] dddd [ў] LT"}},sameElse:"L"},relativeTime:{future:"праз %s",past:"%s таму",s:"некалькі секунд",m:n,mm:n,h:n,hh:n,d:"дзень",dd:n,M:"месяц",MM:n,y:"год",yy:n},meridiemParse:/ночы|раніцы|дня|вечара/,isPM:function(e){return/^(дня|вечара)$/.test(e)},meridiem:function(e,t,n){return e<4?"ночы":e<12?"раніцы":e<17?"дня":"вечара"},dayOfMonthOrdinalParse:/\d{1,2}-(і|ы|га)/,ordinal:function(e,t){switch(t){case"M":case"d":case"DDD":case"w":case"W":return(e%10==2||e%10==3)&&e%100!=12&&e%100!=13?e+"-і":e+"-ы";case"D":return e+"-га";default:return e}},week:{dow:1,doy:7}})})(n(30381))},68338:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("bg",{months:"януари_февруари_март_април_май_юни_юли_август_септември_октомври_ноември_декември".split("_"),monthsShort:"яну_фев_мар_апр_май_юни_юли_авг_сеп_окт_ное_дек".split("_"),weekdays:"неделя_понеделник_вторник_сряда_четвъртък_петък_събота".split("_"),weekdaysShort:"нед_пон_вто_сря_чет_пет_съб".split("_"),weekdaysMin:"нд_пн_вт_ср_чт_пт_сб".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"D.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY H:mm",LLLL:"dddd, D MMMM YYYY H:mm"},calendar:{sameDay:"[Днес в] LT",nextDay:"[Утре в] LT",nextWeek:"dddd [в] LT",lastDay:"[Вчера в] LT",lastWeek:function(){switch(this.day()){case 0:case 3:case 6:return"[Миналата] dddd [в] LT";case 1:case 2:case 4:case 5:return"[Миналия] dddd [в] LT"}},sameElse:"L"},relativeTime:{future:"след %s",past:"преди %s",s:"няколко секунди",ss:"%d 
секунди",m:"минута",mm:"%d минути",h:"час",hh:"%d часа",d:"ден",dd:"%d дена",w:"седмица",ww:"%d седмици",M:"месец",MM:"%d месеца",y:"година",yy:"%d години"},dayOfMonthOrdinalParse:/\d{1,2}-(ев|ен|ти|ви|ри|ми)/,ordinal:function(e){var t=e%10,n=e%100;if(0===e)return e+"-ев";if(0===n)return e+"-ен";if(n>10&&n<20)return e+"-ти";if(1===t)return e+"-ви";if(2===t)return e+"-ри";else if(7===t||8===t)return e+"-ми";else return e+"-ти"},week:{dow:1,doy:7}})})(n(30381))},67438:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("bm",{months:"Zanwuyekalo_Fewuruyekalo_Marisikalo_Awirilikalo_Mɛkalo_Zuwɛnkalo_Zuluyekalo_Utikalo_Sɛtanburukalo_ɔkutɔburukalo_Nowanburukalo_Desanburukalo".split("_"),monthsShort:"Zan_Few_Mar_Awi_Mɛ_Zuw_Zul_Uti_Sɛt_ɔku_Now_Des".split("_"),weekdays:"Kari_Ntɛnɛn_Tarata_Araba_Alamisa_Juma_Sibiri".split("_"),weekdaysShort:"Kar_Ntɛ_Tar_Ara_Ala_Jum_Sib".split("_"),weekdaysMin:"Ka_Nt_Ta_Ar_Al_Ju_Si".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"MMMM [tile] D [san] YYYY",LLL:"MMMM [tile] D [san] YYYY [lɛrɛ] HH:mm",LLLL:"dddd MMMM [tile] D [san] YYYY [lɛrɛ] HH:mm"},calendar:{sameDay:"[Bi lɛrɛ] LT",nextDay:"[Sini lɛrɛ] LT",nextWeek:"dddd [don lɛrɛ] LT",lastDay:"[Kunu lɛrɛ] LT",lastWeek:"dddd [tɛmɛnen lɛrɛ] LT",sameElse:"L"},relativeTime:{future:"%s kɔnɔ",past:"a bɛ %s bɔ",s:"sanga dama dama",ss:"sekondi %d",m:"miniti kelen",mm:"miniti %d",h:"lɛrɛ kelen",hh:"lɛrɛ %d",d:"tile kelen",dd:"tile %d",M:"kalo kelen",MM:"kalo %d",y:"san kelen",yy:"san %d"},week:{dow:1,doy:4}})})(n(30381))},76225:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"১",2:"২",3:"৩",4:"৪",5:"৫",6:"৬",7:"৭",8:"৮",9:"৯",0:"০"},n={"১":"1","২":"2","৩":"3","৪":"4","৫":"5","৬":"6","৭":"7","৮":"8","৯":"9","০":"0"};return e.defineLocale("bn-bd",{months:"জানুয়ারি_ফেব্রুয়ারি_মার্চ_এপ্রিল_মে_জুন_জুলাই_আগস্ট_সেপ্টেম্বর_অক্টোবর_নভেম্বর_ডিসেম্বর".split("_"),monthsShort:"জানু_ফেব্রু_মার্চ_এপ্রিল_মে_জুন_জুলাই_আগস্ট_সেপ্ট_অক্টো_নভে_ডিসে".split("_"),weekdays:"রবিবার_সোমবার_মঙ্গলবার_বুধবার_বৃহস্পতিবার_শুক্রবার_শনিবার".split("_"),weekdaysShort:"রবি_সোম_মঙ্গল_বুধ_বৃহস্পতি_শুক্র_শনি".split("_"),weekdaysMin:"রবি_সোম_মঙ্গল_বুধ_বৃহ_শুক্র_শনি".split("_"),longDateFormat:{LT:"A h:mm সময়",LTS:"A h:mm:ss সময়",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm সময়",LLLL:"dddd, D MMMM YYYY, A h:mm সময়"},calendar:{sameDay:"[আজ] LT",nextDay:"[আগামীকাল] LT",nextWeek:"dddd, LT",lastDay:"[গতকাল] LT",lastWeek:"[গত] dddd, LT",sameElse:"L"},relativeTime:{future:"%s পরে",past:"%s আগে",s:"কয়েক সেকেন্ড",ss:"%d সেকেন্ড",m:"এক মিনিট",mm:"%d মিনিট",h:"এক ঘন্টা",hh:"%d ঘন্টা",d:"এক দিন",dd:"%d দিন",M:"এক মাস",MM:"%d মাস",y:"এক বছর",yy:"%d বছর"},preparse:function(e){return e.replace(/[১২৩৪৫৬৭৮৯০]/g,function(e){return n[e]})},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]})},meridiemParse:/রাত|ভোর|সকাল|দুপুর|বিকাল|সন্ধ্যা|রাত/,meridiemHour:function(e,t){if(12===e&&(e=0),"রাত"===t)return e<4?e:e+12;if("ভোর"===t)return e;if("সকাল"===t)return e;if("দুপুর"===t)return e>=3?e:e+12;if("বিকাল"===t)return e+12;else if("সন্ধ্যা"===t)return e+12},meridiem:function(e,t,n){if(e<4)return"রাত";if(e<6)return"ভোর";if(e<12)return"সকাল";if(e<15)return"দুপুর";if(e<18)return"বিকাল";else if(e<20)return"সন্ধ্যা";else return"রাত"},week:{dow:0,doy:6}})})(n(30381))},8905:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"১",2:"২",3:"৩",4:"৪",5:"৫",6:"৬",7:"৭",8:"৮",9:"৯",0:"০"},n={"১":"1","২":"2","৩":"3","৪":"4","৫":"5","৬":"6","৭":"7","৮":"8","৯":"9","০":"0"};return e.defineLocale("bn",{months:"জানুয়ারি_ফেব্রুয়ারি_মার্চ_এপ্রিল_মে_জুন_জুলাই_আগস্ট_সেপ্টেম্বর_অক্টোবর_নভেম্বর_ডিসেম্বর".split("_"),monthsShort:"জানু_ফেব্রু_মার্চ_এপ্রিল_মে_জুন_জুলাই_আগস্ট_সেপ্ট_অক্টো_নভে_ডিসে".split("_"),weekdays:"রবিবার_সোমবার_মঙ্গলবার_বুধবার_বৃহস্পতিবার_শুক্রবার_শনিবার".split("_"),weekdaysShort:"রবি_সোম_মঙ্গল_বুধ_বৃহস্পতি_শুক্র_শনি".split("_"),weekdaysMin:"রবি_সোম_মঙ্গল_বুধ_বৃহ_শুক্র_শনি".split("_"),longDateFormat:{LT:"A h:mm সময়",LTS:"A h:mm:ss সময়",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm সময়",LLLL:"dddd, D MMMM YYYY, A h:mm সময়"},calendar:{sameDay:"[আজ] LT",nextDay:"[আগামীকাল] LT",nextWeek:"dddd, LT",lastDay:"[গতকাল] LT",lastWeek:"[গত] dddd, LT",sameElse:"L"},relativeTime:{future:"%s পরে",past:"%s আগে",s:"কয়েক সেকেন্ড",ss:"%d সেকেন্ড",m:"এক মিনিট",mm:"%d মিনিট",h:"এক ঘন্টা",hh:"%d ঘন্টা",d:"এক দিন",dd:"%d দিন",M:"এক মাস",MM:"%d মাস",y:"এক বছর",yy:"%d বছর"},preparse:function(e){return e.replace(/[১২৩৪৫৬৭৮৯০]/g,function(e){return n[e]})},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]})},meridiemParse:/রাত|সকাল|দুপুর|বিকাল|রাত/,meridiemHour:function(e,t){return(12===e&&(e=0),"রাত"===t&&e>=4||"দুপুর"===t&&e<5||"বিকাল"===t)?e+12:e},meridiem:function(e,t,n){return e<4?"রাত":e<10?"সকাল":e<17?"দুপুর":e<20?"বিকাল":"রাত"},week:{dow:0,doy:6}})})(n(30381))},11560:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"༡",2:"༢",3:"༣",4:"༤",5:"༥",6:"༦",7:"༧",8:"༨",9:"༩",0:"༠"},n={"༡":"1","༢":"2","༣":"3","༤":"4","༥":"5","༦":"6","༧":"7","༨":"8","༩":"9","༠":"0"};return e.defineLocale("bo",{months:"ཟླ་བ་དང་པོ_ཟླ་བ་གཉིས་པ_ཟླ་བ་གསུམ་པ_ཟླ་བ་བཞི་པ_ཟླ་བ་ལྔ་པ_ཟླ་བ་དྲུག་པ_ཟླ་བ་བདུན་པ_ཟླ་བ་བརྒྱད་པ_ཟླ་བ་དགུ་པ_ཟླ་བ་བཅུ་པ_ཟླ་བ་བཅུ་གཅིག་པ_ཟླ་བ་བཅུ་གཉིས་པ".split("_"),monthsShort:"ཟླ་1_ཟླ་2_ཟླ་3_ཟླ་4_ཟླ་5_ཟླ་6_ཟླ་7_ཟླ་8_ཟླ་9_ཟླ་10_ཟླ་11_ཟླ་12".split("_"),monthsShortRegex:/^(ཟླ་\d{1,2})/,monthsParseExact:!0,weekdays:"གཟའ་ཉི་མ་_གཟའ་ཟླ་བ་_གཟའ་མིག་དམར་_གཟའ་ལྷག་པ་_གཟའ་ཕུར་བུ_གཟའ་པ་སངས་_གཟའ་སྤེན་པ་".split("_"),weekdaysShort:"ཉི་མ་_ཟླ་བ་_མིག་དམར་_ལྷག་པ་_ཕུར་བུ_པ་སངས་_སྤེན་པ་".split("_"),weekdaysMin:"ཉི_ཟླ_མིག_ལྷག_ཕུར_སངས_སྤེན".split("_"),longDateFormat:{LT:"A h:mm",LTS:"A h:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm",LLLL:"dddd, D MMMM YYYY, A h:mm"},calendar:{sameDay:"[དི་རིང] LT",nextDay:"[སང་ཉིན] LT",nextWeek:"[བདུན་ཕྲག་རྗེས་མ], LT",lastDay:"[ཁ་སང] LT",lastWeek:"[བདུན་ཕྲག་མཐའ་མ] dddd, LT",sameElse:"L"},relativeTime:{future:"%s ལ་",past:"%s སྔན་ལ",s:"ལམ་སང",ss:"%d སྐར་ཆ།",m:"སྐར་མ་གཅིག",mm:"%d སྐར་མ",h:"ཆུ་ཚོད་གཅིག",hh:"%d ཆུ་ཚོད",d:"ཉིན་གཅིག",dd:"%d ཉིན་",M:"ཟླ་བ་གཅིག",MM:"%d ཟླ་བ",y:"ལོ་གཅིག",yy:"%d ལོ"},preparse:function(e){return e.replace(/[༡༢༣༤༥༦༧༨༩༠]/g,function(e){return n[e]})},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]})},meridiemParse:/མཚན་མོ|ཞོགས་ཀས|ཉིན་གུང|དགོང་དག|མཚན་མོ/,meridiemHour:function(e,t){return(12===e&&(e=0),"མཚན་མོ"===t&&e>=4||"ཉིན་གུང"===t&&e<5||"དགོང་དག"===t)?e+12:e},meridiem:function(e,t,n){return e<4?"མཚན་མོ":e<10?"ཞོགས་ཀས":e<17?"ཉིན་གུང":e<20?"དགོང་དག":"མཚན་མོ"},week:{dow:0,doy:6}})})(n(30381))},1278:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +function t(e,t,n){return e+" "+i({mm:"munutenn",MM:"miz",dd:"devezh"}[n],e)}function n(e){switch(r(e)){case 1:case 3:case 4:case 5:case 9:return e+" bloaz";default:return e+" vloaz"}}function r(e){return e>9?r(e%10):e}function i(e,t){return 2===t?a(e):e}function a(e){var t={m:"v",b:"v",d:"z"};return void 0===t[e.charAt(0)]?e:t[e.charAt(0)]+e.substring(1)}var o=[/^gen/i,/^c[ʼ\']hwe/i,/^meu/i,/^ebr/i,/^mae/i,/^(mez|eve)/i,/^gou/i,/^eos/i,/^gwe/i,/^her/i,/^du/i,/^ker/i,],s=/^(genver|c[ʼ\']hwevrer|meurzh|ebrel|mae|mezheven|gouere|eost|gwengolo|here|du|kerzu|gen|c[ʼ\']hwe|meu|ebr|mae|eve|gou|eos|gwe|her|du|ker)/i,u=/^(genver|c[ʼ\']hwevrer|meurzh|ebrel|mae|mezheven|gouere|eost|gwengolo|here|du|kerzu)/i,c=/^(gen|c[ʼ\']hwe|meu|ebr|mae|eve|gou|eos|gwe|her|du|ker)/i,l=[/^sul/i,/^lun/i,/^meurzh/i,/^merc[ʼ\']her/i,/^yaou/i,/^gwener/i,/^sadorn/i,],f=[/^Sul/i,/^Lun/i,/^Meu/i,/^Mer/i,/^Yao/i,/^Gwe/i,/^Sad/i,],d=[/^Su/i,/^Lu/i,/^Me([^r]|$)/i,/^Mer/i,/^Ya/i,/^Gw/i,/^Sa/i,];return e.defineLocale("br",{months:"Genver_Cʼhwevrer_Meurzh_Ebrel_Mae_Mezheven_Gouere_Eost_Gwengolo_Here_Du_Kerzu".split("_"),monthsShort:"Gen_Cʼhwe_Meu_Ebr_Mae_Eve_Gou_Eos_Gwe_Her_Du_Ker".split("_"),weekdays:"Sul_Lun_Meurzh_Mercʼher_Yaou_Gwener_Sadorn".split("_"),weekdaysShort:"Sul_Lun_Meu_Mer_Yao_Gwe_Sad".split("_"),weekdaysMin:"Su_Lu_Me_Mer_Ya_Gw_Sa".split("_"),weekdaysParse:d,fullWeekdaysParse:l,shortWeekdaysParse:f,minWeekdaysParse:d,monthsRegex:s,monthsShortRegex:s,monthsStrictRegex:u,monthsShortStrictRegex:c,monthsParse:o,longMonthsParse:o,shortMonthsParse:o,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D [a viz] MMMM YYYY",LLL:"D [a viz] MMMM YYYY HH:mm",LLLL:"dddd, D [a viz] MMMM YYYY HH:mm"},calendar:{sameDay:"[Hiziv da] LT",nextDay:"[Warcʼhoazh da] LT",nextWeek:"dddd [da] LT",lastDay:"[Decʼh da] LT",lastWeek:"dddd [paset da] LT",sameElse:"L"},relativeTime:{future:"a-benn %s",past:"%s ʼzo",s:"un nebeud segondenno\xf9",ss:"%d eilenn",m:"ur 
vunutenn",mm:t,h:"un eur",hh:"%d eur",d:"un devezh",dd:t,M:"ur miz",MM:t,y:"ur bloaz",yy:n},dayOfMonthOrdinalParse:/\d{1,2}(añ|vet)/,ordinal:function(e){var t=1===e?"a\xf1":"vet";return e+t},week:{dow:1,doy:4},meridiemParse:/a.m.|g.m./,isPM:function(e){return"g.m."===e},meridiem:function(e,t,n){return e<12?"a.m.":"g.m."}})})(n(30381))},80622:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +function t(e,t,n){var r=e+" ";switch(n){case"ss":return 1===e?r+="sekunda":2===e||3===e||4===e?r+="sekunde":r+="sekundi",r;case"m":return t?"jedna minuta":"jedne minute";case"mm":return 1===e?r+="minuta":2===e||3===e||4===e?r+="minute":r+="minuta",r;case"h":return t?"jedan sat":"jednog sata";case"hh":return 1===e?r+="sat":2===e||3===e||4===e?r+="sata":r+="sati",r;case"dd":return 1===e?r+="dan":r+="dana",r;case"MM":return 1===e?r+="mjesec":2===e||3===e||4===e?r+="mjeseca":r+="mjeseci",r;case"yy":return 1===e?r+="godina":2===e||3===e||4===e?r+="godine":r+="godina",r}}return e.defineLocale("bs",{months:"januar_februar_mart_april_maj_juni_juli_august_septembar_oktobar_novembar_decembar".split("_"),monthsShort:"jan._feb._mar._apr._maj._jun._jul._aug._sep._okt._nov._dec.".split("_"),monthsParseExact:!0,weekdays:"nedjelja_ponedjeljak_utorak_srijeda_četvrtak_petak_subota".split("_"),weekdaysShort:"ned._pon._uto._sri._čet._pet._sub.".split("_"),weekdaysMin:"ne_po_ut_sr_če_pe_su".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd, D. 
MMMM YYYY H:mm"},calendar:{sameDay:"[danas u] LT",nextDay:"[sutra u] LT",nextWeek:function(){switch(this.day()){case 0:return"[u] [nedjelju] [u] LT";case 3:return"[u] [srijedu] [u] LT";case 6:return"[u] [subotu] [u] LT";case 1:case 2:case 4:case 5:return"[u] dddd [u] LT"}},lastDay:"[jučer u] LT",lastWeek:function(){switch(this.day()){case 0:case 3:return"[prošlu] dddd [u] LT";case 6:return"[prošle] [subote] [u] LT";case 1:case 2:case 4:case 5:return"[prošli] dddd [u] LT"}},sameElse:"L"},relativeTime:{future:"za %s",past:"prije %s",s:"par sekundi",ss:t,m:t,mm:t,h:t,hh:t,d:"dan",dd:t,M:"mjesec",MM:t,y:"godinu",yy:t},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}})})(n(30381))},2468:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("ca",{months:{standalone:"gener_febrer_mar\xe7_abril_maig_juny_juliol_agost_setembre_octubre_novembre_desembre".split("_"),format:"de gener_de febrer_de mar\xe7_d'abril_de maig_de juny_de juliol_d'agost_de setembre_d'octubre_de novembre_de desembre".split("_"),isFormat:/D[oD]?(\s)+MMMM/},monthsShort:"gen._febr._mar\xe7_abr._maig_juny_jul._ag._set._oct._nov._des.".split("_"),monthsParseExact:!0,weekdays:"diumenge_dilluns_dimarts_dimecres_dijous_divendres_dissabte".split("_"),weekdaysShort:"dg._dl._dt._dc._dj._dv._ds.".split("_"),weekdaysMin:"dg_dl_dt_dc_dj_dv_ds".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM [de] YYYY",ll:"D MMM YYYY",LLL:"D MMMM [de] YYYY [a les] H:mm",lll:"D MMM YYYY, H:mm",LLLL:"dddd D MMMM [de] YYYY [a les] H:mm",llll:"ddd D MMM YYYY, H:mm"},calendar:{sameDay:function(){return"[avui a "+(1!==this.hours()?"les":"la")+"] LT"},nextDay:function(){return"[dem\xe0 a "+(1!==this.hours()?"les":"la")+"] LT"},nextWeek:function(){return"dddd [a "+(1!==this.hours()?"les":"la")+"] LT"},lastDay:function(){return"[ahir a "+(1!==this.hours()?"les":"la")+"] LT"},lastWeek:function(){return"[el] dddd [passat a "+(1!==this.hours()?"les":"la")+"] 
LT"},sameElse:"L"},relativeTime:{future:"d'aqu\xed %s",past:"fa %s",s:"uns segons",ss:"%d segons",m:"un minut",mm:"%d minuts",h:"una hora",hh:"%d hores",d:"un dia",dd:"%d dies",M:"un mes",MM:"%d mesos",y:"un any",yy:"%d anys"},dayOfMonthOrdinalParse:/\d{1,2}(r|n|t|è|a)/,ordinal:function(e,t){var n=1===e?"r":2===e?"n":3===e?"r":4===e?"t":"\xe8";return("w"===t||"W"===t)&&(n="a"),e+n},week:{dow:1,doy:4}})})(n(30381))},5822:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t="leden_\xfanor_březen_duben_květen_červen_červenec_srpen_z\xe1ř\xed_ř\xedjen_listopad_prosinec".split("_"),n="led_\xfano_bře_dub_kvě_čvn_čvc_srp_z\xe1ř_ř\xedj_lis_pro".split("_"),r=[/^led/i,/^úno/i,/^bře/i,/^dub/i,/^kvě/i,/^(čvn|červen$|června)/i,/^(čvc|červenec|července)/i,/^srp/i,/^zář/i,/^říj/i,/^lis/i,/^pro/i,],i=/^(leden|únor|březen|duben|květen|červenec|července|červen|června|srpen|září|říjen|listopad|prosinec|led|úno|bře|dub|kvě|čvn|čvc|srp|zář|říj|lis|pro)/i;function a(e){return e>1&&e<5&&1!=~~(e/10)}function o(e,t,n,r){var i=e+" ";switch(n){case"s":return t||r?"p\xe1r sekund":"p\xe1r sekundami";case"ss":if(t||r)return i+(a(e)?"sekundy":"sekund");return i+"sekundami";case"m":return t?"minuta":r?"minutu":"minutou";case"mm":if(t||r)return i+(a(e)?"minuty":"minut");return i+"minutami";case"h":return t?"hodina":r?"hodinu":"hodinou";case"hh":if(t||r)return i+(a(e)?"hodiny":"hodin");return i+"hodinami";case"d":return t||r?"den":"dnem";case"dd":if(t||r)return i+(a(e)?"dny":"dn\xed");return i+"dny";case"M":return t||r?"měs\xedc":"měs\xedcem";case"MM":if(t||r)return i+(a(e)?"měs\xedce":"měs\xedců");return i+"měs\xedci";case"y":return t||r?"rok":"rokem";case"yy":if(t||r)return i+(a(e)?"roky":"let");return i+"lety"}}return 
e.defineLocale("cs",{months:t,monthsShort:n,monthsRegex:i,monthsShortRegex:i,monthsStrictRegex:/^(leden|ledna|února|únor|březen|března|duben|dubna|květen|května|červenec|července|červen|června|srpen|srpna|září|říjen|října|listopadu|listopad|prosinec|prosince)/i,monthsShortStrictRegex:/^(led|úno|bře|dub|kvě|čvn|čvc|srp|zář|říj|lis|pro)/i,monthsParse:r,longMonthsParse:r,shortMonthsParse:r,weekdays:"neděle_ponděl\xed_\xfater\xfd_středa_čtvrtek_p\xe1tek_sobota".split("_"),weekdaysShort:"ne_po_\xfat_st_čt_p\xe1_so".split("_"),weekdaysMin:"ne_po_\xfat_st_čt_p\xe1_so".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd D. MMMM YYYY H:mm",l:"D. M. YYYY"},calendar:{sameDay:"[dnes v] LT",nextDay:"[z\xedtra v] LT",nextWeek:function(){switch(this.day()){case 0:return"[v neděli v] LT";case 1:case 2:return"[v] dddd [v] LT";case 3:return"[ve středu v] LT";case 4:return"[ve čtvrtek v] LT";case 5:return"[v p\xe1tek v] LT";case 6:return"[v sobotu v] LT"}},lastDay:"[včera v] LT",lastWeek:function(){switch(this.day()){case 0:return"[minulou neděli v] LT";case 1:case 2:return"[minul\xe9] dddd [v] LT";case 3:return"[minulou středu v] LT";case 4:case 5:return"[minul\xfd] dddd [v] LT";case 6:return"[minulou sobotu v] LT"}},sameElse:"L"},relativeTime:{future:"za %s",past:"před %s",s:o,ss:o,m:o,mm:o,h:o,hh:o,d:o,dd:o,M:o,MM:o,y:o,yy:o},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},50877:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("cv",{months:"кӑрлач_нарӑс_пуш_ака_май_ҫӗртме_утӑ_ҫурла_авӑн_юпа_чӳк_раштав".split("_"),monthsShort:"кӑр_нар_пуш_ака_май_ҫӗр_утӑ_ҫур_авн_юпа_чӳк_раш".split("_"),weekdays:"вырсарникун_тунтикун_ытларикун_юнкун_кӗҫнерникун_эрнекун_шӑматкун".split("_"),weekdaysShort:"выр_тун_ытл_юн_кӗҫ_эрн_шӑм".split("_"),weekdaysMin:"вр_тн_ыт_юн_кҫ_эр_шм".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD-MM-YYYY",LL:"YYYY [ҫулхи] MMMM [уйӑхӗн] 
D[-мӗшӗ]",LLL:"YYYY [ҫулхи] MMMM [уйӑхӗн] D[-мӗшӗ], HH:mm",LLLL:"dddd, YYYY [ҫулхи] MMMM [уйӑхӗн] D[-мӗшӗ], HH:mm"},calendar:{sameDay:"[Паян] LT [сехетре]",nextDay:"[Ыран] LT [сехетре]",lastDay:"[Ӗнер] LT [сехетре]",nextWeek:"[Ҫитес] dddd LT [сехетре]",lastWeek:"[Иртнӗ] dddd LT [сехетре]",sameElse:"L"},relativeTime:{future:function(e){var t=/сехет$/i.exec(e)?"рен":/ҫул$/i.exec(e)?"тан":"ран";return e+t},past:"%s каялла",s:"пӗр-ик ҫеккунт",ss:"%d ҫеккунт",m:"пӗр минут",mm:"%d минут",h:"пӗр сехет",hh:"%d сехет",d:"пӗр кун",dd:"%d кун",M:"пӗр уйӑх",MM:"%d уйӑх",y:"пӗр ҫул",yy:"%d ҫул"},dayOfMonthOrdinalParse:/\d{1,2}-мӗш/,ordinal:"%d-мӗш",week:{dow:1,doy:7}})})(n(30381))},47373:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("cy",{months:"Ionawr_Chwefror_Mawrth_Ebrill_Mai_Mehefin_Gorffennaf_Awst_Medi_Hydref_Tachwedd_Rhagfyr".split("_"),monthsShort:"Ion_Chwe_Maw_Ebr_Mai_Meh_Gor_Aws_Med_Hyd_Tach_Rhag".split("_"),weekdays:"Dydd Sul_Dydd Llun_Dydd Mawrth_Dydd Mercher_Dydd Iau_Dydd Gwener_Dydd Sadwrn".split("_"),weekdaysShort:"Sul_Llun_Maw_Mer_Iau_Gwe_Sad".split("_"),weekdaysMin:"Su_Ll_Ma_Me_Ia_Gw_Sa".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Heddiw am] LT",nextDay:"[Yfory am] LT",nextWeek:"dddd [am] LT",lastDay:"[Ddoe am] LT",lastWeek:"dddd [diwethaf am] LT",sameElse:"L"},relativeTime:{future:"mewn %s",past:"%s yn \xf4l",s:"ychydig eiliadau",ss:"%d eiliad",m:"munud",mm:"%d munud",h:"awr",hh:"%d awr",d:"diwrnod",dd:"%d diwrnod",M:"mis",MM:"%d mis",y:"blwyddyn",yy:"%d flynedd"},dayOfMonthOrdinalParse:/\d{1,2}(fed|ain|af|il|ydd|ed|eg)/,ordinal:function(e){var t=e,n="",r=["","af","il","ydd","ydd","ed","ed","ed","fed","fed","fed","eg","fed","eg","eg","fed","eg","eg","fed","eg","fed"];return 
t>20?n=40===t||50===t||60===t||80===t||100===t?"fed":"ain":t>0&&(n=r[t]),e+n},week:{dow:1,doy:4}})})(n(30381))},24780:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("da",{months:"januar_februar_marts_april_maj_juni_juli_august_september_oktober_november_december".split("_"),monthsShort:"jan_feb_mar_apr_maj_jun_jul_aug_sep_okt_nov_dec".split("_"),weekdays:"s\xf8ndag_mandag_tirsdag_onsdag_torsdag_fredag_l\xf8rdag".split("_"),weekdaysShort:"s\xf8n_man_tir_ons_tor_fre_l\xf8r".split("_"),weekdaysMin:"s\xf8_ma_ti_on_to_fr_l\xf8".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY HH:mm",LLLL:"dddd [d.] D. MMMM YYYY [kl.] HH:mm"},calendar:{sameDay:"[i dag kl.] LT",nextDay:"[i morgen kl.] LT",nextWeek:"p\xe5 dddd [kl.] LT",lastDay:"[i g\xe5r kl.] LT",lastWeek:"[i] dddd[s kl.] LT",sameElse:"L"},relativeTime:{future:"om %s",past:"%s siden",s:"f\xe5 sekunder",ss:"%d sekunder",m:"et minut",mm:"%d minutter",h:"en time",hh:"%d timer",d:"en dag",dd:"%d dage",M:"en m\xe5ned",MM:"%d m\xe5neder",y:"et \xe5r",yy:"%d \xe5r"},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},60217:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +function t(e,t,n,r){var i={m:["eine Minute","einer Minute"],h:["eine Stunde","einer Stunde"],d:["ein Tag","einem Tag"],dd:[e+" Tage",e+" Tagen"],w:["eine Woche","einer Woche"],M:["ein Monat","einem Monat"],MM:[e+" Monate",e+" Monaten"],y:["ein Jahr","einem Jahr"],yy:[e+" Jahre",e+" Jahren"]};return t?i[n][0]:i[n][1]}return e.defineLocale("de-at",{months:"J\xe4nner_Februar_M\xe4rz_April_Mai_Juni_Juli_August_September_Oktober_November_Dezember".split("_"),monthsShort:"J\xe4n._Feb._M\xe4rz_Apr._Mai_Juni_Juli_Aug._Sep._Okt._Nov._Dez.".split("_"),monthsParseExact:!0,weekdays:"Sonntag_Montag_Dienstag_Mittwoch_Donnerstag_Freitag_Samstag".split("_"),weekdaysShort:"So._Mo._Di._Mi._Do._Fr._Sa.".split("_"),weekdaysMin:"So_Mo_Di_Mi_Do_Fr_Sa".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY HH:mm",LLLL:"dddd, D. MMMM YYYY HH:mm"},calendar:{sameDay:"[heute um] LT [Uhr]",sameElse:"L",nextDay:"[morgen um] LT [Uhr]",nextWeek:"dddd [um] LT [Uhr]",lastDay:"[gestern um] LT [Uhr]",lastWeek:"[letzten] dddd [um] LT [Uhr]"},relativeTime:{future:"in %s",past:"vor %s",s:"ein paar Sekunden",ss:"%d Sekunden",m:t,mm:"%d Minuten",h:t,hh:"%d Stunden",d:t,dd:t,w:t,ww:"%d Wochen",M:t,MM:t,y:t,yy:t},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},60894:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +function t(e,t,n,r){var i={m:["eine Minute","einer Minute"],h:["eine Stunde","einer Stunde"],d:["ein Tag","einem Tag"],dd:[e+" Tage",e+" Tagen"],w:["eine Woche","einer Woche"],M:["ein Monat","einem Monat"],MM:[e+" Monate",e+" Monaten"],y:["ein Jahr","einem Jahr"],yy:[e+" Jahre",e+" Jahren"]};return t?i[n][0]:i[n][1]}return e.defineLocale("de-ch",{months:"Januar_Februar_M\xe4rz_April_Mai_Juni_Juli_August_September_Oktober_November_Dezember".split("_"),monthsShort:"Jan._Feb._M\xe4rz_Apr._Mai_Juni_Juli_Aug._Sep._Okt._Nov._Dez.".split("_"),monthsParseExact:!0,weekdays:"Sonntag_Montag_Dienstag_Mittwoch_Donnerstag_Freitag_Samstag".split("_"),weekdaysShort:"So_Mo_Di_Mi_Do_Fr_Sa".split("_"),weekdaysMin:"So_Mo_Di_Mi_Do_Fr_Sa".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY HH:mm",LLLL:"dddd, D. MMMM YYYY HH:mm"},calendar:{sameDay:"[heute um] LT [Uhr]",sameElse:"L",nextDay:"[morgen um] LT [Uhr]",nextWeek:"dddd [um] LT [Uhr]",lastDay:"[gestern um] LT [Uhr]",lastWeek:"[letzten] dddd [um] LT [Uhr]"},relativeTime:{future:"in %s",past:"vor %s",s:"ein paar Sekunden",ss:"%d Sekunden",m:t,mm:"%d Minuten",h:t,hh:"%d Stunden",d:t,dd:t,w:t,ww:"%d Wochen",M:t,MM:t,y:t,yy:t},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},59740:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +function t(e,t,n,r){var i={m:["eine Minute","einer Minute"],h:["eine Stunde","einer Stunde"],d:["ein Tag","einem Tag"],dd:[e+" Tage",e+" Tagen"],w:["eine Woche","einer Woche"],M:["ein Monat","einem Monat"],MM:[e+" Monate",e+" Monaten"],y:["ein Jahr","einem Jahr"],yy:[e+" Jahre",e+" Jahren"]};return t?i[n][0]:i[n][1]}return e.defineLocale("de",{months:"Januar_Februar_M\xe4rz_April_Mai_Juni_Juli_August_September_Oktober_November_Dezember".split("_"),monthsShort:"Jan._Feb._M\xe4rz_Apr._Mai_Juni_Juli_Aug._Sep._Okt._Nov._Dez.".split("_"),monthsParseExact:!0,weekdays:"Sonntag_Montag_Dienstag_Mittwoch_Donnerstag_Freitag_Samstag".split("_"),weekdaysShort:"So._Mo._Di._Mi._Do._Fr._Sa.".split("_"),weekdaysMin:"So_Mo_Di_Mi_Do_Fr_Sa".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY HH:mm",LLLL:"dddd, D. MMMM YYYY HH:mm"},calendar:{sameDay:"[heute um] LT [Uhr]",sameElse:"L",nextDay:"[morgen um] LT [Uhr]",nextWeek:"dddd [um] LT [Uhr]",lastDay:"[gestern um] LT [Uhr]",lastWeek:"[letzten] dddd [um] LT [Uhr]"},relativeTime:{future:"in %s",past:"vor %s",s:"ein paar Sekunden",ss:"%d Sekunden",m:t,mm:"%d Minuten",h:t,hh:"%d Stunden",d:t,dd:t,w:t,ww:"%d Wochen",M:t,MM:t,y:t,yy:t},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},5300:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t=["ޖެނުއަރީ","ފެބްރުއަރީ","މާރިޗު","އޭޕްރީލު","މޭ","ޖޫން","ޖުލައި","އޯގަސްޓު","ސެޕްޓެމްބަރު","އޮކްޓޯބަރު","ނޮވެމްބަރު","ޑިސެމްބަރު",],n=["އާދިއްތަ","ހޯމަ","އަންގާރަ","ބުދަ","ބުރާސްފަތި","ހުކުރު","ހޮނިހިރު",];return e.defineLocale("dv",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:"އާދި_ހޯމަ_އަން_ބުދަ_ބުރާ_ހުކު_ހޮނި".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"D/M/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},meridiemParse:/މކ|މފ/,isPM:function(e){return"މފ"===e},meridiem:function(e,t,n){return e<12?"މކ":"މފ"},calendar:{sameDay:"[މިއަދު] LT",nextDay:"[މާދަމާ] LT",nextWeek:"dddd LT",lastDay:"[އިއްޔެ] LT",lastWeek:"[ފާއިތުވި] dddd LT",sameElse:"L"},relativeTime:{future:"ތެރޭގައި %s",past:"ކުރިން %s",s:"ސިކުންތުކޮޅެއް",ss:"d% ސިކުންތު",m:"މިނިޓެއް",mm:"މިނިޓު %d",h:"ގަޑިއިރެއް",hh:"ގަޑިއިރު %d",d:"ދުވަހެއް",dd:"ދުވަސް %d",M:"މަހެއް",MM:"މަސް %d",y:"އަހަރެއް",yy:"އަހަރު %d"},preparse:function(e){return e.replace(/،/g,",")},postformat:function(e){return e.replace(/,/g,"،")},week:{dow:7,doy:12}})})(n(30381))},50837:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +function t(e){return"undefined"!=typeof Function&&e instanceof Function||"[object Function]"===Object.prototype.toString.call(e)}return e.defineLocale("el",{monthsNominativeEl:"Ιανουάριος_Φεβρουάριος_Μάρτιος_Απρίλιος_Μάιος_Ιούνιος_Ιούλιος_Αύγουστος_Σεπτέμβριος_Οκτώβριος_Νοέμβριος_Δεκέμβριος".split("_"),monthsGenitiveEl:"Ιανουαρίου_Φεβρουαρίου_Μαρτίου_Απριλίου_Μαΐου_Ιουνίου_Ιουλίου_Αυγούστου_Σεπτεμβρίου_Οκτωβρίου_Νοεμβρίου_Δεκεμβρίου".split("_"),months:function(e,t){return e?"string"==typeof t&&/D/.test(t.substring(0,t.indexOf("MMMM")))?this._monthsGenitiveEl[e.month()]:this._monthsNominativeEl[e.month()]:this._monthsNominativeEl},monthsShort:"Ιαν_Φεβ_Μαρ_Απρ_Μαϊ_Ιουν_Ιουλ_Αυγ_Σεπ_Οκτ_Νοε_Δεκ".split("_"),weekdays:"Κυριακή_Δευτέρα_Τρίτη_Τετάρτη_Πέμπτη_Παρασκευή_Σάββατο".split("_"),weekdaysShort:"Κυρ_Δευ_Τρι_Τετ_Πεμ_Παρ_Σαβ".split("_"),weekdaysMin:"Κυ_Δε_Τρ_Τε_Πε_Πα_Σα".split("_"),meridiem:function(e,t,n){return e>11?n?"μμ":"ΜΜ":n?"πμ":"ΠΜ"},isPM:function(e){return"μ"===(e+"").toLowerCase()[0]},meridiemParse:/[ΠΜ]\.?Μ?\.?/i,longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY h:mm A",LLLL:"dddd, D MMMM YYYY h:mm A"},calendarEl:{sameDay:"[Σήμερα {}] LT",nextDay:"[Αύριο {}] LT",nextWeek:"dddd [{}] LT",lastDay:"[Χθες {}] LT",lastWeek:function(){return 6===this.day()?"[το προηγούμενο] dddd [{}] LT":"[την προηγούμενη] dddd [{}] LT"},sameElse:"L"},calendar:function(e,n){var r=this._calendarEl[e],i=n&&n.hours();return t(r)&&(r=r.apply(n)),r.replace("{}",i%12==1?"στη":"στις")},relativeTime:{future:"σε %s",past:"%s πριν",s:"λίγα δευτερόλεπτα",ss:"%d δευτερόλεπτα",m:"ένα λεπτό",mm:"%d λεπτά",h:"μία ώρα",hh:"%d ώρες",d:"μία μέρα",dd:"%d μέρες",M:"ένας μήνας",MM:"%d μήνες",y:"ένας χρόνος",yy:"%d χρόνια"},dayOfMonthOrdinalParse:/\d{1,2}η/,ordinal:"%dη",week:{dow:1,doy:4}})})(n(30381))},78348:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("en-au",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY h:mm A",LLLL:"dddd, D MMMM YYYY h:mm A"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},dayOfMonthOrdinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(e){var t=e%10,n=1==~~(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th";return e+n},week:{dow:0,doy:4}})})(n(30381))},77925:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("en-ca",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"YYYY-MM-DD",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY h:mm A",LLLL:"dddd, MMMM D, YYYY h:mm A"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a 
year",yy:"%d years"},dayOfMonthOrdinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(e){var t=e%10,n=1==~~(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th";return e+n}})})(n(30381))},22243:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("en-gb",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},dayOfMonthOrdinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(e){var t=e%10,n=1==~~(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th";return e+n},week:{dow:1,doy:4}})})(n(30381))},46436:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("en-ie",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] 
LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},dayOfMonthOrdinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(e){var t=e%10,n=1==~~(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th";return e+n},week:{dow:1,doy:4}})})(n(30381))},47207:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("en-il",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},dayOfMonthOrdinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(e){var t=e%10,n=1==~~(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th";return e+n}})})(n(30381))},44175:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("en-in",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY h:mm A",LLLL:"dddd, D MMMM YYYY h:mm A"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},dayOfMonthOrdinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(e){var t=e%10,n=1==~~(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th";return e+n},week:{dow:0,doy:6}})})(n(30381))},76319:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("en-nz",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY h:mm A",LLLL:"dddd, D MMMM YYYY h:mm A"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a 
year",yy:"%d years"},dayOfMonthOrdinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(e){var t=e%10,n=1==~~(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th";return e+n},week:{dow:1,doy:4}})})(n(30381))},31662:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("en-sg",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},dayOfMonthOrdinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(e){var t=e%10,n=1==~~(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th";return e+n},week:{dow:1,doy:4}})})(n(30381))},92915:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("eo",{months:"januaro_februaro_marto_aprilo_majo_junio_julio_aŭgusto_septembro_oktobro_novembro_decembro".split("_"),monthsShort:"jan_feb_mart_apr_maj_jun_jul_aŭg_sept_okt_nov_dec".split("_"),weekdays:"dimanĉo_lundo_mardo_merkredo_ĵaŭdo_vendredo_sabato".split("_"),weekdaysShort:"dim_lun_mard_merk_ĵaŭ_ven_sab".split("_"),weekdaysMin:"di_lu_ma_me_ĵa_ve_sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY-MM-DD",LL:"[la] D[-an de] MMMM, YYYY",LLL:"[la] D[-an de] MMMM, YYYY HH:mm",LLLL:"dddd[n], [la] D[-an de] MMMM, YYYY HH:mm",llll:"ddd, [la] D[-an de] MMM, YYYY 
HH:mm"},meridiemParse:/[ap]\.t\.m/i,isPM:function(e){return"p"===e.charAt(0).toLowerCase()},meridiem:function(e,t,n){return e>11?n?"p.t.m.":"P.T.M.":n?"a.t.m.":"A.T.M."},calendar:{sameDay:"[Hodiaŭ je] LT",nextDay:"[Morgaŭ je] LT",nextWeek:"dddd[n je] LT",lastDay:"[Hieraŭ je] LT",lastWeek:"[pasintan] dddd[n je] LT",sameElse:"L"},relativeTime:{future:"post %s",past:"antaŭ %s",s:"kelkaj sekundoj",ss:"%d sekundoj",m:"unu minuto",mm:"%d minutoj",h:"unu horo",hh:"%d horoj",d:"unu tago",dd:"%d tagoj",M:"unu monato",MM:"%d monatoj",y:"unu jaro",yy:"%d jaroj"},dayOfMonthOrdinalParse:/\d{1,2}a/,ordinal:"%da",week:{dow:1,doy:7}})})(n(30381))},55251:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t="ene._feb._mar._abr._may._jun._jul._ago._sep._oct._nov._dic.".split("_"),n="ene_feb_mar_abr_may_jun_jul_ago_sep_oct_nov_dic".split("_"),r=[/^ene/i,/^feb/i,/^mar/i,/^abr/i,/^may/i,/^jun/i,/^jul/i,/^ago/i,/^sep/i,/^oct/i,/^nov/i,/^dic/i,],i=/^(enero|febrero|marzo|abril|mayo|junio|julio|agosto|septiembre|octubre|noviembre|diciembre|ene\.?|feb\.?|mar\.?|abr\.?|may\.?|jun\.?|jul\.?|ago\.?|sep\.?|oct\.?|nov\.?|dic\.?)/i;return e.defineLocale("es-do",{months:"enero_febrero_marzo_abril_mayo_junio_julio_agosto_septiembre_octubre_noviembre_diciembre".split("_"),monthsShort:function(e,r){return e?/-MMM-/.test(r)?n[e.month()]:t[e.month()]:t},monthsRegex:i,monthsShortRegex:i,monthsStrictRegex:/^(enero|febrero|marzo|abril|mayo|junio|julio|agosto|septiembre|octubre|noviembre|diciembre)/i,monthsShortStrictRegex:/^(ene\.?|feb\.?|mar\.?|abr\.?|may\.?|jun\.?|jul\.?|ago\.?|sep\.?|oct\.?|nov\.?|dic\.?)/i,monthsParse:r,longMonthsParse:r,shortMonthsParse:r,weekdays:"domingo_lunes_martes_mi\xe9rcoles_jueves_viernes_s\xe1bado".split("_"),weekdaysShort:"dom._lun._mar._mi\xe9._jue._vie._s\xe1b.".split("_"),weekdaysMin:"do_lu_ma_mi_ju_vi_s\xe1".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"DD/MM/YYYY",LL:"D [de] MMMM [de] 
YYYY",LLL:"D [de] MMMM [de] YYYY h:mm A",LLLL:"dddd, D [de] MMMM [de] YYYY h:mm A"},calendar:{sameDay:function(){return"[hoy a la"+(1!==this.hours()?"s":"")+"] LT"},nextDay:function(){return"[ma\xf1ana a la"+(1!==this.hours()?"s":"")+"] LT"},nextWeek:function(){return"dddd [a la"+(1!==this.hours()?"s":"")+"] LT"},lastDay:function(){return"[ayer a la"+(1!==this.hours()?"s":"")+"] LT"},lastWeek:function(){return"[el] dddd [pasado a la"+(1!==this.hours()?"s":"")+"] LT"},sameElse:"L"},relativeTime:{future:"en %s",past:"hace %s",s:"unos segundos",ss:"%d segundos",m:"un minuto",mm:"%d minutos",h:"una hora",hh:"%d horas",d:"un d\xeda",dd:"%d d\xedas",w:"una semana",ww:"%d semanas",M:"un mes",MM:"%d meses",y:"un a\xf1o",yy:"%d a\xf1os"},dayOfMonthOrdinalParse:/\d{1,2}º/,ordinal:"%d\xba",week:{dow:1,doy:4}})})(n(30381))},96112:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t="ene._feb._mar._abr._may._jun._jul._ago._sep._oct._nov._dic.".split("_"),n="ene_feb_mar_abr_may_jun_jul_ago_sep_oct_nov_dic".split("_"),r=[/^ene/i,/^feb/i,/^mar/i,/^abr/i,/^may/i,/^jun/i,/^jul/i,/^ago/i,/^sep/i,/^oct/i,/^nov/i,/^dic/i,],i=/^(enero|febrero|marzo|abril|mayo|junio|julio|agosto|septiembre|octubre|noviembre|diciembre|ene\.?|feb\.?|mar\.?|abr\.?|may\.?|jun\.?|jul\.?|ago\.?|sep\.?|oct\.?|nov\.?|dic\.?)/i;return e.defineLocale("es-mx",{months:"enero_febrero_marzo_abril_mayo_junio_julio_agosto_septiembre_octubre_noviembre_diciembre".split("_"),monthsShort:function(e,r){return 
e?/-MMM-/.test(r)?n[e.month()]:t[e.month()]:t},monthsRegex:i,monthsShortRegex:i,monthsStrictRegex:/^(enero|febrero|marzo|abril|mayo|junio|julio|agosto|septiembre|octubre|noviembre|diciembre)/i,monthsShortStrictRegex:/^(ene\.?|feb\.?|mar\.?|abr\.?|may\.?|jun\.?|jul\.?|ago\.?|sep\.?|oct\.?|nov\.?|dic\.?)/i,monthsParse:r,longMonthsParse:r,shortMonthsParse:r,weekdays:"domingo_lunes_martes_mi\xe9rcoles_jueves_viernes_s\xe1bado".split("_"),weekdaysShort:"dom._lun._mar._mi\xe9._jue._vie._s\xe1b.".split("_"),weekdaysMin:"do_lu_ma_mi_ju_vi_s\xe1".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD/MM/YYYY",LL:"D [de] MMMM [de] YYYY",LLL:"D [de] MMMM [de] YYYY H:mm",LLLL:"dddd, D [de] MMMM [de] YYYY H:mm"},calendar:{sameDay:function(){return"[hoy a la"+(1!==this.hours()?"s":"")+"] LT"},nextDay:function(){return"[ma\xf1ana a la"+(1!==this.hours()?"s":"")+"] LT"},nextWeek:function(){return"dddd [a la"+(1!==this.hours()?"s":"")+"] LT"},lastDay:function(){return"[ayer a la"+(1!==this.hours()?"s":"")+"] LT"},lastWeek:function(){return"[el] dddd [pasado a la"+(1!==this.hours()?"s":"")+"] LT"},sameElse:"L"},relativeTime:{future:"en %s",past:"hace %s",s:"unos segundos",ss:"%d segundos",m:"un minuto",mm:"%d minutos",h:"una hora",hh:"%d horas",d:"un d\xeda",dd:"%d d\xedas",w:"una semana",ww:"%d semanas",M:"un mes",MM:"%d meses",y:"un a\xf1o",yy:"%d a\xf1os"},dayOfMonthOrdinalParse:/\d{1,2}º/,ordinal:"%d\xba",week:{dow:0,doy:4},invalidDate:"Fecha inv\xe1lida"})})(n(30381))},71146:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t="ene._feb._mar._abr._may._jun._jul._ago._sep._oct._nov._dic.".split("_"),n="ene_feb_mar_abr_may_jun_jul_ago_sep_oct_nov_dic".split("_"),r=[/^ene/i,/^feb/i,/^mar/i,/^abr/i,/^may/i,/^jun/i,/^jul/i,/^ago/i,/^sep/i,/^oct/i,/^nov/i,/^dic/i,],i=/^(enero|febrero|marzo|abril|mayo|junio|julio|agosto|septiembre|octubre|noviembre|diciembre|ene\.?|feb\.?|mar\.?|abr\.?|may\.?|jun\.?|jul\.?|ago\.?|sep\.?|oct\.?|nov\.?|dic\.?)/i;return e.defineLocale("es-us",{months:"enero_febrero_marzo_abril_mayo_junio_julio_agosto_septiembre_octubre_noviembre_diciembre".split("_"),monthsShort:function(e,r){return e?/-MMM-/.test(r)?n[e.month()]:t[e.month()]:t},monthsRegex:i,monthsShortRegex:i,monthsStrictRegex:/^(enero|febrero|marzo|abril|mayo|junio|julio|agosto|septiembre|octubre|noviembre|diciembre)/i,monthsShortStrictRegex:/^(ene\.?|feb\.?|mar\.?|abr\.?|may\.?|jun\.?|jul\.?|ago\.?|sep\.?|oct\.?|nov\.?|dic\.?)/i,monthsParse:r,longMonthsParse:r,shortMonthsParse:r,weekdays:"domingo_lunes_martes_mi\xe9rcoles_jueves_viernes_s\xe1bado".split("_"),weekdaysShort:"dom._lun._mar._mi\xe9._jue._vie._s\xe1b.".split("_"),weekdaysMin:"do_lu_ma_mi_ju_vi_s\xe1".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"MM/DD/YYYY",LL:"D [de] MMMM [de] YYYY",LLL:"D [de] MMMM [de] YYYY h:mm A",LLLL:"dddd, D [de] MMMM [de] YYYY h:mm A"},calendar:{sameDay:function(){return"[hoy a la"+(1!==this.hours()?"s":"")+"] LT"},nextDay:function(){return"[ma\xf1ana a la"+(1!==this.hours()?"s":"")+"] LT"},nextWeek:function(){return"dddd [a la"+(1!==this.hours()?"s":"")+"] LT"},lastDay:function(){return"[ayer a la"+(1!==this.hours()?"s":"")+"] LT"},lastWeek:function(){return"[el] dddd [pasado a la"+(1!==this.hours()?"s":"")+"] LT"},sameElse:"L"},relativeTime:{future:"en %s",past:"hace %s",s:"unos segundos",ss:"%d segundos",m:"un minuto",mm:"%d minutos",h:"una hora",hh:"%d horas",d:"un d\xeda",dd:"%d d\xedas",w:"una semana",ww:"%d semanas",M:"un mes",MM:"%d meses",y:"un 
a\xf1o",yy:"%d a\xf1os"},dayOfMonthOrdinalParse:/\d{1,2}º/,ordinal:"%d\xba",week:{dow:0,doy:6}})})(n(30381))},55655:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t="ene._feb._mar._abr._may._jun._jul._ago._sep._oct._nov._dic.".split("_"),n="ene_feb_mar_abr_may_jun_jul_ago_sep_oct_nov_dic".split("_"),r=[/^ene/i,/^feb/i,/^mar/i,/^abr/i,/^may/i,/^jun/i,/^jul/i,/^ago/i,/^sep/i,/^oct/i,/^nov/i,/^dic/i,],i=/^(enero|febrero|marzo|abril|mayo|junio|julio|agosto|septiembre|octubre|noviembre|diciembre|ene\.?|feb\.?|mar\.?|abr\.?|may\.?|jun\.?|jul\.?|ago\.?|sep\.?|oct\.?|nov\.?|dic\.?)/i;return e.defineLocale("es",{months:"enero_febrero_marzo_abril_mayo_junio_julio_agosto_septiembre_octubre_noviembre_diciembre".split("_"),monthsShort:function(e,r){return e?/-MMM-/.test(r)?n[e.month()]:t[e.month()]:t},monthsRegex:i,monthsShortRegex:i,monthsStrictRegex:/^(enero|febrero|marzo|abril|mayo|junio|julio|agosto|septiembre|octubre|noviembre|diciembre)/i,monthsShortStrictRegex:/^(ene\.?|feb\.?|mar\.?|abr\.?|may\.?|jun\.?|jul\.?|ago\.?|sep\.?|oct\.?|nov\.?|dic\.?)/i,monthsParse:r,longMonthsParse:r,shortMonthsParse:r,weekdays:"domingo_lunes_martes_mi\xe9rcoles_jueves_viernes_s\xe1bado".split("_"),weekdaysShort:"dom._lun._mar._mi\xe9._jue._vie._s\xe1b.".split("_"),weekdaysMin:"do_lu_ma_mi_ju_vi_s\xe1".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD/MM/YYYY",LL:"D [de] MMMM [de] YYYY",LLL:"D [de] MMMM [de] YYYY H:mm",LLLL:"dddd, D [de] MMMM [de] YYYY H:mm"},calendar:{sameDay:function(){return"[hoy a la"+(1!==this.hours()?"s":"")+"] LT"},nextDay:function(){return"[ma\xf1ana a la"+(1!==this.hours()?"s":"")+"] LT"},nextWeek:function(){return"dddd [a la"+(1!==this.hours()?"s":"")+"] LT"},lastDay:function(){return"[ayer a la"+(1!==this.hours()?"s":"")+"] LT"},lastWeek:function(){return"[el] dddd [pasado a la"+(1!==this.hours()?"s":"")+"] LT"},sameElse:"L"},relativeTime:{future:"en %s",past:"hace %s",s:"unos 
segundos",ss:"%d segundos",m:"un minuto",mm:"%d minutos",h:"una hora",hh:"%d horas",d:"un d\xeda",dd:"%d d\xedas",w:"una semana",ww:"%d semanas",M:"un mes",MM:"%d meses",y:"un a\xf1o",yy:"%d a\xf1os"},dayOfMonthOrdinalParse:/\d{1,2}º/,ordinal:"%d\xba",week:{dow:1,doy:4},invalidDate:"Fecha inv\xe1lida"})})(n(30381))},5603:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +function t(e,t,n,r){var i={s:["m\xf5ne sekundi","m\xf5ni sekund","paar sekundit"],ss:[e+"sekundi",e+"sekundit"],m:["\xfche minuti","\xfcks minut"],mm:[e+" minuti",e+" minutit"],h:["\xfche tunni","tund aega","\xfcks tund"],hh:[e+" tunni",e+" tundi"],d:["\xfche p\xe4eva","\xfcks p\xe4ev"],M:["kuu aja","kuu aega","\xfcks kuu"],MM:[e+" kuu",e+" kuud"],y:["\xfche aasta","aasta","\xfcks aasta"],yy:[e+" aasta",e+" aastat"]};return t?i[n][2]?i[n][2]:i[n][1]:r?i[n][0]:i[n][1]}return e.defineLocale("et",{months:"jaanuar_veebruar_m\xe4rts_aprill_mai_juuni_juuli_august_september_oktoober_november_detsember".split("_"),monthsShort:"jaan_veebr_m\xe4rts_apr_mai_juuni_juuli_aug_sept_okt_nov_dets".split("_"),weekdays:"p\xfchap\xe4ev_esmasp\xe4ev_teisip\xe4ev_kolmap\xe4ev_neljap\xe4ev_reede_laup\xe4ev".split("_"),weekdaysShort:"P_E_T_K_N_R_L".split("_"),weekdaysMin:"P_E_T_K_N_R_L".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd, D. 
MMMM YYYY H:mm"},calendar:{sameDay:"[T\xe4na,] LT",nextDay:"[Homme,] LT",nextWeek:"[J\xe4rgmine] dddd LT",lastDay:"[Eile,] LT",lastWeek:"[Eelmine] dddd LT",sameElse:"L"},relativeTime:{future:"%s p\xe4rast",past:"%s tagasi",s:t,ss:t,m:t,mm:t,h:t,hh:t,d:t,dd:"%d p\xe4eva",M:t,MM:t,y:t,yy:t},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},77763:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("eu",{months:"urtarrila_otsaila_martxoa_apirila_maiatza_ekaina_uztaila_abuztua_iraila_urria_azaroa_abendua".split("_"),monthsShort:"urt._ots._mar._api._mai._eka._uzt._abu._ira._urr._aza._abe.".split("_"),monthsParseExact:!0,weekdays:"igandea_astelehena_asteartea_asteazkena_osteguna_ostirala_larunbata".split("_"),weekdaysShort:"ig._al._ar._az._og._ol._lr.".split("_"),weekdaysMin:"ig_al_ar_az_og_ol_lr".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY-MM-DD",LL:"YYYY[ko] MMMM[ren] D[a]",LLL:"YYYY[ko] MMMM[ren] D[a] HH:mm",LLLL:"dddd, YYYY[ko] MMMM[ren] D[a] HH:mm",l:"YYYY-M-D",ll:"YYYY[ko] MMM D[a]",lll:"YYYY[ko] MMM D[a] HH:mm",llll:"ddd, YYYY[ko] MMM D[a] HH:mm"},calendar:{sameDay:"[gaur] LT[etan]",nextDay:"[bihar] LT[etan]",nextWeek:"dddd LT[etan]",lastDay:"[atzo] LT[etan]",lastWeek:"[aurreko] dddd LT[etan]",sameElse:"L"},relativeTime:{future:"%s barru",past:"duela %s",s:"segundo batzuk",ss:"%d segundo",m:"minutu bat",mm:"%d minutu",h:"ordu bat",hh:"%d ordu",d:"egun bat",dd:"%d egun",M:"hilabete bat",MM:"%d hilabete",y:"urte bat",yy:"%d urte"},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}})})(n(30381))},76959:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"۱",2:"۲",3:"۳",4:"۴",5:"۵",6:"۶",7:"۷",8:"۸",9:"۹",0:"۰"},n={"۱":"1","۲":"2","۳":"3","۴":"4","۵":"5","۶":"6","۷":"7","۸":"8","۹":"9","۰":"0"};return e.defineLocale("fa",{months:"ژانویه_فوریه_مارس_آوریل_مه_ژوئن_ژوئیه_اوت_سپتامبر_اکتبر_نوامبر_دسامبر".split("_"),monthsShort:"ژانویه_فوریه_مارس_آوریل_مه_ژوئن_ژوئیه_اوت_سپتامبر_اکتبر_نوامبر_دسامبر".split("_"),weekdays:"یک‌شنبه_دوشنبه_سه‌شنبه_چهارشنبه_پنج‌شنبه_جمعه_شنبه".split("_"),weekdaysShort:"یک‌شنبه_دوشنبه_سه‌شنبه_چهارشنبه_پنج‌شنبه_جمعه_شنبه".split("_"),weekdaysMin:"ی_د_س_چ_پ_ج_ش".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},meridiemParse:/قبل از ظهر|بعد از ظهر/,isPM:function(e){return/بعد از ظهر/.test(e)},meridiem:function(e,t,n){return e<12?"قبل از ظهر":"بعد از ظهر"},calendar:{sameDay:"[امروز ساعت] LT",nextDay:"[فردا ساعت] LT",nextWeek:"dddd [ساعت] LT",lastDay:"[دیروز ساعت] LT",lastWeek:"dddd [پیش] [ساعت] LT",sameElse:"L"},relativeTime:{future:"در %s",past:"%s پیش",s:"چند ثانیه",ss:"%d ثانیه",m:"یک دقیقه",mm:"%d دقیقه",h:"یک ساعت",hh:"%d ساعت",d:"یک روز",dd:"%d روز",M:"یک ماه",MM:"%d ماه",y:"یک سال",yy:"%d سال"},preparse:function(e){return e.replace(/[۰-۹]/g,function(e){return n[e]}).replace(/،/g,",")},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]}).replace(/,/g,"،")},dayOfMonthOrdinalParse:/\d{1,2}م/,ordinal:"%dم",week:{dow:6,doy:12}})})(n(30381))},11897:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t="nolla yksi kaksi kolme nelj\xe4 viisi kuusi seitsem\xe4n kahdeksan yhdeks\xe4n".split(" "),n=["nolla","yhden","kahden","kolmen","nelj\xe4n","viiden","kuuden",t[7],t[8],t[9],];function r(e,t,n,r){var a="";switch(n){case"s":return r?"muutaman sekunnin":"muutama sekunti";case"ss":a=r?"sekunnin":"sekuntia";break;case"m":return r?"minuutin":"minuutti";case"mm":a=r?"minuutin":"minuuttia";break;case"h":return r?"tunnin":"tunti";case"hh":a=r?"tunnin":"tuntia";break;case"d":return r?"p\xe4iv\xe4n":"p\xe4iv\xe4";case"dd":a=r?"p\xe4iv\xe4n":"p\xe4iv\xe4\xe4";break;case"M":return r?"kuukauden":"kuukausi";case"MM":a=r?"kuukauden":"kuukautta";break;case"y":return r?"vuoden":"vuosi";case"yy":a=r?"vuoden":"vuotta"}return i(e,r)+" "+a}function i(e,r){return e<10?r?n[e]:t[e]:e}return e.defineLocale("fi",{months:"tammikuu_helmikuu_maaliskuu_huhtikuu_toukokuu_kes\xe4kuu_hein\xe4kuu_elokuu_syyskuu_lokakuu_marraskuu_joulukuu".split("_"),monthsShort:"tammi_helmi_maalis_huhti_touko_kes\xe4_hein\xe4_elo_syys_loka_marras_joulu".split("_"),weekdays:"sunnuntai_maanantai_tiistai_keskiviikko_torstai_perjantai_lauantai".split("_"),weekdaysShort:"su_ma_ti_ke_to_pe_la".split("_"),weekdaysMin:"su_ma_ti_ke_to_pe_la".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD.MM.YYYY",LL:"Do MMMM[ta] YYYY",LLL:"Do MMMM[ta] YYYY, [klo] HH.mm",LLLL:"dddd, Do MMMM[ta] YYYY, [klo] HH.mm",l:"D.M.YYYY",ll:"Do MMM YYYY",lll:"Do MMM YYYY, [klo] HH.mm",llll:"ddd, Do MMM YYYY, [klo] HH.mm"},calendar:{sameDay:"[t\xe4n\xe4\xe4n] [klo] LT",nextDay:"[huomenna] [klo] LT",nextWeek:"dddd [klo] LT",lastDay:"[eilen] [klo] LT",lastWeek:"[viime] dddd[na] [klo] LT",sameElse:"L"},relativeTime:{future:"%s p\xe4\xe4st\xe4",past:"%s sitten",s:r,ss:r,m:r,mm:r,h:r,hh:r,d:r,dd:r,M:r,MM:r,y:r,yy:r},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},42549:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("fil",{months:"Enero_Pebrero_Marso_Abril_Mayo_Hunyo_Hulyo_Agosto_Setyembre_Oktubre_Nobyembre_Disyembre".split("_"),monthsShort:"Ene_Peb_Mar_Abr_May_Hun_Hul_Ago_Set_Okt_Nob_Dis".split("_"),weekdays:"Linggo_Lunes_Martes_Miyerkules_Huwebes_Biyernes_Sabado".split("_"),weekdaysShort:"Lin_Lun_Mar_Miy_Huw_Biy_Sab".split("_"),weekdaysMin:"Li_Lu_Ma_Mi_Hu_Bi_Sab".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"MM/D/YYYY",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY HH:mm",LLLL:"dddd, MMMM DD, YYYY HH:mm"},calendar:{sameDay:"LT [ngayong araw]",nextDay:"[Bukas ng] LT",nextWeek:"LT [sa susunod na] dddd",lastDay:"LT [kahapon]",lastWeek:"LT [noong nakaraang] dddd",sameElse:"L"},relativeTime:{future:"sa loob ng %s",past:"%s ang nakalipas",s:"ilang segundo",ss:"%d segundo",m:"isang minuto",mm:"%d minuto",h:"isang oras",hh:"%d oras",d:"isang araw",dd:"%d araw",M:"isang buwan",MM:"%d buwan",y:"isang taon",yy:"%d taon"},dayOfMonthOrdinalParse:/\d{1,2}/,ordinal:function(e){return e},week:{dow:1,doy:4}})})(n(30381))},94694:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("fo",{months:"januar_februar_mars_apr\xedl_mai_juni_juli_august_september_oktober_november_desember".split("_"),monthsShort:"jan_feb_mar_apr_mai_jun_jul_aug_sep_okt_nov_des".split("_"),weekdays:"sunnudagur_m\xe1nadagur_t\xfdsdagur_mikudagur_h\xf3sdagur_fr\xedggjadagur_leygardagur".split("_"),weekdaysShort:"sun_m\xe1n_t\xfds_mik_h\xf3s_fr\xed_ley".split("_"),weekdaysMin:"su_m\xe1_t\xfd_mi_h\xf3_fr_le".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D. MMMM, YYYY HH:mm"},calendar:{sameDay:"[\xcd dag kl.] LT",nextDay:"[\xcd morgin kl.] LT",nextWeek:"dddd [kl.] LT",lastDay:"[\xcd gj\xe1r kl.] 
LT",lastWeek:"[s\xed\xf0stu] dddd [kl] LT",sameElse:"L"},relativeTime:{future:"um %s",past:"%s s\xed\xf0ani",s:"f\xe1 sekund",ss:"%d sekundir",m:"ein minuttur",mm:"%d minuttir",h:"ein t\xedmi",hh:"%d t\xedmar",d:"ein dagur",dd:"%d dagar",M:"ein m\xe1na\xf0ur",MM:"%d m\xe1na\xf0ir",y:"eitt \xe1r",yy:"%d \xe1r"},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},63049:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("fr-ca",{months:"janvier_f\xe9vrier_mars_avril_mai_juin_juillet_ao\xfbt_septembre_octobre_novembre_d\xe9cembre".split("_"),monthsShort:"janv._f\xe9vr._mars_avr._mai_juin_juil._ao\xfbt_sept._oct._nov._d\xe9c.".split("_"),monthsParseExact:!0,weekdays:"dimanche_lundi_mardi_mercredi_jeudi_vendredi_samedi".split("_"),weekdaysShort:"dim._lun._mar._mer._jeu._ven._sam.".split("_"),weekdaysMin:"di_lu_ma_me_je_ve_sa".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY-MM-DD",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[Aujourd’hui \xe0] LT",nextDay:"[Demain \xe0] LT",nextWeek:"dddd [\xe0] LT",lastDay:"[Hier \xe0] LT",lastWeek:"dddd [dernier \xe0] LT",sameElse:"L"},relativeTime:{future:"dans %s",past:"il y a %s",s:"quelques secondes",ss:"%d secondes",m:"une minute",mm:"%d minutes",h:"une heure",hh:"%d heures",d:"un jour",dd:"%d jours",M:"un mois",MM:"%d mois",y:"un an",yy:"%d ans"},dayOfMonthOrdinalParse:/\d{1,2}(er|e)/,ordinal:function(e,t){switch(t){default:case"M":case"Q":case"D":case"DDD":case"d":return e+(1===e?"er":"e");case"w":case"W":return e+(1===e?"re":"e")}}})})(n(30381))},52330:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("fr-ch",{months:"janvier_f\xe9vrier_mars_avril_mai_juin_juillet_ao\xfbt_septembre_octobre_novembre_d\xe9cembre".split("_"),monthsShort:"janv._f\xe9vr._mars_avr._mai_juin_juil._ao\xfbt_sept._oct._nov._d\xe9c.".split("_"),monthsParseExact:!0,weekdays:"dimanche_lundi_mardi_mercredi_jeudi_vendredi_samedi".split("_"),weekdaysShort:"dim._lun._mar._mer._jeu._ven._sam.".split("_"),weekdaysMin:"di_lu_ma_me_je_ve_sa".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[Aujourd’hui \xe0] LT",nextDay:"[Demain \xe0] LT",nextWeek:"dddd [\xe0] LT",lastDay:"[Hier \xe0] LT",lastWeek:"dddd [dernier \xe0] LT",sameElse:"L"},relativeTime:{future:"dans %s",past:"il y a %s",s:"quelques secondes",ss:"%d secondes",m:"une minute",mm:"%d minutes",h:"une heure",hh:"%d heures",d:"un jour",dd:"%d jours",M:"un mois",MM:"%d mois",y:"un an",yy:"%d ans"},dayOfMonthOrdinalParse:/\d{1,2}(er|e)/,ordinal:function(e,t){switch(t){default:case"M":case"Q":case"D":case"DDD":case"d":return e+(1===e?"er":"e");case"w":case"W":return e+(1===e?"re":"e")}},week:{dow:1,doy:4}})})(n(30381))},94470:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t=/^(janvier|février|mars|avril|mai|juin|juillet|août|septembre|octobre|novembre|décembre)/i,n=/(janv\.?|févr\.?|mars|avr\.?|mai|juin|juil\.?|août|sept\.?|oct\.?|nov\.?|déc\.?)/i,r=/(janv\.?|févr\.?|mars|avr\.?|mai|juin|juil\.?|août|sept\.?|oct\.?|nov\.?|déc\.?|janvier|février|mars|avril|mai|juin|juillet|août|septembre|octobre|novembre|décembre)/i,i=[/^janv/i,/^févr/i,/^mars/i,/^avr/i,/^mai/i,/^juin/i,/^juil/i,/^août/i,/^sept/i,/^oct/i,/^nov/i,/^déc/i,];return e.defineLocale("fr",{months:"janvier_f\xe9vrier_mars_avril_mai_juin_juillet_ao\xfbt_septembre_octobre_novembre_d\xe9cembre".split("_"),monthsShort:"janv._f\xe9vr._mars_avr._mai_juin_juil._ao\xfbt_sept._oct._nov._d\xe9c.".split("_"),monthsRegex:r,monthsShortRegex:r,monthsStrictRegex:t,monthsShortStrictRegex:n,monthsParse:i,longMonthsParse:i,shortMonthsParse:i,weekdays:"dimanche_lundi_mardi_mercredi_jeudi_vendredi_samedi".split("_"),weekdaysShort:"dim._lun._mar._mer._jeu._ven._sam.".split("_"),weekdaysMin:"di_lu_ma_me_je_ve_sa".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[Aujourd’hui \xe0] LT",nextDay:"[Demain \xe0] LT",nextWeek:"dddd [\xe0] LT",lastDay:"[Hier \xe0] LT",lastWeek:"dddd [dernier \xe0] LT",sameElse:"L"},relativeTime:{future:"dans %s",past:"il y a %s",s:"quelques secondes",ss:"%d secondes",m:"une minute",mm:"%d minutes",h:"une heure",hh:"%d heures",d:"un jour",dd:"%d jours",w:"une semaine",ww:"%d semaines",M:"un mois",MM:"%d mois",y:"un an",yy:"%d ans"},dayOfMonthOrdinalParse:/\d{1,2}(er|)/,ordinal:function(e,t){switch(t){case"D":return e+(1===e?"er":"");default:case"M":case"Q":case"DDD":case"d":return e+(1===e?"er":"e");case"w":case"W":return e+(1===e?"re":"e")}},week:{dow:1,doy:4}})})(n(30381))},5044:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t="jan._feb._mrt._apr._mai_jun._jul._aug._sep._okt._nov._des.".split("_"),n="jan_feb_mrt_apr_mai_jun_jul_aug_sep_okt_nov_des".split("_");return e.defineLocale("fy",{months:"jannewaris_febrewaris_maart_april_maaie_juny_july_augustus_septimber_oktober_novimber_desimber".split("_"),monthsShort:function(e,r){return e?/-MMM-/.test(r)?n[e.month()]:t[e.month()]:t},monthsParseExact:!0,weekdays:"snein_moandei_tiisdei_woansdei_tongersdei_freed_sneon".split("_"),weekdaysShort:"si._mo._ti._wo._to._fr._so.".split("_"),weekdaysMin:"Si_Mo_Ti_Wo_To_Fr_So".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD-MM-YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[hjoed om] LT",nextDay:"[moarn om] LT",nextWeek:"dddd [om] LT",lastDay:"[juster om] LT",lastWeek:"[\xf4fr\xfbne] dddd [om] LT",sameElse:"L"},relativeTime:{future:"oer %s",past:"%s lyn",s:"in pear sekonden",ss:"%d sekonden",m:"ien min\xfat",mm:"%d minuten",h:"ien oere",hh:"%d oeren",d:"ien dei",dd:"%d dagen",M:"ien moanne",MM:"%d moannen",y:"ien jier",yy:"%d jierren"},dayOfMonthOrdinalParse:/\d{1,2}(ste|de)/,ordinal:function(e){return e+(1===e||8===e||e>=20?"ste":"de")},week:{dow:1,doy:4}})})(n(30381))},29295:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t=["Ean\xe1ir","Feabhra","M\xe1rta","Aibre\xe1n","Bealtaine","Meitheamh","I\xfail","L\xfanasa","Me\xe1n F\xf3mhair","Deireadh F\xf3mhair","Samhain","Nollaig",],n=["Ean","Feabh","M\xe1rt","Aib","Beal","Meith","I\xfail","L\xfan","M.F.","D.F.","Samh","Noll",],r=["D\xe9 Domhnaigh","D\xe9 Luain","D\xe9 M\xe1irt","D\xe9 C\xe9adaoin","D\xe9ardaoin","D\xe9 hAoine","D\xe9 Sathairn",],i=["Domh","Luan","M\xe1irt","C\xe9ad","D\xe9ar","Aoine","Sath"],a=["Do","Lu","M\xe1","C\xe9","D\xe9","A","Sa"];return e.defineLocale("ga",{months:t,monthsShort:n,monthsParseExact:!0,weekdays:r,weekdaysShort:i,weekdaysMin:a,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Inniu ag] LT",nextDay:"[Am\xe1rach ag] LT",nextWeek:"dddd [ag] LT",lastDay:"[Inn\xe9 ag] LT",lastWeek:"dddd [seo caite] [ag] LT",sameElse:"L"},relativeTime:{future:"i %s",past:"%s \xf3 shin",s:"c\xfapla soicind",ss:"%d soicind",m:"n\xf3im\xe9ad",mm:"%d n\xf3im\xe9ad",h:"uair an chloig",hh:"%d uair an chloig",d:"l\xe1",dd:"%d l\xe1",M:"m\xed",MM:"%d m\xedonna",y:"bliain",yy:"%d bliain"},dayOfMonthOrdinalParse:/\d{1,2}(d|na|mh)/,ordinal:function(e){var t=1===e?"d":e%10==2?"na":"mh";return e+t},week:{dow:1,doy:4}})})(n(30381))},2101:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t=["Am Faoilleach","An Gearran","Am M\xe0rt","An Giblean","An C\xe8itean","An t-\xd2gmhios","An t-Iuchar","An L\xf9nastal","An t-Sultain","An D\xe0mhair","An t-Samhain","An D\xf9bhlachd",],n=["Faoi","Gear","M\xe0rt","Gibl","C\xe8it","\xd2gmh","Iuch","L\xf9n","Sult","D\xe0mh","Samh","D\xf9bh",],r=["Did\xf2mhnaich","Diluain","Dim\xe0irt","Diciadain","Diardaoin","Dihaoine","Disathairne",],i=["Did","Dil","Dim","Dic","Dia","Dih","Dis"],a=["D\xf2","Lu","M\xe0","Ci","Ar","Ha","Sa"];return e.defineLocale("gd",{months:t,monthsShort:n,monthsParseExact:!0,weekdays:r,weekdaysShort:i,weekdaysMin:a,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[An-diugh aig] LT",nextDay:"[A-m\xe0ireach aig] LT",nextWeek:"dddd [aig] LT",lastDay:"[An-d\xe8 aig] LT",lastWeek:"dddd [seo chaidh] [aig] LT",sameElse:"L"},relativeTime:{future:"ann an %s",past:"bho chionn %s",s:"beagan diogan",ss:"%d diogan",m:"mionaid",mm:"%d mionaidean",h:"uair",hh:"%d uairean",d:"latha",dd:"%d latha",M:"m\xecos",MM:"%d m\xecosan",y:"bliadhna",yy:"%d bliadhna"},dayOfMonthOrdinalParse:/\d{1,2}(d|na|mh)/,ordinal:function(e){var t=1===e?"d":e%10==2?"na":"mh";return e+t},week:{dow:1,doy:4}})})(n(30381))},38794:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("gl",{months:"xaneiro_febreiro_marzo_abril_maio_xu\xf1o_xullo_agosto_setembro_outubro_novembro_decembro".split("_"),monthsShort:"xan._feb._mar._abr._mai._xu\xf1._xul._ago._set._out._nov._dec.".split("_"),monthsParseExact:!0,weekdays:"domingo_luns_martes_m\xe9rcores_xoves_venres_s\xe1bado".split("_"),weekdaysShort:"dom._lun._mar._m\xe9r._xov._ven._s\xe1b.".split("_"),weekdaysMin:"do_lu_ma_m\xe9_xo_ve_s\xe1".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD/MM/YYYY",LL:"D [de] MMMM [de] YYYY",LLL:"D [de] MMMM [de] YYYY H:mm",LLLL:"dddd, D [de] MMMM [de] YYYY 
H:mm"},calendar:{sameDay:function(){return"[hoxe "+(1!==this.hours()?"\xe1s":"\xe1")+"] LT"},nextDay:function(){return"[ma\xf1\xe1 "+(1!==this.hours()?"\xe1s":"\xe1")+"] LT"},nextWeek:function(){return"dddd ["+(1!==this.hours()?"\xe1s":"a")+"] LT"},lastDay:function(){return"[onte "+(1!==this.hours()?"\xe1":"a")+"] LT"},lastWeek:function(){return"[o] dddd [pasado "+(1!==this.hours()?"\xe1s":"a")+"] LT"},sameElse:"L"},relativeTime:{future:function(e){return 0===e.indexOf("un")?"n"+e:"en "+e},past:"hai %s",s:"uns segundos",ss:"%d segundos",m:"un minuto",mm:"%d minutos",h:"unha hora",hh:"%d horas",d:"un d\xeda",dd:"%d d\xedas",M:"un mes",MM:"%d meses",y:"un ano",yy:"%d anos"},dayOfMonthOrdinalParse:/\d{1,2}º/,ordinal:"%d\xba",week:{dow:1,doy:4}})})(n(30381))},27884:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +function t(e,t,n,r){var i={s:["थोडया सॅकंडांनी","थोडे सॅकंड"],ss:[e+" सॅकंडांनी",e+" सॅकंड"],m:["एका मिणटान","एक मिनूट"],mm:[e+" मिणटांनी",e+" मिणटां"],h:["एका वरान","एक वर"],hh:[e+" वरांनी",e+" वरां"],d:["एका दिसान","एक दीस"],dd:[e+" दिसांनी",e+" दीस"],M:["एका म्हयन्यान","एक म्हयनो"],MM:[e+" म्हयन्यानी",e+" म्हयने"],y:["एका वर्सान","एक वर्स"],yy:[e+" वर्सांनी",e+" वर्सां"]};return r?i[n][0]:i[n][1]}return e.defineLocale("gom-deva",{months:{standalone:"जानेवारी_फेब्रुवारी_मार्च_एप्रील_मे_जून_जुलय_ऑगस्ट_सप्टेंबर_ऑक्टोबर_नोव्हेंबर_डिसेंबर".split("_"),format:"जानेवारीच्या_फेब्रुवारीच्या_मार्चाच्या_एप्रीलाच्या_मेयाच्या_जूनाच्या_जुलयाच्या_ऑगस्टाच्या_सप्टेंबराच्या_ऑक्टोबराच्या_नोव्हेंबराच्या_डिसेंबराच्या".split("_"),isFormat:/MMMM(\s)+D[oD]?/},monthsShort:"जाने._फेब्रु._मार्च_एप्री._मे_जून_जुल._ऑग._सप्टें._ऑक्टो._नोव्हें._डिसें.".split("_"),monthsParseExact:!0,weekdays:"आयतार_सोमार_मंगळार_बुधवार_बिरेस्तार_सुक्रार_शेनवार".split("_"),weekdaysShort:"आयत._सोम._मंगळ._बुध._ब्रेस्त._सुक्र._शेन.".split("_"),weekdaysMin:"आ_सो_मं_बु_ब्रे_सु_शे".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"A h:mm [वाजतां]",LTS:"A h:mm:ss 
[वाजतां]",L:"DD-MM-YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY A h:mm [वाजतां]",LLLL:"dddd, MMMM Do, YYYY, A h:mm [वाजतां]",llll:"ddd, D MMM YYYY, A h:mm [वाजतां]"},calendar:{sameDay:"[आयज] LT",nextDay:"[फाल्यां] LT",nextWeek:"[फुडलो] dddd[,] LT",lastDay:"[काल] LT",lastWeek:"[फाटलो] dddd[,] LT",sameElse:"L"},relativeTime:{future:"%s",past:"%s आदीं",s:t,ss:t,m:t,mm:t,h:t,hh:t,d:t,dd:t,M:t,MM:t,y:t,yy:t},dayOfMonthOrdinalParse:/\d{1,2}(वेर)/,ordinal:function(e,t){return"D"===t?e+"वेर":e},week:{dow:0,doy:3},meridiemParse:/राती|सकाळीं|दनपारां|सांजे/,meridiemHour:function(e,t){return(12===e&&(e=0),"राती"===t)?e<4?e:e+12:"सकाळीं"===t?e:"दनपारां"===t?e>12?e:e+12:"सांजे"===t?e+12:void 0},meridiem:function(e,t,n){return e<4?"राती":e<12?"सकाळीं":e<16?"दनपारां":e<20?"सांजे":"राती"}})})(n(30381))},23168:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +function t(e,t,n,r){var i={s:["thoddea sekondamni","thodde sekond"],ss:[e+" sekondamni",e+" sekond"],m:["eka mintan","ek minut"],mm:[e+" mintamni",e+" mintam"],h:["eka voran","ek vor"],hh:[e+" voramni",e+" voram"],d:["eka disan","ek dis"],dd:[e+" disamni",e+" dis"],M:["eka mhoinean","ek mhoino"],MM:[e+" mhoineamni",e+" mhoine"],y:["eka vorsan","ek voros"],yy:[e+" vorsamni",e+" vorsam"]};return r?i[n][0]:i[n][1]}return e.defineLocale("gom-latn",{months:{standalone:"Janer_Febrer_Mars_Abril_Mai_Jun_Julai_Agost_Setembr_Otubr_Novembr_Dezembr".split("_"),format:"Janerachea_Febrerachea_Marsachea_Abrilachea_Maiachea_Junachea_Julaiachea_Agostachea_Setembrachea_Otubrachea_Novembrachea_Dezembrachea".split("_"),isFormat:/MMMM(\s)+D[oD]?/},monthsShort:"Jan._Feb._Mars_Abr._Mai_Jun_Jul._Ago._Set._Otu._Nov._Dez.".split("_"),monthsParseExact:!0,weekdays:"Aitar_Somar_Mongllar_Budhvar_Birestar_Sukrar_Son'var".split("_"),weekdaysShort:"Ait._Som._Mon._Bud._Bre._Suk._Son.".split("_"),weekdaysMin:"Ai_Sm_Mo_Bu_Br_Su_Sn".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"A h:mm [vazta]",LTS:"A h:mm:ss 
[vazta]",L:"DD-MM-YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY A h:mm [vazta]",LLLL:"dddd, MMMM Do, YYYY, A h:mm [vazta]",llll:"ddd, D MMM YYYY, A h:mm [vazta]"},calendar:{sameDay:"[Aiz] LT",nextDay:"[Faleam] LT",nextWeek:"[Fuddlo] dddd[,] LT",lastDay:"[Kal] LT",lastWeek:"[Fattlo] dddd[,] LT",sameElse:"L"},relativeTime:{future:"%s",past:"%s adim",s:t,ss:t,m:t,mm:t,h:t,hh:t,d:t,dd:t,M:t,MM:t,y:t,yy:t},dayOfMonthOrdinalParse:/\d{1,2}(er)/,ordinal:function(e,t){return"D"===t?e+"er":e},week:{dow:0,doy:3},meridiemParse:/rati|sokallim|donparam|sanje/,meridiemHour:function(e,t){return(12===e&&(e=0),"rati"===t)?e<4?e:e+12:"sokallim"===t?e:"donparam"===t?e>12?e:e+12:"sanje"===t?e+12:void 0},meridiem:function(e,t,n){return e<4?"rati":e<12?"sokallim":e<16?"donparam":e<20?"sanje":"rati"}})})(n(30381))},95349:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t={1:"૧",2:"૨",3:"૩",4:"૪",5:"૫",6:"૬",7:"૭",8:"૮",9:"૯",0:"૦"},n={"૧":"1","૨":"2","૩":"3","૪":"4","૫":"5","૬":"6","૭":"7","૮":"8","૯":"9","૦":"0"};return e.defineLocale("gu",{months:"જાન્યુઆરી_ફેબ્રુઆરી_માર્ચ_એપ્રિલ_મે_જૂન_જુલાઈ_ઑગસ્ટ_સપ્ટેમ્બર_ઑક્ટ્બર_નવેમ્બર_ડિસેમ્બર".split("_"),monthsShort:"જાન્યુ._ફેબ્રુ._માર્ચ_એપ્રિ._મે_જૂન_જુલા._ઑગ._સપ્ટે._ઑક્ટ્._નવે._ડિસે.".split("_"),monthsParseExact:!0,weekdays:"રવિવાર_સોમવાર_મંગળવાર_બુધ્વાર_ગુરુવાર_શુક્રવાર_શનિવાર".split("_"),weekdaysShort:"રવિ_સોમ_મંગળ_બુધ્_ગુરુ_શુક્ર_શનિ".split("_"),weekdaysMin:"ર_સો_મં_બુ_ગુ_શુ_શ".split("_"),longDateFormat:{LT:"A h:mm વાગ્યે",LTS:"A h:mm:ss વાગ્યે",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm વાગ્યે",LLLL:"dddd, D MMMM YYYY, A h:mm વાગ્યે"},calendar:{sameDay:"[આજ] LT",nextDay:"[કાલે] LT",nextWeek:"dddd, LT",lastDay:"[ગઇકાલે] LT",lastWeek:"[પાછલા] dddd, LT",sameElse:"L"},relativeTime:{future:"%s મા",past:"%s પહેલા",s:"અમુક પળો",ss:"%d સેકંડ",m:"એક મિનિટ",mm:"%d મિનિટ",h:"એક કલાક",hh:"%d કલાક",d:"એક દિવસ",dd:"%d દિવસ",M:"એક મહિનો",MM:"%d મહિનો",y:"એક વર્ષ",yy:"%d 
વર્ષ"},preparse:function(e){return e.replace(/[૧૨૩૪૫૬૭૮૯૦]/g,function(e){return n[e]})},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]})},meridiemParse:/રાત|બપોર|સવાર|સાંજ/,meridiemHour:function(e,t){return(12===e&&(e=0),"રાત"===t)?e<4?e:e+12:"સવાર"===t?e:"બપોર"===t?e>=10?e:e+12:"સાંજ"===t?e+12:void 0},meridiem:function(e,t,n){return e<4?"રાત":e<10?"સવાર":e<17?"બપોર":e<20?"સાંજ":"રાત"},week:{dow:0,doy:6}})})(n(30381))},24206:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("he",{months:"ינואר_פברואר_מרץ_אפריל_מאי_יוני_יולי_אוגוסט_ספטמבר_אוקטובר_נובמבר_דצמבר".split("_"),monthsShort:"ינו׳_פבר׳_מרץ_אפר׳_מאי_יוני_יולי_אוג׳_ספט׳_אוק׳_נוב׳_דצמ׳".split("_"),weekdays:"ראשון_שני_שלישי_רביעי_חמישי_שישי_שבת".split("_"),weekdaysShort:"א׳_ב׳_ג׳_ד׳_ה׳_ו׳_ש׳".split("_"),weekdaysMin:"א_ב_ג_ד_ה_ו_ש".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D [ב]MMMM YYYY",LLL:"D [ב]MMMM YYYY HH:mm",LLLL:"dddd, D [ב]MMMM YYYY HH:mm",l:"D/M/YYYY",ll:"D MMM YYYY",lll:"D MMM YYYY HH:mm",llll:"ddd, D MMM YYYY HH:mm"},calendar:{sameDay:"[היום ב־]LT",nextDay:"[מחר ב־]LT",nextWeek:"dddd [בשעה] LT",lastDay:"[אתמול ב־]LT",lastWeek:"[ביום] dddd [האחרון בשעה] LT",sameElse:"L"},relativeTime:{future:"בעוד %s",past:"לפני %s",s:"מספר שניות",ss:"%d שניות",m:"דקה",mm:"%d דקות",h:"שעה",hh:function(e){return 2===e?"שעתיים":e+" שעות"},d:"יום",dd:function(e){return 2===e?"יומיים":e+" ימים"},M:"חודש",MM:function(e){return 2===e?"חודשיים":e+" חודשים"},y:"שנה",yy:function(e){return 2===e?"שנתיים":e%10==0&&10!==e?e+" שנה":e+" שנים"}},meridiemParse:/אחה"צ|לפנה"צ|אחרי הצהריים|לפני הצהריים|לפנות בוקר|בבוקר|בערב/i,isPM:function(e){return/^(אחה"צ|אחרי הצהריים|בערב)$/.test(e)},meridiem:function(e,t,n){return e<5?"לפנות בוקר":e<10?"בבוקר":e<12?n?'לפנה"צ':"לפני הצהריים":e<18?n?'אחה"צ':"אחרי הצהריים":"בערב"}})})(n(30381))},30094:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"१",2:"२",3:"३",4:"४",5:"५",6:"६",7:"७",8:"८",9:"९",0:"०"},n={"१":"1","२":"2","३":"3","४":"4","५":"5","६":"6","७":"7","८":"8","९":"9","०":"0"},r=[/^जन/i,/^फ़र|फर/i,/^मार्च/i,/^अप्रै/i,/^मई/i,/^जून/i,/^जुल/i,/^अग/i,/^सितं|सित/i,/^अक्टू/i,/^नव|नवं/i,/^दिसं|दिस/i,],i=[/^जन/i,/^फ़र/i,/^मार्च/i,/^अप्रै/i,/^मई/i,/^जून/i,/^जुल/i,/^अग/i,/^सित/i,/^अक्टू/i,/^नव/i,/^दिस/i,];return e.defineLocale("hi",{months:{format:"जनवरी_फ़रवरी_मार्च_अप्रैल_मई_जून_जुलाई_अगस्त_सितम्बर_अक्टूबर_नवम्बर_दिसम्बर".split("_"),standalone:"जनवरी_फरवरी_मार्च_अप्रैल_मई_जून_जुलाई_अगस्त_सितंबर_अक्टूबर_नवंबर_दिसंबर".split("_")},monthsShort:"जन._फ़र._मार्च_अप्रै._मई_जून_जुल._अग._सित._अक्टू._नव._दिस.".split("_"),weekdays:"रविवार_सोमवार_मंगलवार_बुधवार_गुरूवार_शुक्रवार_शनिवार".split("_"),weekdaysShort:"रवि_सोम_मंगल_बुध_गुरू_शुक्र_शनि".split("_"),weekdaysMin:"र_सो_मं_बु_गु_शु_श".split("_"),longDateFormat:{LT:"A h:mm बजे",LTS:"A h:mm:ss बजे",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm बजे",LLLL:"dddd, D MMMM YYYY, A h:mm बजे"},monthsParse:r,longMonthsParse:r,shortMonthsParse:i,monthsRegex:/^(जनवरी|जन\.?|फ़रवरी|फरवरी|फ़र\.?|मार्च?|अप्रैल|अप्रै\.?|मई?|जून?|जुलाई|जुल\.?|अगस्त|अग\.?|सितम्बर|सितंबर|सित\.?|अक्टूबर|अक्टू\.?|नवम्बर|नवंबर|नव\.?|दिसम्बर|दिसंबर|दिस\.?)/i,monthsShortRegex:/^(जनवरी|जन\.?|फ़रवरी|फरवरी|फ़र\.?|मार्च?|अप्रैल|अप्रै\.?|मई?|जून?|जुलाई|जुल\.?|अगस्त|अग\.?|सितम्बर|सितंबर|सित\.?|अक्टूबर|अक्टू\.?|नवम्बर|नवंबर|नव\.?|दिसम्बर|दिसंबर|दिस\.?)/i,monthsStrictRegex:/^(जनवरी?|फ़रवरी|फरवरी?|मार्च?|अप्रैल?|मई?|जून?|जुलाई?|अगस्त?|सितम्बर|सितंबर|सित?\.?|अक्टूबर|अक्टू\.?|नवम्बर|नवंबर?|दिसम्बर|दिसंबर?)/i,monthsShortStrictRegex:/^(जन\.?|फ़र\.?|मार्च?|अप्रै\.?|मई?|जून?|जुल\.?|अग\.?|सित\.?|अक्टू\.?|नव\.?|दिस\.?)/i,calendar:{sameDay:"[आज] LT",nextDay:"[कल] LT",nextWeek:"dddd, LT",lastDay:"[कल] LT",lastWeek:"[पिछले] dddd, LT",sameElse:"L"},relativeTime:{future:"%s में",past:"%s पहले",s:"कुछ ही क्षण",ss:"%d सेकंड",m:"एक मिनट",mm:"%d मिनट",h:"एक घंटा",hh:"%d घंटे",d:"एक 
दिन",dd:"%d दिन",M:"एक महीने",MM:"%d महीने",y:"एक वर्ष",yy:"%d वर्ष"},preparse:function(e){return e.replace(/[१२३४५६७८९०]/g,function(e){return n[e]})},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]})},meridiemParse:/रात|सुबह|दोपहर|शाम/,meridiemHour:function(e,t){return(12===e&&(e=0),"रात"===t)?e<4?e:e+12:"सुबह"===t?e:"दोपहर"===t?e>=10?e:e+12:"शाम"===t?e+12:void 0},meridiem:function(e,t,n){return e<4?"रात":e<10?"सुबह":e<17?"दोपहर":e<20?"शाम":"रात"},week:{dow:0,doy:6}})})(n(30381))},30316:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +function t(e,t,n){var r=e+" ";switch(n){case"ss":return 1===e?r+="sekunda":2===e||3===e||4===e?r+="sekunde":r+="sekundi",r;case"m":return t?"jedna minuta":"jedne minute";case"mm":return 1===e?r+="minuta":2===e||3===e||4===e?r+="minute":r+="minuta",r;case"h":return t?"jedan sat":"jednog sata";case"hh":return 1===e?r+="sat":2===e||3===e||4===e?r+="sata":r+="sati",r;case"dd":return 1===e?r+="dan":r+="dana",r;case"MM":return 1===e?r+="mjesec":2===e||3===e||4===e?r+="mjeseca":r+="mjeseci",r;case"yy":return 1===e?r+="godina":2===e||3===e||4===e?r+="godine":r+="godina",r}}return e.defineLocale("hr",{months:{format:"siječnja_veljače_ožujka_travnja_svibnja_lipnja_srpnja_kolovoza_rujna_listopada_studenoga_prosinca".split("_"),standalone:"siječanj_veljača_ožujak_travanj_svibanj_lipanj_srpanj_kolovoz_rujan_listopad_studeni_prosinac".split("_")},monthsShort:"sij._velj._ožu._tra._svi._lip._srp._kol._ruj._lis._stu._pro.".split("_"),monthsParseExact:!0,weekdays:"nedjelja_ponedjeljak_utorak_srijeda_četvrtak_petak_subota".split("_"),weekdaysShort:"ned._pon._uto._sri._čet._pet._sub.".split("_"),weekdaysMin:"ne_po_ut_sr_če_pe_su".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"Do MMMM YYYY",LLL:"Do MMMM YYYY H:mm",LLLL:"dddd, Do MMMM YYYY H:mm"},calendar:{sameDay:"[danas u] LT",nextDay:"[sutra u] LT",nextWeek:function(){switch(this.day()){case 
0:return"[u] [nedjelju] [u] LT";case 3:return"[u] [srijedu] [u] LT";case 6:return"[u] [subotu] [u] LT";case 1:case 2:case 4:case 5:return"[u] dddd [u] LT"}},lastDay:"[jučer u] LT",lastWeek:function(){switch(this.day()){case 0:return"[prošlu] [nedjelju] [u] LT";case 3:return"[prošlu] [srijedu] [u] LT";case 6:return"[prošle] [subote] [u] LT";case 1:case 2:case 4:case 5:return"[prošli] dddd [u] LT"}},sameElse:"L"},relativeTime:{future:"za %s",past:"prije %s",s:"par sekundi",ss:t,m:t,mm:t,h:t,hh:t,d:"dan",dd:t,M:"mjesec",MM:t,y:"godinu",yy:t},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}})})(n(30381))},22138:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t="vas\xe1rnap h\xe9tfőn kedden szerd\xe1n cs\xfct\xf6rt\xf6k\xf6n p\xe9nteken szombaton".split(" ");function n(e,t,n,r){var i=e;switch(n){case"s":return r||t?"n\xe9h\xe1ny m\xe1sodperc":"n\xe9h\xe1ny m\xe1sodperce";case"ss":return i+(r||t)?" m\xe1sodperc":" m\xe1sodperce";case"m":return"egy"+(r||t?" perc":" perce");case"mm":return i+(r||t?" perc":" perce");case"h":return"egy"+(r||t?" \xf3ra":" \xf3r\xe1ja");case"hh":return i+(r||t?" \xf3ra":" \xf3r\xe1ja");case"d":return"egy"+(r||t?" nap":" napja");case"dd":return i+(r||t?" nap":" napja");case"M":return"egy"+(r||t?" h\xf3nap":" h\xf3napja");case"MM":return i+(r||t?" h\xf3nap":" h\xf3napja");case"y":return"egy"+(r||t?" \xe9v":" \xe9ve");case"yy":return i+(r||t?" 
\xe9v":" \xe9ve")}return""}function r(e){return(e?"":"[m\xfalt] ")+"["+t[this.day()]+"] LT[-kor]"}return e.defineLocale("hu",{months:"janu\xe1r_febru\xe1r_m\xe1rcius_\xe1prilis_m\xe1jus_j\xfanius_j\xfalius_augusztus_szeptember_okt\xf3ber_november_december".split("_"),monthsShort:"jan._feb._m\xe1rc._\xe1pr._m\xe1j._j\xfan._j\xfal._aug._szept._okt._nov._dec.".split("_"),monthsParseExact:!0,weekdays:"vas\xe1rnap_h\xe9tfő_kedd_szerda_cs\xfct\xf6rt\xf6k_p\xe9ntek_szombat".split("_"),weekdaysShort:"vas_h\xe9t_kedd_sze_cs\xfct_p\xe9n_szo".split("_"),weekdaysMin:"v_h_k_sze_cs_p_szo".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"YYYY.MM.DD.",LL:"YYYY. MMMM D.",LLL:"YYYY. MMMM D. H:mm",LLLL:"YYYY. MMMM D., dddd H:mm"},meridiemParse:/de|du/i,isPM:function(e){return"u"===e.charAt(1).toLowerCase()},meridiem:function(e,t,n){return e<12?!0===n?"de":"DE":!0===n?"du":"DU"},calendar:{sameDay:"[ma] LT[-kor]",nextDay:"[holnap] LT[-kor]",nextWeek:function(){return r.call(this,!0)},lastDay:"[tegnap] LT[-kor]",lastWeek:function(){return r.call(this,!1)},sameElse:"L"},relativeTime:{future:"%s m\xfalva",past:"%s",s:n,ss:n,m:n,mm:n,h:n,hh:n,d:n,dd:n,M:n,MM:n,y:n,yy:n},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},11423:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("hy-am",{months:{format:"հունվարի_փետրվարի_մարտի_ապրիլի_մայիսի_հունիսի_հուլիսի_օգոստոսի_սեպտեմբերի_հոկտեմբերի_նոյեմբերի_դեկտեմբերի".split("_"),standalone:"հունվար_փետրվար_մարտ_ապրիլ_մայիս_հունիս_հուլիս_օգոստոս_սեպտեմբեր_հոկտեմբեր_նոյեմբեր_դեկտեմբեր".split("_")},monthsShort:"հնվ_փտր_մրտ_ապր_մյս_հնս_հլս_օգս_սպտ_հկտ_նմբ_դկտ".split("_"),weekdays:"կիրակի_երկուշաբթի_երեքշաբթի_չորեքշաբթի_հինգշաբթի_ուրբաթ_շաբաթ".split("_"),weekdaysShort:"կրկ_երկ_երք_չրք_հնգ_ուրբ_շբթ".split("_"),weekdaysMin:"կրկ_երկ_երք_չրք_հնգ_ուրբ_շբթ".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY թ.",LLL:"D MMMM YYYY թ., HH:mm",LLLL:"dddd, D MMMM YYYY թ., 
HH:mm"},calendar:{sameDay:"[այսօր] LT",nextDay:"[վաղը] LT",lastDay:"[երեկ] LT",nextWeek:function(){return"dddd [օրը ժամը] LT"},lastWeek:function(){return"[անցած] dddd [օրը ժամը] LT"},sameElse:"L"},relativeTime:{future:"%s հետո",past:"%s առաջ",s:"մի քանի վայրկյան",ss:"%d վայրկյան",m:"րոպե",mm:"%d րոպե",h:"ժամ",hh:"%d ժամ",d:"օր",dd:"%d օր",M:"ամիս",MM:"%d ամիս",y:"տարի",yy:"%d տարի"},meridiemParse:/գիշերվա|առավոտվա|ցերեկվա|երեկոյան/,isPM:function(e){return/^(ցերեկվա|երեկոյան)$/.test(e)},meridiem:function(e){return e<4?"գիշերվա":e<12?"առավոտվա":e<17?"ցերեկվա":"երեկոյան"},dayOfMonthOrdinalParse:/\d{1,2}|\d{1,2}-(ին|րդ)/,ordinal:function(e,t){switch(t){case"DDD":case"w":case"W":case"DDDo":if(1===e)return e+"-ին";return e+"-րդ";default:return e}},week:{dow:1,doy:7}})})(n(30381))},29218:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("id",{months:"Januari_Februari_Maret_April_Mei_Juni_Juli_Agustus_September_Oktober_November_Desember".split("_"),monthsShort:"Jan_Feb_Mar_Apr_Mei_Jun_Jul_Agt_Sep_Okt_Nov_Des".split("_"),weekdays:"Minggu_Senin_Selasa_Rabu_Kamis_Jumat_Sabtu".split("_"),weekdaysShort:"Min_Sen_Sel_Rab_Kam_Jum_Sab".split("_"),weekdaysMin:"Mg_Sn_Sl_Rb_Km_Jm_Sb".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY [pukul] HH.mm",LLLL:"dddd, D MMMM YYYY [pukul] HH.mm"},meridiemParse:/pagi|siang|sore|malam/,meridiemHour:function(e,t){return(12===e&&(e=0),"pagi"===t)?e:"siang"===t?e>=11?e:e+12:"sore"===t||"malam"===t?e+12:void 0},meridiem:function(e,t,n){return e<11?"pagi":e<15?"siang":e<19?"sore":"malam"},calendar:{sameDay:"[Hari ini pukul] LT",nextDay:"[Besok pukul] LT",nextWeek:"dddd [pukul] LT",lastDay:"[Kemarin pukul] LT",lastWeek:"dddd [lalu pukul] LT",sameElse:"L"},relativeTime:{future:"dalam %s",past:"%s yang lalu",s:"beberapa detik",ss:"%d detik",m:"semenit",mm:"%d menit",h:"sejam",hh:"%d jam",d:"sehari",dd:"%d hari",M:"sebulan",MM:"%d bulan",y:"setahun",yy:"%d 
tahun"},week:{dow:0,doy:6}})})(n(30381))},90135:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +function t(e){if(e%100==11);else if(e%10==1)return!1;return!0}function n(e,n,r,i){var a=e+" ";switch(r){case"s":return n||i?"nokkrar sek\xfandur":"nokkrum sek\xfandum";case"ss":if(t(e))return a+(n||i?"sek\xfandur":"sek\xfandum");return a+"sek\xfanda";case"m":return n?"m\xedn\xfata":"m\xedn\xfatu";case"mm":if(t(e))return a+(n||i?"m\xedn\xfatur":"m\xedn\xfatum");if(n)return a+"m\xedn\xfata";return a+"m\xedn\xfatu";case"hh":if(t(e))return a+(n||i?"klukkustundir":"klukkustundum");return a+"klukkustund";case"d":if(n)return"dagur";return i?"dag":"degi";case"dd":if(t(e)){if(n)return a+"dagar";return a+(i?"daga":"d\xf6gum")}if(n)return a+"dagur";return a+(i?"dag":"degi");case"M":if(n)return"m\xe1nu\xf0ur";return i?"m\xe1nu\xf0":"m\xe1nu\xf0i";case"MM":if(t(e)){if(n)return a+"m\xe1nu\xf0ir";return a+(i?"m\xe1nu\xf0i":"m\xe1nu\xf0um")}if(n)return a+"m\xe1nu\xf0ur";return a+(i?"m\xe1nu\xf0":"m\xe1nu\xf0i");case"y":return n||i?"\xe1r":"\xe1ri";case"yy":if(t(e))return a+(n||i?"\xe1r":"\xe1rum");return a+(n||i?"\xe1r":"\xe1ri")}}return e.defineLocale("is",{months:"jan\xfaar_febr\xfaar_mars_apr\xedl_ma\xed_j\xfan\xed_j\xfal\xed_\xe1g\xfast_september_okt\xf3ber_n\xf3vember_desember".split("_"),monthsShort:"jan_feb_mar_apr_ma\xed_j\xfan_j\xfal_\xe1g\xfa_sep_okt_n\xf3v_des".split("_"),weekdays:"sunnudagur_m\xe1nudagur_\xferi\xf0judagur_mi\xf0vikudagur_fimmtudagur_f\xf6studagur_laugardagur".split("_"),weekdaysShort:"sun_m\xe1n_\xferi_mi\xf0_fim_f\xf6s_lau".split("_"),weekdaysMin:"Su_M\xe1_\xder_Mi_Fi_F\xf6_La".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY [kl.] H:mm",LLLL:"dddd, D. MMMM YYYY [kl.] H:mm"},calendar:{sameDay:"[\xed dag kl.] LT",nextDay:"[\xe1 morgun kl.] LT",nextWeek:"dddd [kl.] LT",lastDay:"[\xed g\xe6r kl.] LT",lastWeek:"[s\xed\xf0asta] dddd [kl.] 
LT",sameElse:"L"},relativeTime:{future:"eftir %s",past:"fyrir %s s\xed\xf0an",s:n,ss:n,m:n,mm:n,h:"klukkustund",hh:n,d:n,dd:n,M:n,MM:n,y:n,yy:n},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},10150:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("it-ch",{months:"gennaio_febbraio_marzo_aprile_maggio_giugno_luglio_agosto_settembre_ottobre_novembre_dicembre".split("_"),monthsShort:"gen_feb_mar_apr_mag_giu_lug_ago_set_ott_nov_dic".split("_"),weekdays:"domenica_luned\xec_marted\xec_mercoled\xec_gioved\xec_venerd\xec_sabato".split("_"),weekdaysShort:"dom_lun_mar_mer_gio_ven_sab".split("_"),weekdaysMin:"do_lu_ma_me_gi_ve_sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[Oggi alle] LT",nextDay:"[Domani alle] LT",nextWeek:"dddd [alle] LT",lastDay:"[Ieri alle] LT",lastWeek:function(){return 0===this.day()?"[la scorsa] dddd [alle] LT":"[lo scorso] dddd [alle] LT"},sameElse:"L"},relativeTime:{future:function(e){return(/^[0-9].+$/.test(e)?"tra":"in")+" "+e},past:"%s fa",s:"alcuni secondi",ss:"%d secondi",m:"un minuto",mm:"%d minuti",h:"un'ora",hh:"%d ore",d:"un giorno",dd:"%d giorni",M:"un mese",MM:"%d mesi",y:"un anno",yy:"%d anni"},dayOfMonthOrdinalParse:/\d{1,2}º/,ordinal:"%d\xba",week:{dow:1,doy:4}})})(n(30381))},90626:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("it",{months:"gennaio_febbraio_marzo_aprile_maggio_giugno_luglio_agosto_settembre_ottobre_novembre_dicembre".split("_"),monthsShort:"gen_feb_mar_apr_mag_giu_lug_ago_set_ott_nov_dic".split("_"),weekdays:"domenica_luned\xec_marted\xec_mercoled\xec_gioved\xec_venerd\xec_sabato".split("_"),weekdaysShort:"dom_lun_mar_mer_gio_ven_sab".split("_"),weekdaysMin:"do_lu_ma_me_gi_ve_sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY 
HH:mm"},calendar:{sameDay:function(){return"[Oggi a"+(this.hours()>1?"lle ":0===this.hours()?" ":"ll'")+"]LT"},nextDay:function(){return"[Domani a"+(this.hours()>1?"lle ":0===this.hours()?" ":"ll'")+"]LT"},nextWeek:function(){return"dddd [a"+(this.hours()>1?"lle ":0===this.hours()?" ":"ll'")+"]LT"},lastDay:function(){return"[Ieri a"+(this.hours()>1?"lle ":0===this.hours()?" ":"ll'")+"]LT"},lastWeek:function(){return 0===this.day()?"[La scorsa] dddd [a"+(this.hours()>1?"lle ":0===this.hours()?" ":"ll'")+"]LT":"[Lo scorso] dddd [a"+(this.hours()>1?"lle ":0===this.hours()?" ":"ll'")+"]LT"},sameElse:"L"},relativeTime:{future:"tra %s",past:"%s fa",s:"alcuni secondi",ss:"%d secondi",m:"un minuto",mm:"%d minuti",h:"un'ora",hh:"%d ore",d:"un giorno",dd:"%d giorni",w:"una settimana",ww:"%d settimane",M:"un mese",MM:"%d mesi",y:"un anno",yy:"%d anni"},dayOfMonthOrdinalParse:/\d{1,2}º/,ordinal:"%d\xba",week:{dow:1,doy:4}})})(n(30381))},39183:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("ja",{eras:[{since:"2019-05-01",offset:1,name:"令和",narrow:"㋿",abbr:"R"},{since:"1989-01-08",until:"2019-04-30",offset:1,name:"平成",narrow:"㍻",abbr:"H"},{since:"1926-12-25",until:"1989-01-07",offset:1,name:"昭和",narrow:"㍼",abbr:"S"},{since:"1912-07-30",until:"1926-12-24",offset:1,name:"大正",narrow:"㍽",abbr:"T"},{since:"1873-01-01",until:"1912-07-29",offset:6,name:"明治",narrow:"㍾",abbr:"M"},{since:"0001-01-01",until:"1873-12-31",offset:1,name:"西暦",narrow:"AD",abbr:"AD"},{since:"0000-12-31",until:-1/0,offset:1,name:"紀元前",narrow:"BC",abbr:"BC"},],eraYearOrdinalRegex:/(元|\d+)年/,eraYearOrdinalParse:function(e,t){return"元"===t[1]?1:parseInt(t[1]||e,10)},months:"1月_2月_3月_4月_5月_6月_7月_8月_9月_10月_11月_12月".split("_"),monthsShort:"1月_2月_3月_4月_5月_6月_7月_8月_9月_10月_11月_12月".split("_"),weekdays:"日曜日_月曜日_火曜日_水曜日_木曜日_金曜日_土曜日".split("_"),weekdaysShort:"日_月_火_水_木_金_土".split("_"),weekdaysMin:"日_月_火_水_木_金_土".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY/MM/DD",LL:"YYYY年M月D日",LLL:"YYYY年M
月D日 HH:mm",LLLL:"YYYY年M月D日 dddd HH:mm",l:"YYYY/MM/DD",ll:"YYYY年M月D日",lll:"YYYY年M月D日 HH:mm",llll:"YYYY年M月D日(ddd) HH:mm"},meridiemParse:/午前|午後/i,isPM:function(e){return"午後"===e},meridiem:function(e,t,n){return e<12?"午前":"午後"},calendar:{sameDay:"[今日] LT",nextDay:"[明日] LT",nextWeek:function(e){return e.week()!==this.week()?"[来週]dddd LT":"dddd LT"},lastDay:"[昨日] LT",lastWeek:function(e){return this.week()!==e.week()?"[先週]dddd LT":"dddd LT"},sameElse:"L"},dayOfMonthOrdinalParse:/\d{1,2}日/,ordinal:function(e,t){switch(t){case"y":return 1===e?"元年":e+"年";case"d":case"D":case"DDD":return e+"日";default:return e}},relativeTime:{future:"%s後",past:"%s前",s:"数秒",ss:"%d秒",m:"1分",mm:"%d分",h:"1時間",hh:"%d時間",d:"1日",dd:"%d日",M:"1ヶ月",MM:"%dヶ月",y:"1年",yy:"%d年"}})})(n(30381))},24286:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("jv",{months:"Januari_Februari_Maret_April_Mei_Juni_Juli_Agustus_September_Oktober_Nopember_Desember".split("_"),monthsShort:"Jan_Feb_Mar_Apr_Mei_Jun_Jul_Ags_Sep_Okt_Nop_Des".split("_"),weekdays:"Minggu_Senen_Seloso_Rebu_Kemis_Jemuwah_Septu".split("_"),weekdaysShort:"Min_Sen_Sel_Reb_Kem_Jem_Sep".split("_"),weekdaysMin:"Mg_Sn_Sl_Rb_Km_Jm_Sp".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY [pukul] HH.mm",LLLL:"dddd, D MMMM YYYY [pukul] HH.mm"},meridiemParse:/enjing|siyang|sonten|ndalu/,meridiemHour:function(e,t){return(12===e&&(e=0),"enjing"===t)?e:"siyang"===t?e>=11?e:e+12:"sonten"===t||"ndalu"===t?e+12:void 0},meridiem:function(e,t,n){return e<11?"enjing":e<15?"siyang":e<19?"sonten":"ndalu"},calendar:{sameDay:"[Dinten puniko pukul] LT",nextDay:"[Mbenjang pukul] LT",nextWeek:"dddd [pukul] LT",lastDay:"[Kala wingi pukul] LT",lastWeek:"dddd [kepengker pukul] LT",sameElse:"L"},relativeTime:{future:"wonten ing %s",past:"%s ingkang kepengker",s:"sawetawis detik",ss:"%d detik",m:"setunggal menit",mm:"%d menit",h:"setunggal jam",hh:"%d jam",d:"sedinten",dd:"%d dinten",M:"sewulan",MM:"%d 
wulan",y:"setaun",yy:"%d taun"},week:{dow:1,doy:7}})})(n(30381))},12105:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("ka",{months:"იანვარი_თებერვალი_მარტი_აპრილი_მაისი_ივნისი_ივლისი_აგვისტო_სექტემბერი_ოქტომბერი_ნოემბერი_დეკემბერი".split("_"),monthsShort:"იან_თებ_მარ_აპრ_მაი_ივნ_ივლ_აგვ_სექ_ოქტ_ნოე_დეკ".split("_"),weekdays:{standalone:"კვირა_ორშაბათი_სამშაბათი_ოთხშაბათი_ხუთშაბათი_პარასკევი_შაბათი".split("_"),format:"კვირას_ორშაბათს_სამშაბათს_ოთხშაბათს_ხუთშაბათს_პარასკევს_შაბათს".split("_"),isFormat:/(წინა|შემდეგ)/},weekdaysShort:"კვი_ორშ_სამ_ოთხ_ხუთ_პარ_შაბ".split("_"),weekdaysMin:"კვ_ორ_სა_ოთ_ხუ_პა_შა".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[დღეს] LT[-ზე]",nextDay:"[ხვალ] LT[-ზე]",lastDay:"[გუშინ] LT[-ზე]",nextWeek:"[შემდეგ] dddd LT[-ზე]",lastWeek:"[წინა] dddd LT-ზე",sameElse:"L"},relativeTime:{future:function(e){return e.replace(/(წამ|წუთ|საათ|წელ|დღ|თვ)(ი|ე)/,function(e,t,n){return"ი"===n?t+"ში":t+n+"ში"})},past:function(e){return/(წამი|წუთი|საათი|დღე|თვე)/.test(e)?e.replace(/(ი|ე)$/,"ის წინ"):/წელი/.test(e)?e.replace(/წელი$/,"წლის წინ"):e},s:"რამდენიმე წამი",ss:"%d წამი",m:"წუთი",mm:"%d წუთი",h:"საათი",hh:"%d საათი",d:"დღე",dd:"%d დღე",M:"თვე",MM:"%d თვე",y:"წელი",yy:"%d წელი"},dayOfMonthOrdinalParse:/0|1-ლი|მე-\d{1,2}|\d{1,2}-ე/,ordinal:function(e){return 0===e?e:1===e?e+"-ლი":e<20||e<=100&&e%20==0||e%100==0?"მე-"+e:e+"-ე"},week:{dow:1,doy:7}})})(n(30381))},47772:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={0:"-ші",1:"-ші",2:"-ші",3:"-ші",4:"-ші",5:"-ші",6:"-шы",7:"-ші",8:"-ші",9:"-шы",10:"-шы",20:"-шы",30:"-шы",40:"-шы",50:"-ші",60:"-шы",70:"-ші",80:"-ші",90:"-шы",100:"-ші"};return e.defineLocale("kk",{months:"қаңтар_ақпан_наурыз_сәуір_мамыр_маусым_шілде_тамыз_қыркүйек_қазан_қараша_желтоқсан".split("_"),monthsShort:"қаң_ақп_нау_сәу_мам_мау_шіл_там_қыр_қаз_қар_жел".split("_"),weekdays:"жексенбі_дүйсенбі_сейсенбі_сәрсенбі_бейсенбі_жұма_сенбі".split("_"),weekdaysShort:"жек_дүй_сей_сәр_бей_жұм_сен".split("_"),weekdaysMin:"жк_дй_сй_ср_бй_жм_сн".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Бүгін сағат] LT",nextDay:"[Ертең сағат] LT",nextWeek:"dddd [сағат] LT",lastDay:"[Кеше сағат] LT",lastWeek:"[Өткен аптаның] dddd [сағат] LT",sameElse:"L"},relativeTime:{future:"%s ішінде",past:"%s бұрын",s:"бірнеше секунд",ss:"%d секунд",m:"бір минут",mm:"%d минут",h:"бір сағат",hh:"%d сағат",d:"бір күн",dd:"%d күн",M:"бір ай",MM:"%d ай",y:"бір жыл",yy:"%d жыл"},dayOfMonthOrdinalParse:/\d{1,2}-(ші|шы)/,ordinal:function(e){var n=e%10,r=e>=100?100:null;return e+(t[e]||t[n]||t[r])},week:{dow:1,doy:7}})})(n(30381))},18758:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"១",2:"២",3:"៣",4:"៤",5:"៥",6:"៦",7:"៧",8:"៨",9:"៩",0:"០"},n={"១":"1","២":"2","៣":"3","៤":"4","៥":"5","៦":"6","៧":"7","៨":"8","៩":"9","០":"0"};return e.defineLocale("km",{months:"មករា_កុម្ភៈ_មីនា_មេសា_ឧសភា_មិថុនា_កក្កដា_សីហា_កញ្ញា_តុលា_វិច្ឆិកា_ធ្នូ".split("_"),monthsShort:"មករា_កុម្ភៈ_មីនា_មេសា_ឧសភា_មិថុនា_កក្កដា_សីហា_កញ្ញា_តុលា_វិច្ឆិកា_ធ្នូ".split("_"),weekdays:"អាទិត្យ_ច័ន្ទ_អង្គារ_ពុធ_ព្រហស្បតិ៍_សុក្រ_សៅរ៍".split("_"),weekdaysShort:"អា_ច_អ_ព_ព្រ_សុ_ស".split("_"),weekdaysMin:"អា_ច_អ_ព_ព្រ_សុ_ស".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},meridiemParse:/ព្រឹក|ល្ងាច/,isPM:function(e){return"ល្ងាច"===e},meridiem:function(e,t,n){return e<12?"ព្រឹក":"ល្ងាច"},calendar:{sameDay:"[ថ្ងៃនេះ ម៉ោង] LT",nextDay:"[ស្អែក ម៉ោង] LT",nextWeek:"dddd [ម៉ោង] LT",lastDay:"[ម្សិលមិញ ម៉ោង] LT",lastWeek:"dddd [សប្តាហ៍មុន] [ម៉ោង] LT",sameElse:"L"},relativeTime:{future:"%sទៀត",past:"%sមុន",s:"ប៉ុន្មានវិនាទី",ss:"%d វិនាទី",m:"មួយនាទី",mm:"%d នាទី",h:"មួយម៉ោង",hh:"%d ម៉ោង",d:"មួយថ្ងៃ",dd:"%d ថ្ងៃ",M:"មួយខែ",MM:"%d ខែ",y:"មួយឆ្នាំ",yy:"%d ឆ្នាំ"},dayOfMonthOrdinalParse:/ទី\d{1,2}/,ordinal:"ទី%d",preparse:function(e){return e.replace(/[១២៣៤៥៦៧៨៩០]/g,function(e){return n[e]})},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]})},week:{dow:1,doy:4}})})(n(30381))},79282:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"೧",2:"೨",3:"೩",4:"೪",5:"೫",6:"೬",7:"೭",8:"೮",9:"೯",0:"೦"},n={"೧":"1","೨":"2","೩":"3","೪":"4","೫":"5","೬":"6","೭":"7","೮":"8","೯":"9","೦":"0"};return e.defineLocale("kn",{months:"ಜನವರಿ_ಫೆಬ್ರವರಿ_ಮಾರ್ಚ್_ಏಪ್ರಿಲ್_ಮೇ_ಜೂನ್_ಜುಲೈ_ಆಗಸ್ಟ್_ಸೆಪ್ಟೆಂಬರ್_ಅಕ್ಟೋಬರ್_ನವೆಂಬರ್_ಡಿಸೆಂಬರ್".split("_"),monthsShort:"ಜನ_ಫೆಬ್ರ_ಮಾರ್ಚ್_ಏಪ್ರಿಲ್_ಮೇ_ಜೂನ್_ಜುಲೈ_ಆಗಸ್ಟ್_ಸೆಪ್ಟೆಂ_ಅಕ್ಟೋ_ನವೆಂ_ಡಿಸೆಂ".split("_"),monthsParseExact:!0,weekdays:"ಭಾನುವಾರ_ಸೋಮವಾರ_ಮಂಗಳವಾರ_ಬುಧವಾರ_ಗುರುವಾರ_ಶುಕ್ರವಾರ_ಶನಿವಾರ".split("_"),weekdaysShort:"ಭಾನು_ಸೋಮ_ಮಂಗಳ_ಬುಧ_ಗುರು_ಶುಕ್ರ_ಶನಿ".split("_"),weekdaysMin:"ಭಾ_ಸೋ_ಮಂ_ಬು_ಗು_ಶು_ಶ".split("_"),longDateFormat:{LT:"A h:mm",LTS:"A h:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm",LLLL:"dddd, D MMMM YYYY, A h:mm"},calendar:{sameDay:"[ಇಂದು] LT",nextDay:"[ನಾಳೆ] LT",nextWeek:"dddd, LT",lastDay:"[ನಿನ್ನೆ] LT",lastWeek:"[ಕೊನೆಯ] dddd, LT",sameElse:"L"},relativeTime:{future:"%s ನಂತರ",past:"%s ಹಿಂದೆ",s:"ಕೆಲವು ಕ್ಷಣಗಳು",ss:"%d ಸೆಕೆಂಡುಗಳು",m:"ಒಂದು ನಿಮಿಷ",mm:"%d ನಿಮಿಷ",h:"ಒಂದು ಗಂಟೆ",hh:"%d ಗಂಟೆ",d:"ಒಂದು ದಿನ",dd:"%d ದಿನ",M:"ಒಂದು ತಿಂಗಳು",MM:"%d ತಿಂಗಳು",y:"ಒಂದು ವರ್ಷ",yy:"%d ವರ್ಷ"},preparse:function(e){return e.replace(/[೧೨೩೪೫೬೭೮೯೦]/g,function(e){return n[e]})},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]})},meridiemParse:/ರಾತ್ರಿ|ಬೆಳಿಗ್ಗೆ|ಮಧ್ಯಾಹ್ನ|ಸಂಜೆ/,meridiemHour:function(e,t){return(12===e&&(e=0),"ರಾತ್ರಿ"===t)?e<4?e:e+12:"ಬೆಳಿಗ್ಗೆ"===t?e:"ಮಧ್ಯಾಹ್ನ"===t?e>=10?e:e+12:"ಸಂಜೆ"===t?e+12:void 0},meridiem:function(e,t,n){return e<4?"ರಾತ್ರಿ":e<10?"ಬೆಳಿಗ್ಗೆ":e<17?"ಮಧ್ಯಾಹ್ನ":e<20?"ಸಂಜೆ":"ರಾತ್ರಿ"},dayOfMonthOrdinalParse:/\d{1,2}(ನೇ)/,ordinal:function(e){return e+"ನೇ"},week:{dow:0,doy:6}})})(n(30381))},33730:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("ko",{months:"1월_2월_3월_4월_5월_6월_7월_8월_9월_10월_11월_12월".split("_"),monthsShort:"1월_2월_3월_4월_5월_6월_7월_8월_9월_10월_11월_12월".split("_"),weekdays:"일요일_월요일_화요일_수요일_목요일_금요일_토요일".split("_"),weekdaysShort:"일_월_화_수_목_금_토".split("_"),weekdaysMin:"일_월_화_수_목_금_토".split("_"),longDateFormat:{LT:"A h:mm",LTS:"A h:mm:ss",L:"YYYY.MM.DD.",LL:"YYYY년 MMMM D일",LLL:"YYYY년 MMMM D일 A h:mm",LLLL:"YYYY년 MMMM D일 dddd A h:mm",l:"YYYY.MM.DD.",ll:"YYYY년 MMMM D일",lll:"YYYY년 MMMM D일 A h:mm",llll:"YYYY년 MMMM D일 dddd A h:mm"},calendar:{sameDay:"오늘 LT",nextDay:"내일 LT",nextWeek:"dddd LT",lastDay:"어제 LT",lastWeek:"지난주 dddd LT",sameElse:"L"},relativeTime:{future:"%s 후",past:"%s 전",s:"몇 초",ss:"%d초",m:"1분",mm:"%d분",h:"한 시간",hh:"%d시간",d:"하루",dd:"%d일",M:"한 달",MM:"%d달",y:"일 년",yy:"%d년"},dayOfMonthOrdinalParse:/\d{1,2}(일|월|주)/,ordinal:function(e,t){switch(t){case"d":case"D":case"DDD":return e+"일";case"M":return e+"월";case"w":case"W":return e+"주";default:return e}},meridiemParse:/오전|오후/,isPM:function(e){return"오후"===e},meridiem:function(e,t,n){return e<12?"오전":"오후"}})})(n(30381))},1408:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"١",2:"٢",3:"٣",4:"٤",5:"٥",6:"٦",7:"٧",8:"٨",9:"٩",0:"٠"},n={"١":"1","٢":"2","٣":"3","٤":"4","٥":"5","٦":"6","٧":"7","٨":"8","٩":"9","٠":"0"},r=["کانونی دووەم","شوبات","ئازار","نیسان","ئایار","حوزەیران","تەمموز","ئاب","ئەیلوول","تشرینی یەكەم","تشرینی دووەم","كانونی یەکەم",];return e.defineLocale("ku",{months:r,monthsShort:r,weekdays:"یه‌كشه‌ممه‌_دووشه‌ممه‌_سێشه‌ممه‌_چوارشه‌ممه‌_پێنجشه‌ممه‌_هه‌ینی_شه‌ممه‌".split("_"),weekdaysShort:"یه‌كشه‌م_دووشه‌م_سێشه‌م_چوارشه‌م_پێنجشه‌م_هه‌ینی_شه‌ممه‌".split("_"),weekdaysMin:"ی_د_س_چ_پ_ه_ش".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},meridiemParse:/ئێواره‌|به‌یانی/,isPM:function(e){return/ئێواره‌/.test(e)},meridiem:function(e,t,n){return e<12?"به‌یانی":"ئێواره‌"},calendar:{sameDay:"[ئه‌مرۆ كاتژمێر] LT",nextDay:"[به‌یانی كاتژمێر] LT",nextWeek:"dddd [كاتژمێر] LT",lastDay:"[دوێنێ كاتژمێر] LT",lastWeek:"dddd [كاتژمێر] LT",sameElse:"L"},relativeTime:{future:"له‌ %s",past:"%s",s:"چه‌ند چركه‌یه‌ك",ss:"چركه‌ %d",m:"یه‌ك خوله‌ك",mm:"%d خوله‌ك",h:"یه‌ك كاتژمێر",hh:"%d كاتژمێر",d:"یه‌ك ڕۆژ",dd:"%d ڕۆژ",M:"یه‌ك مانگ",MM:"%d مانگ",y:"یه‌ك ساڵ",yy:"%d ساڵ"},preparse:function(e){return e.replace(/[١٢٣٤٥٦٧٨٩٠]/g,function(e){return n[e]}).replace(/،/g,",")},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]}).replace(/,/g,"،")},week:{dow:6,doy:12}})})(n(30381))},33291:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={0:"-чү",1:"-чи",2:"-чи",3:"-чү",4:"-чү",5:"-чи",6:"-чы",7:"-чи",8:"-чи",9:"-чу",10:"-чу",20:"-чы",30:"-чу",40:"-чы",50:"-чү",60:"-чы",70:"-чи",80:"-чи",90:"-чу",100:"-чү"};return e.defineLocale("ky",{months:"январь_февраль_март_апрель_май_июнь_июль_август_сентябрь_октябрь_ноябрь_декабрь".split("_"),monthsShort:"янв_фев_март_апр_май_июнь_июль_авг_сен_окт_ноя_дек".split("_"),weekdays:"Жекшемби_Дүйшөмбү_Шейшемби_Шаршемби_Бейшемби_Жума_Ишемби".split("_"),weekdaysShort:"Жек_Дүй_Шей_Шар_Бей_Жум_Ише".split("_"),weekdaysMin:"Жк_Дй_Шй_Шр_Бй_Жм_Иш".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Бүгүн саат] LT",nextDay:"[Эртең саат] LT",nextWeek:"dddd [саат] LT",lastDay:"[Кечээ саат] LT",lastWeek:"[Өткөн аптанын] dddd [күнү] [саат] LT",sameElse:"L"},relativeTime:{future:"%s ичинде",past:"%s мурун",s:"бирнече секунд",ss:"%d секунд",m:"бир мүнөт",mm:"%d мүнөт",h:"бир саат",hh:"%d саат",d:"бир күн",dd:"%d күн",M:"бир ай",MM:"%d ай",y:"бир жыл",yy:"%d жыл"},dayOfMonthOrdinalParse:/\d{1,2}-(чи|чы|чү|чу)/,ordinal:function(e){var n=e%10,r=e>=100?100:null;return e+(t[e]||t[n]||t[r])},week:{dow:1,doy:7}})})(n(30381))},36841:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +function t(e,t,n,r){var i={m:["eng Minutt","enger Minutt"],h:["eng Stonn","enger Stonn"],d:["een Dag","engem Dag"],M:["ee Mount","engem Mount"],y:["ee Joer","engem Joer"]};return t?i[n][0]:i[n][1]}function n(e){return i(e.substr(0,e.indexOf(" ")))?"a "+e:"an "+e}function r(e){return i(e.substr(0,e.indexOf(" ")))?"viru "+e:"virun "+e}function i(e){if(e=parseInt(e,10),isNaN(e))return!1;if(e<0)return!0;if(e<10)return!!(4<=e)&&!!(e<=7);if(e<100){var t=e%10,n=e/10;return 0===t?i(n):i(t)}if(!(e<1e4))return i(e/=1e3);for(;e>=10;)e/=10;return i(e)}return e.defineLocale("lb",{months:"Januar_Februar_M\xe4erz_Abr\xebll_Mee_Juni_Juli_August_September_Oktober_November_Dezember".split("_"),monthsShort:"Jan._Febr._Mrz._Abr._Mee_Jun._Jul._Aug._Sept._Okt._Nov._Dez.".split("_"),monthsParseExact:!0,weekdays:"Sonndeg_M\xe9indeg_D\xebnschdeg_M\xebttwoch_Donneschdeg_Freideg_Samschdeg".split("_"),weekdaysShort:"So._M\xe9._D\xeb._M\xeb._Do._Fr._Sa.".split("_"),weekdaysMin:"So_M\xe9_D\xeb_M\xeb_Do_Fr_Sa".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm [Auer]",LTS:"H:mm:ss [Auer]",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm [Auer]",LLLL:"dddd, D. 
MMMM YYYY H:mm [Auer]"},calendar:{sameDay:"[Haut um] LT",sameElse:"L",nextDay:"[Muer um] LT",nextWeek:"dddd [um] LT",lastDay:"[G\xebschter um] LT",lastWeek:function(){switch(this.day()){case 2:case 4:return"[Leschten] dddd [um] LT";default:return"[Leschte] dddd [um] LT"}}},relativeTime:{future:n,past:r,s:"e puer Sekonnen",ss:"%d Sekonnen",m:t,mm:"%d Minutten",h:t,hh:"%d Stonnen",d:t,dd:"%d Deeg",M:t,MM:"%d M\xe9int",y:t,yy:"%d Joer"},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},55466:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("lo",{months:"ມັງກອນ_ກຸມພາ_ມີນາ_ເມສາ_ພຶດສະພາ_ມິຖຸນາ_ກໍລະກົດ_ສິງຫາ_ກັນຍາ_ຕຸລາ_ພະຈິກ_ທັນວາ".split("_"),monthsShort:"ມັງກອນ_ກຸມພາ_ມີນາ_ເມສາ_ພຶດສະພາ_ມິຖຸນາ_ກໍລະກົດ_ສິງຫາ_ກັນຍາ_ຕຸລາ_ພະຈິກ_ທັນວາ".split("_"),weekdays:"ອາທິດ_ຈັນ_ອັງຄານ_ພຸດ_ພະຫັດ_ສຸກ_ເສົາ".split("_"),weekdaysShort:"ທິດ_ຈັນ_ອັງຄານ_ພຸດ_ພະຫັດ_ສຸກ_ເສົາ".split("_"),weekdaysMin:"ທ_ຈ_ອຄ_ພ_ພຫ_ສກ_ສ".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"ວັນdddd D MMMM YYYY HH:mm"},meridiemParse:/ຕອນເຊົ້າ|ຕອນແລງ/,isPM:function(e){return"ຕອນແລງ"===e},meridiem:function(e,t,n){return e<12?"ຕອນເຊົ້າ":"ຕອນແລງ"},calendar:{sameDay:"[ມື້ນີ້ເວລາ] LT",nextDay:"[ມື້ອື່ນເວລາ] LT",nextWeek:"[ວັນ]dddd[ໜ້າເວລາ] LT",lastDay:"[ມື້ວານນີ້ເວລາ] LT",lastWeek:"[ວັນ]dddd[ແລ້ວນີ້ເວລາ] LT",sameElse:"L"},relativeTime:{future:"ອີກ %s",past:"%sຜ່ານມາ",s:"ບໍ່ເທົ່າໃດວິນາທີ",ss:"%d ວິນາທີ",m:"1 ນາທີ",mm:"%d ນາທີ",h:"1 ຊົ່ວໂມງ",hh:"%d ຊົ່ວໂມງ",d:"1 ມື້",dd:"%d ມື້",M:"1 ເດືອນ",MM:"%d ເດືອນ",y:"1 ປີ",yy:"%d ປີ"},dayOfMonthOrdinalParse:/(ທີ່)\d{1,2}/,ordinal:function(e){return"ທີ່"+e}})})(n(30381))},57010:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={ss:"sekundė_sekundžių_sekundes",m:"minutė_minutės_minutę",mm:"minutės_minučių_minutes",h:"valanda_valandos_valandą",hh:"valandos_valandų_valandas",d:"diena_dienos_dieną",dd:"dienos_dienų_dienas",M:"mėnuo_mėnesio_mėnesį",MM:"mėnesiai_mėnesių_mėnesius",y:"metai_metų_metus",yy:"metai_metų_metus"};function n(e,t,n,r){return t?"kelios sekundės":r?"kelių sekundžių":"kelias sekundes"}function r(e,t,n,r){return t?a(n)[0]:r?a(n)[1]:a(n)[2]}function i(e){return e%10==0||e>10&&e<20}function a(e){return t[e].split("_")}function o(e,t,n,o){var s=e+" ";return 1===e?s+r(e,t,n[0],o):t?s+(i(e)?a(n)[1]:a(n)[0]):o?s+a(n)[1]:s+(i(e)?a(n)[1]:a(n)[2])}return e.defineLocale("lt",{months:{format:"sausio_vasario_kovo_balandžio_gegužės_birželio_liepos_rugpjūčio_rugsėjo_spalio_lapkričio_gruodžio".split("_"),standalone:"sausis_vasaris_kovas_balandis_gegužė_birželis_liepa_rugpjūtis_rugsėjis_spalis_lapkritis_gruodis".split("_"),isFormat:/D[oD]?(\[[^\[\]]*\]|\s)+MMMM?|MMMM?(\[[^\[\]]*\]|\s)+D[oD]?/},monthsShort:"sau_vas_kov_bal_geg_bir_lie_rgp_rgs_spa_lap_grd".split("_"),weekdays:{format:"sekmadienį_pirmadienį_antradienį_trečiadienį_ketvirtadienį_penktadienį_šeštadienį".split("_"),standalone:"sekmadienis_pirmadienis_antradienis_trečiadienis_ketvirtadienis_penktadienis_šeštadienis".split("_"),isFormat:/dddd HH:mm/},weekdaysShort:"Sek_Pir_Ant_Tre_Ket_Pen_Šeš".split("_"),weekdaysMin:"S_P_A_T_K_Pn_Š".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY-MM-DD",LL:"YYYY [m.] MMMM D [d.]",LLL:"YYYY [m.] MMMM D [d.], HH:mm [val.]",LLLL:"YYYY [m.] MMMM D [d.], dddd, HH:mm [val.]",l:"YYYY-MM-DD",ll:"YYYY [m.] MMMM D [d.]",lll:"YYYY [m.] MMMM D [d.], HH:mm [val.]",llll:"YYYY [m.] 
MMMM D [d.], ddd, HH:mm [val.]"},calendar:{sameDay:"[Šiandien] LT",nextDay:"[Rytoj] LT",nextWeek:"dddd LT",lastDay:"[Vakar] LT",lastWeek:"[Praėjusį] dddd LT",sameElse:"L"},relativeTime:{future:"po %s",past:"prieš %s",s:n,ss:o,m:r,mm:o,h:r,hh:o,d:r,dd:o,M:r,MM:o,y:r,yy:o},dayOfMonthOrdinalParse:/\d{1,2}-oji/,ordinal:function(e){return e+"-oji"},week:{dow:1,doy:4}})})(n(30381))},37595:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t={ss:"sekundes_sekundēm_sekunde_sekundes".split("_"),m:"minūtes_minūtēm_minūte_minūtes".split("_"),mm:"minūtes_minūtēm_minūte_minūtes".split("_"),h:"stundas_stundām_stunda_stundas".split("_"),hh:"stundas_stundām_stunda_stundas".split("_"),d:"dienas_dienām_diena_dienas".split("_"),dd:"dienas_dienām_diena_dienas".split("_"),M:"mēneša_mēnešiem_mēnesis_mēneši".split("_"),MM:"mēneša_mēnešiem_mēnesis_mēneši".split("_"),y:"gada_gadiem_gads_gadi".split("_"),yy:"gada_gadiem_gads_gadi".split("_")};function n(e,t,n){return n?t%10==1&&t%100!=11?e[2]:e[3]:t%10==1&&t%100!=11?e[0]:e[1]}function r(e,r,i){return e+" "+n(t[i],e,r)}function i(e,r,i){return n(t[i],e,r)}function a(e,t){return t?"dažas sekundes":"dažām sekundēm"}return e.defineLocale("lv",{months:"janvāris_februāris_marts_aprīlis_maijs_jūnijs_jūlijs_augusts_septembris_oktobris_novembris_decembris".split("_"),monthsShort:"jan_feb_mar_apr_mai_jūn_jūl_aug_sep_okt_nov_dec".split("_"),weekdays:"svētdiena_pirmdiena_otrdiena_trešdiena_ceturtdiena_piektdiena_sestdiena".split("_"),weekdaysShort:"Sv_P_O_T_C_Pk_S".split("_"),weekdaysMin:"Sv_P_O_T_C_Pk_S".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY.",LL:"YYYY. [gada] D. MMMM",LLL:"YYYY. [gada] D. MMMM, HH:mm",LLLL:"YYYY. [gada] D. 
MMMM, dddd, HH:mm"},calendar:{sameDay:"[Šodien pulksten] LT",nextDay:"[Rīt pulksten] LT",nextWeek:"dddd [pulksten] LT",lastDay:"[Vakar pulksten] LT",lastWeek:"[Pagājušā] dddd [pulksten] LT",sameElse:"L"},relativeTime:{future:"pēc %s",past:"pirms %s",s:a,ss:r,m:i,mm:r,h:i,hh:r,d:i,dd:r,M:i,MM:r,y:i,yy:r},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},39861:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t={words:{ss:["sekund","sekunda","sekundi"],m:["jedan minut","jednog minuta"],mm:["minut","minuta","minuta"],h:["jedan sat","jednog sata"],hh:["sat","sata","sati"],dd:["dan","dana","dana"],MM:["mjesec","mjeseca","mjeseci"],yy:["godina","godine","godina"]},correctGrammaticalCase:function(e,t){return 1===e?t[0]:e>=2&&e<=4?t[1]:t[2]},translate:function(e,n,r){var i=t.words[r];return 1===r.length?n?i[0]:i[1]:e+" "+t.correctGrammaticalCase(e,i)}};return e.defineLocale("me",{months:"januar_februar_mart_april_maj_jun_jul_avgust_septembar_oktobar_novembar_decembar".split("_"),monthsShort:"jan._feb._mar._apr._maj_jun_jul_avg._sep._okt._nov._dec.".split("_"),monthsParseExact:!0,weekdays:"nedjelja_ponedjeljak_utorak_srijeda_četvrtak_petak_subota".split("_"),weekdaysShort:"ned._pon._uto._sri._čet._pet._sub.".split("_"),weekdaysMin:"ne_po_ut_sr_če_pe_su".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd, D. 
MMMM YYYY H:mm"},calendar:{sameDay:"[danas u] LT",nextDay:"[sjutra u] LT",nextWeek:function(){switch(this.day()){case 0:return"[u] [nedjelju] [u] LT";case 3:return"[u] [srijedu] [u] LT";case 6:return"[u] [subotu] [u] LT";case 1:case 2:case 4:case 5:return"[u] dddd [u] LT"}},lastDay:"[juče u] LT",lastWeek:function(){return["[prošle] [nedjelje] [u] LT","[prošlog] [ponedjeljka] [u] LT","[prošlog] [utorka] [u] LT","[prošle] [srijede] [u] LT","[prošlog] [četvrtka] [u] LT","[prošlog] [petka] [u] LT","[prošle] [subote] [u] LT",][this.day()]},sameElse:"L"},relativeTime:{future:"za %s",past:"prije %s",s:"nekoliko sekundi",ss:t.translate,m:t.translate,mm:t.translate,h:t.translate,hh:t.translate,d:"dan",dd:t.translate,M:"mjesec",MM:t.translate,y:"godinu",yy:t.translate},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}})})(n(30381))},35493:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("mi",{months:"Kohi-tāte_Hui-tanguru_Poutū-te-rangi_Paenga-whāwhā_Haratua_Pipiri_Hōngoingoi_Here-turi-kōkā_Mahuru_Whiringa-ā-nuku_Whiringa-ā-rangi_Hakihea".split("_"),monthsShort:"Kohi_Hui_Pou_Pae_Hara_Pipi_Hōngoi_Here_Mahu_Whi-nu_Whi-ra_Haki".split("_"),monthsRegex:/(?:['a-z\u0101\u014D\u016B]+\-?){1,3}/i,monthsStrictRegex:/(?:['a-z\u0101\u014D\u016B]+\-?){1,3}/i,monthsShortRegex:/(?:['a-z\u0101\u014D\u016B]+\-?){1,3}/i,monthsShortStrictRegex:/(?:['a-z\u0101\u014D\u016B]+\-?){1,2}/i,weekdays:"Rātapu_Mane_Tūrei_Wenerei_Tāite_Paraire_Hātarei".split("_"),weekdaysShort:"Ta_Ma_Tū_We_Tāi_Pa_Hā".split("_"),weekdaysMin:"Ta_Ma_Tū_We_Tāi_Pa_Hā".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY [i] HH:mm",LLLL:"dddd, D MMMM YYYY [i] HH:mm"},calendar:{sameDay:"[i teie mahana, i] LT",nextDay:"[apopo i] LT",nextWeek:"dddd [i] LT",lastDay:"[inanahi i] LT",lastWeek:"dddd [whakamutunga i] LT",sameElse:"L"},relativeTime:{future:"i roto i %s",past:"%s i mua",s:"te hēkona ruarua",ss:"%d hēkona",m:"he meneti",mm:"%d 
meneti",h:"te haora",hh:"%d haora",d:"he ra",dd:"%d ra",M:"he marama",MM:"%d marama",y:"he tau",yy:"%d tau"},dayOfMonthOrdinalParse:/\d{1,2}º/,ordinal:"%d\xba",week:{dow:1,doy:4}})})(n(30381))},95966:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("mk",{months:"јануари_февруари_март_април_мај_јуни_јули_август_септември_октомври_ноември_декември".split("_"),monthsShort:"јан_фев_мар_апр_мај_јун_јул_авг_сеп_окт_ное_дек".split("_"),weekdays:"недела_понеделник_вторник_среда_четврток_петок_сабота".split("_"),weekdaysShort:"нед_пон_вто_сре_чет_пет_саб".split("_"),weekdaysMin:"нe_пo_вт_ср_че_пе_сa".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"D.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY H:mm",LLLL:"dddd, D MMMM YYYY H:mm"},calendar:{sameDay:"[Денес во] LT",nextDay:"[Утре во] LT",nextWeek:"[Во] dddd [во] LT",lastDay:"[Вчера во] LT",lastWeek:function(){switch(this.day()){case 0:case 3:case 6:return"[Изминатата] dddd [во] LT";case 1:case 2:case 4:case 5:return"[Изминатиот] dddd [во] LT"}},sameElse:"L"},relativeTime:{future:"за %s",past:"пред %s",s:"неколку секунди",ss:"%d секунди",m:"една минута",mm:"%d минути",h:"еден час",hh:"%d часа",d:"еден ден",dd:"%d дена",M:"еден месец",MM:"%d месеци",y:"една година",yy:"%d години"},dayOfMonthOrdinalParse:/\d{1,2}-(ев|ен|ти|ви|ри|ми)/,ordinal:function(e){var t=e%10,n=e%100;if(0===e)return e+"-ев";if(0===n)return e+"-ен";if(n>10&&n<20)return e+"-ти";if(1===t)return e+"-ви";if(2===t)return e+"-ри";else if(7===t||8===t)return e+"-ми";else return e+"-ти"},week:{dow:1,doy:7}})})(n(30381))},87341:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("ml",{months:"ജനുവരി_ഫെബ്രുവരി_മാർച്ച്_ഏപ്രിൽ_മേയ്_ജൂൺ_ജൂലൈ_ഓഗസ്റ്റ്_സെപ്റ്റംബർ_ഒക്ടോബർ_നവംബർ_ഡിസംബർ".split("_"),monthsShort:"ജനു._ഫെബ്രു._മാർ._ഏപ്രി._മേയ്_ജൂൺ_ജൂലൈ._ഓഗ._സെപ്റ്റ._ഒക്ടോ._നവം._ഡിസം.".split("_"),monthsParseExact:!0,weekdays:"ഞായറാഴ്ച_തിങ്കളാഴ്ച_ചൊവ്വാഴ്ച_ബുധനാഴ്ച_വ്യാഴാഴ്ച_വെള്ളിയാഴ്ച_ശനിയാഴ്ച".split("_"),weekdaysShort:"ഞായർ_തിങ്കൾ_ചൊവ്വ_ബുധൻ_വ്യാഴം_വെള്ളി_ശനി".split("_"),weekdaysMin:"ഞാ_തി_ചൊ_ബു_വ്യാ_വെ_ശ".split("_"),longDateFormat:{LT:"A h:mm -നു",LTS:"A h:mm:ss -നു",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm -നു",LLLL:"dddd, D MMMM YYYY, A h:mm -നു"},calendar:{sameDay:"[ഇന്ന്] LT",nextDay:"[നാളെ] LT",nextWeek:"dddd, LT",lastDay:"[ഇന്നലെ] LT",lastWeek:"[കഴിഞ്ഞ] dddd, LT",sameElse:"L"},relativeTime:{future:"%s കഴിഞ്ഞ്",past:"%s മുൻപ്",s:"അൽപ നിമിഷങ്ങൾ",ss:"%d സെക്കൻഡ്",m:"ഒരു മിനിറ്റ്",mm:"%d മിനിറ്റ്",h:"ഒരു മണിക്കൂർ",hh:"%d മണിക്കൂർ",d:"ഒരു ദിവസം",dd:"%d ദിവസം",M:"ഒരു മാസം",MM:"%d മാസം",y:"ഒരു വർഷം",yy:"%d വർഷം"},meridiemParse:/രാത്രി|രാവിലെ|ഉച്ച കഴിഞ്ഞ്|വൈകുന്നേരം|രാത്രി/i,meridiemHour:function(e,t){return(12===e&&(e=0),"രാത്രി"===t&&e>=4||"ഉച്ച കഴിഞ്ഞ്"===t||"വൈകുന്നേരം"===t)?e+12:e},meridiem:function(e,t,n){return e<4?"രാത്രി":e<12?"രാവിലെ":e<17?"ഉച്ച കഴിഞ്ഞ്":e<20?"വൈകുന്നേരം":"രാത്രി"}})})(n(30381))},5115:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +function t(e,t,n,r){switch(n){case"s":return t?"хэдхэн секунд":"хэдхэн секундын";case"ss":return e+(t?" секунд":" секундын");case"m":case"mm":return e+(t?" минут":" минутын");case"h":case"hh":return e+(t?" цаг":" цагийн");case"d":case"dd":return e+(t?" өдөр":" өдрийн");case"M":case"MM":return e+(t?" сар":" сарын");case"y":case"yy":return e+(t?" 
жил":" жилийн");default:return e}}return e.defineLocale("mn",{months:"Нэгдүгээр сар_Хоёрдугаар сар_Гуравдугаар сар_Дөрөвдүгээр сар_Тавдугаар сар_Зургадугаар сар_Долдугаар сар_Наймдугаар сар_Есдүгээр сар_Аравдугаар сар_Арван нэгдүгээр сар_Арван хоёрдугаар сар".split("_"),monthsShort:"1 сар_2 сар_3 сар_4 сар_5 сар_6 сар_7 сар_8 сар_9 сар_10 сар_11 сар_12 сар".split("_"),monthsParseExact:!0,weekdays:"Ням_Даваа_Мягмар_Лхагва_Пүрэв_Баасан_Бямба".split("_"),weekdaysShort:"Ням_Дав_Мяг_Лха_Пүр_Баа_Бям".split("_"),weekdaysMin:"Ня_Да_Мя_Лх_Пү_Ба_Бя".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY-MM-DD",LL:"YYYY оны MMMMын D",LLL:"YYYY оны MMMMын D HH:mm",LLLL:"dddd, YYYY оны MMMMын D HH:mm"},meridiemParse:/ҮӨ|ҮХ/i,isPM:function(e){return"ҮХ"===e},meridiem:function(e,t,n){return e<12?"ҮӨ":"ҮХ"},calendar:{sameDay:"[Өнөөдөр] LT",nextDay:"[Маргааш] LT",nextWeek:"[Ирэх] dddd LT",lastDay:"[Өчигдөр] LT",lastWeek:"[Өнгөрсөн] dddd LT",sameElse:"L"},relativeTime:{future:"%s дараа",past:"%s өмнө",s:t,ss:t,m:t,mm:t,h:t,hh:t,d:t,dd:t,M:t,MM:t,y:t,yy:t},dayOfMonthOrdinalParse:/\d{1,2} өдөр/,ordinal:function(e,t){switch(t){case"d":case"D":case"DDD":return e+" өдөр";default:return e}}})})(n(30381))},10370:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"१",2:"२",3:"३",4:"४",5:"५",6:"६",7:"७",8:"८",9:"९",0:"०"},n={"१":"1","२":"2","३":"3","४":"4","५":"5","६":"6","७":"7","८":"8","९":"9","०":"0"};function r(e,t,n,r){var i="";if(t)switch(n){case"s":i="काही सेकंद";break;case"ss":i="%d सेकंद";break;case"m":i="एक मिनिट";break;case"mm":i="%d मिनिटे";break;case"h":i="एक तास";break;case"hh":i="%d तास";break;case"d":i="एक दिवस";break;case"dd":i="%d दिवस";break;case"M":i="एक महिना";break;case"MM":i="%d महिने";break;case"y":i="एक वर्ष";break;case"yy":i="%d वर्षे"}else switch(n){case"s":i="काही सेकंदां";break;case"ss":i="%d सेकंदां";break;case"m":i="एका मिनिटा";break;case"mm":i="%d मिनिटां";break;case"h":i="एका तासा";break;case"hh":i="%d तासां";break;case"d":i="एका दिवसा";break;case"dd":i="%d दिवसां";break;case"M":i="एका महिन्या";break;case"MM":i="%d महिन्यां";break;case"y":i="एका वर्षा";break;case"yy":i="%d वर्षां"}return i.replace(/%d/i,e)}return e.defineLocale("mr",{months:"जानेवारी_फेब्रुवारी_मार्च_एप्रिल_मे_जून_जुलै_ऑगस्ट_सप्टेंबर_ऑक्टोबर_नोव्हेंबर_डिसेंबर".split("_"),monthsShort:"जाने._फेब्रु._मार्च._एप्रि._मे._जून._जुलै._ऑग._सप्टें._ऑक्टो._नोव्हें._डिसें.".split("_"),monthsParseExact:!0,weekdays:"रविवार_सोमवार_मंगळवार_बुधवार_गुरूवार_शुक्रवार_शनिवार".split("_"),weekdaysShort:"रवि_सोम_मंगळ_बुध_गुरू_शुक्र_शनि".split("_"),weekdaysMin:"र_सो_मं_बु_गु_शु_श".split("_"),longDateFormat:{LT:"A h:mm वाजता",LTS:"A h:mm:ss वाजता",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm वाजता",LLLL:"dddd, D MMMM YYYY, A h:mm वाजता"},calendar:{sameDay:"[आज] LT",nextDay:"[उद्या] LT",nextWeek:"dddd, LT",lastDay:"[काल] LT",lastWeek:"[मागील] dddd, LT",sameElse:"L"},relativeTime:{future:"%sमध्ये",past:"%sपूर्वी",s:r,ss:r,m:r,mm:r,h:r,hh:r,d:r,dd:r,M:r,MM:r,y:r,yy:r},preparse:function(e){return e.replace(/[१२३४५६७८९०]/g,function(e){return n[e]})},postformat:function(e){return e.replace(/\d/g,function(e){return 
t[e]})},meridiemParse:/पहाटे|सकाळी|दुपारी|सायंकाळी|रात्री/,meridiemHour:function(e,t){return(12===e&&(e=0),"पहाटे"===t||"सकाळी"===t)?e:"दुपारी"===t||"सायंकाळी"===t||"रात्री"===t?e>=12?e:e+12:void 0},meridiem:function(e,t,n){return e>=0&&e<6?"पहाटे":e<12?"सकाळी":e<17?"दुपारी":e<20?"सायंकाळी":"रात्री"},week:{dow:0,doy:6}})})(n(30381))},41237:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("ms-my",{months:"Januari_Februari_Mac_April_Mei_Jun_Julai_Ogos_September_Oktober_November_Disember".split("_"),monthsShort:"Jan_Feb_Mac_Apr_Mei_Jun_Jul_Ogs_Sep_Okt_Nov_Dis".split("_"),weekdays:"Ahad_Isnin_Selasa_Rabu_Khamis_Jumaat_Sabtu".split("_"),weekdaysShort:"Ahd_Isn_Sel_Rab_Kha_Jum_Sab".split("_"),weekdaysMin:"Ah_Is_Sl_Rb_Km_Jm_Sb".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY [pukul] HH.mm",LLLL:"dddd, D MMMM YYYY [pukul] HH.mm"},meridiemParse:/pagi|tengahari|petang|malam/,meridiemHour:function(e,t){return(12===e&&(e=0),"pagi"===t)?e:"tengahari"===t?e>=11?e:e+12:"petang"===t||"malam"===t?e+12:void 0},meridiem:function(e,t,n){return e<11?"pagi":e<15?"tengahari":e<19?"petang":"malam"},calendar:{sameDay:"[Hari ini pukul] LT",nextDay:"[Esok pukul] LT",nextWeek:"dddd [pukul] LT",lastDay:"[Kelmarin pukul] LT",lastWeek:"dddd [lepas pukul] LT",sameElse:"L"},relativeTime:{future:"dalam %s",past:"%s yang lepas",s:"beberapa saat",ss:"%d saat",m:"seminit",mm:"%d minit",h:"sejam",hh:"%d jam",d:"sehari",dd:"%d hari",M:"sebulan",MM:"%d bulan",y:"setahun",yy:"%d tahun"},week:{dow:1,doy:7}})})(n(30381))},9847:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("ms",{months:"Januari_Februari_Mac_April_Mei_Jun_Julai_Ogos_September_Oktober_November_Disember".split("_"),monthsShort:"Jan_Feb_Mac_Apr_Mei_Jun_Jul_Ogs_Sep_Okt_Nov_Dis".split("_"),weekdays:"Ahad_Isnin_Selasa_Rabu_Khamis_Jumaat_Sabtu".split("_"),weekdaysShort:"Ahd_Isn_Sel_Rab_Kha_Jum_Sab".split("_"),weekdaysMin:"Ah_Is_Sl_Rb_Km_Jm_Sb".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY [pukul] HH.mm",LLLL:"dddd, D MMMM YYYY [pukul] HH.mm"},meridiemParse:/pagi|tengahari|petang|malam/,meridiemHour:function(e,t){return(12===e&&(e=0),"pagi"===t)?e:"tengahari"===t?e>=11?e:e+12:"petang"===t||"malam"===t?e+12:void 0},meridiem:function(e,t,n){return e<11?"pagi":e<15?"tengahari":e<19?"petang":"malam"},calendar:{sameDay:"[Hari ini pukul] LT",nextDay:"[Esok pukul] LT",nextWeek:"dddd [pukul] LT",lastDay:"[Kelmarin pukul] LT",lastWeek:"dddd [lepas pukul] LT",sameElse:"L"},relativeTime:{future:"dalam %s",past:"%s yang lepas",s:"beberapa saat",ss:"%d saat",m:"seminit",mm:"%d minit",h:"sejam",hh:"%d jam",d:"sehari",dd:"%d hari",M:"sebulan",MM:"%d bulan",y:"setahun",yy:"%d tahun"},week:{dow:1,doy:7}})})(n(30381))},72126:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("mt",{months:"Jannar_Frar_Marzu_April_Mejju_Ġunju_Lulju_Awwissu_Settembru_Ottubru_Novembru_Diċembru".split("_"),monthsShort:"Jan_Fra_Mar_Apr_Mej_Ġun_Lul_Aww_Set_Ott_Nov_Diċ".split("_"),weekdays:"Il-Ħadd_It-Tnejn_It-Tlieta_L-Erbgħa_Il-Ħamis_Il-Ġimgħa_Is-Sibt".split("_"),weekdaysShort:"Ħad_Tne_Tli_Erb_Ħam_Ġim_Sib".split("_"),weekdaysMin:"Ħa_Tn_Tl_Er_Ħa_Ġi_Si".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Illum fil-]LT",nextDay:"[Għada fil-]LT",nextWeek:"dddd [fil-]LT",lastDay:"[Il-bieraħ fil-]LT",lastWeek:"dddd [li għadda] [fil-]LT",sameElse:"L"},relativeTime:{future:"f’ %s",past:"%s ilu",s:"ftit sekondi",ss:"%d 
sekondi",m:"minuta",mm:"%d minuti",h:"siegħa",hh:"%d siegħat",d:"ġurnata",dd:"%d ġranet",M:"xahar",MM:"%d xhur",y:"sena",yy:"%d sni"},dayOfMonthOrdinalParse:/\d{1,2}º/,ordinal:"%d\xba",week:{dow:1,doy:4}})})(n(30381))},56165:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t={1:"၁",2:"၂",3:"၃",4:"၄",5:"၅",6:"၆",7:"၇",8:"၈",9:"၉",0:"၀"},n={"၁":"1","၂":"2","၃":"3","၄":"4","၅":"5","၆":"6","၇":"7","၈":"8","၉":"9","၀":"0"};return e.defineLocale("my",{months:"ဇန်နဝါရီ_ဖေဖော်ဝါရီ_မတ်_ဧပြီ_မေ_ဇွန်_ဇူလိုင်_သြဂုတ်_စက်တင်ဘာ_အောက်တိုဘာ_နိုဝင်ဘာ_ဒီဇင်ဘာ".split("_"),monthsShort:"ဇန်_ဖေ_မတ်_ပြီ_မေ_ဇွန်_လိုင်_သြ_စက်_အောက်_နို_ဒီ".split("_"),weekdays:"တနင်္ဂနွေ_တနင်္လာ_အင်္ဂါ_ဗုဒ္ဓဟူး_ကြာသပတေး_သောကြာ_စနေ".split("_"),weekdaysShort:"နွေ_လာ_ဂါ_ဟူး_ကြာ_သော_နေ".split("_"),weekdaysMin:"နွေ_လာ_ဂါ_ဟူး_ကြာ_သော_နေ".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[ယနေ.] 
LT [မှာ]",nextDay:"[မနက်ဖြန်] LT [မှာ]",nextWeek:"dddd LT [မှာ]",lastDay:"[မနေ.က] LT [မှာ]",lastWeek:"[ပြီးခဲ့သော] dddd LT [မှာ]",sameElse:"L"},relativeTime:{future:"လာမည့် %s မှာ",past:"လွန်ခဲ့သော %s က",s:"စက္ကန်.အနည်းငယ်",ss:"%d စက္ကန့်",m:"တစ်မိနစ်",mm:"%d မိနစ်",h:"တစ်နာရီ",hh:"%d နာရီ",d:"တစ်ရက်",dd:"%d ရက်",M:"တစ်လ",MM:"%d လ",y:"တစ်နှစ်",yy:"%d နှစ်"},preparse:function(e){return e.replace(/[၁၂၃၄၅၆၇၈၉၀]/g,function(e){return n[e]})},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]})},week:{dow:1,doy:4}})})(n(30381))},64924:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("nb",{months:"januar_februar_mars_april_mai_juni_juli_august_september_oktober_november_desember".split("_"),monthsShort:"jan._feb._mars_apr._mai_juni_juli_aug._sep._okt._nov._des.".split("_"),monthsParseExact:!0,weekdays:"s\xf8ndag_mandag_tirsdag_onsdag_torsdag_fredag_l\xf8rdag".split("_"),weekdaysShort:"s\xf8._ma._ti._on._to._fr._l\xf8.".split("_"),weekdaysMin:"s\xf8_ma_ti_on_to_fr_l\xf8".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY [kl.] HH:mm",LLLL:"dddd D. MMMM YYYY [kl.] HH:mm"},calendar:{sameDay:"[i dag kl.] LT",nextDay:"[i morgen kl.] LT",nextWeek:"dddd [kl.] LT",lastDay:"[i g\xe5r kl.] LT",lastWeek:"[forrige] dddd [kl.] LT",sameElse:"L"},relativeTime:{future:"om %s",past:"%s siden",s:"noen sekunder",ss:"%d sekunder",m:"ett minutt",mm:"%d minutter",h:"en time",hh:"%d timer",d:"en dag",dd:"%d dager",w:"en uke",ww:"%d uker",M:"en m\xe5ned",MM:"%d m\xe5neder",y:"ett \xe5r",yy:"%d \xe5r"},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},16744:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"१",2:"२",3:"३",4:"४",5:"५",6:"६",7:"७",8:"८",9:"९",0:"०"},n={"१":"1","२":"2","३":"3","४":"4","५":"5","६":"6","७":"7","८":"8","९":"9","०":"0"};return e.defineLocale("ne",{months:"जनवरी_फेब्रुवरी_मार्च_अप्रिल_मई_जुन_जुलाई_अगष्ट_सेप्टेम्बर_अक्टोबर_नोभेम्बर_डिसेम्बर".split("_"),monthsShort:"जन._फेब्रु._मार्च_अप्रि._मई_जुन_जुलाई._अग._सेप्ट._अक्टो._नोभे._डिसे.".split("_"),monthsParseExact:!0,weekdays:"आइतबार_सोमबार_मङ्गलबार_बुधबार_बिहिबार_शुक्रबार_शनिबार".split("_"),weekdaysShort:"आइत._सोम._मङ्गल._बुध._बिहि._शुक्र._शनि.".split("_"),weekdaysMin:"आ._सो._मं._बु._बि._शु._श.".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"Aको h:mm बजे",LTS:"Aको h:mm:ss बजे",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, Aको h:mm बजे",LLLL:"dddd, D MMMM YYYY, Aको h:mm बजे"},preparse:function(e){return e.replace(/[१२३४५६७८९०]/g,function(e){return n[e]})},postformat:function(e){return e.replace(/\d/g,function(e){return t[e]})},meridiemParse:/राति|बिहान|दिउँसो|साँझ/,meridiemHour:function(e,t){return(12===e&&(e=0),"राति"===t)?e<4?e:e+12:"बिहान"===t?e:"दिउँसो"===t?e>=10?e:e+12:"साँझ"===t?e+12:void 0},meridiem:function(e,t,n){return e<3?"राति":e<12?"बिहान":e<16?"दिउँसो":e<20?"साँझ":"राति"},calendar:{sameDay:"[आज] LT",nextDay:"[भोलि] LT",nextWeek:"[आउँदो] dddd[,] LT",lastDay:"[हिजो] LT",lastWeek:"[गएको] dddd[,] LT",sameElse:"L"},relativeTime:{future:"%sमा",past:"%s अगाडि",s:"केही क्षण",ss:"%d सेकेण्ड",m:"एक मिनेट",mm:"%d मिनेट",h:"एक घण्टा",hh:"%d घण्टा",d:"एक दिन",dd:"%d दिन",M:"एक महिना",MM:"%d महिना",y:"एक बर्ष",yy:"%d बर्ष"},week:{dow:0,doy:6}})})(n(30381))},59814:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t="jan._feb._mrt._apr._mei_jun._jul._aug._sep._okt._nov._dec.".split("_"),n="jan_feb_mrt_apr_mei_jun_jul_aug_sep_okt_nov_dec".split("_"),r=[/^jan/i,/^feb/i,/^maart|mrt.?$/i,/^apr/i,/^mei$/i,/^jun[i.]?$/i,/^jul[i.]?$/i,/^aug/i,/^sep/i,/^okt/i,/^nov/i,/^dec/i,],i=/^(januari|februari|maart|april|mei|ju[nl]i|augustus|september|oktober|november|december|jan\.?|feb\.?|mrt\.?|apr\.?|ju[nl]\.?|aug\.?|sep\.?|okt\.?|nov\.?|dec\.?)/i;return e.defineLocale("nl-be",{months:"januari_februari_maart_april_mei_juni_juli_augustus_september_oktober_november_december".split("_"),monthsShort:function(e,r){return e?/-MMM-/.test(r)?n[e.month()]:t[e.month()]:t},monthsRegex:i,monthsShortRegex:i,monthsStrictRegex:/^(januari|februari|maart|april|mei|ju[nl]i|augustus|september|oktober|november|december)/i,monthsShortStrictRegex:/^(jan\.?|feb\.?|mrt\.?|apr\.?|mei|ju[nl]\.?|aug\.?|sep\.?|okt\.?|nov\.?|dec\.?)/i,monthsParse:r,longMonthsParse:r,shortMonthsParse:r,weekdays:"zondag_maandag_dinsdag_woensdag_donderdag_vrijdag_zaterdag".split("_"),weekdaysShort:"zo._ma._di._wo._do._vr._za.".split("_"),weekdaysMin:"zo_ma_di_wo_do_vr_za".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[vandaag om] LT",nextDay:"[morgen om] LT",nextWeek:"dddd [om] LT",lastDay:"[gisteren om] LT",lastWeek:"[afgelopen] dddd [om] LT",sameElse:"L"},relativeTime:{future:"over %s",past:"%s geleden",s:"een paar seconden",ss:"%d seconden",m:"\xe9\xe9n minuut",mm:"%d minuten",h:"\xe9\xe9n uur",hh:"%d uur",d:"\xe9\xe9n dag",dd:"%d dagen",M:"\xe9\xe9n maand",MM:"%d maanden",y:"\xe9\xe9n jaar",yy:"%d jaar"},dayOfMonthOrdinalParse:/\d{1,2}(ste|de)/,ordinal:function(e){return e+(1===e||8===e||e>=20?"ste":"de")},week:{dow:1,doy:4}})})(n(30381))},93901:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t="jan._feb._mrt._apr._mei_jun._jul._aug._sep._okt._nov._dec.".split("_"),n="jan_feb_mrt_apr_mei_jun_jul_aug_sep_okt_nov_dec".split("_"),r=[/^jan/i,/^feb/i,/^maart|mrt.?$/i,/^apr/i,/^mei$/i,/^jun[i.]?$/i,/^jul[i.]?$/i,/^aug/i,/^sep/i,/^okt/i,/^nov/i,/^dec/i,],i=/^(januari|februari|maart|april|mei|ju[nl]i|augustus|september|oktober|november|december|jan\.?|feb\.?|mrt\.?|apr\.?|ju[nl]\.?|aug\.?|sep\.?|okt\.?|nov\.?|dec\.?)/i;return e.defineLocale("nl",{months:"januari_februari_maart_april_mei_juni_juli_augustus_september_oktober_november_december".split("_"),monthsShort:function(e,r){return e?/-MMM-/.test(r)?n[e.month()]:t[e.month()]:t},monthsRegex:i,monthsShortRegex:i,monthsStrictRegex:/^(januari|februari|maart|april|mei|ju[nl]i|augustus|september|oktober|november|december)/i,monthsShortStrictRegex:/^(jan\.?|feb\.?|mrt\.?|apr\.?|mei|ju[nl]\.?|aug\.?|sep\.?|okt\.?|nov\.?|dec\.?)/i,monthsParse:r,longMonthsParse:r,shortMonthsParse:r,weekdays:"zondag_maandag_dinsdag_woensdag_donderdag_vrijdag_zaterdag".split("_"),weekdaysShort:"zo._ma._di._wo._do._vr._za.".split("_"),weekdaysMin:"zo_ma_di_wo_do_vr_za".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD-MM-YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[vandaag om] LT",nextDay:"[morgen om] LT",nextWeek:"dddd [om] LT",lastDay:"[gisteren om] LT",lastWeek:"[afgelopen] dddd [om] LT",sameElse:"L"},relativeTime:{future:"over %s",past:"%s geleden",s:"een paar seconden",ss:"%d seconden",m:"\xe9\xe9n minuut",mm:"%d minuten",h:"\xe9\xe9n uur",hh:"%d uur",d:"\xe9\xe9n dag",dd:"%d dagen",w:"\xe9\xe9n week",ww:"%d weken",M:"\xe9\xe9n maand",MM:"%d maanden",y:"\xe9\xe9n jaar",yy:"%d jaar"},dayOfMonthOrdinalParse:/\d{1,2}(ste|de)/,ordinal:function(e){return e+(1===e||8===e||e>=20?"ste":"de")},week:{dow:1,doy:4}})})(n(30381))},83877:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("nn",{months:"januar_februar_mars_april_mai_juni_juli_august_september_oktober_november_desember".split("_"),monthsShort:"jan._feb._mars_apr._mai_juni_juli_aug._sep._okt._nov._des.".split("_"),monthsParseExact:!0,weekdays:"sundag_m\xe5ndag_tysdag_onsdag_torsdag_fredag_laurdag".split("_"),weekdaysShort:"su._m\xe5._ty._on._to._fr._lau.".split("_"),weekdaysMin:"su_m\xe5_ty_on_to_fr_la".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY [kl.] H:mm",LLLL:"dddd D. MMMM YYYY [kl.] HH:mm"},calendar:{sameDay:"[I dag klokka] LT",nextDay:"[I morgon klokka] LT",nextWeek:"dddd [klokka] LT",lastDay:"[I g\xe5r klokka] LT",lastWeek:"[F\xf8reg\xe5ande] dddd [klokka] LT",sameElse:"L"},relativeTime:{future:"om %s",past:"%s sidan",s:"nokre sekund",ss:"%d sekund",m:"eit minutt",mm:"%d minutt",h:"ein time",hh:"%d timar",d:"ein dag",dd:"%d dagar",w:"ei veke",ww:"%d veker",M:"ein m\xe5nad",MM:"%d m\xe5nader",y:"eit \xe5r",yy:"%d \xe5r"},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},92135:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("oc-lnc",{months:{standalone:"geni\xe8r_febri\xe8r_mar\xe7_abril_mai_junh_julhet_agost_setembre_oct\xf2bre_novembre_decembre".split("_"),format:"de geni\xe8r_de febri\xe8r_de mar\xe7_d'abril_de mai_de junh_de julhet_d'agost_de setembre_d'oct\xf2bre_de novembre_de decembre".split("_"),isFormat:/D[oD]?(\s)+MMMM/},monthsShort:"gen._febr._mar\xe7_abr._mai_junh_julh._ago._set._oct._nov._dec.".split("_"),monthsParseExact:!0,weekdays:"dimenge_diluns_dimars_dim\xe8cres_dij\xf2us_divendres_dissabte".split("_"),weekdaysShort:"dg._dl._dm._dc._dj._dv._ds.".split("_"),weekdaysMin:"dg_dl_dm_dc_dj_dv_ds".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM [de] YYYY",ll:"D MMM YYYY",LLL:"D MMMM [de] YYYY [a] H:mm",lll:"D MMM YYYY, H:mm",LLLL:"dddd D MMMM [de] YYYY [a] H:mm",llll:"ddd 
D MMM YYYY, H:mm"},calendar:{sameDay:"[u\xe8i a] LT",nextDay:"[deman a] LT",nextWeek:"dddd [a] LT",lastDay:"[i\xe8r a] LT",lastWeek:"dddd [passat a] LT",sameElse:"L"},relativeTime:{future:"d'aqu\xed %s",past:"fa %s",s:"unas segondas",ss:"%d segondas",m:"una minuta",mm:"%d minutas",h:"una ora",hh:"%d oras",d:"un jorn",dd:"%d jorns",M:"un mes",MM:"%d meses",y:"un an",yy:"%d ans"},dayOfMonthOrdinalParse:/\d{1,2}(r|n|t|è|a)/,ordinal:function(e,t){var n=1===e?"r":2===e?"n":3===e?"r":4===e?"t":"\xe8";return("w"===t||"W"===t)&&(n="a"),e+n},week:{dow:1,doy:4}})})(n(30381))},15858:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t={1:"੧",2:"੨",3:"੩",4:"੪",5:"੫",6:"੬",7:"੭",8:"੮",9:"੯",0:"੦"},n={"੧":"1","੨":"2","੩":"3","੪":"4","੫":"5","੬":"6","੭":"7","੮":"8","੯":"9","੦":"0"};return e.defineLocale("pa-in",{months:"ਜਨਵਰੀ_ਫ਼ਰਵਰੀ_ਮਾਰਚ_ਅਪ੍ਰੈਲ_ਮਈ_ਜੂਨ_ਜੁਲਾਈ_ਅਗਸਤ_ਸਤੰਬਰ_ਅਕਤੂਬਰ_ਨਵੰਬਰ_ਦਸੰਬਰ".split("_"),monthsShort:"ਜਨਵਰੀ_ਫ਼ਰਵਰੀ_ਮਾਰਚ_ਅਪ੍ਰੈਲ_ਮਈ_ਜੂਨ_ਜੁਲਾਈ_ਅਗਸਤ_ਸਤੰਬਰ_ਅਕਤੂਬਰ_ਨਵੰਬਰ_ਦਸੰਬਰ".split("_"),weekdays:"ਐਤਵਾਰ_ਸੋਮਵਾਰ_ਮੰਗਲਵਾਰ_ਬੁਧਵਾਰ_ਵੀਰਵਾਰ_ਸ਼ੁੱਕਰਵਾਰ_ਸ਼ਨੀਚਰਵਾਰ".split("_"),weekdaysShort:"ਐਤ_ਸੋਮ_ਮੰਗਲ_ਬੁਧ_ਵੀਰ_ਸ਼ੁਕਰ_ਸ਼ਨੀ".split("_"),weekdaysMin:"ਐਤ_ਸੋਮ_ਮੰਗਲ_ਬੁਧ_ਵੀਰ_ਸ਼ੁਕਰ_ਸ਼ਨੀ".split("_"),longDateFormat:{LT:"A h:mm ਵਜੇ",LTS:"A h:mm:ss ਵਜੇ",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm ਵਜੇ",LLLL:"dddd, D MMMM YYYY, A h:mm ਵਜੇ"},calendar:{sameDay:"[ਅਜ] LT",nextDay:"[ਕਲ] LT",nextWeek:"[ਅਗਲਾ] dddd, LT",lastDay:"[ਕਲ] LT",lastWeek:"[ਪਿਛਲੇ] dddd, LT",sameElse:"L"},relativeTime:{future:"%s ਵਿੱਚ",past:"%s ਪਿਛਲੇ",s:"ਕੁਝ ਸਕਿੰਟ",ss:"%d ਸਕਿੰਟ",m:"ਇਕ ਮਿੰਟ",mm:"%d ਮਿੰਟ",h:"ਇੱਕ ਘੰਟਾ",hh:"%d ਘੰਟੇ",d:"ਇੱਕ ਦਿਨ",dd:"%d ਦਿਨ",M:"ਇੱਕ ਮਹੀਨਾ",MM:"%d ਮਹੀਨੇ",y:"ਇੱਕ ਸਾਲ",yy:"%d ਸਾਲ"},preparse:function(e){return e.replace(/[੧੨੩੪੫੬੭੮੯੦]/g,function(e){return n[e]})},postformat:function(e){return e.replace(/\d/g,function(e){return 
t[e]})},meridiemParse:/ਰਾਤ|ਸਵੇਰ|ਦੁਪਹਿਰ|ਸ਼ਾਮ/,meridiemHour:function(e,t){return(12===e&&(e=0),"ਰਾਤ"===t)?e<4?e:e+12:"ਸਵੇਰ"===t?e:"ਦੁਪਹਿਰ"===t?e>=10?e:e+12:"ਸ਼ਾਮ"===t?e+12:void 0},meridiem:function(e,t,n){return e<4?"ਰਾਤ":e<10?"ਸਵੇਰ":e<17?"ਦੁਪਹਿਰ":e<20?"ਸ਼ਾਮ":"ਰਾਤ"},week:{dow:0,doy:6}})})(n(30381))},64495:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t="styczeń_luty_marzec_kwiecień_maj_czerwiec_lipiec_sierpień_wrzesień_październik_listopad_grudzień".split("_"),n="stycznia_lutego_marca_kwietnia_maja_czerwca_lipca_sierpnia_września_października_listopada_grudnia".split("_"),r=[/^sty/i,/^lut/i,/^mar/i,/^kwi/i,/^maj/i,/^cze/i,/^lip/i,/^sie/i,/^wrz/i,/^paź/i,/^lis/i,/^gru/i,];function i(e){return e%10<5&&e%10>1&&~~(e/10)%10!=1}function a(e,t,n){var r=e+" ";switch(n){case"ss":return r+(i(e)?"sekundy":"sekund");case"m":return t?"minuta":"minutę";case"mm":return r+(i(e)?"minuty":"minut");case"h":return t?"godzina":"godzinę";case"hh":return r+(i(e)?"godziny":"godzin");case"ww":return r+(i(e)?"tygodnie":"tygodni");case"MM":return r+(i(e)?"miesiące":"miesięcy");case"yy":return r+(i(e)?"lata":"lat")}}return e.defineLocale("pl",{months:function(e,r){return e?/D MMMM/.test(r)?n[e.month()]:t[e.month()]:t},monthsShort:"sty_lut_mar_kwi_maj_cze_lip_sie_wrz_paź_lis_gru".split("_"),monthsParse:r,longMonthsParse:r,shortMonthsParse:r,weekdays:"niedziela_poniedziałek_wtorek_środa_czwartek_piątek_sobota".split("_"),weekdaysShort:"ndz_pon_wt_śr_czw_pt_sob".split("_"),weekdaysMin:"Nd_Pn_Wt_Śr_Cz_Pt_So".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Dziś o] LT",nextDay:"[Jutro o] LT",nextWeek:function(){switch(this.day()){case 0:return"[W niedzielę o] LT";case 2:return"[We wtorek o] LT";case 3:return"[W środę o] LT";case 6:return"[W sobotę o] LT";default:return"[W] dddd [o] LT"}},lastDay:"[Wczoraj o] 
LT",lastWeek:function(){switch(this.day()){case 0:return"[W zeszłą niedzielę o] LT";case 3:return"[W zeszłą środę o] LT";case 6:return"[W zeszłą sobotę o] LT";default:return"[W zeszły] dddd [o] LT"}},sameElse:"L"},relativeTime:{future:"za %s",past:"%s temu",s:"kilka sekund",ss:a,m:a,mm:a,h:a,hh:a,d:"1 dzień",dd:"%d dni",w:"tydzień",ww:a,M:"miesiąc",MM:a,y:"rok",yy:a},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},57971:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("pt-br",{months:"janeiro_fevereiro_mar\xe7o_abril_maio_junho_julho_agosto_setembro_outubro_novembro_dezembro".split("_"),monthsShort:"jan_fev_mar_abr_mai_jun_jul_ago_set_out_nov_dez".split("_"),weekdays:"domingo_segunda-feira_ter\xe7a-feira_quarta-feira_quinta-feira_sexta-feira_s\xe1bado".split("_"),weekdaysShort:"dom_seg_ter_qua_qui_sex_s\xe1b".split("_"),weekdaysMin:"do_2\xaa_3\xaa_4\xaa_5\xaa_6\xaa_s\xe1".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D [de] MMMM [de] YYYY",LLL:"D [de] MMMM [de] YYYY [\xe0s] HH:mm",LLLL:"dddd, D [de] MMMM [de] YYYY [\xe0s] HH:mm"},calendar:{sameDay:"[Hoje \xe0s] LT",nextDay:"[Amanh\xe3 \xe0s] LT",nextWeek:"dddd [\xe0s] LT",lastDay:"[Ontem \xe0s] LT",lastWeek:function(){return 0===this.day()||6===this.day()?"[\xdaltimo] dddd [\xe0s] LT":"[\xdaltima] dddd [\xe0s] LT"},sameElse:"L"},relativeTime:{future:"em %s",past:"h\xe1 %s",s:"poucos segundos",ss:"%d segundos",m:"um minuto",mm:"%d minutos",h:"uma hora",hh:"%d horas",d:"um dia",dd:"%d dias",M:"um m\xeas",MM:"%d meses",y:"um ano",yy:"%d anos"},dayOfMonthOrdinalParse:/\d{1,2}º/,ordinal:"%d\xba",invalidDate:"Data inv\xe1lida"})})(n(30381))},89520:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("pt",{months:"janeiro_fevereiro_mar\xe7o_abril_maio_junho_julho_agosto_setembro_outubro_novembro_dezembro".split("_"),monthsShort:"jan_fev_mar_abr_mai_jun_jul_ago_set_out_nov_dez".split("_"),weekdays:"Domingo_Segunda-feira_Ter\xe7a-feira_Quarta-feira_Quinta-feira_Sexta-feira_S\xe1bado".split("_"),weekdaysShort:"Dom_Seg_Ter_Qua_Qui_Sex_S\xe1b".split("_"),weekdaysMin:"Do_2\xaa_3\xaa_4\xaa_5\xaa_6\xaa_S\xe1".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D [de] MMMM [de] YYYY",LLL:"D [de] MMMM [de] YYYY HH:mm",LLLL:"dddd, D [de] MMMM [de] YYYY HH:mm"},calendar:{sameDay:"[Hoje \xe0s] LT",nextDay:"[Amanh\xe3 \xe0s] LT",nextWeek:"dddd [\xe0s] LT",lastDay:"[Ontem \xe0s] LT",lastWeek:function(){return 0===this.day()||6===this.day()?"[\xdaltimo] dddd [\xe0s] LT":"[\xdaltima] dddd [\xe0s] LT"},sameElse:"L"},relativeTime:{future:"em %s",past:"h\xe1 %s",s:"segundos",ss:"%d segundos",m:"um minuto",mm:"%d minutos",h:"uma hora",hh:"%d horas",d:"um dia",dd:"%d dias",w:"uma semana",ww:"%d semanas",M:"um m\xeas",MM:"%d meses",y:"um ano",yy:"%d anos"},dayOfMonthOrdinalParse:/\d{1,2}º/,ordinal:"%d\xba",week:{dow:1,doy:4}})})(n(30381))},96459:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +function t(e,t,n){var r=" ";return(e%100>=20||e>=100&&e%100==0)&&(r=" de "),e+r+({ss:"secunde",mm:"minute",hh:"ore",dd:"zile",ww:"săptăm\xe2ni",MM:"luni",yy:"ani"})[n]}return e.defineLocale("ro",{months:"ianuarie_februarie_martie_aprilie_mai_iunie_iulie_august_septembrie_octombrie_noiembrie_decembrie".split("_"),monthsShort:"ian._feb._mart._apr._mai_iun._iul._aug._sept._oct._nov._dec.".split("_"),monthsParseExact:!0,weekdays:"duminică_luni_marți_miercuri_joi_vineri_s\xe2mbătă".split("_"),weekdaysShort:"Dum_Lun_Mar_Mie_Joi_Vin_S\xe2m".split("_"),weekdaysMin:"Du_Lu_Ma_Mi_Jo_Vi_S\xe2".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY H:mm",LLLL:"dddd, D MMMM YYYY H:mm"},calendar:{sameDay:"[azi la] LT",nextDay:"[m\xe2ine la] LT",nextWeek:"dddd [la] LT",lastDay:"[ieri la] LT",lastWeek:"[fosta] dddd [la] LT",sameElse:"L"},relativeTime:{future:"peste %s",past:"%s \xeen urmă",s:"c\xe2teva secunde",ss:t,m:"un minut",mm:t,h:"o oră",hh:t,d:"o zi",dd:t,w:"o săptăm\xe2nă",ww:t,M:"o lună",MM:t,y:"un an",yy:t},week:{dow:1,doy:7}})})(n(30381))},21793:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +function t(e,t){var n=e.split("_");return t%10==1&&t%100!=11?n[0]:t%10>=2&&t%10<=4&&(t%100<10||t%100>=20)?n[1]:n[2]}function n(e,n,r){var i={ss:n?"секунда_секунды_секунд":"секунду_секунды_секунд",mm:n?"минута_минуты_минут":"минуту_минуты_минут",hh:"час_часа_часов",dd:"день_дня_дней",ww:"неделя_недели_недель",MM:"месяц_месяца_месяцев",yy:"год_года_лет"};return"m"===r?n?"минута":"минуту":e+" "+t(i[r],+e)}var r=[/^янв/i,/^фев/i,/^мар/i,/^апр/i,/^ма[йя]/i,/^июн/i,/^июл/i,/^авг/i,/^сен/i,/^окт/i,/^ноя/i,/^дек/i,];return e.defineLocale("ru",{months:{format:"января_февраля_марта_апреля_мая_июня_июля_августа_сентября_октября_ноября_декабря".split("_"),standalone:"январь_февраль_март_апрель_май_июнь_июль_август_сентябрь_октябрь_ноябрь_декабрь".split("_")},monthsShort:{format:"янв._февр._мар._апр._мая_июня_июля_авг._сент._окт._нояб._дек.".split("_"),standalone:"янв._февр._март_апр._май_июнь_июль_авг._сент._окт._нояб._дек.".split("_")},weekdays:{standalone:"воскресенье_понедельник_вторник_среда_четверг_пятница_суббота".split("_"),format:"воскресенье_понедельник_вторник_среду_четверг_пятницу_субботу".split("_"),isFormat:/\[ ?[Вв] ?(?:прошлую|следующую|эту)? ?] 
?dddd/},weekdaysShort:"вс_пн_вт_ср_чт_пт_сб".split("_"),weekdaysMin:"вс_пн_вт_ср_чт_пт_сб".split("_"),monthsParse:r,longMonthsParse:r,shortMonthsParse:r,monthsRegex:/^(январ[ья]|янв\.?|феврал[ья]|февр?\.?|марта?|мар\.?|апрел[ья]|апр\.?|ма[йя]|июн[ья]|июн\.?|июл[ья]|июл\.?|августа?|авг\.?|сентябр[ья]|сент?\.?|октябр[ья]|окт\.?|ноябр[ья]|нояб?\.?|декабр[ья]|дек\.?)/i,monthsShortRegex:/^(январ[ья]|янв\.?|феврал[ья]|февр?\.?|марта?|мар\.?|апрел[ья]|апр\.?|ма[йя]|июн[ья]|июн\.?|июл[ья]|июл\.?|августа?|авг\.?|сентябр[ья]|сент?\.?|октябр[ья]|окт\.?|ноябр[ья]|нояб?\.?|декабр[ья]|дек\.?)/i,monthsStrictRegex:/^(январ[яь]|феврал[яь]|марта?|апрел[яь]|ма[яй]|июн[яь]|июл[яь]|августа?|сентябр[яь]|октябр[яь]|ноябр[яь]|декабр[яь])/i,monthsShortStrictRegex:/^(янв\.|февр?\.|мар[т.]|апр\.|ма[яй]|июн[ья.]|июл[ья.]|авг\.|сент?\.|окт\.|нояб?\.|дек\.)/i,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY г.",LLL:"D MMMM YYYY г., H:mm",LLLL:"dddd, D MMMM YYYY г., H:mm"},calendar:{sameDay:"[Сегодня, в] LT",nextDay:"[Завтра, в] LT",lastDay:"[Вчера, в] LT",nextWeek:function(e){if(e.week()===this.week())return 2===this.day()?"[Во] dddd, [в] LT":"[В] dddd, [в] LT";switch(this.day()){case 0:return"[В следующее] dddd, [в] LT";case 1:case 2:case 4:return"[В следующий] dddd, [в] LT";case 3:case 5:case 6:return"[В следующую] dddd, [в] LT"}},lastWeek:function(e){if(e.week()===this.week())return 2===this.day()?"[Во] dddd, [в] LT":"[В] dddd, [в] LT";switch(this.day()){case 0:return"[В прошлое] dddd, [в] LT";case 1:case 2:case 4:return"[В прошлый] dddd, [в] LT";case 3:case 5:case 6:return"[В прошлую] dddd, [в] LT"}},sameElse:"L"},relativeTime:{future:"через %s",past:"%s назад",s:"несколько секунд",ss:n,m:n,mm:n,h:"час",hh:n,d:"день",dd:n,w:"неделя",ww:n,M:"месяц",MM:n,y:"год",yy:n},meridiemParse:/ночи|утра|дня|вечера/i,isPM:function(e){return/^(дня|вечера)$/.test(e)},meridiem:function(e,t,n){return 
e<4?"ночи":e<12?"утра":e<17?"дня":"вечера"},dayOfMonthOrdinalParse:/\d{1,2}-(й|го|я)/,ordinal:function(e,t){switch(t){case"M":case"d":case"DDD":return e+"-й";case"D":return e+"-го";case"w":case"W":return e+"-я";default:return e}},week:{dow:1,doy:4}})})(n(30381))},40950:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t=["جنوري","فيبروري","مارچ","اپريل","مئي","جون","جولاءِ","آگسٽ","سيپٽمبر","آڪٽوبر","نومبر","ڊسمبر",],n=["آچر","سومر","اڱارو","اربع","خميس","جمع","ڇنڇر"];return e.defineLocale("sd",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd، D MMMM YYYY HH:mm"},meridiemParse:/صبح|شام/,isPM:function(e){return"شام"===e},meridiem:function(e,t,n){return e<12?"صبح":"شام"},calendar:{sameDay:"[اڄ] LT",nextDay:"[سڀاڻي] LT",nextWeek:"dddd [اڳين هفتي تي] LT",lastDay:"[ڪالهه] LT",lastWeek:"[گزريل هفتي] dddd [تي] LT",sameElse:"L"},relativeTime:{future:"%s پوء",past:"%s اڳ",s:"چند سيڪنڊ",ss:"%d سيڪنڊ",m:"هڪ منٽ",mm:"%d منٽ",h:"هڪ ڪلاڪ",hh:"%d ڪلاڪ",d:"هڪ ڏينهن",dd:"%d ڏينهن",M:"هڪ مهينو",MM:"%d مهينا",y:"هڪ سال",yy:"%d سال"},preparse:function(e){return e.replace(/،/g,",")},postformat:function(e){return e.replace(/,/g,"،")},week:{dow:1,doy:4}})})(n(30381))},10490:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("se",{months:"ođđajagem\xe1nnu_guovvam\xe1nnu_njukčam\xe1nnu_cuoŋom\xe1nnu_miessem\xe1nnu_geassem\xe1nnu_suoidnem\xe1nnu_borgem\xe1nnu_čakčam\xe1nnu_golggotm\xe1nnu_sk\xe1bmam\xe1nnu_juovlam\xe1nnu".split("_"),monthsShort:"ođđj_guov_njuk_cuo_mies_geas_suoi_borg_čakč_golg_sk\xe1b_juov".split("_"),weekdays:"sotnabeaivi_vuoss\xe1rga_maŋŋeb\xe1rga_gaskavahkku_duorastat_bearjadat_l\xe1vvardat".split("_"),weekdaysShort:"sotn_vuos_maŋ_gask_duor_bear_l\xe1v".split("_"),weekdaysMin:"s_v_m_g_d_b_L".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"MMMM D. [b.] 
YYYY",LLL:"MMMM D. [b.] YYYY [ti.] HH:mm",LLLL:"dddd, MMMM D. [b.] YYYY [ti.] HH:mm"},calendar:{sameDay:"[otne ti] LT",nextDay:"[ihttin ti] LT",nextWeek:"dddd [ti] LT",lastDay:"[ikte ti] LT",lastWeek:"[ovddit] dddd [ti] LT",sameElse:"L"},relativeTime:{future:"%s geažes",past:"maŋit %s",s:"moadde sekunddat",ss:"%d sekunddat",m:"okta minuhta",mm:"%d minuhtat",h:"okta diimmu",hh:"%d diimmut",d:"okta beaivi",dd:"%d beaivvit",M:"okta m\xe1nnu",MM:"%d m\xe1nut",y:"okta jahki",yy:"%d jagit"},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},90124:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("si",{months:"ජනවාරි_පෙබරවාරි_මාර්තු_අප්‍රේල්_මැයි_ජූනි_ජූලි_අගෝස්තු_සැප්තැම්බර්_ඔක්තෝබර්_නොවැම්බර්_දෙසැම්බර්".split("_"),monthsShort:"ජන_පෙබ_මාර්_අප්_මැයි_ජූනි_ජූලි_අගෝ_සැප්_ඔක්_නොවැ_දෙසැ".split("_"),weekdays:"ඉරිදා_සඳුදා_අඟහරුවාදා_බදාදා_බ්‍රහස්පතින්දා_සිකුරාදා_සෙනසුරාදා".split("_"),weekdaysShort:"ඉරි_සඳු_අඟ_බදා_බ්‍රහ_සිකු_සෙන".split("_"),weekdaysMin:"ඉ_ස_අ_බ_බ්‍ර_සි_සෙ".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"a h:mm",LTS:"a h:mm:ss",L:"YYYY/MM/DD",LL:"YYYY MMMM D",LLL:"YYYY MMMM D, a h:mm",LLLL:"YYYY MMMM D [වැනි] dddd, a h:mm:ss"},calendar:{sameDay:"[අද] LT[ට]",nextDay:"[හෙට] LT[ට]",nextWeek:"dddd LT[ට]",lastDay:"[ඊයේ] LT[ට]",lastWeek:"[පසුගිය] dddd LT[ට]",sameElse:"L"},relativeTime:{future:"%sකින්",past:"%sකට පෙර",s:"තත්පර කිහිපය",ss:"තත්පර %d",m:"මිනිත්තුව",mm:"මිනිත්තු %d",h:"පැය",hh:"පැය %d",d:"දිනය",dd:"දින %d",M:"මාසය",MM:"මාස %d",y:"වසර",yy:"වසර %d"},dayOfMonthOrdinalParse:/\d{1,2} වැනි/,ordinal:function(e){return e+" වැනි"},meridiemParse:/පෙර වරු|පස් වරු|පෙ.ව|ප.ව./,isPM:function(e){return"ප.ව."===e||"පස් වරු"===e},meridiem:function(e,t,n){return e>11?n?"ප.ව.":"පස් වරු":n?"පෙ.ව.":"පෙර වරු"}})})(n(30381))},64249:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t="janu\xe1r_febru\xe1r_marec_apr\xedl_m\xe1j_j\xfan_j\xfal_august_september_okt\xf3ber_november_december".split("_"),n="jan_feb_mar_apr_m\xe1j_j\xfan_j\xfal_aug_sep_okt_nov_dec".split("_");function r(e){return e>1&&e<5}function i(e,t,n,i){var a=e+" ";switch(n){case"s":return t||i?"p\xe1r sek\xfand":"p\xe1r sekundami";case"ss":if(t||i)return a+(r(e)?"sekundy":"sek\xfand");return a+"sekundami";case"m":return t?"min\xfata":i?"min\xfatu":"min\xfatou";case"mm":if(t||i)return a+(r(e)?"min\xfaty":"min\xfat");return a+"min\xfatami";case"h":return t?"hodina":i?"hodinu":"hodinou";case"hh":if(t||i)return a+(r(e)?"hodiny":"hod\xedn");return a+"hodinami";case"d":return t||i?"deň":"dňom";case"dd":if(t||i)return a+(r(e)?"dni":"dn\xed");return a+"dňami";case"M":return t||i?"mesiac":"mesiacom";case"MM":if(t||i)return a+(r(e)?"mesiace":"mesiacov");return a+"mesiacmi";case"y":return t||i?"rok":"rokom";case"yy":if(t||i)return a+(r(e)?"roky":"rokov");return a+"rokmi"}}return e.defineLocale("sk",{months:t,monthsShort:n,weekdays:"nedeľa_pondelok_utorok_streda_štvrtok_piatok_sobota".split("_"),weekdaysShort:"ne_po_ut_st_št_pi_so".split("_"),weekdaysMin:"ne_po_ut_st_št_pi_so".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd D. 
MMMM YYYY H:mm"},calendar:{sameDay:"[dnes o] LT",nextDay:"[zajtra o] LT",nextWeek:function(){switch(this.day()){case 0:return"[v nedeľu o] LT";case 1:case 2:return"[v] dddd [o] LT";case 3:return"[v stredu o] LT";case 4:return"[vo štvrtok o] LT";case 5:return"[v piatok o] LT";case 6:return"[v sobotu o] LT"}},lastDay:"[včera o] LT",lastWeek:function(){switch(this.day()){case 0:return"[minul\xfa nedeľu o] LT";case 1:case 2:case 4:case 5:return"[minul\xfd] dddd [o] LT";case 3:return"[minul\xfa stredu o] LT";case 6:return"[minul\xfa sobotu o] LT"}},sameElse:"L"},relativeTime:{future:"za %s",past:"pred %s",s:i,ss:i,m:i,mm:i,h:i,hh:i,d:i,dd:i,M:i,MM:i,y:i,yy:i},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},14985:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +function t(e,t,n,r){var i=e+" ";switch(n){case"s":return t||r?"nekaj sekund":"nekaj sekundami";case"ss":return 1===e?i+=t?"sekundo":"sekundi":2===e?i+=t||r?"sekundi":"sekundah":e<5?i+=t||r?"sekunde":"sekundah":i+="sekund",i;case"m":return t?"ena minuta":"eno minuto";case"mm":return 1===e?i+=t?"minuta":"minuto":2===e?i+=t||r?"minuti":"minutama":e<5?i+=t||r?"minute":"minutami":i+=t||r?"minut":"minutami",i;case"h":return t?"ena ura":"eno uro";case"hh":return 1===e?i+=t?"ura":"uro":2===e?i+=t||r?"uri":"urama":e<5?i+=t||r?"ure":"urami":i+=t||r?"ur":"urami",i;case"d":return t||r?"en dan":"enim dnem";case"dd":return 1===e?i+=t||r?"dan":"dnem":2===e?i+=t||r?"dni":"dnevoma":i+=t||r?"dni":"dnevi",i;case"M":return t||r?"en mesec":"enim mesecem";case"MM":return 1===e?i+=t||r?"mesec":"mesecem":2===e?i+=t||r?"meseca":"mesecema":e<5?i+=t||r?"mesece":"meseci":i+=t||r?"mesecev":"meseci",i;case"y":return t||r?"eno leto":"enim letom";case"yy":return 1===e?i+=t||r?"leto":"letom":2===e?i+=t||r?"leti":"letoma":e<5?i+=t||r?"leta":"leti":i+=t||r?"let":"leti",i}}return 
e.defineLocale("sl",{months:"januar_februar_marec_april_maj_junij_julij_avgust_september_oktober_november_december".split("_"),monthsShort:"jan._feb._mar._apr._maj._jun._jul._avg._sep._okt._nov._dec.".split("_"),monthsParseExact:!0,weekdays:"nedelja_ponedeljek_torek_sreda_četrtek_petek_sobota".split("_"),weekdaysShort:"ned._pon._tor._sre._čet._pet._sob.".split("_"),weekdaysMin:"ne_po_to_sr_če_pe_so".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD. MM. YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd, D. MMMM YYYY H:mm"},calendar:{sameDay:"[danes ob] LT",nextDay:"[jutri ob] LT",nextWeek:function(){switch(this.day()){case 0:return"[v] [nedeljo] [ob] LT";case 3:return"[v] [sredo] [ob] LT";case 6:return"[v] [soboto] [ob] LT";case 1:case 2:case 4:case 5:return"[v] dddd [ob] LT"}},lastDay:"[včeraj ob] LT",lastWeek:function(){switch(this.day()){case 0:return"[prejšnjo] [nedeljo] [ob] LT";case 3:return"[prejšnjo] [sredo] [ob] LT";case 6:return"[prejšnjo] [soboto] [ob] LT";case 1:case 2:case 4:case 5:return"[prejšnji] dddd [ob] LT"}},sameElse:"L"},relativeTime:{future:"čez %s",past:"pred %s",s:t,ss:t,m:t,mm:t,h:t,hh:t,d:t,dd:t,M:t,MM:t,y:t,yy:t},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}})})(n(30381))},51104:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("sq",{months:"Janar_Shkurt_Mars_Prill_Maj_Qershor_Korrik_Gusht_Shtator_Tetor_N\xebntor_Dhjetor".split("_"),monthsShort:"Jan_Shk_Mar_Pri_Maj_Qer_Kor_Gus_Sht_Tet_N\xebn_Dhj".split("_"),weekdays:"E Diel_E H\xebn\xeb_E Mart\xeb_E M\xebrkur\xeb_E Enjte_E Premte_E Shtun\xeb".split("_"),weekdaysShort:"Die_H\xebn_Mar_M\xebr_Enj_Pre_Sht".split("_"),weekdaysMin:"D_H_Ma_M\xeb_E_P_Sh".split("_"),weekdaysParseExact:!0,meridiemParse:/PD|MD/,isPM:function(e){return"M"===e.charAt(0)},meridiem:function(e,t,n){return e<12?"PD":"MD"},longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY 
HH:mm"},calendar:{sameDay:"[Sot n\xeb] LT",nextDay:"[Nes\xebr n\xeb] LT",nextWeek:"dddd [n\xeb] LT",lastDay:"[Dje n\xeb] LT",lastWeek:"dddd [e kaluar n\xeb] LT",sameElse:"L"},relativeTime:{future:"n\xeb %s",past:"%s m\xeb par\xeb",s:"disa sekonda",ss:"%d sekonda",m:"nj\xeb minut\xeb",mm:"%d minuta",h:"nj\xeb or\xeb",hh:"%d or\xeb",d:"nj\xeb dit\xeb",dd:"%d dit\xeb",M:"nj\xeb muaj",MM:"%d muaj",y:"nj\xeb vit",yy:"%d vite"},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},79915:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t={words:{ss:["секунда","секунде","секунди"],m:["један минут","једне минуте"],mm:["минут","минуте","минута"],h:["један сат","једног сата"],hh:["сат","сата","сати"],dd:["дан","дана","дана"],MM:["месец","месеца","месеци"],yy:["година","године","година"]},correctGrammaticalCase:function(e,t){return 1===e?t[0]:e>=2&&e<=4?t[1]:t[2]},translate:function(e,n,r){var i=t.words[r];return 1===r.length?n?i[0]:i[1]:e+" "+t.correctGrammaticalCase(e,i)}};return e.defineLocale("sr-cyrl",{months:"јануар_фебруар_март_април_мај_јун_јул_август_септембар_октобар_новембар_децембар".split("_"),monthsShort:"јан._феб._мар._апр._мај_јун_јул_авг._сеп._окт._нов._дец.".split("_"),monthsParseExact:!0,weekdays:"недеља_понедељак_уторак_среда_четвртак_петак_субота".split("_"),weekdaysShort:"нед._пон._уто._сре._чет._пет._суб.".split("_"),weekdaysMin:"не_по_ут_ср_че_пе_су".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"D. M. YYYY.",LL:"D. MMMM YYYY.",LLL:"D. MMMM YYYY. H:mm",LLLL:"dddd, D. MMMM YYYY. 
H:mm"},calendar:{sameDay:"[данас у] LT",nextDay:"[сутра у] LT",nextWeek:function(){switch(this.day()){case 0:return"[у] [недељу] [у] LT";case 3:return"[у] [среду] [у] LT";case 6:return"[у] [суботу] [у] LT";case 1:case 2:case 4:case 5:return"[у] dddd [у] LT"}},lastDay:"[јуче у] LT",lastWeek:function(){return["[прошле] [недеље] [у] LT","[прошлог] [понедељка] [у] LT","[прошлог] [уторка] [у] LT","[прошле] [среде] [у] LT","[прошлог] [четвртка] [у] LT","[прошлог] [петка] [у] LT","[прошле] [суботе] [у] LT",][this.day()]},sameElse:"L"},relativeTime:{future:"за %s",past:"пре %s",s:"неколико секунди",ss:t.translate,m:t.translate,mm:t.translate,h:t.translate,hh:t.translate,d:"дан",dd:t.translate,M:"месец",MM:t.translate,y:"годину",yy:t.translate},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}})})(n(30381))},49131:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t={words:{ss:["sekunda","sekunde","sekundi"],m:["jedan minut","jedne minute"],mm:["minut","minute","minuta"],h:["jedan sat","jednog sata"],hh:["sat","sata","sati"],dd:["dan","dana","dana"],MM:["mesec","meseca","meseci"],yy:["godina","godine","godina"]},correctGrammaticalCase:function(e,t){return 1===e?t[0]:e>=2&&e<=4?t[1]:t[2]},translate:function(e,n,r){var i=t.words[r];return 1===r.length?n?i[0]:i[1]:e+" "+t.correctGrammaticalCase(e,i)}};return e.defineLocale("sr",{months:"januar_februar_mart_april_maj_jun_jul_avgust_septembar_oktobar_novembar_decembar".split("_"),monthsShort:"jan._feb._mar._apr._maj_jun_jul_avg._sep._okt._nov._dec.".split("_"),monthsParseExact:!0,weekdays:"nedelja_ponedeljak_utorak_sreda_četvrtak_petak_subota".split("_"),weekdaysShort:"ned._pon._uto._sre._čet._pet._sub.".split("_"),weekdaysMin:"ne_po_ut_sr_če_pe_su".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"D. M. YYYY.",LL:"D. MMMM YYYY.",LLL:"D. MMMM YYYY. H:mm",LLLL:"dddd, D. MMMM YYYY. 
H:mm"},calendar:{sameDay:"[danas u] LT",nextDay:"[sutra u] LT",nextWeek:function(){switch(this.day()){case 0:return"[u] [nedelju] [u] LT";case 3:return"[u] [sredu] [u] LT";case 6:return"[u] [subotu] [u] LT";case 1:case 2:case 4:case 5:return"[u] dddd [u] LT"}},lastDay:"[juče u] LT",lastWeek:function(){return["[prošle] [nedelje] [u] LT","[prošlog] [ponedeljka] [u] LT","[prošlog] [utorka] [u] LT","[prošle] [srede] [u] LT","[prošlog] [četvrtka] [u] LT","[prošlog] [petka] [u] LT","[prošle] [subote] [u] LT",][this.day()]},sameElse:"L"},relativeTime:{future:"za %s",past:"pre %s",s:"nekoliko sekundi",ss:t.translate,m:t.translate,mm:t.translate,h:t.translate,hh:t.translate,d:"dan",dd:t.translate,M:"mesec",MM:t.translate,y:"godinu",yy:t.translate},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}})})(n(30381))},85893:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("ss",{months:"Bhimbidvwane_Indlovana_Indlov'lenkhulu_Mabasa_Inkhwekhweti_Inhlaba_Kholwane_Ingci_Inyoni_Imphala_Lweti_Ingongoni".split("_"),monthsShort:"Bhi_Ina_Inu_Mab_Ink_Inh_Kho_Igc_Iny_Imp_Lwe_Igo".split("_"),weekdays:"Lisontfo_Umsombuluko_Lesibili_Lesitsatfu_Lesine_Lesihlanu_Umgcibelo".split("_"),weekdaysShort:"Lis_Umb_Lsb_Les_Lsi_Lsh_Umg".split("_"),weekdaysMin:"Li_Us_Lb_Lt_Ls_Lh_Ug".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY h:mm A",LLLL:"dddd, D MMMM YYYY h:mm A"},calendar:{sameDay:"[Namuhla nga] LT",nextDay:"[Kusasa nga] LT",nextWeek:"dddd [nga] LT",lastDay:"[Itolo nga] LT",lastWeek:"dddd [leliphelile] [nga] LT",sameElse:"L"},relativeTime:{future:"nga %s",past:"wenteka nga %s",s:"emizuzwana lomcane",ss:"%d mzuzwana",m:"umzuzu",mm:"%d emizuzu",h:"lihora",hh:"%d emahora",d:"lilanga",dd:"%d emalanga",M:"inyanga",MM:"%d tinyanga",y:"umnyaka",yy:"%d iminyaka"},meridiemParse:/ekuseni|emini|entsambama|ebusuku/,meridiem:function(e,t,n){return 
e<11?"ekuseni":e<15?"emini":e<19?"entsambama":"ebusuku"},meridiemHour:function(e,t){return(12===e&&(e=0),"ekuseni"===t)?e:"emini"===t?e>=11?e:e+12:"entsambama"===t||"ebusuku"===t?0===e?0:e+12:void 0},dayOfMonthOrdinalParse:/\d{1,2}/,ordinal:"%d",week:{dow:1,doy:4}})})(n(30381))},98760:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("sv",{months:"januari_februari_mars_april_maj_juni_juli_augusti_september_oktober_november_december".split("_"),monthsShort:"jan_feb_mar_apr_maj_jun_jul_aug_sep_okt_nov_dec".split("_"),weekdays:"s\xf6ndag_m\xe5ndag_tisdag_onsdag_torsdag_fredag_l\xf6rdag".split("_"),weekdaysShort:"s\xf6n_m\xe5n_tis_ons_tor_fre_l\xf6r".split("_"),weekdaysMin:"s\xf6_m\xe5_ti_on_to_fr_l\xf6".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY-MM-DD",LL:"D MMMM YYYY",LLL:"D MMMM YYYY [kl.] HH:mm",LLLL:"dddd D MMMM YYYY [kl.] HH:mm",lll:"D MMM YYYY HH:mm",llll:"ddd D MMM YYYY HH:mm"},calendar:{sameDay:"[Idag] LT",nextDay:"[Imorgon] LT",lastDay:"[Ig\xe5r] LT",nextWeek:"[P\xe5] dddd LT",lastWeek:"[I] dddd[s] LT",sameElse:"L"},relativeTime:{future:"om %s",past:"f\xf6r %s sedan",s:"n\xe5gra sekunder",ss:"%d sekunder",m:"en minut",mm:"%d minuter",h:"en timme",hh:"%d timmar",d:"en dag",dd:"%d dagar",M:"en m\xe5nad",MM:"%d m\xe5nader",y:"ett \xe5r",yy:"%d \xe5r"},dayOfMonthOrdinalParse:/\d{1,2}(\:e|\:a)/,ordinal:function(e){var t=e%10,n=1==~~(e%100/10)?":e":1===t?":a":2===t?":a":":e";return e+n},week:{dow:1,doy:4}})})(n(30381))},91172:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("sw",{months:"Januari_Februari_Machi_Aprili_Mei_Juni_Julai_Agosti_Septemba_Oktoba_Novemba_Desemba".split("_"),monthsShort:"Jan_Feb_Mac_Apr_Mei_Jun_Jul_Ago_Sep_Okt_Nov_Des".split("_"),weekdays:"Jumapili_Jumatatu_Jumanne_Jumatano_Alhamisi_Ijumaa_Jumamosi".split("_"),weekdaysShort:"Jpl_Jtat_Jnne_Jtan_Alh_Ijm_Jmos".split("_"),weekdaysMin:"J2_J3_J4_J5_Al_Ij_J1".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"hh:mm 
A",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[leo saa] LT",nextDay:"[kesho saa] LT",nextWeek:"[wiki ijayo] dddd [saat] LT",lastDay:"[jana] LT",lastWeek:"[wiki iliyopita] dddd [saat] LT",sameElse:"L"},relativeTime:{future:"%s baadaye",past:"tokea %s",s:"hivi punde",ss:"sekunde %d",m:"dakika moja",mm:"dakika %d",h:"saa limoja",hh:"masaa %d",d:"siku moja",dd:"siku %d",M:"mwezi mmoja",MM:"miezi %d",y:"mwaka mmoja",yy:"miaka %d"},week:{dow:1,doy:7}})})(n(30381))},27333:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t={1:"௧",2:"௨",3:"௩",4:"௪",5:"௫",6:"௬",7:"௭",8:"௮",9:"௯",0:"௦"},n={"௧":"1","௨":"2","௩":"3","௪":"4","௫":"5","௬":"6","௭":"7","௮":"8","௯":"9","௦":"0"};return e.defineLocale("ta",{months:"ஜனவரி_பிப்ரவரி_மார்ச்_ஏப்ரல்_மே_ஜூன்_ஜூலை_ஆகஸ்ட்_செப்டெம்பர்_அக்டோபர்_நவம்பர்_டிசம்பர்".split("_"),monthsShort:"ஜனவரி_பிப்ரவரி_மார்ச்_ஏப்ரல்_மே_ஜூன்_ஜூலை_ஆகஸ்ட்_செப்டெம்பர்_அக்டோபர்_நவம்பர்_டிசம்பர்".split("_"),weekdays:"ஞாயிற்றுக்கிழமை_திங்கட்கிழமை_செவ்வாய்கிழமை_புதன்கிழமை_வியாழக்கிழமை_வெள்ளிக்கிழமை_சனிக்கிழமை".split("_"),weekdaysShort:"ஞாயிறு_திங்கள்_செவ்வாய்_புதன்_வியாழன்_வெள்ளி_சனி".split("_"),weekdaysMin:"ஞா_தி_செ_பு_வி_வெ_ச".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, HH:mm",LLLL:"dddd, D MMMM YYYY, HH:mm"},calendar:{sameDay:"[இன்று] LT",nextDay:"[நாளை] LT",nextWeek:"dddd, LT",lastDay:"[நேற்று] LT",lastWeek:"[கடந்த வாரம்] dddd, LT",sameElse:"L"},relativeTime:{future:"%s இல்",past:"%s முன்",s:"ஒரு சில விநாடிகள்",ss:"%d விநாடிகள்",m:"ஒரு நிமிடம்",mm:"%d நிமிடங்கள்",h:"ஒரு மணி நேரம்",hh:"%d மணி நேரம்",d:"ஒரு நாள்",dd:"%d நாட்கள்",M:"ஒரு மாதம்",MM:"%d மாதங்கள்",y:"ஒரு வருடம்",yy:"%d ஆண்டுகள்"},dayOfMonthOrdinalParse:/\d{1,2}வது/,ordinal:function(e){return e+"வது"},preparse:function(e){return e.replace(/[௧௨௩௪௫௬௭௮௯௦]/g,function(e){return n[e]})},postformat:function(e){return 
e.replace(/\d/g,function(e){return t[e]})},meridiemParse:/யாமம்|வைகறை|காலை|நண்பகல்|எற்பாடு|மாலை/,meridiem:function(e,t,n){if(e<2)return" யாமம்";if(e<6)return" வைகறை";if(e<10)return" காலை";if(e<14)return" நண்பகல்";if(e<18)return" எற்பாடு";else if(e<22)return" மாலை";else return" யாமம்"},meridiemHour:function(e,t){return(12===e&&(e=0),"யாமம்"===t)?e<2?e:e+12:"வைகறை"===t||"காலை"===t?e:"நண்பகல்"===t?e>=10?e:e+12:e+12},week:{dow:0,doy:6}})})(n(30381))},23110:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("te",{months:"జనవరి_ఫిబ్రవరి_మార్చి_ఏప్రిల్_మే_జూన్_జులై_ఆగస్టు_సెప్టెంబర్_అక్టోబర్_నవంబర్_డిసెంబర్".split("_"),monthsShort:"జన._ఫిబ్ర._మార్చి_ఏప్రి._మే_జూన్_జులై_ఆగ._సెప్._అక్టో._నవ._డిసె.".split("_"),monthsParseExact:!0,weekdays:"ఆదివారం_సోమవారం_మంగళవారం_బుధవారం_గురువారం_శుక్రవారం_శనివారం".split("_"),weekdaysShort:"ఆది_సోమ_మంగళ_బుధ_గురు_శుక్ర_శని".split("_"),weekdaysMin:"ఆ_సో_మం_బు_గు_శు_శ".split("_"),longDateFormat:{LT:"A h:mm",LTS:"A h:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm",LLLL:"dddd, D MMMM YYYY, A h:mm"},calendar:{sameDay:"[నేడు] LT",nextDay:"[రేపు] LT",nextWeek:"dddd, LT",lastDay:"[నిన్న] LT",lastWeek:"[గత] dddd, LT",sameElse:"L"},relativeTime:{future:"%s లో",past:"%s క్రితం",s:"కొన్ని క్షణాలు",ss:"%d సెకన్లు",m:"ఒక నిమిషం",mm:"%d నిమిషాలు",h:"ఒక గంట",hh:"%d గంటలు",d:"ఒక రోజు",dd:"%d రోజులు",M:"ఒక నెల",MM:"%d నెలలు",y:"ఒక సంవత్సరం",yy:"%d సంవత్సరాలు"},dayOfMonthOrdinalParse:/\d{1,2}వ/,ordinal:"%dవ",meridiemParse:/రాత్రి|ఉదయం|మధ్యాహ్నం|సాయంత్రం/,meridiemHour:function(e,t){return(12===e&&(e=0),"రాత్రి"===t)?e<4?e:e+12:"ఉదయం"===t?e:"మధ్యాహ్నం"===t?e>=10?e:e+12:"సాయంత్రం"===t?e+12:void 0},meridiem:function(e,t,n){return e<4?"రాత్రి":e<10?"ఉదయం":e<17?"మధ్యాహ్నం":e<20?"సాయంత్రం":"రాత్రి"},week:{dow:0,doy:6}})})(n(30381))},52095:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("tet",{months:"Janeiru_Fevereiru_Marsu_Abril_Maiu_Ju\xf1u_Jullu_Agustu_Setembru_Outubru_Novembru_Dezembru".split("_"),monthsShort:"Jan_Fev_Mar_Abr_Mai_Jun_Jul_Ago_Set_Out_Nov_Dez".split("_"),weekdays:"Domingu_Segunda_Tersa_Kuarta_Kinta_Sesta_Sabadu".split("_"),weekdaysShort:"Dom_Seg_Ters_Kua_Kint_Sest_Sab".split("_"),weekdaysMin:"Do_Seg_Te_Ku_Ki_Ses_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Ohin iha] LT",nextDay:"[Aban iha] LT",nextWeek:"dddd [iha] LT",lastDay:"[Horiseik iha] LT",lastWeek:"dddd [semana kotuk] [iha] LT",sameElse:"L"},relativeTime:{future:"iha %s",past:"%s liuba",s:"segundu balun",ss:"segundu %d",m:"minutu ida",mm:"minutu %d",h:"oras ida",hh:"oras %d",d:"loron ida",dd:"loron %d",M:"fulan ida",MM:"fulan %d",y:"tinan ida",yy:"tinan %d"},dayOfMonthOrdinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(e){var t=e%10,n=1==~~(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th";return e+n},week:{dow:1,doy:4}})})(n(30381))},27321:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={0:"-ум",1:"-ум",2:"-юм",3:"-юм",4:"-ум",5:"-ум",6:"-ум",7:"-ум",8:"-ум",9:"-ум",10:"-ум",12:"-ум",13:"-ум",20:"-ум",30:"-юм",40:"-ум",50:"-ум",60:"-ум",70:"-ум",80:"-ум",90:"-ум",100:"-ум"};return e.defineLocale("tg",{months:{format:"январи_феврали_марти_апрели_майи_июни_июли_августи_сентябри_октябри_ноябри_декабри".split("_"),standalone:"январ_феврал_март_апрел_май_июн_июл_август_сентябр_октябр_ноябр_декабр".split("_")},monthsShort:"янв_фев_мар_апр_май_июн_июл_авг_сен_окт_ноя_дек".split("_"),weekdays:"якшанбе_душанбе_сешанбе_чоршанбе_панҷшанбе_ҷумъа_шанбе".split("_"),weekdaysShort:"яшб_дшб_сшб_чшб_пшб_ҷум_шнб".split("_"),weekdaysMin:"яш_дш_сш_чш_пш_ҷм_шб".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Имрӯз соати] LT",nextDay:"[Фардо соати] LT",lastDay:"[Дирӯз соати] LT",nextWeek:"dddd[и] [ҳафтаи оянда соати] LT",lastWeek:"dddd[и] [ҳафтаи гузашта соати] LT",sameElse:"L"},relativeTime:{future:"баъди %s",past:"%s пеш",s:"якчанд сония",m:"як дақиқа",mm:"%d дақиқа",h:"як соат",hh:"%d соат",d:"як рӯз",dd:"%d рӯз",M:"як моҳ",MM:"%d моҳ",y:"як сол",yy:"%d сол"},meridiemParse:/шаб|субҳ|рӯз|бегоҳ/,meridiemHour:function(e,t){return(12===e&&(e=0),"шаб"===t)?e<4?e:e+12:"субҳ"===t?e:"рӯз"===t?e>=11?e:e+12:"бегоҳ"===t?e+12:void 0},meridiem:function(e,t,n){return e<4?"шаб":e<11?"субҳ":e<16?"рӯз":e<19?"бегоҳ":"шаб"},dayOfMonthOrdinalParse:/\d{1,2}-(ум|юм)/,ordinal:function(e){var n=e%10,r=e>=100?100:null;return e+(t[e]||t[n]||t[r])},week:{dow:1,doy:7}})})(n(30381))},9041:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("th",{months:"มกราคม_กุมภาพันธ์_มีนาคม_เมษายน_พฤษภาคม_มิถุนายน_กรกฎาคม_สิงหาคม_กันยายน_ตุลาคม_พฤศจิกายน_ธันวาคม".split("_"),monthsShort:"ม.ค._ก.พ._มี.ค._เม.ย._พ.ค._มิ.ย._ก.ค._ส.ค._ก.ย._ต.ค._พ.ย._ธ.ค.".split("_"),monthsParseExact:!0,weekdays:"อาทิตย์_จันทร์_อังคาร_พุธ_พฤหัสบดี_ศุกร์_เสาร์".split("_"),weekdaysShort:"อาทิตย์_จันทร์_อังคาร_พุธ_พฤหัส_ศุกร์_เสาร์".split("_"),weekdaysMin:"อา._จ._อ._พ._พฤ._ศ._ส.".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY เวลา H:mm",LLLL:"วันddddที่ D MMMM YYYY เวลา H:mm"},meridiemParse:/ก่อนเที่ยง|หลังเที่ยง/,isPM:function(e){return"หลังเที่ยง"===e},meridiem:function(e,t,n){return e<12?"ก่อนเที่ยง":"หลังเที่ยง"},calendar:{sameDay:"[วันนี้ เวลา] LT",nextDay:"[พรุ่งนี้ เวลา] LT",nextWeek:"dddd[หน้า เวลา] LT",lastDay:"[เมื่อวานนี้ เวลา] LT",lastWeek:"[วัน]dddd[ที่แล้ว เวลา] LT",sameElse:"L"},relativeTime:{future:"อีก %s",past:"%sที่แล้ว",s:"ไม่กี่วินาที",ss:"%d วินาที",m:"1 นาที",mm:"%d นาที",h:"1 ชั่วโมง",hh:"%d ชั่วโมง",d:"1 วัน",dd:"%d วัน",w:"1 สัปดาห์",ww:"%d สัปดาห์",M:"1 เดือน",MM:"%d เดือน",y:"1 ปี",yy:"%d ปี"}})})(n(30381))},19005:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"'inji",5:"'inji",8:"'inji",70:"'inji",80:"'inji",2:"'nji",7:"'nji",20:"'nji",50:"'nji",3:"'\xfcnji",4:"'\xfcnji",100:"'\xfcnji",6:"'njy",9:"'unjy",10:"'unjy",30:"'unjy",60:"'ynjy",90:"'ynjy"};return e.defineLocale("tk",{months:"\xddanwar_Fewral_Mart_Aprel_Ma\xfd_I\xfdun_I\xfdul_Awgust_Sent\xfdabr_Okt\xfdabr_No\xfdabr_Dekabr".split("_"),monthsShort:"\xddan_Few_Mar_Apr_Ma\xfd_I\xfdn_I\xfdl_Awg_Sen_Okt_No\xfd_Dek".split("_"),weekdays:"\xddekşenbe_Duşenbe_Sişenbe_\xc7arşenbe_Penşenbe_Anna_Şenbe".split("_"),weekdaysShort:"\xddek_Duş_Siş_\xc7ar_Pen_Ann_Şen".split("_"),weekdaysMin:"\xddk_Dş_Sş_\xc7r_Pn_An_Şn".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[bug\xfcn sagat] LT",nextDay:"[ertir sagat] LT",nextWeek:"[indiki] dddd [sagat] LT",lastDay:"[d\xfc\xfdn] LT",lastWeek:"[ge\xe7en] dddd [sagat] LT",sameElse:"L"},relativeTime:{future:"%s soň",past:"%s \xf6ň",s:"birn\xe4\xe7e sekunt",m:"bir minut",mm:"%d minut",h:"bir sagat",hh:"%d sagat",d:"bir g\xfcn",dd:"%d g\xfcn",M:"bir a\xfd",MM:"%d a\xfd",y:"bir \xfdyl",yy:"%d \xfdyl"},ordinal:function(e,n){switch(n){case"d":case"D":case"Do":case"DD":return e;default:if(0===e)return e+"'unjy";var r=e%10,i=e%100-r,a=e>=100?100:null;return e+(t[r]||t[i]||t[a])}},week:{dow:1,doy:7}})})(n(30381))},75768:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("tl-ph",{months:"Enero_Pebrero_Marso_Abril_Mayo_Hunyo_Hulyo_Agosto_Setyembre_Oktubre_Nobyembre_Disyembre".split("_"),monthsShort:"Ene_Peb_Mar_Abr_May_Hun_Hul_Ago_Set_Okt_Nob_Dis".split("_"),weekdays:"Linggo_Lunes_Martes_Miyerkules_Huwebes_Biyernes_Sabado".split("_"),weekdaysShort:"Lin_Lun_Mar_Miy_Huw_Biy_Sab".split("_"),weekdaysMin:"Li_Lu_Ma_Mi_Hu_Bi_Sab".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"MM/D/YYYY",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY HH:mm",LLLL:"dddd, MMMM DD, YYYY HH:mm"},calendar:{sameDay:"LT 
[ngayong araw]",nextDay:"[Bukas ng] LT",nextWeek:"LT [sa susunod na] dddd",lastDay:"LT [kahapon]",lastWeek:"LT [noong nakaraang] dddd",sameElse:"L"},relativeTime:{future:"sa loob ng %s",past:"%s ang nakalipas",s:"ilang segundo",ss:"%d segundo",m:"isang minuto",mm:"%d minuto",h:"isang oras",hh:"%d oras",d:"isang araw",dd:"%d araw",M:"isang buwan",MM:"%d buwan",y:"isang taon",yy:"%d taon"},dayOfMonthOrdinalParse:/\d{1,2}/,ordinal:function(e){return e},week:{dow:1,doy:4}})})(n(30381))},89444:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t="pagh_wa’_cha’_wej_loS_vagh_jav_Soch_chorgh_Hut".split("_");function n(e){var t=e;return -1!==e.indexOf("jaj")?t.slice(0,-3)+"leS":-1!==e.indexOf("jar")?t.slice(0,-3)+"waQ":-1!==e.indexOf("DIS")?t.slice(0,-3)+"nem":t+" pIq"}function r(e){var t=e;return -1!==e.indexOf("jaj")?t.slice(0,-3)+"Hu’":-1!==e.indexOf("jar")?t.slice(0,-3)+"wen":-1!==e.indexOf("DIS")?t.slice(0,-3)+"ben":t+" ret"}function i(e,t,n,r){var i=a(e);switch(n){case"ss":return i+" lup";case"mm":return i+" tup";case"hh":return i+" rep";case"dd":return i+" jaj";case"MM":return i+" jar";case"yy":return i+" DIS"}}function a(e){var n=Math.floor(e%1e3/100),r=Math.floor(e%100/10),i=e%10,a="";return n>0&&(a+=t[n]+"vatlh"),r>0&&(a+=(""!==a?" ":"")+t[r]+"maH"),i>0&&(a+=(""!==a?" 
":"")+t[i]),""===a?"pagh":a}return e.defineLocale("tlh",{months:"tera’ jar wa’_tera’ jar cha’_tera’ jar wej_tera’ jar loS_tera’ jar vagh_tera’ jar jav_tera’ jar Soch_tera’ jar chorgh_tera’ jar Hut_tera’ jar wa’maH_tera’ jar wa’maH wa’_tera’ jar wa’maH cha’".split("_"),monthsShort:"jar wa’_jar cha’_jar wej_jar loS_jar vagh_jar jav_jar Soch_jar chorgh_jar Hut_jar wa’maH_jar wa’maH wa’_jar wa’maH cha’".split("_"),monthsParseExact:!0,weekdays:"lojmItjaj_DaSjaj_povjaj_ghItlhjaj_loghjaj_buqjaj_ghInjaj".split("_"),weekdaysShort:"lojmItjaj_DaSjaj_povjaj_ghItlhjaj_loghjaj_buqjaj_ghInjaj".split("_"),weekdaysMin:"lojmItjaj_DaSjaj_povjaj_ghItlhjaj_loghjaj_buqjaj_ghInjaj".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[DaHjaj] LT",nextDay:"[wa’leS] LT",nextWeek:"LLL",lastDay:"[wa’Hu’] LT",lastWeek:"LLL",sameElse:"L"},relativeTime:{future:n,past:r,s:"puS lup",ss:i,m:"wa’ tup",mm:i,h:"wa’ rep",hh:i,d:"wa’ jaj",dd:i,M:"wa’ jar",MM:i,y:"wa’ DIS",yy:i},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}})})(n(30381))},72397:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t={1:"'inci",5:"'inci",8:"'inci",70:"'inci",80:"'inci",2:"'nci",7:"'nci",20:"'nci",50:"'nci",3:"'\xfcnc\xfc",4:"'\xfcnc\xfc",100:"'\xfcnc\xfc",6:"'ncı",9:"'uncu",10:"'uncu",30:"'uncu",60:"'ıncı",90:"'ıncı"};return e.defineLocale("tr",{months:"Ocak_Şubat_Mart_Nisan_Mayıs_Haziran_Temmuz_Ağustos_Eyl\xfcl_Ekim_Kasım_Aralık".split("_"),monthsShort:"Oca_Şub_Mar_Nis_May_Haz_Tem_Ağu_Eyl_Eki_Kas_Ara".split("_"),weekdays:"Pazar_Pazartesi_Salı_\xc7arşamba_Perşembe_Cuma_Cumartesi".split("_"),weekdaysShort:"Paz_Pts_Sal_\xc7ar_Per_Cum_Cts".split("_"),weekdaysMin:"Pz_Pt_Sa_\xc7a_Pe_Cu_Ct".split("_"),meridiem:function(e,t,n){return e<12?n?"\xf6\xf6":"\xd6\xd6":n?"\xf6s":"\xd6S"},meridiemParse:/öö|ÖÖ|ös|ÖS/,isPM:function(e){return"\xf6s"===e||"\xd6S"===e},longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[bug\xfcn saat] LT",nextDay:"[yarın saat] LT",nextWeek:"[gelecek] dddd [saat] LT",lastDay:"[d\xfcn] LT",lastWeek:"[ge\xe7en] dddd [saat] LT",sameElse:"L"},relativeTime:{future:"%s sonra",past:"%s \xf6nce",s:"birka\xe7 saniye",ss:"%d saniye",m:"bir dakika",mm:"%d dakika",h:"bir saat",hh:"%d saat",d:"bir g\xfcn",dd:"%d g\xfcn",w:"bir hafta",ww:"%d hafta",M:"bir ay",MM:"%d ay",y:"bir yıl",yy:"%d yıl"},ordinal:function(e,n){switch(n){case"d":case"D":case"Do":case"DD":return e;default:if(0===e)return e+"'ıncı";var r=e%10,i=e%100-r,a=e>=100?100:null;return e+(t[r]||t[i]||t[a])}},week:{dow:1,doy:7}})})(n(30381))},28254:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +var t=e.defineLocale("tzl",{months:"Januar_Fevraglh_Mar\xe7_Avr\xefu_Mai_G\xfcn_Julia_Guscht_Setemvar_Listop\xe4ts_Noemvar_Zecemvar".split("_"),monthsShort:"Jan_Fev_Mar_Avr_Mai_G\xfcn_Jul_Gus_Set_Lis_Noe_Zec".split("_"),weekdays:"S\xfaladi_L\xfane\xe7i_Maitzi_M\xe1rcuri_Xh\xfaadi_Vi\xe9ner\xe7i_S\xe1turi".split("_"),weekdaysShort:"S\xfal_L\xfan_Mai_M\xe1r_Xh\xfa_Vi\xe9_S\xe1t".split("_"),weekdaysMin:"S\xfa_L\xfa_Ma_M\xe1_Xh_Vi_S\xe1".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD.MM.YYYY",LL:"D. MMMM [dallas] YYYY",LLL:"D. MMMM [dallas] YYYY HH.mm",LLLL:"dddd, [li] D. MMMM [dallas] YYYY HH.mm"},meridiemParse:/d\'o|d\'a/i,isPM:function(e){return"d'o"===e.toLowerCase()},meridiem:function(e,t,n){return e>11?n?"d'o":"D'O":n?"d'a":"D'A"},calendar:{sameDay:"[oxhi \xe0] LT",nextDay:"[dem\xe0 \xe0] LT",nextWeek:"dddd [\xe0] LT",lastDay:"[ieiri \xe0] LT",lastWeek:"[s\xfcr el] dddd [lasteu \xe0] LT",sameElse:"L"},relativeTime:{future:"osprei %s",past:"ja%s",s:n,ss:n,m:n,mm:n,h:n,hh:n,d:n,dd:n,M:n,MM:n,y:n,yy:n},dayOfMonthOrdinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}});function n(e,t,n,r){var i={s:["viensas secunds","'iensas secunds"],ss:[e+" secunds",""+e+" secunds"],m:["'n m\xedut","'iens m\xedut"],mm:[e+" m\xeduts",""+e+" m\xeduts"],h:["'n \xfeora","'iensa \xfeora"],hh:[e+" \xfeoras",""+e+" \xfeoras"],d:["'n ziua","'iensa ziua"],dd:[e+" ziuas",""+e+" ziuas"],M:["'n mes","'iens mes"],MM:[e+" mesen",""+e+" mesen"],y:["'n ar","'iens ar"],yy:[e+" ars",""+e+" ars"]};return r?i[n][0]:t?i[n][0]:i[n][1]}return t})(n(30381))},30699:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("tzm-latn",{months:"innayr_brˤayrˤ_marˤsˤ_ibrir_mayyw_ywnyw_ywlywz_ɣwšt_šwtanbir_ktˤwbrˤ_nwwanbir_dwjnbir".split("_"),monthsShort:"innayr_brˤayrˤ_marˤsˤ_ibrir_mayyw_ywnyw_ywlywz_ɣwšt_šwtanbir_ktˤwbrˤ_nwwanbir_dwjnbir".split("_"),weekdays:"asamas_aynas_asinas_akras_akwas_asimwas_asiḍyas".split("_"),weekdaysShort:"asamas_aynas_asinas_akras_akwas_asimwas_asiḍyas".split("_"),weekdaysMin:"asamas_aynas_asinas_akras_akwas_asimwas_asiḍyas".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[asdkh g] LT",nextDay:"[aska g] LT",nextWeek:"dddd [g] LT",lastDay:"[assant g] LT",lastWeek:"dddd [g] LT",sameElse:"L"},relativeTime:{future:"dadkh s yan %s",past:"yan %s",s:"imik",ss:"%d imik",m:"minuḍ",mm:"%d minuḍ",h:"saɛa",hh:"%d tassaɛin",d:"ass",dd:"%d ossan",M:"ayowr",MM:"%d iyyirn",y:"asgas",yy:"%d isgasn"},week:{dow:6,doy:12}})})(n(30381))},51106:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("tzm",{months:"ⵉⵏⵏⴰⵢⵔ_ⴱⵕⴰⵢⵕ_ⵎⴰⵕⵚ_ⵉⴱⵔⵉⵔ_ⵎⴰⵢⵢⵓ_ⵢⵓⵏⵢⵓ_ⵢⵓⵍⵢⵓⵣ_ⵖⵓⵛⵜ_ⵛⵓⵜⴰⵏⴱⵉⵔ_ⴽⵟⵓⴱⵕ_ⵏⵓⵡⴰⵏⴱⵉⵔ_ⴷⵓⵊⵏⴱⵉⵔ".split("_"),monthsShort:"ⵉⵏⵏⴰⵢⵔ_ⴱⵕⴰⵢⵕ_ⵎⴰⵕⵚ_ⵉⴱⵔⵉⵔ_ⵎⴰⵢⵢⵓ_ⵢⵓⵏⵢⵓ_ⵢⵓⵍⵢⵓⵣ_ⵖⵓⵛⵜ_ⵛⵓⵜⴰⵏⴱⵉⵔ_ⴽⵟⵓⴱⵕ_ⵏⵓⵡⴰⵏⴱⵉⵔ_ⴷⵓⵊⵏⴱⵉⵔ".split("_"),weekdays:"ⴰⵙⴰⵎⴰⵙ_ⴰⵢⵏⴰⵙ_ⴰⵙⵉⵏⴰⵙ_ⴰⴽⵔⴰⵙ_ⴰⴽⵡⴰⵙ_ⴰⵙⵉⵎⵡⴰⵙ_ⴰⵙⵉⴹⵢⴰⵙ".split("_"),weekdaysShort:"ⴰⵙⴰⵎⴰⵙ_ⴰⵢⵏⴰⵙ_ⴰⵙⵉⵏⴰⵙ_ⴰⴽⵔⴰⵙ_ⴰⴽⵡⴰⵙ_ⴰⵙⵉⵎⵡⴰⵙ_ⴰⵙⵉⴹⵢⴰⵙ".split("_"),weekdaysMin:"ⴰⵙⴰⵎⴰⵙ_ⴰⵢⵏⴰⵙ_ⴰⵙⵉⵏⴰⵙ_ⴰⴽⵔⴰⵙ_ⴰⴽⵡⴰⵙ_ⴰⵙⵉⵎⵡⴰⵙ_ⴰⵙⵉⴹⵢⴰⵙ".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[ⴰⵙⴷⵅ ⴴ] LT",nextDay:"[ⴰⵙⴽⴰ ⴴ] LT",nextWeek:"dddd [ⴴ] LT",lastDay:"[ⴰⵚⴰⵏⵜ ⴴ] LT",lastWeek:"dddd [ⴴ] LT",sameElse:"L"},relativeTime:{future:"ⴷⴰⴷⵅ ⵙ ⵢⴰⵏ %s",past:"ⵢⴰⵏ %s",s:"ⵉⵎⵉⴽ",ss:"%d ⵉⵎⵉⴽ",m:"ⵎⵉⵏⵓⴺ",mm:"%d ⵎⵉⵏⵓⴺ",h:"ⵙⴰⵄⴰ",hh:"%d ⵜⴰⵙⵙⴰⵄⵉⵏ",d:"ⴰⵙⵙ",dd:"%d oⵙⵙⴰⵏ",M:"ⴰⵢoⵓⵔ",MM:"%d ⵉⵢⵢⵉⵔⵏ",y:"ⴰⵙⴳⴰⵙ",yy:"%d ⵉⵙⴳⴰⵙⵏ"},week:{dow:6,doy:12}})})(n(30381))},9288:function(e,t,n){var 
r,i;r=this,(i=function(e){return e.defineLocale("ug-cn",{months:"يانۋار_فېۋرال_مارت_ئاپرېل_ماي_ئىيۇن_ئىيۇل_ئاۋغۇست_سېنتەبىر_ئۆكتەبىر_نويابىر_دېكابىر".split("_"),monthsShort:"يانۋار_فېۋرال_مارت_ئاپرېل_ماي_ئىيۇن_ئىيۇل_ئاۋغۇست_سېنتەبىر_ئۆكتەبىر_نويابىر_دېكابىر".split("_"),weekdays:"يەكشەنبە_دۈشەنبە_سەيشەنبە_چارشەنبە_پەيشەنبە_جۈمە_شەنبە".split("_"),weekdaysShort:"يە_دۈ_سە_چا_پە_جۈ_شە".split("_"),weekdaysMin:"يە_دۈ_سە_چا_پە_جۈ_شە".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY-MM-DD",LL:"YYYY-يىلىM-ئاينىڭD-كۈنى",LLL:"YYYY-يىلىM-ئاينىڭD-كۈنى، HH:mm",LLLL:"dddd، YYYY-يىلىM-ئاينىڭD-كۈنى، HH:mm"},meridiemParse:/يېرىم كېچە|سەھەر|چۈشتىن بۇرۇن|چۈش|چۈشتىن كېيىن|كەچ/,meridiemHour:function(e,t){return(12===e&&(e=0),"يېرىم كېچە"===t||"سەھەر"===t||"چۈشتىن بۇرۇن"===t)?e:"چۈشتىن كېيىن"===t||"كەچ"===t?e+12:e>=11?e:e+12},meridiem:function(e,t,n){var r=100*e+t;if(r<600)return"يېرىم كېچە";if(r<900)return"سەھەر";if(r<1130)return"چۈشتىن بۇرۇن";if(r<1230)return"چۈش";if(r<1800)return"چۈشتىن كېيىن";else return"كەچ"},calendar:{sameDay:"[بۈگۈن سائەت] LT",nextDay:"[ئەتە سائەت] LT",nextWeek:"[كېلەركى] dddd [سائەت] LT",lastDay:"[تۆنۈگۈن] LT",lastWeek:"[ئالدىنقى] dddd [سائەت] LT",sameElse:"L"},relativeTime:{future:"%s كېيىن",past:"%s بۇرۇن",s:"نەچچە سېكونت",ss:"%d سېكونت",m:"بىر مىنۇت",mm:"%d مىنۇت",h:"بىر سائەت",hh:"%d سائەت",d:"بىر كۈن",dd:"%d كۈن",M:"بىر ئاي",MM:"%d ئاي",y:"بىر يىل",yy:"%d يىل"},dayOfMonthOrdinalParse:/\d{1,2}(-كۈنى|-ئاي|-ھەپتە)/,ordinal:function(e,t){switch(t){case"d":case"D":case"DDD":return e+"-كۈنى";case"w":case"W":return e+"-ھەپتە";default:return e}},preparse:function(e){return e.replace(/،/g,",")},postformat:function(e){return e.replace(/,/g,"،")},week:{dow:1,doy:7}})})(n(30381))},67691:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! 
moment.js locale configuration +function t(e,t){var n=e.split("_");return t%10==1&&t%100!=11?n[0]:t%10>=2&&t%10<=4&&(t%100<10||t%100>=20)?n[1]:n[2]}function n(e,n,r){var i={ss:n?"секунда_секунди_секунд":"секунду_секунди_секунд",mm:n?"хвилина_хвилини_хвилин":"хвилину_хвилини_хвилин",hh:n?"година_години_годин":"годину_години_годин",dd:"день_дні_днів",MM:"місяць_місяці_місяців",yy:"рік_роки_років"};return"m"===r?n?"хвилина":"хвилину":"h"===r?n?"година":"годину":e+" "+t(i[r],+e)}function r(e,t){var n,r={nominative:"неділя_понеділок_вівторок_середа_четвер_п’ятниця_субота".split("_"),accusative:"неділю_понеділок_вівторок_середу_четвер_п’ятницю_суботу".split("_"),genitive:"неділі_понеділка_вівторка_середи_четверга_п’ятниці_суботи".split("_")};return!0===e?r.nominative.slice(1,7).concat(r.nominative.slice(0,1)):e?r[n=/(\[[ВвУу]\]) ?dddd/.test(t)?"accusative":/\[?(?:минулої|наступної)? ?\] ?dddd/.test(t)?"genitive":"nominative"][e.day()]:r.nominative}function i(e){return function(){return e+"о"+(11===this.hours()?"б":"")+"] LT"}}return e.defineLocale("uk",{months:{format:"січня_лютого_березня_квітня_травня_червня_липня_серпня_вересня_жовтня_листопада_грудня".split("_"),standalone:"січень_лютий_березень_квітень_травень_червень_липень_серпень_вересень_жовтень_листопад_грудень".split("_")},monthsShort:"січ_лют_бер_квіт_трав_черв_лип_серп_вер_жовт_лист_груд".split("_"),weekdays:r,weekdaysShort:"нд_пн_вт_ср_чт_пт_сб".split("_"),weekdaysMin:"нд_пн_вт_ср_чт_пт_сб".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY р.",LLL:"D MMMM YYYY р., HH:mm",LLLL:"dddd, D MMMM YYYY р., HH:mm"},calendar:{sameDay:i("[Сьогодні "),nextDay:i("[Завтра "),lastDay:i("[Вчора "),nextWeek:i("[У] dddd ["),lastWeek:function(){switch(this.day()){case 0:case 3:case 5:case 6:return i("[Минулої] dddd [").call(this);case 1:case 2:case 4:return i("[Минулого] dddd [").call(this)}},sameElse:"L"},relativeTime:{future:"за %s",past:"%s тому",s:"декілька 
секунд",ss:n,m:n,mm:n,h:"годину",hh:n,d:"день",dd:n,M:"місяць",MM:n,y:"рік",yy:n},meridiemParse:/ночі|ранку|дня|вечора/,isPM:function(e){return/^(дня|вечора)$/.test(e)},meridiem:function(e,t,n){return e<4?"ночі":e<12?"ранку":e<17?"дня":"вечора"},dayOfMonthOrdinalParse:/\d{1,2}-(й|го)/,ordinal:function(e,t){switch(t){case"M":case"d":case"DDD":case"w":case"W":return e+"-й";case"D":return e+"-го";default:return e}},week:{dow:1,doy:7}})})(n(30381))},13795:function(e,t,n){var r,i;r=this,(i=function(e){"use strict";//! moment.js locale configuration +var t=["جنوری","فروری","مارچ","اپریل","مئی","جون","جولائی","اگست","ستمبر","اکتوبر","نومبر","دسمبر",],n=["اتوار","پیر","منگل","بدھ","جمعرات","جمعہ","ہفتہ"];return e.defineLocale("ur",{months:t,monthsShort:t,weekdays:n,weekdaysShort:n,weekdaysMin:n,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd، D MMMM YYYY HH:mm"},meridiemParse:/صبح|شام/,isPM:function(e){return"شام"===e},meridiem:function(e,t,n){return e<12?"صبح":"شام"},calendar:{sameDay:"[آج بوقت] LT",nextDay:"[کل بوقت] LT",nextWeek:"dddd [بوقت] LT",lastDay:"[گذشتہ روز بوقت] LT",lastWeek:"[گذشتہ] dddd [بوقت] LT",sameElse:"L"},relativeTime:{future:"%s بعد",past:"%s قبل",s:"چند سیکنڈ",ss:"%d سیکنڈ",m:"ایک منٹ",mm:"%d منٹ",h:"ایک گھنٹہ",hh:"%d گھنٹے",d:"ایک دن",dd:"%d دن",M:"ایک ماہ",MM:"%d ماہ",y:"ایک سال",yy:"%d سال"},preparse:function(e){return e.replace(/،/g,",")},postformat:function(e){return e.replace(/,/g,"،")},week:{dow:1,doy:4}})})(n(30381))},60588:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("uz-latn",{months:"Yanvar_Fevral_Mart_Aprel_May_Iyun_Iyul_Avgust_Sentabr_Oktabr_Noyabr_Dekabr".split("_"),monthsShort:"Yan_Fev_Mar_Apr_May_Iyun_Iyul_Avg_Sen_Okt_Noy_Dek".split("_"),weekdays:"Yakshanba_Dushanba_Seshanba_Chorshanba_Payshanba_Juma_Shanba".split("_"),weekdaysShort:"Yak_Dush_Sesh_Chor_Pay_Jum_Shan".split("_"),weekdaysMin:"Ya_Du_Se_Cho_Pa_Ju_Sha".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"D MMMM YYYY, dddd HH:mm"},calendar:{sameDay:"[Bugun soat] LT [da]",nextDay:"[Ertaga] LT [da]",nextWeek:"dddd [kuni soat] LT [da]",lastDay:"[Kecha soat] LT [da]",lastWeek:"[O'tgan] dddd [kuni soat] LT [da]",sameElse:"L"},relativeTime:{future:"Yaqin %s ichida",past:"Bir necha %s oldin",s:"soniya",ss:"%d soniya",m:"bir daqiqa",mm:"%d daqiqa",h:"bir soat",hh:"%d soat",d:"bir kun",dd:"%d kun",M:"bir oy",MM:"%d oy",y:"bir yil",yy:"%d yil"},week:{dow:1,doy:7}})})(n(30381))},6791:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("uz",{months:"январ_феврал_март_апрел_май_июн_июл_август_сентябр_октябр_ноябр_декабр".split("_"),monthsShort:"янв_фев_мар_апр_май_июн_июл_авг_сен_окт_ноя_дек".split("_"),weekdays:"Якшанба_Душанба_Сешанба_Чоршанба_Пайшанба_Жума_Шанба".split("_"),weekdaysShort:"Якш_Душ_Сеш_Чор_Пай_Жум_Шан".split("_"),weekdaysMin:"Як_Ду_Се_Чо_Па_Жу_Ша".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"D MMMM YYYY, dddd HH:mm"},calendar:{sameDay:"[Бугун соат] LT [да]",nextDay:"[Эртага] LT [да]",nextWeek:"dddd [куни соат] LT [да]",lastDay:"[Кеча соат] LT [да]",lastWeek:"[Утган] dddd [куни соат] LT [да]",sameElse:"L"},relativeTime:{future:"Якин %s ичида",past:"Бир неча %s олдин",s:"фурсат",ss:"%d фурсат",m:"бир дакика",mm:"%d дакика",h:"бир соат",hh:"%d соат",d:"бир кун",dd:"%d кун",M:"бир ой",MM:"%d ой",y:"бир йил",yy:"%d йил"},week:{dow:1,doy:7}})})(n(30381))},65666:function(e,t,n){var 
r,i;r=this,(i=function(e){return e.defineLocale("vi",{months:"th\xe1ng 1_th\xe1ng 2_th\xe1ng 3_th\xe1ng 4_th\xe1ng 5_th\xe1ng 6_th\xe1ng 7_th\xe1ng 8_th\xe1ng 9_th\xe1ng 10_th\xe1ng 11_th\xe1ng 12".split("_"),monthsShort:"Thg 01_Thg 02_Thg 03_Thg 04_Thg 05_Thg 06_Thg 07_Thg 08_Thg 09_Thg 10_Thg 11_Thg 12".split("_"),monthsParseExact:!0,weekdays:"chủ nhật_thứ hai_thứ ba_thứ tư_thứ năm_thứ s\xe1u_thứ bảy".split("_"),weekdaysShort:"CN_T2_T3_T4_T5_T6_T7".split("_"),weekdaysMin:"CN_T2_T3_T4_T5_T6_T7".split("_"),weekdaysParseExact:!0,meridiemParse:/sa|ch/i,isPM:function(e){return/^ch$/i.test(e)},meridiem:function(e,t,n){return e<12?n?"sa":"SA":n?"ch":"CH"},longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM [năm] YYYY",LLL:"D MMMM [năm] YYYY HH:mm",LLLL:"dddd, D MMMM [năm] YYYY HH:mm",l:"DD/M/YYYY",ll:"D MMM YYYY",lll:"D MMM YYYY HH:mm",llll:"ddd, D MMM YYYY HH:mm"},calendar:{sameDay:"[H\xf4m nay l\xfac] LT",nextDay:"[Ng\xe0y mai l\xfac] LT",nextWeek:"dddd [tuần tới l\xfac] LT",lastDay:"[H\xf4m qua l\xfac] LT",lastWeek:"dddd [tuần trước l\xfac] LT",sameElse:"L"},relativeTime:{future:"%s tới",past:"%s trước",s:"v\xe0i gi\xe2y",ss:"%d gi\xe2y",m:"một ph\xfat",mm:"%d ph\xfat",h:"một giờ",hh:"%d giờ",d:"một ng\xe0y",dd:"%d ng\xe0y",w:"một tuần",ww:"%d tuần",M:"một th\xe1ng",MM:"%d th\xe1ng",y:"một năm",yy:"%d năm"},dayOfMonthOrdinalParse:/\d{1,2}/,ordinal:function(e){return e},week:{dow:1,doy:4}})})(n(30381))},14378:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("x-pseudo",{months:"J~\xe1\xf1\xfa\xe1~r\xfd_F~\xe9br\xfa~\xe1r\xfd_~M\xe1rc~h_\xc1p~r\xedl_~M\xe1\xfd_~J\xfa\xf1\xe9~_J\xfal~\xfd_\xc1\xfa~g\xfast~_S\xe9p~t\xe9mb~\xe9r_\xd3~ct\xf3b~\xe9r_\xd1~\xf3v\xe9m~b\xe9r_~D\xe9c\xe9~mb\xe9r".split("_"),monthsShort:"J~\xe1\xf1_~F\xe9b_~M\xe1r_~\xc1pr_~M\xe1\xfd_~J\xfa\xf1_~J\xfal_~\xc1\xfag_~S\xe9p_~\xd3ct_~\xd1\xf3v_~D\xe9c".split("_"),monthsParseExact:!0,weekdays:"S~\xfa\xf1d\xe1~\xfd_M\xf3~\xf1d\xe1\xfd~_T\xfa\xe9~sd\xe1\xfd~_W\xe9d~\xf1\xe9sd~\xe1\xfd_T~h\xfars~d\xe1\xfd_~Fr\xedd~\xe1\xfd_S~\xe1t\xfar~d\xe1\xfd".split("_"),weekdaysShort:"S~\xfa\xf1_~M\xf3\xf1_~T\xfa\xe9_~W\xe9d_~Th\xfa_~Fr\xed_~S\xe1t".split("_"),weekdaysMin:"S~\xfa_M\xf3~_T\xfa_~W\xe9_T~h_Fr~_S\xe1".split("_"),weekdaysParseExact:!0,longDateFormat:{LT:"HH:mm",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[T~\xf3d\xe1~\xfd \xe1t] LT",nextDay:"[T~\xf3m\xf3~rr\xf3~w \xe1t] LT",nextWeek:"dddd [\xe1t] LT",lastDay:"[\xdd~\xe9st~\xe9rd\xe1~\xfd \xe1t] LT",lastWeek:"[L~\xe1st] dddd [\xe1t] LT",sameElse:"L"},relativeTime:{future:"\xed~\xf1 %s",past:"%s \xe1~g\xf3",s:"\xe1 ~f\xe9w ~s\xe9c\xf3~\xf1ds",ss:"%d s~\xe9c\xf3\xf1~ds",m:"\xe1 ~m\xed\xf1~\xfat\xe9",mm:"%d m~\xed\xf1\xfa~t\xe9s",h:"\xe1~\xf1 h\xf3~\xfar",hh:"%d h~\xf3\xfars",d:"\xe1 ~d\xe1\xfd",dd:"%d d~\xe1\xfds",M:"\xe1 ~m\xf3\xf1~th",MM:"%d m~\xf3\xf1t~hs",y:"\xe1 ~\xfd\xe9\xe1r",yy:"%d \xfd~\xe9\xe1rs"},dayOfMonthOrdinalParse:/\d{1,2}(th|st|nd|rd)/,ordinal:function(e){var t=e%10,n=1==~~(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th";return e+n},week:{dow:1,doy:4}})})(n(30381))},75805:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("yo",{months:"Sẹ́rẹ́_Èrèlè_Ẹrẹ̀nà_Ìgbé_Èbibi_Òkùdu_Agẹmo_Ògún_Owewe_Ọ̀wàrà_Bélú_Ọ̀pẹ̀̀".split("_"),monthsShort:"Sẹ́r_Èrl_Ẹrn_Ìgb_Èbi_Òkù_Agẹ_Ògú_Owe_Ọ̀wà_Bél_Ọ̀pẹ̀̀".split("_"),weekdays:"Àìkú_Ajé_Ìsẹ́gun_Ọjọ́rú_Ọjọ́bọ_Ẹtì_Àbámẹ́ta".split("_"),weekdaysShort:"Àìk_Ajé_Ìsẹ́_Ọjr_Ọjb_Ẹtì_Àbá".split("_"),weekdaysMin:"Àì_Aj_Ìs_Ọr_Ọb_Ẹt_Àb".split("_"),longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY h:mm A",LLLL:"dddd, D MMMM YYYY h:mm A"},calendar:{sameDay:"[Ònì ni] LT",nextDay:"[Ọ̀la ni] LT",nextWeek:"dddd [Ọsẹ̀ tón'bọ] [ni] LT",lastDay:"[Àna ni] LT",lastWeek:"dddd [Ọsẹ̀ tólọ́] [ni] LT",sameElse:"L"},relativeTime:{future:"ní %s",past:"%s kọjá",s:"ìsẹjú aayá die",ss:"aayá %d",m:"ìsẹjú kan",mm:"ìsẹjú %d",h:"wákati kan",hh:"wákati %d",d:"ọjọ́ kan",dd:"ọjọ́ %d",M:"osù kan",MM:"osù %d",y:"ọdún kan",yy:"ọdún %d"},dayOfMonthOrdinalParse:/ọjọ́\s\d{1,2}/,ordinal:"ọjọ́ %d",week:{dow:1,doy:4}})})(n(30381))},83839:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("zh-cn",{months:"一月_二月_三月_四月_五月_六月_七月_八月_九月_十月_十一月_十二月".split("_"),monthsShort:"1月_2月_3月_4月_5月_6月_7月_8月_9月_10月_11月_12月".split("_"),weekdays:"星期日_星期一_星期二_星期三_星期四_星期五_星期六".split("_"),weekdaysShort:"周日_周一_周二_周三_周四_周五_周六".split("_"),weekdaysMin:"日_一_二_三_四_五_六".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY/MM/DD",LL:"YYYY年M月D日",LLL:"YYYY年M月D日Ah点mm分",LLLL:"YYYY年M月D日ddddAh点mm分",l:"YYYY/M/D",ll:"YYYY年M月D日",lll:"YYYY年M月D日 HH:mm",llll:"YYYY年M月D日dddd HH:mm"},meridiemParse:/凌晨|早上|上午|中午|下午|晚上/,meridiemHour:function(e,t){return(12===e&&(e=0),"凌晨"===t||"早上"===t||"上午"===t)?e:"下午"===t||"晚上"===t?e+12:e>=11?e:e+12},meridiem:function(e,t,n){var r=100*e+t;if(r<600)return"凌晨";if(r<900)return"早上";if(r<1130)return"上午";if(r<1230)return"中午";if(r<1800)return"下午";else return"晚上"},calendar:{sameDay:"[今天]LT",nextDay:"[明天]LT",nextWeek:function(e){return e.week()!==this.week()?"[下]dddLT":"[本]dddLT"},lastDay:"[昨天]LT",lastWeek:function(e){return 
this.week()!==e.week()?"[上]dddLT":"[本]dddLT"},sameElse:"L"},dayOfMonthOrdinalParse:/\d{1,2}(日|月|周)/,ordinal:function(e,t){switch(t){case"d":case"D":case"DDD":return e+"日";case"M":return e+"月";case"w":case"W":return e+"周";default:return e}},relativeTime:{future:"%s后",past:"%s前",s:"几秒",ss:"%d 秒",m:"1 分钟",mm:"%d 分钟",h:"1 小时",hh:"%d 小时",d:"1 天",dd:"%d 天",w:"1 周",ww:"%d 周",M:"1 个月",MM:"%d 个月",y:"1 年",yy:"%d 年"},week:{dow:1,doy:4}})})(n(30381))},55726:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("zh-hk",{months:"一月_二月_三月_四月_五月_六月_七月_八月_九月_十月_十一月_十二月".split("_"),monthsShort:"1月_2月_3月_4月_5月_6月_7月_8月_9月_10月_11月_12月".split("_"),weekdays:"星期日_星期一_星期二_星期三_星期四_星期五_星期六".split("_"),weekdaysShort:"週日_週一_週二_週三_週四_週五_週六".split("_"),weekdaysMin:"日_一_二_三_四_五_六".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY/MM/DD",LL:"YYYY年M月D日",LLL:"YYYY年M月D日 HH:mm",LLLL:"YYYY年M月D日dddd HH:mm",l:"YYYY/M/D",ll:"YYYY年M月D日",lll:"YYYY年M月D日 HH:mm",llll:"YYYY年M月D日dddd HH:mm"},meridiemParse:/凌晨|早上|上午|中午|下午|晚上/,meridiemHour:function(e,t){return(12===e&&(e=0),"凌晨"===t||"早上"===t||"上午"===t)?e:"中午"===t?e>=11?e:e+12:"下午"===t||"晚上"===t?e+12:void 0},meridiem:function(e,t,n){var r=100*e+t;if(r<600)return"凌晨";if(r<900)return"早上";if(r<1200)return"上午";if(1200===r)return"中午";if(r<1800)return"下午";else return"晚上"},calendar:{sameDay:"[今天]LT",nextDay:"[明天]LT",nextWeek:"[下]ddddLT",lastDay:"[昨天]LT",lastWeek:"[上]ddddLT",sameElse:"L"},dayOfMonthOrdinalParse:/\d{1,2}(日|月|週)/,ordinal:function(e,t){switch(t){case"d":case"D":case"DDD":return e+"日";case"M":return e+"月";case"w":case"W":return e+"週";default:return e}},relativeTime:{future:"%s後",past:"%s前",s:"幾秒",ss:"%d 秒",m:"1 分鐘",mm:"%d 分鐘",h:"1 小時",hh:"%d 小時",d:"1 天",dd:"%d 天",M:"1 個月",MM:"%d 個月",y:"1 年",yy:"%d 年"}})})(n(30381))},99807:function(e,t,n){var r,i;r=this,(i=function(e){return 
e.defineLocale("zh-mo",{months:"一月_二月_三月_四月_五月_六月_七月_八月_九月_十月_十一月_十二月".split("_"),monthsShort:"1月_2月_3月_4月_5月_6月_7月_8月_9月_10月_11月_12月".split("_"),weekdays:"星期日_星期一_星期二_星期三_星期四_星期五_星期六".split("_"),weekdaysShort:"週日_週一_週二_週三_週四_週五_週六".split("_"),weekdaysMin:"日_一_二_三_四_五_六".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"YYYY年M月D日",LLL:"YYYY年M月D日 HH:mm",LLLL:"YYYY年M月D日dddd HH:mm",l:"D/M/YYYY",ll:"YYYY年M月D日",lll:"YYYY年M月D日 HH:mm",llll:"YYYY年M月D日dddd HH:mm"},meridiemParse:/凌晨|早上|上午|中午|下午|晚上/,meridiemHour:function(e,t){return(12===e&&(e=0),"凌晨"===t||"早上"===t||"上午"===t)?e:"中午"===t?e>=11?e:e+12:"下午"===t||"晚上"===t?e+12:void 0},meridiem:function(e,t,n){var r=100*e+t;if(r<600)return"凌晨";if(r<900)return"早上";if(r<1130)return"上午";if(r<1230)return"中午";if(r<1800)return"下午";else return"晚上"},calendar:{sameDay:"[今天] LT",nextDay:"[明天] LT",nextWeek:"[下]dddd LT",lastDay:"[昨天] LT",lastWeek:"[上]dddd LT",sameElse:"L"},dayOfMonthOrdinalParse:/\d{1,2}(日|月|週)/,ordinal:function(e,t){switch(t){case"d":case"D":case"DDD":return e+"日";case"M":return e+"月";case"w":case"W":return e+"週";default:return e}},relativeTime:{future:"%s內",past:"%s前",s:"幾秒",ss:"%d 秒",m:"1 分鐘",mm:"%d 分鐘",h:"1 小時",hh:"%d 小時",d:"1 天",dd:"%d 天",M:"1 個月",MM:"%d 個月",y:"1 年",yy:"%d 年"}})})(n(30381))},74152:function(e,t,n){var r,i;r=this,(i=function(e){return e.defineLocale("zh-tw",{months:"一月_二月_三月_四月_五月_六月_七月_八月_九月_十月_十一月_十二月".split("_"),monthsShort:"1月_2月_3月_4月_5月_6月_7月_8月_9月_10月_11月_12月".split("_"),weekdays:"星期日_星期一_星期二_星期三_星期四_星期五_星期六".split("_"),weekdaysShort:"週日_週一_週二_週三_週四_週五_週六".split("_"),weekdaysMin:"日_一_二_三_四_五_六".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY/MM/DD",LL:"YYYY年M月D日",LLL:"YYYY年M月D日 HH:mm",LLLL:"YYYY年M月D日dddd HH:mm",l:"YYYY/M/D",ll:"YYYY年M月D日",lll:"YYYY年M月D日 HH:mm",llll:"YYYY年M月D日dddd HH:mm"},meridiemParse:/凌晨|早上|上午|中午|下午|晚上/,meridiemHour:function(e,t){return(12===e&&(e=0),"凌晨"===t||"早上"===t||"上午"===t)?e:"中午"===t?e>=11?e:e+12:"下午"===t||"晚上"===t?e+12:void 
0},meridiem:function(e,t,n){var r=100*e+t;if(r<600)return"凌晨";if(r<900)return"早上";if(r<1130)return"上午";if(r<1230)return"中午";if(r<1800)return"下午";else return"晚上"},calendar:{sameDay:"[今天] LT",nextDay:"[明天] LT",nextWeek:"[下]dddd LT",lastDay:"[昨天] LT",lastWeek:"[上]dddd LT",sameElse:"L"},dayOfMonthOrdinalParse:/\d{1,2}(日|月|週)/,ordinal:function(e,t){switch(t){case"d":case"D":case"DDD":return e+"日";case"M":return e+"月";case"w":case"W":return e+"週";default:return e}},relativeTime:{future:"%s後",past:"%s前",s:"幾秒",ss:"%d 秒",m:"1 分鐘",mm:"%d 分鐘",h:"1 小時",hh:"%d 小時",d:"1 天",dd:"%d 天",M:"1 個月",MM:"%d 個月",y:"1 年",yy:"%d 年"}})})(n(30381))},46700(e,t,n){var r={"./af":42786,"./af.js":42786,"./ar":30867,"./ar-dz":14130,"./ar-dz.js":14130,"./ar-kw":96135,"./ar-kw.js":96135,"./ar-ly":56440,"./ar-ly.js":56440,"./ar-ma":47702,"./ar-ma.js":47702,"./ar-sa":16040,"./ar-sa.js":16040,"./ar-tn":37100,"./ar-tn.js":37100,"./ar.js":30867,"./az":31083,"./az.js":31083,"./be":9808,"./be.js":9808,"./bg":68338,"./bg.js":68338,"./bm":67438,"./bm.js":67438,"./bn":8905,"./bn-bd":76225,"./bn-bd.js":76225,"./bn.js":8905,"./bo":11560,"./bo.js":11560,"./br":1278,"./br.js":1278,"./bs":80622,"./bs.js":80622,"./ca":2468,"./ca.js":2468,"./cs":5822,"./cs.js":5822,"./cv":50877,"./cv.js":50877,"./cy":47373,"./cy.js":47373,"./da":24780,"./da.js":24780,"./de":59740,"./de-at":60217,"./de-at.js":60217,"./de-ch":60894,"./de-ch.js":60894,"./de.js":59740,"./dv":5300,"./dv.js":5300,"./el":50837,"./el.js":50837,"./en-au":78348,"./en-au.js":78348,"./en-ca":77925,"./en-ca.js":77925,"./en-gb":22243,"./en-gb.js":22243,"./en-ie":46436,"./en-ie.js":46436,"./en-il":47207,"./en-il.js":47207,"./en-in":44175,"./en-in.js":44175,"./en-nz":76319,"./en-nz.js":76319,"./en-sg":31662,"./en-sg.js":31662,"./eo":92915,"./eo.js":92915,"./es":55655,"./es-do":55251,"./es-do.js":55251,"./es-mx":96112,"./es-mx.js":96112,"./es-us":71146,"./es-us.js":71146,"./es.js":55655,"./et":5603,"./et.js":5603,"./eu":77763,"./eu.js":77763,"./fa":76959,"./fa.js":76
959,"./fi":11897,"./fi.js":11897,"./fil":42549,"./fil.js":42549,"./fo":94694,"./fo.js":94694,"./fr":94470,"./fr-ca":63049,"./fr-ca.js":63049,"./fr-ch":52330,"./fr-ch.js":52330,"./fr.js":94470,"./fy":5044,"./fy.js":5044,"./ga":29295,"./ga.js":29295,"./gd":2101,"./gd.js":2101,"./gl":38794,"./gl.js":38794,"./gom-deva":27884,"./gom-deva.js":27884,"./gom-latn":23168,"./gom-latn.js":23168,"./gu":95349,"./gu.js":95349,"./he":24206,"./he.js":24206,"./hi":30094,"./hi.js":30094,"./hr":30316,"./hr.js":30316,"./hu":22138,"./hu.js":22138,"./hy-am":11423,"./hy-am.js":11423,"./id":29218,"./id.js":29218,"./is":90135,"./is.js":90135,"./it":90626,"./it-ch":10150,"./it-ch.js":10150,"./it.js":90626,"./ja":39183,"./ja.js":39183,"./jv":24286,"./jv.js":24286,"./ka":12105,"./ka.js":12105,"./kk":47772,"./kk.js":47772,"./km":18758,"./km.js":18758,"./kn":79282,"./kn.js":79282,"./ko":33730,"./ko.js":33730,"./ku":1408,"./ku.js":1408,"./ky":33291,"./ky.js":33291,"./lb":36841,"./lb.js":36841,"./lo":55466,"./lo.js":55466,"./lt":57010,"./lt.js":57010,"./lv":37595,"./lv.js":37595,"./me":39861,"./me.js":39861,"./mi":35493,"./mi.js":35493,"./mk":95966,"./mk.js":95966,"./ml":87341,"./ml.js":87341,"./mn":5115,"./mn.js":5115,"./mr":10370,"./mr.js":10370,"./ms":9847,"./ms-my":41237,"./ms-my.js":41237,"./ms.js":9847,"./mt":72126,"./mt.js":72126,"./my":56165,"./my.js":56165,"./nb":64924,"./nb.js":64924,"./ne":16744,"./ne.js":16744,"./nl":93901,"./nl-be":59814,"./nl-be.js":59814,"./nl.js":93901,"./nn":83877,"./nn.js":83877,"./oc-lnc":92135,"./oc-lnc.js":92135,"./pa-in":15858,"./pa-in.js":15858,"./pl":64495,"./pl.js":64495,"./pt":89520,"./pt-br":57971,"./pt-br.js":57971,"./pt.js":89520,"./ro":96459,"./ro.js":96459,"./ru":21793,"./ru.js":21793,"./sd":40950,"./sd.js":40950,"./se":10490,"./se.js":10490,"./si":90124,"./si.js":90124,"./sk":64249,"./sk.js":64249,"./sl":14985,"./sl.js":14985,"./sq":51104,"./sq.js":51104,"./sr":49131,"./sr-cyrl":79915,"./sr-cyrl.js":79915,"./sr.js":49131,"./ss":85893,"./ss.js":85893,
"./sv":98760,"./sv.js":98760,"./sw":91172,"./sw.js":91172,"./ta":27333,"./ta.js":27333,"./te":23110,"./te.js":23110,"./tet":52095,"./tet.js":52095,"./tg":27321,"./tg.js":27321,"./th":9041,"./th.js":9041,"./tk":19005,"./tk.js":19005,"./tl-ph":75768,"./tl-ph.js":75768,"./tlh":89444,"./tlh.js":89444,"./tr":72397,"./tr.js":72397,"./tzl":28254,"./tzl.js":28254,"./tzm":51106,"./tzm-latn":30699,"./tzm-latn.js":30699,"./tzm.js":51106,"./ug-cn":9288,"./ug-cn.js":9288,"./uk":67691,"./uk.js":67691,"./ur":13795,"./ur.js":13795,"./uz":6791,"./uz-latn":60588,"./uz-latn.js":60588,"./uz.js":6791,"./vi":65666,"./vi.js":65666,"./x-pseudo":14378,"./x-pseudo.js":14378,"./yo":75805,"./yo.js":75805,"./zh-cn":83839,"./zh-cn.js":83839,"./zh-hk":55726,"./zh-hk.js":55726,"./zh-mo":99807,"./zh-mo.js":99807,"./zh-tw":74152,"./zh-tw.js":74152};function i(e){return n(a(e))}function a(e){if(!n.o(r,e)){var t=Error("Cannot find module '"+e+"'");throw t.code="MODULE_NOT_FOUND",t}return r[e]}i.keys=function(){return Object.keys(r)},i.resolve=a,e.exports=i,i.id=46700},30381:function(e,t,n){var r,i;e=n.nmd(e),r=this,i=function(){"use strict";function t(){return em.apply(null,arguments)}function r(e){em=e}function i(e){return e instanceof Array||"[object Array]"===Object.prototype.toString.call(e)}function a(e){return null!=e&&"[object Object]"===Object.prototype.toString.call(e)}function o(e,t){return Object.prototype.hasOwnProperty.call(e,t)}function s(e){var t;if(Object.getOwnPropertyNames)return 0===Object.getOwnPropertyNames(e).length;for(t in e)if(o(e,t))return!1;return!0}function u(e){return void 0===e}function c(e){return"number"==typeof e||"[object Number]"===Object.prototype.toString.call(e)}function l(e){return e instanceof Date||"[object Date]"===Object.prototype.toString.call(e)}function f(e,t){var n,r=[];for(n=0;n>>0;for(t=0;t0)for(n=0;n=0?n?"+":"":"-")+Math.pow(10,Math.max(0,t-i.length)).toString().substr(1)+i}var 
R=/(\[[^\[]*\])|(\\)?([Hh]mm(ss)?|Mo|MM?M?M?|Do|DDDo|DD?D?D?|ddd?d?|do?|w[o|w]?|W[o|W]?|Qo?|N{1,5}|YYYYYY|YYYYY|YYYY|YY|y{2,4}|yo?|gg(ggg?)?|GG(GGG?)?|e|E|a|A|hh?|HH?|kk?|mm?|ss?|S{1,9}|x|X|zz?|ZZ?|.)/g,j=/(\[[^\[]*\])|(\\)?(LTS|LT|LL?L?L?|l{1,4})/g,F={},Y={};function B(e,t,n,r){var i=r;"string"==typeof r&&(i=function(){return this[r]()}),e&&(Y[e]=i),t&&(Y[t[0]]=function(){return P(i.apply(this,arguments),t[1],t[2])}),n&&(Y[n]=function(){return this.localeData().ordinal(i.apply(this,arguments),e)})}function U(e){return e.match(/\[[\s\S]/)?e.replace(/^\[|\]$/g,""):e.replace(/\\/g,"")}function H(e){var t,n,r=e.match(R);for(t=0,n=r.length;t=0&&j.test(e);)e=e.replace(j,r),j.lastIndex=0,n-=1;return e}var G={LTS:"h:mm:ss A",LT:"h:mm A",L:"MM/DD/YYYY",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY h:mm A",LLLL:"dddd, MMMM D, YYYY h:mm A"};function W(e){var t=this._longDateFormat[e],n=this._longDateFormat[e.toUpperCase()];return t||!n?t:(this._longDateFormat[e]=n.match(R).map(function(e){return"MMMM"===e||"MM"===e||"DD"===e||"dddd"===e?e.slice(1):e}).join(""),this._longDateFormat[e])}var K="Invalid date";function V(){return this._invalidDate}var q="%d",Z=/\d{1,2}/;function X(e){return this._ordinal.replace("%d",e)}var J={future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",w:"a week",ww:"%d weeks",M:"a month",MM:"%d months",y:"a year",yy:"%d years"};function Q(e,t,n,r){var i=this._relativeTime[n];return A(i)?i(e,t,n,r):i.replace(/%d/i,e)}function ee(e,t){var n=this._relativeTime[e>0?"future":"past"];return A(n)?n(t):n.replace(/%s/i,t)}var et={};function en(e,t){var n=e.toLowerCase();et[n]=et[n+"s"]=et[t]=e}function er(e){return"string"==typeof e?et[e]||et[e.toLowerCase()]:void 0}function ei(e){var t,n,r={};for(n in e)o(e,n)&&(t=er(n))&&(r[t]=e[n]);return r}var ea={};function eo(e,t){ea[e]=t}function es(e){var t,n=[];for(t in e)o(e,t)&&n.push({unit:t,priority:ea[t]});return 
n.sort(function(e,t){return e.priority-t.priority}),n}function eu(e){return e%4==0&&e%100!=0||e%400==0}function ec(e){return e<0?Math.ceil(e)||0:Math.floor(e)}function el(e){var t=+e,n=0;return 0!==t&&isFinite(t)&&(n=ec(t)),n}function ef(e,n){return function(r){return null!=r?(eh(this,e,r),t.updateOffset(this,n),this):ed(this,e)}}function ed(e,t){return e.isValid()?e._d["get"+(e._isUTC?"UTC":"")+t]():NaN}function eh(e,t,n){e.isValid()&&!isNaN(n)&&("FullYear"===t&&eu(e.year())&&1===e.month()&&29===e.date()?(n=el(n),e._d["set"+(e._isUTC?"UTC":"")+t](n,e.month(),e0(n,e.month()))):e._d["set"+(e._isUTC?"UTC":"")+t](n))}function ep(e){return A(this[e=er(e)])?this[e]():this}function eb(e,t){if("object"==typeof e){e=ei(e);var n,r=es(e);for(n=0;n68?1900:2e3)};var tu=ef("FullYear",!0);function tc(){return eu(this.year())}function tl(e,t,n,r,i,a,o){var s;return e<100&&e>=0?(s=new Date(e+400,t,n,r,i,a,o),isFinite(s.getFullYear())&&s.setFullYear(e)):s=new Date(e,t,n,r,i,a,o),s}function tf(e){var t,n;return e<100&&e>=0?(n=Array.prototype.slice.call(arguments),n[0]=e+400,t=new Date(Date.UTC.apply(null,n)),isFinite(t.getUTCFullYear())&&t.setUTCFullYear(e)):t=new Date(Date.UTC.apply(null,arguments)),t}function td(e,t,n){var r=7+t-n;return-((7+tf(e,0,r).getUTCDay()-t)%7)+r-1}function th(e,t,n,r,i){var a,o,s=(7+n-r)%7,u=td(e,r,i),c=1+7*(t-1)+s+u;return c<=0?o=ts(a=e-1)+c:c>ts(e)?(a=e+1,o=c-ts(e)):(a=e,o=c),{year:a,dayOfYear:o}}function tp(e,t,n){var r,i,a=td(e.year(),t,n),o=Math.floor((e.dayOfYear()-a-1)/7)+1;return o<1?r=o+tb(i=e.year()-1,t,n):o>tb(e.year(),t,n)?(r=o-tb(e.year(),t,n),i=e.year()+1):(i=e.year(),r=o),{week:r,year:i}}function tb(e,t,n){var r=td(e,t,n),i=td(e+1,t,n);return(ts(e)-r+i)/7}function tm(e){return 
tp(e,this._week.dow,this._week.doy).week}B("w",["ww",2],"wo","week"),B("W",["WW",2],"Wo","isoWeek"),en("week","w"),en("isoWeek","W"),eo("week",5),eo("isoWeek",5),ej("w",ex),ej("ww",ex,e_),ej("W",ex),ej("WW",ex,e_),e$(["w","ww","W","WW"],function(e,t,n,r){t[r.substr(0,1)]=el(e)});var tg={dow:0,doy:6};function tv(){return this._week.dow}function ty(){return this._week.doy}function tw(e){var t=this.localeData().week(this);return null==e?t:this.add((e-t)*7,"d")}function t_(e){var t=tp(this,1,4).week;return null==e?t:this.add((e-t)*7,"d")}function tE(e,t){return"string"!=typeof e?e:isNaN(e)?"number"==typeof(e=t.weekdaysParse(e))?e:null:parseInt(e,10)}function tS(e,t){return"string"==typeof e?t.weekdaysParse(e)%7||7:isNaN(e)?null:e}function tk(e,t){return e.slice(t,7).concat(e.slice(0,t))}B("d",0,"do","day"),B("dd",0,0,function(e){return this.localeData().weekdaysMin(this,e)}),B("ddd",0,0,function(e){return this.localeData().weekdaysShort(this,e)}),B("dddd",0,0,function(e){return this.localeData().weekdays(this,e)}),B("e",0,0,"weekday"),B("E",0,0,"isoWeekday"),en("day","d"),en("weekday","e"),en("isoWeekday","E"),eo("day",11),eo("weekday",11),eo("isoWeekday",11),ej("d",ex),ej("e",ex),ej("E",ex),ej("dd",function(e,t){return t.weekdaysMinRegex(e)}),ej("ddd",function(e,t){return t.weekdaysShortRegex(e)}),ej("dddd",function(e,t){return t.weekdaysRegex(e)}),e$(["dd","ddd","dddd"],function(e,t,n,r){var i=n._locale.weekdaysParse(e,r,n._strict);null!=i?t.d=i:b(n).invalidWeekday=e}),e$(["d","e","E"],function(e,t,n,r){t[r]=el(e)});var tx="Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),tT="Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),tM="Su_Mo_Tu_We_Th_Fr_Sa".split("_"),tO=eR,tA=eR,tL=eR;function tC(e,t){var n=i(this._weekdays)?this._weekdays:this._weekdays[e&&!0!==e&&this._weekdays.isFormat.test(t)?"format":"standalone"];return!0===e?tk(n,this._week.dow):e?n[e.day()]:n}function 
tI(e){return!0===e?tk(this._weekdaysShort,this._week.dow):e?this._weekdaysShort[e.day()]:this._weekdaysShort}function tD(e){return!0===e?tk(this._weekdaysMin,this._week.dow):e?this._weekdaysMin[e.day()]:this._weekdaysMin}function tN(e,t,n){var r,i,a,o=e.toLocaleLowerCase();if(!this._weekdaysParse)for(r=0,this._weekdaysParse=[],this._shortWeekdaysParse=[],this._minWeekdaysParse=[];r<7;++r)a=h([2e3,1]).day(r),this._minWeekdaysParse[r]=this.weekdaysMin(a,"").toLocaleLowerCase(),this._shortWeekdaysParse[r]=this.weekdaysShort(a,"").toLocaleLowerCase(),this._weekdaysParse[r]=this.weekdays(a,"").toLocaleLowerCase();return n?"dddd"===t?-1!==(i=tX.call(this._weekdaysParse,o))?i:null:"ddd"===t?-1!==(i=tX.call(this._shortWeekdaysParse,o))?i:null:-1!==(i=tX.call(this._minWeekdaysParse,o))?i:null:"dddd"===t?-1!==(i=tX.call(this._weekdaysParse,o))||-1!==(i=tX.call(this._shortWeekdaysParse,o))?i:-1!==(i=tX.call(this._minWeekdaysParse,o))?i:null:"ddd"===t?-1!==(i=tX.call(this._shortWeekdaysParse,o))||-1!==(i=tX.call(this._weekdaysParse,o))?i:-1!==(i=tX.call(this._minWeekdaysParse,o))?i:null:-1!==(i=tX.call(this._minWeekdaysParse,o))||-1!==(i=tX.call(this._weekdaysParse,o))?i:-1!==(i=tX.call(this._shortWeekdaysParse,o))?i:null}function tP(e,t,n){var r,i,a;if(this._weekdaysParseExact)return 
tN.call(this,e,t,n);for(this._weekdaysParse||(this._weekdaysParse=[],this._minWeekdaysParse=[],this._shortWeekdaysParse=[],this._fullWeekdaysParse=[]),r=0;r<7;r++){if(i=h([2e3,1]).day(r),n&&!this._fullWeekdaysParse[r]&&(this._fullWeekdaysParse[r]=RegExp("^"+this.weekdays(i,"").replace(".","\\.?")+"$","i"),this._shortWeekdaysParse[r]=RegExp("^"+this.weekdaysShort(i,"").replace(".","\\.?")+"$","i"),this._minWeekdaysParse[r]=RegExp("^"+this.weekdaysMin(i,"").replace(".","\\.?")+"$","i")),this._weekdaysParse[r]||(a="^"+this.weekdays(i,"")+"|^"+this.weekdaysShort(i,"")+"|^"+this.weekdaysMin(i,""),this._weekdaysParse[r]=RegExp(a.replace(".",""),"i")),n&&"dddd"===t&&this._fullWeekdaysParse[r].test(e))return r;if(n&&"ddd"===t&&this._shortWeekdaysParse[r].test(e))return r;if(n&&"dd"===t&&this._minWeekdaysParse[r].test(e))return r;else if(!n&&this._weekdaysParse[r].test(e))return r}}function tR(e){if(!this.isValid())return null!=e?this:NaN;var t=this._isUTC?this._d.getUTCDay():this._d.getDay();return null!=e?(e=tE(e,this.localeData()),this.add(e-t,"d")):t}function tj(e){if(!this.isValid())return null!=e?this:NaN;var t=(this.day()+7-this.localeData()._week.dow)%7;return null==e?t:this.add(e-t,"d")}function tF(e){if(!this.isValid())return null!=e?this:NaN;if(null==e)return this.day()||7;var t=tS(e,this.localeData());return this.day(this.day()%7?t:t-7)}function tY(e){return this._weekdaysParseExact?(o(this,"_weekdaysRegex")||tH.call(this),e)?this._weekdaysStrictRegex:this._weekdaysRegex:(o(this,"_weekdaysRegex")||(this._weekdaysRegex=tO),this._weekdaysStrictRegex&&e?this._weekdaysStrictRegex:this._weekdaysRegex)}function tB(e){return this._weekdaysParseExact?(o(this,"_weekdaysRegex")||tH.call(this),e)?this._weekdaysShortStrictRegex:this._weekdaysShortRegex:(o(this,"_weekdaysShortRegex")||(this._weekdaysShortRegex=tA),this._weekdaysShortStrictRegex&&e?this._weekdaysShortStrictRegex:this._weekdaysShortRegex)}function tU(e){return 
this._weekdaysParseExact?(o(this,"_weekdaysRegex")||tH.call(this),e)?this._weekdaysMinStrictRegex:this._weekdaysMinRegex:(o(this,"_weekdaysMinRegex")||(this._weekdaysMinRegex=tL),this._weekdaysMinStrictRegex&&e?this._weekdaysMinStrictRegex:this._weekdaysMinRegex)}function tH(){function e(e,t){return t.length-e.length}var t,n,r,i,a,o=[],s=[],u=[],c=[];for(t=0;t<7;t++)n=h([2e3,1]).day(t),r=eB(this.weekdaysMin(n,"")),i=eB(this.weekdaysShort(n,"")),a=eB(this.weekdays(n,"")),o.push(r),s.push(i),u.push(a),c.push(r),c.push(i),c.push(a);o.sort(e),s.sort(e),u.sort(e),c.sort(e),this._weekdaysRegex=RegExp("^("+c.join("|")+")","i"),this._weekdaysShortRegex=this._weekdaysRegex,this._weekdaysMinRegex=this._weekdaysRegex,this._weekdaysStrictRegex=RegExp("^("+u.join("|")+")","i"),this._weekdaysShortStrictRegex=RegExp("^("+s.join("|")+")","i"),this._weekdaysMinStrictRegex=RegExp("^("+o.join("|")+")","i")}function t$(){return this.hours()%12||12}function tz(){return this.hours()||24}function tG(e,t){B(e,0,0,function(){return this.localeData().meridiem(this.hours(),this.minutes(),t)})}function tW(e,t){return t._meridiemParse}function tK(e){return"p"===(e+"").toLowerCase().charAt(0)}B("H",["HH",2],0,"hour"),B("h",["hh",2],0,t$),B("k",["kk",2],0,tz),B("hmm",0,0,function(){return""+t$.apply(this)+P(this.minutes(),2)}),B("hmmss",0,0,function(){return""+t$.apply(this)+P(this.minutes(),2)+P(this.seconds(),2)}),B("Hmm",0,0,function(){return""+this.hours()+P(this.minutes(),2)}),B("Hmmss",0,0,function(){return""+this.hours()+P(this.minutes(),2)+P(this.seconds(),2)}),tG("a",!0),tG("A",!1),en("hour","h"),eo("hour",13),ej("a",tW),ej("A",tW),ej("H",ex),ej("h",ex),ej("k",ex),ej("HH",ex,e_),ej("hh",ex,e_),ej("kk",ex,e_),ej("hmm",eT),ej("hmmss",eM),ej("Hmm",eT),ej("Hmmss",eM),eH(["H","HH"],eV),eH(["k","kk"],function(e,t,n){var 
r=el(e);t[eV]=24===r?0:r}),eH(["a","A"],function(e,t,n){n._isPm=n._locale.isPM(e),n._meridiem=e}),eH(["h","hh"],function(e,t,n){t[eV]=el(e),b(n).bigHour=!0}),eH("hmm",function(e,t,n){var r=e.length-2;t[eV]=el(e.substr(0,r)),t[eq]=el(e.substr(r)),b(n).bigHour=!0}),eH("hmmss",function(e,t,n){var r=e.length-4,i=e.length-2;t[eV]=el(e.substr(0,r)),t[eq]=el(e.substr(r,2)),t[eZ]=el(e.substr(i)),b(n).bigHour=!0}),eH("Hmm",function(e,t,n){var r=e.length-2;t[eV]=el(e.substr(0,r)),t[eq]=el(e.substr(r))}),eH("Hmmss",function(e,t,n){var r=e.length-4,i=e.length-2;t[eV]=el(e.substr(0,r)),t[eq]=el(e.substr(r,2)),t[eZ]=el(e.substr(i))});var tV=/[ap]\.?m?\.?/i,tq=ef("Hours",!0);function tZ(e,t,n){return e>11?n?"pm":"PM":n?"am":"AM"}var tX,tJ,tQ={calendar:D,longDateFormat:G,invalidDate:K,ordinal:q,dayOfMonthOrdinalParse:Z,relativeTime:J,months:e2,monthsShort:e3,week:tg,weekdays:tx,weekdaysMin:tM,weekdaysShort:tT,meridiemParse:tV},t1={},t0={};function t2(e,t){var n,r=Math.min(e.length,t.length);for(n=0;n0;){if(r=t5(i.slice(0,t).join("-")))return r;if(n&&n.length>=t&&t2(i,n)>=t-1)break;t--}a++}return tJ}function t5(t){var r,i=null;if(void 0===t1[t]&&e&&e.exports)try{i=tJ._abbr,r=void 0,n(46700)("./"+t),t6(i)}catch(a){t1[t]=null}return t1[t]}function t6(e,t){var n;return e&&((n=u(t)?t7(e):t9(e,t))?tJ=n:"undefined"!=typeof console&&console.warn&&console.warn("Locale "+e+" not found. Did you forget to load it?")),tJ._abbr}function t9(e,t){if(null===t)return delete t1[e],null;var n,r=tQ;if(t.abbr=e,null!=t1[e])O("defineLocaleOverride","use moment.updateLocale(localeName, config) to change an existing locale. 
moment.defineLocale(localeName, config) should only be used for creating a new locale See http://momentjs.com/guides/#/warnings/define-locale/ for more info."),r=t1[e]._config;else if(null!=t.parentLocale){if(null!=t1[t.parentLocale])r=t1[t.parentLocale]._config;else{if(null==(n=t5(t.parentLocale)))return t0[t.parentLocale]||(t0[t.parentLocale]=[]),t0[t.parentLocale].push({name:e,config:t}),null;r=n._config}}return t1[e]=new I(C(r,t)),t0[e]&&t0[e].forEach(function(e){t9(e.name,e.config)}),t6(e),t1[e]}function t8(e,t){if(null!=t){var n,r,i=tQ;null!=t1[e]&&null!=t1[e].parentLocale?t1[e].set(C(t1[e]._config,t)):(null!=(r=t5(e))&&(i=r._config),t=C(i,t),null==r&&(t.abbr=e),(n=new I(t)).parentLocale=t1[e],t1[e]=n),t6(e)}else null!=t1[e]&&(null!=t1[e].parentLocale?(t1[e]=t1[e].parentLocale,e===t6()&&t6(e)):null!=t1[e]&&delete t1[e]);return t1[e]}function t7(e){var t;if(e&&e._locale&&e._locale._abbr&&(e=e._locale._abbr),!e)return tJ;if(!i(e)){if(t=t5(e))return t;e=[e]}return t4(e)}function ne(){return ev(t1)}function nt(e){var t,n=e._a;return n&&-2===b(e).overflow&&(t=n[eW]<0||n[eW]>11?eW:n[eK]<1||n[eK]>e0(n[eG],n[eW])?eK:n[eV]<0||n[eV]>24||24===n[eV]&&(0!==n[eq]||0!==n[eZ]||0!==n[eX])?eV:n[eq]<0||n[eq]>59?eq:n[eZ]<0||n[eZ]>59?eZ:n[eX]<0||n[eX]>999?eX:-1,b(e)._overflowDayOfYear&&(teK)&&(t=eK),b(e)._overflowWeeks&&-1===t&&(t=eJ),b(e)._overflowWeekday&&-1===t&&(t=eQ),b(e).overflow=t),e}var nn=/^\s*((?:[+-]\d{6}|\d{4})-(?:\d\d-\d\d|W\d\d-\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?::\d\d(?::\d\d(?:[.,]\d+)?)?)?)([+-]\d\d(?::?\d\d)?|\s*Z)?)?$/,nr=/^\s*((?:[+-]\d{6}|\d{4})(?:\d\d\d\d|W\d\d\d|W\d\d|\d\d\d|\d\d|))(?:(T| 
)(\d\d(?:\d\d(?:\d\d(?:[.,]\d+)?)?)?)([+-]\d\d(?::?\d\d)?|\s*Z)?)?$/,ni=/Z|[+-]\d\d(?::?\d\d)?/,na=[["YYYYYY-MM-DD",/[+-]\d{6}-\d\d-\d\d/],["YYYY-MM-DD",/\d{4}-\d\d-\d\d/],["GGGG-[W]WW-E",/\d{4}-W\d\d-\d/],["GGGG-[W]WW",/\d{4}-W\d\d/,!1],["YYYY-DDD",/\d{4}-\d{3}/],["YYYY-MM",/\d{4}-\d\d/,!1],["YYYYYYMMDD",/[+-]\d{10}/],["YYYYMMDD",/\d{8}/],["GGGG[W]WWE",/\d{4}W\d{3}/],["GGGG[W]WW",/\d{4}W\d{2}/,!1],["YYYYDDD",/\d{7}/],["YYYYMM",/\d{6}/,!1],["YYYY",/\d{4}/,!1],],no=[["HH:mm:ss.SSSS",/\d\d:\d\d:\d\d\.\d+/],["HH:mm:ss,SSSS",/\d\d:\d\d:\d\d,\d+/],["HH:mm:ss",/\d\d:\d\d:\d\d/],["HH:mm",/\d\d:\d\d/],["HHmmss.SSSS",/\d\d\d\d\d\d\.\d+/],["HHmmss,SSSS",/\d\d\d\d\d\d,\d+/],["HHmmss",/\d\d\d\d\d\d/],["HHmm",/\d\d\d\d/],["HH",/\d\d/],],ns=/^\/?Date\((-?\d+)/i,nu=/^(?:(Mon|Tue|Wed|Thu|Fri|Sat|Sun),?\s)?(\d{1,2})\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s(\d{2,4})\s(\d\d):(\d\d)(?::(\d\d))?\s(?:(UT|GMT|[ECMP][SD]T)|([Zz])|([+-]\d{4}))$/,nc={UT:0,GMT:0,EDT:-240,EST:-300,CDT:-300,CST:-360,MDT:-360,MST:-420,PDT:-420,PST:-480};function nl(e){var t,n,r,i,a,o,s=e._i,u=nn.exec(s)||nr.exec(s);if(u){for(t=0,b(e).iso=!0,n=na.length;tts(a)||0===e._dayOfYear)&&(b(e)._overflowDayOfYear=!0),n=tf(a,0,e._dayOfYear),e._a[eW]=n.getUTCMonth(),e._a[eK]=n.getUTCDate()),t=0;t<3&&null==e._a[t];++t)e._a[t]=o[t]=r[t];for(;t<7;t++)e._a[t]=o[t]=null==e._a[t]?2===t?1:0:e._a[t];24===e._a[eV]&&0===e._a[eq]&&0===e._a[eZ]&&0===e._a[eX]&&(e._nextDay=!0,e._a[eV]=0),e._d=(e._useUTC?tf:tl).apply(null,o),i=e._useUTC?e._d.getUTCDay():e._d.getDay(),null!=e._tzm&&e._d.setUTCMinutes(e._d.getUTCMinutes()-e._tzm),e._nextDay&&(e._a[eV]=24),e._w&&void 0!==e._w.d&&e._w.d!==i&&(b(e).weekdayMismatch=!0)}}function n_(e){var 
t,n,r,i,a,o,s,u,c;null!=(t=e._w).GG||null!=t.W||null!=t.E?(a=1,o=4,n=nv(t.GG,e._a[eG],tp(nL(),1,4).year),r=nv(t.W,1),((i=nv(t.E,1))<1||i>7)&&(u=!0)):(a=e._locale._week.dow,o=e._locale._week.doy,c=tp(nL(),a,o),n=nv(t.gg,e._a[eG],c.year),r=nv(t.w,c.week),null!=t.d?((i=t.d)<0||i>6)&&(u=!0):null!=t.e?(i=t.e+a,(t.e<0||t.e>6)&&(u=!0)):i=a),r<1||r>tb(n,a,o)?b(e)._overflowWeeks=!0:null!=u?b(e)._overflowWeekday=!0:(s=th(n,r,i,a,o),e._a[eG]=s.year,e._dayOfYear=s.dayOfYear)}function nE(e){if(e._f===t.ISO_8601){nl(e);return}if(e._f===t.RFC_2822){nm(e);return}e._a=[],b(e).empty=!0;var n,r,i,a,o,s,u=""+e._i,c=u.length,l=0;for(n=0,i=z(e._f,e._locale).match(R)||[];n0&&b(e).unusedInput.push(o),u=u.slice(u.indexOf(r)+r.length),l+=r.length),Y[a]?(r?b(e).empty=!1:b(e).unusedTokens.push(a),ez(a,r,e)):e._strict&&!r&&b(e).unusedTokens.push(a);b(e).charsLeftOver=c-l,u.length>0&&b(e).unusedInput.push(u),e._a[eV]<=12&&!0===b(e).bigHour&&e._a[eV]>0&&(b(e).bigHour=void 0),b(e).parsedDateParts=e._a.slice(0),b(e).meridiem=e._meridiem,e._a[eV]=nS(e._locale,e._a[eV],e._meridiem),null!==(s=b(e).era)&&(e._a[eG]=e._locale.erasConvertYear(s,e._a[eG])),nw(e),nt(e)}function nS(e,t,n){var r;return null==n?t:null!=e.meridiemHour?e.meridiemHour(t,n):(null!=e.isPM&&((r=e.isPM(n))&&t<12&&(t+=12),r||12!==t||(t=0)),t)}function nk(e){var t,n,r,i,a,o,s=!1;if(0===e._f.length){b(e).invalidFormat=!0,e._d=new Date(NaN);return}for(i=0;ithis?this:e:g()});function nD(e,t){var n,r;if(1===t.length&&i(t[0])&&(t=t[0]),!t.length)return nL();for(r=1,n=t[0];rMath.abs(e)&&!r&&(e*=60);return!this._isUTC&&n&&(i=nq(this)),this._offset=e,this._isUTC=!0,null!=i&&this.add(i,"m"),a===e||(!n||this._changeInProgress?ri(this,n7(e-a,"m"),1,!1):this._changeInProgress||(this._changeInProgress=!0,t.updateOffset(this,!0),this._changeInProgress=null)),this}function nX(e,t){return null!=e?("string"!=typeof e&&(e=-e),this.utcOffset(e,t),this):-this.utcOffset()}function nJ(e){return this.utcOffset(0,e)}function nQ(e){return 
this._isUTC&&(this.utcOffset(0,e),this._isUTC=!1,e&&this.subtract(nq(this),"m")),this}function n1(){if(null!=this._tzm)this.utcOffset(this._tzm,!1,!0);else if("string"==typeof this._i){var e=nK(eD,this._i);null!=e?this.utcOffset(e):this.utcOffset(0,!0)}return this}function n0(e){return!!this.isValid()&&(e=e?nL(e).utcOffset():0,(this.utcOffset()-e)%60==0)}function n2(){return this.utcOffset()>this.clone().month(0).utcOffset()||this.utcOffset()>this.clone().month(5).utcOffset()}function n3(){if(!u(this._isDSTShifted))return this._isDSTShifted;var e,t={};return E(t,this),(t=nM(t))._a?(e=t._isUTC?h(t._a):nL(t._a),this._isDSTShifted=this.isValid()&&nz(t._a,e.toArray())>0):this._isDSTShifted=!1,this._isDSTShifted}function n4(){return!!this.isValid()&&!this._isUTC}function n5(){return!!this.isValid()&&this._isUTC}function n6(){return!!this.isValid()&&this._isUTC&&0===this._offset}t.updateOffset=function(){};var n9=/^(-|\+)?(?:(\d*)[. ])?(\d+):(\d+)(?::(\d+)(\.\d*)?)?$/,n8=/^(-|\+)?P(?:([-+]?[0-9,.]*)Y)?(?:([-+]?[0-9,.]*)M)?(?:([-+]?[0-9,.]*)W)?(?:([-+]?[0-9,.]*)D)?(?:T(?:([-+]?[0-9,.]*)H)?(?:([-+]?[0-9,.]*)M)?(?:([-+]?[0-9,.]*)S)?)?$/;function n7(e,t){var n,r,i,a=e,s=null;return nH(e)?a={ms:e._milliseconds,d:e._days,M:e._months}:c(e)||!isNaN(+e)?(a={},t?a[t]=+e:a.milliseconds=+e):(s=n9.exec(e))?(n="-"===s[1]?-1:1,a={y:0,d:el(s[eK])*n,h:el(s[eV])*n,m:el(s[eq])*n,s:el(s[eZ])*n,ms:el(n$(1e3*s[eX]))*n}):(s=n8.exec(e))?(n="-"===s[1]?-1:1,a={y:re(s[2],n),M:re(s[3],n),w:re(s[4],n),d:re(s[5],n),h:re(s[6],n),m:re(s[7],n),s:re(s[8],n)}):null==a?a={}:"object"==typeof a&&("from"in a||"to"in a)&&(i=rn(nL(a.from),nL(a.to)),(a={}).ms=i.milliseconds,a.M=i.months),r=new nU(a),nH(e)&&o(e,"_locale")&&(r._locale=e._locale),nH(e)&&o(e,"_isValid")&&(r._isValid=e._isValid),r}function re(e,t){var n=e&&parseFloat(e.replace(",","."));return(isNaN(n)?0:n)*t}function rt(e,t){var n={};return 
n.months=t.month()-e.month()+(t.year()-e.year())*12,e.clone().add(n.months,"M").isAfter(t)&&--n.months,n.milliseconds=+t-+e.clone().add(n.months,"M"),n}function rn(e,t){var n;return e.isValid()&&t.isValid()?(t=nV(t,e),e.isBefore(t)?n=rt(e,t):((n=rt(t,e)).milliseconds=-n.milliseconds,n.months=-n.months),n):{milliseconds:0,months:0}}function rr(e,t){return function(n,r){var i,a;return null===r||isNaN(+r)||(O(t,"moment()."+t+"(period, number) is deprecated. Please use moment()."+t+"(number, period). See http://momentjs.com/guides/#/warnings/add-inverted-param/ for more info."),a=n,n=r,r=a),i=n7(n,r),ri(this,i,e),this}}function ri(e,n,r,i){var a=n._milliseconds,o=n$(n._days),s=n$(n._months);e.isValid()&&(i=null==i||i,s&&tt(e,ed(e,"Month")+s*r),o&&eh(e,"Date",ed(e,"Date")+o*r),a&&e._d.setTime(e._d.valueOf()+a*r),i&&t.updateOffset(e,o||s))}n7.fn=nU.prototype,n7.invalid=nB;var ra=rr(1,"add"),ro=rr(-1,"subtract");function rs(e){return"string"==typeof e||e instanceof String}function ru(e){return k(e)||l(e)||rs(e)||c(e)||rl(e)||rc(e)||null==e}function rc(e){var t,n,r=a(e)&&!s(e),i=!1,u=["years","year","y","months","month","M","days","day","d","dates","date","D","hours","hour","h","minutes","minute","m","seconds","second","s","milliseconds","millisecond","ms",];for(t=0;tn.valueOf():n.valueOf()n.year()||n.year()>9999?$(n,t?"YYYYYY-MM-DD[T]HH:mm:ss.SSS[Z]":"YYYYYY-MM-DD[T]HH:mm:ss.SSSZ"):A(Date.prototype.toISOString)?t?this.toDate().toISOString():new Date(this.valueOf()+6e4*this.utcOffset()).toISOString().replace("Z",$(n,"Z")):$(n,t?"YYYY-MM-DD[T]HH:mm:ss.SSS[Z]":"YYYY-MM-DD[T]HH:mm:ss.SSSZ")}function rx(){if(!this.isValid())return"moment.invalid(/* "+this._i+" */)";var e,t,n,r,i="moment",a="";return this.isLocal()||(i=0===this.utcOffset()?"moment.utc":"moment.parseZone",a="Z"),e="["+i+'("]',t=0<=this.year()&&9999>=this.year()?"YYYY":"YYYYYY",n="-MM-DD[T]HH:mm:ss.SSS",r=a+'[")]',this.format(e+t+n+r)}function rT(e){e||(e=this.isUtc()?t.defaultFormatUtc:t.defaultFormat);var 
n=$(this,e);return this.localeData().postformat(n)}function rM(e,t){return this.isValid()&&(k(e)&&e.isValid()||nL(e).isValid())?n7({to:this,from:e}).locale(this.locale()).humanize(!t):this.localeData().invalidDate()}function rO(e){return this.from(nL(),e)}function rA(e,t){return this.isValid()&&(k(e)&&e.isValid()||nL(e).isValid())?n7({from:this,to:e}).locale(this.locale()).humanize(!t):this.localeData().invalidDate()}function rL(e){return this.to(nL(),e)}function rC(e){var t;return void 0===e?this._locale._abbr:(null!=(t=t7(e))&&(this._locale=t),this)}t.defaultFormat="YYYY-MM-DDTHH:mm:ssZ",t.defaultFormatUtc="YYYY-MM-DDTHH:mm:ss[Z]";var rI=T("moment().lang() is deprecated. Instead, use moment().localeData() to get the language configuration. Use moment().locale() to change languages.",function(e){return void 0===e?this.localeData():this.locale(e)});function rD(){return this._locale}var rN=1e3,rP=60*rN,rR=60*rP,rj=3506328*rR;function rF(e,t){return(e%t+t)%t}function rY(e,t,n){return e<100&&e>=0?new Date(e+400,t,n)-rj:new Date(e,t,n).valueOf()}function rB(e,t,n){return e<100&&e>=0?Date.UTC(e+400,t,n)-rj:Date.UTC(e,t,n)}function rU(e){var n,r;if(void 0===(e=er(e))||"millisecond"===e||!this.isValid())return this;switch(r=this._isUTC?rB:rY,e){case"year":n=r(this.year(),0,1);break;case"quarter":n=r(this.year(),this.month()-this.month()%3,1);break;case"month":n=r(this.year(),this.month(),1);break;case"week":n=r(this.year(),this.month(),this.date()-this.weekday());break;case"isoWeek":n=r(this.year(),this.month(),this.date()-(this.isoWeekday()-1));break;case"day":case"date":n=r(this.year(),this.month(),this.date());break;case"hour":n=this._d.valueOf(),n-=rF(n+(this._isUTC?0:this.utcOffset()*rP),rR);break;case"minute":n=this._d.valueOf(),n-=rF(n,rP);break;case"second":n=this._d.valueOf(),n-=rF(n,rN)}return this._d.setTime(n),t.updateOffset(this,!0),this}function rH(e){var n,r;if(void 0===(e=er(e))||"millisecond"===e||!this.isValid())return 
this;switch(r=this._isUTC?rB:rY,e){case"year":n=r(this.year()+1,0,1)-1;break;case"quarter":n=r(this.year(),this.month()-this.month()%3+3,1)-1;break;case"month":n=r(this.year(),this.month()+1,1)-1;break;case"week":n=r(this.year(),this.month(),this.date()-this.weekday()+7)-1;break;case"isoWeek":n=r(this.year(),this.month(),this.date()-(this.isoWeekday()-1)+7)-1;break;case"day":case"date":n=r(this.year(),this.month(),this.date()+1)-1;break;case"hour":n=this._d.valueOf(),n+=rR-rF(n+(this._isUTC?0:this.utcOffset()*rP),rR)-1;break;case"minute":n=this._d.valueOf(),n+=rP-rF(n,rP)-1;break;case"second":n=this._d.valueOf(),n+=rN-rF(n,rN)-1}return this._d.setTime(n),t.updateOffset(this,!0),this}function r$(){return this._d.valueOf()-6e4*(this._offset||0)}function rz(){return Math.floor(this.valueOf()/1e3)}function rG(){return new Date(this.valueOf())}function rW(){var e=this;return[e.year(),e.month(),e.date(),e.hour(),e.minute(),e.second(),e.millisecond(),]}function rK(){var e=this;return{years:e.year(),months:e.month(),date:e.date(),hours:e.hours(),minutes:e.minutes(),seconds:e.seconds(),milliseconds:e.milliseconds()}}function rV(){return this.isValid()?this.toISOString():null}function rq(){return m(this)}function rZ(){return d({},b(this))}function rX(){return b(this).overflow}function rJ(){return{input:this._i,format:this._f,locale:this._locale,isUTC:this._isUTC,strict:this._strict}}function rQ(e,n){var r,i,a,o=this._eras||t7("en")._eras;for(r=0,i=o.length;r=0)return u[r]}function r0(e,n){var r=e.since<=e.until?1:-1;return void 0===n?t(e.since).year():t(e.since).year()+(n-e.offset)*r}function r2(){var e,t,n,r=this.localeData().eras();for(e=0,t=r.length;ea&&(t=a),ip.call(this,e,t,n,r,i))}function ip(e,t,n,r,i){var a=th(e,t,n,r,i),o=tf(a.year,0,a.dayOfYear);return this.year(o.getUTCFullYear()),this.month(o.getUTCMonth()),this.date(o.getUTCDate()),this}function ib(e){return 
null==e?Math.ceil((this.month()+1)/3):this.month((e-1)*3+this.month()%3)}B("N",0,0,"eraAbbr"),B("NN",0,0,"eraAbbr"),B("NNN",0,0,"eraAbbr"),B("NNNN",0,0,"eraName"),B("NNNNN",0,0,"eraNarrow"),B("y",["y",1],"yo","eraYear"),B("y",["yy",2],0,"eraYear"),B("y",["yyy",3],0,"eraYear"),B("y",["yyyy",4],0,"eraYear"),ej("N",r7),ej("NN",r7),ej("NNN",r7),ej("NNNN",ie),ej("NNNNN",it),eH(["N","NN","NNN","NNNN","NNNNN"],function(e,t,n,r){var i=n._locale.erasParse(e,r,n._strict);i?b(n).era=i:b(n).invalidEra=e}),ej("y",eC),ej("yy",eC),ej("yyy",eC),ej("yyyy",eC),ej("yo",ir),eH(["y","yy","yyy","yyyy"],eG),eH(["yo"],function(e,t,n,r){var i;n._locale._eraYearOrdinalRegex&&(i=e.match(n._locale._eraYearOrdinalRegex)),n._locale.eraYearOrdinalParse?t[eG]=n._locale.eraYearOrdinalParse(e,i):t[eG]=parseInt(e,10)}),B(0,["gg",2],0,function(){return this.weekYear()%100}),B(0,["GG",2],0,function(){return this.isoWeekYear()%100}),ia("gggg","weekYear"),ia("ggggg","weekYear"),ia("GGGG","isoWeekYear"),ia("GGGGG","isoWeekYear"),en("weekYear","gg"),en("isoWeekYear","GG"),eo("weekYear",1),eo("isoWeekYear",1),ej("G",eI),ej("g",eI),ej("GG",ex,e_),ej("gg",ex,e_),ej("GGGG",eA,eS),ej("gggg",eA,eS),ej("GGGGG",eL,ek),ej("ggggg",eL,ek),e$(["gggg","ggggg","GGGG","GGGGG"],function(e,t,n,r){t[r.substr(0,2)]=el(e)}),e$(["gg","GG"],function(e,n,r,i){n[i]=t.parseTwoDigitYear(e)}),B("Q",0,"Qo","quarter"),en("quarter","Q"),eo("quarter",7),ej("Q",ew),eH("Q",function(e,t){t[eW]=(el(e)-1)*3}),B("D",["DD",2],"Do","date"),en("date","D"),eo("date",9),ej("D",ex),ej("DD",ex,e_),ej("Do",function(e,t){return e?t._dayOfMonthOrdinalParse||t._ordinalParse:t._dayOfMonthOrdinalParseLenient}),eH(["D","DD"],eK),eH("Do",function(e,t){t[eK]=el(e.match(ex)[0])});var im=ef("Date",!0);function ig(e){var t=Math.round((this.clone().startOf("day")-this.clone().startOf("year"))/864e5)+1;return 
null==e?t:this.add(e-t,"d")}B("DDD",["DDDD",3],"DDDo","dayOfYear"),en("dayOfYear","DDD"),eo("dayOfYear",4),ej("DDD",eO),ej("DDDD",eE),eH(["DDD","DDDD"],function(e,t,n){n._dayOfYear=el(e)}),B("m",["mm",2],0,"minute"),en("minute","m"),eo("minute",14),ej("m",ex),ej("mm",ex,e_),eH(["m","mm"],eq);var iv=ef("Minutes",!1);B("s",["ss",2],0,"second"),en("second","s"),eo("second",15),ej("s",ex),ej("ss",ex,e_),eH(["s","ss"],eZ);var iy=ef("Seconds",!1);for(B("S",0,0,function(){return~~(this.millisecond()/100)}),B(0,["SS",2],0,function(){return~~(this.millisecond()/10)}),B(0,["SSS",3],0,"millisecond"),B(0,["SSSS",4],0,function(){return 10*this.millisecond()}),B(0,["SSSSS",5],0,function(){return 100*this.millisecond()}),B(0,["SSSSSS",6],0,function(){return 1e3*this.millisecond()}),B(0,["SSSSSSS",7],0,function(){return 1e4*this.millisecond()}),B(0,["SSSSSSSS",8],0,function(){return 1e5*this.millisecond()}),B(0,["SSSSSSSSS",9],0,function(){return 1e6*this.millisecond()}),en("millisecond","ms"),eo("millisecond",16),ej("S",eO,ew),ej("SS",eO,e_),ej("SSS",eO,eE),v="SSSS";v.length<=9;v+="S")ej(v,eC);function iw(e,t){t[eX]=el(("0."+e)*1e3)}for(v="S";v.length<=9;v+="S")eH(v,iw);function i_(){return this._isUTC?"UTC":""}function iE(){return this._isUTC?"Coordinated Universal Time":""}y=ef("Milliseconds",!1),B("z",0,0,"zoneAbbr"),B("zz",0,0,"zoneName");var iS=S.prototype;function ik(e){return nL(1e3*e)}function ix(){return nL.apply(null,arguments).parseZone()}function iT(e){return e}iS.add=ra,iS.calendar=rh,iS.clone=rp,iS.diff=r_,iS.endOf=rH,iS.format=rT,iS.from=rM,iS.fromNow=rO,iS.to=rA,iS.toNow=rL,iS.get=ep,iS.invalidAt=rX,iS.isAfter=rb,iS.isBefore=rm,iS.isBetween=rg,iS.isSame=rv,iS.isSameOrAfter=ry,iS.isSameOrBefore=rw,iS.isValid=rq,iS.lang=rI,iS.locale=rC,iS.localeData=rD,iS.max=nI,iS.min=nC,iS.parsingFlags=rZ,iS.set=eb,iS.startOf=rU,iS.subtract=ro,iS.toArray=rW,iS.toObject=rK,iS.toDate=rG,iS.toISOString=rk,iS.inspect=rx,"undefined"!=typeof 
Symbol&&null!=Symbol.for&&(iS[Symbol.for("nodejs.util.inspect.custom")]=function(){return"Moment<"+this.format()+">"}),iS.toJSON=rV,iS.toString=rS,iS.unix=rz,iS.valueOf=r$,iS.creationData=rJ,iS.eraName=r2,iS.eraNarrow=r3,iS.eraAbbr=r4,iS.eraYear=r5,iS.year=tu,iS.isLeapYear=tc,iS.weekYear=io,iS.isoWeekYear=is,iS.quarter=iS.quarters=ib,iS.month=tn,iS.daysInMonth=tr,iS.week=iS.weeks=tw,iS.isoWeek=iS.isoWeeks=t_,iS.weeksInYear=il,iS.weeksInWeekYear=id,iS.isoWeeksInYear=iu,iS.isoWeeksInISOWeekYear=ic,iS.date=im,iS.day=iS.days=tR,iS.weekday=tj,iS.isoWeekday=tF,iS.dayOfYear=ig,iS.hour=iS.hours=tq,iS.minute=iS.minutes=iv,iS.second=iS.seconds=iy,iS.millisecond=iS.milliseconds=y,iS.utcOffset=nZ,iS.utc=nJ,iS.local=nQ,iS.parseZone=n1,iS.hasAlignedHourOffset=n0,iS.isDST=n2,iS.isLocal=n4,iS.isUtcOffset=n5,iS.isUtc=n6,iS.isUTC=n6,iS.zoneAbbr=i_,iS.zoneName=iE,iS.dates=T("dates accessor is deprecated. Use date instead.",im),iS.months=T("months accessor is deprecated. Use month instead",tn),iS.years=T("years accessor is deprecated. Use year instead",tu),iS.zone=T("moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/",nX),iS.isDSTShifted=T("isDSTShifted is deprecated. 
See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information",n3);var iM=I.prototype;function iO(e,t,n,r){var i=t7(),a=h().set(r,t);return i[n](a,e)}function iA(e,t,n){if(c(e)&&(t=e,e=void 0),e=e||"",null!=t)return iO(e,t,n,"month");var r,i=[];for(r=0;r<12;r++)i[r]=iO(e,r,n,"month");return i}function iL(e,t,n,r){"boolean"==typeof e?(c(t)&&(n=t,t=void 0),t=t||""):(n=t=e,e=!1,c(t)&&(n=t,t=void 0),t=t||"");var i,a=t7(),o=e?a._week.dow:0,s=[];if(null!=n)return iO(t,(n+o)%7,r,"day");for(i=0;i<7;i++)s[i]=iO(t,(i+o)%7,r,"day");return s}function iC(e,t){return iA(e,t,"months")}function iI(e,t){return iA(e,t,"monthsShort")}function iD(e,t,n){return iL(e,t,n,"weekdays")}function iN(e,t,n){return iL(e,t,n,"weekdaysShort")}function iP(e,t,n){return iL(e,t,n,"weekdaysMin")}iM.calendar=N,iM.longDateFormat=W,iM.invalidDate=V,iM.ordinal=X,iM.preparse=iT,iM.postformat=iT,iM.relativeTime=Q,iM.pastFuture=ee,iM.set=L,iM.eras=rQ,iM.erasParse=r1,iM.erasConvertYear=r0,iM.erasAbbrRegex=r9,iM.erasNameRegex=r6,iM.erasNarrowRegex=r8,iM.months=e9,iM.monthsShort=e8,iM.monthsParse=te,iM.monthsRegex=ta,iM.monthsShortRegex=ti,iM.week=tm,iM.firstDayOfYear=ty,iM.firstDayOfWeek=tv,iM.weekdays=tC,iM.weekdaysMin=tD,iM.weekdaysShort=tI,iM.weekdaysParse=tP,iM.weekdaysRegex=tY,iM.weekdaysShortRegex=tB,iM.weekdaysMinRegex=tU,iM.isPM=tK,iM.meridiem=tZ,t6("en",{eras:[{since:"0001-01-01",until:Infinity,offset:1,name:"Anno Domini",narrow:"AD",abbr:"AD"},{since:"0000-12-31",until:-1/0,offset:1,name:"Before Christ",narrow:"BC",abbr:"BC"},],dayOfMonthOrdinalParse:/\d{1,2}(th|st|nd|rd)/,ordinal:function(e){var t=e%10,n=1===el(e%100/10)?"th":1===t?"st":2===t?"nd":3===t?"rd":"th";return e+n}}),t.lang=T("moment.lang is deprecated. Use moment.locale instead.",t6),t.langData=T("moment.langData is deprecated. 
Use moment.localeData instead.",t7);var iR=Math.abs;function ij(){var e=this._data;return this._milliseconds=iR(this._milliseconds),this._days=iR(this._days),this._months=iR(this._months),e.milliseconds=iR(e.milliseconds),e.seconds=iR(e.seconds),e.minutes=iR(e.minutes),e.hours=iR(e.hours),e.months=iR(e.months),e.years=iR(e.years),this}function iF(e,t,n,r){var i=n7(t,n);return e._milliseconds+=r*i._milliseconds,e._days+=r*i._days,e._months+=r*i._months,e._bubble()}function iY(e,t){return iF(this,e,t,1)}function iB(e,t){return iF(this,e,t,-1)}function iU(e){return e<0?Math.floor(e):Math.ceil(e)}function iH(){var e,t,n,r,i,a=this._milliseconds,o=this._days,s=this._months,u=this._data;return a>=0&&o>=0&&s>=0||a<=0&&o<=0&&s<=0||(a+=864e5*iU(iz(s)+o),o=0,s=0),u.milliseconds=a%1e3,e=ec(a/1e3),u.seconds=e%60,t=ec(e/60),u.minutes=t%60,n=ec(t/60),u.hours=n%24,o+=ec(n/24),s+=i=ec(i$(o)),o-=iU(iz(i)),r=ec(s/12),s%=12,u.days=o,u.months=s,u.years=r,this}function i$(e){return 4800*e/146097}function iz(e){return 146097*e/4800}function iG(e){if(!this.isValid())return NaN;var t,n,r=this._milliseconds;if("month"===(e=er(e))||"quarter"===e||"year"===e)switch(t=this._days+r/864e5,n=this._months+i$(t),e){case"month":return n;case"quarter":return n/3;case"year":return n/12}else switch(t=this._days+Math.round(iz(this._months)),e){case"week":return t/7+r/6048e5;case"day":return t+r/864e5;case"hour":return 24*t+r/36e5;case"minute":return 1440*t+r/6e4;case"second":return 86400*t+r/1e3;case"millisecond":return Math.floor(864e5*t)+r;default:throw Error("Unknown unit "+e)}}function iW(){return this.isValid()?this._milliseconds+864e5*this._days+this._months%12*2592e6+31536e6*el(this._months/12):NaN}function iK(e){return function(){return this.as(e)}}var iV=iK("ms"),iq=iK("s"),iZ=iK("m"),iX=iK("h"),iJ=iK("d"),iQ=iK("w"),i1=iK("M"),i0=iK("Q"),i2=iK("y");function i3(){return n7(this)}function i4(e){return e=er(e),this.isValid()?this[e+"s"]():NaN}function i5(e){return function(){return 
this.isValid()?this._data[e]:NaN}}var i6=i5("milliseconds"),i9=i5("seconds"),i8=i5("minutes"),i7=i5("hours"),ae=i5("days"),at=i5("months"),an=i5("years");function ar(){return ec(this.days()/7)}var ai=Math.round,aa={ss:44,s:45,m:45,h:22,d:26,w:null,M:11};function ao(e,t,n,r,i){return i.relativeTime(t||1,!!n,e,r)}function as(e,t,n,r){var i=n7(e).abs(),a=ai(i.as("s")),o=ai(i.as("m")),s=ai(i.as("h")),u=ai(i.as("d")),c=ai(i.as("M")),l=ai(i.as("w")),f=ai(i.as("y")),d=a<=n.ss&&["s",a]||a0,d[4]=r,ao.apply(null,d)}function au(e){return void 0===e?ai:"function"==typeof e&&(ai=e,!0)}function ac(e,t){return void 0!==aa[e]&&(void 0===t?aa[e]:(aa[e]=t,"s"===e&&(aa.ss=t-1),!0))}function al(e,t){if(!this.isValid())return this.localeData().invalidDate();var n,r,i=!1,a=aa;return"object"==typeof e&&(t=e,e=!1),"boolean"==typeof e&&(i=e),"object"==typeof t&&(a=Object.assign({},aa,t),null!=t.s&&null==t.ss&&(a.ss=t.s-1)),r=as(this,!i,a,n=this.localeData()),i&&(r=n.pastFuture(+this,r)),n.postformat(r)}var af=Math.abs;function ad(e){return(e>0)-(e<0)||+e}function ah(){if(!this.isValid())return this.localeData().invalidDate();var e,t,n,r,i,a,o,s,u=af(this._milliseconds)/1e3,c=af(this._days),l=af(this._months),f=this.asSeconds();return f?(e=ec(u/60),t=ec(e/60),u%=60,e%=60,n=ec(l/12),l%=12,r=u?u.toFixed(3).replace(/\.?0+$/,""):"",i=f<0?"-":"",a=ad(this._months)!==ad(f)?"-":"",o=ad(this._days)!==ad(f)?"-":"",s=ad(this._milliseconds)!==ad(f)?"-":"",i+"P"+(n?a+n+"Y":"")+(l?a+l+"M":"")+(c?o+c+"D":"")+(t||e||u?"T":"")+(t?s+t+"H":"")+(e?s+e+"M":"")+(u?s+r+"S":"")):"P0D"}var ap=nU.prototype;return 
ap.isValid=nY,ap.abs=ij,ap.add=iY,ap.subtract=iB,ap.as=iG,ap.asMilliseconds=iV,ap.asSeconds=iq,ap.asMinutes=iZ,ap.asHours=iX,ap.asDays=iJ,ap.asWeeks=iQ,ap.asMonths=i1,ap.asQuarters=i0,ap.asYears=i2,ap.valueOf=iW,ap._bubble=iH,ap.clone=i3,ap.get=i4,ap.milliseconds=i6,ap.seconds=i9,ap.minutes=i8,ap.hours=i7,ap.days=ae,ap.weeks=ar,ap.months=at,ap.years=an,ap.humanize=al,ap.toISOString=ah,ap.toString=ah,ap.toJSON=ah,ap.locale=rC,ap.localeData=rD,ap.toIsoString=T("toIsoString() is deprecated. Please use toISOString() instead (notice the capitals)",ah),ap.lang=rI,B("X",0,0,"unix"),B("x",0,0,"valueOf"),ej("x",eI),ej("X",eP),eH("X",function(e,t,n){n._d=new Date(1e3*parseFloat(e))}),eH("x",function(e,t,n){n._d=new Date(el(e))}),//! moment.js +t.version="2.29.1",r(nL),t.fn=iS,t.min=nN,t.max=nP,t.now=nR,t.utc=h,t.unix=ik,t.months=iC,t.isDate=l,t.locale=t6,t.invalid=g,t.duration=n7,t.isMoment=k,t.weekdays=iD,t.parseZone=ix,t.localeData=t7,t.isDuration=nH,t.monthsShort=iI,t.weekdaysMin=iP,t.defineLocale=t9,t.updateLocale=t8,t.locales=ne,t.weekdaysShort=iN,t.normalizeUnits=er,t.relativeTimeRounding=au,t.relativeTimeThreshold=ac,t.calendarFormat=rd,t.prototype=iS,t.HTML5_FMT={DATETIME_LOCAL:"YYYY-MM-DDTHH:mm",DATETIME_LOCAL_SECONDS:"YYYY-MM-DDTHH:mm:ss",DATETIME_LOCAL_MS:"YYYY-MM-DDTHH:mm:ss.SSS",DATE:"YYYY-MM-DD",TIME:"HH:mm",TIME_SECONDS:"HH:mm:ss",TIME_MS:"HH:mm:ss.SSS",WEEK:"GGGG-[W]WW",MONTH:"YYYY-MM"},t},e.exports=i()},46417(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var n,r=!!("undefined"!=typeof window&&window.document&&window.document.createElement);function i(e){n=e}function a(){if(n)return n;if(!r||!window.document.body)return"indeterminate";var e=window.document.createElement("div");return 
e.appendChild(document.createTextNode("ABCD")),e.dir="rtl",e.style.fontSize="14px",e.style.width="4px",e.style.height="1px",e.style.position="absolute",e.style.top="-1000px",e.style.overflow="scroll",document.body.appendChild(e),n="reverse",e.scrollLeft>0?n="default":(e.scrollLeft=1,0===e.scrollLeft&&(n="negative")),document.body.removeChild(e),n}function o(e,t){var n=e.scrollLeft;if("rtl"!==t)return n;var r=a();if("indeterminate"===r)return Number.NaN;switch(r){case"negative":return e.scrollWidth-e.clientWidth+n;case"reverse":return e.scrollWidth-e.clientWidth-n}return n}function s(e,t,n){if("rtl"!==n){e.scrollLeft=t;return}var r=a();if("indeterminate"!==r)switch(r){case"negative":e.scrollLeft=e.clientWidth-e.scrollWidth+t;break;case"reverse":e.scrollLeft=e.scrollWidth-e.clientWidth-t;break;default:e.scrollLeft=t}}t._setScrollType=i,t.detectScrollType=a,t.getNormalizedScrollLeft=o,t.setNormalizedScrollLeft=s},27418(e){"use strict";/* +object-assign +(c) Sindre Sorhus +@license MIT +*/ var t=Object.getOwnPropertySymbols,n=Object.prototype.hasOwnProperty,r=Object.prototype.propertyIsEnumerable;function i(e){if(null==e)throw TypeError("Object.assign cannot be called with null or undefined");return Object(e)}function a(){try{if(!Object.assign)return!1;var e=new String("abc");if(e[5]="de","5"===Object.getOwnPropertyNames(e)[0])return!1;for(var t={},n=0;n<10;n++)t["_"+String.fromCharCode(n)]=n;var r=Object.getOwnPropertyNames(t).map(function(e){return t[e]});if("0123456789"!==r.join(""))return!1;var i={};if("abcdefghijklmnopqrst".split("").forEach(function(e){i[e]=e}),"abcdefghijklmnopqrst"!==Object.keys(Object.assign({},i)).join(""))return!1;return!0}catch(a){return!1}}e.exports=a()?Object.assign:function(e,a){for(var 
o,s,u=i(e),c=1;c65535&&(Y-=65536,G+=l(Y>>>10|55296),Y=56320|1023&Y),Y=G+l(Y))):q!==x&&$(D,Q)),Y?(ew(),X=ev(),ed=ee-1,ep+=ee-V+1,eg.push(Y),J=ev(),J.offset++,ei&&ei.call(es,Y,{start:X,end:J},e.slice(V-1,ee)),X=J):(em+=d=e.slice(V-1,ee),ep+=d.length,ed=ee-1)}else 10===F&&(eb++,eh++,ep=0),F==F?(em+=l(F),ep++):ew();return eg.join("");function ev(){return{line:eb,column:ep,offset:ed+(ec.offset||0)}}function ey(e,t){var n=ev();n.column+=t,n.offset+=t,ea.call(eu,j[e],n,e)}function ew(){em&&(eg.push(em),er&&er.call(eo,em,{start:X,end:ev()}),em="")}}function B(e){return e>=55296&&e<=57343||e>1114111}function U(e){return e>=1&&e<=8||11===e||e>=13&&e<=31||e>=127&&e<=159||e>=64976&&e<=65007||(65535&e)==65535||(65535&e)==65534}j[L]="Named character references must be terminated by a semicolon",j[C]="Numeric character references must be terminated by a semicolon",j[I]="Named character references cannot be empty",j[D]="Numeric character references cannot be empty",j[N]="Named character references must be known",j[P]="Numeric character references cannot be disallowed",j[R]="Numeric character references cannot be outside the permissible Unicode range"},14779(e){e.exports=b,e.exports.match=a,e.exports.regexpToFunction=o,e.exports.parse=r,e.exports.compile=i,e.exports.tokensToFunction=s,e.exports.tokensToRegExp=p;var t="/",n=RegExp("(\\\\.)|(?:\\:(\\w+)(?:\\(((?:\\\\.|[^\\\\()])+)\\))?|\\(((?:\\\\.|[^\\\\()])+)\\))([+*?])?","g");function r(e,r){for(var i,a=[],o=0,s=0,l="",f=r&&r.delimiter||t,d=r&&r.whitelist||void 0,h=!1;null!==(i=n.exec(e));){var p=i[0],b=i[1],m=i.index;if(l+=e.slice(s,m),s=m+p.length,b){l+=b[1],h=!0;continue}var g="",v=i[2],y=i[3],w=i[4],_=i[5];if(!h&&l.length){var E=l.length-1,S=l[E];(!d||d.indexOf(S)>-1)&&(g=S,l=l.slice(0,E))}l&&(a.push(l),l="",h=!1);var k="+"===_||"*"===_,x="?"===_||"*"===_,T=y||w,M=g||f;a.push({name:v||o++,prefix:g,delimiter:M,optional:x,repeat:k,pattern:T?c(T):"[^"+u(M===f?M:M+f)+"]+?"})}return(l||seM});/**! 
+ * @fileOverview Kickass library to create and place poppers near their reference elements. + * @version 1.16.0 + * @license + * Copyright (c) 2016 Federico Zivolo and contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ var r="undefined"!=typeof window&&"undefined"!=typeof document&&"undefined"!=typeof navigator,i=function(){for(var e=["Edge","Trident","Firefox"],t=0;t=0)return 1;return 0}();function a(e){var t=!1;return function(){!t&&(t=!0,window.Promise.resolve().then(function(){t=!1,e()}))}}function o(e){var t=!1;return function(){t||(t=!0,setTimeout(function(){t=!1,e()},i))}}var s=r&&window.Promise?a:o;function u(e){var t={};return e&&"[object Function]"===t.toString.call(e)}function c(e,t){if(1!==e.nodeType)return[];var n=e.ownerDocument.defaultView.getComputedStyle(e,null);return t?n[t]:n}function l(e){return"HTML"===e.nodeName?e:e.parentNode||e.host}function f(e){if(!e)return document.body;switch(e.nodeName){case"HTML":case"BODY":return e.ownerDocument.body;case"#document":return e.body}var t=c(e),n=t.overflow,r=t.overflowX,i=t.overflowY;return/(auto|scroll|overlay)/.test(n+i+r)?e:f(l(e))}function d(e){return e&&e.referenceNode?e.referenceNode:e}var h=r&&!!(window.MSInputMethodContext&&document.documentMode),p=r&&/MSIE 10/.test(navigator.userAgent);function b(e){return 11===e?h:10===e?p:h||p}function m(e){if(!e)return document.documentElement;for(var t=b(10)?document.body:null,n=e.offsetParent||null;n===t&&e.nextElementSibling;)n=(e=e.nextElementSibling).offsetParent;var r=n&&n.nodeName;return r&&"BODY"!==r&&"HTML"!==r?-1!==["TH","TD","TABLE"].indexOf(n.nodeName)&&"static"===c(n,"position")?m(n):n:e?e.ownerDocument.documentElement:document.documentElement}function g(e){var t=e.nodeName;return"BODY"!==t&&("HTML"===t||m(e.firstElementChild)===e)}function v(e){return null!==e.parentNode?v(e.parentNode):e}function y(e,t){if(!e||!e.nodeType||!t||!t.nodeType)return document.documentElement;var n=e.compareDocumentPosition(t)&Node.DOCUMENT_POSITION_FOLLOWING,r=n?e:t,i=n?t:e,a=document.createRange();a.setStart(r,0),a.setEnd(i,0);var o=a.commonAncestorContainer;if(e!==o&&t!==o||r.contains(i))return g(o)?o:m(o);var s=v(e);return s.host?y(s.host,t):y(e,v(t).host)}function 
w(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"top",n="top"===t?"scrollTop":"scrollLeft",r=e.nodeName;if("BODY"===r||"HTML"===r){var i=e.ownerDocument.documentElement;return(e.ownerDocument.scrollingElement||i)[n]}return e[n]}function _(e,t){var n=arguments.length>2&&void 0!==arguments[2]&&arguments[2],r=w(t,"top"),i=w(t,"left"),a=n?-1:1;return e.top+=r*a,e.bottom+=r*a,e.left+=i*a,e.right+=i*a,e}function E(e,t){var n="x"===t?"Left":"Top",r="Left"===n?"Right":"Bottom";return parseFloat(e["border"+n+"Width"],10)+parseFloat(e["border"+r+"Width"],10)}function S(e,t,n,r){return Math.max(t["offset"+e],t["scroll"+e],n["client"+e],n["offset"+e],n["scroll"+e],b(10)?parseInt(n["offset"+e])+parseInt(r["margin"+("Height"===e?"Top":"Left")])+parseInt(r["margin"+("Height"===e?"Bottom":"Right")]):0)}function k(e){var t=e.body,n=e.documentElement,r=b(10)&&getComputedStyle(n);return{height:S("Height",t,n,r),width:S("Width",t,n,r)}}var x=function(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")},T=function(){function e(e,t){for(var n=0;n2&&void 0!==arguments[2]&&arguments[2],r=b(10),i="HTML"===t.nodeName,a=L(e),o=L(t),s=f(e),u=c(t),l=parseFloat(u.borderTopWidth,10),d=parseFloat(u.borderLeftWidth,10);n&&i&&(o.top=Math.max(o.top,0),o.left=Math.max(o.left,0));var h=A({top:a.top-o.top-l,left:a.left-o.left-d,width:a.width,height:a.height});if(h.marginTop=0,h.marginLeft=0,!r&&i){var p=parseFloat(u.marginTop,10),m=parseFloat(u.marginLeft,10);h.top-=l-p,h.bottom-=l-p,h.left-=d-m,h.right-=d-m,h.marginTop=p,h.marginLeft=m}return(r&&!n?t.contains(s):t===s&&"BODY"!==s.nodeName)&&(h=_(h,t)),h}function I(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n=e.ownerDocument.documentElement,r=C(e,n),i=Math.max(n.clientWidth,window.innerWidth||0),a=Math.max(n.clientHeight,window.innerHeight||0),o=t?0:w(n),s=t?0:w(n,"left");return A({top:o-r.top+r.marginTop,left:s-r.left+r.marginLeft,width:i,height:a})}function D(e){var 
t=e.nodeName;if("BODY"===t||"HTML"===t)return!1;if("fixed"===c(e,"position"))return!0;var n=l(e);return!!n&&D(n)}function N(e){if(!e||!e.parentElement||b())return document.documentElement;for(var t=e.parentElement;t&&"none"===c(t,"transform");)t=t.parentElement;return t||document.documentElement}function P(e,t,n,r){var i=arguments.length>4&&void 0!==arguments[4]&&arguments[4],a={top:0,left:0},o=i?N(e):y(e,d(t));if("viewport"===r)a=I(o,i);else{var s=void 0;"scrollParent"===r?"BODY"===(s=f(l(t))).nodeName&&(s=e.ownerDocument.documentElement):s="window"===r?e.ownerDocument.documentElement:r;var u=C(s,o,i);if("HTML"!==s.nodeName||D(o))a=u;else{var c=k(e.ownerDocument),h=c.height,p=c.width;a.top+=u.top-u.marginTop,a.bottom=h+u.top,a.left+=u.left-u.marginLeft,a.right=p+u.left}}var b="number"==typeof(n=n||0);return a.left+=b?n:n.left||0,a.top+=b?n:n.top||0,a.right-=b?n:n.right||0,a.bottom-=b?n:n.bottom||0,a}function R(e){var t;return e.width*e.height}function j(e,t,n,r,i){var a=arguments.length>5&&void 0!==arguments[5]?arguments[5]:0;if(-1===e.indexOf("auto"))return e;var o=P(n,r,a,i),s={top:{width:o.width,height:t.top-o.top},right:{width:o.right-t.right,height:o.height},bottom:{width:o.width,height:o.bottom-t.bottom},left:{width:t.left-o.left,height:o.height}},u=Object.keys(s).map(function(e){return O({key:e},s[e],{area:R(s[e])})}).sort(function(e,t){return t.area-e.area}),c=u.filter(function(e){var t=e.width,r=e.height;return t>=n.clientWidth&&r>=n.clientHeight}),l=c.length>0?c[0].key:u[0].key,f=e.split("-")[1];return l+(f?"-"+f:"")}function F(e,t,n){var r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:null,i=r?N(t):y(t,d(n));return C(n,i,r)}function Y(e){var t=e.ownerDocument.defaultView.getComputedStyle(e),n=parseFloat(t.marginTop||0)+parseFloat(t.marginBottom||0),r=parseFloat(t.marginLeft||0)+parseFloat(t.marginRight||0);return{width:e.offsetWidth+r,height:e.offsetHeight+n}}function B(e){var t={left:"right",right:"left",bottom:"top",top:"bottom"};return 
e.replace(/left|right|bottom|top/g,function(e){return t[e]})}function U(e,t,n){n=n.split("-")[0];var r=Y(e),i={width:r.width,height:r.height},a=-1!==["right","left"].indexOf(n),o=a?"top":"left",s=a?"left":"top",u=a?"height":"width",c=a?"width":"height";return i[o]=t[o]+t[u]/2-r[u]/2,n===s?i[s]=t[s]-r[c]:i[s]=t[B(s)],i}function H(e,t){return Array.prototype.find?e.find(t):e.filter(t)[0]}function $(e,t,n){if(Array.prototype.findIndex)return e.findIndex(function(e){return e[t]===n});var r=H(e,function(e){return e[t]===n});return e.indexOf(r)}function z(e,t,n){return(void 0===n?e:e.slice(0,$(e,"name",n))).forEach(function(e){e.function&&console.warn("`modifier.function` is deprecated, use `modifier.fn`!");var n=e.function||e.fn;e.enabled&&u(n)&&(t.offsets.popper=A(t.offsets.popper),t.offsets.reference=A(t.offsets.reference),t=n(t,e))}),t}function G(){if(!this.state.isDestroyed){var e={instance:this,styles:{},arrowStyles:{},attributes:{},flipped:!1,offsets:{}};e.offsets.reference=F(this.state,this.popper,this.reference,this.options.positionFixed),e.placement=j(this.options.placement,e.offsets.reference,this.popper,this.reference,this.options.modifiers.flip.boundariesElement,this.options.modifiers.flip.padding),e.originalPlacement=e.placement,e.positionFixed=this.options.positionFixed,e.offsets.popper=U(this.popper,e.offsets.reference,e.placement),e.offsets.popper.position=this.options.positionFixed?"fixed":"absolute",e=z(this.modifiers,e),this.state.isCreated?this.options.onUpdate(e):(this.state.isCreated=!0,this.options.onCreate(e))}}function W(e,t){return e.some(function(e){var n=e.name;return e.enabled&&n===t})}function K(e){for(var t=[!1,"ms","Webkit","Moz","O"],n=e.charAt(0).toUpperCase()+e.slice(1),r=0;ro[p]&&(e.offsets.popper[d]+=s[d]+b-o[p]),e.offsets.popper=A(e.offsets.popper);var m=s[d]+s[l]/2-b/2,g=c(e.instance.popper),v=parseFloat(g["margin"+f],10),y=parseFloat(g["border"+f+"Width"],10),w=m-e.offsets.popper[d]-v-y;return 
w=Math.max(Math.min(o[l]-b,w),0),e.arrowElement=r,e.offsets.arrow=(M(n={},d,Math.round(w)),M(n,h,""),n),e}function ef(e){return"end"===e?"start":"start"===e?"end":e}var ed=["auto-start","auto","auto-end","top-start","top","top-end","right-start","right","right-end","bottom-end","bottom","bottom-start","left-end","left","left-start"],eh=ed.slice(3);function ep(e){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n=eh.indexOf(e),r=eh.slice(n+1).concat(eh.slice(0,n));return t?r.reverse():r}var eb={FLIP:"flip",CLOCKWISE:"clockwise",COUNTERCLOCKWISE:"counterclockwise"};function em(e,t){if(W(e.instance.modifiers,"inner")||e.flipped&&e.placement===e.originalPlacement)return e;var n=P(e.instance.popper,e.instance.reference,t.padding,t.boundariesElement,e.positionFixed),r=e.placement.split("-")[0],i=B(r),a=e.placement.split("-")[1]||"",o=[];switch(t.behavior){case eb.FLIP:o=[r,i];break;case eb.CLOCKWISE:o=ep(r);break;case eb.COUNTERCLOCKWISE:o=ep(r,!0);break;default:o=t.behavior}return o.forEach(function(s,u){if(r!==s||o.length===u+1)return e;i=B(r=e.placement.split("-")[0]);var c=e.offsets.popper,l=e.offsets.reference,f=Math.floor,d="left"===r&&f(c.right)>f(l.left)||"right"===r&&f(c.left)f(l.top)||"bottom"===r&&f(c.top)f(n.right),b=f(c.top)f(n.bottom),g="left"===r&&h||"right"===r&&p||"top"===r&&b||"bottom"===r&&m,v=-1!==["top","bottom"].indexOf(r),y=!!t.flipVariations&&(v&&"start"===a&&h||v&&"end"===a&&p||!v&&"start"===a&&b||!v&&"end"===a&&m),w=!!t.flipVariationsByContent&&(v&&"start"===a&&p||v&&"end"===a&&h||!v&&"start"===a&&m||!v&&"end"===a&&b),_=y||w;(d||g||_)&&(e.flipped=!0,(d||g)&&(r=o[u+1]),_&&(a=ef(a)),e.placement=r+(a?"-"+a:""),e.offsets.popper=O({},e.offsets.popper,U(e.instance.popper,e.offsets.reference,e.placement)),e=z(e.instance.modifiers,e,"flip"))}),e}function eg(e){var t=e.offsets,n=t.popper,r=t.reference,i=e.placement.split("-")[0],a=Math.floor,o=-1!==["top","bottom"].indexOf(i),s=o?"right":"bottom",u=o?"left":"top",c=o?"width":"height";return 
n[s]a(r[s])&&(e.offsets.popper[u]=a(r[s])),e}function ev(e,t,n,r){var i=e.match(/((?:\-|\+)?\d*\.?\d*)(.*)/),a=+i[1],o=i[2];if(!a)return e;if(0===o.indexOf("%")){var s=void 0;return A(s="%p"===o?n:r)[t]/100*a}if("vh"!==o&&"vw"!==o)return a;var u=void 0;return(u="vh"===o?Math.max(document.documentElement.clientHeight,window.innerHeight||0):Math.max(document.documentElement.clientWidth,window.innerWidth||0))/100*a}function ey(e,t,n,r){var i=[0,0],a=-1!==["right","left"].indexOf(r),o=e.split(/(\+|\-)/).map(function(e){return e.trim()}),s=o.indexOf(H(o,function(e){return -1!==e.search(/,|\s/)}));o[s]&&-1===o[s].indexOf(",")&&console.warn("Offsets separated by white space(s) are deprecated, use a comma (,) instead.");var u=/\s*,\s*|\s+/,c=-1!==s?[o.slice(0,s).concat([o[s].split(u)[0]]),[o[s].split(u)[1]].concat(o.slice(s+1))]:[o];return(c=c.map(function(e,r){var i=(1===r?!a:a)?"height":"width",o=!1;return e.reduce(function(e,t){return""===e[e.length-1]&&-1!==["+","-"].indexOf(t)?(e[e.length-1]=t,o=!0,e):o?(e[e.length-1]+=t,o=!1,e):e.concat(t)},[]).map(function(e){return ev(e,i,t,n)})})).forEach(function(e,t){e.forEach(function(n,r){et(n)&&(i[t]+=n*("-"===e[r-1]?-1:1))})}),i}function ew(e,t){var n=t.offset,r=e.placement,i=e.offsets,a=i.popper,o=i.reference,s=r.split("-")[0],u=void 0;return u=et(+n)?[+n,0]:ey(n,a,o,s),"left"===s?(a.top+=u[0],a.left-=u[1]):"right"===s?(a.top+=u[0],a.left+=u[1]):"top"===s?(a.left+=u[0],a.top-=u[1]):"bottom"===s&&(a.left+=u[0],a.top+=u[1]),e.popper=a,e}function e_(e,t){var n=t.boundariesElement||m(e.instance.popper);e.instance.reference===n&&(n=m(n));var r=K("transform"),i=e.instance.popper.style,a=i.top,o=i.left,s=i[r];i.top="",i.left="",i[r]="";var u=P(e.instance.popper,e.instance.reference,t.padding,n,e.positionFixed);i.top=a,i.left=o,i[r]=s,t.boundaries=u;var c=t.priority,l=e.offsets.popper,f={primary:function(e){var n=l[e];return l[e]u[e]&&!t.escapeWithReference&&(r=Math.min(l[n],u[e]-("right"===e?l.width:l.height))),M({},n,r)}};return 
c.forEach(function(e){l=O({},l,f[-1!==["left","top"].indexOf(e)?"primary":"secondary"](e))}),e.offsets.popper=l,e}function eE(e){var t=e.placement,n=t.split("-")[0],r=t.split("-")[1];if(r){var i=e.offsets,a=i.reference,o=i.popper,s=-1!==["bottom","top"].indexOf(n),u=s?"left":"top",c=s?"width":"height",l={start:M({},u,a[u]),end:M({},u,a[u]+a[c]-o[c])};e.offsets.popper=O({},o,l[r])}return e}function eS(e){if(!ec(e.instance.modifiers,"hide","preventOverflow"))return e;var t=e.offsets.reference,n=H(e.instance.modifiers,function(e){return"preventOverflow"===e.name}).boundaries;if(t.bottomn.right||t.top>n.bottom||t.right2&&void 0!==arguments[2]?arguments[2]:{};x(this,e),this.scheduleUpdate=function(){return requestAnimationFrame(r.update)},this.update=s(this.update.bind(this)),this.options=O({},e.Defaults,i),this.state={isDestroyed:!1,isCreated:!1,scrollParents:[]},this.reference=t&&t.jquery?t[0]:t,this.popper=n&&n.jquery?n[0]:n,this.options.modifiers={},Object.keys(O({},e.Defaults.modifiers,i.modifiers)).forEach(function(t){r.options.modifiers[t]=O({},e.Defaults.modifiers[t]||{},i.modifiers?i.modifiers[t]:{})}),this.modifiers=Object.keys(this.options.modifiers).map(function(e){return O({name:e},r.options.modifiers[e])}).sort(function(e,t){return e.order-t.order}),this.modifiers.forEach(function(e){e.enabled&&u(e.onLoad)&&e.onLoad(r.reference,r.popper,r.options,e,r.state)}),this.update();var a=this.options.eventsEnabled;a&&this.enableEventListeners(),this.state.eventsEnabled=a}return T(e,[{key:"update",value:function(){return G.call(this)}},{key:"destroy",value:function(){return V.call(this)}},{key:"enableEventListeners",value:function(){return J.call(this)}},{key:"disableEventListeners",value:function(){return ee.call(this)}}]),e}();eT.Utils=("undefined"!=typeof window?window:n.g).PopperUtils,eT.placements=ed,eT.Defaults=ex;let eM=eT},92703(e,t,n){"use strict";var r=n(50414);function i(){}function a(){}a.resetWarningCache=i,e.exports=function(){function 
e(e,t,n,i,a,o){if(o!==r){var s=Error("Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. Read more at http://fb.me/use-check-prop-types");throw s.name="Invariant Violation",s}}function t(){return e}e.isRequired=e;var n={array:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,elementType:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t,checkPropTypes:a,resetWarningCache:i};return n.PropTypes=n,n}},45697(e,t,n){e.exports=n(92703)()},50414(e){"use strict";var t="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED";e.exports=t},55760(e){"use strict";function t(e){this._maxSize=e,this.clear()}t.prototype.clear=function(){this._size=0,this._values=Object.create(null)},t.prototype.get=function(e){return this._values[e]},t.prototype.set=function(e,t){return this._size>=this._maxSize&&this.clear(),!(e in this._values)&&this._size++,this._values[e]=t};var n=/[^.^\]^[]+|(?=\[\]|\.\.)/g,r=/^\d+$/,i=/^\d/,a=/[~`!#$%\^&*+=\-\[\]\\';,/{}|\\":<>\?]/g,o=/^\s*(['"]?)(.*?)(\1)\s*$/,s=512,u=new t(s),c=new t(s),l=new t(s);function f(e){return u.get(e)||u.set(e,d(e).map(function(e){return e.replace(o,"$2")}))}function d(e){return e.match(n)}function h(e,t,n){var r,i,a,o,s=e.length;for(i=0;i4&&n.slice(0,4)===o&&s.test(t)&&("-"===t.charAt(4)?u=f(t):t=d(t),c=i),new c(u,t))}function f(e){var t=e.slice(5).replace(u,p);return o+t.charAt(0).toUpperCase()+t.slice(1)}function d(e){var t=e.slice(4);return u.test(t)?e:("-"!==(t=t.replace(c,h)).charAt(0)&&(t="-"+t),o+t)}function h(e){return"-"+e.toLowerCase()}function p(e){return e.charAt(1).toUpperCase()}},97247(e,t,n){"use strict";var r=n(19940),i=n(8289),a=n(5812),o=n(94397),s=n(67716),u=n(61805);e.exports=r([a,i,o,s,u])},67716(e,t,n){"use strict";var r=n(17e3),i=n(17596),a=r.booleanish,o=r.number,s=r.spaceSeparated;function 
u(e,t){return"role"===t?t:"aria-"+t.slice(4).toLowerCase()}e.exports=i({transform:u,properties:{ariaActiveDescendant:null,ariaAtomic:a,ariaAutoComplete:null,ariaBusy:a,ariaChecked:a,ariaColCount:o,ariaColIndex:o,ariaColSpan:o,ariaControls:s,ariaCurrent:null,ariaDescribedBy:s,ariaDetails:null,ariaDisabled:a,ariaDropEffect:s,ariaErrorMessage:null,ariaExpanded:a,ariaFlowTo:s,ariaGrabbed:a,ariaHasPopup:null,ariaHidden:a,ariaInvalid:null,ariaKeyShortcuts:null,ariaLabel:null,ariaLabelledBy:s,ariaLevel:o,ariaLive:null,ariaModal:a,ariaMultiLine:a,ariaMultiSelectable:a,ariaOrientation:null,ariaOwns:s,ariaPlaceholder:null,ariaPosInSet:o,ariaPressed:a,ariaReadOnly:a,ariaRelevant:null,ariaRequired:a,ariaRoleDescription:s,ariaRowCount:o,ariaRowIndex:o,ariaRowSpan:o,ariaSelected:a,ariaSetSize:o,ariaSort:null,ariaValueMax:o,ariaValueMin:o,ariaValueNow:o,ariaValueText:null,role:null}})},61805(e,t,n){"use strict";var r=n(17e3),i=n(17596),a=n(10855),o=r.boolean,s=r.overloadedBoolean,u=r.booleanish,c=r.number,l=r.spaceSeparated,f=r.commaSeparated;e.exports=i({space:"html",attributes:{acceptcharset:"accept-charset",classname:"class",htmlfor:"for",httpequiv:"http-equiv"},transform:a,mustUseProperty:["checked","multiple","muted","selected"],properties:{abbr:null,accept:f,acceptCharset:l,accessKey:l,action:null,allow:null,allowFullScreen:o,allowPaymentRequest:o,allowUserMedia:o,alt:null,as:null,async:o,autoCapitalize:null,autoComplete:l,autoFocus:o,autoPlay:o,capture:o,charSet:null,checked:o,cite:null,className:l,cols:c,colSpan:null,content:null,contentEditable:u,controls:o,controlsList:l,coords:c|f,crossOrigin:null,data:null,dateTime:null,decoding:null,default:o,defer:o,dir:null,dirName:null,disabled:o,download:s,draggable:u,encType:null,enterKeyHint:null,form:null,formAction:null,formEncType:null,formMethod:null,formNoValidate:o,formTarget:null,headers:l,height:c,hidden:o,high:c,href:null,hrefLang:null,htmlFor:l,httpEquiv:l,id:null,imageSizes:null,imageSrcSet:f,inputMode:null,integrity:
null,is:null,isMap:o,itemId:null,itemProp:l,itemRef:l,itemScope:o,itemType:l,kind:null,label:null,lang:null,language:null,list:null,loading:null,loop:o,low:c,manifest:null,max:null,maxLength:c,media:null,method:null,min:null,minLength:c,multiple:o,muted:o,name:null,nonce:null,noModule:o,noValidate:o,onAbort:null,onAfterPrint:null,onAuxClick:null,onBeforePrint:null,onBeforeUnload:null,onBlur:null,onCancel:null,onCanPlay:null,onCanPlayThrough:null,onChange:null,onClick:null,onClose:null,onContextMenu:null,onCopy:null,onCueChange:null,onCut:null,onDblClick:null,onDrag:null,onDragEnd:null,onDragEnter:null,onDragExit:null,onDragLeave:null,onDragOver:null,onDragStart:null,onDrop:null,onDurationChange:null,onEmptied:null,onEnded:null,onError:null,onFocus:null,onFormData:null,onHashChange:null,onInput:null,onInvalid:null,onKeyDown:null,onKeyPress:null,onKeyUp:null,onLanguageChange:null,onLoad:null,onLoadedData:null,onLoadedMetadata:null,onLoadEnd:null,onLoadStart:null,onMessage:null,onMessageError:null,onMouseDown:null,onMouseEnter:null,onMouseLeave:null,onMouseMove:null,onMouseOut:null,onMouseOver:null,onMouseUp:null,onOffline:null,onOnline:null,onPageHide:null,onPageShow:null,onPaste:null,onPause:null,onPlay:null,onPlaying:null,onPopState:null,onProgress:null,onRateChange:null,onRejectionHandled:null,onReset:null,onResize:null,onScroll:null,onSecurityPolicyViolation:null,onSeeked:null,onSeeking:null,onSelect:null,onSlotChange:null,onStalled:null,onStorage:null,onSubmit:null,onSuspend:null,onTimeUpdate:null,onToggle:null,onUnhandledRejection:null,onUnload:null,onVolumeChange:null,onWaiting:null,onWheel:null,open:o,optimum:c,pattern:null,ping:l,placeholder:null,playsInline:o,poster:null,preload:null,readOnly:o,referrerPolicy:null,rel:l,required:o,reversed:o,rows:c,rowSpan:c,sandbox:l,scope:null,scoped:o,seamless:o,selected:o,shape:null,size:c,sizes:null,slot:null,span:c,spellCheck:u,src:null,srcDoc:null,srcLang:null,srcSet:f,start:c,step:null,style:null,tabIndex:c,target:nu
ll,title:null,translate:null,type:null,typeMustMatch:o,useMap:null,value:u,width:c,wrap:null,align:null,aLink:null,archive:l,axis:null,background:null,bgColor:null,border:c,borderColor:null,bottomMargin:c,cellPadding:null,cellSpacing:null,char:null,charOff:null,classId:null,clear:null,code:null,codeBase:null,codeType:null,color:null,compact:o,declare:o,event:null,face:null,frame:null,frameBorder:null,hSpace:c,leftMargin:c,link:null,longDesc:null,lowSrc:null,marginHeight:c,marginWidth:c,noResize:o,noHref:o,noShade:o,noWrap:o,object:null,profile:null,prompt:null,rev:null,rightMargin:c,rules:null,scheme:null,scrolling:u,standby:null,summary:null,text:null,topMargin:c,valueType:null,version:null,vAlign:null,vLink:null,vSpace:c,allowTransparency:null,autoCorrect:null,autoSave:null,disablePictureInPicture:o,disableRemotePlayback:o,prefix:null,property:null,results:c,security:null,unselectable:null}})},10855(e,t,n){"use strict";var r=n(28740);function i(e,t){return r(e,t.toLowerCase())}e.exports=i},28740(e){"use strict";function t(e,t){return t in e?e[t]:t}e.exports=t},17596(e,t,n){"use strict";var r=n(66632),i=n(99607),a=n(81674);function o(e){var t,n,o=e.space,s=e.mustUseProperty||[],u=e.attributes||{},c=e.properties,l=e.transform,f={},d={};for(t in c)n=new a(t,l(u,t),c[t],o),-1!==s.indexOf(t)&&(n.mustUseProperty=!0),f[t]=n,d[r(t)]=t,d[r(n.attribute)]=t;return new i(f,d,o)}e.exports=o},81674(e,t,n){"use strict";var r=n(57643),i=n(17e3);e.exports=s,s.prototype=new r,s.prototype.defined=!0;var a=["boolean","booleanish","overloadedBoolean","number","commaSeparated","spaceSeparated","commaOrSpaceSeparated"],o=a.length;function s(e,t,n,s){var c,l=-1;for(u(this,"space",s),r.call(this,e,t);++l=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}function l(e,t){if(null==e)return{};var n,r,i={},a=Object.keys(e);for(r=0;r=0||(i[n]=e[n]);return i}function f(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}function d(e,t){for(var 
n=0;n1&&void 0!==arguments[1]&&arguments[1];return n._tick((0,d.updateNodeHighlightedValue)(n.state.nodes,n.state.links,n.state.config,e,t))}),O(S(n),"_tick",function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=arguments.length>1?arguments[1]:void 0;return t?n.setState(e,t):n.setState(e)}),O(S(n),"_zoomConfig",function(){var e=(0,o.select)("#".concat(n.state.id,"-").concat(u.default.GRAPH_WRAPPER_ID)),t=(0,s.zoom)().scaleExtent([n.state.config.minZoom,n.state.config.maxZoom]);n.state.config.freezeAllDragEvents||t.on("zoom",n._zoomed),null!==n.state.config.initialZoom&&t.scaleTo(e,n.state.config.initialZoom),e.call(t).on("dblclick.zoom",null)}),O(S(n),"_zoomed",function(){var e=o.event.transform;(0,o.selectAll)("#".concat(n.state.id,"-").concat(u.default.GRAPH_CONTAINER_ID)).attr("transform",e),n.state.config.panAndZoom&&n.setState({transform:e.k}),n.debouncedOnZoomChange&&n.state.previousZoom!==e.k&&(n.debouncedOnZoomChange(n.state.previousZoom,e.k),n.setState({previousZoom:e.k}))}),O(S(n),"onClickGraph",function(e){n.state.enableFocusAnimation&&n.setState({enableFocusAnimation:!1});var t,r,i,a=e.target&&e.target.tagName,o=null==e?void 0:null===(t=e.target)||void 0===t?void 0:null===(r=t.attributes)||void 0===r?void 0:null===(i=r.name)||void 0===i?void 0:i.value,s="svg-container-".concat(n.state.id);"SVG"===a.toUpperCase()&&o===s&&n.props.onClickGraph&&n.props.onClickGraph(e)}),O(S(n),"onClickNode",function(e){var t=n.state.nodes[e];if(n.state.config.collapsible){var r=(0,f.getTargetLeafConnections)(e,n.state.links,n.state.config),i=(0,f.toggleLinksMatrixConnections)(n.state.links,r,n.state.config),a=(0,f.toggleLinksConnections)(n.state.d3Links,i),o=null==r?void 0:r["0"],s=!1;o&&(s=1===i[o.source][o.target]),n._tick({links:i,d3Links:a},function(){n.props.onClickNode&&n.props.onClickNode(e,t),s&&n._graphNodeDragConfig()})}else 
n.nodeClickTimer?(n.props.onDoubleClickNode&&n.props.onDoubleClickNode(e,t),n.nodeClickTimer=clearTimeout(n.nodeClickTimer)):n.nodeClickTimer=setTimeout(function(){n.props.onClickNode&&n.props.onClickNode(e,t),n.nodeClickTimer=null},u.default.TTL_DOUBLE_CLICK_IN_MS)}),O(S(n),"onRightClickNode",function(e,t){var r=n.state.nodes[t];n.props.onRightClickNode&&n.props.onRightClickNode(e,t,r)}),O(S(n),"onMouseOverNode",function(e){if(!n.isDraggingNode){var t=n.state.nodes[e];n.props.onMouseOverNode&&n.props.onMouseOverNode(e,t),n.state.config.nodeHighlightBehavior&&n._setNodeHighlightedValue(e,!0)}}),O(S(n),"onMouseOutNode",function(e){if(!n.isDraggingNode){var t=n.state.nodes[e];n.props.onMouseOutNode&&n.props.onMouseOutNode(e,t),n.state.config.nodeHighlightBehavior&&n._setNodeHighlightedValue(e,!1)}}),O(S(n),"onMouseOverLink",function(e,t){if(n.props.onMouseOverLink&&n.props.onMouseOverLink(e,t),n.state.config.linkHighlightBehavior){var r={source:e,target:t};n._tick({highlightedLink:r})}}),O(S(n),"onMouseOutLink",function(e,t){if(n.props.onMouseOutLink&&n.props.onMouseOutLink(e,t),n.state.config.linkHighlightBehavior){var r=void 0;n._tick({highlightedLink:r})}}),O(S(n),"onNodePositionChange",function(e){if(n.props.onNodePositionChange){var t=e.id,r=e.x,i=e.y;n.props.onNodePositionChange(t,r,i)}}),O(S(n),"pauseSimulation",function(){return n.state.simulation.stop()}),O(S(n),"resetNodesPositions",function(){if(!n.state.config.staticGraph){var e=(0,d.initializeNodes)(n.props.data.nodes);for(var t in n.state.nodes){var r=n.state.nodes[t];if(r.fx&&r.fy&&(Reflect.deleteProperty(r,"fx"),Reflect.deleteProperty(r,"fy")),t in e){var 
i=e[t];r.x=i.x,r.y=i.y}}n.state.simulation.alphaTarget(n.state.config.d3.alphaTarget).restart(),n._tick()}}),O(S(n),"restartSimulation",function(){return!n.state.config.staticGraph&&n.state.simulation.restart()}),n.props.id||(0,p.throwErr)(n.constructor.name,l.default.GRAPH_NO_ID_PROP),n.focusAnimationTimeout=null,n.nodeClickTimer=null,n.isDraggingNode=!1,n.state=(0,d.initializeGraphState)(n.props,n.state),n.debouncedOnZoomChange=n.props.onZoomChange?(0,p.debounce)(n.props.onZoomChange,100):null,n}return T(t,e),x(t,[{key:"_graphLinkForceConfig",value:function(){var e=(0,a.forceLink)(this.state.d3Links).id(function(e){return e.id}).distance(this.state.config.d3.linkLength).strength(this.state.config.d3.linkStrength);this.state.simulation.force(u.default.PLI_CLASS_NAME,e)}},{key:"_graphNodeDragConfig",value:function(){var e=(0,i.drag)().on("start",this._onDragStart).on("drag",this._onDragMove).on("end",this._onDragEnd);(0,o.select)("#".concat(this.state.id,"-").concat(u.default.GRAPH_WRAPPER_ID)).selectAll(".node").call(e)}},{key:"_graphBindD3ToReactComponent",value:function(){this.state.config.d3.disableLinkForce||(this.state.simulation.nodes(this.state.d3Nodes).on("tick",this._tick),this._graphLinkForceConfig()),this.state.config.freezeAllDragEvents||this._graphNodeDragConfig()}}]),x(t,[{key:"UNSAFE_componentWillReceiveProps",value:function(e){var t=(0,d.checkForGraphElementsChanges)(e,this.state),n=t.graphElementsUpdated,r=t.newGraphElements,i=n?(0,d.initializeGraphState)(e,this.state):this.state,a=e.config||{},o=(0,d.checkForGraphConfigChanges)(e,this.state),s=o.configUpdated,l=o.d3ConfigUpdated,f=s?(0,p.merge)(c.default,a):this.state.config;r&&this.pauseSimulation();var 
h=a.panAndZoom!==this.state.config.panAndZoom?1:this.state.transform,b=e.data.focusedNodeId,m=this.state.d3Nodes.find(function(e){return"".concat(e.id)==="".concat(b)}),g="".concat(this.state.id,"-").concat(u.default.GRAPH_WRAPPER_ID),v=(0,d.getCenterAndZoomTransformation)(m,this.state.config,g)||this.state.focusTransformation,w=this.props.data.focusedNodeId!==e.data.focusedNodeId;e.onZoomChange&&(this.debouncedOnZoomChange=(0,p.debounce)(e.onZoomChange,100)),this.setState(y({},i,{config:f,configUpdated:s,d3ConfigUpdated:l,newGraphElements:r,transform:h,focusedNodeId:b,enableFocusAnimation:w,focusTransformation:v}))}},{key:"componentDidUpdate",value:function(){(this.state.config.staticGraph||this.state.config.staticGraphWithDragAndDrop)&&this.pauseSimulation(),!this.state.config.staticGraph&&(this.state.newGraphElements||this.state.d3ConfigUpdated)?(this._graphBindD3ToReactComponent(),this.state.config.staticGraphWithDragAndDrop||this.restartSimulation(),this.setState({newGraphElements:!1,d3ConfigUpdated:!1})):this.state.configUpdated&&this._graphNodeDragConfig(),this.state.configUpdated&&(this._zoomConfig(),this.setState({configUpdated:!1}))}},{key:"componentDidMount",value:function(){this.state.config.staticGraph||this._graphBindD3ToReactComponent(),this._zoomConfig()}},{key:"componentWillUnmount",value:function(){this.pauseSimulation(),this.nodeClickTimer&&(clearTimeout(this.nodeClickTimer),this.nodeClickTimer=null),this.focusAnimationTimeout&&(clearTimeout(this.focusAnimationTimeout),this.focusAnimationTimeout=null)}},{key:"render",value:function(){var 
e=(0,h.renderGraph)(this.state.nodes,{onClickNode:this.onClickNode,onDoubleClickNode:this.onDoubleClickNode,onRightClickNode:this.onRightClickNode,onMouseOverNode:this.onMouseOverNode,onMouseOut:this.onMouseOutNode},this.state.d3Links,this.state.links,{onClickLink:this.props.onClickLink,onRightClickLink:this.props.onRightClickLink,onMouseOverLink:this.onMouseOverLink,onMouseOutLink:this.onMouseOutLink},this.state.config,this.state.highlightedNode,this.state.highlightedLink,this.state.transform),t=e.nodes,n=e.links,i=e.defs,a={height:this.state.config.height,width:this.state.config.width},o=this._generateFocusAnimationProps();return r.default.createElement("div",{id:"".concat(this.state.id,"-").concat(u.default.GRAPH_WRAPPER_ID)},r.default.createElement("svg",{name:"svg-container-".concat(this.state.id),style:a,onClick:this.onClickGraph},i,r.default.createElement("g",g({id:"".concat(this.state.id,"-").concat(u.default.GRAPH_CONTAINER_ID)},o),n,t)))}}]),t}(r.default.Component);t.default=A},37973(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.computeNodeDegree=l,t.getTargetLeafConnections=f,t.isNodeVisible=d,t.toggleLinksConnections=h,t.toggleLinksMatrixConnections=p;var r=n(52694);function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable})),n.push.apply(n,r)}return n}function a(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:{};return Object.keys(t).reduce(function(n,r){return t[r]?Object.keys(t[r]).reduce(function(n,i){return e===r&&(n.outDegree+=t[e][i]),e===i&&(n.inDegree+=t[r][e]),n},n):n},{inDegree:0,outDegree:0})}function f(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=arguments.length>2?arguments[2]:void 0,r=n.directed;return(t[e]?Object.keys(t[e]):[]).reduce(function(n,i){return c(i,t,r)&&n.push({source:e,target:i}),n},[])}function 
d(e,t,n){if(!t[e])return!1;if(t[e]._orphan)return!0;var r=l(e,n),i=r.inDegree,a=r.outDegree;return i>0||a>0}function h(e,t){return e.map(function(e){var n=e.source,i=e.target,o=(0,r.getId)(n),s=(0,r.getId)(i);return a({},e,{isHidden:!(t&&t[o]&&t[o][s])})})}function p(e,t,n){var r=n.directed;return t.reduce(function(e,t){e[t.source]||(e[t.source]={}),e[t.source][t.target]||(e[t.source][t.target]=0);var n=0===e[t.source][t.target]?1:0;return e[t.source][t.target]=n,r||(e[t.target][t.source]=n),e},a({},e))}n(69901)},99182(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.buildLinkProps=h,t.buildNodeProps=p;var r=s(n(53880)),i=n(37109),a=n(80362),o=n(52694);function s(e){return e&&e.__esModule?e:{default:e}}function u(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable})),n.push.apply(n,r)}return n}function c(e){for(var t=1;t2&&void 0!==arguments[2]?arguments[2]:{},i=arguments.length>3?arguments[3]:void 0,a=arguments.length>4?arguments[4]:void 0,o=arguments.length>5?arguments[5]:void 0,s=e.highlighted||e.id===(a&&a.source)||e.id===(a&&a.target),u=d(e,i,a,t),l=e.color||t.node.color;s&&t.node.highlightColor!==r.default.KEYWORDS.SAME&&(l=t.node.highlightColor);var h=e.strokeColor||t.node.strokeColor;s&&t.node.highlightStrokeColor!==r.default.KEYWORDS.SAME&&(h=t.node.highlightStrokeColor);var p=e[t.node.labelProperty]||e.id;"function"==typeof t.node.labelProperty&&(p=t.node.labelProperty(e));var b=e.labelPosition||t.node.labelPosition,m=e.strokeWidth||t.node.strokeWidth;s&&t.node.highlightStrokeWidth!==r.default.KEYWORDS.SAME&&(m=t.node.highlightStrokeWidth);var g=1/o,v=e.size||t.node.size,y="object"!==f(v),w=0;y?w=v:"top"===b||"bottom"===b?w=v.height:("right"===b||"left"===b)&&(w=v.width);var 
_=e.fontSize||t.node.fontSize,E=e.highlightFontSize||t.node.highlightFontSize,S=s?E:_,k=S*g+w/100+1.5,x=e.svg||t.node.svg,T=e.fontColor||t.node.fontColor,M=t.node.renderLabel;return void 0!==e.renderLabel&&"boolean"==typeof e.renderLabel&&(M=e.renderLabel),c({},e,{className:r.default.NODE_CLASS_NAME,cursor:t.node.mouseCursor,cx:(null==e?void 0:e.x)||"0",cy:(null==e?void 0:e.y)||"0",dx:k,fill:l,fontColor:T,fontSize:S*g,fontWeight:s?t.node.highlightFontWeight:t.node.fontWeight,id:e.id,label:p,labelPosition:b,opacity:u,overrideGlobalViewGenerator:!e.viewGenerator&&e.svg,renderLabel:M,size:y?v*g:{height:v.height*g,width:v.width*g},stroke:h,strokeWidth:m*g,svg:x,type:e.symbolType||t.node.symbolType,viewGenerator:e.viewGenerator||t.node.viewGenerator,onClickNode:n.onClickNode,onMouseOut:n.onMouseOut,onMouseOverNode:n.onMouseOverNode,onRightClickNode:n.onRightClickNode})}},98510(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={automaticRearrangeAfterDropNode:!1,collapsible:!1,directed:!1,focusAnimationDuration:.75,focusZoom:1,freezeAllDragEvents:!1,height:400,highlightDegree:1,highlightOpacity:1,linkHighlightBehavior:!1,maxZoom:8,minZoom:.1,initialZoom:null,nodeHighlightBehavior:!1,panAndZoom:!1,staticGraph:!1,staticGraphWithDragAndDrop:!1,width:800,d3:{alphaTarget:.05,gravity:-100,linkLength:100,linkStrength:1,disableLinkForce:!1},node:{color:"#d3d3d3",fontColor:"black",fontSize:8,fontWeight:"normal",highlightColor:"SAME",highlightFontSize:8,highlightFontWeight:"normal",highlightStrokeColor:"SAME",highlightStrokeWidth:"SAME",labelProperty:"id",labelPosition:null,mouseCursor:"pointer",opacity:1,renderLabel:!0,size:200,strokeColor:"none",strokeWidth:1.5,svg:"",symbolType:"circle",viewGenerator:null},link:{color:"#d3d3d3",fontColor:"black",fontSize:8,fontWeight:"normal",highlightColor:"SAME",highlightFontSize:8,highlightFontWeight:"normal",labelProperty:"label",mouseCursor:"pointer",opacity:1,renderLabel:!1,semanticStrokeWidth:!1,str
okeWidth:1.5,markerHeight:6,markerWidth:6,type:"STRAIGHT",strokeDasharray:0,strokeDashoffset:0,strokeLinecap:"butt"}};t.default=n},53880(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var r=i(n(11041));function i(e){return e&&e.__esModule?e:{default:e}}function a(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable})),n.push.apply(n,r)}return n}function o(e){for(var t=1;t2&&void 0!==arguments[2]?arguments[2]:[],r=arguments.length>3?arguments[3]:void 0,i=arguments.length>4&&void 0!==arguments[4]?arguments[4]:{},a=n.find(function(t){return t.source.id===e.source&&t.target.id===e.target}),o=a&&(0,c.pick)(a,m),s=(0,c.antiPick)(e,["source","target"]);if(o){var u=i.config&&Object.prototype.hasOwnProperty.call(i.config,"directed")&&r.directed!==i.config.directed,l=h({index:t},o,{},s);return u?h({},l,{isHidden:!1}):r.collapsible?l:h({},l,{isHidden:!1})}var f=!1,d={id:e.source,highlighted:f},p={id:e.target,highlighted:f};return h({index:t,source:d,target:p},s)}function _(e,t){return Object.keys(e).reduce(function(n,r){var i=(0,l.computeNodeDegree)(r,t),a=i.inDegree,o=i.outDegree,s=e[r],u=0===a&&0===o?h({},s,{_orphan:!0}):s;return n[r]=u,n},{})}function E(e){e.nodes&&e.nodes.length||((0,c.logWarning)("Graph",u.default.INSUFFICIENT_DATA),e.nodes=[]),e.links||((0,c.logWarning)("Graph",u.default.INSUFFICIENT_PLIS),e.links=[]);for(var t=e.links.length,n=function(t){var n=e.links[t];e.nodes.find(function(e){return e.id===n.source})||(0,c.throwErr)("Graph","".concat(u.default.INVALID_PLIS,' - "').concat(n.source,'" is not a valid source node id')),e.nodes.find(function(e){return e.id===n.target})||(0,c.throwErr)("Graph","".concat(u.default.INVALID_PLIS,' - "').concat(n.target,'" is not a valid target node id')),n&&void 0!==n.value&&"number"!=typeof n.value&&(0,c.throwErr)("Graph","".concat(u.default.INVALID_PLI_VALUE,' 
- found in link with source "').concat(n.source,'" and target "').concat(n.target,'"'))},r=0;rx?o.focusZoom=x:T4&&void 0!==arguments[4]&&arguments[4],a=i?r:"",o=h({},e[r],{highlighted:i}),s=h({},e,p({},r,o));return t[r]&&0!==n.highlightDegree&&(s=Object.keys(t[r]).reduce(function(e,t){var n=h({},s[t],{highlighted:i});return e[t]=n,e},s)),{nodes:s,highlightedNode:a}}function I(e){var t=Math.sqrt(Math.pow(e.x,2)+Math.pow(e.y,2));return 0===t?e:{x:e.x/t,y:e.y/t}}var D=new Set([o.default.SYMBOLS.CIRCLE]);function N(e,t,n,r){var i=e.sourceId,a=e.targetId,s=e.sourceCoords,u=void 0===s?{}:s,c=e.targetCoords,l=void 0===c?{}:c,f=null==t?void 0:t[i],d=null==t?void 0:t[a];if(!f||!d||(null===(_=n.node)||void 0===_?void 0:_.viewGenerator)||(null==f?void 0:f.viewGenerator)||(null==d?void 0:d.viewGenerator))return{sourceCoords:u,targetCoords:l};var h=f.symbolType||(null===(E=n.node)||void 0===E?void 0:E.symbolType),p=d.symbolType||(null===(S=n.node)||void 0===S?void 0:S.symbolType);if(!D.has(h)&&!D.has(p))return{sourceCoords:u,targetCoords:l};var b=u.x,m=u.y,g=l.x,v=l.y,y=I({x:g-b,y:v-m});if(h===o.default.SYMBOLS.CIRCLE){var w=(null==f?void 0:f.size)||n.node.size;b+=(w=.95*Math.sqrt(w/Math.PI))*y.x,m+=w*y.y}if(p===o.default.SYMBOLS.CIRCLE){var _,E,S,k,x,T=r*Math.min((null===(k=n.link)||void 0===k?void 0:k.markerWidth)||0,(null===(x=n.link)||void 0===x?void 0:x.markerHeight)||0),M=(null==d?void 0:d.size)||n.node.size;g-=((M=.95*Math.sqrt(M/Math.PI))+(n.directed?T:0))*y.x,v-=(M+(n.directed?T:0))*y.y}return{sourceCoords:{x:b,y:m},targetCoords:{x:g,y:v}}}},75791(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.renderGraph=E;var r=h(n(67294)),i=h(n(53880)),a=n(7619),o=h(n(33938)),s=h(n(61740)),u=h(n(28017)),c=n(99182),l=n(52694),f=n(37973),d=n(80362);function h(e){return e&&e.__esModule?e:{default:e}}function p(){return(p=Object.assign||function(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:{},t=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:{},n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:i.LINE_TYPES.STRAIGHT,r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:[],o=e.x,s=e.y,u=p(i.LINE_TYPES[n]||i.LINE_TYPES.STRAIGHT),c=[].concat(a(r),[t]),l=c.map(function(t,n){var r,i=t.x,a=t.y,o=n>0?c[n-1]:e,s=u(o.x,o.y,i,a);return" A".concat(s,",").concat(s," 0 0,1 ").concat(i,",").concat(a)}).join("");return"M".concat(o,",").concat(s).concat(l)}},28017(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var r=i(n(67294));function i(e){return e&&e.__esModule?e:{default:e}}function a(e){return(a="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function o(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}function s(e,t){for(var n=0;n=t&&e0&&void 0!==arguments[0]?arguments[0]:i.default.DEFAULT_NODE_SIZE,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:i.default.SYMBOLS.CIRCLE;return(0,r.symbol)().size(function(){return e}).type(function(){return o(t)})()}function u(e,t){switch(t){case"right":return{dx:e?"".concat(e):i.default.NODE_LABEL_DX,dy:"0",dominantBaseline:"middle",textAnchor:"start"};case"left":return{dx:e?"".concat(-e):"-".concat(i.default.NODE_LABEL_DX),dy:"0",dominantBaseline:"middle",textAnchor:"end"};case"top":return{dx:"0",dy:e?"".concat(-e):"-".concat(i.default.NODE_LABEL_DX),dominantBaseline:"baseline",textAnchor:"middle"};case"bottom":return{dx:"0",dy:e?"".concat(e):i.default.NODE_LABEL_DX,dominantBaseline:"hanging",textAnchor:"middle"};case"center":return{dx:"0",dy:"0",dominantBaseline:"middle",textAnchor:"middle"};default:return{dx:e?"".concat(e):i.default.NODE_LABEL_DX,dy:i.default.NODE_LABEL_DY}}}var c={buildSvgSymbol:s,getLabelPlacementProps:u};t.default=c},11041(e,t){"use 
strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={SYMBOLS:{CIRCLE:"circle",CROSS:"cross",DIAMOND:"diamond",SQUARE:"square",STAR:"star",TRIANGLE:"triangle",WYE:"wye"}};t.default=n},34214(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=void 0;var n={GRAPH_NO_ID_PROP:"id prop not defined! id property is mandatory and it should be unique.",INSUFFICIENT_PLIS:"you are passing invalid data to react-d3-graph. You must include a links array, even if empty, in the data object you're passing down to the component.",INVALID_PLIS:"you provided a invalid links data structure. Links source and target attributes must point to an existent node",INSUFFICIENT_DATA:"you have not provided enough data for react-d3-graph to render something. You need to provide at least one node",INVALID_PLI_VALUE:"links 'value' attribute must be of type number"};t.default=n},94164(e,t,n){"use strict";r={value:!0},Object.defineProperty(t,"kJ",{enumerable:!0,get:function(){return i.default}}),r={enumerable:!0,get:function(){return a.default}},r={enumerable:!0,get:function(){return o.default}};var r,i=s(n(82623)),a=s(n(61740)),o=s(n(33938));function s(e){return e&&e.__esModule?e:{default:e}}},69901(e,t){"use strict";function n(e){return(n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}Object.defineProperty(t,"__esModule",{value:!0}),t.isDeepEqual=a,t.isEmptyObject=o,t.deepClone=s,t.merge=u,t.pick=c,t.antiPick=l,t.debounce=f,t.throwErr=h,t.logError=p,t.logWarning=b;var r=20;function i(e,t){return!!e&&Object.prototype.hasOwnProperty.call(e,t)&&"object"===n(e[t])&&null!==e[t]&&!o(e[t])}function a(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:0,s=[];if(0===n&&e===t)return!0;if(o(e)&&!o(t)||!o(e)&&o(t))return!1;var 
u=Object.keys(e),c=Object.keys(t);if(u.length!==c.length)return!1;for(var l=0,f=u;l1&&void 0!==arguments[1]?arguments[1]:{},n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:0,a=Object.keys(e),o=0,u=a;o0&&void 0!==arguments[0]?arguments[0]:{},t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},i=arguments.length>2&&void 0!==arguments[2]?arguments[2]:0,a={};if(0===Object.keys(e||{}).length)return t&&!o(t)?t:{};for(var s=0,c=Object.keys(e);s1&&void 0!==arguments[1]?arguments[1]:[];return t.reduce(function(t,n){return Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]),t},{})}function l(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[],n=Object.keys(e).filter(function(e){return!t.includes(e)});return c(e,n)}function f(e,t){var n;return function(){for(var r=arguments.length,i=Array(r),a=0;a0&&void 0!==arguments[0]?arguments[0]:"N/A",t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"N/A";return"react-d3-graph :: ".concat(e," :: ").concat(t)}function h(e,t){throw Error(d(e,t))}function p(e,t){console.error(d(e,t))}function b(e,t){var n="react-d3-graph :: ".concat(e," :: ").concat(t);console.warn(n)}},64448(e,t,n){"use strict";/** @license React v16.12.0 + * react-dom.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ */ var r,i,a,o,s,u=n(67294),c=n(27418),l=n(63840);function f(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;nt}return!1}function eM(e,t,n,r,i,a){this.acceptsBooleans=2===t||3===t||4===t,this.attributeName=r,this.attributeNamespace=i,this.mustUseProperty=n,this.propertyName=e,this.type=t,this.sanitizeURL=a}var eO={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach(function(e){eO[e]=new eM(e,0,!1,e,null,!1)}),[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var t=e[0];eO[t]=new eM(t,1,!1,e[1],null,!1)}),["contentEditable","draggable","spellCheck","value"].forEach(function(e){eO[e]=new eM(e,2,!1,e.toLowerCase(),null,!1)}),["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){eO[e]=new eM(e,2,!1,e,null,!1)}),"allowFullScreen async autoFocus autoPlay controls default defer disabled disablePictureInPicture formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach(function(e){eO[e]=new eM(e,3,!1,e.toLowerCase(),null,!1)}),["checked","multiple","muted","selected"].forEach(function(e){eO[e]=new eM(e,3,!0,e,null,!1)}),["capture","download"].forEach(function(e){eO[e]=new eM(e,4,!1,e,null,!1)}),["cols","rows","size","span"].forEach(function(e){eO[e]=new eM(e,6,!1,e,null,!1)}),["rowSpan","start"].forEach(function(e){eO[e]=new eM(e,5,!1,e.toLowerCase(),null,!1)});var eA=/[\-:]([a-z])/g;function eL(e){return e[1].toUpperCase()}function eC(e){switch(typeof e){case"boolean":case"number":case"object":case"string":case"undefined":return e;default:return""}}function eI(e,t,n,r){var i=eO.hasOwnProperty(t)?eO[t]:null;(null!==i?0===i.type:!r&&2=t.length))throw Error(f(93));t=t[0]}n=t}null==n&&(n="")}e._wrapperState={initialValue:eC(n)}}function eV(e,t){var 
n=eC(t.value),r=eC(t.defaultValue);null!=n&&((n=""+n)!==e.value&&(e.value=n),null==t.defaultValue&&e.defaultValue!==n&&(e.defaultValue=n)),null!=r&&(e.defaultValue=""+r)}function eq(e){var t=e.textContent;t===e._wrapperState.initialValue&&""!==t&&null!==t&&(e.value=t)}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach(function(e){var t=e.replace(eA,eL);eO[t]=new eM(t,1,!1,e,null,!1)}),"xlink:actuate xlink:arcrole xlink:role xlink:show xlink:title xlink:type".split(" ").forEach(function(e){var t=e.replace(eA,eL);eO[t]=new eM(t,1,!1,e,"http://www.w3.org/1999/xlink",!1)}),["xml:base","xml:lang","xml:space"].forEach(function(e){var t=e.replace(eA,eL);eO[t]=new eM(t,1,!1,e,"http://www.w3.org/XML/1998/namespace",!1)}),["tabIndex","crossOrigin"].forEach(function(e){eO[e]=new eM(e,1,!1,e.toLowerCase(),null,!1)}),eO.xlinkHref=new 
eM("xlinkHref",1,!1,"xlink:href","http://www.w3.org/1999/xlink",!0),["src","href","action","formAction"].forEach(function(e){eO[e]=new eM(e,1,!1,e.toLowerCase(),null,!0)});var eZ={html:"http://www.w3.org/1999/xhtml",mathml:"http://www.w3.org/1998/Math/MathML",svg:"http://www.w3.org/2000/svg"};function eX(e){switch(e){case"svg":return"http://www.w3.org/2000/svg";case"math":return"http://www.w3.org/1998/Math/MathML";default:return"http://www.w3.org/1999/xhtml"}}function eJ(e,t){return null==e||"http://www.w3.org/1999/xhtml"===e?eX(t):"http://www.w3.org/2000/svg"===e&&"foreignObject"===t?"http://www.w3.org/1999/xhtml":e}var eQ,e1,e0=(eQ=function(e,t){if(e.namespaceURI!==eZ.svg||"innerHTML"in e)e.innerHTML=t;else{for((e1=e1||document.createElement("div")).innerHTML=""+t.valueOf().toString()+"",t=e1.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;t.firstChild;)e.appendChild(t.firstChild)}},"undefined"!=typeof MSApp&&MSApp.execUnsafeLocalFunction?function(e,t,n,r){MSApp.execUnsafeLocalFunction(function(){return eQ(e,t,n,r)})}:eQ);function e2(e,t){if(t){var n=e.firstChild;if(n&&n===e.lastChild&&3===n.nodeType){n.nodeValue=t;return}}e.textContent=t}function e3(e,t){var n={};return n[e.toLowerCase()]=t.toLowerCase(),n["Webkit"+e]="webkit"+t,n["Moz"+e]="moz"+t,n}var e4={animationend:e3("Animation","AnimationEnd"),animationiteration:e3("Animation","AnimationIteration"),animationstart:e3("Animation","AnimationStart"),transitionend:e3("Transition","TransitionEnd")},e5={},e6={};function e9(e){if(e5[e])return e5[e];if(!e4[e])return e;var t,n=e4[e];for(t in n)if(n.hasOwnProperty(t)&&t in e6)return e5[e]=n[t];return e}eo&&(e6=document.createElement("div").style,"AnimationEvent"in window||(delete e4.animationend.animation,delete e4.animationiteration.animation,delete e4.animationstart.animation),"TransitionEvent"in window||delete e4.transitionend.transition);var e8=e9("animationend"),e7=e9("animationiteration"),te=e9("animationstart"),tt=e9("transitionend"),tn="abort 
canplay canplaythrough durationchange emptied encrypted ended error loadeddata loadedmetadata loadstart pause play playing progress ratechange seeked seeking stalled suspend timeupdate volumechange waiting".split(" ");function tr(e){var t=e,n=e;if(e.alternate)for(;t.return;)t=t.return;else{e=t;do 0!=(1026&(t=e).effectTag)&&(n=t.return),e=t.return;while(e)}return 3===t.tag?n:null}function ti(e){if(13===e.tag){var t=e.memoizedState;if(null===t&&null!==(e=e.alternate)&&(t=e.memoizedState),null!==t)return t.dehydrated}return null}function ta(e){if(tr(e)!==e)throw Error(f(188))}function to(e){var t=e.alternate;if(!t){if(null===(t=tr(e)))throw Error(f(188));return t!==e?null:e}for(var n=e,r=t;;){var i=n.return;if(null===i)break;var a=i.alternate;if(null===a){if(null!==(r=i.return)){n=r;continue}break}if(i.child===a.child){for(a=i.child;a;){if(a===n)return ta(i),e;if(a===r)return ta(i),t;a=a.sibling}throw Error(f(188))}if(n.return!==r.return)n=i,r=a;else{for(var o=!1,s=i.child;s;){if(s===n){o=!0,n=i,r=a;break}if(s===r){o=!0,r=i,n=a;break}s=s.sibling}if(!o){for(s=a.child;s;){if(s===n){o=!0,n=a,r=i;break}if(s===r){o=!0,r=a,n=i;break}s=s.sibling}if(!o)throw Error(f(189))}}if(n.alternate!==r)throw Error(f(190))}if(3!==n.tag)throw Error(f(188));return n.stateNode.current===n?e:t}function ts(e){if(!(e=to(e)))return null;for(var t=e;;){if(5===t.tag||6===t.tag)return t;if(t.child)t.child.return=t,t=t.child;else{if(t===e)break;for(;!t.sibling;){if(!t.return||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}}return null}var tu,tc,tl,tf=!1,td=[],th=null,tp=null,tb=null,tm=new Map,tg=new Map,tv=[],ty="mousedown mouseup touchcancel touchend touchstart auxclick dblclick pointercancel pointerdown pointerup dragend dragstart drop compositionend compositionstart keydown keypress keyup input textInput close cancel copy cut paste click change contextmenu reset submit".split(" "),tw="focus blur dragenter dragleave mouseover mouseout pointerover pointerout 
gotpointercapture lostpointercapture".split(" ");function t_(e){var t=nA(e);ty.forEach(function(n){nL(n,e,t)}),tw.forEach(function(n){nL(n,e,t)})}function tE(e,t,n,r){return{blockedOn:e,topLevelType:t,eventSystemFlags:32|n,nativeEvent:r}}function tS(e,t){switch(e){case"focus":case"blur":th=null;break;case"dragenter":case"dragleave":tp=null;break;case"mouseover":case"mouseout":tb=null;break;case"pointerover":case"pointerout":tm.delete(t.pointerId);break;case"gotpointercapture":case"lostpointercapture":tg.delete(t.pointerId)}}function tk(e,t,n,r,i){return null===e||e.nativeEvent!==i?(e=tE(t,n,r,i),null!==t&&null!==(t=n7(t))&&tc(t),e):(e.eventSystemFlags|=r,e)}function tx(e,t,n,r){switch(t){case"focus":return th=tk(th,e,t,n,r),!0;case"dragenter":return tp=tk(tp,e,t,n,r),!0;case"mouseover":return tb=tk(tb,e,t,n,r),!0;case"pointerover":var i=r.pointerId;return tm.set(i,tk(tm.get(i)||null,e,t,n,r)),!0;case"gotpointercapture":return i=r.pointerId,tg.set(i,tk(tg.get(i)||null,e,t,n,r)),!0}return!1}function tT(e){var t=n8(e.target);if(null!==t){var n=tr(t);if(null!==n){if(13===(t=n.tag)){if(null!==(t=ti(n))){e.blockedOn=t,l.unstable_runWithPriority(e.priority,function(){tl(n)});return}}else if(3===t&&n.stateNode.hydrate){e.blockedOn=3===n.tag?n.stateNode.containerInfo:null;return}}}e.blockedOn=null}function tM(e){if(null!==e.blockedOn)return!1;var t=nT(e.topLevelType,e.eventSystemFlags,e.nativeEvent);if(null!==t){var n=n7(t);return null!==n&&tc(n),e.blockedOn=t,!1}return!0}function tO(e,t,n){tM(e)&&n.delete(t)}function tA(){for(tf=!1;0this.eventPool.length&&this.eventPool.push(e)}function tz(e){e.eventPool=[],e.getPooled=tH,e.release=t$}c(tU.prototype,{preventDefault:function(){this.defaultPrevented=!0;var e=this.nativeEvent;e&&(e.preventDefault?e.preventDefault():"unknown"!=typeof e.returnValue&&(e.returnValue=!1),this.isDefaultPrevented=tY)},stopPropagation:function(){var e=this.nativeEvent;e&&(e.stopPropagation?e.stopPropagation():"unknown"!=typeof 
e.cancelBubble&&(e.cancelBubble=!0),this.isPropagationStopped=tY)},persist:function(){this.isPersistent=tY},isPersistent:tB,destructor:function(){var e,t=this.constructor.Interface;for(e in t)this[e]=null;this.nativeEvent=this._targetInst=this.dispatchConfig=null,this.isPropagationStopped=this.isDefaultPrevented=tB,this._dispatchInstances=this._dispatchListeners=null}}),tU.Interface={type:null,target:null,currentTarget:function(){return null},eventPhase:null,bubbles:null,cancelable:null,timeStamp:function(e){return e.timeStamp||Date.now()},defaultPrevented:null,isTrusted:null},tU.extend=function(e){function t(){}function n(){return r.apply(this,arguments)}var r=this;t.prototype=r.prototype;var i=new t;return c(i,n.prototype),n.prototype=i,n.prototype.constructor=n,n.Interface=c({},r.Interface,e),n.extend=r.extend,tz(n),n},tz(tU);var tG=tU.extend({animationName:null,elapsedTime:null,pseudoElement:null}),tW=tU.extend({clipboardData:function(e){return"clipboardData"in e?e.clipboardData:window.clipboardData}}),tK=tU.extend({view:null,detail:null}),tV=tK.extend({relatedTarget:null});function tq(e){var t=e.keyCode;return"charCode"in e?0===(e=e.charCode)&&13===t&&(e=13):e=t,10===e&&(e=13),32<=e||13===e?e:0}var tZ={Esc:"Escape",Spacebar:" ",Left:"ArrowLeft",Up:"ArrowUp",Right:"ArrowRight",Down:"ArrowDown",Del:"Delete",Win:"OS",Menu:"ContextMenu",Apps:"ContextMenu",Scroll:"ScrollLock",MozPrintableKey:"Unidentified"},tX={8:"Backspace",9:"Tab",12:"Clear",13:"Enter",16:"Shift",17:"Control",18:"Alt",19:"Pause",20:"CapsLock",27:"Escape",32:" ",33:"PageUp",34:"PageDown",35:"End",36:"Home",37:"ArrowLeft",38:"ArrowUp",39:"ArrowRight",40:"ArrowDown",45:"Insert",46:"Delete",112:"F1",113:"F2",114:"F3",115:"F4",116:"F5",117:"F6",118:"F7",119:"F8",120:"F9",121:"F10",122:"F11",123:"F12",144:"NumLock",145:"ScrollLock",224:"Meta"},tJ={Alt:"altKey",Control:"ctrlKey",Meta:"metaKey",Shift:"shiftKey"};function tQ(e){var t=this.nativeEvent;return 
t.getModifierState?t.getModifierState(e):!!(e=tJ[e])&&!!t[e]}function t1(){return tQ}for(var t0=tK.extend({key:function(e){if(e.key){var t=tZ[e.key]||e.key;if("Unidentified"!==t)return t}return"keypress"===e.type?13===(e=tq(e))?"Enter":String.fromCharCode(e):"keydown"===e.type||"keyup"===e.type?tX[e.keyCode]||"Unidentified":""},location:null,ctrlKey:null,shiftKey:null,altKey:null,metaKey:null,repeat:null,locale:null,getModifierState:t1,charCode:function(e){return"keypress"===e.type?tq(e):0},keyCode:function(e){return"keydown"===e.type||"keyup"===e.type?e.keyCode:0},which:function(e){return"keypress"===e.type?tq(e):"keydown"===e.type||"keyup"===e.type?e.keyCode:0}}),t2=0,t3=0,t4=!1,t5=!1,t6=tK.extend({screenX:null,screenY:null,clientX:null,clientY:null,pageX:null,pageY:null,ctrlKey:null,shiftKey:null,altKey:null,metaKey:null,getModifierState:t1,button:null,buttons:null,relatedTarget:function(e){return e.relatedTarget||(e.fromElement===e.srcElement?e.toElement:e.fromElement)},movementX:function(e){if(("movementX"in e))return e.movementX;var t=t2;return t2=e.screenX,t4?"mousemove"===e.type?e.screenX-t:0:(t4=!0,0)},movementY:function(e){if(("movementY"in e))return e.movementY;var t=t3;return t3=e.screenY,t5?"mousemove"===e.type?e.screenY-t:0:(t5=!0,0)}}),t9=t6.extend({pointerId:null,width:null,height:null,pressure:null,tangentialPressure:null,tiltX:null,tiltY:null,twist:null,pointerType:null,isPrimary:null}),t8=t6.extend({dataTransfer:null}),t7=tK.extend({touches:null,targetTouches:null,changedTouches:null,altKey:null,metaKey:null,ctrlKey:null,shiftKey:null,getModifierState:t1}),ne=tU.extend({propertyName:null,elapsedTime:null,pseudoElement:null}),nt=t6.extend({deltaX:function(e){return("deltaX"in e)?e.deltaX:("wheelDeltaX"in e)?-e.wheelDeltaX:0},deltaY:function(e){return("deltaY"in e)?e.deltaY:("wheelDeltaY"in e)?-e.wheelDeltaY:("wheelDelta"in 
e)?-e.wheelDelta:0},deltaZ:null,deltaMode:null}),nn=[["blur","blur",0],["cancel","cancel",0],["click","click",0],["close","close",0],["contextmenu","contextMenu",0],["copy","copy",0],["cut","cut",0],["auxclick","auxClick",0],["dblclick","doubleClick",0],["dragend","dragEnd",0],["dragstart","dragStart",0],["drop","drop",0],["focus","focus",0],["input","input",0],["invalid","invalid",0],["keydown","keyDown",0],["keypress","keyPress",0],["keyup","keyUp",0],["mousedown","mouseDown",0],["mouseup","mouseUp",0],["paste","paste",0],["pause","pause",0],["play","play",0],["pointercancel","pointerCancel",0],["pointerdown","pointerDown",0],["pointerup","pointerUp",0],["ratechange","rateChange",0],["reset","reset",0],["seeked","seeked",0],["submit","submit",0],["touchcancel","touchCancel",0],["touchend","touchEnd",0],["touchstart","touchStart",0],["volumechange","volumeChange",0],["drag","drag",1],["dragenter","dragEnter",1],["dragexit","dragExit",1],["dragleave","dragLeave",1],["dragover","dragOver",1],["mousemove","mouseMove",1],["mouseout","mouseOut",1],["mouseover","mouseOver",1],["pointermove","pointerMove",1],["pointerout","pointerOut",1],["pointerover","pointerOver",1],["scroll","scroll",1],["toggle","toggle",1],["touchmove","touchMove",1],["wheel","wheel",1],["abort","abort",2],[e8,"animationEnd",2],[e7,"animationIteration",2],[te,"animationStart",2],["canplay","canPlay",2],["canplaythrough","canPlayThrough",2],["durationchange","durationChange",2],["emptied","emptied",2],["encrypted","encrypted",2],["ended","ended",2],["error","error",2],["gotpointercapture","gotPointerCapture",2],["load","load",2],["loadeddata","loadedData",2],["loadedmetadata","loadedMetadata",2],["loadstart","loadStart",2],["lostpointercapture","lostPointerCapture",2],["playing","playing",2],["progress","progress",2],["seeking","seeking",2],["stalled","stalled",2],["suspend","suspend",2],["timeupdate","timeUpdate",2],[tt,"transitionEnd",2],["waiting","waiting",2]],nr={},ni={},na=0;na=t)return{node:r,
offset:t-e};e=n}a:{for(;r;){if(r.nextSibling){r=r.nextSibling;break a}r=r.parentNode}r=void 0}r=nU(r)}}function n$(e,t){return!!e&&!!t&&(e===t||(!e||3!==e.nodeType)&&(t&&3===t.nodeType?n$(e,t.parentNode):"contains"in e?e.contains(t):!!e.compareDocumentPosition&&!!(16&e.compareDocumentPosition(t))))}function nz(){for(var e=window,t=nB();t instanceof e.HTMLIFrameElement;){try{var n="string"==typeof t.contentWindow.location.href}catch(r){n=!1}if(n)e=t.contentWindow;else break;t=nB(e.document)}return t}function nG(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return t&&("input"===t&&("text"===e.type||"search"===e.type||"tel"===e.type||"url"===e.type||"password"===e.type)||"textarea"===t||"true"===e.contentEditable)}var nW="$",nK="/$",nV="$?",nq="$!",nZ=null,nX=null;function nJ(e,t){switch(e){case"button":case"input":case"select":case"textarea":return!!t.autoFocus}return!1}function nQ(e,t){return"textarea"===e||"option"===e||"noscript"===e||"string"==typeof t.children||"number"==typeof t.children||"object"==typeof t.dangerouslySetInnerHTML&&null!==t.dangerouslySetInnerHTML&&null!=t.dangerouslySetInnerHTML.__html}var n1="function"==typeof setTimeout?setTimeout:void 0,n0="function"==typeof clearTimeout?clearTimeout:void 0;function n2(e){for(;null!=e;e=e.nextSibling){var t=e.nodeType;if(1===t||3===t)break}return e}function n3(e){e=e.previousSibling;for(var t=0;e;){if(8===e.nodeType){var n=e.data;if(n===nW||n===nq||n===nV){if(0===t)return e;t--}else n===nK&&t++}e=e.previousSibling}return null}var n4=Math.random().toString(36).slice(2),n5="__reactInternalInstance$"+n4,n6="__reactEventHandlers$"+n4,n9="__reactContainere$"+n4;function n8(e){var t=e[n5];if(t)return t;for(var n=e.parentNode;n;){if(t=n[n9]||n[n5]){if(n=t.alternate,null!==t.child||null!==n&&null!==n.child)for(e=n3(e);null!==e;){if(n=e[n5])return n;e=n3(e)}return t}n=(e=n).parentNode}return null}function n7(e){return(e=e[n5]||e[n9])&&(5===e.tag||6===e.tag||13===e.tag||3===e.tag)?e:null}function 
re(e){if(5===e.tag||6===e.tag)return e.stateNode;throw Error(f(33))}function rt(e){return e[n6]||null}var rn=null,rr=null,ri=null;function ra(){if(ri)return ri;var e,t,n=rr,r=n.length,i="value"in rn?rn.value:rn.textContent,a=i.length;for(e=0;e=rl),rh=" ",rp={beforeInput:{phasedRegistrationNames:{bubbled:"onBeforeInput",captured:"onBeforeInputCapture"},dependencies:["compositionend","keypress","textInput","paste"]},compositionEnd:{phasedRegistrationNames:{bubbled:"onCompositionEnd",captured:"onCompositionEndCapture"},dependencies:"blur compositionend keydown keypress keyup mousedown".split(" ")},compositionStart:{phasedRegistrationNames:{bubbled:"onCompositionStart",captured:"onCompositionStartCapture"},dependencies:"blur compositionstart keydown keypress keyup mousedown".split(" ")},compositionUpdate:{phasedRegistrationNames:{bubbled:"onCompositionUpdate",captured:"onCompositionUpdateCapture"},dependencies:"blur compositionupdate keydown keypress keyup mousedown".split(" ")}},rb=!1;function rm(e,t){switch(e){case"keyup":return -1!==ru.indexOf(t.keyCode);case"keydown":return 229!==t.keyCode;case"keypress":case"mousedown":case"blur":return!0;default:return!1}}function rg(e){return"object"==typeof(e=e.detail)&&"data"in e?e.data:null}var rv=!1;function ry(e,t){switch(e){case"compositionend":return rg(t);case"keypress":if(32!==t.which)return null;return rb=!0,rh;case"textInput":return(e=t.data)===rh&&rb?null:e;default:return null}}function rw(e,t){if(rv)return"compositionend"===e||!rc&&rm(e,t)?(e=ra(),ri=rr=rn=null,rv=!1,e):null;switch(e){case"paste":default:return null;case"keypress":if(!(t.ctrlKey||t.altKey||t.metaKey)||t.ctrlKey&&t.altKey){if(t.char&&1=document.documentMode,rK={select:{phasedRegistrationNames:{bubbled:"onSelect",captured:"onSelectCapture"},dependencies:"blur contextmenu dragend focus keydown keyup mousedown mouseup selectionchange".split(" ")}},rV=null,rq=null,rZ=null,rX=!1;function rJ(e,t){var 
n=t.window===t?t.document:9===t.nodeType?t:t.ownerDocument;return rX||null==rV||rV!==nB(n)?null:(n="selectionStart"in(n=rV)&&nG(n)?{start:n.selectionStart,end:n.selectionEnd}:{anchorNode:(n=(n.ownerDocument&&n.ownerDocument.defaultView||window).getSelection()).anchorNode,anchorOffset:n.anchorOffset,focusNode:n.focusNode,focusOffset:n.focusOffset},rZ&&rG(rZ,n)?null:(rZ=n,(e=tU.getPooled(rK.select,rq,e,t)).type="select",e.target=rV,tF(e),e))}var rQ={eventTypes:rK,extractEvents:function(e,t,n,r){var i,a=r.window===r?r.document:9===r.nodeType?r:r.ownerDocument;if(!(i=!a)){a:{a=nA(a),i=y.onSelect;for(var o=0;or2||(e.current=r0[r2],r0[r2]=null,r2--)}function r4(e,t){r0[++r2]=e.current,e.current=t}var r5={},r6={current:r5},r9={current:!1},r8=r5;function r7(e,t){var n=e.type.contextTypes;if(!n)return r5;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===t)return r.__reactInternalMemoizedMaskedChildContext;var i,a={};for(i in n)a[i]=t[i];return r&&((e=e.stateNode).__reactInternalMemoizedUnmaskedChildContext=t,e.__reactInternalMemoizedMaskedChildContext=a),a}function ie(e){return null!=(e=e.childContextTypes)}function it(e){r3(r9,e),r3(r6,e)}function ir(e){r3(r9,e),r3(r6,e)}function ii(e,t,n){if(r6.current!==r5)throw Error(f(168));r4(r6,t,e),r4(r9,n,e)}function ia(e,t,n){var r=e.stateNode;if(e=t.childContextTypes,"function"!=typeof r.getChildContext)return n;for(var i in r=r.getChildContext())if(!(i in e))throw Error(f(108,ei(t)||"Unknown",i));return c({},n,{},r)}function io(e){var t=e.stateNode;return t=t&&t.__reactInternalMemoizedMergedChildContext||r5,r8=r6.current,r4(r6,t,e),r4(r9,r9.current,e),!0}function is(e,t,n){var r=e.stateNode;if(!r)throw Error(f(169));n?(t=ia(e,t,r8),r.__reactInternalMemoizedMergedChildContext=t,r3(r9,e),r3(r6,e),r4(r6,t,e)):r3(r9,e),r4(r9,n,e)}var 
iu=l.unstable_runWithPriority,ic=l.unstable_scheduleCallback,il=l.unstable_cancelCallback,id=l.unstable_shouldYield,ih=l.unstable_requestPaint,ip=l.unstable_now,ib=l.unstable_getCurrentPriorityLevel,im=l.unstable_ImmediatePriority,ig=l.unstable_UserBlockingPriority,iv=l.unstable_NormalPriority,iy=l.unstable_LowPriority,iw=l.unstable_IdlePriority,i_={},iE=void 0!==ih?ih:function(){},iS=null,ik=null,ix=!1,iT=ip(),iM=1e4>iT?ip:function(){return ip()-iT};function iO(){switch(ib()){case im:return 99;case ig:return 98;case iv:return 97;case iy:return 96;case iw:return 95;default:throw Error(f(332))}}function iA(e){switch(e){case 99:return im;case 98:return ig;case 97:return iv;case 96:return iy;case 95:return iw;default:throw Error(f(332))}}function iL(e,t){return e=iA(e),iu(e,t)}function iC(e,t,n){return e=iA(e),ic(e,t,n)}function iI(e){return null===iS?(iS=[e],ik=ic(im,iN)):iS.push(e),i_}function iD(){if(null!==ik){var e=ik;ik=null,il(e)}iN()}function iN(){if(!ix&&null!==iS){ix=!0;var e=0;try{var t=iS;iL(99,function(){for(;e=t&&(oo=!0),e.firstContext=null)}function iK(e,t){if(iU!==e&&!1!==t&&0!==t){if(("number"!=typeof t||1073741823===t)&&(iU=e,t=1073741823),t={context:e,observedBits:t,next:null},null===iB){if(null===iY)throw Error(f(308));iB=t,iY.dependencies={expirationTime:0,firstContext:t,responders:null}}else iB=iB.next=t}return e._currentValue}var iV=!1;function iq(e){return{baseState:e,firstUpdate:null,lastUpdate:null,firstCapturedUpdate:null,lastCapturedUpdate:null,firstEffect:null,lastEffect:null,firstCapturedEffect:null,lastCapturedEffect:null}}function iZ(e){return{baseState:e.baseState,firstUpdate:e.firstUpdate,lastUpdate:e.lastUpdate,firstCapturedUpdate:null,lastCapturedUpdate:null,firstEffect:null,lastEffect:null,firstCapturedEffect:null,lastCapturedEffect:null}}function iX(e,t){return{expirationTime:e,suspenseConfig:t,tag:0,payload:null,callback:null,next:null,nextEffect:null}}function 
iJ(e,t){null===e.lastUpdate?e.firstUpdate=e.lastUpdate=t:(e.lastUpdate.next=t,e.lastUpdate=t)}function iQ(e,t){var n=e.alternate;if(null===n){var r=e.updateQueue,i=null;null===r&&(r=e.updateQueue=iq(e.memoizedState))}else r=e.updateQueue,i=n.updateQueue,null===r?null===i?(r=e.updateQueue=iq(e.memoizedState),i=n.updateQueue=iq(n.memoizedState)):r=e.updateQueue=iZ(i):null===i&&(i=n.updateQueue=iZ(r));null===i||r===i?iJ(r,t):null===r.lastUpdate||null===i.lastUpdate?(iJ(r,t),iJ(i,t)):(iJ(r,t),i.lastUpdate=t)}function i1(e,t){var n=e.updateQueue;null===(n=null===n?e.updateQueue=iq(e.memoizedState):i0(e,n)).lastCapturedUpdate?n.firstCapturedUpdate=n.lastCapturedUpdate=t:(n.lastCapturedUpdate.next=t,n.lastCapturedUpdate=t)}function i0(e,t){var n=e.alternate;return null!==n&&t===n.updateQueue&&(t=e.updateQueue=iZ(t)),t}function i2(e,t,n,r,i,a){switch(n.tag){case 1:return"function"==typeof(e=n.payload)?e.call(a,r,i):e;case 3:e.effectTag=-4097&e.effectTag|64;case 0:if(null==(i="function"==typeof(e=n.payload)?e.call(a,r,i):e))break;return c({},r,i);case 2:iV=!0}return r}function i3(e,t,n,r,i){iV=!1,t=i0(e,t);for(var a=t.baseState,o=null,s=0,u=t.firstUpdate,c=a;null!==u;){var l=u.expirationTime;lb?(m=f,f=null):m=f.sibling;var g=h(i,f,s[b],u);if(null===g){null===f&&(f=m);break}e&&f&&null===g.alternate&&t(i,f),o=a(g,o,b),null===l?c=g:l.sibling=g,l=g,f=m}if(b===s.length)return n(i,f),c;if(null===f){for(;bm?(g=b,b=null):g=b.sibling;var y=h(i,b,v.value,u);if(null===y){null===b&&(b=g);break}e&&b&&null===y.alternate&&t(i,b),o=a(y,o,m),null===l?c=y:l.sibling=y,l=y,b=g}if(v.done)return n(i,b),c;if(null===b){for(;!v.done;m++,v=s.next())null!==(v=d(i,v.value,u))&&(o=a(v,o,m),null===l?c=v:l.sibling=v,l=v);return c}for(b=r(i,b);!v.done;m++,v=s.next())null!==(v=p(b,i,m,v.value,u))&&(e&&null!==v.alternate&&b.delete(null===v.key?m:v.key),o=a(v,o,m),null===l?c=v:l.sibling=v,l=v);return e&&b.forEach(function(e){return t(i,e)}),c}return function(e,r,a,s){var u="object"==typeof 
a&&null!==a&&a.type===z&&null===a.key;u&&(a=a.props.children);var c="object"==typeof a&&null!==a;if(c)switch(a.$$typeof){case H:a:{for(c=a.key,u=r;null!==u;){if(u.key===c){if(7===u.tag?a.type===z:u.elementType===a.type){n(e,u.sibling),(r=i(u,a.type===z?a.props.children:a.props,s)).ref=aa(e,u,a),r.return=e,e=r;break a}n(e,u);break}t(e,u),u=u.sibling}a.type===z?((r=s1(a.props.children,e.mode,s,a.key)).return=e,e=r):((s=sQ(a.type,a.key,a.props,null,e.mode,s)).ref=aa(e,r,a),s.return=e,e=s)}return o(e);case $:a:{for(u=a.key;null!==r;){if(r.key===u){if(4===r.tag&&r.stateNode.containerInfo===a.containerInfo&&r.stateNode.implementation===a.implementation){n(e,r.sibling),(r=i(r,a.children||[],s)).return=e,e=r;break a}n(e,r);break}t(e,r),r=r.sibling}(r=s2(a,e.mode,s)).return=e,e=r}return o(e)}if("string"==typeof a||"number"==typeof a)return a=""+a,null!==r&&6===r.tag?(n(e,r.sibling),(r=i(r,a,s)).return=e,e=r):(n(e,r),(r=s0(a,e.mode,s)).return=e,e=r),o(e);if(ai(a))return b(e,r,a,s);if(en(a))return m(e,r,a,s);if(c&&ao(e,a),void 0===a&&!u)switch(e.tag){case 1:case 0:throw Error(f(152,(e=e.type).displayName||e.name||"Component"))}return n(e,r)}}var au=as(!0),ac=as(!1),al={},af={current:al},ad={current:al},ah={current:al};function ap(e){if(e===al)throw Error(f(174));return e}function ab(e,t){r4(ah,t,e),r4(ad,e,e),r4(af,al,e);var n=t.nodeType;switch(n){case 9:case 11:t=(t=t.documentElement)?t.namespaceURI:eJ(null,"");break;default:t=eJ(t=(n=8===n?t.parentNode:t).namespaceURI||null,n=n.tagName)}r3(af,e),r4(af,t,e)}function am(e){r3(af,e),r3(ad,e),r3(ah,e)}function ag(e){ap(ah.current);var t=ap(af.current),n=eJ(t,e.type);t!==n&&(r4(ad,e,e),r4(af,n,e))}function av(e){ad.current===e&&(r3(af,e),r3(ad,e))}var ay={current:0};function aw(e){for(var t=e;null!==t;){if(13===t.tag){var n=t.memoizedState;if(null!==n&&(null===(n=n.dehydrated)||n.data===nV||n.data===nq))return t}else if(19===t.tag&&void 0!==t.memoizedProps.revealOrder){if(0!=(64&t.effectTag))return t}else 
if(null!==t.child){t.child.return=t,t=t.child;continue}if(t===e)break;for(;null===t.sibling;){if(null===t.return||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}return null}function a_(e,t){return{responder:e,props:t}}var aE=Y.ReactCurrentDispatcher,aS=Y.ReactCurrentBatchConfig,ak=0,ax=null,aT=null,aM=null,aO=null,aA=null,aL=null,aC=0,aI=null,aD=0,aN=!1,aP=null,aR=0;function aj(){throw Error(f(321))}function aF(e,t){if(null===t)return!1;for(var n=0;naC&&sL(aC=l)):(sA(l,u.suspenseConfig),a=u.eagerReducer===e?u.eagerState:e(a,u.action)),o=u,u=u.next}while(null!==u&&u!==r)c||(s=o,i=a),r$(a,t.memoizedState)||(oo=!0),t.memoizedState=a,t.baseUpdate=s,t.baseState=i,n.lastRenderedState=a}return[t.memoizedState,n.dispatch]}function aG(e){var t=aU();return"function"==typeof e&&(e=e()),t.memoizedState=t.baseState=e,e=(e=t.queue={last:null,dispatch:null,lastRenderedReducer:a$,lastRenderedState:e}).dispatch=a2.bind(null,ax,e),[t.memoizedState,e]}function aW(e){return az(a$,e)}function aK(e,t,n,r){return e={tag:e,create:t,destroy:n,deps:r,next:null},null===aI?(aI={lastEffect:null}).lastEffect=e.next=e:null===(t=aI.lastEffect)?aI.lastEffect=e.next=e:(n=t.next,t.next=e,e.next=n,aI.lastEffect=e),e}function aV(e,t,n,r){var i=aU();aD|=e,i.memoizedState=aK(t,n,void 0,void 0===r?null:r)}function aq(e,t,n,r){var i=aH();r=void 0===r?null:r;var a=void 0;if(null!==aT){var o=aT.memoizedState;if(a=o.destroy,null!==r&&aF(r,o.deps)){aK(0,n,a,r);return}}aD|=e,i.memoizedState=aK(t,n,a,r)}function aZ(e,t){return aV(516,192,e,t)}function aX(e,t){return aq(516,192,e,t)}function aJ(e,t){return"function"==typeof t?(t(e=e()),function(){t(null)}):null!=t?(e=e(),t.current=e,function(){t.current=null}):void 0}function aQ(){}function a1(e,t){return aU().memoizedState=[e,void 0===t?null:t],e}function a0(e,t){var n=aH();t=void 0===t?null:t;var r=n.memoizedState;return null!==r&&null!==t&&aF(t,r[1])?r[0]:(n.memoizedState=[e,t],e)}function a2(e,t,n){if(!(25>aR))throw 
Error(f(301));var r=e.alternate;if(e===ax||null!==r&&r===ax){if(aN=!0,e={expirationTime:ak,suspenseConfig:null,action:n,eagerReducer:null,eagerState:null,next:null},null===aP&&(aP=new Map),void 0===(n=aP.get(t)))aP.set(t,e);else{for(t=n;null!==t.next;)t=t.next;t.next=e}}else{var i=sb(),a=i6.suspense;a={expirationTime:i=sm(i,e,a),suspenseConfig:a,action:n,eagerReducer:null,eagerState:null,next:null};var o=t.last;if(null===o)a.next=a;else{var s=o.next;null!==s&&(a.next=s),o.next=a}if(t.last=a,0===e.expirationTime&&(null===r||0===r.expirationTime)&&null!==(r=t.lastRenderedReducer))try{var u=t.lastRenderedState,c=r(u,n);if(a.eagerReducer=r,a.eagerState=c,r$(c,u))return}catch(l){}finally{}sg(e,i)}}var a3={readContext:iK,useCallback:aj,useContext:aj,useEffect:aj,useImperativeHandle:aj,useLayoutEffect:aj,useMemo:aj,useReducer:aj,useRef:aj,useState:aj,useDebugValue:aj,useResponder:aj,useDeferredValue:aj,useTransition:aj},a4={readContext:iK,useCallback:a1,useContext:iK,useEffect:aZ,useImperativeHandle:function(e,t,n){return n=null!=n?n.concat([e]):null,aV(4,36,aJ.bind(null,t,e),n)},useLayoutEffect:function(e,t){return aV(4,36,e,t)},useMemo:function(e,t){var n=aU();return t=void 0===t?null:t,e=e(),n.memoizedState=[e,t],e},useReducer:function(e,t,n){var r=aU();return t=void 0!==n?n(t):t,r.memoizedState=r.baseState=t,e=(e=r.queue={last:null,dispatch:null,lastRenderedReducer:e,lastRenderedState:t}).dispatch=a2.bind(null,ax,e),[r.memoizedState,e]},useRef:function(e){var t=aU();return e={current:e},t.memoizedState=e},useState:aG,useDebugValue:aQ,useResponder:a_,useDeferredValue:function(e,t){var n=aG(e),r=n[0],i=n[1];return aZ(function(){l.unstable_next(function(){var n=aS.suspense;aS.suspense=void 0===t?null:t;try{i(e)}finally{aS.suspense=n}})},[e,t]),r},useTransition:function(e){var t=aG(!1),n=t[0],r=t[1];return[a1(function(t){r(!0),l.unstable_next(function(){var n=aS.suspense;aS.suspense=void 
0===e?null:e;try{r(!1),t()}finally{aS.suspense=n}})},[e,n]),n]}},a5={readContext:iK,useCallback:a0,useContext:iK,useEffect:aX,useImperativeHandle:function(e,t,n){return n=null!=n?n.concat([e]):null,aq(4,36,aJ.bind(null,t,e),n)},useLayoutEffect:function(e,t){return aq(4,36,e,t)},useMemo:function(e,t){var n=aH();t=void 0===t?null:t;var r=n.memoizedState;return null!==r&&null!==t&&aF(t,r[1])?r[0]:(e=e(),n.memoizedState=[e,t],e)},useReducer:az,useRef:function(){return aH().memoizedState},useState:aW,useDebugValue:aQ,useResponder:a_,useDeferredValue:function(e,t){var n=aW(e),r=n[0],i=n[1];return aX(function(){l.unstable_next(function(){var n=aS.suspense;aS.suspense=void 0===t?null:t;try{i(e)}finally{aS.suspense=n}})},[e,t]),r},useTransition:function(e){var t=aW(!1),n=t[0],r=t[1];return[a0(function(t){r(!0),l.unstable_next(function(){var n=aS.suspense;aS.suspense=void 0===e?null:e;try{r(!1),t()}finally{aS.suspense=n}})},[e,n]),n]}},a6=null,a9=null,a8=!1;function a7(e,t){var n=sq(5,null,null,0);n.elementType="DELETED",n.type="DELETED",n.stateNode=t,n.return=e,n.effectTag=8,null!==e.lastEffect?(e.lastEffect.nextEffect=n,e.lastEffect=n):e.firstEffect=e.lastEffect=n}function oe(e,t){switch(e.tag){case 5:var n=e.type;return null!==(t=1!==t.nodeType||n.toLowerCase()!==t.nodeName.toLowerCase()?null:t)&&(e.stateNode=t,!0);case 6:return null!==(t=""===e.pendingProps||3!==t.nodeType?null:t)&&(e.stateNode=t,!0);default:return!1}}function ot(e){if(a8){var t=a9;if(t){var n=t;if(!oe(e,t)){if(!(t=n2(n.nextSibling))||!oe(e,t)){e.effectTag=-1025&e.effectTag|2,a8=!1,a6=e;return}a7(a6,n)}a6=e,a9=n2(t.firstChild)}else e.effectTag=-1025&e.effectTag|2,a8=!1,a6=e}}function on(e){for(e=e.return;null!==e&&5!==e.tag&&3!==e.tag&&13!==e.tag;)e=e.return;a6=e}function or(e){if(e!==a6)return!1;if(!a8)return on(e),a8=!0,!1;var 
t=e.type;if(5!==e.tag||"head"!==t&&"body"!==t&&!nQ(t,e.memoizedProps))for(t=a9;t;)a7(e,t),t=n2(t.nextSibling);if(on(e),13===e.tag){if(!(e=null!==(e=e.memoizedState)?e.dehydrated:null))throw Error(f(317));a:{for(t=0,e=e.nextSibling;e;){if(8===e.nodeType){var n=e.data;if(n===nK){if(0===t){a9=n2(e.nextSibling);break a}t--}else n!==nW&&n!==nq&&n!==nV||t++}e=e.nextSibling}a9=null}}else a9=a6?n2(e.stateNode.nextSibling):null;return!0}function oi(){a9=a6=null,a8=!1}var oa=Y.ReactCurrentOwner,oo=!1;function os(e,t,n,r){t.child=null===e?ac(t,null,n,r):au(t,e.child,n,r)}function ou(e,t,n,r,i){n=n.render;var a=t.ref;return(iW(t,i),r=aY(e,t,n,r,a,i),null===e||oo)?(t.effectTag|=1,os(e,t,r,i),t.child):(t.updateQueue=e.updateQueue,t.effectTag&=-517,e.expirationTime<=i&&(e.expirationTime=0),o_(e,t,i))}function oc(e,t,n,r,i,a){if(null===e){var o=n.type;return"function"!=typeof o||sZ(o)||void 0!==o.defaultProps||null!==n.compare||void 0!==n.defaultProps?((e=sQ(n.type,null,r,null,t.mode,a)).ref=t.ref,e.return=t,t.child=e):(t.tag=15,t.type=o,ol(e,t,o,r,i,a))}return(o=e.child,it)&&sf.set(e,t))}}function sv(e,t){e.expirationTime(e=e.nextKnownPendingLevel)?t:e:t}function sw(e){if(0!==e.lastExpiredTime)e.callbackExpirationTime=1073741823,e.callbackPriority=99,e.callbackNode=iI(sE.bind(null,e));else{var t=sy(e),n=e.callbackNode;if(0===t)null!==n&&(e.callbackNode=null,e.callbackExpirationTime=0,e.callbackPriority=90);else{var r=sb();if(r=1073741823===t?99:1===t||2===t?95:0>=(r=10*(1073741821-t)-10*(1073741821-r))?99:250>=r?98:5250>=r?97:95,null!==n){var i=e.callbackPriority;if(e.callbackExpirationTime===t&&i>=r)return;n!==i_&&il(n)}e.callbackExpirationTime=t,e.callbackPriority=r,t=1073741823===t?iI(sE.bind(null,e)):iC(r,s_.bind(null,e),{timeout:10*(1073741821-t)-iM()}),e.callbackNode=t}}}function s_(e,t){if(sp=0,t)return t=sb(),s9(e,t),sw(e),null;var n=sy(e);if(0!==n){if(t=e.callbackNode,(o0&(oK|oV))!==oG)throw Error(f(327));if(sY(),e===o2&&n===o4||sT(e,n),null!==o3){var r=o0;o0|=oK;for(var 
i=sO(e);;)try{sI();break}catch(a){sM(e,a)}if(iH(),o0=r,o$.current=i,o5===oZ)throw t=o6,sT(e,n),s5(e,n),sw(e),t;if(null===o3)switch(i=e.finishedWork=e.current.alternate,e.finishedExpirationTime=n,o2=null,r=o5){case oq:case oZ:throw Error(f(345));case oX:s9(e,2=n){e.lastPingedTime=n,sT(e,n);break}}if(0!==(o=sy(e))&&o!==n)break;if(0!==r&&r!==n){e.lastPingedTime=r;break}e.timeoutHandle=n1(sR.bind(null,e),i);break}sR(e);break;case oQ:if(s5(e,n),n===(r=e.lastSuspendedTime)&&(e.nextKnownPendingLevel=sP(i)),st&&(0===(i=e.lastPingedTime)||i>=n)){e.lastPingedTime=n,sT(e,n);break}if(0!==(i=sy(e))&&i!==n)break;if(0!==r&&r!==n){e.lastPingedTime=r;break}if(1073741823!==o8?r=10*(1073741821-o8)-iM():1073741823===o9?r=0:(r=10*(1073741821-o9)-5e3,n=10*(1073741821-n)-(i=iM()),0>(r=i-r)&&(r=0),n<(r=(120>r?120:480>r?480:1080>r?1080:1920>r?1920:3e3>r?3e3:4320>r?4320:1960*oH(r/1960))-r)&&(r=n)),10=(r=0|s.busyMinDurationMs)?r=0:(i=0|s.busyDelayMs,r=(o=iM()-(10*(1073741821-o)-(0|s.timeoutMs||5e3)))<=i?0:i+r-o),10 component higher in the tree to provide a loading indicator or placeholder to display."+ea(i))}o5!==o1&&(o5=oX),a=ox(a,i),c=r;do{switch(c.tag){case 3:s=a,c.effectTag|=4096,c.expirationTime=t;var g=oB(c,s,t);i1(c,g);break a;case 1:s=a;var v=c.type,y=c.stateNode;if(0==(64&c.effectTag)&&("function"==typeof v.getDerivedStateFromError||null!==y&&"function"==typeof y.componentDidCatch&&(null===ss||!ss.has(y)))){c.effectTag|=4096,c.expirationTime=t;var w=oU(c,s,t);i1(c,w);break a}}c=c.return}while(null!==c)}o3=sN(o3)}catch(_){t=_;continue}break}}function sO(){var e=o$.current;return o$.current=a3,null===e?a3:e}function sA(e,t){ese&&(se=e)}function sC(){for(;null!==o3;)o3=sD(o3)}function sI(){for(;null!==o3&&!id();)o3=sD(o3)}function sD(e){var t=s(e.alternate,e,o4);return e.memoizedProps=e.pendingProps,null===t&&(t=sN(e)),oz.current=null,t}function sN(e){o3=e;do{var t=o3.alternate;if(e=o3.return,0==(2048&o3.effectTag)){a:{var n=t;t=o3;var s=o4,u=t.pendingProps;switch(t.tag){case 2:case 
16:case 15:case 0:case 11:case 7:case 8:case 12:case 9:case 14:case 20:case 21:break;case 1:case 17:ie(t.type)&&it(t);break;case 3:am(t),ir(t),(u=t.stateNode).pendingContext&&(u.context=u.pendingContext,u.pendingContext=null),(null===n||null===n.child)&&or(t)&&oE(t),i(t);break;case 5:av(t),s=ap(ah.current);var l=t.type;if(null!==n&&null!=t.stateNode)a(n,t,l,u,s),n.ref!==t.ref&&(t.effectTag|=128);else if(u){var d=ap(af.current);if(or(t)){var h=(u=t).stateNode;n=u.type;var p=u.memoizedProps,b=s;switch(h[n5]=u,h[n6]=p,l=void 0,s=h,n){case"iframe":case"object":case"embed":nw("load",s);break;case"video":case"audio":for(h=0;h",h=p.removeChild(p.firstChild)):"string"==typeof p.is?h=h.createElement(b,{is:p.is}):(h=h.createElement(b),"select"===b&&(b=h,p.multiple?b.multiple=!0:p.size&&(b.size=p.size))):h=h.createElementNS(d,b),(p=h)[n5]=n,p[n6]=u,r(p,t,!1,!1),t.stateNode=p,b=l;var m=s,g=nj(b,n=u);switch(b){case"iframe":case"object":case"embed":nw("load",p),s=n;break;case"video":case"audio":for(s=0;su.tailExpiration&&1l&&(l=n),p>l&&(l=p),s=s.sibling;u.childExpirationTime=l}if(null!==t)return t;null!==e&&0==(2048&e.effectTag)&&(null===e.firstEffect&&(e.firstEffect=o3.firstEffect),null!==o3.lastEffect&&(null!==e.lastEffect&&(e.lastEffect.nextEffect=o3.firstEffect),e.lastEffect=o3.lastEffect),1(e=e.childExpirationTime)?t:e}function sR(e){var t=iO();return iL(99,sj.bind(null,e,t)),null}function sj(e,t){do sY();while(null!==sc)if((o0&(oK|oV))!==oG)throw Error(f(327));var n=e.finishedWork,r=e.finishedExpirationTime;if(null===n)return null;if(e.finishedWork=null,e.finishedExpirationTime=0,n===e.current)throw Error(f(177));e.callbackNode=null,e.callbackExpirationTime=0,e.callbackPriority=90,e.nextKnownPendingLevel=0;var 
i=sP(n);if(e.firstPendingTime=i,r<=e.lastSuspendedTime?e.firstSuspendedTime=e.lastSuspendedTime=e.nextKnownPendingLevel=0:r<=e.firstSuspendedTime&&(e.firstSuspendedTime=r-1),r<=e.lastPingedTime&&(e.lastPingedTime=0),r<=e.lastExpiredTime&&(e.lastExpiredTime=0),e===o2&&(o3=o2=null,o4=0),1s&&(l=s,s=o,o=l),l=nH(E,o),d=nH(E,s),l&&d&&(1!==k.rangeCount||k.anchorNode!==l.node||k.anchorOffset!==l.offset||k.focusNode!==d.node||k.focusOffset!==d.offset)&&((S=S.createRange()).setStart(l.node,l.offset),k.removeAllRanges(),o>s?(k.addRange(S),k.extend(d.node,d.offset)):(S.setEnd(d.node,d.offset),k.addRange(S))))),S=[],k=E;k=k.parentNode;)1===k.nodeType&&S.push({element:k,left:k.scrollLeft,top:k.scrollTop});for("function"==typeof E.focus&&E.focus(),E=0;E=n)return og(e,t,n);return r4(ay,1&ay.current,t),null!==(t=o_(e,t,n))?t.sibling:null}r4(ay,1&ay.current,t);break;case 19:if(r=t.childExpirationTime>=n,0!=(64&e.effectTag)){if(r)return ow(e,t,n);t.effectTag|=64}if(null!==(i=t.memoizedState)&&(i.rendering=null,i.tail=null),r4(ay,ay.current,t),!r)return null}return o_(e,t,n)}oo=!1}}else oo=!1;switch(t.expirationTime=0,t.tag){case 2:if(r=t.type,null!==e&&(e.alternate=null,t.alternate=null,t.effectTag|=2),e=t.pendingProps,i=r7(t,r6.current),iW(t,n),i=aY(null,t,r,e,i,n),t.effectTag|=1,"object"==typeof i&&null!==i&&"function"==typeof i.render&&void 0===i.$$typeof){if(t.tag=1,aB(),ie(r)){var a=!0;io(t)}else a=!1;t.memoizedState=null!==i.state&&void 0!==i.state?i.state:null;var o=r.getDerivedStateFromProps;"function"==typeof o&&i8(t,r,o,e),i.updater=i7,t.stateNode=i,i._reactInternalFiber=t,ar(t,r,e,n),t=op(null,t,r,!0,a,n)}else t.tag=0,os(null,t,i,n),t=t.child;return t;case 16:if(i=t.elementType,null!==e&&(e.alternate=null,t.alternate=null,t.effectTag|=2),e=t.pendingProps,er(i),1!==i._status)throw i._result;switch(i=i._result,t.type=i,a=t.tag=sX(i),e=ij(i,e),a){case 0:t=od(null,t,i,e,n);break;case 1:t=oh(null,t,i,e,n);break;case 11:t=ou(null,t,i,e,n);break;case 
14:t=oc(null,t,i,ij(i.type,e),r,n);break;default:throw Error(f(306,i,""))}return t;case 0:return r=t.type,i=t.pendingProps,i=t.elementType===r?i:ij(r,i),od(e,t,r,i,n);case 1:return r=t.type,i=t.pendingProps,i=t.elementType===r?i:ij(r,i),oh(e,t,r,i,n);case 3:if(ob(t),null===(r=t.updateQueue))throw Error(f(282));if(i=null!==(i=t.memoizedState)?i.element:null,i3(t,r,t.pendingProps,null,n),(r=t.memoizedState.element)===i)oi(),t=o_(e,t,n);else{if((i=t.stateNode.hydrate)&&(a9=n2(t.stateNode.containerInfo.firstChild),a6=t,i=a8=!0),i)for(n=ac(t,null,r,n),t.child=n;n;)n.effectTag=-3&n.effectTag|1024,n=n.sibling;else os(e,t,r,n),oi();t=t.child}return t;case 5:return ag(t),null===e&&ot(t),r=t.type,i=t.pendingProps,a=null!==e?e.memoizedProps:null,o=i.children,nQ(r,i)?o=null:null!==a&&nQ(r,a)&&(t.effectTag|=16),of(e,t),4&t.mode&&1!==n&&i.hidden?(t.expirationTime=t.childExpirationTime=1,t=null):(os(e,t,o,n),t=t.child),t;case 6:return null===e&&ot(t),null;case 13:return og(e,t,n);case 4:return ab(t,t.stateNode.containerInfo),r=t.pendingProps,null===e?t.child=au(t,null,r,n):os(e,t,r,n),t.child;case 11:return r=t.type,i=t.pendingProps,i=t.elementType===r?i:ij(r,i),ou(e,t,r,i,n);case 7:return os(e,t,t.pendingProps,n),t.child;case 8:case 12:return os(e,t,t.pendingProps.children,n),t.child;case 10:a:{if(r=t.type._context,i=t.pendingProps,o=t.memoizedProps,i$(t,a=i.value),null!==o){var s=o.value;if(0==(a=r$(s,a)?0:("function"==typeof r._calculateChangedBits?r._calculateChangedBits(s,a):1073741823)|0)){if(o.children===i.children&&!r9.current){t=o_(e,t,n);break a}}else for(null!==(s=t.child)&&(s.return=t);null!==s;){var u=s.dependencies;if(null!==u){o=s.child;for(var c=u.firstContext;null!==c;){if(c.context===r&&0!=(c.observedBits&a)){1===s.tag&&((c=iX(n,null)).tag=2,iQ(s,c)),s.expirationTime=t&&e<=t}function s5(e,t){var 
n=e.firstSuspendedTime,r=e.lastSuspendedTime;nt||0===n)&&(e.lastSuspendedTime=t),t<=e.lastPingedTime&&(e.lastPingedTime=0),t<=e.lastExpiredTime&&(e.lastExpiredTime=0)}function s6(e,t){t>e.firstPendingTime&&(e.firstPendingTime=t);var n=e.firstSuspendedTime;0!==n&&(t>=n?e.firstSuspendedTime=e.lastSuspendedTime=e.nextKnownPendingLevel=0:t>=e.lastSuspendedTime&&(e.lastSuspendedTime=t+1),t>e.nextKnownPendingLevel&&(e.nextKnownPendingLevel=t))}function s9(e,t){var n=e.lastExpiredTime;(0===n||n>t)&&(e.lastExpiredTime=t)}function s8(e,t,n,r){var i=t.current,a=sb(),o=i6.suspense;a=sm(a,i,o);a:if(n){n=n._reactInternalFiber;b:{if(tr(n)!==n||1!==n.tag)throw Error(f(170));var s=n;do{switch(s.tag){case 3:s=s.stateNode.context;break b;case 1:if(ie(s.type)){s=s.stateNode.__reactInternalMemoizedMergedChildContext;break b}}s=s.return}while(null!==s)throw Error(f(171))}if(1===n.tag){var u=n.type;if(ie(u)){n=ia(n,u,s);break a}}n=s}else n=r5;return null===t.context?t.context=n:t.pendingContext=n,(t=iX(a,o)).payload={element:e},null!==(r=void 0===r?null:r)&&(t.callback=r),iQ(i,t),sg(i,a),a}function s7(e){return(e=e.current).child?(e.child.tag,e.child.stateNode):null}function ue(e,t){null!==(e=e.memoizedState)&&null!==e.dehydrated&&e.retryTime1&&void 0!==arguments[1]?arguments[1]:this.props,n=t.target;if(n){var r=n;"string"==typeof n&&(r=window[n]),_(t,e.bind(null,r))}}},{key:"render",value:function(){return this.props.children||null}}]),t}(h.PureComponent);S.propTypes={},t.withOptions=E,t.default=S},69590(e){"use strict";var t=Array.isArray,n=Object.keys,r=Object.prototype.hasOwnProperty,i="undefined"!=typeof Element;function a(e,o){if(e===o)return!0;if(e&&o&&"object"==typeof e&&"object"==typeof o){var s,u,c,l=t(e),f=t(o);if(l&&f){if((u=e.length)!=o.length)return!1;for(s=u;0!=s--;)if(!a(e[s],o[s]))return!1;return!0}if(l!=f)return!1;var d=e instanceof Date,h=o instanceof Date;if(d!=h)return!1;if(d&&h)return e.getTime()==o.getTime();var p=e instanceof RegExp,b=o instanceof 
RegExp;if(p!=b)return!1;if(p&&b)return e.toString()==o.toString();var m=n(e);if((u=m.length)!==n(o).length)return!1;for(s=u;0!=s--;)if(!r.call(o,m[s]))return!1;if(i&&e instanceof Element&&o instanceof Element)return e===o;for(s=u;0!=s--;)if(("_owner"!==(c=m[s])||!e.$$typeof)&&!a(e[c],o[c]))return!1;return!0}return e!=e&&o!=o}e.exports=function(e,t){try{return a(e,t)}catch(n){if(n.message&&n.message.match(/stack|recursion/i)||-2146828260===n.number)return console.warn("Warning: react-fast-compare does not handle circular references.",n.name,n.message),!1;throw n}}},57209(e,t,n){"use strict";function r(e){return e&&"object"==typeof e&&"default"in e?e.default:e}i={value:!0};var i,a=r(n(67294));function o(e){return o.warnAboutHMRDisabled&&(o.warnAboutHMRDisabled=!0,console.error("React-Hot-Loader: misconfiguration detected, using production version in non-production environment."),console.error("React-Hot-Loader: Hot Module Replacement is not enabled.")),a.Children.only(e.children)}o.warnAboutHMRDisabled=!1;var s=function e(){return e.shouldWrapWithAppContainer?function(e){return function(t){return a.createElement(o,null,a.createElement(e,t))}}:function(e){return e}};s.shouldWrapWithAppContainer=!1;var u=function(e,t){return e===t},c=function(){},l=function(e){return e},f=function(){};t.zj=o,t.wU=s,i=u,i=c,i=l,i=f},69921(e,t){"use strict";/** @license React v16.13.1 + * react-is.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ */ var n="function"==typeof Symbol&&Symbol.for,r=n?Symbol.for("react.element"):60103,i=n?Symbol.for("react.portal"):60106,a=n?Symbol.for("react.fragment"):60107,o=n?Symbol.for("react.strict_mode"):60108,s=n?Symbol.for("react.profiler"):60114,u=n?Symbol.for("react.provider"):60109,c=n?Symbol.for("react.context"):60110,l=n?Symbol.for("react.async_mode"):60111,f=n?Symbol.for("react.concurrent_mode"):60111,d=n?Symbol.for("react.forward_ref"):60112,h=n?Symbol.for("react.suspense"):60113,p=n?Symbol.for("react.suspense_list"):60120,b=n?Symbol.for("react.memo"):60115,m=n?Symbol.for("react.lazy"):60116,g=n?Symbol.for("react.block"):60121,v=n?Symbol.for("react.fundamental"):60117,y=n?Symbol.for("react.responder"):60118,w=n?Symbol.for("react.scope"):60119;function _(e){if("object"==typeof e&&null!==e){var t=e.$$typeof;switch(t){case r:switch(e=e.type){case l:case f:case a:case s:case o:case h:return e;default:switch(e=e&&e.$$typeof){case c:case d:case m:case b:case u:return e;default:return t}}case i:return t}}}function E(e){return _(e)===f}t.AsyncMode=l,t.ConcurrentMode=f,t.ContextConsumer=c,t.ContextProvider=u,t.Element=r,t.ForwardRef=d,t.Fragment=a,t.Lazy=m,t.Memo=b,t.Portal=i,t.Profiler=s,t.StrictMode=o,t.Suspense=h,t.isAsyncMode=function(e){return E(e)||_(e)===l},t.isConcurrentMode=E,t.isContextConsumer=function(e){return _(e)===c},t.isContextProvider=function(e){return _(e)===u},t.isElement=function(e){return"object"==typeof e&&null!==e&&e.$$typeof===r},t.isForwardRef=function(e){return _(e)===d},t.isFragment=function(e){return _(e)===a},t.isLazy=function(e){return _(e)===m},t.isMemo=function(e){return _(e)===b},t.isPortal=function(e){return _(e)===i},t.isProfiler=function(e){return _(e)===s},t.isStrictMode=function(e){return _(e)===o},t.isSuspense=function(e){return _(e)===h},t.isValidElementType=function(e){return"string"==typeof e||"function"==typeof e||e===a||e===f||e===s||e===o||e===h||e===p||"object"==typeof 
e&&null!==e&&(e.$$typeof===m||e.$$typeof===b||e.$$typeof===u||e.$$typeof===c||e.$$typeof===d||e.$$typeof===v||e.$$typeof===y||e.$$typeof===w||e.$$typeof===g)},t.typeOf=_},59864(e,t,n){"use strict";e.exports=n(69921)},46871(e,t,n){"use strict";function r(){var e=this.constructor.getDerivedStateFromProps(this.props,this.state);null!=e&&this.setState(e)}function i(e){function t(t){var n=this.constructor.getDerivedStateFromProps(e,t);return null!=n?n:null}this.setState(t.bind(this))}function a(e,t){try{var n=this.props,r=this.state;this.props=e,this.state=t,this.__reactInternalSnapshotFlag=!0,this.__reactInternalSnapshot=this.getSnapshotBeforeUpdate(n,r)}finally{this.props=n,this.state=r}}function o(e){var t,n=e.prototype;if(!n||!n.isReactComponent)throw Error("Can only polyfill class components");if("function"!=typeof e.getDerivedStateFromProps&&"function"!=typeof n.getSnapshotBeforeUpdate)return e;var o=null,s=null,u=null;if("function"==typeof n.componentWillMount?o="componentWillMount":"function"==typeof n.UNSAFE_componentWillMount&&(o="UNSAFE_componentWillMount"),"function"==typeof n.componentWillReceiveProps?s="componentWillReceiveProps":"function"==typeof n.UNSAFE_componentWillReceiveProps&&(s="UNSAFE_componentWillReceiveProps"),"function"==typeof n.componentWillUpdate?u="componentWillUpdate":"function"==typeof n.UNSAFE_componentWillUpdate&&(u="UNSAFE_componentWillUpdate"),null!==o||null!==s||null!==u){throw Error("Unsafe legacy lifecycles will not be called for components using new component APIs.\n\n"+(e.displayName||e.name)+" uses "+("function"==typeof e.getDerivedStateFromProps?"getDerivedStateFromProps()":"getSnapshotBeforeUpdate()")+" but also contains the following legacy lifecycles:"+(null!==o?"\n "+o:"")+(null!==s?"\n "+s:"")+(null!==u?"\n "+u:"")+"\n\nThe above lifecycles should be removed. 
Learn more about this warning here:\nhttps://fb.me/react-async-component-lifecycle-hooks")}if("function"==typeof e.getDerivedStateFromProps&&(n.componentWillMount=r,n.componentWillReceiveProps=i),"function"==typeof n.getSnapshotBeforeUpdate){if("function"!=typeof n.componentDidUpdate)throw Error("Cannot polyfill getSnapshotBeforeUpdate() for components that do not define componentDidUpdate() on the prototype");n.componentWillUpdate=a;var c=n.componentDidUpdate;n.componentDidUpdate=function(e,t,n){var r=this.__reactInternalSnapshotFlag?this.__reactInternalSnapshot:n;c.call(this,e,t,r)}}return e}n.r(t),n.d(t,{polyfill:()=>o}),r.__suppressDeprecationWarning=!0,i.__suppressDeprecationWarning=!0,a.__suppressDeprecationWarning=!0},55977(e,t,n){"use strict";n.d(t,{zt:()=>h,$j:()=>J,wU:()=>A,I0:()=>er,v9:()=>es});var r=n(67294);n(45697);var i=r.createContext(null);function a(e){e()}var o=a,s=function(e){return o=e},u=function(){return o},c={notify:function(){}};function l(){var e=u(),t=null,n=null;return{clear:function(){t=null,n=null},notify:function(){e(function(){for(var e=t;e;)e.callback(),e=e.next})},get:function(){for(var e=[],n=t;n;)e.push(n),n=n.next;return e},subscribe:function(e){var r=!0,i=n={callback:e,next:null,prev:n};return i.prev?i.prev.next=i:t=i,function(){r&&null!==t&&(r=!1,i.next?i.next.prev=i.prev:n=i.prev,i.prev?i.prev.next=i.next:t=i.next)}}}}var f=function(){function e(e,t){this.store=e,this.parentSub=t,this.unsubscribe=null,this.listeners=c,this.handleChangeWrapper=this.handleChangeWrapper.bind(this)}var t=e.prototype;return t.addNestedSub=function(e){return this.trySubscribe(),this.listeners.subscribe(e)},t.notifyNestedSubs=function(){this.listeners.notify()},t.handleChangeWrapper=function(){this.onStateChange&&this.onStateChange()},t.isSubscribed=function(){return 
Boolean(this.unsubscribe)},t.trySubscribe=function(){this.unsubscribe||(this.unsubscribe=this.parentSub?this.parentSub.addNestedSub(this.handleChangeWrapper):this.store.subscribe(this.handleChangeWrapper),this.listeners=l())},t.tryUnsubscribe=function(){this.unsubscribe&&(this.unsubscribe(),this.unsubscribe=null,this.listeners.clear(),this.listeners=c)},e}();function d(e){var t=e.store,n=e.context,a=e.children,o=(0,r.useMemo)(function(){var e=new f(t);return e.onStateChange=e.notifyNestedSubs,{store:t,subscription:e}},[t]),s=(0,r.useMemo)(function(){return t.getState()},[t]);(0,r.useEffect)(function(){var e=o.subscription;return e.trySubscribe(),s!==t.getState()&&e.notifyNestedSubs(),function(){e.tryUnsubscribe(),e.onStateChange=null}},[o,s]);var u=n||i;return r.createElement(u.Provider,{value:o},a)}let h=d;var p=n(87462);function b(e,t){if(null==e)return{};var n,r,i={},a=Object.keys(e);for(r=0;r=0||(i[n]=e[n]);return i}var m=n(8679),g=n.n(m),v=n(59864),y="undefined"!=typeof window&&void 0!==window.document&&void 0!==window.document.createElement?r.useLayoutEffect:r.useEffect,w=[],_=[null,null];function E(e,t){var n=e[1];return[t.payload,n+1]}function S(e,t,n){y(function(){return e.apply(void 0,t)},n)}function k(e,t,n,r,i,a,o){e.current=r,t.current=i,n.current=!1,a.current&&(a.current=null,o())}function x(e,t,n,r,i,a,o,s,u,c){if(e){var l,f=!1,d=null,h=function(){if(!f){var e,n,l=t.getState();try{e=r(l,i.current)}catch(h){n=h,d=h}n||(d=null),e===a.current?o.current||u():(a.current=e,s.current=e,o.current=!0,c({type:"STORE_UPDATED",payload:{error:n}}))}};return n.onStateChange=h,n.trySubscribe(),h(),function(){if(f=!0,n.tryUnsubscribe(),n.onStateChange=null,d)throw d}}}var T=function(){return[null,0]};function M(e,t){void 0===t&&(t={});var n=t,a=n.getDisplayName,o=void 0===a?function(e){return"ConnectAdvanced("+e+")"}:a,s=n.methodName,u=void 0===s?"connectAdvanced":s,c=n.renderCountProp,l=void 0===c?void 0:c,d=n.shouldHandleStateChanges,h=void 
0===d||d,m=n.storeKey,y=void 0===m?"store":m,M=(n.withRef,n.forwardRef),O=void 0!==M&&M,A=n.context,L=void 0===A?i:A,C=b(n,["getDisplayName","methodName","renderCountProp","shouldHandleStateChanges","storeKey","withRef","forwardRef","context"]),I=L;return function(t){var n=t.displayName||t.name||"Component",i=o(n),a=(0,p.Z)({},C,{getDisplayName:o,methodName:u,renderCountProp:l,shouldHandleStateChanges:h,storeKey:y,displayName:i,wrappedComponentName:n,WrappedComponent:t}),s=C.pure;function c(t){return e(t.dispatch,a)}var d=s?r.useMemo:function(e){return e()};function m(e){var n=(0,r.useMemo)(function(){var t=e.reactReduxForwardedRef,n=b(e,["reactReduxForwardedRef"]);return[e.context,t,n]},[e]),i=n[0],a=n[1],o=n[2],s=(0,r.useMemo)(function(){return i&&i.Consumer&&(0,v.isContextConsumer)(r.createElement(i.Consumer,null))?i:I},[i,I]),u=(0,r.useContext)(s),l=Boolean(e.store)&&Boolean(e.store.getState)&&Boolean(e.store.dispatch);Boolean(u)&&u.store;var m=l?e.store:u.store,g=(0,r.useMemo)(function(){return c(m)},[m]),y=(0,r.useMemo)(function(){if(!h)return _;var e=new f(m,l?null:u.subscription),t=e.notifyNestedSubs.bind(e);return[e,t]},[m,l,u]),M=y[0],O=y[1],A=(0,r.useMemo)(function(){return l?u:(0,p.Z)({},u,{subscription:M})},[l,u,M]),L=(0,r.useReducer)(E,w,T),C=L[0][0],D=L[1];if(C&&C.error)throw C.error;var N=(0,r.useRef)(),P=(0,r.useRef)(o),R=(0,r.useRef)(),j=(0,r.useRef)(!1),F=d(function(){return R.current&&o===P.current?R.current:g(m.getState(),o)},[m,C,o]);S(k,[P,N,j,o,F,R,O]),S(x,[h,m,M,g,P,N,j,R,O,D],[m,M,g]);var Y=(0,r.useMemo)(function(){return r.createElement(t,(0,p.Z)({},F,{ref:a}))},[a,t,F]);return(0,r.useMemo)(function(){return h?r.createElement(s.Provider,{value:A},Y):Y},[s,Y,A])}var M=s?r.memo(m):m;if(M.WrappedComponent=t,M.displayName=i,O){var A=r.forwardRef(function(e,t){return r.createElement(M,(0,p.Z)({},e,{reactReduxForwardedRef:t}))});return A.displayName=i,A.WrappedComponent=t,g()(A,t)}return g()(M,t)}}function O(e,t){return 
e===t?0!==e||0!==t||1/e==1/t:e!=e&&t!=t}function A(e,t){if(O(e,t))return!0;if("object"!=typeof e||null===e||"object"!=typeof t||null===t)return!1;var n=Object.keys(e),r=Object.keys(t);if(n.length!==r.length)return!1;for(var i=0;i=0;r--){var i=t[r](e);if(i)return i}return function(t,r){throw Error("Invalid value of type "+typeof e+" for "+n+" argument when connecting component "+r.wrappedComponentName+".")}}function Z(e,t){return e===t}function X(e){var t=void 0===e?{}:e,n=t.connectHOC,r=void 0===n?M:n,i=t.mapStateToPropsFactories,a=void 0===i?B:i,o=t.mapDispatchToPropsFactories,s=void 0===o?j:o,u=t.mergePropsFactories,c=void 0===u?G:u,l=t.selectorFactory,f=void 0===l?V:l;return function(e,t,n,i){void 0===i&&(i={});var o=i,u=o.pure,l=void 0===u||u,d=o.areStatesEqual,h=void 0===d?Z:d,m=o.areOwnPropsEqual,g=void 0===m?A:m,v=o.areStatePropsEqual,y=void 0===v?A:v,w=o.areMergedPropsEqual,_=void 0===w?A:w,E=b(o,["pure","areStatesEqual","areOwnPropsEqual","areStatePropsEqual","areMergedPropsEqual"]),S=q(e,a,"mapStateToProps"),k=q(t,s,"mapDispatchToProps"),x=q(n,c,"mergeProps");return r(f,(0,p.Z)({methodName:"connect",getDisplayName:function(e){return"Connect("+e+")"},shouldHandleStateChanges:Boolean(e),initMapStateToProps:S,initMapDispatchToProps:k,initMergeProps:x,pure:l,areStatesEqual:h,areOwnPropsEqual:g,areStatePropsEqual:y,areMergedPropsEqual:_},E))}}let J=X();function Q(){var e;return(0,r.useContext)(i)}function ee(e){void 0===e&&(e=i);var t=e===i?Q:function(){return(0,r.useContext)(e)};return function(){return t().store}}var et=ee();function en(e){void 0===e&&(e=i);var t=e===i?et:ee(e);return function(){return t().dispatch}}var er=en(),ei=function(e,t){return e===t};function ea(e,t,n,i){var a,o=(0,r.useReducer)(function(e){return e+1},0)[1],s=(0,r.useMemo)(function(){return new f(n,i)},[n,i]),u=(0,r.useRef)(),c=(0,r.useRef)(),l=(0,r.useRef)(),d=(0,r.useRef)(),h=n.getState();try{a=e!==c.current||h!==l.current||u.current?e(h):d.current}catch(p){throw 
u.current&&(p.message+="\nThe error may be correlated with this previous error:\n"+u.current.stack+"\n\n"),p}return y(function(){c.current=e,l.current=h,d.current=a,u.current=void 0}),y(function(){function e(){try{var e=c.current(n.getState());if(t(e,d.current))return;d.current=e}catch(r){u.current=r}o()}return s.onStateChange=e,s.trySubscribe(),e(),function(){return s.tryUnsubscribe()}},[n,s]),a}function eo(e){void 0===e&&(e=i);var t=e===i?Q:function(){return(0,r.useContext)(e)};return function(e,n){void 0===n&&(n=ei);var i,a=t(),o=ea(e,n,a.store,a.subscription);return(0,r.useDebugValue)(o),o}}var es=eo();s(n(73935).unstable_batchedUpdates)},76(e,t,n){"use strict";n.d(t,{VK:()=>f,rU:()=>v});var r=n(47886);function i(e,t){return(i=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e})(e,t)}function a(e,t){e.prototype=Object.create(t.prototype),e.prototype.constructor=e,i(e,t)}var o=n(67294),s=n(90071);function u(){return(u=Object.assign||function(e){for(var t=1;t=0||(i[n]=e[n]);return i}n(45697);var l=n(2177),f=function(e){function t(){for(var t,n=arguments.length,r=Array(n),i=0;iN,AW:()=>U,F0:()=>M,rs:()=>$,s6:()=>T,LX:()=>Y,k6:()=>G,TH:()=>W,UO:()=>K,$B:()=>V});var a=n(67294),o=n(45697),s=n.n(o),u=n(90071);function c(e,t){return(c=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e})(e,t)}function l(e,t){e.prototype=Object.create(t.prototype),e.prototype.constructor=e,c(e,t)}var f=1073741823,d="undefined"!=typeof globalThis?globalThis:"undefined"!=typeof window?window:void 0!==n.g?n.g:{};function h(){var e="__global_unique_id__";return d[e]=(d[e]||0)+1}function p(e,t){return e===t?0!==e||1/e==1/t:e!=e&&t!=t}function b(e){var t=[];return{on:function(e){t.push(e)},off:function(e){t=t.filter(function(t){return t!==e})},get:function(){return e},set:function(n,r){e=n,t.forEach(function(t){return t(e,r)})}}}function m(e){return Array.isArray(e)?e[0]:e}function g(e,t){var n,r,i="__create-react-context-"+h()+"__",o=function(e){function n(){var t;return 
t=e.apply(this,arguments)||this,t.emitter=b(t.props.value),t}l(n,e);var r=n.prototype;return r.getChildContext=function(){var e;return(e={})[i]=this.emitter,e},r.componentWillReceiveProps=function(e){if(this.props.value!==e.value){var n,r=this.props.value,i=e.value;p(r,i)?n=0:(n="function"==typeof t?t(r,i):f,0!=(n|=0)&&this.emitter.set(e.value,n))}},r.render=function(){return this.props.children},n}(a.Component);o.childContextTypes=((n={})[i]=s().object.isRequired,n);var u=function(t){function n(){var e;return e=t.apply(this,arguments)||this,e.state={value:e.getValue()},e.onUpdate=function(t,n){((0|e.observedBits)&n)!=0&&e.setState({value:e.getValue()})},e}l(n,t);var r=n.prototype;return r.componentWillReceiveProps=function(e){var t=e.observedBits;this.observedBits=null==t?f:t},r.componentDidMount=function(){this.context[i]&&this.context[i].on(this.onUpdate);var e=this.props.observedBits;this.observedBits=null==e?f:e},r.componentWillUnmount=function(){this.context[i]&&this.context[i].off(this.onUpdate)},r.getValue=function(){return this.context[i]?this.context[i].get():e},r.render=function(){return m(this.props.children)(this.state.value)},n}(a.Component);return u.contextTypes=((r={})[i]=s().object,r),{Provider:o,Consumer:u}}var v=a.createContext||g;let y=v;var w=n(2177);function _(){return(_=Object.assign||function(e){for(var t=1;t=0||(i[n]=e[n]);return i}function l(e,t){e.prototype=Object.create(t.prototype),e.prototype.constructor=e,e.__proto__=t}n(54726);var f="unmounted";t.UNMOUNTED=f;var d="exited";t.EXITED=d;var h="entering";t.ENTERING=h;var p="entered";t.ENTERED=p;var b="exiting";t.EXITING=b;var m=function(e){function t(t,n){r=e.call(this,t,n)||this;var r,i,a=n.transitionGroup,o=a&&!a.isMounting?t.enter:t.appear;return r.appearStatus=null,t.in?o?(i=d,r.appearStatus=h):i=p:i=t.unmountOnExit||t.mountOnEnter?f:d,r.state={status:i},r.nextCallback=null,r}l(t,e);var n=t.prototype;return 
n.getChildContext=function(){return{transitionGroup:null}},t.getDerivedStateFromProps=function(e,t){return e.in&&t.status===f?{status:d}:null},n.componentDidMount=function(){this.updateStatus(!0,this.appearStatus)},n.componentDidUpdate=function(e){var t=null;if(e!==this.props){var n=this.state.status;this.props.in?n!==h&&n!==p&&(t=h):(n===h||n===p)&&(t=b)}this.updateStatus(!1,t)},n.componentWillUnmount=function(){this.cancelNextCallback()},n.getTimeouts=function(){var e,t,n,r=this.props.timeout;return e=t=n=r,null!=r&&"number"!=typeof r&&(e=r.exit,t=r.enter,n=void 0!==r.appear?r.appear:t),{exit:e,enter:t,appear:n}},n.updateStatus=function(e,t){if(void 0===e&&(e=!1),null!==t){this.cancelNextCallback();var n=a.default.findDOMNode(this);t===h?this.performEnter(n,e):this.performExit(n)}else this.props.unmountOnExit&&this.state.status===d&&this.setState({status:f})},n.performEnter=function(e,t){var n=this,r=this.props.enter,i=this.context.transitionGroup?this.context.transitionGroup.isMounting:t,a=this.getTimeouts(),o=i?a.appear:a.enter;if(!t&&!r){this.safeSetState({status:p},function(){n.props.onEntered(e)});return}this.props.onEnter(e,i),this.safeSetState({status:h},function(){n.props.onEntering(e,i),n.onTransitionEnd(e,o,function(){n.safeSetState({status:p},function(){n.props.onEntered(e,i)})})})},n.performExit=function(e){var t=this,n=this.props.exit,r=this.getTimeouts();if(!n){this.safeSetState({status:d},function(){t.props.onExited(e)});return}this.props.onExit(e),this.safeSetState({status:b},function(){t.props.onExiting(e),t.onTransitionEnd(e,r.exit,function(){t.safeSetState({status:d},function(){t.props.onExited(e)})})})},n.cancelNextCallback=function(){null!==this.nextCallback&&(this.nextCallback.cancel(),this.nextCallback=null)},n.safeSetState=function(e,t){t=this.setNextCallback(t),this.setState(e,t)},n.setNextCallback=function(e){var t=this,n=!0;return 
this.nextCallback=function(r){n&&(n=!1,t.nextCallback=null,e(r))},this.nextCallback.cancel=function(){n=!1},this.nextCallback},n.onTransitionEnd=function(e,t,n){this.setNextCallback(n);var r=null==t&&!this.props.addEndListener;if(!e||r){setTimeout(this.nextCallback,0);return}this.props.addEndListener&&this.props.addEndListener(e,this.nextCallback),null!=t&&setTimeout(this.nextCallback,t)},n.render=function(){var e=this.state.status;if(e===f)return null;var t=this.props,n=t.children,r=c(t,["children"]);if(delete r.in,delete r.mountOnEnter,delete r.unmountOnExit,delete r.appear,delete r.enter,delete r.exit,delete r.timeout,delete r.addEndListener,delete r.onEnter,delete r.onEntering,delete r.onEntered,delete r.onExit,delete r.onExiting,delete r.onExited,"function"==typeof n)return n(e,r);var a=i.default.Children.only(n);return i.default.cloneElement(a,r)},t}(i.default.Component);function g(){}m.contextTypes={transitionGroup:r.object},m.childContextTypes={transitionGroup:function(){}},m.propTypes={},m.defaultProps={in:!1,mountOnEnter:!1,unmountOnExit:!1,appear:!1,enter:!0,exit:!0,onEnter:g,onEntering:g,onEntered:g,onExit:g,onExiting:g,onExited:g},m.UNMOUNTED=0,m.EXITED=1,m.ENTERING=2,m.ENTERED=3,m.EXITING=4;var v=(0,o.polyfill)(m);t.default=v},92381(e,t,n){"use strict";t.__esModule=!0,t.default=void 0;var r=s(n(45697)),i=s(n(67294)),a=n(46871),o=n(40537);function s(e){return e&&e.__esModule?e:{default:e}}function u(e,t){if(null==e)return{};var n,r,i={},a=Object.keys(e);for(r=0;r=0||(i[n]=e[n]);return i}function c(){return(c=Object.assign||function(e){for(var t=1;tI.length&&I.push(e)}function P(e,t,n,r){var i=typeof e;("undefined"===i||"boolean"===i)&&(e=null);var s=!1;if(null===e)s=!0;else switch(i){case"string":case"number":s=!0;break;case"object":switch(e.$$typeof){case a:case o:s=!0}}if(s)return n(r,e,""===t?"."+j(e,0):t),1;if(s=0,t=""===t?".":t+":",Array.isArray(e))for(var u=0;u2)?"one of ".concat(t," ").concat(e.slice(0,n-1).join(", "),", or ")+e[n-1]:2===n?"one 
of ".concat(t," ").concat(e[0]," or ").concat(e[1]):"of ".concat(t," ").concat(e[0])}function a(e,t,n){return e.substr(!n||n<0?0:+n,t.length)===t}function o(e,t,n){return(void 0===n||n>e.length)&&(n=e.length),e.substring(n-t.length,n)===t}function s(e,t,n){return"number"!=typeof n&&(n=0),!(n+t.length>e.length)&&-1!==e.indexOf(t,n)}r("ERR_INVALID_OPT_VALUE",function(e,t){return'The value "'+t+'" is invalid for option "'+e+'"'},TypeError),r("ERR_INVALID_ARG_TYPE",function(e,t,n){if("string"==typeof t&&a(t,"not ")?(r="must not be",t=t.replace(/^not /,"")):r="must be",o(e," argument"))u="The ".concat(e," ").concat(r," ").concat(i(t,"type"));else{var r,u,c=s(e,".")?"property":"argument";u='The "'.concat(e,'" ').concat(c," ").concat(r," ").concat(i(t,"type"))}return u+". Received type ".concat(typeof n)},TypeError),r("ERR_STREAM_PUSH_AFTER_EOF","stream.push() after EOF"),r("ERR_METHOD_NOT_IMPLEMENTED",function(e){return"The "+e+" method is not implemented"}),r("ERR_STREAM_PREMATURE_CLOSE","Premature close"),r("ERR_STREAM_DESTROYED",function(e){return"Cannot call "+e+" after a stream was destroyed"}),r("ERR_MULTIPLE_CALLBACK","Callback called multiple times"),r("ERR_STREAM_CANNOT_PIPE","Cannot pipe, not readable"),r("ERR_STREAM_WRITE_AFTER_END","write after end"),r("ERR_STREAM_NULL_VALUES","May not write null values to stream",TypeError),r("ERR_UNKNOWN_ENCODING",function(e){return"Unknown encoding: "+e},TypeError),r("ERR_STREAM_UNSHIFT_AFTER_END_EVENT","stream.unshift() after end event"),e.exports.q=n},56753(e,t,n){"use strict";var r=Object.keys||function(e){var t=[];for(var n in e)t.push(n);return t};e.exports=c;var i=n(79481),a=n(64229);n(35717)(c,i);for(var o=r(a.prototype),s=0;s0){if("string"==typeof t||u.objectMode||Object.getPrototypeOf(t)===a.prototype||(t=s(t)),r)u.endEmitted?S(e,new E):A(e,u,t,!0);else if(u.ended)S(e,new 
w);else{if(u.destroyed)return!1;u.reading=!1,u.decoder&&!n?(t=u.decoder.write(t),u.objectMode||0!==t.length?A(e,u,t,!1):j(e,u)):A(e,u,t,!1)}}else r||(u.reading=!1,j(e,u));return!u.ended&&(u.length=C?e=C:(e--,e|=e>>>1,e|=e>>>2,e|=e>>>4,e|=e>>>8,e|=e>>>16,e++),e}function D(e,t){return e<=0||0===t.length&&t.ended?0:t.objectMode?1:e!=e?t.flowing&&t.length?t.buffer.head.data.length:t.length:(e>t.highWaterMark&&(t.highWaterMark=I(e)),e<=t.length)?e:t.ended?t.length:(t.needReadable=!0,0)}function N(e,t){if(f("onEofChunk"),!t.ended){if(t.decoder){var n=t.decoder.end();n&&n.length&&(t.buffer.push(n),t.length+=t.objectMode?1:n.length)}t.ended=!0,t.sync?P(e):(t.needReadable=!1,t.emittedReadable||(t.emittedReadable=!0,R(e)))}}function P(e){var t=e._readableState;f("emitReadable",t.needReadable,t.emittedReadable),t.needReadable=!1,t.emittedReadable||(f("emitReadable",t.flowing),t.emittedReadable=!0,process.nextTick(R,e))}function R(e){var t=e._readableState;f("emitReadable_",t.destroyed,t.length,t.ended),!t.destroyed&&(t.length||t.ended)&&(e.emit("readable"),t.emittedReadable=!1),t.needReadable=!t.flowing&&!t.ended&&t.length<=t.highWaterMark,z(e)}function j(e,t){t.readingMore||(t.readingMore=!0,process.nextTick(F,e,t))}function F(e,t){for(;!t.reading&&!t.ended&&(t.length0,t.resumeScheduled&&!t.paused?t.flowing=!0:e.listenerCount("data")>0&&e.resume()}function U(e){f("readable nexttick read 0"),e.read(0)}function H(e,t){t.resumeScheduled||(t.resumeScheduled=!0,process.nextTick($,e,t))}function $(e,t){f("resume",t.reading),t.reading||e.read(0),t.resumeScheduled=!1,e.emit("resume"),z(e),t.flowing&&!t.reading&&e.read(0)}function z(e){var t=e._readableState;for(f("flow",t.flowing);t.flowing&&null!==e.read(););}function G(e,t){var n;return 0===t.length?null:(t.objectMode?n=t.buffer.shift():!e||e>=t.length?(n=t.decoder?t.buffer.join(""):1===t.buffer.length?t.buffer.first():t.buffer.concat(t.length),t.buffer.clear()):n=t.buffer.consume(e,t.decoder),n)}function W(e){var 
t=e._readableState;f("endReadable",t.endEmitted),t.endEmitted||(t.ended=!0,process.nextTick(K,t,e))}function K(e,t){if(f("endReadableNT",e.endEmitted,e.length),!e.endEmitted&&0===e.length&&(e.endEmitted=!0,t.readable=!1,t.emit("end"),e.autoDestroy)){var n=t._writableState;(!n||n.autoDestroy&&n.finished)&&t.destroy()}}function V(e,t){for(var n=0,r=e.length;n=n.highWaterMark:n.length>0)||n.ended))return f("read: emitReadable",n.length,n.ended),0===n.length&&n.ended?W(this):P(this),null;if(0===(e=D(e,n))&&n.ended)return 0===n.length&&W(this),null;var i=n.needReadable;return f("need readable",i),(0===n.length||n.length-e0?G(e,n):null)?(n.needReadable=n.length<=n.highWaterMark,e=0):(n.length-=e,n.awaitDrain=0),0===n.length&&(n.ended||(n.needReadable=!0),r!==e&&n.ended&&W(this)),null!==t&&this.emit("data",t),t},M.prototype._read=function(e){S(this,new _("_read()"))},M.prototype.pipe=function(e,t){var n=this,i=this._readableState;switch(i.pipesCount){case 0:i.pipes=e;break;case 1:i.pipes=[i.pipes,e];break;default:i.pipes.push(e)}i.pipesCount+=1,f("pipe count=%d opts=%j",i.pipesCount,t);var a=t&&!1===t.end||e===process.stdout||e===process.stderr?m:s;function o(e,t){f("onunpipe"),e===n&&t&&!1===t.hasUnpiped&&(t.hasUnpiped=!0,l())}function s(){f("onend"),e.end()}i.endEmitted?process.nextTick(a):n.once("end",a),e.on("unpipe",o);var u=Y(n);e.on("drain",u);var c=!1;function l(){f("cleanup"),e.removeListener("close",p),e.removeListener("finish",b),e.removeListener("drain",u),e.removeListener("error",h),e.removeListener("unpipe",o),n.removeListener("end",s),n.removeListener("end",m),n.removeListener("data",d),c=!0,i.awaitDrain&&(!e._writableState||e._writableState.needDrain)&&u()}function d(t){f("ondata");var r=e.write(t);f("dest.write",r),!1===r&&((1===i.pipesCount&&i.pipes===e||i.pipesCount>1&&-1!==V(i.pipes,e))&&!c&&(f("false write response, pause",i.awaitDrain),i.awaitDrain++),n.pause())}function 
h(t){f("onerror",t),m(),e.removeListener("error",h),0===r(e,"error")&&S(e,t)}function p(){e.removeListener("finish",b),m()}function b(){f("onfinish"),e.removeListener("close",p),m()}function m(){f("unpipe"),n.unpipe(e)}return n.on("data",d),x(e,"error",h),e.once("close",p),e.once("finish",b),e.emit("pipe",n),i.flowing||(f("pipe resume"),n.resume()),e},M.prototype.unpipe=function(e){var t=this._readableState,n={hasUnpiped:!1};if(0===t.pipesCount)return this;if(1===t.pipesCount)return e&&e!==t.pipes||(e||(e=t.pipes),t.pipes=null,t.pipesCount=0,t.flowing=!1,e&&e.emit("unpipe",this,n)),this;if(!e){var r=t.pipes,i=t.pipesCount;t.pipes=null,t.pipesCount=0,t.flowing=!1;for(var a=0;a0,!1!==r.flowing&&this.resume()):"readable"!==e||r.endEmitted||r.readableListening||(r.readableListening=r.needReadable=!0,r.flowing=!1,r.emittedReadable=!1,f("on readable",r.length,r.reading),r.length?P(this):r.reading||process.nextTick(U,this)),n},M.prototype.addListener=M.prototype.on,M.prototype.removeListener=function(e,t){var n=i.prototype.removeListener.call(this,e,t);return"readable"===e&&process.nextTick(B,this),n},M.prototype.removeAllListeners=function(e){var t=i.prototype.removeAllListeners.apply(this,arguments);return("readable"===e||void 0===e)&&process.nextTick(B,this),t},M.prototype.resume=function(){var e=this._readableState;return e.flowing||(f("resume"),e.flowing=!e.readableListening,H(this,e)),e.paused=!1,this},M.prototype.pause=function(){return f("call pause flowing=%j",this._readableState.flowing),!1!==this._readableState.flowing&&(f("pause"),this._readableState.flowing=!1,this.emit("pause")),this._readableState.paused=!0,this},M.prototype.wrap=function(e){var t=this,n=this._readableState,r=!1;for(var i in e.on("end",function(){if(f("wrapped end"),n.decoder&&!n.ended){var e=n.decoder.end();e&&e.length&&t.push(e)}t.push(null)}),e.on("data",function(i){if(f("wrapped 
data"),n.decoder&&(i=n.decoder.write(i)),!n.objectMode||null!=i)(n.objectMode||i&&i.length)&&(t.push(i)||(r=!0,e.pause()))}),e)void 0===this[i]&&"function"==typeof e[i]&&(this[i]=function(t){return function(){return e[t].apply(e,arguments)}}(i));for(var a=0;a-1))throw new E(e);return this._writableState.defaultEncoding=e,this},Object.defineProperty(T.prototype,"writableBuffer",{enumerable:!1,get:function(){return this._writableState&&this._writableState.getBuffer()}}),Object.defineProperty(T.prototype,"writableHighWaterMark",{enumerable:!1,get:function(){return this._writableState.highWaterMark}}),T.prototype._write=function(e,t,n){n(new m("_write()"))},T.prototype._writev=null,T.prototype.end=function(e,t,n){var r=this._writableState;return"function"==typeof e?(n=e,e=null,t=null):"function"==typeof t&&(n=t,t=null),null!=e&&this.write(e,t),r.corked&&(r.corked=1,this.uncork()),r.ending||H(this,r,n),this},Object.defineProperty(T.prototype,"writableLength",{enumerable:!1,get:function(){return this._writableState.length}}),Object.defineProperty(T.prototype,"destroyed",{enumerable:!1,get:function(){return void 0!==this._writableState&&this._writableState.destroyed},set:function(e){this._writableState&&(this._writableState.destroyed=e)}}),T.prototype.destroy=d.destroy,T.prototype._undestroy=d.undestroy,T.prototype._destroy=function(e,t){t(e)}},45850(e,t,n){"use strict";function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}var i,a=n(8610),o=Symbol("lastResolve"),s=Symbol("lastReject"),u=Symbol("error"),c=Symbol("ended"),l=Symbol("lastPromise"),f=Symbol("handlePromise"),d=Symbol("stream");function h(e,t){return{value:e,done:t}}function p(e){var t=e[o];if(null!==t){var n=e[d].read();null!==n&&(e[l]=null,e[o]=null,e[s]=null,t(h(n,!1)))}}function b(e){process.nextTick(p,e)}function m(e,t){return function(n,r){e.then(function(){if(t[c]){n(h(void 0,!0));return}t[f](n,r)},r)}}var 
g=Object.getPrototypeOf(function(){}),v=Object.setPrototypeOf((i={get stream(){return this[d]},next:function(){var e,t=this,n=this[u];if(null!==n)return Promise.reject(n);if(this[c])return Promise.resolve(h(void 0,!0));if(this[d].destroyed)return new Promise(function(e,n){process.nextTick(function(){t[u]?n(t[u]):e(h(void 0,!0))})});var r=this[l];if(r)e=new Promise(m(r,this));else{var i=this[d].read();if(null!==i)return Promise.resolve(h(i,!1));e=new Promise(this[f])}return this[l]=e,e}},r(i,Symbol.asyncIterator,function(){return this}),r(i,"return",function(){var e=this;return new Promise(function(t,n){e[d].destroy(null,function(e){if(e){n(e);return}t(h(void 0,!0))})})}),i),g),y=function(e){var t,n=Object.create(v,(r(t={},d,{value:e,writable:!0}),r(t,o,{value:null,writable:!0}),r(t,s,{value:null,writable:!0}),r(t,u,{value:null,writable:!0}),r(t,c,{value:e._readableState.endEmitted,writable:!0}),r(t,f,{value:function(e,t){var r=n[d].read();r?(n[l]=null,n[o]=null,n[s]=null,e(h(r,!1))):(n[o]=e,n[s]=t)},writable:!0}),t));return n[l]=null,a(e,function(e){if(e&&"ERR_STREAM_PREMATURE_CLOSE"!==e.code){var t=n[s];null!==t&&(n[l]=null,n[o]=null,n[s]=null,t(e)),n[u]=e;return}var r=n[o];null!==r&&(n[l]=null,n[o]=null,n[s]=null,r(h(void 0,!0))),n[c]=!0}),e.on("readable",b.bind(null,n)),n};e.exports=y},77086(e,t,n){"use strict";function r(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter(function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable})),n.push.apply(n,r)}return n}function i(e){for(var t=1;t0?this.tail.next=t:this.head=t,this.tail=t,++this.length}},{key:"unshift",value:function(e){var t={data:e,next:this.head};0===this.length&&(this.tail=t),this.head=t,++this.length}},{key:"shift",value:function(){if(0!==this.length){var e=this.head.data;return 
1===this.length?this.head=this.tail=null:this.head=this.head.next,--this.length,e}}},{key:"clear",value:function(){this.head=this.tail=null,this.length=0}},{key:"join",value:function(e){if(0===this.length)return"";for(var t=this.head,n=""+t.data;t=t.next;)n+=e+t.data;return n}},{key:"concat",value:function(e){if(0===this.length)return c.alloc(0);for(var t=c.allocUnsafe(e>>>0),n=this.head,r=0;n;)d(n.data,t,r),r+=n.data.length,n=n.next;return t}},{key:"consume",value:function(e,t){var n;return ei.length?i.length:e;if(a===i.length?r+=i:r+=i.slice(0,e),0==(e-=a)){a===i.length?(++n,t.next?this.head=t.next:this.head=this.tail=null):(this.head=t,t.data=i.slice(a));break}++n}return this.length-=n,r}},{key:"_getBuffer",value:function(e){var t=c.allocUnsafe(e),n=this.head,r=1;for(n.data.copy(t),e-=n.data.length;n=n.next;){var i=n.data,a=e>i.length?i.length:e;if(i.copy(t,t.length-e,0,a),0==(e-=a)){a===i.length?(++r,n.next?this.head=n.next:this.head=this.tail=null):(this.head=n,n.data=i.slice(a));break}++r}return this.length-=r,t}},{key:f,value:function(e,t){return l(this,i({},t,{depth:0,customInspect:!1}))}}]),e}()},61195(e){"use strict";function t(e,t){var i=this,o=this._readableState&&this._readableState.destroyed,s=this._writableState&&this._writableState.destroyed;return o||s?(t?t(e):e&&(this._writableState?this._writableState.errorEmitted||(this._writableState.errorEmitted=!0,process.nextTick(a,this,e)):process.nextTick(a,this,e)),this):(this._readableState&&(this._readableState.destroyed=!0),this._writableState&&(this._writableState.destroyed=!0),this._destroy(e||null,function(e){!t&&e?i._writableState?i._writableState.errorEmitted?process.nextTick(r,i):(i._writableState.errorEmitted=!0,process.nextTick(n,i,e)):process.nextTick(n,i,e):t?(process.nextTick(r,i),t(e)):process.nextTick(r,i)}),this)}function n(e,t){a(e,t),r(e)}function r(e){(!e._writableState||e._writableState.emitClose)&&(!e._readableState||e._readableState.emitClose)&&e.emit("close")}function 
i(){this._readableState&&(this._readableState.destroyed=!1,this._readableState.reading=!1,this._readableState.ended=!1,this._readableState.endEmitted=!1),this._writableState&&(this._writableState.destroyed=!1,this._writableState.ended=!1,this._writableState.ending=!1,this._writableState.finalCalled=!1,this._writableState.prefinished=!1,this._writableState.finished=!1,this._writableState.errorEmitted=!1)}function a(e,t){e.emit("error",t)}function o(e,t){var n=e._readableState,r=e._writableState;n&&n.autoDestroy||r&&r.autoDestroy?e.destroy(t):e.emit("error",t)}e.exports={destroy:t,undestroy:i,errorOrDestroy:o}},8610(e,t,n){"use strict";var r=n(94281).q.ERR_STREAM_PREMATURE_CLOSE;function i(e){var t=!1;return function(){if(!t){t=!0;for(var n=arguments.length,r=Array(n),i=0;i0,function(t){e||(e=t),t&&a.forEach(f),o||(a.forEach(f),i(e))})});return n.reduce(d)}e.exports=p},82457(e,t,n){"use strict";var r=n(94281).q.ERR_INVALID_OPT_VALUE;function i(e,t,n){return null!=e.highWaterMark?e.highWaterMark:t?e[n]:null}function a(e,t,n,a){var o=i(t,a,n);if(null!=o){if(!(isFinite(o)&&Math.floor(o)===o)||o<0){var s=a?n:"highWaterMark";throw new r(s,o)}return Math.floor(o)}return e.objectMode?16:16384}e.exports={getHighWaterMark:a}},22503(e,t,n){e.exports=n(17187).EventEmitter},61566(e,t){"use strict";t.__esModule=!0,t.default=void 0;var n=function(e){return"string"==typeof e?e:e?e.displayName||e.name||"Component":void 0};t.default=n},60375(e){"use strict";var t=Object.prototype.hasOwnProperty;function n(e,t){return e===t?0!==e||0!==t||1/e==1/t:e!=e&&t!=t}function r(e,r){if(n(e,r))return!0;if("object"!=typeof e||null===e||"object"!=typeof r||null===r)return!1;var i=Object.keys(e),a=Object.keys(r);if(i.length!==a.length)return!1;for(var o=0;og,DE:()=>b,UY:()=>h,qC:()=>m,MT:()=>f});var s="function"==typeof Symbol&&Symbol.observable||"@@observable",u=function(){return 
Math.random().toString(36).substring(7).split("").join(".")},c={INIT:"@@redux/INIT"+u(),REPLACE:"@@redux/REPLACE"+u(),PROBE_UNKNOWN_ACTION:function(){return"@@redux/PROBE_UNKNOWN_ACTION"+u()}};function l(e){if("object"!=typeof e||null===e)return!1;for(var t=e;null!==Object.getPrototypeOf(t);)t=Object.getPrototypeOf(t);return Object.getPrototypeOf(e)===t}function f(e,t,n){if("function"==typeof t&&"function"==typeof n||"function"==typeof n&&"function"==typeof arguments[3])throw Error(o(0));if("function"==typeof t&&void 0===n&&(n=t,t=void 0),void 0!==n){if("function"!=typeof n)throw Error(o(1));return n(f)(e,t)}if("function"!=typeof e)throw Error(o(2));var r,i=e,a=t,u=[],d=u,h=!1;function p(){d===u&&(d=u.slice())}function b(){if(h)throw Error(o(3));return a}function m(e){if("function"!=typeof e)throw Error(o(4));if(h)throw Error(o(5));var t=!0;return p(),d.push(e),function(){if(t){if(h)throw Error(o(6));t=!1,p();var n=d.indexOf(e);d.splice(n,1),u=null}}}function g(e){if(!l(e))throw Error(o(7));if(void 0===e.type)throw Error(o(8));if(h)throw Error(o(9));try{h=!0,a=i(a,e)}finally{h=!1}for(var t=u=d,n=0;n]?|>=?|\?=|[-+\/=])(?=\s)/,lookbehind:!0},"string-operator":{pattern:/(\s)&&?(?=\s)/,lookbehind:!0,alias:"keyword"},"token-operator":[{pattern:/(\w)(?:->?|=>|[~|{}])(?=\w)/,lookbehind:!0,alias:"punctuation"},{pattern:/[|{}]/,alias:"punctuation"}],punctuation:/[,.:()]/}}e.exports=t,t.displayName="abap",t.aliases=[]},68313(e){"use strict";function t(e){var t,n;n="(?:ALPHA|BIT|CHAR|CR|CRLF|CTL|DIGIT|DQUOTE|HEXDIG|HTAB|LF|LWSP|OCTET|SP|VCHAR|WSP)",(t=e).languages.abnf={comment:/;.*/,string:{pattern:/(?:%[is])?"[^"\n\r]*"/,greedy:!0,inside:{punctuation:/^%[is]/}},range:{pattern:/%(?:b[01]+-[01]+|d\d+-\d+|x[A-F\d]+-[A-F\d]+)/i,alias:"number"},terminal:{pattern:/%(?:b[01]+(?:\.[01]+)*|d\d+(?:\.\d+)*|x[A-F\d]+(?:\.[A-F\d]+)*)/i,alias:"number"},repetition:{pattern:/(^|[^\w-])(?:\d*\*\d*|\d+)/,lookbehind:!0,alias:"operator"},definition:{pattern:/(^[ 
\t]*)(?:[a-z][\w-]*|<[^<>\r\n]*>)(?=\s*=)/m,lookbehind:!0,alias:"keyword",inside:{punctuation:/<|>/}},"core-rule":{pattern:RegExp("(?:(^|[^<\\w-])"+n+"|<"+n+">)(?![\\w-])","i"),lookbehind:!0,alias:["rule","constant"],inside:{punctuation:/<|>/}},rule:{pattern:/(^|[^<\w-])[a-z][\w-]*|<[^<>\r\n]*>/i,lookbehind:!0,inside:{punctuation:/<|>/}},operator:/=\/?|\//,punctuation:/[()\[\]]/}}e.exports=t,t.displayName="abnf",t.aliases=[]},5199(e){"use strict";function t(e){e.languages.actionscript=e.languages.extend("javascript",{keyword:/\b(?:as|break|case|catch|class|const|default|delete|do|else|extends|finally|for|function|if|implements|import|in|instanceof|interface|internal|is|native|new|null|package|private|protected|public|return|super|switch|this|throw|try|typeof|use|var|void|while|with|dynamic|each|final|get|include|namespace|override|set|static)\b/,operator:/\+\+|--|(?:[+\-*\/%^]|&&?|\|\|?|<>?>?|[!=]=?)=?|[~?@]/}),e.languages.actionscript["class-name"].alias="function",e.languages.markup&&e.languages.insertBefore("actionscript","string",{xml:{pattern:/(^|[^.])<\/?\w+(?:\s+[^\s>\/=]+=("|')(?:\\[\s\S]|(?!\2)[^\\])*\2)*\s*\/?>/,lookbehind:!0,inside:e.languages.markup}})}e.exports=t,t.displayName="actionscript",t.aliases=[]},89693(e){"use strict";function 
t(e){e.languages.ada={comment:/--.*/,string:/"(?:""|[^"\r\f\n])*"/i,number:[{pattern:/\b\d(?:_?\d)*#[\dA-F](?:_?[\dA-F])*(?:\.[\dA-F](?:_?[\dA-F])*)?#(?:E[+-]?\d(?:_?\d)*)?/i},{pattern:/\b\d(?:_?\d)*(?:\.\d(?:_?\d)*)?(?:E[+-]?\d(?:_?\d)*)?\b/i}],"attr-name":/\b'\w+/i,keyword:/\b(?:abort|abs|abstract|accept|access|aliased|all|and|array|at|begin|body|case|constant|declare|delay|delta|digits|do|else|new|return|elsif|end|entry|exception|exit|for|function|generic|goto|if|in|interface|is|limited|loop|mod|not|null|of|others|out|overriding|package|pragma|private|procedure|protected|raise|range|record|rem|renames|requeue|reverse|select|separate|some|subtype|synchronized|tagged|task|terminate|then|type|until|use|when|while|with|xor)\b/i,boolean:/\b(?:true|false)\b/i,operator:/<[=>]?|>=?|=>?|:=|\/=?|\*\*?|[&+-]/,punctuation:/\.\.?|[,;():]/,char:/'.'/,variable:/\b[a-z](?:\w)*\b/i}}e.exports=t,t.displayName="ada",t.aliases=[]},24001(e){"use strict";function t(e){var t;(t=e).languages.agda={comment:/\{-[\s\S]*?(?:-\}|$)|--.*/,string:{pattern:/"(?:\\(?:\r\n|[\s\S])|[^\\\r\n"])*"/,greedy:!0},punctuation:/[(){}⦃⦄.;@]/,"class-name":{pattern:/((?:data|record) +)\S+/,lookbehind:!0},function:{pattern:/(^[ \t]*)(?!\s)[^:\r\n]+(?=:)/m,lookbehind:!0},operator:{pattern:/(^\s*|\s)(?:[=|:∀→λ\\?_]|->)(?=\s)/,lookbehind:!0},keyword:/\b(?:Set|abstract|constructor|data|eta-equality|field|forall|hiding|import|in|inductive|infix|infixl|infixr|instance|let|macro|module|mutual|no-eta-equality|open|overlap|pattern|postulate|primitive|private|public|quote|quoteContext|quoteGoal|quoteTerm|record|renaming|rewrite|syntax|tactic|unquote|unquoteDecl|unquoteDef|using|variable|where|with)\b/}}e.exports=t,t.displayName="agda",t.aliases=[]},18018(e){"use strict";function 
t(e){e.languages.al={comment:/\/\/.*|\/\*[\s\S]*?\*\//,string:{pattern:/'(?:''|[^'\r\n])*'(?!')|"(?:""|[^"\r\n])*"(?!")/,greedy:!0},function:{pattern:/(\b(?:event|procedure|trigger)\s+|(?:^|[^.])\.\s*)[a-z_]\w*(?=\s*\()/i,lookbehind:!0},keyword:[/\b(?:array|asserterror|begin|break|case|do|downto|else|end|event|exit|for|foreach|function|if|implements|in|indataset|interface|internal|local|of|procedure|program|protected|repeat|runonclient|securityfiltering|suppressdispose|temporary|then|to|trigger|until|var|while|with|withevents)\b/i,/\b(?:action|actions|addafter|addbefore|addfirst|addlast|area|assembly|chartpart|codeunit|column|controladdin|cuegroup|customizes|dataitem|dataset|dotnet|elements|enum|enumextension|extends|field|fieldattribute|fieldelement|fieldgroup|fieldgroups|fields|filter|fixed|grid|group|key|keys|label|labels|layout|modify|moveafter|movebefore|movefirst|movelast|page|pagecustomization|pageextension|part|profile|query|repeater|report|requestpage|schema|separator|systempart|table|tableelement|tableextension|textattribute|textelement|type|usercontrol|value|xmlport)\b/i],number:/\b(?:0x[\da-f]+|(?:\d+(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?)(?:F|U(?:LL?)?|LL?)?\b/i,boolean:/\b(?:false|true)\b/i,variable:/\b(?:Curr(?:FieldNo|Page|Report)|RequestOptionsPage|x?Rec)\b/,"class-name":/\b(?:automation|biginteger|bigtext|blob|boolean|byte|char|clienttype|code|completiontriggererrorlevel|connectiontype|database|dataclassification|datascope|date|dateformula|datetime|decimal|defaultlayout|dialog|dictionary|dotnetassembly|dotnettypedeclaration|duration|errorinfo|errortype|executioncontext|executionmode|fieldclass|fieldref|fieldtype|file|filterpagebuilder|guid|httpclient|httpcontent|httpheaders|httprequestmessage|httpresponsemessage|instream|integer|joker|jsonarray|jsonobject|jsontoken|jsonvalue|keyref|list|moduledependencyinfo|moduleinfo|none|notification|notificationscope|objecttype|option|outstream|pageresult|record|recordid|recordref|reportformat|securityfilter|sessionset
tings|tableconnectiontype|tablefilter|testaction|testfield|testfilterfield|testpage|testpermissions|testrequestpage|text|textbuilder|textconst|textencoding|time|transactionmodel|transactiontype|variant|verbosity|version|view|views|webserviceactioncontext|webserviceactionresultcode|xmlattribute|xmlattributecollection|xmlcdata|xmlcomment|xmldeclaration|xmldocument|xmldocumenttype|xmlelement|xmlnamespacemanager|xmlnametable|xmlnode|xmlnodelist|xmlprocessinginstruction|xmlreadoptions|xmltext|xmlwriteoptions)\b/i,operator:/\.\.|:[=:]|[-+*/]=?|<>|[<>]=?|=|\b(?:and|div|mod|not|or|xor)\b/i,punctuation:/[()\[\]{}:.;,]/}}e.exports=t,t.displayName="al",t.aliases=[]},36363(e){"use strict";function t(e){e.languages.antlr4={comment:/\/\/.*|\/\*[\s\S]*?(?:\*\/|$)/,string:{pattern:/'(?:\\.|[^\\'\r\n])*'/,greedy:!0},"character-class":{pattern:/\[(?:\\.|[^\\\]\r\n])*\]/,greedy:!0,alias:"regex",inside:{range:{pattern:/([^[]|(?:^|[^\\])(?:\\\\)*\\\[)-(?!\])/,lookbehind:!0,alias:"punctuation"},escape:/\\(?:u(?:[a-fA-F\d]{4}|\{[a-fA-F\d]+\})|[pP]\{[=\w-]+\}|[^\r\nupP])/,punctuation:/[\[\]]/}},action:{pattern:/\{(?:[^{}]|\{(?:[^{}]|\{(?:[^{}]|\{[^{}]*\})*\})*\})*\}/,greedy:!0,inside:{content:{pattern:/(\{)[\s\S]+(?=\})/,lookbehind:!0},punctuation:/[{}]/}},command:{pattern:/(->\s*(?!\s))(?:\s*(?:,\s*)?\b[a-z]\w*(?:\s*\([^()\r\n]*\))?)+(?=\s*;)/i,lookbehind:!0,inside:{function:/\b\w+(?=\s*(?:[,(]|$))/,punctuation:/[,()]/}},annotation:{pattern:/@\w+(?:::\w+)*/,alias:"keyword"},label:{pattern:/#[ \t]*\w+/,alias:"punctuation"},keyword:/\b(?:catch|channels|finally|fragment|grammar|import|lexer|locals|mode|options|parser|returns|throws|tokens)\b/,definition:[{pattern:/\b[a-z]\w*(?=\s*:)/,alias:["rule","class-name"]},{pattern:/\b[A-Z]\w*(?=\s*:)/,alias:["token","constant"]}],constant:/\b[A-Z][A-Z_]*\b/,operator:/\.\.|->|[|~]|[*+?]\??/,punctuation:/[;:()=]/},e.languages.g4=e.languages.antlr4}e.exports=t,t.displayName="antlr4",t.aliases=["g4"]},35281(e){"use strict";function 
t(e){e.languages.apacheconf={comment:/#.*/,"directive-inline":{pattern:/(^[\t ]*)\b(?:AcceptFilter|AcceptPathInfo|AccessFileName|Action|Add(?:Alt|AltByEncoding|AltByType|Charset|DefaultCharset|Description|Encoding|Handler|Icon|IconByEncoding|IconByType|InputFilter|Language|ModuleInfo|OutputFilter|OutputFilterByType|Type)|Alias|AliasMatch|Allow(?:CONNECT|EncodedSlashes|Methods|Override|OverrideList)?|Anonymous(?:_LogEmail|_MustGiveEmail|_NoUserID|_VerifyEmail)?|AsyncRequestWorkerFactor|Auth(?:BasicAuthoritative|BasicFake|BasicProvider|BasicUseDigestAlgorithm|DBDUserPWQuery|DBDUserRealmQuery|DBMGroupFile|DBMType|DBMUserFile|Digest(?:Algorithm|Domain|NonceLifetime|Provider|Qop|ShmemSize)|Form(?:Authoritative|Body|DisableNoStore|FakeBasicAuth|Location|LoginRequiredLocation|LoginSuccessLocation|LogoutLocation|Method|Mimetype|Password|Provider|SitePassphrase|Size|Username)|GroupFile|LDAP(?:AuthorizePrefix|BindAuthoritative|BindDN|BindPassword|CharsetConfig|CompareAsUser|CompareDNOnServer|DereferenceAliases|GroupAttribute|GroupAttributeIsDN|InitialBindAsUser|InitialBindPattern|MaxSubGroupDepth|RemoteUserAttribute|RemoteUserIsDN|SearchAsUser|SubGroupAttribute|SubGroupClass|Url)|Merging|Name|Type|UserFile|nCache(?:Context|Enable|ProvideFor|SOCache|Timeout)|nzFcgiCheckAuthnProvider|nzFcgiDefineProvider|zDBDLoginToReferer|zDBDQuery|zDBDRedirectQuery|zDBMType|zSendForbiddenOnFailure)|BalancerGrowth|BalancerInherit|BalancerMember|BalancerPersist|BrowserMatch|BrowserMatchNoCase|BufferSize|BufferedLogs|CGIDScriptTimeout|CGIMapExtension|Cache(?:DefaultExpire|DetailHeader|DirLength|DirLevels|Disable|Enable|File|Header|IgnoreCacheControl|IgnoreHeaders|IgnoreNoLastMod|IgnoreQueryString|IgnoreURLSessionIdentifiers|KeyBaseURL|LastModifiedFactor|Lock|LockMaxAge|LockPath|MaxExpire|MaxFileSize|MinExpire|MinFileSize|NegotiatedDocs|QuickHandler|ReadSize|ReadTime|Root|Socache(?:MaxSize|MaxTime|MinTime|ReadSize|ReadTime)?|StaleOnError|StoreExpired|StoreNoStore|StorePrivate)|CharsetDefault|Char
setOptions|CharsetSourceEnc|CheckCaseOnly|CheckSpelling|ChrootDir|ContentDigest|CookieDomain|CookieExpires|CookieName|CookieStyle|CookieTracking|CoreDumpDirectory|CustomLog|DBDExptime|DBDInitSQL|DBDKeep|DBDMax|DBDMin|DBDParams|DBDPersist|DBDPrepareSQL|DBDriver|DTracePrivileges|Dav|DavDepthInfinity|DavGenericLockDB|DavLockDB|DavMinTimeout|DefaultIcon|DefaultLanguage|DefaultRuntimeDir|DefaultType|Define|Deflate(?:BufferSize|CompressionLevel|FilterNote|InflateLimitRequestBody|InflateRatio(?:Burst|Limit)|MemLevel|WindowSize)|Deny|DirectoryCheckHandler|DirectoryIndex|DirectoryIndexRedirect|DirectorySlash|DocumentRoot|DumpIOInput|DumpIOOutput|EnableExceptionHook|EnableMMAP|EnableSendfile|Error|ErrorDocument|ErrorLog|ErrorLogFormat|Example|ExpiresActive|ExpiresByType|ExpiresDefault|ExtFilterDefine|ExtFilterOptions|ExtendedStatus|FallbackResource|FileETag|FilterChain|FilterDeclare|FilterProtocol|FilterProvider|FilterTrace|ForceLanguagePriority|ForceType|ForensicLog|GprofDir|GracefulShutdownTimeout|Group|Header|HeaderName|Heartbeat(?:Address|Listen|MaxServers|Storage)|HostnameLookups|ISAPI(?:AppendLogToErrors|AppendLogToQuery|CacheFile|FakeAsync|LogNotSupported|ReadAheadBuffer)|IdentityCheck|IdentityCheckTimeout|ImapBase|ImapDefault|ImapMenu|Include|IncludeOptional|Index(?:HeadInsert|Ignore|IgnoreReset|Options|OrderDefault|StyleSheet)|InputSed|KeepAlive|KeepAliveTimeout|KeptBodySize|LDAP(?:CacheEntries|CacheTTL|ConnectionPoolTTL|ConnectionTimeout|LibraryDebug|OpCacheEntries|OpCacheTTL|ReferralHopLimit|Referrals|Retries|RetryDelay|SharedCacheFile|SharedCacheSize|Timeout|TrustedClientCert|TrustedGlobalCert|TrustedMode|VerifyServerCert)|LanguagePriority|Limit(?:InternalRecursion|Request(?:Body|FieldSize|Fields|Line)|XMLRequestBody)|Listen|ListenBackLog|LoadFile|LoadModule|LogFormat|LogLevel|LogMessage|LuaAuthzProvider|LuaCodeCache|Lua(?:Hook(?:AccessChecker|AuthChecker|CheckUserID|Fixups|InsertFilter|Log|MapToStorage|TranslateName|TypeChecker)|Inherit|InputFilter|MapHandler|Out
putFilter|PackageCPath|PackagePath|QuickHandler|Root|Scope)|MMapFile|Max(?:ConnectionsPerChild|KeepAliveRequests|MemFree|RangeOverlaps|RangeReversals|Ranges|RequestWorkers|SpareServers|SpareThreads|Threads)|MergeTrailers|MetaDir|MetaFiles|MetaSuffix|MimeMagicFile|MinSpareServers|MinSpareThreads|ModMimeUsePathInfo|ModemStandard|MultiviewsMatch|Mutex|NWSSLTrustedCerts|NWSSLUpgradeable|NameVirtualHost|NoProxy|Options|Order|OutputSed|PassEnv|PidFile|PrivilegesMode|Protocol|ProtocolEcho|Proxy(?:AddHeaders|BadHeader|Block|Domain|ErrorOverride|ExpressDBMFile|ExpressDBMType|ExpressEnable|FtpDirCharset|FtpEscapeWildcards|FtpListOnWildcard|HTML(?:BufSize|CharsetOut|DocType|Enable|Events|Extended|Fixups|Interp|Links|Meta|StripComments|URLMap)|IOBufferSize|MaxForwards|Pass(?:Inherit|InterpolateEnv|Match|Reverse|ReverseCookieDomain|ReverseCookiePath)?|PreserveHost|ReceiveBufferSize|Remote|RemoteMatch|Requests|SCGIInternalRedirect|SCGISendfile|Set|SourceAddress|Status|Timeout|Via)|RLimitCPU|RLimitMEM|RLimitNPROC|ReadmeName|ReceiveBufferSize|Redirect|RedirectMatch|RedirectPermanent|RedirectTemp|ReflectorHeader|RemoteIP(?:Header|InternalProxy|InternalProxyList|ProxiesHeader|TrustedProxy|TrustedProxyList)|RemoveCharset|RemoveEncoding|RemoveHandler|RemoveInputFilter|RemoveLanguage|RemoveOutputFilter|RemoveType|RequestHeader|RequestReadTimeout|Require|Rewrite(?:Base|Cond|Engine|Map|Options|Rule)|SSIETag|SSIEndTag|SSIErrorMsg|SSILastModified|SSILegacyExprParser|SSIStartTag|SSITimeFormat|SSIUndefinedEcho|SSL(?:CACertificateFile|CACertificatePath|CADNRequestFile|CADNRequestPath|CARevocationCheck|CARevocationFile|CARevocationPath|CertificateChainFile|CertificateFile|CertificateKeyFile|CipherSuite|Compression|CryptoDevice|Engine|FIPS|HonorCipherOrder|InsecureRenegotiation|OCSP(?:DefaultResponder|Enable|OverrideResponder|ResponderTimeout|ResponseMaxAge|ResponseTimeSkew|UseRequestNonce)|OpenSSLConfCmd|Options|PassPhraseDialog|Protocol|Proxy(?:CACertificateFile|CACertificatePath|CARevocation(
?:Check|File|Path)|CheckPeer(?:CN|Expire|Name)|CipherSuite|Engine|MachineCertificate(?:ChainFile|File|Path)|Protocol|Verify|VerifyDepth)|RandomSeed|RenegBufferSize|Require|RequireSSL|SRPUnknownUserSeed|SRPVerifierFile|Session(?:Cache|CacheTimeout|TicketKeyFile|Tickets)|Stapling(?:Cache|ErrorCacheTimeout|FakeTryLater|ForceURL|ResponderTimeout|ResponseMaxAge|ResponseTimeSkew|ReturnResponderErrors|StandardCacheTimeout)|StrictSNIVHostCheck|UseStapling|UserName|VerifyClient|VerifyDepth)|Satisfy|ScoreBoardFile|Script(?:Alias|AliasMatch|InterpreterSource|Log|LogBuffer|LogLength|Sock)?|SecureListen|SeeRequestTail|SendBufferSize|Server(?:Admin|Alias|Limit|Name|Path|Root|Signature|Tokens)|Session(?:Cookie(?:Name|Name2|Remove)|Crypto(?:Cipher|Driver|Passphrase|PassphraseFile)|DBD(?:CookieName|CookieName2|CookieRemove|DeleteLabel|InsertLabel|PerUser|SelectLabel|UpdateLabel)|Env|Exclude|Header|Include|MaxAge)?|SetEnv|SetEnvIf|SetEnvIfExpr|SetEnvIfNoCase|SetHandler|SetInputFilter|SetOutputFilter|StartServers|StartThreads|Substitute|Suexec|SuexecUserGroup|ThreadLimit|ThreadStackSize|ThreadsPerChild|TimeOut|TraceEnable|TransferLog|TypesConfig|UnDefine|UndefMacro|UnsetEnv|Use|UseCanonicalName|UseCanonicalPhysicalPort|User|UserDir|VHostCGIMode|VHostCGIPrivs|VHostGroup|VHostPrivs|VHostSecure|VHostUser|Virtual(?:DocumentRoot|ScriptAlias)(?:IP)?|WatchdogInterval|XBitHack|xml2EncAlias|xml2EncDefault|xml2StartParse)\b/im,lookbehind:!0,alias:"property"},"directive-block":{pattern:/<\/?\b(?:Auth[nz]ProviderAlias|Directory|DirectoryMatch|Else|ElseIf|Files|FilesMatch|If|IfDefine|IfModule|IfVersion|Limit|LimitExcept|Location|LocationMatch|Macro|Proxy|Require(?:All|Any|None)|VirtualHost)\b.*>/i,inside:{"directive-block":{pattern:/^<\/?\w+/,inside:{punctuation:/^<\/?/},alias:"tag"},"directive-block-parameter":{pattern:/.*[^>]/,inside:{punctuation:/:/,string:{pattern:/("|').*\1/,inside:{variable:/[$%]\{?(?:\w\.?[-+:]?)+\}?/}}},alias:"attr-value"},punctuation:/>/},alias:"tag"},"directive-flags":{p
attern:/\[(?:[\w=],?)+\]/,alias:"keyword"},string:{pattern:/("|').*\1/,inside:{variable:/[$%]\{?(?:\w\.?[-+:]?)+\}?/}},variable:/[$%]\{?(?:\w\.?[-+:]?)+\}?/,regex:/\^?.*\$|\^.*\$?/}}e.exports=t,t.displayName="apacheconf",t.aliases=[]},10433(e,t,n){"use strict";var r=n(11114);function i(e){e.register(r),function(e){var t=/\b(?:abstract|activate|and|any|array|as|asc|autonomous|begin|bigdecimal|blob|boolean|break|bulk|by|byte|case|cast|catch|char|class|collect|commit|const|continue|currency|date|datetime|decimal|default|delete|desc|do|double|else|end|enum|exception|exit|export|extends|final|finally|float|for|from|global|goto|group|having|hint|if|implements|import|in|inner|insert|instanceof|int|integer|interface|into|join|like|limit|list|long|loop|map|merge|new|not|null|nulls|number|object|of|on|or|outer|override|package|parallel|pragma|private|protected|public|retrieve|return|rollback|select|set|short|sObject|sort|static|string|super|switch|synchronized|system|testmethod|then|this|throw|time|transaction|transient|trigger|try|undelete|update|upsert|using|virtual|void|webservice|when|where|while|get(?=\s*[{};])|(?:after|before)(?=\s+[a-z])|(?:inherited|with|without)\s+sharing)\b/i,n=/\b(?:(?=[a-z_]\w*\s*[<\[])|(?!))[A-Z_]\w*(?:\s*\.\s*[A-Z_]\w*)*\b(?:\s*(?:\[\s*\]|<(?:[^<>]|<(?:[^<>]|<[^<>]*>)*>)*>))*/.source.replace(//g,function(){return t.source});function r(e){return RegExp(e.replace(//g,function(){return n}),"i")}var 
i={keyword:t,punctuation:/[()\[\]{};,:.<>]/};e.languages.apex={comment:e.languages.clike.comment,string:e.languages.clike.string,sql:{pattern:/((?:[=,({:]|\breturn)\s*)\[[^\[\]]*\]/i,lookbehind:!0,greedy:!0,alias:"language-sql",inside:e.languages.sql},annotation:{pattern:/@\w+\b/,alias:"punctuation"},"class-name":[{pattern:r(/(\b(?:class|enum|extends|implements|instanceof|interface|new|trigger\s+\w+\s+on)\s+)/.source),lookbehind:!0,inside:i},{pattern:r(/(\(\s*)(?=\s*\)\s*[\w(])/.source),lookbehind:!0,inside:i},{pattern:r(/(?=\s*\w+\s*[;=,(){:])/.source),inside:i}],trigger:{pattern:/(\btrigger\s+)\w+\b/i,lookbehind:!0,alias:"class-name"},keyword:t,function:/\b[a-z_]\w*(?=\s*\()/i,boolean:/\b(?:false|true)\b/i,number:/(?:\B\.\d+|\b\d+(?:\.\d+|L)?)\b/i,operator:/[!=](?:==?)?|\?\.?|&&|\|\||--|\+\+|[-+*/^&|]=?|:|<{1,3}=?/,punctuation:/[()\[\]{};,.]/}}(e)}e.exports=i,i.displayName="apex",i.aliases=[]},84039(e){"use strict";function t(e){e.languages.apl={comment:/(?:⍝|#[! ]).*$/m,string:{pattern:/'(?:[^'\r\n]|'')*'/,greedy:!0},number:/¯?(?:\d*\.?\b\d+(?:e[+¯]?\d+)?|¯|∞)(?:j¯?(?:(?:\d+(?:\.\d+)?|\.\d+)(?:e[+¯]?\d+)?|¯|∞))?/i,statement:/:[A-Z][a-z][A-Za-z]*\b/,"system-function":{pattern:/⎕[A-Z]+/i,alias:"function"},constant:/[⍬⌾#⎕⍞]/,function:/[-+×÷⌈⌊∣|⍳⍸?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⊆⊇⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⍯↗¤→]/,"monadic-operator":{pattern:/[\\\/⌿⍀¨⍨⌶&∥]/,alias:"operator"},"dyadic-operator":{pattern:/[.⍣⍠⍤∘⌸@⌺⍥]/,alias:"operator"},assignment:{pattern:/←/,alias:"keyword"},punctuation:/[\[;\]()◇⋄]/,dfn:{pattern:/[{}⍺⍵⍶⍹∇⍫:]/,alias:"builtin"}}}e.exports=t,t.displayName="apl",t.aliases=[]},71336(e){"use strict";function t(e){e.languages.applescript={comment:[/\(\*(?:\(\*(?:[^*]|\*(?!\)))*\*\)|(?!\(\*)[\s\S])*?\*\)/,/--.+/,/#.+/],string:/"(?:\\.|[^"\\\r\n])*"/,number:/(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e-?\d+)?\b/i,operator:[/[&=≠≤≥*+\-\/÷^]|[<>]=?/,/\b(?:(?:start|begin|end)s? 
with|(?:(?:does not|doesn't) contain|contains?)|(?:is|isn't|is not) (?:in|contained by)|(?:(?:is|isn't|is not) )?(?:greater|less) than(?: or equal)?(?: to)?|(?:(?:does not|doesn't) come|comes) (?:before|after)|(?:is|isn't|is not) equal(?: to)?|(?:(?:does not|doesn't) equal|equals|equal to|isn't|is not)|(?:a )?(?:ref(?: to)?|reference to)|(?:and|or|div|mod|as|not))\b/],keyword:/\b(?:about|above|after|against|apart from|around|aside from|at|back|before|beginning|behind|below|beneath|beside|between|but|by|considering|continue|copy|does|eighth|else|end|equal|error|every|exit|false|fifth|first|for|fourth|from|front|get|given|global|if|ignoring|in|instead of|into|is|it|its|last|local|me|middle|my|ninth|of|on|onto|out of|over|prop|property|put|repeat|return|returning|second|set|seventh|since|sixth|some|tell|tenth|that|the|then|third|through|thru|timeout|times|to|transaction|true|try|until|where|while|whose|with|without)\b/,class:{pattern:/\b(?:alias|application|boolean|class|constant|date|file|integer|list|number|POSIX file|real|record|reference|RGB color|script|text|centimetres|centimeters|feet|inches|kilometres|kilometers|metres|meters|miles|yards|square feet|square kilometres|square kilometers|square metres|square meters|square miles|square yards|cubic centimetres|cubic centimeters|cubic feet|cubic inches|cubic metres|cubic meters|cubic yards|gallons|litres|liters|quarts|grams|kilograms|ounces|pounds|degrees Celsius|degrees Fahrenheit|degrees Kelvin)\b/,alias:"builtin"},punctuation:/[{}():,¬«»《》]/}}e.exports=t,t.displayName="applescript",t.aliases=[]},4481(e){"use strict";function 
t(e){e.languages.aql={comment:/\/\/.*|\/\*[\s\S]*?\*\//,property:{pattern:/([{,]\s*)(?:(?!\d)\w+|(["'´`])(?:(?!\2)[^\\\r\n]|\\.)*\2)(?=\s*:)/,lookbehind:!0,greedy:!0},string:{pattern:/(["'´`])(?:(?!\1)[^\\\r\n]|\\.)*\1/,greedy:!0},variable:/@@?\w+/,keyword:[{pattern:/(\bWITH\s+)COUNT(?=\s+INTO\b)/i,lookbehind:!0},/\b(?:AGGREGATE|ALL|AND|ANY|ASC|COLLECT|DESC|DISTINCT|FILTER|FOR|GRAPH|IN|INBOUND|INSERT|INTO|K_PATHS|K_SHORTEST_PATHS|LET|LIKE|LIMIT|NONE|NOT|NULL|OR|OUTBOUND|REMOVE|REPLACE|RETURN|SHORTEST_PATH|SORT|UPDATE|UPSERT|WINDOW|WITH)\b/i,{pattern:/(^|[^\w.[])(?:KEEP|PRUNE|SEARCH|TO)\b/i,lookbehind:!0},{pattern:/(^|[^\w.[])(?:CURRENT|NEW|OLD)\b/,lookbehind:!0},{pattern:/\bOPTIONS(?=\s*\{)/i}],function:/\b(?!\d)\w+(?=\s*\()/,boolean:/\b(?:true|false)\b/i,range:{pattern:/\.\./,alias:"operator"},number:[/\b0b[01]+/i,/\b0x[0-9a-f]+/i,/(?:\B\.\d+|\b(?:0|[1-9]\d*)(?:\.\d+)?)(?:e[+-]?\d+)?/i],operator:/\*{2,}|[=!]~|[!=<>]=?|&&|\|\||[-+*/%]/,punctuation:/::|[?.:,;()[\]{}]/}}e.exports=t,t.displayName="aql",t.aliases=[]},2159(e,t,n){"use strict";var r=n(80096);function 
i(e){e.register(r),e.languages.arduino=e.languages.extend("cpp",{constant:/\b(?:DIGITAL_MESSAGE|FIRMATA_STRING|ANALOG_MESSAGE|REPORT_DIGITAL|REPORT_ANALOG|INPUT_PULLUP|SET_PIN_MODE|INTERNAL2V56|SYSTEM_RESET|LED_BUILTIN|INTERNAL1V1|SYSEX_START|INTERNAL|EXTERNAL|DEFAULT|OUTPUT|INPUT|HIGH|LOW)\b/,keyword:/\b(?:setup|if|else|while|do|for|return|in|instanceof|default|function|loop|goto|switch|case|new|try|throw|catch|finally|null|break|continue|boolean|bool|void|byte|word|string|String|array|int|long|integer|double)\b/,builtin:/\b(?:KeyboardController|MouseController|SoftwareSerial|EthernetServer|EthernetClient|LiquidCrystal|LiquidCrystal_I2C|RobotControl|GSMVoiceCall|EthernetUDP|EsploraTFT|HttpClient|RobotMotor|WiFiClient|GSMScanner|FileSystem|Scheduler|GSMServer|YunClient|YunServer|IPAddress|GSMClient|GSMModem|Keyboard|Ethernet|Console|GSMBand|Esplora|Stepper|Process|WiFiUDP|GSM_SMS|Mailbox|USBHost|Firmata|PImage|Client|Server|GSMPIN|FileIO|Bridge|Serial|EEPROM|Stream|Mouse|Audio|Servo|File|Task|GPRS|WiFi|Wire|TFT|GSM|SPI|SD|runShellCommandAsynchronously|analogWriteResolution|retrieveCallingNumber|printFirmwareVersion|analogReadResolution|sendDigitalPortPair|noListenOnLocalhost|readJoystickButton|setFirmwareVersion|readJoystickSwitch|scrollDisplayRight|getVoiceCallStatus|scrollDisplayLeft|writeMicroseconds|delayMicroseconds|beginTransmission|getSignalStrength|runAsynchronously|getAsynchronously|listenOnLocalhost|getCurrentCarrier|readAccelerometer|messageAvailable|sendDigitalPorts|lineFollowConfig|countryNameWrite|runShellCommand|readStringUntil|rewindDirectory|readTemperature|setClockDivider|readLightSensor|endTransmission|analogReference|detachInterrupt|countryNameRead|attachInterrupt|encryptionType|readBytesUntil|robotNameWrite|readMicrophone|robotNameRead|cityNameWrite|userNameWrite|readJoystickY|readJoystickX|mouseReleased|openNextFile|scanNetworks|noInterrupts|digitalWrite|beginSpeaker|mousePressed|isActionDone|mouseDragged|displayLogos|noAutoscroll|addParameter|
remoteNumber|getModifiers|keyboardRead|userNameRead|waitContinue|processInput|parseCommand|printVersion|readNetworks|writeMessage|blinkVersion|cityNameRead|readMessage|setDataMode|parsePacket|isListening|setBitOrder|beginPacket|isDirectory|motorsWrite|drawCompass|digitalRead|clearScreen|serialEvent|rightToLeft|setTextSize|leftToRight|requestFrom|keyReleased|compassRead|analogWrite|interrupts|WiFiServer|disconnect|playMelody|parseFloat|autoscroll|getPINUsed|setPINUsed|setTimeout|sendAnalog|readSlider|analogRead|beginWrite|createChar|motorsStop|keyPressed|tempoWrite|readButton|subnetMask|debugPrint|macAddress|writeGreen|randomSeed|attachGPRS|readString|sendString|remotePort|releaseAll|mouseMoved|background|getXChange|getYChange|answerCall|getResult|voiceCall|endPacket|constrain|getSocket|writeJSON|getButton|available|connected|findUntil|readBytes|exitValue|readGreen|writeBlue|startLoop|isPressed|sendSysex|pauseMode|gatewayIP|setCursor|getOemKey|tuneWrite|noDisplay|loadImage|switchPIN|onRequest|onReceive|changePIN|playFile|noBuffer|parseInt|overflow|checkPIN|knobRead|beginTFT|bitClear|updateIR|bitWrite|position|writeRGB|highByte|writeRed|setSpeed|readBlue|noStroke|remoteIP|transfer|shutdown|hangCall|beginSMS|endWrite|attached|maintain|noCursor|checkReg|checkPUK|shiftOut|isValid|shiftIn|pulseIn|connect|println|localIP|pinMode|getIMEI|display|noBlink|process|getBand|running|beginSD|drawBMP|lowByte|setBand|release|bitRead|prepare|pointTo|readRed|setMode|noFill|remove|listen|stroke|detach|attach|noTone|exists|buffer|height|bitSet|circle|config|cursor|random|IRread|setDNS|endSMS|getKey|micros|millis|begin|print|write|ready|flush|width|isPIN|blink|clear|press|mkdir|rmdir|close|point|yield|image|BSSID|click|delay|read|text|move|peek|beep|rect|line|open|seek|fill|size|turn|stop|home|find|step|tone|sqrt|RSSI|SSID|end|bit|tan|cos|sin|pow|map|abs|max|min|get|run|put)\b/})}e.exports=i,i.displayName="arduino",i.aliases=[]},60274(e){"use strict";function 
t(e){e.languages.arff={comment:/%.*/,string:{pattern:/(["'])(?:\\.|(?!\1)[^\\\r\n])*\1/,greedy:!0},keyword:/@(?:attribute|data|end|relation)\b/i,number:/\b\d+(?:\.\d+)?\b/,punctuation:/[{},]/}}e.exports=t,t.displayName="arff",t.aliases=[]},18738(e){"use strict";function t(e){!function(e){var t={pattern:/(^[ \t]*)\[(?!\[)(?:(["'$`])(?:(?!\2)[^\\]|\\.)*\2|\[(?:[^\[\]\\]|\\.)*\]|[^\[\]\\"'$`]|\\.)*\]/m,lookbehind:!0,inside:{quoted:{pattern:/([$`])(?:(?!\1)[^\\]|\\.)*\1/,inside:{punctuation:/^[$`]|[$`]$/}},interpreted:{pattern:/'(?:[^'\\]|\\.)*'/,inside:{punctuation:/^'|'$/}},string:/"(?:[^"\\]|\\.)*"/,variable:/\w+(?==)/,punctuation:/^\[|\]$|,/,operator:/=/,"attr-value":/(?!^\s+$).+/}},n=e.languages.asciidoc={"comment-block":{pattern:/^(\/{4,})(?:\r?\n|\r)(?:[\s\S]*(?:\r?\n|\r))??\1/m,alias:"comment"},table:{pattern:/^\|={3,}(?:(?:\r?\n|\r(?!\n)).*)*?(?:\r?\n|\r)\|={3,}$/m,inside:{specifiers:{pattern:/(?!\|)(?:(?:(?:\d+(?:\.\d+)?|\.\d+)[+*])?(?:[<^>](?:\.[<^>])?|\.[<^>])?[a-z]*)(?=\|)/,alias:"attr-value"},punctuation:{pattern:/(^|[^\\])[|!]=*/,lookbehind:!0}}},"passthrough-block":{pattern:/^(\+{4,})(?:\r?\n|\r)(?:[\s\S]*(?:\r?\n|\r))??\1$/m,inside:{punctuation:/^\++|\++$/}},"literal-block":{pattern:/^(-{4,}|\.{4,})(?:\r?\n|\r)(?:[\s\S]*(?:\r?\n|\r))??\1$/m,inside:{punctuation:/^(?:-+|\.+)|(?:-+|\.+)$/}},"other-block":{pattern:/^(--|\*{4,}|_{4,}|={4,})(?:\r?\n|\r)(?:[\s\S]*(?:\r?\n|\r))??\1$/m,inside:{punctuation:/^(?:-+|\*+|_+|=+)|(?:-+|\*+|_+|=+)$/}},"list-punctuation":{pattern:/(^[ \t]*)(?:-|\*{1,5}|\.{1,5}|(?:[a-z]|\d+)\.|[xvi]+\))(?= )/im,lookbehind:!0,alias:"punctuation"},"list-label":{pattern:/(^[ \t]*)[a-z\d].+(?::{2,4}|;;)(?=\s)/im,lookbehind:!0,alias:"symbol"},"indented-block":{pattern:/((\r?\n|\r)\2)([ \t]+)\S.*(?:(?:\r?\n|\r)\3.+)*(?=\2{2}|$)/,lookbehind:!0},comment:/^\/\/.*/m,title:{pattern:/^.+(?:\r?\n|\r)(?:={3,}|-{3,}|~{3,}|\^{3,}|\+{3,})$|^={1,5} 
.+|^\.(?![\s.]).*/m,alias:"important",inside:{punctuation:/^(?:\.|=+)|(?:=+|-+|~+|\^+|\++)$/}},"attribute-entry":{pattern:/^:[^:\r\n]+:(?: .*?(?: \+(?:\r?\n|\r).*?)*)?$/m,alias:"tag"},attributes:t,hr:{pattern:/^'{3,}$/m,alias:"punctuation"},"page-break":{pattern:/^<{3,}$/m,alias:"punctuation"},admonition:{pattern:/^(?:TIP|NOTE|IMPORTANT|WARNING|CAUTION):/m,alias:"keyword"},callout:[{pattern:/(^[ \t]*)/m,lookbehind:!0,alias:"symbol"},{pattern:/<\d+>/,alias:"symbol"}],macro:{pattern:/\b[a-z\d][a-z\d-]*::?(?:[^\s\[\]]*\[(?:[^\]\\"']|(["'])(?:(?!\1)[^\\]|\\.)*\1|\\.)*\])/,inside:{function:/^[a-z\d-]+(?=:)/,punctuation:/^::?/,attributes:{pattern:/(?:\[(?:[^\]\\"']|(["'])(?:(?!\1)[^\\]|\\.)*\1|\\.)*\])/,inside:t.inside}}},inline:{pattern:/(^|[^\\])(?:(?:\B\[(?:[^\]\\"']|(["'])(?:(?!\2)[^\\]|\\.)*\2|\\.)*\])?(?:\b_(?!\s)(?: _|[^_\\\r\n]|\\.)+(?:(?:\r?\n|\r)(?: _|[^_\\\r\n]|\\.)+)*_\b|\B``(?!\s).+?(?:(?:\r?\n|\r).+?)*''\B|\B`(?!\s)(?:[^`'\s]|\s+\S)+['`]\B|\B(['*+#])(?!\s)(?: \3|(?!\3)[^\\\r\n]|\\.)+(?:(?:\r?\n|\r)(?: \3|(?!\3)[^\\\r\n]|\\.)+)*\3\B)|(?:\[(?:[^\]\\"']|(["'])(?:(?!\4)[^\\]|\\.)*\4|\\.)*\])?(?:(__|\*\*|\+\+\+?|##|\$\$|[~^]).+?(?:(?:\r?\n|\r).+?)*\5|\{[^}\r\n]+\}|\[\[\[?.+?(?:(?:\r?\n|\r).+?)*\]?\]\]|<<.+?(?:(?:\r?\n|\r).+?)*>>|\(\(\(?.+?(?:(?:\r?\n|\r).+?)*\)?\)\)))/m,lookbehind:!0,inside:{attributes:t,url:{pattern:/^(?:\[\[\[?.+?\]?\]\]|<<.+?>>)$/,inside:{punctuation:/^(?:\[\[\[?|<<)|(?:\]\]\]?|>>)$/}},"attribute-ref":{pattern:/^\{.+\}$/,inside:{variable:{pattern:/(^\{)[a-z\d,+_-]+/,lookbehind:!0},operator:/^[=?!#%@$]|!(?=[:}])/,punctuation:/^\{|\}$|::?/}},italic:{pattern:/^(['_])[\s\S]+\1$/,inside:{punctuation:/^(?:''?|__?)|(?:''?|__?)$/}},bold:{pattern:/^\*[\s\S]+\*$/,inside:{punctuation:/^\*\*?|\*\*?$/}},punctuation:/^(?:``?|\+{1,3}|##?|\$\$|[~^]|\(\(\(?)|(?:''?|\+{1,3}|##?|\$\$|[~^`]|\)?\)\))$/}},replacement:{pattern:/\((?:C|TM|R)\)/,alias:"builtin"},entity:/&#?[\da-z]{1,8};/i,"line-continuation":{pattern:/(^| 
)\+$/m,lookbehind:!0,alias:"punctuation"}};function r(e){e=e.split(" ");for(var t={},r=0,i=e.length;r/i,alias:"tag",inside:{"page-directive":{pattern:/<%\s*@\s*(?:Assembly|Control|Implements|Import|Master(?:Type)?|OutputCache|Page|PreviousPageType|Reference|Register)?|%>/i,alias:"tag"},rest:e.languages.markup.tag.inside}},directive:{pattern:/<%.*%>/i,alias:"tag",inside:{directive:{pattern:/<%\s*?[$=%#:]{0,2}|%>/i,alias:"tag"},rest:e.languages.csharp}}}),e.languages.aspnet.tag.pattern=/<(?!%)\/?[^\s>\/]+(?:\s+[^\s>\/=]+(?:=(?:("|')(?:\\[\s\S]|(?!\1)[^\\])*\1|[^\s'">=]+))?)*\s*\/?>/i,e.languages.insertBefore("inside","punctuation",{directive:e.languages.aspnet.directive},e.languages.aspnet.tag.inside["attr-value"]),e.languages.insertBefore("aspnet","comment",{"asp-comment":{pattern:/<%--[\s\S]*?--%>/,alias:["asp","comment"]}}),e.languages.insertBefore("aspnet",e.languages.javascript?"script":"tag",{"asp-script":{pattern:/(]*>)[\s\S]*?(?=<\/script>)/i,lookbehind:!0,alias:["asp","script"],inside:e.languages.csharp||{}}})}e.exports=i,i.displayName="aspnet",i.aliases=[]},6681(e){"use strict";function t(e){e.languages.autohotkey={comment:[{pattern:/(^|\s);.*/,lookbehind:!0},{pattern:/(^[\t ]*)\/\*(?:[\r\n](?![ \t]*\*\/)|[^\r\n])*(?:[\r\n][ \t]*\*\/)?/m,lookbehind:!0,greedy:!0}],tag:{pattern:/^([ \t]*)[^\s,`":]+(?=:[ 
\t]*$)/m,lookbehind:!0},string:/"(?:[^"\n\r]|"")*"/m,variable:/%\w+%/,number:/\b0x[\dA-Fa-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[Ee]-?\d+)?/,operator:/\?|\/\/?=?|:=|\|[=|]?|&[=&]?|\+[=+]?|-[=-]?|\*[=*]?|<(?:<=?|>|=)?|>>?=?|[.^!=~]=?|\b(?:AND|NOT|OR)\b/,boolean:/\b(?:true|false)\b/,selector:/\b(?:AutoTrim|BlockInput|Break|Click|ClipWait|Continue|Control|ControlClick|ControlFocus|ControlGet|ControlGetFocus|ControlGetPos|ControlGetText|ControlMove|ControlSend|ControlSendRaw|ControlSetText|CoordMode|Critical|DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|DriveSpaceFree|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|FileDelete|FileEncoding|FileGetAttrib|FileGetShortcut|FileGetSize|FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|GuiControlGet|Hotkey|ImageSearch|IniDelete|IniRead|IniWrite|Input|InputBox|KeyWait|ListHotkeys|ListLines|ListVars|Loop|Menu|MouseClick|MouseClickDrag|MouseGetPos|MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|PixelSearch|PostMessage|Process|Progress|Random|RegDelete|RegRead|RegWrite|Reload|Repeat|Return|Run|RunAs|RunWait|Send|SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|SetBatchLines|SetCapslockState|SetControlDelay|SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|SetMouseDelay|SetNumlockState|SetRegView|SetScrollLockState|SetStoreCapslockMode|SetTimer|SetTitleMatchMode|SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|StringGetPos|StringLeft|StringLen|StringLower|StringMid|StringReplace|StringRight|StringSplit|StringTrimLeft|StringTrimRight|Str
ingUpper|Suspend|SysGet|Thread|ToolTip|Transform|TrayTip|URLDownloadToFile|WinActivate|WinActivateBottom|WinClose|WinGet|WinGetActiveStats|WinGetActiveTitle|WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinHide|WinKill|WinMaximize|WinMenuSelectItem|WinMinimize|WinMinimizeAll|WinMinimizeAllUndo|WinMove|WinRestore|WinSet|WinSetTitle|WinShow|WinWait|WinWaitActive|WinWaitClose|WinWaitNotActive)\b/i,constant:/\b(?:a_ahkpath|a_ahkversion|a_appdata|a_appdatacommon|a_autotrim|a_batchlines|a_caretx|a_carety|a_computername|a_controldelay|a_cursor|a_dd|a_ddd|a_dddd|a_defaultmousespeed|a_desktop|a_desktopcommon|a_detecthiddentext|a_detecthiddenwindows|a_endchar|a_eventinfo|a_exitreason|a_fileencoding|a_formatfloat|a_formatinteger|a_gui|a_guievent|a_guicontrol|a_guicontrolevent|a_guiheight|a_guiwidth|a_guix|a_guiy|a_hour|a_iconfile|a_iconhidden|a_iconnumber|a_icontip|a_index|a_ipaddress1|a_ipaddress2|a_ipaddress3|a_ipaddress4|a_is64bitos|a_isadmin|a_iscompiled|a_iscritical|a_ispaused|a_issuspended|a_isunicode|a_keydelay|a_language|a_lasterror|a_linefile|a_linenumber|a_loopfield|a_loopfileattrib|a_loopfiledir|a_loopfileext|a_loopfilefullpath|a_loopfilelongpath|a_loopfilename|a_loopfileshortname|a_loopfileshortpath|a_loopfilesize|a_loopfilesizekb|a_loopfilesizemb|a_loopfiletimeaccessed|a_loopfiletimecreated|a_loopfiletimemodified|a_loopreadline|a_loopregkey|a_loopregname|a_loopregsubkey|a_loopregtimemodified|a_loopregtype|a_mday|a_min|a_mm|a_mmm|a_mmmm|a_mon|a_mousedelay|a_msec|a_mydocuments|a_now|a_nowutc|a_numbatchlines|a_ostype|a_osversion|a_priorhotkey|a_priorkey|programfiles|a_programfiles|a_programs|a_programscommon|a_ptrsize|a_regview|a_screendpi|a_screenheight|a_screenwidth|a_scriptdir|a_scriptfullpath|a_scripthwnd|a_scriptname|a_sec|a_space|a_startmenu|a_startmenucommon|a_startup|a_startupcommon|a_stringcasesense|a_tab|a_temp|a_thisfunc|a_thishotkey|a_thislabel|a_thismenu|a_thismenuitem|a_thismenuitempos|a_tickcount|a_timeidle|a_timeidlephysical|a_timesincepriorhotkey|a_ti
mesincethishotkey|a_titlematchmode|a_titlematchmodespeed|a_username|a_wday|a_windelay|a_windir|a_workingdir|a_yday|a_year|a_yweek|a_yyyy|clipboard|clipboardall|comspec|errorlevel)\b/i,builtin:/\b(?:abs|acos|asc|asin|atan|ceil|chr|class|comobjactive|comobjarray|comobjconnect|comobjcreate|comobjerror|comobjflags|comobjget|comobjquery|comobjtype|comobjvalue|cos|dllcall|exp|fileexist|Fileopen|floor|format|il_add|il_create|il_destroy|instr|substr|isfunc|islabel|IsObject|ln|log|lv_add|lv_delete|lv_deletecol|lv_getcount|lv_getnext|lv_gettext|lv_insert|lv_insertcol|lv_modify|lv_modifycol|lv_setimagelist|ltrim|rtrim|mod|onmessage|numget|numput|registercallback|regexmatch|regexreplace|round|sin|tan|sqrt|strlen|strreplace|sb_seticon|sb_setparts|sb_settext|strsplit|tv_add|tv_delete|tv_getchild|tv_getcount|tv_getnext|tv_get|tv_getparent|tv_getprev|tv_getselection|tv_gettext|tv_modify|varsetcapacity|winactive|winexist|__New|__Call|__Get|__Set)\b/i,symbol:/\b(?:alt|altdown|altup|appskey|backspace|browser_back|browser_favorites|browser_forward|browser_home|browser_refresh|browser_search|browser_stop|bs|capslock|ctrl|ctrlbreak|ctrldown|ctrlup|del|delete|down|end|enter|esc|escape|f1|f10|f11|f12|f13|f14|f15|f16|f17|f18|f19|f2|f20|f21|f22|f23|f24|f3|f4|f5|f6|f7|f8|f9|home|ins|insert|joy1|joy10|joy11|joy12|joy13|joy14|joy15|joy16|joy17|joy18|joy19|joy2|joy20|joy21|joy22|joy23|joy24|joy25|joy26|joy27|joy28|joy29|joy3|joy30|joy31|joy32|joy4|joy5|joy6|joy7|joy8|joy9|joyaxes|joybuttons|joyinfo|joyname|joypov|joyr|joyu|joyv|joyx|joyy|joyz|lalt|launch_app1|launch_app2|launch_mail|launch_media|lbutton|lcontrol|lctrl|left|lshift|lwin|lwindown|lwinup|mbutton|media_next|media_play_pause|media_prev|media_stop|numlock|numpad0|numpad1|numpad2|numpad3|numpad4|numpad5|numpad6|numpad7|numpad8|numpad9|numpadadd|numpadclear|numpaddel|numpaddiv|numpaddot|numpaddown|numpadend|numpadenter|numpadhome|numpadins|numpadleft|numpadmult|numpadpgdn|numpadpgup|numpadright|numpadsub|numpadup|pgdn|pgup|printscreen|ra
lt|rbutton|rcontrol|rctrl|right|rshift|rwin|rwindown|rwinup|scrolllock|shift|shiftdown|shiftup|space|tab|up|volume_down|volume_mute|volume_up|wheeldown|wheelleft|wheelright|wheelup|xbutton1|xbutton2)\b/i,important:/#\b(?:AllowSameLineComments|ClipboardTimeout|CommentFlag|DerefChar|ErrorStdOut|EscapeChar|HotkeyInterval|HotkeyModifierTimeout|Hotstring|If|IfTimeout|IfWinActive|IfWinExist|IfWinNotActive|IfWinNotExist|Include|IncludeAgain|InputLevel|InstallKeybdHook|InstallMouseHook|KeyHistory|MaxHotkeysPerInterval|MaxMem|MaxThreads|MaxThreadsBuffer|MaxThreadsPerHotkey|MenuMaskKey|NoEnv|NoTrayIcon|Persistent|SingleInstance|UseHook|Warn|WinActivateForce)\b/i,keyword:/\b(?:Abort|AboveNormal|Add|ahk_class|ahk_exe|ahk_group|ahk_id|ahk_pid|All|Alnum|Alpha|AltSubmit|AltTab|AltTabAndMenu|AltTabMenu|AltTabMenuDismiss|AlwaysOnTop|AutoSize|Background|BackgroundTrans|BelowNormal|between|BitAnd|BitNot|BitOr|BitShiftLeft|BitShiftRight|BitXOr|Bold|Border|Button|ByRef|Checkbox|Checked|CheckedGray|Choose|ChooseString|Close|Color|ComboBox|Contains|ControlList|Count|Date|DateTime|Days|DDL|Default|DeleteAll|Delimiter|Deref|Destroy|Digit|Disable|Disabled|DropDownList|Edit|Eject|Else|Enable|Enabled|Error|Exist|Expand|ExStyle|FileSystem|First|Flash|Float|FloatFast|Focus|Font|for|global|Grid|Group|GroupBox|GuiClose|GuiContextMenu|GuiDropFiles|GuiEscape|GuiSize|Hdr|Hidden|Hide|High|HKCC|HKCR|HKCU|HKEY_CLASSES_ROOT|HKEY_CURRENT_CONFIG|HKEY_CURRENT_USER|HKEY_LOCAL_MACHINE|HKEY_USERS|HKLM|HKU|Hours|HScroll|Icon|IconSmall|ID|IDLast|If|IfEqual|IfExist|IfGreater|IfGreaterOrEqual|IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|IfWinNotExist|Ignore|ImageList|in|Integer|IntegerFast|Interrupt|is|italic|Join|Label|LastFound|LastFoundExist|Limit|Lines|List|ListBox|ListView|local|Lock|Logoff|Low|Lower|Lowercase|MainWindow|Margin|Maximize|MaximizeBox|MaxSize|Minimize|MinimizeBox|MinMax|MinSize|Minutes|MonthCal|Mouse|Move|Multi|NA|No|NoActivat
e|NoDefault|NoHide|NoIcon|NoMainWindow|norm|Normal|NoSort|NoSortHdr|NoStandard|Not|NoTab|NoTimers|Number|Off|Ok|On|OwnDialogs|Owner|Parse|Password|Picture|Pixel|Pos|Pow|Priority|ProcessName|Radio|Range|Read|ReadOnly|Realtime|Redraw|REG_BINARY|REG_DWORD|REG_EXPAND_SZ|REG_MULTI_SZ|REG_SZ|Region|Relative|Rename|Report|Resize|Restore|Retry|RGB|Screen|Seconds|Section|Serial|SetLabel|ShiftAltTab|Show|Single|Slider|SortDesc|Standard|static|Status|StatusBar|StatusCD|strike|Style|Submit|SysMenu|Tab2|TabStop|Text|Theme|Tile|ToggleCheck|ToggleEnable|ToolWindow|Top|Topmost|TransColor|Transparent|Tray|TreeView|TryAgain|Throw|Try|Catch|Finally|Type|UnCheck|underline|Unicode|Unlock|Until|UpDown|Upper|Uppercase|UseErrorLevel|Vis|VisFirst|Visible|VScroll|Wait|WaitClose|WantCtrlA|WantF2|WantReturn|While|Wrap|Xdigit|xm|xp|xs|Yes|ym|yp|ys)\b/i,function:/[^(); \t,\n+*\-=?>:\\\/<&%\[\]]+(?=\()/m,punctuation:/[{}[\]():,]/}}e.exports=t,t.displayName="autohotkey",t.aliases=[]},53358(e){"use strict";function t(e){e.languages.autoit={comment:[/;.*/,{pattern:/(^[\t ]*)#(?:comments-start|cs)[\s\S]*?^[ \t]*#(?:comments-end|ce)/m,lookbehind:!0}],url:{pattern:/(^[\t ]*#include\s+)(?:<[^\r\n>]+>|"[^\r\n"]+")/m,lookbehind:!0},string:{pattern:/(["'])(?:\1\1|(?!\1)[^\r\n])*\1/,greedy:!0,inside:{variable:/([%$@])\w+\1/}},directive:{pattern:/(^[\t ]*)#\w+/m,lookbehind:!0,alias:"keyword"},function:/\b\w+(?=\()/,variable:/[$@]\w+/,keyword:/\b(?:Case|Const|Continue(?:Case|Loop)|Default|Dim|Do|Else(?:If)?|End(?:Func|If|Select|Switch|With)|Enum|Exit(?:Loop)?|For|Func|Global|If|In|Local|Next|Null|ReDim|Select|Static|Step|Switch|Then|To|Until|Volatile|WEnd|While|With)\b/i,number:/\b(?:0x[\da-f]+|\d+(?:\.\d+)?(?:e[+-]?\d+)?)\b/i,boolean:/\b(?:True|False)\b/i,operator:/<[=>]?|[-+*\/=&>]=?|[?^]|\b(?:And|Or|Not)\b/i,punctuation:/[\[\]().,:]/}}e.exports=t,t.displayName="autoit",t.aliases=[]},6979(e){"use strict";function t(e){!function(e){var 
t="\\b(?:BASH|BASHOPTS|BASH_ALIASES|BASH_ARGC|BASH_ARGV|BASH_CMDS|BASH_COMPLETION_COMPAT_DIR|BASH_LINENO|BASH_REMATCH|BASH_SOURCE|BASH_VERSINFO|BASH_VERSION|COLORTERM|COLUMNS|COMP_WORDBREAKS|DBUS_SESSION_BUS_ADDRESS|DEFAULTS_PATH|DESKTOP_SESSION|DIRSTACK|DISPLAY|EUID|GDMSESSION|GDM_LANG|GNOME_KEYRING_CONTROL|GNOME_KEYRING_PID|GPG_AGENT_INFO|GROUPS|HISTCONTROL|HISTFILE|HISTFILESIZE|HISTSIZE|HOME|HOSTNAME|HOSTTYPE|IFS|INSTANCE|JOB|LANG|LANGUAGE|LC_ADDRESS|LC_ALL|LC_IDENTIFICATION|LC_MEASUREMENT|LC_MONETARY|LC_NAME|LC_NUMERIC|LC_PAPER|LC_TELEPHONE|LC_TIME|LESSCLOSE|LESSOPEN|LINES|LOGNAME|LS_COLORS|MACHTYPE|MAILCHECK|MANDATORY_PATH|NO_AT_BRIDGE|OLDPWD|OPTERR|OPTIND|ORBIT_SOCKETDIR|OSTYPE|PAPERSIZE|PATH|PIPESTATUS|PPID|PS1|PS2|PS3|PS4|PWD|RANDOM|REPLY|SECONDS|SELINUX_INIT|SESSION|SESSIONTYPE|SESSION_MANAGER|SHELL|SHELLOPTS|SHLVL|SSH_AUTH_SOCK|TERM|UID|UPSTART_EVENTS|UPSTART_INSTANCE|UPSTART_JOB|UPSTART_SESSION|USER|WINDOWID|XAUTHORITY|XDG_CONFIG_DIRS|XDG_CURRENT_DESKTOP|XDG_DATA_DIRS|XDG_GREETER_DATA_DIR|XDG_MENU_PREFIX|XDG_RUNTIME_DIR|XDG_SEAT|XDG_SEAT_PATH|XDG_SESSION_DESKTOP|XDG_SESSION_ID|XDG_SESSION_PATH|XDG_SESSION_TYPE|XDG_VTNR|XMODIFIERS)\\b",n={pattern:/(^(["']?)\w+\2)[ 
\t]+\S.*/,lookbehind:!0,alias:"punctuation",inside:null},r={bash:n,environment:{pattern:RegExp("\\$"+t),alias:"constant"},variable:[{pattern:/\$?\(\([\s\S]+?\)\)/,greedy:!0,inside:{variable:[{pattern:/(^\$\(\([\s\S]+)\)\)/,lookbehind:!0},/^\$\(\(/],number:/\b0x[\dA-Fa-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[Ee]-?\d+)?/,operator:/--|\+\+|\*\*=?|<<=?|>>=?|&&|\|\||[=!+\-*/%<>^&|]=?|[?~:]/,punctuation:/\(\(?|\)\)?|,|;/}},{pattern:/\$\((?:\([^)]+\)|[^()])+\)|`[^`]+`/,greedy:!0,inside:{variable:/^\$\(|^`|\)$|`$/}},{pattern:/\$\{[^}]+\}/,greedy:!0,inside:{operator:/:[-=?+]?|[!\/]|##?|%%?|\^\^?|,,?/,punctuation:/[\[\]]/,environment:{pattern:RegExp("(\\{)"+t),lookbehind:!0,alias:"constant"}}},/\$(?:\w+|[#?*!@$])/],entity:/\\(?:[abceEfnrtv\\"]|O?[0-7]{1,3}|x[0-9a-fA-F]{1,2}|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8})/};e.languages.bash={shebang:{pattern:/^#!\s*\/.*/,alias:"important"},comment:{pattern:/(^|[^"{\\$])#.*/,lookbehind:!0},"function-name":[{pattern:/(\bfunction\s+)[\w-]+(?=(?:\s*\(?:\s*\))?\s*\{)/,lookbehind:!0,alias:"function"},{pattern:/\b[\w-]+(?=\s*\(\s*\)\s*\{)/,alias:"function"}],"for-or-select":{pattern:/(\b(?:for|select)\s+)\w+(?=\s+in\s)/,alias:"variable",lookbehind:!0},"assign-left":{pattern:/(^|[\s;|&]|[<>]\()\w+(?=\+?=)/,inside:{environment:{pattern:RegExp("(^|[\\s;|&]|[<>]\\()"+t),lookbehind:!0,alias:"constant"}},alias:"variable",lookbehind:!0},string:[{pattern:/((?:^|[^<])<<-?\s*)(\w+)\s[\s\S]*?(?:\r?\n|\r)\2/,lookbehind:!0,greedy:!0,inside:r},{pattern:/((?:^|[^<])<<-?\s*)(["'])(\w+)\2\s[\s\S]*?(?:\r?\n|\r)\3/,lookbehind:!0,greedy:!0,inside:{bash:n}},{pattern:/(^|[^\\](?:\\\\)*)"(?:\\[\s\S]|\$\([^)]+\)|\$(?!\()|`[^`]+`|[^"\\`$])*"/,lookbehind:!0,greedy:!0,inside:r},{pattern:/(^|[^$\\])'[^']*'/,lookbehind:!0,greedy:!0},{pattern:/\$'(?:[^'\\]|\\[\s\S])*'/,greedy:!0,inside:{entity:r.entity}}],environment:{pattern:RegExp("\\$?"+t),alias:"constant"},variable:r.variable,function:{pattern:/(^|[\s;|&]|[<>]\()(?:add|apropos|apt|aptitude|apt-cache|apt-get|aspell|automysqlbac
kup|awk|basename|bash|bc|bconsole|bg|bzip2|cal|cat|cfdisk|chgrp|chkconfig|chmod|chown|chroot|cksum|clear|cmp|column|comm|composer|cp|cron|crontab|csplit|curl|cut|date|dc|dd|ddrescue|debootstrap|df|diff|diff3|dig|dir|dircolors|dirname|dirs|dmesg|du|egrep|eject|env|ethtool|expand|expect|expr|fdformat|fdisk|fg|fgrep|file|find|fmt|fold|format|free|fsck|ftp|fuser|gawk|git|gparted|grep|groupadd|groupdel|groupmod|groups|grub-mkconfig|gzip|halt|head|hg|history|host|hostname|htop|iconv|id|ifconfig|ifdown|ifup|import|install|ip|jobs|join|kill|killall|less|link|ln|locate|logname|logrotate|look|lpc|lpr|lprint|lprintd|lprintq|lprm|ls|lsof|lynx|make|man|mc|mdadm|mkconfig|mkdir|mke2fs|mkfifo|mkfs|mkisofs|mknod|mkswap|mmv|more|most|mount|mtools|mtr|mutt|mv|nano|nc|netstat|nice|nl|nohup|notify-send|npm|nslookup|op|open|parted|passwd|paste|pathchk|ping|pkill|pnpm|popd|pr|printcap|printenv|ps|pushd|pv|quota|quotacheck|quotactl|ram|rar|rcp|reboot|remsync|rename|renice|rev|rm|rmdir|rpm|rsync|scp|screen|sdiff|sed|sendmail|seq|service|sftp|sh|shellcheck|shuf|shutdown|sleep|slocate|sort|split|ssh|stat|strace|su|sudo|sum|suspend|swapon|sync|tac|tail|tar|tee|time|timeout|top|touch|tr|traceroute|tsort|tty|umount|uname|unexpand|uniq|units|unrar|unshar|unzip|update-grub|uptime|useradd|userdel|usermod|users|uudecode|uuencode|v|vdir|vi|vim|virsh|vmstat|wait|watch|wc|wget|whereis|which|who|whoami|write|xargs|xdg-open|yarn|yes|zenity|zip|zsh|zypper)(?=$|[)\s;|&])/,lookbehind:!0},keyword:{pattern:/(^|[\s;|&]|[<>]\()(?:if|then|else|elif|fi|for|while|in|case|esac|function|select|do|done|until)(?=$|[)\s;|&])/,lookbehind:!0},builtin:{pattern:/(^|[\s;|&]|[<>]\()(?:\.|:|break|cd|continue|eval|exec|exit|export|getopts|hash|pwd|readonly|return|shift|test|times|trap|umask|unset|alias|bind|builtin|caller|command|declare|echo|enable|help|let|local|logout|mapfile|printf|read|readarray|source|type|typeset|ulimit|unalias|set|shopt)(?=$|[)\s;|&])/,lookbehind:!0,alias:"class-name"},boolean:{pattern:/(^|[\s;|&]|[<>]
\()(?:true|false)(?=$|[)\s;|&])/,lookbehind:!0},"file-descriptor":{pattern:/\B&\d\b/,alias:"important"},operator:{pattern:/\d?<>|>\||\+=|=[=~]?|!=?|<<[<-]?|[&\d]?>>|\d[<>]&?|[<>][&=]?|&[>&]?|\|[&|]?/,inside:{"file-descriptor":{pattern:/^\d/,alias:"important"}}},punctuation:/\$?\(\(?|\)\)?|\.\.|[{}[\];\\]/,number:{pattern:/(^|\s)(?:[1-9]\d*|0)(?:[.,]\d+)?\b/,lookbehind:!0}},n.inside=e.languages.bash;for(var i=["comment","function-name","for-or-select","assign-left","string","environment","function","keyword","builtin","boolean","file-descriptor","operator","punctuation","number"],a=r.variable[1].inside,o=0;o?^\w +\-.])*"/i,greedy:!0},number:/(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:E[+-]?\d+)?/i,keyword:/\b(?:AS|BEEP|BLOAD|BSAVE|CALL(?: ABSOLUTE)?|CASE|CHAIN|CHDIR|CLEAR|CLOSE|CLS|COM|COMMON|CONST|DATA|DECLARE|DEF(?: FN| SEG|DBL|INT|LNG|SNG|STR)|DIM|DO|DOUBLE|ELSE|ELSEIF|END|ENVIRON|ERASE|ERROR|EXIT|FIELD|FILES|FOR|FUNCTION|GET|GOSUB|GOTO|IF|INPUT|INTEGER|IOCTL|KEY|KILL|LINE INPUT|LOCATE|LOCK|LONG|LOOP|LSET|MKDIR|NAME|NEXT|OFF|ON(?: COM| ERROR| KEY| TIMER)?|OPEN|OPTION BASE|OUT|POKE|PUT|READ|REDIM|REM|RESTORE|RESUME|RETURN|RMDIR|RSET|RUN|SHARED|SINGLE|SELECT CASE|SHELL|SLEEP|STATIC|STEP|STOP|STRING|SUB|SWAP|SYSTEM|THEN|TIMER|TO|TROFF|TRON|TYPE|UNLOCK|UNTIL|USING|VIEW 
PRINT|WAIT|WEND|WHILE|WRITE)(?:\$|\b)/i,function:/\b(?:ABS|ACCESS|ACOS|ANGLE|AREA|ARITHMETIC|ARRAY|ASIN|ASK|AT|ATN|BASE|BEGIN|BREAK|CAUSE|CEIL|CHR|CLIP|COLLATE|COLOR|CON|COS|COSH|COT|CSC|DATE|DATUM|DEBUG|DECIMAL|DEF|DEG|DEGREES|DELETE|DET|DEVICE|DISPLAY|DOT|ELAPSED|EPS|ERASABLE|EXLINE|EXP|EXTERNAL|EXTYPE|FILETYPE|FIXED|FP|GO|GRAPH|HANDLER|IDN|IMAGE|IN|INT|INTERNAL|IP|IS|KEYED|LBOUND|LCASE|LEFT|LEN|LENGTH|LET|LINE|LINES|LOG|LOG10|LOG2|LTRIM|MARGIN|MAT|MAX|MAXNUM|MID|MIN|MISSING|MOD|NATIVE|NUL|NUMERIC|OF|OPTION|ORD|ORGANIZATION|OUTIN|OUTPUT|PI|POINT|POINTER|POINTS|POS|PRINT|PROGRAM|PROMPT|RAD|RADIANS|RANDOMIZE|RECORD|RECSIZE|RECTYPE|RELATIVE|REMAINDER|REPEAT|REST|RETRY|REWRITE|RIGHT|RND|ROUND|RTRIM|SAME|SEC|SELECT|SEQUENTIAL|SET|SETTER|SGN|SIN|SINH|SIZE|SKIP|SQR|STANDARD|STATUS|STR|STREAM|STYLE|TAB|TAN|TANH|TEMPLATE|TEXT|THERE|TIME|TIMEOUT|TRACE|TRANSFORM|TRUNCATE|UBOUND|UCASE|USE|VAL|VARIABLE|VIEWPORT|WHEN|WINDOW|WITH|ZER|ZONEWIDTH)(?:\$|\b)/i,operator:/<[=>]?|>=?|[+\-*\/^=&]|\b(?:AND|EQV|IMP|NOT|OR|XOR)\b/i,punctuation:/[,;:()]/}}e.exports=t,t.displayName="basic",t.aliases=[]},94781(e){"use strict";function t(e){var t,n,r,i,a;n=/%%?[~:\w]+%?|!\S+!/,r={pattern:/\/[a-z?]+(?=[ :]|$):?|-[a-z]\b|--[a-z-]+\b/im,alias:"attr-name",inside:{punctuation:/:/}},i=/"(?:[\\"]"|[^"])*"(?!")/,a=/(?:\b|-)\d+\b/,(t=e).languages.batch={comment:[/^::.*/m,{pattern:/((?:^|[&(])[ \t]*)rem\b(?:[^^&)\r\n]|\^(?:\r\n|[\s\S]))*/im,lookbehind:!0}],label:{pattern:/^:.*/m,alias:"property"},command:[{pattern:/((?:^|[&(])[ \t]*)for(?: \/[a-z?](?:[ :](?:"[^"]*"|[^\s"/]\S*))?)* \S+ in \([^)]+\) do/im,lookbehind:!0,inside:{keyword:/^for\b|\b(?:in|do)\b/i,string:i,parameter:r,variable:n,number:a,punctuation:/[()',]/}},{pattern:/((?:^|[&(])[ \t]*)if(?: \/[a-z?](?:[ :](?:"[^"]*"|[^\s"/]\S*))?)* (?:not )?(?:cmdextversion \d+|defined \w+|errorlevel \d+|exist \S+|(?:"[^"]*"|(?!")(?:(?!==)\S)+)?(?:==| (?:equ|neq|lss|leq|gtr|geq) 
)(?:"[^"]*"|[^\s"]\S*))/im,lookbehind:!0,inside:{keyword:/^if\b|\b(?:not|cmdextversion|defined|errorlevel|exist)\b/i,string:i,parameter:r,variable:n,number:a,operator:/\^|==|\b(?:equ|neq|lss|leq|gtr|geq)\b/i}},{pattern:/((?:^|[&()])[ \t]*)else\b/im,lookbehind:!0,inside:{keyword:/^else\b/i}},{pattern:/((?:^|[&(])[ \t]*)set(?: \/[a-z](?:[ :](?:"[^"]*"|[^\s"/]\S*))?)* (?:[^^&)\r\n]|\^(?:\r\n|[\s\S]))*/im,lookbehind:!0,inside:{keyword:/^set\b/i,string:i,parameter:r,variable:[n,/\w+(?=(?:[*\/%+\-&^|]|<<|>>)?=)/],number:a,operator:/[*\/%+\-&^|]=?|<<=?|>>=?|[!~_=]/,punctuation:/[()',]/}},{pattern:/((?:^|[&(])[ \t]*@?)\w+\b(?:"(?:[\\"]"|[^"])*"(?!")|[^"^&)\r\n]|\^(?:\r\n|[\s\S]))*/im,lookbehind:!0,inside:{keyword:/^\w+\b/i,string:i,parameter:r,label:{pattern:/(^\s*):\S+/m,lookbehind:!0,alias:"property"},variable:n,number:a,operator:/\^/}}],operator:/[&@]/,punctuation:/[()']/}}e.exports=t,t.displayName="batch",t.aliases=[]},62260(e){"use strict";function t(e){e.languages.bbcode={tag:{pattern:/\[\/?[^\s=\]]+(?:\s*=\s*(?:"[^"]*"|'[^']*'|[^\s'"\]=]+))?(?:\s+[^\s=\]]+\s*=\s*(?:"[^"]*"|'[^']*'|[^\s'"\]=]+))*\s*\]/,inside:{tag:{pattern:/^\[\/?[^\s=\]]+/,inside:{punctuation:/^\[\/?/}},"attr-value":{pattern:/=\s*(?:"[^"]*"|'[^']*'|[^\s'"\]=]+)/i,inside:{punctuation:[/^=/,{pattern:/^(\s*)["']|["']$/,lookbehind:!0}]}},punctuation:/\]/,"attr-name":/[^\s=\]]+/}}},e.languages.shortcode=e.languages.bbcode}e.exports=t,t.displayName="bbcode",t.aliases=["shortcode"]},59258(e){"use strict";function 
t(e){e.languages.birb=e.languages.extend("clike",{string:{pattern:/r?("|')(?:\\.|(?!\1)[^\\])*\1/,greedy:!0},"class-name":[/\b[A-Z](?:[\d_]*[a-zA-Z]\w*)?\b/,/\b[A-Z]\w*(?=\s+\w+\s*[;,=()])/],keyword:/\b(?:assert|break|case|class|const|default|else|enum|final|follows|for|grab|if|nest|next|new|noSeeb|return|static|switch|throw|var|void|while)\b/,operator:/\+\+|--|&&|\|\||<<=?|>>=?|~(?:\/=?)?|[+\-*\/%&^|=!<>]=?|\?|:/,variable:/\b[a-z_]\w*\b/}),e.languages.insertBefore("birb","function",{metadata:{pattern:/<\w+>/,greedy:!0,alias:"symbol"}})}e.exports=t,t.displayName="birb",t.aliases=[]},62890(e,t,n){"use strict";var r=n(65806);function i(e){e.register(r),e.languages.bison=e.languages.extend("c",{}),e.languages.insertBefore("bison","comment",{bison:{pattern:/^(?:[^%]|%(?!%))*%%[\s\S]*?%%/,inside:{c:{pattern:/%\{[\s\S]*?%\}|\{(?:\{[^}]*\}|[^{}])*\}/,inside:{delimiter:{pattern:/^%?\{|%?\}$/,alias:"punctuation"},"bison-variable":{pattern:/[$@](?:<[^\s>]+>)?[\w$]+/,alias:"variable",inside:{punctuation:/<|>/}},rest:e.languages.c}},comment:e.languages.c.comment,string:e.languages.c.string,property:/\S+(?=:)/,keyword:/%\w+/,number:{pattern:/(^|[^@])\b(?:0x[\da-f]+|\d+)/i,lookbehind:!0},punctuation:/%[%?]|[|:;\[\]<>]/}}})}e.exports=i,i.displayName="bison",i.aliases=[]},15958(e){"use strict";function t(e){e.languages.bnf={string:{pattern:/"[^\r\n"]*"|'[^\r\n']*'/},definition:{pattern:/<[^<>\r\n\t]+>(?=\s*::=)/,alias:["rule","keyword"],inside:{punctuation:/^<|>$/}},rule:{pattern:/<[^<>\r\n\t]+>/,inside:{punctuation:/^<|>$/}},operator:/::=|[|()[\]{}*+?]|\.{3}/},e.languages.rbnf=e.languages.bnf}e.exports=t,t.displayName="bnf",t.aliases=["rbnf"]},61321(e){"use strict";function t(e){e.languages.brainfuck={pointer:{pattern:/<|>/,alias:"keyword"},increment:{pattern:/\+/,alias:"inserted"},decrement:{pattern:/-/,alias:"deleted"},branching:{pattern:/\[|\]/,alias:"important"},operator:/[.,]/,comment:/\S+/}}e.exports=t,t.displayName="brainfuck",t.aliases=[]},77856(e){"use strict";function 
t(e){e.languages.brightscript={comment:/(?:\brem|').*/i,"directive-statement":{pattern:/(^[\t ]*)#(?:const|else(?:[\t ]+if)?|end[\t ]+if|error|if).*/im,lookbehind:!0,alias:"property",inside:{"error-message":{pattern:/(^#error).+/,lookbehind:!0},directive:{pattern:/^#(?:const|else(?:[\t ]+if)?|end[\t ]+if|error|if)/,alias:"keyword"},expression:{pattern:/[\s\S]+/,inside:null}}},property:{pattern:/([\r\n{,][\t ]*)(?:(?!\d)\w+|"(?:[^"\r\n]|"")*"(?!"))(?=[ \t]*:)/,lookbehind:!0,greedy:!0},string:{pattern:/"(?:[^"\r\n]|"")*"(?!")/,greedy:!0},"class-name":{pattern:/(\bAs[\t ]+)\w+/i,lookbehind:!0},keyword:/\b(?:As|Dim|Each|Else|Elseif|End|Exit|For|Function|Goto|If|In|Print|Return|Step|Stop|Sub|Then|To|While)\b/i,boolean:/\b(?:true|false)\b/i,function:/\b(?!\d)\w+(?=[\t ]*\()/i,number:/(?:\b\d+(?:\.\d+)?(?:[ed][+-]\d+)?|&h[a-f\d]+)\b[%&!#]?/i,operator:/--|\+\+|>>=?|<<=?|<>|[-+*/\\<>]=?|[:^=?]|\b(?:and|mod|not|or)\b/i,punctuation:/[.,;()[\]{}]/,constant:/\b(?:LINE_NUM)\b/i},e.languages.brightscript["directive-statement"].inside.expression.inside=e.languages.brightscript}e.exports=t,t.displayName="brightscript",t.aliases=[]},90741(e){"use strict";function t(e){e.languages.bro={comment:{pattern:/(^|[^\\$])#.*/,lookbehind:!0,inside:{italic:/\b(?:TODO|FIXME|XXX)\b/}},string:{pattern:/(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0},boolean:/\b[TF]\b/,function:{pattern:/(?:function|hook|event) \w+(?:::\w+)?/,inside:{keyword:/^(?:function|hook|event)/}},variable:{pattern:/(?:global|local) \w+/i,inside:{keyword:/(?:global|local)/}},builtin:/(?:@(?:load(?:-(?:sigs|plugin))?|unload|prefixes|ifn?def|else|(?:end)?if|DIR|FILENAME))|(?:&?(?:redef|priority|log|optional|default|add_func|delete_func|expire_func|read_expire|write_expire|create_expire|synchronized|persistent|rotate_interval|rotate_size|encrypt|raw_output|mergeable|group|error_handler|type_column))/,constant:{pattern:/const 
\w+/i,inside:{keyword:/const/}},keyword:/\b(?:break|next|continue|alarm|using|of|add|delete|export|print|return|schedule|when|timeout|addr|any|bool|count|double|enum|file|int|interval|pattern|opaque|port|record|set|string|subnet|table|time|vector|for|if|else|in|module|function)\b/,operator:/--?|\+\+?|!=?=?|<=?|>=?|==?=?|&&|\|\|?|\?|\*|\/|~|\^|%/,number:/\b0x[\da-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?/i,punctuation:/[{}[\];(),.:]/}}e.exports=t,t.displayName="bro",t.aliases=[]},83410(e){"use strict";function t(e){e.languages.bsl={comment:/\/\/.*/,string:[{pattern:/"(?:[^"]|"")*"(?!")/,greedy:!0},{pattern:/'(?:[^'\r\n\\]|\\.)*'/}],keyword:[{pattern:/(^|[^\w\u0400-\u0484\u0487-\u052f\u1d2b\u1d78\u2de0-\u2dff\ua640-\ua69f\ufe2e\ufe2f])(?:пока|для|новый|прервать|попытка|исключение|вызватьисключение|иначе|конецпопытки|неопределено|функция|перем|возврат|конецфункции|если|иначеесли|процедура|конецпроцедуры|тогда|знач|экспорт|конецесли|из|каждого|истина|ложь|по|цикл|конеццикла|выполнить)(?![\w\u0400-\u0484\u0487-\u052f\u1d2b\u1d78\u2de0-\u2dff\ua640-\ua69f\ufe2e\ufe2f])/i,lookbehind:!0},{pattern:/\b(?:while|for|new|break|try|except|raise|else|endtry|undefined|function|var|return|endfunction|null|if|elseif|procedure|endprocedure|then|val|export|endif|in|each|true|false|to|do|enddo|execute)\b/i}],number:{pattern:/(^(?=\d)|[^\w\u0400-\u0484\u0487-\u052f\u1d2b\u1d78\u2de0-\u2dff\ua640-\ua69f\ufe2e\ufe2f])(?:\d+(?:\.\d*)?|\.\d+)(?:E[+-]?\d+)?/i,lookbehind:!0},operator:[/[<>+\-*/]=?|[%=]/,{pattern:/(^|[^\w\u0400-\u0484\u0487-\u052f\u1d2b\u1d78\u2de0-\u2dff\ua640-\ua69f\ufe2e\ufe2f])(?:и|или|не)(?![\w\u0400-\u0484\u0487-\u052f\u1d2b\u1d78\u2de0-\u2dff\ua640-\ua69f\ufe2e\ufe2f])/i,lookbehind:!0},{pattern:/\b(?:and|or|not)\b/i}],punctuation:/\(\.|\.\)|[()\[\]:;,.]/,directive:[{pattern:/^(\s*)&.*/m,lookbehind:!0,alias:"important"},{pattern:/^\s*#.*/gm,alias:"important"}]},e.languages.oscript=e.languages.bsl}e.exports=t,t.displayName="bsl",t.aliases=[]},65806(e){"use 
strict";function t(e){e.languages.c=e.languages.extend("clike",{comment:{pattern:/\/\/(?:[^\r\n\\]|\\(?:\r\n?|\n|(?![\r\n])))*|\/\*[\s\S]*?(?:\*\/|$)/,greedy:!0},"class-name":{pattern:/(\b(?:enum|struct)\s+(?:__attribute__\s*\(\([\s\S]*?\)\)\s*)?)\w+|\b[a-z]\w*_t\b/,lookbehind:!0},keyword:/\b(?:__attribute__|_Alignas|_Alignof|_Atomic|_Bool|_Complex|_Generic|_Imaginary|_Noreturn|_Static_assert|_Thread_local|asm|typeof|inline|auto|break|case|char|const|continue|default|do|double|else|enum|extern|float|for|goto|if|int|long|register|return|short|signed|sizeof|static|struct|switch|typedef|union|unsigned|void|volatile|while)\b/,function:/\b[a-z_]\w*(?=\s*\()/i,number:/(?:\b0x(?:[\da-f]+(?:\.[\da-f]*)?|\.[\da-f]+)(?:p[+-]?\d+)?|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?)[ful]{0,4}/i,operator:/>>=?|<<=?|->|([-+&|:])\1|[?:~]|[-+*/%&|^!=<>]=?/}),e.languages.insertBefore("c","string",{macro:{pattern:/(^[\t ]*)#\s*[a-z](?:[^\r\n\\/]|\/(?!\*)|\/\*(?:[^*]|\*(?!\/))*\*\/|\\(?:\r\n|[\s\S]))*/im,lookbehind:!0,greedy:!0,alias:"property",inside:{string:[{pattern:/^(#\s*include\s*)<[^>]+>/,lookbehind:!0},e.languages.c.string],comment:e.languages.c.comment,"macro-name":[{pattern:/(^#\s*define\s+)\w+\b(?!\()/i,lookbehind:!0},{pattern:/(^#\s*define\s+)\w+\b(?=\()/i,lookbehind:!0,alias:"function"}],directive:{pattern:/^(#\s*)[a-z]+/,lookbehind:!0,alias:"keyword"},"directive-hash":/^#/,punctuation:/##|\\(?=[\r\n])/,expression:{pattern:/\S[\s\S]*/,inside:e.languages.c}}},constant:/\b(?:__FILE__|__LINE__|__DATE__|__TIME__|__TIMESTAMP__|__func__|EOF|NULL|SEEK_CUR|SEEK_END|SEEK_SET|stdin|stdout|stderr)\b/}),delete e.languages.c.boolean}e.exports=t,t.displayName="c",t.aliases=[]},33039(e){"use strict";function 
t(e){e.languages.cfscript=e.languages.extend("clike",{comment:[{pattern:/(^|[^\\])\/\*[\s\S]*?(?:\*\/|$)/,lookbehind:!0,inside:{annotation:{pattern:/(?:^|[^.])@[\w\.]+/,alias:"punctuation"}}},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0,greedy:!0}],keyword:/\b(?:abstract|break|catch|component|continue|default|do|else|extends|final|finally|for|function|if|in|include|package|private|property|public|remote|required|rethrow|return|static|switch|throw|try|var|while|xml)\b(?!\s*=)/,operator:[/\+\+|--|&&|\|\||::|=>|[!=]==|<=?|>=?|[-+*/%&|^!=<>]=?|\?(?:\.|:)?|[?:]/,/\b(?:and|contains|eq|equal|eqv|gt|gte|imp|is|lt|lte|mod|not|or|xor)\b/],scope:{pattern:/\b(?:application|arguments|cgi|client|cookie|local|session|super|this|variables)\b/,alias:"global"},type:{pattern:/\b(?:any|array|binary|boolean|date|guid|numeric|query|string|struct|uuid|void|xml)\b/,alias:"builtin"}}),e.languages.insertBefore("cfscript","keyword",{"function-variable":{pattern:/[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*[=:]\s*(?:\bfunction\b|(?:\((?:[^()]|\([^()]*\))*\)|(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*)\s*=>))/,alias:"function"}}),delete e.languages.cfscript["class-name"],e.languages.cfc=e.languages.cfscript}e.exports=t,t.displayName="cfscript",t.aliases=[]},85082(e,t,n){"use strict";var r=n(80096);function 
i(e){e.register(r),e.languages.chaiscript=e.languages.extend("clike",{string:{pattern:/(^|[^\\])'(?:[^'\\]|\\[\s\S])*'/,lookbehind:!0,greedy:!0},"class-name":[{pattern:/(\bclass\s+)\w+/,lookbehind:!0},{pattern:/(\b(?:attr|def)\s+)\w+(?=\s*::)/,lookbehind:!0}],keyword:/\b(?:attr|auto|break|case|catch|class|continue|def|default|else|finally|for|fun|global|if|return|switch|this|try|var|while)\b/,number:[e.languages.cpp.number,/\b(?:Infinity|NaN)\b/],operator:/>>=?|<<=?|\|\||&&|:[:=]?|--|\+\+|[=!<>+\-*/%|&^]=?|[?~]|`[^`\r\n]{1,4}`/}),e.languages.insertBefore("chaiscript","operator",{"parameter-type":{pattern:/([,(]\s*)\w+(?=\s+\w)/,lookbehind:!0,alias:"class-name"}}),e.languages.insertBefore("chaiscript","string",{"string-interpolation":{pattern:/(^|[^\\])"(?:[^"$\\]|\\[\s\S]|\$(?!\{)|\$\{(?:[^{}]|\{(?:[^{}]|\{[^{}]*\})*\})*\})*"/,lookbehind:!0,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^\\])(?:\\{2})*)\$\{(?:[^{}]|\{(?:[^{}]|\{[^{}]*\})*\})*\}/,lookbehind:!0,inside:{"interpolation-expression":{pattern:/(^\$\{)[\s\S]+(?=\}$)/,lookbehind:!0,inside:e.languages.chaiscript},"interpolation-punctuation":{pattern:/^\$\{|\}$/,alias:"punctuation"}}},string:/[\s\S]+/}}})}e.exports=i,i.displayName="chaiscript",i.aliases=[]},79415(e){"use strict";function 
t(e){e.languages.cil={comment:/\/\/.*/,string:{pattern:/(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0},directive:{pattern:/(^|\W)\.[a-z]+(?=\s)/,lookbehind:!0,alias:"class-name"},variable:/\[[\w\.]+\]/,keyword:/\b(?:abstract|ansi|assembly|auto|autochar|beforefieldinit|bool|bstr|byvalstr|catch|char|cil|class|currency|date|decimal|default|enum|error|explicit|extends|extern|famandassem|family|famorassem|final(?:ly)?|float32|float64|hidebysig|iant|idispatch|implements|import|initonly|instance|u?int(?:8|16|32|64)?|interface|iunknown|literal|lpstr|lpstruct|lptstr|lpwstr|managed|method|native(?:Type)?|nested|newslot|object(?:ref)?|pinvokeimpl|private|privatescope|public|reqsecobj|rtspecialname|runtime|sealed|sequential|serializable|specialname|static|string|struct|syschar|tbstr|unicode|unmanagedexp|unsigned|value(?:type)?|variant|virtual|void)\b/,function:/\b(?:(?:constrained|unaligned|volatile|readonly|tail|no)\.)?(?:conv\.(?:[iu][1248]?|ovf\.[iu][1248]?(?:\.un)?|r\.un|r4|r8)|ldc\.(?:i4(?:\.[0-9]+|\.[mM]1|\.s)?|i8|r4|r8)|ldelem(?:\.[iu][1248]?|\.r[48]|\.ref|a)?|ldind\.(?:[iu][1248]?|r[48]|ref)|stelem\.?(?:i[1248]?|r[48]|ref)?|stind\.(?:i[1248]?|r[48]|ref)?|end(?:fault|filter|finally)|ldarg(?:\.[0-3s]|a(?:\.s)?)?|ldloc(?:\.[0-9]+|\.s)?|sub(?:\.ovf(?:\.un)?)?|mul(?:\.ovf(?:\.un)?)?|add(?:\.ovf(?:\.un)?)?|stloc(?:\.[0-3s])?|refany(?:type|val)|blt(?:\.un)?(?:\.s)?|ble(?:\.un)?(?:\.s)?|bgt(?:\.un)?(?:\.s)?|bge(?:\.un)?(?:\.s)?|unbox(?:\.any)?|init(?:blk|obj)|call(?:i|virt)?|brfalse(?:\.s)?|bne\.un(?:\.s)?|ldloca(?:\.s)?|brzero(?:\.s)?|brtrue(?:\.s)?|brnull(?:\.s)?|brinst(?:\.s)?|starg(?:\.s)?|leave(?:\.s)?|shr(?:\.un)?|rem(?:\.un)?|div(?:\.un)?|clt(?:\.un)?|alignment|ldvirtftn|castclass|beq(?:\.s)?|mkrefany|localloc|ckfinite|rethrow|ldtoken|ldsflda|cgt\.un|arglist|switch|stsfld|sizeof|newobj|newarr|ldsfld|ldnull|ldflda|isinst|throw|stobj|stfld|ldstr|ldobj|ldlen|ldftn|ldfld|cpobj|cpblk|break|br\.s|xor|shl|ret|pop|not|nop|neg|jmp|dup|cgt|ceq|box|and|or|br)\b/,boolean
:/\b(?:true|false)\b/,number:/\b-?(?:0x[0-9a-f]+|[0-9]+)(?:\.[0-9a-f]+)?\b/i,punctuation:/[{}[\];(),:=]|IL_[0-9A-Za-z]+/}}e.exports=t,t.displayName="cil",t.aliases=[]},29726(e){"use strict";function t(e){e.languages.clike={comment:[{pattern:/(^|[^\\])\/\*[\s\S]*?(?:\*\/|$)/,lookbehind:!0,greedy:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0,greedy:!0}],string:{pattern:/(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0},"class-name":{pattern:/(\b(?:class|interface|extends|implements|trait|instanceof|new)\s+|\bcatch\s+\()[\w.\\]+/i,lookbehind:!0,inside:{punctuation:/[.\\]/}},keyword:/\b(?:if|else|while|do|for|return|in|instanceof|function|new|try|throw|catch|finally|null|break|continue)\b/,boolean:/\b(?:true|false)\b/,function:/\b\w+(?=\()/,number:/\b0x[\da-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?/i,operator:/[<>]=?|[!=]=?=?|--?|\+\+?|&&?|\|\|?|[?*/~^%]/,punctuation:/[{}[\];(),.:]/}}e.exports=t,t.displayName="clike",t.aliases=[]},62849(e){"use strict";function t(e){e.languages.clojure={comment:/;.*/,string:{pattern:/"(?:[^"\\]|\\.)*"/,greedy:!0},operator:/(?:::|[:|'])\b[a-z][\w*+!?-]*\b/i,keyword:{pattern:/([^\w+*'?-])(?:def|if|do|let|\.\.|quote|var|->>|->|fn|loop|recur|throw|try|monitor-enter|\.|new|set!|def-|defn|defn-|defmacro|defmulti|defmethod|defstruct|defonce|declare|definline|definterface|defprotocol|==|defrecord|>=|deftype|<=|defproject|ns|\*|\+|-|\/|<|=|>|accessor|agent|agent-errors|aget|alength|all-ns|alter|and|append-child|apply|array-map|aset|aset-boolean|aset-byte|aset-char|aset-double|aset-float|aset-int|aset-long|aset-short|assert|assoc|await|await-for|bean|binding|bit-and|bit-not|bit-or|bit-shift-left|bit-shift-right|bit-xor|boolean|branch\?|butlast|byte|cast|char|children|class|clear-agent-errors|comment|commute|comp|comparator|complement|concat|conj|cons|constantly|cond|if-not|construct-proxy|contains\?|count|create-ns|create-struct|cycle|dec|deref|difference|disj|dissoc|distinct|doall|doc|dorun|doseq|dosync|dotimes|doto|double|down|dr
op|drop-while|edit|end\?|ensure|eval|every\?|false\?|ffirst|file-seq|filter|find|find-doc|find-ns|find-var|first|float|flush|for|fnseq|frest|gensym|get-proxy-class|get|hash-map|hash-set|identical\?|identity|if-let|import|in-ns|inc|index|insert-child|insert-left|insert-right|inspect-table|inspect-tree|instance\?|int|interleave|intersection|into|into-array|iterate|join|key|keys|keyword|keyword\?|last|lazy-cat|lazy-cons|left|lefts|line-seq|list\*|list|load|load-file|locking|long|macroexpand|macroexpand-1|make-array|make-node|map|map-invert|map\?|mapcat|max|max-key|memfn|merge|merge-with|meta|min|min-key|name|namespace|neg\?|newline|next|nil\?|node|not|not-any\?|not-every\?|not=|ns-imports|ns-interns|ns-map|ns-name|ns-publics|ns-refers|ns-resolve|ns-unmap|nth|nthrest|or|parse|partial|path|peek|pop|pos\?|pr|pr-str|print|print-str|println|println-str|prn|prn-str|project|proxy|proxy-mappings|quot|rand|rand-int|range|re-find|re-groups|re-matcher|re-matches|re-pattern|re-seq|read|read-line|reduce|ref|ref-set|refer|rem|remove|remove-method|remove-ns|rename|rename-keys|repeat|replace|replicate|resolve|rest|resultset-seq|reverse|rfirst|right|rights|root|rrest|rseq|second|select|select-keys|send|send-off|seq|seq-zip|seq\?|set|short|slurp|some|sort|sort-by|sorted-map|sorted-map-by|sorted-set|special-symbol\?|split-at|split-with|str|string\?|struct|struct-map|subs|subvec|symbol|symbol\?|sync|take|take-nth|take-while|test|time|to-array|to-array-2d|tree-seq|true\?|union|up|update-proxy|val|vals|var-get|var-set|var\?|vector|vector-zip|vector\?|when|when-first|when-let|when-not|with-local-vars|with-meta|with-open|with-out-str|xml-seq|xml-zip|zero\?|zipmap|zipper)(?=[^\w+*'?-])/,lookbehind:!0},boolean:/\b(?:true|false|nil)\b/,number:/\b[\da-f]+\b/i,punctuation:/[{}\[\](),]/}}e.exports=t,t.displayName="clojure",t.aliases=[]},55773(e){"use strict";function 
t(e){e.languages.cmake={comment:/#.*/,string:{pattern:/"(?:[^\\"]|\\.)*"/,greedy:!0,inside:{interpolation:{pattern:/\$\{(?:[^{}$]|\$\{[^{}$]*\})*\}/,inside:{punctuation:/\$\{|\}/,variable:/\w+/}}}},variable:/\b(?:CMAKE_\w+|\w+_(?:VERSION(?:_MAJOR|_MINOR|_PATCH|_TWEAK)?|(?:BINARY|SOURCE)_DIR|DESCRIPTION|HOMEPAGE_URL|ROOT)|(?:ANDROID|APPLE|BORLAND|BUILD_SHARED_LIBS|CACHE|CPACK_(?:ABSOLUTE_DESTINATION_FILES|COMPONENT_INCLUDE_TOPLEVEL_DIRECTORY|ERROR_ON_ABSOLUTE_INSTALL_DESTINATION|INCLUDE_TOPLEVEL_DIRECTORY|INSTALL_DEFAULT_DIRECTORY_PERMISSIONS|INSTALL_SCRIPT|PACKAGING_INSTALL_PREFIX|SET_DESTDIR|WARN_ON_ABSOLUTE_INSTALL_DESTINATION)|CTEST_(?:BINARY_DIRECTORY|BUILD_COMMAND|BUILD_NAME|BZR_COMMAND|BZR_UPDATE_OPTIONS|CHANGE_ID|CHECKOUT_COMMAND|CONFIGURATION_TYPE|CONFIGURE_COMMAND|COVERAGE_COMMAND|COVERAGE_EXTRA_FLAGS|CURL_OPTIONS|CUSTOM_(?:COVERAGE_EXCLUDE|ERROR_EXCEPTION|ERROR_MATCH|ERROR_POST_CONTEXT|ERROR_PRE_CONTEXT|MAXIMUM_FAILED_TEST_OUTPUT_SIZE|MAXIMUM_NUMBER_OF_(?:ERRORS|WARNINGS)|MAXIMUM_PASSED_TEST_OUTPUT_SIZE|MEMCHECK_IGNORE|POST_MEMCHECK|POST_TEST|PRE_MEMCHECK|PRE_TEST|TESTS_IGNORE|WARNING_EXCEPTION|WARNING_MATCH)|CVS_CHECKOUT|CVS_COMMAND|CVS_UPDATE_OPTIONS|DROP_LOCATION|DROP_METHOD|DROP_SITE|DROP_SITE_CDASH|DROP_SITE_PASSWORD|DROP_SITE_USER|EXTRA_COVERAGE_GLOB|GIT_COMMAND|GIT_INIT_SUBMODULES|GIT_UPDATE_CUSTOM|GIT_UPDATE_OPTIONS|HG_COMMAND|HG_UPDATE_OPTIONS|LABELS_FOR_SUBPROJECTS|MEMORYCHECK_(?:COMMAND|COMMAND_OPTIONS|SANITIZER_OPTIONS|SUPPRESSIONS_FILE|TYPE)|NIGHTLY_START_TIME|P4_CLIENT|P4_COMMAND|P4_OPTIONS|P4_UPDATE_OPTIONS|RUN_CURRENT_SCRIPT|SCP_COMMAND|SITE|SOURCE_DIRECTORY|SUBMIT_URL|SVN_COMMAND|SVN_OPTIONS|SVN_UPDATE_OPTIONS|TEST_LOAD|TEST_TIMEOUT|TRIGGER_SITE|UPDATE_COMMAND|UPDATE_OPTIONS|UPDATE_VERSION_ONLY|USE_LAUNCHERS)|CYGWIN|ENV|EXECUTABLE_OUTPUT_PATH|GHS-MULTI|IOS|LIBRARY_OUTPUT_PATH|MINGW|MSVC(?:10|11|12|14|60|70|71|80|90|_IDE|_TOOLSET_VERSION|_VERSION)?|MSYS|PROJECT_(?:BINARY_DIR|DESCRIPTION|HOMEPAGE_URL|NAME|SOURCE_DIR|VERSION|VERSION_(?:MAJOR|
MINOR|PATCH|TWEAK))|UNIX|WIN32|WINCE|WINDOWS_PHONE|WINDOWS_STORE|XCODE|XCODE_VERSION))\b/,property:/\b(?:cxx_\w+|(?:ARCHIVE_OUTPUT_(?:DIRECTORY|NAME)|COMPILE_DEFINITIONS|COMPILE_PDB_NAME|COMPILE_PDB_OUTPUT_DIRECTORY|EXCLUDE_FROM_DEFAULT_BUILD|IMPORTED_(?:IMPLIB|LIBNAME|PLI_DEPENDENT_LIBRARIES|PLI_INTERFACE_LANGUAGES|PLI_INTERFACE_LIBRARIES|PLI_INTERFACE_MULTIPLICITY|LOCATION|NO_SONAME|OBJECTS|SONAME)|INTERPROCEDURAL_OPTIMIZATION|LIBRARY_OUTPUT_DIRECTORY|LIBRARY_OUTPUT_NAME|PLI_FLAGS|PLI_INTERFACE_LIBRARIES|PLI_INTERFACE_MULTIPLICITY|LOCATION|MAP_IMPORTED_CONFIG|OSX_ARCHITECTURES|OUTPUT_NAME|PDB_NAME|PDB_OUTPUT_DIRECTORY|RUNTIME_OUTPUT_DIRECTORY|RUNTIME_OUTPUT_NAME|STATIC_LIBRARY_FLAGS|VS_CSHARP|VS_DOTNET_REFERENCEPROP|VS_DOTNET_REFERENCE|VS_GLOBAL_SECTION_POST|VS_GLOBAL_SECTION_PRE|VS_GLOBAL|XCODE_ATTRIBUTE)_\w+|\w+_(?:CLANG_TIDY|COMPILER_LAUNCHER|CPPCHECK|CPPLINT|INCLUDE_WHAT_YOU_USE|OUTPUT_NAME|POSTFIX|VISIBILITY_PRESET)|ABSTRACT|ADDITIONAL_MAKE_CLEAN_FILES|ADVANCED|ALIASED_TARGET|ALLOW_DUPLICATE_CUSTOM_TARGETS|ANDROID_(?:ANT_ADDITIONAL_OPTIONS|API|API_MIN|ARCH|ASSETS_DIRECTORIES|GUI|JAR_DEPENDENCIES|NATIVE_LIB_DEPENDENCIES|NATIVE_LIB_DIRECTORIES|PROCESS_MAX|PROGUARD|PROGUARD_CONFIG_PATH|SECURE_PROPS_PATH|SKIP_ANT_STEP|STL_TYPE)|ARCHIVE_OUTPUT_DIRECTORY|ATTACHED_FILES|ATTACHED_FILES_ON_FAIL|AUTOGEN_(?:BUILD_DIR|ORIGIN_DEPENDS|PARALLEL|SOURCE_GROUP|TARGETS_FOLDER|TARGET_DEPENDS)|AUTOMOC|AUTOMOC_(?:COMPILER_PREDEFINES|DEPEND_FILTERS|EXECUTABLE|MACRO_NAMES|MOC_OPTIONS|SOURCE_GROUP|TARGETS_FOLDER)|AUTORCC|AUTORCC_EXECUTABLE|AUTORCC_OPTIONS|AUTORCC_SOURCE_GROUP|AUTOUIC|AUTOUIC_EXECUTABLE|AUTOUIC_OPTIONS|AUTOUIC_SEARCH_PATHS|BINARY_DIR|BUILDSYSTEM_TARGETS|BUILD_RPATH|BUILD_RPATH_USE_ORIGIN|BUILD_WITH_INSTALL_NAME_DIR|BUILD_WITH_INSTALL_RPATH|BUNDLE|BUNDLE_EXTENSION|CACHE_VARIABLES|CLEAN_NO_CUSTOM|COMMON_LANGUAGE_RUNTIME|COMPATIBLE_INTERFACE_(?:BOOL|NUMBER_MAX|NUMBER_MIN|STRING)|COMPILE_(?:DEFINITIONS|FEATURES|FLAGS|OPTIONS|PDB_NAME|PDB_OUTPUT_DIRECTORY)|COST|CPACK_DESKT
OP_SHORTCUTS|CPACK_NEVER_OVERWRITE|CPACK_PERMANENT|CPACK_STARTUP_SHORTCUTS|CPACK_START_MENU_SHORTCUTS|CPACK_WIX_ACL|CROSSCOMPILING_EMULATOR|CUDA_EXTENSIONS|CUDA_PTX_COMPILATION|CUDA_RESOLVE_DEVICE_SYMBOLS|CUDA_SEPARABLE_COMPILATION|CUDA_STANDARD|CUDA_STANDARD_REQUIRED|CXX_EXTENSIONS|CXX_STANDARD|CXX_STANDARD_REQUIRED|C_EXTENSIONS|C_STANDARD|C_STANDARD_REQUIRED|DEBUG_CONFIGURATIONS|DEFINE_SYMBOL|DEFINITIONS|DEPENDS|DEPLOYMENT_ADDITIONAL_FILES|DEPLOYMENT_REMOTE_DIRECTORY|DISABLED|DISABLED_FEATURES|ECLIPSE_EXTRA_CPROJECT_CONTENTS|ECLIPSE_EXTRA_NATURES|ENABLED_FEATURES|ENABLED_LANGUAGES|ENABLE_EXPORTS|ENVIRONMENT|EXCLUDE_FROM_ALL|EXCLUDE_FROM_DEFAULT_BUILD|EXPORT_NAME|EXPORT_PROPERTIES|EXTERNAL_OBJECT|EchoString|FAIL_REGULAR_EXPRESSION|FIND_LIBRARY_USE_LIB32_PATHS|FIND_LIBRARY_USE_LIB64_PATHS|FIND_LIBRARY_USE_LIBX32_PATHS|FIND_LIBRARY_USE_OPENBSD_VERSIONING|FIXTURES_CLEANUP|FIXTURES_REQUIRED|FIXTURES_SETUP|FOLDER|FRAMEWORK|Fortran_FORMAT|Fortran_MODULE_DIRECTORY|GENERATED|GENERATOR_FILE_NAME|GENERATOR_IS_MULTI_CONFIG|GHS_INTEGRITY_APP|GHS_NO_SOURCE_GROUP_FILE|GLOBAL_DEPENDS_DEBUG_MODE|GLOBAL_DEPENDS_NO_CYCLES|GNUtoMS|HAS_CXX|HEADER_FILE_ONLY|HELPSTRING|IMPLICIT_DEPENDS_INCLUDE_TRANSFORM|IMPORTED|IMPORTED_(?:COMMON_LANGUAGE_RUNTIME|CONFIGURATIONS|GLOBAL|IMPLIB|LIBNAME|PLI_DEPENDENT_LIBRARIES|PLI_INTERFACE_(?:LANGUAGES|LIBRARIES|MULTIPLICITY)|LOCATION|NO_SONAME|OBJECTS|SONAME)|IMPORT_PREFIX|IMPORT_SUFFIX|INCLUDE_DIRECTORIES|INCLUDE_REGULAR_EXPRESSION|INSTALL_NAME_DIR|INSTALL_RPATH|INSTALL_RPATH_USE_PLI_PATH|INTERFACE_(?:AUTOUIC_OPTIONS|COMPILE_DEFINITIONS|COMPILE_FEATURES|COMPILE_OPTIONS|INCLUDE_DIRECTORIES|PLI_DEPENDS|PLI_DIRECTORIES|PLI_LIBRARIES|PLI_OPTIONS|POSITION_INDEPENDENT_CODE|SOURCES|SYSTEM_INCLUDE_DIRECTORIES)|INTERPROCEDURAL_OPTIMIZATION|IN_TRY_COMPILE|IOS_INSTALL_COMBINED|JOB_POOLS|JOB_POOL_COMPILE|JOB_POOL_PLI|KEEP_EXTENSION|LABELS|LANGUAGE|LIBRARY_OUTPUT_DIRECTORY|PLIER_LANGUAGE|PLI_(?:DEPENDS|DEPENDS_NO_SHARED|DIRECTORIES|FLAGS|INTERFACE_LIBRARIES|INTERFAC
E_MULTIPLICITY|LIBRARIES|OPTIONS|SEARCH_END_STATIC|SEARCH_START_STATIC|WHAT_YOU_USE)|LISTFILE_STACK|LOCATION|MACOSX_BUNDLE|MACOSX_BUNDLE_INFO_PLIST|MACOSX_FRAMEWORK_INFO_PLIST|MACOSX_PACKAGE_LOCATION|MACOSX_RPATH|MACROS|MANUALLY_ADDED_DEPENDENCIES|MEASUREMENT|MODIFIED|NAME|NO_SONAME|NO_SYSTEM_FROM_IMPORTED|OBJECT_DEPENDS|OBJECT_OUTPUTS|OSX_ARCHITECTURES|OUTPUT_NAME|PACKAGES_FOUND|PACKAGES_NOT_FOUND|PARENT_DIRECTORY|PASS_REGULAR_EXPRESSION|PDB_NAME|PDB_OUTPUT_DIRECTORY|POSITION_INDEPENDENT_CODE|POST_INSTALL_SCRIPT|PREDEFINED_TARGETS_FOLDER|PREFIX|PRE_INSTALL_SCRIPT|PRIVATE_HEADER|PROCESSORS|PROCESSOR_AFFINITY|PROJECT_LABEL|PUBLIC_HEADER|REPORT_UNDEFINED_PROPERTIES|REQUIRED_FILES|RESOURCE|RESOURCE_LOCK|RULE_LAUNCH_COMPILE|RULE_LAUNCH_CUSTOM|RULE_LAUNCH_PLI|RULE_MESSAGES|RUNTIME_OUTPUT_DIRECTORY|RUN_SERIAL|SKIP_AUTOGEN|SKIP_AUTOMOC|SKIP_AUTORCC|SKIP_AUTOUIC|SKIP_BUILD_RPATH|SKIP_RETURN_CODE|SOURCES|SOURCE_DIR|SOVERSION|STATIC_LIBRARY_FLAGS|STATIC_LIBRARY_OPTIONS|STRINGS|SUBDIRECTORIES|SUFFIX|SYMBOLIC|TARGET_ARCHIVES_MAY_BE_SHARED_LIBS|TARGET_MESSAGES|TARGET_SUPPORTS_SHARED_LIBS|TESTS|TEST_INCLUDE_FILE|TEST_INCLUDE_FILES|TIMEOUT|TIMEOUT_AFTER_MATCH|TYPE|USE_FOLDERS|VALUE|VARIABLES|VERSION|VISIBILITY_INLINES_HIDDEN|VS_(?:CONFIGURATION_TYPE|COPY_TO_OUT_DIR|DEBUGGER_(?:COMMAND|COMMAND_ARGUMENTS|ENVIRONMENT|WORKING_DIRECTORY)|DEPLOYMENT_CONTENT|DEPLOYMENT_LOCATION|DOTNET_REFERENCES|DOTNET_REFERENCES_COPY_LOCAL|GLOBAL_KEYWORD|GLOBAL_PROJECT_TYPES|GLOBAL_ROOTNAMESPACE|INCLUDE_IN_VSIX|IOT_STARTUP_TASK|KEYWORD|RESOURCE_GENERATOR|SCC_AUXPATH|SCC_LOCALPATH|SCC_PROJECTNAME|SCC_PROVIDER|SDK_REFERENCES|SHADER_(?:DISABLE_OPTIMIZATIONS|ENABLE_DEBUG|ENTRYPOINT|FLAGS|MODEL|OBJECT_FILE_NAME|OUTPUT_HEADER_FILE|TYPE|VARIABLE_NAME)|STARTUP_PROJECT|TOOL_OVERRIDE|USER_PROPS|WINRT_COMPONENT|WINRT_EXTENSIONS|WINRT_REFERENCES|XAML_TYPE)|WILL_FAIL|WIN32_EXECUTABLE|WINDOWS_EXPORT_ALL_SYMBOLS|WORKING_DIRECTORY|WRAP_EXCLUDE|XCODE_(?:EMIT_EFFECTIVE_PLATFORM_NAME|EXPLICIT_FILE_TYPE|FILE_ATTRIBUTES|LAS
T_KNOWN_FILE_TYPE|PRODUCT_TYPE|SCHEME_(?:ADDRESS_SANITIZER|ADDRESS_SANITIZER_USE_AFTER_RETURN|ARGUMENTS|DISABLE_MAIN_THREAD_CHECKER|DYNAMIC_LIBRARY_LOADS|DYNAMIC_PLIER_API_USAGE|ENVIRONMENT|EXECUTABLE|GUARD_MALLOC|MAIN_THREAD_CHECKER_STOP|MALLOC_GUARD_EDGES|MALLOC_SCRIBBLE|MALLOC_STACK|THREAD_SANITIZER(?:_STOP)?|UNDEFINED_BEHAVIOUR_SANITIZER(?:_STOP)?|ZOMBIE_OBJECTS))|XCTEST)\b/,keyword:/\b(?:add_compile_definitions|add_compile_options|add_custom_command|add_custom_target|add_definitions|add_dependencies|add_executable|add_library|add_link_options|add_subdirectory|add_test|aux_source_directory|break|build_command|build_name|cmake_host_system_information|cmake_minimum_required|cmake_parse_arguments|cmake_policy|configure_file|continue|create_test_sourcelist|ctest_build|ctest_configure|ctest_coverage|ctest_empty_binary_directory|ctest_memcheck|ctest_read_custom_files|ctest_run_script|ctest_sleep|ctest_start|ctest_submit|ctest_test|ctest_update|ctest_upload|define_property|else|elseif|enable_language|enable_testing|endforeach|endfunction|endif|endmacro|endwhile|exec_program|execute_process|export|export_library_dependencies|file|find_file|find_library|find_package|find_path|find_program|fltk_wrap_ui|foreach|function|get_cmake_property|get_directory_property|get_filename_component|get_property|get_source_file_property|get_target_property|get_test_property|if|include|include_directories|include_external_msproject|include_guard|include_regular_expression|install|install_files|install_programs|install_targets|link_directories|link_libraries|list|load_cache|load_command|macro|make_directory|mark_as_advanced|math|message|option|output_required_files|project|qt_wrap_cpp|qt_wrap_ui|remove|remove_definitions|return|separate_arguments|set|set_directory_properties|set_property|set_source_files_properties|set_target_properties|set_tests_properties|site_name|source_group|string|subdir_depends|subdirs|target_compile_definitions|target_compile_features|target_compile_options|target_i
nclude_directories|target_link_directories|target_link_libraries|target_link_options|target_sources|try_compile|try_run|unset|use_mangled_mesa|utility_source|variable_requires|variable_watch|while|write_file)(?=\s*\()\b/,boolean:/\b(?:ON|OFF|TRUE|FALSE)\b/,namespace:/\b(?:PROPERTIES|SHARED|PRIVATE|STATIC|PUBLIC|INTERFACE|TARGET_OBJECTS)\b/,operator:/\b(?:NOT|AND|OR|MATCHES|LESS|GREATER|EQUAL|STRLESS|STRGREATER|STREQUAL|VERSION_LESS|VERSION_EQUAL|VERSION_GREATER|DEFINED)\b/,inserted:{pattern:/\b\w+::\w+\b/,alias:"class-name"},number:/\b\d+(?:\.\d+)*\b/,function:/\b[a-z_]\w*(?=\s*\()\b/i,punctuation:/[()>}]|\$[<{]/}}e.exports=t,t.displayName="cmake",t.aliases=[]},32762(e){"use strict";function t(e){e.languages.cobol={comment:{pattern:/\*>.*|(^[ \t]*)\*.*/m,lookbehind:!0,greedy:!0},string:{pattern:/[xzgn]?(?:"(?:[^\r\n"]|"")*"(?!")|'(?:[^\r\n']|'')*'(?!'))/i,greedy:!0},level:{pattern:/(^[ \t]*)\d+\b/m,lookbehind:!0,greedy:!0,alias:"number"},"class-name":{pattern:/(\bpic(?:ture)?\s+)(?:(?:[-\w$/,:*+<>]|\.(?!\s|$))(?:\(\d+\))?)+/i,lookbehind:!0,inside:{number:{pattern:/(\()\d+/,lookbehind:!0},punctuation:/[()]/}},keyword:{pattern:/(^|[^\w-])(?:ABORT|ACCEPT|ACCESS|ADD|ADDRESS|ADVANCING|AFTER|ALIGNED|ALL|ALPHABET|ALPHABETIC|ALPHABETIC-LOWER|ALPHABETIC-UPPER|ALPHANUMERIC|ALPHANUMERIC-EDITED|ALSO|ALTER|ALTERNATE|ANY|ARE|AREA|AREAS|AS|ASCENDING|ASCII|ASSIGN|ASSOCIATED-DATA|ASSOCIATED-DATA-LENGTH|AT|ATTRIBUTE|AUTHOR|AUTO|AUTO-SKIP|BACKGROUND-COLOR|BACKGROUND-COLOUR|BASIS|BEEP|BEFORE|BEGINNING|BELL|BINARY|BIT|BLANK|BPLI|BLOCK|BOUNDS|BOTTOM|BY|BYFUNCTION|BYTITLE|CALL|CANCEL|CAPABLE|CCSVERSION|CD|CF|CH|CHAINING|CHANGED|CHANNEL|CHARACTER|CHARACTERS|CLASS|CLASS-ID|CLOCK-UNITS|CLOSE|CLOSE-DISPOSITION|COBOL|CODE|CODE-SET|COLLATING|COL|COLUMN|COM-REG|COMMA|COMMITMENT|COMMON|COMMUNICATION|COMP|COMP-1|COMP-2|COMP-3|COMP-4|COMP-5|COMPUTATIONAL|COMPUTATIONAL-1|COMPUTATIONAL-2|COMPUTATIONAL-3|COMPUTATIONAL-4|COMPUTATIONAL-5|COMPUTE|CONFIGURATION|CONTAINS|CONTENT|CONTINUE|CONTROL|CONTROL-PO
INT|CONTROLS|CONVENTION|CONVERTING|COPY|CORR|CORRESPONDING|COUNT|CRUNCH|CURRENCY|CURSOR|DATA|DATA-BASE|DATE|DATE-COMPILED|DATE-WRITTEN|DAY|DAY-OF-WEEK|DBCS|DE|DEBUG-CONTENTS|DEBUG-ITEM|DEBUG-LINE|DEBUG-NAME|DEBUG-SUB-1|DEBUG-SUB-2|DEBUG-SUB-3|DEBUGGING|DECIMAL-POINT|DECLARATIVES|DEFAULT|DEFAULT-DISPLAY|DEFINITION|DELETE|DELIMITED|DELIMITER|DEPENDING|DESCENDING|DESTINATION|DETAIL|DFHRESP|DFHVALUE|DISABLE|DISK|DISPLAY|DISPLAY-1|DIVIDE|DIVISION|DONTCARE|DOUBLE|DOWN|DUPLICATES|DYNAMIC|EBCDIC|EGCS|EGI|ELSE|EMI|EMPTY-CHECK|ENABLE|END|END-ACCEPT|END-ADD|END-CALL|END-COMPUTE|END-DELETE|END-DIVIDE|END-EVALUATE|END-IF|END-MULTIPLY|END-OF-PAGE|END-PERFORM|END-READ|END-RECEIVE|END-RETURN|END-REWRITE|END-SEARCH|END-START|END-STRING|END-SUBTRACT|END-UNSTRING|END-WRITE|ENDING|ENTER|ENTRY|ENTRY-PROCEDURE|ENVIRONMENT|EOP|ERASE|ERROR|EOL|EOS|ESCAPE|ESI|EVALUATE|EVENT|EVERY|EXCEPTION|EXCLUSIVE|EXHIBIT|EXIT|EXPORT|EXTEND|EXTENDED|EXTERNAL|FD|FILE|FILE-CONTROL|FILLER|FINAL|FIRST|FOOTING|FOR|FOREGROUND-COLOR|FOREGROUND-COLOUR|FROM|FULL|FUNCTION|FUNCTIONNAME|FUNCTION-POINTER|GENERATE|GOBACK|GIVING|GLOBAL|GO|GRID|GROUP|HEADING|HIGHLIGHT|HIGH-VALUE|HIGH-VALUES|I-O|I-O-CONTROL|ID|IDENTIFICATION|IF|IMPLICIT|IMPORT|IN|INDEX|INDEXED|INDICATE|INITIAL|INITIALIZE|INITIATE|INPUT|INPUT-OUTPUT|INSPECT|INSTALLATION|INTEGER|INTO|INVALID|INVOKE|IS|JUST|JUSTIFIED|KANJI|KEPT|KEY|KEYBOARD|LABEL|LANGUAGE|LAST|LB|LD|LEADING|LEFT|LEFTLINE|LENGTH|LENGTH-CHECK|LIBACCESS|LIBPARAMETER|LIBRARY|LIMIT|LIMITS|LINAGE|LINAGE-COUNTER|LINE|LINES|LINE-COUNTER|PLIAGE|LIST|LOCAL|LOCAL-STORAGE|LOCK|LONG-DATE|LONG-TIME|LOWER|LOWLIGHT|LOW-VALUE|LOW-VALUES|MEMORY|MERGE|MESSAGE|MMDDYYYY|MODE|MODULES|MORE-LABELS|MOVE|MULTIPLE|MULTIPLY|NAMED|NATIONAL|NATIONAL-EDITED|NATIVE|NEGATIVE|NETWORK|NEXT|NO|NO-ECHO|NULL|NULLS|NUMBER|NUMERIC|NUMERIC-DATE|NUMERIC-EDITED|NUMERIC-TIME|OBJECT-COMPUTER|OCCURS|ODT|OF|OFF|OMITTED|ON|OPEN|OPTIONAL|ORDER|ORDERLY|ORGANIZATION|OTHER|OUTPUT|OVERFLOW|OVERLINE|OWN|PACKED-DECIMAL|PADDING|PAGE|PAGE-COUNTER|
PASSWORD|PERFORM|PF|PH|PIC|PICTURE|PLUS|POINTER|POSITION|POSITIVE|PORT|PRINTER|PRINTING|PRIVATE|PROCEDURE|PROCEDURE-POINTER|PROCEDURES|PROCEED|PROCESS|PROGRAM|PROGRAM-ID|PROGRAM-LIBRARY|PROMPT|PURGE|QUEUE|QUOTE|QUOTES|RANDOM|READER|REMOTE|RD|REAL|READ|RECEIVE|RECEIVED|RECORD|RECORDING|RECORDS|RECURSIVE|REDEFINES|REEL|REF|REFERENCE|REFERENCES|RELATIVE|RELEASE|REMAINDER|REMARKS|REMOVAL|REMOVE|RENAMES|REPLACE|REPLACING|REPORT|REPORTING|REPORTS|REQUIRED|RERUN|RESERVE|REVERSE-VIDEO|RESET|RETURN|RETURN-CODE|RETURNING|REVERSED|REWIND|REWRITE|RF|RH|RIGHT|ROUNDED|RUN|SAME|SAVE|SCREEN|SD|SEARCH|SECTION|SECURE|SECURITY|SEGMENT|SEGMENT-LIMIT|SELECT|SEND|SENTENCE|SEPARATE|SEQUENCE|SEQUENTIAL|SET|SHARED|SHAREDBYALL|SHAREDBYRUNUNIT|SHARING|SHIFT-IN|SHIFT-OUT|SHORT-DATE|SIGN|SIZE|SORT|SORT-CONTROL|SORT-CORE-SIZE|SORT-FILE-SIZE|SORT-MERGE|SORT-MESSAGE|SORT-MODE-SIZE|SORT-RETURN|SOURCE|SOURCE-COMPUTER|SPACE|SPACES|SPECIAL-NAMES|STANDARD|STANDARD-1|STANDARD-2|START|STATUS|STOP|STRING|SUB-QUEUE-1|SUB-QUEUE-2|SUB-QUEUE-3|SUBTRACT|SUM|SUPPRESS|SYMBOL|SYMBOLIC|SYNC|SYNCHRONIZED|TABLE|TALLY|TALLYING|TASK|TAPE|TERMINAL|TERMINATE|TEST|TEXT|THEN|THREAD|THREAD-LOCAL|THROUGH|THRU|TIME|TIMER|TIMES|TITLE|TO|TODAYS-DATE|TODAYS-NAME|TOP|TRAILING|TRUNCATED|TYPE|TYPEDEF|UNDERLINE|UNIT|UNSTRING|UNTIL|UP|UPON|USAGE|USE|USING|VALUE|VALUES|VARYING|VIRTUAL|WAIT|WHEN|WHEN-COMPILED|WITH|WORDS|WORKING-STORAGE|WRITE|YEAR|YYYYMMDD|YYYYDDD|ZERO-FILL|ZEROS|ZEROES)(?![\w-])/i,lookbehind:!0},boolean:{pattern:/(^|[^\w-])(?:false|true)(?![\w-])/i,lookbehind:!0},number:{pattern:/(^|[^\w-])(?:[+-]?(?:(?:\d+(?:[.,]\d+)?|[.,]\d+)(?:e[+-]?\d+)?|zero))(?![\w-])/i,lookbehind:!0},operator:[/<>|[<>]=?|[=+*/&]/,{pattern:/(^|[^\w-])(?:-|and|equal|greater|less|not|or|than)(?![\w-])/i,lookbehind:!0}],punctuation:/[.:,()]/}}e.exports=t,t.displayName="cobol",t.aliases=[]},43576(e){"use strict";function t(e){var 
t,n,r;n=/#(?!\{).+/,r={pattern:/#\{[^}]+\}/,alias:"variable"},(t=e).languages.coffeescript=t.languages.extend("javascript",{comment:n,string:[{pattern:/'(?:\\[\s\S]|[^\\'])*'/,greedy:!0},{pattern:/"(?:\\[\s\S]|[^\\"])*"/,greedy:!0,inside:{interpolation:r}}],keyword:/\b(?:and|break|by|catch|class|continue|debugger|delete|do|each|else|extend|extends|false|finally|for|if|in|instanceof|is|isnt|let|loop|namespace|new|no|not|null|of|off|on|or|own|return|super|switch|then|this|throw|true|try|typeof|undefined|unless|until|when|while|window|with|yes|yield)\b/,"class-member":{pattern:/@(?!\d)\w+/,alias:"variable"}}),t.languages.insertBefore("coffeescript","comment",{"multiline-comment":{pattern:/###[\s\S]+?###/,alias:"comment"},"block-regex":{pattern:/\/{3}[\s\S]*?\/{3}/,alias:"regex",inside:{comment:n,interpolation:r}}}),t.languages.insertBefore("coffeescript","string",{"inline-javascript":{pattern:/`(?:\\[\s\S]|[^\\`])*`/,inside:{delimiter:{pattern:/^`|`$/,alias:"punctuation"},script:{pattern:/[\s\S]+/,alias:"language-javascript",inside:t.languages.javascript}}},"multiline-string":[{pattern:/'''[\s\S]*?'''/,greedy:!0,alias:"string"},{pattern:/"""[\s\S]*?"""/,greedy:!0,alias:"string",inside:{interpolation:r}}]}),t.languages.insertBefore("coffeescript","keyword",{property:/(?!\d)\w+(?=\s*:(?!:))/}),delete t.languages.coffeescript["template-string"],t.languages.coffee=t.languages.coffeescript}e.exports=t,t.displayName="coffeescript",t.aliases=["coffee"]},71794(e){"use strict";function t(e){e.languages.concurnas={comment:[{pattern:/(^|[^\\])\/\*[\s\S]*?(?:\*\/|$)/,lookbehind:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0,greedy:!0}],langext:{pattern:/\b\w+\s*\|\|[\s\S]+?\|\|/,greedy:!0,alias:"string"},function:{pattern:/((?:^|\s)def[ 
\t]+)[a-zA-Z_]\w*(?=\s*\()/,lookbehind:!0},keyword:/\b(?:abstract|actor|also|annotation|assert|async|await|bool|boolean|break|byte|case|catch|changed|char|class|closed|constant|continue|def|default|del|double|elif|else|enum|every|extends|false|finally|float|for|from|global|gpudef|gpukernel|if|import|in|init|inject|int|lambda|local|long|loop|match|new|nodefault|null|of|onchange|open|out|override|package|parfor|parforsync|post|pre|private|protected|provide|provider|public|return|shared|short|single|size_t|sizeof|super|sync|this|throw|trait|trans|transient|true|try|typedef|unchecked|using|val|var|void|while|with)\b/,boolean:/\b(?:false|true)\b/,number:/\b0b[01][01_]*L?\b|\b0x(?:[\da-f_]*\.)?[\da-f_p+-]+\b|(?:\b\d[\d_]*(?:\.[\d_]*)?|\B\.\d[\d_]*)(?:e[+-]?\d[\d_]*)?[dfls]?/i,punctuation:/[{}[\];(),.:]/,operator:/<==|>==|=>|->|<-|<>|\^|&==|&<>|!|\?:?|\.\?|\+\+|--|[-+*/=<>]=?|\b(?:and|as|band|bor|bxor|comp|is|isnot|mod|or)\b=?/,annotation:{pattern:/@(?:\w+:)?(?:\w+|\[[^\]]+\])?/,alias:"builtin"}},e.languages.insertBefore("concurnas","langext",{string:{pattern:/[rs]?("|')(?:\\.|(?!\1)[^\\\r\n])*\1/,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^\\])(?:\\{2})*)\{(?:[^{}]|\{(?:[^{}]|\{[^}]*\})*\})+\}/,lookbehind:!0,inside:e.languages.concurnas},string:/[\s\S]+/}}}),e.languages.conc=e.languages.concurnas}e.exports=t,t.displayName="concurnas",t.aliases=["conc"]},1315(e){"use strict";function t(e){!function(e){for(var t=/\(\*(?:[^(*]|\((?!\*)|\*(?!\))|)*\*\)/.source,n=0;n<2;n++)t=t.replace(//g,function(){return t});t=t.replace(//g,"[]"),e.languages.coq={comment:RegExp(t),string:{pattern:/"(?:[^"]|"")*"(?!")/,greedy:!0},attribute:[{pattern:RegExp(/#\[(?:[^\]("]|"(?:[^"]|"")*"(?!")|\((?!\*)|)*\]/.source.replace(//g,function(){return 
t})),greedy:!0,alias:"attr-name",inside:{comment:RegExp(t),string:{pattern:/"(?:[^"]|"")*"(?!")/,greedy:!0},operator:/=/,punctuation:/^#\[|\]$|[,()]/}},{pattern:/\b(?:Cumulative|Global|Local|Monomorphic|NonCumulative|Polymorphic|Private|Program)\b/,alias:"attr-name"}],keyword:/\b(?:_|Abort|About|Add|Admit|Admitted|All|apply|Arguments|as|As|Assumptions|at|Axiom|Axioms|Back|BackTo|Backtrace|Bind|BinOp|BinOpSpec|BinRel|Blacklist|by|Canonical|Case|Cd|Check|Class|Classes|Close|Coercion|Coercions|cofix|CoFixpoint|CoInductive|Collection|Combined|Compute|Conjecture|Conjectures|Constant|Constants|Constraint|Constructors|Context|Corollary|Create|CstOp|Custom|Cut|Debug|Declare|Defined|Definition|Delimit|Dependencies|Dependent|Derive|Diffs|Drop|Elimination|else|end|End|Entry|Equality|Eval|Example|Existential|Existentials|Existing|exists|exists2|Export|Extern|Extraction|Fact|Fail|Field|File|Firstorder|fix|Fixpoint|Flags|Focus|for|forall|From|fun|Funclass|Function|Functional|GC|Generalizable|Goal|Grab|Grammar|Graph|Guarded|Haskell|Heap|Hide|Hint|HintDb|Hints|Hypotheses|Hypothesis|Identity|if|IF|Immediate|Implicit|Implicits|Import|in|Include|Induction|Inductive|Infix|Info|Initial|InjTyp|Inline|Inspect|Instance|Instances|Intro|Intros|Inversion|Inversion_clear|JSON|Language|Left|Lemma|let|Let|Lia|Libraries|Library|Load|LoadPath|Locate|Ltac|Ltac2|match|Match|measure|Method|Minimality|ML|Module|Modules|Morphism|move|Next|NoInline|Notation|Number|Obligation|Obligations|OCaml|Opaque|Open|Optimize|Parameter|Parameters|Parametric|Path|Paths|Prenex|Preterm|Primitive|Print|Profile|Projections|Proof|Prop|PropBinOp|Property|PropOp|Proposition|PropUOp|Pwd|Qed|Quit|Rec|Record|Recursive|Redirect|Reduction|Register|Relation|Remark|Remove|removed|Require|Reserved|Reset|Resolve|Restart|return|Rewrite|Right|Ring|Rings|Saturate|Save|Scheme|Scope|Scopes|Search|SearchHead|SearchPattern|SearchRewrite|Section|Separate|Set|Setoid|Show|Signatures|Solve|Solver|Sort|Sortclass|Sorted|Spec|SProp|Step|Strategie
s|Strategy|String|struct|Structure|SubClass|Subgraph|SuchThat|Tactic|Term|TestCompile|then|Theorem|Time|Timeout|To|Transparent|Type|Typeclasses|Types|Typing|Undelimit|Undo|Unfocus|Unfocused|Unfold|Universe|Universes|UnOp|UnOpSpec|Unshelve|using|Variable|Variables|Variant|Verbose|View|Visibility|wf|where|with|Zify)\b/,number:/\b(?:0x[a-f0-9][a-f0-9_]*(?:\.[a-f0-9_]+)?(?:p[+-]?\d[\d_]*)?|\d[\d_]*(?:\.[\d_]+)?(?:e[+-]?\d[\d_]*)?)\b/i,punct:{pattern:/@\{|\{\||\[=|:>/,alias:"punctuation"},operator:/\/\\|\\\/|\.{2,3}|:{1,2}=|\*\*|[-=]>|<(?:->?|[+:=>]|<:)|>(?:=|->)|\|[-|]?|[-!%&*+/<=>?@^~']/,punctuation:/\.\(|`\(|@\{|`\{|\{\||\[=|:>|[:.,;(){}\[\]]/}}(e)}e.exports=t,t.displayName="coq",t.aliases=[]},80096(e,t,n){"use strict";var r=n(65806);function i(e){var t,n,i;e.register(r),t=e,n=/\b(?:alignas|alignof|asm|auto|bool|break|case|catch|char|char8_t|char16_t|char32_t|class|compl|concept|const|consteval|constexpr|constinit|const_cast|continue|co_await|co_return|co_yield|decltype|default|delete|do|double|dynamic_cast|else|enum|explicit|export|extern|final|float|for|friend|goto|if|import|inline|int|int8_t|int16_t|int32_t|int64_t|uint8_t|uint16_t|uint32_t|uint64_t|long|module|mutable|namespace|new|noexcept|nullptr|operator|override|private|protected|public|register|reinterpret_cast|requires|return|short|signed|sizeof|static|static_assert|static_cast|struct|switch|template|this|thread_local|throw|try|typedef|typeid|typename|union|unsigned|using|virtual|void|volatile|wchar_t|while)\b/,i=/\b(?!)\w+(?:\s*\.\s*\w+)*\b/.source.replace(//g,function(){return n.source}),t.languages.cpp=t.languages.extend("c",{"class-name":[{pattern:RegExp(/(\b(?:class|concept|enum|struct|typename)\s+)(?!)\w+/.source.replace(//g,function(){return 
n.source})),lookbehind:!0},/\b[A-Z]\w*(?=\s*::\s*\w+\s*\()/,/\b[A-Z_]\w*(?=\s*::\s*~\w+\s*\()/i,/\b\w+(?=\s*<(?:[^<>]|<(?:[^<>]|<[^<>]*>)*>)*>\s*::\s*\w+\s*\()/],keyword:n,number:{pattern:/(?:\b0b[01']+|\b0x(?:[\da-f']+(?:\.[\da-f']*)?|\.[\da-f']+)(?:p[+-]?[\d']+)?|(?:\b[\d']+(?:\.[\d']*)?|\B\.[\d']+)(?:e[+-]?[\d']+)?)[ful]{0,4}/i,greedy:!0},operator:/>>=?|<<=?|->|--|\+\+|&&|\|\||[?:~]|<=>|[-+*/%&|^!=<>]=?|\b(?:and|and_eq|bitand|bitor|not|not_eq|or|or_eq|xor|xor_eq)\b/,boolean:/\b(?:true|false)\b/}),t.languages.insertBefore("cpp","string",{module:{pattern:RegExp(/(\b(?:module|import)\s+)/.source+"(?:"+/"(?:\\(?:\r\n|[\s\S])|[^"\\\r\n])*"|<[^<>\r\n]*>/.source+"|"+/(?:\s*:\s*)?|:\s*/.source.replace(//g,function(){return i})+")"),lookbehind:!0,greedy:!0,inside:{string:/^[<"][\s\S]+/,operator:/:/,punctuation:/\./}},"raw-string":{pattern:/R"([^()\\ ]{0,16})\([\s\S]*?\)\1"/,alias:"string",greedy:!0}}),t.languages.insertBefore("cpp","keyword",{"generic-function":{pattern:/\b[a-z_]\w*\s*<(?:[^<>]|<(?:[^<>])*>)*>(?=\s*\()/i,inside:{function:/^\w+/,generic:{pattern:/<[\s\S]+/,alias:"class-name",inside:t.languages.cpp}}}}),t.languages.insertBefore("cpp","operator",{"double-colon":{pattern:/::/,alias:"punctuation"}}),t.languages.insertBefore("cpp","class-name",{"base-clause":{pattern:/(\b(?:class|struct)\s+\w+\s*:\s*)[^;{}"'\s]+(?:\s+[^;{}"'\s]+)*(?=\s*[;{])/,lookbehind:!0,greedy:!0,inside:t.languages.extend("cpp",{})}}),t.languages.insertBefore("inside","double-colon",{"class-name":/\b[a-z_]\w*\b(?!\s*::)/i},t.languages.cpp["base-clause"])}e.exports=i,i.displayName="cpp",i.aliases=[]},99176(e,t,n){"use strict";var r=n(56939);function i(e){var 
t;e.register(r),(t=e).languages.crystal=t.languages.extend("ruby",{keyword:[/\b(?:abstract|alias|as|asm|begin|break|case|class|def|do|else|elsif|end|ensure|enum|extend|for|fun|if|include|instance_sizeof|lib|macro|module|next|of|out|pointerof|private|protected|rescue|return|require|select|self|sizeof|struct|super|then|type|typeof|uninitialized|union|unless|until|when|while|with|yield|__DIR__|__END_LINE__|__FILE__|__LINE__)\b/,{pattern:/(\.\s*)(?:is_a|responds_to)\?/,lookbehind:!0}],number:/\b(?:0b[01_]*[01]|0o[0-7_]*[0-7]|0x[\da-fA-F_]*[\da-fA-F]|(?:\d(?:[\d_]*\d)?)(?:\.[\d_]*\d)?(?:[eE][+-]?[\d_]*\d)?)(?:_(?:[uif](?:8|16|32|64))?)?\b/}),t.languages.insertBefore("crystal","string",{attribute:{pattern:/@\[.+?\]/,alias:"attr-name",inside:{delimiter:{pattern:/^@\[|\]$/,alias:"tag"},rest:t.languages.crystal}},expansion:[{pattern:/\{\{.+?\}\}/,inside:{delimiter:{pattern:/^\{\{|\}\}$/,alias:"tag"},rest:t.languages.crystal}},{pattern:/\{%.+?%\}/,inside:{delimiter:{pattern:/^\{%|%\}$/,alias:"tag"},rest:t.languages.crystal}}]})}e.exports=i,i.displayName="crystal",i.aliases=[]},61958(e){"use strict";function t(e){!function(e){function t(e,t){return e.replace(/<<(\d+)>>/g,function(e,n){return"(?:"+t[+n]+")"})}function n(e,n,r){return RegExp(t(e,n),r||"")}function r(e,t){for(var n=0;n>/g,function(){return"(?:"+e+")"});return e.replace(/<>/g,"[^\\s\\S]")}var i={type:"bool byte char decimal double dynamic float int long object sbyte short string uint ulong ushort var void",typeDeclaration:"class enum interface struct",contextual:"add alias and ascending async await by descending from get global group into join let nameof not notnull on or orderby partial remove select set unmanaged value when where",other:"abstract as base break case catch checked const continue default delegate do else event explicit extern finally fixed for foreach goto if implicit in internal is lock namespace new null operator out override params private protected public readonly ref return sealed sizeof 
stackalloc static switch this throw try typeof unchecked unsafe using virtual volatile while yield"};function a(e){return"\\b(?:"+e.trim().replace(/ /g,"|")+")\\b"}var o=a(i.typeDeclaration),s=RegExp(a(i.type+" "+i.typeDeclaration+" "+i.contextual+" "+i.other)),u=a(i.typeDeclaration+" "+i.contextual+" "+i.other),c=a(i.type+" "+i.typeDeclaration+" "+i.other),l=r(/<(?:[^<>;=+\-*/%&|^]|<>)*>/.source,2),f=r(/\((?:[^()]|<>)*\)/.source,2),d=/@?\b[A-Za-z_]\w*\b/.source,h=t(/<<0>>(?:\s*<<1>>)?/.source,[d,l]),p=t(/(?!<<0>>)<<1>>(?:\s*\.\s*<<1>>)*/.source,[u,h]),b=/\[\s*(?:,\s*)*\]/.source,m=t(/<<0>>(?:\s*(?:\?\s*)?<<1>>)*(?:\s*\?)?/.source,[p,b]),g=t(/[^,()<>[\];=+\-*/%&|^]|<<0>>|<<1>>|<<2>>/.source,[l,f,b]),v=t(/\(<<0>>+(?:,<<0>>+)+\)/.source,[g]),y=t(/(?:<<0>>|<<1>>)(?:\s*(?:\?\s*)?<<2>>)*(?:\s*\?)?/.source,[v,p,b]),w={keyword:s,punctuation:/[<>()?,.:[\]]/},_=/'(?:[^\r\n'\\]|\\.|\\[Uux][\da-fA-F]{1,8})'/.source,E=/"(?:\\.|[^\\"\r\n])*"/.source,S=/@"(?:""|\\[\s\S]|[^\\"])*"(?!")/.source;e.languages.csharp=e.languages.extend("clike",{string:[{pattern:n(/(^|[^$\\])<<0>>/.source,[S]),lookbehind:!0,greedy:!0},{pattern:n(/(^|[^@$\\])<<0>>/.source,[E]),lookbehind:!0,greedy:!0},{pattern:RegExp(_),greedy:!0,alias:"character"}],"class-name":[{pattern:n(/(\busing\s+static\s+)<<0>>(?=\s*;)/.source,[p]),lookbehind:!0,inside:w},{pattern:n(/(\busing\s+<<0>>\s*=\s*)<<1>>(?=\s*;)/.source,[d,y]),lookbehind:!0,inside:w},{pattern:n(/(\busing\s+)<<0>>(?=\s*=)/.source,[d]),lookbehind:!0},{pattern:n(/(\b<<0>>\s+)<<1>>/.source,[o,h]),lookbehind:!0,inside:w},{pattern:n(/(\bcatch\s*\(\s*)<<0>>/.source,[p]),lookbehind:!0,inside:w},{pattern:n(/(\bwhere\s+)<<0>>/.source,[d]),lookbehind:!0},{pattern:n(/(\b(?:is(?:\s+not)?|as)\s+)<<0>>/.source,[m]),lookbehind:!0,inside:w},{pattern:n(/\b<<0>>(?=\s+(?!<<1>>)<<2>>(?:\s*[=,;:{)\]]|\s+(?:in|when)\b))/.source,[y,c,d]),inside:w}],keyword:s,number:/(?:\b0(?:x[\da-f_]*[\da-f]|b[01_]*[01])|(?:\B\.\d+(?:_+\d+)*|\b\d+(?:_+\d+)*(?:\.\d+(?:_+\d+)*)?)(?:e[-+]?\d+(?:_+
\d+)*)?)(?:ul|lu|[dflmu])?\b/i,operator:/>>=?|<<=?|[-=]>|([-+&|])\1|~|\?\?=?|[-+*/%&|^!=<>]=?/,punctuation:/\?\.?|::|[{}[\];(),.:]/}),e.languages.insertBefore("csharp","number",{range:{pattern:/\.\./,alias:"operator"}}),e.languages.insertBefore("csharp","punctuation",{"named-parameter":{pattern:n(/([(,]\s*)<<0>>(?=\s*:)/.source,[d]),lookbehind:!0,alias:"punctuation"}}),e.languages.insertBefore("csharp","class-name",{namespace:{pattern:n(/(\b(?:namespace|using)\s+)<<0>>(?:\s*\.\s*<<0>>)*(?=\s*[;{])/.source,[d]),lookbehind:!0,inside:{punctuation:/\./}},"type-expression":{pattern:n(/(\b(?:default|typeof|sizeof)\s*\(\s*(?!\s))(?:[^()\s]|\s(?!\s)|<<0>>)*(?=\s*\))/.source,[f]),lookbehind:!0,alias:"class-name",inside:w},"return-type":{pattern:n(/<<0>>(?=\s+(?:<<1>>\s*(?:=>|[({]|\.\s*this\s*\[)|this\s*\[))/.source,[y,p]),inside:w,alias:"class-name"},"constructor-invocation":{pattern:n(/(\bnew\s+)<<0>>(?=\s*[[({])/.source,[y]),lookbehind:!0,inside:w,alias:"class-name"},"generic-method":{pattern:n(/<<0>>\s*<<1>>(?=\s*\()/.source,[d,l]),inside:{function:n(/^<<0>>/.source,[d]),generic:{pattern:RegExp(l),alias:"class-name",inside:w}}},"type-list":{pattern:n(/\b((?:<<0>>\s+<<1>>|where\s+<<2>>)\s*:\s*)(?:<<3>>|<<4>>)(?:\s*,\s*(?:<<3>>|<<4>>))*(?=\s*(?:where|[{;]|=>|$))/.source,[o,h,d,y,s.source]),lookbehind:!0,inside:{keyword:s,"class-name":{pattern:RegExp(y),greedy:!0,inside:w},punctuation:/,/}},preprocessor:{pattern:/(^[\t ]*)#.*/m,lookbehind:!0,alias:"property",inside:{directive:{pattern:/(#)\b(?:define|elif|else|endif|endregion|error|if|line|pragma|region|undef|warning)\b/,lookbehind:!0,alias:"keyword"}}}});var 
k=E+"|"+_,x=t(/\/(?![*/])|\/\/[^\r\n]*[\r\n]|\/\*(?:[^*]|\*(?!\/))*\*\/|<<0>>/.source,[k]),T=r(t(/[^"'/()]|<<0>>|\(<>*\)/.source,[x]),2),M=/\b(?:assembly|event|field|method|module|param|property|return|type)\b/.source,O=t(/<<0>>(?:\s*\(<<1>>*\))?/.source,[p,T]);e.languages.insertBefore("csharp","class-name",{attribute:{pattern:n(/((?:^|[^\s\w>)?])\s*\[\s*)(?:<<0>>\s*:\s*)?<<1>>(?:\s*,\s*<<1>>)*(?=\s*\])/.source,[M,O]),lookbehind:!0,greedy:!0,inside:{target:{pattern:n(/^<<0>>(?=\s*:)/.source,[M]),alias:"keyword"},"attribute-arguments":{pattern:n(/\(<<0>>*\)/.source,[T]),inside:e.languages.csharp},"class-name":{pattern:RegExp(p),inside:{punctuation:/\./}},punctuation:/[:,]/}}});var A=/:[^}\r\n]+/.source,L=r(t(/[^"'/()]|<<0>>|\(<>*\)/.source,[x]),2),C=t(/\{(?!\{)(?:(?![}:])<<0>>)*<<1>>?\}/.source,[L,A]),I=r(t(/[^"'/()]|\/(?!\*)|\/\*(?:[^*]|\*(?!\/))*\*\/|<<0>>|\(<>*\)/.source,[k]),2),D=t(/\{(?!\{)(?:(?![}:])<<0>>)*<<1>>?\}/.source,[I,A]);function N(t,r){return{interpolation:{pattern:n(/((?:^|[^{])(?:\{\{)*)<<0>>/.source,[t]),lookbehind:!0,inside:{"format-string":{pattern:n(/(^\{(?:(?![}:])<<0>>)*)<<1>>(?=\}$)/.source,[r,A]),lookbehind:!0,inside:{punctuation:/^:/}},punctuation:/^\{|\}$/,expression:{pattern:/[\s\S]+/,alias:"language-csharp",inside:e.languages.csharp}}},string:/[\s\S]+/}}e.languages.insertBefore("csharp","string",{"interpolation-string":[{pattern:n(/(^|[^\\])(?:\$@|@\$)"(?:""|\\[\s\S]|\{\{|<<0>>|[^\\{"])*"/.source,[C]),lookbehind:!0,greedy:!0,inside:N(C,L)},{pattern:n(/(^|[^@\\])\$"(?:\\.|\{\{|<<0>>|[^\\"{])*"/.source,[D]),lookbehind:!0,greedy:!0,inside:N(D,I)}]})}(e),e.languages.dotnet=e.languages.cs=e.languages.csharp}e.exports=t,t.displayName="csharp",t.aliases=["dotnet","cs"]},65447(e){"use strict";function 
t(e){e.languages.csp={directive:{pattern:/(^|[^-\da-z])(?:base-uri|block-all-mixed-content|(?:child|connect|default|font|frame|img|manifest|media|object|prefetch|script|style|worker)-src|disown-opener|form-action|frame-(?:ancestors|options)|input-protection(?:-(?:clip|selectors))?|navigate-to|plugin-types|policy-uri|referrer|reflected-xss|report-(?:to|uri)|require-sri-for|sandbox|(?:script|style)-src-(?:attr|elem)|upgrade-insecure-requests)(?=[^-\da-z]|$)/i,lookbehind:!0,alias:"keyword"},safe:{pattern:/'(?:deny|none|report-sample|self|strict-dynamic|top-only|(?:nonce|sha(?:256|384|512))-[-+/\w=]+)'/i,alias:"selector"},unsafe:{pattern:/(?:'unsafe-(?:allow-redirects|dynamic|eval|hash-attributes|hashed-attributes|hashes|inline)'|\*)/i,alias:"function"}}}e.exports=t,t.displayName="csp",t.aliases=[]},4762(e){"use strict";function t(e){var t,n,r,i,a;r=/("|')(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,(t=e).languages.css.selector={pattern:t.languages.css.selector.pattern,lookbehind:!0,inside:n={"pseudo-element":/:(?:after|before|first-letter|first-line|selection)|::[-\w]+/,"pseudo-class":/:[-\w]+/,class:/\.[-\w]+/,id:/#[-\w]+/,attribute:{pattern:RegExp("\\[(?:[^[\\]\"']|"+r.source+")*\\]"),greedy:!0,inside:{punctuation:/^\[|\]$/,"case-sensitivity":{pattern:/(\s)[si]$/i,lookbehind:!0,alias:"keyword"},namespace:{pattern:/^(\s*)(?:(?!\s)[-*\w\xA0-\uFFFF])*\|(?!=)/,lookbehind:!0,inside:{punctuation:/\|$/}},"attr-name":{pattern:/^(\s*)(?:(?!\s)[-\w\xA0-\uFFFF])+/,lookbehind:!0},"attr-value":[r,{pattern:/(=\s*)(?:(?!\s)[-\w\xA0-\uFFFF])+(?=\s*$)/,lookbehind:!0}],operator:/[|~*^$]?=/}},"n-th":[{pattern:/(\(\s*)[+-]?\d*[\dn](?:\s*[+-]\s*\d+)?(?=\s*\))/,lookbehind:!0,inside:{number:/[\dn]+/,operator:/[+-]/}},{pattern:/(\(\s*)(?:even|odd)(?=\s*\))/i,lookbehind:!0}],combinator:/>|\+|~|\|\|/,punctuation:/[(),]/}},t.languages.css.atrule.inside["selector-function-argument"].inside=n,t.languages.insertBefore("css","property",{variable:{pattern:/(^|[^-\w\xA0-\uFFFF])--(?!\s)[-_a-z\xA0-\uFFF
F](?:(?!\s)[-\w\xA0-\uFFFF])*/i,lookbehind:!0}}),i={pattern:/(\b\d+)(?:%|[a-z]+\b)/,lookbehind:!0},a={pattern:/(^|[^\w.-])-?(?:\d+(?:\.\d+)?|\.\d+)/,lookbehind:!0},t.languages.insertBefore("css","function",{operator:{pattern:/(\s)[+\-*\/](?=\s)/,lookbehind:!0},hexcode:{pattern:/\B#[\da-f]{3,8}\b/i,alias:"color"},color:[/\b(?:AliceBlue|AntiqueWhite|Aqua|Aquamarine|Azure|Beige|Bisque|Black|BlanchedAlmond|Blue|BlueViolet|Brown|BurlyWood|CadetBlue|Chartreuse|Chocolate|Coral|CornflowerBlue|Cornsilk|Crimson|Cyan|DarkBlue|DarkCyan|DarkGoldenRod|DarkGr[ae]y|DarkGreen|DarkKhaki|DarkMagenta|DarkOliveGreen|DarkOrange|DarkOrchid|DarkRed|DarkSalmon|DarkSeaGreen|DarkSlateBlue|DarkSlateGr[ae]y|DarkTurquoise|DarkViolet|DeepPink|DeepSkyBlue|DimGr[ae]y|DodgerBlue|FireBrick|FloralWhite|ForestGreen|Fuchsia|Gainsboro|GhostWhite|Gold|GoldenRod|Gr[ae]y|Green|GreenYellow|HoneyDew|HotPink|IndianRed|Indigo|Ivory|Khaki|Lavender|LavenderBlush|LawnGreen|LemonChiffon|LightBlue|LightCoral|LightCyan|LightGoldenRodYellow|LightGr[ae]y|LightGreen|LightPink|LightSalmon|LightSeaGreen|LightSkyBlue|LightSlateGr[ae]y|LightSteelBlue|LightYellow|Lime|LimeGreen|Linen|Magenta|Maroon|MediumAquaMarine|MediumBlue|MediumOrchid|MediumPurple|MediumSeaGreen|MediumSlateBlue|MediumSpringGreen|MediumTurquoise|MediumVioletRed|MidnightBlue|MintCream|MistyRose|Moccasin|NavajoWhite|Navy|OldLace|Olive|OliveDrab|Orange|OrangeRed|Orchid|PaleGoldenRod|PaleGreen|PaleTurquoise|PaleVioletRed|PapayaWhip|PeachPuff|Peru|Pink|Plum|PowderBlue|Purple|Red|RosyBrown|RoyalBlue|SaddleBrown|Salmon|SandyBrown|SeaGreen|SeaShell|Sienna|Silver|SkyBlue|SlateBlue|SlateGr[ae]y|Snow|SpringGreen|SteelBlue|Tan|Teal|Thistle|Tomato|Transparent|Turquoise|Violet|Wheat|White|WhiteSmoke|Yellow|YellowGreen)\b/i,{pattern:/\b(?:rgb|hsl)\(\s*\d{1,3}\s*,\s*\d{1,3}%?\s*,\s*\d{1,3}%?\s*\)\B|\b(?:rgb|hsl)a\(\s*\d{1,3}\s*,\s*\d{1,3}%?\s*,\s*\d{1,3}%?\s*,\s*(?:0|0?\.\d+|1)\s*\)\B/i,inside:{unit:i,number:a,function:/[\w-]+(?=\()/,punctuation:/[(),]/}}],entity:/\\[\da
-f]{1,8}/i,unit:i,number:a})}e.exports=t,t.displayName="cssExtras",t.aliases=[]},12049(e){"use strict";function t(e){var t,n,r;n=/(?:"(?:\\(?:\r\n|[\s\S])|[^"\\\r\n])*"|'(?:\\(?:\r\n|[\s\S])|[^'\\\r\n])*')/,(t=e).languages.css={comment:/\/\*[\s\S]*?\*\//,atrule:{pattern:/@[\w-](?:[^;{\s]|\s+(?![\s{]))*(?:;|(?=\s*\{))/,inside:{rule:/^@[\w-]+/,"selector-function-argument":{pattern:/(\bselector\s*\(\s*(?![\s)]))(?:[^()\s]|\s+(?![\s)])|\((?:[^()]|\([^()]*\))*\))+(?=\s*\))/,lookbehind:!0,alias:"selector"},keyword:{pattern:/(^|[^\w-])(?:and|not|only|or)(?![\w-])/,lookbehind:!0}}},url:{pattern:RegExp("\\burl\\((?:"+n.source+"|"+/(?:[^\\\r\n()"']|\\[\s\S])*/.source+")\\)","i"),greedy:!0,inside:{function:/^url/i,punctuation:/^\(|\)$/,string:{pattern:RegExp("^"+n.source+"$"),alias:"url"}}},selector:{pattern:RegExp("(^|[{}\\s])[^{}\\s](?:[^{};\"'\\s]|\\s+(?![\\s{])|"+n.source+")*(?=\\s*\\{)"),lookbehind:!0},string:{pattern:n,greedy:!0},property:{pattern:/(^|[^-\w\xA0-\uFFFF])(?!\s)[-_a-z\xA0-\uFFFF](?:(?!\s)[-\w\xA0-\uFFFF])*(?=\s*:)/i,lookbehind:!0},important:/!important\b/i,function:{pattern:/(^|[^-a-z0-9])[-a-z0-9]+(?=\()/i,lookbehind:!0},punctuation:/[(){};:,]/},t.languages.css.atrule.inside.rest=t.languages.css,(r=t.languages.markup)&&(r.tag.addInlined("style","css"),r.tag.addAttribute("style","css"))}e.exports=t,t.displayName="css",t.aliases=[]},78090(e){"use strict";function t(e){e.languages.csv={value:/[^\r\n,"]+|"(?:[^"]|"")*"(?!")/,punctuation:/,/}}e.exports=t,t.displayName="csv",t.aliases=[]},40315(e){"use strict";function 
t(e){e.languages.cypher={comment:/\/\/.*/,string:{pattern:/"(?:[^"\\\r\n]|\\.)*"|'(?:[^'\\\r\n]|\\.)*'/,greedy:!0},"class-name":{pattern:/(:\s*)(?:\w+|`(?:[^`\\\r\n])*`)(?=\s*[{):])/,lookbehind:!0,greedy:!0},relationship:{pattern:/(-\[\s*(?:\w+\s*|`(?:[^`\\\r\n])*`\s*)?:\s*|\|\s*:\s*)(?:\w+|`(?:[^`\\\r\n])*`)/,lookbehind:!0,greedy:!0,alias:"property"},identifier:{pattern:/`(?:[^`\\\r\n])*`/,greedy:!0,alias:"symbol"},variable:/\$\w+/,keyword:/\b(?:ADD|ALL|AND|AS|ASC|ASCENDING|ASSERT|BY|CALL|CASE|COMMIT|CONSTRAINT|CONTAINS|CREATE|CSV|DELETE|DESC|DESCENDING|DETACH|DISTINCT|DO|DROP|ELSE|END|ENDS|EXISTS|FOR|FOREACH|IN|INDEX|IS|JOIN|KEY|LIMIT|LOAD|MANDATORY|MATCH|MERGE|NODE|NOT|OF|ON|OPTIONAL|OR|ORDER(?=\s+BY)|PERIODIC|REMOVE|REQUIRE|RETURN|SCALAR|SCAN|SET|SKIP|START|STARTS|THEN|UNION|UNIQUE|UNWIND|USING|WHEN|WHERE|WITH|XOR|YIELD)\b/i,function:/\b\w+\b(?=\s*\()/,boolean:/\b(?:true|false|null)\b/i,number:/\b(?:0x[\da-fA-F]+|\d+(?:\.\d+)?(?:[eE][+-]?\d+)?)\b/,operator:/:|<--?|--?>?|<>|=~?|[<>]=?|[+*/%^|]|\.\.\.?/,punctuation:/[()[\]{},;.]/}}e.exports=t,t.displayName="cypher",t.aliases=[]},7902(e){"use strict";function 
t(e){e.languages.d=e.languages.extend("clike",{comment:[{pattern:/^\s*#!.+/,greedy:!0},{pattern:RegExp(/(^|[^\\])/.source+"(?:"+[/\/\+(?:\/\+(?:[^+]|\+(?!\/))*\+\/|(?!\/\+)[\s\S])*?\+\//.source,/\/\/.*/.source,/\/\*[\s\S]*?\*\//.source].join("|")+")"),lookbehind:!0,greedy:!0}],string:[{pattern:RegExp([/\b[rx]"(?:\\[\s\S]|[^\\"])*"[cwd]?/.source,/\bq"(?:\[[\s\S]*?\]|\([\s\S]*?\)|<[\s\S]*?>|\{[\s\S]*?\})"/.source,/\bq"((?!\d)\w+)$[\s\S]*?^\1"/.source,/\bq"(.)[\s\S]*?\2"/.source,/'(?:\\(?:\W|\w+)|[^\\])'/.source,/(["`])(?:\\[\s\S]|(?!\3)[^\\])*\3[cwd]?/.source].join("|"),"m"),greedy:!0},{pattern:/\bq\{(?:\{[^{}]*\}|[^{}])*\}/,greedy:!0,alias:"token-string"}],keyword:/\$|\b(?:abstract|alias|align|asm|assert|auto|body|bool|break|byte|case|cast|catch|cdouble|cent|cfloat|char|class|const|continue|creal|dchar|debug|default|delegate|delete|deprecated|do|double|else|enum|export|extern|false|final|finally|float|for|foreach|foreach_reverse|function|goto|idouble|if|ifloat|immutable|import|inout|int|interface|invariant|ireal|lazy|long|macro|mixin|module|new|nothrow|null|out|override|package|pragma|private|protected|public|pure|real|ref|return|scope|shared|short|static|struct|super|switch|synchronized|template|this|throw|true|try|typedef|typeid|typeof|ubyte|ucent|uint|ulong|union|unittest|ushort|version|void|volatile|wchar|while|with|__(?:(?:FILE|MODULE|LINE|FUNCTION|PRETTY_FUNCTION|DATE|EOF|TIME|TIMESTAMP|VENDOR|VERSION)__|gshared|traits|vector|parameters)|string|wstring|dstring|size_t|ptrdiff_t)\b/,number:[/\b0x\.?[a-f\d_]+(?:(?!\.\.)\.[a-f\d_]*)?(?:p[+-]?[a-f\d_]+)?[ulfi]{0,4}/i,{pattern:/((?:\.\.)?)(?:\b0b\.?|\b|\.)\d[\d_]*(?:(?!\.\.)\.[\d_]*)?(?:e[+-]?\d[\d_]*)?[ulfi]{0,4}/i,lookbehind:!0}],operator:/\|[|=]?|&[&=]?|\+[+=]?|-[-=]?|\.?\.\.|=[>=]?|!(?:i[ns]\b|<>?=?|>=?|=)?|\bi[ns]\b|(?:<[<>]?|>>?>?|\^\^|[*\/%^~])=?/}),e.languages.insertBefore("d","keyword",{property:/\B@\w*/}),e.languages.insertBefore("d","function",{register:{pattern:/\b(?:[ABCD][LHX]|E[ABCD]X|E?(?:BP|SP|DI|SI)
|[ECSDGF]S|CR[0234]|DR[012367]|TR[3-7]|X?MM[0-7]|R[ABCD]X|[BS]PL|R[BS]P|[DS]IL|R[DS]I|R(?:[89]|1[0-5])[BWD]?|XMM(?:[89]|1[0-5])|YMM(?:1[0-5]|\d))\b|\bST(?:\([0-7]\)|\b)/,alias:"variable"}})}e.exports=t,t.displayName="d",t.aliases=[]},28651(e){"use strict";function t(e){var t,n,r,i;t=e,n=[/\b(?:async|sync|yield)\*/,/\b(?:abstract|assert|async|await|break|case|catch|class|const|continue|covariant|default|deferred|do|dynamic|else|enum|export|extension|external|extends|factory|final|finally|for|get|hide|if|implements|interface|import|in|library|mixin|new|null|on|operator|part|rethrow|return|set|show|static|super|switch|sync|this|throw|try|typedef|var|void|while|with|yield)\b/],i={pattern:RegExp((r=/(^|[^\w.])(?:[a-z]\w*\s*\.\s*)*(?:[A-Z]\w*\s*\.\s*)*/.source)+/[A-Z](?:[\d_A-Z]*[a-z]\w*)?\b/.source),lookbehind:!0,inside:{namespace:{pattern:/^[a-z]\w*(?:\s*\.\s*[a-z]\w*)*(?:\s*\.)?/,inside:{punctuation:/\./}}}},t.languages.dart=t.languages.extend("clike",{string:[{pattern:/r?("""|''')[\s\S]*?\1/,greedy:!0},{pattern:/r?(["'])(?:\\.|(?!\1)[^\\\r\n])*\1/,greedy:!0}],"class-name":[i,{pattern:RegExp(r+/[A-Z]\w*(?=\s+\w+\s*[;,=()])/.source),lookbehind:!0,inside:i.inside}],keyword:n,operator:/\bis!|\b(?:as|is)\b|\+\+|--|&&|\|\||<<=?|>>=?|~(?:\/=?)?|[+\-*\/%&^|=!<>]=?|\?/}),t.languages.insertBefore("dart","function",{metadata:{pattern:/@\w+/,alias:"symbol"}}),t.languages.insertBefore("dart","class-name",{generics:{pattern:/<(?:[\w\s,.&?]|<(?:[\w\s,.&?]|<(?:[\w\s,.&?]|<[\w\s,.&?]*>)*>)*>)*>/,inside:{"class-name":i,keyword:n,punctuation:/[<>(),.:]/,operator:/[?&|]/}}})}e.exports=t,t.displayName="dart",t.aliases=[]},55579(e){"use strict";function t(e){var 
t;(t=e).languages.dataweave={url:/\b[A-Za-z]+:\/\/[\w/:.?=&-]+|\burn:[\w:.?=&-]+/,property:{pattern:/(?:\b\w+#)?(?:"(?:\\.|[^\\"\r\n])*"|\b\w+)(?=\s*[:@])/,greedy:!0},string:{pattern:/(["'`])(?:\\[\s\S]|(?!\1)[^\\])*\1/,greedy:!0},"mime-type":/\b(?:text|audio|video|application|multipart|image)\/[\w+-]+/,date:{pattern:/\|[\w:+-]+\|/,greedy:!0},comment:[{pattern:/(^|[^\\])\/\*[\s\S]*?(?:\*\/|$)/,lookbehind:!0,greedy:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0,greedy:!0}],regex:{pattern:/\/(?:[^\\\/\r\n]|\\[^\r\n])+\//,greedy:!0},function:/\b[A-Z_]\w*(?=\s*\()/i,number:/-?\b\d+(?:\.\d+)?(?:e[+-]?\d+)?\b/i,punctuation:/[{}[\];(),.:@]/,operator:/<<|>>|->|[<>~=]=?|!=|--?-?|\+\+?|!|\?/,boolean:/\b(?:true|false)\b/,keyword:/\b(?:match|input|output|ns|type|update|null|if|else|using|unless|at|is|as|case|do|fun|var|not|and|or)\b/}}e.exports=t,t.displayName="dataweave",t.aliases=[]},93685(e){"use strict";function t(e){e.languages.dax={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:--|\/\/).*)/,lookbehind:!0},"data-field":{pattern:/'(?:[^']|'')*'(?!')(?:\[[ \w\xA0-\uFFFF]+\])?|\w+\[[ \w\xA0-\uFFFF]+\]/,alias:"symbol"},measure:{pattern:/\[[ 
\w\xA0-\uFFFF]+\]/,alias:"constant"},string:{pattern:/"(?:[^"]|"")*"(?!")/,greedy:!0},function:/\b(?:ABS|ACOS|ACOSH|ACOT|ACOTH|ADDCOLUMNS|ADDMISSINGITEMS|ALL|ALLCROSSFILTERED|ALLEXCEPT|ALLNOBLANKROW|ALLSELECTED|AND|APPROXIMATEDISTINCTCOUNT|ASIN|ASINH|ATAN|ATANH|AVERAGE|AVERAGEA|AVERAGEX|BETA\.DIST|BETA\.INV|BLANK|CALCULATE|CALCULATETABLE|CALENDAR|CALENDARAUTO|CEILING|CHISQ\.DIST|CHISQ\.DIST\.RT|CHISQ\.INV|CHISQ\.INV\.RT|CLOSINGBALANCEMONTH|CLOSINGBALANCEQUARTER|CLOSINGBALANCEYEAR|COALESCE|COMBIN|COMBINA|COMBINEVALUES|CONCATENATE|CONCATENATEX|CONFIDENCE\.NORM|CONFIDENCE\.T|CONTAINS|CONTAINSROW|CONTAINSSTRING|CONTAINSSTRINGEXACT|CONVERT|COS|COSH|COT|COTH|COUNT|COUNTA|COUNTAX|COUNTBLANK|COUNTROWS|COUNTX|CROSSFILTER|CROSSJOIN|CURRENCY|CURRENTGROUP|CUSTOMDATA|DATATABLE|DATE|DATEADD|DATEDIFF|DATESBETWEEN|DATESINPERIOD|DATESMTD|DATESQTD|DATESYTD|DATEVALUE|DAY|DEGREES|DETAILROWS|DISTINCT|DISTINCTCOUNT|DISTINCTCOUNTNOBLANK|DIVIDE|EARLIER|EARLIEST|EDATE|ENDOFMONTH|ENDOFQUARTER|ENDOFYEAR|EOMONTH|ERROR|EVEN|EXACT|EXCEPT|EXP|EXPON\.DIST|FACT|FALSE|FILTER|FILTERS|FIND|FIRSTDATE|FIRSTNONBLANK|FIRSTNONBLANKVALUE|FIXED|FLOOR|FORMAT|GCD|GENERATE|GENERATEALL|GENERATESERIES|GEOMEAN|GEOMEANX|GROUPBY|HASONEFILTER|HASONEVALUE|HOUR|IF|IF\.EAGER|IFERROR|IGNORE|INT|INTERSECT|ISBLANK|ISCROSSFILTERED|ISEMPTY|ISERROR|ISEVEN|ISFILTERED|ISINSCOPE|ISLOGICAL|ISNONTEXT|ISNUMBER|ISO\.CEILING|ISODD|ISONORAFTER|ISSELECTEDMEASURE|ISSUBTOTAL|ISTEXT|KEEPFILTERS|KEYWORDMATCH|LASTDATE|LASTNONBLANK|LASTNONBLANKVALUE|LCM|LEFT|LEN|LN|LOG|LOG10|LOOKUPVALUE|LOWER|MAX|MAXA|MAXX|MEDIAN|MEDIANX|MID|MIN|MINA|MINUTE|MINX|MOD|MONTH|MROUND|NATURALINNERJOIN|NATURALLEFTOUTERJOIN|NEXTDAY|NEXTMONTH|NEXTQUARTER|NEXTYEAR|NONVISUAL|NORM\.DIST|NORM\.INV|NORM\.S\.DIST|NORM\.S\.INV|NOT|NOW|ODD|OPENINGBALANCEMONTH|OPENINGBALANCEQUARTER|OPENINGBALANCEYEAR|OR|PARALLELPERIOD|PATH|PATHCONTAINS|PATHITEM|PATHITEMREVERSE|PATHLENGTH|PERCENTILE\.EXC|PERCENTILE\.INC|PERCENTILEX\.EXC|PERCENTILEX\.INC|PERMUT|PI|POISSON\.DIST|POWER|PREVIOUSDA
Y|PREVIOUSMONTH|PREVIOUSQUARTER|PREVIOUSYEAR|PRODUCT|PRODUCTX|QUARTER|QUOTIENT|RADIANS|RAND|RANDBETWEEN|RANK\.EQ|RANKX|RELATED|RELATEDTABLE|REMOVEFILTERS|REPLACE|REPT|RIGHT|ROLLUP|ROLLUPADDISSUBTOTAL|ROLLUPGROUP|ROLLUPISSUBTOTAL|ROUND|ROUNDDOWN|ROUNDUP|ROW|SAMEPERIODLASTYEAR|SAMPLE|SEARCH|SECOND|SELECTCOLUMNS|SELECTEDMEASURE|SELECTEDMEASUREFORMATSTRING|SELECTEDMEASURENAME|SELECTEDVALUE|SIGN|SIN|SINH|SQRT|SQRTPI|STARTOFMONTH|STARTOFQUARTER|STARTOFYEAR|STDEV\.P|STDEV\.S|STDEVX\.P|STDEVX\.S|SUBSTITUTE|SUBSTITUTEWITHINDEX|SUM|SUMMARIZE|SUMMARIZECOLUMNS|SUMX|SWITCH|T\.DIST|T\.DIST\.2T|T\.DIST\.RT|T\.INV|T\.INV\.2T|TAN|TANH|TIME|TIMEVALUE|TODAY|TOPN|TOPNPERLEVEL|TOPNSKIP|TOTALMTD|TOTALQTD|TOTALYTD|TREATAS|TRIM|TRUE|TRUNC|UNICHAR|UNICODE|UNION|UPPER|USERELATIONSHIP|USERNAME|USEROBJECTID|USERPRINCIPALNAME|UTCNOW|UTCTODAY|VALUE|VALUES|VAR\.P|VAR\.S|VARX\.P|VARX\.S|WEEKDAY|WEEKNUM|XIRR|XNPV|YEAR|YEARFRAC)(?=\s*\()/i,keyword:/\b(?:DEFINE|MEASURE|EVALUATE|ORDER\s+BY|RETURN|VAR|START\s+AT|ASC|DESC)\b/i,boolean:{pattern:/\b(?:TRUE|FALSE|NULL)\b/i,alias:"constant"},number:/\b\d+(?:\.\d*)?|\B\.\d+\b/i,operator:/:=|[-+*\/=^]|&&?|\|\||<(?:=>?|<|>)?|>[>=]?|\b(?:IN|NOT)\b/i,punctuation:/[;\[\](){}`,.]/}}e.exports=t,t.displayName="dax",t.aliases=[]},13934(e){"use strict";function 
t(e){e.languages.dhall={comment:/--.*|\{-(?:[^-{]|-(?!\})|\{(?!-)|\{-(?:[^-{]|-(?!\})|\{(?!-))*-\})*-\}/,string:{pattern:/"(?:[^"\\]|\\.)*"|''(?:[^']|'(?!')|'''|''\$\{)*''(?!'|\$)/,greedy:!0,inside:{interpolation:{pattern:/\$\{[^{}]*\}/,inside:{expression:{pattern:/(^\$\{)[\s\S]+(?=\}$)/,lookbehind:!0,alias:"language-dhall",inside:null},punctuation:/\$\{|\}/}}}},label:{pattern:/`[^`]*`/,greedy:!0},url:{pattern:/\bhttps?:\/\/[\w.:%!$&'*+;=@~-]+(?:\/[\w.:%!$&'*+;=@~-]*)*(?:\?[/?\w.:%!$&'*+;=@~-]*)?/,greedy:!0},env:{pattern:/\benv:(?:(?!\d)\w+|"(?:[^"\\=]|\\.)*")/,greedy:!0,inside:{function:/^env/,operator:/^:/,variable:/[\s\S]+/}},hash:{pattern:/\bsha256:[\da-fA-F]{64}\b/,inside:{function:/sha256/,operator:/:/,number:/[\da-fA-F]{64}/}},keyword:/\b(?:as|assert|else|forall|if|in|let|merge|missing|then|toMap|using|with)\b|\u2200/,builtin:/\b(?:Some|None)\b/,boolean:/\b(?:False|True)\b/,number:/\bNaN\b|-?\bInfinity\b|[+-]?\b(?:0x[\da-fA-F]+|\d+(?:\.\d+)?(?:e[+-]?\d+)?)\b/,operator:/\/\\|\/\/\\\\|&&|\|\||===|[!=]=|\/\/|->|\+\+|::|[+*#@=:?<>|\\\u2227\u2a53\u2261\u2afd\u03bb\u2192]/,punctuation:/\.\.|[{}\[\](),./]/,"class-name":/\b[A-Z]\w*\b/},e.languages.dhall.string.inside.interpolation.inside.expression.inside=e.languages.dhall}e.exports=t,t.displayName="dhall",t.aliases=[]},93336(e){"use strict";function t(e){var t,n;(t=e).languages.diff={coord:[/^(?:\*{3}|-{3}|\+{3}).*$/m,/^@@.*@@$/m,/^\d.*$/m]},Object.keys(n={"deleted-sign":"-","deleted-arrow":"<","inserted-sign":"+","inserted-arrow":">",unchanged:" ",diff:"!"}).forEach(function(e){var r=n[e],i=[];/^\w+$/.test(e)||i.push(/\w+/.exec(e)[0]),"diff"===e&&i.push("bold"),t.languages.diff[e]={pattern:RegExp("^(?:["+r+"].*(?:\r\n?|\n|(?![\\s\\S])))+","m"),alias:i,inside:{line:{pattern:/(.)(?=[\s\S]).*(?:\r\n?|\n)?/,lookbehind:!0},prefix:{pattern:/[\s\S]/,alias:/\w+/.exec(e)[0]}}}}),Object.defineProperty(t.languages.diff,"PREFIXES",{value:n})}e.exports=t,t.displayName="diff",t.aliases=[]},13294(e,t,n){"use strict";var 
r=n(93205);function i(e){var t,n,i;e.register(r),(t=e).languages.django={comment:/^\{#[\s\S]*?#\}$/,tag:{pattern:/(^\{%[+-]?\s*)\w+/,lookbehind:!0,alias:"keyword"},delimiter:{pattern:/^\{[{%][+-]?|[+-]?[}%]\}$/,alias:"punctuation"},string:{pattern:/("|')(?:\\.|(?!\1)[^\\\r\n])*\1/,greedy:!0},filter:{pattern:/(\|)\w+/,lookbehind:!0,alias:"function"},test:{pattern:/(\bis\s+(?:not\s+)?)(?!not\b)\w+/,lookbehind:!0,alias:"function"},function:/\b[a-z_]\w+(?=\s*\()/i,keyword:/\b(?:and|as|by|else|for|if|import|in|is|loop|not|or|recursive|with|without)\b/,operator:/[-+%=]=?|!=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,number:/\b\d+(?:\.\d+)?\b/,boolean:/[Tt]rue|[Ff]alse|[Nn]one/,variable:/\b\w+?\b/,punctuation:/[{}[\](),.:;]/},n=/\{\{[\s\S]*?\}\}|\{%[\s\S]*?%\}|\{#[\s\S]*?#\}/g,i=t.languages["markup-templating"],t.hooks.add("before-tokenize",function(e){i.buildPlaceholders(e,"django",n)}),t.hooks.add("after-tokenize",function(e){i.tokenizePlaceholders(e,"django")}),t.languages.jinja2=t.languages.django,t.hooks.add("before-tokenize",function(e){i.buildPlaceholders(e,"jinja2",n)}),t.hooks.add("after-tokenize",function(e){i.tokenizePlaceholders(e,"jinja2")})}e.exports=i,i.displayName="django",i.aliases=["jinja2"]},38223(e){"use strict";function t(e){e.languages["dns-zone-file"]={comment:/;.*/,string:{pattern:/"(?:\\.|[^"\\\r\n])*"/,greedy:!0},variable:[{pattern:/(^\$ORIGIN[ 
\t]+)\S+/m,lookbehind:!0},{pattern:/(^|\s)@(?=\s|$)/,lookbehind:!0}],keyword:/^\$(?:ORIGIN|INCLUDE|TTL)(?=\s|$)/m,class:{pattern:/(^|\s)(?:IN|CH|CS|HS)(?=\s|$)/,lookbehind:!0,alias:"keyword"},type:{pattern:/(^|\s)(?:A|A6|AAAA|AFSDB|APL|ATMA|CAA|CDNSKEY|CDS|CERT|CNAME|DHCID|DLV|DNAME|DNSKEY|DS|EID|GID|GPOS|HINFO|HIP|IPSECKEY|ISDN|KEY|KX|LOC|MAILA|MAILB|MB|MD|MF|MG|MINFO|MR|MX|NAPTR|NB|NBSTAT|NIMLOC|NINFO|NS|NSAP|NSAP-PTR|NSEC|NSEC3|NSEC3PARAM|NULL|NXT|OPENPGPKEY|PTR|PX|RKEY|RP|RRSIG|RT|SIG|SINK|SMIMEA|SOA|SPF|SRV|SSHFP|TA|TKEY|TLSA|TSIG|TXT|UID|UINFO|UNSPEC|URI|WKS|X25)(?=\s|$)/,lookbehind:!0,alias:"keyword"},punctuation:/[()]/},e.languages["dns-zone"]=e.languages["dns-zone-file"]}e.exports=t,t.displayName="dnsZoneFile",t.aliases=[]},97266(e){"use strict";function t(e){!function(e){var t=/\\[\r\n](?:\s|\\[\r\n]|#.*(?!.))*(?![\s#]|\\[\r\n])/.source,n=/(?:[ \t]+(?![ \t])(?:)?|)/.source.replace(//g,function(){return t}),r=/"(?:[^"\\\r\n]|\\(?:\r\n|[\s\S]))*"|'(?:[^'\\\r\n]|\\(?:\r\n|[\s\S]))*'/.source,i=/--[\w-]+=(?:|(?!["'])(?:[^\s\\]|\\.)+)/.source.replace(//g,function(){return r}),a={pattern:RegExp(r),greedy:!0},o={pattern:/(^[ \t]*)#.*/m,lookbehind:!0,greedy:!0};function s(e,t){return e=e.replace(//g,function(){return i}).replace(//g,function(){return n}),RegExp(e,t)}e.languages.docker={instruction:{pattern:/(^[ \t]*)(?:ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|ONBUILD|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)(?=\s)(?:\\.|[^\r\n\\])*(?:\\$(?:\s|#.*$)*(?![\s#])(?:\\.|[^\r\n\\])*)*/im,lookbehind:!0,greedy:!0,inside:{options:{pattern:s(/(^(?:ONBUILD)?\w+)(?:)*/.source,"i"),lookbehind:!0,greedy:!0,inside:{property:{pattern:/(^|\s)--[\w-]+/,lookbehind:!0},string:[a,{pattern:/(=)(?!["'])(?:[^\s\\]|\\.)+/,lookbehind:!0}],operator:/\\$/m,punctuation:/=/}},keyword:[{pattern:s(/(^(?:ONBUILD)?HEALTHCHECK(?:)*)(?:CMD|NONE)\b/.source,"i"),lookbehind:!0,greedy:!0},{pattern:s(/(^(?:ONBUILD)?FROM(?:)*(?!--)[^ 
\t\\]+)AS/.source,"i"),lookbehind:!0,greedy:!0},{pattern:s(/(^ONBUILD)\w+/.source,"i"),lookbehind:!0,greedy:!0},{pattern:/^\w+/,greedy:!0}],comment:o,string:a,variable:/\$(?:\w+|\{[^{}"'\\]*\})/,operator:/\\$/m}},comment:o},e.languages.dockerfile=e.languages.docker}(e)}e.exports=t,t.displayName="docker",t.aliases=["dockerfile"]},80636(e){"use strict";function t(e){!function(e){var t="(?:"+[/[a-zA-Z_\x80-\uFFFF][\w\x80-\uFFFF]*/.source,/-?(?:\.\d+|\d+(?:\.\d*)?)/.source,/"[^"\\]*(?:\\[\s\S][^"\\]*)*"/.source,/<(?:[^<>]|(?!)*>/.source].join("|")+")",n={markup:{pattern:/(^<)[\s\S]+(?=>$)/,lookbehind:!0,alias:["language-markup","language-html","language-xml"],inside:e.languages.markup}};function r(e,n){return RegExp(e.replace(//g,function(){return t}),n)}e.languages.dot={comment:{pattern:/\/\/.*|\/\*[\s\S]*?\*\/|^#.*/m,greedy:!0},"graph-name":{pattern:r(/(\b(?:digraph|graph|subgraph)[ \t\r\n]+)/.source,"i"),lookbehind:!0,greedy:!0,alias:"class-name",inside:n},"attr-value":{pattern:r(/(=[ \t\r\n]*)/.source),lookbehind:!0,greedy:!0,inside:n},"attr-name":{pattern:r(/([\[;, \t\r\n])(?=[ \t\r\n]*=)/.source),lookbehind:!0,greedy:!0,inside:n},keyword:/\b(?:digraph|edge|graph|node|strict|subgraph)\b/i,"compass-point":{pattern:/(:[ \t\r\n]*)(?:[ns][ew]?|[ewc_])(?![\w\x80-\uFFFF])/,lookbehind:!0,alias:"builtin"},node:{pattern:r(/(^|[^-.\w\x80-\uFFFF\\])/.source),lookbehind:!0,greedy:!0,inside:n},operator:/[=:]|-[->]/,punctuation:/[\[\]{};,]/},e.languages.gv=e.languages.dot}(e)}e.exports=t,t.displayName="dot",t.aliases=["gv"]},36500(e){"use strict";function t(e){e.languages.ebnf={comment:/\(\*[\s\S]*?\*\)/,string:{pattern:/"[^"\r\n]*"|'[^'\r\n]*'/,greedy:!0},special:{pattern:/\?[^?\r\n]*\?/,greedy:!0,alias:"class-name"},definition:{pattern:/^([\t ]*)[a-z]\w*(?:[ \t]+[a-z]\w*)*(?=\s*=)/im,lookbehind:!0,alias:["rule","keyword"]},rule:/\b[a-z]\w*(?:[ 
\t]+[a-z]\w*)*\b/i,punctuation:/\([:/]|[:/]\)|[.,;()[\]{}]/,operator:/[-=|*/!]/}}e.exports=t,t.displayName="ebnf",t.aliases=[]},30296(e){"use strict";function t(e){e.languages.editorconfig={comment:/[;#].*/,section:{pattern:/(^[ \t]*)\[.+\]/m,lookbehind:!0,alias:"keyword",inside:{regex:/\\\\[\[\]{},!?.*]/,operator:/[!?]|\.\.|\*{1,2}/,punctuation:/[\[\]{},]/}},property:{pattern:/(^[ \t]*)[^\s=]+(?=[ \t]*=)/m,lookbehind:!0},value:{pattern:/=.*/,alias:"string",inside:{punctuation:/^=/}}}}e.exports=t,t.displayName="editorconfig",t.aliases=[]},50115(e){"use strict";function t(e){e.languages.eiffel={comment:/--.*/,string:[{pattern:/"([^[]*)\[[\s\S]*?\]\1"/,greedy:!0},{pattern:/"([^{]*)\{[\s\S]*?\}\1"/,greedy:!0},{pattern:/"(?:%(?:(?!\n)\s)*\n\s*%|%\S|[^%"\r\n])*"/,greedy:!0}],char:/'(?:%.|[^%'\r\n])+'/,keyword:/\b(?:across|agent|alias|all|and|attached|as|assign|attribute|check|class|convert|create|Current|debug|deferred|detachable|do|else|elseif|end|ensure|expanded|export|external|feature|from|frozen|if|implies|inherit|inspect|invariant|like|local|loop|not|note|obsolete|old|once|or|Precursor|redefine|rename|require|rescue|Result|retry|select|separate|some|then|undefine|until|variant|Void|when|xor)\b/i,boolean:/\b(?:True|False)\b/i,"class-name":{pattern:/\b[A-Z][\dA-Z_]*\b/,alias:"builtin"},number:[/\b0[xcb][\da-f](?:_*[\da-f])*\b/i,/(?:\b\d(?:_*\d)*)?\.(?:(?:\d(?:_*\d)*)?e[+-]?)?\d(?:_*\d)*\b|\b\d(?:_*\d)*\b\.?/i],punctuation:/:=|<<|>>|\(\||\|\)|->|\.(?=\w)|[{}[\];(),:?]/,operator:/\\\\|\|\.\.\||\.\.|\/[~\/=]?|[><]=?|[-+*^=~]/}}e.exports=t,t.displayName="eiffel",t.aliases=[]},20791(e,t,n){"use strict";var r=n(93205);function i(e){var t;e.register(r),(t=e).languages.ejs={delimiter:{pattern:/^<%[-_=]?|[-_]?%>$/,alias:"punctuation"},comment:/^#[\s\S]*/,"language-javascript":{pattern:/[\s\S]+/,inside:t.languages.javascript}},t.hooks.add("before-tokenize",function(e){var 
n=/<%(?!%)[\s\S]+?%>/g;t.languages["markup-templating"].buildPlaceholders(e,"ejs",n)}),t.hooks.add("after-tokenize",function(e){t.languages["markup-templating"].tokenizePlaceholders(e,"ejs")}),t.languages.eta=t.languages.ejs}e.exports=i,i.displayName="ejs",i.aliases=["eta"]},11974(e){"use strict";function t(e){e.languages.elixir={doc:{pattern:/@(?:doc|moduledoc)\s+(?:("""|''')[\s\S]*?\1|("|')(?:\\(?:\r\n|[\s\S])|(?!\2)[^\\\r\n])*\2)/,inside:{attribute:/^@\w+/,string:/['"][\s\S]+/}},comment:{pattern:/#.*/m,greedy:!0},regex:{pattern:/~[rR](?:("""|''')(?:\\[\s\S]|(?!\1)[^\\])+\1|([\/|"'])(?:\\.|(?!\2)[^\\\r\n])+\2|\((?:\\.|[^\\)\r\n])+\)|\[(?:\\.|[^\\\]\r\n])+\]|\{(?:\\.|[^\\}\r\n])+\}|<(?:\\.|[^\\>\r\n])+>)[uismxfr]*/,greedy:!0},string:[{pattern:/~[cCsSwW](?:("""|''')(?:\\[\s\S]|(?!\1)[^\\])+\1|([\/|"'])(?:\\.|(?!\2)[^\\\r\n])+\2|\((?:\\.|[^\\)\r\n])+\)|\[(?:\\.|[^\\\]\r\n])+\]|\{(?:\\.|#\{[^}]+\}|#(?!\{)|[^#\\}\r\n])+\}|<(?:\\.|[^\\>\r\n])+>)[csa]?/,greedy:!0,inside:{}},{pattern:/("""|''')[\s\S]*?\1/,greedy:!0,inside:{}},{pattern:/("|')(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0,inside:{}}],atom:{pattern:/(^|[^:]):\w+/,lookbehind:!0,alias:"symbol"},module:{pattern:/\b[A-Z]\w*\b/,alias:"class-name"},"attr-name":/\b\w+\??:(?!:)/,argument:{pattern:/(^|[^&])&\d+/,lookbehind:!0,alias:"variable"},attribute:{pattern:/@\w+/,alias:"variable"},function:/\b[_a-zA-Z]\w*[?!]?(?:(?=\s*(?:\.\s*)?\()|(?=\/\d))/,number:/\b(?:0[box][a-f\d_]+|\d[\d_]*)(?:\.[\d_]+)?(?:e[+-]?[\d_]+)?\b/i,keyword:/\b(?:after|alias|and|case|catch|cond|def(?:callback|delegate|exception|impl|macro|module|n|np|p|protocol|struct)?|do|else|end|fn|for|if|import|not|or|quote|raise|require|rescue|try|unless|unquote|use|when)\b/,boolean:/\b(?:true|false|nil)\b/,operator:[/\bin\b|&&?|\|[|>]?|\\\\|::|\.\.\.?|\+\+?|-[->]?|<[-=>]|>=|!==?|\B!|=(?:==?|[>~])?|[*\/^]/,{pattern:/([^<])<(?!<)/,lookbehind:!0},{pattern:/([^>])>(?!>)/,lookbehind:!0}],punctuation:/<<|>>|[.,%\[\]{}()]/},e.languages.elixir.string.forEach(f
unction(t){t.inside={interpolation:{pattern:/#\{[^}]+\}/,inside:{delimiter:{pattern:/^#\{|\}$/,alias:"punctuation"},rest:e.languages.elixir}}}})}e.exports=t,t.displayName="elixir",t.aliases=[]},8645(e){"use strict";function t(e){e.languages.elm={comment:/--.*|\{-[\s\S]*?-\}/,char:{pattern:/'(?:[^\\'\r\n]|\\(?:[abfnrtv\\']|\d+|x[0-9a-fA-F]+))'/,greedy:!0},string:[{pattern:/"""[\s\S]*?"""/,greedy:!0},{pattern:/"(?:[^\\"\r\n]|\\.)*"/,greedy:!0}],"import-statement":{pattern:/(^[\t ]*)import\s+[A-Z]\w*(?:\.[A-Z]\w*)*(?:\s+as\s+(?:[A-Z]\w*)(?:\.[A-Z]\w*)*)?(?:\s+exposing\s+)?/m,lookbehind:!0,inside:{keyword:/\b(?:import|as|exposing)\b/}},keyword:/\b(?:alias|as|case|else|exposing|if|in|infixl|infixr|let|module|of|then|type)\b/,builtin:/\b(?:abs|acos|always|asin|atan|atan2|ceiling|clamp|compare|cos|curry|degrees|e|flip|floor|fromPolar|identity|isInfinite|isNaN|logBase|max|min|negate|never|not|pi|radians|rem|round|sin|sqrt|tan|toFloat|toPolar|toString|truncate|turns|uncurry|xor)\b/,number:/\b(?:\d+(?:\.\d+)?(?:e[+-]?\d+)?|0x[0-9a-f]+)\b/i,operator:/\s\.\s|[+\-/*=.$<>:&|^?%#@~!]{2,}|[+\-/*=$<>:&|^?%#@~!]/,hvariable:/\b(?:[A-Z]\w*\.)*[a-z]\w*\b/,constant:/\b(?:[A-Z]\w*\.)*[A-Z]\w*\b/,punctuation:/[{}[\]|(),.:]/}}e.exports=t,t.displayName="elm",t.aliases=[]},84790(e,t,n){"use strict";var r=n(56939),i=n(93205);function a(e){var t;e.register(r),e.register(i),(t=e).languages.erb=t.languages.extend("ruby",{}),t.languages.insertBefore("erb","comment",{delimiter:{pattern:/^<%=?|%>$/,alias:"punctuation"}}),t.hooks.add("before-tokenize",function(e){var n=/<%=?(?:[^\r\n]|[\r\n](?!=begin)|[\r\n]=begin\s(?:[^\r\n]|[\r\n](?!=end))*[\r\n]=end)+?%>/gm;t.languages["markup-templating"].buildPlaceholders(e,"erb",n)}),t.hooks.add("after-tokenize",function(e){t.languages["markup-templating"].tokenizePlaceholders(e,"erb")})}e.exports=a,a.displayName="erb",a.aliases=[]},4502(e){"use strict";function 
t(e){e.languages.erlang={comment:/%.+/,string:{pattern:/"(?:\\.|[^\\"\r\n])*"/,greedy:!0},"quoted-function":{pattern:/'(?:\\.|[^\\'\r\n])+'(?=\()/,alias:"function"},"quoted-atom":{pattern:/'(?:\\.|[^\\'\r\n])+'/,alias:"atom"},boolean:/\b(?:true|false)\b/,keyword:/\b(?:fun|when|case|of|end|if|receive|after|try|catch)\b/,number:[/\$\\?./,/\b\d+#[a-z0-9]+/i,/(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?/i],function:/\b[a-z][\w@]*(?=\()/,variable:{pattern:/(^|[^@])(?:\b|\?)[A-Z_][\w@]*/,lookbehind:!0},operator:[/[=\/<>:]=|=[:\/]=|\+\+?|--?|[=*\/!]|\b(?:bnot|div|rem|band|bor|bxor|bsl|bsr|not|and|or|xor|orelse|andalso)\b/,{pattern:/(^|[^<])<(?!<)/,lookbehind:!0},{pattern:/(^|[^>])>(?!>)/,lookbehind:!0}],atom:/\b[a-z][\w@]*/,punctuation:/[()[\]{}:;,.#|]|<<|>>/}}e.exports=t,t.displayName="erlang",t.aliases=[]},66055(e,t,n){"use strict";var r=n(59803),i=n(93205);function a(e){var t;e.register(r),e.register(i),(t=e).languages.etlua={delimiter:{pattern:/^<%[-=]?|-?%>$/,alias:"punctuation"},"language-lua":{pattern:/[\s\S]+/,inside:t.languages.lua}},t.hooks.add("before-tokenize",function(e){var n=/<%[\s\S]+?%>/g;t.languages["markup-templating"].buildPlaceholders(e,"etlua",n)}),t.hooks.add("after-tokenize",function(e){t.languages["markup-templating"].tokenizePlaceholders(e,"etlua")})}e.exports=a,a.displayName="etlua",a.aliases=[]},68876(e){"use strict";function 
t(e){e.languages["excel-formula"]={comment:{pattern:/(\bN\(\s*)"(?:[^"]|"")*"(?=\s*\))/i,lookbehind:!0,greedy:!0},string:{pattern:/"(?:[^"]|"")*"(?!")/,greedy:!0},reference:{pattern:/(?:'[^']*'|(?:[^\s()[\]{}<>*?"';,$&]*\[[^^\s()[\]{}<>*?"']+\])?\w+)!/,greedy:!0,alias:"string",inside:{operator:/!$/,punctuation:/'/,sheet:{pattern:/[^[\]]+$/,alias:"function"},file:{pattern:/\[[^[\]]+\]$/,inside:{punctuation:/[[\]]/}},path:/[\s\S]+/}},"function-name":{pattern:/\b[A-Z]\w*(?=\()/i,alias:"keyword"},range:{pattern:/\$?\b(?:[A-Z]+\$?\d+:\$?[A-Z]+\$?\d+|[A-Z]+:\$?[A-Z]+|\d+:\$?\d+)\b/i,alias:"property",inside:{operator:/:/,cell:/\$?[A-Z]+\$?\d+/i,column:/\$?[A-Z]+/i,row:/\$?\d+/}},cell:{pattern:/\b[A-Z]+\d+\b|\$[A-Za-z]+\$?\d+\b|\b[A-Za-z]+\$\d+\b/,alias:"property"},number:/(?:\b\d+(?:\.\d+)?|\B\.\d+)(?:e[+-]?\d+)?\b/i,boolean:/\b(?:TRUE|FALSE)\b/i,operator:/[-+*/^%=&,]|<[=>]?|>=?/,punctuation:/[[\]();{}|]/},e.languages.xlsx=e.languages.xls=e.languages["excel-formula"]}e.exports=t,t.displayName="excelFormula",t.aliases=[]},95126(e){"use strict";function t(e){var t,n,r,i,a,o,s,u;t=e,i={comment:[{pattern:/(^|\s)(?:! 
.*|!$)/,lookbehind:!0,inside:n={function:/\b(?:TODOS?|FIX(?:MES?)?|NOTES?|BUGS?|XX+|HACKS?|WARN(?:ING)?|\?{2,}|!{2,})\b/}},{pattern:/(^|\s)\/\*\s[\s\S]*?\*\/(?=\s|$)/,lookbehind:!0,greedy:!0,inside:n},{pattern:/(^|\s)!\[(={0,6})\[\s[\s\S]*?\]\2\](?=\s|$)/,lookbehind:!0,greedy:!0,inside:n}],number:[{pattern:/(^|\s)[+-]?\d+(?=\s|$)/,lookbehind:!0},{pattern:/(^|\s)[+-]?0(?:b[01]+|o[0-7]+|d\d+|x[\dA-F]+)(?=\s|$)/i,lookbehind:!0},{pattern:/(^|\s)[+-]?\d+\/\d+\.?(?=\s|$)/,lookbehind:!0},{pattern:/(^|\s)\+?\d+\+\d+\/\d+(?=\s|$)/,lookbehind:!0},{pattern:/(^|\s)-\d+-\d+\/\d+(?=\s|$)/,lookbehind:!0},{pattern:/(^|\s)[+-]?(?:\d*\.\d+|\d+\.\d*|\d+)(?:e[+-]?\d+)?(?=\s|$)/i,lookbehind:!0},{pattern:/(^|\s)NAN:\s+[\da-fA-F]+(?=\s|$)/,lookbehind:!0},{pattern:/(^|\s)[+-]?0(?:b1\.[01]*|o1\.[0-7]*|d1\.\d*|x1\.[\dA-F]*)p\d+(?=\s|$)/i,lookbehind:!0}],regexp:{pattern:/(^|\s)R\/\s(?:\\\S|[^\\/])*\/(?:[idmsr]*|[idmsr]+-[idmsr]+)(?=\s|$)/,lookbehind:!0,alias:"number",inside:{variable:/\\\S/,keyword:/[+?*\[\]^$(){}.|]/,operator:{pattern:/(\/)[idmsr]+(?:-[idmsr]+)?/,lookbehind:!0}}},boolean:{pattern:/(^|\s)[tf](?=\s|$)/,lookbehind:!0},"custom-string":{pattern:/(^|\s)[A-Z0-9\-]+"\s(?:\\\S|[^"\\])*"/,lookbehind:!0,greedy:!0,alias:"string",inside:{number:/\\\S|%\w|\//}},"multiline-string":[{pattern:/(^|\s)STRING:\s+\S+(?:\n|\r\n).*(?:\n|\r\n)\s*;(?=\s|$)/,lookbehind:!0,greedy:!0,alias:"string",inside:{number:(r={number:/\\[^\s']|%\w/}).number,"semicolon-or-setlocal":{pattern:/([\r\n][ 
\t]*);(?=\s|$)/,lookbehind:!0,alias:"function"}}},{pattern:/(^|\s)HEREDOC:\s+\S+(?:\n|\r\n).*(?:\n|\r\n)\s*\S+(?=\s|$)/,lookbehind:!0,greedy:!0,alias:"string",inside:r},{pattern:/(^|\s)\[(={0,6})\[\s[\s\S]*?\]\2\](?=\s|$)/,lookbehind:!0,greedy:!0,alias:"string",inside:r}],"special-using":{pattern:/(^|\s)USING:(?:\s\S+)*(?=\s+;(?:\s|$))/,lookbehind:!0,alias:"function",inside:{string:{pattern:/(\s)[^:\s]+/,lookbehind:!0}}},"stack-effect-delimiter":[{pattern:/(^|\s)(?:call|execute|eval)?\((?=\s)/,lookbehind:!0,alias:"operator"},{pattern:/(\s)--(?=\s)/,lookbehind:!0,alias:"operator"},{pattern:/(\s)\)(?=\s|$)/,lookbehind:!0,alias:"operator"}],combinators:{pattern:null,lookbehind:!0,alias:"keyword"},"kernel-builtin":{pattern:null,lookbehind:!0,alias:"variable"},"sequences-builtin":{pattern:null,lookbehind:!0,alias:"variable"},"math-builtin":{pattern:null,lookbehind:!0,alias:"variable"},"constructor-word":{pattern:/(^|\s)<(?!=+>|-+>)\S+>(?=\s|$)/,lookbehind:!0,alias:"keyword"},"other-builtin-syntax":{pattern:null,lookbehind:!0,alias:"operator"},"conventionally-named-word":{pattern:/(^|\s)(?!")(?:(?:set|change|with|new)-\S+|\$\S+|>[^>\s]+|[^:>\s]+>|[^>\s]+>[^>\s]+|\+[^+\s]+\+|[^?\s]+\?|\?[^?\s]+|[^>\s]+>>|>>[^>\s]+|[^<\s]+<<|\([^()\s]+\)|[^!\s]+!|[^*\s]\S*\*|[^.\s]\S*\.)(?=\s|$)/,lookbehind:!0,alias:"keyword"},"colon-syntax":{pattern:/(^|\s)(?:[A-Z0-9\-]+#?)?:{1,2}\s+(?:;\S+|(?!;)\S+)(?=\s|$)/,lookbehind:!0,greedy:!0,alias:"function"},"semicolon-or-setlocal":{pattern:/(\s)(?:;|:>)(?=\s|$)/,lookbehind:!0,alias:"function"},"curly-brace-literal-delimiter":[{pattern:/(^|\s)[a-z]*\{(?=\s)/i,lookbehind:!0,alias:"operator"},{pattern:/(\s)\}(?=\s|$)/,lookbehind:!0,alias:"operator"}],"quotation-delimiter":[{pattern:/(^|\s)\[(?=\s)/,lookbehind:!0,alias:"operator"},{pattern:/(\s)\](?=\s|$)/,lookbehind:!0,alias:"operator"}],"normal-word":{pattern:/(^|\s)[^"\s]\S*(?=\s|$)/,lookbehind:!0},string:{pattern:/"(?:\\\S|[^"\\])*"/,greedy:!0,inside:r}},a=function(e){return(e+"").replace(/([.?*+
\^$\[\]\\(){}|\-])/g,"\\$1")},o=function(e){return RegExp("(^|\\s)(?:"+e.map(a).join("|")+")(?=\\s|$)")},Object.keys(s={"kernel-builtin":["or","2nipd","4drop","tuck","wrapper","nip","wrapper?","callstack>array","die","dupd","callstack","callstack?","3dup","hashcode","pick","4nip","build",">boolean","nipd","clone","5nip","eq?","?","=","swapd","2over","clear","2dup","get-retainstack","not","tuple?","dup","3nipd","call","-rotd","object","drop","assert=","assert?","-rot","execute","boa","get-callstack","curried?","3drop","pickd","overd","over","roll","3nip","swap","and","2nip","rotd","throw","(clone)","hashcode*","spin","reach","4dup","equal?","get-datastack","assert","2drop","","boolean?","identity-hashcode","identity-tuple?","null","composed?","new","5drop","rot","-roll","xor","identity-tuple","boolean"],"other-builtin-syntax":["=======","recursive","flushable",">>","<<<<<<","M\\","B","PRIVATE>","\\","======","final","inline","delimiter","deprecated",">>>>>","<<<<<<<","parse-complex","malformed-complex","read-only",">>>>>>>","call-next-method","<<","foldable","$","$[","${"],"sequences-builtin":["member-eq?","mismatch","append","assert-sequence=","longer","repetition","clone-like","3sequence","assert-sequence?","last-index-from","reversed","index-from","cut*","pad-tail","join-as","remove-eq!","concat-as","but-last","snip","nths","nth","sequence","longest","slice?","","remove-nth","tail-slice","empty?","tail*","member?","virtual-sequence?","set-length","drop-prefix","iota","unclip","bounds-error?","unclip-last-slice","non-negative-integer-expected","non-negative-integer-expected?","midpoint@","longer?","?set-nth","?first","rest-slice","prepend-as","prepend","fourth","sift","subseq-start","new-sequence","?last","like","first4","1sequence","reverse","slice","virtual@","repetition?","set-last","index","4sequence","max-length","set-second","immutable-sequence","first2","first3","supremum","unclip-slice","suffix!","insert-nth","tail","3append","short","suffix","concat","flip
","immutable?","reverse!","2sequence","sum","delete-all","indices","snip-slice","","check-slice","sequence?","head","append-as","halves","sequence=","collapse-slice","?second","slice-error?","product","bounds-check?","bounds-check","immutable","virtual-exemplar","harvest","remove","pad-head","last","set-fourth","cartesian-product","remove-eq","shorten","shorter","reversed?","shorter?","shortest","head-slice","pop*","tail-slice*","but-last-slice","iota?","append!","cut-slice","new-resizable","head-slice*","sequence-hashcode","pop","set-nth","?nth","second","join","immutable-sequence?","","3append-as","virtual-sequence","subseq?","remove-nth!","length","last-index","lengthen","assert-sequence","copy","move","third","first","tail?","set-first","prefix","bounds-error","","exchange","surround","cut","min-length","set-third","push-all","head?","subseq-start-from","delete-slice","rest","sum-lengths","head*","infimum","remove!","glue","slice-error","subseq","push","replace-slice","subseq-as","unclip-last"],"math-builtin":["number=","next-power-of-2","?1+","fp-special?","imaginary-part","float>bits","number?","fp-infinity?","bignum?","fp-snan?","denominator","gcd","*","+","fp-bitwise=","-","u>=","/",">=","bitand","power-of-2?","log2-expects-positive","neg?","<","log2",">","integer?","number","bits>double","2/","zero?","bits>float","float?","shift","ratio?","rect>","even?","ratio","fp-sign","bitnot",">fixnum","complex?","/i","integer>fixnum","/f","sgn",">bignum","next-float","u<","u>","mod","recip","rational",">float","2^","integer","fixnum?","neg","fixnum","sq","bignum",">rect","bit?","fp-qnan?","simple-gcd","complex","","real",">fraction","double>bits","bitor","rem","fp-nan-payload","real-part","log2-expects-positive?","prev-float","align","unordered?","float","fp-nan?","abs","bitxor","integer>fixnum-strict","u<=","odd?","<=","/mod",">integer","real?","rational?","numerator"]}).forEach(function(e){i[e].pattern=o(s[e])}),u=["2bi","while","2tri","bi*","4dip","both?","same?","
tri@","curry","prepose","3bi","?if","tri*","2keep","3keep","curried","2keepd","when","2bi*","2tri*","4keep","bi@","keepdd","do","unless*","tri-curry","if*","loop","bi-curry*","when*","2bi@","2tri@","with","2with","either?","bi","until","3dip","3curry","tri-curry*","tri-curry@","bi-curry","keepd","compose","2dip","if","3tri","unless","tuple","keep","2curry","tri","most","while*","dip","composed","bi-curry@","find-last-from","trim-head-slice","map-as","each-from","none?","trim-tail","partition","if-empty","accumulate*","reject!","find-from","accumulate-as","collector-for-as","reject","map","map-sum","accumulate!","2each-from","follow","supremum-by","map!","unless-empty","collector","padding","reduce-index","replicate-as","infimum-by","trim-tail-slice","count","find-index","filter","accumulate*!","reject-as","map-integers","map-find","reduce","selector","interleave","2map","filter-as","binary-reduce","map-index-as","find","produce","filter!","replicate","cartesian-map","cartesian-each","find-index-from","map-find-last","3map-as","3map","find-last","selector-as","2map-as","2map-reduce","accumulate","each","each-index","accumulate*-as","when-empty","all?","collector-as","push-either","new-like","collector-for","2selector","push-if","2all?","map-reduce","3each","any?","trim-slice","2reduce","change-nth","produce-as","2each","trim","trim-head","cartesian-find","map-index","if-zero","each-integer","unless-zero","(find-integer)","when-zero","find-last-integer","(all-integers?)","times","(each-integer)","find-integer","all-integers?","unless-negative","if-positive","when-positive","when-negative","unless-positive","if-negative","case","2cleave","cond>quot","case>quot","3cleave","wrong-values","to-fixed-point","alist>quot","cond","cleave","call-effect","recursive-hashcode","spread","deep-spread>quot","2||","0||","n||","0&&","2&&","3||","1||","1&&","n&&","3&&","smart-unless*","keep-inputs","reduce-outputs","smart-when*","cleave>array","smart-with","smart-apply","smart-if","inpu
ts/outputs","output>sequence-n","map-outputs","map-reduce-outputs","dropping","output>array","smart-map-reduce","smart-2map-reduce","output>array-n","nullary","inputsequence"],i.combinators.pattern=o(u),t.languages.factor=i}e.exports=t,t.displayName="factor",t.aliases=[]},74644(e){"use strict";function t(e){var t;(t=e).languages.false={comment:{pattern:/\{[^}]*\}/},string:{pattern:/"[^"]*"/,greedy:!0},"character-code":{pattern:/'(?:[^\r]|\r\n?)/,alias:"number"},"assembler-code":{pattern:/\d+`/,alias:"important"},number:/\d+/,operator:/[-!#$%&'*+,./:;=>?@\\^_`|~ßø]/,punctuation:/\[|\]/,variable:/[a-z]/,"non-standard":{pattern:/[()!=]=?|[-+*/%]|\b(?:in|is)\b/}),delete e.languages["firestore-security-rules"]["class-name"],e.languages.insertBefore("firestore-security-rules","keyword",{path:{pattern:/(^|[\s(),])(?:\/(?:[\w\xA0-\uFFFF]+|\{[\w\xA0-\uFFFF]+(?:=\*\*)?\}|\$\([\w\xA0-\uFFFF.]+\)))+/,lookbehind:!0,greedy:!0,inside:{variable:{pattern:/\{[\w\xA0-\uFFFF]+(?:=\*\*)?\}|\$\([\w\xA0-\uFFFF.]+\)/,inside:{operator:/=/,keyword:/\*\*/,punctuation:/[.$(){}]/}},punctuation:/\//}},method:{pattern:/(\ballow\s+)[a-z]+(?:\s*,\s*[a-z]+)*(?=\s*[:;])/,lookbehind:!0,alias:"builtin",inside:{punctuation:/,/}}})}e.exports=t,t.displayName="firestoreSecurityRules",t.aliases=[]},37225(e){"use strict";function t(e){var t;(t=e).languages.flow=t.languages.extend("javascript",{}),t.languages.insertBefore("flow","keyword",{type:[{pattern:/\b(?:[Nn]umber|[Ss]tring|[Bb]oolean|Function|any|mixed|null|void)\b/,alias:"tag"}]}),t.languages.flow["function-variable"].pattern=/(?!\s)[_$a-z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*=\s*(?:function\b|(?:\([^()]*\)(?:\s*:\s*\w+)?|(?!\s)[_$a-z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*)\s*=>))/i,delete 
t.languages.flow.parameter,t.languages.insertBefore("flow","operator",{"flow-punctuation":{pattern:/\{\||\|\}/,alias:"punctuation"}}),Array.isArray(t.languages.flow.keyword)||(t.languages.flow.keyword=[t.languages.flow.keyword]),t.languages.flow.keyword.unshift({pattern:/(^|[^$]\b)(?:type|opaque|declare|Class)\b(?!\$)/,lookbehind:!0},{pattern:/(^|[^$]\B)\$(?:await|Diff|Exact|Keys|ObjMap|PropertyType|Shape|Record|Supertype|Subtype|Enum)\b(?!\$)/,lookbehind:!0})}e.exports=t,t.displayName="flow",t.aliases=[]},16725(e){"use strict";function t(e){e.languages.fortran={"quoted-number":{pattern:/[BOZ](['"])[A-F0-9]+\1/i,alias:"number"},string:{pattern:/(?:\b\w+_)?(['"])(?:\1\1|&(?:\r\n?|\n)(?:[ \t]*!.*(?:\r\n?|\n)|(?![ \t]*!))|(?!\1).)*(?:\1|&)/,inside:{comment:{pattern:/(&(?:\r\n?|\n)\s*)!.*/,lookbehind:!0}}},comment:{pattern:/!.*/,greedy:!0},boolean:/\.(?:TRUE|FALSE)\.(?:_\w+)?/i,number:/(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[ED][+-]?\d+)?(?:_\w+)?/i,keyword:[/\b(?:INTEGER|REAL|DOUBLE ?PRECISION|COMPLEX|CHARACTER|LOGICAL)\b/i,/\b(?:END ?)?(?:BLOCK ?DATA|DO|FILE|FORALL|FUNCTION|IF|INTERFACE|MODULE(?! 
PROCEDURE)|PROGRAM|SELECT|SUBROUTINE|TYPE|WHERE)\b/i,/\b(?:ALLOCATABLE|ALLOCATE|BACKSPACE|CALL|CASE|CLOSE|COMMON|CONTAINS|CONTINUE|CYCLE|DATA|DEALLOCATE|DIMENSION|DO|END|EQUIVALENCE|EXIT|EXTERNAL|FORMAT|GO ?TO|IMPLICIT(?: NONE)?|INQUIRE|INTENT|INTRINSIC|MODULE PROCEDURE|NAMELIST|NULLIFY|OPEN|OPTIONAL|PARAMETER|POINTER|PRINT|PRIVATE|PUBLIC|READ|RETURN|REWIND|SAVE|SELECT|STOP|TARGET|WHILE|WRITE)\b/i,/\b(?:ASSIGNMENT|DEFAULT|ELEMENTAL|ELSE|ELSEWHERE|ELSEIF|ENTRY|IN|INCLUDE|INOUT|KIND|NULL|ONLY|OPERATOR|OUT|PURE|RECURSIVE|RESULT|SEQUENCE|STAT|THEN|USE)\b/i],operator:[/\*\*|\/\/|=>|[=\/]=|[<>]=?|::|[+\-*=%]|\.[A-Z]+\./i,{pattern:/(^|(?!\().)\/(?!\))/,lookbehind:!0}],punctuation:/\(\/|\/\)|[(),;:&]/}}e.exports=t,t.displayName="fortran",t.aliases=[]},95559(e){"use strict";function t(e){e.languages.fsharp=e.languages.extend("clike",{comment:[{pattern:/(^|[^\\])\(\*(?!\))[\s\S]*?\*\)/,lookbehind:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0}],string:{pattern:/(?:"""[\s\S]*?"""|@"(?:""|[^"])*"|"(?:\\[\s\S]|[^\\"])*")B?|'(?:[^\\']|\\(?:.|\d{3}|x[a-fA-F\d]{2}|u[a-fA-F\d]{4}|U[a-fA-F\d]{8}))'B?/,greedy:!0},"class-name":{pattern:/(\b(?:exception|inherit|interface|new|of|type)\s+|\w\s*:\s*|\s:\??>\s*)[.\w]+\b(?:\s*(?:->|\*)\s*[.\w]+\b)*(?!\s*[:.])/,lookbehind:!0,inside:{operator:/->|\*/,punctuation:/\./}},keyword:/\b(?:let|return|use|yield)(?:!\B|\b)|\b(?:abstract|and|as|assert|base|begin|class|default|delegate|do|done|downcast|downto|elif|else|end|exception|extern|false|finally|for|fun|function|global|if|in|inherit|inline|interface|internal|lazy|match|member|module|mutable|namespace|new|not|null|of|open|or|override|private|public|rec|select|static|struct|then|to|true|try|type|upcast|val|void|when|while|with|asr|land|lor|lsl|lsr|lxor|mod|sig|atomic|break|checked|component|const|constraint|constructor|continue|eager|event|external|fixed|functor|include|method|mixin|object|parallel|process|protected|pure|sealed|tailcall|trait|virtual|volatile)\b/,number:[/\b0x[\da-fA-F]+(?:un|lf|LF)
?\b/,/\b0b[01]+(?:y|uy)?\b/,/(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[fm]|e[+-]?\d+)?\b/i,/\b\d+(?:[IlLsy]|u[lsy]?|UL)?\b/],operator:/([<>~&^])\1\1|([*.:<>&])\2|<-|->|[!=:]=|?|\??(?:<=|>=|<>|[-+*/%=<>])\??|[!?^&]|~[+~-]|:>|:\?>?/}),e.languages.insertBefore("fsharp","keyword",{preprocessor:{pattern:/(^[\t ]*)#.*/m,lookbehind:!0,alias:"property",inside:{directive:{pattern:/(^#)\b(?:else|endif|if|light|line|nowarn)\b/,lookbehind:!0,alias:"keyword"}}}}),e.languages.insertBefore("fsharp","punctuation",{"computation-expression":{pattern:/\b[_a-z]\w*(?=\s*\{)/i,alias:"keyword"}}),e.languages.insertBefore("fsharp","string",{annotation:{pattern:/\[<.+?>\]/,inside:{punctuation:/^\[<|>\]$/,"class-name":{pattern:/^\w+$|(^|;\s*)[A-Z]\w*(?=\()/,lookbehind:!0},"annotation-content":{pattern:/[\s\S]+/,inside:e.languages.fsharp}}}})}e.exports=t,t.displayName="fsharp",t.aliases=[]},82114(e,t,n){"use strict";var r=n(93205);function i(e){e.register(r),function(e){for(var t=/[^<()"']|\((?:)*\)|<(?!#--)|<#--(?:[^-]|-(?!->))*-->|"(?:[^\\"]|\\.)*"|'(?:[^\\']|\\.)*'/.source,n=0;n<2;n++)t=t.replace(//g,function(){return t});t=t.replace(//g,/[^\s\S]/.source);var r={comment:/<#--[\s\S]*?-->/,string:[{pattern:/\br("|')(?:(?!\1)[^\\]|\\.)*\1/,greedy:!0},{pattern:RegExp(/("|')(?:(?!\1|\$\{)[^\\]|\\.|\$\{(?:(?!\})(?:))*\})*\1/.source.replace(//g,function(){return t})),greedy:!0,inside:{interpolation:{pattern:RegExp(/((?:^|[^\\])(?:\\\\)*)\$\{(?:(?!\})(?:))*\}/.source.replace(//g,function(){return 
t})),lookbehind:!0,inside:{"interpolation-punctuation":{pattern:/^\$\{|\}$/,alias:"punctuation"},rest:null}}}}],keyword:/\b(?:as)\b/,boolean:/\b(?:true|false)\b/,"builtin-function":{pattern:/((?:^|[^?])\?\s*)\w+/,lookbehind:!0,alias:"function"},function:/\b\w+(?=\s*\()/,number:/\b\d+(?:\.\d+)?\b/,operator:/\.\.[<*!]?|->|--|\+\+|&&|\|\||\?{1,2}|[-+*/%!=<>]=?|\b(?:gt|gte|lt|lte)\b/,punctuation:/[,;.:()[\]{}]/};r.string[1].inside.interpolation.inside.rest=r,e.languages.ftl={"ftl-comment":{pattern:/^<#--[\s\S]*/,alias:"comment"},"ftl-directive":{pattern:/^<[\s\S]+>$/,inside:{directive:{pattern:/(^<\/?)[#@][a-z]\w*/i,lookbehind:!0,alias:"keyword"},punctuation:/^<\/?|\/?>$/,content:{pattern:/\s*\S[\s\S]*/,alias:"ftl",inside:r}}},"ftl-interpolation":{pattern:/^\$\{[\s\S]*\}$/,inside:{punctuation:/^\$\{|\}$/,content:{pattern:/\s*\S[\s\S]*/,alias:"ftl",inside:r}}}},e.hooks.add("before-tokenize",function(n){var r=RegExp(/<#--[\s\S]*?-->|<\/?[#@][a-zA-Z](?:)*?>|\$\{(?:)*?\}/.source.replace(//g,function(){return t}),"gi");e.languages["markup-templating"].buildPlaceholders(n,"ftl",r)}),e.hooks.add("after-tokenize",function(t){e.languages["markup-templating"].tokenizePlaceholders(t,"ftl")})}(e)}e.exports=i,i.displayName="ftl",i.aliases=[]},12208(e){"use strict";function t(e){e.languages.gcode={comment:/;.*|\B\(.*?\)\B/,string:{pattern:/"(?:""|[^"])*"/,greedy:!0},keyword:/\b[GM]\d+(?:\.\d+)?\b/,property:/\b[A-Z]/,checksum:{pattern:/\*\d+/,alias:"punctuation"},punctuation:/:/}}e.exports=t,t.displayName="gcode",t.aliases=[]},62728(e){"use strict";function t(e){e.languages.gdscript={comment:/#.*/,string:{pattern:/@?(?:("|')(?:(?!\1)[^\n\\]|\\[\s\S])*\1(?!"|')|"""(?:[^\\]|\\[\s\S])*?""")/,greedy:!0},"class-name":{pattern:/(^(?:class_name|class|extends)[ \t]+|^export\([ \t]*|\bas[ \t]+|(?:\b(?:const|var)[ \t]|[,(])[ \t]*\w+[ \t]*:[ \t]*|->[ 
\t]*)[a-zA-Z_]\w*/m,lookbehind:!0},keyword:/\b(?:and|as|assert|break|breakpoint|class|class_name|const|continue|elif|else|enum|export|extends|for|func|if|in|is|master|mastersync|match|not|null|onready|or|pass|preload|puppet|puppetsync|remote|remotesync|return|self|setget|signal|static|tool|var|while|yield)\b/,function:/\b[a-z_]\w*(?=[ \t]*\()/i,variable:/\$\w+/,number:[/\b0b[01_]+\b|\b0x[\da-fA-F_]+\b|(?:\b\d[\d_]*(?:\.[\d_]*)?|\B\.[\d_]+)(?:e[+-]?[\d_]+)?\b/,/\b(?:INF|NAN|PI|TAU)\b/],constant:/\b[A-Z][A-Z_\d]*\b/,boolean:/\b(?:false|true)\b/,operator:/->|:=|&&|\|\||<<|>>|[-+*/%&|!<>=]=?|[~^]/,punctuation:/[.:,;()[\]{}]/}}e.exports=t,t.displayName="gdscript",t.aliases=[]},81549(e){"use strict";function t(e){e.languages.gedcom={"line-value":{pattern:/(^[\t ]*\d+ +(?:@\w[\w!"$%&'()*+,\-./:;<=>?[\\\]^`{|}~\x80-\xfe #]*@ +)?\w+ ).+/m,lookbehind:!0,inside:{pointer:{pattern:/^@\w[\w!"$%&'()*+,\-./:;<=>?[\\\]^`{|}~\x80-\xfe #]*@$/,alias:"variable"}}},tag:{pattern:/(^[\t ]*\d+ +(?:@\w[\w!"$%&'()*+,\-./:;<=>?[\\\]^`{|}~\x80-\xfe #]*@ +)?)\w+/m,lookbehind:!0,alias:"string"},level:{pattern:/(^[\t ]*)\d+/m,lookbehind:!0,alias:"number"},pointer:{pattern:/@\w[\w!"$%&'()*+,\-./:;<=>?[\\\]^`{|}~\x80-\xfe #]*@/,alias:"variable"}}}e.exports=t,t.displayName="gedcom",t.aliases=[]},6024(e){"use strict";function t(e){var t,n;n=/(?:\r?\n|\r)[ \t]*\|.+\|(?:(?!\|).)*/.source,(t=e).languages.gherkin={pystring:{pattern:/("""|''')[\s\S]+?\1/,alias:"string"},comment:{pattern:/(^[ \t]*)#.*/m,lookbehind:!0},tag:{pattern:/(^[ \t]*)@\S*/m,lookbehind:!0},feature:{pattern:/((?:^|\r?\n|\r)[ \t]*)(?:Ability|Ahoy matey!|Arwedd|Aspekt|Besigheid Behoefte|Business 
Need|Caracteristica|Característica|Egenskab|Egenskap|Eiginleiki|Feature|Fīča|Fitur|Fonctionnalité|Fonksyonalite|Funcionalidade|Funcionalitat|Functionalitate|Funcţionalitate|Funcționalitate|Functionaliteit|Fungsi|Funkcia|Funkcija|Funkcionalitāte|Funkcionalnost|Funkcja|Funksie|Funktionalität|Funktionalitéit|Funzionalità|Hwaet|Hwæt|Jellemző|Karakteristik|laH|Lastnost|Mak|Mogucnost|Mogućnost|Moznosti|Možnosti|OH HAI|Omadus|Ominaisuus|Osobina|Özellik|perbogh|poQbogh malja'|Potrzeba biznesowa|Požadavek|Požiadavka|Pretty much|Qap|Qu'meH 'ut|Savybė|Tính năng|Trajto|Vermoë|Vlastnosť|Właściwość|Značilnost|Δυνατότητα|Λειτουργία|Могућност|Мөмкинлек|Особина|Свойство|Үзенчәлеклелек|Функционал|Функционалност|Функция|Функціонал|תכונה|خاصية|خصوصیت|صلاحیت|کاروبار کی ضرورت|وِیژگی|रूप लेख|ਖਾਸੀਅਤ|ਨਕਸ਼ ਨੁਹਾਰ|ਮੁਹਾਂਦਰਾ|గుణము|ಹೆಚ್ಚಳ|ความต้องการทางธุรกิจ|ความสามารถ|โครงหลัก|기능|フィーチャ|功能|機能):(?:[^:\r\n]+(?:\r?\n|\r|$))*/,lookbehind:!0,inside:{important:{pattern:/(:)[^\r\n]+/,lookbehind:!0},keyword:/[^:\r\n]+:/}},scenario:{pattern:/(^[ \t]*)(?:Abstract Scenario|Abstrakt Scenario|Achtergrond|Aer|Ær|Agtergrond|All y'all|Antecedentes|Antecedents|Atburðarás|Atburðarásir|Awww, look mate|B4|Background|Baggrund|Bakgrund|Bakgrunn|Bakgrunnur|Beispiele|Beispiller|Bối cảnh|Cefndir|Cenario|Cenário|Cenario de Fundo|Cenário de Fundo|Cenarios|Cenários|Contesto|Context|Contexte|Contexto|Conto|Contoh|Contone|Dæmi|Dasar|Dead men tell no tales|Delineacao do Cenario|Delineação do Cenário|Dis is what went down|Dữ liệu|Dyagram senaryo|Dyagram Senaryo|Egzanp|Ejemplos|Eksempler|Ekzemploj|Enghreifftiau|Esbozo do escenario|Escenari|Escenario|Esempi|Esquema de l'escenari|Esquema del escenario|Esquema do Cenario|Esquema do Cenário|Examples|EXAMPLZ|Exempel|Exemple|Exemples|Exemplos|First off|Fono|Forgatókönyv|Forgatókönyv vázlat|Fundo|Geçmiş|ghantoH|Grundlage|Hannergrond|Háttér|Heave to|Istorik|Juhtumid|Keadaan|Khung kịch bản|Khung tình huống|Kịch bản|Koncept|Konsep 
skenario|Kontèks|Kontekst|Kontekstas|Konteksts|Kontext|Konturo de la scenaro|Latar Belakang|lut|lut chovnatlh|lutmey|Lýsing Atburðarásar|Lýsing Dæma|Menggariskan Senario|MISHUN|MISHUN SRSLY|mo'|Náčrt Scenára|Náčrt Scénáře|Náčrt Scenáru|Oris scenarija|Örnekler|Osnova|Osnova Scenára|Osnova scénáře|Osnutek|Ozadje|Paraugs|Pavyzdžiai|Példák|Piemēri|Plan du scénario|Plan du Scénario|Plan senaryo|Plan Senaryo|Plang vum Szenario|Pozadí|Pozadie|Pozadina|Príklady|Příklady|Primer|Primeri|Primjeri|Przykłady|Raamstsenaarium|Reckon it's like|Rerefons|Scenár|Scénář|Scenarie|Scenarij|Scenarijai|Scenarijaus šablonas|Scenariji|Scenārijs|Scenārijs pēc parauga|Scenarijus|Scenario|Scénario|Scenario Amlinellol|Scenario Outline|Scenario Template|Scenariomal|Scenariomall|Scenarios|Scenariu|Scenariusz|Scenaro|Schema dello scenario|Se ðe|Se the|Se þe|Senario|Senaryo|Senaryo deskripsyon|Senaryo Deskripsyon|Senaryo taslağı|Shiver me timbers|Situācija|Situai|Situasie|Situasie Uiteensetting|Skenario|Skenario konsep|Skica|Structura scenariu|Structură scenariu|Struktura scenarija|Stsenaarium|Swa|Swa hwaer swa|Swa hwær swa|Szablon scenariusza|Szenario|Szenariogrundriss|Tapaukset|Tapaus|Tapausaihio|Taust|Tausta|Template Keadaan|Template Senario|Template Situai|The thing of it is|Tình huống|Variantai|Voorbeelde|Voorbeelden|Wharrimean is|Yo-ho-ho|You'll wanna|Założenia|Παραδείγματα|Περιγραφή Σεναρίου|Σενάρια|Σενάριο|Υπόβαθρο|Кереш|Контекст|Концепт|Мисаллар|Мисоллар|Основа|Передумова|Позадина|Предистория|Предыстория|Приклади|Пример|Примери|Примеры|Рамка на сценарий|Скица|Структура сценарија|Структура сценария|Структура сценарію|Сценарий|Сценарий структураси|Сценарийның төзелеше|Сценарији|Сценарио|Сценарій|Тарих|Үрнәкләр|דוגמאות|רקע|תבנית תרחיש|תרחיש|الخلفية|الگوی سناریو|امثلة|پس منظر|زمینه|سناریو|سيناريو|سيناريو مخطط|مثالیں|منظر نامے کا خاکہ|منظرنامہ|نمونه ها|उदाहरण|परिदृश्य|परिदृश्य रूपरेखा|पृष्ठभूमि|ਉਦਾਹਰਨਾਂ|ਪਟਕਥਾ|ਪਟਕਥਾ ਢਾਂਚਾ|ਪਟਕਥਾ ਰੂਪ 
ਰੇਖਾ|ਪਿਛੋਕੜ|ఉదాహరణలు|కథనం|నేపథ్యం|సన్నివేశం|ಉದಾಹರಣೆಗಳು|ಕಥಾಸಾರಾಂಶ|ವಿವರಣೆ|ಹಿನ್ನೆಲೆ|โครงสร้างของเหตุการณ์|ชุดของตัวอย่าง|ชุดของเหตุการณ์|แนวคิด|สรุปเหตุการณ์|เหตุการณ์|배경|시나리오|시나리오 개요|예|サンプル|シナリオ|シナリオアウトライン|シナリオテンプレ|シナリオテンプレート|テンプレ|例|例子|剧本|剧本大纲|劇本|劇本大綱|场景|场景大纲|場景|場景大綱|背景):[^:\r\n]*/m,lookbehind:!0,inside:{important:{pattern:/(:)[^\r\n]*/,lookbehind:!0},keyword:/[^:\r\n]+:/}},"table-body":{pattern:RegExp("("+n+")(?:"+n+")+"),lookbehind:!0,inside:{outline:{pattern:/<[^>]+>/,alias:"variable"},td:{pattern:/\s*[^\s|][^|]*/,alias:"string"},punctuation:/\|/}},"table-head":{pattern:RegExp(n),inside:{th:{pattern:/\s*[^\s|][^|]*/,alias:"variable"},punctuation:/\|/}},atrule:{pattern:/(^[ \t]+)(?:'ach|'a|'ej|7|a|A také|A taktiež|A tiež|A zároveň|Aber|Ac|Adott|Akkor|Ak|Aleshores|Ale|Ali|Allora|Alors|Als|Ama|Amennyiben|Amikor|Ampak|an|AN|Ananging|And y'all|And|Angenommen|Anrhegedig a|An|Apabila|Atès|Atesa|Atunci|Avast!|Aye|A|awer|Bagi|Banjur|Bet|Biết|Blimey!|Buh|But at the end of the day I reckon|But y'all|But|BUT|Cal|Când|Cando|Cand|Ce|Cuando|Če|Ða ðe|Ða|Dadas|Dada|Dados|Dado|DaH ghu' bejlu'|dann|Dann|Dano|Dan|Dar|Dat fiind|Data|Date fiind|Date|Dati fiind|Dati|Daţi fiind|Dați fiind|Dato|DEN|Den youse gotta|Dengan|De|Diberi|Diyelim ki|Donada|Donat|Donitaĵo|Do|Dun|Duota|Ðurh|Eeldades|Ef|Eğer ki|Entao|Então|Entón|Entonces|En|Epi|E|És|Etant donnée|Etant donné|Et|Étant données|Étant donnée|Étant donné|Etant données|Etant donnés|Étant donnés|Fakat|Gangway!|Gdy|Gegeben seien|Gegeben sei|Gegeven|Gegewe|ghu' noblu'|Gitt|Given y'all|Given|Givet|Givun|Ha|Cho|I CAN HAZ|In|Ir|It's just unbelievable|I|Ja|Jeśli|Jeżeli|Kadar|Kada|Kad|Kai|Kaj|Když|Keď|Kemudian|Ketika|Khi|Kiedy|Ko|Kuid|Kui|Kun|Lan|latlh|Le sa a|Let go and haul|Le|Lè sa a|Lè|Logo|Lorsqu'<|Lorsque|mä|Maar|Mais|Mając|Majd|Maka|Manawa|Mas|Ma|Menawa|Men|Mutta|Nalikaning|Nalika|Nanging|Når|När|Nato|Nhưng|Niin|Njuk|O 
zaman|Og|Och|Oletetaan|Onda|Ond|Oraz|Pak|Pero|Però|Podano|Pokiaľ|Pokud|Potem|Potom|Privzeto|Pryd|qaSDI'|Quando|Quand|Quan|Så|Sed|Se|Siis|Sipoze ke|Sipoze Ke|Sipoze|Si|Şi|Și|Soit|Stel|Tada|Tad|Takrat|Tak|Tapi|Ter|Tetapi|Tha the|Tha|Then y'all|Then|Thì|Thurh|Toda|Too right|ugeholl|Und|Un|Và|vaj|Vendar|Ve|wann|Wanneer|WEN|Wenn|When y'all|When|Wtedy|Wun|Y'know|Yeah nah|Yna|Youse know like when|Youse know when youse got|Y|Za predpokladu|Za předpokladu|Zadani|Zadano|Zadan|Zadate|Zadato|Zakładając|Zaradi|Zatati|Þa þe|Þa|Þá|Þegar|Þurh|Αλλά|Δεδομένου|Και|Όταν|Τότε|А також|Агар|Але|Али|Аммо|А|Әгәр|Әйтик|Әмма|Бирок|Ва|Вә|Дадено|Дано|Допустим|Если|Задате|Задати|Задато|И|І|К тому же|Када|Кад|Когато|Когда|Коли|Ләкин|Лекин|Нәтиҗәдә|Нехай|Но|Онда|Припустимо, що|Припустимо|Пусть|Также|Та|Тогда|Тоді|То|Унда|Һәм|Якщо|אבל|אזי|אז|בהינתן|וגם|כאשר|آنگاه|اذاً|اگر|اما|اور|با فرض|بالفرض|بفرض|پھر|تب|ثم|جب|عندما|فرض کیا|لكن|لیکن|متى|هنگامی|و|अगर|और|कदा|किन्तु|चूंकि|जब|तथा|तदा|तब|परन्तु|पर|यदि|ਅਤੇ|ਜਦੋਂ|ਜਿਵੇਂ ਕਿ|ਜੇਕਰ|ਤਦ|ਪਰ|అప్పుడు|ఈ పరిస్థితిలో|కాని|చెప్పబడినది|మరియు|ಆದರೆ|ನಂತರ|ನೀಡಿದ|ಮತ್ತು|ಸ್ಥಿತಿಯನ್ನು|กำหนดให้|ดังนั้น|แต่|เมื่อ|และ|그러면<|그리고<|단<|만약<|만일<|먼저<|조건<|하지만<|かつ<|しかし<|ただし<|ならば<|もし<|並且<|但し<|但是<|假如<|假定<|假設<|假设<|前提<|同时<|同時<|并且<|当<|當<|而且<|那么<|那麼<)(?=[ \t])/m,lookbehind:!0},string:{pattern:/"(?:\\.|[^"\\\r\n])*"|'(?:\\.|[^'\\\r\n])*'/,inside:{outline:{pattern:/<[^>]+>/,alias:"variable"}}},outline:{pattern:/<[^>]+>/,alias:"variable"}}}e.exports=t,t.displayName="gherkin",t.aliases=[]},13600(e){"use strict";function t(e){e.languages.git={comment:/^#.*/m,deleted:/^[-–].*/m,inserted:/^\+.*/m,string:/("|')(?:\\.|(?!\1)[^\\\r\n])*\1/m,command:{pattern:/^.*\$ git .*$/m,inside:{parameter:/\s--?\w+/m}},coord:/^@@.*@@$/m,"commit-sha1":/^commit \w{40}$/m}}e.exports=t,t.displayName="git",t.aliases=[]},3322(e,t,n){"use strict";var r=n(65806);function 
i(e){e.register(r),e.languages.glsl=e.languages.extend("c",{keyword:/\b(?:attribute|const|uniform|varying|buffer|shared|coherent|volatile|restrict|readonly|writeonly|atomic_uint|layout|centroid|flat|smooth|noperspective|patch|sample|break|continue|do|for|while|switch|case|default|if|else|subroutine|in|out|inout|float|double|int|void|bool|true|false|invariant|precise|discard|return|d?mat[234](?:x[234])?|[ibdu]?vec[234]|uint|lowp|mediump|highp|precision|[iu]?sampler[123]D|[iu]?samplerCube|sampler[12]DShadow|samplerCubeShadow|[iu]?sampler[12]DArray|sampler[12]DArrayShadow|[iu]?sampler2DRect|sampler2DRectShadow|[iu]?samplerBuffer|[iu]?sampler2DMS(?:Array)?|[iu]?samplerCubeArray|samplerCubeArrayShadow|[iu]?image[123]D|[iu]?image2DRect|[iu]?imageCube|[iu]?imageBuffer|[iu]?image[12]DArray|[iu]?imageCubeArray|[iu]?image2DMS(?:Array)?|struct|common|partition|active|asm|class|union|enum|typedef|template|this|resource|goto|inline|noinline|public|static|extern|external|interface|long|short|half|fixed|unsigned|superp|input|output|hvec[234]|fvec[234]|sampler3DRect|filter|sizeof|cast|namespace|using)\b/})}e.exports=i,i.displayName="glsl",i.aliases=[]},53877(e){"use strict";function 
t(e){e.languages.gamemakerlanguage=e.languages.gml=e.languages.extend("clike",{keyword:/\b(?:if|else|switch|case|default|break|for|repeat|while|do|until|continue|exit|return|globalvar|var|enum)\b/,number:/(?:\b0x[\da-f]+|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?)[ulf]{0,4}/i,operator:/[-+%=]=?|!=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]|\b(?:or|and|not|with|at|xor)\b/,constant:/\b(?:self|other|all|noone|global|local|undefined|pointer_(?:invalid|null)|action_(?:stop|restart|continue|reverse)|pi|GM_build_date|GM_version|timezone_(?:local|utc)|gamespeed_(?:fps|microseconds)|ev_(?:create|destroy|step|alarm|keyboard|mouse|collision|other|draw|draw_(?:begin|end|pre|post)|keypress|keyrelease|trigger|(?:left|right|middle|no)_button|(?:left|right|middle)_press|(?:left|right|middle)_release|mouse_(?:enter|leave|wheel_up|wheel_down)|global_(?:left|right|middle)_button|global_(?:left|right|middle)_press|global_(?:left|right|middle)_release|joystick(?:1|2)_(?:left|right|up|down|button1|button2|button3|button4|button5|button6|button7|button8)|outside|boundary|game_start|game_end|room_start|room_end|no_more_lives|animation_end|end_of_path|no_more_health|user\d|step_(?:normal|begin|end)|gui|gui_begin|gui_end)|vk_(?:nokey|anykey|enter|return|shift|control|alt|escape|space|backspace|tab|pause|printscreen|left|right|up|down|home|end|delete|insert|pageup|pagedown|f\d|numpad\d|divide|multiply|subtract|add|decimal|lshift|lcontrol|lalt|rshift|rcontrol|ralt)|mb_(?:any|none|left|right|middle)|c_(?:aqua|black|blue|dkgray|fuchsia|gray|green|lime|ltgray|maroon|navy|olive|purple|red|silver|teal|white|yellow|orange)|fa_(?:left|center|right|top|middle|bottom|readonly|hidden|sysfile|volumeid|directory|archive)|pr_(?:pointlist|linelist|linestrip|trianglelist|trianglestrip|trianglefan)|bm_(?:complex|normal|add|max|subtract|zero|one|src_colour|inv_src_colour|src_color|inv_src_color|src_alpha|inv_src_alpha|dest_alpha|inv_dest_alpha|dest_colour|inv_dest_colour|dest_color|inv_dest_color|src_alpha_sat)|audi
o_(?:falloff_(?:none|inverse_distance|inverse_distance_clamped|linear_distance|linear_distance_clamped|exponent_distance|exponent_distance_clamped)|old_system|new_system|mono|stereo|3d)|cr_(?:default|none|arrow|cross|beam|size_nesw|size_ns|size_nwse|size_we|uparrow|hourglass|drag|appstart|handpoint|size_all)|asset_(?:object|unknown|sprite|sound|room|path|script|font|timeline|tiles|shader)|ds_type_(?:map|list|stack|queue|grid|priority)|ef_(?:explosion|ring|ellipse|firework|smoke|smokeup|star|spark|flare|cloud|rain|snow)|pt_shape_(?:pixel|disk|square|line|star|circle|ring|sphere|flare|spark|explosion|cloud|smoke|snow)|ps_(?:distr|shape)_(?:linear|gaussian|invgaussian|rectangle|ellipse|diamond|line)|ty_(?:real|string)|dll_(?:cdel|cdecl|stdcall)|matrix_(?:view|projection|world)|os_(?:win32|windows|macosx|ios|android|linux|unknown|winphone|win8native|psvita|ps4|xboxone|ps3|uwp)|browser_(?:not_a_browser|unknown|ie|firefox|chrome|safari|safari_mobile|opera|tizen|windows_store|ie_mobile)|device_ios_(?:unknown|iphone|iphone_retina|ipad|ipad_retina|iphone5|iphone6|iphone6plus)|device_(?:emulator|tablet)|display_(?:landscape|landscape_flipped|portrait|portrait_flipped)|of_challenge_(?:win|lose|tie)|leaderboard_type_(?:number|time_mins_secs)|cmpfunc_(?:never|less|equal|lessequal|greater|notequal|greaterequal|always)|cull_(?:noculling|clockwise|counterclockwise)|lighttype_(?:dir|point)|iap_(?:ev_storeload|ev_product|ev_purchase|ev_consume|ev_restore|storeload_ok|storeload_failed|status_uninitialised|status_unavailable|status_loading|status_available|status_processing|status_restoring|failed|unavailable|available|purchased|canceled|refunded)|fb_login_(?:default|fallback_to_webview|no_fallback_to_webview|forcing_webview|use_system_account|forcing_safari)|phy_joint_(?:anchor_1_x|anchor_1_y|anchor_2_x|anchor_2_y|reaction_force_x|reaction_force_y|reaction_torque|motor_speed|angle|motor_torque|max_motor_torque|translation|speed|motor_force|max_motor_force|length_1|length_2|damping_rat
io|frequency|lower_angle_limit|upper_angle_limit|angle_limits|max_length|max_torque|max_force)|phy_debug_render_(?:aabb|collision_pairs|coms|core_shapes|joints|obb|shapes)|phy_particle_flag_(?:water|zombie|wall|spring|elastic|viscous|powder|tensile|colourmixing|colormixing)|phy_particle_group_flag_(?:solid|rigid)|phy_particle_data_flag_(?:typeflags|position|velocity|colour|color|category)|achievement_(?:our_info|friends_info|leaderboard_info|info|filter_(?:all_players|friends_only|favorites_only)|type_challenge|type_score_challenge|pic_loaded|show_(?:ui|profile|leaderboard|achievement|bank|friend_picker|purchase_prompt))|network_(?:socket_(?:tcp|udp|bluetooth)|type_(?:connect|disconnect|data|non_blocking_connect)|config_(?:connect_timeout|use_non_blocking_socket|enable_reliable_udp|disable_reliable_udp))|buffer_(?:fixed|grow|wrap|fast|vbuffer|network|u8|s8|u16|s16|u32|s32|u64|f16|f32|f64|bool|text|string|seek_start|seek_relative|seek_end|generalerror|outofspace|outofbounds|invalidtype)|gp_(?:face\d|shoulderl|shoulderr|shoulderlb|shoulderrb|select|start|stickl|stickr|padu|padd|padl|padr|axislh|axislv|axisrh|axisrv)|ov_(?:friends|community|players|settings|gamegroup|achievements)|lb_sort_(?:none|ascending|descending)|lb_disp_(?:none|numeric|time_sec|time_ms)|ugc_(?:result_success|filetype_(?:community|microtrans)|visibility_(?:public|friends_only|private)|query_RankedBy(?:Vote|PublicationDate|Trend|NumTimesReported|TotalVotesAsc|VotesUp|TextSearch)|query_(?:AcceptedForGameRankedByAcceptanceDate|FavoritedByFriendsRankedByPublicationDate|CreatedByFriendsRankedByPublicationDate|NotYetRated)|sortorder_CreationOrder(?:Desc|Asc)|sortorder_(?:TitleAsc|LastUpdatedDesc|SubscriptionDateDesc|VoteScoreDesc|ForModeration)|list_(?:Published|VotedOn|VotedUp|VotedDown|WillVoteLater|Favorited|Subscribed|UsedOrPlayed|Followed)|match_(?:Items|Items_Mtx|Items_ReadyToUse|Collections|Artwork|Videos|Screenshots|AllGuides|WebGuides|IntegratedGuides|UsableInGame|ControllerBindings))|vertex_us
age_(?:position|colour|color|normal|texcoord|textcoord|blendweight|blendindices|psize|tangent|binormal|fog|depth|sample)|vertex_type_(?:float\d|colour|color|ubyte4)|layerelementtype_(?:undefined|background|instance|oldtilemap|sprite|tilemap|particlesystem|tile)|tile_(?:rotate|flip|mirror|index_mask)|input_type|se_(?:chorus|compressor|echo|equalizer|flanger|gargle|none|reverb)|text_type|(?:obj|scr|spr|rm)\w+)\b/,variable:/\b(?:x|y|(?:x|y)(?:previous|start)|(?:h|v)speed|direction|speed|friction|gravity|gravity_direction|path_(?:index|position|positionprevious|speed|scale|orientation|endaction)|object_index|id|solid|persistent|mask_index|instance_(?:count|id)|alarm|timeline_(?:index|position|speed|running|loop)|visible|sprite_(?:index|width|height|xoffset|yoffset)|image_(?:number|index|speed|depth|xscale|yscale|angle|alpha|blend)|bbox_(?:left|right|top|bottom)|layer|phy_(?:rotation|(?:position|linear_velocity|speed|com|collision|col_normal)_(?:x|y)|angular_(?:velocity|damping)|position_(?:x|y)previous|speed|linear_damping|bullet|fixed_rotation|active|mass|inertia|dynamic|kinematic|sleeping|collision_points)|working_directory|webgl_enabled|view_(?:(?:y|x|w|h)view|(?:y|x|w|h)port|(?:v|h)(?:speed|border)|visible|surface_id|object|enabled|current|angle)|undefined|transition_(?:steps|kind|color)|temp_directory|show_(?:score|lives|health)|secure_mode|score|room_(?:width|speed|persistent|last|height|first|caption)|room|pointer_(?:null|invalid)|os_(?:version|type|device|browser)|mouse_(?:y|x|lastbutton|button)|lives|keyboard_(?:string|lastkey|lastchar|key)|iap_data|health|gamemaker_(?:version|registered|pro)|game_(?:save|project|display)_(?:id|name)|fps_real|fps|event_(?:type|object|number|action)|error_(?:occurred|last)|display_aa|delta_time|debug_mode|cursor_sprite|current_(?:year|weekday|time|second|month|minute|hour|day)|caption_(?:score|lives|health)|browser_(?:width|height)|background_(?:yscale|y|xscale|x|width|vtiled|vspeed|visible|showcolour|showcolor|index|htiled|hspe
ed|height|foreground|colour|color|blend|alpha)|async_load|application_surface|argument(?:_relitive|_count|\d)|argument|global|local|self|other)\b/})}e.exports=t,t.displayName="gml",t.aliases=[]},51519(e){"use strict";function t(e){e.languages.go=e.languages.extend("clike",{string:{pattern:/(["'`])(?:\\[\s\S]|(?!\1)[^\\])*\1/,greedy:!0},keyword:/\b(?:break|case|chan|const|continue|default|defer|else|fallthrough|for|func|go(?:to)?|if|import|interface|map|package|range|return|select|struct|switch|type|var)\b/,boolean:/\b(?:_|iota|nil|true|false)\b/,number:/(?:\b0x[a-f\d]+|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[-+]?\d+)?)i?/i,operator:/[*\/%^!=]=?|\+[=+]?|-[=-]?|\|[=|]?|&(?:=|&|\^=?)?|>(?:>=?|=)?|<(?:<=?|=|-)?|:=|\.\.\./,builtin:/\b(?:bool|byte|complex(?:64|128)|error|float(?:32|64)|rune|string|u?int(?:8|16|32|64)?|uintptr|append|cap|close|complex|copy|delete|imag|len|make|new|panic|print(?:ln)?|real|recover)\b/}),delete e.languages.go["class-name"]}e.exports=t,t.displayName="go",t.aliases=[]},94055(e){"use strict";function 
t(e){e.languages.graphql={comment:/#.*/,description:{pattern:/(?:"""(?:[^"]|(?!""")")*"""|"(?:\\.|[^\\"\r\n])*")(?=\s*[a-z_])/i,greedy:!0,alias:"string",inside:{"language-markdown":{pattern:/(^"(?:"")?)(?!\1)[\s\S]+(?=\1$)/,lookbehind:!0,inside:e.languages.markdown}}},string:{pattern:/"""(?:[^"]|(?!""")")*"""|"(?:\\.|[^\\"\r\n])*"/,greedy:!0},number:/(?:\B-|\b)\d+(?:\.\d+)?(?:e[+-]?\d+)?\b/i,boolean:/\b(?:true|false)\b/,variable:/\$[a-z_]\w*/i,directive:{pattern:/@[a-z_]\w*/i,alias:"function"},"attr-name":{pattern:/[a-z_]\w*(?=\s*(?:\((?:[^()"]|"(?:\\.|[^\\"\r\n])*")*\))?:)/i,greedy:!0},"atom-input":{pattern:/[A-Z]\w*Input(?=!?.*$)/m,alias:"class-name"},scalar:/\b(?:Boolean|Float|ID|Int|String)\b/,constant:/\b[A-Z][A-Z_\d]*\b/,"class-name":{pattern:/(\b(?:enum|implements|interface|on|scalar|type|union)\s+|&\s*|:\s*|\[)[A-Z_]\w*/,lookbehind:!0},fragment:{pattern:/(\bfragment\s+|\.{3}\s*(?!on\b))[a-zA-Z_]\w*/,lookbehind:!0,alias:"function"},"definition-mutation":{pattern:/(\bmutation\s+)[a-zA-Z_]\w*/,lookbehind:!0,alias:"function"},"definition-query":{pattern:/(\bquery\s+)[a-zA-Z_]\w*/,lookbehind:!0,alias:"function"},keyword:/\b(?:directive|enum|extend|fragment|implements|input|interface|mutation|on|query|repeatable|scalar|schema|subscription|type|union)\b/,operator:/[!=|&]|\.{3}/,"property-query":/\w+(?=\s*\()/,object:/\w+(?=\s*\{)/,punctuation:/[!(){}\[\]:=,]/,property:/\w+/},e.hooks.add("after-tokenize",function(e){if("graphql"===e.language){for(var t=e.tokens.filter(function(e){return"string"!=typeof e&&"comment"!==e.type&&"scalar"!==e.type}),n=0;n0)){var s=d(/^\{$/,/^\}$/);if(-1===s)continue;for(var u=n;u=0&&h(c,"variable-input")}}}}}function l(e){return t[n+e]}function f(e,t){t=t||0;for(var 
n=0;n]?|\+[+=]?|!=?|<(?:<=?|=>?)?|>(?:>>?=?|=)?|&[&=]?|\|[|=]?|\/=?|\^=?|%=?)/,lookbehind:!0},punctuation:/\.+|[{}[\];(),:$]/}),e.languages.insertBefore("groovy","string",{shebang:{pattern:/#!.+/,alias:"comment"}}),e.languages.insertBefore("groovy","punctuation",{"spock-block":/\b(?:setup|given|when|then|and|cleanup|expect|where):/}),e.languages.insertBefore("groovy","function",{annotation:{pattern:/(^|[^.])@\w+/,lookbehind:!0,alias:"punctuation"}}),e.hooks.add("wrap",function(t){if("groovy"===t.language&&"string"===t.type){var n=t.content.value[0];if("'"!=n){var r=/([^\\])(?:\$(?:\{.*?\}|[\w.]+))/;"$"===n&&(r=/([^\$])(?:\$(?:\{.*?\}|[\w.]+))/),t.content.value=t.content.value.replace(/</g,"<").replace(/&/g,"&"),t.content=e.highlight(t.content.value,{expression:{pattern:r,lookbehind:!0,inside:e.languages.groovy}}),t.classes.push("/"===n?"regex":"gstring")}}})}e.exports=t,t.displayName="groovy",t.aliases=[]},29536(e,t,n){"use strict";var r=n(56939);function i(e){e.register(r),function(e){e.languages.haml={"multiline-comment":{pattern:/((?:^|\r?\n|\r)([\t ]*))(?:\/|-#).*(?:(?:\r?\n|\r)\2[\t ].+)*/,lookbehind:!0,alias:"comment"},"multiline-code":[{pattern:/((?:^|\r?\n|\r)([\t ]*)(?:[~-]|[&!]?=)).*,[\t ]*(?:(?:\r?\n|\r)\2[\t ].*,[\t ]*)*(?:(?:\r?\n|\r)\2[\t ].+)/,lookbehind:!0,inside:e.languages.ruby},{pattern:/((?:^|\r?\n|\r)([\t ]*)(?:[~-]|[&!]?=)).*\|[\t ]*(?:(?:\r?\n|\r)\2[\t ].*\|[\t ]*)*/,lookbehind:!0,inside:e.languages.ruby}],filter:{pattern:/((?:^|\r?\n|\r)([\t ]*)):[\w-]+(?:(?:\r?\n|\r)(?:\2[\t ].+|\s*?(?=\r?\n|\r)))+/,lookbehind:!0,inside:{"filter-name":{pattern:/^:[\w-]+/,alias:"variable"}}},markup:{pattern:/((?:^|\r?\n|\r)[\t ]*)<.+/,lookbehind:!0,inside:e.languages.markup},doctype:{pattern:/((?:^|\r?\n|\r)[\t ]*)!!!(?: .+)?/,lookbehind:!0},tag:{pattern:/((?:^|\r?\n|\r)[\t 
]*)[%.#][\w\-#.]*[\w\-](?:\([^)]+\)|\{(?:\{[^}]+\}|[^{}])+\}|\[[^\]]+\])*[\/<>]*/,lookbehind:!0,inside:{attributes:[{pattern:/(^|[^#])\{(?:\{[^}]+\}|[^{}])+\}/,lookbehind:!0,inside:e.languages.ruby},{pattern:/\([^)]+\)/,inside:{"attr-value":{pattern:/(=\s*)(?:"(?:\\.|[^\\"\r\n])*"|[^)\s]+)/,lookbehind:!0},"attr-name":/[\w:-]+(?=\s*!?=|\s*[,)])/,punctuation:/[=(),]/}},{pattern:/\[[^\]]+\]/,inside:e.languages.ruby}],punctuation:/[<>]/}},code:{pattern:/((?:^|\r?\n|\r)[\t ]*(?:[~-]|[&!]?=)).+/,lookbehind:!0,inside:e.languages.ruby},interpolation:{pattern:/#\{[^}]+\}/,inside:{delimiter:{pattern:/^#\{|\}$/,alias:"punctuation"},rest:e.languages.ruby}},punctuation:{pattern:/((?:^|\r?\n|\r)[\t ]*)[~=\-&!]+/,lookbehind:!0}};for(var t="((?:^|\\r?\\n|\\r)([\\t ]*)):{{filter_name}}(?:(?:\\r?\\n|\\r)(?:\\2[\\t ].+|\\s*?(?=\\r?\\n|\\r)))+",n=["css",{filter:"coffee",language:"coffeescript"},"erb","javascript","less","markdown","ruby","scss","textile"],r={},i=0,a=n.length;i@\[\\\]^`{|}~]/,variable:/[^!"#%&'()*+,\/;<=>@\[\\\]^`{|}~\s]+/},t.hooks.add("before-tokenize",function(e){var n=/\{\{\{[\s\S]+?\}\}\}|\{\{[\s\S]+?\}\}/g;t.languages["markup-templating"].buildPlaceholders(e,"handlebars",n)}),t.hooks.add("after-tokenize",function(e){t.languages["markup-templating"].tokenizePlaceholders(e,"handlebars")}),t.languages.hbs=t.languages.handlebars}e.exports=i,i.displayName="handlebars",i.aliases=["hbs"]},58090(e){"use strict";function 
t(e){e.languages.haskell={comment:{pattern:/(^|[^-!#$%*+=?&@|~.:<>^\\\/])(?:--(?:(?=.)[^-!#$%*+=?&@|~.:<>^\\\/].*|$)|\{-[\s\S]*?-\})/m,lookbehind:!0},char:{pattern:/'(?:[^\\']|\\(?:[abfnrtv\\"'&]|\^[A-Z@[\]^_]|NUL|SOH|STX|ETX|EOT|ENQ|ACK|BEL|BS|HT|LF|VT|FF|CR|SO|SI|DLE|DC1|DC2|DC3|DC4|NAK|SYN|ETB|CAN|EM|SUB|ESC|FS|GS|RS|US|SP|DEL|\d+|o[0-7]+|x[0-9a-fA-F]+))'/,alias:"string"},string:{pattern:/"(?:[^\\"]|\\(?:\S|\s+\\))*"/,greedy:!0},keyword:/\b(?:case|class|data|deriving|do|else|if|in|infixl|infixr|instance|let|module|newtype|of|primitive|then|type|where)\b/,"import-statement":{pattern:/(^[\t ]*)import\s+(?:qualified\s+)?(?:[A-Z][\w']*)(?:\.[A-Z][\w']*)*(?:\s+as\s+(?:[A-Z][\w']*)(?:\.[A-Z][\w']*)*)?(?:\s+hiding\b)?/m,lookbehind:!0,inside:{keyword:/\b(?:import|qualified|as|hiding)\b/}},builtin:/\b(?:abs|acos|acosh|all|and|any|appendFile|approxRational|asTypeOf|asin|asinh|atan|atan2|atanh|basicIORun|break|catch|ceiling|chr|compare|concat|concatMap|const|cos|cosh|curry|cycle|decodeFloat|denominator|digitToInt|div|divMod|drop|dropWhile|either|elem|encodeFloat|enumFrom|enumFromThen|enumFromThenTo|enumFromTo|error|even|exp|exponent|fail|filter|flip|floatDigits|floatRadix|floatRange|floor|fmap|foldl|foldl1|foldr|foldr1|fromDouble|fromEnum|fromInt|fromInteger|fromIntegral|fromRational|fst|gcd|getChar|getContents|getLine|group|head|id|inRange|index|init|intToDigit|interact|ioError|isAlpha|isAlphaNum|isAscii|isControl|isDenormalized|isDigit|isHexDigit|isIEEE|isInfinite|isLower|isNaN|isNegativeZero|isOctDigit|isPrint|isSpace|isUpper|iterate|last|lcm|length|lex|lexDigits|lexLitChar|lines|log|logBase|lookup|map|mapM|mapM_|max|maxBound|maximum|maybe|min|minBound|minimum|mod|negate|not|notElem|null|numerator|odd|or|ord|otherwise|pack|pi|pred|primExitWith|print|product|properFraction|putChar|putStr|putStrLn|quot|quotRem|range|rangeSize|read|readDec|readFile|readFloat|readHex|readIO|readInt|readList|readLitChar|readLn|readOct|readParen|readSigned|reads|readsPrec|realToFrac|recip|rem|
repeat|replicate|return|reverse|round|scaleFloat|scanl|scanl1|scanr|scanr1|seq|sequence|sequence_|show|showChar|showInt|showList|showLitChar|showParen|showSigned|showString|shows|showsPrec|significand|signum|sin|sinh|snd|sort|span|splitAt|sqrt|subtract|succ|sum|tail|take|takeWhile|tan|tanh|threadToIOResult|toEnum|toInt|toInteger|toLower|toRational|toUpper|truncate|uncurry|undefined|unlines|until|unwords|unzip|unzip3|userError|words|writeFile|zip|zip3|zipWith|zipWith3)\b/,number:/\b(?:\d+(?:\.\d+)?(?:e[+-]?\d+)?|0o[0-7]+|0x[0-9a-f]+)\b/i,operator:/\s\.\s|[-!#$%*+=?&@|~:<>^\\\/]*\.[-!#$%*+=?&@|~.:<>^\\\/]+|[-!#$%*+=?&@|~.:<>^\\\/]+\.[-!#$%*+=?&@|~:<>^\\\/]*|[-!#$%*+=?&@|~:<>^\\\/]+|`(?:[A-Z][\w']*\.)*[_a-z][\w']*`/,hvariable:/\b(?:[A-Z][\w']*\.)*[_a-z][\w']*\b/,constant:/\b(?:[A-Z][\w']*\.)*[A-Z][\w']*\b/,punctuation:/[{}[\];(),.:]/},e.languages.hs=e.languages.haskell}e.exports=t,t.displayName="haskell",t.aliases=["hs"]},95121(e){"use strict";function t(e){e.languages.haxe=e.languages.extend("clike",{string:{pattern:/(["'])(?:(?!\1)[^\\]|\\[\s\S])*\1/,greedy:!0,inside:{interpolation:{pattern:/(^|[^\\])\$(?:\w+|\{[^}]+\})/,lookbehind:!0,inside:{interpolation:{pattern:/^\$\w*/,alias:"variable"}}}}},keyword:/\bthis\b|\b(?:abstract|as|break|case|cast|catch|class|continue|default|do|dynamic|else|enum|extends|extern|from|for|function|if|implements|import|in|inline|interface|macro|new|null|override|public|private|return|static|super|switch|throw|to|try|typedef|using|var|while)(?!\.)\b/,operator:/\.{3}|\+\+?|-[->]?|[=!]=?|&&?|\|\|?|<[<=]?|>[>=]?|[*\/%~^]/}),e.languages.insertBefore("haxe","class-name",{regex:{pattern:/~\/(?:[^\/\\\r\n]|\\.)+\/[igmsu]*/,greedy:!0}}),e.languages.insertBefore("haxe","keyword",{preprocessor:{pattern:/#\w+/,alias:"builtin"},metadata:{pattern:/@:?\w+/,alias:"symbol"},reification:{pattern:/\$(?:\w+|(?=\{))/,alias:"variable"}}),e.languages.haxe.string.inside.interpolation.inside.rest=e.languages.haxe,delete 
e.languages.haxe["class-name"]}e.exports=t,t.displayName="haxe",t.aliases=[]},59904(e){"use strict";function t(e){e.languages.hcl={comment:/(?:\/\/|#).*|\/\*[\s\S]*?(?:\*\/|$)/,heredoc:{pattern:/<<-?(\w+\b)[\s\S]*?^[ \t]*\1/m,greedy:!0,alias:"string"},keyword:[{pattern:/(?:resource|data)\s+(?:"(?:\\[\s\S]|[^\\"])*")(?=\s+"[\w-]+"\s+\{)/i,inside:{type:{pattern:/(resource|data|\s+)(?:"(?:\\[\s\S]|[^\\"])*")/i,lookbehind:!0,alias:"variable"}}},{pattern:/(?:provider|provisioner|variable|output|module|backend)\s+(?:[\w-]+|"(?:\\[\s\S]|[^\\"])*")\s+(?=\{)/i,inside:{type:{pattern:/(provider|provisioner|variable|output|module|backend)\s+(?:[\w-]+|"(?:\\[\s\S]|[^\\"])*")\s+/i,lookbehind:!0,alias:"variable"}}},/[\w-]+(?=\s+\{)/],property:[/[-\w\.]+(?=\s*=(?!=))/,/"(?:\\[\s\S]|[^\\"])+"(?=\s*[:=])/],string:{pattern:/"(?:[^\\$"]|\\[\s\S]|\$(?:(?=")|\$+(?!\$)|[^"${])|\$\{(?:[^{}"]|"(?:[^\\"]|\\[\s\S])*")*\})*"/,greedy:!0,inside:{interpolation:{pattern:/(^|[^$])\$\{(?:[^{}"]|"(?:[^\\"]|\\[\s\S])*")*\}/,lookbehind:!0,inside:{type:{pattern:/(\b(?:terraform|var|self|count|module|path|data|local)\b\.)[\w\*]+/i,lookbehind:!0,alias:"variable"},keyword:/\b(?:terraform|var|self|count|module|path|data|local)\b/i,function:/\w+(?=\()/,string:{pattern:/"(?:\\[\s\S]|[^\\"])*"/,greedy:!0},number:/\b0x[\da-f]+\b|\b\d+(?:\.\d*)?(?:e[+-]?\d+)?/i,punctuation:/[!\$#%&'()*+,.\/;<=>@\[\\\]^`{|}~?:]/}}}},number:/\b0x[\da-f]+\b|\b\d+(?:\.\d*)?(?:e[+-]?\d+)?/i,boolean:/\b(?:true|false)\b/i,punctuation:/[=\[\]{}]/}}e.exports=t,t.displayName="hcl",t.aliases=[]},9436(e,t,n){"use strict";var r=n(65806);function 
i(e){e.register(r),e.languages.hlsl=e.languages.extend("c",{"class-name":[e.languages.c["class-name"],/\b(?:AppendStructuredBuffer|BlendState|Buffer|ByteAddressBuffer|CompileShader|ComputeShader|ConsumeStructuredBuffer|DepthStencilState|DepthStencilView|DomainShader|GeometryShader|Hullshader|InputPatch|LineStream|OutputPatch|PixelShader|PointStream|RasterizerState|RenderTargetView|RWBuffer|RWByteAddressBuffer|RWStructuredBuffer|RWTexture(?:1D|1DArray|2D|2DArray|3D)|SamplerComparisonState|SamplerState|StructuredBuffer|Texture(?:1D|1DArray|2D|2DArray|2DMS|2DMSArray|3D|Cube|CubeArray)|TriangleStream|VertexShader)\b/],keyword:[/\b(?:asm|asm_fragment|auto|break|case|catch|cbuffer|centroid|char|class|column_major|compile|compile_fragment|const|const_cast|continue|default|delete|discard|do|dynamic_cast|else|enum|explicit|export|extern|for|friend|fxgroup|goto|groupshared|if|in|inline|inout|interface|line|lineadj|linear|long|matrix|mutable|namespace|new|nointerpolation|noperspective|operator|out|packoffset|pass|pixelfragment|point|precise|private|protected|public|register|reinterpret_cast|return|row_major|sample|sampler|shared|short|signed|sizeof|snorm|stateblock|stateblock_state|static|static_cast|string|struct|switch|tbuffer|technique|technique10|technique11|template|texture|this|throw|triangle|triangleadj|try|typedef|typename|uniform|union|unorm|unsigned|using|vector|vertexfragment|virtual|void|volatile|while)\b/,/\b(?:bool|double|dword|float|half|int|min(?:10float|12int|16(?:float|int|uint))|uint)(?:[1-4](?:x[1-4])?)?\b/],number:/(?:(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[eE][+-]?\d+)?|\b0x[\da-fA-F]+)[fFhHlLuU]?\b/,boolean:/\b(?:false|true)\b/})}e.exports=i,i.displayName="hlsl",i.aliases=[]},76942(e){"use strict";function t(e){e.languages.hpkp={directive:{pattern:/\b(?:(?:includeSubDomains|preload|strict)(?: |;)|pin-sha256="[a-zA-Z\d+=/]+"|(?:max-age|report-uri)=|report-to 
)/,alias:"keyword"},safe:{pattern:/\b\d{7,}\b/,alias:"selector"},unsafe:{pattern:/\b\d{1,6}\b/,alias:"function"}}}e.exports=t,t.displayName="hpkp",t.aliases=[]},60561(e){"use strict";function t(e){e.languages.hsts={directive:{pattern:/\b(?:max-age=|includeSubDomains|preload)/,alias:"keyword"},safe:{pattern:/\b\d{8,}\b/,alias:"selector"},unsafe:{pattern:/\b\d{1,7}\b/,alias:"function"}}}e.exports=t,t.displayName="hsts",t.aliases=[]},49660(e){"use strict";function t(e){!function(e){e.languages.http={"request-line":{pattern:/^(?:GET|HEAD|POST|PUT|DELETE|CONNECT|OPTIONS|TRACE|PATCH|PRI|SEARCH)\s(?:https?:\/\/|\/)\S*\sHTTP\/[0-9.]+/m,inside:{method:{pattern:/^[A-Z]+\b/,alias:"property"},"request-target":{pattern:/^(\s)(?:https?:\/\/|\/)\S*(?=\s)/,lookbehind:!0,alias:"url",inside:e.languages.uri},"http-version":{pattern:/^(\s)HTTP\/[0-9.]+/,lookbehind:!0,alias:"property"}}},"response-status":{pattern:/^HTTP\/[0-9.]+ \d+ .+/m,inside:{"http-version":{pattern:/^HTTP\/[0-9.]+/,alias:"property"},"status-code":{pattern:/^(\s)\d+(?=\s)/,lookbehind:!0,alias:"number"},"reason-phrase":{pattern:/^(\s).+/,lookbehind:!0,alias:"string"}}},"header-name":{pattern:/^[\w-]+:(?=.)/m,alias:"keyword"}};var t,n=e.languages,r={"application/javascript":n.javascript,"application/json":n.json||n.javascript,"application/xml":n.xml,"text/xml":n.xml,"text/html":n.html,"text/css":n.css},i={"application/json":!0,"application/xml":!0};function a(e){var t="\\w+/(?:[\\w.-]+\\+)+"+e.replace(/^[a-z]+\//,"")+"(?![+\\w.-])";return"(?:"+e+"|"+t+")"}for(var o in r)if(r[o]){t=t||{};var s=i[o]?a(o):o;t[o.replace(/\//g,"-")]={pattern:RegExp("(content-type:\\s*"+s+"(?:(?:\\r\\n?|\\n).+)*)(?:\\r?\\n|\\r){2}[\\s\\S]*","i"),lookbehind:!0,inside:r[o]}}t&&e.languages.insertBefore("http","header-name",t)}(e)}e.exports=t,t.displayName="http",t.aliases=[]},30615(e){"use strict";function t(e){e.languages.ichigojam={comment:/(?:\B'|REM)(?:[^\n\r]*)/i,string:{pattern:/"(?:""|[!#$%&'()*,\/:;<=>?^\w 
+\-.])*"/i,greedy:!0},number:/\B#[0-9A-F]+|\B`[01]+|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:E[+-]?\d+)?/i,keyword:/\b(?:BEEP|BPS|CASE|CLEAR|CLK|CLO|CLP|CLS|CLT|CLV|CONT|COPY|ELSE|END|FILE|FILES|FOR|GOSUB|GSB|GOTO|IF|INPUT|KBD|LED|LET|LIST|LOAD|LOCATE|LRUN|NEW|NEXT|OUT|RIGHT|PLAY|POKE|PRINT|PWM|REM|RENUM|RESET|RETURN|RTN|RUN|SAVE|SCROLL|SLEEP|SRND|STEP|STOP|SUB|TEMPO|THEN|TO|UART|VIDEO|WAIT)(?:\$|\b)/i,function:/\b(?:ABS|ANA|ASC|BIN|BTN|DEC|END|FREE|HELP|HEX|I2CR|I2CW|IN|INKEY|LEN|LINE|PEEK|RND|SCR|SOUND|STR|TICK|USR|VER|VPEEK|ZER)(?:\$|\b)/i,label:/(?:\B@\S+)/i,operator:/<[=>]?|>=?|\|\||&&|[+\-*\/=|&^~!]|\b(?:AND|NOT|OR)\b/i,punctuation:/[\[,;:()\]]/}}e.exports=t,t.displayName="ichigojam",t.aliases=[]},93865(e){"use strict";function t(e){e.languages.icon={comment:/#.*/,string:{pattern:/(["'])(?:(?!\1)[^\\\r\n_]|\\.|_(?!\1)(?:\r\n|[\s\S]))*\1/,greedy:!0},number:/\b(?:\d+r[a-z\d]+|\d+(?:\.\d+)?(?:e[+-]?\d+)?)\b|\.\d+\b/i,"builtin-keyword":{pattern:/&(?:allocated|ascii|clock|collections|cset|current|date|dateline|digits|dump|e|error(?:number|text|value)?|errout|fail|features|file|host|input|lcase|letters|level|line|main|null|output|phi|pi|pos|progname|random|regions|source|storage|subject|time|trace|ucase|version)\b/,alias:"variable"},directive:{pattern:/\$\w+/,alias:"builtin"},keyword:/\b(?:break|by|case|create|default|do|else|end|every|fail|global|if|initial|invocable|link|local|next|not|of|procedure|record|repeat|return|static|suspend|then|to|until|while)\b/,function:/\b(?!\d)\w+(?=\s*[({]|\s*!\s*\[)/,operator:/[+-]:(?!=)|(?:[\/?@^%&]|\+\+?|--?|==?=?|~==?=?|\*\*?|\|\|\|?|<(?:->?|>?=?)(?::=)?|:(?:=:?)?|[!.\\|~]/,punctuation:/[\[\](){},;]/}}e.exports=t,t.displayName="icon",t.aliases=[]},51078(e){"use strict";function t(e){!function(e){function t(e,n){return n<=0?/[]/.source:e.replace(//g,function(){return t(e,n-1)})}var 
n=/'[{}:=,](?:[^']|'')*'(?!')/,r={pattern:/''/,greedy:!0,alias:"operator"},i={pattern:n,greedy:!0,inside:{escape:r}},a=t(/\{(?:[^{}']|'(?![{},'])|''||)*\}/.source.replace(//g,function(){return n.source}),8),o={pattern:RegExp(a),inside:{message:{pattern:/^(\{)[\s\S]+(?=\}$)/,lookbehind:!0,inside:null},"message-delimiter":{pattern:/./,alias:"punctuation"}}};e.languages["icu-message-format"]={argument:{pattern:RegExp(a),greedy:!0,inside:{content:{pattern:/^(\{)[\s\S]+(?=\}$)/,lookbehind:!0,inside:{"argument-name":{pattern:/^(\s*)[^{}:=,\s]+/,lookbehind:!0},"choice-style":{pattern:/^(\s*,\s*choice\s*,\s*)\S(?:[\s\S]*\S)?/,lookbehind:!0,inside:{punctuation:/\|/,range:{pattern:/^(\s*)[+-]?(?:\d+(?:\.\d*)?|\u221e)\s*[<#\u2264]/,lookbehind:!0,inside:{operator:/[<#\u2264]/,number:/\S+/}},rest:null}},"plural-style":{pattern:/^(\s*,\s*(?:plural|selectordinal)\s*,\s*)\S(?:[\s\S]*\S)?/,lookbehind:!0,inside:{offset:/^offset:\s*\d+/,"nested-message":o,selector:{pattern:/=\d+|[^{}:=,\s]+/,inside:{keyword:/^(?:zero|one|two|few|many|other)$/}}}},"select-style":{pattern:/^(\s*,\s*select\s*,\s*)\S(?:[\s\S]*\S)?/,lookbehind:!0,inside:{"nested-message":o,selector:{pattern:/[^{}:=,\s]+/,inside:{keyword:/^other$/}}}},keyword:/\b(?:choice|plural|select|selectordinal)\b/,"arg-type":{pattern:/\b(?:number|date|time|spellout|ordinal|duration)\b/,alias:"keyword"},"arg-skeleton":{pattern:/(,\s*)::[^{}:=,\s]+/,lookbehind:!0},"arg-style":{pattern:/(,\s*)(?:short|medium|long|full|integer|currency|percent)(?=\s*$)/,lookbehind:!0},"arg-style-text":{pattern:RegExp(/(^\s*,\s*(?=\S))/.source+t(/(?:[^{}']|'[^']*'|\{(?:)?\})+/.source,8)+"$"),lookbehind:!0,alias:"string"},punctuation:/,/}},"argument-delimiter":{pattern:/./,alias:"operator"}}},escape:r,string:i},o.inside.message.inside=e.languages["icu-message-format"],e.languages["icu-message-format"].argument.inside.content.inside["choice-style"].inside.rest=e.languages["icu-message-format"]}(e)}e.exports=t,t.displayName="icuMessageFormat",t.aliases=[]},91
178(e,t,n){"use strict";var r=n(58090);function i(e){e.register(r),e.languages.idris=e.languages.extend("haskell",{comment:{pattern:/(?:(?:--|\|\|\|).*$|\{-[\s\S]*?-\})/m},keyword:/\b(?:Type|case|class|codata|constructor|corecord|data|do|dsl|else|export|if|implementation|implicit|import|impossible|in|infix|infixl|infixr|instance|interface|let|module|mutual|namespace|of|parameters|partial|postulate|private|proof|public|quoteGoal|record|rewrite|syntax|then|total|using|where|with)\b/,"import-statement":{pattern:/(^\s*)import\s+(?:[A-Z][\w']*)(?:\.[A-Z][\w']*)*/m,lookbehind:!0},builtin:void 0}),e.languages.idr=e.languages.idris}e.exports=i,i.displayName="idris",i.aliases=["idr"]},40011(e){"use strict";function t(e){e.languages.iecst={comment:[{pattern:/(^|[^\\])(?:\/\*[\s\S]*?(?:\*\/|$)|\(\*[\s\S]*?(?:\*\)|$)|\{[\s\S]*?(?:\}|$))/,lookbehind:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0,greedy:!0}],string:{pattern:/(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0},"class-name":/\b(?:END_)?(?:PROGRAM|CONFIGURATION|INTERFACE|FUNCTION_BLOCK|FUNCTION|ACTION|TRANSITION|TYPE|STRUCT|(?:INITIAL_)?STEP|NAMESPACE|LIBRARY|CHANNEL|FOLDER|RESOURCE|VAR_(?:GLOBAL|INPUT|PUTPUT|IN_OUT|ACCESS|TEMP|EXTERNAL|CONFIG)|VAR|METHOD|PROPERTY)\b/i,keyword:/\b(?:(?:END_)?(?:IF|WHILE|REPEAT|CASE|FOR)|ELSE|FROM|THEN|ELSIF|DO|TO|BY|PRIVATE|PUBLIC|PROTECTED|CONSTANT|RETURN|EXIT|CONTINUE|GOTO|JMP|AT|RETAIN|NON_RETAIN|TASK|WITH|UNTIL|USING|EXTENDS|IMPLEMENTS|GET|SET|__TRY|__CATCH|__FINALLY|__ENDTRY)\b/,variable:/\b(?:AT|BOOL|BYTE|(?:D|L)?WORD|U?(?:S|D|L)?INT|L?REAL|TIME(?:_OF_DAY)?|TOD|DT|DATE(?:_AND_TIME)?|STRING|ARRAY|ANY|POINTER)\b/,symbol:/%[IQM][XBWDL][\d.]*|%[IQ][\d.]*/,number:/\b(?:16#[\da-f]+|2#[01_]+|0x[\da-f]+)\b|\b(?:T|D|DT|TOD)#[\d_shmd:]*|\b[A-Z]*#[\d.,_]*|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?/i,boolean:/\b(?:TRUE|FALSE|NULL)\b/,function:/\w+(?=\()/,operator:/(?:S?R?:?=>?|&&?|\*\*?|<=?|>=?|[-:^/+])|\b(?:OR|AND|MOD|NOT|XOR|LE|GE|EQ|NE|GT|LT)\b/,punctuation:/[();]/,type:{pattern:/
#/,alias:"selector"}}}e.exports=t,t.displayName="iecst",t.aliases=[]},12017(e){"use strict";function t(e){var t;(t=e).languages.ignore={comment:/^#.*/m,entry:{pattern:/\S(?:.*(?:(?:\\ )|\S))?/,alias:"string",inside:{operator:/^!|\*\*?|\?/,regex:{pattern:/(^|[^\\])\[[^\[\]]*\]/,lookbehind:!0},punctuation:/\//}}},t.languages.gitignore=t.languages.ignore,t.languages.hgignore=t.languages.ignore,t.languages.npmignore=t.languages.ignore}e.exports=t,t.displayName="ignore",t.aliases=["gitignore","hgignore","npmignore"]},65175(e){"use strict";function t(e){e.languages.inform7={string:{pattern:/"[^"]*"/,inside:{substitution:{pattern:/\[[^\[\]]+\]/,inside:{delimiter:{pattern:/\[|\]/,alias:"punctuation"}}}}},comment:{pattern:/\[[^\[\]]+\]/,greedy:!0},title:{pattern:/^[ \t]*(?:volume|book|part(?! of)|chapter|section|table)\b.+/im,alias:"important"},number:{pattern:/(^|[^-])(?:\b\d+(?:\.\d+)?(?:\^\d+)?(?:(?!\d)\w+)?|\b(?:one|two|three|four|five|six|seven|eight|nine|ten|eleven|twelve))\b(?!-)/i,lookbehind:!0},verb:{pattern:/(^|[^-])\b(?:applying to|are|attacking|answering|asking|be(?:ing)?|burning|buying|called|carries|carry(?! 
out)|carrying|climbing|closing|conceal(?:s|ing)?|consulting|contain(?:s|ing)?|cutting|drinking|dropping|eating|enclos(?:es?|ing)|entering|examining|exiting|getting|giving|going|ha(?:ve|s|ving)|hold(?:s|ing)?|impl(?:y|ies)|incorporat(?:es?|ing)|inserting|is|jumping|kissing|listening|locking|looking|mean(?:s|ing)?|opening|provid(?:es?|ing)|pulling|pushing|putting|relat(?:es?|ing)|removing|searching|see(?:s|ing)?|setting|showing|singing|sleeping|smelling|squeezing|switching|support(?:s|ing)?|swearing|taking|tasting|telling|thinking|throwing|touching|turning|tying|unlock(?:s|ing)?|var(?:y|ies|ying)|waiting|waking|waving|wear(?:s|ing)?)\b(?!-)/i,lookbehind:!0,alias:"operator"},keyword:{pattern:/(^|[^-])\b(?:after|before|carry out|check|continue the action|definition(?= *:)|do nothing|else|end (?:if|unless|the story)|every turn|if|include|instead(?: of)?|let|move|no|now|otherwise|repeat|report|resume the story|rule for|running through|say(?:ing)?|stop the action|test|try(?:ing)?|understand|unless|use|when|while|yes)\b(?!-)/i,lookbehind:!0},property:{pattern:/(^|[^-])\b(?:adjacent(?! 
to)|carried|closed|concealed|contained|dark|described|edible|empty|enclosed|enterable|even|female|fixed in place|full|handled|held|improper-named|incorporated|inedible|invisible|lighted|lit|lock(?:able|ed)|male|marked for listing|mentioned|negative|neuter|non-(?:empty|full|recurring)|odd|opaque|open(?:able)?|plural-named|portable|positive|privately-named|proper-named|provided|publically-named|pushable between rooms|recurring|related|rubbing|scenery|seen|singular-named|supported|swinging|switch(?:able|ed(?: on| off)?)|touch(?:able|ed)|transparent|unconcealed|undescribed|unlit|unlocked|unmarked for listing|unmentioned|unopenable|untouchable|unvisited|variable|visible|visited|wearable|worn)\b(?!-)/i,lookbehind:!0,alias:"symbol"},position:{pattern:/(^|[^-])\b(?:above|adjacent to|back side of|below|between|down|east|everywhere|front side|here|in|inside(?: from)?|north(?:east|west)?|nowhere|on(?: top of)?|other side|outside(?: from)?|parts? of|regionally in|south(?:east|west)?|through|up|west|within)\b(?!-)/i,lookbehind:!0,alias:"keyword"},type:{pattern:/(^|[^-])\b(?:actions?|activit(?:y|ies)|actors?|animals?|backdrops?|containers?|devices?|directions?|doors?|holders?|kinds?|lists?|m[ae]n|nobody|nothing|nouns?|numbers?|objects?|people|persons?|player(?:'s holdall)?|regions?|relations?|rooms?|rule(?:book)?s?|scenes?|someone|something|supporters?|tables?|texts?|things?|time|vehicles?|wom[ae]n)\b(?!-)/i,lookbehind:!0,alias:"variable"},punctuation:/[.,:;(){}]/},e.languages.inform7.string.inside.substitution.inside.rest=e.languages.inform7,e.languages.inform7.string.inside.substitution.inside.rest.text={pattern:/\S(?:\s*\S)*/,alias:"comment"}}e.exports=t,t.displayName="inform7",t.aliases=[]},14970(e){"use strict";function t(e){e.languages.ini={comment:{pattern:/(^[ \f\t\v]*)[#;][^\n\r]*/m,lookbehind:!0},header:{pattern:/(^[ \f\t\v]*)\[[^\n\r\]]*\]?/m,lookbehind:!0,inside:{"section-name":{pattern:/(^\[[ \f\t\v]*)[^ \f\t\v\]]+(?:[ \f\t\v]+[^ 
\f\t\v\]]+)*/,lookbehind:!0,alias:"selector"},punctuation:/\[|\]/}},key:{pattern:/(^[ \f\t\v]*)[^ \f\n\r\t\v=]+(?:[ \f\t\v]+[^ \f\n\r\t\v=]+)*(?=[ \f\t\v]*=)/m,lookbehind:!0,alias:"attr-name"},value:{pattern:/(=[ \f\t\v]*)[^ \f\n\r\t\v]+(?:[ \f\t\v]+[^ \f\n\r\t\v]+)*/,lookbehind:!0,alias:"attr-value",inside:{"inner-value":{pattern:/^("|').+(?=\1$)/,lookbehind:!0}}},punctuation:/=/}}e.exports=t,t.displayName="ini",t.aliases=[]},30764(e){"use strict";function t(e){e.languages.io={comment:[{pattern:/(^|[^\\])\/\*[\s\S]*?(?:\*\/|$)/,lookbehind:!0},{pattern:/(^|[^\\])\/\/.*/,lookbehind:!0},{pattern:/(^|[^\\])#.*/,lookbehind:!0}],"triple-quoted-string":{pattern:/"""(?:\\[\s\S]|(?!""")[^\\])*"""/,greedy:!0,alias:"string"},string:{pattern:/"(?:\\.|[^\\\r\n"])*"/,greedy:!0},keyword:/\b(?:activate|activeCoroCount|asString|block|break|catch|clone|collectGarbage|compileString|continue|do|doFile|doMessage|doString|else|elseif|exit|for|foreach|forward|getSlot|getEnvironmentVariable|hasSlot|if|ifFalse|ifNil|ifNilEval|ifTrue|isActive|isNil|isResumable|list|message|method|parent|pass|pause|perform|performWithArgList|print|println|proto|raise|raiseResumable|removeSlot|resend|resume|schedulerSleepSeconds|self|sender|setSchedulerSleepSeconds|setSlot|shallowCopy|slotNames|super|system|then|thisBlock|thisContext|call|try|type|uniqueId|updateSlot|wait|while|write|yield)\b/,builtin:/\b(?:Array|AudioDevice|AudioMixer|Block|Box|Buffer|CFunction|CGI|Color|Curses|DBM|DNSResolver|DOConnection|DOProxy|DOServer|Date|Directory|Duration|DynLib|Error|Exception|FFT|File|Fnmatch|Font|Future|GL|GLE|GLScissor|GLU|GLUCylinder|GLUQuadric|GLUSphere|GLUT|Host|Image|Importer|LinkList|List|Lobby|Locals|MD5|MP3Decoder|MP3Encoder|Map|Message|Movie|Notification|Number|Object|OpenGL|Point|Protos|Regex|SGML|SGMLElement|SGMLParser|SQLite|Server|Sequence|ShowMessage|SleepyCat|SleepyCatCursor|Socket|SocketManager|Sound|Soup|Store|String|Tree|UDPSender|UPDReceiver|URL|User|Warning|WeakLink|Random|BigNum)\b/,boolean:/\
b(?:true|false|nil)\b/,number:/\b0x[\da-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e-?\d+)?/i,operator:/[=!*/%+\-^&|]=|>>?=?|<+*\-%$|,#][.:]?|[?^]\.?|[;\[]:?|[~}"i][.:]|[ACeEIjLor]\.|(?:[_\/\\qsux]|_?\d):)/,alias:"keyword"},number:/\b_?(?:(?!\d:)\d+(?:\.\d+)?(?:(?:[ejpx]|ad|ar)_?\d+(?:\.\d+)?)*(?:b_?[\da-z]+(?:\.[\da-z]+)?)?|_\b(?!\.))/,adverb:{pattern:/[~}]|[\/\\]\.?|[bfM]\.|t[.:]/,alias:"builtin"},operator:/[=a][.:]|_\./,conjunction:{pattern:/&(?:\.:?|:)?|[.:@][.:]?|[!D][.:]|[;dHT]\.|`:?|[\^LS]:|"/,alias:"variable"},punctuation:/[()]/}}e.exports=t,t.displayName="j",t.aliases=[]},15909(e){"use strict";function t(e){var t,n,r,i;t=e,n=/\b(?:abstract|assert|boolean|break|byte|case|catch|char|class|const|continue|default|do|double|else|enum|exports|extends|final|finally|float|for|goto|if|implements|import|instanceof|int|interface|long|module|native|new|non-sealed|null|open|opens|package|permits|private|protected|provides|public|record|requires|return|sealed|short|static|strictfp|super|switch|synchronized|this|throw|throws|to|transient|transitive|try|uses|var|void|volatile|while|with|yield)\b/,i={pattern:RegExp((r=/(^|[^\w.])(?:[a-z]\w*\s*\.\s*)*(?:[A-Z]\w*\s*\.\s*)*/.source)+/[A-Z](?:[\d_A-Z]*[a-z]\w*)?\b/.source),lookbehind:!0,inside:{namespace:{pattern:/^[a-z]\w*(?:\s*\.\s*[a-z]\w*)*(?:\s*\.)?/,inside:{punctuation:/\./}},punctuation:/\./}},t.languages.java=t.languages.extend("clike",{"class-name":[i,{pattern:RegExp(r+/[A-Z]\w*(?=\s+\w+\s*[;,=()])/.source),lookbehind:!0,inside:i.inside}],keyword:n,function:[t.languages.clike.function,{pattern:/(::\s*)[a-z_]\w*/,lookbehind:!0}],number:/\b0b[01][01_]*L?\b|\b0x(?:\.[\da-f_p+-]+|[\da-f_]+(?:\.[\da-f_p+-]+)?)\b|(?:\b\d[\d_]*(?:\.[\d_]*)?|\B\.\d[\d_]*)(?:e[+-]?\d[\d_]*)?[dfl]?/i,operator:{pattern:/(^|[^.])(?:<<=?|>>>?=?|->|--|\+\+|&&|\|\||::|[?:~]|[-+*/%&|^!=<>]=?)/m,lookbehind:!0}}),t.languages.insertBefore("java","string",{"triple-quoted-string":{pattern:/"""[ 
\t]*[\r\n](?:(?:"|"")?(?:\\.|[^"\\]))*"""/,greedy:!0,alias:"string"}}),t.languages.insertBefore("java","class-name",{annotation:{pattern:/(^|[^.])@\w+(?:\s*\.\s*\w+)*/,lookbehind:!0,alias:"punctuation"},generics:{pattern:/<(?:[\w\s,.?]|&(?!&)|<(?:[\w\s,.?]|&(?!&)|<(?:[\w\s,.?]|&(?!&)|<(?:[\w\s,.?]|&(?!&))*>)*>)*>)*>/,inside:{"class-name":i,keyword:n,punctuation:/[<>(),.:]/,operator:/[?&|]/}},namespace:{pattern:RegExp(/(\b(?:exports|import(?:\s+static)?|module|open|opens|package|provides|requires|to|transitive|uses|with)\s+)(?!)[a-z]\w*(?:\.[a-z]\w*)*\.?/.source.replace(//g,function(){return n.source})),lookbehind:!0,inside:{punctuation:/\./}}})}e.exports=t,t.displayName="java",t.aliases=[]},36553(e,t,n){"use strict";var r=n(15909),i=n(9858);function a(e){var t,n,a,o;e.register(r),e.register(i),t=e,n=/(^(?:[\t ]*(?:\*\s*)*))[^*\s].*$/m,a=/#\s*\w+(?:\s*\([^()]*\))?/.source,o=/(?:\b[a-zA-Z]\w+\s*\.\s*)*\b[A-Z]\w*(?:\s*)?|/.source.replace(//g,function(){return a}),t.languages.javadoc=t.languages.extend("javadoclike",{}),t.languages.insertBefore("javadoc","keyword",{reference:{pattern:RegExp(/(@(?:exception|throws|see|link|linkplain|value)\s+(?:\*\s*)?)/.source+"(?:"+o+")"),lookbehind:!0,inside:{function:{pattern:/(#\s*)\w+(?=\s*\()/,lookbehind:!0},field:{pattern:/(#\s*)\w+/,lookbehind:!0},namespace:{pattern:/\b(?:[a-z]\w*\s*\.\s*)+/,inside:{punctuation:/\./}},"class-name":/\b[A-Z]\w*/,keyword:t.languages.java.keyword,punctuation:/[#()[\],.]/}},"class-name":{pattern:/(@param\s+)<[A-Z]\w*>/,lookbehind:!0,inside:{punctuation:/[.<>]/}},"code-section":[{pattern:/(\{@code\s+(?!\s))(?:[^\s{}]|\s+(?![\s}])|\{(?:[^{}]|\{(?:[^{}]|\{(?:[^{}]|\{[^{}]*\})*\})*\})*\})+(?=\s*\})/,lookbehind:!0,inside:{code:{pattern:n,lookbehind:!0,inside:t.languages.java,alias:"language-java"}}},{pattern:/(<(code|pre|tt)>(?!)\s*)\S(?:\S|\s+\S)*?(?=\s*<\/\2>)/,lookbehind:!0,inside:{line:{pattern:n,lookbehind:!0,inside:{tag:t.languages.markup.tag,entity:t.languages.markup.entity,code:{pattern:/.+/,insid
e:t.languages.java,alias:"language-java"}}}}}],tag:t.languages.markup.tag,entity:t.languages.markup.entity}),t.languages.javadoclike.addSupport("java",t.languages.javadoc)}e.exports=a,a.displayName="javadoc",a.aliases=[]},9858(e){"use strict";function t(e){!function(e){var t=e.languages.javadoclike={parameter:{pattern:/(^[\t ]*(?:\/{3}|\*|\/\*\*)\s*@(?:param|arg|arguments)\s+)\w+/m,lookbehind:!0},keyword:{pattern:/(^[\t ]*(?:\/{3}|\*|\/\*\*)\s*|\{)@[a-z][a-zA-Z-]+\b/m,lookbehind:!0},punctuation:/[{}]/};function n(t,n){var r="doc-comment",i=e.languages[t];if(i){var a=i[r];if(!a){var o={};o[r]={pattern:/(^|[^\\])\/\*\*[^/][\s\S]*?(?:\*\/|$)/,lookbehind:!0,alias:"comment"},a=(i=e.languages.insertBefore(t,"comment",o))[r]}if(a instanceof RegExp&&(a=i[r]={pattern:a}),Array.isArray(a))for(var s=0,u=a.length;s|&&=?|\|\|=?|[!=]==|<<=?|>>>?=?|[-+*/%&|^!=<>]=?|\.{3}|\?\?=?|\?\.?|[~:]/}),e.languages.javascript["class-name"][0].pattern=/(\b(?:class|interface|extends|implements|instanceof|new)\s+)[\w.\\]+/,e.languages.insertBefore("javascript","keyword",{regex:{pattern:/((?:^|[^$\w\xA0-\uFFFF."'\])\s]|\b(?:return|yield))\s*)\/(?:\[(?:[^\]\\\r\n]|\\.)*\]|\\.|[^/\\\[\r\n])+\/[dgimyus]{0,7}(?=(?:\s|\/\*(?:[^*]|\*(?!\/))*\*\/)*(?:$|[\r\n,.;:})\]]|\/\/))/,lookbehind:!0,greedy:!0,inside:{"regex-source":{pattern:/^(\/)[\s\S]+(?=\/[a-z]*$)/,lookbehind:!0,alias:"language-regex",inside:e.languages.regex},"regex-delimiter":/^\/|\/$/,"regex-flags":/^[a-z]+$/}},"function-variable":{pattern:/#?(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*[=:]\s*(?:async\s*)?(?:\bfunction\b|(?:\((?:[^()]|\([^()]*\))*\)|(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*)\s*=>))/,alias:"function"},parameter:[{pattern:/(function(?:\s+(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*)?\s*\(\s*)(?!\s)(?:[^()\s]|\s+(?![\s)])|\([^()]*\))+(?=\s*\))/,lookbehind:!0,inside:e.languages.javascript},{pattern:/(^|[^$\w\xA0-\uFFFF])(?!\s)[_$a-z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*=>)/i,lookbehi
nd:!0,inside:e.languages.javascript},{pattern:/(\(\s*)(?!\s)(?:[^()\s]|\s+(?![\s)])|\([^()]*\))+(?=\s*\)\s*=>)/,lookbehind:!0,inside:e.languages.javascript},{pattern:/((?:\b|\s|^)(?!(?:as|async|await|break|case|catch|class|const|continue|debugger|default|delete|do|else|enum|export|extends|finally|for|from|function|get|if|implements|import|in|instanceof|interface|let|new|null|of|package|private|protected|public|return|set|static|super|switch|this|throw|try|typeof|undefined|var|void|while|with|yield)(?![$\w\xA0-\uFFFF]))(?:(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*\s*)\(\s*|\]\s*\(\s*)(?!\s)(?:[^()\s]|\s+(?![\s)])|\([^()]*\))+(?=\s*\)\s*\{)/,lookbehind:!0,inside:e.languages.javascript}],constant:/\b[A-Z](?:[A-Z_]|\dx?)*\b/}),e.languages.insertBefore("javascript","string",{hashbang:{pattern:/^#!.*/,greedy:!0,alias:"comment"},"template-string":{pattern:/`(?:\\[\s\S]|\$\{(?:[^{}]|\{(?:[^{}]|\{[^}]*\})*\})+\}|(?!\$\{)[^\\`])*`/,greedy:!0,inside:{"template-punctuation":{pattern:/^`|`$/,alias:"string"},interpolation:{pattern:/((?:^|[^\\])(?:\\{2})*)\$\{(?:[^{}]|\{(?:[^{}]|\{[^}]*\})*\})+\}/,lookbehind:!0,inside:{"interpolation-punctuation":{pattern:/^\$\{|\}$/,alias:"punctuation"},rest:e.languages.javascript}},string:/[\s\S]+/}}}),e.languages.markup&&(e.languages.markup.tag.addInlined("script","javascript"),e.languages.markup.tag.addAttribute(/on(?:abort|blur|change|click|composition(?:end|start|update)|dblclick|error|focus(?:in|out)?|key(?:down|up)|load|mouse(?:down|enter|leave|move|out|over|up)|reset|resize|scroll|select|slotchange|submit|unload|wheel)/.source,"javascript")),e.languages.js=e.languages.javascript}e.exports=t,t.displayName="javascript",t.aliases=["js"]},11223(e){"use strict";function t(e){e.languages.javastacktrace={summary:{pattern:/^[\t ]*(?:(?:Caused by:|Suppressed:|Exception in thread "[^"]*")[\t ]+)?[\w$.]+(?::.*)?$/m,inside:{keyword:{pattern:/^(\s*)(?:(?:Caused by|Suppressed)(?=:)|Exception in 
thread)/m,lookbehind:!0},string:{pattern:/^(\s*)"[^"]*"/,lookbehind:!0},exceptions:{pattern:/^(:?\s*)[\w$.]+(?=:|$)/,lookbehind:!0,inside:{"class-name":/[\w$]+(?=$|:)/,namespace:/[a-z]\w*/,punctuation:/[.:]/}},message:{pattern:/(:\s*)\S.*/,lookbehind:!0,alias:"string"},punctuation:/:/}},"stack-frame":{pattern:/^[\t ]*at (?:[\w$./]|@[\w$.+-]*\/)+(?:)?\([^()]*\)/m,inside:{keyword:{pattern:/^(\s*)at(?= )/,lookbehind:!0},source:[{pattern:/(\()\w+\.\w+:\d+(?=\))/,lookbehind:!0,inside:{file:/^\w+\.\w+/,punctuation:/:/,"line-number":{pattern:/\d+/,alias:"number"}}},{pattern:/(\()[^()]*(?=\))/,lookbehind:!0,inside:{keyword:/^(?:Unknown Source|Native Method)$/}}],"class-name":/[\w$]+(?=\.(?:|[\w$]+)\()/,function:/(?:|[\w$]+)(?=\()/,"class-loader":{pattern:/(\s)[a-z]\w*(?:\.[a-z]\w*)*(?=\/[\w@$.]*\/)/,lookbehind:!0,alias:"namespace",inside:{punctuation:/\./}},module:{pattern:/([\s/])[a-z]\w*(?:\.[a-z]\w*)*(?:@[\w$.+-]*)?(?=\/)/,lookbehind:!0,inside:{version:{pattern:/(@)[\s\S]+/,lookbehind:!0,alias:"number"},punctuation:/[@.]/}},namespace:{pattern:/(?:[a-z]\w*\.)+/,inside:{punctuation:/\./}},punctuation:/[()/.]/}},more:{pattern:/^[\t ]*\.{3} \d+ [a-z]+(?: [a-z]+)*/m,inside:{punctuation:/\.{3}/,number:/\d+/,keyword:/\b[a-z]+(?: [a-z]+)*\b/}}}}e.exports=t,t.displayName="javastacktrace",t.aliases=[]},57957(e){"use strict";function t(e){e.languages.jexl={string:/(["'])(?:\\[\s\S]|(?!\1)[^\\])*\1/,transform:{pattern:/(\|\s*)[a-zA-Zа-яА-Я_\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF$][\wа-яА-Я\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF$]*/,alias:"function",lookbehind:!0},function:/[a-zA-Zа-яА-Я_\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF$][\wа-яА-Я\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u00FF$]*\s*(?=\()/,number:/\b\d+(?:\.\d+)?\b|\B\.\d+\b/,operator:/[<>!]=?|-|\+|&&|==|\|\|?|\/\/?|[?:*^%]/,boolean:/\b(?:true|false)\b/,keyword:/\bin\b/,punctuation:/[{}[\](),.]/}}e.exports=t,t.displayName="jexl",t.aliases=[]},75807(e){"use strict";function 
t(e){e.languages.jolie=e.languages.extend("clike",{string:{pattern:/(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0},keyword:/\b(?:include|define|is_defined|undef|main|init|outputPort|inputPort|Location|Protocol|Interfaces|RequestResponse|OneWay|type|interface|extender|throws|cset|csets|forward|Aggregates|Redirects|embedded|courier|execution|sequential|concurrent|single|scope|install|throw|comp|cH|default|global|linkIn|linkOut|synchronized|this|new|for|if|else|while|in|Jolie|Java|Javascript|nullProcess|spawn|constants|with|provide|until|exit|foreach|instanceof|over|service)\b/,number:/(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?l?/i,operator:/-[-=>]?|\+[+=]?|<[<=]?|[>=*!]=?|&&|\|\||[:?\/%^]/,punctuation:/[,.]/,builtin:/\b(?:undefined|string|int|void|long|Byte|bool|double|float|char|any)\b/,symbol:/[|;@]/}),delete e.languages.jolie["class-name"],e.languages.insertBefore("jolie","keyword",{function:{pattern:/((?:\b(?:outputPort|inputPort|in|service|courier)\b|@)\s*)\w+/,lookbehind:!0},aggregates:{pattern:/(\bAggregates\s*:\s*)(?:\w+(?:\s+with\s+\w+)?\s*,\s*)*\w+(?:\s+with\s+\w+)?/,lookbehind:!0,inside:{"with-extension":{pattern:/\bwith\s+\w+/,inside:{keyword:/\bwith\b/}},function:{pattern:/\w+/},punctuation:{pattern:/,/}}},redirects:{pattern:/(\bRedirects\s*:\s*)(?:\w+\s*=>\s*\w+\s*,\s*)*(?:\w+\s*=>\s*\w+)/,lookbehind:!0,inside:{punctuation:{pattern:/,/},function:{pattern:/\w+/},symbol:{pattern:/=>/}}}})}e.exports=t,t.displayName="jolie",t.aliases=[]},77935(e){"use strict";function t(e){var t,n,r,i,a;t=e,n=/\\\((?:[^()]|\([^()]*\))*\)/.source,r=RegExp(/"(?:[^"\r\n\\]|\\[^\r\n(]|__)*"/.source.replace(/__/g,function(){return 
n})),i={interpolation:{pattern:RegExp(/((?:^|[^\\])(?:\\{2})*)/.source+n),lookbehind:!0,inside:{content:{pattern:/^(\\\()[\s\S]+(?=\)$)/,lookbehind:!0,inside:null},punctuation:/^\\\(|\)$/}}},a=t.languages.jq={comment:/#.*/,property:{pattern:RegExp(r.source+/(?=\s*:(?!:))/.source),greedy:!0,inside:i},string:{pattern:r,greedy:!0,inside:i},function:{pattern:/(\bdef\s+)[a-z_]\w+/i,lookbehind:!0},variable:/\B\$\w+/,"property-literal":{pattern:/\b[a-z_]\w*(?=\s*:(?!:))/i,alias:"property"},keyword:/\b(?:as|break|catch|def|elif|else|end|foreach|if|import|include|label|module|modulemeta|null|reduce|then|try|while)\b/,boolean:/\b(?:true|false)\b/,number:/(?:\b\d+\.|\B\.)?\b\d+(?:[eE][+-]?\d+)?\b/,operator:[{pattern:/\|=?/,alias:"pipe"},/\.\.|[!=<>]?=|\?\/\/|\/\/=?|[-+*/%]=?|[<>?]|\b(?:and|or|not)\b/],"c-style-function":{pattern:/\b[a-z_]\w*(?=\s*\()/i,alias:"function"},punctuation:/::|[()\[\]{},:;]|\.(?=\s*[\[\w$])/,dot:{pattern:/\./,alias:"important"}},i.interpolation.inside.content.inside=a}e.exports=t,t.displayName="jq",t.aliases=[]},46155(e){"use strict";function t(e){!function(e){function t(e,t){return 
RegExp(e.replace(//g,function(){return/(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*/.source}),t)}e.languages.insertBefore("javascript","function-variable",{"method-variable":{pattern:RegExp("(\\.\\s*)"+e.languages.javascript["function-variable"].pattern.source),lookbehind:!0,alias:["function-variable","method","function","property-access"]}}),e.languages.insertBefore("javascript","function",{method:{pattern:RegExp("(\\.\\s*)"+e.languages.javascript.function.source),lookbehind:!0,alias:["function","property-access"]}}),e.languages.insertBefore("javascript","constant",{"known-class-name":[{pattern:/\b(?:(?:(?:Uint|Int)(?:8|16|32)|Uint8Clamped|Float(?:32|64))?Array|ArrayBuffer|BigInt|Boolean|DataView|Date|Error|Function|Intl|JSON|Math|Number|Object|Promise|Proxy|Reflect|RegExp|String|Symbol|(?:Weak)?(?:Set|Map)|WebAssembly)\b/,alias:"class-name"},{pattern:/\b(?:[A-Z]\w*)Error\b/,alias:"class-name"}]}),e.languages.insertBefore("javascript","keyword",{imports:{pattern:t(/(\bimport\b\s*)(?:(?:\s*,\s*(?:\*\s*as\s+|\{[^{}]*\}))?|\*\s*as\s+|\{[^{}]*\})(?=\s*\bfrom\b)/.source),lookbehind:!0,inside:e.languages.javascript},exports:{pattern:t(/(\bexport\b\s*)(?:\*(?:\s*as\s+)?(?=\s*\bfrom\b)|\{[^{}]*\})/.source),lookbehind:!0,inside:e.languages.javascript}}),e.languages.javascript.keyword.unshift({pattern:/\b(?:as|default|export|from|import)\b/,alias:"module"},{pattern:/\b(?:await|break|catch|continue|do|else|for|finally|if|return|switch|throw|try|while|yield)\b/,alias:"control-flow"},{pattern:/\bnull\b/,alias:["null","nil"]},{pattern:/\bundefined\b/,alias:"nil"}),e.languages.insertBefore("javascript","operator",{spread:{pattern:/\.{3}/,alias:"operator"},arrow:{pattern:/=>/,alias:"operator"}}),e.languages.insertBefore("javascript","punctuation",{"property-access":{pattern:t(/(\.\s*)#?/.source),lookbehind:!0},"maybe-class-name":{pattern:/(^|[^$\w\xA0-\uFFFF])[A-Z][$\w\xA0-\uFFFF]+/,lookbehind:!0},dom:{pattern:/\b(?:document|location|navigator|performance|(?:local|sessio
n)Storage|window)\b/,alias:"variable"},console:{pattern:/\bconsole(?=\s*\.)/,alias:"class-name"}});for(var n=["function","function-variable","method","method-variable","property-access"],r=0;r=h.length)return;var n=e[t];if("string"==typeof n||"string"==typeof n.content){var r=h[o],i="string"==typeof n?n:n.content,a=i.indexOf(r);if(-1!==a){++o;var s=i.substring(0,a),u=c(l[r]),f=i.substring(a+r.length),d=[];if(s&&d.push(s),d.push(u),f){var b=[f];p(b),d.push.apply(d,b)}"string"==typeof n?(e.splice.apply(e,[t,1].concat(d)),t+=d.length-1):n.content=d}}else{var m=n.content;Array.isArray(m)?p(m):p([m])}}}return o=0,p(d),new e.Token(r,d,"language-"+r,t)}e.languages.javascript["template-string"]=[o("css",/\b(?:styled(?:\([^)]*\))?(?:\s*\.\s*\w+(?:\([^)]*\))*)*|css(?:\s*\.\s*(?:global|resolve))?|createGlobalStyle|keyframes)/.source),o("html",/\bhtml|\.\s*(?:inner|outer)HTML\s*\+?=/.source),o("svg",/\bsvg/.source),o("markdown",/\b(?:md|markdown)/.source),o("graphql",/\b(?:gql|graphql(?:\s*\.\s*experimental)?)/.source),o("sql",/\bsql/.source),t].filter(Boolean);var f={javascript:!0,js:!0,typescript:!0,ts:!0,jsx:!0,tsx:!0};function d(e){return"string"==typeof e?e:Array.isArray(e)?e.map(d).join(""):d(e.content)}e.hooks.add("after-tokenize",function(t){t.language in f&&n(t.tokens);function n(t){for(var r=0,i=t.length;r\s+)?)[A-Z]\w*(?:\.[A-Z]\w*)*/.source.replace(//g,function(){return a})),lookbehind:!0,inside:{punctuation:/\./}},{pattern:RegExp("(@[a-z]+\\s+)"+a),lookbehind:!0,inside:{string:n.string,number:n.number,boolean:n.boolean,keyword:t.languages.typescript.keyword,operator:/=>|\.\.\.|[&|?:*]/,punctuation:/[.,;=<>{}()[\]]/}}],example:{pattern:/(@example\s+(?!\s))(?:[^@\s]|\s+(?!\s))+?(?=\s*(?:\*\s*)?(?:@\w|\*\/))/,lookbehind:!0,inside:{code:{pattern:/^([\t ]*(?:\*\s*)?)\S.*$/m,lookbehind:!0,inside:n,alias:"language-javascript"}}}}),t.languages.javadoclike.addSupport("javascript",t.languages.jsdoc)}e.exports=a,a.displayName="jsdoc",a.aliases=[]},45950(e){"use 
strict";function t(e){e.languages.json={property:{pattern:/(^|[^\\])"(?:\\.|[^\\"\r\n])*"(?=\s*:)/,lookbehind:!0,greedy:!0},string:{pattern:/(^|[^\\])"(?:\\.|[^\\"\r\n])*"(?!\s*:)/,lookbehind:!0,greedy:!0},comment:{pattern:/\/\/.*|\/\*[\s\S]*?(?:\*\/|$)/,greedy:!0},number:/-?\b\d+(?:\.\d+)?(?:e[+-]?\d+)?\b/i,punctuation:/[{}[\],]/,operator:/:/,boolean:/\b(?:true|false)\b/,null:{pattern:/\bnull\b/,alias:"keyword"}},e.languages.webmanifest=e.languages.json}e.exports=t,t.displayName="json",t.aliases=["webmanifest"]},50235(e,t,n){"use strict";var r=n(45950);function i(e){var t,n;e.register(r),n=/("|')(?:\\(?:\r\n?|\n|.)|(?!\1)[^\\\r\n])*\1/,(t=e).languages.json5=t.languages.extend("json",{property:[{pattern:RegExp(n.source+"(?=\\s*:)"),greedy:!0},{pattern:/(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*:)/,alias:"unquoted"}],string:{pattern:n,greedy:!0},number:/[+-]?\b(?:NaN|Infinity|0x[a-fA-F\d]+)\b|[+-]?(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[eE][+-]?\d+\b)?/})}e.exports=i,i.displayName="json5",i.aliases=[]},80963(e,t,n){"use strict";var r=n(45950);function i(e){e.register(r),e.languages.jsonp=e.languages.extend("json",{punctuation:/[{}[\]();,.]/}),e.languages.insertBefore("jsonp","punctuation",{function:/(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*\()/})}e.exports=i,i.displayName="jsonp",i.aliases=[]},79358(e){"use strict";function t(e){e.languages.jsstacktrace={"error-message":{pattern:/^\S.*/m,alias:"string"},"stack-frame":{pattern:/(^[ \t]+)at[ \t].*/m,lookbehind:!0,inside:{"not-my-code":{pattern:/^at[ 
\t]+(?!\s)(?:node\.js||.*(?:node_modules|\(\)|\(|$|\(internal\/|\(node\.js)).*/m,alias:"comment"},filename:{pattern:/(\bat\s+(?!\s)|\()(?:[a-zA-Z]:)?[^():]+(?=:)/,lookbehind:!0,alias:"url"},function:{pattern:/(at\s+(?:new\s+)?)(?!\s)[_$a-zA-Z\xA0-\uFFFF<][.$\w\xA0-\uFFFF<>]*/,lookbehind:!0,inside:{punctuation:/\./}},punctuation:/[()]/,keyword:/\b(?:at|new)\b/,alias:{pattern:/\[(?:as\s+)?(?!\s)[_$a-zA-Z\xA0-\uFFFF][$\w\xA0-\uFFFF]*\]/,alias:"variable"},"line-number":{pattern:/:[0-9]+(?::[0-9]+)?\b/,alias:"number",inside:{punctuation:/:/}}}}}}e.exports=t,t.displayName="jsstacktrace",t.aliases=[]},96412(e){"use strict";function t(e){!function(e){var t=e.util.clone(e.languages.javascript),n=/(?:\s|\/\/.*(?!.)|\/\*(?:[^*]|\*(?!\/))\*\/)/.source,r=/(?:\{(?:\{(?:\{[^{}]*\}|[^{}])*\}|[^{}])*\})/.source,i=/(?:\{*\.{3}(?:[^{}]|)*\})/.source;function a(e,t){return RegExp(e=e.replace(//g,function(){return n}).replace(//g,function(){return r}).replace(//g,function(){return i}),t)}i=a(i).source,e.languages.jsx=e.languages.extend("markup",t),e.languages.jsx.tag.pattern=a(/<\/?(?:[\w.:-]+(?:+(?:[\w.:$-]+(?:=(?:"(?:\\[\s\S]|[^\\"])*"|'(?:\\[\s\S]|[^\\'])*'|[^\s{'"/>=]+|))?|))**\/?)?>/.source),e.languages.jsx.tag.inside.tag.pattern=/^<\/?[^\s>\/]*/i,e.languages.jsx.tag.inside["attr-value"].pattern=/=(?!\{)(?:"(?:\\[\s\S]|[^\\"])*"|'(?:\\[\s\S]|[^\\'])*'|[^\s'">]+)/i,e.languages.jsx.tag.inside.tag.inside["class-name"]=/^[A-Z]\w*(?:\.[A-Z]\w*)*$/,e.languages.jsx.tag.inside.comment=t.comment,e.languages.insertBefore("inside","attr-name",{spread:{pattern:a(//.source),inside:e.languages.jsx}},e.languages.jsx.tag),e.languages.insertBefore("inside","special-attr",{script:{pattern:a(/=/.source),inside:{"script-punctuation":{pattern:/^=(?=\{)/,alias:"punctuation"},rest:e.languages.jsx},alias:"language-javascript"}},e.languages.jsx.tag);var o=function(e){return e?"string"==typeof e?e:"string"==typeof e.content?e.content:e.content.map(o).join(""):""},s=function(t){for(var 
n=[],r=0;r0&&n[n.length-1].tagName===o(i.content[0].content[1])&&n.pop():"/>"===i.content[i.content.length-1].content||n.push({tagName:o(i.content[0].content[1]),openedBraces:0}):n.length>0&&"punctuation"===i.type&&"{"===i.content?n[n.length-1].openedBraces++:n.length>0&&n[n.length-1].openedBraces>0&&"punctuation"===i.type&&"}"===i.content?n[n.length-1].openedBraces--:a=!0),(a||"string"==typeof i)&&n.length>0&&0===n[n.length-1].openedBraces){var u=o(i);r0&&("string"==typeof t[r-1]||"plain-text"===t[r-1].type)&&(u=o(t[r-1])+u,t.splice(r-1,1),r--),t[r]=new e.Token("plain-text",u,null,u)}i.content&&"string"!=typeof i.content&&s(i.content)}};e.hooks.add("after-tokenize",function(e){("jsx"===e.language||"tsx"===e.language)&&s(e.tokens)})}(e)}e.exports=t,t.displayName="jsx",t.aliases=[]},39259(e){"use strict";function t(e){e.languages.julia={comment:{pattern:/(^|[^\\])(?:#=(?:[^#=]|=(?!#)|#(?!=)|#=(?:[^#=]|=(?!#)|#(?!=))*=#)*=#|#.*)/,lookbehind:!0},regex:{pattern:/r"(?:\\.|[^"\\\r\n])*"[imsx]{0,4}/,greedy:!0},string:{pattern:/"""[\s\S]+?"""|(?:\b\w+)?"(?:\\.|[^"\\\r\n])*"|(^|[^\w'])'(?:\\[^\r\n][^'\r\n]*|[^\\\r\n])'|`(?:[^\\`\r\n]|\\.)*`/,lookbehind:!0,greedy:!0},keyword:/\b(?:abstract|baremodule|begin|bitstype|break|catch|ccall|const|continue|do|else|elseif|end|export|finally|for|function|global|if|immutable|import|importall|in|let|local|macro|module|print|println|quote|return|struct|try|type|typealias|using|while)\b/,boolean:/\b(?:true|false)\b/,number:/(?:\b(?=\d)|\B(?=\.))(?:0[box])?(?:[\da-f]+(?:_[\da-f]+)*(?:\.(?:\d+(?:_\d+)*)?)?|\.\d+(?:_\d+)*)(?:[efp][+-]?\d+(?:_\d+)*)?j?/i,operator:/&&|\|\||[-+*^%÷⊻&$\\]=?|\/[\/=]?|!=?=?|\|[=>]?|<(?:<=?|[=:|])?|>(?:=|>>?=?)?|==?=?|[~≠≤≥'√∛]/,punctuation:/::?|[{}[\]();,.?]/,constant:/\b(?:(?:NaN|Inf)(?:16|32|64)?|im|pi)\b|[πℯ]/}}e.exports=t,t.displayName="julia",t.aliases=[]},35760(e){"use strict";function 
t(e){e.languages.keyman={comment:/\bc\s.*/i,function:/\[\s*(?:(?:CTRL|SHIFT|ALT|LCTRL|RCTRL|LALT|RALT|CAPS|NCAPS)\s+)*(?:[TKU]_[\w?]+|".+?"|'.+?')\s*\]/i,string:/("|').*?\1/,bold:[/&(?:baselayout|bitmap|capsononly|capsalwaysoff|shiftfreescaps|copyright|ethnologuecode|hotkey|includecodes|keyboardversion|kmw_embedcss|kmw_embedjs|kmw_helpfile|kmw_helptext|kmw_rtl|language|layer|layoutfile|message|mnemoniclayout|name|oldcharposmatching|platform|targets|version|visualkeyboard|windowslanguages)\b/i,/\b(?:bitmap|bitmaps|caps on only|caps always off|shift frees caps|copyright|hotkey|language|layout|message|name|version)\b/i],keyword:/\b(?:any|baselayout|beep|call|context|deadkey|dk|if|index|layer|notany|nul|outs|platform|return|reset|save|set|store|use)\b/i,atrule:/\b(?:ansi|begin|unicode|group|using keys|match|nomatch)\b/i,number:/\b(?:U\+[\dA-F]+|d\d+|x[\da-f]+|\d+)\b/i,operator:/[+>\\,()]/,tag:/\$(?:keyman|kmfl|weaver|keymanweb|keymanonly):/i}}e.exports=t,t.displayName="keyman",t.aliases=[]},19715(e){"use strict";function t(e){var t,n;(t=e).languages.kotlin=t.languages.extend("clike",{keyword:{pattern:/(^|[^.])\b(?:abstract|actual|annotation|as|break|by|catch|class|companion|const|constructor|continue|crossinline|data|do|dynamic|else|enum|expect|external|final|finally|for|fun|get|if|import|in|infix|init|inline|inner|interface|internal|is|lateinit|noinline|null|object|open|operator|out|override|package|private|protected|public|reified|return|sealed|set|super|suspend|tailrec|this|throw|to|try|typealias|val|var|vararg|when|where|while)\b/,lookbehind:!0},function:[{pattern:/(?:`[^\r\n`]+`|\b\w+)(?=\s*\()/,greedy:!0},{pattern:/(\.)(?:`[^\r\n`]+`|\w+)(?=\s*\{)/,lookbehind:!0,greedy:!0}],number:/\b(?:0[xX][\da-fA-F]+(?:_[\da-fA-F]+)*|0[bB][01]+(?:_[01]+)*|\d+(?:_\d+)*(?:\.\d+(?:_\d+)*)?(?:[eE][+-]?\d+(?:_\d+)*)?[fFL]?)\b/,operator:/\+[+=]?|-[-=>]?|==?=?|!(?:!|==?)?|[\/*%<>]=?|[?:]:?|\.\.|&&|\|\||\b(?:and|inv|or|shl|shr|ushr|xor)\b/}),delete 
t.languages.kotlin["class-name"],t.languages.insertBefore("kotlin","string",{"raw-string":{pattern:/("""|''')[\s\S]*?\1/,alias:"string"}}),t.languages.insertBefore("kotlin","keyword",{annotation:{pattern:/\B@(?:\w+:)?(?:[A-Z]\w*|\[[^\]]+\])/,alias:"builtin"}}),t.languages.insertBefore("kotlin","function",{label:{pattern:/\b\w+@|@\w+\b/,alias:"symbol"}}),n=[{pattern:/\$\{[^}]+\}/,inside:{delimiter:{pattern:/^\$\{|\}$/,alias:"variable"},rest:t.languages.kotlin}},{pattern:/\$\w+/,alias:"variable"}],t.languages.kotlin.string.inside=t.languages.kotlin["raw-string"].inside={interpolation:n},t.languages.kt=t.languages.kotlin,t.languages.kts=t.languages.kotlin}e.exports=t,t.displayName="kotlin",t.aliases=["kt","kts"]},27614(e){"use strict";function t(e){!function(e){var t=/\s\x00-\x1f\x22-\x2f\x3a-\x3f\x5b-\x5e\x60\x7b-\x7e/.source;function n(e,n){return RegExp(e.replace(//g,t),n)}e.languages.kumir={comment:{pattern:/\|.*/},prolog:{pattern:/#.*/,greedy:!0},string:{pattern:/"[^\n\r"]*"|'[^\n\r']*'/,greedy:!0},boolean:{pattern:n(/(^|[])(?:да|нет)(?=[]|$)/.source),lookbehind:!0},"operator-word":{pattern:n(/(^|[])(?:и|или|не)(?=[]|$)/.source),lookbehind:!0,alias:"keyword"},"system-variable":{pattern:n(/(^|[])знач(?=[]|$)/.source),lookbehind:!0,alias:"keyword"},type:[{pattern:n(/(^|[])(?:вещ|лит|лог|сим|цел)(?:\x20*таб)?(?=[]|$)/.source),lookbehind:!0,alias:"builtin"},{pattern:n(/(^|[])(?:компл|сканкод|файл|цвет)(?=[]|$)/.source),lookbehind:!0,alias:"important"}],keyword:{pattern:n(/(^|[])(?:алг|арг(?:\x20*рез)?|ввод|ВКЛЮЧИТЬ|вс[её]|выбор|вывод|выход|дано|для|до|дс|если|иначе|исп|использовать|кон(?:(?:\x20+|_)исп)?|кц(?:(?:\x20+|_)при)?|надо|нач|нс|нц|от|пауза|пока|при|раза?|рез|стоп|таб|то|утв|шаг)(?=[]|$)/.source),lookbehind:!0},name:{pattern:n(/(^|[])[^\d][^]*(?:\x20+[^]+)*(?=[]|$)/.source),lookbehind:!0},number:{pattern:n(/(^|[])(?:\B\$[\da-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?)(?=[]|$)/.source,"i"),lookbehind:!0},punctuation:/:=|[(),:;\[\]]/,"operator-char":{patte
rn:/\*\*?|<[=>]?|>=?|[-+/=]/,alias:"operator"}},e.languages.kum=e.languages.kumir}(e)}e.exports=t,t.displayName="kumir",t.aliases=["kum"]},42876(e){"use strict";function t(e){var t,n,r;t=e,r={"equation-command":{pattern:n=/\\(?:[^a-z()[\]]|[a-z*]+)/i,alias:"regex"}},t.languages.latex={comment:/%.*/m,cdata:{pattern:/(\\begin\{((?:verbatim|lstlisting)\*?)\})[\s\S]*?(?=\\end\{\2\})/,lookbehind:!0},equation:[{pattern:/\$\$(?:\\[\s\S]|[^\\$])+\$\$|\$(?:\\[\s\S]|[^\\$])+\$|\\\([\s\S]*?\\\)|\\\[[\s\S]*?\\\]/,inside:r,alias:"string"},{pattern:/(\\begin\{((?:equation|math|eqnarray|align|multline|gather)\*?)\})[\s\S]*?(?=\\end\{\2\})/,lookbehind:!0,inside:r,alias:"string"}],keyword:{pattern:/(\\(?:begin|end|ref|cite|label|usepackage|documentclass)(?:\[[^\]]+\])?\{)[^}]+(?=\})/,lookbehind:!0},url:{pattern:/(\\url\{)[^}]+(?=\})/,lookbehind:!0},headline:{pattern:/(\\(?:part|chapter|section|subsection|frametitle|subsubsection|paragraph|subparagraph|subsubparagraph|subsubsubparagraph)\*?(?:\[[^\]]+\])?\{)[^}]+(?=\})/,lookbehind:!0,alias:"class-name"},function:{pattern:n,alias:"selector"},punctuation:/[[\]{}&]/},t.languages.tex=t.languages.latex,t.languages.context=t.languages.latex}e.exports=t,t.displayName="latex",t.aliases=["tex","context"]},2980(e,t,n){"use strict";var r=n(93205),i=n(88262);function a(e){var 
t,n;e.register(r),e.register(i),(t=e).languages.latte={comment:/^\{\*[\s\S]*/,ld:{pattern:/^\{(?:[=_]|\/?(?!\d|\w+\()\w+)?/,inside:{punctuation:/^\{\/?/,tag:{pattern:/.+/,alias:"important"}}},rd:{pattern:/\}$/,inside:{punctuation:/.+/}},php:{pattern:/\S(?:[\s\S]*\S)?/,alias:"language-php",inside:t.languages.php}},n=t.languages.extend("markup",{}),t.languages.insertBefore("inside","attr-value",{"n-attr":{pattern:/n:[\w-]+(?:\s*=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+))?/,inside:{"attr-name":{pattern:/^[^\s=]+/,alias:"important"},"attr-value":{pattern:/=[\s\S]+/,inside:{punctuation:[/^=/,{pattern:/^(\s*)["']|["']$/,lookbehind:!0}],php:{pattern:/\S(?:[\s\S]*\S)?/,inside:t.languages.php}}}}}},n.tag),t.hooks.add("before-tokenize",function(e){if("latte"===e.language){var r=/\{\*[\s\S]*?\*\}|\{[^'"\s{}*](?:[^"'/{}]|\/(?![*/])|("|')(?:\\[\s\S]|(?!\1)[^\\])*\1|\/\*(?:[^*]|\*(?!\/))*\*\/)*?\}/g;t.languages["markup-templating"].buildPlaceholders(e,"latte",r),e.grammar=n}}),t.hooks.add("after-tokenize",function(e){t.languages["markup-templating"].tokenizePlaceholders(e,"latte")})}e.exports=a,a.displayName="latte",a.aliases=[]},41701(e){"use strict";function t(e){e.languages.less=e.languages.extend("css",{comment:[/\/\*[\s\S]*?\*\//,{pattern:/(^|[^\\])\/\/.*/,lookbehind:!0}],atrule:{pattern:/@[\w-](?:\((?:[^(){}]|\([^(){}]*\))*\)|[^(){};\s]|\s+(?!\s))*?(?=\s*\{)/,inside:{punctuation:/[:()]/}},selector:{pattern:/(?:@\{[\w-]+\}|[^{};\s@])(?:@\{[\w-]+\}|\((?:[^(){}]|\([^(){}]*\))*\)|[^(){};@\s]|\s+(?!\s))*?(?=\s*\{)/,inside:{variable:/@+[\w-]+/}},property:/(?:@\{[\w-]+\}|[\w-])+(?:\+_?)?(?=\s*:)/i,operator:/[+\-*\/]/}),e.languages.insertBefore("less","property",{variable:[{pattern:/@[\w-]+\s*:/,inside:{punctuation:/:/}},/@@?[\w-]+/],"mixin-usage":{pattern:/([{;]\s*)[.#](?!\d)[\w-].*?(?=[(;])/,lookbehind:!0,alias:"function"}})}e.exports=t,t.displayName="less",t.aliases=[]},42491(e,t,n){"use strict";var r=n(9997);function i(e){e.register(r),function(e){for(var 
t=/\((?:[^();"#\\]|\\[\s\S]|;.*(?!.)|"(?:[^"\\]|\\.)*"|#(?:\{(?:(?!#\})[\s\S])*#\}|[^{])|)*\)/.source,n=5,r=0;r/g,function(){return t});t=t.replace(//g,/[^\s\S]/.source);var i=e.languages.lilypond={comment:/%(?:(?!\{).*|\{[\s\S]*?%\})/,"embedded-scheme":{pattern:RegExp(/(^|[=\s])#(?:"(?:[^"\\]|\\.)*"|[^\s()"]*(?:[^\s()]|))/.source.replace(//g,function(){return t}),"m"),lookbehind:!0,greedy:!0,inside:{scheme:{pattern:/^(#)[\s\S]+$/,lookbehind:!0,alias:"language-scheme",inside:{"embedded-lilypond":{pattern:/#\{[\s\S]*?#\}/,greedy:!0,inside:{punctuation:/^#\{|#\}$/,lilypond:{pattern:/[\s\S]+/,alias:"language-lilypond",inside:null}}},rest:e.languages.scheme}},punctuation:/#/}},string:{pattern:/"(?:[^"\\]|\\.)*"/,greedy:!0},"class-name":{pattern:/(\\new\s+)[\w-]+/,lookbehind:!0},keyword:{pattern:/\\[a-z][-\w]*/i,inside:{punctuation:/^\\/}},operator:/[=|]|<<|>>/,punctuation:{pattern:/(^|[a-z\d])(?:'+|,+|[_^]?-[_^]?(?:[-+^!>._]|(?=\d))|[_^]\.?|[.!])|[{}()[\]<>^~]|\\[()[\]<>\\!]|--|__/,lookbehind:!0},number:/\b\d+(?:\/\d+)?\b/};i["embedded-scheme"].inside.scheme.inside["embedded-lilypond"].inside.lilypond.inside=i,e.languages.ly=i}(e)}e.exports=i,i.displayName="lilypond",i.aliases=[]},34927(e,t,n){"use strict";var r=n(93205);function 
i(e){e.register(r),e.languages.liquid={comment:{pattern:/(^\{%\s*comment\s*%\})[\s\S]+(?=\{%\s*endcomment\s*%\}$)/,lookbehind:!0},delimiter:{pattern:/^\{(?:\{\{|[%\{])-?|-?(?:\}\}|[%\}])\}$/,alias:"punctuation"},string:{pattern:/"[^"]*"|'[^']*'/,greedy:!0},keyword:/\b(?:as|assign|break|continue|cycle|decrement|echo|else|elsif|(?:end)?(?:capture|case|comment|for|form|if|paginate|style|raw|tablerow|unless)|in|include|increment|limit|liquid|offset|range|render|reversed|section|when|with)\b/,function:[{pattern:/(\|\s*)\w+/,lookbehind:!0,alias:"filter"},{pattern:/(\.\s*)(?:first|last|size)/,lookbehind:!0}],boolean:/\b(?:true|false|nil)\b/,range:{pattern:/\.\./,alias:"operator"},number:/\b\d+(?:\.\d+)?\b/,operator:/[!=]=|<>|[<>]=?|[|?:=-]|\b(?:and|or|contains(?=\s))\b/,punctuation:/[.,\[\]()]/},e.hooks.add("before-tokenize",function(t){var n=/\{%\s*comment\s*%\}[\s\S]*?\{%\s*endcomment\s*%\}|\{(?:%[\s\S]*?%|\{\{[\s\S]*?\}\}|\{[\s\S]*?\})\}/g,r=!1;e.languages["markup-templating"].buildPlaceholders(t,"liquid",n,function(e){var t=/^\{%-?\s*(\w+)/.exec(e);if(t){var n=t[1];if("raw"===n&&!r)return r=!0,!0;if("endraw"===n)return r=!1,!0}return!r})}),e.hooks.add("after-tokenize",function(t){e.languages["markup-templating"].tokenizePlaceholders(t,"liquid")})}e.exports=i,i.displayName="liquid",i.aliases=[]},3848(e){"use strict";function t(e){!function(e){function t(e){return RegExp("(\\()"+e+"(?=[\\s\\)])")}function n(e){return RegExp("([\\s([])"+e+"(?=[\\s)])")}var 
r="[-+*/_~!@$%^=<>{}\\w]+",i="&"+r,a="(\\()",o="(?=\\))",s="(?=\\s)",u={heading:{pattern:/;;;.*/,alias:["comment","title"]},comment:/;.*/,string:{pattern:/"(?:[^"\\]|\\.)*"/,greedy:!0,inside:{argument:/[-A-Z]+(?=[.,\s])/,symbol:RegExp("`"+r+"'")}},"quoted-symbol":{pattern:RegExp("#?'"+r),alias:["variable","symbol"]},"lisp-property":{pattern:RegExp(":"+r),alias:"property"},splice:{pattern:RegExp(",@?"+r),alias:["symbol","variable"]},keyword:[{pattern:RegExp(a+"(?:(?:lexical-)?let\\*?|(?:cl-)?letf|if|when|while|unless|cons|cl-loop|and|or|not|cond|setq|error|message|null|require|provide|use-package)"+s),lookbehind:!0},{pattern:RegExp(a+"(?:for|do|collect|return|finally|append|concat|in|by)"+s),lookbehind:!0}],declare:{pattern:t("declare"),lookbehind:!0,alias:"keyword"},interactive:{pattern:t("interactive"),lookbehind:!0,alias:"keyword"},boolean:{pattern:n("(?:t|nil)"),lookbehind:!0},number:{pattern:n("[-+]?\\d+(?:\\.\\d*)?"),lookbehind:!0},defvar:{pattern:RegExp(a+"def(?:var|const|custom|group)\\s+"+r),lookbehind:!0,inside:{keyword:/^def[a-z]+/,variable:RegExp(r)}},defun:{pattern:RegExp(a+"(?:cl-)?(?:defun\\*?|defmacro)\\s+"+r+"\\s+\\([\\s\\S]*?\\)"),lookbehind:!0,inside:{keyword:/^(?:cl-)?def\S+/,arguments:null,function:{pattern:RegExp("(^\\s)"+r),lookbehind:!0},punctuation:/[()]/}},lambda:{pattern:RegExp(a+"lambda\\s+\\(\\s*(?:&?"+r+"(?:\\s+&?"+r+")*\\s*)?\\)"),lookbehind:!0,inside:{keyword:/^lambda/,arguments:null,punctuation:/[()]/}},car:{pattern:RegExp(a+r),lookbehind:!0},punctuation:[/(?:['`,]?\(|[)\[\]])/,{pattern:/(\s)\.(?=\s)/,lookbehind:!0}]},c={"lisp-marker":RegExp(i),rest:{argument:{pattern:RegExp(r),alias:"variable"},varform:{pattern:RegExp(a+r+"\\s+\\S[\\s\\S]*"+o),lookbehind:!0,inside:{string:u.string,boolean:u.boolean,number:u.number,symbol:u.symbol,punctuation:/[()]/}}}},l="\\S+(?:\\s+\\S+)*",f={pattern:RegExp(a+"[\\s\\S]*"+o),lookbehind:!0,inside:{"rest-vars":{pattern:RegExp("&(?:rest|body)\\s+"+l),inside:c},"other-marker-vars":{pattern:RegExp("&(?:op
tional|aux)\\s+"+l),inside:c},keys:{pattern:RegExp("&key\\s+"+l+"(?:\\s+&allow-other-keys)?"),inside:c},argument:{pattern:RegExp(r),alias:"variable"},punctuation:/[()]/}};u.lambda.inside.arguments=f,u.defun.inside.arguments=e.util.clone(f),u.defun.inside.arguments.inside.sublist=f,e.languages.lisp=u,e.languages.elisp=u,e.languages.emacs=u,e.languages["emacs-lisp"]=u}(e)}e.exports=t,t.displayName="lisp",t.aliases=[]},41469(e){"use strict";function t(e){e.languages.livescript={comment:[{pattern:/(^|[^\\])\/\*[\s\S]*?\*\//,lookbehind:!0},{pattern:/(^|[^\\])#.*/,lookbehind:!0}],"interpolated-string":{pattern:/(^|[^"])("""|")(?:\\[\s\S]|(?!\2)[^\\])*\2(?!")/,lookbehind:!0,greedy:!0,inside:{variable:{pattern:/(^|[^\\])#[a-z_](?:-?[a-z]|[\d_])*/m,lookbehind:!0},interpolation:{pattern:/(^|[^\\])#\{[^}]+\}/m,lookbehind:!0,inside:{"interpolation-punctuation":{pattern:/^#\{|\}$/,alias:"variable"}}},string:/[\s\S]+/}},string:[{pattern:/('''|')(?:\\[\s\S]|(?!\1)[^\\])*\1/,greedy:!0},{pattern:/<\[[\s\S]*?\]>/,greedy:!0},/\\[^\s,;\])}]+/],regex:[{pattern:/\/\/(?:\[[^\r\n\]]*\]|\\.|(?!\/\/)[^\\\[])+\/\/[gimyu]{0,5}/,greedy:!0,inside:{comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0}}},{pattern:/\/(?:\[[^\r\n\]]*\]|\\.|[^/\\\r\n\[])+\/[gimyu]{0,5}/,greedy:!0}],keyword:{pattern:/(^|(?!-).)\b(?:break|case|catch|class|const|continue|default|do|else|extends|fallthrough|finally|for(?: ever)?|function|if|implements|it|let|loop|new|null|otherwise|own|return|super|switch|that|then|this|throw|try|unless|until|var|void|when|while|yield)(?!-)\b/m,lookbehind:!0},"keyword-operator":{pattern:/(^|[^-])\b(?:(?:delete|require|typeof)!|(?:and|by|delete|export|from|import(?: all)?|in|instanceof|is(?:nt| 
not)?|not|of|or|til|to|typeof|with|xor)(?!-)\b)/m,lookbehind:!0,alias:"operator"},boolean:{pattern:/(^|[^-])\b(?:false|no|off|on|true|yes)(?!-)\b/m,lookbehind:!0},argument:{pattern:/(^|(?!\.&\.)[^&])&(?!&)\d*/m,lookbehind:!0,alias:"variable"},number:/\b(?:\d+~[\da-z]+|\d[\d_]*(?:\.\d[\d_]*)?(?:[a-z]\w*)?)/i,identifier:/[a-z_](?:-?[a-z]|[\d_])*/i,operator:[{pattern:/( )\.(?= )/,lookbehind:!0},/\.(?:[=~]|\.\.?)|\.(?:[&|^]|<<|>>>?)\.|:(?:=|:=?)|&&|\|[|>]|<(?:<[>=?]?|-(?:->?|>)?|\+\+?|@@?|%%?|\*\*?|!(?:~?=|--?>|~?~>)?|~(?:~?>|=)?|==?|\^\^?|[\/?]/],punctuation:/[(){}\[\]|.,:;`]/},e.languages.livescript["interpolated-string"].inside.interpolation.inside.rest=e.languages.livescript}e.exports=t,t.displayName="livescript",t.aliases=[]},73070(e){"use strict";function t(e){var t;(t=e).languages.llvm={comment:/;.*/,string:{pattern:/"[^"]*"/,greedy:!0},boolean:/\b(?:true|false)\b/,variable:/[%@!#](?:(?!\d)(?:[-$.\w]|\\[a-f\d]{2})+|\d+)/i,label:/(?!\d)(?:[-$.\w]|\\[a-f\d]{2})+:/i,type:{pattern:/\b(?:double|float|fp128|half|i[1-9]\d*|label|metadata|ppc_fp128|token|void|x86_fp80|x86_mmx)\b/,alias:"class-name"},keyword:/\b[a-z_][a-z_0-9]*\b/,number:/[+-]?\b\d+(?:\.\d+)?(?:[eE][+-]?\d+)?\b|\b0x[\dA-Fa-f]+\b|\b0xK[\dA-Fa-f]{20}\b|\b0x[ML][\dA-Fa-f]{32}\b|\b0xH[\dA-Fa-f]{4}\b/,punctuation:/[{}[\];(),.!*=<>]/}}e.exports=t,t.displayName="llvm",t.aliases=[]},35049(e){"use strict";function t(e){e.languages.log={string:{pattern:/"(?:[^"\\\r\n]|\\.)*"|'(?![st] | \w)(?:[^'\\\r\n]|\\.)*'/,greedy:!0},level:[{pattern:/\b(?:ALERT|CRIT|CRITICAL|EMERG|EMERGENCY|ERR|ERROR|FAILURE|FATAL|SEVERE)\b/,alias:["error","important"]},{pattern:/\b(?:WARN|WARNING|WRN)\b/,alias:["warning","important"]},{pattern:/\b(?:DISPLAY|INF|INFO|NOTICE|STATUS)\b/,alias:["info","keyword"]},{pattern:/\b(?:DBG|DEBUG|FINE)\b/,alias:["debug","keyword"]},{pattern:/\b(?:FINER|FINEST|TRACE|TRC|VERBOSE|VRB)\b/,alias:["trace","comment"]}],property:{pattern:/((?:^|[\]|])[ \t]*)[a-z_](?:[\w-]|\b\/\b)*(?:[. 
]\(?\w(?:[\w-]|\b\/\b)*\)?)*:(?=\s)/im,lookbehind:!0},separator:{pattern:/(^|[^-+])-{3,}|={3,}|\*{3,}|- - /m,lookbehind:!0,alias:"comment"},url:/\b(?:https?|ftp|file):\/\/[^\s|,;'"]*[^\s|,;'">.]/,email:{pattern:/(^|\s)[-\w+.]+@[a-z][a-z0-9-]*(?:\.[a-z][a-z0-9-]*)+(?=\s)/,lookbehind:!0,alias:"url"},"ip-address":{pattern:/\b(?:\d{1,3}(?:\.\d{1,3}){3})\b/i,alias:"constant"},"mac-address":{pattern:/\b[a-f0-9]{2}(?::[a-f0-9]{2}){5}\b/i,alias:"constant"},domain:{pattern:/(^|\s)[a-z][a-z0-9-]*(?:\.[a-z][a-z0-9-]*)*\.[a-z][a-z0-9-]+(?=\s)/,lookbehind:!0,alias:"constant"},uuid:{pattern:/\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b/i,alias:"constant"},hash:{pattern:/\b(?:[a-f0-9]{32}){1,2}\b/i,alias:"constant"},"file-path":{pattern:/\b[a-z]:[\\/][^\s|,;:(){}\[\]"']+|(^|[\s:\[\](>|])\.{0,2}\/\w[^\s|,;:(){}\[\]"']*/i,lookbehind:!0,greedy:!0,alias:"string"},date:{pattern:RegExp(/\b\d{4}[-/]\d{2}[-/]\d{2}(?:T(?=\d{1,2}:)|(?=\s\d{1,2}:))/.source+"|"+/\b\d{1,4}[-/ ](?:\d{1,2}|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[-/ ]\d{2,4}T?\b/.source+"|"+/\b(?:(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)(?:\s{1,2}(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec))?|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s{1,2}\d{1,2}\b/.source,"i"),alias:"number"},time:{pattern:/\b\d{1,2}:\d{1,2}:\d{1,2}(?:[.,:]\d+)?(?:\s?[+-]\d{2}:?\d{2}|Z)?\b/,alias:"number"},boolean:/\b(?:true|false|null)\b/i,number:{pattern:/(^|[^.\w])(?:0x[a-f0-9]+|0o[0-7]+|0b[01]+|v?\d[\da-f]*(?:\.\d+)*(?:e[+-]?\d+)?[a-z]{0,3}\b)\b(?!\.\w)/i,lookbehind:!0},operator:/[;:?<=>~/@!$%&+\-|^(){}*#]/,punctuation:/[\[\].,]/}}e.exports=t,t.displayName="log",t.aliases=[]},8789(e){"use strict";function t(e){e.languages.lolcode={comment:[/\bOBTW\s[\s\S]*?\sTLDR\b/,/\bBTW.+/],string:{pattern:/"(?::.|[^":])*"/,inside:{variable:/:\{[^}]+\}/,symbol:[/:\([a-f\d]+\)/i,/:\[[^\]]+\]/,/:[)>o":]/]},greedy:!0},number:/(?:\B-)?(?:\b\d+(?:\.\d*)?|\B\.\d+)/,symbol:{pattern:/(^|\s)(?:A 
)?(?:YARN|NUMBR|NUMBAR|TROOF|BUKKIT|NOOB)(?=\s|,|$)/,lookbehind:!0,inside:{keyword:/A(?=\s)/}},label:{pattern:/((?:^|\s)(?:IM IN YR|IM OUTTA YR) )[a-zA-Z]\w*/,lookbehind:!0,alias:"string"},function:{pattern:/((?:^|\s)(?:I IZ|HOW IZ I|IZ) )[a-zA-Z]\w*/,lookbehind:!0},keyword:[{pattern:/(^|\s)(?:O HAI IM|KTHX|HAI|KTHXBYE|I HAS A|ITZ(?: A)?|R|AN|MKAY|SMOOSH|MAEK|IS NOW(?: A)?|VISIBLE|GIMMEH|O RLY\?|YA RLY|NO WAI|OIC|MEBBE|WTF\?|OMG|OMGWTF|GTFO|IM IN YR|IM OUTTA YR|FOUND YR|YR|TIL|WILE|UPPIN|NERFIN|I IZ|HOW IZ I|IF U SAY SO|SRS|HAS A|LIEK(?: A)?|IZ)(?=\s|,|$)/,lookbehind:!0},/'Z(?=\s|,|$)/],boolean:{pattern:/(^|\s)(?:WIN|FAIL)(?=\s|,|$)/,lookbehind:!0},variable:{pattern:/(^|\s)IT(?=\s|,|$)/,lookbehind:!0},operator:{pattern:/(^|\s)(?:NOT|BOTH SAEM|DIFFRINT|(?:SUM|DIFF|PRODUKT|QUOSHUNT|MOD|BIGGR|SMALLR|BOTH|EITHER|WON|ALL|ANY) OF)(?=\s|,|$)/,lookbehind:!0},punctuation:/\.{3}|…|,|!/}}e.exports=t,t.displayName="lolcode",t.aliases=[]},59803(e){"use strict";function t(e){e.languages.lua={comment:/^#!.+|--(?:\[(=*)\[[\s\S]*?\]\1\]|.*)/m,string:{pattern:/(["'])(?:(?!\1)[^\\\r\n]|\\z(?:\r\n|\s)|\\(?:\r\n|[^z]))*\1|\[(=*)\[[\s\S]*?\]\2\]/,greedy:!0},number:/\b0x[a-f\d]+(?:\.[a-f\d]*)?(?:p[+-]?\d+)?\b|\b\d+(?:\.\B|(?:\.\d*)?(?:e[+-]?\d+)?\b)|\B\.\d+(?:e[+-]?\d+)?\b/i,keyword:/\b(?:and|break|do|else|elseif|end|false|for|function|goto|if|in|local|nil|not|or|repeat|return|then|true|until|while)\b/,function:/(?!\d)\w+(?=\s*(?:[({]))/,operator:[/[-+*%^&|#]|\/\/?|<[<=]?|>[>=]?|[=~]=?/,{pattern:/(^|[^.])\.\.(?!\.)/,lookbehind:!0}],punctuation:/[\[\](){},;]|\.+|:+/}}e.exports=t,t.displayName="lua",t.aliases=[]},33055(e){"use strict";function t(e){e.languages.makefile={comment:{pattern:/(^|[^\\])#(?:\\(?:\r\n|[\s\S])|[^\\\r\n])*/,lookbehind:!0},string:{pattern:/(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0},builtin:/\.[A-Z][^:#=\s]+(?=\s*:(?!=))/,symbol:{pattern:/^(?:[^:=\s]|[ 
\t]+(?![\s:]))+(?=\s*:(?!=))/m,inside:{variable:/\$+(?:(?!\$)[^(){}:#=\s]+|(?=[({]))/}},variable:/\$+(?:(?!\$)[^(){}:#=\s]+|\([@*%<^+?][DF]\)|(?=[({]))/,keyword:[/-include\b|\b(?:define|else|endef|endif|export|ifn?def|ifn?eq|include|override|private|sinclude|undefine|unexport|vpath)\b/,{pattern:/(\()(?:addsuffix|abspath|and|basename|call|dir|error|eval|file|filter(?:-out)?|findstring|firstword|flavor|foreach|guile|if|info|join|lastword|load|notdir|or|origin|patsubst|realpath|shell|sort|strip|subst|suffix|value|warning|wildcard|word(?:s|list)?)(?=[ \t])/,lookbehind:!0}],operator:/(?:::|[?:+!])?=|[|@]/,punctuation:/[:;(){}]/}}e.exports=t,t.displayName="makefile",t.aliases=[]},90542(e){"use strict";function t(e){!function(e){var t=/(?:\\.|[^\\\n\r]|(?:\n|\r\n?)(?![\r\n]))/.source;function n(e){return e=e.replace(//g,function(){return t}),RegExp(/((?:^|[^\\])(?:\\{2})*)/.source+"(?:"+e+")")}var r=/(?:\\.|``(?:[^`\r\n]|`(?!`))+``|`[^`\r\n]+`|[^\\|\r\n`])+/.source,i=/\|?__(?:\|__)+\|?(?:(?:\n|\r\n?)|(?![\s\S]))/.source.replace(/__/g,function(){return r}),a=/\|?[ \t]*:?-{3,}:?[ \t]*(?:\|[ \t]*:?-{3,}:?[ \t]*)+\|?(?:\n|\r\n?)/.source;e.languages.markdown=e.languages.extend("markup",{}),e.languages.insertBefore("markdown","prolog",{"front-matter-block":{pattern:/(^(?:\s*[\r\n])?)---(?!.)[\s\S]*?[\r\n]---(?!.)/,lookbehind:!0,greedy:!0,inside:{punctuation:/^---|---$/,"font-matter":{pattern:/\S+(?:\s+\S+)*/,alias:["yaml","language-yaml"],inside:e.languages.yaml}}},blockquote:{pattern:/^>(?:[\t 
]*>)*/m,alias:"punctuation"},table:{pattern:RegExp("^"+i+a+"(?:"+i+")*","m"),inside:{"table-data-rows":{pattern:RegExp("^("+i+a+")(?:"+i+")*$"),lookbehind:!0,inside:{"table-data":{pattern:RegExp(r),inside:e.languages.markdown},punctuation:/\|/}},"table-line":{pattern:RegExp("^("+i+")"+a+"$"),lookbehind:!0,inside:{punctuation:/\||:?-{3,}:?/}},"table-header-row":{pattern:RegExp("^"+i+"$"),inside:{"table-header":{pattern:RegExp(r),alias:"important",inside:e.languages.markdown},punctuation:/\|/}}}},code:[{pattern:/((?:^|\n)[ \t]*\n|(?:^|\r\n?)[ \t]*\r\n?)(?: {4}|\t).+(?:(?:\n|\r\n?)(?: {4}|\t).+)*/,lookbehind:!0,alias:"keyword"},{pattern:/^```[\s\S]*?^```$/m,greedy:!0,inside:{"code-block":{pattern:/^(```.*(?:\n|\r\n?))[\s\S]+?(?=(?:\n|\r\n?)^```$)/m,lookbehind:!0},"code-language":{pattern:/^(```).+/,lookbehind:!0},punctuation:/```/}}],title:[{pattern:/\S.*(?:\n|\r\n?)(?:==+|--+)(?=[ \t]*$)/m,alias:"important",inside:{punctuation:/==+$|--+$/}},{pattern:/(^\s*)#.+/m,lookbehind:!0,alias:"important",inside:{punctuation:/^#+|#+$/}}],hr:{pattern:/(^\s*)([*-])(?:[\t ]*\2){2,}(?=\s*$)/m,lookbehind:!0,alias:"punctuation"},list:{pattern:/(^\s*)(?:[*+-]|\d+\.)(?=[\t ].)/m,lookbehind:!0,alias:"punctuation"},"url-reference":{pattern:/!?\[[^\]]+\]:[\t ]+(?:\S+|<(?:\\.|[^>\\])+>)(?:[\t 
]+(?:"(?:\\.|[^"\\])*"|'(?:\\.|[^'\\])*'|\((?:\\.|[^)\\])*\)))?/,inside:{variable:{pattern:/^(!?\[)[^\]]+/,lookbehind:!0},string:/(?:"(?:\\.|[^"\\])*"|'(?:\\.|[^'\\])*'|\((?:\\.|[^)\\])*\))$/,punctuation:/^[\[\]!:]|[<>]/},alias:"url"},bold:{pattern:n(/\b__(?:(?!_)|_(?:(?!_))+_)+__\b|\*\*(?:(?!\*)|\*(?:(?!\*))+\*)+\*\*/.source),lookbehind:!0,greedy:!0,inside:{content:{pattern:/(^..)[\s\S]+(?=..$)/,lookbehind:!0,inside:{}},punctuation:/\*\*|__/}},italic:{pattern:n(/\b_(?:(?!_)|__(?:(?!_))+__)+_\b|\*(?:(?!\*)|\*\*(?:(?!\*))+\*\*)+\*/.source),lookbehind:!0,greedy:!0,inside:{content:{pattern:/(^.)[\s\S]+(?=.$)/,lookbehind:!0,inside:{}},punctuation:/[*_]/}},strike:{pattern:n(/(~~?)(?:(?!~))+\2/.source),lookbehind:!0,greedy:!0,inside:{content:{pattern:/(^~~?)[\s\S]+(?=\1$)/,lookbehind:!0,inside:{}},punctuation:/~~?/}},"code-snippet":{pattern:/(^|[^\\`])(?:``[^`\r\n]+(?:`[^`\r\n]+)*``(?!`)|`[^`\r\n]+`(?!`))/,lookbehind:!0,greedy:!0,alias:["code","keyword"]},url:{pattern:n(/!?\[(?:(?!\]))+\](?:\([^\s)]+(?:[\t ]+"(?:\\.|[^"\\])*")?\)|[ \t]?\[(?:(?!\]))+\])/.source),lookbehind:!0,greedy:!0,inside:{operator:/^!/,content:{pattern:/(^\[)[^\]]+(?=\])/,lookbehind:!0,inside:{}},variable:{pattern:/(^\][ \t]?\[)[^\]]+(?=\]$)/,lookbehind:!0},url:{pattern:/(^\]\()[^\s)]+/,lookbehind:!0},string:{pattern:/(^[ \t]+)"(?:\\.|[^"\\])*"(?=\)$)/,lookbehind:!0}}}}),["url","bold","italic","strike"].forEach(function(t){["url","bold","italic","strike","code-snippet"].forEach(function(n){t!==n&&(e.languages.markdown[t].inside.content.inside[n]=e.languages.markdown[n])})}),e.hooks.add("after-tokenize",function(e){("markdown"===e.language||"md"===e.language)&&t(e.tokens);function t(e){if(e&&"string"!=typeof e)for(var n=0,r=e.length;n=a.length);u++){var c=s[u];if("string"==typeof c||c.content&&"string"==typeof c.content){var l=a[i],f=n.tokenStack[l],d="string"==typeof c?c:c.content,h=t(r,l),p=d.indexOf(h);if(p>-1){++i;var b=d.substring(0,p),m=new 
e.Token(r,e.tokenize(f,n.grammar),"language-"+r,f),g=d.substring(p+h.length),v=[];b&&v.push.apply(v,o([b])),v.push(m),g&&v.push.apply(v,o([g])),"string"==typeof c?s.splice.apply(s,[u,1].concat(v)):c.content=v}}else c.content&&o(c.content)}return s}}}})}(e)}e.exports=t,t.displayName="markupTemplating",t.aliases=[]},2717(e){"use strict";function t(e){e.languages.markup={comment://,prolog:/<\?[\s\S]+?\?>/,doctype:{pattern:/"'[\]]|"[^"]*"|'[^']*')+(?:\[(?:[^<"'\]]|"[^"]*"|'[^']*'|<(?!!--)|)*\]\s*)?>/i,greedy:!0,inside:{"internal-subset":{pattern:/(^[^\[]*\[)[\s\S]+(?=\]>$)/,lookbehind:!0,greedy:!0,inside:null},string:{pattern:/"[^"]*"|'[^']*'/,greedy:!0},punctuation:/^$|[[\]]/,"doctype-tag":/^DOCTYPE/,name:/[^\s<>'"]+/}},cdata://i,tag:{pattern:/<\/?(?!\d)[^\s>\/=$<%]+(?:\s(?:\s*[^\s>\/=]+(?:\s*=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+(?=[\s>]))|(?=[\s/>])))+)?\s*\/?>/,greedy:!0,inside:{tag:{pattern:/^<\/?[^\s>\/]+/,inside:{punctuation:/^<\/?/,namespace:/^[^\s>\/:]+:/}},"special-attr":[],"attr-value":{pattern:/=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+)/,inside:{punctuation:[{pattern:/^=/,alias:"attr-equals"},/"|'/]}},punctuation:/\/?>/,"attr-name":{pattern:/[^\s>\/]+/,inside:{namespace:/^[^\s>\/:]+:/}}}},entity:[{pattern:/&[\da-z]{1,8};/i,alias:"named-entity"},/&#x?[\da-f]{1,8};/i]},e.languages.markup.tag.inside["attr-value"].inside.entity=e.languages.markup.entity,e.languages.markup.doctype.inside["internal-subset"].inside=e.languages.markup,e.hooks.add("wrap",function(e){"entity"===e.type&&(e.attributes.title=e.content.value.replace(/&/,"&"))}),Object.defineProperty(e.languages.markup.tag,"addInlined",{value:function(t,n){var r={};r["language-"+n]={pattern:/(^$)/i,lookbehind:!0,inside:e.languages[n]},r.cdata=/^$/i;var i={"included-cdata":{pattern://i,inside:r}};i["language-"+n]={pattern:/[\s\S]+/,inside:e.languages[n]};var a={};a[t]={pattern:RegExp(/(<__[^>]*>)(?:))*\]\]>|(?!)/.source.replace(/__/g,function(){return 
t}),"i"),lookbehind:!0,greedy:!0,inside:i},e.languages.insertBefore("markup","cdata",a)}}),Object.defineProperty(e.languages.markup.tag,"addAttribute",{value:function(t,n){e.languages.markup.tag.inside["special-attr"].push({pattern:RegExp(/(^|["'\s])/.source+"(?:"+t+")"+/\s*=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+(?=[\s>]))/.source,"i"),lookbehind:!0,inside:{"attr-name":/^[^\s=]+/,"attr-value":{pattern:/=[\s\S]+/,inside:{value:{pattern:/(^=\s*(["']|(?!["'])))\S[\s\S]*(?=\2$)/,lookbehind:!0,alias:[n,"language-"+n],inside:e.languages[n]},punctuation:[{pattern:/^=/,alias:"attr-equals"},/"|'/]}}}})}}),e.languages.html=e.languages.markup,e.languages.mathml=e.languages.markup,e.languages.svg=e.languages.markup,e.languages.xml=e.languages.extend("markup",{}),e.languages.ssml=e.languages.xml,e.languages.atom=e.languages.xml,e.languages.rss=e.languages.xml}e.exports=t,t.displayName="markup",t.aliases=["html","mathml","svg","xml","ssml","atom","rss"]},27992(e){"use strict";function t(e){e.languages.matlab={comment:[/%\{[\s\S]*?\}%/,/%.+/],string:{pattern:/\B'(?:''|[^'\r\n])*'/,greedy:!0},number:/(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[eE][+-]?\d+)?(?:[ij])?|\b[ij]\b/,keyword:/\b(?:break|case|catch|continue|else|elseif|end|for|function|if|inf|NaN|otherwise|parfor|pause|pi|return|switch|try|while)\b/,function:/\b(?!\d)\w+(?=\s*\()/,operator:/\.?[*^\/\\']|[+\-:@]|[<>=~]=?|&&?|\|\|?/,punctuation:/\.{3}|[.,;\[\](){}!]/}}e.exports=t,t.displayName="matlab",t.aliases=[]},606(e){"use strict";function 
t(e){e.languages.mel={comment:/\/\/.*/,code:{pattern:/`(?:\\.|[^\\`\r\n])*`/,greedy:!0,alias:"italic",inside:{delimiter:{pattern:/^`|`$/,alias:"punctuation"}}},string:{pattern:/"(?:\\.|[^\\"\r\n])*"/,greedy:!0},variable:/\$\w+/,number:/\b0x[\da-fA-F]+\b|\b\d+(?:\.\d*)?|\B\.\d+/,flag:{pattern:/-[^\d\W]\w*/,alias:"operator"},keyword:/\b(?:break|case|continue|default|do|else|float|for|global|if|in|int|matrix|proc|return|string|switch|vector|while)\b/,function:/\b\w+(?=\()|\b(?:about|abs|addAttr|addAttributeEditorNodeHelp|addDynamic|addNewShelfTab|addPP|addPanelCategory|addPrefixToName|advanceToNextDrivenKey|affectedNet|affects|aimConstraint|air|alias|aliasAttr|align|alignCtx|alignCurve|alignSurface|allViewFit|ambientLight|angle|angleBetween|animCone|animCurveEditor|animDisplay|animView|annotate|appendStringArray|applicationName|applyAttrPreset|applyTake|arcLenDimContext|arcLengthDimension|arclen|arrayMapper|art3dPaintCtx|artAttrCtx|artAttrPaintVertexCtx|artAttrSkinPaintCtx|artAttrTool|artBuildPaintMenu|artFluidAttrCtx|artPuttyCtx|artSelectCtx|artSetPaintCtx|artUserPaintCtx|assignCommand|assignInputDevice|assignViewportFactories|attachCurve|attachDeviceAttr|attachSurface|attrColorSliderGrp|attrCompatibility|attrControlGrp|attrEnumOptionMenu|attrEnumOptionMenuGrp|attrFieldGrp|attrFieldSliderGrp|attrNavigationControlGrp|attrPresetEditWin|attributeExists|attributeInfo|attributeMenu|attributeQuery|autoKeyframe|autoPlace|bakeClip|bakeFluidShading|bakePartialHistory|bakeResults|bakeSimulation|basename|basenameEx|batchRender|bessel|bevel|bevelPlus|binMembership|bindSkin|blend2|blendShape|blendShapeEditor|blendShapePanel|blendTwoAttr|blindDataType|boneLattice|boundary|boxDollyCtx|boxZoomCtx|bufferCurve|buildBookmarkMenu|buildKeyframeMenu|button|buttonManip|CBG|cacheFile|cacheFileCombine|cacheFileMerge|cacheFileTrack|camera|cameraView|canCreateManip|canvas|capitalizeString|catch|catchQuiet|ceil|changeSubdivComponentDisplayLevel|changeSubdivRegion|channelBox|character|characterMa
p|characterOutlineEditor|characterize|chdir|checkBox|checkBoxGrp|checkDefaultRenderGlobals|choice|circle|circularFillet|clamp|clear|clearCache|clip|clipEditor|clipEditorCurrentTimeCtx|clipSchedule|clipSchedulerOutliner|clipTrimBefore|closeCurve|closeSurface|cluster|cmdFileOutput|cmdScrollFieldExecuter|cmdScrollFieldReporter|cmdShell|coarsenSubdivSelectionList|collision|color|colorAtPoint|colorEditor|colorIndex|colorIndexSliderGrp|colorSliderButtonGrp|colorSliderGrp|columnLayout|commandEcho|commandLine|commandPort|compactHairSystem|componentEditor|compositingInterop|computePolysetVolume|condition|cone|confirmDialog|connectAttr|connectControl|connectDynamic|connectJoint|connectionInfo|constrain|constrainValue|constructionHistory|container|containsMultibyte|contextInfo|control|convertFromOldLayers|convertIffToPsd|convertLightmap|convertSolidTx|convertTessellation|convertUnit|copyArray|copyFlexor|copyKey|copySkinWeights|cos|cpButton|cpCache|cpClothSet|cpCollision|cpConstraint|cpConvClothToMesh|cpForces|cpGetSolverAttr|cpPanel|cpProperty|cpRigidCollisionFilter|cpSeam|cpSetEdit|cpSetSolverAttr|cpSolver|cpSolverTypes|cpTool|cpUpdateClothUVs|createDisplayLayer|createDrawCtx|createEditor|createLayeredPsdFile|createMotionField|createNewShelf|createNode|createRenderLayer|createSubdivRegion|cross|crossProduct|ctxAbort|ctxCompletion|ctxEditMode|ctxTraverse|currentCtx|currentTime|currentTimeCtx|currentUnit|curve|curveAddPtCtx|curveCVCtx|curveEPCtx|curveEditorCtx|curveIntersect|curveMoveEPCtx|curveOnSurface|curveSketchCtx|cutKey|cycleCheck|cylinder|dagPose|date|defaultLightListCheckBox|defaultNavigation|defineDataServer|defineVirtualDevice|deformer|deg_to_rad|delete|deleteAttr|deleteShadingGroupsAndMaterials|deleteShelfTab|deleteUI|deleteUnusedBrushes|delrandstr|detachCurve|detachDeviceAttr|detachSurface|deviceEditor|devicePanel|dgInfo|dgdirty|dgeval|dgtimer|dimWhen|directKeyCtx|directionalLight|dirmap|dirname|disable|disconnectAttr|disconnectJoint|diskCache|displacementToPoly|dis
playAffected|displayColor|displayCull|displayLevelOfDetail|displayPref|displayRGBColor|displaySmoothness|displayStats|displayString|displaySurface|distanceDimContext|distanceDimension|doBlur|dolly|dollyCtx|dopeSheetEditor|dot|dotProduct|doubleProfileBirailSurface|drag|dragAttrContext|draggerContext|dropoffLocator|duplicate|duplicateCurve|duplicateSurface|dynCache|dynControl|dynExport|dynExpression|dynGlobals|dynPaintEditor|dynParticleCtx|dynPref|dynRelEdPanel|dynRelEditor|dynamicLoad|editAttrLimits|editDisplayLayerGlobals|editDisplayLayerMembers|editRenderLayerAdjustment|editRenderLayerGlobals|editRenderLayerMembers|editor|editorTemplate|effector|emit|emitter|enableDevice|encodeString|endString|endsWith|env|equivalent|equivalentTol|erf|error|eval|evalDeferred|evalEcho|event|exactWorldBoundingBox|exclusiveLightCheckBox|exec|executeForEachObject|exists|exp|expression|expressionEditorListen|extendCurve|extendSurface|extrude|fcheck|fclose|feof|fflush|fgetline|fgetword|file|fileBrowserDialog|fileDialog|fileExtension|fileInfo|filetest|filletCurve|filter|filterCurve|filterExpand|filterStudioImport|findAllIntersections|findAnimCurves|findKeyframe|findMenuItem|findRelatedSkinCluster|finder|firstParentOf|fitBspline|flexor|floatEq|floatField|floatFieldGrp|floatScrollBar|floatSlider|floatSlider2|floatSliderButtonGrp|floatSliderGrp|floor|flow|fluidCacheInfo|fluidEmitter|fluidVoxelInfo|flushUndo|fmod|fontDialog|fopen|formLayout|format|fprint|frameLayout|fread|freeFormFillet|frewind|fromNativePath|fwrite|gamma|gauss|geometryConstraint|getApplicationVersionAsFloat|getAttr|getClassification|getDefaultBrush|getFileList|getFluidAttr|getInputDeviceRange|getMayaPanelTypes|getModifiers|getPanel|getParticleAttr|getPluginResource|getenv|getpid|glRender|glRenderEditor|globalStitch|gmatch|goal|gotoBindPose|grabColor|gradientControl|gradientControlNoAttr|graphDollyCtx|graphSelectContext|graphTrackCtx|gravity|grid|gridLayout|group|groupObjectsByName|HfAddAttractorToAS|HfAssignAS|HfBuildEqualMa
p|HfBuildFurFiles|HfBuildFurImages|HfCancelAFR|HfConnectASToHF|HfCreateAttractor|HfDeleteAS|HfEditAS|HfPerformCreateAS|HfRemoveAttractorFromAS|HfSelectAttached|HfSelectAttractors|HfUnAssignAS|hardenPointCurve|hardware|hardwareRenderPanel|headsUpDisplay|headsUpMessage|help|helpLine|hermite|hide|hilite|hitTest|hotBox|hotkey|hotkeyCheck|hsv_to_rgb|hudButton|hudSlider|hudSliderButton|hwReflectionMap|hwRender|hwRenderLoad|hyperGraph|hyperPanel|hyperShade|hypot|iconTextButton|iconTextCheckBox|iconTextRadioButton|iconTextRadioCollection|iconTextScrollList|iconTextStaticLabel|ikHandle|ikHandleCtx|ikHandleDisplayScale|ikSolver|ikSplineHandleCtx|ikSystem|ikSystemInfo|ikfkDisplayMethod|illustratorCurves|image|imfPlugins|inheritTransform|insertJoint|insertJointCtx|insertKeyCtx|insertKnotCurve|insertKnotSurface|instance|instanceable|instancer|intField|intFieldGrp|intScrollBar|intSlider|intSliderGrp|interToUI|internalVar|intersect|iprEngine|isAnimCurve|isConnected|isDirty|isParentOf|isSameObject|isTrue|isValidObjectName|isValidString|isValidUiName|isolateSelect|itemFilter|itemFilterAttr|itemFilterRender|itemFilterType|joint|jointCluster|jointCtx|jointDisplayScale|jointLattice|keyTangent|keyframe|keyframeOutliner|keyframeRegionCurrentTimeCtx|keyframeRegionDirectKeyCtx|keyframeRegionDollyCtx|keyframeRegionInsertKeyCtx|keyframeRegionMoveKeyCtx|keyframeRegionScaleKeyCtx|keyframeRegionSelectKeyCtx|keyframeRegionSetKeyCtx|keyframeRegionTrackCtx|keyframeStats|lassoContext|lattice|latticeDeformKeyCtx|launch|launchImageEditor|layerButton|layeredShaderPort|layeredTexturePort|layout|layoutDialog|lightList|lightListEditor|lightListPanel|lightlink|lineIntersection|linearPrecision|linstep|listAnimatable|listAttr|listCameras|listConnections|listDeviceAttachments|listHistory|listInputDeviceAxes|listInputDeviceButtons|listInputDevices|listMenuAnnotation|listNodeTypes|listPanelCategories|listRelatives|listSets|listTransforms|listUnselected|listerEditor|loadFluid|loadNewShelf|loadPlugin|loadPluginL
anguageResources|loadPrefObjects|localizedPanelLabel|lockNode|loft|log|longNameOf|lookThru|ls|lsThroughFilter|lsType|lsUI|Mayatomr|mag|makeIdentity|makeLive|makePaintable|makeRoll|makeSingleSurface|makeTubeOn|makebot|manipMoveContext|manipMoveLimitsCtx|manipOptions|manipRotateContext|manipRotateLimitsCtx|manipScaleContext|manipScaleLimitsCtx|marker|match|max|memory|menu|menuBarLayout|menuEditor|menuItem|menuItemToShelf|menuSet|menuSetPref|messageLine|min|minimizeApp|mirrorJoint|modelCurrentTimeCtx|modelEditor|modelPanel|mouse|movIn|movOut|move|moveIKtoFK|moveKeyCtx|moveVertexAlongDirection|multiProfileBirailSurface|mute|nParticle|nameCommand|nameField|namespace|namespaceInfo|newPanelItems|newton|nodeCast|nodeIconButton|nodeOutliner|nodePreset|nodeType|noise|nonLinear|normalConstraint|normalize|nurbsBoolean|nurbsCopyUVSet|nurbsCube|nurbsEditUV|nurbsPlane|nurbsSelect|nurbsSquare|nurbsToPoly|nurbsToPolygonsPref|nurbsToSubdiv|nurbsToSubdivPref|nurbsUVSet|nurbsViewDirectionVector|objExists|objectCenter|objectLayer|objectType|objectTypeUI|obsoleteProc|oceanNurbsPreviewPlane|offsetCurve|offsetCurveOnSurface|offsetSurface|openGLExtension|openMayaPref|optionMenu|optionMenuGrp|optionVar|orbit|orbitCtx|orientConstraint|outlinerEditor|outlinerPanel|overrideModifier|paintEffectsDisplay|pairBlend|palettePort|paneLayout|panel|panelConfiguration|panelHistory|paramDimContext|paramDimension|paramLocator|parent|parentConstraint|particle|particleExists|particleInstancer|particleRenderInfo|partition|pasteKey|pathAnimation|pause|pclose|percent|performanceOptions|pfxstrokes|pickWalk|picture|pixelMove|planarSrf|plane|play|playbackOptions|playblast|plugAttr|plugNode|pluginInfo|pluginResourceUtil|pointConstraint|pointCurveConstraint|pointLight|pointMatrixMult|pointOnCurve|pointOnSurface|pointPosition|poleVectorConstraint|polyAppend|polyAppendFacetCtx|polyAppendVertex|polyAutoProjection|polyAverageNormal|polyAverageVertex|polyBevel|polyBlendColor|polyBlindData|polyBoolOp|polyBridgeEdge|polyCa
cheMonitor|polyCheck|polyChipOff|polyClipboard|polyCloseBorder|polyCollapseEdge|polyCollapseFacet|polyColorBlindData|polyColorDel|polyColorPerVertex|polyColorSet|polyCompare|polyCone|polyCopyUV|polyCrease|polyCreaseCtx|polyCreateFacet|polyCreateFacetCtx|polyCube|polyCut|polyCutCtx|polyCylinder|polyCylindricalProjection|polyDelEdge|polyDelFacet|polyDelVertex|polyDuplicateAndConnect|polyDuplicateEdge|polyEditUV|polyEditUVShell|polyEvaluate|polyExtrudeEdge|polyExtrudeFacet|polyExtrudeVertex|polyFlipEdge|polyFlipUV|polyForceUV|polyGeoSampler|polyHelix|polyInfo|polyInstallAction|polyLayoutUV|polyListComponentConversion|polyMapCut|polyMapDel|polyMapSew|polyMapSewMove|polyMergeEdge|polyMergeEdgeCtx|polyMergeFacet|polyMergeFacetCtx|polyMergeUV|polyMergeVertex|polyMirrorFace|polyMoveEdge|polyMoveFacet|polyMoveFacetUV|polyMoveUV|polyMoveVertex|polyNormal|polyNormalPerVertex|polyNormalizeUV|polyOptUvs|polyOptions|polyOutput|polyPipe|polyPlanarProjection|polyPlane|polyPlatonicSolid|polyPoke|polyPrimitive|polyPrism|polyProjection|polyPyramid|polyQuad|polyQueryBlindData|polyReduce|polySelect|polySelectConstraint|polySelectConstraintMonitor|polySelectCtx|polySelectEditCtx|polySeparate|polySetToFaceNormal|polySewEdge|polyShortestPathCtx|polySmooth|polySoftEdge|polySphere|polySphericalProjection|polySplit|polySplitCtx|polySplitEdge|polySplitRing|polySplitVertex|polyStraightenUVBorder|polySubdivideEdge|polySubdivideFacet|polyToSubdiv|polyTorus|polyTransfer|polyTriangulate|polyUVSet|polyUnite|polyWedgeFace|popen|popupMenu|pose|pow|preloadRefEd|print|progressBar|progressWindow|projFileViewer|projectCurve|projectTangent|projectionContext|projectionManip|promptDialog|propModCtx|propMove|psdChannelOutliner|psdEditTextureFile|psdExport|psdTextureFile|putenv|pwd|python|querySubdiv|quit|rad_to_deg|radial|radioButton|radioButtonGrp|radioCollection|radioMenuItemCollection|rampColorPort|rand|randomizeFollicles|randstate|rangeControl|readTake|rebuildCurve|rebuildSurface|recordAttr|recordDevice|r
edo|reference|referenceEdit|referenceQuery|refineSubdivSelectionList|refresh|refreshAE|registerPluginResource|rehash|reloadImage|removeJoint|removeMultiInstance|removePanelCategory|rename|renameAttr|renameSelectionList|renameUI|render|renderGlobalsNode|renderInfo|renderLayerButton|renderLayerParent|renderLayerPostProcess|renderLayerUnparent|renderManip|renderPartition|renderQualityNode|renderSettings|renderThumbnailUpdate|renderWindowEditor|renderWindowSelectContext|renderer|reorder|reorderDeformers|requires|reroot|resampleFluid|resetAE|resetPfxToPolyCamera|resetTool|resolutionNode|retarget|reverseCurve|reverseSurface|revolve|rgb_to_hsv|rigidBody|rigidSolver|roll|rollCtx|rootOf|rot|rotate|rotationInterpolation|roundConstantRadius|rowColumnLayout|rowLayout|runTimeCommand|runup|sampleImage|saveAllShelves|saveAttrPreset|saveFluid|saveImage|saveInitialState|saveMenu|savePrefObjects|savePrefs|saveShelf|saveToolSettings|scale|scaleBrushBrightness|scaleComponents|scaleConstraint|scaleKey|scaleKeyCtx|sceneEditor|sceneUIReplacement|scmh|scriptCtx|scriptEditorInfo|scriptJob|scriptNode|scriptTable|scriptToShelf|scriptedPanel|scriptedPanelType|scrollField|scrollLayout|sculpt|searchPathArray|seed|selLoadSettings|select|selectContext|selectCurveCV|selectKey|selectKeyCtx|selectKeyframeRegionCtx|selectMode|selectPref|selectPriority|selectType|selectedNodes|selectionConnection|separator|setAttr|setAttrEnumResource|setAttrMapping|setAttrNiceNameResource|setConstraintRestPosition|setDefaultShadingGroup|setDrivenKeyframe|setDynamic|setEditCtx|setEditor|setFluidAttr|setFocus|setInfinity|setInputDeviceMapping|setKeyCtx|setKeyPath|setKeyframe|setKeyframeBlendshapeTargetWts|setMenuMode|setNodeNiceNameResource|setNodeTypeFlag|setParent|setParticleAttr|setPfxToPolyCamera|setPluginResource|setProject|setStampDensity|setStartupMessage|setState|setToolTo|setUITemplate|setXformManip|sets|shadingConnection|shadingGeometryRelCtx|shadingLightRelCtx|shadingNetworkCompare|shadingNode|shapeCompare|she
lfButton|shelfLayout|shelfTabLayout|shellField|shortNameOf|showHelp|showHidden|showManipCtx|showSelectionInTitle|showShadingGroupAttrEditor|showWindow|sign|simplify|sin|singleProfileBirailSurface|size|sizeBytes|skinCluster|skinPercent|smoothCurve|smoothTangentSurface|smoothstep|snap2to2|snapKey|snapMode|snapTogetherCtx|snapshot|soft|softMod|softModCtx|sort|sound|soundControl|source|spaceLocator|sphere|sphrand|spotLight|spotLightPreviewPort|spreadSheetEditor|spring|sqrt|squareSurface|srtContext|stackTrace|startString|startsWith|stitchAndExplodeShell|stitchSurface|stitchSurfacePoints|strcmp|stringArrayCatenate|stringArrayContains|stringArrayCount|stringArrayInsertAtIndex|stringArrayIntersector|stringArrayRemove|stringArrayRemoveAtIndex|stringArrayRemoveDuplicates|stringArrayRemoveExact|stringArrayToString|stringToStringArray|strip|stripPrefixFromName|stroke|subdAutoProjection|subdCleanTopology|subdCollapse|subdDuplicateAndConnect|subdEditUV|subdListComponentConversion|subdMapCut|subdMapSewMove|subdMatchTopology|subdMirror|subdToBlind|subdToPoly|subdTransferUVsToCache|subdiv|subdivCrease|subdivDisplaySmoothness|substitute|substituteAllString|substituteGeometry|substring|surface|surfaceSampler|surfaceShaderList|swatchDisplayPort|switchTable|symbolButton|symbolCheckBox|sysFile|system|tabLayout|tan|tangentConstraint|texLatticeDeformContext|texManipContext|texMoveContext|texMoveUVShellContext|texRotateContext|texScaleContext|texSelectContext|texSelectShortestPathCtx|texSmudgeUVContext|texWinToolCtx|text|textCurves|textField|textFieldButtonGrp|textFieldGrp|textManip|textScrollList|textToShelf|textureDisplacePlane|textureHairColor|texturePlacementContext|textureWindow|threadCount|threePointArcCtx|timeControl|timePort|timerX|toNativePath|toggle|toggleAxis|toggleWindowVisibility|tokenize|tokenizeList|tolerance|tolower|toolButton|toolCollection|toolDropped|toolHasOptions|toolPropertyWindow|torus|toupper|trace|track|trackCtx|transferAttributes|transformCompare|transformLimits|tr
anslator|trim|trunc|truncateFluidCache|truncateHairCache|tumble|tumbleCtx|turbulence|twoPointArcCtx|uiRes|uiTemplate|unassignInputDevice|undo|undoInfo|ungroup|uniform|unit|unloadPlugin|untangleUV|untitledFileName|untrim|upAxis|updateAE|userCtx|uvLink|uvSnapshot|validateShelfName|vectorize|view2dToolCtx|viewCamera|viewClipPlane|viewFit|viewHeadOn|viewLookAt|viewManip|viewPlace|viewSet|visor|volumeAxis|vortex|waitCursor|warning|webBrowser|webBrowserPrefs|whatIs|window|windowPref|wire|wireContext|workspace|wrinkle|wrinkleContext|writeTake|xbmLangPathList|xform)\b/,operator:[/\+[+=]?|-[-=]?|&&|\|\||[<>]=|[*\/!=]=?|[%^]/,{pattern:/(^|[^<])<(?!<)/,lookbehind:!0},{pattern:/(^|[^>])>(?!>)/,lookbehind:!0}],punctuation:/<<|>>|[.,:;?\[\](){}]/},e.languages.mel.code.inside.rest=e.languages.mel}e.exports=t,t.displayName="mel",t.aliases=[]},23388(e){"use strict";function t(e){e.languages.mizar={comment:/::.+/,keyword:/@proof\b|\b(?:according|aggregate|all|and|antonym|are|as|associativity|assume|asymmetry|attr|be|begin|being|by|canceled|case|cases|clusters?|coherence|commutativity|compatibility|connectedness|consider|consistency|constructors|contradiction|correctness|def|deffunc|define|definitions?|defpred|do|does|equals|end|environ|ex|exactly|existence|for|from|func|given|hence|hereby|holds|idempotence|identity|iff?|implies|involutiveness|irreflexivity|is|it|let|means|mode|non|not|notations?|now|of|or|otherwise|over|per|pred|prefix|projectivity|proof|provided|qua|reconsider|redefine|reduce|reducibility|reflexivity|registrations?|requirements|reserve|sch|schemes?|section|selector|set|sethood|st|struct|such|suppose|symmetry|synonym|take|that|the|then|theorems?|thesis|thus|to|transitivity|uniqueness|vocabular(?:y|ies)|when|where|with|wrt)\b/,parameter:{pattern:/\$(?:10|\d)/,alias:"variable"},variable:/\b\w+(?=:)/,number:/(?:\b|-)\d+\b/,operator:/\.\.\.|->|&|\.?=/,punctuation:/\(#|#\)|[,:;\[\](){}]/}}e.exports=t,t.displayName="mizar",t.aliases=[]},90596(e){"use strict";function 
t(e){var t,n,r,i;t=e,r=["ObjectId","Code","BinData","DBRef","Timestamp","NumberLong","NumberDecimal","MaxKey","MinKey","RegExp","ISODate","UUID"],i="(?:"+(n=(n=["$eq","$gt","$gte","$in","$lt","$lte","$ne","$nin","$and","$not","$nor","$or","$exists","$type","$expr","$jsonSchema","$mod","$regex","$text","$where","$geoIntersects","$geoWithin","$near","$nearSphere","$all","$elemMatch","$size","$bitsAllClear","$bitsAllSet","$bitsAnyClear","$bitsAnySet","$comment","$elemMatch","$meta","$slice","$currentDate","$inc","$min","$max","$mul","$rename","$set","$setOnInsert","$unset","$addToSet","$pop","$pull","$push","$pullAll","$each","$position","$slice","$sort","$bit","$addFields","$bucket","$bucketAuto","$collStats","$count","$currentOp","$facet","$geoNear","$graphLookup","$group","$indexStats","$limit","$listLocalSessions","$listSessions","$lookup","$match","$merge","$out","$planCacheStats","$project","$redact","$replaceRoot","$replaceWith","$sample","$set","$skip","$sort","$sortByCount","$unionWith","$unset","$unwind","$abs","$accumulator","$acos","$acosh","$add","$addToSet","$allElementsTrue","$and","$anyElementTrue","$arrayElemAt","$arrayToObject","$asin","$asinh","$atan","$atan2","$atanh","$avg","$binarySize","$bsonSize","$ceil","$cmp","$concat","$concatArrays","$cond","$convert","$cos","$dateFromParts","$dateToParts","$dateFromString","$dateToString","$dayOfMonth","$dayOfWeek","$dayOfYear","$degreesToRadians","$divide","$eq","$exp","$filter","$first","$floor","$function","$gt","$gte","$hour","$ifNull","$in","$indexOfArray","$indexOfBytes","$indexOfCP","$isArray","$isNumber","$isoDayOfWeek","$isoWeek","$isoWeekYear","$last","$last","$let","$literal","$ln","$log","$log10","$lt","$lte","$ltrim","$map","$max","$mergeObjects","$meta","$min","$millisecond","$minute","$mod","$month","$multiply","$ne","$not","$objectToArray","$or","$pow","$push","$radiansToDegrees","$range","$reduce","$regexFind","$regexFindAll","$regexMatch","$replaceOne","$replaceAll","$reverseArray","$round
","$rtrim","$second","$setDifference","$setEquals","$setIntersection","$setIsSubset","$setUnion","$size","$sin","$slice","$split","$sqrt","$stdDevPop","$stdDevSamp","$strcasecmp","$strLenBytes","$strLenCP","$substr","$substrBytes","$substrCP","$subtract","$sum","$switch","$tan","$toBool","$toDate","$toDecimal","$toDouble","$toInt","$toLong","$toObjectId","$toString","$toLower","$toUpper","$trim","$trunc","$type","$week","$year","$zip","$comment","$explain","$hint","$max","$maxTimeMS","$min","$orderby","$query","$returnKey","$showDiskLoc","$natural"]).map(function(e){return e.replace("$","\\$")})).join("|")+")\\b",t.languages.mongodb=t.languages.extend("javascript",{}),t.languages.insertBefore("mongodb","string",{property:{pattern:/(?:(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1|(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*)(?=\s*:)/,greedy:!0,inside:{keyword:RegExp("^(['\"])?"+i+"(?:\\1)?$")}}}),t.languages.mongodb.string.inside={url:{pattern:/https?:\/\/[-\w@:%.+~#=]{1,256}\.[a-z0-9()]{1,6}\b[-\w()@:%+.~#?&/=]*/i,greedy:!0},entity:{pattern:/\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b/,greedy:!0}},t.languages.insertBefore("mongodb","constant",{builtin:{pattern:RegExp("\\b(?:"+r.join("|")+")\\b"),alias:"keyword"}})}e.exports=t,t.displayName="mongodb",t.aliases=[]},95721(e){"use strict";function t(e){e.languages.monkey={string:/"[^"\r\n]*"/,comment:[{pattern:/^#Rem\s[\s\S]*?^#End/im,greedy:!0},{pattern:/'.+/,greedy:!0}],preprocessor:{pattern:/(^[ 
\t]*)#.+/m,lookbehind:!0,alias:"comment"},function:/\b\w+(?=\()/,"type-char":{pattern:/(\w)[?%#$]/,lookbehind:!0,alias:"variable"},number:{pattern:/((?:\.\.)?)(?:(?:\b|\B-\.?|\B\.)\d+(?:(?!\.\.)\.\d*)?|\$[\da-f]+)/i,lookbehind:!0},keyword:/\b(?:Void|Strict|Public|Private|Property|Bool|Int|Float|String|Array|Object|Continue|Exit|Import|Extern|New|Self|Super|Try|Catch|Eachin|True|False|Extends|Abstract|Final|Select|Case|Default|Const|Local|Global|Field|Method|Function|Class|End|If|Then|Else|ElseIf|EndIf|While|Wend|Repeat|Until|Forever|For|To|Step|Next|Return|Module|Interface|Implements|Inline|Throw|Null)\b/i,operator:/\.\.|<[=>]?|>=?|:?=|(?:[+\-*\/&~|]|\b(?:Mod|Shl|Shr)\b)=?|\b(?:And|Not|Or)\b/i,punctuation:/[.,:;()\[\]]/}}e.exports=t,t.displayName="monkey",t.aliases=[]},64262(e){"use strict";function t(e){e.languages.moonscript={comment:/--.*/,string:[{pattern:/'[^']*'|\[(=*)\[[\s\S]*?\]\1\]/,greedy:!0},{pattern:/"[^"]*"/,greedy:!0,inside:{interpolation:{pattern:/#\{[^{}]*\}/,inside:{moonscript:{pattern:/(^#\{)[\s\S]+(?=\})/,lookbehind:!0,inside:null},"interpolation-punctuation":{pattern:/#\{|\}/,alias:"punctuation"}}}}}],"class-name":[{pattern:/(\b(?:class|extends)[ 
\t]+)\w+/,lookbehind:!0},/\b[A-Z]\w*/],keyword:/\b(?:class|continue|do|else|elseif|export|extends|for|from|if|import|in|local|nil|return|self|super|switch|then|unless|using|when|while|with)\b/,variable:/@@?\w*/,property:{pattern:/\b(?!\d)\w+(?=:)|(:)(?!\d)\w+/,lookbehind:!0},function:{pattern:/\b(?:_G|_VERSION|assert|collectgarbage|coroutine\.(?:running|create|resume|status|wrap|yield)|debug\.(?:debug|gethook|getinfo|getlocal|getupvalue|setlocal|setupvalue|sethook|traceback|getfenv|getmetatable|getregistry|setfenv|setmetatable)|dofile|error|getfenv|getmetatable|io\.(?:stdin|stdout|stderr|close|flush|input|lines|open|output|popen|read|tmpfile|type|write)|ipairs|load|loadfile|loadstring|math\.(?:abs|acos|asin|atan|atan2|ceil|sin|cos|tan|deg|exp|floor|log|log10|max|min|fmod|modf|cosh|sinh|tanh|pow|rad|sqrt|frexp|ldexp|random|randomseed|pi)|module|next|os\.(?:clock|date|difftime|execute|exit|getenv|remove|rename|setlocale|time|tmpname)|package\.(?:cpath|loaded|loadlib|path|preload|seeall)|pairs|pcall|print|rawequal|rawget|rawset|require|select|setfenv|setmetatable|string\.(?:byte|char|dump|find|len|lower|rep|sub|upper|format|gsub|gmatch|match|reverse)|table\.(?:maxn|concat|sort|insert|remove)|tonumber|tostring|type|unpack|xpcall)\b/,inside:{punctuation:/\./}},boolean:/\b(?:false|true)\b/,number:/(?:\B\.\d+|\b\d+\.\d+|\b\d+(?=[eE]))(?:[eE][-+]?\d+)?\b|\b(?:0x[a-fA-F\d]+|\d+)(?:U?LL)?\b/,operator:/\.{3}|[-=]>|~=|(?:[-+*/%<>!=]|\.\.)=?|[:#^]|\b(?:and|or)\b=?|\b(?:not)\b/,punctuation:/[.,()[\]{}\\]/},e.languages.moonscript.string[1].inside.interpolation.inside.moonscript.inside=e.languages.moonscript,e.languages.moon=e.languages.moonscript}e.exports=t,t.displayName="moonscript",t.aliases=["moon"]},18190(e){"use strict";function 
t(e){e.languages.n1ql={comment:/\/\*[\s\S]*?(?:$|\*\/)/,parameter:/\$[\w.]+/,string:{pattern:/(["'])(?:\\[\s\S]|(?!\1)[^\\]|\1\1)*\1/,greedy:!0},identifier:{pattern:/`(?:\\[\s\S]|[^\\`]|``)*`/,greedy:!0},function:/\b(?:ABS|ACOS|ARRAY_AGG|ARRAY_APPEND|ARRAY_AVG|ARRAY_CONCAT|ARRAY_CONTAINS|ARRAY_COUNT|ARRAY_DISTINCT|ARRAY_FLATTEN|ARRAY_IFNULL|ARRAY_INSERT|ARRAY_INTERSECT|ARRAY_LENGTH|ARRAY_MAX|ARRAY_MIN|ARRAY_POSITION|ARRAY_PREPEND|ARRAY_PUT|ARRAY_RANGE|ARRAY_REMOVE|ARRAY_REPEAT|ARRAY_REPLACE|ARRAY_REVERSE|ARRAY_SORT|ARRAY_STAR|ARRAY_SUM|ARRAY_SYMDIFF|ARRAY_SYMDIFFN|ARRAY_UNION|ASIN|ATAN|ATAN2|AVG|BASE64|BASE64_DECODE|BASE64_ENCODE|BITAND|BITCLEAR|BITNOT|BITOR|BITSET|BITSHIFT|BITTEST|BITXOR|CEIL|CLOCK_LOCAL|CLOCK_MILLIS|CLOCK_STR|CLOCK_TZ|CLOCK_UTC|CONTAINS|CONTAINS_TOKEN|CONTAINS_TOKEN_LIKE|CONTAINS_TOKEN_REGEXP|COS|COUNT|CURL|DATE_ADD_MILLIS|DATE_ADD_STR|DATE_DIFF_MILLIS|DATE_DIFF_STR|DATE_FORMAT_STR|DATE_PART_MILLIS|DATE_PART_STR|DATE_RANGE_MILLIS|DATE_RANGE_STR|DATE_TRUNC_MILLIS|DATE_TRUNC_STR|DECODE_JSON|DEGREES|DURATION_TO_STR|E|ENCODED_SIZE|ENCODE_JSON|EXP|FLOOR|GREATEST|HAS_TOKEN|IFINF|IFMISSING|IFMISSINGORNULL|IFNAN|IFNANORINF|IFNULL|INITCAP|ISARRAY|ISATOM|ISBOOLEAN|ISNUMBER|ISOBJECT|ISSTRING|IsBitSET|LEAST|LENGTH|LN|LOG|LOWER|LTRIM|MAX|META|MILLIS|MILLIS_TO_LOCAL|MILLIS_TO_STR|MILLIS_TO_TZ|MILLIS_TO_UTC|MILLIS_TO_ZONE_NAME|MIN|MISSINGIF|NANIF|NEGINFIF|NOW_LOCAL|NOW_MILLIS|NOW_STR|NOW_TZ|NOW_UTC|NULLIF|OBJECT_ADD|OBJECT_CONCAT|OBJECT_INNER_PAIRS|OBJECT_INNER_VALUES|OBJECT_LENGTH|OBJECT_NAMES|OBJECT_PAIRS|OBJECT_PUT|OBJECT_REMOVE|OBJECT_RENAME|OBJECT_REPLACE|OBJECT_UNWRAP|OBJECT_VALUES|PAIRS|PI|POLY_LENGTH|POSINFIF|POSITION|POWER|RADIANS|RANDOM|REGEXP_CONTAINS|REGEXP_LIKE|REGEXP_POSITION|REGEXP_REPLACE|REPEAT|REPLACE|REVERSE|ROUND|RTRIM|SIGN|SIN|SPLIT|SQRT|STR_TO_DURATION|STR_TO_MILLIS|STR_TO_TZ|STR_TO_UTC|STR_TO_ZONE_NAME|SUBSTR|SUFFIXES|SUM|TAN|TITLE|TOARRAY|TOATOM|TOBOOLEAN|TOKENS|TONUMBER|TOOBJECT|TOSTRING|TRIM|TRUNC|TYPE|UPPER|WEEKDAY_MILLIS|WEEKDAY_STR)(
?=\s*\()/i,keyword:/\b(?:ALL|ALTER|ANALYZE|AS|ASC|BEGIN|BINARY|BOOLEAN|BREAK|BUCKET|BUILD|BY|CALL|CAST|CLUSTER|COLLATE|COLLECTION|COMMIT|CONNECT|CONTINUE|CORRELATE|COVER|CREATE|DATABASE|DATASET|DATASTORE|DECLARE|DECREMENT|DELETE|DERIVED|DESC|DESCRIBE|DISTINCT|DO|DROP|EACH|ELEMENT|EXCEPT|EXCLUDE|EXECUTE|EXPLAIN|FETCH|FLATTEN|FOR|FORCE|FROM|FUNCTION|GRANT|GROUP|GSI|HAVING|IF|IGNORE|ILIKE|INCLUDE|INCREMENT|INDEX|INFER|INLINE|INNER|INSERT|INTERSECT|INTO|IS|JOIN|KEY|KEYS|KEYSPACE|KNOWN|LAST|LEFT|LET|LETTING|LIMIT|LSM|MAP|MAPPING|MATCHED|MATERIALIZED|MERGE|MINUS|MISSING|NAMESPACE|NEST|NULL|NUMBER|OBJECT|OFFSET|ON|OPTION|ORDER|OUTER|OVER|PARSE|PARTITION|PASSWORD|PATH|POOL|PREPARE|PRIMARY|PRIVATE|PRIVILEGE|PROCEDURE|PUBLIC|RAW|REALM|REDUCE|RENAME|RETURN|RETURNING|REVOKE|RIGHT|ROLE|ROLLBACK|SATISFIES|SCHEMA|SELECT|SELF|SEMI|SET|SHOW|SOME|START|STATISTICS|STRING|SYSTEM|TO|TRANSACTION|TRIGGER|TRUNCATE|UNDER|UNION|UNIQUE|UNKNOWN|UNNEST|UNSET|UPDATE|UPSERT|USE|USER|USING|VALIDATE|VALUE|VALUES|VIA|VIEW|WHERE|WHILE|WITH|WORK|XOR)\b/i,boolean:/\b(?:TRUE|FALSE)\b/i,number:/(?:\b\d+\.|\B\.)\d+e[+\-]?\d+\b|\b\d+(?:\.\d*)?|\B\.\d+\b/i,operator:/[-+*\/%]|!=|==?|\|\||<[>=]?|>=?|\b(?:AND|ANY|ARRAY|BETWEEN|CASE|ELSE|END|EVERY|EXISTS|FIRST|IN|LIKE|NOT|OR|THEN|VALUED|WHEN|WITHIN)\b/i,punctuation:/[;[\](),.{}:]/}}e.exports=t,t.displayName="n1ql",t.aliases=[]},70896(e){"use strict";function 
t(e){e.languages.n4js=e.languages.extend("javascript",{keyword:/\b(?:any|Array|boolean|break|case|catch|class|const|constructor|continue|debugger|declare|default|delete|do|else|enum|export|extends|false|finally|for|from|function|get|if|implements|import|in|instanceof|interface|let|module|new|null|number|package|private|protected|public|return|set|static|string|super|switch|this|throw|true|try|typeof|var|void|while|with|yield)\b/}),e.languages.insertBefore("n4js","constant",{annotation:{pattern:/@+\w+/,alias:"operator"}}),e.languages.n4jsd=e.languages.n4js}e.exports=t,t.displayName="n4js",t.aliases=["n4jsd"]},42242(e){"use strict";function t(e){e.languages["nand2tetris-hdl"]={comment:/\/\/.*|\/\*[\s\S]*?(?:\*\/|$)/,keyword:/\b(?:CHIP|IN|OUT|PARTS|BUILTIN|CLOCKED)\b/,boolean:/\b(?:true|false)\b/,function:/\b[A-Za-z][A-Za-z0-9]*(?=\()/,number:/\b\d+\b/,operator:/=|\.\./,punctuation:/[{}[\];(),:]/}}e.exports=t,t.displayName="nand2tetrisHdl",t.aliases=[]},37943(e){"use strict";function t(e){!function(e){var t=/\{[^\r\n\[\]{}]*\}/,n={"quoted-string":{pattern:/"(?:[^"\\]|\\.)*"/,alias:"operator"},"command-param-id":{pattern:/(\s)\w+:/,lookbehind:!0,alias:"property"},"command-param-value":[{pattern:t,alias:"selector"},{pattern:/([\t ])\S+/,lookbehind:!0,greedy:!0,alias:"operator"},{pattern:/\S(?:.*\S)?/,alias:"operator"}]};function r(e){for(var t="[]{}",n=[],r=0;r.+/m,alias:"tag",inside:{value:{pattern:/(^>\w+[\t ]+)(?!\s)[^{}\r\n]+/,lookbehind:!0,alias:"operator"},key:{pattern:/(^>)\w+/,lookbehind:!0}}},label:{pattern:/^([\t ]*)#[\t ]*\w+[\t ]*$/m,lookbehind:!0,alias:"regex"},command:{pattern:/^([\t ]*)@\w+(?=[\t ]|$).*/m,lookbehind:!0,alias:"function",inside:{"command-name":/^@\w+/,expression:{pattern:t,greedy:!0,alias:"selector"},"command-params":{pattern:/\s*\S[\s\S]*/,inside:n}}},"generic-text":{pattern:/(^[ 
\t]*)[^#@>;\s].*/m,lookbehind:!0,alias:"punctuation",inside:{"escaped-char":/\\[{}\[\]"]/,expression:{pattern:t,greedy:!0,alias:"selector"},"inline-command":{pattern:/\[[\t ]*\w[^\r\n\[\]]*\]/,greedy:!0,alias:"function",inside:{"command-params":{pattern:/(^\[[\t ]*\w+\b)[\s\S]+(?=\]$)/,lookbehind:!0,inside:n},"command-param-name":{pattern:/^(\[[\t ]*)\w+/,lookbehind:!0,alias:"name"},"start-stop-char":/[\[\]]/}}}}},e.languages.nani=e.languages.naniscript,e.hooks.add("after-tokenize",function(e){e.tokens.forEach(function(e){if("string"!=typeof e&&"generic-text"===e.type){var t=i(e);r(t)||(e.type="bad-line",e.content=t)}})})}(e)}e.exports=t,t.displayName="naniscript",t.aliases=[]},293(e){"use strict";function t(e){e.languages.nasm={comment:/;.*$/m,string:/(["'`])(?:\\.|(?!\1)[^\\\r\n])*\1/,label:{pattern:/(^\s*)[A-Za-z._?$][\w.?$@~#]*:/m,lookbehind:!0,alias:"function"},keyword:[/\[?BITS (?:16|32|64)\]?/,{pattern:/(^\s*)section\s*[a-z.]+:?/im,lookbehind:!0},/(?:extern|global)[^;\r\n]*/i,/(?:CPU|FLOAT|DEFAULT).*$/m],register:{pattern:/\b(?:st\d|[xyz]mm\d\d?|[cdt]r\d|r\d\d?[bwd]?|[er]?[abcd]x|[abcd][hl]|[er]?(?:bp|sp|si|di)|[cdefgs]s)\b/i,alias:"variable"},number:/(?:\b|(?=\$))(?:0[hx](?:\.[\da-f]+|[\da-f]+(?:\.[\da-f]+)?)(?:p[+-]?\d+)?|\d[\da-f]+[hx]|\$\d[\da-f]*|0[oq][0-7]+|[0-7]+[oq]|0[by][01]+|[01]+[by]|0[dt]\d+|(?:\d+(?:\.\d+)?|\.\d+)(?:\.?e[+-]?\d+)?[dt]?)\b/i,operator:/[\[\]*+\-\/%<>=&|$!]/}}e.exports=t,t.displayName="nasm",t.aliases=[]},83873(e){"use strict";function t(e){e.languages.neon={comment:{pattern:/#.*/,greedy:!0},datetime:{pattern:/(^|[[{(=:,\s])\d\d\d\d-\d\d?-\d\d?(?:(?:[Tt]| +)\d\d?:\d\d:\d\d(?:\.\d*)? 
*(?:Z|[-+]\d\d?(?::?\d\d)?)?)?(?=$|[\]}),\s])/,lookbehind:!0,alias:"number"},key:{pattern:/(^|[[{(,\s])[^,:=[\]{}()'"\s]+(?=\s*:(?:$|[\]}),\s])|\s*=)/,lookbehind:!0,alias:"atrule"},number:{pattern:/(^|[[{(=:,\s])[+-]?(?:0x[\da-fA-F]+|0o[0-7]+|0b[01]+|(?:\d+(?:\.\d*)?|\.?\d+)(?:[eE][+-]?\d+)?)(?=$|[\]}),:=\s])/,lookbehind:!0},boolean:{pattern:/(^|[[{(=:,\s])(?:true|false|yes|no)(?=$|[\]}),:=\s])/i,lookbehind:!0},null:{pattern:/(^|[[{(=:,\s])(?:null)(?=$|[\]}),:=\s])/i,lookbehind:!0,alias:"keyword"},string:{pattern:/(^|[[{(=:,\s])(?:('''|""")\r?\n(?:(?:[^\r\n]|\r?\n(?![\t ]*\2))*\r?\n)?[\t ]*\2|'[^'\r\n]*'|"(?:\\.|[^\\"\r\n])*")/,lookbehind:!0,greedy:!0},literal:{pattern:/(^|[[{(=:,\s])(?:[^#"',:=[\]{}()\s`-]|[:-][^"',=[\]{}()\s])(?:[^,:=\]})(\s]|:(?![\s,\]})]|$)|[ \t]+[^#,:=\]})(\s])*/,lookbehind:!0,alias:"string"},punctuation:/[,:=[\]{}()-]/}}e.exports=t,t.displayName="neon",t.aliases=[]},75932(e){"use strict";function t(e){e.languages.nevod={comment:/\/\/.*|(?:\/\*[\s\S]*?(?:\*\/|$))/,string:{pattern:/(?:"(?:""|[^"])*"(?!")|'(?:''|[^'])*'(?!'))!?\*?/,greedy:!0,inside:{"string-attrs":/!$|!\*$|\*$/}},namespace:{pattern:/(@namespace\s+)[a-zA-Z0-9\-.]+(?=\s*\{)/,lookbehind:!0},pattern:{pattern:/(@pattern\s+)?#?[a-zA-Z0-9\-.]+(?:\s*\(\s*(?:~\s*)?[a-zA-Z0-9\-.]+\s*(?:,\s*(?:~\s*)?[a-zA-Z0-9\-.]*)*\))?(?=\s*=)/,lookbehind:!0,inside:{"pattern-name":{pattern:/^#?[a-zA-Z0-9\-.]+/,alias:"class-name"},fields:{pattern:/\(.*\)/,inside:{"field-name":{pattern:/[a-zA-Z0-9\-.]+/,alias:"variable"},punctuation:/[,()]/,operator:{pattern:/~/,alias:"field-hidden-mark"}}}}},search:{pattern:/(@search\s+|#)[a-zA-Z0-9\-.]+(?:\.\*)?(?=\s*;)/,alias:"function",lookbehind:!0},keyword:/@(?:require|namespace|pattern|search|inside|outside|having|where)\b/,"standard-pattern":{pattern:/\b(?:Word|Punct|Symbol|Space|LineBreak|Start|End|Alpha|AlphaNum|Num|NumAlpha|Blank|WordBreak|Any)(?:\([a-zA-Z0-9\-.,\s+]*\))?/,inside:{"standard-pattern-name":{pattern:/^[a-zA-Z0-9\-.]+/,alias:"builtin"},quantifier:{pa
ttern:/\b\d+(?:\s*\+|\s*-\s*\d+)?(?!\w)/,alias:"number"},"standard-pattern-attr":{pattern:/[a-zA-Z0-9\-.]+/,alias:"builtin"},punctuation:/[,()]/}},quantifier:{pattern:/\b\d+(?:\s*\+|\s*-\s*\d+)?(?!\w)/,alias:"number"},operator:[{pattern:/=/,alias:"pattern-def"},{pattern:/&/,alias:"conjunction"},{pattern:/~/,alias:"exception"},{pattern:/\?/,alias:"optionality"},{pattern:/[[\]]/,alias:"repetition"},{pattern:/[{}]/,alias:"variation"},{pattern:/[+_]/,alias:"sequence"},{pattern:/\.{2,3}/,alias:"span"}],"field-capture":[{pattern:/([a-zA-Z0-9\-.]+\s*\()\s*[a-zA-Z0-9\-.]+\s*:\s*[a-zA-Z0-9\-.]+(?:\s*,\s*[a-zA-Z0-9\-.]+\s*:\s*[a-zA-Z0-9\-.]+)*(?=\s*\))/,lookbehind:!0,inside:{"field-name":{pattern:/[a-zA-Z0-9\-.]+/,alias:"variable"},colon:/:/}},{pattern:/[a-zA-Z0-9\-.]+\s*:/,inside:{"field-name":{pattern:/[a-zA-Z0-9\-.]+/,alias:"variable"},colon:/:/}}],punctuation:/[:;,()]/,name:/[a-zA-Z0-9\-.]+/}}e.exports=t,t.displayName="nevod",t.aliases=[]},60221(e){"use strict";function t(e){var t,n;n=/\$(?:\w[a-z\d]*(?:_[^\x00-\x1F\s"'\\()$]*)?|\{[^}\s"'\\]+\})/i,(t=e).languages.nginx={comment:{pattern:/(^|[\s{};])#.*/,lookbehind:!0},directive:{pattern:/(^|\s)\w(?:[^;{}"'\\\s]|\\.|"(?:[^"\\]|\\.)*"|'(?:[^'\\]|\\.)*'|\s+(?:#.*(?!.)|(?![#\s])))*?(?=\s*[;{])/,lookbehind:!0,greedy:!0,inside:{string:{pattern:/((?:^|[^\\])(?:\\\\)*)(?:"(?:[^"\\]|\\.)*"|'(?:[^'\\]|\\.)*')/,lookbehind:!0,inside:{escape:{pattern:/\\["'\\nrt]/,alias:"entity"},variable:n}},comment:{pattern:/(\s)#.*/,lookbehind:!0,greedy:!0},keyword:{pattern:/^\S+/,greedy:!0},boolean:{pattern:/(\s)(?:off|on)(?!\S)/,lookbehind:!0},number:{pattern:/(\s)\d+[a-z]*(?!\S)/i,lookbehind:!0},variable:n}},punctuation:/[{};]/}}e.exports=t,t.displayName="nginx",t.aliases=[]},44188(e){"use strict";function 
t(e){e.languages.nim={comment:/#.*/,string:{pattern:/(?:(?:\b(?!\d)(?:\w|\\x[8-9a-fA-F][0-9a-fA-F])+)?(?:"""[\s\S]*?"""(?!")|"(?:\\[\s\S]|""|[^"\\])*")|'(?:\\(?:\d+|x[\da-fA-F]{2}|.)|[^'])')/,greedy:!0},number:/\b(?:0[xXoObB][\da-fA-F_]+|\d[\d_]*(?:(?!\.\.)\.[\d_]*)?(?:[eE][+-]?\d[\d_]*)?)(?:'?[iuf]\d*)?/,keyword:/\b(?:addr|as|asm|atomic|bind|block|break|case|cast|concept|const|continue|converter|defer|discard|distinct|do|elif|else|end|enum|except|export|finally|for|from|func|generic|if|import|include|interface|iterator|let|macro|method|mixin|nil|object|out|proc|ptr|raise|ref|return|static|template|try|tuple|type|using|var|when|while|with|without|yield)\b/,function:{pattern:/(?:(?!\d)(?:\w|\\x[8-9a-fA-F][0-9a-fA-F])+|`[^`\r\n]+`)\*?(?:\[[^\]]+\])?(?=\s*\()/,inside:{operator:/\*$/}},ignore:{pattern:/`[^`\r\n]+`/,inside:{punctuation:/`/}},operator:{pattern:/(^|[({\[](?=\.\.)|(?![({\[]\.).)(?:(?:[=+\-*\/<>@$~&%|!?^:\\]|\.\.|\.(?![)}\]]))+|\b(?:and|div|of|or|in|is|isnot|mod|not|notin|shl|shr|xor)\b)/m,lookbehind:!0},punctuation:/[({\[]\.|\.[)}\]]|[`(){}\[\],:]/}}e.exports=t,t.displayName="nim",t.aliases=[]},74426(e){"use strict";function 
t(e){e.languages.nix={comment:/\/\*[\s\S]*?\*\/|#.*/,string:{pattern:/"(?:[^"\\]|\\[\s\S])*"|''(?:(?!'')[\s\S]|''(?:'|\\|\$\{))*''/,greedy:!0,inside:{interpolation:{pattern:/(^|(?:^|(?!'').)[^\\])\$\{(?:[^{}]|\{[^}]*\})*\}/,lookbehind:!0,inside:{antiquotation:{pattern:/^\$(?=\{)/,alias:"variable"}}}}},url:[/\b(?:[a-z]{3,7}:\/\/)[\w\-+%~\/.:#=?&]+/,{pattern:/([^\/])(?:[\w\-+%~.:#=?&]*(?!\/\/)[\w\-+%~\/.:#=?&])?(?!\/\/)\/[\w\-+%~\/.:#=?&]*/,lookbehind:!0}],antiquotation:{pattern:/\$(?=\{)/,alias:"variable"},number:/\b\d+\b/,keyword:/\b(?:assert|builtins|else|if|in|inherit|let|null|or|then|with)\b/,function:/\b(?:abort|add|all|any|attrNames|attrValues|baseNameOf|compareVersions|concatLists|currentSystem|deepSeq|derivation|dirOf|div|elem(?:At)?|fetch(?:url|Tarball)|filter(?:Source)?|fromJSON|genList|getAttr|getEnv|hasAttr|hashString|head|import|intersectAttrs|is(?:Attrs|Bool|Function|Int|List|Null|String)|length|lessThan|listToAttrs|map|mul|parseDrvName|pathExists|read(?:Dir|File)|removeAttrs|replaceStrings|seq|sort|stringLength|sub(?:string)?|tail|throw|to(?:File|JSON|Path|String|XML)|trace|typeOf)\b|\bfoldl'\B/,boolean:/\b(?:true|false)\b/,operator:/[=!<>]=?|\+\+?|\|\||&&|\/\/|->?|[?@]/,punctuation:/[{}()[\].,:;]/},e.languages.nix.string.inside.interpolation.inside.rest=e.languages.nix}e.exports=t,t.displayName="nix",t.aliases=[]},88447(e){"use strict";function t(e){e.languages.nsis={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|[#;].*)/,lookbehind:!0},string:{pattern:/("|')(?:\\.|(?!\1)[^\\\r\n])*\1/,greedy:!0},keyword:{pattern:/(^[\t 
]*)(?:Abort|Add(?:BrandingImage|Size)|AdvSplash|Allow(?:RootDirInstall|SkipFiles)|AutoCloseWindow|Banner|BG(?:Font|Gradient|Image)|BrandingText|BringToFront|Call(?:InstDLL)?|Caption|ChangeUI|CheckBitmap|ClearErrors|CompletedText|ComponentText|CopyFiles|CRCCheck|Create(?:Directory|Font|ShortCut)|Delete(?:INISec|INIStr|RegKey|RegValue)?|Detail(?:Print|sButtonText)|Dialer|Dir(?:Text|Var|Verify)|EnableWindow|Enum(?:RegKey|RegValue)|Exch|Exec(?:Shell(?:Wait)?|Wait)?|ExpandEnvStrings|File(?:BufSize|Close|ErrorText|Open|Read|ReadByte|ReadUTF16LE|ReadWord|WriteUTF16LE|Seek|Write|WriteByte|WriteWord)?|Find(?:Close|First|Next|Window)|FlushINI|Get(?:CurInstType|CurrentAddress|DlgItem|DLLVersion(?:Local)?|ErrorLevel|FileTime(?:Local)?|FullPathName|Function(?:Address|End)?|InstDirError|LabelAddress|TempFileName)|Goto|HideWindow|Icon|If(?:Abort|Errors|FileExists|RebootFlag|Silent)|InitPluginsDir|Install(?:ButtonText|Colors|Dir(?:RegKey)?)|InstProgressFlags|Inst(?:Type(?:GetText|SetText)?)|Int(?:64|Ptr)?CmpU?|Int(?:64)?Fmt|Int(?:Ptr)?Op|IsWindow|Lang(?:DLL|String)|License(?:BkColor|Data|ForceSelection|LangString|Text)|LoadLanguageFile|LockWindow|Log(?:Set|Text)|Manifest(?:DPIAware|SupportedOS)|Math|MessageBox|MiscButtonText|Name|Nop|ns(?:Dialogs|Exec)|NSISdl|OutFile|Page(?:Callbacks)?|PE(?:DllCharacteristics|SubsysVer)|Pop|Push|Quit|Read(?:EnvStr|INIStr|RegDWORD|RegStr)|Reboot|RegDLL|Rename|RequestExecutionLevel|ReserveFile|Return|RMDir|SearchPath|Section(?:End|GetFlags|GetInstTypes|GetSize|GetText|Group|In|SetFlags|SetInstTypes|SetSize|SetText)?|SendMessage|Set(?:AutoClose|BrandingImage|Compress|Compressor(?:DictSize)?|CtlColors|CurInstType|DatablockOptimize|DateSave|Details(?:Print|View)|ErrorLevel|Errors|FileAttributes|Font|OutPath|Overwrite|PluginUnload|RebootFlag|RegView|ShellVarContext|Silent)|Show(?:InstDetails|UninstDetails|Window)|Silent(?:Install|UnInstall)|Sleep|SpaceTexts|Splash|StartMenu|Str(?:CmpS?|Cpy|Len)|SubCaption|System|Unicode|Uninstall(?:ButtonText|Caption|Ico
n|SubCaption|Text)|UninstPage|UnRegDLL|UserInfo|Var|VI(?:AddVersionKey|FileVersion|ProductVersion)|VPatch|WindowIcon|Write(?:INIStr|Reg(?:Bin|DWORD|ExpandStr|MultiStr|None|Str)|Uninstaller)|XPStyle)\b/m,lookbehind:!0},property:/\b(?:admin|all|auto|both|colored|false|force|hide|highest|lastused|leave|listonly|none|normal|notset|off|on|open|print|show|silent|silentlog|smooth|textonly|true|user|ARCHIVE|FILE_(?:ATTRIBUTE_ARCHIVE|ATTRIBUTE_NORMAL|ATTRIBUTE_OFFLINE|ATTRIBUTE_READONLY|ATTRIBUTE_SYSTEM|ATTRIBUTE_TEMPORARY)|HK(?:(?:CR|CU|LM)(?:32|64)?|DD|PD|U)|HKEY_(?:CLASSES_ROOT|CURRENT_CONFIG|CURRENT_USER|DYN_DATA|LOCAL_MACHINE|PERFORMANCE_DATA|USERS)|ID(?:ABORT|CANCEL|IGNORE|NO|OK|RETRY|YES)|MB_(?:ABORTRETRYIGNORE|DEFBUTTON1|DEFBUTTON2|DEFBUTTON3|DEFBUTTON4|ICONEXCLAMATION|ICONINFORMATION|ICONQUESTION|ICONSTOP|OK|OKCANCEL|RETRYCANCEL|RIGHT|RTLREADING|SETFOREGROUND|TOPMOST|USERICON|YESNO)|NORMAL|OFFLINE|READONLY|SHCTX|SHELL_CONTEXT|SYSTEM|TEMPORARY)\b/,constant:/\$\{[\w\.:\^-]+\}|\$\([\w\.:\^-]+\)/i,variable:/\$\w+/i,number:/\b0x[\dA-Fa-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[Ee]-?\d+)?/,operator:/--?|\+\+?|<=?|>=?|==?=?|&&?|\|\|?|[?*\/~^%]/,punctuation:/[{}[\];(),.:]/,important:{pattern:/(^[\t ]*)!(?:addincludedir|addplugindir|appendfile|cd|define|delfile|echo|else|endif|error|execute|finalize|getdllversion|gettlbversion|ifdef|ifmacrodef|ifmacrondef|ifndef|if|include|insertmacro|macroend|macro|makensis|packhdr|pragma|searchparse|searchreplace|system|tempfile|undef|verbose|warning)\b/im,lookbehind:!0}}}e.exports=t,t.displayName="nsis",t.aliases=[]},16032(e,t,n){"use strict";var r=n(65806);function 
i(e){e.register(r),e.languages.objectivec=e.languages.extend("c",{string:/("|')(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1|@"(?:\\(?:\r\n|[\s\S])|[^"\\\r\n])*"/,keyword:/\b(?:asm|typeof|inline|auto|break|case|char|const|continue|default|do|double|else|enum|extern|float|for|goto|if|int|long|register|return|short|signed|sizeof|static|struct|switch|typedef|union|unsigned|void|volatile|while|in|self|super)\b|(?:@interface|@end|@implementation|@protocol|@class|@public|@protected|@private|@property|@try|@catch|@finally|@throw|@synthesize|@dynamic|@selector)\b/,operator:/-[->]?|\+\+?|!=?|<>?=?|==?|&&?|\|\|?|[~^%?*\/@]/}),delete e.languages.objectivec["class-name"],e.languages.objc=e.languages.objectivec}e.exports=i,i.displayName="objectivec",i.aliases=["objc"]},33607(e){"use strict";function t(e){e.languages.ocaml={comment:/\(\*[\s\S]*?\*\)/,string:[{pattern:/"(?:\\.|[^\\\r\n"])*"/,greedy:!0},{pattern:/(['`])(?:\\(?:\d+|x[\da-f]+|.)|(?!\1)[^\\\r\n])\1/i,greedy:!0}],number:/\b(?:0x[\da-f][\da-f_]+|(?:0[bo])?\d[\d_]*(?:\.[\d_]*)?(?:e[+-]?[\d_]+)?)/i,directive:{pattern:/\B#\w+/,alias:"important"},label:{pattern:/\B~\w+/,alias:"function"},"type-variable":{pattern:/\B'\w+/,alias:"function"},variant:{pattern:/`\w+/,alias:"variable"},module:{pattern:/\b[A-Z]\w+/,alias:"variable"},keyword:/\b(?:as|assert|begin|class|constraint|do|done|downto|else|end|exception|external|for|fun|function|functor|if|in|include|inherit|initializer|lazy|let|match|method|module|mutable|new|nonrec|object|of|open|private|rec|sig|struct|then|to|try|type|val|value|virtual|when|where|while|with)\b/,boolean:/\b(?:false|true)\b/,operator:/:=|[=<>@^|&+\-*\/$%!?~][!$%&*+\-.\/:<=>?@^|~]*|\b(?:and|asr|land|lor|lsl|lsr|lxor|mod|or)\b/,punctuation:/[(){}\[\]|.,:;]|\b_\b/}}e.exports=t,t.displayName="ocaml",t.aliases=[]},22001(e,t,n){"use strict";var r=n(65806);function i(e){var 
t,n;e.register(r),(t=e).languages.opencl=t.languages.extend("c",{keyword:/\b(?:__attribute__|(?:__)?(?:constant|global|kernel|local|private|read_only|read_write|write_only)|auto|break|case|complex|const|continue|default|do|(?:float|double)(?:16(?:x(?:1|16|2|4|8))?|1x(?:1|16|2|4|8)|2(?:x(?:1|16|2|4|8))?|3|4(?:x(?:1|16|2|4|8))?|8(?:x(?:1|16|2|4|8))?)?|else|enum|extern|for|goto|(?:u?(?:char|short|int|long)|half|quad|bool)(?:2|3|4|8|16)?|if|imaginary|inline|packed|pipe|register|restrict|return|signed|sizeof|static|struct|switch|typedef|uniform|union|unsigned|void|volatile|while)\b/,number:/(?:\b0x(?:[\da-f]+(?:\.[\da-f]*)?|\.[\da-f]+)(?:p[+-]?\d+)?|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?)[fuhl]{0,4}/i,boolean:/\b(?:false|true)\b/,"constant-opencl-kernel":{pattern:/\b(?:CHAR_(?:BIT|MAX|MIN)|CLK_(?:ADDRESS_(?:CLAMP(?:_TO_EDGE)?|NONE|REPEAT)|FILTER_(?:LINEAR|NEAREST)|(?:LOCAL|GLOBAL)_MEM_FENCE|NORMALIZED_COORDS_(?:FALSE|TRUE))|CL_(?:BGRA|(?:HALF_)?FLOAT|INTENSITY|LUMINANCE|A?R?G?B?[Ax]?|(?:(?:UN)?SIGNED|[US]NORM)_(?:INT(?:8|16|32))|UNORM_(?:INT_101010|SHORT_(?:555|565)))|(?:DBL|FLT|HALF)_(?:DIG|EPSILON|MANT_DIG|(?:MIN|MAX)(?:(?:_10)?_EXP)?)|FLT_RADIX|HUGE_VALF?|INFINITY|(?:INT|LONG|SCHAR|SHRT)_(?:MAX|MIN)|(?:UCHAR|USHRT|UINT|ULONG)_MAX|MAXFLOAT|M_(?:[12]_PI|2_SQRTPI|E|LN(?:2|10)|LOG(?:10|2)E?|PI(?:_[24])?|SQRT(?:1_2|2))(?:_F|_H)?|NAN)\b/,alias:"constant"}}),t.languages.insertBefore("opencl","class-name",{"builtin-type":{pattern:/\b(?:_cl_(?:command_queue|context|device_id|event|kernel|mem|platform_id|program|sampler)|cl_(?:image_format|mem_fence_flags)|clk_event_t|event_t|image(?:1d_(?:array_|buffer_)?t|2d_(?:array_(?:depth_|msaa_depth_|msaa_)?|depth_|msaa_depth_|msaa_)?t|3d_t)|intptr_t|ndrange_t|ptrdiff_t|queue_t|reserve_id_t|sampler_t|size_t|uintptr_t)\b/,alias:"keyword"}}),n={"type-opencl-host":{pattern:/\b(?:cl_(?:GLenum|GLint|GLuin|addressing_mode|bitfield|bool|buffer_create_type|build_status|channel_(?:order|type)|(?:u?(?:char|short|int|long)|float|double)(?:2|3|4|
8|16)?|command_(?:queue(?:_info|_properties)?|type)|context(?:_info|_properties)?|device_(?:exec_capabilities|fp_config|id|info|local_mem_type|mem_cache_type|type)|(?:event|sampler)(?:_info)?|filter_mode|half|image_info|kernel(?:_info|_work_group_info)?|map_flags|mem(?:_flags|_info|_object_type)?|platform_(?:id|info)|profiling_info|program(?:_build_info|_info)?))\b/,alias:"keyword"},"boolean-opencl-host":{pattern:/\bCL_(?:TRUE|FALSE)\b/,alias:"boolean"},"constant-opencl-host":{pattern:/\bCL_(?:A|ABGR|ADDRESS_(?:CLAMP(?:_TO_EDGE)?|MIRRORED_REPEAT|NONE|REPEAT)|ARGB|BGRA|BLOCKING|BUFFER_CREATE_TYPE_REGION|BUILD_(?:ERROR|IN_PROGRESS|NONE|PROGRAM_FAILURE|SUCCESS)|COMMAND_(?:ACQUIRE_GL_OBJECTS|BARRIER|COPY_(?:BUFFER(?:_RECT|_TO_IMAGE)?|IMAGE(?:_TO_BUFFER)?)|FILL_(?:BUFFER|IMAGE)|MAP(?:_BUFFER|_IMAGE)|MARKER|MIGRATE(?:_SVM)?_MEM_OBJECTS|NATIVE_KERNEL|NDRANGE_KERNEL|READ_(?:BUFFER(?:_RECT)?|IMAGE)|RELEASE_GL_OBJECTS|SVM_(?:FREE|MAP|MEMCPY|MEMFILL|UNMAP)|TASK|UNMAP_MEM_OBJECT|USER|WRITE_(?:BUFFER(?:_RECT)?|IMAGE))|COMPILER_NOT_AVAILABLE|COMPILE_PROGRAM_FAILURE|COMPLETE|CONTEXT_(?:DEVICES|INTEROP_USER_SYNC|NUM_DEVICES|PLATFORM|PROPERTIES|REFERENCE_COUNT)|DEPTH(?:_STENCIL)?|DEVICE_(?:ADDRESS_BITS|AFFINITY_DOMAIN_(?:L[1-4]_CACHE|NEXT_PARTITIONABLE|NUMA)|AVAILABLE|BUILT_IN_KERNELS|COMPILER_AVAILABLE|DOUBLE_FP_CONFIG|ENDIAN_LITTLE|ERROR_CORRECTION_SUPPORT|EXECUTION_CAPABILITIES|EXTENSIONS|GLOBAL_(?:MEM_(?:CACHELINE_SIZE|CACHE_SIZE|CACHE_TYPE|SIZE)|VARIABLE_PREFERRED_TOTAL_SIZE)|HOST_UNIFIED_MEMORY|IL_VERSION|IMAGE(?:2D_MAX_(?:HEIGHT|WIDTH)|3D_MAX_(?:DEPTH|HEIGHT|WIDTH)|_BASE_ADDRESS_ALIGNMENT|_MAX_ARRAY_SIZE|_MAX_BUFFER_SIZE|_PITCH_ALIGNMENT|_SUPPORT)|PLIER_AVAILABLE|LOCAL_MEM_SIZE|LOCAL_MEM_TYPE|MAX_(?:CLOCK_FREQUENCY|COMPUTE_UNITS|CONSTANT_ARGS|CONSTANT_BUFFER_SIZE|GLOBAL_VARIABLE_SIZE|MEM_ALLOC_SIZE|NUM_SUB_GROUPS|ON_DEVICE_(?:EVENTS|QUEUES)|PARAMETER_SIZE|PIPE_ARGS|READ_IMAGE_ARGS|READ_WRITE_IMAGE_ARGS|SAMPLERS|WORK_GROUP_SIZE|WORK_ITEM_DIMENSIONS|WORK_ITEM_SIZES|WRITE_IMAGE_
ARGS)|MEM_BASE_ADDR_ALIGN|MIN_DATA_TYPE_ALIGN_SIZE|NAME|NATIVE_VECTOR_WIDTH_(?:CHAR|DOUBLE|FLOAT|HALF|INT|LONG|SHORT)|NOT_(?:AVAILABLE|FOUND)|OPENCL_C_VERSION|PARENT_DEVICE|PARTITION_(?:AFFINITY_DOMAIN|BY_AFFINITY_DOMAIN|BY_COUNTS|BY_COUNTS_LIST_END|EQUALLY|FAILED|MAX_SUB_DEVICES|PROPERTIES|TYPE)|PIPE_MAX_(?:ACTIVE_RESERVATIONS|PACKET_SIZE)|PLATFORM|PREFERRED_(?:GLOBAL_ATOMIC_ALIGNMENT|INTEROP_USER_SYNC|LOCAL_ATOMIC_ALIGNMENT|PLATFORM_ATOMIC_ALIGNMENT|VECTOR_WIDTH_(?:CHAR|DOUBLE|FLOAT|HALF|INT|LONG|SHORT))|PRINTF_BUFFER_SIZE|PROFILE|PROFILING_TIMER_RESOLUTION|QUEUE_(?:ON_(?:DEVICE_(?:MAX_SIZE|PREFERRED_SIZE|PROPERTIES)|HOST_PROPERTIES)|PROPERTIES)|REFERENCE_COUNT|SINGLE_FP_CONFIG|SUB_GROUP_INDEPENDENT_FORWARD_PROGRESS|SVM_(?:ATOMICS|CAPABILITIES|COARSE_GRAIN_BUFFER|FINE_GRAIN_BUFFER|FINE_GRAIN_SYSTEM)|TYPE(?:_ACCELERATOR|_ALL|_CPU|_CUSTOM|_DEFAULT|_GPU)?|VENDOR(?:_ID)?|VERSION)|DRIVER_VERSION|EVENT_(?:COMMAND_(?:EXECUTION_STATUS|QUEUE|TYPE)|CONTEXT|REFERENCE_COUNT)|EXEC_(?:KERNEL|NATIVE_KERNEL|STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST)|FILTER_(?:LINEAR|NEAREST)|FLOAT|FP_(?:CORRECTLY_ROUNDED_DIVIDE_SQRT|DENORM|FMA|INF_NAN|ROUND_TO_INF|ROUND_TO_NEAREST|ROUND_TO_ZERO|SOFT_FLOAT)|GLOBAL|HALF_FLOAT|IMAGE_(?:ARRAY_SIZE|BUFFER|DEPTH|ELEMENT_SIZE|FORMAT|FORMAT_MISMATCH|FORMAT_NOT_SUPPORTED|HEIGHT|NUM_MIP_LEVELS|NUM_SAMPLES|ROW_PITCH|SLICE_PITCH|WIDTH)|INTENSITY|INVALID_(?:ARG_INDEX|ARG_SIZE|ARG_VALUE|BINARY|BUFFER_SIZE|BUILD_OPTIONS|COMMAND_QUEUE|COMPILER_OPTIONS|CONTEXT|DEVICE|DEVICE_PARTITION_COUNT|DEVICE_QUEUE|DEVICE_TYPE|EVENT|EVENT_WAIT_LIST|GLOBAL_OFFSET|GLOBAL_WORK_SIZE|GL_OBJECT|HOST_PTR|IMAGE_DESCRIPTOR|IMAGE_FORMAT_DESCRIPTOR|IMAGE_SIZE|KERNEL|KERNEL_ARGS|KERNEL_DEFINITION|KERNEL_NAME|PLIER_OPTIONS|MEM_OBJECT|MIP_LEVEL|OPERATION|PIPE_SIZE|PLATFORM|PROGRAM|PROGRAM_EXECUTABLE|PROPERTY|QUEUE_PROPERTIES|SAMPLER|VALUE|WORK_DIMENSION|WORK_GROUP_SIZE|WORK_ITEM_SIZE)|KERNEL_(?:ARG_(?:ACCESS_(?:NONE|QUALIFIER|READ_ONLY|READ_WRITE|WRITE_ONLY)|ADDRESS_(?:CONSTANT|GLOBAL|LOCAL|PRI
VATE|QUALIFIER)|INFO_NOT_AVAILABLE|NAME|TYPE_(?:CONST|NAME|NONE|PIPE|QUALIFIER|RESTRICT|VOLATILE))|ATTRIBUTES|COMPILE_NUM_SUB_GROUPS|COMPILE_WORK_GROUP_SIZE|CONTEXT|EXEC_INFO_SVM_FINE_GRAIN_SYSTEM|EXEC_INFO_SVM_PTRS|FUNCTION_NAME|GLOBAL_WORK_SIZE|LOCAL_MEM_SIZE|LOCAL_SIZE_FOR_SUB_GROUP_COUNT|MAX_NUM_SUB_GROUPS|MAX_SUB_GROUP_SIZE_FOR_NDRANGE|NUM_ARGS|PREFERRED_WORK_GROUP_SIZE_MULTIPLE|PRIVATE_MEM_SIZE|PROGRAM|REFERENCE_COUNT|SUB_GROUP_COUNT_FOR_NDRANGE|WORK_GROUP_SIZE)|PLIER_NOT_AVAILABLE|PLI_PROGRAM_FAILURE|LOCAL|LUMINANCE|MAP_(?:FAILURE|READ|WRITE|WRITE_INVALIDATE_REGION)|MEM_(?:ALLOC_HOST_PTR|ASSOCIATED_MEMOBJECT|CONTEXT|COPY_HOST_PTR|COPY_OVERLAP|FLAGS|HOST_NO_ACCESS|HOST_PTR|HOST_READ_ONLY|HOST_WRITE_ONLY|KERNEL_READ_AND_WRITE|MAP_COUNT|OBJECT_(?:ALLOCATION_FAILURE|BUFFER|IMAGE1D|IMAGE1D_ARRAY|IMAGE1D_BUFFER|IMAGE2D|IMAGE2D_ARRAY|IMAGE3D|PIPE)|OFFSET|READ_ONLY|READ_WRITE|REFERENCE_COUNT|SIZE|SVM_ATOMICS|SVM_FINE_GRAIN_BUFFER|TYPE|USES_SVM_POINTER|USE_HOST_PTR|WRITE_ONLY)|MIGRATE_MEM_OBJECT_(?:CONTENT_UNDEFINED|HOST)|MISALIGNED_SUB_BUFFER_OFFSET|NONE|NON_BLOCKING|OUT_OF_(?:HOST_MEMORY|RESOURCES)|PIPE_(?:MAX_PACKETS|PACKET_SIZE)|PLATFORM_(?:EXTENSIONS|HOST_TIMER_RESOLUTION|NAME|PROFILE|VENDOR|VERSION)|PROFILING_(?:COMMAND_(?:COMPLETE|END|QUEUED|START|SUBMIT)|INFO_NOT_AVAILABLE)|PROGRAM_(?:BINARIES|BINARY_SIZES|BINARY_TYPE(?:_COMPILED_OBJECT|_EXECUTABLE|_LIBRARY|_NONE)?|BUILD_(?:GLOBAL_VARIABLE_TOTAL_SIZE|LOG|OPTIONS|STATUS)|CONTEXT|DEVICES|IL|KERNEL_NAMES|NUM_DEVICES|NUM_KERNELS|REFERENCE_COUNT|SOURCE)|QUEUED|QUEUE_(?:CONTEXT|DEVICE|DEVICE_DEFAULT|ON_DEVICE|ON_DEVICE_DEFAULT|OUT_OF_ORDER_EXEC_MODE_ENABLE|PROFILING_ENABLE|PROPERTIES|REFERENCE_COUNT|SIZE)|R|RA|READ_(?:ONLY|WRITE)_CACHE|RG|RGB|RGBA|RGBx|RGx|RUNNING|Rx|SAMPLER_(?:ADDRESSING_MODE|CONTEXT|FILTER_MODE|LOD_MAX|LOD_MIN|MIP_FILTER_MODE|NORMALIZED_COORDS|REFERENCE_COUNT)|(?:UN)?SIGNED_INT(?:8|16|32)|SNORM_INT(?:8|16)|SUBMITTED|SUCCESS|UNORM_INT(?:16|24|8|_101010|_101010_2)|UNORM_SHORT_(?:555|565)|VERSION_(?:
1_0|1_1|1_2|2_0|2_1)|sBGRA|sRGB|sRGBA|sRGBx)\b/,alias:"constant"},"function-opencl-host":{pattern:/\bcl(?:BuildProgram|CloneKernel|CompileProgram|Create(?:Buffer|CommandQueue(?:WithProperties)?|Context|ContextFromType|Image|Image2D|Image3D|Kernel|KernelsInProgram|Pipe|ProgramWith(?:Binary|BuiltInKernels|IL|Source)|Sampler|SamplerWithProperties|SubBuffer|SubDevices|UserEvent)|Enqueue(?:(?:Barrier|Marker)(?:WithWaitList)?|Copy(?:Buffer(?:Rect|ToImage)?|Image(?:ToBuffer)?)|(?:Fill|Map)(?:Buffer|Image)|MigrateMemObjects|NDRangeKernel|NativeKernel|(?:Read|Write)(?:Buffer(?:Rect)?|Image)|SVM(?:Free|Map|MemFill|Memcpy|MigrateMem|Unmap)|Task|UnmapMemObject|WaitForEvents)|Finish|Flush|Get(?:CommandQueueInfo|ContextInfo|Device(?:AndHostTimer|IDs|Info)|Event(?:Profiling)?Info|ExtensionFunctionAddress(?:ForPlatform)?|HostTimer|ImageInfo|Kernel(?:ArgInfo|Info|SubGroupInfo|WorkGroupInfo)|MemObjectInfo|PipeInfo|Platform(?:IDs|Info)|Program(?:Build)?Info|SamplerInfo|SupportedImageFormats)|LinkProgram|(?:Release|Retain)(?:CommandQueue|Context|Device|Event|Kernel|MemObject|Program|Sampler)|SVM(?:Alloc|Free)|Set(?:CommandQueueProperty|DefaultDeviceCommandQueue|EventCallback|Kernel(?:Arg(?:SVMPointer)?|ExecInfo)|Kernel|MemObjectDestructorCallback|UserEventStatus)|Unload(?:Platform)?Compiler|WaitForEvents)\b/,alias:"function"}},t.languages.insertBefore("c","keyword",n),t.languages.cpp&&(n["type-opencl-host-cpp"]={pattern:/\b(?:Buffer|BufferGL|BufferRenderGL|CommandQueue|Context|Device|DeviceCommandQueue|EnqueueArgs|Event|Image|Image1D|Image1DArray|Image1DBuffer|Image2D|Image2DArray|Image2DGL|Image3D|Image3DGL|ImageFormat|ImageGL|Kernel|KernelFunctor|LocalSpaceArg|Memory|NDRange|Pipe|Platform|Program|Sampler|SVMAllocator|SVMTraitAtomic|SVMTraitCoarse|SVMTraitFine|SVMTraitReadOnly|SVMTraitReadWrite|SVMTraitWriteOnly|UserEvent)\b/,alias:"keyword"},t.languages.insertBefore("cpp","keyword",n))}e.exports=i,i.displayName="opencl",i.aliases=[]},22950(e){"use strict";function 
t(e){e.languages.openqasm={comment:/\/\*[\s\S]*?\*\/|\/\/.*/,string:{pattern:/"[^"\r\n\t]*"|'[^'\r\n\t]*'/,greedy:!0},keyword:/\b(?:barrier|boxas|boxto|break|const|continue|ctrl|def|defcal|defcalgrammar|delay|else|end|for|gate|gphase|if|in|include|inv|kernel|lengthof|let|measure|pow|reset|return|rotary|stretchinf|while|CX|OPENQASM|U)\b|#pragma\b/,"class-name":/\b(?:angle|bit|bool|creg|fixed|float|int|length|qreg|qubit|stretch|uint)\b/,function:/\b(?:sin|cos|tan|exp|ln|sqrt|rotl|rotr|popcount)\b(?=\s*\()/,constant:/\b(?:pi|tau|euler)\b|π|𝜏|ℇ/,number:{pattern:/(^|[^.\w$])(?:\d+(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?(?:dt|ns|us|µs|ms|s)?/i,lookbehind:!0},operator:/->|>>=?|<<=?|&&|\|\||\+\+|--|[!=<>&|~^+\-*/%]=?|@/,punctuation:/[(){}\[\];,:.]/},e.languages.qasm=e.languages.openqasm}e.exports=t,t.displayName="openqasm",t.aliases=["qasm"]},23254(e){"use strict";function t(e){e.languages.oz={comment:/\/\*[\s\S]*?\*\/|%.*/,string:{pattern:/"(?:[^"\\]|\\[\s\S])*"/,greedy:!0},atom:{pattern:/'(?:[^'\\]|\\[\s\S])*'/,greedy:!0,alias:"builtin"},keyword:/\$|\[\]|\b(?:_|at|attr|case|catch|choice|class|cond|declare|define|dis|else(?:case|if)?|end|export|fail|false|feat|finally|from|fun|functor|if|import|in|local|lock|meth|nil|not|of|or|prepare|proc|prop|raise|require|self|skip|then|thread|true|try|unit)\b/,function:[/\b[a-z][A-Za-z\d]*(?=\()/,{pattern:/(\{)[A-Z][A-Za-z\d]*\b/,lookbehind:!0}],number:/\b(?:0[bx][\da-f]+|\d+(?:\.\d*)?(?:e~?\d+)?)\b|&(?:[^\\]|\\(?:\d{3}|.))/i,variable:/\b[A-Z][A-Za-z\d]*|`(?:[^`\\]|\\.)+`/,"attr-name":/\b\w+(?=:)/,operator:/:(?:=|::?)|<[-:=]?|=(?:=|=?:?|\\=:?|!!?|[|#+\-*\/,~^@]|\b(?:andthen|div|mod|orelse)\b/,punctuation:/[\[\](){}.:;?]/}}e.exports=t,t.displayName="oz",t.aliases=[]},92694(e){"use strict";function t(e){var 
/* Prism grammar: PARI/GP (declaration continues from the previous line). Each keyword is matched with optional spaces between its letters — built via split("").join(" *") — to mirror GP's whitespace-tolerant reader. */t;e.languages.parigp={comment:/\/\*[\s\S]*?\*\/|\\\\.*/,string:{pattern:/"(?:[^"\\\r\n]|\\.)*"/,greedy:!0},keyword:RegExp("\\b(?:"+(t=(t=["breakpoint","break","dbg_down","dbg_err","dbg_up","dbg_x","forcomposite","fordiv","forell","forpart","forprime","forstep","forsubgroup","forvec","for","iferr","if","local","my","next","return","until","while"]).map(function(e){return e.split("").join(" *")}).join("|"))+")\\b"),function:/\b\w(?:[\w ]*\w)?(?= *\()/,number:{pattern:/((?:\. *\. *)?)(?:\b\d(?: *\d)*(?: *(?!\. *\.)\.(?: *\d)*)?|\. *\d(?: *\d)*)(?: *e *(?:[+-] *)?\d(?: *\d)*)?/i,lookbehind:!0},operator:/\. *\.|[*\/!](?: *=)?|%(?: *=|(?: *#)?(?: *')*)?|\+(?: *[+=])?|-(?: *[-=>])?|<(?: *>|(?: *<)?(?: *=)?)?|>(?: *>)?(?: *=)?|=(?: *=){0,2}|\\(?: *\/)?(?: *=)?|&(?: *&)?|\| *\||['#~^]/,punctuation:/[\[\]{}().,:;|]/}}e.exports=t,t.displayName="parigp",t.aliases=[]},43273(e){"use strict";/* Prism grammar: Parser (templating language) — extends markup; definition continues on the next line. */function t(e){var t,n;n=(t=e).languages.parser=t.languages.extend("markup",{keyword:{pattern:/(^|[^^])(?:\^(?:case|eval|for|if|switch|throw)\b|@(?:BASE|CLASS|GET(?:_DEFAULT)?|OPTIONS|SET_DEFAULT|USE)\b)/,lookbehind:!0},variable:{pattern:/(^|[^^])\B\$(?:\w+|(?=[.{]))(?:(?:\.|::?)\w+)*(?:\.|::?)?/,lookbehind:!0,inside:{punctuation:/\.|:+/}},function:{pattern:/(^|[^^])\B[@^]\w+(?:(?:\.|::?)\w+)*(?:\.|::?)?/,lookbehind:!0,inside:{keyword:{pattern:/(^@)(?:GET_|SET_)/,lookbehind:!0},punctuation:/\.|:+/}},escape:{pattern:/\^(?:[$^;@()\[\]{}"':]|#[a-f\d]*)/i,alias:"builtin"},punctuation:/[\[\](){};]/}),n=t.languages.insertBefore("parser","keyword",{"parser-comment":{pattern:/(\s)#.*/,lookbehind:!0,alias:"comment"},expression:{pattern:/(^|[^^])\((?:[^()]|\((?:[^()]|\((?:[^()])*\))*\))*\)/,greedy:!0,lookbehind:!0,inside:{string:{pattern:/(^|[^^])(["'])(?:(?!\2)[^^]|\^[\s\S])*\2/,lookbehind:!0},keyword:n.keyword,variable:n.variable,function:n.function,boolean:/\b(?:true|false)\b/,number:/\b(?:0x[a-f\d]+|\d+(?:\.\d*)?(?:e[+-]?\d+)?)\b/i,escape:n.escape,operator:/[~+*\/\\%]|!(?:\|\|?|=)?|&&?|\|\|?|==|<[<=]
?|>[>=]?|-[fd]?|\b(?:def|eq|ge|gt|in|is|le|lt|ne)\b/,punctuation:n.punctuation}}}),t.languages.insertBefore("inside","punctuation",{expression:n.expression,keyword:n.keyword,variable:n.variable,function:n.function,escape:n.escape,"parser-punctuation":{pattern:n.punctuation,alias:"punctuation"}},n.tag.inside["attr-value"])}e.exports=t,t.displayName="parser",t.aliases=[]},60718(e){"use strict";function t(e){e.languages.pascal={comment:[/\(\*[\s\S]+?\*\)/,/\{[\s\S]+?\}/,/\/\/.*/],string:{pattern:/(?:'(?:''|[^'\r\n])*'(?!')|#[&$%]?[a-f\d]+)+|\^[a-z]/i,greedy:!0},keyword:[{pattern:/(^|[^&])\b(?:absolute|array|asm|begin|case|const|constructor|destructor|do|downto|else|end|file|for|function|goto|if|implementation|inherited|inline|interface|label|nil|object|of|operator|packed|procedure|program|record|reintroduce|repeat|self|set|string|then|to|type|unit|until|uses|var|while|with)\b/i,lookbehind:!0},{pattern:/(^|[^&])\b(?:dispose|exit|false|new|true)\b/i,lookbehind:!0},{pattern:/(^|[^&])\b(?:class|dispinterface|except|exports|finalization|finally|initialization|inline|library|on|out|packed|property|raise|resourcestring|threadvar|try)\b/i,lookbehind:!0},{pattern:/(^|[^&])\b(?:absolute|abstract|alias|assembler|bitpacked|break|cdecl|continue|cppdecl|cvar|default|deprecated|dynamic|enumerator|experimental|export|external|far|far16|forward|generic|helper|implements|index|interrupt|iochecks|local|message|name|near|nodefault|noreturn|nostackframe|oldfpccall|otherwise|overload|override|pascal|platform|private|protected|public|published|read|register|reintroduce|result|safecall|saveregisters|softfloat|specialize|static|stdcall|stored|strict|unaligned|unimplemented|varargs|virtual|write)\b/i,lookbehind:!0}],number:[/(?:[&%]\d+|\$[a-f\d]+)/i,/\b\d+(?:\.\d+)?(?:e[+-]?\d+)?/i],operator:[/\.\.|\*\*|:=|<[<=>]?|>[>=]?|[+\-*\/]=?|[@^=]/i,{pattern:/(^|[^&])\b(?:and|as|div|exclude|in|include|is|mod|not|or|shl|shr|xor)\b/,lookbehind:!0}],punctuation:/\(\.|\.\)|[()\[\]:;,.]/},e.languages.objectpa
scal=e.languages.pascal}e.exports=t,t.displayName="pascal",t.aliases=["objectpascal"]},39303(e){"use strict";function t(e){var t,n,r,i,a;t=e,n=/\((?:[^()]|\((?:[^()]|\([^()]*\))*\))*\)/.source,r=/(?:\b\w+(?:)?|)/.source.replace(//g,function(){return n}),i=t.languages.pascaligo={comment:/\(\*[\s\S]+?\*\)|\/\/.*/,string:{pattern:/(["'`])(?:\\[\s\S]|(?!\1)[^\\])*\1|\^[a-z]/i,greedy:!0},"class-name":[{pattern:RegExp(/(\btype\s+\w+\s+is\s+)/.source.replace(//g,function(){return r}),"i"),lookbehind:!0,inside:null},{pattern:RegExp(/(?=\s+is\b)/.source.replace(//g,function(){return r}),"i"),inside:null},{pattern:RegExp(/(:\s*)/.source.replace(//g,function(){return r})),lookbehind:!0,inside:null}],keyword:{pattern:/(^|[^&])\b(?:begin|block|case|const|else|end|fail|for|from|function|if|is|nil|of|remove|return|skip|then|type|var|while|with)\b/i,lookbehind:!0},boolean:{pattern:/(^|[^&])\b(?:True|False)\b/i,lookbehind:!0},builtin:{pattern:/(^|[^&])\b(?:bool|int|list|map|nat|record|string|unit)\b/i,lookbehind:!0},function:/\b\w+(?=\s*\()/i,number:[/%[01]+|&[0-7]+|\$[a-f\d]+/i,/\b\d+(?:\.\d+)?(?:e[+-]?\d+)?(?:mtz|n)?/i],operator:/->|=\/=|\.\.|\*\*|:=|<[<=>]?|>[>=]?|[+\-*\/]=?|[@^=|]|\b(?:and|mod|or)\b/,punctuation:/\(\.|\.\)|[()\[\]:;,.{}]/},a=["comment","keyword","builtin","operator","punctuation"].reduce(function(e,t){return e[t]=i[t],e},{}),i["class-name"].forEach(function(e){e.inside=a})}e.exports=t,t.displayName="pascaligo",t.aliases=[]},77393(e){"use strict";function 
t(e){/* Prism grammar: PC-Axis (.px statistical-file format) — keyword heads with optional [language] tags and ("...") subkeys, TLIST time lists; aliased "px". */e.languages.pcaxis={string:/"[^"]*"/,keyword:{pattern:/((?:^|;)\s*)[-A-Z\d]+(?:\s*\[[-\w]+\])?(?:\s*\("[^"]*"(?:,\s*"[^"]*")*\))?(?=\s*=)/,lookbehind:!0,greedy:!0,inside:{keyword:/^[-A-Z\d]+/,language:{pattern:/^(\s*)\[[-\w]+\]/,lookbehind:!0,inside:{punctuation:/^\[|\]$/,property:/[-\w]+/}},"sub-key":{pattern:/^(\s*)\S[\s\S]*/,lookbehind:!0,inside:{parameter:{pattern:/"[^"]*"/,alias:"property"},punctuation:/^\(|\)$|,/}}}},operator:/=/,tlist:{pattern:/TLIST\s*\(\s*\w+(?:(?:\s*,\s*"[^"]*")+|\s*,\s*"[^"]*"-"[^"]*")?\s*\)/,greedy:!0,inside:{function:/^TLIST/,property:{pattern:/^(\s*\(\s*)\w+/,lookbehind:!0},string:/"[^"]*"/,punctuation:/[(),]/,operator:/-/}},punctuation:/[;,]/,number:{pattern:/(^|\s)\d+(?:\.\d+)?(?!\S)/,lookbehind:!0},boolean:/YES|NO/},e.languages.px=e.languages.pcaxis}e.exports=t,t.displayName="pcaxis",t.aliases=["px"]},19023(e){"use strict";/* Prism grammar: PeopleCode — definition continues on the next line; aliased "pcode". */function t(e){e.languages.peoplecode={comment:RegExp([/\/\*[\s\S]*?\*\//.source,/\bREM[^;]*;/.source,/<\*(?:[^<*]|\*(?!>)|<(?!\*)|<\*(?:(?!\*>)[\s\S])*\*>)*\*>/.source,/\/\+[\s\S]*?\+\//.source].join("|")),string:{pattern:/'(?:''|[^'\r\n])*'(?!')|"(?:""|[^"\r\n])*"(?!")/,greedy:!0},variable:/%\w+/,"function-definition":{pattern:/((?:^|[^\w-])(?:function|method)\s+)\w+/i,lookbehind:!0,alias:"function"},"class-name":{pattern:/((?:^|[^-\w])(?:as|catch|class|component|create|extends|global|implements|instance|local|of|property|returns)\s+)\w+(?::\w+)*/i,lookbehind:!0,inside:{punctuation:/:/}},keyword:/\b(?:abstract|alias|as|catch|class|component|constant|create|declare|else|end-(?:class|evaluate|for|function|get|if|method|set|try|while)|evaluate|extends|for|function|get|global|implements|import|instance|if|library|local|method|null|of|out|peopleCode|private|program|property|protected|readonly|ref|repeat|returns?|set|step|then|throw|to|try|until|value|when(?:-other)?|while)\b/i,"operator-keyword":{pattern:/\b(?:and|not|or)\b/i,alias:"operator"},function:/[_a-z]\w*(?=\s*\()/i,boolean:/\b(?:false|true)\b/i,number:/\
b\d+(?:\.\d+)?\b/,operator:/<>|[<>]=?|!=|\*\*|[-+*/|=@]/,punctuation:/[:.;,()[\]]/},e.languages.pcode=e.languages.peoplecode}e.exports=t,t.displayName="peoplecode",t.aliases=["pcode"]},74212(e){"use strict";function t(e){e.languages.perl={comment:[{pattern:/(^\s*)=\w[\s\S]*?=cut.*/m,lookbehind:!0},{pattern:/(^|[^\\$])#.*/,lookbehind:!0}],string:[{pattern:/\b(?:q|qq|qx|qw)\s*([^a-zA-Z0-9\s{(\[<])(?:(?!\1)[^\\]|\\[\s\S])*\1/,greedy:!0},{pattern:/\b(?:q|qq|qx|qw)\s+([a-zA-Z0-9])(?:(?!\1)[^\\]|\\[\s\S])*\1/,greedy:!0},{pattern:/\b(?:q|qq|qx|qw)\s*\((?:[^()\\]|\\[\s\S])*\)/,greedy:!0},{pattern:/\b(?:q|qq|qx|qw)\s*\{(?:[^{}\\]|\\[\s\S])*\}/,greedy:!0},{pattern:/\b(?:q|qq|qx|qw)\s*\[(?:[^[\]\\]|\\[\s\S])*\]/,greedy:!0},{pattern:/\b(?:q|qq|qx|qw)\s*<(?:[^<>\\]|\\[\s\S])*>/,greedy:!0},{pattern:/("|`)(?:(?!\1)[^\\]|\\[\s\S])*\1/,greedy:!0},{pattern:/'(?:[^'\\\r\n]|\\.)*'/,greedy:!0}],regex:[{pattern:/\b(?:m|qr)\s*([^a-zA-Z0-9\s{(\[<])(?:(?!\1)[^\\]|\\[\s\S])*\1[msixpodualngc]*/,greedy:!0},{pattern:/\b(?:m|qr)\s+([a-zA-Z0-9])(?:(?!\1)[^\\]|\\[\s\S])*\1[msixpodualngc]*/,greedy:!0},{pattern:/\b(?:m|qr)\s*\((?:[^()\\]|\\[\s\S])*\)[msixpodualngc]*/,greedy:!0},{pattern:/\b(?:m|qr)\s*\{(?:[^{}\\]|\\[\s\S])*\}[msixpodualngc]*/,greedy:!0},{pattern:/\b(?:m|qr)\s*\[(?:[^[\]\\]|\\[\s\S])*\][msixpodualngc]*/,greedy:!0},{pattern:/\b(?:m|qr)\s*<(?:[^<>\\]|\\[\s\S])*>[msixpodualngc]*/,greedy:!0},{pattern:/(^|[^-]\b)(?:s|tr|y)\s*([^a-zA-Z0-9\s{(\[<])(?:(?!\2)[^\\]|\\[\s\S])*\2(?:(?!\2)[^\\]|\\[\s\S])*\2[msixpodualngcer]*/,lookbehind:!0,greedy:!0},{pattern:/(^|[^-]\b)(?:s|tr|y)\s+([a-zA-Z0-9])(?:(?!\2)[^\\]|\\[\s\S])*\2(?:(?!\2)[^\\]|\\[\s\S])*\2[msixpodualngcer]*/,lookbehind:!0,greedy:!0},{pattern:/(^|[^-]\b)(?:s|tr|y)\s*\((?:[^()\\]|\\[\s\S])*\)\s*\((?:[^()\\]|\\[\s\S])*\)[msixpodualngcer]*/,lookbehind:!0,greedy:!0},{pattern:/(^|[^-]\b)(?:s|tr|y)\s*\{(?:[^{}\\]|\\[\s\S])*\}\s*\{(?:[^{}\\]|\\[\s\S])*\}[msixpodualngcer]*/,lookbehind:!0,greedy:!0},{pattern:/(^|[^-]\b)(?:s|tr|y)\s*\[(?:[^[\]\\]|
\\[\s\S])*\]\s*\[(?:[^[\]\\]|\\[\s\S])*\][msixpodualngcer]*/,lookbehind:!0,greedy:!0},{pattern:/(^|[^-]\b)(?:s|tr|y)\s*<(?:[^<>\\]|\\[\s\S])*>\s*<(?:[^<>\\]|\\[\s\S])*>[msixpodualngcer]*/,lookbehind:!0,greedy:!0},{pattern:/\/(?:[^\/\\\r\n]|\\.)*\/[msixpodualngc]*(?=\s*(?:$|[\r\n,.;})&|\-+*~<>!?^]|(?:lt|gt|le|ge|eq|ne|cmp|not|and|or|xor|x)\b))/,greedy:!0}],variable:[/[&*$@%]\{\^[A-Z]+\}/,/[&*$@%]\^[A-Z_]/,/[&*$@%]#?(?=\{)/,/[&*$@%]#?(?:(?:::)*'?(?!\d)[\w$]+(?![\w$]))+(?:::)*/i,/[&*$@%]\d+/,/(?!%=)[$@%][!"#$%&'()*+,\-.\/:;<=>?@[\\\]^_`{|}~]/],filehandle:{pattern:/<(?![<=])\S*>|\b_\b/,alias:"symbol"},vstring:{pattern:/v\d+(?:\.\d+)*|\d+(?:\.\d+){2,}/,alias:"string"},function:{pattern:/sub \w+/i,inside:{keyword:/sub/}},keyword:/\b(?:any|break|continue|default|delete|die|do|else|elsif|eval|for|foreach|given|goto|if|last|local|my|next|our|package|print|redo|require|return|say|state|sub|switch|undef|unless|until|use|when|while)\b/,number:/\b(?:0x[\dA-Fa-f](?:_?[\dA-Fa-f])*|0b[01](?:_?[01])*|(?:(?:\d(?:_?\d)*)?\.)?\d(?:_?\d)*(?:[Ee][+-]?\d+)?)\b/,operator:/-[rwxoRWXOezsfdlpSbctugkTBMAC]\b|\+[+=]?|-[-=>]?|\*\*?=?|\/\/?=?|=[=~>]?|~[~=]?|\|\|?=?|&&?=?|<(?:=>?|<=?)?|>>?=?|![~=]?|[%^]=?|\.(?:=|\.\.?)?|[\\?]|\bx(?:=|\b)|\b(?:lt|gt|le|ge|eq|ne|cmp|not|and|or|xor)\b/,punctuation:/[{}[\];(),:]/}}e.exports=t,t.displayName="perl",t.aliases=[]},5137(e,t,n){"use strict";var r=n(88262);function i(e){e.register(r),e.languages.insertBefore("php","variable",{this:/\$this\b/,global:/\$(?:_(?:SERVER|GET|POST|FILES|REQUEST|SESSION|ENV|COOKIE)|GLOBALS|HTTP_RAW_POST_DATA|argc|argv|php_errormsg|http_response_header)\b/,scope:{pattern:/\b[\w\\]+::/,inside:{keyword:/static|self|parent/,punctuation:/::|\\/}}})}e.exports=i,i.displayName="phpExtras",i.aliases=[]},88262(e,t,n){"use strict";var r=n(93205);function i(e){var 
t,n,i,a,o,s,u,c;e.register(r),n=/\/\*[\s\S]*?\*\/|\/\/.*|#(?!\[).*/,i=[{pattern:/\b(?:false|true)\b/i,alias:"boolean"},{pattern:/(::\s*)\b[a-z_]\w*\b(?!\s*\()/i,greedy:!0,lookbehind:!0},{pattern:/(\b(?:case|const)\s+)\b[a-z_]\w*(?=\s*[;=])/i,greedy:!0,lookbehind:!0},/\b(?:null)\b/i,/\b[A-Z_][A-Z0-9_]*\b(?!\s*\()/],a=/\b0b[01]+(?:_[01]+)*\b|\b0o[0-7]+(?:_[0-7]+)*\b|\b0x[\da-f]+(?:_[\da-f]+)*\b|(?:\b\d+(?:_\d+)*\.?(?:\d+(?:_\d+)*)?|\B\.\d+)(?:e[+-]?\d+)?/i,o=/|\?\?=?|\.{3}|\??->|[!=]=?=?|::|\*\*=?|--|\+\+|&&|\|\||<<|>>|[?~]|[/^|%*&<>.+-]=?/,s=/[{}\[\](),:;]/,(t=e).languages.php={delimiter:{pattern:/\?>$|^<\?(?:php(?=\s)|=)?/i,alias:"important"},comment:n,variable:/\$+(?:\w+\b|(?=\{))/i,package:{pattern:/(namespace\s+|use\s+(?:function\s+)?)(?:\\?\b[a-z_]\w*)+\b(?!\\)/i,lookbehind:!0,inside:{punctuation:/\\/}},"class-name-definition":{pattern:/(\b(?:class|enum|interface|trait)\s+)\b[a-z_]\w*(?!\\)\b/i,lookbehind:!0,alias:"class-name"},"function-definition":{pattern:/(\bfunction\s+)[a-z_]\w*(?=\s*\()/i,lookbehind:!0,alias:"function"},keyword:[{pattern:/(\(\s*)\b(?:bool|boolean|int|integer|float|string|object|array)\b(?=\s*\))/i,alias:"type-casting",greedy:!0,lookbehind:!0},{pattern:/([(,?]\s*)\b(?:bool|int|float|string|object|array(?!\s*\()|mixed|self|static|callable|iterable|(?:null|false)(?=\s*\|))\b(?=\s*\$)/i,alias:"type-hint",greedy:!0,lookbehind:!0},{pattern:/([(,?]\s*[\w|]\|\s*)(?:null|false)\b(?=\s*\$)/i,alias:"type-hint",greedy:!0,lookbehind:!0},{pattern:/(\)\s*:\s*(?:\?\s*)?)\b(?:bool|int|float|string|object|void|array(?!\s*\()|mixed|self|static|callable|iterable|(?:null|false)(?=\s*\|))\b/i,alias:"return-type",greedy:!0,lookbehind:!0},{pattern:/(\)\s*:\s*(?:\?\s*)?[\w|]\|\s*)(?:null|false)\b/i,alias:"return-type",greedy:!0,lookbehind:!0},{pattern:/\b(?:bool|int|float|string|object|void|array(?!\s*\()|mixed|iterable|(?:null|false)(?=\s*\|))\b/i,alias:"type-declaration",greedy:!0},{pattern:/(\|\s*)(?:null|false)\b/i,alias:"type-declaration",greedy:!0,lookbehind
:!0},{pattern:/\b(?:parent|self|static)(?=\s*::)/i,alias:"static-context",greedy:!0},{pattern:/(\byield\s+)from\b/i,lookbehind:!0},/\bclass\b/i,{pattern:/((?:^|[^\s>:]|(?:^|[^-])>|(?:^|[^:]):)\s*)\b(?:__halt_compiler|abstract|and|array|as|break|callable|case|catch|clone|const|continue|declare|default|die|do|echo|else|elseif|empty|enddeclare|endfor|endforeach|endif|endswitch|endwhile|enum|eval|exit|extends|final|finally|fn|for|foreach|function|global|goto|if|implements|include|include_once|instanceof|insteadof|interface|isset|list|namespace|match|new|or|parent|print|private|protected|public|require|require_once|return|self|static|switch|throw|trait|try|unset|use|var|while|xor|yield)\b/i,lookbehind:!0}],"argument-name":{pattern:/([(,]\s+)\b[a-z_]\w*(?=\s*:(?!:))/i,lookbehind:!0},"class-name":[{pattern:/(\b(?:extends|implements|instanceof|new(?!\s+self|\s+static))\s+|\bcatch\s*\()\b[a-z_]\w*(?!\\)\b/i,greedy:!0,lookbehind:!0},{pattern:/(\|\s*)\b[a-z_]\w*(?!\\)\b/i,greedy:!0,lookbehind:!0},{pattern:/\b[a-z_]\w*(?!\\)\b(?=\s*\|)/i,greedy:!0},{pattern:/(\|\s*)(?:\\?\b[a-z_]\w*)+\b/i,alias:"class-name-fully-qualified",greedy:!0,lookbehind:!0,inside:{punctuation:/\\/}},{pattern:/(?:\\?\b[a-z_]\w*)+\b(?=\s*\|)/i,alias:"class-name-fully-qualified",greedy:!0,inside:{punctuation:/\\/}},{pattern:/(\b(?:extends|implements|instanceof|new(?!\s+self\b|\s+static\b))\s+|\bcatch\s*\()(?:\\?\b[a-z_]\w*)+\b(?!\\)/i,alias:"class-name-fully-qualified",greedy:!0,lookbehind:!0,inside:{punctuation:/\\/}},{pattern:/\b[a-z_]\w*(?=\s*\$)/i,alias:"type-declaration",greedy:!0},{pattern:/(?:\\?\b[a-z_]\w*)+(?=\s*\$)/i,alias:["class-name-fully-qualified","type-declaration"],greedy:!0,inside:{punctuation:/\\/}},{pattern:/\b[a-z_]\w*(?=\s*::)/i,alias:"static-context",greedy:!0},{pattern:/(?:\\?\b[a-z_]\w*)+(?=\s*::)/i,alias:["class-name-fully-qualified","static-context"],greedy:!0,inside:{punctuation:/\\/}},{pattern:/([(,?]\s*)[a-z_]\w*(?=\s*\$)/i,alias:"type-hint",greedy:!0,lookbehind:!0},{pattern:/(
[(,?]\s*)(?:\\?\b[a-z_]\w*)+(?=\s*\$)/i,alias:["class-name-fully-qualified","type-hint"],greedy:!0,lookbehind:!0,inside:{punctuation:/\\/}},{pattern:/(\)\s*:\s*(?:\?\s*)?)\b[a-z_]\w*(?!\\)\b/i,alias:"return-type",greedy:!0,lookbehind:!0},{pattern:/(\)\s*:\s*(?:\?\s*)?)(?:\\?\b[a-z_]\w*)+\b(?!\\)/i,alias:["class-name-fully-qualified","return-type"],greedy:!0,lookbehind:!0,inside:{punctuation:/\\/}}],constant:i,function:{pattern:/(^|[^\\\w])\\?[a-z_](?:[\w\\]*\w)?(?=\s*\()/i,lookbehind:!0,inside:{punctuation:/\\/}},property:{pattern:/(->\s*)\w+/,lookbehind:!0},number:a,operator:o,punctuation:s},c=[{pattern:/<<<'([^']+)'[\r\n](?:.*[\r\n])*?\1;/,alias:"nowdoc-string",greedy:!0,inside:{delimiter:{pattern:/^<<<'[^']+'|[a-z_]\w*;$/i,alias:"symbol",inside:{punctuation:/^<<<'?|[';]$/}}}},{pattern:/<<<(?:"([^"]+)"[\r\n](?:.*[\r\n])*?\1;|([a-z_]\w*)[\r\n](?:.*[\r\n])*?\2;)/i,alias:"heredoc-string",greedy:!0,inside:{delimiter:{pattern:/^<<<(?:"[^"]+"|[a-z_]\w*)|[a-z_]\w*;$/i,alias:"symbol",inside:{punctuation:/^<<<"?|[";]$/}},interpolation:u={pattern:/\{\$(?:\{(?:\{[^{}]+\}|[^{}]+)\}|[^{}])+\}|(^|[^\\{])\$+(?:\w+(?:\[[^\r\n\[\]]+\]|->\w+)?)/,lookbehind:!0,inside:t.languages.php}}},{pattern:/`(?:\\[\s\S]|[^\\`])*`/,alias:"backtick-quoted-string",greedy:!0},{pattern:/'(?:\\[\s\S]|[^\\'])*'/,alias:"single-quoted-string",greedy:!0},{pattern:/"(?:\\[\s\S]|[^\\"])*"/,alias:"double-quoted-string",greedy:!0,inside:{interpolation:u}}],t.languages.insertBefore("php","variable",{string:c,attribute:{pattern:/#\[(?:[^"'\/#]|\/(?![*/])|\/\/.*$|#(?!\[).*$|\/\*(?:[^*]|\*(?!\/))*\*\/|"(?:\\[\s\S]|[^\\"])*"|'(?:\\[\s\S]|[^\\'])*')+\](?=\s*[a-z$#])/im,greedy:!0,inside:{"attribute-content":{pattern:/^(#\[)[\s\S]+(?=\]$)/,lookbehind:!0,inside:{comment:n,string:c,"attribute-class-name":[{pattern:/([^:]|^)\b[a-z_]\w*(?!\\)\b/i,alias:"class-name",greedy:!0,lookbehind:!0},{pattern:/([^:]|^)(?:\\?\b[a-z_]\w*)+/i,alias:["class-name","class-name-fully-qualified"],greedy:!0,lookbehind:!0,inside:{punctuatio
n:/\\/}}],constant:i,number:a,operator:o,punctuation:s}},delimiter:{pattern:/^#\[|\]$/,alias:"punctuation"}}}}),t.hooks.add("before-tokenize",function(e){if(/<\?/.test(e.code)){var n=/<\?(?:[^"'/#]|\/(?![*/])|("|')(?:\\[\s\S]|(?!\1)[^\\])*\1|(?:\/\/|#(?!\[))(?:[^?\n\r]|\?(?!>))*(?=$|\?>|[\r\n])|#\[|\/\*(?:[^*]|\*(?!\/))*(?:\*\/|$))*?(?:\?>|$)/gi;t.languages["markup-templating"].buildPlaceholders(e,"php",n)}}),t.hooks.add("after-tokenize",function(e){t.languages["markup-templating"].tokenizePlaceholders(e,"php")})}e.exports=i,i.displayName="php",i.aliases=[]},63632(e,t,n){"use strict";var r=n(88262),i=n(9858);function a(e){var t,n;e.register(r),e.register(i),n=/(?:\b[a-zA-Z]\w*|[|\\[\]])+/.source,(t=e).languages.phpdoc=t.languages.extend("javadoclike",{parameter:{pattern:RegExp("(@(?:global|param|property(?:-read|-write)?|var)\\s+(?:"+n+"\\s+)?)\\$\\w+"),lookbehind:!0}}),t.languages.insertBefore("phpdoc","keyword",{"class-name":[{pattern:RegExp("(@(?:global|package|param|property(?:-read|-write)?|return|subpackage|throws|var)\\s+)"+n),lookbehind:!0,inside:{keyword:/\b(?:callback|resource|boolean|integer|double|object|string|array|false|float|mixed|bool|null|self|true|void|int)\b/,punctuation:/[|\\[\]()]/}}]}),t.languages.javadoclike.addSupport("php",t.languages.phpdoc)}e.exports=a,a.displayName="phpdoc",a.aliases=[]},59149(e,t,n){"use strict";var r=n(11114);function i(e){var 
t,n,i,a;e.register(r),Array.isArray(i=(n=(t=e).languages.plsql=t.languages.extend("sql",{comment:[/\/\*[\s\S]*?\*\//,/--.*/]})).keyword)||(i=n.keyword=[i]),i.unshift(/\b(?:ACCESS|AGENT|AGGREGATE|ARRAY|ARROW|AT|ATTRIBUTE|AUDIT|AUTHID|BFILE_BASE|BLOB_BASE|BLOCK|BODY|BOTH|BOUND|BYTE|CALLING|CHAR_BASE|CHARSET(?:FORM|ID)|CLOB_BASE|COLAUTH|COLLECT|CLUSTERS?|COMPILED|COMPRESS|CONSTANT|CONSTRUCTOR|CONTEXT|CRASH|CUSTOMDATUM|DANGLING|DATE_BASE|DEFINE|DETERMINISTIC|DURATION|ELEMENT|EMPTY|EXCEPTIONS?|EXCLUSIVE|EXTERNAL|FINAL|FORALL|FORM|FOUND|GENERAL|HEAP|HIDDEN|IDENTIFIED|IMMEDIATE|INCLUDING|INCREMENT|INDICATOR|INDEXES|INDICES|INFINITE|INITIAL|ISOPEN|INSTANTIABLE|INTERFACE|INVALIDATE|JAVA|LARGE|LEADING|LENGTH|LIBRARY|LIKE[24C]|LIMITED|LONG|LOOP|MAP|MAXEXTENTS|MAXLEN|MEMBER|MINUS|MLSLABEL|MULTISET|NAME|NAN|NATIVE|NEW|NOAUDIT|NOCOMPRESS|NOCOPY|NOTFOUND|NOWAIT|NUMBER(?:_BASE)?|OBJECT|OCI(?:COLL|DATE|DATETIME|DURATION|INTERVAL|LOBLOCATOR|NUMBER|RAW|REF|REFCURSOR|ROWID|STRING|TYPE)|OFFLINE|ONLINE|ONLY|OPAQUE|OPERATOR|ORACLE|ORADATA|ORGANIZATION|ORL(?:ANY|VARY)|OTHERS|OVERLAPS|OVERRIDING|PACKAGE|PARALLEL_ENABLE|PARAMETERS?|PASCAL|PCTFREE|PIPE(?:LINED)?|PRAGMA|PRIOR|PRIVATE|RAISE|RANGE|RAW|RECORD|REF|REFERENCE|REM|REMAINDER|RESULT|RESOURCE|RETURNING|REVERSE|ROW(?:ID|NUM|TYPE)|SAMPLE|SB[124]|SEGMENT|SELF|SEPARATE|SEQUENCE|SHORT|SIZE(?:_T)?|SPARSE|SQL(?:CODE|DATA|NAME|STATE)|STANDARD|STATIC|STDDEV|STORED|STRING|STRUCT|STYLE|SUBMULTISET|SUBPARTITION|SUBSTITUTABLE|SUBTYPE|SUCCESSFUL|SYNONYM|SYSDATE|TABAUTH|TDO|THE|TIMEZONE_(?:ABBR|HOUR|MINUTE|REGION)|TRAILING|TRANSAC(?:TIONAL)?|TRUSTED|UB[124]|UID|UNDER|UNTRUSTED|VALIDATE|VALIST|VARCHAR2|VARIABLE|VARIANCE|VARRAY|VIEWS|VOID|WHENEVER|WRAPPED|ZONE)\b/i),Array.isArray(a=n.operator)||(a=n.operator=[a]),a.unshift(/:=/)}e.exports=i,i.displayName="plsql",i.aliases=[]},50256(e){"use strict";function 
t(e){e.languages.powerquery={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:\/\/).*)/,lookbehind:!0},"quoted-identifier":{pattern:/#"(?:[^"\r\n]|"")*"(?!")/,greedy:!0,alias:"variable"},string:{pattern:/"(?:[^"\r\n]|"")*"(?!")/,greedy:!0},constant:[/\bDay\.(?:Sunday|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday)\b/,/\bTraceLevel\.(?:Critical|Error|Information|Verbose|Warning)\b/,/\bOccurrence\.(?:First|Last|All)\b/,/\bOrder\.(?:Ascending|Descending)\b/,/\bRoundingMode\.(?:AwayFromZero|Down|ToEven|TowardZero|Up)\b/,/\bMissingField\.(?:Error|Ignore|UseNull)\b/,/\bQuoteStyle\.(?:Csv|None)\b/,/\bJoinKind\.(?:Inner|LeftOuter|RightOuter|FullOuter|LeftAnti|RightAnti)\b/,/\bGroupKind\.(?:Global|Local)\b/,/\bExtraValues\.(?:List|Ignore|Error)\b/,/\bJoinAlgorithm\.(?:Dynamic|PairwiseHash|SortMerge|LeftHash|RightHash|LeftIndex|RightIndex)\b/,/\bJoinSide\.(?:Left|Right)\b/,/\bPrecision\.(?:Double|Decimal)\b/,/\bRelativePosition\.From(?:End|Start)\b/,/\bTextEncoding\.(?:Ascii|BigEndianUnicode|Unicode|Utf8|Utf16|Windows)\b/,/\b(?:Any|Binary|Date|DateTime|DateTimeZone|Duration|Int8|Int16|Int32|Int64|Function|List|Logical|None|Number|Record|Table|Text|Time)\.Type\b/,/\bnull\b/],boolean:/\b(?:true|false)\b/,keyword:/\b(?:and|as|each|else|error|if|in|is|let|meta|not|nullable|optional|or|otherwise|section|shared|then|try|type)\b|#(?:binary|date|datetime|datetimezone|duration|infinity|nan|sections|shared|table|time)\b/,function:{pattern:/(^|[^#\w.])(?!\d)[\w.]+(?=\s*\()/,lookbehind:!0},"data-type":{pattern:/\b(?:any|anynonnull|binary|date|datetime|datetimezone|duration|function|list|logical|none|number|record|table|text|time|type)\b/,alias:"variable"},number:{pattern:/\b0x[\da-f]+\b|(?:[+-]?(?:\b\d+\.)?\b\d+|[+-]\.\d+|(^|[^.])\B\.\d+)(?:e[+-]?\d+)?\b/i,lookbehind:!0},operator:/[-+*\/&?@^]|<(?:=>?|>)?|>=?|=>?|\.\.\.?/,punctuation:/[,;\[\](){}]/},e.languages.pq=e.languages.powerquery,e.languages.mscript=e.languages.powerquery}e.exports=t,t.displayName="powerquery",t.aliases=[]},617
77(e){"use strict";function t(e){var t,n,r;(r=(n=(t=e).languages.powershell={comment:[{pattern:/(^|[^`])<#[\s\S]*?#>/,lookbehind:!0},{pattern:/(^|[^`])#.*/,lookbehind:!0}],string:[{pattern:/"(?:`[\s\S]|[^`"])*"/,greedy:!0,inside:{function:{pattern:/(^|[^`])\$\((?:\$\([^\r\n()]*\)|(?!\$\()[^\r\n)])*\)/,lookbehind:!0,inside:{}}}},{pattern:/'(?:[^']|'')*'/,greedy:!0}],namespace:/\[[a-z](?:\[(?:\[[^\]]*\]|[^\[\]])*\]|[^\[\]])*\]/i,boolean:/\$(?:true|false)\b/i,variable:/\$\w+\b/,function:[/\b(?:Add|Approve|Assert|Backup|Block|Checkpoint|Clear|Close|Compare|Complete|Compress|Confirm|Connect|Convert|ConvertFrom|ConvertTo|Copy|Debug|Deny|Disable|Disconnect|Dismount|Edit|Enable|Enter|Exit|Expand|Export|Find|ForEach|Format|Get|Grant|Group|Hide|Import|Initialize|Install|Invoke|Join|Limit|Lock|Measure|Merge|Move|New|Open|Optimize|Out|Ping|Pop|Protect|Publish|Push|Read|Receive|Redo|Register|Remove|Rename|Repair|Request|Reset|Resize|Resolve|Restart|Restore|Resume|Revoke|Save|Search|Select|Send|Set|Show|Skip|Sort|Split|Start|Step|Stop|Submit|Suspend|Switch|Sync|Tee|Test|Trace|Unblock|Undo|Uninstall|Unlock|Unprotect|Unpublish|Unregister|Update|Use|Wait|Watch|Where|Write)-[a-z]+\b/i,/\b(?:ac|cat|chdir|clc|cli|clp|clv|compare|copy|cp|cpi|cpp|cvpa|dbp|del|diff|dir|ebp|echo|epal|epcsv|epsn|erase|fc|fl|ft|fw|gal|gbp|gc|gci|gcs|gdr|gi|gl|gm|gp|gps|group|gsv|gu|gv|gwmi|iex|ii|ipal|ipcsv|ipsn|irm|iwmi|iwr|kill|lp|ls|measure|mi|mount|move|mp|mv|nal|ndr|ni|nv|ogv|popd|ps|pushd|pwd|rbp|rd|rdr|ren|ri|rm|rmdir|rni|rnp|rp|rv|rvpa|rwmi|sal|saps|sasv|sbp|sc|select|set|shcm|si|sl|sleep|sls|sort|sp|spps|spsv|start|sv|swmi|tee|trcm|type|write)\b/i],keyword:/\b(?:Begin|Break|Catch|Class|Continue|Data|Define|Do|DynamicParam|Else|ElseIf|End|Exit|Filter|Finally|For|ForEach|From|Function|If|InlineScript|Parallel|Param|Process|Return|Sequence|Switch|Throw|Trap|Try|Until|Using|Var|While|Workflow)\b/i,operator:{pattern:/(\W?)(?:!|-(?:eq|ne|gt|ge|lt|le|sh[lr]|not|b?(?:and|x?or)|(?:Not)?(?:Like|Match|Contains
|In)|Replace|Join|is(?:Not)?|as)\b|-[-=]?|\+[+=]?|[*\/%]=?)/i,lookbehind:!0},punctuation:/[|{}[\];(),.]/}).string[0].inside).boolean=n.boolean,r.variable=n.variable,r.function.inside=n}e.exports=t,t.displayName="powershell",t.aliases=[]},3623(e){"use strict";function t(e){e.languages.processing=e.languages.extend("clike",{keyword:/\b(?:break|catch|case|class|continue|default|else|extends|final|for|if|implements|import|new|null|private|public|return|static|super|switch|this|try|void|while)\b/,operator:/<[<=]?|>[>=]?|&&?|\|\|?|[%?]|[!=+\-*\/]=?/}),e.languages.insertBefore("processing","number",{constant:/\b(?!XML\b)[A-Z][A-Z\d_]+\b/,type:{pattern:/\b(?:boolean|byte|char|color|double|float|int|[A-Z]\w*)\b/,alias:"variable"}}),e.languages.processing.function=/\b\w+(?=\s*\()/,e.languages.processing["class-name"].alias="variable"}e.exports=t,t.displayName="processing",t.aliases=[]},82707(e){"use strict";function t(e){e.languages.prolog={comment:[/%.+/,/\/\*[\s\S]*?\*\//],string:{pattern:/(["'])(?:\1\1|\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0},builtin:/\b(?:fx|fy|xf[xy]?|yfx?)\b/,variable:/\b[A-Z_]\w*/,function:/\b[a-z]\w*(?:(?=\()|\/\d+)/,number:/\b\d+(?:\.\d*)?/,operator:/[:\\=><\-?*@\/;+^|!$.]+|\b(?:is|mod|not|xor)\b/,punctuation:/[(){}\[\],]/}}e.exports=t,t.displayName="prolog",t.aliases=[]},59338(e){"use strict";function t(e){var t,n,r;t=e,r=["sum","min","max","avg","group","stddev","stdvar","count","count_values","bottomk","topk","quantile"].concat(n=["on","ignoring","group_right","group_left","by","without"],["offset"]),t.languages.promql={comment:{pattern:/(^[ 
\t]*)#.*/m,lookbehind:!0},"vector-match":{pattern:RegExp("((?:"+n.join("|")+")\\s*)\\([^)]*\\)"),lookbehind:!0,inside:{"label-key":{pattern:/\b[^,]*\b/,alias:"attr-name"},punctuation:/[(),]/}},"context-labels":{pattern:/\{[^{}]*\}/,inside:{"label-key":{pattern:/\b[a-z_]\w*(?=\s*(?:=|![=~]))/,alias:"attr-name"},"label-value":{pattern:/(["'`])(?:\\[\s\S]|(?!\1)[^\\])*\1/,greedy:!0,alias:"attr-value"},punctuation:/\{|\}|=~?|![=~]|,/}},"context-range":[{pattern:/\[[\w\s:]+\]/,inside:{punctuation:/\[|\]|:/,"range-duration":{pattern:/\b(?:\d+(?:[smhdwy]|ms))+\b/i,alias:"number"}}},{pattern:/(\boffset\s+)\w+/,lookbehind:!0,inside:{"range-duration":{pattern:/\b(?:\d+(?:[smhdwy]|ms))+\b/i,alias:"number"}}}],keyword:RegExp("\\b(?:"+r.join("|")+")\\b","i"),function:/\b[a-z_]\w*(?=\s*\()/i,number:/[-+]?(?:(?:\b\d+(?:\.\d+)?|\B\.\d+)(?:e[-+]?\d+)?\b|\b(?:0x[0-9a-f]+|nan|inf)\b)/i,operator:/[\^*/%+-]|==|!=|<=|<|>=|>|\b(?:and|unless|or)\b/i,punctuation:/[{};()`,.[\]]/}}e.exports=t,t.displayName="promql",t.aliases=[]},56267(e){"use strict";function t(e){e.languages.properties={comment:/^[ \t]*[#!].*$/m,"attr-value":{pattern:/(^[ \t]*(?:\\(?:\r\n|[\s\S])|[^\\\s:=])+(?: *[=:] *(?! 
)| ))(?:\\(?:\r\n|[\s\S])|[^\\\r\n])+/m,lookbehind:!0},"attr-name":/^[ \t]*(?:\\(?:\r\n|[\s\S])|[^\\\s:=])+(?= *[=:]| )/m,punctuation:/[=:]/}}e.exports=t,t.displayName="properties",t.aliases=[]},98809(e){"use strict";function t(e){var t,n;n=/\b(?:double|float|[su]?int(?:32|64)|s?fixed(?:32|64)|bool|string|bytes)\b/,(t=e).languages.protobuf=t.languages.extend("clike",{"class-name":[{pattern:/(\b(?:enum|extend|message|service)\s+)[A-Za-z_]\w*(?=\s*\{)/,lookbehind:!0},{pattern:/(\b(?:rpc\s+\w+|returns)\s*\(\s*(?:stream\s+)?)\.?[A-Za-z_]\w*(?:\.[A-Za-z_]\w*)*(?=\s*\))/,lookbehind:!0}],keyword:/\b(?:enum|extend|extensions|import|message|oneof|option|optional|package|public|repeated|required|reserved|returns|rpc(?=\s+\w)|service|stream|syntax|to)\b(?!\s*=\s*\d)/,function:/\b[a-z_]\w*(?=\s*\()/i}),t.languages.insertBefore("protobuf","operator",{map:{pattern:/\bmap<\s*[\w.]+\s*,\s*[\w.]+\s*>(?=\s+[a-z_]\w*\s*[=;])/i,alias:"class-name",inside:{punctuation:/[<>.,]/,builtin:n}},builtin:n,"positional-class-name":{pattern:/(?:\b|\B\.)[a-z_]\w*(?:\.[a-z_]\w*)*(?=\s+[a-z_]\w*\s*[=;])/i,alias:"class-name",inside:{punctuation:/\./}},annotation:{pattern:/(\[\s*)[a-z_]\w*(?=\s*=)/i,lookbehind:!0}})}e.exports=t,t.displayName="protobuf",t.aliases=[]},37548(e){"use strict";function 
t(e){e.languages.psl={comment:{pattern:/#.*/,greedy:!0},string:{pattern:/"(?:\\.|[^\\"])*"/,greedy:!0,inside:{symbol:/\\[ntrbA-Z"\\]/}},"heredoc-string":{pattern:/<<<([a-zA-Z_]\w*)[\r\n](?:.*[\r\n])*?\1\b/,alias:"string",greedy:!0},keyword:/\b(?:__multi|__single|case|default|do|else|elsif|exit|export|for|foreach|function|if|last|line|local|next|requires|return|switch|until|while|word)\b/,constant:/\b(?:ALARM|CHART_ADD_GRAPH|CHART_DELETE_GRAPH|CHART_DESTROY|CHART_LOAD|CHART_PRINT|EOF|FALSE|False|false|NO|No|no|OFFLINE|OK|PSL_PROF_LOG|R_CHECK_HORIZ|R_CHECK_VERT|R_CLICKER|R_COLUMN|R_FRAME|R_ICON|R_LABEL|R_LABEL_CENTER|R_LIST_MULTIPLE|R_LIST_MULTIPLE_ND|R_LIST_SINGLE|R_LIST_SINGLE_ND|R_MENU|R_POPUP|R_POPUP_SCROLLED|R_RADIO_HORIZ|R_RADIO_VERT|R_ROW|R_SCALE_HORIZ|R_SCALE_VERT|R_SPINNER|R_TEXT_FIELD|R_TEXT_FIELD_LABEL|R_TOGGLE|TRIM_LEADING|TRIM_LEADING_AND_TRAILING|TRIM_REDUNDANT|TRIM_TRAILING|TRUE|True|true|VOID|WARN)\b/,variable:/\b(?:errno|exit_status|PslDebug)\b/,builtin:{pattern:/\b(?:acos|add_diary|annotate|annotate_get|asctime|asin|atan|atexit|ascii_to_ebcdic|batch_set|blackout|cat|ceil|chan_exists|change_state|close|code_cvt|cond_signal|cond_wait|console_type|convert_base|convert_date|convert_locale_date|cos|cosh|create|destroy_lock|dump_hist|date|destroy|difference|dget_text|dcget_text|ebcdic_to_ascii|encrypt|event_archive|event_catalog_get|event_check|event_query|event_range_manage|event_range_query|event_report|event_schedule|event_trigger|event_trigger2|execute|exists|exp|fabs|floor|fmod|full_discovery|file|fopen|ftell|fseek|grep|get_vars|getenv|get|get_chan_info|get_ranges|get_text|gethostinfo|getpid|getpname|history_get_retention|history|index|int|is_var|intersection|isnumber|internal|in_transition|join|kill|length|lines|lock|lock_info|log|loge|log10|matchline|msg_check|msg_get_format|msg_get_severity|msg_printf|msg_sprintf|ntharg|num_consoles|nthargf|nthline|nthlinef|num_bytes|print|proc_exists|process|popen|printf|pconfig|poplines|pow|PslExecute|PslFunction
Call|PslFunctionExists|PslSetOptions|random|read|readln|refresh_parameters|remote_check|remote_close|remote_event_query|remote_event_trigger|remote_file_send|remote_open|remove|replace|rindex|sec_check_priv|sec_store_get|sec_store_set|set_alarm_ranges|set_locale|share|sin|sinh|sleep|sopen|sqrt|srandom|subset|set|substr|system|sprintf|sort|snmp_agent_config|_snmp_debug|snmp_agent_stop|snmp_agent_start|snmp_h_set|snmp_h_get_next|snmp_h_get|snmp_set|snmp_walk|snmp_get_next|snmp_get|snmp_config|snmp_close|snmp_open|snmp_trap_receive|snmp_trap_ignore|snmp_trap_listen|snmp_trap_send|snmp_trap_raise_std_trap|snmp_trap_register_im|splitline|strcasecmp|str_repeat|trim|tail|tan|tanh|time|tmpnam|tolower|toupper|trace_psl_process|text_domain|unlock|unique|union|unset|va_arg|va_start|write)\b/,alias:"builtin-function"},"foreach-variable":{pattern:/(\bforeach\s+(?:(?:\w+\b|"(?:\\.|[^\\"])*")\s+){0,2})[_a-zA-Z]\w*(?=\s*\()/,lookbehind:!0,greedy:!0},function:{pattern:/\b[_a-z]\w*\b(?=\s*\()/i},number:/\b(?:0x[0-9a-f]+|[0-9]+(?:\.[0-9]+)?)\b/i,operator:/--|\+\+|&&=?|\|\|=?|<<=?|>>=?|[=!]~|[-+*/%&|^!=<>]=?|\.|[:?]/,punctuation:/[(){}\[\];,]/}}e.exports=t,t.displayName="psl",t.aliases=[]},82161(e){"use strict";function t(e){!function(e){e.languages.pug={comment:{pattern:/(^([\t ]*))\/\/.*(?:(?:\r?\n|\r)\2[\t ].+)*/m,lookbehind:!0},"multiline-script":{pattern:/(^([\t ]*)script\b.*\.[\t ]*)(?:(?:\r?\n|\r(?!\n))(?:\2[\t ].+|\s*?(?=\r?\n|\r)))+/m,lookbehind:!0,inside:e.languages.javascript},filter:{pattern:/(^([\t ]*)):.+(?:(?:\r?\n|\r(?!\n))(?:\2[\t ].+|\s*?(?=\r?\n|\r)))+/m,lookbehind:!0,inside:{"filter-name":{pattern:/^:[\w-]+/,alias:"variable"}}},"multiline-plain-text":{pattern:/(^([\t ]*)[\w\-#.]+\.[\t ]*)(?:(?:\r?\n|\r(?!\n))(?:\2[\t ].+|\s*?(?=\r?\n|\r)))+/m,lookbehind:!0},markup:{pattern:/(^[\t ]*)<.+/m,lookbehind:!0,inside:e.languages.markup},doctype:{pattern:/((?:^|\n)[\t ]*)doctype(?: .+)?/,lookbehind:!0},"flow-control":{pattern:/(^[\t 
]*)(?:if|unless|else|case|when|default|each|while)\b(?: .+)?/m,lookbehind:!0,inside:{each:{pattern:/^each .+? in\b/,inside:{keyword:/\b(?:each|in)\b/,punctuation:/,/}},branch:{pattern:/^(?:if|unless|else|case|when|default|while)\b/,alias:"keyword"},rest:e.languages.javascript}},keyword:{pattern:/(^[\t ]*)(?:block|extends|include|append|prepend)\b.+/m,lookbehind:!0},mixin:[{pattern:/(^[\t ]*)mixin .+/m,lookbehind:!0,inside:{keyword:/^mixin/,function:/\w+(?=\s*\(|\s*$)/,punctuation:/[(),.]/}},{pattern:/(^[\t ]*)\+.+/m,lookbehind:!0,inside:{name:{pattern:/^\+\w+/,alias:"function"},rest:e.languages.javascript}}],script:{pattern:/(^[\t ]*script(?:(?:&[^(]+)?\([^)]+\))*[\t ]).+/m,lookbehind:!0,inside:e.languages.javascript},"plain-text":{pattern:/(^[\t ]*(?!-)[\w\-#.]*[\w\-](?:(?:&[^(]+)?\([^)]+\))*\/?[\t ]).+/m,lookbehind:!0},tag:{pattern:/(^[\t ]*)(?!-)[\w\-#.]*[\w\-](?:(?:&[^(]+)?\([^)]+\))*\/?:?/m,lookbehind:!0,inside:{attributes:[{pattern:/&[^(]+\([^)]+\)/,inside:e.languages.javascript},{pattern:/\([^)]+\)/,inside:{"attr-value":{pattern:/(=\s*(?!\s))(?:\{[^}]*\}|[^,)\r\n]+)/,lookbehind:!0,inside:e.languages.javascript},"attr-name":/[\w-]+(?=\s*!?=|\s*[,)])/,punctuation:/[!=(),]+/}}],punctuation:/:/,"attr-id":/#[\w\-]+/,"attr-class":/\.[\w\-]+/}},code:[{pattern:/(^[\t ]*(?:-|!?=)).+/m,lookbehind:!0,inside:e.languages.javascript}],punctuation:/[.\-!=|]+/};for(var t=/(^([\t ]*)):(?:(?:\r?\n|\r(?!\n))(?:\2[\t ].+|\s*?(?=\r?\n|\r)))+/.source,n=[{filter:"atpl",language:"twig"},{filter:"coffee",language:"coffeescript"},"ejs","handlebars","less","livescript","markdown",{filter:"sass",language:"scss"},"stylus"],r={},i=0,a=n.length;i",function(){return o.filter}),"m"),lookbehind:!0,inside:{"filter-name":{pattern:/^:[\w-]+/,alias:"variable"},rest:e.languages[o.language]}})}e.languages.insertBefore("pug","filter",r)}(e)}e.exports=t,t.displayName="pug",t.aliases=[]},80625(e){"use strict";function t(e){var 
t,n;(t=e).languages.puppet={heredoc:[{pattern:/(@\("([^"\r\n\/):]+)"(?:\/[nrts$uL]*)?\).*(?:\r?\n|\r))(?:.*(?:\r?\n|\r(?!\n)))*?[ \t]*(?:\|[ \t]*)?(?:-[ \t]*)?\2/,lookbehind:!0,alias:"string",inside:{punctuation:/(?=\S).*\S(?= *$)/}},{pattern:/(@\(([^"\r\n\/):]+)(?:\/[nrts$uL]*)?\).*(?:\r?\n|\r))(?:.*(?:\r?\n|\r(?!\n)))*?[ \t]*(?:\|[ \t]*)?(?:-[ \t]*)?\2/,lookbehind:!0,greedy:!0,alias:"string",inside:{punctuation:/(?=\S).*\S(?= *$)/}},{pattern:/@\("?(?:[^"\r\n\/):]+)"?(?:\/[nrts$uL]*)?\)/,alias:"string",inside:{punctuation:{pattern:/(\().+?(?=\))/,lookbehind:!0}}}],"multiline-comment":{pattern:/(^|[^\\])\/\*[\s\S]*?\*\//,lookbehind:!0,greedy:!0,alias:"comment"},regex:{pattern:/((?:\bnode\s+|[~=\(\[\{,]\s*|[=+]>\s*|^\s*))\/(?:[^\/\\]|\\[\s\S])+\/(?:[imx]+\b|\B)/,lookbehind:!0,greedy:!0,inside:{"extended-regex":{pattern:/^\/(?:[^\/\\]|\\[\s\S])+\/[im]*x[im]*$/,inside:{comment:/#.*/}}}},comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0,greedy:!0},string:{pattern:/(["'])(?:\$\{(?:[^'"}]|(["'])(?:(?!\2)[^\\]|\\[\s\S])*\2)+\}|\$(?!\{)|(?!\1)[^\\$]|\\[\s\S])*\1/,greedy:!0,inside:{"double-quoted":{pattern:/^"[\s\S]*"$/,inside:{}}}},variable:{pattern:/\$(?:::)?\w+(?:::\w+)*/,inside:{punctuation:/::/}},"attr-name":/(?:\b\w+|\*)(?=\s*=>)/,function:[{pattern:/(\.)(?!\d)\w+/,lookbehind:!0},/\b(?:contain|debug|err|fail|include|info|notice|realize|require|tag|warning)\b|\b(?!\d)\w+(?=\()/],number:/\b(?:0x[a-f\d]+|\d+(?:\.\d+)?(?:e-?\d+)?)\b/i,boolean:/\b(?:true|false)\b/,keyword:/\b(?:application|attr|case|class|consumes|default|define|else|elsif|function|if|import|inherits|node|private|produces|type|undef|unless)\b/,datatype:{pattern:/\b(?:Any|Array|Boolean|Callable|Catalogentry|Class|Collection|Data|Default|Enum|Float|Hash|Integer|NotUndef|Numeric|Optional|Pattern|Regexp|Resource|Runtime|Scalar|String|Struct|Tuple|Type|Undef|Variant)\b/,alias:"symbol"},operator:/=[=~>]?|![=~]?|<(?:<\|?|[=~|-])?|>[>=]?|->?|~>|\|>?>?|[*\/%+?]|\b(?:and|in|or)\b/,punctuation:/[\[\]{}().,;]|:+/},n=[{patt
ern:/(^|[^\\])\$\{(?:[^'"{}]|\{[^}]*\}|(["'])(?:(?!\2)[^\\]|\\[\s\S])*\2)+\}/,lookbehind:!0,inside:{"short-variable":{pattern:/(^\$\{)(?!\w+\()(?:::)?\w+(?:::\w+)*/,lookbehind:!0,alias:"variable",inside:{punctuation:/::/}},delimiter:{pattern:/^\$/,alias:"variable"},rest:t.languages.puppet}},{pattern:/(^|[^\\])\$(?:::)?\w+(?:::\w+)*/,lookbehind:!0,alias:"variable",inside:{punctuation:/::/}}],t.languages.puppet.heredoc[0].inside.interpolation=n,t.languages.puppet.string.inside["double-quoted"].inside.interpolation=n}e.exports=t,t.displayName="puppet",t.aliases=[]},88393(e){"use strict";function t(e){var t,n,r;(t=e).languages.pure={comment:[{pattern:/(^|[^\\])\/\*[\s\S]*?\*\//,lookbehind:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0},/#!.+/],"inline-lang":{pattern:/%<[\s\S]+?%>/,greedy:!0,inside:{lang:{pattern:/(^%< *)-\*-.+?-\*-/,lookbehind:!0,alias:"comment"},delimiter:{pattern:/^%<.*|%>$/,alias:"punctuation"}}},string:{pattern:/"(?:\\.|[^"\\\r\n])*"/,greedy:!0},number:{pattern:/((?:\.\.)?)(?:\b(?:inf|nan)\b|\b0x[\da-f]+|(?:\b(?:0b)?\d+(?:\.\d+)?|\B\.\d+)(?:e[+-]?\d+)?L?)/i,lookbehind:!0},keyword:/\b(?:ans|break|bt|case|catch|cd|clear|const|def|del|dump|else|end|exit|extern|false|force|help|if|infix[lr]?|interface|let|ls|mem|namespace|nonfix|NULL|of|otherwise|outfix|override|postfix|prefix|private|public|pwd|quit|run|save|show|stats|then|throw|trace|true|type|underride|using|when|with)\b/,function:/\b(?:abs|add_(?:(?:fundef|interface|macdef|typedef)(?:_at)?|addr|constdef|vardef)|all|any|applp?|arity|bigintp?|blob(?:_crc|_size|p)?|boolp?|byte_(?:matrix|pointer)|byte_c?string(?:_pointer)?|calloc|cat|catmap|ceil|char[ps]?|check_ptrtag|chr|clear_sentry|clearsym|closurep?|cmatrixp?|cols?|colcat(?:map)?|colmap|colrev|colvector(?:p|seq)?|complex(?:_float_(?:matrix|pointer)|_matrix(?:_view)?|_pointer|p)?|conj|cookedp?|cst|cstring(?:_(?:dup|list|vector))?|curry3?|cyclen?|del_(?:constdef|fundef|interface|macdef|typedef|vardef)|delete|diag(?:mat)?|dim|dmatrixp?|do|double(?:_mat
rix(?:_view)?|_pointer|p)?|dowith3?|drop|dropwhile|eval(?:cmd)?|exactp|filter|fix|fixity|flip|float(?:_matrix|_pointer)|floor|fold[lr]1?|frac|free|funp?|functionp?|gcd|get(?:_(?:byte|constdef|double|float|fundef|int(?:64)?|interface(?:_typedef)?|long|macdef|pointer|ptrtag|short|sentry|string|typedef|vardef))?|globsym|hash|head|id|im|imatrixp?|index|inexactp|infp|init|insert|int(?:_matrix(?:_view)?|_pointer|p)?|int64_(?:matrix|pointer)|integerp?|iteraten?|iterwhile|join|keys?|lambdap?|last(?:err(?:pos)?)?|lcd|list[2p]?|listmap|make_ptrtag|malloc|map|matcat|matrixp?|max|member|min|nanp|nargs|nmatrixp?|null|numberp?|ord|pack(?:ed)?|pointer(?:_cast|_tag|_type|p)?|pow|pred|ptrtag|put(?:_(?:byte|double|float|int(?:64)?|long|pointer|short|string))?|rationalp?|re|realp?|realloc|recordp?|redim|reduce(?:_with)?|refp?|repeatn?|reverse|rlistp?|round|rows?|rowcat(?:map)?|rowmap|rowrev|rowvector(?:p|seq)?|same|scan[lr]1?|sentry|sgn|short_(?:matrix|pointer)|slice|smatrixp?|sort|split|str|strcat|stream|stride|string(?:_(?:dup|list|vector)|p)?|subdiag(?:mat)?|submat|subseq2?|substr|succ|supdiag(?:mat)?|symbolp?|tail|take|takewhile|thunkp?|transpose|trunc|tuplep?|typep|ubyte|uint(?:64)?|ulong|uncurry3?|unref|unzip3?|update|ushort|vals?|varp?|vector(?:p|seq)?|void|zip3?|zipwith3?)\b/,special:{pattern:/\b__[a-z]+__\b/i,alias:"builtin"},operator:/(?:[!"#$%&'*+,\-.\/:<=>?@\\^`|~\u00a1-\u00bf\u00d7-\u00f7\u20d0-\u2bff]|\b_+\b)+|\b(?:and|div|mod|not|or)\b/,punctuation:/[(){}\[\];,|]/},r=/%< *-\*- *\d* *-\*-[\s\S]+?%>/.source,(n=["c",{lang:"c++",alias:"cpp"},"fortran"]).forEach(function(e){var n=e;if("string"!=typeof e&&(n=e.alias,e=e.lang),t.languages[n]){var 
i={};i["inline-lang-"+n]={pattern:RegExp(r.replace("",e.replace(/([.+*?\/\\(){}\[\]])/g,"\\$1")),"i"),inside:t.util.clone(t.languages.pure["inline-lang"].inside)},i["inline-lang-"+n].inside.rest=t.util.clone(t.languages[n]),t.languages.insertBefore("pure","inline-lang",i)}}),t.languages.c&&(t.languages.pure["inline-lang"].inside.rest=t.util.clone(t.languages.c))}e.exports=t,t.displayName="pure",t.aliases=[]},78404(e){"use strict";function t(e){e.languages.purebasic=e.languages.extend("clike",{comment:/;.*/,keyword:/\b(?:declarecdll|declaredll|compilerselect|compilercase|compilerdefault|compilerendselect|compilererror|enableexplicit|disableexplicit|not|and|or|xor|calldebugger|debuglevel|enabledebugger|disabledebugger|restore|read|includepath|includebinary|threaded|runtime|with|endwith|structureunion|endstructureunion|align|newlist|newmap|interface|endinterface|extends|enumeration|endenumeration|swap|foreach|continue|fakereturn|goto|gosub|return|break|module|endmodule|declaremodule|enddeclaremodule|declare|declarec|prototype|prototypec|enableasm|disableasm|dim|redim|data|datasection|enddatasection|to|procedurereturn|debug|default|case|select|endselect|as|import|endimport|importc|compilerif|compilerelse|compilerendif|compilerelseif|end|structure|endstructure|while|wend|for|next|step|if|else|elseif|endif|repeat|until|procedure|proceduredll|procedurec|procedurecdll|endprocedure|protected|shared|static|global|define|includefile|xincludefile|macro|endmacro)\b/i,function:/\b\w+(?:\.\w+)?\s*(?=\()/,number:/(?:\$[\da-f]+|\b-?(?:\d+(?:\.\d+)?|\.\d+)(?:e[+-]?\d+)?)\b/i,operator:/(?:@\*?|\?|\*)\w+|-[>-]?|\+\+?|!=?|<>?=?|==?|&&?|\|?\||[~^%?*/@]/}),e.languages.insertBefore("purebasic","keyword",{tag:/#\w+/,asm:{pattern:/(^[\t 
]*)!.*/m,lookbehind:!0,alias:"tag",inside:{comment:/;.*/,string:{pattern:/(["'`])(?:\\.|(?!\1)[^\\\r\n])*\1/,greedy:!0},"label-reference-anonymous":{pattern:/(!\s*j[a-z]+\s+)@[fb]/i,lookbehind:!0,alias:"fasm-label"},"label-reference-addressed":{pattern:/(!\s*j[a-z]+\s+)[A-Z._?$@][\w.?$@~#]*/i,lookbehind:!0,alias:"fasm-label"},function:{pattern:/^([\t ]*!\s*)[\da-z]+(?=\s|$)/im,lookbehind:!0},"function-inline":{pattern:/(:\s*)[\da-z]+(?=\s)/i,lookbehind:!0,alias:"function"},label:{pattern:/^([\t ]*!\s*)[A-Za-z._?$@][\w.?$@~#]*(?=:)/m,lookbehind:!0,alias:"fasm-label"},keyword:[/\b(?:extern|global)\b[^;\r\n]*/i,/\b(?:CPU|FLOAT|DEFAULT)\b.*/],register:/\b(?:st\d|[xyz]mm\d\d?|[cdt]r\d|r\d\d?[bwd]?|[er]?[abcd]x|[abcd][hl]|[er]?(?:bp|sp|si|di)|[cdefgs]s|mm\d+)\b/i,number:/(?:\b|-|(?=\$))(?:0[hx](?:[\da-f]*\.)?[\da-f]+(?:p[+-]?\d+)?|\d[\da-f]+[hx]|\$\d[\da-f]*|0[oq][0-7]+|[0-7]+[oq]|0[by][01]+|[01]+[by]|0[dt]\d+|(?:\d+(?:\.\d+)?|\.\d+)(?:\.?e[+-]?\d+)?[dt]?)\b/i,operator:/[\[\]*+\-/%<>=&|$!,.:]/}}}),delete e.languages.purebasic["class-name"],delete e.languages.purebasic.boolean,e.languages.pbfasm=e.languages.purebasic}e.exports=t,t.displayName="purebasic",t.aliases=[]},92923(e,t,n){"use strict";var r=n(58090);function i(e){e.register(r),e.languages.purescript=e.languages.extend("haskell",{keyword:/\b(?:ado|case|class|data|derive|do|else|forall|if|in|infixl|infixr|instance|let|module|newtype|of|primitive|then|type|where)\b/,"import-statement":{pattern:/(^[\t 
]*)import\s+[A-Z][\w']*(?:\.[A-Z][\w']*)*(?:\s+as\s+[A-Z][\w']*(?:\.[A-Z][\w']*)*)?(?:\s+hiding\b)?/m,lookbehind:!0,inside:{keyword:/\b(?:import|as|hiding)\b/}},builtin:/\b(?:absurd|add|ap|append|apply|between|bind|bottom|clamp|compare|comparing|compose|conj|const|degree|discard|disj|div|eq|flap|flip|gcd|identity|ifM|join|lcm|liftA1|liftM1|map|max|mempty|min|mod|mul|negate|not|notEq|one|otherwise|recip|show|sub|top|unit|unless|unlessM|void|when|whenM|zero)\b/}),e.languages.purs=e.languages.purescript}e.exports=i,i.displayName="purescript",i.aliases=["purs"]},52992(e){"use strict";function t(e){e.languages.python={comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0},"string-interpolation":{pattern:/(?:f|rf|fr)(?:("""|''')[\s\S]*?\1|("|')(?:\\.|(?!\2)[^\\\r\n])*\2)/i,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^{])(?:\{\{)*)\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}])+\})+\})+\}/,lookbehind:!0,inside:{"format-spec":{pattern:/(:)[^:(){}]+(?=\}$)/,lookbehind:!0},"conversion-option":{pattern:/![sra](?=[:}]$)/,alias:"punctuation"},rest:null}},string:/[\s\S]+/}},"triple-quoted-string":{pattern:/(?:[rub]|rb|br)?("""|''')[\s\S]*?\1/i,greedy:!0,alias:"string"},string:{pattern:/(?:[rub]|rb|br)?("|')(?:\\.|(?!\1)[^\\\r\n])*\1/i,greedy:!0},function:{pattern:/((?:^|\s)def[ \t]+)[a-zA-Z_]\w*(?=\s*\()/g,lookbehind:!0},"class-name":{pattern:/(\bclass\s+)\w+/i,lookbehind:!0},decorator:{pattern:/(^[\t 
]*)@\w+(?:\.\w+)*/im,lookbehind:!0,alias:["annotation","punctuation"],inside:{punctuation:/\./}},keyword:/\b(?:and|as|assert|async|await|break|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|nonlocal|not|or|pass|print|raise|return|try|while|with|yield)\b/,builtin:/\b(?:__import__|abs|all|any|apply|ascii|basestring|bin|bool|buffer|bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|format|frozenset|getattr|globals|hasattr|hash|help|hex|id|input|int|intern|isinstance|issubclass|iter|len|list|locals|long|map|max|memoryview|min|next|object|oct|open|ord|pow|property|range|raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)\b/,boolean:/\b(?:True|False|None)\b/,number:/(?:\b(?=\d)|\B(?=\.))(?:0[bo])?(?:(?:\d|0x[\da-f])[\da-f]*(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?j?\b/i,operator:/[-+%=]=?|!=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,punctuation:/[{}[\];(),.:]/},e.languages.python["string-interpolation"].inside.interpolation.inside.rest=e.languages.python,e.languages.py=e.languages.python}e.exports=t,t.displayName="python",t.aliases=["py"]},55762(e){"use strict";function t(e){e.languages.q={string:/"(?:\\.|[^"\\\r\n])*"/,comment:[{pattern:/([\t )\]}])\/.*/,lookbehind:!0,greedy:!0},{pattern:/(^|\r?\n|\r)\/[\t ]*(?:(?:\r?\n|\r)(?:.*(?:\r?\n|\r(?!\n)))*?(?:\\(?=[\t ]*(?:\r?\n|\r))|$)|\S.*)/,lookbehind:!0,greedy:!0},{pattern:/^\\[\t 
]*(?:\r?\n|\r)[\s\S]+/m,greedy:!0},{pattern:/^#!.+/m,greedy:!0}],symbol:/`(?::\S+|[\w.]*)/,datetime:{pattern:/0N[mdzuvt]|0W[dtz]|\d{4}\.\d\d(?:m|\.\d\d(?:T(?:\d\d(?::\d\d(?::\d\d(?:[.:]\d\d\d)?)?)?)?)?[dz]?)|\d\d:\d\d(?::\d\d(?:[.:]\d\d\d)?)?[uvt]?/,alias:"number"},number:/\b(?![01]:)(?:0[wn]|0W[hj]?|0N[hje]?|0x[\da-fA-F]+|\d+(?:\.\d*)?(?:e[+-]?\d+)?[hjfeb]?)/,keyword:/\\\w+\b|\b(?:abs|acos|aj0?|all|and|any|asc|asin|asof|atan|attr|avgs?|binr?|by|ceiling|cols|cor|cos|count|cov|cross|csv|cut|delete|deltas|desc|dev|differ|distinct|div|do|dsave|ej|enlist|eval|except|exec|exit|exp|fby|fills|first|fkeys|flip|floor|from|get|getenv|group|gtime|hclose|hcount|hdel|hopen|hsym|iasc|identity|idesc|if|ij|in|insert|inter|inv|keys?|last|like|list|ljf?|load|log|lower|lsq|ltime|ltrim|mavg|maxs?|mcount|md5|mdev|med|meta|mins?|mmax|mmin|mmu|mod|msum|neg|next|not|null|or|over|parse|peach|pj|plist|prds?|prev|prior|rand|rank|ratios|raze|read0|read1|reciprocal|reval|reverse|rload|rotate|rsave|rtrim|save|scan|scov|sdev|select|set|setenv|show|signum|sin|sqrt|ssr?|string|sublist|sums?|sv|svar|system|tables|tan|til|trim|txf|type|uj|ungroup|union|update|upper|upsert|value|var|views?|vs|wavg|where|while|within|wj1?|wsum|ww|xasc|xbar|xcols?|xdesc|xexp|xgroup|xkey|xlog|xprev|xrank)\b/,adverb:{pattern:/['\/\\]:?|\beach\b/,alias:"function"},verb:{pattern:/(?:\B\.\B|\b[01]:|<[=>]?|>=?|[:+\-*%,!?~=|$&#@^]):?|\b_\b:?/,alias:"operator"},punctuation:/[(){}\[\];.]/}}e.exports=t,t.displayName="q",t.aliases=[]},4137(e){"use strict";function t(e){!function(e){for(var t=/"(?:\\.|[^\\"\r\n])*"|'(?:\\.|[^\\'\r\n])*'/.source,n=/\/\/.*(?!.)|\/\*(?:[^*]|\*(?!\/))*\*\//.source,r=/(?:[^\\()[\]{}"'/]||\/(?![*/])||\(*\)|\[*\]|\{*\}|\\[\s\S])/.source.replace(//g,function(){return t}).replace(//g,function(){return n}),i=0;i<2;i++)r=r.replace(//g,function(){return r});r=r.replace(//g,"[^\\s\\S]"),e.languages.qml={comment:{pattern:/\/\/.*|\/\*[\s\S]*?\*\//,greedy:!0},"javascript-function":{pattern:RegExp(/((?:^|;)[ 
\t]*)function\s+(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*\s*\(*\)\s*\{*\}/.source.replace(//g,function(){return r}),"m"),lookbehind:!0,greedy:!0,alias:"language-javascript",inside:e.languages.javascript},"class-name":{pattern:/((?:^|[:;])[ \t]*)(?!\d)\w+(?=[ \t]*\{|[ \t]+on\b)/m,lookbehind:!0},property:[{pattern:/((?:^|[;{])[ \t]*)(?!\d)\w+(?:\.\w+)*(?=[ \t]*:)/m,lookbehind:!0},{pattern:/((?:^|[;{])[ \t]*)property[ \t]+(?!\d)\w+(?:\.\w+)*[ \t]+(?!\d)\w+(?:\.\w+)*(?=[ \t]*:)/m,lookbehind:!0,inside:{keyword:/^property/,property:/\w+(?:\.\w+)*/}}],"javascript-expression":{pattern:RegExp(/(:[ \t]*)(?![\s;}[])(?:(?!$|[;}]))+/.source.replace(//g,function(){return r}),"m"),lookbehind:!0,greedy:!0,alias:"language-javascript",inside:e.languages.javascript},string:/"(?:\\.|[^\\"\r\n])*"/,keyword:/\b(?:as|import|on)\b/,punctuation:/[{}[\]:;,]/}}(e)}e.exports=t,t.displayName="qml",t.aliases=[]},28260(e){"use strict";function t(e){e.languages.qore=e.languages.extend("clike",{comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:\/\/|#).*)/,lookbehind:!0},string:{pattern:/("|')(?:\\[\s\S]|(?!\1)[^\\])*\1/,greedy:!0},keyword:/\b(?:abstract|any|assert|binary|bool|boolean|break|byte|case|catch|char|class|code|const|continue|data|default|do|double|else|enum|extends|final|finally|float|for|goto|hash|if|implements|import|inherits|instanceof|int|interface|long|my|native|new|nothing|null|object|our|own|private|reference|rethrow|return|short|soft(?:int|float|number|bool|string|date|list)|static|strictfp|string|sub|super|switch|synchronized|this|throw|throws|transient|try|void|volatile|while)\b/,boolean:/\b(?:true|false)\b/i,function:/\$?\b(?!\d)\w+(?=\()/,number:/\b(?:0b[01]+|0x(?:[\da-f]*\.)?[\da-fp\-]+|(?:\d+(?:\.\d+)?|\.\d+)(?:e\d+)?[df]|(?:\d+(?:\.\d+)?|\.\d+))\b/i,operator:{pattern:/(^|[^.])(?:\+[+=]?|-[-=]?|[!=](?:==?|~)?|>>?=?|<(?:=>?|<=?)?|&[&=]?|\|[|=]?|[*\/%^]=?|[~?])/,lookbehind:!0},variable:/\$(?!\d)\w+\b/})}e.exports=t,t.displayName="qore",t.aliases=[]},71360(e){"use
 strict";function t(e){!function(e){function t(e,t){return e.replace(/<<(\d+)>>/g,function(e,n){return"(?:"+t[+n]+")"})}function n(e,n,r){return RegExp(t(e,n),r||"")}function r(e,t){for(var n=0;n>/g,function(){return"(?:"+e+")"});return e.replace(/<>/g,"[^\\s\\S]")}var i={type:"Adj BigInt Bool Ctl Double false Int One Pauli PauliI PauliX PauliY PauliZ Qubit Range Result String true Unit Zero",other:"Adjoint adjoint apply as auto body borrow borrowing Controlled controlled distribute elif else fail fixup for function if in internal intrinsic invert is let mutable namespace new newtype open operation repeat return self set until use using while within"};function a(e){return"\\b(?:"+e.trim().replace(/ /g,"|")+")\\b"}var o=RegExp(a(i.type+" "+i.other)),s=/\b[A-Za-z_]\w*\b/.source,u=t(/<<0>>(?:\s*\.\s*<<0>>)*/.source,[s]),c={keyword:o,punctuation:/[<>()?,.:[\]]/},l=/"(?:\\.|[^\\"])*"/.source;e.languages.qsharp=e.languages.extend("clike",{comment:/\/\/.*/,string:[{pattern:n(/(^|[^$\\])<<0>>/.source,[l]),lookbehind:!0,greedy:!0}],"class-name":[{pattern:n(/(\b(?:as|open)\s+)<<0>>(?=\s*(?:;|as\b))/.source,[u]),lookbehind:!0,inside:c},{pattern:n(/(\bnamespace\s+)<<0>>(?=\s*\{)/.source,[u]),lookbehind:!0,inside:c}],keyword:o,number:/(?:\b0(?:x[\da-f]+|b[01]+|o[0-7]+)|(?:\B\.\d+|\b\d+(?:\.\d*)?)(?:e[-+]?\d+)?)l?\b/i,operator:/\band=|\bor=|\band\b|\bor\b|\bnot\b|<[-=]|[-=]>|>>>=?|<<<=?|\^\^\^=?|\|\|\|=?|&&&=?|w\/=?|~~~|[*\/+\-^=!%]=?/,punctuation:/::|[{}[\];(),.:]/}),e.languages.insertBefore("qsharp","number",{range:{pattern:/\.\./,alias:"operator"}});var 
f=r(t(/\{(?:[^"{}]|<<0>>|<>)*\}/.source,[l]),2);e.languages.insertBefore("qsharp","string",{"interpolation-string":{pattern:n(/\$"(?:\\.|<<0>>|[^\\"{])*"/.source,[f]),greedy:!0,inside:{interpolation:{pattern:n(/((?:^|[^\\])(?:\\\\)*)<<0>>/.source,[f]),lookbehind:!0,inside:{punctuation:/^\{|\}$/,expression:{pattern:/[\s\S]+/,alias:"language-qsharp",inside:e.languages.qsharp}}},string:/[\s\S]+/}}})}(e),e.languages.qs=e.languages.qsharp}e.exports=t,t.displayName="qsharp",t.aliases=["qs"]},29308(e){"use strict";function t(e){e.languages.r={comment:/#.*/,string:{pattern:/(['"])(?:\\.|(?!\1)[^\\\r\n])*\1/,greedy:!0},"percent-operator":{pattern:/%[^%\s]*%/,alias:"operator"},boolean:/\b(?:TRUE|FALSE)\b/,ellipsis:/\.\.(?:\.|\d+)/,number:[/\b(?:NaN|Inf)\b/,/(?:\b0x[\dA-Fa-f]+(?:\.\d*)?|\b\d+(?:\.\d*)?|\B\.\d+)(?:[EePp][+-]?\d+)?[iL]?/],keyword:/\b(?:if|else|repeat|while|function|for|in|next|break|NULL|NA|NA_integer_|NA_real_|NA_complex_|NA_character_)\b/,operator:/->?>?|<(?:=|=!]=?|::?|&&?|\|\|?|[+*\/^$@~]/,punctuation:/[(){}\[\],;]/}}e.exports=t,t.displayName="r",t.aliases=[]},32168(e,t,n){"use strict";var r=n(9997);function i(e){e.register(r),e.languages.racket=e.languages.extend("scheme",{"lambda-parameter":{pattern:/([(\[]lambda\s+[(\[])[^()\[\]'\s]+/,lookbehind:!0}}),e.languages.insertBefore("racket","string",{lang:{pattern:/^#lang.+/m,greedy:!0,alias:"keyword"}}),e.languages.rkt=e.languages.racket}e.exports=i,i.displayName="racket",i.aliases=["rkt"]},5755(e){"use strict";function 
t(e){e.languages.reason=e.languages.extend("clike",{string:{pattern:/"(?:\\(?:\r\n|[\s\S])|[^\\\r\n"])*"/,greedy:!0},"class-name":/\b[A-Z]\w*/,keyword:/\b(?:and|as|assert|begin|class|constraint|do|done|downto|else|end|exception|external|for|fun|function|functor|if|in|include|inherit|initializer|lazy|let|method|module|mutable|new|nonrec|object|of|open|or|private|rec|sig|struct|switch|then|to|try|type|val|virtual|when|while|with)\b/,operator:/\.{3}|:[:=]|\|>|->|=(?:==?|>)?|<=?|>=?|[|^?'#!~`]|[+\-*\/]\.?|\b(?:mod|land|lor|lxor|lsl|lsr|asr)\b/}),e.languages.insertBefore("reason","class-name",{character:{pattern:/'(?:\\x[\da-f]{2}|\\o[0-3][0-7][0-7]|\\\d{3}|\\.|[^'\\\r\n])'/,alias:"string"},constructor:{pattern:/\b[A-Z]\w*\b(?!\s*\.)/,alias:"variable"},label:{pattern:/\b[a-z]\w*(?=::)/,alias:"symbol"}}),delete e.languages.reason.function}e.exports=t,t.displayName="reason",t.aliases=[]},54105(e){"use strict";function t(e){var t,n,r,i,a,o,s,u;t=e,n={pattern:/\\[\\(){}[\]^$+*?|.]/,alias:"escape"},i={pattern:/\.|\\[wsd]|\\p\{[^{}]+\}/i,alias:"class-name"},a={pattern:/\\[wsd]|\\p\{[^{}]+\}/i,alias:"class-name"},s=RegExp((o="(?:[^\\\\-]|"+(r=/\\(?:x[\da-fA-F]{2}|u[\da-fA-F]{4}|u\{[\da-fA-F]+\}|c[a-zA-Z]|0[0-7]{0,2}|[123][0-7]{2}|.)/).source+")")+"-"+o),u={pattern:/(<|')[^<>']+(?=[>']$)/,lookbehind:!0,alias:"variable"},t.languages.regex={charset:{pattern:/((?:^|[^\\])(?:\\\\)*)\[(?:[^\\\]]|\\[\s\S])*\]/,lookbehind:!0,inside:{"charset-negation":{pattern:/(^\[)\^/,lookbehind:!0,alias:"operator"},"charset-punctuation":{pattern:/^\[|\]$/,alias:"punctuation"},range:{pattern:s,inside:{escape:r,"range-punctuation":{pattern:/-/,alias:"operator"}}},"special-escape":n,charclass:a,escape:r}},"special-escape":n,charclass:i,backreference:[{pattern:/\\(?![123][0-7]{2})[1-9]/,alias:"keyword"},{pattern:/\\k<[^<>']+>/,alias:"keyword",inside:{"group-name":u}}],anchor:{pattern:/[$^]|\\[ABbGZz]/,alias:"function"},escape:r,group:[{pattern:/\((?:\?(?:<[^<>']+>|'[^<>']+'|[>:]|:=]=?|!=|\b_\b/,punctuat
ion:/[,;.\[\]{}()]/}}e.exports=t,t.displayName="rego",t.aliases=[]},35108(e){"use strict";function t(e){e.languages.renpy={comment:{pattern:/(^|[^\\])#.+/,lookbehind:!0},string:{pattern:/("""|''')[\s\S]+?\1|("|')(?:\\.|(?!\2)[^\\\r\n])*\2|(?:^#?(?:[0-9a-fA-F]{6}|(?:[0-9a-fA-F]){3})$)/m,greedy:!0},function:/\b[a-z_]\w*(?=\()/i,property:/\b(?:insensitive|idle|hover|selected_idle|selected_hover|background|position|alt|xpos|ypos|pos|xanchor|yanchor|anchor|xalign|yalign|align|xcenter|ycenter|xofsset|yoffset|ymaximum|maximum|xmaximum|xminimum|yminimum|minimum|xsize|ysizexysize|xfill|yfill|area|antialias|black_color|bold|caret|color|first_indent|font|size|italic|justify|kerning|language|layout|line_leading|line_overlap_split|line_spacing|min_width|newline_indent|outlines|rest_indent|ruby_style|slow_cps|slow_cps_multiplier|strikethrough|text_align|underline|hyperlink_functions|vertical|hinting|foreground|left_margin|xmargin|top_margin|bottom_margin|ymargin|left_padding|right_padding|xpadding|top_padding|bottom_padding|ypadding|size_group|child|hover_sound|activate_sound|mouse|focus_mask|keyboard_focus|bar_vertical|bar_invert|bar_resizing|left_gutter|right_gutter|top_gutter|bottom_gutter|left_bar|right_bar|top_bar|bottom_bar|thumb|thumb_shadow|thumb_offset|unscrollable|spacing|first_spacing|box_reverse|box_wrap|order_reverse|fit_first|ysize|thumbnail_width|thumbnail_height|help|text_ypos|text_xpos|idle_color|hover_color|selected_idle_color|selected_hover_color|insensitive_color|alpha|insensitive_background|hover_background|zorder|value|width|xadjustment|xanchoraround|xaround|xinitial|xoffset|xzoom|yadjustment|yanchoraround|yaround|yinitial|yzoom|zoom|ground|height|text_style|text_y_fudge|selected_insensitive|has_sound|has_music|has_voice|focus|hovered|image_style|length|minwidth|mousewheel|offset|prefix|radius|range|right_margin|rotate|rotate_pad|developer|screen_width|screen_height|window_title|name|version|windows_icon|default_fullscreen|default_text_cps|default_afm_time|m
ain_menu_music|sample_sound|enter_sound|exit_sound|save_directory|enter_transition|exit_transition|intra_transition|main_game_transition|game_main_transition|end_splash_transition|end_game_transition|after_load_transition|window_show_transition|window_hide_transition|adv_nvl_transition|nvl_adv_transition|enter_yesno_transition|exit_yesno_transition|enter_replay_transition|exit_replay_transition|say_attribute_transition|directory_name|executable_name|include_update|window_icon|modal|google_play_key|google_play_salt|drag_name|drag_handle|draggable|dragged|droppable|dropped|narrator_menu|action|default_afm_enable|version_name|version_tuple|inside|fadeout|fadein|layers|layer_clipping|linear|scrollbars|side_xpos|side_ypos|side_spacing|edgescroll|drag_joined|drag_raise|drop_shadow|drop_shadow_color|subpixel|easein|easeout|time|crop|auto|update|get_installed_packages|can_update|UpdateVersion|Update|overlay_functions|translations|window_left_padding|show_side_image|show_two_window)\b/,tag:/\b(?:label|image|menu|[hv]box|frame|text|imagemap|imagebutton|bar|vbar|screen|textbutton|buttoscreenn|fixed|grid|input|key|mousearea|side|timer|viewport|window|hotspot|hotbar|self|button|drag|draggroup|tag|mm_menu_frame|nvl|block|parallel)\b|\$/,keyword:/\b(?:as|assert|break|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|pass|print|raise|return|try|while|yield|adjustment|alignaround|allow|angle|around|box_layout|cache|changed|child_size|clicked|clipping|corner1|corner2|default|delay|exclude|scope|slow|slow_abortable|slow_done|sound|style_group|substitute|suffix|transform_anchor|transpose|unhovered|config|theme|mm_root|gm_root|rounded_window|build|disabled_text|disabled|widget_selected|widget_text|widget_hover|widget|updater|behind|call|expression|hide|init|jump|onlayer|python|renpy|scene|set|show|transform|play|queue|stop|pause|define|window|repeat|contains|choice|on|function|event|animation|clockwise|counterclockwise|circles|knot|null|None|ran
dom|has|add|use|fade|dissolve|style|store|id|voice|center|left|right|less_rounded|music|movie|clear|persistent|ui)\b/,boolean:/\b(?:[Tt]rue|[Ff]alse)\b/,number:/(?:\b(?:0[bo])?(?:(?:\d|0x[\da-f])[\da-f]*(?:\.\d*)?)|\B\.\d+)(?:e[+-]?\d+)?j?/i,operator:/[-+%=]=?|!=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]|\b(?:or|and|not|with|at)\b/,punctuation:/[{}[\];(),.:]/},e.languages.rpy=e.languages.renpy}e.exports=t,t.displayName="renpy",t.aliases=["rpy"]},46678(e){"use strict";function t(e){e.languages.rest={table:[{pattern:/(^[\t ]*)(?:\+[=-]+)+\+(?:\r?\n|\r)(?:\1[+|].+[+|](?:\r?\n|\r))+\1(?:\+[=-]+)+\+/m,lookbehind:!0,inside:{punctuation:/\||(?:\+[=-]+)+\+/}},{pattern:/(^[\t ]*)=+ [ =]*=(?:(?:\r?\n|\r)\1.+)+(?:\r?\n|\r)\1=+ [ =]*=(?=(?:\r?\n|\r){2}|\s*$)/m,lookbehind:!0,inside:{punctuation:/[=-]+/}}],"substitution-def":{pattern:/(^[\t ]*\.\. )\|(?:[^|\s](?:[^|]*[^|\s])?)\| [^:]+::/m,lookbehind:!0,inside:{substitution:{pattern:/^\|(?:[^|\s]|[^|\s][^|]*[^|\s])\|/,alias:"attr-value",inside:{punctuation:/^\||\|$/}},directive:{pattern:/( )(?! )[^:]+::/,lookbehind:!0,alias:"function",inside:{punctuation:/::$/}}}},"link-target":[{pattern:/(^[\t ]*\.\. )\[[^\]]+\]/m,lookbehind:!0,alias:"string",inside:{punctuation:/^\[|\]$/}},{pattern:/(^[\t ]*\.\. )_(?:`[^`]+`|(?:[^:\\]|\\.)+):/m,lookbehind:!0,alias:"string",inside:{punctuation:/^_|:$/}}],directive:{pattern:/(^[\t ]*\.\. 
)[^:]+::/m,lookbehind:!0,alias:"function",inside:{punctuation:/::$/}},comment:{pattern:/(^[\t ]*\.\.)(?:(?: .+)?(?:(?:\r?\n|\r).+)+| .+)(?=(?:\r?\n|\r){2}|$)/m,lookbehind:!0},title:[{pattern:/^(([!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~])\2+)(?:\r?\n|\r).+(?:\r?\n|\r)\1$/m,inside:{punctuation:/^[!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~]+|[!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~]+$/,important:/.+/}},{pattern:/(^|(?:\r?\n|\r){2}).+(?:\r?\n|\r)([!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~])\2+(?=\r?\n|\r|$)/,lookbehind:!0,inside:{punctuation:/[!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~]+$/,important:/.+/}}],hr:{pattern:/((?:\r?\n|\r){2})([!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~])\2{3,}(?=(?:\r?\n|\r){2})/,lookbehind:!0,alias:"punctuation"},field:{pattern:/(^[\t ]*):[^:\r\n]+:(?= )/m,lookbehind:!0,alias:"attr-name"},"command-line-option":{pattern:/(^[\t ]*)(?:[+-][a-z\d]|(?:--|\/)[a-z\d-]+)(?:[ =](?:[a-z][\w-]*|<[^<>]+>))?(?:, (?:[+-][a-z\d]|(?:--|\/)[a-z\d-]+)(?:[ =](?:[a-z][\w-]*|<[^<>]+>))?)*(?=(?:\r?\n|\r)? 
{2,}\S)/im,lookbehind:!0,alias:"symbol"},"literal-block":{pattern:/::(?:\r?\n|\r){2}([ \t]+)(?![ \t]).+(?:(?:\r?\n|\r)\1.+)*/,inside:{"literal-block-punctuation":{pattern:/^::/,alias:"punctuation"}}},"quoted-literal-block":{pattern:/::(?:\r?\n|\r){2}([!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~]).*(?:(?:\r?\n|\r)\1.*)*/,inside:{"literal-block-punctuation":{pattern:/^(?:::|([!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~])\1*)/m,alias:"punctuation"}}},"list-bullet":{pattern:/(^[\t ]*)(?:[*+\-•‣⁃]|\(?(?:\d+|[a-z]|[ivxdclm]+)\)|(?:\d+|[a-z]|[ivxdclm]+)\.)(?= )/im,lookbehind:!0,alias:"punctuation"},"doctest-block":{pattern:/(^[\t ]*)>>> .+(?:(?:\r?\n|\r).+)*/m,lookbehind:!0,inside:{punctuation:/^>>>/}},inline:[{pattern:/(^|[\s\-:\/'"<(\[{])(?::[^:]+:`.*?`|`.*?`:[^:]+:|(\*\*?|``?|\|)(?!\s)(?:(?!\2).)*\S\2(?=[\s\-.,:;!?\\\/'")\]}]|$))/m,lookbehind:!0,inside:{bold:{pattern:/(^\*\*).+(?=\*\*$)/,lookbehind:!0},italic:{pattern:/(^\*).+(?=\*$)/,lookbehind:!0},"inline-literal":{pattern:/(^``).+(?=``$)/,lookbehind:!0,alias:"symbol"},role:{pattern:/^:[^:]+:|:[^:]+:$/,alias:"function",inside:{punctuation:/^:|:$/}},"interpreted-text":{pattern:/(^`).+(?=`$)/,lookbehind:!0,alias:"attr-value"},substitution:{pattern:/(^\|).+(?=\|$)/,lookbehind:!0,alias:"attr-value"},punctuation:/\*\*?|``?|\|/}}],link:[{pattern:/\[[^\[\]]+\]_(?=[\s\-.,:;!?\\\/'")\]}]|$)/,alias:"string",inside:{punctuation:/^\[|\]_$/}},{pattern:/(?:\b[a-z\d]+(?:[_.:+][a-z\d]+)*_?_|`[^`]+`_?_|_`[^`]+`)(?=[\s\-.,:;!?\\\/'")\]}]|$)/i,alias:"string",inside:{punctuation:/^_?`|`$|`?_?_$/}}],punctuation:{pattern:/(^[\t ]*)(?:\|(?= |$)|(?:---?|—|\.\.|__)(?= )|\.\.$)/m,lookbehind:!0}}}e.exports=t,t.displayName="rest",t.aliases=[]},47496(e){"use strict";function 
t(e){e.languages.rip={comment:/#.*/,keyword:/(?:=>|->)|\b(?:class|if|else|switch|case|return|exit|try|catch|finally|raise)\b/,builtin:/@|\bSystem\b/,boolean:/\b(?:true|false)\b/,date:/\b\d{4}-\d{2}-\d{2}\b/,time:/\b\d{2}:\d{2}:\d{2}\b/,datetime:/\b\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\b/,character:/\B`[^\s`'",.:;#\/\\()<>\[\]{}]\b/,regex:{pattern:/(^|[^/])\/(?!\/)(?:\[[^\n\r\]]*\]|\\.|[^/\\\r\n\[])+\/(?=\s*(?:$|[\r\n,.;})]))/,lookbehind:!0,greedy:!0},symbol:/:[^\d\s`'",.:;#\/\\()<>\[\]{}][^\s`'",.:;#\/\\()<>\[\]{}]*/,string:{pattern:/("|')(?:\\.|(?!\1)[^\\\r\n])*\1/,greedy:!0},number:/[+-]?\b(?:\d+\.\d+|\d+)\b/,punctuation:/(?:\.{2,3})|[`,.:;=\/\\()<>\[\]{}]/,reference:/[^\d\s`'",.:;#\/\\()<>\[\]{}][^\s`'",.:;#\/\\()<>\[\]{}]*/}}e.exports=t,t.displayName="rip",t.aliases=[]},30527(e){"use strict";function t(e){e.languages.roboconf={comment:/#.*/,keyword:{pattern:/(^|\s)(?:(?:facet|instance of)(?=[ \t]+[\w-]+[ \t]*\{)|(?:external|import)\b)/,lookbehind:!0},component:{pattern:/[\w-]+(?=[ \t]*\{)/,alias:"variable"},property:/[\w.-]+(?=[ \t]*:)/,value:{pattern:/(=[ \t]*(?![ \t]))[^,;]+/,lookbehind:!0,alias:"attr-value"},optional:{pattern:/\(optional\)/,alias:"builtin"},wildcard:{pattern:/(\.)\*/,lookbehind:!0,alias:"operator"},punctuation:/[{},.;:=]/}}e.exports=t,t.displayName="roboconf",t.aliases=[]},5261(e){"use strict";function t(e){!function(e){var t={pattern:/(^[ \t]*| {2}|\t)#.*/m,lookbehind:!0,greedy:!0},n={pattern:/((?:^|[^\\])(?:\\{2})*)[$@&%]\{(?:[^{}\r\n]|\{[^{}\r\n]*\})*\}/,lookbehind:!0,inside:{punctuation:/^[$@&%]\{|\}$/}};function r(e,r){var i={};for(var a in i["section-header"]={pattern:/^ ?\*{3}.+?\*{3}/,alias:"keyword"},r)i[a]=r[a];return i.tag={pattern:/([\r\n](?: {2}|\t)[ \t]*)\[[-\w]+\]/,lookbehind:!0,inside:{punctuation:/\[|\]/}},i.variable=n,i.comment=t,{pattern:RegExp(/^ ?\*{3}[ \t]*[ \t]*\*{3}(?:.|[\r\n](?!\*{3}))*/.source.replace(//g,function(){return e}),"im"),alias:"section",inside:i}}var i={pattern:/(\[Documentation\](?: {2}|\t)[ \t]*)(?![ 
\t]|#)(?:.|(?:\r\n?|\n)[ \t]*\.{3})+/,lookbehind:!0,alias:"string"},a={pattern:/([\r\n] ?)(?!#)(?:\S(?:[ \t]\S)*)+/,lookbehind:!0,alias:"function",inside:{variable:n}},o={pattern:/([\r\n](?: {2}|\t)[ \t]*)(?!\[|\.{3}|#)(?:\S(?:[ \t]\S)*)+/,lookbehind:!0,inside:{variable:n}};e.languages.robotframework={settings:r("Settings",{documentation:{pattern:/([\r\n] ?Documentation(?: {2}|\t)[ \t]*)(?![ \t]|#)(?:.|(?:\r\n?|\n)[ \t]*\.{3})+/,lookbehind:!0,alias:"string"},property:{pattern:/([\r\n] ?)(?!\.{3}|#)(?:\S(?:[ \t]\S)*)+/,lookbehind:!0}}),variables:r("Variables"),"test-cases":r("Test Cases",{"test-name":a,documentation:i,property:o}),keywords:r("Keywords",{"keyword-name":a,documentation:i,property:o}),tasks:r("Tasks",{"task-name":a,documentation:i,property:o}),comment:t},e.languages.robot=e.languages.robotframework}(e)}e.exports=t,t.displayName="robotframework",t.aliases=[]},56939(e){"use strict";function t(e){var t,n;(t=e).languages.ruby=t.languages.extend("clike",{comment:[/#.*/,{pattern:/^=begin\s[\s\S]*?^=end/m,greedy:!0}],"class-name":{pattern:/(\b(?:class)\s+|\bcatch\s+\()[\w.\\]+/i,lookbehind:!0,inside:{punctuation:/[.\\]/}},keyword:/\b(?:alias|and|BEGIN|begin|break|case|class|def|define_method|defined|do|each|else|elsif|END|end|ensure|extend|for|if|in|include|module|new|next|nil|not|or|prepend|protected|private|public|raise|redo|require|rescue|retry|return|self|super|then|throw|undef|unless|until|when|while|yield)\b/}),n={pattern:/#\{[^}]+\}/,inside:{delimiter:{pattern:/^#\{|\}$/,alias:"tag"},rest:t.languages.ruby}},delete 
t.languages.ruby.function,t.languages.insertBefore("ruby","keyword",{regex:[{pattern:RegExp(/%r/.source+"(?:"+[/([^a-zA-Z0-9\s{(\[<])(?:(?!\1)[^\\]|\\[\s\S])*\1/.source,/\((?:[^()\\]|\\[\s\S])*\)/.source,/\{(?:[^#{}\\]|#(?:\{[^}]+\})?|\\[\s\S])*\}/.source,/\[(?:[^\[\]\\]|\\[\s\S])*\]/.source,/<(?:[^<>\\]|\\[\s\S])*>/.source].join("|")+")"+/[egimnosux]{0,6}/.source),greedy:!0,inside:{interpolation:n}},{pattern:/(^|[^/])\/(?!\/)(?:\[[^\r\n\]]+\]|\\.|[^[/\\\r\n])+\/[egimnosux]{0,6}(?=\s*(?:$|[\r\n,.;})#]))/,lookbehind:!0,greedy:!0,inside:{interpolation:n}}],variable:/[@$]+[a-zA-Z_]\w*(?:[?!]|\b)/,symbol:{pattern:/(^|[^:]):[a-zA-Z_]\w*(?:[?!]|\b)/,lookbehind:!0},"method-definition":{pattern:/(\bdef\s+)[\w.]+/,lookbehind:!0,inside:{function:/\w+$/,rest:t.languages.ruby}}}),t.languages.insertBefore("ruby","number",{builtin:/\b(?:Array|Bignum|Binding|Class|Continuation|Dir|Exception|FalseClass|File|Stat|Fixnum|Float|Hash|Integer|IO|MatchData|Method|Module|NilClass|Numeric|Object|Proc|Range|Regexp|String|Struct|TMS|Symbol|ThreadGroup|Thread|Time|TrueClass)\b/,constant:/\b[A-Z]\w*(?:[?!]|\b)/}),t.languages.ruby.string=[{pattern:RegExp(/%[qQiIwWxs]?/.source+"(?:"+[/([^a-zA-Z0-9\s{(\[<])(?:(?!\1)[^\\]|\\[\s\S])*\1/.source,/\((?:[^()\\]|\\[\s\S])*\)/.source,/\{(?:[^#{}\\]|#(?:\{[^}]+\})?|\\[\s\S])*\}/.source,/\[(?:[^\[\]\\]|\\[\s\S])*\]/.source,/<(?:[^<>\\]|\\[\s\S])*>/.source].join("|")+")"),greedy:!0,inside:{interpolation:n}},{pattern:/("|')(?:#\{[^}]+\}|#(?!\{)|\\(?:\r\n|[\s\S])|(?!\1)[^\\#\r\n])*\1/,greedy:!0,inside:{interpolation:n}},{pattern:/<<[-~]?([a-z_]\w*)[\r\n](?:.*[\r\n])*?[\t ]*\1/i,alias:"heredoc-string",greedy:!0,inside:{delimiter:{pattern:/^<<[-~]?[a-z_]\w*|[a-z_]\w*$/i,alias:"symbol",inside:{punctuation:/^<<[-~]?/}},interpolation:n}},{pattern:/<<[-~]?'([a-z_]\w*)'[\r\n](?:.*[\r\n])*?[\t 
]*\1/i,alias:"heredoc-string",greedy:!0,inside:{delimiter:{pattern:/^<<[-~]?'[a-z_]\w*'|[a-z_]\w*$/i,alias:"symbol",inside:{punctuation:/^<<[-~]?'|'$/}}}}],t.languages.rb=t.languages.ruby}e.exports=t,t.displayName="ruby",t.aliases=["rb"]},83648(e){"use strict";function t(e){!function(e){for(var t=/\/\*(?:[^*/]|\*(?!\/)|\/(?!\*)|)*\*\//.source,n=0;n<2;n++)t=t.replace(//g,function(){return t});t=t.replace(//g,function(){return/[^\s\S]/.source}),e.languages.rust={comment:[{pattern:RegExp(/(^|[^\\])/.source+t),lookbehind:!0,greedy:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0,greedy:!0}],string:{pattern:/b?"(?:\\[\s\S]|[^\\"])*"|b?r(#*)"(?:[^"]|"(?!\1))*"\1/,greedy:!0},char:{pattern:/b?'(?:\\(?:x[0-7][\da-fA-F]|u\{(?:[\da-fA-F]_*){1,6}\}|.)|[^\\\r\n\t'])'/,greedy:!0,alias:"string"},attribute:{pattern:/#!?\[(?:[^\[\]"]|"(?:\\[\s\S]|[^\\"])*")*\]/,greedy:!0,alias:"attr-name",inside:{string:null}},"closure-params":{pattern:/([=(,:]\s*|\bmove\s*)\|[^|]*\||\|[^|]*\|(?=\s*(?:\{|->))/,lookbehind:!0,greedy:!0,inside:{"closure-punctuation":{pattern:/^\||\|$/,alias:"punctuation"},rest:null}},"lifetime-annotation":{pattern:/'\w+/,alias:"symbol"},"fragment-specifier":{pattern:/(\$\w+:)[a-z]+/,lookbehind:!0,alias:"punctuation"},variable:/\$\w+/,"function-definition":{pattern:/(\bfn\s+)\w+/,lookbehind:!0,alias:"function"},"type-definition":{pattern:/(\b(?:enum|struct|union)\s+)\w+/,lookbehind:!0,alias:"class-name"},"module-declaration":[{pattern:/(\b(?:crate|mod)\s+)[a-z][a-z_\d]*/,lookbehind:!0,alias:"namespace"},{pattern:/(\b(?:crate|self|super)\s*)::\s*[a-z][a-z_\d]*\b(?:\s*::(?:\s*[a-z][a-z_\d]*\s*::)*)?/,lookbehind:!0,alias:"namespace",inside:{punctuation:/::/}}],keyword:[/\b(?:abstract|as|async|await|become|box|break|const|continue|crate|do|dyn|else|enum|extern|final|fn|for|if|impl|in|let|loop|macro|match|mod|move|mut|override|priv|pub|ref|return|self|Self|static|struct|super|trait|try|type|typeof|union|unsafe|unsized|use|virtual|where|while|yield)\b/,/\b(?:[ui](?:8|16|32|64|12
8|size)|f(?:32|64)|bool|char|str)\b/],function:/\b[a-z_]\w*(?=\s*(?:::\s*<|\())/,macro:{pattern:/\b\w+!/,alias:"property"},constant:/\b[A-Z_][A-Z_\d]+\b/,"class-name":/\b[A-Z]\w*\b/,namespace:{pattern:/(?:\b[a-z][a-z_\d]*\s*::\s*)*\b[a-z][a-z_\d]*\s*::(?!\s*<)/,inside:{punctuation:/::/}},number:/\b(?:0x[\dA-Fa-f](?:_?[\dA-Fa-f])*|0o[0-7](?:_?[0-7])*|0b[01](?:_?[01])*|(?:(?:\d(?:_?\d)*)?\.)?\d(?:_?\d)*(?:[Ee][+-]?\d+)?)(?:_?(?:[iu](?:8|16|32|64|size)?|f32|f64))?\b/,boolean:/\b(?:false|true)\b/,punctuation:/->|\.\.=|\.{1,3}|::|[{}[\];(),:]/,operator:/[-+*\/%!^]=?|=[=>]?|&[&=]?|\|[|=]?|<>?=?|[@?]/},e.languages.rust["closure-params"].inside.rest=e.languages.rust,e.languages.rust.attribute.inside.string=e.languages.rust.string}(e)}e.exports=t,t.displayName="rust",t.aliases=[]},16009(e){"use strict";function t(e){var t,n,r,i,a,o,s,u,c,l,f,d,h,p,b,m,g,v,y;t=e,n=/(?:"(?:""|[^"])*"(?!")|'(?:''|[^'])*'(?!'))/.source,r=/\b(?:\d[\da-f]*x|\d+(?:\.\d+)?(?:e[+-]?\d+)?)\b/i,i={pattern:RegExp(n+"[bx]"),alias:"number"},a={pattern:/&[a-z_]\w*/i},o={pattern:/((?:^|\s|=|\())%(?:ABORT|BY|CMS|COPY|DISPLAY|DO|ELSE|END|EVAL|GLOBAL|GO|GOTO|IF|INC|INCLUDE|INDEX|INPUT|KTRIM|LENGTH|LET|LIST|LOCAL|PUT|QKTRIM|QSCAN|QSUBSTR|QSYSFUNC|QUPCASE|RETURN|RUN|SCAN|SUBSTR|SUPERQ|SYMDEL|SYMGLOBL|SYMLOCAL|SYMEXIST|SYSCALL|SYSEVALF|SYSEXEC|SYSFUNC|SYSGET|SYSRPUT|THEN|TO|TSO|UNQUOTE|UNTIL|UPCASE|WHILE|WINDOW)\b/i,lookbehind:!0,alias:"keyword"},s={pattern:/(^|\s)(?:proc\s+\w+|quit|run|data(?!=))\b/i,alias:"keyword",lookbehind:!0},u=[/\/\*[\s\S]*?\*\//,{pattern:/(^[ 
\t]*|;\s*)\*[^;]*;/m,lookbehind:!0}],c={pattern:RegExp(n),greedy:!0},d={function:f={pattern:/%?\b\w+(?=\()/,alias:"keyword"},"arg-value":{pattern:/(=\s*)[A-Z\.]+/i,lookbehind:!0},operator:/=/,"macro-variable":a,arg:{pattern:/[A-Z]+/i,alias:"keyword"},number:r,"numeric-constant":i,punctuation:l=/[$%@.(){}\[\];,\\]/,string:c},h={pattern:/\b(?:format|put)\b=?[\w'$.]+/im,inside:{keyword:/^(?:format|put)(?==)/i,equals:/=/,format:{pattern:/(?:\w|\$\d)+\.\d?/i,alias:"number"}}},p={pattern:/\b(?:format|put)\s+[\w']+(?:\s+[$.\w]+)+(?=;)/i,inside:{keyword:/^(?:format|put)/i,format:{pattern:/[\w$]+\.\d?/,alias:"number"}}},b={pattern:/((?:^|\s)=?)(?:catname|checkpoint execute_always|dm|endsas|filename|footnote|%include|libname|%list|lock|missing|options|page|resetline|%run|sasfile|skip|sysecho|title\d?)\b/i,lookbehind:!0,alias:"keyword"},m={pattern:/(^|\s)(?:submit(?:\s+(?:load|parseonly|norun))?|endsubmit)\b/i,lookbehind:!0,alias:"keyword"},g=/accessControl|cdm|aggregation|aStore|ruleMining|audio|autotune|bayesianNetClassifier|bioMedImage|boolRule|builtins|cardinality|sccasl|clustering|copula|countreg|dataDiscovery|dataPreprocess|dataSciencePilot|dataStep|decisionTree|deepLearn|deepNeural|varReduce|simSystem|ds2|deduplication|ecm|entityRes|espCluster|explainModel|factmac|fastKnn|fcmpact|fedSql|freqTab|gam|gleam|graphSemiSupLearn|gVarCluster|hiddenMarkovModel|hyperGroup|image|iml|ica|kernalPca|langModel|ldaTopic|sparseML|mlTools|mixed|modelPublishing|mbc|network|optNetwork|neuralNet|nonlinear|nmf|nonParametricBayes|optimization|panel|pls|percentile|pca|phreg|qkb|qlim|quantreg|recommend|tsReconcile|deepRnn|regression|reinforcementLearn|robustPca|sampling|sparkEmbeddedProcess|search(?:Analytics)?|sentimentAnalysis|sequence|configuration|session(?:Prop)?|severity|simple|smartData|sandwich|spatialreg|stabilityMonitoring|spc|loadStreams|svDataDescription|svm|table|conditionalRandomFields|text(?:Rule(?:Develop|Score)|Mining|Parse|Topic|Util|Filters|Frequency)|tsInfo|timeData|transpos
e|uniTimeSeries/.source,v={pattern:RegExp(/(^|\s)(?:action\s+)?(?:)\.[a-z]+\b[^;]+/.source.replace(//g,function(){return g}),"i"),lookbehind:!0,inside:{keyword:RegExp(/(?:)\.[a-z]+\b/.source.replace(//g,function(){return g}),"i"),action:{pattern:/(?:action)/i,alias:"keyword"},comment:u,function:f,"arg-value":d["arg-value"],operator:d.operator,argument:d.arg,number:r,"numeric-constant":i,punctuation:l,string:c}},y={pattern:/((?:^|\s)=?)(?:after|analysis|and|array|barchart|barwidth|begingraph|by|call|cas|cbarline|cfill|class(?:lev)?|close|column|computed?|contains|continue|data(?==)|define|delete|describe|document|do\s+over|do|dol|drop|dul|end(?:source|comp)?|entryTitle|else|eval(?:uate)?|exec(?:ute)?|exit|fill(?:attrs)?|file(?:name)?|flist|fnc|function(?:list)?|goto|global|group(?:by)?|headline|headskip|histogram|if|infile|keep|keylabel|keyword|label|layout|leave|legendlabel|length|libname|loadactionset|merge|midpoints|name|noobs|nowd|_?null_|ods|options|or|otherwise|out(?:put)?|over(?:lay)?|plot|put|print|raise|ranexp|rannor|rbreak|retain|return|select|set|session|sessref|source|statgraph|sum|summarize|table|temp|terminate|then\s+do|then|title\d?|to|var|when|where|xaxisopts|yaxisopts|y2axisopts)\b/i,lookbehind:!0},t.languages.sas={datalines:{pattern:/^([ \t]*)(?:(?:data)?lines|cards);[\s\S]+?^[ \t]*;/im,lookbehind:!0,alias:"string",inside:{keyword:{pattern:/^(?:(?:data)?lines|cards)/i},punctuation:/;/}},"proc-sql":{pattern:/(^proc\s+(?:fed)?sql(?:\s+[\w|=]+)?;)[\s\S]+?(?=^(?:proc\s+\w+|quit|run|data);|(?![\s\S]))/im,lookbehind:!0,inside:{sql:{pattern:RegExp(/^[ \t]*(?:select|alter\s+table|(?:create|describe|drop)\s+(?:index|table(?:\s+constraints)?|view)|create\s+unique\s+index|insert\s+into|update)(?:|[^;"'])+;/.source.replace(//g,function(){return 
n}),"im"),alias:"language-sql",inside:t.languages.sql},"global-statements":b,"sql-statements":{pattern:/(^|\s)(?:disconnect\s+from|exec(?:ute)?|begin|commit|rollback|reset|validate)\b/i,lookbehind:!0,alias:"keyword"},number:r,"numeric-constant":i,punctuation:l,string:c}},"proc-groovy":{pattern:/(^proc\s+groovy(?:\s+[\w|=]+)?;)[\s\S]+?(?=^(?:proc\s+\w+|quit|run|data);|(?![\s\S]))/im,lookbehind:!0,inside:{comment:u,groovy:{pattern:RegExp(/(^[ \t]*submit(?:\s+(?:load|parseonly|norun))?)(?:|[^"'])+?(?=endsubmit;)/.source.replace(//g,function(){return n}),"im"),lookbehind:!0,alias:"language-groovy",inside:t.languages.groovy},keyword:y,"submit-statement":m,"global-statements":b,number:r,"numeric-constant":i,punctuation:l,string:c}},"proc-lua":{pattern:/(^proc\s+lua(?:\s+[\w|=]+)?;)[\s\S]+?(?=^(?:proc\s+\w+|quit|run|data);|(?![\s\S]))/im,lookbehind:!0,inside:{comment:u,lua:{pattern:RegExp(/(^[ \t]*submit(?:\s+(?:load|parseonly|norun))?)(?:|[^"'])+?(?=endsubmit;)/.source.replace(//g,function(){return n}),"im"),lookbehind:!0,alias:"language-lua",inside:t.languages.lua},keyword:y,"submit-statement":m,"global-statements":b,number:r,"numeric-constant":i,punctuation:l,string:c}},"proc-cas":{pattern:/(^proc\s+cas(?:\s+[\w|=]+)?;)[\s\S]+?(?=^(?:proc\s+\w+|quit|data);|(?![\s\S]))/im,lookbehind:!0,inside:{comment:u,"statement-var":{pattern:/((?:^|\s)=?)saveresult\s[^;]+/im,lookbehind:!0,inside:{statement:{pattern:/^saveresult\s+\S+/i,inside:{keyword:/^(?:saveresult)/i}},rest:d}},"cas-actions":v,statement:{pattern:/((?:^|\s)=?)(?:default|(?:un)?set|on|output|upload)[^;]+/im,lookbehind:!0,inside:d},step:s,keyword:y,function:f,format:h,altformat:p,"global-statements":b,number:r,"numeric-constant":i,punctuation:l,string:c}},"proc-args":{pattern:RegExp(/(^proc\s+\w+\s+)(?!\s)(?:[^;"']|)+;/.source.replace(//g,function(){return 
n}),"im"),lookbehind:!0,inside:d},"macro-keyword":o,"macro-variable":a,"macro-string-functions":{pattern:/((?:^|\s|=))%(?:NRBQUOTE|NRQUOTE|NRSTR|BQUOTE|QUOTE|STR)\(.*?(?:[^%]\))/i,lookbehind:!0,inside:{function:{pattern:/%(?:NRBQUOTE|NRQUOTE|NRSTR|BQUOTE|QUOTE|STR)/i,alias:"keyword"},"macro-keyword":o,"macro-variable":a,"escaped-char":{pattern:/%['"()<>=¬^~;,#]/i},punctuation:l}},"macro-declaration":{pattern:/^%macro[^;]+(?=;)/im,inside:{keyword:/%macro/i}},"macro-end":{pattern:/^%mend[^;]+(?=;)/im,inside:{keyword:/%mend/i}},macro:{pattern:/%_\w+(?=\()/,alias:"keyword"},input:{pattern:/\binput\s[-\w\s/*.$&]+;/i,inside:{input:{alias:"keyword",pattern:/^input/i},comment:u,number:r,"numeric-constant":i}},"options-args":{pattern:/(^options)[-'"|/\\<>*+=:()\w\s]*(?=;)/im,lookbehind:!0,inside:d},"cas-actions":v,comment:u,function:f,format:h,altformat:p,"numeric-constant":i,datetime:{pattern:RegExp(n+"(?:dt?|t)"),alias:"number"},string:c,step:s,keyword:y,"operator-keyword":{pattern:/\b(?:eq|ne|gt|lt|ge|le|in|not)\b/i,alias:"operator"},number:r,operator:/\*\*?|\|\|?|!!?|¦¦?|<[>=]?|>[<=]?|[-+\/=&]|[~¬^]=?/i,punctuation:l}}e.exports=t,t.displayName="sas",t.aliases=[]},41720(e){"use strict";function t(e){var t,n,r;(t=e).languages.sass=t.languages.extend("css",{comment:{pattern:/^([ \t]*)\/[\/*].*(?:(?:\r?\n|\r)\1[ \t].+)*/m,lookbehind:!0}}),t.languages.insertBefore("sass","atrule",{"atrule-line":{pattern:/^(?:[ \t]*)[@+=].+/m,inside:{atrule:/(?:@[\w-]+|[+=])/m}}}),delete t.languages.sass.atrule,n=/\$[-\w]+|#\{\$[-\w]+\}/,r=[/[+*\/%]|[=!]=|<=?|>=?|\b(?:and|or|not)\b/,{pattern:/(\s)-(?=\s)/,lookbehind:!0}],t.languages.insertBefore("sass","property",{"variable-line":{pattern:/^[ \t]*\$.+/m,inside:{punctuation:/:/,variable:n,operator:r}},"property-line":{pattern:/^[ \t]*(?:[^:\s]+ *:.*|:[^:\s].*)/m,inside:{property:[/[^:\s]+(?=\s*:)/,{pattern:/(:)[^:\s]+/,lookbehind:!0}],punctuation:/:/,variable:n,operator:r,important:t.languages.sass.important}}}),delete 
t.languages.sass.property,delete t.languages.sass.important,t.languages.insertBefore("sass","punctuation",{selector:{pattern:/([ \t]*)\S(?:,[^,\r\n]+|[^,\r\n]*)(?:,[^,\r\n]+)*(?:,(?:\r?\n|\r)\1[ \t]+\S(?:,[^,\r\n]+|[^,\r\n]*)(?:,[^,\r\n]+)*)*/,lookbehind:!0}})}e.exports=t,t.displayName="sass",t.aliases=[]},6054(e,t,n){"use strict";var r=n(15909);function i(e){e.register(r),e.languages.scala=e.languages.extend("java",{"triple-quoted-string":{pattern:/"""[\s\S]*?"""/,greedy:!0,alias:"string"},string:{pattern:/("|')(?:\\.|(?!\1)[^\\\r\n])*\1/,greedy:!0},keyword:/<-|=>|\b(?:abstract|case|catch|class|def|do|else|extends|final|finally|for|forSome|if|implicit|import|lazy|match|new|null|object|override|package|private|protected|return|sealed|self|super|this|throw|trait|try|type|val|var|while|with|yield)\b/,number:/\b0x(?:[\da-f]*\.)?[\da-f]+|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e\d+)?[dfl]?/i,builtin:/\b(?:String|Int|Long|Short|Byte|Boolean|Double|Float|Char|Any|AnyRef|AnyVal|Unit|Nothing)\b/,symbol:/'[^\d\s\\]\w*/}),delete e.languages.scala["class-name"],delete e.languages.scala.function}e.exports=i,i.displayName="scala",i.aliases=[]},9997(e){"use strict";function 
t(e){!function(e){e.languages.scheme={comment:/;.*|#;\s*(?:\((?:[^()]|\([^()]*\))*\)|\[(?:[^\[\]]|\[[^\[\]]*\])*\])|#\|(?:[^#|]|#(?!\|)|\|(?!#)|#\|(?:[^#|]|#(?!\|)|\|(?!#))*\|#)*\|#/,string:{pattern:/"(?:[^"\\]|\\.)*"/,greedy:!0},symbol:{pattern:/'[^()\[\]#'\s]+/,greedy:!0},character:{pattern:/#\\(?:[ux][a-fA-F\d]+\b|[-a-zA-Z]+\b|[\uD800-\uDBFF][\uDC00-\uDFFF]|\S)/,greedy:!0,alias:"string"},"lambda-parameter":[{pattern:/((?:^|[^'`#])[(\[]lambda\s+)(?:[^|()\[\]'\s]+|\|(?:[^\\|]|\\.)*\|)/,lookbehind:!0},{pattern:/((?:^|[^'`#])[(\[]lambda\s+[(\[])[^()\[\]']+/,lookbehind:!0}],keyword:{pattern:/((?:^|[^'`#])[(\[])(?:begin|case(?:-lambda)?|cond(?:-expand)?|define(?:-library|-macro|-record-type|-syntax|-values)?|defmacro|delay(?:-force)?|do|else|export|except|guard|if|import|include(?:-ci|-library-declarations)?|lambda|let(?:rec)?(?:-syntax|-values|\*)?|let\*-values|only|parameterize|prefix|(?:quasi-?)?quote|rename|set!|syntax-(?:case|rules)|unless|unquote(?:-splicing)?|when)(?=[()\[\]\s]|$)/,lookbehind:!0},builtin:{pattern:/((?:^|[^'`#])[(\[])(?:abs|and|append|apply|assoc|ass[qv]|binary-port\?|boolean=?\?|bytevector(?:-append|-copy|-copy!|-length|-u8-ref|-u8-set!|\?)?|caar|cadr|call-with-(?:current-continuation|port|values)|call\/cc|car|cdar|cddr|cdr|ceiling|char(?:->integer|-ready\?|\?|<\?|<=\?|=\?|>\?|>=\?)|close-(?:input-port|output-port|port)|complex\?|cons|current-(?:error|input|output)-port|denominator|dynamic-wind|eof-object\??|eq\?|equal\?|eqv\?|error|error-object(?:-irritants|-message|\?)|eval|even\?|exact(?:-integer-sqrt|-integer\?|\?)?|expt|features|file-error\?|floor(?:-quotient|-remainder|\/)?|flush-output-port|for-each|gcd|get-output-(?:bytevector|string)|inexact\??|input-port(?:-open\?|\?)|integer(?:->char|\?)|lcm|length|list(?:->string|->vector|-copy|-ref|-set!|-tail|\?)?|make-(?:bytevector|list|parameter|string|vector)|map|max|member|memq|memv|min|modulo|negative\?|newline|not|null\?|number(?:->string|\?)|numerator|odd\?|open-(?:input|output)-(?:bytevecto
r|string)|or|output-port(?:-open\?|\?)|pair\?|peek-char|peek-u8|port\?|positive\?|procedure\?|quotient|raise|raise-continuable|rational\?|rationalize|read-(?:bytevector|bytevector!|char|error\?|line|string|u8)|real\?|remainder|reverse|round|set-c[ad]r!|square|string(?:->list|->number|->symbol|->utf8|->vector|-append|-copy|-copy!|-fill!|-for-each|-length|-map|-ref|-set!|\?|<\?|<=\?|=\?|>\?|>=\?)?|substring|symbol(?:->string|\?|=\?)|syntax-error|textual-port\?|truncate(?:-quotient|-remainder|\/)?|u8-ready\?|utf8->string|values|vector(?:->list|->string|-append|-copy|-copy!|-fill!|-for-each|-length|-map|-ref|-set!|\?)?|with-exception-handler|write-(?:bytevector|char|string|u8)|zero\?)(?=[()\[\]\s]|$)/,lookbehind:!0},operator:{pattern:/((?:^|[^'`#])[(\[])(?:[-+*%/]|[<>]=?|=>?)(?=[()\[\]\s]|$)/,lookbehind:!0},number:{pattern:RegExp(t({"":/\d+(?:\/\d+)|(?:\d+(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?/.source,"":/[+-]?|[+-](?:inf|nan)\.0/.source,"":/[+-](?:|(?:inf|nan)\.0)?i/.source,"":/(?:@|)?|/.source,"":/(?:#d(?:#[ei])?|#[ei](?:#d)?)?/.source,"":/[0-9a-f]+(?:\/[0-9a-f]+)?/.source,"":/[+-]?|[+-](?:inf|nan)\.0/.source,"":/[+-](?:|(?:inf|nan)\.0)?i/.source,"":/(?:@|)?|/.source,"":/#[box](?:#[ei])?|(?:#[ei])?#[box]/.source,"":/(^|[()\[\]\s])(?:|)(?=[()\[\]\s]|$)/.source}),"i"),lookbehind:!0},boolean:{pattern:/(^|[()\[\]\s])#(?:[ft]|false|true)(?=[()\[\]\s]|$)/,lookbehind:!0},function:{pattern:/((?:^|[^'`#])[(\[])(?:[^|()\[\]'\s]+|\|(?:[^\\|]|\\.)*\|)(?=[()\[\]\s]|$)/,lookbehind:!0},identifier:{pattern:/(^|[()\[\]\s])\|(?:[^\\|]|\\.)*\|(?=[()\[\]\s]|$)/,lookbehind:!0,greedy:!0},punctuation:/[()\[\]']/};function t(e){for(var t in e)e[t]=e[t].replace(/<[\w\s]+>/g,function(t){return"(?:"+e[t].trim()+")"});return e[t]}}(e)}e.exports=t,t.displayName="scheme",t.aliases=[]},24296(e){"use strict";function 
t(e){e.languages.scss=e.languages.extend("css",{comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|\/\/.*)/,lookbehind:!0},atrule:{pattern:/@[\w-](?:\([^()]+\)|[^()\s]|\s+(?!\s))*?(?=\s+[{;])/,inside:{rule:/@[\w-]+/}},url:/(?:[-a-z]+-)?url(?=\()/i,selector:{pattern:/(?=\S)[^@;{}()]?(?:[^@;{}()\s]|\s+(?!\s)|#\{\$[-\w]+\})+(?=\s*\{(?:\}|\s|[^}][^:{}]*[:{][^}]))/m,inside:{parent:{pattern:/&/,alias:"important"},placeholder:/%[-\w]+/,variable:/\$[-\w]+|#\{\$[-\w]+\}/}},property:{pattern:/(?:[-\w]|\$[-\w]|#\{\$[-\w]+\})+(?=\s*:)/,inside:{variable:/\$[-\w]+|#\{\$[-\w]+\}/}}}),e.languages.insertBefore("scss","atrule",{keyword:[/@(?:if|else(?: if)?|forward|for|each|while|import|use|extend|debug|warn|mixin|include|function|return|content)\b/i,{pattern:/( )(?:from|through)(?= )/,lookbehind:!0}]}),e.languages.insertBefore("scss","important",{variable:/\$[-\w]+|#\{\$[-\w]+\}/}),e.languages.insertBefore("scss","function",{"module-modifier":{pattern:/\b(?:as|with|show|hide)\b/i,alias:"keyword"},placeholder:{pattern:/%[-\w]+/,alias:"selector"},statement:{pattern:/\B!(?:default|optional)\b/i,alias:"keyword"},boolean:/\b(?:true|false)\b/,null:{pattern:/\bnull\b/,alias:"keyword"},operator:{pattern:/(\s)(?:[-+*\/%]|[=!]=|<=?|>=?|and|or|not)(?=\s)/,lookbehind:!0}}),e.languages.scss.atrule.inside.rest=e.languages.scss}e.exports=t,t.displayName="scss",t.aliases=[]},49246(e,t,n){"use strict";var r=n(6979);function i(e){var t,n;e.register(r),n=[/"(?:\\[\s\S]|\$\([^)]+\)|\$(?!\()|`[^`]+`|[^"\\`$])*"/.source,/'[^']*'/.source,/\$'(?:[^'\\]|\\[\s\S])*'/.source,/<<-?\s*(["']?)(\w+)\1\s[\s\S]*?[\r\n]\2/.source].join("|"),(t=e).languages["shell-session"]={command:{pattern:RegExp(/^(?:[^\s@:$#*!/\\]+@[^\r\n@:$#*!/\\]+(?::[^\0-\x1F$#*?"<>:;|]+)?|[^\0-\x1F$#*?"<>@:;|]+)?/.source+/[$#]/.source+/(?:[^\\\r\n'"<$]|\\(?:[^\r]|\r\n?)|\$(?!')|<>)+/.source.replace(/<>/g,function(){return 
n}),"m"),greedy:!0,inside:{info:{pattern:/^[^#$]+/,alias:"punctuation",inside:{user:/^[^\s@:$#*!/\\]+@[^\r\n@:$#*!/\\]+/,punctuation:/:/,path:/[\s\S]+/}},bash:{pattern:/(^[$#]\s*)\S[\s\S]*/,lookbehind:!0,alias:"language-bash",inside:t.languages.bash},"shell-symbol":{pattern:/^[$#]/,alias:"important"}}},output:/.(?:.*(?:[\r\n]|.$))*/},t.languages["sh-session"]=t.languages.shellsession=t.languages["shell-session"]}e.exports=i,i.displayName="shellSession",i.aliases=[]},/* refractor grammar: Smali (Android dalvik assembly, module 18890) */18890(e){"use strict";function t(e){e.languages.smali={comment:/#.*/,string:{pattern:/"(?:[^\r\n\\"]|\\.)*"|'(?:[^\r\n\\']|\\(?:.|u[\da-fA-F]{4}))'/,greedy:!0},"class-name":{pattern:/(^|[^L])L(?:(?:\w+|`[^`\r\n]*`)\/)*(?:[\w$]+|`[^`\r\n]*`)(?=\s*;)/,lookbehind:!0,inside:{"class-name":{pattern:/(^L|\/)(?:[\w$]+|`[^`\r\n]*`)$/,lookbehind:!0},namespace:{pattern:/^(L)(?:(?:\w+|`[^`\r\n]*`)\/)+/,lookbehind:!0,inside:{punctuation:/\//}},builtin:/^L/}},builtin:[{pattern:/([();\[])[BCDFIJSVZ]+/,lookbehind:!0},{pattern:/([\w$>]:)[BCDFIJSVZ]/,lookbehind:!0}],keyword:[{pattern:/(\.end\s+)[\w-]+/,lookbehind:!0},{pattern:/(^|[^\w.-])\.(?!\d)[\w-]+/,lookbehind:!0},{pattern:/(^|[^\w.-])(?:abstract|annotation|bridge|constructor|enum|final|interface|private|protected|public|runtime|static|synthetic|system|transient)(?![\w.-])/,lookbehind:!0}],function:{pattern:/(^|[^\w.-])(?:\w+|<[\w$-]+>)(?=\()/,lookbehind:!0},field:{pattern:/[\w$]+(?=:)/,alias:"variable"},register:{pattern:/(^|[^\w.-])[vp]\d(?![\w.-])/,lookbehind:!0,alias:"variable"},boolean:{pattern:/(^|[^\w.-])(?:true|false)(?![\w.-])/,lookbehind:!0},number:{pattern:/(^|[^/\w.-])-?(?:NAN|INFINITY|0x(?:[\dA-F]+(?:\.[\dA-F]*)?|\.[\dA-F]+)(?:p[+-]?[\dA-F]+)?|(?:\d+(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?)[dflst]?(?![\w.-])/i,lookbehind:!0},label:{pattern:/(:)\w+/,lookbehind:!0,alias:"property"},operator:/->|\.\.|[\[=]/,punctuation:/[{}(),;:]/}}e.exports=t,t.displayName="smali",t.aliases=[]},/* refractor grammar: Smalltalk (module 11037) */11037(e){"use strict";function 
/* t(e): Smalltalk token definitions (comments are double-quoted, strings single-quoted) */t(e){e.languages.smalltalk={comment:/"(?:""|[^"])*"/,character:{pattern:/\$./,alias:"string"},string:/'(?:''|[^'])*'/,symbol:/#[\da-z]+|#(?:-|([+\/\\*~<>=@%|&?!])\1?)|#(?=\()/i,"block-arguments":{pattern:/(\[\s*):[^\[|]*\|/,lookbehind:!0,inside:{variable:/:[\da-z]+/i,punctuation:/\|/}},"temporary-variables":{pattern:/\|[^|]+\|/,inside:{variable:/[\da-z]+/i,punctuation:/\|/}},keyword:/\b(?:nil|true|false|self|super|new)\b/,number:[/\d+r-?[\dA-Z]+(?:\.[\dA-Z]+)?(?:e-?\d+)?/,/\b\d+(?:\.\d+)?(?:e-?\d+)?/],operator:/[<=]=?|:=|~[~=]|\/\/?|\\\\|>[>=]?|[!^+\-*&|,@]/,punctuation:/[.;:?\[\](){}]/}}e.exports=t,t.displayName="smalltalk",t.aliases=[]},/* refractor grammar: Smarty templates (module 64020); depends on markup-templating (module 93205) and tokenizes via before/after-tokenize hooks with a {literal} state flag */64020(e,t,n){"use strict";var r=n(93205);function i(e){var t;e.register(r),(t=e).languages.smarty={comment:/\{\*[\s\S]*?\*\}/,delimiter:{pattern:/^\{|\}$/i,alias:"punctuation"},string:/(["'])(?:\\.|(?!\1)[^\\\r\n])*\1/,number:/\b0x[\dA-Fa-f]+|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[Ee][-+]?\d+)?/,variable:[/\$(?!\d)\w+/,/#(?!\d)\w+#/,{pattern:/(\.|->)(?!\d)\w+/,lookbehind:!0},{pattern:/(\[)(?!\d)\w+(?=\])/,lookbehind:!0}],function:[{pattern:/(\|\s*)@?(?!\d)\w+/,lookbehind:!0},/^\/?(?!\d)\w+/,/(?!\d)\w+(?=\()/],"attr-name":{pattern:/\w+\s*=\s*(?:(?!\d)\w+)?/,inside:{variable:{pattern:/(=\s*)(?!\d)\w+/,lookbehind:!0},operator:/=/}},punctuation:[/[\[\]().,:`]|->/],operator:[/[+\-*\/%]|==?=?|[!<>]=?|&&|\|\|?/,/\bis\s+(?:not\s+)?(?:div|even|odd)(?:\s+by)?\b/,/\b(?:eq|neq?|gt|lt|gt?e|lt?e|not|mod|or|and)\b/],keyword:/\b(?:false|off|on|no|true|yes)\b/},t.hooks.add("before-tokenize",function(e){var n=/\{\*[\s\S]*?\*\}|\{[\s\S]+?\}/g,r="{literal}",i="{/literal}",a=!1;t.languages["markup-templating"].buildPlaceholders(e,"smarty",n,function(e){return e===i&&(a=!1),!a&&(e===r&&(a=!0),!0)})}),t.hooks.add("after-tokenize",function(e){t.languages["markup-templating"].tokenizePlaceholders(e,"smarty")})}e.exports=i,i.displayName="smarty",i.aliases=[]},/* refractor grammar: Standard ML (module 49760) */49760(e){"use strict";function 
t,n;n=/\b(?:abstype|and|andalso|as|case|datatype|do|else|end|eqtype|exception|fn|fun|functor|handle|if|in|include|infix|infixr|let|local|nonfix|of|op|open|orelse|raise|rec|sharing|sig|signature|struct|structure|then|type|val|where|while|with|withtype)\b/i,(t=e).languages.sml={comment:/\(\*(?:[^*(]|\*(?!\))|\((?!\*)|\(\*(?:[^*(]|\*(?!\))|\((?!\*))*\*\))*\*\)/,string:{pattern:/#?"(?:[^"\\]|\\.)*"/,greedy:!0},"class-name":[{pattern:RegExp(/((?:^|[^:]):\s*)(?:\s*(?:(?:\*|->)\s*|,\s*(?:(?=)|(?!)\s+)))*/.source.replace(//g,function(){return/\s*(?:[*,]|->)/.source}).replace(//g,function(){return/(?:'[\w']*||\((?:[^()]|\([^()]*\))*\)|\{(?:[^{}]|\{[^{}]*\})*\})(?:\s+)*/.source}).replace(//g,function(){return/(?!)[a-z\d_][\w'.]*/.source}).replace(//g,function(){return n.source}),"i"),lookbehind:!0,greedy:!0,inside:null},{pattern:/((?:^|[^\w'])(?:datatype|exception|functor|signature|structure|type)\s+)[a-z_][\w'.]*/i,lookbehind:!0}],function:{pattern:/((?:^|[^\w'])fun\s+)[a-z_][\w'.]*/i,lookbehind:!0},keyword:n,variable:{pattern:/(^|[^\w'])'[\w']*/,lookbehind:!0},number:/~?\b(?:\d+(?:\.\d+)?(?:e~?\d+)?|0x[\da-f]+)\b/i,word:{pattern:/\b0w(?:\d+|x[\da-f]+)\b/i,alias:"constant"},boolean:/\b(?:false|true)\b/i,operator:/\.\.\.|:[>=:]|=>?|->|[<>]=?|[!+\-*/^#|@~]/,punctuation:/[(){}\[\].:,;]/},t.languages.sml["class-name"][0].inside=t.languages.sml,t.languages.smlnj=t.languages.sml}e.exports=t,t.displayName="sml",t.aliases=["smlnj"]},/* refractor grammar: Solidity (module 33351); extends the clike base grammar */33351(e){"use strict";function 
/* t(e): Solidity tokens on top of clike, plus builtin integer/bytes types and pragma version numbers inserted via insertBefore */t(e){e.languages.solidity=e.languages.extend("clike",{"class-name":{pattern:/(\b(?:contract|enum|interface|library|new|struct|using)\s+)(?!\d)[\w$]+/,lookbehind:!0},keyword:/\b(?:_|anonymous|as|assembly|assert|break|calldata|case|constant|constructor|continue|contract|default|delete|do|else|emit|enum|event|external|for|from|function|if|import|indexed|inherited|interface|internal|is|let|library|mapping|memory|modifier|new|payable|pragma|private|public|pure|require|returns?|revert|selfdestruct|solidity|storage|struct|suicide|switch|this|throw|using|var|view|while)\b/,operator:/=>|->|:=|=:|\*\*|\+\+|--|\|\||&&|<<=?|>>=?|[-+*/%^&|<>!=]=?|[~?]/}),e.languages.insertBefore("solidity","keyword",{builtin:/\b(?:address|bool|string|u?int(?:8|16|24|32|40|48|56|64|72|80|88|96|104|112|120|128|136|144|152|160|168|176|184|192|200|208|216|224|232|240|248|256)?|byte|bytes(?:[1-9]|[12]\d|3[0-2])?)\b/}),e.languages.insertBefore("solidity","number",{version:{pattern:/([<>]=?|\^)\d+\.\d+\.\d+\b/,lookbehind:!0,alias:"number"}}),e.languages.sol=e.languages.solidity}e.exports=t,t.displayName="solidity",t.aliases=["sol"]},/* refractor grammar: Visual Studio solution file (module 13570); n holds a shared GUID sub-pattern */13570(e){"use strict";function t(e){var t,n;n={pattern:/\{[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}\}/i,alias:"constant",inside:{punctuation:/[{}]/}},(t=e).languages["solution-file"]={comment:{pattern:/#.*/,greedy:!0},string:{pattern:/"[^"\r\n]*"|'[^'\r\n]*'/,greedy:!0,inside:{guid:n}},object:{pattern:/^([ \t]*)(?:([A-Z]\w*)\b(?=.*(?:\r\n?|\n)(?:\1[ \t].*(?:\r\n?|\n))*\1End\2(?=[ \t]*$))|End[A-Z]\w*(?=[ \t]*$))/m,lookbehind:!0,greedy:!0,alias:"keyword"},property:{pattern:/^([ \t]*)(?!\s)[^\r\n"#=()]*[^\s"#=()](?=\s*=)/m,lookbehind:!0,inside:{guid:n}},guid:n,number:/\b\d+(?:\.\d+)*\b/,boolean:/\b(?:FALSE|TRUE)\b/,operator:/=/,punctuation:/[(),]/},t.languages.sln=t.languages["solution-file"]}e.exports=t,t.displayName="solutionFile",t.aliases=[]},/* refractor grammar: Soy / Closure Templates (module 38181); depends on markup-templating (module 93205) */38181(e,t,n){"use strict";var r=n(93205);function i(e){var 
t,n,i;e.register(r),n=/(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,i=/\b\d+(?:\.\d+)?(?:[eE][+-]?\d+)?\b|\b0x[\dA-F]+\b/,(t=e).languages.soy={comment:[/\/\*[\s\S]*?\*\//,{pattern:/(\s)\/\/.*/,lookbehind:!0,greedy:!0}],"command-arg":{pattern:/(\{+\/?\s*(?:alias|call|delcall|delpackage|deltemplate|namespace|template)\s+)\.?[\w.]+/,lookbehind:!0,alias:"string",inside:{punctuation:/\./}},parameter:{pattern:/(\{+\/?\s*@?param\??\s+)\.?[\w.]+/,lookbehind:!0,alias:"variable"},keyword:[{pattern:/(\{+\/?[^\S\r\n]*)(?:\\[nrt]|alias|call|case|css|default|delcall|delpackage|deltemplate|else(?:if)?|fallbackmsg|for(?:each)?|if(?:empty)?|lb|let|literal|msg|namespace|nil|@?param\??|rb|sp|switch|template|xid)/,lookbehind:!0},/\b(?:any|as|attributes|bool|css|float|in|int|js|html|list|map|null|number|string|uri)\b/],delimiter:{pattern:/^\{+\/?|\/?\}+$/,alias:"punctuation"},property:/\w+(?==)/,variable:{pattern:/\$[^\W\d]\w*(?:\??(?:\.\w+|\[[^\]]+\]))*/,inside:{string:{pattern:n,greedy:!0},number:i,punctuation:/[\[\].?]/}},string:{pattern:n,greedy:!0},function:[/\w+(?=\()/,{pattern:/(\|[^\S\r\n]*)\w+/,lookbehind:!0}],boolean:/\b(?:true|false)\b/,number:i,operator:/\?:?|<=?|>=?|==?|!=|[+*/%-]|\b(?:and|not|or)\b/,punctuation:/[{}()\[\]|.,:]/},t.hooks.add("before-tokenize",function(e){var n=/\{\{.+?\}\}|\{.+?\}|\s\/\/.*|\/\*[\s\S]*?\*\//g,r="{literal}",i="{/literal}",a=!1;t.languages["markup-templating"].buildPlaceholders(e,"soy",n,function(e){return e===i&&(a=!1),!a&&(e===r&&(a=!0),!0)})}),t.hooks.add("after-tokenize",function(e){t.languages["markup-templating"].tokenizePlaceholders(e,"soy")})}e.exports=i,i.displayName="soy",i.aliases=[]},/* refractor grammar: SPARQL (module 98774); registers and extends the turtle grammar (module 24691) */98774(e,t,n){"use strict";var r=n(24691);function 
/* i(e): SPARQL = turtle grammar plus SPARQL keywords, built-in functions (lookahead for call parens), and GRAPH/BASE/PREFIX; aliased as "rq" */i(e){e.register(r),e.languages.sparql=e.languages.extend("turtle",{boolean:/\b(?:true|false)\b/i,variable:{pattern:/[?$]\w+/,greedy:!0}}),e.languages.insertBefore("sparql","punctuation",{keyword:[/\b(?:A|ADD|ALL|AS|ASC|ASK|BNODE|BY|CLEAR|CONSTRUCT|COPY|CREATE|DATA|DEFAULT|DELETE|DESC|DESCRIBE|DISTINCT|DROP|EXISTS|FILTER|FROM|GROUP|HAVING|INSERT|INTO|LIMIT|LOAD|MINUS|MOVE|NAMED|NOT|NOW|OFFSET|OPTIONAL|ORDER|RAND|REDUCED|SELECT|SEPARATOR|SERVICE|SILENT|STRUUID|UNION|USING|UUID|VALUES|WHERE)\b/i,/\b(?:ABS|AVG|BIND|BOUND|CEIL|COALESCE|CONCAT|CONTAINS|COUNT|DATATYPE|DAY|ENCODE_FOR_URI|FLOOR|GROUP_CONCAT|HOURS|IF|IRI|isBLANK|isIRI|isLITERAL|isNUMERIC|isURI|LANG|LANGMATCHES|LCASE|MAX|MD5|MIN|MINUTES|MONTH|ROUND|REGEX|REPLACE|sameTerm|SAMPLE|SECONDS|SHA1|SHA256|SHA384|SHA512|STR|STRAFTER|STRBEFORE|STRDT|STRENDS|STRLANG|STRLEN|STRSTARTS|SUBSTR|SUM|TIMEZONE|TZ|UCASE|URI|YEAR)\b(?=\s*\()/i,/\b(?:GRAPH|BASE|PREFIX)\b/i]}),e.languages.rq=e.languages.sparql}e.exports=i,i.displayName="sparql",i.aliases=["rq"]},/* refractor grammar: Splunk SPL (module 22855) */22855(e){"use strict";function 
/* t(e): Splunk SPL token definitions - backtick comment() comments, SPL search-command keywords, and property/function via lookahead */t(e){e.languages["splunk-spl"]={comment:/`comment\("(?:\\.|[^\\"])*"\)`/,string:{pattern:/"(?:\\.|[^\\"])*"/,greedy:!0},keyword:/\b(?:abstract|accum|addcoltotals|addinfo|addtotals|analyzefields|anomalies|anomalousvalue|anomalydetection|append|appendcols|appendcsv|appendlookup|appendpipe|arules|associate|audit|autoregress|bin|bucket|bucketdir|chart|cluster|cofilter|collect|concurrency|contingency|convert|correlate|datamodel|dbinspect|dedup|delete|delta|diff|erex|eval|eventcount|eventstats|extract|fieldformat|fields|fieldsummary|filldown|fillnull|findtypes|folderize|foreach|format|from|gauge|gentimes|geom|geomfilter|geostats|head|highlight|history|iconify|input|inputcsv|inputlookup|iplocation|join|kmeans|kv|kvform|loadjob|localize|localop|lookup|makecontinuous|makemv|makeresults|map|mcollect|metadata|metasearch|meventcollect|mstats|multikv|multisearch|mvcombine|mvexpand|nomv|outlier|outputcsv|outputlookup|outputtext|overlap|pivot|predict|rangemap|rare|regex|relevancy|reltime|rename|replace|rest|return|reverse|rex|rtorder|run|savedsearch|script|scrub|search|searchtxn|selfjoin|sendemail|set|setfields|sichart|sirare|sistats|sitimechart|sitop|sort|spath|stats|strcat|streamstats|table|tags|tail|timechart|timewrap|top|transaction|transpose|trendline|tscollect|tstats|typeahead|typelearner|typer|union|uniq|untable|where|x11|xmlkv|xmlunescape|xpath|xyseries)\b/i,"operator-word":{pattern:/\b(?:and|as|by|not|or|xor)\b/i,alias:"operator"},function:/\b\w+(?=\s*\()/,property:/\b\w+(?=\s*=(?!=))/,date:{pattern:/\b\d{1,2}\/\d{1,2}\/\d{1,4}(?:(?::\d{1,2}){3})?\b/,alias:"number"},number:/\b\d+(?:\.\d+)?\b/,boolean:/\b(?:f|false|t|true)\b/i,operator:/[<>=]=?|[-+*/%|]/,punctuation:/[()[\],]/}}e.exports=t,t.displayName="splunkSpl",t.aliases=[]},/* refractor grammar: SQF / Arma scripting (module 29611); extends clike - definition continues past this view */29611(e){"use strict";function 
t(e){e.languages.sqf=e.languages.extend("clike",{string:{pattern:/"(?:(?:"")?[^"])*"(?!")|'(?:[^'])*'/,greedy:!0},keyword:/\b(?:breakOut|breakTo|call|case|catch|default|do|echo|else|execVM|execFSM|exitWith|for|forEach|forEachMember|forEachMemberAgent|forEachMemberTeam|from|goto|if|nil|preprocessFile|preprocessFileLineNumbers|private|scopeName|spawn|step|switch|then|throw|to|try|while|with)\b/i,boolean:/\b(?:true|false)\b/i,function:/\b(?:abs|accTime|acos|action|actionIDs|actionKeys|actionKeysImages|actionKeysNames|actionKeysNamesArray|actionName|actionParams|activateAddons|activatedAddons|activateKey|add3DENConnection|add3DENEventHandler|add3DENLayer|addAction|addBackpack|addBackpackCargo|addBackpackCargoGlobal|addBackpackGlobal|addCamShake|addCuratorAddons|addCuratorCameraArea|addCuratorEditableObjects|addCuratorEditingArea|addCuratorPoints|addEditorObject|addEventHandler|addForce|addForceGeneratorRTD|addGoggles|addGroupIcon|addHandgunItem|addHeadgear|addItem|addItemCargo|addItemCargoGlobal|addItemPool|addItemToBackpack|addItemToUniform|addItemToVest|addLiveStats|addMagazine|addMagazineAmmoCargo|addMagazineCargo|addMagazineCargoGlobal|addMagazineGlobal|addMagazinePool|addMagazines|addMagazineTurret|addMenu|addMenuItem|addMissionEventHandler|addMPEventHandler|addMusicEventHandler|addOwnedMine|addPlayerScores|addPrimaryWeaponItem|addPublicVariableEventHandler|addRating|addResources|addScore|addScoreSide|addSecondaryWeaponItem|addSwitchableUnit|addTeamMember|addToRemainsCollector|addTorque|addUniform|addVehicle|addVest|addWaypoint|addWeapon|addWeaponCargo|addWeaponCargoGlobal|addWeaponGlobal|addWeaponItem|addWeaponPool|addWeaponTurret|admin|agent|agents|AGLToASL|aimedAtTarget|aimPos|airDensityCurveRTD|airDensityRTD|airplaneThrottle|airportSide|AISFinishHeal|alive|all3DENEntities|allAirports|allControls|allCurators|allCutLayers|allDead|allDeadMen|allDisplays|allGroups|allMapMarkers|allMines|allMissionObjects|allow3DMode|allowCrewInImmobile|allowCuratorLogicIgnoreAreas|
allowDamage|allowDammage|allowFileOperations|allowFleeing|allowGetIn|allowSprint|allPlayers|allSimpleObjects|allSites|allTurrets|allUnits|allUnitsUAV|allVariables|ammo|ammoOnPylon|animate|animateBay|animateDoor|animatePylon|animateSource|animationNames|animationPhase|animationSourcePhase|animationState|append|apply|armoryPoints|arrayIntersect|asin|ASLToAGL|ASLToATL|assert|assignAsCargo|assignAsCargoIndex|assignAsCommander|assignAsDriver|assignAsGunner|assignAsTurret|assignCurator|assignedCargo|assignedCommander|assignedDriver|assignedGunner|assignedItems|assignedTarget|assignedTeam|assignedVehicle|assignedVehicleRole|assignItem|assignTeam|assignToAirport|atan|atan2|atg|ATLToASL|attachedObject|attachedObjects|attachedTo|attachObject|attachTo|attackEnabled|backpack|backpackCargo|backpackContainer|backpackItems|backpackMagazines|backpackSpaceFor|behaviour|benchmark|binocular|blufor|boundingBox|boundingBoxReal|boundingCenter|briefingName|buildingExit|buildingPos|buldozer_EnableRoadDiag|buldozer_IsEnabledRoadDiag|buldozer_LoadNewRoads|buldozer_reloadOperMap|buttonAction|buttonSetAction|cadetMode|callExtension|camCommand|camCommit|camCommitPrepared|camCommitted|camConstuctionSetParams|camCreate|camDestroy|cameraEffect|cameraEffectEnableHUD|cameraInterest|cameraOn|cameraView|campaignConfigFile|camPreload|camPreloaded|camPrepareBank|camPrepareDir|camPrepareDive|camPrepareFocus|camPrepareFov|camPrepareFovRange|camPreparePos|camPrepareRelPos|camPrepareTarget|camSetBank|camSetDir|camSetDive|camSetFocus|camSetFov|camSetFovRange|camSetPos|camSetRelPos|camSetTarget|camTarget|camUseNVG|canAdd|canAddItemToBackpack|canAddItemToUniform|canAddItemToVest|cancelSimpleTaskDestination|canFire|canMove|canSlingLoad|canStand|canSuspend|canTriggerDynamicSimulation|canUnloadInCombat|canVehicleCargo|captive|captiveNum|cbChecked|cbSetChecked|ceil|channelEnabled|cheatsEnabled|checkAIFeature|checkVisibility|civilian|className|clear3DENAttribute|clear3DENInventory|clearAllItemsFromBackpack|clearBac
kpackCargo|clearBackpackCargoGlobal|clearForcesRTD|clearGroupIcons|clearItemCargo|clearItemCargoGlobal|clearItemPool|clearMagazineCargo|clearMagazineCargoGlobal|clearMagazinePool|clearOverlay|clearRadio|clearVehicleInit|clearWeaponCargo|clearWeaponCargoGlobal|clearWeaponPool|clientOwner|closeDialog|closeDisplay|closeOverlay|collapseObjectTree|collect3DENHistory|collectiveRTD|combatMode|commandArtilleryFire|commandChat|commander|commandFire|commandFollow|commandFSM|commandGetOut|commandingMenu|commandMove|commandRadio|commandStop|commandSuppressiveFire|commandTarget|commandWatch|comment|commitOverlay|compile|compileFinal|completedFSM|composeText|configClasses|configFile|configHierarchy|configName|configNull|configProperties|configSourceAddonList|configSourceMod|configSourceModList|confirmSensorTarget|connectTerminalToUAV|controlNull|controlsGroupCtrl|copyFromClipboard|copyToClipboard|copyWaypoints|cos|count|countEnemy|countFriendly|countSide|countType|countUnknown|create3DENComposition|create3DENEntity|createAgent|createCenter|createDialog|createDiaryLink|createDiaryRecord|createDiarySubject|createDisplay|createGearDialog|createGroup|createGuardedPoint|createLocation|createMarker|createMarkerLocal|createMenu|createMine|createMissionDisplay|createMPCampaignDisplay|createSimpleObject|createSimpleTask|createSite|createSoundSource|createTask|createTeam|createTrigger|createUnit|createVehicle|createVehicleCrew|createVehicleLocal|crew|ctAddHeader|ctAddRow|ctClear|ctCurSel|ctData|ctFindHeaderRows|ctFindRowHeader|ctHeaderControls|ctHeaderCount|ctRemoveHeaders|ctRemoveRows|ctrlActivate|ctrlAddEventHandler|ctrlAngle|ctrlAutoScrollDelay|ctrlAutoScrollRewind|ctrlAutoScrollSpeed|ctrlChecked|ctrlClassName|ctrlCommit|ctrlCommitted|ctrlCreate|ctrlDelete|ctrlEnable|ctrlEnabled|ctrlFade|ctrlHTMLLoaded|ctrlIDC|ctrlIDD|ctrlMapAnimAdd|ctrlMapAnimClear|ctrlMapAnimCommit|ctrlMapAnimDone|ctrlMapCursor|ctrlMapMouseOver|ctrlMapScale|ctrlMapScreenToWorld|ctrlMapWorldToScreen|ctrlModel|ctrlModel
DirAndUp|ctrlModelScale|ctrlParent|ctrlParentControlsGroup|ctrlPosition|ctrlRemoveAllEventHandlers|ctrlRemoveEventHandler|ctrlScale|ctrlSetActiveColor|ctrlSetAngle|ctrlSetAutoScrollDelay|ctrlSetAutoScrollRewind|ctrlSetAutoScrollSpeed|ctrlSetBackgroundColor|ctrlSetChecked|ctrlSetDisabledColor|ctrlSetEventHandler|ctrlSetFade|ctrlSetFocus|ctrlSetFont|ctrlSetFontH1|ctrlSetFontH1B|ctrlSetFontH2|ctrlSetFontH2B|ctrlSetFontH3|ctrlSetFontH3B|ctrlSetFontH4|ctrlSetFontH4B|ctrlSetFontH5|ctrlSetFontH5B|ctrlSetFontH6|ctrlSetFontH6B|ctrlSetFontHeight|ctrlSetFontHeightH1|ctrlSetFontHeightH2|ctrlSetFontHeightH3|ctrlSetFontHeightH4|ctrlSetFontHeightH5|ctrlSetFontHeightH6|ctrlSetFontHeightSecondary|ctrlSetFontP|ctrlSetFontPB|ctrlSetFontSecondary|ctrlSetForegroundColor|ctrlSetModel|ctrlSetModelDirAndUp|ctrlSetModelScale|ctrlSetPixelPrecision|ctrlSetPosition|ctrlSetScale|ctrlSetStructuredText|ctrlSetText|ctrlSetTextColor|ctrlSetTextColorSecondary|ctrlSetTextSecondary|ctrlSetTooltip|ctrlSetTooltipColorBox|ctrlSetTooltipColorShade|ctrlSetTooltipColorText|ctrlShow|ctrlShown|ctrlText|ctrlTextHeight|ctrlTextSecondary|ctrlTextWidth|ctrlType|ctrlVisible|ctRowControls|ctRowCount|ctSetCurSel|ctSetData|ctSetHeaderTemplate|ctSetRowTemplate|ctSetValue|ctValue|curatorAddons|curatorCamera|curatorCameraArea|curatorCameraAreaCeiling|curatorCoef|curatorEditableObjects|curatorEditingArea|curatorEditingAreaType|curatorMouseOver|curatorPoints|curatorRegisteredObjects|curatorSelected|curatorWaypointCost|current3DENOperation|currentChannel|currentCommand|currentMagazine|currentMagazineDetail|currentMagazineDetailTurret|currentMagazineTurret|currentMuzzle|currentNamespace|currentTask|currentTasks|currentThrowable|currentVisionMode|currentWaypoint|currentWeapon|currentWeaponMode|currentWeaponTurret|currentZeroing|cursorObject|cursorTarget|customChat|customRadio|cutFadeOut|cutObj|cutRsc|cutText|damage|date|dateToNumber|daytime|deActivateKey|debriefingText|debugFSM|debugLog|deg|delete3DENEntities|deleteAt|delete
Center|deleteCollection|deleteEditorObject|deleteGroup|deleteGroupWhenEmpty|deleteIdentity|deleteLocation|deleteMarker|deleteMarkerLocal|deleteRange|deleteResources|deleteSite|deleteStatus|deleteTeam|deleteVehicle|deleteVehicleCrew|deleteWaypoint|detach|detectedMines|diag_activeMissionFSMs|diag_activeScripts|diag_activeSQFScripts|diag_activeSQSScripts|diag_captureFrame|diag_captureFrameToFile|diag_captureSlowFrame|diag_codePerformance|diag_drawMode|diag_dynamicSimulationEnd|diag_enable|diag_enabled|diag_fps|diag_fpsMin|diag_frameNo|diag_lightNewLoad|diag_list|diag_log|diag_logSlowFrame|diag_mergeConfigFile|diag_recordTurretLimits|diag_setLightNew|diag_tickTime|diag_toggle|dialog|diarySubjectExists|didJIP|didJIPOwner|difficulty|difficultyEnabled|difficultyEnabledRTD|difficultyOption|direction|directSay|disableAI|disableCollisionWith|disableConversation|disableDebriefingStats|disableMapIndicators|disableNVGEquipment|disableRemoteSensors|disableSerialization|disableTIEquipment|disableUAVConnectability|disableUserInput|displayAddEventHandler|displayCtrl|displayNull|displayParent|displayRemoveAllEventHandlers|displayRemoveEventHandler|displaySetEventHandler|dissolveTeam|distance|distance2D|distanceSqr|distributionRegion|do3DENAction|doArtilleryFire|doFire|doFollow|doFSM|doGetOut|doMove|doorPhase|doStop|doSuppressiveFire|doTarget|doWatch|drawArrow|drawEllipse|drawIcon|drawIcon3D|drawLine|drawLine3D|drawLink|drawLocation|drawPolygon|drawRectangle|drawTriangle|driver|drop|dynamicSimulationDistance|dynamicSimulationDistanceCoef|dynamicSimulationEnabled|dynamicSimulationSystemEnabled|east|edit3DENMissionAttributes|editObject|editorSetEventHandler|effectiveCommander|emptyPositions|enableAI|enableAIFeature|enableAimPrecision|enableAttack|enableAudioFeature|enableAutoStartUpRTD|enableAutoTrimRTD|enableCamShake|enableCaustics|enableChannel|enableCollisionWith|enableCopilot|enableDebriefingStats|enableDiagLegend|enableDynamicSimulation|enableDynamicSimulationSystem|enableEndDialog
|enableEngineArtillery|enableEnvironment|enableFatigue|enableGunLights|enableInfoPanelComponent|enableIRLasers|enableMimics|enablePersonTurret|enableRadio|enableReload|enableRopeAttach|enableSatNormalOnDetail|enableSaving|enableSentences|enableSimulation|enableSimulationGlobal|enableStamina|enableStressDamage|enableTeamSwitch|enableTraffic|enableUAVConnectability|enableUAVWaypoints|enableVehicleCargo|enableVehicleSensor|enableWeaponDisassembly|endl|endLoadingScreen|endMission|engineOn|enginesIsOnRTD|enginesPowerRTD|enginesRpmRTD|enginesTorqueRTD|entities|environmentEnabled|estimatedEndServerTime|estimatedTimeLeft|evalObjectArgument|everyBackpack|everyContainer|exec|execEditorScript|exp|expectedDestination|exportJIPMessages|eyeDirection|eyePos|face|faction|fadeMusic|fadeRadio|fadeSound|fadeSpeech|failMission|fillWeaponsFromPool|find|findCover|findDisplay|findEditorObject|findEmptyPosition|findEmptyPositionReady|findIf|findNearestEnemy|finishMissionInit|finite|fire|fireAtTarget|firstBackpack|flag|flagAnimationPhase|flagOwner|flagSide|flagTexture|fleeing|floor|flyInHeight|flyInHeightASL|fog|fogForecast|fogParams|forceAddUniform|forceAtPositionRTD|forcedMap|forceEnd|forceFlagTexture|forceFollowRoad|forceGeneratorRTD|forceMap|forceRespawn|forceSpeed|forceWalk|forceWeaponFire|forceWeatherChange|forgetTarget|format|formation|formationDirection|formationLeader|formationMembers|formationPosition|formationTask|formatText|formLeader|freeLook|fromEditor|fuel|fullCrew|gearIDCAmmoCount|gearSlotAmmoCount|gearSlotData|get3DENActionState|get3DENAttribute|get3DENCamera|get3DENConnections|get3DENEntity|get3DENEntityID|get3DENGrid|get3DENIconsVisible|get3DENLayerEntities|get3DENLinesVisible|get3DENMissionAttribute|get3DENMouseOver|get3DENSelected|getAimingCoef|getAllEnvSoundControllers|getAllHitPointsDamage|getAllOwnedMines|getAllSoundControllers|getAmmoCargo|getAnimAimPrecision|getAnimSpeedCoef|getArray|getArtilleryAmmo|getArtilleryComputerSettings|getArtilleryETA|getAssignedCuratorLo
gic|getAssignedCuratorUnit|getBackpackCargo|getBleedingRemaining|getBurningValue|getCameraViewDirection|getCargoIndex|getCenterOfMass|getClientState|getClientStateNumber|getCompatiblePylonMagazines|getConnectedUAV|getContainerMaxLoad|getCursorObjectParams|getCustomAimCoef|getDammage|getDescription|getDir|getDirVisual|getDLCAssetsUsage|getDLCAssetsUsageByName|getDLCs|getDLCUsageTime|getEditorCamera|getEditorMode|getEditorObjectScope|getElevationOffset|getEngineTargetRpmRTD|getEnvSoundController|getFatigue|getFieldManualStartPage|getForcedFlagTexture|getFriend|getFSMVariable|getFuelCargo|getGroupIcon|getGroupIconParams|getGroupIcons|getHideFrom|getHit|getHitIndex|getHitPointDamage|getItemCargo|getMagazineCargo|getMarkerColor|getMarkerPos|getMarkerSize|getMarkerType|getMass|getMissionConfig|getMissionConfigValue|getMissionDLCs|getMissionLayerEntities|getMissionLayers|getModelInfo|getMousePosition|getMusicPlayedTime|getNumber|getObjectArgument|getObjectChildren|getObjectDLC|getObjectMaterials|getObjectProxy|getObjectTextures|getObjectType|getObjectViewDistance|getOxygenRemaining|getPersonUsedDLCs|getPilotCameraDirection|getPilotCameraPosition|getPilotCameraRotation|getPilotCameraTarget|getPlateNumber|getPlayerChannel|getPlayerScores|getPlayerUID|getPlayerUIDOld|getPos|getPosASL|getPosASLVisual|getPosASLW|getPosATL|getPosATLVisual|getPosVisual|getPosWorld|getPylonMagazines|getRelDir|getRelPos|getRemoteSensorsDisabled|getRepairCargo|getResolution|getRotorBrakeRTD|getShadowDistance|getShotParents|getSlingLoad|getSoundController|getSoundControllerResult|getSpeed|getStamina|getStatValue|getSuppression|getTerrainGrid|getTerrainHeightASL|getText|getTotalDLCUsageTime|getTrimOffsetRTD|getUnitLoadout|getUnitTrait|getUserMFDText|getUserMFDValue|getVariable|getVehicleCargo|getWeaponCargo|getWeaponSway|getWingsOrientationRTD|getWingsPositionRTD|getWPPos|glanceAt|globalChat|globalRadio|goggles|group|groupChat|groupFromNetId|groupIconSelectable|groupIconsVisible|groupId|groupOwner|gro
upRadio|groupSelectedUnits|groupSelectUnit|grpNull|gunner|gusts|halt|handgunItems|handgunMagazine|handgunWeapon|handsHit|hasInterface|hasPilotCamera|hasWeapon|hcAllGroups|hcGroupParams|hcLeader|hcRemoveAllGroups|hcRemoveGroup|hcSelected|hcSelectGroup|hcSetGroup|hcShowBar|hcShownBar|headgear|hideBody|hideObject|hideObjectGlobal|hideSelection|hint|hintC|hintCadet|hintSilent|hmd|hostMission|htmlLoad|HUDMovementLevels|humidity|image|importAllGroups|importance|in|inArea|inAreaArray|incapacitatedState|independent|inflame|inflamed|infoPanel|infoPanelComponentEnabled|infoPanelComponents|infoPanels|inGameUISetEventHandler|inheritsFrom|initAmbientLife|inPolygon|inputAction|inRangeOfArtillery|insertEditorObject|intersect|is3DEN|is3DENMultiplayer|isAbleToBreathe|isAgent|isAimPrecisionEnabled|isArray|isAutoHoverOn|isAutonomous|isAutoStartUpEnabledRTD|isAutotest|isAutoTrimOnRTD|isBleeding|isBurning|isClass|isCollisionLightOn|isCopilotEnabled|isDamageAllowed|isDedicated|isDLCAvailable|isEngineOn|isEqualTo|isEqualType|isEqualTypeAll|isEqualTypeAny|isEqualTypeArray|isEqualTypeParams|isFilePatchingEnabled|isFlashlightOn|isFlatEmpty|isForcedWalk|isFormationLeader|isGroupDeletedWhenEmpty|isHidden|isInRemainsCollector|isInstructorFigureEnabled|isIRLaserOn|isKeyActive|isKindOf|isLaserOn|isLightOn|isLocalized|isManualFire|isMarkedForCollection|isMultiplayer|isMultiplayerSolo|isNil|isNull|isNumber|isObjectHidden|isObjectRTD|isOnRoad|isPipEnabled|isPlayer|isRealTime|isRemoteExecuted|isRemoteExecutedJIP|isServer|isShowing3DIcons|isSimpleObject|isSprintAllowed|isStaminaEnabled|isSteamMission|isStreamFriendlyUIEnabled|isStressDamageEnabled|isText|isTouchingGround|isTurnedOut|isTutHintsEnabled|isUAVConnectable|isUAVConnected|isUIContext|isUniformAllowed|isVehicleCargo|isVehicleRadarOn|isVehicleSensorEnabled|isWalking|isWeaponDeployed|isWeaponRested|itemCargo|items|itemsWithMagazines|join|joinAs|joinAsSilent|joinSilent|joinString|kbAddDatabase|kbAddDatabaseTargets|kbAddTopic|kbHasTopic|kbReact|k
bRemoveTopic|kbTell|kbWasSaid|keyImage|keyName|knowsAbout|land|landAt|landResult|language|laserTarget|lbAdd|lbClear|lbColor|lbColorRight|lbCurSel|lbData|lbDelete|lbIsSelected|lbPicture|lbPictureRight|lbSelection|lbSetColor|lbSetColorRight|lbSetCurSel|lbSetData|lbSetPicture|lbSetPictureColor|lbSetPictureColorDisabled|lbSetPictureColorSelected|lbSetPictureRight|lbSetPictureRightColor|lbSetPictureRightColorDisabled|lbSetPictureRightColorSelected|lbSetSelectColor|lbSetSelectColorRight|lbSetSelected|lbSetText|lbSetTextRight|lbSetTooltip|lbSetValue|lbSize|lbSort|lbSortByValue|lbText|lbTextRight|lbValue|leader|leaderboardDeInit|leaderboardGetRows|leaderboardInit|leaderboardRequestRowsFriends|leaderboardRequestRowsGlobal|leaderboardRequestRowsGlobalAroundUser|leaderboardsRequestUploadScore|leaderboardsRequestUploadScoreKeepBest|leaderboardState|leaveVehicle|libraryCredits|libraryDisclaimers|lifeState|lightAttachObject|lightDetachObject|lightIsOn|lightnings|limitSpeed|linearConversion|lineBreak|lineIntersects|lineIntersectsObjs|lineIntersectsSurfaces|lineIntersectsWith|linkItem|list|listObjects|listRemoteTargets|listVehicleSensors|ln|lnbAddArray|lnbAddColumn|lnbAddRow|lnbClear|lnbColor|lnbColorRight|lnbCurSelRow|lnbData|lnbDeleteColumn|lnbDeleteRow|lnbGetColumnsPosition|lnbPicture|lnbPictureRight|lnbSetColor|lnbSetColorRight|lnbSetColumnsPos|lnbSetCurSelRow|lnbSetData|lnbSetPicture|lnbSetPictureColor|lnbSetPictureColorRight|lnbSetPictureColorSelected|lnbSetPictureColorSelectedRight|lnbSetPictureRight|lnbSetText|lnbSetTextRight|lnbSetValue|lnbSize|lnbSort|lnbSortByValue|lnbText|lnbTextRight|lnbValue|load|loadAbs|loadBackpack|loadFile|loadGame|loadIdentity|loadMagazine|loadOverlay|loadStatus|loadUniform|loadVest|local|localize|locationNull|locationPosition|lock|lockCameraTo|lockCargo|lockDriver|locked|lockedCargo|lockedDriver|lockedTurret|lockIdentity|lockTurret|lockWP|log|logEntities|logNetwork|logNetworkTerminate|lookAt|lookAtPos|magazineCargo|magazines|magazinesAllTurrets|m
agazinesAmmo|magazinesAmmoCargo|magazinesAmmoFull|magazinesDetail|magazinesDetailBackpack|magazinesDetailUniform|magazinesDetailVest|magazinesTurret|magazineTurretAmmo|mapAnimAdd|mapAnimClear|mapAnimCommit|mapAnimDone|mapCenterOnCamera|mapGridPosition|markAsFinishedOnSteam|markerAlpha|markerBrush|markerColor|markerDir|markerPos|markerShape|markerSize|markerText|markerType|max|members|menuAction|menuAdd|menuChecked|menuClear|menuCollapse|menuData|menuDelete|menuEnable|menuEnabled|menuExpand|menuHover|menuPicture|menuSetAction|menuSetCheck|menuSetData|menuSetPicture|menuSetValue|menuShortcut|menuShortcutText|menuSize|menuSort|menuText|menuURL|menuValue|min|mineActive|mineDetectedBy|missionConfigFile|missionDifficulty|missionName|missionNamespace|missionStart|missionVersion|modelToWorld|modelToWorldVisual|modelToWorldVisualWorld|modelToWorldWorld|modParams|moonIntensity|moonPhase|morale|move|move3DENCamera|moveInAny|moveInCargo|moveInCommander|moveInDriver|moveInGunner|moveInTurret|moveObjectToEnd|moveOut|moveTime|moveTo|moveToCompleted|moveToFailed|musicVolume|name|nameSound|nearEntities|nearestBuilding|nearestLocation|nearestLocations|nearestLocationWithDubbing|nearestObject|nearestObjects|nearestTerrainObjects|nearObjects|nearObjectsReady|nearRoads|nearSupplies|nearTargets|needReload|netId|netObjNull|newOverlay|nextMenuItemIndex|nextWeatherChange|nMenuItems|numberOfEnginesRTD|numberToDate|objectCurators|objectFromNetId|objectParent|objNull|objStatus|onBriefingGear|onBriefingGroup|onBriefingNotes|onBriefingPlan|onBriefingTeamSwitch|onCommandModeChanged|onDoubleClick|onEachFrame|onGroupIconClick|onGroupIconOverEnter|onGroupIconOverLeave|onHCGroupSelectionChanged|onMapSingleClick|onPlayerConnected|onPlayerDisconnected|onPreloadFinished|onPreloadStarted|onShowNewObject|onTeamSwitch|openCuratorInterface|openDLCPage|openDSInterface|openMap|openSteamApp|openYoutubeVideo|opfor|orderGetIn|overcast|overcastForecast|owner|param|params|parseNumber|parseSimpleArray|parseText|par
singNamespace|particlesQuality|pi|pickWeaponPool|pitch|pixelGrid|pixelGridBase|pixelGridNoUIScale|pixelH|pixelW|playableSlotsNumber|playableUnits|playAction|playActionNow|player|playerRespawnTime|playerSide|playersNumber|playGesture|playMission|playMove|playMoveNow|playMusic|playScriptedMission|playSound|playSound3D|position|positionCameraToWorld|posScreenToWorld|posWorldToScreen|ppEffectAdjust|ppEffectCommit|ppEffectCommitted|ppEffectCreate|ppEffectDestroy|ppEffectEnable|ppEffectEnabled|ppEffectForceInNVG|precision|preloadCamera|preloadObject|preloadSound|preloadTitleObj|preloadTitleRsc|primaryWeapon|primaryWeaponItems|primaryWeaponMagazine|priority|processDiaryLink|processInitCommands|productVersion|profileName|profileNamespace|profileNameSteam|progressLoadingScreen|progressPosition|progressSetPosition|publicVariable|publicVariableClient|publicVariableServer|pushBack|pushBackUnique|putWeaponPool|queryItemsPool|queryMagazinePool|queryWeaponPool|rad|radioChannelAdd|radioChannelCreate|radioChannelRemove|radioChannelSetCallSign|radioChannelSetLabel|radioVolume|rain|rainbow|random|rank|rankId|rating|rectangular|registeredTasks|registerTask|reload|reloadEnabled|remoteControl|remoteExec|remoteExecCall|remoteExecutedOwner|remove3DENConnection|remove3DENEventHandler|remove3DENLayer|removeAction|removeAll3DENEventHandlers|removeAllActions|removeAllAssignedItems|removeAllContainers|removeAllCuratorAddons|removeAllCuratorCameraAreas|removeAllCuratorEditingAreas|removeAllEventHandlers|removeAllHandgunItems|removeAllItems|removeAllItemsWithMagazines|removeAllMissionEventHandlers|removeAllMPEventHandlers|removeAllMusicEventHandlers|removeAllOwnedMines|removeAllPrimaryWeaponItems|removeAllWeapons|removeBackpack|removeBackpackGlobal|removeCuratorAddons|removeCuratorCameraArea|removeCuratorEditableObjects|removeCuratorEditingArea|removeDrawIcon|removeDrawLinks|removeEventHandler|removeFromRemainsCollector|removeGoggles|removeGroupIcon|removeHandgunItem|removeHeadgear|removeItem|rem
oveItemFromBackpack|removeItemFromUniform|removeItemFromVest|removeItems|removeMagazine|removeMagazineGlobal|removeMagazines|removeMagazinesTurret|removeMagazineTurret|removeMenuItem|removeMissionEventHandler|removeMPEventHandler|removeMusicEventHandler|removeOwnedMine|removePrimaryWeaponItem|removeSecondaryWeaponItem|removeSimpleTask|removeSwitchableUnit|removeTeamMember|removeUniform|removeVest|removeWeapon|removeWeaponAttachmentCargo|removeWeaponCargo|removeWeaponGlobal|removeWeaponTurret|reportRemoteTarget|requiredVersion|resetCamShake|resetSubgroupDirection|resistance|resize|resources|respawnVehicle|restartEditorCamera|reveal|revealMine|reverse|reversedMouseY|roadAt|roadsConnectedTo|roleDescription|ropeAttachedObjects|ropeAttachedTo|ropeAttachEnabled|ropeAttachTo|ropeCreate|ropeCut|ropeDestroy|ropeDetach|ropeEndPosition|ropeLength|ropes|ropeUnwind|ropeUnwound|rotorsForcesRTD|rotorsRpmRTD|round|runInitScript|safeZoneH|safeZoneW|safeZoneWAbs|safeZoneX|safeZoneXAbs|safeZoneY|save3DENInventory|saveGame|saveIdentity|saveJoysticks|saveOverlay|saveProfileNamespace|saveStatus|saveVar|savingEnabled|say|say2D|say3D|score|scoreSide|screenshot|screenToWorld|scriptDone|scriptName|scriptNull|scudState|secondaryWeapon|secondaryWeaponItems|secondaryWeaponMagazine|select|selectBestPlaces|selectDiarySubject|selectedEditorObjects|selectEditorObject|selectionNames|selectionPosition|selectLeader|selectMax|selectMin|selectNoPlayer|selectPlayer|selectRandom|selectRandomWeighted|selectWeapon|selectWeaponTurret|sendAUMessage|sendSimpleCommand|sendTask|sendTaskResult|sendUDPMessage|serverCommand|serverCommandAvailable|serverCommandExecutable|serverName|serverTime|set|set3DENAttribute|set3DENAttributes|set3DENGrid|set3DENIconsVisible|set3DENLayer|set3DENLinesVisible|set3DENLogicType|set3DENMissionAttribute|set3DENMissionAttributes|set3DENModelsVisible|set3DENObjectType|set3DENSelected|setAccTime|setActualCollectiveRTD|setAirplaneThrottle|setAirportSide|setAmmo|setAmmoCargo|setAmmoOnPylon
|setAnimSpeedCoef|setAperture|setApertureNew|setArmoryPoints|setAttributes|setAutonomous|setBehaviour|setBleedingRemaining|setBrakesRTD|setCameraInterest|setCamShakeDefParams|setCamShakeParams|setCamUseTI|setCaptive|setCenterOfMass|setCollisionLight|setCombatMode|setCompassOscillation|setConvoySeparation|setCuratorCameraAreaCeiling|setCuratorCoef|setCuratorEditingAreaType|setCuratorWaypointCost|setCurrentChannel|setCurrentTask|setCurrentWaypoint|setCustomAimCoef|setCustomWeightRTD|setDamage|setDammage|setDate|setDebriefingText|setDefaultCamera|setDestination|setDetailMapBlendPars|setDir|setDirection|setDrawIcon|setDriveOnPath|setDropInterval|setDynamicSimulationDistance|setDynamicSimulationDistanceCoef|setEditorMode|setEditorObjectScope|setEffectCondition|setEngineRpmRTD|setFace|setFaceAnimation|setFatigue|setFeatureType|setFlagAnimationPhase|setFlagOwner|setFlagSide|setFlagTexture|setFog|setForceGeneratorRTD|setFormation|setFormationTask|setFormDir|setFriend|setFromEditor|setFSMVariable|setFuel|setFuelCargo|setGroupIcon|setGroupIconParams|setGroupIconsSelectable|setGroupIconsVisible|setGroupId|setGroupIdGlobal|setGroupOwner|setGusts|setHideBehind|setHit|setHitIndex|setHitPointDamage|setHorizonParallaxCoef|setHUDMovementLevels|setIdentity|setImportance|setInfoPanel|setLeader|setLightAmbient|setLightAttenuation|setLightBrightness|setLightColor|setLightDayLight|setLightFlareMaxDistance|setLightFlareSize|setLightIntensity|setLightnings|setLightUseFlare|setLocalWindParams|setMagazineTurretAmmo|setMarkerAlpha|setMarkerAlphaLocal|setMarkerBrush|setMarkerBrushLocal|setMarkerColor|setMarkerColorLocal|setMarkerDir|setMarkerDirLocal|setMarkerPos|setMarkerPosLocal|setMarkerShape|setMarkerShapeLocal|setMarkerSize|setMarkerSizeLocal|setMarkerText|setMarkerTextLocal|setMarkerType|setMarkerTypeLocal|setMass|setMimic|setMousePosition|setMusicEffect|setMusicEventHandler|setName|setNameSound|setObjectArguments|setObjectMaterial|setObjectMaterialGlobal|setObjectProxy|setObjectTexture|
setObjectTextureGlobal|setObjectViewDistance|setOvercast|setOwner|setOxygenRemaining|setParticleCircle|setParticleClass|setParticleFire|setParticleParams|setParticleRandom|setPilotCameraDirection|setPilotCameraRotation|setPilotCameraTarget|setPilotLight|setPiPEffect|setPitch|setPlateNumber|setPlayable|setPlayerRespawnTime|setPos|setPosASL|setPosASL2|setPosASLW|setPosATL|setPosition|setPosWorld|setPylonLoadOut|setPylonsPriority|setRadioMsg|setRain|setRainbow|setRandomLip|setRank|setRectangular|setRepairCargo|setRotorBrakeRTD|setShadowDistance|setShotParents|setSide|setSimpleTaskAlwaysVisible|setSimpleTaskCustomData|setSimpleTaskDescription|setSimpleTaskDestination|setSimpleTaskTarget|setSimpleTaskType|setSimulWeatherLayers|setSize|setSkill|setSlingLoad|setSoundEffect|setSpeaker|setSpeech|setSpeedMode|setStamina|setStaminaScheme|setStatValue|setSuppression|setSystemOfUnits|setTargetAge|setTaskMarkerOffset|setTaskResult|setTaskState|setTerrainGrid|setText|setTimeMultiplier|setTitleEffect|setToneMapping|setToneMappingParams|setTrafficDensity|setTrafficDistance|setTrafficGap|setTrafficSpeed|setTriggerActivation|setTriggerArea|setTriggerStatements|setTriggerText|setTriggerTimeout|setTriggerType|setType|setUnconscious|setUnitAbility|setUnitLoadout|setUnitPos|setUnitPosWeak|setUnitRank|setUnitRecoilCoefficient|setUnitTrait|setUnloadInCombat|setUserActionText|setUserMFDText|setUserMFDValue|setVariable|setVectorDir|setVectorDirAndUp|setVectorUp|setVehicleAmmo|setVehicleAmmoDef|setVehicleArmor|setVehicleCargo|setVehicleId|setVehicleInit|setVehicleLock|setVehiclePosition|setVehicleRadar|setVehicleReceiveRemoteTargets|setVehicleReportOwnPosition|setVehicleReportRemoteTargets|setVehicleTIPars|setVehicleVarName|setVelocity|setVelocityModelSpace|setVelocityTransformation|setViewDistance|setVisibleIfTreeCollapsed|setWantedRpmRTD|setWaves|setWaypointBehaviour|setWaypointCombatMode|setWaypointCompletionRadius|setWaypointDescription|setWaypointForceBehaviour|setWaypointFormation|setWay
pointHousePosition|setWaypointLoiterRadius|setWaypointLoiterType|setWaypointName|setWaypointPosition|setWaypointScript|setWaypointSpeed|setWaypointStatements|setWaypointTimeout|setWaypointType|setWaypointVisible|setWeaponReloadingTime|setWind|setWindDir|setWindForce|setWindStr|setWingForceScaleRTD|setWPPos|show3DIcons|showChat|showCinemaBorder|showCommandingMenu|showCompass|showCuratorCompass|showGPS|showHUD|showLegend|showMap|shownArtilleryComputer|shownChat|shownCompass|shownCuratorCompass|showNewEditorObject|shownGPS|shownHUD|shownMap|shownPad|shownRadio|shownScoretable|shownUAVFeed|shownWarrant|shownWatch|showPad|showRadio|showScoretable|showSubtitles|showUAVFeed|showWarrant|showWatch|showWaypoint|showWaypoints|side|sideAmbientLife|sideChat|sideEmpty|sideEnemy|sideFriendly|sideLogic|sideRadio|sideUnknown|simpleTasks|simulationEnabled|simulCloudDensity|simulCloudOcclusion|simulInClouds|simulWeatherSync|sin|size|sizeOf|skill|skillFinal|skipTime|sleep|sliderPosition|sliderRange|sliderSetPosition|sliderSetRange|sliderSetSpeed|sliderSpeed|slingLoadAssistantShown|soldierMagazines|someAmmo|sort|soundVolume|speaker|speed|speedMode|splitString|sqrt|squadParams|stance|startLoadingScreen|stop|stopEngineRTD|stopped|str|sunOrMoon|supportInfo|suppressFor|surfaceIsWater|surfaceNormal|surfaceType|swimInDepth|switchableUnits|switchAction|switchCamera|switchGesture|switchLight|switchMove|synchronizedObjects|synchronizedTriggers|synchronizedWaypoints|synchronizeObjectsAdd|synchronizeObjectsRemove|synchronizeTrigger|synchronizeWaypoint|systemChat|systemOfUnits|tan|targetKnowledge|targets|targetsAggregate|targetsQuery|taskAlwaysVisible|taskChildren|taskCompleted|taskCustomData|taskDescription|taskDestination|taskHint|taskMarkerOffset|taskNull|taskParent|taskResult|taskState|taskType|teamMember|teamMemberNull|teamName|teams|teamSwitch|teamSwitchEnabled|teamType|terminate|terrainIntersect|terrainIntersectASL|terrainIntersectAtASL|text|textLog|textLogFormat|tg|time|timeMultiplier|title
Cut|titleFadeOut|titleObj|titleRsc|titleText|toArray|toFixed|toLower|toString|toUpper|triggerActivated|triggerActivation|triggerArea|triggerAttachedVehicle|triggerAttachObject|triggerAttachVehicle|triggerDynamicSimulation|triggerStatements|triggerText|triggerTimeout|triggerTimeoutCurrent|triggerType|turretLocal|turretOwner|turretUnit|tvAdd|tvClear|tvCollapse|tvCollapseAll|tvCount|tvCurSel|tvData|tvDelete|tvExpand|tvExpandAll|tvPicture|tvPictureRight|tvSetColor|tvSetCurSel|tvSetData|tvSetPicture|tvSetPictureColor|tvSetPictureColorDisabled|tvSetPictureColorSelected|tvSetPictureRight|tvSetPictureRightColor|tvSetPictureRightColorDisabled|tvSetPictureRightColorSelected|tvSetSelectColor|tvSetText|tvSetTooltip|tvSetValue|tvSort|tvSortByValue|tvText|tvTooltip|tvValue|type|typeName|typeOf|UAVControl|uiNamespace|uiSleep|unassignCurator|unassignItem|unassignTeam|unassignVehicle|underwater|uniform|uniformContainer|uniformItems|uniformMagazines|unitAddons|unitAimPosition|unitAimPositionVisual|unitBackpack|unitIsUAV|unitPos|unitReady|unitRecoilCoefficient|units|unitsBelowHeight|unlinkItem|unlockAchievement|unregisterTask|updateDrawIcon|updateMenuItem|updateObjectTree|useAIOperMapObstructionTest|useAISteeringComponent|useAudioTimeForMoves|userInputDisabled|vectorAdd|vectorCos|vectorCrossProduct|vectorDiff|vectorDir|vectorDirVisual|vectorDistance|vectorDistanceSqr|vectorDotProduct|vectorFromTo|vectorMagnitude|vectorMagnitudeSqr|vectorModelToWorld|vectorModelToWorldVisual|vectorMultiply|vectorNormalized|vectorUp|vectorUpVisual|vectorWorldToModel|vectorWorldToModelVisual|vehicle|vehicleCargoEnabled|vehicleChat|vehicleRadio|vehicleReceiveRemoteTargets|vehicleReportOwnPosition|vehicleReportRemoteTargets|vehicles|vehicleVarName|velocity|velocityModelSpace|verifySignature|vest|vestContainer|vestItems|vestMagazines|viewDistance|visibleCompass|visibleGPS|visibleMap|visiblePosition|visiblePositionASL|visibleScoretable|visibleWatch|waitUntil|waves|waypointAttachedObject|waypointAttachedVehic
le|waypointAttachObject|waypointAttachVehicle|waypointBehaviour|waypointCombatMode|waypointCompletionRadius|waypointDescription|waypointForceBehaviour|waypointFormation|waypointHousePosition|waypointLoiterRadius|waypointLoiterType|waypointName|waypointPosition|waypoints|waypointScript|waypointsEnabledUAV|waypointShow|waypointSpeed|waypointStatements|waypointTimeout|waypointTimeoutCurrent|waypointType|waypointVisible|weaponAccessories|weaponAccessoriesCargo|weaponCargo|weaponDirection|weaponInertia|weaponLowered|weapons|weaponsItems|weaponsItemsCargo|weaponState|weaponsTurret|weightRTD|west|WFSideText|wind|windDir|windRTD|windStr|wingsForcesRTD|worldName|worldSize|worldToModel|worldToModelVisual|worldToScreen)\b/i,number:/(?:\$|\b0x)[\da-f]+\b|(?:\B\.\d+|\b\d+(?:\.\d+)?)(?:e[+-]?\d+)?\b/i,operator:/##|>>|&&|\|\||[!=<>]=?|[-+*/%#^]|\b(?:and|mod|not|or)\b/i,"magic-variable":{pattern:/\b(?:_exception|_fnc_scriptName|_fnc_scriptNameParent|_forEachIndex|_this|_thisEventHandler|_thisFSM|_thisScript|_x|this|thisList|thisTrigger)\b/i,alias:"keyword"},constant:/\bDIK(?:_[a-z\d]+)+\b/i}),e.languages.insertBefore("sqf","string",{macro:{pattern:/(^[ \t]*)#[a-z](?:[^\r\n\\]|\\(?:\r\n|[\s\S]))*/im,lookbehind:!0,greedy:!0,alias:"property",inside:{directive:{pattern:/#[a-z]+\b/i,alias:"keyword"},comment:e.languages.sqf.comment}}}),delete e.languages.sqf["class-name"]}e.exports=t,t.displayName="sqf",t.aliases=[]},11114(e){"use strict";function 
t(e){e.languages.sql={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:--|\/\/|#).*)/,lookbehind:!0},variable:[{pattern:/@(["'`])(?:\\[\s\S]|(?!\1)[^\\])+\1/,greedy:!0},/@[\w.$]+/],string:{pattern:/(^|[^@\\])("|')(?:\\[\s\S]|(?!\2)[^\\]|\2\2)*\2/,greedy:!0,lookbehind:!0},function:/\b(?:AVG|COUNT|FIRST|FORMAT|LAST|LCASE|LEN|MAX|MID|MIN|MOD|NOW|ROUND|SUM|UCASE)(?=\s*\()/i,keyword:/\b(?:ACTION|ADD|AFTER|ALGORITHM|ALL|ALTER|ANALYZE|ANY|APPLY|AS|ASC|AUTHORIZATION|AUTO_INCREMENT|BACKUP|BDB|BEGIN|BERKELEYDB|BIGINT|BINARY|BIT|BLOB|BOOL|BOOLEAN|BREAK|BROWSE|BTREE|BULK|BY|CALL|CASCADED?|CASE|CHAIN|CHAR(?:ACTER|SET)?|CHECK(?:POINT)?|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMNS?|COMMENT|COMMIT(?:TED)?|COMPUTE|CONNECT|CONSISTENT|CONSTRAINT|CONTAINS(?:TABLE)?|CONTINUE|CONVERT|CREATE|CROSS|CURRENT(?:_DATE|_TIME|_TIMESTAMP|_USER)?|CURSOR|CYCLE|DATA(?:BASES?)?|DATE(?:TIME)?|DAY|DBCC|DEALLOCATE|DEC|DECIMAL|DECLARE|DEFAULT|DEFINER|DELAYED|DELETE|DELIMITERS?|DENY|DESC|DESCRIBE|DETERMINISTIC|DISABLE|DISCARD|DISK|DISTINCT|DISTINCTROW|DISTRIBUTED|DO|DOUBLE|DROP|DUMMY|DUMP(?:FILE)?|DUPLICATE|ELSE(?:IF)?|ENABLE|ENCLOSED|END|ENGINE|ENUM|ERRLVL|ERRORS|ESCAPED?|EXCEPT|EXEC(?:UTE)?|EXISTS|EXIT|EXPLAIN|EXTENDED|FETCH|FIELDS|FILE|FILLFACTOR|FIRST|FIXED|FLOAT|FOLLOWING|FOR(?: EACH 
ROW)?|FORCE|FOREIGN|FREETEXT(?:TABLE)?|FROM|FULL|FUNCTION|GEOMETRY(?:COLLECTION)?|GLOBAL|GOTO|GRANT|GROUP|HANDLER|HASH|HAVING|HOLDLOCK|HOUR|IDENTITY(?:_INSERT|COL)?|IF|IGNORE|IMPORT|INDEX|INFILE|INNER|INNODB|INOUT|INSERT|INT|INTEGER|INTERSECT|INTERVAL|INTO|INVOKER|ISOLATION|ITERATE|JOIN|KEYS?|KILL|LANGUAGE|LAST|LEAVE|LEFT|LEVEL|LIMIT|LINENO|LINES|LINESTRING|LOAD|LOCAL|LOCK|LONG(?:BLOB|TEXT)|LOOP|MATCH(?:ED)?|MEDIUM(?:BLOB|INT|TEXT)|MERGE|MIDDLEINT|MINUTE|MODE|MODIFIES|MODIFY|MONTH|MULTI(?:LINESTRING|POINT|POLYGON)|NATIONAL|NATURAL|NCHAR|NEXT|NO|NONCLUSTERED|NULLIF|NUMERIC|OFF?|OFFSETS?|ON|OPEN(?:DATASOURCE|QUERY|ROWSET)?|OPTIMIZE|OPTION(?:ALLY)?|ORDER|OUT(?:ER|FILE)?|OVER|PARTIAL|PARTITION|PERCENT|PIVOT|PLAN|POINT|POLYGON|PRECEDING|PRECISION|PREPARE|PREV|PRIMARY|PRINT|PRIVILEGES|PROC(?:EDURE)?|PUBLIC|PURGE|QUICK|RAISERROR|READS?|REAL|RECONFIGURE|REFERENCES|RELEASE|RENAME|REPEAT(?:ABLE)?|REPLACE|REPLICATION|REQUIRE|RESIGNAL|RESTORE|RESTRICT|RETURN(?:S|ING)?|REVOKE|RIGHT|ROLLBACK|ROUTINE|ROW(?:COUNT|GUIDCOL|S)?|RTREE|RULE|SAVE(?:POINT)?|SCHEMA|SECOND|SELECT|SERIAL(?:IZABLE)?|SESSION(?:_USER)?|SET(?:USER)?|SHARE|SHOW|SHUTDOWN|SIMPLE|SMALLINT|SNAPSHOT|SOME|SONAME|SQL|START(?:ING)?|STATISTICS|STATUS|STRIPED|SYSTEM_USER|TABLES?|TABLESPACE|TEMP(?:ORARY|TABLE)?|TERMINATED|TEXT(?:SIZE)?|THEN|TIME(?:STAMP)?|TINY(?:BLOB|INT|TEXT)|TOP?|TRAN(?:SACTIONS?)?|TRIGGER|TRUNCATE|TSEQUAL|TYPES?|UNBOUNDED|UNCOMMITTED|UNDEFINED|UNION|UNIQUE|UNLOCK|UNPIVOT|UNSIGNED|UPDATE(?:TEXT)?|USAGE|USE|USER|USING|VALUES?|VAR(?:BINARY|CHAR|CHARACTER|YING)|VIEW|WAITFOR|WARNINGS|WHEN|WHERE|WHILE|WITH(?: ROLLUP|IN)?|WORK|WRITE(?:TEXT)?|YEAR)\b/i,boolean:/\b(?:TRUE|FALSE|NULL)\b/i,number:/\b0x[\da-f]+\b|\b\d+(?:\.\d*)?|\B\.\d+\b/i,operator:/[-+*\/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?|\b(?:AND|BETWEEN|DIV|IN|ILIKE|IS|LIKE|NOT|OR|REGEXP|RLIKE|SOUNDS LIKE|XOR)\b/i,punctuation:/[;[\]()`,.]/}}e.exports=t,t.displayName="sql",t.aliases=[]},67386(e){"use strict";function 
t(e){e.languages.squirrel=e.languages.extend("clike",{comment:[e.languages.clike.comment[0],{pattern:/(^|[^\\:])(?:\/\/|#).*/,lookbehind:!0,greedy:!0}],string:[{pattern:/(^|[^\\"'@])(?:@"(?:[^"]|"")*"(?!")|"(?:[^\\\r\n"]|\\.)*")/,lookbehind:!0,greedy:!0},{pattern:/(^|[^\\"'])'(?:[^\\']|\\(?:[xuU][0-9a-fA-F]{0,8}|[\s\S]))'/,lookbehind:!0,greedy:!0}],"class-name":{pattern:/(\b(?:class|enum|extends|instanceof)\s+)\w+(?:\.\w+)*/,lookbehind:!0,inside:{punctuation:/\./}},keyword:/\b(?:base|break|case|catch|class|clone|const|constructor|continue|default|delete|else|enum|extends|for|foreach|function|if|in|instanceof|local|null|resume|return|static|switch|this|throw|try|typeof|while|yield|__LINE__|__FILE__)\b/,number:/\b(?:0x[0-9a-fA-F]+|\d+(?:\.(?:\d+|[eE][+-]?\d+))?)\b/,operator:/\+\+|--|<=>|<[-<]|>>>?|&&?|\|\|?|[-+*/%!=<>]=?|[~^]|::?/,punctuation:/[(){}\[\],;.]/}),e.languages.insertBefore("squirrel","operator",{"attribute-punctuation":{pattern:/<\/|\/>/,alias:"important"},lambda:{pattern:/@(?=\()/,alias:"operator"}})}e.exports=t,t.displayName="squirrel",t.aliases=[]},28067(e){"use strict";function t(e){e.languages.stan={comment:/\/\/.*|\/\*[\s\S]*?\*\/|#(?!include).*/,string:{pattern:/"[\x20\x21\x23-\x5B\x5D-\x7E]*"/,greedy:!0},directive:{pattern:/^([ 
\t]*)#include\b.*/m,lookbehind:!0,alias:"property"},"function-arg":{pattern:/(\b(?:algebra_solver|integrate_1d|integrate_ode|integrate_ode_bdf|integrate_ode_rk45|map_rect)\s*\(\s*)[a-zA-Z]\w*/,lookbehind:!0,alias:"function"},constraint:{pattern:/(\b(?:int|matrix|real|row_vector|vector)\s*)<[^<>]*>/,lookbehind:!0,inside:{expression:{pattern:/(=\s*)\S(?:\S|\s+(?!\s))*?(?=\s*(?:>$|,\s*\w+\s*=))/,lookbehind:!0,inside:null},property:/\b[a-z]\w*(?=\s*=)/i,operator:/=/,punctuation:/^<|>$|,/}},keyword:[/\b(?:break|cholesky_factor_corr|cholesky_factor_cov|continue|corr_matrix|cov_matrix|data|else|for|functions|generated|if|in|increment_log_prob|int|matrix|model|ordered|parameters|positive_ordered|print|quantities|real|reject|return|row_vector|simplex|target|transformed|unit_vector|vector|void|while)\b/,/\b(?:algebra_solver|integrate_1d|integrate_ode|integrate_ode_bdf|integrate_ode_rk45|map_rect)\b/],function:/\b[a-z]\w*(?=\s*\()/i,number:/(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:E[+-]?\d+)?\b/i,boolean:/\b(?:false|true)\b/,operator:/<-|\.[*/]=?|\|\|?|&&|[!=<>+\-*/]=?|['^%~?:]/,punctuation:/[()\[\]{},;]/},e.languages.stan.constraint.inside.expression.inside=e.languages.stan}e.exports=t,t.displayName="stan",t.aliases=[]},49168(e){"use strict";function t(e){var 
t,n,r,i;t=e,(i={comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|\/\/.*)/,lookbehind:!0},url:{pattern:/\burl\((["']?).*?\1\)/i,greedy:!0},string:{pattern:/("|')(?:(?!\1)[^\\\r\n]|\\(?:\r\n|[\s\S]))*\1/,greedy:!0},interpolation:null,func:null,important:/\B!(?:important|optional)\b/i,keyword:{pattern:/(^|\s+)(?:(?:if|else|for|return|unless)(?=\s|$)|@[\w-]+)/,lookbehind:!0},hexcode:/#[\da-f]{3,6}/i,color:[/\b(?:AliceBlue|AntiqueWhite|Aqua|Aquamarine|Azure|Beige|Bisque|Black|BlanchedAlmond|Blue|BlueViolet|Brown|BurlyWood|CadetBlue|Chartreuse|Chocolate|Coral|CornflowerBlue|Cornsilk|Crimson|Cyan|DarkBlue|DarkCyan|DarkGoldenRod|DarkGr[ae]y|DarkGreen|DarkKhaki|DarkMagenta|DarkOliveGreen|DarkOrange|DarkOrchid|DarkRed|DarkSalmon|DarkSeaGreen|DarkSlateBlue|DarkSlateGr[ae]y|DarkTurquoise|DarkViolet|DeepPink|DeepSkyBlue|DimGr[ae]y|DodgerBlue|FireBrick|FloralWhite|ForestGreen|Fuchsia|Gainsboro|GhostWhite|Gold|GoldenRod|Gr[ae]y|Green|GreenYellow|HoneyDew|HotPink|IndianRed|Indigo|Ivory|Khaki|Lavender|LavenderBlush|LawnGreen|LemonChiffon|LightBlue|LightCoral|LightCyan|LightGoldenRodYellow|LightGr[ae]y|LightGreen|LightPink|LightSalmon|LightSeaGreen|LightSkyBlue|LightSlateGr[ae]y|LightSteelBlue|LightYellow|Lime|LimeGreen|Linen|Magenta|Maroon|MediumAquaMarine|MediumBlue|MediumOrchid|MediumPurple|MediumSeaGreen|MediumSlateBlue|MediumSpringGreen|MediumTurquoise|MediumVioletRed|MidnightBlue|MintCream|MistyRose|Moccasin|NavajoWhite|Navy|OldLace|Olive|OliveDrab|Orange|OrangeRed|Orchid|PaleGoldenRod|PaleGreen|PaleTurquoise|PaleVioletRed|PapayaWhip|PeachPuff|Peru|Pink|Plum|PowderBlue|Purple|Red|RosyBrown|RoyalBlue|SaddleBrown|Salmon|SandyBrown|SeaGreen|SeaShell|Sienna|Silver|SkyBlue|SlateBlue|SlateGr[ae]y|Snow|SpringGreen|SteelBlue|Tan|Teal|Thistle|Tomato|Transparent|Turquoise|Violet|Wheat|White|WhiteSmoke|Yellow|YellowGreen)\b/i,{pattern:/\b(?:rgb|hsl)\(\s*\d{1,3}\s*,\s*\d{1,3}%?\s*,\s*\d{1,3}%?\s*\)\B|\b(?:rgb|hsl)a\(\s*\d{1,3}\s*,\s*\d{1,3}%?\s*,\s*\d{1,3}%?\s*,\s*(?:0|0?\.\d+|1)\s*\)\B/i,in
side:{unit:n={pattern:/(\b\d+)(?:%|[a-z]+)/,lookbehind:!0},number:r={pattern:/(^|[^\w.-])-?(?:\d+(?:\.\d+)?|\.\d+)/,lookbehind:!0},function:/[\w-]+(?=\()/,punctuation:/[(),]/}}],entity:/\\[\da-f]{1,8}/i,unit:n,boolean:/\b(?:true|false)\b/,operator:[/~|[+!\/%<>?=]=?|[-:]=|\*[*=]?|\.{2,3}|&&|\|\||\B-\B|\b(?:and|in|is(?: a| defined| not|nt)?|not|or)\b/],number:r,punctuation:/[{}()\[\];:,]/}).interpolation={pattern:/\{[^\r\n}:]+\}/,alias:"variable",inside:{delimiter:{pattern:/^\{|\}$/,alias:"punctuation"},rest:i}},i.func={pattern:/[\w-]+\([^)]*\).*/,inside:{function:/^[^(]+/,rest:i}},t.languages.stylus={"atrule-declaration":{pattern:/(^[ \t]*)@.+/m,lookbehind:!0,inside:{atrule:/^@[\w-]+/,rest:i}},"variable-declaration":{pattern:/(^[ \t]*)[\w$-]+\s*.?=[ \t]*(?:\{[^{}]*\}|\S.*|$)/m,lookbehind:!0,inside:{variable:/^\S+/,rest:i}},statement:{pattern:/(^[ \t]*)(?:if|else|for|return|unless)[ \t].+/m,lookbehind:!0,inside:{keyword:/^\S+/,rest:i}},"property-declaration":{pattern:/((?:^|\{)([ \t]*))(?:[\w-]|\{[^}\r\n]+\})+(?:\s*:\s*|[ \t]+)(?!\s)[^{\r\n]*(?:;|[^{\r\n,]$(?!(?:\r?\n|\r)(?:\{|\2[ \t])))/m,lookbehind:!0,inside:{property:{pattern:/^[^\s:]+/,inside:{interpolation:i.interpolation}},rest:i}},selector:{pattern:/(^[ \t]*)(?:(?=\S)(?:[^{}\r\n:()]|::?[\w-]+(?:\([^)\r\n]*\)|(?![\w-]))|\{[^}\r\n]+\})+)(?:(?:\r?\n|\r)(?:\1(?:(?=\S)(?:[^{}\r\n:()]|::?[\w-]+(?:\([^)\r\n]*\)|(?![\w-]))|\{[^}\r\n]+\})+)))*(?:,$|\{|(?=(?:\r?\n|\r)(?:\{|\1[ \t])))/m,lookbehind:!0,inside:{interpolation:i.interpolation,comment:i.comment,punctuation:/[{},]/}},func:i.func,string:i.string,comment:{pattern:/(^|[^\\])(?:\/\*[\s\S]*?\*\/|\/\/.*)/,lookbehind:!0,greedy:!0},interpolation:i.interpolation,punctuation:/[{}()\[\];:.]/}}e.exports=t,t.displayName="stylus",t.aliases=[]},23651(e){"use strict";function 
t(e){e.languages.swift=e.languages.extend("clike",{string:{pattern:/("|')(?:\\(?:\((?:[^()]|\([^)]+\))+\)|\r\n|[^(])|(?!\1)[^\\\r\n])*\1/,greedy:!0,inside:{interpolation:{pattern:/\\\((?:[^()]|\([^)]+\))+\)/,inside:{delimiter:{pattern:/^\\\(|\)$/,alias:"variable"}}}}},keyword:/\b(?:as|associativity|break|case|catch|class|continue|convenience|default|defer|deinit|didSet|do|dynamic(?:Type)?|else|enum|extension|fallthrough|final|for|func|get|guard|if|import|in|infix|init|inout|internal|is|lazy|left|let|mutating|new|none|nonmutating|operator|optional|override|postfix|precedence|prefix|private|protocol|public|repeat|required|rethrows|return|right|safe|self|Self|set|some|static|struct|subscript|super|switch|throws?|try|Type|typealias|unowned|unsafe|var|weak|where|while|willSet|__(?:COLUMN__|FILE__|FUNCTION__|LINE__))\b/,number:/\b(?:[\d_]+(?:\.[\de_]+)?|0x[a-f0-9_]+(?:\.[a-f0-9p_]+)?|0b[01_]+|0o[0-7_]+)\b/i,constant:/\b(?:nil|[A-Z_]{2,}|k[A-Z][A-Za-z_]+)\b/,atrule:/@\b(?:IB(?:Outlet|Designable|Action|Inspectable)|class_protocol|exported|noreturn|NS(?:Copying|Managed)|objc|UIApplicationMain|auto_closure)\b/,builtin:/\b(?:[A-Z]\S+|abs|advance|alignof(?:Value)?|assert|contains|count(?:Elements)?|debugPrint(?:ln)?|distance|drop(?:First|Last)|dump|enumerate|equal|filter|find|first|getVaList|indices|isEmpty|join|last|lexicographicalCompare|map|max(?:Element)?|min(?:Element)?|numericCast|overlaps|partition|print(?:ln)?|reduce|reflect|reverse|sizeof(?:Value)?|sort(?:ed)?|split|startsWith|stride(?:of(?:Value)?)?|suffix|swap|toDebugString|toString|transcode|underestimateCount|unsafeBitCast|with(?:ExtendedLifetime|Unsafe(?:MutablePointers?|Pointers?)|VaList))\b/}),e.languages.swift.string.inside.interpolation.inside.rest=e.languages.swift}e.exports=t,t.displayName="swift",t.aliases=[]},32268(e,t,n){"use strict";var r=n(2329),i=n(61958);function 
a(e){e.register(r),e.register(i),e.languages.t4=e.languages["t4-cs"]=e.languages["t4-templating"].createT4("csharp")}e.exports=a,a.displayName="t4Cs",a.aliases=[]},2329(e){"use strict";function t(e){!function(e){function t(e,t,n){return{pattern:RegExp("<#"+e+"[\\s\\S]*?#>"),alias:"block",inside:{delimiter:{pattern:RegExp("^<#"+e+"|#>$"),alias:"important"},content:{pattern:/[\s\S]+/,inside:t,alias:n}}}}function n(n){var r=e.languages[n],i="language-"+n;return{block:{pattern:/<#[\s\S]+?#>/,inside:{directive:t("@",{"attr-value":{pattern:/=(?:("|')(?:\\[\s\S]|(?!\1)[^\\])*\1|[^\s'">=]+)/,inside:{punctuation:/^=|^["']|["']$/}},keyword:/\b\w+(?=\s)/,"attr-name":/\b\w+/}),expression:t("=",r,i),"class-feature":t("\\+",r,i),standard:t("",r,i)}}}}e.languages["t4-templating"]=Object.defineProperty({},"createT4",{value:n})}(e)}e.exports=t,t.displayName="t4Templating",t.aliases=[]},82996(e,t,n){"use strict";var r=n(2329),i=n(53813);function a(e){e.register(r),e.register(i),e.languages["t4-vb"]=e.languages["t4-templating"].createT4("vbnet")}e.exports=a,a.displayName="t4Vb",a.aliases=[]},17290(e,t,n){"use strict";var r=n(65039);function i(e){e.register(r),e.languages.tap={fail:/not ok[^#{\n\r]*/,pass:/ok[^#{\n\r]*/,pragma:/pragma [+-][a-z]+/,bailout:/bail out!.*/i,version:/TAP version \d+/i,plan:/\b\d+\.\.\d+(?: +#.*)?/,subtest:{pattern:/# Subtest(?:: .*)?/,greedy:!0},punctuation:/[{}]/,directive:/#.*/,yamlish:{pattern:/(^[ \t]*)---[\s\S]*?[\r\n][ \t]*\.\.\.$/m,lookbehind:!0,inside:e.languages.yaml,alias:"language-yaml"}}}e.exports=i,i.displayName="tap",i.aliases=[]},67989(e){"use strict";function t(e){e.languages.tcl={comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0},string:{pattern:/"(?:[^"\\\r\n]|\\(?:\r\n|[\s\S]))*"/,greedy:!0},variable:[{pattern:/(\$)(?:::)?(?:[a-zA-Z0-9]+::)*\w+/,lookbehind:!0},{pattern:/(\$)\{[^}]+\}/,lookbehind:!0},{pattern:/(^[\t ]*set[ \t]+)(?:::)?(?:[a-zA-Z0-9]+::)*\w+/m,lookbehind:!0}],function:{pattern:/(^[\t ]*proc[ 
\t]+)\S+/m,lookbehind:!0},builtin:[{pattern:/(^[\t ]*)(?:proc|return|class|error|eval|exit|for|foreach|if|switch|while|break|continue)\b/m,lookbehind:!0},/\b(?:elseif|else)\b/],scope:{pattern:/(^[\t ]*)(?:global|upvar|variable)\b/m,lookbehind:!0,alias:"constant"},keyword:{pattern:/(^[\t ]*|\[)(?:after|append|apply|array|auto_(?:execok|import|load|mkindex|qualify|reset)|automkindex_old|bgerror|binary|catch|cd|chan|clock|close|concat|dde|dict|encoding|eof|exec|expr|fblocked|fconfigure|fcopy|file(?:event|name)?|flush|gets|glob|history|http|incr|info|interp|join|lappend|lassign|lindex|linsert|list|llength|load|lrange|lrepeat|lreplace|lreverse|lsearch|lset|lsort|math(?:func|op)|memory|msgcat|namespace|open|package|parray|pid|pkg_mkIndex|platform|puts|pwd|re_syntax|read|refchan|regexp|registry|regsub|rename|Safe_Base|scan|seek|set|socket|source|split|string|subst|Tcl|tcl(?:_endOfWord|_findLibrary|startOf(?:Next|Previous)Word|wordBreak(?:After|Before)|test|vars)|tell|time|tm|trace|unknown|unload|unset|update|uplevel|vwait)\b/m,lookbehind:!0},operator:/!=?|\*\*?|==|&&?|\|\|?|<[=<]?|>[=>]?|[-+~\/%?^]|\b(?:eq|ne|in|ni)\b/,punctuation:/[{}()\[\]]/}}e.exports=t,t.displayName="tcl",t.aliases=[]},31065(e){"use strict";function t(e){!function(e){var t=/\([^|()\n]+\)|\[[^\]\n]+\]|\{[^}\n]+\}/.source,n=/\)|\((?![^|()\n]+\))/.source;function r(e,r){return RegExp(e.replace(//g,function(){return"(?:"+t+")"}).replace(//g,function(){return"(?:"+n+")"}),r||"")}var 
i={css:{pattern:/\{[^{}]+\}/,inside:{rest:e.languages.css}},"class-id":{pattern:/(\()[^()]+(?=\))/,lookbehind:!0,alias:"attr-value"},lang:{pattern:/(\[)[^\[\]]+(?=\])/,lookbehind:!0,alias:"attr-value"},punctuation:/[\\\/]\d+|\S/},a=e.languages.textile=e.languages.extend("markup",{phrase:{pattern:/(^|\r|\n)\S[\s\S]*?(?=$|\r?\n\r?\n|\r\r)/,lookbehind:!0,inside:{"block-tag":{pattern:r(/^[a-z]\w*(?:||[<>=])*\./.source),inside:{modifier:{pattern:r(/(^[a-z]\w*)(?:||[<>=])+(?=\.)/.source),lookbehind:!0,inside:i},tag:/^[a-z]\w*/,punctuation:/\.$/}},list:{pattern:r(/^[*#]+*\s+\S.*/.source,"m"),inside:{modifier:{pattern:r(/(^[*#]+)+/.source),lookbehind:!0,inside:i},punctuation:/^[*#]+/}},table:{pattern:r(/^(?:(?:||[<>=^~])+\.\s*)?(?:\|(?:(?:||[<>=^~_]|[\\/]\d+)+\.|(?!(?:||[<>=^~_]|[\\/]\d+)+\.))[^|]*)+\|/.source,"m"),inside:{modifier:{pattern:r(/(^|\|(?:\r?\n|\r)?)(?:||[<>=^~_]|[\\/]\d+)+(?=\.)/.source),lookbehind:!0,inside:i},punctuation:/\||^\./}},inline:{pattern:r(/(^|[^a-zA-Z\d])(\*\*|__|\?\?|[*_%@+\-^~])*.+?\2(?![a-zA-Z\d])/.source),lookbehind:!0,inside:{bold:{pattern:r(/(^(\*\*?)*).+?(?=\2)/.source),lookbehind:!0},italic:{pattern:r(/(^(__?)*).+?(?=\2)/.source),lookbehind:!0},cite:{pattern:r(/(^\?\?*).+?(?=\?\?)/.source),lookbehind:!0,alias:"string"},code:{pattern:r(/(^@*).+?(?=@)/.source),lookbehind:!0,alias:"keyword"},inserted:{pattern:r(/(^\+*).+?(?=\+)/.source),lookbehind:!0},deleted:{pattern:r(/(^-*).+?(?=-)/.source),lookbehind:!0},span:{pattern:r(/(^%*).+?(?=%)/.source),lookbehind:!0},modifier:{pattern:r(/(^\*\*|__|\?\?|[*_%@+\-^~])+/.source),lookbehind:!0,inside:i},punctuation:/[*_%?@+\-^~]+/}},"link-ref":{pattern:/^\[[^\]]+\]\S+$/m,inside:{string:{pattern:/(^\[)[^\]]+(?=\])/,lookbehind:!0},url:{pattern:/(^\])\S+$/,lookbehind:!0},punctuation:/[\[\]]/}},link:{pattern:r(/"*[^"]+":.+?(?=[^\w/]?(?:\s|$))/.source),inside:{text:{pattern:r(/(^"*)[^"]+(?=")/.source),lookbehind:!0},modifier:{pattern:r(/(^")+/.source),lookbehind:!0,inside:i},url:{pattern:/(:).+/,lookbehind:
!0},punctuation:/[":]/}},image:{pattern:r(/!(?:||[<>=])*(?![<>=])[^!\s()]+(?:\([^)]+\))?!(?::.+?(?=[^\w/]?(?:\s|$)))?/.source),inside:{source:{pattern:r(/(^!(?:||[<>=])*)(?![<>=])[^!\s()]+(?:\([^)]+\))?(?=!)/.source),lookbehind:!0,alias:"url"},modifier:{pattern:r(/(^!)(?:||[<>=])+/.source),lookbehind:!0,inside:i},url:{pattern:/(:).+/,lookbehind:!0},punctuation:/[!:]/}},footnote:{pattern:/\b\[\d+\]/,alias:"comment",inside:{punctuation:/\[|\]/}},acronym:{pattern:/\b[A-Z\d]+\([^)]+\)/,inside:{comment:{pattern:/(\()[^()]+(?=\))/,lookbehind:!0},punctuation:/[()]/}},mark:{pattern:/\b\((?:TM|R|C)\)/,alias:"comment",inside:{punctuation:/[()]/}}}}}),o=a.phrase.inside,s={inline:o.inline,link:o.link,image:o.image,footnote:o.footnote,acronym:o.acronym,mark:o.mark};a.tag.pattern=/<\/?(?!\d)[a-z0-9]+(?:\s+[^\s>\/=]+(?:=(?:("|')(?:\\[\s\S]|(?!\1)[^\\])*\1|[^\s'">=]+))?)*\s*\/?>/i;var u=o.inline.inside;u.bold.inside=s,u.italic.inside=s,u.inserted.inside=s,u.deleted.inside=s,u.span.inside=s;var c=o.table.inside;c.inline=s.inline,c.link=s.link,c.image=s.image,c.footnote=s.footnote,c.acronym=s.acronym,c.mark=s.mark}(e)}e.exports=t,t.displayName="textile",t.aliases=[]},85572(e){"use strict";function t(e){!function(e){var t=/(?:[\w-]+|'[^'\n\r]*'|"(?:\\.|[^\\"\r\n])*")/.source;function n(e){return e.replace(/__/g,function(){return t})}e.languages.toml={comment:{pattern:/#.*/,greedy:!0},table:{pattern:RegExp(n(/(^[\t ]*\[\s*(?:\[\s*)?)__(?:\s*\.\s*__)*(?=\s*\])/.source),"m"),lookbehind:!0,greedy:!0,alias:"class-name"},key:{pattern:RegExp(n(/(^[\t 
]*|[{,]\s*)__(?:\s*\.\s*__)*(?=\s*=)/.source),"m"),lookbehind:!0,greedy:!0,alias:"property"},string:{pattern:/"""(?:\\[\s\S]|[^\\])*?"""|'''[\s\S]*?'''|'[^'\n\r]*'|"(?:\\.|[^\\"\r\n])*"/,greedy:!0},date:[{pattern:/\b\d{4}-\d{2}-\d{2}(?:[T\s]\d{2}:\d{2}:\d{2}(?:\.\d+)?(?:Z|[+-]\d{2}:\d{2})?)?\b/i,alias:"number"},{pattern:/\b\d{2}:\d{2}:\d{2}(?:\.\d+)?\b/,alias:"number"}],number:/(?:\b0(?:x[\da-zA-Z]+(?:_[\da-zA-Z]+)*|o[0-7]+(?:_[0-7]+)*|b[10]+(?:_[10]+)*))\b|[-+]?\b\d+(?:_\d+)*(?:\.\d+(?:_\d+)*)?(?:[eE][+-]?\d+(?:_\d+)*)?\b|[-+]?\b(?:inf|nan)\b/,boolean:/\b(?:true|false)\b/,punctuation:/[.,=[\]{}]/}}(e)}e.exports=t,t.displayName="toml",t.aliases=[]},87041(e,t,n){"use strict";var r=n(96412),i=n(4979);function a(e){var t,n,a;e.register(r),e.register(i),n=(t=e).util.clone(t.languages.typescript),t.languages.tsx=t.languages.extend("jsx",n),(a=t.languages.tsx.tag).pattern=RegExp(/(^|[^\w$]|(?=<\/))/.source+"(?:"+a.pattern.source+")",a.pattern.flags),a.lookbehind=!0}e.exports=a,a.displayName="tsx",a.aliases=[]},61028(e,t,n){"use strict";var r=n(93205);function i(e){var 
t;e.register(r),(t=e).languages.tt2=t.languages.extend("clike",{comment:/#.*|\[%#[\s\S]*?%\]/,keyword:/\b(?:BLOCK|CALL|CASE|CATCH|CLEAR|DEBUG|DEFAULT|ELSE|ELSIF|END|FILTER|FINAL|FOREACH|GET|IF|IN|INCLUDE|INSERT|LAST|MACRO|META|NEXT|PERL|PROCESS|RAWPERL|RETURN|SET|STOP|TAGS|THROW|TRY|SWITCH|UNLESS|USE|WHILE|WRAPPER)\b/,punctuation:/[[\]{},()]/}),t.languages.insertBefore("tt2","number",{operator:/=[>=]?|!=?|<=?|>=?|&&|\|\|?|\b(?:and|or|not)\b/,variable:{pattern:/\b[a-z]\w*(?:\s*\.\s*(?:\d+|\$?[a-z]\w*))*\b/i}}),t.languages.insertBefore("tt2","keyword",{delimiter:{pattern:/^(?:\[%|%%)-?|-?%\]$/,alias:"punctuation"}}),t.languages.insertBefore("tt2","string",{"single-quoted-string":{pattern:/'[^\\']*(?:\\[\s\S][^\\']*)*'/,greedy:!0,alias:"string"},"double-quoted-string":{pattern:/"[^\\"]*(?:\\[\s\S][^\\"]*)*"/,greedy:!0,alias:"string",inside:{variable:{pattern:/\$(?:[a-z]\w*(?:\.(?:\d+|\$?[a-z]\w*))*)/i}}}}),delete t.languages.tt2.string,t.hooks.add("before-tokenize",function(e){var n=/\[%[\s\S]+?%\]/g;t.languages["markup-templating"].buildPlaceholders(e,"tt2",n)}),t.hooks.add("after-tokenize",function(e){t.languages["markup-templating"].tokenizePlaceholders(e,"tt2")})}e.exports=i,i.displayName="tt2",i.aliases=[]},24691(e){"use strict";function 
t(e){e.languages.turtle={comment:{pattern:/#.*/,greedy:!0},"multiline-string":{pattern:/"""(?:(?:""?)?(?:[^"\\]|\\.))*"""|'''(?:(?:''?)?(?:[^'\\]|\\.))*'''/,greedy:!0,alias:"string",inside:{comment:/#.*/}},string:{pattern:/"(?:[^\\"\r\n]|\\.)*"|'(?:[^\\'\r\n]|\\.)*'/,greedy:!0},url:{pattern:/<(?:[^\x00-\x20<>"{}|^`\\]|\\(?:u[\da-fA-F]{4}|U[\da-fA-F]{8}))*>/,greedy:!0,inside:{punctuation:/[<>]/}},function:{pattern:/(?:(?![-.\d\xB7])[-.\w\xB7\xC0-\uFFFD]+)?:(?:(?![-.])(?:[-.:\w\xC0-\uFFFD]|%[\da-f]{2}|\\.)+)?/i,inside:{"local-name":{pattern:/([^:]*:)[\s\S]+/,lookbehind:!0},prefix:{pattern:/[\s\S]+/,inside:{punctuation:/:/}}}},number:/[+-]?\b\d+(?:\.\d*)?(?:e[+-]?\d+)?/i,punctuation:/[{}.,;()[\]]|\^\^/,boolean:/\b(?:true|false)\b/,keyword:[/(?:\ba|@prefix|@base)\b|=/,/\b(?:graph|base|prefix)\b/i],tag:{pattern:/@[a-z]+(?:-[a-z\d]+)*/i,inside:{punctuation:/@/}}},e.languages.trig=e.languages.turtle}e.exports=t,t.displayName="turtle",t.aliases=[]},19892(e){"use strict";function t(e){e.languages.twig={comment:/\{#[\s\S]*?#\}/,tag:{pattern:/\{\{[\s\S]*?\}\}|\{%[\s\S]*?%\}/,inside:{ld:{pattern:/^(?:\{\{-?|\{%-?\s*\w+)/,inside:{punctuation:/^(?:\{\{|\{%)-?/,keyword:/\w+/}},rd:{pattern:/-?(?:%\}|\}\})$/,inside:{punctuation:/.+/}},string:{pattern:/("|')(?:\\.|(?!\1)[^\\\r\n])*\1/,inside:{punctuation:/^['"]|['"]$/}},keyword:/\b(?:even|if|odd)\b/,boolean:/\b(?:true|false|null)\b/,number:/\b0x[\dA-Fa-f]+|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[Ee][-+]?\d+)?/,operator:[{pattern:/(\s)(?:and|b-and|b-xor|b-or|ends with|in|is|matches|not|or|same as|starts with)(?=\s)/,lookbehind:!0},/[=<>]=?|!=|\*\*?|\/\/?|\?:?|[-+~%|]/],property:/\b[a-zA-Z_]\w*\b/,punctuation:/[()\[\]{}:.,]/}},other:{pattern:/\S(?:[\s\S]*\S)?/,inside:e.languages.markup}}}e.exports=t,t.displayName="twig",t.aliases=[]},4979(e){"use strict";function t(e){var 
t,n;(t=e).languages.typescript=t.languages.extend("javascript",{"class-name":{pattern:/(\b(?:class|extends|implements|instanceof|interface|new|type)\s+)(?!keyof\b)(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?:\s*<(?:[^<>]|<(?:[^<>]|<[^<>]*>)*>)*>)?/,lookbehind:!0,greedy:!0,inside:null},builtin:/\b(?:string|Function|any|number|boolean|Array|symbol|console|Promise|unknown|never)\b/}),t.languages.typescript.keyword.push(/\b(?:abstract|as|declare|implements|is|keyof|readonly|require)\b/,/\b(?:asserts|infer|interface|module|namespace|type)(?!\s*[^\s_${}*a-zA-Z\xA0-\uFFFF])/),delete t.languages.typescript.parameter,delete(n=t.languages.extend("typescript",{}))["class-name"],t.languages.typescript["class-name"].inside=n,t.languages.insertBefore("typescript","function",{decorator:{pattern:/@[$\w\xA0-\uFFFF]+/,inside:{at:{pattern:/^@/,alias:"operator"},function:/^[\s\S]+/}},"generic-function":{pattern:/#?(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*\s*<(?:[^<>]|<(?:[^<>]|<[^<>]*>)*>)*>(?=\s*\()/,greedy:!0,inside:{function:/^#?(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*/,generic:{pattern:/<[\s\S]+/,alias:"class-name",inside:n}}}}),t.languages.ts=t.languages.typescript}e.exports=t,t.displayName="typescript",t.aliases=["ts"]},23159(e){"use strict";function t(e){var t,n;n=/\b(?:ACT|ACTIFSUB|CARRAY|CASE|CLEARGIF|COA|COA_INT|CONSTANTS|CONTENT|CUR|EDITPANEL|EFFECT|EXT|FILE|FLUIDTEMPLATE|FORM|FRAME|FRAMESET|GIFBUILDER|GMENU|GMENU_FOLDOUT|GMENU_LAYERS|GP|HMENU|HRULER|HTML|IENV|IFSUB|IMAGE|IMGMENU|IMGMENUITEM|IMGTEXT|IMG_RESOURCE|INCLUDE_TYPOSCRIPT|JSMENU|JSMENUITEM|LLL|LOAD_REGISTER|NO|PAGE|RECORDS|RESTORE_REGISTER|TEMPLATE|TEXT|TMENU|TMENUITEM|TMENU_LAYERS|USER|USER_INT|_GIFBUILDER|global|globalString|globalVar)\b/,(t=e).languages.typoscript={comment:[{pattern:/(^|[^\\])\/\*[\s\S]*?(?:\*\/|$)/,lookbehind:!0},{pattern:/(^|[^\\:= \t]|(?:^|[^= \t])[ 
\t]+)\/\/.*/,lookbehind:!0,greedy:!0},{pattern:/(^|[^"'])#.*/,lookbehind:!0,greedy:!0}],function:[{pattern://,inside:{string:{pattern:/"[^"\r\n]*"|'[^'\r\n]*'/,inside:{keyword:n}},keyword:{pattern:/INCLUDE_TYPOSCRIPT/}}},{pattern:/@import\s*(?:"[^"\r\n]*"|'[^'\r\n]*')/,inside:{string:/"[^"\r\n]*"|'[^'\r\n]*'/}}],string:{pattern:/^([^=]*=[< ]?)(?:(?!\]\n).)*/,lookbehind:!0,inside:{function:/\{\$.*\}/,keyword:n,number:/^[0-9]+$/,punctuation:/[,|:]/}},keyword:n,number:{pattern:/\b[0-9]+\s*[.{=]/,inside:{operator:/[.{=]/}},tag:{pattern:/\.?[-\w\\]+\.?/,inside:{punctuation:/\./}},punctuation:/[{}[\];(),.:|]/,operator:/[<>]=?|[!=]=?=?|--?|\+\+?|&&?|\|\|?|[?*/~^%]/},t.languages.tsconfig=t.languages.typoscript}e.exports=t,t.displayName="typoscript",t.aliases=["tsconfig"]},34966(e){"use strict";function t(e){e.languages.unrealscript={comment:/\/\/.*|\/\*[\s\S]*?\*\//,string:{pattern:/(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0},category:{pattern:/(\b(?:(?:autoexpand|hide|show)categories|var)\s*\()[^()]+(?=\))/,lookbehind:!0,greedy:!0,alias:"property"},metadata:{pattern:/(\w\s*)<\s*\w+\s*=[^<>|=\r\n]+(?:\|\s*\w+\s*=[^<>|=\r\n]+)*>/,lookbehind:!0,greedy:!0,inside:{property:/\b\w+(?=\s*=)/,operator:/=/,punctuation:/[<>|]/}},macro:{pattern:/`\w+/,alias:"property"},"class-name":{pattern:/(\b(?:class|enum|extends|interface|state(?:\(\))?|struct|within)\s+)\w+/,lookbehind:!0},keyword:/\b(?:abstract|actor|array|auto|autoexpandcategories|bool|break|byte|case|class|classgroup|client|coerce|collapsecategories|config|const|continue|default|defaultproperties|delegate|dependson|deprecated|do|dontcollapsecategories|editconst|editinlinenew|else|enum|event|exec|export|extends|final|float|for|forcescriptorder|foreach|function|goto|guid|hidecategories|hidedropdown|if|ignores|implements|inherits|input|int|interface|iterator|latent|local|material|name|native|nativereplication|noexport|nontransient|noteditinlinenew|notplaceable|operator|optional|out|pawn|perobjectconfig|perobjectloc
alized|placeable|postoperator|preoperator|private|protected|reliable|replication|return|server|showcategories|simulated|singular|state|static|string|struct|structdefault|structdefaultproperties|switch|texture|transient|travel|unreliable|until|var|vector|while|within)\b/,function:/\b[a-z_]\w*(?=\s*\()/i,boolean:/\b(?:false|true)\b/,number:/\b0x[\da-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?/i,operator:/>>|<<|--|\+\+|\*\*|[-+*/~!=<>$@]=?|&&?|\|\|?|\^\^?|[?:%]|\b(?:Cross|Dot|ClockwiseFrom)\b/,punctuation:/[()[\]{};,.]/},e.languages.uc=e.languages.uscript=e.languages.unrealscript}e.exports=t,t.displayName="unrealscript",t.aliases=["uc","uscript"]},38521(e){"use strict";function t(e){e.languages.uri={scheme:{pattern:/^[a-z][a-z0-9+.-]*:/im,greedy:!0,inside:{"scheme-delimiter":/:$/}},fragment:{pattern:/#[\w\-.~!$&'()*+,;=%:@/?]*/,inside:{"fragment-delimiter":/^#/}},query:{pattern:/\?[\w\-.~!$&'()*+,;=%:@/?]*/,inside:{"query-delimiter":{pattern:/^\?/,greedy:!0},"pair-delimiter":/[&;]/,pair:{pattern:/^[^=][\s\S]*/,inside:{key:/^[^=]+/,value:{pattern:/(^=)[\s\S]+/,lookbehind:!0}}}}},authority:{pattern:RegExp(/^\/\//.source+/(?:[\w\-.~!$&'()*+,;=%:]*@)?/.source+("(?:"+/\[(?:[0-9a-fA-F:.]{2,48}|v[0-9a-fA-F]+\.[\w\-.~!$&'()*+,;=]+)\]/.source+"|")+/[\w\-.~!$&'()*+,;=%]*/.source+")"+/(?::\d*)?/.source,"m"),inside:{"authority-delimiter":/^\/\//,"user-info-segment":{pattern:/^[\w\-.~!$&'()*+,;=%:]*@/,inside:{"user-info-delimiter":/@$/,"user-info":/^[\w\-.~!$&'()*+,;=%:]+/}},"port-segment":{pattern:/:\d*$/,inside:{"port-delimiter":/^:/,port:/^\d+/}},host:{pattern:/[\s\S]+/,inside:{"ip-literal":{pattern:/^\[[\s\S]+\]$/,inside:{"ip-literal-delimiter":/^\[|\]$/,"ipv-future":/^v[\s\S]+/,"ipv6-address":/^[\s\S]+/}},"ipv4-address":/^(?:(?:[03-9]\d?|[12]\d{0,2})\.){3}(?:[03-9]\d?|[12]{0,2})$/}}}},path:{pattern:/^[\w\-.~!$&'()*+,;=%:@/]+/m,inside:{"path-separator":/\//}}},e.languages.url=e.languages.uri}e.exports=t,t.displayName="uri",t.aliases=["url"]},7255(e){"use strict";function 
t(e){var t,n;n={pattern:/[\s\S]+/,inside:null},(t=e).languages.v=t.languages.extend("clike",{string:[{pattern:/`(?:\\`|\\?[^`]{1,2})`/,alias:"rune"},{pattern:/r?(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,alias:"quoted-string",greedy:!0,inside:{interpolation:{pattern:/((?:^|[^\\])(?:\\{2})*)\$(?:\{[^{}]*\}|\w+(?:\.\w+(?:\([^\(\)]*\))?|\[[^\[\]]+\])*)/,lookbehind:!0,inside:{"interpolation-variable":{pattern:/^\$\w[\s\S]*$/,alias:"variable"},"interpolation-punctuation":{pattern:/^\$\{|\}$/,alias:"punctuation"},"interpolation-expression":n}}}}],"class-name":{pattern:/(\b(?:enum|interface|struct|type)\s+)(?:C\.)?\w+/,lookbehind:!0},keyword:/(?:\b(?:as|asm|assert|atomic|break|chan|const|continue|defer|else|embed|enum|fn|for|__global|go(?:to)?|if|import|in|interface|is|lock|match|module|mut|none|or|pub|return|rlock|select|shared|sizeof|static|struct|type(?:of)?|union|unsafe)|\$(?:if|else|for)|#(?:include|flag))\b/,number:/\b(?:0x[a-f\d]+(?:_[a-f\d]+)*|0b[01]+(?:_[01]+)*|0o[0-7]+(?:_[0-7]+)*|\d+(?:_\d+)*(?:\.\d+(?:_\d+)*)?)\b/i,operator:/~|\?|[*\/%^!=]=?|\+[=+]?|-[=-]?|\|[=|]?|&(?:=|&|\^=?)?|>(?:>=?|=)?|<(?:<=?|=|-)?|:=|\.\.\.?/,builtin:/\b(?:any(?:_int|_float)?|bool|byte(?:ptr)?|charptr|f(?:32|64)|i(?:8|16|nt|64|128)|rune|size_t|string|u(?:16|32|64|128)|voidptr)\b/}),n.inside=t.languages.v,t.languages.insertBefore("v","operator",{attribute:{pattern:/(^[\t ]*)\[(?:deprecated|unsafe_fn|typedef|live|inline|flag|ref_only|windows_stdcall|direct_array_access)\]/m,lookbehind:!0,alias:"annotation",inside:{punctuation:/[\[\]]/,keyword:/\w+/}},generic:{pattern:/<\w+>(?=\s*[\)\{])/,inside:{punctuation:/[<>]/,"class-name":/\w+/}}}),t.languages.insertBefore("v","function",{"generic-function":{pattern:/\b\w+\s*<\w+>(?=\()/,inside:{function:/^\w+/,generic:{pattern:/<\w+>/,inside:t.languages.v.generic.inside}}}})}e.exports=t,t.displayName="v",t.aliases=[]},28173(e){"use strict";function 
t(e){e.languages.vala=e.languages.extend("clike",{"class-name":[{pattern:/\b[A-Z]\w*(?:\.\w+)*\b(?=(?:\?\s+|\*?\s+\*?)\w)/,inside:{punctuation:/\./}},{pattern:/(\[)[A-Z]\w*(?:\.\w+)*\b/,lookbehind:!0,inside:{punctuation:/\./}},{pattern:/(\b(?:class|interface)\s+[A-Z]\w*(?:\.\w+)*\s*:\s*)[A-Z]\w*(?:\.\w+)*\b/,lookbehind:!0,inside:{punctuation:/\./}},{pattern:/((?:\b(?:class|interface|new|struct|enum)\s+)|(?:catch\s+\())[A-Z]\w*(?:\.\w+)*\b/,lookbehind:!0,inside:{punctuation:/\./}}],keyword:/\b(?:bool|char|double|float|null|size_t|ssize_t|string|unichar|void|int|int8|int16|int32|int64|long|short|uchar|uint|uint8|uint16|uint32|uint64|ulong|ushort|class|delegate|enum|errordomain|interface|namespace|struct|break|continue|do|for|foreach|return|while|else|if|switch|assert|case|default|abstract|const|dynamic|ensures|extern|inline|internal|override|private|protected|public|requires|signal|static|virtual|volatile|weak|async|owned|unowned|try|catch|finally|throw|as|base|construct|delete|get|in|is|lock|new|out|params|ref|sizeof|set|this|throws|typeof|using|value|var|yield)\b/i,function:/\b\w+(?=\s*\()/,number:/(?:\b0x[\da-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?)(?:f|u?l?)?/i,operator:/\+\+|--|&&|\|\||<<=?|>>=?|=>|->|~|[+\-*\/%&^|=!<>]=?|\?\??|\.\.\./,punctuation:/[{}[\];(),.:]/,constant:/\b[A-Z0-9_]+\b/}),e.languages.insertBefore("vala","string",{"raw-string":{pattern:/"""[\s\S]*?"""/,greedy:!0,alias:"string"},"template-string":{pattern:/@"[\s\S]*?"/,greedy:!0,inside:{interpolation:{pattern:/\$(?:\([^)]*\)|[a-zA-Z]\w*)/,inside:{delimiter:{pattern:/^\$\(?|\)$/,alias:"punctuation"},rest:e.languages.vala}},string:/[\s\S]+/}}}),e.languages.insertBefore("vala","keyword",{regex:{pattern:/\/(?:\[(?:[^\]\\\r\n]|\\.)*\]|\\.|[^/\\\[\r\n])+\/[imsx]{0,4}(?=\s*(?:$|[\r\n,.;})\]]))/,greedy:!0,inside:{"regex-source":{pattern:/^(\/)[\s\S]+(?=\/[a-z]*$)/,lookbehind:!0,alias:"language-regex",inside:e.languages.regex},"regex-delimiter":/^\//,"regex-flags":/^[a-z]+$/}}})}e.exports=t,t.disp
layName="vala",t.aliases=[]},53813(e,t,n){"use strict";var r=n(46241);function i(e){e.register(r),e.languages.vbnet=e.languages.extend("basic",{comment:[{pattern:/(?:!|REM\b).+/i,inside:{keyword:/^REM/i}},{pattern:/(^|[^\\:])'.*/,lookbehind:!0,greedy:!0}],string:{pattern:/(^|[^"])"(?:""|[^"])*"(?!")/i,lookbehind:!0,greedy:!0},keyword:/(?:\b(?:ADDHANDLER|ADDRESSOF|ALIAS|AND|ANDALSO|AS|BEEP|BLOAD|BOOLEAN|BSAVE|BYREF|BYTE|BYVAL|CALL(?: ABSOLUTE)?|CASE|CATCH|CBOOL|CBYTE|CCHAR|CDATE|CDEC|CDBL|CHAIN|CHAR|CHDIR|CINT|CLASS|CLEAR|CLNG|CLOSE|CLS|COBJ|COM|COMMON|CONST|CONTINUE|CSBYTE|CSHORT|CSNG|CSTR|CTYPE|CUINT|CULNG|CUSHORT|DATA|DATE|DECIMAL|DECLARE|DEFAULT|DEF(?: FN| SEG|DBL|INT|LNG|SNG|STR)|DELEGATE|DIM|DIRECTCAST|DO|DOUBLE|ELSE|ELSEIF|END|ENUM|ENVIRON|ERASE|ERROR|EVENT|EXIT|FALSE|FIELD|FILES|FINALLY|FOR(?: EACH)?|FRIEND|FUNCTION|GET|GETTYPE|GETXMLNAMESPACE|GLOBAL|GOSUB|GOTO|HANDLES|IF|IMPLEMENTS|IMPORTS|IN|INHERITS|INPUT|INTEGER|INTERFACE|IOCTL|IS|ISNOT|KEY|KILL|LINE INPUT|LET|LIB|LIKE|LOCATE|LOCK|LONG|LOOP|LSET|ME|MKDIR|MOD|MODULE|MUSTINHERIT|MUSTOVERRIDE|MYBASE|MYCLASS|NAME|NAMESPACE|NARROWING|NEW|NEXT|NOT|NOTHING|NOTINHERITABLE|NOTOVERRIDABLE|OBJECT|OF|OFF|ON(?: COM| ERROR| KEY| TIMER)?|OPERATOR|OPEN|OPTION(?: BASE)?|OPTIONAL|OR|ORELSE|OUT|OVERLOADS|OVERRIDABLE|OVERRIDES|PARAMARRAY|PARTIAL|POKE|PRIVATE|PROPERTY|PROTECTED|PUBLIC|PUT|RAISEEVENT|READ|READONLY|REDIM|REM|REMOVEHANDLER|RESTORE|RESUME|RETURN|RMDIR|RSET|RUN|SBYTE|SELECT(?: CASE)?|SET|SHADOWS|SHARED|SHORT|SINGLE|SHELL|SLEEP|STATIC|STEP|STOP|STRING|STRUCTURE|SUB|SYNCLOCK|SWAP|SYSTEM|THEN|THROW|TIMER|TO|TROFF|TRON|TRUE|TRY|TRYCAST|TYPE|TYPEOF|UINTEGER|ULONG|UNLOCK|UNTIL|USHORT|USING|VIEW PRINT|WAIT|WEND|WHEN|WHILE|WIDENING|WITH|WITHEVENTS|WRITE|WRITEONLY|XOR)|\B(?:#CONST|#ELSE|#ELSEIF|#END|#IF))(?:\$|\b)/i,punctuation:/[,;:(){}]/})}e.exports=i,i.displayName="vbnet",i.aliases=[]},46891(e){"use strict";function t(e){var 
t,n;(t=e).languages.velocity=t.languages.extend("markup",{}),(n={variable:{pattern:/(^|[^\\](?:\\\\)*)\$!?(?:[a-z][\w-]*(?:\([^)]*\))?(?:\.[a-z][\w-]*(?:\([^)]*\))?|\[[^\]]+\])*|\{[^}]+\})/i,lookbehind:!0,inside:{}},string:{pattern:/"[^"]*"|'[^']*'/,greedy:!0},number:/\b\d+\b/,boolean:/\b(?:true|false)\b/,operator:/[=!<>]=?|[+*/%-]|&&|\|\||\.\.|\b(?:eq|g[et]|l[et]|n(?:e|ot))\b/,punctuation:/[(){}[\]:,.]/}).variable.inside={string:n.string,function:{pattern:/([^\w-])[a-z][\w-]*(?=\()/,lookbehind:!0},number:n.number,boolean:n.boolean,punctuation:n.punctuation},t.languages.insertBefore("velocity","comment",{unparsed:{pattern:/(^|[^\\])#\[\[[\s\S]*?\]\]#/,lookbehind:!0,greedy:!0,inside:{punctuation:/^#\[\[|\]\]#$/}},"velocity-comment":[{pattern:/(^|[^\\])#\*[\s\S]*?\*#/,lookbehind:!0,greedy:!0,alias:"comment"},{pattern:/(^|[^\\])##.*/,lookbehind:!0,greedy:!0,alias:"comment"}],directive:{pattern:/(^|[^\\](?:\\\\)*)#@?(?:[a-z][\w-]*|\{[a-z][\w-]*\})(?:\s*\((?:[^()]|\([^()]*\))*\))?/i,lookbehind:!0,inside:{keyword:{pattern:/^#@?(?:[a-z][\w-]*|\{[a-z][\w-]*\})|\bin\b/,inside:{punctuation:/[{}]/}},rest:n}},variable:n.variable}),t.languages.velocity.tag.inside["attr-value"].inside.rest=t.languages.velocity}e.exports=t,t.displayName="velocity",t.aliases=[]},91824(e){"use strict";function 
t(e){e.languages.verilog={comment:/\/\/.*|\/\*[\s\S]*?\*\//,string:{pattern:/"(?:\\(?:\r\n|[\s\S])|[^"\\\r\n])*"/,greedy:!0},property:/\B\$\w+\b/,constant:/\B`\w+\b/,function:/\b\w+(?=\()/,keyword:/\b(?:alias|and|assert|assign|assume|automatic|before|begin|bind|bins|binsof|bit|break|buf|bufif0|bufif1|byte|class|case|casex|casez|cell|chandle|clocking|cmos|config|const|constraint|context|continue|cover|covergroup|coverpoint|cross|deassign|default|defparam|design|disable|dist|do|edge|else|end|endcase|endclass|endclocking|endconfig|endfunction|endgenerate|endgroup|endinterface|endmodule|endpackage|endprimitive|endprogram|endproperty|endspecify|endsequence|endtable|endtask|enum|event|expect|export|extends|extern|final|first_match|for|force|foreach|forever|fork|forkjoin|function|generate|genvar|highz0|highz1|if|iff|ifnone|ignore_bins|illegal_bins|import|incdir|include|initial|inout|input|inside|instance|int|integer|interface|intersect|join|join_any|join_none|large|liblist|library|local|localparam|logic|longint|macromodule|matches|medium|modport|module|nand|negedge|new|nmos|nor|noshowcancelled|not|notif0|notif1|null|or|output|package|packed|parameter|pmos|posedge|primitive|priority|program|property|protected|pull0|pull1|pulldown|pullup|pulsestyle_onevent|pulsestyle_ondetect|pure|rand|randc|randcase|randsequence|rcmos|real|realtime|ref|reg|release|repeat|return|rnmos|rpmos|rtran|rtranif0|rtranif1|scalared|sequence|shortint|shortreal|showcancelled|signed|small|solve|specify|specparam|static|string|strong0|strong1|struct|super|supply0|supply1|table|tagged|task|this|throughout|time|timeprecision|timeunit|tran|tranif0|tranif1|tri|tri0|tri1|triand|trior|trireg|type|typedef|union|unique|unsigned|use|uwire|var|vectored|virtual|void|wait|wait_order|wand|weak0|weak1|while|wildcard|wire|with|within|wor|xnor|xor)\b/,important:/\b(?:always_latch|always_comb|always_ff|always)\b ?@?/,number:/\B##?\d+|(?:\b\d+)?'[odbh] 
?[\da-fzx_?]+|\b(?:\d*[._])?\d+(?:e[-+]?\d+)?/i,operator:/[-+{}^~%*\/?=!<>&|]+/,punctuation:/[[\];(),.:]/}}e.exports=t,t.displayName="verilog",t.aliases=[]},9447(e){"use strict";function t(e){e.languages.vhdl={comment:/--.+/,"vhdl-vectors":{pattern:/\b[oxb]"[\da-f_]+"|"[01uxzwlh-]+"/i,alias:"number"},"quoted-function":{pattern:/"\S+?"(?=\()/,alias:"function"},string:/"(?:[^\\"\r\n]|\\(?:\r\n|[\s\S]))*"/,constant:/\b(?:use|library)\b/i,keyword:/\b(?:'active|'ascending|'base|'delayed|'driving|'driving_value|'event|'high|'image|'instance_name|'last_active|'last_event|'last_value|'left|'leftof|'length|'low|'path_name|'pos|'pred|'quiet|'range|'reverse_range|'right|'rightof|'simple_name|'stable|'succ|'transaction|'val|'value|access|after|alias|all|architecture|array|assert|attribute|begin|block|body|buffer|bus|case|component|configuration|constant|disconnect|downto|else|elsif|end|entity|exit|file|for|function|generate|generic|group|guarded|if|impure|in|inertial|inout|is|label|library|linkage|literal|loop|map|new|next|null|of|on|open|others|out|package|port|postponed|procedure|process|pure|range|record|register|reject|report|return|select|severity|shared|signal|subtype|then|to|transport|type|unaffected|units|until|use|variable|wait|when|while|with)\b/i,boolean:/\b(?:true|false)\b/i,function:/\w+(?=\()/,number:/'[01uxzwlh-]'|\b(?:\d+#[\da-f_.]+#|\d[\d_.]*)(?:e[-+]?\d+)?/i,operator:/[<>]=?|:=|[-+*/&=]|\b(?:abs|not|mod|rem|sll|srl|sla|sra|rol|ror|and|or|nand|xnor|xor|nor)\b/i,punctuation:/[{}[\];(),.:]/}}e.exports=t,t.displayName="vhdl",t.aliases=[]},53062(e){"use strict";function 
t(e){e.languages.vim={string:/"(?:[^"\\\r\n]|\\.)*"|'(?:[^'\r\n]|'')*'/,comment:/".*/,function:/\b\w+(?=\()/,keyword:/\b(?:ab|abbreviate|abc|abclear|abo|aboveleft|al|all|arga|argadd|argd|argdelete|argdo|arge|argedit|argg|argglobal|argl|arglocal|ar|args|argu|argument|as|ascii|bad|badd|ba|ball|bd|bdelete|be|bel|belowright|bf|bfirst|bl|blast|bm|bmodified|bn|bnext|bN|bNext|bo|botright|bp|bprevious|brea|break|breaka|breakadd|breakd|breakdel|breakl|breaklist|br|brewind|bro|browse|bufdo|b|buffer|buffers|bun|bunload|bw|bwipeout|ca|cabbrev|cabc|cabclear|caddb|caddbuffer|cad|caddexpr|caddf|caddfile|cal|call|cat|catch|cb|cbuffer|cc|ccl|cclose|cd|ce|center|cex|cexpr|cf|cfile|cfir|cfirst|cgetb|cgetbuffer|cgete|cgetexpr|cg|cgetfile|c|change|changes|chd|chdir|che|checkpath|checkt|checktime|cla|clast|cl|clist|clo|close|cmapc|cmapclear|cnew|cnewer|cn|cnext|cN|cNext|cnf|cnfile|cNfcNfile|cnorea|cnoreabbrev|col|colder|colo|colorscheme|comc|comclear|comp|compiler|conf|confirm|con|continue|cope|copen|co|copy|cpf|cpfile|cp|cprevious|cq|cquit|cr|crewind|cuna|cunabbrev|cu|cunmap|cw|cwindow|debugg|debuggreedy|delc|delcommand|d|delete|delf|delfunction|delm|delmarks|diffg|diffget|diffoff|diffpatch|diffpu|diffput|diffsplit|diffthis|diffu|diffupdate|dig|digraphs|di|display|dj|djump|dl|dlist|dr|drop|ds|dsearch|dsp|dsplit|earlier|echoe|echoerr|echom|echomsg|echon|e|edit|el|else|elsei|elseif|em|emenu|endfo|endfor|endf|endfunction|endfun|en|endif|endt|endtry|endw|endwhile|ene|enew|ex|exi|exit|exu|exusage|f|file|files|filetype|fina|finally|fin|find|fini|finish|fir|first|fix|fixdel|fo|fold|foldc|foldclose|folddoc|folddoclosed|foldd|folddoopen|foldo|foldopen|for|fu|fun|function|go|goto|gr|grep|grepa|grepadd|ha|hardcopy|h|help|helpf|helpfind|helpg|helpgrep|helpt|helptags|hid|hide|his|history|ia|iabbrev|iabc|iabclear|if|ij|ijump|il|ilist|imapc|imapclear|in|inorea|inoreabbrev|isearch|isp|isplit|iuna|iunabbrev|iu|iunmap|j|join|ju|jumps|k|keepalt|keepj|keepjumps|kee|keepmarks|laddb|laddbuffer|lad|laddexpr|l
addf|laddfile|lan|language|la|last|later|lb|lbuffer|lc|lcd|lch|lchdir|lcl|lclose|let|left|lefta|leftabove|lex|lexpr|lf|lfile|lfir|lfirst|lgetb|lgetbuffer|lgete|lgetexpr|lg|lgetfile|lgr|lgrep|lgrepa|lgrepadd|lh|lhelpgrep|l|list|ll|lla|llast|lli|llist|lmak|lmake|lm|lmap|lmapc|lmapclear|lnew|lnewer|lne|lnext|lN|lNext|lnf|lnfile|lNf|lNfile|ln|lnoremap|lo|loadview|loc|lockmarks|lockv|lockvar|lol|lolder|lop|lopen|lpf|lpfile|lp|lprevious|lr|lrewind|ls|lt|ltag|lu|lunmap|lv|lvimgrep|lvimgrepa|lvimgrepadd|lw|lwindow|mak|make|ma|mark|marks|mat|match|menut|menutranslate|mk|mkexrc|mks|mksession|mksp|mkspell|mkvie|mkview|mkv|mkvimrc|mod|mode|m|move|mzf|mzfile|mz|mzscheme|nbkey|new|n|next|N|Next|nmapc|nmapclear|noh|nohlsearch|norea|noreabbrev|nu|number|nun|nunmap|omapc|omapclear|on|only|o|open|opt|options|ou|ounmap|pc|pclose|ped|pedit|pe|perl|perld|perldo|po|pop|popu|popup|pp|ppop|pre|preserve|prev|previous|p|print|P|Print|profd|profdel|prof|profile|promptf|promptfind|promptr|promptrepl|ps|psearch|pta|ptag|ptf|ptfirst|ptj|ptjump|ptl|ptlast|ptn|ptnext|ptN|ptNext|ptp|ptprevious|ptr|ptrewind|pts|ptselect|pu|put|pw|pwd|pyf|pyfile|py|python|qa|qall|q|quit|quita|quitall|r|read|rec|recover|redi|redir|red|redo|redr|redraw|redraws|redrawstatus|reg|registers|res|resize|ret|retab|retu|return|rew|rewind|ri|right|rightb|rightbelow|rub|ruby|rubyd|rubydo|rubyf|rubyfile|ru|runtime|rv|rviminfo|sal|sall|san|sandbox|sa|sargument|sav|saveas|sba|sball|sbf|sbfirst|sbl|sblast|sbm|sbmodified|sbn|sbnext|sbN|sbNext|sbp|sbprevious|sbr|sbrewind|sb|sbuffer|scripte|scriptencoding|scrip|scriptnames|se|set|setf|setfiletype|setg|setglobal|setl|setlocal|sf|sfind|sfir|sfirst|sh|shell|sign|sil|silent|sim|simalt|sla|slast|sl|sleep|sm|smagic|smap|smapc|smapclear|sme|smenu|sn|snext|sN|sNext|sni|sniff|sno|snomagic|snor|snoremap|snoreme|snoremenu|sor|sort|so|source|spelld|spelldump|spe|spellgood|spelli|spellinfo|spellr|spellrepall|spellu|spellundo|spellw|spellwrong|sp|split|spr|sprevious|sre|srewind|sta|stag|startg|start
greplace|star|startinsert|startr|startreplace|stj|stjump|st|stop|stopi|stopinsert|sts|stselect|sun|sunhide|sunm|sunmap|sus|suspend|sv|sview|syncbind|t|tab|tabc|tabclose|tabd|tabdo|tabe|tabedit|tabf|tabfind|tabfir|tabfirst|tabl|tablast|tabm|tabmove|tabnew|tabn|tabnext|tabN|tabNext|tabo|tabonly|tabp|tabprevious|tabr|tabrewind|tabs|ta|tag|tags|tc|tcl|tcld|tcldo|tclf|tclfile|te|tearoff|tf|tfirst|th|throw|tj|tjump|tl|tlast|tm|tmenu|tn|tnext|tN|tNext|to|topleft|tp|tprevious|tr|trewind|try|ts|tselect|tu|tunmenu|una|unabbreviate|u|undo|undoj|undojoin|undol|undolist|unh|unhide|unlet|unlo|unlockvar|unm|unmap|up|update|verb|verbose|ve|version|vert|vertical|vie|view|vim|vimgrep|vimgrepa|vimgrepadd|vi|visual|viu|viusage|vmapc|vmapclear|vne|vnew|vs|vsplit|vu|vunmap|wa|wall|wh|while|winc|wincmd|windo|winp|winpos|win|winsize|wn|wnext|wN|wNext|wp|wprevious|wq|wqa|wqall|w|write|ws|wsverb|wv|wviminfo|X|xa|xall|x|xit|xm|xmap|xmapc|xmapclear|xme|xmenu|XMLent|XMLns|xn|xnoremap|xnoreme|xnoremenu|xu|xunmap|y|yank)\b/,builtin:/\b(?:autocmd|acd|ai|akm|aleph|allowrevins|altkeymap|ambiwidth|ambw|anti|antialias|arab|arabic|arabicshape|ari|arshape|autochdir|autoindent|autoread|autowrite|autowriteall|aw|awa|background|backspace|backup|backupcopy|backupdir|backupext|backupskip|balloondelay|ballooneval|balloonexpr|bdir|bdlay|beval|bex|bexpr|bg|bh|bin|binary|biosk|bioskey|bk|bkc|bomb|breakat|brk|browsedir|bs|bsdir|bsk|bt|bufhidden|buflisted|buftype|casemap|ccv|cdpath|cedit|cfu|ch|charconvert|ci|cin|cindent|cink|cinkeys|cino|cinoptions|cinw|cinwords|clipboard|cmdheight|cmdwinheight|cmp|cms|columns|com|comments|commentstring|compatible|complete|completefunc|completeopt|consk|conskey|copyindent|cot|cpo|cpoptions|cpt|cscopepathcomp|cscopeprg|cscopequickfix|cscopetag|cscopetagorder|cscopeverbose|cspc|csprg|csqf|cst|csto|csverb|cuc|cul|cursorcolumn|cursorline|cwh|debug|deco|def|define|delcombine|dex|dg|dict|dictionary|diff|diffexpr|diffopt|digraph|dip|dir|directory|dy|ea|ead|eadirection|eb|ed|edcompatible
|ef|efm|ei|ek|enc|encoding|endofline|eol|ep|equalalways|equalprg|errorbells|errorfile|errorformat|esckeys|et|eventignore|expandtab|exrc|fcl|fcs|fdc|fde|fdi|fdl|fdls|fdm|fdn|fdo|fdt|fen|fenc|fencs|fex|ff|ffs|fileencoding|fileencodings|fileformat|fileformats|fillchars|fk|fkmap|flp|fml|fmr|foldcolumn|foldenable|foldexpr|foldignore|foldlevel|foldlevelstart|foldmarker|foldmethod|foldminlines|foldnestmax|foldtext|formatexpr|formatlistpat|formatoptions|formatprg|fp|fs|fsync|ft|gcr|gd|gdefault|gfm|gfn|gfs|gfw|ghr|gp|grepformat|grepprg|gtl|gtt|guicursor|guifont|guifontset|guifontwide|guiheadroom|guioptions|guipty|guitablabel|guitabtooltip|helpfile|helpheight|helplang|hf|hh|hi|hidden|highlight|hk|hkmap|hkmapp|hkp|hl|hlg|hls|hlsearch|ic|icon|iconstring|ignorecase|im|imactivatekey|imak|imc|imcmdline|imd|imdisable|imi|iminsert|ims|imsearch|inc|include|includeexpr|incsearch|inde|indentexpr|indentkeys|indk|inex|inf|infercase|insertmode|isf|isfname|isi|isident|isk|iskeyword|isprint|joinspaces|js|key|keymap|keymodel|keywordprg|km|kmp|kp|langmap|langmenu|laststatus|lazyredraw|lbr|lcs|linebreak|lines|linespace|lisp|lispwords|listchars|loadplugins|lpl|lsp|lz|macatsui|magic|makeef|makeprg|matchpairs|matchtime|maxcombine|maxfuncdepth|maxmapdepth|maxmem|maxmempattern|maxmemtot|mco|mef|menuitems|mfd|mh|mis|mkspellmem|ml|mls|mm|mmd|mmp|mmt|modeline|modelines|modifiable|modified|more|mouse|mousef|mousefocus|mousehide|mousem|mousemodel|mouses|mouseshape|mouset|mousetime|mp|mps|msm|mzq|mzquantum|nf|nrformats|numberwidth|nuw|odev|oft|ofu|omnifunc|opendevice|operatorfunc|opfunc|osfiletype|pa|para|paragraphs|paste|pastetoggle|patchexpr|patchmode|path|pdev|penc|pex|pexpr|pfn|ph|pheader|pi|pm|pmbcs|pmbfn|popt|preserveindent|previewheight|previewwindow|printdevice|printencoding|printexpr|printfont|printheader|printmbcharset|printmbfont|printoptions|prompt|pt|pumheight|pvh|pvw|qe|quoteescape|readonly|remap|report|restorescreen|revins|rightleft|rightleftcmd|rl|rlc|ro|rs|rtp|ruf|ruler|rulerformat|runti
mepath|sbo|sc|scb|scr|scroll|scrollbind|scrolljump|scrolloff|scrollopt|scs|sect|sections|secure|sel|selection|selectmode|sessionoptions|sft|shcf|shellcmdflag|shellpipe|shellquote|shellredir|shellslash|shelltemp|shelltype|shellxquote|shiftround|shiftwidth|shm|shortmess|shortname|showbreak|showcmd|showfulltag|showmatch|showmode|showtabline|shq|si|sidescroll|sidescrolloff|siso|sj|slm|smartcase|smartindent|smarttab|smc|smd|softtabstop|sol|spc|spell|spellcapcheck|spellfile|spelllang|spellsuggest|spf|spl|splitbelow|splitright|sps|sr|srr|ss|ssl|ssop|stal|startofline|statusline|stl|stmp|su|sua|suffixes|suffixesadd|sw|swapfile|swapsync|swb|swf|switchbuf|sws|sxq|syn|synmaxcol|syntax|tabline|tabpagemax|tabstop|tagbsearch|taglength|tagrelative|tagstack|tal|tb|tbi|tbidi|tbis|tbs|tenc|term|termbidi|termencoding|terse|textauto|textmode|textwidth|tgst|thesaurus|tildeop|timeout|timeoutlen|title|titlelen|titleold|titlestring|toolbar|toolbariconsize|top|tpm|tsl|tsr|ttimeout|ttimeoutlen|ttm|tty|ttybuiltin|ttyfast|ttym|ttymouse|ttyscroll|ttytype|tw|tx|uc|ul|undolevels|updatecount|updatetime|ut|vb|vbs|vdir|verbosefile|vfile|viewdir|viewoptions|viminfo|virtualedit|visualbell|vop|wak|warn|wb|wc|wcm|wd|weirdinvert|wfh|wfw|whichwrap|wi|wig|wildchar|wildcharm|wildignore|wildmenu|wildmode|wildoptions|wim|winaltkeys|window|winfixheight|winfixwidth|winheight|winminheight|winminwidth|winwidth|wiv|wiw|wm|wmh|wmnu|wmw|wop|wrap|wrapmargin|wrapscan|writeany|writebackup|writedelay|ww|noacd|noai|noakm|noallowrevins|noaltkeymap|noanti|noantialias|noar|noarab|noarabic|noarabicshape|noari|noarshape|noautochdir|noautoindent|noautoread|noautowrite|noautowriteall|noaw|noawa|nobackup|noballooneval|nobeval|nobin|nobinary|nobiosk|nobioskey|nobk|nobl|nobomb|nobuflisted|nocf|noci|nocin|nocindent|nocompatible|noconfirm|noconsk|noconskey|nocopyindent|nocp|nocscopetag|nocscopeverbose|nocst|nocsverb|nocuc|nocul|nocursorcolumn|nocursorline|nodeco|nodelcombine|nodg|nodiff|nodigraph|nodisable|noea|noeb|noed|noedcompatib
le|noek|noendofline|noeol|noequalalways|noerrorbells|noesckeys|noet|noex|noexpandtab|noexrc|nofen|nofk|nofkmap|nofoldenable|nogd|nogdefault|noguipty|nohid|nohidden|nohk|nohkmap|nohkmapp|nohkp|nohls|noic|noicon|noignorecase|noim|noimc|noimcmdline|noimd|noincsearch|noinf|noinfercase|noinsertmode|nois|nojoinspaces|nojs|nolazyredraw|nolbr|nolinebreak|nolisp|nolist|noloadplugins|nolpl|nolz|noma|nomacatsui|nomagic|nomh|noml|nomod|nomodeline|nomodifiable|nomodified|nomore|nomousef|nomousefocus|nomousehide|nonu|nonumber|noodev|noopendevice|nopaste|nopi|nopreserveindent|nopreviewwindow|noprompt|nopvw|noreadonly|noremap|norestorescreen|norevins|nori|norightleft|norightleftcmd|norl|norlc|noro|nors|noru|noruler|nosb|nosc|noscb|noscrollbind|noscs|nosecure|nosft|noshellslash|noshelltemp|noshiftround|noshortname|noshowcmd|noshowfulltag|noshowmatch|noshowmode|nosi|nosm|nosmartcase|nosmartindent|nosmarttab|nosmd|nosn|nosol|nospell|nosplitbelow|nosplitright|nospr|nosr|nossl|nosta|nostartofline|nostmp|noswapfile|noswf|nota|notagbsearch|notagrelative|notagstack|notbi|notbidi|notbs|notermbidi|noterse|notextauto|notextmode|notf|notgst|notildeop|notimeout|notitle|noto|notop|notr|nottimeout|nottybuiltin|nottyfast|notx|novb|novisualbell|nowa|nowarn|nowb|noweirdinvert|nowfh|nowfw|nowildmenu|nowinfixheight|nowinfixwidth|nowiv|nowmnu|nowrap|nowrapscan|nowrite|nowriteany|nowritebackup|nows|invacd|invai|invakm|invallowrevins|invaltkeymap|invanti|invantialias|invar|invarab|invarabic|invarabicshape|invari|invarshape|invautochdir|invautoindent|invautoread|invautowrite|invautowriteall|invaw|invawa|invbackup|invballooneval|invbeval|invbin|invbinary|invbiosk|invbioskey|invbk|invbl|invbomb|invbuflisted|invcf|invci|invcin|invcindent|invcompatible|invconfirm|invconsk|invconskey|invcopyindent|invcp|invcscopetag|invcscopeverbose|invcst|invcsverb|invcuc|invcul|invcursorcolumn|invcursorline|invdeco|invdelcombine|invdg|invdiff|invdigraph|invdisable|invea|inveb|inved|invedcompatible|invek|invendofline|inveol|i
nvequalalways|inverrorbells|invesckeys|invet|invex|invexpandtab|invexrc|invfen|invfk|invfkmap|invfoldenable|invgd|invgdefault|invguipty|invhid|invhidden|invhk|invhkmap|invhkmapp|invhkp|invhls|invhlsearch|invic|invicon|invignorecase|invim|invimc|invimcmdline|invimd|invincsearch|invinf|invinfercase|invinsertmode|invis|invjoinspaces|invjs|invlazyredraw|invlbr|invlinebreak|invlisp|invlist|invloadplugins|invlpl|invlz|invma|invmacatsui|invmagic|invmh|invml|invmod|invmodeline|invmodifiable|invmodified|invmore|invmousef|invmousefocus|invmousehide|invnu|invnumber|invodev|invopendevice|invpaste|invpi|invpreserveindent|invpreviewwindow|invprompt|invpvw|invreadonly|invremap|invrestorescreen|invrevins|invri|invrightleft|invrightleftcmd|invrl|invrlc|invro|invrs|invru|invruler|invsb|invsc|invscb|invscrollbind|invscs|invsecure|invsft|invshellslash|invshelltemp|invshiftround|invshortname|invshowcmd|invshowfulltag|invshowmatch|invshowmode|invsi|invsm|invsmartcase|invsmartindent|invsmarttab|invsmd|invsn|invsol|invspell|invsplitbelow|invsplitright|invspr|invsr|invssl|invsta|invstartofline|invstmp|invswapfile|invswf|invta|invtagbsearch|invtagrelative|invtagstack|invtbi|invtbidi|invtbs|invtermbidi|invterse|invtextauto|invtextmode|invtf|invtgst|invtildeop|invtimeout|invtitle|invto|invtop|invtr|invttimeout|invttybuiltin|invttyfast|invtx|invvb|invvisualbell|invwa|invwarn|invwb|invweirdinvert|invwfh|invwfw|invwildmenu|invwinfixheight|invwinfixwidth|invwiv|invwmnu|invwrap|invwrapscan|invwrite|invwriteany|invwritebackup|invws|t_AB|t_AF|t_al|t_AL|t_bc|t_cd|t_ce|t_Ce|t_cl|t_cm|t_Co|t_cs|t_Cs|t_CS|t_CV|t_da|t_db|t_dl|t_DL|t_EI|t_F1|t_F2|t_F3|t_F4|t_F5|t_F6|t_F7|t_F8|t_F9|t_fs|t_IE|t_IS|t_k1|t_K1|t_k2|t_k3|t_K3|t_k4|t_K4|t_k5|t_K5|t_k6|t_K6|t_k7|t_K7|t_k8|t_K8|t_k9|t_K9|t_KA|t_kb|t_kB|t_KB|t_KC|t_kd|t_kD|t_KD|t_ke|t_KE|t_KF|t_KG|t_kh|t_KH|t_kI|t_KI|t_KJ|t_KK|t_kl|t_KL|t_kN|t_kP|t_kr|t_ks|t_ku|t_le|t_mb|t_md|t_me|t_mr|t_ms|t_nd|t_op|t_RI|t_RV|t_Sb|t_se|t_Sf|t_SI|t_so|t_sr|t_te|t_ti|t_ts|t_ue|t_us|t
_ut|t_vb|t_ve|t_vi|t_vs|t_WP|t_WS|t_xs|t_ZH|t_ZR)\b/,number:/\b(?:0x[\da-f]+|\d+(?:\.\d+)?)\b/i,operator:/\|\||&&|[-+.]=?|[=!](?:[=~][#?]?)?|[<>]=?[#?]?|[*\/%?]|\b(?:is(?:not)?)\b/,punctuation:/[{}[\](),;:]/}}e.exports=t,t.displayName="vim",t.aliases=[]},46215(e){"use strict";function t(e){e.languages["visual-basic"]={comment:{pattern:/(?:['‘’]|REM\b)(?:[^\r\n_]|_(?:\r\n?|\n)?)*/i,inside:{keyword:/^REM/i}},directive:{pattern:/#(?:Const|Else|ElseIf|End|ExternalChecksum|ExternalSource|If|Region)(?:[^\S\r\n]_[^\S\r\n]*(?:\r\n?|\n)|.)+/i,alias:"comment",greedy:!0},string:{pattern:/\$?["“”](?:["“”]{2}|[^"“”])*["“”]C?/i,greedy:!0},date:{pattern:/#[^\S\r\n]*(?:\d+([/-])\d+\1\d+(?:[^\S\r\n]+(?:\d+[^\S\r\n]*(?:AM|PM)|\d+:\d+(?::\d+)?(?:[^\S\r\n]*(?:AM|PM))?))?|\d+[^\S\r\n]*(?:AM|PM)|\d+:\d+(?::\d+)?(?:[^\S\r\n]*(?:AM|PM))?)[^\S\r\n]*#/i,alias:"builtin"},number:/(?:(?:\b\d+(?:\.\d+)?|\.\d+)(?:E[+-]?\d+)?|&[HO][\dA-F]+)(?:U?[ILS]|[FRD])?/i,boolean:/\b(?:True|False|Nothing)\b/i,keyword:/\b(?:AddHandler|AddressOf|Alias|And(?:Also)?|As|Boolean|ByRef|Byte|ByVal|Call|Case|Catch|C(?:Bool|Byte|Char|Date|Dbl|Dec|Int|Lng|Obj|SByte|Short|Sng|Str|Type|UInt|ULng|UShort)|Char|Class|Const|Continue|Currency|Date|Decimal|Declare|Default|Delegate|Dim|DirectCast|Do|Double|Each|Else(?:If)?|End(?:If)?|Enum|Erase|Error|Event|Exit|Finally|For|Friend|Function|Get(?:Type|XMLNamespace)?|Global|GoSub|GoTo|Handles|If|Implements|Imports|In|Inherits|Integer|Interface|Is|IsNot|Let|Lib|Like|Long|Loop|Me|Mod|Module|Must(?:Inherit|Override)|My(?:Base|Class)|Namespace|Narrowing|New|Next|Not(?:Inheritable|Overridable)?|Object|Of|On|Operator|Option(?:al)?|Or(?:Else)?|Out|Overloads|Overridable|Overrides|ParamArray|Partial|Private|Property|Protected|Public|RaiseEvent|ReadOnly|ReDim|RemoveHandler|Resume|Return|SByte|Select|Set|Shadows|Shared|short|Single|Static|Step|Stop|String|Structure|Sub|SyncLock|Then|Throw|To|Try|TryCast|Type|TypeOf|U(?:Integer|Long|Short)|Using|Variant|Wend|When|While|Widening|With(?:Events)?
|WriteOnly|Until|Xor)\b/i,operator:[/[+\-*/\\^<=>&#@$%!]/,{pattern:/([^\S\r\n])_(?=[^\S\r\n]*[\r\n])/,lookbehind:!0}],punctuation:/[{}().,:?]/},e.languages.vb=e.languages["visual-basic"],e.languages.vba=e.languages["visual-basic"]}e.exports=t,t.displayName="visualBasic",t.aliases=[]},10784(e){"use strict";function t(e){e.languages.warpscript={comment:/#.*|\/\/.*|\/\*[\s\S]*?\*\//,string:{pattern:/"(?:[^"\\\r\n]|\\.)*"|'(?:[^'\\\r\n]|\\.)*'|<'(?:[^\\']|'(?!>)|\\.)*'>/,greedy:!0},variable:/\$\S+/,macro:{pattern:/@\S+/,alias:"property"},keyword:/\b(?:BREAK|CHECKMACRO|CONTINUE|CUDF|DEFINED|DEFINEDMACRO|EVAL|FAIL|FOR|FOREACH|FORSTEP|IFT|IFTE|MSGFAIL|NRETURN|RETHROW|RETURN|SWITCH|TRY|UDF|UNTIL|WHILE)\b/,number:/[+-]?\b(?:NaN|Infinity|\d+(?:\.\d*)?(?:[Ee][+-]?\d+)?|0x[\da-fA-F]+|0b[01]+)\b/,boolean:/\b(?:false|true|F|T)\b/,punctuation:/<%|%>|[{}[\]()]/,operator:/==|&&?|\|\|?|\*\*?|>>>?|<<|[<>!~]=?|[-/%^]|\+!?|\b(?:AND|NOT|OR)\b/}}e.exports=t,t.displayName="warpscript",t.aliases=[]},17684(e){"use strict";function 
t(e){e.languages.wasm={comment:[/\(;[\s\S]*?;\)/,{pattern:/;;.*/,greedy:!0}],string:{pattern:/"(?:\\[\s\S]|[^"\\])*"/,greedy:!0},keyword:[{pattern:/\b(?:align|offset)=/,inside:{operator:/=/}},{pattern:/\b(?:(?:f32|f64|i32|i64)(?:\.(?:abs|add|and|ceil|clz|const|convert_[su]\/i(?:32|64)|copysign|ctz|demote\/f64|div(?:_[su])?|eqz?|extend_[su]\/i32|floor|ge(?:_[su])?|gt(?:_[su])?|le(?:_[su])?|load(?:(?:8|16|32)_[su])?|lt(?:_[su])?|max|min|mul|nearest|neg?|or|popcnt|promote\/f32|reinterpret\/[fi](?:32|64)|rem_[su]|rot[lr]|shl|shr_[su]|store(?:8|16|32)?|sqrt|sub|trunc(?:_[su]\/f(?:32|64))?|wrap\/i64|xor))?|memory\.(?:grow|size))\b/,inside:{punctuation:/\./}},/\b(?:anyfunc|block|br(?:_if|_table)?|call(?:_indirect)?|data|drop|elem|else|end|export|func|get_(?:global|local)|global|if|import|local|loop|memory|module|mut|nop|offset|param|result|return|select|set_(?:global|local)|start|table|tee_local|then|type|unreachable)\b/],variable:/\$[\w!#$%&'*+\-./:<=>?@\\^`|~]+/i,number:/[+-]?\b(?:\d(?:_?\d)*(?:\.\d(?:_?\d)*)?(?:[eE][+-]?\d(?:_?\d)*)?|0x[\da-fA-F](?:_?[\da-fA-F])*(?:\.[\da-fA-F](?:_?[\da-fA-D])*)?(?:[pP][+-]?\d(?:_?\d)*)?)\b|\binf\b|\bnan(?::0x[\da-fA-F](?:_?[\da-fA-D])*)?\b/,punctuation:/[()]/}}e.exports=t,t.displayName="wasm",t.aliases=[]},18191(e){"use strict";function t(e){e.languages.wiki=e.languages.extend("markup",{"block-comment":{pattern:/(^|[^\\])\/\*[\s\S]*?\*\//,lookbehind:!0,alias:"comment"},heading:{pattern:/^(=+)[^=\r\n].*?\1/m,inside:{punctuation:/^=+|=+$/,important:/.+/}},emphasis:{pattern:/('{2,5}).+?\1/,inside:{"bold-italic":{pattern:/(''''').+?(?=\1)/,lookbehind:!0,alias:["bold","italic"]},bold:{pattern:/(''')[^'](?:.*?[^'])?(?=\1)/,lookbehind:!0},italic:{pattern:/('')[^'](?:.*?[^'])?(?=\1)/,lookbehind:!0},punctuation:/^''+|''+$/}},hr:{pattern:/^-{4,}/m,alias:"punctuation"},url:[/ISBN +(?:97[89][ -]?)?(?:\d[ -]?){9}[\dx]\b|(?:RFC|PMID) 
+\d+/i,/\[\[.+?\]\]|\[.+?\]/],variable:[/__[A-Z]+__/,/\{{3}.+?\}{3}/,/\{\{.+?\}\}/],symbol:[/^#redirect/im,/~{3,5}/],"table-tag":{pattern:/((?:^|[|!])[|!])[^|\r\n]+\|(?!\|)/m,lookbehind:!0,inside:{"table-bar":{pattern:/\|$/,alias:"punctuation"},rest:e.languages.markup.tag.inside}},punctuation:/^(?:\{\||\|\}|\|-|[*#:;!|])|\|\||!!/m}),e.languages.insertBefore("wiki","tag",{nowiki:{pattern:/<(nowiki|pre|source)\b[^>]*>[\s\S]*?<\/\1>/i,inside:{tag:{pattern:/<(?:nowiki|pre|source)\b[^>]*>|<\/(?:nowiki|pre|source)>/i,inside:e.languages.markup.tag.inside}}}})}e.exports=t,t.displayName="wiki",t.aliases=[]},75242(e){"use strict";function t(e){e.languages.wolfram={comment:/\(\*(?:\(\*(?:[^*]|\*(?!\)))*\*\)|(?!\(\*)[\s\S])*?\*\)/,string:{pattern:/"(?:\\.|[^"\\\r\n])*"/,greedy:!0},keyword:/\b(?:Abs|AbsArg|Accuracy|Block|Do|For|Function|If|Manipulate|Module|Nest|NestList|None|Return|Switch|Table|Which|While)\b/,context:{pattern:/\w+`+\w*/,alias:"class-name"},blank:{pattern:/\b\w+_\b/,alias:"regex"},"global-variable":{pattern:/\$\w+/,alias:"variable"},boolean:/\b(?:True|False)\b/,number:/(?:\b(?=\d)|\B(?=\.))(?:0[bo])?(?:(?:\d|0x[\da-f])[\da-f]*(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?j?\b/i,operator:/\/\.|;|=\.|\^=|\^:=|:=|<<|>>|<\||\|>|:>|\|->|->|<-|@@@|@@|@|\/@|=!=|===|==|=|\+|-|\^|\[\/-+%=\]=?|!=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,punctuation:/[\|{}[\];(),.:]/},e.languages.mathematica=e.languages.wolfram,e.languages.wl=e.languages.wolfram,e.languages.nb=e.languages.wolfram}e.exports=t,t.displayName="wolfram",t.aliases=["mathematica","wl","nb"]},97202(e){"use strict";function t(e){var 
t;(t=e).languages.xeora=t.languages.extend("markup",{constant:{pattern:/\$(?:DomainContents|PageRenderDuration)\$/,inside:{punctuation:{pattern:/\$/}}},variable:{pattern:/\$@?(?:#+|[-+*~=^])?[\w.]+\$/,inside:{punctuation:{pattern:/[$.]/},operator:{pattern:/#+|[-+*~=^@]/}}},"function-inline":{pattern:/\$F:[-\w.]+\?[-\w.]+(?:,(?:(?:@[-#]*\w+\.[\w+.]\.*)*\|)*(?:(?:[\w+]|[-#*.~^]+[\w+]|=\S)(?:[^$=]|=+[^=])*=*|(?:@[-#]*\w+\.[\w+.]\.*)+(?:(?:[\w+]|[-#*~^][-#*.~^]*[\w+]|=\S)(?:[^$=]|=+[^=])*=*)?)?)?\$/,inside:{variable:{pattern:/(?:[,|])@?(?:#+|[-+*~=^])?[\w.]+/,inside:{punctuation:{pattern:/[,.|]/},operator:{pattern:/#+|[-+*~=^@]/}}},punctuation:{pattern:/\$\w:|[$:?.,|]/}},alias:"function"},"function-block":{pattern:/\$XF:\{[-\w.]+\?[-\w.]+(?:,(?:(?:@[-#]*\w+\.[\w+.]\.*)*\|)*(?:(?:[\w+]|[-#*.~^]+[\w+]|=\S)(?:[^$=]|=+[^=])*=*|(?:@[-#]*\w+\.[\w+.]\.*)+(?:(?:[\w+]|[-#*~^][-#*.~^]*[\w+]|=\S)(?:[^$=]|=+[^=])*=*)?)?)?\}:XF\$/,inside:{punctuation:{pattern:/[$:{}?.,|]/}},alias:"function"},"directive-inline":{pattern:/\$\w(?:#\d+\+?)?(?:\[[-\w.]+\])?:[-\/\w.]+\$/,inside:{punctuation:{pattern:/\$(?:\w:|C(?:\[|#\d))?|[:{[\]]/,inside:{tag:{pattern:/#\d/}}}},alias:"function"},"directive-block-open":{pattern:/\$\w+:\{|\$\w(?:#\d+\+?)?(?:\[[-\w.]+\])?:[-\w.]+:\{(?:![A-Z]+)?/,inside:{punctuation:{pattern:/\$(?:\w:|C(?:\[|#\d))?|[:{[\]]/,inside:{tag:{pattern:/#\d/}}},attribute:{pattern:/![A-Z]+$/,inside:{punctuation:{pattern:/!/}},alias:"keyword"}},alias:"function"},"directive-block-separator":{pattern:/\}:[-\w.]+:\{/,inside:{punctuation:{pattern:/[:{}]/}},alias:"function"},"directive-block-close":{pattern:/\}:[-\w.]+\$/,inside:{punctuation:{pattern:/[:{}$]/}},alias:"function"}}),t.languages.insertBefore("inside","punctuation",{variable:t.languages.xeora["function-inline"].inside.variable},t.languages.xeora["function-block"]),t.languages.xeoracube=t.languages.xeora}e.exports=t,t.displayName="xeora",t.aliases=["xeoracube"]},13808(e){"use strict";function t(e){!function(e){function 
t(t,n){e.languages[t]&&e.languages.insertBefore(t,"comment",{"doc-comment":n})}var n=e.languages.markup.tag,r={pattern:/\/\/\/.*/,greedy:!0,alias:"comment",inside:{tag:n}},i={pattern:/'''.*/,greedy:!0,alias:"comment",inside:{tag:n}};t("csharp",r),t("fsharp",r),t("vbnet",i)}(e)}e.exports=t,t.displayName="xmlDoc",t.aliases=[]},21301(e){"use strict";function t(e){e.languages.xojo={comment:{pattern:/(?:'|\/\/|Rem\b).+/i},string:{pattern:/"(?:""|[^"])*"/,greedy:!0},number:[/(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:E[+-]?\d+)?/i,/&[bchou][a-z\d]+/i],symbol:/#(?:If|Else|ElseIf|Endif|Pragma)\b/i,keyword:/\b(?:AddHandler|App|Array|As(?:signs)?|Auto|By(?:Ref|Val)|Boolean|Break|Byte|Call|Case|Catch|CFStringRef|CGFloat|Class|Color|Const|Continue|CString|Currency|CurrentMethodName|Declare|Delegate|Dim|Do(?:uble|wnTo)?|Each|Else(?:If)?|End|Enumeration|Event|Exception|Exit|Extends|False|Finally|For|Function|Get|GetTypeInfo|Global|GOTO|If|Implements|In|Inherits|Int(?:erface|eger|8|16|32|64)?|Lib|Loop|Me|Module|Next|Nil|Object|Optional|OSType|ParamArray|Private|Property|Protected|PString|Ptr|Raise(?:Event)?|ReDim|RemoveHandler|Return|Select(?:or)?|Self|Set|Single|Shared|Short|Soft|Static|Step|String|Sub|Super|Text|Then|To|True|Try|Ubound|UInt(?:eger|8|16|32|64)?|Until|Using|Var(?:iant)?|Wend|While|WindowPtr|WString)\b/i,operator:/<[=>]?|>=?|[+\-*\/\\^=]|\b(?:AddressOf|And|Ctype|IsA?|Mod|New|Not|Or|Xor|WeakAddressOf)\b/i,punctuation:/[.,;:()]/}}e.exports=t,t.displayName="xojo",t.aliases=[]},20349(e){"use strict";function t(e){var 
t,n,r;(t=e).languages.xquery=t.languages.extend("markup",{"xquery-comment":{pattern:/\(:[\s\S]*?:\)/,greedy:!0,alias:"comment"},string:{pattern:/(["'])(?:\1\1|(?!\1)[\s\S])*\1/,greedy:!0},extension:{pattern:/\(#.+?#\)/,alias:"symbol"},variable:/\$[-\w:]+/,axis:{pattern:/(^|[^-])(?:ancestor(?:-or-self)?|attribute|child|descendant(?:-or-self)?|following(?:-sibling)?|parent|preceding(?:-sibling)?|self)(?=::)/,lookbehind:!0,alias:"operator"},"keyword-operator":{pattern:/(^|[^:-])\b(?:and|castable as|div|eq|except|ge|gt|idiv|instance of|intersect|is|le|lt|mod|ne|or|union)\b(?=$|[^:-])/,lookbehind:!0,alias:"operator"},keyword:{pattern:/(^|[^:-])\b(?:as|ascending|at|base-uri|boundary-space|case|cast as|collation|construction|copy-namespaces|declare|default|descending|else|empty (?:greatest|least)|encoding|every|external|for|function|if|import|in|inherit|lax|let|map|module|namespace|no-inherit|no-preserve|option|order(?: by|ed|ing)?|preserve|return|satisfies|schema|some|stable|strict|strip|then|to|treat 
as|typeswitch|unordered|validate|variable|version|where|xquery)\b(?=$|[^:-])/,lookbehind:!0},function:/[\w-]+(?::[\w-]+)*(?=\s*\()/,"xquery-element":{pattern:/(element\s+)[\w-]+(?::[\w-]+)*/,lookbehind:!0,alias:"tag"},"xquery-attribute":{pattern:/(attribute\s+)[\w-]+(?::[\w-]+)*/,lookbehind:!0,alias:"attr-name"},builtin:{pattern:/(^|[^:-])\b(?:attribute|comment|document|element|processing-instruction|text|xs:(?:anyAtomicType|anyType|anyURI|base64Binary|boolean|byte|date|dateTime|dayTimeDuration|decimal|double|duration|ENTITIES|ENTITY|float|gDay|gMonth|gMonthDay|gYear|gYearMonth|hexBinary|ID|IDREFS?|int|integer|language|long|Name|NCName|negativeInteger|NMTOKENS?|nonNegativeInteger|nonPositiveInteger|normalizedString|NOTATION|positiveInteger|QName|short|string|time|token|unsigned(?:Byte|Int|Long|Short)|untyped(?:Atomic)?|yearMonthDuration))\b(?=$|[^:-])/,lookbehind:!0},number:/\b\d+(?:\.\d+)?(?:E[+-]?\d+)?/,operator:[/[+*=?|@]|\.\.?|:=|!=|<[=<]?|>[=>]?/,{pattern:/(\s)-(?=\s)/,lookbehind:!0}],punctuation:/[[\](){},;:/]/}),t.languages.xquery.tag.pattern=/<\/?(?!\d)[^\s>\/=$<%]+(?:\s+[^\s>\/=]+(?:=(?:("|')(?:\\[\s\S]|\{(?!\{)(?:\{(?:\{[^{}]*\}|[^{}])*\}|[^{}])+\}|(?!\1)[^\\])*\1|[^\s'">=]+))?)*\s*\/?>/i,t.languages.xquery.tag.inside["attr-value"].pattern=/=(?:("|')(?:\\[\s\S]|\{(?!\{)(?:\{(?:\{[^{}]*\}|[^{}])*\}|[^{}])+\}|(?!\1)[^\\])*\1|[^\s'">=]+)/i,t.languages.xquery.tag.inside["attr-value"].inside.punctuation=/^="|"$/,t.languages.xquery.tag.inside["attr-value"].inside.expression={pattern:/\{(?!\{)(?:\{(?:\{[^{}]*\}|[^{}])*\}|[^{}])+\}/,inside:t.languages.xquery,alias:"language-xquery"},n=function(e){return"string"==typeof e?e:"string"==typeof e.content?e.content:e.content.map(n).join("")},r=function(e){for(var 
i=[],a=0;a0&&i[i.length-1].tagName===n(o.content[0].content[1])&&i.pop():"/>"===o.content[o.content.length-1].content||i.push({tagName:n(o.content[0].content[1]),openedBraces:0}):!(i.length>0)||"punctuation"!==o.type||"{"!==o.content||e[a+1]&&"punctuation"===e[a+1].type&&"{"===e[a+1].content||e[a-1]&&"plain-text"===e[a-1].type&&"{"===e[a-1].content?i.length>0&&i[i.length-1].openedBraces>0&&"punctuation"===o.type&&"}"===o.content?i[i.length-1].openedBraces--:"comment"!==o.type&&(s=!0):i[i.length-1].openedBraces++),(s||"string"==typeof o)&&i.length>0&&0===i[i.length-1].openedBraces){var u=n(o);a0&&("string"==typeof e[a-1]||"plain-text"===e[a-1].type)&&(u=n(e[a-1])+u,e.splice(a-1,1),a--),/^\s+$/.test(u)?e[a]=u:e[a]=new t.Token("plain-text",u,null,u)}o.content&&"string"!=typeof o.content&&r(o.content)}},t.hooks.add("after-tokenize",function(e){"xquery"===e.language&&r(e.tokens)})}e.exports=t,t.displayName="xquery",t.aliases=[]},65039(e){"use strict";function t(e){!function(e){var t=/[*&][^\s[\]{},]+/,n=/!(?:<[\w\-%#;/?:@&=+$,.!~*'()[\]]+>|(?:[a-zA-Z\d-]*!)?[\w\-%#;/?:@&=+$.~*'()]+)?/,r="(?:"+n.source+"(?:[ ]+"+t.source+")?|"+t.source+"(?:[ ]+"+n.source+")?)",i=/(?:[^\s\x00-\x08\x0e-\x1f!"#%&'*,\-:>?@[\]`{|}\x7f-\x84\x86-\x9f\ud800-\udfff\ufffe\uffff]|[?:-])(?:[ \t]*(?:(?![#:])|:))*/.source.replace(//g,function(){return/[^\s\x00-\x08\x0e-\x1f,[\]{}\x7f-\x84\x86-\x9f\ud800-\udfff\ufffe\uffff]/.source}),a=/"(?:[^"\\\r\n]|\\.)*"|'(?:[^'\\\r\n]|\\.)*'/.source;function o(e,t){return t=(t||"").replace(/m/g,"")+"m",RegExp(/([:\-,[{]\s*(?:\s<>[ \t]+)?)(?:<>)(?=[ \t]*(?:$|,|\]|\}|(?:[\r\n]\s*)?#))/.source.replace(/<>/g,function(){return r}).replace(/<>/g,function(){return e}),t)}e.languages.yaml={scalar:{pattern:RegExp(/([\-:]\s*(?:\s<>[ \t]+)?[|>])[ \t]*(?:((?:\r?\n|\r)[ \t]+)\S[^\r\n]*(?:\2[^\r\n]+)*)/.source.replace(/<>/g,function(){return r})),lookbehind:!0,alias:"string"},comment:/#.*/,key:{pattern:RegExp(/((?:^|[:\-,[{\r\n?])[ \t]*(?:<>[ 
\t]+)?)<>(?=\s*:\s)/.source.replace(/<>/g,function(){return r}).replace(/<>/g,function(){return"(?:"+i+"|"+a+")"})),lookbehind:!0,greedy:!0,alias:"atrule"},directive:{pattern:/(^[ \t]*)%.+/m,lookbehind:!0,alias:"important"},datetime:{pattern:o(/\d{4}-\d\d?-\d\d?(?:[tT]|[ \t]+)\d\d?:\d{2}:\d{2}(?:\.\d*)?(?:[ \t]*(?:Z|[-+]\d\d?(?::\d{2})?))?|\d{4}-\d{2}-\d{2}|\d\d?:\d{2}(?::\d{2}(?:\.\d*)?)?/.source),lookbehind:!0,alias:"number"},boolean:{pattern:o(/true|false/.source,"i"),lookbehind:!0,alias:"important"},null:{pattern:o(/null|~/.source,"i"),lookbehind:!0,alias:"important"},string:{pattern:o(a),lookbehind:!0,greedy:!0},number:{pattern:o(/[+-]?(?:0x[\da-f]+|0o[0-7]+|(?:\d+(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?|\.inf|\.nan)/.source,"i"),lookbehind:!0},tag:n,important:t,punctuation:/---|[:[\]{}\-,|>?]|\.\.\./},e.languages.yml=e.languages.yaml}(e)}e.exports=t,t.displayName="yaml",t.aliases=["yml"]},80741(e){"use strict";function t(e){e.languages.yang={comment:/\/\*[\s\S]*?\*\/|\/\/.*/,string:{pattern:/"(?:[^\\"]|\\.)*"|'[^']*'/,greedy:!0},keyword:{pattern:/(^|[{};\r\n][ \t]*)[a-z_][\w.-]*/i,lookbehind:!0},namespace:{pattern:/(\s)[a-z_][\w.-]*(?=:)/i,lookbehind:!0},boolean:/\b(?:false|true)\b/,operator:/\+/,punctuation:/[{};:]/}}e.exports=t,t.displayName="yang",t.aliases=[]},86528(e){"use strict";function t(e){!function(e){function t(e){return function(){return e}}var 
n=/\b(?:align|allowzero|and|asm|async|await|break|cancel|catch|comptime|const|continue|defer|else|enum|errdefer|error|export|extern|fn|for|if|inline|linksection|nakedcc|noalias|null|or|orelse|packed|promise|pub|resume|return|stdcallcc|struct|suspend|switch|test|threadlocal|try|undefined|union|unreachable|usingnamespace|var|volatile|while)\b/,r="\\b(?!"+n.source+")(?!\\d)\\w+\\b",i=/align\s*\((?:[^()]|\([^()]*\))*\)/.source,a=/(?:\?|\bpromise->|(?:\[[^[\]]*\]|\*(?!\*)|\*\*)(?:\s*|\s*const\b|\s*volatile\b|\s*allowzero\b)*)/.source.replace(//g,t(i)),o=/(?:\bpromise\b|(?:\berror\.)?(?:\.)*(?!\s+))/.source.replace(//g,t(r)),s="(?!\\s)(?:!?\\s*(?:"+a+"\\s*)*"+o+")+";e.languages.zig={comment:[{pattern:/\/{3}.*/,alias:"doc-comment"},/\/{2}.*/],string:[{pattern:/(^|[^\\@])c?"(?:[^"\\\r\n]|\\.)*"/,lookbehind:!0,greedy:!0},{pattern:/([\r\n])([ \t]+c?\\{2}).*(?:(?:\r\n?|\n)\2.*)*/,lookbehind:!0,greedy:!0},{pattern:/(^|[^\\])'(?:[^'\\\r\n]|\\(?:.|x[a-fA-F\d]{2}|u\{[a-fA-F\d]{1,6}\}))'/,lookbehind:!0,greedy:!0}],builtin:/\B@(?!\d)\w+(?=\s*\()/,label:{pattern:/(\b(?:break|continue)\s*:\s*)\w+\b|\b(?!\d)\w+\b(?=\s*:\s*(?:\{|while\b))/,lookbehind:!0},"class-name":[/\b(?!\d)\w+(?=\s*=\s*(?:(?:extern|packed)\s+)?(?:enum|struct|union)\s*[({])/,{pattern:RegExp(/(:\s*)(?=\s*(?:\s*)?[=;,)])|(?=\s*(?:\s*)?\{)/.source.replace(//g,t(s)).replace(//g,t(i))),lookbehind:!0,inside:null},{pattern:RegExp(/(\)\s*)(?=\s*(?:\s*)?;)/.source.replace(//g,t(s)).replace(//g,t(i))),lookbehind:!0,inside:null}],"builtin-types":{pattern:/\b(?:anyerror|bool|c_u?(?:short|int|long|longlong)|c_longdouble|c_void|comptime_(?:float|int)|[iu](?:8|16|32|64|128|size)|f(?:16|32|64|128)|noreturn|type|void)\b/,alias:"keyword"},keyword:n,function:/\b(?!\d)\w+(?=\s*\()/,number:/\b(?:0b[01]+|0o[0-7]+|0x[a-fA-F\d]+(?:\.[a-fA-F\d]*)?(?:[pP][+-]?[a-fA-F\d]+)?|\d+(?:\.\d*)?(?:[eE][+-]?\d+)?)\b/,boolean:/\b(?:false|true)\b/,operator:/\.[*?]|\.{2,3}|[-=]>|\*\*|\+\+|\|\||(?:<<|>>|[-+*]%|[-+*/%^&|<>!=])=?|[?~]/,punctuation:/[.:,;(){}
[\]]/},e.languages.zig["class-name"].forEach(function(t){null===t.inside&&(t.inside=e.languages.zig)})}(e)}e.exports=t,t.displayName="zig",t.aliases=[]},59216(e,t,n){var r=function(e){var t=/\blang(?:uage)?-([\w-]+)\b/i,n=0,r={},i={manual:e.Prism&&e.Prism.manual,disableWorkerMessageHandler:e.Prism&&e.Prism.disableWorkerMessageHandler,util:{encode:function e(t){return t instanceof a?new a(t.type,e(t.content),t.alias):Array.isArray(t)?t.map(e):t.replace(/&/g,"&").replace(/=f.reach));S+=E.value.length,E=E.next){var k,x=E.value;if(t.length>e.length)return;if(!(x instanceof a)){var T=1;if(v){if(!(k=o(_,S,e,g)))break;var M=k.index,O=k.index+k[0].length,A=S;for(A+=E.value.length;M>=A;)A+=(E=E.next).value.length;if(A-=E.value.length,S=A,E.value instanceof a)continue;for(var L=E;L!==t.tail&&(Af.reach&&(f.reach=N);var P=E.prev;I&&(P=c(t,P,I),S+=I.length),l(t,P,T);var R=new a(d,m?i.tokenize(C,m):C,y,C);if(E=c(t,P,R),D&&c(t,E,D),T>1){var j={cause:d+","+p,reach:N};s(e,t,n,E.prev,S,j),f&&j.reach>f.reach&&(f.reach=j.reach)}}}}}}function u(){var e={value:null,prev:null,next:null},t={value:null,prev:e,next:null};e.next=t,this.head=e,this.tail=t,this.length=0}function c(e,t,n){var r=t.next,i={value:n,prev:t,next:r};return t.next=i,r.prev=i,e.length++,i}function l(e,t,n){for(var r=t.next,i=0;i"+a.content+""},!e.document)return e.addEventListener&&(i.disableWorkerMessageHandler||e.addEventListener("message",function(t){var n=JSON.parse(t.data),r=n.language,a=n.code,o=n.immediateClose;e.postMessage(i.highlight(a,i.languages[r],r)),o&&e.close()},!1)),i;var d=i.util.currentScript();function h(){i.manual||i.highlightAll()}if(d&&(i.filename=d.src,d.hasAttribute("data-manual")&&(i.manual=!0)),!i.manual){var p=document.readyState;"loading"===p||"interactive"===p&&d&&d.defer?document.addEventListener("DOMContentLoaded",h):window.requestAnimationFrame?window.requestAnimationFrame(h):window.setTimeout(h,16)}return i}("undefined"!=typeof window?window:"undefined"!=typeof WorkerGlobalScope&&self 
instanceof WorkerGlobalScope?self:{});e.exports&&(e.exports=r),void 0!==n.g&&(n.g.Prism=r)},89509(e,t,n){/*! safe-buffer. MIT License. Feross Aboukhadijeh */ var r=n(48764),i=r.Buffer;function a(e,t){for(var n in e)t[n]=e[n]}function o(e,t,n){return i(e,t,n)}i.from&&i.alloc&&i.allocUnsafe&&i.allocUnsafeSlow?e.exports=r:(a(r,t),t.Buffer=o),o.prototype=Object.create(i.prototype),a(i,o),o.from=function(e,t,n){if("number"==typeof e)throw TypeError("Argument must not be a number");return i(e,t,n)},o.alloc=function(e,t,n){if("number"!=typeof e)throw TypeError("Argument must be a number");var r=i(e);return void 0!==t?"string"==typeof n?r.fill(t,n):r.fill(t):r.fill(0),r},o.allocUnsafe=function(e){if("number"!=typeof e)throw TypeError("Argument must be a number");return i(e)},o.allocUnsafeSlow=function(e){if("number"!=typeof e)throw TypeError("Argument must be a number");return r.SlowBuffer(e)}},60053(e,t){"use strict";if(/** @license React v0.18.0 + * scheduler.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ Object.defineProperty(t,"__esModule",{value:!0}),"undefined"==typeof window||"function"!=typeof MessageChannel){var n,r,i,a,o,s=null,u=null,c=function(){if(null!==s)try{var e=t.unstable_now();s(!0,e),s=null}catch(n){throw setTimeout(c,0),n}},l=Date.now();t.unstable_now=function(){return Date.now()-l},n=function(e){null!==s?setTimeout(n,0,e):(s=e,setTimeout(c,0))},r=function(e,t){u=setTimeout(e,t)},i=function(){clearTimeout(u)},a=function(){return!1},o=t.unstable_forceFrameRate=function(){}}else{var f=window.performance,d=window.Date,h=window.setTimeout,p=window.clearTimeout;if("undefined"!=typeof console){var b=window.cancelAnimationFrame;"function"!=typeof window.requestAnimationFrame&&console.error("This browser doesn't support requestAnimationFrame. Make sure that you load a polyfill in older browsers. 
https://fb.me/react-polyfills"),"function"!=typeof b&&console.error("This browser doesn't support cancelAnimationFrame. Make sure that you load a polyfill in older browsers. https://fb.me/react-polyfills")}if("object"==typeof f&&"function"==typeof f.now)t.unstable_now=function(){return f.now()};else{var m=d.now();t.unstable_now=function(){return d.now()-m}}var g=!1,v=null,y=-1,w=5,_=0;a=function(){return t.unstable_now()>=_},o=function(){},t.unstable_forceFrameRate=function(e){0>e||125M(o,n))void 0!==u&&0>M(u,o)?(e[r]=u,e[s]=n,r=s):(e[r]=o,e[a]=n,r=a);else if(void 0!==u&&0>M(u,n))e[r]=u,e[s]=n,r=s;else break a}}return t}return null}function M(e,t){var n=e.sortIndex-t.sortIndex;return 0!==n?n:e.id-t.id}var O=[],A=[],L=1,C=null,I=3,D=!1,N=!1,P=!1;function R(e){for(var t=x(A);null!==t;){if(null===t.callback)T(A);else if(t.startTime<=e)T(A),t.sortIndex=t.expirationTime,k(O,t);else break;t=x(A)}}function j(e){if(P=!1,R(e),!N){if(null!==x(O))N=!0,n(F);else{var t=x(A);null!==t&&r(j,t.startTime-e)}}}function F(e,n){N=!1,P&&(P=!1,i()),D=!0;var o=I;try{for(R(n),C=x(O);null!==C&&(!(C.expirationTime>n)||e&&!a());){var s=C.callback;if(null!==s){C.callback=null,I=C.priorityLevel;var u=s(C.expirationTime<=n);n=t.unstable_now(),"function"==typeof u?C.callback=u:C===x(O)&&T(O),R(n)}else T(O);C=x(O)}if(null!==C)var c=!0;else{var l=x(A);null!==l&&r(j,l.startTime-n),c=!1}return c}finally{C=null,I=o,D=!1}}function Y(e){switch(e){case 1:return -1;case 2:return 250;case 5:return 1073741823;case 4:return 1e4;default:return 5e3}}var B=o;t.unstable_ImmediatePriority=1,t.unstable_UserBlockingPriority=2,t.unstable_NormalPriority=3,t.unstable_IdlePriority=5,t.unstable_LowPriority=4,t.unstable_runWithPriority=function(e,t){switch(e){case 1:case 2:case 3:case 4:case 5:break;default:e=3}var n=I;I=e;try{return t()}finally{I=n}},t.unstable_next=function(e){switch(I){case 1:case 2:case 3:var t=3;break;default:t=I}var n=I;I=t;try{return 
e()}finally{I=n}},t.unstable_scheduleCallback=function(e,a,o){var s=t.unstable_now();if("object"==typeof o&&null!==o){var u=o.delay;u="number"==typeof u&&0s?(e.sortIndex=u,k(A,e),null===x(O)&&e===x(A)&&(P?i():P=!0,r(j,u-s))):(e.sortIndex=o,k(O,e),N||D||(N=!0,n(F))),e},t.unstable_cancelCallback=function(e){e.callback=null},t.unstable_wrapCallback=function(e){var t=I;return function(){var n=I;I=t;try{return e.apply(this,arguments)}finally{I=n}}},t.unstable_getCurrentPriorityLevel=function(){return I},t.unstable_shouldYield=function(){var e=t.unstable_now();R(e);var n=x(O);return n!==C&&null!==C&&null!==n&&null!==n.callback&&n.startTime<=e&&n.expirationTime>5==6?2:e>>4==14?3:e>>3==30?4:e>>6==2?-1:-2}function c(e,t,n){var r=t.length-1;if(r=0?(i>0&&(e.lastNeed=i-1),i):--r=0?(i>0&&(e.lastNeed=i-2),i):--r=0?(i>0&&(2===i?i=0:e.lastNeed=i-3),i):0}function l(e,t,n){if((192&t[0])!=128)return e.lastNeed=0,"�";if(e.lastNeed>1&&t.length>1){if((192&t[1])!=128)return e.lastNeed=1,"�";if(e.lastNeed>2&&t.length>2&&(192&t[2])!=128)return e.lastNeed=2,"�"}}function f(e){var t=this.lastTotal-this.lastNeed,n=l(this,e,t);return void 0!==n?n:this.lastNeed<=e.length?(e.copy(this.lastChar,t,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal)):void(e.copy(this.lastChar,t,0,e.length),this.lastNeed-=e.length)}function d(e,t){var n=c(this,e,t);if(!this.lastNeed)return e.toString("utf8",t);this.lastTotal=n;var r=e.length-(n-this.lastNeed);return e.copy(this.lastChar,0,r),e.toString("utf8",t,r)}function h(e){var t=e&&e.length?this.write(e):"";return this.lastNeed?t+"�":t}function p(e,t){if((e.length-t)%2==0){var n=e.toString("utf16le",t);if(n){var r=n.charCodeAt(n.length-1);if(r>=55296&&r<=56319)return this.lastNeed=2,this.lastTotal=4,this.lastChar[0]=e[e.length-2],this.lastChar[1]=e[e.length-1],n.slice(0,-1)}return n}return this.lastNeed=1,this.lastTotal=2,this.lastChar[0]=e[e.length-1],e.toString("utf16le",t,e.length-1)}function b(e){var 
t=e&&e.length?this.write(e):"";if(this.lastNeed){var n=this.lastTotal-this.lastNeed;return t+this.lastChar.toString("utf16le",0,n)}return t}function m(e,t){var n=(e.length-t)%3;return 0===n?e.toString("base64",t):(this.lastNeed=3-n,this.lastTotal=3,1===n?this.lastChar[0]=e[e.length-1]:(this.lastChar[0]=e[e.length-2],this.lastChar[1]=e[e.length-1]),e.toString("base64",t,e.length-n))}function g(e){var t=e&&e.length?this.write(e):"";return this.lastNeed?t+this.lastChar.toString("base64",0,3-this.lastNeed):t}function v(e){return e.toString(this.encoding)}function y(e){return e&&e.length?this.write(e):""}t.s=s,s.prototype.write=function(e){var t,n;if(0===e.length)return"";if(this.lastNeed){if(void 0===(t=this.fillLast(e)))return"";n=this.lastNeed,this.lastNeed=0}else n=0;return nOF});var r,i,a,o,s,u,c,l=n(67294),f=n.t(l,2),d=n(97779),h=n(47886),p=n(57209),b=n(32316),m=n(95880),g=n(17051),v=n(71381),y=n(81701),w=n(3022),_=n(60323),E=n(87591),S=n(25649),k=n(28902),x=n(71426),T=n(48884),M=n(94184),O=n.n(M),A=n(55977),L=n(73935),C=function(){if("undefined"!=typeof Map)return Map;function e(e,t){var n=-1;return e.some(function(e,r){return e[0]===t&&(n=r,!0)}),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(t){var n=e(this.__entries__,t),r=this.__entries__[n];return r&&r[1]},t.prototype.set=function(t,n){var r=e(this.__entries__,t);~r?this.__entries__[r][1]=n:this.__entries__.push([t,n])},t.prototype.delete=function(t){var n=this.__entries__,r=e(n,t);~r&&n.splice(r,1)},t.prototype.has=function(t){return!!~e(this.__entries__,t)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(e,t){void 0===t&&(t=null);for(var 
n=0,r=this.__entries__;n0},e.prototype.connect_=function(){I&&!this.connected_&&(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),Y?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){I&&this.connected_&&(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(e){var t=e.propertyName,n=void 0===t?"":t;F.some(function(e){return!!~n.indexOf(e)})&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),U=function(e,t){for(var n=0,r=Object.keys(t);n0},e}(),er="undefined"!=typeof WeakMap?new WeakMap:new C,ei=function(){function e(t){if(!(this instanceof e))throw TypeError("Cannot call a class as a function.");if(!arguments.length)throw TypeError("1 argument required, but only 0 present.");var n=B.getInstance(),r=new en(t,n,this);er.set(this,r)}return e}();["observe","unobserve","disconnect"].forEach(function(e){ei.prototype[e]=function(){var t;return(t=er.get(this))[e].apply(t,arguments)}});var ea=void 0!==D.ResizeObserver?D.ResizeObserver:ei;let eo=ea;var es=function(e){var t=[],n=null,r=function(){for(var r=arguments.length,i=Array(r),a=0;a=t||n<0||f&&r>=a}function g(){var e=eb();if(m(e))return v(e);s=setTimeout(g,b(e))}function v(e){return(s=void 0,d&&r)?h(e):(r=i=void 0,o)}function y(){void 0!==s&&clearTimeout(s),c=0,r=u=i=s=void 0}function w(){return void 
0===s?o:v(eb())}function _(){var e=eb(),n=m(e);if(r=arguments,i=this,u=e,n){if(void 0===s)return p(u);if(f)return clearTimeout(s),s=setTimeout(g,t),h(u)}return void 0===s&&(s=setTimeout(g,t)),o}return t=ez(t)||0,ed(n)&&(l=!!n.leading,a=(f="maxWait"in n)?eW(ez(n.maxWait)||0,t):a,d="trailing"in n?!!n.trailing:d),_.cancel=y,_.flush=w,_}let eq=eV;var eZ="Expected a function";function eX(e,t,n){var r=!0,i=!0;if("function"!=typeof e)throw TypeError(eZ);return ed(n)&&(r="leading"in n?!!n.leading:r,i="trailing"in n?!!n.trailing:i),eq(e,t,{leading:r,maxWait:t,trailing:i})}let eJ=eX;var eQ={debounce:eq,throttle:eJ},e1=function(e){return eQ[e]},e0=function(e){return"function"==typeof e},e2=function(){return"undefined"==typeof window},e3=function(e){return e instanceof Element||e instanceof HTMLDocument};function e4(e){return(e4="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function e5(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}function e6(e,t){for(var n=0;ne.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&l.createElement(tG.Z,{variant:"indeterminate",classes:r}))};tK.propTypes={fetchCount:el().number.isRequired};let tV=(0,b.withStyles)(tW)(tK);var tq=n(5536);let tZ=n.p+"ba8bbf16ebf8e1d05bef.svg";function tX(){return(tX=Object.assign||function(e){for(var t=1;t120){for(var d=Math.floor(u/80),h=u%80,p=[],b=0;b0},name:{enumerable:!1},nodes:{enumerable:!1},source:{enumerable:!1},positions:{enumerable:!1},originalError:{enumerable:!1}}),null!=s&&s.stack)?(Object.defineProperty(nf(b),"stack",{value:s.stack,writable:!0,configurable:!0}),nl(b)):(Error.captureStackTrace?Error.captureStackTrace(nf(b),n):Object.defineProperty(nf(b),"stack",{value:Error().stack,writable:!0,configurable:!0}),b)}return ns(n,[{key:"toString",value:function(){return 
nw(this)}},{key:t4.YF,get:function(){return"Object"}}]),n}(nd(Error));function ny(e){return void 0===e||0===e.length?void 0:e}function nw(e){var t=e.message;if(e.nodes)for(var n=0,r=e.nodes;n",EOF:"",BANG:"!",DOLLAR:"$",AMP:"&",PAREN_L:"(",PAREN_R:")",SPREAD:"...",COLON:":",EQUALS:"=",AT:"@",BRACKET_L:"[",BRACKET_R:"]",BRACE_L:"{",PIPE:"|",BRACE_R:"}",NAME:"Name",INT:"Int",FLOAT:"Float",STRING:"String",BLOCK_STRING:"BlockString",COMMENT:"Comment"}),nx=n(10143),nT=Object.freeze({QUERY:"QUERY",MUTATION:"MUTATION",SUBSCRIPTION:"SUBSCRIPTION",FIELD:"FIELD",FRAGMENT_DEFINITION:"FRAGMENT_DEFINITION",FRAGMENT_SPREAD:"FRAGMENT_SPREAD",INLINE_FRAGMENT:"INLINE_FRAGMENT",VARIABLE_DEFINITION:"VARIABLE_DEFINITION",SCHEMA:"SCHEMA",SCALAR:"SCALAR",OBJECT:"OBJECT",FIELD_DEFINITION:"FIELD_DEFINITION",ARGUMENT_DEFINITION:"ARGUMENT_DEFINITION",INTERFACE:"INTERFACE",UNION:"UNION",ENUM:"ENUM",ENUM_VALUE:"ENUM_VALUE",INPUT_OBJECT:"INPUT_OBJECT",INPUT_FIELD_DEFINITION:"INPUT_FIELD_DEFINITION"}),nM=n(87392),nO=function(){function e(e){var t=new nS.WU(nk.SOF,0,0,0,0,null);this.source=e,this.lastToken=t,this.token=t,this.line=1,this.lineStart=0}var t=e.prototype;return t.advance=function(){return this.lastToken=this.token,this.token=this.lookahead()},t.lookahead=function(){var e,t=this.token;if(t.kind!==nk.EOF)do t=null!==(e=t.next)&&void 0!==e?e:t.next=nC(this,t);while(t.kind===nk.COMMENT)return t},e}();function nA(e){return e===nk.BANG||e===nk.DOLLAR||e===nk.AMP||e===nk.PAREN_L||e===nk.PAREN_R||e===nk.SPREAD||e===nk.COLON||e===nk.EQUALS||e===nk.AT||e===nk.BRACKET_L||e===nk.BRACKET_R||e===nk.BRACE_L||e===nk.PIPE||e===nk.BRACE_R}function nL(e){return isNaN(e)?nk.EOF:e<127?JSON.stringify(String.fromCharCode(e)):'"\\u'.concat(("00"+e.toString(16).toUpperCase()).slice(-4),'"')}function nC(e,t){for(var n=e.source,r=n.body,i=r.length,a=t.end;a31||9===a))return new nS.WU(nk.COMMENT,t,s,n,r,i,o.slice(t+1,s))}function nN(e,t,n,r,i,a){var 
o=e.body,s=n,u=t,c=!1;if(45===s&&(s=o.charCodeAt(++u)),48===s){if((s=o.charCodeAt(++u))>=48&&s<=57)throw n_(e,u,"Invalid number, unexpected digit after 0: ".concat(nL(s),"."))}else u=nP(e,u,s),s=o.charCodeAt(u);if(46===s&&(c=!0,s=o.charCodeAt(++u),u=nP(e,u,s),s=o.charCodeAt(u)),(69===s||101===s)&&(c=!0,(43===(s=o.charCodeAt(++u))||45===s)&&(s=o.charCodeAt(++u)),u=nP(e,u,s),s=o.charCodeAt(u)),46===s||nU(s))throw n_(e,u,"Invalid number, expected digit but got: ".concat(nL(s),"."));return new nS.WU(c?nk.FLOAT:nk.INT,t,u,r,i,a,o.slice(t,u))}function nP(e,t,n){var r=e.body,i=t,a=n;if(a>=48&&a<=57){do a=r.charCodeAt(++i);while(a>=48&&a<=57)return i}throw n_(e,i,"Invalid number, expected digit but got: ".concat(nL(a),"."))}function nR(e,t,n,r,i){for(var a=e.body,o=t+1,s=o,u=0,c="";o=48&&e<=57?e-48:e>=65&&e<=70?e-55:e>=97&&e<=102?e-87:-1}function nB(e,t,n,r,i){for(var a=e.body,o=a.length,s=t+1,u=0;s!==o&&!isNaN(u=a.charCodeAt(s))&&(95===u||u>=48&&u<=57||u>=65&&u<=90||u>=97&&u<=122);)++s;return new nS.WU(nk.NAME,t,s,n,r,i,a.slice(t,s))}function nU(e){return 95===e||e>=65&&e<=90||e>=97&&e<=122}function nH(e,t){return new n$(e,t).parseDocument()}var n$=function(){function e(e,t){var n=(0,nx.T)(e)?e:new nx.H(e);this._lexer=new nO(n),this._options=t}var t=e.prototype;return t.parseName=function(){var e=this.expectToken(nk.NAME);return{kind:nE.h.NAME,value:e.value,loc:this.loc(e)}},t.parseDocument=function(){var e=this._lexer.token;return{kind:nE.h.DOCUMENT,definitions:this.many(nk.SOF,this.parseDefinition,nk.EOF),loc:this.loc(e)}},t.parseDefinition=function(){if(this.peek(nk.NAME))switch(this._lexer.token.value){case"query":case"mutation":case"subscription":return this.parseOperationDefinition();case"fragment":return this.parseFragmentDefinition();case"schema":case"scalar":case"type":case"interface":case"union":case"enum":case"input":case"directive":return this.parseTypeSystemDefinition();case"extend":return this.parseTypeSystemExtension()}else if(this.peek(nk.BRACE_L))return 
this.parseOperationDefinition();else if(this.peekDescription())return this.parseTypeSystemDefinition();throw this.unexpected()},t.parseOperationDefinition=function(){var e,t=this._lexer.token;if(this.peek(nk.BRACE_L))return{kind:nE.h.OPERATION_DEFINITION,operation:"query",name:void 0,variableDefinitions:[],directives:[],selectionSet:this.parseSelectionSet(),loc:this.loc(t)};var n=this.parseOperationType();return this.peek(nk.NAME)&&(e=this.parseName()),{kind:nE.h.OPERATION_DEFINITION,operation:n,name:e,variableDefinitions:this.parseVariableDefinitions(),directives:this.parseDirectives(!1),selectionSet:this.parseSelectionSet(),loc:this.loc(t)}},t.parseOperationType=function(){var e=this.expectToken(nk.NAME);switch(e.value){case"query":return"query";case"mutation":return"mutation";case"subscription":return"subscription"}throw this.unexpected(e)},t.parseVariableDefinitions=function(){return this.optionalMany(nk.PAREN_L,this.parseVariableDefinition,nk.PAREN_R)},t.parseVariableDefinition=function(){var e=this._lexer.token;return{kind:nE.h.VARIABLE_DEFINITION,variable:this.parseVariable(),type:(this.expectToken(nk.COLON),this.parseTypeReference()),defaultValue:this.expectOptionalToken(nk.EQUALS)?this.parseValueLiteral(!0):void 0,directives:this.parseDirectives(!0),loc:this.loc(e)}},t.parseVariable=function(){var e=this._lexer.token;return this.expectToken(nk.DOLLAR),{kind:nE.h.VARIABLE,name:this.parseName(),loc:this.loc(e)}},t.parseSelectionSet=function(){var e=this._lexer.token;return{kind:nE.h.SELECTION_SET,selections:this.many(nk.BRACE_L,this.parseSelection,nk.BRACE_R),loc:this.loc(e)}},t.parseSelection=function(){return this.peek(nk.SPREAD)?this.parseFragment():this.parseField()},t.parseField=function(){var e,t,n=this._lexer.token,r=this.parseName();return 
this.expectOptionalToken(nk.COLON)?(e=r,t=this.parseName()):t=r,{kind:nE.h.FIELD,alias:e,name:t,arguments:this.parseArguments(!1),directives:this.parseDirectives(!1),selectionSet:this.peek(nk.BRACE_L)?this.parseSelectionSet():void 0,loc:this.loc(n)}},t.parseArguments=function(e){var t=e?this.parseConstArgument:this.parseArgument;return this.optionalMany(nk.PAREN_L,t,nk.PAREN_R)},t.parseArgument=function(){var e=this._lexer.token,t=this.parseName();return this.expectToken(nk.COLON),{kind:nE.h.ARGUMENT,name:t,value:this.parseValueLiteral(!1),loc:this.loc(e)}},t.parseConstArgument=function(){var e=this._lexer.token;return{kind:nE.h.ARGUMENT,name:this.parseName(),value:(this.expectToken(nk.COLON),this.parseValueLiteral(!0)),loc:this.loc(e)}},t.parseFragment=function(){var e=this._lexer.token;this.expectToken(nk.SPREAD);var t=this.expectOptionalKeyword("on");return!t&&this.peek(nk.NAME)?{kind:nE.h.FRAGMENT_SPREAD,name:this.parseFragmentName(),directives:this.parseDirectives(!1),loc:this.loc(e)}:{kind:nE.h.INLINE_FRAGMENT,typeCondition:t?this.parseNamedType():void 0,directives:this.parseDirectives(!1),selectionSet:this.parseSelectionSet(),loc:this.loc(e)}},t.parseFragmentDefinition=function(){var e,t=this._lexer.token;return(this.expectKeyword("fragment"),(null===(e=this._options)||void 0===e?void 0:e.experimentalFragmentVariables)===!0)?{kind:nE.h.FRAGMENT_DEFINITION,name:this.parseFragmentName(),variableDefinitions:this.parseVariableDefinitions(),typeCondition:(this.expectKeyword("on"),this.parseNamedType()),directives:this.parseDirectives(!1),selectionSet:this.parseSelectionSet(),loc:this.loc(t)}:{kind:nE.h.FRAGMENT_DEFINITION,name:this.parseFragmentName(),typeCondition:(this.expectKeyword("on"),this.parseNamedType()),directives:this.parseDirectives(!1),selectionSet:this.parseSelectionSet(),loc:this.loc(t)}},t.parseFragmentName=function(){if("on"===this._lexer.token.value)throw this.unexpected();return this.parseName()},t.parseValueLiteral=function(e){var 
t=this._lexer.token;switch(t.kind){case nk.BRACKET_L:return this.parseList(e);case nk.BRACE_L:return this.parseObject(e);case nk.INT:return this._lexer.advance(),{kind:nE.h.INT,value:t.value,loc:this.loc(t)};case nk.FLOAT:return this._lexer.advance(),{kind:nE.h.FLOAT,value:t.value,loc:this.loc(t)};case nk.STRING:case nk.BLOCK_STRING:return this.parseStringLiteral();case nk.NAME:switch(this._lexer.advance(),t.value){case"true":return{kind:nE.h.BOOLEAN,value:!0,loc:this.loc(t)};case"false":return{kind:nE.h.BOOLEAN,value:!1,loc:this.loc(t)};case"null":return{kind:nE.h.NULL,loc:this.loc(t)};default:return{kind:nE.h.ENUM,value:t.value,loc:this.loc(t)}}case nk.DOLLAR:if(!e)return this.parseVariable()}throw this.unexpected()},t.parseStringLiteral=function(){var e=this._lexer.token;return this._lexer.advance(),{kind:nE.h.STRING,value:e.value,block:e.kind===nk.BLOCK_STRING,loc:this.loc(e)}},t.parseList=function(e){var t=this,n=this._lexer.token,r=function(){return t.parseValueLiteral(e)};return{kind:nE.h.LIST,values:this.any(nk.BRACKET_L,r,nk.BRACKET_R),loc:this.loc(n)}},t.parseObject=function(e){var t=this,n=this._lexer.token,r=function(){return t.parseObjectField(e)};return{kind:nE.h.OBJECT,fields:this.any(nk.BRACE_L,r,nk.BRACE_R),loc:this.loc(n)}},t.parseObjectField=function(e){var t=this._lexer.token,n=this.parseName();return this.expectToken(nk.COLON),{kind:nE.h.OBJECT_FIELD,name:n,value:this.parseValueLiteral(e),loc:this.loc(t)}},t.parseDirectives=function(e){for(var t=[];this.peek(nk.AT);)t.push(this.parseDirective(e));return t},t.parseDirective=function(e){var t=this._lexer.token;return this.expectToken(nk.AT),{kind:nE.h.DIRECTIVE,name:this.parseName(),arguments:this.parseArguments(e),loc:this.loc(t)}},t.parseTypeReference=function(){var 
e,t=this._lexer.token;return(this.expectOptionalToken(nk.BRACKET_L)?(e=this.parseTypeReference(),this.expectToken(nk.BRACKET_R),e={kind:nE.h.LIST_TYPE,type:e,loc:this.loc(t)}):e=this.parseNamedType(),this.expectOptionalToken(nk.BANG))?{kind:nE.h.NON_NULL_TYPE,type:e,loc:this.loc(t)}:e},t.parseNamedType=function(){var e=this._lexer.token;return{kind:nE.h.NAMED_TYPE,name:this.parseName(),loc:this.loc(e)}},t.parseTypeSystemDefinition=function(){var e=this.peekDescription()?this._lexer.lookahead():this._lexer.token;if(e.kind===nk.NAME)switch(e.value){case"schema":return this.parseSchemaDefinition();case"scalar":return this.parseScalarTypeDefinition();case"type":return this.parseObjectTypeDefinition();case"interface":return this.parseInterfaceTypeDefinition();case"union":return this.parseUnionTypeDefinition();case"enum":return this.parseEnumTypeDefinition();case"input":return this.parseInputObjectTypeDefinition();case"directive":return this.parseDirectiveDefinition()}throw this.unexpected(e)},t.peekDescription=function(){return this.peek(nk.STRING)||this.peek(nk.BLOCK_STRING)},t.parseDescription=function(){if(this.peekDescription())return this.parseStringLiteral()},t.parseSchemaDefinition=function(){var e=this._lexer.token,t=this.parseDescription();this.expectKeyword("schema");var n=this.parseDirectives(!0),r=this.many(nk.BRACE_L,this.parseOperationTypeDefinition,nk.BRACE_R);return{kind:nE.h.SCHEMA_DEFINITION,description:t,directives:n,operationTypes:r,loc:this.loc(e)}},t.parseOperationTypeDefinition=function(){var e=this._lexer.token,t=this.parseOperationType();this.expectToken(nk.COLON);var n=this.parseNamedType();return{kind:nE.h.OPERATION_TYPE_DEFINITION,operation:t,type:n,loc:this.loc(e)}},t.parseScalarTypeDefinition=function(){var e=this._lexer.token,t=this.parseDescription();this.expectKeyword("scalar");var 
n=this.parseName(),r=this.parseDirectives(!0);return{kind:nE.h.SCALAR_TYPE_DEFINITION,description:t,name:n,directives:r,loc:this.loc(e)}},t.parseObjectTypeDefinition=function(){var e=this._lexer.token,t=this.parseDescription();this.expectKeyword("type");var n=this.parseName(),r=this.parseImplementsInterfaces(),i=this.parseDirectives(!0),a=this.parseFieldsDefinition();return{kind:nE.h.OBJECT_TYPE_DEFINITION,description:t,name:n,interfaces:r,directives:i,fields:a,loc:this.loc(e)}},t.parseImplementsInterfaces=function(){var e;if(!this.expectOptionalKeyword("implements"))return[];if((null===(e=this._options)||void 0===e?void 0:e.allowLegacySDLImplementsInterfaces)===!0){var t=[];this.expectOptionalToken(nk.AMP);do t.push(this.parseNamedType());while(this.expectOptionalToken(nk.AMP)||this.peek(nk.NAME))return t}return this.delimitedMany(nk.AMP,this.parseNamedType)},t.parseFieldsDefinition=function(){var e;return(null===(e=this._options)||void 0===e?void 0:e.allowLegacySDLEmptyFields)===!0&&this.peek(nk.BRACE_L)&&this._lexer.lookahead().kind===nk.BRACE_R?(this._lexer.advance(),this._lexer.advance(),[]):this.optionalMany(nk.BRACE_L,this.parseFieldDefinition,nk.BRACE_R)},t.parseFieldDefinition=function(){var e=this._lexer.token,t=this.parseDescription(),n=this.parseName(),r=this.parseArgumentDefs();this.expectToken(nk.COLON);var i=this.parseTypeReference(),a=this.parseDirectives(!0);return{kind:nE.h.FIELD_DEFINITION,description:t,name:n,arguments:r,type:i,directives:a,loc:this.loc(e)}},t.parseArgumentDefs=function(){return this.optionalMany(nk.PAREN_L,this.parseInputValueDef,nk.PAREN_R)},t.parseInputValueDef=function(){var e,t=this._lexer.token,n=this.parseDescription(),r=this.parseName();this.expectToken(nk.COLON);var i=this.parseTypeReference();this.expectOptionalToken(nk.EQUALS)&&(e=this.parseValueLiteral(!0));var 
a=this.parseDirectives(!0);return{kind:nE.h.INPUT_VALUE_DEFINITION,description:n,name:r,type:i,defaultValue:e,directives:a,loc:this.loc(t)}},t.parseInterfaceTypeDefinition=function(){var e=this._lexer.token,t=this.parseDescription();this.expectKeyword("interface");var n=this.parseName(),r=this.parseImplementsInterfaces(),i=this.parseDirectives(!0),a=this.parseFieldsDefinition();return{kind:nE.h.INTERFACE_TYPE_DEFINITION,description:t,name:n,interfaces:r,directives:i,fields:a,loc:this.loc(e)}},t.parseUnionTypeDefinition=function(){var e=this._lexer.token,t=this.parseDescription();this.expectKeyword("union");var n=this.parseName(),r=this.parseDirectives(!0),i=this.parseUnionMemberTypes();return{kind:nE.h.UNION_TYPE_DEFINITION,description:t,name:n,directives:r,types:i,loc:this.loc(e)}},t.parseUnionMemberTypes=function(){return this.expectOptionalToken(nk.EQUALS)?this.delimitedMany(nk.PIPE,this.parseNamedType):[]},t.parseEnumTypeDefinition=function(){var e=this._lexer.token,t=this.parseDescription();this.expectKeyword("enum");var n=this.parseName(),r=this.parseDirectives(!0),i=this.parseEnumValuesDefinition();return{kind:nE.h.ENUM_TYPE_DEFINITION,description:t,name:n,directives:r,values:i,loc:this.loc(e)}},t.parseEnumValuesDefinition=function(){return this.optionalMany(nk.BRACE_L,this.parseEnumValueDefinition,nk.BRACE_R)},t.parseEnumValueDefinition=function(){var e=this._lexer.token,t=this.parseDescription(),n=this.parseName(),r=this.parseDirectives(!0);return{kind:nE.h.ENUM_VALUE_DEFINITION,description:t,name:n,directives:r,loc:this.loc(e)}},t.parseInputObjectTypeDefinition=function(){var e=this._lexer.token,t=this.parseDescription();this.expectKeyword("input");var n=this.parseName(),r=this.parseDirectives(!0),i=this.parseInputFieldsDefinition();return{kind:nE.h.INPUT_OBJECT_TYPE_DEFINITION,description:t,name:n,directives:r,fields:i,loc:this.loc(e)}},t.parseInputFieldsDefinition=function(){return 
this.optionalMany(nk.BRACE_L,this.parseInputValueDef,nk.BRACE_R)},t.parseTypeSystemExtension=function(){var e=this._lexer.lookahead();if(e.kind===nk.NAME)switch(e.value){case"schema":return this.parseSchemaExtension();case"scalar":return this.parseScalarTypeExtension();case"type":return this.parseObjectTypeExtension();case"interface":return this.parseInterfaceTypeExtension();case"union":return this.parseUnionTypeExtension();case"enum":return this.parseEnumTypeExtension();case"input":return this.parseInputObjectTypeExtension()}throw this.unexpected(e)},t.parseSchemaExtension=function(){var e=this._lexer.token;this.expectKeyword("extend"),this.expectKeyword("schema");var t=this.parseDirectives(!0),n=this.optionalMany(nk.BRACE_L,this.parseOperationTypeDefinition,nk.BRACE_R);if(0===t.length&&0===n.length)throw this.unexpected();return{kind:nE.h.SCHEMA_EXTENSION,directives:t,operationTypes:n,loc:this.loc(e)}},t.parseScalarTypeExtension=function(){var e=this._lexer.token;this.expectKeyword("extend"),this.expectKeyword("scalar");var t=this.parseName(),n=this.parseDirectives(!0);if(0===n.length)throw this.unexpected();return{kind:nE.h.SCALAR_TYPE_EXTENSION,name:t,directives:n,loc:this.loc(e)}},t.parseObjectTypeExtension=function(){var e=this._lexer.token;this.expectKeyword("extend"),this.expectKeyword("type");var t=this.parseName(),n=this.parseImplementsInterfaces(),r=this.parseDirectives(!0),i=this.parseFieldsDefinition();if(0===n.length&&0===r.length&&0===i.length)throw this.unexpected();return{kind:nE.h.OBJECT_TYPE_EXTENSION,name:t,interfaces:n,directives:r,fields:i,loc:this.loc(e)}},t.parseInterfaceTypeExtension=function(){var e=this._lexer.token;this.expectKeyword("extend"),this.expectKeyword("interface");var t=this.parseName(),n=this.parseImplementsInterfaces(),r=this.parseDirectives(!0),i=this.parseFieldsDefinition();if(0===n.length&&0===r.length&&0===i.length)throw 
this.unexpected();return{kind:nE.h.INTERFACE_TYPE_EXTENSION,name:t,interfaces:n,directives:r,fields:i,loc:this.loc(e)}},t.parseUnionTypeExtension=function(){var e=this._lexer.token;this.expectKeyword("extend"),this.expectKeyword("union");var t=this.parseName(),n=this.parseDirectives(!0),r=this.parseUnionMemberTypes();if(0===n.length&&0===r.length)throw this.unexpected();return{kind:nE.h.UNION_TYPE_EXTENSION,name:t,directives:n,types:r,loc:this.loc(e)}},t.parseEnumTypeExtension=function(){var e=this._lexer.token;this.expectKeyword("extend"),this.expectKeyword("enum");var t=this.parseName(),n=this.parseDirectives(!0),r=this.parseEnumValuesDefinition();if(0===n.length&&0===r.length)throw this.unexpected();return{kind:nE.h.ENUM_TYPE_EXTENSION,name:t,directives:n,values:r,loc:this.loc(e)}},t.parseInputObjectTypeExtension=function(){var e=this._lexer.token;this.expectKeyword("extend"),this.expectKeyword("input");var t=this.parseName(),n=this.parseDirectives(!0),r=this.parseInputFieldsDefinition();if(0===n.length&&0===r.length)throw this.unexpected();return{kind:nE.h.INPUT_OBJECT_TYPE_EXTENSION,name:t,directives:n,fields:r,loc:this.loc(e)}},t.parseDirectiveDefinition=function(){var e=this._lexer.token,t=this.parseDescription();this.expectKeyword("directive"),this.expectToken(nk.AT);var n=this.parseName(),r=this.parseArgumentDefs(),i=this.expectOptionalKeyword("repeatable");this.expectKeyword("on");var a=this.parseDirectiveLocations();return{kind:nE.h.DIRECTIVE_DEFINITION,description:t,name:n,arguments:r,repeatable:i,locations:a,loc:this.loc(e)}},t.parseDirectiveLocations=function(){return this.delimitedMany(nk.PIPE,this.parseDirectiveLocation)},t.parseDirectiveLocation=function(){var e=this._lexer.token,t=this.parseName();if(void 0!==nT[t.value])return t;throw this.unexpected(e)},t.loc=function(e){var t;if((null===(t=this._options)||void 0===t?void 0:t.noLocation)!==!0)return new nS.Ye(e,this._lexer.lastToken,this._lexer.source)},t.peek=function(e){return 
this._lexer.token.kind===e},t.expectToken=function(e){var t=this._lexer.token;if(t.kind===e)return this._lexer.advance(),t;throw n_(this._lexer.source,t.start,"Expected ".concat(nG(e),", found ").concat(nz(t),"."))},t.expectOptionalToken=function(e){var t=this._lexer.token;if(t.kind===e)return this._lexer.advance(),t},t.expectKeyword=function(e){var t=this._lexer.token;if(t.kind===nk.NAME&&t.value===e)this._lexer.advance();else throw n_(this._lexer.source,t.start,'Expected "'.concat(e,'", found ').concat(nz(t),"."))},t.expectOptionalKeyword=function(e){var t=this._lexer.token;return t.kind===nk.NAME&&t.value===e&&(this._lexer.advance(),!0)},t.unexpected=function(e){var t=null!=e?e:this._lexer.token;return n_(this._lexer.source,t.start,"Unexpected ".concat(nz(t),"."))},t.any=function(e,t,n){this.expectToken(e);for(var r=[];!this.expectOptionalToken(n);)r.push(t.call(this));return r},t.optionalMany=function(e,t,n){if(this.expectOptionalToken(e)){var r=[];do r.push(t.call(this));while(!this.expectOptionalToken(n))return r}return[]},t.many=function(e,t,n){this.expectToken(e);var r=[];do r.push(t.call(this));while(!this.expectOptionalToken(n))return r},t.delimitedMany=function(e,t){this.expectOptionalToken(e);var n=[];do n.push(t.call(this));while(this.expectOptionalToken(e))return n},e}();function nz(e){var t=e.value;return nG(e.kind)+(null!=t?' 
"'.concat(t,'"'):"")}function nG(e){return nA(e)?'"'.concat(e,'"'):e}var nW=new Map,nK=new Map,nV=!0,nq=!1;function nZ(e){return e.replace(/[\s,]+/g," ").trim()}function nX(e){return nZ(e.source.body.substring(e.start,e.end))}function nJ(e){var t=new Set,n=[];return e.definitions.forEach(function(e){if("FragmentDefinition"===e.kind){var r=e.name.value,i=nX(e.loc),a=nK.get(r);a&&!a.has(i)?nV&&console.warn("Warning: fragment with name "+r+" already exists.\ngraphql-tag enforces all fragment names across your application to be unique; read more about\nthis in the docs: http://dev.apollodata.com/core/fragments.html#unique-names"):a||nK.set(r,a=new Set),a.add(i),t.has(i)||(t.add(i),n.push(e))}else n.push(e)}),(0,t0.pi)((0,t0.pi)({},e),{definitions:n})}function nQ(e){var t=new Set(e.definitions);t.forEach(function(e){e.loc&&delete e.loc,Object.keys(e).forEach(function(n){var r=e[n];r&&"object"==typeof r&&t.add(r)})});var n=e.loc;return n&&(delete n.startToken,delete n.endToken),e}function n1(e){var t=nZ(e);if(!nW.has(t)){var n=nH(e,{experimentalFragmentVariables:nq,allowLegacyFragmentVariables:nq});if(!n||"Document"!==n.kind)throw Error("Not a valid GraphQL document.");nW.set(t,nQ(nJ(n)))}return nW.get(t)}function n0(e){for(var t=[],n=1;n, or pass an ApolloClient instance in via options.'):(0,n7.kG)(!!n,32),n}var rb=n(10542),rm=n(53712),rg=n(21436),rv=Object.prototype.hasOwnProperty;function ry(e,t){return void 0===t&&(t=Object.create(null)),rw(rp(t.client),e).useQuery(t)}function rw(e,t){var n=(0,l.useRef)();n.current&&e===n.current.client&&t===n.current.query||(n.current=new r_(e,t,n.current));var r=n.current,i=(0,l.useState)(0),a=(i[0],i[1]);return r.forceUpdate=function(){a(function(e){return e+1})},r}var r_=function(){function e(e,t,n){this.client=e,this.query=t,this.ssrDisabledResult=(0,rb.J)({loading:!0,data:void 0,error:void 0,networkStatus:rc.I.loading}),this.skipStandbyResult=(0,rb.J)({loading:!1,data:void 0,error:void 
0,networkStatus:rc.I.ready}),this.toQueryResultCache=new(re.mr?WeakMap:Map),rh(t,r.Query);var i=n&&n.result,a=i&&i.data;a&&(this.previousData=a)}return e.prototype.forceUpdate=function(){__DEV__&&n7.kG.warn("Calling default no-op implementation of InternalState#forceUpdate")},e.prototype.executeQuery=function(e){var t,n=this;e.query&&Object.assign(this,{query:e.query}),this.watchQueryOptions=this.createWatchQueryOptions(this.queryHookOptions=e);var r=this.observable.reobserveAsConcast(this.getObsQueryOptions());return this.previousData=(null===(t=this.result)||void 0===t?void 0:t.data)||this.previousData,this.result=void 0,this.forceUpdate(),new Promise(function(e){var t;r.subscribe({next:function(e){t=e},error:function(){e(n.toQueryResult(n.observable.getCurrentResult()))},complete:function(){e(n.toQueryResult(t))}})})},e.prototype.useQuery=function(e){var t=this;this.renderPromises=(0,l.useContext)((0,rs.K)()).renderPromises,this.useOptions(e);var n=this.useObservableQuery(),r=rn((0,l.useCallback)(function(){if(t.renderPromises)return function(){};var e=function(){var e=t.result,r=n.getCurrentResult();!(e&&e.loading===r.loading&&e.networkStatus===r.networkStatus&&(0,ra.D)(e.data,r.data))&&t.setResult(r)},r=function(a){var o=n.last;i.unsubscribe();try{n.resetLastResults(),i=n.subscribe(e,r)}finally{n.last=o}if(!rv.call(a,"graphQLErrors"))throw a;var s=t.result;(!s||s&&s.loading||!(0,ra.D)(a,s.error))&&t.setResult({data:s&&s.data,error:a,loading:!1,networkStatus:rc.I.error})},i=n.subscribe(e,r);return function(){return setTimeout(function(){return i.unsubscribe()})}},[n,this.renderPromises,this.client.disableNetworkFetches,]),function(){return t.getCurrentResult()},function(){return t.getCurrentResult()});return this.unsafeHandlePartialRefetch(r),this.toQueryResult(r)},e.prototype.useOptions=function(t){var 
n,r=this.createWatchQueryOptions(this.queryHookOptions=t),i=this.watchQueryOptions;!(0,ra.D)(r,i)&&(this.watchQueryOptions=r,i&&this.observable&&(this.observable.reobserve(this.getObsQueryOptions()),this.previousData=(null===(n=this.result)||void 0===n?void 0:n.data)||this.previousData,this.result=void 0)),this.onCompleted=t.onCompleted||e.prototype.onCompleted,this.onError=t.onError||e.prototype.onError,(this.renderPromises||this.client.disableNetworkFetches)&&!1===this.queryHookOptions.ssr&&!this.queryHookOptions.skip?this.result=this.ssrDisabledResult:this.queryHookOptions.skip||"standby"===this.watchQueryOptions.fetchPolicy?this.result=this.skipStandbyResult:(this.result===this.ssrDisabledResult||this.result===this.skipStandbyResult)&&(this.result=void 0)},e.prototype.getObsQueryOptions=function(){var e=[],t=this.client.defaultOptions.watchQuery;return t&&e.push(t),this.queryHookOptions.defaultOptions&&e.push(this.queryHookOptions.defaultOptions),e.push((0,rm.o)(this.observable&&this.observable.options,this.watchQueryOptions)),e.reduce(ro.J)},e.prototype.createWatchQueryOptions=function(e){void 0===e&&(e={});var t,n=e.skip,r=Object.assign((e.ssr,e.onCompleted,e.onError,e.defaultOptions,(0,n8._T)(e,["skip","ssr","onCompleted","onError","defaultOptions"])),{query:this.query});if(this.renderPromises&&("network-only"===r.fetchPolicy||"cache-and-network"===r.fetchPolicy)&&(r.fetchPolicy="cache-first"),r.variables||(r.variables={}),n){var i=r.fetchPolicy,a=void 0===i?this.getDefaultFetchPolicy():i,o=r.initialFetchPolicy;Object.assign(r,{initialFetchPolicy:void 0===o?a:o,fetchPolicy:"standby"})}else r.fetchPolicy||(r.fetchPolicy=(null===(t=this.observable)||void 0===t?void 0:t.options.initialFetchPolicy)||this.getDefaultFetchPolicy());return r},e.prototype.getDefaultFetchPolicy=function(){var e,t;return(null===(e=this.queryHookOptions.defaultOptions)||void 0===e?void 0:e.fetchPolicy)||(null===(t=this.client.defaultOptions.watchQuery)||void 0===t?void 
0:t.fetchPolicy)||"cache-first"},e.prototype.onCompleted=function(e){},e.prototype.onError=function(e){},e.prototype.useObservableQuery=function(){var e=this.observable=this.renderPromises&&this.renderPromises.getSSRObservable(this.watchQueryOptions)||this.observable||this.client.watchQuery(this.getObsQueryOptions());this.obsQueryFields=(0,l.useMemo)(function(){return{refetch:e.refetch.bind(e),reobserve:e.reobserve.bind(e),fetchMore:e.fetchMore.bind(e),updateQuery:e.updateQuery.bind(e),startPolling:e.startPolling.bind(e),stopPolling:e.stopPolling.bind(e),subscribeToMore:e.subscribeToMore.bind(e)}},[e]);var t=!(!1===this.queryHookOptions.ssr||this.queryHookOptions.skip);return this.renderPromises&&t&&(this.renderPromises.registerSSRObservable(e),e.getCurrentResult().loading&&this.renderPromises.addObservableQueryPromise(e)),e},e.prototype.setResult=function(e){var t=this.result;t&&t.data&&(this.previousData=t.data),this.result=e,this.forceUpdate(),this.handleErrorOrCompleted(e)},e.prototype.handleErrorOrCompleted=function(e){var t=this;if(!e.loading){var n=this.toApolloError(e);Promise.resolve().then(function(){n?t.onError(n):e.data&&t.onCompleted(e.data)}).catch(function(e){__DEV__&&n7.kG.warn(e)})}},e.prototype.toApolloError=function(e){return(0,rg.O)(e.errors)?new ru.cA({graphQLErrors:e.errors}):e.error},e.prototype.getCurrentResult=function(){return this.result||this.handleErrorOrCompleted(this.result=this.observable.getCurrentResult()),this.result},e.prototype.toQueryResult=function(e){var t=this.toQueryResultCache.get(e);if(t)return t;var n=e.data,r=(e.partial,(0,n8._T)(e,["data","partial"]));return this.toQueryResultCache.set(e,t=(0,n8.pi)((0,n8.pi)((0,n8.pi)({data:n},r),this.obsQueryFields),{client:this.client,observable:this.observable,variables:this.observable.variables,called:!this.queryHookOptions.skip,previousData:this.previousData})),!t.error&&(0,rg.O)(e.errors)&&(t.error=new 
ru.cA({graphQLErrors:e.errors})),t},e.prototype.unsafeHandlePartialRefetch=function(e){e.partial&&this.queryHookOptions.partialRefetch&&!e.loading&&(!e.data||0===Object.keys(e.data).length)&&"cache-only"!==this.observable.options.fetchPolicy&&(Object.assign(e,{loading:!0,networkStatus:rc.I.refetch}),this.observable.refetch())},e}();function rE(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&void 0!==arguments[0]?arguments[0]:{};return ry(i$,e)},iG=function(){var e=iF(),t=parseInt(e.get("page")||"1",10),n=parseInt(e.get("per")||"50",10),r=iz({variables:{offset:(t-1)*n,limit:n},fetchPolicy:"network-only"}),i=r.data,a=r.loading,o=r.error;return a?l.createElement(ij,null):o?l.createElement(iN,{error:o}):i?l.createElement(iD,{chains:i.chains.results,page:t,pageSize:n,total:i.chains.metadata.total}):null},iW=n(67932),iK=n(8126),iV="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e};function iq(e){if(iZ())return Intl.DateTimeFormat.supportedLocalesOf(e)[0]}function iZ(){return("undefined"==typeof Intl?"undefined":iV(Intl))==="object"&&"function"==typeof Intl.DateTimeFormat}var iX="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},iJ=function(){function e(e,t){for(var n=0;n=i.length)break;s=i[o++]}else{if((o=i.next()).done)break;s=o.value}var s,u=s;if((void 0===e?"undefined":iX(e))!=="object")return;e=e[u]}return e}},{key:"put",value:function(){for(var e=arguments.length,t=Array(e),n=0;n=o.length)break;c=o[u++]}else{if((u=o.next()).done)break;c=u.value}var c,l=c;"object"!==iX(a[l])&&(a[l]={}),a=a[l]}return a[i]=r}}]),e}();let i0=i1;var i2=new i0;function 
i3(e,t){if(!iZ())return function(e){return e.toString()};var n=i5(e),r=JSON.stringify(t),i=i2.get(String(n),r)||i2.put(String(n),r,new Intl.DateTimeFormat(n,t));return function(e){return i.format(e)}}var i4={};function i5(e){var t=e.toString();return i4[t]?i4[t]:i4[t]=iq(e)}var i6="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e};function i9(e){return i8(e)?e:new Date(e)}function i8(e){return e instanceof Date||i7(e)}function i7(e){return(void 0===e?"undefined":i6(e))==="object"&&"function"==typeof e.getTime}var ae=n(54087),at=n.n(ae);function an(e,t){if(0===e.length)return 0;for(var n=0,r=e.length-1,i=void 0;n<=r;){var a=t(e[i=Math.floor((r+n)/2)]);if(0===a)return i;if(a<0){if((n=i+1)>r)return n}else if((r=i-1)=t.nextUpdateTime)ao(t,this.instances);else break}},scheduleNextTick:function(){var e=this;this.scheduledTick=at()(function(){e.tick(),e.scheduleNextTick()})},start:function(){this.scheduleNextTick()},stop:function(){at().cancel(this.scheduledTick)}};function aa(e){var t=ar(e.getNextValue(),2),n=t[0],r=t[1];e.setValue(n),e.nextUpdateTime=r}function ao(e,t){aa(e),au(t,e),as(t,e)}function as(e,t){var n=ac(e,t);e.splice(n,0,t)}function au(e,t){var n=e.indexOf(t);e.splice(n,1)}function ac(e,t){var n=t.nextUpdateTime;return an(e,function(e){return e.nextUpdateTime===n?0:e.nextUpdateTime>n?1:-1})}var al=(0,ec.oneOfType)([(0,ec.shape)({minTime:ec.number,formatAs:ec.string.isRequired}),(0,ec.shape)({test:ec.func,formatAs:ec.string.isRequired}),(0,ec.shape)({minTime:ec.number,format:ec.func.isRequired}),(0,ec.shape)({test:ec.func,format:ec.func.isRequired})]),af=(0,ec.oneOfType)([ec.string,(0,ec.shape)({steps:(0,ec.arrayOf)(al).isRequired,labels:(0,ec.oneOfType)([ec.string,(0,ec.arrayOf)(ec.string)]).isRequired,round:ec.string})]),ad=Object.assign||function(e){for(var 
t=1;t=0)&&Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}function ab(e){var t=e.date,n=e.future,r=e.timeStyle,i=e.round,a=e.minTimeLeft,o=e.tooltip,s=e.component,u=e.container,c=e.wrapperComponent,f=e.wrapperProps,d=e.locale,h=e.locales,p=e.formatVerboseDate,b=e.verboseDateFormat,m=e.updateInterval,g=e.tick,v=ap(e,["date","future","timeStyle","round","minTimeLeft","tooltip","component","container","wrapperComponent","wrapperProps","locale","locales","formatVerboseDate","verboseDateFormat","updateInterval","tick"]),y=(0,l.useMemo)(function(){return d&&(h=[d]),h.concat(iK.Z.getDefaultLocale())},[d,h]),w=(0,l.useMemo)(function(){return new iK.Z(y)},[y]);t=(0,l.useMemo)(function(){return i9(t)},[t]);var _=(0,l.useCallback)(function(){var e=Date.now(),o=void 0;if(n&&e>=t.getTime()&&(e=t.getTime(),o=!0),void 0!==a){var s=t.getTime()-1e3*a;e>s&&(e=s,o=!0)}var u=w.format(t,r,{getTimeToNextUpdate:!0,now:e,future:n,round:i}),c=ah(u,2),l=c[0],f=c[1];return f=o?av:m||f||6e4,[l,e+f]},[t,n,r,m,i,a,w]),E=(0,l.useRef)();E.current=_;var S=(0,l.useMemo)(_,[]),k=ah(S,2),x=k[0],T=k[1],M=(0,l.useState)(x),O=ah(M,2),A=O[0],L=O[1],C=ah((0,l.useState)(),2),I=C[0],D=C[1],N=(0,l.useRef)();(0,l.useEffect)(function(){if(g)return N.current=ai.add({getNextValue:function(){return E.current()},setValue:L,nextUpdateTime:T}),function(){return N.current.stop()}},[g]),(0,l.useEffect)(function(){if(N.current)N.current.forceUpdate();else{var e=_(),t=ah(e,1)[0];L(t)}},[_]),(0,l.useEffect)(function(){D(!0)},[]);var P=(0,l.useMemo)(function(){if("undefined"!=typeof window)return i3(y,b)},[y,b]),R=(0,l.useMemo)(function(){if("undefined"!=typeof window)return p?p(t):P(t)},[t,p,P]),j=l.createElement(s,ad({date:t,verboseDate:I?R:void 0,tooltip:o},v),A),F=c||u;return F?l.createElement(F,ad({},f,{verboseDate:I?R:void 
0}),j):j}ab.propTypes={date:el().oneOfType([el().instanceOf(Date),el().number]).isRequired,locale:el().string,locales:el().arrayOf(el().string),future:el().bool,timeStyle:af,round:el().string,minTimeLeft:el().number,component:el().elementType.isRequired,tooltip:el().bool.isRequired,formatVerboseDate:el().func,verboseDateFormat:el().object,updateInterval:el().oneOfType([el().number,el().arrayOf(el().shape({threshold:el().number,interval:el().number.isRequired}))]),tick:el().bool,wrapperComponent:el().func,wrapperProps:el().object},ab.defaultProps={locales:[],component:ay,tooltip:!0,verboseDateFormat:{weekday:"long",day:"numeric",month:"long",year:"numeric",hour:"numeric",minute:"2-digit",second:"2-digit"},tick:!0},ab=l.memo(ab);let am=ab;var ag,av=31536e9;function ay(e){var t=e.date,n=e.verboseDate,r=e.tooltip,i=e.children,a=ap(e,["date","verboseDate","tooltip","children"]),o=(0,l.useMemo)(function(){return t.toISOString()},[t]);return l.createElement("time",ad({},a,{dateTime:o,title:r?n:void 0}),i)}ay.propTypes={date:el().instanceOf(Date).isRequired,verboseDate:el().string,tooltip:el().bool.isRequired,children:el().string.isRequired};var aw=n(30381),a_=n.n(aw),aE=n(31657);function aS(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function ak(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]0?new ru.cA({graphQLErrors:i}):void 0;if(u===s.current.mutationId&&!c.ignoreResults){var f={called:!0,loading:!1,data:r,error:l,client:a};s.current.isMounted&&!(0,ra.D)(s.current.result,f)&&o(s.current.result=f)}var d=e.onCompleted||(null===(n=s.current.options)||void 0===n?void 0:n.onCompleted);return null==d||d(t.data,c),t}).catch(function(t){if(u===s.current.mutationId&&s.current.isMounted){var n,r={loading:!1,error:t,data:void 
0,called:!0,client:a};(0,ra.D)(s.current.result,r)||o(s.current.result=r)}var i=e.onError||(null===(n=s.current.options)||void 0===n?void 0:n.onError);if(i)return i(t,c),{data:void 0,errors:t};throw t})},[]),c=(0,l.useCallback)(function(){s.current.isMounted&&o({called:!1,loading:!1,client:n})},[]);return(0,l.useEffect)(function(){return s.current.isMounted=!0,function(){s.current.isMounted=!1}},[]),[u,(0,n8.pi)({reset:c},a)]}var ou=n(59067),oc=n(28428),ol=n(11186),of=n(78513);function od(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}var oh=function(e){return(0,b.createStyles)({paper:{display:"flex",margin:"".concat(2.5*e.spacing.unit,"px 0"),padding:"".concat(3*e.spacing.unit,"px ").concat(3.5*e.spacing.unit,"px")},content:{flex:1,width:"100%"},actions:od({marginTop:-(1.5*e.spacing.unit),marginLeft:-(4*e.spacing.unit)},e.breakpoints.up("sm"),{marginLeft:0,marginRight:-(1.5*e.spacing.unit)}),itemBlock:{border:"1px solid rgba(224, 224, 224, 1)",borderRadius:e.shape.borderRadius,padding:2*e.spacing.unit,marginTop:e.spacing.unit},itemBlockText:{overflowWrap:"anywhere"}})},op=(0,b.withStyles)(oh)(function(e){var t=e.actions,n=e.children,r=e.classes;return l.createElement(ia.default,{className:r.paper},l.createElement("div",{className:r.content},n),t&&l.createElement("div",{className:r.actions},t))}),ob=function(e){var t=e.title;return l.createElement(x.default,{variant:"subtitle2",gutterBottom:!0},t)},om=function(e){var t=e.children,n=e.value;return l.createElement(x.default,{variant:"body1",noWrap:!0},t||n)},og=(0,b.withStyles)(oh)(function(e){var t=e.children,n=e.classes,r=e.value;return l.createElement("div",{className:n.itemBlock},l.createElement(x.default,{variant:"body1",className:n.itemBlockText},t||r))});function ov(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var 
n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]-1}let sZ=sq;function sX(e,t){var n=this.__data__,r=s$(n,e);return r<0?(++this.size,n.push([e,t])):n[r][1]=t,this}let sJ=sX;function sQ(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t-1&&e%1==0&&e-1&&e%1==0&&e<=cI}let cN=cD;var cP="[object Arguments]",cR="[object Array]",cj="[object Boolean]",cF="[object Date]",cY="[object Error]",cB="[object Function]",cU="[object Map]",cH="[object Number]",c$="[object Object]",cz="[object RegExp]",cG="[object Set]",cW="[object String]",cK="[object WeakMap]",cV="[object ArrayBuffer]",cq="[object DataView]",cZ="[object Float64Array]",cX="[object Int8Array]",cJ="[object Int16Array]",cQ="[object Int32Array]",c1="[object Uint8Array]",c0="[object Uint8ClampedArray]",c2="[object Uint16Array]",c3="[object Uint32Array]",c4={};function c5(e){return eD(e)&&cN(e.length)&&!!c4[eC(e)]}c4["[object Float32Array]"]=c4[cZ]=c4[cX]=c4[cJ]=c4[cQ]=c4[c1]=c4[c0]=c4[c2]=c4[c3]=!0,c4[cP]=c4[cR]=c4[cV]=c4[cj]=c4[cq]=c4[cF]=c4[cY]=c4[cB]=c4[cU]=c4[cH]=c4[c$]=c4[cz]=c4[cG]=c4[cW]=c4[cK]=!1;let c6=c5;function c9(e){return function(t){return e(t)}}let c8=c9;var c7=n(79730),le=c7.Z&&c7.Z.isTypedArray,lt=le?c8(le):c6;let ln=lt;var lr=Object.prototype.hasOwnProperty;function li(e,t){var n=cT(e),r=!n&&ck(e),i=!n&&!r&&(0,cM.Z)(e),a=!n&&!r&&!i&&ln(e),o=n||r||i||a,s=o?cm(e.length,String):[],u=s.length;for(var c in e)(t||lr.call(e,c))&&!(o&&("length"==c||i&&("offset"==c||"parent"==c)||a&&("buffer"==c||"byteLength"==c||"byteOffset"==c)||cC(c,u)))&&s.push(c);return s}let la=li;var lo=Object.prototype;function ls(e){var t=e&&e.constructor;return e===("function"==typeof t&&t.prototype||lo)}let lu=ls;var lc=sM(Object.keys,Object);let ll=lc;var lf=Object.prototype.hasOwnProperty;function ld(e){if(!lu(e))return ll(e);var t=[];for(var n in Object(e))lf.call(e,n)&&"constructor"!=n&&t.push(n);return t}let lh=ld;function 
lp(e){return null!=e&&cN(e.length)&&!ui(e)}let lb=lp;function lm(e){return lb(e)?la(e):lh(e)}let lg=lm;function lv(e,t){return e&&cp(t,lg(t),e)}let ly=lv;function lw(e){var t=[];if(null!=e)for(var n in Object(e))t.push(n);return t}let l_=lw;var lE=Object.prototype.hasOwnProperty;function lS(e){if(!ed(e))return l_(e);var t=lu(e),n=[];for(var r in e)"constructor"==r&&(t||!lE.call(e,r))||n.push(r);return n}let lk=lS;function lx(e){return lb(e)?la(e,!0):lk(e)}let lT=lx;function lM(e,t){return e&&cp(t,lT(t),e)}let lO=lM;var lA=n(42896);function lL(e,t){var n=-1,r=e.length;for(t||(t=Array(r));++n=0||(i[n]=e[n]);return i}function hc(e){if(void 0===e)throw ReferenceError("this hasn't been initialised - super() hasn't been called");return e}var hl=function(e){return Array.isArray(e)&&0===e.length},hf=function(e){return"function"==typeof e},hd=function(e){return null!==e&&"object"==typeof e},hh=function(e){return String(Math.floor(Number(e)))===e},hp=function(e){return"[object String]"===Object.prototype.toString.call(e)},hb=function(e){return 0===l.Children.count(e)},hm=function(e){return hd(e)&&hf(e.then)};function hg(e,t,n,r){void 0===r&&(r=0);for(var i=d8(t);e&&r=0?[]:{}}}return(0===a?e:i)[o[a]]===n?e:(void 0===n?delete i[o[a]]:i[o[a]]=n,0===a&&void 0===n&&delete r[o[a]],r)}function hy(e,t,n,r){void 0===n&&(n=new WeakMap),void 0===r&&(r={});for(var i=0,a=Object.keys(e);i0?t.map(function(t){return x(t,hg(e,t))}):[Promise.resolve("DO_NOT_DELETE_YOU_WILL_BE_FIRED")]).then(function(e){return e.reduce(function(e,n,r){return"DO_NOT_DELETE_YOU_WILL_BE_FIRED"===n||n&&(e=hv(e,t[r],n)),e},{})})},[x]),M=(0,l.useCallback)(function(e){return Promise.all([T(e),h.validationSchema?k(e):{},h.validate?S(e):{}]).then(function(e){var t=e[0],n=e[1],r=e[2];return sx.all([t,n,r],{arrayMerge:hC})})},[h.validate,h.validationSchema,T,S,k]),O=hP(function(e){return void 0===e&&(e=_.values),E({type:"SET_ISVALIDATING",payload:!0}),M(e).then(function(e){return 
v.current&&(E({type:"SET_ISVALIDATING",payload:!1}),sh()(_.errors,e)||E({type:"SET_ERRORS",payload:e})),e})});(0,l.useEffect)(function(){o&&!0===v.current&&sh()(p.current,h.initialValues)&&O(p.current)},[o,O]);var A=(0,l.useCallback)(function(e){var t=e&&e.values?e.values:p.current,n=e&&e.errors?e.errors:b.current?b.current:h.initialErrors||{},r=e&&e.touched?e.touched:m.current?m.current:h.initialTouched||{},i=e&&e.status?e.status:g.current?g.current:h.initialStatus;p.current=t,b.current=n,m.current=r,g.current=i;var a=function(){E({type:"RESET_FORM",payload:{isSubmitting:!!e&&!!e.isSubmitting,errors:n,touched:r,status:i,values:t,isValidating:!!e&&!!e.isValidating,submitCount:e&&e.submitCount&&"number"==typeof e.submitCount?e.submitCount:0}})};if(h.onReset){var o=h.onReset(_.values,V);hm(o)?o.then(a):a()}else a()},[h.initialErrors,h.initialStatus,h.initialTouched]);(0,l.useEffect)(function(){!0===v.current&&!sh()(p.current,h.initialValues)&&(c&&(p.current=h.initialValues,A()),o&&O(p.current))},[c,h.initialValues,A,o,O]),(0,l.useEffect)(function(){c&&!0===v.current&&!sh()(b.current,h.initialErrors)&&(b.current=h.initialErrors||hk,E({type:"SET_ERRORS",payload:h.initialErrors||hk}))},[c,h.initialErrors]),(0,l.useEffect)(function(){c&&!0===v.current&&!sh()(m.current,h.initialTouched)&&(m.current=h.initialTouched||hx,E({type:"SET_TOUCHED",payload:h.initialTouched||hx}))},[c,h.initialTouched]),(0,l.useEffect)(function(){c&&!0===v.current&&!sh()(g.current,h.initialStatus)&&(g.current=h.initialStatus,E({type:"SET_STATUS",payload:h.initialStatus}))},[c,h.initialStatus,h.initialTouched]);var L=hP(function(e){if(y.current[e]&&hf(y.current[e].validate)){var t=hg(_.values,e),n=y.current[e].validate(t);return hm(n)?(E({type:"SET_ISVALIDATING",payload:!0}),n.then(function(e){return e}).then(function(t){E({type:"SET_FIELD_ERROR",payload:{field:e,value:t}}),E({type:"SET_ISVALIDATING",payload:!1})})):(E({type:"SET_FIELD_ERROR",payload:{field:e,value:n}}),Promise.resolve(n))}return 
h.validationSchema?(E({type:"SET_ISVALIDATING",payload:!0}),k(_.values,e).then(function(e){return e}).then(function(t){E({type:"SET_FIELD_ERROR",payload:{field:e,value:t[e]}}),E({type:"SET_ISVALIDATING",payload:!1})})):Promise.resolve()}),C=(0,l.useCallback)(function(e,t){var n=t.validate;y.current[e]={validate:n}},[]),I=(0,l.useCallback)(function(e){delete y.current[e]},[]),D=hP(function(e,t){return E({type:"SET_TOUCHED",payload:e}),(void 0===t?i:t)?O(_.values):Promise.resolve()}),N=(0,l.useCallback)(function(e){E({type:"SET_ERRORS",payload:e})},[]),P=hP(function(e,t){var r=hf(e)?e(_.values):e;return E({type:"SET_VALUES",payload:r}),(void 0===t?n:t)?O(r):Promise.resolve()}),R=(0,l.useCallback)(function(e,t){E({type:"SET_FIELD_ERROR",payload:{field:e,value:t}})},[]),j=hP(function(e,t,r){return E({type:"SET_FIELD_VALUE",payload:{field:e,value:t}}),(void 0===r?n:r)?O(hv(_.values,e,t)):Promise.resolve()}),F=(0,l.useCallback)(function(e,t){var n,r=t,i=e;if(!hp(e)){e.persist&&e.persist();var a=e.target?e.target:e.currentTarget,o=a.type,s=a.name,u=a.id,c=a.value,l=a.checked,f=(a.outerHTML,a.options),d=a.multiple;r=t||s||u,i=/number|range/.test(o)?(n=parseFloat(c),isNaN(n)?"":n):/checkbox/.test(o)?hD(hg(_.values,r),l,c):d?hI(f):c}r&&j(r,i)},[j,_.values]),Y=hP(function(e){if(hp(e))return function(t){return F(t,e)};F(e)}),B=hP(function(e,t,n){return void 0===t&&(t=!0),E({type:"SET_FIELD_TOUCHED",payload:{field:e,value:t}}),(void 0===n?i:n)?O(_.values):Promise.resolve()}),U=(0,l.useCallback)(function(e,t){e.persist&&e.persist();var n,r=e.target,i=r.name,a=r.id;r.outerHTML,B(t||i||a,!0)},[B]),H=hP(function(e){if(hp(e))return function(t){return U(t,e)};U(e)}),$=(0,l.useCallback)(function(e){hf(e)?E({type:"SET_FORMIK_STATE",payload:e}):E({type:"SET_FORMIK_STATE",payload:function(){return e}})},[]),z=(0,l.useCallback)(function(e){E({type:"SET_STATUS",payload:e})},[]),G=(0,l.useCallback)(function(e){E({type:"SET_ISSUBMITTING",payload:e})},[]),W=hP(function(){return 
E({type:"SUBMIT_ATTEMPT"}),O().then(function(e){var t,n=e instanceof Error;if(!n&&0===Object.keys(e).length){try{if(void 0===(t=q()))return}catch(r){throw r}return Promise.resolve(t).then(function(e){return v.current&&E({type:"SUBMIT_SUCCESS"}),e}).catch(function(e){if(v.current)throw E({type:"SUBMIT_FAILURE"}),e})}if(v.current&&(E({type:"SUBMIT_FAILURE"}),n))throw e})}),K=hP(function(e){e&&e.preventDefault&&hf(e.preventDefault)&&e.preventDefault(),e&&e.stopPropagation&&hf(e.stopPropagation)&&e.stopPropagation(),W().catch(function(e){console.warn("Warning: An unhandled error was caught from submitForm()",e)})}),V={resetForm:A,validateForm:O,validateField:L,setErrors:N,setFieldError:R,setFieldTouched:B,setFieldValue:j,setStatus:z,setSubmitting:G,setTouched:D,setValues:P,setFormikState:$,submitForm:W},q=hP(function(){return f(_.values,V)}),Z=hP(function(e){e&&e.preventDefault&&hf(e.preventDefault)&&e.preventDefault(),e&&e.stopPropagation&&hf(e.stopPropagation)&&e.stopPropagation(),A()}),X=(0,l.useCallback)(function(e){return{value:hg(_.values,e),error:hg(_.errors,e),touched:!!hg(_.touched,e),initialValue:hg(p.current,e),initialTouched:!!hg(m.current,e),initialError:hg(b.current,e)}},[_.errors,_.touched,_.values]),J=(0,l.useCallback)(function(e){return{setValue:function(t,n){return j(e,t,n)},setTouched:function(t,n){return B(e,t,n)},setError:function(t){return R(e,t)}}},[j,B,R]),Q=(0,l.useCallback)(function(e){var t=hd(e),n=t?e.name:e,r=hg(_.values,n),i={name:n,value:r,onChange:Y,onBlur:H};if(t){var a=e.type,o=e.value,s=e.as,u=e.multiple;"checkbox"===a?void 0===o?i.checked=!!r:(i.checked=!!(Array.isArray(r)&&~r.indexOf(o)),i.value=o):"radio"===a?(i.checked=r===o,i.value=o):"select"===s&&u&&(i.value=i.value||[],i.multiple=!0)}return i},[H,Y,_.values]),ee=(0,l.useMemo)(function(){return!sh()(p.current,_.values)},[p.current,_.values]),et=(0,l.useMemo)(function(){return void 
0!==s?ee?_.errors&&0===Object.keys(_.errors).length:!1!==s&&hf(s)?s(h):s:_.errors&&0===Object.keys(_.errors).length},[s,ee,_.errors,h]);return ho({},_,{initialValues:p.current,initialErrors:b.current,initialTouched:m.current,initialStatus:g.current,handleBlur:H,handleChange:Y,handleReset:Z,handleSubmit:K,resetForm:A,setErrors:N,setFormikState:$,setFieldTouched:B,setFieldValue:j,setFieldError:R,setStatus:z,setSubmitting:G,setTouched:D,setValues:P,submitForm:W,validateForm:O,validateField:L,isValid:et,dirty:ee,unregisterField:I,registerField:C,getFieldProps:Q,getFieldMeta:X,getFieldHelpers:J,validateOnBlur:i,validateOnChange:n,validateOnMount:o})}function hM(e){var t=hT(e),n=e.component,r=e.children,i=e.render,a=e.innerRef;return(0,l.useImperativeHandle)(a,function(){return t}),(0,l.createElement)(h_,{value:t},n?(0,l.createElement)(n,t):i?i(t):r?hf(r)?r(t):hb(r)?null:l.Children.only(r):null)}function hO(e){var t={};if(e.inner){if(0===e.inner.length)return hv(t,e.path,e.message);for(var n=e.inner,r=Array.isArray(n),i=0,n=r?n:n[Symbol.iterator]();;){if(r){if(i>=n.length)break;a=n[i++]}else{if((i=n.next()).done)break;a=i.value}var a,o=a;hg(t,o.path)||(t=hv(t,o.path,o.message))}}return t}function hA(e,t,n,r){void 0===n&&(n=!1),void 0===r&&(r={});var i=hL(e);return t[n?"validateSync":"validate"](i,{abortEarly:!1,context:r})}function hL(e){var t=Array.isArray(e)?[]:{};for(var n in e)if(Object.prototype.hasOwnProperty.call(e,n)){var r=String(n);!0===Array.isArray(e[r])?t[r]=e[r].map(function(e){return!0===Array.isArray(e)||sj(e)?hL(e):""!==e?e:void 0}):sj(e[r])?t[r]=hL(e[r]):t[r]=""!==e[r]?e[r]:void 0}return t}function hC(e,t,n){var r=e.slice();return t.forEach(function(t,i){if(void 0===r[i]){var a=!1!==n.clone&&n.isMergeableObject(t);r[i]=a?sx(Array.isArray(t)?[]:{},t,n):t}else n.isMergeableObject(t)?r[i]=sx(e[i],t,n):-1===e.indexOf(t)&&r.push(t)}),r}function hI(e){return Array.from(e).filter(function(e){return e.selected}).map(function(e){return e.value})}function 
hD(e,t,n){if("boolean"==typeof e)return Boolean(t);var r=[],i=!1,a=-1;if(Array.isArray(e))r=e,i=(a=e.indexOf(n))>=0;else if(!n||"true"==n||"false"==n)return Boolean(t);return t&&n&&!i?r.concat(n):i?r.slice(0,a).concat(r.slice(a+1)):r}var hN="undefined"!=typeof window&&void 0!==window.document&&void 0!==window.document.createElement?l.useLayoutEffect:l.useEffect;function hP(e){var t=(0,l.useRef)(e);return hN(function(){t.current=e}),(0,l.useCallback)(function(){for(var e=arguments.length,n=Array(e),r=0;re?t:e},0);return Array.from(ho({},e,{length:t+1}))};(function(e){function t(t){var n;return(n=e.call(this,t)||this).updateArrayField=function(e,t,r){var i=n.props,a=i.name;(0,i.formik.setFormikState)(function(n){var i="function"==typeof r?r:e,o="function"==typeof t?t:e,s=hv(n.values,a,e(hg(n.values,a))),u=r?i(hg(n.errors,a)):void 0,c=t?o(hg(n.touched,a)):void 0;return hl(u)&&(u=void 0),hl(c)&&(c=void 0),ho({},n,{values:s,errors:r?hv(n.errors,a,u):n.errors,touched:t?hv(n.touched,a,c):n.touched})})},n.push=function(e){return n.updateArrayField(function(t){return[].concat(hH(t),[ha(e)])},!1,!1)},n.handlePush=function(e){return function(){return n.push(e)}},n.swap=function(e,t){return n.updateArrayField(function(n){return hY(n,e,t)},!0,!0)},n.handleSwap=function(e,t){return function(){return n.swap(e,t)}},n.move=function(e,t){return n.updateArrayField(function(n){return hF(n,e,t)},!0,!0)},n.handleMove=function(e,t){return function(){return n.move(e,t)}},n.insert=function(e,t){return n.updateArrayField(function(n){return hB(n,e,t)},function(t){return hB(t,e,null)},function(t){return hB(t,e,null)})},n.handleInsert=function(e,t){return function(){return n.insert(e,t)}},n.replace=function(e,t){return n.updateArrayField(function(n){return hU(n,e,t)},!1,!1)},n.handleReplace=function(e,t){return function(){return n.replace(e,t)}},n.unshift=function(e){var t=-1;return n.updateArrayField(function(n){var r=n?[e].concat(n):[e];return t<0&&(t=r.length),r},function(e){var 
n=e?[null].concat(e):[null];return t<0&&(t=n.length),n},function(e){var n=e?[null].concat(e):[null];return t<0&&(t=n.length),n}),t},n.handleUnshift=function(e){return function(){return n.unshift(e)}},n.handleRemove=function(e){return function(){return n.remove(e)}},n.handlePop=function(){return function(){return n.pop()}},n.remove=n.remove.bind(hc(n)),n.pop=n.pop.bind(hc(n)),n}hs(t,e);var n=t.prototype;return n.componentDidUpdate=function(e){this.props.validateOnChange&&this.props.formik.validateOnChange&&!sh()(hg(e.formik.values,e.name),hg(this.props.formik.values,this.props.name))&&this.props.formik.validateForm(this.props.formik.values)},n.remove=function(e){var t;return this.updateArrayField(function(n){var r=n?hH(n):[];return t||(t=r[e]),hf(r.splice)&&r.splice(e,1),r},!0,!0),t},n.pop=function(){var e;return this.updateArrayField(function(t){var n=t;return e||(e=n&&n.pop&&n.pop()),n},!0,!0),e},n.render=function(){var e={push:this.push,pop:this.pop,swap:this.swap,move:this.move,insert:this.insert,replace:this.replace,unshift:this.unshift,remove:this.remove,handlePush:this.handlePush,handlePop:this.handlePop,handleSwap:this.handleSwap,handleMove:this.handleMove,handleInsert:this.handleInsert,handleReplace:this.handleReplace,handleUnshift:this.handleUnshift,handleRemove:this.handleRemove},t=this.props,n=t.component,r=t.render,i=t.children,a=t.name,o=hu(t.formik,["validate","validationSchema"]),s=ho({},e,{form:o,name:a});return n?(0,l.createElement)(n,s):r?r(s):i?"function"==typeof i?i(s):hb(i)?null:l.Children.only(i):null},t})(l.Component).defaultProps={validateOnChange:!0},l.Component,l.Component;var h$=n(24802),hz=n(71209),hG=n(91750),hW=n(11970),hK=n(4689),hV=n(67598),hq=function(){return(hq=Object.assign||function(e){for(var t,n=1,r=arguments.length;nt.indexOf(r)&&(n[r]=e[r]);if(null!=e&&"function"==typeof Object.getOwnPropertySymbols)for(var i=0,r=Object.getOwnPropertySymbols(e);it.indexOf(r[i])&&(n[r[i]]=e[r[i]]);return n}function hX(e){var 
t=e.disabled,n=e.field,r=n.onBlur,i=hZ(n,["onBlur"]),a=e.form,o=a.isSubmitting,s=a.touched,u=a.errors,c=e.onBlur,l=e.helperText,f=hZ(e,["disabled","field","form","onBlur","helperText"]),d=hg(u,i.name),h=hg(s,i.name)&&!!d;return hq(hq({variant:f.variant,error:h,helperText:h?d:l,disabled:null!=t?t:o,onBlur:null!=c?c:function(e){r(null!=e?e:i.name)}},i),f)}function hJ(e){var t=e.children,n=hZ(e,["children"]);return(0,l.createElement)(i_.Z,hq({},hX(n)),t)}function hQ(e){var t=e.disabled,n=e.field,r=n.onBlur,i=hZ(n,["onBlur"]),a=e.form.isSubmitting,o=(e.type,e.onBlur),s=hZ(e,["disabled","field","form","type","onBlur"]);return hq(hq({disabled:null!=t?t:a,onBlur:null!=o?o:function(e){r(null!=e?e:i.name)}},i),s)}function h1(e){return(0,l.createElement)(h$.Z,hq({},hQ(e)))}function h0(e){var t,n=e.disabled,r=e.field,i=r.onBlur,a=hZ(r,["onBlur"]),o=e.form.isSubmitting,s=(e.type,e.onBlur),u=hZ(e,["disabled","field","form","type","onBlur"]);return hq(hq({disabled:null!=n?n:o,indeterminate:!Array.isArray(a.value)&&null==a.value,onBlur:null!=s?s:function(e){i(null!=e?e:a.name)}},a),u)}function h2(e){return(0,l.createElement)(hz.Z,hq({},h0(e)))}function h3(e){var t=e.Label,n=hZ(e,["Label"]);return(0,l.createElement)(hG.Z,hq({control:(0,l.createElement)(hz.Z,hq({},h0(n)))},t))}function h4(e){var t=e.disabled,n=e.field,r=n.onBlur,i=hZ(n,["onBlur"]),a=e.form.isSubmitting,o=e.onBlur,s=hZ(e,["disabled","field","form","onBlur"]);return hq(hq({disabled:null!=t?t:a,onBlur:null!=o?o:function(e){r(null!=e?e:i.name)}},i),s)}function h5(e){return(0,l.createElement)(hW.default,hq({},h4(e)))}function h6(e){var t=e.field,n=t.onBlur,r=hZ(t,["onBlur"]),i=(e.form,e.onBlur),a=hZ(e,["field","form","onBlur"]);return hq(hq({onBlur:null!=i?i:function(e){n(null!=e?e:r.name)}},r),a)}function h9(e){return(0,l.createElement)(hK.Z,hq({},h6(e)))}function h8(e){var t=e.disabled,n=e.field,r=n.onBlur,i=hZ(n,["onBlur"]),a=e.form.isSubmitting,o=e.onBlur,s=hZ(e,["disabled","field","form","onBlur"]);return 
hq(hq({disabled:null!=t?t:a,onBlur:null!=o?o:function(e){r(null!=e?e:i.name)}},i),s)}function h7(e){return(0,l.createElement)(hV.default,hq({},h8(e)))}hJ.displayName="FormikMaterialUITextField",h1.displayName="FormikMaterialUISwitch",h2.displayName="FormikMaterialUICheckbox",h3.displayName="FormikMaterialUICheckboxWithLabel",h5.displayName="FormikMaterialUISelect",h9.displayName="FormikMaterialUIRadioGroup",h7.displayName="FormikMaterialUIInputBase";try{a=Map}catch(pe){}try{o=Set}catch(pt){}function pn(e,t,n){if(!e||"object"!=typeof e||"function"==typeof e)return e;if(e.nodeType&&"cloneNode"in e)return e.cloneNode(!0);if(e instanceof Date)return new Date(e.getTime());if(e instanceof RegExp)return RegExp(e);if(Array.isArray(e))return e.map(pr);if(a&&e instanceof a)return new Map(Array.from(e.entries()));if(o&&e instanceof o)return new Set(Array.from(e.values()));if(e instanceof Object){t.push(e);var r=Object.create(e);for(var i in n.push(r),e){var s=t.findIndex(function(t){return t===e[i]});r[i]=s>-1?n[s]:pn(e[i],t,n)}return r}return e}function pr(e){return pn(e,[],[])}let pi=Object.prototype.toString,pa=Error.prototype.toString,po=RegExp.prototype.toString,ps="undefined"!=typeof Symbol?Symbol.prototype.toString:()=>"",pu=/^Symbol\((.*)\)(.*)$/;function pc(e){if(e!=+e)return"NaN";let t=0===e&&1/e<0;return t?"-0":""+e}function pl(e,t=!1){if(null==e||!0===e||!1===e)return""+e;let n=typeof e;if("number"===n)return pc(e);if("string"===n)return t?`"${e}"`:e;if("function"===n)return"[Function "+(e.name||"anonymous")+"]";if("symbol"===n)return ps.call(e).replace(pu,"Symbol($1)");let r=pi.call(e).slice(8,-1);return"Date"===r?isNaN(e.getTime())?""+e:e.toISOString(e):"Error"===r||e instanceof Error?"["+pa.call(e)+"]":"RegExp"===r?po.call(e):null}function pf(e,t){let n=pl(e,t);return null!==n?n:JSON.stringify(e,function(e,n){let r=pl(this[e],t);return null!==r?r:n},2)}let pd={default:"${path} is invalid",required:"${path} is a required field",oneOf:"${path} must be one of the 
following values: ${values}",notOneOf:"${path} must not be one of the following values: ${values}",notType({path:e,type:t,value:n,originalValue:r}){let i=null!=r&&r!==n,a=`${e} must be a \`${t}\` type, but the final value was: \`${pf(n,!0)}\``+(i?` (cast from the value \`${pf(r,!0)}\`).`:".");return null===n&&(a+='\n If "null" is intended as an empty value be sure to mark the schema as `.nullable()`'),a},defined:"${path} must be defined"},ph={length:"${path} must be exactly ${length} characters",min:"${path} must be at least ${min} characters",max:"${path} must be at most ${max} characters",matches:'${path} must match the following: "${regex}"',email:"${path} must be a valid email",url:"${path} must be a valid URL",uuid:"${path} must be a valid UUID",trim:"${path} must be a trimmed string",lowercase:"${path} must be a lowercase string",uppercase:"${path} must be a upper case string"},pp={min:"${path} must be greater than or equal to ${min}",max:"${path} must be less than or equal to ${max}",lessThan:"${path} must be less than ${less}",moreThan:"${path} must be greater than ${more}",positive:"${path} must be a positive number",negative:"${path} must be a negative number",integer:"${path} must be an integer"},pb={min:"${path} field must be later than ${min}",max:"${path} field must be at earlier than ${max}"},pm={isValue:"${path} field must be ${value}"},pg={noUnknown:"${path} field has unspecified keys: ${unknown}"},pv={min:"${path} field must have at least ${min} items",max:"${path} field must have less than or equal to ${max} items",length:"${path} must be have ${length} items"};Object.assign(Object.create(null),{mixed:pd,string:ph,number:pp,date:pb,object:pg,array:pv,boolean:pm});var py=n(18721),pw=n.n(py);let p_=e=>e&&e.__isYupSchema__;class pE{constructor(e,t){if(this.refs=e,this.refs=e,"function"==typeof t){this.fn=t;return}if(!pw()(t,"is"))throw TypeError("`is:` is required for `when()` conditions");if(!t.then&&!t.otherwise)throw TypeError("either `then:` or 
`otherwise:` is required for `when()` conditions");let{is:n,then:r,otherwise:i}=t,a="function"==typeof n?n:(...e)=>e.every(e=>e===n);this.fn=function(...e){let t=e.pop(),n=e.pop(),o=a(...e)?r:i;if(o)return"function"==typeof o?o(n):n.concat(o.resolve(t))}}resolve(e,t){let n=this.refs.map(e=>e.getValue(null==t?void 0:t.value,null==t?void 0:t.parent,null==t?void 0:t.context)),r=this.fn.apply(e,n.concat(e,t));if(void 0===r||r===e)return e;if(!p_(r))throw TypeError("conditions must return a schema object");return r.resolve(t)}}let pS=pE;function pk(e){return null==e?[]:[].concat(e)}function px(){return(px=Object.assign||function(e){for(var t=1;tpf(t[n])):"function"==typeof e?e(t):e}static isError(e){return e&&"ValidationError"===e.name}constructor(e,t,n,r){super(),this.name="ValidationError",this.value=t,this.path=n,this.type=r,this.errors=[],this.inner=[],pk(e).forEach(e=>{pM.isError(e)?(this.errors.push(...e.errors),this.inner=this.inner.concat(e.inner.length?e.inner:e)):this.errors.push(e)}),this.message=this.errors.length>1?`${this.errors.length} errors occurred`:this.errors[0],Error.captureStackTrace&&Error.captureStackTrace(this,pM)}}let pO=e=>{let t=!1;return(...n)=>{t||(t=!0,e(...n))}};function pA(e,t){let{endEarly:n,tests:r,args:i,value:a,errors:o,sort:s,path:u}=e,c=pO(t),l=r.length,f=[];if(o=o||[],!l)return o.length?c(new pM(o,a,u)):c(null,a);for(let d=0;d=0||(i[n]=e[n]);return i}function pj(e){function t(t,n){let{value:r,path:i="",label:a,options:o,originalValue:s,sync:u}=t,c=pR(t,["value","path","label","options","originalValue","sync"]),{name:l,test:f,params:d,message:h}=e,{parent:p,context:b}=o;function m(e){return pN.isRef(e)?e.getValue(r,p,b):e}function g(e={}){let t=pC()(pP({value:r,originalValue:s,label:a,path:e.path||i},d,e.params),m),n=new pM(pM.formatError(e.message||h,t),r,t.path,e.type||l);return n.params=t,n}let 
v=pP({path:i,parent:p,type:l,createError:g,resolve:m,options:o,originalValue:s},c);if(!u){try{Promise.resolve(f.call(v,r,v)).then(e=>{pM.isError(e)?n(e):e?n(null,e):n(g())})}catch(y){n(y)}return}let w;try{var _;if(w=f.call(v,r,v),"function"==typeof(null==(_=w)?void 0:_.then))throw Error(`Validation test of type: "${v.type}" returned a Promise during a synchronous validate. This test will finish after the validate call has returned`)}catch(E){n(E);return}pM.isError(w)?n(w):w?n(null,w):n(g())}return t.OPTIONS=e,t}pN.prototype.__isYupRef=!0;let pF=e=>e.substr(0,e.length-1).substr(1);function pY(e,t,n,r=n){let i,a,o;return t?((0,pI.forEach)(t,(s,u,c)=>{let l=u?pF(s):s;if((e=e.resolve({context:r,parent:i,value:n})).innerType){let f=c?parseInt(l,10):0;if(n&&f>=n.length)throw Error(`Yup.reach cannot resolve an array item at index: ${s}, in the path: ${t}. because there is no value at that index. `);i=n,n=n&&n[f],e=e.innerType}if(!c){if(!e.fields||!e.fields[l])throw Error(`The schema does not contain the path: ${t}. 
(failed at: ${o} which is a type: "${e._type}")`);i=n,n=n&&n[l],e=e.fields[l]}a=l,o=u?"["+s+"]":"."+s}),{schema:e,parent:i,parentPath:a}):{parent:i,parentPath:t,schema:e}}class pB{constructor(){this.list=new Set,this.refs=new Map}get size(){return this.list.size+this.refs.size}describe(){let e=[];for(let t of this.list)e.push(t);for(let[,n]of this.refs)e.push(n.describe());return e}toArray(){return Array.from(this.list).concat(Array.from(this.refs.values()))}add(e){pN.isRef(e)?this.refs.set(e.key,e):this.list.add(e)}delete(e){pN.isRef(e)?this.refs.delete(e.key):this.list.delete(e)}has(e,t){if(this.list.has(e))return!0;let n,r=this.refs.values();for(;!(n=r.next()).done;)if(t(n.value)===e)return!0;return!1}clone(){let e=new pB;return e.list=new Set(this.list),e.refs=new Map(this.refs),e}merge(e,t){let n=this.clone();return e.list.forEach(e=>n.add(e)),e.refs.forEach(e=>n.add(e)),t.list.forEach(e=>n.delete(e)),t.refs.forEach(e=>n.delete(e)),n}}function pU(){return(pU=Object.assign||function(e){for(var t=1;t{this.typeError(pd.notType)}),this.type=(null==e?void 0:e.type)||"mixed",this.spec=pU({strip:!1,strict:!1,abortEarly:!0,recursive:!0,nullable:!1,presence:"optional"},null==e?void 0:e.spec)}get _type(){return this.type}_typeCheck(e){return!0}clone(e){if(this._mutate)return e&&Object.assign(this.spec,e),this;let t=Object.create(Object.getPrototypeOf(this));return t.type=this.type,t._typeError=this._typeError,t._whitelistError=this._whitelistError,t._blacklistError=this._blacklistError,t._whitelist=this._whitelist.clone(),t._blacklist=this._blacklist.clone(),t.exclusiveTests=pU({},this.exclusiveTests),t.deps=[...this.deps],t.conditions=[...this.conditions],t.tests=[...this.tests],t.transforms=[...this.transforms],t.spec=pr(pU({},this.spec,e)),t}label(e){var t=this.clone();return t.spec.label=e,t}meta(...e){if(0===e.length)return this.spec.meta;let t=this.clone();return t.spec.meta=Object.assign(t.spec.meta||{},e[0]),t}withMutation(e){let 
t=this._mutate;this._mutate=!0;let n=e(this);return this._mutate=t,n}concat(e){if(!e||e===this)return this;if(e.type!==this.type&&"mixed"!==this.type)throw TypeError(`You cannot \`concat()\` schema's of different types: ${this.type} and ${e.type}`);let t=this,n=e.clone(),r=pU({},t.spec,n.spec);return n.spec=r,n._typeError||(n._typeError=t._typeError),n._whitelistError||(n._whitelistError=t._whitelistError),n._blacklistError||(n._blacklistError=t._blacklistError),n._whitelist=t._whitelist.merge(e._whitelist,e._blacklist),n._blacklist=t._blacklist.merge(e._blacklist,e._whitelist),n.tests=t.tests,n.exclusiveTests=t.exclusiveTests,n.withMutation(t=>{e.tests.forEach(e=>{t.test(e.OPTIONS)})}),n}isType(e){return!!this.spec.nullable&&null===e||this._typeCheck(e)}resolve(e){let t=this;if(t.conditions.length){let n=t.conditions;(t=t.clone()).conditions=[],t=(t=n.reduce((t,n)=>n.resolve(t,e),t)).resolve(e)}return t}cast(e,t={}){let n=this.resolve(pU({value:e},t)),r=n._cast(e,t);if(void 0!==e&&!1!==t.assert&&!0!==n.isType(r)){let i=pf(e),a=pf(r);throw TypeError(`The value of ${t.path||"field"} could not be cast to a value that satisfies the schema type: "${n._type}". 
+ +attempted value: ${i} +`+(a!==i?`result of cast: ${a}`:""))}return r}_cast(e,t){let n=void 0===e?e:this.transforms.reduce((t,n)=>n.call(this,t,e,this),e);return void 0===n&&(n=this.getDefault()),n}_validate(e,t={},n){let{sync:r,path:i,from:a=[],originalValue:o=e,strict:s=this.spec.strict,abortEarly:u=this.spec.abortEarly}=t,c=e;s||(c=this._cast(c,pU({assert:!1},t)));let l={value:c,path:i,options:t,originalValue:o,schema:this,label:this.spec.label,sync:r,from:a},f=[];this._typeError&&f.push(this._typeError),this._whitelistError&&f.push(this._whitelistError),this._blacklistError&&f.push(this._blacklistError),pA({args:l,value:c,path:i,sync:r,tests:f,endEarly:u},e=>{if(e)return void n(e,c);pA({tests:this.tests,args:l,path:i,sync:r,value:c,endEarly:u},n)})}validate(e,t,n){let r=this.resolve(pU({},t,{value:e}));return"function"==typeof n?r._validate(e,t,n):new Promise((n,i)=>r._validate(e,t,(e,t)=>{e?i(e):n(t)}))}validateSync(e,t){let n;return this.resolve(pU({},t,{value:e}))._validate(e,pU({},t,{sync:!0}),(e,t)=>{if(e)throw e;n=t}),n}isValid(e,t){return this.validate(e,t).then(()=>!0,e=>{if(pM.isError(e))return!1;throw e})}isValidSync(e,t){try{return this.validateSync(e,t),!0}catch(n){if(pM.isError(n))return!1;throw n}}_getDefault(){let e=this.spec.default;return null==e?e:"function"==typeof e?e.call(this):pr(e)}getDefault(e){return this.resolve(e||{})._getDefault()}default(e){return 0===arguments.length?this._getDefault():this.clone({default:e})}strict(e=!0){var t=this.clone();return t.spec.strict=e,t}_isPresent(e){return null!=e}defined(e=pd.defined){return this.test({message:e,name:"defined",exclusive:!0,test:e=>void 0!==e})}required(e=pd.required){return this.clone({presence:"required"}).withMutation(t=>t.test({message:e,name:"required",exclusive:!0,test(e){return this.schema._isPresent(e)}}))}notRequired(){var e=this.clone({presence:"optional"});return e.tests=e.tests.filter(e=>"required"!==e.OPTIONS.name),e}nullable(e=!0){return 
this.clone({nullable:!1!==e})}transform(e){var t=this.clone();return t.transforms.push(e),t}test(...e){let t;if(void 0===(t=1===e.length?"function"==typeof e[0]?{test:e[0]}:e[0]:2===e.length?{name:e[0],test:e[1]}:{name:e[0],message:e[1],test:e[2]}).message&&(t.message=pd.default),"function"!=typeof t.test)throw TypeError("`test` is a required parameters");let n=this.clone(),r=pj(t),i=t.exclusive||t.name&&!0===n.exclusiveTests[t.name];if(t.exclusive&&!t.name)throw TypeError("Exclusive tests must provide a unique `name` identifying the test");return t.name&&(n.exclusiveTests[t.name]=!!t.exclusive),n.tests=n.tests.filter(e=>e.OPTIONS.name!==t.name||!i&&e.OPTIONS.test!==r.OPTIONS.test),n.tests.push(r),n}when(e,t){Array.isArray(e)||"string"==typeof e||(t=e,e=".");let n=this.clone(),r=pk(e).map(e=>new pN(e));return r.forEach(e=>{e.isSibling&&n.deps.push(e.key)}),n.conditions.push(new pS(r,t)),n}typeError(e){var t=this.clone();return t._typeError=pj({message:e,name:"typeError",test(e){return!!(void 0===e||this.schema.isType(e))||this.createError({params:{type:this.schema._type}})}}),t}oneOf(e,t=pd.oneOf){var n=this.clone();return e.forEach(e=>{n._whitelist.add(e),n._blacklist.delete(e)}),n._whitelistError=pj({message:t,name:"oneOf",test(e){if(void 0===e)return!0;let t=this.schema._whitelist;return!!t.has(e,this.resolve)||this.createError({params:{values:t.toArray().join(", ")}})}}),n}notOneOf(e,t=pd.notOneOf){var n=this.clone();return e.forEach(e=>{n._blacklist.add(e),n._whitelist.delete(e)}),n._blacklistError=pj({message:t,name:"notOneOf",test(e){let t=this.schema._blacklist;return!t.has(e,this.resolve)||this.createError({params:{values:t.toArray().join(", ")}})}}),n}strip(e=!0){let t=this.clone();return t.spec.strip=e,t}describe(){let 
e=this.clone(),{label:t,meta:n}=e.spec,r={meta:n,label:t,type:e.type,oneOf:e._whitelist.describe(),notOneOf:e._blacklist.describe(),tests:e.tests.map(e=>({name:e.OPTIONS.name,params:e.OPTIONS.params})).filter((e,t,n)=>n.findIndex(t=>t.name===e.name)===t)};return r}}for(let p$ of(pH.prototype.__isYupSchema__=!0,["validate","validateSync"]))pH.prototype[`${p$}At`]=function(e,t,n={}){let{parent:r,parentPath:i,schema:a}=pY(this,e,t,n.context);return a[p$](r&&r[i],pU({},n,{parent:r,path:e}))};for(let pz of["equals","is"])pH.prototype[pz]=pH.prototype.oneOf;for(let pG of["not","nope"])pH.prototype[pG]=pH.prototype.notOneOf;pH.prototype.optional=pH.prototype.notRequired;let pW=pH;function pK(){return new pW}pK.prototype=pW.prototype;let pV=e=>null==e;function pq(){return new pZ}class pZ extends pH{constructor(){super({type:"boolean"}),this.withMutation(()=>{this.transform(function(e){if(!this.isType(e)){if(/^(true|1)$/i.test(String(e)))return!0;if(/^(false|0)$/i.test(String(e)))return!1}return e})})}_typeCheck(e){return e instanceof Boolean&&(e=e.valueOf()),"boolean"==typeof e}isTrue(e=pm.isValue){return this.test({message:e,name:"is-value",exclusive:!0,params:{value:"true"},test:e=>pV(e)||!0===e})}isFalse(e=pm.isValue){return this.test({message:e,name:"is-value",exclusive:!0,params:{value:"false"},test:e=>pV(e)||!1===e})}}pq.prototype=pZ.prototype;let 
pX=/^((([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+(\.([a-z]|\d|[!#\$%&'\*\+\-\/=\?\^_`{\|}~]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])+)*)|((\x22)((((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(([\x01-\x08\x0b\x0c\x0e-\x1f\x7f]|\x21|[\x23-\x5b]|[\x5d-\x7e]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(\\([\x01-\x09\x0b\x0c\x0d-\x7f]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF]))))*(((\x20|\x09)*(\x0d\x0a))?(\x20|\x09)+)?(\x22)))@((([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.)+(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))$/i,pJ=/^((https?|ftp):)?\/\/(((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:)*@)?(((\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.(\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5]))|((([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|\d|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.)+(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])*([a-z]|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])))\.?)(:\d*)?)(\/((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)+(\/(([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)*)*)?)?(\?((([a-z]|\d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)|[\uE000-\uF8FF]|\/|\?)*)?(\#((([a-z]|\
d|-|\.|_|~|[\u00A0-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF])|(%[\da-f]{2})|[!\$&'\(\)\*\+,;=]|:|@)|\/|\?)*)?$/i,pQ=/^(?:[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}|00000000-0000-0000-0000-000000000000)$/i,p1=e=>pV(e)||e===e.trim(),p0=({}).toString();function p2(){return new p3}class p3 extends pH{constructor(){super({type:"string"}),this.withMutation(()=>{this.transform(function(e){if(this.isType(e)||Array.isArray(e))return e;let t=null!=e&&e.toString?e.toString():e;return t===p0?e:t})})}_typeCheck(e){return e instanceof String&&(e=e.valueOf()),"string"==typeof e}_isPresent(e){return super._isPresent(e)&&!!e.length}length(e,t=ph.length){return this.test({message:t,name:"length",exclusive:!0,params:{length:e},test(t){return pV(t)||t.length===this.resolve(e)}})}min(e,t=ph.min){return this.test({message:t,name:"min",exclusive:!0,params:{min:e},test(t){return pV(t)||t.length>=this.resolve(e)}})}max(e,t=ph.max){return this.test({name:"max",exclusive:!0,message:t,params:{max:e},test(t){return pV(t)||t.length<=this.resolve(e)}})}matches(e,t){let n=!1,r,i;return t&&("object"==typeof t?{excludeEmptyString:n=!1,message:r,name:i}=t:r=t),this.test({name:i||"matches",message:r||ph.matches,params:{regex:e},test:t=>pV(t)||""===t&&n||-1!==t.search(e)})}email(e=ph.email){return this.matches(pX,{name:"email",message:e,excludeEmptyString:!0})}url(e=ph.url){return this.matches(pJ,{name:"url",message:e,excludeEmptyString:!0})}uuid(e=ph.uuid){return this.matches(pQ,{name:"uuid",message:e,excludeEmptyString:!1})}ensure(){return this.default("").transform(e=>null===e?"":e)}trim(e=ph.trim){return this.transform(e=>null!=e?e.trim():e).test({message:e,name:"trim",test:p1})}lowercase(e=ph.lowercase){return this.transform(e=>pV(e)?e:e.toLowerCase()).test({message:e,name:"string_case",exclusive:!0,test:e=>pV(e)||e===e.toLowerCase()})}uppercase(e=ph.uppercase){return 
this.transform(e=>pV(e)?e:e.toUpperCase()).test({message:e,name:"string_case",exclusive:!0,test:e=>pV(e)||e===e.toUpperCase()})}}p2.prototype=p3.prototype;let p4=e=>e!=+e;function p5(){return new p6}class p6 extends pH{constructor(){super({type:"number"}),this.withMutation(()=>{this.transform(function(e){let t=e;if("string"==typeof t){if(""===(t=t.replace(/\s/g,"")))return NaN;t=+t}return this.isType(t)?t:parseFloat(t)})})}_typeCheck(e){return e instanceof Number&&(e=e.valueOf()),"number"==typeof e&&!p4(e)}min(e,t=pp.min){return this.test({message:t,name:"min",exclusive:!0,params:{min:e},test(t){return pV(t)||t>=this.resolve(e)}})}max(e,t=pp.max){return this.test({message:t,name:"max",exclusive:!0,params:{max:e},test(t){return pV(t)||t<=this.resolve(e)}})}lessThan(e,t=pp.lessThan){return this.test({message:t,name:"max",exclusive:!0,params:{less:e},test(t){return pV(t)||tthis.resolve(e)}})}positive(e=pp.positive){return this.moreThan(0,e)}negative(e=pp.negative){return this.lessThan(0,e)}integer(e=pp.integer){return this.test({name:"integer",message:e,test:e=>pV(e)||Number.isInteger(e)})}truncate(){return this.transform(e=>pV(e)?e:0|e)}round(e){var t,n=["ceil","floor","round","trunc"];if("trunc"===(e=(null==(t=e)?void 0:t.toLowerCase())||"round"))return this.truncate();if(-1===n.indexOf(e.toLowerCase()))throw TypeError("Only valid options for round() are: "+n.join(", "));return this.transform(t=>pV(t)?t:Math[e](t))}}p5.prototype=p6.prototype;var p9=/^(\d{4}|[+\-]\d{6})(?:-?(\d{2})(?:-?(\d{2}))?)?(?:[ T]?(\d{2}):?(\d{2})(?::?(\d{2})(?:[,\.](\d{1,}))?)?(?:(Z)|([+\-])(\d{2})(?::?(\d{2}))?)?)?$/;function p8(e){var t,n,r=[1,4,5,6,7,10,11],i=0;if(n=p9.exec(e)){for(var a,o=0;a=r[o];++o)n[a]=+n[a]||0;n[2]=(+n[2]||1)-1,n[3]=+n[3]||1,n[7]=n[7]?String(n[7]).substr(0,3):0,(void 0===n[8]||""===n[8])&&(void 0===n[9]||""===n[9])?t=+new Date(n[1],n[2],n[3],n[4],n[5],n[6],n[7]):("Z"!==n[8]&&void 
0!==n[9]&&(i=60*n[10]+n[11],"+"===n[9]&&(i=0-i)),t=Date.UTC(n[1],n[2],n[3],n[4],n[5]+i,n[6],n[7]))}else t=Date.parse?Date.parse(e):NaN;return t}let p7=new Date(""),be=e=>"[object Date]"===Object.prototype.toString.call(e);function bt(){return new bn}class bn extends pH{constructor(){super({type:"date"}),this.withMutation(()=>{this.transform(function(e){return this.isType(e)?e:(e=p8(e),isNaN(e)?p7:new Date(e))})})}_typeCheck(e){return be(e)&&!isNaN(e.getTime())}prepareParam(e,t){let n;if(pN.isRef(e))n=e;else{let r=this.cast(e);if(!this._typeCheck(r))throw TypeError(`\`${t}\` must be a Date or a value that can be \`cast()\` to a Date`);n=r}return n}min(e,t=pb.min){let n=this.prepareParam(e,"min");return this.test({message:t,name:"min",exclusive:!0,params:{min:e},test(e){return pV(e)||e>=this.resolve(n)}})}max(e,t=pb.max){var n=this.prepareParam(e,"max");return this.test({message:t,name:"max",exclusive:!0,params:{max:e},test(e){return pV(e)||e<=this.resolve(n)}})}}bn.INVALID_DATE=p7,bt.prototype=bn.prototype,bt.INVALID_DATE=p7;var br=n(11865),bi=n.n(br),ba=n(68929),bo=n.n(ba),bs=n(67523),bu=n.n(bs),bc=n(94633),bl=n.n(bc);function bf(e,t=[]){let n=[],r=[];function i(e,i){var a=(0,pI.split)(e)[0];~r.indexOf(a)||r.push(a),~t.indexOf(`${i}-${a}`)||n.push([i,a])}for(let a in e)if(pw()(e,a)){let o=e[a];~r.indexOf(a)||r.push(a),pN.isRef(o)&&o.isSibling?i(o.path,a):p_(o)&&"deps"in o&&o.deps.forEach(e=>i(e,a))}return bl().array(r,n).reverse()}function bd(e,t){let n=1/0;return e.some((e,r)=>{var i;if((null==(i=t.path)?void 0:i.indexOf(e))!==-1)return n=r,!0}),n}function bh(e){return(t,n)=>bd(e,t)-bd(e,n)}function bp(){return(bp=Object.assign||function(e){for(var t=1;t"[object Object]"===Object.prototype.toString.call(e);function bm(e,t){let n=Object.keys(e.fields);return Object.keys(t).filter(e=>-1===n.indexOf(e))}let bg=bh([]);class bv extends 
pH{constructor(e){super({type:"object"}),this.fields=Object.create(null),this._sortErrors=bg,this._nodes=[],this._excludedEdges=[],this.withMutation(()=>{this.transform(function(e){if("string"==typeof e)try{e=JSON.parse(e)}catch(t){e=null}return this.isType(e)?e:null}),e&&this.shape(e)})}_typeCheck(e){return bb(e)||"function"==typeof e}_cast(e,t={}){var n;let r=super._cast(e,t);if(void 0===r)return this.getDefault();if(!this._typeCheck(r))return r;let i=this.fields,a=null!=(n=t.stripUnknown)?n:this.spec.noUnknown,o=this._nodes.concat(Object.keys(r).filter(e=>-1===this._nodes.indexOf(e))),s={},u=bp({},t,{parent:s,__validating:t.__validating||!1}),c=!1;for(let l of o){let f=i[l],d=pw()(r,l);if(f){let h,p=r[l];u.path=(t.path?`${t.path}.`:"")+l;let b="spec"in(f=f.resolve({value:p,context:t.context,parent:s}))?f.spec:void 0,m=null==b?void 0:b.strict;if(null==b?void 0:b.strip){c=c||l in r;continue}void 0!==(h=t.__validating&&m?r[l]:f.cast(r[l],u))&&(s[l]=h)}else d&&!a&&(s[l]=r[l]);s[l]!==r[l]&&(c=!0)}return c?s:r}_validate(e,t={},n){let r=[],{sync:i,from:a=[],originalValue:o=e,abortEarly:s=this.spec.abortEarly,recursive:u=this.spec.recursive}=t;a=[{schema:this,value:o},...a],t.__validating=!0,t.originalValue=o,t.from=a,super._validate(e,t,(e,c)=>{if(e){if(!pM.isError(e)||s)return void n(e,c);r.push(e)}if(!u||!bb(c)){n(r[0]||null,c);return}o=o||c;let l=this._nodes.map(e=>(n,r)=>{let i=-1===e.indexOf(".")?(t.path?`${t.path}.`:"")+e:`${t.path||""}["${e}"]`,s=this.fields[e];if(s&&"validate"in s){s.validate(c[e],bp({},t,{path:i,from:a,strict:!0,parent:c,originalValue:o[e]}),r);return}r(null)});pA({sync:i,tests:l,value:c,errors:r,endEarly:s,sort:this._sortErrors,path:t.path},n)})}clone(e){let t=super.clone(e);return t.fields=bp({},this.fields),t._nodes=this._nodes,t._excludedEdges=this._excludedEdges,t._sortErrors=this._sortErrors,t}concat(e){let t=super.concat(e),n=t.fields;for(let[r,i]of Object.entries(this.fields)){let a=n[r];void 0===a?n[r]=i:a instanceof pH&&i instanceof 
pH&&(n[r]=i.concat(a))}return t.withMutation(()=>t.shape(n))}getDefaultFromShape(){let e={};return this._nodes.forEach(t=>{let n=this.fields[t];e[t]="default"in n?n.getDefault():void 0}),e}_getDefault(){return"default"in this.spec?super._getDefault():this._nodes.length?this.getDefaultFromShape():void 0}shape(e,t=[]){let n=this.clone(),r=Object.assign(n.fields,e);if(n.fields=r,n._sortErrors=bh(Object.keys(r)),t.length){Array.isArray(t[0])||(t=[t]);let i=t.map(([e,t])=>`${e}-${t}`);n._excludedEdges=n._excludedEdges.concat(i)}return n._nodes=bf(r,n._excludedEdges),n}pick(e){let t={};for(let n of e)this.fields[n]&&(t[n]=this.fields[n]);return this.clone().withMutation(e=>(e.fields={},e.shape(t)))}omit(e){let t=this.clone(),n=t.fields;for(let r of(t.fields={},e))delete n[r];return t.withMutation(()=>t.shape(n))}from(e,t,n){let r=(0,pI.getter)(e,!0);return this.transform(i=>{if(null==i)return i;let a=i;return pw()(i,e)&&(a=bp({},i),n||delete a[e],a[t]=r(i)),a})}noUnknown(e=!0,t=pg.noUnknown){"string"==typeof e&&(t=e,e=!0);let n=this.test({name:"noUnknown",exclusive:!0,message:t,test(t){if(null==t)return!0;let n=bm(this.schema,t);return!e||0===n.length||this.createError({params:{unknown:n.join(", ")}})}});return n.spec.noUnknown=e,n}unknown(e=!0,t=pg.noUnknown){return this.noUnknown(!e,t)}transformKeys(e){return this.transform(t=>t&&bu()(t,(t,n)=>e(n)))}camelCase(){return this.transformKeys(bo())}snakeCase(){return this.transformKeys(bi())}constantCase(){return this.transformKeys(e=>bi()(e).toUpperCase())}describe(){let e=super.describe();return e.fields=pC()(this.fields,e=>e.describe()),e}}function by(e){return new bv(e)}function bw(){return(bw=Object.assign||function(e){for(var t=1;t{this.transform(function(e){if("string"==typeof e)try{e=JSON.parse(e)}catch(t){e=null}return this.isType(e)?e:null})})}_typeCheck(e){return Array.isArray(e)}get _subType(){return this.innerType}_cast(e,t){let n=super._cast(e,t);if(!this._typeCheck(n)||!this.innerType)return n;let 
r=!1,i=n.map((e,n)=>{let i=this.innerType.cast(e,bw({},t,{path:`${t.path||""}[${n}]`}));return i!==e&&(r=!0),i});return r?i:n}_validate(e,t={},n){var r,i;let a=[],o=t.sync,s=t.path,u=this.innerType,c=null!=(r=t.abortEarly)?r:this.spec.abortEarly,l=null!=(i=t.recursive)?i:this.spec.recursive,f=null!=t.originalValue?t.originalValue:e;super._validate(e,t,(e,r)=>{if(e){if(!pM.isError(e)||c)return void n(e,r);a.push(e)}if(!l||!u||!this._typeCheck(r)){n(a[0]||null,r);return}f=f||r;let i=Array(r.length);for(let d=0;du.validate(h,b,t)}pA({sync:o,path:s,value:r,errors:a,endEarly:c,tests:i},n)})}clone(e){let t=super.clone(e);return t.innerType=this.innerType,t}concat(e){let t=super.concat(e);return t.innerType=this.innerType,e.innerType&&(t.innerType=t.innerType?t.innerType.concat(e.innerType):e.innerType),t}of(e){let t=this.clone();if(!p_(e))throw TypeError("`array.of()` sub-schema must be a valid yup schema not: "+pf(e));return t.innerType=e,t}length(e,t=pv.length){return this.test({message:t,name:"length",exclusive:!0,params:{length:e},test(t){return pV(t)||t.length===this.resolve(e)}})}min(e,t){return t=t||pv.min,this.test({message:t,name:"min",exclusive:!0,params:{min:e},test(t){return pV(t)||t.length>=this.resolve(e)}})}max(e,t){return t=t||pv.max,this.test({message:t,name:"max",exclusive:!0,params:{max:e},test(t){return pV(t)||t.length<=this.resolve(e)}})}ensure(){return this.default(()=>[]).transform((e,t)=>this._typeCheck(e)?e:null==t?[]:[].concat(t))}compact(e){let t=e?(t,n,r)=>!e(t,n,r):e=>!!e;return this.transform(e=>null!=e?e.filter(t):e)}describe(){let e=super.describe();return this.innerType&&(e.innerType=this.innerType.describe()),e}nullable(e=!0){return super.nullable(e)}defined(){return super.defined()}required(e){return super.required(e)}}b_.prototype=bE.prototype;var bS=by().shape({name:p2().required("Required"),url:p2().required("Required")}),bk=function(e){var t=e.initialValues,n=e.onSubmit,r=e.submitButtonText,i=e.nameDisabled,a=void 0!==i&&i;return 
l.createElement(hM,{initialValues:t,validationSchema:bS,onSubmit:n},function(e){var t=e.isSubmitting;return l.createElement(l.Fragment,null,l.createElement(hj,{"data-testid":"bridge-form",noValidate:!0},l.createElement(d.Z,{container:!0,spacing:16},l.createElement(d.Z,{item:!0,xs:12,md:7},l.createElement(hR,{component:hJ,id:"name",name:"name",label:"Name",disabled:a,required:!0,fullWidth:!0,FormHelperTextProps:{"data-testid":"name-helper-text"}})),l.createElement(d.Z,{item:!0,xs:12,md:7},l.createElement(hR,{component:hJ,id:"url",name:"url",label:"Bridge URL",placeholder:"https://",required:!0,fullWidth:!0,FormHelperTextProps:{"data-testid":"url-helper-text"}})),l.createElement(d.Z,{item:!0,xs:12,md:7},l.createElement(d.Z,{container:!0,spacing:16},l.createElement(d.Z,{item:!0,xs:7},l.createElement(hR,{component:hJ,id:"minimumContractPayment",name:"minimumContractPayment",label:"Minimum Contract Payment",placeholder:"0",fullWidth:!0,inputProps:{min:0},FormHelperTextProps:{"data-testid":"minimumContractPayment-helper-text"}})),l.createElement(d.Z,{item:!0,xs:7},l.createElement(hR,{component:hJ,id:"confirmations",name:"confirmations",label:"Confirmations",placeholder:"0",type:"number",fullWidth:!0,inputProps:{min:0},FormHelperTextProps:{"data-testid":"confirmations-helper-text"}})))),l.createElement(d.Z,{item:!0,xs:12,md:7},l.createElement(ox.default,{variant:"contained",color:"primary",type:"submit",disabled:t,size:"large"},r)))))})},bx=function(e){var t=e.bridge,n=e.onSubmit,r={name:t.name,url:t.url,minimumContractPayment:t.minimumContractPayment,confirmations:t.confirmations};return l.createElement(iv,null,l.createElement(d.Z,{container:!0,spacing:40},l.createElement(d.Z,{item:!0,xs:12,md:11,lg:9},l.createElement(r9.Z,null,l.createElement(sf.Z,{title:"Edit Bridge",action:l.createElement(aL.Z,{component:tz,href:"/bridges/".concat(t.id)},"Cancel")}),l.createElement(aK.Z,null,l.createElement(bk,{nameDisabled:!0,initialValues:r,onSubmit:n,submitButtonText:"Save 
Bridge"}))))))};function bT(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&void 0!==arguments[0]&&arguments[0],t=e?function(){return l.createElement(x.default,{variant:"body1"},"Loading...")}:function(){return null};return{isLoading:e,LoadingPlaceholder:t}},ml=n(76023);function mf(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]=0||(i[n]=e[n]);return i}function mB(e,t){if(null==e)return{};var n,r,i=mY(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(r=0;r=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}function mU(e,t){(null==t||t>e.length)&&(t=e.length);for(var 
n=0,r=Array(t);n=4?[e[0],e[1],e[2],e[3],"".concat(e[0],".").concat(e[1]),"".concat(e[0],".").concat(e[2]),"".concat(e[0],".").concat(e[3]),"".concat(e[1],".").concat(e[0]),"".concat(e[1],".").concat(e[2]),"".concat(e[1],".").concat(e[3]),"".concat(e[2],".").concat(e[0]),"".concat(e[2],".").concat(e[1]),"".concat(e[2],".").concat(e[3]),"".concat(e[3],".").concat(e[0]),"".concat(e[3],".").concat(e[1]),"".concat(e[3],".").concat(e[2]),"".concat(e[0],".").concat(e[1],".").concat(e[2]),"".concat(e[0],".").concat(e[1],".").concat(e[3]),"".concat(e[0],".").concat(e[2],".").concat(e[1]),"".concat(e[0],".").concat(e[2],".").concat(e[3]),"".concat(e[0],".").concat(e[3],".").concat(e[1]),"".concat(e[0],".").concat(e[3],".").concat(e[2]),"".concat(e[1],".").concat(e[0],".").concat(e[2]),"".concat(e[1],".").concat(e[0],".").concat(e[3]),"".concat(e[1],".").concat(e[2],".").concat(e[0]),"".concat(e[1],".").concat(e[2],".").concat(e[3]),"".concat(e[1],".").concat(e[3],".").concat(e[0]),"".concat(e[1],".").concat(e[3],".").concat(e[2]),"".concat(e[2],".").concat(e[0],".").concat(e[1]),"".concat(e[2],".").concat(e[0],".").concat(e[3]),"".concat(e[2],".").concat(e[1],".").concat(e[0]),"".concat(e[2],".").concat(e[1],".").concat(e[3]),"".concat(e[2],".").concat(e[3],".").concat(e[0]),"".concat(e[2],".").concat(e[3],".").concat(e[1]),"".concat(e[3],".").concat(e[0],".").concat(e[1]),"".concat(e[3],".").concat(e[0],".").concat(e[2]),"".concat(e[3],".").concat(e[1],".").concat(e[0]),"".concat(e[3],".").concat(e[1],".").concat(e[2]),"".concat(e[3],".").concat(e[2],".").concat(e[0]),"".concat(e[3],".").concat(e[2],".").concat(e[1]),"".concat(e[0],".").concat(e[1],".").concat(e[2],".").concat(e[3]),"".concat(e[0],".").concat(e[1],".").concat(e[3],".").concat(e[2]),"".concat(e[0],".").concat(e[2],".").concat(e[1],".").concat(e[3]),"".concat(e[0],".").concat(e[2],".").concat(e[3],".").concat(e[1]),"".concat(e[0],".").concat(e[3],".").concat(e[1],".").concat(e[2]),"".concat(e[0],".").concat(e[
3],".").concat(e[2],".").concat(e[1]),"".concat(e[1],".").concat(e[0],".").concat(e[2],".").concat(e[3]),"".concat(e[1],".").concat(e[0],".").concat(e[3],".").concat(e[2]),"".concat(e[1],".").concat(e[2],".").concat(e[0],".").concat(e[3]),"".concat(e[1],".").concat(e[2],".").concat(e[3],".").concat(e[0]),"".concat(e[1],".").concat(e[3],".").concat(e[0],".").concat(e[2]),"".concat(e[1],".").concat(e[3],".").concat(e[2],".").concat(e[0]),"".concat(e[2],".").concat(e[0],".").concat(e[1],".").concat(e[3]),"".concat(e[2],".").concat(e[0],".").concat(e[3],".").concat(e[1]),"".concat(e[2],".").concat(e[1],".").concat(e[0],".").concat(e[3]),"".concat(e[2],".").concat(e[1],".").concat(e[3],".").concat(e[0]),"".concat(e[2],".").concat(e[3],".").concat(e[0],".").concat(e[1]),"".concat(e[2],".").concat(e[3],".").concat(e[1],".").concat(e[0]),"".concat(e[3],".").concat(e[0],".").concat(e[1],".").concat(e[2]),"".concat(e[3],".").concat(e[0],".").concat(e[2],".").concat(e[1]),"".concat(e[3],".").concat(e[1],".").concat(e[0],".").concat(e[2]),"".concat(e[3],".").concat(e[1],".").concat(e[2],".").concat(e[0]),"".concat(e[3],".").concat(e[2],".").concat(e[0],".").concat(e[1]),"".concat(e[3],".").concat(e[2],".").concat(e[1],".").concat(e[0])]:void 0}var mX={};function mJ(e){if(0===e.length||1===e.length)return e;var t=e.join(".");return mX[t]||(mX[t]=mZ(e)),mX[t]}function mQ(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=arguments.length>2?arguments[2]:void 0;return mJ(e.filter(function(e){return"token"!==e})).reduce(function(e,t){return mV({},e,n[t])},t)}function m1(e){return e.join(" ")}function m0(e,t){var n=0;return function(r){return n+=1,r.map(function(r,i){return m2({node:r,stylesheet:e,useInlineStyles:t,key:"code-segment-".concat(n,"-").concat(i)})})}}function m2(e){var t=e.node,n=e.stylesheet,r=e.style,i=void 0===r?{}:r,a=e.useInlineStyles,o=e.key,s=t.properties,u=t.type,c=t.tagName,f=t.value;if("text"===u)return f;if(c){var d,h=m0(n,a);if(a){var 
p=Object.keys(n).reduce(function(e,t){return t.split(".").forEach(function(t){e.includes(t)||e.push(t)}),e},[]),b=s.className&&s.className.includes("token")?["token"]:[],m=s.className&&b.concat(s.className.filter(function(e){return!p.includes(e)}));d=mV({},s,{className:m1(m)||void 0,style:mQ(s.className,Object.assign({},s.style,i),n)})}else d=mV({},s,{className:m1(s.className)});var g=h(t.children);return l.createElement(c,mq({key:o},d),g)}}let m3=function(e,t){return -1!==e.listLanguages().indexOf(t)};var m4=/\n/g;function m5(e){return e.match(m4)}function m6(e){var t=e.lines,n=e.startingLineNumber,r=e.style;return t.map(function(e,t){var i=t+n;return l.createElement("span",{key:"line-".concat(t),className:"react-syntax-highlighter-line-number",style:"function"==typeof r?r(i):r},"".concat(i,"\n"))})}function m9(e){var t=e.codeString,n=e.codeStyle,r=e.containerStyle,i=void 0===r?{float:"left",paddingRight:"10px"}:r,a=e.numberStyle,o=void 0===a?{}:a,s=e.startingLineNumber;return l.createElement("code",{style:Object.assign({},n,i)},m6({lines:t.replace(/\n$/,"").split("\n"),style:o,startingLineNumber:s}))}function m8(e){return"".concat(e.toString().length,".25em")}function m7(e,t){return{type:"element",tagName:"span",properties:{key:"line-number--".concat(e),className:["comment","linenumber","react-syntax-highlighter-line-number"],style:t},children:[{type:"text",value:e}]}}function ge(e,t,n){var r,i={display:"inline-block",minWidth:m8(n),paddingRight:"1em",textAlign:"right",userSelect:"none"};return mV({},i,"function"==typeof e?e(t):e)}function gt(e){var t=e.children,n=e.lineNumber,r=e.lineNumberStyle,i=e.largestLineNumber,a=e.showInlineLineNumbers,o=e.lineProps,s=void 0===o?{}:o,u=e.className,c=void 0===u?[]:u,l=e.showLineNumbers,f=e.wrapLongLines,d="function"==typeof s?s(n):s;if(d.className=c,n&&a){var h=ge(r,n,i);t.unshift(m7(n,h))}return f&l&&(d.style=mV({},d.style,{display:"flex"})),{type:"element",tagName:"span",properties:d,children:t}}function gn(e){for(var 
t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[],n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[],r=0;r2&&void 0!==arguments[2]?arguments[2]:[];return gt({children:e,lineNumber:t,lineNumberStyle:s,largestLineNumber:o,showInlineLineNumbers:i,lineProps:n,className:a,showLineNumbers:r,wrapLongLines:u})}function b(e,t){if(r&&t&&i){var n=ge(s,t,o);e.unshift(m7(t,n))}return e}function m(e,n){var r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[];return t||r.length>0?p(e,n,r):b(e,n)}for(var g=function(){var e=l[h],t=e.children[0].value;if(m5(t)){var n=t.split("\n");n.forEach(function(t,i){var o=r&&f.length+a,s={type:"text",value:"".concat(t,"\n")};if(0===i){var u=l.slice(d+1,h).concat(gt({children:[s],className:e.properties.className})),c=m(u,o);f.push(c)}else if(i===n.length-1){if(l[h+1]&&l[h+1].children&&l[h+1].children[0]){var p={type:"text",value:"".concat(t)},b=gt({children:[p],className:e.properties.className});l.splice(h+1,0,b)}else{var g=[s],v=m(g,o,e.properties.className);f.push(v)}}else{var y=[s],w=m(y,o,e.properties.className);f.push(w)}}),d=h}h++};h code[class*="language-"]':{background:"#f5f2f0",padding:".1em",borderRadius:".3em",whiteSpace:"normal"},comment:{color:"slategray"},prolog:{color:"slategray"},doctype:{color:"slategray"},cdata:{color:"slategray"},punctuation:{color:"#999"},namespace:{Opacity:".7"},property:{color:"#905"},tag:{color:"#905"},boolean:{color:"#905"},number:{color:"#905"},constant:{color:"#905"},symbol:{color:"#905"},deleted:{color:"#905"},selector:{color:"#690"},"attr-name":{color:"#690"},string:{color:"#690"},char:{color:"#690"},builtin:{color:"#690"},inserted:{color:"#690"},operator:{color:"#9a6e3a",background:"hsla(0, 0%, 100%, .5)"},entity:{color:"#9a6e3a",background:"hsla(0, 0%, 100%, .5)",cursor:"help"},url:{color:"#9a6e3a",background:"hsla(0, 0%, 100%, .5)"},".language-css .token.string":{color:"#9a6e3a",background:"hsla(0, 0%, 100%, .5)"},".style .token.string":{color:"#9a6e3a",background:"hsla(0, 
0%, 100%, .5)"},atrule:{color:"#07a"},"attr-value":{color:"#07a"},keyword:{color:"#07a"},function:{color:"#DD4A68"},"class-name":{color:"#DD4A68"},regex:{color:"#e90"},important:{color:"#e90",fontWeight:"bold"},variable:{color:"#e90"},bold:{fontWeight:"bold"},italic:{fontStyle:"italic"}};var gc=n(98695),gl=n.n(gc);let gf=["abap","abnf","actionscript","ada","agda","al","antlr4","apacheconf","apl","applescript","aql","arduino","arff","asciidoc","asm6502","aspnet","autohotkey","autoit","bash","basic","batch","bbcode","birb","bison","bnf","brainfuck","brightscript","bro","bsl","c","cil","clike","clojure","cmake","coffeescript","concurnas","cpp","crystal","csharp","csp","css-extras","css","cypher","d","dart","dax","dhall","diff","django","dns-zone-file","docker","ebnf","editorconfig","eiffel","ejs","elixir","elm","erb","erlang","etlua","excel-formula","factor","firestore-security-rules","flow","fortran","fsharp","ftl","gcode","gdscript","gedcom","gherkin","git","glsl","gml","go","graphql","groovy","haml","handlebars","haskell","haxe","hcl","hlsl","hpkp","hsts","http","ichigojam","icon","iecst","ignore","inform7","ini","io","j","java","javadoc","javadoclike","javascript","javastacktrace","jolie","jq","js-extras","js-templates","jsdoc","json","json5","jsonp","jsstacktrace","jsx","julia","keyman","kotlin","latex","latte","less","lilypond","liquid","lisp","livescript","llvm","lolcode","lua","makefile","markdown","markup-templating","markup","matlab","mel","mizar","mongodb","monkey","moonscript","n1ql","n4js","nand2tetris-hdl","naniscript","nasm","neon","nginx","nim","nix","nsis","objectivec","ocaml","opencl","oz","parigp","parser","pascal","pascaligo","pcaxis","peoplecode","perl","php-extras","php","phpdoc","plsql","powerquery","powershell","processing","prolog","properties","protobuf","pug","puppet","pure","purebasic","purescript","python","q","qml","qore","r","racket","reason","regex","renpy","rest","rip","roboconf","robotframework","ruby","rust","sas","sass","scala","sche
me","scss","shell-session","smali","smalltalk","smarty","sml","solidity","solution-file","soy","sparql","splunk-spl","sqf","sql","stan","stylus","swift","t4-cs","t4-templating","t4-vb","tap","tcl","textile","toml","tsx","tt2","turtle","twig","typescript","typoscript","unrealscript","vala","vbnet","velocity","verilog","vhdl","vim","visual-basic","warpscript","wasm","wiki","xeora","xml-doc","xojo","xquery","yaml","yang","zig"];var gd=gs(gl(),gu);gd.supportedLanguages=gf;let gh=gd;var gp=n(64566);function gb(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function gm(){var e=gb(["\n query FetchConfigV2 {\n configv2 {\n user\n effective\n }\n }\n"]);return gm=function(){return e},e}var gg=n0(gm()),gv=function(e){var t=e.children;return l.createElement(ii.Z,null,l.createElement(ie.default,{component:"th",scope:"row",colSpan:3},t))},gy=function(){return l.createElement(gv,null,"...")},gw=function(e){var t=e.children;return l.createElement(gv,null,t)},g_=function(e){var t=e.loading,n=e.toml,r=e.error,i=void 0===r?"":r,a=e.title,o=e.expanded;if(i)return l.createElement(gw,null,i);if(t)return l.createElement(gy,null);a||(a="TOML");var s={display:"block"};return l.createElement(x.default,null,l.createElement(mR.Z,{defaultExpanded:o},l.createElement(mj.Z,{expandIcon:l.createElement(gp.Z,null)},a),l.createElement(mF.Z,{style:s},l.createElement(gh,{language:"toml",style:gu},n))))},gE=function(){var e=ry(gg,{fetchPolicy:"cache-and-network"}),t=e.data,n=e.loading,r=e.error;return(null==t?void 0:t.configv2.effective)=="N/A"?l.createElement(l.Fragment,null,l.createElement(d.Z,{item:!0,xs:12},l.createElement(r9.Z,null,l.createElement(sf.Z,{title:"TOML Configuration"}),l.createElement(g_,{title:"V2 config dump:",error:null==r?void 0:r.message,loading:n,toml:null==t?void 
0:t.configv2.user,showHead:!0})))):l.createElement(l.Fragment,null,l.createElement(d.Z,{container:!0},l.createElement(d.Z,{item:!0,xs:12},l.createElement(r9.Z,null,l.createElement(sf.Z,{title:"TOML Configuration"}),l.createElement(g_,{title:"User specified:",error:null==r?void 0:r.message,loading:n,toml:null==t?void 0:t.configv2.user,showHead:!0,expanded:!0}),l.createElement(g_,{title:"Effective (with defaults):",error:null==r?void 0:r.message,loading:n,toml:null==t?void 0:t.configv2.effective,showHead:!0})))))},gS=n(34823),gk=function(e){return(0,b.createStyles)({cell:{paddingTop:1.5*e.spacing.unit,paddingBottom:1.5*e.spacing.unit}})},gx=(0,b.withStyles)(gk)(function(e){var t=e.classes,n=(0,A.I0)();(0,l.useEffect)(function(){n((0,ty.DQ)())});var r=(0,A.v9)(gS.N,A.wU);return l.createElement(r9.Z,null,l.createElement(sf.Z,{title:"Node"}),l.createElement(r8.Z,null,l.createElement(r7.Z,null,l.createElement(ii.Z,null,l.createElement(ie.default,{className:t.cell},l.createElement(x.default,null,"Version"),l.createElement(x.default,{variant:"subtitle1",color:"textSecondary"},r.version))),l.createElement(ii.Z,null,l.createElement(ie.default,{className:t.cell},l.createElement(x.default,null,"SHA"),l.createElement(x.default,{variant:"subtitle1",color:"textSecondary"},r.commitSHA))))))}),gT=function(){return l.createElement(iv,null,l.createElement(d.Z,{container:!0},l.createElement(d.Z,{item:!0,sm:12,md:8},l.createElement(d.Z,{container:!0},l.createElement(gE,null))),l.createElement(d.Z,{item:!0,sm:12,md:4},l.createElement(d.Z,{container:!0},l.createElement(d.Z,{item:!0,xs:12},l.createElement(gx,null)),l.createElement(d.Z,{item:!0,xs:12},l.createElement(mP,null)),l.createElement(d.Z,{item:!0,xs:12},l.createElement(mS,null))))))},gM=function(){return l.createElement(gT,null)},gO=function(){return l.createElement(gM,null)},gA=n(44431),gL=1e18,gC=function(e){return new gA.BigNumber(e).dividedBy(gL).toFixed(8)},gI=function(e){var t=e.keys,n=e.chainID,r=e.hideHeaderTitle;return 
l.createElement(l.Fragment,null,l.createElement(sf.Z,{title:!r&&"Account Balances",subheader:"Chain ID "+n}),l.createElement(aK.Z,null,l.createElement(w.default,{dense:!1,disablePadding:!0},t&&t.map(function(e,r){return l.createElement(l.Fragment,null,l.createElement(_.default,{disableGutters:!0,key:["acc-balance",n.toString(),r.toString()].join("-")},l.createElement(E.Z,{primary:l.createElement(l.Fragment,null,l.createElement(d.Z,{container:!0,spacing:16},l.createElement(d.Z,{item:!0,xs:12},l.createElement(ob,{title:"Address"}),l.createElement(om,{value:e.address})),l.createElement(d.Z,{item:!0,xs:6},l.createElement(ob,{title:"Native Token Balance"}),l.createElement(om,{value:e.ethBalance||"--"})),l.createElement(d.Z,{item:!0,xs:6},l.createElement(ob,{title:"PLI Balance"}),l.createElement(om,{value:e.linkBalance?gC(e.linkBalance):"--"}))))})),r+1s&&l.createElement(gU.Z,null,l.createElement(ii.Z,null,l.createElement(ie.default,{className:r.footer},l.createElement(aL.Z,{href:"/runs",component:tz},"View More"))))))});function vn(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function vr(){var e=vn(["\n ","\n query FetchRecentJobRuns($offset: Int, $limit: Int) {\n jobRuns(offset: $offset, limit: $limit) {\n results {\n ...RecentJobRunsPayload_ResultsFields\n }\n metadata {\n total\n }\n }\n }\n"]);return vr=function(){return e},e}var vi=5,va=n0(vr(),g7),vo=function(){var e=ry(va,{variables:{offset:0,limit:vi},fetchPolicy:"cache-and-network"}),t=e.data,n=e.loading,r=e.error;return l.createElement(vt,{data:t,errorMsg:null==r?void 0:r.message,loading:n,maxRunsSize:vi})},vs=function(e){return(0,b.createStyles)({style:{textAlign:"center",padding:2.5*e.spacing.unit,position:"fixed",left:"0",bottom:"0",width:"100%",borderRadius:0},bareAnchor:{color:e.palette.common.black,textDecoration:"none"}})},vu=(0,b.withStyles)(vs)(function(e){var 
t=e.classes,n=(0,A.v9)(gS.N,A.wU),r=(0,A.I0)();return(0,l.useEffect)(function(){r((0,ty.DQ)())}),l.createElement(ia.default,{className:t.style},l.createElement(x.default,null,"Plugin Node ",n.version," at commit"," ",l.createElement("a",{target:"_blank",rel:"noopener noreferrer",href:"https://github.com/goplugin/pluginv3.0/commit/".concat(n.commitSHA),className:t.bareAnchor},n.commitSHA)))}),vc=function(e){return(0,b.createStyles)({cell:{borderColor:e.palette.divider,borderTop:"1px solid",borderBottom:"none",paddingTop:2*e.spacing.unit,paddingBottom:2*e.spacing.unit,paddingLeft:2*e.spacing.unit},block:{display:"block"},overflowEllipsis:{textOverflow:"ellipsis",overflow:"hidden"}})},vl=(0,b.withStyles)(vc)(function(e){var t=e.classes,n=e.job;return l.createElement(ii.Z,null,l.createElement(ie.default,{scope:"row",className:t.cell},l.createElement(d.Z,{container:!0,spacing:0},l.createElement(d.Z,{item:!0,xs:12},l.createElement(ip,{href:"/jobs/".concat(n.id),classes:{linkContent:t.block}},l.createElement(x.default,{className:t.overflowEllipsis,variant:"body1",component:"span",color:"primary"},n.name||n.id))),l.createElement(d.Z,{item:!0,xs:12},l.createElement(x.default,{variant:"body1",color:"textSecondary"},"Created ",l.createElement(aA,{tooltip:!0},n.createdAt))))))});function vf(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function vd(){var e=vf(["\n fragment RecentJobsPayload_ResultsFields on Job {\n id\n name\n createdAt\n }\n"]);return vd=function(){return e},e}var vh=n0(vd()),vp=function(){return(0,b.createStyles)({cardHeader:{borderBottom:0},table:{tableLayout:"fixed"}})},vb=(0,b.withStyles)(vp)(function(e){var t,n,r=e.classes,i=e.data,a=e.errorMsg,o=e.loading;return l.createElement(r9.Z,null,l.createElement(sf.Z,{title:"Recent 
Jobs",className:r.cardHeader}),l.createElement(r8.Z,{className:r.table},l.createElement(r7.Z,null,l.createElement(gz,{visible:o}),l.createElement(gG,{visible:(null===(t=null==i?void 0:i.jobs.results)||void 0===t?void 0:t.length)===0},"No recently created jobs"),l.createElement(gH,{msg:a}),null===(n=null==i?void 0:i.jobs.results)||void 0===n?void 0:n.map(function(e,t){return l.createElement(vl,{job:e,key:t})}))))});function vm(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function vg(){var e=vm(["\n ","\n query FetchRecentJobs($offset: Int, $limit: Int) {\n jobs(offset: $offset, limit: $limit) {\n results {\n ...RecentJobsPayload_ResultsFields\n }\n }\n }\n"]);return vg=function(){return e},e}var vv=5,vy=n0(vg(),vh),vw=function(){var e=ry(vy,{variables:{offset:0,limit:vv},fetchPolicy:"cache-and-network"}),t=e.data,n=e.loading,r=e.error;return l.createElement(vb,{data:t,errorMsg:null==r?void 0:r.message,loading:n})},v_=function(){return l.createElement(iv,null,l.createElement(d.Z,{container:!0},l.createElement(d.Z,{item:!0,xs:8},l.createElement(vo,null)),l.createElement(d.Z,{item:!0,xs:4},l.createElement(d.Z,{container:!0},l.createElement(d.Z,{item:!0,xs:12},l.createElement(gB,null)),l.createElement(d.Z,{item:!0,xs:12},l.createElement(vw,null))))),l.createElement(vu,null))},vE=function(){return l.createElement(v_,null)},vS=function(){return l.createElement(vE,null)},vk=n(87239),vx=function(e){switch(e){case"DirectRequestSpec":return"Direct Request";case"FluxMonitorSpec":return"Flux Monitor";default:return e.replace(/Spec$/,"")}},vT=n(5022),vM=n(78718),vO=n.n(vM);function vA(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n1?t-1:0),r=1;r1?t-1:0),r=1;re.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&n.map(function(e){return 
l.createElement(ii.Z,{key:e.id,style:{cursor:"pointer"},onClick:function(){return r.push("/runs/".concat(e.id))}},l.createElement(ie.default,{className:t.idCell,scope:"row"},l.createElement("div",{className:t.runDetails},l.createElement(x.default,{variant:"h5",color:"primary",component:"span"},e.id))),l.createElement(ie.default,{className:t.stampCell},l.createElement(x.default,{variant:"body1",color:"textSecondary",className:t.stamp},"Created ",l.createElement(aA,{tooltip:!0},e.createdAt))),l.createElement(ie.default,{className:t.statusCell,scope:"row"},l.createElement(x.default,{variant:"body1",className:O()(t.status,yp(t,e.status))},e.status.toLowerCase())))})))}),ym=n(16839),yg=n.n(ym);function yv(e){var t=e.replace(/\w+\s*=\s*<([^>]|[\r\n])*>/g,""),n=yg().read(t),r=n.edges();return n.nodes().map(function(e){var t={id:e,parentIds:r.filter(function(t){return t.w===e}).map(function(e){return e.v})};return Object.keys(n.node(e)).length>0&&(t.attributes=n.node(e)),t})}var yy=n(94164),yw=function(e){var t=e.data,n=[];return(null==t?void 0:t.attributes)&&Object.keys(t.attributes).forEach(function(e){var r;n.push(l.createElement("div",{key:e},l.createElement(x.default,{variant:"body1",color:"textSecondary",component:"div"},l.createElement("b",null,e,":")," ",null===(r=t.attributes)||void 0===r?void 0:r[e])))}),l.createElement("div",null,t&&l.createElement(x.default,{variant:"body1",color:"textPrimary"},l.createElement("b",null,t.id)),n)},y_=n(73343),yE=n(3379),yS=n.n(yE);function yk(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);nwindow.innerWidth?u-r.getBoundingClientRect().width-a:u+a,n=c+r.getBoundingClientRect().height+i>window.innerHeight?c-r.getBoundingClientRect().height-a:c+a,r.style.opacity=String(1),r.style.top="".concat(n,"px"),r.style.left="".concat(t,"px"),r.style.zIndex=String(1)}},h=function(e){var t=document.getElementById("tooltip-d3-chart-".concat(e));t&&(t.style.opacity=String(0),t.style.zIndex=String(-1))};return 
l.createElement("div",{style:{fontFamily:"sans-serif",fontWeight:"normal"}},l.createElement(yy.kJ,{id:"task-list-graph-d3",data:i,config:s,onMouseOverNode:d,onMouseOutNode:h},"D3 chart"),n.map(function(e){return l.createElement("div",{key:"d3-tooltip-key-".concat(e.id),id:"tooltip-d3-chart-".concat(e.id),style:{position:"absolute",opacity:"0",border:"1px solid rgba(0, 0, 0, 0.1)",padding:y_.r.spacing.unit,background:"white",borderRadius:5,zIndex:-1,inlineSize:"min-content"}},l.createElement(yw,{data:e}))}))};function yC(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);nyB&&l.createElement("div",{className:t.runDetails},l.createElement(aL.Z,{href:"/jobs/".concat(n.id,"/runs"),component:tz},"View more")))),l.createElement(d.Z,{item:!0,xs:12,sm:6},l.createElement(yY,{observationSource:n.observationSource})))});function y$(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&void 0!==arguments[0]?arguments[0]:"";try{return vT.parse(e),!0}catch(t){return!1}})}),wK=function(e){var t=e.initialValues,n=e.onSubmit,r=e.onTOMLChange;return l.createElement(hM,{initialValues:t,validationSchema:wW,onSubmit:n},function(e){var t=e.isSubmitting,n=e.values;return r&&r(n.toml),l.createElement(hj,{"data-testid":"job-form",noValidate:!0},l.createElement(d.Z,{container:!0,spacing:16},l.createElement(d.Z,{item:!0,xs:12},l.createElement(hR,{component:hJ,id:"toml",name:"toml",label:"Job Spec (TOML)",required:!0,fullWidth:!0,multiline:!0,rows:10,rowsMax:25,variant:"outlined",autoComplete:"off",FormHelperTextProps:{"data-testid":"toml-helper-text"}})),l.createElement(d.Z,{item:!0,xs:12,md:7},l.createElement(ox.default,{variant:"contained",color:"primary",type:"submit",disabled:t,size:"large"},"Create 
Job"))))})},wV=n(50109),wq="persistSpec";function wZ(e){var t=e.query,n=new URLSearchParams(t).get("definition");return n?(wV.t8(wq,n),{toml:n}):{toml:wV.U2(wq)||""}}var wX=function(e){var t=e.onSubmit,n=e.onTOMLChange,r=wZ({query:(0,h.TH)().search}),i=function(e){var t=e.replace(/[\u200B-\u200D\uFEFF]/g,"");wV.t8("".concat(wq),t),n&&n(t)};return l.createElement(r9.Z,null,l.createElement(sf.Z,{title:"New Job"}),l.createElement(aK.Z,null,l.createElement(wK,{initialValues:r,onSubmit:t,onTOMLChange:i})))};function wJ(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var n=0,r=Array(t);n1&&void 0!==arguments[1]?arguments[1]:{},n=t.start,r=void 0===n?6:n,i=t.end,a=void 0===i?4:i;return e.substring(0,r)+"..."+e.substring(e.length-a)}function _O(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]0&&void 0!==arguments[0]?arguments[0]:{};return ry(_K,e)},_q=function(){var e=_V({fetchPolicy:"cache-and-network"}),t=e.data,n=e.loading,r=e.error,i=e.refetch;return l.createElement(_H,{loading:n,data:t,errorMsg:null==r?void 0:r.message,refetch:i})},_Z=function(e){var t=e.csaKey;return l.createElement(ii.Z,{hover:!0},l.createElement(ie.default,null,l.createElement(x.default,{variant:"body1"},t.publicKey," ",l.createElement(_T,{data:t.publicKey}))))};function _X(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function _J(){var e=_X(["\n fragment CSAKeysPayload_ResultsFields on CSAKey {\n id\n publicKey\n }\n"]);return _J=function(){return e},e}var _Q=n0(_J()),_1=function(e){var t,n,r,i=e.data,a=e.errorMsg,o=e.loading,s=e.onCreate;return 
l.createElement(r9.Z,null,l.createElement(sf.Z,{action:(null===(t=null==i?void 0:i.csaKeys.results)||void 0===t?void 0:t.length)===0&&l.createElement(ox.default,{variant:"outlined",color:"primary",onClick:s},"New CSA Key"),title:"CSA Key",subheader:"Manage your CSA Key"}),l.createElement(r8.Z,null,l.createElement(it.Z,null,l.createElement(ii.Z,null,l.createElement(ie.default,null,"Public Key"))),l.createElement(r7.Z,null,l.createElement(gz,{visible:o}),l.createElement(gG,{visible:(null===(n=null==i?void 0:i.csaKeys.results)||void 0===n?void 0:n.length)===0}),l.createElement(gH,{msg:a}),null===(r=null==i?void 0:i.csaKeys.results)||void 0===r?void 0:r.map(function(e,t){return l.createElement(_Z,{csaKey:e,key:t})}))))};function _0(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]0&&void 0!==arguments[0]?arguments[0]:{};return ry(EO,e)};function EL(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]0&&void 0!==arguments[0]?arguments[0]:{};return ry(EQ,e)},E4=function(){return os(E1)},E5=function(){return os(E0)},E6=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return ry(E2,e)};function E9(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]0&&void 
0!==arguments[0]?arguments[0]:{};return ry(SV,e)};function SZ(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var n=0,r=Array(t);n=0)&&Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}function kq(e,t){if(null==e)return{};var n,r,i={},a=Object.keys(e);for(r=0;r=0||(i[n]=e[n]);return i}var kZ=function(e){var t=e.run,n=l.useMemo(function(){var e=t.inputs,n=t.outputs,r=t.taskRuns,i=kV(t,["inputs","outputs","taskRuns"]),a={};try{a=JSON.parse(e)}catch(o){a={}}return kK(kG({},i),{inputs:a,outputs:n,taskRuns:r})},[t]);return l.createElement(r9.Z,null,l.createElement(aK.Z,null,l.createElement(k$,{object:n})))};function kX(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function kJ(e){for(var t=1;t0&&l.createElement(ki,{errors:t.allErrors})),l.createElement(d.Z,{item:!0,xs:12},l.createElement(h.rs,null,l.createElement(h.AW,{path:"".concat(n,"/json")},l.createElement(kZ,{run:t})),l.createElement(h.AW,{path:n},t.taskRuns.length>0&&l.createElement(kP,{taskRuns:t.taskRuns,observationSource:t.job.observationSource}))))))))};function k9(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function k8(){var e=k9(["\n ","\n query FetchJobRun($id: ID!) {\n jobRun(id: $id) {\n __typename\n ... on JobRun {\n ...JobRunPayload_Fields\n }\n ... 
on NotFoundError {\n message\n }\n }\n }\n"]);return k8=function(){return e},e}var k7=n0(k8(),k5),xe=function(){var e=ry(k7,{variables:{id:(0,h.UO)().id}}),t=e.data,n=e.loading,r=e.error;if(n)return l.createElement(ij,null);if(r)return l.createElement(iN,{error:r});var i=null==t?void 0:t.jobRun;switch(null==i?void 0:i.__typename){case"JobRun":return l.createElement(k6,{run:i});case"NotFoundError":return l.createElement(oo,null);default:return null}};function xt(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function xn(){var e=xt(["\n fragment JobRunsPayload_ResultsFields on JobRun {\n id\n allErrors\n createdAt\n finishedAt\n status\n job {\n id\n }\n }\n"]);return xn=function(){return e},e}var xr=n0(xn()),xi=function(e){var t=e.loading,n=e.data,r=e.page,i=e.pageSize,a=(0,h.k6)(),o=l.useMemo(function(){return null==n?void 0:n.jobRuns.results.map(function(e){var t,n=e.allErrors,r=e.id,i=e.createdAt;return{id:r,createdAt:i,errors:n,finishedAt:e.finishedAt,status:e.status}})},[n]);return l.createElement(iv,null,l.createElement(d.Z,{container:!0,spacing:32},l.createElement(d.Z,{item:!0,xs:12},l.createElement(iw,null,"Job Runs")),t&&l.createElement(ij,null),n&&o&&l.createElement(d.Z,{item:!0,xs:12},l.createElement(r9.Z,null,l.createElement(yb,{runs:o}),l.createElement(ir.Z,{component:"div",count:n.jobRuns.metadata.total,rowsPerPage:i,rowsPerPageOptions:[i],page:r-1,onChangePage:function(e,t){a.push("/runs?page=".concat(t+1,"&per=").concat(i))},onChangeRowsPerPage:function(){},backIconButtonProps:{"aria-label":"prev-page"},nextIconButtonProps:{"aria-label":"next-page"}})))))};function xa(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function xo(){var e=xa(["\n ","\n query FetchJobRuns($offset: Int, $limit: Int) {\n jobRuns(offset: $offset, limit: $limit) {\n results {\n ...JobRunsPayload_ResultsFields\n }\n metadata {\n total\n }\n }\n }\n"]);return 
xo=function(){return e},e}var xs=n0(xo(),xr),xu=function(){var e=iF(),t=parseInt(e.get("page")||"1",10),n=parseInt(e.get("per")||"25",10),r=ry(xs,{variables:{offset:(t-1)*n,limit:n},fetchPolicy:"cache-and-network"}),i=r.data,a=r.loading,o=r.error;return o?l.createElement(iN,{error:o}):l.createElement(xi,{loading:a,data:i,page:t,pageSize:n})},xc=function(){var e=(0,h.$B)().path;return l.createElement(h.rs,null,l.createElement(h.AW,{exact:!0,path:e},l.createElement(xu,null)),l.createElement(h.AW,{path:"".concat(e,"/:id")},l.createElement(xe,null)))},xl=by().shape({name:p2().required("Required"),uri:p2().required("Required"),publicKey:p2().required("Required")}),xf=function(e){var t=e.initialValues,n=e.onSubmit;return l.createElement(hM,{initialValues:t,validationSchema:xl,onSubmit:n},function(e){var t=e.isSubmitting,n=e.submitForm;return l.createElement(hj,{"data-testid":"feeds-manager-form"},l.createElement(d.Z,{container:!0,spacing:16},l.createElement(d.Z,{item:!0,xs:12,md:6},l.createElement(hR,{component:hJ,id:"name",name:"name",label:"Name",required:!0,fullWidth:!0,FormHelperTextProps:{"data-testid":"name-helper-text"}})),l.createElement(d.Z,{item:!0,xs:!1,md:6}),l.createElement(d.Z,{item:!0,xs:12,md:6},l.createElement(hR,{component:hJ,id:"uri",name:"uri",label:"URI",required:!0,fullWidth:!0,helperText:"Provided by the Feeds Manager operator",FormHelperTextProps:{"data-testid":"uri-helper-text"}})),l.createElement(d.Z,{item:!0,xs:12,md:6},l.createElement(hR,{component:hJ,id:"publicKey",name:"publicKey",label:"Public Key",required:!0,fullWidth:!0,helperText:"Provided by the Feeds Manager operator",FormHelperTextProps:{"data-testid":"publicKey-helper-text"}})),l.createElement(d.Z,{item:!0,xs:12},l.createElement(ox.default,{variant:"contained",color:"primary",disabled:t,onClick:n},"Submit"))))})},xd=function(e){var t=e.data,n=e.onSubmit,r={name:t.name,uri:t.uri,publicKey:t.publicKey};return 
l.createElement(d.Z,{container:!0},l.createElement(d.Z,{item:!0,xs:12,md:11,lg:9},l.createElement(r9.Z,null,l.createElement(sf.Z,{title:"Edit Feeds Manager"}),l.createElement(aK.Z,null,l.createElement(xf,{initialValues:r,onSubmit:n})))))};function xh(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function xp(){var e=xh(["\n query FetchFeedsManagers {\n feedsManagers {\n results {\n __typename\n id\n name\n uri\n publicKey\n isConnectionActive\n createdAt\n }\n }\n }\n"]);return xp=function(){return e},e}var xb=n0(xp()),xm=function(){return ry(xb)};function xg(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]0&&void 0!==arguments[0]?arguments[0]:{};return ry(xZ,e)};function xJ(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);n0?n.feedsManagers.results[0]:void 0;return n&&a?l.createElement(TH,{manager:a}):l.createElement(h.l_,{to:{pathname:"/feeds_manager/new",state:{from:e}}})},Tz={name:"Plugin Feeds Manager",uri:"",publicKey:""},TG=function(e){var t=e.onSubmit;return l.createElement(d.Z,{container:!0},l.createElement(d.Z,{item:!0,xs:12,md:11,lg:9},l.createElement(r9.Z,null,l.createElement(sf.Z,{title:"Register Feeds Manager"}),l.createElement(aK.Z,null,l.createElement(xf,{initialValues:Tz,onSubmit:t})))))};function TW(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var n=0,r=Array(t);nt.version?e:t})},[o]),g=l.useMemo(function(){return Mp(o).sort(function(e,t){return 
t.version-e.version})},[o]),v=function(e,t,n){switch(e){case"PENDING":return l.createElement(l.Fragment,null,l.createElement(ox.default,{variant:"text",color:"secondary",onClick:function(){return b("reject",t)}},"Reject"),m.id===t&&"DELETED"!==n.status&&"REVOKED"!==n.status&&l.createElement(ox.default,{variant:"contained",color:"primary",onClick:function(){return b("approve",t)}},"Approve"),m.id===t&&"DELETED"===n.status&&n.pendingUpdate&&l.createElement(l.Fragment,null,l.createElement(ox.default,{variant:"contained",color:"primary",onClick:function(){return b("cancel",t)}},"Cancel"),l.createElement(x.default,{color:"error"},"This proposal was deleted. Cancel the spec to delete any running jobs")));case"APPROVED":return l.createElement(l.Fragment,null,l.createElement(ox.default,{variant:"contained",onClick:function(){return b("cancel",t)}},"Cancel"),"DELETED"===n.status&&n.pendingUpdate&&l.createElement(x.default,{color:"error"},"This proposal was deleted. Cancel the spec to delete any running jobs"));case"CANCELLED":if(m.id===t&&"DELETED"!==n.status&&"REVOKED"!==n.status)return l.createElement(ox.default,{variant:"contained",color:"primary",onClick:function(){return b("approve",t)}},"Approve");return null;default:return null}};return l.createElement("div",null,g.map(function(e,n){return l.createElement(mR.Z,{defaultExpanded:0===n,key:n},l.createElement(mj.Z,{expandIcon:l.createElement(gp.Z,null)},l.createElement(x.default,{className:t.versionText},"Version ",e.version),l.createElement(Eu.Z,{label:e.status,color:"APPROVED"===e.status?"primary":"default",variant:"REJECTED"===e.status||"CANCELLED"===e.status?"outlined":"default"}),l.createElement("div",{className:t.proposedAtContainer},l.createElement(x.default,null,"Proposed 
",l.createElement(aA,{tooltip:!0},e.createdAt)))),l.createElement(mF.Z,{className:t.expansionPanelDetails},l.createElement("div",{className:t.actions},l.createElement("div",{className:t.editContainer},0===n&&("PENDING"===e.status||"CANCELLED"===e.status)&&"DELETED"!==s.status&&"REVOKED"!==s.status&&l.createElement(ox.default,{variant:"contained",onClick:function(){return p(!0)}},"Edit")),l.createElement("div",{className:t.actionsContainer},v(e.status,e.id,s))),l.createElement(gh,{language:"toml",style:gu,"data-testid":"codeblock"},e.definition)))}),l.createElement(oI,{open:null!=c,title:c?My[c.action].title:"",body:c?My[c.action].body:"",onConfirm:function(){if(c){switch(c.action){case"approve":n(c.id);break;case"cancel":r(c.id);break;case"reject":i(c.id)}f(null)}},cancelButtonText:"Cancel",onCancel:function(){return f(null)}}),l.createElement(Mi,{open:h,onClose:function(){return p(!1)},initialValues:{definition:m.definition,id:m.id},onSubmit:a}))});function M_(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function ME(){var e=M_(["\n ","\n fragment JobProposalPayloadFields on JobProposal {\n id\n externalJobID\n remoteUUID\n jobID\n specs {\n ...JobProposal_SpecsFields\n }\n status\n pendingUpdate\n }\n"]);return ME=function(){return e},e}var MS=n0(ME(),Mg),Mk=function(e){var t=e.onApprove,n=e.onCancel,r=e.onReject,i=e.onUpdateSpec,a=e.proposal;return l.createElement(iv,null,l.createElement(d.Z,{container:!0,spacing:32},l.createElement(d.Z,{item:!0,xs:9},l.createElement(iw,null,"Job Proposal #",a.id))),l.createElement(T8,{proposal:a}),l.createElement(d.Z,{container:!0,spacing:32},l.createElement(d.Z,{item:!0,xs:9},l.createElement(TU,null,"Specs"))),l.createElement(d.Z,{container:!0,spacing:32},l.createElement(d.Z,{item:!0,xs:12},l.createElement(Mw,{proposal:a,specs:a.specs,onReject:r,onApprove:t,onCancel:n,onUpdateSpec:i}))))};function Mx(e,t){(null==t||t>e.length)&&(t=e.length);for(var 
n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]e.length)&&(t=e.length);for(var n=0,r=Array(t);ne.length)&&(t=e.length);for(var n=0,r=Array(t);nU,tA:()=>$,KL:()=>H,Iw:()=>V,DQ:()=>W,cB:()=>T,LO:()=>M,t5:()=>k,qt:()=>x,Jc:()=>C,L7:()=>Y,EO:()=>B});var r,i,a=n(66289),o=n(41800),s=n.n(o),u=n(67932);(i=r||(r={})).IN_PROGRESS="in_progress",i.PENDING_INCOMING_CONFIRMATIONS="pending_incoming_confirmations",i.PENDING_CONNECTION="pending_connection",i.PENDING_BRIDGE="pending_bridge",i.PENDING_SLEEP="pending_sleep",i.ERRORED="errored",i.COMPLETED="completed";var c=n(87013),l=n(19084),f=n(34823);function d(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]j,v2:()=>F});var r=n(66289);function i(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}var a="/sessions",o="/sessions",s=function e(t){var n=this;i(this,e),this.api=t,this.createSession=function(e){return n.create(e)},this.destroySession=function(){return n.destroy()},this.create=this.api.createResource(a),this.destroy=this.api.deleteResource(o)};function u(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}var c="/v2/bulk_delete_runs",l=function e(t){var n=this;u(this,e),this.api=t,this.bulkDeleteJobRuns=function(e){return n.destroy(e)},this.destroy=this.api.deleteResource(c)};function f(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}var d="/v2/chains/evm",h="".concat(d,"/:id"),p=function e(t){var n=this;f(this,e),this.api=t,this.getChains=function(){return n.index()},this.createChain=function(e){return n.create(e)},this.destroyChain=function(e){return n.destroy(void 0,{id:e})},this.updateChain=function(e,t){return 
n.update(t,{id:e})},this.index=this.api.fetchResource(d),this.create=this.api.createResource(d),this.destroy=this.api.deleteResource(h),this.update=this.api.updateResource(h)};function b(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}var m="/v2/keys/evm/chain",g=function e(t){var n=this;b(this,e),this.api=t,this.chain=function(e){var t=new URLSearchParams;t.append("address",e.address),t.append("evmChainID",e.evmChainID),null!==e.nextNonce&&t.append("nextNonce",e.nextNonce),null!==e.abandon&&t.append("abandon",String(e.abandon)),null!==e.enabled&&t.append("enabled",String(e.enabled));var r=m+"?"+t.toString();return n.api.createResource(r)()}};function v(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}var y="/v2/jobs",w="".concat(y,"/:specId/runs"),_=function e(t){var n=this;v(this,e),this.api=t,this.createJobRunV2=function(e,t){return n.post(t,{specId:e})},this.post=this.api.createResource(w,!0)};function E(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}var S="/v2/log",k=function e(t){var n=this;E(this,e),this.api=t,this.getLogConfig=function(){return n.show()},this.updateLogConfig=function(e){return n.update(e)},this.show=this.api.fetchResource(S),this.update=this.api.updateResource(S)};function x(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}var T="/v2/nodes",M=function e(t){var n=this;x(this,e),this.api=t,this.getNodes=function(){return n.index()},this.createNode=function(e){return n.create(e)},this.index=this.api.fetchResource(T),this.create=this.api.createResource(T)};function O(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}var A="/v2/enroll_webauthn",L=function e(t){var n=this;O(this,e),this.api=t,this.beginKeyRegistration=function(e){return n.create(e)},this.finishKeyRegistration=function(e){return n.put(e)},this.create=this.api.fetchResource(A),this.put=this.api.createResource(A)};function 
C(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}var I="/v2/build_info",D=function e(t){var n=this;C(this,e),this.api=t,this.show=function(){return n.api.GET(I)()}};function N(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}var P=function e(t){N(this,e),this.api=t,this.buildInfo=new D(this.api),this.bulkDeleteRuns=new l(this.api),this.chains=new p(this.api),this.logConfig=new k(this.api),this.nodes=new M(this.api),this.jobs=new _(this.api),this.webauthn=new L(this.api),this.evmKeys=new g(this.api)},R=new r.V0({base:void 0}),j=new s(R),F=new P(R)},1398(e,t,n){"use strict";n.d(t,{Z:()=>d});var r=n(67294),i=n(32316),a=n(83638),o=n(94184),s=n.n(o);function u(){return(u=Object.assign||function(e){for(var t=1;tc});var r=n(67294),i=n(32316);function a(){return(a=Object.assign||function(e){for(var t=1;tx,jK:()=>v});var r=n(67294),i=n(55977),a=n(45697),o=n.n(a),s=n(82204),u=n(71426),c=n(94184),l=n.n(c),f=n(32316),d=function(e){var t=e.palette.success||{},n=e.palette.warning||{};return{base:{paddingLeft:5*e.spacing.unit,paddingRight:5*e.spacing.unit},success:{backgroundColor:t.main,color:t.contrastText},error:{backgroundColor:e.palette.error.dark,color:e.palette.error.contrastText},warning:{backgroundColor:n.contrastText,color:n.main}}},h=function(e){var t,n=e.success,r=e.error,i=e.warning,a=e.classes,o=e.className;return n?t=a.success:r?t=a.error:i&&(t=a.warning),l()(a.base,o,t)},p=function(e){return r.createElement(s.Z,{className:h(e),square:!0},r.createElement(u.default,{variant:"body2",color:"inherit",component:"div"},e.children))};p.defaultProps={success:!1,error:!1,warning:!1},p.propTypes={success:o().bool,error:o().bool,warning:o().bool};let b=(0,f.withStyles)(d)(p);var m=function(){return r.createElement(r.Fragment,null,"Unhandled error. 
Please help us by opening a"," ",r.createElement("a",{href:"https://github.com/goplugin/pluginv3.0/issues/new"},"bug report"))};let g=m;function v(e){return"string"==typeof e?e:e.component?e.component(e.props):r.createElement(g,null)}function y(e,t){var n;return n="string"==typeof e?e:e.component?e.component(e.props):r.createElement(g,null),r.createElement("p",{key:t},n)}var w=function(e){var t=e.notifications;return r.createElement(b,{error:!0},t.map(y))},_=function(e){var t=e.notifications;return r.createElement(b,{success:!0},t.map(y))},E=function(e){var t=e.errors,n=e.successes;return r.createElement("div",null,(null==t?void 0:t.length)>0&&r.createElement(w,{notifications:t}),n.length>0&&r.createElement(_,{notifications:n}))},S=function(e){return{errors:e.notifications.errors,successes:e.notifications.successes}},k=(0,i.$j)(S)(E);let x=k},9409(e,t,n){"use strict";n.d(t,{ZP:()=>j});var r=n(67294),i=n(55977),a=n(47886),o=n(32316),s=n(1398),u=n(82204),c=n(30060),l=n(71426),f=n(60520),d=n(97779),h=n(57209),p=n(26842),b=n(3950),m=n(5536),g=n(45697),v=n.n(g);let y=n.p+"9f6d832ef97e8493764e.svg";function w(){return(w=Object.assign||function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&_.map(function(e,t){return 
r.createElement(d.Z,{item:!0,xs:12,key:t},r.createElement(u.Z,{raised:!1,className:v.error},r.createElement(c.Z,null,r.createElement(l.default,{variant:"body1",className:v.errorText},(0,b.jK)(e)))))}),r.createElement(d.Z,{item:!0,xs:12},r.createElement(f.Z,{id:"email",label:"Email",margin:"normal",value:n,onChange:m("email"),error:_.length>0,variant:"outlined",fullWidth:!0})),r.createElement(d.Z,{item:!0,xs:12},r.createElement(f.Z,{id:"password",label:"Password",type:"password",autoComplete:"password",margin:"normal",value:h,onChange:m("password"),error:_.length>0,variant:"outlined",fullWidth:!0})),r.createElement(d.Z,{item:!0,xs:12},r.createElement(d.Z,{container:!0,spacing:0,justify:"center"},r.createElement(d.Z,{item:!0},r.createElement(s.Z,{type:"submit",variant:"primary"},"Access Account")))),y&&r.createElement(l.default,{variant:"body1",color:"textSecondary"},"Signing in...")))))))},P=function(e){return{fetching:e.authentication.fetching,authenticated:e.authentication.allowed,errors:e.notifications.errors}},R=(0,i.$j)(P,x({submitSignIn:p.L7}))(N);let j=(0,h.wU)(e)((0,o.withStyles)(D)(R))},16353(e,t,n){"use strict";n.d(t,{ZP:()=>H,rH:()=>U});var r,i=n(55977),a=n(15857),o=n(9541),s=n(19084);function u(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function c(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:h,t=arguments.length>1?arguments[1]:void 0;switch(t.type){case s.Mk.RECEIVE_SIGNOUT_SUCCESS:case s.Mk.RECEIVE_SIGNIN_SUCCESS:var n={allowed:t.authenticated};return o.Ks(n),f(c({},e,n),{errors:[]});case s.Mk.RECEIVE_SIGNIN_FAIL:var r={allowed:!1};return o.Ks(r),f(c({},e,r),{errors:[]});case s.Mk.RECEIVE_SIGNIN_ERROR:case s.Mk.RECEIVE_SIGNOUT_ERROR:var i={allowed:!1};return o.Ks(i),f(c({},e,i),{errors:t.errors||[]});default:return e}};let b=p;function m(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function g(e){for(var t=1;t0&&void 
0!==arguments[0]?arguments[0]:_,t=arguments.length>1?arguments[1]:void 0;return t.type?t.type.startsWith(r.REQUEST)?y(g({},e),{count:e.count+1}):t.type.startsWith(r.RECEIVE)?y(g({},e),{count:Math.max(e.count-1,0)}):t.type.startsWith(r.RESPONSE)?y(g({},e),{count:Math.max(e.count-1,0)}):t.type===s.di.REDIRECT?y(g({},e),{count:0}):e:e};let S=E;function k(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function x(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:O,t=arguments.length>1?arguments[1]:void 0;switch(t.type){case s.di.MATCH_ROUTE:return M(x({},O),{currentUrl:t.pathname});case s.Ih.NOTIFY_SUCCESS:var n={component:t.component,props:t.props};return M(x({},e),{successes:[n],errors:[]});case s.Ih.NOTIFY_SUCCESS_MSG:return M(x({},e),{successes:[t.msg],errors:[]});case s.Ih.NOTIFY_ERROR:var r=t.error.errors,i=null==r?void 0:r.map(function(e){return L(t,e)});return M(x({},e),{successes:[],errors:i});case s.Ih.NOTIFY_ERROR_MSG:return M(x({},e),{successes:[],errors:[t.msg]});case s.Mk.RECEIVE_SIGNIN_FAIL:return M(x({},e),{successes:[],errors:["Your email or password is incorrect. 
Please try again"]});default:return e}};function L(e,t){return{component:e.component,props:{msg:t.detail}}}let C=A;function I(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function D(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:R,t=arguments.length>1?arguments[1]:void 0;switch(t.type){case s.di.REDIRECT:return P(D({},e),{to:t.to});case s.di.MATCH_ROUTE:return P(D({},e),{to:void 0});default:return e}};let F=j;var Y=n(87013),B=(0,a.UY)({authentication:b,fetching:S,notifications:C,redirect:F,buildInfo:Y.Z});B(void 0,{type:"INITIAL_STATE"});var U=i.v9;let H=B},19084(e,t,n){"use strict";var r,i,a,o,s,u,c,l,f,d;n.d(t,{Ih:()=>i,Mk:()=>a,Y0:()=>s,di:()=>r,jp:()=>o}),n(67294),(u=r||(r={})).REDIRECT="REDIRECT",u.MATCH_ROUTE="MATCH_ROUTE",(c=i||(i={})).NOTIFY_SUCCESS="NOTIFY_SUCCESS",c.NOTIFY_SUCCESS_MSG="NOTIFY_SUCCESS_MSG",c.NOTIFY_ERROR="NOTIFY_ERROR",c.NOTIFY_ERROR_MSG="NOTIFY_ERROR_MSG",(l=a||(a={})).REQUEST_SIGNIN="REQUEST_SIGNIN",l.RECEIVE_SIGNIN_SUCCESS="RECEIVE_SIGNIN_SUCCESS",l.RECEIVE_SIGNIN_FAIL="RECEIVE_SIGNIN_FAIL",l.RECEIVE_SIGNIN_ERROR="RECEIVE_SIGNIN_ERROR",l.RECEIVE_SIGNOUT_SUCCESS="RECEIVE_SIGNOUT_SUCCESS",l.RECEIVE_SIGNOUT_ERROR="RECEIVE_SIGNOUT_ERROR",(f=o||(o={})).RECEIVE_CREATE_ERROR="RECEIVE_CREATE_ERROR",f.RECEIVE_CREATE_SUCCESS="RECEIVE_CREATE_SUCCESS",f.RECEIVE_DELETE_ERROR="RECEIVE_DELETE_ERROR",f.RECEIVE_DELETE_SUCCESS="RECEIVE_DELETE_SUCCESS",f.RECEIVE_UPDATE_ERROR="RECEIVE_UPDATE_ERROR",f.RECEIVE_UPDATE_SUCCESS="RECEIVE_UPDATE_SUCCESS",f.REQUEST_CREATE="REQUEST_CREATE",f.REQUEST_DELETE="REQUEST_DELETE",f.REQUEST_UPDATE="REQUEST_UPDATE",f.UPSERT_CONFIGURATION="UPSERT_CONFIGURATION",f.UPSERT_JOB_RUN="UPSERT_JOB_RUN",f.UPSERT_JOB_RUNS="UPSERT_JOB_RUNS",f.UPSERT_TRANSACTION="UPSERT_TRANSACTION",f.UPSERT_TRANSACTIONS="UPSERT_TRANSACTIONS",f.UPSERT_BUILD_INFO="UPSERT_BUILD_INFO",(d=s||(s={})).FETCH_BUILD_INFO_REQUESTED="FETCH_BUILD_INFO_REQUESTED",d.FETCH_BUILD_INFO_SUCCEEDED="FETC
H_BUILD_INFO_SUCCEEDED",d.FETCH_BUILD_INFO_FAILED="FETCH_BUILD_INFO_FAILED"},87013(e,t,n){"use strict";n.d(t,{Y:()=>o,Z:()=>u});var r=n(19084);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function a(e){for(var t=1;t0&&void 0!==arguments[0]?arguments[0]:o,t=arguments.length>1?arguments[1]:void 0;return t.type===r.Y0.FETCH_BUILD_INFO_SUCCEEDED?a({},t.buildInfo):e};let u=s},34823(e,t,n){"use strict";n.d(t,{N:()=>r});var r=function(e){return e.buildInfo}},73343(e,t,n){"use strict";n.d(t,{r:()=>u});var r=n(19350),i=n(32316),a=n(59114),o=n(5324),s={props:{MuiGrid:{spacing:3*o.default.unit},MuiCardHeader:{titleTypographyProps:{color:"secondary"}}},palette:{action:{hoverOpacity:.3},primary:{light:"#E5F1FF",main:"#3c40c6",contrastText:"#fff"},secondary:{main:"#3d5170"},success:{light:"#e8faf1",main:r.ek.A700,dark:r.ek[700],contrastText:r.y0.white},warning:{light:"#FFFBF1",main:"#fff6b6",contrastText:"#fad27a"},error:{light:"#ffdada",main:"#f44336",dark:"#d32f2f",contrastText:"#fff"},background:{default:"#f5f6f8",appBar:"#3c40c6"},text:{primary:(0,a.darken)(r.BA.A700,.7),secondary:"#818ea3"},listPendingStatus:{background:"#fef7e5",color:"#fecb4c"},listCompletedStatus:{background:"#e9faf2",color:"#4ed495"}},shape:{borderRadius:o.default.unit},overrides:{MuiButton:{root:{borderRadius:o.default.unit/2,textTransform:"none"},sizeLarge:{padding:void 0,fontSize:void 0,paddingTop:o.default.unit,paddingBottom:o.default.unit,paddingLeft:5*o.default.unit,paddingRight:5*o.default.unit}},MuiTableCell:{body:{fontSize:"1rem"},head:{fontSize:"1rem",fontWeight:400}},MuiCardHeader:{root:{borderBottom:"1px solid rgba(0, 0, 0, 0.12)"},action:{marginTop:-2,marginRight:0,"& 
>*":{marginLeft:2*o.default.unit}},subheader:{marginTop:.5*o.default.unit}}},typography:{useNextVariants:!0,fontFamily:"-apple-system,BlinkMacSystemFont,Roboto,Helvetica,Arial,sans-serif",button:{textTransform:"none",fontSize:"1.2em"},body1:{fontSize:"1.0rem",fontWeight:400,lineHeight:"1.46429em",color:"rgba(0, 0, 0, 0.87)",letterSpacing:-.4},body2:{fontSize:"1.0rem",fontWeight:500,lineHeight:"1.71429em",color:"rgba(0, 0, 0, 0.87)",letterSpacing:-.4},body1Next:{color:"rgb(29, 29, 29)",fontWeight:400,fontSize:"1rem",lineHeight:1.5,letterSpacing:-.4},body2Next:{color:"rgb(29, 29, 29)",fontWeight:400,fontSize:"0.875rem",lineHeight:1.5,letterSpacing:-.4},display1:{color:"#818ea3",fontSize:"2.125rem",fontWeight:400,lineHeight:"1.20588em",letterSpacing:-.4},display2:{color:"#818ea3",fontSize:"2.8125rem",fontWeight:400,lineHeight:"1.13333em",marginLeft:"-.02em",letterSpacing:-.4},display3:{color:"#818ea3",fontSize:"3.5rem",fontWeight:400,lineHeight:"1.30357em",marginLeft:"-.02em",letterSpacing:-.4},display4:{fontSize:14,fontWeightLight:300,fontWeightMedium:500,fontWeightRegular:400,letterSpacing:-.4},h1:{color:"rgb(29, 29, 29)",fontSize:"6rem",fontWeight:300,lineHeight:1},h2:{color:"rgb(29, 29, 29)",fontSize:"3.75rem",fontWeight:300,lineHeight:1},h3:{color:"rgb(29, 29, 29)",fontSize:"3rem",fontWeight:400,lineHeight:1.04},h4:{color:"rgb(29, 29, 29)",fontSize:"2.125rem",fontWeight:400,lineHeight:1.17},h5:{color:"rgb(29, 29, 29)",fontSize:"1.5rem",fontWeight:400,lineHeight:1.33,letterSpacing:-.4},h6:{fontSize:"0.8rem",fontWeight:450,lineHeight:"1.71429em",color:"rgba(0, 0, 0, 0.87)",letterSpacing:-.4},subheading:{color:"rgb(29, 29, 29)",fontSize:"1rem",fontWeight:400,lineHeight:"1.5em",letterSpacing:-.4},subtitle1:{color:"rgb(29, 29, 29)",fontSize:"1rem",fontWeight:400,lineHeight:1.75,letterSpacing:-.4},subtitle2:{color:"rgb(29, 29, 29)",fontSize:"0.875rem",fontWeight:500,lineHeight:1.57,letterSpacing:-.4}},shadows:["none","0px 1px 3px 0px rgba(0, 0, 0, 0.1),0px 1px 1px 0px 
rgba(0, 0, 0, 0.04),0px 2px 1px -1px rgba(0, 0, 0, 0.02)","0px 1px 5px 0px rgba(0, 0, 0, 0.1),0px 2px 2px 0px rgba(0, 0, 0, 0.04),0px 3px 1px -2px rgba(0, 0, 0, 0.02)","0px 1px 8px 0px rgba(0, 0, 0, 0.1),0px 3px 4px 0px rgba(0, 0, 0, 0.04),0px 3px 3px -2px rgba(0, 0, 0, 0.02)","0px 2px 4px -1px rgba(0, 0, 0, 0.1),0px 4px 5px 0px rgba(0, 0, 0, 0.04),0px 1px 10px 0px rgba(0, 0, 0, 0.02)","0px 3px 5px -1px rgba(0, 0, 0, 0.1),0px 5px 8px 0px rgba(0, 0, 0, 0.04),0px 1px 14px 0px rgba(0, 0, 0, 0.02)","0px 3px 5px -1px rgba(0, 0, 0, 0.1),0px 6px 10px 0px rgba(0, 0, 0, 0.04),0px 1px 18px 0px rgba(0, 0, 0, 0.02)","0px 4px 5px -2px rgba(0, 0, 0, 0.1),0px 7px 10px 1px rgba(0, 0, 0, 0.04),0px 2px 16px 1px rgba(0, 0, 0, 0.02)","0px 5px 5px -3px rgba(0, 0, 0, 0.1),0px 8px 10px 1px rgba(0, 0, 0, 0.04),0px 3px 14px 2px rgba(0, 0, 0, 0.02)","0px 5px 6px -3px rgba(0, 0, 0, 0.1),0px 9px 12px 1px rgba(0, 0, 0, 0.04),0px 3px 16px 2px rgba(0, 0, 0, 0.02)","0px 6px 6px -3px rgba(0, 0, 0, 0.1),0px 10px 14px 1px rgba(0, 0, 0, 0.04),0px 4px 18px 3px rgba(0, 0, 0, 0.02)","0px 6px 7px -4px rgba(0, 0, 0, 0.1),0px 11px 15px 1px rgba(0, 0, 0, 0.04),0px 4px 20px 3px rgba(0, 0, 0, 0.02)","0px 7px 8px -4px rgba(0, 0, 0, 0.1),0px 12px 17px 2px rgba(0, 0, 0, 0.04),0px 5px 22px 4px rgba(0, 0, 0, 0.02)","0px 7px 8px -4px rgba(0, 0, 0, 0.1),0px 13px 19px 2px rgba(0, 0, 0, 0.04),0px 5px 24px 4px rgba(0, 0, 0, 0.02)","0px 7px 9px -4px rgba(0, 0, 0, 0.1),0px 14px 21px 2px rgba(0, 0, 0, 0.04),0px 5px 26px 4px rgba(0, 0, 0, 0.02)","0px 8px 9px -5px rgba(0, 0, 0, 0.1),0px 15px 22px 2px rgba(0, 0, 0, 0.04),0px 6px 28px 5px rgba(0, 0, 0, 0.02)","0px 8px 10px -5px rgba(0, 0, 0, 0.1),0px 16px 24px 2px rgba(0, 0, 0, 0.04),0px 6px 30px 5px rgba(0, 0, 0, 0.02)","0px 8px 11px -5px rgba(0, 0, 0, 0.1),0px 17px 26px 2px rgba(0, 0, 0, 0.04),0px 6px 32px 5px rgba(0, 0, 0, 0.02)","0px 9px 11px -5px rgba(0, 0, 0, 0.1),0px 18px 28px 2px rgba(0, 0, 0, 0.04),0px 7px 34px 6px rgba(0, 0, 0, 0.02)","0px 9px 12px -6px rgba(0, 0, 
0, 0.1),0px 19px 29px 2px rgba(0, 0, 0, 0.04),0px 7px 36px 6px rgba(0, 0, 0, 0.02)","0px 10px 13px -6px rgba(0, 0, 0, 0.1),0px 20px 31px 3px rgba(0, 0, 0, 0.04),0px 8px 38px 7px rgba(0, 0, 0, 0.02)","0px 10px 13px -6px rgba(0, 0, 0, 0.1),0px 21px 33px 3px rgba(0, 0, 0, 0.04),0px 8px 40px 7px rgba(0, 0, 0, 0.02)","0px 10px 14px -6px rgba(0, 0, 0, 0.1),0px 22px 35px 3px rgba(0, 0, 0, 0.04),0px 8px 42px 7px rgba(0, 0, 0, 0.02)","0px 11px 14px -7px rgba(0, 0, 0, 0.1),0px 23px 36px 3px rgba(0, 0, 0, 0.04),0px 9px 44px 8px rgba(0, 0, 0, 0.02)","0px 11px 15px -7px rgba(0, 0, 0, 0.1),0px 24px 38px 3px rgba(0, 0, 0, 0.04),0px 9px 46px 8px rgba(0, 0, 0, 0.02)",]},u=(0,i.createMuiTheme)(s)},66289(e,t,n){"use strict";function r(e){if(void 0===e)throw ReferenceError("this hasn't been initialised - super() hasn't been called");return e}function i(e,t){if(!(e instanceof t))throw TypeError("Cannot call a class as a function")}function a(){if("undefined"==typeof Reflect||!Reflect.construct||Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(e){return!1}}function o(e,t,n){return(o=a()?Reflect.construct:function(e,t,n){var r=[null];r.push.apply(r,t);var i=new(Function.bind.apply(e,r));return n&&f(i,n.prototype),i}).apply(null,arguments)}function s(e){return(s=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)})(e)}function u(e,t){if("function"!=typeof t&&null!==t)throw TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),t&&f(e,t)}function c(e){return -1!==Function.toString.call(e).indexOf("[native code]")}function l(e,t){return t&&("object"===p(t)||"function"==typeof t)?t:r(e)}function f(e,t){return(f=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e})(e,t)}n.d(t,{V0:()=>B,_7:()=>v});var d,h,p=function(e){return 
e&&"undefined"!=typeof Symbol&&e.constructor===Symbol?"symbol":typeof e};function b(e){var t="function"==typeof Map?new Map:void 0;return(b=function(e){if(null===e||!c(e))return e;if("function"!=typeof e)throw TypeError("Super expression must either be null or a function");if(void 0!==t){if(t.has(e))return t.get(e);t.set(e,n)}function n(){return o(e,arguments,s(this).constructor)}return n.prototype=Object.create(e.prototype,{constructor:{value:n,enumerable:!1,writable:!0,configurable:!0}}),f(n,e)})(e)}function m(){if("undefined"==typeof Reflect||!Reflect.construct||Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],function(){})),!0}catch(e){return!1}}function g(e){var t=m();return function(){var n,r=s(e);if(t){var i=s(this).constructor;n=Reflect.construct(r,arguments,i)}else n=r.apply(this,arguments);return l(this,n)}}var v=function(e){u(n,e);var t=g(n);function n(e){var r;return i(this,n),(r=t.call(this,"AuthenticationError(".concat(e.statusText,")"))).errors=[{status:e.status,detail:e},],r}return n}(b(Error)),y=function(e){u(n,e);var t=g(n);function n(e){var r,a=e.errors;return i(this,n),(r=t.call(this,"BadRequestError")).errors=a,r}return n}(b(Error)),w=function(e){u(n,e);var t=g(n);function n(e){var r;return i(this,n),(r=t.call(this,"UnprocessableEntityError")).errors=e,r}return n}(b(Error)),_=function(e){u(n,e);var t=g(n);function n(e){var r;return i(this,n),(r=t.call(this,"ServerError")).errors=e,r}return n}(b(Error)),E=function(e){u(n,e);var t=g(n);function n(e){var r,a=e.errors;return i(this,n),(r=t.call(this,"ConflictError")).errors=a,r}return n}(b(Error)),S=function(e){u(n,e);var t=g(n);function n(e){var r;return i(this,n),(r=t.call(this,"UnknownResponseError(".concat(e.statusText,")"))).errors=[{status:e.status,detail:e.statusText},],r}return n}(b(Error));function k(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:2e4;return 
Promise.race([fetch(e,t),new Promise(function(e,t){return setTimeout(function(){return t(Error("timeout"))},n)}),])}function x(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n0&&i[i.length-1])&&(6===a[0]||2===a[0])){o=0;continue}if(3===a[0]&&(!i||a[1]>i[0]&&a[1]=200&&e.status<300))return[3,2];return[2,e.json()];case 2:if(400!==e.status)return[3,3];return[2,e.json().then(function(e){throw new y(e)})];case 3:if(401!==e.status)return[3,4];throw new v(e);case 4:if(422!==e.status)return[3,6];return[4,$(e)];case 5:throw n=i.sent(),new w(n);case 6:if(409!==e.status)return[3,7];return[2,e.json().then(function(e){throw new E(e)})];case 7:if(!(e.status>=500))return[3,9];return[4,$(e)];case 8:throw r=i.sent(),new _(r);case 9:throw new S(e);case 10:return[2]}})})).apply(this,arguments)}function $(e){return z.apply(this,arguments)}function z(){return(z=j(function(e){return Y(this,function(t){return[2,e.json().then(function(t){return t.errors?t.errors.map(function(t){return{status:e.status,detail:t.detail}}):G(e)}).catch(function(){return G(e)})]})})).apply(this,arguments)}function G(e){return[{status:e.status,detail:e.statusText},]}},50109(e,t,n){"use strict";n.d(t,{LK:()=>o,U2:()=>i,eT:()=>s,t8:()=>a});var r=n(12795);function i(e){return r.ZP.getItem("plugin.".concat(e))}function a(e,t){r.ZP.setItem("plugin.".concat(e),t)}function o(e){var t=i(e),n={};if(t)try{return JSON.parse(t)}catch(r){}return n}function s(e,t){a(e,JSON.stringify(t))}},9541(e,t,n){"use strict";n.d(t,{Ks:()=>u,Tp:()=>a,iR:()=>o,pm:()=>s});var r=n(50109),i="persistURL";function a(){return r.U2(i)||""}function o(e){r.t8(i,e)}function s(){return r.LK("authentication")}function u(e){r.eT("authentication",e)}},67121(e,t,n){"use strict";function r(e){var t,n=e.Symbol;return"function"==typeof n?n.observable?t=n.observable:(t=n("observable"),n.observable=t):t="@@observable",t}n.r(t),n.d(t,{default:()=>o}),e=n.hmd(e),i="undefined"!=typeof self?self:"undefined"!=typeof window?window:void 
0!==n.g?n.g:e;var i,a=r(i);let o=a},2177(e,t,n){"use strict";n.d(t,{Z:()=>o});var r=!0,i="Invariant failed";function a(e,t){if(!e){if(r)throw Error(i);throw Error(i+": "+(t||""))}}let o=a},11742(e){e.exports=function(){var e=document.getSelection();if(!e.rangeCount)return function(){};for(var t=document.activeElement,n=[],r=0;ri,pi:()=>a});var r=function(e,t){return(r=Object.setPrototypeOf||({__proto__:[]})instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var n in t)Object.prototype.hasOwnProperty.call(t,n)&&(e[n]=t[n])})(e,t)};function i(e,t){if("function"!=typeof t&&null!==t)throw TypeError("Class extends value "+String(t)+" is not a constructor or null");function n(){this.constructor=e}r(e,t),e.prototype=null===t?Object.create(t):(n.prototype=t.prototype,new n)}var a=function(){return(a=Object.assign||function(e){for(var t,n=1,r=arguments.length;nr})},94927(e,t,n){function r(e,t){if(i("noDeprecation"))return e;var n=!1;function r(){if(!n){if(i("throwDeprecation"))throw Error(t);i("traceDeprecation")?console.trace(t):console.warn(t),n=!0}return e.apply(this,arguments)}return r}function i(e){try{if(!n.g.localStorage)return!1}catch(t){return!1}var r=n.g.localStorage[e];return null!=r&&"true"===String(r).toLowerCase()}e.exports=r},42473(e){"use strict";var t=function(){};e.exports=t},84763(e){e.exports=Worker},47529(e){e.exports=n;var t=Object.prototype.hasOwnProperty;function n(){for(var e={},n=0;nr,O:()=>a}),(i=r||(r={}))[i.loading=1]="loading",i[i.setVariables=2]="setVariables",i[i.fetchMore=3]="fetchMore",i[i.refetch=4]="refetch",i[i.poll=6]="poll",i[i.ready=7]="ready",i[i.error=8]="error"},30990(e,t,n){"use strict";n.d(t,{MS:()=>s,YG:()=>a,cA:()=>c,ls:()=>o});var r=n(23564);n(83952);var i=n(13154),a=Symbol();function o(e){return!!e.extensions&&Array.isArray(e.extensions[a])}function s(e){return e.hasOwnProperty("graphQLErrors")}var u=function(e){var 
t=(0,r.ev)((0,r.ev)((0,r.ev)([],e.graphQLErrors,!0),e.clientErrors,!0),e.protocolErrors,!0);return e.networkError&&t.push(e.networkError),t.map(function(e){return(0,i.s)(e)&&e.message||"Error message not found."}).join("\n")},c=function(e){function t(n){var r=n.graphQLErrors,i=n.protocolErrors,a=n.clientErrors,o=n.networkError,s=n.errorMessage,c=n.extraInfo,l=e.call(this,s)||this;return l.name="ApolloError",l.graphQLErrors=r||[],l.protocolErrors=i||[],l.clientErrors=a||[],l.networkError=o||null,l.message=s||u(l),l.extraInfo=c,l.__proto__=t.prototype,l}return(0,r.ZT)(t,e),t}(Error)},85317(e,t,n){"use strict";n.d(t,{K:()=>a});var r=n(67294),i=n(30320).aS?Symbol.for("__APOLLO_CONTEXT__"):"__APOLLO_CONTEXT__";function a(){var e=r.createContext[i];return e||(Object.defineProperty(r.createContext,i,{value:e=r.createContext({}),enumerable:!1,writable:!1,configurable:!0}),e.displayName="ApolloContext"),e}},21436(e,t,n){"use strict";n.d(t,{O:()=>i,k:()=>r});var r=Array.isArray;function i(e){return Array.isArray(e)&&e.length>0}},30320(e,t,n){"use strict";n.d(t,{DN:()=>s,JC:()=>l,aS:()=>o,mr:()=>i,sy:()=>a});var r=n(83952),i="function"==typeof WeakMap&&"ReactNative"!==(0,r.wY)(function(){return navigator.product}),a="function"==typeof WeakSet,o="function"==typeof Symbol&&"function"==typeof Symbol.for,s=o&&Symbol.asyncIterator,u="function"==typeof(0,r.wY)(function(){return window.document.createElement}),c=(0,r.wY)(function(){return navigator.userAgent.indexOf("jsdom")>=0})||!1,l=u&&!c},53712(e,t,n){"use strict";function r(){for(var e=[],t=0;tr})},10542(e,t,n){"use strict";n.d(t,{J:()=>o}),n(83952);var r=n(13154);function i(e){var t=new Set([e]);return t.forEach(function(e){(0,r.s)(e)&&a(e)===e&&Object.getOwnPropertyNames(e).forEach(function(n){(0,r.s)(e[n])&&t.add(e[n])})}),e}function a(e){if(__DEV__&&!Object.isFrozen(e))try{Object.freeze(e)}catch(t){if(t instanceof TypeError)return null;throw t}return e}function o(e){return __DEV__&&i(e),e}},14012(e,t,n){"use 
strict";n.d(t,{J:()=>a});var r=n(23564),i=n(53712);function a(e,t){return(0,i.o)(e,t,t.variables&&{variables:(0,r.pi)((0,r.pi)({},e&&e.variables),t.variables)})}},13154(e,t,n){"use strict";function r(e){return null!==e&&"object"==typeof e}n.d(t,{s:()=>r})},83952(e,t,n){"use strict";n.d(t,{ej:()=>u,kG:()=>c,wY:()=>h});var r,i=n(70655),a="Invariant Violation",o=Object.setPrototypeOf,s=void 0===o?function(e,t){return e.__proto__=t,e}:o,u=function(e){function t(n){void 0===n&&(n=a);var r=e.call(this,"number"==typeof n?a+": "+n+" (see https://github.com/apollographql/invariant-packages)":n)||this;return r.framesToPop=1,r.name=a,s(r,t.prototype),r}return(0,i.ZT)(t,e),t}(Error);function c(e,t){if(!e)throw new u(t)}var l=["debug","log","warn","error","silent"],f=l.indexOf("log");function d(e){return function(){if(l.indexOf(e)>=f)return(console[e]||console.log).apply(console,arguments)}}function h(e){try{return e()}catch(t){}}(r=c||(c={})).debug=d("debug"),r.log=d("log"),r.warn=d("warn"),r.error=d("error");let p=h(function(){return globalThis})||h(function(){return window})||h(function(){return self})||h(function(){return global})||h(function(){return h.constructor("return this")()});var b="__",m=[b,b].join("DEV");function g(){try{return Boolean(__DEV__)}catch(e){return Object.defineProperty(p,m,{value:"production"!==h(function(){return"production"}),enumerable:!1,configurable:!0,writable:!0}),p[m]}}let v=g();function y(e){try{return e()}catch(t){}}var w=y(function(){return globalThis})||y(function(){return window})||y(function(){return self})||y(function(){return global})||y(function(){return y.constructor("return this")()}),_=!1;function E(){!w||y(function(){return"production"})||y(function(){return process})||(Object.defineProperty(w,"process",{value:{env:{NODE_ENV:"production"}},configurable:!0,enumerable:!1,writable:!0}),_=!0)}function S(){_&&(delete w.process,_=!1)}E();var k=n(10143);function x(){return k.H,S()}function T(){__DEV__?c("boolean"==typeof 
v,v):c("boolean"==typeof v,39)}x(),T()},87462(e,t,n){"use strict";function r(){return(r=Object.assign||function(e){for(var t=1;tr})},25821(e,t,n){"use strict";n.d(t,{Z:()=>s});var r=n(45695);function i(e){return(i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}var a=10,o=2;function s(e){return u(e,[])}function u(e,t){switch(i(e)){case"string":return JSON.stringify(e);case"function":return e.name?"[function ".concat(e.name,"]"):"[function]";case"object":if(null===e)return"null";return c(e,t);default:return String(e)}}function c(e,t){if(-1!==t.indexOf(e))return"[Circular]";var n=[].concat(t,[e]),r=d(e);if(void 0!==r){var i=r.call(e);if(i!==e)return"string"==typeof i?i:u(i,n)}else if(Array.isArray(e))return f(e,n);return l(e,n)}function l(e,t){var n=Object.keys(e);return 0===n.length?"{}":t.length>o?"["+h(e)+"]":"{ "+n.map(function(n){var r=u(e[n],t);return n+": "+r}).join(", ")+" }"}function f(e,t){if(0===e.length)return"[]";if(t.length>o)return"[Array]";for(var n=Math.min(a,e.length),r=e.length-n,i=[],s=0;s1&&i.push("... 
".concat(r," more items")),"["+i.join(", ")+"]"}function d(e){var t=e[String(r.Z)];return"function"==typeof t?t:"function"==typeof e.inspect?e.inspect:void 0}function h(e){var t=Object.prototype.toString.call(e).replace(/^\[object /,"").replace(/]$/,"");if("Object"===t&&"function"==typeof e.constructor){var n=e.constructor.name;if("string"==typeof n&&""!==n)return n}return t}},45695(e,t,n){"use strict";n.d(t,{Z:()=>i});var r="function"==typeof Symbol&&"function"==typeof Symbol.for?Symbol.for("nodejs.util.inspect.custom"):void 0;let i=r},25217(e,t,n){"use strict";function r(e,t){if(!Boolean(e))throw Error(null!=t?t:"Unexpected invariant triggered.")}n.d(t,{Ye:()=>o,WU:()=>s,UG:()=>u});var i=n(45695);function a(e){var t=e.prototype.toJSON;"function"==typeof t||r(0),e.prototype.inspect=t,i.Z&&(e.prototype[i.Z]=t)}var o=function(){function e(e,t,n){this.start=e.start,this.end=t.end,this.startToken=e,this.endToken=t,this.source=n}return e.prototype.toJSON=function(){return{start:this.start,end:this.end}},e}();a(o);var s=function(){function e(e,t,n,r,i,a,o){this.kind=e,this.start=t,this.end=n,this.line=r,this.column=i,this.value=o,this.prev=a,this.next=null}return e.prototype.toJSON=function(){return{kind:this.kind,value:this.value,line:this.line,column:this.column}},e}();function u(e){return null!=e&&"string"==typeof e.kind}a(s)},87392(e,t,n){"use strict";function r(e){var t=e.split(/\r\n|[\n\r]/g),n=a(e);if(0!==n)for(var r=1;ro&&i(t[s-1]);)--s;return t.slice(o,s).join("\n")}function i(e){for(var t=0;t1&&void 0!==arguments[1]?arguments[1]:"",n=arguments.length>2&&void 0!==arguments[2]&&arguments[2],r=-1===e.indexOf("\n"),i=" "===e[0]||" "===e[0],a='"'===e[e.length-1],o="\\"===e[e.length-1],s=!r||a||o||n,u="";return s&&!(r&&i)&&(u+="\n"+t),u+=t?e.replace(/\n/g,"\n"+t):e,s&&(u+="\n"),'"""'+u.replace(/"""/g,'\\"""')+'"""'}n.d(t,{LZ:()=>o,W7:()=>r})},97359(e,t,n){"use strict";n.d(t,{h:()=>r});var 
r=Object.freeze({NAME:"Name",DOCUMENT:"Document",OPERATION_DEFINITION:"OperationDefinition",VARIABLE_DEFINITION:"VariableDefinition",SELECTION_SET:"SelectionSet",FIELD:"Field",ARGUMENT:"Argument",FRAGMENT_SPREAD:"FragmentSpread",INLINE_FRAGMENT:"InlineFragment",FRAGMENT_DEFINITION:"FragmentDefinition",VARIABLE:"Variable",INT:"IntValue",FLOAT:"FloatValue",STRING:"StringValue",BOOLEAN:"BooleanValue",NULL:"NullValue",ENUM:"EnumValue",LIST:"ListValue",OBJECT:"ObjectValue",OBJECT_FIELD:"ObjectField",DIRECTIVE:"Directive",NAMED_TYPE:"NamedType",LIST_TYPE:"ListType",NON_NULL_TYPE:"NonNullType",SCHEMA_DEFINITION:"SchemaDefinition",OPERATION_TYPE_DEFINITION:"OperationTypeDefinition",SCALAR_TYPE_DEFINITION:"ScalarTypeDefinition",OBJECT_TYPE_DEFINITION:"ObjectTypeDefinition",FIELD_DEFINITION:"FieldDefinition",INPUT_VALUE_DEFINITION:"InputValueDefinition",INTERFACE_TYPE_DEFINITION:"InterfaceTypeDefinition",UNION_TYPE_DEFINITION:"UnionTypeDefinition",ENUM_TYPE_DEFINITION:"EnumTypeDefinition",ENUM_VALUE_DEFINITION:"EnumValueDefinition",INPUT_OBJECT_TYPE_DEFINITION:"InputObjectTypeDefinition",DIRECTIVE_DEFINITION:"DirectiveDefinition",SCHEMA_EXTENSION:"SchemaExtension",SCALAR_TYPE_EXTENSION:"ScalarTypeExtension",OBJECT_TYPE_EXTENSION:"ObjectTypeExtension",INTERFACE_TYPE_EXTENSION:"InterfaceTypeExtension",UNION_TYPE_EXTENSION:"UnionTypeExtension",ENUM_TYPE_EXTENSION:"EnumTypeExtension",INPUT_OBJECT_TYPE_EXTENSION:"InputObjectTypeExtension"})},10143(e,t,n){"use strict";n.d(t,{H:()=>c,T:()=>l});var r=n(99763),i=n(25821);function a(e,t){if(!Boolean(e))throw Error(t)}let o=function(e,t){return e instanceof t};function s(e,t){for(var n=0;n1&&void 0!==arguments[1]?arguments[1]:"GraphQL request",n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{line:1,column:1};"string"==typeof e||a(0,"Body must be a string. 
Received: ".concat((0,i.Z)(e),".")),this.body=e,this.name=t,this.locationOffset=n,this.locationOffset.line>0||a(0,"line in locationOffset is 1-indexed and must be positive."),this.locationOffset.column>0||a(0,"column in locationOffset is 1-indexed and must be positive.")}return u(e,[{key:r.YF,get:function(){return"Source"}}]),e}();function l(e){return o(e,c)}},99763(e,t,n){"use strict";n.d(t,{YF:()=>r});var r="function"==typeof Symbol&&null!=Symbol.toStringTag?Symbol.toStringTag:"@@toStringTag"},37452(e){"use strict";e.exports=JSON.parse('{"AElig":"\xc6","AMP":"&","Aacute":"\xc1","Acirc":"\xc2","Agrave":"\xc0","Aring":"\xc5","Atilde":"\xc3","Auml":"\xc4","COPY":"\xa9","Ccedil":"\xc7","ETH":"\xd0","Eacute":"\xc9","Ecirc":"\xca","Egrave":"\xc8","Euml":"\xcb","GT":">","Iacute":"\xcd","Icirc":"\xce","Igrave":"\xcc","Iuml":"\xcf","LT":"<","Ntilde":"\xd1","Oacute":"\xd3","Ocirc":"\xd4","Ograve":"\xd2","Oslash":"\xd8","Otilde":"\xd5","Ouml":"\xd6","QUOT":"\\"","REG":"\xae","THORN":"\xde","Uacute":"\xda","Ucirc":"\xdb","Ugrave":"\xd9","Uuml":"\xdc","Yacute":"\xdd","aacute":"\xe1","acirc":"\xe2","acute":"\xb4","aelig":"\xe6","agrave":"\xe0","amp":"&","aring":"\xe5","atilde":"\xe3","auml":"\xe4","brvbar":"\xa6","ccedil":"\xe7","cedil":"\xb8","cent":"\xa2","copy":"\xa9","curren":"\xa4","deg":"\xb0","divide":"\xf7","eacute":"\xe9","ecirc":"\xea","egrave":"\xe8","eth":"\xf0","euml":"\xeb","frac12":"\xbd","frac14":"\xbc","frac34":"\xbe","gt":">","iacute":"\xed","icirc":"\xee","iexcl":"\xa1","igrave":"\xec","iquest":"\xbf","iuml":"\xef","laquo":"\xab","lt":"<","macr":"\xaf","micro":"\xb5","middot":"\xb7","nbsp":"\xa0","not":"\xac","ntilde":"\xf1","oacute":"\xf3","ocirc":"\xf4","ograve":"\xf2","ordf":"\xaa","ordm":"\xba","oslash":"\xf8","otilde":"\xf5","ouml":"\xf6","para":"\xb6","plusmn":"\xb1","pound":"\xa3","quot":"\\"","raquo":"\xbb","reg":"\xae","sect":"\xa7","shy":"\xad","sup1":"\xb9","sup2":"\xb2","sup3":"\xb3","szlig":"\xdf","thorn":"\xfe","times":"\xd7","uacute":"\xfa","uc
irc":"\xfb","ugrave":"\xf9","uml":"\xa8","uuml":"\xfc","yacute":"\xfd","yen":"\xa5","yuml":"\xff"}')},93580(e){"use strict";e.exports=JSON.parse('{"0":"�","128":"€","130":"‚","131":"ƒ","132":"„","133":"…","134":"†","135":"‡","136":"ˆ","137":"‰","138":"Š","139":"‹","140":"Œ","142":"Ž","145":"‘","146":"’","147":"“","148":"”","149":"•","150":"–","151":"—","152":"˜","153":"™","154":"š","155":"›","156":"œ","158":"ž","159":"Ÿ"}')},67946(e){"use strict";e.exports=JSON.parse('{"locale":"en","long":{"year":{"previous":"last year","current":"this year","next":"next year","past":{"one":"{0} year ago","other":"{0} years ago"},"future":{"one":"in {0} year","other":"in {0} years"}},"quarter":{"previous":"last quarter","current":"this quarter","next":"next quarter","past":{"one":"{0} quarter ago","other":"{0} quarters ago"},"future":{"one":"in {0} quarter","other":"in {0} quarters"}},"month":{"previous":"last month","current":"this month","next":"next month","past":{"one":"{0} month ago","other":"{0} months ago"},"future":{"one":"in {0} month","other":"in {0} months"}},"week":{"previous":"last week","current":"this week","next":"next week","past":{"one":"{0} week ago","other":"{0} weeks ago"},"future":{"one":"in {0} week","other":"in {0} weeks"}},"day":{"previous":"yesterday","current":"today","next":"tomorrow","past":{"one":"{0} day ago","other":"{0} days ago"},"future":{"one":"in {0} day","other":"in {0} days"}},"hour":{"current":"this hour","past":{"one":"{0} hour ago","other":"{0} hours ago"},"future":{"one":"in {0} hour","other":"in {0} hours"}},"minute":{"current":"this minute","past":{"one":"{0} minute ago","other":"{0} minutes ago"},"future":{"one":"in {0} minute","other":"in {0} minutes"}},"second":{"current":"now","past":{"one":"{0} second ago","other":"{0} seconds ago"},"future":{"one":"in {0} second","other":"in {0} seconds"}}},"short":{"year":{"previous":"last yr.","current":"this yr.","next":"next yr.","past":"{0} yr. 
ago","future":"in {0} yr."},"quarter":{"previous":"last qtr.","current":"this qtr.","next":"next qtr.","past":{"one":"{0} qtr. ago","other":"{0} qtrs. ago"},"future":{"one":"in {0} qtr.","other":"in {0} qtrs."}},"month":{"previous":"last mo.","current":"this mo.","next":"next mo.","past":"{0} mo. ago","future":"in {0} mo."},"week":{"previous":"last wk.","current":"this wk.","next":"next wk.","past":"{0} wk. ago","future":"in {0} wk."},"day":{"previous":"yesterday","current":"today","next":"tomorrow","past":{"one":"{0} day ago","other":"{0} days ago"},"future":{"one":"in {0} day","other":"in {0} days"}},"hour":{"current":"this hour","past":"{0} hr. ago","future":"in {0} hr."},"minute":{"current":"this minute","past":"{0} min. ago","future":"in {0} min."},"second":{"current":"now","past":"{0} sec. ago","future":"in {0} sec."}},"narrow":{"year":{"previous":"last yr.","current":"this yr.","next":"next yr.","past":"{0} yr. ago","future":"in {0} yr."},"quarter":{"previous":"last qtr.","current":"this qtr.","next":"next qtr.","past":{"one":"{0} qtr. ago","other":"{0} qtrs. ago"},"future":{"one":"in {0} qtr.","other":"in {0} qtrs."}},"month":{"previous":"last mo.","current":"this mo.","next":"next mo.","past":"{0} mo. ago","future":"in {0} mo."},"week":{"previous":"last wk.","current":"this wk.","next":"next wk.","past":"{0} wk. ago","future":"in {0} wk."},"day":{"previous":"yesterday","current":"today","next":"tomorrow","past":{"one":"{0} day ago","other":"{0} days ago"},"future":{"one":"in {0} day","other":"in {0} days"}},"hour":{"current":"this hour","past":"{0} hr. ago","future":"in {0} hr."},"minute":{"current":"this minute","past":"{0} min. ago","future":"in {0} min."},"second":{"current":"now","past":"{0} sec. 
ago","future":"in {0} sec."}},"now":{"now":{"current":"now","future":"in a moment","past":"just now"}},"mini":{"year":"{0}yr","month":"{0}mo","week":"{0}wk","day":"{0}d","hour":"{0}h","minute":"{0}m","second":"{0}s","now":"now"},"short-time":{"year":"{0} yr.","month":"{0} mo.","week":"{0} wk.","day":{"one":"{0} day","other":"{0} days"},"hour":"{0} hr.","minute":"{0} min.","second":"{0} sec."},"long-time":{"year":{"one":"{0} year","other":"{0} years"},"month":{"one":"{0} month","other":"{0} months"},"week":{"one":"{0} week","other":"{0} weeks"},"day":{"one":"{0} day","other":"{0} days"},"hour":{"one":"{0} hour","other":"{0} hours"},"minute":{"one":"{0} minute","other":"{0} minutes"},"second":{"one":"{0} second","other":"{0} seconds"}}}')}},__webpack_module_cache__={};function __webpack_require__(e){var t=__webpack_module_cache__[e];if(void 0!==t)return t.exports;var n=__webpack_module_cache__[e]={id:e,loaded:!1,exports:{}};return __webpack_modules__[e].call(n.exports,n,n.exports,__webpack_require__),n.loaded=!0,n.exports}__webpack_require__.n=e=>{var t=e&&e.__esModule?()=>e.default:()=>e;return __webpack_require__.d(t,{a:t}),t},(()=>{var e,t=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__;__webpack_require__.t=function(n,r){if(1&r&&(n=this(n)),8&r||"object"==typeof n&&n&&(4&r&&n.__esModule||16&r&&"function"==typeof n.then))return n;var i=Object.create(null);__webpack_require__.r(i);var a={};e=e||[null,t({}),t([]),t(t)];for(var o=2&r&&n;"object"==typeof o&&!~e.indexOf(o);o=t(o))Object.getOwnPropertyNames(o).forEach(e=>a[e]=()=>n[e]);return a.default=()=>n,__webpack_require__.d(i,a),i}})(),__webpack_require__.d=(e,t)=>{for(var n in t)__webpack_require__.o(t,n)&&!__webpack_require__.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:t[n]})},__webpack_require__.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||Function("return this")()}catch(e){if("object"==typeof window)return 
window}}(),__webpack_require__.hmd=e=>((e=Object.create(e)).children||(e.children=[]),Object.defineProperty(e,"exports",{enumerable:!0,set(){throw Error("ES Modules may not assign module.exports or exports.*, Use ESM export syntax, instead: "+e.id)}}),e),__webpack_require__.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),__webpack_require__.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},__webpack_require__.nmd=e=>(e.paths=[],e.children||(e.children=[]),e),__webpack_require__.p="/assets/",__webpack_require__.nc=void 0;var __webpack_exports__={};(()=>{"use strict";var e,t,n,r,i=__webpack_require__(32316),a=__webpack_require__(8126),o=__webpack_require__(5690),s=__webpack_require__(30381),u=__webpack_require__.n(s),c=__webpack_require__(67294),l=__webpack_require__(73935),f=__webpack_require__.n(l),d=__webpack_require__(57209),h=__webpack_require__(55977),p=__webpack_require__(15857),b=__webpack_require__(28500);function m(e){return function(t){var n=t.dispatch,r=t.getState;return function(t){return function(i){return"function"==typeof i?i(n,r,e):t(i)}}}}var g=m();g.withExtraArgument=m;let v=g;var y=__webpack_require__(76489);function w(e){return function(t){return function(n){return function(r){n(r);var i=e||document&&document.cookie||"",a=t.getState();if("MATCH_ROUTE"===r.type&&"/signin"!==a.notifications.currentUrl){var o=(0,y.Q)(i);if(o.explorer)try{var s=JSON.parse(o.explorer);if("error"===s.status){var u=_(s.url);n({type:"NOTIFY_ERROR_MSG",msg:u})}}catch(c){n({type:"NOTIFY_ERROR_MSG",msg:"Invalid explorer status"})}}}}}}function _(e){var t="Can't connect to explorer: ".concat(e);return e.match(/^wss?:.+/)?t:"".concat(t,". 
You must use a websocket.")}var E=__webpack_require__(16353);function S(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n=e.length?{done:!0}:{done:!1,value:e[r++]}}}throw TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}function ei(e,t){if(e){if("string"==typeof e)return ea(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);if("Object"===n&&e.constructor&&(n=e.constructor.name),"Map"===n||"Set"===n)return Array.from(e);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return ea(e,t)}}function ea(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=Array(t);n1,i=!1,a=arguments[1],o=a;return new n(function(n){return t.subscribe({next:function(t){var a=!i;if(i=!0,!a||r)try{o=e(o,t)}catch(s){return n.error(s)}else o=t},error:function(e){n.error(e)},complete:function(){if(!i&&!r)return n.error(TypeError("Cannot reduce an empty sequence"));n.next(o),n.complete()}})})},t.concat=function(){for(var e=this,t=arguments.length,n=Array(t),r=0;r=0&&i.splice(e,1),o()}});i.push(s)},error:function(e){r.error(e)},complete:function(){o()}});function o(){a.closed&&0===i.length&&r.complete()}return function(){i.forEach(function(e){return e.unsubscribe()}),a.unsubscribe()}})},t[ed]=function(){return this},e.from=function(t){var n="function"==typeof this?this:e;if(null==t)throw TypeError(t+" is not an object");var r=ep(t,ed);if(r){var i=r.call(t);if(Object(i)!==i)throw TypeError(i+" is not an object");return em(i)&&i.constructor===n?i:new n(function(e){return i.subscribe(e)})}if(ec("iterator")&&(r=ep(t,ef)))return new n(function(e){ev(function(){if(!e.closed){for(var n,i=er(r.call(t));!(n=i()).done;){var a=n.value;if(e.next(a),e.closed)return}e.complete()}})});if(Array.isArray(t))return new n(function(e){ev(function(){if(!e.closed){for(var n=0;n0))return n.connection.key;var r=n.connection.filter?n.connection.filter:[];r.sort();var i={};return 
r.forEach(function(e){i[e]=t[e]}),"".concat(n.connection.key,"(").concat(eV(i),")")}var a=e;if(t){var o=eV(t);a+="(".concat(o,")")}return n&&Object.keys(n).forEach(function(e){-1===eW.indexOf(e)&&(n[e]&&Object.keys(n[e]).length?a+="@".concat(e,"(").concat(eV(n[e]),")"):a+="@".concat(e))}),a},{setStringify:function(e){var t=eV;return eV=e,t}}),eV=function(e){return JSON.stringify(e,eq)};function eq(e,t){return(0,eO.s)(t)&&!Array.isArray(t)&&(t=Object.keys(t).sort().reduce(function(e,n){return e[n]=t[n],e},{})),t}function eZ(e,t){if(e.arguments&&e.arguments.length){var n={};return e.arguments.forEach(function(e){var r;return ez(n,e.name,e.value,t)}),n}return null}function eX(e){return e.alias?e.alias.value:e.name.value}function eJ(e,t,n){for(var r,i=0,a=t.selections;it.indexOf(i))throw __DEV__?new Q.ej("illegal argument: ".concat(i)):new Q.ej(27)}return e}function tt(e,t){return t?t(e):eT.of()}function tn(e){return"function"==typeof e?new ta(e):e}function tr(e){return e.request.length<=1}var ti=function(e){function t(t,n){var r=e.call(this,t)||this;return r.link=n,r}return(0,en.ZT)(t,e),t}(Error),ta=function(){function e(e){e&&(this.request=e)}return e.empty=function(){return new e(function(){return eT.of()})},e.from=function(t){return 0===t.length?e.empty():t.map(tn).reduce(function(e,t){return e.concat(t)})},e.split=function(t,n,r){var i=tn(n),a=tn(r||new e(tt));return new e(tr(i)&&tr(a)?function(e){return t(e)?i.request(e)||eT.of():a.request(e)||eT.of()}:function(e,n){return t(e)?i.request(e,n)||eT.of():a.request(e,n)||eT.of()})},e.execute=function(e,t){return e.request(eM(t.context,e7(te(t))))||eT.of()},e.concat=function(t,n){var r=tn(t);if(tr(r))return __DEV__&&Q.kG.warn(new ti("You are calling concat on a terminating link, which will have no effect",r)),r;var i=tn(n);return new e(tr(i)?function(e){return r.request(e,function(e){return i.request(e)||eT.of()})||eT.of()}:function(e,t){return r.request(e,function(e){return 
i.request(e,t)||eT.of()})||eT.of()})},e.prototype.split=function(t,n,r){return this.concat(e.split(t,n,r||new e(tt)))},e.prototype.concat=function(t){return e.concat(this,t)},e.prototype.request=function(e,t){throw __DEV__?new Q.ej("request is not implemented"):new Q.ej(22)},e.prototype.onError=function(e,t){if(t&&t.error)return t.error(e),!1;throw e},e.prototype.setOnError=function(e){return this.onError=e,this},e}(),to=__webpack_require__(25821),ts=__webpack_require__(25217),tu={Name:[],Document:["definitions"],OperationDefinition:["name","variableDefinitions","directives","selectionSet"],VariableDefinition:["variable","type","defaultValue","directives"],Variable:["name"],SelectionSet:["selections"],Field:["alias","name","arguments","directives","selectionSet"],Argument:["name","value"],FragmentSpread:["name","directives"],InlineFragment:["typeCondition","directives","selectionSet"],FragmentDefinition:["name","variableDefinitions","typeCondition","directives","selectionSet"],IntValue:[],FloatValue:[],StringValue:[],BooleanValue:[],NullValue:[],EnumValue:[],ListValue:["values"],ObjectValue:["fields"],ObjectField:["name","value"],Directive:["name","arguments"],NamedType:["name"],ListType:["type"],NonNullType:["type"],SchemaDefinition:["description","directives","operationTypes"],OperationTypeDefinition:["type"],ScalarTypeDefinition:["description","name","directives"],ObjectTypeDefinition:["description","name","interfaces","directives","fields"],FieldDefinition:["description","name","arguments","type","directives"],InputValueDefinition:["description","name","type","defaultValue","directives"],InterfaceTypeDefinition:["description","name","interfaces","directives","fields"],UnionTypeDefinition:["description","name","directives","types"],EnumTypeDefinition:["description","name","directives","values"],EnumValueDefinition:["description","name","directives"],InputObjectTypeDefinition:["description","name","directives","fields"],DirectiveDefinition:["description","name","a
rguments","locations"],SchemaExtension:["directives","operationTypes"],ScalarTypeExtension:["name","directives"],ObjectTypeExtension:["name","interfaces","directives","fields"],InterfaceTypeExtension:["name","interfaces","directives","fields"],UnionTypeExtension:["name","directives","types"],EnumTypeExtension:["name","directives","values"],InputObjectTypeExtension:["name","directives","fields"]},tc=Object.freeze({});function tl(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:tu,r=void 0,i=Array.isArray(e),a=[e],o=-1,s=[],u=void 0,c=void 0,l=void 0,f=[],d=[],h=e;do{var p,b=++o===a.length,m=b&&0!==s.length;if(b){if(c=0===d.length?void 0:f[f.length-1],u=l,l=d.pop(),m){if(i)u=u.slice();else{for(var g={},v=0,y=Object.keys(u);v1)for(var r=new tB,i=1;i=0;--a){var o=i[a],s=isNaN(+o)?{}:[];s[o]=t,t=s}n=r.merge(n,t)}),n}var tW=Object.prototype.hasOwnProperty;function tK(e,t){var n,r,i,a,o;return(0,en.mG)(this,void 0,void 0,function(){var s,u,c,l,f,d,h,p,b,m,g,v,y,w,_,E,S,k,x,T,M,O,A;return(0,en.Jh)(this,function(L){switch(L.label){case 0:if(void 0===TextDecoder)throw Error("TextDecoder must be defined in the environment: please import a polyfill.");s=new TextDecoder("utf-8"),u=null===(n=e.headers)||void 0===n?void 0:n.get("content-type"),c="boundary=",l=(null==u?void 0:u.includes(c))?null==u?void 0:u.substring((null==u?void 0:u.indexOf(c))+c.length).replace(/['"]/g,"").replace(/\;(.*)/gm,"").trim():"-",f="\r\n--".concat(l),d="",h=tI(e),p=!0,L.label=1;case 1:if(!p)return[3,3];return[4,h.next()];case 2:for(m=(b=L.sent()).value,g=b.done,v="string"==typeof m?m:s.decode(m),y=d.length-f.length+1,p=!g,d+=v,w=d.indexOf(f,y);w>-1;){if(_=void 0,_=(O=[d.slice(0,w),d.slice(w+f.length),])[0],d=O[1],E=_.indexOf("\r\n\r\n"),(k=(S=tV(_.slice(0,E)))["content-type"])&&-1===k.toLowerCase().indexOf("application/json"))throw Error("Unsupported patch content type: application/json is required.");if(x=_.slice(E))try{T=tq(e,x),Object.keys(T).length>1||"data"in T||"incremental"in 
T||"errors"in T||"payload"in T?tz(T)?(M={},"payload"in T&&(M=(0,en.pi)({},T.payload)),"errors"in T&&(M=(0,en.pi)((0,en.pi)({},M),{extensions:(0,en.pi)((0,en.pi)({},"extensions"in M?M.extensions:null),((A={})[tN.YG]=T.errors,A))})),null===(r=t.next)||void 0===r||r.call(t,M)):null===(i=t.next)||void 0===i||i.call(t,T):1===Object.keys(T).length&&"hasNext"in T&&!T.hasNext&&(null===(a=t.complete)||void 0===a||a.call(t))}catch(C){tZ(C,t)}w=d.indexOf(f)}return[3,1];case 3:return null===(o=t.complete)||void 0===o||o.call(t),[2]}})})}function tV(e){var t={};return e.split("\n").forEach(function(e){var n=e.indexOf(":");if(n>-1){var r=e.slice(0,n).trim().toLowerCase(),i=e.slice(n+1).trim();t[r]=i}}),t}function tq(e,t){e.status>=300&&tD(e,function(){try{return JSON.parse(t)}catch(e){return t}}(),"Response not successful: Received status code ".concat(e.status));try{return JSON.parse(t)}catch(n){var r=n;throw r.name="ServerParseError",r.response=e,r.statusCode=e.status,r.bodyText=t,r}}function tZ(e,t){var n,r;"AbortError"!==e.name&&(e.result&&e.result.errors&&e.result.data&&(null===(n=t.next)||void 0===n||n.call(t,e.result)),null===(r=t.error)||void 0===r||r.call(t,e))}function tX(e,t,n){tJ(t)(e).then(function(e){var t,r;null===(t=n.next)||void 0===t||t.call(n,e),null===(r=n.complete)||void 0===r||r.call(n)}).catch(function(e){return tZ(e,n)})}function tJ(e){return function(t){return t.text().then(function(e){return tq(t,e)}).then(function(n){return t.status>=300&&tD(t,n,"Response not successful: Received status code ".concat(t.status)),Array.isArray(n)||tW.call(n,"data")||tW.call(n,"errors")||tD(t,n,"Server response was missing for query '".concat(Array.isArray(e)?e.map(function(e){return e.operationName}):e.operationName,"'.")),n})}}var tQ=function(e){if(!e&&"undefined"==typeof fetch)throw __DEV__?new Q.ej("\n\"fetch\" has not been found globally and no fetcher has been configured. 
To fix this, install a fetch package (like https://www.npmjs.com/package/cross-fetch), instantiate the fetcher, and pass it into your HttpLink constructor. For example:\n\nimport fetch from 'cross-fetch';\nimport { ApolloClient, HttpLink } from '@apollo/client';\nconst client = new ApolloClient({\n link: new HttpLink({ uri: '/graphql', fetch })\n});\n "):new Q.ej(23)},t1=__webpack_require__(87392);function t0(e){return tl(e,{leave:t3})}var t2=80,t3={Name:function(e){return e.value},Variable:function(e){return"$"+e.name},Document:function(e){return t5(e.definitions,"\n\n")+"\n"},OperationDefinition:function(e){var t=e.operation,n=e.name,r=t9("(",t5(e.variableDefinitions,", "),")"),i=t5(e.directives," "),a=e.selectionSet;return n||i||r||"query"!==t?t5([t,t5([n,r]),i,a]," "):a},VariableDefinition:function(e){var t=e.variable,n=e.type,r=e.defaultValue,i=e.directives;return t+": "+n+t9(" = ",r)+t9(" ",t5(i," "))},SelectionSet:function(e){return t6(e.selections)},Field:function(e){var t=e.alias,n=e.name,r=e.arguments,i=e.directives,a=e.selectionSet,o=t9("",t,": ")+n,s=o+t9("(",t5(r,", "),")");return s.length>t2&&(s=o+t9("(\n",t8(t5(r,"\n")),"\n)")),t5([s,t5(i," "),a]," ")},Argument:function(e){var t;return e.name+": "+e.value},FragmentSpread:function(e){var t;return"..."+e.name+t9(" ",t5(e.directives," "))},InlineFragment:function(e){var t=e.typeCondition,n=e.directives,r=e.selectionSet;return t5(["...",t9("on ",t),t5(n," "),r]," ")},FragmentDefinition:function(e){var t=e.name,n=e.typeCondition,r=e.variableDefinitions,i=e.directives,a=e.selectionSet;return"fragment ".concat(t).concat(t9("(",t5(r,", "),")")," ")+"on ".concat(n," ").concat(t9("",t5(i," ")," "))+a},IntValue:function(e){return e.value},FloatValue:function(e){return e.value},StringValue:function(e,t){var n=e.value;return e.block?(0,t1.LZ)(n,"description"===t?"":" "):JSON.stringify(n)},BooleanValue:function(e){return e.value?"true":"false"},NullValue:function(){return"null"},EnumValue:function(e){return 
e.value},ListValue:function(e){return"["+t5(e.values,", ")+"]"},ObjectValue:function(e){return"{"+t5(e.fields,", ")+"}"},ObjectField:function(e){var t;return e.name+": "+e.value},Directive:function(e){var t;return"@"+e.name+t9("(",t5(e.arguments,", "),")")},NamedType:function(e){return e.name},ListType:function(e){return"["+e.type+"]"},NonNullType:function(e){return e.type+"!"},SchemaDefinition:t4(function(e){var t=e.directives,n=e.operationTypes;return t5(["schema",t5(t," "),t6(n)]," ")}),OperationTypeDefinition:function(e){var t;return e.operation+": "+e.type},ScalarTypeDefinition:t4(function(e){var t;return t5(["scalar",e.name,t5(e.directives," ")]," ")}),ObjectTypeDefinition:t4(function(e){var t=e.name,n=e.interfaces,r=e.directives,i=e.fields;return t5(["type",t,t9("implements ",t5(n," & ")),t5(r," "),t6(i)]," ")}),FieldDefinition:t4(function(e){var t=e.name,n=e.arguments,r=e.type,i=e.directives;return t+(ne(n)?t9("(\n",t8(t5(n,"\n")),"\n)"):t9("(",t5(n,", "),")"))+": "+r+t9(" ",t5(i," "))}),InputValueDefinition:t4(function(e){var t=e.name,n=e.type,r=e.defaultValue,i=e.directives;return t5([t+": "+n,t9("= ",r),t5(i," ")]," ")}),InterfaceTypeDefinition:t4(function(e){var t=e.name,n=e.interfaces,r=e.directives,i=e.fields;return t5(["interface",t,t9("implements ",t5(n," & ")),t5(r," "),t6(i)]," ")}),UnionTypeDefinition:t4(function(e){var t=e.name,n=e.directives,r=e.types;return t5(["union",t,t5(n," "),r&&0!==r.length?"= "+t5(r," | "):""]," ")}),EnumTypeDefinition:t4(function(e){var t=e.name,n=e.directives,r=e.values;return t5(["enum",t,t5(n," "),t6(r)]," ")}),EnumValueDefinition:t4(function(e){var t;return t5([e.name,t5(e.directives," ")]," ")}),InputObjectTypeDefinition:t4(function(e){var t=e.name,n=e.directives,r=e.fields;return t5(["input",t,t5(n," "),t6(r)]," ")}),DirectiveDefinition:t4(function(e){var t=e.name,n=e.arguments,r=e.repeatable,i=e.locations;return"directive @"+t+(ne(n)?t9("(\n",t8(t5(n,"\n")),"\n)"):t9("(",t5(n,", "),")"))+(r?" 
repeatable":"")+" on "+t5(i," | ")}),SchemaExtension:function(e){var t=e.directives,n=e.operationTypes;return t5(["extend schema",t5(t," "),t6(n)]," ")},ScalarTypeExtension:function(e){var t;return t5(["extend scalar",e.name,t5(e.directives," ")]," ")},ObjectTypeExtension:function(e){var t=e.name,n=e.interfaces,r=e.directives,i=e.fields;return t5(["extend type",t,t9("implements ",t5(n," & ")),t5(r," "),t6(i)]," ")},InterfaceTypeExtension:function(e){var t=e.name,n=e.interfaces,r=e.directives,i=e.fields;return t5(["extend interface",t,t9("implements ",t5(n," & ")),t5(r," "),t6(i)]," ")},UnionTypeExtension:function(e){var t=e.name,n=e.directives,r=e.types;return t5(["extend union",t,t5(n," "),r&&0!==r.length?"= "+t5(r," | "):""]," ")},EnumTypeExtension:function(e){var t=e.name,n=e.directives,r=e.values;return t5(["extend enum",t,t5(n," "),t6(r)]," ")},InputObjectTypeExtension:function(e){var t=e.name,n=e.directives,r=e.fields;return t5(["extend input",t,t5(n," "),t6(r)]," ")}};function t4(e){return function(t){return t5([t.description,e(t)],"\n")}}function t5(e){var t,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";return null!==(t=null==e?void 0:e.filter(function(e){return e}).join(n))&&void 0!==t?t:""}function t6(e){return t9("{\n",t8(t5(e,"\n")),"\n}")}function t9(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"";return null!=t&&""!==t?e+t+n:""}function t8(e){return t9(" ",e.replace(/\n/g,"\n "))}function t7(e){return -1!==e.indexOf("\n")}function ne(e){return null!=e&&e.some(t7)}var nt,nn,nr,ni={http:{includeQuery:!0,includeExtensions:!1,preserveHeaderCase:!1},headers:{accept:"*/*","content-type":"application/json"},options:{method:"POST"}},na=function(e,t){return t(e)};function no(e,t){for(var n=[],r=2;rObject.create(null),{forEach:nv,slice:ny}=Array.prototype,{hasOwnProperty:nw}=Object.prototype;class n_{constructor(e=!0,t=ng){this.weakness=e,this.makeData=t}lookup(...e){return this.lookupArray(e)}lookupArray(e){let t=this;return 
nv.call(e,e=>t=t.getChildTrie(e)),nw.call(t,"data")?t.data:t.data=this.makeData(ny.call(e))}peek(...e){return this.peekArray(e)}peekArray(e){let t=this;for(let n=0,r=e.length;t&&n=0;--o)t.definitions[o].kind===nL.h.OPERATION_DEFINITION&&++a;var s=nN(e),u=e.some(function(e){return e.remove}),c=function(e){return u&&e&&e.some(s)},l=new Map,f=!1,d={enter:function(e){if(c(e.directives))return f=!0,null}},h=tl(t,{Field:d,InlineFragment:d,VariableDefinition:{enter:function(){return!1}},Variable:{enter:function(e,t,n,r,a){var o=i(a);o&&o.variables.add(e.name.value)}},FragmentSpread:{enter:function(e,t,n,r,a){if(c(e.directives))return f=!0,null;var o=i(a);o&&o.fragmentSpreads.add(e.name.value)}},FragmentDefinition:{enter:function(e,t,n,r){l.set(JSON.stringify(r),e)},leave:function(e,t,n,i){return e===l.get(JSON.stringify(i))?e:a>0&&e.selectionSet.selections.every(function(e){return e.kind===nL.h.FIELD&&"__typename"===e.name.value})?(r(e.name.value).removed=!0,f=!0,null):void 0}},Directive:{leave:function(e){if(s(e))return f=!0,null}}});if(!f)return t;var p=function(e){return e.transitiveVars||(e.transitiveVars=new Set(e.variables),e.removed||e.fragmentSpreads.forEach(function(t){p(r(t)).transitiveVars.forEach(function(t){e.transitiveVars.add(t)})})),e},b=new Set;h.definitions.forEach(function(e){e.kind===nL.h.OPERATION_DEFINITION?p(n(e.name&&e.name.value)).fragmentSpreads.forEach(function(e){b.add(e)}):e.kind!==nL.h.FRAGMENT_DEFINITION||0!==a||r(e.name.value).removed||b.add(e.name.value)}),b.forEach(function(e){p(r(e)).fragmentSpreads.forEach(function(e){b.add(e)})});var m=function(e){return!!(!b.has(e)||r(e).removed)},g={enter:function(e){if(m(e.name.value))return null}};return nD(tl(h,{FragmentSpread:g,FragmentDefinition:g,OperationDefinition:{leave:function(e){if(e.variableDefinitions){var 
t=p(n(e.name&&e.name.value)).transitiveVars;if(t.size0},t.prototype.tearDownQuery=function(){this.isTornDown||(this.concast&&this.observer&&(this.concast.removeObserver(this.observer),delete this.concast,delete this.observer),this.stopPolling(),this.subscriptions.forEach(function(e){return e.unsubscribe()}),this.subscriptions.clear(),this.queryManager.stopQuery(this.queryId),this.observers.clear(),this.isTornDown=!0)},t}(eT);function n4(e){var t=e.options,n=t.fetchPolicy,r=t.nextFetchPolicy;return"cache-and-network"===n||"network-only"===n?e.reobserve({fetchPolicy:"cache-first",nextFetchPolicy:function(){return(this.nextFetchPolicy=r,"function"==typeof r)?r.apply(this,arguments):n}}):e.reobserve()}function n5(e){__DEV__&&Q.kG.error("Unhandled error",e.message,e.stack)}function n6(e){__DEV__&&e&&__DEV__&&Q.kG.debug("Missing cache result fields: ".concat(JSON.stringify(e)),e)}function n9(e){return"network-only"===e||"no-cache"===e||"standby"===e}nK(n3);function n8(e){return e.kind===nL.h.FIELD||e.kind===nL.h.FRAGMENT_SPREAD||e.kind===nL.h.INLINE_FRAGMENT}function n7(e){return e.kind===Kind.SCALAR_TYPE_DEFINITION||e.kind===Kind.OBJECT_TYPE_DEFINITION||e.kind===Kind.INTERFACE_TYPE_DEFINITION||e.kind===Kind.UNION_TYPE_DEFINITION||e.kind===Kind.ENUM_TYPE_DEFINITION||e.kind===Kind.INPUT_OBJECT_TYPE_DEFINITION}function re(e){return e.kind===Kind.SCALAR_TYPE_EXTENSION||e.kind===Kind.OBJECT_TYPE_EXTENSION||e.kind===Kind.INTERFACE_TYPE_EXTENSION||e.kind===Kind.UNION_TYPE_EXTENSION||e.kind===Kind.ENUM_TYPE_EXTENSION||e.kind===Kind.INPUT_OBJECT_TYPE_EXTENSION}var rt=function(){return Object.create(null)},rn=Array.prototype,rr=rn.forEach,ri=rn.slice,ra=function(){function e(e,t){void 0===e&&(e=!0),void 0===t&&(t=rt),this.weakness=e,this.makeData=t}return e.prototype.lookup=function(){for(var e=[],t=0;tclass{constructor(){this.id=["slot",rc++,Date.now(),Math.random().toString(36).slice(2),].join(":")}hasValue(){for(let e=rs;e;e=e.parent)if(this.id in e.slots){let 
t=e.slots[this.id];if(t===ru)break;return e!==rs&&(rs.slots[this.id]=t),!0}return rs&&(rs.slots[this.id]=ru),!1}getValue(){if(this.hasValue())return rs.slots[this.id]}withValue(e,t,n,r){let i={__proto__:null,[this.id]:e},a=rs;rs={parent:a,slots:i};try{return t.apply(r,n)}finally{rs=a}}static bind(e){let t=rs;return function(){let n=rs;try{return rs=t,e.apply(this,arguments)}finally{rs=n}}}static noContext(e,t,n){if(!rs)return e.apply(n,t);{let r=rs;try{return rs=null,e.apply(n,t)}finally{rs=r}}}};function rf(e){try{return e()}catch(t){}}let rd="@wry/context:Slot",rh=rf(()=>globalThis)||rf(()=>global)||Object.create(null),rp=rh,rb=rp[rd]||Array[rd]||function(e){try{Object.defineProperty(rp,rd,{value:e,enumerable:!1,writable:!1,configurable:!0})}finally{return e}}(rl()),{bind:rm,noContext:rg}=rb;function rv(){}var ry=function(){function e(e,t){void 0===e&&(e=1/0),void 0===t&&(t=rv),this.max=e,this.dispose=t,this.map=new Map,this.newest=null,this.oldest=null}return e.prototype.has=function(e){return this.map.has(e)},e.prototype.get=function(e){var t=this.getNode(e);return t&&t.value},e.prototype.getNode=function(e){var t=this.map.get(e);if(t&&t!==this.newest){var n=t.older,r=t.newer;r&&(r.older=n),n&&(n.newer=r),t.older=this.newest,t.older.newer=t,t.newer=null,this.newest=t,t===this.oldest&&(this.oldest=r)}return t},e.prototype.set=function(e,t){var n=this.getNode(e);return n?n.value=t:(n={key:e,value:t,newer:null,older:this.newest},this.newest&&(this.newest.newer=n),this.newest=n,this.oldest=this.oldest||n,this.map.set(e,n),n.value)},e.prototype.clean=function(){for(;this.oldest&&this.map.size>this.max;)this.delete(this.oldest.key)},e.prototype.delete=function(e){var t=this.map.get(e);return!!t&&(t===this.newest&&(this.newest=t.older),t===this.oldest&&(this.oldest=t.newer),t.newer&&(t.newer.older=t.older),t.older&&(t.older.newer=t.newer),this.map.delete(e),this.dispose(t.value,e),!0)},e}(),rw=new rb,r_=Object.prototype.hasOwnProperty,rE=void 
0===(n=Array.from)?function(e){var t=[];return e.forEach(function(e){return t.push(e)}),t}:n;function rS(e){var t=e.unsubscribe;"function"==typeof t&&(e.unsubscribe=void 0,t())}var rk=[],rx=100;function rT(e,t){if(!e)throw Error(t||"assertion failure")}function rM(e,t){var n=e.length;return n>0&&n===t.length&&e[n-1]===t[n-1]}function rO(e){switch(e.length){case 0:throw Error("unknown value");case 1:return e[0];case 2:throw e[1]}}function rA(e){return e.slice(0)}var rL=function(){function e(t){this.fn=t,this.parents=new Set,this.childValues=new Map,this.dirtyChildren=null,this.dirty=!0,this.recomputing=!1,this.value=[],this.deps=null,++e.count}return e.prototype.peek=function(){if(1===this.value.length&&!rN(this))return rC(this),this.value[0]},e.prototype.recompute=function(e){return rT(!this.recomputing,"already recomputing"),rC(this),rN(this)?rI(this,e):rO(this.value)},e.prototype.setDirty=function(){this.dirty||(this.dirty=!0,this.value.length=0,rR(this),rS(this))},e.prototype.dispose=function(){var e=this;this.setDirty(),rH(this),rF(this,function(t,n){t.setDirty(),r$(t,e)})},e.prototype.forget=function(){this.dispose()},e.prototype.dependOn=function(e){e.add(this),this.deps||(this.deps=rk.pop()||new Set),this.deps.add(e)},e.prototype.forgetDeps=function(){var e=this;this.deps&&(rE(this.deps).forEach(function(t){return t.delete(e)}),this.deps.clear(),rk.push(this.deps),this.deps=null)},e.count=0,e}();function rC(e){var t=rw.getValue();if(t)return e.parents.add(t),t.childValues.has(e)||t.childValues.set(e,[]),rN(e)?rY(t,e):rB(t,e),t}function rI(e,t){return rH(e),rw.withValue(e,rD,[e,t]),rz(e,t)&&rP(e),rO(e.value)}function rD(e,t){e.recomputing=!0,e.value.length=0;try{e.value[0]=e.fn.apply(null,t)}catch(n){e.value[1]=n}e.recomputing=!1}function rN(e){return e.dirty||!!(e.dirtyChildren&&e.dirtyChildren.size)}function rP(e){e.dirty=!1,!rN(e)&&rj(e)}function rR(e){rF(e,rY)}function rj(e){rF(e,rB)}function rF(e,t){var n=e.parents.size;if(n)for(var 
r=rE(e.parents),i=0;i0&&e.childValues.forEach(function(t,n){r$(e,n)}),e.forgetDeps(),rT(null===e.dirtyChildren)}function r$(e,t){t.parents.delete(e),e.childValues.delete(t),rU(e,t)}function rz(e,t){if("function"==typeof e.subscribe)try{rS(e),e.unsubscribe=e.subscribe.apply(null,t)}catch(n){return e.setDirty(),!1}return!0}var rG={setDirty:!0,dispose:!0,forget:!0};function rW(e){var t=new Map,n=e&&e.subscribe;function r(e){var r=rw.getValue();if(r){var i=t.get(e);i||t.set(e,i=new Set),r.dependOn(i),"function"==typeof n&&(rS(i),i.unsubscribe=n(e))}}return r.dirty=function(e,n){var r=t.get(e);if(r){var i=n&&r_.call(rG,n)?n:"setDirty";rE(r).forEach(function(e){return e[i]()}),t.delete(e),rS(r)}},r}function rK(){var e=new ra("function"==typeof WeakMap);return function(){return e.lookupArray(arguments)}}var rV=rK(),rq=new Set;function rZ(e,t){void 0===t&&(t=Object.create(null));var n=new ry(t.max||65536,function(e){return e.dispose()}),r=t.keyArgs,i=t.makeCacheKey||rK(),a=function(){var a=i.apply(null,r?r.apply(null,arguments):arguments);if(void 0===a)return e.apply(null,arguments);var o=n.get(a);o||(n.set(a,o=new rL(e)),o.subscribe=t.subscribe,o.forget=function(){return n.delete(a)});var s=o.recompute(Array.prototype.slice.call(arguments));return n.set(a,o),rq.add(n),rw.hasValue()||(rq.forEach(function(e){return e.clean()}),rq.clear()),s};function o(e){var t=n.get(e);t&&t.setDirty()}function s(e){var t=n.get(e);if(t)return t.peek()}function u(e){return n.delete(e)}return Object.defineProperty(a,"size",{get:function(){return n.map.size},configurable:!1,enumerable:!1}),a.dirtyKey=o,a.dirty=function(){o(i.apply(null,arguments))},a.peekKey=s,a.peek=function(){return s(i.apply(null,arguments))},a.forgetKey=u,a.forget=function(){return u(i.apply(null,arguments))},a.makeCacheKey=i,a.getKey=r?function(){return i.apply(null,r.apply(null,arguments))}:i,Object.freeze(a)}var rX=new rb,rJ=new WeakMap;function rQ(e){var t=rJ.get(e);return t||rJ.set(e,t={vars:new 
Set,dep:rW()}),t}function r1(e){rQ(e).vars.forEach(function(t){return t.forgetCache(e)})}function r0(e){rQ(e).vars.forEach(function(t){return t.attachCache(e)})}function r2(e){var t=new Set,n=new Set,r=function(a){if(arguments.length>0){if(e!==a){e=a,t.forEach(function(e){rQ(e).dep.dirty(r),r3(e)});var o=Array.from(n);n.clear(),o.forEach(function(t){return t(e)})}}else{var s=rX.getValue();s&&(i(s),rQ(s).dep(r))}return e};r.onNextChange=function(e){return n.add(e),function(){n.delete(e)}};var i=r.attachCache=function(e){return t.add(e),rQ(e).vars.add(r),r};return r.forgetCache=function(e){return t.delete(e)},r}function r3(e){e.broadcastWatches&&e.broadcastWatches()}var r4=function(){function e(e){var t=e.cache,n=e.client,r=e.resolvers,i=e.fragmentMatcher;this.selectionsToResolveCache=new WeakMap,this.cache=t,n&&(this.client=n),r&&this.addResolvers(r),i&&this.setFragmentMatcher(i)}return e.prototype.addResolvers=function(e){var t=this;this.resolvers=this.resolvers||{},Array.isArray(e)?e.forEach(function(e){t.resolvers=tj(t.resolvers,e)}):this.resolvers=tj(this.resolvers,e)},e.prototype.setResolvers=function(e){this.resolvers={},this.addResolvers(e)},e.prototype.getResolvers=function(){return this.resolvers||{}},e.prototype.runResolvers=function(e){var t=e.document,n=e.remoteResult,r=e.context,i=e.variables,a=e.onlyRunForcedResolvers,o=void 0!==a&&a;return(0,en.mG)(this,void 0,void 0,function(){return(0,en.Jh)(this,function(e){return t?[2,this.resolveDocument(t,n.data,r,i,this.fragmentMatcher,o).then(function(e){return(0,en.pi)((0,en.pi)({},n),{data:e.result})})]:[2,n]})})},e.prototype.setFragmentMatcher=function(e){this.fragmentMatcher=e},e.prototype.getFragmentMatcher=function(){return this.fragmentMatcher},e.prototype.clientQuery=function(e){return tb(["client"],e)&&this.resolvers?e:null},e.prototype.serverQuery=function(e){return n$(e)},e.prototype.prepareContext=function(e){var t=this.cache;return(0,en.pi)((0,en.pi)({},e),{cache:t,getCacheKey:function(e){return 
t.identify(e)}})},e.prototype.addExportedVariables=function(e,t,n){return void 0===t&&(t={}),void 0===n&&(n={}),(0,en.mG)(this,void 0,void 0,function(){return(0,en.Jh)(this,function(r){return e?[2,this.resolveDocument(e,this.buildRootValueFromCache(e,t)||{},this.prepareContext(n),t).then(function(e){return(0,en.pi)((0,en.pi)({},t),e.exportedVariables)})]:[2,(0,en.pi)({},t)]})})},e.prototype.shouldForceResolvers=function(e){var t=!1;return tl(e,{Directive:{enter:function(e){if("client"===e.name.value&&e.arguments&&(t=e.arguments.some(function(e){return"always"===e.name.value&&"BooleanValue"===e.value.kind&&!0===e.value.value})))return tc}}}),t},e.prototype.buildRootValueFromCache=function(e,t){return this.cache.diff({query:nH(e),variables:t,returnPartialData:!0,optimistic:!1}).result},e.prototype.resolveDocument=function(e,t,n,r,i,a){return void 0===n&&(n={}),void 0===r&&(r={}),void 0===i&&(i=function(){return!0}),void 0===a&&(a=!1),(0,en.mG)(this,void 0,void 0,function(){var o,s,u,c,l,f,d,h,p,b,m;return(0,en.Jh)(this,function(g){return o=e9(e),s=e4(e),u=eL(s),c=this.collectSelectionsToResolve(o,u),f=(l=o.operation)?l.charAt(0).toUpperCase()+l.slice(1):"Query",d=this,h=d.cache,p=d.client,b={fragmentMap:u,context:(0,en.pi)((0,en.pi)({},n),{cache:h,client:p}),variables:r,fragmentMatcher:i,defaultOperationType:f,exportedVariables:{},selectionsToResolve:c,onlyRunForcedResolvers:a},m=!1,[2,this.resolveSelectionSet(o.selectionSet,m,t,b).then(function(e){return{result:e,exportedVariables:b.exportedVariables}})]})})},e.prototype.resolveSelectionSet=function(e,t,n,r){return(0,en.mG)(this,void 0,void 0,function(){var i,a,o,s,u,c=this;return(0,en.Jh)(this,function(l){return i=r.fragmentMap,a=r.context,o=r.variables,s=[n],u=function(e){return(0,en.mG)(c,void 0,void 0,function(){var u,c;return(0,en.Jh)(this,function(l){return(t||r.selectionsToResolve.has(e))&&td(e,o)?eQ(e)?[2,this.resolveField(e,t,n,r).then(function(t){var n;void 
0!==t&&s.push(((n={})[eX(e)]=t,n))})]:(e1(e)?u=e:(u=i[e.name.value],__DEV__?(0,Q.kG)(u,"No fragment named ".concat(e.name.value)):(0,Q.kG)(u,11)),u&&u.typeCondition&&(c=u.typeCondition.name.value,r.fragmentMatcher(n,c,a)))?[2,this.resolveSelectionSet(u.selectionSet,t,n,r).then(function(e){s.push(e)})]:[2]:[2]})})},[2,Promise.all(e.selections.map(u)).then(function(){return tF(s)})]})})},e.prototype.resolveField=function(e,t,n,r){return(0,en.mG)(this,void 0,void 0,function(){var i,a,o,s,u,c,l,f,d,h=this;return(0,en.Jh)(this,function(p){return n?(i=r.variables,a=e.name.value,o=eX(e),s=a!==o,c=Promise.resolve(u=n[o]||n[a]),(!r.onlyRunForcedResolvers||this.shouldForceResolvers(e))&&(l=n.__typename||r.defaultOperationType,(f=this.resolvers&&this.resolvers[l])&&(d=f[s?a:o])&&(c=Promise.resolve(rX.withValue(this.cache,d,[n,eZ(e,i),r.context,{field:e,fragmentMap:r.fragmentMap},])))),[2,c.then(function(n){if(void 0===n&&(n=u),e.directives&&e.directives.forEach(function(e){"export"===e.name.value&&e.arguments&&e.arguments.forEach(function(e){"as"===e.name.value&&"StringValue"===e.value.kind&&(r.exportedVariables[e.value.value]=n)})}),!e.selectionSet||null==n)return n;var i,a,o=null!==(a=null===(i=e.directives)||void 0===i?void 0:i.some(function(e){return"client"===e.name.value}))&&void 0!==a&&a;return Array.isArray(n)?h.resolveSubSelectedArray(e,t||o,n,r):e.selectionSet?h.resolveSelectionSet(e.selectionSet,t||o,n,r):void 0})]):[2,null]})})},e.prototype.resolveSubSelectedArray=function(e,t,n,r){var i=this;return Promise.all(n.map(function(n){return null===n?null:Array.isArray(n)?i.resolveSubSelectedArray(e,t,n,r):e.selectionSet?i.resolveSelectionSet(e.selectionSet,t,n,r):void 0}))},e.prototype.collectSelectionsToResolve=function(e,t){var n=function(e){return!Array.isArray(e)},r=this.selectionsToResolveCache;function i(e){if(!r.has(e)){var a=new 
Set;r.set(e,a),tl(e,{Directive:function(e,t,r,i,o){"client"===e.name.value&&o.forEach(function(e){n(e)&&n8(e)&&a.add(e)})},FragmentSpread:function(e,r,o,s,u){var c=t[e.name.value];__DEV__?(0,Q.kG)(c,"No fragment named ".concat(e.name.value)):(0,Q.kG)(c,12);var l=i(c);l.size>0&&(u.forEach(function(e){n(e)&&n8(e)&&a.add(e)}),a.add(e),l.forEach(function(e){a.add(e)}))}})}return r.get(e)}return i(e)},e}(),r5=new(t_.mr?WeakMap:Map);function r6(e,t){var n=e[t];"function"==typeof n&&(e[t]=function(){return r5.set(e,(r5.get(e)+1)%1e15),n.apply(this,arguments)})}function r9(e){e.notifyTimeout&&(clearTimeout(e.notifyTimeout),e.notifyTimeout=void 0)}var r8=function(){function e(e,t){void 0===t&&(t=e.generateQueryId()),this.queryId=t,this.listeners=new Set,this.document=null,this.lastRequestId=1,this.subscriptions=new Set,this.stopped=!1,this.dirty=!1,this.observableQuery=null;var n=this.cache=e.cache;r5.has(n)||(r5.set(n,0),r6(n,"evict"),r6(n,"modify"),r6(n,"reset"))}return e.prototype.init=function(e){var t=e.networkStatus||nZ.I.loading;return this.variables&&this.networkStatus!==nZ.I.loading&&!(0,nm.D)(this.variables,e.variables)&&(t=nZ.I.setVariables),(0,nm.D)(e.variables,this.variables)||(this.lastDiff=void 0),Object.assign(this,{document:e.document,variables:e.variables,networkError:null,graphQLErrors:this.graphQLErrors||[],networkStatus:t}),e.observableQuery&&this.setObservableQuery(e.observableQuery),e.lastRequestId&&(this.lastRequestId=e.lastRequestId),this},e.prototype.reset=function(){r9(this),this.dirty=!1},e.prototype.getDiff=function(e){void 0===e&&(e=this.variables);var t=this.getDiffOptions(e);if(this.lastDiff&&(0,nm.D)(t,this.lastDiff.options))return this.lastDiff.diff;this.updateWatch(this.variables=e);var n=this.observableQuery;if(n&&"no-cache"===n.options.fetchPolicy)return{complete:!1};var r=this.cache.diff(t);return this.updateLastDiff(r,t),r},e.prototype.updateLastDiff=function(e,t){this.lastDiff=e?{diff:e,options:t||this.getDiffOptions()}:void 
0},e.prototype.getDiffOptions=function(e){var t;return void 0===e&&(e=this.variables),{query:this.document,variables:e,returnPartialData:!0,optimistic:!0,canonizeResults:null===(t=this.observableQuery)||void 0===t?void 0:t.options.canonizeResults}},e.prototype.setDiff=function(e){var t=this,n=this.lastDiff&&this.lastDiff.diff;this.updateLastDiff(e),this.dirty||(0,nm.D)(n&&n.result,e&&e.result)||(this.dirty=!0,this.notifyTimeout||(this.notifyTimeout=setTimeout(function(){return t.notify()},0)))},e.prototype.setObservableQuery=function(e){var t=this;e!==this.observableQuery&&(this.oqListener&&this.listeners.delete(this.oqListener),this.observableQuery=e,e?(e.queryInfo=this,this.listeners.add(this.oqListener=function(){t.getDiff().fromOptimisticTransaction?e.observe():n4(e)})):delete this.oqListener)},e.prototype.notify=function(){var e=this;r9(this),this.shouldNotify()&&this.listeners.forEach(function(t){return t(e)}),this.dirty=!1},e.prototype.shouldNotify=function(){if(!this.dirty||!this.listeners.size)return!1;if((0,nZ.O)(this.networkStatus)&&this.observableQuery){var e=this.observableQuery.options.fetchPolicy;if("cache-only"!==e&&"cache-and-network"!==e)return!1}return!0},e.prototype.stop=function(){if(!this.stopped){this.stopped=!0,this.reset(),this.cancel(),this.cancel=e.prototype.cancel,this.subscriptions.forEach(function(e){return e.unsubscribe()});var t=this.observableQuery;t&&t.stopPolling()}},e.prototype.cancel=function(){},e.prototype.updateWatch=function(e){var t=this;void 0===e&&(e=this.variables);var n=this.observableQuery;if(!n||"no-cache"!==n.options.fetchPolicy){var r=(0,en.pi)((0,en.pi)({},this.getDiffOptions(e)),{watcher:this,callback:function(e){return t.setDiff(e)}});this.lastWatch&&(0,nm.D)(r,this.lastWatch)||(this.cancel(),this.cancel=this.cache.watch(this.lastWatch=r))}},e.prototype.resetLastWrite=function(){this.lastWrite=void 0},e.prototype.shouldWrite=function(e,t){var 
n=this.lastWrite;return!(n&&n.dmCount===r5.get(this.cache)&&(0,nm.D)(t,n.variables)&&(0,nm.D)(e.data,n.result.data))},e.prototype.markResult=function(e,t,n,r){var i=this,a=new tB,o=(0,tP.O)(e.errors)?e.errors.slice(0):[];if(this.reset(),"incremental"in e&&(0,tP.O)(e.incremental)){var s=tG(this.getDiff().result,e);e.data=s}else if("hasNext"in e&&e.hasNext){var u=this.getDiff();e.data=a.merge(u.result,e.data)}this.graphQLErrors=o,"no-cache"===n.fetchPolicy?this.updateLastDiff({result:e.data,complete:!0},this.getDiffOptions(n.variables)):0!==r&&(r7(e,n.errorPolicy)?this.cache.performTransaction(function(a){if(i.shouldWrite(e,n.variables))a.writeQuery({query:t,data:e.data,variables:n.variables,overwrite:1===r}),i.lastWrite={result:e,variables:n.variables,dmCount:r5.get(i.cache)};else if(i.lastDiff&&i.lastDiff.diff.complete){e.data=i.lastDiff.diff.result;return}var o=i.getDiffOptions(n.variables),s=a.diff(o);i.stopped||i.updateWatch(n.variables),i.updateLastDiff(s,o),s.complete&&(e.data=s.result)}):this.lastWrite=void 0)},e.prototype.markReady=function(){return this.networkError=null,this.networkStatus=nZ.I.ready},e.prototype.markError=function(e){return this.networkStatus=nZ.I.error,this.lastWrite=void 0,this.reset(),e.graphQLErrors&&(this.graphQLErrors=e.graphQLErrors),e.networkError&&(this.networkError=e.networkError),e},e}();function r7(e,t){void 0===t&&(t="none");var n="ignore"===t||"all"===t,r=!nO(e);return!r&&n&&e.data&&(r=!0),r}var ie=Object.prototype.hasOwnProperty,it=function(){function e(e){var t=e.cache,n=e.link,r=e.defaultOptions,i=e.queryDeduplication,a=void 0!==i&&i,o=e.onBroadcast,s=e.ssrMode,u=void 0!==s&&s,c=e.clientAwareness,l=void 0===c?{}:c,f=e.localState,d=e.assumeImmutableResults;this.clientAwareness={},this.queries=new Map,this.fetchCancelFns=new Map,this.transformCache=new(t_.mr?WeakMap:Map),this.queryIdCounter=1,this.requestIdCounter=1,this.mutationIdCounter=1,this.inFlightLinkObservables=new 
Map,this.cache=t,this.link=n,this.defaultOptions=r||Object.create(null),this.queryDeduplication=a,this.clientAwareness=l,this.localState=f||new r4({cache:t}),this.ssrMode=u,this.assumeImmutableResults=!!d,(this.onBroadcast=o)&&(this.mutationStore=Object.create(null))}return e.prototype.stop=function(){var e=this;this.queries.forEach(function(t,n){e.stopQueryNoBroadcast(n)}),this.cancelPendingFetches(__DEV__?new Q.ej("QueryManager stopped while query was in flight"):new Q.ej(14))},e.prototype.cancelPendingFetches=function(e){this.fetchCancelFns.forEach(function(t){return t(e)}),this.fetchCancelFns.clear()},e.prototype.mutate=function(e){var t,n,r=e.mutation,i=e.variables,a=e.optimisticResponse,o=e.updateQueries,s=e.refetchQueries,u=void 0===s?[]:s,c=e.awaitRefetchQueries,l=void 0!==c&&c,f=e.update,d=e.onQueryUpdated,h=e.fetchPolicy,p=void 0===h?(null===(t=this.defaultOptions.mutate)||void 0===t?void 0:t.fetchPolicy)||"network-only":h,b=e.errorPolicy,m=void 0===b?(null===(n=this.defaultOptions.mutate)||void 0===n?void 0:n.errorPolicy)||"none":b,g=e.keepRootFields,v=e.context;return(0,en.mG)(this,void 0,void 0,function(){var e,t,n,s,c,h;return(0,en.Jh)(this,function(b){switch(b.label){case 0:if(__DEV__?(0,Q.kG)(r,"mutation option is required. You must specify your GraphQL document in the mutation option."):(0,Q.kG)(r,15),__DEV__?(0,Q.kG)("network-only"===p||"no-cache"===p,"Mutations support only 'network-only' or 'no-cache' fetchPolicy strings. The default `network-only` behavior automatically writes mutation results to the cache. 
Passing `no-cache` skips the cache write."):(0,Q.kG)("network-only"===p||"no-cache"===p,16),e=this.generateMutationId(),n=(t=this.transform(r)).document,s=t.hasClientExports,r=this.cache.transformForLink(n),i=this.getVariables(r,i),!s)return[3,2];return[4,this.localState.addExportedVariables(r,i,v)];case 1:i=b.sent(),b.label=2;case 2:return c=this.mutationStore&&(this.mutationStore[e]={mutation:r,variables:i,loading:!0,error:null}),a&&this.markMutationOptimistic(a,{mutationId:e,document:r,variables:i,fetchPolicy:p,errorPolicy:m,context:v,updateQueries:o,update:f,keepRootFields:g}),this.broadcastQueries(),h=this,[2,new Promise(function(t,n){return nM(h.getObservableFromLink(r,(0,en.pi)((0,en.pi)({},v),{optimisticResponse:a}),i,!1),function(t){if(nO(t)&&"none"===m)throw new tN.cA({graphQLErrors:nA(t)});c&&(c.loading=!1,c.error=null);var n=(0,en.pi)({},t);return"function"==typeof u&&(u=u(n)),"ignore"===m&&nO(n)&&delete n.errors,h.markMutationResult({mutationId:e,result:n,document:r,variables:i,fetchPolicy:p,errorPolicy:m,context:v,update:f,updateQueries:o,awaitRefetchQueries:l,refetchQueries:u,removeOptimistic:a?e:void 0,onQueryUpdated:d,keepRootFields:g})}).subscribe({next:function(e){h.broadcastQueries(),"hasNext"in e&&!1!==e.hasNext||t(e)},error:function(t){c&&(c.loading=!1,c.error=t),a&&h.cache.removeOptimistic(e),h.broadcastQueries(),n(t instanceof tN.cA?t:new tN.cA({networkError:t}))}})})]}})})},e.prototype.markMutationResult=function(e,t){var n=this;void 0===t&&(t=this.cache);var r=e.result,i=[],a="no-cache"===e.fetchPolicy;if(!a&&r7(r,e.errorPolicy)){if(tU(r)||i.push({result:r.data,dataId:"ROOT_MUTATION",query:e.document,variables:e.variables}),tU(r)&&(0,tP.O)(r.incremental)){var o=t.diff({id:"ROOT_MUTATION",query:this.transform(e.document).asQuery,variables:e.variables,optimistic:!1,returnPartialData:!0}),s=void 0;o.result&&(s=tG(o.result,r)),void 0!==s&&(r.data=s,i.push({result:s,dataId:"ROOT_MUTATION",query:e.document,variables:e.variables}))}var 
u=e.updateQueries;u&&this.queries.forEach(function(e,a){var o=e.observableQuery,s=o&&o.queryName;if(s&&ie.call(u,s)){var c,l=u[s],f=n.queries.get(a),d=f.document,h=f.variables,p=t.diff({query:d,variables:h,returnPartialData:!0,optimistic:!1}),b=p.result;if(p.complete&&b){var m=l(b,{mutationResult:r,queryName:d&&e3(d)||void 0,queryVariables:h});m&&i.push({result:m,dataId:"ROOT_QUERY",query:d,variables:h})}}})}if(i.length>0||e.refetchQueries||e.update||e.onQueryUpdated||e.removeOptimistic){var c=[];if(this.refetchQueries({updateCache:function(t){a||i.forEach(function(e){return t.write(e)});var o=e.update,s=!t$(r)||tU(r)&&!r.hasNext;if(o){if(!a){var u=t.diff({id:"ROOT_MUTATION",query:n.transform(e.document).asQuery,variables:e.variables,optimistic:!1,returnPartialData:!0});u.complete&&("incremental"in(r=(0,en.pi)((0,en.pi)({},r),{data:u.result}))&&delete r.incremental,"hasNext"in r&&delete r.hasNext)}s&&o(t,r,{context:e.context,variables:e.variables})}a||e.keepRootFields||!s||t.modify({id:"ROOT_MUTATION",fields:function(e,t){var n=t.fieldName,r=t.DELETE;return"__typename"===n?e:r}})},include:e.refetchQueries,optimistic:!1,removeOptimistic:e.removeOptimistic,onQueryUpdated:e.onQueryUpdated||null}).forEach(function(e){return c.push(e)}),e.awaitRefetchQueries||e.onQueryUpdated)return Promise.all(c).then(function(){return r})}return Promise.resolve(r)},e.prototype.markMutationOptimistic=function(e,t){var n=this,r="function"==typeof e?e(t.variables):e;return this.cache.recordOptimisticTransaction(function(e){try{n.markMutationResult((0,en.pi)((0,en.pi)({},t),{result:{data:r}}),e)}catch(i){__DEV__&&Q.kG.error(i)}},t.mutationId)},e.prototype.fetchQuery=function(e,t,n){return this.fetchQueryObservable(e,t,n).promise},e.prototype.getQueryStore=function(){var e=Object.create(null);return 
this.queries.forEach(function(t,n){e[n]={variables:t.variables,networkStatus:t.networkStatus,networkError:t.networkError,graphQLErrors:t.graphQLErrors}}),e},e.prototype.resetErrors=function(e){var t=this.queries.get(e);t&&(t.networkError=void 0,t.graphQLErrors=[])},e.prototype.transform=function(e){var t=this.transformCache;if(!t.has(e)){var n=this.cache.transformDocument(e),r=nY(n),i=this.localState.clientQuery(n),a=r&&this.localState.serverQuery(r),o={document:n,hasClientExports:tm(n),hasForcedResolvers:this.localState.shouldForceResolvers(n),clientQuery:i,serverQuery:a,defaultVars:e8(e2(n)),asQuery:(0,en.pi)((0,en.pi)({},n),{definitions:n.definitions.map(function(e){return"OperationDefinition"===e.kind&&"query"!==e.operation?(0,en.pi)((0,en.pi)({},e),{operation:"query"}):e})})},s=function(e){e&&!t.has(e)&&t.set(e,o)};s(e),s(n),s(i),s(a)}return t.get(e)},e.prototype.getVariables=function(e,t){return(0,en.pi)((0,en.pi)({},this.transform(e).defaultVars),t)},e.prototype.watchQuery=function(e){void 0===(e=(0,en.pi)((0,en.pi)({},e),{variables:this.getVariables(e.query,e.variables)})).notifyOnNetworkStatusChange&&(e.notifyOnNetworkStatusChange=!1);var t=new r8(this),n=new n3({queryManager:this,queryInfo:t,options:e});return this.queries.set(n.queryId,t),t.init({document:n.query,observableQuery:n,variables:n.variables}),n},e.prototype.query=function(e,t){var n=this;return void 0===t&&(t=this.generateQueryId()),__DEV__?(0,Q.kG)(e.query,"query option is required. 
You must specify your GraphQL document in the query option."):(0,Q.kG)(e.query,17),__DEV__?(0,Q.kG)("Document"===e.query.kind,'You must wrap the query string in a "gql" tag.'):(0,Q.kG)("Document"===e.query.kind,18),__DEV__?(0,Q.kG)(!e.returnPartialData,"returnPartialData option only supported on watchQuery."):(0,Q.kG)(!e.returnPartialData,19),__DEV__?(0,Q.kG)(!e.pollInterval,"pollInterval option only supported on watchQuery."):(0,Q.kG)(!e.pollInterval,20),this.fetchQuery(t,e).finally(function(){return n.stopQuery(t)})},e.prototype.generateQueryId=function(){return String(this.queryIdCounter++)},e.prototype.generateRequestId=function(){return this.requestIdCounter++},e.prototype.generateMutationId=function(){return String(this.mutationIdCounter++)},e.prototype.stopQueryInStore=function(e){this.stopQueryInStoreNoBroadcast(e),this.broadcastQueries()},e.prototype.stopQueryInStoreNoBroadcast=function(e){var t=this.queries.get(e);t&&t.stop()},e.prototype.clearStore=function(e){return void 0===e&&(e={discardWatches:!0}),this.cancelPendingFetches(__DEV__?new Q.ej("Store reset while query was in flight (not completed in link chain)"):new Q.ej(21)),this.queries.forEach(function(e){e.observableQuery?e.networkStatus=nZ.I.loading:e.stop()}),this.mutationStore&&(this.mutationStore=Object.create(null)),this.cache.reset(e)},e.prototype.getObservableQueries=function(e){var t=this;void 0===e&&(e="active");var n=new Map,r=new Map,i=new Set;return Array.isArray(e)&&e.forEach(function(e){"string"==typeof e?r.set(e,!1):eN(e)?r.set(t.transform(e).document,!1):(0,eO.s)(e)&&e.query&&i.add(e)}),this.queries.forEach(function(t,i){var a=t.observableQuery,o=t.document;if(a){if("all"===e){n.set(i,a);return}var s=a.queryName;if("standby"===a.options.fetchPolicy||"active"===e&&!a.hasObservers())return;("active"===e||s&&r.has(s)||o&&r.has(o))&&(n.set(i,a),s&&r.set(s,!0),o&&r.set(o,!0))}}),i.size&&i.forEach(function(e){var 
r=nG("legacyOneTimeQuery"),i=t.getQuery(r).init({document:e.query,variables:e.variables}),a=new n3({queryManager:t,queryInfo:i,options:(0,en.pi)((0,en.pi)({},e),{fetchPolicy:"network-only"})});(0,Q.kG)(a.queryId===r),i.setObservableQuery(a),n.set(r,a)}),__DEV__&&r.size&&r.forEach(function(e,t){!e&&__DEV__&&Q.kG.warn("Unknown query ".concat("string"==typeof t?"named ":"").concat(JSON.stringify(t,null,2)," requested in refetchQueries options.include array"))}),n},e.prototype.reFetchObservableQueries=function(e){var t=this;void 0===e&&(e=!1);var n=[];return this.getObservableQueries(e?"all":"active").forEach(function(r,i){var a=r.options.fetchPolicy;r.resetLastResults(),(e||"standby"!==a&&"cache-only"!==a)&&n.push(r.refetch()),t.getQuery(i).setDiff(null)}),this.broadcastQueries(),Promise.all(n)},e.prototype.setObservableQuery=function(e){this.getQuery(e.queryId).setObservableQuery(e)},e.prototype.startGraphQLSubscription=function(e){var t=this,n=e.query,r=e.fetchPolicy,i=e.errorPolicy,a=e.variables,o=e.context,s=void 0===o?{}:o;n=this.transform(n).document,a=this.getVariables(n,a);var u=function(e){return t.getObservableFromLink(n,s,e).map(function(a){"no-cache"!==r&&(r7(a,i)&&t.cache.write({query:n,result:a.data,dataId:"ROOT_SUBSCRIPTION",variables:e}),t.broadcastQueries());var o=nO(a),s=(0,tN.ls)(a);if(o||s){var u={};throw o&&(u.graphQLErrors=a.errors),s&&(u.protocolErrors=a.extensions[tN.YG]),new tN.cA(u)}return a})};if(this.transform(n).hasClientExports){var c=this.localState.addExportedVariables(n,a,s).then(u);return new eT(function(e){var t=null;return c.then(function(n){return t=n.subscribe(e)},e.error),function(){return t&&t.unsubscribe()}})}return 
u(a)},e.prototype.stopQuery=function(e){this.stopQueryNoBroadcast(e),this.broadcastQueries()},e.prototype.stopQueryNoBroadcast=function(e){this.stopQueryInStoreNoBroadcast(e),this.removeQuery(e)},e.prototype.removeQuery=function(e){this.fetchCancelFns.delete(e),this.queries.has(e)&&(this.getQuery(e).stop(),this.queries.delete(e))},e.prototype.broadcastQueries=function(){this.onBroadcast&&this.onBroadcast(),this.queries.forEach(function(e){return e.notify()})},e.prototype.getLocalState=function(){return this.localState},e.prototype.getObservableFromLink=function(e,t,n,r){var i,a,o=this;void 0===r&&(r=null!==(i=null==t?void 0:t.queryDeduplication)&&void 0!==i?i:this.queryDeduplication);var s=this.transform(e).serverQuery;if(s){var u=this,c=u.inFlightLinkObservables,l=u.link,f={query:s,variables:n,operationName:e3(s)||void 0,context:this.prepareContext((0,en.pi)((0,en.pi)({},t),{forceFetch:!r}))};if(t=f.context,r){var d=c.get(s)||new Map;c.set(s,d);var h=nx(n);if(!(a=d.get(h))){var p=new nq([np(l,f)]);d.set(h,a=p),p.beforeNext(function(){d.delete(h)&&d.size<1&&c.delete(s)})}}else a=new nq([np(l,f)])}else a=new nq([eT.of({data:{}})]),t=this.prepareContext(t);var b=this.transform(e).clientQuery;return b&&(a=nM(a,function(e){return o.localState.runResolvers({document:b,remoteResult:e,context:t,variables:n})})),a},e.prototype.getResultsFromLink=function(e,t,n){var r=e.lastRequestId=this.generateRequestId(),i=this.cache.transformForLink(this.transform(e.document).document);return nM(this.getObservableFromLink(i,n.context,n.variables),function(a){var o=nA(a),s=o.length>0;if(r>=e.lastRequestId){if(s&&"none"===n.errorPolicy)throw e.markError(new tN.cA({graphQLErrors:o}));e.markResult(a,i,n,t),e.markReady()}var u={data:a.data,loading:!1,networkStatus:nZ.I.ready};return s&&"ignore"!==n.errorPolicy&&(u.errors=o,u.networkStatus=nZ.I.error),u},function(t){var n=(0,tN.MS)(t)?t:new tN.cA({networkError:t});throw 
r>=e.lastRequestId&&e.markError(n),n})},e.prototype.fetchQueryObservable=function(e,t,n){return this.fetchConcastWithInfo(e,t,n).concast},e.prototype.fetchConcastWithInfo=function(e,t,n){var r,i,a=this;void 0===n&&(n=nZ.I.loading);var o=this.transform(t.query).document,s=this.getVariables(o,t.variables),u=this.getQuery(e),c=this.defaultOptions.watchQuery,l=t.fetchPolicy,f=void 0===l?c&&c.fetchPolicy||"cache-first":l,d=t.errorPolicy,h=void 0===d?c&&c.errorPolicy||"none":d,p=t.returnPartialData,b=void 0!==p&&p,m=t.notifyOnNetworkStatusChange,g=void 0!==m&&m,v=t.context,y=void 0===v?{}:v,w=Object.assign({},t,{query:o,variables:s,fetchPolicy:f,errorPolicy:h,returnPartialData:b,notifyOnNetworkStatusChange:g,context:y}),_=function(e){w.variables=e;var r=a.fetchQueryByPolicy(u,w,n);return"standby"!==w.fetchPolicy&&r.sources.length>0&&u.observableQuery&&u.observableQuery.applyNextFetchPolicy("after-fetch",t),r},E=function(){return a.fetchCancelFns.delete(e)};if(this.fetchCancelFns.set(e,function(e){E(),setTimeout(function(){return r.cancel(e)})}),this.transform(w.query).hasClientExports)r=new nq(this.localState.addExportedVariables(w.query,w.variables,w.context).then(_).then(function(e){return e.sources})),i=!0;else{var S=_(w.variables);i=S.fromLink,r=new nq(S.sources)}return r.promise.then(E,E),{concast:r,fromLink:i}},e.prototype.refetchQueries=function(e){var t=this,n=e.updateCache,r=e.include,i=e.optimistic,a=void 0!==i&&i,o=e.removeOptimistic,s=void 0===o?a?nG("refetchQueries"):void 0:o,u=e.onQueryUpdated,c=new Map;r&&this.getObservableQueries(r).forEach(function(e,n){c.set(n,{oq:e,lastDiff:t.getQuery(n).getDiff()})});var l=new Map;return n&&this.cache.batch({update:n,optimistic:a&&s||!1,removeOptimistic:s,onWatchUpdated:function(e,t,n){var r=e.watcher instanceof r8&&e.watcher.observableQuery;if(r){if(u){c.delete(r.queryId);var 
i=u(r,t,n);return!0===i&&(i=r.refetch()),!1!==i&&l.set(r,i),i}null!==u&&c.set(r.queryId,{oq:r,lastDiff:n,diff:t})}}}),c.size&&c.forEach(function(e,n){var r,i=e.oq,a=e.lastDiff,o=e.diff;if(u){if(!o){var s=i.queryInfo;s.reset(),o=s.getDiff()}r=u(i,o,a)}u&&!0!==r||(r=i.refetch()),!1!==r&&l.set(i,r),n.indexOf("legacyOneTimeQuery")>=0&&t.stopQueryNoBroadcast(n)}),s&&this.cache.removeOptimistic(s),l},e.prototype.fetchQueryByPolicy=function(e,t,n){var r=this,i=t.query,a=t.variables,o=t.fetchPolicy,s=t.refetchWritePolicy,u=t.errorPolicy,c=t.returnPartialData,l=t.context,f=t.notifyOnNetworkStatusChange,d=e.networkStatus;e.init({document:this.transform(i).document,variables:a,networkStatus:n});var h=function(){return e.getDiff(a)},p=function(t,n){void 0===n&&(n=e.networkStatus||nZ.I.loading);var o=t.result;!__DEV__||c||(0,nm.D)(o,{})||n6(t.missing);var s=function(e){return eT.of((0,en.pi)({data:e,loading:(0,nZ.O)(n),networkStatus:n},t.complete?null:{partial:!0}))};return o&&r.transform(i).hasForcedResolvers?r.localState.runResolvers({document:i,remoteResult:{data:o},context:l,variables:a,onlyRunForcedResolvers:!0}).then(function(e){return s(e.data||void 0)}):"none"===u&&n===nZ.I.refetch&&Array.isArray(t.missing)?s(void 0):s(o)},b="no-cache"===o?0:n===nZ.I.refetch&&"merge"!==s?1:2,m=function(){return r.getResultsFromLink(e,b,{variables:a,context:l,fetchPolicy:o,errorPolicy:u})},g=f&&"number"==typeof d&&d!==n&&(0,nZ.O)(n);switch(o){default:case"cache-first":var v=h();if(v.complete)return{fromLink:!1,sources:[p(v,e.markReady())]};if(c||g)return{fromLink:!0,sources:[p(v),m()]};return{fromLink:!0,sources:[m()]};case"cache-and-network":var 
v=h();if(v.complete||c||g)return{fromLink:!0,sources:[p(v),m()]};return{fromLink:!0,sources:[m()]};case"cache-only":return{fromLink:!1,sources:[p(h(),e.markReady())]};case"network-only":if(g)return{fromLink:!0,sources:[p(h()),m()]};return{fromLink:!0,sources:[m()]};case"no-cache":if(g)return{fromLink:!0,sources:[p(e.getDiff()),m(),]};return{fromLink:!0,sources:[m()]};case"standby":return{fromLink:!1,sources:[]}}},e.prototype.getQuery=function(e){return e&&!this.queries.has(e)&&this.queries.set(e,new r8(this,e)),this.queries.get(e)},e.prototype.prepareContext=function(e){void 0===e&&(e={});var t=this.localState.prepareContext(e);return(0,en.pi)((0,en.pi)({},t),{clientAwareness:this.clientAwareness})},e}(),ir=__webpack_require__(14012),ii=!1,ia=function(){function e(e){var t=this;this.resetStoreCallbacks=[],this.clearStoreCallbacks=[];var n=e.uri,r=e.credentials,i=e.headers,a=e.cache,o=e.ssrMode,s=void 0!==o&&o,u=e.ssrForceFetchDelay,c=void 0===u?0:u,l=e.connectToDevTools,f=void 0===l?"object"==typeof window&&!window.__APOLLO_CLIENT__&&__DEV__:l,d=e.queryDeduplication,h=void 0===d||d,p=e.defaultOptions,b=e.assumeImmutableResults,m=void 0!==b&&b,g=e.resolvers,v=e.typeDefs,y=e.fragmentMatcher,w=e.name,_=e.version,E=e.link;if(E||(E=n?new nh({uri:n,credentials:r,headers:i}):ta.empty()),!a)throw __DEV__?new Q.ej("To initialize Apollo Client, you must specify a 'cache' property in the options object. 
\nFor more information, please visit: https://go.apollo.dev/c/docs"):new Q.ej(9);if(this.link=E,this.cache=a,this.disableNetworkFetches=s||c>0,this.queryDeduplication=h,this.defaultOptions=p||Object.create(null),this.typeDefs=v,c&&setTimeout(function(){return t.disableNetworkFetches=!1},c),this.watchQuery=this.watchQuery.bind(this),this.query=this.query.bind(this),this.mutate=this.mutate.bind(this),this.resetStore=this.resetStore.bind(this),this.reFetchObservableQueries=this.reFetchObservableQueries.bind(this),f&&"object"==typeof window&&(window.__APOLLO_CLIENT__=this),!ii&&f&&__DEV__&&(ii=!0,"undefined"!=typeof window&&window.document&&window.top===window.self&&!window.__APOLLO_DEVTOOLS_GLOBAL_HOOK__)){var S=window.navigator,k=S&&S.userAgent,x=void 0;"string"==typeof k&&(k.indexOf("Chrome/")>-1?x="https://chrome.google.com/webstore/detail/apollo-client-developer-t/jdkknkkbebbapilgoeccciglkfbmbnfm":k.indexOf("Firefox/")>-1&&(x="https://addons.mozilla.org/en-US/firefox/addon/apollo-developer-tools/")),x&&__DEV__&&Q.kG.log("Download the Apollo DevTools for a better development experience: "+x)}this.version=nb,this.localState=new r4({cache:a,client:this,resolvers:g,fragmentMatcher:y}),this.queryManager=new it({cache:this.cache,link:this.link,defaultOptions:this.defaultOptions,queryDeduplication:h,ssrMode:s,clientAwareness:{name:w,version:_},localState:this.localState,assumeImmutableResults:m,onBroadcast:f?function(){t.devToolsHookCb&&t.devToolsHookCb({action:{},state:{queries:t.queryManager.getQueryStore(),mutations:t.queryManager.mutationStore||{}},dataWithOptimisticResults:t.cache.extract(!0)})}:void 0})}return e.prototype.stop=function(){this.queryManager.stop()},e.prototype.watchQuery=function(e){return 
this.defaultOptions.watchQuery&&(e=(0,ir.J)(this.defaultOptions.watchQuery,e)),this.disableNetworkFetches&&("network-only"===e.fetchPolicy||"cache-and-network"===e.fetchPolicy)&&(e=(0,en.pi)((0,en.pi)({},e),{fetchPolicy:"cache-first"})),this.queryManager.watchQuery(e)},e.prototype.query=function(e){return this.defaultOptions.query&&(e=(0,ir.J)(this.defaultOptions.query,e)),__DEV__?(0,Q.kG)("cache-and-network"!==e.fetchPolicy,"The cache-and-network fetchPolicy does not work with client.query, because client.query can only return a single result. Please use client.watchQuery to receive multiple results from the cache and the network, or consider using a different fetchPolicy, such as cache-first or network-only."):(0,Q.kG)("cache-and-network"!==e.fetchPolicy,10),this.disableNetworkFetches&&"network-only"===e.fetchPolicy&&(e=(0,en.pi)((0,en.pi)({},e),{fetchPolicy:"cache-first"})),this.queryManager.query(e)},e.prototype.mutate=function(e){return this.defaultOptions.mutate&&(e=(0,ir.J)(this.defaultOptions.mutate,e)),this.queryManager.mutate(e)},e.prototype.subscribe=function(e){return this.queryManager.startGraphQLSubscription(e)},e.prototype.readQuery=function(e,t){return void 0===t&&(t=!1),this.cache.readQuery(e,t)},e.prototype.readFragment=function(e,t){return void 0===t&&(t=!1),this.cache.readFragment(e,t)},e.prototype.writeQuery=function(e){var t=this.cache.writeQuery(e);return!1!==e.broadcast&&this.queryManager.broadcastQueries(),t},e.prototype.writeFragment=function(e){var t=this.cache.writeFragment(e);return!1!==e.broadcast&&this.queryManager.broadcastQueries(),t},e.prototype.__actionHookForDevTools=function(e){this.devToolsHookCb=e},e.prototype.__requestRaw=function(e){return np(this.link,e)},e.prototype.resetStore=function(){var e=this;return Promise.resolve().then(function(){return e.queryManager.clearStore({discardWatches:!1})}).then(function(){return Promise.all(e.resetStoreCallbacks.map(function(e){return e()}))}).then(function(){return 
e.reFetchObservableQueries()})},e.prototype.clearStore=function(){var e=this;return Promise.resolve().then(function(){return e.queryManager.clearStore({discardWatches:!0})}).then(function(){return Promise.all(e.clearStoreCallbacks.map(function(e){return e()}))})},e.prototype.onResetStore=function(e){var t=this;return this.resetStoreCallbacks.push(e),function(){t.resetStoreCallbacks=t.resetStoreCallbacks.filter(function(t){return t!==e})}},e.prototype.onClearStore=function(e){var t=this;return this.clearStoreCallbacks.push(e),function(){t.clearStoreCallbacks=t.clearStoreCallbacks.filter(function(t){return t!==e})}},e.prototype.reFetchObservableQueries=function(e){return this.queryManager.reFetchObservableQueries(e)},e.prototype.refetchQueries=function(e){var t=this.queryManager.refetchQueries(e),n=[],r=[];t.forEach(function(e,t){n.push(t),r.push(e)});var i=Promise.all(r);return i.queries=n,i.results=r,i.catch(function(e){__DEV__&&Q.kG.debug("In client.refetchQueries, Promise.all promise rejected with error ".concat(e))}),i},e.prototype.getObservableQueries=function(e){return void 0===e&&(e="active"),this.queryManager.getObservableQueries(e)},e.prototype.extract=function(e){return this.cache.extract(e)},e.prototype.restore=function(e){return this.cache.restore(e)},e.prototype.addResolvers=function(e){this.localState.addResolvers(e)},e.prototype.setResolvers=function(e){this.localState.setResolvers(e)},e.prototype.getResolvers=function(){return this.localState.getResolvers()},e.prototype.setLocalStateFragmentMatcher=function(e){this.localState.setFragmentMatcher(e)},e.prototype.setLink=function(e){this.link=this.queryManager.link=e},e}(),io=function(){function e(){this.getFragmentDoc=rZ(eA)}return e.prototype.batch=function(e){var t,n=this,r="string"==typeof e.optimistic?e.optimistic:!1===e.optimistic?null:void 0;return this.performTransaction(function(){return 
t=e.update(n)},r),t},e.prototype.recordOptimisticTransaction=function(e,t){this.performTransaction(e,t)},e.prototype.transformDocument=function(e){return e},e.prototype.transformForLink=function(e){return e},e.prototype.identify=function(e){},e.prototype.gc=function(){return[]},e.prototype.modify=function(e){return!1},e.prototype.readQuery=function(e,t){return void 0===t&&(t=!!e.optimistic),this.read((0,en.pi)((0,en.pi)({},e),{rootId:e.id||"ROOT_QUERY",optimistic:t}))},e.prototype.readFragment=function(e,t){return void 0===t&&(t=!!e.optimistic),this.read((0,en.pi)((0,en.pi)({},e),{query:this.getFragmentDoc(e.fragment,e.fragmentName),rootId:e.id,optimistic:t}))},e.prototype.writeQuery=function(e){var t=e.id,n=e.data,r=(0,en._T)(e,["id","data"]);return this.write(Object.assign(r,{dataId:t||"ROOT_QUERY",result:n}))},e.prototype.writeFragment=function(e){var t=e.id,n=e.data,r=e.fragment,i=e.fragmentName,a=(0,en._T)(e,["id","data","fragment","fragmentName"]);return this.write(Object.assign(a,{query:this.getFragmentDoc(r,i),dataId:t,result:n}))},e.prototype.updateQuery=function(e,t){return this.batch({update:function(n){var r=n.readQuery(e),i=t(r);return null==i?r:(n.writeQuery((0,en.pi)((0,en.pi)({},e),{data:i})),i)}})},e.prototype.updateFragment=function(e,t){return this.batch({update:function(n){var r=n.readFragment(e),i=t(r);return null==i?r:(n.writeFragment((0,en.pi)((0,en.pi)({},e),{data:i})),i)}})},e}(),is=function(e){function t(n,r,i,a){var o,s=e.call(this,n)||this;if(s.message=n,s.path=r,s.query=i,s.variables=a,Array.isArray(s.path)){s.missing=s.message;for(var u=s.path.length-1;u>=0;--u)s.missing=((o={})[s.path[u]]=s.missing,o)}else s.missing=s.path;return s.__proto__=t.prototype,s}return(0,en.ZT)(t,e),t}(Error),iu=__webpack_require__(10542),ic=Object.prototype.hasOwnProperty;function il(e){return null==e}function id(e,t){var n=e.__typename,r=e.id,i=e._id;if("string"==typeof n&&(t&&(t.keyObject=il(r)?il(i)?void 
0:{_id:i}:{id:r}),il(r)&&!il(i)&&(r=i),!il(r)))return"".concat(n,":").concat("number"==typeof r||"string"==typeof r?r:JSON.stringify(r))}var ih={dataIdFromObject:id,addTypename:!0,resultCaching:!0,canonizeResults:!1};function ip(e){return(0,n1.o)(ih,e)}function ib(e){var t=e.canonizeResults;return void 0===t?ih.canonizeResults:t}function im(e,t){return eD(t)?e.get(t.__ref,"__typename"):t&&t.__typename}var ig=/^[_a-z][_0-9a-z]*/i;function iv(e){var t=e.match(ig);return t?t[0]:e}function iy(e,t,n){return!!(0,eO.s)(t)&&((0,tP.k)(t)?t.every(function(t){return iy(e,t,n)}):e.selections.every(function(e){if(eQ(e)&&td(e,n)){var r=eX(e);return ic.call(t,r)&&(!e.selectionSet||iy(e.selectionSet,t[r],n))}return!0}))}function iw(e){return(0,eO.s)(e)&&!eD(e)&&!(0,tP.k)(e)}function i_(){return new tB}function iE(e,t){var n=eL(e4(e));return{fragmentMap:n,lookupFragment:function(e){var r=n[e];return!r&&t&&(r=t.lookup(e)),r||null}}}var iS=Object.create(null),ik=function(){return iS},ix=Object.create(null),iT=function(){function e(e,t){var n=this;this.policies=e,this.group=t,this.data=Object.create(null),this.rootIds=Object.create(null),this.refs=Object.create(null),this.getFieldValue=function(e,t){return(0,iu.J)(eD(e)?n.get(e.__ref,t):e&&e[t])},this.canRead=function(e){return eD(e)?n.has(e.__ref):"object"==typeof e},this.toReference=function(e,t){if("string"==typeof e)return eI(e);if(eD(e))return e;var r=n.policies.identify(e)[0];if(r){var i=eI(r);return t&&n.merge(r,e),i}}}return e.prototype.toObject=function(){return(0,en.pi)({},this.data)},e.prototype.has=function(e){return void 0!==this.lookup(e,!0)},e.prototype.get=function(e,t){if(this.group.depend(e,t),ic.call(this.data,e)){var n=this.data[e];if(n&&ic.call(n,t))return n[t]}return"__typename"===t&&ic.call(this.policies.rootTypenamesById,e)?this.policies.rootTypenamesById[e]:this instanceof iL?this.parent.get(e,t):void 
0},e.prototype.lookup=function(e,t){return(t&&this.group.depend(e,"__exists"),ic.call(this.data,e))?this.data[e]:this instanceof iL?this.parent.lookup(e,t):this.policies.rootTypenamesById[e]?Object.create(null):void 0},e.prototype.merge=function(e,t){var n,r=this;eD(e)&&(e=e.__ref),eD(t)&&(t=t.__ref);var i="string"==typeof e?this.lookup(n=e):e,a="string"==typeof t?this.lookup(n=t):t;if(a){__DEV__?(0,Q.kG)("string"==typeof n,"store.merge expects a string ID"):(0,Q.kG)("string"==typeof n,1);var o=new tB(iI).merge(i,a);if(this.data[n]=o,o!==i&&(delete this.refs[n],this.group.caching)){var s=Object.create(null);i||(s.__exists=1),Object.keys(a).forEach(function(e){if(!i||i[e]!==o[e]){s[e]=1;var t=iv(e);t===e||r.policies.hasKeyArgs(o.__typename,t)||(s[t]=1),void 0!==o[e]||r instanceof iL||delete o[e]}}),s.__typename&&!(i&&i.__typename)&&this.policies.rootTypenamesById[n]===o.__typename&&delete s.__typename,Object.keys(s).forEach(function(e){return r.group.dirty(n,e)})}}},e.prototype.modify=function(e,t){var n=this,r=this.lookup(e);if(r){var i=Object.create(null),a=!1,o=!0,s={DELETE:iS,INVALIDATE:ix,isReference:eD,toReference:this.toReference,canRead:this.canRead,readField:function(t,r){return n.policies.readField("string"==typeof t?{fieldName:t,from:r||eI(e)}:t,{store:n})}};if(Object.keys(r).forEach(function(u){var c=iv(u),l=r[u];if(void 0!==l){var f="function"==typeof t?t:t[u]||t[c];if(f){var d=f===ik?iS:f((0,iu.J)(l),(0,en.pi)((0,en.pi)({},s),{fieldName:c,storeFieldName:u,storage:n.getStorage(e,u)}));d===ix?n.group.dirty(e,u):(d===iS&&(d=void 0),d!==l&&(i[u]=d,a=!0,l=d))}void 0!==l&&(o=!1)}}),a)return this.merge(e,i),o&&(this instanceof iL?this.data[e]=void 0:delete this.data[e],this.group.dirty(e,"__exists")),!0}return!1},e.prototype.delete=function(e,t,n){var r,i=this.lookup(e);if(i){var a=this.getFieldValue(i,"__typename"),o=t&&n?this.policies.getStoreFieldName({typename:a,fieldName:t,args:n}):t;return 
this.modify(e,o?((r={})[o]=ik,r):ik)}return!1},e.prototype.evict=function(e,t){var n=!1;return e.id&&(ic.call(this.data,e.id)&&(n=this.delete(e.id,e.fieldName,e.args)),this instanceof iL&&this!==t&&(n=this.parent.evict(e,t)||n),(e.fieldName||n)&&this.group.dirty(e.id,e.fieldName||"__exists")),n},e.prototype.clear=function(){this.replace(null)},e.prototype.extract=function(){var e=this,t=this.toObject(),n=[];return this.getRootIdSet().forEach(function(t){ic.call(e.policies.rootTypenamesById,t)||n.push(t)}),n.length&&(t.__META={extraRootIds:n.sort()}),t},e.prototype.replace=function(e){var t=this;if(Object.keys(this.data).forEach(function(n){e&&ic.call(e,n)||t.delete(n)}),e){var n=e.__META,r=(0,en._T)(e,["__META"]);Object.keys(r).forEach(function(e){t.merge(e,r[e])}),n&&n.extraRootIds.forEach(this.retain,this)}},e.prototype.retain=function(e){return this.rootIds[e]=(this.rootIds[e]||0)+1},e.prototype.release=function(e){if(this.rootIds[e]>0){var t=--this.rootIds[e];return t||delete this.rootIds[e],t}return 0},e.prototype.getRootIdSet=function(e){return void 0===e&&(e=new Set),Object.keys(this.rootIds).forEach(e.add,e),this instanceof iL?this.parent.getRootIdSet(e):Object.keys(this.policies.rootTypenamesById).forEach(e.add,e),e},e.prototype.gc=function(){var e=this,t=this.getRootIdSet(),n=this.toObject();t.forEach(function(r){ic.call(n,r)&&(Object.keys(e.findChildRefIds(r)).forEach(t.add,t),delete n[r])});var r=Object.keys(n);if(r.length){for(var i=this;i instanceof iL;)i=i.parent;r.forEach(function(e){return i.delete(e)})}return r},e.prototype.findChildRefIds=function(e){if(!ic.call(this.refs,e)){var t=this.refs[e]=Object.create(null),n=this.data[e];if(!n)return t;var r=new Set([n]);r.forEach(function(e){eD(e)&&(t[e.__ref]=!0),(0,eO.s)(e)&&Object.keys(e).forEach(function(t){var n=e[t];(0,eO.s)(n)&&r.add(n)})})}return this.refs[e]},e.prototype.makeCacheKey=function(){return this.group.keyMaker.lookupArray(arguments)},e}(),iM=function(){function e(e,t){void 
0===t&&(t=null),this.caching=e,this.parent=t,this.d=null,this.resetCaching()}return e.prototype.resetCaching=function(){this.d=this.caching?rW():null,this.keyMaker=new n_(t_.mr)},e.prototype.depend=function(e,t){if(this.d){this.d(iO(e,t));var n=iv(t);n!==t&&this.d(iO(e,n)),this.parent&&this.parent.depend(e,t)}},e.prototype.dirty=function(e,t){this.d&&this.d.dirty(iO(e,t),"__exists"===t?"forget":"setDirty")},e}();function iO(e,t){return t+"#"+e}function iA(e,t){iD(e)&&e.group.depend(t,"__exists")}!function(e){var t=function(e){function t(t){var n=t.policies,r=t.resultCaching,i=void 0===r||r,a=t.seed,o=e.call(this,n,new iM(i))||this;return o.stump=new iC(o),o.storageTrie=new n_(t_.mr),a&&o.replace(a),o}return(0,en.ZT)(t,e),t.prototype.addLayer=function(e,t){return this.stump.addLayer(e,t)},t.prototype.removeLayer=function(){return this},t.prototype.getStorage=function(){return this.storageTrie.lookupArray(arguments)},t}(e);e.Root=t}(iT||(iT={}));var iL=function(e){function t(t,n,r,i){var a=e.call(this,n.policies,i)||this;return a.id=t,a.parent=n,a.replay=r,a.group=i,r(a),a}return(0,en.ZT)(t,e),t.prototype.addLayer=function(e,n){return new t(e,this,n,this.group)},t.prototype.removeLayer=function(e){var t=this,n=this.parent.removeLayer(e);return e===this.id?(this.group.caching&&Object.keys(this.data).forEach(function(e){var r=t.data[e],i=n.lookup(e);i?r?r!==i&&Object.keys(r).forEach(function(n){(0,nm.D)(r[n],i[n])||t.group.dirty(e,n)}):(t.group.dirty(e,"__exists"),Object.keys(i).forEach(function(n){t.group.dirty(e,n)})):t.delete(e)}),n):n===this.parent?this:n.addLayer(this.id,this.replay)},t.prototype.toObject=function(){return(0,en.pi)((0,en.pi)({},this.parent.toObject()),this.data)},t.prototype.findChildRefIds=function(t){var n=this.parent.findChildRefIds(t);return ic.call(this.data,t)?(0,en.pi)((0,en.pi)({},n),e.prototype.findChildRefIds.call(this,t)):n},t.prototype.getStorage=function(){for(var e=this.parent;e.parent;)e=e.parent;return 
e.getStorage.apply(e,arguments)},t}(iT),iC=function(e){function t(t){return e.call(this,"EntityStore.Stump",t,function(){},new iM(t.group.caching,t.group))||this}return(0,en.ZT)(t,e),t.prototype.removeLayer=function(){return this},t.prototype.merge=function(){return this.parent.merge.apply(this.parent,arguments)},t}(iL);function iI(e,t,n){var r=e[n],i=t[n];return(0,nm.D)(r,i)?r:i}function iD(e){return!!(e instanceof iT&&e.group.caching)}function iN(e){return[e.selectionSet,e.objectOrReference,e.context,e.context.canonizeResults,]}var iP=function(){function e(e){var t=this;this.knownResults=new(t_.mr?WeakMap:Map),this.config=(0,n1.o)(e,{addTypename:!1!==e.addTypename,canonizeResults:ib(e)}),this.canon=e.canon||new nk,this.executeSelectionSet=rZ(function(e){var n,r=e.context.canonizeResults,i=iN(e);i[3]=!r;var a=(n=t.executeSelectionSet).peek.apply(n,i);return a?r?(0,en.pi)((0,en.pi)({},a),{result:t.canon.admit(a.result)}):a:(iA(e.context.store,e.enclosingRef.__ref),t.execSelectionSetImpl(e))},{max:this.config.resultCacheMaxSize,keyArgs:iN,makeCacheKey:function(e,t,n,r){if(iD(n.store))return n.store.makeCacheKey(e,eD(t)?t.__ref:t,n.varString,r)}}),this.executeSubSelectedArray=rZ(function(e){return iA(e.context.store,e.enclosingRef.__ref),t.execSubSelectedArrayImpl(e)},{max:this.config.resultCacheMaxSize,makeCacheKey:function(e){var t=e.field,n=e.array,r=e.context;if(iD(r.store))return r.store.makeCacheKey(t,n,r.varString)}})}return e.prototype.resetCanon=function(){this.canon=new nk},e.prototype.diffQueryAgainstStore=function(e){var t,n=e.store,r=e.query,i=e.rootId,a=void 0===i?"ROOT_QUERY":i,o=e.variables,s=e.returnPartialData,u=void 0===s||s,c=e.canonizeResults,l=void 0===c?this.config.canonizeResults:c,f=this.config.cache.policies;o=(0,en.pi)((0,en.pi)({},e8(e5(r))),o);var 
d=eI(a),h=this.executeSelectionSet({selectionSet:e9(r).selectionSet,objectOrReference:d,enclosingRef:d,context:(0,en.pi)({store:n,query:r,policies:f,variables:o,varString:nx(o),canonizeResults:l},iE(r,this.config.fragments))});if(h.missing&&(t=[new is(iR(h.missing),h.missing,r,o)],!u))throw t[0];return{result:h.result,complete:!t,missing:t}},e.prototype.isFresh=function(e,t,n,r){if(iD(r.store)&&this.knownResults.get(e)===n){var i=this.executeSelectionSet.peek(n,t,r,this.canon.isKnown(e));if(i&&e===i.result)return!0}return!1},e.prototype.execSelectionSetImpl=function(e){var t,n=this,r=e.selectionSet,i=e.objectOrReference,a=e.enclosingRef,o=e.context;if(eD(i)&&!o.policies.rootTypenamesById[i.__ref]&&!o.store.has(i.__ref))return{result:this.canon.empty,missing:"Dangling reference to missing ".concat(i.__ref," object")};var s=o.variables,u=o.policies,c=o.store.getFieldValue(i,"__typename"),l=[],f=new tB;function d(e,n){var r;return e.missing&&(t=f.merge(t,((r={})[n]=e.missing,r))),e.result}this.config.addTypename&&"string"==typeof c&&!u.rootIdsByTypename[c]&&l.push({__typename:c});var h=new Set(r.selections);h.forEach(function(e){var r,p;if(td(e,s)){if(eQ(e)){var b=u.readField({fieldName:e.name.value,field:e,variables:o.variables,from:i},o),m=eX(e);void 0===b?nj.added(e)||(t=f.merge(t,((r={})[m]="Can't find field '".concat(e.name.value,"' on ").concat(eD(i)?i.__ref+" object":"object "+JSON.stringify(i,null,2)),r))):(0,tP.k)(b)?b=d(n.executeSubSelectedArray({field:e,array:b,enclosingRef:a,context:o}),m):e.selectionSet?null!=b&&(b=d(n.executeSelectionSet({selectionSet:e.selectionSet,objectOrReference:b,enclosingRef:eD(b)?b:a,context:o}),m)):o.canonizeResults&&(b=n.canon.pass(b)),void 0!==b&&l.push(((p={})[m]=b,p))}else{var g=eC(e,o.lookupFragment);if(!g&&e.kind===nL.h.FRAGMENT_SPREAD)throw __DEV__?new Q.ej("No fragment named ".concat(e.name.value)):new Q.ej(5);g&&u.fragmentMatches(g,c)&&g.selectionSet.selections.forEach(h.add,h)}}});var 
p={result:tF(l),missing:t},b=o.canonizeResults?this.canon.admit(p):(0,iu.J)(p);return b.result&&this.knownResults.set(b.result,r),b},e.prototype.execSubSelectedArrayImpl=function(e){var t,n=this,r=e.field,i=e.array,a=e.enclosingRef,o=e.context,s=new tB;function u(e,n){var r;return e.missing&&(t=s.merge(t,((r={})[n]=e.missing,r))),e.result}return r.selectionSet&&(i=i.filter(o.store.canRead)),i=i.map(function(e,t){return null===e?null:(0,tP.k)(e)?u(n.executeSubSelectedArray({field:r,array:e,enclosingRef:a,context:o}),t):r.selectionSet?u(n.executeSelectionSet({selectionSet:r.selectionSet,objectOrReference:e,enclosingRef:eD(e)?e:a,context:o}),t):(__DEV__&&ij(o.store,r,e),e)}),{result:o.canonizeResults?this.canon.admit(i):i,missing:t}},e}();function iR(e){try{JSON.stringify(e,function(e,t){if("string"==typeof t)throw t;return t})}catch(t){return t}}function ij(e,t,n){if(!t.selectionSet){var r=new Set([n]);r.forEach(function(n){(0,eO.s)(n)&&(__DEV__?(0,Q.kG)(!eD(n),"Missing selection set for object of type ".concat(im(e,n)," returned for query field ").concat(t.name.value)):(0,Q.kG)(!eD(n),6),Object.values(n).forEach(r.add,r))})}}function iF(e){var t=nG("stringifyForDisplay");return JSON.stringify(e,function(e,n){return void 0===n?t:n}).split(JSON.stringify(t)).join("")}var iY=Object.create(null);function iB(e){var t=JSON.stringify(e);return iY[t]||(iY[t]=Object.create(null))}function iU(e){var t=iB(e);return t.keyFieldsFn||(t.keyFieldsFn=function(t,n){var r=function(e,t){return n.readField(t,e)},i=n.keyObject=i$(e,function(e){var i=iW(n.storeObject,e,r);return void 0===i&&t!==n.storeObject&&ic.call(t,e[0])&&(i=iW(t,e,iG)),__DEV__?(0,Q.kG)(void 0!==i,"Missing field '".concat(e.join("."),"' while extracting keyFields from ").concat(JSON.stringify(t))):(0,Q.kG)(void 0!==i,2),i});return"".concat(n.typename,":").concat(JSON.stringify(i))})}function iH(e){var t=iB(e);return t.keyArgsFn||(t.keyArgsFn=function(t,n){var 
r=n.field,i=n.variables,a=n.fieldName,o=JSON.stringify(i$(e,function(e){var n=e[0],a=n.charAt(0);if("@"===a){if(r&&(0,tP.O)(r.directives)){var o=n.slice(1),s=r.directives.find(function(e){return e.name.value===o}),u=s&&eZ(s,i);return u&&iW(u,e.slice(1))}return}if("$"===a){var c=n.slice(1);if(i&&ic.call(i,c)){var l=e.slice(0);return l[0]=c,iW(i,l)}return}if(t)return iW(t,e)}));return(t||"{}"!==o)&&(a+=":"+o),a})}function i$(e,t){var n=new tB;return iz(e).reduce(function(e,r){var i,a=t(r);if(void 0!==a){for(var o=r.length-1;o>=0;--o)a=((i={})[r[o]]=a,i);e=n.merge(e,a)}return e},Object.create(null))}function iz(e){var t=iB(e);if(!t.paths){var n=t.paths=[],r=[];e.forEach(function(t,i){(0,tP.k)(t)?(iz(t).forEach(function(e){return n.push(r.concat(e))}),r.length=0):(r.push(t),(0,tP.k)(e[i+1])||(n.push(r.slice(0)),r.length=0))})}return t.paths}function iG(e,t){return e[t]}function iW(e,t,n){return n=n||iG,iK(t.reduce(function e(t,r){return(0,tP.k)(t)?t.map(function(t){return e(t,r)}):t&&n(t,r)},e))}function iK(e){return(0,eO.s)(e)?(0,tP.k)(e)?e.map(iK):i$(Object.keys(e).sort(),function(t){return iW(e,t)}):e}function iV(e){return void 0!==e.args?e.args:e.field?eZ(e.field,e.variables):null}eK.setStringify(nx);var iq=function(){},iZ=function(e,t){return t.fieldName},iX=function(e,t,n){return(0,n.mergeObjects)(e,t)},iJ=function(e,t){return t},iQ=function(){function e(e){this.config=e,this.typePolicies=Object.create(null),this.toBeAdded=Object.create(null),this.supertypeMap=new Map,this.fuzzySubtypes=new Map,this.rootIdsByTypename=Object.create(null),this.rootTypenamesById=Object.create(null),this.usingPossibleTypes=!1,this.config=(0,en.pi)({dataIdFromObject:id},e),this.cache=this.config.cache,this.setRootTypename("Query"),this.setRootTypename("Mutation"),this.setRootTypename("Subscription"),e.possibleTypes&&this.addPossibleTypes(e.possibleTypes),e.typePolicies&&this.addTypePolicies(e.typePolicies)}return e.prototype.identify=function(e,t){var 
n,r,i=this,a=t&&(t.typename||(null===(n=t.storeObject)||void 0===n?void 0:n.__typename))||e.__typename;if(a===this.rootTypenamesById.ROOT_QUERY)return["ROOT_QUERY"];for(var o=t&&t.storeObject||e,s=(0,en.pi)((0,en.pi)({},t),{typename:a,storeObject:o,readField:t&&t.readField||function(){var e=i0(arguments,o);return i.readField(e,{store:i.cache.data,variables:e.variables})}}),u=a&&this.getTypePolicy(a),c=u&&u.keyFn||this.config.dataIdFromObject;c;){var l=c((0,en.pi)((0,en.pi)({},e),o),s);if((0,tP.k)(l))c=iU(l);else{r=l;break}}return r=r?String(r):void 0,s.keyObject?[r,s.keyObject]:[r]},e.prototype.addTypePolicies=function(e){var t=this;Object.keys(e).forEach(function(n){var r=e[n],i=r.queryType,a=r.mutationType,o=r.subscriptionType,s=(0,en._T)(r,["queryType","mutationType","subscriptionType"]);i&&t.setRootTypename("Query",n),a&&t.setRootTypename("Mutation",n),o&&t.setRootTypename("Subscription",n),ic.call(t.toBeAdded,n)?t.toBeAdded[n].push(s):t.toBeAdded[n]=[s]})},e.prototype.updateTypePolicy=function(e,t){var n=this,r=this.getTypePolicy(e),i=t.keyFields,a=t.fields;function o(e,t){e.merge="function"==typeof t?t:!0===t?iX:!1===t?iJ:e.merge}o(r,t.merge),r.keyFn=!1===i?iq:(0,tP.k)(i)?iU(i):"function"==typeof i?i:r.keyFn,a&&Object.keys(a).forEach(function(t){var r=n.getFieldPolicy(e,t,!0),i=a[t];if("function"==typeof i)r.read=i;else{var s=i.keyArgs,u=i.read,c=i.merge;r.keyFn=!1===s?iZ:(0,tP.k)(s)?iH(s):"function"==typeof s?s:r.keyFn,"function"==typeof u&&(r.read=u),o(r,c)}r.read&&r.merge&&(r.keyFn=r.keyFn||iZ)})},e.prototype.setRootTypename=function(e,t){void 0===t&&(t=e);var n="ROOT_"+e.toUpperCase(),r=this.rootTypenamesById[n];t!==r&&(__DEV__?(0,Q.kG)(!r||r===e,"Cannot change root ".concat(e," __typename more than once")):(0,Q.kG)(!r||r===e,3),r&&delete this.rootIdsByTypename[r],this.rootIdsByTypename[t]=n,this.rootTypenamesById[n]=t)},e.prototype.addPossibleTypes=function(e){var 
t=this;this.usingPossibleTypes=!0,Object.keys(e).forEach(function(n){t.getSupertypeSet(n,!0),e[n].forEach(function(e){t.getSupertypeSet(e,!0).add(n);var r=e.match(ig);r&&r[0]===e||t.fuzzySubtypes.set(e,RegExp(e))})})},e.prototype.getTypePolicy=function(e){var t=this;if(!ic.call(this.typePolicies,e)){var n=this.typePolicies[e]=Object.create(null);n.fields=Object.create(null);var r=this.supertypeMap.get(e);r&&r.size&&r.forEach(function(e){var r=t.getTypePolicy(e),i=r.fields;Object.assign(n,(0,en._T)(r,["fields"])),Object.assign(n.fields,i)})}var i=this.toBeAdded[e];return i&&i.length&&i.splice(0).forEach(function(n){t.updateTypePolicy(e,n)}),this.typePolicies[e]},e.prototype.getFieldPolicy=function(e,t,n){if(e){var r=this.getTypePolicy(e).fields;return r[t]||n&&(r[t]=Object.create(null))}},e.prototype.getSupertypeSet=function(e,t){var n=this.supertypeMap.get(e);return!n&&t&&this.supertypeMap.set(e,n=new Set),n},e.prototype.fragmentMatches=function(e,t,n,r){var i=this;if(!e.typeCondition)return!0;if(!t)return!1;var a=e.typeCondition.name.value;if(t===a)return!0;if(this.usingPossibleTypes&&this.supertypeMap.has(a))for(var o=this.getSupertypeSet(t,!0),s=[o],u=function(e){var t=i.getSupertypeSet(e,!1);t&&t.size&&0>s.indexOf(t)&&s.push(t)},c=!!(n&&this.fuzzySubtypes.size),l=!1,f=0;f1?a:t}:(r=(0,en.pi)({},i),ic.call(r,"from")||(r.from=t)),__DEV__&&void 0===r.from&&__DEV__&&Q.kG.warn("Undefined 'from' passed to readField with arguments ".concat(iF(Array.from(e)))),void 0===r.variables&&(r.variables=n),r}function i2(e){return function(t,n){if((0,tP.k)(t)||(0,tP.k)(n))throw __DEV__?new Q.ej("Cannot automatically merge arrays"):new Q.ej(4);if((0,eO.s)(t)&&(0,eO.s)(n)){var r=e.getFieldValue(t,"__typename"),i=e.getFieldValue(n,"__typename");if(r&&i&&r!==i)return n;if(eD(t)&&iw(n))return e.merge(t.__ref,n),t;if(iw(t)&&eD(n))return e.merge(t,n.__ref),n;if(iw(t)&&iw(n))return(0,en.pi)((0,en.pi)({},t),n)}return n}}function i3(e,t,n){var 
r="".concat(t).concat(n),i=e.flavors.get(r);return i||e.flavors.set(r,i=e.clientOnly===t&&e.deferred===n?e:(0,en.pi)((0,en.pi)({},e),{clientOnly:t,deferred:n})),i}var i4=function(){function e(e,t,n){this.cache=e,this.reader=t,this.fragments=n}return e.prototype.writeToStore=function(e,t){var n=this,r=t.query,i=t.result,a=t.dataId,o=t.variables,s=t.overwrite,u=e2(r),c=i_();o=(0,en.pi)((0,en.pi)({},e8(u)),o);var l=(0,en.pi)((0,en.pi)({store:e,written:Object.create(null),merge:function(e,t){return c.merge(e,t)},variables:o,varString:nx(o)},iE(r,this.fragments)),{overwrite:!!s,incomingById:new Map,clientOnly:!1,deferred:!1,flavors:new Map}),f=this.processSelectionSet({result:i||Object.create(null),dataId:a,selectionSet:u.selectionSet,mergeTree:{map:new Map},context:l});if(!eD(f))throw __DEV__?new Q.ej("Could not identify object ".concat(JSON.stringify(i))):new Q.ej(7);return l.incomingById.forEach(function(t,r){var i=t.storeObject,a=t.mergeTree,o=t.fieldNodeSet,s=eI(r);if(a&&a.map.size){var u=n.applyMerges(a,s,i,l);if(eD(u))return;i=u}if(__DEV__&&!l.overwrite){var c=Object.create(null);o.forEach(function(e){e.selectionSet&&(c[e.name.value]=!0)});var f=function(e){return!0===c[iv(e)]},d=function(e){var t=a&&a.map.get(e);return Boolean(t&&t.info&&t.info.merge)};Object.keys(i).forEach(function(e){f(e)&&!d(e)&&at(s,i,e,l.store)})}e.merge(r,i)}),e.retain(f.__ref),f},e.prototype.processSelectionSet=function(e){var t=this,n=e.dataId,r=e.result,i=e.selectionSet,a=e.context,o=e.mergeTree,s=this.cache.policies,u=Object.create(null),c=n&&s.rootTypenamesById[n]||eJ(r,i,a.fragmentMap)||n&&a.store.get(n,"__typename");"string"==typeof c&&(u.__typename=c);var l=function(){var e=i0(arguments,u,a.variables);if(eD(e.from)){var t=a.incomingById.get(e.from.__ref);if(t){var n=s.readField((0,en.pi)((0,en.pi)({},e),{from:t.storeObject}),a);if(void 0!==n)return n}}return s.readField(e,a)},f=new Set;this.flattenFields(i,r,a,c).forEach(function(e,n){var i,a=r[eX(n)];if(f.add(n),void 0!==a){var 
d=s.getStoreFieldName({typename:c,fieldName:n.name.value,field:n,variables:e.variables}),h=i6(o,d),p=t.processFieldValue(a,n,n.selectionSet?i3(e,!1,!1):e,h),b=void 0;n.selectionSet&&(eD(p)||iw(p))&&(b=l("__typename",p));var m=s.getMergeFunction(c,n.name.value,b);m?h.info={field:n,typename:c,merge:m}:i7(o,d),u=e.merge(u,((i={})[d]=p,i))}else __DEV__&&!e.clientOnly&&!e.deferred&&!nj.added(n)&&!s.getReadFunction(c,n.name.value)&&__DEV__&&Q.kG.error("Missing field '".concat(eX(n),"' while writing result ").concat(JSON.stringify(r,null,2)).substring(0,1e3))});try{var d=s.identify(r,{typename:c,selectionSet:i,fragmentMap:a.fragmentMap,storeObject:u,readField:l}),h=d[0],p=d[1];n=n||h,p&&(u=a.merge(u,p))}catch(b){if(!n)throw b}if("string"==typeof n){var m=eI(n),g=a.written[n]||(a.written[n]=[]);if(g.indexOf(i)>=0||(g.push(i),this.reader&&this.reader.isFresh(r,m,i,a)))return m;var v=a.incomingById.get(n);return v?(v.storeObject=a.merge(v.storeObject,u),v.mergeTree=i9(v.mergeTree,o),f.forEach(function(e){return v.fieldNodeSet.add(e)})):a.incomingById.set(n,{storeObject:u,mergeTree:i8(o)?void 0:o,fieldNodeSet:f}),m}return u},e.prototype.processFieldValue=function(e,t,n,r){var i=this;return t.selectionSet&&null!==e?(0,tP.k)(e)?e.map(function(e,a){var o=i.processFieldValue(e,t,n,i6(r,a));return i7(r,a),o}):this.processSelectionSet({result:e,selectionSet:t.selectionSet,context:n,mergeTree:r}):__DEV__?nJ(e):e},e.prototype.flattenFields=function(e,t,n,r){void 0===r&&(r=eJ(t,e,n.fragmentMap));var i=new Map,a=this.cache.policies,o=new n_(!1);return function e(s,u){var c=o.lookup(s,u.clientOnly,u.deferred);c.visited||(c.visited=!0,s.selections.forEach(function(o){if(td(o,n.variables)){var s=u.clientOnly,c=u.deferred;if(!(s&&c)&&(0,tP.O)(o.directives)&&o.directives.forEach(function(e){var t=e.name.value;if("client"===t&&(s=!0),"defer"===t){var r=eZ(e,n.variables);r&&!1===r.if||(c=!0)}}),eQ(o)){var l=i.get(o);l&&(s=s&&l.clientOnly,c=c&&l.deferred),i.set(o,i3(n,s,c))}else{var 
f=eC(o,n.lookupFragment);if(!f&&o.kind===nL.h.FRAGMENT_SPREAD)throw __DEV__?new Q.ej("No fragment named ".concat(o.name.value)):new Q.ej(8);f&&a.fragmentMatches(f,r,t,n.variables)&&e(f.selectionSet,i3(n,s,c))}}}))}(e,n),i},e.prototype.applyMerges=function(e,t,n,r,i){var a=this;if(e.map.size&&!eD(n)){var o,s,u=!(0,tP.k)(n)&&(eD(t)||iw(t))?t:void 0,c=n;u&&!i&&(i=[eD(u)?u.__ref:u]);var l=function(e,t){return(0,tP.k)(e)?"number"==typeof t?e[t]:void 0:r.store.getFieldValue(e,String(t))};e.map.forEach(function(e,t){var n=l(u,t),o=l(c,t);if(void 0!==o){i&&i.push(t);var f=a.applyMerges(e,n,o,r,i);f!==o&&(s=s||new Map).set(t,f),i&&(0,Q.kG)(i.pop()===t)}}),s&&(n=(0,tP.k)(c)?c.slice(0):(0,en.pi)({},c),s.forEach(function(e,t){n[t]=e}))}return e.info?this.cache.policies.runMergeFunction(t,n,e.info,r,i&&(o=r.store).getStorage.apply(o,i)):n},e}(),i5=[];function i6(e,t){var n=e.map;return n.has(t)||n.set(t,i5.pop()||{map:new Map}),n.get(t)}function i9(e,t){if(e===t||!t||i8(t))return e;if(!e||i8(e))return t;var n=e.info&&t.info?(0,en.pi)((0,en.pi)({},e.info),t.info):e.info||t.info,r=e.map.size&&t.map.size,i=r?new Map:e.map.size?e.map:t.map,a={info:n,map:i};if(r){var o=new Set(t.map.keys());e.map.forEach(function(e,n){a.map.set(n,i9(e,t.map.get(n))),o.delete(n)}),o.forEach(function(n){a.map.set(n,i9(t.map.get(n),e.map.get(n)))})}return a}function i8(e){return!e||!(e.info||e.map.size)}function i7(e,t){var n=e.map,r=n.get(t);r&&i8(r)&&(i5.push(r),n.delete(t))}var ae=new Set;function at(e,t,n,r){var i=function(e){var t=r.getFieldValue(e,n);return"object"==typeof t&&t},a=i(e);if(a){var o=i(t);if(!(!o||eD(a)||(0,nm.D)(a,o)||Object.keys(a).every(function(e){return void 0!==r.getFieldValue(o,e)}))){var s=r.getFieldValue(e,"__typename")||r.getFieldValue(t,"__typename"),u=iv(n),c="".concat(s,".").concat(u);if(!ae.has(c)){ae.add(c);var l=[];(0,tP.k)(a)||(0,tP.k)(o)||[a,o].forEach(function(e){var t=r.getFieldValue(e,"__typename");"string"!=typeof 
t||l.includes(t)||l.push(t)}),__DEV__&&Q.kG.warn("Cache data may be lost when replacing the ".concat(u," field of a ").concat(s," object.\n\nThis could cause additional (usually avoidable) network requests to fetch data that were otherwise cached.\n\nTo address this problem (which is not a bug in Apollo Client), ").concat(l.length?"either ensure all objects of type "+l.join(" and ")+" have an ID or a custom merge function, or ":"","define a custom merge function for the ").concat(c," field, so InMemoryCache can safely merge these objects:\n\n existing: ").concat(JSON.stringify(a).slice(0,1e3),"\n incoming: ").concat(JSON.stringify(o).slice(0,1e3),"\n\nFor more information about these options, please refer to the documentation:\n\n * Ensuring entity objects have IDs: https://go.apollo.dev/c/generating-unique-identifiers\n * Defining custom merge functions: https://go.apollo.dev/c/merging-non-normalized-objects\n"))}}}}var an=function(e){function t(t){void 0===t&&(t={});var n=e.call(this)||this;return n.watches=new Set,n.typenameDocumentCache=new Map,n.makeVar=r2,n.txCount=0,n.config=ip(t),n.addTypename=!!n.config.addTypename,n.policies=new iQ({cache:n,dataIdFromObject:n.config.dataIdFromObject,possibleTypes:n.config.possibleTypes,typePolicies:n.config.typePolicies}),n.init(),n}return(0,en.ZT)(t,e),t.prototype.init=function(){var e=this.data=new iT.Root({policies:this.policies,resultCaching:this.config.resultCaching});this.optimisticData=e.stump,this.resetResultCache()},t.prototype.resetResultCache=function(e){var t=this,n=this.storeReader,r=this.config.fragments;this.storeWriter=new i4(this,this.storeReader=new iP({cache:this,addTypename:this.addTypename,resultCacheMaxSize:this.config.resultCacheMaxSize,canonizeResults:ib(this.config),canon:e?void 0:n&&n.canon,fragments:r}),r),this.maybeBroadcastWatch=rZ(function(e,n){return t.broadcastWatch(e,n)},{max:this.config.resultCacheMaxSize,makeCacheKey:function(e){var n=e.optimistic?t.optimisticData:t.data;if(iD(n)){var 
r=e.optimistic,i=e.id,a=e.variables;return n.makeCacheKey(e.query,e.callback,nx({optimistic:r,id:i,variables:a}))}}}),new Set([this.data.group,this.optimisticData.group,]).forEach(function(e){return e.resetCaching()})},t.prototype.restore=function(e){return this.init(),e&&this.data.replace(e),this},t.prototype.extract=function(e){return void 0===e&&(e=!1),(e?this.optimisticData:this.data).extract()},t.prototype.read=function(e){var t=e.returnPartialData,n=void 0!==t&&t;try{return this.storeReader.diffQueryAgainstStore((0,en.pi)((0,en.pi)({},e),{store:e.optimistic?this.optimisticData:this.data,config:this.config,returnPartialData:n})).result||null}catch(r){if(r instanceof is)return null;throw r}},t.prototype.write=function(e){try{return++this.txCount,this.storeWriter.writeToStore(this.data,e)}finally{--this.txCount||!1===e.broadcast||this.broadcastWatches()}},t.prototype.modify=function(e){if(ic.call(e,"id")&&!e.id)return!1;var t=e.optimistic?this.optimisticData:this.data;try{return++this.txCount,t.modify(e.id||"ROOT_QUERY",e.fields)}finally{--this.txCount||!1===e.broadcast||this.broadcastWatches()}},t.prototype.diff=function(e){return this.storeReader.diffQueryAgainstStore((0,en.pi)((0,en.pi)({},e),{store:e.optimistic?this.optimisticData:this.data,rootId:e.id||"ROOT_QUERY",config:this.config}))},t.prototype.watch=function(e){var t=this;return this.watches.size||r0(this),this.watches.add(e),e.immediate&&this.maybeBroadcastWatch(e),function(){t.watches.delete(e)&&!t.watches.size&&r1(t),t.maybeBroadcastWatch.forget(e)}},t.prototype.gc=function(e){nx.reset();var t=this.optimisticData.gc();return e&&!this.txCount&&(e.resetResultCache?this.resetResultCache(e.resetResultIdentities):e.resetResultIdentities&&this.storeReader.resetCanon()),t},t.prototype.retain=function(e,t){return(t?this.optimisticData:this.data).retain(e)},t.prototype.release=function(e,t){return(t?this.optimisticData:this.data).release(e)},t.prototype.identify=function(e){if(eD(e))return 
e.__ref;try{return this.policies.identify(e)[0]}catch(t){__DEV__&&Q.kG.warn(t)}},t.prototype.evict=function(e){if(!e.id){if(ic.call(e,"id"))return!1;e=(0,en.pi)((0,en.pi)({},e),{id:"ROOT_QUERY"})}try{return++this.txCount,this.optimisticData.evict(e,this.data)}finally{--this.txCount||!1===e.broadcast||this.broadcastWatches()}},t.prototype.reset=function(e){var t=this;return this.init(),nx.reset(),e&&e.discardWatches?(this.watches.forEach(function(e){return t.maybeBroadcastWatch.forget(e)}),this.watches.clear(),r1(this)):this.broadcastWatches(),Promise.resolve()},t.prototype.removeOptimistic=function(e){var t=this.optimisticData.removeLayer(e);t!==this.optimisticData&&(this.optimisticData=t,this.broadcastWatches())},t.prototype.batch=function(e){var t,n=this,r=e.update,i=e.optimistic,a=void 0===i||i,o=e.removeOptimistic,s=e.onWatchUpdated,u=function(e){var i=n,a=i.data,o=i.optimisticData;++n.txCount,e&&(n.data=n.optimisticData=e);try{return t=r(n)}finally{--n.txCount,n.data=a,n.optimisticData=o}},c=new Set;return s&&!this.txCount&&this.broadcastWatches((0,en.pi)((0,en.pi)({},e),{onWatchUpdated:function(e){return c.add(e),!1}})),"string"==typeof a?this.optimisticData=this.optimisticData.addLayer(a,u):!1===a?u(this.data):u(),"string"==typeof o&&(this.optimisticData=this.optimisticData.removeLayer(o)),s&&c.size?(this.broadcastWatches((0,en.pi)((0,en.pi)({},e),{onWatchUpdated:function(e,t){var n=s.call(this,e,t);return!1!==n&&c.delete(e),n}})),c.size&&c.forEach(function(e){return n.maybeBroadcastWatch.dirty(e)})):this.broadcastWatches(e),t},t.prototype.performTransaction=function(e,t){return this.batch({update:e,optimistic:t||null!==t})},t.prototype.transformDocument=function(e){if(this.addTypename){var t=this.typenameDocumentCache.get(e);return t||(t=nj(e),this.typenameDocumentCache.set(e,t),this.typenameDocumentCache.set(t,t)),t}return e},t.prototype.transformForLink=function(e){var t=this.config.fragments;return 
t?t.transform(e):e},t.prototype.broadcastWatches=function(e){var t=this;this.txCount||this.watches.forEach(function(n){return t.maybeBroadcastWatch(n,e)})},t.prototype.broadcastWatch=function(e,t){var n=e.lastDiff,r=this.diff(e);(!t||(e.optimistic&&"string"==typeof t.optimistic&&(r.fromOptimisticTransaction=!0),!t.onWatchUpdated||!1!==t.onWatchUpdated.call(this,e,r,n)))&&(n&&(0,nm.D)(n.result,r.result)||e.callback(e.lastDiff=r,n))},t}(io),ar={possibleTypes:{ApproveJobProposalSpecPayload:["ApproveJobProposalSpecSuccess","JobAlreadyExistsError","NotFoundError"],BridgePayload:["Bridge","NotFoundError"],CancelJobProposalSpecPayload:["CancelJobProposalSpecSuccess","NotFoundError"],ChainPayload:["Chain","NotFoundError"],CreateAPITokenPayload:["CreateAPITokenSuccess","InputErrors"],CreateBridgePayload:["CreateBridgeSuccess"],CreateCSAKeyPayload:["CSAKeyExistsError","CreateCSAKeySuccess"],CreateFeedsManagerChainConfigPayload:["CreateFeedsManagerChainConfigSuccess","InputErrors","NotFoundError"],CreateFeedsManagerPayload:["CreateFeedsManagerSuccess","InputErrors","NotFoundError","SingleFeedsManagerError"],CreateJobPayload:["CreateJobSuccess","InputErrors"],CreateOCR2KeyBundlePayload:["CreateOCR2KeyBundleSuccess"],CreateOCRKeyBundlePayload:["CreateOCRKeyBundleSuccess"],CreateP2PKeyPayload:["CreateP2PKeySuccess"],DeleteAPITokenPayload:["DeleteAPITokenSuccess","InputErrors"],DeleteBridgePayload:["DeleteBridgeConflictError","DeleteBridgeInvalidNameError","DeleteBridgeSuccess","NotFoundError"],DeleteCSAKeyPayload:["DeleteCSAKeySuccess","NotFoundError"],DeleteFeedsManagerChainConfigPayload:["DeleteFeedsManagerChainConfigSuccess","NotFoundError"],DeleteJobPayload:["DeleteJobSuccess","NotFoundError"],DeleteOCR2KeyBundlePayload:["DeleteOCR2KeyBundleSuccess","NotFoundError"],DeleteOCRKeyBundlePayload:["DeleteOCRKeyBundleSuccess","NotFoundError"],DeleteP2PKeyPayload:["DeleteP2PKeySuccess","NotFoundError"],DeleteVRFKeyPayload:["DeleteVRFKeySuccess","NotFoundError"],DismissJobErrorPayloa
d:["DismissJobErrorSuccess","NotFoundError"],Error:["CSAKeyExistsError","DeleteBridgeConflictError","DeleteBridgeInvalidNameError","InputError","JobAlreadyExistsError","NotFoundError","RunJobCannotRunError","SingleFeedsManagerError"],EthTransactionPayload:["EthTransaction","NotFoundError"],FeaturesPayload:["Features"],FeedsManagerPayload:["FeedsManager","NotFoundError"],GetSQLLoggingPayload:["SQLLogging"],GlobalLogLevelPayload:["GlobalLogLevel"],JobPayload:["Job","NotFoundError"],JobProposalPayload:["JobProposal","NotFoundError"],JobRunPayload:["JobRun","NotFoundError"],JobSpec:["BlockHeaderFeederSpec","BlockhashStoreSpec","BootstrapSpec","CronSpec","DirectRequestSpec","FluxMonitorSpec","GatewaySpec","KeeperSpec","OCR2Spec","OCRSpec","VRFSpec","WebhookSpec"],NodePayload:["Node","NotFoundError"],PaginatedPayload:["BridgesPayload","ChainsPayload","EthTransactionAttemptsPayload","EthTransactionsPayload","JobRunsPayload","JobsPayload","NodesPayload"],RejectJobProposalSpecPayload:["NotFoundError","RejectJobProposalSpecSuccess"],RunJobPayload:["NotFoundError","RunJobCannotRunError","RunJobSuccess"],SetGlobalLogLevelPayload:["InputErrors","SetGlobalLogLevelSuccess"],SetSQLLoggingPayload:["SetSQLLoggingSuccess"],SetServicesLogLevelsPayload:["InputErrors","SetServicesLogLevelsSuccess"],UpdateBridgePayload:["NotFoundError","UpdateBridgeSuccess"],UpdateFeedsManagerChainConfigPayload:["InputErrors","NotFoundError","UpdateFeedsManagerChainConfigSuccess"],UpdateFeedsManagerPayload:["InputErrors","NotFoundError","UpdateFeedsManagerSuccess"],UpdateJobProposalSpecDefinitionPayload:["NotFoundError","UpdateJobProposalSpecDefinitionSuccess"],UpdatePasswordPayload:["InputErrors","UpdatePasswordSuccess"],VRFKeyPayload:["NotFoundError","VRFKeySuccess"]}};let ai=ar;var aa=(r=void 0,location.origin),ao=new nh({uri:"".concat(aa,"/query"),credentials:"include"}),as=new ia({cache:new an({possibleTypes:ai.possibleTypes}),link:ao});if(a.Z.locale(o),u().defaultFormat="YYYY-MM-DD h:mm:ss 
A","undefined"!=typeof document){var au,ac,al=f().hydrate;ac=X,al(c.createElement(et,{client:as},c.createElement(d.zj,null,c.createElement(i.MuiThemeProvider,{theme:J.r},c.createElement(ac,null)))),document.getElementById("root"))}})()})(); \ No newline at end of file diff --git a/core/web/assets/main.74b124ef5d2ef3614139.js.gz b/core/web/assets/main.74b124ef5d2ef3614139.js.gz new file mode 100644 index 00000000..667a96c1 Binary files /dev/null and b/core/web/assets/main.74b124ef5d2ef3614139.js.gz differ diff --git a/core/web/auth/auth.go b/core/web/auth/auth.go new file mode 100644 index 00000000..a0b7463a --- /dev/null +++ b/core/web/auth/auth.go @@ -0,0 +1,251 @@ +package auth + +import ( + "database/sql" + "net/http" + + "github.com/gin-contrib/sessions" + "github.com/gin-gonic/gin" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + clsessions "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/static" +) + +const ( + // APIKey is the header name for the API token identifier for user authentication. + APIKey = "X-API-KEY" + + // APISecret is the header name for the API token secret for user authentication. + APISecret = "X-API-SECRET" + + // SessionName is the session name + SessionName = "clsession" + + // SessionIDKey is the session ID key in the session map + SessionIDKey = "clsession_id" + + // SessionUserKey is the User key in the session map + SessionUserKey = "user" + + // SessionExternalInitiatorKey is the External Initiator key in the session map + SessionExternalInitiatorKey = "external_initiator" +) + +// Authenticator defines the interface to authenticate requests against a +// datastore. 
+type Authenticator interface { + AuthorizedUserWithSession(sessionID string) (clsessions.User, error) + FindExternalInitiator(eia *auth.Token) (*bridges.ExternalInitiator, error) + FindUser(email string) (clsessions.User, error) + FindUserByAPIToken(apiToken string) (clsessions.User, error) +} + +// authMethod defines a method which can be used to authenticate a request. This +// can be implemented according to your authentication method (i.e by session, +// token, etc) +type authMethod func(ctx *gin.Context, store Authenticator) error + +// AuthenticateBySession authenticates the request by the session cookie. +// +// Implements authMethod +func AuthenticateBySession(c *gin.Context, authr Authenticator) error { + session := sessions.Default(c) + sessionID, ok := session.Get(SessionIDKey).(string) + if !ok { + return auth.ErrorAuthFailed + } + + user, err := authr.AuthorizedUserWithSession(sessionID) + if err != nil { + return err + } + + c.Set(SessionUserKey, &user) + + return nil +} + +var _ authMethod = AuthenticateBySession + +// AuthenticateByToken authenticates a User by their API token. 
+// +// Implements authMethod +func AuthenticateByToken(c *gin.Context, authr Authenticator) error { + token := &auth.Token{ + AccessKey: c.GetHeader(APIKey), + Secret: c.GetHeader(APISecret), + } + if token.AccessKey == "" { + return auth.ErrorAuthFailed + } + + if token.AccessKey == "" { + return auth.ErrorAuthFailed + } + + // We need to first load the user row so we can compare tokens using the stored salt + user, err := authr.FindUserByAPIToken(token.AccessKey) + if err != nil { + if errors.Is(err, sql.ErrNoRows) || errors.Is(err, clsessions.ErrUserSessionExpired) { + return auth.ErrorAuthFailed + } + return err + } + + ok, err := clsessions.AuthenticateUserByToken(token, &user) + if err != nil { + return err + } + if !ok { + return auth.ErrorAuthFailed + } + + c.Set(SessionUserKey, &user) + + return nil +} + +var _ authMethod = AuthenticateByToken + +// AuthenticateExternalInitiator authenticates an external initiator request. +// +// Implements authMethod +func AuthenticateExternalInitiator(c *gin.Context, store Authenticator) error { + eia := &auth.Token{ + AccessKey: c.GetHeader(static.ExternalInitiatorAccessKeyHeader), + Secret: c.GetHeader(static.ExternalInitiatorSecretHeader), + } + + ei, err := store.FindExternalInitiator(eia) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return auth.ErrorAuthFailed + } + + return errors.Wrap(err, "finding external initiator") + } + + ok, err := bridges.AuthenticateExternalInitiator(eia, ei) + if err != nil { + return err + } + if !ok { + return auth.ErrorAuthFailed + } + + c.Set(SessionExternalInitiatorKey, ei) + + // External initiator endpoints (wrapped with AuthenticateExternalInitiator) inherently assume the role + // of 'run' (required to trigger job runs) + c.Set(SessionExternalInitiatorKey, ei) + c.Set(SessionUserKey, &clsessions.User{Role: clsessions.UserRoleRun}) + + return nil +} + +var _ authMethod = AuthenticateExternalInitiator + +// Authenticate is middleware which authenticates the request by 
attempting to +// authenticate using all the provided methods. +func Authenticate(store Authenticator, methods ...authMethod) gin.HandlerFunc { + return func(c *gin.Context) { + var err error + for _, method := range methods { + err = method(c, store) + if !errors.Is(err, auth.ErrorAuthFailed) { + break + } + } + if err != nil { + c.Abort() + jsonAPIError(c, http.StatusUnauthorized, err) + + return + } + + c.Next() + } +} + +// GetAuthenticatedUser extracts the authentication user from the context. +func GetAuthenticatedUser(c *gin.Context) (*clsessions.User, bool) { + obj, ok := c.Get(SessionUserKey) + if !ok { + return nil, false + } + + user, ok := obj.(*clsessions.User) + + return user, ok +} + +// GetAuthenticatedExternalInitiator extracts the external initiator from the +// context. +func GetAuthenticatedExternalInitiator(c *gin.Context) (*bridges.ExternalInitiator, bool) { + obj, ok := c.Get(SessionExternalInitiatorKey) + if !ok { + return nil, false + } + + return obj.(*bridges.ExternalInitiator), ok +} + +// RequiresRunRole extracts the user object from the context, and asserts the user's role is at least +// 'run' +func RequiresRunRole(handler func(*gin.Context)) func(*gin.Context) { + return func(c *gin.Context) { + user, ok := GetAuthenticatedUser(c) + if !ok { + c.Abort() + jsonAPIError(c, http.StatusUnauthorized, errors.New("not a valid session")) + return + } + if user.Role == clsessions.UserRoleView { + c.Abort() + jsonAPIError(c, http.StatusUnauthorized, errors.New("Unauthorized")) + return + } + handler(c) + } +} + +// RequiresEditRole extracts the user object from the context, and asserts the user's role is at least +// 'edit' +func RequiresEditRole(handler func(*gin.Context)) func(*gin.Context) { + return func(c *gin.Context) { + user, ok := GetAuthenticatedUser(c) + if !ok { + c.Abort() + jsonAPIError(c, http.StatusUnauthorized, errors.New("not a valid session")) + return + } + if user.Role == clsessions.UserRoleView || user.Role == 
clsessions.UserRoleRun { + c.Abort() + jsonAPIError(c, http.StatusUnauthorized, errors.New("Unauthorized")) + return + } + handler(c) + } +} + +// RequiresAdminRole extracts the user object from the context, and asserts the user's role is 'admin' +func RequiresAdminRole(handler func(*gin.Context)) func(*gin.Context) { + return func(c *gin.Context) { + user, ok := GetAuthenticatedUser(c) + if !ok { + c.Abort() + jsonAPIError(c, http.StatusUnauthorized, errors.New("not a valid session")) + return + } + if user.Role != clsessions.UserRoleAdmin { + c.Abort() + addForbiddenErrorHeaders(c, "admin", string(user.Role), user.Email) + jsonAPIError(c, http.StatusForbidden, errors.New("Forbidden")) + return + } + handler(c) + } +} diff --git a/core/web/auth/auth_test.go b/core/web/auth/auth_test.go new file mode 100644 index 00000000..0c11239c --- /dev/null +++ b/core/web/auth/auth_test.go @@ -0,0 +1,514 @@ +package auth_test + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/web" + webauth "github.com/goplugin/pluginv3.0/v2/core/web/auth" +) + +func authError(*gin.Context, webauth.Authenticator) error { + return errors.New("random error") +} + +func authFailure(*gin.Context, webauth.Authenticator) error { + return auth.ErrorAuthFailed +} + +func authSuccess(*gin.Context, webauth.Authenticator) error { + return nil +} + +type userFindFailer struct { + sessions.AuthenticationProvider + err error +} + +func (u userFindFailer) FindUser(email string) (sessions.User, error) { + return sessions.User{}, u.err +} + +func (u userFindFailer) 
FindUserByAPIToken(token string) (sessions.User, error) { + return sessions.User{}, u.err +} + +type userFindSuccesser struct { + sessions.AuthenticationProvider + user sessions.User +} + +func (u userFindSuccesser) FindUser(email string) (sessions.User, error) { + return u.user, nil +} + +func (u userFindSuccesser) FindUserByAPIToken(token string) (sessions.User, error) { + return u.user, nil +} + +func TestAuthenticateByToken_Success(t *testing.T) { + user := cltest.MustRandomUser(t) + key, secret := uuid.New().String(), uuid.New().String() + apiToken := auth.Token{AccessKey: key, Secret: secret} + err := user.SetAuthToken(&apiToken) + require.NoError(t, err) + authr := userFindSuccesser{user: user} + + called := false + router := gin.New() + router.Use(webauth.Authenticate(authr, webauth.AuthenticateByToken)) + router.GET("/", func(c *gin.Context) { + called = true + c.String(http.StatusOK, "") + }) + + w := httptest.NewRecorder() + req := mustRequest(t, "GET", "/", nil) + req.Header.Set(webauth.APIKey, key) + req.Header.Set(webauth.APISecret, secret) + router.ServeHTTP(w, req) + + assert.True(t, called) + assert.Equal(t, http.StatusText(http.StatusOK), http.StatusText(w.Code)) +} + +func TestAuthenticateByToken_AuthFailed(t *testing.T) { + authr := userFindFailer{err: auth.ErrorAuthFailed} + + called := false + router := gin.New() + router.Use(webauth.Authenticate(authr, webauth.AuthenticateByToken)) + router.GET("/", func(c *gin.Context) { + called = true + c.String(http.StatusOK, "") + }) + + w := httptest.NewRecorder() + req := mustRequest(t, "GET", "/", nil) + req.Header.Set(webauth.APIKey, "bad-key") + req.Header.Set(webauth.APISecret, "bad-secret") + router.ServeHTTP(w, req) + + assert.False(t, called) + assert.Equal(t, http.StatusText(http.StatusUnauthorized), http.StatusText(w.Code)) +} + +func TestAuthenticateByToken_RejectsBlankAccessKey(t *testing.T) { + user := cltest.MustRandomUser(t) + key, secret := "", uuid.New().String() + apiToken := 
auth.Token{AccessKey: key, Secret: secret} + err := user.SetAuthToken(&apiToken) + require.NoError(t, err) + authr := userFindSuccesser{user: user} + + called := false + router := gin.New() + router.Use(webauth.Authenticate(authr, webauth.AuthenticateByToken)) + router.GET("/", func(c *gin.Context) { + called = true + c.String(http.StatusOK, "") + }) + + w := httptest.NewRecorder() + req := mustRequest(t, "GET", "/", nil) + req.Header.Set(webauth.APIKey, key) + req.Header.Set(webauth.APISecret, secret) + router.ServeHTTP(w, req) + + assert.False(t, called) + assert.Equal(t, http.StatusText(http.StatusUnauthorized), http.StatusText(w.Code)) +} + +func TestRequireAuth_NoneRequired(t *testing.T) { + called := false + var authr webauth.Authenticator + + router := gin.New() + router.Use(webauth.Authenticate(authr)) + router.GET("/", func(c *gin.Context) { + called = true + c.String(http.StatusOK, "") + }) + + w := httptest.NewRecorder() + req := mustRequest(t, "GET", "/", nil) + router.ServeHTTP(w, req) + + assert.True(t, called) + assert.Equal(t, http.StatusText(http.StatusOK), http.StatusText(w.Code)) +} + +func TestRequireAuth_AuthFailed(t *testing.T) { + called := false + var authr webauth.Authenticator + router := gin.New() + router.Use(webauth.Authenticate(authr, authFailure)) + router.GET("/", func(c *gin.Context) { + called = true + c.String(http.StatusOK, "") + }) + + w := httptest.NewRecorder() + req := mustRequest(t, "GET", "/", nil) + router.ServeHTTP(w, req) + + assert.False(t, called) + assert.Equal(t, http.StatusText(http.StatusUnauthorized), http.StatusText(w.Code)) +} + +func TestRequireAuth_LastAuthSuccess(t *testing.T) { + called := false + var authr webauth.Authenticator + router := gin.New() + router.Use(webauth.Authenticate(authr, authFailure, authSuccess)) + router.GET("/", func(c *gin.Context) { + called = true + c.String(http.StatusOK, "") + }) + + w := httptest.NewRecorder() + req := mustRequest(t, "GET", "/", nil) + router.ServeHTTP(w, req) + 
+ assert.True(t, called) + assert.Equal(t, http.StatusText(http.StatusOK), http.StatusText(w.Code)) +} + +func TestRequireAuth_Error(t *testing.T) { + called := false + var authr webauth.Authenticator + router := gin.New() + router.Use(webauth.Authenticate(authr, authError, authSuccess)) + router.GET("/", func(c *gin.Context) { + called = true + c.String(http.StatusOK, "") + }) + + w := httptest.NewRecorder() + req := mustRequest(t, "GET", "/", nil) + router.ServeHTTP(w, req) + + assert.False(t, called) + assert.Equal(t, http.StatusText(http.StatusUnauthorized), http.StatusText(w.Code)) +} + +// Test RBAC (Role based access control) of each route and their required user roles +// Admin is omitted from the fields here since admin should be able to access all routes +type routeRules struct { + verb string + path string + viewOnlyAllowed bool + editMinimalAllowed bool + EditAllowed bool +} + +// The following are admin only routes +var routesRolesMap = [...]routeRules{ + {"GET", "/v2/users", false, false, false}, + {"POST", "/v2/users", false, false, false}, + {"PATCH", "/v2/users", false, false, false}, + {"DELETE", "/v2/users/MOCK", false, false, false}, + {"PATCH", "/v2/user/password", true, true, true}, + {"POST", "/v2/user/token", true, true, true}, + {"POST", "/v2/user/token/delete", true, true, true}, + {"GET", "/v2/enroll_webauthn", true, true, true}, + {"POST", "/v2/enroll_webauthn", true, true, true}, + {"GET", "/v2/external_initiators", true, true, true}, + {"POST", "/v2/external_initiators", false, false, true}, + {"DELETE", "/v2/external_initiators/MOCK", false, false, true}, + {"GET", "/v2/bridge_types", true, true, true}, + {"POST", "/v2/bridge_types", false, false, true}, + {"GET", "/v2/bridge_types/MOCK", true, true, true}, + {"PATCH", "/v2/bridge_types/MOCK", false, false, true}, + {"DELETE", "/v2/bridge_types/MOCK", false, false, true}, + {"POST", "/v2/transfers", false, false, false}, + {"POST", "/v2/transfers/evm", false, false, false}, + {"POST", 
"/v2/transfers/cosmos", false, false, false}, + {"POST", "/v2/transfers/solana", false, false, false}, + {"GET", "/v2/config", true, true, true}, + {"GET", "/v2/config/v2", true, true, true}, + {"GET", "/v2/tx_attempts", true, true, true}, + {"GET", "/v2/tx_attempts/evm", true, true, true}, + {"GET", "/v2/transactions/evm", true, true, true}, + {"GET", "/v2/transactions/evm/MOCK", true, true, true}, + {"GET", "/v2/transactions", true, true, true}, + {"GET", "/v2/transactions/MOCK", true, true, true}, + {"POST", "/v2/replay_from_block/MOCK", false, true, true}, + {"GET", "/v2/keys/csa", true, true, true}, + {"POST", "/v2/keys/csa", false, false, true}, + {"POST", "/v2/keys/csa/import", false, false, false}, + {"POST", "/v2/keys/csa/export/MOCK", false, false, false}, + {"GET", "/v2/keys/eth", true, true, true}, + {"POST", "/v2/keys/eth", false, false, true}, + {"DELETE", "/v2/keys/eth/MOCK", false, false, false}, + {"POST", "/v2/keys/eth/import", false, false, false}, + {"POST", "/v2/keys/eth/export/MOCK", false, false, false}, + {"GET", "/v2/keys/ocr", true, true, true}, + {"POST", "/v2/keys/ocr", false, false, true}, + {"DELETE", "/v2/keys/ocr/:MOCKkeyID", false, false, false}, + {"POST", "/v2/keys/ocr/import", false, false, false}, + {"POST", "/v2/keys/ocr/export/MOCK", false, false, false}, + {"GET", "/v2/keys/ocr2", true, true, true}, + {"POST", "/v2/keys/ocr2/MOCK", false, false, true}, + {"DELETE", "/v2/keys/ocr2/MOCK", false, false, false}, + {"POST", "/v2/keys/ocr2/import", false, false, false}, + {"POST", "/v2/keys/ocr2/export/MOCK", false, false, false}, + {"GET", "/v2/keys/p2p", true, true, true}, + {"POST", "/v2/keys/p2p", false, false, true}, + {"DELETE", "/v2/keys/p2p/MOCK", false, false, false}, + {"POST", "/v2/keys/p2p/import", false, false, false}, + {"POST", "/v2/keys/p2p/export/MOCK", false, false, false}, + {"GET", "/v2/keys/solana", true, true, true}, + {"GET", "/v2/keys/cosmos", true, true, true}, + {"GET", "/v2/keys/dkgsign", true, true, 
true}, + {"POST", "/v2/keys/solana", false, false, true}, + {"POST", "/v2/keys/cosmos", false, false, true}, + {"POST", "/v2/keys/dkgsign", false, false, true}, + {"DELETE", "/v2/keys/solana/MOCK", false, false, false}, + {"DELETE", "/v2/keys/cosmos/MOCK", false, false, false}, + {"DELETE", "/v2/keys/dkgsign/MOCK", false, false, false}, + {"POST", "/v2/keys/solana/import", false, false, false}, + {"POST", "/v2/keys/cosmos/import", false, false, false}, + {"POST", "/v2/keys/dkgsign/import", false, false, false}, + {"POST", "/v2/keys/solana/export/MOCK", false, false, false}, + {"POST", "/v2/keys/cosmos/export/MOCK", false, false, false}, + {"POST", "/v2/keys/dkgsign/export/MOCK", false, false, false}, + {"GET", "/v2/keys/vrf", true, true, true}, + {"POST", "/v2/keys/vrf", false, false, true}, + {"DELETE", "/v2/keys/vrf/MOCK", false, false, false}, + {"POST", "/v2/keys/vrf/import", false, false, false}, + {"POST", "/v2/keys/vrf/export/MOCK", false, false, false}, + {"GET", "/v2/jobs", true, true, true}, + {"GET", "/v2/jobs/MOCK", true, true, true}, + {"POST", "/v2/jobs", false, false, true}, + {"DELETE", "/v2/jobs/MOCK", false, false, true}, + {"GET", "/v2/pipeline/runs", true, true, true}, + {"GET", "/v2/jobs/MOCK/runs", true, true, true}, + {"GET", "/v2/jobs/MOCK/runs/MOCK", true, true, true}, + {"GET", "/v2/features", true, true, true}, + {"DELETE", "/v2/pipeline/job_spec_errors/MOCK", false, false, true}, + {"GET", "/v2/log", true, true, true}, + {"PATCH", "/v2/log", false, false, false}, + {"GET", "/v2/chains/evm", true, true, true}, + {"GET", "/v2/chains/solana", true, true, true}, + {"GET", "/v2/chains/cosmos", true, true, true}, + {"GET", "/v2/chains/evm/MOCK", true, true, true}, + {"GET", "/v2/chains/cosmos/MOCK", true, true, true}, + {"GET", "/v2/nodes/", true, true, true}, + {"GET", "/v2/nodes/evm", true, true, true}, + {"GET", "/v2/nodes/solana", true, true, true}, + {"GET", "/v2/nodes/cosmos", true, true, true}, + {"GET", "/v2/chains/evm/MOCK/nodes", 
true, true, true}, + {"GET", "/v2/chains/solana/MOCK/nodes", true, true, true}, + {"GET", "/v2/chains/cosmos/MOCK/nodes", true, true, true}, + {"GET", "/v2/nodes/evm/forwarders", true, true, true}, + {"POST", "/v2/nodes/evm/forwarders/track", false, false, true}, + {"DELETE", "/v2/nodes/evm/forwarders/MOCK", false, false, true}, + {"GET", "/v2/build_info", true, true, true}, + {"GET", "/v2/ping", true, true, true}, + {"POST", "/v2/jobs/MOCK/runs", false, true, true}, +} + +// The following test implementations work by asserting only that "Unauthorized/Forbidden" errors are not returned (success case), +// because hitting the handler are not mocked and will crash as expected +// Iterate over the above routesRolesMap and assert each path is wrapped and +// the user role is enforced with the correct middleware +func TestRBAC_Routemap_Admin(t *testing.T) { + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + router := web.Router(t, app, nil) + ts := httptest.NewServer(router) + defer ts.Close() + + // Assert all admin routes + // no endpoint should return StatusUnauthorized + client := app.NewHTTPClient(nil) + for _, route := range routesRolesMap { + func() { + var resp *http.Response + var cleanup func() + + switch route.verb { + case "GET": + resp, cleanup = client.Get(route.path) + case "POST": + resp, cleanup = client.Post(route.path, nil) + case "DELETE": + resp, cleanup = client.Delete(route.path) + case "PATCH": + resp, cleanup = client.Patch(route.path, nil) + case "PUT": + resp, cleanup = client.Put(route.path, nil) + default: + t.Fatalf("Unknown HTTP verb %s\n", route.verb) + } + defer cleanup() + + assert.NotEqual(t, http.StatusUnauthorized, resp.StatusCode) + assert.NotEqual(t, http.StatusForbidden, resp.StatusCode) + }() + } +} + +func TestRBAC_Routemap_Edit(t *testing.T) { + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + router := web.Router(t, app, 
nil) + ts := httptest.NewServer(router) + defer ts.Close() + + // Create a test edit user to work with + u := &cltest.User{Role: sessions.UserRoleEdit} + client := app.NewHTTPClient(u) + + // Assert all edit routes + for _, route := range routesRolesMap { + func() { + var resp *http.Response + var cleanup func() + + switch route.verb { + case "GET": + resp, cleanup = client.Get(route.path) + case "POST": + resp, cleanup = client.Post(route.path, nil) + case "DELETE": + resp, cleanup = client.Delete(route.path) + case "PATCH": + resp, cleanup = client.Patch(route.path, nil) + case "PUT": + resp, cleanup = client.Put(route.path, nil) + default: + t.Fatalf("Unknown HTTP verb %s\n", route.verb) + } + defer cleanup() + + // If this route allows up to an edit role, don't expect an unauthorized response + if route.EditAllowed || route.editMinimalAllowed || route.viewOnlyAllowed { + assert.NotEqual(t, http.StatusUnauthorized, resp.StatusCode) + assert.NotEqual(t, http.StatusForbidden, resp.StatusCode) + } else if !route.EditAllowed { + assert.Equal(t, http.StatusForbidden, resp.StatusCode) + } else { + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + } + }() + } +} + +func TestRBAC_Routemap_Run(t *testing.T) { + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + router := web.Router(t, app, nil) + ts := httptest.NewServer(router) + defer ts.Close() + + // Create a test run user to work with + u := &cltest.User{Role: sessions.UserRoleRun} + client := app.NewHTTPClient(u) + + // Assert all run routes + for _, route := range routesRolesMap { + func() { + var resp *http.Response + var cleanup func() + + switch route.verb { + case "GET": + resp, cleanup = client.Get(route.path) + case "POST": + resp, cleanup = client.Post(route.path, nil) + case "DELETE": + resp, cleanup = client.Delete(route.path) + case "PATCH": + resp, cleanup = client.Patch(route.path, nil) + case "PUT": + resp, cleanup = client.Put(route.path, 
nil) + default: + t.Fatalf("Unknown HTTP verb %s\n", route.verb) + } + defer cleanup() + + // If this route allows up to an edit minimal role, don't expect an unauthorized response + if route.editMinimalAllowed || route.viewOnlyAllowed { + assert.NotEqual(t, http.StatusUnauthorized, resp.StatusCode) + assert.NotEqual(t, http.StatusForbidden, resp.StatusCode) + } else if !route.EditAllowed { + assert.Equal(t, http.StatusForbidden, resp.StatusCode) + } else { + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + } + }() + } +} + +func TestRBAC_Routemap_ViewOnly(t *testing.T) { + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + router := web.Router(t, app, nil) + ts := httptest.NewServer(router) + defer ts.Close() + + // Create a test run user to work with + u := &cltest.User{Role: sessions.UserRoleView} + client := app.NewHTTPClient(u) + + // Assert all view only routes + for i, route := range routesRolesMap { + route := route + t.Run(fmt.Sprintf("%d-%s-%s", i, route.verb, route.path), func(t *testing.T) { + var resp *http.Response + var cleanup func() + + switch route.verb { + case "GET": + resp, cleanup = client.Get(route.path) + case "POST": + resp, cleanup = client.Post(route.path, nil) + case "DELETE": + resp, cleanup = client.Delete(route.path) + case "PATCH": + resp, cleanup = client.Patch(route.path, nil) + case "PUT": + resp, cleanup = client.Put(route.path, nil) + default: + t.Fatalf("Unknown HTTP verb %s\n", route.verb) + } + defer cleanup() + + // If this route only allows view only, don't expect an unauthorized response + if route.viewOnlyAllowed { + assert.NotEqual(t, http.StatusUnauthorized, resp.StatusCode) + assert.NotEqual(t, http.StatusForbidden, resp.StatusCode) + } else if !route.EditAllowed { + assert.Equal(t, http.StatusForbidden, resp.StatusCode) + } else { + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + } + }) + } +} + +func mustRequest(t *testing.T, method, url string, 
body io.Reader) *http.Request { + ctx := testutils.Context(t) + req, err := http.NewRequestWithContext(ctx, method, url, body) + require.NoError(t, err) + return req +} diff --git a/core/web/auth/gql.go b/core/web/auth/gql.go new file mode 100644 index 00000000..3b47b3ad --- /dev/null +++ b/core/web/auth/gql.go @@ -0,0 +1,72 @@ +package auth + +import ( + "context" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + + "github.com/gin-contrib/sessions" + "github.com/gin-gonic/gin" + + clsessions "github.com/goplugin/pluginv3.0/v2/core/sessions" +) + +type sessionUserKey struct{} +type GQLSession struct { + SessionID string + User *clsessions.User +} + +// AuthenticateGQL middleware checks the session cookie for a user and sets it +// on the request context if it exists. It is the responsibility of each resolver +// to validate whether it requires an authenticated user. +// +// We currently only support GQL authentication by session cookie. +func AuthenticateGQL(authenticator Authenticator, lggr logger.Logger) gin.HandlerFunc { + return func(c *gin.Context) { + session := sessions.Default(c) + sessionID, ok := session.Get(SessionIDKey).(string) + if !ok { + return + } + + user, err := authenticator.AuthorizedUserWithSession(sessionID) + if err != nil { + if errors.Is(err, clsessions.ErrUserSessionExpired) { + lggr.Warnw("Failed to authenticate session", "err", err) + } else { + lggr.Errorw("Failed call to AuthorizedUserWithSession, unable to get user", "err", err) + } + return + } + + ctx := SetGQLAuthenticatedSession(c.Request.Context(), user, sessionID) + + c.Request = c.Request.WithContext(ctx) + } +} + +// SetGQLAuthenticatedSession sets the authenticated session in the context +// +// There shouldn't be a need to do this outside of testing +func SetGQLAuthenticatedSession(ctx context.Context, user clsessions.User, sessionID string) context.Context { + return context.WithValue( + ctx, + sessionUserKey{}, + &GQLSession{sessionID, 
&user}, + ) +} + +// GetGQLAuthenticatedSession extracts the authentication session from a context. +func GetGQLAuthenticatedSession(ctx context.Context) (*GQLSession, bool) { + obj := ctx.Value(sessionUserKey{}) + if obj == nil { + return nil, false + } + + session, ok := obj.(*GQLSession) + + return session, ok +} diff --git a/core/web/auth/gql_test.go b/core/web/auth/gql_test.go new file mode 100644 index 00000000..91ef673b --- /dev/null +++ b/core/web/auth/gql_test.go @@ -0,0 +1,85 @@ +package auth_test + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/gin-contrib/sessions" + "github.com/gin-contrib/sessions/cookie" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/logger" + clsessions "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/sessions/mocks" + "github.com/goplugin/pluginv3.0/v2/core/web/auth" +) + +func Test_AuthenticateGQL_Unauthenticated(t *testing.T) { + t.Parallel() + + sessionORM := mocks.NewAuthenticationProvider(t) + sessionStore := cookie.NewStore([]byte("secret")) + + r := gin.Default() + r.Use(sessions.Sessions(auth.SessionName, sessionStore)) + r.Use(auth.AuthenticateGQL(sessionORM, logger.TestLogger(t))) + + r.GET("/", func(c *gin.Context) { + session, ok := auth.GetGQLAuthenticatedSession(c) + assert.False(t, ok) + assert.Nil(t, session) + + c.String(http.StatusOK, "") + }) + + w := httptest.NewRecorder() + req := mustRequest(t, "GET", "/", nil) + r.ServeHTTP(w, req) +} + +func Test_AuthenticateGQL_Authenticated(t *testing.T) { + t.Parallel() + + sessionORM := mocks.NewAuthenticationProvider(t) + sessionStore := cookie.NewStore([]byte(cltest.SessionSecret)) + sessionID := "sessionID" + + r := gin.Default() + r.Use(sessions.Sessions(auth.SessionName, sessionStore)) + 
r.Use(auth.AuthenticateGQL(sessionORM, logger.TestLogger(t))) + + r.GET("/", func(c *gin.Context) { + session, ok := auth.GetGQLAuthenticatedSession(c.Request.Context()) + assert.True(t, ok) + assert.NotNil(t, session) + + c.String(http.StatusOK, "") + }) + + sessionORM.On("AuthorizedUserWithSession", sessionID).Return(clsessions.User{Email: cltest.APIEmailAdmin, Role: clsessions.UserRoleAdmin}, nil) + + w := httptest.NewRecorder() + req := mustRequest(t, "GET", "/", nil) + cookie := cltest.MustGenerateSessionCookie(t, sessionID) + req.AddCookie(cookie) + + r.ServeHTTP(w, req) +} + +func Test_GetAndSetGQLAuthenticatedSession(t *testing.T) { + t.Parallel() + + ctx := testutils.Context(t) + user := clsessions.User{Email: cltest.APIEmailAdmin, Role: clsessions.UserRoleAdmin} + + ctx = auth.SetGQLAuthenticatedSession(ctx, user, "sessionID") + + actual, ok := auth.GetGQLAuthenticatedSession(ctx) + assert.True(t, ok) + assert.Equal(t, &user, actual.User) + assert.Equal(t, "sessionID", actual.SessionID) +} diff --git a/core/web/auth/helpers.go b/core/web/auth/helpers.go new file mode 100644 index 00000000..490e491c --- /dev/null +++ b/core/web/auth/helpers.go @@ -0,0 +1,34 @@ +package auth + +import ( + "github.com/gin-gonic/gin" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +// jsonAPIError adds an error to the gin context and sets +// the JSON value of errors. +// +// This is duplicated code, but we plan to deprecate and remove the JSONAPI +// so this is ok for now +func jsonAPIError(c *gin.Context, statusCode int, err error) { + _ = c.Error(err).SetType(gin.ErrorTypePublic) + var jsonErr *models.JSONAPIErrors + if errors.As(err, &jsonErr) { + c.JSON(statusCode, jsonErr) + return + } + c.JSON(statusCode, models.NewJSONAPIErrorsWith(err.Error())) +} + +// addForbiddenErrorHeaders adds custom headers to the 403 (Forbidden) response +// so that they can be parsed by the remote client for friendly/actionable error messages. 
+// +// The fields are specific because Forbidden error is caused by the user not having the correct role +// for the required action +func addForbiddenErrorHeaders(c *gin.Context, requiredRole string, providedRole string, providedEmail string) { + c.Header("forbidden-required-role", requiredRole) + c.Header("forbidden-provided-role", providedRole) + c.Header("forbidden-provided-email", providedEmail) +} diff --git a/core/web/bridge_types_controller.go b/core/web/bridge_types_controller.go new file mode 100644 index 00000000..5265ce72 --- /dev/null +++ b/core/web/bridge_types_controller.go @@ -0,0 +1,227 @@ +package web + +import ( + "database/sql" + "fmt" + "net/http" + "strings" + + "github.com/jackc/pgconn" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/gin-gonic/gin" + "github.com/pkg/errors" +) + +// ValidateBridgeTypeNotExist checks that a bridge has not already been created +func ValidateBridgeTypeNotExist(bt *bridges.BridgeTypeRequest, orm bridges.ORM) error { + fe := models.NewJSONAPIErrors() + _, err := orm.FindBridge(bt.Name) + if err == nil { + fe.Add(fmt.Sprintf("Bridge Type %v already exists", bt.Name)) + } + if err != nil && !errors.Is(err, sql.ErrNoRows) { + fe.Add(fmt.Sprintf("Error determining if bridge type %v already exists", bt.Name)) + } + return fe.CoerceEmptyToNil() +} + +// ValidateBridgeType checks that the bridge type has the required field with valid values. 
+func ValidateBridgeType(bt *bridges.BridgeTypeRequest) error { + fe := models.NewJSONAPIErrors() + if len(bt.Name.String()) < 1 { + fe.Add("No name specified") + } + if _, err := bridges.ParseBridgeName(bt.Name.String()); err != nil { + fe.Merge(err) + } + u := bt.URL.String() + if len(strings.TrimSpace(u)) == 0 { + fe.Add("URL must be present") + } + if bt.MinimumContractPayment != nil && + bt.MinimumContractPayment.Cmp(assets.NewLinkFromJuels(0)) < 0 { + fe.Add("MinimumContractPayment must be positive") + } + return fe.CoerceEmptyToNil() +} + +// BridgeTypesController manages BridgeType requests in the node. +type BridgeTypesController struct { + App plugin.Application +} + +// Create adds the BridgeType to the given context. +func (btc *BridgeTypesController) Create(c *gin.Context) { + btr := &bridges.BridgeTypeRequest{} + + if err := c.ShouldBindJSON(btr); err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + bta, bt, err := bridges.NewBridgeType(btr) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + if e := ValidateBridgeType(btr); e != nil { + jsonAPIError(c, http.StatusBadRequest, e) + return + } + orm := btc.App.BridgeORM() + if e := ValidateBridgeTypeNotExist(btr, orm); e != nil { + jsonAPIError(c, http.StatusBadRequest, e) + return + } + if e := orm.CreateBridgeType(bt); e != nil { + jsonAPIError(c, http.StatusInternalServerError, e) + return + } + var pgErr *pgconn.PgError + if errors.As(err, &pgErr) { + var apiErr error + if pgErr.ConstraintName == "external_initiators_name_key" { + apiErr = fmt.Errorf("bridge Type %v conflict", bt.Name) + } else { + apiErr = err + } + jsonAPIError(c, http.StatusConflict, apiErr) + return + } + resource := presenters.NewBridgeResource(*bt) + resource.IncomingToken = bta.IncomingToken + + btc.App.GetAuditLogger().Audit(audit.BridgeCreated, map[string]interface{}{ + "bridgeName": bta.Name, + "bridgeConfirmations": bta.Confirmations, + 
"bridgeMinimumContractPayment": bta.MinimumContractPayment, + "bridgeURL": bta.URL, + }) + + jsonAPIResponse(c, resource, "bridge") +} + +// Index lists Bridges, one page at a time. +func (btc *BridgeTypesController) Index(c *gin.Context, size, page, offset int) { + bridges, count, err := btc.App.BridgeORM().BridgeTypes(offset, size) + + var resources []presenters.BridgeResource + for _, bridge := range bridges { + resources = append(resources, *presenters.NewBridgeResource(bridge)) + } + + paginatedResponse(c, "Bridges", size, page, resources, count, err) +} + +// Show returns the details of a specific Bridge. +func (btc *BridgeTypesController) Show(c *gin.Context) { + name := c.Param("BridgeName") + + taskType, err := bridges.ParseBridgeName(name) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + bt, err := btc.App.BridgeORM().FindBridge(taskType) + if errors.Is(err, sql.ErrNoRows) { + jsonAPIError(c, http.StatusNotFound, errors.New("bridge not found")) + return + } + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + jsonAPIResponse(c, presenters.NewBridgeResource(bt), "bridge") +} + +// Update can change the restricted attributes for a bridge +func (btc *BridgeTypesController) Update(c *gin.Context) { + name := c.Param("BridgeName") + btr := &bridges.BridgeTypeRequest{} + + taskType, err := bridges.ParseBridgeName(name) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + orm := btc.App.BridgeORM() + bt, err := orm.FindBridge(taskType) + if errors.Is(err, sql.ErrNoRows) { + jsonAPIError(c, http.StatusNotFound, errors.New("bridge not found")) + return + } + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + if err := c.ShouldBindJSON(btr); err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + if err := ValidateBridgeType(btr); err != nil { + jsonAPIError(c, http.StatusBadRequest, err) 
+ return + } + if err := orm.UpdateBridgeType(&bt, btr); err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + btc.App.GetAuditLogger().Audit(audit.BridgeUpdated, map[string]interface{}{ + "bridgeName": bt.Name, + "bridgeConfirmations": bt.Confirmations, + "bridgeMinimumContractPayment": bt.MinimumContractPayment, + "bridgeURL": bt.URL, + }) + + jsonAPIResponse(c, presenters.NewBridgeResource(bt), "bridge") +} + +// Destroy removes a specific Bridge. +func (btc *BridgeTypesController) Destroy(c *gin.Context) { + name := c.Param("BridgeName") + + taskType, err := bridges.ParseBridgeName(name) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + orm := btc.App.BridgeORM() + bt, err := orm.FindBridge(taskType) + if errors.Is(err, sql.ErrNoRows) { + jsonAPIError(c, http.StatusNotFound, errors.New("bridge not found")) + return + } + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("error searching for bridge: %+v", err)) + return + } + jobsUsingBridge, err := btc.App.JobORM().FindJobIDsWithBridge(name) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("error searching for associated v2 jobs: %+v", err)) + return + } + if len(jobsUsingBridge) > 0 { + jsonAPIError(c, http.StatusConflict, fmt.Errorf("can't remove the bridge because jobs %v are associated with it", jobsUsingBridge)) + return + } + if err = orm.DeleteBridgeType(&bt); err != nil { + jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("failed to delete bridge: %+v", err)) + return + } + + btc.App.GetAuditLogger().Audit(audit.BridgeDeleted, map[string]interface{}{"name": name}) + + jsonAPIResponse(c, presenters.NewBridgeResource(bt), "bridge") +} diff --git a/core/web/bridge_types_controller_test.go b/core/web/bridge_types_controller_test.go new file mode 100644 index 00000000..c8f30812 --- /dev/null +++ b/core/web/bridge_types_controller_test.go @@ -0,0 +1,345 @@ +package web_test 
+ +import ( + "bytes" + "fmt" + "net/http" + "testing" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidateBridgeType(t *testing.T) { + t.Parallel() + + tests := []struct { + description string + request bridges.BridgeTypeRequest + want error + }{ + { + "no adapter name", + bridges.BridgeTypeRequest{ + URL: cltest.WebURL(t, "https://denergy.eth"), + }, + models.NewJSONAPIErrorsWith("No name specified"), + }, + { + "invalid adapter name", + bridges.BridgeTypeRequest{ + Name: "invalid/adapter", + URL: cltest.WebURL(t, "https://denergy.eth"), + }, + models.NewJSONAPIErrorsWith("task type validation: name invalid/adapter contains invalid characters"), + }, + { + "invalid with blank url", + bridges.BridgeTypeRequest{ + Name: "validadaptername", + URL: cltest.WebURL(t, ""), + }, + models.NewJSONAPIErrorsWith("URL must be present"), + }, + { + "valid url", + bridges.BridgeTypeRequest{ + Name: "adapterwithvalidurl", + URL: cltest.WebURL(t, "//denergy"), + }, + nil, + }, + { + "valid docker url", + bridges.BridgeTypeRequest{ + Name: "adapterwithdockerurl", + URL: cltest.WebURL(t, "http://plugin_cmc-adapter_1:8080"), + }, + nil, + }, + { + "valid MinimumContractPayment positive", + bridges.BridgeTypeRequest{ + Name: "adapterwithdockerurl", + URL: cltest.WebURL(t, "http://plugin_cmc-adapter_1:8080"), + MinimumContractPayment: assets.NewLinkFromJuels(1), + }, + nil, + }, + { + "invalid 
MinimumContractPayment negative", + bridges.BridgeTypeRequest{ + Name: "adapterwithdockerurl", + URL: cltest.WebURL(t, "http://plugin_cmc-adapter_1:8080"), + MinimumContractPayment: assets.NewLinkFromJuels(-1), + }, + models.NewJSONAPIErrorsWith("MinimumContractPayment must be positive"), + }, + { + "existing core adapter (no longer fails since core adapters no longer exist)", + bridges.BridgeTypeRequest{ + Name: "ethtx", + URL: cltest.WebURL(t, "https://denergy.eth"), + }, + nil, + }, + { + "new external adapter", + bridges.BridgeTypeRequest{ + Name: "gdaxprice", + URL: cltest.WebURL(t, "https://denergy.eth"), + }, + nil, + }} + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + req := test.request + result := web.ValidateBridgeType(&req) + assert.Equal(t, test.want, result) + }) + } +} + +func TestValidateBridgeNotExist(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := pgtest.NewQConfig(true) + orm := bridges.NewORM(db, logger.TestLogger(t), cfg) + + // Create a duplicate + bt := bridges.BridgeType{} + bt.Name = bridges.MustParseBridgeName("solargridreporting") + bt.URL = cltest.WebURL(t, "https://denergy.eth") + assert.NoError(t, orm.CreateBridgeType(&bt)) + + newBridge := bridges.BridgeTypeRequest{ + Name: "solargridreporting", + } + expected := models.NewJSONAPIErrorsWith("Bridge Type solargridreporting already exists") + result := web.ValidateBridgeTypeNotExist(&newBridge, orm) + assert.Equal(t, expected, result) +} + +func BenchmarkBridgeTypesController_Index(b *testing.B) { + app := cltest.NewApplication(b) + client := app.NewHTTPClient(nil) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + resp, cleanup := client.Get("/v2/bridge_types") + b.Cleanup(cleanup) + assert.Equal(b, http.StatusOK, resp.StatusCode, "Response should be successful") + } +} + +func TestBridgeTypesController_Index(t *testing.T) { + t.Parallel() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + 
client := app.NewHTTPClient(nil) + + bt, err := setupBridgeControllerIndex(t, app.BridgeORM()) + assert.NoError(t, err) + + resp, cleanup := client.Get("/v2/bridge_types?size=x") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusUnprocessableEntity) + + resp, cleanup = client.Get("/v2/bridge_types?size=1") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusOK) + + var links jsonapi.Links + resources := []presenters.BridgeResource{} + + err = web.ParsePaginatedResponse(cltest.ParseResponseBody(t, resp), &resources, &links) + assert.NoError(t, err) + assert.NotEmpty(t, links["next"].Href) + assert.Empty(t, links["prev"].Href) + + assert.Len(t, resources, 1) + assert.Equal(t, bt[0].Name.String(), resources[0].Name, "should have the same Name") + assert.Equal(t, bt[0].URL.String(), resources[0].URL, "should have the same URL") + assert.Equal(t, bt[0].Confirmations, resources[0].Confirmations, "should have the same Confirmations") + + resp, cleanup = client.Get(links["next"].Href) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusOK) + + resources = []presenters.BridgeResource{} + err = web.ParsePaginatedResponse(cltest.ParseResponseBody(t, resp), &resources, &links) + assert.NoError(t, err) + assert.Empty(t, links["next"]) + assert.NotEmpty(t, links["prev"]) + assert.Len(t, resources, 1) + assert.Equal(t, bt[1].Name.String(), resources[0].Name, "should have the same Name") + assert.Equal(t, bt[1].URL.String(), resources[0].URL, "should have the same URL") + assert.Equal(t, bt[1].Confirmations, resources[0].Confirmations, "should have the same Confirmations") +} + +// cannot randomize bridge names here since they are ordered by name on the API +// leading in random order for assertion... 
+func setupBridgeControllerIndex(t testing.TB, orm bridges.ORM) ([]*bridges.BridgeType, error) { + bt1 := &bridges.BridgeType{ + Name: bridges.MustParseBridgeName("indexbridges1"), + URL: cltest.WebURL(t, "https://testing.com/bridges"), + Confirmations: 0, + } + err := orm.CreateBridgeType(bt1) + if err != nil { + return nil, err + } + + bt2 := &bridges.BridgeType{ + Name: bridges.MustParseBridgeName("indexbridges2"), + URL: cltest.WebURL(t, "https://testing.com/tari"), + Confirmations: 0, + } + err = orm.CreateBridgeType(bt2) + return []*bridges.BridgeType{bt1, bt2}, err +} + +func TestBridgeTypesController_Create_Success(t *testing.T) { + t.Parallel() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + + resp, cleanup := client.Post( + "/v2/bridge_types", + bytes.NewBuffer(cltest.MustReadFile(t, "../testdata/apiresponses/create_random_number_bridge_type.json")), + ) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusOK) + respJSON := cltest.ParseJSON(t, resp.Body) + btName := respJSON.Get("data.attributes.name").String() + + assert.NotEmpty(t, respJSON.Get("data.attributes.incomingToken").String()) + assert.NotEmpty(t, respJSON.Get("data.attributes.outgoingToken").String()) + + bt, err := app.BridgeORM().FindBridge(bridges.MustParseBridgeName(btName)) + assert.NoError(t, err) + assert.Equal(t, "randomnumber", bt.Name.String()) + assert.Equal(t, uint32(10), bt.Confirmations) + assert.Equal(t, "https://example.com/randomNumber", bt.URL.String()) + assert.Equal(t, assets.NewLinkFromJuels(100), bt.MinimumContractPayment) + assert.NotEmpty(t, bt.OutgoingToken) +} + +func TestBridgeTypesController_Update_Success(t *testing.T) { + t.Parallel() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + + bridgeName := testutils.RandomizeName("BRidgea") + bt := &bridges.BridgeType{ + Name: 
bridges.MustParseBridgeName(bridgeName), + URL: cltest.WebURL(t, "http://mybridge"), + } + require.NoError(t, app.BridgeORM().CreateBridgeType(bt)) + + body := fmt.Sprintf(`{"name": "%s","url":"http://yourbridge"}`, bridgeName) + ud := bytes.NewBuffer([]byte(body)) + resp, cleanup := client.Patch("/v2/bridge_types/"+bridgeName, ud) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusOK) + + ubt, err := app.BridgeORM().FindBridge(bt.Name) + assert.NoError(t, err) + assert.Equal(t, cltest.WebURL(t, "http://yourbridge"), ubt.URL) +} + +func TestBridgeController_Show(t *testing.T) { + t.Parallel() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + bt := &bridges.BridgeType{ + Name: bridges.MustParseBridgeName(testutils.RandomizeName("showbridge")), + URL: cltest.WebURL(t, "https://testing.com/bridges"), + Confirmations: 0, + } + require.NoError(t, app.BridgeORM().CreateBridgeType(bt)) + + resp, cleanup := client.Get("/v2/bridge_types/" + bt.Name.String()) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusOK, resp.StatusCode, "Response should be successful") + + var resource presenters.BridgeResource + require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &resource)) + assert.Equal(t, bt.Name.String(), resource.Name, "should have the same name") + assert.Equal(t, bt.URL.String(), resource.URL, "should have the same URL") + assert.Equal(t, bt.Confirmations, resource.Confirmations, "should have the same Confirmations") + + resp, cleanup = client.Get("/v2/bridge_types/nosuchbridge") + t.Cleanup(cleanup) + assert.Equal(t, http.StatusNotFound, resp.StatusCode, "Response should be 404") +} + +func TestBridgeTypesController_Create_AdapterExistsError(t *testing.T) { + t.Parallel() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + resp, cleanup := client.Post( + "/v2/bridge_types", + 
bytes.NewBuffer(cltest.MustReadFile(t, "../testdata/apiresponses/existing_core_adapter.json")), + ) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusBadRequest) +} + +func TestBridgeTypesController_Create_BindJSONError(t *testing.T) { + t.Parallel() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + resp, cleanup := client.Post( + "/v2/bridge_types", + bytes.NewBufferString("}"), + ) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusUnprocessableEntity) +} + +func TestBridgeTypesController_Create_DatabaseError(t *testing.T) { + t.Parallel() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + resp, cleanup := client.Post( + "/v2/bridge_types", + bytes.NewBufferString(`{"url":"http://without.a.name"}`), + ) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusBadRequest) +} diff --git a/core/web/build_info_controller.go b/core/web/build_info_controller.go new file mode 100644 index 00000000..752a7541 --- /dev/null +++ b/core/web/build_info_controller.go @@ -0,0 +1,20 @@ +package web + +import ( + "net/http" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/static" + + "github.com/gin-gonic/gin" +) + +// BuildVersonController has the build_info endpoint. +type BuildInfoController struct { + App plugin.Application +} + +// Show returns the build info. 
+func (eic *BuildInfoController) Show(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"version": static.Version, "commitSHA": static.Sha}) +} diff --git a/core/web/build_info_controller_test.go b/core/web/build_info_controller_test.go new file mode 100644 index 00000000..cd85fe6c --- /dev/null +++ b/core/web/build_info_controller_test.go @@ -0,0 +1,46 @@ +package web_test + +import ( + "net/http" + "strings" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + clhttptest "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/httptest" + + "github.com/stretchr/testify/require" +) + +func TestBuildInfoController_Show_APICredentials(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + resp, cleanup := client.Get("/v2/build_info") + defer cleanup() + cltest.AssertServerResponse(t, resp, http.StatusOK) + body := string(cltest.ParseResponseBody(t, resp)) + + require.Contains(t, strings.TrimSpace(body), "commitSHA") + require.Contains(t, strings.TrimSpace(body), "version") +} + +func TestBuildInfoController_Show_NoCredentials(t *testing.T) { + t.Parallel() + + ctx := testutils.Context(t) + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(ctx)) + + client := clhttptest.NewTestLocalOnlyHTTPClient() + url := app.Server.URL + "/v2/build_info" + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + require.NoError(t, err) + resp, err := client.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusUnauthorized, resp.StatusCode) +} diff --git a/core/web/chains_controller.go b/core/web/chains_controller.go new file mode 100644 index 00000000..f8064d65 --- /dev/null +++ b/core/web/chains_controller.go @@ -0,0 +1,90 @@ +package web + +import ( + "fmt" + "net/http" + + "github.com/gin-gonic/gin" + "github.com/manyminds/api2go/jsonapi" 
// errChainDisabled is returned when a request targets a chain family whose
// support is switched off in the node's TOML configuration.
type errChainDisabled struct {
	name    string // human-readable chain family name, e.g. "Cosmos"
	tomlKey string // TOML key the operator must set true to enable it
}

// Error implements the error interface with an actionable message that
// tells the operator exactly which configuration key enables the family.
func (e errChainDisabled) Error() string {
	const format = "%s is disabled: Set %s=true to enable"
	return fmt.Sprintf(format, e.name, e.tomlKey)
}
http.StatusBadRequest, cc.errNotEnabled) + return + } + relayID := relay.ID{Network: cc.network, ChainID: c.Param("ID")} + chain, err := cc.chainStats.ChainStatus(c, relayID) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + + jsonAPIResponse(c, cc.newResource(chain), cc.resourceName) +} diff --git a/core/web/common.go b/core/web/common.go new file mode 100644 index 00000000..5c99a13f --- /dev/null +++ b/core/web/common.go @@ -0,0 +1,38 @@ +package web + +import ( + "math/big" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" +) + +var ( + ErrMissingChainID = errors.New("chain id does not match any local chains") + ErrEmptyChainID = errors.New("chainID is empty") + ErrInvalidChainID = errors.New("invalid chain id") + ErrMultipleChains = errors.New("more than one chain available, you must specify chain id parameter") +) + +func getChain(legacyChains legacyevm.LegacyChainContainer, chainIDstr string) (chain legacyevm.Chain, err error) { + + if chainIDstr != "" && chainIDstr != "" { + // evm keys are expected to be parsable as a big int + _, ok := big.NewInt(0).SetString(chainIDstr, 10) + if !ok { + return nil, ErrInvalidChainID + } + chain, err = legacyChains.Get(chainIDstr) + if err != nil { + return nil, ErrMissingChainID + } + return chain, nil + } + + if legacyChains.Len() > 1 { + return nil, ErrMultipleChains + } + + return nil, ErrEmptyChainID +} diff --git a/core/web/config_controller.go b/core/web/config_controller.go new file mode 100644 index 00000000..b48cb3aa --- /dev/null +++ b/core/web/config_controller.go @@ -0,0 +1,54 @@ +package web + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/utils" + + "github.com/gin-gonic/gin" +) + +// ConfigController manages config variables +type ConfigController struct { + App plugin.Application +} + +// Show returns the whitelist of config variables +// 
Example: +// +// "/config" +func (cc *ConfigController) Show(c *gin.Context) { + cfg := cc.App.GetConfig() + var userOnly bool + if s, has := c.GetQuery("userOnly"); has { + var err error + userOnly, err = strconv.ParseBool(s) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, fmt.Errorf("invalid bool for userOnly: %v", err)) + return + } + } + var toml string + user, effective := cfg.ConfigTOML() + if userOnly { + toml = user + } else { + toml = effective + } + jsonAPIResponse(c, ConfigV2Resource{toml}, "config") +} + +type ConfigV2Resource struct { + Config string `json:"config"` +} + +func (c ConfigV2Resource) GetID() string { + return utils.NewBytes32ID() +} + +func (c *ConfigV2Resource) SetID(string) error { + return nil +} diff --git a/core/web/cookies.go b/core/web/cookies.go new file mode 100644 index 00000000..d23dc89b --- /dev/null +++ b/core/web/cookies.go @@ -0,0 +1,16 @@ +package web + +import ( + "net/http" +) + +// FindSessionCookie returns the cookie with the "clsession" name +func FindSessionCookie(cookies []*http.Cookie) *http.Cookie { + for _, c := range cookies { + if c.Name == "clsession" { + return c + } + } + + return nil +} diff --git a/core/web/cors_test.go b/core/web/cors_test.go new file mode 100644 index 00000000..2670ece6 --- /dev/null +++ b/core/web/cors_test.go @@ -0,0 +1,72 @@ +package web_test + +import ( + "net/http" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func TestCors_DefaultOrigins(t *testing.T) { + t.Parallel() + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.WebServer.AllowOrigins = ptr("http://localhost:3000,http://localhost:6689") + }) + + tests := []struct { + origin string + statusCode int + }{ + {"http://localhost:3000", http.StatusOK}, + {"http://localhost:6689", http.StatusOK}, + 
{"http://localhost:1234", http.StatusForbidden}, + } + + for _, test := range tests { + t.Run(test.origin, func(t *testing.T) { + app := cltest.NewApplicationWithConfig(t, config) + + client := app.NewHTTPClient(nil) + + headers := map[string]string{"Origin": test.origin} + resp, cleanup := client.Get("/v2/chains/evm", headers) + defer cleanup() + cltest.AssertServerResponse(t, resp, test.statusCode) + }) + } +} + +func TestCors_OverrideOrigins(t *testing.T) { + t.Parallel() + + tests := []struct { + allow string + origin string + statusCode int + }{ + {"http://plugin.com", "http://plugin.com", http.StatusOK}, + {"http://plugin.com", "http://localhost:3000", http.StatusForbidden}, + {"*", "http://plugin.com", http.StatusOK}, + {"*", "http://localhost:3000", http.StatusOK}, + } + + for _, test := range tests { + t.Run(test.origin, func(t *testing.T) { + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.WebServer.AllowOrigins = ptr(test.allow) + }) + app := cltest.NewApplicationWithConfig(t, config) + + client := app.NewHTTPClient(nil) + + headers := map[string]string{"Origin": test.origin} + resp, cleanup := client.Get("/v2/chains/evm", headers) + defer cleanup() + cltest.AssertServerResponse(t, resp, test.statusCode) + }) + } +} diff --git a/core/web/cosmos_chains_controller.go b/core/web/cosmos_chains_controller.go new file mode 100644 index 00000000..330dd412 --- /dev/null +++ b/core/web/cosmos_chains_controller.go @@ -0,0 +1,17 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func NewCosmosChainsController(app plugin.Application) ChainsController { + return newChainsController[presenters.CosmosChainResource]( + relay.Cosmos, + app.GetRelayers().List(plugin.FilterRelayersByType(relay.Cosmos)), + ErrCosmosNotEnabled, + presenters.NewCosmosChainResource, + 
app.GetLogger(), + app.GetAuditLogger()) +} diff --git a/core/web/cosmos_chains_controller_test.go b/core/web/cosmos_chains_controller_test.go new file mode 100644 index 00000000..d166ca4b --- /dev/null +++ b/core/web/cosmos_chains_controller_test.go @@ -0,0 +1,193 @@ +package web_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/manyminds/api2go/jsonapi" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/types" + coscfg "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/cosmostest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func Test_CosmosChainsController_Show(t *testing.T) { + t.Parallel() + + const validId = "Plugin-12" + + testCases := []struct { + name string + inputId string + wantStatusCode int + want func(t *testing.T, app *cltest.TestApplication) *types.ChainStatus + }{ + { + inputId: validId, + name: "success", + want: func(t *testing.T, app *cltest.TestApplication) *types.ChainStatus { + return &types.ChainStatus{ + ID: validId, + Enabled: true, + Config: `ChainID = 'Plugin-12' +Enabled = true +Bech32Prefix = 'wasm' +BlockRate = '6s' +BlocksUntilTxTimeout = 30 +ConfirmPollPeriod = '1s' +FallbackGasPrice = '9.999' +GasToken = 'ucosm' +GasLimitMultiplier = '1.55555' +MaxMsgsPerBatch = 100 +OCR2CachePollPeriod = '4s' +OCR2CacheTTL = '1m0s' +TxMsgTimeout = '10m0s' +Nodes = [] +`, + } + }, + wantStatusCode: http.StatusOK, + }, + { + inputId: "234", + name: "not found", + want: func(t *testing.T, app *cltest.TestApplication) *types.ChainStatus { + return nil + 
}, + wantStatusCode: http.StatusBadRequest, + }, + } + + for _, testCase := range testCases { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + controller := setupCosmosChainsControllerTestV2(t, &coscfg.TOMLConfig{ + ChainID: ptr(validId), + Enabled: ptr(true), + Chain: coscfg.Chain{ + FallbackGasPrice: ptr(decimal.RequireFromString("9.999")), + GasLimitMultiplier: ptr(decimal.RequireFromString("1.55555")), + }}) + + wantedResult := tc.want(t, controller.app) + resp, cleanup := controller.client.Get( + fmt.Sprintf("/v2/chains/cosmos/%s", tc.inputId), + ) + t.Cleanup(cleanup) + require.Equal(t, tc.wantStatusCode, resp.StatusCode) + + if wantedResult != nil { + resource1 := presenters.CosmosChainResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, resp), &resource1) + require.NoError(t, err) + + assert.Equal(t, wantedResult.ID, resource1.ID) + assert.Equal(t, wantedResult.Config, resource1.Config) + } + }) + } +} + +func Test_CosmosChainsController_Index(t *testing.T) { + t.Parallel() + + chainA := &coscfg.TOMLConfig{ + ChainID: ptr("a" + cosmostest.RandomChainID()), + Enabled: ptr(true), + Chain: coscfg.Chain{ + FallbackGasPrice: ptr(decimal.RequireFromString("9.999")), + }, + } + + chainB := &coscfg.TOMLConfig{ + ChainID: ptr("b" + cosmostest.RandomChainID()), + Enabled: ptr(true), + Chain: coscfg.Chain{ + GasLimitMultiplier: ptr(decimal.RequireFromString("1.55555")), + }, + } + controller := setupCosmosChainsControllerTestV2(t, chainA, chainB) + + badResp, cleanup := controller.client.Get("/v2/chains/cosmos?size=asd") + t.Cleanup(cleanup) + require.Equal(t, http.StatusUnprocessableEntity, badResp.StatusCode) + + resp, cleanup := controller.client.Get("/v2/chains/cosmos?size=1") + t.Cleanup(cleanup) + require.Equal(t, http.StatusOK, resp.StatusCode) + + body := cltest.ParseResponseBody(t, resp) + + metaCount, err := cltest.ParseJSONAPIResponseMetaCount(body) + require.NoError(t, err) + require.Equal(t, 2, metaCount) + + 
var links jsonapi.Links + + var chains []presenters.CosmosChainResource + err = web.ParsePaginatedResponse(body, &chains, &links) + assert.NoError(t, err) + assert.NotEmpty(t, links["next"].Href) + assert.Empty(t, links["prev"].Href) + + assert.Len(t, links, 1) + assert.Equal(t, *chainA.ChainID, chains[0].ID) + tomlA, err := chainA.TOMLString() + require.NoError(t, err) + assert.Equal(t, tomlA, chains[0].Config) + + resp, cleanup = controller.client.Get(links["next"].Href) + t.Cleanup(cleanup) + require.Equal(t, http.StatusOK, resp.StatusCode) + + chains = []presenters.CosmosChainResource{} + err = web.ParsePaginatedResponse(cltest.ParseResponseBody(t, resp), &chains, &links) + assert.NoError(t, err) + assert.Empty(t, links["next"].Href) + assert.NotEmpty(t, links["prev"].Href) + + assert.Len(t, links, 1) + assert.Equal(t, *chainB.ChainID, chains[0].ID) + tomlB, err := chainB.TOMLString() + require.NoError(t, err) + assert.Equal(t, tomlB, chains[0].Config) + +} + +type TestCosmosChainsController struct { + app *cltest.TestApplication + client cltest.HTTPClientCleaner +} + +func setupCosmosChainsControllerTestV2(t *testing.T, cfgs ...*coscfg.TOMLConfig) *TestCosmosChainsController { + for i := range cfgs { + cfgs[i].SetDefaults() + } + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Cosmos = cfgs + c.EVM = nil + }) + app := cltest.NewApplicationWithConfig(t, cfg) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + return &TestCosmosChainsController{ + app: app, + client: client, + } +} diff --git a/core/web/cosmos_keys_controller.go b/core/web/cosmos_keys_controller.go new file mode 100644 index 00000000..6e4d417f --- /dev/null +++ b/core/web/cosmos_keys_controller.go @@ -0,0 +1,12 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/cosmoskey" + 
"github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func NewCosmosKeysController(app plugin.Application) KeysController { + return NewKeysController[cosmoskey.Key, presenters.CosmosKeyResource](app.GetKeyStore().Cosmos(), app.GetLogger(), app.GetAuditLogger(), + "cosmosKey", presenters.NewCosmosKeyResource, presenters.NewCosmosKeyResources) +} diff --git a/core/web/cosmos_keys_controller_test.go b/core/web/cosmos_keys_controller_test.go new file mode 100644 index 00000000..33b52684 --- /dev/null +++ b/core/web/cosmos_keys_controller_test.go @@ -0,0 +1,104 @@ +package web_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCosmosKeysController_Index_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupCosmosKeysControllerTests(t) + keys, _ := keyStore.Cosmos().GetAll() + + response, cleanup := client.Get("/v2/keys/cosmos") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + resources := []presenters.CosmosKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resources) + assert.NoError(t, err) + + require.Len(t, resources, len(keys)) + + assert.Equal(t, keys[0].ID(), resources[0].ID) + assert.Equal(t, keys[0].PublicKeyStr(), resources[0].PubKey) +} + +func TestCosmosKeysController_Create_HappyPath(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + keyStore := app.GetKeyStore() + + response, cleanup := client.Post("/v2/keys/cosmos", nil) 
+ t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + keys, _ := keyStore.Cosmos().GetAll() + require.Len(t, keys, 1) + + resource := presenters.CosmosKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resource) + assert.NoError(t, err) + + assert.Equal(t, keys[0].ID(), resource.ID) + assert.Equal(t, keys[0].PublicKeyStr(), resource.PubKey) + + _, err = keyStore.Cosmos().Get(resource.ID) + require.NoError(t, err) +} + +func TestCosmosKeysController_Delete_NonExistentCosmosKeyID(t *testing.T) { + t.Parallel() + + client, _ := setupCosmosKeysControllerTests(t) + + nonExistentCosmosKeyID := "foobar" + response, cleanup := client.Delete("/v2/keys/cosmos/" + nonExistentCosmosKeyID) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusNotFound, response.StatusCode) +} + +func TestCosmosKeysController_Delete_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupCosmosKeysControllerTests(t) + + keys, _ := keyStore.Cosmos().GetAll() + initialLength := len(keys) + key, _ := keyStore.Cosmos().Create() + + response, cleanup := client.Delete(fmt.Sprintf("/v2/keys/cosmos/%s", key.ID())) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusOK, response.StatusCode) + assert.Error(t, utils.JustError(keyStore.Cosmos().Get(key.ID()))) + + keys, _ = keyStore.Cosmos().GetAll() + assert.Equal(t, initialLength, len(keys)) +} + +func setupCosmosKeysControllerTests(t *testing.T) (cltest.HTTPClientCleaner, keystore.Master) { + t.Helper() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + require.NoError(t, app.KeyStore.Cosmos().Add(cltest.DefaultCosmosKey)) + + client := app.NewHTTPClient(nil) + + return client, app.GetKeyStore() +} diff --git a/core/web/cosmos_nodes_controller.go b/core/web/cosmos_nodes_controller.go new file mode 100644 index 00000000..2a03b429 --- /dev/null +++ b/core/web/cosmos_nodes_controller.go @@ -0,0 +1,18 @@ +package web + +import ( + 
"github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// ErrCosmosNotEnabled is returned when COSMOS_ENABLED is not true. +var ErrCosmosNotEnabled = errChainDisabled{name: "Cosmos", tomlKey: "Cosmos.Enabled"} + +func NewCosmosNodesController(app plugin.Application) NodesController { + scopedNodeStatuser := NewNetworkScopedNodeStatuser(app.GetRelayers(), relay.Cosmos) + + return newNodesController[presenters.CosmosNodeResource]( + scopedNodeStatuser, ErrCosmosNotEnabled, presenters.NewCosmosNodeResource, app.GetAuditLogger(), + ) +} diff --git a/core/web/cosmos_transfer_controller.go b/core/web/cosmos_transfer_controller.go new file mode 100644 index 00000000..fcf63f6e --- /dev/null +++ b/core/web/cosmos_transfer_controller.go @@ -0,0 +1,93 @@ +package web + +import ( + "fmt" + "net/http" + "slices" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/pkg/errors" + + coscfg "github.com/goplugin/plugin-cosmos/pkg/cosmos/config" + "github.com/goplugin/plugin-cosmos/pkg/cosmos/db" + "github.com/goplugin/plugin-cosmos/pkg/cosmos/denom" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + cosmosmodels "github.com/goplugin/pluginv3.0/v2/core/store/models/cosmos" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// CosmosTransfersController can send PLI tokens to another address +type CosmosTransfersController struct { + App plugin.Application +} + +// Create sends native coins from the Plugin's account to a specified address. 
+func (tc *CosmosTransfersController) Create(c *gin.Context) { + relayers := tc.App.GetRelayers().List(plugin.FilterRelayersByType(relay.Cosmos)) + if relayers == nil { + jsonAPIError(c, http.StatusBadRequest, ErrCosmosNotEnabled) + return + } + + var tr cosmosmodels.SendRequest + if err := c.ShouldBindJSON(&tr); err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + if tr.CosmosChainID == "" { + jsonAPIError(c, http.StatusBadRequest, errors.New("missing cosmosChainID")) + return + } + if tr.FromAddress.Empty() { + jsonAPIError(c, http.StatusUnprocessableEntity, errors.Errorf("withdrawal source address is missing: %v", tr.FromAddress)) + return + } + + relayerID := relay.ID{Network: relay.Cosmos, ChainID: tr.CosmosChainID} + relayer, err := relayers.Get(relayerID) + if err != nil { + if errors.Is(err, plugin.ErrNoSuchRelayer) { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + var gasToken string + cfgs := tc.App.GetConfig().CosmosConfigs() + i := slices.IndexFunc(cfgs, func(config *coscfg.TOMLConfig) bool { return *config.ChainID == tr.CosmosChainID }) + if i == -1 { + jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("no config for chain id: %s", tr.CosmosChainID)) + return + } + gasToken = cfgs[i].GasToken() + + //TODO move this inside? 
+ coin, err := denom.ConvertDecCoinToDenom(sdk.NewDecCoinFromDec(tr.Token, tr.Amount), gasToken) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, errors.Errorf("unable to convert %s to %s: %v", tr.Token, gasToken, err)) + return + } else if !coin.Amount.IsPositive() { + jsonAPIError(c, http.StatusBadRequest, errors.Errorf("amount must be greater than zero: %s", coin.Amount)) + return + } + + err = relayer.Transact(c, tr.FromAddress.String(), tr.DestinationAddress.String(), coin.Amount.BigInt(), !tr.AllowHigherAmounts) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, errors.Errorf("failed to send transaction: %v", err)) + return + } + + resource := presenters.NewCosmosMsgResource("cosmos_transfer_"+uuid.New().String(), tr.CosmosChainID, "") + resource.State = string(db.Unstarted) + tc.App.GetAuditLogger().Audit(audit.CosmosTransactionCreated, map[string]interface{}{ + "cosmosTransactionResource": resource, + }) + + jsonAPIResponse(c, resource, "cosmos_msg") +} diff --git a/core/web/csa_keys_controller.go b/core/web/csa_keys_controller.go new file mode 100644 index 00000000..b258719e --- /dev/null +++ b/core/web/csa_keys_controller.go @@ -0,0 +1,95 @@ +package web + +import ( + "errors" + "io" + "net/http" + + "github.com/gin-gonic/gin" + + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// CSAKeysController manages CSA keys +type CSAKeysController struct { + App plugin.Application +} + +// Index lists CSA keys +// Example: +// "GET /keys/csa" +func (ctrl *CSAKeysController) Index(c *gin.Context) { + keys, err := ctrl.App.GetKeyStore().CSA().GetAll() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + jsonAPIResponse(c, presenters.NewCSAKeyResources(keys), "csaKeys") +} + +// Create and return a CSA key +// Example: 
+// "POST /keys/csa" +func (ctrl *CSAKeysController) Create(c *gin.Context) { + key, err := ctrl.App.GetKeyStore().CSA().Create() + if err != nil { + if errors.Is(err, keystore.ErrCSAKeyExists) { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + ctrl.App.GetAuditLogger().Audit(audit.CSAKeyCreated, map[string]interface{}{ + "CSAPublicKey": key.PublicKey, + "CSVersion": key.Version, + }) + + jsonAPIResponse(c, presenters.NewCSAKeyResource(key), "csaKeys") +} + +// Import imports a CSA key +func (ctrl *CSAKeysController) Import(c *gin.Context) { + defer ctrl.App.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Import request body") + + bytes, err := io.ReadAll(c.Request.Body) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + oldPassword := c.Query("oldpassword") + key, err := ctrl.App.GetKeyStore().CSA().Import(bytes, oldPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + ctrl.App.GetAuditLogger().Audit(audit.CSAKeyImported, map[string]interface{}{ + "CSAPublicKey": key.PublicKey, + "CSVersion": key.Version, + }) + + jsonAPIResponse(c, presenters.NewCSAKeyResource(key), "csaKey") +} + +// Export exports a key +func (ctrl *CSAKeysController) Export(c *gin.Context) { + defer ctrl.App.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Export request body") + + keyID := c.Param("ID") + newPassword := c.Query("newpassword") + + bytes, err := ctrl.App.GetKeyStore().CSA().Export(keyID, newPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + ctrl.App.GetAuditLogger().Audit(audit.CSAKeyExported, map[string]interface{}{"keyID": keyID}) + c.Data(http.StatusOK, MediaType, bytes) +} diff --git a/core/web/dkgencrypt_keys_controller.go b/core/web/dkgencrypt_keys_controller.go new file mode 100644 index 00000000..6f9b7dfc --- /dev/null +++ 
b/core/web/dkgencrypt_keys_controller.go @@ -0,0 +1,17 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgencryptkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func NewDKGEncryptKeysController(app plugin.Application) KeysController { + return NewKeysController[dkgencryptkey.Key, presenters.DKGEncryptKeyResource]( + app.GetKeyStore().DKGEncrypt(), + app.GetLogger(), + app.GetAuditLogger(), + "dkgencryptKey", + presenters.NewDKGEncryptKeyResource, + presenters.NewDKGEncryptKeyResources) +} diff --git a/core/web/dkgencrypt_keys_controller_test.go b/core/web/dkgencrypt_keys_controller_test.go new file mode 100644 index 00000000..6a542ca2 --- /dev/null +++ b/core/web/dkgencrypt_keys_controller_test.go @@ -0,0 +1,109 @@ +package web_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestDKGEncryptKeysController_Index_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupDKGEncryptKeysControllerTests(t) + keys, _ := keyStore.DKGEncrypt().GetAll() + + response, cleanup := client.Get("/v2/keys/dkgencrypt") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + resources := []presenters.DKGEncryptKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resources) + assert.NoError(t, err) + + assert.Len(t, resources, len(keys)) + + assert.Equal(t, keys[0].ID(), resources[0].ID) + assert.Equal(t, keys[0].PublicKeyString(), resources[0].PublicKey) +} + +func 
TestDKGEncryptKeysController_Create_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupDKGEncryptKeysControllerTests(t) + + response, cleanup := client.Post("/v2/keys/dkgencrypt", nil) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + keys, _ := keyStore.DKGEncrypt().GetAll() + assert.Len(t, keys, 2) + + resource := presenters.DKGEncryptKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resource) + assert.NoError(t, err) + + // the order in which keys are returned by GetAll is non-deterministic + // due to map iteration non-determinism. + found := false + for _, key := range keys { + if key.ID() == resource.ID { + assert.Equal(t, key.PublicKeyString(), resource.PublicKey) + found = true + } + } + assert.True(t, found) + + _, err = keyStore.DKGEncrypt().Get(resource.ID) + assert.NoError(t, err) +} + +func TestDKGEncryptKeysController_Delete_NonExistentDKGEncryptKeyID(t *testing.T) { + t.Parallel() + + client, _ := setupDKGEncryptKeysControllerTests(t) + + response, cleanup := client.Delete("/v2/keys/dkgencrypt/" + "nonexistentKey") + t.Cleanup(cleanup) + assert.Equal(t, http.StatusNotFound, response.StatusCode) +} + +func TestDKGEncryptKeysController_Delete_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupDKGEncryptKeysControllerTests(t) + + keys, _ := keyStore.DKGEncrypt().GetAll() + initialLength := len(keys) + + response, cleanup := client.Delete(fmt.Sprintf("/v2/keys/dkgencrypt/%s", keys[0].ID())) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + assert.Error(t, utils.JustError(keyStore.DKGEncrypt().Get(keys[0].ID()))) + + afterKeys, err := keyStore.DKGEncrypt().GetAll() + assert.NoError(t, err) + assert.Equal(t, initialLength-1, len(afterKeys)) + +} + +func setupDKGEncryptKeysControllerTests(t *testing.T) (cltest.HTTPClientCleaner, keystore.Master) { + t.Helper() + + app := cltest.NewApplication(t) + require.NoError(t, 
app.Start(testutils.Context(t))) + require.NoError(t, app.KeyStore.DKGEncrypt().Add(cltest.DefaultDKGEncryptKey)) + + client := app.NewHTTPClient(nil) + + return client, app.GetKeyStore() +} diff --git a/core/web/dkgsign_keys_controller.go b/core/web/dkgsign_keys_controller.go new file mode 100644 index 00000000..b0cc4f6a --- /dev/null +++ b/core/web/dkgsign_keys_controller.go @@ -0,0 +1,17 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgsignkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func NewDKGSignKeysController(app plugin.Application) KeysController { + return NewKeysController[dkgsignkey.Key, presenters.DKGSignKeyResource]( + app.GetKeyStore().DKGSign(), + app.GetLogger(), + app.GetAuditLogger(), + "dkgsignKey", + presenters.NewDKGSignKeyResource, + presenters.NewDKGSignKeyResources) +} diff --git a/core/web/dkgsign_keys_controller_test.go b/core/web/dkgsign_keys_controller_test.go new file mode 100644 index 00000000..01fbeebe --- /dev/null +++ b/core/web/dkgsign_keys_controller_test.go @@ -0,0 +1,109 @@ +package web_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestDKGSignKeysController_Index_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupDKGSignKeysControllerTests(t) + keys, _ := keyStore.DKGSign().GetAll() + + response, cleanup := client.Get("/v2/keys/dkgsign") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + resources := 
[]presenters.DKGSignKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resources) + assert.NoError(t, err) + + assert.Len(t, resources, len(keys)) + + assert.Equal(t, keys[0].ID(), resources[0].ID) + assert.Equal(t, keys[0].PublicKeyString(), resources[0].PublicKey) +} + +func TestDKGSignKeysController_Create_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupDKGSignKeysControllerTests(t) + + response, cleanup := client.Post("/v2/keys/dkgsign", nil) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + keys, _ := keyStore.DKGSign().GetAll() + assert.Len(t, keys, 2) + + resource := presenters.DKGSignKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resource) + assert.NoError(t, err) + + // the order in which keys are returned by GetAll is non-deterministic + // due to map iteration non-determinism. + found := false + for _, key := range keys { + if key.ID() == resource.ID { + assert.Equal(t, key.PublicKeyString(), resource.PublicKey) + found = true + } + } + assert.True(t, found) + + _, err = keyStore.DKGSign().Get(resource.ID) + assert.NoError(t, err) +} + +func TestDKGSignKeysController_Delete_NonExistentDKGSignKeyID(t *testing.T) { + t.Parallel() + + client, _ := setupDKGSignKeysControllerTests(t) + + response, cleanup := client.Delete("/v2/keys/dkgsign/" + "nonexistentKey") + t.Cleanup(cleanup) + assert.Equal(t, http.StatusNotFound, response.StatusCode) +} + +func TestDKGSignKeysController_Delete_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupDKGSignKeysControllerTests(t) + + keys, _ := keyStore.DKGSign().GetAll() + initialLength := len(keys) + + response, cleanup := client.Delete(fmt.Sprintf("/v2/keys/dkgsign/%s", keys[0].ID())) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + assert.Error(t, utils.JustError(keyStore.DKGSign().Get(keys[0].ID()))) + + afterKeys, err := 
keyStore.DKGSign().GetAll() + assert.NoError(t, err) + assert.Equal(t, initialLength-1, len(afterKeys)) + +} + +func setupDKGSignKeysControllerTests(t *testing.T) (cltest.HTTPClientCleaner, keystore.Master) { + t.Helper() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + require.NoError(t, app.KeyStore.DKGSign().Add(cltest.DefaultDKGSignKey)) + + client := app.NewHTTPClient(nil) + + return client, app.GetKeyStore() +} diff --git a/core/web/eth_keys_controller.go b/core/web/eth_keys_controller.go new file mode 100644 index 00000000..96768898 --- /dev/null +++ b/core/web/eth_keys_controller.go @@ -0,0 +1,429 @@ +package web + +import ( + "context" + "io" + "math/big" + "net/http" + "sort" + "strconv" + "strings" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/ethereum/go-ethereum/common" + "github.com/gin-gonic/gin" + "github.com/pkg/errors" + "go.uber.org/multierr" +) + +// ETHKeysController manages account keys +type ETHKeysController struct { + app plugin.Application + lggr logger.Logger +} + +func NewETHKeysController(app plugin.Application) *ETHKeysController { + return ÐKeysController{ + app: app, + lggr: app.GetLogger().Named("ETHKeysController"), + } +} + +func createETHKeyResource(c *gin.Context, ekc *ETHKeysController, key 
ethkey.KeyV2, state ethkey.State) *presenters.ETHKeyResource { + ethBalance := ekc.getEthBalance(c.Request.Context(), state) + linkBalance := ekc.getLinkBalance(c.Request.Context(), state) + maxGasPrice := ekc.getKeyMaxGasPriceWei(state, key.Address) + + r := presenters.NewETHKeyResource(key, state, + ekc.setEthBalance(ethBalance), + ekc.setLinkBalance(linkBalance), + ekc.setKeyMaxGasPriceWei(maxGasPrice), + ) + + return r +} + +func (ekc *ETHKeysController) formatETHKeyResponse() gin.HandlerFunc { + return func(c *gin.Context) { + c.Next() + + // Check if the response has not been written yet + if !c.Writer.Written() { + // Get the key and state from the Gin context + key, keyExists := c.Get("key") + state, stateExists := c.Get("state") + + // If key and state exist, format the response + if keyExists && stateExists { + r := createETHKeyResource(c, ekc, key.(ethkey.KeyV2), state.(ethkey.State)) + jsonAPIResponse(c, r, "keys") + } else { + err := errors.Errorf("error getting eth key and state: %v", c) + jsonAPIError(c, http.StatusInternalServerError, err) + } + } + } +} + +// Index returns the node's Ethereum keys and the account balances of ETH & PLI. 
+// Example: +// +// "/keys/eth" +func (ekc *ETHKeysController) Index(c *gin.Context) { + ethKeyStore := ekc.app.GetKeyStore().Eth() + var keys []ethkey.KeyV2 + var err error + keys, err = ethKeyStore.GetAll() + if err != nil { + err = errors.Errorf("error getting unlocked keys: %v", err) + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + states, err := ethKeyStore.GetStatesForKeys(keys) + if err != nil { + err = errors.Errorf("error getting key states: %v", err) + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + var resources []presenters.ETHKeyResource + for _, state := range states { + key, err := ethKeyStore.Get(state.Address.Hex()) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + r := createETHKeyResource(c, ekc, key, state) + + resources = append(resources, *r) + } + // Put disabled keys to the end + sort.SliceStable(resources, func(i, j int) bool { + return !resources[i].Disabled && resources[j].Disabled + }) + + jsonAPIResponseWithStatus(c, resources, "keys", http.StatusOK) + +} + +// Create adds a new account +// Example: +// +// "/keys/eth" +func (ekc *ETHKeysController) Create(c *gin.Context) { + ethKeyStore := ekc.app.GetKeyStore().Eth() + + cid := c.Query("evmChainID") + chain, ok := ekc.getChain(c, cid) + if !ok { + return + } + + if c.Query("maxGasPriceGWei") != "" { + jsonAPIError(c, http.StatusBadRequest, toml.ErrUnsupported) + return + } + + key, err := ethKeyStore.Create(chain.ID()) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + state, err := ethKeyStore.GetState(key.ID(), chain.ID()) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + c.Set("key", key) + c.Set("state", state) + + ekc.app.GetAuditLogger().Audit(audit.KeyCreated, map[string]interface{}{ + "type": "ethereum", + "id": key.ID(), + }) +} + +// Delete an ETH key bundle (irreversible!) 
+// Example: +// "DELETE /keys/eth/:keyID" +func (ekc *ETHKeysController) Delete(c *gin.Context) { + ethKeyStore := ekc.app.GetKeyStore().Eth() + + keyID := c.Param("address") + if !common.IsHexAddress(keyID) { + jsonAPIError(c, http.StatusBadRequest, errors.Errorf("invalid keyID: %s, must be hex address", keyID)) + return + } + + key, err := ethKeyStore.Get(keyID) + if err != nil { + if errors.Is(err, keystore.ErrKeyNotFound) { + jsonAPIError(c, http.StatusNotFound, err) + return + } + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + state, err := ethKeyStore.GetStateForKey(key) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + _, err = ethKeyStore.Delete(keyID) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + c.Set("key", key) + c.Set("state", state) + + ekc.app.GetAuditLogger().Audit(audit.KeyDeleted, map[string]interface{}{ + "type": "ethereum", + "id": keyID, + }) +} + +// Import imports a key +func (ekc *ETHKeysController) Import(c *gin.Context) { + ethKeyStore := ekc.app.GetKeyStore().Eth() + defer ekc.app.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Import request body") + + bytes, err := io.ReadAll(c.Request.Body) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + oldPassword := c.Query("oldpassword") + cid := c.Query("evmChainID") + chain, ok := ekc.getChain(c, cid) + if !ok { + return + } + + key, err := ethKeyStore.Import(bytes, oldPassword, chain.ID()) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + state, err := ethKeyStore.GetState(key.ID(), chain.ID()) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + c.Set("key", key) + c.Set("state", state) + c.Status(http.StatusCreated) + + ekc.app.GetAuditLogger().Audit(audit.KeyImported, map[string]interface{}{ + "type": "ethereum", + "id": key.ID(), + }) +} + +func (ekc 
*ETHKeysController) Export(c *gin.Context) { + defer ekc.app.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Export request body") + + id := c.Param("address") + newPassword := c.Query("newpassword") + + bytes, err := ekc.app.GetKeyStore().Eth().Export(id, newPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + ekc.app.GetAuditLogger().Audit(audit.KeyExported, map[string]interface{}{ + "type": "ethereum", + "id": id, + }) + + c.Data(http.StatusOK, MediaType, bytes) +} + +// Chain updates settings for a given chain for the key +func (ekc *ETHKeysController) Chain(c *gin.Context) { + var err error + kst := ekc.app.GetKeyStore().Eth() + defer ekc.app.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Import request body") + + keyID := c.Query("address") + if !common.IsHexAddress(keyID) { + jsonAPIError(c, http.StatusBadRequest, errors.Errorf("invalid address: %s, must be hex address", keyID)) + return + } + address := common.HexToAddress(keyID) + + cid := c.Query("evmChainID") + chain, ok := ekc.getChain(c, cid) + if !ok { + return + } + + abandon := false + if abandonStr := c.Query("abandon"); abandonStr != "" { + abandon, err = strconv.ParseBool(abandonStr) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, errors.Wrapf(err, "invalid value for abandon: expected boolean, got: %s", abandonStr)) + return + } + } + + // Reset the chain + if abandon { + var resetErr error + err = chain.TxManager().Reset(address, abandon) + err = multierr.Combine(err, resetErr) + if err != nil { + if strings.Contains(err.Error(), "key state not found with address") { + jsonAPIError(c, http.StatusNotFound, err) + return + } + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + } + + enabledStr := c.Query("enabled") + if enabledStr != "" { + var enabled bool + enabled, err = strconv.ParseBool(enabledStr) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, errors.Wrap(err, "enabled must be 
bool")) + return + } + + if enabled { + err = kst.Enable(address, chain.ID()) + } else { + err = kst.Disable(address, chain.ID()) + } + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + } + + key, err := kst.Get(keyID) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + state, err := kst.GetState(key.ID(), chain.ID()) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + c.Set("key", key) + c.Set("state", state) + c.Status(http.StatusOK) +} + +func (ekc *ETHKeysController) setEthBalance(bal *big.Int) presenters.NewETHKeyOption { + return presenters.SetETHKeyEthBalance((*assets.Eth)(bal)) +} + +// queries the EthClient for the ETH balance at the address associated with state +func (ekc *ETHKeysController) getEthBalance(ctx context.Context, state ethkey.State) *big.Int { + chainID := state.EVMChainID.ToInt() + chain, err := ekc.app.GetRelayers().LegacyEVMChains().Get(chainID.String()) + if err != nil { + if !errors.Is(errors.Cause(err), evmrelay.ErrNoChains) { + ekc.lggr.Errorw("Failed to get EVM Chain", "chainID", chainID, "address", state.Address, "err", err) + } + return nil + } + + ethClient := chain.Client() + bal, err := ethClient.BalanceAt(ctx, state.Address.Address(), nil) + if err != nil { + ekc.lggr.Errorw("Failed to get ETH balance", "chainID", chainID, "address", state.Address, "err", err) + return nil + } + + return bal + +} + +func (ekc *ETHKeysController) setLinkBalance(bal *commonassets.Link) presenters.NewETHKeyOption { + return presenters.SetETHKeyLinkBalance(bal) +} + +// queries the EthClient for the PLI balance at the address associated with state +func (ekc *ETHKeysController) getLinkBalance(ctx context.Context, state ethkey.State) *commonassets.Link { + var bal *commonassets.Link + chainID := state.EVMChainID.ToInt() + chain, err := ekc.app.GetRelayers().LegacyEVMChains().Get(chainID.String()) + if err != nil { + if 
!errors.Is(errors.Cause(err), evmrelay.ErrNoChains) { + ekc.lggr.Errorw("Failed to get EVM Chain", "chainID", chainID, "err", err) + } + } else { + ethClient := chain.Client() + addr := common.HexToAddress(chain.Config().EVM().LinkContractAddress()) + bal, err = ethClient.PLIBalance(ctx, state.Address.Address(), addr) + if err != nil { + ekc.lggr.Errorw("Failed to get PLI balance", "chainID", chainID, "address", state.Address, "err", err) + } + } + return bal +} + +// setKeyMaxGasPriceWei is a custom functional option for NewEthKeyResource which +// gets the key specific max gas price from the chain config and sets it on the +// resource. +func (ekc *ETHKeysController) setKeyMaxGasPriceWei(price *assets.Wei) presenters.NewETHKeyOption { + return presenters.SetETHKeyMaxGasPriceWei(ubig.New(price.ToInt())) +} + +func (ekc *ETHKeysController) getKeyMaxGasPriceWei(state ethkey.State, keyAddress common.Address) *assets.Wei { + var price *assets.Wei + chainID := state.EVMChainID.ToInt() + chain, err := ekc.app.GetRelayers().LegacyEVMChains().Get(chainID.String()) + if err != nil { + if !errors.Is(errors.Cause(err), evmrelay.ErrNoChains) { + ekc.lggr.Errorw("Failed to get EVM Chain", "chainID", chainID, "err", err) + } + } else { + price = chain.Config().EVM().GasEstimator().PriceMaxKey(keyAddress) + } + return price +} + +// getChain is a convenience wrapper to retrieve a chain for a given request +// and call the corresponding API response error function for 400, 404 and 500 results +func (ekc *ETHKeysController) getChain(c *gin.Context, chainIDstr string) (chain legacyevm.Chain, ok bool) { + chain, err := getChain(ekc.app.GetRelayers().LegacyEVMChains(), chainIDstr) + if err != nil { + if errors.Is(err, ErrInvalidChainID) || errors.Is(err, ErrMultipleChains) { + jsonAPIError(c, http.StatusBadRequest, err) + return nil, false + } else if errors.Is(err, ErrMissingChainID) { + jsonAPIError(c, http.StatusNotFound, err) + return nil, false + } + jsonAPIError(c, 
http.StatusInternalServerError, err) + return nil, false + } + return chain, true +} diff --git a/core/web/eth_keys_controller_test.go b/core/web/eth_keys_controller_test.go new file mode 100644 index 00000000..95080e0b --- /dev/null +++ b/core/web/eth_keys_controller_test.go @@ -0,0 +1,730 @@ +package web_test + +import ( + "math/big" + "net/http" + "net/url" + "testing" + + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/assets" + commontxmmocks "github.com/goplugin/pluginv3.0/v2/common/txmgr/types/mocks" + commonmocks "github.com/goplugin/pluginv3.0/v2/common/types/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + webpresenters "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/google/uuid" +) + +func TestETHKeysController_Index_Success(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + // enabled key + k0, addr0 := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + // disabled keys + k1, addr1 := cltest.RandomKey{Disabled: 
true}.MustInsert(t, app.KeyStore.Eth()) + k2, addr2 := cltest.RandomKey{Disabled: true}.MustInsert(t, app.KeyStore.Eth()) + expectedKeys := []ethkey.KeyV2{k0, k1, k2} + + ethClient.On("BalanceAt", mock.Anything, addr0, mock.Anything).Return(big.NewInt(256), nil).Once() + ethClient.On("BalanceAt", mock.Anything, addr1, mock.Anything).Return(big.NewInt(1), nil).Once() + ethClient.On("BalanceAt", mock.Anything, addr2, mock.Anything).Return(big.NewInt(1), nil).Once() + ethClient.On("PLIBalance", mock.Anything, addr0, mock.Anything).Return(assets.NewLinkFromJuels(256), nil).Once() + ethClient.On("PLIBalance", mock.Anything, addr1, mock.Anything).Return(assets.NewLinkFromJuels(1), nil).Once() + ethClient.On("PLIBalance", mock.Anything, addr2, mock.Anything).Return(assets.NewLinkFromJuels(1), nil).Once() + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Get("/v2/keys/evm") + defer cleanup() + require.Equal(t, http.StatusOK, resp.StatusCode) + + var actualBalances []webpresenters.ETHKeyResource + err := cltest.ParseJSONAPIResponse(t, resp, &actualBalances) + assert.NoError(t, err) + + require.Len(t, actualBalances, 3) + + for _, balance := range actualBalances { + if balance.Address == expectedKeys[0].Address.Hex() { + assert.Equal(t, "0.000000000000000256", balance.EthBalance.String()) + assert.Equal(t, "256", balance.LinkBalance.String()) + + } else { + assert.Equal(t, "0.000000000000000001", balance.EthBalance.String()) + assert.Equal(t, "1", balance.LinkBalance.String()) + + } + } +} + +func TestETHKeysController_Index_Errors(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := 
cltest.NewApplicationWithConfig(t, cfg, ethClient) + + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + _, addr := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + ethClient.On("BalanceAt", mock.Anything, addr, mock.Anything).Return(nil, errors.New("fake error")).Once() + ethClient.On("PLIBalance", mock.Anything, addr, mock.Anything).Return(nil, errors.New("fake error")).Once() + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Get("/v2/keys/eth") + defer cleanup() + require.Equal(t, http.StatusOK, resp.StatusCode) + + var actualBalances []webpresenters.ETHKeyResource + err := cltest.ParseJSONAPIResponse(t, resp, &actualBalances) + assert.NoError(t, err) + + require.Len(t, actualBalances, 1) + + balance := actualBalances[0] + assert.Equal(t, addr.String(), balance.Address) + assert.Nil(t, balance.EthBalance) + assert.Nil(t, balance.LinkBalance) + assert.Equal(t, "115792089237316195423570985008687907853269984665640564039457584007913129639935", balance.MaxGasPriceWei.String()) +} + +func TestETHKeysController_Index_Disabled(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].Enabled = ptr(false) + }) + + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + _, addr := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Get("/v2/keys/eth") + defer cleanup() + require.Equal(t, http.StatusOK, resp.StatusCode) + + var actualBalances []webpresenters.ETHKeyResource + err := cltest.ParseJSONAPIResponse(t, resp, &actualBalances) + assert.NoError(t, err) + + require.Len(t, actualBalances, 1) + + balance := actualBalances[0] + assert.Equal(t, addr.String(), balance.Address) + 
assert.Nil(t, balance.EthBalance) + assert.Nil(t, balance.LinkBalance) + assert.Nil(t, balance.MaxGasPriceWei) +} + +func TestETHKeysController_Index_NotDev(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + c.EVM[0].GasEstimator.Mode = ptr("FixedPrice") + }) + + ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(256), nil).Once() + ethClient.On("PLIBalance", mock.Anything, mock.Anything, mock.Anything).Return(assets.NewLinkFromJuels(256), nil).Once() + + app := cltest.NewApplicationWithConfigAndKey(t, cfg, ethClient) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Get("/v2/keys/eth") + defer cleanup() + require.Equal(t, http.StatusOK, resp.StatusCode) + + expectedKeys, err := app.KeyStore.Eth().GetAll() + require.NoError(t, err) + var actualBalances []webpresenters.ETHKeyResource + err = cltest.ParseJSONAPIResponse(t, resp, &actualBalances) + assert.NoError(t, err) + + require.Len(t, actualBalances, 1) + + only := actualBalances[0] + assert.Equal(t, expectedKeys[0].Address.Hex(), only.Address) + assert.Equal(t, "0.000000000000000256", only.EthBalance.String()) + assert.Equal(t, "256", only.LinkBalance.String()) +} + +func TestETHKeysController_Index_NoAccounts(t *testing.T) { + t.Parallel() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + resp, cleanup := client.Get("/v2/keys/eth") + defer cleanup() + + balances := []webpresenters.ETHKeyResource{} + err := cltest.ParseJSONAPIResponse(t, resp, &balances) + assert.NoError(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode) + 
assert.Len(t, balances, 0) +} + +func TestETHKeysController_CreateSuccess(t *testing.T) { + t.Parallel() + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + ethClient := evmtest.NewEthClientMockWithDefaultChain(t) + app := cltest.NewApplicationWithConfigAndKey(t, config, ethClient) + + sub := commonmocks.NewSubscription(t) + cltest.MockApplicationEthCalls(t, app, ethClient, sub) + + ethBalanceInt := big.NewInt(100) + ethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(ethBalanceInt, nil) + linkBalance := assets.NewLinkFromJuels(42) + ethClient.On("PLIBalance", mock.Anything, mock.Anything, mock.Anything).Return(linkBalance, nil) + + client := app.NewHTTPClient(nil) + + require.NoError(t, app.Start(testutils.Context(t))) + + chainURL := url.URL{Path: "/v2/keys/evm"} + query := chainURL.Query() + query.Set("evmChainID", cltest.FixtureChainID.String()) + chainURL.RawQuery = query.Encode() + + resp, cleanup := client.Post(chainURL.String(), nil) + defer cleanup() + + cltest.AssertServerResponse(t, resp, http.StatusOK) + + var balance webpresenters.ETHKeyResource + err := cltest.ParseJSONAPIResponse(t, resp, &balance) + assert.NoError(t, err) + + assert.Equal(t, ethBalanceInt, balance.EthBalance.ToInt()) + assert.Equal(t, linkBalance, balance.LinkBalance) + assert.Equal(t, "115792089237316195423570985008687907853269984665640564039457584007913129639935", balance.MaxGasPriceWei.String()) +} + +func TestETHKeysController_ChainSuccess_UpdateNonce(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + 
require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + // enabled key + key, addr := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + ethClient.On("BalanceAt", mock.Anything, addr, mock.Anything).Return(big.NewInt(1), nil).Once() + ethClient.On("PLIBalance", mock.Anything, addr, mock.Anything).Return(assets.NewLinkFromJuels(1), nil).Once() + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm/chain"} + query := chainURL.Query() + + query.Set("address", addr.Hex()) + query.Set("evmChainID", cltest.FixtureChainID.String()) + + chainURL.RawQuery = query.Encode() + resp, cleanup := client.Post(chainURL.String(), nil) + defer cleanup() + require.Equal(t, http.StatusOK, resp.StatusCode) + + var updatedKey webpresenters.ETHKeyResource + err := cltest.ParseJSONAPIResponse(t, resp, &updatedKey) + assert.NoError(t, err) + + assert.Equal(t, cltest.FormatWithPrefixedChainID(cltest.FixtureChainID.String(), key.Address.String()), updatedKey.ID) + assert.Equal(t, key.Address.String(), updatedKey.Address) + assert.Equal(t, cltest.FixtureChainID.String(), updatedKey.EVMChainID.String()) + assert.Equal(t, false, updatedKey.Disabled) +} + +func TestETHKeysController_ChainSuccess_Disable(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + // enabled key + key, addr := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + ethClient.On("BalanceAt", mock.Anything, addr, mock.Anything).Return(big.NewInt(1), nil).Once() + ethClient.On("PLIBalance", mock.Anything, addr, 
mock.Anything).Return(assets.NewLinkFromJuels(1), nil).Once() + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm/chain"} + query := chainURL.Query() + + enabled := "false" + query.Set("address", addr.Hex()) + query.Set("evmChainID", cltest.FixtureChainID.String()) + query.Set("enabled", enabled) + + chainURL.RawQuery = query.Encode() + resp, cleanup := client.Post(chainURL.String(), nil) + defer cleanup() + require.Equal(t, http.StatusOK, resp.StatusCode) + + var updatedKey webpresenters.ETHKeyResource + err := cltest.ParseJSONAPIResponse(t, resp, &updatedKey) + assert.NoError(t, err) + + assert.Equal(t, cltest.FormatWithPrefixedChainID(updatedKey.EVMChainID.String(), key.Address.String()), updatedKey.ID) + assert.Equal(t, key.Address.String(), updatedKey.Address) + assert.Equal(t, cltest.FixtureChainID.String(), updatedKey.EVMChainID.String()) + assert.Equal(t, true, updatedKey.Disabled) +} + +func TestETHKeysController_ChainSuccess_Enable(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + // disabled key + key, addr := cltest.RandomKey{Disabled: true}.MustInsert(t, app.KeyStore.Eth()) + + ethClient.On("BalanceAt", mock.Anything, addr, mock.Anything).Return(big.NewInt(1), nil).Once() + ethClient.On("PLIBalance", mock.Anything, addr, mock.Anything).Return(assets.NewLinkFromJuels(1), nil).Once() + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm/chain"} + query := chainURL.Query() + + enabled := "true" + query.Set("address", addr.Hex()) + query.Set("evmChainID", 
cltest.FixtureChainID.String()) + query.Set("enabled", enabled) + + chainURL.RawQuery = query.Encode() + resp, cleanup := client.Post(chainURL.String(), nil) + defer cleanup() + require.Equal(t, http.StatusOK, resp.StatusCode) + + var updatedKey webpresenters.ETHKeyResource + err := cltest.ParseJSONAPIResponse(t, resp, &updatedKey) + assert.NoError(t, err) + + assert.Equal(t, cltest.FormatWithPrefixedChainID(cltest.FixtureChainID.String(), key.Address.String()), updatedKey.ID) + assert.Equal(t, key.Address.String(), updatedKey.Address) + assert.Equal(t, cltest.FixtureChainID.String(), updatedKey.EVMChainID.String()) + assert.Equal(t, false, updatedKey.Disabled) +} + +func TestETHKeysController_ChainSuccess_ResetWithAbandon(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + // enabled key + key, addr := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + ethClient.On("BalanceAt", mock.Anything, addr, mock.Anything).Return(big.NewInt(1), nil).Once() + ethClient.On("PLIBalance", mock.Anything, addr, mock.Anything).Return(assets.NewLinkFromJuels(1), nil).Once() + + require.NoError(t, app.Start(testutils.Context(t))) + + chain := app.GetRelayers().LegacyEVMChains().Slice()[0] + subject := uuid.New() + strategy := commontxmmocks.NewTxStrategy(t) + strategy.On("Subject").Return(uuid.NullUUID{UUID: subject, Valid: true}) + strategy.On("PruneQueue", mock.Anything, mock.AnythingOfType("*txmgr.evmTxStore")).Return(nil, nil) + _, err := chain.TxManager().CreateTransaction(testutils.Context(t), txmgr.TxRequest{ + FromAddress: addr, + ToAddress: 
testutils.NewAddress(), + EncodedPayload: []byte{1, 2, 3}, + FeeLimit: uint32(1000), + Meta: nil, + Strategy: strategy, + }) + assert.NoError(t, err) + + db := app.GetSqlxDB() + txStore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg.Database()) + + txes, err := txStore.FindTxesByFromAddressAndState(testutils.Context(t), addr, "fatal_error") + require.NoError(t, err) + require.Len(t, txes, 0) + + client := app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm/chain"} + query := chainURL.Query() + + query.Set("address", addr.Hex()) + query.Set("evmChainID", cltest.FixtureChainID.String()) + query.Set("abandon", "true") + + chainURL.RawQuery = query.Encode() + resp, cleanup := client.Post(chainURL.String(), nil) + defer cleanup() + require.Equal(t, http.StatusOK, resp.StatusCode) + + var updatedKey webpresenters.ETHKeyResource + err = cltest.ParseJSONAPIResponse(t, resp, &updatedKey) + assert.NoError(t, err) + + assert.Equal(t, cltest.FormatWithPrefixedChainID(cltest.FixtureChainID.String(), key.Address.String()), updatedKey.ID) + assert.Equal(t, key.Address.String(), updatedKey.Address) + assert.Equal(t, cltest.FixtureChainID.String(), updatedKey.EVMChainID.String()) + assert.Equal(t, false, updatedKey.Disabled) + + txes, err = txStore.FindTxesByFromAddressAndState(testutils.Context(t), addr, "fatal_error") + require.NoError(t, err) + require.Len(t, txes, 1) + + tx := txes[0] + assert.Equal(t, "abandoned", tx.Error.String) +} + +func TestETHKeysController_ChainFailure_InvalidAbandon(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + + // enabled key + _, addr := cltest.MustInsertRandomKey(t, 
app.KeyStore.Eth()) + + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm/chain"} + query := chainURL.Query() + + query.Set("address", addr.Hex()) + query.Set("evmChainID", cltest.FixtureChainID.String()) + query.Set("abandon", "invalid") + + chainURL.RawQuery = query.Encode() + resp, cleanup := client.Post(chainURL.String(), nil) + defer cleanup() + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) +} + +func TestETHKeysController_ChainFailure_InvalidEnabled(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + + // enabled key + _, addr := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm/chain"} + query := chainURL.Query() + + query.Set("address", addr.Hex()) + query.Set("evmChainID", cltest.FixtureChainID.String()) + query.Set("enabled", "invalid") + + chainURL.RawQuery = query.Encode() + resp, cleanup := client.Post(chainURL.String(), nil) + defer cleanup() + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) +} + +func TestETHKeysController_ChainFailure_InvalidAddress(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := 
cltest.NewApplicationWithConfig(t, cfg, ethClient) + + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm/chain"} + query := chainURL.Query() + + query.Set("address", "invalid address") + query.Set("evmChainID", cltest.FixtureChainID.String()) + + chainURL.RawQuery = query.Encode() + resp, cleanup := client.Post(chainURL.String(), nil) + defer cleanup() + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) +} + +func TestETHKeysController_ChainFailure_MissingAddress(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm/chain"} + query := chainURL.Query() + + query.Set("address", testutils.NewAddress().Hex()) + query.Set("evmChainID", cltest.FixtureChainID.String()) + + chainURL.RawQuery = query.Encode() + resp, cleanup := client.Post(chainURL.String(), nil) + defer cleanup() + + assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) +} + +func TestETHKeysController_ChainFailure_InvalidChainID(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + require.NoError(t, app.Start(testutils.Context(t))) + + client := 
app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm/chain"} + query := chainURL.Query() + + query.Set("address", testutils.NewAddress().Hex()) + query.Set("evmChainID", "bad chain ID") + + chainURL.RawQuery = query.Encode() + resp, cleanup := client.Post(chainURL.String(), nil) + defer cleanup() + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) +} + +func TestETHKeysController_ChainFailure_MissingChainID(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + // enabled key + _, addr := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm/chain"} + query := chainURL.Query() + + query.Set("address", addr.Hex()) + query.Set("evmChainID", "123456789") + + chainURL.RawQuery = query.Encode() + resp, cleanup := client.Post(chainURL.String(), nil) + defer cleanup() + + assert.Equal(t, http.StatusNotFound, resp.StatusCode) +} + +func TestETHKeysController_DeleteSuccess(t *testing.T) { + t.Parallel() + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + // enabled keys + key0, addr0 := cltest.MustInsertRandomKey(t, 
app.KeyStore.Eth()) + _, addr1 := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + ethClient.On("BalanceAt", mock.Anything, addr0, mock.Anything).Return(big.NewInt(1), nil).Once() + ethClient.On("BalanceAt", mock.Anything, addr1, mock.Anything).Return(big.NewInt(1), nil).Once() + ethClient.On("PLIBalance", mock.Anything, addr0, mock.Anything).Return(assets.NewLinkFromJuels(1), nil).Once() + ethClient.On("PLIBalance", mock.Anything, addr1, mock.Anything).Return(assets.NewLinkFromJuels(1), nil).Once() + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm/" + addr0.Hex()} + resp, cleanup := client.Delete(chainURL.String()) + defer cleanup() + t.Log(resp) + assert.Equal(t, http.StatusOK, resp.StatusCode) + + var deletedKey webpresenters.ETHKeyResource + err := cltest.ParseJSONAPIResponse(t, resp, &deletedKey) + assert.NoError(t, err) + + assert.Equal(t, cltest.FormatWithPrefixedChainID(cltest.FixtureChainID.String(), key0.Address.String()), deletedKey.ID) + assert.Equal(t, key0.Address.String(), deletedKey.Address) + assert.Equal(t, cltest.FixtureChainID.String(), deletedKey.EVMChainID.String()) + assert.Equal(t, false, deletedKey.Disabled) + + resp, cleanup2 := client.Get("/v2/keys/evm") + defer cleanup2() + require.Equal(t, http.StatusOK, resp.StatusCode) + + var actualBalances []webpresenters.ETHKeyResource + err = cltest.ParseJSONAPIResponse(t, resp, &actualBalances) + assert.NoError(t, err) + + require.Len(t, actualBalances, 1) + + balance := actualBalances[0] + assert.Equal(t, addr1.String(), balance.Address) +} + +func TestETHKeysController_DeleteFailure_InvalidAddress(t *testing.T) { + t.Parallel() + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, 
ethClient) + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm" + "/bad_address"} + + resp, cleanup := client.Delete(chainURL.String()) + defer cleanup() + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) +} + +func TestETHKeysController_DeleteFailure_KeyMissing(t *testing.T) { + t.Parallel() + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + require.NoError(t, app.KeyStore.Unlock(cltest.Password)) + + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + chainURL := url.URL{Path: "/v2/keys/evm/" + testutils.NewAddress().Hex()} + + resp, cleanup := client.Delete(chainURL.String()) + defer cleanup() + t.Log(resp) + + assert.Equal(t, http.StatusNotFound, resp.StatusCode) +} diff --git a/core/web/evm_chains_controller.go b/core/web/evm_chains_controller.go new file mode 100644 index 00000000..c6d362cb --- /dev/null +++ b/core/web/evm_chains_controller.go @@ -0,0 +1,19 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +var ErrEVMNotEnabled = errChainDisabled{name: "EVM", tomlKey: "EVM.Enabled"} + +func NewEVMChainsController(app plugin.Application) ChainsController { + return newChainsController[presenters.EVMChainResource]( + relay.EVM, + app.GetRelayers().List(plugin.FilterRelayersByType(relay.EVM)), + ErrEVMNotEnabled, + presenters.NewEVMChainResource, + app.GetLogger(), + app.GetAuditLogger()) +} diff --git a/core/web/evm_chains_controller_test.go b/core/web/evm_chains_controller_test.go 
new file mode 100644 index 00000000..9c0e10d0 --- /dev/null +++ b/core/web/evm_chains_controller_test.go @@ -0,0 +1,215 @@ +package web_test + +import ( + "fmt" + "math/big" + "net/http" + "sort" + "testing" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func Test_EVMChainsController_Show(t *testing.T) { + t.Parallel() + + validId := ubig.New(testutils.NewRandomEVMChainID()) + + testCases := []struct { + name string + inputId string + wantStatusCode int + want *evmcfg.EVMConfig + }{ + { + inputId: validId.String(), + name: "success", + want: &evmcfg.EVMConfig{ + ChainID: validId, + Enabled: ptr(true), + Chain: evmcfg.Defaults(nil, &evmcfg.Chain{ + GasEstimator: evmcfg.GasEstimator{ + EIP1559DynamicFees: ptr(true), + BlockHistory: evmcfg.BlockHistoryEstimator{ + BlockHistorySize: ptr[uint16](50), + }, + }, + RPCBlockQueryDelay: ptr[uint16](23), + MinIncomingConfirmations: ptr[uint32](12), + LinkContractAddress: ptr(ethkey.EIP55AddressFromAddress(testutils.NewAddress())), + }), + }, + wantStatusCode: http.StatusOK, + }, + { + inputId: "invalidid", + name: "invalid id", + want: nil, + wantStatusCode: http.StatusBadRequest, + }, + { + inputId: "234", + name: "not found", + want: nil, + wantStatusCode: http.StatusBadRequest, + }, + } + + for _, testCase := range testCases { + tc := testCase + + t.Run(tc.name, func(t *testing.T) 
{ + t.Parallel() + + controller := setupEVMChainsControllerTest(t, configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + if tc.want != nil { + c.EVM = evmcfg.EVMConfigs{tc.want} + } + })) + + wantedResult := tc.want + resp, cleanup := controller.client.Get( + fmt.Sprintf("/v2/chains/evm/%s", tc.inputId), + ) + t.Cleanup(cleanup) + require.Equal(t, tc.wantStatusCode, resp.StatusCode) + + if wantedResult != nil { + resource1 := presenters.EVMChainResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, resp), &resource1) + require.NoError(t, err) + + assert.Equal(t, resource1.ID, wantedResult.ChainID.String()) + toml, err := wantedResult.TOMLString() + require.NoError(t, err) + assert.Equal(t, toml, resource1.Config) + } + }) + } +} + +func Test_EVMChainsController_Index(t *testing.T) { + t.Parallel() + + // sort test chain ids to make expected comparison easy + chainIDs := []*big.Int{testutils.NewRandomEVMChainID(), testutils.NewRandomEVMChainID(), testutils.NewRandomEVMChainID()} + sort.Slice(chainIDs, func(i, j int) bool { + + return chainIDs[i].String() < chainIDs[j].String() + }) + + configuredChains := evmcfg.EVMConfigs{ + {ChainID: ubig.New(chainIDs[0]), Chain: evmcfg.Defaults(nil)}, + { + ChainID: ubig.New(chainIDs[1]), + Chain: evmcfg.Defaults(nil, &evmcfg.Chain{ + RPCBlockQueryDelay: ptr[uint16](13), + GasEstimator: evmcfg.GasEstimator{ + EIP1559DynamicFees: ptr(true), + BlockHistory: evmcfg.BlockHistoryEstimator{ + BlockHistorySize: ptr[uint16](1), + }, + }, + MinIncomingConfirmations: ptr[uint32](120), + }), + }, + { + ChainID: ubig.New(chainIDs[2]), + Chain: evmcfg.Defaults(nil, &evmcfg.Chain{ + RPCBlockQueryDelay: ptr[uint16](5), + GasEstimator: evmcfg.GasEstimator{ + EIP1559DynamicFees: ptr(false), + BlockHistory: evmcfg.BlockHistoryEstimator{ + BlockHistorySize: ptr[uint16](2), + }, + }, + MinIncomingConfirmations: ptr[uint32](30), + }), + }, + } + + assert.Len(t, configuredChains, 3) + controller := 
setupEVMChainsControllerTest(t, configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM = append(c.EVM, configuredChains...) + })) + + badResp, cleanup := controller.client.Get("/v2/chains/evm?size=asd") + t.Cleanup(cleanup) + require.Equal(t, http.StatusUnprocessableEntity, badResp.StatusCode) + + resp, cleanup := controller.client.Get("/v2/chains/evm?size=3") + t.Cleanup(cleanup) + require.Equal(t, http.StatusOK, resp.StatusCode) + + body := cltest.ParseResponseBody(t, resp) + + metaCount, err := cltest.ParseJSONAPIResponseMetaCount(body) + require.NoError(t, err) + require.Equal(t, 1+len(configuredChains), metaCount) + + var links jsonapi.Links + + var gotChains []presenters.EVMChainResource + err = web.ParsePaginatedResponse(body, &gotChains, &links) + assert.NoError(t, err) + assert.NotEmpty(t, links["next"].Href) + assert.Empty(t, links["prev"].Href) + + assert.Len(t, links, 1) + // the difference in index value here seems to be due to the fact + // that cltest always has a default EVM chain, which is the off-by-one + // in the indices + assert.Equal(t, gotChains[2].ID, configuredChains[1].ChainID.String()) + toml, err := configuredChains[1].TOMLString() + require.NoError(t, err) + assert.Equal(t, toml, gotChains[2].Config) + + resp, cleanup = controller.client.Get(links["next"].Href) + t.Cleanup(cleanup) + require.Equal(t, http.StatusOK, resp.StatusCode) + + gotChains = []presenters.EVMChainResource{} + err = web.ParsePaginatedResponse(cltest.ParseResponseBody(t, resp), &gotChains, &links) + assert.NoError(t, err) + assert.Empty(t, links["next"].Href) + assert.NotEmpty(t, links["prev"].Href) + + assert.Len(t, links, 1) + assert.Equal(t, gotChains[0].ID, configuredChains[2].ChainID.String()) + toml, err = configuredChains[2].TOMLString() + require.NoError(t, err) + assert.Equal(t, toml, gotChains[0].Config) +} + +type TestEVMChainsController struct { + app *cltest.TestApplication + client cltest.HTTPClientCleaner +} + +func 
setupEVMChainsControllerTest(t *testing.T, cfg plugin.GeneralConfig) *TestEVMChainsController { + // Using this instead of `NewApplicationEVMDisabled` since we need the chain set to be loaded in the app + // for the sake of the API endpoints to work properly + app := cltest.NewApplicationWithConfig(t, cfg) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + return &TestEVMChainsController{ + app: app, + client: client, + } +} + +func ptr[T any](t T) *T { return &t } diff --git a/core/web/evm_forwarders_controller.go b/core/web/evm_forwarders_controller.go new file mode 100644 index 00000000..baf34330 --- /dev/null +++ b/core/web/evm_forwarders_controller.go @@ -0,0 +1,107 @@ +package web + +import ( + "math/big" + "net/http" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/gin-gonic/gin" +) + +// EVMForwardersController manages EVM forwarders. +type EVMForwardersController struct { + App plugin.Application +} + +// Index lists EVM forwarders. 
+func (cc *EVMForwardersController) Index(c *gin.Context, size, page, offset int) { + orm := forwarders.NewORM(cc.App.GetSqlxDB(), cc.App.GetLogger(), cc.App.GetConfig().Database()) + fwds, count, err := orm.FindForwarders(0, size) + + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + + var resources []presenters.EVMForwarderResource + for _, fwd := range fwds { + resources = append(resources, presenters.NewEVMForwarderResource(fwd)) + } + + paginatedResponse(c, "forwarder", size, page, resources, count, err) +} + +// TrackEVMForwarderRequest is a JSONAPI request for creating an EVM forwarder. +type TrackEVMForwarderRequest struct { + EVMChainID *ubig.Big `json:"evmChainId"` + Address common.Address `json:"address"` +} + +// Track adds a new EVM forwarder. +func (cc *EVMForwardersController) Track(c *gin.Context) { + request := &TrackEVMForwarderRequest{} + + if err := c.ShouldBindJSON(&request); err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + orm := forwarders.NewORM(cc.App.GetSqlxDB(), cc.App.GetLogger(), cc.App.GetConfig().Database()) + fwd, err := orm.CreateForwarder(request.Address, *request.EVMChainID) + + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + + cc.App.GetAuditLogger().Audit(audit.ForwarderCreated, map[string]interface{}{ + "forwarderID": fwd.ID, + "forwarderAddress": fwd.Address, + "forwarderEVMChainID": fwd.EVMChainID, + }) + jsonAPIResponseWithStatus(c, presenters.NewEVMForwarderResource(fwd), "forwarder", http.StatusCreated) +} + +// Delete removes an EVM Forwarder. 
+func (cc *EVMForwardersController) Delete(c *gin.Context) { + id, err := stringutils.ToInt64(c.Param("fwdID")) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + filterCleanup := func(tx pg.Queryer, evmChainID int64, addr common.Address) error { + chain, err2 := cc.App.GetRelayers().LegacyEVMChains().Get(big.NewInt(evmChainID).String()) + if err2 != nil { + // If the chain id doesn't even exist, or logpoller is disabled, then there isn't any filter to clean up. Returning an error + // here could be dangerous as it would make it impossible to delete a forwarder with an invalid chain id + return nil + } + + if chain.LogPoller() == logpoller.LogPollerDisabled { + // handle same as non-existent chain id + return nil + } + return chain.LogPoller().UnregisterFilter(forwarders.FilterName(addr), pg.WithQueryer(tx)) + } + + orm := forwarders.NewORM(cc.App.GetSqlxDB(), cc.App.GetLogger(), cc.App.GetConfig().Database()) + err = orm.DeleteForwarder(id, filterCleanup) + + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + cc.App.GetAuditLogger().Audit(audit.ForwarderDeleted, map[string]interface{}{"id": id}) + jsonAPIResponseWithStatus(c, nil, "forwarder", http.StatusNoContent) +} diff --git a/core/web/evm_forwarders_controller_test.go b/core/web/evm_forwarders_controller_test.go new file mode 100644 index 00000000..67597e97 --- /dev/null +++ b/core/web/evm_forwarders_controller_test.go @@ -0,0 +1,131 @@ +package web_test + +import ( + "bytes" + "encoding/json" + "net/http" + "testing" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + 
"github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +type TestEVMForwardersController struct { + app *cltest.TestApplication + client cltest.HTTPClientCleaner +} + +func setupEVMForwardersControllerTest(t *testing.T, overrideFn func(c *plugin.Config, s *plugin.Secrets)) *TestEVMForwardersController { + // Using this instead of `NewApplicationEVMDisabled` since we need the chain set to be loaded in the app + // for the sake of the API endpoints to work properly + app := cltest.NewApplicationWithConfig(t, configtest.NewGeneralConfig(t, overrideFn)) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + return &TestEVMForwardersController{ + app: app, + client: client, + } +} + +func Test_EVMForwardersController_Track(t *testing.T) { + t.Parallel() + + chainId := big.New(testutils.NewRandomEVMChainID()) + controller := setupEVMForwardersControllerTest(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM = evmcfg.EVMConfigs{ + {ChainID: chainId, Enabled: ptr(true), Chain: evmcfg.Defaults(chainId)}, + } + }) + + // Build EVMForwarderRequest + address := utils.RandomAddress() + body, err := json.Marshal(web.TrackEVMForwarderRequest{ + EVMChainID: chainId, + Address: address, + }, + ) + require.NoError(t, err) + + resp, cleanup := controller.client.Post("/v2/nodes/evm/forwarders/track", bytes.NewReader(body)) + t.Cleanup(cleanup) + require.Equal(t, http.StatusCreated, resp.StatusCode) + + resource := presenters.EVMForwarderResource{} + err = web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, resp), &resource) + require.NoError(t, err) + + assert.Equal(t, resource.Address, address) + + require.Len(t, controller.app.GetRelayers().LegacyEVMChains().Slice(), 1) + + resp, cleanup = 
controller.client.Delete("/v2/nodes/evm/forwarders/" + resource.ID) + t.Cleanup(cleanup) + require.Equal(t, http.StatusNoContent, resp.StatusCode) + assert.NoError(t, err) +} + +func Test_EVMForwardersController_Index(t *testing.T) { + t.Parallel() + + chainId := big.New(testutils.NewRandomEVMChainID()) + controller := setupEVMForwardersControllerTest(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM = evmcfg.EVMConfigs{ + {ChainID: chainId, Enabled: ptr(true), Chain: evmcfg.Defaults(chainId)}, + } + }) + + // Build EVMForwarderRequest + fwdrs := []web.TrackEVMForwarderRequest{ + { + EVMChainID: chainId, + Address: utils.RandomAddress(), + }, + { + EVMChainID: chainId, + Address: utils.RandomAddress(), + }, + } + for _, fwdr := range fwdrs { + + body, err := json.Marshal(web.TrackEVMForwarderRequest{ + EVMChainID: chainId, + Address: fwdr.Address, + }, + ) + require.NoError(t, err) + + resp, cleanup := controller.client.Post("/v2/nodes/evm/forwarders/track", bytes.NewReader(body)) + t.Cleanup(cleanup) + require.Equal(t, http.StatusCreated, resp.StatusCode) + } + + resp, cleanup := controller.client.Get("/v2/nodes/evm/forwarders?size=2") + t.Cleanup(cleanup) + require.Equal(t, http.StatusOK, resp.StatusCode) + + body := cltest.ParseResponseBody(t, resp) + + metaCount, err := cltest.ParseJSONAPIResponseMetaCount(body) + require.NoError(t, err) + require.Equal(t, len(fwdrs), metaCount) + + var links jsonapi.Links + + var fwdrcs []presenters.EVMForwarderResource + err = web.ParsePaginatedResponse(body, &fwdrcs, &links) + assert.NoError(t, err) + assert.Empty(t, links["prev"].Href) +} diff --git a/core/web/evm_nodes_controller.go b/core/web/evm_nodes_controller.go new file mode 100644 index 00000000..bc2f3fc2 --- /dev/null +++ b/core/web/evm_nodes_controller.go @@ -0,0 +1,14 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + 
"github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func NewEVMNodesController(app plugin.Application) NodesController { + scopedNodeStatuser := NewNetworkScopedNodeStatuser(app.GetRelayers(), relay.EVM) + + return newNodesController[presenters.EVMNodeResource]( + scopedNodeStatuser, ErrEVMNotEnabled, presenters.NewEVMNodeResource, app.GetAuditLogger()) +} diff --git a/core/web/evm_transactions_controller.go b/core/web/evm_transactions_controller.go new file mode 100644 index 00000000..2960121d --- /dev/null +++ b/core/web/evm_transactions_controller.go @@ -0,0 +1,49 @@ +package web + +import ( + "database/sql" + "net/http" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/ethereum/go-ethereum/common" + "github.com/gin-gonic/gin" + "github.com/pkg/errors" +) + +// TransactionsController displays Ethereum transactions requests. +type TransactionsController struct { + App plugin.Application +} + +// Index returns paginated transactions +func (tc *TransactionsController) Index(c *gin.Context, size, page, offset int) { + txs, count, err := tc.App.TxmStorageService().TransactionsWithAttempts(offset, size) + ptxs := make([]presenters.EthTxResource, len(txs)) + for i, tx := range txs { + tx.TxAttempts[0].Tx = tx + ptxs[i] = presenters.NewEthTxResourceFromAttempt(tx.TxAttempts[0]) + } + paginatedResponse(c, "transactions", size, page, ptxs, count, err) +} + +// Show returns the details of a Ethereum Transaction details. 
+// Example: +// +// "/transactions/:TxHash" +func (tc *TransactionsController) Show(c *gin.Context) { + hash := common.HexToHash(c.Param("TxHash")) + + ethTxAttempt, err := tc.App.TxmStorageService().FindTxAttempt(hash) + if errors.Is(err, sql.ErrNoRows) { + jsonAPIError(c, http.StatusNotFound, errors.New("Transaction not found")) + return + } + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + jsonAPIResponse(c, presenters.NewEthTxResourceFromAttempt(*ethTxAttempt), "transaction") +} diff --git a/core/web/evm_transactions_controller_test.go b/core/web/evm_transactions_controller_test.go new file mode 100644 index 00000000..7567b7e1 --- /dev/null +++ b/core/web/evm_transactions_controller_test.go @@ -0,0 +1,127 @@ +package web_test + +import ( + "fmt" + "net/http" + "testing" + + txmgrtypes "github.com/goplugin/pluginv3.0/v2/common/txmgr/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTransactionsController_Index_Success(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithKey(t) + require.NoError(t, app.Start(testutils.Context(t))) + + db := app.GetSqlxDB() + txStore := cltest.NewTestTxStore(t, app.GetSqlxDB(), app.GetConfig().Database()) + ethKeyStore := cltest.NewKeyStore(t, db, app.Config.Database()).Eth() + client := app.NewHTTPClient(nil) + _, from := cltest.MustInsertRandomKey(t, ethKeyStore) + + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 0, 1, from) // tx1 + tx2 := cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 3, 2, from) // tx2 + 
cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 4, 4, from) // tx3 + + // add second tx attempt for tx2 + blockNum := int64(3) + attempt := cltest.NewLegacyEthTxAttempt(t, tx2.ID) + attempt.State = txmgrtypes.TxAttemptBroadcast + attempt.TxFee = gas.EvmFee{Legacy: assets.NewWeiI(3)} + attempt.BroadcastBeforeBlockNum = &blockNum + require.NoError(t, txStore.InsertTxAttempt(&attempt)) + + _, count, err := txStore.TransactionsWithAttempts(0, 100) + require.NoError(t, err) + require.Equal(t, count, 3) + + size := 2 + resp, cleanup := client.Get(fmt.Sprintf("/v2/transactions?size=%d", size)) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusOK) + + var links jsonapi.Links + var txs []presenters.EthTxResource + body := cltest.ParseResponseBody(t, resp) + require.NoError(t, web.ParsePaginatedResponse(body, &txs, &links)) + assert.NotEmpty(t, links["next"].Href) + assert.Empty(t, links["prev"].Href) + + require.Len(t, txs, size) + require.Equal(t, "4", txs[0].SentAt, "expected tx attempts order by sentAt descending") + require.Equal(t, "3", txs[1].SentAt, "expected tx attempts order by sentAt descending") +} + +func TestTransactionsController_Index_Error(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithKey(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Get("/v2/transactions?size=TrainingDay") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, 422) +} + +func TestTransactionsController_Show_Success(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithKey(t) + require.NoError(t, app.Start(testutils.Context(t))) + + txStore := cltest.NewTestTxStore(t, app.GetSqlxDB(), app.GetConfig().Database()) + client := app.NewHTTPClient(nil) + _, from := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + tx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 1, from) + require.Len(t, tx.TxAttempts, 1) + attempt := 
tx.TxAttempts[0] + attempt.Tx = tx + + resp, cleanup := client.Get("/v2/transactions/" + attempt.Hash.String()) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusOK) + + ptx := presenters.EthTxResource{} + require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &ptx)) + txp := presenters.NewEthTxResourceFromAttempt(attempt) + + assert.Equal(t, txp.State, ptx.State) + assert.Equal(t, txp.Data, ptx.Data) + assert.Equal(t, txp.GasLimit, ptx.GasLimit) + assert.Equal(t, txp.GasPrice, ptx.GasPrice) + assert.Equal(t, txp.Hash, ptx.Hash) + assert.Equal(t, txp.SentAt, ptx.SentAt) + assert.Equal(t, txp.To, ptx.To) + assert.Equal(t, txp.Value, ptx.Value) +} + +func TestTransactionsController_Show_NotFound(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithKey(t) + require.NoError(t, app.Start(testutils.Context(t))) + + txStore := cltest.NewTestTxStore(t, app.GetSqlxDB(), app.GetConfig().Database()) + client := app.NewHTTPClient(nil) + _, from := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + tx := cltest.MustInsertUnconfirmedEthTxWithBroadcastLegacyAttempt(t, txStore, 1, from) + require.Len(t, tx.TxAttempts, 1) + attempt := tx.TxAttempts[0] + + resp, cleanup := client.Get("/v2/transactions/" + (attempt.Hash.String() + "1")) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusNotFound) +} diff --git a/core/web/evm_transfer_controller.go b/core/web/evm_transfer_controller.go new file mode 100644 index 00000000..b5f83ef9 --- /dev/null +++ b/core/web/evm_transfer_controller.go @@ -0,0 +1,159 @@ +package web + +import ( + "context" + "fmt" + "math/big" + "net/http" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + commontxmgr "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/gin-gonic/gin" +) + +// EVMTransfersController can send PLI tokens to another address +type EVMTransfersController struct { + App plugin.Application +} + +// Create sends ETH from the Plugin's account to a specified address. +// +// Example: "/withdrawals" +func (tc *EVMTransfersController) Create(c *gin.Context) { + var tr models.SendEtherRequest + if err := c.ShouldBindJSON(&tr); err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + + chain, err := getChain(tc.App.GetRelayers().LegacyEVMChains(), tr.EVMChainID.String()) + if err != nil { + if errors.Is(err, ErrInvalidChainID) || errors.Is(err, ErrMultipleChains) || errors.Is(err, ErrMissingChainID) { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + if tr.FromAddress == utils.ZeroAddress { + jsonAPIError(c, http.StatusUnprocessableEntity, errors.Errorf("withdrawal source address is missing: %v", tr.FromAddress)) + return + } + + if !tr.AllowHigherAmounts { + err = ValidateEthBalanceForTransfer(c, chain, tr.FromAddress, tr.Amount) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, errors.Errorf("transaction failed: %v", err)) + return + } + } + + etx, err := chain.TxManager().SendNativeToken(c, chain.ID(), tr.FromAddress, tr.DestinationAddress, *tr.Amount.ToInt(), chain.Config().EVM().GasEstimator().LimitTransfer()) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, errors.Errorf("transaction failed: %v", err)) + return + } + + tc.App.GetAuditLogger().Audit(audit.EthTransactionCreated, map[string]interface{}{ + "ethTX": etx, + }) + + // skip waiting for txmgr to create TxAttempt + if 
tr.SkipWaitTxAttempt { + jsonAPIResponse(c, presenters.NewEthTxResource(etx), "eth_tx") + return + } + + timeout := 10 * time.Second // default + if tr.WaitAttemptTimeout != nil { + timeout = *tr.WaitAttemptTimeout + } + + // wait and retrieve tx attempt matching tx ID + attempt, err := FindTxAttempt(c, timeout, etx, tc.App.TxmStorageService().FindTxWithAttempts) + if err != nil { + jsonAPIError(c, http.StatusGatewayTimeout, fmt.Errorf("failed to find transaction within timeout: %w", err)) + return + } + jsonAPIResponse(c, presenters.NewEthTxResourceFromAttempt(attempt), "eth_tx") +} + +// ValidateEthBalanceForTransfer validates that the current balance can cover the transaction amount +func ValidateEthBalanceForTransfer(c *gin.Context, chain legacyevm.Chain, fromAddr common.Address, amount assets.Eth) error { + var err error + var balance *big.Int + + balanceMonitor := chain.BalanceMonitor() + + if balanceMonitor != nil { + balance = balanceMonitor.GetEthBalance(fromAddr).ToInt() + } else { + balance, err = chain.Client().BalanceAt(c, fromAddr, nil) + if err != nil { + return err + } + } + + zero := big.NewInt(0) + + if balance == nil || balance.Cmp(zero) == 0 { + return errors.Errorf("balance is too low for this transaction to be executed: %v", balance) + } + + gasLimit := chain.Config().EVM().GasEstimator().LimitTransfer() + estimator := chain.GasEstimator() + + amountWithFees, err := estimator.GetMaxCost(c, amount, nil, gasLimit, chain.Config().EVM().GasEstimator().PriceMaxKey(fromAddr)) + if err != nil { + return err + } + if balance.Cmp(amountWithFees) < 0 { + // ETH balance is less than the sent amount + fees + return errors.Errorf("balance is too low for this transaction to be executed: %v", balance) + } + + return nil +} + +func FindTxAttempt(ctx context.Context, timeout time.Duration, etx txmgr.Tx, FindTxWithAttempts func(int64) (txmgr.Tx, error)) (attempt txmgr.TxAttempt, err error) { + recheckTime := time.Second + tick := time.After(0) + ctx, cancel := 
context.WithTimeout(ctx, timeout) + defer cancel() + for { + select { + case <-ctx.Done(): + return attempt, fmt.Errorf("%w - tx may still have been broadcast", ctx.Err()) + case <-tick: + etx, err = FindTxWithAttempts(etx.ID) + if err != nil { + return attempt, fmt.Errorf("failed to find transaction: %w", err) + } + } + + // exit if tx attempts are found + // also validate etx.State != unstarted (ensure proper tx state for tx with attempts) + if len(etx.TxAttempts) > 0 && etx.State != commontxmgr.TxUnstarted { + break + } + tick = time.After(recheckTime) + } + + // attach original tx to attempt + attempt = etx.TxAttempts[0] + attempt.Tx = etx + return attempt, nil +} diff --git a/core/web/evm_transfer_controller_test.go b/core/web/evm_transfer_controller_test.go new file mode 100644 index 00000000..faff91b6 --- /dev/null +++ b/core/web/evm_transfer_controller_test.go @@ -0,0 +1,409 @@ +package web_test + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "math/big" + "net/http" + "testing" + "time" + + "github.com/jmoiron/sqlx" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + 
"github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestTransfersController_CreateSuccess_From(t *testing.T) { + t.Parallel() + + key := cltest.MustGenerateRandomKey(t) + + ethClient := cltest.NewEthMocksWithTransactionsOnBlocksAssertions(t) + + balance, err := assets.NewEthValueS("200") + require.NoError(t, err) + + ethClient.On("PendingNonceAt", mock.Anything, key.Address).Return(uint64(1), nil) + ethClient.On("BalanceAt", mock.Anything, key.Address, (*big.Int)(nil)).Return(balance.ToInt(), nil) + + app := cltest.NewApplicationWithKey(t, ethClient, key) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + amount, err := assets.NewEthValueS("100") + require.NoError(t, err) + + request := models.SendEtherRequest{ + DestinationAddress: common.HexToAddress("0xFA01FA015C8A5332987319823728982379128371"), + FromAddress: key.Address, + Amount: amount, + SkipWaitTxAttempt: true, + EVMChainID: ubig.New(evmtest.MustGetDefaultChainID(t, app.Config.EVMConfigs())), + } + + body, err := json.Marshal(&request) + assert.NoError(t, err) + + resp, cleanup := client.Post("/v2/transfers", bytes.NewBuffer(body)) + t.Cleanup(cleanup) + + errors := cltest.ParseJSONAPIErrors(t, resp.Body) + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Len(t, errors.Errors, 0) + + validateTxCount(t, app.GetSqlxDB(), 1) +} + +func TestTransfersController_CreateSuccess_From_WEI(t *testing.T) { + t.Parallel() + + key := cltest.MustGenerateRandomKey(t) + + ethClient := cltest.NewEthMocksWithTransactionsOnBlocksAssertions(t) + + balance, err := assets.NewEthValueS("2") + require.NoError(t, err) + + ethClient.On("PendingNonceAt", mock.Anything, key.Address).Return(uint64(1), nil) + ethClient.On("BalanceAt", mock.Anything, key.Address, (*big.Int)(nil)).Return(balance.ToInt(), nil) + + app := cltest.NewApplicationWithKey(t, ethClient, key) + 
require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + amount := assets.NewEthValue(1000000000000000000) + + request := models.SendEtherRequest{ + DestinationAddress: common.HexToAddress("0xFA01FA015C8A5332987319823728982379128371"), + FromAddress: key.Address, + Amount: amount, + SkipWaitTxAttempt: true, + EVMChainID: ubig.New(evmtest.MustGetDefaultChainID(t, app.Config.EVMConfigs())), + } + + body, err := json.Marshal(&request) + assert.NoError(t, err) + + resp, cleanup := client.Post("/v2/transfers", bytes.NewBuffer(body)) + t.Cleanup(cleanup) + + errors := cltest.ParseJSONAPIErrors(t, resp.Body) + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Len(t, errors.Errors, 0) + + validateTxCount(t, app.GetSqlxDB(), 1) +} + +func TestTransfersController_CreateSuccess_From_BalanceMonitorDisabled(t *testing.T) { + t.Parallel() + + key := cltest.MustGenerateRandomKey(t) + + ethClient := cltest.NewEthMocksWithTransactionsOnBlocksAssertions(t) + + balance, err := assets.NewEthValueS("200") + require.NoError(t, err) + + ethClient.On("PendingNonceAt", mock.Anything, key.Address).Return(uint64(1), nil) + ethClient.On("BalanceAt", mock.Anything, key.Address, (*big.Int)(nil)).Return(balance.ToInt(), nil) + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + + app := cltest.NewApplicationWithConfigAndKey(t, config, ethClient, key) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + amount, err := assets.NewEthValueS("100") + require.NoError(t, err) + + request := models.SendEtherRequest{ + DestinationAddress: common.HexToAddress("0xFA01FA015C8A5332987319823728982379128371"), + FromAddress: key.Address, + Amount: amount, + SkipWaitTxAttempt: true, + EVMChainID: ubig.New(evmtest.MustGetDefaultChainID(t, app.Config.EVMConfigs())), + } + + body, err := json.Marshal(&request) + assert.NoError(t, err) + + 
resp, cleanup := client.Post("/v2/transfers", bytes.NewBuffer(body)) + t.Cleanup(cleanup) + + errors := cltest.ParseJSONAPIErrors(t, resp.Body) + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Len(t, errors.Errors, 0) + + validateTxCount(t, app.GetSqlxDB(), 1) +} + +func TestTransfersController_TransferZeroAddressError(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithKey(t) + require.NoError(t, app.Start(testutils.Context(t))) + + amount, err := assets.NewEthValueS("100") + require.NoError(t, err) + + client := app.NewHTTPClient(nil) + request := models.SendEtherRequest{ + DestinationAddress: common.HexToAddress("0xFA01FA015C8A5332987319823728982379128371"), + FromAddress: common.HexToAddress("0x0000000000000000000000000000000000000000"), + Amount: amount, + EVMChainID: ubig.New(evmtest.MustGetDefaultChainID(t, app.Config.EVMConfigs())), + } + + body, err := json.Marshal(&request) + assert.NoError(t, err) + + resp, cleanup := client.Post("/v2/transfers", bytes.NewBuffer(body)) + t.Cleanup(cleanup) + + cltest.AssertServerResponse(t, resp, http.StatusUnprocessableEntity) +} + +func TestTransfersController_TransferBalanceToLowError(t *testing.T) { + t.Parallel() + + key := cltest.MustGenerateRandomKey(t) + + ethClient := cltest.NewEthMocksWithTransactionsOnBlocksAssertions(t) + + ethClient.On("PendingNonceAt", mock.Anything, key.Address).Return(uint64(1), nil) + ethClient.On("BalanceAt", mock.Anything, key.Address, (*big.Int)(nil)).Return(assets.NewEth(10).ToInt(), nil) + + app := cltest.NewApplicationWithKey(t, ethClient, key) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + amount, err := assets.NewEthValueS("100") + require.NoError(t, err) + + request := models.SendEtherRequest{ + FromAddress: key.Address, + DestinationAddress: common.HexToAddress("0xFA01FA015C8A5332987319823728982379128371"), + Amount: amount, + AllowHigherAmounts: false, + EVMChainID: 
ubig.New(evmtest.MustGetDefaultChainID(t, app.Config.EVMConfigs())), + } + + body, err := json.Marshal(&request) + assert.NoError(t, err) + + resp, cleanup := client.Post("/v2/transfers", bytes.NewBuffer(body)) + t.Cleanup(cleanup) + + cltest.AssertServerResponse(t, resp, http.StatusUnprocessableEntity) +} + +func TestTransfersController_TransferBalanceToLowError_ZeroBalance(t *testing.T) { + t.Parallel() + + key := cltest.MustGenerateRandomKey(t) + + ethClient := cltest.NewEthMocksWithTransactionsOnBlocksAssertions(t) + + balance, err := assets.NewEthValueS("0") + require.NoError(t, err) + + ethClient.On("PendingNonceAt", mock.Anything, key.Address).Return(uint64(1), nil) + ethClient.On("BalanceAt", mock.Anything, key.Address, (*big.Int)(nil)).Return(balance.ToInt(), nil) + + app := cltest.NewApplicationWithKey(t, ethClient, key) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + amount, err := assets.NewEthValueS("100") + require.NoError(t, err) + + request := models.SendEtherRequest{ + FromAddress: key.Address, + DestinationAddress: common.HexToAddress("0xFA01FA015C8A5332987319823728982379128371"), + Amount: amount, + AllowHigherAmounts: false, + EVMChainID: ubig.New(evmtest.MustGetDefaultChainID(t, app.Config.EVMConfigs())), + } + + body, err := json.Marshal(&request) + assert.NoError(t, err) + + resp, cleanup := client.Post("/v2/transfers", bytes.NewBuffer(body)) + t.Cleanup(cleanup) + + cltest.AssertServerResponse(t, resp, http.StatusUnprocessableEntity) +} + +func TestTransfersController_JSONBindingError(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithKey(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + resp, cleanup := client.Post("/v2/transfers", bytes.NewBuffer([]byte(`{"address":""}`))) + t.Cleanup(cleanup) + + cltest.AssertServerResponse(t, resp, http.StatusBadRequest) +} + +func TestTransfersController_CreateSuccess_eip1559(t *testing.T) { + 
t.Parallel() + + key := cltest.MustGenerateRandomKey(t) + + ethClient := cltest.NewEthMocksWithTransactionsOnBlocksAssertions(t) + + balance, err := assets.NewEthValueS("200") + require.NoError(t, err) + + ethClient.On("PendingNonceAt", mock.Anything, key.Address).Return(uint64(1), nil) + ethClient.On("BalanceAt", mock.Anything, key.Address, (*big.Int)(nil)).Return(balance.ToInt(), nil) + ethClient.On("SequenceAt", mock.Anything, mock.Anything, mock.Anything).Return(evmtypes.Nonce(0), nil).Maybe() + + config := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true) + c.EVM[0].GasEstimator.Mode = ptr("FixedPrice") + c.EVM[0].ChainID = (*ubig.Big)(testutils.FixtureChainID) + // NOTE: FallbackPollInterval is used in this test to quickly create TxAttempts + // Testing triggers requires committing transactions and does not work with transactional tests + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(time.Second) + }) + + app := cltest.NewApplicationWithConfigAndKey(t, config, ethClient, key) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + amount, err := assets.NewEthValueS("100") + require.NoError(t, err) + + timeout := 5 * time.Second + request := models.SendEtherRequest{ + DestinationAddress: common.HexToAddress("0xFA01FA015C8A5332987319823728982379128371"), + FromAddress: key.Address, + Amount: amount, + WaitAttemptTimeout: &timeout, + EVMChainID: ubig.New(evmtest.MustGetDefaultChainID(t, config.EVMConfigs())), + } + + body, err := json.Marshal(&request) + assert.NoError(t, err) + + resp, cleanup := client.Post("/v2/transfers", bytes.NewBuffer(body)) + t.Cleanup(cleanup) + + cltest.AssertServerResponse(t, resp, http.StatusOK) + + resource := presenters.EthTxResource{} + err = web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, resp), &resource) + assert.NoError(t, err) + + validateTxCount(t, app.GetSqlxDB(), 1) + + // 
check returned data + assert.NotEmpty(t, resource.Hash) + assert.NotEmpty(t, resource.To) + assert.NotEmpty(t, resource.From) + assert.NotEmpty(t, resource.Nonce) + assert.NotEqual(t, "unstarted", resource.State) +} + +func TestTransfersController_FindTxAttempt(t *testing.T) { + tx := txmgr.Tx{ID: 1} + attempt := txmgr.TxAttempt{ID: 2} + txWithAttempt := txmgr.Tx{ID: 1, TxAttempts: []txmgr.TxAttempt{attempt}} + + // happy path + t.Run("happy_path", func(t *testing.T) { + ctx := testutils.Context(t) + timeout := 5 * time.Second + var done bool + find := func(_ int64) (txmgr.Tx, error) { + if !done { + done = true + return tx, nil + } + return txWithAttempt, nil + } + a, err := web.FindTxAttempt(ctx, timeout, tx, find) + require.NoError(t, err) + assert.Equal(t, tx.ID, a.Tx.ID) + assert.Equal(t, attempt.ID, a.ID) + }) + + // failed to find tx + t.Run("failed to find tx", func(t *testing.T) { + ctx := testutils.Context(t) + find := func(_ int64) (txmgr.Tx, error) { + return txmgr.Tx{}, fmt.Errorf("ERRORED") + } + _, err := web.FindTxAttempt(ctx, time.Second, tx, find) + assert.ErrorContains(t, err, "failed to find transaction") + }) + + // timeout + t.Run("timeout", func(t *testing.T) { + ctx := testutils.Context(t) + find := func(_ int64) (txmgr.Tx, error) { + return tx, nil + } + _, err := web.FindTxAttempt(ctx, time.Second, tx, find) + assert.ErrorContains(t, err, "context deadline exceeded") + }) + + // context canceled + t.Run("context canceled", func(t *testing.T) { + ctx := testutils.Context(t) + find := func(_ int64) (txmgr.Tx, error) { + return tx, nil + } + + ctx, cancel := context.WithCancel(ctx) + go func() { + time.Sleep(1 * time.Second) + cancel() + }() + + _, err := web.FindTxAttempt(ctx, 5*time.Second, tx, find) + assert.ErrorContains(t, err, "context canceled") + }) +} + +func validateTxCount(t *testing.T, db *sqlx.DB, count int) { + cfg := pgtest.NewQConfig(false) + txStore := txmgr.NewTxStore(db, logger.TestLogger(t), cfg) + + txes, err := 
txStore.GetAllTxes(testutils.Context(t)) + require.NoError(t, err) + require.Len(t, txes, count) +} diff --git a/core/web/evm_tx_attempts_controller.go b/core/web/evm_tx_attempts_controller.go new file mode 100644 index 00000000..65d6a179 --- /dev/null +++ b/core/web/evm_tx_attempts_controller.go @@ -0,0 +1,23 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/gin-gonic/gin" +) + +// TxAttemptsController lists TxAttempts requests. +type TxAttemptsController struct { + App plugin.Application +} + +// Index returns paginated transaction attempts +func (tac *TxAttemptsController) Index(c *gin.Context, size, page, offset int) { + attempts, count, err := tac.App.TxmStorageService().TxAttempts(offset, size) + ptxs := make([]presenters.EthTxResource, len(attempts)) + for i, attempt := range attempts { + ptxs[i] = presenters.NewEthTxResourceFromAttempt(attempt) + } + paginatedResponse(c, "transactions", size, page, ptxs, count, err) +} diff --git a/core/web/evm_tx_attempts_controller_test.go b/core/web/evm_tx_attempts_controller_test.go new file mode 100644 index 00000000..c90b838d --- /dev/null +++ b/core/web/evm_tx_attempts_controller_test.go @@ -0,0 +1,58 @@ +package web_test + +import ( + "net/http" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTxAttemptsController_Index_Success(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithKey(t) + require.NoError(t, app.Start(testutils.Context(t))) + + txStore := cltest.NewTestTxStore(t, app.GetSqlxDB(), app.GetConfig().Database()) + client := app.NewHTTPClient(nil) + + _, from := 
cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 0, 1, from) + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 1, 2, from) + cltest.MustInsertConfirmedEthTxWithLegacyAttempt(t, txStore, 2, 3, from) + + resp, cleanup := client.Get("/v2/tx_attempts?size=2") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusOK) + + var links jsonapi.Links + var attempts []presenters.EthTxResource + body := cltest.ParseResponseBody(t, resp) + + require.NoError(t, web.ParsePaginatedResponse(body, &attempts, &links)) + assert.NotEmpty(t, links["next"].Href) + assert.Empty(t, links["prev"].Href) + require.Len(t, attempts, 2) + assert.Equal(t, "3", attempts[0].SentAt, "expected tx attempts order by sentAt descending") + assert.Equal(t, "2", attempts[1].SentAt, "expected tx attempts order by sentAt descending") +} + +func TestTxAttemptsController_Index_Error(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithKey(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Get("/v2/tx_attempts?size=TrainingDay") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, 422) +} diff --git a/core/web/external_initiators_controller.go b/core/web/external_initiators_controller.go new file mode 100644 index 00000000..f068f1a4 --- /dev/null +++ b/core/web/external_initiators_controller.go @@ -0,0 +1,113 @@ +package web + +import ( + "database/sql" + "fmt" + "net/http" + "regexp" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/gin-gonic/gin" + "github.com/pkg/errors" +) + +var ( + externalInitiatorNameRegexp = 
regexp.MustCompile("^[a-zA-Z0-9-_]+$") +) + +// ValidateExternalInitiator checks whether External Initiator parameters are +// safe for processing. +func ValidateExternalInitiator( + exi *bridges.ExternalInitiatorRequest, + orm bridges.ORM, +) error { + fe := models.NewJSONAPIErrors() + if len([]rune(exi.Name)) == 0 { + fe.Add("No name specified") + } else if !externalInitiatorNameRegexp.MatchString(exi.Name) { + fe.Add("Name must be alphanumeric and may contain '_' or '-'") + } else if _, err := orm.FindExternalInitiatorByName(exi.Name); err == nil { + fe.Add(fmt.Sprintf("Name %v already exists", exi.Name)) + } else if !errors.Is(err, sql.ErrNoRows) { + return errors.Wrap(err, "validating external initiator") + } + return fe.CoerceEmptyToNil() +} + +// ExternalInitiatorsController manages external initiators +type ExternalInitiatorsController struct { + App plugin.Application +} + +func (eic *ExternalInitiatorsController) Index(c *gin.Context, size, page, offset int) { + eis, count, err := eic.App.BridgeORM().ExternalInitiators(offset, size) + var resources []presenters.ExternalInitiatorResource + for _, ei := range eis { + resources = append(resources, presenters.NewExternalInitiatorResource(ei)) + } + + paginatedResponse(c, "externalInitiators", size, page, resources, count, err) +} + +// Create builds and saves a new external initiator +func (eic *ExternalInitiatorsController) Create(c *gin.Context) { + eir := &bridges.ExternalInitiatorRequest{} + if !eic.App.GetConfig().JobPipeline().ExternalInitiatorsEnabled() { + err := errors.New("The External Initiator feature is disabled by configuration") + jsonAPIError(c, http.StatusMethodNotAllowed, err) + return + } + + eia := auth.NewToken() + if err := c.ShouldBindJSON(eir); err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + ei, err := bridges.NewExternalInitiator(eia, eir) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + if err := 
ValidateExternalInitiator(eir, eic.App.BridgeORM()); err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + if err := eic.App.BridgeORM().CreateExternalInitiator(ei); err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + eic.App.GetAuditLogger().Audit(audit.ExternalInitiatorCreated, map[string]interface{}{ + "externalInitiatorID": ei.ID, + "externalInitiatorName": ei.Name, + "externalInitiatorURL": ei.URL, + }) + + resp := presenters.NewExternalInitiatorAuthentication(*ei, *eia) + jsonAPIResponseWithStatus(c, resp, "external initiator authentication", http.StatusCreated) +} + +// Destroy deletes an ExternalInitiator +func (eic *ExternalInitiatorsController) Destroy(c *gin.Context) { + name := c.Param("Name") + exi, err := eic.App.BridgeORM().FindExternalInitiatorByName(name) + if errors.Is(err, sql.ErrNoRows) { + jsonAPIError(c, http.StatusNotFound, errors.New("external initiator not found")) + return + } + if err := eic.App.BridgeORM().DeleteExternalInitiator(exi.Name); err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + eic.App.GetAuditLogger().Audit(audit.ExternalInitiatorDeleted, map[string]interface{}{"name": name}) + jsonAPIResponseWithStatus(c, nil, "external initiator", http.StatusNoContent) +} diff --git a/core/web/external_initiators_controller_test.go b/core/web/external_initiators_controller_test.go new file mode 100644 index 00000000..8c14073a --- /dev/null +++ b/core/web/external_initiators_controller_test.go @@ -0,0 +1,262 @@ +package web_test + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/pgtest" + 
"github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidateExternalInitiator(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + cfg := pgtest.NewQConfig(true) + orm := bridges.NewORM(db, logger.TestLogger(t), cfg) + + url := cltest.WebURL(t, "https://a.web.url") + + // Add duplicate + exi := bridges.ExternalInitiator{ + Name: "duplicate", + URL: &url, + } + + assert.NoError(t, orm.CreateExternalInitiator(&exi)) + + tests := []struct { + name string + input string + wantError bool + }{ + {"basic", `{"name":"bitcoin","url":"https://test.url"}`, false}, + {"basic w/ underscore", `{"name":"bit_coin","url":"https://test.url"}`, false}, + {"basic w/ underscore in url", `{"name":"bitcoin","url":"https://plugin_bit-coin_1.url"}`, false}, + {"missing url", `{"name":"missing_url"}`, false}, + {"duplicate name", `{"name":"duplicate","url":"https://test.url"}`, true}, + {"invalid name characters", `{"name":"","url":"https://test.url"}`, true}, + {"missing name", `{"url":"https://test.url"}`, true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var exr bridges.ExternalInitiatorRequest + + assert.NoError(t, json.Unmarshal([]byte(test.input), &exr)) + result := web.ValidateExternalInitiator(&exr, orm) + + cltest.AssertError(t, test.wantError, result) + }) + } +} + +func TestExternalInitiatorsController_Index(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithConfig(t, + configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.ExternalInitiatorsEnabled = ptr(true) + })) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + db := app.GetSqlxDB() + borm := 
bridges.NewORM(db, logger.TestLogger(t), app.GetConfig().Database()) + + eiFoo := cltest.MustInsertExternalInitiatorWithOpts(t, borm, cltest.ExternalInitiatorOpts{ + NamePrefix: "foo", + URL: cltest.MustWebURL(t, "http://example.com/foo"), + OutgoingToken: "outgoing_token", + }) + eiBar := cltest.MustInsertExternalInitiatorWithOpts(t, borm, cltest.ExternalInitiatorOpts{NamePrefix: "bar"}) + + resp, cleanup := client.Get("/v2/external_initiators?size=x") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusUnprocessableEntity) + + resp, cleanup = client.Get("/v2/external_initiators?size=1") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusOK) + body := cltest.ParseResponseBody(t, resp) + + metaCount, err := cltest.ParseJSONAPIResponseMetaCount(body) + require.NoError(t, err) + require.Equal(t, 2, metaCount) + + var links jsonapi.Links + var eis []presenters.ExternalInitiatorResource + err = web.ParsePaginatedResponse(body, &eis, &links) + assert.NoError(t, err) + assert.NotEmpty(t, links["next"].Href) + assert.Empty(t, links["prev"].Href) + + assert.Len(t, eis, 1) + assert.Equal(t, fmt.Sprintf("%d", eiBar.ID), eis[0].ID) + assert.Equal(t, eiBar.Name, eis[0].Name) + assert.Nil(t, eis[0].URL) + assert.Equal(t, eiBar.AccessKey, eis[0].AccessKey) + assert.Equal(t, eiBar.OutgoingToken, eis[0].OutgoingToken) + + resp, cleanup = client.Get(links["next"].Href) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusOK) + + eis = []presenters.ExternalInitiatorResource{} + err = web.ParsePaginatedResponse(cltest.ParseResponseBody(t, resp), &eis, &links) + assert.NoError(t, err) + assert.Empty(t, links["next"]) + assert.NotEmpty(t, links["prev"]) + + assert.Len(t, eis, 1) + assert.Equal(t, fmt.Sprintf("%d", eiFoo.ID), eis[0].ID) + assert.Equal(t, eiFoo.Name, eis[0].Name) + assert.Equal(t, eiFoo.URL.String(), eis[0].URL.String()) + assert.Equal(t, eiFoo.AccessKey, eis[0].AccessKey) + assert.Equal(t, eiFoo.OutgoingToken, 
eis[0].OutgoingToken) +} + +func TestExternalInitiatorsController_Create_success(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithConfig(t, + configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.ExternalInitiatorsEnabled = ptr(true) + })) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + resp, cleanup := client.Post("/v2/external_initiators", + bytes.NewBufferString(`{"name":"bitcoin","url":"http://without.a.name"}`), + ) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusCreated) + ei := &presenters.ExternalInitiatorAuthentication{} + err := cltest.ParseJSONAPIResponse(t, resp, ei) + require.NoError(t, err) + + assert.Equal(t, "bitcoin", ei.Name) + assert.Equal(t, "http://without.a.name", ei.URL.String()) + assert.NotEmpty(t, ei.AccessKey) + assert.NotEmpty(t, ei.Secret) + assert.NotEmpty(t, ei.OutgoingToken) + assert.NotEmpty(t, ei.OutgoingSecret) +} + +func TestExternalInitiatorsController_Create_without_URL(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithConfig(t, + configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.ExternalInitiatorsEnabled = ptr(true) + })) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + resp, cleanup := client.Post("/v2/external_initiators", + bytes.NewBufferString(`{"name":"no-url"}`), + ) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, 201) + ei := &presenters.ExternalInitiatorAuthentication{} + err := cltest.ParseJSONAPIResponse(t, resp, ei) + require.NoError(t, err) + + assert.Equal(t, "no-url", ei.Name) + assert.Equal(t, "", ei.URL.String()) + assert.NotEmpty(t, ei.AccessKey) + assert.NotEmpty(t, ei.Secret) + assert.NotEmpty(t, ei.OutgoingToken) + assert.NotEmpty(t, ei.OutgoingSecret) +} + +func TestExternalInitiatorsController_Create_invalid(t *testing.T) { + t.Parallel() + + app := 
cltest.NewApplicationWithConfig(t, + configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.ExternalInitiatorsEnabled = ptr(true) + })) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + resp, cleanup := client.Post("/v2/external_initiators", + bytes.NewBufferString(`{"url":"http://without.a.name"}`), + ) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusBadRequest) +} + +func TestExternalInitiatorsController_Delete(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithConfig(t, + configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.ExternalInitiatorsEnabled = ptr(true) + })) + require.NoError(t, app.Start(testutils.Context(t))) + + exi := bridges.ExternalInitiator{ + Name: "abracadabra", + } + err := app.BridgeORM().CreateExternalInitiator(&exi) + require.NoError(t, err) + + client := app.NewHTTPClient(nil) + + resp, cleanup := client.Delete("/v2/external_initiators/" + exi.Name) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusNoContent) +} + +func TestExternalInitiatorsController_DeleteNotFound(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationWithConfig(t, + configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.ExternalInitiatorsEnabled = ptr(true) + })) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + tests := []struct { + Name string + URL string + }{ + { + Name: "No external initiator specified", + URL: "/v2/external_initiators", + }, + { + Name: "Unknown initiator", + URL: "/v2/external_initiators/not-exist", + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + resp, cleanup := client.Delete(test.URL) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusText(http.StatusNotFound), http.StatusText(resp.StatusCode)) + }) + } +} diff --git 
a/core/web/features_controller.go b/core/web/features_controller.go new file mode 100644 index 00000000..280b4b15 --- /dev/null +++ b/core/web/features_controller.go @@ -0,0 +1,30 @@ +package web + +import ( + "github.com/gin-gonic/gin" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// FeaturesController manages the feature flags +type FeaturesController struct { + App plugin.Application +} + +const ( + FeatureKeyCSA string = "csa" + FeatureKeyFeedsManager string = "feeds_manager" +) + +// Index retrieves the features +// Example: +// "GET /features" +func (fc *FeaturesController) Index(c *gin.Context) { + resources := []presenters.FeatureResource{ + *presenters.NewFeatureResource(FeatureKeyCSA, fc.App.GetConfig().Feature().UICSAKeys()), + *presenters.NewFeatureResource(FeatureKeyFeedsManager, fc.App.GetConfig().Feature().FeedsManager()), + } + + jsonAPIResponse(c, resources, "features") +} diff --git a/core/web/features_controller_test.go b/core/web/features_controller_test.go new file mode 100644 index 00000000..8ae07f6d --- /dev/null +++ b/core/web/features_controller_test.go @@ -0,0 +1,40 @@ +package web_test + +import ( + "net/http" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_FeaturesController_List(t *testing.T) { + app := cltest.NewApplicationWithConfig(t, configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + csa := true + c.Feature.UICSAKeys = &csa + })) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) 
+ + resp, cleanup := client.Get("/v2/features") + t.Cleanup(cleanup) + require.Equal(t, http.StatusOK, resp.StatusCode) + + resources := []presenters.FeatureResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, resp), &resources) + require.NoError(t, err) + require.Len(t, resources, 2) + + assert.Equal(t, "csa", resources[0].ID) + assert.True(t, resources[0].Enabled) + + assert.Equal(t, "feeds_manager", resources[1].ID) + assert.True(t, resources[1].Enabled) +} diff --git a/core/web/fixtures/operator_ui/assets/index.html b/core/web/fixtures/operator_ui/assets/index.html new file mode 100644 index 00000000..e69de29b diff --git a/core/web/fixtures/operator_ui/assets/main.js b/core/web/fixtures/operator_ui/assets/main.js new file mode 100644 index 00000000..e69de29b diff --git a/core/web/fixtures/operator_ui/assets/main.js.gz b/core/web/fixtures/operator_ui/assets/main.js.gz new file mode 100644 index 00000000..e69de29b diff --git a/core/web/gqlscalar/map.go b/core/web/gqlscalar/map.go new file mode 100644 index 00000000..317e2294 --- /dev/null +++ b/core/web/gqlscalar/map.go @@ -0,0 +1,31 @@ +package gqlscalar + +import ( + "encoding/json" + + "github.com/pkg/errors" +) + +// Map to contain configuration +type Map map[string]interface{} + +// ImplementsGraphQLType implements GraphQL type for Map +func (Map) ImplementsGraphQLType(name string) bool { return name == "Map" } + +// UnmarshalGraphQL sets the Map +func (m *Map) UnmarshalGraphQL(input interface{}) error { + switch input := input.(type) { + case Map: + *m = input + return nil + default: + return errors.New("wrong type") + } +} + +// MarshalJSON returns json +func (m Map) MarshalJSON() ([]byte, error) { + // Cast this so we don't have infinite recursion + // (don't want json.Marshal calling the MarshalJSON method on m) + return json.Marshal(map[string]interface{}(m)) +} diff --git a/core/web/gui_assets_test.go b/core/web/gui_assets_test.go new file mode 100644 index 00000000..e9c522d4 --- 
/dev/null +++ b/core/web/gui_assets_test.go @@ -0,0 +1,162 @@ +package web_test + +import ( + "embed" + "net/http" + "net/http/httptest" + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + clhttptest "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/httptest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/web" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +//go:embed fixtures/operator_ui/assets +var testFs embed.FS + +func TestGuiAssets_DefaultIndexHtml_OK(t *testing.T) { + t.Parallel() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := clhttptest.NewTestLocalOnlyHTTPClient() + + // Make sure the test cases don't exceed the rate limit + testCases := []struct { + name string + path string + }{ + {name: "root path", path: "/"}, + {name: "nested path", path: "/job_specs/abc123"}, + {name: "potentially valid path", path: "/valid/route"}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + req, err := http.NewRequestWithContext(testutils.Context(t), "GET", app.Server.URL+tc.path, nil) + require.NoError(t, err) + resp, err := client.Do(req) + require.NoError(t, err) + cltest.AssertServerResponse(t, resp, http.StatusOK) + }) + } +} + +func TestGuiAssets_DefaultIndexHtml_NotFound(t *testing.T) { + t.Parallel() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := clhttptest.NewTestLocalOnlyHTTPClient() + + // Make sure the test cases don't exceed the rate limit + testCases := []struct { + name string + path string + }{ + {name: "with extension", path: "/invalidFile.json"}, + {name: "nested path with extension", path: 
"/another/invalidFile.css"}, + {name: "bad api route", path: "/v2/bad/route"}, + {name: "non existent api version", path: "/v3/new/api/version"}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + req, err := http.NewRequestWithContext(testutils.Context(t), "GET", app.Server.URL+tc.path, nil) + require.NoError(t, err) + resp, err := client.Do(req) + require.NoError(t, err) + cltest.AssertServerResponse(t, resp, http.StatusNotFound) + }) + } +} + +func TestGuiAssets_DefaultIndexHtml_RateLimited(t *testing.T) { + t.Parallel() + + config := configtest.NewGeneralConfig(t, nil) + app := cltest.NewApplicationWithConfig(t, config) + require.NoError(t, app.Start(testutils.Context(t))) + + client := clhttptest.NewTestLocalOnlyHTTPClient() + + // Make calls equal to the rate limit + rateLimit := 20 + for i := 0; i < rateLimit; i++ { + req, err := http.NewRequestWithContext(testutils.Context(t), "GET", app.Server.URL+"/", nil) + require.NoError(t, err) + resp, err := client.Do(req) + require.NoError(t, err) + cltest.AssertServerResponse(t, resp, http.StatusOK) + } + + // Last request fails + req, err := http.NewRequestWithContext(testutils.Context(t), "GET", app.Server.URL+"/", nil) + require.NoError(t, err) + resp, err := client.Do(req) + require.NoError(t, err) + assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) +} + +func TestGuiAssets_AssetsFS(t *testing.T) { + t.Parallel() + + efs := web.NewEmbedFileSystem(testFs, "fixtures/operator_ui") + handler := web.ServeGzippedAssets("/fixtures/operator_ui/", efs, logger.TestLogger(t)) + + t.Run("it get exact assets if Accept-Encoding is not specified", func(t *testing.T) { + recorder := httptest.NewRecorder() + c, _ := gin.CreateTestContext(recorder) + var err error + c.Request, err = http.NewRequestWithContext(c, "GET", "http://localhost:6688/fixtures/operator_ui/assets/main.js", nil) + require.NoError(t, err) + handler(c) + + require.Equal(t, http.StatusOK, 
recorder.Result().StatusCode) + + recorder = httptest.NewRecorder() + c, _ = gin.CreateTestContext(recorder) + c.Request, err = http.NewRequestWithContext(c, "GET", "http://localhost:6688/fixtures/operator_ui/assets/kinda_main.js", nil) + require.NoError(t, err) + handler(c) + + require.Equal(t, http.StatusNotFound, recorder.Result().StatusCode) + }) + + t.Run("it respects Accept-Encoding header", func(t *testing.T) { + recorder := httptest.NewRecorder() + c, _ := gin.CreateTestContext(recorder) + var err error + c.Request, err = http.NewRequestWithContext(c, "GET", "http://localhost:6688/fixtures/operator_ui/assets/main.js", nil) + require.NoError(t, err) + c.Request.Header.Set("Accept-Encoding", "gzip") + handler(c) + + require.Equal(t, http.StatusOK, recorder.Result().StatusCode) + require.Equal(t, "gzip", recorder.Result().Header.Get("Content-Encoding")) + + recorder = httptest.NewRecorder() + c, _ = gin.CreateTestContext(recorder) + c.Request, err = http.NewRequestWithContext(c, "GET", "http://localhost:6688/fixtures/operator_ui/assets/kinda_main.js", nil) + require.NoError(t, err) + c.Request.Header.Set("Accept-Encoding", "gzip") + handler(c) + + require.Equal(t, http.StatusNotFound, recorder.Result().StatusCode) + }) +} diff --git a/core/web/health_controller.go b/core/web/health_controller.go new file mode 100644 index 00000000..81f2f8cb --- /dev/null +++ b/core/web/health_controller.go @@ -0,0 +1,286 @@ +package web + +import ( + "bytes" + "fmt" + "io" + "net/http" + "slices" + "strings" + + "github.com/gin-gonic/gin" + "golang.org/x/exp/maps" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +type HealthController struct { + App plugin.Application +} + +const ( + HealthStatusPassing = "passing" + HealthStatusFailing = "failing" +) + +// NOTE: We only implement the k8s readiness check, *not* the liveness check. 
Liveness checks are only recommended in cases +// where the app doesn't crash itself on panic, and if implemented incorrectly can cause cascading failures. +// See the following for more information: +// - https://srcco.de/posts/kubernetes-liveness-probes-are-dangerous.html +func (hc *HealthController) Readyz(c *gin.Context) { + status := http.StatusOK + + checker := hc.App.GetHealthChecker() + + ready, errors := checker.IsReady() + + if !ready { + status = http.StatusServiceUnavailable + } + + c.Status(status) + + if _, ok := c.GetQuery("full"); !ok { + return + } + + checks := make([]presenters.Check, 0, len(errors)) + + for name, err := range errors { + status := HealthStatusPassing + var output string + + if err != nil { + status = HealthStatusFailing + output = err.Error() + } + + checks = append(checks, presenters.Check{ + JAID: presenters.NewJAID(name), + Name: name, + Status: status, + Output: output, + }) + } + + // return a json description of all the checks + jsonAPIResponse(c, checks, "checks") +} + +func (hc *HealthController) Health(c *gin.Context) { + status := http.StatusOK + + checker := hc.App.GetHealthChecker() + + healthy, errors := checker.IsHealthy() + + if !healthy { + status = http.StatusMultiStatus + } + + c.Status(status) + + checks := make([]presenters.Check, 0, len(errors)) + for name, err := range errors { + status := HealthStatusPassing + var output string + + if err != nil { + status = HealthStatusFailing + output = err.Error() + } + + checks = append(checks, presenters.Check{ + JAID: presenters.NewJAID(name), + Name: name, + Status: status, + Output: output, + }) + } + + switch c.NegotiateFormat(gin.MIMEJSON, gin.MIMEHTML, gin.MIMEPlain) { + case gin.MIMEJSON: + break // default + + case gin.MIMEHTML: + if err := newCheckTree(checks).WriteHTMLTo(c.Writer); err != nil { + hc.App.GetLogger().Errorw("Failed to write HTML health report", "err", err) + c.AbortWithStatus(http.StatusInternalServerError) + } + return + + case gin.MIMEPlain: 
// writeTextTo renders the checks as a plaintext report: one line per check,
// prefixed with its status marker ("ok " passing, "! " failing, "? " for any
// other status value), followed by the check's output (if any) indented by a
// tab on subsequent lines.
func writeTextTo(w io.Writer, checks []presenters.Check) error {
	// Sort so the report order is deterministic.
	slices.SortFunc(checks, presenters.CmpCheckName)
	for _, ch := range checks {
		status := "? "
		switch ch.Status {
		case HealthStatusPassing:
			status = "ok "
		case HealthStatusFailing:
			status = "! "
		}
		if _, err := fmt.Fprintf(w, "%s%s\n", status, ch.Name); err != nil {
			return err
		}
		if ch.Output != "" {
			// The prefix writer re-indents any embedded newlines in the
			// output so multi-line output stays under its check's line.
			if _, err := fmt.Fprintf(newLinePrefixWriter(w, "\t"), "\t%s", ch.Output); err != nil {
				return err
			}
			if _, err := fmt.Fprintln(w); err != nil {
				return err
			}
		}
	}
	return nil
}
`); err != nil { + return err + } + var expand string + if node.Output == "" && len(node.Subs) == 0 { + expand = ` class="noexpand"` + } + if _, err := fmt.Fprintf(w, ` + %s`, node.Name, expand, node.Status, short); err != nil { + return err + } + if node.Output != "" { + if _, err := w.WriteRawLinef("
%s
", node.Output); err != nil { + return err + } + } + if len(node.Subs) > 0 { + if err := node.Subs.writeHTMLTo(w.new(" ")); err != nil { + return err + } + } + if _, err := io.WriteString(w, "\n
"); err != nil { + return err + } + } + return nil +} + +type linePrefixWriter struct { + w io.Writer + prefix string + prefixB []byte +} + +func newLinePrefixWriter(w io.Writer, prefix string) *linePrefixWriter { + prefix = "\n" + prefix + return &linePrefixWriter{w: w, prefix: prefix, prefixB: []byte(prefix)} +} + +func (w *linePrefixWriter) new(prefix string) *linePrefixWriter { + prefix = w.prefix + prefix + return &linePrefixWriter{w: w.w, prefix: prefix, prefixB: []byte(prefix)} +} + +func (w *linePrefixWriter) Write(b []byte) (int, error) { + return w.w.Write(bytes.ReplaceAll(b, []byte("\n"), w.prefixB)) +} + +func (w *linePrefixWriter) WriteString(s string) (n int, err error) { + return io.WriteString(w.w, strings.ReplaceAll(s, "\n", w.prefix)) +} + +// WriteRawLinef writes a new newline with prefix, followed by s without modification. +func (w *linePrefixWriter) WriteRawLinef(s string, args ...any) (n int, err error) { + return fmt.Fprintf(w.w, w.prefix+s, args...) +} diff --git a/core/web/health_controller_test.go b/core/web/health_controller_test.go new file mode 100644 index 00000000..716dadd3 --- /dev/null +++ b/core/web/health_controller_test.go @@ -0,0 +1,133 @@ +package web_test + +import ( + "bytes" + _ "embed" + "encoding/json" + "io" + "net/http" + "testing" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/mocks" +) + +func TestHealthController_Readyz(t *testing.T) { + var tt = []struct { + name string + ready bool + status int + }{ + { + name: "not ready", + ready: false, + status: http.StatusServiceUnavailable, + }, + { + name: "ready", + ready: true, + status: http.StatusOK, + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + app := cltest.NewApplicationWithKey(t) + healthChecker := 
new(mocks.Checker) + healthChecker.On("Start").Return(nil).Once() + healthChecker.On("IsReady").Return(tc.ready, nil).Once() + healthChecker.On("Close").Return(nil).Once() + + app.HealthChecker = healthChecker + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Get("/readyz") + t.Cleanup(cleanup) + assert.Equal(t, tc.status, resp.StatusCode) + }) + } +} + +func TestHealthController_Health_status(t *testing.T) { + var tt = []struct { + name string + ready bool + status int + }{ + { + name: "not ready", + ready: false, + status: http.StatusMultiStatus, + }, + { + name: "ready", + ready: true, + status: http.StatusOK, + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + app := cltest.NewApplicationWithKey(t) + healthChecker := new(mocks.Checker) + healthChecker.On("Start").Return(nil).Once() + healthChecker.On("IsHealthy").Return(tc.ready, nil).Once() + healthChecker.On("Close").Return(nil).Once() + + app.HealthChecker = healthChecker + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Get("/health") + t.Cleanup(cleanup) + assert.Equal(t, tc.status, resp.StatusCode) + }) + } +} + +var ( + //go:embed testdata/body/health.json + bodyJSON string + //go:embed testdata/body/health.html + bodyHTML string + //go:embed testdata/body/health.txt + bodyTXT string +) + +func TestHealthController_Health_body(t *testing.T) { + for _, tc := range []struct { + name string + path string + headers map[string]string + expBody string + }{ + {"default", "/health", nil, bodyJSON}, + {"json", "/health", map[string]string{"Accept": gin.MIMEJSON}, bodyJSON}, + {"html", "/health", map[string]string{"Accept": gin.MIMEHTML}, bodyHTML}, + {"text", "/health", map[string]string{"Accept": gin.MIMEPlain}, bodyTXT}, + {".txt", "/health.txt", nil, bodyTXT}, + } { + t.Run(tc.name, func(t *testing.T) { + app := cltest.NewApplicationWithKey(t) + 
require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Get(tc.path, tc.headers) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusMultiStatus, resp.StatusCode) + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + if tc.expBody == bodyJSON { + // pretty print for comparison + var b bytes.Buffer + require.NoError(t, json.Indent(&b, body, "", " ")) + body = b.Bytes() + } + assert.Equal(t, tc.expBody, string(body)) + }) + } +} diff --git a/core/web/health_template_test.go b/core/web/health_template_test.go new file mode 100644 index 00000000..0f446724 --- /dev/null +++ b/core/web/health_template_test.go @@ -0,0 +1,53 @@ +package web + +import ( + "bytes" + _ "embed" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +var ( + //go:embed testdata/health.html + healthHTML string + + //go:embed testdata/health.txt + healthTXT string +) + +func checks() []presenters.Check { + const passing, failing = HealthStatusPassing, HealthStatusFailing + return []presenters.Check{ + {Name: "foo", Status: passing}, + {Name: "foo.bar", Status: failing, Output: "example error message"}, + {Name: "foo.bar.1", Status: passing}, + {Name: "foo.bar.1.A", Status: passing}, + {Name: "foo.bar.1.B", Status: passing}, + {Name: "foo.bar.2", Status: failing, Output: `error: +this is a multi-line error: +new line: +original error`}, + {Name: "foo.bar.2.A", Status: failing, Output: "failure!"}, + {Name: "foo.bar.2.B", Status: passing}, + {Name: "foo.baz", Status: passing}, + } + //TODO truncated error +} + +func Test_checkTree_WriteHTMLTo(t *testing.T) { + ct := newCheckTree(checks()) + var b bytes.Buffer + require.NoError(t, ct.WriteHTMLTo(&b)) + got := b.String() + require.Equalf(t, healthHTML, got, "got: %s", got) +} + +func Test_writeTextTo(t *testing.T) { + var b bytes.Buffer + require.NoError(t, writeTextTo(&b, checks())) + got := b.String() + 
require.Equalf(t, healthTXT, got, "got: %s", got) +} diff --git a/core/web/helpers.go b/core/web/helpers.go new file mode 100644 index 00000000..3b989a55 --- /dev/null +++ b/core/web/helpers.go @@ -0,0 +1,81 @@ +package web + +import ( + "database/sql" + "fmt" + "net/http" + "testing" + + "github.com/Depado/ginprom" + "github.com/gin-gonic/gin" + "github.com/manyminds/api2go/jsonapi" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +// jsonAPIError adds an error to the gin context and sets +// the JSON value of errors. +func jsonAPIError(c *gin.Context, statusCode int, err error) { + _ = c.Error(err).SetType(gin.ErrorTypePublic) + var jsonErr *models.JSONAPIErrors + if errors.As(err, &jsonErr) { + c.JSON(statusCode, jsonErr) + return + } + c.JSON(statusCode, models.NewJSONAPIErrorsWith(err.Error())) +} + +func paginatedResponse( + c *gin.Context, + name string, + size int, + page int, + resource interface{}, + count int, + err error, +) { + if errors.Is(err, sql.ErrNoRows) { + err = nil + } + + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("error getting paged %s: %+v", name, err)) + } else if buffer, err := NewPaginatedResponse(*c.Request.URL, size, page, count, resource); err != nil { + jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("failed to marshal document: %+v", err)) + } else { + c.Data(http.StatusOK, MediaType, buffer) + } +} + +func paginatedRequest(action func(*gin.Context, int, int, int)) func(*gin.Context) { + return func(c *gin.Context) { + size, page, offset, err := ParsePaginatedRequest(c.Query("size"), c.Query("page")) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + action(c, size, page, offset) + } +} + +func jsonAPIResponseWithStatus(c *gin.Context, resource interface{}, name string, status int) { + json, err := 
jsonapi.Marshal(resource) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("failed to marshal %s using jsonapi: %+v", name, err)) + } else { + c.Data(status, MediaType, json) + } +} + +func jsonAPIResponse(c *gin.Context, resource interface{}, name string) { + jsonAPIResponseWithStatus(c, resource, name, http.StatusOK) +} + +func Router(t testing.TB, app plugin.Application, prometheus *ginprom.Prometheus) *gin.Engine { + r, err := NewRouter(app, prometheus) + require.NoError(t, err) + return r +} diff --git a/core/web/jobs_controller.go b/core/web/jobs_controller.go new file mode 100644 index 00000000..be18cd32 --- /dev/null +++ b/core/web/jobs_controller.go @@ -0,0 +1,264 @@ +package web + +import ( + "context" + "database/sql" + "encoding/json" + "net/http" + "strings" + "time" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore" + "github.com/goplugin/pluginv3.0/v2/core/services/blockheaderfeeder" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/cron" + "github.com/goplugin/pluginv3.0/v2/core/services/directrequest" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrbootstrap" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/services/streams" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + 
"github.com/goplugin/pluginv3.0/v2/core/services/webhook" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// JobsController manages jobs +type JobsController struct { + App plugin.Application +} + +// Index lists all jobs +// Example: +// "GET /jobs" +func (jc *JobsController) Index(c *gin.Context, size, page, offset int) { + // Temporary: if no size is passed in, use a large page size. Remove once frontend can handle pagination + if c.Query("size") == "" { + size = 1000 + } + + jobs, count, err := jc.App.JobORM().FindJobs(offset, size) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + var resources []presenters.JobResource + for _, individualJob := range jobs { + resources = append(resources, *presenters.NewJobResource(individualJob)) + } + + paginatedResponse(c, "jobs", size, page, resources, count, err) +} + +// Show returns the details of a job +// :ID could be both job ID and external job ID +// Example: +// "GET /jobs/:ID" +func (jc *JobsController) Show(c *gin.Context) { + var err error + jobSpec := job.Job{} + if externalJobID, pErr := uuid.Parse(c.Param("ID")); pErr == nil { + // Find a job by external job ID + jobSpec, err = jc.App.JobORM().FindJobByExternalJobID(externalJobID, pg.WithParentCtx(c.Request.Context())) + } else if pErr = jobSpec.SetID(c.Param("ID")); pErr == nil { + // Find a job by job ID + jobSpec, err = jc.App.JobORM().FindJobTx(c, jobSpec.ID) + } else { + jsonAPIError(c, http.StatusUnprocessableEntity, pErr) + return + } + if err != nil { + if errors.Is(errors.Cause(err), sql.ErrNoRows) { + jsonAPIError(c, http.StatusNotFound, errors.New("job not found")) + } else { + jsonAPIError(c, http.StatusInternalServerError, err) + } + return + } + + jsonAPIResponse(c, presenters.NewJobResource(jobSpec), "jobs") +} + +// CreateJobRequest represents a request to create and start a job (V2). 
+type CreateJobRequest struct { + TOML string `json:"toml"` +} + +// Create validates, saves and starts a new job. +// Example: +// "POST /jobs" +func (jc *JobsController) Create(c *gin.Context) { + request := CreateJobRequest{} + if err := c.ShouldBindJSON(&request); err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + jb, status, err := jc.validateJobSpec(request.TOML) + if err != nil { + jsonAPIError(c, status, err) + return + } + + ctx, cancel := context.WithTimeout(c.Request.Context(), 5*time.Second) + defer cancel() + err = jc.App.AddJobV2(ctx, &jb) + if err != nil { + if errors.Is(errors.Cause(err), job.ErrNoSuchKeyBundle) || errors.As(err, &keystore.KeyNotFoundError{}) || errors.Is(errors.Cause(err), job.ErrNoSuchTransmitterKey) || errors.Is(errors.Cause(err), job.ErrNoSuchSendingKey) { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + jbj, err := json.Marshal(jb) + if err == nil { + jc.App.GetAuditLogger().Audit(audit.JobCreated, map[string]interface{}{"job": string(jbj)}) + } else { + jc.App.GetLogger().Errorf("Could not send audit log for JobCreation", "err", err) + } + + jsonAPIResponse(c, presenters.NewJobResource(jb), jb.Type.String()) +} + +// Delete hard deletes a job spec. 
+// Example: +// "DELETE /specs/:ID" +func (jc *JobsController) Delete(c *gin.Context) { + j := job.Job{} + err := j.SetID(c.Param("ID")) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + // Delete the job + err = jc.App.DeleteJob(c.Request.Context(), j.ID) + if errors.Is(err, sql.ErrNoRows) { + jsonAPIError(c, http.StatusNotFound, errors.New("JobSpec not found")) + return + } + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + jc.App.GetAuditLogger().Audit(audit.JobDeleted, map[string]interface{}{"id": j.ID}) + jsonAPIResponseWithStatus(c, nil, "job", http.StatusNoContent) +} + +// UpdateJobRequest represents a request to update a job with new toml and start a job (V2). +type UpdateJobRequest struct { + TOML string `json:"toml"` +} + +// Update validates a new TOML for an existing job, stops and deletes existing job, saves and starts a new job. +// Example: +// "PUT /jobs/:ID" +func (jc *JobsController) Update(c *gin.Context) { + request := UpdateJobRequest{} + if err := c.ShouldBindJSON(&request); err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + jb, status, err := jc.validateJobSpec(request.TOML) + if err != nil { + jsonAPIError(c, status, err) + return + } + + err = jb.SetID(c.Param("ID")) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + ctx, cancel := context.WithTimeout(c.Request.Context(), 5*time.Second) + defer cancel() + + // If the provided job id is not matching any job, delete will fail with 404 leaving state unchanged. + err = jc.App.DeleteJob(ctx, jb.ID) + // Error can be either come from ORM or from the activeJobs map. 
+ if err != nil { + if errors.Is(err, sql.ErrNoRows) || strings.Contains(err.Error(), "job not found") { + jsonAPIError(c, http.StatusNotFound, errors.Wrap(err, "failed to update job")) + return + } + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + err = jc.App.AddJobV2(ctx, &jb) + if err != nil { + if errors.Is(errors.Cause(err), job.ErrNoSuchKeyBundle) || errors.As(err, &keystore.KeyNotFoundError{}) || errors.Is(errors.Cause(err), job.ErrNoSuchTransmitterKey) || errors.Is(errors.Cause(err), job.ErrNoSuchSendingKey) { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + jsonAPIResponse(c, presenters.NewJobResource(jb), jb.Type.String()) +} + +func (jc *JobsController) validateJobSpec(tomlString string) (jb job.Job, statusCode int, err error) { + jobType, err := job.ValidateSpec(tomlString) + if err != nil { + return jb, http.StatusUnprocessableEntity, errors.Wrap(err, "failed to parse TOML") + } + + config := jc.App.GetConfig() + switch jobType { + case job.OffchainReporting: + jb, err = ocr.ValidatedOracleSpecToml(jc.App.GetRelayers().LegacyEVMChains(), tomlString) + if !config.OCR().Enabled() { + return jb, http.StatusNotImplemented, errors.New("The Offchain Reporting feature is disabled by configuration") + } + case job.OffchainReporting2: + jb, err = validate.ValidatedOracleSpecToml(config.OCR2(), config.Insecure(), tomlString) + if !config.OCR2().Enabled() { + return jb, http.StatusNotImplemented, errors.New("The Offchain Reporting 2 feature is disabled by configuration") + } + case job.DirectRequest: + jb, err = directrequest.ValidatedDirectRequestSpec(tomlString) + case job.FluxMonitor: + jb, err = fluxmonitorv2.ValidatedFluxMonitorSpec(config.JobPipeline(), tomlString) + case job.Keeper: + jb, err = keeper.ValidatedKeeperSpec(tomlString) + case job.Cron: + jb, err = cron.ValidatedCronSpec(tomlString) + case job.VRF: + jb, err = 
vrfcommon.ValidatedVRFSpec(tomlString) + case job.Webhook: + jb, err = webhook.ValidatedWebhookSpec(tomlString, jc.App.GetExternalInitiatorManager()) + case job.BlockhashStore: + jb, err = blockhashstore.ValidatedSpec(tomlString) + case job.BlockHeaderFeeder: + jb, err = blockheaderfeeder.ValidatedSpec(tomlString) + case job.Bootstrap: + jb, err = ocrbootstrap.ValidatedBootstrapSpecToml(tomlString) + case job.Gateway: + jb, err = gateway.ValidatedGatewaySpec(tomlString) + case job.Stream: + jb, err = streams.ValidatedStreamSpec(tomlString) + default: + return jb, http.StatusUnprocessableEntity, errors.Errorf("unknown job type: %s", jobType) + } + + if err != nil { + return jb, http.StatusBadRequest, err + } + return jb, 0, nil +} diff --git a/core/web/jobs_controller_test.go b/core/web/jobs_controller_test.go new file mode 100644 index 00000000..15004204 --- /dev/null +++ b/core/web/jobs_controller_test.go @@ -0,0 +1,768 @@ +package web_test + +import ( + "bytes" + _ "embed" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "math/big" + "net/http" + "net/url" + "strconv" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/pelletier/go-toml" + ragep2ptypes "github.com/goplugin/libocr/ragep2p/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/jmoiron/sqlx" + + "github.com/goplugin/plugin-common/pkg/utils" + evmclimocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/directrequest" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" + "github.com/goplugin/pluginv3.0/v2/core/utils/tomlutils" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestJobsController_Create_ValidationFailure_OffchainReportingSpec(t *testing.T) { + var ( + contractAddress = cltest.NewEIP55Address() + ) + + var peerID ragep2ptypes.PeerID + require.NoError(t, peerID.UnmarshalText([]byte(configtest.DefaultPeerID))) + randomBytes := testutils.Random32Byte() + + var tt = []struct { + name string + pid p2pkey.PeerID + kb string + taExists bool + expectedErr error + }{ + { + name: "invalid keybundle", + pid: p2pkey.PeerID(peerID), + kb: hex.EncodeToString(randomBytes[:]), + taExists: true, + expectedErr: job.ErrNoSuchKeyBundle, + }, + { + name: "invalid transmitter address", + pid: p2pkey.PeerID(peerID), + kb: cltest.DefaultOCRKeyBundleID, + taExists: false, + expectedErr: job.ErrNoSuchTransmitterKey, + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + ta, client := setupJobsControllerTests(t) + + var address ethkey.EIP55Address + if tc.taExists { + key, _ := cltest.MustInsertRandomKey(t, ta.KeyStore.Eth()) + address = key.EIP55Address + } else { + address = cltest.NewEIP55Address() + } + + require.NoError(t, ta.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + + sp := cltest.MinimalOCRNonBootstrapSpec(contractAddress, address, tc.pid, tc.kb) + body, _ := json.Marshal(web.CreateJobRequest{ + TOML: sp, + }) + resp, cleanup := client.Post("/v2/jobs", bytes.NewReader(body)) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + b, err := io.ReadAll(resp.Body) + require.NoError(t, err) + 
assert.Contains(t, string(b), tc.expectedErr.Error()) + }) + } +} + +func TestJobController_Create_DirectRequest_Fast(t *testing.T) { + app, client := setupJobsControllerTests(t) + require.NoError(t, app.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + + n := 10 + + var wg sync.WaitGroup + for i := 0; i < n; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + body, err := json.Marshal(web.CreateJobRequest{ + TOML: fmt.Sprintf(testspecs.DirectRequestSpecNoExternalJobID, i), + }) + require.NoError(t, err) + + t.Logf("POSTing %d", i) + r, cleanup := client.Post("/v2/jobs", bytes.NewReader(body)) + defer cleanup() + require.Equal(t, http.StatusOK, r.StatusCode) + }(i) + } + wg.Wait() + cltest.AssertCount(t, app.GetSqlxDB(), "direct_request_specs", int64(n)) +} + +func mustInt32FromString(t *testing.T, s string) int32 { + i, err := strconv.ParseInt(s, 10, 32) + require.NoError(t, err) + return int32(i) +} + +func TestJobController_Create_HappyPath(t *testing.T) { + app, client := setupJobsControllerTests(t) + b1, b2 := setupBridges(t, app.GetSqlxDB(), app.GetConfig().Database()) + require.NoError(t, app.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + var pks []vrfkey.KeyV2 + var k []p2pkey.KeyV2 + { + var err error + pks, err = app.KeyStore.VRF().GetAll() + require.NoError(t, err) + require.Len(t, pks, 1) + k, err = app.KeyStore.P2P().GetAll() + require.NoError(t, err) + require.Len(t, k, 1) + } + + jorm := app.JobORM() + var tt = []struct { + name string + tomlTemplate func(nameAndExternalJobID string) string + assertion func(t *testing.T, nameAndExternalJobID string, r *http.Response) + }{ + { + name: "offchain reporting", + tomlTemplate: func(nameAndExternalJobID string) string { + return testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + TransmitterAddress: app.Keys[0].Address.Hex(), + DS1BridgeName: b1, + DS2BridgeName: b2, + Name: nameAndExternalJobID, + }).Toml() + }, + assertion: func(t *testing.T, nameAndExternalJobID string, r *http.Response) { + 
require.Equal(t, http.StatusOK, r.StatusCode) + + resource := presenters.JobResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, r), &resource) + assert.NoError(t, err) + + jb, err := jorm.FindJob(testutils.Context(t), mustInt32FromString(t, resource.ID)) + require.NoError(t, err) + require.NotNil(t, resource.OffChainReportingSpec) + + assert.Equal(t, nameAndExternalJobID, jb.Name.ValueOrZero()) + assert.Equal(t, jb.OCROracleSpec.P2PV2Bootstrappers, resource.OffChainReportingSpec.P2PV2Bootstrappers) + assert.Equal(t, jb.OCROracleSpec.IsBootstrapPeer, resource.OffChainReportingSpec.IsBootstrapPeer) + assert.Equal(t, jb.OCROracleSpec.EncryptedOCRKeyBundleID, resource.OffChainReportingSpec.EncryptedOCRKeyBundleID) + assert.Equal(t, jb.OCROracleSpec.TransmitterAddress, resource.OffChainReportingSpec.TransmitterAddress) + assert.Equal(t, jb.OCROracleSpec.ObservationTimeout, resource.OffChainReportingSpec.ObservationTimeout) + assert.Equal(t, jb.OCROracleSpec.BlockchainTimeout, resource.OffChainReportingSpec.BlockchainTimeout) + assert.Equal(t, jb.OCROracleSpec.ContractConfigTrackerSubscribeInterval, resource.OffChainReportingSpec.ContractConfigTrackerSubscribeInterval) + assert.Equal(t, jb.OCROracleSpec.ContractConfigTrackerSubscribeInterval, resource.OffChainReportingSpec.ContractConfigTrackerSubscribeInterval) + assert.Equal(t, jb.OCROracleSpec.ContractConfigConfirmations, resource.OffChainReportingSpec.ContractConfigConfirmations) + assert.NotNil(t, resource.PipelineSpec.DotDAGSource) + // Sanity check to make sure it inserted correctly + require.Equal(t, ethkey.EIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C"), jb.OCROracleSpec.ContractAddress) + }, + }, + { + name: "keeper", + tomlTemplate: func(nameAndExternalJobID string) string { + return fmt.Sprintf(` + type = "keeper" + schemaVersion = 1 + name = "%s" + contractAddress = "0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba" + fromAddress = "0xa8037A20989AFcBC51798de9762b351D63ff462e" + 
evmChainID = 0 + minIncomingConfigurations = 1 + externalJobID = "%s" + `, nameAndExternalJobID, nameAndExternalJobID) + }, + assertion: func(t *testing.T, nameAndExternalJobID string, r *http.Response) { + require.Equal(t, http.StatusInternalServerError, r.StatusCode) + + errs := cltest.ParseJSONAPIErrors(t, r.Body) + require.NotNil(t, errs) + require.Len(t, errs.Errors, 1) + // services failed to start + require.Contains(t, errs.Errors[0].Detail, "no contract code at given address") + // but the job should still exist + jb, err := jorm.FindJobByExternalJobID(uuid.MustParse(nameAndExternalJobID)) + require.NoError(t, err) + require.NotNil(t, jb.KeeperSpec) + + require.Equal(t, ethkey.EIP55Address("0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba"), jb.KeeperSpec.ContractAddress) + require.Equal(t, ethkey.EIP55Address("0xa8037A20989AFcBC51798de9762b351D63ff462e"), jb.KeeperSpec.FromAddress) + assert.Equal(t, nameAndExternalJobID, jb.Name.ValueOrZero()) + + // Sanity check to make sure it inserted correctly + require.Equal(t, ethkey.EIP55Address("0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba"), jb.KeeperSpec.ContractAddress) + require.Equal(t, ethkey.EIP55Address("0xa8037A20989AFcBC51798de9762b351D63ff462e"), jb.KeeperSpec.FromAddress) + }, + }, + { + name: "cron", + tomlTemplate: func(nameAndExternalJobID string) string { + return fmt.Sprintf(testspecs.CronSpecTemplate, nameAndExternalJobID) + }, + assertion: func(t *testing.T, nameAndExternalJobID string, r *http.Response) { + require.Equal(t, http.StatusOK, r.StatusCode) + resource := presenters.JobResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, r), &resource) + assert.NoError(t, err) + + jb, err := jorm.FindJob(testutils.Context(t), mustInt32FromString(t, resource.ID)) + require.NoError(t, err) + require.NotNil(t, jb.CronSpec) + + assert.NotNil(t, resource.PipelineSpec.DotDAGSource) + require.Equal(t, "CRON_TZ=UTC * 0 0 1 1 *", jb.CronSpec.CronSchedule) + }, + }, + { + name: "cron-dot-separator", 
+ tomlTemplate: func(nameAndExternalJobID string) string { + return fmt.Sprintf(testspecs.CronSpecDotSepTemplate, nameAndExternalJobID) + }, + assertion: func(t *testing.T, nameAndExternalJobID string, r *http.Response) { + require.Equal(t, http.StatusOK, r.StatusCode) + resource := presenters.JobResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, r), &resource) + assert.NoError(t, err) + + jb, err := jorm.FindJob(testutils.Context(t), mustInt32FromString(t, resource.ID)) + require.NoError(t, err) + require.NotNil(t, jb.CronSpec) + + assert.NotNil(t, resource.PipelineSpec.DotDAGSource) + require.Equal(t, "CRON_TZ=UTC * 0 0 1 1 *", jb.CronSpec.CronSchedule) + }, + }, + { + name: "directrequest", + tomlTemplate: func(nameAndExternalJobID string) string { + return testspecs.GetDirectRequestSpecWithUUID(uuid.MustParse(nameAndExternalJobID)) + }, + assertion: func(t *testing.T, nameAndExternalJobID string, r *http.Response) { + require.Equal(t, http.StatusOK, r.StatusCode) + resource := presenters.JobResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, r), &resource) + assert.NoError(t, err) + + jb, err := jorm.FindJob(testutils.Context(t), mustInt32FromString(t, resource.ID)) + require.NoError(t, err) + require.NotNil(t, jb.DirectRequestSpec) + + assert.Equal(t, nameAndExternalJobID, jb.Name.ValueOrZero()) + assert.NotNil(t, resource.PipelineSpec.DotDAGSource) + // Sanity check to make sure it inserted correctly + require.Equal(t, ethkey.EIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C"), jb.DirectRequestSpec.ContractAddress) + require.Equal(t, jb.ExternalJobID.String(), nameAndExternalJobID) + }, + }, + { + name: "directrequest-with-requesters-and-min-contract-payment", + tomlTemplate: func(nameAndExternalJobID string) string { + return fmt.Sprintf(testspecs.DirectRequestSpecWithRequestersAndMinContractPaymentTemplate, nameAndExternalJobID, nameAndExternalJobID) + }, + assertion: func(t *testing.T, nameAndExternalJobID 
string, r *http.Response) { + require.Equal(t, http.StatusOK, r.StatusCode) + resource := presenters.JobResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, r), &resource) + assert.NoError(t, err) + + jb, err := jorm.FindJob(testutils.Context(t), mustInt32FromString(t, resource.ID)) + require.NoError(t, err) + require.NotNil(t, jb.DirectRequestSpec) + + assert.Equal(t, nameAndExternalJobID, jb.Name.ValueOrZero()) + assert.NotNil(t, resource.PipelineSpec.DotDAGSource) + assert.NotNil(t, resource.DirectRequestSpec.Requesters) + assert.Equal(t, "1000000000000000000000", resource.DirectRequestSpec.MinContractPayment.String()) + // Check requesters got saved properly + require.EqualValues(t, []common.Address{common.HexToAddress("0xAaAA1F8ee20f5565510b84f9353F1E333e753B7a"), common.HexToAddress("0xBbBb70f0E81c6F3430dfDc9fa02fB22bDD818c4E")}, jb.DirectRequestSpec.Requesters) + require.Equal(t, "1000000000000000000000", jb.DirectRequestSpec.MinContractPayment.String()) + require.Equal(t, jb.ExternalJobID.String(), nameAndExternalJobID) + }, + }, + { + name: "fluxmonitor", + tomlTemplate: func(nameAndExternalJobID string) string { + return fmt.Sprintf(testspecs.FluxMonitorSpecTemplate, nameAndExternalJobID, nameAndExternalJobID) + }, + assertion: func(t *testing.T, nameAndExternalJobID string, r *http.Response) { + + require.Equal(t, http.StatusInternalServerError, r.StatusCode) + + errs := cltest.ParseJSONAPIErrors(t, r.Body) + require.NotNil(t, errs) + require.Len(t, errs.Errors, 1) + // services failed to start + require.Contains(t, errs.Errors[0].Detail, "no contract code at given address") + // but the job should still exist + jb, err := jorm.FindJobByExternalJobID(uuid.MustParse(nameAndExternalJobID)) + require.NoError(t, err) + require.NotNil(t, jb.FluxMonitorSpec) + + assert.Equal(t, nameAndExternalJobID, jb.Name.ValueOrZero()) + assert.NotNil(t, jb.PipelineSpec.DotDagSource) + assert.Equal(t, 
ethkey.EIP55Address("0x3cCad4715152693fE3BC4460591e3D3Fbd071b42"), jb.FluxMonitorSpec.ContractAddress) + assert.Equal(t, time.Second, jb.FluxMonitorSpec.IdleTimerPeriod) + assert.Equal(t, false, jb.FluxMonitorSpec.IdleTimerDisabled) + assert.Equal(t, tomlutils.Float32(0.5), jb.FluxMonitorSpec.Threshold) + assert.Equal(t, tomlutils.Float32(0), jb.FluxMonitorSpec.AbsoluteThreshold) + }, + }, + { + name: "vrf", + tomlTemplate: func(_ string) string { + return testspecs.GenerateVRFSpec(testspecs.VRFSpecParams{PublicKey: pks[0].PublicKey.String()}).Toml() + }, + assertion: func(t *testing.T, nameAndExternalJobID string, r *http.Response) { + require.Equal(t, http.StatusOK, r.StatusCode) + resp := cltest.ParseResponseBody(t, r) + resource := presenters.JobResource{} + err := web.ParseJSONAPIResponse(resp, &resource) + require.NoError(t, err) + + jb, err := jorm.FindJob(testutils.Context(t), mustInt32FromString(t, resource.ID)) + require.NoError(t, err) + require.NotNil(t, jb.VRFSpec) + + assert.NotNil(t, resource.PipelineSpec.DotDAGSource) + assert.Equal(t, uint32(6), resource.VRFSpec.MinIncomingConfirmations) + assert.Equal(t, jb.VRFSpec.MinIncomingConfirmations, resource.VRFSpec.MinIncomingConfirmations) + assert.Equal(t, "0xABA5eDc1a551E55b1A570c0e1f1055e5BE11eca7", resource.VRFSpec.CoordinatorAddress.Hex()) + assert.Equal(t, jb.VRFSpec.CoordinatorAddress.Hex(), resource.VRFSpec.CoordinatorAddress.Hex()) + }, + }, + { + name: "stream", + tomlTemplate: func(_ string) string { + return testspecs.GenerateStreamSpec(testspecs.StreamSpecParams{Name: "ETH/USD", StreamID: 32}).Toml() + }, + assertion: func(t *testing.T, nameAndExternalJobID string, r *http.Response) { + require.Equal(t, http.StatusOK, r.StatusCode) + resp := cltest.ParseResponseBody(t, r) + resource := presenters.JobResource{} + err := web.ParseJSONAPIResponse(resp, &resource) + require.NoError(t, err) + + jb, err := jorm.FindJob(testutils.Context(t), mustInt32FromString(t, resource.ID)) + require.NoError(t, 
err) + require.NotNil(t, jb.PipelineSpec) + + assert.NotNil(t, resource.PipelineSpec.DotDAGSource) + assert.Equal(t, jb.Name.ValueOrZero(), resource.Name) + assert.Equal(t, jb.StreamID, resource.StreamID) + }, + }, + } + for _, tc := range tt { + c := tc + t.Run(c.name, func(t *testing.T) { + nameAndExternalJobID := uuid.New().String() + toml := c.tomlTemplate(nameAndExternalJobID) + body, err := json.Marshal(web.CreateJobRequest{ + TOML: toml, + }) + require.NoError(t, err) + response, cleanup := client.Post("/v2/jobs", bytes.NewReader(body)) + defer cleanup() + c.assertion(t, nameAndExternalJobID, response) + }) + } +} + +func TestJobsController_Create_WebhookSpec(t *testing.T) { + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + t.Cleanup(func() { assert.NoError(t, app.Stop()) }) + + _, fetchBridge := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{}, app.GetConfig().Database()) + _, submitBridge := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{}, app.GetConfig().Database()) + + client := app.NewHTTPClient(nil) + + tomlStr := testspecs.GetWebhookSpecNoBody(uuid.New(), fetchBridge.Name.String(), submitBridge.Name.String()) + body, _ := json.Marshal(web.CreateJobRequest{ + TOML: tomlStr, + }) + response, cleanup := client.Post("/v2/jobs", bytes.NewReader(body)) + defer cleanup() + require.Equal(t, http.StatusOK, response.StatusCode) + resource := presenters.JobResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resource) + require.NoError(t, err) + assert.NotNil(t, resource.PipelineSpec.DotDAGSource) + + jorm := app.JobORM() + _, err = jorm.FindJob(testutils.Context(t), mustInt32FromString(t, resource.ID)) + require.NoError(t, err) +} + +//go:embed webhook-spec-template.yml +var webhookSpecTemplate string + +func TestJobsController_FailToCreate_EmptyJsonAttribute(t *testing.T) { + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, 
app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + nameAndExternalJobID := uuid.New() + spec := fmt.Sprintf(webhookSpecTemplate, nameAndExternalJobID, nameAndExternalJobID) + body, err := json.Marshal(web.CreateJobRequest{ + TOML: spec, + }) + require.NoError(t, err) + response, cleanup := client.Post("/v2/jobs", bytes.NewReader(body)) + defer cleanup() + + b, err := io.ReadAll(response.Body) + require.NoError(t, err) + require.Contains(t, string(b), "syntax is not supported. Please use \\\"{}\\\" instead") +} + +func TestJobsController_Index_HappyPath(t *testing.T) { + _, client, ocrJobSpecFromFile, _, ereJobSpecFromFile, _ := setupJobSpecsControllerTestsWithJobs(t) + + url := url.URL{Path: "/v2/jobs"} + query := url.Query() + query.Set("evmChainID", cltest.FixtureChainID.String()) + url.RawQuery = query.Encode() + + response, cleanup := client.Get(url.String()) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + var resources []presenters.JobResource + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resources) + assert.NoError(t, err) + + require.Len(t, resources, 2) + + runDirectRequestJobSpecAssertions(t, ereJobSpecFromFile, resources[0]) + runOCRJobSpecAssertions(t, ocrJobSpecFromFile, resources[1]) +} + +func TestJobsController_Show_HappyPath(t *testing.T) { + _, client, ocrJobSpecFromFile, jobID, ereJobSpecFromFile, jobID2 := setupJobSpecsControllerTestsWithJobs(t) + + response, cleanup := client.Get("/v2/jobs/" + fmt.Sprintf("%v", jobID)) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + ocrJob := presenters.JobResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &ocrJob) + assert.NoError(t, err) + + runOCRJobSpecAssertions(t, ocrJobSpecFromFile, ocrJob) + + response, cleanup = client.Get("/v2/jobs/" + ocrJobSpecFromFile.ExternalJobID.String()) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, 
http.StatusOK) + + ocrJob = presenters.JobResource{} + err = web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &ocrJob) + assert.NoError(t, err) + + runOCRJobSpecAssertions(t, ocrJobSpecFromFile, ocrJob) + + response, cleanup = client.Get("/v2/jobs/" + fmt.Sprintf("%v", jobID2)) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + ereJob := presenters.JobResource{} + err = web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &ereJob) + assert.NoError(t, err) + + runDirectRequestJobSpecAssertions(t, ereJobSpecFromFile, ereJob) + + response, cleanup = client.Get("/v2/jobs/" + ereJobSpecFromFile.ExternalJobID.String()) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + ereJob = presenters.JobResource{} + err = web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &ereJob) + assert.NoError(t, err) + + runDirectRequestJobSpecAssertions(t, ereJobSpecFromFile, ereJob) +} + +func TestJobsController_Show_InvalidID(t *testing.T) { + _, client, _, _, _, _ := setupJobSpecsControllerTestsWithJobs(t) + + response, cleanup := client.Get("/v2/jobs/uuidLikeString") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusUnprocessableEntity) +} + +func TestJobsController_Show_NonExistentID(t *testing.T) { + _, client, _, _, _, _ := setupJobSpecsControllerTestsWithJobs(t) + + response, cleanup := client.Get("/v2/jobs/999999999") + t.Cleanup(cleanup) + + cltest.AssertServerResponse(t, response, http.StatusNotFound) +} + +func TestJobsController_Update_HappyPath(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.OCR.Enabled = ptr(true) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + c.P2P.PeerID = &cltest.DefaultP2PPeerID + }) + app := cltest.NewApplicationWithConfigAndKey(t, cfg, cltest.DefaultP2PKey) + + require.NoError(t, 
app.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, app.Start(testutils.Context(t))) + + _, bridge := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{}, app.GetConfig().Database()) + _, bridge2 := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{}, app.GetConfig().Database()) + + client := app.NewHTTPClient(nil) + + var jb job.Job + ocrspec := testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + DS1BridgeName: bridge.Name.String(), + DS2BridgeName: bridge2.Name.String(), + Name: "old OCR job", + }) + err := toml.Unmarshal([]byte(ocrspec.Toml()), &jb) + require.NoError(t, err) + + // BCF-2095 + // disable fkey checks until the end of the test transaction + require.NoError(t, utils.JustError( + app.GetSqlxDB().Exec(`SET CONSTRAINTS job_spec_errors_v2_job_id_fkey DEFERRED`))) + + var ocrSpec job.OCROracleSpec + err = toml.Unmarshal([]byte(ocrspec.Toml()), &ocrSpec) + require.NoError(t, err) + jb.OCROracleSpec = &ocrSpec + jb.OCROracleSpec.TransmitterAddress = &app.Keys[0].EIP55Address + err = app.AddJobV2(testutils.Context(t), &jb) + require.NoError(t, err) + dbJb, err := app.JobORM().FindJob(testutils.Context(t), jb.ID) + require.NoError(t, err) + require.Equal(t, dbJb.Name.String, ocrspec.Name) + + // test Calling update on the job id with changed values should succeed. 
+ updatedSpec := testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + DS1BridgeName: bridge2.Name.String(), + DS2BridgeName: bridge.Name.String(), + Name: "updated OCR job", + TransmitterAddress: app.Keys[0].Address.Hex(), + }) + require.NoError(t, err) + body, _ := json.Marshal(web.UpdateJobRequest{ + TOML: updatedSpec.Toml(), + }) + response, cleanup := client.Put("/v2/jobs/"+fmt.Sprintf("%v", jb.ID), bytes.NewReader(body)) + t.Cleanup(cleanup) + + dbJb, err = app.JobORM().FindJob(testutils.Context(t), jb.ID) + require.NoError(t, err) + require.Equal(t, dbJb.Name.String, updatedSpec.Name) + + cltest.AssertServerResponse(t, response, http.StatusOK) +} + +func TestJobsController_Update_NonExistentID(t *testing.T) { + ctx := testutils.Context(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.OCR.Enabled = ptr(true) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + c.P2P.PeerID = &cltest.DefaultP2PPeerID + }) + app := cltest.NewApplicationWithConfigAndKey(t, cfg, cltest.DefaultP2PKey) + + require.NoError(t, app.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, app.Start(ctx)) + + _, bridge := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{}, app.GetConfig().Database()) + _, bridge2 := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{}, app.GetConfig().Database()) + + client := app.NewHTTPClient(nil) + + var jb job.Job + ocrspec := testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + DS1BridgeName: bridge.Name.String(), + DS2BridgeName: bridge2.Name.String(), + Name: "old OCR job", + }) + err := toml.Unmarshal([]byte(ocrspec.Toml()), &jb) + require.NoError(t, err) + var ocrSpec job.OCROracleSpec + err = toml.Unmarshal([]byte(ocrspec.Toml()), &ocrSpec) + require.NoError(t, err) + jb.OCROracleSpec = &ocrSpec + jb.OCROracleSpec.TransmitterAddress = &app.Keys[0].EIP55Address + err = app.AddJobV2(ctx, &jb) + 
require.NoError(t, err) + + // test Calling update on the job id with changed values should succeed. + updatedSpec := testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{ + DS1BridgeName: bridge2.Name.String(), + DS2BridgeName: bridge.Name.String(), + Name: "updated OCR job", + TransmitterAddress: app.Keys[0].EIP55Address.String(), + }) + require.NoError(t, err) + body, _ := json.Marshal(web.UpdateJobRequest{ + TOML: updatedSpec.Toml(), + }) + response, cleanup := client.Put("/v2/jobs/99999", bytes.NewReader(body)) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusNotFound) +} + +func runOCRJobSpecAssertions(t *testing.T, ocrJobSpecFromFileDB job.Job, ocrJobSpecFromServer presenters.JobResource) { + ocrJobSpecFromFile := ocrJobSpecFromFileDB.OCROracleSpec + assert.Equal(t, ocrJobSpecFromFile.ContractAddress, ocrJobSpecFromServer.OffChainReportingSpec.ContractAddress) + assert.Equal(t, ocrJobSpecFromFile.P2PV2Bootstrappers, ocrJobSpecFromServer.OffChainReportingSpec.P2PV2Bootstrappers) + assert.Equal(t, ocrJobSpecFromFile.IsBootstrapPeer, ocrJobSpecFromServer.OffChainReportingSpec.IsBootstrapPeer) + assert.Equal(t, ocrJobSpecFromFile.EncryptedOCRKeyBundleID, ocrJobSpecFromServer.OffChainReportingSpec.EncryptedOCRKeyBundleID) + assert.Equal(t, ocrJobSpecFromFile.TransmitterAddress, ocrJobSpecFromServer.OffChainReportingSpec.TransmitterAddress) + assert.Equal(t, ocrJobSpecFromFile.ObservationTimeout, ocrJobSpecFromServer.OffChainReportingSpec.ObservationTimeout) + assert.Equal(t, ocrJobSpecFromFile.BlockchainTimeout, ocrJobSpecFromServer.OffChainReportingSpec.BlockchainTimeout) + assert.Equal(t, ocrJobSpecFromFile.ContractConfigTrackerSubscribeInterval, ocrJobSpecFromServer.OffChainReportingSpec.ContractConfigTrackerSubscribeInterval) + assert.Equal(t, ocrJobSpecFromFile.ContractConfigTrackerSubscribeInterval, ocrJobSpecFromServer.OffChainReportingSpec.ContractConfigTrackerSubscribeInterval) + assert.Equal(t, 
ocrJobSpecFromFile.ContractConfigConfirmations, ocrJobSpecFromServer.OffChainReportingSpec.ContractConfigConfirmations) + assert.Equal(t, ocrJobSpecFromFileDB.Pipeline.Source, ocrJobSpecFromServer.PipelineSpec.DotDAGSource) + + // Check that create and update dates are non empty values. + // Empty date value is "0001-01-01 00:00:00 +0000 UTC" so we are checking for the + // millennia and century characters to be present + assert.Contains(t, ocrJobSpecFromServer.OffChainReportingSpec.CreatedAt.String(), "20") + assert.Contains(t, ocrJobSpecFromServer.OffChainReportingSpec.UpdatedAt.String(), "20") +} + +func runDirectRequestJobSpecAssertions(t *testing.T, ereJobSpecFromFile job.Job, ereJobSpecFromServer presenters.JobResource) { + assert.Equal(t, ereJobSpecFromFile.DirectRequestSpec.ContractAddress, ereJobSpecFromServer.DirectRequestSpec.ContractAddress) + assert.Equal(t, ereJobSpecFromFile.Pipeline.Source, ereJobSpecFromServer.PipelineSpec.DotDAGSource) + // Check that create and update dates are non empty values. 
+ // Empty date value is "0001-01-01 00:00:00 +0000 UTC" so we are checking for the + // millennia and century characters to be present + assert.Contains(t, ereJobSpecFromServer.DirectRequestSpec.CreatedAt.String(), "20") + assert.Contains(t, ereJobSpecFromServer.DirectRequestSpec.UpdatedAt.String(), "20") +} + +func setupBridges(t *testing.T, db *sqlx.DB, cfg pg.QConfig) (b1, b2 string) { + _, bridge := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, cfg) + _, bridge2 := cltest.MustCreateBridge(t, db, cltest.BridgeOpts{}, cfg) + return bridge.Name.String(), bridge2.Name.String() +} + +func setupJobsControllerTests(t *testing.T) (ta *cltest.TestApplication, cc cltest.HTTPClientCleaner) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.OCR.Enabled = ptr(true) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + c.P2P.PeerID = &cltest.DefaultP2PPeerID + }) + ec := setupEthClientForControllerTests(t) + app := cltest.NewApplicationWithConfigAndKey(t, cfg, cltest.DefaultP2PKey, ec) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + vrfKeyStore := app.GetKeyStore().VRF() + _, err := vrfKeyStore.Create() + require.NoError(t, err) + return app, client +} + +func setupEthClientForControllerTests(t *testing.T) *evmclimocks.Client { + ec := cltest.NewEthMocksWithStartupAssertions(t) + ec.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil).Maybe() + ec.On("LatestBlockHeight", mock.Anything).Return(big.NewInt(100), nil).Maybe() + ec.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Once().Return(big.NewInt(0), nil).Maybe() + return ec +} + +func setupJobSpecsControllerTestsWithJobs(t *testing.T) (*cltest.TestApplication, cltest.HTTPClientCleaner, job.Job, int32, job.Job, int32) { + ctx := testutils.Context(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + 
c.OCR.Enabled = ptr(true) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + c.P2P.PeerID = &cltest.DefaultP2PPeerID + }) + app := cltest.NewApplicationWithConfigAndKey(t, cfg, cltest.DefaultP2PKey) + + require.NoError(t, app.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, app.Start(ctx)) + + _, bridge := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{}, app.GetConfig().Database()) + _, bridge2 := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{}, app.GetConfig().Database()) + + client := app.NewHTTPClient(nil) + + var jb job.Job + ocrspec := testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{DS1BridgeName: bridge.Name.String(), DS2BridgeName: bridge2.Name.String(), EVMChainID: testutils.FixtureChainID.String()}) + err := toml.Unmarshal([]byte(ocrspec.Toml()), &jb) + require.NoError(t, err) + var ocrSpec job.OCROracleSpec + err = toml.Unmarshal([]byte(ocrspec.Toml()), &ocrSpec) + require.NoError(t, err) + jb.OCROracleSpec = &ocrSpec + jb.OCROracleSpec.TransmitterAddress = &app.Keys[0].EIP55Address + err = app.AddJobV2(ctx, &jb) + require.NoError(t, err) + + drSpec := fmt.Sprintf(` + type = "directrequest" + schemaVersion = 1 + evmChainID = "0" + name = "example eth request event spec" + contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" + externalJobID = "%s" + observationSource = """ + ds1 [type=http method=GET url="http://example.com" allowunrestrictednetworkaccess="true"]; + ds1_merge [type=merge left="{}"] + ds1_parse [type=jsonparse path="USD"]; + ds1_multiply [type=multiply times=100]; + ds1 -> ds1_parse -> ds1_multiply; + """ + `, uuid.New()) + + erejb, err := directrequest.ValidatedDirectRequestSpec(drSpec) + require.NoError(t, err) + err = app.AddJobV2(ctx, &erejb) + require.NoError(t, err) + + return app, client, jb, jb.ID, erejb, erejb.ID +} diff --git a/core/web/keys_controller.go b/core/web/keys_controller.go new file mode 100644 
index 00000000..4d70a6eb --- /dev/null +++ b/core/web/keys_controller.go @@ -0,0 +1,151 @@ +package web + +import ( + "fmt" + "io" + "net/http" + + "github.com/gin-gonic/gin" + "github.com/manyminds/api2go/jsonapi" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" +) + +type Keystore[K keystore.Key] interface { + Get(id string) (K, error) + GetAll() ([]K, error) + Create() (K, error) + Delete(id string) (K, error) + Import(keyJSON []byte, password string) (K, error) + Export(id string, password string) ([]byte, error) +} + +type KeysController interface { + // Index lists keys + Index(*gin.Context) + // Create and return a key + Create(*gin.Context) + // Delete a key + Delete(*gin.Context) + // Import imports a key + Import(*gin.Context) + // Export exports a key + Export(*gin.Context) +} + +type keysController[K keystore.Key, R jsonapi.EntityNamer] struct { + ks Keystore[K] + lggr logger.SugaredLogger + auditLogger audit.AuditLogger + typ string + resourceName string + newResource func(K) *R + newResources func([]K) []R +} + +func NewKeysController[K keystore.Key, R jsonapi.EntityNamer](ks Keystore[K], lggr logger.Logger, auditLogger audit.AuditLogger, resourceName string, + newResource func(K) *R, newResources func([]K) []R) KeysController { + var k K + typ, err := keystore.GetFieldNameForKey(k) + if err != nil { + panic(fmt.Errorf("unable to create keys controller: %v", err)) + } + return &keysController[K, R]{ + ks: ks, + lggr: logger.Sugared(lggr), + auditLogger: auditLogger, + typ: typ, + resourceName: resourceName, + newResource: newResource, + newResources: newResources, + } +} + +func (kc *keysController[K, R]) Index(c *gin.Context) { + keys, err := kc.ks.GetAll() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + jsonAPIResponse(c, kc.newResources(keys), kc.resourceName) +} + +func (kc 
*keysController[K, R]) Create(c *gin.Context) { + key, err := kc.ks.Create() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + kc.auditLogger.Audit(audit.KeyCreated, map[string]interface{}{ + "type": kc.typ, + "id": key.ID(), + }) + + jsonAPIResponse(c, kc.newResource(key), kc.resourceName) +} + +func (kc *keysController[K, R]) Delete(c *gin.Context) { + keyID := c.Param("keyID") + key, err := kc.ks.Get(keyID) + if err != nil { + jsonAPIError(c, http.StatusNotFound, err) + return + } + _, err = kc.ks.Delete(key.ID()) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + kc.auditLogger.Audit(audit.KeyDeleted, map[string]interface{}{ + "type": kc.typ, + "id": key.ID(), + }) + + jsonAPIResponse(c, kc.newResource(key), kc.resourceName) +} + +func (kc *keysController[K, R]) Import(c *gin.Context) { + defer kc.lggr.ErrorIfFn(c.Request.Body.Close, "Error closing Import request body") + + bytes, err := io.ReadAll(c.Request.Body) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + oldPassword := c.Query("oldpassword") + key, err := kc.ks.Import(bytes, oldPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + kc.auditLogger.Audit(audit.KeyImported, map[string]interface{}{ + "type": kc.typ, + "id": key.ID(), + }) + + jsonAPIResponse(c, kc.newResource(key), kc.resourceName) +} + +func (kc *keysController[K, R]) Export(c *gin.Context) { + defer kc.lggr.ErrorIfFn(c.Request.Body.Close, "Error closing Export request body") + + keyID := c.Param("ID") + newPassword := c.Query("newpassword") + bytes, err := kc.ks.Export(keyID, newPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + kc.auditLogger.Audit(audit.KeyExported, map[string]interface{}{ + "type": kc.typ, + "id": keyID, + }) + + c.Data(http.StatusOK, MediaType, bytes) +} diff --git a/core/web/loader/chain.go b/core/web/loader/chain.go 
new file mode 100644 index 00000000..166025e8 --- /dev/null +++ b/core/web/loader/chain.go @@ -0,0 +1,59 @@ +package loader + +import ( + "context" + "slices" + + "github.com/graph-gophers/dataloader" + + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/chains" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" +) + +type chainBatcher struct { + app plugin.Application +} + +func (b *chainBatcher) loadByIDs(ctx context.Context, keys dataloader.Keys) []*dataloader.Result { + // Create a map for remembering the order of keys passed in + keyOrder := make(map[string]int, len(keys)) + // Collect the keys to search for + var chainIDs []relay.ChainID + for ix, key := range keys { + chainIDs = append(chainIDs, key.String()) + keyOrder[key.String()] = ix + } + + var cs []types.ChainStatus + relayers := b.app.GetRelayers().Slice() + + for _, r := range relayers { + s, err := r.GetChainStatus(ctx) + if err != nil { + return []*dataloader.Result{{Data: nil, Error: err}} + } + + if slices.Contains(chainIDs, s.ID) { + cs = append(cs, s) + } + } + + results := make([]*dataloader.Result, len(keys)) + for _, c := range cs { + ix, ok := keyOrder[c.ID] + // if found, remove from index lookup map, so we know elements were found + if ok { + results[ix] = &dataloader.Result{Data: c, Error: nil} + delete(keyOrder, c.ID) + } + } + + // fill array positions without any nodes + for _, ix := range keyOrder { + results[ix] = &dataloader.Result{Data: nil, Error: chains.ErrNotFound} + } + + return results +} diff --git a/core/web/loader/eth_transaction_attempt.go b/core/web/loader/eth_transaction_attempt.go new file mode 100644 index 00000000..e5538380 --- /dev/null +++ b/core/web/loader/eth_transaction_attempt.go @@ -0,0 +1,61 @@ +package loader + +import ( + "context" + + "github.com/graph-gophers/dataloader" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + 
"github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +type ethTransactionAttemptBatcher struct { + app plugin.Application +} + +func (b *ethTransactionAttemptBatcher) loadByEthTransactionIDs(ctx context.Context, keys dataloader.Keys) []*dataloader.Result { + // Create a map for remembering the order of keys passed in + keyOrder := make(map[string]int, len(keys)) + // Collect the keys to search for + var ethTxsIDs []int64 + for ix, key := range keys { + id, err := stringutils.ToInt64(key.String()) + if err == nil { + ethTxsIDs = append(ethTxsIDs, id) + } + + keyOrder[key.String()] = ix + } + + attempts, err := b.app.TxmStorageService().FindTxAttemptConfirmedByTxIDs(ethTxsIDs) + if err != nil { + return []*dataloader.Result{{Data: nil, Error: err}} + } + + // Generate a map of attempts to txIDs + attemptsForTx := map[string][]txmgr.TxAttempt{} + for _, a := range attempts { + id := stringutils.FromInt64(a.TxID) + + attemptsForTx[id] = append(attemptsForTx[id], a) + } + + // Construct the output array of dataloader results + results := make([]*dataloader.Result, len(keys)) + for k, ns := range attemptsForTx { + ix, ok := keyOrder[k] + // if found, remove from index lookup map so we know elements were found + if ok { + results[ix] = &dataloader.Result{Data: ns, Error: nil} + delete(keyOrder, k) + } + } + + // fill array positions without any attempts as an empty slice + for _, ix := range keyOrder { + results[ix] = &dataloader.Result{Data: []txmgr.TxAttempt{}, Error: nil} + } + + return results +} diff --git a/core/web/loader/feeds_manager.go b/core/web/loader/feeds_manager.go new file mode 100644 index 00000000..0bfda33d --- /dev/null +++ b/core/web/loader/feeds_manager.go @@ -0,0 +1,55 @@ +package loader + +import ( + "context" + "errors" + + "github.com/graph-gophers/dataloader" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + 
"github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +type feedsBatcher struct { + app plugin.Application +} + +func (b *feedsBatcher) loadByIDs(_ context.Context, keys dataloader.Keys) []*dataloader.Result { + // Create a map for remembering the order of keys passed in + keyOrder := make(map[string]int, len(keys)) + // Collect the keys to search for + var managersIDs []int64 + for ix, key := range keys { + id, err := stringutils.ToInt64(key.String()) + if err == nil { + managersIDs = append(managersIDs, id) + } + keyOrder[key.String()] = ix + } + + // Fetch the feeds managers + managers, err := b.app.GetFeedsService().ListManagersByIDs(managersIDs) + if err != nil { + return []*dataloader.Result{{Data: nil, Error: err}} + } + + // Construct the output array of dataloader results + results := make([]*dataloader.Result, len(keys)) + for _, c := range managers { + id := stringutils.FromInt64(c.ID) + + ix, ok := keyOrder[id] + // if found, remove from index lookup map, so we know elements were found + if ok { + results[ix] = &dataloader.Result{Data: c, Error: nil} + delete(keyOrder, id) + } + } + + // fill array positions without any feeds managers + for _, ix := range keyOrder { + results[ix] = &dataloader.Result{Data: nil, Error: errors.New("feeds manager not found")} + } + + return results +} diff --git a/core/web/loader/feeds_manager_chain_config.go b/core/web/loader/feeds_manager_chain_config.go new file mode 100644 index 00000000..007bb7e6 --- /dev/null +++ b/core/web/loader/feeds_manager_chain_config.go @@ -0,0 +1,49 @@ +package loader + +import ( + "context" + "strconv" + + "github.com/graph-gophers/dataloader" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" +) + +type feedsManagerChainConfigBatcher struct { + app plugin.Application +} + +func (b *feedsManagerChainConfigBatcher) loadByManagerIDs(_ context.Context, keys dataloader.Keys) []*dataloader.Result { + ids, keyOrder := 
keyOrderInt64(keys) + + cfgs, err := b.app.GetFeedsService().ListChainConfigsByManagerIDs(ids) + if err != nil { + return []*dataloader.Result{{Data: nil, Error: err}} + } + + // Generate a map of specs to job proposal IDs + cfgsForManager := map[string][]feeds.ChainConfig{} + for _, cfg := range cfgs { + mgrID := strconv.Itoa(int(cfg.FeedsManagerID)) + cfgsForManager[mgrID] = append(cfgsForManager[mgrID], cfg) + } + + // Construct the output array of dataloader results + results := make([]*dataloader.Result, len(keys)) + for k, ns := range cfgsForManager { + ix, ok := keyOrder[k] + // if found, remove from index lookup map so we know elements were found + if ok { + results[ix] = &dataloader.Result{Data: ns, Error: nil} + delete(keyOrder, k) + } + } + + // fill array positions without any job proposals as an empty slice + for _, ix := range keyOrder { + results[ix] = &dataloader.Result{Data: []feeds.ChainConfig{}, Error: nil} + } + + return results +} diff --git a/core/web/loader/getters.go b/core/web/loader/getters.go new file mode 100644 index 00000000..bdda6dad --- /dev/null +++ b/core/web/loader/getters.go @@ -0,0 +1,256 @@ +package loader + +import ( + "context" + + "github.com/graph-gophers/dataloader" + "github.com/pkg/errors" + "go.uber.org/multierr" + + commontypes "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +// ErrInvalidType indicates that results loaded is not the type expected +var ErrInvalidType = errors.New("invalid type") + +// GetChainByID fetches the chain by it's id. 
+func GetChainByID(ctx context.Context, id string) (*commontypes.ChainStatus, error) { + ldr := For(ctx) + + thunk := ldr.ChainsByIDLoader.Load(ctx, dataloader.StringKey(id)) + result, err := thunk() + if err != nil { + return nil, err + } + + chain, ok := result.(commontypes.ChainStatus) + if !ok { + return nil, ErrInvalidType + } + + return &chain, nil +} + +// GetNodesByChainID fetches the nodes for a chain. +func GetNodesByChainID(ctx context.Context, id string) ([]types.Node, error) { + ldr := For(ctx) + + thunk := ldr.NodesByChainIDLoader.Load(ctx, dataloader.StringKey(id)) + result, err := thunk() + if err != nil { + return nil, err + } + + nodes, ok := result.([]types.Node) + if !ok { + return nil, ErrInvalidType + } + + return nodes, nil +} + +// GetFeedsManagerByID fetches the feed manager by ID. +func GetFeedsManagerByID(ctx context.Context, id string) (*feeds.FeedsManager, error) { + ldr := For(ctx) + + thunk := ldr.FeedsManagersByIDLoader.Load(ctx, dataloader.StringKey(id)) + result, err := thunk() + if err != nil { + return nil, err + } + + mgr, ok := result.(feeds.FeedsManager) + if !ok { + return nil, ErrInvalidType + } + + return &mgr, nil +} + +// GetJobRunsByIDs fetches the job runs by their ID. +func GetJobRunsByIDs(ctx context.Context, ids []int64) ([]pipeline.Run, error) { + ldr := For(ctx) + + strIDs := make([]string, len(ids)) + for i, id := range ids { + strIDs[i] = stringutils.FromInt64(id) + } + + thunk := ldr.JobRunsByIDLoader.LoadMany(ctx, dataloader.NewKeysFromStrings(strIDs)) + results, errs := thunk() + if errs != nil { + merr := multierr.Combine(errs...) + + return nil, errors.Wrap(merr, "errors fetching runs") + } + + runs := []pipeline.Run{} + for _, result := range results { + if run, ok := result.(pipeline.Run); ok { + runs = append(runs, run) + } + } + + return runs, nil +} + +// GetSpecsByJobProposalID fetches the spec for a job proposal id. 
+func GetSpecsByJobProposalID(ctx context.Context, jpID string) ([]feeds.JobProposalSpec, error) { + ldr := For(ctx) + + thunk := ldr.JobProposalSpecsByJobProposalID.Load(ctx, dataloader.StringKey(jpID)) + result, err := thunk() + if err != nil { + return nil, err + } + + specs, ok := result.([]feeds.JobProposalSpec) + if !ok { + return nil, ErrInvalidType + } + + return specs, nil +} + +// GetLatestSpecByJobProposalID fetches the latest spec for a job proposal id. +func GetLatestSpecByJobProposalID(ctx context.Context, jpID string) (*feeds.JobProposalSpec, error) { + ldr := For(ctx) + + thunk := ldr.JobProposalSpecsByJobProposalID.Load(ctx, dataloader.StringKey(jpID)) + result, err := thunk() + if err != nil { + return nil, err + } + + specs, ok := result.([]feeds.JobProposalSpec) + if !ok { + return nil, errors.Wrapf(ErrInvalidType, "Result : %T", result) + } + + max := specs[0] + for _, spec := range specs { + if spec.Version > max.Version { + max = spec + } + } + + return &max, nil +} + +// GetJobProposalsByFeedsManagerID fetches the job proposals by feeds manager ID. +func GetJobProposalsByFeedsManagerID(ctx context.Context, id string) ([]feeds.JobProposal, error) { + ldr := For(ctx) + + thunk := ldr.JobProposalsByManagerIDLoader.Load(ctx, dataloader.StringKey(id)) + result, err := thunk() + if err != nil { + return nil, err + } + + jbRuns, ok := result.([]feeds.JobProposal) + if !ok { + return nil, ErrInvalidType + } + + return jbRuns, nil +} + +// GetJobByExternalJobID fetches the job proposals by external job ID +func GetJobByExternalJobID(ctx context.Context, id string) (*job.Job, error) { + ldr := For(ctx) + + thunk := ldr.JobsByExternalJobIDs.Load(ctx, dataloader.StringKey(id)) + result, err := thunk() + if err != nil { + return nil, err + } + + job, ok := result.(job.Job) + if !ok { + return nil, ErrInvalidType + } + + return &job, nil +} + +// GetJobByPipelineSpecID fetches the job by pipeline spec ID. 
+func GetJobByPipelineSpecID(ctx context.Context, id string) (*job.Job, error) { + ldr := For(ctx) + + thunk := ldr.JobsByPipelineSpecIDLoader.Load(ctx, dataloader.StringKey(id)) + result, err := thunk() + if err != nil { + return nil, err + } + + jb, ok := result.(job.Job) + if !ok { + return nil, ErrInvalidType + } + + return &jb, nil +} + +// GetEthTxAttemptsByEthTxID fetches the attempts for an eth transaction. +func GetEthTxAttemptsByEthTxID(ctx context.Context, id string) ([]txmgr.TxAttempt, error) { + ldr := For(ctx) + + thunk := ldr.EthTxAttemptsByEthTxIDLoader.Load(ctx, dataloader.StringKey(id)) + result, err := thunk() + if err != nil { + return nil, err + } + + attempts, ok := result.([]txmgr.TxAttempt) + if !ok { + return nil, ErrInvalidType + } + + return attempts, nil +} + +func GetFeedsManagerChainConfigsByManagerID(ctx context.Context, mgrID int64) ([]feeds.ChainConfig, error) { + ldr := For(ctx) + + thunk := ldr.FeedsManagerChainConfigsByManagerIDLoader.Load(ctx, + dataloader.StringKey(stringutils.FromInt64(mgrID)), + ) + result, err := thunk() + if err != nil { + return nil, err + } + + cfgs, ok := result.([]feeds.ChainConfig) + if !ok { + return nil, ErrInvalidType + } + + return cfgs, nil +} + +// GetJobSpecErrorsByJobID fetches the Spec Errors for a Job. 
+func GetJobSpecErrorsByJobID(ctx context.Context, jobID int32) ([]job.SpecError, error) { + ldr := For(ctx) + + thunk := ldr.SpecErrorsByJobIDLoader.Load(ctx, + dataloader.StringKey(stringutils.FromInt32(jobID)), + ) + result, err := thunk() + if err != nil { + return nil, err + } + + specErrs, ok := result.([]job.SpecError) + if !ok { + return nil, ErrInvalidType + } + + return specErrs, nil +} diff --git a/core/web/loader/helpers.go b/core/web/loader/helpers.go new file mode 100644 index 00000000..334d4415 --- /dev/null +++ b/core/web/loader/helpers.go @@ -0,0 +1,25 @@ +package loader + +import ( + "strconv" + + "github.com/graph-gophers/dataloader" +) + +// keyOrderInt64 returns the keys cast to int64 and a mapping of each key to +// their index order. +func keyOrderInt64(keys dataloader.Keys) ([]int64, map[string]int) { + keyOrder := make(map[string]int, len(keys)) + + var ids []int64 + for ix, key := range keys { + id, err := strconv.ParseInt(key.String(), 10, 64) + if err == nil { + ids = append(ids, id) + } + + keyOrder[key.String()] = ix + } + + return ids, keyOrder +} diff --git a/core/web/loader/job.go b/core/web/loader/job.go new file mode 100644 index 00000000..8e65d060 --- /dev/null +++ b/core/web/loader/job.go @@ -0,0 +1,104 @@ +package loader + +import ( + "context" + "errors" + + "github.com/google/uuid" + "github.com/graph-gophers/dataloader" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +type jobBatcher struct { + app plugin.Application +} + +func (b *jobBatcher) loadByExternalJobIDs(_ context.Context, keys dataloader.Keys) []*dataloader.Result { + // Create a map for remembering the order of keys passed in + keyOrder := make(map[string]int, len(keys)) + // Collect the keys to search for + var jobIDs []uuid.UUID + for ix, key := range keys { + id, err := uuid.Parse(key.String()) + if err == nil { + jobIDs = 
append(jobIDs, id) + } + + keyOrder[key.String()] = ix + } + + // Fetch the jobs + var jobs []job.Job + for _, id := range jobIDs { + job, err := b.app.JobORM().FindJobByExternalJobID(id) + + if err != nil { + return []*dataloader.Result{{Data: nil, Error: err}} + } + + jobs = append(jobs, job) + } + + // Construct the output array of dataloader results + results := make([]*dataloader.Result, len(keys)) + for _, j := range jobs { + id := j.ExternalJobID.String() + + ix, ok := keyOrder[id] + // if found, remove from index lookup map, so we know elements were found + if ok { + results[ix] = &dataloader.Result{Data: j, Error: nil} + delete(keyOrder, id) + } + } + + // fill array positions without any feeds managers + for _, ix := range keyOrder { + results[ix] = &dataloader.Result{Data: nil, Error: errors.New("feeds manager not found")} + } + + return results +} + +func (b *jobBatcher) loadByPipelineSpecIDs(_ context.Context, keys dataloader.Keys) []*dataloader.Result { + // Create a map for remembering the order of keys passed in + keyOrder := make(map[string]int, len(keys)) + // Collect the keys to search for + var plSpecIDs []int32 + for ix, key := range keys { + id, err := stringutils.ToInt32(key.String()) + if err == nil { + plSpecIDs = append(plSpecIDs, id) + } + keyOrder[key.String()] = ix + } + + // Fetch the jobs + jobs, err := b.app.JobORM().FindJobsByPipelineSpecIDs(plSpecIDs) + if err != nil { + return []*dataloader.Result{{Data: nil, Error: err}} + } + + // Construct the output array of dataloader results + results := make([]*dataloader.Result, len(keys)) + for _, j := range jobs { + id := stringutils.FromInt32(j.PipelineSpecID) + + ix, ok := keyOrder[id] + // if found, remove from index lookup map, so we know elements were found + if ok { + results[ix] = &dataloader.Result{Data: j, Error: nil} + delete(keyOrder, id) + } + } + + // fill array positions without any jobs + for _, ix := range keyOrder { + results[ix] = &dataloader.Result{Data: nil, Error: 
errors.New("job not found")} + } + + return results +} diff --git a/core/web/loader/job_proposal.go b/core/web/loader/job_proposal.go new file mode 100644 index 00000000..0aad0070 --- /dev/null +++ b/core/web/loader/job_proposal.go @@ -0,0 +1,60 @@ +package loader + +import ( + "context" + "strconv" + + "github.com/graph-gophers/dataloader" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" +) + +type jobProposalBatcher struct { + app plugin.Application +} + +func (b *jobProposalBatcher) loadByManagersIDs(_ context.Context, keys dataloader.Keys) []*dataloader.Result { + // Create a map for remembering the order of keys passed in + keyOrder := make(map[string]int, len(keys)) + // Collect the keys to search for + var mgrsIDs []int64 + for ix, key := range keys { + id, err := strconv.ParseInt(key.String(), 10, 64) + if err == nil { + mgrsIDs = append(mgrsIDs, id) + } + + keyOrder[key.String()] = ix + } + + jps, err := b.app.GetFeedsService().ListJobProposalsByManagersIDs(mgrsIDs) + if err != nil { + return []*dataloader.Result{{Data: nil, Error: err}} + } + + // Generate a map of job proposals to feeds managers IDs + jpsForMgr := map[string][]feeds.JobProposal{} + for _, jp := range jps { + mgrID := strconv.Itoa(int(jp.FeedsManagerID)) + jpsForMgr[mgrID] = append(jpsForMgr[mgrID], jp) + } + + // Construct the output array of dataloader results + results := make([]*dataloader.Result, len(keys)) + for k, ns := range jpsForMgr { + ix, ok := keyOrder[k] + // if found, remove from index lookup map so we know elements were found + if ok { + results[ix] = &dataloader.Result{Data: ns, Error: nil} + delete(keyOrder, k) + } + } + + // fill array positions without any job proposals as an empty slice + for _, ix := range keyOrder { + results[ix] = &dataloader.Result{Data: []feeds.JobProposal{}, Error: nil} + } + + return results +} diff --git a/core/web/loader/job_proposal_spec.go 
b/core/web/loader/job_proposal_spec.go
new file mode 100644
index 00000000..30ed7ef2
--- /dev/null
+++ b/core/web/loader/job_proposal_spec.go
@@ -0,0 +1,49 @@
package loader

import (
	"context"
	"strconv"

	"github.com/graph-gophers/dataloader"

	"github.com/goplugin/pluginv3.0/v2/core/services/plugin"
	"github.com/goplugin/pluginv3.0/v2/core/services/feeds"
)

// jobProposalSpecBatcher batches spec lookups keyed by job proposal ID.
type jobProposalSpecBatcher struct {
	app plugin.Application
}

// loadByJobProposalsIDs loads specs grouped by their job proposal IDs.
// Unknown job proposal IDs resolve to an empty slice, not an error.
func (b *jobProposalSpecBatcher) loadByJobProposalsIDs(_ context.Context, keys dataloader.Keys) []*dataloader.Result {
	// Parse the int64 keys while remembering the order they were passed in.
	ids, keyOrder := keyOrderInt64(keys)

	specs, err := b.app.GetFeedsService().ListSpecsByJobProposalIDs(ids)
	if err != nil {
		return []*dataloader.Result{{Data: nil, Error: err}}
	}

	// Generate a map of specs to job proposal IDs
	specsForJP := map[string][]feeds.JobProposalSpec{}
	for _, spec := range specs {
		jpID := strconv.Itoa(int(spec.JobProposalID))
		specsForJP[jpID] = append(specsForJP[jpID], spec)
	}

	// Construct the output array of dataloader results
	results := make([]*dataloader.Result, len(keys))
	for k, ns := range specsForJP {
		ix, ok := keyOrder[k]
		// if found, remove from index lookup map so we know elements were found
		if ok {
			results[ix] = &dataloader.Result{Data: ns, Error: nil}
			delete(keyOrder, k)
		}
	}

	// fill array positions without any job proposals as an empty slice
	for _, ix := range keyOrder {
		results[ix] = &dataloader.Result{Data: []feeds.JobProposalSpec{}, Error: nil}
	}

	return results
}
diff --git a/core/web/loader/job_run.go b/core/web/loader/job_run.go
new file mode 100644
index 00000000..61b26625
--- /dev/null
+++ b/core/web/loader/job_run.go
@@ -0,0 +1,56 @@
package loader

import (
	"context"
	"errors"

	"github.com/graph-gophers/dataloader"

	"github.com/goplugin/pluginv3.0/v2/core/services/plugin"
	"github.com/goplugin/pluginv3.0/v2/core/utils/stringutils"
)

// jobRunBatcher batches pipeline run lookups keyed by run ID.
type jobRunBatcher struct {
	app plugin.Application
}
+func (b *jobRunBatcher) loadByIDs(_ context.Context, keys dataloader.Keys) []*dataloader.Result { + // Create a map for remembering the order of keys passed in + keyOrder := make(map[string]int, len(keys)) + // Collect the keys to search for + var runIDs []int64 + for ix, key := range keys { + id, err := stringutils.ToInt64(key.String()) + if err == nil { + runIDs = append(runIDs, id) + } + + keyOrder[key.String()] = ix + } + + // Fetch the runs + runs, err := b.app.JobORM().FindPipelineRunsByIDs(runIDs) + if err != nil { + return []*dataloader.Result{{Data: nil, Error: err}} + } + + // Construct the output array of dataloader results + results := make([]*dataloader.Result, len(keys)) + for _, r := range runs { + idStr := stringutils.FromInt64(r.ID) + + ix, ok := keyOrder[idStr] + // if found, remove from index lookup map, so we know elements were found + if ok { + results[ix] = &dataloader.Result{Data: r, Error: nil} + delete(keyOrder, idStr) + } + } + + // fill array positions without any job runs + for _, ix := range keyOrder { + results[ix] = &dataloader.Result{Data: nil, Error: errors.New("run not found")} + } + + return results +} diff --git a/core/web/loader/job_spec_errors.go b/core/web/loader/job_spec_errors.go new file mode 100644 index 00000000..15e53371 --- /dev/null +++ b/core/web/loader/job_spec_errors.go @@ -0,0 +1,61 @@ +package loader + +import ( + "context" + + "github.com/graph-gophers/dataloader" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +type jobSpecErrorsBatcher struct { + app plugin.Application +} + +func (b *jobSpecErrorsBatcher) loadByJobIDs(ctx context.Context, keys dataloader.Keys) []*dataloader.Result { + // Create a map for remembering the order of keys passed in + keyOrder := make(map[string]int, len(keys)) + // Collect the keys to search for 
+ var jobIDs []int32 + for ix, key := range keys { + id, err := stringutils.ToInt32(key.String()) + if err == nil { + jobIDs = append(jobIDs, id) + } + + keyOrder[key.String()] = ix + } + + specErrors, err := b.app.JobORM().FindSpecErrorsByJobIDs(jobIDs, pg.WithParentCtx(ctx)) + if err != nil { + return []*dataloader.Result{{Data: nil, Error: err}} + } + + // Generate a map of jobIDs to spec errors + specErrorsForJobs := map[string][]job.SpecError{} + for _, s := range specErrors { + jobID := stringutils.FromInt32(s.JobID) + specErrorsForJobs[jobID] = append(specErrorsForJobs[jobID], s) + } + + // Construct the output array of dataloader results + results := make([]*dataloader.Result, len(keys)) + for k, s := range specErrorsForJobs { + ix, ok := keyOrder[k] + // if found, remove from index lookup map, so we know elements were found + if ok { + results[ix] = &dataloader.Result{Data: s, Error: nil} + delete(keyOrder, k) + } + } + + // fill array positions without any nodes as an empty slice + for _, ix := range keyOrder { + results[ix] = &dataloader.Result{Data: []job.SpecError{}, Error: nil} + } + + return results +} diff --git a/core/web/loader/loader.go b/core/web/loader/loader.go new file mode 100644 index 00000000..2c2a6554 --- /dev/null +++ b/core/web/loader/loader.go @@ -0,0 +1,78 @@ +package loader + +import ( + "context" + + "github.com/gin-gonic/gin" + "github.com/graph-gophers/dataloader" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +type loadersKey struct{} + +type Dataloader struct { + app plugin.Application + + ChainsByIDLoader *dataloader.Loader + EthTxAttemptsByEthTxIDLoader *dataloader.Loader + FeedsManagersByIDLoader *dataloader.Loader + FeedsManagerChainConfigsByManagerIDLoader *dataloader.Loader + JobProposalsByManagerIDLoader *dataloader.Loader + JobProposalSpecsByJobProposalID *dataloader.Loader + JobRunsByIDLoader *dataloader.Loader + JobsByExternalJobIDs *dataloader.Loader + JobsByPipelineSpecIDLoader *dataloader.Loader + 
NodesByChainIDLoader *dataloader.Loader + SpecErrorsByJobIDLoader *dataloader.Loader +} + +func New(app plugin.Application) *Dataloader { + var ( + nodes = &nodeBatcher{app: app} + chains = &chainBatcher{app: app} + mgrs = &feedsBatcher{app: app} + ccfgs = &feedsManagerChainConfigBatcher{app: app} + jobRuns = &jobRunBatcher{app: app} + jps = &jobProposalBatcher{app: app} + jpSpecs = &jobProposalSpecBatcher{app: app} + jbs = &jobBatcher{app: app} + attmpts = ðTransactionAttemptBatcher{app: app} + specErrs = &jobSpecErrorsBatcher{app: app} + ) + + return &Dataloader{ + app: app, + + ChainsByIDLoader: dataloader.NewBatchedLoader(chains.loadByIDs), + EthTxAttemptsByEthTxIDLoader: dataloader.NewBatchedLoader(attmpts.loadByEthTransactionIDs), + FeedsManagersByIDLoader: dataloader.NewBatchedLoader(mgrs.loadByIDs), + FeedsManagerChainConfigsByManagerIDLoader: dataloader.NewBatchedLoader(ccfgs.loadByManagerIDs), + JobProposalsByManagerIDLoader: dataloader.NewBatchedLoader(jps.loadByManagersIDs), + JobProposalSpecsByJobProposalID: dataloader.NewBatchedLoader(jpSpecs.loadByJobProposalsIDs), + JobRunsByIDLoader: dataloader.NewBatchedLoader(jobRuns.loadByIDs), + JobsByExternalJobIDs: dataloader.NewBatchedLoader(jbs.loadByExternalJobIDs), + JobsByPipelineSpecIDLoader: dataloader.NewBatchedLoader(jbs.loadByPipelineSpecIDs), + NodesByChainIDLoader: dataloader.NewBatchedLoader(nodes.loadByChainIDs), + SpecErrorsByJobIDLoader: dataloader.NewBatchedLoader(specErrs.loadByJobIDs), + } +} + +// Middleware injects the dataloader into a gin context. +func Middleware(app plugin.Application) gin.HandlerFunc { + return func(c *gin.Context) { + ctx := InjectDataloader(c.Request.Context(), app) + c.Request = c.Request.WithContext(ctx) + c.Next() + } +} + +// InjectDataloader injects the dataloader into the context. 
func InjectDataloader(ctx context.Context, app plugin.Application) context.Context {
	return context.WithValue(ctx, loadersKey{}, New(app))
}

// For returns the dataloader for a given context.
// NOTE: the unchecked type assertion panics if InjectDataloader was never
// called for this context (e.g. a handler not wrapped by Middleware).
func For(ctx context.Context) *Dataloader {
	return ctx.Value(loadersKey{}).(*Dataloader)
}
diff --git a/core/web/loader/loader_test.go b/core/web/loader/loader_test.go
new file mode 100644
index 00000000..bbeb6016
--- /dev/null
+++ b/core/web/loader/loader_test.go
@@ -0,0 +1,410 @@
package loader

import (
	"database/sql"
	"math/big"
	"testing"

	"github.com/google/uuid"
	"github.com/graph-gophers/dataloader"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/goplugin/plugin-common/pkg/loop"
	commontypes "github.com/goplugin/plugin-common/pkg/types"
	"github.com/goplugin/pluginv3.0/v2/core/chains"
	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml"
	"github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr"
	evmtxmgrmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr/mocks"
	evmutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils"

	ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big"
	coremocks "github.com/goplugin/pluginv3.0/v2/core/internal/mocks"
	"github.com/goplugin/pluginv3.0/v2/core/internal/testutils"
	pluginmocks "github.com/goplugin/pluginv3.0/v2/core/services/plugin/mocks"
	"github.com/goplugin/pluginv3.0/v2/core/services/feeds"
	feedsMocks "github.com/goplugin/pluginv3.0/v2/core/services/feeds/mocks"
	"github.com/goplugin/pluginv3.0/v2/core/services/job"
	jobORMMocks "github.com/goplugin/pluginv3.0/v2/core/services/job/mocks"
	"github.com/goplugin/pluginv3.0/v2/core/services/pipeline"
	testutils2 "github.com/goplugin/pluginv3.0/v2/core/web/testutils"
)

func TestLoader_Chains(t *testing.T) {
	t.Parallel()

	app := coremocks.NewApplication(t)
	ctx := InjectDataloader(testutils.Context(t), app)

one := ubig.NewI(1) + chain := toml.EVMConfig{ChainID: one, Chain: toml.Defaults(one)} + two := ubig.NewI(2) + chain2 := toml.EVMConfig{ChainID: two, Chain: toml.Defaults(two)} + config1, err := chain.TOMLString() + require.NoError(t, err) + config2, err := chain2.TOMLString() + require.NoError(t, err) + + app.On("GetRelayers").Return(&pluginmocks.FakeRelayerChainInteroperators{Relayers: []loop.Relayer{ + testutils2.MockRelayer{ChainStatus: commontypes.ChainStatus{ + ID: "1", + Enabled: true, + Config: config1, + }}, testutils2.MockRelayer{ChainStatus: commontypes.ChainStatus{ + ID: "2", + Enabled: true, + Config: config2, + }}, + }}) + + batcher := chainBatcher{app} + keys := dataloader.NewKeysFromStrings([]string{"2", "1", "3"}) + results := batcher.loadByIDs(ctx, keys) + + assert.Len(t, results, 3) + + require.NoError(t, err) + want2 := commontypes.ChainStatus{ID: "2", Enabled: true, Config: config2} + assert.Equal(t, want2, results[0].Data.(commontypes.ChainStatus)) + + want1 := commontypes.ChainStatus{ID: "1", Enabled: true, Config: config1} + assert.Equal(t, want1, results[1].Data.(commontypes.ChainStatus)) + assert.Nil(t, results[2].Data) + assert.Error(t, results[2].Error) + assert.ErrorIs(t, results[2].Error, chains.ErrNotFound) +} + +func TestLoader_Nodes(t *testing.T) { + t.Parallel() + + app := coremocks.NewApplication(t) + ctx := InjectDataloader(testutils.Context(t), app) + + chainID1, chainID2, notAnID := big.NewInt(1), big.NewInt(2), big.NewInt(3) + + genNodeStat := func(id string) commontypes.NodeStatus { + return commontypes.NodeStatus{ + Name: "test-node-" + id, + ChainID: id, + } + } + rcInterops := &pluginmocks.FakeRelayerChainInteroperators{Nodes: []commontypes.NodeStatus{ + genNodeStat(chainID2.String()), genNodeStat(chainID1.String()), + }} + + app.On("GetRelayers").Return(rcInterops) + batcher := nodeBatcher{app} + + keys := dataloader.NewKeysFromStrings([]string{chainID2.String(), chainID1.String(), notAnID.String()}) + found := 
batcher.loadByChainIDs(ctx, keys) + + require.Len(t, found, 3) + assert.Equal(t, []commontypes.NodeStatus{genNodeStat(chainID2.String())}, found[0].Data) + assert.Equal(t, []commontypes.NodeStatus{genNodeStat(chainID1.String())}, found[1].Data) + assert.Equal(t, []commontypes.NodeStatus{}, found[2].Data) +} + +func TestLoader_FeedsManagers(t *testing.T) { + t.Parallel() + + fsvc := feedsMocks.NewService(t) + app := coremocks.NewApplication(t) + ctx := InjectDataloader(testutils.Context(t), app) + + mgr1 := feeds.FeedsManager{ + ID: int64(1), + Name: "manager 1", + } + mgr2 := feeds.FeedsManager{ + ID: int64(2), + Name: "manager 2", + } + mgr3 := feeds.FeedsManager{ + ID: int64(3), + Name: "manager 3", + } + + fsvc.On("ListManagersByIDs", []int64{3, 1, 2, 5}).Return([]feeds.FeedsManager{ + mgr1, mgr2, mgr3, + }, nil) + app.On("GetFeedsService").Return(fsvc) + + batcher := feedsBatcher{app} + + keys := dataloader.NewKeysFromStrings([]string{"3", "1", "2", "5"}) + found := batcher.loadByIDs(ctx, keys) + + require.Len(t, found, 4) + assert.Equal(t, mgr3, found[0].Data) + assert.Equal(t, mgr1, found[1].Data) + assert.Equal(t, mgr2, found[2].Data) + assert.Nil(t, found[3].Data) + assert.Error(t, found[3].Error) + assert.Equal(t, "feeds manager not found", found[3].Error.Error()) +} + +func TestLoader_JobProposals(t *testing.T) { + t.Parallel() + + fsvc := feedsMocks.NewService(t) + app := coremocks.NewApplication(t) + ctx := InjectDataloader(testutils.Context(t), app) + + jp1 := feeds.JobProposal{ + ID: int64(1), + FeedsManagerID: int64(3), + Status: feeds.JobProposalStatusPending, + } + jp2 := feeds.JobProposal{ + ID: int64(2), + FeedsManagerID: int64(1), + Status: feeds.JobProposalStatusApproved, + } + jp3 := feeds.JobProposal{ + ID: int64(3), + FeedsManagerID: int64(1), + Status: feeds.JobProposalStatusRejected, + } + + fsvc.On("ListJobProposalsByManagersIDs", []int64{3, 1, 2}).Return([]feeds.JobProposal{ + jp1, jp3, jp2, + }, nil) + 
app.On("GetFeedsService").Return(fsvc) + + batcher := jobProposalBatcher{app} + + keys := dataloader.NewKeysFromStrings([]string{"3", "1", "2"}) + found := batcher.loadByManagersIDs(ctx, keys) + + require.Len(t, found, 3) + assert.Equal(t, []feeds.JobProposal{jp1}, found[0].Data) + assert.Equal(t, []feeds.JobProposal{jp3, jp2}, found[1].Data) + assert.Equal(t, []feeds.JobProposal{}, found[2].Data) +} + +func TestLoader_JobRuns(t *testing.T) { + t.Parallel() + + jobsORM := jobORMMocks.NewORM(t) + app := coremocks.NewApplication(t) + ctx := InjectDataloader(testutils.Context(t), app) + + run1 := pipeline.Run{ID: int64(1)} + run2 := pipeline.Run{ID: int64(2)} + run3 := pipeline.Run{ID: int64(3)} + + jobsORM.On("FindPipelineRunsByIDs", []int64{3, 1, 2}).Return([]pipeline.Run{ + run3, run1, run2, + }, nil) + app.On("JobORM").Return(jobsORM) + + batcher := jobRunBatcher{app} + + keys := dataloader.NewKeysFromStrings([]string{"3", "1", "2"}) + found := batcher.loadByIDs(ctx, keys) + + require.Len(t, found, 3) + assert.Equal(t, run3, found[0].Data) + assert.Equal(t, run1, found[1].Data) + assert.Equal(t, run2, found[2].Data) +} + +func TestLoader_JobsByPipelineSpecIDs(t *testing.T) { + t.Parallel() + + t.Run("with out errors", func(t *testing.T) { + t.Parallel() + + jobsORM := jobORMMocks.NewORM(t) + app := coremocks.NewApplication(t) + ctx := InjectDataloader(testutils.Context(t), app) + + job1 := job.Job{ID: int32(2), PipelineSpecID: int32(1)} + job2 := job.Job{ID: int32(3), PipelineSpecID: int32(2)} + job3 := job.Job{ID: int32(4), PipelineSpecID: int32(3)} + + jobsORM.On("FindJobsByPipelineSpecIDs", []int32{3, 1, 2}).Return([]job.Job{ + job1, job2, job3, + }, nil) + app.On("JobORM").Return(jobsORM) + + batcher := jobBatcher{app} + + keys := dataloader.NewKeysFromStrings([]string{"3", "1", "2"}) + found := batcher.loadByPipelineSpecIDs(ctx, keys) + + require.Len(t, found, 3) + assert.Equal(t, job3, found[0].Data) + assert.Equal(t, job1, found[1].Data) + assert.Equal(t, 
job2, found[2].Data) + }) + + t.Run("with errors", func(t *testing.T) { + t.Parallel() + + jobsORM := jobORMMocks.NewORM(t) + app := coremocks.NewApplication(t) + ctx := InjectDataloader(testutils.Context(t), app) + + jobsORM.On("FindJobsByPipelineSpecIDs", []int32{3, 1, 2}).Return([]job.Job{}, sql.ErrNoRows) + app.On("JobORM").Return(jobsORM) + + batcher := jobBatcher{app} + + keys := dataloader.NewKeysFromStrings([]string{"3", "1", "2"}) + found := batcher.loadByPipelineSpecIDs(ctx, keys) + + require.Len(t, found, 1) + assert.Nil(t, found[0].Data) + assert.ErrorIs(t, found[0].Error, sql.ErrNoRows) + }) +} + +func TestLoader_JobsByExternalJobIDs(t *testing.T) { + t.Parallel() + + t.Run("with out errors", func(t *testing.T) { + t.Parallel() + + jobsORM := jobORMMocks.NewORM(t) + app := coremocks.NewApplication(t) + ctx := InjectDataloader(testutils.Context(t), app) + + ejID := uuid.New() + job := job.Job{ID: int32(2), ExternalJobID: ejID} + + jobsORM.On("FindJobByExternalJobID", ejID).Return(job, nil) + app.On("JobORM").Return(jobsORM) + + batcher := jobBatcher{app} + + keys := dataloader.NewKeysFromStrings([]string{ejID.String()}) + found := batcher.loadByExternalJobIDs(ctx, keys) + + require.Len(t, found, 1) + assert.Equal(t, job, found[0].Data) + }) +} + +func TestLoader_EthTransactionsAttempts(t *testing.T) { + t.Parallel() + + txStore := evmtxmgrmocks.NewEvmTxStore(t) + app := coremocks.NewApplication(t) + ctx := InjectDataloader(testutils.Context(t), app) + + ethTxIDs := []int64{1, 2, 3} + + attempt1 := txmgr.TxAttempt{ + ID: int64(1), + TxID: ethTxIDs[0], + } + attempt2 := txmgr.TxAttempt{ + ID: int64(1), + TxID: ethTxIDs[1], + } + + txStore.On("FindTxAttemptConfirmedByTxIDs", []int64{ethTxIDs[2], ethTxIDs[1], ethTxIDs[0]}).Return([]txmgr.TxAttempt{ + attempt1, attempt2, + }, nil) + app.On("TxmStorageService").Return(txStore) + + batcher := ethTransactionAttemptBatcher{app} + + keys := dataloader.NewKeysFromStrings([]string{"3", "2", "1"}) + found := 
batcher.loadByEthTransactionIDs(ctx, keys) + + require.Len(t, found, 3) + assert.Equal(t, []txmgr.TxAttempt{}, found[0].Data) + assert.Equal(t, []txmgr.TxAttempt{attempt2}, found[1].Data) + assert.Equal(t, []txmgr.TxAttempt{attempt1}, found[2].Data) +} + +func TestLoader_SpecErrorsByJobID(t *testing.T) { + t.Parallel() + + t.Run("without errors", func(t *testing.T) { + t.Parallel() + + jobsORM := jobORMMocks.NewORM(t) + app := coremocks.NewApplication(t) + ctx := InjectDataloader(testutils.Context(t), app) + + specErr1 := job.SpecError{ID: int64(2), JobID: int32(1)} + specErr2 := job.SpecError{ID: int64(3), JobID: int32(2)} + specErr3 := job.SpecError{ID: int64(4), JobID: int32(3)} + + jobsORM.On("FindSpecErrorsByJobIDs", []int32{3, 1, 2}, mock.Anything).Return([]job.SpecError{ + specErr1, specErr2, specErr3, + }, nil) + app.On("JobORM").Return(jobsORM) + + batcher := jobSpecErrorsBatcher{app} + + keys := dataloader.NewKeysFromStrings([]string{"3", "1", "2"}) + found := batcher.loadByJobIDs(ctx, keys) + + require.Len(t, found, 3) + assert.Equal(t, []job.SpecError{specErr3}, found[0].Data) + assert.Equal(t, []job.SpecError{specErr1}, found[1].Data) + assert.Equal(t, []job.SpecError{specErr2}, found[2].Data) + }) + + t.Run("with errors", func(t *testing.T) { + t.Parallel() + + jobsORM := jobORMMocks.NewORM(t) + app := coremocks.NewApplication(t) + ctx := InjectDataloader(testutils.Context(t), app) + + jobsORM.On("FindSpecErrorsByJobIDs", []int32{3, 1, 2}, mock.Anything).Return([]job.SpecError{}, sql.ErrNoRows) + app.On("JobORM").Return(jobsORM) + + batcher := jobSpecErrorsBatcher{app} + + keys := dataloader.NewKeysFromStrings([]string{"3", "1", "2"}) + found := batcher.loadByJobIDs(ctx, keys) + + require.Len(t, found, 1) + assert.Nil(t, found[0].Data) + assert.ErrorIs(t, found[0].Error, sql.ErrNoRows) + }) +} + +func TestLoader_loadByEthTransactionID(t *testing.T) { + t.Parallel() + + txStore := evmtxmgrmocks.NewEvmTxStore(t) + app := coremocks.NewApplication(t) + 
ctx := InjectDataloader(testutils.Context(t), app) + + ethTxID := int64(3) + ethTxHash := evmutils.NewHash() + + receipt := txmgr.Receipt{ + ID: int64(1), + TxHash: ethTxHash, + } + + attempt1 := txmgr.TxAttempt{ + ID: int64(1), + TxID: ethTxID, + Hash: ethTxHash, + Receipts: []txmgr.ChainReceipt{txmgr.DbReceiptToEvmReceipt(&receipt)}, + } + + txStore.On("FindTxAttemptConfirmedByTxIDs", []int64{ethTxID}).Return([]txmgr.TxAttempt{ + attempt1, + }, nil) + + app.On("TxmStorageService").Return(txStore) + + batcher := ethTransactionAttemptBatcher{app} + + keys := dataloader.NewKeysFromStrings([]string{"3"}) + found := batcher.loadByEthTransactionIDs(ctx, keys) + + require.Len(t, found, 1) + assert.Equal(t, []txmgr.TxAttempt{attempt1}, found[0].Data) +} diff --git a/core/web/loader/node.go b/core/web/loader/node.go new file mode 100644 index 00000000..f7e3f005 --- /dev/null +++ b/core/web/loader/node.go @@ -0,0 +1,58 @@ +package loader + +import ( + "context" + + "github.com/graph-gophers/dataloader" + + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" +) + +type nodeBatcher struct { + app plugin.Application +} + +func (b *nodeBatcher) loadByChainIDs(ctx context.Context, keys dataloader.Keys) []*dataloader.Result { + // Create a map for remembering the order of keys passed in + keyOrder := make(map[string]int, len(keys)) + // Collect the keys to search for + // note backward compatibility -- this only ever supported evm chains + evmrelayIDs := make([]relay.ID, 0, len(keys)) + + for ix, key := range keys { + rid := relay.ID{Network: relay.EVM, ChainID: key.String()} + evmrelayIDs = append(evmrelayIDs, rid) + keyOrder[key.String()] = ix + } + + allNodes, _, err := b.app.GetRelayers().NodeStatuses(ctx, 0, -1, evmrelayIDs...) 
	if err != nil {
		return []*dataloader.Result{{Data: nil, Error: err}}
	}
	// Generate a map of nodes to chainIDs
	nodesForChain := map[string][]types.NodeStatus{}
	for _, n := range allNodes {
		nodesForChain[n.ChainID] = append(nodesForChain[n.ChainID], n)
	}

	// Construct the output array of dataloader results
	results := make([]*dataloader.Result, len(keys))
	for k, ns := range nodesForChain {
		ix, ok := keyOrder[k]
		// if found, remove from index lookup map so we know elements were found
		if ok {
			results[ix] = &dataloader.Result{Data: ns, Error: nil}
			delete(keyOrder, k)
		}
	}

	// fill array positions without any nodes as an empty slice
	for _, ix := range keyOrder {
		results[ix] = &dataloader.Result{Data: []types.NodeStatus{}, Error: nil}
	}

	return results
}
diff --git a/core/web/log_controller.go b/core/web/log_controller.go
new file mode 100644
index 00000000..bb1488ba
--- /dev/null
+++ b/core/web/log_controller.go
@@ -0,0 +1,105 @@
package web

import (
	"fmt"
	"net/http"
	"strconv"

	"github.com/gin-gonic/gin"
	"go.uber.org/zap/zapcore"

	"github.com/goplugin/pluginv3.0/v2/core/logger/audit"
	"github.com/goplugin/pluginv3.0/v2/core/services/plugin"
	"github.com/goplugin/pluginv3.0/v2/core/web/presenters"
)

// LogController manages the logger config
type LogController struct {
	App plugin.Application
}

// LogPatchRequest is the JSON body accepted by Patch; both fields are
// optional but at least one must be set.
type LogPatchRequest struct {
	Level      string `json:"level"`
	SqlEnabled *bool  `json:"sqlEnabled"`
}

// Get retrieves the current log config settings
func (cc *LogController) Get(c *gin.Context) {
	// Parallel slices: ServiceName[i] pairs with LogLevel[i] in the response.
	var svcs, lvls []string
	svcs = append(svcs, "Global")
	lvls = append(lvls, cc.App.GetConfig().Log().Level().String())

	svcs = append(svcs, "IsSqlEnabled")
	lvls = append(lvls, strconv.FormatBool(cc.App.GetConfig().Database().LogSQL()))

	response := &presenters.ServiceLogConfigResource{
		JAID: presenters.JAID{
			ID: "log",
		},
		ServiceName:     svcs,
		LogLevel:        lvls,
		DefaultLogLevel: cc.App.GetConfig().Log().DefaultLevel().String(),
	}

	jsonAPIResponse(c, response, "log")
}

// Patch sets a log level and enables sql logging for the logger
func (cc *LogController) Patch(c *gin.Context) {
	request := &LogPatchRequest{}
	if err := c.ShouldBindJSON(request); err != nil {
		jsonAPIError(c, http.StatusUnprocessableEntity, err)
		return
	}

	// Build log config response
	var svcs, lvls []string

	// Validate request params
	if request.Level == "" && request.SqlEnabled == nil {
		jsonAPIError(c, http.StatusBadRequest, fmt.Errorf("please check request params, no params configured"))
		return
	}

	if request.Level != "" {
		var ll zapcore.Level
		err := ll.UnmarshalText([]byte(request.Level))
		if err != nil {
			jsonAPIError(c, http.StatusBadRequest, err)
			return
		}
		if err := cc.App.SetLogLevel(ll); err != nil {
			jsonAPIError(c, http.StatusInternalServerError, err)
			return
		}
	}
	svcs = append(svcs, "Global")
	lvls = append(lvls, cc.App.GetConfig().Log().Level().String())

	if request.SqlEnabled != nil {
		cc.App.GetConfig().SetLogSQL(*request.SqlEnabled)
	}

	svcs = append(svcs, "IsSqlEnabled")
	lvls = append(lvls, strconv.FormatBool(cc.App.GetConfig().Database().LogSQL()))

	response := &presenters.ServiceLogConfigResource{
		JAID: presenters.JAID{
			ID: "log",
		},
		ServiceName: svcs,
		LogLevel:    lvls,
	}

	cc.App.GetAuditLogger().Audit(audit.GlobalLogLevelSet, map[string]interface{}{"logLevel": request.Level})

	// NOTE(review): SQL-logging audit events are only emitted when this
	// request's level is exactly "debug" — confirm that gating is
	// intentional rather than a leftover condition.
	if request.Level == "debug" {
		if request.SqlEnabled != nil && *request.SqlEnabled {
			cc.App.GetAuditLogger().Audit(audit.ConfigSqlLoggingEnabled, map[string]interface{}{})
		} else {
			cc.App.GetAuditLogger().Audit(audit.ConfigSqlLoggingDisabled, map[string]interface{}{})
		}
	}

	jsonAPIResponse(c, response, "log")
}
diff --git a/core/web/log_controller_test.go b/core/web/log_controller_test.go
new file mode 100644
index 00000000..93f35579
--- /dev/null
+++ b/core/web/log_controller_test.go
@@ -0,0 +1,145 @@ +package web_test + +import ( + "bytes" + "encoding/json" + "net/http" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/pluginv3.0/v2/core/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +type testCase struct { + Description string + logLevel string + logSql *bool + + expectedLogLevel zapcore.Level + expectedLogSQL bool + expectedErrorCode int +} + +func TestLogController_GetLogConfig(t *testing.T) { + t.Parallel() + + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Log.Level = ptr(toml.LogLevel(zapcore.WarnLevel)) + c.Database.LogQueries = ptr(true) + }) + + app := cltest.NewApplicationWithConfig(t, cfg) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + resp, clean := client.Get("/v2/log") + t.Cleanup(clean) + + svcLogConfig := presenters.ServiceLogConfigResource{} + cltest.AssertServerResponse(t, resp, http.StatusOK) + require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &svcLogConfig)) + + require.Equal(t, "warn", svcLogConfig.DefaultLogLevel) + + for i, svcName := range svcLogConfig.ServiceName { + + if svcName == "Global" { + assert.Equal(t, zapcore.WarnLevel.String(), svcLogConfig.LogLevel[i]) + } + + if svcName == "IsSqlEnabled" { + assert.Equal(t, strconv.FormatBool(true), svcLogConfig.LogLevel[i]) + } + } +} + +func TestLogController_PatchLogConfig(t *testing.T) { + t.Parallel() + + sqlTrue := true + sqlFalse := false + cases := []testCase{ + { + Description: "Set log level to debug", + logLevel: "debug", + logSql: nil, + 
expectedLogLevel: zapcore.DebugLevel, + }, + { + Description: "Set log level to info", + logLevel: "info", + logSql: nil, + expectedLogLevel: zapcore.InfoLevel, + }, + { + Description: "Set log level to info and log sql to true", + logLevel: "info", + logSql: &sqlTrue, + expectedLogLevel: zapcore.InfoLevel, + expectedLogSQL: true, + }, + { + Description: "Set log level to warn and log sql to false", + logLevel: "warn", + logSql: &sqlFalse, + expectedLogLevel: zapcore.WarnLevel, + expectedLogSQL: false, + }, + { + Description: "Send no params to updater", + expectedErrorCode: http.StatusBadRequest, + }, + { + Description: "Send bad log level request", + logLevel: "test", + expectedErrorCode: http.StatusBadRequest, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.Description, func(t *testing.T) { + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + + request := web.LogPatchRequest{Level: tc.logLevel, SqlEnabled: tc.logSql} + + requestData, _ := json.Marshal(request) + buf := bytes.NewBuffer(requestData) + + resp, cleanup := client.Patch("/v2/log", buf) + defer cleanup() + + svcLogConfig := presenters.ServiceLogConfigResource{} + if tc.expectedErrorCode != 0 { + cltest.AssertServerResponse(t, resp, tc.expectedErrorCode) + } else { + cltest.AssertServerResponse(t, resp, http.StatusOK) + require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &svcLogConfig)) + + for i, svcName := range svcLogConfig.ServiceName { + + if svcName == "Global" { + assert.Equal(t, tc.expectedLogLevel.String(), svcLogConfig.LogLevel[i]) + } + + if svcName == "IsSqlEnabled" { + assert.Equal(t, strconv.FormatBool(tc.expectedLogSQL), svcLogConfig.LogLevel[i]) + } + } + } + }) + } +} diff --git a/core/web/loop_registry.go b/core/web/loop_registry.go new file mode 100644 index 00000000..7cdd353c --- /dev/null +++ b/core/web/loop_registry.go @@ -0,0 +1,140 @@ +package web + +import ( + "encoding/json" + 
"fmt" + "html" + "io" + "net/http" + "os" + "time" + + "github.com/gin-gonic/gin" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/plugins" +) + +type LoopRegistryServer struct { + exposedPromPort int + discoveryHostName string // discovery endpoint hostname. must be accessible to external prom for scraping + loopHostName string // internal hostname of loopps. used by node to forward external prom requests + registry *plugins.LoopRegistry + logger logger.SugaredLogger + client *http.Client + + jsonMarshalFn func(any) ([]byte, error) +} + +func NewLoopRegistryServer(app plugin.Application) *LoopRegistryServer { + discoveryHostName, loopHostName := initHostNames() + return &LoopRegistryServer{ + exposedPromPort: int(app.GetConfig().WebServer().HTTPPort()), + registry: app.GetLoopRegistry(), + logger: app.GetLogger(), + jsonMarshalFn: json.Marshal, + discoveryHostName: discoveryHostName, + loopHostName: loopHostName, + client: &http.Client{Timeout: 1 * time.Second}, // some value much less than the prometheus poll interval will do there + } +} + +// discoveryHandler implements service discovery of prom endpoints for LOOPs in the registry +func (l *LoopRegistryServer) discoveryHandler(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + var groups []*targetgroup.Group + + // add node metrics to service discovery + groups = append(groups, metricTarget(l.discoveryHostName, l.exposedPromPort, "/metrics")) + + // add all the plugins + for _, registeredPlugin := range l.registry.List() { + groups = append(groups, metricTarget(l.discoveryHostName, l.exposedPromPort, pluginMetricPath(registeredPlugin.Name))) + } + + b, err := l.jsonMarshalFn(groups) + if err != nil { + 
w.WriteHeader(http.StatusInternalServerError) + _, err = w.Write([]byte(err.Error())) + if err != nil { + l.logger.Error(err) + } + return + } + _, err = w.Write(b) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + l.logger.Error(err) + } + +} + +func metricTarget(hostName string, port int, path string) *targetgroup.Group { + return &targetgroup.Group{ + Targets: []model.LabelSet{ + // target address will be called by external prometheus + {model.AddressLabel: model.LabelValue(fmt.Sprintf("%s:%d", hostName, port))}, + }, + Labels: map[model.LabelName]model.LabelValue{ + model.MetricsPathLabel: model.LabelValue(path), + }, + } +} + +// pluginMetricHandlers routes from endpoints published in service discovery to the backing LOOP endpoint +func (l *LoopRegistryServer) pluginMetricHandler(gc *gin.Context) { + pluginName := gc.Param("name") + p, ok := l.registry.Get(pluginName) + if !ok { + gc.Data(http.StatusNotFound, "text/plain", []byte(fmt.Sprintf("plugin %q does not exist", html.EscapeString(pluginName)))) + return + } + + // unlike discovery, this endpoint is internal btw the node and plugin + pluginURL := fmt.Sprintf("http://%s:%d/metrics", l.loopHostName, p.EnvCfg.PrometheusPort) + res, err := l.client.Get(pluginURL) //nolint + if err != nil { + msg := fmt.Sprintf("plugin metric handler failed to get plugin url %s", html.EscapeString(pluginURL)) + l.logger.Errorw(msg, "err", err) + gc.Data(http.StatusInternalServerError, "text/plain", []byte(fmt.Sprintf("%s: %s", msg, err))) + return + } + defer res.Body.Close() + b, err := io.ReadAll(res.Body) + if err != nil { + msg := fmt.Sprintf("error reading plugin %q metrics", html.EscapeString(pluginName)) + l.logger.Errorw(msg, "err", err) + gc.Data(http.StatusInternalServerError, "text/plain", []byte(fmt.Sprintf("%s: %s", msg, err))) + return + } + + gc.Data(http.StatusOK, "text/plain", b) +} + +func initHostNames() (discoveryHost, loopHost string) { + var exists bool + discoveryHost, exists = 
env.PrometheusDiscoveryHostName.Lookup() + if !exists { + var err error + discoveryHost, err = os.Hostname() + if err != nil { + discoveryHost = "localhost" + } + } + + loopHost, exists = env.LOOPPHostName.Lookup() + if !exists { + // this is the expected case; no known uses for the env var other than + // as an escape hatch. + loopHost = "localhost" + } + return discoveryHost, loopHost +} + +func pluginMetricPath(name string) string { + return fmt.Sprintf("/plugins/%s/metrics", name) +} diff --git a/core/web/loop_registry_internal_test.go b/core/web/loop_registry_internal_test.go new file mode 100644 index 00000000..a2282178 --- /dev/null +++ b/core/web/loop_registry_internal_test.go @@ -0,0 +1,67 @@ +package web + +import ( + "encoding/json" + "net/http" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/plugins" +) + +type responseWriter struct { + statusCode int + header http.Header +} + +func (r *responseWriter) Write(b []byte) (int, error) { + return 0, errors.New("could not write to response") +} + +func (r *responseWriter) Header() http.Header { + return http.Header{} +} + +func (r *responseWriter) WriteHeader(statusCode int) { + r.statusCode = statusCode +} + +func newResponseWriter() *responseWriter { + return &responseWriter{header: make(http.Header)} +} + +func TestLoopRegistryServer_CantWriteToResponse(t *testing.T) { + l, o := logger.TestLoggerObserved(t, zap.ErrorLevel) + s := &LoopRegistryServer{ + exposedPromPort: 1, + registry: plugins.NewLoopRegistry(l, nil), + logger: l.(logger.SugaredLogger), + jsonMarshalFn: json.Marshal, + } + + rw := newResponseWriter() + s.discoveryHandler(rw, &http.Request{}) + assert.Equal(t, rw.statusCode, http.StatusInternalServerError) + assert.Equal(t, 1, o.FilterMessageSnippet("could not write to response").Len()) +} + +func TestLoopRegistryServer_CantMarshal(t *testing.T) { + l, o 
:= logger.TestLoggerObserved(t, zap.ErrorLevel) + s := &LoopRegistryServer{ + exposedPromPort: 1, + registry: plugins.NewLoopRegistry(l, nil), + logger: l.(logger.SugaredLogger), + jsonMarshalFn: func(any) ([]byte, error) { + return []byte(""), errors.New("can't unmarshal") + }, + } + + rw := newResponseWriter() + s.discoveryHandler(rw, &http.Request{}) + assert.Equal(t, rw.statusCode, http.StatusInternalServerError) + assert.Equal(t, 1, o.FilterMessageSnippet("could not write to response").Len()) +} diff --git a/core/web/loop_registry_test.go b/core/web/loop_registry_test.go new file mode 100644 index 00000000..691be1e2 --- /dev/null +++ b/core/web/loop_registry_test.go @@ -0,0 +1,158 @@ +package web_test + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "testing" + + "github.com/hashicorp/consul/sdk/freeport" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +type mockLoopImpl struct { + t *testing.T + *loop.PromServer +} + +// test prom var to avoid collision with real plugin metrics +var ( + testRegistry = prometheus.NewRegistry() + testHandler = promhttp.HandlerFor(testRegistry, promhttp.HandlerOpts{}) + testMetricName = "super_great_counter" + testMetric = prometheus.NewCounter(prometheus.CounterOpts{ + Name: testMetricName, + }) +) + +func configurePromRegistry() { + testRegistry.MustRegister(testMetric) +} + +func newMockLoopImpl(t *testing.T, port int) 
*mockLoopImpl { + return &mockLoopImpl{ + t: t, + PromServer: loop.PromServerOpts{Handler: testHandler}.New(port, logger.TestLogger(t).Named("mock-loop")), + } +} + +func (m *mockLoopImpl) start() { + require.NoError(m.t, m.PromServer.Start()) +} + +func (m *mockLoopImpl) close() { + require.NoError(m.t, m.PromServer.Close()) +} + +func (m *mockLoopImpl) run() { + testMetric.Inc() +} + +func TestLoopRegistry(t *testing.T) { + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.OCR.Enabled = ptr(true) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + c.P2P.PeerID = &cltest.DefaultP2PPeerID + }) + app := cltest.NewApplicationWithConfigAndKey(t, cfg, cltest.DefaultP2PKey) + // shim a reference to the promserver that is running in our mock loop + // this ensures the client.Get calls below have a reference to mock loop impl + + expectedLooppEndPoint, expectedCoreEndPoint := "/plugins/mockLoopImpl/metrics", "/metrics" + + // note we expect this to be an ordered result + expectedLabels := []model.LabelSet{ + model.LabelSet{"__metrics_path__": model.LabelValue(expectedCoreEndPoint)}, + model.LabelSet{"__metrics_path__": model.LabelValue(expectedLooppEndPoint)}, + } + + require.NoError(t, app.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, app.Start(testutils.Context(t))) + + // register a mock loop + loop, err := app.GetLoopRegistry().Register("mockLoopImpl") + require.NoError(t, err) + require.NotNil(t, loop) + require.Len(t, app.GetLoopRegistry().List(), 1) + + // set up a test prometheus registry and test metric that is used by + // our mock loop impl and isolated from the default prom register + configurePromRegistry() + mockLoop := newMockLoopImpl(t, loop.EnvCfg.PrometheusPort) + mockLoop.start() + defer mockLoop.close() + mockLoop.run() + + client := app.NewHTTPClient(nil) + + t.Run("discovery endpoint", func(t *testing.T) { + // under the covers 
this is routing thru the app into loop registry + resp, cleanup := client.Get("/discovery") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusOK) + + b, err := io.ReadAll(resp.Body) + require.NoError(t, err) + t.Logf("discovery response %s", b) + var got []*targetgroup.Group + require.NoError(t, json.Unmarshal(b, &got)) + + gotLabels := make([]model.LabelSet, 0) + for _, ls := range got { + gotLabels = append(gotLabels, ls.Labels) + } + assert.Equal(t, len(expectedLabels), len(gotLabels)) + for i := range expectedLabels { + assert.EqualValues(t, expectedLabels[i], gotLabels[i]) + } + }) + + t.Run("plugin metrics OK", func(t *testing.T) { + // plugin name `mockLoopImpl` matches key in PluginConfigs + resp, cleanup := client.Get(expectedLooppEndPoint) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusOK) + + b, err := io.ReadAll(resp.Body) + require.NoError(t, err) + t.Logf("plugin metrics response %s", b) + + var ( + exceptedCount = 1 + expectedMetric = fmt.Sprintf("%s %d", testMetricName, exceptedCount) + ) + require.Contains(t, string(b), expectedMetric) + }) + + t.Run("core metrics OK", func(t *testing.T) { + // core node metrics endpoint + resp, cleanup := client.Get(expectedCoreEndPoint) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusOK) + + b, err := io.ReadAll(resp.Body) + require.NoError(t, err) + t.Logf("core metrics response %s", b) + }) + + t.Run("no existent plugin metrics ", func(t *testing.T) { + // request plugin that doesn't exist + resp, cleanup := client.Get("/plugins/noexist/metrics") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, resp, http.StatusNotFound) + }) +} diff --git a/core/web/middleware.go b/core/web/middleware.go new file mode 100644 index 00000000..4a37760c --- /dev/null +++ b/core/web/middleware.go @@ -0,0 +1,240 @@ +package web + +import ( + "embed" + "errors" + "fmt" + "io/fs" + "net/http" + "os" + "path" + "strconv" + "strings" + + "github.com/gin-gonic/gin" 
+ + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// Go's new embed feature doesn't allow us to embed things outside of the current module. +// To get around this, we need to make sure that the assets we want to embed are available +// inside this module. To achieve this, we direct webpack to output all of the compiled assets +// in this module's folder under the "assets" directory. + +//go:generate ../../operator_ui/install.sh +//go:embed "assets" +var uiEmbedFs embed.FS + +// assetFs is the singleton file system instance that is used to serve the static +// assets for the operator UI. +var assetFs = NewEmbedFileSystem(uiEmbedFs, "assets") + +const ( + acceptEncodingHeader = "Accept-Encoding" + contentEncodingHeader = "Content-Encoding" + contentLengthHeader = "Content-Length" + rangeHeader = "Range" + varyHeader = "Vary" +) + +// ServeFileSystem wraps a http.FileSystem with an additional file existence check +type ServeFileSystem interface { + http.FileSystem + Exists(prefix string, path string) bool +} + +// EmbedFileSystem implements the ServeFileSystem interface using an embed.FS +// object. +type EmbedFileSystem struct { + embed.FS + http.FileSystem + pathPrefix string +} + +func NewEmbedFileSystem(efs embed.FS, pathPrefix string) ServeFileSystem { + return &EmbedFileSystem{ + FS: efs, + FileSystem: http.FS(efs), + pathPrefix: pathPrefix, + } +} + +// Exists implements the ServeFileSystem interface. +func (e *EmbedFileSystem) Exists(prefix string, filepath string) bool { + found := false + if p := path.Base(strings.TrimPrefix(filepath, prefix)); len(p) < len(filepath) { + //nolint:errcheck + fs.WalkDir(e.FS, ".", func(fpath string, d fs.DirEntry, err error) error { + fileName := path.Base(fpath) + if fileName == p { + found = true + // Return an error so that we terminate the search early. + // Otherwise, the search will continue for the rest of the file tree. 
+ return errors.New("file found") + } + return nil + }) + } + + return found +} + +// Open implements the http.FileSystem interface. +func (e *EmbedFileSystem) Open(name string) (http.File, error) { + name = path.Join(e.pathPrefix, name) + return e.FileSystem.Open(name) +} + +// gzipFileHandler implements a http.Handler which can serve either the base +// file or the gzipped file depending on the Accept-Content header and the +// existence of the file +type gzipFileHandler struct { + root ServeFileSystem + lggr logger.SugaredLogger +} + +// GzipFileServer is a drop-in replacement for Go's standard http.FileServer +// which adds support for static resources precompressed with gzip, at +// the cost of removing the support for directory browsing. +func GzipFileServer(root ServeFileSystem, lggr logger.Logger) http.Handler { + return &gzipFileHandler{root, logger.Sugared(lggr.Named("GzipFilehandler"))} +} + +func (f *gzipFileHandler) openAndStat(path string) (http.File, os.FileInfo, error) { + file, err := f.root.Open(path) + var info os.FileInfo + // This slightly weird variable reuse is so we can get 100% test coverage + // without having to come up with a test file that can be opened, yet + // fails to stat. + if err == nil { + info, err = file.Stat() + } + if err != nil { + return file, nil, err + } + if info.IsDir() { + return file, nil, fmt.Errorf("%s is directory", path) + } + return file, info, nil +} + +// List of encodings we would prefer to use, in order of preference, best first. +// We only support gzip for now +var preferredEncodings = []string{"gzip"} + +// File extension to use for different encodings. +func extensionForEncoding(encname string) string { + switch encname { + case "gzip": + return ".gz" + } + return "" +} + +// Find the best file to serve based on the client's Accept-Encoding, and which +// files actually exist on the filesystem. If no file was found that can satisfy +// the request, the error field will be non-nil. 
+func (f *gzipFileHandler) findBestFile(w http.ResponseWriter, r *http.Request, fpath string) (http.File, os.FileInfo, error) { + ae := r.Header.Get(acceptEncodingHeader) + // Send the base file if no AcceptEncoding header is provided + if ae == "" { + return f.openAndStat(fpath) + } + + // Got an accept header? See what possible encodings we can send by looking for files + var available []string + for _, posenc := range preferredEncodings { + ext := extensionForEncoding(posenc) + fname := fpath + ext + + if f.root.Exists("/", fname) { + available = append(available, posenc) + } + } + + // Negotiate the best content encoding to use + negenc := negotiateContentEncoding(r, available) + if negenc == "" { + // If we fail to negotiate anything try the base file + return f.openAndStat(fpath) + } + + ext := extensionForEncoding(negenc) + if file, info, err := f.openAndStat(fpath + ext); err == nil { + wHeader := w.Header() + wHeader[contentEncodingHeader] = []string{negenc} + wHeader.Add(varyHeader, acceptEncodingHeader) + + if len(r.Header[rangeHeader]) == 0 { + // If not a range request then we can easily set the content length which the + // Go standard library does not do if "Content-Encoding" is set. 
+ wHeader[contentLengthHeader] = []string{strconv.FormatInt(info.Size(), 10)} + } + return file, info, nil + } + + // If all else failed, fall back to base file once again + return f.openAndStat(fpath) +} + +// Determines the best encoding to use +func negotiateContentEncoding(r *http.Request, available []string) string { + values := strings.Split(r.Header.Get(acceptEncodingHeader), ",") + aes := []string{} + + // Clean the values + for _, v := range values { + aes = append(aes, strings.TrimSpace(v)) + } + + for _, a := range available { + for _, acceptEnc := range aes { + if acceptEnc == a { + return a + } + } + } + + return "" +} + +// Implements http.Handler +func (f *gzipFileHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + upath := r.URL.Path + if !strings.HasPrefix(upath, "/") { + upath = "/" + upath + r.URL.Path = upath + } + + fpath := path.Clean(upath) + if strings.HasSuffix(fpath, "/") { + http.NotFound(w, r) + return + } + + // Find the best acceptable file, including trying uncompressed + if file, info, err := f.findBestFile(w, r, fpath); err == nil { + http.ServeContent(w, r, fpath, info.ModTime(), file) + f.lggr.ErrorIfFn(file.Close, "Error closing file") + return + } + + f.lggr.Infof("could not find file: %s", fpath) + http.NotFound(w, r) +} + +// ServeGzippedAssets returns a middleware handler that serves static files in the given directory. 
+func ServeGzippedAssets(urlPrefix string, fs ServeFileSystem, lggr logger.Logger) gin.HandlerFunc { + fileserver := GzipFileServer(fs, lggr) + if urlPrefix != "" { + fileserver = http.StripPrefix(urlPrefix, fileserver) + } + return func(c *gin.Context) { + if fs.Exists(urlPrefix, c.Request.URL.Path) { + fileserver.ServeHTTP(c.Writer, c.Request) + c.Abort() + } else { + c.AbortWithStatus(http.StatusNotFound) + } + } +} diff --git a/core/web/nodes_controller.go b/core/web/nodes_controller.go new file mode 100644 index 00000000..a7f41313 --- /dev/null +++ b/core/web/nodes_controller.go @@ -0,0 +1,94 @@ +package web + +import ( + "context" + "net/http" + + "github.com/gin-gonic/gin" + "github.com/manyminds/api2go/jsonapi" + + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" +) + +type NodesController interface { + // Index lists nodes, and optionally filters by chain id. + Index(c *gin.Context, size, page, offset int) +} + +type NetworkScopedNodeStatuser struct { + network relay.Network + relayers plugin.RelayerChainInteroperators +} + +func NewNetworkScopedNodeStatuser(relayers plugin.RelayerChainInteroperators, network relay.Network) *NetworkScopedNodeStatuser { + scoped := relayers.List(plugin.FilterRelayersByType(network)) + return &NetworkScopedNodeStatuser{ + network: network, + relayers: scoped, + } +} + +func (n *NetworkScopedNodeStatuser) NodeStatuses(ctx context.Context, offset, limit int, relayIDs ...relay.ID) (nodes []types.NodeStatus, count int, err error) { + return n.relayers.NodeStatuses(ctx, offset, limit, relayIDs...) 
+} + +type nodesController[R jsonapi.EntityNamer] struct { + nodeSet *NetworkScopedNodeStatuser + errNotEnabled error + newResource func(status types.NodeStatus) R + auditLogger audit.AuditLogger +} + +func newNodesController[R jsonapi.EntityNamer]( + nodeSet *NetworkScopedNodeStatuser, + errNotEnabled error, + newResource func(status types.NodeStatus) R, + auditLogger audit.AuditLogger, +) NodesController { + return &nodesController[R]{ + nodeSet: nodeSet, + errNotEnabled: errNotEnabled, + newResource: newResource, + auditLogger: auditLogger, + } +} + +func (n *nodesController[R]) Index(c *gin.Context, size, page, offset int) { + if n.nodeSet == nil { + jsonAPIError(c, http.StatusBadRequest, n.errNotEnabled) + return + } + + id := c.Param("ID") + + var nodes []types.NodeStatus + var count int + var err error + + if id == "" { + // fetch all nodes + nodes, count, err = n.nodeSet.NodeStatuses(c, offset, size) + } else { + // fetch nodes for chain ID + // backward compatibility + var rid relay.ID + err = rid.UnmarshalString(id) + if err != nil { + rid.ChainID = id + rid.Network = n.nodeSet.network + } + nodes, count, err = n.nodeSet.NodeStatuses(c, offset, size, rid) + } + + var resources []R + for _, node := range nodes { + res := n.newResource(node) + resources = append(resources, res) + } + + paginatedResponse(c, "node", size, page, resources, count, err) +} diff --git a/core/web/ocr2_keys_controller.go b/core/web/ocr2_keys_controller.go new file mode 100644 index 00000000..ec504eda --- /dev/null +++ b/core/web/ocr2_keys_controller.go @@ -0,0 +1,125 @@ +package web + +import ( + "io" + "net/http" + + "github.com/gin-gonic/gin" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// OCRKeysController manages OCR key bundles +type 
OCR2KeysController struct { + App plugin.Application +} + +// Index lists OCR2 key bundles +// Example: +// "GET /keys/ocr" +func (ocr2kc *OCR2KeysController) Index(c *gin.Context) { + ekbs, err := ocr2kc.App.GetKeyStore().OCR2().GetAll() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + jsonAPIResponse(c, presenters.NewOCR2KeysBundleResources(ekbs), "offChainReporting2KeyBundle") +} + +// Create and return an OCR2 key bundle +// Example: +// "POST /keys/ocr" +func (ocr2kc *OCR2KeysController) Create(c *gin.Context) { + chainType := chaintype.ChainType(c.Param("chainType")) + key, err := ocr2kc.App.GetKeyStore().OCR2().Create(chainType) + if errors.Is(errors.Cause(err), chaintype.ErrInvalidChainType) { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + ocr2kc.App.GetAuditLogger().Audit(audit.OCR2KeyBundleCreated, map[string]interface{}{ + "ocr2KeyID": key.ID(), + "ocr2KeyChainType": key.ChainType(), + "ocr2KeyConfigEncryptionPublicKey": key.ConfigEncryptionPublicKey(), + "ocr2KeyOffchainPublicKey": key.OffchainPublicKey(), + "ocr2KeyMaxSignatureLength": key.MaxSignatureLength(), + "ocr2KeyPublicKey": key.PublicKey(), + }) + jsonAPIResponse(c, presenters.NewOCR2KeysBundleResource(key), "offChainReporting2KeyBundle") +} + +// Delete an OCR2 key bundle +// Example: +// "DELETE /keys/ocr/:keyID" +func (ocr2kc *OCR2KeysController) Delete(c *gin.Context) { + id := c.Param("keyID") + key, err := ocr2kc.App.GetKeyStore().OCR2().Get(id) + if err != nil { + jsonAPIError(c, http.StatusNotFound, err) + return + } + err = ocr2kc.App.GetKeyStore().OCR2().Delete(id) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + ocr2kc.App.GetAuditLogger().Audit(audit.OCR2KeyBundleDeleted, map[string]interface{}{"id": id}) + jsonAPIResponse(c, presenters.NewOCR2KeysBundleResource(key), "offChainReporting2KeyBundle") +} + 
+// Import imports an OCR2 key bundle +// Example: +// "Post /keys/ocr/import" +func (ocr2kc *OCR2KeysController) Import(c *gin.Context) { + defer ocr2kc.App.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Import request body") + + bytes, err := io.ReadAll(c.Request.Body) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + oldPassword := c.Query("oldpassword") + keyBundle, err := ocr2kc.App.GetKeyStore().OCR2().Import(bytes, oldPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + ocr2kc.App.GetAuditLogger().Audit(audit.OCR2KeyBundleImported, map[string]interface{}{ + "ocr2KeyID": keyBundle.ID(), + "ocr2KeyChainType": keyBundle.ChainType(), + "ocr2KeyConfigEncryptionPublicKey": keyBundle.ConfigEncryptionPublicKey(), + "ocr2KeyOffchainPublicKey": keyBundle.OffchainPublicKey(), + "ocr2KeyMaxSignatureLength": keyBundle.MaxSignatureLength(), + "ocr2KeyPublicKey": keyBundle.PublicKey(), + }) + + jsonAPIResponse(c, presenters.NewOCR2KeysBundleResource(keyBundle), "offChainReporting2KeyBundle") +} + +// Export exports an OCR2 key bundle +// Example: +// "Post /keys/ocr/export" +func (ocr2kc *OCR2KeysController) Export(c *gin.Context) { + defer ocr2kc.App.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Export response body") + + stringID := c.Param("ID") + newPassword := c.Query("newpassword") + bytes, err := ocr2kc.App.GetKeyStore().OCR2().Export(stringID, newPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + ocr2kc.App.GetAuditLogger().Audit(audit.OCR2KeyBundleExported, map[string]interface{}{"keyID": stringID}) + c.Data(http.StatusOK, MediaType, bytes) +} diff --git a/core/web/ocr2_keys_controller_test.go b/core/web/ocr2_keys_controller_test.go new file mode 100644 index 00000000..618eecde --- /dev/null +++ b/core/web/ocr2_keys_controller_test.go @@ -0,0 +1,109 @@ +package web_test + +import ( + "fmt" + "net/http" + "testing" + 
+ "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOCR2KeysController_Index_HappyPath(t *testing.T) { + client, OCRKeyStore := setupOCR2KeysControllerTests(t) + + keys, _ := OCRKeyStore.GetAll() + + response, cleanup := client.Get("/v2/keys/ocr2") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + resources := []presenters.OCR2KeysBundleResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resources) + require.NoError(t, err) + + require.Len(t, resources, len(keys)) + assert.Equal(t, keys[0].ID(), resources[0].ID) +} + +func TestOCR2KeysController_Create_HappyPath(t *testing.T) { + client, OCRKeyStore := setupOCR2KeysControllerTests(t) + + for _, test := range []struct { + name string + chainType chaintype.ChainType + }{ + {"EVM keys", "evm"}, + {"Solana Keys", "solana"}, + } { + t.Run(test.name, func(tt *testing.T) { + keys, _ := OCRKeyStore.GetAll() + initialLength := len(keys) + + response, cleanup := client.Post(fmt.Sprintf("/v2/keys/ocr2/%s", test.chainType), nil) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + keys, _ = OCRKeyStore.GetAll() + require.Len(t, keys, initialLength+1) + + resource := presenters.OCR2KeysBundleResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resource) + assert.NoError(t, err) + + var ids []string + for _, key := range keys { + ids = append(ids, key.ID()) + } + require.Contains(t, ids, resource.ID) + + _, err = OCRKeyStore.Get(resource.ID) + require.NoError(t, 
err) + }) + } +} + +func TestOCR2KeysController_Delete_NonExistentOCRKeyID(t *testing.T) { + client, _ := setupOCR2KeysControllerTests(t) + + nonExistentOCRKeyID := "eb81f4a35033ac8dd68b9d33a039a713d6fd639af6852b81f47ffeda1c95de54" + response, cleanup := client.Delete("/v2/keys/ocr2/" + nonExistentOCRKeyID) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusNotFound, response.StatusCode) +} + +func TestOCR2KeysController_Delete_HappyPath(t *testing.T) { + client, OCRKeyStore := setupOCR2KeysControllerTests(t) + + keys, _ := OCRKeyStore.GetAll() + initialLength := len(keys) + key, _ := OCRKeyStore.Create("evm") + + response, cleanup := client.Delete("/v2/keys/ocr2/" + key.ID()) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusOK, response.StatusCode) + assert.Error(t, utils.JustError(OCRKeyStore.Get(key.ID()))) + + keys, _ = OCRKeyStore.GetAll() + assert.Equal(t, initialLength, len(keys)) +} + +func setupOCR2KeysControllerTests(t *testing.T) (cltest.HTTPClientCleaner, keystore.OCR2) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + + require.NoError(t, app.KeyStore.OCR2().Add(cltest.DefaultOCR2Key)) + + return client, app.GetKeyStore().OCR2() +} diff --git a/core/web/ocr_keys_controller.go b/core/web/ocr_keys_controller.go new file mode 100644 index 00000000..7d4176b1 --- /dev/null +++ b/core/web/ocr_keys_controller.go @@ -0,0 +1,112 @@ +package web + +import ( + "io" + "net/http" + + "github.com/gin-gonic/gin" + + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// OCRKeysController manages OCR key bundles +type OCRKeysController struct { + App plugin.Application +} + +// Index lists OCR key bundles +// Example: +// "GET /keys/ocr" +func (ocrkc *OCRKeysController) Index(c *gin.Context) { + ekbs, err := 
ocrkc.App.GetKeyStore().OCR().GetAll() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + jsonAPIResponse(c, presenters.NewOCRKeysBundleResources(ekbs), "offChainReportingKeyBundle") +} + +// Create and return an OCR key bundle +// Example: +// "POST /keys/ocr" +func (ocrkc *OCRKeysController) Create(c *gin.Context) { + key, err := ocrkc.App.GetKeyStore().OCR().Create() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + ocrkc.App.GetAuditLogger().Audit(audit.OCRKeyBundleCreated, map[string]interface{}{ + "ocrKeyBundleID": key.ID(), + "ocrKeyBundlePublicKeyAddressOnChain": key.PublicKeyAddressOnChain(), + }) + jsonAPIResponse(c, presenters.NewOCRKeysBundleResource(key), "offChainReportingKeyBundle") +} + +// Delete an OCR key bundle +// Example: +// "DELETE /keys/ocr/:keyID" +// "DELETE /keys/ocr/:keyID?hard=true" +func (ocrkc *OCRKeysController) Delete(c *gin.Context) { + id := c.Param("keyID") + key, err := ocrkc.App.GetKeyStore().OCR().Get(id) + if err != nil { + jsonAPIError(c, http.StatusNotFound, err) + return + } + _, err = ocrkc.App.GetKeyStore().OCR().Delete(id) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + ocrkc.App.GetAuditLogger().Audit(audit.OCRKeyBundleDeleted, map[string]interface{}{"id": id}) + jsonAPIResponse(c, presenters.NewOCRKeysBundleResource(key), "offChainReportingKeyBundle") +} + +// Import imports an OCR key bundle +// Example: +// "Post /keys/ocr/import" +func (ocrkc *OCRKeysController) Import(c *gin.Context) { + defer ocrkc.App.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Import request body") + + bytes, err := io.ReadAll(c.Request.Body) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + oldPassword := c.Query("oldpassword") + encryptedOCRKeyBundle, err := ocrkc.App.GetKeyStore().OCR().Import(bytes, oldPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, 
err) + return + } + + ocrkc.App.GetAuditLogger().Audit(audit.OCRKeyBundleImported, map[string]interface{}{ + "OCRID": encryptedOCRKeyBundle.GetID(), + "OCRPublicKeyAddressOnChain": encryptedOCRKeyBundle.PublicKeyAddressOnChain(), + "OCRPublicKeyOffChain": encryptedOCRKeyBundle.PublicKeyOffChain(), + }) + + jsonAPIResponse(c, encryptedOCRKeyBundle, "offChainReportingKeyBundle") +} + +// Export exports an OCR key bundle +// Example: +// "Post /keys/ocr/export" +func (ocrkc *OCRKeysController) Export(c *gin.Context) { + defer ocrkc.App.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Export response body") + + stringID := c.Param("ID") + newPassword := c.Query("newpassword") + bytes, err := ocrkc.App.GetKeyStore().OCR().Export(stringID, newPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + ocrkc.App.GetAuditLogger().Audit(audit.OCRKeyBundleExported, map[string]interface{}{"keyID": stringID}) + c.Data(http.StatusOK, MediaType, bytes) +} diff --git a/core/web/ocr_keys_controller_test.go b/core/web/ocr_keys_controller_test.go new file mode 100644 index 00000000..08cbb310 --- /dev/null +++ b/core/web/ocr_keys_controller_test.go @@ -0,0 +1,97 @@ +package web_test + +import ( + "net/http" + "testing" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOCRKeysController_Index_HappyPath(t *testing.T) { + client, OCRKeyStore := setupOCRKeysControllerTests(t) + + keys, _ := OCRKeyStore.GetAll() + + response, cleanup := client.Get("/v2/keys/ocr") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + resources := 
[]presenters.OCRKeysBundleResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resources) + require.NoError(t, err) + + require.Len(t, resources, len(keys)) + assert.Equal(t, keys[0].ID(), resources[0].ID) +} + +func TestOCRKeysController_Create_HappyPath(t *testing.T) { + client, OCRKeyStore := setupOCRKeysControllerTests(t) + + keys, _ := OCRKeyStore.GetAll() + initialLength := len(keys) + + response, cleanup := client.Post("/v2/keys/ocr", nil) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + keys, _ = OCRKeyStore.GetAll() + require.Len(t, keys, initialLength+1) + + resource := presenters.OCRKeysBundleResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resource) + assert.NoError(t, err) + + var ids []string + for _, key := range keys { + ids = append(ids, key.ID()) + } + require.Contains(t, ids, resource.ID) + + _, err = OCRKeyStore.Get(resource.ID) + require.NoError(t, err) +} + +func TestOCRKeysController_Delete_NonExistentOCRKeyID(t *testing.T) { + client, _ := setupOCRKeysControllerTests(t) + + nonExistentOCRKeyID := "eb81f4a35033ac8dd68b9d33a039a713d6fd639af6852b81f47ffeda1c95de54" + response, cleanup := client.Delete("/v2/keys/ocr/" + nonExistentOCRKeyID) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusNotFound, response.StatusCode) +} + +func TestOCRKeysController_Delete_HappyPath(t *testing.T) { + client, OCRKeyStore := setupOCRKeysControllerTests(t) + + keys, _ := OCRKeyStore.GetAll() + initialLength := len(keys) + key, _ := OCRKeyStore.Create() + + response, cleanup := client.Delete("/v2/keys/ocr/" + key.ID()) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusOK, response.StatusCode) + assert.Error(t, utils.JustError(OCRKeyStore.Get(key.ID()))) + + keys, _ = OCRKeyStore.GetAll() + assert.Equal(t, initialLength, len(keys)) +} + +func setupOCRKeysControllerTests(t *testing.T) (cltest.HTTPClientCleaner, keystore.OCR) { + t.Parallel() + + app := 
cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + + require.NoError(t, app.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + + return client, app.GetKeyStore().OCR() +} diff --git a/core/web/p2p_keys_controller.go b/core/web/p2p_keys_controller.go new file mode 100644 index 00000000..9c086c7c --- /dev/null +++ b/core/web/p2p_keys_controller.go @@ -0,0 +1,137 @@ +package web + +import ( + "io" + "net/http" + + "github.com/gin-gonic/gin" + + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// P2PKeysController manages P2P keys +type P2PKeysController struct { + App plugin.Application +} + +// Index lists P2P keys +// Example: +// "GET /keys/p2p" +func (p2pkc *P2PKeysController) Index(c *gin.Context) { + keys, err := p2pkc.App.GetKeyStore().P2P().GetAll() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + jsonAPIResponse(c, presenters.NewP2PKeyResources(keys), "p2pKey") +} + +const keyType = "Ed25519" + +// Create and return a P2P key +// Example: +// "POST /keys/p2p" +func (p2pkc *P2PKeysController) Create(c *gin.Context) { + key, err := p2pkc.App.GetKeyStore().P2P().Create() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + p2pkc.App.GetAuditLogger().Audit(audit.KeyCreated, map[string]interface{}{ + "type": "p2p", + "id": key.ID(), + "p2pPublicKey": key.PublicKeyHex(), + "p2pPeerID": key.PeerID(), + "p2pType": keyType, + }) + jsonAPIResponse(c, presenters.NewP2PKeyResource(key), "p2pKey") +} + +// Delete a P2P key +// Example: +// "DELETE /keys/p2p/:keyID" +// "DELETE /keys/p2p/:keyID?hard=true" +func (p2pkc *P2PKeysController) Delete(c *gin.Context) { + keyID, err := p2pkey.MakePeerID(c.Param("keyID")) + if err != nil { + 
jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + key, err := p2pkc.App.GetKeyStore().P2P().Get(keyID) + if err != nil { + jsonAPIError(c, http.StatusNotFound, err) + return + } + _, err = p2pkc.App.GetKeyStore().P2P().Delete(key.PeerID()) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + p2pkc.App.GetAuditLogger().Audit(audit.KeyDeleted, map[string]interface{}{ + "type": "p2p", + "id": keyID, + }) + + jsonAPIResponse(c, presenters.NewP2PKeyResource(key), "p2pKey") +} + +// Import imports a P2P key +// Example: +// "Post /keys/p2p/import" +func (p2pkc *P2PKeysController) Import(c *gin.Context) { + defer p2pkc.App.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Import request body") + + bytes, err := io.ReadAll(c.Request.Body) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + oldPassword := c.Query("oldpassword") + key, err := p2pkc.App.GetKeyStore().P2P().Import(bytes, oldPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + p2pkc.App.GetAuditLogger().Audit(audit.KeyImported, map[string]interface{}{ + "type": "p2p", + "id": key.ID(), + "p2pPublicKey": key.PublicKeyHex(), + "p2pPeerID": key.PeerID(), + "p2pType": keyType, + }) + + jsonAPIResponse(c, presenters.NewP2PKeyResource(key), "p2pKey") +} + +// Export exports a P2P key +// Example: +// "Post /keys/p2p/export" +func (p2pkc *P2PKeysController) Export(c *gin.Context) { + defer p2pkc.App.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Export request body") + + keyID, err := p2pkey.MakePeerID(c.Param("ID")) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + newPassword := c.Query("newpassword") + bytes, err := p2pkc.App.GetKeyStore().P2P().Export(keyID, newPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + p2pkc.App.GetAuditLogger().Audit(audit.KeyExported, 
map[string]interface{}{ + "type": "p2p", + "id": keyID, + }) + + c.Data(http.StatusOK, MediaType, bytes) +} diff --git a/core/web/p2p_keys_controller_test.go b/core/web/p2p_keys_controller_test.go new file mode 100644 index 00000000..dfdb85f6 --- /dev/null +++ b/core/web/p2p_keys_controller_test.go @@ -0,0 +1,121 @@ +package web_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestP2PKeysController_Index_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupP2PKeysControllerTests(t) + keys, _ := keyStore.P2P().GetAll() + + response, cleanup := client.Get("/v2/keys/p2p") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + resources := []presenters.P2PKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resources) + assert.NoError(t, err) + + require.Len(t, resources, len(keys)) + + assert.Equal(t, keys[0].ID(), resources[0].ID) + assert.Equal(t, keys[0].PublicKeyHex(), resources[0].PubKey) + assert.Equal(t, keys[0].PeerID().String(), resources[0].PeerID) +} + +func TestP2PKeysController_Create_HappyPath(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + keyStore := app.GetKeyStore() + + response, cleanup := client.Post("/v2/keys/p2p", nil) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + keys, _ := keyStore.P2P().GetAll() + require.Len(t, 
keys, 1) + + resource := presenters.P2PKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resource) + assert.NoError(t, err) + + assert.Equal(t, keys[0].ID(), resource.ID) + assert.Equal(t, keys[0].PublicKeyHex(), resource.PubKey) + assert.Equal(t, keys[0].PeerID().String(), resource.PeerID) + + var peerID p2pkey.PeerID + require.NoError(t, peerID.UnmarshalText([]byte(resource.PeerID))) + _, err = keyStore.P2P().Get(peerID) + require.NoError(t, err) +} + +func TestP2PKeysController_Delete_NonExistentP2PKeyID(t *testing.T) { + t.Parallel() + + client, _ := setupP2PKeysControllerTests(t) + + nonExistentP2PKeyID := "12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6a" + response, cleanup := client.Delete("/v2/keys/p2p/" + nonExistentP2PKeyID) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusNotFound, response.StatusCode) +} + +func TestP2PKeysController_Delete_InvalidPeerID(t *testing.T) { + t.Parallel() + + client, _ := setupP2PKeysControllerTests(t) + + nonExistentP2PKeyID := "1234567890" + response, cleanup := client.Delete("/v2/keys/p2p/" + nonExistentP2PKeyID) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusUnprocessableEntity, response.StatusCode) +} + +func TestP2PKeysController_Delete_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupP2PKeysControllerTests(t) + + keys, _ := keyStore.P2P().GetAll() + initialLength := len(keys) + key, _ := keyStore.P2P().Create() + + response, cleanup := client.Delete(fmt.Sprintf("/v2/keys/p2p/%s", key.ID())) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusOK, response.StatusCode) + assert.Error(t, utils.JustError(keyStore.P2P().Get(key.PeerID()))) + + keys, _ = keyStore.P2P().GetAll() + assert.Equal(t, initialLength, len(keys)) +} + +func setupP2PKeysControllerTests(t *testing.T) (cltest.HTTPClientCleaner, keystore.Master) { + t.Helper() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + require.NoError(t, 
app.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, app.KeyStore.P2P().Add(cltest.DefaultP2PKey)) + + client := app.NewHTTPClient(nil) + + return client, app.GetKeyStore() +} diff --git a/core/web/ping_controller.go b/core/web/ping_controller.go new file mode 100644 index 00000000..2f9e7ae3 --- /dev/null +++ b/core/web/ping_controller.go @@ -0,0 +1,19 @@ +package web + +import ( + "net/http" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + + "github.com/gin-gonic/gin" +) + +// PingController has the ping endpoint. +type PingController struct { + App plugin.Application +} + +// Show returns pong. +func (eic *PingController) Show(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"message": "pong"}) +} diff --git a/core/web/ping_controller_test.go b/core/web/ping_controller_test.go new file mode 100644 index 00000000..5ca10fbd --- /dev/null +++ b/core/web/ping_controller_test.go @@ -0,0 +1,88 @@ +package web_test + +import ( + "net/http" + "strings" + "testing" + + "github.com/google/uuid" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + clhttptest "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/httptest" + "github.com/goplugin/pluginv3.0/v2/core/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPingController_Show_APICredentials(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + resp, cleanup := client.Get("/v2/ping") + defer cleanup() + cltest.AssertServerResponse(t, resp, http.StatusOK) + body := string(cltest.ParseResponseBody(t, resp)) + require.Equal(t, `{"message":"pong"}`, strings.TrimSpace(body)) +} + +func TestPingController_Show_ExternalInitiatorCredentials(t *testing.T) { + 
t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + eia := &auth.Token{ + AccessKey: "abracadabra", + Secret: "opensesame", + } + eir_url := cltest.WebURL(t, "http://localhost:8888") + eir := &bridges.ExternalInitiatorRequest{ + Name: uuid.New().String(), + URL: &eir_url, + } + + ei, err := bridges.NewExternalInitiator(eia, eir) + require.NoError(t, err) + err = app.BridgeORM().CreateExternalInitiator(ei) + require.NoError(t, err) + + url := app.Server.URL + "/v2/ping" + request, err := http.NewRequestWithContext(testutils.Context(t), "GET", url, nil) + require.NoError(t, err) + request.Header.Set("Content-Type", web.MediaType) + request.Header.Set("X-Plugin-EA-AccessKey", eia.AccessKey) + request.Header.Set("X-Plugin-EA-Secret", eia.Secret) + + client := clhttptest.NewTestLocalOnlyHTTPClient() + resp, err := client.Do(request) + require.NoError(t, err) + defer func() { assert.NoError(t, resp.Body.Close()) }() + + cltest.AssertServerResponse(t, resp, http.StatusOK) + body := string(cltest.ParseResponseBody(t, resp)) + require.Equal(t, `{"message":"pong"}`, strings.TrimSpace(body)) +} + +func TestPingController_Show_NoCredentials(t *testing.T) { + t.Parallel() + + ctx := testutils.Context(t) + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(ctx)) + + client := clhttptest.NewTestLocalOnlyHTTPClient() + url := app.Server.URL + "/v2/ping" + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + require.NoError(t, err) + resp, err := client.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusUnauthorized, resp.StatusCode) +} diff --git a/core/web/pipeline_job_spec_errors_controller.go b/core/web/pipeline_job_spec_errors_controller.go new file mode 100644 index 00000000..853c54e3 --- /dev/null +++ b/core/web/pipeline_job_spec_errors_controller.go @@ -0,0 +1,42 @@ +package web + +import ( + "database/sql" + "net/http" + + "github.com/gin-gonic/gin" + 
"github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +// PipelineJobSpecErrorsController manages PipelineJobSpecError requests +type PipelineJobSpecErrorsController struct { + App plugin.Application +} + +// Destroy deletes a PipelineJobSpecError record from the database, effectively +// silencing the error notification +func (psec *PipelineJobSpecErrorsController) Destroy(c *gin.Context) { + jobSpec := job.SpecError{} + err := jobSpec.SetID(c.Param("ID")) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + err = psec.App.JobORM().DismissError(c.Request.Context(), jobSpec.ID) + if errors.Is(err, sql.ErrNoRows) { + jsonAPIError(c, http.StatusNotFound, errors.New("PipelineJobSpecError not found")) + return + } + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + psec.App.GetAuditLogger().Audit(audit.JobErrorDismissed, map[string]interface{}{"id": jobSpec.ID}) + jsonAPIResponseWithStatus(c, nil, "job", http.StatusNoContent) +} diff --git a/core/web/pipeline_job_spec_errors_controller_test.go b/core/web/pipeline_job_spec_errors_controller_test.go new file mode 100644 index 00000000..a46abb9b --- /dev/null +++ b/core/web/pipeline_job_spec_errors_controller_test.go @@ -0,0 +1,57 @@ +package web_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" +) + +func TestPipelineJobSpecErrorsController_Delete_2(t *testing.T) { + app, client, _, jID, _, _ := setupJobSpecsControllerTestsWithJobs(t) + + description := "job spec error description" + + require.NoError(t, app.JobORM().RecordError(jID, description)) + + // FindJob -> find error + j, err := 
app.JobORM().FindJob(testutils.Context(t), jID) + require.NoError(t, err) + t.Log(j.JobSpecErrors) + require.GreaterOrEqual(t, len(j.JobSpecErrors), 1) // second 'got nil head' error may have occurred also + var id int64 = -1 + for i := range j.JobSpecErrors { + jse := j.JobSpecErrors[i] + if jse.Description == description { + id = jse.ID + break + } + } + require.NotEqual(t, -1, id, "error not found") + + resp, cleanup := client.Delete(fmt.Sprintf("/v2/pipeline/job_spec_errors/%v", id)) + defer cleanup() + cltest.AssertServerResponse(t, resp, http.StatusNoContent) + + // FindJob -> error is gone + j, err = app.JobORM().FindJob(testutils.Context(t), j.ID) + require.NoError(t, err) + for i := range j.JobSpecErrors { + jse := j.JobSpecErrors[i] + require.NotEqual(t, id, jse.ID) + } +} + +func TestPipelineJobSpecErrorsController_Delete_NotFound(t *testing.T) { + _, client, _, _, _, _ := setupJobSpecsControllerTestsWithJobs(t) + + resp, cleanup := client.Delete("/v2/pipeline/job_spec_errors/1") + defer cleanup() + + assert.Equal(t, http.StatusNotFound, resp.StatusCode, "Response should be not found") +} diff --git a/core/web/pipeline_runs_controller.go b/core/web/pipeline_runs_controller.go new file mode 100644 index 00000000..844eaf42 --- /dev/null +++ b/core/web/pipeline_runs_controller.go @@ -0,0 +1,184 @@ +package web + +import ( + "encoding/json" + "io" + "net/http" + "strconv" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + "github.com/goplugin/pluginv3.0/v2/core/web/auth" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// PipelineRunsController manages V2 job run requests. 
+type PipelineRunsController struct { + App plugin.Application +} + +// Index returns all pipeline runs for a job. +// Example: +// "GET /jobs/:ID/runs" +func (prc *PipelineRunsController) Index(c *gin.Context, size, page, offset int) { + id := c.Param("ID") + + // Temporary: if no size is passed in, use a large page size. Remove once frontend can handle pagination + if c.Query("size") == "" { + size = 1000 + } + + var pipelineRuns []pipeline.Run + var count int + var err error + + if id == "" { + pipelineRuns, count, err = prc.App.JobORM().PipelineRuns(nil, offset, size) + } else { + jobSpec := job.Job{} + err = jobSpec.SetID(c.Param("ID")) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + pipelineRuns, count, err = prc.App.JobORM().PipelineRuns(&jobSpec.ID, offset, size) + } + + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + res := presenters.NewPipelineRunResources(pipelineRuns, prc.App.GetLogger()) + paginatedResponse(c, "pipelineRun", size, page, res, count, err) +} + +// Show returns a specified pipeline run. +// Example: +// "GET /jobs/:ID/runs/:runID" +func (prc *PipelineRunsController) Show(c *gin.Context) { + pipelineRun := pipeline.Run{} + err := pipelineRun.SetID(c.Param("runID")) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + pipelineRun, err = prc.App.PipelineORM().FindRun(pipelineRun.ID) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + res := presenters.NewPipelineRunResource(pipelineRun, prc.App.GetLogger()) + jsonAPIResponse(c, res, "pipelineRun") +} + +// Create triggers a pipeline run for a job. 
+// Example: +// "POST /jobs/:ID/runs" +func (prc *PipelineRunsController) Create(c *gin.Context) { + respondWithPipelineRun := func(jobRunID int64) { + pipelineRun, err := prc.App.PipelineORM().FindRun(jobRunID) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + res := presenters.NewPipelineRunResource(pipelineRun, prc.App.GetLogger()) + jsonAPIResponse(c, res, "pipelineRun") + } + + bodyBytes, err := io.ReadAll(c.Request.Body) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + idStr := c.Param("ID") + + user, isUser := auth.GetAuthenticatedUser(c) + ei, _ := auth.GetAuthenticatedExternalInitiator(c) + authorizer := webhook.NewAuthorizer(prc.App.GetSqlxDB().DB, user, ei) + + // Is it a UUID? Then process it as a webhook job + jobUUID, err := uuid.Parse(idStr) + if err == nil { + canRun, err2 := authorizer.CanRun(c.Request.Context(), prc.App.GetConfig().JobPipeline(), jobUUID) + if err2 != nil { + jsonAPIError(c, http.StatusInternalServerError, err2) + return + } + if canRun { + jobRunID, err3 := prc.App.RunWebhookJobV2(c.Request.Context(), jobUUID, string(bodyBytes), pipeline.JSONSerializable{}) + if errors.Is(err3, webhook.ErrJobNotExists) { + jsonAPIError(c, http.StatusNotFound, err3) + return + } else if err3 != nil { + jsonAPIError(c, http.StatusInternalServerError, err3) + return + } + respondWithPipelineRun(jobRunID) + } else { + jsonAPIError(c, http.StatusUnauthorized, errors.Errorf("external initiator %s is not allowed to run job %s", ei.Name, jobUUID)) + } + return + } + + // only users are allowed to run jobs using int IDs - EIs not allowed + if isUser { + // Is it an int32? 
Then process it regardless of type + var jobID int32 + jobID64, err := strconv.ParseInt(idStr, 10, 32) + if err == nil { + jobID = int32(jobID64) + jobRunID, err := prc.App.RunJobV2(c.Request.Context(), jobID, nil) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + respondWithPipelineRun(jobRunID) + return + } + } + + jsonAPIError(c, http.StatusUnprocessableEntity, errors.New("bad job ID")) +} + +// Resume finishes a task and resumes the pipeline run. +// Example: +// "PATCH /jobs/:ID/runs/:runID" +func (prc *PipelineRunsController) Resume(c *gin.Context) { + taskID, err := uuid.Parse(c.Param("runID")) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + rr := pipeline.ResumeRequest{} + decoder := json.NewDecoder(c.Request.Body) + err = errors.Wrap(decoder.Decode(&rr), "failed to unmarshal JSON body") + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + result, err := rr.ToResult() + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + if err := prc.App.ResumeJobV2(c.Request.Context(), taskID, result); err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + prc.App.GetAuditLogger().Audit(audit.UnauthedRunResumed, map[string]interface{}{"runID": c.Param("runID")}) + c.Status(http.StatusOK) +} diff --git a/core/web/pipeline_runs_controller_test.go b/core/web/pipeline_runs_controller_test.go new file mode 100644 index 00000000..748c3e1d --- /dev/null +++ b/core/web/pipeline_runs_controller_test.go @@ -0,0 +1,317 @@ +package web_test + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "testing" + "time" + + "github.com/google/uuid" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/pelletier/go-toml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + commonconfig 
"github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestPipelineRunsController_CreateWithBody_HappyPath(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.HTTPRequest.DefaultTimeout = commonconfig.MustNewDuration(2 * time.Second) + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(10 * time.Millisecond) + }) + + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + require.NoError(t, app.Start(testutils.Context(t))) + + // Setup the bridge + mockServer := cltest.NewHTTPMockServerWithRequest(t, 200, `{}`, func(r *http.Request) { + defer r.Body.Close() + bs, err := io.ReadAll(r.Body) + require.NoError(t, err) + require.Equal(t, `{"result":"12345"}`, string(bs)) + }) + + _, bridge := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{URL: mockServer.URL}, app.GetConfig().Database()) + + // Add the job + uuid := uuid.New() + { + tomlStr := fmt.Sprintf(testspecs.WebhookSpecWithBodyTemplate, uuid, bridge.Name.String()) + jb, err := webhook.ValidatedWebhookSpec(tomlStr, app.GetExternalInitiatorManager()) + require.NoError(t, err) + + err = app.AddJobV2(testutils.Context(t), &jb) + require.NoError(t, err) + } + + // Give the job.Spawner ample time to discover the job and start its service + // (because Postgres events don't seem to work here) + time.Sleep(3 * 
time.Second) + + // Make the request + { + client := app.NewHTTPClient(nil) + body := strings.NewReader(`{"data":{"result":"123.45"}}`) + response, cleanup := client.Post("/v2/jobs/"+uuid.String()+"/runs", body) + defer cleanup() + cltest.AssertServerResponse(t, response, http.StatusOK) + + var parsedResponse presenters.PipelineRunResource + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &parsedResponse) + assert.NoError(t, err) + assert.NotNil(t, parsedResponse.ID) + assert.NotNil(t, parsedResponse.CreatedAt) + assert.NotNil(t, parsedResponse.FinishedAt) + require.Len(t, parsedResponse.TaskRuns, 3) + } +} + +func TestPipelineRunsController_CreateNoBody_HappyPath(t *testing.T) { + t.Parallel() + + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.JobPipeline.HTTPRequest.DefaultTimeout = commonconfig.MustNewDuration(2 * time.Second) + c.Database.Listener.FallbackPollInterval = commonconfig.MustNewDuration(10 * time.Millisecond) + }) + + app := cltest.NewApplicationWithConfig(t, cfg, ethClient) + require.NoError(t, app.Start(testutils.Context(t))) + + // Setup the bridges + mockServer := cltest.NewHTTPMockServer(t, 200, "POST", `{"data":{"result":"123.45"}}`) + + _, bridge := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{URL: mockServer.URL}, app.GetConfig().Database()) + + mockServer = cltest.NewHTTPMockServerWithRequest(t, 200, `{}`, func(r *http.Request) { + defer r.Body.Close() + bs, err := io.ReadAll(r.Body) + require.NoError(t, err) + require.Equal(t, `{"result":"12345"}`, string(bs)) + }) + + _, submitBridge := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{URL: mockServer.URL}, app.GetConfig().Database()) + + // Add the job + uuid := uuid.New() + { + tomlStr := testspecs.GetWebhookSpecNoBody(uuid, bridge.Name.String(), submitBridge.Name.String()) + jb, err := webhook.ValidatedWebhookSpec(tomlStr, 
app.GetExternalInitiatorManager()) + require.NoError(t, err) + + err = app.AddJobV2(testutils.Context(t), &jb) + require.NoError(t, err) + } + + // Give the job.Spawner ample time to discover the job and start its service + // (because Postgres events don't seem to work here) + time.Sleep(3 * time.Second) + + // Make the request (authorized as user) + { + client := app.NewHTTPClient(nil) + response, cleanup := client.Post("/v2/jobs/"+uuid.String()+"/runs", nil) + defer cleanup() + cltest.AssertServerResponse(t, response, http.StatusOK) + + var parsedResponse presenters.PipelineRunResource + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &parsedResponse) + bs, _ := json.MarshalIndent(parsedResponse, "", " ") + t.Log(string(bs)) + assert.NoError(t, err) + assert.NotNil(t, parsedResponse.ID) + assert.NotNil(t, parsedResponse.CreatedAt) + assert.NotNil(t, parsedResponse.FinishedAt) + require.Len(t, parsedResponse.TaskRuns, 4) + } +} + +func TestPipelineRunsController_Index_GlobalHappyPath(t *testing.T) { + client, jobID, runIDs := setupPipelineRunsControllerTests(t) + + url := url.URL{Path: "/v2/pipeline/runs"} + query := url.Query() + query.Set("evmChainID", cltest.FixtureChainID.String()) + url.RawQuery = query.Encode() + + response, cleanup := client.Get(url.String()) + defer cleanup() + cltest.AssertServerResponse(t, response, http.StatusOK) + + var parsedResponse []presenters.PipelineRunResource + responseBytes := cltest.ParseResponseBody(t, response) + assert.Contains(t, string(responseBytes), `"outputs":["3"],"errors":[null],"allErrors":["uh oh"],"fatalErrors":[null],"inputs":{"answer":"3","ds1":"{\"USD\": 1}","ds1_multiply":"3","ds1_parse":1,"ds2":"{\"USD\": 1}","ds2_multiply":"3","ds2_parse":1,"ds3":{},"jobRun":{"meta":null}`) + + err := web.ParseJSONAPIResponse(responseBytes, &parsedResponse) + assert.NoError(t, err) + + require.Len(t, parsedResponse, 2) + // Job Run ID is returned in descending order by run ID (most recent run first) 
+ assert.Equal(t, parsedResponse[1].ID, strconv.Itoa(int(runIDs[0]))) + assert.NotNil(t, parsedResponse[1].CreatedAt) + assert.NotNil(t, parsedResponse[1].FinishedAt) + assert.Equal(t, jobID, parsedResponse[1].PipelineSpec.JobID) + require.Len(t, parsedResponse[1].TaskRuns, 8) +} + +func TestPipelineRunsController_Index_HappyPath(t *testing.T) { + client, jobID, runIDs := setupPipelineRunsControllerTests(t) + + response, cleanup := client.Get("/v2/jobs/" + fmt.Sprintf("%v", jobID) + "/runs") + defer cleanup() + cltest.AssertServerResponse(t, response, http.StatusOK) + + var parsedResponse []presenters.PipelineRunResource + responseBytes := cltest.ParseResponseBody(t, response) + assert.Contains(t, string(responseBytes), `"outputs":["3"],"errors":[null],"allErrors":["uh oh"],"fatalErrors":[null],"inputs":{"answer":"3","ds1":"{\"USD\": 1}","ds1_multiply":"3","ds1_parse":1,"ds2":"{\"USD\": 1}","ds2_multiply":"3","ds2_parse":1,"ds3":{},"jobRun":{"meta":null}`) + + err := web.ParseJSONAPIResponse(responseBytes, &parsedResponse) + assert.NoError(t, err) + + require.Len(t, parsedResponse, 2) + // Job Run ID is returned in descending order by run ID (most recent run first) + assert.Equal(t, parsedResponse[1].ID, strconv.Itoa(int(runIDs[0]))) + assert.NotNil(t, parsedResponse[1].CreatedAt) + assert.NotNil(t, parsedResponse[1].FinishedAt) + assert.Equal(t, jobID, parsedResponse[1].PipelineSpec.JobID) + require.Len(t, parsedResponse[1].TaskRuns, 8) +} + +func TestPipelineRunsController_Index_Pagination(t *testing.T) { + client, jobID, runIDs := setupPipelineRunsControllerTests(t) + + response, cleanup := client.Get("/v2/jobs/" + fmt.Sprintf("%v", jobID) + "/runs?page=1&size=1") + defer cleanup() + cltest.AssertServerResponse(t, response, http.StatusOK) + + var parsedResponse []presenters.PipelineRunResource + responseBytes := cltest.ParseResponseBody(t, response) + assert.Contains(t, string(responseBytes), `"outputs":["3"],"errors":[null],"allErrors":["uh 
oh"],"fatalErrors":[null],"inputs":{"answer":"3","ds1":"{\"USD\": 1}","ds1_multiply":"3","ds1_parse":1,"ds2":"{\"USD\": 1}","ds2_multiply":"3","ds2_parse":1,"ds3":{},"jobRun":{"meta":null}`) + assert.Contains(t, string(responseBytes), `"meta":{"count":2}`) + + err := web.ParseJSONAPIResponse(responseBytes, &parsedResponse) + assert.NoError(t, err) + + require.Len(t, parsedResponse, 1) + assert.Equal(t, parsedResponse[0].ID, strconv.Itoa(int(runIDs[1]))) + assert.NotNil(t, parsedResponse[0].CreatedAt) + assert.NotNil(t, parsedResponse[0].FinishedAt) + require.Len(t, parsedResponse[0].TaskRuns, 8) +} + +func TestPipelineRunsController_Show_HappyPath(t *testing.T) { + client, jobID, runIDs := setupPipelineRunsControllerTests(t) + + response, cleanup := client.Get("/v2/jobs/" + fmt.Sprintf("%v", jobID) + "/runs/" + fmt.Sprintf("%v", runIDs[0])) + defer cleanup() + cltest.AssertServerResponse(t, response, http.StatusOK) + + var parsedResponse presenters.PipelineRunResource + responseBytes := cltest.ParseResponseBody(t, response) + assert.Contains(t, string(responseBytes), `"outputs":["3"],"errors":[null],"allErrors":["uh oh"],"fatalErrors":[null],"inputs":{"answer":"3","ds1":"{\"USD\": 1}","ds1_multiply":"3","ds1_parse":1,"ds2":"{\"USD\": 1}","ds2_multiply":"3","ds2_parse":1,"ds3":{},"jobRun":{"meta":null}`) + err := web.ParseJSONAPIResponse(responseBytes, &parsedResponse) + require.NoError(t, err) + + assert.Equal(t, parsedResponse.ID, strconv.Itoa(int(runIDs[0]))) + assert.NotNil(t, parsedResponse.CreatedAt) + assert.NotNil(t, parsedResponse.FinishedAt) + require.Len(t, parsedResponse.TaskRuns, 8) +} + +func TestPipelineRunsController_ShowRun_InvalidID(t *testing.T) { + t.Parallel() + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + + response, cleanup := client.Get("/v2/jobs/1/runs/invalid-run-ID") + defer cleanup() + cltest.AssertServerResponse(t, response, 
http.StatusUnprocessableEntity) +} + +func setupPipelineRunsControllerTests(t *testing.T) (cltest.HTTPClientCleaner, int32, []int64) { + t.Parallel() + ethClient := cltest.NewEthMocksWithStartupAssertions(t) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil) + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.OCR.Enabled = ptr(true) + c.P2P.V2.Enabled = ptr(true) + c.P2P.V2.ListenAddresses = &[]string{fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t))} + c.P2P.PeerID = &cltest.DefaultP2PPeerID + c.EVM[0].NonceAutoSync = ptr(false) + c.EVM[0].BalanceMonitor.Enabled = ptr(false) + }) + app := cltest.NewApplicationWithConfigAndKey(t, cfg, ethClient, cltest.DefaultP2PKey) + require.NoError(t, app.Start(testutils.Context(t))) + require.NoError(t, app.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + client := app.NewHTTPClient(nil) + + key, _ := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) + + nameAndExternalJobID := uuid.New() + sp := fmt.Sprintf(` + type = "offchainreporting" + schemaVersion = 1 + externalJobID = "%s" + name = "%s" + contractAddress = "%s" + evmChainID = "0" + p2pv2Bootstrappers = ["12D3KooWHfYFQ8hGttAYbMCevQVESEQhzJAqFZokMVtom8bNxwGq@127.0.0.1:5001"] + keyBundleID = "%s" + transmitterAddress = "%s" + observationSource = """ + // data source 1 + ds1 [type=memo value=<"{\\"USD\\": 1}">]; + ds1_parse [type=jsonparse path="USD"]; + ds1_multiply [type=multiply times=3]; + + ds2 [type=memo value=<"{\\"USD\\": 1}">]; + ds2_parse [type=jsonparse path="USD"]; + ds2_multiply [type=multiply times=3]; + + ds3 [type=fail msg="uh oh"]; + + ds1 -> ds1_parse -> ds1_multiply -> answer; + ds2 -> ds2_parse -> ds2_multiply -> answer; + ds3 -> answer; + + answer [type=median index=0]; + """ + `, nameAndExternalJobID, nameAndExternalJobID, testutils.NewAddress().Hex(), cltest.DefaultOCRKeyBundleID, key.Address.Hex()) + var jb job.Job + err := toml.Unmarshal([]byte(sp), &jb) + require.NoError(t, err) + var 
os job.OCROracleSpec + err = toml.Unmarshal([]byte(sp), &os) + require.NoError(t, err) + jb.OCROracleSpec = &os + + err = app.AddJobV2(testutils.Context(t), &jb) + require.NoError(t, err) + + firstRunID, err := app.RunJobV2(testutils.Context(t), jb.ID, nil) + require.NoError(t, err) + secondRunID, err := app.RunJobV2(testutils.Context(t), jb.ID, nil) + require.NoError(t, err) + + return client, jb.ID, []int64{firstRunID, secondRunID} +} diff --git a/core/web/presenters/bridges.go b/core/web/presenters/bridges.go new file mode 100644 index 00000000..3ecc359d --- /dev/null +++ b/core/web/presenters/bridges.go @@ -0,0 +1,40 @@ +package presenters + +import ( + "time" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/bridges" +) + +// BridgeResource represents a Bridge JSONAPI resource. +type BridgeResource struct { + JAID + Name string `json:"name"` + URL string `json:"url"` + Confirmations uint32 `json:"confirmations"` + // The IncomingToken is only provided when creating a Bridge + IncomingToken string `json:"incomingToken,omitempty"` + OutgoingToken string `json:"outgoingToken"` + MinimumContractPayment *assets.Link `json:"minimumContractPayment"` + CreatedAt time.Time `json:"createdAt"` +} + +// GetName implements the api2go EntityNamer interface +func (r BridgeResource) GetName() string { + return "bridges" +} + +// NewBridgeResource constructs a new BridgeResource +func NewBridgeResource(b bridges.BridgeType) *BridgeResource { + return &BridgeResource{ + // Uses the name as the id...Should change this to the id + JAID: NewJAID(b.Name.String()), + Name: b.Name.String(), + URL: b.URL.String(), + Confirmations: b.Confirmations, + OutgoingToken: b.OutgoingToken, + MinimumContractPayment: b.MinimumContractPayment, + CreatedAt: b.CreatedAt, + } +} diff --git a/core/web/presenters/bridges_test.go b/core/web/presenters/bridges_test.go new file mode 100644 index 00000000..883194e1 --- /dev/null +++ 
b/core/web/presenters/bridges_test.go @@ -0,0 +1,81 @@ +package presenters + +import ( + "net/url" + "testing" + "time" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +func TestBridgeResource(t *testing.T) { + t.Parallel() + + timestamp := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + url, err := url.Parse("https://bridge.example.com/api") + require.NoError(t, err) + + bridge := bridges.BridgeType{ + Name: "test", + URL: models.WebURL(*url), + Confirmations: 1, + OutgoingToken: "vjNL7X8Ea6GFJoa6PBsvK2ECzNK3b8IZ", + MinimumContractPayment: assets.NewLinkFromJuels(1), + CreatedAt: timestamp, + } + + r := NewBridgeResource(bridge) + + b, err := jsonapi.Marshal(r) + require.NoError(t, err) + + expected := ` +{ + "data": { + "type":"bridges", + "id":"test", + "attributes":{ + "name":"test", + "url":"https://bridge.example.com/api", + "confirmations":1, + "outgoingToken":"vjNL7X8Ea6GFJoa6PBsvK2ECzNK3b8IZ", + "minimumContractPayment":"1", + "createdAt":"2000-01-01T00:00:00Z" + } + } +} +` + + assert.JSONEq(t, expected, string(b)) + + // Test insertion of IncomingToken + r.IncomingToken = "cd+OfGXy3UHEDAlD0y27F6/rJE14X1UI" + b, err = jsonapi.Marshal(r) + require.NoError(t, err) + + expected = ` +{ + "data": { + "type":"bridges", + "id":"test", + "attributes":{ + "name":"test", + "url":"https://bridge.example.com/api", + "confirmations":1, + "incomingToken": "cd+OfGXy3UHEDAlD0y27F6/rJE14X1UI", + "outgoingToken":"vjNL7X8Ea6GFJoa6PBsvK2ECzNK3b8IZ", + "minimumContractPayment":"1", + "createdAt":"2000-01-01T00:00:00Z" + } + } +} +` + + assert.JSONEq(t, expected, string(b)) +} diff --git a/core/web/presenters/chain.go b/core/web/presenters/chain.go new file mode 100644 index 00000000..99cf9a1d --- /dev/null +++ b/core/web/presenters/chain.go @@ 
-0,0 +1,15 @@ +package presenters + +type ChainResource struct { + JAID + Enabled bool `json:"enabled"` + Config string `json:"config"` // TOML +} + +type NodeResource struct { + JAID + ChainID string `json:"chainID"` + Name string `json:"name"` + Config string `json:"config"` // TOML + State string `json:"state"` +} diff --git a/core/web/presenters/chain_msg_test.go b/core/web/presenters/chain_msg_test.go new file mode 100644 index 00000000..851ebcdd --- /dev/null +++ b/core/web/presenters/chain_msg_test.go @@ -0,0 +1,69 @@ +package presenters + +import ( + "fmt" + "testing" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/cosmostest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/solanatest" +) + +func TestSolanaMessageResource(t *testing.T) { + id := "1" + chainID := solanatest.RandomChainID() + r := NewSolanaMsgResource(id, chainID) + assert.Equal(t, chainID, r.ChainID) + + b, err := jsonapi.Marshal(r) + require.NoError(t, err) + + expected := fmt.Sprintf(` + { + "data":{ + "type":"solana_messages", + "id":"%s/%s", + "attributes":{ + "ChainID":"%s", + "from":"", + "to":"", + "amount":0 + } + } + } + `, chainID, id, chainID) + + assert.JSONEq(t, expected, string(b)) +} + +func TestCosmosMessageResource(t *testing.T) { + id := "1" + chainID := cosmostest.RandomChainID() + contractID := "cosmos1p3ucd3ptpw902fluyjzkq3fflq4btddac9sa3s" + r := NewCosmosMsgResource(id, chainID, contractID) + assert.Equal(t, chainID, r.ChainID) + assert.Equal(t, contractID, r.ContractID) + + b, err := jsonapi.Marshal(r) + require.NoError(t, err) + + expected := fmt.Sprintf(` + { + "data":{ + "type":"cosmos_messages", + "id":"%s/%s", + "attributes":{ + "ChainID":"%s", + "ContractID":"%s", + "State":"", + "TxHash":null + } + } + } + `, chainID, id, chainID, contractID) + + assert.JSONEq(t, expected, string(b)) +} diff --git 
a/core/web/presenters/check.go b/core/web/presenters/check.go new file mode 100644 index 00000000..4e1a2147 --- /dev/null +++ b/core/web/presenters/check.go @@ -0,0 +1,18 @@ +package presenters + +import "cmp" + +type Check struct { + JAID + Name string `json:"name"` + Status string `json:"status"` + Output string `json:"output"` +} + +func (c Check) GetName() string { + return "checks" +} + +func CmpCheckName(a, b Check) int { + return cmp.Compare(a.Name, b.Name) +} diff --git a/core/web/presenters/cosmos_chain.go b/core/web/presenters/cosmos_chain.go new file mode 100644 index 00000000..022f0de4 --- /dev/null +++ b/core/web/presenters/cosmos_chain.go @@ -0,0 +1,45 @@ +package presenters + +import ( + "github.com/goplugin/plugin-common/pkg/types" +) + +// CosmosChainResource is an Cosmos chain JSONAPI resource. +type CosmosChainResource struct { + ChainResource +} + +// GetName implements the api2go EntityNamer interface +func (r CosmosChainResource) GetName() string { + return "cosmos_chain" +} + +// NewCosmosChainResource returns a new CosmosChainResource for chain. +func NewCosmosChainResource(chain types.ChainStatus) CosmosChainResource { + return CosmosChainResource{ChainResource{ + JAID: NewJAID(chain.ID), + Config: chain.Config, + Enabled: chain.Enabled, + }} +} + +// CosmosNodeResource is a Cosmos node JSONAPI resource. +type CosmosNodeResource struct { + NodeResource +} + +// GetName implements the api2go EntityNamer interface +func (r CosmosNodeResource) GetName() string { + return "cosmos_node" +} + +// NewCosmosNodeResource returns a new CosmosNodeResource for node. 
+func NewCosmosNodeResource(node types.NodeStatus) CosmosNodeResource { + return CosmosNodeResource{NodeResource{ + JAID: NewPrefixedJAID(node.Name, node.ChainID), + ChainID: node.ChainID, + Name: node.Name, + State: node.State, + Config: node.Config, + }} +} diff --git a/core/web/presenters/cosmos_key.go b/core/web/presenters/cosmos_key.go new file mode 100644 index 00000000..3d8da3d6 --- /dev/null +++ b/core/web/presenters/cosmos_key.go @@ -0,0 +1,34 @@ +package presenters + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/cosmoskey" +) + +// CosmosKeyResource represents a Cosmos key JSONAPI resource. +type CosmosKeyResource struct { + JAID + PubKey string `json:"publicKey"` +} + +// GetName implements the api2go EntityNamer interface +func (CosmosKeyResource) GetName() string { + return "cosmosKeys" +} + +func NewCosmosKeyResource(key cosmoskey.Key) *CosmosKeyResource { + r := &CosmosKeyResource{ + JAID: JAID{ID: key.ID()}, + PubKey: key.PublicKeyStr(), + } + + return r +} + +func NewCosmosKeyResources(keys []cosmoskey.Key) []CosmosKeyResource { + rs := []CosmosKeyResource{} + for _, key := range keys { + rs = append(rs, *NewCosmosKeyResource(key)) + } + + return rs +} diff --git a/core/web/presenters/cosmos_msg.go b/core/web/presenters/cosmos_msg.go new file mode 100644 index 00000000..ab43d394 --- /dev/null +++ b/core/web/presenters/cosmos_msg.go @@ -0,0 +1,24 @@ +package presenters + +// CosmosMsgResource repesents a Cosmos message JSONAPI resource. +type CosmosMsgResource struct { + JAID + ChainID string + ContractID string + State string + TxHash *string +} + +// GetName implements the api2go EntityNamer interface +func (CosmosMsgResource) GetName() string { + return "cosmos_messages" +} + +// NewCosmosMsgResource returns a new partial CosmosMsgResource. 
+func NewCosmosMsgResource(id string, chainID string, contractID string) CosmosMsgResource { + return CosmosMsgResource{ + JAID: NewPrefixedJAID(id, chainID), + ChainID: chainID, + ContractID: contractID, + } +} diff --git a/core/web/presenters/csa_key.go b/core/web/presenters/csa_key.go new file mode 100644 index 00000000..66b7005a --- /dev/null +++ b/core/web/presenters/csa_key.go @@ -0,0 +1,38 @@ +package presenters + +import ( + "fmt" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" +) + +// CSAKeyResource represents a CSA key JSONAPI resource. +type CSAKeyResource struct { + JAID + PubKey string `json:"publicKey"` + Version int `json:"version"` +} + +// GetName implements the api2go EntityNamer interface +func (CSAKeyResource) GetName() string { + return "csaKeys" +} + +func NewCSAKeyResource(key csakey.KeyV2) *CSAKeyResource { + r := &CSAKeyResource{ + JAID: NewJAID(key.ID()), + PubKey: fmt.Sprintf("csa_%s", key.PublicKeyString()), + Version: 1, + } + + return r +} + +func NewCSAKeyResources(keys []csakey.KeyV2) []CSAKeyResource { + rs := []CSAKeyResource{} + for _, key := range keys { + rs = append(rs, *NewCSAKeyResource(key)) + } + + return rs +} diff --git a/core/web/presenters/csa_key_test.go b/core/web/presenters/csa_key_test.go new file mode 100644 index 00000000..2bda0c9e --- /dev/null +++ b/core/web/presenters/csa_key_test.go @@ -0,0 +1,37 @@ +package presenters + +import ( + "fmt" + "testing" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestCSAKeyResource(t *testing.T) { + key, err := csakey.New("passphrase", utils.FastScryptParams) + require.NoError(t, err) + key.ID = 1 + + r := NewCSAKeyResource(key.ToV2()) + b, err := jsonapi.Marshal(r) + require.NoError(t, err) + + expected := fmt.Sprintf(` + { + "data":{ + 
"type":"csaKeys", + "id":"%s", + "attributes":{ + "publicKey": "csa_%s", + "version": 1 + } + } + }`, key.PublicKey.String(), key.PublicKey.String()) + + assert.JSONEq(t, expected, string(b)) +} diff --git a/core/web/presenters/dkgencrypt_key.go b/core/web/presenters/dkgencrypt_key.go new file mode 100644 index 00000000..df939b85 --- /dev/null +++ b/core/web/presenters/dkgencrypt_key.go @@ -0,0 +1,38 @@ +package presenters + +import ( + "github.com/manyminds/api2go/jsonapi" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgencryptkey" +) + +// DKGEncryptKeyResource is just that. +type DKGEncryptKeyResource struct { + JAID + PublicKey string `json:"publicKey"` +} + +var _ jsonapi.EntityNamer = DKGEncryptKeyResource{} + +// GetName implements jsonapi.EntityNamer +func (DKGEncryptKeyResource) GetName() string { + return "encryptedDKGEncryptKeys" +} + +// NewDKGEncryptKeyResource creates a new DKGEncryptKeyResource from the given DKG sign key. +func NewDKGEncryptKeyResource(key dkgencryptkey.Key) *DKGEncryptKeyResource { + return &DKGEncryptKeyResource{ + JAID: JAID{ + ID: key.ID(), + }, + PublicKey: key.PublicKeyString(), + } +} + +// NewDKGEncryptKeyResources creates many DKGEncryptKeyResource objects from the given DKG sign keys. +func NewDKGEncryptKeyResources(keys []dkgencryptkey.Key) (resources []DKGEncryptKeyResource) { + for _, key := range keys { + resources = append(resources, *NewDKGEncryptKeyResource(key)) + } + return +} diff --git a/core/web/presenters/dkgsign_key.go b/core/web/presenters/dkgsign_key.go new file mode 100644 index 00000000..dfacfdc8 --- /dev/null +++ b/core/web/presenters/dkgsign_key.go @@ -0,0 +1,38 @@ +package presenters + +import ( + "github.com/manyminds/api2go/jsonapi" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/dkgsignkey" +) + +// DKGSignKeyResource is just that. 
+type DKGSignKeyResource struct { + JAID + PublicKey string `json:"publicKey"` +} + +var _ jsonapi.EntityNamer = DKGSignKeyResource{} + +// GetName implements jsonapi.EntityNamer +func (DKGSignKeyResource) GetName() string { + return "encryptedDKGSignKeys" +} + +// NewDKGSignKeyResource creates a new DKGSignKeyResource from the given DKG sign key. +func NewDKGSignKeyResource(key dkgsignkey.Key) *DKGSignKeyResource { + return &DKGSignKeyResource{ + JAID: JAID{ + ID: key.ID(), + }, + PublicKey: key.PublicKeyString(), + } +} + +// NewDKGSignKeyResources creates many DKGSignKeyResource objects from the given DKG sign keys. +func NewDKGSignKeyResources(keys []dkgsignkey.Key) (resources []DKGSignKeyResource) { + for _, key := range keys { + resources = append(resources, *NewDKGSignKeyResource(key)) + } + return +} diff --git a/core/web/presenters/eth_key.go b/core/web/presenters/eth_key.go new file mode 100644 index 00000000..8bcaafc4 --- /dev/null +++ b/core/web/presenters/eth_key.go @@ -0,0 +1,76 @@ +package presenters + +import ( + "time" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" +) + +// ETHKeyResource represents a ETH key JSONAPI resource. 
It holds the hex +// representation of the address plus its ETH & PLI balances +type ETHKeyResource struct { + JAID + EVMChainID big.Big `json:"evmChainID"` + Address string `json:"address"` + EthBalance *assets.Eth `json:"ethBalance"` + LinkBalance *commonassets.Link `json:"linkBalance"` + Disabled bool `json:"disabled"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + MaxGasPriceWei *big.Big `json:"maxGasPriceWei"` +} + +// GetName implements the api2go EntityNamer interface +// +// This is named as such for backwards compatibility with the operator ui +// TODO - Standardise this to ethKeys +func (r ETHKeyResource) GetName() string { + return "eTHKeys" +} + +// NewETHKeyOption defines a functional option which allows customisation of the +// EthKeyResource +type NewETHKeyOption func(*ETHKeyResource) + +// NewETHKeyResource constructs a new ETHKeyResource from a Key. +// +// Use the functional options to inject the ETH and PLI balances +func NewETHKeyResource(k ethkey.KeyV2, state ethkey.State, opts ...NewETHKeyOption) *ETHKeyResource { + r := &ETHKeyResource{ + JAID: NewPrefixedJAID(k.Address.Hex(), state.EVMChainID.String()), + EVMChainID: state.EVMChainID, + Address: k.Address.Hex(), + EthBalance: nil, + LinkBalance: nil, + Disabled: state.Disabled, + CreatedAt: state.CreatedAt, + UpdatedAt: state.UpdatedAt, + } + + for _, opt := range opts { + opt(r) + } + + return r +} + +func SetETHKeyEthBalance(ethBalance *assets.Eth) NewETHKeyOption { + return func(r *ETHKeyResource) { + r.EthBalance = ethBalance + } +} + +func SetETHKeyLinkBalance(linkBalance *commonassets.Link) NewETHKeyOption { + return func(r *ETHKeyResource) { + r.LinkBalance = linkBalance + } +} + +func SetETHKeyMaxGasPriceWei(maxGasPriceWei *big.Big) NewETHKeyOption { + return func(r *ETHKeyResource) { + r.MaxGasPriceWei = maxGasPriceWei + } +} diff --git a/core/web/presenters/eth_key_test.go b/core/web/presenters/eth_key_test.go new file mode 100644 index 
00000000..41d7005e --- /dev/null +++ b/core/web/presenters/eth_key_test.go @@ -0,0 +1,104 @@ +package presenters + +import ( + "fmt" + "testing" + "time" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + + "github.com/ethereum/go-ethereum/common" + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestETHKeyResource(t *testing.T) { + var ( + now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + addressStr = "0x2aCFF2ec69aa9945Ed84f4F281eCCF6911A3B0eD" + address = common.HexToAddress(addressStr) + ) + eip55address, err := ethkey.NewEIP55Address(addressStr) + require.NoError(t, err) + key := ethkey.KeyV2{ + Address: address, + EIP55Address: eip55address, + } + + state := ethkey.State{ + ID: 1, + EVMChainID: *big.NewI(42), + Address: eip55address, + CreatedAt: now, + UpdatedAt: now, + Disabled: true, + } + + r := NewETHKeyResource(key, state, + SetETHKeyEthBalance(assets.NewEth(1)), + SetETHKeyLinkBalance(commonassets.NewLinkFromJuels(1)), + SetETHKeyMaxGasPriceWei(big.NewI(12345)), + ) + + assert.Equal(t, assets.NewEth(1), r.EthBalance) + assert.Equal(t, commonassets.NewLinkFromJuels(1), r.LinkBalance) + assert.Equal(t, big.NewI(12345), r.MaxGasPriceWei) + + b, err := jsonapi.Marshal(r) + require.NoError(t, err) + + expected := fmt.Sprintf(` + { + "data":{ + "type":"eTHKeys", + "id":"42/%s", + "attributes":{ + "address":"%s", + "evmChainID":"42", + "ethBalance":"1", + "linkBalance":"1", + "disabled":true, + "createdAt":"2000-01-01T00:00:00Z", + "updatedAt":"2000-01-01T00:00:00Z", + "maxGasPriceWei":"12345" + } + } + } + `, addressStr, addressStr) + + assert.JSONEq(t, expected, string(b)) + + r = NewETHKeyResource(key, state, + SetETHKeyEthBalance(nil), + SetETHKeyLinkBalance(nil), + 
SetETHKeyMaxGasPriceWei(nil), + ) + b, err = jsonapi.Marshal(r) + require.NoError(t, err) + + expected = fmt.Sprintf(` + { + "data": { + "type":"eTHKeys", + "id":"42/%s", + "attributes":{ + "address":"%s", + "evmChainID":"42", + "ethBalance":null, + "linkBalance":null, + "disabled":true, + "createdAt":"2000-01-01T00:00:00Z", + "updatedAt":"2000-01-01T00:00:00Z", + "maxGasPriceWei":null + } + } + }`, + addressStr, addressStr, + ) + + assert.JSONEq(t, expected, string(b)) +} diff --git a/core/web/presenters/eth_tx.go b/core/web/presenters/eth_tx.go new file mode 100644 index 00000000..7d468714 --- /dev/null +++ b/core/web/presenters/eth_tx.go @@ -0,0 +1,79 @@ +package presenters + +import ( + "strconv" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +// EthTxResource represents a Ethereum Transaction JSONAPI resource. +type EthTxResource struct { + JAID + State string `json:"state"` + Data hexutil.Bytes `json:"data"` + From *common.Address `json:"from"` + GasLimit string `json:"gasLimit"` + GasPrice string `json:"gasPrice"` + Hash common.Hash `json:"hash"` + Hex string `json:"rawHex"` + Nonce string `json:"nonce"` + SentAt string `json:"sentAt"` + To *common.Address `json:"to"` + Value string `json:"value"` + EVMChainID big.Big `json:"evmChainID"` +} + +// GetName implements the api2go EntityNamer interface +func (EthTxResource) GetName() string { + return "evm_transactions" +} + +// NewEthTxResource generates a EthTxResource from an Eth.Tx. +// +// For backwards compatibility, there is no id set when initializing from an +// EthTx as the id being used was the EthTxAttempt Hash. 
+// This should really use it's proper id +func NewEthTxResource(tx txmgr.Tx) EthTxResource { + v := assets.Eth(tx.Value) + r := EthTxResource{ + Data: hexutil.Bytes(tx.EncodedPayload), + From: &tx.FromAddress, + GasLimit: strconv.FormatUint(uint64(tx.FeeLimit), 10), + State: string(tx.State), + To: &tx.ToAddress, + Value: v.String(), + } + + if tx.ChainID != nil { + r.EVMChainID = *big.New(tx.ChainID) + } + return r +} + +func NewEthTxResourceFromAttempt(txa txmgr.TxAttempt) EthTxResource { + tx := txa.Tx + + r := NewEthTxResource(tx) + r.JAID = NewJAID(txa.Hash.String()) + r.GasPrice = txa.TxFee.Legacy.ToInt().String() + r.Hash = txa.Hash + r.Hex = hexutil.Encode(txa.SignedRawTx) + + if txa.Tx.ChainID != nil { + r.EVMChainID = *big.New(txa.Tx.ChainID) + r.JAID = NewPrefixedJAID(r.JAID.ID, txa.Tx.ChainID.String()) + } + + if tx.Sequence != nil { + r.Nonce = strconv.FormatUint(uint64(*tx.Sequence), 10) + } + if txa.BroadcastBeforeBlockNum != nil { + r.SentAt = strconv.FormatUint(uint64(*txa.BroadcastBeforeBlockNum), 10) + } + return r +} diff --git a/core/web/presenters/eth_tx_test.go b/core/web/presenters/eth_tx_test.go new file mode 100644 index 00000000..f999820e --- /dev/null +++ b/core/web/presenters/eth_tx_test.go @@ -0,0 +1,110 @@ +package presenters + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" +) + +func TestEthTxResource(t *testing.T) { + t.Parallel() + + chainID := big.NewInt(54321) + tx := txmgr.Tx{ + ID: 1, + EncodedPayload: []byte(`{"data": 
"is wilding out"}`), + FromAddress: common.HexToAddress("0x1"), + ToAddress: common.HexToAddress("0x2"), + FeeLimit: uint32(5000), + ChainID: chainID, + State: txmgrcommon.TxConfirmed, + Value: big.Int(assets.NewEthValue(1)), + } + + r := NewEthTxResource(tx) + + b, err := jsonapi.Marshal(r) + require.NoError(t, err) + + expected := ` + { + "data": { + "type": "evm_transactions", + "id": "", + "attributes": { + "state": "confirmed", + "data": "0x7b2264617461223a202269732077696c64696e67206f7574227d", + "from": "0x0000000000000000000000000000000000000001", + "gasLimit": "5000", + "gasPrice": "", + "hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "rawHex": "", + "nonce": "", + "sentAt": "", + "to": "0x0000000000000000000000000000000000000002", + "value": "0.000000000000000001", + "evmChainID": "54321" + } + } + } + ` + + assert.JSONEq(t, expected, string(b)) + + var ( + nonce = evmtypes.Nonce(100) + hash = common.BytesToHash([]byte{1, 2, 3}) + gasPrice = assets.NewWeiI(1000) + broadcastBefore = int64(300) + ) + + tx.Sequence = &nonce + txa := txmgr.TxAttempt{ + Tx: tx, + Hash: hash, + TxFee: gas.EvmFee{Legacy: gasPrice}, + SignedRawTx: hexutil.MustDecode("0xcafe"), + BroadcastBeforeBlockNum: &broadcastBefore, + } + + r = NewEthTxResourceFromAttempt(txa) + + b, err = jsonapi.Marshal(r) + require.NoError(t, err) + + expected = ` + { + "data": { + "type": "evm_transactions", + "id": "54321/0x0000000000000000000000000000000000000000000000000000000000010203", + "attributes": { + "state": "confirmed", + "data": "0x7b2264617461223a202269732077696c64696e67206f7574227d", + "from": "0x0000000000000000000000000000000000000001", + "gasLimit": "5000", + "gasPrice": "1000", + "hash": "0x0000000000000000000000000000000000000000000000000000000000010203", + "rawHex": "0xcafe", + "nonce": "100", + "sentAt": "300", + "to": "0x0000000000000000000000000000000000000002", + "value": "0.000000000000000001", + "evmChainID": "54321" + } + } + } + ` + + 
assert.JSONEq(t, expected, string(b)) +} diff --git a/core/web/presenters/evm_chain.go b/core/web/presenters/evm_chain.go new file mode 100644 index 00000000..e0b83766 --- /dev/null +++ b/core/web/presenters/evm_chain.go @@ -0,0 +1,43 @@ +package presenters + +import "github.com/goplugin/plugin-common/pkg/types" + +// EVMChainResource is an EVM chain JSONAPI resource. +type EVMChainResource struct { + ChainResource +} + +// GetName implements the api2go EntityNamer interface +func (r EVMChainResource) GetName() string { + return "evm_chain" +} + +// NewEVMChainResource returns a new EVMChainResource for chain. +func NewEVMChainResource(chain types.ChainStatus) EVMChainResource { + return EVMChainResource{ChainResource{ + JAID: NewJAID(chain.ID), + Config: chain.Config, + Enabled: chain.Enabled, + }} +} + +// EVMNodeResource is an EVM node JSONAPI resource. +type EVMNodeResource struct { + NodeResource +} + +// GetName implements the api2go EntityNamer interface +func (r EVMNodeResource) GetName() string { + return "evm_node" +} + +// NewEVMNodeResource returns a new EVMNodeResource for node. +func NewEVMNodeResource(node types.NodeStatus) EVMNodeResource { + return EVMNodeResource{NodeResource{ + JAID: NewPrefixedJAID(node.Name, node.ChainID), + ChainID: node.ChainID, + Name: node.Name, + State: node.State, + Config: node.Config, + }} +} diff --git a/core/web/presenters/evm_forwarder.go b/core/web/presenters/evm_forwarder.go new file mode 100644 index 00000000..7906d528 --- /dev/null +++ b/core/web/presenters/evm_forwarder.go @@ -0,0 +1,35 @@ +package presenters + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +// EVMForwarderResource is an EVM forwarder JSONAPI resource. 
+type EVMForwarderResource struct { + JAID + Address common.Address `json:"address"` + EVMChainID big.Big `json:"evmChainId"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` +} + +// GetName implements the api2go EntityNamer interface +func (r EVMForwarderResource) GetName() string { + return "evm_forwarder" +} + +// NewEVMForwarderResource returns a new EVMForwarderResource for chain. +func NewEVMForwarderResource(fwd forwarders.Forwarder) EVMForwarderResource { + return EVMForwarderResource{ + JAID: NewJAIDInt64(fwd.ID), + Address: fwd.Address, + EVMChainID: fwd.EVMChainID, + CreatedAt: fwd.CreatedAt, + UpdatedAt: fwd.UpdatedAt, + } +} diff --git a/core/web/presenters/evm_forwarder_test.go b/core/web/presenters/evm_forwarder_test.go new file mode 100644 index 00000000..61ff0d9f --- /dev/null +++ b/core/web/presenters/evm_forwarder_test.go @@ -0,0 +1,64 @@ +package presenters + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/forwarders" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" +) + +func TestEVMForwarderResource(t *testing.T) { + var ( + ID = int64(1) + address = utils.RandomAddress() + chainID = *big.NewI(4) + createdAt = time.Now() + updatedAt = time.Now().Add(time.Second) + ) + fwd := forwarders.Forwarder{ + ID: ID, + Address: address, + EVMChainID: chainID, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + } + + r := NewEVMForwarderResource(fwd) + assert.Equal(t, fmt.Sprint(ID), r.ID) + assert.Equal(t, address, r.Address) + assert.Equal(t, chainID, r.EVMChainID) + assert.Equal(t, createdAt, r.CreatedAt) + assert.Equal(t, updatedAt, r.UpdatedAt) + + b, err := jsonapi.Marshal(r) + require.NoError(t, err) + + createdAtMarshalled, err := createdAt.MarshalText() + 
require.NoError(t, err) + updatedAtMarshalled, err := updatedAt.MarshalText() + require.NoError(t, err) + + expected := fmt.Sprintf(` + { + "data":{ + "type":"evm_forwarder", + "id":"%d", + "attributes":{ + "address":"%s", + "evmChainId":"%s", + "createdAt":"%s", + "updatedAt":"%s" + } + } + } + `, ID, strings.ToLower(address.String()), chainID.String(), string(createdAtMarshalled), string(updatedAtMarshalled)) + assert.JSONEq(t, expected, string(b)) +} diff --git a/core/web/presenters/external_initiators.go b/core/web/presenters/external_initiators.go new file mode 100644 index 00000000..cefad294 --- /dev/null +++ b/core/web/presenters/external_initiators.go @@ -0,0 +1,82 @@ +package presenters + +import ( + "fmt" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +// ExternalInitiatorAuthentication includes initiator and authentication details. +type ExternalInitiatorAuthentication struct { + Name string `json:"name,omitempty"` + URL models.WebURL `json:"url,omitempty"` + AccessKey string `json:"incomingAccessKey,omitempty"` + Secret string `json:"incomingSecret,omitempty"` + OutgoingToken string `json:"outgoingToken,omitempty"` + OutgoingSecret string `json:"outgoingSecret,omitempty"` +} + +// NewExternalInitiatorAuthentication creates an instance of ExternalInitiatorAuthentication. +func NewExternalInitiatorAuthentication( + ei bridges.ExternalInitiator, + eia auth.Token, +) *ExternalInitiatorAuthentication { + var result = &ExternalInitiatorAuthentication{ + Name: ei.Name, + AccessKey: ei.AccessKey, + Secret: eia.Secret, + OutgoingToken: ei.OutgoingToken, + OutgoingSecret: ei.OutgoingSecret, + } + if ei.URL != nil { + result.URL = *ei.URL + } + return result +} + +// GetID returns the jsonapi ID. +func (ei *ExternalInitiatorAuthentication) GetID() string { + return ei.Name +} + +// GetName returns the collection name for jsonapi. 
+func (*ExternalInitiatorAuthentication) GetName() string { + return "external initiators" +} + +// SetID is used to conform to the UnmarshallIdentifier interface for +// deserializing from jsonapi documents. +func (ei *ExternalInitiatorAuthentication) SetID(name string) error { + ei.Name = name + return nil +} + +type ExternalInitiatorResource struct { + JAID + Name string `json:"name"` + URL *models.WebURL `json:"url"` + AccessKey string `json:"accessKey"` + OutgoingToken string `json:"outgoingToken"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` +} + +func NewExternalInitiatorResource(ei bridges.ExternalInitiator) ExternalInitiatorResource { + return ExternalInitiatorResource{ + JAID: NewJAID(fmt.Sprintf("%d", ei.ID)), + Name: ei.Name, + URL: ei.URL, + AccessKey: ei.AccessKey, + OutgoingToken: ei.OutgoingToken, + CreatedAt: ei.CreatedAt, + UpdatedAt: ei.UpdatedAt, + } +} + +// GetName returns the collection name for jsonapi. +func (ExternalInitiatorResource) GetName() string { + return "externalInitiators" +} diff --git a/core/web/presenters/features.go b/core/web/presenters/features.go new file mode 100644 index 00000000..3aa7b053 --- /dev/null +++ b/core/web/presenters/features.go @@ -0,0 +1,20 @@ +package presenters + +// FeatureResource represents a Feature JSONAPI resource. +type FeatureResource struct { + JAID + Enabled bool `json:"enabled"` +} + +// GetName implements the api2go EntityNamer interface +func (r FeatureResource) GetName() string { + return "features" +} + +// NewFeatureResource constructs a new FeatureResource. 
+func NewFeatureResource(name string, enabled bool) *FeatureResource { + return &FeatureResource{ + JAID: NewJAID(name), + Enabled: enabled, + } +} diff --git a/core/web/presenters/job.go b/core/web/presenters/job.go new file mode 100644 index 00000000..b2433665 --- /dev/null +++ b/core/web/presenters/job.go @@ -0,0 +1,538 @@ +package presenters + +import ( + "time" + + "github.com/google/uuid" + "github.com/lib/pq" + "gopkg.in/guregu/null.v4" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + clnull "github.com/goplugin/pluginv3.0/v2/core/null" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +// JobSpecType defines the spec type of the job +type JobSpecType string + +func (t JobSpecType) String() string { + return string(t) +} + +const ( + DirectRequestJobSpec JobSpecType = "directrequest" + FluxMonitorJobSpec JobSpecType = "fluxmonitor" + OffChainReportingJobSpec JobSpecType = "offchainreporting" + KeeperJobSpec JobSpecType = "keeper" + CronJobSpec JobSpecType = "cron" + VRFJobSpec JobSpecType = "vrf" + WebhookJobSpec JobSpecType = "webhook" + BlockhashStoreJobSpec JobSpecType = "blockhashstore" + BlockHeaderFeederJobSpec JobSpecType = "blockheaderfeeder" + BootstrapJobSpec JobSpecType = "bootstrap" + GatewayJobSpec JobSpecType = "gateway" +) + +// DirectRequestSpec defines the spec details of a DirectRequest Job +type DirectRequestSpec struct { + ContractAddress ethkey.EIP55Address `json:"contractAddress"` + 
MinIncomingConfirmations clnull.Uint32 `json:"minIncomingConfirmations"` + MinContractPayment *commonassets.Link `json:"minContractPaymentLinkJuels"` + Requesters models.AddressCollection `json:"requesters"` + Initiator string `json:"initiator"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + EVMChainID *big.Big `json:"evmChainID"` +} + +// NewDirectRequestSpec initializes a new DirectRequestSpec from a +// job.DirectRequestSpec +func NewDirectRequestSpec(spec *job.DirectRequestSpec) *DirectRequestSpec { + return &DirectRequestSpec{ + ContractAddress: spec.ContractAddress, + MinIncomingConfirmations: spec.MinIncomingConfirmations, + MinContractPayment: spec.MinContractPayment, + Requesters: spec.Requesters, + // This is hardcoded to runlog. When we support other initiators, we need + // to change this + Initiator: "runlog", + CreatedAt: spec.CreatedAt, + UpdatedAt: spec.UpdatedAt, + EVMChainID: spec.EVMChainID, + } +} + +// FluxMonitorSpec defines the spec details of a FluxMonitor Job +type FluxMonitorSpec struct { + ContractAddress ethkey.EIP55Address `json:"contractAddress"` + Threshold float32 `json:"threshold"` + AbsoluteThreshold float32 `json:"absoluteThreshold"` + PollTimerPeriod string `json:"pollTimerPeriod"` + PollTimerDisabled bool `json:"pollTimerDisabled"` + IdleTimerPeriod string `json:"idleTimerPeriod"` + IdleTimerDisabled bool `json:"idleTimerDisabled"` + DrumbeatEnabled bool `json:"drumbeatEnabled"` + DrumbeatSchedule *string `json:"drumbeatSchedule"` + DrumbeatRandomDelay *string `json:"drumbeatRandomDelay"` + MinPayment *commonassets.Link `json:"minPayment"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + EVMChainID *big.Big `json:"evmChainID"` +} + +// NewFluxMonitorSpec initializes a new DirectFluxMonitorSpec from a +// job.FluxMonitorSpec +func NewFluxMonitorSpec(spec *job.FluxMonitorSpec) *FluxMonitorSpec { + var drumbeatSchedulePtr *string + if spec.DrumbeatEnabled { + 
drumbeatSchedulePtr = &spec.DrumbeatSchedule + } + var drumbeatRandomDelayPtr *string + if spec.DrumbeatRandomDelay > 0 { + drumbeatRandomDelay := spec.DrumbeatRandomDelay.String() + drumbeatRandomDelayPtr = &drumbeatRandomDelay + } + return &FluxMonitorSpec{ + ContractAddress: spec.ContractAddress, + Threshold: float32(spec.Threshold), + AbsoluteThreshold: float32(spec.AbsoluteThreshold), + PollTimerPeriod: spec.PollTimerPeriod.String(), + PollTimerDisabled: spec.PollTimerDisabled, + IdleTimerPeriod: spec.IdleTimerPeriod.String(), + IdleTimerDisabled: spec.IdleTimerDisabled, + DrumbeatEnabled: spec.DrumbeatEnabled, + DrumbeatSchedule: drumbeatSchedulePtr, + DrumbeatRandomDelay: drumbeatRandomDelayPtr, + MinPayment: spec.MinPayment, + CreatedAt: spec.CreatedAt, + UpdatedAt: spec.UpdatedAt, + EVMChainID: spec.EVMChainID, + } +} + +// OffChainReportingSpec defines the spec details of a OffChainReporting Job +type OffChainReportingSpec struct { + ContractAddress ethkey.EIP55Address `json:"contractAddress"` + P2PV2Bootstrappers pq.StringArray `json:"p2pv2Bootstrappers"` + IsBootstrapPeer bool `json:"isBootstrapPeer"` + EncryptedOCRKeyBundleID *models.Sha256Hash `json:"keyBundleID"` + TransmitterAddress *ethkey.EIP55Address `json:"transmitterAddress"` + ObservationTimeout models.Interval `json:"observationTimeout"` + BlockchainTimeout models.Interval `json:"blockchainTimeout"` + ContractConfigTrackerSubscribeInterval models.Interval `json:"contractConfigTrackerSubscribeInterval"` + ContractConfigTrackerPollInterval models.Interval `json:"contractConfigTrackerPollInterval"` + ContractConfigConfirmations uint16 `json:"contractConfigConfirmations"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + EVMChainID *big.Big `json:"evmChainID"` + DatabaseTimeout *models.Interval `json:"databaseTimeout"` + ObservationGracePeriod *models.Interval `json:"observationGracePeriod"` + ContractTransmitterTransmitTimeout *models.Interval 
`json:"contractTransmitterTransmitTimeout"` + CollectTelemetry bool `json:"collectTelemetry,omitempty"` +} + +// NewOffChainReportingSpec initializes a new OffChainReportingSpec from a +// job.OCROracleSpec +func NewOffChainReportingSpec(spec *job.OCROracleSpec) *OffChainReportingSpec { + return &OffChainReportingSpec{ + ContractAddress: spec.ContractAddress, + P2PV2Bootstrappers: spec.P2PV2Bootstrappers, + IsBootstrapPeer: spec.IsBootstrapPeer, + EncryptedOCRKeyBundleID: spec.EncryptedOCRKeyBundleID, + TransmitterAddress: spec.TransmitterAddress, + ObservationTimeout: spec.ObservationTimeout, + BlockchainTimeout: spec.BlockchainTimeout, + ContractConfigTrackerSubscribeInterval: spec.ContractConfigTrackerSubscribeInterval, + ContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval, + ContractConfigConfirmations: spec.ContractConfigConfirmations, + CreatedAt: spec.CreatedAt, + UpdatedAt: spec.UpdatedAt, + EVMChainID: spec.EVMChainID, + DatabaseTimeout: spec.DatabaseTimeout, + ObservationGracePeriod: spec.ObservationGracePeriod, + ContractTransmitterTransmitTimeout: spec.ContractTransmitterTransmitTimeout, + CollectTelemetry: spec.CaptureEATelemetry, + } +} + +// OffChainReporting2Spec defines the spec details of a OffChainReporting2 Job +type OffChainReporting2Spec struct { + ContractID string `json:"contractID"` + Relay relay.Network `json:"relay"` + RelayConfig map[string]interface{} `json:"relayConfig"` + P2PV2Bootstrappers pq.StringArray `json:"p2pv2Bootstrappers"` + OCRKeyBundleID null.String `json:"ocrKeyBundleID"` + TransmitterID null.String `json:"transmitterID"` + ObservationTimeout models.Interval `json:"observationTimeout"` + BlockchainTimeout models.Interval `json:"blockchainTimeout"` + ContractConfigTrackerPollInterval models.Interval `json:"contractConfigTrackerPollInterval"` + ContractConfigConfirmations uint16 `json:"contractConfigConfirmations"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + 
CollectTelemetry bool `json:"collectTelemetry"` +} + +// NewOffChainReporting2Spec initializes a new OffChainReportingSpec from a +// job.OCR2OracleSpec +func NewOffChainReporting2Spec(spec *job.OCR2OracleSpec) *OffChainReporting2Spec { + return &OffChainReporting2Spec{ + ContractID: spec.ContractID, + Relay: spec.Relay, + RelayConfig: spec.RelayConfig, + P2PV2Bootstrappers: spec.P2PV2Bootstrappers, + OCRKeyBundleID: spec.OCRKeyBundleID, + TransmitterID: spec.TransmitterID, + BlockchainTimeout: spec.BlockchainTimeout, + ContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval, + ContractConfigConfirmations: spec.ContractConfigConfirmations, + CreatedAt: spec.CreatedAt, + UpdatedAt: spec.UpdatedAt, + CollectTelemetry: spec.CaptureEATelemetry, + } +} + +// PipelineSpec defines the spec details of the pipeline +type PipelineSpec struct { + ID int32 `json:"id"` + JobID int32 `json:"jobID"` + DotDAGSource string `json:"dotDagSource"` +} + +// NewPipelineSpec generates a new PipelineSpec from a pipeline.Spec +func NewPipelineSpec(spec *pipeline.Spec) PipelineSpec { + return PipelineSpec{ + ID: spec.ID, + JobID: spec.JobID, + DotDAGSource: spec.DotDagSource, + } +} + +// KeeperSpec defines the spec details of a Keeper Job +type KeeperSpec struct { + ContractAddress ethkey.EIP55Address `json:"contractAddress"` + FromAddress ethkey.EIP55Address `json:"fromAddress"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + EVMChainID *big.Big `json:"evmChainID"` +} + +// NewKeeperSpec generates a new KeeperSpec from a job.KeeperSpec +func NewKeeperSpec(spec *job.KeeperSpec) *KeeperSpec { + return &KeeperSpec{ + ContractAddress: spec.ContractAddress, + FromAddress: spec.FromAddress, + CreatedAt: spec.CreatedAt, + UpdatedAt: spec.UpdatedAt, + EVMChainID: spec.EVMChainID, + } +} + +// WebhookSpec defines the spec details of a Webhook Job +type WebhookSpec struct { + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time 
`json:"updatedAt"` +} + +// NewWebhookSpec generates a new WebhookSpec from a job.WebhookSpec +func NewWebhookSpec(spec *job.WebhookSpec) *WebhookSpec { + return &WebhookSpec{ + CreatedAt: spec.CreatedAt, + UpdatedAt: spec.UpdatedAt, + } +} + +// CronSpec defines the spec details of a Cron Job +type CronSpec struct { + CronSchedule string `json:"schedule" tom:"schedule"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` +} + +// NewCronSpec generates a new CronSpec from a job.CronSpec +func NewCronSpec(spec *job.CronSpec) *CronSpec { + return &CronSpec{ + CronSchedule: spec.CronSchedule, + CreatedAt: spec.CreatedAt, + UpdatedAt: spec.UpdatedAt, + } +} + +type VRFSpec struct { + BatchCoordinatorAddress *ethkey.EIP55Address `json:"batchCoordinatorAddress"` + BatchFulfillmentEnabled bool `json:"batchFulfillmentEnabled"` + CustomRevertsPipelineEnabled *bool `json:"customRevertsPipelineEnabled,omitempty"` + BatchFulfillmentGasMultiplier float64 `json:"batchFulfillmentGasMultiplier"` + CoordinatorAddress ethkey.EIP55Address `json:"coordinatorAddress"` + PublicKey secp256k1.PublicKey `json:"publicKey"` + FromAddresses []ethkey.EIP55Address `json:"fromAddresses"` + PollPeriod commonconfig.Duration `json:"pollPeriod"` + MinIncomingConfirmations uint32 `json:"confirmations"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + EVMChainID *big.Big `json:"evmChainID"` + ChunkSize uint32 `json:"chunkSize"` + RequestTimeout commonconfig.Duration `json:"requestTimeout"` + BackoffInitialDelay commonconfig.Duration `json:"backoffInitialDelay"` + BackoffMaxDelay commonconfig.Duration `json:"backoffMaxDelay"` + GasLanePrice *assets.Wei `json:"gasLanePrice"` + RequestedConfsDelay int64 `json:"requestedConfsDelay"` + VRFOwnerAddress *ethkey.EIP55Address `json:"vrfOwnerAddress,omitempty"` +} + +func NewVRFSpec(spec *job.VRFSpec) *VRFSpec { + return &VRFSpec{ + BatchCoordinatorAddress: spec.BatchCoordinatorAddress, + 
BatchFulfillmentEnabled: spec.BatchFulfillmentEnabled, + BatchFulfillmentGasMultiplier: float64(spec.BatchFulfillmentGasMultiplier), + CustomRevertsPipelineEnabled: &spec.CustomRevertsPipelineEnabled, + CoordinatorAddress: spec.CoordinatorAddress, + PublicKey: spec.PublicKey, + FromAddresses: spec.FromAddresses, + PollPeriod: *commonconfig.MustNewDuration(spec.PollPeriod), + MinIncomingConfirmations: spec.MinIncomingConfirmations, + CreatedAt: spec.CreatedAt, + UpdatedAt: spec.UpdatedAt, + EVMChainID: spec.EVMChainID, + ChunkSize: spec.ChunkSize, + RequestTimeout: *commonconfig.MustNewDuration(spec.RequestTimeout), + BackoffInitialDelay: *commonconfig.MustNewDuration(spec.BackoffInitialDelay), + BackoffMaxDelay: *commonconfig.MustNewDuration(spec.BackoffMaxDelay), + GasLanePrice: spec.GasLanePrice, + RequestedConfsDelay: spec.RequestedConfsDelay, + VRFOwnerAddress: spec.VRFOwnerAddress, + } +} + +// BlockhashStoreSpec defines the job parameters for a blockhash store feeder job. +type BlockhashStoreSpec struct { + CoordinatorV1Address *ethkey.EIP55Address `json:"coordinatorV1Address"` + CoordinatorV2Address *ethkey.EIP55Address `json:"coordinatorV2Address"` + CoordinatorV2PlusAddress *ethkey.EIP55Address `json:"coordinatorV2PlusAddress"` + WaitBlocks int32 `json:"waitBlocks"` + LookbackBlocks int32 `json:"lookbackBlocks"` + HeartbeatPeriod time.Duration `json:"heartbeatPeriod"` + BlockhashStoreAddress ethkey.EIP55Address `json:"blockhashStoreAddress"` + TrustedBlockhashStoreAddress *ethkey.EIP55Address `json:"trustedBlockhashStoreAddress"` + TrustedBlockhashStoreBatchSize int32 `json:"trustedBlockhashStoreBatchSize"` + PollPeriod time.Duration `json:"pollPeriod"` + RunTimeout time.Duration `json:"runTimeout"` + EVMChainID *big.Big `json:"evmChainID"` + FromAddresses []ethkey.EIP55Address `json:"fromAddresses"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` +} + +// NewBlockhashStoreSpec creates a new BlockhashStoreSpec for the 
given parameters. +func NewBlockhashStoreSpec(spec *job.BlockhashStoreSpec) *BlockhashStoreSpec { + return &BlockhashStoreSpec{ + CoordinatorV1Address: spec.CoordinatorV1Address, + CoordinatorV2Address: spec.CoordinatorV2Address, + CoordinatorV2PlusAddress: spec.CoordinatorV2PlusAddress, + WaitBlocks: spec.WaitBlocks, + LookbackBlocks: spec.LookbackBlocks, + HeartbeatPeriod: spec.HeartbeatPeriod, + BlockhashStoreAddress: spec.BlockhashStoreAddress, + TrustedBlockhashStoreAddress: spec.TrustedBlockhashStoreAddress, + TrustedBlockhashStoreBatchSize: spec.TrustedBlockhashStoreBatchSize, + PollPeriod: spec.PollPeriod, + RunTimeout: spec.RunTimeout, + EVMChainID: spec.EVMChainID, + FromAddresses: spec.FromAddresses, + } +} + +// BlockHeaderFeederSpec defines the job parameters for a block header feeder job. +type BlockHeaderFeederSpec struct { + CoordinatorV1Address *ethkey.EIP55Address `json:"coordinatorV1Address"` + CoordinatorV2Address *ethkey.EIP55Address `json:"coordinatorV2Address"` + CoordinatorV2PlusAddress *ethkey.EIP55Address `json:"coordinatorV2PlusAddress"` + WaitBlocks int32 `json:"waitBlocks"` + LookbackBlocks int32 `json:"lookbackBlocks"` + BlockhashStoreAddress ethkey.EIP55Address `json:"blockhashStoreAddress"` + BatchBlockhashStoreAddress ethkey.EIP55Address `json:"batchBlockhashStoreAddress"` + PollPeriod time.Duration `json:"pollPeriod"` + RunTimeout time.Duration `json:"runTimeout"` + EVMChainID *big.Big `json:"evmChainID"` + FromAddresses []ethkey.EIP55Address `json:"fromAddresses"` + GetBlockhashesBatchSize uint16 `json:"getBlockhashesBatchSize"` + StoreBlockhashesBatchSize uint16 `json:"storeBlockhashesBatchSize"` + + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` +} + +// NewBlockHeaderFeederSpec creates a new BlockHeaderFeederSpec for the given parameters. 
+func NewBlockHeaderFeederSpec(spec *job.BlockHeaderFeederSpec) *BlockHeaderFeederSpec { + return &BlockHeaderFeederSpec{ + CoordinatorV1Address: spec.CoordinatorV1Address, + CoordinatorV2Address: spec.CoordinatorV2Address, + CoordinatorV2PlusAddress: spec.CoordinatorV2PlusAddress, + WaitBlocks: spec.WaitBlocks, + LookbackBlocks: spec.LookbackBlocks, + BlockhashStoreAddress: spec.BlockhashStoreAddress, + BatchBlockhashStoreAddress: spec.BatchBlockhashStoreAddress, + PollPeriod: spec.PollPeriod, + RunTimeout: spec.RunTimeout, + EVMChainID: spec.EVMChainID, + FromAddresses: spec.FromAddresses, + GetBlockhashesBatchSize: spec.GetBlockhashesBatchSize, + StoreBlockhashesBatchSize: spec.StoreBlockhashesBatchSize, + } +} + +// BootstrapSpec defines the spec details of a BootstrapSpec Job +type BootstrapSpec struct { + ContractID string `json:"contractID"` + Relay relay.Network `json:"relay"` + RelayConfig map[string]interface{} `json:"relayConfig"` + BlockchainTimeout models.Interval `json:"blockchainTimeout"` + ContractConfigTrackerSubscribeInterval models.Interval `json:"contractConfigTrackerSubscribeInterval"` + ContractConfigTrackerPollInterval models.Interval `json:"contractConfigTrackerPollInterval"` + ContractConfigConfirmations uint16 `json:"contractConfigConfirmations"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` +} + +// NewBootstrapSpec initializes a new BootstrapSpec from a job.BootstrapSpec +func NewBootstrapSpec(spec *job.BootstrapSpec) *BootstrapSpec { + return &BootstrapSpec{ + ContractID: spec.ContractID, + Relay: spec.Relay, + RelayConfig: spec.RelayConfig, + BlockchainTimeout: spec.BlockchainTimeout, + ContractConfigTrackerPollInterval: spec.ContractConfigTrackerPollInterval, + ContractConfigConfirmations: spec.ContractConfigConfirmations, + CreatedAt: spec.CreatedAt, + UpdatedAt: spec.UpdatedAt, + } +} + +type GatewaySpec struct { + GatewayConfig map[string]interface{} `json:"gatewayConfig"` + CreatedAt time.Time 
`json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` +} + +func NewGatewaySpec(spec *job.GatewaySpec) *GatewaySpec { + return &GatewaySpec{ + GatewayConfig: spec.GatewayConfig, + CreatedAt: spec.CreatedAt, + UpdatedAt: spec.UpdatedAt, + } +} + +// JobError represents errors on the job +type JobError struct { + ID int64 `json:"id"` + Description string `json:"description"` + Occurrences uint `json:"occurrences"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` +} + +func NewJobError(e job.SpecError) JobError { + return JobError{ + ID: e.ID, + Description: e.Description, + Occurrences: e.Occurrences, + CreatedAt: e.CreatedAt, + UpdatedAt: e.UpdatedAt, + } +} + +// JobResource represents a JobResource +type JobResource struct { + JAID + Name string `json:"name"` + StreamID *uint32 `json:"streamID,omitempty"` + Type JobSpecType `json:"type"` + SchemaVersion uint32 `json:"schemaVersion"` + GasLimit clnull.Uint32 `json:"gasLimit"` + ForwardingAllowed bool `json:"forwardingAllowed"` + MaxTaskDuration models.Interval `json:"maxTaskDuration"` + ExternalJobID uuid.UUID `json:"externalJobID"` + DirectRequestSpec *DirectRequestSpec `json:"directRequestSpec"` + FluxMonitorSpec *FluxMonitorSpec `json:"fluxMonitorSpec"` + CronSpec *CronSpec `json:"cronSpec"` + OffChainReportingSpec *OffChainReportingSpec `json:"offChainReportingOracleSpec"` + OffChainReporting2Spec *OffChainReporting2Spec `json:"offChainReporting2OracleSpec"` + KeeperSpec *KeeperSpec `json:"keeperSpec"` + VRFSpec *VRFSpec `json:"vrfSpec"` + WebhookSpec *WebhookSpec `json:"webhookSpec"` + BlockhashStoreSpec *BlockhashStoreSpec `json:"blockhashStoreSpec"` + BlockHeaderFeederSpec *BlockHeaderFeederSpec `json:"blockHeaderFeederSpec"` + BootstrapSpec *BootstrapSpec `json:"bootstrapSpec"` + GatewaySpec *GatewaySpec `json:"gatewaySpec"` + PipelineSpec PipelineSpec `json:"pipelineSpec"` + Errors []JobError `json:"errors"` +} + +// NewJobResource initializes a new JSONAPI job 
resource +func NewJobResource(j job.Job) *JobResource { + resource := &JobResource{ + JAID: NewJAIDInt32(j.ID), + Name: j.Name.ValueOrZero(), + StreamID: j.StreamID, + Type: JobSpecType(j.Type), + SchemaVersion: j.SchemaVersion, + GasLimit: j.GasLimit, + ForwardingAllowed: j.ForwardingAllowed, + MaxTaskDuration: j.MaxTaskDuration, + PipelineSpec: NewPipelineSpec(j.PipelineSpec), + ExternalJobID: j.ExternalJobID, + } + + switch j.Type { + case job.DirectRequest: + resource.DirectRequestSpec = NewDirectRequestSpec(j.DirectRequestSpec) + case job.FluxMonitor: + resource.FluxMonitorSpec = NewFluxMonitorSpec(j.FluxMonitorSpec) + case job.Cron: + resource.CronSpec = NewCronSpec(j.CronSpec) + case job.OffchainReporting: + resource.OffChainReportingSpec = NewOffChainReportingSpec(j.OCROracleSpec) + case job.OffchainReporting2: + resource.OffChainReporting2Spec = NewOffChainReporting2Spec(j.OCR2OracleSpec) + case job.Keeper: + resource.KeeperSpec = NewKeeperSpec(j.KeeperSpec) + case job.VRF: + resource.VRFSpec = NewVRFSpec(j.VRFSpec) + case job.Webhook: + resource.WebhookSpec = NewWebhookSpec(j.WebhookSpec) + case job.BlockhashStore: + resource.BlockhashStoreSpec = NewBlockhashStoreSpec(j.BlockhashStoreSpec) + case job.BlockHeaderFeeder: + resource.BlockHeaderFeederSpec = NewBlockHeaderFeederSpec(j.BlockHeaderFeederSpec) + case job.Bootstrap: + resource.BootstrapSpec = NewBootstrapSpec(j.BootstrapSpec) + case job.Gateway: + resource.GatewaySpec = NewGatewaySpec(j.GatewaySpec) + case job.Stream: + // no spec; nothing to do + case job.Workflow: + // no spec; nothing to do + case job.LegacyGasStationServer, job.LegacyGasStationSidecar: + // unsupported + } + + jes := []JobError{} + for _, e := range j.JobSpecErrors { + jes = append(jes, NewJobError((e))) + } + resource.Errors = jes + + return resource +} + +// GetName implements the api2go EntityNamer interface +func (r JobResource) GetName() string { + return "jobs" +} diff --git a/core/web/presenters/job_test.go 
b/core/web/presenters/job_test.go new file mode 100644 index 00000000..35da13c3 --- /dev/null +++ b/core/web/presenters/job_test.go @@ -0,0 +1,936 @@ +package presenters_test + +import ( + "fmt" + "testing" + "time" + + "github.com/google/uuid" + + "github.com/lib/pq" + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/assets" + evmassets "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + clnull "github.com/goplugin/pluginv3.0/v2/core/null" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func TestJob(t *testing.T) { + // Used in multiple tests + timestamp := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + contractAddress, err := ethkey.NewEIP55Address("0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba") + require.NoError(t, err) + cronSchedule := "0 0 0 1 1 *" + evmChainID := big.NewI(42) + fromAddress, err := ethkey.NewEIP55Address("0xa8037A20989AFcBC51798de9762b351D63ff462e") + require.NoError(t, err) + + // Used in OCR tests + var ocrKeyBundleID = "f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5" + ocrKeyID := models.MustSha256HashFromHex(ocrKeyBundleID) + transmitterAddress, err := ethkey.NewEIP55Address("0x27548a32b9aD5D64c5945EaE9Da5337bc3169D15") + require.NoError(t, err) + + // Used in blockhashstore test + v1CoordAddress, err := ethkey.NewEIP55Address("0x16988483b46e695f6c8D58e6e1461DC703e008e1") + require.NoError(t, err) + + v2CoordAddress, err := 
ethkey.NewEIP55Address("0x2C409DD6D4eBDdA190B5174Cc19616DD13884262") + require.NoError(t, err) + + v2PlusCoordAddress, err := ethkey.NewEIP55Address("0x92B5e28Ac583812874e4271380c7d070C5FB6E6b") + require.NoError(t, err) + + // Used in blockheaderfeeder test + batchBHSAddress, err := ethkey.NewEIP55Address("0xF6bB415b033D19EFf24A872a4785c6e1C4426103") + require.NoError(t, err) + + trustedBlockhashStoreAddress, err := ethkey.NewEIP55Address("0x0ad9FE7a58216242a8475ca92F222b0640E26B63") + require.NoError(t, err) + trustedBlockhashStoreBatchSize := int32(20) + + var specGasLimit uint32 = 1000 + vrfPubKey, _ := secp256k1.NewPublicKeyFromHex("0xede539e216e3a50e69d1c68aa9cc472085876c4002f6e1e6afee0ea63b50a78b00") + + testCases := []struct { + name string + job job.Job + want string + }{ + { + name: "direct request spec", + job: job.Job{ + ID: 1, + GasLimit: clnull.Uint32From(specGasLimit), + ForwardingAllowed: false, + DirectRequestSpec: &job.DirectRequestSpec{ + ContractAddress: contractAddress, + CreatedAt: timestamp, + UpdatedAt: timestamp, + EVMChainID: evmChainID, + }, + ExternalJobID: uuid.MustParse("0EEC7E1D-D0D2-476C-A1A8-72DFB6633F46"), + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: `ds1 [type=http method=GET url="https://pricesource1.com"`, + }, + Type: job.DirectRequest, + SchemaVersion: 1, + Name: null.StringFrom("test"), + MaxTaskDuration: models.Interval(1 * time.Minute), + }, + want: fmt.Sprintf(` + { + "data":{ + "type":"jobs", + "id":"1", + "attributes":{ + "name": "test", + "schemaVersion": 1, + "type": "directrequest", + "maxTaskDuration": "1m0s", + "externalJobID":"0eec7e1d-d0d2-476c-a1a8-72dfb6633f46", + "pipelineSpec": { + "id": 1, + "dotDagSource": "ds1 [type=http method=GET url=\"https://pricesource1.com\"", + "jobID": 0 + }, + "directRequestSpec": { + "contractAddress": "%s", + "minIncomingConfirmations": null, + "minContractPaymentLinkJuels": null, + "requesters": null, + "initiator": "runlog", + "createdAt":"2000-01-01T00:00:00Z", + 
"updatedAt":"2000-01-01T00:00:00Z", + "evmChainID": "42" + }, + "offChainReportingOracleSpec": null, + "offChainReporting2OracleSpec": null, + "fluxMonitorSpec": null, + "gasLimit": 1000, + "forwardingAllowed": false, + "keeperSpec": null, + "cronSpec": null, + "vrfSpec": null, + "webhookSpec": null, + "blockhashStoreSpec": null, + "blockHeaderFeederSpec": null, + "bootstrapSpec": null, + "gatewaySpec": null, + "errors": [] + } + } + }`, contractAddress), + }, + { + name: "fluxmonitor spec", + job: job.Job{ + ID: 1, + FluxMonitorSpec: &job.FluxMonitorSpec{ + ContractAddress: contractAddress, + Threshold: 0.5, + IdleTimerPeriod: 1 * time.Minute, + IdleTimerDisabled: false, + PollTimerPeriod: 1 * time.Second, + PollTimerDisabled: false, + MinPayment: assets.NewLinkFromJuels(1), + CreatedAt: timestamp, + UpdatedAt: timestamp, + EVMChainID: evmChainID, + }, + ExternalJobID: uuid.MustParse("0EEC7E1D-D0D2-476C-A1A8-72DFB6633F46"), + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: `ds1 [type=http method=GET url="https://pricesource1.com"`, + }, + Type: job.FluxMonitor, + SchemaVersion: 1, + Name: null.StringFrom("test"), + MaxTaskDuration: models.Interval(1 * time.Minute), + }, + want: fmt.Sprintf(` + { + "data":{ + "type":"jobs", + "id":"1", + "attributes":{ + "name": "test", + "schemaVersion": 1, + "type": "fluxmonitor", + "maxTaskDuration": "1m0s", + "externalJobID":"0eec7e1d-d0d2-476c-a1a8-72dfb6633f46", + "pipelineSpec": { + "id": 1, + "dotDagSource": "ds1 [type=http method=GET url=\"https://pricesource1.com\"", + "jobID": 0 + }, + "fluxMonitorSpec": { + "contractAddress": "%s", + "threshold": 0.5, + "absoluteThreshold": 0, + "idleTimerPeriod": "1m0s", + "idleTimerDisabled": false, + "pollTimerPeriod": "1s", + "pollTimerDisabled": false, + "drumbeatEnabled": false, + "drumbeatRandomDelay": null, + "drumbeatSchedule": null, + "minPayment": "1", + "createdAt":"2000-01-01T00:00:00Z", + "updatedAt":"2000-01-01T00:00:00Z", + "evmChainID": "42" + }, + "gasLimit": 
null, + "forwardingAllowed": false, + "offChainReportingOracleSpec": null, + "offChainReporting2OracleSpec": null, + "directRequestSpec": null, + "keeperSpec": null, + "cronSpec": null, + "vrfSpec": null, + "webhookSpec": null, + "blockhashStoreSpec": null, + "blockHeaderFeederSpec": null, + "bootstrapSpec": null, + "gatewaySpec": null, + "errors": [] + } + } + }`, contractAddress), + }, + { + name: "ocr spec", + job: job.Job{ + ID: 1, + OCROracleSpec: &job.OCROracleSpec{ + ContractAddress: contractAddress, + P2PV2Bootstrappers: pq.StringArray{"xxx:5001"}, + IsBootstrapPeer: true, + EncryptedOCRKeyBundleID: &ocrKeyID, + TransmitterAddress: &transmitterAddress, + ObservationTimeout: models.Interval(1 * time.Minute), + BlockchainTimeout: models.Interval(1 * time.Minute), + ContractConfigTrackerSubscribeInterval: models.Interval(1 * time.Minute), + ContractConfigTrackerPollInterval: models.Interval(1 * time.Minute), + ContractConfigConfirmations: 1, + CreatedAt: timestamp, + UpdatedAt: timestamp, + EVMChainID: evmChainID, + DatabaseTimeout: models.NewInterval(2 * time.Second), + ObservationGracePeriod: models.NewInterval(3 * time.Second), + ContractTransmitterTransmitTimeout: models.NewInterval(444 * time.Millisecond), + }, + ExternalJobID: uuid.MustParse("0EEC7E1D-D0D2-476C-A1A8-72DFB6633F46"), + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: `ds1 [type=http method=GET url="https://pricesource1.com"`, + }, + Type: job.OffchainReporting, + SchemaVersion: 1, + Name: null.StringFrom("test"), + GasLimit: clnull.Uint32From(123), + ForwardingAllowed: true, + MaxTaskDuration: models.Interval(1 * time.Minute), + }, + want: fmt.Sprintf(` + { + "data":{ + "type":"jobs", + "id":"1", + "attributes":{ + "name": "test", + "schemaVersion": 1, + "type": "offchainreporting", + "maxTaskDuration": "1m0s", + "externalJobID":"0eec7e1d-d0d2-476c-a1a8-72dfb6633f46", + "pipelineSpec": { + "id": 1, + "dotDagSource": "ds1 [type=http method=GET url=\"https://pricesource1.com\"", + 
"jobID": 0 + }, + "offChainReportingOracleSpec": { + "contractAddress": "%s", + "p2pv2Bootstrappers": ["xxx:5001"], + "isBootstrapPeer": true, + "keyBundleID": "%s", + "transmitterAddress": "%s", + "observationTimeout": "1m0s", + "blockchainTimeout": "1m0s", + "contractConfigTrackerSubscribeInterval": "1m0s", + "contractConfigTrackerPollInterval": "1m0s", + "contractConfigConfirmations": 1, + "createdAt":"2000-01-01T00:00:00Z", + "updatedAt":"2000-01-01T00:00:00Z", + "evmChainID": "42", + "databaseTimeout": "2s", + "observationGracePeriod": "3s", + "contractTransmitterTransmitTimeout": "444ms" + }, + "offChainReporting2OracleSpec": null, + "fluxMonitorSpec": null, + "gasLimit": 123, + "forwardingAllowed": true, + "directRequestSpec": null, + "keeperSpec": null, + "cronSpec": null, + "vrfSpec": null, + "webhookSpec": null, + "blockhashStoreSpec": null, + "blockHeaderFeederSpec": null, + "bootstrapSpec": null, + "gatewaySpec": null, + "errors": [] + } + } + }`, contractAddress, ocrKeyBundleID, transmitterAddress), + }, + { + name: "keeper spec", + job: job.Job{ + ID: 1, + KeeperSpec: &job.KeeperSpec{ + ContractAddress: contractAddress, + FromAddress: fromAddress, + CreatedAt: timestamp, + UpdatedAt: timestamp, + EVMChainID: evmChainID, + }, + ExternalJobID: uuid.MustParse("0EEC7E1D-D0D2-476C-A1A8-72DFB6633F46"), + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: "", + }, + Type: job.Keeper, + SchemaVersion: 1, + Name: null.StringFrom("test"), + MaxTaskDuration: models.Interval(1 * time.Minute), + }, + want: fmt.Sprintf(` + { + "data":{ + "type":"jobs", + "id":"1", + "attributes":{ + "name": "test", + "schemaVersion": 1, + "type": "keeper", + "maxTaskDuration": "1m0s", + "externalJobID":"0eec7e1d-d0d2-476c-a1a8-72dfb6633f46", + "pipelineSpec": { + "id": 1, + "dotDagSource": "", + "jobID": 0 + }, + "keeperSpec": { + "contractAddress": "%s", + "fromAddress": "%s", + "createdAt":"2000-01-01T00:00:00Z", + "updatedAt":"2000-01-01T00:00:00Z", + "evmChainID": "42" + }, 
+ "fluxMonitorSpec": null, + "gasLimit": null, + "forwardingAllowed": false, + "directRequestSpec": null, + "cronSpec": null, + "webhookSpec": null, + "offChainReportingOracleSpec": null, + "offChainReporting2OracleSpec": null, + "cronSpec": null, + "vrfSpec": null, + "blockhashStoreSpec": null, + "blockHeaderFeederSpec": null, + "bootstrapSpec": null, + "gatewaySpec": null, + "errors": [] + } + } + }`, contractAddress, fromAddress), + }, + { + name: "cron spec", + job: job.Job{ + ID: 1, + CronSpec: &job.CronSpec{ + CronSchedule: cronSchedule, + CreatedAt: timestamp, + UpdatedAt: timestamp, + }, + ExternalJobID: uuid.MustParse("0EEC7E1D-D0D2-476C-A1A8-72DFB6633F46"), + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: "", + }, + Type: job.Cron, + SchemaVersion: 1, + Name: null.StringFrom("test"), + MaxTaskDuration: models.Interval(1 * time.Minute), + }, + want: fmt.Sprintf(` + { + "data":{ + "type":"jobs", + "id":"1", + "attributes":{ + "name": "test", + "schemaVersion": 1, + "type": "cron", + "maxTaskDuration": "1m0s", + "externalJobID":"0eec7e1d-d0d2-476c-a1a8-72dfb6633f46", + "pipelineSpec": { + "id": 1, + "dotDagSource": "", + "jobID": 0 + }, + "cronSpec": { + "schedule": "%s", + "createdAt":"2000-01-01T00:00:00Z", + "updatedAt":"2000-01-01T00:00:00Z" + }, + "fluxMonitorSpec": null, + "gasLimit": null, + "forwardingAllowed": false, + "directRequestSpec": null, + "keeperSpec": null, + "offChainReportingOracleSpec": null, + "offChainReporting2OracleSpec": null, + "vrfSpec": null, + "webhookSpec": null, + "blockhashStoreSpec": null, + "blockHeaderFeederSpec": null, + "bootstrapSpec": null, + "gatewaySpec": null, + "errors": [] + } + } + }`, cronSchedule), + }, + { + name: "webhook spec", + job: job.Job{ + ID: 1, + WebhookSpec: &job.WebhookSpec{ + CreatedAt: timestamp, + UpdatedAt: timestamp, + }, + ExternalJobID: uuid.MustParse("0eec7e1d-d0d2-476c-a1a8-72dfb6633f46"), + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: "", + }, + Type: job.Webhook, + 
SchemaVersion: 1, + Name: null.StringFrom("test"), + MaxTaskDuration: models.Interval(1 * time.Minute), + }, + want: ` + { + "data":{ + "type":"jobs", + "id":"1", + "attributes":{ + "name": "test", + "schemaVersion": 1, + "type": "webhook", + "maxTaskDuration": "1m0s", + "externalJobID":"0eec7e1d-d0d2-476c-a1a8-72dfb6633f46", + "pipelineSpec": { + "id": 1, + "dotDagSource": "", + "jobID": 0 + }, + "webhookSpec": { + "createdAt":"2000-01-01T00:00:00Z", + "updatedAt":"2000-01-01T00:00:00Z" + }, + "fluxMonitorSpec": null, + "gasLimit": null, + "forwardingAllowed": false, + "directRequestSpec": null, + "keeperSpec": null, + "cronSpec": null, + "offChainReportingOracleSpec": null, + "offChainReporting2OracleSpec": null, + "vrfSpec": null, + "blockhashStoreSpec": null, + "blockHeaderFeederSpec": null, + "bootstrapSpec": null, + "gatewaySpec": null, + "errors": [] + } + } + }`, + }, + { + name: "vrf job spec", + job: job.Job{ + ID: 1, + Name: null.StringFrom("vrf_test"), + Type: job.VRF, + SchemaVersion: 1, + ExternalJobID: uuid.MustParse("0eec7e1d-d0d2-476c-a1a8-72dfb6633f47"), + VRFSpec: &job.VRFSpec{ + BatchCoordinatorAddress: &contractAddress, + BatchFulfillmentEnabled: true, + CustomRevertsPipelineEnabled: true, + MinIncomingConfirmations: 1, + CoordinatorAddress: contractAddress, + CreatedAt: timestamp, + UpdatedAt: timestamp, + EVMChainID: evmChainID, + FromAddresses: []ethkey.EIP55Address{fromAddress}, + PublicKey: vrfPubKey, + RequestedConfsDelay: 10, + ChunkSize: 25, + BatchFulfillmentGasMultiplier: 1, + GasLanePrice: evmassets.GWei(200), + VRFOwnerAddress: nil, + }, + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: "", + }, + }, + want: fmt.Sprintf(` + { + "data": { + "type": "jobs", + "id": "1", + "attributes": { + "name": "vrf_test", + "type": "vrf", + "schemaVersion": 1, + "maxTaskDuration": "0s", + "externalJobID": "0eec7e1d-d0d2-476c-a1a8-72dfb6633f47", + "directRequestSpec": null, + "fluxMonitorSpec": null, + "gasLimit": null, + 
"forwardingAllowed": false, + "cronSpec": null, + "offChainReportingOracleSpec": null, + "offChainReporting2OracleSpec": null, + "keeperSpec": null, + "vrfSpec": { + "batchCoordinatorAddress": "%s", + "batchFulfillmentEnabled": true, + "customRevertsPipelineEnabled": true, + "confirmations": 1, + "coordinatorAddress": "%s", + "createdAt": "2000-01-01T00:00:00Z", + "updatedAt": "2000-01-01T00:00:00Z", + "evmChainID": "42", + "fromAddresses": ["%s"], + "pollPeriod": "0s", + "publicKey": "%s", + "requestedConfsDelay": 10, + "requestTimeout": "0s", + "chunkSize": 25, + "batchFulfillmentGasMultiplier": 1, + "backoffInitialDelay": "0s", + "backoffMaxDelay": "0s", + "gasLanePrice": "200 gwei" + }, + "webhookSpec": null, + "blockhashStoreSpec": null, + "blockHeaderFeederSpec": null, + "bootstrapSpec": null, + "pipelineSpec": { + "id": 1, + "jobID": 0, + "dotDagSource": "" + }, + "gatewaySpec": null, + "errors": [] + } + } + }`, contractAddress, contractAddress, fromAddress, vrfPubKey.String()), + }, + { + name: "blockhash store spec", + job: job.Job{ + ID: 1, + BlockhashStoreSpec: &job.BlockhashStoreSpec{ + ID: 1, + CoordinatorV1Address: &v1CoordAddress, + CoordinatorV2Address: &v2CoordAddress, + CoordinatorV2PlusAddress: &v2PlusCoordAddress, + WaitBlocks: 123, + LookbackBlocks: 223, + HeartbeatPeriod: 375 * time.Second, + BlockhashStoreAddress: contractAddress, + PollPeriod: 25 * time.Second, + RunTimeout: 10 * time.Second, + EVMChainID: big.NewI(4), + FromAddresses: []ethkey.EIP55Address{fromAddress}, + TrustedBlockhashStoreAddress: &trustedBlockhashStoreAddress, + TrustedBlockhashStoreBatchSize: trustedBlockhashStoreBatchSize, + }, + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: "", + }, + ExternalJobID: uuid.MustParse("0eec7e1d-d0d2-476c-a1a8-72dfb6633f46"), + Type: job.BlockhashStore, + SchemaVersion: 1, + Name: null.StringFrom("test"), + }, + want: ` + { + "data": { + "type": "jobs", + "id": "1", + "attributes": { + "name": "test", + "type": 
"blockhashstore", + "schemaVersion": 1, + "maxTaskDuration": "0s", + "externalJobID": "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46", + "directRequestSpec": null, + "fluxMonitorSpec": null, + "gasLimit": null, + "forwardingAllowed": false, + "cronSpec": null, + "offChainReportingOracleSpec": null, + "offChainReporting2OracleSpec": null, + "keeperSpec": null, + "vrfSpec": null, + "webhookSpec": null, + "blockhashStoreSpec": { + "coordinatorV1Address": "0x16988483b46e695f6c8D58e6e1461DC703e008e1", + "coordinatorV2Address": "0x2C409DD6D4eBDdA190B5174Cc19616DD13884262", + "coordinatorV2PlusAddress": "0x92B5e28Ac583812874e4271380c7d070C5FB6E6b", + "waitBlocks": 123, + "lookbackBlocks": 223, + "heartbeatPeriod": 375000000000, + "blockhashStoreAddress": "0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba", + "trustedBlockhashStoreAddress": "0x0ad9FE7a58216242a8475ca92F222b0640E26B63", + "trustedBlockhashStoreBatchSize": 20, + "pollPeriod": 25000000000, + "runTimeout": 10000000000, + "evmChainID": "4", + "fromAddresses": ["0xa8037A20989AFcBC51798de9762b351D63ff462e"], + "createdAt": "0001-01-01T00:00:00Z", + "updatedAt": "0001-01-01T00:00:00Z" + }, + "blockHeaderFeederSpec": null, + "bootstrapSpec": null, + "pipelineSpec": { + "id": 1, + "jobID": 0, + "dotDagSource": "" + }, + "gatewaySpec": null, + "errors": [] + } + } + }`, + }, + { + name: "block header feeder spec", + job: job.Job{ + ID: 1, + BlockHeaderFeederSpec: &job.BlockHeaderFeederSpec{ + ID: 1, + CoordinatorV1Address: &v1CoordAddress, + CoordinatorV2Address: &v2CoordAddress, + CoordinatorV2PlusAddress: &v2PlusCoordAddress, + WaitBlocks: 123, + LookbackBlocks: 223, + BlockhashStoreAddress: contractAddress, + BatchBlockhashStoreAddress: batchBHSAddress, + PollPeriod: 25 * time.Second, + RunTimeout: 10 * time.Second, + EVMChainID: big.NewI(4), + FromAddresses: []ethkey.EIP55Address{fromAddress}, + GetBlockhashesBatchSize: 5, + StoreBlockhashesBatchSize: 10, + }, + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: "", + }, + 
ExternalJobID: uuid.MustParse("0eec7e1d-d0d2-476c-a1a8-72dfb6633f47"), + Type: job.BlockHeaderFeeder, + SchemaVersion: 1, + Name: null.StringFrom("blockheaderfeeder"), + }, + want: ` + { + "data": { + "type": "jobs", + "id": "1", + "attributes": { + "name": "blockheaderfeeder", + "type": "blockheaderfeeder", + "schemaVersion": 1, + "maxTaskDuration": "0s", + "externalJobID": "0eec7e1d-d0d2-476c-a1a8-72dfb6633f47", + "directRequestSpec": null, + "fluxMonitorSpec": null, + "gasLimit": null, + "forwardingAllowed": false, + "cronSpec": null, + "offChainReportingOracleSpec": null, + "offChainReporting2OracleSpec": null, + "keeperSpec": null, + "vrfSpec": null, + "webhookSpec": null, + "blockhashStoreSpec": null, + "blockHeaderFeederSpec": { + "coordinatorV1Address": "0x16988483b46e695f6c8D58e6e1461DC703e008e1", + "coordinatorV2Address": "0x2C409DD6D4eBDdA190B5174Cc19616DD13884262", + "coordinatorV2PlusAddress": "0x92B5e28Ac583812874e4271380c7d070C5FB6E6b", + "waitBlocks": 123, + "lookbackBlocks": 223, + "blockhashStoreAddress": "0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba", + "batchBlockhashStoreAddress": "0xF6bB415b033D19EFf24A872a4785c6e1C4426103", + "pollPeriod": 25000000000, + "runTimeout": 10000000000, + "evmChainID": "4", + "fromAddresses": ["0xa8037A20989AFcBC51798de9762b351D63ff462e"], + "getBlockhashesBatchSize": 5, + "storeBlockhashesBatchSize": 10, + "createdAt": "0001-01-01T00:00:00Z", + "updatedAt": "0001-01-01T00:00:00Z" + }, + "bootstrapSpec": null, + "pipelineSpec": { + "id": 1, + "jobID": 0, + "dotDagSource": "" + }, + "gatewaySpec": null, + "errors": [] + } + } + }`, + }, + { + name: "bootstrap spec", + job: job.Job{ + ID: 1, + BootstrapSpec: &job.BootstrapSpec{ + ID: 1, + ContractID: "0x16988483b46e695f6c8D58e6e1461DC703e008e1", + Relay: "evm", + RelayConfig: map[string]interface{}{"chainID": 1337}, + }, + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: "", + }, + ExternalJobID: uuid.MustParse("0eec7e1d-d0d2-476c-a1a8-72dfb6633f46"), + Type: 
job.Bootstrap, + SchemaVersion: 1, + Name: null.StringFrom("test"), + }, + want: ` + { + "data": { + "type": "jobs", + "id": "1", + "attributes": { + "name": "test", + "type": "bootstrap", + "schemaVersion": 1, + "maxTaskDuration": "0s", + "externalJobID": "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46", + "directRequestSpec": null, + "fluxMonitorSpec": null, + "gasLimit": null, + "forwardingAllowed": false, + "cronSpec": null, + "offChainReportingOracleSpec": null, + "offChainReporting2OracleSpec": null, + "keeperSpec": null, + "vrfSpec": null, + "webhookSpec": null, + "blockhashStoreSpec": null, + "blockHeaderFeederSpec": null, + "bootstrapSpec": { + "blockchainTimeout":"0s", + "contractConfigConfirmations":0, + "contractConfigTrackerPollInterval":"0s", + "contractConfigTrackerSubscribeInterval":"0s", + "contractID":"0x16988483b46e695f6c8D58e6e1461DC703e008e1", + "createdAt":"0001-01-01T00:00:00Z", + "relay":"evm", + "relayConfig":{"chainID":1337}, + "updatedAt":"0001-01-01T00:00:00Z" + }, + "pipelineSpec": { + "id": 1, + "jobID": 0, + "dotDagSource": "" + }, + "gatewaySpec": null, + "errors": [] + } + } + }`, + }, + { + name: "gateway spec", + job: job.Job{ + ID: 1, + GatewaySpec: &job.GatewaySpec{ + ID: 3, + GatewayConfig: map[string]interface{}{ + "NodeServerConfig": map[string]interface{}{}, + }, + }, + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: "", + }, + ExternalJobID: uuid.MustParse("0eec7e1d-d0d2-476c-a1a8-72dfb6633f46"), + Type: job.Gateway, + SchemaVersion: 1, + Name: null.StringFrom("gateway test"), + }, + want: ` + { + "data": { + "type": "jobs", + "id": "1", + "attributes": { + "name": "gateway test", + "type": "gateway", + "schemaVersion": 1, + "maxTaskDuration": "0s", + "externalJobID": "0eec7e1d-d0d2-476c-a1a8-72dfb6633f46", + "directRequestSpec": null, + "fluxMonitorSpec": null, + "gasLimit": null, + "forwardingAllowed": false, + "cronSpec": null, + "offChainReportingOracleSpec": null, + "offChainReporting2OracleSpec": null, + "keeperSpec": 
null, + "vrfSpec": null, + "webhookSpec": null, + "blockhashStoreSpec": null, + "blockHeaderFeederSpec": null, + "bootstrapSpec": null, + "gatewaySpec": { + "gatewayConfig": { + "NodeServerConfig": { + } + }, + "createdAt":"0001-01-01T00:00:00Z", + "updatedAt":"0001-01-01T00:00:00Z" + }, + "pipelineSpec": { + "id": 1, + "jobID": 0, + "dotDagSource": "" + }, + "errors": [] + } + } + }`, + }, + { + name: "with errors", + job: job.Job{ + ID: 1, + KeeperSpec: &job.KeeperSpec{ + ContractAddress: contractAddress, + FromAddress: fromAddress, + CreatedAt: timestamp, + UpdatedAt: timestamp, + EVMChainID: evmChainID, + }, + ExternalJobID: uuid.MustParse("0EEC7E1D-D0D2-476C-A1A8-72DFB6633F46"), + PipelineSpec: &pipeline.Spec{ + ID: 1, + DotDagSource: "", + }, + Type: job.Keeper, + SchemaVersion: 1, + Name: null.StringFrom("test"), + MaxTaskDuration: models.Interval(1 * time.Minute), + JobSpecErrors: []job.SpecError{ + { + ID: 200, + JobID: 1, + Description: "some error", + Occurrences: 1, + CreatedAt: timestamp, + UpdatedAt: timestamp, + }, + }, + }, + want: fmt.Sprintf(` + { + "data":{ + "type":"jobs", + "id":"1", + "attributes":{ + "name": "test", + "schemaVersion": 1, + "type": "keeper", + "maxTaskDuration": "1m0s", + "externalJobID":"0eec7e1d-d0d2-476c-a1a8-72dfb6633f46", + "pipelineSpec": { + "id": 1, + "dotDagSource": "", + "jobID": 0 + }, + "keeperSpec": { + "contractAddress": "%s", + "fromAddress": "%s", + "createdAt":"2000-01-01T00:00:00Z", + "updatedAt":"2000-01-01T00:00:00Z", + "evmChainID": "42" + }, + "fluxMonitorSpec": null, + "gasLimit": null, + "forwardingAllowed": false, + "directRequestSpec": null, + "cronSpec": null, + "webhookSpec": null, + "offChainReportingOracleSpec": null, + "offChainReporting2OracleSpec": null, + "vrfSpec": null, + "blockhashStoreSpec": null, + "blockHeaderFeederSpec": null, + "bootstrapSpec": null, + "gatewaySpec": null, + "errors": [{ + "id": 200, + "description": "some error", + "occurrences": 1, + 
"createdAt":"2000-01-01T00:00:00Z", + "updatedAt":"2000-01-01T00:00:00Z" + }] + } + } + }`, contractAddress, fromAddress), + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + r := presenters.NewJobResource(tc.job) + b, err := jsonapi.Marshal(r) + require.NoError(t, err) + + assert.JSONEq(t, tc.want, string(b)) + }) + } +} diff --git a/core/web/presenters/jsonapi.go b/core/web/presenters/jsonapi.go new file mode 100644 index 00000000..d14e24a7 --- /dev/null +++ b/core/web/presenters/jsonapi.go @@ -0,0 +1,43 @@ +package presenters + +import ( + "fmt" + "strconv" +) + +// JAID represents a JSON API ID. +// It implements the api2go MarshalIdentifier and UnmarshalIdentitier interface. +type JAID struct { + ID string `json:"-"` +} + +func NewJAID(id string) JAID { + return JAID{id} +} + +// NewPrefixedJAID prefixes JAID with chain id in %s/%s format. +func NewPrefixedJAID(id string, chainID string) JAID { + return JAID{ID: fmt.Sprintf("%s/%s", chainID, id)} +} + +// NewJAIDInt32 converts an int32 into a JAID +func NewJAIDInt32(id int32) JAID { + return JAID{strconv.Itoa(int(id))} +} + +// NewJAIDInt64 converts an int64 into a JAID +func NewJAIDInt64(id int64) JAID { + return JAID{strconv.Itoa(int(id))} +} + +// GetID implements the api2go MarshalIdentifier interface. +func (jaid JAID) GetID() string { + return jaid.ID +} + +// SetID implements the api2go UnmarshalIdentitier interface. 
+func (jaid *JAID) SetID(value string) error { + jaid.ID = value + + return nil +} diff --git a/core/web/presenters/log.go b/core/web/presenters/log.go new file mode 100644 index 00000000..1d3d5b28 --- /dev/null +++ b/core/web/presenters/log.go @@ -0,0 +1,13 @@ +package presenters + +type ServiceLogConfigResource struct { + JAID + ServiceName []string `json:"serviceName"` + LogLevel []string `json:"logLevel"` + DefaultLogLevel string `json:"defaultLogLevel"` +} + +// GetName implements the api2go EntityNamer interface +func (r ServiceLogConfigResource) GetName() string { + return "serviceLevelLogs" +} diff --git a/core/web/presenters/node_test.go b/core/web/presenters/node_test.go new file mode 100644 index 00000000..03b7150b --- /dev/null +++ b/core/web/presenters/node_test.go @@ -0,0 +1,92 @@ +package presenters + +import ( + "fmt" + "testing" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/types" +) + +func TestNodeResource(t *testing.T) { + var nodeResource NodeResource + var r interface{} + state := "test" + cfg := "cfg" + testCases := []string{"solana", "cosmos", "starknet"} + for _, tc := range testCases { + chainID := fmt.Sprintf("%s chain ID", tc) + nodeName := fmt.Sprintf("%s_node", tc) + + switch tc { + case "evm": + evmNodeResource := NewEVMNodeResource( + types.NodeStatus{ + ChainID: chainID, + Name: nodeName, + Config: cfg, + State: state, + }) + r = evmNodeResource + nodeResource = evmNodeResource.NodeResource + case "solana": + solanaNodeResource := NewSolanaNodeResource( + types.NodeStatus{ + ChainID: chainID, + Name: nodeName, + Config: cfg, + State: state, + }) + r = solanaNodeResource + nodeResource = solanaNodeResource.NodeResource + case "cosmos": + cosmosNodeResource := NewCosmosNodeResource( + types.NodeStatus{ + ChainID: chainID, + Name: nodeName, + Config: cfg, + State: state, + }) + r = cosmosNodeResource + nodeResource = 
cosmosNodeResource.NodeResource + case "starknet": + starknetNodeResource := NewStarkNetNodeResource( + types.NodeStatus{ + ChainID: chainID, + Name: nodeName, + Config: cfg, + State: state, + }) + r = starknetNodeResource + nodeResource = starknetNodeResource.NodeResource + default: + t.Fail() + } + assert.Equal(t, chainID, nodeResource.ChainID) + assert.Equal(t, nodeName, nodeResource.Name) + assert.Equal(t, cfg, nodeResource.Config) + assert.Equal(t, state, nodeResource.State) + + b, err := jsonapi.Marshal(r) + require.NoError(t, err) + + expected := fmt.Sprintf(` + { + "data":{ + "type":"%s_node", + "id":"%s/%s", + "attributes":{ + "chainID":"%s", + "name":"%s", + "config":"%s", + "state":"%s" + } + } + } + `, tc, chainID, nodeName, chainID, nodeName, cfg, state) + assert.JSONEq(t, expected, string(b)) + } +} diff --git a/core/web/presenters/ocr_keys.go b/core/web/presenters/ocr_keys.go new file mode 100644 index 00000000..1e9544a7 --- /dev/null +++ b/core/web/presenters/ocr_keys.go @@ -0,0 +1,83 @@ +package presenters + +import ( + "encoding/hex" + "fmt" + "sort" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" +) + +// OCRKeysBundleResource represents a bundle of OCRs keys as JSONAPI resource +type OCRKeysBundleResource struct { + JAID + OnChainSigningAddress ocrkey.OnChainSigningAddress `json:"onChainSigningAddress"` + OffChainPublicKey ocrkey.OffChainPublicKey `json:"offChainPublicKey"` + ConfigPublicKey ocrkey.ConfigPublicKey `json:"configPublicKey"` +} + +// GetName implements the api2go EntityNamer interface +func (r OCRKeysBundleResource) GetName() string { + return "keyV2s" +} + +func NewOCRKeysBundleResource(key ocrkey.KeyV2) *OCRKeysBundleResource { + return &OCRKeysBundleResource{ + JAID: NewJAID(key.ID()), + OnChainSigningAddress: key.OnChainSigning.Address(), + OffChainPublicKey: key.OffChainSigning.PublicKey(), + ConfigPublicKey: 
key.PublicKeyConfig(), + } +} + +func NewOCRKeysBundleResources(keys []ocrkey.KeyV2) []OCRKeysBundleResource { + rs := []OCRKeysBundleResource{} + for _, key := range keys { + rs = append(rs, *NewOCRKeysBundleResource(key)) + } + + return rs +} + +// OCR2KeysBundleResource represents a bundle of OCRs keys as JSONAPI resource +type OCR2KeysBundleResource struct { + JAID + ChainType string `json:"chainType"` + OnchainPublicKey string `json:"onchainPublicKey"` + OffChainPublicKey string `json:"offchainPublicKey"` + ConfigPublicKey string `json:"configPublicKey"` +} + +// GetName implements the api2go EntityNamer interface +func (r OCR2KeysBundleResource) GetName() string { + return "keyV2s" +} + +func NewOCR2KeysBundleResource(key ocr2key.KeyBundle) *OCR2KeysBundleResource { + configPublic := key.ConfigEncryptionPublicKey() + pubKey := key.OffchainPublicKey() + return &OCR2KeysBundleResource{ + JAID: NewJAID(key.ID()), + ChainType: string(key.ChainType()), + OnchainPublicKey: fmt.Sprintf("ocr2on_%s_%s", key.ChainType(), key.OnChainPublicKey()), + OffChainPublicKey: fmt.Sprintf("ocr2off_%s_%s", key.ChainType(), hex.EncodeToString(pubKey[:])), + ConfigPublicKey: fmt.Sprintf("ocr2cfg_%s_%s", key.ChainType(), hex.EncodeToString(configPublic[:])), + } +} + +func NewOCR2KeysBundleResources(keys []ocr2key.KeyBundle) []OCR2KeysBundleResource { + rs := []OCR2KeysBundleResource{} + for _, key := range keys { + rs = append(rs, *NewOCR2KeysBundleResource(key)) + } + // sort by chain type alphabetical, tie-break with ID + sort.SliceStable(rs, func(i, j int) bool { + if rs[i].ChainType == rs[j].ChainType { + return rs[i].ID < rs[j].ID + } + return rs[i].ChainType < rs[j].ChainType + }) + + return rs +} diff --git a/core/web/presenters/p2p_key.go b/core/web/presenters/p2p_key.go new file mode 100644 index 00000000..d5b77e88 --- /dev/null +++ b/core/web/presenters/p2p_key.go @@ -0,0 +1,36 @@ +package presenters + +import ( + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" +) + +// P2PKeyResource represents a P2P key JSONAPI resource. +type P2PKeyResource struct { + JAID + PeerID string `json:"peerId"` + PubKey string `json:"publicKey"` +} + +// GetName implements the api2go EntityNamer interface +func (P2PKeyResource) GetName() string { + return "encryptedP2PKeys" +} + +func NewP2PKeyResource(key p2pkey.KeyV2) *P2PKeyResource { + r := &P2PKeyResource{ + JAID: JAID{ID: key.ID()}, + PeerID: key.PeerID().String(), + PubKey: key.PublicKeyHex(), + } + + return r +} + +func NewP2PKeyResources(keys []p2pkey.KeyV2) []P2PKeyResource { + rs := []P2PKeyResource{} + for _, key := range keys { + rs = append(rs, *NewP2PKeyResource(key)) + } + + return rs +} diff --git a/core/web/presenters/p2p_key_test.go b/core/web/presenters/p2p_key_test.go new file mode 100644 index 00000000..61718e71 --- /dev/null +++ b/core/web/presenters/p2p_key_test.go @@ -0,0 +1,54 @@ +package presenters + +import ( + "fmt" + "testing" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/keystest" +) + +func TestP2PKeyResource(t *testing.T) { + key := keystest.NewP2PKeyV2(t) + peerID := key.PeerID() + peerIDStr := peerID.String() + + r := NewP2PKeyResource(key) + b, err := jsonapi.Marshal(r) + require.NoError(t, err) + + expected := fmt.Sprintf(` + { + "data":{ + "type":"encryptedP2PKeys", + "id":"%s", + "attributes":{ + "peerId":"%s", + "publicKey": "%s" + } + } + }`, key.ID(), peerIDStr, key.PublicKeyHex()) + + assert.JSONEq(t, expected, string(b)) + + r = NewP2PKeyResource(key) + b, err = jsonapi.Marshal(r) + require.NoError(t, err) + + expected = fmt.Sprintf(` + { + "data": { + "type":"encryptedP2PKeys", + "id":"%s", + "attributes":{ + "peerId":"%s", + "publicKey": "%s" + } + } + }`, key.ID(), peerIDStr, key.PublicKeyHex()) + + assert.JSONEq(t, expected, string(b)) 
+} diff --git a/core/web/presenters/pipeline_run.go b/core/web/presenters/pipeline_run.go new file mode 100644 index 00000000..3da69e54 --- /dev/null +++ b/core/web/presenters/pipeline_run.go @@ -0,0 +1,105 @@ +package presenters + +import ( + "time" + + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +// Corresponds with models.d.ts PipelineRun +type PipelineRunResource struct { + JAID + Outputs []*string `json:"outputs"` + // XXX: Here for backwards compatibility, can be removed later + // Deprecated: Errors + Errors []*string `json:"errors"` + AllErrors []*string `json:"allErrors"` + FatalErrors []*string `json:"fatalErrors"` + Inputs pipeline.JSONSerializable `json:"inputs"` + TaskRuns []PipelineTaskRunResource `json:"taskRuns"` + CreatedAt time.Time `json:"createdAt"` + FinishedAt null.Time `json:"finishedAt"` + PipelineSpec PipelineSpec `json:"pipelineSpec"` +} + +// GetName implements the api2go EntityNamer interface +func (r PipelineRunResource) GetName() string { + return "pipelineRun" +} + +func NewPipelineRunResource(pr pipeline.Run, lggr logger.Logger) PipelineRunResource { + lggr = lggr.Named("PipelineRunResource") + var trs []PipelineTaskRunResource + for i := range pr.PipelineTaskRuns { + trs = append(trs, NewPipelineTaskRunResource(pr.PipelineTaskRuns[i])) + } + + outputs, err := pr.StringOutputs() + if err != nil { + lggr.Errorw(err.Error(), "out", pr.Outputs) + } + + fatalErrors := pr.StringFatalErrors() + + return PipelineRunResource{ + JAID: NewJAIDInt64(pr.ID), + Outputs: outputs, + Errors: fatalErrors, + AllErrors: pr.StringAllErrors(), + FatalErrors: fatalErrors, + Inputs: pr.Inputs, + TaskRuns: trs, + CreatedAt: pr.CreatedAt, + FinishedAt: pr.FinishedAt, + PipelineSpec: NewPipelineSpec(&pr.PipelineSpec), + } +} + +// Corresponds with models.d.ts PipelineTaskRun +type PipelineTaskRunResource struct { + Type pipeline.TaskType `json:"type"` + 
CreatedAt time.Time `json:"createdAt"` + FinishedAt null.Time `json:"finishedAt"` + Output *string `json:"output"` + Error *string `json:"error"` + DotID string `json:"dotId"` +} + +// GetName implements the api2go EntityNamer interface +func (r PipelineTaskRunResource) GetName() string { + return "taskRun" +} + +func NewPipelineTaskRunResource(tr pipeline.TaskRun) PipelineTaskRunResource { + var output *string + if tr.Output.Valid { + outputBytes, _ := tr.Output.MarshalJSON() + outputStr := string(outputBytes) + output = &outputStr + } + var errString *string + if tr.Error.Valid { + errString = &tr.Error.String + } + return PipelineTaskRunResource{ + Type: tr.Type, + CreatedAt: tr.CreatedAt, + FinishedAt: tr.FinishedAt, + Output: output, + Error: errString, + DotID: tr.GetDotID(), + } +} + +func NewPipelineRunResources(prs []pipeline.Run, lggr logger.Logger) []PipelineRunResource { + var out []PipelineRunResource + + for _, pr := range prs { + out = append(out, NewPipelineRunResource(pr, lggr)) + } + + return out +} diff --git a/core/web/presenters/solana_chain.go b/core/web/presenters/solana_chain.go new file mode 100644 index 00000000..52b52e7d --- /dev/null +++ b/core/web/presenters/solana_chain.go @@ -0,0 +1,45 @@ +package presenters + +import ( + "github.com/goplugin/plugin-common/pkg/types" +) + +// SolanaChainResource is an Solana chain JSONAPI resource. +type SolanaChainResource struct { + ChainResource +} + +// GetName implements the api2go EntityNamer interface +func (r SolanaChainResource) GetName() string { + return "solana_chain" +} + +// NewSolanaChainResource returns a new SolanaChainResource for chain. +func NewSolanaChainResource(chain types.ChainStatus) SolanaChainResource { + return SolanaChainResource{ChainResource{ + JAID: NewJAID(chain.ID), + Config: chain.Config, + Enabled: chain.Enabled, + }} +} + +// SolanaNodeResource is a Solana node JSONAPI resource. 
+type SolanaNodeResource struct { + NodeResource +} + +// GetName implements the api2go EntityNamer interface +func (r SolanaNodeResource) GetName() string { + return "solana_node" +} + +// NewSolanaNodeResource returns a new SolanaNodeResource for node. +func NewSolanaNodeResource(node types.NodeStatus) SolanaNodeResource { + return SolanaNodeResource{NodeResource{ + JAID: NewPrefixedJAID(node.Name, node.ChainID), + ChainID: node.ChainID, + Name: node.Name, + State: node.State, + Config: node.Config, + }} +} diff --git a/core/web/presenters/solana_key.go b/core/web/presenters/solana_key.go new file mode 100644 index 00000000..43a26721 --- /dev/null +++ b/core/web/presenters/solana_key.go @@ -0,0 +1,34 @@ +package presenters + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" +) + +// SolanaKeyResource represents a Solana key JSONAPI resource. +type SolanaKeyResource struct { + JAID + PubKey string `json:"publicKey"` +} + +// GetName implements the api2go EntityNamer interface +func (SolanaKeyResource) GetName() string { + return "encryptedSolanaKeys" +} + +func NewSolanaKeyResource(key solkey.Key) *SolanaKeyResource { + r := &SolanaKeyResource{ + JAID: JAID{ID: key.ID()}, + PubKey: key.PublicKeyStr(), + } + + return r +} + +func NewSolanaKeyResources(keys []solkey.Key) []SolanaKeyResource { + rs := []SolanaKeyResource{} + for _, key := range keys { + rs = append(rs, *NewSolanaKeyResource(key)) + } + + return rs +} diff --git a/core/web/presenters/solana_msg.go b/core/web/presenters/solana_msg.go new file mode 100644 index 00000000..3acf2aac --- /dev/null +++ b/core/web/presenters/solana_msg.go @@ -0,0 +1,23 @@ +package presenters + +// SolanaMsgResource repesents a Solana message JSONAPI resource. 
+type SolanaMsgResource struct { + JAID + ChainID string + From string `json:"from"` + To string `json:"to"` + Amount uint64 `json:"amount"` +} + +// GetName implements the api2go EntityNamer interface +func (SolanaMsgResource) GetName() string { + return "solana_messages" +} + +// NewSolanaMsgResource returns a new partial SolanaMsgResource. +func NewSolanaMsgResource(id string, chainID string) SolanaMsgResource { + return SolanaMsgResource{ + JAID: NewPrefixedJAID(id, chainID), + ChainID: chainID, + } +} diff --git a/core/web/presenters/starknet_chain.go b/core/web/presenters/starknet_chain.go new file mode 100644 index 00000000..c30a88f4 --- /dev/null +++ b/core/web/presenters/starknet_chain.go @@ -0,0 +1,45 @@ +package presenters + +import ( + "github.com/goplugin/plugin-common/pkg/types" +) + +// StarkNetChainResource is an StarkNet chain JSONAPI resource. +type StarkNetChainResource struct { + ChainResource +} + +// GetName implements the api2go EntityNamer interface +func (r StarkNetChainResource) GetName() string { + return "starknet_chain" +} + +// NewStarkNetChainResource returns a new StarkNetChainResource for chain. +func NewStarkNetChainResource(chain types.ChainStatus) StarkNetChainResource { + return StarkNetChainResource{ChainResource{ + JAID: NewJAID(chain.ID), + Config: chain.Config, + Enabled: chain.Enabled, + }} +} + +// StarkNetNodeResource is a StarkNet node JSONAPI resource. +type StarkNetNodeResource struct { + NodeResource +} + +// GetName implements the api2go EntityNamer interface +func (r StarkNetNodeResource) GetName() string { + return "starknet_node" +} + +// NewStarkNetNodeResource returns a new StarkNetNodeResource for node. 
+func NewStarkNetNodeResource(node types.NodeStatus) StarkNetNodeResource { + return StarkNetNodeResource{NodeResource{ + JAID: NewPrefixedJAID(node.Name, node.ChainID), + ChainID: node.ChainID, + Name: node.Name, + State: node.State, + Config: node.Config, + }} +} diff --git a/core/web/presenters/starknet_key.go b/core/web/presenters/starknet_key.go new file mode 100644 index 00000000..253f4844 --- /dev/null +++ b/core/web/presenters/starknet_key.go @@ -0,0 +1,34 @@ +package presenters + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/starkkey" +) + +// StarkNetKeyResource represents a StarkNet key JSONAPI resource. +type StarkNetKeyResource struct { + JAID + StarkKey string `json:"starkPubKey"` +} + +// GetName implements the api2go EntityNamer interface +func (StarkNetKeyResource) GetName() string { + return "encryptedStarkNetKeys" +} + +func NewStarkNetKeyResource(key starkkey.Key) *StarkNetKeyResource { + r := &StarkNetKeyResource{ + JAID: JAID{ID: key.ID()}, + StarkKey: key.StarkKeyStr(), + } + + return r +} + +func NewStarkNetKeyResources(keys []starkkey.Key) []StarkNetKeyResource { + rs := []StarkNetKeyResource{} + for _, key := range keys { + rs = append(rs, *NewStarkNetKeyResource(key)) + } + + return rs +} diff --git a/core/web/presenters/user.go b/core/web/presenters/user.go new file mode 100644 index 00000000..67d2a7b6 --- /dev/null +++ b/core/web/presenters/user.go @@ -0,0 +1,48 @@ +package presenters + +import ( + "time" + + "github.com/goplugin/pluginv3.0/v2/core/sessions" +) + +// UserResource represents a User JSONAPI resource. 
+type UserResource struct { + JAID + Email string `json:"email"` + Role sessions.UserRole `json:"role"` + HasActiveApiToken string `json:"hasActiveApiToken"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` +} + +// GetName implements the api2go EntityNamer interface +func (r UserResource) GetName() string { + return "users" +} + +// NewUserResource constructs a new UserResource. +// +// A User does not have an ID primary key, so we must use the email +func NewUserResource(u sessions.User) *UserResource { + hasToken := "false" + if u.TokenKey.Valid { + hasToken = "true" + } + return &UserResource{ + JAID: NewJAID(u.Email), + Email: u.Email, + Role: u.Role, + HasActiveApiToken: hasToken, + CreatedAt: u.CreatedAt, + UpdatedAt: u.UpdatedAt, + } +} + +func NewUserResources(users []sessions.User) []UserResource { + us := []UserResource{} + for _, user := range users { + us = append(us, *NewUserResource(user)) + } + return us +} diff --git a/core/web/presenters/user_test.go b/core/web/presenters/user_test.go new file mode 100644 index 00000000..6ba3fc0d --- /dev/null +++ b/core/web/presenters/user_test.go @@ -0,0 +1,48 @@ +package presenters + +import ( + "testing" + "time" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/sessions" +) + +func TestUserResource(t *testing.T) { + var ( + ts = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + ) + + user := sessions.User{ + Email: "notreal@fakeemail.ch", + CreatedAt: ts, + UpdatedAt: ts, + Role: sessions.UserRoleAdmin, + } + + r := NewUserResource(user) + + b, err := jsonapi.Marshal(r) + require.NoError(t, err) + + expected := ` + { + "data": { + "type": "users", + "id": "notreal@fakeemail.ch", + "attributes": { + "email": "notreal@fakeemail.ch", + "createdAt": "2000-01-01T00:00:00Z", + "updatedAt": "2000-01-01T00:00:00Z", + "hasActiveApiToken": "false", + "role": "admin" + } + } + } 
+ ` + + assert.JSONEq(t, expected, string(b)) +} diff --git a/core/web/presenters/vrf_key.go b/core/web/presenters/vrf_key.go new file mode 100644 index 00000000..331bf23d --- /dev/null +++ b/core/web/presenters/vrf_key.go @@ -0,0 +1,40 @@ +package presenters + +import ( + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" +) + +type VRFKeyResource struct { + JAID + Compressed string `json:"compressed"` + Uncompressed string `json:"uncompressed"` + Hash string `json:"hash"` +} + +// GetName implements the api2go EntityNamer interface +func (VRFKeyResource) GetName() string { + return "encryptedVRFKeys" +} + +func NewVRFKeyResource(key vrfkey.KeyV2, lggr logger.Logger) *VRFKeyResource { + uncompressed, err := key.PublicKey.StringUncompressed() + if err != nil { + lggr.Errorw("Unable to get uncompressed pk", "err", err) + } + return &VRFKeyResource{ + JAID: NewJAID(key.PublicKey.String()), + Compressed: key.PublicKey.String(), + Uncompressed: uncompressed, + Hash: key.PublicKey.MustHash().String(), + } +} + +func NewVRFKeyResources(keys []vrfkey.KeyV2, lggr logger.Logger) []VRFKeyResource { + rs := []VRFKeyResource{} + for _, key := range keys { + rs = append(rs, *NewVRFKeyResource(key, lggr)) + } + + return rs +} diff --git a/core/web/presenters/webauthn.go b/core/web/presenters/webauthn.go new file mode 100644 index 00000000..dc6f4d81 --- /dev/null +++ b/core/web/presenters/webauthn.go @@ -0,0 +1,25 @@ +package presenters + +import ( + "github.com/go-webauthn/webauthn/protocol" +) + +// RegistrationSettings represents an enrollment settings object +type RegistrationSettings struct { + JAID + Settings protocol.CredentialCreation `json:"settings"` +} + +// GetName implements the api2go EntityNamer interface +func (r RegistrationSettings) GetName() string { + return "registrationsettings" +} + +// NewRegistrationSettings creates a new structure to enroll a new hardware +// key for authentication 
+func NewRegistrationSettings(settings protocol.CredentialCreation) *RegistrationSettings { + return &RegistrationSettings{ + JAID: NewJAID("registration_settings"), + Settings: settings, + } +} diff --git a/core/web/replay_controller.go b/core/web/replay_controller.go new file mode 100644 index 00000000..bcbdb203 --- /dev/null +++ b/core/web/replay_controller.go @@ -0,0 +1,91 @@ +package web + +import ( + "net/http" + "strconv" + + "github.com/gin-gonic/gin" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +type ReplayController struct { + App plugin.Application +} + +// ReplayFromBlock causes the node to process blocks again from the given block number +// Example: +// +// "/v2/replay_from_block/:number" +func (bdc *ReplayController) ReplayFromBlock(c *gin.Context) { + if c.Param("number") == "" { + jsonAPIError(c, http.StatusUnprocessableEntity, errors.New("missing 'number' parameter")) + return + } + + // check if "force" query string parameter provided + var force bool + var err error + if fb := c.Query("force"); fb != "" { + force, err = strconv.ParseBool(fb) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, errors.Wrap(err, "boolean value required for 'force' query string param")) + return + } + } + + blockNumber, err := strconv.ParseInt(c.Param("number"), 10, 0) + if err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + if blockNumber < 0 { + jsonAPIError(c, http.StatusUnprocessableEntity, errors.Errorf("block number cannot be negative: %v", blockNumber)) + return + } + + chain, err := getChain(bdc.App.GetRelayers().LegacyEVMChains(), c.Query("evmChainID")) + if err != nil { + if errors.Is(err, ErrInvalidChainID) || errors.Is(err, ErrMultipleChains) || errors.Is(err, ErrMissingChainID) { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + jsonAPIError(c, http.StatusInternalServerError, 
err) + return + } + chainID := chain.ID() + + if err := bdc.App.ReplayFromBlock(chainID, uint64(blockNumber), force); err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + response := ReplayResponse{ + Message: "Replay started", + EVMChainID: big.New(chainID), + } + jsonAPIResponse(c, &response, "response") +} + +type ReplayResponse struct { + Message string `json:"message"` + EVMChainID *big.Big `json:"evmChainID"` +} + +// GetID returns the jsonapi ID. +func (s ReplayResponse) GetID() string { + return "replayID" +} + +// GetName returns the collection name for jsonapi. +func (ReplayResponse) GetName() string { + return "replay" +} + +// SetID is used to conform to the UnmarshallIdentifier interface for +// deserializing from jsonapi documents. +func (*ReplayResponse) SetID(string) error { + return nil +} diff --git a/core/web/resolver/api_token.go b/core/web/resolver/api_token.go new file mode 100644 index 00000000..7cbb7277 --- /dev/null +++ b/core/web/resolver/api_token.go @@ -0,0 +1,109 @@ +package resolver + +import "github.com/goplugin/pluginv3.0/v2/core/auth" + +type APITokenResolver struct { + token auth.Token +} + +func NewAPIToken(token auth.Token) *APITokenResolver { + return &APITokenResolver{token} +} + +func (r *APITokenResolver) AccessKey() string { + return r.token.AccessKey +} + +func (r *APITokenResolver) Secret() string { + return r.token.Secret +} + +// -- CreateAPIToken Mutation -- + +type CreateAPITokenPayloadResolver struct { + token *auth.Token + inputErrs map[string]string +} + +func NewCreateAPITokenPayload(token *auth.Token, inputErrs map[string]string) *CreateAPITokenPayloadResolver { + return &CreateAPITokenPayloadResolver{token, inputErrs} +} + +func (r *CreateAPITokenPayloadResolver) ToCreateAPITokenSuccess() (*CreateAPITokenSuccessResolver, bool) { + if r.inputErrs != nil { + return nil, false + } + + return NewCreateAPITokenSuccess(r.token), true +} + +func (r *CreateAPITokenPayloadResolver) 
ToInputErrors() (*InputErrorsResolver, bool) { + if r.inputErrs != nil { + var errs []*InputErrorResolver + + for path, message := range r.inputErrs { + errs = append(errs, NewInputError(path, message)) + } + + return NewInputErrors(errs), true + } + + return nil, false +} + +type CreateAPITokenSuccessResolver struct { + token *auth.Token +} + +func NewCreateAPITokenSuccess(token *auth.Token) *CreateAPITokenSuccessResolver { + return &CreateAPITokenSuccessResolver{token} +} + +func (r *CreateAPITokenSuccessResolver) Token() *APITokenResolver { + return NewAPIToken(*r.token) +} + +// -- DeleteAPIToken Mutation -- + +type DeleteAPITokenPayloadResolver struct { + token *auth.Token + inputErrs map[string]string +} + +func NewDeleteAPITokenPayload(token *auth.Token, inputErrs map[string]string) *DeleteAPITokenPayloadResolver { + return &DeleteAPITokenPayloadResolver{token, inputErrs} +} + +func (r *DeleteAPITokenPayloadResolver) ToDeleteAPITokenSuccess() (*DeleteAPITokenSuccessResolver, bool) { + if r.inputErrs != nil { + return nil, false + } + + return NewDeleteAPITokenSuccess(r.token), true +} + +func (r *DeleteAPITokenPayloadResolver) ToInputErrors() (*InputErrorsResolver, bool) { + if r.inputErrs != nil { + var errs []*InputErrorResolver + + for path, message := range r.inputErrs { + errs = append(errs, NewInputError(path, message)) + } + + return NewInputErrors(errs), true + } + + return nil, false +} + +type DeleteAPITokenSuccessResolver struct { + token *auth.Token +} + +func NewDeleteAPITokenSuccess(token *auth.Token) *DeleteAPITokenSuccessResolver { + return &DeleteAPITokenSuccessResolver{token} +} + +func (r *DeleteAPITokenSuccessResolver) Token() *APITokenResolver { + return NewAPIToken(*r.token) +} diff --git a/core/web/resolver/api_token_test.go b/core/web/resolver/api_token_test.go new file mode 100644 index 00000000..d237282d --- /dev/null +++ b/core/web/resolver/api_token_test.go @@ -0,0 +1,324 @@ +package resolver + +import ( + "testing" + + gqlerrors 
"github.com/graph-gophers/graphql-go/errors" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/utils" + webauth "github.com/goplugin/pluginv3.0/v2/core/web/auth" +) + +func TestResolver_CreateAPIToken(t *testing.T) { + t.Parallel() + + defaultPassword := "my-password" + mutation := ` + mutation CreateAPIToken($input: CreateAPITokenInput!) { + createAPIToken(input: $input) { + ... on CreateAPITokenSuccess { + token { + accessKey + secret + } + } + ... on InputErrors { + errors { + path + message + code + } + } + } + }` + variables := map[string]interface{}{ + "input": map[string]interface{}{ + "password": defaultPassword, + }, + } + variablesIncorrect := map[string]interface{}{ + "input": map[string]interface{}{ + "password": "wrong-password", + }, + } + gError := errors.New("error") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "createAPIToken"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + session, ok := webauth.GetGQLAuthenticatedSession(f.Ctx) + require.True(t, ok) + require.NotNil(t, session) + + pwd, err := utils.HashPassword(defaultPassword) + require.NoError(t, err) + + session.User.HashedPassword = pwd + + f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil) + f.Mocks.authProvider.On("TestPassword", session.User.Email, defaultPassword).Return(nil) + f.Mocks.authProvider.On("CreateAndSetAuthToken", session.User).Return(&auth.Token{ + Secret: "new-secret", + AccessKey: "new-access-key", + }, nil) + f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider) + }, + query: mutation, + variables: variables, + result: ` + { + "createAPIToken": { + "token": { + "accessKey": "new-access-key", + "secret": "new-secret" + } + } + }`, + }, + { + name: "input errors", + authenticated: true, + before: func(f *gqlTestFramework) { + 
session, ok := webauth.GetGQLAuthenticatedSession(f.Ctx) + require.True(t, ok) + require.NotNil(t, session) + + f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil) + f.Mocks.authProvider.On("TestPassword", session.User.Email, "wrong-password").Return(gError) + f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider) + }, + query: mutation, + variables: variablesIncorrect, + result: ` + { + "createAPIToken": { + "errors": [{ + "path": "password", + "message": "incorrect password", + "code": "INVALID_INPUT" + }] + } + }`, + }, + { + name: "failed to find user", + authenticated: true, + before: func(f *gqlTestFramework) { + session, ok := webauth.GetGQLAuthenticatedSession(f.Ctx) + require.True(t, ok) + require.NotNil(t, session) + + pwd, err := utils.HashPassword(defaultPassword) + require.NoError(t, err) + + session.User.HashedPassword = pwd + + f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, gError) + f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"createAPIToken"}, + Message: "error", + }, + }, + }, + { + name: "failed to generate token", + authenticated: true, + before: func(f *gqlTestFramework) { + session, ok := webauth.GetGQLAuthenticatedSession(f.Ctx) + require.True(t, ok) + require.NotNil(t, session) + + pwd, err := utils.HashPassword(defaultPassword) + require.NoError(t, err) + + session.User.HashedPassword = pwd + + f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil) + f.Mocks.authProvider.On("TestPassword", session.User.Email, defaultPassword).Return(nil) + f.Mocks.authProvider.On("CreateAndSetAuthToken", session.User).Return(nil, gError) + f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider) + }, + query: mutation, + variables: variables, + result: `null`, + 
errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"createAPIToken"}, + Message: "error", + }, + }, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_DeleteAPIToken(t *testing.T) { + t.Parallel() + + defaultPassword := "my-password" + mutation := ` + mutation DeleteAPIToken($input: DeleteAPITokenInput!) { + deleteAPIToken(input: $input) { + ... on DeleteAPITokenSuccess { + token { + accessKey + } + } + ... on InputErrors { + errors { + path + message + code + } + } + } + }` + variables := map[string]interface{}{ + "input": map[string]interface{}{ + "password": defaultPassword, + }, + } + variablesIncorrect := map[string]interface{}{ + "input": map[string]interface{}{ + "password": "wrong-password", + }, + } + gError := errors.New("error") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "deleteAPIToken"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + session, ok := webauth.GetGQLAuthenticatedSession(f.Ctx) + require.True(t, ok) + require.NotNil(t, session) + + pwd, err := utils.HashPassword(defaultPassword) + require.NoError(t, err) + + session.User.HashedPassword = pwd + err = session.User.TokenKey.UnmarshalText([]byte("new-access-key")) + require.NoError(t, err) + + f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil) + f.Mocks.authProvider.On("TestPassword", session.User.Email, defaultPassword).Return(nil) + f.Mocks.authProvider.On("DeleteAuthToken", session.User).Return(nil) + f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider) + }, + query: mutation, + variables: variables, + result: ` + { + "deleteAPIToken": { + "token": { + "accessKey": "new-access-key" + } + } + }`, + }, + { + name: "input errors", + authenticated: true, + before: func(f *gqlTestFramework) { + session, ok := webauth.GetGQLAuthenticatedSession(f.Ctx) + require.True(t, ok) + 
require.NotNil(t, session) + + f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil) + f.Mocks.authProvider.On("TestPassword", session.User.Email, "wrong-password").Return(gError) + f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider) + }, + query: mutation, + variables: variablesIncorrect, + result: ` + { + "deleteAPIToken": { + "errors": [{ + "path": "password", + "message": "incorrect password", + "code": "INVALID_INPUT" + }] + } + }`, + }, + { + name: "failed to find user", + authenticated: true, + before: func(f *gqlTestFramework) { + session, ok := webauth.GetGQLAuthenticatedSession(f.Ctx) + require.True(t, ok) + require.NotNil(t, session) + + pwd, err := utils.HashPassword(defaultPassword) + require.NoError(t, err) + + session.User.HashedPassword = pwd + + f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, gError) + f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"deleteAPIToken"}, + Message: "error", + }, + }, + }, + { + name: "failed to delete token", + authenticated: true, + before: func(f *gqlTestFramework) { + session, ok := webauth.GetGQLAuthenticatedSession(f.Ctx) + require.True(t, ok) + require.NotNil(t, session) + + pwd, err := utils.HashPassword(defaultPassword) + require.NoError(t, err) + + session.User.HashedPassword = pwd + + f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil) + f.Mocks.authProvider.On("TestPassword", session.User.Email, defaultPassword).Return(nil) + f.Mocks.authProvider.On("DeleteAuthToken", session.User).Return(gError) + f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: 
[]interface{}{"deleteAPIToken"}, + Message: "error", + }, + }, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/auth.go b/core/web/resolver/auth.go new file mode 100644 index 00000000..6714f7cf --- /dev/null +++ b/core/web/resolver/auth.go @@ -0,0 +1,75 @@ +package resolver + +import ( + "context" + "fmt" + + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/web/auth" +) + +// Authenticates the user from the session cookie, presence of user inherently provides 'view' access. +func authenticateUser(ctx context.Context) error { + if _, ok := auth.GetGQLAuthenticatedSession(ctx); !ok { + return unauthorizedError{} + } + return nil +} + +// Authenticates the user from the session cookie and asserts at least 'run' role. +func authenticateUserCanRun(ctx context.Context) error { + session, ok := auth.GetGQLAuthenticatedSession(ctx) + if !ok { + return unauthorizedError{} + } + if session.User.Role == sessions.UserRoleView { + return RoleNotPermittedErr{session.User.Role} + } + return nil +} + +// Authenticates the user from the session cookie and asserts at least 'edit' role. 
+func authenticateUserCanEdit(ctx context.Context) error { + session, ok := auth.GetGQLAuthenticatedSession(ctx) + if !ok { + return unauthorizedError{} + } + switch session.User.Role { + case sessions.UserRoleView, sessions.UserRoleRun: + return RoleNotPermittedErr{session.User.Role} + default: + } + return nil +} + +// Authenticates the user from the session cookie and asserts has 'admin' role +func authenticateUserIsAdmin(ctx context.Context) error { + session, ok := auth.GetGQLAuthenticatedSession(ctx) + if !ok { + return unauthorizedError{} + } + if session.User.Role != sessions.UserRoleAdmin { + return RoleNotPermittedErr{session.User.Role} + } + return nil +} + +type unauthorizedError struct{} + +func (e unauthorizedError) Error() string { + return "Unauthorized" +} + +func (e unauthorizedError) Extensions() map[string]interface{} { + return map[string]interface{}{ + "code": "UNAUTHORIZED", + } +} + +type RoleNotPermittedErr struct { + Role sessions.UserRole +} + +func (e RoleNotPermittedErr) Error() string { + return fmt.Sprintf("Not permitted with current role: %s", e.Role) +} diff --git a/core/web/resolver/bridge.go b/core/web/resolver/bridge.go new file mode 100644 index 00000000..b60acfb8 --- /dev/null +++ b/core/web/resolver/bridge.go @@ -0,0 +1,259 @@ +package resolver + +import ( + "github.com/graph-gophers/graphql-go" + + "github.com/goplugin/pluginv3.0/v2/core/bridges" +) + +// BridgeResolver resolves the Bridge type. +type BridgeResolver struct { + bridge bridges.BridgeType +} + +func NewBridge(bridge bridges.BridgeType) *BridgeResolver { + return &BridgeResolver{bridge: bridge} +} + +func NewBridges(bridges []bridges.BridgeType) []*BridgeResolver { + var resolvers []*BridgeResolver + for _, b := range bridges { + resolvers = append(resolvers, NewBridge(b)) + } + + return resolvers +} + +// ID resolves the bridge's name as the id (Bridge does not have an id). 
+func (r *BridgeResolver) ID() graphql.ID { + return graphql.ID(r.bridge.Name.String()) +} + +// Name resolves the bridge's name. +func (r *BridgeResolver) Name() string { + return r.bridge.Name.String() +} + +// URL resolves the bridge's url. +func (r *BridgeResolver) URL() string { + return r.bridge.URL.String() +} + +// Confirmations resolves the bridge's url. +func (r *BridgeResolver) Confirmations() int32 { + return int32(r.bridge.Confirmations) +} + +// OutgoingToken resolves the bridge's outgoing token. +func (r *BridgeResolver) OutgoingToken() string { + return r.bridge.OutgoingToken +} + +// MinimumContractPayment resolves the bridge's minimum contract payment. +func (r *BridgeResolver) MinimumContractPayment() string { + return r.bridge.MinimumContractPayment.String() +} + +// CreatedAt resolves the bridge's created at field. +func (r *BridgeResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.bridge.CreatedAt} +} + +// BridgePayloadResolver resolves a single bridge response +type BridgePayloadResolver struct { + bridge bridges.BridgeType + NotFoundErrorUnionType +} + +func NewBridgePayload(bridge bridges.BridgeType, err error) *BridgePayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "bridge not found"} + + return &BridgePayloadResolver{bridge: bridge, NotFoundErrorUnionType: e} +} + +// ToBridge implements the Bridge union type of the payload +func (r *BridgePayloadResolver) ToBridge() (*BridgeResolver, bool) { + if r.err == nil { + return NewBridge(r.bridge), true + } + + return nil, false +} + +// BridgesPayloadResolver resolves a page of bridges +type BridgesPayloadResolver struct { + bridges []bridges.BridgeType + total int32 +} + +func NewBridgesPayload(bridges []bridges.BridgeType, total int32) *BridgesPayloadResolver { + return &BridgesPayloadResolver{ + bridges: bridges, + total: total, + } +} + +// Results returns the bridges. 
+func (r *BridgesPayloadResolver) Results() []*BridgeResolver { + return NewBridges(r.bridges) +} + +// Metadata returns the pagination metadata. +func (r *BridgesPayloadResolver) Metadata() *PaginationMetadataResolver { + return NewPaginationMetadata(r.total) +} + +// CreateBridgePayloadResolver +type CreateBridgePayloadResolver struct { + bridge bridges.BridgeType + incomingToken string +} + +func NewCreateBridgePayload(bridge bridges.BridgeType, incomingToken string) *CreateBridgePayloadResolver { + return &CreateBridgePayloadResolver{ + bridge: bridge, + incomingToken: incomingToken, + } +} + +func (r *CreateBridgePayloadResolver) ToCreateBridgeSuccess() (*CreateBridgeSuccessResolver, bool) { + return NewCreateBridgeSuccessResolver(r.bridge, r.incomingToken), true +} + +type CreateBridgeSuccessResolver struct { + bridge bridges.BridgeType + incomingToken string +} + +func NewCreateBridgeSuccessResolver(bridge bridges.BridgeType, incomingToken string) *CreateBridgeSuccessResolver { + return &CreateBridgeSuccessResolver{ + bridge: bridge, + incomingToken: incomingToken, + } +} + +// Bridge resolves the bridge. +func (r *CreateBridgeSuccessResolver) Bridge() *BridgeResolver { + return NewBridge(r.bridge) +} + +// Token resolves the bridge's incoming token. 
+func (r *CreateBridgeSuccessResolver) IncomingToken() string { + return r.incomingToken +} + +type UpdateBridgePayloadResolver struct { + bridge *bridges.BridgeType + NotFoundErrorUnionType +} + +func NewUpdateBridgePayload(bridge *bridges.BridgeType, err error) *UpdateBridgePayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "bridge not found"} + + return &UpdateBridgePayloadResolver{bridge: bridge, NotFoundErrorUnionType: e} +} + +func (r *UpdateBridgePayloadResolver) ToUpdateBridgeSuccess() (*UpdateBridgeSuccessResolver, bool) { + if r.bridge != nil { + return NewUpdateBridgeSuccess(*r.bridge), true + } + + return nil, false +} + +// UpdateBridgePayloadResolver resolves +type UpdateBridgeSuccessResolver struct { + bridge bridges.BridgeType +} + +func NewUpdateBridgeSuccess(bridge bridges.BridgeType) *UpdateBridgeSuccessResolver { + return &UpdateBridgeSuccessResolver{ + bridge: bridge, + } +} + +// Bridge resolves the success payload's bridge. +func (r *UpdateBridgeSuccessResolver) Bridge() *BridgeResolver { + return NewBridge(r.bridge) +} + +// -- DeleteBridge mutation -- + +type DeleteBridgePayloadResolver struct { + bridge *bridges.BridgeType + NotFoundErrorUnionType +} + +func NewDeleteBridgePayload(bridge *bridges.BridgeType, err error) *DeleteBridgePayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "bridge not found"} + + return &DeleteBridgePayloadResolver{bridge: bridge, NotFoundErrorUnionType: e} +} + +func (r *DeleteBridgePayloadResolver) ToDeleteBridgeSuccess() (*DeleteBridgeSuccessResolver, bool) { + if r.bridge != nil { + return NewDeleteBridgeSuccess(r.bridge), true + } + + return nil, false +} + +func (r *DeleteBridgePayloadResolver) ToDeleteBridgeConflictError() (*DeleteBridgeConflictErrorResolver, bool) { + if r.err != nil { + return NewDeleteBridgeConflictError(r.err.Error()), true + } + + return nil, false +} + +func (r *DeleteBridgePayloadResolver) ToDeleteBridgeInvalidNameError() 
(*DeleteBridgeInvalidNameErrorResolver, bool) { + if r.err != nil { + return NewDeleteBridgeInvalidNameError(r.err.Error()), true + } + + return nil, false +} + +type DeleteBridgeSuccessResolver struct { + bridge *bridges.BridgeType +} + +func NewDeleteBridgeSuccess(bridge *bridges.BridgeType) *DeleteBridgeSuccessResolver { + return &DeleteBridgeSuccessResolver{bridge: bridge} +} + +func (r *DeleteBridgeSuccessResolver) Bridge() *BridgeResolver { + return NewBridge(*r.bridge) +} + +type DeleteBridgeConflictErrorResolver struct { + message string +} + +func NewDeleteBridgeConflictError(message string) *DeleteBridgeConflictErrorResolver { + return &DeleteBridgeConflictErrorResolver{message: message} +} + +func (r *DeleteBridgeConflictErrorResolver) Message() string { + return r.message +} + +func (r *DeleteBridgeConflictErrorResolver) Code() ErrorCode { + return ErrorCodeStatusConflict +} + +type DeleteBridgeInvalidNameErrorResolver struct { + message string +} + +func NewDeleteBridgeInvalidNameError(message string) *DeleteBridgeInvalidNameErrorResolver { + return &DeleteBridgeInvalidNameErrorResolver{message: message} +} + +func (r *DeleteBridgeInvalidNameErrorResolver) Message() string { + return r.message +} + +func (r *DeleteBridgeInvalidNameErrorResolver) Code() ErrorCode { + return ErrorCodeUnprocessable +} diff --git a/core/web/resolver/bridge_test.go b/core/web/resolver/bridge_test.go new file mode 100644 index 00000000..0c08e826 --- /dev/null +++ b/core/web/resolver/bridge_test.go @@ -0,0 +1,499 @@ +package resolver + +import ( + "database/sql" + "encoding/json" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +func Test_Bridges(t *testing.T) { + t.Parallel() + + var ( + query = ` + query GetBridges { + 
bridges { + results { + id + name + url + confirmations + outgoingToken + minimumContractPayment + createdAt + } + metadata { + total + } + } + }` + ) + + bridgeURL, err := url.Parse("https://external.adapter") + require.NoError(t, err) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "bridges"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("BridgeORM").Return(f.Mocks.bridgeORM) + f.Mocks.bridgeORM.On("BridgeTypes", PageDefaultOffset, PageDefaultLimit).Return([]bridges.BridgeType{ + { + Name: "bridge1", + URL: models.WebURL(*bridgeURL), + Confirmations: uint32(1), + OutgoingToken: "outgoingToken", + MinimumContractPayment: assets.NewLinkFromJuels(1), + CreatedAt: f.Timestamp(), + }, + }, 1, nil) + }, + query: query, + result: ` + { + "bridges": { + "results": [{ + "id": "bridge1", + "name": "bridge1", + "url": "https://external.adapter", + "confirmations": 1, + "outgoingToken": "outgoingToken", + "minimumContractPayment": "1", + "createdAt": "2021-01-01T00:00:00Z" + }], + "metadata": { + "total": 1 + } + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func Test_Bridge(t *testing.T) { + t.Parallel() + + var ( + query = ` + query GetBridge{ + bridge(id: "bridge1") { + ... on Bridge { + id + name + url + confirmations + outgoingToken + minimumContractPayment + createdAt + } + ... 
on NotFoundError { + message + code + } + } + }` + + name = bridges.BridgeName("bridge1") + ) + bridgeURL, err := url.Parse("https://external.adapter") + require.NoError(t, err) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "bridge"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("BridgeORM").Return(f.Mocks.bridgeORM) + f.Mocks.bridgeORM.On("FindBridge", name).Return(bridges.BridgeType{ + Name: name, + URL: models.WebURL(*bridgeURL), + Confirmations: uint32(1), + OutgoingToken: "outgoingToken", + MinimumContractPayment: assets.NewLinkFromJuels(1), + CreatedAt: f.Timestamp(), + }, nil) + }, + query: query, + result: `{ + "bridge": { + "id": "bridge1", + "name": "bridge1", + "url": "https://external.adapter", + "confirmations": 1, + "outgoingToken": "outgoingToken", + "minimumContractPayment": "1", + "createdAt": "2021-01-01T00:00:00Z" + } + }`, + }, + { + name: "not found", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("BridgeORM").Return(f.Mocks.bridgeORM) + f.Mocks.bridgeORM.On("FindBridge", name).Return(bridges.BridgeType{}, sql.ErrNoRows) + }, + query: query, + result: `{ + "bridge": { + "message": "bridge not found", + "code": "NOT_FOUND" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func Test_CreateBridge(t *testing.T) { + t.Parallel() + + var ( + name = bridges.BridgeName("bridge1") + mutation = ` + mutation createBridge($input: CreateBridgeInput!) { + createBridge(input: $input) { + ... 
on CreateBridgeSuccess { + bridge { + id + name + url + confirmations + outgoingToken + minimumContractPayment + createdAt + } + } + } + }` + variables = map[string]interface{}{ + "input": map[string]interface{}{ + "name": "bridge1", + "url": "https://external.adapter", + "confirmations": 1, + "minimumContractPayment": "1", + }, + } + ) + bridgeURL, err := url.Parse("https://external.adapter") + require.NoError(t, err) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "createBridge"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("BridgeORM").Return(f.Mocks.bridgeORM) + f.Mocks.bridgeORM.On("FindBridge", name).Return(bridges.BridgeType{}, sql.ErrNoRows) + f.Mocks.bridgeORM.On("CreateBridgeType", mock.IsType(&bridges.BridgeType{})). + Run(func(args mock.Arguments) { + arg := args.Get(0).(*bridges.BridgeType) + *arg = bridges.BridgeType{ + Name: name, + URL: models.WebURL(*bridgeURL), + Confirmations: uint32(1), + OutgoingToken: "outgoingToken", + MinimumContractPayment: assets.NewLinkFromJuels(1), + CreatedAt: f.Timestamp(), + } + }). + Return(nil) + }, + query: mutation, + variables: variables, + // We should test equality for the generated token but since it is + // generated by a non mockable object, we can't do this right now. + result: ` + { + "createBridge": { + "bridge": { + "id": "bridge1", + "name": "bridge1", + "url": "https://external.adapter", + "confirmations": 1, + "outgoingToken": "outgoingToken", + "minimumContractPayment": "1", + "createdAt": "2021-01-01T00:00:00Z" + } + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} + +func Test_UpdateBridge(t *testing.T) { + t.Parallel() + + var ( + name = bridges.BridgeName("bridge1") + mutation = ` + mutation updateBridge($id: ID!, $input: UpdateBridgeInput!) { + updateBridge(id: $id, input: $input) { + ... 
on UpdateBridgeSuccess { + bridge { + id + name + url + confirmations + outgoingToken + minimumContractPayment + createdAt + } + } + ... on NotFoundError { + message + code + } + } + }` + variables = map[string]interface{}{ + "id": "bridge1", + "input": map[string]interface{}{ + "name": "bridge-updated", + "url": "https://external.adapter.new", + "confirmations": 2, + "minimumContractPayment": "2", + }, + } + ) + bridgeURL, err := url.Parse("https://external.adapter") + require.NoError(t, err) + + newBridgeURL, err := url.Parse("https://external.adapter.new") + require.NoError(t, err) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "updateBridge"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + // Initialize the existing bridge + bridge := bridges.BridgeType{ + Name: name, + URL: models.WebURL(*bridgeURL), + Confirmations: uint32(1), + OutgoingToken: "outgoingToken", + MinimumContractPayment: assets.NewLinkFromJuels(1), + CreatedAt: f.Timestamp(), + } + + f.App.On("BridgeORM").Return(f.Mocks.bridgeORM) + f.Mocks.bridgeORM.On("FindBridge", name).Return(bridge, nil) + + btr := &bridges.BridgeTypeRequest{ + Name: bridges.BridgeName("bridge-updated"), + URL: models.WebURL(*newBridgeURL), + Confirmations: 2, + MinimumContractPayment: assets.NewLinkFromJuels(2), + } + + f.Mocks.bridgeORM.On("UpdateBridgeType", mock.IsType(&bridges.BridgeType{}), btr). + Run(func(args mock.Arguments) { + arg := args.Get(0).(*bridges.BridgeType) + *arg = bridges.BridgeType{ + Name: "bridge-updated", + URL: models.WebURL(*newBridgeURL), + Confirmations: 2, + OutgoingToken: "outgoingToken", + MinimumContractPayment: assets.NewLinkFromJuels(2), + CreatedAt: f.Timestamp(), + } + }). 
+ Return(nil) + }, + query: mutation, + variables: variables, + result: `{ + "updateBridge": { + "bridge": { + "id": "bridge-updated", + "name": "bridge-updated", + "url": "https://external.adapter.new", + "confirmations": 2, + "outgoingToken": "outgoingToken", + "minimumContractPayment": "2", + "createdAt": "2021-01-01T00:00:00Z" + } + } + }`, + }, + { + name: "not found", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("BridgeORM").Return(f.Mocks.bridgeORM) + f.Mocks.bridgeORM.On("FindBridge", name).Return(bridges.BridgeType{}, sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: `{ + "updateBridge": { + "message": "bridge not found", + "code": "NOT_FOUND" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func Test_DeleteBridgeMutation(t *testing.T) { + t.Parallel() + + name := bridges.BridgeName("bridge1") + + bridgeURL, err := url.Parse("https://test-url.com") + require.NoError(t, err) + + link := assets.Link{} + err = json.Unmarshal([]byte(`"1"`), &link) + assert.NoError(t, err) + + mutation := ` + mutation DeleteBridge($id: ID!) { + deleteBridge(id: $id) { + ... on DeleteBridgeSuccess { + bridge { + id + name + url + confirmations + outgoingToken + minimumContractPayment + } + } + ... on NotFoundError { + message + code + } + ... on DeleteBridgeInvalidNameError { + message + code + } + ... 
on DeleteBridgeConflictError { + message + code + } + } + }` + + variables := map[string]interface{}{ + "id": name.String(), + } + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "deleteBridge"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + bridge := bridges.BridgeType{ + Name: name, + URL: models.WebURL(*bridgeURL), + Confirmations: 1, + OutgoingToken: "some-token", + MinimumContractPayment: &link, + } + + f.Mocks.bridgeORM.On("FindBridge", name).Return(bridge, nil) + f.Mocks.bridgeORM.On("DeleteBridgeType", &bridge).Return(nil) + f.Mocks.jobORM.On("FindJobIDsWithBridge", name.String()).Return([]int32{}, nil) + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.App.On("BridgeORM").Return(f.Mocks.bridgeORM) + }, + query: mutation, + variables: variables, + result: ` + { + "deleteBridge": { + "bridge": { + "id": "bridge1", + "name": "bridge1", + "url": "https://test-url.com", + "confirmations": 1, + "outgoingToken": "some-token", + "minimumContractPayment": "1" + } + } + }`, + }, + { + name: "invalid bridge type name", + authenticated: true, + query: mutation, + variables: map[string]interface{}{ + "id": "][]$$$$324adfas", + }, + result: ` + { + "deleteBridge": { + "message": "task type validation: name ][]$$$$324adfas contains invalid characters", + "code": "UNPROCESSABLE" + } + }`, + }, + { + name: "invalid bridge type name", + authenticated: true, + query: mutation, + variables: map[string]interface{}{ + "id": "bridge1", + }, + before: func(f *gqlTestFramework) { + f.Mocks.bridgeORM.On("FindBridge", name).Return(bridges.BridgeType{}, sql.ErrNoRows) + f.App.On("BridgeORM").Return(f.Mocks.bridgeORM) + }, + result: ` + { + "deleteBridge": { + "message": "bridge not found", + "code": "NOT_FOUND" + } + }`, + }, + { + name: "bridge with jobs associated", + authenticated: true, + query: mutation, + variables: map[string]interface{}{ + "id": "bridge1", + }, + before: func(f 
*gqlTestFramework) { + f.Mocks.bridgeORM.On("FindBridge", name).Return(bridges.BridgeType{}, nil) + f.Mocks.jobORM.On("FindJobIDsWithBridge", name.String()).Return([]int32{1}, nil) + f.App.On("BridgeORM").Return(f.Mocks.bridgeORM) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + result: ` + { + "deleteBridge": { + "message": "bridge has jobs associated with it", + "code": "UNPROCESSABLE" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/chain.go b/core/web/resolver/chain.go new file mode 100644 index 00000000..d5bca135 --- /dev/null +++ b/core/web/resolver/chain.go @@ -0,0 +1,76 @@ +package resolver + +import ( + "github.com/graph-gophers/graphql-go" + + "github.com/goplugin/plugin-common/pkg/types" +) + +// ChainResolver resolves the Chain type. +type ChainResolver struct { + chain types.ChainStatus +} + +func NewChain(chain types.ChainStatus) *ChainResolver { + return &ChainResolver{chain: chain} +} + +func NewChains(chains []types.ChainStatus) []*ChainResolver { + var resolvers []*ChainResolver + for _, c := range chains { + resolvers = append(resolvers, NewChain(c)) + } + + return resolvers +} + +// ID resolves the chain's unique identifier. +func (r *ChainResolver) ID() graphql.ID { + return graphql.ID(r.chain.ID) +} + +// Enabled resolves the chain's enabled field. 
+func (r *ChainResolver) Enabled() bool { + return r.chain.Enabled +} + +// Config resolves the chain's configuration field +func (r *ChainResolver) Config() string { + return r.chain.Config +} + +type ChainPayloadResolver struct { + chain types.ChainStatus + NotFoundErrorUnionType +} + +func NewChainPayload(chain types.ChainStatus, err error) *ChainPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "chain not found", isExpectedErrorFn: nil} + + return &ChainPayloadResolver{chain: chain, NotFoundErrorUnionType: e} +} + +func (r *ChainPayloadResolver) ToChain() (*ChainResolver, bool) { + if r.err != nil { + return nil, false + } + + return NewChain(r.chain), true +} + +type ChainsPayloadResolver struct { + chains []types.ChainStatus + total int32 +} + +func NewChainsPayload(chains []types.ChainStatus, total int32) *ChainsPayloadResolver { + return &ChainsPayloadResolver{chains: chains, total: total} +} + +func (r *ChainsPayloadResolver) Results() []*ChainResolver { + return NewChains(r.chains) +} + +func (r *ChainsPayloadResolver) Metadata() *PaginationMetadataResolver { + return NewPaginationMetadata(r.total) +} diff --git a/core/web/resolver/chain_test.go b/core/web/resolver/chain_test.go new file mode 100644 index 00000000..6dcc307f --- /dev/null +++ b/core/web/resolver/chain_test.go @@ -0,0 +1,228 @@ +package resolver + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/pelletier/go-toml/v2" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/loop" + commontypes "github.com/goplugin/plugin-common/pkg/types" + evmtoml "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + pluginmocks "github.com/goplugin/pluginv3.0/v2/core/services/plugin/mocks" + "github.com/goplugin/pluginv3.0/v2/core/web/testutils" +) + +func TestResolver_Chains(t *testing.T) { + var ( + chainID = *big.NewI(1) + query = ` + query GetChains { + chains { + 
results { + id + enabled + config + } + metadata { + total + } + } + }` + configTOML = `ChainID = '1' +Enabled = true +AutoCreateKey = false +BlockBackfillDepth = 100 +BlockBackfillSkip = true +ChainType = 'Optimism' +FinalityDepth = 42 +FlagsContractAddress = '0xae4E781a6218A8031764928E88d457937A954fC3' +LinkContractAddress = '0x538aAaB4ea120b2bC2fe5D296852D948F07D849e' +LogBackfillBatchSize = 17 +LogPollInterval = '1m0s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 13 +MinContractPayment = '9.223372036854775807 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '1m0s' +OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' +RPCDefaultBatchSize = 17 +RPCBlockQueryDelay = 10 +Nodes = [] + +[Transactions] +ForwardersEnabled = true +MaxInFlight = 19 +MaxQueued = 99 +ReaperInterval = '1m0s' +ReaperThreshold = '1m0s' +ResendAfterThreshold = '1h0m0s' +` + ) + var chain evmtoml.EVMConfig + err := toml.Unmarshal([]byte(configTOML), &chain) + require.NoError(t, err) + + configTOMLEscaped, err := json.Marshal(configTOML) + require.NoError(t, err) + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "chains"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + + chainConf := evmtoml.EVMConfig{ + ChainID: &chainID, + Enabled: chain.Enabled, + Chain: chain.Chain, + } + + chainConfToml, err2 := chainConf.TOMLString() + require.NoError(t, err2) + + f.App.On("GetRelayers").Return(&pluginmocks.FakeRelayerChainInteroperators{Relayers: []loop.Relayer{ + testutils.MockRelayer{ChainStatus: commontypes.ChainStatus{ + ID: chainID.String(), + Enabled: *chain.Enabled, + Config: chainConfToml, + }}, + }}) + + }, + query: query, + result: fmt.Sprintf(` + { + "chains": { + "results": [{ + "id": "1", + "enabled": true, + "config": %s + }], + "metadata": { + "total": 1 + } + } + }`, configTOMLEscaped), + }, + unauthorizedTestCase(GQLTestCase{query: query}, "chains"), + { + name: "no chains", + 
authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetRelayers").Return(&pluginmocks.FakeRelayerChainInteroperators{Relayers: []loop.Relayer{}}) + + }, + query: query, + result: ` + { + "chains": { + "results": [], + "metadata": { + "total": 0 + } + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_Chain(t *testing.T) { + var ( + chainID = *big.NewI(1) + query = ` + query GetChain { + chain(id: "1") { + ... on Chain { + id + enabled + config + } + ... on NotFoundError { + code + message + } + } + } + ` + configTOML = `ChainID = '1' +AutoCreateKey = false +BlockBackfillDepth = 100 +BlockBackfillSkip = true +ChainType = 'Optimism' +FinalityDepth = 42 +FlagsContractAddress = '0xae4E781a6218A8031764928E88d457937A954fC3' +LinkContractAddress = '0x538aAaB4ea120b2bC2fe5D296852D948F07D849e' +LogBackfillBatchSize = 17 +LogPollInterval = '1m0s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 13 +MinContractPayment = '9.223372036854775807 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '1m0s' +OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' +RPCDefaultBatchSize = 17 +RPCBlockQueryDelay = 10 +Nodes = [] + +[Transactions] +ForwardersEnabled = true +MaxInFlight = 19 +MaxQueued = 99 +ReaperInterval = '1m0s' +ReaperThreshold = '1m0s' +ResendAfterThreshold = '1h0m0s' +` + ) + var chain evmtoml.Chain + err := toml.Unmarshal([]byte(configTOML), &chain) + require.NoError(t, err) + + configTOMLEscaped, err := json.Marshal(configTOML) + require.NoError(t, err) + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "chain"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("EVMORM").Return(f.Mocks.evmORM) + f.Mocks.evmORM.PutChains(evmtoml.EVMConfig{ + ChainID: &chainID, + Chain: chain, + }) + }, + query: query, + result: fmt.Sprintf(` + { + "chain": { + "id": "1", + "enabled": true, + "config": %s + } + }`, configTOMLEscaped), + }, + { + name: 
"not found error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("EVMORM").Return(f.Mocks.evmORM) + }, + query: query, + result: ` + { + "chain": { + "code": "NOT_FOUND", + "message": "chain not found" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/config_test.go b/core/web/resolver/config_test.go new file mode 100644 index 00000000..327190d7 --- /dev/null +++ b/core/web/resolver/config_test.go @@ -0,0 +1,89 @@ +package resolver + +import ( + _ "embed" + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +var ( + //go:embed testdata/config-empty-effective.toml + configEmptyEffective string + //go:embed testdata/config-full.toml + configFull string + //go:embed testdata/config-multi-chain.toml + configMulti string + //go:embed testdata/config-multi-chain-effective.toml + configMultiEffective string +) + +func TestResolver_ConfigV2(t *testing.T) { + t.Parallel() + + query := ` + query FetchConfigV2 { + configv2 { + user + effective + } + }` + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "configv2"), + { + name: "empty", + authenticated: true, + before: func(f *gqlTestFramework) { + opts := plugin.GeneralConfigOpts{} + cfg, err := opts.New() + require.NoError(t, err) + f.App.On("GetConfig").Return(cfg) + }, + query: query, + result: fmt.Sprintf(`{"configv2":{"user":"","effective":%s}}`, mustJSONMarshal(t, configEmptyEffective)), + }, + { + name: "full", + authenticated: true, + before: func(f *gqlTestFramework) { + opts := plugin.GeneralConfigOpts{ + ConfigStrings: []string{configFull}, + SecretsStrings: []string{}, + } + cfg, err := opts.New() + require.NoError(t, err) + f.App.On("GetConfig").Return(cfg) + }, + query: query, + result: fmt.Sprintf(`{"configv2":{"user":%s,"effective":%s}}`, mustJSONMarshal(t, configFull), mustJSONMarshal(t, configFull)), + }, + { + name: "partial", 
// ConfigV2PayloadResolver resolves the configv2 query, exposing both the
// user-supplied TOML and the effective (fully resolved) TOML.
type ConfigV2PayloadResolver struct {
	user      string
	effective string
}

// NewConfigV2Payload constructs a resolver from the user and effective
// configuration strings.
func NewConfigV2Payload(user, effective string) *ConfigV2PayloadResolver {
	return &ConfigV2PayloadResolver{
		user:      user,
		effective: effective,
	}
}

// User resolves the user-specified configuration TOML.
func (r *ConfigV2PayloadResolver) User() string {
	return r.user
}

// Effective resolves the effective configuration TOML.
func (r *ConfigV2PayloadResolver) Effective() string {
	return r.effective
}
+func (r *CSAKeyResolver) PublicKey() string { + return fmt.Sprintf("csa_%s", r.key.PublicKeyString()) +} + +// Version resolves the CSA Key version number. +func (r *CSAKeyResolver) Version() int32 { + return int32(r.key.Version) +} + +// -- CSAKeys Query -- + +type CSAKeysPayloadResolver struct { + keys []csakey.KeyV2 +} + +func NewCSAKeysResolver(keys []csakey.KeyV2) *CSAKeysPayloadResolver { + return &CSAKeysPayloadResolver{keys: keys} +} + +func (r *CSAKeysPayloadResolver) Results() []*CSAKeyResolver { + return NewCSAKeys(r.keys) +} + +func NewCSAKeys(keys []csakey.KeyV2) []*CSAKeyResolver { + var resolvers []*CSAKeyResolver + + for _, k := range keys { + resolvers = append(resolvers, NewCSAKey(k)) + } + + return resolvers +} + +// -- CreateCSAKey Mutation -- + +type CreateCSAKeyPayloadResolver struct { + key *csakey.KeyV2 + err error +} + +func NewCreateCSAKeyPayload(key *csakey.KeyV2, err error) *CreateCSAKeyPayloadResolver { + return &CreateCSAKeyPayloadResolver{key: key, err: err} +} + +func (r *CreateCSAKeyPayloadResolver) ToCreateCSAKeySuccess() (*CreateCSAKeySuccessResolver, bool) { + if r.key != nil { + return NewCreateCSAKeySuccessResolver(r.key), true + } + + return nil, false +} + +func (r *CreateCSAKeyPayloadResolver) ToCSAKeyExistsError() (*CSAKeyExistsErrorResolver, bool) { + if r.err != nil && errors.Is(r.err, keystore.ErrCSAKeyExists) { + return NewCSAKeyExistsError(r.err.Error()), true + } + + return nil, false +} + +type CreateCSAKeySuccessResolver struct { + key *csakey.KeyV2 +} + +func NewCreateCSAKeySuccessResolver(key *csakey.KeyV2) *CreateCSAKeySuccessResolver { + return &CreateCSAKeySuccessResolver{key: key} +} + +func (r *CreateCSAKeySuccessResolver) CSAKey() *CSAKeyResolver { + return NewCSAKey(*r.key) +} + +type CSAKeyExistsErrorResolver struct { + message string +} + +func NewCSAKeyExistsError(message string) *CSAKeyExistsErrorResolver { + return &CSAKeyExistsErrorResolver{ + message: message, + } +} + +func (r 
*CSAKeyExistsErrorResolver) Message() string { + return r.message +} + +func (r *CSAKeyExistsErrorResolver) Code() ErrorCode { + return ErrorCodeUnprocessable +} + +type DeleteCSAKeySuccessResolver struct { + key csakey.KeyV2 +} + +func NewDeleteCSAKeySuccess(key csakey.KeyV2) *DeleteCSAKeySuccessResolver { + return &DeleteCSAKeySuccessResolver{key: key} +} + +func (r *DeleteCSAKeySuccessResolver) CSAKey() *CSAKeyResolver { + return NewCSAKey(r.key) +} + +type DeleteCSAKeyPayloadResolver struct { + key csakey.KeyV2 + NotFoundErrorUnionType +} + +func NewDeleteCSAKeyPayload(key csakey.KeyV2, err error) *DeleteCSAKeyPayloadResolver { + var e NotFoundErrorUnionType + + if err != nil { + e = NotFoundErrorUnionType{err: err, message: err.Error(), isExpectedErrorFn: func(err error) bool { + return errors.As(err, &keystore.KeyNotFoundError{}) + }} + } + + return &DeleteCSAKeyPayloadResolver{key: key, NotFoundErrorUnionType: e} +} + +func (r *DeleteCSAKeyPayloadResolver) ToDeleteCSAKeySuccess() (*DeleteCSAKeySuccessResolver, bool) { + if r.err == nil { + return NewDeleteCSAKeySuccess(r.key), true + } + return nil, false +} diff --git a/core/web/resolver/csa_keys_test.go b/core/web/resolver/csa_keys_test.go new file mode 100644 index 00000000..e11bedb9 --- /dev/null +++ b/core/web/resolver/csa_keys_test.go @@ -0,0 +1,212 @@ +package resolver + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" +) + +type expectedKey struct { + ID string `json:"id"` + PubKey string `json:"publicKey"` + Version int `json:"version"` +} + +func Test_CSAKeysQuery(t *testing.T) { + query := ` + query GetCSAKeys { + csaKeys { + results { + id + publicKey + version + } + } + }` + + var fakeKeys []csakey.KeyV2 + var expectedKeys []expectedKey + + for i := 0; i < 5; i++ { + k, err := csakey.NewV2() + assert.NoError(t, err) + + 
fakeKeys = append(fakeKeys, k) + expectedKeys = append(expectedKeys, expectedKey{ + ID: k.ID(), + Version: k.Version, + PubKey: fmt.Sprintf("csa_%s", k.PublicKeyString()), + }) + } + + d, err := json.Marshal(map[string]interface{}{ + "csaKeys": map[string]interface{}{ + "results": expectedKeys, + }, + }) + assert.NoError(t, err) + expectedResult := string(d) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "csaKeys"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.csa.On("GetAll").Return(fakeKeys, nil) + f.Mocks.keystore.On("CSA").Return(f.Mocks.csa) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: expectedResult, + }, + } + + RunGQLTests(t, testCases) +} + +func Test_CreateCSAKey(t *testing.T) { + query := ` + mutation CreateCSAKey { + createCSAKey { + ... on CreateCSAKeySuccess { + csaKey { + id + version + publicKey + } + } + ... on CSAKeyExistsError { + message + code + } + } + }` + + fakeKey, err := csakey.NewV2() + assert.NoError(t, err) + + expected := ` + { + "createCSAKey": { + "csaKey": { + "id": "%s", + "version": %d, + "publicKey": "csa_%s" + } + } + } + ` + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "createCSAKey"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.csa.On("Create").Return(fakeKey, nil) + f.Mocks.keystore.On("CSA").Return(f.Mocks.csa) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: fmt.Sprintf(expected, fakeKey.ID(), fakeKey.Version, fakeKey.PublicKeyString()), + }, + { + name: "csa key exists error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.csa.On("Create").Return(csakey.KeyV2{}, keystore.ErrCSAKeyExists) + f.Mocks.keystore.On("CSA").Return(f.Mocks.csa) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: fmt.Sprintf(` + { + "createCSAKey": { + 
"message": "%s", + "code": "UNPROCESSABLE" + } + }`, keystore.ErrCSAKeyExists.Error()), + }, + } + + RunGQLTests(t, testCases) +} + +func Test_DeleteCSAKey(t *testing.T) { + query := ` + mutation DeleteCSAKey($id: ID!) { + deleteCSAKey(id: $id) { + ... on DeleteCSAKeySuccess { + csaKey { + id + version + publicKey + } + } + ... on NotFoundError { + message + code + } + } + }` + + fakeKey, err := csakey.NewV2() + assert.NoError(t, err) + + expected := ` + { + "deleteCSAKey": { + "csaKey": { + "id": "%s", + "version": %d, + "publicKey": "csa_%s" + } + } + } + ` + variables := map[string]interface{}{"id": fakeKey.ID()} + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query, variables: variables}, "deleteCSAKey"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + f.Mocks.keystore.On("CSA").Return(f.Mocks.csa) + f.Mocks.csa.On("Delete", fakeKey.ID()).Return(fakeKey, nil) + }, + query: query, + variables: variables, + result: fmt.Sprintf(expected, fakeKey.ID(), fakeKey.Version, fakeKey.PublicKeyString()), + }, + { + name: "not found error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + f.Mocks.keystore.On("CSA").Return(f.Mocks.csa) + f.Mocks.csa. + On("Delete", fakeKey.ID()). 
+ Return(csakey.KeyV2{}, keystore.KeyNotFoundError{ID: fakeKey.ID(), KeyType: "CSA"}) + }, + query: query, + variables: variables, + result: fmt.Sprintf(` + { + "deleteCSAKey": { + "message": "unable to find CSA key with id %s", + "code": "NOT_FOUND" + } + }`, fakeKey.ID()), + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/errors.go b/core/web/resolver/errors.go new file mode 100644 index 00000000..2c28b24a --- /dev/null +++ b/core/web/resolver/errors.go @@ -0,0 +1,103 @@ +package resolver + +import ( + "database/sql" + + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains" +) + +type ErrorCode string + +const ( + ErrorCodeNotFound ErrorCode = "NOT_FOUND" + ErrorCodeInvalidInput ErrorCode = "INVALID_INPUT" + ErrorCodeUnprocessable ErrorCode = "UNPROCESSABLE" + ErrorCodeStatusConflict ErrorCode = "STATUS_CONFLICT" +) + +type NotFoundErrorUnionType struct { + err error + message string + isExpectedErrorFn func(err error) bool +} + +// ToNotFoundError resolves to the not found error resolver +func (e *NotFoundErrorUnionType) ToNotFoundError() (*NotFoundErrorResolver, bool) { + isErrFn := isNotFoundError + + if e.isExpectedErrorFn != nil { + isErrFn = e.isExpectedErrorFn + } + + if e.err != nil && isErrFn(e.err) { + return NewNotFoundError(e.message), true + } + + return nil, false +} + +func isNotFoundError(err error) bool { + return errors.Is(err, sql.ErrNoRows) || + errors.Is(err, chains.ErrNotFound) +} + +type NotFoundErrorResolver struct { + message string + code ErrorCode +} + +func NewNotFoundError(message string) *NotFoundErrorResolver { + return &NotFoundErrorResolver{ + message: message, + code: ErrorCodeNotFound, + } +} + +func (r *NotFoundErrorResolver) Message() string { + return r.message +} + +func (r *NotFoundErrorResolver) Code() ErrorCode { + return r.code +} + +type InputErrorResolver struct { + path string + message string +} + +func NewInputError(path, message string) *InputErrorResolver { + return 
&InputErrorResolver{ + path: path, + message: message, + } +} + +func (r *InputErrorResolver) Path() string { + return r.path +} + +func (r *InputErrorResolver) Message() string { + return r.message +} + +func (r *InputErrorResolver) Code() ErrorCode { + return ErrorCodeInvalidInput +} + +// InputErrorsResolver groups a slice of input errors +type InputErrorsResolver struct { + iers []*InputErrorResolver +} + +func NewInputErrors(iers []*InputErrorResolver) *InputErrorsResolver { + return &InputErrorsResolver{ + iers: iers, + } +} + +func (r *InputErrorsResolver) Errors() []*InputErrorResolver { + return r.iers +} diff --git a/core/web/resolver/eth_key.go b/core/web/resolver/eth_key.go new file mode 100644 index 00000000..b5f0a149 --- /dev/null +++ b/core/web/resolver/eth_key.go @@ -0,0 +1,132 @@ +package resolver + +import ( + "context" + + "github.com/ethereum/go-ethereum/common" + "github.com/graph-gophers/graphql-go" + + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/web/loader" +) + +type ETHKey struct { + state ethkey.State + addr ethkey.EIP55Address + chain legacyevm.Chain +} + +type ETHKeyResolver struct { + key ETHKey +} + +func NewETHKey(key ETHKey) *ETHKeyResolver { + return ÐKeyResolver{key: key} +} + +func NewETHKeys(keys []ETHKey) []*ETHKeyResolver { + var resolvers []*ETHKeyResolver + + for _, k := range keys { + resolvers = append(resolvers, NewETHKey(k)) + } + + return resolvers +} + +func (r *ETHKeyResolver) Chain(ctx context.Context) (*ChainResolver, error) { + chain, err := loader.GetChainByID(ctx, r.key.state.EVMChainID.String()) + if err != nil { + return nil, err + } + + return NewChain(*chain), nil +} + +func (r *ETHKeyResolver) Address() string { + return r.key.addr.Hex() +} + +func (r *ETHKeyResolver) IsDisabled() bool { + return r.key.state.Disabled +} + +// ETHBalance returns the ETH balance available +func (r 
*ETHKeyResolver) ETHBalance(ctx context.Context) *string { + if r.key.chain == nil { + return nil + } + + balanceMonitor := r.key.chain.BalanceMonitor() + + if balanceMonitor == nil { + return nil + } + + balance := balanceMonitor.GetEthBalance(r.key.state.Address.Address()) + + if balance != nil { + val := balance.String() + return &val + } + + return nil +} + +func (r *ETHKeyResolver) PLIBalance(ctx context.Context) *string { + if r.key.chain == nil { + return nil + } + + client := r.key.chain.Client() + linkAddr := common.HexToAddress(r.key.chain.Config().EVM().LinkContractAddress()) + balance, err := client.PLIBalance(ctx, r.key.state.Address.Address(), linkAddr) + if err != nil { + return nil + } + + if balance != nil { + val := balance.String() + return &val + } + + return nil +} + +func (r *ETHKeyResolver) MaxGasPriceWei() *string { + if r.key.chain == nil { + return nil + } + + gasPrice := r.key.chain.Config().EVM().GasEstimator().PriceMaxKey(r.key.addr.Address()) + + if gasPrice != nil { + val := gasPrice.ToInt().String() + return &val + } + + return nil +} + +func (r *ETHKeyResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.key.state.CreatedAt} +} + +func (r *ETHKeyResolver) UpdatedAt() graphql.Time { + return graphql.Time{Time: r.key.state.UpdatedAt} +} + +// -- EthKeys query -- + +type ETHKeysPayloadResolver struct { + keys []ETHKey +} + +func NewETHKeysPayload(keys []ETHKey) *ETHKeysPayloadResolver { + return ÐKeysPayloadResolver{keys: keys} +} + +func (r *ETHKeysPayloadResolver) Results() []*ETHKeyResolver { + return NewETHKeys(r.keys) +} diff --git a/core/web/resolver/eth_key_test.go b/core/web/resolver/eth_key_test.go new file mode 100644 index 00000000..a38c6a65 --- /dev/null +++ b/core/web/resolver/eth_key_test.go @@ -0,0 +1,429 @@ +package resolver + +import ( + "fmt" + "testing" + + "github.com/ethereum/go-ethereum/common" + gqlerrors "github.com/graph-gophers/graphql-go/errors" + "github.com/pkg/errors" + 
"github.com/stretchr/testify/mock" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config" + mocks2 "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/mocks" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/web/testutils" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +type mockEvmConfig struct { + config.EVM + linkAddr string + gasEstimatorMock *mocks2.GasEstimator +} + +func (m *mockEvmConfig) LinkContractAddress() string { return m.linkAddr } +func (m *mockEvmConfig) GasEstimator() config.GasEstimator { return m.gasEstimatorMock } + +func TestResolver_ETHKeys(t *testing.T) { + t.Parallel() + + query := ` + query GetETHKeys { + ethKeys { + results { + address + isDisabled + ethBalance + linkBalance + maxGasPriceWei + createdAt + updatedAt + chain { + id + } + } + } + }` + + address := common.HexToAddress("0x5431F5F973781809D18643b87B44921b11355d81") + secondAddress := common.HexToAddress("0x1438087186fdbfd4c256fa2df446921e30e54df8") + keys := []ethkey.KeyV2{ + { + Address: address, + EIP55Address: ethkey.EIP55AddressFromAddress(address), + }, + { + Address: secondAddress, + EIP55Address: ethkey.EIP55AddressFromAddress(secondAddress), + }, + } + gError := errors.New("error") + keysError := fmt.Errorf("error getting unlocked keys: %v", gError) + statesError := fmt.Errorf("error getting key states: %v", gError) + + evmMockConfig := mockEvmConfig{linkAddr: 
"0x5431F5F973781809D18643b87B44921b11355d81", gasEstimatorMock: mocks2.NewGasEstimator(t)} + evmMockConfig.gasEstimatorMock.On("PriceMaxKey", mock.Anything).Return(assets.NewWeiI(1)) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "ethKeys"), + { + name: "success on prod", + authenticated: true, + before: func(f *gqlTestFramework) { + states := []ethkey.State{ + { + Address: ethkey.MustEIP55Address(address.Hex()), + EVMChainID: *big.NewI(12), + Disabled: false, + CreatedAt: f.Timestamp(), + UpdatedAt: f.Timestamp(), + }, + } + chainID := *big.NewI(12) + linkAddr := common.HexToAddress("0x5431F5F973781809D18643b87B44921b11355d81") + + cfg := configtest.NewGeneralConfig(t, nil) + m := map[string]legacyevm.Chain{states[0].EVMChainID.String(): f.Mocks.chain} + legacyEVMChains := legacyevm.NewLegacyChains(m, cfg.EVMConfigs()) + + f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil) + f.Mocks.ethKs.On("Get", keys[0].Address.Hex()).Return(keys[0], nil) + f.Mocks.ethKs.On("GetAll").Return(keys, nil) + f.Mocks.ethClient.On("PLIBalance", mock.Anything, address, linkAddr).Return(commonassets.NewLinkFromJuels(12), nil) + f.Mocks.chain.On("Client").Return(f.Mocks.ethClient) + f.Mocks.balM.On("GetEthBalance", address).Return(assets.NewEth(1)) + f.Mocks.chain.On("BalanceMonitor").Return(f.Mocks.balM) + f.Mocks.chain.On("Config").Return(f.Mocks.scfg) + f.Mocks.relayerChainInterops.EVMChains = legacyEVMChains + f.Mocks.relayerChainInterops.Relayers = []loop.Relayer{ + testutils.MockRelayer{ + ChainStatus: types.ChainStatus{ + ID: "12", + Enabled: true, + }, + NodeStatuses: nil, + }, + } + f.Mocks.evmORM.PutChains(toml.EVMConfig{ChainID: &chainID}) + f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + f.App.On("GetRelayers").Return(f.Mocks.relayerChainInterops) + + f.Mocks.scfg.On("EVM").Return(&evmMockConfig) + }, + query: query, + result: ` + { + "ethKeys": { + "results": [ + { + 
"address": "0x5431F5F973781809D18643b87B44921b11355d81", + "isDisabled": false, + "ethBalance": "0.000000000000000001", + "linkBalance": "12", + "maxGasPriceWei": "1", + "createdAt": "2021-01-01T00:00:00Z", + "updatedAt": "2021-01-01T00:00:00Z", + "chain": { + "id": "12" + } + } + ] + } + }`, + }, + + { + name: "success with no chains", + authenticated: true, + before: func(f *gqlTestFramework) { + states := []ethkey.State{ + { + Address: ethkey.MustEIP55Address(address.Hex()), + EVMChainID: *big.NewI(12), + Disabled: false, + CreatedAt: f.Timestamp(), + UpdatedAt: f.Timestamp(), + }, + } + chainID := *big.NewI(12) + f.Mocks.legacyEVMChains.On("Get", states[0].EVMChainID.String()).Return(nil, evmrelay.ErrNoChains) + f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil) + f.Mocks.ethKs.On("Get", keys[0].Address.Hex()).Return(keys[0], nil) + f.Mocks.ethKs.On("GetAll").Return(keys, nil) + f.Mocks.relayerChainInterops.EVMChains = f.Mocks.legacyEVMChains + f.Mocks.evmORM.PutChains(toml.EVMConfig{ChainID: &chainID}) + f.Mocks.relayerChainInterops.Relayers = []loop.Relayer{ + testutils.MockRelayer{ + ChainStatus: types.ChainStatus{ + ID: "12", + Enabled: true, + }, + NodeStatuses: nil, + }, + } + f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + f.App.On("GetRelayers").Return(f.Mocks.relayerChainInterops) + }, + query: query, + result: ` + { + "ethKeys": { + "results": [ + { + "address": "0x5431F5F973781809D18643b87B44921b11355d81", + "isDisabled": false, + "ethBalance": null, + "linkBalance": null, + "maxGasPriceWei": null, + "createdAt": "2021-01-01T00:00:00Z", + "updatedAt": "2021-01-01T00:00:00Z", + "chain": { + "id": "12" + } + } + ] + } + }`, + }, + + { + name: "generic error on GetAll()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.ethKs.On("GetAll").Return(nil, gError) + f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + 
query: query, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: keysError, + Path: []interface{}{"ethKeys"}, + Message: keysError.Error(), + }, + }, + }, + { + name: "generic error on GetStatesForKeys()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.ethKs.On("GetAll").Return(keys, nil) + f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(nil, gError) + f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: statesError, + Path: []interface{}{"ethKeys"}, + Message: statesError.Error(), + }, + }, + }, + { + name: "generic error on Get()", + authenticated: true, + before: func(f *gqlTestFramework) { + states := []ethkey.State{ + { + Address: ethkey.MustEIP55Address(address.Hex()), + EVMChainID: *big.NewI(12), + Disabled: false, + CreatedAt: f.Timestamp(), + UpdatedAt: f.Timestamp(), + }, + } + + f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil) + f.Mocks.ethKs.On("Get", keys[0].Address.Hex()).Return(ethkey.KeyV2{}, gError) + f.Mocks.ethKs.On("GetAll").Return(keys, nil) + f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"ethKeys"}, + Message: gError.Error(), + }, + }, + }, + + { + name: "Empty set on legacy evm chains", + authenticated: true, + before: func(f *gqlTestFramework) { + states := []ethkey.State{ + { + Address: ethkey.MustEIP55Address(address.Hex()), + EVMChainID: *big.NewI(12), + Disabled: false, + CreatedAt: f.Timestamp(), + UpdatedAt: f.Timestamp(), + }, + } + + f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil) + f.Mocks.ethKs.On("Get", keys[0].Address.Hex()).Return(ethkey.KeyV2{}, nil) + 
f.Mocks.ethKs.On("GetAll").Return(keys, nil) + f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) + f.Mocks.legacyEVMChains.On("Get", states[0].EVMChainID.String()).Return(f.Mocks.chain, gError) + f.Mocks.relayerChainInterops.EVMChains = f.Mocks.legacyEVMChains + f.App.On("GetRelayers").Return(f.Mocks.relayerChainInterops) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: ` + { + "ethKeys": { + "results": [] + } + }`, + }, + { + name: "generic error on GetPLIBalance()", + authenticated: true, + before: func(f *gqlTestFramework) { + states := []ethkey.State{ + { + Address: ethkey.MustEIP55Address(address.Hex()), + EVMChainID: *big.NewI(12), + Disabled: false, + CreatedAt: f.Timestamp(), + UpdatedAt: f.Timestamp(), + }, + } + chainID := *big.NewI(12) + linkAddr := common.HexToAddress("0x5431F5F973781809D18643b87B44921b11355d81") + + f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil) + f.Mocks.ethKs.On("Get", keys[0].Address.Hex()).Return(keys[0], nil) + f.Mocks.ethKs.On("GetAll").Return(keys, nil) + f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) + f.Mocks.ethClient.On("PLIBalance", mock.Anything, address, linkAddr).Return(commonassets.NewLinkFromJuels(12), gError) + f.Mocks.legacyEVMChains.On("Get", states[0].EVMChainID.String()).Return(f.Mocks.chain, nil) + f.Mocks.relayerChainInterops.EVMChains = f.Mocks.legacyEVMChains + f.Mocks.relayerChainInterops.Relayers = []loop.Relayer{ + testutils.MockRelayer{ + ChainStatus: types.ChainStatus{ + ID: "12", + Enabled: true, + }, + NodeStatuses: nil, + }, + } + f.Mocks.chain.On("Client").Return(f.Mocks.ethClient) + f.Mocks.balM.On("GetEthBalance", address).Return(assets.NewEth(1)) + f.Mocks.chain.On("BalanceMonitor").Return(f.Mocks.balM) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + f.Mocks.chain.On("Config").Return(f.Mocks.scfg) + f.Mocks.evmORM.PutChains(toml.EVMConfig{ChainID: &chainID}) + f.App.On("GetRelayers").Return(f.Mocks.relayerChainInterops) + 
f.Mocks.scfg.On("EVM").Return(&evmMockConfig) + }, + query: query, + result: ` + { + "ethKeys": { + "results": [ + { + "address": "0x5431F5F973781809D18643b87B44921b11355d81", + "isDisabled": false, + "ethBalance": "0.000000000000000001", + "linkBalance": null, + "maxGasPriceWei": "1", + "createdAt": "2021-01-01T00:00:00Z", + "updatedAt": "2021-01-01T00:00:00Z", + "chain": { + "id": "12" + } + } + ] + } + }`, + }, + { + name: "success with no eth balance", + authenticated: true, + before: func(f *gqlTestFramework) { + states := []ethkey.State{ + { + Address: ethkey.EIP55AddressFromAddress(address), + EVMChainID: *big.NewI(12), + Disabled: false, + CreatedAt: f.Timestamp(), + UpdatedAt: f.Timestamp(), + }, + } + chainID := *big.NewI(12) + linkAddr := common.HexToAddress("0x5431F5F973781809D18643b87B44921b11355d81") + + f.Mocks.ethKs.On("GetStatesForKeys", keys).Return(states, nil) + f.Mocks.ethKs.On("Get", keys[0].Address.Hex()).Return(keys[0], nil) + f.Mocks.ethKs.On("GetAll").Return(keys, nil) + f.Mocks.ethClient.On("PLIBalance", mock.Anything, address, linkAddr).Return(commonassets.NewLinkFromJuels(12), nil) + f.Mocks.chain.On("Client").Return(f.Mocks.ethClient) + f.Mocks.chain.On("BalanceMonitor").Return(nil) + f.Mocks.chain.On("Config").Return(f.Mocks.scfg) + f.Mocks.legacyEVMChains.On("Get", states[0].EVMChainID.String()).Return(f.Mocks.chain, nil) + f.Mocks.relayerChainInterops.EVMChains = f.Mocks.legacyEVMChains + f.Mocks.evmORM.PutChains(toml.EVMConfig{ChainID: &chainID}) + f.Mocks.relayerChainInterops.Relayers = []loop.Relayer{ + testutils.MockRelayer{ + ChainStatus: types.ChainStatus{ + ID: "12", + Enabled: true, + }, + NodeStatuses: nil, + }, + } + f.Mocks.keystore.On("Eth").Return(f.Mocks.ethKs) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + f.App.On("GetRelayers").Return(f.Mocks.relayerChainInterops) + f.Mocks.scfg.On("EVM").Return(&evmMockConfig) + }, + query: query, + result: ` + { + "ethKeys": { + "results": [ + { + "address": 
"0x5431F5F973781809D18643b87B44921b11355d81", + "isDisabled": false, + "ethBalance": null, + "linkBalance": "12", + "maxGasPriceWei": "1", + "createdAt": "2021-01-01T00:00:00Z", + "updatedAt": "2021-01-01T00:00:00Z", + "chain": { + "id": "12" + } + } + ] + } + }`, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/eth_transaction.go b/core/web/resolver/eth_transaction.go new file mode 100644 index 00000000..808aba69 --- /dev/null +++ b/core/web/resolver/eth_transaction.go @@ -0,0 +1,166 @@ +package resolver + +import ( + "context" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/graph-gophers/graphql-go" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" + "github.com/goplugin/pluginv3.0/v2/core/web/loader" +) + +type EthTransactionResolver struct { + tx txmgr.Tx +} + +func NewEthTransaction(tx txmgr.Tx) *EthTransactionResolver { + return &EthTransactionResolver{tx: tx} +} + +func NewEthTransactions(results []txmgr.Tx) []*EthTransactionResolver { + var resolver []*EthTransactionResolver + + for _, tx := range results { + resolver = append(resolver, NewEthTransaction(tx)) + } + + return resolver +} + +func (r *EthTransactionResolver) State() string { + return string(r.tx.State) +} + +func (r *EthTransactionResolver) Data() hexutil.Bytes { + return hexutil.Bytes(r.tx.EncodedPayload) +} + +func (r *EthTransactionResolver) From() string { + return r.tx.FromAddress.String() +} + +func (r *EthTransactionResolver) To() string { + return r.tx.ToAddress.String() +} + +func (r *EthTransactionResolver) GasLimit() string { + return stringutils.FromInt64(int64(r.tx.FeeLimit)) +} + +func (r *EthTransactionResolver) GasPrice(ctx context.Context) string { + attempts, err := r.Attempts(ctx) + if err != nil || len(attempts) == 0 { + return "" + } + + return attempts[0].GasPrice() +} + +func (r 
*EthTransactionResolver) Value() string { + v := assets.Eth(r.tx.Value) + return v.String() +} + +func (r *EthTransactionResolver) EVMChainID() graphql.ID { + return graphql.ID(r.tx.ChainID.String()) +} + +func (r *EthTransactionResolver) Nonce() *string { + if r.tx.Sequence == nil { + return nil + } + + value := r.tx.Sequence.String() + + return &value +} + +func (r *EthTransactionResolver) Hash(ctx context.Context) string { + attempts, err := r.Attempts(ctx) + if err != nil || len(attempts) == 0 { + return "" + } + + return attempts[0].Hash() +} + +func (r *EthTransactionResolver) Hex(ctx context.Context) string { + attempts, err := r.Attempts(ctx) + if err != nil || len(attempts) == 0 { + return "" + } + + return attempts[0].Hex() +} + +// Chain resolves the node's chain object field. +func (r *EthTransactionResolver) Chain(ctx context.Context) (*ChainResolver, error) { + chain, err := loader.GetChainByID(ctx, string(r.EVMChainID())) + if err != nil { + return nil, err + } + + return NewChain(*chain), nil +} + +func (r *EthTransactionResolver) Attempts(ctx context.Context) ([]*EthTransactionAttemptResolver, error) { + id := stringutils.FromInt64(r.tx.ID) + attempts, err := loader.GetEthTxAttemptsByEthTxID(ctx, id) + if err != nil { + return nil, err + } + + return NewEthTransactionsAttempts(attempts), nil +} + +func (r *EthTransactionResolver) SentAt(ctx context.Context) *string { + attempts, err := r.Attempts(ctx) + if err != nil || len(attempts) == 0 { + return nil + } + + return attempts[0].SentAt() +} + +// -- EthTransaction Query -- + +type EthTransactionPayloadResolver struct { + tx *txmgr.Tx + NotFoundErrorUnionType +} + +func NewEthTransactionPayload(tx *txmgr.Tx, err error) *EthTransactionPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "transaction not found", isExpectedErrorFn: nil} + + return &EthTransactionPayloadResolver{tx: tx, NotFoundErrorUnionType: e} +} + +func (r *EthTransactionPayloadResolver) ToEthTransaction() 
(*EthTransactionResolver, bool) { + if r.err != nil { + return nil, false + } + + return NewEthTransaction(*r.tx), true +} + +// -- EthTransactions Query -- + +type EthTransactionsPayloadResolver struct { + results []txmgr.Tx + total int32 +} + +func NewEthTransactionsPayload(results []txmgr.Tx, total int32) *EthTransactionsPayloadResolver { + return &EthTransactionsPayloadResolver{results: results, total: total} +} + +func (r *EthTransactionsPayloadResolver) Results() []*EthTransactionResolver { + return NewEthTransactions(r.results) +} + +func (r *EthTransactionsPayloadResolver) Metadata() *PaginationMetadataResolver { + return NewPaginationMetadata(r.total) +} diff --git a/core/web/resolver/eth_transaction_attempt.go b/core/web/resolver/eth_transaction_attempt.go new file mode 100644 index 00000000..70b30207 --- /dev/null +++ b/core/web/resolver/eth_transaction_attempt.go @@ -0,0 +1,67 @@ +package resolver + +import ( + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +type EthTransactionAttemptResolver struct { + attmpt txmgr.TxAttempt +} + +func NewEthTransactionAttempt(attmpt txmgr.TxAttempt) *EthTransactionAttemptResolver { + return &EthTransactionAttemptResolver{attmpt: attmpt} +} + +func NewEthTransactionsAttempts(results []txmgr.TxAttempt) []*EthTransactionAttemptResolver { + var resolver []*EthTransactionAttemptResolver + + for _, tx := range results { + resolver = append(resolver, NewEthTransactionAttempt(tx)) + } + + return resolver +} + +func (r *EthTransactionAttemptResolver) GasPrice() string { + return r.attmpt.TxFee.Legacy.ToInt().String() +} + +func (r *EthTransactionAttemptResolver) Hash() string { + return r.attmpt.Hash.String() +} + +func (r *EthTransactionAttemptResolver) Hex() string { + return hexutil.Encode(r.attmpt.SignedRawTx) +} + +func (r *EthTransactionAttemptResolver) SentAt() *string { + if 
r.attmpt.BroadcastBeforeBlockNum == nil { + return nil + } + + value := stringutils.FromInt64(*r.attmpt.BroadcastBeforeBlockNum) + + return &value +} + +// -- EthTransactionAttempts Query -- + +type EthTransactionsAttemptsPayloadResolver struct { + results []txmgr.TxAttempt + total int32 +} + +func NewEthTransactionsAttemptsPayload(results []txmgr.TxAttempt, total int32) *EthTransactionsAttemptsPayloadResolver { + return &EthTransactionsAttemptsPayloadResolver{results: results, total: total} +} + +func (r *EthTransactionsAttemptsPayloadResolver) Results() []*EthTransactionAttemptResolver { + return NewEthTransactionsAttempts(r.results) +} + +func (r *EthTransactionsAttemptsPayloadResolver) Metadata() *PaginationMetadataResolver { + return NewPaginationMetadata(r.total) +} diff --git a/core/web/resolver/eth_transaction_test.go b/core/web/resolver/eth_transaction_test.go new file mode 100644 index 00000000..4a3c2229 --- /dev/null +++ b/core/web/resolver/eth_transaction_test.go @@ -0,0 +1,446 @@ +package resolver + +import ( + "database/sql" + "errors" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + gqlerrors "github.com/graph-gophers/graphql-go/errors" + + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/types" + txmgrcommon "github.com/goplugin/pluginv3.0/v2/common/txmgr" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/gas" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + pluginmocks "github.com/goplugin/pluginv3.0/v2/core/services/plugin/mocks" + "github.com/goplugin/pluginv3.0/v2/core/web/testutils" +) + +func TestResolver_EthTransaction(t *testing.T) { + t.Parallel() + + query := ` + query GetEthTransaction($hash: ID!) 
{ + ethTransaction(hash: $hash) { + ... on EthTransaction { + from + to + state + data + gasLimit + gasPrice + value + evmChainID + chain { + id + } + nonce + hash + hex + sentAt + attempts { + hash + } + } + ... on NotFoundError { + code + message + } + } + }` + variables := map[string]interface{}{ + "hash": "0x5431F5F973781809D18643b87B44921b11355d81", + } + hash := common.HexToHash("0x5431F5F973781809D18643b87B44921b11355d81") + chainID := *ubig.NewI(22) + gError := errors.New("error") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query, variables: variables}, "ethTransaction"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.txmStore.On("FindTxByHash", hash).Return(&txmgr.Tx{ + ID: 1, + ToAddress: common.HexToAddress("0x5431F5F973781809D18643b87B44921b11355d81"), + FromAddress: common.HexToAddress("0x5431F5F973781809D18643b87B44921b11355d81"), + State: txmgrcommon.TxInProgress, + EncodedPayload: []byte("encoded payload"), + FeeLimit: 100, + Value: big.Int(assets.NewEthValue(100)), + ChainID: big.NewInt(22), + Sequence: nil, + }, nil) + f.Mocks.txmStore.On("FindTxAttemptConfirmedByTxIDs", []int64{1}).Return([]txmgr.TxAttempt{ + { + TxID: 1, + Hash: hash, + TxFee: gas.EvmFee{Legacy: assets.NewWeiI(12)}, + SignedRawTx: []byte("something"), + BroadcastBeforeBlockNum: nil, + }, + }, nil) + f.App.On("TxmStorageService").Return(f.Mocks.txmStore) + f.Mocks.evmORM.PutChains(toml.EVMConfig{ChainID: &chainID}) + f.App.On("GetRelayers").Return(&pluginmocks.FakeRelayerChainInteroperators{ + Relayers: []loop.Relayer{ + testutils.MockRelayer{ChainStatus: types.ChainStatus{ + ID: "22", + Enabled: true, + Config: "", + }}, + }, + }) + }, + query: query, + variables: variables, + result: ` + { + "ethTransaction": { + "from": "0x5431F5F973781809D18643b87B44921b11355d81", + "to": "0x5431F5F973781809D18643b87B44921b11355d81", + "chain": { + "id": "22" + }, + "data": "0x656e636f646564207061796c6f6164", + 
"state": "in_progress", + "gasLimit": "100", + "gasPrice": "12", + "value": "0.000000000000000100", + "nonce": null, + "hash": "0x0000000000000000000000005431f5f973781809d18643b87b44921b11355d81", + "hex": "0x736f6d657468696e67", + "sentAt": null, + "evmChainID": "22", + "attempts": [{ + "hash": "0x0000000000000000000000005431f5f973781809d18643b87b44921b11355d81" + }] + } + }`, + }, + { + name: "success without nil values", + authenticated: true, + before: func(f *gqlTestFramework) { + num := int64(2) + nonce := evmtypes.Nonce(num) + + f.Mocks.txmStore.On("FindTxByHash", hash).Return(&txmgr.Tx{ + ID: 1, + ToAddress: common.HexToAddress("0x5431F5F973781809D18643b87B44921b11355d81"), + FromAddress: common.HexToAddress("0x5431F5F973781809D18643b87B44921b11355d81"), + State: txmgrcommon.TxInProgress, + EncodedPayload: []byte("encoded payload"), + FeeLimit: 100, + Value: big.Int(assets.NewEthValue(100)), + ChainID: big.NewInt(22), + Sequence: &nonce, + }, nil) + f.Mocks.txmStore.On("FindTxAttemptConfirmedByTxIDs", []int64{1}).Return([]txmgr.TxAttempt{ + { + TxID: 1, + Hash: hash, + TxFee: gas.EvmFee{Legacy: assets.NewWeiI(12)}, + SignedRawTx: []byte("something"), + BroadcastBeforeBlockNum: &num, + }, + }, nil) + f.App.On("TxmStorageService").Return(f.Mocks.txmStore) + f.Mocks.evmORM.PutChains(toml.EVMConfig{ChainID: &chainID}) + f.App.On("GetRelayers").Return(&pluginmocks.FakeRelayerChainInteroperators{ + Relayers: []loop.Relayer{ + testutils.MockRelayer{ChainStatus: types.ChainStatus{ + ID: "22", + Enabled: true, + Config: "", + }}, + }, + }) + }, + query: query, + variables: variables, + result: ` + { + "ethTransaction": { + "from": "0x5431F5F973781809D18643b87B44921b11355d81", + "to": "0x5431F5F973781809D18643b87B44921b11355d81", + "chain": { + "id": "22" + }, + "data": "0x656e636f646564207061796c6f6164", + "state": "in_progress", + "gasLimit": "100", + "gasPrice": "12", + "value": "0.000000000000000100", + "nonce": "2", + "hash": 
"0x0000000000000000000000005431f5f973781809d18643b87b44921b11355d81", + "hex": "0x736f6d657468696e67", + "sentAt": "2", + "evmChainID": "22", + "attempts": [{ + "hash": "0x0000000000000000000000005431f5f973781809d18643b87b44921b11355d81" + }] + } + }`, + }, + { + name: "not found error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.txmStore.On("FindTxByHash", hash).Return(nil, sql.ErrNoRows) + f.App.On("TxmStorageService").Return(f.Mocks.txmStore) + }, + query: query, + variables: variables, + result: ` + { + "ethTransaction": { + "code": "NOT_FOUND", + "message": "transaction not found" + } + }`, + }, + { + name: "generic error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.txmStore.On("FindTxByHash", hash).Return(nil, gError) + f.App.On("TxmStorageService").Return(f.Mocks.txmStore) + }, + query: query, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"ethTransaction"}, + Message: gError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_EthTransactions(t *testing.T) { + t.Parallel() + + query := ` + query GetEthTransactions { + ethTransactions { + results { + from + to + state + data + gasLimit + gasPrice + value + evmChainID + nonce + hash + hex + sentAt + } + metadata { + total + } + } + }` + hash := common.HexToHash("0x5431F5F973781809D18643b87B44921b11355d81") + gError := errors.New("error") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "ethTransactions"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + num := int64(2) + + f.Mocks.txmStore.On("Transactions", PageDefaultOffset, PageDefaultLimit).Return([]txmgr.Tx{ + { + ID: 1, + ToAddress: common.HexToAddress("0x5431F5F973781809D18643b87B44921b11355d81"), + FromAddress: common.HexToAddress("0x5431F5F973781809D18643b87B44921b11355d81"), + State: 
txmgrcommon.TxInProgress, + EncodedPayload: []byte("encoded payload"), + FeeLimit: 100, + Value: big.Int(assets.NewEthValue(100)), + ChainID: big.NewInt(22), + }, + }, 1, nil) + f.Mocks.txmStore.On("FindTxAttemptConfirmedByTxIDs", []int64{1}).Return([]txmgr.TxAttempt{ + { + TxID: 1, + Hash: hash, + TxFee: gas.EvmFee{Legacy: assets.NewWeiI(12)}, + SignedRawTx: []byte("something"), + BroadcastBeforeBlockNum: &num, + }, + }, nil) + f.App.On("TxmStorageService").Return(f.Mocks.txmStore) + }, + query: query, + result: ` + { + "ethTransactions": { + "results": [{ + "from": "0x5431F5F973781809D18643b87B44921b11355d81", + "to": "0x5431F5F973781809D18643b87B44921b11355d81", + "data": "0x656e636f646564207061796c6f6164", + "state": "in_progress", + "gasLimit": "100", + "gasPrice": "12", + "value": "0.000000000000000100", + "nonce": null, + "hash": "0x0000000000000000000000005431f5f973781809d18643b87b44921b11355d81", + "hex": "0x736f6d657468696e67", + "sentAt": "2", + "evmChainID": "22" + }], + "metadata": { + "total": 1 + } + } + }`, + }, + { + name: "generic error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.txmStore.On("Transactions", PageDefaultOffset, PageDefaultLimit).Return(nil, 0, gError) + f.App.On("TxmStorageService").Return(f.Mocks.txmStore) + }, + query: query, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"ethTransactions"}, + Message: gError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_EthTransactionsAttempts(t *testing.T) { + t.Parallel() + + query := ` + query GetEthTransactionsAttempts { + ethTransactionsAttempts { + results { + gasPrice + hash + hex + sentAt + } + metadata { + total + } + } + }` + hash := common.HexToHash("0x5431F5F973781809D18643b87B44921b11355d81") + gError := errors.New("error") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "ethTransactionsAttempts"), + { + name: 
"success", + authenticated: true, + before: func(f *gqlTestFramework) { + num := int64(2) + + f.Mocks.txmStore.On("TxAttempts", PageDefaultOffset, PageDefaultLimit).Return([]txmgr.TxAttempt{ + { + Hash: hash, + TxFee: gas.EvmFee{Legacy: assets.NewWeiI(12)}, + SignedRawTx: []byte("something"), + BroadcastBeforeBlockNum: &num, + Tx: txmgr.Tx{}, + }, + }, 1, nil) + f.App.On("TxmStorageService").Return(f.Mocks.txmStore) + }, + query: query, + result: ` + { + "ethTransactionsAttempts": { + "results": [{ + "gasPrice": "12", + "hash": "0x0000000000000000000000005431f5f973781809d18643b87b44921b11355d81", + "hex": "0x736f6d657468696e67", + "sentAt": "2" + }], + "metadata": { + "total": 1 + } + } + }`, + }, + { + name: "success with nil values", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.txmStore.On("TxAttempts", PageDefaultOffset, PageDefaultLimit).Return([]txmgr.TxAttempt{ + { + Hash: hash, + TxFee: gas.EvmFee{Legacy: assets.NewWeiI(12)}, + SignedRawTx: []byte("something"), + BroadcastBeforeBlockNum: nil, + }, + }, 1, nil) + f.App.On("TxmStorageService").Return(f.Mocks.txmStore) + }, + query: query, + result: ` + { + "ethTransactionsAttempts": { + "results": [{ + "gasPrice": "12", + "hash": "0x0000000000000000000000005431f5f973781809d18643b87b44921b11355d81", + "hex": "0x736f6d657468696e67", + "sentAt": null + }], + "metadata": { + "total": 1 + } + } + }`, + }, + { + name: "generic error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.txmStore.On("TxAttempts", PageDefaultOffset, PageDefaultLimit).Return(nil, 0, gError) + f.App.On("TxmStorageService").Return(f.Mocks.txmStore) + }, + query: query, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"ethTransactionsAttempts"}, + Message: gError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/features.go b/core/web/resolver/features.go new file mode 100644 index 
00000000..79cf5a14 --- /dev/null +++ b/core/web/resolver/features.go @@ -0,0 +1,33 @@ +package resolver + +import "github.com/goplugin/pluginv3.0/v2/core/config" + +type FeaturesResolver struct { + cfg config.Feature +} + +func NewFeaturesResolver(cfg config.Feature) *FeaturesResolver { + return &FeaturesResolver{cfg: cfg} +} + +// CSA resolves to whether CSA Keys are enabled +func (r *FeaturesResolver) CSA() bool { + return r.cfg.UICSAKeys() +} + +// FeedsManager resolves to whether the Feeds Manager is enabled for the UI +func (r *FeaturesResolver) FeedsManager() bool { + return r.cfg.FeedsManager() +} + +type FeaturesPayloadResolver struct { + cfg config.Feature +} + +func NewFeaturesPayloadResolver(cfg config.Feature) *FeaturesPayloadResolver { + return &FeaturesPayloadResolver{cfg: cfg} +} + +func (r *FeaturesPayloadResolver) ToFeatures() (*FeaturesResolver, bool) { + return NewFeaturesResolver(r.cfg), true +} diff --git a/core/web/resolver/features_test.go b/core/web/resolver/features_test.go new file mode 100644 index 00000000..a8dd9490 --- /dev/null +++ b/core/web/resolver/features_test.go @@ -0,0 +1,45 @@ +package resolver + +import ( + "testing" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" +) + +func Test_ToFeatures(t *testing.T) { + query := ` + { + features { + ... 
on Features { + csa + feedsManager + } + } + }` + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "features"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetConfig").Return(configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + t, f := true, false + c.Feature.UICSAKeys = &f + c.Feature.FeedsManager = &t + })) + }, + query: query, + result: ` + { + "features": { + "csa": false, + "feedsManager": true + } + }`, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/feeds_manager.go b/core/web/resolver/feeds_manager.go new file mode 100644 index 00000000..9e338e31 --- /dev/null +++ b/core/web/resolver/feeds_manager.go @@ -0,0 +1,256 @@ +package resolver + +import ( + "context" + "errors" + + "github.com/graph-gophers/graphql-go" + + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" + "github.com/goplugin/pluginv3.0/v2/core/web/loader" +) + +// FeedsManagerResolver resolves the FeedsManager type. +type FeedsManagerResolver struct { + mgr feeds.FeedsManager +} + +func NewFeedsManager(mgr feeds.FeedsManager) *FeedsManagerResolver { + return &FeedsManagerResolver{mgr: mgr} +} + +func NewFeedsManagers(mgrs []feeds.FeedsManager) []*FeedsManagerResolver { + var resolvers []*FeedsManagerResolver + for _, mgr := range mgrs { + resolvers = append(resolvers, NewFeedsManager(mgr)) + } + + return resolvers +} + +// ID resolves the feed managers's unique identifier. +func (r *FeedsManagerResolver) ID() graphql.ID { + return int64GQLID(r.mgr.ID) +} + +// Name resolves the feed managers's name field. +func (r *FeedsManagerResolver) Name() string { + return r.mgr.Name +} + +// URI resolves the feed managers's uri field. +func (r *FeedsManagerResolver) URI() string { + return r.mgr.URI +} + +// PublicKey resolves the feed managers's public key field. 
+func (r *FeedsManagerResolver) PublicKey() string { + return r.mgr.PublicKey.String() +} + +func (r *FeedsManagerResolver) JobProposals(ctx context.Context) ([]*JobProposalResolver, error) { + jps, err := loader.GetJobProposalsByFeedsManagerID(ctx, stringutils.FromInt64(r.mgr.ID)) + if err != nil { + return nil, err + } + + return NewJobProposals(jps), nil +} + +// IsConnectionActive resolves the feed managers's isConnectionActive field. +func (r *FeedsManagerResolver) IsConnectionActive() bool { + return r.mgr.IsConnectionActive +} + +func (r *FeedsManagerResolver) ChainConfigs(ctx context.Context) ([]*FeedsManagerChainConfigResolver, error) { + cfgs, err := loader.GetFeedsManagerChainConfigsByManagerID(ctx, r.mgr.ID) + if err != nil { + return nil, err + } + + return NewFeedsManagerChainConfigs(cfgs), nil +} + +// CreatedAt resolves the chains's created at field. +func (r *FeedsManagerResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.mgr.CreatedAt} +} + +// -- FeedsManager Query -- + +type FeedsManagerPayloadResolver struct { + mgr *feeds.FeedsManager + NotFoundErrorUnionType +} + +func NewFeedsManagerPayload(mgr *feeds.FeedsManager, err error) *FeedsManagerPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "feeds manager not found", isExpectedErrorFn: nil} + + return &FeedsManagerPayloadResolver{mgr: mgr, NotFoundErrorUnionType: e} +} + +// ToFeedsManager implements the FeedsManager union type of the payload +func (r *FeedsManagerPayloadResolver) ToFeedsManager() (*FeedsManagerResolver, bool) { + if r.mgr != nil { + return NewFeedsManager(*r.mgr), true + } + + return nil, false +} + +// -- FeedsManagers Query -- + +// FeedsManagersPayloadResolver resolves a list of feeds managers +type FeedsManagersPayloadResolver struct { + feedsManagers []feeds.FeedsManager +} + +func NewFeedsManagersPayload(feedsManagers []feeds.FeedsManager) *FeedsManagersPayloadResolver { + return &FeedsManagersPayloadResolver{ + feedsManagers: 
feedsManagers, + } +} + +// Results returns the feeds managers. +func (r *FeedsManagersPayloadResolver) Results() []*FeedsManagerResolver { + return NewFeedsManagers(r.feedsManagers) +} + +// -- CreateFeedsManager Mutation -- + +// CreateFeedsManagerPayloadResolver - +type CreateFeedsManagerPayloadResolver struct { + mgr *feeds.FeedsManager + // inputErrors maps an input path to a string + inputErrs map[string]string + NotFoundErrorUnionType +} + +func NewCreateFeedsManagerPayload(mgr *feeds.FeedsManager, err error, inputErrs map[string]string) *CreateFeedsManagerPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "feeds manager not found", isExpectedErrorFn: nil} + + return &CreateFeedsManagerPayloadResolver{ + mgr: mgr, + inputErrs: inputErrs, + NotFoundErrorUnionType: e, + } +} + +func (r *CreateFeedsManagerPayloadResolver) ToCreateFeedsManagerSuccess() (*CreateFeedsManagerSuccessResolver, bool) { + if r.mgr != nil { + return NewCreateFeedsManagerSuccessResolver(*r.mgr), true + } + + return nil, false +} + +func (r *CreateFeedsManagerPayloadResolver) ToSingleFeedsManagerError() (*SingleFeedsManagerErrorResolver, bool) { + if r.err != nil && errors.Is(r.err, feeds.ErrSingleFeedsManager) { + return NewSingleFeedsManagerError(r.err.Error()), true + } + + return nil, false +} + +func (r *CreateFeedsManagerPayloadResolver) ToInputErrors() (*InputErrorsResolver, bool) { + if r.inputErrs != nil { + var errs []*InputErrorResolver + + for path, message := range r.inputErrs { + errs = append(errs, NewInputError(path, message)) + } + + return NewInputErrors(errs), true + } + + return nil, false +} + +type CreateFeedsManagerSuccessResolver struct { + mgr feeds.FeedsManager +} + +func NewCreateFeedsManagerSuccessResolver(mgr feeds.FeedsManager) *CreateFeedsManagerSuccessResolver { + return &CreateFeedsManagerSuccessResolver{ + mgr: mgr, + } +} + +func (r *CreateFeedsManagerSuccessResolver) FeedsManager() *FeedsManagerResolver { + return NewFeedsManager(r.mgr) 
+} + +// SingleFeedsManagerErrorResolver - +type SingleFeedsManagerErrorResolver struct { + message string +} + +func NewSingleFeedsManagerError(message string) *SingleFeedsManagerErrorResolver { + return &SingleFeedsManagerErrorResolver{ + message: message, + } +} + +func (r *SingleFeedsManagerErrorResolver) Message() string { + return r.message +} + +func (r *SingleFeedsManagerErrorResolver) Code() ErrorCode { + return ErrorCodeUnprocessable +} + +// -- UpdateFeedsManager Mutation -- + +// UpdateFeedsManagerPayloadResolver - +type UpdateFeedsManagerPayloadResolver struct { + mgr *feeds.FeedsManager + inputErrs map[string]string + NotFoundErrorUnionType +} + +func NewUpdateFeedsManagerPayload(mgr *feeds.FeedsManager, err error, inputErrs map[string]string) *UpdateFeedsManagerPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "feeds manager not found", isExpectedErrorFn: nil} + + return &UpdateFeedsManagerPayloadResolver{ + mgr: mgr, + inputErrs: inputErrs, + NotFoundErrorUnionType: e, + } +} + +func (r *UpdateFeedsManagerPayloadResolver) ToUpdateFeedsManagerSuccess() (*UpdateFeedsManagerSuccessResolver, bool) { + if r.mgr != nil { + return NewUpdateFeedsManagerSuccessResolver(*r.mgr), true + } + + return nil, false +} + +func (r *UpdateFeedsManagerPayloadResolver) ToInputErrors() (*InputErrorsResolver, bool) { + if r.inputErrs != nil { + var errs []*InputErrorResolver + + for path, message := range r.inputErrs { + errs = append(errs, NewInputError(path, message)) + } + + return NewInputErrors(errs), true + } + + return nil, false +} + +type UpdateFeedsManagerSuccessResolver struct { + mgr feeds.FeedsManager +} + +func NewUpdateFeedsManagerSuccessResolver(mgr feeds.FeedsManager) *UpdateFeedsManagerSuccessResolver { + return &UpdateFeedsManagerSuccessResolver{ + mgr: mgr, + } +} + +func (r *UpdateFeedsManagerSuccessResolver) FeedsManager() *FeedsManagerResolver { + return NewFeedsManager(r.mgr) +} diff --git 
a/core/web/resolver/feeds_manager_chain_config.go b/core/web/resolver/feeds_manager_chain_config.go new file mode 100644 index 00000000..5298d80b --- /dev/null +++ b/core/web/resolver/feeds_manager_chain_config.go @@ -0,0 +1,275 @@ +package resolver + +import ( + "github.com/graph-gophers/graphql-go" + + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" +) + +type FeedsManagerChainConfigResolver struct { + cfg feeds.ChainConfig +} + +func NewFeedsManagerChainConfig(cfg feeds.ChainConfig) *FeedsManagerChainConfigResolver { + return &FeedsManagerChainConfigResolver{cfg: cfg} +} + +func NewFeedsManagerChainConfigs(cfgs []feeds.ChainConfig) []*FeedsManagerChainConfigResolver { + var resolvers []*FeedsManagerChainConfigResolver + for _, cfg := range cfgs { + resolvers = append(resolvers, NewFeedsManagerChainConfig(cfg)) + } + + return resolvers +} + +// ID resolves the chain configs's unique identifier. +func (r *FeedsManagerChainConfigResolver) ID() graphql.ID { + return int64GQLID(r.cfg.ID) +} + +// ChainID resolves the chain configs's chain id. +func (r *FeedsManagerChainConfigResolver) ChainID() string { + return r.cfg.ChainID +} + +// ChainType resolves the chain configs's chain type. +func (r *FeedsManagerChainConfigResolver) ChainType() string { + return string(r.cfg.ChainType) +} + +// AccountAddr resolves the chain configs's account address. +func (r *FeedsManagerChainConfigResolver) AccountAddr() string { + return r.cfg.AccountAddress +} + +// AdminAddr resolves the chain configs's admin address. +func (r *FeedsManagerChainConfigResolver) AdminAddr() string { + return r.cfg.AdminAddress +} + +// FluxMonitorJobConfig resolves the chain configs's Flux Monitor Config. +func (r *FeedsManagerChainConfigResolver) FluxMonitorJobConfig() *FluxMonitorJobConfigResolver { + return &FluxMonitorJobConfigResolver{cfg: r.cfg.FluxMonitorConfig} +} + +// OCR1JobConfig resolves the chain configs's OCR1 Config. 
+func (r *FeedsManagerChainConfigResolver) OCR1JobConfig() *OCR1JobConfigResolver { + return &OCR1JobConfigResolver{cfg: r.cfg.OCR1Config} +} + +// OCR2JobConfig resolves the chain configs's OCR2 Config. +func (r *FeedsManagerChainConfigResolver) OCR2JobConfig() *OCR2JobConfigResolver { + return &OCR2JobConfigResolver{cfg: r.cfg.OCR2Config} +} + +type FluxMonitorJobConfigResolver struct { + cfg feeds.FluxMonitorConfig +} + +func (r *FluxMonitorJobConfigResolver) Enabled() bool { + return r.cfg.Enabled +} + +type OCR1JobConfigResolver struct { + cfg feeds.OCR1Config +} + +func (r *OCR1JobConfigResolver) Enabled() bool { + return r.cfg.Enabled +} + +func (r *OCR1JobConfigResolver) IsBootstrap() bool { + return r.cfg.IsBootstrap +} + +func (r *OCR1JobConfigResolver) Multiaddr() *string { + return r.cfg.Multiaddr.Ptr() +} + +func (r *OCR1JobConfigResolver) P2PPeerID() *string { + return r.cfg.P2PPeerID.Ptr() +} + +func (r *OCR1JobConfigResolver) KeyBundleID() *string { + return r.cfg.KeyBundleID.Ptr() +} + +type OCR2JobConfigResolver struct { + cfg feeds.OCR2ConfigModel +} + +func (r *OCR2JobConfigResolver) Enabled() bool { + return r.cfg.Enabled +} + +func (r *OCR2JobConfigResolver) IsBootstrap() bool { + return r.cfg.IsBootstrap +} + +func (r *OCR2JobConfigResolver) Multiaddr() *string { + return r.cfg.Multiaddr.Ptr() +} + +func (r *OCR2JobConfigResolver) ForwarderAddress() *string { + return r.cfg.ForwarderAddress.Ptr() +} + +func (r *OCR2JobConfigResolver) P2PPeerID() *string { + return r.cfg.P2PPeerID.Ptr() +} + +func (r *OCR2JobConfigResolver) KeyBundleID() *string { + return r.cfg.KeyBundleID.Ptr() +} + +func (r *OCR2JobConfigResolver) Plugins() *PluginsResolver { + return &PluginsResolver{plugins: r.cfg.Plugins} +} + +// -- CreateFeedsManagerChainConfig Mutation -- + +// CreateFeedsManagerChainConfigPayloadResolver resolves the response to +// CreateFeedsManagerChainConfig +type CreateFeedsManagerChainConfigPayloadResolver struct { + cfg *feeds.ChainConfig + // 
inputErrors maps an input path to a string + inputErrs map[string]string + NotFoundErrorUnionType +} + +func NewCreateFeedsManagerChainConfigPayload(cfg *feeds.ChainConfig, err error, inputErrs map[string]string) *CreateFeedsManagerChainConfigPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "chain config not found", isExpectedErrorFn: nil} + + return &CreateFeedsManagerChainConfigPayloadResolver{ + cfg: cfg, + inputErrs: inputErrs, + NotFoundErrorUnionType: e, + } +} + +func (r *CreateFeedsManagerChainConfigPayloadResolver) ToCreateFeedsManagerChainConfigSuccess() (*CreateFeedsManagerChainConfigSuccessResolver, bool) { + if r.cfg != nil { + return NewCreateFeedsManagerChainConfigSuccessResolver(r.cfg), true + } + + return nil, false +} + +func (r *CreateFeedsManagerChainConfigPayloadResolver) ToInputErrors() (*InputErrorsResolver, bool) { + if r.inputErrs != nil { + var errs []*InputErrorResolver + + for path, message := range r.inputErrs { + errs = append(errs, NewInputError(path, message)) + } + + return NewInputErrors(errs), true + } + + return nil, false +} + +type CreateFeedsManagerChainConfigSuccessResolver struct { + cfg *feeds.ChainConfig +} + +func NewCreateFeedsManagerChainConfigSuccessResolver(cfg *feeds.ChainConfig) *CreateFeedsManagerChainConfigSuccessResolver { + return &CreateFeedsManagerChainConfigSuccessResolver{ + cfg: cfg, + } +} + +func (r *CreateFeedsManagerChainConfigSuccessResolver) ChainConfig() *FeedsManagerChainConfigResolver { + return NewFeedsManagerChainConfig(*r.cfg) +} + +// -- Delete FMS Chain Config -- + +type DeleteFeedsManagerChainConfigPayloadResolver struct { + cfg *feeds.ChainConfig + NotFoundErrorUnionType +} + +func NewDeleteFeedsManagerChainConfigPayload(cfg *feeds.ChainConfig, err error) *DeleteFeedsManagerChainConfigPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "chain config not found", isExpectedErrorFn: nil} + + return &DeleteFeedsManagerChainConfigPayloadResolver{cfg: cfg, 
NotFoundErrorUnionType: e} +} + +func (r *DeleteFeedsManagerChainConfigPayloadResolver) ToDeleteFeedsManagerChainConfigSuccess() (*DeleteFeedsManagerChainConfigSuccessResolver, bool) { + if r.cfg == nil { + return nil, false + } + + return NewDeleteFeedsManagerChainConfigSuccess(*r.cfg), true +} + +type DeleteFeedsManagerChainConfigSuccessResolver struct { + cfg feeds.ChainConfig +} + +func NewDeleteFeedsManagerChainConfigSuccess(cfg feeds.ChainConfig) *DeleteFeedsManagerChainConfigSuccessResolver { + return &DeleteFeedsManagerChainConfigSuccessResolver{cfg: cfg} +} + +func (r *DeleteFeedsManagerChainConfigSuccessResolver) ChainConfig() *FeedsManagerChainConfigResolver { + return NewFeedsManagerChainConfig(r.cfg) +} + +// -- UpdateFeedsManagerChainConfig Mutation -- + +// UpdateFeedsManagerChainConfigPayloadResolver resolves the response to +// UpdateFeedsManagerChainConfig +type UpdateFeedsManagerChainConfigPayloadResolver struct { + cfg *feeds.ChainConfig + // inputErrors maps an input path to a string + inputErrs map[string]string + NotFoundErrorUnionType +} + +func NewUpdateFeedsManagerChainConfigPayload(cfg *feeds.ChainConfig, err error, inputErrs map[string]string) *UpdateFeedsManagerChainConfigPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "chain config not found", isExpectedErrorFn: nil} + + return &UpdateFeedsManagerChainConfigPayloadResolver{ + cfg: cfg, + inputErrs: inputErrs, + NotFoundErrorUnionType: e, + } +} + +func (r *UpdateFeedsManagerChainConfigPayloadResolver) ToUpdateFeedsManagerChainConfigSuccess() (*UpdateFeedsManagerChainConfigSuccessResolver, bool) { + if r.cfg != nil { + return NewUpdateFeedsManagerChainConfigSuccessResolver(r.cfg), true + } + + return nil, false +} + +func (r *UpdateFeedsManagerChainConfigPayloadResolver) ToInputErrors() (*InputErrorsResolver, bool) { + if r.inputErrs != nil { + var errs []*InputErrorResolver + + for path, message := range r.inputErrs { + errs = append(errs, NewInputError(path, 
message)) + } + + return NewInputErrors(errs), true + } + + return nil, false +} + +type UpdateFeedsManagerChainConfigSuccessResolver struct { + cfg *feeds.ChainConfig +} + +func NewUpdateFeedsManagerChainConfigSuccessResolver(cfg *feeds.ChainConfig) *UpdateFeedsManagerChainConfigSuccessResolver { + return &UpdateFeedsManagerChainConfigSuccessResolver{ + cfg: cfg, + } +} + +func (r *UpdateFeedsManagerChainConfigSuccessResolver) ChainConfig() *FeedsManagerChainConfigResolver { + return NewFeedsManagerChainConfig(*r.cfg) +} diff --git a/core/web/resolver/feeds_manager_chain_config_test.go b/core/web/resolver/feeds_manager_chain_config_test.go new file mode 100644 index 00000000..adaf68ea --- /dev/null +++ b/core/web/resolver/feeds_manager_chain_config_test.go @@ -0,0 +1,427 @@ +package resolver + +import ( + "database/sql" + "testing" + + "gopkg.in/guregu/null.v4" + + "github.com/stretchr/testify/mock" + + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +func Test_CreateFeedsManagerChainConfig(t *testing.T) { + var ( + mgrID = int64(100) + cfgID = int64(1) + chainID = "42" + accountAddr = "0x0000001" + adminAddr = "0x0000002" + forwarderAddr = "0x0000003" + peerID = null.StringFrom("p2p_12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw") + keyBundleID = null.StringFrom("6fdb8235e16e099de91df7ef8a8088e9deea0ed6ae106b133e5d985a8a9e1562") + + mutation = ` + mutation CreateFeedsManagerChainConfig($input: CreateFeedsManagerChainConfigInput!) { + createFeedsManagerChainConfig(input: $input) { + ... on CreateFeedsManagerChainConfigSuccess { + chainConfig { + id + } + } + ... on NotFoundError { + message + code + } + ... 
on InputErrors { + errors { + path + message + code + } + } + } + }` + variables = map[string]interface{}{ + "input": map[string]interface{}{ + "feedsManagerID": stringutils.FromInt64(mgrID), + "chainID": chainID, + "chainType": "EVM", + "accountAddr": accountAddr, + "adminAddr": adminAddr, + "fluxMonitorEnabled": false, + "ocr1Enabled": true, + "ocr1IsBootstrap": false, + "ocr1P2PPeerID": peerID.String, + "ocr1KeyBundleID": keyBundleID.String, + "ocr2Enabled": true, + "ocr2IsBootstrap": false, + "ocr2P2PPeerID": peerID.String, + "ocr2KeyBundleID": keyBundleID.String, + "ocr2Plugins": `{"commit":true,"execute":true,"median":false,"mercury":true}`, + "ocr2ForwarderAddress": forwarderAddr, + }, + } + ) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "createFeedsManagerChainConfig"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("CreateChainConfig", mock.Anything, feeds.ChainConfig{ + FeedsManagerID: mgrID, + ChainType: feeds.ChainTypeEVM, + ChainID: chainID, + AccountAddress: accountAddr, + AdminAddress: adminAddr, + FluxMonitorConfig: feeds.FluxMonitorConfig{ + Enabled: false, + }, + OCR1Config: feeds.OCR1Config{ + Enabled: true, + P2PPeerID: peerID, + KeyBundleID: keyBundleID, + }, + OCR2Config: feeds.OCR2ConfigModel{ + Enabled: true, + P2PPeerID: peerID, + KeyBundleID: keyBundleID, + ForwarderAddress: null.StringFrom(forwarderAddr), + Plugins: feeds.Plugins{ + Commit: true, + Execute: true, + Median: false, + Mercury: true, + }, + }, + }).Return(cfgID, nil) + f.Mocks.feedsSvc.On("GetChainConfig", cfgID).Return(&feeds.ChainConfig{ + ID: cfgID, + ChainType: feeds.ChainTypeEVM, + ChainID: chainID, + AccountAddress: accountAddr, + AdminAddress: adminAddr, + FluxMonitorConfig: feeds.FluxMonitorConfig{ + Enabled: false, + }, + OCR1Config: feeds.OCR1Config{ + Enabled: true, + P2PPeerID: peerID, + 
KeyBundleID: keyBundleID, + }, + OCR2Config: feeds.OCR2ConfigModel{ + Enabled: true, + P2PPeerID: peerID, + KeyBundleID: keyBundleID, + ForwarderAddress: null.StringFrom(forwarderAddr), + Plugins: feeds.Plugins{ + Commit: true, + Execute: true, + Median: false, + Mercury: true, + }, + }, + }, nil) + }, + query: mutation, + variables: variables, + result: ` + { + "createFeedsManagerChainConfig": { + "chainConfig": { + "id": "1" + } + } + }`, + }, + { + name: "create call not found", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("CreateChainConfig", mock.Anything, mock.IsType(feeds.ChainConfig{})).Return(int64(0), sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "createFeedsManagerChainConfig": { + "message": "chain config not found", + "code": "NOT_FOUND" + } + }`, + }, + { + name: "get call not found", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("CreateChainConfig", mock.Anything, mock.IsType(feeds.ChainConfig{})).Return(cfgID, nil) + f.Mocks.feedsSvc.On("GetChainConfig", cfgID).Return(nil, sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "createFeedsManagerChainConfig": { + "message": "chain config not found", + "code": "NOT_FOUND" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func Test_DeleteFeedsManagerChainConfig(t *testing.T) { + var ( + cfgID = int64(1) + + mutation = ` + mutation DeleteFeedsManagerChainConfig($id: ID!) { + deleteFeedsManagerChainConfig(id: $id) { + ... on DeleteFeedsManagerChainConfigSuccess { + chainConfig { + id + } + } + ... 
on NotFoundError { + message + code + } + } + }` + variables = map[string]interface{}{ + "id": "1", + } + ) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "deleteFeedsManagerChainConfig"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("GetChainConfig", cfgID).Return(&feeds.ChainConfig{ + ID: cfgID, + }, nil) + f.Mocks.feedsSvc.On("DeleteChainConfig", mock.Anything, cfgID).Return(cfgID, nil) + }, + query: mutation, + variables: variables, + result: ` + { + "deleteFeedsManagerChainConfig": { + "chainConfig": { + "id": "1" + } + } + }`, + }, + { + name: "get call not found", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("GetChainConfig", cfgID).Return(nil, sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "deleteFeedsManagerChainConfig": { + "message": "chain config not found", + "code": "NOT_FOUND" + } + }`, + }, + { + name: "delete call not found", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("GetChainConfig", cfgID).Return(&feeds.ChainConfig{ + ID: cfgID, + }, nil) + f.Mocks.feedsSvc.On("DeleteChainConfig", mock.Anything, cfgID).Return(int64(0), sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "deleteFeedsManagerChainConfig": { + "message": "chain config not found", + "code": "NOT_FOUND" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func Test_UpdateFeedsManagerChainConfig(t *testing.T) { + var ( + cfgID = int64(1) + peerID = null.StringFrom("p2p_12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw") + keyBundleID = null.StringFrom("6fdb8235e16e099de91df7ef8a8088e9deea0ed6ae106b133e5d985a8a9e1562") + accountAddr = "0x0000001" + adminAddr = 
"0x0000002" + forwarderAddr = "0x0000003" + + mutation = ` + mutation UpdateFeedsManagerChainConfig($id: ID!, $input: UpdateFeedsManagerChainConfigInput!) { + updateFeedsManagerChainConfig(id: $id, input: $input) { + ... on UpdateFeedsManagerChainConfigSuccess { + chainConfig { + id + } + } + ... on NotFoundError { + message + code + } + ... on InputErrors { + errors { + path + message + code + } + } + } + }` + variables = map[string]interface{}{ + "id": "1", + "input": map[string]interface{}{ + "accountAddr": accountAddr, + "adminAddr": adminAddr, + "fluxMonitorEnabled": false, + "ocr1Enabled": true, + "ocr1IsBootstrap": false, + "ocr1P2PPeerID": peerID.String, + "ocr1KeyBundleID": keyBundleID.String, + "ocr2Enabled": true, + "ocr2IsBootstrap": false, + "ocr2P2PPeerID": peerID.String, + "ocr2KeyBundleID": keyBundleID.String, + "ocr2Plugins": `{"commit":true,"execute":true,"median":false,"mercury":true}`, + "ocr2ForwarderAddress": forwarderAddr, + }, + } + ) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "updateFeedsManagerChainConfig"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("UpdateChainConfig", mock.Anything, feeds.ChainConfig{ + ID: cfgID, + AccountAddress: accountAddr, + AdminAddress: adminAddr, + FluxMonitorConfig: feeds.FluxMonitorConfig{ + Enabled: false, + }, + OCR1Config: feeds.OCR1Config{ + Enabled: true, + P2PPeerID: null.StringFrom(peerID.String), + KeyBundleID: null.StringFrom(keyBundleID.String), + }, + OCR2Config: feeds.OCR2ConfigModel{ + Enabled: true, + P2PPeerID: peerID, + KeyBundleID: keyBundleID, + ForwarderAddress: null.StringFrom(forwarderAddr), + Plugins: feeds.Plugins{ + Commit: true, + Execute: true, + Median: false, + Mercury: true, + }, + }, + }).Return(cfgID, nil) + f.Mocks.feedsSvc.On("GetChainConfig", cfgID).Return(&feeds.ChainConfig{ + ID: cfgID, + 
AccountAddress: accountAddr, + AdminAddress: adminAddr, + FluxMonitorConfig: feeds.FluxMonitorConfig{ + Enabled: false, + }, + OCR1Config: feeds.OCR1Config{ + Enabled: true, + P2PPeerID: null.StringFrom(peerID.String), + KeyBundleID: null.StringFrom(keyBundleID.String), + }, + OCR2Config: feeds.OCR2ConfigModel{ + Enabled: true, + P2PPeerID: peerID, + KeyBundleID: keyBundleID, + ForwarderAddress: null.StringFrom(forwarderAddr), + Plugins: feeds.Plugins{ + Commit: true, + Execute: true, + Median: false, + Mercury: true, + }, + }, + }, nil) + }, + query: mutation, + variables: variables, + result: ` + { + "updateFeedsManagerChainConfig": { + "chainConfig": { + "id": "1" + } + } + }`, + }, + { + name: "update call not found", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("UpdateChainConfig", mock.Anything, mock.IsType(feeds.ChainConfig{})).Return(int64(0), sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "updateFeedsManagerChainConfig": { + "message": "chain config not found", + "code": "NOT_FOUND" + } + }`, + }, + { + name: "get call not found", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("UpdateChainConfig", mock.Anything, mock.IsType(feeds.ChainConfig{})).Return(cfgID, nil) + f.Mocks.feedsSvc.On("GetChainConfig", cfgID).Return(nil, sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "updateFeedsManagerChainConfig": { + "message": "chain config not found", + "code": "NOT_FOUND" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/feeds_manager_test.go b/core/web/resolver/feeds_manager_test.go new file mode 100644 index 00000000..2c7ba88a --- /dev/null +++ b/core/web/resolver/feeds_manager_test.go @@ -0,0 +1,438 @@ +package resolver + +import ( + "database/sql" + "testing" + + 
"github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + "github.com/goplugin/pluginv3.0/v2/core/utils/crypto" +) + +func Test_FeedsManagers(t *testing.T) { + var ( + query = ` + query GetFeedsManagers { + feedsManagers { + results { + id + name + uri + publicKey + isConnectionActive + createdAt + jobProposals { + id + status + } + } + } + }` + ) + + pubKey, err := crypto.PublicKeyFromHex("3b0f149627adb7b6fafe1497a9dfc357f22295a5440786c3bc566dfdb0176808") + require.NoError(t, err) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "feedsManagers"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("ListJobProposalsByManagersIDs", []int64{1}).Return([]feeds.JobProposal{ + { + ID: int64(100), + FeedsManagerID: int64(1), + Status: feeds.JobProposalStatusApproved, + }, + }, nil) + f.Mocks.feedsSvc.On("ListManagers").Return([]feeds.FeedsManager{ + { + ID: 1, + Name: "manager1", + URI: "localhost:2000", + PublicKey: *pubKey, + IsConnectionActive: true, + CreatedAt: f.Timestamp(), + }, + }, nil) + }, + query: query, + result: ` + { + "feedsManagers": { + "results": [{ + "id": "1", + "name": "manager1", + "uri": "localhost:2000", + "publicKey": "3b0f149627adb7b6fafe1497a9dfc357f22295a5440786c3bc566dfdb0176808", + "isConnectionActive": true, + "createdAt": "2021-01-01T00:00:00Z", + "jobProposals": [{ + "id": "100", + "status": "APPROVED" + }] + }] + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func Test_FeedsManager(t *testing.T) { + var ( + mgrID = int64(1) + query = ` + query GetFeedsManager { + feedsManager(id: 1) { + ... on FeedsManager { + id + name + uri + publicKey + isConnectionActive + createdAt + } + ... 
on NotFoundError { + message + code + } + } + }` + ) + pubKey, err := crypto.PublicKeyFromHex("3b0f149627adb7b6fafe1497a9dfc357f22295a5440786c3bc566dfdb0176808") + require.NoError(t, err) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "feedsManager"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("GetManager", mgrID).Return(&feeds.FeedsManager{ + ID: mgrID, + Name: "manager1", + URI: "localhost:2000", + PublicKey: *pubKey, + IsConnectionActive: true, + CreatedAt: f.Timestamp(), + }, nil) + }, + query: query, + result: ` + { + "feedsManager": { + "id": "1", + "name": "manager1", + "uri": "localhost:2000", + "publicKey": "3b0f149627adb7b6fafe1497a9dfc357f22295a5440786c3bc566dfdb0176808", + "isConnectionActive": true, + "createdAt": "2021-01-01T00:00:00Z" + } + }`, + }, + { + name: "not found", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("GetManager", mgrID).Return(nil, sql.ErrNoRows) + }, + query: query, + result: ` + { + "feedsManager": { + "message": "feeds manager not found", + "code": "NOT_FOUND" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func Test_CreateFeedsManager(t *testing.T) { + var ( + mgrID = int64(1) + name = "manager1" + uri = "localhost:2000" + pubKeyHex = "3b0f149627adb7b6fafe1497a9dfc357f22295a5440786c3bc566dfdb0176808" + + mutation = ` + mutation CreateFeedsManager($input: CreateFeedsManagerInput!) { + createFeedsManager(input: $input) { + ... on CreateFeedsManagerSuccess { + feedsManager { + id + name + uri + publicKey + isConnectionActive + createdAt + } + } + ... on SingleFeedsManagerError { + message + code + } + ... on NotFoundError { + message + code + } + ... 
on InputErrors { + errors { + path + message + code + } + } + } + }` + variables = map[string]interface{}{ + "input": map[string]interface{}{ + "name": name, + "uri": uri, + "publicKey": pubKeyHex, + "isBootstrapPeer": false, + }, + } + ) + pubKey, err := crypto.PublicKeyFromHex(pubKeyHex) + require.NoError(t, err) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "createFeedsManager"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("RegisterManager", mock.Anything, feeds.RegisterManagerParams{ + Name: name, + URI: uri, + PublicKey: *pubKey, + }).Return(mgrID, nil) + f.Mocks.feedsSvc.On("GetManager", mgrID).Return(&feeds.FeedsManager{ + ID: mgrID, + Name: name, + URI: uri, + PublicKey: *pubKey, + IsConnectionActive: false, + CreatedAt: f.Timestamp(), + }, nil) + }, + query: mutation, + variables: variables, + result: ` + { + "createFeedsManager": { + "feedsManager": { + "id": "1", + "name": "manager1", + "uri": "localhost:2000", + "publicKey": "3b0f149627adb7b6fafe1497a9dfc357f22295a5440786c3bc566dfdb0176808", + "isConnectionActive": false, + "createdAt": "2021-01-01T00:00:00Z" + } + } + }`, + }, + { + name: "single feeds manager error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc. + On("RegisterManager", mock.Anything, mock.IsType(feeds.RegisterManagerParams{})). 
+ Return(int64(0), feeds.ErrSingleFeedsManager) + }, + query: mutation, + variables: variables, + result: ` + { + "createFeedsManager": { + "message": "only a single feeds manager is supported", + "code": "UNPROCESSABLE" + } + }`, + }, + { + name: "not found", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("RegisterManager", mock.Anything, mock.IsType(feeds.RegisterManagerParams{})).Return(mgrID, nil) + f.Mocks.feedsSvc.On("GetManager", mgrID).Return(nil, sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "createFeedsManager": { + "message": "feeds manager not found", + "code": "NOT_FOUND" + } + }`, + }, + { + name: "invalid input public key", + authenticated: true, + query: mutation, + variables: map[string]interface{}{ + "input": map[string]interface{}{ + "name": name, + "uri": uri, + "publicKey": "zzzzz", + }, + }, + result: ` + { + "createFeedsManager": { + "errors": [{ + "path": "input/publicKey", + "message": "invalid hex value", + "code": "INVALID_INPUT" + }] + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func Test_UpdateFeedsManager(t *testing.T) { + var ( + mgrID = int64(1) + name = "manager1" + uri = "localhost:2000" + pubKeyHex = "3b0f149627adb7b6fafe1497a9dfc357f22295a5440786c3bc566dfdb0176808" + + mutation = ` + mutation UpdateFeedsManager($id: ID!, $input: UpdateFeedsManagerInput!) { + updateFeedsManager(id: $id, input: $input) { + ... on UpdateFeedsManagerSuccess { + feedsManager { + id + name + uri + publicKey + isConnectionActive + createdAt + } + } + ... on NotFoundError { + message + code + } + ... 
on InputErrors { + errors { + path + message + code + } + } + } + }` + variables = map[string]interface{}{ + "id": "1", + "input": map[string]interface{}{ + "name": name, + "uri": uri, + "publicKey": pubKeyHex, + }, + } + ) + pubKey, err := crypto.PublicKeyFromHex(pubKeyHex) + require.NoError(t, err) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "updateFeedsManager"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("UpdateManager", mock.Anything, feeds.FeedsManager{ + ID: mgrID, + Name: name, + URI: uri, + PublicKey: *pubKey, + }).Return(nil) + f.Mocks.feedsSvc.On("GetManager", mgrID).Return(&feeds.FeedsManager{ + ID: mgrID, + Name: name, + URI: uri, + PublicKey: *pubKey, + IsConnectionActive: false, + CreatedAt: f.Timestamp(), + }, nil) + }, + query: mutation, + variables: variables, + result: ` + { + "updateFeedsManager": { + "feedsManager": { + "id": "1", + "name": "manager1", + "uri": "localhost:2000", + "publicKey": "3b0f149627adb7b6fafe1497a9dfc357f22295a5440786c3bc566dfdb0176808", + "isConnectionActive": false, + "createdAt": "2021-01-01T00:00:00Z" + } + } + }`, + }, + { + name: "not found", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("UpdateManager", mock.Anything, mock.IsType(feeds.FeedsManager{})).Return(nil) + f.Mocks.feedsSvc.On("GetManager", mgrID).Return(nil, sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "updateFeedsManager": { + "message": "feeds manager not found", + "code": "NOT_FOUND" + } + }`, + }, + { + name: "invalid input public key", + authenticated: true, + query: mutation, + variables: map[string]interface{}{ + "id": "1", + "input": map[string]interface{}{ + "name": name, + "uri": uri, + "publicKey": "zzzzz", + }, + }, + result: ` + { + 
"updateFeedsManager": { + "errors": [{ + "path": "input/publicKey", + "message": "invalid hex value", + "code": "INVALID_INPUT" + }] + } + }`, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/helpers.go b/core/web/resolver/helpers.go new file mode 100644 index 00000000..f16799fa --- /dev/null +++ b/core/web/resolver/helpers.go @@ -0,0 +1,90 @@ +package resolver + +import ( + "database/sql" + "fmt" + "strings" + + "github.com/graph-gophers/graphql-go" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +const ( + // PageDefaultOffset defines the default offset to use if none is provided + PageDefaultOffset = 0 + + // PageDefaultLimit defines the default limit to use if none is provided + PageDefaultLimit = 50 +) + +func int32GQLID(i int32) graphql.ID { + return graphql.ID(stringutils.FromInt32(i)) +} + +func int64GQLID(i int64) graphql.ID { + return graphql.ID(stringutils.FromInt64(i)) +} + +// pageOffset returns the default page offset if nil, otherwise it returns the +// provided offset. +func pageOffset(offset *int32) int { + if offset == nil { + return PageDefaultOffset + } + + return int(*offset) +} + +// pageLimit returns the default page limit if nil, otherwise it returns the +// provided limit. +func pageLimit(limit *int32) int { + if limit == nil { + return PageDefaultLimit + } + + return int(*limit) +} + +// ValidateBridgeTypeUniqueness checks that a bridge has not already been created +// +// / This validation function should be moved into a bridge service. 
+func ValidateBridgeTypeUniqueness(bt *bridges.BridgeTypeRequest, orm bridges.ORM) error { + _, err := orm.FindBridge(bt.Name) + if err == nil { + return fmt.Errorf("bridge type %v already exists", bt.Name) + } + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return fmt.Errorf("error determining if bridge type %v already exists", bt.Name) + } + + return nil +} + +// ValidateBridgeType checks that the bridge type doesn't have a duplicate +// or invalid name or invalid url +// +// This validation function should be moved into a bridge service and return +// multiple errors. +func ValidateBridgeType(bt *bridges.BridgeTypeRequest) error { + if len(bt.Name.String()) < 1 { + return errors.New("No name specified") + } + if _, err := bridges.ParseBridgeName(bt.Name.String()); err != nil { + return errors.Wrap(err, "invalid bridge name") + } + u := bt.URL.String() + if len(strings.TrimSpace(u)) == 0 { + return errors.New("url must be present") + } + if bt.MinimumContractPayment != nil && + bt.MinimumContractPayment.Cmp(assets.NewLinkFromJuels(0)) < 0 { + + return errors.New("MinimumContractPayment must be positive") + } + + return nil +} diff --git a/core/web/resolver/job.go b/core/web/resolver/job.go new file mode 100644 index 00000000..9b81d1fc --- /dev/null +++ b/core/web/resolver/job.go @@ -0,0 +1,260 @@ +package resolver + +import ( + "context" + + "github.com/graph-gophers/graphql-go" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/web/loader" +) + +// JobResolver resolves the Job type. 
+type JobResolver struct { + app plugin.Application + j job.Job +} + +func NewJob(app plugin.Application, j job.Job) *JobResolver { + return &JobResolver{app: app, j: j} +} + +func NewJobs(app plugin.Application, jobs []job.Job) []*JobResolver { + var resolvers []*JobResolver + for _, j := range jobs { + resolvers = append(resolvers, NewJob(app, j)) + } + + return resolvers +} + +// ID resolves the job's id. +func (r *JobResolver) ID() graphql.ID { + return int32GQLID(r.j.ID) +} + +// CreatedAt resolves the job's created at timestamp. +func (r *JobResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.j.CreatedAt} +} + +// Errors resolves the job's top level errors. +func (r *JobResolver) Errors(ctx context.Context) ([]*JobErrorResolver, error) { + specErrs, err := loader.GetJobSpecErrorsByJobID(ctx, r.j.ID) + if err != nil { + return nil, err + } + + return NewJobErrors(specErrs), nil +} + +// ExternalJobID resolves the job's external job id. +func (r *JobResolver) ExternalJobID() string { + return r.j.ExternalJobID.String() +} + +// MaxTaskDuration resolves the job's max task duration. +func (r *JobResolver) MaxTaskDuration() string { + return r.j.MaxTaskDuration.Duration().String() +} + +// Name resolves the job's name. +func (r *JobResolver) Name() string { + return r.j.Name.ValueOrZero() +} + +// ObservationSource resolves the job's observation source. +// +// This could potentially be moved to a dataloader in the future as we are +// fetching it from a relationship. +func (r *JobResolver) ObservationSource() string { + return r.j.PipelineSpec.DotDagSource +} + +// SchemaVersion resolves the job's schema version. +func (r *JobResolver) SchemaVersion() int32 { + return int32(r.j.SchemaVersion) +} + +// GasLimit resolves the job's gas limit. 
+func (r *JobResolver) GasLimit() *int32 { + if !r.j.GasLimit.Valid { + return nil + } + v := int32(r.j.GasLimit.Uint32) + return &v +} + +// ForwardingAllowed sets whether txs submitted by this job should be forwarded when possible. +func (r *JobResolver) ForwardingAllowed() *bool { + return &r.j.ForwardingAllowed +} + +// Type resolves the job's type. +func (r *JobResolver) Type() string { + return string(r.j.Type) +} + +// Spec resolves the job's spec. +func (r *JobResolver) Spec() *SpecResolver { + return NewSpec(r.j) +} + +// Runs fetches the runs for a Job. +func (r *JobResolver) Runs(ctx context.Context, args struct { + Offset *int32 + Limit *int32 +}) (*JobRunsPayloadResolver, error) { + offset := pageOffset(args.Offset) + limit := pageLimit(args.Limit) + + if limit > 100 { + limit = 100 + } + + ids, err := r.app.JobORM().FindPipelineRunIDsByJobID(r.j.ID, offset, limit) + if err != nil { + return nil, err + } + + runs, err := loader.GetJobRunsByIDs(ctx, ids) + if err != nil { + return nil, err + } + + count, err := r.app.JobORM().CountPipelineRunsByJobID(r.j.ID) + if err != nil { + return nil, err + } + + return NewJobRunsPayload(runs, count, r.app), nil +} + +// JobsPayloadResolver resolves a page of jobs +type JobsPayloadResolver struct { + app plugin.Application + jobs []job.Job + total int32 +} + +func NewJobsPayload(app plugin.Application, jobs []job.Job, total int32) *JobsPayloadResolver { + return &JobsPayloadResolver{ + app: app, + jobs: jobs, + total: total, + } +} + +// Results returns the jobs. +func (r *JobsPayloadResolver) Results() []*JobResolver { + return NewJobs(r.app, r.jobs) +} + +// Metadata returns the pagination metadata. 
+func (r *JobsPayloadResolver) Metadata() *PaginationMetadataResolver { + return NewPaginationMetadata(r.total) +} + +type JobPayloadResolver struct { + app plugin.Application + job *job.Job + NotFoundErrorUnionType +} + +func NewJobPayload(app plugin.Application, j *job.Job, err error) *JobPayloadResolver { + e := NotFoundErrorUnionType{err, "job not found", nil} + + return &JobPayloadResolver{app: app, job: j, NotFoundErrorUnionType: e} +} + +// ToJob implements the JobPayload union type of the payload +func (r *JobPayloadResolver) ToJob() (*JobResolver, bool) { + if r.job != nil { + return NewJob(r.app, *r.job), true + } + + return nil, false +} + +// -- CreateJob Mutation -- + +type CreateJobPayloadResolver struct { + app plugin.Application + j *job.Job + inputErrs map[string]string +} + +func NewCreateJobPayload(app plugin.Application, job *job.Job, inputErrs map[string]string) *CreateJobPayloadResolver { + return &CreateJobPayloadResolver{app: app, j: job, inputErrs: inputErrs} +} + +func (r *CreateJobPayloadResolver) ToCreateJobSuccess() (*CreateJobSuccessResolver, bool) { + if r.inputErrs != nil { + return nil, false + } + + return NewCreateJobSuccess(r.app, r.j), true +} + +func (r *CreateJobPayloadResolver) ToInputErrors() (*InputErrorsResolver, bool) { + if r.inputErrs == nil { + return nil, false + } + + var errs []*InputErrorResolver + + for path, message := range r.inputErrs { + errs = append(errs, NewInputError(path, message)) + } + + return NewInputErrors(errs), true +} + +type CreateJobSuccessResolver struct { + app plugin.Application + j *job.Job +} + +func NewCreateJobSuccess(app plugin.Application, job *job.Job) *CreateJobSuccessResolver { + return &CreateJobSuccessResolver{app: app, j: job} +} + +func (r *CreateJobSuccessResolver) Job() *JobResolver { + return NewJob(r.app, *r.j) +} + +// -- DeleteJob Mutation -- + +type DeleteJobPayloadResolver struct { + app plugin.Application + j *job.Job + NotFoundErrorUnionType +} + +func 
NewDeleteJobPayload(app plugin.Application, j *job.Job, err error) *DeleteJobPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "job not found"} + + return &DeleteJobPayloadResolver{app: app, j: j, NotFoundErrorUnionType: e} +} + +func (r *DeleteJobPayloadResolver) ToDeleteJobSuccess() (*DeleteJobSuccessResolver, bool) { + if r.j == nil { + return nil, false + } + + return NewDeleteJobSuccess(r.app, r.j), true +} + +type DeleteJobSuccessResolver struct { + app plugin.Application + j *job.Job +} + +func NewDeleteJobSuccess(app plugin.Application, job *job.Job) *DeleteJobSuccessResolver { + return &DeleteJobSuccessResolver{app: app, j: job} +} + +func (r *DeleteJobSuccessResolver) Job() *JobResolver { + return NewJob(r.app, *r.j) +} diff --git a/core/web/resolver/job_error.go b/core/web/resolver/job_error.go new file mode 100644 index 00000000..237b978f --- /dev/null +++ b/core/web/resolver/job_error.go @@ -0,0 +1,86 @@ +package resolver + +import ( + "github.com/graph-gophers/graphql-go" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +// JobErrorResolver resolves a Job Error +type JobErrorResolver struct { + // This is purposefully named Error instead of Err to differentiate it from + // a standard golang error. + specError job.SpecError +} + +func NewJobError(specError job.SpecError) *JobErrorResolver { + return &JobErrorResolver{specError: specError} +} + +func NewJobErrors(specErrors []job.SpecError) []*JobErrorResolver { + var resolvers []*JobErrorResolver + for _, e := range specErrors { + resolvers = append(resolvers, NewJobError(e)) + } + + return resolvers +} + +// ID resolves the job error's id. +func (r *JobErrorResolver) ID() graphql.ID { + return graphql.ID(stringutils.FromInt64(r.specError.ID)) +} + +// Description resolves the job error's description. 
+func (r *JobErrorResolver) Description() string { + return r.specError.Description +} + +// Occurrences resolves the job error's number of occurrences. +func (r *JobErrorResolver) Occurrences() int32 { + return int32(r.specError.Occurrences) +} + +// CreatedAt resolves the job error's created at timestamp. +func (r *JobErrorResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.specError.CreatedAt} +} + +// UpdatedAt resolves the job error's updated at timestamp. +func (r *JobErrorResolver) UpdatedAt() graphql.Time { + return graphql.Time{Time: r.specError.UpdatedAt} +} + +// -- DismissJobError Mutation -- + +type DismissJobErrorPayloadResolver struct { + specError *job.SpecError + NotFoundErrorUnionType +} + +func NewDismissJobErrorPayload(specError *job.SpecError, err error) *DismissJobErrorPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "JobSpecError not found"} + + return &DismissJobErrorPayloadResolver{specError: specError, NotFoundErrorUnionType: e} +} + +func (r *DismissJobErrorPayloadResolver) ToDismissJobErrorSuccess() (*DismissJobErrorSuccessResolver, bool) { + if r.err != nil { + return nil, false + } + + return NewDismissJobErrorSuccess(r.specError), true +} + +type DismissJobErrorSuccessResolver struct { + specError *job.SpecError +} + +func NewDismissJobErrorSuccess(specError *job.SpecError) *DismissJobErrorSuccessResolver { + return &DismissJobErrorSuccessResolver{specError: specError} +} + +func (r *DismissJobErrorSuccessResolver) JobError() *JobErrorResolver { + return NewJobError(*r.specError) +} diff --git a/core/web/resolver/job_error_test.go b/core/web/resolver/job_error_test.go new file mode 100644 index 00000000..fff70efc --- /dev/null +++ b/core/web/resolver/job_error_test.go @@ -0,0 +1,234 @@ +package resolver + +import ( + "database/sql" + "encoding/json" + "testing" + + gqlerrors "github.com/graph-gophers/graphql-go/errors" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/mock" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +// JobErrors are only embedded on the job and are not fetchable by it's own id, +// so we test the job error resolvers by fetching a job by id. + +func TestResolver_JobErrors(t *testing.T) { + var ( + id = int32(1) + errorID = int64(200) + ) + + testCases := []GQLTestCase{ + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + ID: int32(1), + }, nil) + f.Mocks.jobORM.On("FindSpecErrorsByJobIDs", []int32{1}, mock.Anything).Return([]job.SpecError{ + { + ID: errorID, + Description: "no contract code at given address", + Occurrences: 1, + CreatedAt: f.Timestamp(), + JobID: int32(1), + }, + }, nil) + }, + query: ` + query GetJob { + job(id: "1") { + ... on Job { + errors { + id + description + occurrences + createdAt + } + } + } + } + `, + result: ` + { + "job": { + "errors": [{ + "id": "200", + "description": "no contract code at given address", + "occurrences": 1, + "createdAt": "2021-01-01T00:00:00Z" + }] + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_DismissJobError(t *testing.T) { + t.Parallel() + + id := int64(1) + mutation := ` + mutation DismissJobError($id: ID!) { + dismissJobError(id: $id) { + ... on DismissJobErrorSuccess { + jobError { + id + description + occurrences + createdAt + } + } + ... 
on NotFoundError { + code + message + } + } + }` + variables := map[string]interface{}{ + "id": "1", + } + invalidVariables := map[string]interface{}{ + "id": "asdadada", + } + d, err := json.Marshal(map[string]interface{}{ + "dismissJobError": map[string]interface{}{ + "jobError": map[string]interface{}{ + "id": "1", + "occurrences": 5, + "description": "test-description", + "createdAt": "2021-01-01T00:00:00Z", + }, + }, + }) + assert.NoError(t, err) + expected := string(d) + + gError := errors.New("error") + + _, idError := stringutils.ToInt64("asdadada") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "dismissJobError"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("FindSpecError", id).Return(job.SpecError{ + ID: id, + Occurrences: 5, + Description: "test-description", + CreatedAt: f.Timestamp(), + }, nil) + f.Mocks.jobORM.On("DismissError", mock.Anything, id).Return(nil) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + query: mutation, + variables: variables, + result: expected, + }, + { + name: "not found on FindSpecError()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("FindSpecError", id).Return(job.SpecError{}, sql.ErrNoRows) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + query: mutation, + variables: variables, + result: ` + { + "dismissJobError": { + "code": "NOT_FOUND", + "message": "JobSpecError not found" + } + } + `, + }, + { + name: "not found on DismissError()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("FindSpecError", id).Return(job.SpecError{}, nil) + f.Mocks.jobORM.On("DismissError", mock.Anything, id).Return(sql.ErrNoRows) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + query: mutation, + variables: variables, + result: ` + { + "dismissJobError": { + "code": "NOT_FOUND", + "message": "JobSpecError not found" + } + } + `, + }, + { + name: "generic 
error on FindSpecError()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("FindSpecError", id).Return(job.SpecError{}, gError) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"dismissJobError"}, + Message: gError.Error(), + }, + }, + }, + { + name: "generic error on DismissError()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("FindSpecError", id).Return(job.SpecError{}, nil) + f.Mocks.jobORM.On("DismissError", mock.Anything, id).Return(gError) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"dismissJobError"}, + Message: gError.Error(), + }, + }, + }, + { + name: "error on ID parsing", + authenticated: true, + query: mutation, + variables: invalidVariables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: idError, + Path: []interface{}{"dismissJobError"}, + Message: idError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/job_proposal.go b/core/web/resolver/job_proposal.go new file mode 100644 index 00000000..0beab307 --- /dev/null +++ b/core/web/resolver/job_proposal.go @@ -0,0 +1,177 @@ +package resolver + +import ( + "context" + "strconv" + + "github.com/graph-gophers/graphql-go" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + "github.com/goplugin/pluginv3.0/v2/core/web/loader" +) + +var notFoundErrorMessage = "spec not found" + +type JobProposalStatus string + +const ( + PENDING JobProposalStatus = "PENDING" + APPROVED JobProposalStatus = "APPROVED" + REJECTED JobProposalStatus = "REJECTED" + CANCELLED JobProposalStatus = "CANCELLED" + 
DELETED JobProposalStatus = "DELETED" + REVOKED JobProposalStatus = "REVOKED" +) + +func ToJobProposalStatus(s feeds.JobProposalStatus) (JobProposalStatus, error) { + switch s { + case feeds.JobProposalStatusApproved: + return APPROVED, nil + case feeds.JobProposalStatusPending: + return PENDING, nil + case feeds.JobProposalStatusRejected: + return REJECTED, nil + case feeds.JobProposalStatusCancelled: + return CANCELLED, nil + case feeds.JobProposalStatusDeleted: + return DELETED, nil + case feeds.JobProposalStatusRevoked: + return REVOKED, nil + default: + return "", errors.New("invalid job proposal status") + } +} + +// JobProposalResolver resolves the Job Proposal type +type JobProposalResolver struct { + jp *feeds.JobProposal +} + +// NewJobProposal creates a new JobProposalResolver +func NewJobProposal(jp *feeds.JobProposal) *JobProposalResolver { + return &JobProposalResolver{jp: jp} +} + +func NewJobProposals(jps []feeds.JobProposal) []*JobProposalResolver { + var resolvers []*JobProposalResolver + + for i := range jps { + resolvers = append(resolvers, NewJobProposal(&jps[i])) + } + + return resolvers +} + +// ID resolves to the job proposal ID +func (r *JobProposalResolver) ID() graphql.ID { + return int64GQLID(r.jp.ID) +} + +// Name resolves to the job proposal name +func (r *JobProposalResolver) Name() *string { + return r.jp.Name.Ptr() +} + +// Status resolves to the job proposal Status +func (r *JobProposalResolver) Status() JobProposalStatus { + if status, err := ToJobProposalStatus(r.jp.Status); err == nil { + return status + } + return "" +} + +// ExternalJobID resolves to the job proposal ExternalJobID +func (r *JobProposalResolver) ExternalJobID() *string { + if r.jp.ExternalJobID.Valid { + id := r.jp.ExternalJobID.UUID.String() + return &id + } + + return nil +} + +// JobID resolves to the job proposal JobID if it has an ExternalJobID +func (r *JobProposalResolver) JobID(ctx context.Context) (*string, error) { + if !r.jp.ExternalJobID.Valid { + 
return nil, nil + } + + job, err := loader.GetJobByExternalJobID(ctx, r.jp.ExternalJobID.UUID.String()) + if err != nil { + return nil, err + } + + id := strconv.FormatInt(int64(job.ID), 10) + + return &id, err +} + +// FeedsManager resolves the job proposal's feeds manager object field. +func (r *JobProposalResolver) FeedsManager(ctx context.Context) (*FeedsManagerResolver, error) { + mgr, err := loader.GetFeedsManagerByID(ctx, strconv.FormatInt(r.jp.FeedsManagerID, 10)) + if err != nil { + return nil, err + } + + return NewFeedsManager(*mgr), nil +} + +// MultiAddrs resolves to the job proposal MultiAddrs +func (r *JobProposalResolver) MultiAddrs() []string { + return r.jp.Multiaddrs +} + +// PendingUpdate resolves to whether the job proposal has a pending update. +func (r *JobProposalResolver) PendingUpdate() bool { + return r.jp.PendingUpdate +} + +// Specs returns all spec proposals associated with the proposal. +func (r *JobProposalResolver) Specs(ctx context.Context) ([]*JobProposalSpecResolver, error) { + specs, err := loader.GetSpecsByJobProposalID(ctx, strconv.FormatInt(r.jp.ID, 10)) + if err != nil { + return nil, err + } + + return NewJobProposalSpecs(specs), nil +} + +// LatestSpec returns the spec with the highest version number. +func (r *JobProposalResolver) LatestSpec(ctx context.Context) (*JobProposalSpecResolver, error) { + spec, err := loader.GetLatestSpecByJobProposalID(ctx, strconv.FormatInt(r.jp.ID, 10)) + if err != nil { + return nil, err + } + + return NewJobProposalSpec(spec), nil +} + +// RemoteUUID returns the remote FMS UUID of the proposal. 
+func (r *JobProposalResolver) RemoteUUID(ctx context.Context) string { + return r.jp.RemoteUUID.String() +} + +// -- GetJobProposal Query -- + +// JobProposalPayloadResolver resolves the job proposal payload type +type JobProposalPayloadResolver struct { + jp *feeds.JobProposal + NotFoundErrorUnionType +} + +// NewJobProposalPayload creates a new job proposal payload +func NewJobProposalPayload(jp *feeds.JobProposal, err error) *JobProposalPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: "job proposal not found"} + + return &JobProposalPayloadResolver{jp: jp, NotFoundErrorUnionType: e} +} + +// ToJobProposal resolves to the job proposal resolver +func (r *JobProposalPayloadResolver) ToJobProposal() (*JobProposalResolver, bool) { + if r.err == nil { + return NewJobProposal(r.jp), true + } + + return nil, false +} diff --git a/core/web/resolver/job_proposal_spec.go b/core/web/resolver/job_proposal_spec.go new file mode 100644 index 00000000..bee92ee7 --- /dev/null +++ b/core/web/resolver/job_proposal_spec.go @@ -0,0 +1,291 @@ +package resolver + +import ( + "github.com/graph-gophers/graphql-go" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" +) + +// SpecStatus defines the enum values for GQL +type SpecStatus string + +const ( + // revive:disable + SpecStatusUnknown SpecStatus = "UNKNOWN" + SpecStatusPending SpecStatus = "PENDING" + SpecStatusApproved SpecStatus = "APPROVED" + SpecStatusRejected SpecStatus = "REJECTED" + SpecStatusCancelled SpecStatus = "CANCELLED" + SpecStatusRevoked SpecStatus = "REVOKED" + // revive:enable +) + +// ToSpecStatus converts the feeds status into the enum value. 
+func ToSpecStatus(s feeds.SpecStatus) SpecStatus { + switch s { + case feeds.SpecStatusApproved: + return SpecStatusApproved + case feeds.SpecStatusPending: + return SpecStatusPending + case feeds.SpecStatusRejected: + return SpecStatusRejected + case feeds.SpecStatusCancelled: + return SpecStatusCancelled + case feeds.SpecStatusRevoked: + return SpecStatusRevoked + default: + return SpecStatusUnknown + } +} + +// JobProposalSpecResolver resolves the Job Proposal Spec type. +type JobProposalSpecResolver struct { + spec *feeds.JobProposalSpec +} + +// NewJobProposalSpec creates a new JobProposalSpecResolver. +func NewJobProposalSpec(spec *feeds.JobProposalSpec) *JobProposalSpecResolver { + return &JobProposalSpecResolver{spec: spec} +} + +// NewJobProposalSpecs creates a slice of JobProposalSpecResolvers. +func NewJobProposalSpecs(specs []feeds.JobProposalSpec) []*JobProposalSpecResolver { + var resolvers []*JobProposalSpecResolver + + for i := range specs { + resolvers = append(resolvers, NewJobProposalSpec(&specs[i])) + } + + return resolvers +} + +// ID resolves to the job proposal spec ID +func (r *JobProposalSpecResolver) ID() graphql.ID { + return int64GQLID(r.spec.ID) +} + +// Definition resolves to the job proposal spec definition +func (r *JobProposalSpecResolver) Definition() string { + return r.spec.Definition +} + +// Version resolves to the job proposal spec version +func (r *JobProposalSpecResolver) Version() int32 { + return r.spec.Version +} + +// Status resolves to the job proposal spec's status +func (r *JobProposalSpecResolver) Status() SpecStatus { + return ToSpecStatus(r.spec.Status) +} + +// StatusUpdatedAt resolves to the last timestamp that the spec status was +// updated.
+func (r *JobProposalSpecResolver) StatusUpdatedAt() graphql.Time { + return graphql.Time{Time: r.spec.StatusUpdatedAt} +} + +// CreatedAt resolves to the job proposal spec's created at timestamp +func (r *JobProposalSpecResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.spec.CreatedAt} +} + +// UpdatedAt resolves to the job proposal spec's updated at timestamp +func (r *JobProposalSpecResolver) UpdatedAt() graphql.Time { + return graphql.Time{Time: r.spec.UpdatedAt} +} + +// -- ApproveJobProposal Mutation -- + +// ApproveJobProposalSpecPayloadResolver resolves the spec payload. +type ApproveJobProposalSpecPayloadResolver struct { + spec *feeds.JobProposalSpec + NotFoundErrorUnionType +} + +// NewApproveJobProposalSpecPayload generates the spec payload resolver. +func NewApproveJobProposalSpecPayload(spec *feeds.JobProposalSpec, err error) *ApproveJobProposalSpecPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: notFoundErrorMessage} + + return &ApproveJobProposalSpecPayloadResolver{spec: spec, NotFoundErrorUnionType: e} +} + +// ToApproveJobProposalSpecSuccess resolves to the approval job proposal success +// resolver. 
+func (r *ApproveJobProposalSpecPayloadResolver) ToApproveJobProposalSpecSuccess() (*ApproveJobProposalSpecSuccessResolver, bool) { + if r.spec != nil { + return NewApproveJobProposalSpecSuccess(r.spec), true + } + + return nil, false +} + +// ToJobAlreadyExistsError - +func (r *ApproveJobProposalSpecPayloadResolver) ToJobAlreadyExistsError() (*JobAlreadyExistsErrorResolver, bool) { + if r.err != nil && errors.Is(r.err, feeds.ErrJobAlreadyExists) { + return NewJobAlreadyExistsError(r.err.Error()), true + } + + return nil, false +} + +// JobAlreadyExistsErrorResolver - +type JobAlreadyExistsErrorResolver struct { + message string +} + +// NewJobAlreadyExistsError - +func NewJobAlreadyExistsError(message string) *JobAlreadyExistsErrorResolver { + return &JobAlreadyExistsErrorResolver{ + message: message, + } +} + +// Message - +func (r *JobAlreadyExistsErrorResolver) Message() string { + return r.message +} + +// Code - +func (r *JobAlreadyExistsErrorResolver) Code() ErrorCode { + return ErrorCodeUnprocessable +} + +// ApproveJobProposalSpecSuccessResolver resolves the approval success response. +type ApproveJobProposalSpecSuccessResolver struct { + spec *feeds.JobProposalSpec +} + +// NewApproveJobProposalSpecSuccess generates the resolver. +func NewApproveJobProposalSpecSuccess(spec *feeds.JobProposalSpec) *ApproveJobProposalSpecSuccessResolver { + return &ApproveJobProposalSpecSuccessResolver{spec: spec} +} + +// Spec returns the job proposal spec. +func (r *ApproveJobProposalSpecSuccessResolver) Spec() *JobProposalSpecResolver { + return NewJobProposalSpec(r.spec) +} + +// -- CancelJobProposal Mutation -- + +// CancelJobProposalSpecPayloadResolver resolves the cancel payload response. +type CancelJobProposalSpecPayloadResolver struct { + spec *feeds.JobProposalSpec + NotFoundErrorUnionType +} + +// NewCancelJobProposalSpecPayload generates the resolver. 
+func NewCancelJobProposalSpecPayload(spec *feeds.JobProposalSpec, err error) *CancelJobProposalSpecPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: notFoundErrorMessage} + + return &CancelJobProposalSpecPayloadResolver{spec: spec, NotFoundErrorUnionType: e} +} + +// ToCancelJobProposalSpecSuccess resolves to the cancel job proposal spec +// success resolver. +func (r *CancelJobProposalSpecPayloadResolver) ToCancelJobProposalSpecSuccess() (*CancelJobProposalSpecSuccessResolver, bool) { + if r.spec != nil { + return NewCancelJobProposalSpecSuccess(r.spec), true + } + + return nil, false +} + +// CancelJobProposalSpecSuccessResolver resolves the cancellation success +// response. +type CancelJobProposalSpecSuccessResolver struct { + spec *feeds.JobProposalSpec +} + +// NewCancelJobProposalSpecSuccess generates the resolver. +func NewCancelJobProposalSpecSuccess(spec *feeds.JobProposalSpec) *CancelJobProposalSpecSuccessResolver { + return &CancelJobProposalSpecSuccessResolver{spec: spec} +} + +// Spec returns the job proposal spec. +func (r *CancelJobProposalSpecSuccessResolver) Spec() *JobProposalSpecResolver { + return NewJobProposalSpec(r.spec) +} + +// -- RejectJobProposalSpec Mutation -- + +// RejectJobProposalSpecPayloadResolver resolves the reject payload response. +type RejectJobProposalSpecPayloadResolver struct { + spec *feeds.JobProposalSpec + NotFoundErrorUnionType +} + +// NewRejectJobProposalSpecPayload constructs a RejectJobProposalSpecPayloadResolver. +func NewRejectJobProposalSpecPayload(spec *feeds.JobProposalSpec, err error) *RejectJobProposalSpecPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: notFoundErrorMessage} + + return &RejectJobProposalSpecPayloadResolver{spec: spec, NotFoundErrorUnionType: e} +} + +// ToRejectJobProposalSpecSuccess resolves to the reject job proposal spec +// success resolver. 
+func (r *RejectJobProposalSpecPayloadResolver) ToRejectJobProposalSpecSuccess() (*RejectJobProposalSpecSuccessResolver, bool) { + if r.spec != nil { + return NewRejectJobProposalSpecSuccess(r.spec), true + } + + return nil, false +} + +// RejectJobProposalSpecSuccessResolver resolves the rejection success response. +type RejectJobProposalSpecSuccessResolver struct { + spec *feeds.JobProposalSpec +} + +// NewRejectJobProposalSpecSuccess generates the resolver. +func NewRejectJobProposalSpecSuccess(spec *feeds.JobProposalSpec) *RejectJobProposalSpecSuccessResolver { + return &RejectJobProposalSpecSuccessResolver{spec: spec} +} + +// Spec returns the job proposal spec. +func (r *RejectJobProposalSpecSuccessResolver) Spec() *JobProposalSpecResolver { + return NewJobProposalSpec(r.spec) +} + +// -- UpdateJobProposalSpecDefinition Mutation -- + +// UpdateJobProposalSpecDefinitionPayloadResolver generates the update spec +// definition payload. +type UpdateJobProposalSpecDefinitionPayloadResolver struct { + spec *feeds.JobProposalSpec + NotFoundErrorUnionType +} + +// NewUpdateJobProposalSpecDefinitionPayload constructs UpdateJobProposalSpecDefinitionPayloadResolver. +func NewUpdateJobProposalSpecDefinitionPayload(spec *feeds.JobProposalSpec, err error) *UpdateJobProposalSpecDefinitionPayloadResolver { + e := NotFoundErrorUnionType{err: err, message: notFoundErrorMessage} + + return &UpdateJobProposalSpecDefinitionPayloadResolver{spec: spec, NotFoundErrorUnionType: e} +} + +// ToUpdateJobProposalSpecDefinitionSuccess resolves to the update job proposal +// definition success resolver. +func (r *UpdateJobProposalSpecDefinitionPayloadResolver) ToUpdateJobProposalSpecDefinitionSuccess() (*UpdateJobProposalSpecDefinitionSuccessResolver, bool) { + if r.spec != nil { + return NewUpdateJobProposalSpecDefinitionSuccess(r.spec), true + } + + return nil, false +} + +// UpdateJobProposalSpecDefinitionSuccessResolver resolves the update success +// response. 
+type UpdateJobProposalSpecDefinitionSuccessResolver struct { + spec *feeds.JobProposalSpec +} + +// NewUpdateJobProposalSpecDefinitionSuccess constructs UpdateJobProposalSpecDefinitionSuccessResolver. +func NewUpdateJobProposalSpecDefinitionSuccess(spec *feeds.JobProposalSpec) *UpdateJobProposalSpecDefinitionSuccessResolver { + return &UpdateJobProposalSpecDefinitionSuccessResolver{spec: spec} +} + +// Spec returns the job proposal spec. +func (r *UpdateJobProposalSpecDefinitionSuccessResolver) Spec() *JobProposalSpecResolver { + return NewJobProposalSpec(r.spec) +} diff --git a/core/web/resolver/job_proposal_spec_test.go b/core/web/resolver/job_proposal_spec_test.go new file mode 100644 index 00000000..727e01fe --- /dev/null +++ b/core/web/resolver/job_proposal_spec_test.go @@ -0,0 +1,467 @@ +package resolver + +import ( + "database/sql" + "testing" + "time" + + "github.com/stretchr/testify/mock" + + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" +) + +func TestResolver_ApproveJobProposalSpec(t *testing.T) { + t.Parallel() + + mutation := ` + mutation ApproveJobProposalSpec($id: ID!) { + approveJobProposalSpec(id: $id) { + ... on ApproveJobProposalSpecSuccess { + spec { + id + } + } + ... on NotFoundError { + message + code + } + ... 
on JobAlreadyExistsError { + message + code + } + } + }` + + specID := int64(1) + result := ` + { + "approveJobProposalSpec": { + "spec": { + "id": "1" + } + } + }` + variables := map[string]interface{}{ + "id": "1", + } + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "approveJobProposalSpec"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("ApproveSpec", mock.Anything, specID, false).Return(nil) + f.Mocks.feedsSvc.On("GetSpec", specID).Return(&feeds.JobProposalSpec{ + ID: specID, + }, nil) + }, + query: mutation, + variables: variables, + result: result, + }, + { + name: "not found error on approval", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("ApproveSpec", mock.Anything, specID, false).Return(sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "approveJobProposalSpec": { + "message": "spec not found", + "code": "NOT_FOUND" + } + }`, + }, + { + name: "not found error on fetch", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("ApproveSpec", mock.Anything, specID, false).Return(nil) + f.Mocks.feedsSvc.On("GetSpec", specID).Return(nil, sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "approveJobProposalSpec": { + "message": "spec not found", + "code": "NOT_FOUND" + } + }`, + }, + { + name: "unprocessable error on approval if job already exists", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("ApproveSpec", mock.Anything, specID, false).Return(feeds.ErrJobAlreadyExists) + }, + query: mutation, + variables: variables, + result: ` + { + "approveJobProposalSpec": { + 
"message": "a job for this contract address already exists - please use the 'force' option to replace it", + "code": "UNPROCESSABLE" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_CancelJobProposalSpec(t *testing.T) { + t.Parallel() + + mutation := ` + mutation CancelJobProposalSpec($id: ID!) { + cancelJobProposalSpec(id: $id) { + ... on CancelJobProposalSpecSuccess { + spec { + id + } + } + ... on NotFoundError { + message + code + } + } + }` + + specID := int64(1) + result := ` + { + "cancelJobProposalSpec": { + "spec": { + "id": "1" + } + } + }` + variables := map[string]interface{}{ + "id": "1", + } + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "cancelJobProposalSpec"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("CancelSpec", mock.Anything, specID).Return(nil) + f.Mocks.feedsSvc.On("GetSpec", specID).Return(&feeds.JobProposalSpec{ + ID: specID, + }, nil) + + }, + query: mutation, + variables: variables, + result: result, + }, + { + name: "not found error on cancel", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("CancelSpec", mock.Anything, specID).Return(sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "cancelJobProposalSpec": { + "message": "spec not found", + "code": "NOT_FOUND" + } + }`, + }, + { + name: "not found error on fetch", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("CancelSpec", mock.Anything, specID).Return(nil) + f.Mocks.feedsSvc.On("GetSpec", specID).Return(nil, sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "cancelJobProposalSpec": { + "message": "spec not found", + "code": "NOT_FOUND" + } + }`, 
+ }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_RejectJobProposalSpec(t *testing.T) { + t.Parallel() + + mutation := ` + mutation RejectJobProposalSpec($id: ID!) { + rejectJobProposalSpec(id: $id) { + ... on RejectJobProposalSpecSuccess { + spec { + id + } + } + ... on NotFoundError { + message + code + } + } + }` + + specID := int64(1) + result := ` + { + "rejectJobProposalSpec": { + "spec": { + "id": "1" + } + } + }` + variables := map[string]interface{}{ + "id": "1", + } + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "rejectJobProposalSpec"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("RejectSpec", mock.Anything, specID).Return(nil) + f.Mocks.feedsSvc.On("GetSpec", specID).Return(&feeds.JobProposalSpec{ + ID: specID, + }, nil) + }, + query: mutation, + variables: variables, + result: result, + }, + { + name: "not found error on reject", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("RejectSpec", mock.Anything, specID).Return(sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "rejectJobProposalSpec": { + "message": "spec not found", + "code": "NOT_FOUND" + } + }`, + }, + { + name: "not found error on fetch", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("RejectSpec", mock.Anything, specID).Return(nil) + f.Mocks.feedsSvc.On("GetSpec", specID).Return(nil, sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "rejectJobProposalSpec": { + "message": "spec not found", + "code": "NOT_FOUND" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_UpdateJobProposalSpecDefinition(t *testing.T) { + t.Parallel() + + mutation := ` + 
mutation UpdateJobProposalSpecDefinition($id: ID!, $input: UpdateJobProposalSpecDefinitionInput!) { + updateJobProposalSpecDefinition(id: $id, input: $input) { + ... on UpdateJobProposalSpecDefinitionSuccess { + spec { + id + } + } + ... on NotFoundError { + message + code + } + } + }` + + specID := int64(1) + result := ` + { + "updateJobProposalSpecDefinition": { + "spec": { + "id": "1" + } + } + }` + variables := map[string]interface{}{ + "id": "1", + "input": map[string]interface{}{ + "definition": "", + }, + } + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "updateJobProposalSpecDefinition"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("UpdateSpecDefinition", mock.Anything, specID, "").Return(nil) + f.Mocks.feedsSvc.On("GetSpec", specID).Return(&feeds.JobProposalSpec{ + ID: specID, + }, nil) + }, + query: mutation, + variables: variables, + result: result, + }, + { + name: "not found error on update", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("UpdateSpecDefinition", mock.Anything, specID, "").Return(sql.ErrNoRows) + + }, + query: mutation, + variables: variables, + result: ` + { + "updateJobProposalSpecDefinition": { + "message": "spec not found", + "code": "NOT_FOUND" + } + }`, + }, + { + name: "not found error on fetch", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + f.Mocks.feedsSvc.On("UpdateSpecDefinition", mock.Anything, specID, "").Return(nil) + f.Mocks.feedsSvc.On("GetSpec", specID).Return(nil, sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "updateJobProposalSpecDefinition": { + "message": "spec not found", + "code": "NOT_FOUND" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + 
+// Tests resolving a job proposal spec. Since there is not GetJobProposalSpec +// query, we rely on the GetJobProposal query to fetch the nested specs +func TestResolver_GetJobProposal_Spec(t *testing.T) { + t.Parallel() + + timestamp := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC) + query := ` + query GetJobProposal { + jobProposal(id: "1") { + ... on JobProposal { + id + specs { + id + definition + status + version + statusUpdatedAt + createdAt + updatedAt + } + } + ... on NotFoundError { + message + code + } + } + }` + + jpID := int64(1) + spec := feeds.JobProposalSpec{ + ID: 100, + Definition: "name='spec'", + Status: feeds.SpecStatusPending, + JobProposalID: jpID, + Version: 1, + StatusUpdatedAt: timestamp, + CreatedAt: timestamp, + UpdatedAt: timestamp, + } + specs := []feeds.JobProposalSpec{spec} + result := ` + { + "jobProposal": { + "id": "1", + "specs": [{ + "id": "100", + "definition": "name='spec'", + "status": "PENDING", + "version": 1, + "statusUpdatedAt": "2021-01-01T00:00:00Z", + "createdAt": "2021-01-01T00:00:00Z", + "updatedAt": "2021-01-01T00:00:00Z" + }] + } + }` + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "jobProposal"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.feedsSvc.On("GetJobProposal", jpID).Return(&feeds.JobProposal{ + ID: jpID, + Status: feeds.JobProposalStatusApproved, + FeedsManagerID: 1, + Multiaddrs: []string{"1", "2"}, + PendingUpdate: false, + }, nil) + f.Mocks.feedsSvc. + On("ListSpecsByJobProposalIDs", []int64{jpID}). 
+ Return(specs, nil) + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + }, + query: query, + result: result, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/job_proposal_test.go b/core/web/resolver/job_proposal_test.go new file mode 100644 index 00000000..890e4594 --- /dev/null +++ b/core/web/resolver/job_proposal_test.go @@ -0,0 +1,107 @@ +package resolver + +import ( + "database/sql" + "fmt" + "testing" + + "github.com/google/uuid" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" +) + +func TestResolver_GetJobProposal(t *testing.T) { + t.Parallel() + + query := ` + query GetJobProposal { + jobProposal(id: "1") { + ... on JobProposal { + id + name + status + externalJobID + remoteUUID + multiAddrs + pendingUpdate + feedsManager { + id + name + } + } + ... on NotFoundError { + message + code + } + } + }` + + jpID := int64(1) + ejID := uuid.NullUUID{UUID: uuid.New(), Valid: true} + rUUID := uuid.New() + name := "job_proposal_1" + result := ` + { + "jobProposal": { + "id": "1", + "name": "%s", + "status": "APPROVED", + "externalJobID": "%s", + "remoteUUID": "%s", + "multiAddrs": ["1", "2"], + "pendingUpdate": false, + "feedsManager": { + "id": "1", + "name": "manager" + } + } + }` + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "jobProposal"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.feedsSvc.On("ListManagersByIDs", []int64{1}).Return([]feeds.FeedsManager{ + { + ID: 1, + Name: "manager", + }, + }, nil) + f.Mocks.feedsSvc.On("GetJobProposal", jpID).Return(&feeds.JobProposal{ + ID: jpID, + Name: null.StringFrom(name), + Status: feeds.JobProposalStatusApproved, + ExternalJobID: ejID, + RemoteUUID: rUUID, + FeedsManagerID: 1, + Multiaddrs: []string{"1", "2"}, + PendingUpdate: false, + }, nil) + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + }, + query: query, + result: fmt.Sprintf(result, name, 
ejID.UUID.String(), rUUID.String()), + }, + { + name: "not found error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.feedsSvc.On("GetJobProposal", jpID).Return(nil, sql.ErrNoRows) + f.App.On("GetFeedsService").Return(f.Mocks.feedsSvc) + }, + query: query, + result: ` + { + "jobProposal": { + "message": "job proposal not found", + "code": "NOT_FOUND" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/job_run.go b/core/web/resolver/job_run.go new file mode 100644 index 00000000..357a786d --- /dev/null +++ b/core/web/resolver/job_run.go @@ -0,0 +1,269 @@ +package resolver + +import ( + "context" + + "github.com/graph-gophers/graphql-go" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" + "github.com/goplugin/pluginv3.0/v2/core/web/loader" +) + +type JobRunStatus string + +const ( + JobRunStatusUnknown JobRunStatus = "UNKNOWN" + JobRunStatusRunning JobRunStatus = "RUNNING" + JobRunStatusSuspended JobRunStatus = "SUSPENDED" + JobRunStatusErrored JobRunStatus = "ERRORED" + JobRunStatusCompleted JobRunStatus = "COMPLETED" +) + +func NewJobRunStatus(status pipeline.RunStatus) JobRunStatus { + switch status { + case pipeline.RunStatusRunning: + return JobRunStatusRunning + case pipeline.RunStatusSuspended: + return JobRunStatusSuspended + case pipeline.RunStatusErrored: + return JobRunStatusErrored + case pipeline.RunStatusCompleted: + return JobRunStatusCompleted + default: + return JobRunStatusUnknown + } +} + +var outputRetrievalErrorStr = "error: unable to retrieve outputs" + +type JobRunResolver struct { + run pipeline.Run + app plugin.Application +} + +func NewJobRun(run pipeline.Run, app plugin.Application) *JobRunResolver { + return &JobRunResolver{run: run, app: app} +} + +func 
NewJobRuns(runs []pipeline.Run, app plugin.Application) []*JobRunResolver { + var resolvers []*JobRunResolver + + for _, run := range runs { + resolvers = append(resolvers, NewJobRun(run, app)) + } + + return resolvers +} + +func (r *JobRunResolver) ID() graphql.ID { + return int64GQLID(r.run.ID) +} + +func (r *JobRunResolver) Outputs() []*string { + if !r.run.Outputs.Valid { + return []*string{&outputRetrievalErrorStr} + } + + outputs, err := r.run.StringOutputs() + if err != nil { + errMsg := err.Error() + return []*string{&errMsg} + } + + return outputs +} + +func (r *JobRunResolver) PipelineSpecID() graphql.ID { + return int32GQLID(r.run.PipelineSpecID) +} + +func (r *JobRunResolver) FatalErrors() []string { + var errs []string + + for _, err := range r.run.StringFatalErrors() { + if err != nil { + errs = append(errs, *err) + } + } + + return errs +} + +func (r *JobRunResolver) AllErrors() []string { + var errs []string + + for _, err := range r.run.StringAllErrors() { + if err != nil { + errs = append(errs, *err) + } + } + + return errs +} + +func (r *JobRunResolver) Inputs() string { + val, err := r.run.Inputs.MarshalJSON() + if err != nil { + return "error: unable to retrieve inputs" + } + + return string(val) +} + +func (r *JobRunResolver) Status() JobRunStatus { + return NewJobRunStatus(r.run.State) +} + +// TaskRuns resolves the job run's task runs +// +// This could be moved to a data loader later, which means also modifying to ORM +// to not get everything at once +func (r *JobRunResolver) TaskRuns() []*TaskRunResolver { + if len(r.run.PipelineTaskRuns) > 0 { + return NewTaskRuns(r.run.PipelineTaskRuns) + } + + return []*TaskRunResolver{} +} + +func (r *JobRunResolver) Job(ctx context.Context) (*JobResolver, error) { + plnSpecID := stringutils.FromInt32(r.run.PipelineSpecID) + job, err := loader.GetJobByPipelineSpecID(ctx, plnSpecID) + if err != nil { + return nil, err + } + + return NewJob(r.app, *job), nil +} + +func (r *JobRunResolver) CreatedAt() 
graphql.Time {
	return graphql.Time{Time: r.run.CreatedAt}
}

// FinishedAt returns the run's completion time, or nil while the run has not
// finished (FinishedAt is still the zero value).
func (r *JobRunResolver) FinishedAt() *graphql.Time {
	if r.run.FinishedAt.IsZero() {
		return nil
	}

	return &graphql.Time{Time: r.run.FinishedAt.ValueOrZero()}
}

// -- JobRun query --

// JobRunPayloadResolver resolves the result of looking up a single job run.
type JobRunPayloadResolver struct {
	jr  *pipeline.Run
	app plugin.Application
	NotFoundErrorUnionType
}

// NewJobRunPayload generates the resolver.
func NewJobRunPayload(jr *pipeline.Run, app plugin.Application, err error) *JobRunPayloadResolver {
	return &JobRunPayloadResolver{
		jr:                     jr,
		app:                    app,
		NotFoundErrorUnionType: NotFoundErrorUnionType{err: err, message: "job run not found", isExpectedErrorFn: nil},
	}
}

// ToJobRun resolves to the job run resolver when the lookup succeeded.
func (r *JobRunPayloadResolver) ToJobRun() (*JobRunResolver, bool) {
	if r.err != nil {
		return nil, false
	}

	return NewJobRun(*r.jr, r.app), true
}

// JobRunsPayloadResolver resolves a page of job runs
type JobRunsPayloadResolver struct {
	runs  []pipeline.Run
	total int32
	app   plugin.Application
}

// NewJobRunsPayload generates the resolver.
func NewJobRunsPayload(runs []pipeline.Run, total int32, app plugin.Application) *JobRunsPayloadResolver {
	return &JobRunsPayloadResolver{runs: runs, total: total, app: app}
}

// Results returns the job runs.
func (r *JobRunsPayloadResolver) Results() []*JobRunResolver {
	return NewJobRuns(r.runs, r.app)
}

// Metadata returns the pagination metadata.
+func (r *JobRunsPayloadResolver) Metadata() *PaginationMetadataResolver { + return NewPaginationMetadata(r.total) +} + +// -- RunJob Mutation -- + +type RunJobPayloadResolver struct { + run *pipeline.Run + app plugin.Application + NotFoundErrorUnionType +} + +func NewRunJobPayload(run *pipeline.Run, app plugin.Application, err error) *RunJobPayloadResolver { + var e NotFoundErrorUnionType + + if err != nil { + e = NotFoundErrorUnionType{err: err, message: err.Error(), isExpectedErrorFn: func(err error) bool { + return errors.Is(err, webhook.ErrJobNotExists) + }} + } + + return &RunJobPayloadResolver{run: run, app: app, NotFoundErrorUnionType: e} +} + +func (r *RunJobPayloadResolver) ToRunJobSuccess() (*RunJobSuccessResolver, bool) { + if r.err != nil { + return nil, false + } + + return NewRunJobSuccess(*r.run, r.app), true +} + +func (r *RunJobPayloadResolver) ToRunJobCannotRunError() (*RunJobCannotRunErrorResolver, bool) { + if r.err == nil { + return nil, false + } + + if errors.Is(r.err, webhook.ErrJobNotExists) { + return nil, false + } + + return NewRunJobCannotRunError(r.err), true +} + +type RunJobSuccessResolver struct { + run pipeline.Run + app plugin.Application +} + +func NewRunJobSuccess(run pipeline.Run, app plugin.Application) *RunJobSuccessResolver { + return &RunJobSuccessResolver{run: run, app: app} +} + +func (r *RunJobSuccessResolver) JobRun() *JobRunResolver { + return NewJobRun(r.run, r.app) +} + +type RunJobCannotRunErrorResolver struct { + message string + code ErrorCode +} + +func NewRunJobCannotRunError(err error) *RunJobCannotRunErrorResolver { + return &RunJobCannotRunErrorResolver{message: "", code: ErrorCodeUnprocessable} +} + +func (r *RunJobCannotRunErrorResolver) Code() ErrorCode { + return r.code +} + +func (r *RunJobCannotRunErrorResolver) Message() string { + return r.message +} diff --git a/core/web/resolver/job_run_test.go b/core/web/resolver/job_run_test.go new file mode 100644 index 00000000..abb76e69 --- /dev/null +++ 
b/core/web/resolver/job_run_test.go @@ -0,0 +1,399 @@ +package resolver + +import ( + "database/sql" + "testing" + + gqlerrors "github.com/graph-gophers/graphql-go/errors" + "github.com/pkg/errors" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +func TestQuery_PaginatedJobRuns(t *testing.T) { + t.Parallel() + + query := ` + query GetJobsRuns { + jobRuns { + results { + id + } + metadata { + total + } + } + }` + + gError := errors.New("error") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "jobRuns"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("PipelineRuns", (*int32)(nil), PageDefaultOffset, PageDefaultLimit).Return([]pipeline.Run{ + { + ID: int64(200), + }, + }, 1, nil) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + query: query, + result: ` + { + "jobRuns": { + "results": [{ + "id": "200" + }], + "metadata": { + "total": 1 + } + } + }`, + }, + { + name: "generic error on PipelineRuns()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("PipelineRuns", (*int32)(nil), PageDefaultOffset, PageDefaultLimit).Return(nil, 0, gError) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + query: query, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"jobRuns"}, + Message: gError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_JobRun(t *testing.T) { + t.Parallel() + + query := ` + query GetJobRun($id: ID!) { + jobRun(id: $id) { + ... 
on JobRun { + id + allErrors + createdAt + fatalErrors + finishedAt + inputs + job { + id + name + } + outputs + status + } + ... on NotFoundError { + code + message + } + } + } + ` + + variables := map[string]interface{}{ + "id": "2", + } + gError := errors.New("error") + _, idError := stringutils.ToInt64("asdasads") + + inputs := pipeline.JSONSerializable{} + err := inputs.UnmarshalJSON([]byte(`{"foo": "bar"}`)) + require.NoError(t, err) + + outputs := pipeline.JSONSerializable{} + err = outputs.UnmarshalJSON([]byte(`[{"baz": "bar"}]`)) + require.NoError(t, err) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query, variables: variables}, "jobRun"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("FindPipelineRunByID", int64(2)).Return(pipeline.Run{ + ID: 2, + PipelineSpecID: 5, + CreatedAt: f.Timestamp(), + FinishedAt: null.TimeFrom(f.Timestamp()), + AllErrors: pipeline.RunErrors{null.StringFrom("fatal error"), null.String{}}, + FatalErrors: pipeline.RunErrors{null.StringFrom("fatal error"), null.String{}}, + Inputs: inputs, + Outputs: outputs, + State: pipeline.RunStatusErrored, + }, nil) + f.Mocks.jobORM.On("FindJobsByPipelineSpecIDs", []int32{5}).Return([]job.Job{ + { + ID: 1, + PipelineSpecID: 2, + Name: null.StringFrom("first-one"), + }, + { + ID: 2, + PipelineSpecID: 5, + Name: null.StringFrom("second-one"), + }, + }, nil) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + query: query, + variables: variables, + result: ` + { + "jobRun": { + "id": "2", + "allErrors": ["fatal error"], + "createdAt": "2021-01-01T00:00:00Z", + "fatalErrors": ["fatal error"], + "finishedAt": "2021-01-01T00:00:00Z", + "inputs": "{\"foo\":\"bar\"}", + "job": { + "id": "2", + "name": "second-one" + }, + "outputs": ["{\"baz\":\"bar\"}"], + "status": "ERRORED" + } + }`, + }, + { + name: "not found error", + authenticated: true, + before: func(f *gqlTestFramework) { + 
f.Mocks.jobORM.On("FindPipelineRunByID", int64(2)).Return(pipeline.Run{}, sql.ErrNoRows) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + query: query, + variables: variables, + result: ` + { + "jobRun": { + "code": "NOT_FOUND", + "message": "job run not found" + } + }`, + }, + { + name: "generic error on FindPipelineRunByID()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("FindPipelineRunByID", int64(2)).Return(pipeline.Run{}, gError) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + query: query, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"jobRun"}, + Message: gError.Error(), + }, + }, + }, + { + name: "invalid ID error", + authenticated: true, + query: query, + variables: map[string]interface{}{ + "id": "asdasads", + }, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: idError, + Path: []interface{}{"jobRun"}, + Message: idError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_RunJob(t *testing.T) { + t.Parallel() + + mutation := ` + mutation RunJob($id: ID!) { + runJob(id: $id) { + ... on RunJobSuccess { + jobRun { + id + allErrors + createdAt + fatalErrors + finishedAt + inputs + outputs + status + } + } + ... on RunJobCannotRunError { + code + message + } + ... 
on NotFoundError { + code + message + } + } + }` + id := int32(12) + idStr := stringutils.FromInt32(id) + variables := map[string]interface{}{ + "id": idStr, + } + + inputs := pipeline.JSONSerializable{} + err := inputs.UnmarshalJSON([]byte(`{"foo": "bar"}`)) + require.NoError(t, err) + + outputs := pipeline.JSONSerializable{} + err = outputs.UnmarshalJSON([]byte(`[{"baz": "bar"}]`)) + require.NoError(t, err) + + gError := errors.New("error") + _, idErr := stringutils.ToInt32("some random ID with some specific length that should not work") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "runJob"), + { + name: "success without body", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("RunJobV2", mock.Anything, id, (map[string]interface{})(nil)).Return(int64(25), nil) + f.Mocks.pipelineORM.On("FindRun", int64(25)).Return(pipeline.Run{ + ID: 2, + PipelineSpecID: 5, + CreatedAt: f.Timestamp(), + FinishedAt: null.TimeFrom(f.Timestamp()), + AllErrors: pipeline.RunErrors{null.StringFrom("fatal error"), null.String{}}, + FatalErrors: pipeline.RunErrors{null.StringFrom("fatal error"), null.String{}}, + Inputs: inputs, + Outputs: outputs, + State: pipeline.RunStatusErrored, + }, nil) + f.App.On("PipelineORM").Return(f.Mocks.pipelineORM) + }, + query: mutation, + variables: variables, + result: ` + { + "runJob": { + "jobRun": { + "id": "2", + "allErrors": ["fatal error"], + "createdAt": "2021-01-01T00:00:00Z", + "fatalErrors": ["fatal error"], + "finishedAt": "2021-01-01T00:00:00Z", + "inputs": "{\"foo\":\"bar\"}", + "outputs": ["{\"baz\":\"bar\"}"], + "status": "ERRORED" + } + } + }`, + }, + { + name: "invalid ID error", + authenticated: true, + query: mutation, + variables: map[string]interface{}{ + "id": "some random ID with some specific length that should not work", + }, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: idErr, + Path: 
[]interface{}{"runJob"}, + Message: idErr.Error(), + }, + }, + }, + { + name: "not found job error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("RunJobV2", mock.Anything, id, (map[string]interface{})(nil)).Return(int64(25), webhook.ErrJobNotExists) + }, + query: mutation, + variables: map[string]interface{}{ + "id": idStr, + }, + result: ` + { + "runJob": { + "code": "NOT_FOUND", + "message": "job does not exist" + } + }`, + }, + { + name: "generic error on RunJobV2", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("RunJobV2", mock.Anything, id, (map[string]interface{})(nil)).Return(int64(25), gError) + }, + query: mutation, + variables: map[string]interface{}{ + "id": idStr, + }, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"runJob"}, + Message: gError.Error(), + }, + }, + }, + { + name: "generic error on FindRun", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("RunJobV2", mock.Anything, id, (map[string]interface{})(nil)).Return(int64(25), nil) + f.Mocks.pipelineORM.On("FindRun", int64(25)).Return(pipeline.Run{}, gError) + f.App.On("PipelineORM").Return(f.Mocks.pipelineORM) + }, + query: mutation, + variables: map[string]interface{}{ + "id": idStr, + }, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"runJob"}, + Message: gError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/job_test.go b/core/web/resolver/job_test.go new file mode 100644 index 00000000..82ba579d --- /dev/null +++ b/core/web/resolver/job_test.go @@ -0,0 +1,564 @@ +package resolver + +import ( + "database/sql" + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/google/uuid" + gqlerrors "github.com/graph-gophers/graphql-go/errors" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/mock" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/pluginv3.0/v2/core/chains" + clnull "github.com/goplugin/pluginv3.0/v2/core/null" + "github.com/goplugin/pluginv3.0/v2/core/services/directrequest" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/testdata/testspecs" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +// This tests the main fields on the job results. Embedded spec testing is done +// in the `spec_test` file +func TestResolver_Jobs(t *testing.T) { + var ( + externalJobID = uuid.MustParse(("00000000-0000-0000-0000-000000000001")) + + query = ` + query GetJobs { + jobs { + results { + id + createdAt + externalJobID + gasLimit + forwardingAllowed + maxTaskDuration + name + schemaVersion + spec { + __typename + } + runs { + __typename + results { + id + } + metadata { + total + } + } + observationSource + } + metadata { + total + } + } + }` + ) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "jobs"), + { + name: "get jobs success", + authenticated: true, + before: func(f *gqlTestFramework) { + plnSpecID := int32(12) + + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobs", 0, 50).Return([]job.Job{ + { + ID: 1, + Name: null.StringFrom("job1"), + SchemaVersion: 1, + MaxTaskDuration: models.Interval(1 * time.Second), + ExternalJobID: externalJobID, + CreatedAt: f.Timestamp(), + Type: job.OffchainReporting, + PipelineSpecID: plnSpecID, + OCROracleSpec: &job.OCROracleSpec{}, + PipelineSpec: &pipeline.Spec{ + DotDagSource: "ds1 [type=bridge name=voter_turnout];", + }, + }, + }, 1, nil) + f.Mocks.jobORM. + On("FindPipelineRunIDsByJobID", int32(1), 0, 50). + Return([]int64{200}, nil) + f.Mocks.jobORM. + On("FindPipelineRunsByIDs", []int64{200}). 
+ Return([]pipeline.Run{{ID: 200}}, nil) + f.Mocks.jobORM. + On("CountPipelineRunsByJobID", int32(1)). + Return(int32(1), nil) + }, + query: query, + result: ` + { + "jobs": { + "results": [{ + "id": "1", + "createdAt": "2021-01-01T00:00:00Z", + "externalJobID": "00000000-0000-0000-0000-000000000001", + "gasLimit": null, + "forwardingAllowed": false, + "maxTaskDuration": "1s", + "name": "job1", + "schemaVersion": 1, + "spec": { + "__typename": "OCRSpec" + }, + "runs": { + "__typename": "JobRunsPayload", + "results": [{ + "id": "200" + }], + "metadata": { + "total": 1 + } + }, + "observationSource": "ds1 [type=bridge name=voter_turnout];" + }], + "metadata": { + "total": 1 + } + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_Job(t *testing.T) { + var ( + id = int32(1) + externalJobID = uuid.MustParse(("00000000-0000-0000-0000-000000000001")) + + query = ` + query GetJob { + job(id: "1") { + ... on Job { + id + createdAt + externalJobID + gasLimit + maxTaskDuration + name + schemaVersion + spec { + __typename + } + runs { + __typename + results { + id + } + metadata { + total + } + } + observationSource + } + ... 
on NotFoundError { + code + message + } + } + } + ` + exampleJobResult = ` + { + "job": { + "id": "1", + "createdAt": "2021-01-01T00:00:00Z", + "externalJobID": "00000000-0000-0000-0000-000000000001", + "gasLimit": 123, + "maxTaskDuration": "1s", + "name": "job1", + "schemaVersion": 1, + "spec": { + "__typename": "OCRSpec" + }, + "runs": { + "__typename": "JobRunsPayload", + "results": [{ + "id": "200" + }], + "metadata": { + "total": 1 + } + }, + "observationSource": "ds1 [type=bridge name=voter_turnout];" + } + } + ` + ) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "job"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + ID: 1, + Name: null.StringFrom("job1"), + SchemaVersion: 1, + GasLimit: clnull.Uint32From(123), + MaxTaskDuration: models.Interval(1 * time.Second), + ExternalJobID: externalJobID, + CreatedAt: f.Timestamp(), + Type: job.OffchainReporting, + OCROracleSpec: &job.OCROracleSpec{}, + PipelineSpec: &pipeline.Spec{ + DotDagSource: "ds1 [type=bridge name=voter_turnout];", + }, + }, nil) + f.Mocks.jobORM. + On("FindPipelineRunIDsByJobID", int32(1), 0, 50). + Return([]int64{200}, nil) + f.Mocks.jobORM. + On("FindPipelineRunsByIDs", []int64{200}). + Return([]pipeline.Run{{ID: 200}}, nil) + f.Mocks.jobORM. + On("CountPipelineRunsByJobID", int32(1)). 
+ Return(int32(1), nil) + }, + query: query, + result: exampleJobResult, + }, + { + name: "not found", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{}, sql.ErrNoRows) + }, + query: query, + result: ` + { + "job": { + "code": "NOT_FOUND", + "message": "job not found" + } + } + `, + }, + { + name: "show job when chainID is disabled", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + ID: 1, + Name: null.StringFrom("job1"), + SchemaVersion: 1, + GasLimit: clnull.Uint32From(123), + MaxTaskDuration: models.Interval(1 * time.Second), + ExternalJobID: externalJobID, + CreatedAt: f.Timestamp(), + Type: job.OffchainReporting, + OCROracleSpec: &job.OCROracleSpec{}, + PipelineSpec: &pipeline.Spec{ + DotDagSource: "ds1 [type=bridge name=voter_turnout];", + }, + }, chains.ErrNoSuchChainID) + f.Mocks.jobORM. + On("FindPipelineRunIDsByJobID", int32(1), 0, 50). + Return([]int64{200}, nil) + f.Mocks.jobORM. + On("FindPipelineRunsByIDs", []int64{200}). + Return([]pipeline.Run{{ID: 200}}, nil) + f.Mocks.jobORM. + On("CountPipelineRunsByJobID", int32(1)). + Return(int32(1), nil) + }, + query: query, + result: exampleJobResult, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_CreateJob(t *testing.T) { + t.Parallel() + + mutation := ` + mutation CreateJob($input: CreateJobInput!) { + createJob(input: $input) { + ... on CreateJobSuccess { + job { + id + createdAt + externalJobID + maxTaskDuration + name + schemaVersion + } + } + ... 
on InputErrors { + errors { + path + message + code + } + } + } + }` + uuid := uuid.New() + spec := fmt.Sprintf(testspecs.DirectRequestSpecTemplate, uuid, uuid) + variables := map[string]interface{}{ + "input": map[string]interface{}{ + "TOML": spec, + }, + } + invalid := map[string]interface{}{ + "input": map[string]interface{}{ + "TOML": "some wrong value", + }, + } + jb, err := directrequest.ValidatedDirectRequestSpec(spec) + assert.NoError(t, err) + + d, err := json.Marshal(map[string]interface{}{ + "createJob": map[string]interface{}{ + "job": map[string]interface{}{ + "id": "0", + "maxTaskDuration": "0s", + "name": jb.Name, + "schemaVersion": 1, + "createdAt": "0001-01-01T00:00:00Z", + "externalJobID": jb.ExternalJobID.String(), + }, + }, + }) + assert.NoError(t, err) + expected := string(d) + + gError := errors.New("error") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "createJob"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetConfig").Return(f.Mocks.cfg) + f.App.On("AddJobV2", mock.Anything, &jb).Return(nil) + }, + query: mutation, + variables: variables, + result: expected, + }, + { + name: "invalid TOML error", + authenticated: true, + query: mutation, + variables: invalid, + result: ` + { + "createJob": { + "errors": [{ + "code": "INVALID_INPUT", + "message": "failed to parse TOML: (1, 6): was expecting token =, but got \"wrong\" instead", + "path": "TOML spec" + }] + } + }`, + }, + { + name: "generic error when adding the job", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetConfig").Return(f.Mocks.cfg) + f.App.On("AddJobV2", mock.Anything, &jb).Return(gError) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"createJob"}, + Message: gError.Error(), + }, + }, + }, + } + + RunGQLTests(t, 
testCases) +} + +func TestResolver_DeleteJob(t *testing.T) { + t.Parallel() + + id := int32(123) + extJID := uuid.New() + mutation := ` + mutation DeleteJob($id: ID!) { + deleteJob(id: $id) { + ... on DeleteJobSuccess { + job { + id + createdAt + externalJobID + maxTaskDuration + name + schemaVersion + } + } + ... on NotFoundError { + code + message + } + } + }` + variables := map[string]interface{}{ + "id": "123", + } + invalidVariables := map[string]interface{}{ + "id": "asdadada", + } + d, err := json.Marshal(map[string]interface{}{ + "deleteJob": map[string]interface{}{ + "job": map[string]interface{}{ + "id": "123", + "maxTaskDuration": "2s", + "name": "test-job", + "schemaVersion": 0, + "createdAt": "2021-01-01T00:00:00Z", + "externalJobID": extJID.String(), + }, + }, + }) + assert.NoError(t, err) + expected := string(d) + + gError := errors.New("error") + _, idError := stringutils.ToInt64("asdadada") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "deleteJob"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + ID: id, + Name: null.StringFrom("test-job"), + ExternalJobID: extJID, + MaxTaskDuration: models.Interval(2 * time.Second), + CreatedAt: f.Timestamp(), + }, nil) + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.App.On("DeleteJob", mock.Anything, id).Return(nil) + }, + query: mutation, + variables: variables, + result: expected, + }, + { + name: "not found on FindJob()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{}, sql.ErrNoRows) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + query: mutation, + variables: variables, + result: ` + { + "deleteJob": { + "code": "NOT_FOUND", + "message": "job not found" + } + } + `, + }, + { + name: "not found on DeleteJob()", + authenticated: true, + before: func(f 
*gqlTestFramework) { + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{}, nil) + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.App.On("DeleteJob", mock.Anything, id).Return(sql.ErrNoRows) + }, + query: mutation, + variables: variables, + result: ` + { + "deleteJob": { + "code": "NOT_FOUND", + "message": "job not found" + } + } + `, + }, + { + name: "generic error on FindJob()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{}, gError) + f.App.On("JobORM").Return(f.Mocks.jobORM) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"deleteJob"}, + Message: gError.Error(), + }, + }, + }, + { + name: "generic error on DeleteJob()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{}, nil) + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.App.On("DeleteJob", mock.Anything, id).Return(gError) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"deleteJob"}, + Message: gError.Error(), + }, + }, + }, + { + name: "error on ID parsing", + authenticated: true, + query: mutation, + variables: invalidVariables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: idError, + Path: []interface{}{"deleteJob"}, + Message: idError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/log.go b/core/web/resolver/log.go new file mode 100644 index 00000000..3961b6f4 --- /dev/null +++ b/core/web/resolver/log.go @@ -0,0 +1,171 @@ +package resolver + +import ( + "strings" + + "github.com/pkg/errors" +) + +type LogLevel string + +const ( + LogLevelDebug = "DEBUG" + LogLevelInfo = "INFO" + LogLevelWarn = "WARN" 
+ LogLevelError = "ERROR" +) + +func FromLogLevel(logLvl LogLevel) string { + switch logLvl { + case LogLevelDebug: + return "debug" + case LogLevelInfo: + return "info" + case LogLevelWarn: + return "warn" + case LogLevelError: + return "error" + default: + return strings.ToLower(string(logLvl)) + } +} + +func ToLogLevel(str string) (LogLevel, error) { + switch str { + case "debug": + return LogLevelDebug, nil + case "info": + return LogLevelInfo, nil + case "warn": + return LogLevelWarn, nil + case "error": + return LogLevelError, nil + default: + return "", errors.New("invalid log level") + } +} + +// SQL Logging config + +type SQLLoggingResolver struct { + enabled bool +} + +func NewSQLLogging(enabled bool) *SQLLoggingResolver { + return &SQLLoggingResolver{enabled: enabled} +} + +func (r *SQLLoggingResolver) Enabled() bool { + return r.enabled +} + +// -- GetSQLLogging Query -- + +type GetSQLLoggingPayloadResolver struct { + enabled bool +} + +func NewGetSQLLoggingPayload(enabled bool) *GetSQLLoggingPayloadResolver { + return &GetSQLLoggingPayloadResolver{enabled: enabled} +} + +func (r *GetSQLLoggingPayloadResolver) ToSQLLogging() (*SQLLoggingResolver, bool) { + return NewSQLLogging(r.enabled), true +} + +// -- SetSQLLogging Mutation -- + +type SetSQLLoggingPayloadResolver struct { + enabled bool +} + +func NewSetSQLLoggingPayload(enabled bool) *SetSQLLoggingPayloadResolver { + return &SetSQLLoggingPayloadResolver{enabled: enabled} +} + +func (r *SetSQLLoggingPayloadResolver) ToSetSQLLoggingSuccess() (*SetSQLLoggingSuccessResolver, bool) { + return NewSetSQLLoggingSuccess(r.enabled), true +} + +type SetSQLLoggingSuccessResolver struct { + enabled bool +} + +func NewSetSQLLoggingSuccess(enabled bool) *SetSQLLoggingSuccessResolver { + return &SetSQLLoggingSuccessResolver{enabled: enabled} +} + +func (r *SetSQLLoggingSuccessResolver) SQLLogging() *SQLLoggingResolver { + return NewSQLLogging(r.enabled) +} + +// -- GetLogLevel Query -- + +type 
GlobalLogLevelResolver struct { + lvl string +} + +func GlobalLogLevel(lvl string) *GlobalLogLevelResolver { + return &GlobalLogLevelResolver{lvl: lvl} +} + +func (r *GlobalLogLevelResolver) Level() (LogLevel, error) { + return ToLogLevel(r.lvl) +} + +type GlobalLogLevelPayloadResolver struct { + lgLvl string +} + +func NewGlobalLogLevelPayload(lgLvl string) *GlobalLogLevelPayloadResolver { + return &GlobalLogLevelPayloadResolver{lgLvl: lgLvl} +} + +func (r *GlobalLogLevelPayloadResolver) ToGlobalLogLevel() (*GlobalLogLevelResolver, bool) { + return GlobalLogLevel(r.lgLvl), true +} + +// -- UpdateGlobalLogLevel Mutation -- + +type SetGlobalLogLevelPayloadResolver struct { + lvl LogLevel + inputErrs map[string]string +} + +func NewSetGlobalLogLevelPayload(lvl LogLevel, inputErrs map[string]string) *SetGlobalLogLevelPayloadResolver { + return &SetGlobalLogLevelPayloadResolver{lvl: lvl, inputErrs: inputErrs} +} + +func (r *SetGlobalLogLevelPayloadResolver) ToInputErrors() (*InputErrorsResolver, bool) { + if r.inputErrs != nil { + var errs []*InputErrorResolver + + for path, message := range r.inputErrs { + errs = append(errs, NewInputError(path, message)) + } + + return NewInputErrors(errs), true + } + + return nil, false +} + +func (r *SetGlobalLogLevelPayloadResolver) ToSetGlobalLogLevelSuccess() (*SetGlobalLogLevelSuccessResolver, bool) { + if r.inputErrs != nil { + return nil, false + } + + return NewSetGlobalLogLevelSuccess(r.lvl), true +} + +type SetGlobalLogLevelSuccessResolver struct { + lvl LogLevel +} + +func NewSetGlobalLogLevelSuccess(lvl LogLevel) *SetGlobalLogLevelSuccessResolver { + return &SetGlobalLogLevelSuccessResolver{lvl: lvl} +} + +func (r *SetGlobalLogLevelSuccessResolver) GlobalLogLevel() *GlobalLogLevelResolver { + return GlobalLogLevel(FromLogLevel(r.lvl)) +} diff --git a/core/web/resolver/log_test.go b/core/web/resolver/log_test.go new file mode 100644 index 00000000..cc5a62d8 --- /dev/null +++ b/core/web/resolver/log_test.go @@ -0,0 +1,216 
@@ +package resolver + +import ( + "testing" + + gqlerrors "github.com/graph-gophers/graphql-go/errors" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/pluginv3.0/v2/core/config" +) + +func TestResolver_SetSQLLogging(t *testing.T) { + t.Parallel() + + mutation := ` + mutation SetSQLLogging($input: SetSQLLoggingInput!) { + setSQLLogging(input: $input) { + ... on SetSQLLoggingSuccess { + sqlLogging { + enabled + } + } + } + }` + variables := map[string]interface{}{ + "input": map[string]interface{}{ + "enabled": true, + }, + } + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "setSQLLogging"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.cfg.On("SetLogSQL", true).Return(nil) + f.App.On("GetConfig").Return(f.Mocks.cfg) + }, + query: mutation, + variables: variables, + result: ` + { + "setSQLLogging": { + "sqlLogging": { + "enabled": true + } + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +type databaseConfig struct { + config.Database + logSQL bool +} + +func (d *databaseConfig) LogSQL() bool { return d.logSQL } + +func TestResolver_SQLLogging(t *testing.T) { + t.Parallel() + + query := ` + query GetSQLLogging { + sqlLogging { + ... 
on SQLLogging { + enabled + } + } + }` + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "sqlLogging"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.cfg.On("Database").Return(&databaseConfig{logSQL: false}) + f.App.On("GetConfig").Return(f.Mocks.cfg) + }, + query: query, + result: ` + { + "sqlLogging": { + "enabled": false + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +type log struct { + config.Log + level zapcore.Level +} + +func (l *log) Level() zapcore.Level { + return l.level +} + +func TestResolver_GlobalLogLevel(t *testing.T) { + t.Parallel() + + query := ` + query GetGlobalLogLevel { + globalLogLevel { + ... on GlobalLogLevel { + level + } + } + }` + + var warnLvl zapcore.Level + err := warnLvl.UnmarshalText([]byte("warn")) + assert.NoError(t, err) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "globalLogLevel"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.cfg.On("Log").Return(&log{level: warnLvl}) + f.App.On("GetConfig").Return(f.Mocks.cfg) + }, + query: query, + result: ` + { + "globalLogLevel": { + "level": "WARN" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_SetGlobalLogLevel(t *testing.T) { + t.Parallel() + + mutation := ` + mutation SetGlobalLogLevel($level: LogLevel!) { + setGlobalLogLevel(level: $level) { + ... on SetGlobalLogLevelSuccess { + globalLogLevel { + level + } + } + ... 
on InputErrors { + errors { + path + message + code + } + } + } + }` + variables := map[string]interface{}{ + "level": LogLevelError, + } + + var errorLvl zapcore.Level + err := errorLvl.UnmarshalText([]byte("error")) + assert.NoError(t, err) + + gError := errors.New("error") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "setGlobalLogLevel"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("SetLogLevel", errorLvl).Return(nil) + }, + query: mutation, + variables: variables, + result: ` + { + "setGlobalLogLevel": { + "globalLogLevel": { + "level": "ERROR" + } + } + }`, + }, + { + name: "generic error on SetLogLevel", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("SetLogLevel", errorLvl).Return(gError) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"setGlobalLogLevel"}, + Message: gError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/mutation.go b/core/web/resolver/mutation.go new file mode 100644 index 00000000..5cc594ce --- /dev/null +++ b/core/web/resolver/mutation.go @@ -0,0 +1,1245 @@ +package resolver + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "net/url" + "time" + + "github.com/graph-gophers/graphql-go" + "github.com/pkg/errors" + "go.uber.org/zap/zapcore" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/blockhashstore" + "github.com/goplugin/pluginv3.0/v2/core/services/blockheaderfeeder" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/cron" + 
"github.com/goplugin/pluginv3.0/v2/core/services/directrequest" + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" + "github.com/goplugin/pluginv3.0/v2/core/services/fluxmonitorv2" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keeper" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/csakey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/validate" + "github.com/goplugin/pluginv3.0/v2/core/services/ocrbootstrap" + "github.com/goplugin/pluginv3.0/v2/core/services/vrf/vrfcommon" + "github.com/goplugin/pluginv3.0/v2/core/services/webhook" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/utils/crypto" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" + webauth "github.com/goplugin/pluginv3.0/v2/core/web/auth" +) + +type Resolver struct { + App plugin.Application +} + +type createBridgeInput struct { + Name string + URL string + Confirmations int32 + MinimumContractPayment string +} + +// CreateBridge creates a new bridge. 
+func (r *Resolver) CreateBridge(ctx context.Context, args struct{ Input createBridgeInput }) (*CreateBridgePayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + var webURL models.WebURL + if len(args.Input.URL) != 0 { + rURL, err := url.ParseRequestURI(args.Input.URL) + if err != nil { + return nil, err + } + webURL = models.WebURL(*rURL) + } + minContractPayment := &assets.Link{} + if err := minContractPayment.UnmarshalText([]byte(args.Input.MinimumContractPayment)); err != nil { + return nil, err + } + + btr := &bridges.BridgeTypeRequest{ + Name: bridges.BridgeName(args.Input.Name), + URL: webURL, + Confirmations: uint32(args.Input.Confirmations), + MinimumContractPayment: minContractPayment, + } + + bta, bt, err := bridges.NewBridgeType(btr) + if err != nil { + return nil, err + } + orm := r.App.BridgeORM() + if err = ValidateBridgeType(btr); err != nil { + return nil, err + } + if err = ValidateBridgeTypeUniqueness(btr, orm); err != nil { + return nil, err + } + if err := orm.CreateBridgeType(bt); err != nil { + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.BridgeCreated, map[string]interface{}{ + "bridgeName": bta.Name, + "bridgeConfirmations": bta.Confirmations, + "bridgeMinimumContractPayment": bta.MinimumContractPayment, + "bridgeURL": bta.URL, + }) + + return NewCreateBridgePayload(*bt, bta.IncomingToken), nil +} + +func (r *Resolver) CreateCSAKey(ctx context.Context) (*CreateCSAKeyPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + key, err := r.App.GetKeyStore().CSA().Create() + if err != nil { + if errors.Is(err, keystore.ErrCSAKeyExists) { + return NewCreateCSAKeyPayload(nil, err), nil + } + + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.CSAKeyCreated, map[string]interface{}{ + "CSAPublicKey": key.PublicKey, + "CSVersion": key.Version, + }) + + return NewCreateCSAKeyPayload(&key, nil), nil +} + +func (r *Resolver) 
DeleteCSAKey(ctx context.Context, args struct { + ID graphql.ID +}) (*DeleteCSAKeyPayloadResolver, error) { + if err := authenticateUserIsAdmin(ctx); err != nil { + return nil, err + } + + key, err := r.App.GetKeyStore().CSA().Delete(string(args.ID)) + if err != nil { + if errors.As(err, &keystore.KeyNotFoundError{}) { + return NewDeleteCSAKeyPayload(csakey.KeyV2{}, err), nil + } + + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.CSAKeyDeleted, map[string]interface{}{"id": args.ID}) + + return NewDeleteCSAKeyPayload(key, nil), nil +} + +type createFeedsManagerChainConfigInput struct { + FeedsManagerID string + ChainID string + ChainType string + AccountAddr string + AdminAddr string + FluxMonitorEnabled bool + OCR1Enabled bool + OCR1IsBootstrap *bool + OCR1Multiaddr *string + OCR1P2PPeerID *string + OCR1KeyBundleID *string + OCR2Enabled bool + OCR2IsBootstrap *bool + OCR2Multiaddr *string + OCR2ForwarderAddress *string + OCR2P2PPeerID *string + OCR2KeyBundleID *string + OCR2Plugins string +} + +func (r *Resolver) CreateFeedsManagerChainConfig(ctx context.Context, args struct { + Input *createFeedsManagerChainConfigInput +}) (*CreateFeedsManagerChainConfigPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + fsvc := r.App.GetFeedsService() + + fmID, err := stringutils.ToInt64(args.Input.FeedsManagerID) + if err != nil { + return nil, err + } + + ctype, err := feeds.NewChainType(args.Input.ChainType) + if err != nil { + return nil, err + } + + params := feeds.ChainConfig{ + FeedsManagerID: fmID, + ChainID: args.Input.ChainID, + ChainType: ctype, + AccountAddress: args.Input.AccountAddr, + AdminAddress: args.Input.AdminAddr, + FluxMonitorConfig: feeds.FluxMonitorConfig{ + Enabled: args.Input.FluxMonitorEnabled, + }, + } + + if args.Input.OCR1Enabled { + params.OCR1Config = feeds.OCR1Config{ + Enabled: args.Input.OCR1Enabled, + IsBootstrap: *args.Input.OCR1IsBootstrap, + Multiaddr: 
null.StringFromPtr(args.Input.OCR1Multiaddr), + P2PPeerID: null.StringFromPtr(args.Input.OCR1P2PPeerID), + KeyBundleID: null.StringFromPtr(args.Input.OCR1KeyBundleID), + } + } + + if args.Input.OCR2Enabled { + var plugins feeds.Plugins + if err = plugins.Scan(args.Input.OCR2Plugins); err != nil { + return nil, err + } + + params.OCR2Config = feeds.OCR2ConfigModel{ + Enabled: args.Input.OCR2Enabled, + IsBootstrap: *args.Input.OCR2IsBootstrap, + Multiaddr: null.StringFromPtr(args.Input.OCR2Multiaddr), + ForwarderAddress: null.StringFromPtr(args.Input.OCR2ForwarderAddress), + P2PPeerID: null.StringFromPtr(args.Input.OCR2P2PPeerID), + KeyBundleID: null.StringFromPtr(args.Input.OCR2KeyBundleID), + Plugins: plugins, + } + } + + id, err := fsvc.CreateChainConfig(ctx, params) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewCreateFeedsManagerChainConfigPayload(nil, err, nil), nil + } + + return nil, err + } + + ccfg, err := fsvc.GetChainConfig(id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewCreateFeedsManagerChainConfigPayload(nil, err, nil), nil + } + + return nil, err + } + + fmj, _ := json.Marshal(ccfg) + r.App.GetAuditLogger().Audit(audit.FeedsManChainConfigCreated, map[string]interface{}{"feedsManager": fmj}) + + return NewCreateFeedsManagerChainConfigPayload(ccfg, nil, nil), nil +} + +func (r *Resolver) DeleteFeedsManagerChainConfig(ctx context.Context, args struct { + ID string +}) (*DeleteFeedsManagerChainConfigPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + id, err := stringutils.ToInt64(args.ID) + if err != nil { + return nil, err + } + + fsvc := r.App.GetFeedsService() + + ccfg, err := fsvc.GetChainConfig(id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewDeleteFeedsManagerChainConfigPayload(nil, err), nil + } + + return nil, err + } + + if _, err := fsvc.DeleteChainConfig(ctx, id); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return 
NewDeleteFeedsManagerChainConfigPayload(nil, err), nil + } + + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.FeedsManChainConfigDeleted, map[string]interface{}{"id": args.ID}) + + return NewDeleteFeedsManagerChainConfigPayload(ccfg, nil), nil +} + +type updateFeedsManagerChainConfigInput struct { + AccountAddr string + AdminAddr string + FluxMonitorEnabled bool + OCR1Enabled bool + OCR1IsBootstrap *bool + OCR1Multiaddr *string + OCR1P2PPeerID *string + OCR1KeyBundleID *string + OCR2Enabled bool + OCR2IsBootstrap *bool + OCR2Multiaddr *string + OCR2ForwarderAddress *string + OCR2P2PPeerID *string + OCR2KeyBundleID *string + OCR2Plugins string +} + +func (r *Resolver) UpdateFeedsManagerChainConfig(ctx context.Context, args struct { + ID string + Input *updateFeedsManagerChainConfigInput +}) (*UpdateFeedsManagerChainConfigPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + fsvc := r.App.GetFeedsService() + + id, err := stringutils.ToInt64(args.ID) + if err != nil { + return nil, err + } + + params := feeds.ChainConfig{ + ID: id, + AccountAddress: args.Input.AccountAddr, + AdminAddress: args.Input.AdminAddr, + FluxMonitorConfig: feeds.FluxMonitorConfig{ + Enabled: args.Input.FluxMonitorEnabled, + }, + } + + if args.Input.OCR1Enabled { + params.OCR1Config = feeds.OCR1Config{ + Enabled: args.Input.OCR1Enabled, + IsBootstrap: *args.Input.OCR1IsBootstrap, + Multiaddr: null.StringFromPtr(args.Input.OCR1Multiaddr), + P2PPeerID: null.StringFromPtr(args.Input.OCR1P2PPeerID), + KeyBundleID: null.StringFromPtr(args.Input.OCR1KeyBundleID), + } + } + + if args.Input.OCR2Enabled { + var plugins feeds.Plugins + if err = plugins.Scan(args.Input.OCR2Plugins); err != nil { + return nil, err + } + + params.OCR2Config = feeds.OCR2ConfigModel{ + Enabled: args.Input.OCR2Enabled, + IsBootstrap: *args.Input.OCR2IsBootstrap, + Multiaddr: null.StringFromPtr(args.Input.OCR2Multiaddr), + ForwarderAddress: 
null.StringFromPtr(args.Input.OCR2ForwarderAddress), + P2PPeerID: null.StringFromPtr(args.Input.OCR2P2PPeerID), + KeyBundleID: null.StringFromPtr(args.Input.OCR2KeyBundleID), + Plugins: plugins, + } + } + + id, err = fsvc.UpdateChainConfig(ctx, params) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewUpdateFeedsManagerChainConfigPayload(nil, err, nil), nil + } + + return nil, err + } + + ccfg, err := fsvc.GetChainConfig(id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewUpdateFeedsManagerChainConfigPayload(nil, err, nil), nil + } + + return nil, err + } + + fmj, _ := json.Marshal(ccfg) + r.App.GetAuditLogger().Audit(audit.FeedsManChainConfigUpdated, map[string]interface{}{"feedsManager": fmj}) + + return NewUpdateFeedsManagerChainConfigPayload(ccfg, nil, nil), nil +} + +type createFeedsManagerInput struct { + Name string + URI string + PublicKey string +} + +func (r *Resolver) CreateFeedsManager(ctx context.Context, args struct { + Input *createFeedsManagerInput +}) (*CreateFeedsManagerPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + publicKey, err := crypto.PublicKeyFromHex(args.Input.PublicKey) + if err != nil { + return NewCreateFeedsManagerPayload(nil, nil, map[string]string{ + "input/publicKey": "invalid hex value", + }), nil + } + + params := feeds.RegisterManagerParams{ + Name: args.Input.Name, + URI: args.Input.URI, + PublicKey: *publicKey, + } + + feedsService := r.App.GetFeedsService() + + id, err := feedsService.RegisterManager(ctx, params) + if err != nil { + if errors.Is(err, feeds.ErrSingleFeedsManager) { + return NewCreateFeedsManagerPayload(nil, err, nil), nil + } + return nil, err + } + + mgr, err := feedsService.GetManager(id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewCreateFeedsManagerPayload(nil, err, nil), nil + } + + return nil, err + } + + mgrj, _ := json.Marshal(mgr) + r.App.GetAuditLogger().Audit(audit.FeedsManCreated, 
map[string]interface{}{"mgrj": mgrj}) + + return NewCreateFeedsManagerPayload(mgr, nil, nil), nil +} + +type updateBridgeInput struct { + Name string + URL string + Confirmations int32 + MinimumContractPayment string +} + +func (r *Resolver) UpdateBridge(ctx context.Context, args struct { + ID graphql.ID + Input updateBridgeInput +}) (*UpdateBridgePayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + var webURL models.WebURL + if len(args.Input.URL) != 0 { + rURL, err := url.ParseRequestURI(args.Input.URL) + if err != nil { + return nil, err + } + webURL = models.WebURL(*rURL) + } + minContractPayment := &assets.Link{} + if err := minContractPayment.UnmarshalText([]byte(args.Input.MinimumContractPayment)); err != nil { + return nil, err + } + + btr := &bridges.BridgeTypeRequest{ + Name: bridges.BridgeName(args.Input.Name), + URL: webURL, + Confirmations: uint32(args.Input.Confirmations), + MinimumContractPayment: minContractPayment, + } + + taskType, err := bridges.ParseBridgeName(string(args.ID)) + if err != nil { + return nil, err + } + + // Find the bridge + orm := r.App.BridgeORM() + bridge, err := orm.FindBridge(taskType) + if errors.Is(err, sql.ErrNoRows) { + return NewUpdateBridgePayload(nil, err), nil + } + if err != nil { + return nil, err + } + + // Update the bridge + if err := ValidateBridgeType(btr); err != nil { + return nil, err + } + + if err := orm.UpdateBridgeType(&bridge, btr); err != nil { + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.BridgeUpdated, map[string]interface{}{ + "bridgeName": bridge.Name, + "bridgeConfirmations": bridge.Confirmations, + "bridgeMinimumContractPayment": bridge.MinimumContractPayment, + "bridgeURL": bridge.URL, + }) + + return NewUpdateBridgePayload(&bridge, nil), nil +} + +type updateFeedsManagerInput struct { + Name string + URI string + PublicKey string +} + +func (r *Resolver) UpdateFeedsManager(ctx context.Context, args struct { + ID graphql.ID + 
Input *updateFeedsManagerInput +}) (*UpdateFeedsManagerPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + id, err := stringutils.ToInt64(string(args.ID)) + if err != nil { + return nil, err + } + + publicKey, err := crypto.PublicKeyFromHex(args.Input.PublicKey) + if err != nil { + return NewUpdateFeedsManagerPayload(nil, nil, map[string]string{ + "input/publicKey": "invalid hex value", + }), nil + } + + mgr := &feeds.FeedsManager{ + ID: id, + URI: args.Input.URI, + Name: args.Input.Name, + PublicKey: *publicKey, + } + + feedsService := r.App.GetFeedsService() + + if err = feedsService.UpdateManager(ctx, *mgr); err != nil { + return nil, err + } + + mgr, err = feedsService.GetManager(id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewUpdateFeedsManagerPayload(nil, err, nil), nil + } + + return nil, err + } + + mgrj, _ := json.Marshal(mgr) + r.App.GetAuditLogger().Audit(audit.FeedsManUpdated, map[string]interface{}{"mgrj": mgrj}) + + return NewUpdateFeedsManagerPayload(mgr, nil, nil), nil +} + +func (r *Resolver) CreateOCRKeyBundle(ctx context.Context) (*CreateOCRKeyBundlePayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + key, err := r.App.GetKeyStore().OCR().Create() + if err != nil { + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.OCRKeyBundleCreated, map[string]interface{}{ + "ocrKeyBundleID": key.ID(), + "ocrKeyBundlePublicKeyAddressOnChain": key.PublicKeyAddressOnChain(), + }) + + return NewCreateOCRKeyBundlePayload(&key), nil +} + +func (r *Resolver) DeleteOCRKeyBundle(ctx context.Context, args struct { + ID string +}) (*DeleteOCRKeyBundlePayloadResolver, error) { + if err := authenticateUserIsAdmin(ctx); err != nil { + return nil, err + } + + deletedKey, err := r.App.GetKeyStore().OCR().Delete(args.ID) + if err != nil { + if errors.As(err, &keystore.KeyNotFoundError{}) { + return 
NewDeleteOCRKeyBundlePayloadResolver(ocrkey.KeyV2{}, err), nil + } + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.OCRKeyBundleDeleted, map[string]interface{}{"id": args.ID}) + return NewDeleteOCRKeyBundlePayloadResolver(deletedKey, nil), nil +} + +func (r *Resolver) DeleteBridge(ctx context.Context, args struct { + ID graphql.ID +}) (*DeleteBridgePayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + taskType, err := bridges.ParseBridgeName(string(args.ID)) + if err != nil { + return NewDeleteBridgePayload(nil, err), nil + } + + orm := r.App.BridgeORM() + bt, err := orm.FindBridge(taskType) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewDeleteBridgePayload(nil, err), nil + } + + return nil, err + } + + jobsUsingBridge, err := r.App.JobORM().FindJobIDsWithBridge(string(args.ID)) + if err != nil { + return nil, err + } + if len(jobsUsingBridge) > 0 { + return NewDeleteBridgePayload(nil, fmt.Errorf("bridge has jobs associated with it")), nil + } + + if err = orm.DeleteBridgeType(&bt); err != nil { + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.BridgeDeleted, map[string]interface{}{"name": bt.Name}) + return NewDeleteBridgePayload(&bt, nil), nil +} + +func (r *Resolver) CreateP2PKey(ctx context.Context) (*CreateP2PKeyPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + key, err := r.App.GetKeyStore().P2P().Create() + if err != nil { + return nil, err + } + + const keyType = "Ed25519" + r.App.GetAuditLogger().Audit(audit.KeyCreated, map[string]interface{}{ + "type": "p2p", + "id": key.ID(), + "p2pPublicKey": key.PublicKeyHex(), + "p2pPeerID": key.PeerID(), + "p2pType": keyType, + }) + + return NewCreateP2PKeyPayload(key), nil +} + +func (r *Resolver) DeleteP2PKey(ctx context.Context, args struct { + ID graphql.ID +}) (*DeleteP2PKeyPayloadResolver, error) { + if err := authenticateUserIsAdmin(ctx); err != nil { + return 
nil, err + } + + keyID, err := p2pkey.MakePeerID(string(args.ID)) + if err != nil { + return nil, err + } + + key, err := r.App.GetKeyStore().P2P().Delete(keyID) + if err != nil { + if errors.As(err, &keystore.KeyNotFoundError{}) { + return NewDeleteP2PKeyPayload(p2pkey.KeyV2{}, err), nil + } + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.KeyDeleted, map[string]interface{}{ + "type": "p2p", + "id": args.ID, + }) + + return NewDeleteP2PKeyPayload(key, nil), nil +} + +func (r *Resolver) CreateVRFKey(ctx context.Context) (*CreateVRFKeyPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + key, err := r.App.GetKeyStore().VRF().Create() + if err != nil { + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.KeyCreated, map[string]interface{}{ + "type": "vrf", + "id": key.ID(), + "vrfPublicKey": key.PublicKey, + "vrfPublicKeyAddress": key.PublicKey.Address(), + }) + + return NewCreateVRFKeyPayloadResolver(key), nil +} + +func (r *Resolver) DeleteVRFKey(ctx context.Context, args struct { + ID graphql.ID +}) (*DeleteVRFKeyPayloadResolver, error) { + if err := authenticateUserIsAdmin(ctx); err != nil { + return nil, err + } + + key, err := r.App.GetKeyStore().VRF().Delete(string(args.ID)) + if err != nil { + if errors.Is(errors.Cause(err), keystore.ErrMissingVRFKey) { + return NewDeleteVRFKeyPayloadResolver(vrfkey.KeyV2{}, err), nil + } + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.KeyDeleted, map[string]interface{}{ + "type": "vrf", + "id": args.ID, + }) + + return NewDeleteVRFKeyPayloadResolver(key, nil), nil +} + +// ApproveJobProposalSpec approves the job proposal spec. 
+func (r *Resolver) ApproveJobProposalSpec(ctx context.Context, args struct { + ID graphql.ID + Force *bool +}) (*ApproveJobProposalSpecPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + id, err := stringutils.ToInt64(string(args.ID)) + if err != nil { + return nil, err + } + + forceApprove := false + if args.Force != nil { + forceApprove = *args.Force + } + + feedsSvc := r.App.GetFeedsService() + if err = feedsSvc.ApproveSpec(ctx, id, forceApprove); err != nil { + if errors.Is(err, sql.ErrNoRows) || errors.Is(err, feeds.ErrJobAlreadyExists) { + return NewApproveJobProposalSpecPayload(nil, err), nil + } + return nil, err + } + + spec, err := feedsSvc.GetSpec(id) + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + return nil, err + } + } + + specj, _ := json.Marshal(spec) + r.App.GetAuditLogger().Audit(audit.JobProposalSpecApproved, map[string]interface{}{"spec": specj}) + + return NewApproveJobProposalSpecPayload(spec, err), nil +} + +// CancelJobProposalSpec cancels the job proposal spec. +func (r *Resolver) CancelJobProposalSpec(ctx context.Context, args struct { + ID graphql.ID +}) (*CancelJobProposalSpecPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + id, err := stringutils.ToInt64(string(args.ID)) + if err != nil { + return nil, err + } + + feedsSvc := r.App.GetFeedsService() + if err = feedsSvc.CancelSpec(ctx, id); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewCancelJobProposalSpecPayload(nil, err), nil + } + + return nil, err + } + + spec, err := feedsSvc.GetSpec(id) + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + return nil, err + } + } + + specj, _ := json.Marshal(spec) + r.App.GetAuditLogger().Audit(audit.JobProposalSpecCanceled, map[string]interface{}{"spec": specj}) + + return NewCancelJobProposalSpecPayload(spec, err), nil +} + +// RejectJobProposalSpec rejects the job proposal spec. 
+func (r *Resolver) RejectJobProposalSpec(ctx context.Context, args struct { + ID graphql.ID +}) (*RejectJobProposalSpecPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + id, err := stringutils.ToInt64(string(args.ID)) + if err != nil { + return nil, err + } + + feedsSvc := r.App.GetFeedsService() + if err = feedsSvc.RejectSpec(ctx, id); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewRejectJobProposalSpecPayload(nil, err), nil + } + + return nil, err + } + + spec, err := feedsSvc.GetSpec(id) + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + return nil, err + } + } + + specj, _ := json.Marshal(spec) + r.App.GetAuditLogger().Audit(audit.JobProposalSpecRejected, map[string]interface{}{"spec": specj}) + + return NewRejectJobProposalSpecPayload(spec, err), nil +} + +// UpdateJobProposalSpecDefinition updates the spec definition. +func (r *Resolver) UpdateJobProposalSpecDefinition(ctx context.Context, args struct { + ID graphql.ID + Input *struct{ Definition string } +}) (*UpdateJobProposalSpecDefinitionPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + id, err := stringutils.ToInt64(string(args.ID)) + if err != nil { + return nil, err + } + + feedsSvc := r.App.GetFeedsService() + + err = feedsSvc.UpdateSpecDefinition(ctx, id, args.Input.Definition) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewUpdateJobProposalSpecDefinitionPayload(nil, err), nil + } + + return nil, err + } + + spec, err := feedsSvc.GetSpec(id) + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + return nil, err + } + } + + specj, _ := json.Marshal(spec) + r.App.GetAuditLogger().Audit(audit.JobProposalSpecUpdated, map[string]interface{}{"spec": specj}) + + return NewUpdateJobProposalSpecDefinitionPayload(spec, err), nil +} + +func (r *Resolver) UpdateUserPassword(ctx context.Context, args struct { + Input UpdatePasswordInput +}) 
(*UpdatePasswordPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + session, ok := webauth.GetGQLAuthenticatedSession(ctx) + if !ok { + return nil, errors.New("couldn't retrieve user session") + } + + dbUser, err := r.App.AuthenticationProvider().FindUser(session.User.Email) + if err != nil { + return nil, err + } + + if !utils.CheckPasswordHash(args.Input.OldPassword, dbUser.HashedPassword) { + r.App.GetAuditLogger().Audit(audit.PasswordResetAttemptFailedMismatch, map[string]interface{}{"user": dbUser.Email}) + + return NewUpdatePasswordPayload(nil, map[string]string{ + "oldPassword": "old password does not match", + }), nil + } + + if err = r.App.AuthenticationProvider().ClearNonCurrentSessions(session.SessionID); err != nil { + return nil, clearSessionsError{} + } + + err = r.App.AuthenticationProvider().SetPassword(&dbUser, args.Input.NewPassword) + if err != nil { + return nil, failedPasswordUpdateError{} + } + + r.App.GetAuditLogger().Audit(audit.PasswordResetSuccess, map[string]interface{}{"user": dbUser.Email}) + return NewUpdatePasswordPayload(session.User, nil), nil +} + +func (r *Resolver) SetSQLLogging(ctx context.Context, args struct { + Input struct{ Enabled bool } +}) (*SetSQLLoggingPayloadResolver, error) { + if err := authenticateUserIsAdmin(ctx); err != nil { + return nil, err + } + + r.App.GetConfig().SetLogSQL(args.Input.Enabled) + + if args.Input.Enabled { + r.App.GetAuditLogger().Audit(audit.ConfigSqlLoggingEnabled, map[string]interface{}{}) + } else { + r.App.GetAuditLogger().Audit(audit.ConfigSqlLoggingDisabled, map[string]interface{}{}) + } + + return NewSetSQLLoggingPayload(args.Input.Enabled), nil +} + +func (r *Resolver) CreateAPIToken(ctx context.Context, args struct { + Input struct{ Password string } +}) (*CreateAPITokenPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + session, ok := webauth.GetGQLAuthenticatedSession(ctx) + if !ok { + 
return nil, errors.New("Failed to obtain current user from context") + } + dbUser, err := r.App.AuthenticationProvider().FindUser(session.User.Email) + if err != nil { + return nil, err + } + + err = r.App.AuthenticationProvider().TestPassword(dbUser.Email, args.Input.Password) + if err != nil { + r.App.GetAuditLogger().Audit(audit.APITokenCreateAttemptPasswordMismatch, map[string]interface{}{"user": dbUser.Email}) + + return NewCreateAPITokenPayload(nil, map[string]string{ + "password": "incorrect password", + }), nil + } + + newToken, err := r.App.AuthenticationProvider().CreateAndSetAuthToken(&dbUser) + if err != nil { + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.APITokenCreated, map[string]interface{}{"user": dbUser.Email}) + return NewCreateAPITokenPayload(newToken, nil), nil +} + +func (r *Resolver) DeleteAPIToken(ctx context.Context, args struct { + Input struct{ Password string } +}) (*DeleteAPITokenPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + session, ok := webauth.GetGQLAuthenticatedSession(ctx) + if !ok { + return nil, errors.New("Failed to obtain current user from context") + } + dbUser, err := r.App.AuthenticationProvider().FindUser(session.User.Email) + if err != nil { + return nil, err + } + + err = r.App.AuthenticationProvider().TestPassword(dbUser.Email, args.Input.Password) + if err != nil { + r.App.GetAuditLogger().Audit(audit.APITokenDeleteAttemptPasswordMismatch, map[string]interface{}{"user": dbUser.Email}) + + return NewDeleteAPITokenPayload(nil, map[string]string{ + "password": "incorrect password", + }), nil + } + + err = r.App.AuthenticationProvider().DeleteAuthToken(&dbUser) + if err != nil { + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.APITokenDeleted, map[string]interface{}{"user": dbUser.Email}) + + return NewDeleteAPITokenPayload(&auth.Token{ + AccessKey: dbUser.TokenKey.String, + }, nil), nil +} + +func (r *Resolver) CreateJob(ctx context.Context, 
args struct { + Input struct { + TOML string + } +}) (*CreateJobPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + jbt, err := job.ValidateSpec(args.Input.TOML) + if err != nil { + return NewCreateJobPayload(r.App, nil, map[string]string{ + "TOML spec": errors.Wrap(err, "failed to parse TOML").Error(), + }), nil + } + + var jb job.Job + config := r.App.GetConfig() + switch jbt { + case job.OffchainReporting: + jb, err = ocr.ValidatedOracleSpecToml(r.App.GetRelayers().LegacyEVMChains(), args.Input.TOML) + if !config.OCR().Enabled() { + return nil, errors.New("The Offchain Reporting feature is disabled by configuration") + } + case job.OffchainReporting2: + jb, err = validate.ValidatedOracleSpecToml(r.App.GetConfig().OCR2(), r.App.GetConfig().Insecure(), args.Input.TOML) + if !config.OCR2().Enabled() { + return nil, errors.New("The Offchain Reporting 2 feature is disabled by configuration") + } + case job.DirectRequest: + jb, err = directrequest.ValidatedDirectRequestSpec(args.Input.TOML) + case job.FluxMonitor: + jb, err = fluxmonitorv2.ValidatedFluxMonitorSpec(config.JobPipeline(), args.Input.TOML) + case job.Keeper: + jb, err = keeper.ValidatedKeeperSpec(args.Input.TOML) + case job.Cron: + jb, err = cron.ValidatedCronSpec(args.Input.TOML) + case job.VRF: + jb, err = vrfcommon.ValidatedVRFSpec(args.Input.TOML) + case job.Webhook: + jb, err = webhook.ValidatedWebhookSpec(args.Input.TOML, r.App.GetExternalInitiatorManager()) + case job.BlockhashStore: + jb, err = blockhashstore.ValidatedSpec(args.Input.TOML) + case job.BlockHeaderFeeder: + jb, err = blockheaderfeeder.ValidatedSpec(args.Input.TOML) + case job.Bootstrap: + jb, err = ocrbootstrap.ValidatedBootstrapSpecToml(args.Input.TOML) + case job.Gateway: + jb, err = gateway.ValidatedGatewaySpec(args.Input.TOML) + default: + return NewCreateJobPayload(r.App, nil, map[string]string{ + "Job Type": fmt.Sprintf("unknown job type: %s", jbt), + }), nil + } + if err 
!= nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + err = r.App.AddJobV2(ctx, &jb) + if err != nil { + return nil, err + } + + jbj, _ := json.Marshal(jb) + r.App.GetAuditLogger().Audit(audit.JobCreated, map[string]interface{}{"job": string(jbj)}) + + return NewCreateJobPayload(r.App, &jb, nil), nil +} + +func (r *Resolver) DeleteJob(ctx context.Context, args struct { + ID graphql.ID +}) (*DeleteJobPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + id, err := stringutils.ToInt32(string(args.ID)) + if err != nil { + return nil, err + } + + j, err := r.App.JobORM().FindJobWithoutSpecErrors(id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewDeleteJobPayload(r.App, nil, err), nil + } + + return nil, err + } + + err = r.App.DeleteJob(ctx, id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewDeleteJobPayload(r.App, nil, err), nil + } + + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.JobDeleted, map[string]interface{}{"id": args.ID}) + return NewDeleteJobPayload(r.App, &j, nil), nil +} + +func (r *Resolver) DismissJobError(ctx context.Context, args struct { + ID graphql.ID +}) (*DismissJobErrorPayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + id, err := stringutils.ToInt64(string(args.ID)) + if err != nil { + return nil, err + } + + specErr, err := r.App.JobORM().FindSpecError(id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewDismissJobErrorPayload(nil, err), nil + } + + return nil, err + } + + err = r.App.JobORM().DismissError(ctx, id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewDismissJobErrorPayload(nil, err), nil + } + + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.JobErrorDismissed, map[string]interface{}{"id": args.ID}) + return NewDismissJobErrorPayload(&specErr, nil), nil +} + +func (r 
*Resolver) RunJob(ctx context.Context, args struct { + ID graphql.ID +}) (*RunJobPayloadResolver, error) { + if err := authenticateUserCanRun(ctx); err != nil { + return nil, err + } + + jobID, err := stringutils.ToInt32(string(args.ID)) + if err != nil { + return nil, err + } + + jobRunID, err := r.App.RunJobV2(ctx, jobID, nil) + if err != nil { + if errors.Is(err, webhook.ErrJobNotExists) { + return NewRunJobPayload(nil, r.App, err), nil + } + + return nil, err + } + + plnRun, err := r.App.PipelineORM().FindRun(jobRunID) + if err != nil { + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.JobRunSet, map[string]interface{}{"jobID": args.ID, "jobRunID": jobRunID, "planRunID": plnRun}) + return NewRunJobPayload(&plnRun, r.App, nil), nil +} + +func (r *Resolver) SetGlobalLogLevel(ctx context.Context, args struct { + Level LogLevel +}) (*SetGlobalLogLevelPayloadResolver, error) { + if err := authenticateUserIsAdmin(ctx); err != nil { + return nil, err + } + + var lvl zapcore.Level + logLvl := FromLogLevel(args.Level) + + err := lvl.UnmarshalText([]byte(logLvl)) + if err != nil { + return NewSetGlobalLogLevelPayload("", map[string]string{ + "level": "invalid log level", + }), nil + } + + if err := r.App.SetLogLevel(lvl); err != nil { + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.GlobalLogLevelSet, map[string]interface{}{"logLevel": args.Level}) + return NewSetGlobalLogLevelPayload(args.Level, nil), nil +} + +// CreateOCR2KeyBundle resolves a create OCR2 Key bundle mutation +func (r *Resolver) CreateOCR2KeyBundle(ctx context.Context, args struct { + ChainType OCR2ChainType +}) (*CreateOCR2KeyBundlePayloadResolver, error) { + if err := authenticateUserCanEdit(ctx); err != nil { + return nil, err + } + + ct := FromOCR2ChainType(args.ChainType) + + key, err := r.App.GetKeyStore().OCR2().Create(chaintype.ChainType(ct)) + if err != nil { + // Not covering the `chaintype.ErrInvalidChainType` since the GQL model would prevent a non-accepted chain-type 
from being received + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.OCR2KeyBundleCreated, map[string]interface{}{ + "ocrKeyID": key.ID(), + "ocrKeyChainType": key.ChainType(), + "ocrKeyConfigEncryptionPublicKey": key.ConfigEncryptionPublicKey(), + "ocrKeyOffchainPublicKey": key.OffchainPublicKey(), + "ocrKeyMaxSignatureLength": key.MaxSignatureLength(), + "ocrKeyPublicKey": key.PublicKey(), + }) + + return NewCreateOCR2KeyBundlePayload(&key), nil +} + +// DeleteOCR2KeyBundle resolves a create OCR2 Key bundle mutation +func (r *Resolver) DeleteOCR2KeyBundle(ctx context.Context, args struct { + ID graphql.ID +}) (*DeleteOCR2KeyBundlePayloadResolver, error) { + if err := authenticateUserIsAdmin(ctx); err != nil { + return nil, err + } + + id := string(args.ID) + key, err := r.App.GetKeyStore().OCR2().Get(id) + if err != nil { + return NewDeleteOCR2KeyBundlePayloadResolver(nil, err), nil + } + + err = r.App.GetKeyStore().OCR2().Delete(id) + if err != nil { + return nil, err + } + + r.App.GetAuditLogger().Audit(audit.OCR2KeyBundleDeleted, map[string]interface{}{"id": id}) + return NewDeleteOCR2KeyBundlePayloadResolver(&key, nil), nil +} diff --git a/core/web/resolver/node.go b/core/web/resolver/node.go new file mode 100644 index 00000000..6ba0881e --- /dev/null +++ b/core/web/resolver/node.go @@ -0,0 +1,139 @@ +package resolver + +import ( + "context" + "errors" + + "github.com/graph-gophers/graphql-go" + "github.com/pelletier/go-toml/v2" + + "github.com/goplugin/plugin-common/pkg/types" + + evmtoml "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + "github.com/goplugin/pluginv3.0/v2/core/web/loader" +) + +// NodeResolver resolves the Node type. 
+type NodeResolver struct { + node evmtoml.Node + status types.NodeStatus +} + +func NewNode(status types.NodeStatus) (nr *NodeResolver, warn error) { + nr = &NodeResolver{status: status} + warn = toml.Unmarshal([]byte(status.Config), &nr.node) + return +} + +func NewNodes(nodes []types.NodeStatus) (resolvers []*NodeResolver, warns error) { + for _, n := range nodes { + nr, warn := NewNode(n) + if warn != nil { + warns = errors.Join(warns, warn) + } + resolvers = append(resolvers, nr) + } + + return +} + +func orZero[P any](s *P) P { + if s == nil { + var zero P + return zero + } + return *s +} + +// ID resolves the node's unique identifier. +func (r *NodeResolver) ID() graphql.ID { + return graphql.ID(r.Name()) +} + +// Name resolves the node's name field. +func (r *NodeResolver) Name() string { + return orZero(r.node.Name) +} + +// WSURL resolves the node's websocket url field. +func (r *NodeResolver) WSURL() string { + if r.node.WSURL == nil { + return "" + } + return r.node.WSURL.String() +} + +// HTTPURL resolves the node's http url field. +func (r *NodeResolver) HTTPURL() string { + if r.node.HTTPURL == nil { + return "" + } + return r.node.HTTPURL.String() +} + +// State resolves the node state +func (r *NodeResolver) State() string { + return r.status.State +} + +// SendOnly resolves the node's sendOnly bool +func (r *NodeResolver) SendOnly() bool { + return orZero(r.node.SendOnly) +} + +// Order resolves the node's order field +func (r *NodeResolver) Order() *int32 { + return r.node.Order +} + +// Chain resolves the node's chain object field. 
+func (r *NodeResolver) Chain(ctx context.Context) (*ChainResolver, error) { + chain, err := loader.GetChainByID(ctx, r.status.ChainID) + if err != nil { + return nil, err + } + + return NewChain(*chain), nil +} + +// -- Node Query -- + +type NodePayloadResolver struct { + nr *NodeResolver + NotFoundErrorUnionType +} + +func NewNodePayloadResolver(node *types.NodeStatus, err error) (npr *NodePayloadResolver, warn error) { + e := NotFoundErrorUnionType{err: err, message: "node not found", isExpectedErrorFn: nil} + npr = &NodePayloadResolver{NotFoundErrorUnionType: e} + if node != nil { + npr.nr, warn = NewNode(*node) + } + return +} + +// ToNode resolves the Node object to be returned if it is found +func (r *NodePayloadResolver) ToNode() (*NodeResolver, bool) { + return r.nr, r.nr != nil +} + +// -- Nodes Query -- + +type NodesPayloadResolver struct { + nrs []*NodeResolver + total int32 +} + +func NewNodesPayload(nodes []types.NodeStatus, total int32) (npr *NodesPayloadResolver, warn error) { + npr = &NodesPayloadResolver{total: total} + npr.nrs, warn = NewNodes(nodes) + return +} + +func (r *NodesPayloadResolver) Results() []*NodeResolver { + return r.nrs +} + +func (r *NodesPayloadResolver) Metadata() *PaginationMetadataResolver { + return NewPaginationMetadata(r.total) +} diff --git a/core/web/resolver/node_test.go b/core/web/resolver/node_test.go new file mode 100644 index 00000000..b00cfabf --- /dev/null +++ b/core/web/resolver/node_test.go @@ -0,0 +1,165 @@ +package resolver + +import ( + "testing" + + gqlerrors "github.com/graph-gophers/graphql-go/errors" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/types" + pluginmocks "github.com/goplugin/pluginv3.0/v2/core/services/plugin/mocks" + "github.com/goplugin/pluginv3.0/v2/core/web/testutils" +) + +func TestResolver_Nodes(t *testing.T) { + t.Parallel() + + var ( + query = ` + query GetNodes { + nodes { + results { + id + name + chain { + id + 
} + } + metadata { + total + } + } + }` + ) + gError := errors.New("error") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "nodes"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetRelayers").Return(&pluginmocks.FakeRelayerChainInteroperators{ + Nodes: []types.NodeStatus{ + { + ChainID: "1", + Name: "node-name", + Config: "Name='node-name'\nOrder=11\nHTTPURL='http://some-url'\nWSURL='ws://some-url'", + State: "alive", + }, + }, + Relayers: []loop.Relayer{ + testutils.MockRelayer{ChainStatus: types.ChainStatus{ + ID: "1", + Enabled: true, + Config: "", + }}, + }, + }) + + }, + query: query, + result: ` + { + "nodes": { + "results": [{ + "id": "node-name", + "name": "node-name", + "chain": { + "id": "1" + } + }], + "metadata": { + "total": 1 + } + } + }`, + }, + { + name: "generic error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.relayerChainInterops.NodesErr = gError + f.App.On("GetRelayers").Return(f.Mocks.relayerChainInterops) + }, + query: query, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"nodes"}, + Message: gError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} + +func Test_NodeQuery(t *testing.T) { + t.Parallel() + + query := ` + query GetNode { + node(id: "node-name") { + ... on Node { + name + wsURL + httpURL + order + } + ... 
on NotFoundError { + message + code + } + } + }` + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "node"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetRelayers").Return(&pluginmocks.FakeRelayerChainInteroperators{Relayers: []loop.Relayer{ + testutils.MockRelayer{NodeStatuses: []types.NodeStatus{ + { + Name: "node-name", + Config: "Name='node-name'\nOrder=11\nHTTPURL='http://some-url'\nWSURL='ws://some-url'", + }, + }}, + }}) + }, + query: query, + result: ` + { + "node": { + "name": "node-name", + "wsURL": "ws://some-url", + "httpURL": "http://some-url", + "order": 11 + } + }`, + }, + { + name: "not found error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("GetRelayers").Return(&pluginmocks.FakeRelayerChainInteroperators{Relayers: []loop.Relayer{}}) + }, + query: query, + result: ` + { + "node": { + "message": "node not found", + "code": "NOT_FOUND" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/ocr.go b/core/web/resolver/ocr.go new file mode 100644 index 00000000..813f83c5 --- /dev/null +++ b/core/web/resolver/ocr.go @@ -0,0 +1,117 @@ +package resolver + +import ( + "github.com/graph-gophers/graphql-go" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" +) + +type OCRKeyBundleResolver struct { + key ocrkey.KeyV2 +} + +func NewOCRKeyBundleResolver(key ocrkey.KeyV2) OCRKeyBundleResolver { + return OCRKeyBundleResolver{key: key} +} + +func (k OCRKeyBundleResolver) ID() graphql.ID { + return graphql.ID(k.key.ID()) +} + +func (k OCRKeyBundleResolver) ConfigPublicKey() string { + return ocrkey.ConfigPublicKey(k.key.PublicKeyConfig()).String() +} + +func (k OCRKeyBundleResolver) OffChainPublicKey() string { + return k.key.OffChainSigning.PublicKey().String() +} + +func (k OCRKeyBundleResolver) 
OnChainSigningAddress() string { + return k.key.OnChainSigning.Address().String() +} + +type OCRKeyBundlesPayloadResolver struct { + keys []ocrkey.KeyV2 +} + +func NewOCRKeyBundlesPayloadResolver(keys []ocrkey.KeyV2) *OCRKeyBundlesPayloadResolver { + return &OCRKeyBundlesPayloadResolver{keys: keys} +} + +func (r *OCRKeyBundlesPayloadResolver) Results() []OCRKeyBundleResolver { + var bundles []OCRKeyBundleResolver + for _, k := range r.keys { + bundles = append(bundles, NewOCRKeyBundleResolver(k)) + } + return bundles +} + +// -- CreateOCRKeyBundle Mutation -- + +type CreateOCRKeyBundlePayloadResolver struct { + key *ocrkey.KeyV2 +} + +func NewCreateOCRKeyBundlePayload(key *ocrkey.KeyV2) *CreateOCRKeyBundlePayloadResolver { + return &CreateOCRKeyBundlePayloadResolver{key: key} +} + +func (r *CreateOCRKeyBundlePayloadResolver) ToCreateOCRKeyBundleSuccess() (*CreateOCRKeyBundleSuccessResolver, bool) { + if r.key != nil { + return NewCreateOCRKeyBundleSuccess(r.key), true + } + + return nil, false +} + +type CreateOCRKeyBundleSuccessResolver struct { + key *ocrkey.KeyV2 +} + +func NewCreateOCRKeyBundleSuccess(key *ocrkey.KeyV2) *CreateOCRKeyBundleSuccessResolver { + return &CreateOCRKeyBundleSuccessResolver{key: key} +} + +func (r *CreateOCRKeyBundleSuccessResolver) Bundle() *OCRKeyBundleResolver { + return &OCRKeyBundleResolver{key: *r.key} +} + +// -- Delete -- + +type DeleteOCRKeyBundleSuccessResolver struct { + key ocrkey.KeyV2 +} + +func NewDeleteOCRKeyBundleSuccessResolver(key ocrkey.KeyV2) *DeleteOCRKeyBundleSuccessResolver { + return &DeleteOCRKeyBundleSuccessResolver{key: key} +} + +func (r *DeleteOCRKeyBundleSuccessResolver) Bundle() OCRKeyBundleResolver { + return OCRKeyBundleResolver{key: r.key} +} + +type DeleteOCRKeyBundlePayloadResolver struct { + key ocrkey.KeyV2 + NotFoundErrorUnionType +} + +func NewDeleteOCRKeyBundlePayloadResolver(key ocrkey.KeyV2, err error) *DeleteOCRKeyBundlePayloadResolver { + var e NotFoundErrorUnionType + + if err != nil { + e 
= NotFoundErrorUnionType{err: err, message: err.Error(), isExpectedErrorFn: func(err error) bool { + return errors.As(err, &keystore.KeyNotFoundError{}) + }} + } + + return &DeleteOCRKeyBundlePayloadResolver{key: key, NotFoundErrorUnionType: e} +} + +func (r *DeleteOCRKeyBundlePayloadResolver) ToDeleteOCRKeyBundleSuccess() (*DeleteOCRKeyBundleSuccessResolver, bool) { + if r.err == nil { + return NewDeleteOCRKeyBundleSuccessResolver(r.key), true + } + return nil, false +} diff --git a/core/web/resolver/ocr2_keys.go b/core/web/resolver/ocr2_keys.go new file mode 100644 index 00000000..759fe30e --- /dev/null +++ b/core/web/resolver/ocr2_keys.go @@ -0,0 +1,208 @@ +package resolver + +import ( + "encoding/hex" + "fmt" + "strings" + + "github.com/graph-gophers/graphql-go" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" +) + +// OCR2ChainType defines OCR2 Chain Types accepted on this resolver +type OCR2ChainType string + +// These constants map to the enum type OCR2ChainType in ocr2_keys.graphql +const ( + // OCR2ChainTypeEVM defines OCR2 EVM Chain Type + OCR2ChainTypeEVM = "EVM" + // OCR2ChainTypeCosmos defines OCR2 Cosmos Chain Type + OCR2ChainTypeCosmos = "COSMOS" + // OCR2ChainTypeSolana defines OCR2 Solana Chain Type + OCR2ChainTypeSolana = "SOLANA" + // OCR2ChainTypeStarkNet defines OCR2 StarkNet Chain Type + OCR2ChainTypeStarkNet = "STARKNET" +) + +// ToOCR2ChainType turns a valid string into a OCR2ChainType +func ToOCR2ChainType(s string) (OCR2ChainType, error) { + switch s { + case string(chaintype.EVM): + return OCR2ChainTypeEVM, nil + case string(chaintype.Cosmos): + return OCR2ChainTypeCosmos, nil + case string(chaintype.Solana): + return OCR2ChainTypeSolana, nil + case string(chaintype.StarkNet): + return OCR2ChainTypeStarkNet, nil + default: + return "", errors.New("unknown ocr2 chain type") + } +} + +// FromOCR2ChainType returns the 
string (lowercased) value from a OCR2ChainType +func FromOCR2ChainType(ct OCR2ChainType) string { + switch ct { + case OCR2ChainTypeEVM: + return string(chaintype.EVM) + case OCR2ChainTypeCosmos: + return string(chaintype.Cosmos) + case OCR2ChainTypeSolana: + return string(chaintype.Solana) + case OCR2ChainTypeStarkNet: + return string(chaintype.StarkNet) + default: + return strings.ToLower(string(ct)) + } +} + +// OCR2KeyBundleResolver defines the OCR2 Key bundle on GQL +type OCR2KeyBundleResolver struct { + key ocr2key.KeyBundle +} + +// NewOCR2KeyBundle creates a new GQL OCR2 key bundle resolver +func NewOCR2KeyBundle(key ocr2key.KeyBundle) *OCR2KeyBundleResolver { + return &OCR2KeyBundleResolver{key: key} +} + +// ID returns the OCR2 Key bundle ID +func (r OCR2KeyBundleResolver) ID() graphql.ID { + return graphql.ID(r.key.ID()) +} + +// ChainType returns the OCR2 Key bundle chain type +func (r OCR2KeyBundleResolver) ChainType() *OCR2ChainType { + ct, err := ToOCR2ChainType(string(r.key.ChainType())) + if err != nil { + return nil + } + + return &ct +} + +// OnChainPublicKey returns the OCR2 Key bundle on-chain public key +func (r OCR2KeyBundleResolver) OnChainPublicKey() string { + return fmt.Sprintf("ocr2on_%s_%s", r.key.ChainType(), r.key.OnChainPublicKey()) +} + +// OffChainPublicKey returns the OCR2 Key bundle off-chain public key +func (r OCR2KeyBundleResolver) OffChainPublicKey() string { + pubKey := r.key.OffchainPublicKey() + return fmt.Sprintf("ocr2off_%s_%s", r.key.ChainType(), hex.EncodeToString(pubKey[:])) +} + +// ConfigPublicKey returns the OCR2 Key bundle config public key +func (r OCR2KeyBundleResolver) ConfigPublicKey() string { + configPublic := r.key.ConfigEncryptionPublicKey() + return fmt.Sprintf("ocr2cfg_%s_%s", r.key.ChainType(), hex.EncodeToString(configPublic[:])) +} + +// -- OCR2KeyBundles Query -- + +// OCR2KeyBundlesPayloadResolver defines the OCR2 Key bundles query resolver +type OCR2KeyBundlesPayloadResolver struct { + keys 
[]ocr2key.KeyBundle +} + +// NewOCR2KeyBundlesPayload returns the OCR2 key bundles resolver +func NewOCR2KeyBundlesPayload(keys []ocr2key.KeyBundle) *OCR2KeyBundlesPayloadResolver { + return &OCR2KeyBundlesPayloadResolver{keys: keys} +} + +// Results resolves the list of OCR2 key bundles +func (r *OCR2KeyBundlesPayloadResolver) Results() []OCR2KeyBundleResolver { + var results []OCR2KeyBundleResolver + + for _, k := range r.keys { + results = append(results, *NewOCR2KeyBundle(k)) + } + + return results +} + +// -- CreateOCR2KeyBundle Mutation -- + +// CreateOCR2KeyBundlePayloadResolver defines the create OCR2 Key bundle mutation resolver +type CreateOCR2KeyBundlePayloadResolver struct { + key *ocr2key.KeyBundle +} + +// NewCreateOCR2KeyBundlePayload returns the create OCR2 key bundle resolver +func NewCreateOCR2KeyBundlePayload(key *ocr2key.KeyBundle) *CreateOCR2KeyBundlePayloadResolver { + return &CreateOCR2KeyBundlePayloadResolver{key: key} +} + +// ToCreateOCR2KeyBundleSuccess resolves the create OCR2 key bundle success +func (r *CreateOCR2KeyBundlePayloadResolver) ToCreateOCR2KeyBundleSuccess() (*CreateOCR2KeyBundleSuccessResolver, bool) { + if r.key == nil { + return nil, false + } + + return NewCreateOCR2KeyBundleSuccess(r.key), true +} + +// CreateOCR2KeyBundleSuccessResolver defines the create OCR2 key bundle success resolver +type CreateOCR2KeyBundleSuccessResolver struct { + key *ocr2key.KeyBundle +} + +// NewCreateOCR2KeyBundleSuccess returns the create OCR2 key bundle success resolver +func NewCreateOCR2KeyBundleSuccess(key *ocr2key.KeyBundle) *CreateOCR2KeyBundleSuccessResolver { + return &CreateOCR2KeyBundleSuccessResolver{key: key} +} + +// Bundle resolves the creates OCR2 key bundle +func (r *CreateOCR2KeyBundleSuccessResolver) Bundle() *OCR2KeyBundleResolver { + return NewOCR2KeyBundle(*r.key) +} + +// -- DeleteOCR2KeyBundle mutation -- + +// DeleteOCR2KeyBundlePayloadResolver defines the delete OCR2 Key bundle mutation resolver +type 
DeleteOCR2KeyBundlePayloadResolver struct { + key *ocr2key.KeyBundle + NotFoundErrorUnionType +} + +// NewDeleteOCR2KeyBundlePayloadResolver returns the delete OCR2 key bundle payload resolver +func NewDeleteOCR2KeyBundlePayloadResolver(key *ocr2key.KeyBundle, err error) *DeleteOCR2KeyBundlePayloadResolver { + var e NotFoundErrorUnionType + + if err != nil { + e = NotFoundErrorUnionType{err: err, message: err.Error(), isExpectedErrorFn: func(err error) bool { + // returning true since the only error triggered by the search is a not found error + // and we don't want the default check to happen, since it is a SQL Not Found error check + return true + }} + } + + return &DeleteOCR2KeyBundlePayloadResolver{key: key, NotFoundErrorUnionType: e} +} + +// ToDeleteOCR2KeyBundleSuccess resolves the delete OCR2 key bundle success +func (r *DeleteOCR2KeyBundlePayloadResolver) ToDeleteOCR2KeyBundleSuccess() (*DeleteOCR2KeyBundleSuccessResolver, bool) { + if r.err == nil { + return NewDeleteOCR2KeyBundleSuccessResolver(r.key), true + } + + return nil, false +} + +// DeleteOCR2KeyBundleSuccessResolver defines the delete OCR2 key bundle success resolver +type DeleteOCR2KeyBundleSuccessResolver struct { + key *ocr2key.KeyBundle +} + +// NewDeleteOCR2KeyBundleSuccessResolver returns the delete OCR2 key bundle success resolver +func NewDeleteOCR2KeyBundleSuccessResolver(key *ocr2key.KeyBundle) *DeleteOCR2KeyBundleSuccessResolver { + return &DeleteOCR2KeyBundleSuccessResolver{key: key} +} + +// Bundle resolves the creates OCR2 key bundle +func (r *DeleteOCR2KeyBundleSuccessResolver) Bundle() *OCR2KeyBundleResolver { + return NewOCR2KeyBundle(*r.key) +} diff --git a/core/web/resolver/ocr2_keys_test.go b/core/web/resolver/ocr2_keys_test.go new file mode 100644 index 00000000..3bb1fa60 --- /dev/null +++ b/core/web/resolver/ocr2_keys_test.go @@ -0,0 +1,292 @@ +package resolver + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "testing" + + gqlerrors 
"github.com/graph-gophers/graphql-go/errors" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/keystest" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocr2key" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestResolver_GetOCR2KeyBundles(t *testing.T) { + t.Parallel() + + query := ` + query GetOCR2KeyBundles { + ocr2KeyBundles { + results { + id + chainType + configPublicKey + offChainPublicKey + onChainPublicKey + } + } + } + ` + + gError := errors.New("error") + fakeKeys := []ocr2key.KeyBundle{ + ocr2key.MustNewInsecure(keystest.NewRandReaderFromSeed(1), "evm"), + ocr2key.MustNewInsecure(keystest.NewRandReaderFromSeed(1), "cosmos"), + ocr2key.MustNewInsecure(keystest.NewRandReaderFromSeed(1), "solana"), + ocr2key.MustNewInsecure(keystest.NewRandReaderFromSeed(1), "starknet"), + } + expectedBundles := []map[string]interface{}{} + for _, k := range fakeKeys { + configPublic := k.ConfigEncryptionPublicKey() + ct, err := ToOCR2ChainType(string(k.ChainType())) + require.NoError(t, err) + pubKey := k.OffchainPublicKey() + expectedBundles = append(expectedBundles, map[string]interface{}{ + "id": k.ID(), + "chainType": ct, + "onChainPublicKey": fmt.Sprintf("ocr2on_%s_%s", k.ChainType(), k.OnChainPublicKey()), + "configPublicKey": fmt.Sprintf("ocr2cfg_%s_%s", k.ChainType(), hex.EncodeToString(configPublic[:])), + "offChainPublicKey": fmt.Sprintf("ocr2off_%s_%s", k.ChainType(), hex.EncodeToString(pubKey[:])), + }) + } + + d, err := json.Marshal(map[string]interface{}{ + "ocr2KeyBundles": map[string]interface{}{ + "results": expectedBundles, + }, + }) + assert.NoError(t, err) + expected := string(d) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "ocr2KeyBundles"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + 
f.Mocks.ocr2.On("GetAll").Return(fakeKeys, nil) + f.Mocks.keystore.On("OCR2").Return(f.Mocks.ocr2) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: expected, + }, + { + name: "generic error on GetAll()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.ocr2.On("GetAll").Return(nil, gError) + f.Mocks.keystore.On("OCR2").Return(f.Mocks.ocr2) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"ocr2KeyBundles"}, + Message: gError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_CreateOCR2KeyBundle(t *testing.T) { + t.Parallel() + + mutation := ` + mutation CreateOCR2KeyBundle($chainType: OCR2ChainType!) { + createOCR2KeyBundle(chainType: $chainType) { + ... on CreateOCR2KeyBundleSuccess { + bundle { + id + chainType + configPublicKey + offChainPublicKey + onChainPublicKey + } + } + } + } + ` + + gError := errors.New("error") + fakeKey := ocr2key.MustNewInsecure(keystest.NewRandReaderFromSeed(1), "evm") + ct, err := ToOCR2ChainType(string(fakeKey.ChainType())) + require.NoError(t, err) + + configPublic := fakeKey.ConfigEncryptionPublicKey() + pubKey := fakeKey.OffchainPublicKey() + d, err := json.Marshal(map[string]interface{}{ + "createOCR2KeyBundle": map[string]interface{}{ + "bundle": map[string]interface{}{ + "id": fakeKey.ID(), + "chainType": ct, + "onChainPublicKey": fmt.Sprintf("ocr2on_%s_%s", fakeKey.ChainType(), fakeKey.OnChainPublicKey()), + "configPublicKey": fmt.Sprintf("ocr2cfg_%s_%s", fakeKey.ChainType(), hex.EncodeToString(configPublic[:])), + "offChainPublicKey": fmt.Sprintf("ocr2off_%s_%s", fakeKey.ChainType(), hex.EncodeToString(pubKey[:])), + }, + }, + }) + assert.NoError(t, err) + expected := string(d) + + variables := map[string]interface{}{ + "chainType": OCR2ChainTypeEVM, + } + + testCases := []GQLTestCase{ + 
unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "createOCR2KeyBundle"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.ocr2.On("Create", chaintype.ChainType("evm")).Return(fakeKey, nil) + f.Mocks.keystore.On("OCR2").Return(f.Mocks.ocr2) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: mutation, + variables: variables, + result: expected, + }, + { + name: "generic error on Create()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.ocr2.On("Create", chaintype.ChainType("evm")).Return(nil, gError) + f.Mocks.keystore.On("OCR2").Return(f.Mocks.ocr2) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"createOCR2KeyBundle"}, + Message: gError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_DeleteOCR2KeyBundle(t *testing.T) { + t.Parallel() + + fakeKey := ocr2key.MustNewInsecure(keystest.NewRandReaderFromSeed(1), "evm") + + mutation := ` + mutation DeleteOCR2KeyBundle($id: ID!) { + deleteOCR2KeyBundle(id: $id) { + ... on DeleteOCR2KeyBundleSuccess { + bundle { + id + chainType + configPublicKey + offChainPublicKey + onChainPublicKey + } + } + ... 
on NotFoundError { + message + code + } + } + } + ` + variables := map[string]interface{}{ + "id": fakeKey.ID(), + } + + ct, err := ToOCR2ChainType(string(fakeKey.ChainType())) + require.NoError(t, err) + + configPublic := fakeKey.ConfigEncryptionPublicKey() + pubKey := fakeKey.OffchainPublicKey() + d, err := json.Marshal(map[string]interface{}{ + "deleteOCR2KeyBundle": map[string]interface{}{ + "bundle": map[string]interface{}{ + "id": fakeKey.ID(), + "chainType": ct, + "onChainPublicKey": fmt.Sprintf("ocr2on_%s_%s", fakeKey.ChainType(), fakeKey.OnChainPublicKey()), + "configPublicKey": fmt.Sprintf("ocr2cfg_%s_%s", fakeKey.ChainType(), hex.EncodeToString(configPublic[:])), + "offChainPublicKey": fmt.Sprintf("ocr2off_%s_%s", fakeKey.ChainType(), hex.EncodeToString(pubKey[:])), + }, + }, + }) + assert.NoError(t, err) + expected := string(d) + + gError := errors.New("error") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "deleteOCR2KeyBundle"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.ocr2.On("Delete", fakeKey.ID()).Return(nil) + f.Mocks.ocr2.On("Get", fakeKey.ID()).Return(fakeKey, nil) + f.Mocks.keystore.On("OCR2").Return(f.Mocks.ocr2) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: mutation, + variables: variables, + result: expected, + }, + { + name: "not found error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.ocr2.On("Get", fakeKey.ID()).Return(fakeKey, gError) + f.Mocks.keystore.On("OCR2").Return(f.Mocks.ocr2) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: mutation, + variables: variables, + result: `{ + "deleteOCR2KeyBundle": { + "code": "NOT_FOUND", + "message": "error" + } + }`, + }, + { + name: "generic error on Delete()", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.ocr2.On("Delete", fakeKey.ID()).Return(gError) + f.Mocks.ocr2.On("Get", 
fakeKey.ID()).Return(fakeKey, nil) + f.Mocks.keystore.On("OCR2").Return(f.Mocks.ocr2) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"deleteOCR2KeyBundle"}, + Message: gError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/ocr_test.go b/core/web/resolver/ocr_test.go new file mode 100644 index 00000000..a963320b --- /dev/null +++ b/core/web/resolver/ocr_test.go @@ -0,0 +1,196 @@ +package resolver + +import ( + "encoding/json" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ocrkey" +) + +func TestResolver_GetOCRKeyBundles(t *testing.T) { + t.Parallel() + + query := ` + query GetOCRKeyBundles { + ocrKeyBundles { + results { + id + configPublicKey + offChainPublicKey + onChainSigningAddress + } + } + } + ` + + fakeKeys := []ocrkey.KeyV2{} + expectedBundles := []map[string]string{} + for i := 0; i < 2; i++ { + k := ocrkey.MustNewV2XXXTestingOnly(big.NewInt(1)) + fakeKeys = append(fakeKeys, k) + expectedBundles = append(expectedBundles, map[string]string{ + "id": k.ID(), + "configPublicKey": ocrkey.ConfigPublicKey(k.PublicKeyConfig()).String(), + "offChainPublicKey": k.OffChainSigning.PublicKey().String(), + "onChainSigningAddress": k.OnChainSigning.Address().String(), + }) + } + + d, err := json.Marshal(map[string]interface{}{ + "ocrKeyBundles": map[string]interface{}{ + "results": expectedBundles, + }, + }) + assert.NoError(t, err) + expected := string(d) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "ocrKeyBundles"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.ocr.On("GetAll").Return(fakeKeys, nil) + 
f.Mocks.keystore.On("OCR").Return(f.Mocks.ocr) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: expected, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_OCRCreateBundle(t *testing.T) { + t.Parallel() + + mutation := ` + mutation CreateOCRKeyBundle { + createOCRKeyBundle { + ... on CreateOCRKeyBundleSuccess { + bundle { + id + configPublicKey + offChainPublicKey + onChainSigningAddress + } + } + } + } + ` + + fakeKey := ocrkey.MustNewV2XXXTestingOnly(big.NewInt(1)) + + d, err := json.Marshal(map[string]interface{}{ + "createOCRKeyBundle": map[string]interface{}{ + "bundle": map[string]interface{}{ + "id": fakeKey.ID(), + "configPublicKey": ocrkey.ConfigPublicKey(fakeKey.PublicKeyConfig()).String(), + "offChainPublicKey": fakeKey.OffChainSigning.PublicKey().String(), + "onChainSigningAddress": fakeKey.OnChainSigning.Address().String(), + }, + }, + }) + assert.NoError(t, err) + expected := string(d) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation}, "createOCRKeyBundle"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.ocr.On("Create").Return(fakeKey, nil) + f.Mocks.keystore.On("OCR").Return(f.Mocks.ocr) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: mutation, + result: expected, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_OCRDeleteBundle(t *testing.T) { + t.Parallel() + + fakeKey := ocrkey.MustNewV2XXXTestingOnly(big.NewInt(1)) + + mutation := ` + mutation DeleteOCRKeyBundle($id: ID!) { + deleteOCRKeyBundle(id: $id) { + ... on DeleteOCRKeyBundleSuccess { + bundle { + id + configPublicKey + offChainPublicKey + onChainSigningAddress + } + } + ... 
on NotFoundError { + message + code + } + } + } + ` + variables := map[string]interface{}{ + "id": fakeKey.ID(), + } + + d, err := json.Marshal(map[string]interface{}{ + "deleteOCRKeyBundle": map[string]interface{}{ + "bundle": map[string]interface{}{ + "id": fakeKey.ID(), + "configPublicKey": ocrkey.ConfigPublicKey(fakeKey.PublicKeyConfig()).String(), + "offChainPublicKey": fakeKey.OffChainSigning.PublicKey().String(), + "onChainSigningAddress": fakeKey.OnChainSigning.Address().String(), + }, + }, + }) + assert.NoError(t, err) + expected := string(d) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "deleteOCRKeyBundle"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.ocr.On("Delete", fakeKey.ID()).Return(fakeKey, nil) + f.Mocks.keystore.On("OCR").Return(f.Mocks.ocr) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: mutation, + variables: variables, + result: expected, + }, + { + name: "not found error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.ocr. + On("Delete", fakeKey.ID()). 
+ Return(ocrkey.KeyV2{}, keystore.KeyNotFoundError{ID: "helloWorld", KeyType: "OCR"}) + f.Mocks.keystore.On("OCR").Return(f.Mocks.ocr) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: mutation, + variables: variables, + result: `{ + "deleteOCRKeyBundle": { + "code":"NOT_FOUND", + "message":"unable to find OCR key with id helloWorld" + } + }`, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/p2p.go b/core/web/resolver/p2p.go new file mode 100644 index 00000000..927bc048 --- /dev/null +++ b/core/web/resolver/p2p.go @@ -0,0 +1,115 @@ +package resolver + +import ( + "github.com/graph-gophers/graphql-go" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" +) + +type P2PKeyResolver struct { + key p2pkey.KeyV2 +} + +func NewP2PKey(key p2pkey.KeyV2) P2PKeyResolver { + return P2PKeyResolver{key: key} +} + +func (k P2PKeyResolver) ID() graphql.ID { + return graphql.ID(k.key.ID()) +} + +func (k P2PKeyResolver) PeerID() string { + return k.key.PeerID().String() +} + +func (k P2PKeyResolver) PublicKey() string { + return k.key.PublicKeyHex() +} + +// -- P2PKeys Query -- + +type P2PKeysPayloadResolver struct { + keys []p2pkey.KeyV2 +} + +func NewP2PKeysPayload(keys []p2pkey.KeyV2) *P2PKeysPayloadResolver { + return &P2PKeysPayloadResolver{keys: keys} +} + +func (r *P2PKeysPayloadResolver) Results() []P2PKeyResolver { + var results []P2PKeyResolver + for _, k := range r.keys { + results = append(results, NewP2PKey(k)) + } + return results +} + +// -- CreateP2PKey Mutation -- + +type CreateP2PKeySuccessResolver struct { + key p2pkey.KeyV2 +} + +func NewCreateP2PKeySuccess(key p2pkey.KeyV2) *CreateP2PKeySuccessResolver { + return &CreateP2PKeySuccessResolver{key: key} +} + +func (r *CreateP2PKeySuccessResolver) P2PKey() P2PKeyResolver { + return NewP2PKey(r.key) +} + +type CreateP2PKeyPayloadResolver struct { + p2pKey p2pkey.KeyV2 +} 
+ +func NewCreateP2PKeyPayload(key p2pkey.KeyV2) *CreateP2PKeyPayloadResolver { + return &CreateP2PKeyPayloadResolver{p2pKey: key} +} + +func (r *CreateP2PKeyPayloadResolver) P2PKey() P2PKeyResolver { + return NewP2PKey(r.p2pKey) +} + +func (r *CreateP2PKeyPayloadResolver) ToCreateP2PKeySuccess() (*CreateP2PKeySuccessResolver, bool) { + return NewCreateP2PKeySuccess(r.p2pKey), true +} + +// -- DeleteP2PKey Mutation -- + +type DeleteP2PKeySuccessResolver struct { + p2pKey p2pkey.KeyV2 +} + +func NewDeleteP2PKeySuccess(p2pKey p2pkey.KeyV2) *DeleteP2PKeySuccessResolver { + return &DeleteP2PKeySuccessResolver{p2pKey: p2pKey} +} + +func (r *DeleteP2PKeySuccessResolver) P2PKey() P2PKeyResolver { + return NewP2PKey(r.p2pKey) +} + +type DeleteP2PKeyPayloadResolver struct { + p2pKey p2pkey.KeyV2 + NotFoundErrorUnionType +} + +func NewDeleteP2PKeyPayload(p2pKey p2pkey.KeyV2, err error) *DeleteP2PKeyPayloadResolver { + var e NotFoundErrorUnionType + + if err != nil { + e = NotFoundErrorUnionType{err: err, message: err.Error(), isExpectedErrorFn: func(err error) bool { + return errors.As(err, &keystore.KeyNotFoundError{}) + }} + } + + return &DeleteP2PKeyPayloadResolver{p2pKey: p2pKey, NotFoundErrorUnionType: e} +} + +func (r *DeleteP2PKeyPayloadResolver) ToDeleteP2PKeySuccess() (*DeleteP2PKeySuccessResolver, bool) { + if r.err == nil { + return NewDeleteP2PKeySuccess(r.p2pKey), true + } + return nil, false +} diff --git a/core/web/resolver/p2p_test.go b/core/web/resolver/p2p_test.go new file mode 100644 index 00000000..87387a81 --- /dev/null +++ b/core/web/resolver/p2p_test.go @@ -0,0 +1,199 @@ +package resolver + +import ( + "encoding/json" + "fmt" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/p2pkey" +) + +func TestResolver_GetP2PKeys(t *testing.T) { + t.Parallel() + + query := ` + query GetP2PKeys { + p2pKeys { + results { + id 
+ peerID + publicKey + } + } + } + ` + + fakeKeys := []p2pkey.KeyV2{} + expectedKeys := []map[string]string{} + for i := 0; i < 2; i++ { + k := p2pkey.MustNewV2XXXTestingOnly(big.NewInt(1)) + fakeKeys = append(fakeKeys, k) + expectedKeys = append(expectedKeys, map[string]string{ + "id": k.ID(), + "peerID": k.PeerID().String(), + "publicKey": k.PublicKeyHex(), + }) + } + + d, err := json.Marshal(map[string]interface{}{ + "p2pKeys": map[string]interface{}{ + "results": expectedKeys, + }, + }) + assert.NoError(t, err) + expected := string(d) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "p2pKeys"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.p2p.On("GetAll").Return(fakeKeys, nil) + f.Mocks.keystore.On("P2P").Return(f.Mocks.p2p) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: expected, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_CreateP2PKey(t *testing.T) { + t.Parallel() + + query := ` + mutation CreateP2PKey { + createP2PKey { + ... 
on CreateP2PKeySuccess { + p2pKey { + id + peerID + publicKey + } + } + } + } + ` + + fakeKey := p2pkey.MustNewV2XXXTestingOnly(big.NewInt(1)) + + d, err := json.Marshal(map[string]interface{}{ + "createP2PKey": map[string]interface{}{ + "p2pKey": map[string]interface{}{ + "id": fakeKey.ID(), + "peerID": fakeKey.PeerID().String(), + "publicKey": fakeKey.PublicKeyHex(), + }, + }, + }) + assert.NoError(t, err) + expected := string(d) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "createP2PKey"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.p2p.On("Create").Return(fakeKey, nil) + f.Mocks.keystore.On("P2P").Return(f.Mocks.p2p) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: expected, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_DeleteP2PKey(t *testing.T) { + t.Parallel() + + fakeKey := p2pkey.MustNewV2XXXTestingOnly(big.NewInt(1)) + + query := ` + mutation DeleteP2PKey($id: ID!) { + deleteP2PKey(id: $id) { + ... on DeleteP2PKeySuccess { + p2pKey { + id + peerID + publicKey + } + } + + ... 
on NotFoundError { + message + code + } + } + } + ` + + variables := map[string]interface{}{ + "id": fakeKey.ID(), + } + + peerID, err := p2pkey.MakePeerID(fakeKey.ID()) + assert.NoError(t, err) + + d, err := json.Marshal(map[string]interface{}{ + "deleteP2PKey": map[string]interface{}{ + "p2pKey": map[string]interface{}{ + "id": fakeKey.ID(), + "peerID": fakeKey.PeerID().String(), + "publicKey": fakeKey.PublicKeyHex(), + }, + }, + }) + assert.NoError(t, err) + expected := string(d) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query, variables: variables}, "deleteP2PKey"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.p2p.On("Delete", peerID).Return(fakeKey, nil) + f.Mocks.keystore.On("P2P").Return(f.Mocks.p2p) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + variables: variables, + result: expected, + }, + { + name: "not found error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.p2p. + On("Delete", peerID). 
+ Return( + p2pkey.KeyV2{}, + keystore.KeyNotFoundError{ID: peerID.String(), KeyType: "P2P"}, + ) + f.Mocks.keystore.On("P2P").Return(f.Mocks.p2p) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + variables: variables, + result: fmt.Sprintf(`{ + "deleteP2PKey": { + "code":"NOT_FOUND", + "message":"unable to find P2P key with id %s" + } + }`, peerID.String()), + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/pagination.go b/core/web/resolver/pagination.go new file mode 100644 index 00000000..9915ca43 --- /dev/null +++ b/core/web/resolver/pagination.go @@ -0,0 +1,13 @@ +package resolver + +type PaginationMetadataResolver struct { + total int32 +} + +func NewPaginationMetadata(total int32) *PaginationMetadataResolver { + return &PaginationMetadataResolver{total: total} +} + +func (r *PaginationMetadataResolver) Total() int32 { + return r.total +} diff --git a/core/web/resolver/plugins.go b/core/web/resolver/plugins.go new file mode 100644 index 00000000..e79bb420 --- /dev/null +++ b/core/web/resolver/plugins.go @@ -0,0 +1,29 @@ +package resolver + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/feeds" +) + +type PluginsResolver struct { + plugins feeds.Plugins +} + +// Commit returns the the status of the commit plugin. +func (r PluginsResolver) Commit() bool { + return r.plugins.Commit +} + +// Execute returns the the status of the execute plugin. +func (r PluginsResolver) Execute() bool { + return r.plugins.Execute +} + +// Median returns the the status of the median plugin. +func (r PluginsResolver) Median() bool { + return r.plugins.Median +} + +// Mercury returns the the status of the mercury plugin. 
+func (r PluginsResolver) Mercury() bool { + return r.plugins.Mercury +} diff --git a/core/web/resolver/query.go b/core/web/resolver/query.go new file mode 100644 index 00000000..25321d1e --- /dev/null +++ b/core/web/resolver/query.go @@ -0,0 +1,578 @@ +package resolver + +import ( + "context" + "database/sql" + "fmt" + "sort" + + "github.com/ethereum/go-ethereum/common" + "github.com/graph-gophers/graphql-go" + "github.com/pkg/errors" + + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/chains" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + evmrelay "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" +) + +// Bridge retrieves a bridges by name. +func (r *Resolver) Bridge(ctx context.Context, args struct{ ID graphql.ID }) (*BridgePayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + name, err := bridges.ParseBridgeName(string(args.ID)) + if err != nil { + return nil, err + } + + bridge, err := r.App.BridgeORM().FindBridge(name) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewBridgePayload(bridge, err), nil + } + + return nil, err + } + + return NewBridgePayload(bridge, nil), nil +} + +// Bridges retrieves a paginated list of bridges. 
+func (r *Resolver) Bridges(ctx context.Context, args struct { + Offset *int32 + Limit *int32 +}) (*BridgesPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + offset := pageOffset(args.Offset) + limit := pageLimit(args.Limit) + + brdgs, count, err := r.App.BridgeORM().BridgeTypes(offset, limit) + if err != nil { + return nil, err + } + + return NewBridgesPayload(brdgs, int32(count)), nil +} + +// Chain retrieves a chain by id. +func (r *Resolver) Chain(ctx context.Context, args struct{ ID graphql.ID }) (*ChainPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + cs, _, err := r.App.EVMORM().Chains(relay.ChainID(args.ID)) + if err != nil { + return nil, err + } + l := len(cs) + if l == 0 { + return NewChainPayload(types.ChainStatus{}, chains.ErrNotFound), nil + } + if l > 1 { + return nil, fmt.Errorf("multiple chains found: %d", len(cs)) + } + return NewChainPayload(cs[0], nil), nil +} + +// Chains retrieves a paginated list of chains. 
+func (r *Resolver) Chains(ctx context.Context, args struct { + Offset *int32 + Limit *int32 +}) (*ChainsPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + offset := pageOffset(args.Offset) + limit := pageLimit(args.Limit) + + var chains []types.ChainStatus + for _, rel := range r.App.GetRelayers().Slice() { + status, err := rel.GetChainStatus(ctx) + if err != nil { + return nil, err + } + chains = append(chains, status) + } + count := len(chains) + + if count == 0 { + //No chains are configured, return an empty ChainsPayload, so we don't break the UI + return NewChainsPayload(nil, 0), nil + } + + // bound the chain results + if offset >= len(chains) { + return nil, fmt.Errorf("offset %d out of range", offset) + } + end := len(chains) + if limit > 0 && offset+limit < end { + end = offset + limit + } + + return NewChainsPayload(chains[offset:end], int32(count)), nil +} + +// FeedsManager retrieves a feeds manager by id. +func (r *Resolver) FeedsManager(ctx context.Context, args struct{ ID graphql.ID }) (*FeedsManagerPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + id, err := stringutils.ToInt64(string(args.ID)) + if err != nil { + return nil, err + } + + mgr, err := r.App.GetFeedsService().GetManager(id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewFeedsManagerPayload(nil, err), nil + } + + return nil, err + } + + return NewFeedsManagerPayload(mgr, nil), nil +} + +func (r *Resolver) FeedsManagers(ctx context.Context) (*FeedsManagersPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + mgrs, err := r.App.GetFeedsService().ListManagers() + if err != nil { + return nil, err + } + + return NewFeedsManagersPayload(mgrs), nil +} + +// Job retrieves a job by id. 
+func (r *Resolver) Job(ctx context.Context, args struct{ ID graphql.ID }) (*JobPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + id, err := stringutils.ToInt32(string(args.ID)) + if err != nil { + return nil, err + } + + j, err := r.App.JobORM().FindJobWithoutSpecErrors(id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewJobPayload(r.App, nil, err), nil + } + + //We still need to show the job in UI/CLI even if the chain id is disabled + if errors.Is(err, chains.ErrNoSuchChainID) { + return NewJobPayload(r.App, &j, err), nil + } + + return nil, err + } + + return NewJobPayload(r.App, &j, nil), nil +} + +// Jobs fetches a paginated list of jobs +func (r *Resolver) Jobs(ctx context.Context, args struct { + Offset *int32 + Limit *int32 +}) (*JobsPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + offset := pageOffset(args.Offset) + limit := pageLimit(args.Limit) + + jobs, count, err := r.App.JobORM().FindJobs(offset, limit) + if err != nil { + return nil, err + } + + return NewJobsPayload(r.App, jobs, int32(count)), nil +} + +func (r *Resolver) OCRKeyBundles(ctx context.Context) (*OCRKeyBundlesPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + ocrKeyBundles, err := r.App.GetKeyStore().OCR().GetAll() + if err != nil { + return nil, err + } + + return NewOCRKeyBundlesPayloadResolver(ocrKeyBundles), nil +} + +func (r *Resolver) CSAKeys(ctx context.Context) (*CSAKeysPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + keys, err := r.App.GetKeyStore().CSA().GetAll() + if err != nil { + return nil, err + } + + return NewCSAKeysResolver(keys), nil +} + +// Features retrieves each featured enabled by boolean mapping +func (r *Resolver) Features(ctx context.Context) (*FeaturesPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + 
}
+
+	return NewFeaturesPayloadResolver(r.App.GetConfig().Feature()), nil
+}
+
+// Node retrieves a node by ID (Name)
+func (r *Resolver) Node(ctx context.Context, args struct{ ID graphql.ID }) (*NodePayloadResolver, error) {
+	if err := authenticateUser(ctx); err != nil {
+		return nil, err
+	}
+	// Debugf, not Debug: these messages contain printf verbs, and the sugared
+	// Debug would emit them verbatim without interpolating the arguments.
+	r.App.GetLogger().Debugf("resolver Node args %v", args)
+	name := string(args.ID)
+	r.App.GetLogger().Debugf("resolver Node name %s", name)
+
+	for _, relayer := range r.App.GetRelayers().Slice() {
+		statuses, _, _, err := relayer.ListNodeStatuses(ctx, 0, "")
+		if err != nil {
+			return nil, err
+		}
+		for i, s := range statuses {
+			if s.Name == name {
+				npr, err2 := NewNodePayloadResolver(&statuses[i], nil)
+				if err2 != nil {
+					return nil, err2
+				}
+				return npr, nil
+			}
+		}
+	}
+
+	r.App.GetLogger().Errorw("resolver getting node status", "err", chains.ErrNotFound)
+	return NewNodePayloadResolver(nil, chains.ErrNotFound)
+}
+
+func (r *Resolver) P2PKeys(ctx context.Context) (*P2PKeysPayloadResolver, error) {
+	if err := authenticateUser(ctx); err != nil {
+		return nil, err
+	}
+
+	p2pKeys, err := r.App.GetKeyStore().P2P().GetAll()
+	if err != nil {
+		return nil, err
+	}
+
+	return NewP2PKeysPayload(p2pKeys), nil
+}
+
+// VRFKeys fetches all VRF keys.
+func (r *Resolver) VRFKeys(ctx context.Context) (*VRFKeysPayloadResolver, error) {
+	if err := authenticateUser(ctx); err != nil {
+		return nil, err
+	}
+
+	keys, err := r.App.GetKeyStore().VRF().GetAll()
+	if err != nil {
+		return nil, err
+	}
+
+	return NewVRFKeysPayloadResolver(keys), nil
+}
+
+// VRFKey fetches the VRF key with the given ID.
+func (r *Resolver) VRFKey(ctx context.Context, args struct {
+	ID graphql.ID
+}) (*VRFKeyPayloadResolver, error) {
+	if err := authenticateUser(ctx); err != nil {
+		return nil, err
+	}
+
+	key, err := r.App.GetKeyStore().VRF().Get(string(args.ID))
+	if err != nil {
+		if errors.Is(errors.Cause(err), keystore.ErrMissingVRFKey) {
+			return NewVRFKeyPayloadResolver(vrfkey.KeyV2{}, err), nil
+		}
+		return nil, err
+	}
+
+	// err is nil here; return a literal nil rather than the named err variable.
+	return NewVRFKeyPayloadResolver(key, nil), nil
+}
+
+// JobProposal retrieves a job proposal by ID
+func (r *Resolver) JobProposal(ctx context.Context, args struct {
+	ID graphql.ID
+}) (*JobProposalPayloadResolver, error) {
+	if err := authenticateUser(ctx); err != nil {
+		return nil, err
+	}
+
+	id, err := stringutils.ToInt64(string(args.ID))
+	if err != nil {
+		return nil, err
+	}
+
+	jp, err := r.App.GetFeedsService().GetJobProposal(id)
+	if err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return NewJobProposalPayload(nil, err), nil
+		}
+
+		return nil, err
+	}
+
+	// err is nil on this path; pass nil explicitly instead of the stale variable.
+	return NewJobProposalPayload(jp, nil), nil
+}
+
+// Nodes retrieves a paginated list of nodes.
+func (r *Resolver) Nodes(ctx context.Context, args struct { + Offset *int32 + Limit *int32 +}) (*NodesPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + offset := pageOffset(args.Offset) + limit := pageLimit(args.Limit) + r.App.GetLogger().Debugw("resolver Nodes query", "offset", offset, "limit", limit) + allNodes, total, err := r.App.GetRelayers().NodeStatuses(ctx, offset, limit) + r.App.GetLogger().Debugw("resolver Nodes query result", "nodes", allNodes, "total", total, "err", err) + + if err != nil { + r.App.GetLogger().Errorw("Error creating get nodes status from app", "err", err) + return nil, err + } + npr, warn := NewNodesPayload(allNodes, int32(total)) + if warn != nil { + r.App.GetLogger().Warnw("Error creating NodesPayloadResolver", "err", warn) + } + return npr, nil +} + +func (r *Resolver) JobRuns(ctx context.Context, args struct { + Offset *int32 + Limit *int32 +}) (*JobRunsPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + limit := pageLimit(args.Limit) + offset := pageOffset(args.Offset) + + runs, count, err := r.App.JobORM().PipelineRuns(nil, offset, limit) + if err != nil { + return nil, err + } + + return NewJobRunsPayload(runs, int32(count), r.App), nil +} + +func (r *Resolver) JobRun(ctx context.Context, args struct { + ID graphql.ID +}) (*JobRunPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + id, err := stringutils.ToInt64(string(args.ID)) + if err != nil { + return nil, err + } + + jr, err := r.App.JobORM().FindPipelineRunByID(id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewJobRunPayload(nil, r.App, err), nil + } + + return nil, err + } + + return NewJobRunPayload(&jr, r.App, err), nil +} + +func (r *Resolver) ETHKeys(ctx context.Context) (*ETHKeysPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + ks := 
r.App.GetKeyStore().Eth() + + keys, err := ks.GetAll() + if err != nil { + return nil, fmt.Errorf("error getting unlocked keys: %v", err) + } + + states, err := ks.GetStatesForKeys(keys) + if err != nil { + return nil, fmt.Errorf("error getting key states: %v", err) + } + + var ethKeys []ETHKey + + for _, state := range states { + k, err := ks.Get(state.Address.Hex()) + if err != nil { + return nil, err + } + + chain, err := r.App.GetRelayers().LegacyEVMChains().Get(state.EVMChainID.String()) + if errors.Is(errors.Cause(err), evmrelay.ErrNoChains) { + ethKeys = append(ethKeys, ETHKey{ + addr: k.EIP55Address, + state: state, + }) + + continue + } + // Don't include keys without valid chain. + // OperatorUI fails to show keys where chains are not in the config. + if err == nil { + ethKeys = append(ethKeys, ETHKey{ + addr: k.EIP55Address, + state: state, + chain: chain, + }) + } + } + // Put disabled keys to the end + sort.SliceStable(ethKeys, func(i, j int) bool { + return !states[i].Disabled && states[j].Disabled + }) + + return NewETHKeysPayload(ethKeys), nil +} + +// ConfigV2 retrieves the Plugin node's configuration (V2 mode) +func (r *Resolver) ConfigV2(ctx context.Context) (*ConfigV2PayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + cfg := r.App.GetConfig() + return NewConfigV2Payload(cfg.ConfigTOML()), nil +} + +func (r *Resolver) EthTransaction(ctx context.Context, args struct { + Hash graphql.ID +}) (*EthTransactionPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + hash := common.HexToHash(string(args.Hash)) + etx, err := r.App.TxmStorageService().FindTxByHash(hash) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return NewEthTransactionPayload(nil, err), nil + } + + return nil, err + } + + return NewEthTransactionPayload(etx, err), nil +} + +func (r *Resolver) EthTransactions(ctx context.Context, args struct { + Offset *int32 + Limit *int32 +}) 
(*EthTransactionsPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + offset := pageOffset(args.Offset) + limit := pageLimit(args.Limit) + + txs, count, err := r.App.TxmStorageService().Transactions(offset, limit) + if err != nil { + return nil, err + } + + return NewEthTransactionsPayload(txs, int32(count)), nil +} + +func (r *Resolver) EthTransactionsAttempts(ctx context.Context, args struct { + Offset *int32 + Limit *int32 +}) (*EthTransactionsAttemptsPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + offset := pageOffset(args.Offset) + limit := pageLimit(args.Limit) + + attempts, count, err := r.App.TxmStorageService().TxAttempts(offset, limit) + if err != nil { + return nil, err + } + + return NewEthTransactionsAttemptsPayload(attempts, int32(count)), nil +} + +func (r *Resolver) GlobalLogLevel(ctx context.Context) (*GlobalLogLevelPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + logLevel := r.App.GetConfig().Log().Level().String() + + return NewGlobalLogLevelPayload(logLevel), nil +} + +func (r *Resolver) SolanaKeys(ctx context.Context) (*SolanaKeysPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + keys, err := r.App.GetKeyStore().Solana().GetAll() + if err != nil { + return nil, err + } + + return NewSolanaKeysPayload(keys), nil +} + +func (r *Resolver) SQLLogging(ctx context.Context) (*GetSQLLoggingPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + enabled := r.App.GetConfig().Database().LogSQL() + + return NewGetSQLLoggingPayload(enabled), nil +} + +// OCR2KeyBundles resolves the list of OCR2 key bundles +func (r *Resolver) OCR2KeyBundles(ctx context.Context) (*OCR2KeyBundlesPayloadResolver, error) { + if err := authenticateUser(ctx); err != nil { + return nil, err + } + + ekbs, err := 
r.App.GetKeyStore().OCR2().GetAll() + if err != nil { + return nil, err + } + + return NewOCR2KeyBundlesPayload(ekbs), nil +} diff --git a/core/web/resolver/resolver_test.go b/core/web/resolver/resolver_test.go new file mode 100644 index 00000000..fdf06062 --- /dev/null +++ b/core/web/resolver/resolver_test.go @@ -0,0 +1,227 @@ +package resolver + +import ( + "context" + "testing" + "time" + + "github.com/graph-gophers/graphql-go" + gqlerrors "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/gqltesting" + "github.com/stretchr/testify/mock" + + bridgeORMMocks "github.com/goplugin/pluginv3.0/v2/core/bridges/mocks" + evmClientMocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/client/mocks" + evmConfigMocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/mocks" + evmORMMocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/mocks" + evmtxmgrmocks "github.com/goplugin/pluginv3.0/v2/core/chains/evm/txmgr/mocks" + legacyEvmORMMocks "github.com/goplugin/pluginv3.0/v2/core/chains/legacyevm/mocks" + coremocks "github.com/goplugin/pluginv3.0/v2/core/internal/mocks" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/evmtest" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + pluginMocks "github.com/goplugin/pluginv3.0/v2/core/services/plugin/mocks" + feedsMocks "github.com/goplugin/pluginv3.0/v2/core/services/feeds/mocks" + jobORMMocks "github.com/goplugin/pluginv3.0/v2/core/services/job/mocks" + keystoreMocks "github.com/goplugin/pluginv3.0/v2/core/services/keystore/mocks" + pipelineMocks "github.com/goplugin/pluginv3.0/v2/core/services/pipeline/mocks" + webhookmocks "github.com/goplugin/pluginv3.0/v2/core/services/webhook/mocks" + clsessions "github.com/goplugin/pluginv3.0/v2/core/sessions" + authProviderMocks "github.com/goplugin/pluginv3.0/v2/core/sessions/mocks" + 
"github.com/goplugin/pluginv3.0/v2/core/web/auth" + "github.com/goplugin/pluginv3.0/v2/core/web/loader" + "github.com/goplugin/pluginv3.0/v2/core/web/schema" +) + +type mocks struct { + bridgeORM *bridgeORMMocks.ORM + evmORM *evmtest.TestConfigs + jobORM *jobORMMocks.ORM + authProvider *authProviderMocks.AuthenticationProvider + pipelineORM *pipelineMocks.ORM + feedsSvc *feedsMocks.Service + cfg *pluginMocks.GeneralConfig + scfg *evmConfigMocks.ChainScopedConfig + ocr *keystoreMocks.OCR + ocr2 *keystoreMocks.OCR2 + csa *keystoreMocks.CSA + keystore *keystoreMocks.Master + ethKs *keystoreMocks.Eth + p2p *keystoreMocks.P2P + vrf *keystoreMocks.VRF + solana *keystoreMocks.Solana + chain *legacyEvmORMMocks.Chain + legacyEVMChains *legacyEvmORMMocks.LegacyChainContainer + relayerChainInterops *pluginMocks.FakeRelayerChainInteroperators + ethClient *evmClientMocks.Client + eIMgr *webhookmocks.ExternalInitiatorManager + balM *evmORMMocks.BalanceMonitor + txmStore *evmtxmgrmocks.EvmTxStore + auditLogger *audit.AuditLoggerService +} + +// gqlTestFramework is a framework wrapper containing the objects needed to run +// a GQL test. +type gqlTestFramework struct { + t *testing.T + + // The mocked plugin.Application + App *coremocks.Application + + // The root GQL schema + RootSchema *graphql.Schema + + // Contains the context with an injected dataloader + Ctx context.Context + + Mocks *mocks +} + +// setupFramework sets up the framework for all GQL testing +func setupFramework(t *testing.T) *gqlTestFramework { + t.Helper() + + var ( + app = coremocks.NewApplication(t) + rootSchema = graphql.MustParseSchema( + schema.MustGetRootSchema(), + &Resolver{App: app}, + ) + ctx = loader.InjectDataloader(testutils.Context(t), app) + ) + + // Setup mocks + // Note - If you add a new mock make sure you assert it's expectation below. 
+ m := &mocks{ + bridgeORM: bridgeORMMocks.NewORM(t), + evmORM: evmtest.NewTestConfigs(), + jobORM: jobORMMocks.NewORM(t), + feedsSvc: feedsMocks.NewService(t), + authProvider: authProviderMocks.NewAuthenticationProvider(t), + pipelineORM: pipelineMocks.NewORM(t), + cfg: pluginMocks.NewGeneralConfig(t), + scfg: evmConfigMocks.NewChainScopedConfig(t), + ocr: keystoreMocks.NewOCR(t), + ocr2: keystoreMocks.NewOCR2(t), + csa: keystoreMocks.NewCSA(t), + keystore: keystoreMocks.NewMaster(t), + ethKs: keystoreMocks.NewEth(t), + p2p: keystoreMocks.NewP2P(t), + vrf: keystoreMocks.NewVRF(t), + solana: keystoreMocks.NewSolana(t), + chain: legacyEvmORMMocks.NewChain(t), + legacyEVMChains: legacyEvmORMMocks.NewLegacyChainContainer(t), + relayerChainInterops: &pluginMocks.FakeRelayerChainInteroperators{}, + ethClient: evmClientMocks.NewClient(t), + eIMgr: webhookmocks.NewExternalInitiatorManager(t), + balM: evmORMMocks.NewBalanceMonitor(t), + txmStore: evmtxmgrmocks.NewEvmTxStore(t), + auditLogger: &audit.AuditLoggerService{}, + } + + lggr := logger.TestLogger(t) + app.Mock.On("GetAuditLogger", mock.Anything, mock.Anything).Return(audit.NoopLogger).Maybe() + app.Mock.On("GetLogger").Return(lggr).Maybe() + + f := &gqlTestFramework{ + t: t, + App: app, + RootSchema: rootSchema, + Ctx: ctx, + Mocks: m, + } + + return f +} + +// Timestamp returns a static timestamp. +// +// Use this in tests by interpolating it into the result string. 
If you don't +// want to interpolate you can instead use the formatted output of +// `2021-01-01T00:00:00Z` +func (f *gqlTestFramework) Timestamp() time.Time { + f.t.Helper() + + return time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC) +} + +// injectAuthenticatedUser injects a session into the request context +func (f *gqlTestFramework) injectAuthenticatedUser() { + f.t.Helper() + + user := clsessions.User{Email: "gqltester@chain.link", Role: clsessions.UserRoleAdmin} + + f.Ctx = auth.SetGQLAuthenticatedSession(f.Ctx, user, "gqltesterSession") +} + +// GQLTestCase represents a single GQL request test. +type GQLTestCase struct { + name string + authenticated bool + before func(*gqlTestFramework) + query string + variables map[string]interface{} + result string + errors []*gqlerrors.QueryError +} + +// RunGQLTests runs a set of GQL tests cases +func RunGQLTests(t *testing.T, testCases []GQLTestCase) { + t.Helper() + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + var ( + f = setupFramework(t) + ) + + if tc.authenticated { + f.injectAuthenticatedUser() + } + + if tc.before != nil { + tc.before(f) + } + + // This does not print out the correct stack trace as the `RunTest` + // function does not call t.Helper(). It insteads displays the file + // and line location of the `gqltesting` package. + // + // This would need to be fixed upstream. + gqltesting.RunTest(t, &gqltesting.Test{ + Context: f.Ctx, + Schema: f.RootSchema, + Query: tc.query, + Variables: tc.variables, + ExpectedResult: tc.result, + ExpectedErrors: tc.errors, + }) + }) + } +} + +// unauthorizedTestCase generates an unauthorized test case from another test +// case. 
+// +// The paths will be the query/mutation definition name +func unauthorizedTestCase(tc GQLTestCase, paths ...interface{}) GQLTestCase { + tc.name = "not authorized" + tc.authenticated = false + tc.result = "null" + tc.errors = []*gqlerrors.QueryError{ + { + ResolverError: unauthorizedError{}, + Path: paths, + Message: "Unauthorized", + Extensions: map[string]interface{}{ + "code": "UNAUTHORIZED", + }, + }, + } + + return tc +} diff --git a/core/web/resolver/solana_key.go b/core/web/resolver/solana_key.go new file mode 100644 index 00000000..e417032c --- /dev/null +++ b/core/web/resolver/solana_key.go @@ -0,0 +1,43 @@ +package resolver + +import ( + "github.com/graph-gophers/graphql-go" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" +) + +type SolanaKeyResolver struct { + key solkey.Key +} + +func NewSolanaKey(key solkey.Key) *SolanaKeyResolver { + return &SolanaKeyResolver{key: key} +} + +func NewSolanaKeys(keys []solkey.Key) []*SolanaKeyResolver { + var resolvers []*SolanaKeyResolver + + for _, k := range keys { + resolvers = append(resolvers, NewSolanaKey(k)) + } + + return resolvers +} + +func (r *SolanaKeyResolver) ID() graphql.ID { + return graphql.ID(r.key.PublicKeyStr()) +} + +// -- GetSolanaKeys Query -- + +type SolanaKeysPayloadResolver struct { + keys []solkey.Key +} + +func NewSolanaKeysPayload(keys []solkey.Key) *SolanaKeysPayloadResolver { + return &SolanaKeysPayloadResolver{keys: keys} +} + +func (r *SolanaKeysPayloadResolver) Results() []*SolanaKeyResolver { + return NewSolanaKeys(r.keys) +} diff --git a/core/web/resolver/solana_key_test.go b/core/web/resolver/solana_key_test.go new file mode 100644 index 00000000..1c3c8a53 --- /dev/null +++ b/core/web/resolver/solana_key_test.go @@ -0,0 +1,73 @@ +package resolver + +import ( + "errors" + "fmt" + "testing" + + gqlerrors "github.com/graph-gophers/graphql-go/errors" + + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/keystest" + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" +) + +func TestResolver_SolanaKeys(t *testing.T) { + t.Parallel() + + query := ` + query GetSolanaKeys { + solanaKeys { + results { + id + } + } + }` + k := solkey.MustNewInsecure(keystest.NewRandReaderFromSeed(1)) + result := fmt.Sprintf(` + { + "solanaKeys": { + "results": [ + { + "id": "%s" + } + ] + } + }`, k.PublicKeyStr()) + gError := errors.New("error") + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "solanaKeys"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.solana.On("GetAll").Return([]solkey.Key{k}, nil) + f.Mocks.keystore.On("Solana").Return(f.Mocks.solana) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: result, + }, + { + name: "generic error on GetAll", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.solana.On("GetAll").Return([]solkey.Key{}, gError) + f.Mocks.keystore.On("Solana").Return(f.Mocks.solana) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: gError, + Path: []interface{}{"solanaKeys"}, + Message: gError.Error(), + }, + }, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/spec.go b/core/web/resolver/spec.go new file mode 100644 index 00000000..7872a0bc --- /dev/null +++ b/core/web/resolver/spec.go @@ -0,0 +1,972 @@ +package resolver + +import ( + "github.com/graph-gophers/graphql-go" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/utils/stringutils" + "github.com/goplugin/pluginv3.0/v2/core/web/gqlscalar" +) + +type SpecResolver struct { + j job.Job +} + +func NewSpec(j job.Job) *SpecResolver { + return &SpecResolver{j: j} +} + +func (r *SpecResolver) ToCronSpec() (*CronSpecResolver, bool) { + if r.j.Type != job.Cron { + return nil, false + } + + 
return &CronSpecResolver{spec: *r.j.CronSpec}, true +} + +func (r *SpecResolver) ToDirectRequestSpec() (*DirectRequestSpecResolver, bool) { + if r.j.Type != job.DirectRequest { + return nil, false + } + + return &DirectRequestSpecResolver{spec: *r.j.DirectRequestSpec}, true +} + +func (r *SpecResolver) ToFluxMonitorSpec() (*FluxMonitorSpecResolver, bool) { + if r.j.Type != job.FluxMonitor { + return nil, false + } + + return &FluxMonitorSpecResolver{spec: *r.j.FluxMonitorSpec}, true +} + +func (r *SpecResolver) ToKeeperSpec() (*KeeperSpecResolver, bool) { + if r.j.Type != job.Keeper { + return nil, false + } + + return &KeeperSpecResolver{spec: *r.j.KeeperSpec}, true +} + +func (r *SpecResolver) ToOCRSpec() (*OCRSpecResolver, bool) { + if r.j.Type != job.OffchainReporting { + return nil, false + } + + return &OCRSpecResolver{spec: *r.j.OCROracleSpec}, true +} + +func (r *SpecResolver) ToOCR2Spec() (*OCR2SpecResolver, bool) { + if r.j.Type != job.OffchainReporting2 { + return nil, false + } + + return &OCR2SpecResolver{spec: *r.j.OCR2OracleSpec}, true +} + +func (r *SpecResolver) ToVRFSpec() (*VRFSpecResolver, bool) { + if r.j.Type != job.VRF { + return nil, false + } + + return &VRFSpecResolver{spec: *r.j.VRFSpec}, true +} + +func (r *SpecResolver) ToWebhookSpec() (*WebhookSpecResolver, bool) { + if r.j.Type != job.Webhook { + return nil, false + } + + return &WebhookSpecResolver{spec: *r.j.WebhookSpec}, true +} + +// ToBlockhashStoreSpec returns the BlockhashStoreSpec from the SpecResolver if the job is a +// BlockhashStore job. +func (r *SpecResolver) ToBlockhashStoreSpec() (*BlockhashStoreSpecResolver, bool) { + if r.j.Type != job.BlockhashStore { + return nil, false + } + + return &BlockhashStoreSpecResolver{spec: *r.j.BlockhashStoreSpec}, true +} + +// ToBlockHeaderFeederSpec returns the BlockHeaderFeederSpec from the SpecResolver if the job is a +// BlockHeaderFeeder job. 
+func (r *SpecResolver) ToBlockHeaderFeederSpec() (*BlockHeaderFeederSpecResolver, bool) { + if r.j.Type != job.BlockHeaderFeeder { + return nil, false + } + + return &BlockHeaderFeederSpecResolver{spec: *r.j.BlockHeaderFeederSpec}, true +} + +// ToBootstrapSpec resolves to the Booststrap Spec Resolver +func (r *SpecResolver) ToBootstrapSpec() (*BootstrapSpecResolver, bool) { + if r.j.Type != job.Bootstrap { + return nil, false + } + + return &BootstrapSpecResolver{spec: *r.j.BootstrapSpec}, true +} + +func (r *SpecResolver) ToGatewaySpec() (*GatewaySpecResolver, bool) { + if r.j.Type != job.Gateway { + return nil, false + } + + return &GatewaySpecResolver{spec: *r.j.GatewaySpec}, true +} + +type CronSpecResolver struct { + spec job.CronSpec +} + +func (r *CronSpecResolver) Schedule() string { + return r.spec.CronSchedule +} + +// CreatedAt resolves the spec's created at timestamp. +func (r *CronSpecResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.spec.CreatedAt} +} + +type DirectRequestSpecResolver struct { + spec job.DirectRequestSpec +} + +// ContractAddress resolves the spec's contract address. +func (r *DirectRequestSpecResolver) ContractAddress() string { + return r.spec.ContractAddress.String() +} + +// CreatedAt resolves the spec's created at timestamp. +func (r *DirectRequestSpecResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.spec.CreatedAt} +} + +// EVMChainID resolves the spec's evm chain id. +func (r *DirectRequestSpecResolver) EVMChainID() *string { + if r.spec.EVMChainID == nil { + return nil + } + + chainID := r.spec.EVMChainID.String() + + return &chainID +} + +// MinIncomingConfirmations resolves the spec's min incoming confirmations. +func (r *DirectRequestSpecResolver) MinIncomingConfirmations() int32 { + if r.spec.MinIncomingConfirmations.Valid { + return int32(r.spec.MinIncomingConfirmations.Uint32) + } + + return 0 +} + +// MinContractPaymentLinkJuels resolves the spec's min contract payment link. 
+func (r *DirectRequestSpecResolver) MinContractPaymentLinkJuels() string { + return r.spec.MinContractPayment.String() +} + +// Requesters resolves the spec's evm chain id. +func (r *DirectRequestSpecResolver) Requesters() *[]string { + if r.spec.Requesters == nil { + return nil + } + + requesters := r.spec.Requesters.ToStrings() + + return &requesters +} + +type FluxMonitorSpecResolver struct { + spec job.FluxMonitorSpec +} + +// AbsoluteThreshold resolves the spec's absolute deviation threshold. +func (r *FluxMonitorSpecResolver) AbsoluteThreshold() float64 { + return float64(r.spec.AbsoluteThreshold) +} + +// ContractAddress resolves the spec's contract address. +func (r *FluxMonitorSpecResolver) ContractAddress() string { + return r.spec.ContractAddress.String() +} + +// CreatedAt resolves the spec's created at timestamp. +func (r *FluxMonitorSpecResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.spec.CreatedAt} +} + +// AbsoluteThreshold resolves the spec's absolute threshold. +func (r *FluxMonitorSpecResolver) DrumbeatEnabled() bool { + return r.spec.DrumbeatEnabled +} + +// DrumbeatRandomDelay resolves the spec's drumbeat random delay. +func (r *FluxMonitorSpecResolver) DrumbeatRandomDelay() *string { + var delay *string + if r.spec.DrumbeatRandomDelay > 0 { + drumbeatRandomDelay := r.spec.DrumbeatRandomDelay.String() + delay = &drumbeatRandomDelay + } + + return delay +} + +// DrumbeatSchedule resolves the spec's drumbeat schedule. +func (r *FluxMonitorSpecResolver) DrumbeatSchedule() *string { + if r.spec.DrumbeatEnabled { + return &r.spec.DrumbeatSchedule + } + + return nil +} + +// EVMChainID resolves the spec's evm chain id. +func (r *FluxMonitorSpecResolver) EVMChainID() *string { + if r.spec.EVMChainID == nil { + return nil + } + + chainID := r.spec.EVMChainID.String() + + return &chainID +} + +// IdleTimerDisabled resolves the spec's idle timer disabled flag. 
+func (r *FluxMonitorSpecResolver) IdleTimerDisabled() bool { + return r.spec.IdleTimerDisabled +} + +// IdleTimerPeriod resolves the spec's idle timer period. +func (r *FluxMonitorSpecResolver) IdleTimerPeriod() string { + return r.spec.IdleTimerPeriod.String() +} + +// MinPayment resolves the spec's min payment. +func (r *FluxMonitorSpecResolver) MinPayment() *string { + if r.spec.MinPayment != nil { + min := r.spec.MinPayment.String() + + return &min + } + return nil +} + +// PollTimerDisabled resolves the spec's poll timer disabled flag. +func (r *FluxMonitorSpecResolver) PollTimerDisabled() bool { + return r.spec.PollTimerDisabled +} + +// PollTimerPeriod resolves the spec's poll timer period. +func (r *FluxMonitorSpecResolver) PollTimerPeriod() string { + return r.spec.PollTimerPeriod.String() +} + +// Threshold resolves the spec's deviation threshold. +func (r *FluxMonitorSpecResolver) Threshold() float64 { + return float64(r.spec.Threshold) +} + +type KeeperSpecResolver struct { + spec job.KeeperSpec +} + +// ContractAddress resolves the spec's contract address. +func (r *KeeperSpecResolver) ContractAddress() string { + return r.spec.ContractAddress.String() +} + +// CreatedAt resolves the spec's created at timestamp. +func (r *KeeperSpecResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.spec.CreatedAt} +} + +// EVMChainID resolves the spec's evm chain id. +func (r *KeeperSpecResolver) EVMChainID() *string { + if r.spec.EVMChainID == nil { + return nil + } + + chainID := r.spec.EVMChainID.String() + + return &chainID +} + +// FromAddress resolves the spec's from contract address. +// +// Because VRF has an non required field of the same name, we have to be +// consistent in our return value of using a *string instead of a string even +// though this is a required field for the KeeperSpec. 
+// +// http://spec.graphql.org/draft/#sec-Field-Selection-Merging +func (r *KeeperSpecResolver) FromAddress() *string { + addr := r.spec.FromAddress.String() + + return &addr +} + +type OCRSpecResolver struct { + spec job.OCROracleSpec +} + +// BlockchainTimeout resolves the spec's blockchain timeout. +func (r *OCRSpecResolver) BlockchainTimeout() *string { + if r.spec.BlockchainTimeout.Duration() == 0 { + return nil + } + + timeout := r.spec.BlockchainTimeout.Duration().String() + + return &timeout +} + +// ContractAddress resolves the spec's contract address. +func (r *OCRSpecResolver) ContractAddress() string { + return r.spec.ContractAddress.String() +} + +// ContractConfigConfirmations resolves the spec's confirmations config. +func (r *OCRSpecResolver) ContractConfigConfirmations() *int32 { + if r.spec.ContractConfigConfirmations == 0 { + return nil + } + + confirmations := int32(r.spec.ContractConfigConfirmations) + + return &confirmations +} + +// ContractConfigTrackerPollInterval resolves the spec's contract tracker poll +// interval config. +func (r *OCRSpecResolver) ContractConfigTrackerPollInterval() *string { + if r.spec.ContractConfigTrackerPollInterval.Duration() == 0 { + return nil + } + + interval := r.spec.ContractConfigTrackerPollInterval.Duration().String() + + return &interval +} + +// ContractConfigTrackerSubscribeInterval resolves the spec's tracker subscribe +// interval config. +func (r *OCRSpecResolver) ContractConfigTrackerSubscribeInterval() *string { + if r.spec.ContractConfigTrackerSubscribeInterval.Duration() == 0 { + return nil + } + + interval := r.spec.ContractConfigTrackerSubscribeInterval.Duration().String() + + return &interval +} + +// CreatedAt resolves the spec's created at timestamp. +func (r *OCRSpecResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.spec.CreatedAt} +} + +// EVMChainID resolves the spec's evm chain id. 
+func (r *OCRSpecResolver) EVMChainID() *string { + if r.spec.EVMChainID == nil { + return nil + } + + chainID := r.spec.EVMChainID.String() + + return &chainID +} + +// DatabaseTimeout resolves the spec's database timeout. +func (r *OCRSpecResolver) DatabaseTimeout() string { + return r.spec.DatabaseTimeout.Duration().String() +} + +// ObservationGracePeriod resolves the spec's observation grace period. +func (r *OCRSpecResolver) ObservationGracePeriod() string { + return r.spec.ObservationGracePeriod.Duration().String() +} + +// ContractTransmitterTransmitTimeout resolves the spec's contract transmitter transmit timeout. +func (r *OCRSpecResolver) ContractTransmitterTransmitTimeout() string { + return r.spec.ContractTransmitterTransmitTimeout.Duration().String() +} + +// IsBootstrapPeer resolves whether spec is a bootstrap peer. +func (r *OCRSpecResolver) IsBootstrapPeer() bool { + return r.spec.IsBootstrapPeer +} + +// KeyBundleID resolves the spec's key bundle id. +func (r *OCRSpecResolver) KeyBundleID() *string { + if r.spec.EncryptedOCRKeyBundleID == nil { + return nil + } + + bundleID := r.spec.EncryptedOCRKeyBundleID.String() + + return &bundleID +} + +// ObservationTimeout resolves the spec's observation timeout +func (r *OCRSpecResolver) ObservationTimeout() *string { + if r.spec.ObservationTimeout.Duration() == 0 { + return nil + } + + timeout := r.spec.ObservationTimeout.Duration().String() + + return &timeout +} + +// P2PV2Bootstrappers resolves the OCR1 spec's p2pv2 bootstrappers +func (r *OCRSpecResolver) P2PV2Bootstrappers() *[]string { + if len(r.spec.P2PV2Bootstrappers) == 0 { + return nil + } + + peers := []string(r.spec.P2PV2Bootstrappers) + + return &peers +} + +// TransmitterAddress resolves the spec's transmitter address +func (r *OCRSpecResolver) TransmitterAddress() *string { + if r.spec.TransmitterAddress == nil { + return nil + } + + addr := r.spec.TransmitterAddress.String() + return &addr +} + +type OCR2SpecResolver struct { + spec 
job.OCR2OracleSpec +} + +// BlockchainTimeout resolves the spec's blockchain timeout. +func (r *OCR2SpecResolver) BlockchainTimeout() *string { + if r.spec.BlockchainTimeout.Duration() == 0 { + return nil + } + + timeout := r.spec.BlockchainTimeout.Duration().String() + + return &timeout +} + +// ContractID resolves the spec's contract address. +func (r *OCR2SpecResolver) ContractID() string { + return r.spec.ContractID +} + +// ContractConfigConfirmations resolves the spec's confirmations config. +func (r *OCR2SpecResolver) ContractConfigConfirmations() *int32 { + if r.spec.ContractConfigConfirmations == 0 { + return nil + } + + confirmations := int32(r.spec.ContractConfigConfirmations) + + return &confirmations +} + +// ContractConfigTrackerPollInterval resolves the spec's contract tracker poll +// interval config. +func (r *OCR2SpecResolver) ContractConfigTrackerPollInterval() *string { + if r.spec.ContractConfigTrackerPollInterval.Duration() == 0 { + return nil + } + + interval := r.spec.ContractConfigTrackerPollInterval.Duration().String() + + return &interval +} + +// CreatedAt resolves the spec's created at timestamp. +func (r *OCR2SpecResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.spec.CreatedAt} +} + +// OcrKeyBundleID resolves the spec's key bundle id. 
+func (r *OCR2SpecResolver) OcrKeyBundleID() *string { + if !r.spec.OCRKeyBundleID.Valid { + return nil + } + + return &r.spec.OCRKeyBundleID.String +} + +// MonitoringEndpoint resolves the spec's monitoring endpoint +func (r *OCR2SpecResolver) MonitoringEndpoint() *string { + if !r.spec.MonitoringEndpoint.Valid { + return nil + } + + return &r.spec.MonitoringEndpoint.String +} + +// P2PV2Bootstrappers resolves the OCR2 spec's p2pv2 bootstrappers +func (r *OCR2SpecResolver) P2PV2Bootstrappers() *[]string { + if len(r.spec.P2PV2Bootstrappers) == 0 { + return nil + } + + peers := []string(r.spec.P2PV2Bootstrappers) + + return &peers +} + +// Relay resolves the spec's relay +func (r *OCR2SpecResolver) Relay() string { + return r.spec.Relay +} + +// RelayConfig resolves the spec's relay config +func (r *OCR2SpecResolver) RelayConfig() gqlscalar.Map { + return gqlscalar.Map(r.spec.RelayConfig) +} + +// PluginType resolves the spec's plugin type +func (r *OCR2SpecResolver) PluginType() string { + return string(r.spec.PluginType) +} + +// PluginConfig resolves the spec's plugin config +func (r *OCR2SpecResolver) PluginConfig() gqlscalar.Map { + return gqlscalar.Map(r.spec.PluginConfig) +} + +// TransmitterID resolves the spec's transmitter id +func (r *OCR2SpecResolver) TransmitterID() *string { + if !r.spec.TransmitterID.Valid { + return nil + } + + addr := r.spec.TransmitterID.String + return &addr +} + +// FeedID resolves the spec's feed ID +func (r *OCR2SpecResolver) FeedID() *string { + if r.spec.FeedID == nil { + return nil + } + feedID := r.spec.FeedID.String() + return &feedID +} + +type VRFSpecResolver struct { + spec job.VRFSpec +} + +// MinIncomingConfirmations resolves the spec's min incoming confirmations. +func (r *VRFSpecResolver) MinIncomingConfirmations() int32 { + return int32(r.spec.MinIncomingConfirmations) +} + +// CoordinatorAddress resolves the spec's coordinator address. 
+func (r *VRFSpecResolver) CoordinatorAddress() string {
+	return r.spec.CoordinatorAddress.String()
+}
+
+// CreatedAt resolves the spec's creation timestamp.
+func (r *VRFSpecResolver) CreatedAt() graphql.Time {
+	return graphql.Time{Time: r.spec.CreatedAt}
+}
+
+// EVMChainID resolves the spec's EVM chain ID, or nil when unset.
+func (r *VRFSpecResolver) EVMChainID() *string {
+	if r.spec.EVMChainID == nil {
+		return nil
+	}
+
+	id := r.spec.EVMChainID.String()
+
+	return &id
+}
+
+// FromAddresses resolves the spec's from addresses, or nil when none are set.
+func (r *VRFSpecResolver) FromAddresses() *[]string {
+	if len(r.spec.FromAddresses) == 0 {
+		return nil
+	}
+
+	addrs := make([]string, 0, len(r.spec.FromAddresses))
+	for _, from := range r.spec.FromAddresses {
+		addrs = append(addrs, from.Address().String())
+	}
+	return &addrs
+}
+
+// PollPeriod resolves the spec's poll period.
+func (r *VRFSpecResolver) PollPeriod() string {
+	return r.spec.PollPeriod.String()
+}
+
+// PublicKey resolves the spec's proving public key.
+func (r *VRFSpecResolver) PublicKey() string {
+	return r.spec.PublicKey.String()
+}
+
+// RequestedConfsDelay resolves the spec's requested confirmations delay.
+// GraphQL has no 64-bit integer type, so the value is narrowed to int32.
+func (r *VRFSpecResolver) RequestedConfsDelay() int32 {
+	return int32(r.spec.RequestedConfsDelay)
+}
+
+// RequestTimeout resolves the spec's request timeout.
+func (r *VRFSpecResolver) RequestTimeout() string {
+	return r.spec.RequestTimeout.String()
+}
+
+// BatchCoordinatorAddress resolves the spec's batch coordinator address, or
+// nil when unset.
+func (r *VRFSpecResolver) BatchCoordinatorAddress() *string {
+	if r.spec.BatchCoordinatorAddress == nil {
+		return nil
+	}
+	a := r.spec.BatchCoordinatorAddress.String()
+	return &a
+}
+
+// BatchFulfillmentEnabled resolves the spec's batch fulfillment enabled flag.
+func (r *VRFSpecResolver) BatchFulfillmentEnabled() bool { + return r.spec.BatchFulfillmentEnabled +} + +// BatchFulfillmentGasMultiplier resolves the spec's batch fulfillment gas multiplier. +func (r *VRFSpecResolver) BatchFulfillmentGasMultiplier() float64 { + return float64(r.spec.BatchFulfillmentGasMultiplier) +} + +// CustomRevertsPipelineEnabled resolves the spec's custom reverts pipeline enabled flag. +func (r *VRFSpecResolver) CustomRevertsPipelineEnabled() *bool { + return &r.spec.CustomRevertsPipelineEnabled +} + +// ChunkSize resolves the spec's chunk size. +func (r *VRFSpecResolver) ChunkSize() int32 { + return int32(r.spec.ChunkSize) +} + +// BackoffInitialDelay resolves the spec's backoff initial delay. +func (r *VRFSpecResolver) BackoffInitialDelay() string { + return r.spec.BackoffInitialDelay.String() +} + +// BackoffMaxDelay resolves the spec's backoff max delay. +func (r *VRFSpecResolver) BackoffMaxDelay() string { + return r.spec.BackoffMaxDelay.String() +} + +// GasLanePrice resolves the spec's gas lane price. +func (r *VRFSpecResolver) GasLanePrice() *string { + if r.spec.GasLanePrice == nil { + return nil + } + gasLanePriceGWei := r.spec.GasLanePrice.String() + return &gasLanePriceGWei +} + +// VRFOwnerAddress resolves the spec's vrf owner address. +func (r *VRFSpecResolver) VRFOwnerAddress() *string { + if r.spec.VRFOwnerAddress == nil { + return nil + } + vrfOwnerAddress := r.spec.VRFOwnerAddress.String() + return &vrfOwnerAddress +} + +type WebhookSpecResolver struct { + spec job.WebhookSpec +} + +// CreatedAt resolves the spec's created at timestamp. +func (r *WebhookSpecResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.spec.CreatedAt} +} + +// BlockhashStoreSpecResolver exposes the job parameters for a BlockhashStoreSpec. +type BlockhashStoreSpecResolver struct { + spec job.BlockhashStoreSpec +} + +// CoordinatorV1Address returns the address of the V1 Coordinator, if any. 
+func (b *BlockhashStoreSpecResolver) CoordinatorV1Address() *string { + if b.spec.CoordinatorV1Address == nil { + return nil + } + addr := b.spec.CoordinatorV1Address.String() + return &addr +} + +// CoordinatorV2Address returns the address of the V2 Coordinator, if any. +func (b *BlockhashStoreSpecResolver) CoordinatorV2Address() *string { + if b.spec.CoordinatorV2Address == nil { + return nil + } + addr := b.spec.CoordinatorV2Address.String() + return &addr +} + +// CoordinatorV2PlusAddress returns the address of the V2Plus Coordinator, if any. +func (b *BlockhashStoreSpecResolver) CoordinatorV2PlusAddress() *string { + if b.spec.CoordinatorV2PlusAddress == nil { + return nil + } + addr := b.spec.CoordinatorV2PlusAddress.String() + return &addr +} + +// WaitBlocks returns the job's WaitBlocks param. +func (b *BlockhashStoreSpecResolver) WaitBlocks() int32 { + return b.spec.WaitBlocks +} + +// LookbackBlocks returns the job's LookbackBlocks param. +func (b *BlockhashStoreSpecResolver) LookbackBlocks() int32 { + return b.spec.LookbackBlocks +} + +// HeartbeatPeriod returns the job's HeartbeatPeriod param. +func (b *BlockhashStoreSpecResolver) HeartbeatPeriod() string { + return b.spec.HeartbeatPeriod.String() +} + +// BlockhashStoreAddress returns the job's BlockhashStoreAddress param. +func (b *BlockhashStoreSpecResolver) BlockhashStoreAddress() string { + return b.spec.BlockhashStoreAddress.String() +} + +// TrustedBlockhashStoreAddress returns the address of the job's TrustedBlockhashStoreAddress, if any. +func (b *BlockhashStoreSpecResolver) TrustedBlockhashStoreAddress() *string { + if b.spec.TrustedBlockhashStoreAddress == nil { + return nil + } + addr := b.spec.TrustedBlockhashStoreAddress.String() + return &addr +} + +// BatchBlockhashStoreAddress returns the job's BatchBlockhashStoreAddress param. 
+func (b *BlockhashStoreSpecResolver) TrustedBlockhashStoreBatchSize() int32 { + return b.spec.TrustedBlockhashStoreBatchSize +} + +// PollPeriod return's the job's PollPeriod param. +func (b *BlockhashStoreSpecResolver) PollPeriod() string { + return b.spec.PollPeriod.String() +} + +// RunTimeout return's the job's RunTimeout param. +func (b *BlockhashStoreSpecResolver) RunTimeout() string { + return b.spec.RunTimeout.String() +} + +// EVMChainID returns the job's EVMChainID param. +func (b *BlockhashStoreSpecResolver) EVMChainID() *string { + chainID := b.spec.EVMChainID.String() + return &chainID +} + +// FromAddress returns the job's FromAddress param, if any. +func (b *BlockhashStoreSpecResolver) FromAddresses() *[]string { + if b.spec.FromAddresses == nil { + return nil + } + var addresses []string + for _, a := range b.spec.FromAddresses { + addresses = append(addresses, a.Address().String()) + } + return &addresses +} + +// CreatedAt resolves the spec's created at timestamp. +func (b *BlockhashStoreSpecResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: b.spec.CreatedAt} +} + +// BlockHeaderFeederSpecResolver exposes the job parameters for a BlockHeaderFeederSpec. +type BlockHeaderFeederSpecResolver struct { + spec job.BlockHeaderFeederSpec +} + +// CoordinatorV1Address returns the address of the V1 Coordinator, if any. +func (b *BlockHeaderFeederSpecResolver) CoordinatorV1Address() *string { + if b.spec.CoordinatorV1Address == nil { + return nil + } + addr := b.spec.CoordinatorV1Address.String() + return &addr +} + +// CoordinatorV2Address returns the address of the V2 Coordinator, if any. +func (b *BlockHeaderFeederSpecResolver) CoordinatorV2Address() *string { + if b.spec.CoordinatorV2Address == nil { + return nil + } + addr := b.spec.CoordinatorV2Address.String() + return &addr +} + +// CoordinatorV2PlusAddress returns the address of the V2 Coordinator Plus, if any. 
+func (b *BlockHeaderFeederSpecResolver) CoordinatorV2PlusAddress() *string { + if b.spec.CoordinatorV2PlusAddress == nil { + return nil + } + addr := b.spec.CoordinatorV2PlusAddress.String() + return &addr +} + +// WaitBlocks returns the job's WaitBlocks param. +func (b *BlockHeaderFeederSpecResolver) WaitBlocks() int32 { + return b.spec.WaitBlocks +} + +// LookbackBlocks returns the job's LookbackBlocks param. +func (b *BlockHeaderFeederSpecResolver) LookbackBlocks() int32 { + return b.spec.LookbackBlocks +} + +// BlockhashStoreAddress returns the job's BlockhashStoreAddress param. +func (b *BlockHeaderFeederSpecResolver) BlockhashStoreAddress() string { + return b.spec.BlockhashStoreAddress.String() +} + +// BatchBlockhashStoreAddress returns the job's BatchBlockhashStoreAddress param. +func (b *BlockHeaderFeederSpecResolver) BatchBlockhashStoreAddress() string { + return b.spec.BatchBlockhashStoreAddress.String() +} + +// PollPeriod return's the job's PollPeriod param. +func (b *BlockHeaderFeederSpecResolver) PollPeriod() string { + return b.spec.PollPeriod.String() +} + +// RunTimeout return's the job's RunTimeout param. +func (b *BlockHeaderFeederSpecResolver) RunTimeout() string { + return b.spec.RunTimeout.String() +} + +// EVMChainID returns the job's EVMChainID param. +func (b *BlockHeaderFeederSpecResolver) EVMChainID() *string { + chainID := b.spec.EVMChainID.String() + return &chainID +} + +// FromAddress returns the job's FromAddress param, if any. +func (b *BlockHeaderFeederSpecResolver) FromAddresses() *[]string { + if b.spec.FromAddresses == nil { + return nil + } + var addresses []string + for _, a := range b.spec.FromAddresses { + addresses = append(addresses, a.Address().String()) + } + return &addresses +} + +// GetBlockhashesBatchSize returns the job's GetBlockhashesBatchSize param. 
+func (b *BlockHeaderFeederSpecResolver) GetBlockhashesBatchSize() int32 { + return int32(b.spec.GetBlockhashesBatchSize) +} + +// StoreBlockhashesBatchSize returns the job's StoreBlockhashesBatchSize param. +func (b *BlockHeaderFeederSpecResolver) StoreBlockhashesBatchSize() int32 { + return int32(b.spec.StoreBlockhashesBatchSize) +} + +// CreatedAt resolves the spec's created at timestamp. +func (b *BlockHeaderFeederSpecResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: b.spec.CreatedAt} +} + +// BootstrapSpecResolver defines the Bootstrap Spec Resolver +type BootstrapSpecResolver struct { + spec job.BootstrapSpec +} + +// ID resolves the Bootstrap spec ID +func (r *BootstrapSpecResolver) ID() graphql.ID { + return graphql.ID(stringutils.FromInt32(r.spec.ID)) +} + +// ContractID resolves the spec's contract address +func (r *BootstrapSpecResolver) ContractID() string { + return r.spec.ContractID +} + +// Relay resolves the spec's relay +func (r *BootstrapSpecResolver) Relay() string { + return r.spec.Relay +} + +// RelayConfig resolves the spec's relay config +func (r *BootstrapSpecResolver) RelayConfig() gqlscalar.Map { + return gqlscalar.Map(r.spec.RelayConfig) +} + +// MonitoringEndpoint resolves the spec's monitoring endpoint +func (r *BootstrapSpecResolver) MonitoringEndpoint() *string { + if !r.spec.MonitoringEndpoint.Valid { + return nil + } + + return &r.spec.MonitoringEndpoint.String +} + +// BlockchainTimeout resolves the spec's blockchain timeout +func (r *BootstrapSpecResolver) BlockchainTimeout() *string { + if r.spec.BlockchainTimeout.Duration() == 0 { + return nil + } + + interval := r.spec.BlockchainTimeout.Duration().String() + + return &interval +} + +// ContractConfigTrackerPollInterval resolves the spec's contract tracker poll +// interval config. 
+func (r *BootstrapSpecResolver) ContractConfigTrackerPollInterval() *string { + if r.spec.ContractConfigTrackerPollInterval.Duration() == 0 { + return nil + } + + interval := r.spec.ContractConfigTrackerPollInterval.Duration().String() + + return &interval +} + +// ContractConfigConfirmations resolves the spec's confirmations config. +func (r *BootstrapSpecResolver) ContractConfigConfirmations() *int32 { + if r.spec.ContractConfigConfirmations == 0 { + return nil + } + + confirmations := int32(r.spec.ContractConfigConfirmations) + + return &confirmations +} + +// CreatedAt resolves the spec's created at timestamp. +func (r *BootstrapSpecResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.spec.CreatedAt} +} + +type GatewaySpecResolver struct { + spec job.GatewaySpec +} + +func (r *GatewaySpecResolver) ID() graphql.ID { + return graphql.ID(stringutils.FromInt32(r.spec.ID)) +} + +func (r *GatewaySpecResolver) GatewayConfig() gqlscalar.Map { + return gqlscalar.Map(r.spec.GatewayConfig) +} + +func (r *GatewaySpecResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.spec.CreatedAt} +} diff --git a/core/web/resolver/spec_test.go b/core/web/resolver/spec_test.go new file mode 100644 index 00000000..ea2cee54 --- /dev/null +++ b/core/web/resolver/spec_test.go @@ -0,0 +1,1052 @@ +package resolver + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-common/pkg/types" + + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + ubig "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + clnull "github.com/goplugin/pluginv3.0/v2/core/null" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/ethkey" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + 
"github.com/goplugin/pluginv3.0/v2/core/services/signatures/secp256k1" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +// Specs are only embedded on the job and are not fetchable by it's own id, so +// we test the spec resolvers by fetching a job by id. + +func TestResolver_CronSpec(t *testing.T) { + var ( + id = int32(1) + ) + + testCases := []GQLTestCase{ + { + name: "cron spec success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.Cron, + CronSpec: &job.CronSpec{ + CronSchedule: "CRON_TZ=UTC 0 0 1 1 *", + CreatedAt: f.Timestamp(), + }, + }, nil) + }, + query: ` + query GetJob { + job(id: "1") { + ... on Job { + spec { + __typename + ... on CronSpec { + schedule + createdAt + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "CronSpec", + "schedule": "CRON_TZ=UTC 0 0 1 1 *", + "createdAt": "2021-01-01T00:00:00Z" + } + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_DirectRequestSpec(t *testing.T) { + var ( + id = int32(1) + requesterAddress = common.HexToAddress("0x3cCad4715152693fE3BC4460591e3D3Fbd071b42") + ) + contractAddress, err := ethkey.NewEIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C") + require.NoError(t, err) + + testCases := []GQLTestCase{ + { + name: "direct request spec success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.DirectRequest, + DirectRequestSpec: &job.DirectRequestSpec{ + ContractAddress: contractAddress, + CreatedAt: f.Timestamp(), + EVMChainID: ubig.NewI(42), + MinIncomingConfirmations: clnull.NewUint32(1, true), + MinContractPayment: commonassets.NewLinkFromJuels(1000), + Requesters: models.AddressCollection{requesterAddress}, + }, + }, nil) + }, + query: ` + query GetJob { + 
job(id: "1") { + ... on Job { + spec { + __typename + ... on DirectRequestSpec { + contractAddress + createdAt + evmChainID + minIncomingConfirmations + minContractPaymentLinkJuels + requesters + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "DirectRequestSpec", + "contractAddress": "0x613a38AC1659769640aaE063C651F48E0250454C", + "createdAt": "2021-01-01T00:00:00Z", + "evmChainID": "42", + "minIncomingConfirmations": 1, + "minContractPaymentLinkJuels": "1000", + "requesters": ["0x3cCad4715152693fE3BC4460591e3D3Fbd071b42"] + } + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_FluxMonitorSpec(t *testing.T) { + var ( + id = int32(1) + ) + contractAddress, err := ethkey.NewEIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C") + require.NoError(t, err) + + testCases := []GQLTestCase{ + { + name: "flux monitor spec with standard timers", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.FluxMonitor, + FluxMonitorSpec: &job.FluxMonitorSpec{ + ContractAddress: contractAddress, + CreatedAt: f.Timestamp(), + EVMChainID: ubig.NewI(42), + DrumbeatEnabled: false, + IdleTimerDisabled: false, + IdleTimerPeriod: 1 * time.Hour, + MinPayment: commonassets.NewLinkFromJuels(1000), + PollTimerDisabled: false, + PollTimerPeriod: 1 * time.Minute, + }, + }, nil) + }, + query: ` + query GetJob { + job(id: "1") { + ... on Job { + spec { + __typename + ... 
on FluxMonitorSpec { + absoluteThreshold + contractAddress + createdAt + drumbeatEnabled + drumbeatRandomDelay + drumbeatSchedule + evmChainID + idleTimerDisabled + idleTimerPeriod + minPayment + pollTimerDisabled + pollTimerPeriod + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "FluxMonitorSpec", + "absoluteThreshold": 0, + "contractAddress": "0x613a38AC1659769640aaE063C651F48E0250454C", + "createdAt": "2021-01-01T00:00:00Z", + "drumbeatEnabled": false, + "drumbeatRandomDelay": null, + "drumbeatSchedule": null, + "evmChainID": "42", + "idleTimerDisabled": false, + "idleTimerPeriod": "1h0m0s", + "minPayment": "1000", + "pollTimerDisabled": false, + "pollTimerPeriod": "1m0s" + } + } + } + `, + }, + { + name: "flux monitor spec with drumbeat", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.FluxMonitor, + FluxMonitorSpec: &job.FluxMonitorSpec{ + ContractAddress: contractAddress, + CreatedAt: f.Timestamp(), + EVMChainID: ubig.NewI(42), + DrumbeatEnabled: true, + DrumbeatRandomDelay: 1 * time.Second, + DrumbeatSchedule: "CRON_TZ=UTC 0 0 1 1 *", + IdleTimerDisabled: true, + IdleTimerPeriod: 1 * time.Hour, + MinPayment: commonassets.NewLinkFromJuels(1000), + PollTimerDisabled: true, + PollTimerPeriod: 1 * time.Minute, + }, + }, nil) + }, + query: ` + query GetJob { + job(id: "1") { + ... on Job { + spec { + __typename + ... 
on FluxMonitorSpec { + absoluteThreshold + contractAddress + createdAt + drumbeatEnabled + drumbeatRandomDelay + drumbeatSchedule + evmChainID + idleTimerDisabled + idleTimerPeriod + minPayment + pollTimerDisabled + pollTimerPeriod + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "FluxMonitorSpec", + "absoluteThreshold": 0, + "contractAddress": "0x613a38AC1659769640aaE063C651F48E0250454C", + "createdAt": "2021-01-01T00:00:00Z", + "drumbeatEnabled": true, + "drumbeatRandomDelay": "1s", + "drumbeatSchedule": "CRON_TZ=UTC 0 0 1 1 *", + "evmChainID": "42", + "idleTimerDisabled": true, + "idleTimerPeriod": "1h0m0s", + "minPayment": "1000", + "pollTimerDisabled": true, + "pollTimerPeriod": "1m0s" + } + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_KeeperSpec(t *testing.T) { + var ( + id = int32(1) + fromAddress = common.HexToAddress("0x3cCad4715152693fE3BC4460591e3D3Fbd071b42") + ) + contractAddress, err := ethkey.NewEIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C") + require.NoError(t, err) + + testCases := []GQLTestCase{ + { + name: "keeper spec", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.Keeper, + KeeperSpec: &job.KeeperSpec{ + ContractAddress: contractAddress, + CreatedAt: f.Timestamp(), + EVMChainID: ubig.NewI(42), + FromAddress: ethkey.EIP55AddressFromAddress(fromAddress), + }, + }, nil) + }, + query: ` + query GetJob { + job(id: "1") { + ... on Job { + spec { + __typename + ... 
on KeeperSpec { + contractAddress + createdAt + evmChainID + fromAddress + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "KeeperSpec", + "contractAddress": "0x613a38AC1659769640aaE063C651F48E0250454C", + "createdAt": "2021-01-01T00:00:00Z", + "evmChainID": "42", + "fromAddress": "0x3cCad4715152693fE3BC4460591e3D3Fbd071b42" + } + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_OCRSpec(t *testing.T) { + var ( + id = int32(1) + ) + contractAddress, err := ethkey.NewEIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C") + require.NoError(t, err) + + transmitterAddress, err := ethkey.NewEIP55Address("0x3cCad4715152693fE3BC4460591e3D3Fbd071b42") + require.NoError(t, err) + + keyBundleID := models.MustSha256HashFromHex("f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5") + + testCases := []GQLTestCase{ + { + name: "OCR spec", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.OffchainReporting, + OCROracleSpec: &job.OCROracleSpec{ + BlockchainTimeout: models.Interval(1 * time.Minute), + ContractAddress: contractAddress, + ContractConfigConfirmations: 1, + ContractConfigTrackerPollInterval: models.Interval(1 * time.Minute), + ContractConfigTrackerSubscribeInterval: models.Interval(2 * time.Minute), + DatabaseTimeout: models.NewInterval(3 * time.Second), + ObservationGracePeriod: models.NewInterval(4 * time.Second), + ContractTransmitterTransmitTimeout: models.NewInterval(555 * time.Millisecond), + CreatedAt: f.Timestamp(), + EVMChainID: ubig.NewI(42), + IsBootstrapPeer: false, + EncryptedOCRKeyBundleID: &keyBundleID, + ObservationTimeout: models.Interval(2 * time.Minute), + P2PV2Bootstrappers: pq.StringArray{"12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw@localhost:5001"}, + TransmitterAddress: &transmitterAddress, + }, + }, nil) + }, + query: ` + query GetJob 
{ + job(id: "1") { + ... on Job { + spec { + __typename + ... on OCRSpec { + blockchainTimeout + contractAddress + contractConfigConfirmations + contractConfigTrackerPollInterval + contractConfigTrackerSubscribeInterval + databaseTimeout + observationGracePeriod + contractTransmitterTransmitTimeout + createdAt + evmChainID + isBootstrapPeer + keyBundleID + observationTimeout + p2pv2Bootstrappers + transmitterAddress + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "OCRSpec", + "blockchainTimeout": "1m0s", + "contractAddress": "0x613a38AC1659769640aaE063C651F48E0250454C", + "contractConfigConfirmations": 1, + "contractConfigTrackerPollInterval": "1m0s", + "contractConfigTrackerSubscribeInterval": "2m0s", + "databaseTimeout": "3s", + "observationGracePeriod": "4s", + "contractTransmitterTransmitTimeout": "555ms", + "createdAt": "2021-01-01T00:00:00Z", + "evmChainID": "42", + "isBootstrapPeer": false, + "keyBundleID": "f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5", + "observationTimeout": "2m0s", + "p2pv2Bootstrappers": ["12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw@localhost:5001"], + "transmitterAddress": "0x3cCad4715152693fE3BC4460591e3D3Fbd071b42" + } + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_OCR2Spec(t *testing.T) { + var ( + id = int32(1) + ) + contractAddress, err := ethkey.NewEIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C") + require.NoError(t, err) + + transmitterAddress, err := ethkey.NewEIP55Address("0x3cCad4715152693fE3BC4460591e3D3Fbd071b42") + require.NoError(t, err) + + keyBundleID := models.MustSha256HashFromHex("f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5") + + relayConfig := map[string]interface{}{ + "chainID": 1337, + } + pluginConfig := map[string]interface{}{ + "juelsPerFeeCoinSource": 100000000, + } + require.NoError(t, err) + + testCases := []GQLTestCase{ + { + name: "OCR 2 spec", + authenticated: true, + before: func(f 
*gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.OffchainReporting2, + OCR2OracleSpec: &job.OCR2OracleSpec{ + BlockchainTimeout: models.Interval(1 * time.Minute), + ContractID: contractAddress.String(), + ContractConfigConfirmations: 1, + ContractConfigTrackerPollInterval: models.Interval(1 * time.Minute), + CreatedAt: f.Timestamp(), + OCRKeyBundleID: null.StringFrom(keyBundleID.String()), + MonitoringEndpoint: null.StringFrom("https://monitor.endpoint"), + P2PV2Bootstrappers: pq.StringArray{"12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw@localhost:5001"}, + Relay: relay.EVM, + RelayConfig: relayConfig, + TransmitterID: null.StringFrom(transmitterAddress.String()), + PluginType: types.Median, + PluginConfig: pluginConfig, + }, + }, nil) + }, + query: ` + query GetJob { + job(id: "1") { + ... on Job { + spec { + __typename + ... on OCR2Spec { + blockchainTimeout + contractID + contractConfigConfirmations + contractConfigTrackerPollInterval + createdAt + ocrKeyBundleID + monitoringEndpoint + p2pv2Bootstrappers + relay + relayConfig + transmitterID + pluginType + pluginConfig + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "OCR2Spec", + "blockchainTimeout": "1m0s", + "contractID": "0x613a38AC1659769640aaE063C651F48E0250454C", + "contractConfigConfirmations": 1, + "contractConfigTrackerPollInterval": "1m0s", + "createdAt": "2021-01-01T00:00:00Z", + "ocrKeyBundleID": "f5bf259689b26f1374efb3c9a9868796953a0f814bb2d39b968d0e61b58620a5", + "monitoringEndpoint": "https://monitor.endpoint", + "p2pv2Bootstrappers": ["12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw@localhost:5001"], + "relay": "evm", + "relayConfig": { + "chainID": 1337 + }, + "transmitterID": "0x3cCad4715152693fE3BC4460591e3D3Fbd071b42", + "pluginType": "median", + "pluginConfig": { + "juelsPerFeeCoinSource": 100000000 + } + } + } + } + `, + }, + } + + RunGQLTests(t, 
testCases) +} + +func TestResolver_VRFSpec(t *testing.T) { + var ( + id = int32(1) + ) + coordinatorAddress, err := ethkey.NewEIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C") + require.NoError(t, err) + + batchCoordinatorAddress, err := ethkey.NewEIP55Address("0x0ad9FE7a58216242a8475ca92F222b0640E26B63") + require.NoError(t, err) + + fromAddress1, err := ethkey.NewEIP55Address("0x3cCad4715152693fE3BC4460591e3D3Fbd071b42") + require.NoError(t, err) + + fromAddress2, err := ethkey.NewEIP55Address("0x2301958F1BFbC9A068C2aC9c6166Bf483b95864C") + require.NoError(t, err) + + pubKey, err := secp256k1.NewPublicKeyFromHex("0x9dc09a0f898f3b5e8047204e7ce7e44b587920932f08431e29c9bf6923b8450a01") + require.NoError(t, err) + + testCases := []GQLTestCase{ + { + name: "vrf spec", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.VRF, + VRFSpec: &job.VRFSpec{ + BatchCoordinatorAddress: &batchCoordinatorAddress, + BatchFulfillmentEnabled: true, + CustomRevertsPipelineEnabled: true, + MinIncomingConfirmations: 1, + CoordinatorAddress: coordinatorAddress, + CreatedAt: f.Timestamp(), + EVMChainID: ubig.NewI(42), + FromAddresses: []ethkey.EIP55Address{fromAddress1, fromAddress2}, + PollPeriod: 1 * time.Minute, + PublicKey: pubKey, + RequestedConfsDelay: 10, + RequestTimeout: 24 * time.Hour, + ChunkSize: 25, + BatchFulfillmentGasMultiplier: 1, + BackoffInitialDelay: time.Minute, + BackoffMaxDelay: time.Hour, + GasLanePrice: assets.GWei(200), + }, + }, nil) + }, + query: ` + query GetJob { + job(id: "1") { + ... on Job { + spec { + __typename + ... 
on VRFSpec { + coordinatorAddress + createdAt + evmChainID + fromAddresses + minIncomingConfirmations + pollPeriod + publicKey + requestedConfsDelay + requestTimeout + batchCoordinatorAddress + batchFulfillmentEnabled + batchFulfillmentGasMultiplier + customRevertsPipelineEnabled + chunkSize + backoffInitialDelay + backoffMaxDelay + gasLanePrice + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "VRFSpec", + "coordinatorAddress": "0x613a38AC1659769640aaE063C651F48E0250454C", + "createdAt": "2021-01-01T00:00:00Z", + "evmChainID": "42", + "fromAddresses": ["0x3cCad4715152693fE3BC4460591e3D3Fbd071b42", "0x2301958F1BFbC9A068C2aC9c6166Bf483b95864C"], + "minIncomingConfirmations": 1, + "pollPeriod": "1m0s", + "publicKey": "0x9dc09a0f898f3b5e8047204e7ce7e44b587920932f08431e29c9bf6923b8450a01", + "requestedConfsDelay": 10, + "requestTimeout": "24h0m0s", + "batchCoordinatorAddress": "0x0ad9FE7a58216242a8475ca92F222b0640E26B63", + "batchFulfillmentEnabled": true, + "batchFulfillmentGasMultiplier": 1, + "customRevertsPipelineEnabled": true, + "chunkSize": 25, + "backoffInitialDelay": "1m0s", + "backoffMaxDelay": "1h0m0s", + "gasLanePrice": "200 gwei" + } + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_WebhookSpec(t *testing.T) { + var ( + id = int32(1) + ) + + testCases := []GQLTestCase{ + { + name: "webhook spec", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.Webhook, + WebhookSpec: &job.WebhookSpec{ + CreatedAt: f.Timestamp(), + }, + }, nil) + }, + query: ` + query GetJob { + job(id: "1") { + ... on Job { + spec { + __typename + ... 
on WebhookSpec { + createdAt + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "WebhookSpec", + "createdAt": "2021-01-01T00:00:00Z" + } + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_BlockhashStoreSpec(t *testing.T) { + var ( + id = int32(1) + ) + coordinatorV1Address, err := ethkey.NewEIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C") + require.NoError(t, err) + + coordinatorV2Address, err := ethkey.NewEIP55Address("0x2fcA960AF066cAc46085588a66dA2D614c7Cd337") + require.NoError(t, err) + + coordinatorV2PlusAddress, err := ethkey.NewEIP55Address("0x92B5e28Ac583812874e4271380c7d070C5FB6E6b") + require.NoError(t, err) + + fromAddress1, err := ethkey.NewEIP55Address("0x3cCad4715152693fE3BC4460591e3D3Fbd071b42") + require.NoError(t, err) + + fromAddress2, err := ethkey.NewEIP55Address("0xD479d7c994D298cA05bF270136ED9627b7E684D3") + require.NoError(t, err) + + blockhashStoreAddress, err := ethkey.NewEIP55Address("0xb26A6829D454336818477B946f03Fb21c9706f3A") + require.NoError(t, err) + + trustedBlockhashStoreAddress, err := ethkey.NewEIP55Address("0x0ad9FE7a58216242a8475ca92F222b0640E26B63") + require.NoError(t, err) + trustedBlockhashStoreBatchSize := int32(20) + + testCases := []GQLTestCase{ + { + name: "blockhash store spec", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.BlockhashStore, + BlockhashStoreSpec: &job.BlockhashStoreSpec{ + CoordinatorV1Address: &coordinatorV1Address, + CoordinatorV2Address: &coordinatorV2Address, + CoordinatorV2PlusAddress: &coordinatorV2PlusAddress, + CreatedAt: f.Timestamp(), + EVMChainID: ubig.NewI(42), + FromAddresses: []ethkey.EIP55Address{fromAddress1, fromAddress2}, + PollPeriod: 1 * time.Minute, + RunTimeout: 37 * time.Second, + WaitBlocks: 100, + LookbackBlocks: 200, + HeartbeatPeriod: 450 * time.Second, + BlockhashStoreAddress: 
blockhashStoreAddress, + TrustedBlockhashStoreAddress: &trustedBlockhashStoreAddress, + TrustedBlockhashStoreBatchSize: trustedBlockhashStoreBatchSize, + }, + }, nil) + }, + query: ` + query GetJob { + job(id: "1") { + ... on Job { + spec { + __typename + ... on BlockhashStoreSpec { + coordinatorV1Address + coordinatorV2Address + coordinatorV2PlusAddress + createdAt + evmChainID + fromAddresses + pollPeriod + runTimeout + waitBlocks + lookbackBlocks + blockhashStoreAddress + trustedBlockhashStoreAddress + trustedBlockhashStoreBatchSize + heartbeatPeriod + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "BlockhashStoreSpec", + "coordinatorV1Address": "0x613a38AC1659769640aaE063C651F48E0250454C", + "coordinatorV2Address": "0x2fcA960AF066cAc46085588a66dA2D614c7Cd337", + "coordinatorV2PlusAddress": "0x92B5e28Ac583812874e4271380c7d070C5FB6E6b", + "createdAt": "2021-01-01T00:00:00Z", + "evmChainID": "42", + "fromAddresses": ["0x3cCad4715152693fE3BC4460591e3D3Fbd071b42", "0xD479d7c994D298cA05bF270136ED9627b7E684D3"], + "pollPeriod": "1m0s", + "runTimeout": "37s", + "waitBlocks": 100, + "lookbackBlocks": 200, + "blockhashStoreAddress": "0xb26A6829D454336818477B946f03Fb21c9706f3A", + "trustedBlockhashStoreAddress": "0x0ad9FE7a58216242a8475ca92F222b0640E26B63", + "trustedBlockhashStoreBatchSize": 20, + "heartbeatPeriod": "7m30s" + } + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_BlockHeaderFeederSpec(t *testing.T) { + var ( + id = int32(1) + ) + coordinatorV1Address, err := ethkey.NewEIP55Address("0x613a38AC1659769640aaE063C651F48E0250454C") + require.NoError(t, err) + + coordinatorV2Address, err := ethkey.NewEIP55Address("0x2fcA960AF066cAc46085588a66dA2D614c7Cd337") + require.NoError(t, err) + + coordinatorV2PlusAddress, err := ethkey.NewEIP55Address("0x92B5e28Ac583812874e4271380c7d070C5FB6E6b") + require.NoError(t, err) + + fromAddress, err := ethkey.NewEIP55Address("0x3cCad4715152693fE3BC4460591e3D3Fbd071b42") + 
require.NoError(t, err) + + blockhashStoreAddress, err := ethkey.NewEIP55Address("0xb26A6829D454336818477B946f03Fb21c9706f3A") + require.NoError(t, err) + + batchBHSAddress, err := ethkey.NewEIP55Address("0xd23BAE30019853Caf1D08b4C03291b10AD7743Df") + require.NoError(t, err) + + testCases := []GQLTestCase{ + { + name: "block header feeder spec", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.BlockHeaderFeeder, + BlockHeaderFeederSpec: &job.BlockHeaderFeederSpec{ + CoordinatorV1Address: &coordinatorV1Address, + CoordinatorV2Address: &coordinatorV2Address, + CoordinatorV2PlusAddress: &coordinatorV2PlusAddress, + CreatedAt: f.Timestamp(), + EVMChainID: ubig.NewI(42), + FromAddresses: []ethkey.EIP55Address{fromAddress}, + PollPeriod: 1 * time.Minute, + RunTimeout: 37 * time.Second, + WaitBlocks: 100, + LookbackBlocks: 200, + BlockhashStoreAddress: blockhashStoreAddress, + BatchBlockhashStoreAddress: batchBHSAddress, + GetBlockhashesBatchSize: 5, + StoreBlockhashesBatchSize: 3, + }, + }, nil) + }, + query: ` + query GetJob { + job(id: "1") { + ... on Job { + spec { + __typename + ... 
on BlockHeaderFeederSpec { + coordinatorV1Address + coordinatorV2Address + coordinatorV2PlusAddress + createdAt + evmChainID + fromAddresses + pollPeriod + runTimeout + waitBlocks + lookbackBlocks + blockhashStoreAddress + batchBlockhashStoreAddress + getBlockhashesBatchSize + storeBlockhashesBatchSize + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "BlockHeaderFeederSpec", + "coordinatorV1Address": "0x613a38AC1659769640aaE063C651F48E0250454C", + "coordinatorV2Address": "0x2fcA960AF066cAc46085588a66dA2D614c7Cd337", + "coordinatorV2PlusAddress": "0x92B5e28Ac583812874e4271380c7d070C5FB6E6b", + "createdAt": "2021-01-01T00:00:00Z", + "evmChainID": "42", + "fromAddresses": ["0x3cCad4715152693fE3BC4460591e3D3Fbd071b42"], + "pollPeriod": "1m0s", + "runTimeout": "37s", + "waitBlocks": 100, + "lookbackBlocks": 200, + "blockhashStoreAddress": "0xb26A6829D454336818477B946f03Fb21c9706f3A", + "batchBlockhashStoreAddress": "0xd23BAE30019853Caf1D08b4C03291b10AD7743Df", + "getBlockhashesBatchSize": 5, + "storeBlockhashesBatchSize": 3 + } + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_BootstrapSpec(t *testing.T) { + var ( + id = int32(1) + ) + + testCases := []GQLTestCase{ + { + name: "Bootstrap spec", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.Bootstrap, + BootstrapSpec: &job.BootstrapSpec{ + ID: id, + ContractID: "0x613a38AC1659769640aaE063C651F48E0250454C", + Relay: "evm", + RelayConfig: map[string]interface{}{}, + MonitoringEndpoint: null.String{}, + BlockchainTimeout: models.Interval(2 * time.Minute), + ContractConfigTrackerPollInterval: models.Interval(2 * time.Minute), + ContractConfigConfirmations: 100, + CreatedAt: f.Timestamp(), + }, + }, nil) + }, + query: ` + query GetJob { + job(id: "1") { + ... on Job { + spec { + __typename + ... 
on BootstrapSpec { + id + contractID + relay + relayConfig + monitoringEndpoint + blockchainTimeout + contractConfigTrackerPollInterval + contractConfigConfirmations + createdAt + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "BootstrapSpec", + "id": "1", + "contractID": "0x613a38AC1659769640aaE063C651F48E0250454C", + "relay": "evm", + "relayConfig": {}, + "monitoringEndpoint": null, + "blockchainTimeout": "2m0s", + "contractConfigTrackerPollInterval": "2m0s", + "contractConfigConfirmations": 100, + "createdAt": "2021-01-01T00:00:00Z" + } + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_GatewaySpec(t *testing.T) { + var ( + id = int32(1) + ) + + gatewayConfig := map[string]interface{}{ + "NodeServerConfig": map[string]interface{}{}, + } + + testCases := []GQLTestCase{ + { + name: "Gateway spec", + authenticated: true, + before: func(f *gqlTestFramework) { + f.App.On("JobORM").Return(f.Mocks.jobORM) + f.Mocks.jobORM.On("FindJobWithoutSpecErrors", id).Return(job.Job{ + Type: job.Gateway, + GatewaySpec: &job.GatewaySpec{ + ID: id, + GatewayConfig: gatewayConfig, + CreatedAt: f.Timestamp(), + }, + }, nil) + }, + query: ` + query GetJob { + job(id: "1") { + ... on Job { + spec { + __typename + ... 
on GatewaySpec { + id + gatewayConfig + createdAt + } + } + } + } + } + `, + result: ` + { + "job": { + "spec": { + "__typename": "GatewaySpec", + "id": "1", + "gatewayConfig": {"NodeServerConfig": {}}, + "createdAt": "2021-01-01T00:00:00Z" + } + } + } + `, + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/task_run.go b/core/web/resolver/task_run.go new file mode 100644 index 00000000..e713b2bc --- /dev/null +++ b/core/web/resolver/task_run.go @@ -0,0 +1,61 @@ +package resolver + +import ( + "github.com/graph-gophers/graphql-go" + + "github.com/goplugin/pluginv3.0/v2/core/services/pipeline" +) + +type TaskRunResolver struct { + tr pipeline.TaskRun +} + +func NewTaskRun(tr pipeline.TaskRun) *TaskRunResolver { + return &TaskRunResolver{tr: tr} +} + +func NewTaskRuns(runs []pipeline.TaskRun) []*TaskRunResolver { + var resolvers []*TaskRunResolver + + for _, run := range runs { + resolvers = append(resolvers, NewTaskRun(run)) + } + + return resolvers +} + +func (r *TaskRunResolver) ID() graphql.ID { + return graphql.ID(r.tr.ID.String()) +} + +func (r *TaskRunResolver) Type() string { + return string(r.tr.Type) +} + +func (r *TaskRunResolver) Output() string { + val, err := r.tr.Output.MarshalJSON() + if err != nil { + return "error: unable to retrieve output" + } + return string(val) +} + +func (r *TaskRunResolver) Error() *string { + if r.tr.Error.Valid { + return r.tr.Error.Ptr() + } + + return nil +} + +func (r *TaskRunResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.tr.CreatedAt} +} + +func (r *TaskRunResolver) FinishedAt() *graphql.Time { + return &graphql.Time{Time: r.tr.FinishedAt.ValueOrZero()} +} + +func (r *TaskRunResolver) DotID() string { + return r.tr.GetDotID() +} diff --git a/core/web/resolver/testdata/config-empty-effective.toml b/core/web/resolver/testdata/config-empty-effective.toml new file mode 100644 index 00000000..2cac38a1 --- /dev/null +++ b/core/web/resolver/testdata/config-empty-effective.toml @@ -0,0 
+1,230 @@ +InsecureFastScrypt = false +RootDir = '~/.plugin' +ShutdownGracePeriod = '5s' + +[Feature] +FeedsManager = true +LogPoller = false +UICSAKeys = false + +[Database] +DefaultIdleInTxSessionTimeout = '1h0m0s' +DefaultLockTimeout = '15s' +DefaultQueryTimeout = '10s' +LogQueries = false +MaxIdleConns = 10 +MaxOpenConns = 20 +MigrateOnStartup = true + +[Database.Backup] +Dir = '' +Frequency = '1h0m0s' +Mode = 'none' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '10m0s' +MinReconnectInterval = '1m0s' +FallbackPollInterval = '30s' + +[Database.Lock] +Enabled = true +LeaseDuration = '10s' +LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = false +BufferSize = 100 +MaxBatchSize = 50 +SendInterval = '500ms' +SendTimeout = '10s' +UseBatchSend = true + +[AuditLogger] +Enabled = false +ForwardToUrl = '' +JsonWrapperKey = '' +Headers = [] + +[Log] +Level = 'info' +JSONConsole = false +UnixTS = false + +[Log.File] +Dir = '' +MaxSize = '5.12gb' +MaxAgeDays = 0 +MaxBackups = 1 + +[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = 'http://localhost:3000,http://localhost:6688' +BridgeResponseURL = '' +BridgeCacheTTL = '0s' +HTTPWriteTimeout = '10s' +HTTPPort = 6688 +SecureCookies = true +SessionTimeout = '15m0s' +SessionReaperExpiration = '240h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '0.0.0.0' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = '' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = '' +ActiveAttributeAllowedValue = '' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = '' +RPOrigin = '' + +[WebServer.RateLimit] +Authenticated = 1000 +AuthenticatedPeriod = '1m0s' 
+Unauthenticated = 5 +UnauthenticatedPeriod = '20s' + +[WebServer.TLS] +CertPath = '' +ForceRedirect = false +Host = '' +HTTPSPort = 6689 +KeyPath = '' +ListenIP = '0.0.0.0' + +[JobPipeline] +ExternalInitiatorsEnabled = false +MaxRunDuration = '10m0s' +MaxSuccessfulRuns = 10000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '24h0m0s' +ResultWriteQueueDepth = 100 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '15s' +MaxSize = '32.77kb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false + +[OCR2] +Enabled = false +ContractConfirmations = 3 +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = false +ObservationTimeout = '5s' +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +DefaultTransactionQueueDepth = 1 +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +SimulateTransactions = false +TransmitterAddress = '' +CaptureEATelemetry = false +TraceLogging = false + +[P2P] +IncomingMessageBufferSize = 10 +OutgoingMessageBufferSize = 10 +PeerID = '' +TraceLogging = false + +[P2P.V2] +Enabled = true +AnnounceAddresses = [] +DefaultBootstrappers = [] +DeltaDial = '15s' +DeltaReconcile = '1m0s' +ListenAddresses = [] + +[Keeper] +DefaultTransactionQueueDepth = 1 +GasPriceBufferPercent = 20 +GasTipCapBufferPercent = 20 +BaseFeeBufferPercent = 20 +MaxGracePeriod = 100 +TurnLookBack = 1000 + +[Keeper.Registry] +CheckGasOverhead = 200000 +PerformGasOverhead = 300000 +MaxPerformDataSize = 5000 +SyncInterval = '30m0s' +SyncUpkeepQueueSize = 10 + +[AutoPprof] +Enabled = false +ProfileRoot = '' +PollInterval = '10s' 
+GatherDuration = '10s' +GatherTraceDuration = '5s' +MaxProfileSize = '100.00mb' +CPUProfileRate = 1 +MemProfileRate = 1 +BlockProfileRate = 1 +MutexProfileFraction = 1 +MemThreshold = '4.00gb' +GoroutineThreshold = 5000 + +[Pyroscope] +ServerAddress = '' +Environment = 'mainnet' + +[Sentry] +Debug = false +DSN = '' +Environment = '' +Release = '' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = false +CollectorTarget = '' +NodeID = '' +SamplingRatio = 0.0 +Mode = 'tls' +TLSCertPath = '' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1s' +MaxStaleAge = '1h0m0s' +LatestReportDeadline = '5s' + +[Mercury.TLS] +CertFile = '' diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml new file mode 100644 index 00000000..e1be48d2 --- /dev/null +++ b/core/web/resolver/testdata/config-full.toml @@ -0,0 +1,422 @@ +InsecureFastScrypt = true +RootDir = 'test/root/dir' +ShutdownGracePeriod = '10s' + +[Feature] +FeedsManager = true +LogPoller = true +UICSAKeys = true + +[Database] +DefaultIdleInTxSessionTimeout = '1m0s' +DefaultLockTimeout = '1h0m0s' +DefaultQueryTimeout = '1s' +LogQueries = true +MaxIdleConns = 7 +MaxOpenConns = 13 +MigrateOnStartup = true + +[Database.Backup] +Dir = 'test/backup/dir' +Frequency = '1h0m0s' +Mode = 'full' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '1m0s' +MinReconnectInterval = '5m0s' +FallbackPollInterval = '2m0s' + +[Database.Lock] +Enabled = false +LeaseDuration = '1m0s' +LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = true +BufferSize = 1234 +MaxBatchSize = 4321 +SendInterval = '1m0s' +SendTimeout = '5s' +UseBatchSend = true + +[[TelemetryIngress.Endpoints]] +Network = 'EVM' +ChainID = '1' +URL = 'endpoint-1.test' +ServerPubKey = 'test-pub-key-1' + +[AuditLogger] +Enabled = true +ForwardToUrl = 'http://localhost:9898' +JsonWrapperKey = 
'event' +Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and a bar+*'] + +[Log] +Level = 'crit' +JSONConsole = true +UnixTS = true + +[Log.File] +Dir = 'log/file/dir' +MaxSize = '100.00gb' +MaxAgeDays = 17 +MaxBackups = 9 + +[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = '*' +BridgeResponseURL = 'https://bridge.response' +BridgeCacheTTL = '10s' +HTTPWriteTimeout = '1m0s' +HTTPPort = 56 +SecureCookies = true +SessionTimeout = '1h0m0s' +SessionReaperExpiration = '168h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '192.158.1.37' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = '' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = '' +ActiveAttributeAllowedValue = '' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = 'test-rpid' +RPOrigin = 'test-rp-origin' + +[WebServer.RateLimit] +Authenticated = 42 +AuthenticatedPeriod = '1s' +Unauthenticated = 7 +UnauthenticatedPeriod = '1m0s' + +[WebServer.TLS] +CertPath = 'tls/cert/path' +ForceRedirect = true +Host = 'tls-host' +HTTPSPort = 6789 +KeyPath = 'tls/key/path' +ListenIP = '192.158.1.37' + +[JobPipeline] +ExternalInitiatorsEnabled = true +MaxRunDuration = '1h0m0s' +MaxSuccessfulRuns = 123456 +ReaperInterval = '4h0m0s' +ReaperThreshold = '168h0m0s' +ResultWriteQueueDepth = 10 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '1m0s' +MaxSize = '100.00mb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 100 +SimulateTransactions = true + +[OCR2] +Enabled = true +ContractConfirmations = 11 +BlockchainTimeout = '3s' +ContractPollInterval = '1h0m0s' +ContractSubscribeInterval = '1m0s' +ContractTransmitterTransmitTimeout = '1m0s' +DatabaseTimeout = '8s' 
+KeyBundleID = '7a5f66bbe6594259325bf2b4f5b1a9c900000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = true +ObservationTimeout = '11s' +BlockchainTimeout = '3s' +ContractPollInterval = '1h0m0s' +ContractSubscribeInterval = '1m0s' +DefaultTransactionQueueDepth = 12 +KeyBundleID = 'acdd42797a8b921b2910497badc5000600000000000000000000000000000000' +SimulateTransactions = true +TransmitterAddress = '0xa0788FC17B1dEe36f057c42B6F373A34B014687e' +CaptureEATelemetry = false +TraceLogging = false + +[P2P] +IncomingMessageBufferSize = 13 +OutgoingMessageBufferSize = 17 +PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw' +TraceLogging = true + +[P2P.V2] +Enabled = false +AnnounceAddresses = ['a', 'b', 'c'] +DefaultBootstrappers = ['12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw@foo:42/bar:10', '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw@test:99'] +DeltaDial = '1m0s' +DeltaReconcile = '1s' +ListenAddresses = ['foo', 'bar'] + +[Keeper] +DefaultTransactionQueueDepth = 17 +GasPriceBufferPercent = 12 +GasTipCapBufferPercent = 43 +BaseFeeBufferPercent = 89 +MaxGracePeriod = 31 +TurnLookBack = 91 + +[Keeper.Registry] +CheckGasOverhead = 90 +PerformGasOverhead = 4294967295 +MaxPerformDataSize = 5000 +SyncInterval = '1h0m0s' +SyncUpkeepQueueSize = 31 + +[AutoPprof] +Enabled = true +ProfileRoot = 'prof/root' +PollInterval = '1m0s' +GatherDuration = '12s' +GatherTraceDuration = '13s' +MaxProfileSize = '1.00gb' +CPUProfileRate = 7 +MemProfileRate = 9 +BlockProfileRate = 5 +MutexProfileFraction = 2 +MemThreshold = '1.00gb' +GoroutineThreshold = 999 + +[Pyroscope] +ServerAddress = 'http://localhost:4040' +Environment = 'tests' + +[Sentry] +Debug = true +DSN = 'sentry-dsn' +Environment = 'dev' +Release = 'v1.2.3' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false 
+DisableRateLimiting = false + +[Tracing] +Enabled = false +CollectorTarget = 'localhost:4317' +NodeID = 'NodeID' +SamplingRatio = 1.0 +Mode = 'tls' +TLSCertPath = '/path/to/cert.pem' + +[Tracing.Attributes] +env = 'dev' +test = 'load' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1m40s' +MaxStaleAge = '1m41s' +LatestReportDeadline = '1m42s' + +[Mercury.TLS] +CertFile = '' + +[[EVM]] +ChainID = '1' +Enabled = false +AutoCreateKey = false +BlockBackfillDepth = 100 +BlockBackfillSkip = true +ChainType = 'Optimism' +FinalityDepth = 42 +FinalityTagEnabled = false +FlagsContractAddress = '0xae4E781a6218A8031764928E88d457937A954fC3' +LinkContractAddress = '0x538aAaB4ea120b2bC2fe5D296852D948F07D849e' +LogBackfillBatchSize = 17 +LogPollInterval = '1m0s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 13 +MinContractPayment = '9.223372036854775807 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '1m0s' +OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' +RPCDefaultBatchSize = 17 +RPCBlockQueryDelay = 10 + +[EVM.Transactions] +ForwardersEnabled = true +MaxInFlight = 19 +MaxQueued = 99 +ReaperInterval = '1m0s' +ReaperThreshold = '1m0s' +ResendAfterThreshold = '1h0m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '9.223372036854775807 ether' +PriceMax = '281.474976710655 micro' +PriceMin = '13 wei' +LimitDefault = 12 +LimitMax = 17 +LimitMultiplier = '1.234' +LimitTransfer = 100 +BumpMin = '100 wei' +BumpPercent = 10 +BumpThreshold = 6 +BumpTxDepth = 6 +EIP1559DynamicFees = true +FeeCapDefault = '9.223372036854775807 ether' +TipCapDefault = '2 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.LimitJobType] +OCR = 1001 +DR = 1002 +VRF = 1003 +FM = 1004 +Keeper = 1005 + +[EVM.GasEstimator.BlockHistory] +BatchSize = 17 +BlockHistorySize = 12 +CheckInclusionBlocks = 18 +CheckInclusionPercentile = 19 +EIP1559FeeCapBufferBlocks = 13 +TransactionPercentile = 15 + +[EVM.HeadTracker] +HistoryDepth = 
15 +MaxBufferSize = 17 +SamplingInterval = '1h0m0s' + +[[EVM.KeySpecific]] +Key = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' + +[EVM.KeySpecific.GasEstimator] +PriceMax = '79.228162514264337593543950335 gether' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '1m0s' +SelectionMode = 'HighestHead' +SyncThreshold = 13 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 11 +ContractTransmitterTransmitTimeout = '1m0s' +DatabaseTimeout = '1s' +DeltaCOverride = '1h0m0s' +DeltaCJitterOverride = '1s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 540 + +[[EVM.Nodes]] +Name = 'foo' +WSURL = 'wss://web.socket/test/foo' +HTTPURL = 'https://foo.web' + +[[EVM.Nodes]] +Name = 'bar' +WSURL = 'wss://web.socket/test/bar' +HTTPURL = 'https://bar.com' + +[[EVM.Nodes]] +Name = 'broadcast' +HTTPURL = 'http://broadcast.mirror' +SendOnly = true + +[[Cosmos]] +ChainID = 'Malaga-420' +Enabled = true +Bech32Prefix = 'wasm' +BlockRate = '1m0s' +BlocksUntilTxTimeout = 12 +ConfirmPollPeriod = '1s' +FallbackGasPrice = '0.001' +GasToken = 'ucosm' +GasLimitMultiplier = '1.2' +MaxMsgsPerBatch = 17 +OCR2CachePollPeriod = '1m0s' +OCR2CacheTTL = '1h0m0s' +TxMsgTimeout = '1s' + +[[Cosmos.Nodes]] +Name = 'primary' +TendermintURL = 'http://tender.mint' + +[[Cosmos.Nodes]] +Name = 'foo' +TendermintURL = 'http://foo.url' + +[[Cosmos.Nodes]] +Name = 'bar' +TendermintURL = 'http://bar.web' + +[[Solana]] +ChainID = 'mainnet' +Enabled = false +BalancePollPeriod = '1m0s' +ConfirmPollPeriod = '1s' +OCR2CachePollPeriod = '1m0s' +OCR2CacheTTL = '1h0m0s' +TxTimeout = '1h0m0s' +TxRetryTimeout = '1m0s' +TxConfirmTimeout = '1s' +SkipPreflight = true +Commitment = 'banana' +MaxRetries = 7 +FeeEstimatorMode = 'fixed' +ComputeUnitPriceMax = 1000000 +ComputeUnitPriceMin = 0 +ComputeUnitPriceDefault = 0 +FeeBumpPeriod = '3s' + +[[Solana.Nodes]] +Name = 'primary' +URL = 'http://solana.web' + +[[Solana.Nodes]] +Name = 'foo' +URL = 'http://solana.foo' + +[[Solana.Nodes]] 
+Name = 'bar' +URL = 'http://solana.bar' + +[[Starknet]] +ChainID = 'foobar' +Enabled = true +OCR2CachePollPeriod = '6h0m0s' +OCR2CacheTTL = '3m0s' +RequestTimeout = '1m3s' +TxTimeout = '13s' +ConfirmationPoll = '42s' + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://stark.node' diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml new file mode 100644 index 00000000..8c83b3d8 --- /dev/null +++ b/core/web/resolver/testdata/config-multi-chain-effective.toml @@ -0,0 +1,572 @@ +InsecureFastScrypt = false +RootDir = 'my/root/dir' +ShutdownGracePeriod = '5s' + +[Feature] +FeedsManager = true +LogPoller = false +UICSAKeys = false + +[Database] +DefaultIdleInTxSessionTimeout = '1h0m0s' +DefaultLockTimeout = '15s' +DefaultQueryTimeout = '10s' +LogQueries = false +MaxIdleConns = 10 +MaxOpenConns = 20 +MigrateOnStartup = true + +[Database.Backup] +Dir = '' +Frequency = '1h0m0s' +Mode = 'none' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '10m0s' +MinReconnectInterval = '1m0s' +FallbackPollInterval = '2m0s' + +[Database.Lock] +Enabled = true +LeaseDuration = '10s' +LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = false +BufferSize = 100 +MaxBatchSize = 50 +SendInterval = '500ms' +SendTimeout = '10s' +UseBatchSend = true + +[AuditLogger] +Enabled = true +ForwardToUrl = 'http://localhost:9898' +JsonWrapperKey = 'event' +Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and a bar+*'] + +[Log] +Level = 'panic' +JSONConsole = true +UnixTS = false + +[Log.File] +Dir = '' +MaxSize = '5.12gb' +MaxAgeDays = 0 +MaxBackups = 1 + +[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = 'http://localhost:3000,http://localhost:6688' +BridgeResponseURL = '' +BridgeCacheTTL = '0s' +HTTPWriteTimeout = '10s' +HTTPPort = 6688 +SecureCookies = true +SessionTimeout = '15m0s' +SessionReaperExpiration = '240h0m0s' +HTTPMaxSize = 
'32.77kb' +StartTimeout = '15s' +ListenIP = '0.0.0.0' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = '' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = '' +ActiveAttributeAllowedValue = '' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = '' +RPOrigin = '' + +[WebServer.RateLimit] +Authenticated = 1000 +AuthenticatedPeriod = '1m0s' +Unauthenticated = 5 +UnauthenticatedPeriod = '20s' + +[WebServer.TLS] +CertPath = '' +ForceRedirect = false +Host = '' +HTTPSPort = 6689 +KeyPath = '' +ListenIP = '0.0.0.0' + +[JobPipeline] +ExternalInitiatorsEnabled = false +MaxRunDuration = '10m0s' +MaxSuccessfulRuns = 10000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '24h0m0s' +ResultWriteQueueDepth = 100 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '30s' +MaxSize = '32.77kb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false + +[OCR2] +Enabled = true +ContractConfirmations = 3 +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '20s' +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = true +ObservationTimeout = '5s' +BlockchainTimeout = '5s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +DefaultTransactionQueueDepth = 1 +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +SimulateTransactions = false +TransmitterAddress = '' +CaptureEATelemetry = false +TraceLogging = false + 
+[P2P] +IncomingMessageBufferSize = 999 +OutgoingMessageBufferSize = 10 +PeerID = '' +TraceLogging = false + +[P2P.V2] +Enabled = true +AnnounceAddresses = [] +DefaultBootstrappers = [] +DeltaDial = '15s' +DeltaReconcile = '1m0s' +ListenAddresses = [] + +[Keeper] +DefaultTransactionQueueDepth = 1 +GasPriceBufferPercent = 10 +GasTipCapBufferPercent = 20 +BaseFeeBufferPercent = 20 +MaxGracePeriod = 100 +TurnLookBack = 1000 + +[Keeper.Registry] +CheckGasOverhead = 200000 +PerformGasOverhead = 300000 +MaxPerformDataSize = 5000 +SyncInterval = '30m0s' +SyncUpkeepQueueSize = 10 + +[AutoPprof] +Enabled = false +ProfileRoot = '' +PollInterval = '10s' +GatherDuration = '10s' +GatherTraceDuration = '5s' +MaxProfileSize = '100.00mb' +CPUProfileRate = 7 +MemProfileRate = 1 +BlockProfileRate = 1 +MutexProfileFraction = 1 +MemThreshold = '4.00gb' +GoroutineThreshold = 5000 + +[Pyroscope] +ServerAddress = '' +Environment = 'mainnet' + +[Sentry] +Debug = false +DSN = '' +Environment = '' +Release = '' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = false +CollectorTarget = '' +NodeID = '' +SamplingRatio = 0.0 +Mode = 'tls' +TLSCertPath = '' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1s' +MaxStaleAge = '1h0m0s' +LatestReportDeadline = '5s' + +[Mercury.TLS] +CertFile = '' + +[[EVM]] +ChainID = '1' +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 26 +FinalityTagEnabled = false +LinkContractAddress = '0x514910771AF9Ca656af840dff83E8264EcF986CA' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[EVM.Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 
+ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[EVM.HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 5400000 + +[[EVM.Nodes]] +Name = 'primary' +WSURL = 'wss://web.socket/mainnet' + +[[EVM.Nodes]] +Name = 'secondary' +HTTPURL = 'http://broadcast.mirror' +SendOnly = true + +[[EVM]] +ChainID = '42' +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0xa36085F69e2889c224210F603D836748e7dC0088' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[EVM.Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 
+ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '9.223372036854775807 ether' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[EVM.HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 5400000 + +[[EVM.Nodes]] +Name = 'foo' +WSURL = 'wss://web.socket/test/foo' + +[[EVM]] +ChainID = '137' +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 500 +FinalityTagEnabled = false +LinkContractAddress = '0xb0897686c545045aFc77CF20eC7A532E3120E0F1' +LogBackfillBatchSize = 1000 +LogPollInterval = '1s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 5 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 100 +RPCBlockQueryDelay = 10 + +[EVM.Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 5000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[EVM.BalanceMonitor] +Enabled = true + 
+[EVM.GasEstimator] +Mode = 'FixedPrice' +PriceDefault = '30 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '30 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '20 gwei' +BumpPercent = 20 +BumpThreshold = 5 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[EVM.HeadTracker] +HistoryDepth = 2000 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 5400000 + +[[EVM.Nodes]] +Name = 'bar' +WSURL = 'wss://web.socket/test/bar' + +[[Cosmos]] +ChainID = 'Ibiza-808' +Bech32Prefix = 'wasm' +BlockRate = '6s' +BlocksUntilTxTimeout = 30 +ConfirmPollPeriod = '1s' +FallbackGasPrice = '0.015' +GasToken = 'ucosm' +GasLimitMultiplier = '1.5' +MaxMsgsPerBatch = 13 +OCR2CachePollPeriod = '4s' +OCR2CacheTTL = '1m0s' +TxMsgTimeout = '10m0s' + +[[Cosmos.Nodes]] +Name = 'primary' +TendermintURL = 'http://columbus.cosmos.com' + +[[Cosmos]] +ChainID = 'Malaga-420' +Bech32Prefix = 'wasm' +BlockRate = '6s' +BlocksUntilTxTimeout = 20 +ConfirmPollPeriod = '1s' +FallbackGasPrice = '0.015' +GasToken = 'ucosm' +GasLimitMultiplier = '1.5' +MaxMsgsPerBatch = 100 +OCR2CachePollPeriod = '4s' +OCR2CacheTTL = '1m0s' +TxMsgTimeout = '10m0s' + +[[Cosmos.Nodes]] +Name = 'secondary' +TendermintURL = 'http://bombay.cosmos.com' + +[[Solana]] +ChainID = 'mainnet' +BalancePollPeriod = '5s' 
+ConfirmPollPeriod = '500ms' +OCR2CachePollPeriod = '1s' +OCR2CacheTTL = '1m0s' +TxTimeout = '1m0s' +TxRetryTimeout = '10s' +TxConfirmTimeout = '30s' +SkipPreflight = true +Commitment = 'confirmed' +MaxRetries = 12 +FeeEstimatorMode = 'fixed' +ComputeUnitPriceMax = 1000 +ComputeUnitPriceMin = 0 +ComputeUnitPriceDefault = 0 +FeeBumpPeriod = '3s' + +[[Solana.Nodes]] +Name = 'primary' +URL = 'http://mainnet.solana.com' + +[[Solana]] +ChainID = 'testnet' +BalancePollPeriod = '5s' +ConfirmPollPeriod = '500ms' +OCR2CachePollPeriod = '1m0s' +OCR2CacheTTL = '1m0s' +TxTimeout = '1m0s' +TxRetryTimeout = '10s' +TxConfirmTimeout = '30s' +SkipPreflight = true +Commitment = 'confirmed' +MaxRetries = 0 +FeeEstimatorMode = 'fixed' +ComputeUnitPriceMax = 1000 +ComputeUnitPriceMin = 0 +ComputeUnitPriceDefault = 0 +FeeBumpPeriod = '3s' + +[[Solana.Nodes]] +Name = 'secondary' +URL = 'http://testnet.solana.com' + +[[Starknet]] +ChainID = 'foobar' +OCR2CachePollPeriod = '5s' +OCR2CacheTTL = '1m0s' +RequestTimeout = '10s' +TxTimeout = '10s' +ConfirmationPoll = '1h0m0s' + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://stark.node' diff --git a/core/web/resolver/testdata/config-multi-chain.toml b/core/web/resolver/testdata/config-multi-chain.toml new file mode 100644 index 00000000..543fb315 --- /dev/null +++ b/core/web/resolver/testdata/config-multi-chain.toml @@ -0,0 +1,114 @@ +RootDir = 'my/root/dir' + +[Database] +[Database.Listener] +FallbackPollInterval = '2m0s' + +[AuditLogger] +Enabled = true +ForwardToUrl = 'http://localhost:9898' +JsonWrapperKey = 'event' +Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and a bar+*'] + +[Log] +Level = 'panic' +JSONConsole = true + +[JobPipeline] +[JobPipeline.HTTPRequest] +DefaultTimeout = '30s' + +[OCR2] +Enabled = true +DatabaseTimeout = '20s' + +[OCR] +Enabled = true +BlockchainTimeout = '5s' + +[P2P] +IncomingMessageBufferSize = 999 + +[Keeper] +GasPriceBufferPercent = 10 + +[AutoPprof] +CPUProfileRate = 7 + 
+[[EVM]] +ChainID = '1' +FinalityDepth = 26 +FinalityTagEnabled = false + +[[EVM.Nodes]] +Name = 'primary' +WSURL = 'wss://web.socket/mainnet' + +[[EVM.Nodes]] +Name = 'secondary' +HTTPURL = 'http://broadcast.mirror' +SendOnly = true + +[[EVM]] +ChainID = '42' + +[EVM.GasEstimator] +PriceDefault = '9.223372036854775807 ether' + +[[EVM.Nodes]] +Name = 'foo' +WSURL = 'wss://web.socket/test/foo' + +[[EVM]] +ChainID = '137' + +[EVM.GasEstimator] +Mode = 'FixedPrice' + +[[EVM.Nodes]] +Name = 'bar' +WSURL = 'wss://web.socket/test/bar' + +[[Cosmos]] +ChainID = 'Ibiza-808' +Bech32Prefix = 'wasm' +GasToken = 'ucosm' +MaxMsgsPerBatch = 13 + +[[Cosmos.Nodes]] +Name = 'primary' +TendermintURL = 'http://columbus.cosmos.com' + +[[Cosmos]] +ChainID = 'Malaga-420' +Bech32Prefix = 'wasm' +BlocksUntilTxTimeout = 20 +GasToken = 'ucosm' + +[[Cosmos.Nodes]] +Name = 'secondary' +TendermintURL = 'http://bombay.cosmos.com' + +[[Solana]] +ChainID = 'mainnet' +MaxRetries = 12 + +[[Solana.Nodes]] +Name = 'primary' +URL = 'http://mainnet.solana.com' + +[[Solana]] +ChainID = 'testnet' +OCR2CachePollPeriod = '1m0s' + +[[Solana.Nodes]] +Name = 'secondary' +URL = 'http://testnet.solana.com' + +[[Starknet]] +ChainID = 'foobar' +ConfirmationPoll = '1h0m0s' + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://stark.node' diff --git a/core/web/resolver/user.go b/core/web/resolver/user.go new file mode 100644 index 00000000..de8c5175 --- /dev/null +++ b/core/web/resolver/user.go @@ -0,0 +1,90 @@ +package resolver + +import ( + "github.com/graph-gophers/graphql-go" + + "github.com/goplugin/pluginv3.0/v2/core/sessions" +) + +type clearSessionsError struct{} + +func (e clearSessionsError) Error() string { + return "failed to clear non current user sessions" +} + +type failedPasswordUpdateError struct{} + +func (e failedPasswordUpdateError) Error() string { + return "failed to update current user password" +} + +// UserResolver resolves the User type +type UserResolver struct { + user *sessions.User +} 
+ +func NewUser(user *sessions.User) *UserResolver { + return &UserResolver{user: user} +} + +// Email resolves the user's email +func (r *UserResolver) Email() string { + return r.user.Email +} + +// CreatedAt resolves the user's creation date +func (r *UserResolver) CreatedAt() graphql.Time { + return graphql.Time{Time: r.user.CreatedAt} +} + +// -- UpdatePassword Mutation -- + +type UpdatePasswordInput struct { + OldPassword string + NewPassword string +} + +// UpdatePasswordPayloadResolver resolves the payload type +type UpdatePasswordPayloadResolver struct { + user *sessions.User + // inputErrors maps an input path to a string + inputErrs map[string]string +} + +func NewUpdatePasswordPayload(user *sessions.User, inputErrs map[string]string) *UpdatePasswordPayloadResolver { + return &UpdatePasswordPayloadResolver{user: user, inputErrs: inputErrs} +} + +func (r *UpdatePasswordPayloadResolver) ToUpdatePasswordSuccess() (*UpdatePasswordSuccessResolver, bool) { + if r.user == nil { + return nil, false + } + + return NewUpdatePasswordSuccess(r.user), true +} + +func (r *UpdatePasswordPayloadResolver) ToInputErrors() (*InputErrorsResolver, bool) { + if r.inputErrs != nil { + var errs []*InputErrorResolver + + for path, message := range r.inputErrs { + errs = append(errs, NewInputError(path, message)) + } + + return NewInputErrors(errs), true + } + + return nil, false +} + +type UpdatePasswordSuccessResolver struct { + user *sessions.User +} + +func NewUpdatePasswordSuccess(user *sessions.User) *UpdatePasswordSuccessResolver { + return &UpdatePasswordSuccessResolver{user: user} +} + +func (r *UpdatePasswordSuccessResolver) User() *UserResolver { + return NewUser(r.user) +} diff --git a/core/web/resolver/user_test.go b/core/web/resolver/user_test.go new file mode 100644 index 00000000..15a6f6ad --- /dev/null +++ b/core/web/resolver/user_test.go @@ -0,0 +1,162 @@ +package resolver + +import ( + "testing" + + gqlerrors "github.com/graph-gophers/graphql-go/errors" + 
"github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/utils" + "github.com/goplugin/pluginv3.0/v2/core/web/auth" +) + +func TestResolver_UpdateUserPassword(t *testing.T) { + t.Parallel() + + mutation := ` + mutation UpdateUserPassword($input: UpdatePasswordInput!) { + updateUserPassword(input: $input) { + ... on UpdatePasswordSuccess { + user { + email + } + } + ... on InputErrors { + errors { + path + message + code + } + } + } + }` + oldPassword := "old" + variables := map[string]interface{}{ + "input": map[string]interface{}{ + "newPassword": "new", + "oldPassword": oldPassword, + }, + } + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "updateUserPassword"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + session, ok := auth.GetGQLAuthenticatedSession(f.Ctx) + require.True(t, ok) + require.NotNil(t, session) + + pwd, err := utils.HashPassword(oldPassword) + require.NoError(t, err) + + session.User.HashedPassword = pwd + + f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil) + f.Mocks.authProvider.On("SetPassword", session.User, "new").Return(nil) + f.Mocks.authProvider.On("ClearNonCurrentSessions", session.SessionID).Return(nil) + f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider) + }, + query: mutation, + variables: variables, + result: ` + { + "updateUserPassword": { + "user": { + "email": "gqltester@chain.link" + } + } + }`, + }, + { + name: "update password match error", + authenticated: true, + before: func(f *gqlTestFramework) { + session, ok := auth.GetGQLAuthenticatedSession(f.Ctx) + require.True(t, ok) + require.NotNil(t, session) + + session.User.HashedPassword = "random-string" + + f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil) + f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider) + }, + query: mutation, + variables: variables, + result: ` 
+ { + "updateUserPassword": { + "errors": [{ + "path": "oldPassword", + "message": "old password does not match", + "code": "INVALID_INPUT" + }] + } + }`, + }, + { + name: "failed to clear session error", + authenticated: true, + before: func(f *gqlTestFramework) { + session, ok := auth.GetGQLAuthenticatedSession(f.Ctx) + require.True(t, ok) + require.NotNil(t, session) + + pwd, err := utils.HashPassword(oldPassword) + require.NoError(t, err) + + session.User.HashedPassword = pwd + + f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil) + f.Mocks.authProvider.On("ClearNonCurrentSessions", session.SessionID).Return( + clearSessionsError{}, + ) + f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: clearSessionsError{}, + Path: []interface{}{"updateUserPassword"}, + Message: "failed to clear non current user sessions", + }, + }, + }, + { + name: "failed to update current user password error", + authenticated: true, + before: func(f *gqlTestFramework) { + session, ok := auth.GetGQLAuthenticatedSession(f.Ctx) + require.True(t, ok) + require.NotNil(t, session) + + pwd, err := utils.HashPassword(oldPassword) + require.NoError(t, err) + + session.User.HashedPassword = pwd + + f.Mocks.authProvider.On("FindUser", session.User.Email).Return(*session.User, nil) + f.Mocks.authProvider.On("ClearNonCurrentSessions", session.SessionID).Return(nil) + f.Mocks.authProvider.On("SetPassword", session.User, "new").Return(failedPasswordUpdateError{}) + f.App.On("AuthenticationProvider").Return(f.Mocks.authProvider) + }, + query: mutation, + variables: variables, + result: `null`, + errors: []*gqlerrors.QueryError{ + { + Extensions: nil, + ResolverError: failedPasswordUpdateError{}, + Path: []interface{}{"updateUserPassword"}, + Message: "failed to update current user password", + }, + }, + }, + } + + 
RunGQLTests(t, testCases) +} diff --git a/core/web/resolver/vrf.go b/core/web/resolver/vrf.go new file mode 100644 index 00000000..8b99da20 --- /dev/null +++ b/core/web/resolver/vrf.go @@ -0,0 +1,156 @@ +package resolver + +import ( + "github.com/graph-gophers/graphql-go" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" +) + +type VRFKeyResolver struct { + key vrfkey.KeyV2 +} + +func NewVRFKeyResolver(key vrfkey.KeyV2) VRFKeyResolver { + return VRFKeyResolver{key: key} +} + +// ID returns the ID of the VRF key, which is the public key. +func (k VRFKeyResolver) ID() graphql.ID { + return graphql.ID(k.key.ID()) +} + +// Compressed returns the compressed version of the public key. +func (k VRFKeyResolver) Compressed() string { + return k.key.PublicKey.String() +} + +func (k VRFKeyResolver) Uncompressed() string { + // It's highly unlikely that this call will return an error. + // If it does, we'd likely have issues all throughout the application. + // However, it's still good practice to handle the error that is returned + // rather than completely ignoring it. + uncompressed, err := k.key.PublicKey.StringUncompressed() + if err != nil { + uncompressed = "error: unable to uncompress public key" + } + return uncompressed +} + +// Hash returns the hash of the VRF public key. +func (k VRFKeyResolver) Hash() string { + var hashStr string + + // It's highly unlikely that this call will return an error. + // If it does, we'd likely have issues all throughout the application. + // However, it's still good practice to handle the error that is returned + // rather than completely ignoring it. 
+ hash, err := k.key.PublicKey.Hash() + if err != nil { + hashStr = "error: unable to get public key hash" + } else { + hashStr = hash.String() + } + return hashStr +} + +type VRFKeySuccessResolver struct { + key vrfkey.KeyV2 +} + +func NewVRFKeySuccessResolver(key vrfkey.KeyV2) *VRFKeySuccessResolver { + return &VRFKeySuccessResolver{key: key} +} + +func (r *VRFKeySuccessResolver) Key() VRFKeyResolver { + return NewVRFKeyResolver(r.key) +} + +type VRFKeyPayloadResolver struct { + key vrfkey.KeyV2 + NotFoundErrorUnionType +} + +func NewVRFKeyPayloadResolver(key vrfkey.KeyV2, err error) *VRFKeyPayloadResolver { + var e NotFoundErrorUnionType + + if err != nil { + e = NotFoundErrorUnionType{err: err, message: err.Error(), isExpectedErrorFn: func(err error) bool { + return errors.Is(errors.Cause(err), keystore.ErrMissingVRFKey) + }} + } + + return &VRFKeyPayloadResolver{key: key, NotFoundErrorUnionType: e} +} + +func (r *VRFKeyPayloadResolver) ToVRFKeySuccess() (*VRFKeySuccessResolver, bool) { + if r.err == nil { + return NewVRFKeySuccessResolver(r.key), true + } + return nil, false +} + +type VRFKeysPayloadResolver struct { + keys []vrfkey.KeyV2 +} + +func NewVRFKeysPayloadResolver(keys []vrfkey.KeyV2) *VRFKeysPayloadResolver { + return &VRFKeysPayloadResolver{keys: keys} +} + +func (r *VRFKeysPayloadResolver) Results() []VRFKeyResolver { + var results []VRFKeyResolver + for _, k := range r.keys { + results = append(results, NewVRFKeyResolver(k)) + } + return results +} + +type CreateVRFKeyPayloadResolver struct { + key vrfkey.KeyV2 +} + +func NewCreateVRFKeyPayloadResolver(key vrfkey.KeyV2) *CreateVRFKeyPayloadResolver { + return &CreateVRFKeyPayloadResolver{key: key} +} + +func (r *CreateVRFKeyPayloadResolver) Key() VRFKeyResolver { + return NewVRFKeyResolver(r.key) +} + +type DeleteVRFKeySuccessResolver struct { + key vrfkey.KeyV2 +} + +func NewDeleteVRFKeySuccessResolver(key vrfkey.KeyV2) *DeleteVRFKeySuccessResolver { + return &DeleteVRFKeySuccessResolver{key: 
key} +} + +func (r *DeleteVRFKeySuccessResolver) Key() VRFKeyResolver { + return NewVRFKeyResolver(r.key) +} + +type DeleteVRFKeyPayloadResolver struct { + key vrfkey.KeyV2 + NotFoundErrorUnionType +} + +func NewDeleteVRFKeyPayloadResolver(key vrfkey.KeyV2, err error) *DeleteVRFKeyPayloadResolver { + var e NotFoundErrorUnionType + + if err != nil { + e = NotFoundErrorUnionType{err: err, message: err.Error(), isExpectedErrorFn: func(err error) bool { + return errors.Is(errors.Cause(err), keystore.ErrMissingVRFKey) + }} + } + + return &DeleteVRFKeyPayloadResolver{key: key, NotFoundErrorUnionType: e} +} + +func (r *DeleteVRFKeyPayloadResolver) ToDeleteVRFKeySuccess() (*DeleteVRFKeySuccessResolver, bool) { + if r.err == nil { + return NewDeleteVRFKeySuccessResolver(r.key), true + } + return nil, false +} diff --git a/core/web/resolver/vrf_test.go b/core/web/resolver/vrf_test.go new file mode 100644 index 00000000..82fd6e62 --- /dev/null +++ b/core/web/resolver/vrf_test.go @@ -0,0 +1,298 @@ +package resolver + +import ( + "encoding/json" + "fmt" + "math/big" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/vrfkey" +) + +func TestResolver_GetVRFKey(t *testing.T) { + t.Parallel() + + query := ` + query GetVRFKey($id: ID!) { + vrfKey(id: $id) { + ... on VRFKeySuccess { + key { + id + compressed + uncompressed + hash + } + } + + ... 
on NotFoundError { + message + code + } + } + } + ` + + fakeKey := vrfkey.MustNewV2XXXTestingOnly(big.NewInt(1)) + + uncompressed, err := fakeKey.PublicKey.StringUncompressed() + assert.NoError(t, err) + + d, err := json.Marshal(map[string]interface{}{ + "vrfKey": map[string]interface{}{ + "key": map[string]interface{}{ + "id": fakeKey.PublicKey.String(), + "compressed": fakeKey.PublicKey.String(), + "uncompressed": uncompressed, + "hash": fakeKey.PublicKey.MustHash().String(), + }, + }, + }) + assert.NoError(t, err) + expected := string(d) + + variables := map[string]interface{}{ + "id": fakeKey.PublicKey.String(), + } + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query, variables: variables}, "vrfKey"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.vrf.On("Get", fakeKey.PublicKey.String()).Return(fakeKey, nil) + f.Mocks.keystore.On("VRF").Return(f.Mocks.vrf) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + variables: variables, + result: expected, + }, + { + name: "not found error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.vrf. + On("Get", fakeKey.PublicKey.String()). 
+ Return(vrfkey.KeyV2{}, errors.Wrapf( + keystore.ErrMissingVRFKey, + "unable to find VRF key with id %s", + fakeKey.PublicKey.String(), + )) + f.Mocks.keystore.On("VRF").Return(f.Mocks.vrf) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + variables: variables, + result: fmt.Sprintf(`{ + "vrfKey": { + "code": "NOT_FOUND", + "message": "unable to find VRF key with id %s: unable to find VRF key" + } + }`, fakeKey.PublicKey.String()), + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_GetVRFKeys(t *testing.T) { + t.Parallel() + + query := ` + query GetVRFKeys { + vrfKeys { + results { + id + compressed + uncompressed + hash + } + } + } + ` + + fakeKeys := []vrfkey.KeyV2{} + expectedKeys := []map[string]string{} + for i := 0; i < 2; i++ { + fakeKey := vrfkey.MustNewV2XXXTestingOnly(big.NewInt(1)) + uncompressed, err := fakeKey.PublicKey.StringUncompressed() + assert.NoError(t, err) + + fakeKeys = append(fakeKeys, fakeKey) + expectedKeys = append(expectedKeys, map[string]string{ + "id": fakeKey.PublicKey.String(), + "compressed": fakeKey.PublicKey.String(), + "uncompressed": uncompressed, + "hash": fakeKey.PublicKey.MustHash().String(), + }) + } + + d, err := json.Marshal(map[string]interface{}{ + "vrfKeys": map[string]interface{}{ + "results": expectedKeys, + }, + }) + assert.NoError(t, err) + expected := string(d) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: query}, "vrfKeys"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.vrf.On("GetAll").Return(fakeKeys, nil) + f.Mocks.keystore.On("VRF").Return(f.Mocks.vrf) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: query, + result: expected, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_CreateVRFKey(t *testing.T) { + t.Parallel() + + mutation := ` + mutation CreateVRFKey { + createVRFKey { + key { + id + compressed + uncompressed + hash + } + } + } + ` + + fakeKey := 
vrfkey.MustNewV2XXXTestingOnly(big.NewInt(1)) + + uncompressed, err := fakeKey.PublicKey.StringUncompressed() + assert.NoError(t, err) + + d, err := json.Marshal(map[string]interface{}{ + "createVRFKey": map[string]interface{}{ + "key": map[string]interface{}{ + "id": fakeKey.PublicKey.String(), + "compressed": fakeKey.PublicKey.String(), + "uncompressed": uncompressed, + "hash": fakeKey.PublicKey.MustHash().String(), + }, + }, + }) + assert.NoError(t, err) + expected := string(d) + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation}, "createVRFKey"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.vrf.On("Create").Return(fakeKey, nil) + f.Mocks.keystore.On("VRF").Return(f.Mocks.vrf) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: mutation, + result: expected, + }, + } + + RunGQLTests(t, testCases) +} + +func TestResolver_DeleteVRFKey(t *testing.T) { + t.Parallel() + + mutation := ` + mutation DeleteVRFKey($id: ID!) { + deleteVRFKey(id: $id) { + ... on DeleteVRFKeySuccess { + key { + id + compressed + uncompressed + hash + } + } + + ... 
on NotFoundError { + message + code + } + } + } + ` + + fakeKey := vrfkey.MustNewV2XXXTestingOnly(big.NewInt(1)) + + uncompressed, err := fakeKey.PublicKey.StringUncompressed() + assert.NoError(t, err) + + d, err := json.Marshal(map[string]interface{}{ + "deleteVRFKey": map[string]interface{}{ + "key": map[string]interface{}{ + "id": fakeKey.PublicKey.String(), + "compressed": fakeKey.PublicKey.String(), + "uncompressed": uncompressed, + "hash": fakeKey.PublicKey.MustHash().String(), + }, + }, + }) + assert.NoError(t, err) + expected := string(d) + + variables := map[string]interface{}{ + "id": fakeKey.PublicKey.String(), + } + + testCases := []GQLTestCase{ + unauthorizedTestCase(GQLTestCase{query: mutation, variables: variables}, "deleteVRFKey"), + { + name: "success", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.vrf.On("Delete", fakeKey.PublicKey.String()).Return(fakeKey, nil) + f.Mocks.keystore.On("VRF").Return(f.Mocks.vrf) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: mutation, + variables: variables, + result: expected, + }, + { + name: "not found error", + authenticated: true, + before: func(f *gqlTestFramework) { + f.Mocks.vrf. + On("Delete", fakeKey.PublicKey.String()). 
+ Return(vrfkey.KeyV2{}, errors.Wrapf( + keystore.ErrMissingVRFKey, + "unable to find VRF key with id %s", + fakeKey.PublicKey.String(), + )) + f.Mocks.keystore.On("VRF").Return(f.Mocks.vrf) + f.App.On("GetKeyStore").Return(f.Mocks.keystore) + }, + query: mutation, + variables: variables, + result: fmt.Sprintf(`{ + "deleteVRFKey": { + "message": "unable to find VRF key with id %s: unable to find VRF key", + "code": "NOT_FOUND" + } + }`, fakeKey.PublicKey.String()), + }, + } + + RunGQLTests(t, testCases) +} diff --git a/core/web/router.go b/core/web/router.go new file mode 100644 index 00000000..05ec2177 --- /dev/null +++ b/core/web/router.go @@ -0,0 +1,685 @@ +package web + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/fs" + "math" + "net/http" + "net/http/pprof" + "net/url" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/Depado/ginprom" + helmet "github.com/danielkov/gin-helmet" + "github.com/gin-contrib/cors" + "github.com/gin-contrib/expvar" + "github.com/gin-contrib/sessions" + "github.com/gin-contrib/sessions/cookie" + limits "github.com/gin-contrib/size" + "github.com/gin-gonic/gin" + "github.com/graph-gophers/graphql-go" + "github.com/graph-gophers/graphql-go/relay" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/ulule/limiter/v3" + mgin "github.com/ulule/limiter/v3/drivers/middleware/gin" + "github.com/ulule/limiter/v3/drivers/store/memory" + "github.com/unrolled/secure" + "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin" + + "github.com/goplugin/pluginv3.0/v2/core/build" + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web/auth" + "github.com/goplugin/pluginv3.0/v2/core/web/loader" + "github.com/goplugin/pluginv3.0/v2/core/web/resolver" + "github.com/goplugin/pluginv3.0/v2/core/web/schema" +) + 
+// NewRouter returns *gin.Engine router that listens and responds to requests to the node for valid paths. +func NewRouter(app plugin.Application, prometheus *ginprom.Prometheus) (*gin.Engine, error) { + engine := gin.New() + engine.RemoteIPHeaders = nil // don't trust default headers: "X-Forwarded-For", "X-Real-IP" + config := app.GetConfig() + secret, err := app.SecretGenerator().Generate(config.RootDir()) + if err != nil { + return nil, err + } + sessionStore := cookie.NewStore(secret) + sessionStore.Options(config.WebServer().SessionOptions()) + cors := uiCorsHandler(config.WebServer().AllowOrigins()) + if prometheus != nil { + prometheusUse(prometheus, engine, promhttp.HandlerOpts{EnableOpenMetrics: true}) + } + + tls := config.WebServer().TLS() + engine.Use( + otelgin.Middleware("plugin-web-routes"), + limits.RequestSizeLimiter(config.WebServer().HTTPMaxSize()), + loggerFunc(app.GetLogger()), + gin.Recovery(), + cors, + secureMiddleware(tls.ForceRedirect(), tls.Host(), config.Insecure().DevWebServer()), + ) + if prometheus != nil { + engine.Use(prometheus.Instrument()) + } + engine.Use(helmet.Default()) + + rl := config.WebServer().RateLimit() + api := engine.Group( + "/", + rateLimiter( + rl.AuthenticatedPeriod(), + rl.Authenticated(), + ), + sessions.Sessions(auth.SessionName, sessionStore), + ) + + debugRoutes(app, api) + healthRoutes(app, api) + sessionRoutes(app, api) + v2Routes(app, api) + loopRoutes(app, api) + + guiAssetRoutes(engine, config.Insecure().DisableRateLimiting(), app.GetLogger()) + + api.POST("/query", + auth.AuthenticateGQL(app.AuthenticationProvider(), app.GetLogger().Named("GQLHandler")), + loader.Middleware(app), + graphqlHandler(app), + ) + + return engine, nil +} + +// Defining the Graphql handler +func graphqlHandler(app plugin.Application) gin.HandlerFunc { + rootSchema := schema.MustGetRootSchema() + + // Disable introspection and set a max query depth in production. 
+ var schemaOpts []graphql.SchemaOpt + + if !app.GetConfig().Insecure().InfiniteDepthQueries() { + schemaOpts = append(schemaOpts, + graphql.MaxDepth(10), + ) + } + + schema := graphql.MustParseSchema(rootSchema, + &resolver.Resolver{ + App: app, + }, + schemaOpts..., + ) + + h := relay.Handler{Schema: schema} + + return func(c *gin.Context) { + h.ServeHTTP(c.Writer, c.Request) + } +} + +func rateLimiter(period time.Duration, limit int64) gin.HandlerFunc { + store := memory.NewStore() + rate := limiter.Rate{ + Period: period, + Limit: limit, + } + return mgin.NewMiddleware(limiter.New(store, rate)) +} + +// secureOptions configure security options for the secure middleware, mostly +// for TLS redirection +func secureOptions(tlsRedirect bool, tlsHost string, devWebServer bool) secure.Options { + return secure.Options{ + FrameDeny: true, + IsDevelopment: devWebServer, + SSLRedirect: tlsRedirect, + SSLHost: tlsHost, + } +} + +// secureMiddleware adds a TLS handler and redirector, to button up security +// for this node +func secureMiddleware(tlsRedirect bool, tlsHost string, devWebServer bool) gin.HandlerFunc { + secureMiddleware := secure.New(secureOptions(tlsRedirect, tlsHost, devWebServer)) + secureFunc := func() gin.HandlerFunc { + return func(c *gin.Context) { + err := secureMiddleware.Process(c.Writer, c.Request) + + // If there was an error, do not continue. + if err != nil { + c.Abort() + return + } + + // Avoid header rewrite if response is a redirection. 
+ if status := c.Writer.Status(); status > 300 && status < 399 { + c.Abort() + } + } + }() + + return secureFunc +} + +func debugRoutes(app plugin.Application, r *gin.RouterGroup) { + group := r.Group("/debug", auth.Authenticate(app.AuthenticationProvider(), auth.AuthenticateBySession)) + group.GET("/vars", expvar.Handler()) +} + +func metricRoutes(r *gin.RouterGroup, includeHeap bool) { + pprofGroup := r.Group("/debug/pprof") + pprofGroup.GET("/", ginHandlerFromHTTP(pprof.Index)) + pprofGroup.GET("/cmdline", ginHandlerFromHTTP(pprof.Cmdline)) + pprofGroup.GET("/profile", ginHandlerFromHTTP(pprof.Profile)) + pprofGroup.POST("/symbol", ginHandlerFromHTTP(pprof.Symbol)) + pprofGroup.GET("/symbol", ginHandlerFromHTTP(pprof.Symbol)) + pprofGroup.GET("/trace", ginHandlerFromHTTP(pprof.Trace)) + pprofGroup.GET("/allocs", ginHandlerFromHTTP(pprof.Handler("allocs").ServeHTTP)) + pprofGroup.GET("/block", ginHandlerFromHTTP(pprof.Handler("block").ServeHTTP)) + pprofGroup.GET("/goroutine", ginHandlerFromHTTP(pprof.Handler("goroutine").ServeHTTP)) + if includeHeap { + pprofGroup.GET("/heap", ginHandlerFromHTTP(pprof.Handler("heap").ServeHTTP)) + } + pprofGroup.GET("/mutex", ginHandlerFromHTTP(pprof.Handler("mutex").ServeHTTP)) + pprofGroup.GET("/threadcreate", ginHandlerFromHTTP(pprof.Handler("threadcreate").ServeHTTP)) +} + +func ginHandlerFromHTTP(h http.HandlerFunc) gin.HandlerFunc { + return func(c *gin.Context) { + h.ServeHTTP(c.Writer, c.Request) + } +} + +func sessionRoutes(app plugin.Application, r *gin.RouterGroup) { + config := app.GetConfig() + rl := config.WebServer().RateLimit() + unauth := r.Group("/", rateLimiter( + rl.UnauthenticatedPeriod(), + rl.Unauthenticated(), + )) + sc := NewSessionsController(app) + unauth.POST("/sessions", sc.Create) + auth := r.Group("/", auth.Authenticate(app.AuthenticationProvider(), auth.AuthenticateBySession)) + auth.DELETE("/sessions", sc.Destroy) +} + +func healthRoutes(app plugin.Application, r *gin.RouterGroup) { + hc := 
HealthController{app} + r.GET("/readyz", hc.Readyz) + r.GET("/health", hc.Health) + r.GET("/health.txt", func(context *gin.Context) { + context.Request.Header.Set("Accept", gin.MIMEPlain) + }, hc.Health) +} + +func loopRoutes(app plugin.Application, r *gin.RouterGroup) { + loopRegistry := NewLoopRegistryServer(app) + r.GET("/discovery", ginHandlerFromHTTP(loopRegistry.discoveryHandler)) + r.GET("/plugins/:name/metrics", loopRegistry.pluginMetricHandler) + +} + +func v2Routes(app plugin.Application, r *gin.RouterGroup) { + unauthedv2 := r.Group("/v2") + + prc := PipelineRunsController{app} + psec := PipelineJobSpecErrorsController{app} + unauthedv2.PATCH("/resume/:runID", prc.Resume) + + authv2 := r.Group("/v2", auth.Authenticate(app.AuthenticationProvider(), + auth.AuthenticateByToken, + auth.AuthenticateBySession, + )) + { + uc := UserController{app} + authv2.GET("/users", auth.RequiresAdminRole(uc.Index)) + authv2.POST("/users", auth.RequiresAdminRole(uc.Create)) + authv2.PATCH("/users", auth.RequiresAdminRole(uc.UpdateRole)) + authv2.DELETE("/users/:email", auth.RequiresAdminRole(uc.Delete)) + authv2.PATCH("/user/password", uc.UpdatePassword) + authv2.POST("/user/token", uc.NewAPIToken) + authv2.POST("/user/token/delete", uc.DeleteAPIToken) + + wa := NewWebAuthnController(app) + authv2.GET("/enroll_webauthn", wa.BeginRegistration) + authv2.POST("/enroll_webauthn", wa.FinishRegistration) + + eia := ExternalInitiatorsController{app} + authv2.GET("/external_initiators", paginatedRequest(eia.Index)) + authv2.POST("/external_initiators", auth.RequiresEditRole(eia.Create)) + authv2.DELETE("/external_initiators/:Name", auth.RequiresEditRole(eia.Destroy)) + + bt := BridgeTypesController{app} + authv2.GET("/bridge_types", paginatedRequest(bt.Index)) + authv2.POST("/bridge_types", auth.RequiresEditRole(bt.Create)) + authv2.GET("/bridge_types/:BridgeName", bt.Show) + authv2.PATCH("/bridge_types/:BridgeName", auth.RequiresEditRole(bt.Update)) + 
authv2.DELETE("/bridge_types/:BridgeName", auth.RequiresEditRole(bt.Destroy)) + + ets := EVMTransfersController{app} + authv2.POST("/transfers", auth.RequiresAdminRole(ets.Create)) + authv2.POST("/transfers/evm", auth.RequiresAdminRole(ets.Create)) + tts := CosmosTransfersController{app} + authv2.POST("/transfers/cosmos", auth.RequiresAdminRole(tts.Create)) + sts := SolanaTransfersController{app} + authv2.POST("/transfers/solana", auth.RequiresAdminRole(sts.Create)) + + cc := ConfigController{app} + authv2.GET("/config", cc.Show) + authv2.GET("/config/v2", cc.Show) + + tas := TxAttemptsController{app} + authv2.GET("/tx_attempts", paginatedRequest(tas.Index)) + authv2.GET("/tx_attempts/evm", paginatedRequest(tas.Index)) + + txs := TransactionsController{app} + authv2.GET("/transactions/evm", paginatedRequest(txs.Index)) + authv2.GET("/transactions/evm/:TxHash", txs.Show) + authv2.GET("/transactions", paginatedRequest(txs.Index)) + authv2.GET("/transactions/:TxHash", txs.Show) + + rc := ReplayController{app} + authv2.POST("/replay_from_block/:number", auth.RequiresRunRole(rc.ReplayFromBlock)) + + csakc := CSAKeysController{app} + authv2.GET("/keys/csa", csakc.Index) + authv2.POST("/keys/csa", auth.RequiresEditRole(csakc.Create)) + authv2.POST("/keys/csa/import", auth.RequiresAdminRole(csakc.Import)) + authv2.POST("/keys/csa/export/:ID", auth.RequiresAdminRole(csakc.Export)) + + ekc := NewETHKeysController(app) + authv2.GET("/keys/eth", ekc.Index) + authv2.POST("/keys/eth", auth.RequiresEditRole(ekc.Create)) + authv2.DELETE("/keys/eth/:keyID", auth.RequiresAdminRole(ekc.Delete)) + authv2.POST("/keys/eth/import", auth.RequiresAdminRole(ekc.Import)) + authv2.POST("/keys/eth/export/:address", auth.RequiresAdminRole(ekc.Export)) + // duplicated from above, with `evm` instead of `eth` + // legacy ones remain for backwards compatibility + + ethKeysGroup := authv2.Group("", auth.Authenticate(app.AuthenticationProvider(), + auth.AuthenticateByToken, + 
auth.AuthenticateBySession, + )) + + ethKeysGroup.Use(ekc.formatETHKeyResponse()) + authv2.GET("/keys/evm", ekc.Index) + ethKeysGroup.POST("/keys/evm", auth.RequiresEditRole(ekc.Create)) + ethKeysGroup.DELETE("/keys/evm/:address", auth.RequiresAdminRole(ekc.Delete)) + ethKeysGroup.POST("/keys/evm/import", auth.RequiresAdminRole(ekc.Import)) + authv2.POST("/keys/evm/export/:address", auth.RequiresAdminRole(ekc.Export)) + ethKeysGroup.POST("/keys/evm/chain", auth.RequiresAdminRole(ekc.Chain)) + + ocrkc := OCRKeysController{app} + authv2.GET("/keys/ocr", ocrkc.Index) + authv2.POST("/keys/ocr", auth.RequiresEditRole(ocrkc.Create)) + authv2.DELETE("/keys/ocr/:keyID", auth.RequiresAdminRole(ocrkc.Delete)) + authv2.POST("/keys/ocr/import", auth.RequiresAdminRole(ocrkc.Import)) + authv2.POST("/keys/ocr/export/:ID", auth.RequiresAdminRole(ocrkc.Export)) + + ocr2kc := OCR2KeysController{app} + authv2.GET("/keys/ocr2", ocr2kc.Index) + authv2.POST("/keys/ocr2/:chainType", auth.RequiresEditRole(ocr2kc.Create)) + authv2.DELETE("/keys/ocr2/:keyID", auth.RequiresAdminRole(ocr2kc.Delete)) + authv2.POST("/keys/ocr2/import", auth.RequiresAdminRole(ocr2kc.Import)) + authv2.POST("/keys/ocr2/export/:ID", auth.RequiresAdminRole(ocr2kc.Export)) + + p2pkc := P2PKeysController{app} + authv2.GET("/keys/p2p", p2pkc.Index) + authv2.POST("/keys/p2p", auth.RequiresEditRole(p2pkc.Create)) + authv2.DELETE("/keys/p2p/:keyID", auth.RequiresAdminRole(p2pkc.Delete)) + authv2.POST("/keys/p2p/import", auth.RequiresAdminRole(p2pkc.Import)) + authv2.POST("/keys/p2p/export/:ID", auth.RequiresAdminRole(p2pkc.Export)) + + for _, keys := range []struct { + path string + kc KeysController + }{ + {"solana", NewSolanaKeysController(app)}, + {"cosmos", NewCosmosKeysController(app)}, + {"starknet", NewStarkNetKeysController(app)}, + {"dkgsign", NewDKGSignKeysController(app)}, + {"dkgencrypt", NewDKGEncryptKeysController(app)}, + } { + authv2.GET("/keys/"+keys.path, keys.kc.Index) + authv2.POST("/keys/"+keys.path, 
auth.RequiresEditRole(keys.kc.Create)) + authv2.DELETE("/keys/"+keys.path+"/:keyID", auth.RequiresAdminRole(keys.kc.Delete)) + authv2.POST("/keys/"+keys.path+"/import", auth.RequiresAdminRole(keys.kc.Import)) + authv2.POST("/keys/"+keys.path+"/export/:ID", auth.RequiresAdminRole(keys.kc.Export)) + } + + vrfkc := VRFKeysController{app} + authv2.GET("/keys/vrf", vrfkc.Index) + authv2.POST("/keys/vrf", auth.RequiresEditRole(vrfkc.Create)) + authv2.DELETE("/keys/vrf/:keyID", auth.RequiresAdminRole(vrfkc.Delete)) + authv2.POST("/keys/vrf/import", auth.RequiresAdminRole(vrfkc.Import)) + authv2.POST("/keys/vrf/export/:keyID", auth.RequiresAdminRole(vrfkc.Export)) + + jc := JobsController{app} + authv2.GET("/jobs", paginatedRequest(jc.Index)) + authv2.GET("/jobs/:ID", jc.Show) + authv2.POST("/jobs", auth.RequiresEditRole(jc.Create)) + authv2.PUT("/jobs/:ID", auth.RequiresEditRole(jc.Update)) + authv2.DELETE("/jobs/:ID", auth.RequiresEditRole(jc.Delete)) + + // PipelineRunsController + authv2.GET("/pipeline/runs", paginatedRequest(prc.Index)) + authv2.GET("/jobs/:ID/runs", paginatedRequest(prc.Index)) + authv2.GET("/jobs/:ID/runs/:runID", prc.Show) + + // FeaturesController + fc := FeaturesController{app} + authv2.GET("/features", fc.Index) + + // PipelineJobSpecErrorsController + authv2.DELETE("/pipeline/job_spec_errors/:ID", auth.RequiresEditRole(psec.Destroy)) + + lgc := LogController{app} + authv2.GET("/log", lgc.Get) + authv2.PATCH("/log", auth.RequiresAdminRole(lgc.Patch)) + + chains := authv2.Group("chains") + for _, chain := range []struct { + path string + cc ChainsController + }{ + {"evm", NewEVMChainsController(app)}, + {"solana", NewSolanaChainsController(app)}, + {"starknet", NewStarkNetChainsController(app)}, + {"cosmos", NewCosmosChainsController(app)}, + } { + chains.GET(chain.path, paginatedRequest(chain.cc.Index)) + chains.GET(chain.path+"/:ID", chain.cc.Show) + } + + nodes := authv2.Group("nodes") + for _, chain := range []struct { + path string + nc 
NodesController + }{ + {"evm", NewEVMNodesController(app)}, + {"solana", NewSolanaNodesController(app)}, + {"starknet", NewStarkNetNodesController(app)}, + {"cosmos", NewCosmosNodesController(app)}, + } { + if chain.path == "evm" { + // TODO still EVM only https://app.shortcut.com/pluginlabs/story/26276/multi-chain-type-ui-node-chain-configuration + nodes.GET("", paginatedRequest(chain.nc.Index)) + } + nodes.GET(chain.path, paginatedRequest(chain.nc.Index)) + chains.GET(chain.path+"/:ID/nodes", paginatedRequest(chain.nc.Index)) + } + + efc := EVMForwardersController{app} + authv2.GET("/nodes/evm/forwarders", paginatedRequest(efc.Index)) + authv2.POST("/nodes/evm/forwarders/track", auth.RequiresEditRole(efc.Track)) + authv2.DELETE("/nodes/evm/forwarders/:fwdID", auth.RequiresEditRole(efc.Delete)) + + buildInfo := BuildInfoController{app} + authv2.GET("/build_info", buildInfo.Show) + + // Debug routes accessible via authentication + metricRoutes(authv2, build.IsDev()) + } + + ping := PingController{app} + userOrEI := r.Group("/v2", auth.Authenticate(app.AuthenticationProvider(), + auth.AuthenticateExternalInitiator, + auth.AuthenticateByToken, + auth.AuthenticateBySession, + )) + userOrEI.GET("/ping", ping.Show) + userOrEI.POST("/jobs/:ID/runs", auth.RequiresRunRole(prc.Create)) +} + +// This is higher because it serves main.js and any static images. There are +// 5 assets which must be served, so this allows for 20 requests/min +var staticAssetsRateLimit = int64(100) +var staticAssetsRateLimitPeriod = 1 * time.Minute +var indexRateLimit = int64(20) +var indexRateLimitPeriod = 1 * time.Minute + +// guiAssetRoutes serves the operator UI static files and index.html. Rate +// limiting is disabled when in dev mode. 
+func guiAssetRoutes(engine *gin.Engine, rateLimitingDisabled bool, lggr logger.SugaredLogger) { + // Serve static files + var assetsRouterHandlers []gin.HandlerFunc + if !rateLimitingDisabled { + assetsRouterHandlers = append(assetsRouterHandlers, rateLimiter( + staticAssetsRateLimitPeriod, + staticAssetsRateLimit, + )) + } + + assetsRouterHandlers = append( + assetsRouterHandlers, + ServeGzippedAssets("/assets", assetFs, lggr), + ) + + // Get Operator UI Assets + // + // We have to use a route here because a RouterGroup only runs middlewares + // when a route matches exactly. See https://github.com/gin-gonic/gin/issues/531 + engine.GET("/assets/:file", assetsRouterHandlers...) + + // Serve the index HTML file unless it is an api path + var noRouteHandlers []gin.HandlerFunc + if !rateLimitingDisabled { + noRouteHandlers = append(noRouteHandlers, rateLimiter( + indexRateLimitPeriod, + indexRateLimit, + )) + } + noRouteHandlers = append(noRouteHandlers, func(c *gin.Context) { + path := c.Request.URL.Path + + // Return a 404 if the path is an unmatched API path + if match, _ := regexp.MatchString(`^/v[0-9]+/.*`, path); match { + c.AbortWithStatus(http.StatusNotFound) + + return + } + + // Return a 404 for unknown extensions + if filepath.Ext(path) != "" { + c.AbortWithStatus(http.StatusNotFound) + + return + } + + // Render the React index page for any other unknown requests + file, err := assetFs.Open("index.html") + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + c.AbortWithStatus(http.StatusNotFound) + } else { + lggr.Errorf("failed to open static file '%s': %+v", path, err) + c.AbortWithStatus(http.StatusInternalServerError) + } + return + } + defer lggr.ErrorIfFn(file.Close, "Error closing file") + + http.ServeContent(c.Writer, c.Request, path, time.Time{}, file) + }) + + engine.NoRoute(noRouteHandlers...) 
+} + +// Inspired by https://github.com/gin-gonic/gin/issues/961 +func loggerFunc(lggr logger.Logger) gin.HandlerFunc { + return func(c *gin.Context) { + buf, err := io.ReadAll(c.Request.Body) + if err != nil { + lggr.Error("Web request log error: ", err.Error()) + // Implicitly relies on limits.RequestSizeLimiter + // overriding of c.Request.Body to abort gin's Context + // inside io.ReadAll. + // Functions as we would like, but horrible from an architecture + // and design pattern perspective. + if !c.IsAborted() { + c.AbortWithStatus(http.StatusBadRequest) + } + return + } + rdr := bytes.NewBuffer(buf) + c.Request.Body = io.NopCloser(bytes.NewBuffer(buf)) + + start := time.Now() + c.Next() + end := time.Now() + + lggr.Debugw(fmt.Sprintf("%s %s", c.Request.Method, c.Request.URL.Path), + "method", c.Request.Method, + "status", c.Writer.Status(), + "path", c.Request.URL.Path, + "ginPath", c.FullPath(), + "query", redact(c.Request.URL.Query()), + "body", readBody(rdr, lggr), + "clientIP", c.ClientIP(), + "errors", c.Errors.String(), + "servedAt", end.Format("2006-01-02 15:04:05"), + "latency", fmt.Sprintf("%v", end.Sub(start)), + ) + } +} + +// Add CORS headers so UI can make api requests +func uiCorsHandler(ao string) gin.HandlerFunc { + c := cors.Config{ + AllowMethods: []string{"GET", "POST", "PATCH", "DELETE"}, + AllowHeaders: []string{"Origin", "Content-Type", "Accept"}, + ExposeHeaders: []string{"Content-Length"}, + AllowCredentials: true, + MaxAge: math.MaxInt32, + } + if ao == "*" { + c.AllowAllOrigins = true + } else if allowOrigins := strings.Split(ao, ","); len(allowOrigins) > 0 { + c.AllowOrigins = allowOrigins + } + return cors.New(c) +} + +func readBody(reader io.Reader, lggr logger.Logger) string { + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + lggr.Warn("unable to read from body for sanitization: ", err) + return "*FAILED TO READ BODY*" + } + + if buf.Len() == 0 { + return "" + } + + s, err := readSanitizedJSON(buf) + 
if err != nil { + lggr.Warn("unable to sanitize json for logging: ", err) + return "*FAILED TO READ BODY*" + } + return s +} + +func readSanitizedJSON(buf *bytes.Buffer) (string, error) { + var dst map[string]interface{} + err := json.Unmarshal(buf.Bytes(), &dst) + if err != nil { + return "", err + } + + cleaned := map[string]interface{}{} + for k, v := range dst { + if isBlacklisted(k) { + cleaned[k] = "*REDACTED*" + continue + } + cleaned[k] = v + } + + b, err := json.Marshal(cleaned) + if err != nil { + return "", err + } + return string(b), err +} + +func redact(values url.Values) string { + cleaned := url.Values{} + for k, v := range values { + if isBlacklisted(k) { + cleaned[k] = []string{"REDACTED"} + continue + } + cleaned[k] = v + } + return cleaned.Encode() +} + +// NOTE: keys must be in lowercase for case insensitive match +var blacklist = map[string]struct{}{ + "password": {}, + "newpassword": {}, + "oldpassword": {}, + "current_password": {}, + "new_account_password": {}, +} + +func isBlacklisted(k string) bool { + lk := strings.ToLower(k) + if _, ok := blacklist[lk]; ok || strings.Contains(lk, "password") { + return true + } + return false +} + +// prometheusUse is adapted from ginprom.Prometheus.Use +// until merged upstream: https://github.com/Depado/ginprom/pull/48 +func prometheusUse(p *ginprom.Prometheus, e *gin.Engine, handlerOpts promhttp.HandlerOpts) { + var ( + r prometheus.Registerer = p.Registry + g prometheus.Gatherer = p.Registry + ) + if p.Registry == nil { + r = prometheus.DefaultRegisterer + g = prometheus.DefaultGatherer + } + h := promhttp.InstrumentMetricHandler(r, promhttp.HandlerFor(g, handlerOpts)) + e.GET(p.MetricsPath, prometheusHandler(p.Token, h)) + p.Engine = e +} + +// use is adapted from ginprom.prometheusHandler to add support for custom http.Handler +func prometheusHandler(token string, h http.Handler) gin.HandlerFunc { + return func(c *gin.Context) { + if token == "" { + h.ServeHTTP(c.Writer, c.Request) + return + } + 
+ header := c.Request.Header.Get("Authorization") + + if header == "" { + c.String(http.StatusUnauthorized, ginprom.ErrInvalidToken.Error()) + return + } + + bearer := fmt.Sprintf("Bearer %s", token) + + if header != bearer { + c.String(http.StatusUnauthorized, ginprom.ErrInvalidToken.Error()) + return + } + + h.ServeHTTP(c.Writer, c.Request) + } +} diff --git a/core/web/router_test.go b/core/web/router_test.go new file mode 100644 index 00000000..f9731ec0 --- /dev/null +++ b/core/web/router_test.go @@ -0,0 +1,197 @@ +package web_test + +import ( + "bytes" + "net/http" + "net/http/httptest" + "testing" + + "github.com/google/uuid" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + clhttptest "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/httptest" + "github.com/goplugin/pluginv3.0/v2/core/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTokenAuthRequired_NoCredentials(t *testing.T) { + ctx := testutils.Context(t) + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(ctx)) + + router := web.Router(t, app, nil) + ts := httptest.NewServer(router) + defer ts.Close() + + req, err := http.NewRequestWithContext(ctx, "POST", ts.URL+"/v2/jobs/", bytes.NewBufferString("{}")) + require.NoError(t, err) + req.Header.Set("Content-Type", web.MediaType) + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) +} + +func TestTokenAuthRequired_SessionCredentials(t *testing.T) { + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + router := web.Router(t, app, nil) + ts := httptest.NewServer(router) + defer ts.Close() + + client := app.NewHTTPClient(nil) + resp, cleanup := client.Post("/v2/bridge_types/", nil) 
+ defer cleanup() + + assert.Equal(t, http.StatusUnprocessableEntity, resp.StatusCode) +} + +func TestTokenAuthRequired_TokenCredentials(t *testing.T) { + ctx := testutils.Context(t) + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(ctx)) + + router := web.Router(t, app, nil) + ts := httptest.NewServer(router) + defer ts.Close() + + eia := auth.NewToken() + url := cltest.WebURL(t, "http://localhost:8888") + eir := &bridges.ExternalInitiatorRequest{ + Name: uuid.New().String(), + URL: &url, + } + ea, err := bridges.NewExternalInitiator(eia, eir) + require.NoError(t, err) + err = app.BridgeORM().CreateExternalInitiator(ea) + require.NoError(t, err) + + request, err := http.NewRequestWithContext(ctx, "GET", ts.URL+"/v2/ping/", bytes.NewBufferString("{}")) + require.NoError(t, err) + request.Header.Set("Content-Type", web.MediaType) + request.Header.Set("X-Plugin-EA-AccessKey", eia.AccessKey) + request.Header.Set("X-Plugin-EA-Secret", eia.Secret) + + client := clhttptest.NewTestLocalOnlyHTTPClient() + resp, err := client.Do(request) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode) +} + +func TestTokenAuthRequired_BadTokenCredentials(t *testing.T) { + ctx := testutils.Context(t) + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(ctx)) + + router := web.Router(t, app, nil) + ts := httptest.NewServer(router) + defer ts.Close() + + eia := auth.NewToken() + url := cltest.WebURL(t, "http://localhost:8888") + eir := &bridges.ExternalInitiatorRequest{ + Name: uuid.New().String(), + URL: &url, + } + ea, err := bridges.NewExternalInitiator(eia, eir) + require.NoError(t, err) + err = app.BridgeORM().CreateExternalInitiator(ea) + require.NoError(t, err) + + request, err := http.NewRequestWithContext(ctx, "GET", ts.URL+"/v2/ping/", bytes.NewBufferString("{}")) + require.NoError(t, err) + request.Header.Set("Content-Type", web.MediaType) + request.Header.Set("X-Plugin-EA-AccessKey", eia.AccessKey) + 
request.Header.Set("X-Plugin-EA-Secret", "every unpleasant commercial color from aquamarine to beige") + + client := clhttptest.NewTestLocalOnlyHTTPClient() + resp, err := client.Do(request) + require.NoError(t, err) + + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) +} + +func TestSessions_RateLimited(t *testing.T) { + ctx := testutils.Context(t) + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(ctx)) + + router := web.Router(t, app, nil) + ts := httptest.NewServer(router) + defer ts.Close() + + client := clhttptest.NewTestLocalOnlyHTTPClient() + input := `{"email":"brute@force.com", "password": "wrongpassword"}` + + for i := 0; i < 5; i++ { + request, err := http.NewRequestWithContext(ctx, "POST", ts.URL+"/sessions", bytes.NewBufferString(input)) + require.NoError(t, err) + + resp, err := client.Do(request) + require.NoError(t, err) + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + } + + request, err := http.NewRequestWithContext(ctx, "POST", ts.URL+"/sessions", bytes.NewBufferString(input)) + require.NoError(t, err) + + resp, err := client.Do(request) + require.NoError(t, err) + assert.Equal(t, 429, resp.StatusCode) +} + +func TestRouter_LargePOSTBody(t *testing.T) { + ctx := testutils.Context(t) + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(ctx)) + + router := web.Router(t, app, nil) + ts := httptest.NewServer(router) + defer ts.Close() + + client := clhttptest.NewTestLocalOnlyHTTPClient() + + body := string(make([]byte, 70000)) + request, err := http.NewRequestWithContext(ctx, "POST", ts.URL+"/sessions", bytes.NewBufferString(body)) + require.NoError(t, err) + + resp, err := client.Do(request) + require.NoError(t, err) + assert.Equal(t, http.StatusRequestEntityTooLarge, resp.StatusCode) +} + +func TestRouter_GinHelmetHeaders(t *testing.T) { + ctx := testutils.Context(t) + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(ctx)) + + router := web.Router(t, app, 
nil) + ts := httptest.NewServer(router) + defer ts.Close() + req, err := http.NewRequestWithContext(ctx, "GET", ts.URL, nil) + require.NoError(t, err) + res, err := http.DefaultClient.Do(req) + require.NoError(t, err) + for _, tt := range []struct { + HelmetName string + HeaderKey string + HeaderValue string + }{ + {"NoSniff", "X-Content-Type-Options", "nosniff"}, + {"DNSPrefetchControl", "X-DNS-Prefetch-Control", "off"}, + {"FrameGuard", "X-Frame-Options", "DENY"}, + {"SetHSTS", "Strict-Transport-Security", "max-age=5184000; includeSubDomains"}, + {"IENoOpen", "X-Download-Options", "noopen"}, + {"XSSFilter", "X-Xss-Protection", "1; mode=block"}, + } { + assert.Equal(t, res.Header.Get(tt.HeaderKey), tt.HeaderValue, + "wrong header for helmet's %s handler", tt.HelmetName) + } +} diff --git a/core/web/schema/schema.go b/core/web/schema/schema.go new file mode 100644 index 00000000..74c2ae0f --- /dev/null +++ b/core/web/schema/schema.go @@ -0,0 +1,55 @@ +// Package schema is used to read schema files +// go:generate go-bindata -ignore=\.go -pkg=schema -o=bindata.go ./... +package schema + +import ( + "bytes" + "embed" + "fmt" +) + +//go:embed *.graphql type/*.graphql +var fs embed.FS + +// GetRootSchema reads the schema files and combines them into a single schema. +func GetRootSchema() (string, error) { + b, err := fs.ReadFile("schema.graphql") + if err != nil { + return "", err + } + + buf := bytes.Buffer{} + buf.Write(b) + + types, err := fs.ReadDir("type") + if err != nil { + return "", err + } + + for _, t := range types { + b, err = fs.ReadFile(fmt.Sprintf("type/%s", t.Name())) + if err != nil { + return "", err + } + + buf.Write(b) + + // Add a newline if the file does not end in a newline. + if len(b) > 0 && b[len(b)-1] != '\n' { + buf.WriteByte('\n') + } + } + + return buf.String(), nil +} + +// MustGetRootSchema reads the schema files and combines them into a single +// schema. It panics if there are any errors. 
func MustGetRootSchema() string {
	s, err := GetRootSchema()
	if err != nil {
		panic(err)
	}

	return s
}

# ---- file: core/web/schema/schema.graphql ----

scalar Time
scalar Map
scalar Bytes

schema {
  query: Query
  mutation: Mutation
}

type Query {
  bridge(id: ID!): BridgePayload!
  bridges(offset: Int, limit: Int): BridgesPayload!
  chain(id: ID!): ChainPayload!
  chains(offset: Int, limit: Int): ChainsPayload!
  configv2: ConfigV2Payload!
  csaKeys: CSAKeysPayload!
  ethKeys: EthKeysPayload!
  ethTransaction(hash: ID!): EthTransactionPayload!
  ethTransactions(offset: Int, limit: Int): EthTransactionsPayload!
  ethTransactionsAttempts(offset: Int, limit: Int): EthTransactionAttemptsPayload!
  features: FeaturesPayload!
  feedsManager(id: ID!): FeedsManagerPayload!
  feedsManagers: FeedsManagersPayload!
  globalLogLevel: GlobalLogLevelPayload!
  job(id: ID!): JobPayload!
  jobs(offset: Int, limit: Int): JobsPayload!
  jobProposal(id: ID!): JobProposalPayload!
  jobRun(id: ID!): JobRunPayload!
  jobRuns(offset: Int, limit: Int): JobRunsPayload!
  node(id: ID!): NodePayload!
  nodes(offset: Int, limit: Int): NodesPayload!
  ocrKeyBundles: OCRKeyBundlesPayload!
  ocr2KeyBundles: OCR2KeyBundlesPayload!
  p2pKeys: P2PKeysPayload!
  solanaKeys: SolanaKeysPayload!
  sqlLogging: GetSQLLoggingPayload!
  vrfKey(id: ID!): VRFKeyPayload!
  vrfKeys: VRFKeysPayload!
}

type Mutation {
  approveJobProposalSpec(id: ID!, force: Boolean): ApproveJobProposalSpecPayload!
  cancelJobProposalSpec(id: ID!): CancelJobProposalSpecPayload!
  createAPIToken(input: CreateAPITokenInput!): CreateAPITokenPayload!
  createBridge(input: CreateBridgeInput!): CreateBridgePayload!
  createCSAKey: CreateCSAKeyPayload!
  createFeedsManager(input: CreateFeedsManagerInput!): CreateFeedsManagerPayload!
  createFeedsManagerChainConfig(input: CreateFeedsManagerChainConfigInput!): CreateFeedsManagerChainConfigPayload!
  createJob(input: CreateJobInput!): CreateJobPayload!
  createOCRKeyBundle: CreateOCRKeyBundlePayload!
  createOCR2KeyBundle(chainType: OCR2ChainType!): CreateOCR2KeyBundlePayload!
  createP2PKey: CreateP2PKeyPayload!
  deleteAPIToken(input: DeleteAPITokenInput!): DeleteAPITokenPayload!
  deleteBridge(id: ID!): DeleteBridgePayload!
  deleteCSAKey(id: ID!): DeleteCSAKeyPayload!
  deleteFeedsManagerChainConfig(id: ID!): DeleteFeedsManagerChainConfigPayload!
  deleteJob(id: ID!): DeleteJobPayload!
  deleteOCRKeyBundle(id: ID!): DeleteOCRKeyBundlePayload!
  deleteOCR2KeyBundle(id: ID!): DeleteOCR2KeyBundlePayload!
  deleteP2PKey(id: ID!): DeleteP2PKeyPayload!
  createVRFKey: CreateVRFKeyPayload!
  deleteVRFKey(id: ID!): DeleteVRFKeyPayload!
  dismissJobError(id: ID!): DismissJobErrorPayload!
  rejectJobProposalSpec(id: ID!): RejectJobProposalSpecPayload!
  runJob(id: ID!): RunJobPayload!
  setGlobalLogLevel(level: LogLevel!): SetGlobalLogLevelPayload!
  setSQLLogging(input: SetSQLLoggingInput!): SetSQLLoggingPayload!
  updateBridge(id: ID!, input: UpdateBridgeInput!): UpdateBridgePayload!
  updateFeedsManager(id: ID!, input: UpdateFeedsManagerInput!): UpdateFeedsManagerPayload!
  updateFeedsManagerChainConfig(id: ID!, input: UpdateFeedsManagerChainConfigInput!): UpdateFeedsManagerChainConfigPayload!
  updateJobProposalSpecDefinition(id: ID!, input: UpdateJobProposalSpecDefinitionInput!): UpdateJobProposalSpecDefinitionPayload!
  updateUserPassword(input: UpdatePasswordInput!): UpdatePasswordPayload!
}

# ---- file: core/web/schema/type/api_token.graphql ----

type APIToken {
  accessKey: String!
  secret: String!
}

input CreateAPITokenInput {
  password: String!
}

type CreateAPITokenSuccess {
  token: APIToken!
}

union CreateAPITokenPayload = CreateAPITokenSuccess | InputErrors

input DeleteAPITokenInput {
  password: String!
}

type DeleteAPITokenResult {
  accessKey: String!
}

type DeleteAPITokenSuccess {
  token: DeleteAPITokenResult!
}

union DeleteAPITokenPayload = DeleteAPITokenSuccess | InputErrors

# ---- file: core/web/schema/type/bridge.graphql ----

type Bridge {
  id: ID!
  name: String!
  url: String!
  confirmations: Int!
  outgoingToken: String!
  minimumContractPayment: String!
  createdAt: Time!
}

# BridgePayload defines the response to fetch a single bridge by name
union BridgePayload = Bridge | NotFoundError

# BridgesPayload defines the response when fetching a page of bridges
type BridgesPayload implements PaginatedPayload {
  results: [Bridge!]!
  metadata: PaginationMetadata!
}

# CreateBridgeInput defines the input to create a bridge
input CreateBridgeInput {
  name: String!
  url: String!
  confirmations: Int!
  minimumContractPayment: String!
}

# CreateBridgeSuccess defines the success response when creating a bridge
type CreateBridgeSuccess {
  bridge: Bridge!
  incomingToken: String!
}

# CreateBridgePayload defines the response when creating a bridge
union CreateBridgePayload = CreateBridgeSuccess

# UpdateBridgeInput defines the input to update a bridge
input UpdateBridgeInput {
  name: String!
  url: String!
  confirmations: Int!
  minimumContractPayment: String!
}

# UpdateBridgeSuccess defines the success response when updating a bridge
type UpdateBridgeSuccess {
  bridge: Bridge!
}

# UpdateBridgePayload defines the response when updating a bridge
union UpdateBridgePayload = UpdateBridgeSuccess | NotFoundError

type DeleteBridgeSuccess {
  bridge: Bridge!
}

type DeleteBridgeInvalidNameError implements Error {
  code: ErrorCode!
  message: String!
}

type DeleteBridgeConflictError implements Error {
  code: ErrorCode!
  message: String!
}

union DeleteBridgePayload = DeleteBridgeSuccess
  | DeleteBridgeInvalidNameError
  | DeleteBridgeConflictError
  | NotFoundError

# ---- file: core/web/schema/type/chain.graphql ----

type Chain {
  id: ID!
  enabled: Boolean!
  config: String!
}

union ChainPayload = Chain | NotFoundError

type ChainsPayload implements PaginatedPayload {
  results: [Chain!]!
  metadata: PaginationMetadata!
}

# ---- file: core/web/schema/type/configv2.graphql ----

type ConfigV2Payload {
  user: String!
  effective: String!
}

# ---- file: core/web/schema/type/csa_keys.graphql ----

type CSAKey {
  id: ID!
  publicKey: String!
  version: Int!
}

type CSAKeysPayload {
  results: [CSAKey!]!
}

type CreateCSAKeySuccess {
  csaKey: CSAKey!
}

type CSAKeyExistsError implements Error {
  message: String!
  code: ErrorCode!
}

union CreateCSAKeyPayload = CreateCSAKeySuccess | CSAKeyExistsError

type DeleteCSAKeySuccess {
  csaKey: CSAKey!
}

union DeleteCSAKeyPayload = DeleteCSAKeySuccess | NotFoundError

# ---- file: core/web/schema/type/error.graphql ----

enum ErrorCode {
  NOT_FOUND
  INVALID_INPUT
  UNPROCESSABLE
}

interface Error {
  message: String!
  code: ErrorCode!
}

type NotFoundError implements Error {
  message: String!
  code: ErrorCode!
}

type InputError implements Error {
  message: String!
  code: ErrorCode!
  path: String!
}

type InputErrors {
  errors: [InputError!]!
}

# ---- file: core/web/schema/type/eth_key.graphql ----

type EthKey {
  address: String!
  isDisabled: Boolean!
  createdAt: Time!
  updatedAt: Time!
  chain: Chain!
  ethBalance: String
  linkBalance: String
  maxGasPriceWei: String
}

type EthKeysPayload {
  results: [EthKey!]!
}

# ---- file: core/web/schema/type/eth_transaction.graphql ----

type EthTransaction {
  state: String!
  data: Bytes!
  from: String!
  to: String!
  gasLimit: String!
  value: String!
  evmChainID: ID!
  nonce: String
  gasPrice: String!
  hash: String!
  hex: String!
  sentAt: String
  chain: Chain!
  attempts: [EthTransactionAttempt!]!
}

union EthTransactionPayload = EthTransaction | NotFoundError

type EthTransactionsPayload implements PaginatedPayload {
  results: [EthTransaction!]!
  metadata: PaginationMetadata!
}

# ---- file: core/web/schema/type/eth_transaction_attempt.graphql ----

type EthTransactionAttempt {
  gasPrice: String!
  hash: String!
  hex: String!
  sentAt: String
}

type EthTransactionAttemptsPayload implements PaginatedPayload {
  results: [EthTransactionAttempt!]!
  metadata: PaginationMetadata!
}

# ---- file: core/web/schema/type/features.graphql ----

type Features {
  csa: Boolean!
  feedsManager: Boolean!
}

# FeaturesPayload defines the response of fetching the features availability in the UI
union FeaturesPayload = Features

# ---- file: core/web/schema/type/feeds_manager.graphql ----

enum JobType {
  FLUX_MONITOR
  OCR
  OCR2
}

type Plugins {
  commit: Boolean!
  execute: Boolean!
  median: Boolean!
  mercury: Boolean!
}

type FeedsManager {
  id: ID!
  name: String!
  uri: String!
  publicKey: String!
  jobProposals: [JobProposal!]!
  isConnectionActive: Boolean!
  createdAt: Time!
  chainConfigs: [FeedsManagerChainConfig!]!
}

type FeedsManagerChainConfig {
  id: ID!
  chainID: String!
  chainType: String!
  accountAddr: String!
  adminAddr: String!
  fluxMonitorJobConfig: FluxMonitorJobConfig!
  ocr1JobConfig: OCR1JobConfig!
  ocr2JobConfig: OCR2JobConfig!
}

type FluxMonitorJobConfig {
  enabled: Boolean!
}

type OCR1JobConfig {
  enabled: Boolean!
  isBootstrap: Boolean!
  multiaddr: String
  p2pPeerID: String
  keyBundleID: String
}

type OCR2JobConfig {
  enabled: Boolean!
  isBootstrap: Boolean!
  multiaddr: String
  forwarderAddress: String
  p2pPeerID: String
  keyBundleID: String
  plugins: Plugins!
}

# FeedsManagerPayload defines the response to fetch a single feeds manager by id
union FeedsManagerPayload = FeedsManager | NotFoundError

# FeedsManagersPayload defines the response when fetching feeds managers
type FeedsManagersPayload {
  results: [FeedsManager!]!
}

input CreateFeedsManagerInput {
  name: String!
  uri: String!
  publicKey: String!
}

# CreateFeedsManagerSuccess defines the success response when creating a feeds
# manager
type CreateFeedsManagerSuccess {
  feedsManager: FeedsManager!
}

type SingleFeedsManagerError implements Error {
  message: String!
  code: ErrorCode!
}

# CreateFeedsManagerPayload defines the response when creating a feeds manager
union CreateFeedsManagerPayload = CreateFeedsManagerSuccess
  | SingleFeedsManagerError
  | NotFoundError
  | InputErrors

input UpdateFeedsManagerInput {
  name: String!
  uri: String!
  publicKey: String!
}

# UpdateFeedsManagerSuccess defines the success response when updating a feeds
# manager
type UpdateFeedsManagerSuccess {
  feedsManager: FeedsManager!
}

# UpdateFeedsManagerPayload defines the response when updating a feeds manager
union UpdateFeedsManagerPayload = UpdateFeedsManagerSuccess
  | NotFoundError
  | InputErrors

input CreateFeedsManagerChainConfigInput {
  feedsManagerID: ID!
  chainID: String!
  chainType: String!
  accountAddr: String!
  adminAddr: String!
  fluxMonitorEnabled: Boolean!
  ocr1Enabled: Boolean!
  ocr1IsBootstrap: Boolean
  ocr1Multiaddr: String
  ocr1P2PPeerID: String
  ocr1KeyBundleID: String
  ocr2Enabled: Boolean!
  ocr2IsBootstrap: Boolean
  ocr2Multiaddr: String
  ocr2ForwarderAddress: String
  ocr2P2PPeerID: String
  ocr2KeyBundleID: String
  ocr2Plugins: String!
}

# CreateFeedsManagerChainConfigSuccess defines the success response when
# creating a chain config for a feeds manager.
+type CreateFeedsManagerChainConfigSuccess { + chainConfig: FeedsManagerChainConfig! +} + +# CreateFeedsManagerChainConfigPayload defines the response when creating a +# feeds manager chain config. +union CreateFeedsManagerChainConfigPayload = CreateFeedsManagerChainConfigSuccess + | NotFoundError + | InputErrors + +# DeleteFeedsManagerChainConfigSuccess defines the success response when +# deleting a chain config for a feeds manager. +type DeleteFeedsManagerChainConfigSuccess { + chainConfig: FeedsManagerChainConfig! +} + +# DeleteFeedsManagerChainConfigPayload defines the response when creating a +# feeds manager chain config. +union DeleteFeedsManagerChainConfigPayload = DeleteFeedsManagerChainConfigSuccess + | NotFoundError + +input UpdateFeedsManagerChainConfigInput { + accountAddr: String! + adminAddr: String! + fluxMonitorEnabled: Boolean! + ocr1Enabled: Boolean! + ocr1IsBootstrap: Boolean + ocr1Multiaddr: String + ocr1P2PPeerID: String + ocr1KeyBundleID: String + ocr2Enabled: Boolean! + ocr2IsBootstrap: Boolean + ocr2Multiaddr: String + ocr2ForwarderAddress: String + ocr2P2PPeerID: String + ocr2KeyBundleID: String + ocr2Plugins: String! +} + +# UpdateFeedsManagerChainConfigSuccess defines the success response when +# updating a chain config for a feeds manager. +type UpdateFeedsManagerChainConfigSuccess { + chainConfig: FeedsManagerChainConfig! +} + +# UpdateFeedsManagerChainConfigPayload defines the response when updating a +# feeds manager chain config. +union UpdateFeedsManagerChainConfigPayload = UpdateFeedsManagerChainConfigSuccess + | NotFoundError + | InputErrors diff --git a/core/web/schema/type/job.graphql b/core/web/schema/type/job.graphql new file mode 100644 index 00000000..7b3938cf --- /dev/null +++ b/core/web/schema/type/job.graphql @@ -0,0 +1,40 @@ +type Job { + id: ID! + name: String! + schemaVersion: Int! + gasLimit: Int + forwardingAllowed: Boolean + maxTaskDuration: String! + externalJobID: String! + type: String! + spec: JobSpec! 
+ runs(offset: Int, limit: Int): JobRunsPayload! + observationSource: String! + errors: [JobError!]! + createdAt: Time! +} + +# JobsPayload defines the response when fetching a page of jobs +type JobsPayload implements PaginatedPayload { + results: [Job!]! + metadata: PaginationMetadata! +} + +# JobPayload defines the response when a job +union JobPayload = Job | NotFoundError + +input CreateJobInput { + TOML: String! +} + +type CreateJobSuccess { + job: Job! +} + +union CreateJobPayload = CreateJobSuccess | InputErrors + +type DeleteJobSuccess { + job: Job! +} + +union DeleteJobPayload = DeleteJobSuccess | NotFoundError diff --git a/core/web/schema/type/job_error.graphql b/core/web/schema/type/job_error.graphql new file mode 100644 index 00000000..7f6c0598 --- /dev/null +++ b/core/web/schema/type/job_error.graphql @@ -0,0 +1,13 @@ +type JobError { + id: ID! + description: String! + occurrences: Int! + createdAt: Time! + updatedAt: Time! +} + +type DismissJobErrorSuccess { + jobError: JobError! +} + +union DismissJobErrorPayload = DismissJobErrorSuccess | NotFoundError diff --git a/core/web/schema/type/job_proposal.graphql b/core/web/schema/type/job_proposal.graphql new file mode 100644 index 00000000..940f0ffc --- /dev/null +++ b/core/web/schema/type/job_proposal.graphql @@ -0,0 +1,24 @@ +enum JobProposalStatus { + PENDING + APPROVED + REJECTED + CANCELLED + DELETED + REVOKED +} + +type JobProposal { + id: ID! + name: String + status: JobProposalStatus! + remoteUUID: String! + externalJobID: String + jobID: String + feedsManager: FeedsManager! + multiAddrs: [String!]! + pendingUpdate: Boolean! + specs: [JobProposalSpec!]! + latestSpec: JobProposalSpec! 
+} + +union JobProposalPayload = JobProposal | NotFoundError diff --git a/core/web/schema/type/job_proposal_spec.graphql b/core/web/schema/type/job_proposal_spec.graphql new file mode 100644 index 00000000..20d38f97 --- /dev/null +++ b/core/web/schema/type/job_proposal_spec.graphql @@ -0,0 +1,61 @@ +enum SpecStatus { + UNKNOWN + PENDING + APPROVED + REJECTED + CANCELLED + REVOKED +} + +type JobProposalSpec { + id: ID! + definition: String! + version: Int! + status: SpecStatus! + statusUpdatedAt: Time! + createdAt: Time! + updatedAt: Time! +} + +type JobAlreadyExistsError implements Error { + message: String! + code: ErrorCode! +} + + +# ApproveJobProposalSpec + +type ApproveJobProposalSpecSuccess { + spec: JobProposalSpec! +} + +union ApproveJobProposalSpecPayload = ApproveJobProposalSpecSuccess | NotFoundError | JobAlreadyExistsError + +# CancelJobProposalSpec + +type CancelJobProposalSpecSuccess { + spec: JobProposalSpec! +} + +union CancelJobProposalSpecPayload = CancelJobProposalSpecSuccess | NotFoundError + + +# RejectJobProposalSpec + +type RejectJobProposalSpecSuccess { + spec: JobProposalSpec! +} + +union RejectJobProposalSpecPayload = RejectJobProposalSpecSuccess | NotFoundError + +# UpdateJobProposalSpec + +input UpdateJobProposalSpecDefinitionInput { + definition: String! +} + +type UpdateJobProposalSpecDefinitionSuccess { + spec: JobProposalSpec! +} + +union UpdateJobProposalSpecDefinitionPayload = UpdateJobProposalSpecDefinitionSuccess | NotFoundError diff --git a/core/web/schema/type/job_run.graphql b/core/web/schema/type/job_run.graphql new file mode 100644 index 00000000..724b7d36 --- /dev/null +++ b/core/web/schema/type/job_run.graphql @@ -0,0 +1,39 @@ +enum JobRunStatus { + UNKNOWN + RUNNING + SUSPENDED + ERRORED + COMPLETED +} + +type JobRun { + id: ID! + outputs: [String]! + allErrors: [String!]! + fatalErrors: [String!]! + inputs: String! + createdAt: Time! + finishedAt: Time + taskRuns: [TaskRun!]! + status: JobRunStatus! + job: Job! 
+} + +# JobRunsPayload defines the response when fetching a page of runs +type JobRunsPayload implements PaginatedPayload { + results: [JobRun!]! + metadata: PaginationMetadata! +} + +union JobRunPayload = JobRun | NotFoundError + +type RunJobSuccess { + jobRun: JobRun! +} + +type RunJobCannotRunError implements Error { + message: String! + code: ErrorCode! +} + +union RunJobPayload = RunJobSuccess | NotFoundError | RunJobCannotRunError diff --git a/core/web/schema/type/log.graphql b/core/web/schema/type/log.graphql new file mode 100644 index 00000000..320ba685 --- /dev/null +++ b/core/web/schema/type/log.graphql @@ -0,0 +1,56 @@ +enum LogLevel { + DEBUG + INFO + WARN + ERROR +} + +type GlobalLogLevel { + level: LogLevel! +} + +union GlobalLogLevelPayload = GlobalLogLevel + +type LogLevelConfig { + keeper: LogLevel + headTracker: LogLevel + fluxMonitor: LogLevel +} + +input LogLevelConfigInput { + keeper: LogLevel + headTracker: LogLevel + fluxMonitor: LogLevel +} + +input SetServicesLogLevelsInput { + config: LogLevelConfigInput! +} + +type SetServicesLogLevelsSuccess { + config: LogLevelConfig! +} + +union SetServicesLogLevelsPayload = SetServicesLogLevelsSuccess | InputErrors + +type SQLLogging { + enabled: Boolean! +} + +input SetSQLLoggingInput { + enabled: Boolean! +} + +type SetSQLLoggingSuccess { + sqlLogging: SQLLogging! +} + +union SetSQLLoggingPayload = SetSQLLoggingSuccess + +union GetSQLLoggingPayload = SQLLogging + +type SetGlobalLogLevelSuccess { + globalLogLevel: GlobalLogLevel! +} + +union SetGlobalLogLevelPayload = SetGlobalLogLevelSuccess | InputErrors diff --git a/core/web/schema/type/node.graphql b/core/web/schema/type/node.graphql new file mode 100644 index 00000000..67c2cada --- /dev/null +++ b/core/web/schema/type/node.graphql @@ -0,0 +1,17 @@ +type Node { + id: ID! + name: String! + wsURL: String! + httpURL: String! + chain: Chain! + state: String! + sendOnly: Boolean! 
+ order: Int +} + +union NodePayload = Node | NotFoundError + +type NodesPayload implements PaginatedPayload { + results: [Node!]! + metadata: PaginationMetadata! +} diff --git a/core/web/schema/type/ocr.graphql b/core/web/schema/type/ocr.graphql new file mode 100644 index 00000000..fe85029e --- /dev/null +++ b/core/web/schema/type/ocr.graphql @@ -0,0 +1,22 @@ +type OCRKeyBundle { + id: ID! + configPublicKey: String! + offChainPublicKey: String! + onChainSigningAddress: String! +} + +type OCRKeyBundlesPayload { + results: [OCRKeyBundle!]! +} + +type CreateOCRKeyBundleSuccess { + bundle: OCRKeyBundle! +} + +union CreateOCRKeyBundlePayload = CreateOCRKeyBundleSuccess + +type DeleteOCRKeyBundleSuccess { + bundle: OCRKeyBundle! +} + +union DeleteOCRKeyBundlePayload = DeleteOCRKeyBundleSuccess | NotFoundError diff --git a/core/web/schema/type/ocr2_keys.graphql b/core/web/schema/type/ocr2_keys.graphql new file mode 100644 index 00000000..95da9acf --- /dev/null +++ b/core/web/schema/type/ocr2_keys.graphql @@ -0,0 +1,30 @@ +enum OCR2ChainType { + EVM + COSMOS + SOLANA + STARKNET +} + +type OCR2KeyBundle { + id: ID! + chainType: OCR2ChainType + configPublicKey: String! + onChainPublicKey: String! + offChainPublicKey: String! +} + +type OCR2KeyBundlesPayload { + results: [OCR2KeyBundle!]! +} + +type CreateOCR2KeyBundleSuccess { + bundle: OCR2KeyBundle! +} + +union CreateOCR2KeyBundlePayload = CreateOCR2KeyBundleSuccess + +type DeleteOCR2KeyBundleSuccess { + bundle: OCR2KeyBundle! +} + +union DeleteOCR2KeyBundlePayload = DeleteOCR2KeyBundleSuccess | NotFoundError diff --git a/core/web/schema/type/p2p.graphql b/core/web/schema/type/p2p.graphql new file mode 100644 index 00000000..e1f1cc43 --- /dev/null +++ b/core/web/schema/type/p2p.graphql @@ -0,0 +1,22 @@ +type P2PKey { + id: ID! + peerID: String! + publicKey: String! +} + +type P2PKeysPayload { + results: [P2PKey!]! +} + +type CreateP2PKeySuccess { + p2pKey: P2PKey! 
+} + +union CreateP2PKeyPayload = CreateP2PKeySuccess + + +type DeleteP2PKeySuccess { + p2pKey: P2PKey! +} + +union DeleteP2PKeyPayload = DeleteP2PKeySuccess | NotFoundError diff --git a/core/web/schema/type/pagination.graphql b/core/web/schema/type/pagination.graphql new file mode 100644 index 00000000..ac866844 --- /dev/null +++ b/core/web/schema/type/pagination.graphql @@ -0,0 +1,7 @@ +type PaginationMetadata { + total: Int! +} + +interface PaginatedPayload { + metadata: PaginationMetadata! +} diff --git a/core/web/schema/type/solana_key.graphql b/core/web/schema/type/solana_key.graphql new file mode 100644 index 00000000..741fa00c --- /dev/null +++ b/core/web/schema/type/solana_key.graphql @@ -0,0 +1,7 @@ +type SolanaKey { + id: ID! +} + +type SolanaKeysPayload { + results: [SolanaKey!]! +} diff --git a/core/web/schema/type/spec.graphql b/core/web/schema/type/spec.graphql new file mode 100644 index 00000000..5e24f7c3 --- /dev/null +++ b/core/web/schema/type/spec.graphql @@ -0,0 +1,162 @@ +union JobSpec = + CronSpec | + DirectRequestSpec | + KeeperSpec | + FluxMonitorSpec | + OCRSpec | + OCR2Spec | + VRFSpec | + WebhookSpec | + BlockhashStoreSpec | + BlockHeaderFeederSpec | + BootstrapSpec | + GatewaySpec + +type CronSpec { + schedule: String! + createdAt: Time! +} + +type DirectRequestSpec { + contractAddress: String! + createdAt: Time! + evmChainID: String + minIncomingConfirmations: Int! + minContractPaymentLinkJuels: String! + requesters: [String!] +} + +type FluxMonitorSpec { + absoluteThreshold: Float! + contractAddress: String! + createdAt: Time! + drumbeatEnabled: Boolean! + drumbeatRandomDelay: String + drumbeatSchedule: String + evmChainID: String + idleTimerDisabled: Boolean! + idleTimerPeriod: String! + minPayment: String + pollTimerDisabled: Boolean! + pollTimerPeriod: String! + threshold: Float! +} + +type KeeperSpec { + contractAddress: String! + createdAt: Time! 
+ evmChainID: String + fromAddress: String +} + +type OCRSpec { + blockchainTimeout: String + contractAddress: String! + contractConfigConfirmations: Int + contractConfigTrackerPollInterval: String + contractConfigTrackerSubscribeInterval: String + createdAt: Time! + evmChainID: String + isBootstrapPeer: Boolean! + keyBundleID: String + observationTimeout: String + p2pv2Bootstrappers: [String!] + transmitterAddress: String + databaseTimeout: String! + observationGracePeriod: String! + contractTransmitterTransmitTimeout: String! +} + +type OCR2Spec { + blockchainTimeout: String + contractID: String! + contractConfigConfirmations: Int + contractConfigTrackerPollInterval: String + createdAt: Time! + ocrKeyBundleID: String + monitoringEndpoint: String + p2pv2Bootstrappers: [String!] + relay: String! + relayConfig: Map! + transmitterID: String + pluginType: String! + pluginConfig: Map! + feedID: String +} + +type VRFSpec { + coordinatorAddress: String! + createdAt: Time! + evmChainID: String + fromAddresses: [String!] + minIncomingConfirmations: Int! + pollPeriod: String! + publicKey: String! + requestedConfsDelay: Int! + requestTimeout: String! + batchCoordinatorAddress: String + batchFulfillmentEnabled: Boolean! + batchFulfillmentGasMultiplier: Float! + customRevertsPipelineEnabled: Boolean + chunkSize: Int! + backoffInitialDelay: String! + backoffMaxDelay: String! + gasLanePrice: String + vrfOwnerAddress: String +} + +type WebhookSpec { + createdAt: Time! +} + +type BlockhashStoreSpec { + coordinatorV1Address: String + coordinatorV2Address: String + coordinatorV2PlusAddress: String + waitBlocks: Int! + lookbackBlocks: Int! + blockhashStoreAddress: String! + trustedBlockhashStoreAddress: String + trustedBlockhashStoreBatchSize: Int! + heartbeatPeriod: String! + pollPeriod: String! + runTimeout: String! + evmChainID: String + fromAddresses: [String!] + createdAt: Time! 
+} + +type BlockHeaderFeederSpec { + coordinatorV1Address: String + coordinatorV2Address: String + coordinatorV2PlusAddress: String + waitBlocks: Int! + lookbackBlocks: Int! + blockhashStoreAddress: String! + batchBlockhashStoreAddress: String! + pollPeriod: String! + runTimeout: String! + evmChainID: String + getBlockhashesBatchSize: Int! + storeBlockhashesBatchSize: Int! + fromAddresses: [String!] + createdAt: Time! +} + +type BootstrapSpec { + id: ID! + contractID: String! + relay: String! + relayConfig: Map! + monitoringEndpoint: String + blockchainTimeout: String + contractConfigTrackerPollInterval: String + contractConfigConfirmations: Int + createdAt: Time! +} + +type GatewaySpec { + id: ID! + gatewayConfig: Map! + createdAt: Time! +} diff --git a/core/web/schema/type/task_run.go.graphql b/core/web/schema/type/task_run.go.graphql new file mode 100644 index 00000000..a9f52c22 --- /dev/null +++ b/core/web/schema/type/task_run.go.graphql @@ -0,0 +1,9 @@ +type TaskRun { + id: ID! + dotID: String! + type: String! + output: String! + error: String + createdAt: Time! + finishedAt: Time +} diff --git a/core/web/schema/type/user.graphql b/core/web/schema/type/user.graphql new file mode 100644 index 00000000..8f93d7a4 --- /dev/null +++ b/core/web/schema/type/user.graphql @@ -0,0 +1,15 @@ +type User { + email: String! + createdAt: Time! +} + +input UpdatePasswordInput { + oldPassword: String! + newPassword: String! +} + +type UpdatePasswordSuccess { + user: User! +} + +union UpdatePasswordPayload = UpdatePasswordSuccess | InputErrors diff --git a/core/web/schema/type/vrf.graphql b/core/web/schema/type/vrf.graphql new file mode 100644 index 00000000..06fa4282 --- /dev/null +++ b/core/web/schema/type/vrf.graphql @@ -0,0 +1,26 @@ +type VRFKey { + id: ID! + compressed: String! + uncompressed: String! + hash: String! +} + +type VRFKeySuccess { + key: VRFKey! +} + +union VRFKeyPayload = VRFKeySuccess | NotFoundError + +type VRFKeysPayload { + results: [VRFKey!]! 
+} + +type CreateVRFKeyPayload { + key: VRFKey! +} + +type DeleteVRFKeySuccess { + key: VRFKey! +} + +union DeleteVRFKeyPayload = DeleteVRFKeySuccess | NotFoundError diff --git a/core/web/sessions_controller.go b/core/web/sessions_controller.go new file mode 100644 index 00000000..68a04f64 --- /dev/null +++ b/core/web/sessions_controller.go @@ -0,0 +1,113 @@ +package web + +import ( + "errors" + "fmt" + "net/http" + + "github.com/gin-contrib/sessions" + "github.com/gin-gonic/gin" + "go.uber.org/multierr" + + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + clsessions "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/web/auth" +) + +// SessionsController manages session requests. +type SessionsController struct { + App plugin.Application + sessions *clsessions.WebAuthnSessionStore +} + +func NewSessionsController(app plugin.Application) *SessionsController { + return &SessionsController{app, clsessions.NewWebAuthnSessionStore()} +} + +// Create creates a session ID for the given user credentials, and returns it +// in a cookie. +func (sc *SessionsController) Create(c *gin.Context) { + defer sc.App.WakeSessionReaper() + sc.App.GetLogger().Debugf("TRACE: Starting Session Creation") + + session := sessions.Default(c) + var sr clsessions.SessionRequest + if err := c.ShouldBindJSON(&sr); err != nil { + jsonAPIError(c, http.StatusBadRequest, fmt.Errorf("error binding json %v", err)) + return + } + + // Does this user have 2FA enabled? 
+ userWebAuthnTokens, err := sc.App.AuthenticationProvider().GetUserWebAuthn(sr.Email) + if err != nil { + sc.App.GetLogger().Errorf("Error loading user WebAuthn data: %s", err) + jsonAPIError(c, http.StatusInternalServerError, errors.New("internal Server Error")) + return + } + + // If the user has registered MFA tokens, then populate our session store and context + // required for successful WebAuthn authentication + if len(userWebAuthnTokens) > 0 { + sr.SessionStore = sc.sessions + sr.WebAuthnConfig = sc.App.GetWebAuthnConfiguration() + } + + sid, err := sc.App.AuthenticationProvider().CreateSession(sr) + if err != nil { + jsonAPIError(c, http.StatusUnauthorized, err) + return + } + + if err := saveSessionID(session, sid); err != nil { + jsonAPIError(c, http.StatusInternalServerError, multierr.Append(errors.New("unable to save session id"), err)) + return + } + + jsonAPIResponse(c, Session{Authenticated: true}, "session") +} + +// Destroy removes the specified session ID from the database. +func (sc *SessionsController) Destroy(c *gin.Context) { + defer sc.App.WakeSessionReaper() + + session := sessions.Default(c) + defer session.Clear() + sessionID, ok := session.Get(auth.SessionIDKey).(string) + if !ok { + jsonAPIResponse(c, Session{Authenticated: false}, "session") + return + } + if err := sc.App.AuthenticationProvider().DeleteUserSession(sessionID); err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + sc.App.GetAuditLogger().Audit(audit.AuthSessionDeleted, map[string]interface{}{"sessionID": sessionID}) + jsonAPIResponse(c, Session{Authenticated: false}, "session") +} + +func saveSessionID(session sessions.Session, sessionID string) error { + session.Set(auth.SessionIDKey, sessionID) + return session.Save() +} + +type Session struct { + Authenticated bool `json:"authenticated"` +} + +// GetID returns the jsonapi ID. 
+func (s Session) GetID() string { + return "sessionID" +} + +// GetName returns the collection name for jsonapi. +func (Session) GetName() string { + return "session" +} + +// SetID is used to conform to the UnmarshallIdentifier interface for +// deserializing from jsonapi documents. +func (*Session) SetID(string) error { + return nil +} diff --git a/core/web/sessions_controller_test.go b/core/web/sessions_controller_test.go new file mode 100644 index 00000000..be864968 --- /dev/null +++ b/core/web/sessions_controller_test.go @@ -0,0 +1,206 @@ +package web_test + +import ( + "bytes" + "fmt" + "io" + "net/http" + "testing" + "time" + + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + clhttptest "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/httptest" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/web" + + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSessionsController_Create(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + user := cltest.MustRandomUser(t) + require.NoError(t, app.AuthenticationProvider().CreateUser(&user)) + + client := clhttptest.NewTestLocalOnlyHTTPClient() + tests := []struct { + name string + email string + password string + wantSession bool + }{ + {"incorrect pwd", user.Email, "incorrect", false}, + {"incorrect email", "incorrect@test.net", cltest.Password, false}, + {"correct", user.Email, cltest.Password, true}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + body := fmt.Sprintf(`{"email":"%s","password":"%s"}`, test.email, test.password) + request, err := http.NewRequestWithContext(ctx, "POST", app.Server.URL+"/sessions", 
bytes.NewBufferString(body)) + assert.NoError(t, err) + resp, err := client.Do(request) + assert.NoError(t, err) + defer func() { assert.NoError(t, resp.Body.Close()) }() + + if test.wantSession { + require.Equal(t, http.StatusOK, resp.StatusCode) + + cookies := resp.Cookies() + sessionCookie := web.FindSessionCookie(cookies) + require.NotNil(t, sessionCookie) + + decrypted, err := cltest.DecodeSessionCookie(sessionCookie.Value) + require.NoError(t, err) + user, err := app.AuthenticationProvider().AuthorizedUserWithSession(decrypted) + assert.NoError(t, err) + assert.Equal(t, test.email, user.Email) + + b, err := io.ReadAll(resp.Body) + assert.NoError(t, err) + assert.Contains(t, string(b), `"attributes":{"authenticated":true}`) + } else { + require.True(t, resp.StatusCode >= 400, "Should not be able to create session") + // Ignore fixture session + sessions, err := app.AuthenticationProvider().Sessions(1, 2) + assert.NoError(t, err) + assert.Empty(t, sessions) + } + }) + } +} + +func mustInsertSession(t *testing.T, q pg.Q, session *sessions.Session) { + sql := "INSERT INTO sessions (id, email, last_used, created_at) VALUES ($1, $2, $3, $4) RETURNING *" + _, err := q.Exec(sql, session.ID, session.Email, session.LastUsed, session.CreatedAt) + require.NoError(t, err) +} + +func TestSessionsController_Create_ReapSessions(t *testing.T) { + t.Parallel() + + ctx := testutils.Context(t) + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(ctx)) + + user := cltest.MustRandomUser(t) + require.NoError(t, app.AuthenticationProvider().CreateUser(&user)) + + staleSession := cltest.NewSession() + staleSession.LastUsed = time.Now().Add(-cltest.MustParseDuration(t, "241h")) + staleSession.Email = user.Email + q := pg.NewQ(app.GetSqlxDB(), app.GetLogger(), app.GetConfig().Database()) + mustInsertSession(t, q, &staleSession) + + body := fmt.Sprintf(`{"email":"%s","password":"%s"}`, user.Email, cltest.Password) + req, err := http.NewRequestWithContext(ctx, 
"POST", app.Server.URL+"/sessions", bytes.NewBufferString(body)) + assert.NoError(t, err) + req.Header.Set("Content-Type", "application/json") + resp, err := http.DefaultClient.Do(req) + assert.NoError(t, err) + defer func() { assert.NoError(t, resp.Body.Close()) }() + + assert.Equal(t, http.StatusOK, resp.StatusCode) + + var s []sessions.Session + gomega.NewWithT(t).Eventually(func() []sessions.Session { + s, err = app.AuthenticationProvider().Sessions(0, 10) + assert.NoError(t, err) + return s + }).Should(gomega.HaveLen(1)) + + for _, session := range s { + assert.NotEqual(t, session.ID, staleSession.ID) + } +} + +func TestSessionsController_Destroy(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + user := cltest.MustRandomUser(t) + require.NoError(t, app.AuthenticationProvider().CreateUser(&user)) + + correctSession := sessions.NewSession() + correctSession.Email = user.Email + q := pg.NewQ(app.GetSqlxDB(), app.GetLogger(), app.GetConfig().Database()) + mustInsertSession(t, q, &correctSession) + + client := clhttptest.NewTestLocalOnlyHTTPClient() + tests := []struct { + name, sessionID string + success bool + }{ + {"correct cookie", correctSession.ID, true}, + {"incorrect cookie", "wrongsessionid", false}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cookie := cltest.MustGenerateSessionCookie(t, test.sessionID) + request, err := http.NewRequestWithContext(ctx, "DELETE", app.Server.URL+"/sessions", nil) + assert.NoError(t, err) + request.AddCookie(cookie) + + resp, err := client.Do(request) + assert.NoError(t, err) + + _, err = app.AuthenticationProvider().AuthorizedUserWithSession(test.sessionID) + assert.Error(t, err) + if test.success { + assert.Equal(t, http.StatusOK, resp.StatusCode) + } else { + assert.True(t, resp.StatusCode >= 400, "Should get an erroneous status code for deleting a nonexistent session id") 
+ } + }) + } +} + +func TestSessionsController_Destroy_ReapSessions(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + + client := clhttptest.NewTestLocalOnlyHTTPClient() + app := cltest.NewApplicationEVMDisabled(t) + q := pg.NewQ(app.GetSqlxDB(), app.GetLogger(), app.GetConfig().Database()) + require.NoError(t, app.Start(testutils.Context(t))) + + user := cltest.MustRandomUser(t) + require.NoError(t, app.AuthenticationProvider().CreateUser(&user)) + + correctSession := sessions.NewSession() + correctSession.Email = user.Email + + mustInsertSession(t, q, &correctSession) + cookie := cltest.MustGenerateSessionCookie(t, correctSession.ID) + + staleSession := cltest.NewSession() + staleSession.Email = user.Email + staleSession.LastUsed = time.Now().Add(-cltest.MustParseDuration(t, "241h")) + mustInsertSession(t, q, &staleSession) + + request, err := http.NewRequestWithContext(ctx, "DELETE", app.Server.URL+"/sessions", nil) + assert.NoError(t, err) + request.AddCookie(cookie) + + resp, err := client.Do(request) + assert.NoError(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode) + gomega.NewWithT(t).Eventually(func() []sessions.Session { + sessions, err := app.AuthenticationProvider().Sessions(0, 10) + assert.NoError(t, err) + return sessions + }).Should(gomega.HaveLen(0)) +} diff --git a/core/web/solana_chains_controller.go b/core/web/solana_chains_controller.go new file mode 100644 index 00000000..87c11f43 --- /dev/null +++ b/core/web/solana_chains_controller.go @@ -0,0 +1,17 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func NewSolanaChainsController(app plugin.Application) ChainsController { + return newChainsController( + relay.Solana, + app.GetRelayers().List(plugin.FilterRelayersByType(relay.Solana)), + ErrSolanaNotEnabled, + presenters.NewSolanaChainResource, + app.GetLogger(), + 
app.GetAuditLogger()) +} diff --git a/core/web/solana_chains_controller_test.go b/core/web/solana_chains_controller_test.go new file mode 100644 index 00000000..92a88920 --- /dev/null +++ b/core/web/solana_chains_controller_test.go @@ -0,0 +1,195 @@ +package web_test + +import ( + "fmt" + "math/rand" + "net/http" + "testing" + "time" + + "github.com/manyminds/api2go/jsonapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + commoncfg "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/plugin-solana/pkg/solana/config" + + "github.com/goplugin/plugin-solana/pkg/solana" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils/configtest" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func Test_SolanaChainsController_Show(t *testing.T) { + t.Parallel() + + const validId = "Plugin-12" + + testCases := []struct { + name string + inputId string + wantStatusCode int + want func(t *testing.T, app *cltest.TestApplication) *types.ChainStatus + }{ + { + inputId: validId, + name: "success", + want: func(t *testing.T, app *cltest.TestApplication) *types.ChainStatus { + return &types.ChainStatus{ + ID: validId, + Enabled: true, + Config: `ChainID = 'Plugin-12' +BalancePollPeriod = '5s' +ConfirmPollPeriod = '500ms' +OCR2CachePollPeriod = '1s' +OCR2CacheTTL = '1m0s' +TxTimeout = '1h0m0s' +TxRetryTimeout = '10s' +TxConfirmTimeout = '30s' +SkipPreflight = false +Commitment = 'confirmed' +MaxRetries = 0 +FeeEstimatorMode = 'fixed' +ComputeUnitPriceMax = 1000 +ComputeUnitPriceMin = 0 +ComputeUnitPriceDefault = 0 +FeeBumpPeriod = '3s' +Nodes = [] +`, + } + }, + wantStatusCode: http.StatusOK, + }, + { + inputId: "234", + name: "not found", + want: 
func(t *testing.T, app *cltest.TestApplication) *types.ChainStatus { + return nil + }, + wantStatusCode: http.StatusBadRequest, + }, + } + + for _, testCase := range testCases { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + controller := setupSolanaChainsControllerTestV2(t, &solana.TOMLConfig{ + ChainID: ptr(validId), + Chain: config.Chain{ + SkipPreflight: ptr(false), + TxTimeout: commoncfg.MustNewDuration(time.Hour), + }, + }) + + wantedResult := tc.want(t, controller.app) + resp, cleanup := controller.client.Get( + fmt.Sprintf("/v2/chains/solana/%s", tc.inputId), + ) + t.Cleanup(cleanup) + require.Equal(t, tc.wantStatusCode, resp.StatusCode) + + if wantedResult != nil { + resource1 := presenters.SolanaChainResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, resp), &resource1) + require.NoError(t, err) + + assert.Equal(t, wantedResult.ID, resource1.ID) + assert.Equal(t, wantedResult.Enabled, resource1.Enabled) + assert.Equal(t, wantedResult.Config, resource1.Config) + } + }) + } +} + +func Test_SolanaChainsController_Index(t *testing.T) { + t.Parallel() + + chainA := &solana.TOMLConfig{ + ChainID: ptr(fmt.Sprintf("PlugintestA-%d", rand.Int31n(999999))), + Chain: config.Chain{ + TxTimeout: commoncfg.MustNewDuration(time.Hour), + }, + } + chainB := &solana.TOMLConfig{ + ChainID: ptr(fmt.Sprintf("PlugintestB-%d", rand.Int31n(999999))), + Chain: config.Chain{ + SkipPreflight: ptr(false), + }, + } + controller := setupSolanaChainsControllerTestV2(t, chainA, chainB) + + badResp, cleanup := controller.client.Get("/v2/chains/solana?size=asd") + t.Cleanup(cleanup) + require.Equal(t, http.StatusUnprocessableEntity, badResp.StatusCode) + + resp, cleanup := controller.client.Get("/v2/chains/solana?size=1") + t.Cleanup(cleanup) + require.Equal(t, http.StatusOK, resp.StatusCode) + + body := cltest.ParseResponseBody(t, resp) + + metaCount, err := cltest.ParseJSONAPIResponseMetaCount(body) + require.NoError(t, err) + 
require.Equal(t, 2, metaCount) + + var links jsonapi.Links + + chains := []presenters.SolanaChainResource{} + err = web.ParsePaginatedResponse(body, &chains, &links) + assert.NoError(t, err) + assert.NotEmpty(t, links["next"].Href) + assert.Empty(t, links["prev"].Href) + + assert.Len(t, links, 1) + assert.Equal(t, *chainA.ChainID, chains[0].ID) + tomlA, err := chainA.TOMLString() + require.NoError(t, err) + assert.Equal(t, tomlA, chains[0].Config) + + resp, cleanup = controller.client.Get(links["next"].Href) + t.Cleanup(cleanup) + require.Equal(t, http.StatusOK, resp.StatusCode) + + chains = []presenters.SolanaChainResource{} + err = web.ParsePaginatedResponse(cltest.ParseResponseBody(t, resp), &chains, &links) + assert.NoError(t, err) + assert.Empty(t, links["next"].Href) + assert.NotEmpty(t, links["prev"].Href) + + assert.Len(t, links, 1) + assert.Equal(t, *chainB.ChainID, chains[0].ID) + tomlB, err := chainB.TOMLString() + require.NoError(t, err) + assert.Equal(t, tomlB, chains[0].Config) +} + +type TestSolanaChainsController struct { + app *cltest.TestApplication + client cltest.HTTPClientCleaner +} + +func setupSolanaChainsControllerTestV2(t *testing.T, cfgs ...*solana.TOMLConfig) *TestSolanaChainsController { + for i := range cfgs { + cfgs[i].SetDefaults() + } + cfg := configtest.NewGeneralConfig(t, func(c *plugin.Config, s *plugin.Secrets) { + c.Solana = cfgs + c.EVM = nil + }) + app := cltest.NewApplicationWithConfig(t, cfg) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + return &TestSolanaChainsController{ + app: app, + client: client, + } +} diff --git a/core/web/solana_keys_controller.go b/core/web/solana_keys_controller.go new file mode 100644 index 00000000..11d4c9cd --- /dev/null +++ b/core/web/solana_keys_controller.go @@ -0,0 +1,12 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/solkey" + 
"github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func NewSolanaKeysController(app plugin.Application) KeysController { + return NewKeysController[solkey.Key, presenters.SolanaKeyResource](app.GetKeyStore().Solana(), app.GetLogger(), app.GetAuditLogger(), + "solanaKey", presenters.NewSolanaKeyResource, presenters.NewSolanaKeyResources) +} diff --git a/core/web/solana_keys_controller_test.go b/core/web/solana_keys_controller_test.go new file mode 100644 index 00000000..c6760356 --- /dev/null +++ b/core/web/solana_keys_controller_test.go @@ -0,0 +1,105 @@ +package web_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSolanaKeysController_Index_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupSolanaKeysControllerTests(t) + keys, _ := keyStore.Solana().GetAll() + + response, cleanup := client.Get("/v2/keys/solana") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + resources := []presenters.SolanaKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resources) + assert.NoError(t, err) + + require.Len(t, resources, len(keys)) + + assert.Equal(t, keys[0].ID(), resources[0].ID) + assert.Equal(t, keys[0].PublicKeyStr(), resources[0].PubKey) +} + +func TestSolanaKeysController_Create_HappyPath(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + keyStore := app.GetKeyStore() + + response, cleanup := client.Post("/v2/keys/solana", nil) + 
t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + keys, _ := keyStore.Solana().GetAll() + require.Len(t, keys, 1) + + resource := presenters.SolanaKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resource) + assert.NoError(t, err) + + assert.Equal(t, keys[0].ID(), resource.ID) + assert.Equal(t, keys[0].PublicKeyStr(), resource.PubKey) + + _, err = keyStore.Solana().Get(resource.ID) + require.NoError(t, err) +} + +func TestSolanaKeysController_Delete_NonExistentSolanaKeyID(t *testing.T) { + t.Parallel() + + client, _ := setupSolanaKeysControllerTests(t) + + nonExistentSolanaKeyID := "foobar" + response, cleanup := client.Delete("/v2/keys/solana/" + nonExistentSolanaKeyID) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusNotFound, response.StatusCode) +} + +func TestSolanaKeysController_Delete_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupSolanaKeysControllerTests(t) + + keys, _ := keyStore.Solana().GetAll() + initialLength := len(keys) + key, _ := keyStore.Solana().Create() + + response, cleanup := client.Delete(fmt.Sprintf("/v2/keys/solana/%s", key.ID())) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusOK, response.StatusCode) + assert.Error(t, utils.JustError(keyStore.Solana().Get(key.ID()))) + + keys, _ = keyStore.Solana().GetAll() + assert.Equal(t, initialLength, len(keys)) +} + +func setupSolanaKeysControllerTests(t *testing.T) (cltest.HTTPClientCleaner, keystore.Master) { + t.Helper() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + require.NoError(t, app.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, app.KeyStore.Solana().Add(cltest.DefaultSolanaKey)) + + client := app.NewHTTPClient(nil) + + return client, app.GetKeyStore() +} diff --git a/core/web/solana_nodes_controller.go b/core/web/solana_nodes_controller.go new file mode 100644 index 00000000..4ecfd2cc --- /dev/null +++ 
b/core/web/solana_nodes_controller.go @@ -0,0 +1,17 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// ErrSolanaNotEnabled is returned when Solana.Enabled is not true. +var ErrSolanaNotEnabled = errChainDisabled{name: "Solana", tomlKey: "Solana.Enabled"} + +func NewSolanaNodesController(app plugin.Application) NodesController { + scopedNodeStatuser := NewNetworkScopedNodeStatuser(app.GetRelayers(), relay.Solana) + + return newNodesController[presenters.SolanaNodeResource]( + scopedNodeStatuser, ErrSolanaNotEnabled, presenters.NewSolanaNodeResource, app.GetAuditLogger()) +} diff --git a/core/web/solana_transfer_controller.go b/core/web/solana_transfer_controller.go new file mode 100644 index 00000000..ea04c362 --- /dev/null +++ b/core/web/solana_transfer_controller.go @@ -0,0 +1,81 @@ +package web + +import ( + "math/big" + "net/http" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/chains" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + solanamodels "github.com/goplugin/pluginv3.0/v2/core/store/models/solana" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// SolanaTransfersController can send PLI tokens to another address +type SolanaTransfersController struct { + App plugin.Application +} + +// Create sends SOL and other native coins from the Plugin's account to a specified address. 
+func (tc *SolanaTransfersController) Create(c *gin.Context) { + relayers := tc.App.GetRelayers().List(plugin.FilterRelayersByType(relay.Solana)) + if relayers == nil { + jsonAPIError(c, http.StatusBadRequest, ErrSolanaNotEnabled) + return + } + + var tr solanamodels.SendRequest + if err := c.ShouldBindJSON(&tr); err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + if tr.SolanaChainID == "" { + jsonAPIError(c, http.StatusBadRequest, errors.New("missing solanaChainID")) + return + } + if tr.From.IsZero() { + jsonAPIError(c, http.StatusUnprocessableEntity, errors.Errorf("source address is missing: %v", tr.From)) + return + } + if tr.Amount == 0 { + jsonAPIError(c, http.StatusBadRequest, errors.New("amount must be greater than zero")) + return + } + + amount := new(big.Int).SetUint64(tr.Amount) + relayerID := relay.ID{Network: relay.Solana, ChainID: tr.SolanaChainID} + relayer, err := relayers.Get(relayerID) + if err != nil { + if errors.Is(err, plugin.ErrNoSuchRelayer) { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + err = relayer.Transact(c, tr.From.String(), tr.To.String(), amount, !tr.AllowHigherAmounts) + if err != nil { + if errors.Is(err, chains.ErrNotFound) || errors.Is(err, chains.ErrChainIDEmpty) { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + resource := presenters.NewSolanaMsgResource("sol_transfer_"+uuid.New().String(), tr.SolanaChainID) + resource.Amount = tr.Amount + resource.From = tr.From.String() + resource.To = tr.To.String() + + tc.App.GetAuditLogger().Audit(audit.SolanaTransactionCreated, map[string]interface{}{ + "solanaTransactionResource": resource, + }) + jsonAPIResponse(c, resource, "solana_tx") +} diff --git a/core/web/starknet_chains_controller.go b/core/web/starknet_chains_controller.go new file mode 100644 index 00000000..15d2bd8e --- /dev/null +++ 
b/core/web/starknet_chains_controller.go @@ -0,0 +1,17 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func NewStarkNetChainsController(app plugin.Application) ChainsController { + return newChainsController( + relay.StarkNet, + app.GetRelayers().List(plugin.FilterRelayersByType(relay.StarkNet)), + ErrStarkNetNotEnabled, + presenters.NewStarkNetChainResource, + app.GetLogger(), + app.GetAuditLogger()) +} diff --git a/core/web/starknet_keys_controller.go b/core/web/starknet_keys_controller.go new file mode 100644 index 00000000..c0b2d4e2 --- /dev/null +++ b/core/web/starknet_keys_controller.go @@ -0,0 +1,12 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/keys/starkkey" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +func NewStarkNetKeysController(app plugin.Application) KeysController { + return NewKeysController[starkkey.Key, presenters.StarkNetKeyResource](app.GetKeyStore().StarkNet(), app.GetLogger(), app.GetAuditLogger(), + "starknetKey", presenters.NewStarkNetKeyResource, presenters.NewStarkNetKeyResources) +} diff --git a/core/web/starknet_keys_controller_test.go b/core/web/starknet_keys_controller_test.go new file mode 100644 index 00000000..0b327e9d --- /dev/null +++ b/core/web/starknet_keys_controller_test.go @@ -0,0 +1,105 @@ +package web_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/goplugin/plugin-common/pkg/utils" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore" + "github.com/goplugin/pluginv3.0/v2/core/web" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +func TestStarkNetKeysController_Index_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupStarkNetKeysControllerTests(t) + keys, _ := keyStore.StarkNet().GetAll() + + response, cleanup := client.Get("/v2/keys/starknet") + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + resources := []presenters.StarkNetKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resources) + assert.NoError(t, err) + + require.Len(t, resources, len(keys)) + + assert.Equal(t, keys[0].ID(), resources[0].ID) + assert.Equal(t, keys[0].StarkKeyStr(), resources[0].StarkKey) +} + +func TestStarkNetKeysController_Create_HappyPath(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + client := app.NewHTTPClient(nil) + keyStore := app.GetKeyStore() + + response, cleanup := client.Post("/v2/keys/starknet", nil) + t.Cleanup(cleanup) + cltest.AssertServerResponse(t, response, http.StatusOK) + + keys, _ := keyStore.StarkNet().GetAll() + require.Len(t, keys, 1) + + resource := presenters.StarkNetKeyResource{} + err := web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &resource) + assert.NoError(t, err) + + assert.Equal(t, keys[0].ID(), resource.ID) + assert.Equal(t, keys[0].StarkKeyStr(), resource.StarkKey) + + _, err = keyStore.StarkNet().Get(resource.ID) + require.NoError(t, err) +} + +func TestStarkNetKeysController_Delete_NonExistentStarkNetKeyID(t *testing.T) { + t.Parallel() + + client, _ := setupStarkNetKeysControllerTests(t) + + nonExistentStarkNetKeyID := "foobar" + response, cleanup := client.Delete("/v2/keys/starknet/" + nonExistentStarkNetKeyID) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusNotFound, response.StatusCode) +} + +func TestStarkNetKeysController_Delete_HappyPath(t *testing.T) { + t.Parallel() + + client, keyStore := setupStarkNetKeysControllerTests(t) + + 
keys, _ := keyStore.StarkNet().GetAll() + initialLength := len(keys) + key, _ := keyStore.StarkNet().Create() + + response, cleanup := client.Delete(fmt.Sprintf("/v2/keys/starknet/%s", key.ID())) + t.Cleanup(cleanup) + assert.Equal(t, http.StatusOK, response.StatusCode) + assert.Error(t, utils.JustError(keyStore.StarkNet().Get(key.ID()))) + + keys, _ = keyStore.StarkNet().GetAll() + assert.Equal(t, initialLength, len(keys)) +} + +func setupStarkNetKeysControllerTests(t *testing.T) (cltest.HTTPClientCleaner, keystore.Master) { + t.Helper() + + app := cltest.NewApplication(t) + require.NoError(t, app.Start(testutils.Context(t))) + require.NoError(t, app.KeyStore.OCR().Add(cltest.DefaultOCRKey)) + require.NoError(t, app.KeyStore.StarkNet().Add(cltest.DefaultStarkNetKey)) + + client := app.NewHTTPClient(nil) + + return client, app.GetKeyStore() +} diff --git a/core/web/starknet_nodes_controller.go b/core/web/starknet_nodes_controller.go new file mode 100644 index 00000000..98615068 --- /dev/null +++ b/core/web/starknet_nodes_controller.go @@ -0,0 +1,17 @@ +package web + +import ( + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/services/relay" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// ErrStarkNetNotEnabled is returned when Starknet.Enabled is not true. +var ErrStarkNetNotEnabled = errChainDisabled{name: "StarkNet", tomlKey: "Starknet.Enabled"} + +func NewStarkNetNodesController(app plugin.Application) NodesController { + scopedNodeStatuser := NewNetworkScopedNodeStatuser(app.GetRelayers(), relay.StarkNet) + + return newNodesController[presenters.StarkNetNodeResource]( + scopedNodeStatuser, ErrStarkNetNotEnabled, presenters.NewStarkNetNodeResource, app.GetAuditLogger()) +} diff --git a/core/web/testdata/body/health.html b/core/web/testdata/body/health.html new file mode 100644 index 00000000..d1b208f4 --- /dev/null +++ b/core/web/testdata/body/health.html @@ -0,0 +1,101 @@ + +
+ EVM +
+ 0 +
+ BalanceMonitor +
+
+ HeadBroadcaster +
+
+ HeadTracker +
+ HeadListener +
Listener is not connected
+
+
+
+ LogBroadcaster +
+
+ Txm +
+ BlockHistoryEstimator +
+
+ Broadcaster +
+
+ Confirmer +
+
+ WrappedEvmEstimator +
+
+
+
+
+ JobSpawner +
+
+ Mailbox +
+ Monitor +
+
+
+ Mercury +
+ WSRPCPool +
+ CacheSet +
+
+
+
+ PipelineORM +
+
+ PipelineRunner +
+
+ PromReporter +
+
+ TelemetryManager +
\ No newline at end of file diff --git a/core/web/testdata/body/health.json b/core/web/testdata/body/health.json new file mode 100644 index 00000000..3c0117de --- /dev/null +++ b/core/web/testdata/body/health.json @@ -0,0 +1,175 @@ +{ + "data": [ + { + "type": "checks", + "id": "EVM.0", + "attributes": { + "name": "EVM.0", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.0.BalanceMonitor", + "attributes": { + "name": "EVM.0.BalanceMonitor", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.0.HeadBroadcaster", + "attributes": { + "name": "EVM.0.HeadBroadcaster", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.0.HeadTracker", + "attributes": { + "name": "EVM.0.HeadTracker", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.0.HeadTracker.HeadListener", + "attributes": { + "name": "EVM.0.HeadTracker.HeadListener", + "status": "failing", + "output": "Listener is not connected" + } + }, + { + "type": "checks", + "id": "EVM.0.LogBroadcaster", + "attributes": { + "name": "EVM.0.LogBroadcaster", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.0.Txm", + "attributes": { + "name": "EVM.0.Txm", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.0.Txm.BlockHistoryEstimator", + "attributes": { + "name": "EVM.0.Txm.BlockHistoryEstimator", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.0.Txm.Broadcaster", + "attributes": { + "name": "EVM.0.Txm.Broadcaster", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.0.Txm.Confirmer", + "attributes": { + "name": "EVM.0.Txm.Confirmer", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.0.Txm.WrappedEvmEstimator", + "attributes": { + "name": "EVM.0.Txm.WrappedEvmEstimator", + "status": "passing", + "output": "" + } + }, + { + "type": 
"checks", + "id": "JobSpawner", + "attributes": { + "name": "JobSpawner", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "Mailbox.Monitor", + "attributes": { + "name": "Mailbox.Monitor", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "Mercury.WSRPCPool", + "attributes": { + "name": "Mercury.WSRPCPool", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "Mercury.WSRPCPool.CacheSet", + "attributes": { + "name": "Mercury.WSRPCPool.CacheSet", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "PipelineORM", + "attributes": { + "name": "PipelineORM", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "PipelineRunner", + "attributes": { + "name": "PipelineRunner", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "PromReporter", + "attributes": { + "name": "PromReporter", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "TelemetryManager", + "attributes": { + "name": "TelemetryManager", + "status": "passing", + "output": "" + } + } + ] +} \ No newline at end of file diff --git a/core/web/testdata/body/health.txt b/core/web/testdata/body/health.txt new file mode 100644 index 00000000..03a78c22 --- /dev/null +++ b/core/web/testdata/body/health.txt @@ -0,0 +1,20 @@ +ok EVM.0 +ok EVM.0.BalanceMonitor +ok EVM.0.HeadBroadcaster +ok EVM.0.HeadTracker +! 
EVM.0.HeadTracker.HeadListener + Listener is not connected +ok EVM.0.LogBroadcaster +ok EVM.0.Txm +ok EVM.0.Txm.BlockHistoryEstimator +ok EVM.0.Txm.Broadcaster +ok EVM.0.Txm.Confirmer +ok EVM.0.Txm.WrappedEvmEstimator +ok JobSpawner +ok Mailbox.Monitor +ok Mercury.WSRPCPool +ok Mercury.WSRPCPool.CacheSet +ok PipelineORM +ok PipelineRunner +ok PromReporter +ok TelemetryManager diff --git a/core/web/testdata/health.html b/core/web/testdata/health.html new file mode 100644 index 00000000..3c007bef --- /dev/null +++ b/core/web/testdata/health.html @@ -0,0 +1,67 @@ + +
+ foo +
+ bar +
example error message
+
+ 1 +
+ A +
+
+ B +
+
+
+ 2 +
error:
+this is a multi-line error:
+new line:
+original error
+
+ A +
failure!
+
+
+ B +
+
+
+
+ baz +
+
\ No newline at end of file diff --git a/core/web/testdata/health.txt b/core/web/testdata/health.txt new file mode 100644 index 00000000..89882cc1 --- /dev/null +++ b/core/web/testdata/health.txt @@ -0,0 +1,15 @@ +ok foo +! foo.bar + example error message +ok foo.bar.1 +ok foo.bar.1.A +ok foo.bar.1.B +! foo.bar.2 + error: + this is a multi-line error: + new line: + original error +! foo.bar.2.A + failure! +ok foo.bar.2.B +ok foo.baz diff --git a/core/web/testutils/mock_relayer.go b/core/web/testutils/mock_relayer.go new file mode 100644 index 00000000..7bef6199 --- /dev/null +++ b/core/web/testutils/mock_relayer.go @@ -0,0 +1,57 @@ +package testutils + +import ( + "context" + "math/big" + + commontypes "github.com/goplugin/plugin-common/pkg/types" +) + +type MockRelayer struct { + ChainStatus commontypes.ChainStatus + NodeStatuses []commontypes.NodeStatus +} + +func (m MockRelayer) Name() string { + panic("not implemented") +} + +func (m MockRelayer) Start(ctx context.Context) error { + panic("not implemented") +} + +func (m MockRelayer) Close() error { + panic("not implemented") +} + +func (m MockRelayer) Ready() error { + panic("not implemented") +} + +func (m MockRelayer) HealthReport() map[string]error { + panic("not implemented") +} + +func (m MockRelayer) GetChainStatus(ctx context.Context) (commontypes.ChainStatus, error) { + return m.ChainStatus, nil +} + +func (m MockRelayer) ListNodeStatuses(ctx context.Context, pageSize int32, pageToken string) (stats []commontypes.NodeStatus, nextPageToken string, total int, err error) { + return m.NodeStatuses, "", len(m.NodeStatuses), nil +} + +func (m MockRelayer) Transact(ctx context.Context, from, to string, amount *big.Int, balanceCheck bool) error { + panic("not implemented") +} + +func (m MockRelayer) NewConfigProvider(ctx context.Context, args commontypes.RelayArgs) (commontypes.ConfigProvider, error) { + panic("not implemented") +} + +func (m MockRelayer) NewPluginProvider(ctx context.Context, args 
commontypes.RelayArgs, args2 commontypes.PluginArgs) (commontypes.PluginProvider, error) { + panic("not implemented") +} + +func (m MockRelayer) NewLLOProvider(ctx context.Context, rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.LLOProvider, error) { + panic("not implemented") +} diff --git a/core/web/user_controller.go b/core/web/user_controller.go new file mode 100644 index 00000000..826be78f --- /dev/null +++ b/core/web/user_controller.go @@ -0,0 +1,352 @@ +package web + +import ( + "net/http" + "strings" + + "github.com/gin-contrib/sessions" + "github.com/gin-gonic/gin" + "github.com/jackc/pgconn" + "github.com/pkg/errors" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + clsession "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/utils" + webauth "github.com/goplugin/pluginv3.0/v2/core/web/auth" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// UserController manages the current Session's User. +type UserController struct { + App plugin.Application +} + +// UpdatePasswordRequest defines the request to set a new password for the +// current session's User. 
+type UpdatePasswordRequest struct { + OldPassword string `json:"oldPassword"` + NewPassword string `json:"newPassword"` +} + +var errUnsupportedForAuth = errors.New("action is unsupported with configured authentication provider") + +// Index lists all API users +func (c *UserController) Index(ctx *gin.Context) { + users, err := c.App.AuthenticationProvider().ListUsers() + if err != nil { + if errors.Is(err, clsession.ErrNotSupported) { + jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth) + return + } + c.App.GetLogger().Errorf("Unable to list users", "err", err) + jsonAPIError(ctx, http.StatusInternalServerError, err) + return + } + jsonAPIResponse(ctx, presenters.NewUserResources(users), "users") +} + +// Create creates a new API user with provided context arguments. +func (c *UserController) Create(ctx *gin.Context) { + type newUserRequest struct { + Email string `json:"email"` + Password string `json:"password"` + Role string `json:"role"` + } + + var request newUserRequest + if err := ctx.ShouldBindJSON(&request); err != nil { + jsonAPIError(ctx, http.StatusUnprocessableEntity, err) + return + } + + userRole, err := clsession.GetUserRole(request.Role) + if err != nil { + jsonAPIError(ctx, http.StatusBadRequest, err) + return + } + + if verr := clsession.ValidateEmail(request.Email); verr != nil { + jsonAPIError(ctx, http.StatusBadRequest, verr) + return + } + + if verr := utils.VerifyPasswordComplexity(request.Password, request.Email); verr != nil { + jsonAPIError(ctx, http.StatusBadRequest, verr) + return + } + + user, err := clsession.NewUser(request.Email, request.Password, userRole) + if err != nil { + jsonAPIError(ctx, http.StatusBadRequest, errors.Errorf("error creating API user: %s", err)) + return + } + if err = c.App.AuthenticationProvider().CreateUser(&user); err != nil { + // If this is a duplicate key error (code 23505), return a nicer error message + var pgErr *pgconn.PgError + if ok := errors.As(err, &pgErr); ok { + if pgErr.Code == 
"23505" { + jsonAPIError(ctx, http.StatusBadRequest, errors.Errorf("user with email %s already exists", request.Email)) + return + } + } + if errors.Is(err, clsession.ErrNotSupported) { + jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth) + return + } + c.App.GetLogger().Errorf("Error creating new API user", "err", err) + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("error creating API user")) + return + } + + jsonAPIResponse(ctx, presenters.NewUserResource(user), "user") +} + +// UpdateRole changes role field of a specified API user. +func (c *UserController) UpdateRole(ctx *gin.Context) { + type updateUserRequest struct { + Email string `json:"email"` + NewRole string `json:"newRole"` + } + + var request updateUserRequest + if err := ctx.ShouldBindJSON(&request); err != nil { + jsonAPIError(ctx, http.StatusUnprocessableEntity, err) + return + } + + // Don't allow current admin user to edit self + sessionUser, ok := webauth.GetAuthenticatedUser(ctx) + if !ok { + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("failed to obtain current user from context")) + return + } + if strings.EqualFold(sessionUser.Email, request.Email) { + jsonAPIError(ctx, http.StatusBadRequest, errors.New("can not change state or permissions of current admin user")) + return + } + + // In case email/role is not specified try to give friendlier/actionable error messages + if request.Email == "" { + jsonAPIError(ctx, http.StatusBadRequest, errors.New("email flag is empty, must specify an email")) + return + } + if request.NewRole == "" { + jsonAPIError(ctx, http.StatusBadRequest, errors.New("new-role flag is empty, must specify a new role, possible options are 'admin', 'edit', 'run', 'view'")) + return + } + _, err := clsession.GetUserRole(request.NewRole) + if err != nil { + jsonAPIError(ctx, http.StatusBadRequest, errors.New("new role does not exist, possible options are 'admin', 'edit', 'run', 'view'")) + return + } + + user, err := 
c.App.AuthenticationProvider().UpdateRole(request.Email, request.NewRole) + if err != nil { + if errors.Is(err, clsession.ErrNotSupported) { + jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth) + return + } + jsonAPIError(ctx, http.StatusInternalServerError, errors.Wrap(err, "error updating API user")) + return + } + + jsonAPIResponse(ctx, presenters.NewUserResource(user), "user") +} + +// Delete deletes an API user and any sessions by email +func (c *UserController) Delete(ctx *gin.Context) { + email := ctx.Param("email") + + // Attempt find user by email + user, err := c.App.AuthenticationProvider().FindUser(email) + if err != nil { + if errors.Is(err, clsession.ErrNotSupported) { + jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth) + return + } + jsonAPIError(ctx, http.StatusBadRequest, errors.Errorf("specified user not found: %s", email)) + return + } + + // Don't allow current admin user to delete self + sessionUser, ok := webauth.GetAuthenticatedUser(ctx) + if !ok { + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("failed to obtain current user from context")) + return + } + if strings.EqualFold(sessionUser.Email, email) { + jsonAPIError(ctx, http.StatusBadRequest, errors.New("can not delete currently logged in admin user")) + return + } + + if err = c.App.AuthenticationProvider().DeleteUser(email); err != nil { + if errors.Is(err, clsession.ErrNotSupported) { + jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth) + return + } + c.App.GetLogger().Errorf("Error deleting API user", "err", err) + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("error deleting API user")) + return + } + + jsonAPIResponse(ctx, presenters.NewUserResource(user), "user") +} + +// UpdatePassword changes the password for the current User. 
+func (c *UserController) UpdatePassword(ctx *gin.Context) { + var request UpdatePasswordRequest + if err := ctx.ShouldBindJSON(&request); err != nil { + jsonAPIError(ctx, http.StatusUnprocessableEntity, err) + return + } + + sessionUser, ok := webauth.GetAuthenticatedUser(ctx) + if !ok { + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("failed to obtain current user from context")) + return + } + user, err := c.App.AuthenticationProvider().FindUser(sessionUser.Email) + if err != nil { + if errors.Is(err, clsession.ErrNotSupported) { + jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth) + return + } + c.App.GetLogger().Errorf("failed to obtain current user record: %s", err) + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("unable to update password")) + return + } + if !utils.CheckPasswordHash(request.OldPassword, user.HashedPassword) { + c.App.GetAuditLogger().Audit(audit.PasswordResetAttemptFailedMismatch, map[string]interface{}{"user": user.Email}) + jsonAPIError(ctx, http.StatusConflict, errors.New("old password does not match")) + return + } + if err := utils.VerifyPasswordComplexity(request.NewPassword, user.Email); err != nil { + jsonAPIError(ctx, http.StatusUnprocessableEntity, err) + return + } + if err := c.updateUserPassword(ctx, &user, request.NewPassword); err != nil { + jsonAPIError(ctx, http.StatusInternalServerError, err) + return + } + + c.App.GetAuditLogger().Audit(audit.PasswordResetSuccess, map[string]interface{}{"user": user.Email}) + jsonAPIResponse(ctx, presenters.NewUserResource(user), "user") +} + +// NewAPIToken generates a new API token for a user overwriting any pre-existing one set. 
+func (c *UserController) NewAPIToken(ctx *gin.Context) { + var request clsession.ChangeAuthTokenRequest + if err := ctx.ShouldBindJSON(&request); err != nil { + jsonAPIError(ctx, http.StatusUnprocessableEntity, err) + return + } + + sessionUser, ok := webauth.GetAuthenticatedUser(ctx) + if !ok { + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("failed to obtain current user from context")) + return + } + user, err := c.App.AuthenticationProvider().FindUser(sessionUser.Email) + if err != nil { + if errors.Is(err, clsession.ErrNotSupported) { + jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth) + return + } + c.App.GetLogger().Errorf("failed to obtain current user record: %s", err) + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("unable to create API token")) + return + } + // In order to create an API token, login validation with provided password must succeed + err = c.App.AuthenticationProvider().TestPassword(sessionUser.Email, request.Password) + if err != nil { + c.App.GetAuditLogger().Audit(audit.APITokenCreateAttemptPasswordMismatch, map[string]interface{}{"user": user.Email}) + jsonAPIError(ctx, http.StatusUnauthorized, errors.New("incorrect password")) + return + } + newToken := auth.NewToken() + if err := c.App.AuthenticationProvider().SetAuthToken(&user, newToken); err != nil { + if errors.Is(err, clsession.ErrNotSupported) { + jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth) + return + } + jsonAPIError(ctx, http.StatusInternalServerError, err) + return + } + + c.App.GetAuditLogger().Audit(audit.APITokenCreated, map[string]interface{}{"user": user.Email}) + jsonAPIResponseWithStatus(ctx, newToken, "auth_token", http.StatusCreated) +} + +// DeleteAPIToken deletes and disables a user's API token. 
+func (c *UserController) DeleteAPIToken(ctx *gin.Context) { + var request clsession.ChangeAuthTokenRequest + if err := ctx.ShouldBindJSON(&request); err != nil { + jsonAPIError(ctx, http.StatusUnprocessableEntity, err) + return + } + + sessionUser, ok := webauth.GetAuthenticatedUser(ctx) + if !ok { + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("failed to obtain current user from context")) + return + } + user, err := c.App.AuthenticationProvider().FindUser(sessionUser.Email) + if err != nil { + if errors.Is(err, clsession.ErrNotSupported) { + jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth) + return + } + c.App.GetLogger().Errorf("failed to obtain current user record: %s", err) + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("unable to delete API token")) + return + } + err = c.App.AuthenticationProvider().TestPassword(sessionUser.Email, request.Password) + if err != nil { + c.App.GetAuditLogger().Audit(audit.APITokenDeleteAttemptPasswordMismatch, map[string]interface{}{"user": user.Email}) + jsonAPIError(ctx, http.StatusUnauthorized, errors.New("incorrect password")) + return + } + if err := c.App.AuthenticationProvider().DeleteAuthToken(&user); err != nil { + if errors.Is(err, clsession.ErrNotSupported) { + jsonAPIError(ctx, http.StatusBadRequest, errUnsupportedForAuth) + return + } + jsonAPIError(ctx, http.StatusInternalServerError, err) + return + } + { + c.App.GetAuditLogger().Audit(audit.APITokenDeleted, map[string]interface{}{"user": user.Email}) + jsonAPIResponseWithStatus(ctx, nil, "auth_token", http.StatusNoContent) + } +} + +func getCurrentSessionID(ctx *gin.Context) (string, error) { + session := sessions.Default(ctx) + sessionID, ok := session.Get(webauth.SessionIDKey).(string) + if !ok { + return "", errors.New("unable to get current session ID") + } + return sessionID, nil +} + +func (c *UserController) updateUserPassword(ctx *gin.Context, user *clsession.User, newPassword string) error { + sessionID, 
err := getCurrentSessionID(ctx) + if err != nil { + return err + } + orm := c.App.AuthenticationProvider() + if err := orm.ClearNonCurrentSessions(sessionID); err != nil { + c.App.GetLogger().Errorf("failed to clear non current user sessions: %s", err) + return errors.New("unable to update password") + } + if err := orm.SetPassword(user, newPassword); err != nil { + if errors.Is(err, clsession.ErrNotSupported) { + return errUnsupportedForAuth + } + c.App.GetLogger().Errorf("failed to update current user password: %s", err) + return errors.New("unable to update password") + } + return nil +} diff --git a/core/web/user_controller_test.go b/core/web/user_controller_test.go new file mode 100644 index 00000000..bb72078f --- /dev/null +++ b/core/web/user_controller_test.go @@ -0,0 +1,326 @@ +package web_test + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/auth" + "github.com/goplugin/pluginv3.0/v2/core/internal/cltest" + "github.com/goplugin/pluginv3.0/v2/core/internal/testutils" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +func TestUserController_UpdatePassword(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + u := cltest.User{} + client := app.NewHTTPClient(&u) + + testCases := []struct { + name string + reqBody string + wantStatusCode int + wantErrCount int + wantErrMessage string + }{ + { + name: "Invalid request", + reqBody: "", + wantStatusCode: http.StatusUnprocessableEntity, + wantErrCount: 1, + }, + { + name: "Incorrect old password", + reqBody: `{"oldPassword": "wrong password"}`, + wantStatusCode: http.StatusConflict, + wantErrCount: 1, + wantErrMessage: "old password does not match", + }, + { + name: "Insufficient length of new password", + 
reqBody: fmt.Sprintf(`{"newPassword": "%v", "oldPassword": "%v"}`, "foo", cltest.Password), + wantStatusCode: http.StatusUnprocessableEntity, + wantErrCount: 1, + wantErrMessage: fmt.Sprintf("%s %s\n", utils.ErrMsgHeader, "password is less than 16 characters long"), + }, + { + name: "New password includes api email", + reqBody: fmt.Sprintf(`{"newPassword": "%slonglonglonglong", "oldPassword": "%s"}`, u.Email, cltest.Password), + wantStatusCode: http.StatusUnprocessableEntity, + wantErrCount: 1, + wantErrMessage: fmt.Sprintf("%s %s%s\n", utils.ErrMsgHeader, "password may not contain: ", fmt.Sprintf(`"%s"`, u.Email)), + }, + { + name: "Success", + reqBody: fmt.Sprintf(`{"newPassword": "%v", "oldPassword": "%v"}`, cltest.Password, cltest.Password), + wantStatusCode: http.StatusOK, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + resp, cleanup := client.Patch("/v2/user/password", bytes.NewBufferString(tc.reqBody)) + t.Cleanup(cleanup) + errors := cltest.ParseJSONAPIErrors(t, resp.Body) + + require.Equal(t, tc.wantStatusCode, resp.StatusCode) + assert.Len(t, errors.Errors, tc.wantErrCount) + if tc.wantErrMessage != "" { + assert.Equal(t, tc.wantErrMessage, errors.Errors[0].Detail) + } + }) + } +} + +func TestUserController_CreateUser(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + + longPassword := strings.Repeat("x", sessions.MaxBcryptPasswordLength+1) + + testCases := []struct { + name string + reqBody string + wantStatusCode int + wantErrCount int + wantErrMessage string + }{ + { + name: "Invalid request", + reqBody: "", + wantStatusCode: http.StatusUnprocessableEntity, + wantErrCount: 1, + }, + { + name: "Wrong email format", + reqBody: fmt.Sprintf(`{"email": "12345678", "role": "view", "password": "%v"}`, cltest.Password), + wantStatusCode: http.StatusBadRequest, + wantErrCount: 1, + wantErrMessage: 
"mail: missing '@' or angle-addr", + }, + { + name: "Empty email format", + reqBody: fmt.Sprintf(`{"email": "", "role": "view", "password": "%v"}`, cltest.Password), + wantStatusCode: http.StatusBadRequest, + wantErrCount: 1, + wantErrMessage: "Must enter an email", + }, + { + name: "Empty role", + reqBody: fmt.Sprintf(`{"email": "abc@email.com", "role": "", "password": "%v"}`, cltest.Password), + wantStatusCode: http.StatusBadRequest, + wantErrCount: 1, + wantErrMessage: "Invalid role", + }, + { + name: "Too long password", + reqBody: fmt.Sprintf(`{"email": "abc@email.com", "role": "view", "password": "%v"}`, longPassword), + wantStatusCode: http.StatusBadRequest, + wantErrCount: 1, + wantErrMessage: "must enter a password less than 50 characters", + }, + { + name: "Too short password", + reqBody: `{"email": "abc@email.com", "role": "view", "password": "short"}`, + wantStatusCode: http.StatusBadRequest, + wantErrCount: 1, + wantErrMessage: "Must be at least 16 characters long", + }, + { + name: "Empty password", + reqBody: `{"email": "abc@email.com", "role": "view", "password": ""}`, + wantStatusCode: http.StatusBadRequest, + wantErrCount: 1, + wantErrMessage: "Must be at least 16 characters long", + }, + { + name: "Password contains email", + reqBody: `{"email": "asd@email.com", "role": "view", "password": "asd@email.comasd@email.comasd@email.com"}`, + wantStatusCode: http.StatusBadRequest, + wantErrCount: 1, + wantErrMessage: `password may not contain: "asd@email.com"`, + }, + { + name: "Success", + reqBody: fmt.Sprintf(`{"email": "%s", "role": "edit", "password": "%v"}`, cltest.MustRandomUser(t).Email, cltest.Password), + wantStatusCode: http.StatusOK, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + resp, cleanup := client.Post("/v2/users", bytes.NewBufferString(tc.reqBody)) + t.Cleanup(cleanup) + errors := cltest.ParseJSONAPIErrors(t, resp.Body) + + require.Equal(t, tc.wantStatusCode, resp.StatusCode) + 
assert.Len(t, errors.Errors, tc.wantErrCount) + if tc.wantErrMessage != "" { + assert.Contains(t, errors.Errors[0].Detail, tc.wantErrMessage) + } + }) + } +} + +func TestUserController_UpdateRole(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + user := cltest.MustRandomUser(t) + err := app.AuthenticationProvider().CreateUser(&user) + require.NoError(t, err) + + testCases := []struct { + name string + reqBody string + wantStatusCode int + wantErrCount int + wantErrMessage string + }{ + { + name: "Invalid request", + reqBody: "", + wantStatusCode: http.StatusUnprocessableEntity, + wantErrCount: 1, + }, + { + name: "Success", + reqBody: fmt.Sprintf(`{"email": "%s", "newRole": "edit"}`, user.Email), + wantStatusCode: http.StatusOK, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + resp, cleanup := client.Patch("/v2/users", bytes.NewBufferString(tc.reqBody)) + t.Cleanup(cleanup) + errors := cltest.ParseJSONAPIErrors(t, resp.Body) + + require.Equal(t, tc.wantStatusCode, resp.StatusCode) + assert.Len(t, errors.Errors, tc.wantErrCount) + if tc.wantErrMessage != "" { + assert.Contains(t, errors.Errors[0].Detail, tc.wantErrMessage) + } + }) + } +} + +func TestUserController_DeleteUser(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + user := cltest.MustRandomUser(t) + err := app.AuthenticationProvider().CreateUser(&user) + require.NoError(t, err) + + resp, cleanup := client.Delete(fmt.Sprintf("/v2/users/%s", url.QueryEscape(user.Email))) + t.Cleanup(cleanup) + errors := cltest.ParseJSONAPIErrors(t, resp.Body) + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Empty(t, errors.Errors) + + // second attempt would fail + resp, cleanup = client.Delete(fmt.Sprintf("/v2/users/%s", 
url.QueryEscape(user.Email))) + t.Cleanup(cleanup) + errors = cltest.ParseJSONAPIErrors(t, resp.Body) + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Len(t, errors.Errors, 1) + assert.Contains(t, errors.Errors[0].Detail, "specified user not found") +} + +func TestUserController_NewAPIToken(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + req, err := json.Marshal(sessions.ChangeAuthTokenRequest{ + Password: cltest.Password, + }) + require.NoError(t, err) + resp, cleanup := client.Post("/v2/user/token", bytes.NewBuffer(req)) + defer cleanup() + + require.Equal(t, http.StatusCreated, resp.StatusCode) + var authToken auth.Token + err = cltest.ParseJSONAPIResponse(t, resp, &authToken) + require.NoError(t, err) + assert.NotEmpty(t, authToken.AccessKey) + assert.NotEmpty(t, authToken.Secret) +} + +func TestUserController_NewAPIToken_unauthorized(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + req, err := json.Marshal(sessions.ChangeAuthTokenRequest{ + Password: "wrong-password", + }) + require.NoError(t, err) + resp, cleanup := client.Post("/v2/user/token", bytes.NewBuffer(req)) + defer cleanup() + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) +} + +func TestUserController_DeleteAPIKey(t *testing.T) { + t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + req, err := json.Marshal(sessions.ChangeAuthTokenRequest{ + Password: cltest.Password, + }) + require.NoError(t, err) + resp, cleanup := client.Post("/v2/user/token/delete", bytes.NewBuffer(req)) + defer cleanup() + + require.Equal(t, http.StatusNoContent, resp.StatusCode) +} + +func TestUserController_DeleteAPIKey_unauthorized(t *testing.T) { + 
t.Parallel() + + app := cltest.NewApplicationEVMDisabled(t) + require.NoError(t, app.Start(testutils.Context(t))) + + client := app.NewHTTPClient(nil) + req, err := json.Marshal(sessions.ChangeAuthTokenRequest{ + Password: "wrong-password", + }) + require.NoError(t, err) + resp, cleanup := client.Post("/v2/user/token/delete", bytes.NewBuffer(req)) + defer cleanup() + + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) +} diff --git a/core/web/vrf_keys_controller.go b/core/web/vrf_keys_controller.go new file mode 100644 index 00000000..6a52832f --- /dev/null +++ b/core/web/vrf_keys_controller.go @@ -0,0 +1,125 @@ +package web + +import ( + "io" + "net/http" + + "github.com/gin-gonic/gin" + + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// VRFKeysController manages VRF keys +type VRFKeysController struct { + App plugin.Application +} + +// Index lists VRF keys +// Example: +// "GET /keys/vrf" +func (vrfkc *VRFKeysController) Index(c *gin.Context) { + keys, err := vrfkc.App.GetKeyStore().VRF().GetAll() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + jsonAPIResponse(c, presenters.NewVRFKeyResources(keys, vrfkc.App.GetLogger()), "vrfKey") +} + +// Create and return a VRF key +// Example: +// "POST /keys/vrf" +func (vrfkc *VRFKeysController) Create(c *gin.Context) { + pk, err := vrfkc.App.GetKeyStore().VRF().Create() + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + vrfkc.App.GetAuditLogger().Audit(audit.KeyCreated, map[string]interface{}{ + "type": "vrf", + "id": pk.ID(), + "vrfPublicKey": pk.PublicKey, + "vrfPublicKeyAddress": pk.PublicKey.Address(), + }) + + jsonAPIResponse(c, presenters.NewVRFKeyResource(pk, vrfkc.App.GetLogger()), "vrfKey") +} + +// Delete a VRF key +// Example: +// "DELETE /keys/vrf/:keyID" +// "DELETE /keys/vrf/:keyID?hard=true" 
+func (vrfkc *VRFKeysController) Delete(c *gin.Context) { + keyID := c.Param("keyID") + key, err := vrfkc.App.GetKeyStore().VRF().Get(keyID) + if err != nil { + jsonAPIError(c, http.StatusNotFound, err) + return + } + _, err = vrfkc.App.GetKeyStore().VRF().Delete(keyID) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + vrfkc.App.GetAuditLogger().Audit(audit.KeyDeleted, map[string]interface{}{ + "type": "vrf", + "id": keyID, + }) + + jsonAPIResponse(c, presenters.NewVRFKeyResource(key, vrfkc.App.GetLogger()), "vrfKey") +} + +// Import imports a VRF key +// Example: +// "Post /keys/vrf/import" +func (vrfkc *VRFKeysController) Import(c *gin.Context) { + defer vrfkc.App.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Import request body") + + bytes, err := io.ReadAll(c.Request.Body) + if err != nil { + jsonAPIError(c, http.StatusBadRequest, err) + return + } + oldPassword := c.Query("oldpassword") + key, err := vrfkc.App.GetKeyStore().VRF().Import(bytes, oldPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + vrfkc.App.GetAuditLogger().Audit(audit.KeyImported, map[string]interface{}{ + "type": "vrf", + "id": key.ID(), + "vrfPublicKey": key.PublicKey, + "vrfPublicKeyAddress": key.PublicKey.Address(), + }) + + jsonAPIResponse(c, presenters.NewVRFKeyResource(key, vrfkc.App.GetLogger()), "vrfKey") +} + +// Export exports a VRF key +// Example: +// "Post /keys/vrf/export/:keyID" +func (vrfkc *VRFKeysController) Export(c *gin.Context) { + defer vrfkc.App.GetLogger().ErrorIfFn(c.Request.Body.Close, "Error closing Export request body") + + keyID := c.Param("keyID") + // New password to re-encrypt the export with + newPassword := c.Query("newpassword") + bytes, err := vrfkc.App.GetKeyStore().VRF().Export(keyID, newPassword) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, err) + return + } + + vrfkc.App.GetAuditLogger().Audit(audit.KeyExported, 
map[string]interface{}{ + "type": "vrf", + "id": keyID, + }) + + c.Data(http.StatusOK, MediaType, bytes) +} diff --git a/core/web/webauthn_controller.go b/core/web/webauthn_controller.go new file mode 100644 index 00000000..aadc9522 --- /dev/null +++ b/core/web/webauthn_controller.go @@ -0,0 +1,102 @@ +package web + +import ( + "encoding/json" + "errors" + "net/http" + + "github.com/gin-gonic/gin" + + "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/logger/audit" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + "github.com/goplugin/pluginv3.0/v2/core/sessions" + "github.com/goplugin/pluginv3.0/v2/core/web/auth" + "github.com/goplugin/pluginv3.0/v2/core/web/presenters" +) + +// WebAuthnController manages registers new keys as well as authentication +// with those keys +type WebAuthnController struct { + App plugin.Application + inProgressRegistrationsStore *sessions.WebAuthnSessionStore +} + +func NewWebAuthnController(app plugin.Application) WebAuthnController { + return WebAuthnController{ + App: app, + inProgressRegistrationsStore: sessions.NewWebAuthnSessionStore(), + } +} + +func (c *WebAuthnController) BeginRegistration(ctx *gin.Context) { + user, ok := auth.GetAuthenticatedUser(ctx) + if !ok { + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("failed to obtain current user from context")) + return + } + + orm := c.App.AuthenticationProvider() + uwas, err := orm.GetUserWebAuthn(user.Email) + if err != nil { + c.App.GetLogger().Errorf("failed to obtain current user MFA tokens: error in GetUserWebAuthn: %+v", err) + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("Unable to register key")) + return + } + + webAuthnConfig := c.App.GetWebAuthnConfiguration() + + options, err := c.inProgressRegistrationsStore.BeginWebAuthnRegistration(*user, uwas, webAuthnConfig) + if err != nil { + c.App.GetLogger().Errorf("error in BeginWebAuthnRegistration: %s", err) + jsonAPIError(ctx, 
http.StatusInternalServerError, errors.New("internal Server Error")) + return + } + + optionsp := presenters.NewRegistrationSettings(*options) + + jsonAPIResponse(ctx, optionsp, "settings") +} + +func (c *WebAuthnController) FinishRegistration(ctx *gin.Context) { + user, ok := auth.GetAuthenticatedUser(ctx) + if !ok { + logger.Sugared(c.App.GetLogger()).AssumptionViolationf("failed to obtain current user from context") + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("Unable to register key")) + return + } + + orm := c.App.AuthenticationProvider() + uwas, err := orm.GetUserWebAuthn(user.Email) + if err != nil { + c.App.GetLogger().Errorf("failed to obtain current user MFA tokens: error in GetUserWebAuthn: %s", err) + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("Unable to register key")) + return + } + + webAuthnConfig := c.App.GetWebAuthnConfiguration() + + credential, err := c.inProgressRegistrationsStore.FinishWebAuthnRegistration(*user, uwas, ctx.Request, webAuthnConfig) + if err != nil { + c.App.GetLogger().Errorf("error in FinishWebAuthnRegistration: %s", err) + jsonAPIError(ctx, http.StatusBadRequest, errors.New("registration was unsuccessful")) + return + } + + if sessions.AddCredentialToUser(c.App.AuthenticationProvider(), user.Email, credential) != nil { + c.App.GetLogger().Errorf("Could not save WebAuthn credential to DB for user: %s", user.Email) + jsonAPIError(ctx, http.StatusInternalServerError, errors.New("internal Server Error")) + return + } + + // Forward registered credentials for audit logs + credj, err := json.Marshal(credential) + if err != nil { + c.App.GetLogger().Errorf("error in Marshal credentials: %s", err) + jsonAPIError(ctx, http.StatusBadRequest, errors.New("registration was unsuccessful")) + return + } + c.App.GetAuditLogger().Audit(audit.Auth2FAEnrolled, map[string]interface{}{"email": user.Email, "credential": string(credj)}) + + ctx.String(http.StatusOK, "{}") +} diff --git 
a/core/web/webhook-spec-template.yml b/core/web/webhook-spec-template.yml new file mode 100644 index 00000000..b490a15d --- /dev/null +++ b/core/web/webhook-spec-template.yml @@ -0,0 +1,12 @@ +type = "webhook" +schemaVersion = 1 +externalJobID = "%s" +name = "%s" +observationSource = """ + fetch [type=bridge name="fetch_bridge"] + parse_request [type=jsonparse path="data,result"]; + multiply [type=multiply times="100"]; + submit [type=bridge name="submit_bridge" includeInputAtKey="result", data=<{}>]; + + fetch -> parse_request -> multiply -> submit; +""" diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md new file mode 100644 index 00000000..6d101561 --- /dev/null +++ b/docs/CHANGELOG.md @@ -0,0 +1,2597 @@ +# Changelog Plugin Core + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + + + +## [dev] + +### Added + +- Gas bumping logic to the `SuggestedPriceEstimator`. The bumping mechanism for this estimator refetches the price from the RPC and adds a buffer on top using the greater of `BumpPercent` and `BumpMin`. + +## 2.9.0 - UNRELEASED + +### Added + +- `plugin health` CLI command and HTML `/health` endpoint, to provide human-readable views of the underlying JSON health data. +- New job type `stream` to represent streamspecs. This job type is not yet used anywhere but will be required for Data Streams V1. +- Environment variables `CL_MEDIAN_ENV`, `CL_SOLANA_ENV`, and `CL_STARKNET_ENV` for setting environment variables in LOOP Plugins with an `.env` file. + ``` + echo "Foo=Bar" >> median.env + echo "Baz=Val" >> median.env + CL_MEDIAN_ENV="median.env" + ``` + +### Fixed + +- Fixed the encoding used for transactions when resending in batches + +### Removed + +- `P2P.V1` is no longer supported and must not be set in TOML configuration in order to boot. Use `P2P.V2` instead. 
If you are using both, `V1` can simply be removed. +- Removed `TelemetryIngress.URL` and `TelemetryIngress.ServerPubKey` from TOML configuration, these fields are replaced by `[[TelemetryIngress.Endpoints]]`: +```toml + [[TelemetryIngress.Endpoints]] + Network = '...' # e.g. EVM. Solana, Starknet, Cosmos + ChainID = '...' # e.g. 1, 5, devnet, mainnet-beta + URL = '...' + ServerPubKey = '...' +``` + + + +## 2.8.0 - 2024-01-24 + +### Added + +- Added distributed tracing in the OpenTelemetry trace format to the node, currently focused at the LOOPP Plugin development effort. This includes a new set of `Tracing` TOML configurations. The default for collecting traces is off - you must explicitly enable traces and setup a valid OpenTelemetry collector. Refer to `.github/tracing/README.md` for more details. +- Added a new, optional WebServer authentication option that supports LDAP as a user identity provider. This enables user login access and user roles to be managed and provisioned via a centralized remote server that supports the LDAP protocol, which can be helpful when running multiple nodes. See the documentation for more information and config setup instructions. There is a new `[WebServer].AuthenticationMethod` config option, when set to `ldap` requires the new `[WebServer.LDAP]` config section to be defined, see the reference `docs/core.toml`. +- New prom metrics for mercury transmit queue: + `mercury_transmit_queue_delete_error_count` + `mercury_transmit_queue_insert_error_count` + `mercury_transmit_queue_push_error_count` + Nops should consider alerting on these. +- Mercury now implements a local cache for fetching prices for fees, which ought to reduce latency and load on the mercury server, as well as increasing performance. 
It is enabled by default and can be configured with the following new config variables:
+  ```
+  [Mercury]
+
+  # Mercury.Cache controls settings for the price retrieval cache querying a mercury server
+  [Mercury.Cache]
+  # LatestReportTTL controls how "stale" we will allow a price to be e.g. if
+  # set to 1s, a new price will always be fetched if the last result was
+  # from 1 second ago or older.
+  #
+  # Another way of looking at it is such: the cache will _never_ return a
+  # price that was queried from now-LatestReportTTL or before.
+  #
+  # Setting to zero disables caching entirely.
+  LatestReportTTL = "1s" # Default
+  # MaxStaleAge is the maximum amount of time that a value can be stale
+  # before it is deleted from the cache (a form of garbage collection).
+  #
+  # This should generally be set to something much larger than
+  # LatestReportTTL. Setting to zero disables garbage collection.
+  MaxStaleAge = "1h" # Default
+  # LatestReportDeadline controls how long to wait for a response from the
+  # mercury server before retrying. Setting this to zero will wait indefinitely.
+  LatestReportDeadline = "5s" # Default
+  ```
+- New prom metrics for the mercury cache:
+  `mercury_cache_fetch_failure_count`
+  `mercury_cache_hit_count`
+  `mercury_cache_wait_count`
+  `mercury_cache_miss_count`
+- Added new `EVM.OCR` TOML config fields `DeltaCOverride` and `DeltaCJitterOverride` for overriding the config DeltaC.
+- Mercury v0.2 has improved consensus around current block that uses the most recent 5 blocks instead of only the latest one
+- Two new prom metrics for mercury, nops should consider adding alerting on these:
+  - `mercury_insufficient_blocks_count`
+  - `mercury_zero_blocks_count`
+- Added new `Mercury.TLS` TOML config field `CertFile` for configuring transport credentials when the node acts as a client and initiates a TLS handshake.
+
+### Changed
+
+- `PromReporter` no longer directly reads txm related status from the db, and instead uses the txStore API.
+- `L2Suggested` mode is now called `SuggestedPrice` +- Console logs will now escape (non-whitespace) control characters +- Following EVM Pool metrics were renamed: + - `evm_pool_rpc_node_states` → `multi_node_states` + - `evm_pool_rpc_node_num_transitions_to_alive` → `pool_rpc_node_num_transitions_to_alive` + - `evm_pool_rpc_node_num_transitions_to_in_sync` → `pool_rpc_node_num_transitions_to_in_sync` + - `evm_pool_rpc_node_num_transitions_to_out_of_sync` → `pool_rpc_node_num_transitions_to_out_of_sync` + - `evm_pool_rpc_node_num_transitions_to_unreachable` → `pool_rpc_node_num_transitions_to_unreachable` + - `evm_pool_rpc_node_num_transitions_to_invalid_chain_id` → `pool_rpc_node_num_transitions_to_invalid_chain_id` + - `evm_pool_rpc_node_num_transitions_to_unusable` → `pool_rpc_node_num_transitions_to_unusable` + - `evm_pool_rpc_node_highest_seen_block` → `pool_rpc_node_highest_seen_block` + - `evm_pool_rpc_node_num_seen_blocks` → `pool_rpc_node_num_seen_blocks` + - `evm_pool_rpc_node_polls_total` → `pool_rpc_node_polls_total` + - `evm_pool_rpc_node_polls_failed` → `pool_rpc_node_polls_failed` + - `evm_pool_rpc_node_polls_success` → `pool_rpc_node_polls_success` + +### Removed + +- Removed `Optimism2` as a supported gas estimator mode + +### Fixed + +- Corrected Ethereum Sepolia `LinkContractAddress` to `0x779877A7B0D9E8603169DdbD7836e478b4624789` +- Fixed a bug that caused the Telemetry Manager to report incorrect health + +### Upcoming Required Configuration Changes +Starting in `v2.9.0`: +- `TelemetryIngress.URL` and `TelemetryIngress.ServerPubKey` will no longer be allowed. Any TOML configuration that sets this fields will prevent the node from booting. These fields will be replaced by `[[TelemetryIngress.Endpoints]]` +- `P2P.V1` will no longer be supported and must not be set in TOML configuration in order to boot. Use `P2P.V2` instead. If you are using both, `V1` can simply be removed. 
+ +## 2.7.2 - 2023-12-14 + +### Fixed + +- Fixed a bug that caused nodes without OCR or OCR2 enabled to fail config validation if `P2P.V2` was not explicitly disabled. With this fix, NOPs will not have to make changes to their config. + +## 2.7.1 - 2023-11-21 + +### Fixed + +- Fixed a bug that causes the node to shutdown if all configured RPC's are unreachable during startup. + +## 2.7.0 - 2023-11-14 + +### Added + +- Added new configuration field named `LeaseDuration` for `EVM.NodePool` that will periodically check if internal subscriptions are connected to the "best" (as defined by the `SelectionMode`) node and switch to it if necessary. Setting this value to `0s` will disable this feature. +- Added multichain telemetry support. Each network/chainID pair must be configured using the new fields: +```toml +[[TelemetryIngress.Endpoints]] +Network = '...' # e.g. EVM. Solana, Starknet, Cosmos +ChainID = '...' # e.g. 1, 5, devnet, mainnet-beta +URL = '...' +ServerPubKey = '...' +``` +These will eventually replace `TelemetryIngress.URL` and `TelemetryIngress.ServerPubKey`. Setting `TelemetryIngress.URL` and `TelemetryIngress.ServerPubKey` alongside `[[TelemetryIngress.Endpoints]]` will prevent the node from booting. Only one way of configuring telemetry endpoints is supported. +- Added bridge_name label to `pipeline_tasks_total_finished` prometheus metric. This should make it easier to see directly what bridge was failing out from the CL NODE perspective. + +- LogPoller will now use finality tags to dynamically determine finality on evm chains if `EVM.FinalityTagEnabled=true`, rather than the fixed `EVM.FinalityDepth` specified in toml config + +### Changed + +- `P2P.V1` is now disabled (`Enabled = false`) by default. It must be explicitly enabled with `true` to be used. However, it is deprecated and will be removed in the future. +- `P2P.V2` is now enabled (`Enabled = true`) by default. 
+ +### Upcoming Required Configuration Changes +Starting in `v2.9.0`: +- `TelemetryIngress.URL` and `TelemetryIngress.ServerPubKey` will no longer be allowed. Any TOML configuration that sets this fields will prevent the node from booting. These fields will be replaced by `[[TelemetryIngress.Endpoints]]` +- `P2P.V1` will no longer be supported and must not be set in TOML configuration in order to boot. Use `P2P.V2` instead. If you are using both, `V1` can simply be removed. + +### Removed + +- Removed the ability to set a next nonce value for an address through CLI + +## 2.6.0 - 2023-10-18 + +### Added + +- Simple password use in production builds is now disallowed - nodes with this configuration will not boot and will not pass config validation. +- Helper migrations function for injecting env vars into goose migrations. This was done to inject chainID into evm chain id not null in specs migrations. +- OCR2 jobs now support querying the state contract for configurations if it has been deployed. This can help on chains such as BSC which "manage" state bloat by arbitrarily deleting logs older than a certain date. In this case, if logs are missing we will query the contract directly and retrieve the latest config from chain state. Plugin will perform no extra RPC calls unless the job spec has this feature explicitly enabled. On chains that require this, nops may see an increase in RPC calls. This can be enabled for OCR2 jobs by specifying `ConfigContractAddress` in the relay config TOML. + +### Removed + +- Removed support for sending telemetry to the deprecated Explorer service. All nodes will have to remove `Explorer` related keys from TOML configuration and env vars. +- Removed default evmChainID logic where evmChainID was implicitly injected into the jobspecs based on node EVM chainID toml configuration. All newly created jobs(that have evmChainID field) will have to explicitly define evmChainID in the jobspec. 
+- Removed keyset migration that migrated v1 keys to v2 keys. All keys should've been migrated by now, and we don't permit creation of new v1 keys anymore + +All nodes will have to remove the following secret configurations: + +- `Explorer.AccessKey` +- `Explorer.Secret` + +All nodes will have to remove the following configuration field: `ExplorerURL` + +### Fixed + +- Unauthenticated users executing CLI commands previously generated a confusing error log, which is now removed: + `[ERROR] Error in transaction, rolling back: session missing or expired, please login again pg/transaction.go:118 ` +- Fixed a bug that was preventing job runs to be displayed when the job `chainID` was disabled. +- `plugin txs evm create` returns a transaction hash for the attempted transaction in the CLI. Previously only the sender, recipient and `unstarted` state were returned. +- Fixed a bug where `evmChainId` is requested instead of `id` or `evm-chain-id` in CLI error verbatim +- Fixed a bug that would cause the node to shut down while performing backup +- Fixed health checker to include more services in the prometheus `health` metric and HTTP `/health` endpoint +- Fixed a bug where prices would not be parsed correctly in telemetry data + +## 2.5.0 - 2023-09-13 + +### Added + +- New prometheus metrics for mercury: + - `mercury_price_feed_missing` + - `mercury_price_feed_errors` + Nops may wish to add alerting on these. + +### Upcoming Required Configuration Change + +- Starting in 2.6.0, plugin nodes will no longer allow insecure configuration for production builds. Any TOML configuration that sets the following line will fail validation checks in `node start` or `node validate`: + +``` +AllowSimplePasswords=true +``` + +- To migrate on production builds, update the database password set in Database.URL to be 16 - 50 characters without leading or trailing whitespace. 
URI parsing rules apply to the chosen password - refer to [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986) for special character escape rules. + +### Added + +- Various Functions improvements + +## 2.4.0 - 2023-08-21 + +### Fixed + +- Updated `v2/keys/evm` and `v2/keys/eth` routes to return 400 and 404 status codes where appropriate. Previously 500s were returned when requested resources were not found or client requests could not be parsed. +- Fixed withdrawing ETH from CL node for EIP1559 enabled chains. Previously would error out unless validation was overridden with `allowHigherAmounts`. + +### Added + +- Added the ability to specify and merge fields from multiple secrets files. Overrides of fields and keys are not allowed. +- Added new database table `evm_upkeep_states` to persist eligibility state for recently checked upkeeps. + +### Upcoming Required Configuration Change + +- Starting in 2.6.0, plugin nodes will no longer allow insecure configuration for production builds. Any TOML configuration that sets the following line will fail validation checks in `node start` or `node validate`: + +``` +AllowSimplePasswords=true +``` + +- To migrate on production builds, update the database password set in Database.URL to be 16 - 50 characters without leading or trailing whitespace. URI parsing rules apply to the chosen password - refer to [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986) for special character escape rules. + +## 2.3.0 - 2023-07-28 + +### Added + +- Add a new field called `Order` (range from 1 to 100) to `EVM.Nodes` that is used for the `PriorityLevel` node selector and also as a tie-breaker for `HighestHead` and `TotalDifficulty`. `Order` levels are considered in ascending order. If not defined it will default to `Order = 100` (last level). +- Added new node selection mode called `PriorityLevel` for EVM, it is a tiered round-robin in ascending order of the`Order` field. 
Example: + +``` +[EVM.NodePool] +SelectionMode = 'PriorityLevel' + +[[EVM.Nodes]] +Name = '...' +WSURL = '...' +HTTPURL = '...' +Order = 5 +``` + +- The config keys `WebServer.StartTimeout` and `WebServer.HTTPMaxSize`. These keys respectively set a timeout for the node server to + start and set the max request size for HTTP requests. Previously these attributes were set by + `JobPipeline.DefaultHTTPLimit`/`JobPipeline.DefaultHTTPTimeout`. To migrate to these new fields, set their values to be identical to + `JobPipeline.DefaultHTTPLimit`/`JobPipeline.DefaultHTTPTimeout`. + +- Low latency oracle jobs now support in-protocol block range guarantees. This + is necessary in order to produce reports with block number ranges that do not + overlap. It can now be guaranteed at the protocol level, so we can use local + state instead of relying on an unreliable round-trip to the Mercury server. + +- New settings `Evm.GasEstimator.LimitJobType.OCR2`, `OCR2.DefaultTransactionQueueDepth`, `OCR2.SimulateTransactions` for OCR2 + jobs. These replace the settings `Evm.GasEstimator.LimitJobType.OCR`, `OCR.DefaultTransactionQueueDepth`, and `OCR.SimulateTransactions` + for OCR2. + +- Add new config parameter to OCR and OCR2 named `TraceLogging` that enables trace logging of OCR and OCR2 jobs, previously this behavior was controlled from the `P2P.TraceLogging` parameter. To maintain the same behavior set `OCR.TraceLogging` and `OCR2.TraceLogging` to the same value `P2P.TraceLogging` was set. + +- Add two new config parameters `WebServer.ListenIP` and `WebServer.TLS.ListenIP` which allows binding Plugin HTTP/HTTPS servers to a particular IP. The default is '0.0.0.0' which listens to all IP addresses (same behavior as before). Set to '127.0.0.1' to only allow connections from the local machine (this can be handy for local development). 
+- Add several new metrics for mercury feeds, related to WSRPC connections: + - `mercury_transmit_timeout_count` + - `mercury_dial_count` + - `mercury_dial_success_count` + - `mercury_dial_error_count` + - `mercury_connection_reset_count` + +Node operators may wish to add alerting based around these metrics. + +### Fixed + +- Fixed a bug in the `nodes xxx list` command that caused results to not be displayed correctly + +### Changed + +- Assumption violations for MaxFeePerGas >= BaseFeePerGas and MaxFeePerGas >= MaxPriorityFeePerGas in EIP-1559 effective gas price calculation will now use a gas price if specified +- Config validation now enforces protection against duplicate chain ids and node fields per provided TOML file. Duplicates accross multiple configuration files are still valid. If you have specified duplicate chain ids or nodes in a given configuration file, this change will error out of all `node` subcommands. +- Restricted scope of the `Evm.GasEstimator.LimitJobType.OCR`, `OCR.DefaultTransactionQueueDepth`, and `OCR.SimulateTransactions` settings so they + apply only to OCR. Previously these settings would apply to OCR2 as well as OCR. You must use the OCR2 equivalents added above if you + want your settings to apply to OCR2. + +### Removed + +- Legacy chain types Optimism and Optimism2. OptimismBedrock is now used to handle Optimism's special cases. +- Optimism Kovan configurations along with legacy error messages. + +# 2.2.0 - 2023-06-12 + +### Added + +- New prometheus metric for mercury transmit queue: `mercury_transmit_queue_load`. This is a gauge, scoped by feed ID, that measures how many pending transmissions are in the queue. This should generally speaking be small (< 10 or so). Nops may wish to add alerting if this exceeds some amount. +- Experimental support of runtime process isolation for Solana data feeds. Requires plugin binaries to be installed and + configured via the env vars `CL_SOLANA_CMD` and `CL_MEDIAN_CMD`. 
See [plugins/README.md](../plugins/README.md). + +### Fixed + +- Fixed a bug which made it impossible to re-send the same transaction after abandoning it while manually changing the nonce. + +### Changed + +- Set default for EVM.GasEstimator.BumpTxDepth to EVM.Transactions.MaxInFlight. +- Bumped batch size defaults for EVM specific configuration. If you are overriding any of these fields in your local config, please consider if it is necessary: + - `LogBackfillBatchSize = 1000` + - `RPCDefaultBatchSize = 250` + - `GasEstimator.BatchSize = 25` +- Dropped support for Development Mode configuration. `CL_DEV` is now ignored on production builds. +- Updated Docker image's PostgreSQL client (used for backups) to v15 in order to support PostgreSQL v15 servers. + + + +## 1.13.3 - 2023-06-06 + +### Fixed + +- The 1.13.2 release showed the 1.13.1 version in its VERSION file. This updates the VERSION file to now show 1.13.3. + +## 1.13.2 - 2023-06-05 + +### Fixed + +- Made logging level improvements for the Solana Transaction Manager to reduce excessive noise +- Fixed race condition in Solana TXM for sanity check and preventing misfired errors + +## 2.1.1 - 2023-05-22 + +### Updated + +- Upgraded WSRPC to v0.7.2 + +### Fixed + +- Fixed a bug that would cause telemetry to be sent with the wrong type. + +## 2.1.0 - 2023-05-16 + +### Changed + +- Database commands `plugin db ...` validate TOML configuration and secrets before executing. This change of behavior will report errors + if any Database-specific configuration is invalid. + +## 2.0.0 - 2023-04-20 + +### Added + +- Add OCR2 Plugin selection for FMS +- Added kebab case aliases for the following flags: + - `evm-chain-id` alias for `evmChainID` in commands: `plugin blocks replay`, `plugin forwarders track`, `plugin keys ... chain` + - `old-password` alias for `oldpassword` in commands: `plugin keys ... import` + - `new-password` alias for `newpassword` in commands: `plugin keys ... 
export` + - `new-role` alias for `newrole` in commands: `admin users chrole` + - `set-next-nonce` alias for `setNextNonce` in commands: `plugin keys ... chain` + +### Changed + +- TOML configuration and secrets are now scoped to `plugin node` command rather than being global flags. +- TOML configuration validation has been moved from `plugin config validate` to `plugin node validate`. +- Move `plugin node {status,profile}` to `plugin admin {status,profile}`. + +### Removed + +- Configuration with legacy environment variables is no longer supported. TOML is required. + +## 1.13.1 - 2023-04-06 + +### Fixed + +- Bumped the WSPRC dependency version to fix a bug that could lead to race conditions + +## 1.13.0 - 2023-03-16 + +### Added + +- Support for sending Bootstrap job specs to the feeds manager +- Support for sending OCR2 job specs to the feeds manager +- Log poller filters now saved in db, restored on node startup to guard against missing logs during periods where services are temporarily unable to start +- Add support for new job type `mercury` (low-latency oracle) +- New config option for EVM-based chains `AutoCreateKey`. If set to false, plugin will not automatically create any keys for this chain. This can be used in conjunction with mercury to prevent creating useless keys. Example: + +``` +[[EVM]] +ChainID = "1" +AutoCreateKey = false +``` + +- Add new option for relayConfig `feedID` that handles multi-config contracts. Can be applied to any OCR2 job. + +### Updated + +- TOML env var `CL_CONFIG` always processed as the last configuration, with the effect of being the final override + of any values provided via configuration files. + +### Changed + +- The config option `FeatureFeedsManager`/`FEATURE_FEEDS_MANAGER` is now true by default. + +### Removed + +- Terra is no longer supported + +## 1.12.0 - 2023-02-15 + +### Added + +- Prometheus gauge `mailbox_load_percent` for percent of "`Mailbox`" capacity used. 
+- New config option, `JobPipeline.MaxSuccessfulRuns` caps the total number of + saved completed runs per job. This is done in response to the `pipeline_runs` + table potentially becoming large, which can cause performance degradation. + The default is set to 10,000. You can set it to 0 to disable run saving + entirely. **NOTE**: This can only be configured via TOML and not with an + environment variable. +- Prometheus gauge vector `feeds_job_proposal_count` to track counts of job proposals partitioned by proposal status. +- Support for variable expression for the `minConfirmations` parameter on the `ethtx` task. + +### Updated + +- Removed `KEEPER_TURN_FLAG_ENABLED` as all networks/nodes have switched this to `true` now. The variable should be completely removed my NOPs. +- Removed `Keeper.UpkeepCheckGasPriceEnabled` config (`KEEPER_CHECK_UPKEEP_GAS_PRICE_FEATURE_ENABLED` in old env var configuration) as this feature is deprecated now. The variable should be completely removed by NOPs. + +### Fixed + +- Fixed (SQLSTATE 42P18) error on Job Runs page, when attempting to view specific older or infrequenty run jobs +- The `config dump` subcommand was fixed to dump the correct config data. + - The `P2P.V1.Enabled` config logic incorrectly matched V2, by only setting explicit true values so that otherwise the default is used. The `V1.Enabled` default value is actually true already, and is now updated to only set explicit false values. + - The `[EVM.Transactions]` config fields `MaxQueued` & `MaxInFlight` will now correctly match `ETH_MAX_QUEUED_TRANSACTIONS` & `ETH_MAX_IN_FLIGHT_TRANSACTIONS`. + +## 1.11.0 - 2022-12-12 + +### Added + +- New `EVM.NodePool.SelectionMode` `TotalDifficulty` to use the node with the greatest total difficulty. 
+- Add the following prometheus metrics (labelled by bridge name) for monitoring external adapter queries: + - `bridge_latency_seconds` + - `bridge_errors_total` + - `bridge_cache_hits_total` + - `bridge_cache_errors_total` +- `EVM.NodePool.SyncThreshold` to ensure that live nodes do not lag too far behind. + +> ```toml +> SyncThreshold = 5 # Default +> ``` +> +> SyncThreshold controls how far a node may lag behind the best node before being marked out-of-sync. +> Depending on `SelectionMode`, this represents a difference in the number of blocks (`HighestHead`, `RoundRobin`), or total difficulty (`TotalDifficulty`). +> +> Set to 0 to disable this check. + +#### TOML Configuration (experimental) + +Plugin now supports static configuration via TOML files as an alternative to the existing combination of environment variables and persisted database configurations. + +This is currently _experimental_, but in the future (with `v2.0.0`), it will become _mandatory_ as the only supported configuration method. Avoid using TOML for configuration unless running on a test network for this release. + +##### How to use + +TOML configuration can be enabled by simply using the new `-config ` flag or `CL_CONFIG` environment variable. +Multiple files can be used (`-c configA.toml -c configB.toml`), and will be applied in order with duplicated fields overriding any earlier values. + +Existing nodes can automatically generate their equivalent TOML configuration via the `config dump` subcommand. +Secrets must be configured manually and passed via `-secrets ` or equivalent environment variables. + +Format details: [CONFIG.md](../docs/CONFIG.md) • [SECRETS.md](../docs/SECRETS.md) + +**Note:** You _cannot_ mix legacy environment variables with TOML configuration. Leaving any legacy env vars set will fail validation and prevent boot. + +##### Examples + +Dump your current configuration as TOML. 
+ +```bash +plugin config dump > config.toml +``` + +Inspect your full effective configuration, and ensure it is valid. This includes defaults. + +```bash +plugin --config config.toml --secrets secrets.toml config validate +``` + +Run the node. + +```bash +plugin -c config.toml -s secrets.toml node start +``` + +#### Bridge caching + +##### BridgeCacheTTL + +- Default: 0s + +When set to `d` units of time, this variable enables using cached bridge responses that are at most `d` units old. Caching is disabled by default. + +Example `BridgeCacheTTL=10s`, `BridgeCacheTTL=1m` + +### Fixed + +- Fixed a minor bug whereby Plugin would not always resend all pending transactions when using multiple keys + +### Updated + +- `NODE_NO_NEW_HEADS_THRESHOLD=0` no longer requires `NODE_SELECTION_MODE=RoundRobin`. + +## 1.10.0 - 2022-11-15 + +### Added + +#### New optional external logger added + +##### AUDIT_LOGGER_FORWARD_TO_URL + +- Default: _none_ + +When set, this environment variable configures and enables an optional HTTP logger which is used specifically to send audit log events. Audit logs events are emitted when specific actions are performed by any of the users through the node's API. The value of this variable should be a full URL. Log items will be sent via POST + +There are audit log implemented for the following events: + +- Auth & Sessions (new session, login success, login failed, 2FA enrolled, 2FA failed, password reset, password reset failed, etc.) +- CRUD actions for all resources (add/create/delete resources such as bridges, nodes, keys) +- Sensitive actions (keys exported/imported, config changed, log level changed, environment dumped) + +A full list of audit log enum types can be found in the source within the `audit` package (`audit_types.go`). + +The following `AUDIT_LOGGER_*` environment variables below configure this optional audit log HTTP forwarder. 
+ +##### AUDIT_LOGGER_HEADERS + +- Default: _none_ + +An optional list of HTTP headers to be added for every optional audit log event. If the above `AUDIT_LOGGER_FORWARD_TO_URL` is set, audit log events will be POSTed to that URL, and will include headers specified in this environment variable. One example use case is auth for example: `AUDIT_LOGGER_HEADERS="Authorization||{{token}}"`. + +Header keys and values are delimited on ||, and multiple headers can be added with a forward slash delimiter ('\\'). An example of multiple key value pairs: +`AUDIT_LOGGER_HEADERS="Authorization||{{token}}\Some-Other-Header||{{token2}}"` + +##### AUDIT_LOGGER_JSON_WRAPPER_KEY + +- Default: _none_ + +When the audit log HTTP forwarder is enabled, if there is a value set for this optional environment variable then the POST body will be wrapped in a dictionary in a field specified by the value of set variable. This is to help enable specific logging service integrations that may require the event JSON in a special shape. For example: `AUDIT_LOGGER_JSON_WRAPPER_KEY=event` will create the POST body: + +``` +{ + "event": { + "eventID": EVENT_ID_ENUM, + "data": ... + } +} +``` + +#### Automatic connectivity detection; Plugin will no longer bump excessively if the network is broken + +This feature only applies on EVM chains when using BlockHistoryEstimator (the most common case). + +Plugin will now try to automatically detect if there is a transaction propagation/connectivity issue and prevent bumping in these cases. This can help avoid the situation where RPC nodes are not propagating transactions for some reason (e.g. go-ethereum bug, networking issue etc) and Plugin responds in a suboptimal way by bumping transactions to a very high price in an effort to get them mined. This can lead to unnecessary expense when the connectivity issue is resolved and the transactions are finally propagated into the mempool. 
+ +This feature is enabled by default with fairly conservative settings: if a transaction has been priced above the 90th percentile of the past 12 blocks, but still wants to bump due to not being mined, a connectivity/propagation issue is assumed and all further bumping will be prevented for this transaction. In this situation, Plugin will start firing the `block_history_estimator_connectivity_failure_count` prometheus counter and logging at critical level until the transaction is mined. + +The default settings should work fine for most users. For advanced users, the values can be tweaked by changing `BLOCK_HISTORY_ESTIMATOR_CHECK_INCLUSION_BLOCKS` and `BLOCK_HISTORY_ESTIMATOR_CHECK_INCLUSION_PERCENTILE`. + +To disable connectivity checking completely, set `BLOCK_HISTORY_ESTIMATOR_CHECK_INCLUSION_BLOCKS=0`. + +### Changed + +- The default maximum gas price on most networks is now effectively unlimited. + + - Plugin will bump as high as necessary to get a transaction included. The connectivity checker is relied on to prevent excessive bumping when there is a connectivity failure. + - If you want to change this, you can manually set `ETH_MAX_GAS_PRICE_WEI`. + +- EVMChainID field will be auto-added with default chain id to job specs of newly created OCR jobs, if not explicitly included. + - Old OCR jobs missing EVMChainID will continue to run on any chain ETH_CHAIN_ID is set to (or first chain if unset), which may be changed after a restart. + - Newly created OCR jobs will only run on a single fixed chain, unaffected by changes to ETH_CHAIN_ID after the job is added. + - It should no longer be possible to end up with multiple OCR jobs for a single contract running on the same chain; only one job per contract per chain is allowed + - If there are any existing duplicate jobs (per contract per chain), all but the job with the latest creation date will be pruned during upgrade. 
+ +### Fixed + +- Fixed minor bug where Plugin would attempt (and fail) to estimate a tip cap higher than the maximum configured gas price in EIP1559 mode. It now caps the tipcap to the max instead of erroring. +- Fixed bug whereby it was impossible to remove eth keys that had extant transactions. Now, removing an eth key will drop all associated data automatically including past transactions. + +## 1.9.0 - 2022-10-12 + +### Added + +- Added `length` and `lessthan` tasks (pipeline). +- Added `gasUnlimited` parameter to `ethcall` task. +- `/keys` page in Operator UI now exposes several admin commands, namely: + - "abandon" to abandon all current txes + - enable/disable a key for a given chain + - manually set the nonce for a key + See [this PR](https://github.com/goplugin/pluginv3.0/pull/7406) for a screenshot example. + +## 1.8.1 - 2022-09-29 + +### Added + +- New `GAS_ESTIMATOR_MODE` for Arbitrum to support Nitro's multi-dimensional gas model, with dynamic gas pricing and limits. + - NOTE: It is recommended to remove `GAS_ESTIMATOR_MODE` as an env var if you have it set in order to use the new default. + - This new, default estimator for Arbitrum networks uses the suggested gas price (up to `ETH_MAX_GAS_PRICE_WEI`, with `1000 gwei` default) as well as an estimated gas limit (up to `ETH_GAS_LIMIT_MAX`, with `1,000,000,000` default). +- `ETH_GAS_LIMIT_MAX` to put a maximum on the gas limit returned by the `Arbitrum` estimator. + +### Changed + +- EIP1559 is now enabled by default on Goerli network + +## 1.8.0 - 2022-09-01 + +### Added + +- Added `hexencode` and `base64encode` tasks (pipeline). +- `forwardingAllowed` per job attribute to allow forwarding txs submitted by the job. 
+- Keypath now supports paths with any depth, instead of limiting it to 2 +- `Arbitrum` chains are no longer restricted to only `FixedPrice` `GAS_ESTIMATOR_MODE` +- Updated `Arbitrum Rinkeby & Mainnet & Mainnet` configurationss for Nitro +- Add `Arbitrum Goerli` configuration +- It is now possible to use the same key across multiple chains. +- `NODE_SELECTION_MODE` (`EVM.NodePool.SelectionMode`) controls node picking strategy. Supported values: `HighestHead` (default) and `RoundRobin`: + - `RoundRobin` mode simply iterates among available alive nodes. This was the default behavior prior to this release. + - `HighestHead` mode picks a node having the highest reported head number among other alive nodes. When several nodes have the same latest head number, the strategy sticks to the last used node. + For chains having `NODE_NO_NEW_HEADS_THRESHOLD=0` (such as Arbitrum, Optimism), the implementation will fall back to `RoundRobin` mode. +- New `keys eth chain` command + - This can also be accessed at `/v2/keys/evm/chain`. + - Usage examples: + - Manually (re)set a nonce: + - `plugin keys eth chain --address "0xEXAMPLE" --evmChainID 99 --setNextNonce 42` + - Enable a key for a particular chain: + - `plugin keys eth chain --address "0xEXAMPLE" --evmChainID 99 --enable` + - Disable a key for a particular chain: + - `plugin keys eth chain --address "0xEXAMPLE" --evmChainID 99 --disable` + - Abandon all currently pending transactions (use with caution!): + - `plugin evm keys chain --address "0xEXAMPLE" --evmChainID 99 --abandon` + - Commands can be combined e.g. + - Reset nonce and abandon all currently pending transaction: + - `plugin evm keys chain --address "0xEXAMPLE" --evmChainID 99 --setNextNonce 42 --abandon` + +### Changed + +- The `setnextnonce` local client command has been removed, and replaced by a more general key/chain client command. 
+- `plugin admin users update` command is replaced with `plugin admin users chrole` (only the role can be changed for a user) + +## 1.7.1 - 2022-08-22 + +### Added + +- `Arbitrum Nitro` client error support + +## 1.7.0 - 2022-08-08 + +### Added + +- `p2pv2Bootstrappers` has been added as a new optional property of OCR1 job specs; default may still be specified with P2PV2_BOOTSTRAPPERS config param +- Added official support for Sepolia chain +- Added `hexdecode` and `base64decode` tasks (pipeline). +- Added support for Besu execution client (note that while Plugin supports Besu, Besu itself [has](https://github.com/hyperledger/besu/issues/4212) [multiple](https://github.com/hyperledger/besu/issues/4192) [bugs](https://github.com/hyperledger/besu/issues/4114) that make it unreliable). +- Added the functionality to allow the root admin CLI user (and any additional admin users created) to create and assign tiers of role based access to new users. These new API users will be able to log in to the Operator UI independently, and can each have specific roles tied to their account. There are four roles: `admin`, `edit`, `run`, and `view`. + - User management can be configured through the use of the new admin CLI command `plugin admin users`. Be sure to run `plugin adamin login`. For example, a readonly user can be created with: `plugin admin users create --email=operator-ui-read-only@test.com --role=view`. + - Updated documentation repo with a break down of actions to required role level +- Added per job spec and per job type gas limit control. The following rule of precedence is applied: + +1. task-specific parameter `gasLimit` overrides anything else when specified (e.g. `ethtx` task has such a parameter). +2. job-spec attribute `gasLimit` has the scope of the current job spec only. +3. 
job-type limits `ETH_GAS_LIMIT_*_JOB_TYPE` affect any jobs of the corresponding type: + +``` +ETH_GAS_LIMIT_OCR_JOB_TYPE # EVM.GasEstimator.LimitOCRJobType +ETH_GAS_LIMIT_DR_JOB_TYPE # EVM.GasEstimator.LimitDRJobType +ETH_GAS_LIMIT_VRF_JOB_TYPE # EVM.GasEstimator.LimitVRFJobType +ETH_GAS_LIMIT_FM_JOB_TYPE # EVM.GasEstimator.LimitFMJobType +ETH_GAS_LIMIT_KEEPER_JOB_TYPE # EVM.GasEstimator.LimitKeeperJobType +``` + +4. global `ETH_GAS_LIMIT_DEFAULT` (`EVM.GasEstimator.LimitDefault`) value is the last resort. + +### Fixed + +- Addressed a very rare bug where using multiple nodes with differently configured RPC tx fee caps could cause missed transaction. Reminder to everyone to ensure that your RPC nodes have no caps (for more information see the [performance and tuning guide](https://docs.chain.link/docs/evm-performance-configuration/)). +- Improved handling of unknown transaction error types, making Plugin more robust in certain cases on unsupported chains/RPC clients + +## [1.6.0] - 2022-07-20 + +### Changed + +- After feedback from users, password complexity requirements have been simplified. These are the new, simplified requirements for any kind of password used with Plugin: + +1. Must be 16 characters or more +2. Must not contain leading or trailing whitespace +3. User passwords must not contain the user's API email + +- Simplified the Keepers job spec by removing the observation source from the required parameters. + +## [1.5.1] - 2022-06-27 + +### Fixed + +- Fix rare out-of-sync to invalid-chain-id transaction +- Fix key-specific max gas limits for gas estimator and ensure we do not bump gas beyond key-specific limits +- Fix EVM_FINALITY_DEPTH => ETH_FINALITY_DEPTH + +## [1.5.0] - 2022-06-21 + +### Changed + +- Plugin will now log a warning if the postgres database password is missing or too insecure. 
Passwords should conform to the following rules: + +``` +Must be longer than 12 characters +Must comprise at least 3 of: + lowercase characters + uppercase characters + numbers + symbols +Must not comprise: + More than three identical consecutive characters + Leading or trailing whitespace (note that a trailing newline in the password file, if present, will be ignored) +``` + +For backward compatibility all insecure passwords will continue to work, however in a future version of Plugin insecure passwords will prevent application boot. To bypass this check at your own risk, you may set `SKIP_DATABASE_PASSWORD_COMPLEXITY_CHECK=true`. + +- `MIN_OUTGOING_CONFIRMATIONS` has been removed and no longer has any effect. `ETH_FINALITY_DEPTH` is now used as the default for `ethtx` confirmations instead. You may override this on a per-task basis by setting `minConfirmations` in the task definition e.g. `foo [type=ethtx minConfirmations=42 ...]`. NOTE: This may have a minor impact on performance on very high throughput chains. If you don't care about reporting task status in the UI, it is recommended to set `minConfirmations=0` in your job specs. For more details, see the [relevant section of the performance tuning guide](https://www.notion.so/plugin/EVM-performance-configuration-handbook-a36b9f84dcac4569ba68772aa0c1368c#e9998c2f722540b597301a640f53cfd4). + +- The following ENV variables have been deprecated, and will be removed in a future release: `INSECURE_SKIP_VERIFY`, `CLIENT_NODE_URL`, `ADMIN_CREDENTIALS_FILE`. These vars only applied to Plugin when running in client mode and have been replaced by command line args, notably: `--insecure-skip-verify`, `--remote-node-url URL` and `--admin-credentials-file FILE` respectively. More information can be found by running `./plugin --help`. + +- The `Optimism2` `GAS_ESTIMATOR_MODE` has been renamed to `L2Suggested`. The old name is still supported for now. 
+ +- The `p2pBootstrapPeers` property on OCR2 job specs has been renamed to `p2pv2Bootstrappers`. + +### Added + +- Added `ETH_USE_FORWARDERS` config option to enable transactions forwarding contracts. +- In job pipeline (direct request) the three new block variables are exposed: + - `$(jobRun.blockReceiptsRoot)` : the root of the receipts trie of the block (hash) + - `$(jobRun.blockTransactionsRoot)` : the root of the transaction trie of the block (hash) + - `$(jobRun.blockStateRoot)` : the root of the final state trie of the block (hash) +- `ethtx` tasks can now be configured to error if the transaction reverts on-chain. You must set `failOnRevert=true` on the task to enable this behavior, like so: + +`foo [type=ethtx failOnRevert=true ...]` + +So the `ethtx` task now works as follows: + +If minConfirmations == 0, task always succeeds and nil is passed as output +If minConfirmations > 0, the receipt is passed through as output +If minConfirmations > 0 and failOnRevert=true then the ethtx task will error on revert + +If `minConfirmations` is not set on the task, the chain default will be used which is usually 12 and always greater than 0. + +- `http` task now allows specification of request headers. Use like so: `foo [type=http headers="[\\"X-Header-1\\", \\"value1\\", \\"X-Header-2\\", \\"value2\\"]"]`. + +### Fixed + +- Fixed `max_unconfirmed_age` metric. Previously this would incorrectly report the max time since the last rebroadcast, capping the upper limit to the EthResender interval. This now reports the correct value of total time elapsed since the _first_ broadcast. +- Correctly handle the case where bumped gas would exceed the RPC node's configured maximum on Fantom (note that node operators should check their Fantom RPC node configuration and remove the fee cap if there is one) +- Fixed handling of Metis internal fee change + +### Removed + +- The `Optimism` OVM 1.0 `GAS_ESTIMATOR_MODE` has been removed. 
+ +## [1.4.1] - 2022-05-11 + +### Fixed + +- Ensure failed EthSubscribe didn't register a (\*rpc.ClientSubscription)(nil) which would lead to a panic on Unsubscribe +- Fixes parsing of float values on job specs + +## [1.4.0] - 2022-05-02 + +### Added + +- JSON parse tasks (v2) now support a custom `separator` parameter to substitute for the default `,`. +- Log slow SQL queries +- Fantom and avalanche block explorer urls +- Display `requestTimeout` in job UI +- Keeper upkeep order is shuffled + +### Fixed + +- `LOG_FILE_MAX_SIZE` handling +- Improved websocket subscription management (fixes issues with multiple-primary-node failover from 1.3.x) +- VRFv2 fixes and enhancements +- UI support for `minContractPaymentLinkJuels` + +## [1.3.0] - 2022-04-18 + +### Added + +- Added support for Keeper registry v1.2 in keeper jobs +- Added disk rotating logs. Plugin will now always log to disk at debug level. The default output directory for debug logs is Plugin's root directory (ROOT_DIR) but can be configured by setting LOG_FILE_DIR. This makes it easier for node operators to report useful debugging information to Plugin's team, since all the debug logs are conveniently located in one directory. Regular logging to STDOUT still works as before and respects the LOG_LEVEL env var. If you want to log in disk at a particular level, you can pipe STDOUT to disk. This automatic debug-logs-to-disk feature is enabled by default, and will remain enabled as long as the `LOG_FILE_MAX_SIZE` ENV var is set to a value greater than zero. The amount of disk space required for this feature to work can be calculated with the following formula: `LOG_FILE_MAX_SIZE` \* (`LOG_FILE_MAX_BACKUPS` + 1). If your disk doesn't have enough disk space, the logging will pause and the application will log Errors until space is available again. 
New environment variables related to this feature: + - `LOG_FILE_MAX_SIZE` (default: 5120mb) - this env var allows you to override the log file's max size (in megabytes) before file rotation. + - `LOG_FILE_MAX_AGE` (default: 0) - if `LOG_FILE_MAX_SIZE` is set, this env var allows you to override the log file's max age (in days) before file rotation. Keeping this config with the default value means not to remove old log files. + - `LOG_FILE_MAX_BACKUPS` (default: 1) - if `LOG_FILE_MAX_SIZE` is set, this env var allows you to override the max amount of old log files to retain. Keeping this config with the default value means to retain 1 old log file at most (though `LOG_FILE_MAX_AGE` may still cause them to get deleted). If this is set to 0, the node will retain all old log files instead. +- Added support for the `force` flag on `plugin blocks replay`. If set to true, already consumed logs that would otherwise be skipped will be rebroadcasted. +- Added version compatibility check when using CLI to login to a remote node. flag `bypass-version-check` skips this check. +- Interrim solution to set multiple nodes/chains from ENV. This gives the ability to specify multiple RPCs that the Plugin node will constantly monitor for health and sync status, detecting dead nodes and out of sync nodes, with automatic failover. This is a temporary stand-in until configuration is overhauled and will be removed in future in favor of a config file. Set as such: `EVM_NODES='{...}'` where the var is a JSON array containing the node specifications. This is not compatible with using any other way to specify node via env (e.g. `ETH_URL`, `ETH_SECONDARY_URL`, `ETH_CHAIN_ID` etc). **WARNING**: Setting this environment variable will COMPLETELY ERASE your `evm_nodes` table on every boot and repopulate from the given data, nullifying any runtime modifications. 
Make sure to carefully read the [EVM performance configuration guide](https://plugin.notion.site/EVM-performance-configuration-handbook-a36b9f84dcac4569ba68772aa0c1368c) for best practices here. + +For example: + +```bash +export EVM_NODES=' +[ + { + "name": "primary_1", + "evmChainId": "137", + "wsUrl": "wss://endpoint-1.example.com/ws", + "httpUrl": "http://endpoint-1.example.com/", + "sendOnly": false + }, + { + "name": "primary_2", + "evmChainId": "137", + "wsUrl": "ws://endpoint-2.example.com/ws", + "httpUrl": "http://endpoint-2.example.com/", + "sendOnly": false + }, + { + "name": "primary_3", + "evmChainId": "137", + "wsUrl": "wss://endpoint-3.example.com/ws", + "httpUrl": "http://endpoint-3.example.com/", + "sendOnly": false + }, + { + "name": "sendonly_1", + "evmChainId": "137", + "httpUrl": "http://endpoint-4.example.com/", + "sendOnly": true + }, + { + "name": "sendonly_2", + "evmChainId": "137", + "httpUrl": "http://endpoint-5.example.com/", + "sendOnly": true + } +] +' +``` + +### Changed + +- Changed default locking mode to "dual". Bugs in lease locking have been ironed out and this paves the way to making "lease" the default in the future. It is recommended to set `DATABASE_LOCKING_MODE=lease`, default is set to "dual" only for backwards compatibility. +- EIP-1559 is now enabled by default on mainnet. To disable (go back to legacy mode) set `EVM_EIP1559_DYNAMIC_FEES=false`. The default settings should work well, but if you wish to tune your gas controls, see the [documentation](https://docs.chain.link/docs/configuration-variables/#evm-gas-controls). + +Note that EIP-1559 can be manually enabled on other chains by setting `EVM_EIP1559_DYNAMIC_FEES=true` but we only support it for official Ethereum mainnet and testnets. 
It is _not_ recommended enabling this setting on Polygon since during our testing process we found that the EIP-1559 fee market appears to be broken on all Polygon chains and EIP-1559 transactions are actually less likely to get included than legacy transactions. + +See issue: https://github.com/maticnetwork/bor/issues/347 + +- The pipeline task runs have changed persistence protocol (database), which will result in inability to decode some existing task runs. All new runs should be working with no issues. + +### Removed + +- `LOG_TO_DISK` ENV var. + +## [1.2.1] - 2022-03-17 + +This release hotfixes issues from moving a new CI/CD system. Feature-wise the functionality is the same as `v1.2.0`. + +### Fixed + +- Fixed CI/CD issue where environment variables were not being passed into the underlying build + +## [1.2.0] - 2022-03-02 + +### Added + +- Added support for the Nethermind Ethereum client. +- Added support for batch sending telemetry to the ingress server to improve performance. +- Added v2 P2P networking support (alpha) + +New ENV vars: + +- `ADVISORY_LOCK_CHECK_INTERVAL` (default: 1s) - when advisory locking mode is enabled, this controls how often Plugin checks to make sure it still holds the advisory lock. It is recommended to leave this at the default. +- `ADVISORY_LOCK_ID` (default: 1027321974924625846) - when advisory locking mode is enabled, the application advisory lock ID can be changed using this env var. All instances of Plugin that might run on a particular database must share the same advisory lock ID. It is recommended to leave this at the default. +- `LOG_FILE_DIR` (default: plugin root directory) - if `LOG_FILE_MAX_SIZE` is set, this env var allows you to override the output directory for logging. +- `SHUTDOWN_GRACE_PERIOD` (default: 5s) - when node is shutting down gracefully and exceeded this grace period, it terminates immediately (trying to close DB connection) to avoid being SIGKILLed. 
+- `SOLANA_ENABLED` (default: false) - set to true to enable Solana support +- `TERRA_ENABLED` (default: false) - set to true to enable Terra support +- `BLOCK_HISTORY_ESTIMATOR_EIP1559_FEE_CAP_BUFFER_BLOCKS` - if EIP1559 mode is enabled, this optional env var controls the buffer blocks to add to the current base fee when sending a transaction. By default, the gas bumping threshold + 1 block is used. It is not recommended to change this unless you know what you are doing. +- `TELEMETRY_INGRESS_BUFFER_SIZE` (default: 100) - the number of telemetry messages to buffer before dropping new ones +- `TELEMETRY_INGRESS_MAX_BATCH_SIZE` (default: 50) - the maximum number of messages to batch into one telemetry request +- `TELEMETRY_INGRESS_SEND_INTERVAL` (default: 500ms) - the cadence on which batched telemetry is sent to the ingress server +- `TELEMETRY_INGRESS_SEND_TIMEOUT` (default: 10s) - the max duration to wait for the request to complete when sending batch telemetry +- `TELEMETRY_INGRESS_USE_BATCH_SEND` (default: true) - toggles sending telemetry using the batch client to the ingress server +- `NODE_NO_NEW_HEADS_THRESHOLD` (default: 3m) - RPC node will be marked out-of-sync if it does not receive a new block for this length of time. Set to 0 to disable head monitoring for liveness checking, +- `NODE_POLL_FAILURE_THRESHOLD` (default: 5) - number of consecutive failed polls before an RPC node is marked dead. Set to 0 to disable poll liveness checking. +- `NODE_POLL_INTERVAL` (default: 10s) - how often to poll. Set to 0 to disable all polling. + +#### Bootstrap job + +Added a new `bootstrap` job type. This job removes the need for every job to implement their own bootstrapping logic. +OCR2 jobs with `isBootstrapPeer=true` are automatically migrated to the new format. 
+ +The spec parameters are similar to a basic OCR2 job, an example would be: + +``` +type = "bootstrap" +name = "bootstrap" +relay = "evm" +schemaVersion = 1 +contractID = "0xAb5801a7D398351b8bE11C439e05C5B3259aeC9B" +[relayConfig] +chainID = 4 +``` + +#### EVM node hot failover and liveness checking + +Plugin now supports hot failover and liveness checking for EVM nodes. This completely supersedes and replaces the Fiews failover proxy and should remove the need for any kind of failover proxy between Plugin and its RPC nodes. + +In order to use this feature, you'll need to set multiple primary RPC nodes. + +### Removed + +- `deleteuser` CLI command. + +### Changed + +`EVM_DISABLED` has been deprecated and replaced by `EVM_ENABLED` for consistency with other feature flags. +`ETH_DISABLED` has been deprecated and replaced by `EVM_RPC_ENABLED` for consistency, and because this was confusingly named. In most cases you want to set `EVM_ENABLED=false` and not `EVM_RPC_ENABLED=false`. + +Log colorization is now disabled by default because it causes issues when piped to text files. To re-enable log colorization, set `LOG_COLOR=true`. + +#### Polygon/matic defaults changed + +Due to increasingly hostile network conditions on Polygon we have had to increase a number of default limits. This is to work around numerous and very deep re-orgs, high mempool pressure and a failure by the network to propagate transactions properly. These new limits are likely to increase load on both your Plugin node and database, so please be sure to monitor CPU and memory usage on both and make sure they are adequately specced to handle the additional load. + +## [1.1.1] - 2022-02-14 + +### Added + +- `BLOCK_HISTORY_ESTIMATOR_EIP1559_FEE_CAP_BUFFER_BLOCKS` - if EIP1559 mode is enabled, this optional env var controls the buffer blocks to add to the current base fee when sending a transaction. By default, the gas bumping threshold + 1 block is used. 
It is not recommended to change this unless you know what you are doing. +- `EVM_GAS_FEE_CAP_DEFAULT` - if EIP1559 mode is enabled, and FixedPrice gas estimator is used, this env var controls the fixed initial fee cap. +- Allow dumping pprof even when not in dev mode, useful for debugging (go to /v2/debug/pprof as a logged in user) + +### Fixed + +- Update timeout so we don’t exit early on very large log broadcaster backfills + +#### EIP-1559 Fixes + +Fixed issues with EIP-1559 related to gas bumping. Due to [go-ethereum's implementation](https://github.com/ethereum/go-ethereum/blob/bff330335b94af3643ac2fb809793f77de3069d4/core/tx_list.go#L298) which introduces additional restrictions on top of the EIP-1559 spec, we must bump the FeeCap at least 10% each time in order for the gas bump to be accepted. + +The new EIP-1559 implementation works as follows: + +If you are using FixedPriceEstimator: + +- With gas bumping disabled, it will submit all transactions with `feecap=ETH_MAX_GAS_PRICE_WEI` and `tipcap=EVM_GAS_TIP_CAP_DEFAULT` +- With gas bumping enabled, it will submit all transactions initially with `feecap=EVM_GAS_FEE_CAP_DEFAULT` and `tipcap=EVM_GAS_TIP_CAP_DEFAULT`. + +If you are using BlockHistoryEstimator (default for most chains): + +- With gas bumping disabled, it will submit all transactions with `feecap=ETH_MAX_GAS_PRICE_WEI` and `tipcap=` +- With gas bumping enabled (default for most chains) it will submit all transactions initially with `feecap = ( current block base fee * (1.125 ^ N) + tipcap )` where N is configurable by setting BLOCK_HISTORY_ESTIMATOR_EIP1559_FEE_CAP_BUFFER_BLOCKS but defaults to `gas bump threshold+1` and `tipcap=` + +Bumping works as follows: + +- Increase tipcap by `max(tipcap * (1 + ETH_GAS_BUMP_PERCENT), tipcap + ETH_GAS_BUMP_WEI)` +- Increase feecap by `max(feecap * (1 + ETH_GAS_BUMP_PERCENT), feecap + ETH_GAS_BUMP_WEI)` + +## [1.1.0] - 2022-01-25 + +### Added + +- Added support for Sentry error reporting. 
Set `SENTRY_DSN` at run-time to enable reporting. +- Added Prometheus counters: `log_warn_count`, `log_error_count`, `log_critical_count`, `log_panic_count` and `log_fatal_count` representing the corresponding number of warning/error/critical/panic/fatal messages in the log. +- The new prometheus metric `tx_manager_tx_attempt_count` is a Prometheus Gauge that should represent the total number of transaction attempts that are awaiting confirmation for this node. +- The new prometheus metric `version` displays the node software version (tag) as well as the corresponding commit hash. +- CLI command `keys eth list` is updated to display key specific max gas prices. +- CLI command `keys eth create` now supports optional `maxGasPriceGWei` parameter. +- CLI command `keys eth update` is added to update key specific parameters like `maxGasPriceGWei`. +- Add partial support for Moonriver chain +- For OCR jobs, `databaseTimeout`, `observationGracePeriod` and `contractTransmitterTransmitTimeout` can be specified to override chain-specific default values. + +Two new log levels have been added. + +- `[crit]`: _Critical_ level logs are more severe than `[error]` and require quick action from the node operator. +- `[debug] [trace]`: _Trace_ level logs contain extra `[debug]` information for development, and must be compiled in via `-tags trace`. + +#### [Beta] Multichain support added + +As a beta feature, Plugin now supports connecting to multiple different EVM chains simultaneously. + +This means that one node can run jobs on Goerli, Kovan, BSC and Mainnet (for example). Note that you can still have as many eth keys as you like, but each eth key is pegged to one chain only. + +Extensive efforts have been made to make migration for existing nops as seamless as possible. Generally speaking, you should not have to make any changes when upgrading your existing node to this version. All your jobs will continue to run as before. 
+ +The overall summary of changes is such: + +##### Chains/Ethereum Nodes + +EVM chains are now represented as a first class object within the plugin node. You can create/delete/list them using the CLI or API. + +At least one primary node is required in order for a chain to connect. You may additionally specify zero or more send-only nodes for a chain. It is recommended to use the CLI/API or GUI to add nodes to chain. + +###### Creation + +```bash +plugin chains evm create -id 42 # creates an evm chain with chain ID 42 (see: https://chainlist.org/) +plugin nodes create -chain-id 42 -name 'my-primary-kovan-full-node' -type primary -ws-url ws://node.example/ws -http-url http://node.example/rpc # http-url is optional but recommended for primaries +plugin nodes create -chain-id 42 -name 'my-send-only-backup-kovan-node' -type sendonly -http-url http://some-public-node.example/rpc +``` + +###### Listing + +```bash +plugin chains evm list +plugin nodes list +``` + +###### Deletion + +```bash +plugin nodes delete 'my-send-only-backup-kovan-node' +plugin chains evm delete 42 +``` + +###### Legacy eth ENV vars + +The old way of specifying chains using environment variables is still supported but discouraged. It works as follows: + +If you specify `ETH_URL` then the values of `ETH_URL`, `ETH_CHAIN_ID`, `ETH_HTTP_URL` and `ETH_SECONDARY_URLS` will be used to create/update chains and nodes representing these values in the database. If an existing chain/node is found it will be overwritten. This behavior is used mainly to ease the process of upgrading, and on subsequent runs (once your old settings have been written to the database) it is recommended to unset these ENV vars and use the API commands exclusively to administer chains and nodes. + +##### Jobs/tasks + +By default, all jobs/tasks will continue to use the default chain (specified by `ETH_CHAIN_ID`). 
However, the following jobs now allow an additional `evmChainID` key in their TOML: + +- VRF +- DirectRequest +- Keeper +- OCR +- Fluxmonitor + +You can pin individual jobs to a particular chain by specifying the `evmChainID` explicitly. Here is an example job to demonstrate: + +```toml +type = "keeper" +evmChainID = 3 +schemaVersion = 1 +name = "example keeper spec" +contractAddress = "0x9E40733cC9df84636505f4e6Db28DCa0dC5D1bba" +externalJobID = "0EEC7E1D-D0D2-476C-A1A8-72DFB6633F49" +fromAddress = "0xa8037A20989AFcBC51798de9762b351D63ff462e" +``` + +The above keeper job will _always_ run on chain ID 3 (Ropsten) regardless of the `ETH_CHAIN_ID` setting. If no chain matching this ID has been added to the plugin node, the job cannot be created (you must create the chain first). + +In addition, you can also specify `evmChainID` on certain pipeline tasks. This allows for cross-chain requests, for example: + +```toml +type = "directrequest" +schemaVersion = 1 +evmChainID = 42 +name = "example cross chain spec" +contractAddress = "0x613a38AC1659769640aaE063C651F48E0250454C" +externalJobID = "0EEC7E1D-D0D2-476C-A1A8-72DFB6633F90" +observationSource = """ + decode_log [type=ethabidecodelog ... ] + ... + submit [type=ethtx to="0x613a38AC1659769640aaE063C651F48E0250454C" data="$(encode_tx)" minConfirmations="2" evmChainID="3"] + decode_log-> ... ->submit; +""" +``` + +In the example above (which excludes irrelevant pipeline steps for brevity) a log can be read from the chain with ID 42 (Kovan) and a transaction emitted on chain with ID 3 (Ropsten). + +Tasks that support the `evmChainID` parameter are as follows: + +- `ethcall` +- `estimategaslimit` +- `ethtx` + +###### Defaults + +If the job- or task-specific `evmChainID` is _not_ given, the job/task will simply use the default as specified by the `ETH_CHAIN_ID` env variable. + +Generally speaking, the default config values for each chain are good enough. 
But in some cases it is necessary to be able to override the defaults on a per-chain basis. + +This used to be done via environment variables e.g. `MINIMUM_CONTRACT_PAYMENT_PLI_JUELS`. + +These still work, but if set they will override that value for _all_ chains. This may not always be what you want. Consider a node that runs both Matic and Mainnet. You may want to set a higher value for `MINIMUM_CONTRACT_PAYMENT` on Mainnet, due to the more expensive gas costs. However, setting `MINIMUM_CONTRACT_PAYMENT_PLI_JUELS` using env variables will set that value for _all_ chains including matic. + +To help you work around this, Plugin now supports setting per-chain configuration options. + +**Examples** + +To set initial configuration when creating a chain, pass in the full json string as an optional parameter at the end: + +`plugin evm chains create -id 42 '{"BlockHistoryEstimatorBlockDelay": "100"}'` + +To set configuration on an existing chain, specify key values pairs as such: + +`plugin evm chains configure -id 42 BlockHistoryEstimatorBlockDelay=100 GasEstimatorMode=FixedPrice` + +The full list of chain-specific configuration options can be found by looking at the `ChainCfg` struct in `core/chains/evm/types/types.go`. + +#### Async support in external adapters + +External Adapters making async callbacks can now error job runs. This required a slight change to format, the correct way to callback from an asynchronous EA is using the following JSON: + +SUCCESS CASE: + +```json +{ + "value": < any valid json object > +} +``` + +ERROR CASE: + +```json +{ + "error": "some error string" +} +``` + +This only applies to EAs using the `X-Plugin-Pending` header to signal that the result will be POSTed back to the Plugin node sometime 'later'. Regular synchronous calls to EAs work just as they always have done. 
+ +(NOTE: Official documentation for EAs needs to be updated) + +#### New optional VRF v2 field: `requestedConfsDelay` + +Added a new optional field for VRF v2 jobs called `requestedConfsDelay`, which configures a +number of blocks to wait in addition to the request specified `requestConfirmations` before servicing +the randomness request, i.e. the Plugin node will wait `max(nodeMinConfs, requestConfirmations + requestedConfsDelay)` +blocks before servicing the request. + +It can be used in the following way: + +```toml +type = "vrf" +externalJobID = "123e4567-e89b-12d3-a456-426655440001" +schemaVersion = 1 +name = "vrf-v2-secondary" +coordinatorAddress = "0xABA5eDc1a551E55b1A570c0e1f1055e5BE11eca7" +requestedConfsDelay = 10 +# ... rest of job spec ... +``` + +Use of this field requires a database migration. + +#### New locking mode: 'lease' + +Plugin now supports a new environment variable `DATABASE_LOCKING_MODE`. It can be set to one of the following values: + +- `dual` (the default - uses both locking types for backwards and forwards compatibility) +- `advisorylock` (advisory lock only) +- `lease` (lease lock only) +- `none` (no locking at all - useful for advanced deployment environments when you can be sure that only one instance of plugin will ever be running) + +The database lock ensures that only one instance of Plugin can be run on the database at a time. Running multiple instances of Plugin on a single database at the same time would likely lead to strange errors and possibly even data integrity failures and should not be allowed. + +Ideally, node operators would be using a container orchestration system (e.g. Kubernetes) that ensures that only one instance of Plugin ever runs on a particular postgres database. + +However, we are aware that many node operators do not have the technical capacity to do this. 
So a common use case is to run multiple Plugin instances in failover mode (as recommended by our official documentation, although this will be changing in future). The first instance will take some kind of lock on the database and subsequent instances will wait trying to take this lock in case the first instance disappears or dies. + +Traditionally Plugin has used an advisory lock to manage this. However, advisory locks come with several problems, notably: + +- Postgres does not really like it when you hold locks open for a very long time (hours/days). It hampers certain internal cleanup tasks and is explicitly discouraged by the postgres maintainers. +- The advisory lock can silently disappear on postgres upgrade, meaning that a new instance can take over even while the old one is still running. +- Advisory locks do not play nicely with pooling tools such as pgbouncer. +- If the application crashes, the advisory lock can be left hanging around for a while (sometimes hours) and can require manual intervention to remove it before another instance of Plugin will allow itself to boot. + +For this reason, we have introduced a new locking mode, `lease`, which is likely to become the default in the future. `lease`-mode works as follows: + +- Have one row in a database which is updated periodically with the client ID. +- CL node A will run a background process on start that updates this e.g. once per second. +- CL node B will spinlock, checking periodically to see if the update got too old. If it goes more than a set period without updating, it assumes that node A is dead and takes over. Now CL node B is the owner of the row, and it updates this every second. +- If CL node A comes back somehow, it will go to take out a lease and realise that the database has been leased to another process, so it will exit the entire application immediately. + +The default is set to `dual` which uses both advisory locking AND lease locking, for backwards compatibility. 
However, it is recommended that node operators who know what they are doing, or explicitly want to stop using the advisory locking mode set `DATABASE_LOCKING_MODE=lease` in their env. + +Lease locking can be configured using the following ENV vars: + +`LEASE_LOCK_REFRESH_INTERVAL` (default 1s) +`LEASE_LOCK_DURATION` (default 30s) + +It is recommended to leave these set to the default values. + +#### Duplicate Job Configuration + +When duplicating a job, the new job's configuration settings that have not been overridden by the user can still reflect the plugin node configuration. + +#### Nurse (automatic pprof profiler) + +Added new automatic pprof profiling service. Profiling is triggered when the node exceeds certain resource thresholds (currently, memory and goroutine count). The following environment variables have been added to allow configuring this service: + +- `AUTO_PPROF_ENABLED`: Set to `true` to enable the automatic profiling service. Defaults to `false`. +- `AUTO_PPROF_PROFILE_ROOT`: The location on disk where pprof profiles will be stored. Defaults to `$PLUGIN_ROOT`. +- `AUTO_PPROF_POLL_INTERVAL`: The interval at which the node's resources are checked. Defaults to `10s`. +- `AUTO_PPROF_GATHER_DURATION`: The duration for which profiles are gathered when profiling is kicked off. Defaults to `10s`. +- `AUTO_PPROF_GATHER_TRACE_DURATION`: The duration for which traces are gathered when profiling is kicked off. This is separately configurable because traces are significantly larger than other types of profiles. Defaults to `5s`. +- `AUTO_PPROF_MAX_PROFILE_SIZE`: The maximum amount of disk space that profiles may consume before profiling is disabled. Defaults to `100mb`. +- `AUTO_PPROF_CPU_PROFILE_RATE`: See https://pkg.go.dev/runtime#SetCPUProfileRate. Defaults to `1`. +- `AUTO_PPROF_MEM_PROFILE_RATE`: See https://pkg.go.dev/runtime#pkg-variables. Defaults to `1`. +- `AUTO_PPROF_BLOCK_PROFILE_RATE`: See https://pkg.go.dev/runtime#SetBlockProfileRate. 
Defaults to `1`. +- `AUTO_PPROF_MUTEX_PROFILE_FRACTION`: See https://pkg.go.dev/runtime#SetMutexProfileFraction. Defaults to `1`. +- `AUTO_PPROF_MEM_THRESHOLD`: The maximum amount of memory the node can actively consume before profiling begins. Defaults to `4gb`. +- `AUTO_PPROF_GOROUTINE_THRESHOLD`: The maximum number of actively-running goroutines the node can spawn before profiling begins. Defaults to `5000`. + +**Adventurous node operators are encouraged to read [this guide on how to analyze pprof profiles](https://jvns.ca/blog/2017/09/24/profiling-go-with-pprof/).** + +#### `merge` task type + +A new task type has been added, called `merge`. It can be used to merge two maps/JSON values together. Merge direction is from right to left such that `right` will clobber values of `left`. If no `left` is provided, it uses the input of the previous task. Example usage as such: + +``` +decode_log [type=ethabidecodelog ...] +merge [type=merge right=<{"foo": 42}>]; + +decode_log -> merge; +``` + +Or, to reverse merge direction: + +``` +decode_log [type=ethabidecodelog ...] +merge [type=merge left=<{"foo": 42}> right="$(decode_log)"]; + +decode_log -> merge; +``` + +#### Enhanced ABI encoding support + +The `ethabiencode2` task supports ABI encoding using the abi specification generated by `solc`. e.g: + + { + "name": "call", + "inputs": [ + { + "name": "value", + "type": "tuple", + "components": [ + { + "name": "first", + "type": "bytes32" + }, + { + "name": "last", + "type": "bool" + } + ] + } + ] + } + +This would allow for calling of a function `call` with a tuple containing two values, the first a `bytes32` and the second a `bool`. You can supply a named map or an array. + +#### Transaction Simulation (Gas Savings) + +Plugin now supports transaction simulation for certain types of job. When this is enabled, transactions will be simulated using `eth_call` before initial send. 
If the transaction reverted, the tx is marked as errored without being broadcast, potentially avoiding an expensive on-chain revert. + +This can add a tiny bit of latency (upper bound 2s, generally much shorter under good conditions) and will add marginally more load to the eth client, since it adds an extra call for every transaction sent. However, it may help to save gas in some cases especially during periods of high demand by avoiding unnecessary reverts (due to outdated round etc.). + +This option is EXPERIMENTAL and disabled by default. + +To enable for FM or OCR: + +`FM_SIMULATE_TRANSACTIONS=true` +`OCR_SIMULATE_TRANSACTIONS=true` + +To enable in the pipeline, use the `simulate=true` option like so: + +``` +submit [type=ethtx to="0xDeadDeadDeadDeadDeadDeadDeadDead" data="0xDead" simulate=true] +``` + +Use at your own risk. + +#### Misc + +Plugin now supports more than one primary eth node per chain. Requests are round-robined between available primaries. + +Add CRUD functionality for EVM Chains and Nodes through Operator UI. + +Non-fatal errors to a pipeline run are preserved including any run that succeeds but has more than one fatal error. + +Plugin now supports configuring max gas price on a per-key basis (allows implementation of keeper "lanes"). + +The Operator UI now supports login MFA with hardware security keys. `MFA_RPID` and `MFA_RPORIGIN` environment variables have been added to the config and are required if using the new MFA feature. + +Keys and Configuration navigation links have been moved into a settings dropdown to make space for multichain navigation links. + +#### Full EIP1559 Support (Gas Savings) + +Plugin now includes experimental support for submitting transactions using type 0x2 (EIP-1559) envelope. + +EIP-1559 mode is off by default but can be enabled on a per-chain basis or globally. + +This may help to save gas on spikes: Plugin ought to react faster on the upleg and avoid overpaying on the downleg. 
It may also be possible to set `BLOCK_HISTORY_ESTIMATOR_BATCH_SIZE` to a smaller value e.g. 12 or even 6 because tip cap ought to be a more consistent indicator of inclusion time than total gas price. This would make Plugin more responsive and ought to reduce response time variance. Some experimentation will be needed here to find optimum settings. + +To enable globally, set `EVM_EIP1559_DYNAMIC_FEES=true`. Set with caution, if you set this on a chain that does not actually support EIP-1559 your node will be broken. + +In EIP-1559 mode, the total price for the transaction is the minimum of base fee + tip cap and fee cap. More information can be found on the [official EIP](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md). + +Plugin's implementation of this is to set a large fee cap and modify the tip cap to control confirmation speed of transactions. So, when in EIP1559 mode, the tip cap takes the place of gas price roughly speaking, with the varying base price remaining a constant (we always pay it). + +A quick note on terminology - Plugin uses the same terms used internally by go-ethereum source code to describe various prices. This is not the same as the externally used terms. For reference: + +Base Fee Per Gas = BaseFeePerGas +Max Fee Per Gas = FeeCap +Max Priority Fee Per Gas = TipCap + +In EIP-1559 mode, the following changes occur to how configuration works: + +- All new transactions will be sent as type 0x2 transactions specifying a TipCap and FeeCap (NOTE: existing pending legacy transactions will continue to be gas bumped in legacy mode) +- BlockHistoryEstimator will apply its calculations (gas percentile etc.) 
to the TipCap and this value will be used for new transactions (GasPrice will be ignored) +- FixedPriceEstimator will use `EVM_GAS_TIP_CAP_DEFAULT` instead of `ETH_GAS_PRICE_DEFAULT` +- `ETH_GAS_PRICE_DEFAULT` is ignored for new transactions and `EVM_GAS_TIP_CAP_DEFAULT` is used instead (default 20GWei) +- `ETH_MIN_GAS_PRICE_WEI` is ignored for new transactions and `EVM_GAS_TIP_CAP_MINIMUM` is used instead (default 0) +- `ETH_MAX_GAS_PRICE_WEI` controls the FeeCap +- `KEEPER_GAS_PRICE_BUFFER_PERCENT` is ignored in EIP-1559 mode and `KEEPER_TIP_CAP_BUFFER_PERCENT` is used instead + +The default tip cap is configurable per-chain but can be specified for all chains using `EVM_GAS_TIP_CAP_DEFAULT`. The fee cap is derived from `ETH_MAX_GAS_PRICE_WEI`. + +When using the FixedPriceEstimator, the default gas tip will be used for all transactions. + +When using the BlockHistoryEstimator, Plugin will calculate the tip cap based on transactions already included (in the same way it calculates gas price in legacy mode). + +Enabling EIP1559 mode might lead to marginally faster transaction inclusion and make the node more responsive to sharp rises/falls in gas price, keeping response times more consistent. + +In addition, `ethcall` tasks now accept `gasTipCap` and `gasFeeCap` parameters in addition to `gasPrice`. This is required for Keeper jobs, i.e.: + +``` +check_upkeep_tx [type=ethcall + failEarly=true + extractRevertReason=true + contract="$(jobSpec.contractAddress)" + gas="$(jobSpec.checkUpkeepGasLimit)" + gasPrice="$(jobSpec.gasPrice)" + gasTipCap="$(jobSpec.gasTipCap)" + gasFeeCap="$(jobSpec.gasFeeCap)" + data="$(encode_check_upkeep_tx)"] +``` + +NOTE: AccessLists are part of the 0x2 transaction type spec and Plugin also implements support for these internally. This is not currently exposed in any way, if there is demand for this it ought to be straightforward enough to do so. 
+ +Avalanche AP4 defaults have been added (you can remove manually set ENV vars controlling gas pricing). + +#### New env vars + +`CHAIN_TYPE` - Configure the type of chain (if not standard). `Arbitrum`, `ExChain`, `Optimism`, or `XDai`. Replaces `LAYER_2_TYPE`. NOTE: This is a global override, to set on a per-chain basis you must use the CLI/API or GUI to change the chain-specific config for that chain (`ChainType`). + +`BLOCK_EMISSION_IDLE_WARNING_THRESHOLD` - Controls global override for the time after which node will start logging warnings if no heads are received. + +`ETH_DEFAULT_BATCH_SIZE` - Controls the default number of items per batch when making batched RPC calls. It is unlikely that you will need to change this from the default value. + +NOTE: `ETH_URL` used to default to "ws://localhost:8546" and `ETH_CHAIN_ID` used to default to 1. These defaults have now been removed. The env vars are no longer required, since node configuration is now done via CLI/API/GUI and stored in the database. + +### Removed + +- `belt/` and `evm-test-helpers/` removed from the codebase. + +#### Deprecated env vars + +`LAYER_2_TYPE` - Use `CHAIN_TYPE` instead. + +#### Removed env vars + +`FEATURE_CRON_V2`, `FEATURE_FLUX_MONITOR_V2`, `FEATURE_WEBHOOK_V2` - all V2 job types are now enabled by default. + +### Fixed + +- Fixed a regression whereby the BlockHistoryEstimator would use a bumped value on old gas price even if the new current price was larger than the bumped value. +- Fixed a bug where creating lots of jobs very quickly in parallel would cause the node to hang +- Propagating `evmChainID` parameter in job specs supporting this parameter. + +Fixed `LOG_LEVEL` behavior in respect to the corresponding UI setting: Operator can override `LOG_LEVEL` until the node is restarted. + +### Changed + +- The default `GAS_ESTIMATOR_MODE` for Optimism chains has been changed to `Optimism2`. +- Default minimum payment on mainnet has been reduced from 1 PLI to 0.1 PLI. 
+- Logging timestamp output has been changed from unix to ISO8601 to aid in readability. To keep the old unix format, you may set `LOG_UNIX_TS=true`
+- Added WebAuthn support for the Operator UI and corresponding support in the Go backend
+
+#### Log to Disk
+
+This feature has been disabled by default, turn on with LOG_TO_DISK. For most production uses this is not desirable.
+
+## [1.0.1] - 2021-11-23
+
+### Added
+
+- Improved error reporting
+- Panic and recovery improvements
+
+### Fixed
+
+- Resolved config conversion errors for ETH_FINALITY_DEPTH, ETH_HEAD_TRACKER_HISTORY, and ETH_GAS_LIMIT_MULTIPLIER
+- Proper handling for "nonce too low" errors on Avalanche
+
+## [1.0.0] - 2021-10-19
+
+### Added
+
+- `plugin node db status` will now display a table of applied and pending migrations.
+- Add support for OKEx/ExChain.
+
+### Changed
+
+**The legacy job pipeline (JSON specs) is no longer supported**
+
+This version will refuse to migrate the database if job specs are still present. You must manually delete or migrate all V1 job specs before upgrading.
+
+For more information on migrating, see [the docs](https://docs.chain.link/plugin-nodes/).
+
+This release will DROP legacy job tables so please take a backup before upgrading.
+
+#### KeyStore changes
+
+- We no longer support "soft deleting", or archiving keys. From now on, keys can only be hard-deleted.
+- Eth keys can no longer be imported directly to the database. If you wish to import an eth key, you _must_ start the node first and import through the remote client.
+
+#### New env vars
+
+`LAYER_2_TYPE` - For layer 2 chains only. Configure the type of chain, either `Arbitrum` or `Optimism`.
+
+#### Misc
+
+- Head sampling can now be optionally disabled by setting `ETH_HEAD_TRACKER_SAMPLING_INTERVAL = "0s"` - this will result in every new head being delivered to running jobs,
  regardless of the head frequency from the chain. 
+- When creating new FluxMonitor jobs, the validation logic now checks that only one of: drumbeat ticker or idle timer is enabled.
+- Added a new Prometheus metric: `uptime_seconds` which measures the number of seconds the node has been running. It can be helpful in detecting potential crashes.
+
+### Fixed
+
+Fixed a regression whereby the BlockHistoryEstimator would use a bumped value on old gas price even if the new current price was larger than the bumped value.
+
+## [0.10.15] - 2021-10-14
+
+**It is highly recommended to upgrade to this version before upgrading to any newer versions to avoid any complications.**
+
+### Fixed
+
+- Prevent release from clobbering databases that have previously been upgraded
+
+## [0.10.14] - 2021-09-06
+
+### Added
+
+- FMv2 spec now contains DrumbeatRandomDelay parameter that can be used to introduce variation between rounds of submits of different oracles, if drumbeat ticker is enabled.
+
+- OCR Hibernation
+
+#### Requesters/MinContractPaymentLinkJuels
+
+V2 direct request specs now support two additional keys:
+
+- "requesters" key which allows whitelisting requesters
+- "minContractPaymentLinkJuels" key which allows specifying a job-specific minimum contract payment.
+
+For example:
+
+```toml
+type = "directrequest"
+schemaVersion = 1
+requesters = ["0xaaaa1F8ee20f5565510B84f9353F1E333E753B7a", "0xbbbb70F0e81C6F3430dfdC9fa02fB22BdD818C4e"] # optional
+minContractPaymentLinkJuels = "100000000000000" # optional
+name = "example eth request event spec with requesters"
+contractAddress = "..."
+externalJobID = "..."
+observationSource = """
+... 
+""" +``` + +## [0.10.13] - 2021-08-25 + +### Fixed + +- Resolved exiting Hibernation bug on FMv2 + +## [0.10.12] - 2021-08-16 + +### Fixed + +- Resolved FMv2 stalling in Hibernation mode +- Resolved rare issue when the Gas Estimator fails on start +- Resolved the handling of nil values for gas price + +## [0.10.11] - 2021-08-09 + +A new configuration variable, `BLOCK_BACKFILL_SKIP`, can be optionally set to "true" in order to strongly limit the depth of the log backfill. +This is useful if the node has been offline for a longer time and after startup should not be concerned with older events from the chain. + +Three new configuration variables are added for the new telemetry ingress service support. `TELEMETRY_INGRESS_URL` sets the URL to connect to for telemetry ingress, `TELEMETRY_INGRESS_SERVER_PUB_KEY` sets the public key of the telemetry ingress server, and `TELEMETRY_INGRESS_LOGGING` toggles verbose logging of the raw telemetry messages being sent. + +- Fixes the logging configuration form not displaying the current values +- Updates the design of the configuration cards to be easier on the eyes +- View Coordinator Service Authentication keys in the Operator UI. This is hidden + behind a feature flag until usage is enabled. +- Adds support for the new telemetry ingress service. + +### Changed + +**The legacy job pipeline (JSON specs) has been officially deprecated and support for these jobs will be dropped in an upcoming release.** + +Any node operators still running jobs with JSON specs should migrate their jobs to TOML format instead. + +The format for V2 Webhook job specs has changed. They now allow specifying 0 or more external initiators. 
Example below: + +```toml +type = "webhook" +schemaVersion = 1 +externalInitiators = [ + { name = "foo-ei", spec = '{"foo": 42}' }, + { name = "bar-ei", spec = '{"bar": 42}' } +] +observationSource = """ +ds [type=http method=GET url="https://chain.link/ETH-USD"]; +ds_parse [type=jsonparse path="data,price"]; +ds_multiply [type=multiply times=100]; +ds -> ds_parse -> ds_multiply; +""" +``` + +These external initiators will be notified with the given spec after the job is created, and also at deletion time. + +Only the External Initiators listed in the toml spec may trigger a run for that job. Logged-in users can always trigger a run for any job. + +#### Migrating Jobs + +- OCR + All OCR jobs are already using v2 pipeline by default - no need to do anything here. + +- Flux Monitor v1 + We have created a tool to help you automigrate flux monitor specs in JSON format to the new TOML format. You can migrate a job like this: + +``` +plugin jobs migrate +``` + +This can be automated by using the API like so: + +``` +POST http://yournode.example/v2/migrate/ +``` + +- VRF v1 + Automigration is not supported for VRF jobs. They must be manually converted into v2 format. + +- Ethlog/Runlog/Cron/web + All other job types must also be manually converted into v2 format. + +#### Technical details + +Why are we doing this? + +To give some background, the legacy job pipeline has been around since before Plugin went to mainnet and is getting quite long in the tooth. The code is brittle and difficult to understand and maintain. For a while now we have been developing a v2 job pipeline in parallel which uses the TOML format. The new job pipeline is simpler, more performant and more powerful. Every job that can be represented in the legacy pipeline should be able to be represented in the v2 pipeline - if it can't be, that's a bug, so please let us know ASAP. + +The v2 pipeline has now been extensively tested in production and proved itself reliable. 
So, we made the decision to drop V1 support entirely in favour of focusing developer effort on new features like native multichain support, EIP1559-compatible fees, further gas saving measures and support for more blockchains. By dropping support for the old pipeline, we can deliver these features faster and better support our community.
+
+#### KeyStore changes
+
+- Key export files are changing format and will not be compatible between versions. E.g., a key exported in 0.10.12 will not be importable by a node running 1.0.0, and vice-versa.
+- We no longer support "soft deleting", or archiving keys. From now on, keys can only be hard-deleted.
+- Eth keys can no longer be imported directly to the database. If you wish to import an eth key, you _must_ start the node first and import through the remote client.
+
+## [0.10.10] - 2021-07-19
+
+### Changed
+
+This update will truncate `pipeline_runs`, `pipeline_task_runs`, `flux_monitor_round_stats_v2` DB tables as a part of the migration.
+
+#### Gas Estimation
+
+Gas estimation has been revamped and full support for Optimism has been added.
+
+The following env vars have been deprecated, and will be removed in a future release:
+
+```
+GAS_UPDATER_ENABLED
+GAS_UPDATER_BATCH_SIZE
+GAS_UPDATER_BLOCK_DELAY
+GAS_UPDATER_BLOCK_HISTORY_SIZE
+GAS_UPDATER_TRANSACTION_PERCENTILE
+```
+
+If you are using any of the env vars above, please switch to using the following instead:
+
+```
+GAS_ESTIMATOR_MODE
+BLOCK_HISTORY_ESTIMATOR_BATCH_SIZE
+BLOCK_HISTORY_ESTIMATOR_BLOCK_DELAY
+BLOCK_HISTORY_ESTIMATOR_BLOCK_HISTORY_SIZE
+BLOCK_HISTORY_ESTIMATOR_TRANSACTION_PERCENTILE
+```
+
+Valid values for `GAS_ESTIMATOR_MODE` are as follows:
+
+`GAS_ESTIMATOR_MODE=BlockHistory` (equivalent to `GAS_UPDATER_ENABLED=true`)
+`GAS_ESTIMATOR_MODE=FixedPrice` (equivalent to `GAS_UPDATER_ENABLED=false`)
+`GAS_ESTIMATOR_MODE=Optimism` (new)
+
+New gas estimator modes may be added in the future. 
+ +In addition, a minor annoyance has been fixed whereby previously if you enabled the gas updater, it would overwrite the locally stored value for gas price and continue to use this even if it was disabled after a reboot. This will no longer happen: BlockHistory mode will not clobber the locally stored value for fixed gas price, which can still be adjusted via remote API call or using `plugin config setgasprice XXX`. In order to use this manually fixed gas price, you must enable FixedPrice estimator mode. + +### Added + +Added support for latest version of libocr with the V2 networking stack. New env vars to configure this are: + +``` +P2P_NETWORKING_STACK +P2PV2_ANNOUNCE_ADDRESSES +P2PV2_BOOTSTRAPPERS +P2PV2_DELTA_DIAL +P2PV2_DELTA_RECONCILE +P2PV2_LISTEN_ADDRESSES +``` + +All of these are currently optional, by default OCR will continue to use the existing V1 stack. The new env vars will be used internally for OCR testing. + +### Fixed + +- Fix inability to create jobs with a cron schedule. + +## [0.10.9] - 2021-07-05 + +### Changed + +#### Transaction Strategies + +FMv2, Keeper and OCR jobs now use a new strategy for sending transactions. By default, if multiple transactions are queued up, only the latest one will be sent. This should greatly reduce the number of stale rounds and reverted transactions, and help node operators to save significant gas especially during times of high congestion or when catching up on a deep backlog. + +Defaults should work well, but it can be controlled if necessary using the following new env vars: + +`FM_DEFAULT_TRANSACTION_QUEUE_DEPTH` +`KEEPER_DEFAULT_TRANSACTION_QUEUE_DEPTH` +`OCR_DEFAULT_TRANSACTION_QUEUE_DEPTH` + +Setting to 0 will disable (the old behaviour). Setting to 1 (the default) will keep only the latest transaction queued up at any given time. Setting to 2, 3 etc. will allow this many transactions to be queued before starting to drop older items. + +Note that it has no effect on FMv1 jobs. 
Node operators will need to upgrade to FMv2 to take advantage of this feature. + +## [0.10.8] - 2021-06-21 + +### Fixed + +- The HTTP adapter would remove a trailing slash on a subdirectory when specifying an extended path, so for instance `http://example.com/subdir/` with a param of `?query=` extended path would produce the URL `http://example.com/subdir?query=`, but should now produce: `http://example.com/subdir/?query=`. + +- Matic autoconfig is now enabled for mainnet. Matic nops should remove any custom tweaks they have been running with. In addition, we have better default configs for Optimism, Arbitrum and RSK. + +- It is no longer required to set `DEFAULT_HTTP_ALLOW_UNRESTRICTED_NETWORK_ACCESS=true` to enable local fetches on bridge or http tasks. If the URL for the http task is specified as a variable, then set the AllowUnrestrictedNetworkAccess option for this task. Please remove this if you had it set and no longer need it, since it introduces a slight security risk. + +- Plugin can now run with ETH_DISABLED=true without spewing errors everywhere + +- Removed prometheus metrics that were no longer valid after recent changes to head tracking: + `head_tracker_heads_in_queue`, `head_tracker_callback_execution_duration`, + `head_tracker_callback_execution_duration_hist`, `head_tracker_num_heads_dropped` + +### Added + +- MINIMUM_CONTRACT_PAYMENT_PLI_JUELS replaces MINIMUM_CONTRACT_PAYMENT, which will be deprecated in a future release. + +- INSECURE_SKIP_VERIFY configuration variable disables verification of the Plugin SSL certificates when using the CLI. + +- JSON parse tasks (v2) now permit an empty `path` parameter. + +- Eth->eth transfer gas limit is no longer hardcoded at 21000 and can now be adjusted using `ETH_GAS_LIMIT_TRANSFER` + +- HTTP and Bridge tasks (v2 pipeline) now log the request parameters (including the body) upon making the request when `LOG_LEVEL=debug`. 
+ +- Webhook v2 jobs now support two new parameters, `externalInitiatorName` and `externalInitiatorSpec`. The v2 version of the following v1 spec: + + ``` + { + "initiators": [ + { + "type": "external", + "params": { + "name": "substrate", + "body": { + "endpoint": "substrate", + "feed_id": 0, + "account_id": "0x7c522c8273973e7bcf4a5dbfcc745dba4a3ab08c1e410167d7b1bdf9cb924f6c", + "fluxmonitor": { + "requestData": { + "data": { "from": "DOT", "to": "USD" } + }, + "feeds": [{ "url": "http://adapter1:8080" }], + "threshold": 0.5, + "absoluteThreshold": 0, + "precision": 8, + "pollTimer": { "period": "30s" }, + "idleTimer": { "duration": "1m" } + } + } + } + } + ], + "tasks": [ + { + "type": "substrate-adapter1", + "params": { "multiply": 1e8 } + } + ] + } + ``` + + is: + + ``` + type = "webhook" + schemaVersion = 1 + jobID = "0EEC7E1D-D0D2-475C-A1A8-72DFB6633F46" + externalInitiatorName = "substrate" + externalInitiatorSpec = """ + { + "endpoint": "substrate", + "feed_id": 0, + "account_id": "0x7c522c8273973e7bcf4a5dbfcc745dba4a3ab08c1e410167d7b1bdf9cb924f6c", + "fluxmonitor": { + "requestData": { + "data": { "from": "DOT", "to": "USD" } + }, + "feeds": [{ "url": "http://adapter1:8080" }], + "threshold": 0.5, + "absoluteThreshold": 0, + "precision": 8, + "pollTimer": { "period": "30s" }, + "idleTimer": { "duration": "1m" } + } + } + """ + observationSource = """ + submit [type=bridge name="substrate-adapter1" requestData=<{ "multiply": 1e8 }>] + """ + ``` + +- Task definitions in v2 jobs (those with TOML specs) now support quoting strings with angle brackets (which DOT already permitted). This is particularly useful when defining JSON blobs to post to external adapters. For example: + + ``` + my_bridge [type=bridge name="my_bridge" requestData="{\\"hi\\": \\"hello\\"}"] + ``` + + ... 
can now be written as: + + ``` + my_bridge [type=bridge name="my_bridge" requestData=<{"hi": "hello"}>] + ``` + + Multiline strings are supported with this syntax as well: + + ``` + my_bridge [type=bridge + name="my_bridge" + requestData=<{ + "hi": "hello", + "foo": "bar" + }>] + ``` + +- v2 jobs (those with TOML specs) now support variable interpolation in pipeline definitions. For example: + + ``` + fetch1 [type=bridge name="fetch"] + parse1 [type=jsonparse path="foo,bar"] + fetch2 [type=bridge name="fetch"] + parse2 [type=jsonparse path="foo,bar"] + medianize [type=median] + submit [type=bridge name="submit" + requestData=<{ + "result": $(medianize), + "fetchedData": [ $(parse1), $(parse2) ] + }>] + + fetch1 -> parse1 -> medianize + fetch2 -> parse2 -> medianize + medianize -> submit + ``` + + This syntax is supported by the following tasks/parameters: + + - `bridge` + - `requestData` + - `http` + - `requestData` + - `jsonparse` + - `data` (falls back to the first input if unspecified) + - `median` + - `values` (falls back to the array of inputs if unspecified) + - `multiply` + - `input` (falls back to the first input if unspecified) + - `times` + +- Add `ETH_MAX_IN_FLIGHT_TRANSACTIONS` configuration option. This defaults to 16 and controls how many unconfirmed transactions may be in-flight at any given moment. This is set conservatively by default, node operators running many jobs on high throughput chains will probably need to increase this above the default to avoid lagging behind. However, before increasing this value, you MUST first ensure your ethereum node is configured not to ever evict local transactions that exceed this number otherwise your node may get permanently stuck. Set to 0 to disable the limit entirely (the old behaviour). Disabling this setting is not recommended. + +Relevant settings for geth (and forks e.g. 
BSC) + +```toml +[Eth.TxPool] +Locals = ["0xYourNodeAddress1", "0xYourNodeAddress2"] # Add your node addresses here +NoLocals = false # Disabled by default but might as well make sure +Journal = "transactions.rlp" # Make sure you set a journal file +Rejournal = 3600000000000 # Default 1h, it might make sense to reduce this to e.g. 5m +PriceBump = 10 # Must be set less than or equal to plugin's ETH_GAS_BUMP_PERCENT +AccountSlots = 16 # Highly recommended to increase this, must be greater than or equal to plugin's ETH_MAX_IN_FLIGHT_TRANSACTIONS setting +GlobalSlots = 4096 # Increase this as necessary +AccountQueue = 64 # Increase this as necessary +GlobalQueue = 1024 # Increase this as necessary +Lifetime = 10800000000000 # Default 3h, this is probably ok, you might even consider reducing it + +``` + +Relevant settings for parity/openethereum (and forks e.g. xDai) + +NOTE: There is a bug in parity (and xDai) where occasionally local transactions are inexplicably culled. See: https://github.com/openethereum/parity-ethereum/issues/10228 + +Adjusting the settings below might help. + +```toml +tx_queue_locals = ["0xYourNodeAddress1", "0xYourNodeAddress2"] # Add your node addresses here +tx_queue_size = 8192 # Increase this as necessary +tx_queue_per_sender = 16 # Highly recommended to increase this, must be greater than or equal to plugin's ETH_MAX_IN_FLIGHT_TRANSACTIONS setting +tx_queue_mem_limit = 4 # In MB. Highly recommended to increase this or set to 0 +tx_queue_no_early_reject = true # Recommended to set this +tx_queue_no_unfamiliar_locals = false # This is disabled by default but might as well make sure +``` + +- Keeper jobs now support prometheus metrics, they are considered a pipeline with a single `keeper` task type. 
Example: +

```
+pipeline_run_errors{job_id="1",job_name="example keeper spec"} 1
+pipeline_run_total_time_to_completion{job_id="1",job_name="example keeper spec"} 8.470456e+06
+pipeline_task_execution_time{job_id="1",job_name="example keeper spec",task_type="keeper"} 8.470456e+06
+pipeline_tasks_total_finished{job_id="1",job_name="example keeper spec",status="completed",task_type="keeper"} 1
+```
+
+### Changed
+
+- The v2 (TOML) `bridge` task's `includeInputAtKey` parameter is being deprecated in favor of variable interpolation. Please migrate your jobs to the new syntax as soon as possible.
+
+- Plugin no longer writes/reads eth key files to disk
+
+- Add sensible default configuration settings for Fantom
+
+- Rename `ETH_MAX_UNCONFIRMED_TRANSACTIONS` to `ETH_MAX_QUEUED_TRANSACTIONS`. It still performs the same function but the name was misleading and would have caused confusion with the new `ETH_MAX_IN_FLIGHT_TRANSACTIONS`.
+
+- The VRF keys are now managed remotely through the node only. Example commands:
+
+```
+// Starting a node with a vrf key
+plugin node start -p path/to/passwordfile -vp path/to/vrfpasswordfile
+
+// Remotely managing the vrf keys
+plugin keys vrf create // Creates a key with path/to/vrfpasswordfile
+plugin keys vrf list // Lists all keys on the node
+plugin keys vrf delete // Deletes a key on the node
+
+// Archives (soft deletes) vrf key with compressed pub key 0x788..
+plugin keys vrf delete 0x78845e23b6b22c47e4c81426fdf6fc4087c4c6a6443eba90eb92cf4d11c32d3e00
+
+// Hard deletes vrf key with compressed pub key 0x788..
+plugin keys vrf delete 0x78845e23b6b22c47e4c81426fdf6fc4087c4c6a6443eba90eb92cf4d11c32d3e00 --hard
+
+// Exports 0x788.. key to file 0x788_exported_key on disk encrypted with path/to/vrfpasswordfile
+// Note you can re-encrypt it with a different password if you like when exporting. 
+plugin keys vrf export 0x78845e23b6b22c47e4c81426fdf6fc4087c4c6a6443eba90eb92cf4d11c32d3e00 -p path/to/vrfpasswordfile -o 0x788_exported_key + +// Import key material in 0x788_exported_key using path/to/vrfpasswordfile to decrypt. +// Will be re-encrypted with the nodes vrf password file i.e. "-vp" +plugin keys vrf import -p path/to/vrfpasswordfile 0x788_exported_key +``` + +## [0.10.7] - 2021-05-24 + +- If a CLI command is issued after the session has expired, and an api credentials file is found, auto login should now work. + +- GasUpdater now works on RSK and xDai + +- Offchain reporting jobs that have had a latest round requested can now be deleted from the UI without error + +### Added + +- Add `ETH_GAS_LIMIT_MULTIPLIER` configuration option, the gas limit is multiplied by this value before transmission. So a value of 1.1 will add 10% to the on chain gas limit when a transaction is submitted. + +- Add `ETH_MIN_GAS_PRICE_WEI` configuration option. This defaults to 1Gwei on mainnet. Plugin will never send a transaction at a price lower than this value. + +- Add `plugin node db migrate` for running database migrations. It's + recommended to use this and set `MIGRATE_DATABASE=false` if you want to run + the migrations separately outside of application startup. + +### Changed + +- Plugin now automatically cleans up old eth_txes to reduce database size. By default, any eth_txes older than a week are pruned on a regular basis. It is recommended to use the default value, however the default can be overridden by setting the `ETH_TX_REAPER_THRESHOLD` env var e.g. `ETH_TX_REAPER_THRESHOLD=24h`. Reaper can be disabled entirely by setting `ETH_TX_REAPER_THRESHOLD=0`. The reaper will run on startup and again every hour (interval is configurable using `ETH_TX_REAPER_INTERVAL`). + +- Heads corresponding to new blocks are now delivered in a sampled way, which is to improve + node performance on fast chains. 
The frequency is by default 1 second, and can be changed + by setting `ETH_HEAD_TRACKER_SAMPLING_INTERVAL` env var e.g. `ETH_HEAD_TRACKER_SAMPLING_INTERVAL=5s`. + +- Database backups: default directory is now a subdirectory 'backup' of plugin root dir, and can be changed + to any chosen directory by setting a new configuration value: `DATABASE_BACKUP_DIR` + +## [0.10.6] - 2021-05-10 + +### Added + +- Add `MockOracle.sol` for testing contracts + +- Web job types can now be created from the operator UI as a new job. + +- See example web job spec below: + +``` +type = "webhook" +schemaVersion = 1 +jobID = "0EEC7E1D-D0D2-476C-A1A8-72DFB6633F46" +observationSource = """ +ds [type=http method=GET url="http://example.com"]; +ds_parse [type=jsonparse path="data"]; +ds -> ds_parse; +""" +``` + +- New CLI command to convert v1 flux monitor jobs (JSON) to + v2 flux monitor jobs (TOML). Running it will archive the v1 + job and create a new v2 job. Example: + +``` +// Get v1 job ID: +plugin job_specs list +// Migrate it to v2: +plugin jobs migrate fe279ed9c36f4eef9dc1bdb7bef21264 + +// To undo the migration: +1. Archive the v2 job in the UI +2. Unarchive the v1 job manually in the db: +update job_specs set deleted_at = null where id = 'fe279ed9-c36f-4eef-9dc1-bdb7bef21264' +update initiators set deleted_at = null where job_spec_id = 'fe279ed9-c36f-4eef-9dc1-bdb7bef21264' +``` + +- Improved support for Optimism chain. Added a new boolean `OPTIMISM_GAS_FEES` configuration variable which makes a call to estimate gas before all transactions, suitable for use with Optimism's L2 chain. When this option is used `ETH_GAS_LIMIT_DEFAULT` is ignored. + +- Plugin now supports routing certain calls to the eth node over HTTP instead of websocket, when available. This has a number of advantages - HTTP is more robust and simpler than websockets, reducing complexity and allowing us to make large queries without running the risk of hitting websocket send limits. 
The HTTP url should point to the same node as the ETH_URL and can be specified with an env var like so: `ETH_HTTP_URL=https://my.ethereumnode.example/endpoint`. + +Adding an HTTP endpoint is particularly recommended for BSC, which is hitting websocket limitations on certain queries due to its large block size. + +- Support for legacy pipeline (V1 job specs) can now be turned off by setting `ENABLE_LEGACY_JOB_PIPELINE=false`. This can yield marginal performance improvements if you don't need to support the legacy JSON job spec format. + +## [0.10.5] - 2021-04-26 + +### Added + +- Add `MockOracle.sol` for testing contracts +- Cron jobs can now be created for the v2 job pipeline: + +``` +type = "cron" +schemaVersion = 1 +schedule = "*/10 * * * *" +observationSource = """ +ds [type=http method=GET url="http://example.com"]; +ds_parse [type=jsonparse path="data"]; +ds -> ds_parse; +""" +``` + +### Changed + +- Default for `JOB_PIPELINE_REAPER_THRESHOLD` has been reduced from 1 week to 1 day to save database space. This variable controls how long past job run history for OCR is kept. To keep the old behaviour, you can set `JOB_PIPELINE_REAPER_THRESHOLD=168h` +- Removed support for the env var `JOB_PIPELINE_PARALLELISM`. +- OCR jobs no longer show `TaskRuns` in success cases. This reduces + DB load and significantly improves the performance of archiving OCR jobs. +- Archiving OCR jobs should be 5-10x faster. + +### Fixed + +- Added `GAS_UPDATER_BATCH_SIZE` option to workaround `websocket: read limit exceeded` issues on BSC + +- Basic support for Optimism chain: node no longer gets stuck with 'nonce too low' error if connection is lost + +## [0.10.4] - 2021-04-05 + +### Added + +- VRF Jobs now support an optional `coordinatorAddress` field that, when present, will tell the node to check the fulfillment status of any VRF request before attempting the fulfillment transaction. This will assist in the effort to run multiple nodes with one VRF key. 
+
+- Experimental: Add `DATABASE_BACKUP_MODE`, `DATABASE_BACKUP_FREQUENCY` and `DATABASE_BACKUP_URL` configuration variables
+
+  - It's now possible to configure database backups: on node start and separately, to be run at given frequency. `DATABASE_BACKUP_MODE` enables the initial backup on node start (with one of the values: `none`, `lite`, `full` where `lite` excludes
+    potentially large tables related to job runs, among others). Additionally, if `DATABASE_BACKUP_FREQUENCY` variable is set to a duration of
+    at least '1m', it enables periodic backups.
+  - `DATABASE_BACKUP_URL` can be optionally set to point to e.g. a database replica, in order to avoid excessive load on the main one. Example settings:
+    1. `DATABASE_BACKUP_MODE="full"` and `DATABASE_BACKUP_FREQUENCY` not set, will run a full backup only at the start of the node.
+    2. `DATABASE_BACKUP_MODE="lite"` and `DATABASE_BACKUP_FREQUENCY="1h"` will lead to a partial backup on node start and then again a partial backup every one hour.
+
+- Added periodic resending of eth transactions. This means that we no longer rely exclusively on gas bumping to resend unconfirmed transactions that got "lost" for whatever reason. This has two advantages:
+
+  1. Plugin no longer relies on gas bumping settings to ensure our transactions always end up in the mempool
+  2. Plugin will continue to resend existing transactions even in the event that heads are delayed. This is especially useful on chains like Arbitrum which have very long wait times between heads.
+
+  - Periodic resending can be controlled using the `ETH_TX_RESEND_AFTER_THRESHOLD` env var (default 30s). Unconfirmed transactions will be resent periodically at this interval. It is recommended to leave this at the default setting, but it can be set to any [valid duration](https://golang.org/pkg/time/#ParseDuration) or to 0 to disable periodic resending.
+
+- Logging can now be configured in the Operator UI. 
+ +- Tuned defaults for certain Eth-compatible chains + +- Plugin node now uses different sets of default values depending on the given Chain ID. Tuned configs are built-in for the following chains: + + - Ethereum Mainnet and test chains + - Polygon (Matic) + - BSC + - HECO + +- If you have manually set ENV vars specific to these chains, you may want to remove those and allow the node to use its configured defaults instead. + +- New prometheus metric "tx_manager_num_tx_reverted" which counts the number of reverted transactions on chain. + +### Fixed + +- Under certain circumstances a poorly configured Explorer could delay Plugin node startup by up to 45 seconds. + +- Plugin node now automatically sets the correct nonce on startup if you are restoring from a previous backup (manual setnextnonce is no longer necessary). + +- Flux monitor jobs should now work correctly with [outlier-detection](https://github.com/goplugin/external-adapters-js/tree/develop/composite/outlier-detection) and [market-closure](https://github.com/goplugin/external-adapters-js/tree/develop/composite/market-closure) external adapters. + +- Performance improvements to OCR job adds. Removed the pipeline_task_specs table + and added a new column `dot_id` to the pipeline_task_runs table which links a pipeline_task_run + to a dotID in the pipeline_spec.dot_dag_source. + +- Fixed bug where node will occasionally submit an invalid OCR transmission which reverts with "address not authorized to sign". + +- Fixed bug where a node will sometimes double submit on runlog jobs causing reverted transactions on-chain + +## [0.10.3] - 2021-03-22 + +### Added + +- Add `STATS_PUSHER_LOGGING` to toggle stats pusher raw message logging (DEBUG + level). + +- Add `ADMIN_CREDENTIALS_FILE` configuration variable + +This variable defaults to `$ROOT/apicredentials` and when defined / the +file exists, any command using the CLI that requires authentication will use it +to automatically log in. 
+ +- Add `ETH_MAX_UNCONFIRMED_TRANSACTIONS` configuration variable + +Plugin node now has a maximum number of unconfirmed transactions that +may be in flight at any one time (per key). + +If this limit is reached, further attempts to send transactions will fail +and the relevant job will be marked as failed. + +Jobs will continue to fail until at least one transaction is confirmed +and the queue size is reduced. This is introduced as a sanity limit to +prevent unbounded sending of transactions e.g. in the case that the eth +node is failing to broadcast to the network. + +The default is set to 500 which considered high enough that it should +never be reached under normal operation. This limit can be changed +by setting the `ETH_MAX_UNCONFIRMED_TRANSACTIONS` environment variable. + +- Support requestNewRound in libocr + +requestNewRound enables dedicated requesters to request a fresh report to +be sent to the contract right away regardless of heartbeat or deviation. + +- New prometheus metric: + +``` +Name: "head_tracker_eth_connection_errors", +Help: "The total number of eth node connection errors", +``` + +- Gas bumping can now be disabled by setting `ETH_GAS_BUMP_THRESHOLD=0` + +- Support for arbitrum + +### Fixed + +- Improved handling of the case where we exceed the configured TX fee cap in geth. + +Node will now fatally error jobs if the total transaction costs exceeds the +configured cap (default 1 Eth). Also, it will no longer continue to bump gas on +transactions that started hitting this limit and instead continue to resubmit +at the highest price that worked. + +Node operators should check their geth nodes and remove this cap if configured, +you can do this by running your geth node with `--rpc.gascap=0 --rpc.txfeecap=0` or setting these values in your config toml. + +- Make head backfill asynchronous. 
This should eliminate some harmless but
+  annoying errors related to backfilling heads, logged on startup and
+  occasionally during normal operation on fast chains like Kovan.
+
+- Improvements to the GasUpdater
+
+Various efficiency and correctness improvements have been made to the
+GasUpdater. It places less load on the ethereum node and now features re-org
+detection.
+
+Most notably, GasUpdater no longer takes a 24 block delay to "warm up" on
+application start and instead loads all relevant block history immediately.
+This means that the application gas price will always be updated correctly
+after reboot before the first transaction is ever sent, eliminating the previous
+scenario where the node could send underpriced or overpriced transactions for a
+period after a reboot, until the gas updater caught up.
+
+### Changed
+
+- Bump `ORM_MAX_OPEN_CONNS` default from 10 to 20
+- Bump `ORM_MAX_IDLE_CONNS` default from 5 to 10
+
+Each Plugin node will now use a maximum of 23 database connections (up from previous max of 13). Make sure your postgres database is tuned accordingly, especially if you are running multiple Plugin nodes on a single database. If you find yourself hitting connection limits, you can consider reducing `ORM_MAX_OPEN_CONNS` but this may result in degraded performance.
+
+- The global env var `JOB_PIPELINE_MAX_TASK_DURATION` is no longer supported
+  for OCR jobs.
+
+## [0.10.2] - 2021-02-26
+
+### Fixed
+
+- Add contexts so that database queries timeout when necessary.
+- Use manual updates instead of gorm update associations.
+
+## [0.10.1] - 2021-02-25
+
+### Fixed
+
+- Prevent autosaving Task Spec when Task Runs are saved to lower database load.
+
+## [0.10.0] - 2021-02-22
+
+### Fixed
+
+- Fix a case where archiving a job could try to delete it from the external initiator even if the job was not an EI job.
+- Improved performance of the transaction manager by fetching receipts in
+  batches. 
This should help prevent the node from getting stuck when processing + large numbers of OCR jobs. +- Fixed a fluxmonitor job bug where submitting a value outside the acceptable range would stall the job + permanently. Now a job spec error will be thrown if the polled answer is outside the + acceptable range and no ethtx will be submitted. As additional protection, we also now + check the receipts of the ethtx's and if they were reverted, we mark the ethtx task as failed. + +### Breaking + +- Squashed migrations into a single 1_initial migration. If you were running a version + older than 0.9.10, you need to upgrade to 0.9.10 first before upgrading to the next + version so that the migrations are run. + +### Added + +- A new Operator UI feature that visualize JSON and TOML job spec tasks on a 'New Job' page. + +## [0.9.10] - 2021-01-30 + +### Fixed + +- Fixed a UI bug with fluxmonitor jobs where initiator params were bunched up. +- Improved performance of OCR jobs to reduce database load. OCR jobs now run with unlimited parallelism and are not affected by `JOB_PIPELINE_PARALLELISM`. + +### Added + +- A new env var `JOB_PIPELINE_MAX_RUN_DURATION` has been added which controls maximum duration of the total run. + +## [0.9.9] - 2021-01-18 + +### Added + +- New CLI commands for key management: + - `plugin keys eth import` + - `plugin keys eth export` + - `plugin keys eth delete` +- All keys other than VRF keys now share the same password. If you have OCR, P2P, and ETH keys encrypted with different passwords, re-insert them into your DB encrypted with the same password prior to upgrading. + +### Fixed + +- Fixed reading of function selector values in DB. +- Support for bignums encoded in CBOR +- Silence spurious `Job spawner ORM attempted to claim locally-claimed job` warnings +- OCR now drops transmissions instead of queueing them if the node is out of Ether +- Fixed a long-standing issue where standby nodes would hold transactions open forever while waiting for a lock. 
This was preventing postgres from running necessary cleanup operations, resulting in bad database performance. Any node operators running standby failover plugin nodes should see major database performance improvements with this release and may be able to reduce the size of their database instances. +- Fixed an issue where expired session tokens in operator UI would cause a large number of requests to be sent to the node, resulting in a temporary rate-limit and 429 errors. +- Fixed issue whereby http client could leave too many open file descriptors + +### Changed + +- Key-related API endpoints have changed. All key-related commands are now namespaced under `/v2/keys/...`, and are standardized across key types. +- All key deletion commands now perform a soft-delete (i.e. archive) by default. A special CLI flag or query string parameter must be provided to hard-delete a key. +- Node now supports multiple OCR jobs sharing the same peer ID. If you have more than one key in your database, you must now specify `P2P_PEER_ID` to indicate which key to use. +- `DATABASE_TIMEOUT` is now set to 0 by default, so that nodes will wait forever for a lock. If you already have `DATABASE_TIMEOUT=0` set explicitly in your env (most node operators) then you don't need to do anything. If you didn't have it set, and you want to keep the old default behaviour where a node exits shortly if it can't get a lock, you can manually set `DATABASE_TIMEOUT=500ms` in your env. +- OCR bootstrap node no longer sends telemetry to the endpoint specified in the OCR job spec under `MonitoringEndpoint`. + +## [0.9.8] - 2020-12-17 + +### Fixed + +- An issue where the node would emit warnings on startup for fluxmonitor contracts + +## [0.9.7] - 2020-12-14 + +### Added + +- OCR bootstrap node now sends telemetry to the endpoint specified in the OCR job spec under `MonitoringEndpoint`. +- Adds "Account addresses" table to the `/keys` page. + +### Changed + +- Old jobs now allow duplicate job names. 
Also, if the name field is empty we no longer generate a name. +- Removes broken `ACCOUNT_ADDRESS` field from `/config` page. + +### Fixed + +- Brings `/runs` tab back to the operator UI. +- Signs out a user from operator UI on authentication error. +- OCR jobs no longer require defining v1 bootstrap peers unless `P2P_NETWORKING_STACK=V1` + +#### BREAKING CHANGES + +- Commands for creating/managing legacy jobs and OCR jobs have changed, to reduce confusion and accommodate additional types of jobs using the new pipeline. +- If `P2P_NETWORKING_STACK=V1V2`, then `P2PV2_BOOTSTRAPPERS` must also be set + +#### V1 jobs + +`jobs archive` => `job_specs archive` +`jobs create` => `job_specs create` +`jobs list` => `job_specs list` +`jobs show` => `job_specs show` + +#### V2 jobs (currently only applies to OCR) + +`jobs createocr` => `jobs create` +`jobs deletev2` => `jobs delete` +`jobs run` => `jobs run` + +## [0.9.6] - 2020-11-23 + +- OCR pipeline specs can now be configured on a per-task basis to allow unrestricted network access for http tasks. 
Example like so: + +``` +ds1 [type=http method=GET url="http://example.com" allowunrestrictednetworkaccess="true"]; +ds1_parse [type=jsonparse path="USD" lax="true"]; +ds1_multiply [type=multiply times=100]; +ds1 -> ds1_parse -> ds1_multiply; +``` + +- New prometheus metrics as follows: + +``` +Name: "pipeline_run_errors", +Help: "Number of errors for each pipeline spec", + +Name: "pipeline_run_total_time_to_completion", +Help: "How long each pipeline run took to finish (from the moment it was created)", + +Name: "pipeline_tasks_total_finished", +Help: "The total number of pipline tasks which have finished", + +Name: "pipeline_task_execution_time", +Help: "How long each pipeline task took to execute", + +Name: "pipeline_task_http_fetch_time", +Help: "Time taken to fully execute the HTTP request", + +Name: "pipeline_task_http_response_body_size", +Help: "Size (in bytes) of the HTTP response body", + +Name: "pipeline_runs_queued", +Help: "The total number of pipline runs that are awaiting execution", + +Name: "pipeline_task_runs_queued", +Help: "The total number of pipline task runs that are awaiting execution", +``` + +### Changed + +Numerous key-related UX improvements: + +- All key-related commands have been consolidated under the `plugin keys` subcommand: + - `plugin createextrakey` => `plugin keys eth create` + - `plugin admin info` => `plugin keys eth list` + - `plugin node p2p [create|list|delete]` => `plugin keys p2p [create|list|delete]` + - `plugin node ocr [create|list|delete]` => `plugin keys ocr [create|list|delete]` + - `plugin node vrf [create|list|delete]` => `plugin keys vrf [create|list|delete]` +- Deleting OCR key bundles and P2P key bundles now archives them (i.e., soft delete) so that they can be recovered if needed. If you want to hard delete a key, pass the new `--hard` flag to the command, e.g. `plugin keys p2p delete --hard 6`. +- Output from ETH/OCR/P2P/VRF key CLI commands now renders consistently. 
+
+- Deleting an OCR/P2P/VRF key now requires confirmation from the user. To skip confirmation (e.g. in shell scripts), pass `--yes` or `-y`.
+- The `--ocrpassword` flag has been removed. OCR/P2P keys now share the same password as the ETH key (i.e., the password specified with the `--password` flag).
+
+Misc:
+
+- Two new env variables are added `P2P_ANNOUNCE_IP` and `P2P_ANNOUNCE_PORT` which allow node operators to override locally detected values for the plugin node's externally reachable IP/port.
+- `OCR_LISTEN_IP` and `OCR_LISTEN_PORT` have been renamed to `P2P_LISTEN_IP` and `P2P_LISTEN_PORT` for consistency.
+- Support for adding a job with the same name as one that was deleted.
+
+### Fixed
+
+- Fixed an issue where the HTTP adapter would send an empty body on retries.
+- Changed the default `JOB_PIPELINE_REAPER_THRESHOLD` value from `7d` to `168h` (hours are the highest time unit allowed by `time.Duration`).
+
+## [0.9.5] - 2020-11-12
+
+### Changed
+
+- Updated from Go 1.15.4 to 1.15.5.
+
+## [0.9.4] - 2020-11-04
+
+### Fixed
+
+- Hotfix to fix an issue with httpget adapter
+
+## [0.9.3] - 2020-11-02
+
+### Added
+
+- Add new subcommand `node hard-reset` which is used to remove all state for unstarted and pending job runs from the database.
+
+### Changed
+
+- Plugin now requires Postgres >= 11.x. Previously this was a recommendation, this is now a hard requirement. Migrations will fail if run on an older version of Postgres.
+- Database improvements that greatly reduced the number of open Postgres connections
+- Operator UI /jobs page is now searchable
+- Jobs now accept a name field in the jobspecs
+
+## [0.9.2] - 2020-10-15
+
+### Added
+
+- Bulletproof transaction manager enabled by default
+- Fluxmonitor support enabled by default
+
+### Fixed
+
+- Improve transaction manager architecture to be more compatible with `ETH_SECONDARY_URL` option (i.e. concurrent transaction submission to multiple different eth nodes). 
This also comes with some minor performance improvements in the tx manager and more correct handling of some extremely rare edge cases. +- As a side effect, we now no longer handle the case where an external wallet used the plugin ethereum private key to send a transaction. This use-case was already explicitly unsupported, but we made a best-effort attempt to handle it. We now make no attempt at all to handle it and doing this WILL result in your node not sending the data that it expected to be sent for the nonces that were used by an external wallet. +- Operator UI now shows booleans correctly + +### Changed + +- ETH_MAX_GAS_PRICE_WEI now 1500Gwei by default + +## [0.8.18] - 2020-10-01 + +### Fixed + +- Prometheus gas_updater_set_gas_price metric now only shows last gas price instead of every block since restart + +## [0.8.17] - 2020-09-28 + +### Added + +- Add new env variable ETH_SECONDARY_URL. Default is unset. You may optionally set this to a http(s) ethereum RPC client URL. If set, transactions will also be broadcast to this secondary ethereum node. This allows transaction broadcasting to be more robust in the face of primary ethereum node bugs or failures. +- Remove configuration option ORACLE_CONTRACT_ADDRESS, it had no effect +- Add configuration option OPERATOR_CONTRACT_ADDRESS, it filters the contract addresses the node should listen to for Run Logs +- At startup, the plugin node will create a new funding address. This will initially be used to pay for cancelling stuck transactions. + +### Fixed + +- Gas bumper no longer hits database constraint error if ETH_MAX_GAS_PRICE_WEI is reached (this was actually mostly harmless, but the errors were annoying) + +### Changes + +- ETH_MAX_GAS_PRICE_WEI now defaults to 1500 gwei + +## [0.8.16] - 2020-09-18 + +### Added + +- The plugin node now will bump a limited configurable number of transactions at once. This is configured with the ETH_GAS_BUMP_TX_DEPTH variable which is 10 by default. 
Set to 0 to disable (the old behaviour). + +### Fixed + +- ETH_DISABLED flag works again + +## [0.8.15] - 2020-09-14 + +### Added + +- Plugin header images to the following `README.md` files: root, core, + evm-contracts, and evm-test-helpers. +- Database migrations: new log_consumptions records will contain the number of the associated block. + This migration will allow future version of plugin to automatically clean up unneeded log_consumption records. + This migration should execute very fast. +- External Adapters for the Flux Monitor will now receive the Flux Monitor round state info as the meta payload. +- Reduce frequency of balance checking. + +### Fixed + +Previously when the node was overloaded with heads there was a minor possibility it could get backed up with a very large head queue, and become unstable. Now, we drop heads instead in this case and noisily emit an error. This means the node should more gracefully handle overload conditions, although this is still dangerous and node operators should deal with it immediately to avoid missing jobs. + +A new environment variable is introduced to configure this, called `ETH_HEAD_TRACKER_MAX_BUFFER_SIZE`. It is recommended to leave this set to the default of "3". + +A new prometheus metric is also introduced to track dropped heads, called `head_tracker_num_heads_dropped`. You may wish to set an alert on a rule such as `increase(plugin_dropped_heads[5m]) > 0`. + +## [0.8.14] - 2020-09-02 + +## Changed + +- Fix for gas bumper +- Fix for broadcast-transactions function + +## [0.8.13] - 2020-08-31 + +## Changed + +- Fix for gas bumper +- Fix for broadcast-transactions function + +## [0.8.13] - 2020-08-31 + +### Changed + +- Performance improvements when using BulletproofTxManager. + +## [0.8.12] - 2020-08-10 + +### Fixed + +- Added a workaround for Infura users who are seeing "error getting balance: header not found". 
+ This behaviour is due to Infura announcing it has a block, but when we request our balance in this block, the eth node doesn't have the block in memory. The workaround is to add a configurable lag time on balance update requests. The default is set to 1 but this is configurable via a new environment variable `ETH_BALANCE_MONITOR_BLOCK_DELAY`. + +## [0.8.11] - 2020-07-27 + +### Added + +- Job specs now support pinning to multiple keys using the new `fromAddresses` field in the ethtx task spec. + +### Changed + +- Using `fromAddress` in ethtx task specs has been deprecated. Please use `fromAddresses` instead. + +### Breaking changes + +- Support for RunLogTopic0original and RunLogTopic20190123withFullfillmentParams logs has been dropped. This should not affect any users since these logs predate Plugin's mainnet launch and have never been used on mainnet. + +IMPORTANT: The selection mechanism for keys has changed. When an ethtx task spec is not pinned to a particular key by defining `fromAddress` or `fromAddresses`, the node will now cycle through all available keys in round-robin fashion. This is a change from the previous behaviour where nodes would only pick the earliest created key. + +This is done to allow increases in throughput when a node operator has multiple whitelisted addresses for their oracle. + +If your node has multiple keys, you will need to take one of the three following actions: + +1. Make sure all keys are valid for all job specs +2. Pin job specs to a valid subset of key(s) using `fromAddresses` +3. Delete the key(s) you don't want to use + +If your node only has one key, no action is required. 
+ +## [0.8.10] - 2020-07-14 + +### Fixed + +- Incorrect sequence on keys table in some edge cases + +## [0.8.9] - 2020-07-13 + +### Added + +- Added a check on sensitive file ownership that gives a warning if certain files are not owned by the user running plugin +- Added mechanism to asynchronously communicate when a job spec has an ethereum interaction error (or any async error) with a UI screen +- Gas Bumper now bumps based on the current gas price instead of the gas price of the original transaction + +### Fixed + +- Support for multiple node addresses + +## [0.8.8] - 2020-06-29 + +### Added + +- `ethtx` tasks now support a new parameter, `minRequiredOutgoingConfirmations` which allows you to tune how many confirmations are required before moving on from an `ethtx` task on a per-task basis (only works with BulletproofTxManager). If it is not supplied, the default of `MIN_OUTGOING_CONFIRMATIONS` is used (same as the old behaviour). + +### Changed + +- HeadTracker now automatically backfills missing heads up to `ETH_FINALITY_DEPTH` +- The strategy for gas bumping has been changed to produce a potentially higher gas cost in exchange for the transaction getting through faster. + +### Breaking changes + +- `admin withdraw` command has been removed. This was only ever useful to withdraw PLI if the Oracle contract was owned by the Plugin node address. It is no longer recommended having the Oracle owner be the plugin node address. +- Fixed `txs create` to send the amount in Eth not in Wei (as per the documentation) + +## [0.8.7] - 2020-06-15 + +### Added + +This release contains a number of features aimed at improving the node's reliability when putting transactions on-chain. + +- An experimental new transaction manager is introduced that delivers reliability improvements compared to the old one, especially when faced with difficult network conditions or spiking gas prices. 
It also reduces load on the database and makes fewer calls to the eth node compared to the old tx manager. +- Along with the new transaction manager is a local client command for manually controlling the node nonce - `setnextnonce`. This should never be necessary under normal operation and is included only for use in emergencies. +- New prometheus metrics for the head tracker: + - `head_tracker_heads_in_queue` - The number of heads currently waiting to be executed. You can think of this as the 'load' on the head tracker. Should rarely or never be more than 0. + - `head_tracker_callback_execution_duration` - How long it took to execute all callbacks. If the average of this exceeds the time between blocks, your node could lag behind and delay transactions. +- Nodes transmit their build info to Explorer for better debugging/tracking. + +### Env var changes + +- `ENABLE_BULLETPROOF_TX_MANAGER` - set this to true to enable the experimental new transaction manager +- `ETH_GAS_BUMP_PERCENT` default value has been increased from 10% to 20% +- `ETH_GAS_BUMP_THRESHOLD` default value has been decreased from 12 to 3 +- `ETH_FINALITY_DEPTH` specifies how deep protection should be against re-orgs. The default is 50. It only applies if BulletproofTxManager is enabled. It is not recommended changing this setting. +- `EthHeadTrackerHistoryDepth` specifies how many heads the head tracker should keep in the database. The default is 100. It is not recommended changing this setting. 
+- Update README.md with links to mockery, jq, and gencodec as they are required to run `go generate ./...` + +## [0.8.6] - 2020-06-08 + +### Added + +- The node now logs the eth client RPC calls +- More reliable Ethereum block header tracking +- Limit the amount of an HTTP response body that the node will read +- Make Aggregator contract interface viewable +- More resilient handling of chain reorganizations + +## [0.8.5] - 2020-06-01 + +### Added + +- The plugin node can now be configured to backfill logs from `n` blocks after a + connection to the ethereum client is reset. This value is specified with an environment + variable `BLOCK_BACKFILL_DEPTH`. +- The plugin node now sets file permissions on sensitive files on startup (tls, .api, .env, .password and secret) +- AggregatorInterface now has description and version fields. + +### Changed + +- Solidity: Renamed the previous `AggregatorInterface.sol` to + `HistoricAggregatorInterface.sol`. Users are encouraged to use the new methods + introduced on the `AggregatorInterface`(`getRoundData` and `latestRoundData`), + as they return metadata to indicate freshness of the data in a single + cross-contract call. +- Solidity: Marked `HistoricAggregatorInterface` methods (`latestAnswer`, + `latestRound`, `latestTimestamp`, `getAnswer`, `getTimestamp`) as deprecated + on `FluxAggregator`, `WhitelistedAggregator`, `AggregatorProxy`, + `WhitelistedAggregatorProxy`. +- Updated the solidity compiler version for v0.6 from 0.6.2 to 0.6.6. +- AccessControlledAggregatorProxy checks an external contract for users to be able to + read functions. + +### Fixed + +- Fluxmonitor jobs now respect the `minPayment` field on job specs and won't poll if the contract + does not have sufficient funding. This allows certain jobs to require a larger payment + than `MINIMUM_CONTRACT_PAYMENT`. + +## [0.8.4] - 2020-05-18 + +### Added + +- Fluxmonitor initiators may now optionally include an `absoluteThreshold` + parameter. 
To trigger a new on-chain report, the absolute difference in the feed + value must change by at least the `absoluteThreshold` value. If it is + unspecified or zero, fluxmonitor behavior is unchanged. +- Database Migrations: Add created_at and updated_at to all tables allowing for + better historical insights. This migration may take a minute or two on large + databases. + +### Fixed + +- Fix incorrect permissions on some files written by the node + Prevent a case where duplicate ethereum keys could be added + Improve robustness and reliability of ethtx transaction logic + +## [0.8.3] - 2020-05-04 + +### Added + +- Added Changelog. +- Database Migrations: There a number of database migrations included in this + release as part of our ongoing effort to make the node even more reliable and + stable, and build a firm foundation for future development. + +### Changed + +- New cron strings MUST now include time zone. If you want your jobs to run in + UTC for example: `CRON_TZ=UTC * * * * *`. Previously, jobs specified without a + time zone would run in the server's native time zone, which in most cases is UTC + but this was never guaranteed. 
+
+### Fixed
+
+- Fix crash in experimental gas updater when run on Kovan network
+
+## [0.8.2] - 2020-04-20
+
+## [0.8.1] - 2020-04-08
+
+## [0.8.0] - 2020-04-06
diff --git a/docs/COMMUNITY.md b/docs/COMMUNITY.md
new file mode 100644
index 00000000..a51bae62
--- /dev/null
+++ b/docs/COMMUNITY.md
@@ -0,0 +1,40 @@
+# Community
+
+[![Discord](https://img.shields.io/discord/592041321326182401?style=flat-square&logo=discord)](https://discordapp.com/invite/aSK4zew)
+[![Subreddit subscribers](https://img.shields.io/reddit/subreddit-subscribers/Plugin?logo=reddit&style=flat-square)](https://www.reddit.com/r/Plugin/)
+[![Twitter Follow](https://img.shields.io/twitter/follow/plugin?logo=twitter&style=flat-square)](https://twitter.com/plugin)
+[![Telegram](https://img.shields.io/badge/Telegram-Follow-blue?style=flat-square&logo=telegram)](https://t.me/pluginofficial)
+[![YouTube Channel](https://img.shields.io/badge/YouTube-Subscribe-red?style=flat-square&logo=youtube)](https://www.youtube.com/pluginofficial)
+[![WeChat](https://img.shields.io/badge/WeChat-Follow-green?style=flat-square&logo=wechat)](https://blog.chain.link/plugin-chinese-communities/)
+[![Sina Weibo](https://img.shields.io/badge/Weibo-Follow-red?style=flat-square&logo=sina-weibo)](https://weibo.com/pluginofficial)
+
+In addition to the Plugin community resources in the badges above, here is a
+curated list of other community-related resources. 
+
+## Developer Resources
+
+### Repositories
+
+- [@plugin/box] - Truffle box
+- [@plugin/external-adapter-js] - Plugin external adapter repo
+- [awesome-plugin] - Awesome projects built with Plugin
+- [Plugin Documentation](https://github.com/goplugin/documentation/) - Our open source documentation site
+
+### Platforms and Services
+
+- [Gitcoin] - Hackathons, bounties, and grants
+
+## Presentations
+
+- Connecting Smart Contracts to any/all Off-chain Contract Events,
+  Privacy Preserving Computations and On-chain Reference Data by Sergey Nazarov
+  - [Devcon5](https://chain.link/presentations/devcon5.pdf)
+  - [English](https://chain.link/presentations/english.pdf)
+  - [Chinese](https://chain.link/presentations/chinese.pdf)
+  - [Korean](https://chain.link/presentations/korean.pdf)
+  - [Japanese](https://chain.link/presentations/japanese.pdf)
+
+[awesome-plugin]: https://github.com/JohannEid/awesome-plugin
+[devcon5]: https://chain.link/presentations/devcon5.pdf
+[@plugin/external-adapter-js]: https://github.com/goplugin/external-adapters-js
+[gitcoin]: https://gitcoin.co/
diff --git a/docs/CONFIG.md b/docs/CONFIG.md
new file mode 100644
index 00000000..5b7639d7
--- /dev/null
+++ b/docs/CONFIG.md
@@ -0,0 +1,6767 @@
+[//]: # (Documentation generated from docs/*.toml - DO NOT EDIT.)
+
+This document describes the TOML format for configuration.
+
+See also [SECRETS.md](SECRETS.md)
+
+## Example
+
+```toml
+Log.Level = 'debug'
+
+[[EVM]]
+ChainID = '1' # Required
+
+[[EVM.Nodes]]
+Name = 'fake' # Required
+WSURL = 'wss://foo.bar/ws'
+HTTPURL = 'https://foo.bar' # Required
+```
+
+## Global
+```toml
+InsecureFastScrypt = false # Default
+RootDir = '~/.plugin' # Default
+ShutdownGracePeriod = '5s' # Default
+```
+
+
+### InsecureFastScrypt
+:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._
+```toml
+InsecureFastScrypt = false # Default
+```
+InsecureFastScrypt causes all key stores to encrypt using "fast" scrypt params instead. 
This is insecure and only useful for local testing. DO NOT ENABLE THIS IN PRODUCTION. + +### RootDir +```toml +RootDir = '~/.plugin' # Default +``` +RootDir is the Plugin node's root directory. This is the default directory for logging, database backups, cookies, and other misc Plugin node files. Plugin nodes will always ensure this directory has 700 permissions because it might contain sensitive data. + +### ShutdownGracePeriod +```toml +ShutdownGracePeriod = '5s' # Default +``` +ShutdownGracePeriod is the maximum time allowed to shut down gracefully. If exceeded, the node will terminate immediately to avoid being SIGKILLed. + +## Feature +```toml +[Feature] +FeedsManager = true # Default +LogPoller = false # Default +UICSAKeys = false # Default +``` + + +### FeedsManager +```toml +FeedsManager = true # Default +``` +FeedsManager enables the feeds manager service. + +### LogPoller +```toml +LogPoller = false # Default +``` +LogPoller enables the log poller, an experimental approach to processing logs, required if also using Evm.UseForwarders or OCR2. + +### UICSAKeys +```toml +UICSAKeys = false # Default +``` +UICSAKeys enables CSA Keys in the UI. + +## Database +```toml +[Database] +DefaultIdleInTxSessionTimeout = '1h' # Default +DefaultLockTimeout = '15s' # Default +DefaultQueryTimeout = '10s' # Default +LogQueries = false # Default +MaxIdleConns = 10 # Default +MaxOpenConns = 20 # Default +MigrateOnStartup = true # Default +``` + + +### DefaultIdleInTxSessionTimeout +```toml +DefaultIdleInTxSessionTimeout = '1h' # Default +``` +DefaultIdleInTxSessionTimeout is the maximum time allowed for a transaction to be open and idle before timing out. See Postgres `idle_in_transaction_session_timeout` for more details. + +### DefaultLockTimeout +```toml +DefaultLockTimeout = '15s' # Default +``` +DefaultLockTimeout is the maximum time allowed to wait for database lock of any kind before timing out. See Postgres `lock_timeout` for more details. 
+ +### DefaultQueryTimeout +```toml +DefaultQueryTimeout = '10s' # Default +``` +DefaultQueryTimeout is the maximum time allowed for standard queries before timing out. + +### LogQueries +```toml +LogQueries = false # Default +``` +LogQueries tells the Plugin node to log database queries made using the default logger. SQL statements will be logged at `debug` level. Not all statements can be logged. The best way to get a true log of all SQL statements is to enable SQL statement logging on Postgres. + +### MaxIdleConns +```toml +MaxIdleConns = 10 # Default +``` +MaxIdleConns configures the maximum number of idle database connections that the Plugin node will keep open. Think of this as the baseline number of database connections per Plugin node instance. Increasing this number can help to improve performance under database-heavy workloads. + +Postgres has connection limits, so you must use caution when increasing this value. If you are running several instances of a Plugin node or another application on a single database server, you might run out of Postgres connection slots if you raise this value too high. + +### MaxOpenConns +```toml +MaxOpenConns = 20 # Default +``` +MaxOpenConns configures the maximum number of database connections that a Plugin node will have open at any one time. Think of this as the maximum burst upper bound limit of database connections per Plugin node instance. Increasing this number can help to improve performance under database-heavy workloads. + +Postgres has connection limits, so you must use caution when increasing this value. If you are running several instances of a Plugin node or another application on a single database server, you might run out of Postgres connection slots if you raise this value too high. + +### MigrateOnStartup +```toml +MigrateOnStartup = true # Default +``` +MigrateOnStartup controls whether a Plugin node will attempt to automatically migrate the database on boot. 
If you want more control over your database migration process, set this variable to `false` and manually migrate the database using the CLI `migrate` command instead. + +## Database.Backup +```toml +[Database.Backup] +Mode = 'none' # Default +Dir = 'test/backup/dir' # Example +OnVersionUpgrade = true # Default +Frequency = '1h' # Default +``` +As a best practice, take regular database backups in case of accidental data loss. This best practice is especially important when you upgrade your Plugin node to a new version. Plugin nodes support automated database backups to make this process easier. + +NOTE: Dumps can cause high load and massive database latencies, which will negatively impact the normal functioning of the Plugin node. For this reason, it is recommended to set a `URL` and point it to a read replica if you enable automatic backups. + +### Mode +```toml +Mode = 'none' # Default +``` +Mode sets the type of automatic database backup, which can be one of _none_, `lite`, or `full`. If enabled, the Plugin node will always dump a backup on every boot before running migrations. Additionally, it will automatically take database backups that overwrite the backup file for the given version at regular intervals if `Frequency` is set to a non-zero interval. + +_none_ - Disables backups. +`lite` - Dumps small tables including configuration and keys that are essential for the node to function, which excludes historical data like job runs, transaction history, etc. +`full` - Dumps the entire database. + +It will write to a file like `'Dir'/backup/cl_backup_.dump`. There is one backup dump file per version of the Plugin node. If you upgrade the node, it will keep the backup taken right before the upgrade migration so you can restore to an older version if necessary. + +### Dir +```toml +Dir = 'test/backup/dir' # Example +``` +Dir sets the directory to use for saving the backup file. 
Use this if you want to save the backup file in a directory other than the default ROOT directory. + +### OnVersionUpgrade +```toml +OnVersionUpgrade = true # Default +``` +OnVersionUpgrade enables automatic backups of the database before running migrations, when you are upgrading to a new version. + +### Frequency +```toml +Frequency = '1h' # Default +``` +Frequency sets the interval for database dumps, if set to a positive duration and `Mode` is not _none_. + +Set to `0` to disable periodic backups. + +## Database.Listener +:warning: **_ADVANCED_**: _Do not change these settings unless you know what you are doing._ +```toml +[Database.Listener] +MaxReconnectDuration = '10m' # Default +MinReconnectInterval = '1m' # Default +FallbackPollInterval = '30s' # Default +``` +These settings control the postgres event listener. + +### MaxReconnectDuration +```toml +MaxReconnectDuration = '10m' # Default +``` +MaxReconnectDuration is the maximum duration to wait between reconnect attempts. + +### MinReconnectInterval +```toml +MinReconnectInterval = '1m' # Default +``` +MinReconnectInterval controls the duration to wait before trying to re-establish the database connection after connection loss. After each consecutive failure this interval is doubled, until MaxReconnectInterval is reached. Successfully completing the connection establishment procedure resets the interval back to MinReconnectInterval. + +### FallbackPollInterval +```toml +FallbackPollInterval = '30s' # Default +``` +FallbackPollInterval controls how often clients should manually poll as a fallback in case the postgres event was missed/dropped. 
+ +## Database.Lock +:warning: **_ADVANCED_**: _Do not change these settings unless you know what you are doing._ +```toml +[Database.Lock] +Enabled = true # Default +LeaseDuration = '10s' # Default +LeaseRefreshInterval = '1s' # Default +``` +Ideally, you should use a container orchestration system like [Kubernetes](https://kubernetes.io/) to ensure that only one Plugin node instance can ever use a specific Postgres database. However, some node operators do not have the technical capacity to do this. Common use cases run multiple Plugin node instances in failover mode as recommended by our official documentation. The first instance takes a lock on the database and subsequent instances will wait trying to take this lock in case the first instance fails. + +- If your nodes or applications hold locks open for several hours or days, Postgres is unable to complete internal cleanup tasks. The Postgres maintainers explicitly discourage holding locks open for long periods of time. + +Because of the complications with advisory locks, Plugin nodes with v2.0 and later only support `lease` locking mode. The `lease` locking mode works using the following process: + +- Node A creates one row in the database with the client ID and updates it once per second. +- Node B spinlocks and checks periodically to see if the client ID is too old. If the client ID is not updated after a period of time, node B assumes that node A failed and takes over. Node B becomes the owner of the row and updates the client ID once per second. +- If node A comes back, it attempts to take out a lease, realizes that the database has been leased to another process, and exits the entire application immediately. + +### Enabled +```toml +Enabled = true # Default +``` +Enabled enables the database lock. + +### LeaseDuration +```toml +LeaseDuration = '10s' # Default +``` +LeaseDuration is how long the lease lock will last before expiring. 
+ +### LeaseRefreshInterval +```toml +LeaseRefreshInterval = '1s' # Default +``` +LeaseRefreshInterval determines how often to refresh the lease lock. Also controls how often a standby node will check to see if it can grab the lease. + +## TelemetryIngress +```toml +[TelemetryIngress] +UniConn = true # Default +Logging = false # Default +BufferSize = 100 # Default +MaxBatchSize = 50 # Default +SendInterval = '500ms' # Default +SendTimeout = '10s' # Default +UseBatchSend = true # Default +``` + + +### UniConn +```toml +UniConn = true # Default +``` +UniConn toggles which ws connection style is used. + +### Logging +```toml +Logging = false # Default +``` +Logging toggles verbose logging of the raw telemetry messages being sent. + +### BufferSize +```toml +BufferSize = 100 # Default +``` +BufferSize is the number of telemetry messages to buffer before dropping new ones. + +### MaxBatchSize +```toml +MaxBatchSize = 50 # Default +``` +MaxBatchSize is the maximum number of messages to batch into one telemetry request. + +### SendInterval +```toml +SendInterval = '500ms' # Default +``` +SendInterval determines how often batched telemetry is sent to the ingress server. + +### SendTimeout +```toml +SendTimeout = '10s' # Default +``` +SendTimeout is the max duration to wait for the request to complete when sending batch telemetry. + +### UseBatchSend +```toml +UseBatchSend = true # Default +``` +UseBatchSend toggles sending telemetry to the ingress server using the batch client. 
+ +## TelemetryIngress.Endpoints +```toml +[[TelemetryIngress.Endpoints]] # Example +Network = 'EVM' # Example +ChainID = '111551111' # Example +ServerPubKey = 'test-pub-key-111551111-evm' # Example +URL = 'localhost-111551111-evm:9000' # Example +``` + + +### Network +```toml +Network = 'EVM' # Example +``` +Network aka EVM, Solana, Starknet + +### ChainID +```toml +ChainID = '111551111' # Example +``` +ChainID of the network + +### ServerPubKey +```toml +ServerPubKey = 'test-pub-key-111551111-evm' # Example +``` +ServerPubKey is the public key of the telemetry server. + +### URL +```toml +URL = 'localhost-111551111-evm:9000' # Example +``` +URL is where to send telemetry. + +## AuditLogger +```toml +[AuditLogger] +Enabled = false # Default +ForwardToUrl = 'http://localhost:9898' # Example +JsonWrapperKey = 'event' # Example +Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and a bar+*'] # Example +``` + + +### Enabled +```toml +Enabled = false # Default +``` +Enabled determines if this logger should be configured at all + +### ForwardToUrl +```toml +ForwardToUrl = 'http://localhost:9898' # Example +``` +ForwardToUrl is where you want to forward logs to + +### JsonWrapperKey +```toml +JsonWrapperKey = 'event' # Example +``` +JsonWrapperKey if set wraps the map of data under another single key to make parsing easier + +### Headers +```toml +Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and a bar+*'] # Example +``` +Headers is the set of headers you wish to pass along with each request + +## Log +```toml +[Log] +Level = 'info' # Default +JSONConsole = false # Default +UnixTS = false # Default +``` + + +### Level +```toml +Level = 'info' # Default +``` +Level determines both what is printed on the screen and what is written to the log file. + +The available levels are: +- "debug": Useful for forensic debugging of issues. +- "info": High-level informational messages. 
(default) +- "warn": A mild error occurred that might require non-urgent action. Check these warnings semi-regularly to see if any of them require attention. These warnings usually happen due to factors outside of the control of the node operator. Examples: Unexpected responses from a remote API or misleading networking errors. +- "error": An unexpected error occurred during the regular operation of a well-maintained node. Node operators might need to take action to remedy this error. Check these regularly to see if any of them require attention. Examples: Use of deprecated configuration options or incorrectly configured settings that cause a job to fail. +- "crit": A critical error occurred. The node might be unable to function. Node operators should take immediate action to fix these errors. Examples: The node could not boot because a network socket could not be opened or the database became inaccessible. +- "panic": An exceptional error occurred that could not be handled. If the node is unresponsive, node operators should try to restart their nodes and notify the Plugin team of a potential bug. +- "fatal": The node encountered an unrecoverable problem and had to exit. + +### JSONConsole +```toml +JSONConsole = false # Default +``` +JSONConsole enables JSON logging. Otherwise, the log is saved in a human-friendly console format. + +### UnixTS +```toml +UnixTS = false # Default +``` +UnixTS enables legacy unix timestamps. + +Previous versions of Plugin nodes wrote JSON logs with a unix timestamp. As of v1.1.0 and up, the default has changed to use ISO8601 timestamps for better readability. + +## Log.File +```toml +[Log.File] +Dir = '/my/log/directory' # Example +MaxSize = '5120mb' # Default +MaxAgeDays = 0 # Default +MaxBackups = 1 # Default +``` + + +### Dir +```toml +Dir = '/my/log/directory' # Example +``` +Dir sets the log directory. By default, Plugin nodes write log data to `$ROOT/log.jsonl`. 
+ +### MaxSize +```toml +MaxSize = '5120mb' # Default +``` +MaxSize determines the log file's max size in megabytes before file rotation. Having this not set will disable logging to disk. If your disk doesn't have enough disk space, the logging will pause and the application will log errors until space is available again. + +Values must have suffixes with a unit like: `5120mb` (5,120 megabytes). If no unit suffix is provided, the value defaults to `b` (bytes). The list of valid unit suffixes are: + +- b (bytes) +- kb (kilobytes) +- mb (megabytes) +- gb (gigabytes) +- tb (terabytes) + +### MaxAgeDays +```toml +MaxAgeDays = 0 # Default +``` +MaxAgeDays determines the log file's max age in days before file rotation. Keeping this config with the default value will not remove log files based on age. + +### MaxBackups +```toml +MaxBackups = 1 # Default +``` +MaxBackups determines the maximum number of old log files to retain. Keeping this config with the default value retains all old log files. The `MaxAgeDays` variable can still cause them to get deleted. + +## WebServer +```toml +[WebServer] +AuthenticationMethod = 'local' # Default +AllowOrigins = 'http://localhost:3000,http://localhost:6688' # Default +BridgeCacheTTL = '0s' # Default +BridgeResponseURL = 'https://my-plugin-node.example.com:6688' # Example +HTTPWriteTimeout = '10s' # Default +HTTPPort = 6688 # Default +SecureCookies = true # Default +SessionTimeout = '15m' # Default +SessionReaperExpiration = '240h' # Default +HTTPMaxSize = '32768b' # Default +StartTimeout = '15s' # Default +ListenIP = '0.0.0.0' # Default +``` + + +### AuthenticationMethod +```toml +AuthenticationMethod = 'local' # Default +``` +AuthenticationMethod defines which pluggable auth interface to use for user login and role assumption. Options include 'local' and 'ldap'. 
See docs for more details + +### AllowOrigins +```toml +AllowOrigins = 'http://localhost:3000,http://localhost:6688' # Default +``` +AllowOrigins controls the URLs Plugin nodes emit in the `Allow-Origins` header of its API responses. The setting can be a comma-separated list with no spaces. You might experience CORS issues if this is not set correctly. + +You should set this to the external URL that you use to access the Plugin UI. + +You can set `AllowOrigins = '*'` to allow the UI to work from any URL, but it is recommended for security reasons to make it explicit instead. + +### BridgeCacheTTL +```toml +BridgeCacheTTL = '0s' # Default +``` +BridgeCacheTTL controls the cache TTL for all bridge tasks to use old values in newer observations in case of intermittent failure. It's disabled by default. + +### BridgeResponseURL +```toml +BridgeResponseURL = 'https://my-plugin-node.example.com:6688' # Example +``` +BridgeResponseURL defines the URL for bridges to send a response to. This _must_ be set when using async external adapters. + +Usually this will be the same as the URL/IP and port you use to connect to the Plugin UI. + +### HTTPWriteTimeout +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +HTTPWriteTimeout = '10s' # Default +``` +HTTPWriteTimeout controls how long the Plugin node's API server can hold a socket open for writing a response to an HTTP request. Sometimes, this must be increased for pprof. + +### HTTPPort +```toml +HTTPPort = 6688 # Default +``` +HTTPPort is the port used for the Plugin Node API, [CLI](/docs/configuration-variables/#cli-client), and GUI. + +### SecureCookies +```toml +SecureCookies = true # Default +``` +SecureCookies requires the use of secure cookies for authentication. Set to false to enable standard HTTP requests along with `TLSPort = 0`. 
+ +### SessionTimeout +```toml +SessionTimeout = '15m' # Default +``` +SessionTimeout determines the amount of idle time to elapse before session cookies expire. This signs out GUI users from their sessions. + +### SessionReaperExpiration +```toml +SessionReaperExpiration = '240h' # Default +``` +SessionReaperExpiration represents how long an API session lasts before expiring and requiring a new login. + +### HTTPMaxSize +```toml +HTTPMaxSize = '32768b' # Default +``` +HTTPMaxSize defines the maximum size for HTTP requests and responses made by the node server. + +### StartTimeout +```toml +StartTimeout = '15s' # Default +``` +StartTimeout defines the maximum amount of time the node will wait for a server to start. + +### ListenIP +```toml +ListenIP = '0.0.0.0' # Default +``` +ListenIP specifies the IP to bind the HTTP server to + +## WebServer.LDAP +```toml +[WebServer.LDAP] +ServerTLS = true # Default +SessionTimeout = '15m0s' # Default +QueryTimeout = '2m0s' # Default +BaseUserAttr = 'uid' # Default +BaseDN = 'dc=custom,dc=example,dc=com' # Example +UsersDN = 'ou=users' # Default +GroupsDN = 'ou=groups' # Default +ActiveAttribute = '' # Default +ActiveAttributeAllowedValue = '' # Default +AdminUserGroupCN = 'NodeAdmins' # Default +EditUserGroupCN = 'NodeEditors' # Default +RunUserGroupCN = 'NodeRunners' # Default +ReadUserGroupCN = 'NodeReadOnly' # Default +UserApiTokenEnabled = false # Default +UserAPITokenDuration = '240h0m0s' # Default +UpstreamSyncInterval = '0s' # Default +UpstreamSyncRateLimit = '2m0s' # Default +``` +Optional LDAP config if WebServer.AuthenticationMethod is set to 'ldap' +LDAP queries are all parameterized to support custom LDAP 'dn', 'cn', and attributes + +### ServerTLS +```toml +ServerTLS = true # Default +``` +ServerTLS defines the option to require the secure ldaps + +### SessionTimeout +```toml +SessionTimeout = '15m0s' # Default +``` +SessionTimeout determines the amount of idle time to elapse before session cookies expire. 
This signs out GUI users from their sessions. + +### QueryTimeout +```toml +QueryTimeout = '2m0s' # Default +``` +QueryTimeout defines how long queries should wait before timing out, defined in seconds + +### BaseUserAttr +```toml +BaseUserAttr = 'uid' # Default +``` +BaseUserAttr defines the base attribute used to populate LDAP queries such as "uid=$", default is example + +### BaseDN +```toml +BaseDN = 'dc=custom,dc=example,dc=com' # Example +``` +BaseDN defines the base LDAP 'dn' search filter to apply to every LDAP query, replace example,com with the appropriate LDAP server's structure + +### UsersDN +```toml +UsersDN = 'ou=users' # Default +``` +UsersDN defines the 'dn' query to use when querying for the 'users' 'ou' group + +### GroupsDN +```toml +GroupsDN = 'ou=groups' # Default +``` +GroupsDN defines the 'dn' query to use when querying for the 'groups' 'ou' group + +### ActiveAttribute +```toml +ActiveAttribute = '' # Default +``` +ActiveAttribute is an optional user field to check truthiness for if a user is valid/active. 
This is only required if the LDAP provider lists inactive users as members of groups + +### ActiveAttributeAllowedValue +```toml +ActiveAttributeAllowedValue = '' # Default +``` +ActiveAttributeAllowedValue is the value to check against for the above optional user attribute + +### AdminUserGroupCN +```toml +AdminUserGroupCN = 'NodeAdmins' # Default +``` +AdminUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Admin' role + +### EditUserGroupCN +```toml +EditUserGroupCN = 'NodeEditors' # Default +``` +EditUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Edit' role + +### RunUserGroupCN +```toml +RunUserGroupCN = 'NodeRunners' # Default +``` +RunUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Run' role + +### ReadUserGroupCN +```toml +ReadUserGroupCN = 'NodeReadOnly' # Default +``` +ReadUserGroupCN is the LDAP 'cn' of the LDAP group that maps the core node's 'Read' role + +### UserApiTokenEnabled +```toml +UserApiTokenEnabled = false # Default +``` +UserApiTokenEnabled enables the users to issue API tokens with the same access of their role + +### UserAPITokenDuration +```toml +UserAPITokenDuration = '240h0m0s' # Default +``` +UserAPITokenDuration is the duration of time an API token is active for before expiring + +### UpstreamSyncInterval +```toml +UpstreamSyncInterval = '0s' # Default +``` +UpstreamSyncInterval is the interval at which the background LDAP sync task will be called. A '0s' value disables the background sync being run on an interval. This check is already performed during login/logout actions, all sessions and API tokens stored in the local ldap tables are updated to match the remote server + +### UpstreamSyncRateLimit +```toml +UpstreamSyncRateLimit = '2m0s' # Default +``` +UpstreamSyncRateLimit defines a duration to limit the number of query/API calls to the upstream LDAP provider. 
It prevents the sync functionality from being called multiple times within the defined duration + +## WebServer.RateLimit +```toml +[WebServer.RateLimit] +Authenticated = 1000 # Default +AuthenticatedPeriod = '1m' # Default +Unauthenticated = 5 # Default +UnauthenticatedPeriod = '20s' # Default +``` + + +### Authenticated +```toml +Authenticated = 1000 # Default +``` +Authenticated defines the threshold to which authenticated requests get limited. More than this many authenticated requests per `AuthenticatedRateLimitPeriod` will be rejected. + +### AuthenticatedPeriod +```toml +AuthenticatedPeriod = '1m' # Default +``` +AuthenticatedPeriod defines the period to which authenticated requests get limited. + +### Unauthenticated +```toml +Unauthenticated = 5 # Default +``` +Unauthenticated defines the threshold to which unauthenticated requests get limited. More than this many unauthenticated requests per `UnAuthenticatedRateLimitPeriod` will be rejected. + +### UnauthenticatedPeriod +```toml +UnauthenticatedPeriod = '20s' # Default +``` +UnauthenticatedPeriod defines the period to which unauthenticated requests get limited. + +## WebServer.MFA +```toml +[WebServer.MFA] +RPID = 'localhost' # Example +RPOrigin = 'http://localhost:6688/' # Example +``` +The Operator UI frontend supports enabling Multi Factor Authentication via Webauthn per account. When enabled, logging in will require the account password and a hardware or OS security key such as Yubikey. To enroll, log in to the operator UI and click the circle purple profile button at the top right and then click **Register MFA Token**. Tap your hardware security key or use the OS public key management feature to enroll a key. Next time you log in, this key will be required to authenticate. + +### RPID +```toml +RPID = 'localhost' # Example +``` +RPID is the FQDN of where the Operator UI is served. When serving locally, the value should be `localhost`. 
+ +### RPOrigin +```toml +RPOrigin = 'http://localhost:6688/' # Example +``` +RPOrigin is the origin URL where WebAuthn requests initiate, including scheme and port. When serving locally, the value should be `http://localhost:6688/`. + +## WebServer.TLS +```toml +[WebServer.TLS] +CertPath = '~/.cl/certs' # Example +Host = 'tls-host' # Example +KeyPath = '/home/$USER/.plugin/tls/server.key' # Example +HTTPSPort = 6689 # Default +ForceRedirect = false # Default +ListenIP = '0.0.0.0' # Default +``` +The TLS settings apply only if you want to enable TLS security on your Plugin node. + +### CertPath +```toml +CertPath = '~/.cl/certs' # Example +``` +CertPath is the location of the TLS certificate file. + +### Host +```toml +Host = 'tls-host' # Example +``` +Host is the hostname configured for TLS to be used by the Plugin node. This is useful if you configured a domain name specific for your Plugin node. + +### KeyPath +```toml +KeyPath = '/home/$USER/.plugin/tls/server.key' # Example +``` +KeyPath is the location of the TLS private key file. + +### HTTPSPort +```toml +HTTPSPort = 6689 # Default +``` +HTTPSPort is the port used for HTTPS connections. Set this to `0` to disable HTTPS. Disabling HTTPS also relieves Plugin nodes of the requirement for a TLS certificate. + +### ForceRedirect +```toml +ForceRedirect = false # Default +``` +ForceRedirect forces TLS redirect for unencrypted connections. + +### ListenIP +```toml +ListenIP = '0.0.0.0' # Default +``` +ListenIP specifies the IP to bind the HTTPS server to + +## JobPipeline +```toml +[JobPipeline] +ExternalInitiatorsEnabled = false # Default +MaxRunDuration = '10m' # Default +MaxSuccessfulRuns = 10000 # Default +ReaperInterval = '1h' # Default +ReaperThreshold = '24h' # Default +ResultWriteQueueDepth = 100 # Default +``` + + +### ExternalInitiatorsEnabled +```toml +ExternalInitiatorsEnabled = false # Default +``` +ExternalInitiatorsEnabled enables the External Initiator feature. 
If disabled, `webhook` jobs can ONLY be initiated by a logged-in user. If enabled, `webhook` jobs can be initiated by a whitelisted external initiator. + +### MaxRunDuration +```toml +MaxRunDuration = '10m' # Default +``` +MaxRunDuration is the maximum time allowed for a single job run. If it takes longer, it will exit early and be marked errored. If set to zero, disables the time limit completely. + +### MaxSuccessfulRuns +```toml +MaxSuccessfulRuns = 10000 # Default +``` +MaxSuccessfulRuns caps the number of completed successful runs per pipeline +spec in the database. You can set it to zero as a performance optimisation; +this will avoid saving any successful run. + +Note this is not a hard cap, it can drift slightly larger than this but not +by more than 5% or so. + +### ReaperInterval +```toml +ReaperInterval = '1h' # Default +``` +ReaperInterval controls how often the job pipeline reaper will run to delete completed jobs older than ReaperThreshold, in order to keep database size manageable. + +Set to `0` to disable the periodic reaper. + +### ReaperThreshold +```toml +ReaperThreshold = '24h' # Default +``` +ReaperThreshold determines the age limit for job runs. Completed job runs older than this will be automatically purged from the database. + +### ResultWriteQueueDepth +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +ResultWriteQueueDepth = 100 # Default +``` +ResultWriteQueueDepth controls how many writes will be buffered before subsequent writes are dropped, for jobs that write results asynchronously for performance reasons, such as OCR. + +## JobPipeline.HTTPRequest +```toml +[JobPipeline.HTTPRequest] +DefaultTimeout = '15s' # Default +MaxSize = '32768' # Default +``` + + +### DefaultTimeout +```toml +DefaultTimeout = '15s' # Default +``` +DefaultTimeout defines the default timeout for HTTP requests made by `http` and `bridge` adapters. 
+ +### MaxSize +```toml +MaxSize = '32768' # Default +``` +MaxSize defines the maximum size for HTTP requests and responses made by `http` and `bridge` adapters. + +## FluxMonitor +```toml +[FluxMonitor] +DefaultTransactionQueueDepth = 1 # Default +SimulateTransactions = false # Default +``` + + +### DefaultTransactionQueueDepth +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +DefaultTransactionQueueDepth = 1 # Default +``` +DefaultTransactionQueueDepth controls the queue size for `DropOldestStrategy` in Flux Monitor. Set to 0 to use `SendEvery` strategy instead. + +### SimulateTransactions +```toml +SimulateTransactions = false # Default +``` +SimulateTransactions enables transaction simulation for Flux Monitor. + +## OCR2 +```toml +[OCR2] +Enabled = false # Default +ContractConfirmations = 3 # Default +BlockchainTimeout = '20s' # Default +ContractPollInterval = '1m' # Default +ContractSubscribeInterval = '2m' # Default +ContractTransmitterTransmitTimeout = '10s' # Default +DatabaseTimeout = '10s' # Default +KeyBundleID = '7a5f66bbe6594259325bf2b4f5b1a9c900000000000000000000000000000000' # Example +CaptureEATelemetry = false # Default +CaptureAutomationCustomTelemetry = true # Default +DefaultTransactionQueueDepth = 1 # Default +SimulateTransactions = false # Default +TraceLogging = false # Default +``` + + +### Enabled +```toml +Enabled = false # Default +``` +Enabled enables OCR2 jobs. + +### ContractConfirmations +```toml +ContractConfirmations = 3 # Default +``` +ContractConfirmations is the number of block confirmations to wait for before enacting an on-chain +configuration change. This value doesn't need to be very high (in +particular, it does not need to protect against malicious re-orgs). +Since configuration changes create some overhead, and mini-reorgs +are fairly common, recommended values are between two and ten. 
+ +Malicious re-orgs are not any more of a concern here than they are in +blockchain applications in general: Since nodes check the contract for the +latest config every ContractConfigTrackerPollInterval.Seconds(), they will +come to a common view of the current config within any interval longer than +that, as long as the latest setConfig transaction in the longest chain is +stable. They will thus be able to continue reporting after the poll +interval, unless an adversary is able to repeatedly re-org the transaction +out during every poll interval, which would amount to the capability to +censor any transaction. + +Note that 1 confirmation implies that the transaction/event has been mined in one block. +0 confirmations would imply that the event would be recognised before it has even been mined, which is not currently supported. +e.g. +Current block height: 42 +Changed in block height: 43 +Contract config confirmations: 1 +STILL PENDING + +Current block height: 43 +Changed in block height: 43 +Contract config confirmations: 1 +CONFIRMED + +### BlockchainTimeout +```toml +BlockchainTimeout = '20s' # Default +``` +BlockchainTimeout is the timeout for blockchain queries (mediated through +ContractConfigTracker and ContractTransmitter). +(This is necessary because an oracle's operations are serialized, so +blocking forever on a chain interaction would break the oracle.) + +### ContractPollInterval +```toml +ContractPollInterval = '1m' # Default +``` +ContractPollInterval is the polling interval at which ContractConfigTracker is queried for +updated on-chain configurations. Recommended values are between +fifteen seconds and two minutes. + +### ContractSubscribeInterval +```toml +ContractSubscribeInterval = '2m' # Default +``` +ContractSubscribeInterval is the interval at which we try to establish a subscription on ContractConfigTracker +if one doesn't exist. Recommended values are between two and five minutes. 
+ +### ContractTransmitterTransmitTimeout +```toml +ContractTransmitterTransmitTimeout = '10s' # Default +``` +ContractTransmitterTransmitTimeout is the timeout for ContractTransmitter.Transmit calls. + +### DatabaseTimeout +```toml +DatabaseTimeout = '10s' # Default +``` +DatabaseTimeout is the timeout for database interactions. +(This is necessary because an oracle's operations are serialized, so +blocking forever on an observation would break the oracle.) + +### KeyBundleID +```toml +KeyBundleID = '7a5f66bbe6594259325bf2b4f5b1a9c900000000000000000000000000000000' # Example +``` +KeyBundleID is a sha256 hexadecimal hash identifier. + +### CaptureEATelemetry +```toml +CaptureEATelemetry = false # Default +``` +CaptureEATelemetry toggles collecting extra information from External Adapters + +### CaptureAutomationCustomTelemetry +```toml +CaptureAutomationCustomTelemetry = true # Default +``` +CaptureAutomationCustomTelemetry toggles collecting automation specific telemetry + +### DefaultTransactionQueueDepth +```toml +DefaultTransactionQueueDepth = 1 # Default +``` +DefaultTransactionQueueDepth controls the queue size for `DropOldestStrategy` in OCR2. Set to 0 to use `SendEvery` strategy instead. + +### SimulateTransactions +```toml +SimulateTransactions = false # Default +``` +SimulateTransactions enables transaction simulation for OCR2. + +### TraceLogging +```toml +TraceLogging = false # Default +``` +TraceLogging enables trace level logging. 
+ +## OCR +```toml +[OCR] +Enabled = false # Default +ObservationTimeout = '5s' # Default +BlockchainTimeout = '20s' # Default +ContractPollInterval = '1m' # Default +ContractSubscribeInterval = '2m' # Default +DefaultTransactionQueueDepth = 1 # Default +KeyBundleID = 'acdd42797a8b921b2910497badc5000600000000000000000000000000000000' # Example +SimulateTransactions = false # Default +TransmitterAddress = '0xa0788FC17B1dEe36f057c42B6F373A34B014687e' # Example +CaptureEATelemetry = false # Default +TraceLogging = false # Default +``` +This section applies only if you are running off-chain reporting jobs. + +### Enabled +```toml +Enabled = false # Default +``` +Enabled enables OCR jobs. + +### ObservationTimeout +```toml +ObservationTimeout = '5s' # Default +``` +ObservationTimeout is the timeout for making observations using the DataSource.Observe method. +(This is necessary because an oracle's operations are serialized, so +blocking forever on an observation would break the oracle.) + +### BlockchainTimeout +```toml +BlockchainTimeout = '20s' # Default +``` +BlockchainTimeout is the timeout for blockchain queries (mediated through +ContractConfigTracker and ContractTransmitter). +(This is necessary because an oracle's operations are serialized, so +blocking forever on a chain interaction would break the oracle.) + +### ContractPollInterval +```toml +ContractPollInterval = '1m' # Default +``` +ContractPollInterval is the polling interval at which ContractConfigTracker is queried for +updated on-chain configurations. Recommended values are between +fifteen seconds and two minutes. + +### ContractSubscribeInterval +```toml +ContractSubscribeInterval = '2m' # Default +``` +ContractSubscribeInterval is the interval at which we try to establish a subscription on ContractConfigTracker +if one doesn't exist. Recommended values are between two and five minutes. 
+ +### DefaultTransactionQueueDepth +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +DefaultTransactionQueueDepth = 1 # Default +``` +DefaultTransactionQueueDepth controls the queue size for `DropOldestStrategy` in OCR. Set to 0 to use `SendEvery` strategy instead. + +### KeyBundleID +```toml +KeyBundleID = 'acdd42797a8b921b2910497badc5000600000000000000000000000000000000' # Example +``` +KeyBundleID is the default key bundle ID to use for OCR jobs. If you have an OCR job that does not explicitly specify a key bundle ID, it will fall back to this value. + +### SimulateTransactions +```toml +SimulateTransactions = false # Default +``` +SimulateTransactions enables transaction simulation for OCR. + +### TransmitterAddress +```toml +TransmitterAddress = '0xa0788FC17B1dEe36f057c42B6F373A34B014687e' # Example +``` +TransmitterAddress is the default sending address to use for OCR. If you have an OCR job that does not explicitly specify a transmitter address, it will fall back to this value. + +### CaptureEATelemetry +```toml +CaptureEATelemetry = false # Default +``` +CaptureEATelemetry toggles collecting extra information from External Adapters + +### TraceLogging +```toml +TraceLogging = false # Default +``` +TraceLogging enables trace level logging. + +## P2P +```toml +[P2P] +IncomingMessageBufferSize = 10 # Default +OutgoingMessageBufferSize = 10 # Default +PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw' # Example +TraceLogging = false # Default +``` +P2P has a versioned networking stack. Currently only `[P2P.V2]` is supported. +All nodes in the OCR network should share the same networking stack. + +### IncomingMessageBufferSize +```toml +IncomingMessageBufferSize = 10 # Default +``` +IncomingMessageBufferSize is the per-remote number of incoming +messages to buffer. Any additional messages received on top of those +already in the queue will be dropped. 
+ +### OutgoingMessageBufferSize +```toml +OutgoingMessageBufferSize = 10 # Default +``` +OutgoingMessageBufferSize is the per-remote number of outgoing +messages to buffer. Any additional messages sent on top of those +already in the queue will displace the oldest. +NOTE: OutgoingMessageBufferSize should be comfortably smaller than remote's +IncomingMessageBufferSize to give the remote enough space to process +them all in case we regained connection and now send a bunch at once + +### PeerID +```toml +PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw' # Example +``` +PeerID is the default peer ID to use for OCR jobs. If unspecified, uses the first available peer ID. + +### TraceLogging +```toml +TraceLogging = false # Default +``` +TraceLogging enables trace level logging. + +## P2P.V2 +```toml +[P2P.V2] +Enabled = true # Default +AnnounceAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example +DefaultBootstrappers = ['12D3KooWMHMRLQkgPbFSYHwD3NBuwtS1AmxhvKVUrcfyaGDASR4U@1.2.3.4:9999', '12D3KooWM55u5Swtpw9r8aFLQHEtw7HR4t44GdNs654ej5gRs2Dh@example.com:1234'] # Example +DeltaDial = '15s' # Default +DeltaReconcile = '1m' # Default +ListenAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example +``` + + +### Enabled +```toml +Enabled = true # Default +``` +Enabled enables P2P V2. +Note: V1.Enabled is true by default, so it must be set false in order to run V2 only. + +### AnnounceAddresses +```toml +AnnounceAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example +``` +AnnounceAddresses is the addresses the peer will advertise on the network in `host:port` form as accepted by the TCP version of Go’s `net.Dial`. +The addresses should be reachable by other nodes on the network. When attempting to connect to another node, +a node will attempt to dial all of the other node’s AnnounceAddresses in round-robin fashion. 
+ +### DefaultBootstrappers +```toml +DefaultBootstrappers = ['12D3KooWMHMRLQkgPbFSYHwD3NBuwtS1AmxhvKVUrcfyaGDASR4U@1.2.3.4:9999', '12D3KooWM55u5Swtpw9r8aFLQHEtw7HR4t44GdNs654ej5gRs2Dh@example.com:1234'] # Example +``` +DefaultBootstrappers is the default bootstrapper peers for libocr's v2 networking stack. + +Oracle nodes typically only know each other’s PeerIDs, but not their hostnames, IP addresses, or ports. +DefaultBootstrappers are special nodes that help other nodes discover each other’s `AnnounceAddresses` so they can communicate. +Nodes continuously attempt to connect to bootstrappers configured in here. When a node wants to connect to another node +(which it knows only by PeerID, but not by address), it discovers the other node’s AnnounceAddresses from communications +received from its DefaultBootstrappers or other discovered nodes. To facilitate discovery, +nodes will regularly broadcast signed announcements containing their PeerID and AnnounceAddresses. + +### DeltaDial +```toml +DeltaDial = '15s' # Default +``` +DeltaDial controls how far apart Dial attempts are + +### DeltaReconcile +```toml +DeltaReconcile = '1m' # Default +``` +DeltaReconcile controls how often a Reconcile message is sent to every peer. + +### ListenAddresses +```toml +ListenAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example +``` +ListenAddresses is the addresses the peer will listen to on the network in `host:port` form as accepted by `net.Listen()`, +but the host and port must be fully specified and cannot be empty. You can specify `0.0.0.0` (IPv4) or `::` (IPv6) to listen on all interfaces, but that is not recommended. 
+ +## Keeper +```toml +[Keeper] +DefaultTransactionQueueDepth = 1 # Default +GasPriceBufferPercent = 20 # Default +GasTipCapBufferPercent = 20 # Default +BaseFeeBufferPercent = 20 # Default +MaxGracePeriod = 100 # Default +TurnLookBack = 1_000 # Default +``` + + +### DefaultTransactionQueueDepth +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +DefaultTransactionQueueDepth = 1 # Default +``` +DefaultTransactionQueueDepth controls the queue size for `DropOldestStrategy` in Keeper. Set to 0 to use `SendEvery` strategy instead. + +### GasPriceBufferPercent +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +GasPriceBufferPercent = 20 # Default +``` +GasPriceBufferPercent specifies the percentage to add to the gas price used for checking whether to perform an upkeep. Only applies in legacy mode (EIP-1559 off). + +### GasTipCapBufferPercent +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +GasTipCapBufferPercent = 20 # Default +``` +GasTipCapBufferPercent specifies the percentage to add to the gas price used for checking whether to perform an upkeep. Only applies in EIP-1559 mode. + +### BaseFeeBufferPercent +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +BaseFeeBufferPercent = 20 # Default +``` +BaseFeeBufferPercent specifies the percentage to add to the base fee used for checking whether to perform an upkeep. Applies only in EIP-1559 mode. 
+ +### MaxGracePeriod +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +MaxGracePeriod = 100 # Default +``` +MaxGracePeriod is the maximum number of blocks that a keeper will wait after performing an upkeep before it resumes checking that upkeep + +### TurnLookBack +```toml +TurnLookBack = 1_000 # Default +``` +TurnLookBack is the number of blocks in the past to look back when getting a block for a turn. + +## Keeper.Registry +```toml +[Keeper.Registry] +CheckGasOverhead = 200_000 # Default +PerformGasOverhead = 300_000 # Default +SyncInterval = '30m' # Default +MaxPerformDataSize = 5_000 # Default +SyncUpkeepQueueSize = 10 # Default +``` + + +### CheckGasOverhead +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +CheckGasOverhead = 200_000 # Default +``` +CheckGasOverhead is the amount of extra gas to provide checkUpkeep() calls to account for the gas consumed by the keeper registry. + +### PerformGasOverhead +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +PerformGasOverhead = 300_000 # Default +``` +PerformGasOverhead is the amount of extra gas to provide performUpkeep() calls to account for the gas consumed by the keeper registry + +### SyncInterval +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +SyncInterval = '30m' # Default +``` +SyncInterval is the interval in which the RegistrySynchronizer performs a full sync of the keeper registry contract it is tracking. + +### MaxPerformDataSize +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +MaxPerformDataSize = 5_000 # Default +``` +MaxPerformDataSize is the max size of perform data. 
+ +### SyncUpkeepQueueSize +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +SyncUpkeepQueueSize = 10 # Default +``` +SyncUpkeepQueueSize represents the maximum number of upkeeps that can be synced in parallel. + +## AutoPprof +```toml +[AutoPprof] +Enabled = false # Default +ProfileRoot = 'prof/root' # Example +PollInterval = '10s' # Default +GatherDuration = '10s' # Default +GatherTraceDuration = '5s' # Default +MaxProfileSize = '100mb' # Default +CPUProfileRate = 1 # Default +MemProfileRate = 1 # Default +BlockProfileRate = 1 # Default +MutexProfileFraction = 1 # Default +MemThreshold = '4gb' # Default +GoroutineThreshold = 5000 # Default +``` +The Plugin node is equipped with an internal "nurse" service that can perform automatic `pprof` profiling when the certain resource thresholds are exceeded, such as memory and goroutine count. These profiles are saved to disk to facilitate fine-grained debugging of performance-related issues. In general, if you notice that your node has begun to accumulate profiles, forward them to the Plugin team. + +To learn more about these profiles, read the [Profiling Go programs with pprof](https://jvns.ca/blog/2017/09/24/profiling-go-with-pprof/) guide. + +### Enabled +```toml +Enabled = false # Default +``` +Enabled enables the automatic profiling service. + +### ProfileRoot +```toml +ProfileRoot = 'prof/root' # Example +``` +ProfileRoot sets the location on disk where pprof profiles will be stored. Defaults to `RootDir`. + +### PollInterval +```toml +PollInterval = '10s' # Default +``` +PollInterval is the interval at which the node's resources are checked. + +### GatherDuration +```toml +GatherDuration = '10s' # Default +``` +GatherDuration is the duration for which profiles are gathered when profiling starts. 
+ +### GatherTraceDuration +```toml +GatherTraceDuration = '5s' # Default +``` +GatherTraceDuration is the duration for which traces are gathered when profiling is kicked off. This is separately configurable because traces are significantly larger than other types of profiles. + +### MaxProfileSize +```toml +MaxProfileSize = '100mb' # Default +``` +MaxProfileSize is the maximum amount of disk space that profiles may consume before profiling is disabled. + +### CPUProfileRate +```toml +CPUProfileRate = 1 # Default +``` +CPUProfileRate sets the rate for CPU profiling. See https://pkg.go.dev/runtime#SetCPUProfileRate. + +### MemProfileRate +```toml +MemProfileRate = 1 # Default +``` +MemProfileRate sets the rate for memory profiling. See https://pkg.go.dev/runtime#pkg-variables. + +### BlockProfileRate +```toml +BlockProfileRate = 1 # Default +``` +BlockProfileRate sets the fraction of blocking events for goroutine profiling. See https://pkg.go.dev/runtime#SetBlockProfileRate. + +### MutexProfileFraction +```toml +MutexProfileFraction = 1 # Default +``` +MutexProfileFraction sets the fraction of contention events for mutex profiling. See https://pkg.go.dev/runtime#SetMutexProfileFraction. + +### MemThreshold +```toml +MemThreshold = '4gb' # Default +``` +MemThreshold sets the maximum amount of memory the node can actively consume before profiling begins. + +### GoroutineThreshold +```toml +GoroutineThreshold = 5000 # Default +``` +GoroutineThreshold is the maximum number of actively-running goroutines the node can spawn before profiling begins. + +## Pyroscope +```toml +[Pyroscope] +ServerAddress = 'http://localhost:4040' # Example +Environment = 'mainnet' # Default +``` + + +### ServerAddress +```toml +ServerAddress = 'http://localhost:4040' # Example +``` +ServerAddress sets the address that will receive the profile logs. It enables the profiling service. 
+
+### Environment
+```toml
+Environment = 'mainnet' # Default
+```
+Environment sets the target environment tag to which profiles will be added.
+
+## Sentry
+```toml
+[Sentry]
+Debug = false # Default
+DSN = 'sentry-dsn' # Example
+Environment = 'my-custom-env' # Example
+Release = 'v1.2.3' # Example
+```
+
+
+### Debug
+:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._
+```toml
+Debug = false # Default
+```
+Debug enables printing of Sentry SDK debug messages.
+
+### DSN
+```toml
+DSN = 'sentry-dsn' # Example
+```
+DSN is the data source name where events will be sent. Sentry is completely disabled if this is left blank.
+
+### Environment
+```toml
+Environment = 'my-custom-env' # Example
+```
+Environment overrides the Sentry environment to the given value. Otherwise autodetects between dev/prod.
+
+### Release
+```toml
+Release = 'v1.2.3' # Example
+```
+Release overrides the Sentry release to the given value. Otherwise uses the compiled-in version number.
+
+## Insecure
+```toml
+[Insecure]
+DevWebServer = false # Default
+OCRDevelopmentMode = false # Default
+InfiniteDepthQueries = false # Default
+DisableRateLimiting = false # Default
+```
+Insecure config family is only allowed in development builds.
+
+### DevWebServer
+:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._
+```toml
+DevWebServer = false # Default
+```
+DevWebServer skips secure configuration for webserver AllowedHosts, SSL, etc.
+
+### OCRDevelopmentMode
+```toml
+OCRDevelopmentMode = false # Default
+```
+OCRDevelopmentMode runs OCR in development mode.
+
+### InfiniteDepthQueries
+```toml
+InfiniteDepthQueries = false # Default
+```
+InfiniteDepthQueries skips graphql query depth limit checks.
+
+### DisableRateLimiting
+```toml
+DisableRateLimiting = false # Default
+```
+DisableRateLimiting skips ratelimiting on asset requests.
+
+## Tracing
+```toml
+[Tracing]
+Enabled = false # Default
+CollectorTarget = 'localhost:4317' # Example
+NodeID = 'NodeID' # Example
+SamplingRatio = 1.0 # Example
+Mode = 'tls' # Default
+TLSCertPath = '/path/to/cert.pem' # Example
+```
+
+
+### Enabled
+```toml
+Enabled = false # Default
+```
+Enabled turns trace collection on or off. On requires an OTEL Tracing Collector.
+
+### CollectorTarget
+```toml
+CollectorTarget = 'localhost:4317' # Example
+```
+CollectorTarget is the logical address of the OTEL Tracing Collector.
+
+### NodeID
+```toml
+NodeID = 'NodeID' # Example
+```
+NodeID is a unique name for this node relative to any other node traces are collected for.
+
+### SamplingRatio
+```toml
+SamplingRatio = 1.0 # Example
+```
+SamplingRatio is the ratio of traces to sample for this node.
+
+### Mode
+```toml
+Mode = 'tls' # Default
+```
+Mode is a string value. `tls` or `unencrypted` are the only values allowed. If set to `unencrypted`, `TLSCertPath` can be unset, meaning traces will be sent over plaintext to the collector.
+
+### TLSCertPath
+```toml
+TLSCertPath = '/path/to/cert.pem' # Example
+```
+TLSCertPath is the file path to the TLS certificate used for secure communication with an OTEL Tracing Collector.
+
+## Tracing.Attributes
+```toml
+[Tracing.Attributes]
+env = 'test' # Example
+```
+Tracing.Attributes are user specified key-value pairs to associate in the context of the traces
+
+### env
+```toml
+env = 'test' # Example
+```
+env is an example user specified key-value pair
+
+## Mercury
+```toml
+[Mercury]
+```
+
+
+## Mercury.Cache
+```toml
+[Mercury.Cache]
+LatestReportTTL = "1s" # Default
+MaxStaleAge = "1h" # Default
+LatestReportDeadline = "5s" # Default
+```
+Mercury.Cache controls settings for the price retrieval cache querying a mercury server
+
+### LatestReportTTL
+```toml
+LatestReportTTL = "1s" # Default
+```
+LatestReportTTL controls how "stale" we will allow a price to be e.g. 
if +set to 1s, a new price will always be fetched if the last result was +from 1 second ago or older. + +Another way of looking at it is such: the cache will _never_ return a +price that was queried from now-LatestReportTTL or before. + +Setting to zero disables caching entirely. + +### MaxStaleAge +```toml +MaxStaleAge = "1h" # Default +``` +MaxStaleAge is that maximum amount of time that a value can be stale +before it is deleted from the cache (a form of garbage collection). + +This should generally be set to something much larger than +LatestReportTTL. Setting to zero disables garbage collection. + +### LatestReportDeadline +```toml +LatestReportDeadline = "5s" # Default +``` +LatestReportDeadline controls how long to wait for a response from the +mercury server before retrying. Setting this to zero will wait indefinitely. + +## Mercury.TLS +```toml +[Mercury.TLS] +CertFile = "/path/to/client/certs.pem" # Example +``` +Mercury.TLS controls client settings for when the node talks to traditional web servers or load balancers. + +### CertFile +```toml +CertFile = "/path/to/client/certs.pem" # Example +``` +CertFile is the path to a PEM file of trusted root certificate authority certificates + +## EVM +EVM defaults depend on ChainID: + +
Ethereum Mainnet (1)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x514910771AF9Ca656af840dff83E8264EcF986CA' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Ethereum Ropsten (3)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x20fE562d797A42Dcb3399062AE9546cd06f63280' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Ethereum Rinkeby (4)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x01BE23585060835E02B77ef475b0Cc51aA1e0709' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Ethereum Goerli (5)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x326C977E6efc84E512bB9C30f76E30c160eD06FB' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Optimism Mainnet (10)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'optimismBedrock' +FinalityDepth = 200 +FinalityTagEnabled = false +LinkContractAddress = '0x350a791Bfc2C21F9Ed5d10980Dad2e2638ffa7f6' +LogBackfillBatchSize = 1000 +LogPollInterval = '2s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '40s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '30s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 wei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '100 wei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 300 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 6500000 +``` + +

+ +
RSK Mainnet (30)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x14AdaE34beF7ca957Ce2dDe5ADD97ea050123827' +LogBackfillBatchSize = 1000 +LogPollInterval = '30s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '50 mwei' +PriceMax = '50 gwei' +PriceMin = '0' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 mwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
RSK Testnet (31)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x8bBbd80981FE76d44854D8DF305e8985c19f0e78' +LogBackfillBatchSize = 1000 +LogPollInterval = '30s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '50 mwei' +PriceMax = '50 gwei' +PriceMin = '0' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 mwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Ethereum Kovan (42)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0xa36085F69e2889c224210F603D836748e7dC0088' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +OperatorFactoryAddress = '0x8007e24251b1D2Fc518Eb843A701d9cD21fe0aA3' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
BSC Mainnet (56)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x404460C6A5EdE2D891e8297795264fDe62ADBB75' +LogBackfillBatchSize = 1000 +LogPollInterval = '3s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 2 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '5 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 5 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '2s' +DatabaseTimeout = '2s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '500ms' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
OKX Testnet (65)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
OKX Mainnet (66)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
BSC Testnet (97)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x84b9B910527Ad5C03A9Ca831909E21e236EA7b06' +LogBackfillBatchSize = 1000 +LogPollInterval = '3s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 2 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '5 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 5 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '2s' +DatabaseTimeout = '2s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '500ms' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
xDai Mainnet (100)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'xdai' +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0xE2e73A1c69ecF83F464EFCE6A5be353a37cA09b2' +LogBackfillBatchSize = 1000 +LogPollInterval = '5s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '1 gwei' +PriceMax = '500 gwei' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Heco Mainnet (128)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x404460C6A5EdE2D891e8297795264fDe62ADBB75' +LogBackfillBatchSize = 1000 +LogPollInterval = '3s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 2 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '5 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 5 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '2s' +DatabaseTimeout = '2s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '500ms' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Polygon Mainnet (137)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 500 +FinalityTagEnabled = false +LinkContractAddress = '0xb0897686c545045aFc77CF20eC7A532E3120E0F1' +LogBackfillBatchSize = 1000 +LogPollInterval = '1s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 5 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 100 +RPCBlockQueryDelay = 10 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 5000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '30 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '30 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '20 gwei' +BumpPercent = 20 +BumpThreshold = 5 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 2000 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Fantom Mainnet (250)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x6F43FF82CCA38001B6699a8AC47A2d0E66939407' +LogBackfillBatchSize = 1000 +LogPollInterval = '1s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 2 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 3800000 +``` + +

+ +
Kroma Mainnet (255)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'kroma' +FinalityDepth = 400 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '2s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '40s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '30s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 wei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '100 wei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 400 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
zkSync Goerli (280)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'zksync' +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '5s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '1m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '18.446744073709551615 ether' +PriceMin = '0' +LimitDefault = 3500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 5 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
zkSync Mainnet (324)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'zksync' +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '5s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '1m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '18.446744073709551615 ether' +PriceMin = '0' +LimitDefault = 3500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 5 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Optimism Goerli (420)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'optimismBedrock' +FinalityDepth = 200 +FinalityTagEnabled = false +LinkContractAddress = '0xdc2CC710e42857672E7907CF474a69B63B93089f' +LogBackfillBatchSize = 1000 +LogPollInterval = '2s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '40s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '30s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 wei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '100 wei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 60 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 300 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 6500000 +``` + +

+ +
Metis Rinkeby (588)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'metis' +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '0' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 0 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 0 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Klaytn Testnet (1001)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '750 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 0 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Metis Mainnet (1088)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'metis' +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '0' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 0 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 0 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Polygon zkEVM Mainnet (1101)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '30s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '6m0s' +RPCDefaultBatchSize = 100 +RPCBlockQueryDelay = 15 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '3m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '100 mwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '100 mwei' +BumpPercent = 40 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 12 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 50 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
WeMix Mainnet (1111)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'wemix' +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '3s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '100 gwei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
WeMix Testnet (1112)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'wemix' +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '3s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '100 gwei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Simulated (1337)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '100' +NonceAutoSync = true +NoNewHeadsThreshold = '0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '0s' +ResendAfterThreshold = '0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'FixedPrice' +PriceDefault = '20 gwei' +PriceMax = '100 micro' +PriceMin = '0' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 0 +EIP1559DynamicFees = false +FeeCapDefault = '100 micro' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 10 +MaxBufferSize = 100 +SamplingInterval = '0s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Polygon zkEVM Goerli (1442)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '30s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '12m0s' +RPCDefaultBatchSize = 100 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '3m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '50 mwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '20 mwei' +BumpPercent = 40 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 12 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 50 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Kroma Sepolia (2358)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'kroma' +FinalityDepth = 400 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '2s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '40s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '30s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 wei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '100 wei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 400 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Fantom Testnet (4002)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0xfaFedb041c0DD4fA2Dc0d87a6B0979Ee6FA7af5F' +LogBackfillBatchSize = 1000 +LogPollInterval = '1s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 2 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 3800000 +``` + +

+ +
Klaytn Mainnet (8217)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '750 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 0 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Base Mainnet (8453)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'optimismBedrock' +FinalityDepth = 200 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '2s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '40s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '30s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 wei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '100 wei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 300 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 6500000 +``` + +

+ +
Arbitrum Mainnet (42161)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'arbitrum' +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0xf97f4df75117a78c1A5a0DBb814Af92458539FB4' +LogBackfillBatchSize = 1000 +LogPollInterval = '1s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'Arbitrum' +PriceDefault = '100 mwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '0' +LimitDefault = 500000 +LimitMax = 1000000000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 0 +EIP1559DynamicFees = false +FeeCapDefault = '1 micro' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 0 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 14500000 +``` + +

+ +
Celo Mainnet (42220)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'celo' +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '5s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '1m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '5 gwei' +PriceMax = '500 gwei' +PriceMin = '5 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '2 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 12 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 50 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Avalanche Fuji (43113)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 1 +FinalityTagEnabled = false +LinkContractAddress = '0x0b9d5D9136855f6FEc3c0993feE6E9CE8a297846' +LogBackfillBatchSize = 1000 +LogPollInterval = '3s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 2 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '25 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '25 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Avalanche Mainnet (43114)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 1 +FinalityTagEnabled = false +LinkContractAddress = '0x5947BB275c521040051D82396192181b413227A3' +LogBackfillBatchSize = 1000 +LogPollInterval = '3s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 2 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '25 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '25 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Celo Testnet (44787)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'celo' +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '5s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '1m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '5 gwei' +PriceMax = '500 gwei' +PriceMin = '5 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '2 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 50 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Linea Goerli (59140)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 15 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '3m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 40 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Linea Mainnet (59144)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 300 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '3m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '400 mwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 40 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 350 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Polygon Mumbai (80001)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 500 +FinalityTagEnabled = false +LinkContractAddress = '0x326C977E6efc84E512bB9C30f76E30c160eD06FB' +LogBackfillBatchSize = 1000 +LogPollInterval = '1s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 5 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 100 +RPCBlockQueryDelay = 10 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 5000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '1 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '20 gwei' +BumpPercent = 20 +BumpThreshold = 5 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 24 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 2000 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Base Goerli (84531)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'optimismBedrock' +FinalityDepth = 200 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '2s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '40s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '30s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 wei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '100 wei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 60 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 300 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 6500000 +``` + +

+ +
Base Sepolia (84532)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'optimismBedrock' +FinalityDepth = 200 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '2s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '40s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '30s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 wei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '100 wei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 60 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 300 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 6500000 +``` + +

+ +
Arbitrum Rinkeby (421611)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'arbitrum' +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x615fBe6372676474d9e6933d310469c9b68e9726' +LogBackfillBatchSize = 1000 +LogPollInterval = '1s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'Arbitrum' +PriceDefault = '100 mwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '0' +LimitDefault = 500000 +LimitMax = 1000000000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 0 +EIP1559DynamicFees = false +FeeCapDefault = '1 micro' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 0 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Arbitrum Goerli (421613)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'arbitrum' +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0xd14838A68E8AFBAdE5efb411d5871ea0011AFd28' +LogBackfillBatchSize = 1000 +LogPollInterval = '1s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'Arbitrum' +PriceDefault = '100 mwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '0' +LimitDefault = 500000 +LimitMax = 1000000000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 0 +EIP1559DynamicFees = false +FeeCapDefault = '1 micro' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 0 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 14500000 +``` + +

+ +
Arbitrum Sepolia (421614)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'arbitrum' +FinalityDepth = 50 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '1s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'Arbitrum' +PriceDefault = '100 mwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '0' +LimitDefault = 500000 +LimitMax = 1000000000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 0 +EIP1559DynamicFees = false +FeeCapDefault = '1 micro' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 0 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 10 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 14500000 +``` + +

+ +
Scroll Sepolia (534351)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'scroll' +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '3s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '0' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 0 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 0 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 50 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Scroll Mainnet (534352)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +ChainType = 'scroll' +FinalityDepth = 1 +FinalityTagEnabled = false +LogBackfillBatchSize = 1000 +LogPollInterval = '3s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'SuggestedPrice' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '0' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 0 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 0 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 50 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 1 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Ethereum Sepolia (11155111)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x779877A7B0D9E8603169DdbD7836e478b4624789' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Harmony Mainnet (1666600000)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x218532a12a389a4a92fC0C5Fb22901D1c19198aA' +LogBackfillBatchSize = 1000 +LogPollInterval = '2s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '5 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ +
Harmony Testnet (1666700000)

+ +```toml +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x8b12Ac23BFe11cAb03a634C1F117D64a7f2cFD3e' +LogBackfillBatchSize = 1000 +LogPollInterval = '2s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 1 +MinContractPayment = '0.00001 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '30s' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[BalanceMonitor] +Enabled = true + +[GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '5 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = false +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 8 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 60 + +[HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[OCR2] +[OCR2.Automation] +GasLimit = 5400000 +``` + +

+ + +### ChainID +```toml +ChainID = '1' # Example +``` +ChainID is the EVM chain ID. Mandatory. + +### Enabled +```toml +Enabled = true # Default +``` +Enabled enables this chain. + +### AutoCreateKey +```toml +AutoCreateKey = true # Default +``` +AutoCreateKey, if set to true, will ensure that there is always at least one transmit key for the given chain. + +### BlockBackfillDepth +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +BlockBackfillDepth = 10 # Default +``` +BlockBackfillDepth specifies the number of blocks before the current HEAD that the log broadcaster will try to re-consume logs from. + +### BlockBackfillSkip +```toml +BlockBackfillSkip = false # Default +``` +BlockBackfillSkip enables skipping of very long backfills. + +### ChainType +```toml +ChainType = 'arbitrum' # Example +``` +ChainType is automatically detected from chain ID. Set this to force a certain chain type regardless of chain ID. +Available types: arbitrum, metis, optimismBedrock, xdai, celo, kroma, wemix, zksync, scroll + +### FinalityDepth +```toml +FinalityDepth = 50 # Default +``` +FinalityDepth is the number of blocks after which an ethereum transaction is considered "final". Note that the default is automatically set based on chain ID so it should not be necessary to change this under normal operation. +BlocksConsideredFinal determines how deeply we look back to ensure that transactions are confirmed onto the longest chain +There is not a large performance penalty to setting this relatively high (on the order of hundreds) +It is practically limited by the number of heads we store in the database and should be less than this with a comfortable margin. +If a transaction is mined in a block more than this many blocks ago, and is reorged out, we will NOT retransmit this transaction and undefined behaviour can occur including gaps in the nonce sequence that require manual intervention to fix. 
+Therefore this number represents a number of blocks we consider large enough that no re-org this deep will ever feasibly happen. + +Special cases: +`FinalityDepth`=0 would imply that transactions can be final even before they were mined into a block. This is not supported. +`FinalityDepth`=1 implies that transactions are final after we see them in one block. + +Examples: + +Transaction sending: +A transaction is sent at block height 42 + +`FinalityDepth` is set to 5 +A re-org occurs at height 44 starting at block 41, transaction is marked for rebroadcast +A re-org occurs at height 46 starting at block 41, transaction is marked for rebroadcast +A re-org occurs at height 47 starting at block 41, transaction is NOT marked for rebroadcast + +### FinalityTagEnabled +```toml +FinalityTagEnabled = false # Default +``` +FinalityTagEnabled means that the chain supports the finalized block tag when querying for a block. If FinalityTagEnabled is set to true for a chain, then FinalityDepth field is ignored. +Finality for a block is solely defined by the finality related tags provided by the chain's RPC API. This is a placeholder and hasn't been implemented yet. + +### FlagsContractAddress +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +FlagsContractAddress = '0xae4E781a6218A8031764928E88d457937A954fC3' # Example +``` +FlagsContractAddress can optionally point to a [Flags contract](../contracts/src/v0.8/Flags.sol). If set, the node will lookup that contract for each job that supports flags contracts (currently OCR and FM jobs are supported). If the job's contractAddress is set as hibernating in the FlagsContractAddress address, it overrides the standard update parameters (such as heartbeat/threshold). + +### LinkContractAddress +```toml +LinkContractAddress = '0x538aAaB4ea120b2bC2fe5D296852D948F07D849e' # Example +``` +LinkContractAddress is the canonical ERC-677 PLI token contract address on the given chain. 
Note that this is usually autodetected from chain ID. + +### LogBackfillBatchSize +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +LogBackfillBatchSize = 1000 # Default +``` +LogBackfillBatchSize sets the batch size for calling FilterLogs when we backfill missing logs. + +### LogPollInterval +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +LogPollInterval = '15s' # Default +``` +LogPollInterval works in conjunction with Feature.LogPoller. Controls how frequently the log poller polls for logs. Defaults to the block production rate. + +### LogKeepBlocksDepth +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +LogKeepBlocksDepth = 100000 # Default +``` +LogKeepBlocksDepth works in conjunction with Feature.LogPoller. Controls how many blocks the poller will keep, must be greater than FinalityDepth+1. + +### MinContractPayment +```toml +MinContractPayment = '10000000000000 juels' # Default +``` +MinContractPayment is the minimum payment in PLI required to execute a direct request job. This can be overridden on a per-job basis. + +### MinIncomingConfirmations +```toml +MinIncomingConfirmations = 3 # Default +``` +MinIncomingConfirmations is the minimum required confirmations before a log event will be consumed. + +### NonceAutoSync +```toml +NonceAutoSync = true # Default +``` +NonceAutoSync enables automatic nonce syncing on startup. Plugin nodes will automatically try to sync its local nonce with the remote chain on startup and fast forward if necessary. This is almost always safe but can be disabled in exceptional cases by setting this value to false. + +### NoNewHeadsThreshold +```toml +NoNewHeadsThreshold = '3m' # Default +``` +NoNewHeadsThreshold controls how long to wait after receiving no new heads before `NodePool` marks rpc endpoints as +out-of-sync, and `HeadTracker` logs warnings. 
+ +Set to zero to disable out-of-sync checking. + +### OperatorFactoryAddress +```toml +OperatorFactoryAddress = '0xa5B85635Be42F21f94F28034B7DA440EeFF0F418' # Example +``` +OperatorFactoryAddress is the address of the canonical operator forwarder contract on the given chain. Note that this is usually autodetected from chain ID. + +### RPCDefaultBatchSize +```toml +RPCDefaultBatchSize = 250 # Default +``` +RPCDefaultBatchSize is the default batch size for batched RPC calls. + +### RPCBlockQueryDelay +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +RPCBlockQueryDelay = 1 # Default +``` +RPCBlockQueryDelay controls the number of blocks to trail behind head in the block history estimator and balance monitor. +For example, if this is set to 3, and we receive block 10, block history estimator will fetch block 7. + +CAUTION: You might be tempted to set this to 0 to use the latest possible +block, but it is possible to receive a head BEFORE that block is actually +available from the connected node via RPC, due to race conditions in the code of the remote ETH node. In this case you will get false +"zero" blocks that are missing transactions. + +## EVM.Transactions +```toml +[EVM.Transactions] +ForwardersEnabled = false # Default +MaxInFlight = 16 # Default +MaxQueued = 250 # Default +ReaperInterval = '1h' # Default +ReaperThreshold = '168h' # Default +ResendAfterThreshold = '1m' # Default +``` + + +### ForwardersEnabled +```toml +ForwardersEnabled = false # Default +``` +ForwardersEnabled enables or disables sending transactions through forwarder contracts. + +### MaxInFlight +```toml +MaxInFlight = 16 # Default +``` +MaxInFlight controls how many transactions are allowed to be "in-flight" i.e. broadcast but unconfirmed at any one time. You can consider this a form of transaction throttling. 
+ +The default is set conservatively at 16 because this is a pessimistic minimum that both geth and parity will hold without evicting local transactions. If your node is falling behind and you need higher throughput, you can increase this setting, but you MUST make sure that your ETH node is configured properly otherwise you can get nonce gapped and your node will get stuck. + +0 value disables the limit. Use with caution. + +### MaxQueued +```toml +MaxQueued = 250 # Default +``` +MaxQueued is the maximum number of unbroadcast transactions per key that are allowed to be enqueued before jobs will start failing and rejecting send of any further transactions. This represents a sanity limit and generally indicates a problem with your ETH node (transactions are not getting mined). + +Do NOT blindly increase this value thinking it will fix things if you start hitting this limit because transactions are not getting mined, you will instead only make things worse. + +In deployments with very high burst rates, or on chains with large re-orgs, you _may_ consider increasing this. + +0 value disables any limit on queue size. Use with caution. + +### ReaperInterval +```toml +ReaperInterval = '1h' # Default +``` +ReaperInterval controls how often the EthTx reaper will run. + +### ReaperThreshold +```toml +ReaperThreshold = '168h' # Default +``` +ReaperThreshold indicates how old an EthTx ought to be before it can be reaped. + +### ResendAfterThreshold +```toml +ResendAfterThreshold = '1m' # Default +``` +ResendAfterThreshold controls how long to wait before re-broadcasting a transaction that has not yet been confirmed. + +## EVM.BalanceMonitor +```toml +[EVM.BalanceMonitor] +Enabled = true # Default +``` + + +### Enabled +```toml +Enabled = true # Default +``` +Enabled balance monitoring for all keys. 
+ +## EVM.GasEstimator +```toml +[EVM.GasEstimator] +Mode = 'BlockHistory' # Default +PriceDefault = '20 gwei' # Default +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' # Default +PriceMin = '1 gwei' # Default +LimitDefault = 500_000 # Default +LimitMax = 500_000 # Default +LimitMultiplier = '1.0' # Default +LimitTransfer = 21_000 # Default +BumpMin = '5 gwei' # Default +BumpPercent = 20 # Default +BumpThreshold = 3 # Default +BumpTxDepth = 16 # Example +EIP1559DynamicFees = false # Default +FeeCapDefault = '100 gwei' # Default +TipCapDefault = '1 wei' # Default +TipCapMin = '1 wei' # Default +``` + + +### Mode +```toml +Mode = 'BlockHistory' # Default +``` +Mode controls what type of gas estimator is used. + +- `FixedPrice` uses static configured values for gas price (can be set via API call). +- `BlockHistory` dynamically adjusts default gas price based on heuristics from mined blocks. +- `L2Suggested` mode is deprecated and replaced with `SuggestedPrice`. +- `SuggestedPrice` is a mode which uses the gas price suggested by the rpc endpoint via `eth_gasPrice`. +- `Arbitrum` is a special mode only for use with Arbitrum blockchains. It uses the suggested gas price (up to `ETH_MAX_GAS_PRICE_WEI`, with `1000 gwei` default) as well as an estimated gas limit (up to `ETH_GAS_LIMIT_MAX`, with `1,000,000,000` default). + +Plugin nodes decide what gas price to use using an `Estimator`. It ships with several simple and battle-hardened built-in estimators that should work well for almost all use-cases. Note that estimators will change their behaviour slightly depending on if you are in EIP-1559 mode or not. + +You can also use your own estimator for gas price by selecting the `FixedPrice` estimator and using the exposed API to set the price. + +An important point to note is that the Plugin node does _not_ ship with built-in support for go-ethereum's `estimateGas` call. 
This is for several reasons, including security and reliability. We have found empirically that it is not generally safe to rely on the remote ETH node's idea of what gas price should be. + +### PriceDefault +```toml +PriceDefault = '20 gwei' # Default +``` +PriceDefault is the default gas price to use when submitting transactions to the blockchain. Will be overridden by the built-in `BlockHistoryEstimator` if enabled, and might be increased if gas bumping is enabled. + +(Only applies to legacy transactions) + +Can be used with the `plugin setgasprice` to be updated while the node is still running. + +### PriceMax +```toml +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' # Default +``` +PriceMax is the maximum gas price. Plugin nodes will never pay more than this for a transaction. +This applies to both legacy and EIP1559 transactions. +Note that it is impossible to disable the maximum limit. Setting this value to zero will prevent paying anything for any transaction (which can be useful in some rare cases). +Most chains by default have the maximum set to 2**256-1 Wei which is the maximum allowed gas price on EVM-compatible chains, and is so large it may as well be unlimited. + +### PriceMin +```toml +PriceMin = '1 gwei' # Default +``` +PriceMin is the minimum gas price. Plugin nodes will never pay less than this for a transaction. + +(Only applies to legacy transactions) + +It is possible to force the Plugin node to use a fixed gas price by setting a combination of these, e.g. + +```toml +EIP1559DynamicFees = false +PriceMax = 100 +PriceMin = 100 +PriceDefault = 100 +BumpThreshold = 0 +Mode = 'FixedPrice' +``` + +### LimitDefault +```toml +LimitDefault = 500_000 # Default +``` +LimitDefault sets default gas limit for outgoing transactions. This should not need to be changed in most cases. +Some job types, such as Keeper jobs, might set their own gas limit unrelated to this value. 
+ +### LimitMax +```toml +LimitMax = 500_000 # Default +``` +LimitMax sets a maximum for _estimated_ gas limits. This currently only applies to `Arbitrum` `GasEstimatorMode`. + +### LimitMultiplier +```toml +LimitMultiplier = '1.0' # Default +``` +LimitMultiplier is the factor by which a transaction's GasLimit is multiplied before transmission. So if the value is 1.1, and the GasLimit for a transaction is 10, 10% will be added before transmission. + +This factor is always applied, so includes L2 transactions which use a default gas limit of 1 and is also applied to `LimitDefault`. + +### LimitTransfer +```toml +LimitTransfer = 21_000 # Default +``` +LimitTransfer is the gas limit used for an ordinary ETH transfer. + +### BumpMin +```toml +BumpMin = '5 gwei' # Default +``` +BumpMin is the minimum fixed amount of wei by which gas is bumped on each transaction attempt. + +### BumpPercent +```toml +BumpPercent = 20 # Default +``` +BumpPercent is the percentage by which to bump gas on a transaction that has exceeded `BumpThreshold`. The larger of `BumpPercent` and `BumpMin` is taken for gas bumps. + +The `SuggestedPriceEstimator` adds the larger of `BumpPercent` and `BumpMin` on top of the price provided by the RPC when bumping a transaction's gas. + +### BumpThreshold +```toml +BumpThreshold = 3 # Default +``` +BumpThreshold is the number of blocks to wait for a transaction stuck in the mempool before automatically bumping the gas price. Set to 0 to disable gas bumping completely. + +### BumpTxDepth +```toml +BumpTxDepth = 16 # Example +``` +BumpTxDepth is the number of transactions to gas bump starting from oldest. Set to 0 for no limit (i.e. bump all). Can not be greater than EVM.Transactions.MaxInFlight. If not set, defaults to EVM.Transactions.MaxInFlight. + +### EIP1559DynamicFees +```toml +EIP1559DynamicFees = false # Default +``` +EIP1559DynamicFees forces EIP-1559 transaction mode. Enabling EIP-1559 mode can help reduce gas costs on chains that support it. 
This is supported only on official Ethereum mainnet and testnets. It is not recommended to enable this setting on Polygon because the EIP-1559 fee market appears to be broken on all Polygon chains and EIP-1559 transactions are less likely to be included than legacy transactions. + +#### Technical details + +Plugin nodes include experimental support for submitting transactions using type 0x2 (EIP-1559) envelope. + +EIP-1559 mode is enabled by default on the Ethereum Mainnet, but can be enabled on a per-chain basis or globally. + +This might help to save gas on spikes. Plugin nodes should react faster on the upleg and avoid overpaying on the downleg. It might also be possible to set `EVM.GasEstimator.BlockHistory.BatchSize` to a smaller value such as 12 or even 6 because tip cap should be a more consistent indicator of inclusion time than total gas price. This would make Plugin nodes more responsive and should reduce response time variance. Some experimentation is required to find optimum settings. + +Set with caution, if you set this on a chain that does not actually support EIP-1559 your node will be broken. + +In EIP-1559 mode, the total price for the transaction is the minimum of base fee + tip cap and fee cap. More information can be found on the [official EIP](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md). + +Plugin's implementation of EIP-1559 works as follows: + +If you are using FixedPriceEstimator: +- With gas bumping disabled, it will submit all transactions with `feecap=PriceMax` and `tipcap=GasTipCapDefault` +- With gas bumping enabled, it will submit all transactions initially with `feecap=GasFeeCapDefault` and `tipcap=GasTipCapDefault`. 
+ +If you are using BlockHistoryEstimator (default for most chains): +- With gas bumping disabled, it will submit all transactions with `feecap=PriceMax` and `tipcap=<calculated price>` +- With gas bumping enabled (default for most chains) it will submit all transactions initially with `feecap = ( current block base fee * (1.125 ^ N) + tipcap )` where N is configurable by setting `EVM.GasEstimator.BlockHistory.EIP1559FeeCapBufferBlocks` but defaults to `gas bump threshold+1` and `tipcap=<calculated price>` + +Bumping works as follows: + +- Increase tipcap by `max(tipcap * (1 + BumpPercent), tipcap + BumpMin)` +- Increase feecap by `max(feecap * (1 + BumpPercent), feecap + BumpMin)` + +A quick note on terminology - Plugin nodes use the same terms used internally by go-ethereum source code to describe various prices. This is not the same as the externally used terms. For reference: + +- Base Fee Per Gas = BaseFeePerGas +- Max Fee Per Gas = FeeCap +- Max Priority Fee Per Gas = TipCap + +In EIP-1559 mode, the following changes occur to how configuration works: + +- All new transactions will be sent as type 0x2 transactions specifying a TipCap and FeeCap. Be aware that existing pending legacy transactions will continue to be gas bumped in legacy mode. 
+- `BlockHistoryEstimator` will apply its calculations (gas percentile etc) to the TipCap and this value will be used for new transactions (GasPrice will be ignored) +- `FixedPriceEstimator` will use `GasTipCapDefault` instead of `GasPriceDefault` for the tip cap +- `FixedPriceEstimator` will use `GasFeeCapDefault` instead of `GasPriceDefault` for the fee cap +- `PriceMin` is ignored for new transactions and `GasTipCapMinimum` is used instead (default 0) +- `PriceMax` still represents the absolute upper limit that Plugin will ever spend (total) on a single tx +- `Keeper.GasPriceBufferPercent` is ignored in EIP-1559 mode and `Keeper.GasTipCapBufferPercent` is used instead + +### FeeCapDefault +```toml +FeeCapDefault = '100 gwei' # Default +``` +FeeCapDefault controls the fixed initial fee cap, if EIP1559 mode is enabled and `FixedPrice` gas estimator is used. + +### TipCapDefault +```toml +TipCapDefault = '1 wei' # Default +``` +TipCapDefault is the default gas tip to use when submitting transactions to the blockchain. Will be overridden by the built-in `BlockHistoryEstimator` if enabled, and might be increased if gas bumping is enabled. + +(Only applies to EIP-1559 transactions) + +### TipCapMin +```toml +TipCapMin = '1 wei' # Default +``` +TipCapMinimum is the minimum gas tip to use when submitting transactions to the blockchain. + +(Only applies to EIP-1559 transactions) + +## EVM.GasEstimator.LimitJobType +```toml +[EVM.GasEstimator.LimitJobType] +OCR = 100_000 # Example +OCR2 = 100_000 # Example +DR = 100_000 # Example +VRF = 100_000 # Example +FM = 100_000 # Example +Keeper = 100_000 # Example +``` + + +### OCR +```toml +OCR = 100_000 # Example +``` +OCR overrides LimitDefault for OCR jobs. + +### OCR2 +```toml +OCR2 = 100_000 # Example +``` +OCR2 overrides LimitDefault for OCR2 jobs. + +### DR +```toml +DR = 100_000 # Example +``` +DR overrides LimitDefault for Direct Request jobs. 
+ +### VRF +```toml +VRF = 100_000 # Example +``` +VRF overrides LimitDefault for VRF jobs. + +### FM +```toml +FM = 100_000 # Example +``` +FM overrides LimitDefault for Flux Monitor jobs. + +### Keeper +```toml +Keeper = 100_000 # Example +``` +Keeper overrides LimitDefault for Keeper jobs. + +## EVM.GasEstimator.BlockHistory +```toml +[EVM.GasEstimator.BlockHistory] +BatchSize = 25 # Default +BlockHistorySize = 8 # Default +CheckInclusionBlocks = 12 # Default +CheckInclusionPercentile = 90 # Default +EIP1559FeeCapBufferBlocks = 13 # Example +TransactionPercentile = 60 # Default +``` +These settings allow you to configure how your node calculates gas prices when using the block history estimator. +In most cases, leaving these values at their defaults should give good results. + +### BatchSize +```toml +BatchSize = 25 # Default +``` +BatchSize sets the maximum number of blocks to fetch in one batch in the block history estimator. +If the `BatchSize` variable is set to 0, it defaults to `EVM.RPCDefaultBatchSize`. + +### BlockHistorySize +```toml +BlockHistorySize = 8 # Default +``` +BlockHistorySize controls the number of past blocks to keep in memory to use as a basis for calculating a percentile gas price. + +### CheckInclusionBlocks +```toml +CheckInclusionBlocks = 12 # Default +``` +CheckInclusionBlocks is the number of recent blocks to use to detect if there is a transaction propagation/connectivity issue, and to prevent bumping in these cases. +This can help avoid the situation where RPC nodes are not propagating transactions for some non-price-related reason (e.g. go-ethereum bug, networking issue etc) and bumping gas would not help. + +Set to zero to disable connectivity checking completely. 
+ +### CheckInclusionPercentile +```toml +CheckInclusionPercentile = 90 # Default +``` +CheckInclusionPercentile controls the percentile that a transaction must have been higher than for all the blocks in the inclusion check window in order to register as a connectivity issue. + +For example, if CheckInclusionBlocks=12 and CheckInclusionPercentile=90 then further bumping will be prevented for any transaction with any attempt that has a higher price than the 90th percentile for the most recent 12 blocks. + +### EIP1559FeeCapBufferBlocks +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +EIP1559FeeCapBufferBlocks = 13 # Example +``` +EIP1559FeeCapBufferBlocks controls the buffer blocks to add to the current base fee when sending a transaction. By default, the gas bumping threshold + 1 block is used. + +(Only applies to EIP-1559 transactions) + +### TransactionPercentile +```toml +TransactionPercentile = 60 # Default +``` +TransactionPercentile specifies gas price to choose. E.g. if the block history contains four transactions with gas prices `[100, 200, 300, 400]` then picking 25 for this number will give a value of 200. If the calculated gas price is higher than `GasPriceDefault` then the higher price will be used as the base price for new transactions. + +Must be in range 0-100. + +Only has an effect if gas updater is enabled. + +Think of this number as an indicator of how aggressive you want your node to price its transactions. + +Setting this number higher will cause the Plugin node to select higher gas prices. + +Setting it lower will tend to set lower gas prices. + +## EVM.HeadTracker +```toml +[EVM.HeadTracker] +HistoryDepth = 100 # Default +MaxBufferSize = 3 # Default +SamplingInterval = '1s' # Default +``` +The head tracker continually listens for new heads from the chain. + +In addition to these settings, it logs warnings if `EVM.NoNewHeadsThreshold` is exceeded without any new blocks being emitted. 
+ +### HistoryDepth +```toml +HistoryDepth = 100 # Default +``` +HistoryDepth tracks the top N block numbers to keep in the `heads` database table. +Note that this can easily result in MORE than N records since in the case of re-orgs we keep multiple heads for a particular block height. +This number should be at least as large as `FinalityDepth`. +There may be a small performance penalty to setting this to something very large (10,000+) + +### MaxBufferSize +```toml +MaxBufferSize = 3 # Default +``` +MaxBufferSize is the maximum number of heads that may be +buffered in front of the head tracker before older heads start to be +dropped. You may think of it as something like the maximum permittable "lag" +for the head tracker before we start dropping heads to keep up. + +### SamplingInterval +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +SamplingInterval = '1s' # Default +``` +SamplingInterval means that head tracker callbacks will at maximum be made once in every window of this duration. This is a performance optimisation for fast chains. Set to 0 to disable sampling entirely. + +## EVM.KeySpecific +```toml +[[EVM.KeySpecific]] +Key = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example +GasEstimator.PriceMax = '79 gwei' # Example +``` + + +### Key +```toml +Key = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example +``` +Key is the account to apply these settings to + +### PriceMax +```toml +GasEstimator.PriceMax = '79 gwei' # Example +``` +GasEstimator.PriceMax overrides the maximum gas price for this key. See EVM.GasEstimator.PriceMax. + +## EVM.NodePool +```toml +[EVM.NodePool] +PollFailureThreshold = 5 # Default +PollInterval = '10s' # Default +SelectionMode = 'HighestHead' # Default +SyncThreshold = 5 # Default +LeaseDuration = '0s' # Default +``` +The node pool manages multiple RPC endpoints. 
+ +In addition to these settings, `EVM.NoNewHeadsThreshold` controls how long to wait after receiving no new heads before marking the node as out-of-sync. + +### PollFailureThreshold +```toml +PollFailureThreshold = 5 # Default +``` +PollFailureThreshold indicates how many consecutive polls must fail in order to mark a node as unreachable. + +Set to zero to disable poll checking. + +### PollInterval +```toml +PollInterval = '10s' # Default +``` +PollInterval controls how often to poll the node to check for liveness. + +Set to zero to disable poll checking. + +### SelectionMode +```toml +SelectionMode = 'HighestHead' # Default +``` +SelectionMode controls node selection strategy: +- HighestHead: use the node with the highest head number +- RoundRobin: rotate through nodes, per-request +- PriorityLevel: use the node with the smallest order number +- TotalDifficulty: use the node with the greatest total difficulty + +### SyncThreshold +```toml +SyncThreshold = 5 # Default +``` +SyncThreshold controls how far a node may lag behind the best node before being marked out-of-sync. +Depending on `SelectionMode`, this represents a difference in the number of blocks (`HighestHead`, `RoundRobin`, `PriorityLevel`), or total difficulty (`TotalDifficulty`). + +Set to 0 to disable this check. + +### LeaseDuration +```toml +LeaseDuration = '0s' # Default +``` +LeaseDuration is the minimum duration that the selected "best" node (as defined by SelectionMode) will be used, +before switching to a better one if available. It also controls how often the lease check is done. +Setting this to a low value (under 1m) might cause RPC to switch too aggressively. 
+Recommended value is over 5m + +Set to '0s' to disable + +## EVM.OCR +```toml +[EVM.OCR] +ContractConfirmations = 4 # Default +ContractTransmitterTransmitTimeout = '10s' # Default +DatabaseTimeout = '10s' # Default +DeltaCOverride = "168h" # Default +DeltaCJitterOverride = "1h" # Default +ObservationGracePeriod = '1s' # Default +``` + + +### ContractConfirmations +```toml +ContractConfirmations = 4 # Default +``` +ContractConfirmations sets `OCR.ContractConfirmations` for this EVM chain. + +### ContractTransmitterTransmitTimeout +```toml +ContractTransmitterTransmitTimeout = '10s' # Default +``` +ContractTransmitterTransmitTimeout sets `OCR.ContractTransmitterTransmitTimeout` for this EVM chain. + +### DatabaseTimeout +```toml +DatabaseTimeout = '10s' # Default +``` +DatabaseTimeout sets `OCR.DatabaseTimeout` for this EVM chain. + +### DeltaCOverride +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +DeltaCOverride = "168h" # Default +``` +DeltaCOverride (and `DeltaCJitterOverride`) determine the config override DeltaC. +DeltaC is the maximum age of the latest report in the contract. If the maximum age is exceeded, a new report will be +created by the report generation protocol. + +### DeltaCJitterOverride +:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._ +```toml +DeltaCJitterOverride = "1h" # Default +``` +DeltaCJitterOverride is the range for jitter to add to `DeltaCOverride`. + +### ObservationGracePeriod +```toml +ObservationGracePeriod = '1s' # Default +``` +ObservationGracePeriod sets `OCR.ObservationGracePeriod` for this EVM chain. + +## EVM.Nodes +```toml +[[EVM.Nodes]] +Name = 'foo' # Example +WSURL = 'wss://web.socket/test' # Example +HTTPURL = 'https://foo.web' # Example +SendOnly = false # Default +Order = 100 # Default +``` + + +### Name +```toml +Name = 'foo' # Example +``` +Name is a unique (per-chain) identifier for this node. 
+ +### WSURL +```toml +WSURL = 'wss://web.socket/test' # Example +``` +WSURL is the WS(S) endpoint for this node. Required for primary nodes. + +### HTTPURL +```toml +HTTPURL = 'https://foo.web' # Example +``` +HTTPURL is the HTTP(S) endpoint for this node. Required for all nodes. + +### SendOnly +```toml +SendOnly = false # Default +``` +SendOnly limits usage to sending transaction broadcasts only. With this enabled, only HTTPURL is required, and WSURL is not used. + +### Order +```toml +Order = 100 # Default +``` +Order of the node in the pool, will take effect if `SelectionMode` is `PriorityLevel` or will be used as a tie-breaker for `HighestHead` and `TotalDifficulty` + +## EVM.OCR2.Automation +```toml +[EVM.OCR2.Automation] +GasLimit = 5400000 # Default +``` + + +### GasLimit +```toml +GasLimit = 5400000 # Default +``` +GasLimit controls the gas limit for transmit transactions from ocr2automation job. + +## EVM.ChainWriter +```toml +[EVM.ChainWriter] +FromAddress = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example +ForwarderAddress = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example +``` + + +### FromAddress +```toml +FromAddress = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example +``` +FromAddress is the address of the transmitter key to use for workflow writes. + +### ForwarderAddress +```toml +ForwarderAddress = '0x2a3e23c6f242F5345320814aC8a1b4E58707D292' # Example +``` +ForwarderAddress is the keystone forwarder contract address on chain. 
+ +## Cosmos +```toml +[[Cosmos]] +ChainID = 'Malaga-420' # Example +Enabled = true # Default +Bech32Prefix = 'wasm' # Default +BlockRate = '6s' # Default +BlocksUntilTxTimeout = 30 # Default +ConfirmPollPeriod = '1s' # Default +FallbackGasPrice = '0.015' # Default +GasToken = 'ucosm' # Default +GasLimitMultiplier = '1.5' # Default +MaxMsgsPerBatch = 100 # Default +OCR2CachePollPeriod = '4s' # Default +OCR2CacheTTL = '1m' # Default +TxMsgTimeout = '10m' # Default +``` + + +### ChainID +```toml +ChainID = 'Malaga-420' # Example +``` +ChainID is the Cosmos chain ID. Mandatory. + +### Enabled +```toml +Enabled = true # Default +``` +Enabled enables this chain. + +### Bech32Prefix +```toml +Bech32Prefix = 'wasm' # Default +``` +Bech32Prefix is the human-readable prefix for addresses on this Cosmos chain. See https://docs.cosmos.network/v0.47/spec/addresses/bech32. + +### BlockRate +```toml +BlockRate = '6s' # Default +``` +BlockRate is the average time between blocks. + +### BlocksUntilTxTimeout +```toml +BlocksUntilTxTimeout = 30 # Default +``` +BlocksUntilTxTimeout is the number of blocks to wait before giving up on the tx getting confirmed. + +### ConfirmPollPeriod +```toml +ConfirmPollPeriod = '1s' # Default +``` +ConfirmPollPeriod sets how often to check for tx confirmation. + +### FallbackGasPrice +```toml +FallbackGasPrice = '0.015' # Default +``` +FallbackGasPrice sets a fallback gas price to use when the estimator is not available. + +### GasToken +```toml +GasToken = 'ucosm' # Default +``` +GasToken is the token denomination which is being used to pay gas fees on this chain. + +### GasLimitMultiplier +```toml +GasLimitMultiplier = '1.5' # Default +``` +GasLimitMultiplier scales the estimated gas limit. + +### MaxMsgsPerBatch +```toml +MaxMsgsPerBatch = 100 # Default +``` +MaxMsgsPerBatch limits the number of messages per transaction batch. 
+
+### OCR2CachePollPeriod
+```toml
+OCR2CachePollPeriod = '4s' # Default
+```
+OCR2CachePollPeriod is the rate to poll for the OCR2 state cache.
+
+### OCR2CacheTTL
+```toml
+OCR2CacheTTL = '1m' # Default
+```
+OCR2CacheTTL is the stale OCR2 cache deadline.
+
+### TxMsgTimeout
+```toml
+TxMsgTimeout = '10m' # Default
+```
+TxMsgTimeout is the maximum age for resending transactions before they expire.
+
+## Cosmos.Nodes
+```toml
+[[Cosmos.Nodes]]
+Name = 'primary' # Example
+TendermintURL = 'http://tender.mint' # Example
+```
+
+
+### Name
+```toml
+Name = 'primary' # Example
+```
+Name is a unique (per-chain) identifier for this node.
+
+### TendermintURL
+```toml
+TendermintURL = 'http://tender.mint' # Example
+```
+TendermintURL is the HTTP(S) tendermint endpoint for this node.
+
+## Solana
+```toml
+[[Solana]]
+ChainID = 'mainnet' # Example
+Enabled = false # Default
+BalancePollPeriod = '5s' # Default
+ConfirmPollPeriod = '500ms' # Default
+OCR2CachePollPeriod = '1s' # Default
+OCR2CacheTTL = '1m' # Default
+TxTimeout = '1m' # Default
+TxRetryTimeout = '10s' # Default
+TxConfirmTimeout = '30s' # Default
+SkipPreflight = true # Default
+Commitment = 'confirmed' # Default
+MaxRetries = 0 # Default
+FeeEstimatorMode = 'fixed' # Default
+ComputeUnitPriceMax = 1000 # Default
+ComputeUnitPriceMin = 0 # Default
+ComputeUnitPriceDefault = 0 # Default
+FeeBumpPeriod = '3s' # Default
+```
+
+
+### ChainID
+```toml
+ChainID = 'mainnet' # Example
+```
+ChainID is the Solana chain ID. Must be one of: mainnet, testnet, devnet, localnet. Mandatory.
+
+### Enabled
+```toml
+Enabled = false # Default
+```
+Enabled enables this chain.
+
+### BalancePollPeriod
+```toml
+BalancePollPeriod = '5s' # Default
+```
+BalancePollPeriod is the rate to poll for SOL balance and update Prometheus metrics.
+
+### ConfirmPollPeriod
+```toml
+ConfirmPollPeriod = '500ms' # Default
+```
+ConfirmPollPeriod is the rate to poll for signature confirmation.
+ +### OCR2CachePollPeriod +```toml +OCR2CachePollPeriod = '1s' # Default +``` +OCR2CachePollPeriod is the rate to poll for the OCR2 state cache. + +### OCR2CacheTTL +```toml +OCR2CacheTTL = '1m' # Default +``` +OCR2CacheTTL is the stale OCR2 cache deadline. + +### TxTimeout +```toml +TxTimeout = '1m' # Default +``` +TxTimeout is the timeout for sending txes to an RPC endpoint. + +### TxRetryTimeout +```toml +TxRetryTimeout = '10s' # Default +``` +TxRetryTimeout is the duration for tx manager to attempt rebroadcasting to RPC, before giving up. + +### TxConfirmTimeout +```toml +TxConfirmTimeout = '30s' # Default +``` +TxConfirmTimeout is the duration to wait when confirming a tx signature, before discarding as unconfirmed. + +### SkipPreflight +```toml +SkipPreflight = true # Default +``` +SkipPreflight enables or disables preflight checks when sending txs. + +### Commitment +```toml +Commitment = 'confirmed' # Default +``` +Commitment is the confirmation level for solana state and transactions. ([documentation](https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment)) + +### MaxRetries +```toml +MaxRetries = 0 # Default +``` +MaxRetries is the maximum number of times the RPC node will automatically rebroadcast a tx. +The default is 0 for custom txm rebroadcasting method, set to -1 to use the RPC node's default retry strategy. 
+ +### FeeEstimatorMode +```toml +FeeEstimatorMode = 'fixed' # Default +``` +FeeEstimatorMode is the method used to determine the base fee + +### ComputeUnitPriceMax +```toml +ComputeUnitPriceMax = 1000 # Default +``` +ComputeUnitPriceMax is the maximum price per compute unit that a transaction can be bumped to + +### ComputeUnitPriceMin +```toml +ComputeUnitPriceMin = 0 # Default +``` +ComputeUnitPriceMin is the minimum price per compute unit that transaction can have + +### ComputeUnitPriceDefault +```toml +ComputeUnitPriceDefault = 0 # Default +``` +ComputeUnitPriceDefault is the default price per compute unit price, and the starting base fee when FeeEstimatorMode = 'fixed' + +### FeeBumpPeriod +```toml +FeeBumpPeriod = '3s' # Default +``` +FeeBumpPeriod is the amount of time before a tx is retried with a fee bump + +## Solana.Nodes +```toml +[[Solana.Nodes]] +Name = 'primary' # Example +URL = 'http://solana.web' # Example +``` + + +### Name +```toml +Name = 'primary' # Example +``` +Name is a unique (per-chain) identifier for this node. + +### URL +```toml +URL = 'http://solana.web' # Example +``` +URL is the HTTP(S) endpoint for this node. + +## Starknet +```toml +[[Starknet]] +ChainID = 'foobar' # Example +Enabled = true # Default +OCR2CachePollPeriod = '5s' # Default +OCR2CacheTTL = '1m' # Default +RequestTimeout = '10s' # Default +TxTimeout = '10s' # Default +ConfirmationPoll = '5s' # Default +``` + + +### ChainID +```toml +ChainID = 'foobar' # Example +``` +ChainID is the Starknet chain ID. + +### Enabled +```toml +Enabled = true # Default +``` +Enabled enables this chain. + +### OCR2CachePollPeriod +```toml +OCR2CachePollPeriod = '5s' # Default +``` +OCR2CachePollPeriod is the rate to poll for the OCR2 state cache. + +### OCR2CacheTTL +```toml +OCR2CacheTTL = '1m' # Default +``` +OCR2CacheTTL is the stale OCR2 cache deadline. + +### RequestTimeout +```toml +RequestTimeout = '10s' # Default +``` +RequestTimeout is the RPC client timeout. 
+
+### TxTimeout
+```toml
+TxTimeout = '10s' # Default
+```
+TxTimeout is the timeout for sending txes to an RPC endpoint.
+
+### ConfirmationPoll
+```toml
+ConfirmationPoll = '5s' # Default
+```
+ConfirmationPoll is how often the confirmer checks for tx inclusion on chain.
+
+## Starknet.Nodes
+```toml
+[[Starknet.Nodes]]
+Name = 'primary' # Example
+URL = 'http://stark.node' # Example
+```
+
+
+### Name
+```toml
+Name = 'primary' # Example
+```
+Name is a unique (per-chain) identifier for this node.
+
+### URL
+```toml
+URL = 'http://stark.node' # Example
+```
+URL is the base HTTP(S) endpoint for this node.
+
diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md
new file mode 100644
index 00000000..97d9d5ba
--- /dev/null
+++ b/docs/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Plugin
+
+First, thank you for considering contributing to Plugin.
+We value contributions of any size or type from anyone!
+The smallest of fixes can make the biggest difference.
+Please dive in. Feel free to ask questions on [the Plugin Official Discord](https://discord.gg/aSK4zew),
+open an issue, or send a pull request on GitHub.
+
+We follow an [agile development process](http://agilemanifesto.org/).
+If you run into a bug or have a problem, the best action is to open an issue on GitHub (please search for related closed issues first).
+
+If you're interested in helping out with the development cycle, feel free to tackle open issues. We've even set aside a few that are [good introductory issues](https://github.com/goplugin/pluginv3.0/issues?q=is%3Aissue+label%3A%22good+first+issue%22).
+If you see something you'd like to help with,
+reach out to us [on Discord](https://discord.gg/aSK4zew) to coordinate.
+
+## Testing
+
+Testing is core to our development philosophy.
+In building an application that will power the infrastructure of the future,
+we believe that well tested code is of the utmost importance.
+We do everything we can to ensure that [the test suite](https://circleci.com/gh/goplugin/pluginv3.0) +is stable and maintains a high level of coverage +(even if that is difficult with Go). +Please write tests for your code and make sure that the existing suite continues to pass. +If you run into trouble with this, you can always ask for tips in [our Gitter](https://gitter.im/goplugin-plugin/Lobby). + +## Code Style + +If making a change to the code, please try to follow our [style guide](https://github.com/goplugin/pluginv3.0/wiki/Code-Style-Guide). + +## More to come... diff --git a/docs/Mercury.md b/docs/Mercury.md new file mode 100644 index 00000000..5a0722b2 --- /dev/null +++ b/docs/Mercury.md @@ -0,0 +1,251 @@ +# Mercury Documentation + +## Useful Links + +[Configuration Builder](https://github.com/goplugin/the-most-amazing-mercury-contract-configuration-tool) + +[Contracts](https://github.com/goplugin/pluginv3.0/contracts/src/v0.8/llo-feeds) + +[OCR3 Config Documentation](https://github.com/goplugin/libocr/blob/master/offchainreporting2plus/internal/config/ocr3config/public_config.go) + + + + +### Example Feed Configuration + +```json +{ + "feedId": "0x14e044f932bb959cc2aa8dc1ba110c09224e639aae00264c1ffc2a0830904a3c", + "chainId": 42161, // source chain id + "contractAddress": "0x14e044f932bb959cc2aa8dc1ba110c09224e639a", // verifier contract address + "configCount": 1, // the index of this config + "signers": [ + "0x000....01", + "0x000....02", + "0x000....03", + "0x000....04" + ], // NOP signing addresses, + "transmitters": [ + "0x000....11", + "0x000....12", + "0x000....13", + "0x000....14" + ], // NOP transmitter addresses + "offchainConfig": { + "baseUSDFee": "0.1", // 10c base fee to verify the report + "deltaCertifiedCommitRequest": "1s", + "deltaGrace": "0s", + "deltaInitial": "600ms", + "deltaProgress": "2s", + "deltaResend": "10s", + "deltaRound": "250ms", + "deltaStage": "0s", + "expirationWindow": "86400", //window in in which a report can be 
verified in seconds + "f": 3, + "maxDurationObservation": "250ms", + "maxDurationQuery": "50ms", + "maxDurationShouldAcceptAttestedReport": "50ms", + "maxDurationShouldTransmitAcceptedReport": "50ms", + "rMax": "25", + "s": [ + 4 + ] + }, + "offchainConfigVersion": 30, + "onchainConfig": { + "max": "99999999999999999999999999999", + "min": "1" + } +} +``` + +## Jobs + +### Bootstrap + +**🚨 Important config** + +`relayConfig.chainID` - target chain id. (the chain we pull block numbers from) + +`contractID` - the contract address of the verifier contract. + +
Example bootstrap TOML + +```toml +type = "bootstrap" +relay = "evm" +schemaVersion = 1 +name = "$feed_name" +contractID = "$verifier_contract_address" +feedID = "$feed_id" # IMPORTANT - DON'T FORGET THIS OR IT WON'T WORK +contractConfigTrackerPollInterval = "15s" + +[relayConfig] +chainID = $evm_chain_id +fromBlock = $from_block +``` +
+ +### OCR2 + +
Example OCR2 Mercury TOML + +```toml +type = "offchainreporting2" +schemaVersion = 1 +name = "$feed_name" +forwardingAllowed = false +maxTaskDuration = "1s" +contractID = "$verifier_contract_address" +feedID = "$feed_id" +contractConfigTrackerPollInterval = "15s" +ocrKeyBundleID = "$key_bundle_id" +p2pv2Bootstrappers = [ + "$bootstrapper_address>" +] +relay = "evm" +pluginType = "mercury" +transmitterID = "$csa_public_key" + +observationSource = """ + // ncfx + ds1_payload [type=bridge name="ncfx" timeout="50ms" requestData="{\\"data\\":{\\"endpoint\\":\\"crypto-lwba\\",\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + ds1_median [type=jsonparse path="data,mid"]; + ds1_bid [type=jsonparse path="data,bid"]; + ds1_ask [type=jsonparse path="data,ask"]; + + ds1_median_multiply [type=multiply times=100000000]; + ds1_bid_multiply [type=multiply times=100000000]; + ds1_ask_multiply [type=multiply times=100000000]; + + // tiingo + ds2_payload [type=bridge name="tiingo" timeout="50ms" requestData="{\\"data\\":{\\"endpoint\\":\\"crypto-lwba\\",\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + ds2_median [type=jsonparse path="data,mid"]; + ds2_bid [type=jsonparse path="data,bid"]; + ds2_ask [type=jsonparse path="data,ask"]; + + ds2_median_multiply [type=multiply times=100000000]; + ds2_bid_multiply [type=multiply times=100000000]; + ds2_ask_multiply [type=multiply times=100000000]; + + // coinmetrics + ds3_payload [type=bridge name="coinmetrics" timeout="50ms" requestData="{\\"data\\":{\\"endpoint\\":\\"crypto-lwba\\",\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"]; + ds3_median [type=jsonparse path="data,mid"]; + ds3_bid [type=jsonparse path="data,bid"]; + ds3_ask [type=jsonparse path="data,ask"]; + + ds3_median_multiply [type=multiply times=100000000]; + ds3_bid_multiply [type=multiply times=100000000]; + ds3_ask_multiply [type=multiply times=100000000]; + + ds1_payload -> ds1_median -> ds1_median_multiply -> benchmark_price; + ds2_payload -> ds2_median -> ds2_median_multiply -> 
benchmark_price; + ds3_payload -> ds3_median -> ds3_median_multiply -> benchmark_price; + + benchmark_price [type=median allowedFaults=2 index=0]; + + ds1_payload -> ds1_bid -> ds1_bid_multiply -> bid_price; + ds2_payload -> ds2_bid -> ds2_bid_multiply -> bid_price; + ds3_payload -> ds3_bid -> ds3_bid_multiply -> bid_price; + + bid_price [type=median allowedFaults=2 index=1]; + + ds1_payload -> ds1_ask -> ds1_ask_multiply -> ask_price; + ds2_payload -> ds2_ask -> ds2_ask_multiply -> ask_price; + ds3_payload -> ds3_ask -> ds3_ask_multiply -> ask_price; + + ask_price [type=median allowedFaults=2 index=2]; +""" + +[pluginConfig] +serverURL = "$mercury_server_url" +serverPubKey = "$mercury_server_public_key" + +[relayConfig] +chainID = $evm_chain_id +fromBlock = $from_block +``` +
+ +## Nodes + +**🚨 Important config** + +`OCR2.Enabled` - must be `true` - Mercury uses OCR2. + +`P2P.V2.Enabled` - required in order for OCR2 to work. + +`Feature.LogPoller` - required in order for OCR2 to work. You will get fatal errors if not set. + +`JobPipeline.MaxSuccessfulRuns` - set to `0` to disable saving pipeline runs to reduce load on the db. Obviously this means you won’t see anything in the UI. + +`TelemetryIngress.SendInterval` - How frequently to send telemetry batches. Mercury generates a lot of telemetry data due to the throughput. `100ms` has been tested for a single feed with 5 nodes - this will need to be monitored (along with relevant config) as we add more feeds to a node. + +`Database` - **must** increase connection limits above the standard defaults + +
Example node config TOML + +```toml +RootDir = '$ROOT_DIR' + +[JobPipeline] +MaxSuccessfulRuns = 0 # you may set to some small value like '10' or similar if you like looking at job runs in the UI + +[Feature] +UICSAKeys = true # required +LogPoller = true # required + +[Log] +Level = 'info' # this should be 'debug' for plugin internal deployments, nops may use 'info' to reduce log volume + +[Log.File] +< standard values > + +[WebServer] +< standard values > + +[WebServer.TLS] +< standard values > + +[[EVM]] +ChainID = '42161' # change as needed based on target chain + +[OCR] +Enabled = false # turn off OCR 1 + +[P2P] +TraceLogging = false # this should be 'true' for plugin internal deployments, we may ask nops to set this to true for debugging +PeerID = '$PEERID' + +[P2P.V2] +Enabled = true # required +DefaultBootstrappers = < mercury bootstrap nodes > # Note that this should ideally be set in the job spec, this is just a fallback +# Make sure these IPs are properly configured in the firewall. May not be necessary for internal nodes +AnnounceAddresses = ['$EXTERNAL_IP:$EXTERNAL_PORT'] # Use whichever port you like, pls randomize, MAKE SURE ITS CONFIGURED IN THE FIREWALL +ListenAddresses = ['0.0.0.0:$INTERNAL_PORT'] # Use whichever port you like, pls randomize, MAKE SURE ITS CONFIGURED IN THE FIREWALL + +[OCR2] +Enabled = true # required +KeyBundleID = '$KEY_BUNDLE_ID' # Note that this should ideally be set in the job spec, this is just a fallback +CaptureEATelemetry = true + +[TelemetryIngress] +UniConn = false +SendInterval = '250ms' +BufferSize = 300 +MaxBatchSize = 100 + +[[TelemetryIngress.Endpoints]] +Network = 'EVM' +ChainID = '42161' # change as needed based on target chain +URL = '$TELEMETRY_ENDPOINT_URL' # Provided by Plugin Labs RSTP team +ServerPubKey = '$TELEMETRY_PUB_KEY' # Provided by Plugin Labs RSTP team + +[Database] +MaxIdleConns = 100 # should equal or greater than total number of mercury jobs +MaxOpenConns = 400 # caution! 
ensure postgres is configured to support this + +[[EVM.Nodes]] +< put RPC nodes here > +``` +
diff --git a/docs/SECRETS.md b/docs/SECRETS.md new file mode 100644 index 00000000..69153ef3 --- /dev/null +++ b/docs/SECRETS.md @@ -0,0 +1,181 @@ +[//]: # (Documentation generated from docs/secrets.toml - DO NOT EDIT.) + +This document describes the TOML format for secrets. + +Each secret has an alternative corresponding environment variable. + +See also [CONFIG.md](CONFIG.md) + +## Example + +```toml +[Database] +URL = 'postgresql://user:pass@localhost:5432/dbname?sslmode=disable' # Required + +[Password] +Keystore = 'keystore_pass' # Required +``` + +## Database +```toml +[Database] +URL = "postgresql://user:pass@localhost:5432/dbname?sslmode=disable" # Example +BackupURL = "postgresql://user:pass@read-replica.example.com:5432/dbname?sslmode=disable" # Example +AllowSimplePasswords = false # Default +``` + + +### URL +```toml +URL = "postgresql://user:pass@localhost:5432/dbname?sslmode=disable" # Example +``` +URL is the PostgreSQL URI to connect to your database. Plugin nodes require Postgres versions >= 11. See +[Running a Plugin Node](https://docs.chain.link/docs/running-a-plugin-node/#set-the-remote-database_url-config) for an example. + +Environment variable: `CL_DATABASE_URL` + +### BackupURL +```toml +BackupURL = "postgresql://user:pass@read-replica.example.com:5432/dbname?sslmode=disable" # Example +``` +BackupURL is where the automatic database backup will pull from, rather than the main CL_DATABASE_URL. It is recommended +to set this value to a read replica if you have one to avoid excessive load on the main database. + +Environment variable: `CL_DATABASE_BACKUP_URL` + +### AllowSimplePasswords +```toml +AllowSimplePasswords = false # Default +``` +AllowSimplePasswords skips the password complexity check normally enforced on URL & BackupURL. 
+ +Environment variable: `CL_DATABASE_ALLOW_SIMPLE_PASSWORDS` + +## WebServer.LDAP +```toml +[WebServer.LDAP] +ServerAddress = 'ldaps://127.0.0.1' # Example +ReadOnlyUserLogin = 'viewer@example.com' # Example +ReadOnlyUserPass = 'password' # Example +``` +Optional LDAP config + +### ServerAddress +```toml +ServerAddress = 'ldaps://127.0.0.1' # Example +``` +ServerAddress is the full ldaps:// address of the ldap server to authenticate with and query + +### ReadOnlyUserLogin +```toml +ReadOnlyUserLogin = 'viewer@example.com' # Example +``` +ReadOnlyUserLogin is the username of the read only root user used to authenticate the requested LDAP queries + +### ReadOnlyUserPass +```toml +ReadOnlyUserPass = 'password' # Example +``` +ReadOnlyUserPass is the password for the above account + +## Password +```toml +[Password] +Keystore = "keystore_pass" # Example +VRF = "VRF_pass" # Example +``` + + +### Keystore +```toml +Keystore = "keystore_pass" # Example +``` +Keystore is the password for the node's account. + +Environment variable: `CL_PASSWORD_KEYSTORE` + +### VRF +```toml +VRF = "VRF_pass" # Example +``` +VRF is the password for the vrf keys. + +Environment variable: `CL_PASSWORD_VRF` + +## Pyroscope +```toml +[Pyroscope] +AuthToken = "pyroscope-token" # Example +``` + + +### AuthToken +```toml +AuthToken = "pyroscope-token" # Example +``` +AuthToken is the API key for the Pyroscope server. + +Environment variable: `CL_PYROSCOPE_AUTH_TOKEN` + +## Prometheus +```toml +[Prometheus] +AuthToken = "prometheus-token" # Example +``` + + +### AuthToken +```toml +AuthToken = "prometheus-token" # Example +``` +AuthToken is the authorization key for the Prometheus metrics endpoint. 
+ +Environment variable: `CL_PROMETHEUS_AUTH_TOKEN` + +## Mercury.Credentials.Name +```toml +[Mercury.Credentials.Name] +Username = "A-Mercury-Username" # Example +Password = "A-Mercury-Password" # Example +URL = "https://example.com" # Example +LegacyURL = "https://example.v1.com" # Example +``` + + +### Username +```toml +Username = "A-Mercury-Username" # Example +``` +Username is used for basic auth of the Mercury endpoint + +### Password +```toml +Password = "A-Mercury-Password" # Example +``` +Password is used for basic auth of the Mercury endpoint + +### URL +```toml +URL = "https://example.com" # Example +``` +URL is the Mercury endpoint base URL used to access Mercury price feed + +### LegacyURL +```toml +LegacyURL = "https://example.v1.com" # Example +``` +LegacyURL is the Mercury legacy endpoint base URL used to access Mercury v0.2 price feed + +## Threshold +```toml +[Threshold] +ThresholdKeyShare = "A-Threshold-Decryption-Key-Share" # Example +``` + + +### ThresholdKeyShare +```toml +ThresholdKeyShare = "A-Threshold-Decryption-Key-Share" # Example +``` +ThresholdKeyShare used by the threshold decryption OCR plugin + diff --git a/docs/core/TX_MANAGER_ARCHITECTURE.md b/docs/core/TX_MANAGER_ARCHITECTURE.md new file mode 100644 index 00000000..6e0ff946 --- /dev/null +++ b/docs/core/TX_MANAGER_ARCHITECTURE.md @@ -0,0 +1,155 @@ +# BulletproofTxManager Architecture Overview + +# Diagrams + +## Finite state machine + +### `evm.txes.state` + +`unstarted` +| +| +v +`in_progress` (only one per key) +| \ +| \ +v v +`fatal_error` `unconfirmed` +| ^ +| | +v | +`confirmed` + +### `eth_tx_attempts.state` + +`in_progress` +| ^ +| | +v | +`broadcast` + +# Data structures + +Key: + +⚫️ - has never been broadcast to the network + +🟠 - may or may not have been broadcast to the network + +🔵 - has definitely been broadcast to the network + +EB - EthBroadcaster + +EC - EthConfirmer + +`evm.txes` has five possible states: + +- EB ⚫️ `unstarted` +- EB 🟠 `in_progress` +- EB/EC ⚫️ 
`fatal_error` +- EB/EC 🔵 `unconfirmed` +- EB/EC 🔵 `confirmed` + +`eth_tx_attempts` has two possible states: + +- EB/EC 🟠 `in_progress` +- EB/EC 🔵 `broadcast` + +An attempt may have 0 or more `eth_receipts` indicating that the transaction has been mined into a block. This block may or may not exist as part of the canonical longest chain. + +# Components + +BulletproofTxManager is split into three components, each of which has a clearly delineated set of responsibilities. + +## EthTx + +Conceptually, **EthTx** defines the transaction. + +**EthTx** is responsible for generating the transaction criteria and inserting the initial `unstarted` row into the `evm.txes` table. + +**EthTx** guarantees that the transaction is defined with the following criteria: + +- From address +- To address +- Encoded payload +- Value (eth) +- Gas limit + +Only one transaction may be created per **EthTx** task. + +EthTx should wait until it's transaction confirms before marking the task as completed. + +## EthBroadcaster + +Conceptually, **EthBroadcaster** assigns a nonce to a transaction and ensures that it is valid. It alone maintains the next usable sequence for a transaction. + +**EthBroadcaster** monitors `evm.txes` for transactions that need to be broadcast, assigns nonces and ensures that at least one eth node somewhere has placed the transaction into its mempool. + +It does not guarantee eventual confirmation! + +A whole host of other things can subsequently go wrong such as transactions being evicted from the mempool, eth nodes crashing, netsplits between eth nodes, chain re-orgs etc. Responsibility for ensuring eventual inclusion into the longest chain falls on the shoulders of **EthConfirmer**. + +**EthBroadcaster** makes the following guarantees: + +- A gapless, monotonically increasing sequence of nonces for `evm.txes` (scoped to key). +- Transition of `evm.txes` from `unstarted` to either `fatal_error` or `unconfirmed`. 
+- If final state is `fatal_error` then the nonce is unassigned, and it is impossible that this transaction could ever be mined into a block. +- If final state is `unconfirmed` then a saved `eth_transaction_attempt` exists. +- If final state is `unconfirmed` then an eth node somewhere has accepted this transaction into its mempool at least once. + +**EthConfirmer** must serialize access on a per-key basis since nonce assignment needs to be tightly controlled. Multiple keys can however be processed in parallel. Serialization is enforced with an advisory lock scoped to the key. + +## EthConfirmer + +Conceptually, **EthConfirmer** adjusts the gas price as necessary to get a transaction mined into a block on the longest chain. + +**EthConfirmer** listens to new heads and performs four separate tasks in sequence every time we become aware of a longer chain. + +### 1. Mark "broadcast before" + +When we receive a block we can be sure that any currently `unconfirmed` transactions were broadcast before this block was received, so we set `broadcast_before_block_num` on all transaction attempts made since we saw the last block. + +It is important to know how long a transaction has been waiting for inclusion, so we can know for how many blocks a transaction has been waiting for inclusion in order to decide if we need to bump gas. + +### 2. Check for receipts + +Find all `unconfirmed` transactions and ask the eth node for a receipt. If there is a receipt, we save it and move this transaction into `confirmed` state. + +### 3. Bump gas if necessary + +Find all `unconfirmed` transactions where all attempts have remained unconfirmed for more than `ETH_GAS_BUMP_THRESHOLD` blocks. Create a new `eth_transaction_attempt` for each, with a higher gas price. + +### 4. Re-org protection + +Find all transactions confirmed within the past `ETH_FINALITY_DEPTH` blocks and verify that they have at least one receipt in the current longest chain. 
If any do not, then rebroadcast those transactions. + +**EthConfirmer** makes the following guarantees: + +- All transactions will eventually be confirmed on the canonical longest chain, unless a reorg occurs that is deeper than `ETH_FINALITY_DEPTH` blocks. +- In the case that an external wallet used the nonce, we will ensure that _a_ transaction exists at this nonce up to a depth of `ETH_FINALITY_DEPTH` blocks but it most likely will not be the transaction in our database. + +Note that since checking for inclusion in the longest chain can now be done cheaply, without any calls to the eth node, `ETH_FINALITY_DEPTH` can be set to something quite large without penalty (e.g. 50 or 100). + +**EthBroadcaster** runs are designed to be serialized. Running it concurrently with itself probably can't get the data into an inconsistent state, but it might hit database conflicts or double-send transactions. Serialization is enforced with an advisory lock. + +# Head Tracker limitations + +The design of **EthConfirmer** relies on an unbroken chain of heads in our database. If there is a break in the chain of heads, our re-org protection is limited to this break. + +For example if we have heads at heights: + +1 + +2 + +4 + +Then a reorg that happened at block height 3 or above will not be detected and any transactions mined in those blocks may be left erroneously marked as confirmed. + +Currently, the design of the head tracker opens us up to gaps in the head sequence. This can occur in several scenarios: + +1. CL Node goes offline for more than one or two blocks +2. Eth node is behind a load balancer and gets switched out for one that has different block timing +3. Websocket connection is broken and resubscribe does not occur right away + +For this reason, I propose that follow-up work should be undertaken to ensure that the head tracker has some facility for backfilling heads up to`ETH_FINALITY_DEPTH`. 
diff --git a/docs/design/nodeslogos.sketch b/docs/design/nodeslogos.sketch new file mode 100644 index 00000000..582a7343 Binary files /dev/null and b/docs/design/nodeslogos.sketch differ diff --git a/docs/design/sponsorslogos.sketch b/docs/design/sponsorslogos.sketch new file mode 100644 index 00000000..b3f879fb Binary files /dev/null and b/docs/design/sponsorslogos.sketch differ diff --git a/docs/development/DOCUMENTATION.md b/docs/development/DOCUMENTATION.md new file mode 100644 index 00000000..1ad69eb1 --- /dev/null +++ b/docs/development/DOCUMENTATION.md @@ -0,0 +1,10 @@ +# Documentation + +## Tools + +[Shields IO] - API reference and tool for generating badge images. + +[Simple Icons] - The default icon set used by Shields IO. + +[shields io]: https://shields.io/ +[simple icons]: https://simpleicons.org/ diff --git a/docs/logo-chainlink-blue.svg b/docs/logo-chainlink-blue.svg new file mode 100644 index 00000000..a5844641 --- /dev/null +++ b/docs/logo-chainlink-blue.svg @@ -0,0 +1,14 @@ + + + Plugin + + Plugin + \ No newline at end of file diff --git a/flake.lock b/flake.lock new file mode 100644 index 00000000..bce30e58 --- /dev/null +++ b/flake.lock @@ -0,0 +1,61 @@ +{ + "nodes": { + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1705309234, + "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1707092692, + "narHash": "sha256-ZbHsm+mGk/izkWtT4xwwqz38fdlwu7nUUKXTOmm4SyE=", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "faf912b086576fd1a15fca610166c98d47bc667e", + "type": "github" + }, + "original": { + "owner": "nixos", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-utils": 
"flake-utils", + "nixpkgs": "nixpkgs" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 00000000..d62887c1 --- /dev/null +++ b/flake.nix @@ -0,0 +1,18 @@ +{ + description = "Plugin development shell"; + + inputs = { + nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable"; + flake-utils.url = "github:numtide/flake-utils"; + }; + + outputs = inputs@{ self, nixpkgs, flake-utils, ... }: + flake-utils.lib.eachDefaultSystem (system: + let + pkgs = import nixpkgs { inherit system; overlays = [ ]; }; + in + rec { + devShell = pkgs.callPackage ./shell.nix { }; + formatter = pkgs.nixpkgs-fmt; + }); +} diff --git a/fuzz/.gitignore b/fuzz/.gitignore new file mode 100644 index 00000000..600ab25d --- /dev/null +++ b/fuzz/.gitignore @@ -0,0 +1 @@ +*/fuzzer \ No newline at end of file diff --git a/fuzz/fuzz_all_native.py b/fuzz/fuzz_all_native.py new file mode 100644 index 00000000..10219724 --- /dev/null +++ b/fuzz/fuzz_all_native.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 + +import argparse +import itertools +import os +import re +import subprocess +import sys + +LIBROOT = "../" + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description="\n".join([ + "Fuzz helper to run all native go fuzzers in plugin", + "", + ]), + ) + parser.add_argument("--ci", required=False, help="In CI mode we run each parser only briefly once", action="store_true") + parser.add_argument("--seconds", required=False, help="Run for this many seconds of total fuzz time before exiting") + args = parser.parse_args() + + # use float for remaining_seconds 
so we can represent infinity + if args.seconds: + remaining_seconds = float(args.seconds) + else: + remaining_seconds = float("inf") + + fuzzers = discover_fuzzers() + print(f"🐝 Discovered fuzzers:", file=sys.stderr) + for fuzzfn, path in fuzzers.items(): + print(f"{fuzzfn} in {path}", file=sys.stderr) + + if args.ci: + # only run each fuzzer once for 60 seconds in CI + durations_seconds = [60] + else: + # run forever or until --seconds, with increasingly longer durations per fuzz run + durations_seconds = itertools.chain([5, 10, 30, 90, 270], itertools.repeat(600)) + + for duration_seconds in durations_seconds: + print(f"🐝 Running each fuzzer for {duration_seconds}s before switching to next fuzzer", file=sys.stderr) + for fuzzfn, path in fuzzers.items(): + if remaining_seconds <= 0: + print(f"🐝 Time budget of {args.seconds}s is exhausted. Exiting.", file=sys.stderr) + return + + next_duration_seconds = min(remaining_seconds, duration_seconds) + remaining_seconds -= next_duration_seconds + + print(f"🐝 Running {fuzzfn} in {path} for {next_duration_seconds}s before switching to next fuzzer", file=sys.stderr) + run_fuzzer(fuzzfn, path, next_duration_seconds) + print(f"🐝 Completed running {fuzzfn} in {path} for {next_duration_seconds}s. 
Total remaining time is {remaining_seconds}s", file=sys.stderr) + +def discover_fuzzers(): + fuzzers = {} + for root, dirs, files in os.walk(LIBROOT): + for file in files: + if not file.endswith("test.go"): continue + with open(os.path.join(root, file), "r") as f: + text = f.read() + # ignore multiline comments + text = re.sub(r"(?s)/[*].*?[*]/", "", text) + # ignore single line comments *except* build tags + text = re.sub(r"//.*", "", text) + # Find every function with a name like FuzzXXX + for fuzzfn in re.findall(r"func\s+(Fuzz\w+)", text): + if fuzzfn in fuzzers: + raise Exception(f"Duplicate fuzz function: {fuzzfn}") + fuzzers[fuzzfn] = os.path.relpath(root, LIBROOT) + return fuzzers + +def run_fuzzer(fuzzfn, dir, duration_seconds): + subprocess.check_call(["go", "test", "-run=^$", f"-fuzz=^{fuzzfn}$", f"-fuzztime={duration_seconds}s", f"./{dir}"], cwd=LIBROOT) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/go.md b/go.md new file mode 100644 index 00000000..bce99736 --- /dev/null +++ b/go.md @@ -0,0 +1,69 @@ +# goplugin Go modules +```mermaid +flowchart LR + subgraph chains + plugin-cosmos + plugin-evm + plugin-solana + plugin-starknet/relayer + end + + subgraph products + plugin-automation + plugin-ccip + plugin-data-streams + plugin-feeds + plugin-functions + plugin-vrf + end + + classDef outline stroke-dasharray:6,fill:none; + class chains,products outline + + plugin/v2 --> caigo + click caigo href "https://github.com/goplugin/caigo" + plugin/v2 --> chain-selectors + click chain-selectors href "https://github.com/goplugin/chain-selectors" + plugin/v2 --> plugin-automation + click plugin-automation href "https://github.com/goplugin/plugin-automation" + plugin/v2 --> plugin-common + click plugin-common href "https://github.com/goplugin/plugin-common" + plugin/v2 --> plugin-cosmos + click plugin-cosmos href "https://github.com/goplugin/plugin-cosmos" + plugin/v2 --> plugin-data-streams + click plugin-data-streams href 
"https://github.com/goplugin/plugin-data-streams" + plugin/v2 --> plugin-feeds + click plugin-feeds href "https://github.com/goplugin/plugin-feeds" + plugin/v2 --> plugin-solana + click plugin-solana href "https://github.com/goplugin/plugin-solana" + plugin/v2 --> plugin-starknet/relayer + click plugin-starknet/relayer href "https://github.com/goplugin/plugin-starknet" + plugin/v2 --> plugin-vrf + click plugin-vrf href "https://github.com/goplugin/plugin-vrf" + plugin/v2 --> libocr + click libocr href "https://github.com/goplugin/libocr" + plugin/v2 --> tdh2/go/ocr2/decryptionplugin + click tdh2/go/ocr2/decryptionplugin href "https://github.com/goplugin/tdh2" + plugin/v2 --> tdh2/go/tdh2 + click tdh2/go/tdh2 href "https://github.com/goplugin/tdh2" + plugin/v2 --> wsrpc + click wsrpc href "https://github.com/goplugin/wsrpc" + plugin-automation --> plugin-common + plugin-automation --> libocr + plugin-common --> libocr + plugin-cosmos --> plugin-common + plugin-cosmos --> libocr + plugin-data-streams --> chain-selectors + plugin-data-streams --> plugin-common + plugin-data-streams --> libocr + plugin-feeds --> plugin-common + plugin-feeds --> libocr + plugin-solana --> plugin-common + plugin-solana --> libocr + plugin-starknet/relayer --> caigo + plugin-starknet/relayer --> plugin-common + plugin-starknet/relayer --> libocr + plugin-vrf --> libocr + tdh2/go/ocr2/decryptionplugin --> libocr + tdh2/go/ocr2/decryptionplugin --> tdh2/go/tdh2 +``` diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..09cdf0d6 --- /dev/null +++ b/go.mod @@ -0,0 +1,344 @@ +module github.com/goplugin/pluginv3.0/v2 + +go 1.21.3 + +require ( + github.com/Depado/ginprom v1.8.0 + github.com/Masterminds/semver/v3 v3.2.1 + github.com/Masterminds/sprig/v3 v3.2.3 + github.com/avast/retry-go/v4 v4.5.1 + github.com/btcsuite/btcd/btcec/v2 v2.3.2 + github.com/cometbft/cometbft v0.37.2 + github.com/cosmos/cosmos-sdk v0.47.4 + github.com/danielkov/gin-helmet v0.0.0-20171108135313-1387e224435e 
+ github.com/esote/minmaxheap v1.0.0 + github.com/ethereum/go-ethereum v1.13.8 + github.com/fatih/color v1.16.0 + github.com/fxamacker/cbor/v2 v2.5.0 + github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27 + github.com/getsentry/sentry-go v0.19.0 + github.com/gin-contrib/cors v1.5.0 + github.com/gin-contrib/expvar v0.0.1 + github.com/gin-contrib/sessions v0.0.5 + github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4 + github.com/gin-gonic/gin v1.9.1 + github.com/go-ldap/ldap/v3 v3.4.6 + github.com/go-webauthn/webauthn v0.9.4 + github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b + github.com/google/uuid v1.4.0 + github.com/gorilla/securecookie v1.1.2 + github.com/gorilla/sessions v1.2.2 + github.com/gorilla/websocket v1.5.1 + github.com/grafana/pyroscope-go v1.0.4 + github.com/graph-gophers/dataloader v5.0.0+incompatible + github.com/graph-gophers/graphql-go v1.3.0 + github.com/hashicorp/consul/sdk v0.14.1 + github.com/hashicorp/go-envparse v0.1.0 + github.com/hashicorp/go-plugin v1.6.0 + github.com/hdevalence/ed25519consensus v0.1.0 + github.com/jackc/pgconn v1.14.1 + github.com/jackc/pgtype v1.14.0 + github.com/jackc/pgx/v4 v4.18.1 + github.com/jmoiron/sqlx v1.3.5 + github.com/jonboulle/clockwork v0.4.0 + github.com/jpillora/backoff v1.0.0 + github.com/kylelemons/godebug v1.1.0 + github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a + github.com/lib/pq v1.10.9 + github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f + github.com/mitchellh/go-homedir v1.1.0 + github.com/mitchellh/mapstructure v1.5.0 + github.com/mr-tron/base58 v1.2.0 + github.com/olekukonko/tablewriter v0.0.5 + github.com/onsi/gomega v1.30.0 + github.com/patrickmn/go-cache v2.1.0+incompatible + github.com/pelletier/go-toml v1.9.5 + github.com/pelletier/go-toml/v2 v2.1.1 + github.com/pkg/errors v0.9.1 + github.com/pressly/goose/v3 v3.16.0 + github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_model v0.5.0 + 
github.com/prometheus/common v0.45.0 + github.com/prometheus/prometheus v0.48.1 + github.com/robfig/cron/v3 v3.0.1 + github.com/rogpeppe/go-internal v1.11.0 + github.com/scylladb/go-reflectx v1.0.1 + github.com/shirou/gopsutil/v3 v3.23.11 + github.com/shopspring/decimal v1.3.1 + github.com/goplugin/caigo v0.0.0-20230621050857-b29a4ca8c704 + github.com/goplugin/chain-selectors v1.0.10 + github.com/goplugin/plugin-automation v1.0.2-0.20240118014648-1ab6a88c9429 + github.com/goplugin/plugin-common v0.1.7-0.20240216174848-c7f1809138d6 + github.com/goplugin/plugin-cosmos v0.4.1-0.20240213120401-01a23955f9f8 + github.com/goplugin/plugin-data-streams v0.0.0-20240214203158-47dae5de1336 + github.com/goplugin/plugin-feeds v0.0.0-20240119021347-3c541a78cdb8 + github.com/goplugin/plugin-solana v1.0.3-0.20240213161921-c4d342b761b0 + github.com/goplugin/plugin-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0 + github.com/goplugin/plugin-vrf v0.0.0-20231120191722-fef03814f868 + github.com/goplugin/libocr v0.0.0-20240215150045-fe2ba71b2f0a + github.com/goplugin/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 + github.com/goplugin/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 + github.com/goplugin/wsrpc v0.7.2 + github.com/spf13/cast v1.6.0 + github.com/stretchr/testify v1.8.4 + github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a + github.com/tidwall/gjson v1.17.0 + github.com/ugorji/go/codec v1.2.12 + github.com/ulule/limiter/v3 v3.11.2 + github.com/umbracle/ethgo v0.1.3 + github.com/unrolled/secure v1.13.0 + github.com/urfave/cli v1.22.14 + go.dedis.ch/fixbuf v1.0.3 + go.dedis.ch/kyber/v3 v3.1.0 + go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1 + go.uber.org/multierr v1.11.0 + go.uber.org/zap v1.26.0 + golang.org/x/crypto v0.19.0 + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a + golang.org/x/sync v0.6.0 + golang.org/x/term v0.17.0 + golang.org/x/text v0.14.0 + golang.org/x/time v0.5.0 + 
golang.org/x/tools v0.18.0 + gonum.org/v1/gonum v0.14.0 + google.golang.org/grpc v1.59.0 + google.golang.org/protobuf v1.32.0 + gopkg.in/guregu/null.v2 v2.1.2 + gopkg.in/guregu/null.v4 v4.0.0 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 +) + +require ( + contrib.go.opencensus.io/exporter/stackdriver v0.13.5 // indirect + cosmossdk.io/api v0.3.1 // indirect + cosmossdk.io/core v0.5.1 // indirect + cosmossdk.io/depinject v1.0.0-alpha.3 // indirect + cosmossdk.io/errors v1.0.0 // indirect + cosmossdk.io/math v1.0.1 // indirect + filippo.io/edwards25519 v1.0.0 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.1 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d // indirect + github.com/CosmWasm/wasmd v0.40.1 // indirect + github.com/CosmWasm/wasmvm v1.2.4 // indirect + github.com/DataDog/zstd v1.5.2 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/VictoriaMetrics/fastcache v1.12.1 // indirect + github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect + github.com/bits-and-blooms/bitset v1.10.0 // indirect + github.com/blendle/zapdriver v1.3.1 // indirect + github.com/bytedance/sonic v1.10.1 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect + github.com/chenzhuoyu/iasm v0.9.0 // indirect + 
github.com/cockroachdb/errors v1.9.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect + github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/cometbft/cometbft-db v0.7.0 // indirect + github.com/confio/ics23/go v0.9.0 // indirect + github.com/consensys/bavard v0.1.13 // indirect + github.com/consensys/gnark-crypto v0.12.1 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.2 // indirect + github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/gogoproto v1.4.11 // indirect + github.com/cosmos/iavl v0.20.0 // indirect + github.com/cosmos/ibc-go/v7 v7.0.1 // indirect + github.com/cosmos/ics23/go v0.9.1-0.20221207100636-b1abd8678aab // indirect + github.com/cosmos/ledger-cosmos-go v0.12.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect + github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect + github.com/danieljoos/wincred v1.1.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/deckarep/golang-set/v2 v2.3.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70 // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/ethereum/c-kzg-4844 v0.4.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.2 // 
indirect + github.com/gagliardetto/binary v0.7.1 // indirect + github.com/gagliardetto/treeout v0.1.4 // indirect + github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect + github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect + github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect + github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.15.5 // indirect + github.com/go-webauthn/x v0.1.5 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/uuid v4.3.1+incompatible // indirect + github.com/gogo/protobuf v1.3.3 // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect + github.com/golang/glog v1.1.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/go-tpm v0.9.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/gorilla/context v1.1.1 // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.4 // indirect + github.com/grpc-ecosystem/go-grpc-middleware 
v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/gtank/merlin v0.1.1 // indirect + github.com/gtank/ristretto255 v0.1.2 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/golang-lru v0.6.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect + github.com/holiman/bloomfilter/v2 v2.0.3 // indirect + github.com/holiman/uint256 v1.2.4 // indirect + github.com/huandu/skiplist v1.2.0 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.2 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/leodido/go-urn v1.2.4 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/logrusorgru/aurora v2.0.3+incompatible // indirect + github.com/magiconair/properties v1.8.7 // indirect + 
github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/mmcloughlin/addchain v0.4.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/mwitkow/grpc-proxy v0.0.0-20230212185441-f345521cb9c9 // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/rs/zerolog v1.30.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/sethvargo/go-retry v0.2.4 // indirect + github.com/shirou/gopsutil v3.21.11+incompatible // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/afero v1.9.3 // indirect + github.com/spf13/cobra v1.6.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.15.0 // indirect + 
github.com/status-im/keycard-go v0.2.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + github.com/supranational/blst v0.3.11 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125 // indirect + github.com/tidwall/btree v1.6.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/tyler-smith/go-bip39 v1.1.0 // indirect + github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 // indirect + github.com/valyala/fastjson v1.4.1 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/zondax/hid v0.9.1 // indirect + github.com/zondax/ledger-go v0.14.1 // indirect + go.dedis.ch/protobuf v1.0.11 // indirect + go.etcd.io/bbolt v1.3.7 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/sdk v1.21.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.uber.org/ratelimit v0.2.0 // indirect + golang.org/x/arch v0.7.0 // indirect + golang.org/x/mod v0.15.0 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/oauth2 v0.17.0 // indirect + 
golang.org/x/sys v0.17.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + google.golang.org/api v0.149.0 // indirect + google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + nhooyr.io/websocket v1.8.7 // indirect + pgregory.net/rapid v0.5.5 // indirect + rsc.io/tmplfunc v0.0.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) + +replace ( + // replicating the replace directive on cosmos SDK + github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 + + // until merged upstream: https://github.com/hashicorp/go-plugin/pull/257 + github.com/hashicorp/go-plugin => github.com/goplugin/go-plugin v0.0.0-20240208201424-b3b91517de16 + + // until merged upstream: https://github.com/mwitkow/grpc-proxy/pull/69 + github.com/mwitkow/grpc-proxy => github.com/goplugin/grpc-proxy v0.0.0-20230731113816-f1be6620749f +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..11a0e5d1 --- /dev/null +++ b/go.sum @@ -0,0 +1,1982 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= 
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.110.9 h1:e7ITSqGFFk4rbz/JFIqZh3G4VEHguhAL4BQcFlWtU68= +cloud.google.com/go v0.110.9/go.mod h1:rpxevX/0Lqvlbc88b7Sc1SPNdyK1riNBTUU6JXhYNpM= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= 
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/iam v1.1.4 h1:K6n/GZHFTtEoKT5aUG3l9diPi0VduZNQ1PfdnpkkIFk= +cloud.google.com/go/iam v1.1.4/go.mod h1:l/rg8l1AaA+VFMho/HYx2Vv6xinPSLMF8qfhRPIZ0L8= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= +cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +contrib.go.opencensus.io/exporter/stackdriver v0.12.6/go.mod h1:8x999/OcIPy5ivx/wDiV7Gx4D+VUPODf0mWRGRc5kSk= +contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +contrib.go.opencensus.io/exporter/stackdriver v0.13.5 h1:TNaexHK16gPUoc7uzELKOU7JULqccn1NDuqUxmxSqfo= 
+contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +cosmossdk.io/api v0.3.1 h1:NNiOclKRR0AOlO4KIqeaG6PS6kswOMhHD0ir0SscNXE= +cosmossdk.io/api v0.3.1/go.mod h1:DfHfMkiNA2Uhy8fj0JJlOCYOBp4eWUUJ1te5zBGNyIw= +cosmossdk.io/core v0.5.1 h1:vQVtFrIYOQJDV3f7rw4pjjVqc1id4+mE0L9hHP66pyI= +cosmossdk.io/core v0.5.1/go.mod h1:KZtwHCLjcFuo0nmDc24Xy6CRNEL9Vl/MeimQ2aC7NLE= +cosmossdk.io/depinject v1.0.0-alpha.3 h1:6evFIgj//Y3w09bqOUOzEpFj5tsxBqdc5CfkO7z+zfw= +cosmossdk.io/depinject v1.0.0-alpha.3/go.mod h1:eRbcdQ7MRpIPEM5YUJh8k97nxHpYbc3sMUnEtt8HPWU= +cosmossdk.io/errors v1.0.0 h1:nxF07lmlBbB8NKQhtJ+sJm6ef5uV1XkvPXG2bUntb04= +cosmossdk.io/errors v1.0.0/go.mod h1:+hJZLuhdDE0pYN8HkOrVNwrIOYvUGnn6+4fjnJs/oV0= +cosmossdk.io/log v1.1.1-0.20230704160919-88f2c830b0ca h1:msenprh2BLLRwNT7zN56TbBHOGk/7ARQckXHxXyvjoQ= +cosmossdk.io/log v1.1.1-0.20230704160919-88f2c830b0ca/go.mod h1:PkIAKXZvaxrTRc++z53XMRvFk8AcGGWYHcMIPzVYX9c= +cosmossdk.io/math v1.0.1 h1:Qx3ifyOPaMLNH/89WeZFH268yCvU4xEcnPLu3sJqPPg= +cosmossdk.io/math v1.0.1/go.mod h1:Ygz4wBHrgc7g0N+8+MrnTfS9LLn9aaTGa9hKopuym5k= +cosmossdk.io/tools/rosetta v0.2.1 h1:ddOMatOH+pbxWbrGJKRAawdBkPYLfKXutK9IETnjYxw= +cosmossdk.io/tools/rosetta v0.2.1/go.mod h1:Pqdc1FdvkNV3LcNIkYWt2RQY6IP1ge6YWZk8MhhO9Hw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= +filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.1 
h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo87o= +github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= +github.com/AlekSi/pointer v1.1.0 h1:SSDMPcXD9jSl8FPy9cRzoRaMJtm9g9ggGTxecRUbQoI= +github.com/AlekSi/pointer v1.1.0/go.mod h1:y7BvfRI3wXPWKXEBhU71nbnIEEZX0QTSB2Bj48UJIZE= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/ClickHouse/ch-go v0.58.2 h1:jSm2szHbT9MCAB1rJ3WuCJqmGLi5UTjlNu+f530UTS0= +github.com/ClickHouse/ch-go v0.58.2/go.mod h1:Ap/0bEmiLa14gYjCiRkYGbXvbe8vwdrfTYWhsuQ99aw= +github.com/ClickHouse/clickhouse-go/v2 v2.15.0 h1:G0hTKyO8fXXR1bGnZ0DY3vTG01xYfOGW76zgjg5tmC4= +github.com/ClickHouse/clickhouse-go/v2 v2.15.0/go.mod h1:kXt1SRq0PIRa6aKZD7TnFnY9PQKmc2b13sHtOYcK6cQ= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= +github.com/CloudyKit/jet/v3 v3.0.0/go.mod 
h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= +github.com/CosmWasm/wasmd v0.40.1 h1:LxbO78t/6S8TkeQlUrJ0m5O87HtAwLx4RGHq3rdrOEU= +github.com/CosmWasm/wasmd v0.40.1/go.mod h1:6EOwnv7MpuFaEqxcUOdFV9i4yvrdOciaY6VQ1o7A3yg= +github.com/CosmWasm/wasmvm v1.2.4 h1:6OfeZuEcEH/9iqwrg2pkeVtDCkMoj9U6PpKtcrCyVrQ= +github.com/CosmWasm/wasmvm v1.2.4/go.mod h1:vW/E3h8j9xBQs9bCoijDuawKo9kCtxOaS8N8J7KFtkc= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Depado/ginprom v1.8.0 h1:zaaibRLNI1dMiiuj1MKzatm8qrcHzikMlCc1anqOdyo= +github.com/Depado/ginprom v1.8.0/go.mod h1:XBaKzeNBqPF4vxJpNLincSQZeMDnZp1tIbU0FU0UKgg= +github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod 
h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= +github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= +github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= +github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alecthomas/participle/v2 v2.0.0-alpha7 h1:cK4vjj0VSgb3lN1nuKA5F7dw+1s1pWBe5bx7nNCnN+c= +github.com/alecthomas/participle/v2 v2.0.0-alpha7/go.mod h1:NumScqsC42o9x+dGj8/YqsIfhrIQjFEOFovxotbBirA= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 
h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= +github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= +github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 h1:MzBOUgng9orim59UnfUTLRjMpd09C5uEVQ6RPGeCaVI= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg= +github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI= +github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= +github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/avast/retry-go/v4 v4.5.1 h1:AxIx0HGi4VZ3I02jr78j5lZ3M6x1E0Ivxa6b0pUUh7o= +github.com/avast/retry-go/v4 v4.5.1/go.mod h1:/sipNsvNB3RRuT5iNcb6h73nw3IBmXJ/H3XrCQYSOpc= 
+github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= +github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= +github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bketelsen/crypt 
v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE= +github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc= +github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= +github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.3 h1:SDlJ7bAm4ewvrmZtR0DaiYbQGdKPeaaIm7bM+qRhFeU= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.3/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= +github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc= +github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto 
v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU= +github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= +github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo= +github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod 
h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= +github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= +github.com/cockroachdb/apd/v3 v3.1.0 h1:MK3Ow7LH0W8zkd5GMKA1PvS9qG3bWFI95WaVNfyZJ/w= +github.com/cockroachdb/apd/v3 v3.1.0/go.mod h1:6qgPBMXjATAdD/VefbRP9NoSLKjbB4LCoA7gN4LpHs4= +github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= +github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= +github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= 
+github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= +github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= +github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/coinbase/rosetta-sdk-go/types v1.0.0 h1:jpVIwLcPoOeCR6o1tU+Xv7r5bMONNbHU7MuEHboiFuA= +github.com/coinbase/rosetta-sdk-go/types v1.0.0/go.mod h1:eq7W2TMRH22GTW0N0beDnN931DW0/WOI1R2sdHNHG4c= +github.com/cometbft/cometbft v0.37.2 h1:XB0yyHGT0lwmJlFmM4+rsRnczPlHoAKFX6K8Zgc2/Jc= +github.com/cometbft/cometbft v0.37.2/go.mod h1:Y2MMMN//O5K4YKd8ze4r9jmk4Y7h0ajqILXbH5JQFVs= +github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= +github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= +github.com/confio/ics23/go v0.9.0 h1:cWs+wdbS2KRPZezoaaj+qBleXgUk5WOQFMP3CQFGTr4= +github.com/confio/ics23/go v0.9.0/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= +github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= +github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= +github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= +github.com/consensys/gnark-crypto v0.12.1/go.mod 
h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= +github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-proto v1.0.0-beta.2 h1:X3OKvWgK9Gsejo0F1qs5l8Qn6xJV/AzgIWR2wZ8Nua8= +github.com/cosmos/cosmos-proto v1.0.0-beta.2/go.mod h1:+XRCLJ14pr5HFEHIUcn51IKXD1Fy3rkEQqt4WqmN4V0= +github.com/cosmos/cosmos-sdk v0.47.4 h1:FVUpEprm58nMmBX4xkRdMDaIG5Nr4yy92HZAfGAw9bg= +github.com/cosmos/cosmos-sdk v0.47.4/go.mod h1:R5n+uM7vguVPFap4pgkdvQCT1nVo/OtPwrlAU40rvok= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= 
+github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.4.11 h1:LZcMHrx4FjUgrqQSWeaGC1v/TeuVFqSLa43CC6aWR2g= +github.com/cosmos/gogoproto v1.4.11/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= +github.com/cosmos/iavl v0.20.0 h1:fTVznVlepH0KK8NyKq8w+U7c2L6jofa27aFX6YGlm38= +github.com/cosmos/iavl v0.20.0/go.mod h1:WO7FyvaZJoH65+HFOsDir7xU9FWk2w9cHXNW1XHcl7A= +github.com/cosmos/ibc-go/v7 v7.0.1 h1:NIBNRWjlOoFvFQu1ZlgwkaSeHO5avf4C1YQiWegt8jw= +github.com/cosmos/ibc-go/v7 v7.0.1/go.mod h1:vEaapV6nuLPQlS+g8IKmxMo6auPi0i7HMv1PhViht/E= +github.com/cosmos/ics23/go v0.9.1-0.20221207100636-b1abd8678aab h1:I9ialKTQo7248V827Bba4OuKPmk+FPzmTVHsLXaIJWw= +github.com/cosmos/ics23/go v0.9.1-0.20221207100636-b1abd8678aab/go.mod h1:2CwqasX5dSD7Hbp/9b6lhK6BwoBDCBldx7gPKRukR60= +github.com/cosmos/ledger-cosmos-go v0.12.1 h1:sMBxza5p/rNK/06nBSNmsI/WDqI0pVJFVNihy1Y984w= +github.com/cosmos/ledger-cosmos-go v0.12.1/go.mod h1:dhO6kj+Y+AHIOgAe4L9HL/6NDdyyth4q238I9yFpD2g= +github.com/cosmos/rosetta-sdk-go v0.10.0 h1:E5RhTruuoA7KTIXUcMicL76cffyeoyvNybzUGSKFTcM= +github.com/cosmos/rosetta-sdk-go v0.10.0/go.mod h1:SImAZkb96YbwvoRkzSMQB6noNJXFgWl/ENIznEoYQI4= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod 
h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= +github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= +github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= +github.com/creachadair/taskgroup v0.4.2 h1:jsBLdAJE42asreGss2xZGZ8fJra7WtwnHWeJFxv2Li8= +github.com/creachadair/taskgroup v0.4.2/go.mod h1:qiXUOSrbwAY3u0JPGTzObbE3yf9hcXHDKBZ2ZjpCbgM= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cucumber/common/gherkin/go/v22 v22.0.0 h1:4K8NqptbvdOrjL9DEea6HFjSpbdT9+Q5kgLpmmsHYl0= +github.com/cucumber/common/gherkin/go/v22 v22.0.0/go.mod h1:3mJT10B2GGn3MvVPd3FwR7m2u4tLhSRhWUqJU4KN4Fg= +github.com/cucumber/common/messages/go/v17 v17.1.1 h1:RNqopvIFyLWnKv0LfATh34SWBhXeoFTJnSrgm9cT/Ts= +github.com/cucumber/common/messages/go/v17 v17.1.1/go.mod h1:bpGxb57tDE385Rb2EohgUadLkAbhoC4IyCFi89u/JQI= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/danielkov/gin-helmet v0.0.0-20171108135313-1387e224435e h1:5jVSh2l/ho6ajWhSPNN84eHEdq3dp0T7+f6r3Tc6hsk= +github.com/danielkov/gin-helmet v0.0.0-20171108135313-1387e224435e/go.mod h1:IJgIiGUARc4aOr4bOQ85klmjsShkEEfiRc6q/yBSfo8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set/v2 v2.3.0 
h1:qs18EKUfHm2X9fA50Mr/M5hccg2tNnVqsiBImnyDs0g= +github.com/deckarep/golang-set/v2 v2.3.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dfuse-io/logging v0.0.0-20201110202154-26697de88c79/go.mod h1:V+ED4kT/t/lKtH99JQmKIb0v9WL3VaYkJ36CfHlVECI= +github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70 h1:CuJS05R9jmNlUK8GOxrEELPbfXm0EuGh/30LjkjN5vo= +github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70/go.mod h1:EoK/8RFbMEteaCaz89uessDTnCWjbbcr+DXcBh4el5o= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 
h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= +github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= +github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T/Lao= +github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E= 
+github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= +github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/esote/minmaxheap v1.0.0 h1:rgA7StnXXpZG6qlM0S7pUmEv1KpWe32rYT4x8J8ntaA= +github.com/esote/minmaxheap v1.0.0/go.mod h1:Ln8+i7fS1k3PLgZI2JAo0iA1as95QnIYiGCrqSJ5FZk= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= +github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/go-ethereum v1.13.8 h1:1od+thJel3tM52ZUNQwvpYOeRHlbkVFZ5S8fhi0Lgsg= +github.com/ethereum/go-ethereum v1.13.8/go.mod h1:sc48XYQxCzH3fG9BcrXCOOgQk2JfZzNAmIKnceogzsA= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure 
v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= +github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gagliardetto/binary v0.6.1/go.mod h1:aOfYkc20U0deHaHn/LVZXiqlkDbFAX0FpTlDhsXa0S0= +github.com/gagliardetto/binary v0.7.1 h1:6ggDQ26vR+4xEvl/S13NcdLK3MUCi4oSy73pS9aI1cI= +github.com/gagliardetto/binary v0.7.1/go.mod h1:aOfYkc20U0deHaHn/LVZXiqlkDbFAX0FpTlDhsXa0S0= +github.com/gagliardetto/gofuzz v1.2.2 h1:XL/8qDMzcgvR4+CyRQW9UGdwPRPMHVJfqQ/uMvSUuQw= +github.com/gagliardetto/gofuzz v1.2.2/go.mod h1:bkH/3hYLZrMLbfYWA0pWzXmi5TTRZnu4pMGZBkqMKvY= +github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27 h1:q2IztKyRQUxJ6abXRsawaBtvDFvM+szj4jDqV4od1gs= +github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27/go.mod h1:NFuoDwHPvw858ZMHUJr6bkhN8qHt4x6e+U3EYHxAwNY= +github.com/gagliardetto/treeout v0.1.4 h1:ozeYerrLCmCubo1TcIjFiOWTTGteOOHND1twdFpgwaw= +github.com/gagliardetto/treeout v0.1.4/go.mod h1:loUefvXTrlRG5rYmJmExNryyBRh8f89VZhmMOyCyqok= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod 
h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= +github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 h1:Uc+IZ7gYqAf/rSGFplbWBSHaGolEQlNLgMgSE3ccnIQ= +github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813/go.mod h1:P+oSoE9yhSRvsmYyZsshflcR6ePWYLql6UU1amW13IM= +github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= +github.com/getsentry/sentry-go v0.19.0 h1:BcCH3CN5tXt5aML+gwmbFwVptLLQA+eT866fCO9wVOM= +github.com/getsentry/sentry-go v0.19.0/go.mod h1:y3+lGEFEFexZtpbG1GUE2WD/f9zGyKYwpEqryTOC/nE= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk= +github.com/gin-contrib/cors v1.5.0/go.mod h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI= +github.com/gin-contrib/expvar v0.0.1 h1:IuU5ArEgihz50vG8Onrwz22kJr7Mcvgv9xSSpfU5g+w= +github.com/gin-contrib/expvar v0.0.1/go.mod h1:8o2CznfQi1JjktORdHr2/abg3wSV6OCnXh0yGypvvVw= +github.com/gin-contrib/sessions v0.0.5 h1:CATtfHmLMQrMNpJRgzjWXD7worTh7g7ritsQfmF+0jE= +github.com/gin-contrib/sessions v0.0.5/go.mod h1:vYAuaUPqie3WUSsft6HUlCjlwwoJQs97miaG2+7neKY= +github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4 h1:Z9J0PVIt1PuibOShaOw1jH8hUYz+Ak8NLsR/GI0Hv5I= +github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4/go.mod h1:CEPcgZiz8998l9E8fDm16h8UfHRL7b+5oG0j/0koeVw= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.4.0/go.mod 
h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA= +github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= +github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= +github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI= +github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A= +github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/assert/v2 v2.2.0 
h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= +github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24= +github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig 
v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-webauthn/webauthn v0.9.4 h1:YxvHSqgUyc5AK2pZbqkWWR55qKeDPhP8zLDr6lpIc2g= +github.com/go-webauthn/webauthn v0.9.4/go.mod h1:LqupCtzSef38FcxzaklmOn7AykGKhAhr9xlRbdbgnTw= +github.com/go-webauthn/x v0.1.5 h1:V2TCzDU2TGLd0kSZOXdrqDVV5JB9ILnKxA9S53CSBw0= +github.com/go-webauthn/x v0.1.5/go.mod h1:qbzWwcFcv4rTwtCLOZd+icnr6B7oSsAGZJqlt8cukqY= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.3.1+incompatible h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI= +github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod 
h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/go-tpm v0.9.0 h1:sQF6YqWMi+SCXpsmS3fd21oPy/vSddwZry4JnmltHVk= +github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof 
v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.1.1/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= +github.com/gorilla/securecookie v1.1.2 
h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= +github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= +github.com/gorilla/sessions v1.2.2 h1:lqzMYz6bOfvn2WriPUjNByzeXIlVzURcPmgMczkmTjY= +github.com/gorilla/sessions v1.2.2/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/grafana/pyroscope-go v1.0.4 h1:oyQX0BOkL+iARXzHuCdIF5TQ7/sRSel1YFViMHC7Bm0= +github.com/grafana/pyroscope-go v1.0.4/go.mod h1:0d7ftwSMBV/Awm7CCiYmHQEG8Y44Ma3YSjt+nWcWztY= +github.com/grafana/pyroscope-go/godeltaprof v0.1.4 h1:mDsJ3ngul7UfrHibGQpV66PbZ3q1T8glz/tK3bQKKEk= +github.com/grafana/pyroscope-go/godeltaprof v0.1.4/go.mod h1:1HSPtjU8vLG0jE9JrTdzjgFqdJ/VgN7fvxBNq3luJko= +github.com/graph-gophers/dataloader v5.0.0+incompatible h1:R+yjsbrNq1Mo3aPG+Z/EKYrXrXXUNJHOgbRt+U6jOug= +github.com/graph-gophers/dataloader v5.0.0+incompatible/go.mod h1:jk4jk0c5ZISbKaMe8WsVopGB5/15GvGHMdMdPtwlRp4= +github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= +github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 h1:f4tggROQKKcnh4eItay6z/HbHLqghBxS8g7pyMhmDio= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0/go.mod 
h1:hKAkSgNkL0FII46ZkJcpVEAai4KV+swlIWCKfekd1pA= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 h1:o95KDiV/b1xdkumY5YbLR0/n2+wBxUpgf3HgfKgTyLI= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3/go.mod h1:hTxjzRcX49ogbTGVJ1sM5mz5s+SSgiGIyL3jjPxl32E= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= +github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= +github.com/hashicorp/errwrap 
v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= +github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-envparse v0.1.0 h1:bE++6bhIsNCPLvgDZkYqo3nA+/PFI51pkrHdmPSDFPY= +github.com/hashicorp/go-envparse v0.1.0/go.mod h1:OHheN1GoygLlAkTlXLXvAdnXdZxy8JUweQ1rAXx1xnc= +github.com/hashicorp/go-getter v1.7.1 h1:SWiSWN/42qdpR0MdhaOc/bLR48PLuP1ZQtYLRlM69uY= +github.com/hashicorp/go-getter v1.7.1/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod 
h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/yamux 
v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= +github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= +github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= +github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= +github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= +github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= +github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= 
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= +github.com/jackc/pgconn v1.14.1 h1:smbxIaZA08n6YuxEX1sDyjV/qkbtUtkH20qLkR9MUR4= +github.com/jackc/pgconn v1.14.1/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod 
h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0= +github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 
v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0= +github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE= +github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw= +github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0 h1:eHK/5clGOatcjX3oWGBO/MpxpbHzSwud5EWTSCI+MX0= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod 
h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= +github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kataras/golog 
v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= +github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= +github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= +github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= +github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/knz/go-libedit v1.10.1/go.mod 
h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a h1:dHCfT5W7gghzPtfsW488uPmEOm85wewI+ypUwibyTdU= +github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn 
v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= +github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f h1:tVvGiZQFjOXP+9YyGqSA6jE55x1XVxmoPYudncxrZ8U= +github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f/go.mod 
h1:Z60vy0EZVSu0bOugCHdcN5ZxFMKSpjRgsnh0XKPFqqk= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty 
v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= +github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= +github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod 
h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= +github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= 
+github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= +github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1 h1:mPMvm6X6tf4w8y7j9YIt6V9jfWhL6QlbEc7CCmeQlWk= +github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1/go.mod h1:ye2e/VUEtE2BHE+G/QcKkcLQVAEJoYRFj5VUOQatCRE= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= 
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249 h1:NHrXEjTNQY7P0Zfx1aMrNhpgxHmow66XQtm0aQLY0AE= +github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod 
h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40= +github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= +github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= 
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s= +github.com/paulmach/orb v0.10.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= +github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod 
h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pressly/goose/v3 v3.16.0 h1:xMJUsZdHLqSnCqESyKSqEfcYVYsUuup1nrOhaEFftQg= +github.com/pressly/goose/v3 v3.16.0/go.mod h1:JwdKVnmCRhnF6XLQs2mHEQtucFD49cQBdRM4UiwkxsM= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/prometheus v0.48.1 h1:CTszphSNTXkuCG6O0IfpKdHcJkvvnAAE1GbELKS+NFk= +github.com/prometheus/prometheus v0.48.1/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= +github.com/prometheus/tsdb v0.7.1/go.mod 
h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/regen-network/gocuke v0.6.2 h1:pHviZ0kKAq2U2hN2q3smKNxct6hS0mGByFMHGnWA97M= +github.com/regen-network/gocuke v0.6.2/go.mod h1:zYaqIHZobHyd0xOrHGPQjbhGJsuZ1oElx150u2o1xuk= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod 
h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo= +github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= +github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/schollz/closestmatch 
v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= +github.com/scylladb/go-reflectx v1.0.1 h1:b917wZM7189pZdlND9PbIJ6NQxfDPfBvUaQ7cjj1iZQ= +github.com/scylladb/go-reflectx v1.0.1/go.mod h1:rWnOfDIRWBGN0miMLIcoPt/Dhi2doCMZqwMCJ3KupFc= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= +github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= +github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus 
v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/goplugin/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumvbfM1u/etVq42Afwq/jtNSBSOA8n5jntnNPo= +github.com/goplugin/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= +github.com/goplugin/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= +github.com/goplugin/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= +github.com/goplugin/plugin-automation v1.0.2-0.20240118014648-1ab6a88c9429 h1:xkejUBZhcBpBrTSfxc91Iwzadrb6SXw8ks69bHIQ9Ww= +github.com/goplugin/plugin-automation v1.0.2-0.20240118014648-1ab6a88c9429/go.mod h1:wJmVvDf4XSjsahWtfUq3wvIAYEAuhr7oxmxYnEL/LGQ= +github.com/goplugin/plugin-common v0.1.7-0.20240216174848-c7f1809138d6 h1:hpNkTpLtwWXKqguf7wYqetxpmxY/bSO+1PLpY8VBu2w= +github.com/goplugin/plugin-common v0.1.7-0.20240216174848-c7f1809138d6/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= +github.com/goplugin/plugin-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= +github.com/goplugin/plugin-cosmos v0.4.1-0.20240213120401-01a23955f9f8/go.mod h1:a65NtrK4xZb01mf0dDNghPkN2wXgcqFQ55ADthVBgMc= +github.com/goplugin/plugin-data-streams v0.0.0-20240214203158-47dae5de1336 h1:j00D0/EqE9HRu+63v7KwUOe4ZxLc4AN5SOJFiinkkH0= +github.com/goplugin/plugin-data-streams v0.0.0-20240214203158-47dae5de1336/go.mod h1:umLyYLRGqyIuFfGpEREZP3So6+O8iL35cCCqW+OxX5w= +github.com/goplugin/plugin-feeds v0.0.0-20240119021347-3c541a78cdb8 h1:1BcjXuviSAKttOX7BZoVHRZZGfxqoA2+AL8tykmkdoc= +github.com/goplugin/plugin-feeds v0.0.0-20240119021347-3c541a78cdb8/go.mod h1:vy1L7NybTy2F/Yv7BOh+oZBa1MACD6gzd1+DkcSkfp8= 
+github.com/goplugin/plugin-solana v1.0.3-0.20240213161921-c4d342b761b0 h1:9IxmR+1NH1WxaX44+t553fOrrZRfxwMVvnDuBIy0tgs= +github.com/goplugin/plugin-solana v1.0.3-0.20240213161921-c4d342b761b0/go.mod h1:JiykN+8W5TA4UD2ClrzQCVvcH3NcyLEVv7RwY0busrw= +github.com/goplugin/plugin-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0 h1:7m9PVtccb8/pvKTXMaGuyceFno1icRyC2SFH7KG7+70= +github.com/goplugin/plugin-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0/go.mod h1:SZ899lZYQ0maUulWbZg+SWqabHQ1wTbyk3jT8wJfyo8= +github.com/goplugin/plugin-vrf v0.0.0-20231120191722-fef03814f868 h1:FFdvEzlYwcuVHkdZ8YnZR/XomeMGbz5E2F2HZI3I3w8= +github.com/goplugin/plugin-vrf v0.0.0-20231120191722-fef03814f868/go.mod h1:Kn1Hape05UzFZ7bOUnm3GVsHzP0TNrVmpfXYNHdqGGs= +github.com/goplugin/go-plugin v0.0.0-20240208201424-b3b91517de16 h1:TFe+FvzxClblt6qRfqEhUfa4kFQx5UobuoFGO2W4mMo= +github.com/goplugin/go-plugin v0.0.0-20240208201424-b3b91517de16/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/goplugin/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU= +github.com/goplugin/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0= +github.com/goplugin/libocr v0.0.0-20240215150045-fe2ba71b2f0a h1:nGkZ9uXS8lPIJOi68rdftEo2c9Q8qbRAi5+XMnKobVc= +github.com/goplugin/libocr v0.0.0-20240215150045-fe2ba71b2f0a/go.mod h1:kC0qmVPUaVkFqGiZMNhmRmjdphuUmeyLEdlWFOQzFWI= +github.com/goplugin/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 h1:yiKnypAqP8l0OX0P3klzZ7SCcBUxy5KqTAKZmQOvSQE= +github.com/goplugin/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:q6f4fe39oZPdsh1i57WznEZgxd8siidMaSFq3wdPmVg= +github.com/goplugin/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 h1:Dai1bn+Q5cpeGMQwRdjOdVjG8mmFFROVkSKuUgBErRQ= +github.com/goplugin/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:G5Sd/yzHWf26rQ+X0nG9E0buKPqRGPMJAfk2gwCzOOw= 
+github.com/goplugin/wsrpc v0.7.2 h1:iBXzMeg7vc5YoezIQBq896y25BARw7OKbhrb6vPbtRQ= +github.com/goplugin/wsrpc v0.7.2/go.mod h1:sj7QX2NQibhkhxTfs3KOhAj/5xwgqMipTvJVSssT9i0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= +github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= 
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= +github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= +github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/teris-io/shortid v0.0.0-20171029131806-771a37caa5cf/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0= +github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125 h1:3SNcvBmEPE1YlB1JpVZouslJpI3GBNoiqW7+wb0Rz7w= +github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125/go.mod 
h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0= +github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE= +github.com/test-go/testify v1.1.4/go.mod h1:rH7cfJo/47vWGdi4GPj16x3/t1xGOj2YxzmNQzk2ghU= +github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a h1:YuO+afVc3eqrjiCUizNCxI53bl/BnPiVwXqLzqYTqgU= +github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a/go.mod h1:/sfW47zCZp9FrtGcWyo1VjbgDaodxX9ovZvgLb/MxaA= +github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= +github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= +github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod 
h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA= +github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI= +github.com/umbracle/ethgo v0.1.3 h1:s8D7Rmphnt71zuqrgsGTMS5gTNbueGO1zKLh7qsFzTM= +github.com/umbracle/ethgo v0.1.3/go.mod h1:g9zclCLixH8liBI27Py82klDkW7Oo33AxUOr+M9lzrU= +github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 h1:10Nbw6cACsnQm7r34zlpJky+IzxVLRk6MKTS2d3Vp0E= +github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722/go.mod h1:c8J0h9aULj2i3umrfyestM6jCq0LK0U6ly6bWy96nd4= +github.com/unrolled/secure v1.13.0 h1:sdr3Phw2+f8Px8HE5sd1EHdj1aV3yUwed/uZXChLFsk= +github.com/unrolled/secure v1.13.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= +github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= +github.com/urfave/cli v1.22.14/go.mod 
h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= +github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= +github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/fastjson v1.4.1 h1:hrltpHpIpkaxll8QltMU8c3QZ5+qIiCL8yKqPFJI/yE= +github.com/valyala/fastjson v1.4.1/go.mod h1:nV6MsjxL2IMJQUoHDIrjEI7oLyeqK6aBD7EFWPsvP8o= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw= +github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 
h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd h1:dzWP1Lu+A40W883dK/Mr3xyDSM/2MggS8GtHT0qgAnE= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= +github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA= +github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2/go.mod h1:fjBLQ2TdQNl4bMjuWl9adoTGBypwUTPoGC+EqYqiIcU= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +github.com/zondax/hid v0.9.1 h1:gQe66rtmyZ8VeGFcOpbuH3r7erYtNEAezCAYu8LdkJo= +github.com/zondax/hid v0.9.1/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.1 h1:Pip65OOl4iJ84WTpA4BKChvOufMhhbxED3BaihoZN4c= +github.com/zondax/ledger-go v0.14.1/go.mod h1:fZ3Dqg6qcdXWSOJFKMG8GCTnD7slO/RL2feOQv8K320= +go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= +go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= +go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= +go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= +go.dedis.ch/kyber/v3 v3.1.0 h1:ghu+kiRgM5JyD9TJ0hTIxTLQlJBR/ehjWvWwYW3XsC0= +go.dedis.ch/kyber/v3 v3.1.0/go.mod h1:kXy7p3STAurkADD+/aZcsznZGKVHEqbtmdIzvPfrs1U= +go.dedis.ch/protobuf v1.0.5/go.mod h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= +go.dedis.ch/protobuf v1.0.7/go.mod h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI4= +go.dedis.ch/protobuf v1.0.11 h1:FTYVIEzY/bfl37lu3pR4lIj+F9Vp1jE8oh91VmxKgLo= +go.dedis.ch/protobuf v1.0.11/go.mod h1:97QR256dnkimeNdfmURz0wAMNVbd1VmLXhG1CrTYrJ4= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1 h1:mMv2jG58h6ZI5t5S9QCVGdzCmAsTakMa3oxVgpSD44g= +go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1/go.mod h1:oqRuNKG0upTaDPbLVCG8AD0G2ETrfDtmh7jViy7ox6M= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= +go.opentelemetry.io/contrib/propagators/b3 v1.21.1 h1:WPYiUgmw3+b7b3sQ1bFBFAf0q+Di9dvNc3AtYfnT4RQ= +go.opentelemetry.io/contrib/propagators/b3 v1.21.1/go.mod h1:EmzokPoSqsYMBVK4nRnhsfm5mbn8J1eDuz/U1UaQaWg= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod 
h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/ratelimit v0.2.0 h1:UQE2Bgi7p2B85uP5dC2bbRtig0C+OeNRnNEafLjsLPA= +go.uber.org/ratelimit 
v0.2.0/go.mod h1:YYBV4e4naJvhpitQrWJu1vCpgB7CboMe0qhltKt6mUg= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.13.0/go.mod 
h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod 
v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net 
v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= 
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= +gonum.org/v1/gonum v0.14.0/go.mod 
h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= +google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= +google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20210401141331-865547bb08e2/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 h1:I6WNifs6pF9tNdSob2W24JtyxIYjzFB9qDlpUC76q+U= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod 
h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc/examples v0.0.0-20210424002626-9572fd6faeae/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/guregu/null.v2 v2.1.2 h1:YOuepWdYqGnrenzPyMi+ybCjeDzjdazynbwsXXOk4i8= 
+gopkg.in/guregu/null.v2 v2.1.2/go.mod h1:XORrx8tyS5ZDcyUboCIxQtta/Aujk/6pfWrn9Xe33mU= +gopkg.in/guregu/null.v4 v4.0.0 h1:1Wm3S1WEA2I26Kq+6vcW+w0gcDo44YKYD7YIEJNHDjg= +gopkg.in/guregu/null.v4 v4.0.0/go.mod h1:YoQhUrADuG3i9WqesrCmpNRwm1ypAgSHYqoOcTu/JrI= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 
h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= +lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= +lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.41.0 
h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q= +modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y= +modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0= +modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI= +modernc.org/libc v1.32.0 h1:yXatHTrACp3WaKNRCoZwUK7qj5V8ep1XyY0ka4oYcNc= +modernc.org/libc v1.32.0/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= +modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= +modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.27.0 h1:MpKAHoyYB7xqcwnUwkuD+npwEa0fojF0B5QRbN+auJ8= +modernc.org/sqlite v1.27.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= +modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= +pgregory.net/rapid v0.5.5 h1:jkgx1TjbQPD/feRoK+S/mXw9e1uj6WilpHrXJowi6oA= +pgregory.net/rapid v0.5.5/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod 
h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= +rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/heroku.yml b/heroku.yml new file mode 100644 index 00000000..bb95afa1 --- /dev/null +++ b/heroku.yml @@ -0,0 +1,6 @@ +build: + docker: + web: Dockerfile.web + config: + REACT_APP_INFURA_KEY: + REACT_APP_GA_ID: diff --git a/integration-tests/.golangci.yml b/integration-tests/.golangci.yml new file mode 100644 index 00000000..c24c8aca --- /dev/null +++ b/integration-tests/.golangci.yml @@ -0,0 +1,78 @@ +run: + timeout: 15m +linters: + enable: + - exhaustive + - exportloopref + - revive + - goimports + - gosec + - misspell + - rowserrcheck + - errorlint +linters-settings: + exhaustive: + default-signifies-exhaustive: true + goimports: + local-prefixes: github.com/goplugin/pluginv3.0 + golint: + min-confidence: 0.999 + gosec: + excludes: + - G101 + govet: + # report about shadowed variables + check-shadowing: true + revive: + confidence: 0.8 + rules: + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: if-return + - name: increment-decrement + # - name: var-naming // doesn't work with some generated names + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unused-parameter + - name: unreachable-code + - name: redefines-builtin-id + - name: waitgroup-by-value + - name: unconditional-recursion + - name: struct-tag 
+ - name: string-format + - name: string-of-int + - name: range-val-address + - name: range-val-in-closure + - name: modifies-value-receiver + - name: modifies-parameter + - name: identical-branches + - name: get-return + # - name: flag-parameter // probably one we should work on doing better at in the future + # - name: early-return // probably one we should work on doing better at in the future + - name: defer + - name: constant-logical-expr + - name: confusing-naming + - name: confusing-results + - name: bool-literal-in-expr + - name: atomic +issues: + exclude-rules: + - text: "^G404: Use of weak random number generator" + linters: + - gosec + - linters: + - govet + text: "declaration of \"err\" shadows" diff --git a/integration-tests/.root_dir b/integration-tests/.root_dir new file mode 100644 index 00000000..e69de29b diff --git a/integration-tests/.tool-versions b/integration-tests/.tool-versions new file mode 100644 index 00000000..ac6300f9 --- /dev/null +++ b/integration-tests/.tool-versions @@ -0,0 +1,5 @@ +golang 1.21.5 +k3d 5.4.6 +kubectl 1.25.5 +nodejs 18.13.0 +golangci-lint 1.55.2 diff --git a/integration-tests/LOG_POLLER.md b/integration-tests/LOG_POLLER.md new file mode 100644 index 00000000..3a821253 --- /dev/null +++ b/integration-tests/LOG_POLLER.md @@ -0,0 +1,163 @@ +# How to run Log Poller's tests + +## Limitations +* currently they can only be run in Docker, not in Kubernetes +* when using `looped` runner it's not possible to directly control execution time +* WASP's `gun` implementation is imperfect in terms of generated load + +## Configuration +Due to unfinished migration to TOML config tests use a mixed configuration approach: +* network, RPC endpoints, funding keys, etc need to be provided by env vars +* test-specific configuration can be provided by TOML file or via a `Config` struct (to which TOML is parsed anyway) additionally some of it can be overridden by env vars (for ease of use in CI) +** smoke tests use the programmatical approach 
+** load test uses the TOML approach + +## Approximated test scenario +Different tests might have slightly modified scenarios, but generally they follow this pattern: +* start CL nodes +* setup OCR +* upload Automation Registry 2.1 +* deploy UpKeep Consumers +* deploy test contracts +* register filters for test contracts +* make sure all CL nodes have filters registered +* emit test logs +* wait for log poller to finalise last block in which logs were emitted +** block number is determined either by finality tag or fixed finality depth depending on network configuration +* wait for all CL nodes to have expected log count +* compare logs that present in the EVM node with logs in CL nodes + +All of the checks use fluent waits. + +### Required env vars +* `PLUGIN_IMAGE` +* `PLUGIN_VERSION` +* `SELECTED_NETWORKS` + +### Env vars required for live testnet tests +* `EVM_WS_URL` -- RPC websocket +* `EVM_HTTP_URL` -- RPC HTTP +* `EVM_KEYS` -- private keys used for funding + +Since on live testnets we are using existing and canonical PLI contracts funding keys need to contain enough PLI to pay for the test. There's an automated check that fails during setup if there's not enough PLI. Approximately `9 PLI` is required for each UpKeep contract test uses to register a `LogTrigger`. Test contract emits 3 types of events and unless configured otherwise (programmatically!) all of them will be used, which means that due to Automation's limitation we need to register a separate `LogTrigger` for each event type for each contract. So if you want to test with 100 contracts, then you'd need to register 300 UpKeep contracts and thus your funding address needs to have at least 2700 PLI. 
+ +### Programmatical config +There are two load generators available: +* `looped` -- it's a simple generator that just loops over all contracts and emits events at random intervals +* `wasp` -- based on WASP load testing tool, it's more sophisticated and allows to control execution time + +#### Looped config +``` + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 2, # number of test contracts to deploy + EventsPerTx: 4, # number of events to emit in a single transaction + UseFinalityTag: false, # if set to true then Log Poller will use finality tag returned by chain, when determining last finalised block (won't work on a simulated network, it requires eth2) + }, + LoopedConfig: &logpoller.LoopedConfig{ + ContractConfig: logpoller.ContractConfig{ + ExecutionCount: 100, # number of times each contract will be called + }, + FuzzConfig: logpoller.FuzzConfig{ + MinEmitWaitTimeMs: 200, # minimum number of milliseconds to wait before emitting events + MaxEmitWaitTimeMs: 500, # maximum number of milliseconds to wait before emitting events + }, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { # modify that function to emit only logs you want + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit +``` + +Remember that the final number of events emitted will be `Contracts * EventsPerTx * ExecutionCount * len(eventsToEmit)`. And that last number by default is equal to `3` (that's because we want to emit different event types, not just one). You can change that by overriding `EventsToEmit` field. 
+ +#### WASP config +``` + cfg := logpoller.Config{ + General: &logpoller.General{ + Generator: logpoller.GeneratorType_Looped, + Contracts: 2, + EventsPerTx: 4, + UseFinalityTag: false, + }, + Wasp: &logpoller.WaspConfig{ + Load: &logpoller.Load{ + RPS: 10, # requests per second + LPS: 0, # logs per second + RateLimitUnitDuration: models.MustNewDuration(5 * time.Minute), # for how long the load should be limited (ramp-up period) + Duration: models.MustNewDuration(5 * time.Minute), # how long to generate the load for + CallTimeout: models.MustNewDuration(5 * time.Minute), # how long to wait for a single call to finish + }, + }, + } + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit +``` + +Remember that you cannot specify both `RPS` and `LPS`. If you want to use `LPS` then omit the `RPS` field. Also remember that depending on the events you decide to emit RPS might mean 1 request or might mean 3 requests (if you go with the default `EventsToEmit`). + +For other nuances do check [gun.go](integration-tests/universal/log_poller/gun.go). + +### TOML config +That config follows the same structure as programmatical config shown above. 
+ +Sample config: [config.toml](integration-tests/load/log_poller/config.toml) + +Use this snippet instead of creating the `Config` struct programmatically: +``` + cfg, err := lp_helpers.ReadConfig(lp_helpers.DefaultConfigFilename) + require.NoError(t, err) +``` + +And remember to add events you want emit: +``` + eventsToEmit := []abi.Event{} + for _, event := range lp_helpers.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg.General.EventsToEmit = eventsToEmit +``` + +### Timeouts +Various checks inside the tests have hardcoded timeouts, which might not be suitable for your execution parameters, for example if you decided to emit 1M logs, then waiting for all of them to be indexed for `1m` might not be enough. Remember to adjust them accordingly. + +Sample snippet: +``` + gom.Eventually(func(g gomega.Gomega) { + logCountMatches, err := clNodesHaveExpectedLogCount(startBlock, endBlock, testEnv.EVMClient.GetChainID(), totalLogsEmitted, expectedFilters, l, coreLogger, testEnv.ClCluster) + if err != nil { + l.Warn().Err(err).Msg("Error checking if CL nodes have expected log count. Retrying...") + } + g.Expect(logCountMatches).To(gomega.BeTrue(), "Not all CL nodes have expected log count") + }, "1m", "30s").Should(gomega.Succeed()) # 1m is the timeout for all nodes to have expected log count +``` + +## Tests +* [Load](integration-tests/load/log_poller/log_poller_test.go) +* [Smoke](integration-tests/smoke/log_poller/log_poller_test.go) + +## Running tests +After setting all the environment variables you can run the test with: +``` +# run in the root folder of plugin repo +go test -v -test.timeout=2700s -run TestLogPollerReplay integration-tests/smoke/log_poller_test.go +``` + +Remember to adjust test timeout accordingly to match expected duration. + + +## Github Actions +If all of that seems too complicated use this [on-demand workflow](https://github.com/goplugin/pluginv3.0/actions/workflows/on-demand-log-poller.yml). 
+ +Execution time here is an approximation, so depending on network conditions it might be slightly longer or shorter. \ No newline at end of file diff --git a/integration-tests/Makefile b/integration-tests/Makefile new file mode 100644 index 00000000..55479453 --- /dev/null +++ b/integration-tests/Makefile @@ -0,0 +1,207 @@ +BIN_DIR = bin +export GOPATH ?= $(shell go env GOPATH) +export GO111MODULE ?= on + +LINUX=LINUX +OSX=OSX +WINDOWS=WIN32 +OSFLAG := +ifeq ($(OS),Windows_NT) + OSFLAG = $(WINDOWS) +else + UNAME_S := $(shell uname -s) +ifeq ($(UNAME_S),Linux) + OSFLAG = $(LINUX) +endif +ifeq ($(UNAME_S),Darwin) + OSFLAG = $(OSX) +endif +endif + +install_qa_tools: +ifeq ($(OSFLAG),$(WINDOWS)) + echo "If you are running windows and know how to install what is needed, please contribute by adding it here!" + echo "You will need nodejs, golang, k3d, and helm." + exit 1 +else + +# linux and mac can use asdf to install all of the dependencies +ifeq ($(shell which asdf), ) + +# install asdf +ifeq ($(OSFLAG),$(LINUX)) + echo "You will need to install asdf via your linux installer https://asdf-vm.com/guide/getting-started.html" + exit 1 +else +ifeq ($(OSFLAG),$(OSX)) + brew install asdf +endif +endif +endif +# install the plugins if needed and then install the dependencies + asdf plugin-add nodejs || true + asdf plugin-add golang || true + asdf plugin-add k3d || true + asdf plugin-add helm || true + asdf plugin-add kubectl || true + asdf install +endif +# Now install the helm charts that are needed (should be os agnostic) + helm repo add plugin-qa https://raw.githubusercontent.com/goplugin/qa-charts/gh-pages/ + helm repo add bitnami https://charts.bitnami.com/bitnami + helm repo update + +.PHONY: install_gotestfmt +install_gotestfmt: + go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest + set -euo pipefail + +lint: + golangci-lint --color=always run ./... --fix -v + +build: + @go build ./... go test -run=^# ./... 
+ +# Builds the test image +# tag: the tag for the test image being built, example: tag=tate +# base_tag: the tag for the base-test-image to use, example: base_tag=latest +# suite: the test suites to build into the image, example: suite="chaos soak smoke reorg migration" +# push: set to true if you want the image pushed or leave blank if not, example: push=true +.PHONY: build_test_image +build_test_image: + ./scripts/buildTestImage $(tag) $(base_tag) "$(suite)" $(push) + +#Build a plugin docker image for local testing and push to k3d registry +.PHONY: build_push_docker_image +build_push_docker_image: + docker build -f ../core/plugin.Dockerfile --build-arg COMMIT_SHA=$(git rev-parse HEAD) --build-arg PLUGIN_USER=plugin -t 127.0.0.1:5000/plugin:develop ../ ; docker push 127.0.0.1:5000/plugin:develop + +#Build a plugin docker image in plugin mode for local testing and push to k3d registry +.PHONY: build_push_plugin_docker_image +build_push_plugin_docker_image: + docker build -f ../plugins/plugin.Dockerfile --build-arg COMMIT_SHA=$(git rev-parse HEAD) --build-arg PLUGIN_USER=plugin -t 127.0.0.1:5000/plugin:develop ../ ; docker push 127.0.0.1:5000/plugin:develop + +# Spins up containers needed to collect traces for local testing +.PHONY: run_tracing +run_tracing: + cd ../.github/tracing + docker compose -f ../.github/tracing/local-smoke-docker-compose.yaml up + +## Test Runner +.PHONY: run +run: + go run . + +## All commands will use 16 threads to run tests in parallel. To change this, use -test.parallel n +## Remember to set selected_networks and CL image in the TOML file (e.g. 
overrides.toml) + +# Smoke +.PHONY: test_smoke_product +test_smoke_product: ## Run smoke tests for specific product ex: make test_smoke_product product="cron" args="--focus @cron -p" + ARGS="$(args)" PRODUCT=$(product) ./scripts/run_product_tests + +# Chaos +.PHONY: test_chaos_pods_raw +test_chaos_pods_raw: + go test -timeout 2h -v -count=1 $(args) -p 2 -run 'Test/.*pod-chaos' ./chaos + +.PHONY: test_chaos_network_raw +test_chaos_network_raw: + go test -timeout 2h -v -count=1 $(args) -p 2 -run 'Test/.*network-chaos' ./chaos + +.PHONY: test_chaos_pods +test_chaos_pods: install_gotestfmt ## Run all smoke tests + TEST_LOG_LEVEL="disabled" \ + go test -timeout 2h -count=1 -json $(args) -run 'Test/.*pod-chaos' ./chaos 2>&1 | tee ./gotest.log | gotestfmt + +.PHONY: test_chaos_network +test_chaos_network: install_gotestfmt ## Run all smoke tests + TEST_LOG_LEVEL="disabled" \ + go test -timeout 2h -count=1 -json $(args) -run 'Test/.*network-chaos' ./chaos 2>&1 | tee ./gotest.log | gotestfmt + +.PHONY: test_chaos_verbose +test_chaos_verbose: ## Run all smoke tests with verbose logging + go test -timeout 24h -count=1 -v $(args) ./chaos + +# Migrations +.PHONY: test_node_migrations +test_node_migrations: install_gotestfmt ## Run all node migration tests. 
+ TEST_LOG_LEVEL="disabled" \ + go test -timeout 1h -count=1 -json $(args) ./migration 2>&1 | tee /tmp/gotest.log | gotestfmt + +.PHONY: test_node_migrations_simulated +test_node_migrations_simulated: install_gotestfmt + TEST_LOG_LEVEL="disabled" \ + go test -timeout 1h -count=1 -json $(args) ./migration 2>&1 | tee /tmp/gotest.log | gotestfmt + +.PHONY: test_node_migrations_verbose +test_node_migrations_verbose: + go test -timeout 1h -count=1 -v $(args) ./migration + +.PHONY: test_node_migrations_simulated_verbose +test_node_migrations_simulated_verbose: + go test -timeout 1h -count=1 -v $(args) ./migration + +# Soak +.PHONY: test_soak_ocr +test_soak_ocr: + go test -v -count=1 -run TestOCRSoak ./soak + +.PHONY: test_soak_ocr_simulated +test_soak_ocr_simulated: + go test -v -count=1 -run TestOCRSoak ./soak + +.PHONY: test_soak_forwarder_ocr +test_soak_forwarder_ocr: + go test -v -count=1 -run TestForwarderOCRSoak ./soak + +.PHONY: test_soak_forwarder_ocr_simulated +test_soak_forwarder_ocr_simulated: + go test -v -count=1 -run TestForwarderOCRSoak ./soak + +.PHONY: test_soak_automation +test_soak_automation: + go test -v -run ^TestAutomationBenchmark$$ ./benchmark -count=1 + +.PHONY: test_soak_automation_simulated +test_soak_automation_simulated: + go test -v -run ^TestAutomationBenchmark$$ ./benchmark -count=1 + +.PHONY: test_benchmark_automation +test_benchmark_automation: ## Run the automation benchmark tests + go test -timeout 30m -v -run ^TestAutomationBenchmark$$ ./benchmark -count=1 + +.PHONY: test_reorg_automation +test_reorg_automation: ## Run the automation reorg tests + go test -timeout 300m -v -run ^TestAutomationReorg$$ ./reorg -count=1 | tee automation_reorg_run_`date +"%Y%m%d-%H%M%S"`.log + +# image: the name for the plugin image being built, example: image=plugin +# tag: the tag for the plugin image being built, example: tag=latest +# example usage: make build_docker_image image=plugin tag=latest +.PHONY: build_docker_image +build_docker_image: + 
docker build -f ../core/plugin.Dockerfile --build-arg COMMIT_SHA=$(git rev-parse HEAD) --build-arg PLUGIN_USER=plugin -t $(image):$(tag) ../ + +# image: the name for the plugin image being built, example: image=plugin +# tag: the tag for the plugin image being built, example: tag=latest +# example usage: make build_docker_image image=plugin tag=latest +.PHONY: build_plugin_docker_image +build_plugin_docker_image: + docker build -f ../plugins/plugin.Dockerfile --build-arg COMMIT_SHA=$(git rev-parse HEAD) --build-arg PLUGIN_USER=plugin -t 127.0.0.1:5000/plugin:develop ../ + +# image: the name for the plugin image being built, example: image=plugin +# tag: the tag for the plugin image being built, example: tag=latest +# args: the args to pass to the test runner, example: args="--focus @cron -p" +# product: the product to run tests for, example: product=cron +# example usage: make run_test_with_local_image image=plugin tag=latest-dev product=cron +# remember to put the case CL image name and tag in the TOML config (and don't forget about selected network configuration) +.PHONY: run_test_with_local_image +run_test_with_local_image: build_docker_image + ARGS="$(args)" \ + PRODUCT=$(product) \ + ./scripts/run_product_tests + +# removes all occurrences of .run.id file in current folder and it's subdirectories +# before making any changes lists all file locations and awaits user confirmation +remove_test_execution_artefacts: + ./scripts/search_and_delete.sh .run.id \ No newline at end of file diff --git a/integration-tests/README.md b/integration-tests/README.md new file mode 100644 index 00000000..b3133978 --- /dev/null +++ b/integration-tests/README.md @@ -0,0 +1,47 @@ +# Integration Tests + +Here lives the integration tests for plugin, utilizing our [plugin-testing-framework](https://github.com/goplugin/plugin-testing-framework). 
+ +## NOTE: Move to Testcontainers + +If you have previously run these smoke tests using GitHub Actions or some sort of Kubernetes setup, that method is no longer necessary. We have moved the majority of our tests to utilize plain Docker containers (with the help of [Testcontainers](https://golang.testcontainers.org/)). This should make tests faster, more stable, and enable you to run them on your local machine without much hassle. + +## Requirements + +1. [Go](https://go.dev/) +2. [Docker](https://www.docker.com/) +3. You'll probably want to [increase the resources available to Docker](https://stackoverflow.com/questions/44533319/how-to-assign-more-memory-to-docker-container) as most tests require quite a few containers (e.g. OCR requires 6 Plugin nodes, 6 databases, a simulated blockchain, and a mock server). + +## Configure + +See the [example.env](./example.env) file for environment variables you can set to configure things like network settings, Plugin version, and log level. Remember to use `source .env` to activate your settings. + +## Build + +If you'd like to run the tests on a local build of Plugin, you can point to your own docker image, or build a fresh one with `make`. + +`make build_docker_image image= tag=` + +e.g. + +`make build_docker_image image=plugin tag=test-tag` + +You'll want to set the `PLUGIN_IMAGE` and `PLUGIN_VERSION` env values appropriately as well. See [example.env](./example.env) for more details. + +## Run + +`go test ./smoke/_test.go` + +Most test files have a couple of tests, it's recommended to look into the file and focus on a specific one if possible. 90% of the time this will probably be the `Basic` test. See [ocr_test.go](./smoke/ocr_test.go) for example, which contains the `TestOCRBasic` test. + +`go test ./smoke/ocr_test.go -run TestOCRBasic` + +It's generally recommended to run only one test at a time on a local machine as it needs a lot of docker containers and can peg your resources otherwise. 
You will see docker containers spin up on your machine for each component of the test where you can inspect logs. + +## Analyze + +You can see the results of each test in the terminal with normal `go test` output. If a test fails, logs of each Plugin container will dump into the `smoke/logs/` folder for later analysis. You can also see these logs in CI uploaded as GitHub artifacts. + +## Running Soak, Performance, Benchmark, and Chaos Tests + +These tests remain bound to a Kubernetes run environment, and require more complex setup and running instructions not documented here. We endeavor to make these easier to run and configure, but for the time being please seek a member of the QA/Test Tooling team if you want to run these. diff --git a/integration-tests/actions/actions.go b/integration-tests/actions/actions.go new file mode 100644 index 00000000..f37cee10 --- /dev/null +++ b/integration-tests/actions/actions.go @@ -0,0 +1,562 @@ +// Package actions enables common plugin interactions +package actions + +import ( + "context" + "crypto/ecdsa" + "encoding/json" + "errors" + "fmt" + "math/big" + "strings" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/rs/zerolog/log" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/plugin-testing-framework/blockchain" + ctfClient "github.com/goplugin/plugin-testing-framework/client" + "github.com/goplugin/plugin-testing-framework/k8s/environment" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/testreporters" + "github.com/goplugin/plugin-testing-framework/utils/conversions" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + 
"github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +// ContractDeploymentInterval After how many contract actions to wait before starting any more +// Example: When deploying 1000 contracts, pause after every ContractDeploymentInterval contracts have been deployed to wait before continuing +var ContractDeploymentInterval = 200 + +// FundPluginNodes will fund all of the provided Plugin nodes with a set amount of native currency +func FundPluginNodes( + nodes []*client.PluginK8sClient, + client blockchain.EVMClient, + amount *big.Float, +) error { + for _, cl := range nodes { + toAddress, err := cl.PrimaryEthAddress() + if err != nil { + return err + } + recipient := common.HexToAddress(toAddress) + msg := ethereum.CallMsg{ + From: common.HexToAddress(client.GetDefaultWallet().Address()), + To: &recipient, + Value: conversions.EtherToWei(amount), + } + gasEstimates, err := client.EstimateGas(msg) + if err != nil { + return err + } + err = client.Fund(toAddress, amount, gasEstimates) + if err != nil { + return err + } + } + return client.WaitForEvents() +} + +// FundPluginNodesAddress will fund each of the provided Plugin nodes' address at the given key index with a set amount of native currency +func FundPluginNodesAddress( + nodes []*client.PluginK8sClient, + client blockchain.EVMClient, + amount *big.Float, + keyIndex int, +) error { + for _, cl := range nodes { + toAddress, err := cl.EthAddresses() + if err != nil { + return err + } + toAddr := common.HexToAddress(toAddress[keyIndex]) + gasEstimates, err := client.EstimateGas(ethereum.CallMsg{ + To: &toAddr, + }) + if err != nil { + return err + } + err = client.Fund(toAddress[keyIndex], amount, gasEstimates) + if err != nil { + return err + } + } + return client.WaitForEvents() +} + +// FundPluginNodesAddresses will fund all of the provided Plugin nodes' addresses with a set amount of native currency +func FundPluginNodesAddresses( + nodes []*client.PluginClient, + 
client blockchain.EVMClient, + amount *big.Float, +) error { + for _, cl := range nodes { + toAddress, err := cl.EthAddressesForChain(client.GetChainID().String()) + if err != nil { + return err + } + for _, addr := range toAddress { + toAddr := common.HexToAddress(addr) + gasEstimates, err := client.EstimateGas(ethereum.CallMsg{ + To: &toAddr, + }) + if err != nil { + return err + } + err = client.Fund(addr, amount, gasEstimates) + if err != nil { + return err + } + } + } + return client.WaitForEvents() +} + +// FundPluginNodesLink will fund all of the provided Plugin nodes with a set amount of link token +func FundPluginNodesLink( + nodes []*client.PluginK8sClient, + blockchain blockchain.EVMClient, + linkToken contracts.LinkToken, + linkAmount *big.Int, +) error { + for _, cl := range nodes { + toAddress, err := cl.PrimaryEthAddress() + if err != nil { + return err + } + err = linkToken.Transfer(toAddress, linkAmount) + if err != nil { + return err + } + } + return blockchain.WaitForEvents() +} + +// PluginNodeAddresses will return all the on-chain wallet addresses for a set of Plugin nodes +func PluginNodeAddresses(nodes []*client.PluginK8sClient) ([]common.Address, error) { + addresses := make([]common.Address, 0) + for _, node := range nodes { + primaryAddress, err := node.PrimaryEthAddress() + if err != nil { + return nil, err + } + addresses = append(addresses, common.HexToAddress(primaryAddress)) + } + return addresses, nil +} + +// PluginNodeAddressesAtIndex will return the on-chain wallet address at the given key index for a set of Plugin nodes +func PluginNodeAddressesAtIndex(nodes []*client.PluginK8sClient, keyIndex int) ([]common.Address, error) { + addresses := make([]common.Address, 0) + for _, node := range nodes { + nodeAddresses, err := node.EthAddresses() + if err != nil { + return nil, err + } + addresses = append(addresses, common.HexToAddress(nodeAddresses[keyIndex])) + } + return addresses, nil +} + +// SetPluginAPIPageSize specifies the page size from the 
Plugin API, useful for high volume testing +func SetPluginAPIPageSize(nodes []*client.PluginK8sClient, pageSize int) { + for _, n := range nodes { + n.SetPageSize(pageSize) + } +} + +// ExtractRequestIDFromJobRun extracts RequestID from job runs response +func ExtractRequestIDFromJobRun(jobDecodeData client.RunsResponseData) ([]byte, error) { + var taskRun client.TaskRun + for _, tr := range jobDecodeData.Attributes.TaskRuns { + if tr.Type == "ethabidecodelog" { + taskRun = tr + } + } + var decodeLogTaskRun *client.DecodeLogTaskRun + if err := json.Unmarshal([]byte(taskRun.Output), &decodeLogTaskRun); err != nil { + return nil, err + } + rqInts := decodeLogTaskRun.RequestID + return rqInts, nil +} + +// EncodeOnChainVRFProvingKey encodes uncompressed public VRF key to on-chain representation +func EncodeOnChainVRFProvingKey(vrfKey client.VRFKey) ([2]*big.Int, error) { + uncompressed := vrfKey.Data.Attributes.Uncompressed + provingKey := [2]*big.Int{} + var set1 bool + var set2 bool + // strip 0x to convert to int + provingKey[0], set1 = new(big.Int).SetString(uncompressed[2:66], 16) + if !set1 { + return [2]*big.Int{}, fmt.Errorf("can not convert VRF key to *big.Int") + } + provingKey[1], set2 = new(big.Int).SetString(uncompressed[66:], 16) + if !set2 { + return [2]*big.Int{}, fmt.Errorf("can not convert VRF key to *big.Int") + } + return provingKey, nil +} + +// GetMockserverInitializerDataForOTPE creates mocked weiwatchers data needed for otpe +func GetMockserverInitializerDataForOTPE( + OCRInstances []contracts.OffchainAggregator, + pluginNodes []*client.PluginK8sClient, +) (interface{}, error) { + var contractsInfo []ctfClient.ContractInfoJSON + + for index, OCRInstance := range OCRInstances { + contractInfo := ctfClient.ContractInfoJSON{ + ContractVersion: 4, + Path: fmt.Sprintf("contract_%d", index), + Status: "live", + ContractAddress: OCRInstance.Address(), + } + + contractsInfo = append(contractsInfo, contractInfo) + } + + contractsInitializer := 
ctfClient.HttpInitializer{ + Request: ctfClient.HttpRequest{Path: "/contracts.json"}, + Response: ctfClient.HttpResponse{Body: contractsInfo}, + } + + var nodesInfo []ctfClient.NodeInfoJSON + + for _, plugin := range pluginNodes { + ocrKeys, err := plugin.MustReadOCRKeys() + if err != nil { + return nil, err + } + nodeInfo := ctfClient.NodeInfoJSON{ + NodeAddress: []string{ocrKeys.Data[0].Attributes.OnChainSigningAddress}, + ID: ocrKeys.Data[0].ID, + } + nodesInfo = append(nodesInfo, nodeInfo) + } + + nodesInitializer := ctfClient.HttpInitializer{ + Request: ctfClient.HttpRequest{Path: "/nodes.json"}, + Response: ctfClient.HttpResponse{Body: nodesInfo}, + } + initializers := []ctfClient.HttpInitializer{contractsInitializer, nodesInitializer} + return initializers, nil +} + +// TeardownSuite tears down networks/clients and environment and creates a logs folder for failed tests in the +// specified path. Can also accept a testreporter (if one was used) to log further results +func TeardownSuite( + t *testing.T, + env *environment.Environment, + pluginNodes []*client.PluginK8sClient, + optionalTestReporter testreporters.TestReporter, // Optionally pass in a test reporter to log further metrics + failingLogLevel zapcore.Level, // Examines logs after the test, and fails the test if any Plugin logs are found at or above provided level + grafnaUrlProvider testreporters.GrafanaURLProvider, + clients ...blockchain.EVMClient, +) error { + l := logging.GetTestLogger(t) + if err := testreporters.WriteTeardownLogs(t, env, optionalTestReporter, failingLogLevel, grafnaUrlProvider); err != nil { + return fmt.Errorf("Error dumping environment logs, leaving environment running for manual retrieval, err: %w", err) + } + // Delete all jobs to stop depleting the funds + err := DeleteAllJobs(pluginNodes) + if err != nil { + l.Warn().Msgf("Error deleting jobs %+v", err) + } + + for _, c := range clients { + if c != nil && pluginNodes != nil && len(pluginNodes) > 0 { + if err := 
ReturnFunds(pluginNodes, c); err != nil { + // This printed line is required for tests that use real funds to propagate the failure + // out to the system running the test. Do not remove + fmt.Println(environment.FAILED_FUND_RETURN) + l.Error().Err(err).Str("Namespace", env.Cfg.Namespace). + Msg("Error attempting to return funds from plugin nodes to network's default wallet. " + + "Environment is left running so you can try manually!") + } + } else { + l.Info().Msg("Successfully returned funds from plugin nodes to default network wallets") + } + // nolint + if c != nil { + err := c.Close() + if err != nil { + return err + } + } + } + + return env.Shutdown() +} + +// TeardownRemoteSuite is used when running a test within a remote-test-runner, like for long-running performance and +// soak tests +func TeardownRemoteSuite( + t *testing.T, + namespace string, + pluginNodes []*client.PluginK8sClient, + optionalTestReporter testreporters.TestReporter, // Optionally pass in a test reporter to log further metrics + grafnaUrlProvider testreporters.GrafanaURLProvider, + client blockchain.EVMClient, +) error { + l := logging.GetTestLogger(t) + var err error + if err = testreporters.SendReport(t, namespace, "./", optionalTestReporter, grafnaUrlProvider); err != nil { + l.Warn().Err(err).Msg("Error writing test report") + } + // Delete all jobs to stop depleting the funds + err = DeleteAllJobs(pluginNodes) + if err != nil { + l.Warn().Msgf("Error deleting jobs %+v", err) + } + + if err = ReturnFunds(pluginNodes, client); err != nil { + l.Error().Err(err).Str("Namespace", namespace). + Msg("Error attempting to return funds from plugin nodes to network's default wallet. 
" + + "Environment is left running so you can try manually!") + } + return err +} + +func DeleteAllJobs(pluginNodes []*client.PluginK8sClient) error { + for _, node := range pluginNodes { + if node == nil { + return fmt.Errorf("found a nil plugin node in the list of plugin nodes while tearing down: %v", pluginNodes) + } + jobs, _, err := node.ReadJobs() + if err != nil { + return fmt.Errorf("error reading jobs from plugin node, err: %w", err) + } + for _, maps := range jobs.Data { + if _, ok := maps["id"]; !ok { + return fmt.Errorf("error reading job id from plugin node's jobs %+v", jobs.Data) + } + id := maps["id"].(string) + _, err := node.DeleteJob(id) + if err != nil { + return fmt.Errorf("error deleting job from plugin node, err: %w", err) + } + } + } + return nil +} + +// ReturnFunds attempts to return all the funds from the plugin nodes to the network's default address +// all from a remote, k8s style environment +func ReturnFunds(pluginNodes []*client.PluginK8sClient, blockchainClient blockchain.EVMClient) error { + if blockchainClient == nil { + return fmt.Errorf("blockchain client is nil, unable to return funds from plugin nodes") + } + log.Info().Msg("Attempting to return Plugin node funds to default network wallets") + if blockchainClient.NetworkSimulated() { + log.Info().Str("Network Name", blockchainClient.GetNetworkName()). + Msg("Network is a simulated network. Skipping fund return.") + return nil + } + + for _, pluginNode := range pluginNodes { + fundedKeys, err := pluginNode.ExportEVMKeysForChain(blockchainClient.GetChainID().String()) + if err != nil { + return err + } + for _, key := range fundedKeys { + keyToDecrypt, err := json.Marshal(key) + if err != nil { + return err + } + // This can take up a good bit of RAM and time. When running on the remote-test-runner, this can lead to OOM + // issues. So we avoid running in parallel; slower, but safer. 
+ decryptedKey, err := keystore.DecryptKey(keyToDecrypt, client.PluginKeyPassword) + if err != nil { + return err + } + err = blockchainClient.ReturnFunds(decryptedKey.PrivateKey) + if err != nil { + log.Error().Err(err).Str("Address", fundedKeys[0].Address).Msg("Error returning funds from Plugin node") + } + } + } + return blockchainClient.WaitForEvents() +} + +// FundAddresses will fund a list of addresses with an amount of native currency +func FundAddresses(blockchain blockchain.EVMClient, amount *big.Float, addresses ...string) error { + for _, address := range addresses { + toAddr := common.HexToAddress(address) + gasEstimates, err := blockchain.EstimateGas(ethereum.CallMsg{ + To: &toAddr, + }) + if err != nil { + return err + } + if err := blockchain.Fund(address, amount, gasEstimates); err != nil { + return err + } + } + return blockchain.WaitForEvents() +} + +// EncodeOnChainExternalJobID encodes external job uuid to on-chain representation +func EncodeOnChainExternalJobID(jobID uuid.UUID) [32]byte { + var ji [32]byte + copy(ji[:], strings.Replace(jobID.String(), "-", "", 4)) + return ji +} + +// UpgradePluginNodeVersions upgrades all Plugin nodes to a new version, and then runs the test environment +// to apply the upgrades +func UpgradePluginNodeVersions( + testEnvironment *environment.Environment, + newImage, newVersion string, + nodes ...*client.PluginK8sClient, +) error { + if newImage == "" || newVersion == "" { + return errors.New("New image and new version is needed to upgrade the node") + } + for _, node := range nodes { + if err := node.UpgradeVersion(testEnvironment, newImage, newVersion); err != nil { + return err + } + } + err := testEnvironment.RunUpdated(len(nodes)) + if err != nil { // Run the new environment and wait for changes to show + return err + } + return client.ReconnectPluginNodes(testEnvironment, nodes) +} + +func DeployPLIToken(cd contracts.ContractDeployer) (contracts.LinkToken, error) { + linkToken, err := 
cd.DeployLinkTokenContract() + if err != nil { + return nil, err + } + return linkToken, err +} + +func DeployMockETHLinkFeed(cd contracts.ContractDeployer, answer *big.Int) (contracts.MockETHPLIFeed, error) { + mockETHPLIFeed, err := cd.DeployMockETHPLIFeed(answer) + if err != nil { + return nil, err + } + return mockETHPLIFeed, err +} + +// todo - move to CTF +func GenerateWallet() (common.Address, error) { + privateKey, err := crypto.GenerateKey() + if err != nil { + return common.Address{}, err + } + publicKey := privateKey.Public() + publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return common.Address{}, fmt.Errorf("cannot assert type: publicKey is not of type *ecdsa.PublicKey") + } + return crypto.PubkeyToAddress(*publicKeyECDSA), nil +} + +// todo - move to CTF +func FundAddress(client blockchain.EVMClient, sendingKey string, fundingToSendEth *big.Float) error { + address := common.HexToAddress(sendingKey) + gasEstimates, err := client.EstimateGas(ethereum.CallMsg{ + To: &address, + }) + if err != nil { + return err + } + err = client.Fund(sendingKey, fundingToSendEth, gasEstimates) + if err != nil { + return err + } + return nil +} + +// todo - move to CTF +func GetTxFromAddress(tx *types.Transaction) (string, error) { + from, err := types.Sender(types.LatestSignerForChainID(tx.ChainId()), tx) + return from.String(), err +} + +// todo - move to CTF +func GetTxByHash(ctx context.Context, client blockchain.EVMClient, hash common.Hash) (*types.Transaction, bool, error) { + return client.(*blockchain.EthereumMultinodeClient). + DefaultClient.(*blockchain.EthereumClient). + Client. 
+ TransactionByHash(ctx, hash) +} + +// todo - move to CTF +func DecodeTxInputData(abiString string, data []byte) (map[string]interface{}, error) { + jsonABI, err := abi.JSON(strings.NewReader(abiString)) + if err != nil { + return nil, err + } + methodSigData := data[:4] + inputsSigData := data[4:] + method, err := jsonABI.MethodById(methodSigData) + if err != nil { + return nil, err + } + inputsMap := make(map[string]interface{}) + if err := method.Inputs.UnpackIntoMap(inputsMap, inputsSigData); err != nil { + return nil, err + } + return inputsMap, nil +} + +// todo - move to EVMClient +func WaitForBlockNumberToBe( + waitForBlockNumberToBe uint64, + client blockchain.EVMClient, + wg *sync.WaitGroup, + timeout time.Duration, + t testing.TB, +) (uint64, error) { + blockNumberChannel := make(chan uint64) + errorChannel := make(chan error) + testContext, testCancel := context.WithTimeout(context.Background(), timeout) + defer testCancel() + + ticker := time.NewTicker(time.Second * 1) + var blockNumber uint64 + for { + select { + case <-testContext.Done(): + ticker.Stop() + wg.Done() + return blockNumber, + fmt.Errorf("timeout waiting for Block Number to be: %d. 
Last recorded block number was: %d", + waitForBlockNumberToBe, blockNumber) + case <-ticker.C: + go func() { + currentBlockNumber, err := client.LatestBlockNumber(testcontext.Get(t)) + if err != nil { + errorChannel <- err + } + blockNumberChannel <- currentBlockNumber + }() + case blockNumber = <-blockNumberChannel: + if blockNumber == waitForBlockNumberToBe { + ticker.Stop() + wg.Done() + return blockNumber, nil + } + case err := <-errorChannel: + ticker.Stop() + wg.Done() + return 0, err + } + } +} diff --git a/integration-tests/actions/actions_local.go b/integration-tests/actions/actions_local.go new file mode 100644 index 00000000..b5edc496 --- /dev/null +++ b/integration-tests/actions/actions_local.go @@ -0,0 +1,20 @@ +// Package actions enables common plugin interactions +package actions + +import ( + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" +) + +// UpgradePluginNodeVersions upgrades all Plugin nodes to a new version, and then runs the test environment +// to apply the upgrades +func UpgradePluginNodeVersionsLocal( + newImage, newVersion string, + nodes ...*test_env.ClNode, +) error { + for _, node := range nodes { + if err := node.UpgradeVersion(newImage, newVersion); err != nil { + return err + } + } + return nil +} diff --git a/integration-tests/actions/automation_ocr_helpers.go b/integration-tests/actions/automation_ocr_helpers.go new file mode 100644 index 00000000..f2d04316 --- /dev/null +++ b/integration-tests/actions/automation_ocr_helpers.go @@ -0,0 +1,375 @@ +package actions + +//revive:disable:dot-imports +import ( + "encoding/json" + "fmt" + "math" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + "github.com/stretchr/testify/require" + "gopkg.in/guregu/null.v4" + + ocr2 "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + ocr3 "github.com/goplugin/libocr/offchainreporting2plus/ocr3confighelper" + "github.com/goplugin/libocr/offchainreporting2plus/types" 
+ + ocr2keepers20config "github.com/goplugin/plugin-automation/pkg/v2/config" + ocr2keepers30config "github.com/goplugin/plugin-automation/pkg/v3/config" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/logging" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" +) + +func BuildAutoOCR2ConfigVars( + t *testing.T, + pluginNodes []*client.PluginK8sClient, + registryConfig contracts.KeeperRegistrySettings, + registrar string, + deltaStage time.Duration, +) (contracts.OCRv2Config, error) { + return BuildAutoOCR2ConfigVarsWithKeyIndex(t, pluginNodes, registryConfig, registrar, deltaStage, 0, common.Address{}) +} + +func BuildAutoOCR2ConfigVarsWithKeyIndex( + t *testing.T, + pluginNodes []*client.PluginK8sClient, + registryConfig contracts.KeeperRegistrySettings, + registrar string, + deltaStage time.Duration, + keyIndex int, + registryOwnerAddress common.Address, +) (contracts.OCRv2Config, error) { + l := logging.GetTestLogger(t) + S, oracleIdentities, err := GetOracleIdentitiesWithKeyIndex(pluginNodes, keyIndex) + if err != nil { + return contracts.OCRv2Config{}, err + } + + var offC []byte + var signerOnchainPublicKeys []types.OnchainPublicKey + var transmitterAccounts []types.Account + var f uint8 + var offchainConfigVersion uint64 + var offchainConfig []byte + + if registryConfig.RegistryVersion == ethereum.RegistryVersion_2_1 { + offC, err = json.Marshal(ocr2keepers30config.OffchainConfig{ + TargetProbability: "0.999", + TargetInRounds: 1, + PerformLockoutWindow: 3600000, // Intentionally set to be higher than in prod for testing purpose + GasLimitPerReport: 5_300_000, + 
GasOverheadPerUpkeep: 300_000, + MinConfirmations: 0, + MaxUpkeepBatchSize: 10, + }) + if err != nil { + return contracts.OCRv2Config{}, err + } + + signerOnchainPublicKeys, transmitterAccounts, f, _, offchainConfigVersion, offchainConfig, err = ocr3.ContractSetConfigArgsForTests( + 10*time.Second, // deltaProgress time.Duration, + 15*time.Second, // deltaResend time.Duration, + 500*time.Millisecond, // deltaInitial time.Duration, + 1000*time.Millisecond, // deltaRound time.Duration, + 200*time.Millisecond, // deltaGrace time.Duration, + 300*time.Millisecond, // deltaCertifiedCommitRequest time.Duration + deltaStage, // deltaStage time.Duration, + 24, // rMax uint64, + S, // s []int, + oracleIdentities, // oracles []OracleIdentityExtra, + offC, // reportingPluginConfig []byte, + 20*time.Millisecond, // maxDurationQuery time.Duration, + 20*time.Millisecond, // maxDurationObservation time.Duration, // good to here + 1200*time.Millisecond, // maxDurationShouldAcceptAttestedReport time.Duration, + 20*time.Millisecond, // maxDurationShouldTransmitAcceptedReport time.Duration, + 1, // f int, + nil, // onchainConfig []byte, + ) + if err != nil { + return contracts.OCRv2Config{}, err + } + } else { + offC, err = json.Marshal(ocr2keepers20config.OffchainConfig{ + TargetProbability: "0.999", + TargetInRounds: 1, + PerformLockoutWindow: 3600000, // Intentionally set to be higher than in prod for testing purpose + GasLimitPerReport: 5_300_000, + GasOverheadPerUpkeep: 300_000, + SamplingJobDuration: 3000, + MinConfirmations: 0, + MaxUpkeepBatchSize: 1, + }) + if err != nil { + return contracts.OCRv2Config{}, err + } + + signerOnchainPublicKeys, transmitterAccounts, f, _, offchainConfigVersion, offchainConfig, err = ocr2.ContractSetConfigArgsForTests( + 10*time.Second, // deltaProgress time.Duration, + 15*time.Second, // deltaResend time.Duration, + 3000*time.Millisecond, // deltaRound time.Duration, + 200*time.Millisecond, // deltaGrace time.Duration, + deltaStage, // 
deltaStage time.Duration, + 24, // rMax uint8, + S, // s []int, + oracleIdentities, // oracles []OracleIdentityExtra, + offC, // reportingPluginConfig []byte, + 20*time.Millisecond, // maxDurationQuery time.Duration, + 20*time.Millisecond, // maxDurationObservation time.Duration, + 1200*time.Millisecond, // maxDurationReport time.Duration, + 20*time.Millisecond, // maxDurationShouldAcceptFinalizedReport time.Duration, + 20*time.Millisecond, // maxDurationShouldTransmitAcceptedReport time.Duration, + 1, // f int, + nil, // onchainConfig []byte, + ) + if err != nil { + return contracts.OCRv2Config{}, err + } + } + + var signers []common.Address + for _, signer := range signerOnchainPublicKeys { + require.Equal(t, 20, len(signer), "OnChainPublicKey '%v' has wrong length for address", signer) + signers = append(signers, common.BytesToAddress(signer)) + } + + var transmitters []common.Address + for _, transmitter := range transmitterAccounts { + require.True(t, common.IsHexAddress(string(transmitter)), "TransmitAccount '%s' is not a valid Ethereum address", string(transmitter)) + transmitters = append(transmitters, common.HexToAddress(string(transmitter))) + } + + onchainConfig, err := registryConfig.EncodeOnChainConfig(registrar, registryOwnerAddress) + if err != nil { + return contracts.OCRv2Config{}, err + } + + l.Info().Msg("Done building OCR config") + return contracts.OCRv2Config{ + Signers: signers, + Transmitters: transmitters, + F: f, + OnchainConfig: onchainConfig, + OffchainConfigVersion: offchainConfigVersion, + OffchainConfig: offchainConfig, + }, nil +} + +// CreateOCRKeeperJobs bootstraps the first node and to the other nodes sends ocr jobs +func CreateOCRKeeperJobs( + t *testing.T, + pluginNodes []*client.PluginK8sClient, + registryAddr string, + chainID int64, + keyIndex int, + registryVersion ethereum.KeeperRegistryVersion, +) { + l := logging.GetTestLogger(t) + bootstrapNode := pluginNodes[0] + bootstrapP2PIds, err := bootstrapNode.MustReadP2PKeys() + 
require.NoError(t, err, "Shouldn't fail reading P2P keys from bootstrap node") + bootstrapP2PId := bootstrapP2PIds.Data[0].Attributes.PeerID + + var contractVersion string + if registryVersion == ethereum.RegistryVersion_2_1 { + contractVersion = "v2.1" + } else if registryVersion == ethereum.RegistryVersion_2_0 { + contractVersion = "v2.0" + } else { + require.FailNow(t, "v2.0 and v2.1 are the only supported versions") + } + + bootstrapSpec := &client.OCR2TaskJobSpec{ + Name: "ocr2 bootstrap node " + registryAddr, + JobType: "bootstrap", + OCR2OracleSpec: job.OCR2OracleSpec{ + ContractID: registryAddr, + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": int(chainID), + }, + ContractConfigTrackerPollInterval: *models.NewInterval(time.Second * 15), + }, + } + _, err = bootstrapNode.MustCreateJob(bootstrapSpec) + require.NoError(t, err, "Shouldn't fail creating bootstrap job on bootstrap node") + // TODO: Use service name returned by plugin-env once that is available + P2Pv2Bootstrapper := fmt.Sprintf("%s@%s-node-1:%d", bootstrapP2PId, bootstrapNode.Name(), 6690) + + for nodeIndex := 1; nodeIndex < len(pluginNodes); nodeIndex++ { + nodeTransmitterAddress, err := pluginNodes[nodeIndex].EthAddresses() + require.NoError(t, err, "Shouldn't fail getting primary ETH address from OCR node %d", nodeIndex+1) + nodeOCRKeys, err := pluginNodes[nodeIndex].MustReadOCR2Keys() + require.NoError(t, err, "Shouldn't fail getting OCR keys from OCR node %d", nodeIndex+1) + var nodeOCRKeyId []string + for _, key := range nodeOCRKeys.Data { + if key.Attributes.ChainType == string(chaintype.EVM) { + nodeOCRKeyId = append(nodeOCRKeyId, key.ID) + break + } + } + + autoOCR2JobSpec := client.OCR2TaskJobSpec{ + Name: "ocr2 " + registryAddr, + JobType: "offchainreporting2", + OCR2OracleSpec: job.OCR2OracleSpec{ + PluginType: "ocr2automation", + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": int(chainID), + }, + PluginConfig: map[string]interface{}{ + 
"mercuryCredentialName": "\"cred1\"", + "contractVersion": "\"" + contractVersion + "\"", + }, + ContractConfigTrackerPollInterval: *models.NewInterval(time.Second * 15), + ContractID: registryAddr, // registryAddr + OCRKeyBundleID: null.StringFrom(nodeOCRKeyId[0]), // get node ocr2config.ID + TransmitterID: null.StringFrom(nodeTransmitterAddress[keyIndex]), // node addr + P2PV2Bootstrappers: pq.StringArray{P2Pv2Bootstrapper}, // bootstrap node key and address @bootstrap:8000 + }, + } + + _, err = pluginNodes[nodeIndex].MustCreateJob(&autoOCR2JobSpec) + require.NoError(t, err, "Shouldn't fail creating OCR Task job on OCR node %d err: %+v", nodeIndex+1, err) + } + l.Info().Msg("Done creating OCR automation jobs") +} + +// DeployAutoOCRRegistryAndRegistrar registry and registrar +func DeployAutoOCRRegistryAndRegistrar( + t *testing.T, + registryVersion ethereum.KeeperRegistryVersion, + registrySettings contracts.KeeperRegistrySettings, + linkToken contracts.LinkToken, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, +) (contracts.KeeperRegistry, contracts.KeeperRegistrar) { + registry := deployRegistry(t, registryVersion, registrySettings, contractDeployer, client, linkToken) + registrar := deployRegistrar(t, registryVersion, registry, linkToken, contractDeployer, client) + + return registry, registrar +} + +func DeployConsumers(t *testing.T, registry contracts.KeeperRegistry, registrar contracts.KeeperRegistrar, linkToken contracts.LinkToken, contractDeployer contracts.ContractDeployer, client blockchain.EVMClient, numberOfUpkeeps int, linkFundsForEachUpkeep *big.Int, upkeepGasLimit uint32, isLogTrigger bool, isMercury bool) ([]contracts.KeeperConsumer, []*big.Int) { + upkeeps := DeployKeeperConsumers(t, contractDeployer, client, numberOfUpkeeps, isLogTrigger, isMercury) + var upkeepsAddresses []string + for _, upkeep := range upkeeps { + upkeepsAddresses = append(upkeepsAddresses, upkeep.Address()) + } + upkeepIds := 
RegisterUpkeepContracts( + t, linkToken, linkFundsForEachUpkeep, client, upkeepGasLimit, registry, registrar, numberOfUpkeeps, upkeepsAddresses, isLogTrigger, isMercury, + ) + return upkeeps, upkeepIds +} + +func DeployPerformanceConsumers( + t *testing.T, + registry contracts.KeeperRegistry, + registrar contracts.KeeperRegistrar, + linkToken contracts.LinkToken, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + numberOfUpkeeps int, + linkFundsForEachUpkeep *big.Int, + upkeepGasLimit uint32, + blockRange, // How many blocks to run the test for + blockInterval, // Interval of blocks that upkeeps are expected to be performed + checkGasToBurn, // How much gas should be burned on checkUpkeep() calls + performGasToBurn int64, // How much gas should be burned on performUpkeep() calls +) ([]contracts.KeeperConsumerPerformance, []*big.Int) { + upkeeps := DeployKeeperConsumersPerformance( + t, contractDeployer, client, numberOfUpkeeps, blockRange, blockInterval, checkGasToBurn, performGasToBurn, + ) + var upkeepsAddresses []string + for _, upkeep := range upkeeps { + upkeepsAddresses = append(upkeepsAddresses, upkeep.Address()) + } + upkeepIds := RegisterUpkeepContracts(t, linkToken, linkFundsForEachUpkeep, client, upkeepGasLimit, registry, registrar, numberOfUpkeeps, upkeepsAddresses, false, false) + return upkeeps, upkeepIds +} + +func DeployPerformDataCheckerConsumers( + t *testing.T, + registry contracts.KeeperRegistry, + registrar contracts.KeeperRegistrar, + linkToken contracts.LinkToken, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + numberOfUpkeeps int, + linkFundsForEachUpkeep *big.Int, + upkeepGasLimit uint32, + expectedData []byte, +) ([]contracts.KeeperPerformDataChecker, []*big.Int) { + upkeeps := DeployPerformDataChecker(t, contractDeployer, client, numberOfUpkeeps, expectedData) + var upkeepsAddresses []string + for _, upkeep := range upkeeps { + upkeepsAddresses = append(upkeepsAddresses, 
upkeep.Address()) + } + upkeepIds := RegisterUpkeepContracts(t, linkToken, linkFundsForEachUpkeep, client, upkeepGasLimit, registry, registrar, numberOfUpkeeps, upkeepsAddresses, false, false) + return upkeeps, upkeepIds +} + +func deployRegistrar( + t *testing.T, + registryVersion ethereum.KeeperRegistryVersion, + registry contracts.KeeperRegistry, + linkToken contracts.LinkToken, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, +) contracts.KeeperRegistrar { + registrarSettings := contracts.KeeperRegistrarSettings{ + AutoApproveConfigType: 2, + AutoApproveMaxAllowed: math.MaxUint16, + RegistryAddr: registry.Address(), + MinLinkJuels: big.NewInt(0), + } + registrar, err := contractDeployer.DeployKeeperRegistrar(registryVersion, linkToken.Address(), registrarSettings) + require.NoError(t, err, "Deploying KeeperRegistrar contract shouldn't fail") + err = client.WaitForEvents() + require.NoError(t, err, "Failed waiting for registrar to deploy") + return registrar +} + +func deployRegistry( + t *testing.T, + registryVersion ethereum.KeeperRegistryVersion, + registrySettings contracts.KeeperRegistrySettings, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + linkToken contracts.LinkToken, +) contracts.KeeperRegistry { + ef, err := contractDeployer.DeployMockETHPLIFeed(big.NewInt(2e18)) + require.NoError(t, err, "Deploying mock ETH-Link feed shouldn't fail") + gf, err := contractDeployer.DeployMockGasFeed(big.NewInt(2e11)) + require.NoError(t, err, "Deploying mock gas feed shouldn't fail") + err = client.WaitForEvents() + require.NoError(t, err, "Failed waiting for mock feeds to deploy") + + // Deploy the transcoder here, and then set it to the registry + transcoder := DeployUpkeepTranscoder(t, contractDeployer, client) + registry := DeployKeeperRegistry(t, contractDeployer, client, + &contracts.KeeperRegistryOpts{ + RegistryVersion: registryVersion, + LinkAddr: linkToken.Address(), + ETHFeedAddr: ef.Address(), + 
GasFeedAddr: gf.Address(), + TranscoderAddr: transcoder.Address(), + RegistrarAddr: ZeroAddress.Hex(), + Settings: registrySettings, + }, + ) + return registry +} diff --git a/integration-tests/actions/automation_ocr_helpers_local.go b/integration-tests/actions/automation_ocr_helpers_local.go new file mode 100644 index 00000000..5fdec682 --- /dev/null +++ b/integration-tests/actions/automation_ocr_helpers_local.go @@ -0,0 +1,262 @@ +package actions + +//revive:disable:dot-imports +import ( + "encoding/json" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + "github.com/rs/zerolog" + "gopkg.in/guregu/null.v4" + + ocr2 "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + ocr3 "github.com/goplugin/libocr/offchainreporting2plus/ocr3confighelper" + "github.com/goplugin/libocr/offchainreporting2plus/types" + + ocr2keepers20config "github.com/goplugin/plugin-automation/pkg/v2/config" + ocr2keepers30config "github.com/goplugin/plugin-automation/pkg/v3/config" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" +) + +func BuildAutoOCR2ConfigVarsLocal( + l zerolog.Logger, + pluginNodes []*client.PluginClient, + registryConfig contracts.KeeperRegistrySettings, + registrar string, + deltaStage time.Duration, + registryOwnerAddress common.Address, +) (contracts.OCRv2Config, error) { + return BuildAutoOCR2ConfigVarsWithKeyIndexLocal(l, pluginNodes, registryConfig, registrar, deltaStage, 0, registryOwnerAddress) +} + +func BuildAutoOCR2ConfigVarsWithKeyIndexLocal( + l zerolog.Logger, + pluginNodes []*client.PluginClient, + registryConfig contracts.KeeperRegistrySettings, + registrar string, + deltaStage 
time.Duration, + keyIndex int, + registryOwnerAddress common.Address, +) (contracts.OCRv2Config, error) { + S, oracleIdentities, err := GetOracleIdentitiesWithKeyIndexLocal(pluginNodes, keyIndex) + if err != nil { + return contracts.OCRv2Config{}, err + } + + var offC []byte + var signerOnchainPublicKeys []types.OnchainPublicKey + var transmitterAccounts []types.Account + var f uint8 + var offchainConfigVersion uint64 + var offchainConfig []byte + + if registryConfig.RegistryVersion == ethereum.RegistryVersion_2_1 { + offC, err = json.Marshal(ocr2keepers30config.OffchainConfig{ + TargetProbability: "0.999", + TargetInRounds: 1, + PerformLockoutWindow: 3600000, // Intentionally set to be higher than in prod for testing purpose + GasLimitPerReport: 5_300_000, + GasOverheadPerUpkeep: 300_000, + MinConfirmations: 0, + MaxUpkeepBatchSize: 1, + }) + if err != nil { + return contracts.OCRv2Config{}, err + } + + signerOnchainPublicKeys, transmitterAccounts, f, _, offchainConfigVersion, offchainConfig, err = ocr3.ContractSetConfigArgsForTests( + 10*time.Second, // deltaProgress time.Duration, + 15*time.Second, // deltaResend time.Duration, + 500*time.Millisecond, // deltaInitial time.Duration, + 1000*time.Millisecond, // deltaRound time.Duration, + 200*time.Millisecond, // deltaGrace time.Duration, + 300*time.Millisecond, // deltaCertifiedCommitRequest time.Duration + deltaStage, // deltaStage time.Duration, + 24, // rMax uint64, + S, // s []int, + oracleIdentities, // oracles []OracleIdentityExtra, + offC, // reportingPluginConfig []byte, + 20*time.Millisecond, // maxDurationQuery time.Duration, + 20*time.Millisecond, // maxDurationObservation time.Duration, // good to here + 1200*time.Millisecond, // maxDurationShouldAcceptAttestedReport time.Duration, + 20*time.Millisecond, // maxDurationShouldTransmitAcceptedReport time.Duration, + 1, // f int, + nil, // onchainConfig []byte, + ) + if err != nil { + return contracts.OCRv2Config{}, err + } + } else { + offC, err = 
json.Marshal(ocr2keepers20config.OffchainConfig{ + TargetProbability: "0.999", + TargetInRounds: 1, + PerformLockoutWindow: 3600000, // Intentionally set to be higher than in prod for testing purpose + GasLimitPerReport: 5_300_000, + GasOverheadPerUpkeep: 300_000, + SamplingJobDuration: 3000, + MinConfirmations: 0, + MaxUpkeepBatchSize: 1, + }) + if err != nil { + return contracts.OCRv2Config{}, err + } + + signerOnchainPublicKeys, transmitterAccounts, f, _, offchainConfigVersion, offchainConfig, err = ocr2.ContractSetConfigArgsForTests( + 10*time.Second, // deltaProgress time.Duration, + 15*time.Second, // deltaResend time.Duration, + 3000*time.Millisecond, // deltaRound time.Duration, + 200*time.Millisecond, // deltaGrace time.Duration, + deltaStage, // deltaStage time.Duration, + 24, // rMax uint8, + S, // s []int, + oracleIdentities, // oracles []OracleIdentityExtra, + offC, // reportingPluginConfig []byte, + 20*time.Millisecond, // maxDurationQuery time.Duration, + 20*time.Millisecond, // maxDurationObservation time.Duration, + 1200*time.Millisecond, // maxDurationReport time.Duration, + 20*time.Millisecond, // maxDurationShouldAcceptFinalizedReport time.Duration, + 20*time.Millisecond, // maxDurationShouldTransmitAcceptedReport time.Duration, + 1, // f int, + nil, // onchainConfig []byte, + ) + if err != nil { + return contracts.OCRv2Config{}, err + } + } + + var signers []common.Address + for _, signer := range signerOnchainPublicKeys { + if len(signer) != 20 { + return contracts.OCRv2Config{}, fmt.Errorf("OnChainPublicKey '%v' has wrong length for address", signer) + } + signers = append(signers, common.BytesToAddress(signer)) + } + + var transmitters []common.Address + for _, transmitter := range transmitterAccounts { + if !common.IsHexAddress(string(transmitter)) { + return contracts.OCRv2Config{}, fmt.Errorf("TransmitAccount '%s' is not a valid Ethereum address", string(transmitter)) + } + transmitters = append(transmitters, 
common.HexToAddress(string(transmitter))) + } + + onchainConfig, err := registryConfig.EncodeOnChainConfig(registrar, registryOwnerAddress) + if err != nil { + return contracts.OCRv2Config{}, err + } + + l.Info().Msg("Done building OCR config") + return contracts.OCRv2Config{ + Signers: signers, + Transmitters: transmitters, + F: f, + OnchainConfig: onchainConfig, + OffchainConfigVersion: offchainConfigVersion, + OffchainConfig: offchainConfig, + }, nil +} + +// CreateOCRKeeperJobs bootstraps the first node and to the other nodes sends ocr jobs +func CreateOCRKeeperJobsLocal( + l zerolog.Logger, + pluginNodes []*client.PluginClient, + registryAddr string, + chainID int64, + keyIndex int, + registryVersion ethereum.KeeperRegistryVersion, +) error { + bootstrapNode := pluginNodes[0] + bootstrapP2PIds, err := bootstrapNode.MustReadP2PKeys() + if err != nil { + l.Error().Err(err).Msg("Shouldn't fail reading P2P keys from bootstrap node") + return err + } + bootstrapP2PId := bootstrapP2PIds.Data[0].Attributes.PeerID + + var contractVersion string + if registryVersion == ethereum.RegistryVersion_2_1 { + contractVersion = "v2.1" + } else if registryVersion == ethereum.RegistryVersion_2_0 { + contractVersion = "v2.0" + } else { + return fmt.Errorf("v2.0 and v2.1 are the only supported versions") + } + + bootstrapSpec := &client.OCR2TaskJobSpec{ + Name: "ocr2 bootstrap node " + registryAddr, + JobType: "bootstrap", + OCR2OracleSpec: job.OCR2OracleSpec{ + ContractID: registryAddr, + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": int(chainID), + }, + ContractConfigTrackerPollInterval: *models.NewInterval(time.Second * 15), + }, + } + _, err = bootstrapNode.MustCreateJob(bootstrapSpec) + if err != nil { + l.Error().Err(err).Msg("Shouldn't fail creating bootstrap job on bootstrap node") + return err + } + + P2Pv2Bootstrapper := fmt.Sprintf("%s@%s:%d", bootstrapP2PId, bootstrapNode.InternalIP(), 6690) + for nodeIndex := 1; nodeIndex < len(pluginNodes); 
nodeIndex++ { + nodeTransmitterAddress, err := pluginNodes[nodeIndex].EthAddresses() + if err != nil { + l.Error().Err(err).Msgf("Shouldn't fail getting primary ETH address from OCR node %d", nodeIndex+1) + return err + } + nodeOCRKeys, err := pluginNodes[nodeIndex].MustReadOCR2Keys() + if err != nil { + l.Error().Err(err).Msgf("Shouldn't fail getting OCR keys from OCR node %d", nodeIndex+1) + return err + } + var nodeOCRKeyId []string + for _, key := range nodeOCRKeys.Data { + if key.Attributes.ChainType == string(chaintype.EVM) { + nodeOCRKeyId = append(nodeOCRKeyId, key.ID) + break + } + } + + autoOCR2JobSpec := client.OCR2TaskJobSpec{ + Name: "ocr2 " + registryAddr, + JobType: "offchainreporting2", + OCR2OracleSpec: job.OCR2OracleSpec{ + PluginType: "ocr2automation", + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": int(chainID), + }, + PluginConfig: map[string]interface{}{ + "mercuryCredentialName": "\"cred1\"", + "contractVersion": "\"" + contractVersion + "\"", + }, + ContractConfigTrackerPollInterval: *models.NewInterval(time.Second * 15), + ContractID: registryAddr, // registryAddr + OCRKeyBundleID: null.StringFrom(nodeOCRKeyId[0]), // get node ocr2config.ID + TransmitterID: null.StringFrom(nodeTransmitterAddress[keyIndex]), // node addr + P2PV2Bootstrappers: pq.StringArray{P2Pv2Bootstrapper}, // bootstrap node key and address @bootstrap:8000 + }, + } + + _, err = pluginNodes[nodeIndex].MustCreateJob(&autoOCR2JobSpec) + if err != nil { + l.Error().Err(err).Msgf("Shouldn't fail creating OCR Task job on OCR node %d err: %+v", nodeIndex+1, err) + return err + } + + } + l.Info().Msg("Done creating OCR automation jobs") + return nil +} diff --git a/integration-tests/actions/automationv2/actions.go b/integration-tests/actions/automationv2/actions.go new file mode 100644 index 00000000..d3281a7d --- /dev/null +++ b/integration-tests/actions/automationv2/actions.go @@ -0,0 +1,722 @@ +package automationv2 + +import ( + "crypto/ed25519" + 
"encoding/hex" + "encoding/json" + "errors" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + ocr2 "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + ocr3 "github.com/goplugin/libocr/offchainreporting2plus/ocr3confighelper" + "github.com/goplugin/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + "gopkg.in/guregu/null.v4" + + ocr2keepers20config "github.com/goplugin/plugin-automation/pkg/v2/config" + ocr2keepers30config "github.com/goplugin/plugin-automation/pkg/v3/config" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_registrar_wrapper2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registrar_wrapper2_0" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + + ctfTestEnv "github.com/goplugin/plugin-testing-framework/docker/test_env" +) + +type NodeDetails struct { + P2PId string + TransmitterAddresses []string + OCR2ConfigPublicKey string + OCR2OffchainPublicKey string + OCR2OnChainPublicKey string + OCR2Id string +} + +type AutomationTest struct { + ChainClient blockchain.EVMClient + Deployer contracts.ContractDeployer + + LinkToken contracts.LinkToken + Transcoder 
contracts.UpkeepTranscoder + EthLinkFeed contracts.MockETHPLIFeed + GasFeed contracts.MockGasFeed + Registry contracts.KeeperRegistry + Registrar contracts.KeeperRegistrar + + RegistrySettings contracts.KeeperRegistrySettings + RegistrarSettings contracts.KeeperRegistrarSettings + PluginConfig ocr2keepers30config.OffchainConfig + PublicConfig ocr3.PublicConfig + UpkeepPrivilegeManager common.Address + UpkeepIDs []*big.Int + + IsOnk8s bool + + PluginNodesk8s []*client.PluginK8sClient + PluginNodes []*client.PluginClient + + DockerEnv *test_env.CLClusterTestEnv + + NodeDetails []NodeDetails + DefaultP2Pv2Bootstrapper string + MercuryCredentialName string + TransmitterKeyIndex int +} + +type UpkeepConfig struct { + UpkeepName string + EncryptedEmail []byte + UpkeepContract common.Address + GasLimit uint32 + AdminAddress common.Address + TriggerType uint8 + CheckData []byte + TriggerConfig []byte + OffchainConfig []byte + FundingAmount *big.Int +} + +func NewAutomationTestK8s( + chainClient blockchain.EVMClient, + deployer contracts.ContractDeployer, + pluginNodes []*client.PluginK8sClient, +) *AutomationTest { + return &AutomationTest{ + ChainClient: chainClient, + Deployer: deployer, + PluginNodesk8s: pluginNodes, + IsOnk8s: true, + TransmitterKeyIndex: 0, + UpkeepPrivilegeManager: common.HexToAddress(chainClient.GetDefaultWallet().Address()), + } +} + +func NewAutomationTestDocker( + chainClient blockchain.EVMClient, + deployer contracts.ContractDeployer, + pluginNodes []*client.PluginClient, +) *AutomationTest { + return &AutomationTest{ + ChainClient: chainClient, + Deployer: deployer, + PluginNodes: pluginNodes, + IsOnk8s: false, + TransmitterKeyIndex: 0, + UpkeepPrivilegeManager: common.HexToAddress(chainClient.GetDefaultWallet().Address()), + } +} + +func (a *AutomationTest) SetIsOnk8s(flag bool) { + a.IsOnk8s = flag +} + +func (a *AutomationTest) SetMercuryCredentialName(name string) { + a.MercuryCredentialName = name +} + +func (a *AutomationTest) 
SetTransmitterKeyIndex(index int) { + a.TransmitterKeyIndex = index +} + +func (a *AutomationTest) SetUpkeepPrivilegeManager(address string) { + a.UpkeepPrivilegeManager = common.HexToAddress(address) +} + +func (a *AutomationTest) SetDockerEnv(env *test_env.CLClusterTestEnv) { + a.DockerEnv = env +} + +func (a *AutomationTest) DeployPLI() error { + linkToken, err := a.Deployer.DeployLinkTokenContract() + if err != nil { + return err + } + a.LinkToken = linkToken + err = a.ChainClient.WaitForEvents() + if err != nil { + return errors.Join(err, fmt.Errorf("failed waiting for link token contract to deploy")) + } + return nil +} + +func (a *AutomationTest) LoadPLI(address string) error { + linkToken, err := a.Deployer.LoadLinkToken(common.HexToAddress(address)) + if err != nil { + return err + } + a.LinkToken = linkToken + return nil +} + +func (a *AutomationTest) DeployTranscoder() error { + transcoder, err := a.Deployer.DeployUpkeepTranscoder() + if err != nil { + return err + } + a.Transcoder = transcoder + err = a.ChainClient.WaitForEvents() + if err != nil { + return errors.Join(err, fmt.Errorf("failed waiting for transcoder contract to deploy")) + } + return nil +} + +func (a *AutomationTest) LoadTranscoder(address string) error { + transcoder, err := a.Deployer.LoadUpkeepTranscoder(common.HexToAddress(address)) + if err != nil { + return err + } + a.Transcoder = transcoder + return nil +} + +func (a *AutomationTest) DeployEthLinkFeed() error { + ethLinkFeed, err := a.Deployer.DeployMockETHPLIFeed(a.RegistrySettings.FallbackLinkPrice) + if err != nil { + return err + } + a.EthLinkFeed = ethLinkFeed + err = a.ChainClient.WaitForEvents() + if err != nil { + return errors.Join(err, fmt.Errorf("failed waiting for Mock ETH PLI feed to deploy")) + } + return nil +} + +func (a *AutomationTest) LoadEthLinkFeed(address string) error { + ethLinkFeed, err := a.Deployer.LoadETHPLIFeed(common.HexToAddress(address)) + if err != nil { + return err + } + a.EthLinkFeed = 
ethLinkFeed + return nil +} + +func (a *AutomationTest) DeployGasFeed() error { + gasFeed, err := a.Deployer.DeployMockGasFeed(a.RegistrySettings.FallbackGasPrice) + if err != nil { + return err + } + a.GasFeed = gasFeed + err = a.ChainClient.WaitForEvents() + if err != nil { + return errors.Join(err, fmt.Errorf("failed waiting for mock gas feed to deploy")) + } + return nil +} + +func (a *AutomationTest) LoadEthGasFeed(address string) error { + gasFeed, err := a.Deployer.LoadGasFeed(common.HexToAddress(address)) + if err != nil { + return err + } + a.GasFeed = gasFeed + return nil +} + +func (a *AutomationTest) DeployRegistry() error { + registryOpts := &contracts.KeeperRegistryOpts{ + RegistryVersion: a.RegistrySettings.RegistryVersion, + LinkAddr: a.LinkToken.Address(), + ETHFeedAddr: a.EthLinkFeed.Address(), + GasFeedAddr: a.GasFeed.Address(), + TranscoderAddr: a.Transcoder.Address(), + RegistrarAddr: utils.ZeroAddress.Hex(), + Settings: a.RegistrySettings, + } + registry, err := a.Deployer.DeployKeeperRegistry(registryOpts) + if err != nil { + return err + } + a.Registry = registry + err = a.ChainClient.WaitForEvents() + if err != nil { + return errors.Join(err, fmt.Errorf("failed waiting for registry contract to deploy")) + } + return nil +} + +func (a *AutomationTest) LoadRegistry(address string) error { + registry, err := a.Deployer.LoadKeeperRegistry(common.HexToAddress(address), a.RegistrySettings.RegistryVersion) + if err != nil { + return err + } + a.Registry = registry + return nil +} + +func (a *AutomationTest) DeployRegistrar() error { + if a.Registry == nil { + return fmt.Errorf("registry must be deployed or loaded before registrar") + } + a.RegistrarSettings.RegistryAddr = a.Registry.Address() + registrar, err := a.Deployer.DeployKeeperRegistrar(a.RegistrySettings.RegistryVersion, a.LinkToken.Address(), a.RegistrarSettings) + if err != nil { + return err + } + a.Registrar = registrar + err = a.ChainClient.WaitForEvents() + if err != nil { + return 
errors.Join(err, fmt.Errorf("failed waiting for registrar contract to deploy")) + } + return nil +} + +func (a *AutomationTest) LoadRegistrar(address string) error { + if a.Registry == nil { + return fmt.Errorf("registry must be deployed or loaded before registrar") + } + a.RegistrarSettings.RegistryAddr = a.Registry.Address() + registrar, err := a.Deployer.LoadKeeperRegistrar(common.HexToAddress(address), a.RegistrySettings.RegistryVersion) + if err != nil { + return err + } + a.Registrar = registrar + return nil +} + +func (a *AutomationTest) CollectNodeDetails() error { + var ( + nodes []*client.PluginClient + ) + if a.IsOnk8s { + for _, node := range a.PluginNodesk8s[:] { + nodes = append(nodes, node.PluginClient) + } + a.PluginNodes = nodes + } else { + nodes = a.PluginNodes[:] + } + + nodeDetails := make([]NodeDetails, 0) + + for i, node := range nodes { + nodeDetail := NodeDetails{} + P2PIds, err := node.MustReadP2PKeys() + if err != nil { + return errors.Join(err, fmt.Errorf("failed to read P2P keys from node %d", i)) + } + nodeDetail.P2PId = P2PIds.Data[0].Attributes.PeerID + + OCR2Keys, err := node.MustReadOCR2Keys() + if err != nil { + return errors.Join(err, fmt.Errorf("failed to read OCR2 keys from node %d", i)) + } + for _, key := range OCR2Keys.Data { + if key.Attributes.ChainType == string(chaintype.EVM) { + nodeDetail.OCR2ConfigPublicKey = key.Attributes.ConfigPublicKey + nodeDetail.OCR2OffchainPublicKey = key.Attributes.OffChainPublicKey + nodeDetail.OCR2OnChainPublicKey = key.Attributes.OnChainPublicKey + nodeDetail.OCR2Id = key.ID + break + } + } + + TransmitterKeys, err := node.EthAddressesForChain(a.ChainClient.GetChainID().String()) + nodeDetail.TransmitterAddresses = make([]string, 0) + if err != nil { + return errors.Join(err, fmt.Errorf("failed to read Transmitter keys from node %d", i)) + } + nodeDetail.TransmitterAddresses = append(nodeDetail.TransmitterAddresses, TransmitterKeys...) 
+ nodeDetails = append(nodeDetails, nodeDetail) + } + a.NodeDetails = nodeDetails + + if a.IsOnk8s { + a.DefaultP2Pv2Bootstrapper = fmt.Sprintf("%s@%s-node-1:%d", a.NodeDetails[0].P2PId, a.PluginNodesk8s[0].Name(), 6690) + } else { + a.DefaultP2Pv2Bootstrapper = fmt.Sprintf("%s@%s:%d", a.NodeDetails[0].P2PId, a.PluginNodes[0].InternalIP(), 6690) + } + return nil +} + +func (a *AutomationTest) AddBootstrapJob() error { + bootstrapSpec := &client.OCR2TaskJobSpec{ + Name: "ocr2 bootstrap node " + a.Registry.Address(), + JobType: "bootstrap", + OCR2OracleSpec: job.OCR2OracleSpec{ + ContractID: a.Registry.Address(), + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": int(a.ChainClient.GetChainID().Int64()), + }, + ContractConfigTrackerPollInterval: *models.NewInterval(time.Second * 15), + }, + } + _, err := a.PluginNodes[0].MustCreateJob(bootstrapSpec) + if err != nil { + return errors.Join(err, fmt.Errorf("failed to create bootstrap job on bootstrap node")) + } + return nil +} + +func (a *AutomationTest) AddAutomationJobs() error { + var contractVersion string + if a.RegistrySettings.RegistryVersion == ethereum.RegistryVersion_2_1 { + contractVersion = "v2.1" + } else if a.RegistrySettings.RegistryVersion == ethereum.RegistryVersion_2_0 { + contractVersion = "v2.0" + } else { + return fmt.Errorf("v2.0 and v2.1 are the only supported versions") + } + for i := 1; i < len(a.PluginNodes); i++ { + autoOCR2JobSpec := client.OCR2TaskJobSpec{ + Name: "automation-" + contractVersion + "-" + a.Registry.Address(), + JobType: "offchainreporting2", + OCR2OracleSpec: job.OCR2OracleSpec{ + PluginType: "ocr2automation", + ContractID: a.Registry.Address(), + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": int(a.ChainClient.GetChainID().Int64()), + }, + PluginConfig: map[string]interface{}{ + "mercuryCredentialName": "\"" + a.MercuryCredentialName + "\"", + "contractVersion": "\"" + contractVersion + "\"", + }, + ContractConfigTrackerPollInterval: 
*models.NewInterval(time.Second * 15), + TransmitterID: null.StringFrom(a.NodeDetails[i].TransmitterAddresses[a.TransmitterKeyIndex]), + P2PV2Bootstrappers: pq.StringArray{a.DefaultP2Pv2Bootstrapper}, + OCRKeyBundleID: null.StringFrom(a.NodeDetails[i].OCR2Id), + }, + } + _, err := a.PluginNodes[i].MustCreateJob(&autoOCR2JobSpec) + if err != nil { + return errors.Join(err, fmt.Errorf("failed to create OCR2 job on node %d", i+1)) + } + } + return nil +} + +func (a *AutomationTest) SetConfigOnRegistry() error { + donNodes := a.NodeDetails[1:] + S := make([]int, len(donNodes)) + oracleIdentities := make([]confighelper.OracleIdentityExtra, len(donNodes)) + var offC []byte + var signerOnchainPublicKeys []types.OnchainPublicKey + var transmitterAccounts []types.Account + var f uint8 + var offchainConfigVersion uint64 + var offchainConfig []byte + sharedSecretEncryptionPublicKeys := make([]types.ConfigEncryptionPublicKey, len(donNodes)) + eg := &errgroup.Group{} + for i, donNode := range donNodes { + index, pluginNode := i, donNode + eg.Go(func() error { + offchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(pluginNode.OCR2OffchainPublicKey, "ocr2off_evm_")) + if err != nil { + return err + } + + offchainPkBytesFixed := [ed25519.PublicKeySize]byte{} + n := copy(offchainPkBytesFixed[:], offchainPkBytes) + if n != ed25519.PublicKeySize { + return fmt.Errorf("wrong number of elements copied") + } + + configPkBytes, err := hex.DecodeString(strings.TrimPrefix(pluginNode.OCR2ConfigPublicKey, "ocr2cfg_evm_")) + if err != nil { + return err + } + + configPkBytesFixed := [ed25519.PublicKeySize]byte{} + n = copy(configPkBytesFixed[:], configPkBytes) + if n != ed25519.PublicKeySize { + return fmt.Errorf("wrong number of elements copied") + } + + onchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(pluginNode.OCR2OnChainPublicKey, "ocr2on_evm_")) + if err != nil { + return err + } + + sharedSecretEncryptionPublicKeys[index] = configPkBytesFixed + oracleIdentities[index] = 
confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: onchainPkBytes, + OffchainPublicKey: offchainPkBytesFixed, + PeerID: pluginNode.P2PId, + TransmitAccount: types.Account(pluginNode.TransmitterAddresses[a.TransmitterKeyIndex]), + }, + ConfigEncryptionPublicKey: configPkBytesFixed, + } + S[index] = 1 + return nil + }) + } + err := eg.Wait() + if err != nil { + return errors.Join(err, fmt.Errorf("failed to build oracle identities")) + } + + switch a.RegistrySettings.RegistryVersion { + case ethereum.RegistryVersion_2_0: + offC, err = json.Marshal(ocr2keepers20config.OffchainConfig{ + TargetProbability: a.PluginConfig.TargetProbability, + TargetInRounds: a.PluginConfig.TargetInRounds, + PerformLockoutWindow: a.PluginConfig.PerformLockoutWindow, + GasLimitPerReport: a.PluginConfig.GasLimitPerReport, + GasOverheadPerUpkeep: a.PluginConfig.GasOverheadPerUpkeep, + MinConfirmations: a.PluginConfig.MinConfirmations, + MaxUpkeepBatchSize: a.PluginConfig.MaxUpkeepBatchSize, + }) + if err != nil { + return errors.Join(err, fmt.Errorf("failed to marshal plugin config")) + } + + signerOnchainPublicKeys, transmitterAccounts, f, _, offchainConfigVersion, offchainConfig, err = ocr2.ContractSetConfigArgsForTests( + a.PublicConfig.DeltaProgress, a.PublicConfig.DeltaResend, + a.PublicConfig.DeltaRound, a.PublicConfig.DeltaGrace, + a.PublicConfig.DeltaStage, uint8(a.PublicConfig.RMax), + S, oracleIdentities, offC, + a.PublicConfig.MaxDurationQuery, a.PublicConfig.MaxDurationObservation, + 1200*time.Millisecond, + a.PublicConfig.MaxDurationShouldAcceptAttestedReport, + a.PublicConfig.MaxDurationShouldTransmitAcceptedReport, + a.PublicConfig.F, a.PublicConfig.OnchainConfig, + ) + if err != nil { + return errors.Join(err, fmt.Errorf("failed to build config args")) + } + + case ethereum.RegistryVersion_2_1: + offC, err = json.Marshal(ocr2keepers30config.OffchainConfig{ + TargetProbability: a.PluginConfig.TargetProbability, + TargetInRounds: 
a.PluginConfig.TargetInRounds, + PerformLockoutWindow: a.PluginConfig.PerformLockoutWindow, + GasLimitPerReport: a.PluginConfig.GasLimitPerReport, + GasOverheadPerUpkeep: a.PluginConfig.GasOverheadPerUpkeep, + MinConfirmations: a.PluginConfig.MinConfirmations, + MaxUpkeepBatchSize: a.PluginConfig.MaxUpkeepBatchSize, + }) + if err != nil { + return errors.Join(err, fmt.Errorf("failed to marshal plugin config")) + } + + signerOnchainPublicKeys, transmitterAccounts, f, _, offchainConfigVersion, offchainConfig, err = ocr3.ContractSetConfigArgsForTests( + a.PublicConfig.DeltaProgress, a.PublicConfig.DeltaResend, a.PublicConfig.DeltaInitial, + a.PublicConfig.DeltaRound, a.PublicConfig.DeltaGrace, a.PublicConfig.DeltaCertifiedCommitRequest, + a.PublicConfig.DeltaStage, a.PublicConfig.RMax, + S, oracleIdentities, offC, + a.PublicConfig.MaxDurationQuery, a.PublicConfig.MaxDurationObservation, + a.PublicConfig.MaxDurationShouldAcceptAttestedReport, + a.PublicConfig.MaxDurationShouldTransmitAcceptedReport, + a.PublicConfig.F, a.PublicConfig.OnchainConfig, + ) + if err != nil { + return errors.Join(err, fmt.Errorf("failed to build config args")) + } + default: + return fmt.Errorf("v2.0 and v2.1 are the only supported versions") + } + + var signers []common.Address + for _, signer := range signerOnchainPublicKeys { + if len(signer) != 20 { + return fmt.Errorf("OnChainPublicKey '%v' has wrong length for address", signer) + } + signers = append(signers, common.BytesToAddress(signer)) + } + + var transmitters []common.Address + for _, transmitter := range transmitterAccounts { + if !common.IsHexAddress(string(transmitter)) { + return fmt.Errorf("TransmitAccount '%s' is not a valid Ethereum address", string(transmitter)) + } + transmitters = append(transmitters, common.HexToAddress(string(transmitter))) + } + + onchainConfig, err := a.RegistrySettings.EncodeOnChainConfig(a.Registrar.Address(), a.UpkeepPrivilegeManager) + if err != nil { + return errors.Join(err, fmt.Errorf("failed 
to encode onchain config")) + } + + ocrConfig := contracts.OCRv2Config{ + Signers: signers, + Transmitters: transmitters, + F: f, + OnchainConfig: onchainConfig, + OffchainConfigVersion: offchainConfigVersion, + OffchainConfig: offchainConfig, + } + + err = a.Registry.SetConfig(a.RegistrySettings, ocrConfig) + if err != nil { + return errors.Join(err, fmt.Errorf("failed to set config on registry")) + } + return nil +} + +func (a *AutomationTest) RegisterUpkeeps(upkeepConfigs []UpkeepConfig) ([]common.Hash, error) { + var registrarABI *abi.ABI + var err error + var registrationRequest []byte + registrationTxHashes := make([]common.Hash, 0) + + for _, upkeepConfig := range upkeepConfigs { + switch a.RegistrySettings.RegistryVersion { + case ethereum.RegistryVersion_2_0: + registrarABI, err = keeper_registrar_wrapper2_0.KeeperRegistrarMetaData.GetAbi() + if err != nil { + return nil, errors.Join(err, fmt.Errorf("failed to get registrar abi")) + } + registrationRequest, err = registrarABI.Pack( + "register", upkeepConfig.UpkeepName, upkeepConfig.EncryptedEmail, + upkeepConfig.UpkeepContract, upkeepConfig.GasLimit, upkeepConfig.AdminAddress, + upkeepConfig.CheckData, + upkeepConfig.OffchainConfig, upkeepConfig.FundingAmount, + common.HexToAddress(a.ChainClient.GetDefaultWallet().Address())) + if err != nil { + return nil, errors.Join(err, fmt.Errorf("failed to pack registrar request")) + } + case ethereum.RegistryVersion_2_1: + registrarABI, err = automation_registrar_wrapper2_1.AutomationRegistrarMetaData.GetAbi() + if err != nil { + return nil, errors.Join(err, fmt.Errorf("failed to get registrar abi")) + } + registrationRequest, err = registrarABI.Pack( + "register", upkeepConfig.UpkeepName, upkeepConfig.EncryptedEmail, + upkeepConfig.UpkeepContract, upkeepConfig.GasLimit, upkeepConfig.AdminAddress, + upkeepConfig.TriggerType, upkeepConfig.CheckData, upkeepConfig.TriggerConfig, + upkeepConfig.OffchainConfig, upkeepConfig.FundingAmount, + 
common.HexToAddress(a.ChainClient.GetDefaultWallet().Address())) + if err != nil { + return nil, errors.Join(err, fmt.Errorf("failed to pack registrar request")) + } + default: + return nil, fmt.Errorf("v2.0 and v2.1 are the only supported versions") + } + tx, err := a.LinkToken.TransferAndCall(a.Registrar.Address(), upkeepConfig.FundingAmount, registrationRequest) + if err != nil { + return nil, errors.Join(err, fmt.Errorf("failed to register upkeep")) + } + registrationTxHashes = append(registrationTxHashes, tx.Hash()) + } + return registrationTxHashes, nil +} + +func (a *AutomationTest) ConfirmUpkeepsRegistered(registrationTxHashes []common.Hash) ([]*big.Int, error) { + upkeepIds := make([]*big.Int, 0) + for _, txHash := range registrationTxHashes { + receipt, err := a.ChainClient.GetTxReceipt(txHash) + if err != nil { + return nil, errors.Join(err, fmt.Errorf("failed to confirm upkeep registration")) + } + var upkeepId *big.Int + for _, rawLog := range receipt.Logs { + parsedUpkeepId, err := a.Registry.ParseUpkeepIdFromRegisteredLog(rawLog) + if err == nil { + upkeepId = parsedUpkeepId + break + } + } + if upkeepId == nil { + return nil, fmt.Errorf("failed to parse upkeep id from registration receipt") + } + upkeepIds = append(upkeepIds, upkeepId) + } + a.UpkeepIDs = upkeepIds + return upkeepIds, nil +} + +func (a *AutomationTest) AddJobsAndSetConfig(t *testing.T) { + l := logging.GetTestLogger(t) + err := a.AddBootstrapJob() + require.NoError(t, err, "Error adding bootstrap job") + err = a.AddAutomationJobs() + require.NoError(t, err, "Error adding automation jobs") + + l.Debug(). + Interface("Plugin Config", a.PluginConfig). + Interface("Public Config", a.PublicConfig). + Interface("Registry Settings", a.RegistrySettings). + Interface("Registrar Settings", a.RegistrarSettings). 
+ Msg("Configuring registry") + err = a.SetConfigOnRegistry() + require.NoError(t, err, "Error setting config on registry") + l.Info().Str("Registry Address", a.Registry.Address()).Msg("Successfully setConfig on registry") +} + +func (a *AutomationTest) SetupMercuryMock(t *testing.T, imposters []ctfTestEnv.KillgraveImposter) { + if a.IsOnk8s { + t.Error("mercury mock is not supported on k8s") + } + if a.DockerEnv == nil { + t.Error("docker env is not set") + } + err := a.DockerEnv.MockAdapter.AddImposter(imposters) + if err != nil { + require.NoError(t, err, "Error adding mock imposter") + } +} + +func (a *AutomationTest) SetupAutomationDeployment(t *testing.T) { + l := logging.GetTestLogger(t) + err := a.CollectNodeDetails() + require.NoError(t, err, "Error collecting node details") + l.Info().Msg("Collected Node Details") + l.Debug().Interface("Node Details", a.NodeDetails).Msg("Node Details") + + err = a.DeployPLI() + require.NoError(t, err, "Error deploying link token contract") + + err = a.DeployEthLinkFeed() + require.NoError(t, err, "Error deploying eth link feed contract") + err = a.DeployGasFeed() + require.NoError(t, err, "Error deploying gas feed contract") + + err = a.DeployTranscoder() + require.NoError(t, err, "Error deploying transcoder contract") + + err = a.DeployRegistry() + require.NoError(t, err, "Error deploying registry contract") + err = a.DeployRegistrar() + require.NoError(t, err, "Error deploying registrar contract") + + a.AddJobsAndSetConfig(t) +} + +func (a *AutomationTest) LoadAutomationDeployment(t *testing.T, linkTokenAddress, + ethLinkFeedAddress, gasFeedAddress, transcoderAddress, registryAddress, registrarAddress string) { + l := logging.GetTestLogger(t) + err := a.CollectNodeDetails() + require.NoError(t, err, "Error collecting node details") + l.Info().Msg("Collected Node Details") + l.Debug().Interface("Node Details", a.NodeDetails).Msg("Node Details") + + err = a.LoadPLI(linkTokenAddress) + require.NoError(t, err, "Error 
loading link token contract") + + err = a.LoadEthLinkFeed(ethLinkFeedAddress) + require.NoError(t, err, "Error loading eth link feed contract") + err = a.LoadEthGasFeed(gasFeedAddress) + require.NoError(t, err, "Error loading gas feed contract") + err = a.LoadTranscoder(transcoderAddress) + require.NoError(t, err, "Error loading transcoder contract") + err = a.LoadRegistry(registryAddress) + require.NoError(t, err, "Error loading registry contract") + err = a.LoadRegistrar(registrarAddress) + require.NoError(t, err, "Error loading registrar contract") + + a.AddJobsAndSetConfig(t) + +} diff --git a/integration-tests/actions/keeper_helpers.go b/integration-tests/actions/keeper_helpers.go new file mode 100644 index 00000000..2f361c06 --- /dev/null +++ b/integration-tests/actions/keeper_helpers.go @@ -0,0 +1,603 @@ +package actions + +import ( + "fmt" + "math" + "math/big" + "strconv" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/logging" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" +) + +var ZeroAddress = common.Address{} + +func CreateKeeperJobs( + t *testing.T, + pluginNodes []*client.PluginK8sClient, + keeperRegistry contracts.KeeperRegistry, + ocrConfig contracts.OCRv2Config, + evmChainID string, +) { + // Send keeper jobs to registry and plugin nodes + primaryNode := pluginNodes[0] + primaryNodeAddress, err := primaryNode.PrimaryEthAddress() + require.NoError(t, err, "Reading ETH Keys from Plugin Client shouldn't fail") + nodeAddresses, err := PluginNodeAddresses(pluginNodes) + require.NoError(t, err, "Retrieving on-chain wallet addresses for plugin nodes shouldn't fail") + nodeAddressesStr, payees := 
make([]string, 0), make([]string, 0) + for _, cla := range nodeAddresses { + nodeAddressesStr = append(nodeAddressesStr, cla.Hex()) + payees = append(payees, primaryNodeAddress) + } + err = keeperRegistry.SetKeepers(nodeAddressesStr, payees, ocrConfig) + require.NoError(t, err, "Setting keepers in the registry shouldn't fail") + + for _, pluginNode := range pluginNodes { + pluginNodeAddress, err := pluginNode.PrimaryEthAddress() + require.NoError(t, err, "Error retrieving plugin node address") + _, err = pluginNode.MustCreateJob(&client.KeeperJobSpec{ + Name: fmt.Sprintf("keeper-test-%s", keeperRegistry.Address()), + ContractAddress: keeperRegistry.Address(), + FromAddress: pluginNodeAddress, + EVMChainID: evmChainID, + MinIncomingConfirmations: 1, + }) + require.NoError(t, err, "Creating KeeperV2 Job shouldn't fail") + } +} + +func CreateKeeperJobsWithKeyIndex( + t *testing.T, + pluginNodes []*client.PluginK8sClient, + keeperRegistry contracts.KeeperRegistry, + keyIndex int, + ocrConfig contracts.OCRv2Config, + evmChainID string, +) { + // Send keeper jobs to registry and plugin nodes + primaryNode := pluginNodes[0] + primaryNodeAddresses, err := primaryNode.EthAddresses() + require.NoError(t, err, "Reading ETH Keys from Plugin Client shouldn't fail") + nodeAddresses, err := PluginNodeAddressesAtIndex(pluginNodes, keyIndex) + require.NoError(t, err, "Retrieving on-chain wallet addresses for plugin nodes shouldn't fail") + nodeAddressesStr, payees := make([]string, 0), make([]string, 0) + for _, cla := range nodeAddresses { + nodeAddressesStr = append(nodeAddressesStr, cla.Hex()) + payees = append(payees, primaryNodeAddresses[keyIndex]) + } + err = keeperRegistry.SetKeepers(nodeAddressesStr, payees, ocrConfig) + require.NoError(t, err, "Setting keepers in the registry shouldn't fail") + + for _, pluginNode := range pluginNodes { + pluginNodeAddress, err := pluginNode.EthAddresses() + require.NoError(t, err, "Error retrieving plugin node address") + _, err = 
pluginNode.MustCreateJob(&client.KeeperJobSpec{ + Name: fmt.Sprintf("keeper-test-%s", keeperRegistry.Address()), + ContractAddress: keeperRegistry.Address(), + FromAddress: pluginNodeAddress[keyIndex], + EVMChainID: evmChainID, + MinIncomingConfirmations: 1, + }) + require.NoError(t, err, "Creating KeeperV2 Job shouldn't fail") + } +} + +func DeleteKeeperJobsWithId(t *testing.T, pluginNodes []*client.PluginK8sClient, id int) { + for _, pluginNode := range pluginNodes { + err := pluginNode.MustDeleteJob(strconv.Itoa(id)) + require.NoError(t, err, "Deleting KeeperV2 Job shouldn't fail") + } +} + +// DeployKeeperContracts deploys keeper registry and a number of basic upkeep contracts with an update interval of 5. +// It returns the freshly deployed registry, registrar, consumers and the IDs of the upkeeps. +func DeployKeeperContracts( + t *testing.T, + registryVersion ethereum.KeeperRegistryVersion, + registrySettings contracts.KeeperRegistrySettings, + numberOfUpkeeps int, + upkeepGasLimit uint32, + linkToken contracts.LinkToken, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + linkFundsForEachUpkeep *big.Int, +) (contracts.KeeperRegistry, contracts.KeeperRegistrar, []contracts.KeeperConsumer, []*big.Int) { + ef, err := contractDeployer.DeployMockETHPLIFeed(big.NewInt(2e18)) + require.NoError(t, err, "Deploying mock ETH-Link feed shouldn't fail") + gf, err := contractDeployer.DeployMockGasFeed(big.NewInt(2e11)) + require.NoError(t, err, "Deploying mock gas feed shouldn't fail") + err = client.WaitForEvents() + require.NoError(t, err, "Failed waiting for mock feeds to deploy") + + // Deploy the transcoder here, and then set it to the registry + transcoder := DeployUpkeepTranscoder(t, contractDeployer, client) + registry := DeployKeeperRegistry(t, contractDeployer, client, + &contracts.KeeperRegistryOpts{ + RegistryVersion: registryVersion, + LinkAddr: linkToken.Address(), + ETHFeedAddr: ef.Address(), + GasFeedAddr: gf.Address(), + 
TranscoderAddr: transcoder.Address(), + RegistrarAddr: ZeroAddress.Hex(), + Settings: registrySettings, + }, + ) + + // Fund the registry with 1 PLI * amount of KeeperConsumerPerformance contracts + err = linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(numberOfUpkeeps)))) + require.NoError(t, err, "Funding keeper registry contract shouldn't fail") + + registrarSettings := contracts.KeeperRegistrarSettings{ + AutoApproveConfigType: 2, + AutoApproveMaxAllowed: math.MaxUint16, + RegistryAddr: registry.Address(), + MinLinkJuels: big.NewInt(0), + } + registrar := DeployKeeperRegistrar(t, registryVersion, linkToken, registrarSettings, contractDeployer, client, registry) + + upkeeps := DeployKeeperConsumers(t, contractDeployer, client, numberOfUpkeeps, false, false) + var upkeepsAddresses []string + for _, upkeep := range upkeeps { + upkeepsAddresses = append(upkeepsAddresses, upkeep.Address()) + } + upkeepIds := RegisterUpkeepContracts(t, linkToken, linkFundsForEachUpkeep, client, upkeepGasLimit, registry, registrar, numberOfUpkeeps, upkeepsAddresses, false, false) + err = client.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + return registry, registrar, upkeeps, upkeepIds +} + +// DeployPerformanceKeeperContracts deploys a set amount of keeper performance contracts registered to a single registry +func DeployPerformanceKeeperContracts( + t *testing.T, + registryVersion ethereum.KeeperRegistryVersion, + numberOfContracts int, + upkeepGasLimit uint32, + linkToken contracts.LinkToken, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + registrySettings *contracts.KeeperRegistrySettings, + linkFundsForEachUpkeep *big.Int, + blockRange, // How many blocks to run the test for + blockInterval, // Interval of blocks that upkeeps are expected to be performed + checkGasToBurn, // How much gas should be burned on checkUpkeep() calls + performGasToBurn int64, // How much gas should be 
burned on performUpkeep() calls +) (contracts.KeeperRegistry, contracts.KeeperRegistrar, []contracts.KeeperConsumerPerformance, []*big.Int) { + ef, err := contractDeployer.DeployMockETHPLIFeed(big.NewInt(2e18)) + require.NoError(t, err, "Deploying mock ETH-Link feed shouldn't fail") + gf, err := contractDeployer.DeployMockGasFeed(big.NewInt(2e11)) + require.NoError(t, err, "Deploying mock gas feed shouldn't fail") + err = client.WaitForEvents() + require.NoError(t, err, "Failed waiting for mock feeds to deploy") + + registry := DeployKeeperRegistry(t, contractDeployer, client, + &contracts.KeeperRegistryOpts{ + RegistryVersion: registryVersion, + LinkAddr: linkToken.Address(), + ETHFeedAddr: ef.Address(), + GasFeedAddr: gf.Address(), + TranscoderAddr: ZeroAddress.Hex(), + RegistrarAddr: ZeroAddress.Hex(), + Settings: *registrySettings, + }, + ) + + // Fund the registry with 1 PLI * amount of KeeperConsumerPerformance contracts + err = linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(numberOfContracts)))) + require.NoError(t, err, "Funding keeper registry contract shouldn't fail") + + registrarSettings := contracts.KeeperRegistrarSettings{ + AutoApproveConfigType: 2, + AutoApproveMaxAllowed: math.MaxUint16, + RegistryAddr: registry.Address(), + MinLinkJuels: big.NewInt(0), + } + registrar := DeployKeeperRegistrar(t, registryVersion, linkToken, registrarSettings, contractDeployer, client, registry) + + upkeeps := DeployKeeperConsumersPerformance( + t, contractDeployer, client, numberOfContracts, blockRange, blockInterval, checkGasToBurn, performGasToBurn, + ) + + var upkeepsAddresses []string + for _, upkeep := range upkeeps { + upkeepsAddresses = append(upkeepsAddresses, upkeep.Address()) + } + + upkeepIds := RegisterUpkeepContracts(t, linkToken, linkFundsForEachUpkeep, client, upkeepGasLimit, registry, registrar, numberOfContracts, upkeepsAddresses, false, false) + + return registry, registrar, upkeeps, upkeepIds +} + +// 
DeployPerformDataCheckerContracts deploys a set amount of keeper perform data checker contracts registered to a single registry +func DeployPerformDataCheckerContracts( + t *testing.T, + registryVersion ethereum.KeeperRegistryVersion, + numberOfContracts int, + upkeepGasLimit uint32, + linkToken contracts.LinkToken, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + registrySettings *contracts.KeeperRegistrySettings, + linkFundsForEachUpkeep *big.Int, + expectedData []byte, +) (contracts.KeeperRegistry, contracts.KeeperRegistrar, []contracts.KeeperPerformDataChecker, []*big.Int) { + ef, err := contractDeployer.DeployMockETHPLIFeed(big.NewInt(2e18)) + require.NoError(t, err, "Deploying mock ETH-Link feed shouldn't fail") + gf, err := contractDeployer.DeployMockGasFeed(big.NewInt(2e11)) + require.NoError(t, err, "Deploying mock gas feed shouldn't fail") + err = client.WaitForEvents() + require.NoError(t, err, "Failed waiting for mock feeds to deploy") + + registry := DeployKeeperRegistry(t, contractDeployer, client, + &contracts.KeeperRegistryOpts{ + RegistryVersion: registryVersion, + LinkAddr: linkToken.Address(), + ETHFeedAddr: ef.Address(), + GasFeedAddr: gf.Address(), + TranscoderAddr: ZeroAddress.Hex(), + RegistrarAddr: ZeroAddress.Hex(), + Settings: *registrySettings, + }, + ) + + // Fund the registry with 1 PLI * amount of KeeperConsumerPerformance contracts + err = linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(numberOfContracts)))) + require.NoError(t, err, "Funding keeper registry contract shouldn't fail") + + registrarSettings := contracts.KeeperRegistrarSettings{ + AutoApproveConfigType: 2, + AutoApproveMaxAllowed: math.MaxUint16, + RegistryAddr: registry.Address(), + MinLinkJuels: big.NewInt(0), + } + registrar := DeployKeeperRegistrar(t, registryVersion, linkToken, registrarSettings, contractDeployer, client, registry) + + upkeeps := DeployPerformDataChecker(t, contractDeployer, 
client, numberOfContracts, expectedData) + + var upkeepsAddresses []string + for _, upkeep := range upkeeps { + upkeepsAddresses = append(upkeepsAddresses, upkeep.Address()) + } + + upkeepIds := RegisterUpkeepContracts(t, linkToken, linkFundsForEachUpkeep, client, upkeepGasLimit, registry, registrar, numberOfContracts, upkeepsAddresses, false, false) + + return registry, registrar, upkeeps, upkeepIds +} + +func DeployKeeperRegistry( + t *testing.T, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + registryOpts *contracts.KeeperRegistryOpts, +) contracts.KeeperRegistry { + registry, err := contractDeployer.DeployKeeperRegistry( + registryOpts, + ) + require.NoError(t, err, "Deploying keeper registry shouldn't fail") + err = client.WaitForEvents() + require.NoError(t, err, "Failed waiting for keeper registry to deploy") + + return registry +} + +func DeployKeeperRegistrar( + t *testing.T, + registryVersion ethereum.KeeperRegistryVersion, + linkToken contracts.LinkToken, + registrarSettings contracts.KeeperRegistrarSettings, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + registry contracts.KeeperRegistry, +) contracts.KeeperRegistrar { + registrar, err := contractDeployer.DeployKeeperRegistrar(registryVersion, linkToken.Address(), registrarSettings) + + require.NoError(t, err, "Deploying KeeperRegistrar contract shouldn't fail") + err = client.WaitForEvents() + require.NoError(t, err, "Failed waiting for registrar to deploy") + if registryVersion != ethereum.RegistryVersion_2_0 { + err = registry.SetRegistrar(registrar.Address()) + require.NoError(t, err, "Registering the registrar address on the registry shouldn't fail") + err = client.WaitForEvents() + require.NoError(t, err, "Failed waiting for registry to set registrar") + } + + return registrar +} + +func DeployUpkeepTranscoder( + t *testing.T, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, +) contracts.UpkeepTranscoder 
{ + transcoder, err := contractDeployer.DeployUpkeepTranscoder() + require.NoError(t, err, "Deploying UpkeepTranscoder contract shouldn't fail") + err = client.WaitForEvents() + require.NoError(t, err, "Failed waiting for transcoder to deploy") + + return transcoder +} + +func RegisterUpkeepContracts(t *testing.T, linkToken contracts.LinkToken, linkFunds *big.Int, client blockchain.EVMClient, upkeepGasLimit uint32, registry contracts.KeeperRegistry, registrar contracts.KeeperRegistrar, numberOfContracts int, upkeepAddresses []string, isLogTrigger bool, isMercury bool) []*big.Int { + checkData := make([][]byte, 0) + for i := 0; i < numberOfContracts; i++ { + checkData = append(checkData, []byte("0")) + } + return RegisterUpkeepContractsWithCheckData( + t, linkToken, linkFunds, client, upkeepGasLimit, registry, registrar, + numberOfContracts, upkeepAddresses, checkData, isLogTrigger, isMercury) +} + +func RegisterUpkeepContractsWithCheckData(t *testing.T, linkToken contracts.LinkToken, linkFunds *big.Int, client blockchain.EVMClient, upkeepGasLimit uint32, registry contracts.KeeperRegistry, registrar contracts.KeeperRegistrar, numberOfContracts int, upkeepAddresses []string, checkData [][]byte, isLogTrigger bool, isMercury bool) []*big.Int { + l := logging.GetTestLogger(t) + registrationTxHashes := make([]common.Hash, 0) + upkeepIds := make([]*big.Int, 0) + for contractCount, upkeepAddress := range upkeepAddresses { + req, err := registrar.EncodeRegisterRequest( + fmt.Sprintf("upkeep_%d", contractCount+1), + []byte("test@mail.com"), + upkeepAddress, + upkeepGasLimit, + client.GetDefaultWallet().Address(), // upkeep Admin + checkData[contractCount], + linkFunds, + 0, + client.GetDefaultWallet().Address(), + isLogTrigger, + isMercury, + ) + require.NoError(t, err, "Encoding the register request shouldn't fail") + tx, err := linkToken.TransferAndCall(registrar.Address(), linkFunds, req) + require.NoError(t, err, "Error registering the upkeep consumer to the registrar") 
+ l.Debug(). + Str("Contract Address", upkeepAddress). + Int("Number", contractCount+1). + Int("Out Of", numberOfContracts). + Str("TxHash", tx.Hash().String()). + Str("Check Data", hexutil.Encode(checkData[contractCount])). + Msg("Registered Keeper Consumer Contract") + registrationTxHashes = append(registrationTxHashes, tx.Hash()) + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + require.NoError(t, err, "Failed to wait after registering upkeep consumers") + } + } + err := client.WaitForEvents() + require.NoError(t, err, "Failed while waiting for all consumer contracts to be registered to registrar") + + // Fetch the upkeep IDs + for _, txHash := range registrationTxHashes { + receipt, err := client.GetTxReceipt(txHash) + require.NoError(t, err, "Registration tx should be completed") + var upkeepId *big.Int + for _, rawLog := range receipt.Logs { + parsedUpkeepId, err := registry.ParseUpkeepIdFromRegisteredLog(rawLog) + if err == nil { + upkeepId = parsedUpkeepId + break + } + } + require.NotNil(t, upkeepId, "Upkeep ID should be found after registration") + l.Debug(). + Str("TxHash", txHash.String()). + Str("Upkeep ID", upkeepId.String()). 
+ Msg("Found upkeepId in tx hash") + upkeepIds = append(upkeepIds, upkeepId) + } + l.Info().Msg("Successfully registered all Keeper Consumer Contracts") + return upkeepIds +} + +func DeployKeeperConsumers(t *testing.T, contractDeployer contracts.ContractDeployer, client blockchain.EVMClient, numberOfContracts int, isLogTrigger bool, isMercury bool) []contracts.KeeperConsumer { + l := logging.GetTestLogger(t) + keeperConsumerContracts := make([]contracts.KeeperConsumer, 0) + + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + // Deploy consumer + var keeperConsumerInstance contracts.KeeperConsumer + var err error + + if isMercury && isLogTrigger { + // v2.1 only: Log triggered based contract with Mercury enabled + keeperConsumerInstance, err = contractDeployer.DeployAutomationLogTriggeredStreamsLookupUpkeepConsumer() + } else if isMercury { + // v2.1 only: Conditional based contract with Mercury enabled + keeperConsumerInstance, err = contractDeployer.DeployAutomationStreamsLookupUpkeepConsumer(big.NewInt(1000), big.NewInt(5), false, true, false) // 1000 block test range + } else if isLogTrigger { + // v2.1 only: Log triggered based contract without Mercury + keeperConsumerInstance, err = contractDeployer.DeployAutomationLogTriggerConsumer(big.NewInt(1000)) // 1000 block test range + } else { + // v2.0 and v2.1: Conditional based contract without Mercury + keeperConsumerInstance, err = contractDeployer.DeployKeeperConsumer(big.NewInt(5)) + } + + require.NoError(t, err, "Deploying Consumer instance %d shouldn't fail", contractCount+1) + keeperConsumerContracts = append(keeperConsumerContracts, keeperConsumerInstance) + l.Debug(). + Str("Contract Address", keeperConsumerInstance.Address()). + Int("Number", contractCount+1). + Int("Out Of", numberOfContracts). 
+ Msg("Deployed Keeper Consumer Contract") + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err := client.WaitForEvents() + require.NoError(t, err, "Failed to wait for KeeperConsumer deployments") + } + } + err := client.WaitForEvents() + require.NoError(t, err, "Failed waiting for to deploy all keeper consumer contracts") + l.Info().Msg("Successfully deployed all Keeper Consumer Contracts") + + return keeperConsumerContracts +} + +func DeployKeeperConsumersPerformance( + t *testing.T, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + numberOfContracts int, + blockRange, // How many blocks to run the test for + blockInterval, // Interval of blocks that upkeeps are expected to be performed + checkGasToBurn, // How much gas should be burned on checkUpkeep() calls + performGasToBurn int64, // How much gas should be burned on performUpkeep() calls +) []contracts.KeeperConsumerPerformance { + l := logging.GetTestLogger(t) + upkeeps := make([]contracts.KeeperConsumerPerformance, 0) + + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + // Deploy consumer + keeperConsumerInstance, err := contractDeployer.DeployKeeperConsumerPerformance( + big.NewInt(blockRange), + big.NewInt(blockInterval), + big.NewInt(checkGasToBurn), + big.NewInt(performGasToBurn), + ) + require.NoError(t, err, "Deploying KeeperConsumerPerformance instance %d shouldn't fail", contractCount+1) + upkeeps = append(upkeeps, keeperConsumerInstance) + l.Debug(). + Str("Contract Address", keeperConsumerInstance.Address()). + Int("Number", contractCount+1). + Int("Out Of", numberOfContracts). 
+ Msg("Deployed Keeper Performance Contract") + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + require.NoError(t, err, "Failed to wait for KeeperConsumerPerformance deployments") + } + } + err := client.WaitForEvents() + require.NoError(t, err, "Failed waiting for to deploy all keeper consumer contracts") + l.Info().Msg("Successfully deployed all Keeper Consumer Contracts") + + return upkeeps +} + +func DeployPerformDataChecker( + t *testing.T, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + numberOfContracts int, + expectedData []byte, +) []contracts.KeeperPerformDataChecker { + l := logging.GetTestLogger(t) + upkeeps := make([]contracts.KeeperPerformDataChecker, 0) + + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + performDataCheckerInstance, err := contractDeployer.DeployKeeperPerformDataChecker(expectedData) + require.NoError(t, err, "Deploying KeeperPerformDataChecker instance %d shouldn't fail", contractCount+1) + upkeeps = append(upkeeps, performDataCheckerInstance) + l.Debug(). + Str("Contract Address", performDataCheckerInstance.Address()). + Int("Number", contractCount+1). + Int("Out Of", numberOfContracts). 
+ Msg("Deployed PerformDataChecker Contract") + if (contractCount+1)%ContractDeploymentInterval == 0 { + err = client.WaitForEvents() + require.NoError(t, err, "Failed to wait for PerformDataChecker deployments") + } + } + err := client.WaitForEvents() + require.NoError(t, err, "Failed waiting for to deploy all keeper perform data checker contracts") + l.Info().Msg("Successfully deployed all PerformDataChecker Contracts") + + return upkeeps +} + +func DeployUpkeepCounters( + t *testing.T, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + numberOfContracts int, + testRange *big.Int, + interval *big.Int, +) []contracts.UpkeepCounter { + l := logging.GetTestLogger(t) + upkeepCounters := make([]contracts.UpkeepCounter, 0) + + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + // Deploy consumer + upkeepCounter, err := contractDeployer.DeployUpkeepCounter(testRange, interval) + require.NoError(t, err, "Deploying KeeperConsumer instance %d shouldn't fail", contractCount+1) + upkeepCounters = append(upkeepCounters, upkeepCounter) + l.Debug(). + Str("Contract Address", upkeepCounter.Address()). + Int("Number", contractCount+1). + Int("Out Of", numberOfContracts). 
+ Msg("Deployed Keeper Consumer Contract") + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + require.NoError(t, err, "Failed to wait for KeeperConsumer deployments") + } + } + err := client.WaitForEvents() + require.NoError(t, err, "Failed waiting for to deploy all keeper consumer contracts") + l.Info().Msg("Successfully deployed all Keeper Consumer Contracts") + + return upkeepCounters +} + +func DeployUpkeepPerformCounterRestrictive( + t *testing.T, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + numberOfContracts int, + testRange *big.Int, + averageEligibilityCadence *big.Int, +) []contracts.UpkeepPerformCounterRestrictive { + l := logging.GetTestLogger(t) + upkeepCounters := make([]contracts.UpkeepPerformCounterRestrictive, 0) + + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + // Deploy consumer + upkeepCounter, err := contractDeployer.DeployUpkeepPerformCounterRestrictive(testRange, averageEligibilityCadence) + require.NoError(t, err, "Deploying KeeperConsumer instance %d shouldn't fail", contractCount+1) + upkeepCounters = append(upkeepCounters, upkeepCounter) + l.Debug(). + Str("Contract Address", upkeepCounter.Address()). + Int("Number", contractCount+1). + Int("Out Of", numberOfContracts). 
+ Msg("Deployed Keeper Consumer Contract") + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + require.NoError(t, err, "Failed to wait for KeeperConsumer deployments") + } + } + err := client.WaitForEvents() + require.NoError(t, err, "Failed waiting for to deploy all keeper consumer contracts") + l.Info().Msg("Successfully deployed all Keeper Consumer Contracts") + + return upkeepCounters +} + +// RegisterNewUpkeeps registers the given amount of new upkeeps, using the registry and registrar +// which are passed as parameters. +// It returns the newly deployed contracts (consumers), as well as their upkeep IDs. +func RegisterNewUpkeeps( + t *testing.T, + contractDeployer contracts.ContractDeployer, + client blockchain.EVMClient, + linkToken contracts.LinkToken, + registry contracts.KeeperRegistry, + registrar contracts.KeeperRegistrar, + upkeepGasLimit uint32, + numberOfNewUpkeeps int, +) ([]contracts.KeeperConsumer, []*big.Int) { + newlyDeployedUpkeeps := DeployKeeperConsumers(t, contractDeployer, client, numberOfNewUpkeeps, false, false) + + var addressesOfNewUpkeeps []string + for _, upkeep := range newlyDeployedUpkeeps { + addressesOfNewUpkeeps = append(addressesOfNewUpkeeps, upkeep.Address()) + } + + newUpkeepIDs := RegisterUpkeepContracts(t, linkToken, big.NewInt(9e18), client, upkeepGasLimit, registry, registrar, numberOfNewUpkeeps, addressesOfNewUpkeeps, false, false) + + return newlyDeployedUpkeeps, newUpkeepIDs +} diff --git a/integration-tests/actions/keeper_helpers_local.go b/integration-tests/actions/keeper_helpers_local.go new file mode 100644 index 00000000..7b0feddc --- /dev/null +++ b/integration-tests/actions/keeper_helpers_local.go @@ -0,0 +1,62 @@ +package actions + +import ( + "fmt" + + "github.com/rs/zerolog" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + 
+func CreateKeeperJobsLocal( + l zerolog.Logger, + pluginNodes []*client.PluginClient, + keeperRegistry contracts.KeeperRegistry, + ocrConfig contracts.OCRv2Config, + evmChainID string, +) ([]*client.Job, error) { + // Send keeper jobs to registry and plugin nodes + primaryNode := pluginNodes[0] + primaryNodeAddress, err := primaryNode.PrimaryEthAddress() + if err != nil { + l.Error().Err(err).Msg("Reading ETH Keys from Plugin Client shouldn't fail") + return nil, err + } + nodeAddresses, err := PluginNodeAddressesLocal(pluginNodes) + if err != nil { + l.Error().Err(err).Msg("Retrieving on-chain wallet addresses for plugin nodes shouldn't fail") + return nil, err + } + nodeAddressesStr, payees := make([]string, 0), make([]string, 0) + for _, cla := range nodeAddresses { + nodeAddressesStr = append(nodeAddressesStr, cla.Hex()) + payees = append(payees, primaryNodeAddress) + } + err = keeperRegistry.SetKeepers(nodeAddressesStr, payees, ocrConfig) + if err != nil { + l.Error().Err(err).Msg("Setting keepers in the registry shouldn't fail") + return nil, err + } + jobs := []*client.Job{} + for _, pluginNode := range pluginNodes { + pluginNodeAddress, err := pluginNode.PrimaryEthAddress() + if err != nil { + l.Error().Err(err).Msg("Error retrieving plugin node address") + return nil, err + } + job, err := pluginNode.MustCreateJob(&client.KeeperJobSpec{ + Name: fmt.Sprintf("keeper-test-%s", keeperRegistry.Address()), + ContractAddress: keeperRegistry.Address(), + FromAddress: pluginNodeAddress, + EVMChainID: evmChainID, + MinIncomingConfirmations: 1, + }) + if err != nil { + l.Error().Err(err).Msg("Creating KeeperV2 Job shouldn't fail") + return nil, err + } + jobs = append(jobs, job) + } + return jobs, nil +} diff --git a/integration-tests/actions/ocr2_helpers.go b/integration-tests/actions/ocr2_helpers.go new file mode 100644 index 00000000..2636238e --- /dev/null +++ b/integration-tests/actions/ocr2_helpers.go @@ -0,0 +1,504 @@ +package actions + +import ( + 
"crypto/ed25519" + "encoding/hex" + "fmt" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/lib/pq" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "golang.org/x/sync/errgroup" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/libocr/offchainreporting2/reportingplugin/median" + "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-testing-framework/blockchain" + ctfClient "github.com/goplugin/plugin-testing-framework/client" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/testhelpers" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +// DeployOCRv2Contracts deploys a number of OCRv2 contracts and configures them with defaults +func DeployOCRv2Contracts( + numberOfContracts int, + linkTokenContract contracts.LinkToken, + contractDeployer contracts.ContractDeployer, + transmitters []string, + client blockchain.EVMClient, + ocrOptions contracts.OffchainOptions, +) ([]contracts.OffchainAggregatorV2, error) { + var ocrInstances []contracts.OffchainAggregatorV2 + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + ocrInstance, err := contractDeployer.DeployOffchainAggregatorV2( + linkTokenContract.Address(), + ocrOptions, + ) + if err != nil { + return nil, fmt.Errorf("OCRv2 instance deployment have failed: %w", err) + } + ocrInstances = append(ocrInstances, ocrInstance) + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("failed to wait for OCRv2 
contract deployments: %w", err) + } + } + } + err := client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("error waiting for OCRv2 contract deployments: %w", err) + } + + // Gather address payees + var payees []string + for range transmitters { + payees = append(payees, client.GetDefaultWallet().Address()) + } + + // Set Payees + for contractCount, ocrInstance := range ocrInstances { + err = ocrInstance.SetPayees(transmitters, payees) + if err != nil { + return nil, fmt.Errorf("error settings OCR payees: %w", err) + } + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("failed to wait for setting OCR payees: %w", err) + } + } + } + return ocrInstances, client.WaitForEvents() +} + +func ConfigureOCRv2AggregatorContracts( + client blockchain.EVMClient, + contractConfig *contracts.OCRv2Config, + ocrv2Contracts []contracts.OffchainAggregatorV2, +) error { + for contractCount, ocrInstance := range ocrv2Contracts { + // Exclude the first node, which will be used as a bootstrapper + err := ocrInstance.SetConfig(contractConfig) + if err != nil { + return fmt.Errorf("error setting OCR config for contract '%s': %w", ocrInstance.Address(), err) + } + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + if err != nil { + return fmt.Errorf("failed to wait for setting OCR config: %w", err) + } + } + } + return client.WaitForEvents() +} + +// BuildMedianOCR2Config builds a default OCRv2 config for the given plugin nodes for a standard median aggregation job +func BuildMedianOCR2Config( + workerNodes []*client.PluginK8sClient, + ocrOffchainOptions contracts.OffchainOptions, +) (*contracts.OCRv2Config, error) { + S, oracleIdentities, err := GetOracleIdentities(workerNodes) + if err != nil { + return nil, err + } + 
signerKeys, transmitterAccounts, f_, _, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests( + 30*time.Second, // deltaProgress time.Duration, + 30*time.Second, // deltaResend time.Duration, + 10*time.Second, // deltaRound time.Duration, + 20*time.Second, // deltaGrace time.Duration, + 20*time.Second, // deltaStage time.Duration, + 3, // rMax uint8, + S, // s []int, + oracleIdentities, // oracles []OracleIdentityExtra, + median.OffchainConfig{ + AlphaReportInfinite: false, + AlphaReportPPB: 1, + AlphaAcceptInfinite: false, + AlphaAcceptPPB: 1, + DeltaC: time.Minute * 30, + }.Encode(), // reportingPluginConfig []byte, + 5*time.Second, // maxDurationQuery time.Duration, + 5*time.Second, // maxDurationObservation time.Duration, + 5*time.Second, // maxDurationReport time.Duration, + 5*time.Second, // maxDurationShouldAcceptFinalizedReport time.Duration, + 5*time.Second, // maxDurationShouldTransmitAcceptedReport time.Duration, + 1, // f int, + nil, // The median reporting plugin has an empty onchain config + ) + if err != nil { + return nil, err + } + + // Convert signers to addresses + var signerAddresses []common.Address + for _, signer := range signerKeys { + signerAddresses = append(signerAddresses, common.BytesToAddress(signer)) + } + + // Convert transmitters to addresses + var transmitterAddresses []common.Address + for _, account := range transmitterAccounts { + transmitterAddresses = append(transmitterAddresses, common.HexToAddress(string(account))) + } + + onchainConfig, err := testhelpers.GenerateDefaultOCR2OnchainConfig(ocrOffchainOptions.MinimumAnswer, ocrOffchainOptions.MaximumAnswer) + + return &contracts.OCRv2Config{ + Signers: signerAddresses, + Transmitters: transmitterAddresses, + F: f_, + OnchainConfig: onchainConfig, + OffchainConfigVersion: offchainConfigVersion, + OffchainConfig: []byte(fmt.Sprintf("0x%s", offchainConfig)), + }, err +} + +// GetOracleIdentities retrieves all plugin nodes' OCR2 config 
identities with defaul key index +func GetOracleIdentities(pluginNodes []*client.PluginK8sClient) ([]int, []confighelper.OracleIdentityExtra, error) { + return GetOracleIdentitiesWithKeyIndex(pluginNodes, 0) +} + +// GetOracleIdentitiesWithKeyIndex retrieves all plugin nodes' OCR2 config identities by key index +func GetOracleIdentitiesWithKeyIndex( + pluginNodes []*client.PluginK8sClient, + keyIndex int, +) ([]int, []confighelper.OracleIdentityExtra, error) { + S := make([]int, len(pluginNodes)) + oracleIdentities := make([]confighelper.OracleIdentityExtra, len(pluginNodes)) + sharedSecretEncryptionPublicKeys := make([]types.ConfigEncryptionPublicKey, len(pluginNodes)) + eg := &errgroup.Group{} + for i, cl := range pluginNodes { + index, pluginNode := i, cl + eg.Go(func() error { + addresses, err := pluginNode.EthAddresses() + if err != nil { + return err + } + ocr2Keys, err := pluginNode.MustReadOCR2Keys() + if err != nil { + return err + } + var ocr2Config client.OCR2KeyAttributes + for _, key := range ocr2Keys.Data { + if key.Attributes.ChainType == string(chaintype.EVM) { + ocr2Config = key.Attributes + break + } + } + + keys, err := pluginNode.MustReadP2PKeys() + if err != nil { + return err + } + p2pKeyID := keys.Data[0].Attributes.PeerID + + offchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.OffChainPublicKey, "ocr2off_evm_")) + if err != nil { + return err + } + + offchainPkBytesFixed := [ed25519.PublicKeySize]byte{} + n := copy(offchainPkBytesFixed[:], offchainPkBytes) + if n != ed25519.PublicKeySize { + return fmt.Errorf("wrong number of elements copied") + } + + configPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.ConfigPublicKey, "ocr2cfg_evm_")) + if err != nil { + return err + } + + configPkBytesFixed := [ed25519.PublicKeySize]byte{} + n = copy(configPkBytesFixed[:], configPkBytes) + if n != ed25519.PublicKeySize { + return fmt.Errorf("wrong number of elements copied") + } + + onchainPkBytes, err := 
hex.DecodeString(strings.TrimPrefix(ocr2Config.OnChainPublicKey, "ocr2on_evm_")) + if err != nil { + return err + } + + sharedSecretEncryptionPublicKeys[index] = configPkBytesFixed + oracleIdentities[index] = confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: onchainPkBytes, + OffchainPublicKey: offchainPkBytesFixed, + PeerID: p2pKeyID, + TransmitAccount: types.Account(addresses[keyIndex]), + }, + ConfigEncryptionPublicKey: configPkBytesFixed, + } + S[index] = 1 + log.Debug(). + Interface("OnChainPK", onchainPkBytes). + Interface("OffChainPK", offchainPkBytesFixed). + Interface("ConfigPK", configPkBytesFixed). + Str("PeerID", p2pKeyID). + Str("Address", addresses[keyIndex]). + Msg("Oracle identity") + return nil + }) + } + + return S, oracleIdentities, eg.Wait() +} + +// CreateOCRv2Jobs bootstraps the first node and to the other nodes sends ocr jobs that +// read from different adapters, to be used in combination with SetAdapterResponses +func CreateOCRv2Jobs( + ocrInstances []contracts.OffchainAggregatorV2, + bootstrapNode *client.PluginK8sClient, + workerPluginNodes []*client.PluginK8sClient, + mockserver *ctfClient.MockserverClient, + mockServerValue int, // Value to get from the mock server when querying the path + chainId uint64, // EVM chain ID + forwardingAllowed bool, +) error { + // Collect P2P ID + bootstrapP2PIds, err := bootstrapNode.MustReadP2PKeys() + if err != nil { + return err + } + p2pV2Bootstrapper := fmt.Sprintf("%s@%s:%d", bootstrapP2PIds.Data[0].Attributes.PeerID, bootstrapNode.InternalIP(), 6690) + mockJuelsPath := "ocr2/juelsPerFeeCoinSource" + // Set the juelsPerFeeCoinSource config value + err = mockserver.SetValuePath(mockJuelsPath, mockServerValue) + if err != nil { + return err + } + + // Create the juels bridge for each node only once + juelsBridge := &client.BridgeTypeAttributes{ + Name: "juels", + URL: fmt.Sprintf("%s/%s", mockserver.Config.ClusterURL, mockJuelsPath), + } + for _, 
pluginNode := range workerPluginNodes { + err = pluginNode.MustCreateBridge(juelsBridge) + if err != nil { + return fmt.Errorf("failed creating bridge %s on CL node : %w", juelsBridge.Name, err) + } + } + + for _, ocrInstance := range ocrInstances { + bootstrapSpec := &client.OCR2TaskJobSpec{ + Name: fmt.Sprintf("ocr2-bootstrap-%s", ocrInstance.Address()), + JobType: "bootstrap", + OCR2OracleSpec: job.OCR2OracleSpec{ + ContractID: ocrInstance.Address(), + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": chainId, + }, + MonitoringEndpoint: null.StringFrom(fmt.Sprintf("%s/%s", mockserver.Config.ClusterURL, "ocr2")), + ContractConfigTrackerPollInterval: *models.NewInterval(15 * time.Second), + }, + } + _, err := bootstrapNode.MustCreateJob(bootstrapSpec) + if err != nil { + return fmt.Errorf("creating bootstrap job have failed: %w", err) + } + + for _, pluginNode := range workerPluginNodes { + nodeTransmitterAddress, err := pluginNode.PrimaryEthAddress() + if err != nil { + return fmt.Errorf("getting primary ETH address from OCR node have failed: %w", err) + } + nodeOCRKeys, err := pluginNode.MustReadOCR2Keys() + if err != nil { + return fmt.Errorf("getting OCR keys from OCR node have failed: %w", err) + } + nodeOCRKeyId := nodeOCRKeys.Data[0].ID + + nodeContractPairID, err := BuildOCR2NodeContractPairID(pluginNode, ocrInstance) + if err != nil { + return err + } + bta := &client.BridgeTypeAttributes{ + Name: nodeContractPairID, + URL: fmt.Sprintf("%s/%s", mockserver.Config.ClusterURL, strings.TrimPrefix(nodeContractPairID, "/")), + } + + err = pluginNode.MustCreateBridge(bta) + if err != nil { + return fmt.Errorf("failed creating bridge %s on CL node: %w", bta.Name, err) + } + + ocrSpec := &client.OCR2TaskJobSpec{ + Name: fmt.Sprintf("ocr2-%s", uuid.NewString()), + JobType: "offchainreporting2", + MaxTaskDuration: "1m", + ObservationSource: client.ObservationSourceSpecBridge(bta), + ForwardingAllowed: forwardingAllowed, + OCR2OracleSpec: 
job.OCR2OracleSpec{ + PluginType: "median", + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": chainId, + }, + PluginConfig: map[string]any{ + "juelsPerFeeCoinSource": fmt.Sprintf("\"\"\"%s\"\"\"", client.ObservationSourceSpecBridge(juelsBridge)), + }, + ContractConfigTrackerPollInterval: *models.NewInterval(15 * time.Second), + ContractID: ocrInstance.Address(), // registryAddr + OCRKeyBundleID: null.StringFrom(nodeOCRKeyId), // get node ocr2config.ID + TransmitterID: null.StringFrom(nodeTransmitterAddress), // node addr + P2PV2Bootstrappers: pq.StringArray{p2pV2Bootstrapper}, // bootstrap node key and address @bootstrap:6690 + }, + } + _, err = pluginNode.MustCreateJob(ocrSpec) + if err != nil { + return fmt.Errorf("creating OCR task job on OCR node have failed: %w", err) + } + } + } + return nil +} + +// StartNewOCR2Round requests a new round from the ocr2 contracts and waits for confirmation +func StartNewOCR2Round( + roundNumber int64, + ocrInstances []contracts.OffchainAggregatorV2, + client blockchain.EVMClient, + timeout time.Duration, + logger zerolog.Logger, +) error { + time.Sleep(5 * time.Second) + for i := 0; i < len(ocrInstances); i++ { + err := ocrInstances[i].RequestNewRound() + if err != nil { + return fmt.Errorf("requesting new OCR round %d have failed: %w", i+1, err) + } + ocrRound := contracts.NewOffchainAggregatorV2RoundConfirmer(ocrInstances[i], big.NewInt(roundNumber), timeout, logger) + client.AddHeaderEventSubscription(ocrInstances[i].Address(), ocrRound) + err = ocrRound.Wait() // wait for OCR Round to complete + if err != nil { + return fmt.Errorf("failed to wait for OCR Round %d to complete instance %d", roundNumber, i) + } + if !ocrRound.Complete() { + return fmt.Errorf("failed to complete OCR Round %d for ocr instance %d", roundNumber, i) + } + } + return nil +} + +// WatchNewOCR2Round is the same as StartNewOCR2Round but does NOT explicitly request a new round +// as that can cause odd behavior in tandem with changing 
adapter values in OCR2
func WatchNewOCR2Round(
	roundNumber int64,
	ocrInstances []contracts.OffchainAggregatorV2,
	client blockchain.EVMClient,
	timeout time.Duration,
	logger zerolog.Logger,
) error {
	for i := 0; i < len(ocrInstances); i++ {
		// Register a round confirmer for the expected round number, then block
		// on the client's event subscriptions (bounded by `timeout` inside the
		// confirmer).
		// NOTE(review): unlike StartNewOCR2Round, this never calls
		// ocrRound.Wait()/Complete() — presumably WaitForEvents drains all
		// registered header subscriptions, including this confirmer; confirm
		// against blockchain.EVMClient's contract.
		ocrRound := contracts.NewOffchainAggregatorV2RoundConfirmer(ocrInstances[i], big.NewInt(roundNumber), timeout, logger)
		client.AddHeaderEventSubscription(ocrInstances[i].Address(), ocrRound)
		err := client.WaitForEvents()
		if err != nil {
			return fmt.Errorf("failed to wait for event subscriptions of OCR instance %d: %w", i+1, err)
		}
	}
	return nil
}

// SetOCR2AdapterResponse sets a single adapter response that correlates with an ocr contract and a plugin node
// used for OCR2 tests
func SetOCR2AdapterResponse(
	response int,
	ocrInstance contracts.OffchainAggregatorV2,
	pluginNode *client.PluginK8sClient,
	mockserver *ctfClient.MockserverClient,
) error {
	// The mock path is derived from the (node, contract) pair so each pair can
	// be fed an independent value by the mockserver.
	nodeContractPairID, err := BuildOCR2NodeContractPairID(pluginNode, ocrInstance)
	if err != nil {
		return err
	}
	path := fmt.Sprintf("/%s", nodeContractPairID)
	err = mockserver.SetValuePath(path, response)
	if err != nil {
		return fmt.Errorf("setting mockserver value path failed: %w", err)
	}
	return nil
}

// SetOCR2AllAdapterResponsesToTheSameValue sets the mock responses in mockserver that are read by plugin nodes
// to simulate different adapters. 
This sets all adapter responses for each node and contract to the same response
// used for OCR2 tests
func SetOCR2AllAdapterResponsesToTheSameValue(
	response int,
	ocrInstances []contracts.OffchainAggregatorV2,
	pluginNodes []*client.PluginK8sClient,
	mockserver *ctfClient.MockserverClient,
) error {
	// Fan out one SetOCR2AdapterResponse call per (contract, node) pair and
	// surface the first error, if any, via the errgroup.
	eg := &errgroup.Group{}
	for _, o := range ocrInstances {
		ocrInstance := o // shadow for the closure (pre-Go 1.22 loop-variable capture)
		for _, n := range pluginNodes {
			node := n // shadow for the closure
			eg.Go(func() error {
				return SetOCR2AdapterResponse(response, ocrInstance, node, mockserver)
			})
		}
	}
	return eg.Wait()
}

// SetOCR2AllAdapterResponsesToDifferentValues sets the mock responses in mockserver that are read by plugin nodes
// to simulate different adapters. This sets all adapter responses for each node and contract to different responses
// used for OCR2 tests
func SetOCR2AllAdapterResponsesToDifferentValues(
	responses []int,
	ocrInstances []contracts.OffchainAggregatorV2,
	pluginNodes []*client.PluginK8sClient,
	mockserver *ctfClient.MockserverClient,
) error {
	if len(responses) != len(ocrInstances)*len(pluginNodes) {
		return fmt.Errorf(
			"amount of responses %d should be equal to the amount of OCR instances %d times the amount of Plugin nodes %d",
			len(responses), len(ocrInstances), len(pluginNodes),
		)
	}
	// NOTE(review): the loop below starts at node index 1 (node 0 is presumably
	// the bootstrap node) and reuses responses[nodeIndex-1] for every contract,
	// yet the length check above demands len(ocrInstances)*len(pluginNodes)
	// responses. Only the first len(pluginNodes)-1 responses are ever used —
	// confirm whether the check or the indexing is the intended behavior.
	eg := &errgroup.Group{}
	for _, o := range ocrInstances {
		ocrInstance := o // shadow for the closure (pre-Go 1.22 loop-variable capture)
		for ni := 1; ni < len(pluginNodes); ni++ {
			nodeIndex := ni // shadow for the closure
			eg.Go(func() error {
				return SetOCR2AdapterResponse(responses[nodeIndex-1], ocrInstance, pluginNodes[nodeIndex], mockserver)
			})
		}
	}
	return eg.Wait()
}

// BuildOCR2NodeContractPairID builds a UUID based on a related pair of a Plugin node and OCRv2 contract
func BuildOCR2NodeContractPairID(node *client.PluginK8sClient, ocrInstance contracts.OffchainAggregatorV2) (string, error) {
	if node == nil {
		return "", fmt.Errorf("plugin node is nil")
	}
	if ocrInstance == nil {
		return "", fmt.Errorf("OCR Instance is nil")
	}
	nodeAddress, 
err := node.PrimaryEthAddress() + if err != nil { + return "", fmt.Errorf("getting plugin node's primary ETH address failed: %w", err) + } + shortNodeAddr := nodeAddress[2:12] + shortOCRAddr := ocrInstance.Address()[2:12] + return strings.ToLower(fmt.Sprintf("node_%s_contract_%s", shortNodeAddr, shortOCRAddr)), nil +} diff --git a/integration-tests/actions/ocr2_helpers_local.go b/integration-tests/actions/ocr2_helpers_local.go new file mode 100644 index 00000000..f4a049e1 --- /dev/null +++ b/integration-tests/actions/ocr2_helpers_local.go @@ -0,0 +1,363 @@ +package actions + +import ( + "crypto/ed25519" + "encoding/hex" + "fmt" + "net/http" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/lib/pq" + "github.com/rs/zerolog/log" + "github.com/goplugin/libocr/offchainreporting2/reportingplugin/median" + "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + "github.com/goplugin/libocr/offchainreporting2plus/types" + "golang.org/x/sync/errgroup" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/plugin-common/pkg/codec" + "github.com/goplugin/plugin-testing-framework/docker/test_env" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" + + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/testhelpers" + "github.com/goplugin/pluginv3.0/v2/core/store/models" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +func CreateOCRv2JobsLocal( + ocrInstances []contracts.OffchainAggregatorV2, + bootstrapNode *client.PluginClient, + workerPluginNodes []*client.PluginClient, + mockAdapter *test_env.Killgrave, + mockAdapterPath string, // Path on the mock server for the Plugin nodes to query + mockAdapterValue int, // Value to get from the mock server when querying the path + chainId 
uint64, // EVM chain ID + forwardingAllowed bool, + enableChainReaderAndCodec bool, +) error { + // Collect P2P ID + bootstrapP2PIds, err := bootstrapNode.MustReadP2PKeys() + if err != nil { + return err + } + p2pV2Bootstrapper := fmt.Sprintf("%s@%s:%d", bootstrapP2PIds.Data[0].Attributes.PeerID, bootstrapNode.InternalIP(), 6690) + // Set the value for the jobs to report on + err = mockAdapter.SetAdapterBasedIntValuePath(mockAdapterPath, []string{http.MethodGet, http.MethodPost}, mockAdapterValue) + if err != nil { + return err + } + // Set the juelsPerFeeCoinSource config value + err = mockAdapter.SetAdapterBasedIntValuePath(fmt.Sprintf("%s/juelsPerFeeCoinSource", mockAdapterPath), []string{http.MethodGet, http.MethodPost}, mockAdapterValue) + if err != nil { + return err + } + + for _, ocrInstance := range ocrInstances { + bootstrapSpec := &client.OCR2TaskJobSpec{ + Name: fmt.Sprintf("ocr2_bootstrap-%s", uuid.NewString()), + JobType: "bootstrap", + OCR2OracleSpec: job.OCR2OracleSpec{ + ContractID: ocrInstance.Address(), + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": chainId, + }, + MonitoringEndpoint: null.StringFrom(fmt.Sprintf("%s/%s", mockAdapter.InternalEndpoint, mockAdapterPath)), + ContractConfigTrackerPollInterval: *models.NewInterval(15 * time.Second), + }, + } + _, err := bootstrapNode.MustCreateJob(bootstrapSpec) + if err != nil { + return fmt.Errorf("creating bootstrap job have failed: %w", err) + } + + for _, pluginNode := range workerPluginNodes { + nodeTransmitterAddress, err := pluginNode.PrimaryEthAddress() + if err != nil { + return fmt.Errorf("getting primary ETH address from OCR node have failed: %w", err) + } + nodeOCRKeys, err := pluginNode.MustReadOCR2Keys() + if err != nil { + return fmt.Errorf("getting OCR keys from OCR node have failed: %w", err) + } + nodeOCRKeyId := nodeOCRKeys.Data[0].ID + + bta := &client.BridgeTypeAttributes{ + Name: fmt.Sprintf("%s-%s", mockAdapterPath, uuid.NewString()), + URL: 
fmt.Sprintf("%s/%s", mockAdapter.InternalEndpoint, mockAdapterPath), + } + juelsBridge := &client.BridgeTypeAttributes{ + Name: fmt.Sprintf("juels-%s", uuid.NewString()), + URL: fmt.Sprintf("%s/%s/juelsPerFeeCoinSource", mockAdapter.InternalEndpoint, mockAdapterPath), + } + err = pluginNode.MustCreateBridge(bta) + if err != nil { + return fmt.Errorf("creating bridge on CL node failed: %w", err) + } + err = pluginNode.MustCreateBridge(juelsBridge) + if err != nil { + return fmt.Errorf("creating bridge on CL node failed: %w", err) + } + + ocrSpec := &client.OCR2TaskJobSpec{ + Name: fmt.Sprintf("ocr2-%s", uuid.NewString()), + JobType: "offchainreporting2", + MaxTaskDuration: "1m", + ObservationSource: client.ObservationSourceSpecBridge(bta), + ForwardingAllowed: forwardingAllowed, + OCR2OracleSpec: job.OCR2OracleSpec{ + PluginType: "median", + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": chainId, + }, + PluginConfig: map[string]any{ + "juelsPerFeeCoinSource": fmt.Sprintf("\"\"\"%s\"\"\"", client.ObservationSourceSpecBridge(juelsBridge)), + }, + ContractConfigTrackerPollInterval: *models.NewInterval(15 * time.Second), + ContractID: ocrInstance.Address(), // registryAddr + OCRKeyBundleID: null.StringFrom(nodeOCRKeyId), // get node ocr2config.ID + TransmitterID: null.StringFrom(nodeTransmitterAddress), // node addr + P2PV2Bootstrappers: pq.StringArray{p2pV2Bootstrapper}, // bootstrap node key and address @bootstrap:6690 + }, + } + if enableChainReaderAndCodec { + ocrSpec.OCR2OracleSpec.RelayConfig["chainReader"] = evmtypes.ChainReaderConfig{ + Contracts: map[string]evmtypes.ChainContractReader{ + "median": { + ContractABI: 
`[{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"requester","type":"address"},{"indexed":false,"internalType":"bytes32","name":"configDigest","type":"bytes32"},{"indexed":false,"internalType":"uint32","name":"epoch","type":"uint32"},{"indexed":false,"internalType":"uint8","name":"round","type":"uint8"}],"name":"RoundRequested","type":"event"},{"inputs":[],"name":"latestTransmissionDetails","outputs":[{"internalType":"bytes32","name":"configDigest","type":"bytes32"},{"internalType":"uint32","name":"epoch","type":"uint32"},{"internalType":"uint8","name":"round","type":"uint8"},{"internalType":"int192","name":"latestAnswer_","type":"int192"},{"internalType":"uint64","name":"latestTimestamp_","type":"uint64"}],"stateMutability":"view","type":"function"}]`, + Configs: map[string]*evmtypes.ChainReaderDefinition{ + "LatestTransmissionDetails": { + ChainSpecificName: "latestTransmissionDetails", + OutputModifications: codec.ModifiersConfig{ + &codec.EpochToTimeModifierConfig{ + Fields: []string{"LatestTimestamp_"}, + }, + &codec.RenameModifierConfig{ + Fields: map[string]string{ + "LatestAnswer_": "LatestAnswer", + "LatestTimestamp_": "LatestTimestamp", + }, + }, + }, + }, + "LatestRoundRequested": { + ChainSpecificName: "RoundRequested", + ReadType: evmtypes.Event, + }, + }, + }, + }, + } + ocrSpec.OCR2OracleSpec.RelayConfig["codec"] = evmtypes.CodecConfig{ + Configs: map[string]evmtypes.ChainCodecConfig{ + "MedianReport": { + TypeABI: `[{"Name": "Timestamp","Type": "uint32"},{"Name": "Observers","Type": "bytes32"},{"Name": "Observations","Type": "int192[]"},{"Name": "JuelsPerFeeCoin","Type": "int192"}]`, + }, + }, + } + } + + _, err = pluginNode.MustCreateJob(ocrSpec) + if err != nil { + return fmt.Errorf("creating OCR task job on OCR node have failed: %w", err) + } + } + } + return nil +} + +func BuildMedianOCR2ConfigLocal(workerNodes []*client.PluginClient, ocrOffchainOptions contracts.OffchainOptions) (*contracts.OCRv2Config, error) { + 
S, oracleIdentities, err := GetOracleIdentitiesWithKeyIndexLocal(workerNodes, 0) + if err != nil { + return nil, err + } + signerKeys, transmitterAccounts, f_, _, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests( + 30*time.Second, // deltaProgress time.Duration, + 30*time.Second, // deltaResend time.Duration, + 10*time.Second, // deltaRound time.Duration, + 20*time.Second, // deltaGrace time.Duration, + 20*time.Second, // deltaStage time.Duration, + 3, // rMax uint8, + S, // s []int, + oracleIdentities, // oracles []OracleIdentityExtra, + median.OffchainConfig{ + AlphaReportInfinite: false, + AlphaReportPPB: 1, + AlphaAcceptInfinite: false, + AlphaAcceptPPB: 1, + DeltaC: time.Minute * 30, + }.Encode(), // reportingPluginConfig []byte, + 5*time.Second, // maxDurationQuery time.Duration, + 5*time.Second, // maxDurationObservation time.Duration, + 5*time.Second, // maxDurationReport time.Duration, + 5*time.Second, // maxDurationShouldAcceptFinalizedReport time.Duration, + 5*time.Second, // maxDurationShouldTransmitAcceptedReport time.Duration, + 1, // f int, + nil, // The median reporting plugin has an empty onchain config + ) + if err != nil { + return nil, err + } + + // Convert signers to addresses + var signerAddresses []common.Address + for _, signer := range signerKeys { + signerAddresses = append(signerAddresses, common.BytesToAddress(signer)) + } + + // Convert transmitters to addresses + var transmitterAddresses []common.Address + for _, account := range transmitterAccounts { + transmitterAddresses = append(transmitterAddresses, common.HexToAddress(string(account))) + } + + onchainConfig, err := testhelpers.GenerateDefaultOCR2OnchainConfig(ocrOffchainOptions.MinimumAnswer, ocrOffchainOptions.MaximumAnswer) + + return &contracts.OCRv2Config{ + Signers: signerAddresses, + Transmitters: transmitterAddresses, + F: f_, + OnchainConfig: onchainConfig, + OffchainConfigVersion: offchainConfigVersion, + OffchainConfig: 
[]byte(fmt.Sprintf("0x%s", offchainConfig)), + }, err +} + +func GetOracleIdentitiesWithKeyIndexLocal( + pluginNodes []*client.PluginClient, + keyIndex int, +) ([]int, []confighelper.OracleIdentityExtra, error) { + S := make([]int, len(pluginNodes)) + oracleIdentities := make([]confighelper.OracleIdentityExtra, len(pluginNodes)) + sharedSecretEncryptionPublicKeys := make([]types.ConfigEncryptionPublicKey, len(pluginNodes)) + eg := &errgroup.Group{} + for i, cl := range pluginNodes { + index, pluginNode := i, cl + eg.Go(func() error { + addresses, err := pluginNode.EthAddresses() + if err != nil { + return err + } + ocr2Keys, err := pluginNode.MustReadOCR2Keys() + if err != nil { + return err + } + var ocr2Config client.OCR2KeyAttributes + for _, key := range ocr2Keys.Data { + if key.Attributes.ChainType == string(chaintype.EVM) { + ocr2Config = key.Attributes + break + } + } + + keys, err := pluginNode.MustReadP2PKeys() + if err != nil { + return err + } + p2pKeyID := keys.Data[0].Attributes.PeerID + + offchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.OffChainPublicKey, "ocr2off_evm_")) + if err != nil { + return err + } + + offchainPkBytesFixed := [ed25519.PublicKeySize]byte{} + n := copy(offchainPkBytesFixed[:], offchainPkBytes) + if n != ed25519.PublicKeySize { + return fmt.Errorf("wrong number of elements copied") + } + + configPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.ConfigPublicKey, "ocr2cfg_evm_")) + if err != nil { + return err + } + + configPkBytesFixed := [ed25519.PublicKeySize]byte{} + n = copy(configPkBytesFixed[:], configPkBytes) + if n != ed25519.PublicKeySize { + return fmt.Errorf("wrong number of elements copied") + } + + onchainPkBytes, err := hex.DecodeString(strings.TrimPrefix(ocr2Config.OnChainPublicKey, "ocr2on_evm_")) + if err != nil { + return err + } + + sharedSecretEncryptionPublicKeys[index] = configPkBytesFixed + oracleIdentities[index] = confighelper.OracleIdentityExtra{ + OracleIdentity: 
confighelper.OracleIdentity{
					OnchainPublicKey:  onchainPkBytes,
					OffchainPublicKey: offchainPkBytesFixed,
					PeerID:            p2pKeyID,
					TransmitAccount:   types.Account(addresses[keyIndex]),
				},
				ConfigEncryptionPublicKey: configPkBytesFixed,
			}
			S[index] = 1
			log.Debug().
				Interface("OnChainPK", onchainPkBytes).
				Interface("OffChainPK", offchainPkBytesFixed).
				Interface("ConfigPK", configPkBytesFixed).
				Str("PeerID", p2pKeyID).
				Str("Address", addresses[keyIndex]).
				Msg("Oracle identity")
			return nil
		})
	}

	return S, oracleIdentities, eg.Wait()
}

// DeleteJobs will delete ALL jobs from the nodes
func DeleteJobs(nodes []*client.PluginClient) error {
	for _, node := range nodes {
		if node == nil {
			return fmt.Errorf("found a nil plugin node in the list of plugin nodes while tearing down: %v", nodes)
		}
		jobs, _, err := node.ReadJobs()
		if err != nil {
			return fmt.Errorf("error reading jobs from plugin node: %w", err)
		}
		for _, maps := range jobs.Data {
			if _, ok := maps["id"]; !ok {
				return fmt.Errorf("error reading job id from plugin node's jobs %+v", jobs.Data)
			}
			id := maps["id"].(string)
			_, err2 := node.DeleteJob(id)
			if err2 != nil {
				// BUG FIX: wrap err2 (the DeleteJob failure). The previous code
				// wrapped err, which is always nil here because ReadJobs succeeded,
				// so the delete failure was silently dropped from the chain.
				return fmt.Errorf("error deleting job from plugin node: %w", err2)
			}
		}
	}
	return nil
}

// DeleteBridges will delete ALL bridges from the nodes
func DeleteBridges(nodes []*client.PluginClient) error {
	for _, node := range nodes {
		if node == nil {
			return fmt.Errorf("found a nil plugin node in the list of plugin nodes while tearing down: %v", nodes)
		}

		bridges, _, err := node.ReadBridges()
		if err != nil {
			return err
		}
		for _, b := range bridges.Data {
			_, err = node.DeleteBridge(b.Attributes.Name)
			if err != nil {
				return err
			}
		}

	}
	return nil
}
diff --git a/integration-tests/actions/ocr2vrf_actions/ocr2vrf_config_helpers.go b/integration-tests/actions/ocr2vrf_actions/ocr2vrf_config_helpers.go
new file mode 100644
index 00000000..2faa156f
--- /dev/null
+++ 
b/integration-tests/actions/ocr2vrf_actions/ocr2vrf_config_helpers.go @@ -0,0 +1,338 @@ +package ocr2vrf_actions + +import ( + "crypto/ed25519" + "encoding/hex" + "errors" + "fmt" + "strconv" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + "github.com/stretchr/testify/require" + "go.dedis.ch/kyber/v3" + "go.dedis.ch/kyber/v3/group/edwards25519" + "gopkg.in/guregu/null.v4" + + "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-vrf/altbn_128" + "github.com/goplugin/plugin-vrf/dkg" + "github.com/goplugin/plugin-vrf/ocr2vrf" + ocr2vrftypes "github.com/goplugin/plugin-vrf/types" + + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +// CreateOCR2VRFJobs bootstraps the first node and to the other nodes sends ocr jobs +func CreateOCR2VRFJobs( + t *testing.T, + bootstrapNode *client.PluginK8sClient, + nonBootstrapNodes []*client.PluginK8sClient, + OCR2VRFPluginConfig *OCR2VRFPluginConfig, + chainID int64, + keyIndex int, +) { + l := logging.GetTestLogger(t) + p2pV2Bootstrapper := createBootstrapJob(t, bootstrapNode, OCR2VRFPluginConfig.DKGConfig.DKGContractAddress, chainID) + + createNonBootstrapJobs(t, nonBootstrapNodes, OCR2VRFPluginConfig, chainID, keyIndex, p2pV2Bootstrapper) + l.Info().Msg("Done creating OCR automation jobs") +} + +func createNonBootstrapJobs( + t *testing.T, + nonBootstrapNodes []*client.PluginK8sClient, + OCR2VRFPluginConfig *OCR2VRFPluginConfig, + chainID int64, + keyIndex int, + P2Pv2Bootstrapper string, +) { + for index, nonBootstrapNode := range nonBootstrapNodes { + nodeTransmitterAddress, err := nonBootstrapNode.EthAddresses() + 
require.NoError(t, err, "Shouldn't fail getting primary ETH address from OCR node %d", index) + nodeOCRKeys, err := nonBootstrapNode.MustReadOCR2Keys() + require.NoError(t, err, "Shouldn't fail getting OCR keys from OCR node %d", index) + var nodeOCRKeyId []string + for _, key := range nodeOCRKeys.Data { + if key.Attributes.ChainType == string(chaintype.EVM) { + nodeOCRKeyId = append(nodeOCRKeyId, key.ID) + break + } + } + + OCR2VRFJobSpec := client.OCR2TaskJobSpec{ + Name: "ocr2", + JobType: "offchainreporting2", + OCR2OracleSpec: job.OCR2OracleSpec{ + PluginType: "ocr2vrf", + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": int(chainID), + }, + ContractID: OCR2VRFPluginConfig.VRFBeaconConfig.VRFBeaconAddress, + OCRKeyBundleID: null.StringFrom(nodeOCRKeyId[keyIndex]), + TransmitterID: null.StringFrom(nodeTransmitterAddress[keyIndex]), + P2PV2Bootstrappers: pq.StringArray{P2Pv2Bootstrapper}, + PluginConfig: map[string]interface{}{ + "dkgEncryptionPublicKey": fmt.Sprintf("\"%s\"", OCR2VRFPluginConfig.DKGConfig.DKGKeyConfigs[index].DKGEncryptionPublicKey), + "dkgSigningPublicKey": fmt.Sprintf("\"%s\"", OCR2VRFPluginConfig.DKGConfig.DKGKeyConfigs[index].DKGSigningPublicKey), + "dkgKeyID": fmt.Sprintf("\"%s\"", OCR2VRFPluginConfig.DKGConfig.DKGKeyID), + "dkgContractAddress": fmt.Sprintf("\"%s\"", OCR2VRFPluginConfig.DKGConfig.DKGContractAddress), + "vrfCoordinatorAddress": fmt.Sprintf("\"%s\"", OCR2VRFPluginConfig.VRFCoordinatorAddress), + "linkEthFeedAddress": fmt.Sprintf("\"%s\"", OCR2VRFPluginConfig.LinkEthFeedAddress), + }, + }, + } + _, err = nonBootstrapNode.MustCreateJob(&OCR2VRFJobSpec) + require.NoError(t, err, "Shouldn't fail creating OCR Task job on OCR node %d", index) + } +} + +func createBootstrapJob(t *testing.T, bootstrapNode *client.PluginK8sClient, dkgAddress string, chainID int64) string { + bootstrapP2PIds, err := bootstrapNode.MustReadP2PKeys() + require.NoError(t, err, "Shouldn't fail reading P2P keys from bootstrap node") + 
bootstrapP2PId := bootstrapP2PIds.Data[0].Attributes.PeerID + + bootstrapSpec := &client.OCR2TaskJobSpec{ + Name: "ocr2 bootstrap node", + JobType: "bootstrap", + OCR2OracleSpec: job.OCR2OracleSpec{ + ContractID: dkgAddress, + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": int(chainID), + }, + }, + } + _, err = bootstrapNode.MustCreateJob(bootstrapSpec) + require.NoError(t, err, "Shouldn't fail creating bootstrap job on bootstrap node") + return fmt.Sprintf("%s@%s:%d", bootstrapP2PId, bootstrapNode.InternalIP(), 6690) +} + +func BuildOCR2DKGConfigVars( + t *testing.T, + ocr2VRFPluginConfig *OCR2VRFPluginConfig, +) contracts.OCRv2Config { + l := logging.GetTestLogger(t) + var onchainPublicKeys []common.Address + for _, onchainPublicKey := range ocr2VRFPluginConfig.OCR2Config.OnchainPublicKeys { + onchainPublicKeys = append(onchainPublicKeys, common.HexToAddress(onchainPublicKey)) + } + + var transmitters []common.Address + for _, transmitterAddress := range ocr2VRFPluginConfig.OCR2Config.TransmitterAddresses { + transmitters = append(transmitters, common.HexToAddress(transmitterAddress)) + } + oracleIdentities, err := toOraclesIdentityList( + onchainPublicKeys, + ocr2VRFPluginConfig.OCR2Config.OffchainPublicKeys, + ocr2VRFPluginConfig.OCR2Config.ConfigPublicKeys, + ocr2VRFPluginConfig.OCR2Config.PeerIds, + ocr2VRFPluginConfig.OCR2Config.TransmitterAddresses, + ) + require.NoError(t, err) + + ed25519Suite := edwards25519.NewBlakeSHA256Ed25519() + var signingKeys []kyber.Point + for _, dkgKey := range ocr2VRFPluginConfig.DKGConfig.DKGKeyConfigs { + signingKeyBytes, err := hex.DecodeString(dkgKey.DKGSigningPublicKey) + require.NoError(t, err) + signingKeyPoint := ed25519Suite.Point() + err = signingKeyPoint.UnmarshalBinary(signingKeyBytes) + require.NoError(t, err) + signingKeys = append(signingKeys, signingKeyPoint) + } + + altbn128Suite := &altbn_128.PairingSuite{} + var encryptionKeys []kyber.Point + for _, dkgKey := range 
ocr2VRFPluginConfig.DKGConfig.DKGKeyConfigs { + encryptionKeyBytes, err := hex.DecodeString(dkgKey.DKGEncryptionPublicKey) + require.NoError(t, err) + encryptionKeyPoint := altbn128Suite.G1().Point() + err = encryptionKeyPoint.UnmarshalBinary(encryptionKeyBytes) + require.NoError(t, err) + encryptionKeys = append(encryptionKeys, encryptionKeyPoint) + } + + keyIDBytes, err := DecodeHexTo32ByteArray(ocr2VRFPluginConfig.DKGConfig.DKGKeyID) + require.NoError(t, err, "Shouldn't fail decoding DKG key ID") + + offchainConfig, err := dkg.OffchainConfig(encryptionKeys, signingKeys, &altbn_128.G1{}, &ocr2vrftypes.PairingTranslation{ + Suite: &altbn_128.PairingSuite{}, + }) + require.NoError(t, err) + onchainConfig, err := dkg.OnchainConfig(dkg.KeyID(keyIDBytes)) + require.NoError(t, err) + + _, _, f, onchainConfig, offchainConfigVersion, offchainConfig, err := + confighelper.ContractSetConfigArgsForTests( + 30*time.Second, // deltaProgress time.Duration, + 10*time.Second, // deltaResend time.Duration, + 10*time.Millisecond, // deltaRound time.Duration, + 20*time.Millisecond, // deltaGrace time.Duration, + 20*time.Millisecond, // deltaStage time.Duration, + 3, // rMax uint8, + ocr2VRFPluginConfig.OCR2Config.Schedule, // s []int, + oracleIdentities, // oracles []OracleIdentityExtra, + offchainConfig, + 10*time.Millisecond, // maxDurationQuery time.Duration, + 10*time.Second, // maxDurationObservation time.Duration, + 10*time.Second, // maxDurationReport time.Duration, + 10*time.Millisecond, // maxDurationShouldAcceptFinalizedReport time.Duration, + 1*time.Second, // maxDurationShouldTransmitAcceptedReport time.Duration, + 1, // f int, + onchainConfig, // onchainConfig []byte, + ) + require.NoError(t, err, "Shouldn't fail building OCR config") + + l.Info().Msg("Done building DKG OCR config") + return contracts.OCRv2Config{ + Signers: onchainPublicKeys, + Transmitters: transmitters, + F: f, + OnchainConfig: onchainConfig, + OffchainConfigVersion: offchainConfigVersion, + 
// DecodeHexTo32ByteArray decodes a hex string into a fixed 32-byte array.
// It returns an error if val is not valid hex or does not decode to exactly
// 32 bytes.
func DecodeHexTo32ByteArray(val string) ([32]byte, error) {
	decoded, err := hex.DecodeString(val)
	if err != nil {
		return [32]byte{}, err
	}
	if len(decoded) != 32 {
		return [32]byte{}, fmt.Errorf("expected value to be 32 bytes but received %d bytes", len(decoded))
	}
	var byteArray [32]byte
	copy(byteArray[:], decoded)
	// Return an explicit nil on success; the original returned the (already nil)
	// err variable, which read as if an error could still propagate here.
	return byteArray, nil
}
*testing.T, + ocr2VRFPluginConfig *OCR2VRFPluginConfig, +) contracts.OCRv2Config { + l := logging.GetTestLogger(t) + var onchainPublicKeys []common.Address + for _, onchainPublicKey := range ocr2VRFPluginConfig.OCR2Config.OnchainPublicKeys { + onchainPublicKeys = append(onchainPublicKeys, common.HexToAddress(onchainPublicKey)) + } + + var transmitters []common.Address + for _, transmitterAddress := range ocr2VRFPluginConfig.OCR2Config.TransmitterAddresses { + transmitters = append(transmitters, common.HexToAddress(transmitterAddress)) + } + + oracleIdentities, err := toOraclesIdentityList( + onchainPublicKeys, + ocr2VRFPluginConfig.OCR2Config.OffchainPublicKeys, + ocr2VRFPluginConfig.OCR2Config.ConfigPublicKeys, + ocr2VRFPluginConfig.OCR2Config.PeerIds, + ocr2VRFPluginConfig.OCR2Config.TransmitterAddresses, + ) + require.NoError(t, err) + + confDelays := make(map[uint32]struct{}) + + for _, c := range ocr2VRFPluginConfig.VRFBeaconConfig.ConfDelays { + confDelay, err := strconv.ParseUint(c, 0, 32) + require.NoError(t, err) + confDelays[uint32(confDelay)] = struct{}{} + } + + onchainConfig := ocr2vrf.OnchainConfig(confDelays) + + _, _, f, onchainConfig, offchainConfigVersion, offchainConfig, err := + confighelper.ContractSetConfigArgsForTests( + 30*time.Second, // deltaProgress time.Duration, + 10*time.Second, // deltaResend time.Duration, + 10*time.Second, // deltaRound time.Duration, + 20*time.Second, // deltaGrace time.Duration, + 20*time.Second, // deltaStage time.Duration, + 3, // rMax uint8, + ocr2VRFPluginConfig.OCR2Config.Schedule, // s []int, + oracleIdentities, // oracles []OracleIdentityExtra, + ocr2vrf.OffchainConfig(ocr2VRFPluginConfig.VRFBeaconConfig.CoordinatorConfig), + 10*time.Millisecond, // maxDurationQuery time.Duration, + 10*time.Second, // maxDurationObservation time.Duration, + 10*time.Second, // maxDurationReport time.Duration, + 5*time.Second, // maxDurationShouldAcceptFinalizedReport time.Duration, + 1*time.Second, // 
var (
	// LinkEthFeedResponse is the static answer served by the mock ETH/PLI feed.
	LinkEthFeedResponse = big.NewInt(1e18)
	// LinkFundingAmount is the PLI amount used to fund the coordinator
	// subscription (scaled by 1e18 at transfer time — see
	// FundVRFCoordinatorV3Subscription).
	LinkFundingAmount = big.NewInt(100)
	// BeaconPeriodBlocksCount is the beacon period, in blocks, passed to the
	// coordinator and consumer deployments.
	BeaconPeriodBlocksCount = big.NewInt(3)
	// EthFundingAmount is the native-token amount used to fund each node.
	EthFundingAmount = big.NewFloat(1)
	// NumberOfRandomWordsToRequest is how many random words each request asks for.
	NumberOfRandomWordsToRequest = uint16(2)
	// ConfirmationDelay is the confirmation delay used for randomness requests.
	ConfirmationDelay = big.NewInt(1)
	// RandomnessFulfilmentTransmissionEventTimeout bounds the wait for the
	// NewTransmission event after a randomness-fulfillment request.
	RandomnessFulfilmentTransmissionEventTimeout = time.Minute * 6
	// RandomnessRedeemTransmissionEventTimeout bounds the wait for the
	// NewTransmission event after a redeemable randomness request.
	RandomnessRedeemTransmissionEventTimeout = time.Minute * 5
	// KeyID can be any random value.
	KeyID = "aee00d81f822f882b6fe28489822f59ebb21ea95c0ae21d9f67c0239461148fc"

	// CoordinatorConfig is the coordinator tuning handed to the OCR2VRF jobs'
	// offchain config in tests.
	CoordinatorConfig = &ocr2vrftypes.CoordinatorConfig{
		CacheEvictionWindowSeconds: 60,
		BatchGasLimit:              5_000_000,
		CoordinatorOverhead:        50_000,
		CallbackOverhead:           50_000,
		BlockGasOverhead:           50_000,
		LookbackBlocks:             1_000,
	}
	// VRFBeaconAllowedConfirmationDelays lists the confirmation delays configured
	// on the VRF beacon.
	VRFBeaconAllowedConfirmationDelays = []string{"1", "2", "3", "4", "5", "6", "7", "8"}
)
// DKGKeyConfig holds the hex-encoded DKG public keys created on a single
// non-bootstrap node.
type DKGKeyConfig struct {
	DKGEncryptionPublicKey string
	DKGSigningPublicKey    string
}

// DKGConfig aggregates the per-node DKG keys with the DKG key ID and the DKG
// contract address the jobs and OCR config are built against.
type DKGConfig struct {
	DKGKeyConfigs []DKGKeyConfig
	DKGKeyID      string
	DKGContractAddress string
}

// VRFBeaconConfig describes the deployed VRF beacon: its address, the allowed
// confirmation delays, and the coordinator tuning parameters.
type VRFBeaconConfig struct {
	VRFBeaconAddress  string
	ConfDelays        []string
	CoordinatorConfig *ocr2vrftypes.CoordinatorConfig
}

// OCR2Config collects per-node OCR2 identity material; the slices are parallel
// (the same index refers to the same non-bootstrap node in each).
type OCR2Config struct {
	OnchainPublicKeys    []string
	OffchainPublicKeys   []string
	PeerIds              []string
	ConfigPublicKeys     []string
	TransmitterAddresses []string
	Schedule             []int
}

// OCR2VRFPluginConfig is everything needed to build OCR2VRF job specs and the
// on-chain OCR configuration for the DKG and VRF beacon contracts.
type OCR2VRFPluginConfig struct {
	OCR2Config            OCR2Config
	DKGConfig             DKGConfig
	VRFBeaconConfig       VRFBeaconConfig
	VRFCoordinatorAddress string
	LinkEthFeedAddress    string
}
*OCR2VRFPluginConfig, vrfBeacon contracts.VRFBeacon) { + l := logging.GetTestLogger(t) + ocr2VrfConfig := BuildOCR2VRFConfigVars(t, ocr2VRFPluginConfig) + l.Debug().Interface("OCR2 VRF Config", ocr2VrfConfig).Msg("OCR2 VRF Config prepared") + + err := vrfBeacon.SetConfig( + ocr2VrfConfig.Signers, + ocr2VrfConfig.Transmitters, + ocr2VrfConfig.F, + ocr2VrfConfig.OnchainConfig, + ocr2VrfConfig.OffchainConfigVersion, + ocr2VrfConfig.OffchainConfig, + ) + require.NoError(t, err, "Error setting OCR config for VRFBeacon contract") + + vrfConfigSetEvent, err := vrfBeacon.WaitForConfigSetEvent(time.Minute) + require.NoError(t, err, "Error waiting for ConfigSet Event for VRFBeacon contract") + l.Info().Interface("Event", vrfConfigSetEvent).Msg("OCR2 VRF Config was set") +} + +func SetAndWaitForDKGProcessToFinish(t *testing.T, ocr2VRFPluginConfig *OCR2VRFPluginConfig, dkg contracts.DKG) { + l := logging.GetTestLogger(t) + ocr2DkgConfig := BuildOCR2DKGConfigVars(t, ocr2VRFPluginConfig) + + // set config for DKG OCR + l.Debug().Interface("OCR2 DKG Config", ocr2DkgConfig).Msg("OCR2 DKG Config prepared") + err := dkg.SetConfig( + ocr2DkgConfig.Signers, + ocr2DkgConfig.Transmitters, + ocr2DkgConfig.F, + ocr2DkgConfig.OnchainConfig, + ocr2DkgConfig.OffchainConfigVersion, + ocr2DkgConfig.OffchainConfig, + ) + require.NoError(t, err, "Error setting OCR config for DKG contract") + + // wait for the event ConfigSet from DKG contract + dkgConfigSetEvent, err := dkg.WaitForConfigSetEvent(time.Minute) + require.NoError(t, err, "Error waiting for ConfigSet Event for DKG contract") + l.Info().Interface("Event", dkgConfigSetEvent).Msg("OCR2 DKG Config Set") + // wait for the event Transmitted from DKG contract, meaning that OCR committee has sent out the Public key and Shares + dkgSharesTransmittedEvent, err := dkg.WaitForTransmittedEvent(time.Minute * 5) + require.NoError(t, err) + l.Info().Interface("Event", dkgSharesTransmittedEvent).Msg("DKG Shares were generated and transmitted by OCR 
Committee") +} + +func SetAndGetOCR2VRFPluginConfig( + t *testing.T, + nonBootstrapNodes []*client.PluginK8sClient, + dkg contracts.DKG, + vrfBeacon contracts.VRFBeacon, + coordinator contracts.VRFCoordinatorV3, + mockETHLinkFeed contracts.MockETHPLIFeed, + keyID string, + vrfBeaconAllowedConfirmationDelays []string, + coordinatorConfig *ocr2vrftypes.CoordinatorConfig, +) *OCR2VRFPluginConfig { + var ( + dkgKeyConfigs []DKGKeyConfig + transmitters []string + ocrConfigPubKeys []string + peerIDs []string + ocrOnchainPubKeys []string + ocrOffchainPubKeys []string + schedule []int + ) + + for _, node := range nonBootstrapNodes { + dkgSignKey, err := node.MustCreateDkgSignKey() + require.NoError(t, err, "Error creating DKG Sign Keys") + + dkgEncryptKey, err := node.MustCreateDkgEncryptKey() + require.NoError(t, err, "Error creating DKG Encrypt Keys") + + ethKeys, err := node.MustReadETHKeys() + require.NoError(t, err) + for _, key := range ethKeys.Data { + transmitters = append(transmitters, key.Attributes.Address) + } + + p2pKeys, err := node.MustReadP2PKeys() + require.NoError(t, err, "Shouldn't fail reading P2P keys from node") + + peerId := p2pKeys.Data[0].Attributes.PeerID + peerIDs = append(peerIDs, peerId) + + ocr2Keys, err := node.MustReadOCR2Keys() + require.NoError(t, err, "Shouldn't fail reading OCR2 keys from node") + var ocr2Config client.OCR2KeyAttributes + for _, key := range ocr2Keys.Data { + if key.Attributes.ChainType == string(chaintype.EVM) { + ocr2Config = key.Attributes + break + } + } + + offchainPubKey := strings.TrimPrefix(ocr2Config.OffChainPublicKey, "ocr2off_evm_") + ocrOffchainPubKeys = append(ocrOffchainPubKeys, offchainPubKey) + + configPubKey := strings.TrimPrefix(ocr2Config.ConfigPublicKey, "ocr2cfg_evm_") + ocrConfigPubKeys = append(ocrConfigPubKeys, configPubKey) + + onchainPubKey := strings.TrimPrefix(ocr2Config.OnChainPublicKey, "ocr2on_evm_") + ocrOnchainPubKeys = append(ocrOnchainPubKeys, onchainPubKey) + + schedule = 
append(schedule, 1) + + dkgKeyConfigs = append(dkgKeyConfigs, DKGKeyConfig{ + DKGEncryptionPublicKey: dkgEncryptKey.Data.Attributes.PublicKey, + DKGSigningPublicKey: dkgSignKey.Data.Attributes.PublicKey, + }) + } + + ocr2VRFPluginConfig := &OCR2VRFPluginConfig{ + OCR2Config: OCR2Config{ + OnchainPublicKeys: ocrOnchainPubKeys, + OffchainPublicKeys: ocrOffchainPubKeys, + ConfigPublicKeys: ocrConfigPubKeys, + PeerIds: peerIDs, + TransmitterAddresses: transmitters, + Schedule: schedule, + }, + + DKGConfig: DKGConfig{ + DKGKeyConfigs: dkgKeyConfigs, + DKGKeyID: keyID, + DKGContractAddress: dkg.Address(), + }, + VRFBeaconConfig: VRFBeaconConfig{ + VRFBeaconAddress: vrfBeacon.Address(), + ConfDelays: vrfBeaconAllowedConfirmationDelays, + CoordinatorConfig: coordinatorConfig, + }, + VRFCoordinatorAddress: coordinator.Address(), + LinkEthFeedAddress: mockETHLinkFeed.Address(), + } + return ocr2VRFPluginConfig +} + +func FundVRFCoordinatorV3Subscription(t *testing.T, linkToken contracts.LinkToken, coordinator contracts.VRFCoordinatorV3, chainClient blockchain.EVMClient, subscriptionID, linkFundingAmount *big.Int) { + encodedSubId, err := pluginutils.ABIEncode(`[{"type":"uint256"}]`, subscriptionID) + require.NoError(t, err, "Error Abi encoding subscriptionID") + _, err = linkToken.TransferAndCall(coordinator.Address(), big.NewInt(0).Mul(linkFundingAmount, big.NewInt(1e18)), encodedSubId) + require.NoError(t, err, "Error sending Link token") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for TXs to complete") +} + +func DeployOCR2VRFContracts(t *testing.T, contractDeployer contracts.ContractDeployer, chainClient blockchain.EVMClient, linkToken contracts.LinkToken, beaconPeriodBlocksCount *big.Int, keyID string) (contracts.DKG, contracts.VRFCoordinatorV3, contracts.VRFBeacon, contracts.VRFBeaconConsumer) { + dkg, err := contractDeployer.DeployDKG() + require.NoError(t, err, "Error deploying DKG Contract") + + err = chainClient.WaitForEvents() + 
require.NoError(t, err, "Error waiting for TXs to complete") + + coordinator, err := contractDeployer.DeployOCR2VRFCoordinator(beaconPeriodBlocksCount, linkToken.Address()) + require.NoError(t, err, "Error deploying OCR2VRFCoordinator Contract") + + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for TXs to complete") + + vrfBeacon, err := contractDeployer.DeployVRFBeacon(coordinator.Address(), linkToken.Address(), dkg.Address(), keyID) + require.NoError(t, err, "Error deploying VRFBeacon Contract") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for TXs to complete") + + consumer, err := contractDeployer.DeployVRFBeaconConsumer(coordinator.Address(), beaconPeriodBlocksCount) + require.NoError(t, err, "Error deploying VRFBeaconConsumer Contract") + + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for TXs to complete") + return dkg, coordinator, vrfBeacon, consumer +} + +func RequestAndRedeemRandomness( + t *testing.T, + consumer contracts.VRFBeaconConsumer, + chainClient blockchain.EVMClient, + vrfBeacon contracts.VRFBeacon, + numberOfRandomWordsToRequest uint16, + subscriptionID, + confirmationDelay *big.Int, + randomnessTransmissionEventTimeout time.Duration, +) *big.Int { + l := logging.GetTestLogger(t) + receipt, err := consumer.RequestRandomness( + numberOfRandomWordsToRequest, + subscriptionID, + confirmationDelay, + ) + require.NoError(t, err, "Error requesting randomness from Consumer Contract") + l.Info().Interface("TX Hash", receipt.TxHash).Msg("Randomness requested from Consumer contract") + + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for TXs to complete") + + requestID := getRequestId(t, consumer, receipt, confirmationDelay) + + newTransmissionEvent, err := vrfBeacon.WaitForNewTransmissionEvent(randomnessTransmissionEventTimeout) + require.NoError(t, err, "Error waiting for NewTransmission event from VRF Beacon Contract") + 
l.Info().Interface("NewTransmission event", newTransmissionEvent).Msg("Randomness transmitted by DON") + + err = consumer.RedeemRandomness(subscriptionID, requestID) + require.NoError(t, err, "Error redeeming randomness from Consumer Contract") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for TXs to complete") + + return requestID +} + +func RequestRandomnessFulfillmentAndWaitForFulfilment( + t *testing.T, + consumer contracts.VRFBeaconConsumer, + chainClient blockchain.EVMClient, + vrfBeacon contracts.VRFBeacon, + numberOfRandomWordsToRequest uint16, + subscriptionID *big.Int, + confirmationDelay *big.Int, + randomnessTransmissionEventTimeout time.Duration, +) *big.Int { + l := logging.GetTestLogger(t) + receipt, err := consumer.RequestRandomnessFulfillment( + numberOfRandomWordsToRequest, + subscriptionID, + confirmationDelay, + 200_000, + 100_000, + nil, + ) + require.NoError(t, err, "Error requesting Randomness Fulfillment") + l.Info().Interface("TX Hash", receipt.TxHash).Msg("Randomness Fulfillment requested from Consumer contract") + + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for TXs to complete") + + requestID := getRequestId(t, consumer, receipt, confirmationDelay) + + newTransmissionEvent, err := vrfBeacon.WaitForNewTransmissionEvent(randomnessTransmissionEventTimeout) + require.NoError(t, err, "Error waiting for NewTransmission event from VRF Beacon Contract") + l.Info().Interface("NewTransmission event", newTransmissionEvent).Msg("Randomness Fulfillment transmitted by DON") + + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for TXs to complete") + + return requestID +} + +func getRequestId(t *testing.T, consumer contracts.VRFBeaconConsumer, receipt *types.Receipt, confirmationDelay *big.Int) *big.Int { + periodBlocks, err := consumer.IBeaconPeriodBlocks(testcontext.Get(t)) + require.NoError(t, err, "Error getting Beacon Period block count") + + blockNumber := 
// SetupOCR2VRFUniverse deploys and wires the full OCR2VRF test environment:
// contracts (DKG, coordinator, beacon, consumer), a funded subscription,
// node funding, job specs, and the on-chain OCR configuration for both the
// DKG and the VRF beacon. It assumes pluginNodes[0]/nodeAddresses[0] is the
// bootstrap node and returns the deployed contracts plus the subscription ID.
func SetupOCR2VRFUniverse(
	t *testing.T,
	linkToken contracts.LinkToken,
	mockETHLinkFeed contracts.MockETHPLIFeed,
	contractDeployer contracts.ContractDeployer,
	chainClient blockchain.EVMClient,
	nodeAddresses []common.Address,
	pluginNodes []*client.PluginK8sClient,
	testNetwork blockchain.EVMNetwork,
) (contracts.DKG, contracts.VRFCoordinatorV3, contracts.VRFBeacon, contracts.VRFBeaconConsumer, *big.Int) {

	// Deploy DKG contract
	// Deploy VRFCoordinator(beaconPeriodBlocks, linkAddress, linkEthfeedAddress)
	// Deploy VRFBeacon
	// Deploy Consumer Contract
	dkgContract, coordinatorContract, vrfBeaconContract, consumerContract := DeployOCR2VRFContracts(
		t,
		contractDeployer,
		chainClient,
		linkToken,
		ocr2vrf_constants.BeaconPeriodBlocksCount,
		ocr2vrf_constants.KeyID,
	)

	// Add VRFBeacon as DKG client
	err := dkgContract.AddClient(ocr2vrf_constants.KeyID, vrfBeaconContract.Address())
	require.NoError(t, err, "Error adding client to DKG Contract")
	// Adding VRFBeacon as producer in VRFCoordinator
	err = coordinatorContract.SetProducer(vrfBeaconContract.Address())
	require.NoError(t, err, "Error setting Producer for VRFCoordinator contract")
	err = coordinatorContract.SetConfig(2.5e6, 160 /* 5 EVM words */)
	require.NoError(t, err, "Error setting config for VRFCoordinator contract")
	err = chainClient.WaitForEvents()
	require.NoError(t, err, "Error waiting for TXs to complete")

	// Subscription:
	//1. Create Subscription
	err = coordinatorContract.CreateSubscription()
	require.NoError(t, err, "Error creating subscription in VRFCoordinator contract")
	err = chainClient.WaitForEvents()
	require.NoError(t, err, "Error waiting for TXs to complete")
	subID, err := coordinatorContract.FindSubscriptionID()
	require.NoError(t, err)

	//2. Add Consumer to subscription
	err = coordinatorContract.AddConsumer(subID, consumerContract.Address())
	require.NoError(t, err, "Error adding a consumer to a subscription in VRFCoordinator contract")
	err = chainClient.WaitForEvents()
	require.NoError(t, err, "Error waiting for TXs to complete")

	//3. fund subscription with PLI token
	FundVRFCoordinatorV3Subscription(
		t,
		linkToken,
		coordinatorContract,
		chainClient,
		subID,
		ocr2vrf_constants.LinkFundingAmount,
	)

	// set Payees for VRFBeacon ((address which gets the reward) for each transmitter)
	nonBootstrapNodeAddresses := nodeAddresses[1:]
	err = vrfBeaconContract.SetPayees(nonBootstrapNodeAddresses, nonBootstrapNodeAddresses)
	require.NoError(t, err, "Error setting Payees in VRFBeacon Contract")

	// fund OCR Nodes (so that they can transmit)
	err = actions.FundPluginNodes(pluginNodes, chainClient, ocr2vrf_constants.EthFundingAmount)
	require.NoError(t, err, "Error funding Nodes")
	err = chainClient.WaitForEvents()
	require.NoError(t, err, "Error waiting for TXs to complete")

	bootstrapNode := pluginNodes[0]
	nonBootstrapNodes := pluginNodes[1:]

	// Create DKG Sign and Encrypt keys for each non-bootstrap node
	// set Job specs for each node
	ocr2VRFPluginConfig := SetAndGetOCR2VRFPluginConfig(
		t,
		nonBootstrapNodes,
		dkgContract,
		vrfBeaconContract,
		coordinatorContract,
		mockETHLinkFeed,
		ocr2vrf_constants.KeyID,
		ocr2vrf_constants.VRFBeaconAllowedConfirmationDelays,
		ocr2vrf_constants.CoordinatorConfig,
	)
	// Create Jobs for Bootstrap and non-bootstrap nodes
	CreateOCR2VRFJobs(
		t,
		bootstrapNode,
		nonBootstrapNodes,
		ocr2VRFPluginConfig,
		testNetwork.ChainID,
		0,
	)

	// set config for DKG OCR,
	// wait for the event ConfigSet from DKG contract
	// wait for the event Transmitted from DKG contract, meaning that OCR committee has sent out the Public key and Shares
	SetAndWaitForDKGProcessToFinish(t, ocr2VRFPluginConfig, dkgContract)

	// set config for VRFBeacon OCR,
	// wait for the event ConfigSet from VRFBeacon contract
	SetAndWaitForVRFBeaconProcessToFinish(t, ocr2VRFPluginConfig, vrfBeaconContract)
	return dkgContract, coordinatorContract, vrfBeaconContract, consumerContract, subID
}
+ +// DeployOCRContracts deploys and funds a certain number of offchain aggregator contracts +func DeployOCRContracts( + numberOfContracts int, + linkTokenContract contracts.LinkToken, + contractDeployer contracts.ContractDeployer, + workerNodes []*client.PluginK8sClient, + client blockchain.EVMClient, +) ([]contracts.OffchainAggregator, error) { + // Deploy contracts + var ocrInstances []contracts.OffchainAggregator + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + ocrInstance, err := contractDeployer.DeployOffChainAggregator( + linkTokenContract.Address(), + contracts.DefaultOffChainAggregatorOptions(), + ) + if err != nil { + return nil, fmt.Errorf("OCR instance deployment have failed: %w", err) + } + ocrInstances = append(ocrInstances, ocrInstance) + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("failed to wait for OCR contract deployments: %w", err) + } + } + } + err := client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("error waiting for OCR contract deployments: %w", err) + } + + // Gather transmitter and address payees + var transmitters, payees []string + for _, node := range workerNodes { + addr, err := node.PrimaryEthAddress() + if err != nil { + return nil, fmt.Errorf("error getting node's primary ETH address: %w", err) + } + transmitters = append(transmitters, addr) + payees = append(payees, client.GetDefaultWallet().Address()) + } + + // Set Payees + for contractCount, ocrInstance := range ocrInstances { + err = ocrInstance.SetPayees(transmitters, payees) + if err != nil { + return nil, fmt.Errorf("error settings OCR payees: %w", err) + } + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("failed to wait for 
setting OCR payees: %w", err) + } + } + } + err = client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("error waiting for OCR contracts to set payees and transmitters: %w", err) + } + + // Set Config + transmitterAddresses, err := PluginNodeAddresses(workerNodes) + if err != nil { + return nil, fmt.Errorf("getting node common addresses should not fail: %w", err) + } + for contractCount, ocrInstance := range ocrInstances { + // Exclude the first node, which will be used as a bootstrapper + err = ocrInstance.SetConfig( + workerNodes, + contracts.DefaultOffChainAggregatorConfig(len(workerNodes)), + transmitterAddresses, + ) + if err != nil { + return nil, fmt.Errorf("error setting OCR config for contract '%s': %w", ocrInstance.Address(), err) + } + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("failed to wait for setting OCR config: %w", err) + } + } + } + err = client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("error waiting for OCR contracts to set config: %w", err) + } + return ocrInstances, nil +} + +// DeployOCRContractsForwarderFlow deploys and funds a certain number of offchain +// aggregator contracts with forwarders as effectiveTransmitters +func DeployOCRContractsForwarderFlow( + t *testing.T, + numberOfContracts int, + linkTokenContract contracts.LinkToken, + contractDeployer contracts.ContractDeployer, + workerNodes []*client.PluginK8sClient, + forwarderAddresses []common.Address, + client blockchain.EVMClient, +) []contracts.OffchainAggregator { + // Deploy contracts + var ocrInstances []contracts.OffchainAggregator + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + ocrInstance, err := contractDeployer.DeployOffChainAggregator( + linkTokenContract.Address(), + contracts.DefaultOffChainAggregatorOptions(), + ) + require.NoError(t, err, "Deploying OCR 
instance %d shouldn't fail", contractCount+1) + ocrInstances = append(ocrInstances, ocrInstance) + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + require.NoError(t, err, "Failed to wait for OCR Contract deployments") + } + } + err := client.WaitForEvents() + require.NoError(t, err, "Error waiting for OCR contract deployments") + + // Gather transmitter and address payees + var transmitters, payees []string + for _, forwarderCommonAddress := range forwarderAddresses { + forwarderAddress := forwarderCommonAddress.Hex() + transmitters = append(transmitters, forwarderAddress) + payees = append(payees, client.GetDefaultWallet().Address()) + } + + // Set Payees + for contractCount, ocrInstance := range ocrInstances { + err = ocrInstance.SetPayees(transmitters, payees) + require.NoError(t, err, "Error setting OCR payees") + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + require.NoError(t, err, "Failed to wait for setting OCR payees") + } + } + err = client.WaitForEvents() + require.NoError(t, err, "Error waiting for OCR contracts to set payees and transmitters") + + // Set Config + for contractCount, ocrInstance := range ocrInstances { + // Exclude the first node, which will be used as a bootstrapper + err = ocrInstance.SetConfig( + workerNodes, + contracts.DefaultOffChainAggregatorConfig(len(workerNodes)), + forwarderAddresses, + ) + require.NoError(t, err, "Error setting OCR config for contract '%d'", ocrInstance.Address()) + if (contractCount+1)%ContractDeploymentInterval == 0 { // For large amounts of contract deployments, space things out some + err = client.WaitForEvents() + require.NoError(t, err, "Failed to wait for setting OCR config") + } + } + err = client.WaitForEvents() + require.NoError(t, err, "Error waiting for OCR contracts to set config") + 
return ocrInstances +} + +// CreateOCRJobs bootstraps the first node and to the other nodes sends ocr jobs that +// read from different adapters, to be used in combination with SetAdapterResponses +func CreateOCRJobs( + ocrInstances []contracts.OffchainAggregator, + bootstrapNode *client.PluginK8sClient, + workerNodes []*client.PluginK8sClient, + mockValue int, + mockserver *ctfClient.MockserverClient, + evmChainID string, +) error { + for _, ocrInstance := range ocrInstances { + bootstrapP2PIds, err := bootstrapNode.MustReadP2PKeys() + if err != nil { + return fmt.Errorf("reading P2P keys from bootstrap node have failed: %w", err) + } + bootstrapP2PId := bootstrapP2PIds.Data[0].Attributes.PeerID + bootstrapSpec := &client.OCRBootstrapJobSpec{ + Name: fmt.Sprintf("bootstrap-%s", uuid.New().String()), + ContractAddress: ocrInstance.Address(), + EVMChainID: evmChainID, + P2PPeerID: bootstrapP2PId, + IsBootstrapPeer: true, + } + _, err = bootstrapNode.MustCreateJob(bootstrapSpec) + if err != nil { + return fmt.Errorf("creating bootstrap job have failed: %w", err) + } + + for _, node := range workerNodes { + nodeP2PIds, err := node.MustReadP2PKeys() + if err != nil { + return fmt.Errorf("reading P2P keys from OCR node have failed: %w", err) + } + nodeP2PId := nodeP2PIds.Data[0].Attributes.PeerID + nodeTransmitterAddress, err := node.PrimaryEthAddress() + if err != nil { + return fmt.Errorf("getting primary ETH address from OCR node have failed: %w", err) + } + nodeOCRKeys, err := node.MustReadOCRKeys() + if err != nil { + return fmt.Errorf("getting OCR keys from OCR node have failed: %w", err) + } + nodeOCRKeyId := nodeOCRKeys.Data[0].ID + + nodeContractPairID, err := BuildNodeContractPairID(node, ocrInstance) + if err != nil { + return err + } + bta := &client.BridgeTypeAttributes{ + Name: nodeContractPairID, + URL: fmt.Sprintf("%s/%s", mockserver.Config.ClusterURL, strings.TrimPrefix(nodeContractPairID, "/")), + } + err = SetAdapterResponse(mockValue, ocrInstance, 
node, mockserver) + if err != nil { + return fmt.Errorf("setting adapter response for OCR node failed: %w", err) + } + err = node.MustCreateBridge(bta) + if err != nil { + return fmt.Errorf("creating bridge on CL node failed: %w", err) + } + + bootstrapPeers := []*client.PluginClient{bootstrapNode.PluginClient} + ocrSpec := &client.OCRTaskJobSpec{ + ContractAddress: ocrInstance.Address(), + EVMChainID: evmChainID, + P2PPeerID: nodeP2PId, + P2PBootstrapPeers: bootstrapPeers, + KeyBundleID: nodeOCRKeyId, + TransmitterAddress: nodeTransmitterAddress, + ObservationSource: client.ObservationSourceSpecBridge(bta), + } + _, err = node.MustCreateJob(ocrSpec) + if err != nil { + return fmt.Errorf("creating OCR task job on OCR node have failed: %w", err) + } + } + } + return nil +} + +// CreateOCRJobsWithForwarder bootstraps the first node and to the other nodes sends ocr jobs that +// read from different adapters, to be used in combination with SetAdapterResponses +func CreateOCRJobsWithForwarder( + t *testing.T, + ocrInstances []contracts.OffchainAggregator, + bootstrapNode *client.PluginK8sClient, + workerNodes []*client.PluginK8sClient, + mockValue int, + mockserver *ctfClient.MockserverClient, + evmChainID string, +) { + for _, ocrInstance := range ocrInstances { + bootstrapP2PIds, err := bootstrapNode.MustReadP2PKeys() + require.NoError(t, err, "Shouldn't fail reading P2P keys from bootstrap node") + bootstrapP2PId := bootstrapP2PIds.Data[0].Attributes.PeerID + bootstrapSpec := &client.OCRBootstrapJobSpec{ + Name: fmt.Sprintf("bootstrap-%s", uuid.New().String()), + ContractAddress: ocrInstance.Address(), + EVMChainID: evmChainID, + P2PPeerID: bootstrapP2PId, + IsBootstrapPeer: true, + } + _, err = bootstrapNode.MustCreateJob(bootstrapSpec) + require.NoError(t, err, "Shouldn't fail creating bootstrap job on bootstrap node") + + for nodeIndex, node := range workerNodes { + nodeP2PIds, err := node.MustReadP2PKeys() + require.NoError(t, err, "Shouldn't fail reading P2P 
keys from OCR node %d", nodeIndex+1) + nodeP2PId := nodeP2PIds.Data[0].Attributes.PeerID + nodeTransmitterAddress, err := node.PrimaryEthAddress() + require.NoError(t, err, "Shouldn't fail getting primary ETH address from OCR node %d", nodeIndex+1) + nodeOCRKeys, err := node.MustReadOCRKeys() + require.NoError(t, err, "Shouldn't fail getting OCR keys from OCR node %d", nodeIndex+1) + nodeOCRKeyId := nodeOCRKeys.Data[0].ID + + nodeContractPairID, err := BuildNodeContractPairID(node, ocrInstance) + require.NoError(t, err, "Failed building node contract pair ID for mockserver") + bta := &client.BridgeTypeAttributes{ + Name: nodeContractPairID, + URL: fmt.Sprintf("%s/%s", mockserver.Config.ClusterURL, strings.TrimPrefix(nodeContractPairID, "/")), + } + err = SetAdapterResponse(mockValue, ocrInstance, node, mockserver) + require.NoError(t, err, "Failed setting adapter responses for node %d", nodeIndex+1) + err = node.MustCreateBridge(bta) + require.NoError(t, err, "Failed creating bridge on OCR node %d", nodeIndex+1) + + bootstrapPeers := []*client.PluginClient{bootstrapNode.PluginClient} + ocrSpec := &client.OCRTaskJobSpec{ + ContractAddress: ocrInstance.Address(), + EVMChainID: evmChainID, + P2PPeerID: nodeP2PId, + P2PBootstrapPeers: bootstrapPeers, + KeyBundleID: nodeOCRKeyId, + TransmitterAddress: nodeTransmitterAddress, + ObservationSource: client.ObservationSourceSpecBridge(bta), + ForwardingAllowed: true, + } + _, err = node.MustCreateJob(ocrSpec) + require.NoError(t, err, "Shouldn't fail creating OCR Task job on OCR node %d", nodeIndex+1) + } + } +} + +// StartNewRound requests a new round from the ocr contracts and waits for confirmation +func StartNewRound( + roundNumber int64, + ocrInstances []contracts.OffchainAggregator, + client blockchain.EVMClient, + logger zerolog.Logger, +) error { + for i := 0; i < len(ocrInstances); i++ { + err := ocrInstances[i].RequestNewRound() + if err != nil { + return fmt.Errorf("requesting new OCR round %d have failed: %w", 
i+1, err) + } + ocrRound := contracts.NewOffchainAggregatorRoundConfirmer(ocrInstances[i], big.NewInt(roundNumber), client.GetNetworkConfig().Timeout.Duration, logger) + client.AddHeaderEventSubscription(ocrInstances[i].Address(), ocrRound) + err = client.WaitForEvents() + if err != nil { + return fmt.Errorf("failed to wait for event subscriptions of OCR instance %d: %w", i+1, err) + } + } + return nil +} + +// WatchNewRound watches for a new OCR round, similarly to StartNewRound, but it does not explicitly request a new +// round from the contract, as this can cause some odd behavior in some cases +func WatchNewRound( + roundNumber int64, + ocrInstances []contracts.OffchainAggregator, + client blockchain.EVMClient, + logger zerolog.Logger, +) error { + for i := 0; i < len(ocrInstances); i++ { + ocrRound := contracts.NewOffchainAggregatorRoundConfirmer(ocrInstances[i], big.NewInt(roundNumber), client.GetNetworkConfig().Timeout.Duration, logger) + client.AddHeaderEventSubscription(ocrInstances[i].Address(), ocrRound) + err := client.WaitForEvents() + if err != nil { + return fmt.Errorf("failed to wait for event subscriptions of OCR instance %d: %w", i+1, err) + } + } + return nil +} + +// SetAdapterResponse sets a single adapter response that correlates with an ocr contract and a plugin node +func SetAdapterResponse( + response int, + ocrInstance contracts.OffchainAggregator, + pluginNode *client.PluginK8sClient, + mockserver *ctfClient.MockserverClient, +) error { + nodeContractPairID, err := BuildNodeContractPairID(pluginNode, ocrInstance) + if err != nil { + return err + } + path := fmt.Sprintf("/%s", nodeContractPairID) + err = mockserver.SetValuePath(path, response) + if err != nil { + return fmt.Errorf("setting mockserver value path failed: %w", err) + } + return nil +} + +// SetAllAdapterResponsesToTheSameValue sets the mock responses in mockserver that are read by plugin nodes +// to simulate different adapters. 
This sets all adapter responses for each node and contract to the same response +func SetAllAdapterResponsesToTheSameValue( + response int, + ocrInstances []contracts.OffchainAggregator, + pluginNodes []*client.PluginK8sClient, + mockserver *ctfClient.MockserverClient, +) error { + eg := &errgroup.Group{} + for _, o := range ocrInstances { + ocrInstance := o + for _, n := range pluginNodes { + node := n + eg.Go(func() error { + return SetAdapterResponse(response, ocrInstance, node, mockserver) + }) + } + } + return eg.Wait() +} + +// SetAllAdapterResponsesToDifferentValues sets the mock responses in mockserver that are read by plugin nodes +// to simulate different adapters. This sets all adapter responses for each node and contract to different responses +func SetAllAdapterResponsesToDifferentValues( + responses []int, + ocrInstances []contracts.OffchainAggregator, + pluginNodes []*client.PluginK8sClient, + mockserver *ctfClient.MockserverClient, +) error { + if len(responses) != len(ocrInstances)*len(pluginNodes) { + return fmt.Errorf( + "amount of responses %d should be equal to the amount of OCR instances %d times the amount of Plugin nodes %d", + len(responses), len(ocrInstances), len(pluginNodes), + ) + } + eg := &errgroup.Group{} + for _, o := range ocrInstances { + ocrInstance := o + for ni := 1; ni < len(pluginNodes); ni++ { + nodeIndex := ni + eg.Go(func() error { + return SetAdapterResponse(responses[nodeIndex-1], ocrInstance, pluginNodes[nodeIndex], mockserver) + }) + } + } + return eg.Wait() +} + +// BuildNodeContractPairID builds a UUID based on a related pair of a Plugin node and OCR contract +func BuildNodeContractPairID(node *client.PluginK8sClient, ocrInstance contracts.OffchainAggregator) (string, error) { + if node == nil { + return "", fmt.Errorf("plugin node is nil") + } + if ocrInstance == nil { + return "", fmt.Errorf("OCR Instance is nil") + } + nodeAddress, err := node.PrimaryEthAddress() + if err != nil { + return "", fmt.Errorf("getting 
plugin node's primary ETH address failed: %w", err) + } + shortNodeAddr := nodeAddress[2:12] + shortOCRAddr := ocrInstance.Address()[2:12] + return strings.ToLower(fmt.Sprintf("node_%s_contract_%s", shortNodeAddr, shortOCRAddr)), nil +} diff --git a/integration-tests/actions/ocr_helpers_local.go b/integration-tests/actions/ocr_helpers_local.go new file mode 100644 index 00000000..5f97e6df --- /dev/null +++ b/integration-tests/actions/ocr_helpers_local.go @@ -0,0 +1,437 @@ +package actions + +import ( + "fmt" + "math/big" + "net/http" + "strings" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/docker/test_env" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +/* + These methods should be cleaned merged after we decouple PluginClient and PluginK8sClient + Please, use them while refactoring other tests to local docker env +*/ + +// FundPluginNodesLocal will fund all the provided Plugin nodes with a set amount of native currency +func FundPluginNodesLocal( + nodes []*client.PluginClient, + client blockchain.EVMClient, + amount *big.Float, +) error { + for _, cl := range nodes { + toAddress, err := cl.PrimaryEthAddress() + if err != nil { + return err + } + toAddr := common.HexToAddress(toAddress) + gasEstimates, err := client.EstimateGas(ethereum.CallMsg{ + To: &toAddr, + }) + if err != nil { + return err + } + err = client.Fund(toAddress, amount, gasEstimates) + if err != nil { + return err + } + } + return client.WaitForEvents() +} + +func PluginNodeAddressesLocal(nodes []*client.PluginClient) ([]common.Address, error) { + addresses := make([]common.Address, 0) + for _, node := range nodes { + primaryAddress, err := node.PrimaryEthAddress() + if err != nil { + 
return nil, err + } + addresses = append(addresses, common.HexToAddress(primaryAddress)) + } + return addresses, nil +} + +func DeployOCRContractsLocal( + numberOfContracts int, + linkTokenContract contracts.LinkToken, + contractDeployer contracts.ContractDeployer, + workerNodes []*client.PluginClient, + client blockchain.EVMClient, +) ([]contracts.OffchainAggregator, error) { + // Deploy contracts + var ocrInstances []contracts.OffchainAggregator + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + ocrInstance, err := contractDeployer.DeployOffChainAggregator( + linkTokenContract.Address(), + contracts.DefaultOffChainAggregatorOptions(), + ) + if err != nil { + return nil, fmt.Errorf("OCR instance deployment have failed: %w", err) + } + ocrInstances = append(ocrInstances, ocrInstance) + err = client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("failed to wait for OCR contract deployments: %w", err) + } + } + err := client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("error waiting for OCR contract deployments: %w", err) + } + + // Gather transmitter and address payees + var transmitters, payees []string + for _, node := range workerNodes { + addr, err := node.PrimaryEthAddress() + if err != nil { + return nil, fmt.Errorf("error getting node's primary ETH address: %w", err) + } + transmitters = append(transmitters, addr) + payees = append(payees, client.GetDefaultWallet().Address()) + } + + // Set Payees + for _, ocrInstance := range ocrInstances { + err = ocrInstance.SetPayees(transmitters, payees) + if err != nil { + return nil, fmt.Errorf("error settings OCR payees: %w", err) + } + err = client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("failed to wait for setting OCR payees: %w", err) + } + } + + // Set Config + transmitterAddresses, err := PluginNodeAddressesLocal(workerNodes) + if err != nil { + return nil, fmt.Errorf("getting node common addresses should not fail: %w", err) + } + for _, 
ocrInstance := range ocrInstances { + // Exclude the first node, which will be used as a bootstrapper + err = ocrInstance.SetConfigLocal( + workerNodes, + contracts.DefaultOffChainAggregatorConfig(len(workerNodes)), + transmitterAddresses, + ) + if err != nil { + return nil, fmt.Errorf("error setting OCR config for contract '%s': %w", ocrInstance.Address(), err) + } + err = client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("failed to wait for setting OCR config: %w", err) + } + } + err = client.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("error waiting for OCR contracts to set config: %w", err) + } + return ocrInstances, nil +} + +func CreateOCRJobsLocal( + ocrInstances []contracts.OffchainAggregator, + bootstrapNode *client.PluginClient, + workerNodes []*client.PluginClient, + mockValue int, + mockAdapter *test_env.Killgrave, + evmChainID *big.Int, +) error { + for _, ocrInstance := range ocrInstances { + bootstrapP2PIds, err := bootstrapNode.MustReadP2PKeys() + if err != nil { + return fmt.Errorf("reading P2P keys from bootstrap node have failed: %w", err) + } + bootstrapP2PId := bootstrapP2PIds.Data[0].Attributes.PeerID + bootstrapSpec := &client.OCRBootstrapJobSpec{ + Name: fmt.Sprintf("bootstrap-%s", uuid.New().String()), + ContractAddress: ocrInstance.Address(), + EVMChainID: evmChainID.String(), + P2PPeerID: bootstrapP2PId, + IsBootstrapPeer: true, + } + _, err = bootstrapNode.MustCreateJob(bootstrapSpec) + if err != nil { + return fmt.Errorf("creating bootstrap job have failed: %w", err) + } + + for _, node := range workerNodes { + nodeP2PIds, err := node.MustReadP2PKeys() + if err != nil { + return fmt.Errorf("reading P2P keys from OCR node have failed: %w", err) + } + nodeP2PId := nodeP2PIds.Data[0].Attributes.PeerID + nodeTransmitterAddress, err := node.PrimaryEthAddress() + if err != nil { + return fmt.Errorf("getting primary ETH address from OCR node have failed: %w", err) + } + nodeOCRKeys, err := node.MustReadOCRKeys() 
+ if err != nil { + return fmt.Errorf("getting OCR keys from OCR node have failed: %w", err) + } + nodeOCRKeyId := nodeOCRKeys.Data[0].ID + + nodeContractPairID, err := BuildNodeContractPairIDLocal(node, ocrInstance) + if err != nil { + return err + } + bta := &client.BridgeTypeAttributes{ + Name: nodeContractPairID, + URL: fmt.Sprintf("%s/%s", mockAdapter.InternalEndpoint, strings.TrimPrefix(nodeContractPairID, "/")), + } + err = SetAdapterResponseLocal(mockValue, ocrInstance, node, mockAdapter) + if err != nil { + return fmt.Errorf("setting adapter response for OCR node failed: %w", err) + } + err = node.MustCreateBridge(bta) + if err != nil { + return fmt.Errorf("creating bridge on CL node failed: %w", err) + } + + bootstrapPeers := []*client.PluginClient{bootstrapNode} + ocrSpec := &client.OCRTaskJobSpec{ + ContractAddress: ocrInstance.Address(), + EVMChainID: evmChainID.String(), + P2PPeerID: nodeP2PId, + P2PBootstrapPeers: bootstrapPeers, + KeyBundleID: nodeOCRKeyId, + TransmitterAddress: nodeTransmitterAddress, + ObservationSource: client.ObservationSourceSpecBridge(bta), + } + _, err = node.MustCreateJob(ocrSpec) + if err != nil { + return fmt.Errorf("creating OCR job on OCR node failed: %w", err) + } + } + } + return nil +} + +func BuildNodeContractPairIDLocal(node *client.PluginClient, ocrInstance contracts.OffchainAggregator) (string, error) { + if node == nil { + return "", fmt.Errorf("plugin node is nil") + } + if ocrInstance == nil { + return "", fmt.Errorf("OCR Instance is nil") + } + nodeAddress, err := node.PrimaryEthAddress() + if err != nil { + return "", fmt.Errorf("getting plugin node's primary ETH address failed: %w", err) + } + shortNodeAddr := nodeAddress[2:12] + shortOCRAddr := ocrInstance.Address()[2:12] + return strings.ToLower(fmt.Sprintf("node_%s_contract_%s", shortNodeAddr, shortOCRAddr)), nil +} + +func SetAdapterResponseLocal( + response int, + ocrInstance contracts.OffchainAggregator, + pluginNode *client.PluginClient, + mockAdapter 
*test_env.Killgrave, +) error { + nodeContractPairID, err := BuildNodeContractPairIDLocal(pluginNode, ocrInstance) + if err != nil { + return err + } + path := fmt.Sprintf("/%s", nodeContractPairID) + err = mockAdapter.SetAdapterBasedIntValuePath(path, []string{http.MethodGet, http.MethodPost}, response) + if err != nil { + return fmt.Errorf("setting mock adapter value path failed: %w", err) + } + return nil +} + +func SetAllAdapterResponsesToTheSameValueLocal( + response int, + ocrInstances []contracts.OffchainAggregator, + pluginNodes []*client.PluginClient, + mockAdapter *test_env.Killgrave, +) error { + eg := &errgroup.Group{} + for _, o := range ocrInstances { + ocrInstance := o + for _, n := range pluginNodes { + node := n + eg.Go(func() error { + return SetAdapterResponseLocal(response, ocrInstance, node, mockAdapter) + }) + } + } + return eg.Wait() +} + +func TrackForwarderLocal( + chainClient blockchain.EVMClient, + authorizedForwarder common.Address, + node *client.PluginClient, + logger zerolog.Logger, +) error { + chainID := chainClient.GetChainID() + _, _, err := node.TrackForwarder(chainID, authorizedForwarder) + if err != nil { + return fmt.Errorf("failed to track forwarder, err: %w", err) + } + logger.Info().Str("NodeURL", node.Config.URL). + Str("ForwarderAddress", authorizedForwarder.Hex()). + Str("ChaindID", chainID.String()). 
+ Msg("Forwarder tracked") + return nil +} + +func DeployOCRContractsForwarderFlowLocal( + numberOfContracts int, + linkTokenContract contracts.LinkToken, + contractDeployer contracts.ContractDeployer, + workerNodes []*client.PluginClient, + forwarderAddresses []common.Address, + client blockchain.EVMClient, +) ([]contracts.OffchainAggregator, error) { + // Deploy contracts + var ocrInstances []contracts.OffchainAggregator + for contractCount := 0; contractCount < numberOfContracts; contractCount++ { + ocrInstance, err := contractDeployer.DeployOffChainAggregator( + linkTokenContract.Address(), + contracts.DefaultOffChainAggregatorOptions(), + ) + if err != nil { + return nil, fmt.Errorf("failed to deploy offchain aggregator, err: %w", err) + } + ocrInstances = append(ocrInstances, ocrInstance) + err = client.WaitForEvents() + if err != nil { + return nil, err + } + } + if err := client.WaitForEvents(); err != nil { + return nil, err + } + + // Gather transmitter and address payees + var transmitters, payees []string + for _, forwarderCommonAddress := range forwarderAddresses { + forwarderAddress := forwarderCommonAddress.Hex() + transmitters = append(transmitters, forwarderAddress) + payees = append(payees, client.GetDefaultWallet().Address()) + } + + // Set Payees + for _, ocrInstance := range ocrInstances { + err := ocrInstance.SetPayees(transmitters, payees) + if err != nil { + return nil, fmt.Errorf("failed to set OCR payees, err: %w", err) + } + if err := client.WaitForEvents(); err != nil { + return nil, err + } + } + if err := client.WaitForEvents(); err != nil { + return nil, err + } + + // Set Config + for _, ocrInstance := range ocrInstances { + // Exclude the first node, which will be used as a bootstrapper + err := ocrInstance.SetConfigLocal( + workerNodes, + contracts.DefaultOffChainAggregatorConfig(len(workerNodes)), + forwarderAddresses, + ) + if err != nil { + return nil, fmt.Errorf("failed to set on-chain config, err: %w", err) + } + if err = 
client.WaitForEvents(); err != nil { + return nil, err + } + } + return ocrInstances, client.WaitForEvents() +} + +func CreateOCRJobsWithForwarderLocal( + ocrInstances []contracts.OffchainAggregator, + bootstrapNode *client.PluginClient, + workerNodes []*client.PluginClient, + mockValue int, + mockAdapter *test_env.Killgrave, + evmChainID string, +) error { + for _, ocrInstance := range ocrInstances { + bootstrapP2PIds, err := bootstrapNode.MustReadP2PKeys() + if err != nil { + return err + } + bootstrapP2PId := bootstrapP2PIds.Data[0].Attributes.PeerID + bootstrapSpec := &client.OCRBootstrapJobSpec{ + Name: fmt.Sprintf("bootstrap-%s", uuid.New().String()), + ContractAddress: ocrInstance.Address(), + EVMChainID: evmChainID, + P2PPeerID: bootstrapP2PId, + IsBootstrapPeer: true, + } + _, err = bootstrapNode.MustCreateJob(bootstrapSpec) + if err != nil { + return err + } + + for _, node := range workerNodes { + nodeP2PIds, err := node.MustReadP2PKeys() + if err != nil { + return err + } + nodeP2PId := nodeP2PIds.Data[0].Attributes.PeerID + nodeTransmitterAddress, err := node.PrimaryEthAddress() + if err != nil { + return err + } + nodeOCRKeys, err := node.MustReadOCRKeys() + if err != nil { + return err + } + nodeOCRKeyId := nodeOCRKeys.Data[0].ID + + nodeContractPairID, err := BuildNodeContractPairIDLocal(node, ocrInstance) + if err != nil { + return err + } + bta := &client.BridgeTypeAttributes{ + Name: nodeContractPairID, + URL: fmt.Sprintf("%s/%s", mockAdapter.InternalEndpoint, strings.TrimPrefix(nodeContractPairID, "/")), + } + err = SetAdapterResponseLocal(mockValue, ocrInstance, node, mockAdapter) + if err != nil { + return err + } + err = node.MustCreateBridge(bta) + if err != nil { + return err + } + + bootstrapPeers := []*client.PluginClient{bootstrapNode} + ocrSpec := &client.OCRTaskJobSpec{ + ContractAddress: ocrInstance.Address(), + EVMChainID: evmChainID, + P2PPeerID: nodeP2PId, + P2PBootstrapPeers: bootstrapPeers, + KeyBundleID: nodeOCRKeyId, + 
TransmitterAddress: nodeTransmitterAddress, + ObservationSource: client.ObservationSourceSpecBridge(bta), + ForwardingAllowed: true, + } + _, err = node.MustCreateJob(ocrSpec) + if err != nil { + return err + } + } + } + return nil +} diff --git a/integration-tests/actions/operator_forwarder_helpers.go b/integration-tests/actions/operator_forwarder_helpers.go new file mode 100644 index 00000000..a6146f3c --- /dev/null +++ b/integration-tests/actions/operator_forwarder_helpers.go @@ -0,0 +1,195 @@ +package actions + +import ( + "math/big" + "testing" + + geth "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/operator_factory" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +func DeployForwarderContracts( + t *testing.T, + contractDeployer contracts.ContractDeployer, + linkToken contracts.LinkToken, + chainClient blockchain.EVMClient, + numberOfOperatorForwarderPairs int, +) (operators []common.Address, authorizedForwarders []common.Address, operatorFactoryInstance contracts.OperatorFactory) { + operatorFactoryInstance, err := contractDeployer.DeployOperatorFactory(linkToken.Address()) + require.NoError(t, err, "Deploying OperatorFactory Contract shouldn't fail") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Failed waiting for deployment of flux aggregator contract") + + operatorCreated := make(chan *operator_factory.OperatorFactoryOperatorCreated) + authorizedForwarderCreated := make(chan *operator_factory.OperatorFactoryAuthorizedForwarderCreated) + for i := 0; 
i < numberOfOperatorForwarderPairs; i++ { + SubscribeOperatorFactoryEvents(t, authorizedForwarderCreated, operatorCreated, chainClient, operatorFactoryInstance) + _, err = operatorFactoryInstance.DeployNewOperatorAndForwarder() + require.NoError(t, err, "Deploying new operator with proposed ownership with forwarder shouldn't fail") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Waiting for events in nodes shouldn't fail") + eventDataAuthorizedForwarder, eventDataOperatorCreated := <-authorizedForwarderCreated, <-operatorCreated + operator, authorizedForwarder := eventDataOperatorCreated.Operator, eventDataAuthorizedForwarder.Forwarder + operators = append(operators, operator) + authorizedForwarders = append(authorizedForwarders, authorizedForwarder) + } + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + return operators, authorizedForwarders, operatorFactoryInstance +} + +func AcceptAuthorizedReceiversOperator( + t *testing.T, + operator common.Address, + authorizedForwarder common.Address, + nodeAddresses []common.Address, + chainClient blockchain.EVMClient, + contractLoader contracts.ContractLoader, +) { + operatorInstance, err := contractLoader.LoadOperatorContract(operator) + require.NoError(t, err, "Loading operator contract shouldn't fail") + forwarderInstance, err := contractLoader.LoadAuthorizedForwarder(authorizedForwarder) + require.NoError(t, err, "Loading authorized forwarder contract shouldn't fail") + + err = operatorInstance.AcceptAuthorizedReceivers([]common.Address{authorizedForwarder}, nodeAddresses) + require.NoError(t, err, "Accepting authorized receivers shouldn't fail") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Waiting for events in nodes shouldn't fail") + + senders, err := forwarderInstance.GetAuthorizedSenders(testcontext.Get(t)) + require.NoError(t, err, "Getting authorized senders shouldn't fail") + var nodesAddrs []string + for _, o := range nodeAddresses { + 
nodesAddrs = append(nodesAddrs, o.Hex()) + } + require.Equal(t, nodesAddrs, senders, "Senders addresses should match node addresses") + + owner, err := forwarderInstance.Owner(testcontext.Get(t)) + require.NoError(t, err, "Getting authorized forwarder owner shouldn't fail") + require.Equal(t, operator.Hex(), owner, "Forwarder owner should match operator") +} + +func ProcessNewEvent( + t *testing.T, + operatorCreated chan *operator_factory.OperatorFactoryOperatorCreated, + authorizedForwarderCreated chan *operator_factory.OperatorFactoryAuthorizedForwarderCreated, + event *types.Log, + eventDetails *abi.Event, + operatorFactoryInstance contracts.OperatorFactory, + chainClient blockchain.EVMClient, +) { + l := logging.GetTestLogger(t) + errorChan := make(chan error) + eventConfirmed := make(chan bool) + err := chainClient.ProcessEvent(eventDetails.Name, event, eventConfirmed, errorChan) + if err != nil { + l.Error().Err(err).Str("Hash", event.TxHash.Hex()).Str("Event", eventDetails.Name).Msg("Error trying to process event") + return + } + l.Debug(). + Str("Event", eventDetails.Name). + Str("Address", event.Address.Hex()). + Str("Hash", event.TxHash.Hex()). 
+ Msg("Attempting to Confirm Event") + for { + select { + case err := <-errorChan: + l.Error().Err(err).Msg("Error while confirming event") + return + case confirmed := <-eventConfirmed: + if confirmed { + if eventDetails.Name == "AuthorizedForwarderCreated" { // AuthorizedForwarderCreated event to authorizedForwarderCreated channel to handle in main loop + eventData, err := operatorFactoryInstance.ParseAuthorizedForwarderCreated(*event) + require.NoError(t, err, "Parsing OperatorFactoryAuthorizedForwarderCreated event log in "+ + "OperatorFactory instance shouldn't fail") + authorizedForwarderCreated <- eventData + } + if eventDetails.Name == "OperatorCreated" { // OperatorCreated event to operatorCreated channel to handle in main loop + eventData, err := operatorFactoryInstance.ParseOperatorCreated(*event) + require.NoError(t, err, "Parsing OperatorFactoryAuthorizedForwarderCreated event log in "+ + "OperatorFactory instance shouldn't fail") + operatorCreated <- eventData + } + } + return + } + } +} + +// SubscribeOperatorFactoryEvents subscribes to the event log for authorizedForwarderCreated and operatorCreated events +// from OperatorFactory contract +func SubscribeOperatorFactoryEvents( + t *testing.T, + authorizedForwarderCreated chan *operator_factory.OperatorFactoryAuthorizedForwarderCreated, + operatorCreated chan *operator_factory.OperatorFactoryOperatorCreated, + chainClient blockchain.EVMClient, + operatorFactoryInstance contracts.OperatorFactory, +) { + l := logging.GetTestLogger(t) + contractABI, err := operator_factory.OperatorFactoryMetaData.GetAbi() + require.NoError(t, err, "Getting contract abi for OperatorFactory shouldn't fail") + latestBlockNum, err := chainClient.LatestBlockNumber(testcontext.Get(t)) + require.NoError(t, err, "Subscribing to contract event log for OperatorFactory instance shouldn't fail") + query := geth.FilterQuery{ + FromBlock: big.NewInt(0).SetUint64(latestBlockNum), + Addresses: 
[]common.Address{common.HexToAddress(operatorFactoryInstance.Address())}, + } + + eventLogs := make(chan types.Log) + sub, err := chainClient.SubscribeFilterLogs(testcontext.Get(t), query, eventLogs) + require.NoError(t, err, "Subscribing to contract event log for OperatorFactory instance shouldn't fail") + go func() { + defer sub.Unsubscribe() + remainingExpectedEvents := 2 + for { + select { + case err := <-sub.Err(): + l.Error().Err(err).Msg("Error while watching for new contract events. Retrying Subscription") + sub.Unsubscribe() + + sub, err = chainClient.SubscribeFilterLogs(testcontext.Get(t), query, eventLogs) + require.NoError(t, err, "Subscribing to contract event log for OperatorFactory instance shouldn't fail") + case vLog := <-eventLogs: + eventDetails, err := contractABI.EventByID(vLog.Topics[0]) + require.NoError(t, err, "Getting event details for OperatorFactory instance shouldn't fail") + go ProcessNewEvent( + t, operatorCreated, authorizedForwarderCreated, &vLog, + eventDetails, operatorFactoryInstance, chainClient, + ) + if eventDetails.Name == "AuthorizedForwarderCreated" || eventDetails.Name == "OperatorCreated" { + remainingExpectedEvents-- + if remainingExpectedEvents <= 0 { + return + } + } + } + } + }() +} + +func TrackForwarder( + t *testing.T, + chainClient blockchain.EVMClient, + authorizedForwarder common.Address, + node *client.PluginK8sClient, +) { + l := logging.GetTestLogger(t) + chainID := chainClient.GetChainID() + _, _, err := node.TrackForwarder(chainID, authorizedForwarder) + require.NoError(t, err, "Forwarder track should be created") + l.Info().Str("NodeURL", node.Config.URL). + Str("ForwarderAddress", authorizedForwarder.Hex()). + Str("ChaindID", chainID.String()). 
+ Msg("Forwarder tracked") +} diff --git a/integration-tests/actions/private_network.go b/integration-tests/actions/private_network.go new file mode 100644 index 00000000..d73b4c18 --- /dev/null +++ b/integration-tests/actions/private_network.go @@ -0,0 +1,28 @@ +package actions + +import ( + "github.com/rs/zerolog" + + ctf_test_env "github.com/goplugin/plugin-testing-framework/docker/test_env" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +func EthereumNetworkConfigFromConfig(l zerolog.Logger, config tc.GlobalTestConfig) (network ctf_test_env.EthereumNetwork, err error) { + if config.GetPrivateEthereumNetworkConfig() == nil { + l.Warn().Msg("No TOML private ethereum network config found, will use old geth") + ethBuilder := ctf_test_env.NewEthereumNetworkBuilder() + network, err = ethBuilder. + WithConsensusType(ctf_test_env.ConsensusType_PoW). + WithExecutionLayer(ctf_test_env.ExecutionLayer_Geth). + Build() + + return + } + + ethBuilder := ctf_test_env.NewEthereumNetworkBuilder() + network, err = ethBuilder. + WithExistingConfig(*config.GetPrivateEthereumNetworkConfig()). 
+		Build()
+
+	return
+}
diff --git a/integration-tests/actions/vrf/common/actions.go b/integration-tests/actions/vrf/common/actions.go
new file mode 100644
index 00000000..1e06a14b
--- /dev/null
+++ b/integration-tests/actions/vrf/common/actions.go
@@ -0,0 +1,137 @@
+package common
+
+import (
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/google/uuid"
+	"github.com/rs/zerolog"
+
+	"github.com/goplugin/plugin-testing-framework/blockchain"
+	"github.com/goplugin/pluginv3.0/integration-tests/actions"
+	"github.com/goplugin/pluginv3.0/integration-tests/client"
+	"github.com/goplugin/pluginv3.0/integration-tests/docker/test_env"
+	testconfig "github.com/goplugin/pluginv3.0/integration-tests/testconfig/vrfv2"
+)
+
+// CreateFundAndGetSendingKeys creates and funds the requested number of new tx
+// keys on the node, then returns all sending keys (new keys plus the node's
+// primary ETH key) both as hex strings and as common.Address values.
+func CreateFundAndGetSendingKeys(
+	client blockchain.EVMClient,
+	node *VRFNode,
+	pluginNodeFunding float64,
+	numberOfTxKeysToCreate int,
+	chainID *big.Int,
+) ([]string, []common.Address, error) {
+	newNativeTokenKeyAddresses, err := CreateAndFundSendingKeys(client, node, pluginNodeFunding, numberOfTxKeysToCreate, chainID)
+	if err != nil {
+		return nil, nil, err
+	}
+	nativeTokenPrimaryKeyAddress, err := node.CLNode.API.PrimaryEthAddress()
+	if err != nil {
+		return nil, nil, fmt.Errorf("%s, err %w", ErrNodePrimaryKey, err)
+	}
+	allNativeTokenKeyAddressStrings := append(newNativeTokenKeyAddresses, nativeTokenPrimaryKeyAddress)
+	allNativeTokenKeyAddresses := make([]common.Address, 0, len(allNativeTokenKeyAddressStrings)) // length 0: make with non-zero length + append would prepend zero-value addresses
+	for _, addressString := range allNativeTokenKeyAddressStrings {
+		allNativeTokenKeyAddresses = append(allNativeTokenKeyAddresses, common.HexToAddress(addressString))
+	}
+	return allNativeTokenKeyAddressStrings, allNativeTokenKeyAddresses, nil
+}
+
+// CreateAndFundSendingKeys creates the requested number of EVM tx keys on the
+// node and funds each new address with pluginNodeFunding native tokens.
+func CreateAndFundSendingKeys(
+	client blockchain.EVMClient,
+	node *VRFNode,
+	pluginNodeFunding float64,
+	numberOfNativeTokenAddressesToCreate int,
+	chainID *big.Int,
+) ([]string, error) {
+	var newNativeTokenKeyAddresses []string
+	for i := 0; i < numberOfNativeTokenAddressesToCreate; i++ {
+		newTxKey, response, err := node.CLNode.API.CreateTxKey("evm", chainID.String())
+		if err != nil {
+			return nil, fmt.Errorf("%s, err %w", ErrNodeNewTxKey, err)
+		}
+		if response.StatusCode != 200 {
+			return nil, fmt.Errorf("error creating transaction key - response code, err %d", response.StatusCode)
+		}
+		newNativeTokenKeyAddresses = append(newNativeTokenKeyAddresses, newTxKey.Data.Attributes.Address)
+		err = actions.FundAddress(client, newTxKey.Data.Attributes.Address, big.NewFloat(pluginNodeFunding))
+		if err != nil {
+			return nil, err
+		}
+	}
+	return newNativeTokenKeyAddresses, nil
+}
+
+// SetupBHSNode creates and funds sending keys for the BHS node, then creates a
+// BlockhashStore job on it pointing at the given coordinator and BHS contracts.
+func SetupBHSNode(
+	env *test_env.CLClusterTestEnv,
+	config *testconfig.General,
+	numberOfTxKeysToCreate int,
+	chainID *big.Int,
+	coordinatorAddress string,
+	BHSAddress string,
+	txKeyFunding float64,
+	l zerolog.Logger,
+	bhsNode *VRFNode,
+) error {
+	bhsTXKeyAddressStrings, _, err := CreateFundAndGetSendingKeys(
+		env.EVMClient,
+		bhsNode,
+		txKeyFunding,
+		numberOfTxKeysToCreate,
+		chainID,
+	)
+	if err != nil {
+		return err
+	}
+	bhsNode.TXKeyAddressStrings = bhsTXKeyAddressStrings
+	bhsSpec := client.BlockhashStoreJobSpec{
+		ForwardingAllowed:        false,
+		CoordinatorV2Address:     coordinatorAddress,
+		CoordinatorV2PlusAddress: coordinatorAddress,
+		BlockhashStoreAddress:    BHSAddress,
+		FromAddresses:            bhsTXKeyAddressStrings,
+		EVMChainID:               chainID.String(),
+		WaitBlocks:               *config.BHSJobWaitBlocks,
+		LookbackBlocks:           *config.BHSJobLookBackBlocks,
+		PollPeriod:               config.BHSJobPollPeriod.Duration,
+		RunTimeout:               config.BHSJobRunTimeout.Duration,
+	}
+	l.Info().Msg("Creating BHS Job")
+	bhsJob, err := CreateBHSJob(
+		bhsNode.CLNode.API,
+		bhsSpec,
+	)
+	if err != nil {
+		return fmt.Errorf("%s, err %w", ErrCreatingBHSJob, err) // was "": empty context string dropped the error description
+	}
+	bhsNode.Job = bhsJob
+	return nil
+}
+
+// CreateBHSJob creates a BlockhashStore job on the given node from the supplied
+// spec, assigning it a fresh external job ID.
+func CreateBHSJob(
+	pluginNode *client.PluginClient,
+	bhsJobSpecConfig client.BlockhashStoreJobSpec,
+) (*client.Job, error) {
+	jobUUID := uuid.New()
+	spec := 
&client.BlockhashStoreJobSpec{ + Name: fmt.Sprintf("bhs-%s", jobUUID), + ForwardingAllowed: bhsJobSpecConfig.ForwardingAllowed, + CoordinatorV2Address: bhsJobSpecConfig.CoordinatorV2Address, + CoordinatorV2PlusAddress: bhsJobSpecConfig.CoordinatorV2PlusAddress, + BlockhashStoreAddress: bhsJobSpecConfig.BlockhashStoreAddress, + FromAddresses: bhsJobSpecConfig.FromAddresses, + EVMChainID: bhsJobSpecConfig.EVMChainID, + ExternalJobID: jobUUID.String(), + WaitBlocks: bhsJobSpecConfig.WaitBlocks, + LookbackBlocks: bhsJobSpecConfig.LookbackBlocks, + PollPeriod: bhsJobSpecConfig.PollPeriod, + RunTimeout: bhsJobSpecConfig.RunTimeout, + } + + job, err := pluginNode.MustCreateJob(spec) + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrCreatingBHSJob, err) + } + return job, nil +} diff --git a/integration-tests/actions/vrf/common/errors.go b/integration-tests/actions/vrf/common/errors.go new file mode 100644 index 00000000..36530428 --- /dev/null +++ b/integration-tests/actions/vrf/common/errors.go @@ -0,0 +1,27 @@ +package common + +const ( + ErrNodePrimaryKey = "error getting node's primary ETH key" + ErrNodeNewTxKey = "error creating node's EVM transaction key" + ErrCreatingProvingKeyHash = "error creating a keyHash from the proving key" + ErrRegisteringProvingKey = "error registering a proving key on Coordinator contract" + ErrRegisterProvingKey = "error registering proving keys" + ErrEncodingProvingKey = "error encoding proving key" + ErrDeployBlockHashStore = "error deploying blockhash store" + ErrDeployCoordinator = "error deploying VRF CoordinatorV2" + ErrABIEncodingFunding = "error Abi encoding subscriptionID" + ErrSendingLinkToken = "error sending Link token" + ErrCreatingBHSJob = "error creating BHS job" + ErrParseJob = "error parsing job definition" + ErrSetVRFCoordinatorConfig = "error setting config for VRF Coordinator contract" + ErrCreateVRFSubscription = "error creating VRF Subscription" + ErrAddConsumerToSub = "error adding consumer to VRF 
Subscription" + ErrFundSubWithLinkToken = "error funding subscription with Link tokens" + ErrRestartCLNode = "error restarting CL node" + ErrWaitTXsComplete = "error waiting for TXs to complete" + ErrRequestRandomness = "error requesting randomness" + ErrLoadingCoordinator = "error loading coordinator contract" + + ErrWaitRandomWordsRequestedEvent = "error waiting for RandomWordsRequested event" + ErrWaitRandomWordsFulfilledEvent = "error waiting for RandomWordsFulfilled event" +) diff --git a/integration-tests/actions/vrf/common/models.go b/integration-tests/actions/vrf/common/models.go new file mode 100644 index 00000000..4969ea0d --- /dev/null +++ b/integration-tests/actions/vrf/common/models.go @@ -0,0 +1,70 @@ +package common + +import ( + "math/big" + "time" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" +) + +type VRFEncodedProvingKey [2]*big.Int + +// VRFV2PlusKeyData defines a jobs into and proving key info +type VRFKeyData struct { + VRFKey *client.VRFKey + EncodedProvingKey VRFEncodedProvingKey + KeyHash [32]byte +} + +type VRFNodeType int + +const ( + VRF VRFNodeType = iota + 1 + BHS +) + +func (n VRFNodeType) String() string { + return [...]string{"VRF", "BHS"}[n-1] +} + +func (n VRFNodeType) Index() int { + return int(n) +} + +type VRFNode struct { + CLNode *test_env.ClNode + Job *client.Job + TXKeyAddressStrings []string +} + +type VRFContracts struct { + CoordinatorV2 contracts.VRFCoordinatorV2 + CoordinatorV2Plus contracts.VRFCoordinatorV2_5 + VRFOwner contracts.VRFOwner + BHS contracts.BlockHashStore + VRFV2Consumer []contracts.VRFv2LoadTestConsumer + VRFV2PlusConsumer []contracts.VRFv2PlusLoadTestConsumer +} + +type VRFOwnerConfig struct { + OwnerAddress string + UseVRFOwner bool +} + +type VRFJobSpecConfig struct { + ForwardingAllowed bool + CoordinatorAddress string + FromAddresses []string + 
EVMChainID string + MinIncomingConfirmations int + PublicKey string + BatchFulfillmentEnabled bool + BatchFulfillmentGasMultiplier float64 + EstimateGasMultiplier float64 + PollPeriod time.Duration + RequestTimeout time.Duration + VRFOwnerConfig *VRFOwnerConfig + SimulationBlock *string +} diff --git a/integration-tests/actions/vrf/vrfv1/actions.go b/integration-tests/actions/vrf/vrfv1/actions.go new file mode 100644 index 00000000..59995123 --- /dev/null +++ b/integration-tests/actions/vrf/vrfv1/actions.go @@ -0,0 +1,39 @@ +package vrfv1 + +import ( + "fmt" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +var ( + ErrDeployBHSV1 = "error deploying BlockHashStoreV1 contract" + ErrDeployVRFCootrinatorV1 = "error deploying VRFv1 Coordinator contract" + ErrDeployVRFConsumerV1 = "error deploying VRFv1 Consumer contract" +) + +type Contracts struct { + BHS contracts.BlockHashStore + Coordinator contracts.VRFCoordinator + Consumer contracts.VRFConsumer +} + +func DeployVRFContracts(cd contracts.ContractDeployer, bc blockchain.EVMClient, lt contracts.LinkToken) (*Contracts, error) { + bhs, err := cd.DeployBlockhashStore() + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrDeployBHSV1, err) + } + coordinator, err := cd.DeployVRFCoordinator(lt.Address(), bhs.Address()) + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrDeployVRFCootrinatorV1, err) + } + consumer, err := cd.DeployVRFConsumer(lt.Address(), coordinator.Address()) + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrDeployVRFConsumerV1, err) + } + if err := bc.WaitForEvents(); err != nil { + return nil, err + } + return &Contracts{bhs, coordinator, consumer}, nil +} diff --git a/integration-tests/actions/vrf/vrfv2/errors.go b/integration-tests/actions/vrf/vrfv2/errors.go new file mode 100644 index 00000000..d6b24fe9 --- /dev/null +++ b/integration-tests/actions/vrf/vrfv2/errors.go @@ -0,0 +1,10 @@ +package 
vrfv2 + +const ( + ErrCreatingVRFv2Key = "error creating VRFv2 key" + ErrDeployVRFV2Wrapper = "error deploying VRFV2Wrapper" + ErrCreateVRFV2Jobs = "error creating VRF V2 Jobs" + ErrDeployVRFV2Contracts = "error deploying VRFV2 contracts" + ErrCreatingVRFv2Job = "error creating VRFv2 job" + ErrAdvancedConsumer = "error deploying VRFv2 Advanced Consumer" +) diff --git a/integration-tests/actions/vrf/vrfv2/vrfv2_models.go b/integration-tests/actions/vrf/vrfv2/vrfv2_models.go new file mode 100644 index 00000000..3166fa0c --- /dev/null +++ b/integration-tests/actions/vrf/vrfv2/vrfv2_models.go @@ -0,0 +1,10 @@ +package vrfv2 + +import ( + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +type VRFV2WrapperContracts struct { + VRFV2Wrapper contracts.VRFV2Wrapper + LoadTestConsumers []contracts.VRFv2WrapperLoadTestConsumer +} diff --git a/integration-tests/actions/vrf/vrfv2/vrfv2_steps.go b/integration-tests/actions/vrf/vrfv2/vrfv2_steps.go new file mode 100644 index 00000000..50f085db --- /dev/null +++ b/integration-tests/actions/vrf/vrfv2/vrfv2_steps.go @@ -0,0 +1,1067 @@ +package vrfv2 + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-testing-framework/utils/conversions" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + testconfig "github.com/goplugin/pluginv3.0/integration-tests/testconfig/vrfv2" + "github.com/goplugin/pluginv3.0/integration-tests/types/config/node" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_owner" + + "github.com/google/uuid" + + "github.com/goplugin/plugin-testing-framework/blockchain" + pluginutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + + 
"github.com/goplugin/pluginv3.0/integration-tests/actions" + vrfcommon "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/common" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + "github.com/goplugin/pluginv3.0/integration-tests/types" +) + +func DeployVRFV2Contracts( + env *test_env.CLClusterTestEnv, + linkTokenContract contracts.LinkToken, + linkEthFeedContract contracts.MockETHPLIFeed, + consumerContractsAmount int, + useVRFOwner bool, + useTestCoordinator bool, +) (*vrfcommon.VRFContracts, error) { + bhs, err := env.ContractDeployer.DeployBlockhashStore() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrDeployBlockHashStore, err) + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + + var coordinatorAddress string + if useTestCoordinator { + testCoordinator, err := env.ContractDeployer.DeployVRFCoordinatorTestV2(linkTokenContract.Address(), bhs.Address(), linkEthFeedContract.Address()) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrDeployCoordinator, err) + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + coordinatorAddress = testCoordinator.Address() + } else { + coordinator, err := env.ContractDeployer.DeployVRFCoordinatorV2(linkTokenContract.Address(), bhs.Address(), linkEthFeedContract.Address()) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrDeployCoordinator, err) + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + coordinatorAddress = coordinator.Address() + } + consumers, err := DeployVRFV2Consumers(env.ContractDeployer, coordinatorAddress, consumerContractsAmount) + if err != 
nil { + return nil, err + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + + coordinator, err := env.ContractLoader.LoadVRFCoordinatorV2(coordinatorAddress) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrLoadingCoordinator, err) + } + if useVRFOwner { + vrfOwner, err := env.ContractDeployer.DeployVRFOwner(coordinatorAddress) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrDeployCoordinator, err) + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + return &vrfcommon.VRFContracts{ + CoordinatorV2: coordinator, + VRFOwner: vrfOwner, + BHS: bhs, + VRFV2Consumer: consumers, + }, nil + } + return &vrfcommon.VRFContracts{ + CoordinatorV2: coordinator, + VRFOwner: nil, + BHS: bhs, + VRFV2Consumer: consumers, + }, nil +} + +func DeployVRFV2Consumers(contractDeployer contracts.ContractDeployer, coordinatorAddress string, consumerContractsAmount int) ([]contracts.VRFv2LoadTestConsumer, error) { + var consumers []contracts.VRFv2LoadTestConsumer + for i := 1; i <= consumerContractsAmount; i++ { + loadTestConsumer, err := contractDeployer.DeployVRFv2LoadTestConsumer(coordinatorAddress) + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrAdvancedConsumer, err) + } + consumers = append(consumers, loadTestConsumer) + } + return consumers, nil +} + +func DeployVRFV2WrapperConsumers(contractDeployer contracts.ContractDeployer, linkTokenAddress string, vrfV2Wrapper contracts.VRFV2Wrapper, consumerContractsAmount int) ([]contracts.VRFv2WrapperLoadTestConsumer, error) { + var consumers []contracts.VRFv2WrapperLoadTestConsumer + for i := 1; i <= consumerContractsAmount; i++ { + loadTestConsumer, err := contractDeployer.DeployVRFV2WrapperLoadTestConsumer(linkTokenAddress, vrfV2Wrapper.Address()) + if err != nil { + return nil, fmt.Errorf("%s, err %w", 
ErrAdvancedConsumer, err) + } + consumers = append(consumers, loadTestConsumer) + } + return consumers, nil +} + +func DeployVRFV2DirectFundingContracts( + contractDeployer contracts.ContractDeployer, + chainClient blockchain.EVMClient, + linkTokenAddress string, + linkEthFeedAddress string, + coordinator contracts.VRFCoordinatorV2, + consumerContractsAmount int, +) (*VRFV2WrapperContracts, error) { + vrfv2Wrapper, err := contractDeployer.DeployVRFV2Wrapper(linkTokenAddress, linkEthFeedAddress, coordinator.Address()) + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrDeployVRFV2Wrapper, err) + } + err = chainClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + + consumers, err := DeployVRFV2WrapperConsumers(contractDeployer, linkTokenAddress, vrfv2Wrapper, consumerContractsAmount) + if err != nil { + return nil, err + } + err = chainClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + return &VRFV2WrapperContracts{vrfv2Wrapper, consumers}, nil +} + +func CreateVRFV2Job( + pluginNode *client.PluginClient, + vrfJobSpecConfig vrfcommon.VRFJobSpecConfig, +) (*client.Job, error) { + jobUUID := uuid.New() + os := &client.VRFV2TxPipelineSpec{ + Address: vrfJobSpecConfig.CoordinatorAddress, + EstimateGasMultiplier: vrfJobSpecConfig.EstimateGasMultiplier, + FromAddress: vrfJobSpecConfig.FromAddresses[0], + SimulationBlock: vrfJobSpecConfig.SimulationBlock, + } + ost, err := os.String() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrParseJob, err) + } + + spec := &client.VRFV2JobSpec{ + Name: fmt.Sprintf("vrf-v2-%s", jobUUID), + ForwardingAllowed: vrfJobSpecConfig.ForwardingAllowed, + CoordinatorAddress: vrfJobSpecConfig.CoordinatorAddress, + FromAddresses: vrfJobSpecConfig.FromAddresses, + EVMChainID: vrfJobSpecConfig.EVMChainID, + MinIncomingConfirmations: vrfJobSpecConfig.MinIncomingConfirmations, + 
PublicKey: vrfJobSpecConfig.PublicKey, + ExternalJobID: jobUUID.String(), + ObservationSource: ost, + BatchFulfillmentEnabled: vrfJobSpecConfig.BatchFulfillmentEnabled, + BatchFulfillmentGasMultiplier: vrfJobSpecConfig.BatchFulfillmentGasMultiplier, + PollPeriod: vrfJobSpecConfig.PollPeriod, + RequestTimeout: vrfJobSpecConfig.RequestTimeout, + } + if vrfJobSpecConfig.VRFOwnerConfig.UseVRFOwner { + spec.VRFOwner = vrfJobSpecConfig.VRFOwnerConfig.OwnerAddress + spec.UseVRFOwner = true + } + + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrParseJob, err) + + } + job, err := pluginNode.MustCreateJob(spec) + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrCreatingVRFv2Job, err) + } + return job, nil +} + +func VRFV2RegisterProvingKey( + vrfKey *client.VRFKey, + oracleAddress string, + coordinator contracts.VRFCoordinatorV2, +) (vrfcommon.VRFEncodedProvingKey, error) { + provingKey, err := actions.EncodeOnChainVRFProvingKey(*vrfKey) + if err != nil { + return vrfcommon.VRFEncodedProvingKey{}, fmt.Errorf("%s, err %w", vrfcommon.ErrEncodingProvingKey, err) + } + err = coordinator.RegisterProvingKey( + oracleAddress, + provingKey, + ) + if err != nil { + return vrfcommon.VRFEncodedProvingKey{}, fmt.Errorf("%s, err %w", vrfcommon.ErrRegisterProvingKey, err) + } + return provingKey, nil +} + +func FundVRFCoordinatorV2Subscription( + linkToken contracts.LinkToken, + coordinator contracts.VRFCoordinatorV2, + chainClient blockchain.EVMClient, + subscriptionID uint64, + linkFundingAmountJuels *big.Int, +) error { + encodedSubId, err := pluginutils.ABIEncode(`[{"type":"uint64"}]`, subscriptionID) + if err != nil { + return fmt.Errorf("%s, err %w", vrfcommon.ErrABIEncodingFunding, err) + } + _, err = linkToken.TransferAndCall(coordinator.Address(), linkFundingAmountJuels, encodedSubId) + if err != nil { + return fmt.Errorf("%s, err %w", vrfcommon.ErrSendingLinkToken, err) + } + return chainClient.WaitForEvents() +} + +// SetupVRFV2Environment will 
create specified number of subscriptions and add the same conumer/s to each of them +func SetupVRFV2Environment( + env *test_env.CLClusterTestEnv, + nodesToCreate []vrfcommon.VRFNodeType, + vrfv2TestConfig types.VRFv2TestConfig, + useVRFOwner bool, + useTestCoordinator bool, + linkToken contracts.LinkToken, + mockNativePLIFeed contracts.MockETHPLIFeed, + registerProvingKeyAgainstAddress string, + numberOfTxKeysToCreate int, + numberOfConsumers int, + numberOfSubToCreate int, + l zerolog.Logger, +) (*vrfcommon.VRFContracts, []uint64, *vrfcommon.VRFKeyData, map[vrfcommon.VRFNodeType]*vrfcommon.VRFNode, error) { + l.Info().Msg("Starting VRFV2 environment setup") + vrfv2Config := vrfv2TestConfig.GetVRFv2Config().General + + vrfContracts, subIDs, err := SetupContracts( + env, + linkToken, + mockNativePLIFeed, + numberOfConsumers, + useVRFOwner, + useTestCoordinator, + vrfv2Config, + numberOfSubToCreate, + l, + ) + if err != nil { + return nil, nil, nil, nil, err + } + + var nodesMap = make(map[vrfcommon.VRFNodeType]*vrfcommon.VRFNode) + for i, nodeType := range nodesToCreate { + nodesMap[nodeType] = &vrfcommon.VRFNode{ + CLNode: env.ClCluster.Nodes[i], + } + } + l.Info().Str("Node URL", nodesMap[vrfcommon.VRF].CLNode.API.URL()).Msg("Creating VRF Key on the Node") + vrfKey, err := nodesMap[vrfcommon.VRF].CLNode.API.MustCreateVRFKey() + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("%s, err %w", ErrCreatingVRFv2Key, err) + } + pubKeyCompressed := vrfKey.Data.ID + l.Info(). + Str("Node URL", nodesMap[vrfcommon.VRF].CLNode.API.URL()). + Str("Keyhash", vrfKey.Data.Attributes.Hash). + Str("VRF Compressed Key", vrfKey.Data.Attributes.Compressed). + Str("VRF Uncompressed Key", vrfKey.Data.Attributes.Uncompressed). 
+ Msg("VRF Key created on the Node") + + l.Info().Str("Coordinator", vrfContracts.CoordinatorV2.Address()).Msg("Registering Proving Key") + provingKey, err := VRFV2RegisterProvingKey(vrfKey, registerProvingKeyAgainstAddress, vrfContracts.CoordinatorV2) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrRegisteringProvingKey, err) + } + keyHash, err := vrfContracts.CoordinatorV2.HashOfKey(context.Background(), provingKey) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrCreatingProvingKeyHash, err) + } + + chainID := env.EVMClient.GetChainID() + vrfTXKeyAddressStrings, vrfTXKeyAddresses, err := vrfcommon.CreateFundAndGetSendingKeys( + env.EVMClient, + nodesMap[vrfcommon.VRF], + *vrfv2TestConfig.GetCommonConfig().PluginNodeFunding, + numberOfTxKeysToCreate, + chainID, + ) + if err != nil { + return nil, nil, nil, nil, err + } + nodesMap[vrfcommon.VRF].TXKeyAddressStrings = vrfTXKeyAddressStrings + + var vrfOwnerConfig *vrfcommon.VRFOwnerConfig + if useVRFOwner { + err := setupVRFOwnerContract(env, vrfContracts, vrfTXKeyAddressStrings, vrfTXKeyAddresses, l) + if err != nil { + return nil, nil, nil, nil, err + } + vrfOwnerConfig = &vrfcommon.VRFOwnerConfig{ + OwnerAddress: vrfContracts.VRFOwner.Address(), + UseVRFOwner: useVRFOwner, + } + } else { + vrfOwnerConfig = &vrfcommon.VRFOwnerConfig{ + OwnerAddress: "", + UseVRFOwner: useVRFOwner, + } + } + + g := errgroup.Group{} + if vrfNode, exists := nodesMap[vrfcommon.VRF]; exists { + g.Go(func() error { + err := setupVRFNode(vrfContracts, chainID, vrfv2Config, pubKeyCompressed, vrfOwnerConfig, l, vrfNode) + if err != nil { + return err + } + return nil + }) + } + + if bhsNode, exists := nodesMap[vrfcommon.BHS]; exists { + g.Go(func() error { + err := vrfcommon.SetupBHSNode( + env, + vrfv2TestConfig.GetVRFv2Config().General, + numberOfTxKeysToCreate, + chainID, + vrfContracts.CoordinatorV2.Address(), + vrfContracts.BHS.Address(), + 
*vrfv2TestConfig.GetCommonConfig().PluginNodeFunding, + l, + bhsNode, + ) + if err != nil { + return err + } + return nil + }) + } + + if err := g.Wait(); err != nil { + return nil, nil, nil, nil, fmt.Errorf("VRF node setup ended up with an error: %w", err) + } + + vrfKeyData := vrfcommon.VRFKeyData{ + VRFKey: vrfKey, + EncodedProvingKey: provingKey, + KeyHash: keyHash, + } + + l.Info().Msg("VRFV2 environment setup is finished") + return vrfContracts, subIDs, &vrfKeyData, nodesMap, nil +} + +func setupVRFNode(contracts *vrfcommon.VRFContracts, chainID *big.Int, vrfv2Config *testconfig.General, pubKeyCompressed string, vrfOwnerConfig *vrfcommon.VRFOwnerConfig, l zerolog.Logger, vrfNode *vrfcommon.VRFNode) error { + vrfJobSpecConfig := vrfcommon.VRFJobSpecConfig{ + ForwardingAllowed: *vrfv2Config.VRFJobForwardingAllowed, + CoordinatorAddress: contracts.CoordinatorV2.Address(), + FromAddresses: vrfNode.TXKeyAddressStrings, + EVMChainID: chainID.String(), + MinIncomingConfirmations: int(*vrfv2Config.MinimumConfirmations), + PublicKey: pubKeyCompressed, + EstimateGasMultiplier: *vrfv2Config.VRFJobEstimateGasMultiplier, + BatchFulfillmentEnabled: *vrfv2Config.VRFJobBatchFulfillmentEnabled, + BatchFulfillmentGasMultiplier: *vrfv2Config.VRFJobBatchFulfillmentGasMultiplier, + PollPeriod: vrfv2Config.VRFJobPollPeriod.Duration, + RequestTimeout: vrfv2Config.VRFJobRequestTimeout.Duration, + SimulationBlock: vrfv2Config.VRFJobSimulationBlock, + VRFOwnerConfig: vrfOwnerConfig, + } + + l.Info().Msg("Creating VRFV2 Job") + vrfV2job, err := CreateVRFV2Job( + vrfNode.CLNode.API, + vrfJobSpecConfig, + ) + if err != nil { + return fmt.Errorf("%s, err %w", ErrCreateVRFV2Jobs, err) + } + vrfNode.Job = vrfV2job + + // this part is here because VRFv2 can work with only a specific key + // [[EVM.KeySpecific]] + // Key = '...' 
+ nodeConfig := node.NewConfig(vrfNode.CLNode.NodeConfig, + node.WithLogPollInterval(1*time.Second), + node.WithVRFv2EVMEstimator(vrfNode.TXKeyAddressStrings, *vrfv2Config.CLNodeMaxGasPriceGWei), + ) + l.Info().Msg("Restarting Node with new sending key PriceMax configuration") + err = vrfNode.CLNode.Restart(nodeConfig) + if err != nil { + return fmt.Errorf("%s, err %w", vrfcommon.ErrRestartCLNode, err) + } + return nil +} + +func SetupContracts( + env *test_env.CLClusterTestEnv, + linkToken contracts.LinkToken, + mockNativePLIFeed contracts.MockETHPLIFeed, + numberOfConsumers int, + useVRFOwner bool, + useTestCoordinator bool, + vrfv2Config *testconfig.General, + numberOfSubToCreate int, + l zerolog.Logger, +) (*vrfcommon.VRFContracts, []uint64, error) { + l.Info().Msg("Deploying VRFV2 contracts") + vrfContracts, err := DeployVRFV2Contracts( + env, + linkToken, + mockNativePLIFeed, + numberOfConsumers, + useVRFOwner, + useTestCoordinator, + ) + if err != nil { + return nil, nil, fmt.Errorf("%s, err %w", ErrDeployVRFV2Contracts, err) + } + + vrfCoordinatorV2FeeConfig := vrf_coordinator_v2.VRFCoordinatorV2FeeConfig{ + FulfillmentFlatFeeLinkPPMTier1: *vrfv2Config.FulfillmentFlatFeeLinkPPMTier1, + FulfillmentFlatFeeLinkPPMTier2: *vrfv2Config.FulfillmentFlatFeeLinkPPMTier2, + FulfillmentFlatFeeLinkPPMTier3: *vrfv2Config.FulfillmentFlatFeeLinkPPMTier3, + FulfillmentFlatFeeLinkPPMTier4: *vrfv2Config.FulfillmentFlatFeeLinkPPMTier4, + FulfillmentFlatFeeLinkPPMTier5: *vrfv2Config.FulfillmentFlatFeeLinkPPMTier5, + ReqsForTier2: big.NewInt(*vrfv2Config.ReqsForTier2), + ReqsForTier3: big.NewInt(*vrfv2Config.ReqsForTier3), + ReqsForTier4: big.NewInt(*vrfv2Config.ReqsForTier4), + ReqsForTier5: big.NewInt(*vrfv2Config.ReqsForTier5)} + + l.Info().Str("Coordinator", vrfContracts.CoordinatorV2.Address()).Msg("Setting Coordinator Config") + err = vrfContracts.CoordinatorV2.SetConfig( + *vrfv2Config.MinimumConfirmations, + *vrfv2Config.MaxGasLimitCoordinatorConfig, + 
*vrfv2Config.StalenessSeconds,
+		*vrfv2Config.GasAfterPaymentCalculation,
+		big.NewInt(*vrfv2Config.FallbackWeiPerUnitLink),
+		vrfCoordinatorV2FeeConfig,
+	)
+	if err != nil {
+		return nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrSetVRFCoordinatorConfig, err)
+	}
+	err = env.EVMClient.WaitForEvents()
+	if err != nil {
+		return nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err)
+	}
+	l.Info().
+		Str("Coordinator", vrfContracts.CoordinatorV2.Address()).
+		Int("Number of Subs to create", numberOfSubToCreate).
+		Msg("Creating and funding subscriptions, adding consumers")
+	subIDs, err := CreateFundSubsAndAddConsumers(
+		env,
+		big.NewFloat(*vrfv2Config.SubscriptionFundingAmountLink),
+		linkToken,
+		vrfContracts.CoordinatorV2, vrfContracts.VRFV2Consumer, numberOfSubToCreate)
+	if err != nil {
+		return nil, nil, err
+	}
+	return vrfContracts, subIDs, nil
+}
+
+// setupVRFOwnerContract transfers Coordinator ownership to the VRFOwner
+// contract, accepts it, and authorizes the given sending keys on VRFOwner.
+func setupVRFOwnerContract(env *test_env.CLClusterTestEnv, contracts *vrfcommon.VRFContracts, allNativeTokenKeyAddressStrings []string, allNativeTokenKeyAddresses []common.Address, l zerolog.Logger) error {
+	l.Info().Msg("Setting up VRFOwner contract")
+	l.Info().
+		Str("Coordinator", contracts.CoordinatorV2.Address()).
+		Str("VRFOwner", contracts.VRFOwner.Address()).
+		Msg("Transferring ownership of Coordinator to VRFOwner")
+	err := contracts.CoordinatorV2.TransferOwnership(common.HexToAddress(contracts.VRFOwner.Address()))
+	if err != nil {
+		return err // propagate; was `return nil`, silently reporting success on failure
+	}
+	err = env.EVMClient.WaitForEvents()
+	if err != nil {
+		return fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) // was `return nil`
+	}
+	l.Info().
+		Str("VRFOwner", contracts.VRFOwner.Address()).
+		Msg("Accepting VRF Ownership")
+	err = contracts.VRFOwner.AcceptVRFOwnership()
+	if err != nil {
+		return err // was `return nil`
+	}
+	err = env.EVMClient.WaitForEvents()
+	if err != nil {
+		return fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) // was `return nil`
+	}
+	l.Info().
+		Strs("Authorized Senders", allNativeTokenKeyAddressStrings).
+		Str("VRFOwner", contracts.VRFOwner.Address()).
+		Msg("Setting authorized senders for VRFOwner contract")
+	err = contracts.VRFOwner.SetAuthorizedSenders(allNativeTokenKeyAddresses)
+	if err != nil {
+		return err // was `return nil`
+	}
+	err = env.EVMClient.WaitForEvents()
+	if err != nil {
+		return fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err)
+	}
+	return nil // err is known nil here; was `return err`
+}
+
+// SetupVRFV2WrapperEnvironment deploys and configures the VRFv2 direct-funding
+// (wrapper) contracts, funds the wrapper subscription and the first consumer,
+// and returns the wrapper contracts plus the wrapper's subscription ID.
+func SetupVRFV2WrapperEnvironment(
+	env *test_env.CLClusterTestEnv,
+	vrfv2TestConfig tc.VRFv2TestConfig,
+	linkToken contracts.LinkToken,
+	mockNativePLIFeed contracts.MockETHPLIFeed,
+	coordinator contracts.VRFCoordinatorV2,
+	keyHash [32]byte,
+	wrapperConsumerContractsAmount int,
+) (*VRFV2WrapperContracts, *uint64, error) {
+	// Deploy VRF v2 direct funding contracts
+	wrapperContracts, err := DeployVRFV2DirectFundingContracts(
+		env.ContractDeployer,
+		env.EVMClient,
+		linkToken.Address(),
+		mockNativePLIFeed.Address(),
+		coordinator,
+		wrapperConsumerContractsAmount,
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+	err = env.EVMClient.WaitForEvents()
+	if err != nil {
+		return nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err)
+	}
+
+	vrfv2Config := vrfv2TestConfig.GetVRFv2Config()
+
+	// Configure VRF v2 wrapper contract
+	err = wrapperContracts.VRFV2Wrapper.SetConfig(
+		*vrfv2Config.General.WrapperGasOverhead,
+		*vrfv2Config.General.CoordinatorGasOverhead,
+		*vrfv2Config.General.WrapperPremiumPercentage,
+		keyHash,
+		*vrfv2Config.General.WrapperMaxNumberOfWords,
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+	err = env.EVMClient.WaitForEvents()
+	if err != nil {
+		return nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err)
+	}
+
+	// Fetch wrapper subscription ID
+	wrapperSubID, err := wrapperContracts.VRFV2Wrapper.GetSubID(context.Background())
+	if err != nil {
+		return nil, nil, err
+	}
+	err = env.EVMClient.WaitForEvents()
+	if err != nil {
+		return nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err)
+	}
+
+	// Fund wrapper subscription
+	err = FundSubscriptions(env, 
big.NewFloat(*vrfv2Config.General.SubscriptionFundingAmountLink), linkToken, coordinator, []uint64{wrapperSubID}) + if err != nil { + return nil, nil, err + } + + // Fund consumer with PLI + err = linkToken.Transfer( + wrapperContracts.LoadTestConsumers[0].Address(), + big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(*vrfv2Config.General.WrapperConsumerFundingAmountLink)), + ) + if err != nil { + return nil, nil, err + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + + return wrapperContracts, &wrapperSubID, nil +} + +func CreateFundSubsAndAddConsumers( + env *test_env.CLClusterTestEnv, + subscriptionFundingAmountLink *big.Float, + linkToken contracts.LinkToken, + coordinator contracts.VRFCoordinatorV2, + consumers []contracts.VRFv2LoadTestConsumer, + numberOfSubToCreate int, +) ([]uint64, error) { + subIDs, err := CreateSubsAndFund(env, subscriptionFundingAmountLink, linkToken, coordinator, numberOfSubToCreate) + if err != nil { + return nil, err + } + subToConsumersMap := map[uint64][]contracts.VRFv2LoadTestConsumer{} + + //each subscription will have the same consumers + for _, subID := range subIDs { + subToConsumersMap[subID] = consumers + } + + err = AddConsumersToSubs( + subToConsumersMap, + coordinator, + ) + if err != nil { + return nil, err + } + + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + return subIDs, nil +} + +func CreateSubsAndFund( + env *test_env.CLClusterTestEnv, + subscriptionFundingAmountLink *big.Float, + linkToken contracts.LinkToken, + coordinator contracts.VRFCoordinatorV2, + subAmountToCreate int, +) ([]uint64, error) { + subs, err := CreateSubs(env, coordinator, subAmountToCreate) + if err != nil { + return nil, err + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + err = 
FundSubscriptions(env, subscriptionFundingAmountLink, linkToken, coordinator, subs) + if err != nil { + return nil, err + } + return subs, nil +} + +func CreateSubs( + env *test_env.CLClusterTestEnv, + coordinator contracts.VRFCoordinatorV2, + subAmountToCreate int, +) ([]uint64, error) { + var subIDArr []uint64 + + for i := 0; i < subAmountToCreate; i++ { + subID, err := CreateSubAndFindSubID(env, coordinator) + if err != nil { + return nil, err + } + subIDArr = append(subIDArr, subID) + } + return subIDArr, nil +} + +func AddConsumersToSubs( + subToConsumerMap map[uint64][]contracts.VRFv2LoadTestConsumer, + coordinator contracts.VRFCoordinatorV2, +) error { + for subID, consumers := range subToConsumerMap { + for _, consumer := range consumers { + err := coordinator.AddConsumer(subID, consumer.Address()) + if err != nil { + return fmt.Errorf("%s, err %w", vrfcommon.ErrAddConsumerToSub, err) + } + } + } + return nil +} + +func CreateSubAndFindSubID(env *test_env.CLClusterTestEnv, coordinator contracts.VRFCoordinatorV2) (uint64, error) { + tx, err := coordinator.CreateSubscription() + if err != nil { + return 0, fmt.Errorf("%s, err %w", vrfcommon.ErrCreateVRFSubscription, err) + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return 0, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + + receipt, err := env.EVMClient.GetTxReceipt(tx.Hash()) + if err != nil { + return 0, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + + //SubscriptionsCreated Log should be emitted with the subscription ID + subID := receipt.Logs[0].Topics[1].Big().Uint64() + + return subID, nil +} + +func FundSubscriptions( + env *test_env.CLClusterTestEnv, + subscriptionFundingAmountLink *big.Float, + linkAddress contracts.LinkToken, + coordinator contracts.VRFCoordinatorV2, + subIDs []uint64, +) error { + for _, subID := range subIDs { + //Link Billing + amountJuels := conversions.EtherToWei(subscriptionFundingAmountLink) + err := 
FundVRFCoordinatorV2Subscription(linkAddress, coordinator, env.EVMClient, subID, amountJuels) + if err != nil { + return fmt.Errorf("%s, err %w", vrfcommon.ErrFundSubWithLinkToken, err) + } + } + err := env.EVMClient.WaitForEvents() + if err != nil { + return fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + return nil +} + +func DirectFundingRequestRandomnessAndWaitForFulfillment( + l zerolog.Logger, + consumer contracts.VRFv2WrapperLoadTestConsumer, + coordinator contracts.VRFCoordinatorV2, + subID uint64, + vrfv2KeyData *vrfcommon.VRFKeyData, + minimumConfirmations uint16, + callbackGasLimit uint32, + numberOfWords uint32, + randomnessRequestCountPerRequest uint16, + randomnessRequestCountPerRequestDeviation uint16, + randomWordsFulfilledEventTimeout time.Duration, +) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, error) { + logRandRequest(l, consumer.Address(), coordinator.Address(), subID, minimumConfirmations, callbackGasLimit, numberOfWords, randomnessRequestCountPerRequest, randomnessRequestCountPerRequestDeviation) + _, err := consumer.RequestRandomness( + minimumConfirmations, + callbackGasLimit, + numberOfWords, + randomnessRequestCountPerRequest, + ) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrRequestRandomness, err) + } + wrapperAddress, err := consumer.GetWrapper(context.Background()) + if err != nil { + return nil, fmt.Errorf("error getting wrapper address, err: %w", err) + } + fulfillmentEvents, err := WaitForRequestAndFulfillmentEvents( + wrapperAddress.String(), + coordinator, + vrfv2KeyData, + subID, + randomWordsFulfilledEventTimeout, + l, + ) + return fulfillmentEvents, err +} + +func RequestRandomnessAndWaitForFulfillment( + l zerolog.Logger, + consumer contracts.VRFv2LoadTestConsumer, + coordinator contracts.VRFCoordinatorV2, + subID uint64, + vrfKeyData *vrfcommon.VRFKeyData, + minimumConfirmations uint16, + callbackGasLimit uint32, + numberOfWords uint32, + 
randomnessRequestCountPerRequest uint16, + randomnessRequestCountPerRequestDeviation uint16, + randomWordsFulfilledEventTimeout time.Duration, +) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, error) { + logRandRequest(l, consumer.Address(), coordinator.Address(), subID, minimumConfirmations, callbackGasLimit, numberOfWords, randomnessRequestCountPerRequest, randomnessRequestCountPerRequestDeviation) + _, err := consumer.RequestRandomness( + vrfKeyData.KeyHash, + subID, + minimumConfirmations, + callbackGasLimit, + numberOfWords, + randomnessRequestCountPerRequest, + ) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrRequestRandomness, err) + } + + fulfillmentEvents, err := WaitForRequestAndFulfillmentEvents( + consumer.Address(), + coordinator, + vrfKeyData, + subID, + randomWordsFulfilledEventTimeout, + l, + ) + return fulfillmentEvents, err +} + +func RequestRandomnessWithForceFulfillAndWaitForFulfillment( + l zerolog.Logger, + consumer contracts.VRFv2LoadTestConsumer, + coordinator contracts.VRFCoordinatorV2, + vrfOwner contracts.VRFOwner, + vrfv2KeyData *vrfcommon.VRFKeyData, + minimumConfirmations uint16, + callbackGasLimit uint32, + numberOfWords uint32, + randomnessRequestCountPerRequest uint16, + randomnessRequestCountPerRequestDeviation uint16, + subTopUpAmount *big.Int, + linkAddress common.Address, + randomWordsFulfilledEventTimeout time.Duration, +) (*vrf_coordinator_v2.VRFCoordinatorV2ConfigSet, *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, *vrf_owner.VRFOwnerRandomWordsForced, error) { + logRandRequest(l, consumer.Address(), coordinator.Address(), 0, minimumConfirmations, callbackGasLimit, numberOfWords, randomnessRequestCountPerRequest, randomnessRequestCountPerRequestDeviation) + _, err := consumer.RequestRandomWordsWithForceFulfill( + vrfv2KeyData.KeyHash, + minimumConfirmations, + callbackGasLimit, + numberOfWords, + randomnessRequestCountPerRequest, + subTopUpAmount, + linkAddress, + ) + if err != nil 
{ + return nil, nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrRequestRandomness, err) + } + + randomWordsRequestedEvent, err := coordinator.WaitForRandomWordsRequestedEvent( + [][32]byte{vrfv2KeyData.KeyHash}, + nil, + []common.Address{common.HexToAddress(consumer.Address())}, + time.Minute*1, + ) + if err != nil { + return nil, nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitRandomWordsRequestedEvent, err) + } + LogRandomnessRequestedEvent(l, coordinator, randomWordsRequestedEvent) + + errorChannel := make(chan error) + configSetEventChannel := make(chan *vrf_coordinator_v2.VRFCoordinatorV2ConfigSet) + randWordsFulfilledEventChannel := make(chan *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled) + randWordsForcedEventChannel := make(chan *vrf_owner.VRFOwnerRandomWordsForced) + + go func() { + configSetEvent, err := coordinator.WaitForConfigSetEvent( + randomWordsFulfilledEventTimeout, + ) + if err != nil { + l.Error().Err(err).Msg("error waiting for ConfigSetEvent") + errorChannel <- err + } + configSetEventChannel <- configSetEvent + }() + + go func() { + randomWordsFulfilledEvent, err := coordinator.WaitForRandomWordsFulfilledEvent( + []*big.Int{randomWordsRequestedEvent.RequestId}, + randomWordsFulfilledEventTimeout, + ) + if err != nil { + l.Error().Err(err).Msg("error waiting for RandomWordsFulfilledEvent") + errorChannel <- err + } + randWordsFulfilledEventChannel <- randomWordsFulfilledEvent + }() + + go func() { + randomWordsForcedEvent, err := vrfOwner.WaitForRandomWordsForcedEvent( + []*big.Int{randomWordsRequestedEvent.RequestId}, + nil, + nil, + randomWordsFulfilledEventTimeout, + ) + if err != nil { + l.Error().Err(err).Msg("error waiting for RandomWordsForcedEvent") + errorChannel <- err + } + randWordsForcedEventChannel <- randomWordsForcedEvent + }() + + var configSetEvent *vrf_coordinator_v2.VRFCoordinatorV2ConfigSet + var randomWordsFulfilledEvent *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled + var randomWordsForcedEvent 
*vrf_owner.VRFOwnerRandomWordsForced + for i := 0; i < 3; i++ { + select { + case err = <-errorChannel: + return nil, nil, nil, err + case configSetEvent = <-configSetEventChannel: + case randomWordsFulfilledEvent = <-randWordsFulfilledEventChannel: + LogRandomWordsFulfilledEvent(l, coordinator, randomWordsFulfilledEvent) + case randomWordsForcedEvent = <-randWordsForcedEventChannel: + LogRandomWordsForcedEvent(l, vrfOwner, randomWordsForcedEvent) + case <-time.After(randomWordsFulfilledEventTimeout): + err = fmt.Errorf("timeout waiting for ConfigSet, RandomWordsFulfilled and RandomWordsForced events") + } + } + return configSetEvent, randomWordsFulfilledEvent, randomWordsForcedEvent, err +} + +func WaitForRequestAndFulfillmentEvents( + consumerAddress string, + coordinator contracts.VRFCoordinatorV2, + vrfv2KeyData *vrfcommon.VRFKeyData, + subID uint64, + randomWordsFulfilledEventTimeout time.Duration, + l zerolog.Logger, +) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, error) { + randomWordsRequestedEvent, err := coordinator.WaitForRandomWordsRequestedEvent( + [][32]byte{vrfv2KeyData.KeyHash}, + []uint64{subID}, + []common.Address{common.HexToAddress(consumerAddress)}, + time.Minute*1, + ) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitRandomWordsRequestedEvent, err) + } + LogRandomnessRequestedEvent(l, coordinator, randomWordsRequestedEvent) + + randomWordsFulfilledEvent, err := coordinator.WaitForRandomWordsFulfilledEvent( + []*big.Int{randomWordsRequestedEvent.RequestId}, + randomWordsFulfilledEventTimeout, + ) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitRandomWordsFulfilledEvent, err) + } + LogRandomWordsFulfilledEvent(l, coordinator, randomWordsFulfilledEvent) + return randomWordsFulfilledEvent, err +} + +func WaitForRequestCountEqualToFulfilmentCount(consumer contracts.VRFv2LoadTestConsumer, timeout time.Duration, wg *sync.WaitGroup) (*big.Int, *big.Int, error) { + metricsChannel := 
make(chan *contracts.VRFLoadTestMetrics) + metricsErrorChannel := make(chan error) + + testContext, testCancel := context.WithTimeout(context.Background(), timeout) + defer testCancel() + + ticker := time.NewTicker(time.Second * 1) + var metrics *contracts.VRFLoadTestMetrics + for { + select { + case <-testContext.Done(): + ticker.Stop() + wg.Done() + return metrics.RequestCount, metrics.FulfilmentCount, + fmt.Errorf("timeout waiting for rand request and fulfilments to be equal AFTER performance test was executed. Request Count: %d, Fulfilment Count: %d", + metrics.RequestCount.Uint64(), metrics.FulfilmentCount.Uint64()) + case <-ticker.C: + go retrieveLoadTestMetrics(consumer, metricsChannel, metricsErrorChannel) + case metrics = <-metricsChannel: + if metrics.RequestCount.Cmp(metrics.FulfilmentCount) == 0 { + ticker.Stop() + wg.Done() + return metrics.RequestCount, metrics.FulfilmentCount, nil + } + case err := <-metricsErrorChannel: + ticker.Stop() + wg.Done() + return nil, nil, err + } + } +} + +func retrieveLoadTestMetrics( + consumer contracts.VRFv2LoadTestConsumer, + metricsChannel chan *contracts.VRFLoadTestMetrics, + metricsErrorChannel chan error, +) { + metrics, err := consumer.GetLoadTestMetrics(context.Background()) + if err != nil { + metricsErrorChannel <- err + } + metricsChannel <- metrics +} + +func LogSubDetails(l zerolog.Logger, subscription vrf_coordinator_v2.GetSubscription, subID uint64, coordinator contracts.VRFCoordinatorV2) { + l.Debug(). + Str("Coordinator", coordinator.Address()). + Str("Link Balance", (*commonassets.Link)(subscription.Balance).Link()). + Uint64("Subscription ID", subID). + Str("Subscription Owner", subscription.Owner.String()). + Interface("Subscription Consumers", subscription.Consumers). + Msg("Subscription Data") +} + +func LogRandomnessRequestedEvent( + l zerolog.Logger, + coordinator contracts.VRFCoordinatorV2, + randomWordsRequestedEvent *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested, +) { + l.Info(). 
+ Str("Coordinator", coordinator.Address()). + Str("Request ID", randomWordsRequestedEvent.RequestId.String()). + Uint64("Subscription ID", randomWordsRequestedEvent.SubId). + Str("Sender Address", randomWordsRequestedEvent.Sender.String()). + Interface("Keyhash", randomWordsRequestedEvent.KeyHash). + Uint32("Callback Gas Limit", randomWordsRequestedEvent.CallbackGasLimit). + Uint32("Number of Words", randomWordsRequestedEvent.NumWords). + Uint16("Minimum Request Confirmations", randomWordsRequestedEvent.MinimumRequestConfirmations). + Msg("RandomnessRequested Event") +} + +func LogRandomWordsFulfilledEvent( + l zerolog.Logger, + coordinator contracts.VRFCoordinatorV2, + randomWordsFulfilledEvent *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, +) { + l.Info(). + Str("Coordinator", coordinator.Address()). + Str("Total Payment", randomWordsFulfilledEvent.Payment.String()). + Str("TX Hash", randomWordsFulfilledEvent.Raw.TxHash.String()). + Str("Request ID", randomWordsFulfilledEvent.RequestId.String()). + Bool("Success", randomWordsFulfilledEvent.Success). + Msg("RandomWordsFulfilled Event (TX metadata)") +} + +func LogRandomWordsForcedEvent( + l zerolog.Logger, + vrfOwner contracts.VRFOwner, + randomWordsForcedEvent *vrf_owner.VRFOwnerRandomWordsForced, +) { + l.Debug(). + Str("VRFOwner", vrfOwner.Address()). + Uint64("Sub ID", randomWordsForcedEvent.SubId). + Str("TX Hash", randomWordsForcedEvent.Raw.TxHash.String()). + Str("Request ID", randomWordsForcedEvent.RequestId.String()). + Str("Sender", randomWordsForcedEvent.Sender.String()). + Msg("RandomWordsForced Event (TX metadata)") +} + +func logRandRequest( + l zerolog.Logger, + consumer string, + coordinator string, + subID uint64, + minimumConfirmations uint16, + callbackGasLimit uint32, + numberOfWords uint32, + randomnessRequestCountPerRequest uint16, + randomnessRequestCountPerRequestDeviation uint16, +) { + l.Info(). + Str("Consumer", consumer). + Str("Coordinator", coordinator). 
+ Uint64("SubID", subID). + Uint16("MinimumConfirmations", minimumConfirmations). + Uint32("CallbackGasLimit", callbackGasLimit). + Uint32("NumberOfWords", numberOfWords). + Uint16("RandomnessRequestCountPerRequest", randomnessRequestCountPerRequest). + Uint16("RandomnessRequestCountPerRequestDeviation", randomnessRequestCountPerRequestDeviation). + Msg("Requesting randomness") +} diff --git a/integration-tests/actions/vrf/vrfv2plus/errors.go b/integration-tests/actions/vrf/vrfv2plus/errors.go new file mode 100644 index 00000000..894f7514 --- /dev/null +++ b/integration-tests/actions/vrf/vrfv2plus/errors.go @@ -0,0 +1,17 @@ +package vrfv2plus + +const ( + ErrCreatingVRFv2PlusKey = "error creating VRFv2Plus key" + ErrAdvancedConsumer = "error deploying VRFv2Plus Advanced Consumer" + ErrCreatingVRFv2PlusJob = "error creating VRFv2Plus job" + ErrDeployVRFV2_5Contracts = "error deploying VRFV2_5 contracts" + ErrAddConsumerToSub = "error adding consumer to VRF Subscription" + ErrFundSubWithNativeToken = "error funding subscription with native token" + ErrSetLinkNativeLinkFeed = "error setting Link and ETH/PLI feed for VRF Coordinator contract" + ErrCreateVRFV2PlusJobs = "error creating VRF V2 Plus Jobs" + ErrRequestRandomnessDirectFundingLinkPayment = "error requesting randomness with direct funding and link payment" + ErrRequestRandomnessDirectFundingNativePayment = "error requesting randomness with direct funding and native payment" + ErrLinkTotalBalance = "error waiting for RandomWordsFulfilled event" + ErrNativeTokenBalance = "error waiting for RandomWordsFulfilled event" + ErrDeployWrapper = "error deploying VRFV2PlusWrapper" +) diff --git a/integration-tests/actions/vrf/vrfv2plus/vrfv2plus_models.go b/integration-tests/actions/vrf/vrfv2plus/vrfv2plus_models.go new file mode 100644 index 00000000..e2a00603 --- /dev/null +++ b/integration-tests/actions/vrf/vrfv2plus/vrfv2plus_models.go @@ -0,0 +1,10 @@ +package vrfv2plus + +import ( + 
"github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +type VRFV2PlusWrapperContracts struct { + VRFV2PlusWrapper contracts.VRFV2PlusWrapper + LoadTestConsumers []contracts.VRFv2PlusWrapperLoadTestConsumer +} diff --git a/integration-tests/actions/vrf/vrfv2plus/vrfv2plus_steps.go b/integration-tests/actions/vrf/vrfv2plus/vrfv2plus_steps.go new file mode 100644 index 00000000..4e0b4883 --- /dev/null +++ b/integration-tests/actions/vrf/vrfv2plus/vrfv2plus_steps.go @@ -0,0 +1,1156 @@ +package vrfv2plus + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + "golang.org/x/sync/errgroup" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-testing-framework/utils/conversions" + vrfcommon "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/common" + testconfig "github.com/goplugin/pluginv3.0/integration-tests/testconfig/vrfv2" + "github.com/goplugin/pluginv3.0/integration-tests/types/config/node" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2plus_wrapper_load_test_consumer" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/rs/zerolog" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + vrfv2plus_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/vrfv2plus" + "github.com/goplugin/pluginv3.0/integration-tests/types" + pluginutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2_5" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_upgraded_version" +) + +func DeployVRFV2_5Contracts( + 
contractDeployer contracts.ContractDeployer, + chainClient blockchain.EVMClient, + consumerContractsAmount int, +) (*vrfcommon.VRFContracts, error) { + bhs, err := contractDeployer.DeployBlockhashStore() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrDeployBlockHashStore, err) + } + err = chainClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + coordinator, err := contractDeployer.DeployVRFCoordinatorV2_5(bhs.Address()) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrDeployCoordinator, err) + } + err = chainClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + consumers, err := DeployVRFV2PlusConsumers(contractDeployer, coordinator, consumerContractsAmount) + if err != nil { + return nil, err + } + err = chainClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + return &vrfcommon.VRFContracts{ + CoordinatorV2Plus: coordinator, + BHS: bhs, + VRFV2PlusConsumer: consumers, + }, nil +} + +func DeployVRFV2PlusConsumers(contractDeployer contracts.ContractDeployer, coordinator contracts.VRFCoordinatorV2_5, consumerContractsAmount int) ([]contracts.VRFv2PlusLoadTestConsumer, error) { + var consumers []contracts.VRFv2PlusLoadTestConsumer + for i := 1; i <= consumerContractsAmount; i++ { + loadTestConsumer, err := contractDeployer.DeployVRFv2PlusLoadTestConsumer(coordinator.Address()) + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrAdvancedConsumer, err) + } + consumers = append(consumers, loadTestConsumer) + } + return consumers, nil +} + +func CreateVRFV2PlusJob( + pluginNode *client.PluginClient, + vrfJobSpecConfig vrfcommon.VRFJobSpecConfig, +) (*client.Job, error) { + jobUUID := uuid.New() + os := &client.VRFV2PlusTxPipelineSpec{ + Address: vrfJobSpecConfig.CoordinatorAddress, + EstimateGasMultiplier: 
vrfJobSpecConfig.EstimateGasMultiplier, + FromAddress: vrfJobSpecConfig.FromAddresses[0], + SimulationBlock: vrfJobSpecConfig.SimulationBlock, + } + ost, err := os.String() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrParseJob, err) + } + + job, err := pluginNode.MustCreateJob(&client.VRFV2PlusJobSpec{ + Name: fmt.Sprintf("vrf-v2-plus-%s", jobUUID), + CoordinatorAddress: vrfJobSpecConfig.CoordinatorAddress, + FromAddresses: vrfJobSpecConfig.FromAddresses, + EVMChainID: vrfJobSpecConfig.EVMChainID, + MinIncomingConfirmations: vrfJobSpecConfig.MinIncomingConfirmations, + PublicKey: vrfJobSpecConfig.PublicKey, + ExternalJobID: jobUUID.String(), + ObservationSource: ost, + BatchFulfillmentEnabled: vrfJobSpecConfig.BatchFulfillmentEnabled, + BatchFulfillmentGasMultiplier: vrfJobSpecConfig.BatchFulfillmentGasMultiplier, + PollPeriod: vrfJobSpecConfig.PollPeriod, + RequestTimeout: vrfJobSpecConfig.RequestTimeout, + }) + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrCreatingVRFv2PlusJob, err) + } + + return job, nil +} + +func VRFV2_5RegisterProvingKey( + vrfKey *client.VRFKey, + coordinator contracts.VRFCoordinatorV2_5, + gasLaneMaxGas uint64, +) (vrfcommon.VRFEncodedProvingKey, error) { + provingKey, err := actions.EncodeOnChainVRFProvingKey(*vrfKey) + if err != nil { + return vrfcommon.VRFEncodedProvingKey{}, fmt.Errorf("%s, err %w", vrfcommon.ErrEncodingProvingKey, err) + } + err = coordinator.RegisterProvingKey( + provingKey, + gasLaneMaxGas, + ) + if err != nil { + return vrfcommon.VRFEncodedProvingKey{}, fmt.Errorf("%s, err %w", vrfcommon.ErrRegisterProvingKey, err) + } + return provingKey, nil +} + +func VRFV2PlusUpgradedVersionRegisterProvingKey( + vrfKey *client.VRFKey, + coordinator contracts.VRFCoordinatorV2PlusUpgradedVersion, +) (vrfcommon.VRFEncodedProvingKey, error) { + provingKey, err := actions.EncodeOnChainVRFProvingKey(*vrfKey) + if err != nil { + return vrfcommon.VRFEncodedProvingKey{}, fmt.Errorf("%s, err %w", 
vrfcommon.ErrEncodingProvingKey, err) + } + err = coordinator.RegisterProvingKey( + provingKey, + ) + if err != nil { + return vrfcommon.VRFEncodedProvingKey{}, fmt.Errorf("%s, err %w", vrfcommon.ErrRegisterProvingKey, err) + } + return provingKey, nil +} + +func FundVRFCoordinatorV2_5Subscription( + linkToken contracts.LinkToken, + coordinator contracts.VRFCoordinatorV2_5, + chainClient blockchain.EVMClient, + subscriptionID *big.Int, + linkFundingAmountJuels *big.Int, +) error { + encodedSubId, err := pluginutils.ABIEncode(`[{"type":"uint256"}]`, subscriptionID) + if err != nil { + return fmt.Errorf("%s, err %w", vrfcommon.ErrABIEncodingFunding, err) + } + _, err = linkToken.TransferAndCall(coordinator.Address(), linkFundingAmountJuels, encodedSubId) + if err != nil { + return fmt.Errorf("%s, err %w", vrfcommon.ErrSendingLinkToken, err) + } + return chainClient.WaitForEvents() +} + +// SetupVRFV2_5Environment will create specified number of subscriptions and add the same conumer/s to each of them +func SetupVRFV2_5Environment( + env *test_env.CLClusterTestEnv, + nodesToCreate []vrfcommon.VRFNodeType, + vrfv2PlusTestConfig types.VRFv2PlusTestConfig, + linkToken contracts.LinkToken, + mockNativePLIFeed contracts.MockETHPLIFeed, + numberOfTxKeysToCreate int, + numberOfConsumers int, + numberOfSubToCreate int, + l zerolog.Logger, +) (*vrfcommon.VRFContracts, []*big.Int, *vrfcommon.VRFKeyData, map[vrfcommon.VRFNodeType]*vrfcommon.VRFNode, error) { + l.Info().Msg("Starting VRFV2 Plus environment setup") + l.Info().Msg("Deploying VRFV2 Plus contracts") + vrfContracts, err := DeployVRFV2_5Contracts(env.ContractDeployer, env.EVMClient, numberOfConsumers) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("%s, err %w", ErrDeployVRFV2_5Contracts, err) + } + + l.Info().Str("Coordinator", vrfContracts.CoordinatorV2Plus.Address()).Msg("Setting Coordinator Config") + vrfv2PlusConfig := vrfv2PlusTestConfig.GetVRFv2PlusConfig().General + err = 
vrfContracts.CoordinatorV2Plus.SetConfig( + *vrfv2PlusConfig.MinimumConfirmations, + *vrfv2PlusConfig.MaxGasLimitCoordinatorConfig, + *vrfv2PlusConfig.StalenessSeconds, + *vrfv2PlusConfig.GasAfterPaymentCalculation, + big.NewInt(*vrfv2PlusConfig.FallbackWeiPerUnitLink), + *vrfv2PlusConfig.FulfillmentFlatFeeNativePPM, + *vrfv2PlusConfig.FulfillmentFlatFeeLinkDiscountPPM, + *vrfv2PlusConfig.NativePremiumPercentage, + *vrfv2PlusConfig.LinkPremiumPercentage, + ) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrSetVRFCoordinatorConfig, err) + } + + l.Info().Str("Coordinator", vrfContracts.CoordinatorV2Plus.Address()).Msg("Setting Link and ETH/PLI feed") + err = vrfContracts.CoordinatorV2Plus.SetPLIAndPLINativeFeed(linkToken.Address(), mockNativePLIFeed.Address()) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("%s, err %w", ErrSetLinkNativeLinkFeed, err) + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + l.Info(). + Str("Coordinator", vrfContracts.CoordinatorV2Plus.Address()). + Int("Number of Subs to create", numberOfSubToCreate). 
+ Msg("Creating and funding subscriptions, adding consumers") + subIDs, err := CreateFundSubsAndAddConsumers( + env, + big.NewFloat(*vrfv2PlusConfig.SubscriptionFundingAmountNative), + big.NewFloat(*vrfv2PlusConfig.SubscriptionFundingAmountLink), + linkToken, + vrfContracts.CoordinatorV2Plus, vrfContracts.VRFV2PlusConsumer, + numberOfSubToCreate, + vrfv2plus_config.BillingType(*vrfv2PlusConfig.SubscriptionBillingType)) + if err != nil { + return nil, nil, nil, nil, err + } + + var nodesMap = make(map[vrfcommon.VRFNodeType]*vrfcommon.VRFNode) + for i, nodeType := range nodesToCreate { + nodesMap[nodeType] = &vrfcommon.VRFNode{ + CLNode: env.ClCluster.Nodes[i], + } + } + l.Info().Str("Node URL", nodesMap[vrfcommon.VRF].CLNode.API.URL()).Msg("Creating VRF Key on the Node") + vrfKey, err := nodesMap[vrfcommon.VRF].CLNode.API.MustCreateVRFKey() + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("%s, err %w", ErrCreatingVRFv2PlusKey, err) + } + pubKeyCompressed := vrfKey.Data.ID + l.Info(). + Str("Node URL", nodesMap[vrfcommon.VRF].CLNode.API.URL()). + Str("Keyhash", vrfKey.Data.Attributes.Hash). + Str("VRF Compressed Key", vrfKey.Data.Attributes.Compressed). + Str("VRF Uncompressed Key", vrfKey.Data.Attributes.Uncompressed). 
+ Msg("VRF Key created on the Node") + + l.Info().Str("Coordinator", vrfContracts.CoordinatorV2Plus.Address()).Msg("Registering Proving Key") + provingKey, err := VRFV2_5RegisterProvingKey(vrfKey, vrfContracts.CoordinatorV2Plus, uint64(*vrfv2PlusConfig.CLNodeMaxGasPriceGWei)*1e9) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrRegisteringProvingKey, err) + } + keyHash, err := vrfContracts.CoordinatorV2Plus.HashOfKey(context.Background(), provingKey) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrCreatingProvingKeyHash, err) + } + + chainID := env.EVMClient.GetChainID() + vrfTXKeyAddressStrings, _, err := vrfcommon.CreateFundAndGetSendingKeys( + env.EVMClient, + nodesMap[vrfcommon.VRF], + *vrfv2PlusTestConfig.GetCommonConfig().PluginNodeFunding, + numberOfTxKeysToCreate, + chainID, + ) + if err != nil { + return nil, nil, nil, nil, err + } + nodesMap[vrfcommon.VRF].TXKeyAddressStrings = vrfTXKeyAddressStrings + + g := errgroup.Group{} + if vrfNode, exists := nodesMap[vrfcommon.VRF]; exists { + g.Go(func() error { + err := setupVRFNode(vrfContracts, chainID, vrfv2PlusConfig.General, pubKeyCompressed, l, vrfNode) + if err != nil { + return err + } + return nil + }) + } + + if bhsNode, exists := nodesMap[vrfcommon.BHS]; exists { + g.Go(func() error { + err := vrfcommon.SetupBHSNode( + env, + vrfv2PlusConfig.General, + numberOfTxKeysToCreate, + chainID, + vrfContracts.CoordinatorV2Plus.Address(), + vrfContracts.BHS.Address(), + *vrfv2PlusTestConfig.GetCommonConfig().PluginNodeFunding, + l, + bhsNode, + ) + if err != nil { + return err + } + return nil + }) + } + + if err := g.Wait(); err != nil { + return nil, nil, nil, nil, fmt.Errorf("VRF node setup ended up with an error: %w", err) + } + + vrfKeyData := vrfcommon.VRFKeyData{ + VRFKey: vrfKey, + EncodedProvingKey: provingKey, + KeyHash: keyHash, + } + + l.Info().Msg("VRFV2 Plus environment setup is finished") + return vrfContracts, subIDs, 
&vrfKeyData, nodesMap, nil +} + +func setupVRFNode(contracts *vrfcommon.VRFContracts, chainID *big.Int, vrfv2Config *testconfig.General, pubKeyCompressed string, l zerolog.Logger, vrfNode *vrfcommon.VRFNode) error { + vrfJobSpecConfig := vrfcommon.VRFJobSpecConfig{ + ForwardingAllowed: *vrfv2Config.VRFJobForwardingAllowed, + CoordinatorAddress: contracts.CoordinatorV2Plus.Address(), + FromAddresses: vrfNode.TXKeyAddressStrings, + EVMChainID: chainID.String(), + MinIncomingConfirmations: int(*vrfv2Config.MinimumConfirmations), + PublicKey: pubKeyCompressed, + EstimateGasMultiplier: *vrfv2Config.VRFJobEstimateGasMultiplier, + BatchFulfillmentEnabled: *vrfv2Config.VRFJobBatchFulfillmentEnabled, + BatchFulfillmentGasMultiplier: *vrfv2Config.VRFJobBatchFulfillmentGasMultiplier, + PollPeriod: vrfv2Config.VRFJobPollPeriod.Duration, + RequestTimeout: vrfv2Config.VRFJobRequestTimeout.Duration, + SimulationBlock: vrfv2Config.VRFJobSimulationBlock, + VRFOwnerConfig: nil, + } + + l.Info().Msg("Creating VRFV2 Plus Job") + job, err := CreateVRFV2PlusJob( + vrfNode.CLNode.API, + vrfJobSpecConfig, + ) + if err != nil { + return fmt.Errorf("%s, err %w", ErrCreateVRFV2PlusJobs, err) + } + vrfNode.Job = job + + // this part is here because VRFv2 can work with only a specific key + // [[EVM.KeySpecific]] + // Key = '...' 
+ nodeConfig := node.NewConfig(vrfNode.CLNode.NodeConfig, + node.WithLogPollInterval(1*time.Second), + node.WithVRFv2EVMEstimator(vrfNode.TXKeyAddressStrings, *vrfv2Config.CLNodeMaxGasPriceGWei), + ) + l.Info().Msg("Restarting Node with new sending key PriceMax configuration") + err = vrfNode.CLNode.Restart(nodeConfig) + if err != nil { + return fmt.Errorf("%s, err %w", vrfcommon.ErrRestartCLNode, err) + } + return nil +} + +func CreateFundSubsAndAddConsumers( + env *test_env.CLClusterTestEnv, + subscriptionFundingAmountNative *big.Float, + subscriptionFundingAmountLink *big.Float, + linkToken contracts.LinkToken, + coordinator contracts.VRFCoordinatorV2_5, + consumers []contracts.VRFv2PlusLoadTestConsumer, + numberOfSubToCreate int, + subscriptionBillingType vrfv2plus_config.BillingType, +) ([]*big.Int, error) { + subIDs, err := CreateSubsAndFund( + env, + subscriptionFundingAmountNative, + subscriptionFundingAmountLink, + linkToken, + coordinator, + numberOfSubToCreate, + subscriptionBillingType, + ) + if err != nil { + return nil, err + } + subToConsumersMap := map[*big.Int][]contracts.VRFv2PlusLoadTestConsumer{} + + //each subscription will have the same consumers + for _, subID := range subIDs { + subToConsumersMap[subID] = consumers + } + + err = AddConsumersToSubs( + subToConsumersMap, + coordinator, + ) + if err != nil { + return nil, err + } + + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + return subIDs, nil +} + +func CreateSubsAndFund( + env *test_env.CLClusterTestEnv, + subscriptionFundingAmountNative *big.Float, + subscriptionFundingAmountLink *big.Float, + linkToken contracts.LinkToken, + coordinator contracts.VRFCoordinatorV2_5, + subAmountToCreate int, + subscriptionBillingType vrfv2plus_config.BillingType, +) ([]*big.Int, error) { + subs, err := CreateSubs(env, coordinator, subAmountToCreate) + if err != nil { + return nil, err + } + err = 
env.EVMClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + err = FundSubscriptions( + env, + subscriptionFundingAmountNative, + subscriptionFundingAmountLink, + linkToken, + coordinator, + subs, + subscriptionBillingType, + ) + if err != nil { + return nil, err + } + return subs, nil +} + +func CreateSubs( + env *test_env.CLClusterTestEnv, + coordinator contracts.VRFCoordinatorV2_5, + subAmountToCreate int, +) ([]*big.Int, error) { + var subIDArr []*big.Int + + for i := 0; i < subAmountToCreate; i++ { + subID, err := CreateSubAndFindSubID(env, coordinator) + if err != nil { + return nil, err + } + subIDArr = append(subIDArr, subID) + } + return subIDArr, nil +} + +func AddConsumersToSubs( + subToConsumerMap map[*big.Int][]contracts.VRFv2PlusLoadTestConsumer, + coordinator contracts.VRFCoordinatorV2_5, +) error { + for subID, consumers := range subToConsumerMap { + for _, consumer := range consumers { + err := coordinator.AddConsumer(subID, consumer.Address()) + if err != nil { + return fmt.Errorf("%s, err %w", ErrAddConsumerToSub, err) + } + } + } + return nil +} + +func CreateSubAndFindSubID(env *test_env.CLClusterTestEnv, coordinator contracts.VRFCoordinatorV2_5) (*big.Int, error) { + tx, err := coordinator.CreateSubscription() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrCreateVRFSubscription, err) + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + + receipt, err := env.EVMClient.GetTxReceipt(tx.Hash()) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + + //SubscriptionsCreated Log should be emitted with the subscription ID + subID := receipt.Logs[0].Topics[1].Big() + + return subID, nil +} + +func FundSubscriptions( + env *test_env.CLClusterTestEnv, + subscriptionFundingAmountNative *big.Float, + subscriptionFundingAmountLink 
*big.Float,
	linkAddress contracts.LinkToken,
	coordinator contracts.VRFCoordinatorV2_5,
	subIDs []*big.Int,
	subscriptionBillingType vrfv2plus_config.BillingType,
) error {
	for _, subID := range subIDs {
		switch subscriptionBillingType {
		case vrfv2plus_config.BillingType_Native:
			//Native Billing
			weiAmount := conversions.EtherToWei(subscriptionFundingAmountNative)
			if err := coordinator.FundSubscriptionWithNative(subID, weiAmount); err != nil {
				return fmt.Errorf("%s, err %w", ErrFundSubWithNativeToken, err)
			}
		case vrfv2plus_config.BillingType_Link:
			//Link Billing
			juelsAmount := conversions.EtherToWei(subscriptionFundingAmountLink)
			if err := FundVRFCoordinatorV2_5Subscription(linkAddress, coordinator, env.EVMClient, subID, juelsAmount); err != nil {
				return fmt.Errorf("%s, err %w", vrfcommon.ErrFundSubWithLinkToken, err)
			}
		case vrfv2plus_config.BillingType_Link_and_Native:
			//Native Billing
			weiAmount := conversions.EtherToWei(subscriptionFundingAmountNative)
			if err := coordinator.FundSubscriptionWithNative(subID, weiAmount); err != nil {
				return fmt.Errorf("%s, err %w", ErrFundSubWithNativeToken, err)
			}
			//Link Billing
			juelsAmount := conversions.EtherToWei(subscriptionFundingAmountLink)
			if err := FundVRFCoordinatorV2_5Subscription(linkAddress, coordinator, env.EVMClient, subID, juelsAmount); err != nil {
				return fmt.Errorf("%s, err %w", vrfcommon.ErrFundSubWithLinkToken, err)
			}
		default:
			return fmt.Errorf("invalid billing type: %s", subscriptionBillingType)
		}
	}
	if err := env.EVMClient.WaitForEvents(); err != nil {
		return fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err)
	}
	return nil
}

// GetUpgradedCoordinatorTotalBalance returns the total PLI and native token
// balances held by the upgraded coordinator.
func GetUpgradedCoordinatorTotalBalance(coordinator contracts.VRFCoordinatorV2PlusUpgradedVersion) (linkTotalBalance *big.Int, nativeTokenTotalBalance *big.Int, err error) {
	linkTotalBalance, err = coordinator.GetLinkTotalBalance(context.Background())
	if err != nil {
		return nil, nil, fmt.Errorf("%s, err %w", ErrLinkTotalBalance, err)
	}
	nativeTokenTotalBalance, err = coordinator.GetNativeTokenTotalBalance(context.Background())
	if err != nil {
		return nil, nil, fmt.Errorf("%s, err %w", ErrNativeTokenBalance, err)
	}
	return
}

// GetCoordinatorTotalBalance returns the total PLI and native token balances
// held by the coordinator.
func GetCoordinatorTotalBalance(coordinator contracts.VRFCoordinatorV2_5) (linkTotalBalance *big.Int, nativeTokenTotalBalance *big.Int, err error) {
	linkTotalBalance, err = coordinator.GetLinkTotalBalance(context.Background())
	if err != nil {
		return nil, nil, fmt.Errorf("%s, err %w", ErrLinkTotalBalance, err)
	}
	nativeTokenTotalBalance, err = coordinator.GetNativeTokenTotalBalance(context.Background())
	if err != nil {
		return nil, nil, fmt.Errorf("%s, err %w", ErrNativeTokenBalance, err)
	}
	return
}

// RequestRandomnessAndWaitForFulfillment requests randomness from the consumer
// and blocks until the corresponding RandomWordsFulfilled event is observed.
func RequestRandomnessAndWaitForFulfillment(
	consumer contracts.VRFv2PlusLoadTestConsumer,
	coordinator contracts.VRFCoordinatorV2_5,
	vrfKeyData *vrfcommon.VRFKeyData,
	subID *big.Int,
	isNativeBilling bool,
	minimumConfirmations uint16,
	callbackGasLimit uint32,
	numberOfWords uint32,
	randomnessRequestCountPerRequest uint16,
	randomnessRequestCountPerRequestDeviation uint16,
	randomWordsFulfilledEventTimeout time.Duration,
	l zerolog.Logger,
) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) {
	logRandRequest(
		l,
		consumer.Address(),
		coordinator.Address(),
		subID,
		isNativeBilling,
		minimumConfirmations,
		callbackGasLimit,
		numberOfWords,
		vrfKeyData.KeyHash,
		randomnessRequestCountPerRequest,
		randomnessRequestCountPerRequestDeviation,
	)
	_, err := consumer.RequestRandomness(
		vrfKeyData.KeyHash,
		subID,
		minimumConfirmations,
		callbackGasLimit,
		isNativeBilling,
		numberOfWords,
		randomnessRequestCountPerRequest,
	)
	if err != nil {
		return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrRequestRandomness, err)
	}

	return WaitForRequestAndFulfillmentEvents(
		consumer.Address(),
		coordinator,
		vrfKeyData,
		subID,
isNativeBilling, + randomWordsFulfilledEventTimeout, + l, + ) +} + +func RequestRandomnessAndWaitForFulfillmentUpgraded( + consumer contracts.VRFv2PlusLoadTestConsumer, + coordinator contracts.VRFCoordinatorV2PlusUpgradedVersion, + vrfKeyData *vrfcommon.VRFKeyData, + subID *big.Int, + isNativeBilling bool, + minimumConfirmations uint16, + callbackGasLimit uint32, + numberOfWords uint32, + randomnessRequestCountPerRequest uint16, + randomnessRequestCountPerRequestDeviation uint16, + l zerolog.Logger, +) (*vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled, error) { + logRandRequest( + l, + consumer.Address(), + coordinator.Address(), + subID, + isNativeBilling, + minimumConfirmations, + callbackGasLimit, + numberOfWords, + vrfKeyData.KeyHash, + randomnessRequestCountPerRequest, + randomnessRequestCountPerRequestDeviation, + ) + _, err := consumer.RequestRandomness( + vrfKeyData.KeyHash, + subID, + minimumConfirmations, + callbackGasLimit, + isNativeBilling, + numberOfWords, + randomnessRequestCountPerRequest, + ) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrRequestRandomness, err) + } + + randomWordsRequestedEvent, err := coordinator.WaitForRandomWordsRequestedEvent( + [][32]byte{vrfKeyData.KeyHash}, + []*big.Int{subID}, + []common.Address{common.HexToAddress(consumer.Address())}, + time.Minute*1, + ) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitRandomWordsRequestedEvent, err) + } + + LogRandomnessRequestedEventUpgraded(l, coordinator, randomWordsRequestedEvent) + + randomWordsFulfilledEvent, err := coordinator.WaitForRandomWordsFulfilledEvent( + []*big.Int{subID}, + []*big.Int{randomWordsRequestedEvent.RequestId}, + time.Minute*2, + ) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitRandomWordsFulfilledEvent, err) + } + LogRandomWordsFulfilledEventUpgraded(l, coordinator, randomWordsFulfilledEvent) + + return randomWordsFulfilledEvent, err +} + +func 
SetupVRFV2PlusWrapperEnvironment( + env *test_env.CLClusterTestEnv, + vrfv2PlusTestConfig types.VRFv2PlusTestConfig, + linkToken contracts.LinkToken, + mockNativePLIFeed contracts.MockETHPLIFeed, + coordinator contracts.VRFCoordinatorV2_5, + keyHash [32]byte, + wrapperConsumerContractsAmount int, +) (*VRFV2PlusWrapperContracts, *big.Int, error) { + vrfv2PlusConfig := vrfv2PlusTestConfig.GetVRFv2PlusConfig().General + wrapperContracts, err := DeployVRFV2PlusDirectFundingContracts( + env.ContractDeployer, + env.EVMClient, + linkToken.Address(), + mockNativePLIFeed.Address(), + coordinator, + wrapperConsumerContractsAmount, + ) + if err != nil { + return nil, nil, err + } + + err = env.EVMClient.WaitForEvents() + + if err != nil { + return nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + err = wrapperContracts.VRFV2PlusWrapper.SetConfig( + *vrfv2PlusConfig.WrapperGasOverhead, + *vrfv2PlusConfig.CoordinatorGasOverhead, + *vrfv2PlusConfig.WrapperPremiumPercentage, + keyHash, + *vrfv2PlusConfig.WrapperMaxNumberOfWords, + *vrfv2PlusConfig.StalenessSeconds, + big.NewInt(*vrfv2PlusConfig.FallbackWeiPerUnitLink), + *vrfv2PlusConfig.FulfillmentFlatFeeLinkPPM, + *vrfv2PlusConfig.FulfillmentFlatFeeNativePPM, + ) + if err != nil { + return nil, nil, err + } + + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + + //fund sub + wrapperSubID, err := wrapperContracts.VRFV2PlusWrapper.GetSubID(context.Background()) + if err != nil { + return nil, nil, err + } + + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + + err = FundSubscriptions(env, big.NewFloat(*vrfv2PlusTestConfig.GetVRFv2PlusConfig().General.SubscriptionFundingAmountNative), big.NewFloat(*vrfv2PlusTestConfig.GetVRFv2PlusConfig().General.SubscriptionFundingAmountLink), linkToken, coordinator, []*big.Int{wrapperSubID}, 
vrfv2plus_config.BillingType(*vrfv2PlusTestConfig.GetVRFv2PlusConfig().General.SubscriptionBillingType)) + if err != nil { + return nil, nil, err + } + + //fund consumer with Link + err = linkToken.Transfer( + wrapperContracts.LoadTestConsumers[0].Address(), + big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(*vrfv2PlusConfig.WrapperConsumerFundingAmountLink)), + ) + if err != nil { + return nil, nil, err + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + + //fund consumer with Eth + err = wrapperContracts.LoadTestConsumers[0].Fund(big.NewFloat(*vrfv2PlusConfig.WrapperConsumerFundingAmountNativeToken)) + if err != nil { + return nil, nil, err + } + err = env.EVMClient.WaitForEvents() + if err != nil { + return nil, nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + return wrapperContracts, wrapperSubID, nil +} + +func DeployVRFV2PlusWrapperConsumers(contractDeployer contracts.ContractDeployer, linkTokenAddress string, vrfV2PlusWrapper contracts.VRFV2PlusWrapper, consumerContractsAmount int) ([]contracts.VRFv2PlusWrapperLoadTestConsumer, error) { + var consumers []contracts.VRFv2PlusWrapperLoadTestConsumer + for i := 1; i <= consumerContractsAmount; i++ { + loadTestConsumer, err := contractDeployer.DeployVRFV2PlusWrapperLoadTestConsumer(linkTokenAddress, vrfV2PlusWrapper.Address()) + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrAdvancedConsumer, err) + } + consumers = append(consumers, loadTestConsumer) + } + return consumers, nil +} + +func DeployVRFV2PlusDirectFundingContracts( + contractDeployer contracts.ContractDeployer, + chainClient blockchain.EVMClient, + linkTokenAddress string, + linkEthFeedAddress string, + coordinator contracts.VRFCoordinatorV2_5, + consumerContractsAmount int, +) (*VRFV2PlusWrapperContracts, error) { + + vrfv2PlusWrapper, err := contractDeployer.DeployVRFV2PlusWrapper(linkTokenAddress, linkEthFeedAddress, 
coordinator.Address()) + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrDeployWrapper, err) + } + err = chainClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + + consumers, err := DeployVRFV2PlusWrapperConsumers(contractDeployer, linkTokenAddress, vrfv2PlusWrapper, consumerContractsAmount) + if err != nil { + return nil, err + } + err = chainClient.WaitForEvents() + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitTXsComplete, err) + } + return &VRFV2PlusWrapperContracts{vrfv2PlusWrapper, consumers}, nil +} + +func DirectFundingRequestRandomnessAndWaitForFulfillment( + consumer contracts.VRFv2PlusWrapperLoadTestConsumer, + coordinator contracts.VRFCoordinatorV2_5, + vrfKeyData *vrfcommon.VRFKeyData, + subID *big.Int, + isNativeBilling bool, + minimumConfirmations uint16, + callbackGasLimit uint32, + numberOfWords uint32, + randomnessRequestCountPerRequest uint16, + randomnessRequestCountPerRequestDeviation uint16, + randomWordsFulfilledEventTimeout time.Duration, + l zerolog.Logger, +) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) { + logRandRequest( + l, + consumer.Address(), + coordinator.Address(), + subID, + isNativeBilling, + minimumConfirmations, + callbackGasLimit, + numberOfWords, + vrfKeyData.KeyHash, + randomnessRequestCountPerRequest, + randomnessRequestCountPerRequestDeviation, + ) + if isNativeBilling { + _, err := consumer.RequestRandomnessNative( + minimumConfirmations, + callbackGasLimit, + numberOfWords, + randomnessRequestCountPerRequest, + ) + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrRequestRandomnessDirectFundingNativePayment, err) + } + } else { + _, err := consumer.RequestRandomness( + minimumConfirmations, + callbackGasLimit, + numberOfWords, + randomnessRequestCountPerRequest, + ) + if err != nil { + return nil, fmt.Errorf("%s, err %w", ErrRequestRandomnessDirectFundingLinkPayment, err) + } + } + 
wrapperAddress, err := consumer.GetWrapper(context.Background()) + if err != nil { + return nil, fmt.Errorf("error getting wrapper address, err: %w", err) + } + return WaitForRequestAndFulfillmentEvents( + wrapperAddress.String(), + coordinator, + vrfKeyData, + subID, + isNativeBilling, + randomWordsFulfilledEventTimeout, + l, + ) +} + +func WaitForRequestAndFulfillmentEvents( + consumerAddress string, + coordinator contracts.VRFCoordinatorV2_5, + vrfKeyData *vrfcommon.VRFKeyData, + subID *big.Int, + isNativeBilling bool, + randomWordsFulfilledEventTimeout time.Duration, + l zerolog.Logger, +) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) { + randomWordsRequestedEvent, err := coordinator.WaitForRandomWordsRequestedEvent( + [][32]byte{vrfKeyData.KeyHash}, + []*big.Int{subID}, + []common.Address{common.HexToAddress(consumerAddress)}, + time.Minute*1, + ) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitRandomWordsRequestedEvent, err) + } + + LogRandomnessRequestedEvent(l, coordinator, randomWordsRequestedEvent, isNativeBilling) + + randomWordsFulfilledEvent, err := coordinator.WaitForRandomWordsFulfilledEvent( + []*big.Int{subID}, + []*big.Int{randomWordsRequestedEvent.RequestId}, + randomWordsFulfilledEventTimeout, + ) + if err != nil { + return nil, fmt.Errorf("%s, err %w", vrfcommon.ErrWaitRandomWordsFulfilledEvent, err) + } + + LogRandomWordsFulfilledEvent(l, coordinator, randomWordsFulfilledEvent, isNativeBilling) + return randomWordsFulfilledEvent, err +} + +func WaitForRequestCountEqualToFulfilmentCount(consumer contracts.VRFv2PlusLoadTestConsumer, timeout time.Duration, wg *sync.WaitGroup) (*big.Int, *big.Int, error) { + metricsChannel := make(chan *contracts.VRFLoadTestMetrics) + metricsErrorChannel := make(chan error) + + testContext, testCancel := context.WithTimeout(context.Background(), timeout) + defer testCancel() + + ticker := time.NewTicker(time.Second * 1) + var metrics *contracts.VRFLoadTestMetrics + 
for { + select { + case <-testContext.Done(): + ticker.Stop() + wg.Done() + return metrics.RequestCount, metrics.FulfilmentCount, + fmt.Errorf("timeout waiting for rand request and fulfilments to be equal AFTER performance test was executed. Request Count: %d, Fulfilment Count: %d", + metrics.RequestCount.Uint64(), metrics.FulfilmentCount.Uint64()) + case <-ticker.C: + go retrieveLoadTestMetrics(consumer, metricsChannel, metricsErrorChannel) + case metrics = <-metricsChannel: + if metrics.RequestCount.Cmp(metrics.FulfilmentCount) == 0 { + ticker.Stop() + wg.Done() + return metrics.RequestCount, metrics.FulfilmentCount, nil + } + case err := <-metricsErrorChannel: + ticker.Stop() + wg.Done() + return nil, nil, err + } + } +} + +func ReturnFundsForFulfilledRequests(client blockchain.EVMClient, coordinator contracts.VRFCoordinatorV2_5, l zerolog.Logger) error { + linkTotalBalance, err := coordinator.GetLinkTotalBalance(context.Background()) + if err != nil { + return fmt.Errorf("Error getting PLI total balance, err: %w", err) + } + defaultWallet := client.GetDefaultWallet().Address() + l.Info(). + Str("PLI amount", linkTotalBalance.String()). + Str("Returning to", defaultWallet). + Msg("Returning PLI for fulfilled requests") + err = coordinator.Withdraw( + common.HexToAddress(defaultWallet), + ) + if err != nil { + return fmt.Errorf("Error withdrawing PLI from coordinator to default wallet, err: %w", err) + } + nativeTotalBalance, err := coordinator.GetNativeTokenTotalBalance(context.Background()) + if err != nil { + return fmt.Errorf("Error getting NATIVE total balance, err: %w", err) + } + l.Info(). + Str("Native Token amount", nativeTotalBalance.String()). + Str("Returning to", defaultWallet). 
+ Msg("Returning Native Token for fulfilled requests") + err = coordinator.WithdrawNative( + common.HexToAddress(defaultWallet), + ) + if err != nil { + return fmt.Errorf("Error withdrawing NATIVE from coordinator to default wallet, err: %w", err) + } + return nil +} + +func retrieveLoadTestMetrics( + consumer contracts.VRFv2PlusLoadTestConsumer, + metricsChannel chan *contracts.VRFLoadTestMetrics, + metricsErrorChannel chan error, +) { + metrics, err := consumer.GetLoadTestMetrics(context.Background()) + if err != nil { + metricsErrorChannel <- err + } + metricsChannel <- metrics +} + +func LogSubDetails(l zerolog.Logger, subscription vrf_coordinator_v2_5.GetSubscription, subID *big.Int, coordinator contracts.VRFCoordinatorV2_5) { + l.Debug(). + Str("Coordinator", coordinator.Address()). + Str("Link Balance", (*commonassets.Link)(subscription.Balance).Link()). + Str("Native Token Balance", assets.FormatWei(subscription.NativeBalance)). + Str("Subscription ID", subID.String()). + Str("Subscription Owner", subscription.Owner.String()). + Interface("Subscription Consumers", subscription.Consumers). + Msg("Subscription Data") +} + +func LogRandomnessRequestedEventUpgraded( + l zerolog.Logger, + coordinator contracts.VRFCoordinatorV2PlusUpgradedVersion, + randomWordsRequestedEvent *vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested, +) { + l.Debug(). + Str("Coordinator", coordinator.Address()). + Str("Request ID", randomWordsRequestedEvent.RequestId.String()). + Str("Subscription ID", randomWordsRequestedEvent.SubId.String()). + Str("Sender Address", randomWordsRequestedEvent.Sender.String()). + Interface("Keyhash", fmt.Sprintf("0x%x", randomWordsRequestedEvent.KeyHash)). + Uint32("Callback Gas Limit", randomWordsRequestedEvent.CallbackGasLimit). + Uint32("Number of Words", randomWordsRequestedEvent.NumWords). + Uint16("Minimum Request Confirmations", randomWordsRequestedEvent.MinimumRequestConfirmations). 
+ Msg("RandomnessRequested Event") +} + +func LogRandomWordsFulfilledEventUpgraded( + l zerolog.Logger, + coordinator contracts.VRFCoordinatorV2PlusUpgradedVersion, + randomWordsFulfilledEvent *vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled, +) { + l.Debug(). + Str("Coordinator", coordinator.Address()). + Str("Total Payment in Juels", randomWordsFulfilledEvent.Payment.String()). + Str("TX Hash", randomWordsFulfilledEvent.Raw.TxHash.String()). + Str("Subscription ID", randomWordsFulfilledEvent.SubID.String()). + Str("Request ID", randomWordsFulfilledEvent.RequestId.String()). + Bool("Success", randomWordsFulfilledEvent.Success). + Msg("RandomWordsFulfilled Event (TX metadata)") +} + +func LogRandomnessRequestedEvent( + l zerolog.Logger, + coordinator contracts.VRFCoordinatorV2_5, + randomWordsRequestedEvent *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested, + isNativeBilling bool, +) { + l.Info(). + Str("Coordinator", coordinator.Address()). + Bool("Native Billing", isNativeBilling). + Str("Request ID", randomWordsRequestedEvent.RequestId.String()). + Str("Subscription ID", randomWordsRequestedEvent.SubId.String()). + Str("Sender Address", randomWordsRequestedEvent.Sender.String()). + Interface("Keyhash", fmt.Sprintf("0x%x", randomWordsRequestedEvent.KeyHash)). + Uint32("Callback Gas Limit", randomWordsRequestedEvent.CallbackGasLimit). + Uint32("Number of Words", randomWordsRequestedEvent.NumWords). + Uint16("Minimum Request Confirmations", randomWordsRequestedEvent.MinimumRequestConfirmations). + Msg("RandomnessRequested Event") +} + +func LogRandomWordsFulfilledEvent( + l zerolog.Logger, + coordinator contracts.VRFCoordinatorV2_5, + randomWordsFulfilledEvent *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, + isNativeBilling bool, +) { + l.Info(). + Bool("Native Billing", isNativeBilling). + Str("Coordinator", coordinator.Address()). + Str("Total Payment", randomWordsFulfilledEvent.Payment.String()). 
+ Str("TX Hash", randomWordsFulfilledEvent.Raw.TxHash.String()). + Str("Subscription ID", randomWordsFulfilledEvent.SubId.String()). + Str("Request ID", randomWordsFulfilledEvent.RequestId.String()). + Bool("Success", randomWordsFulfilledEvent.Success). + Msg("RandomWordsFulfilled Event (TX metadata)") +} + +func LogMigrationCompletedEvent(l zerolog.Logger, migrationCompletedEvent *vrf_coordinator_v2_5.VRFCoordinatorV25MigrationCompleted, vrfv2PlusContracts *vrfcommon.VRFContracts) { + l.Info(). + Str("Subscription ID", migrationCompletedEvent.SubId.String()). + Str("Migrated From Coordinator", vrfv2PlusContracts.CoordinatorV2Plus.Address()). + Str("Migrated To Coordinator", migrationCompletedEvent.NewCoordinator.String()). + Msg("MigrationCompleted Event") +} + +func LogSubDetailsAfterMigration(l zerolog.Logger, newCoordinator contracts.VRFCoordinatorV2PlusUpgradedVersion, subID *big.Int, migratedSubscription vrf_v2plus_upgraded_version.GetSubscription) { + l.Info(). + Str("New Coordinator", newCoordinator.Address()). + Str("Subscription ID", subID.String()). + Str("Juels Balance", migratedSubscription.Balance.String()). + Str("Native Token Balance", migratedSubscription.NativeBalance.String()). + Str("Subscription Owner", migratedSubscription.Owner.String()). + Interface("Subscription Consumers", migratedSubscription.Consumers). + Msg("Subscription Data After Migration to New Coordinator") +} + +func LogFulfillmentDetailsLinkBilling( + l zerolog.Logger, + wrapperConsumerJuelsBalanceBeforeRequest *big.Int, + wrapperConsumerJuelsBalanceAfterRequest *big.Int, + consumerStatus vrfv2plus_wrapper_load_test_consumer.GetRequestStatus, + randomWordsFulfilledEvent *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, +) { + l.Info(). + Str("Consumer Balance Before Request (Link)", (*commonassets.Link)(wrapperConsumerJuelsBalanceBeforeRequest).Link()). 
+ Str("Consumer Balance After Request (Link)", (*commonassets.Link)(wrapperConsumerJuelsBalanceAfterRequest).Link()). + Bool("Fulfilment Status", consumerStatus.Fulfilled). + Str("Paid by Consumer Contract (Link)", (*commonassets.Link)(consumerStatus.Paid).Link()). + Str("Paid by Coordinator Sub (Link)", (*commonassets.Link)(randomWordsFulfilledEvent.Payment).Link()). + Str("RequestTimestamp", consumerStatus.RequestTimestamp.String()). + Str("FulfilmentTimestamp", consumerStatus.FulfilmentTimestamp.String()). + Str("RequestBlockNumber", consumerStatus.RequestBlockNumber.String()). + Str("FulfilmentBlockNumber", consumerStatus.FulfilmentBlockNumber.String()). + Str("TX Hash", randomWordsFulfilledEvent.Raw.TxHash.String()). + Msg("Random Words Fulfilment Details For Link Billing") +} + +func LogFulfillmentDetailsNativeBilling( + l zerolog.Logger, + wrapperConsumerBalanceBeforeRequestWei *big.Int, + wrapperConsumerBalanceAfterRequestWei *big.Int, + consumerStatus vrfv2plus_wrapper_load_test_consumer.GetRequestStatus, + randomWordsFulfilledEvent *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, +) { + l.Info(). + Str("Consumer Balance Before Request", assets.FormatWei(wrapperConsumerBalanceBeforeRequestWei)). + Str("Consumer Balance After Request", assets.FormatWei(wrapperConsumerBalanceAfterRequestWei)). + Bool("Fulfilment Status", consumerStatus.Fulfilled). + Str("Paid by Consumer Contract", assets.FormatWei(consumerStatus.Paid)). + Str("Paid by Coordinator Sub", assets.FormatWei(randomWordsFulfilledEvent.Payment)). + Str("RequestTimestamp", consumerStatus.RequestTimestamp.String()). + Str("FulfilmentTimestamp", consumerStatus.FulfilmentTimestamp.String()). + Str("RequestBlockNumber", consumerStatus.RequestBlockNumber.String()). + Str("FulfilmentBlockNumber", consumerStatus.FulfilmentBlockNumber.String()). + Str("TX Hash", randomWordsFulfilledEvent.Raw.TxHash.String()). 
+ Msg("Random Words Request Fulfilment Details For Native Billing") +} + +func logRandRequest( + l zerolog.Logger, + consumer string, + coordinator string, + subID *big.Int, + isNativeBilling bool, + minimumConfirmations uint16, + callbackGasLimit uint32, + numberOfWords uint32, + keyHash [32]byte, + randomnessRequestCountPerRequest uint16, + randomnessRequestCountPerRequestDeviation uint16) { + l.Info(). + Str("Consumer", consumer). + Str("Coordinator", coordinator). + Str("SubID", subID.String()). + Bool("IsNativePayment", isNativeBilling). + Uint16("MinimumConfirmations", minimumConfirmations). + Uint32("CallbackGasLimit", callbackGasLimit). + Uint32("NumberOfWords", numberOfWords). + Str("KeyHash", fmt.Sprintf("0x%x", keyHash)). + Uint16("RandomnessRequestCountPerRequest", randomnessRequestCountPerRequest). + Uint16("RandomnessRequestCountPerRequestDeviation", randomnessRequestCountPerRequestDeviation). + Msg("Requesting randomness") +} diff --git a/integration-tests/benchmark/keeper_test.go b/integration-tests/benchmark/keeper_test.go new file mode 100644 index 00000000..fad40f51 --- /dev/null +++ b/integration-tests/benchmark/keeper_test.go @@ -0,0 +1,433 @@ +package benchmark + +import ( + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/blockchain" + ctf_config "github.com/goplugin/plugin-testing-framework/config" + env_client "github.com/goplugin/plugin-testing-framework/k8s/client" + "github.com/goplugin/plugin-testing-framework/k8s/environment" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/cdk8s/blockscout" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/plugin" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/ethereum" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/reorg" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/networks" + + 
"github.com/goplugin/pluginv3.0/integration-tests/actions"
	"github.com/goplugin/pluginv3.0/integration-tests/contracts"
	eth_contracts "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum"
	"github.com/goplugin/pluginv3.0/integration-tests/testsetups"
	"github.com/goplugin/pluginv3.0/integration-tests/types"

	tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig"
)

var (
	// Base node TOML applied to every benchmark node.
	keeperBenchmarkBaseTOML = `[Feature]
LogPoller = true

[OCR2]
Enabled = true

[P2P]
[P2P.V2]
Enabled = true
AnnounceAddresses = ["0.0.0.0:6690"]
ListenAddresses = ["0.0.0.0:6690"]
[Keeper]
TurnLookBack = 0
[WebServer]
HTTPWriteTimeout = '1h'`

	// EVM overrides applied when running against the simulated non-dev network.
	simulatedEVMNonDevTOML = `
Enabled = true
FinalityDepth = 50
LogPollInterval = '1s'

[EVM.HeadTracker]
HistoryDepth = 100

[EVM.GasEstimator]
Mode = 'FixedPrice'
LimitDefault = 5_000_000`

	// K8s resource profiles used for performance-grade runs.
	performancePluginResources = map[string]interface{}{
		"resources": map[string]interface{}{
			"requests": map[string]interface{}{
				"cpu":    "1000m",
				"memory": "4Gi",
			},
			"limits": map[string]interface{}{
				"cpu":    "1000m",
				"memory": "4Gi",
			},
		},
	}
	performanceDbResources = map[string]interface{}{
		"resources": map[string]interface{}{
			"requests": map[string]interface{}{
				"cpu":    "1000m",
				"memory": "1Gi",
			},
			"limits": map[string]interface{}{
				"cpu":    "1000m",
				"memory": "1Gi",
			},
		},
		"stateful": true,
		"capacity": "10Gi",
	}

	// Smaller K8s resource profiles used for soak-grade runs.
	soakPluginResources = map[string]interface{}{
		"resources": map[string]interface{}{
			"requests": map[string]interface{}{
				"cpu":    "350m",
				"memory": "1Gi",
			},
			"limits": map[string]interface{}{
				"cpu":    "350m",
				"memory": "1Gi",
			},
		},
	}
	soakDbResources = map[string]interface{}{
		"resources": map[string]interface{}{
			"requests": map[string]interface{}{
				"cpu":    "250m",
				"memory": "256Mi",
			},
			"limits": map[string]interface{}{
				"cpu":    "250m",
				"memory": "256Mi",
			},
		},
		"stateful": true,
		"capacity": "10Gi",
	}
)

// NetworkConfig holds per-network benchmark tuning values.
type NetworkConfig struct {
	upkeepSLA  int64         // max blocks allowed between eligibility and perform
	blockTime  time.Duration // expected block production interval
	deltaStage time.Duration // OCR delta stage used for the network
	funding    *big.Float    // node funding amount (native units)
}

// TestAutomationBenchmark runs the keeper/automation benchmark against the
// network and registry version selected via test configuration.
func TestAutomationBenchmark(t *testing.T) {
	l := logging.GetTestLogger(t)
	testType, err := tc.GetConfigurationNameFromEnv()
	require.NoError(t, err, "Error getting test type")

	config, err := tc.GetConfig(testType, tc.Keeper)
	require.NoError(t, err, "Error getting test config")

	testEnvironment, benchmarkNetwork := SetupAutomationBenchmarkEnv(t, &config)
	if testEnvironment.WillUseRemoteRunner() {
		return
	}
	networkName := strings.ReplaceAll(benchmarkNetwork.Name, " ", "")
	testName := fmt.Sprintf("%s%s", networkName, *config.Keeper.Common.RegistryToTest)
	l.Info().Str("Test Name", testName).Msg("Running Benchmark Test")
	benchmarkTestNetwork := getNetworkConfig(networkName, &config)

	l.Info().Str("Namespace", testEnvironment.Cfg.Namespace).Msg("Connected to Keepers Benchmark Environment")

	chainClient, err := blockchain.NewEVMClient(benchmarkNetwork, testEnvironment, l)
	require.NoError(t, err, "Error connecting to blockchain")
	registryVersions := addRegistry(&config)
	keeperBenchmarkTest := testsetups.NewKeeperBenchmarkTest(t,
		testsetups.KeeperBenchmarkTestInputs{
			BlockchainClient: chainClient,
			RegistryVersions: registryVersions,
			KeeperRegistrySettings: &contracts.KeeperRegistrySettings{
				PaymentPremiumPPB:    uint32(0),
				FlatFeeMicroPLI:      uint32(40000),
				BlockCountPerTurn:    big.NewInt(100),
				CheckGasLimit:        uint32(45_000_000), //45M
				StalenessSeconds:     big.NewInt(90_000),
				GasCeilingMultiplier: uint16(2),
				MaxPerformGas:        uint32(*config.Keeper.Common.MaxPerformGas),
				MinUpkeepSpend:       big.NewInt(0),
				FallbackGasPrice:     big.NewInt(2e11),
				FallbackLinkPrice:    big.NewInt(2e18),
				MaxCheckDataSize:     uint32(5_000),
				MaxPerformDataSize:   uint32(5_000),
			},
			Upkeeps: &testsetups.UpkeepConfig{
				NumberOfUpkeeps: *config.Keeper.Common.NumberOfUpkeeps,
				CheckGasToBurn:  *config.Keeper.Common.CheckGasToBurn,
PerformGasToBurn:    *config.Keeper.Common.PerformGasToBurn,
				BlockRange:          *config.Keeper.Common.BlockRange,
				BlockInterval:       *config.Keeper.Common.BlockInterval,
				UpkeepGasLimit:      *config.Keeper.Common.UpkeepGasLimit,
				FirstEligibleBuffer: 1,
			},
			Contracts: &testsetups.PreDeployedContracts{
				RegistrarAddress: *config.Keeper.Common.RegistrarAddress,
				RegistryAddress:  *config.Keeper.Common.RegistryAddress,
				LinkTokenAddress: *config.Keeper.Common.LinkTokenAddress,
				EthFeedAddress:   *config.Keeper.Common.EthFeedAddress,
				GasFeedAddress:   *config.Keeper.Common.GasFeedAddress,
			},
			PluginNodeFunding: benchmarkTestNetwork.funding,
			UpkeepSLA:         benchmarkTestNetwork.upkeepSLA,
			BlockTime:         benchmarkTestNetwork.blockTime,
			DeltaStage:        benchmarkTestNetwork.deltaStage,
			ForceSingleTxnKey: *config.Keeper.Common.ForceSingleTxKey,
			DeleteJobsOnEnd:   *config.Keeper.Common.DeleteJobsOnEnd,
		},
	)
	t.Cleanup(func() {
		if err = actions.TeardownRemoteSuite(keeperBenchmarkTest.TearDownVals(t)); err != nil {
			l.Error().Err(err).Msg("Error when tearing down remote suite")
		}
	})
	keeperBenchmarkTest.Setup(testEnvironment, &config)
	keeperBenchmarkTest.Run()
}

// addRegistry maps the configured RegistryToTest string to the registry
// version(s) the benchmark should deploy. Unknown values silently fall back
// to a single 2_0 registry.
func addRegistry(config *tc.TestConfig) []eth_contracts.KeeperRegistryVersion {
	switch *config.Keeper.Common.RegistryToTest {
	case "1_1":
		return []eth_contracts.KeeperRegistryVersion{eth_contracts.RegistryVersion_1_1}
	case "1_2":
		return []eth_contracts.KeeperRegistryVersion{eth_contracts.RegistryVersion_1_2}
	case "1_3":
		return []eth_contracts.KeeperRegistryVersion{eth_contracts.RegistryVersion_1_3}
	case "2_0":
		return []eth_contracts.KeeperRegistryVersion{eth_contracts.RegistryVersion_2_0}
	case "2_1":
		return []eth_contracts.KeeperRegistryVersion{eth_contracts.RegistryVersion_2_1}
	case "2_0-1_3":
		return []eth_contracts.KeeperRegistryVersion{eth_contracts.RegistryVersion_2_0, eth_contracts.RegistryVersion_1_3}
	case "2_1-2_0-1_3":
		return []eth_contracts.KeeperRegistryVersion{eth_contracts.RegistryVersion_2_1,
			eth_contracts.RegistryVersion_2_0, eth_contracts.RegistryVersion_1_3}
	case "2_0-Multiple":
		return repeatRegistries(eth_contracts.RegistryVersion_2_0, *config.Keeper.Common.NumberOfRegistries)
	case "2_1-Multiple":
		return repeatRegistries(eth_contracts.RegistryVersion_2_1, *config.Keeper.Common.NumberOfRegistries)
	default:
		return []eth_contracts.KeeperRegistryVersion{eth_contracts.RegistryVersion_2_0}
	}
}

// repeatRegistries returns numberOfRegistries copies of registryVersion.
func repeatRegistries(registryVersion eth_contracts.KeeperRegistryVersion, numberOfRegistries int) []eth_contracts.KeeperRegistryVersion {
	// Pre-size the slice: the final length is known up front.
	repeatedRegistries := make([]eth_contracts.KeeperRegistryVersion, 0, numberOfRegistries)
	for i := 0; i < numberOfRegistries; i++ {
		repeatedRegistries = append(repeatedRegistries, registryVersion)
	}
	return repeatedRegistries
}

// getNetworkConfig looks up the per-network benchmark settings. Simulated geth
// networks keep their hard-coded funding; all others take funding from config.
func getNetworkConfig(networkName string, config *tc.TestConfig) NetworkConfig {
	var nc NetworkConfig
	var ok bool
	if nc, ok = networkConfig[networkName]; !ok {
		return NetworkConfig{}
	}

	if networkName == "SimulatedGeth" || networkName == "geth" {
		return nc
	}

	nc.funding = big.NewFloat(*config.Common.PluginNodeFunding)

	return nc
}

// networkConfig holds the benchmark tuning values for each supported network.
var networkConfig = map[string]NetworkConfig{
	"SimulatedGeth": {
		upkeepSLA:  int64(120), //2 minutes
		blockTime:  time.Second,
		deltaStage: 30 * time.Second,
		funding:    big.NewFloat(100_000),
	},
	"geth": {
		upkeepSLA:  int64(120), //2 minutes
		blockTime:  time.Second,
		deltaStage: 30 * time.Second,
		funding:    big.NewFloat(100_000),
	},
	"GoerliTestnet": {
		upkeepSLA:  int64(4),
		blockTime:  12 * time.Second,
		deltaStage: time.Duration(0),
	},
	"ArbitrumGoerli": {
		upkeepSLA:  int64(20),
		blockTime:  time.Second,
		deltaStage: time.Duration(0),
	},
	"OptimismGoerli": {
		upkeepSLA:  int64(20),
		blockTime:  time.Second,
		deltaStage: time.Duration(0),
	},
	"SepoliaTestnet": {
		upkeepSLA:  int64(4),
		blockTime:  12 * time.Second,
		deltaStage: time.Duration(0),
	},
	"PolygonMumbai": {
		upkeepSLA:  int64(4),
		blockTime:  12 * time.Second,
		deltaStage: time.Duration(0),
	},
	"BaseGoerli": {
		upkeepSLA:  int64(60),
		blockTime:  2 * time.Second,
		deltaStage: 20 * time.Second,
	},
	"ArbitrumSepolia": {
		upkeepSLA:  int64(120),
		blockTime:  time.Second,
		deltaStage: 20 * time.Second,
	},
	"LineaGoerli": {
		upkeepSLA:  int64(120),
		blockTime:  time.Second,
		deltaStage: 20 * time.Second,
	},
}

// SetupAutomationBenchmarkEnv builds the K8s environment for the benchmark.
// (Definition continues beyond this chunk.)
func SetupAutomationBenchmarkEnv(t *testing.T, keeperTestConfig types.KeeperBenchmarkTestConfig) (*environment.Environment, blockchain.EVMNetwork) {
	l := logging.GetTestLogger(t)
	testNetwork := networks.MustGetSelectedNetworkConfig(keeperTestConfig.GetNetworkConfig())[0] // Environment currently being used to run benchmark test on
	blockTime := "1"
	networkDetailTOML := `MinIncomingConfirmations = 1`

	numberOfNodes := *keeperTestConfig.GetKeeperConfig().Common.NumberOfNodes

	if strings.Contains(*keeperTestConfig.GetKeeperConfig().Common.RegistryToTest, "2_") {
		numberOfNodes++
	}

	testEnvironment := environment.New(&environment.Config{
		TTL: time.Hour * 720, // 30 days,
		NamespacePrefix: fmt.Sprintf(
			"automation-%s-%s-%s",
			strings.ToLower(keeperTestConfig.GetConfigurationName()),
			strings.ReplaceAll(strings.ToLower(testNetwork.Name), " ", "-"),
			strings.ReplaceAll(strings.ToLower(*keeperTestConfig.GetKeeperConfig().Common.RegistryToTest), "_", "-"),
		),
		Test:               t,
		PreventPodEviction: true,
	})

	dbResources := performanceDbResources
	pluginResources := performancePluginResources
	if strings.ToLower(keeperTestConfig.GetConfigurationName()) == "soak" {
		pluginResources = soakPluginResources
		dbResources = soakDbResources
	}

	// Test can run on simulated, simulated-non-dev, testnets
	if testNetwork.Name == networks.SimulatedEVMNonDev.Name {
		networkDetailTOML = simulatedEVMNonDevTOML
		testEnvironment.
+ AddHelm(reorg.New(&reorg.Props{ + NetworkName: testNetwork.Name, + Values: map[string]interface{}{ + "geth": map[string]interface{}{ + "tx": map[string]interface{}{ + "replicas": numberOfNodes, + }, + "miner": map[string]interface{}{ + "replicas": 2, + }, + }, + }, + })) + } else { + testEnvironment. + AddHelm(ethereum.New(ðereum.Props{ + NetworkName: testNetwork.Name, + Simulated: testNetwork.Simulated, + WsURLs: testNetwork.URLs, + Values: map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "4000m", + "memory": "4Gi", + }, + "limits": map[string]interface{}{ + "cpu": "4000m", + "memory": "4Gi", + }, + }, + "geth": map[string]interface{}{ + "blocktime": blockTime, + }, + }, + })) + } + + // deploy blockscout if running on simulated + if testNetwork.Simulated { + testEnvironment. + AddChart(blockscout.New(&blockscout.Props{ + Name: "geth-blockscout", + WsURL: testNetwork.URLs[0], + HttpURL: testNetwork.HTTPURLs[0]})) + } + err := testEnvironment.Run() + require.NoError(t, err, "Error launching test environment") + + if testEnvironment.WillUseRemoteRunner() { + return testEnvironment, testNetwork + } + + // separate RPC urls per CL node + internalWsURLs := make([]string, 0) + internalHttpURLs := make([]string, 0) + for i := 0; i < numberOfNodes; i++ { + // for simulated-nod-dev each CL node gets its own RPC node + if testNetwork.Name == networks.SimulatedEVMNonDev.Name { + podName := fmt.Sprintf("%s-ethereum-geth:%d", testNetwork.Name, i) + txNodeInternalWs, err := testEnvironment.Fwd.FindPort(podName, "geth", "ws-rpc").As(env_client.RemoteConnection, env_client.WS) + require.NoError(t, err, "Error finding WS ports") + internalWsURLs = append(internalWsURLs, txNodeInternalWs) + txNodeInternalHttp, err := testEnvironment.Fwd.FindPort(podName, "geth", "http-rpc").As(env_client.RemoteConnection, env_client.HTTP) + require.NoError(t, err, "Error finding HTTP ports") + internalHttpURLs = append(internalHttpURLs, 
txNodeInternalHttp) + // for testnets with more than 1 RPC nodes + } else if len(testNetwork.URLs) > 1 { + internalWsURLs = append(internalWsURLs, testNetwork.URLs[i%len(testNetwork.URLs)]) + internalHttpURLs = append(internalHttpURLs, testNetwork.HTTPURLs[i%len(testNetwork.URLs)]) + // for simulated and testnets with 1 RPC node + } else { + internalWsURLs = append(internalWsURLs, testNetwork.URLs[0]) + internalHttpURLs = append(internalHttpURLs, testNetwork.HTTPURLs[0]) + } + } + l.Debug().Strs("internalWsURLs", internalWsURLs).Strs("internalHttpURLs", internalHttpURLs).Msg("internalURLs") + + for i := 0; i < numberOfNodes; i++ { + testNetwork.HTTPURLs = []string{internalHttpURLs[i]} + testNetwork.URLs = []string{internalWsURLs[i]} + + var overrideFn = func(_ interface{}, target interface{}) { + ctf_config.MustConfigOverridePluginVersion(keeperTestConfig.GetPluginImageConfig(), target) + ctf_config.MightConfigOverridePyroscopeKey(keeperTestConfig.GetPyroscopeConfig(), target) + } + + cd := plugin.NewWithOverride(i, map[string]any{ + "toml": networks.AddNetworkDetailedConfig(keeperBenchmarkBaseTOML, keeperTestConfig.GetPyroscopeConfig(), networkDetailTOML, testNetwork), + "plugin": pluginResources, + "db": dbResources, + }, keeperTestConfig.GetPluginImageConfig(), overrideFn) + + testEnvironment.AddHelm(cd) + } + err = testEnvironment.Run() + require.NoError(t, err, "Error launching test environment") + return testEnvironment, testNetwork +} diff --git a/integration-tests/chaos/automation_chaos_test.go b/integration-tests/chaos/automation_chaos_test.go new file mode 100644 index 00000000..d2a74bb7 --- /dev/null +++ b/integration-tests/chaos/automation_chaos_test.go @@ -0,0 +1,325 @@ +package chaos + +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/plugin-testing-framework/blockchain" + ctf_config 
"github.com/goplugin/plugin-testing-framework/config" + "github.com/goplugin/plugin-testing-framework/k8s/chaos" + "github.com/goplugin/plugin-testing-framework/k8s/environment" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/cdk8s/blockscout" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/plugin" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/ethereum" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/networks" + "github.com/goplugin/plugin-testing-framework/utils/ptr" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + eth_contracts "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +var ( + baseTOML = `[Feature] +LogPoller = true + +[OCR2] +Enabled = true + +[P2P] +[P2P.V2] +AnnounceAddresses = ["0.0.0.0:6690"] +ListenAddresses = ["0.0.0.0:6690"]` + + defaultAutomationSettings = map[string]interface{}{ + "replicas": 6, + "toml": "", + "db": map[string]interface{}{ + "stateful": true, + "capacity": "1Gi", + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "250m", + "memory": "256Mi", + }, + "limits": map[string]interface{}{ + "cpu": "250m", + "memory": "256Mi", + }, + }, + }, + } + + defaultEthereumSettings = ethereum.Props{ + Values: map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "4000m", + "memory": "4Gi", + }, + "limits": map[string]interface{}{ + "cpu": "4000m", + "memory": "4Gi", + }, + }, + "geth": map[string]interface{}{ + "blocktime": "1", + }, + }, + } + + defaultOCRRegistryConfig = contracts.KeeperRegistrySettings{ + PaymentPremiumPPB: uint32(200000000), + FlatFeeMicroPLI: 
uint32(0), + BlockCountPerTurn: big.NewInt(10), + CheckGasLimit: uint32(2500000), + StalenessSeconds: big.NewInt(90000), + GasCeilingMultiplier: uint16(1), + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: uint32(5000000), + FallbackGasPrice: big.NewInt(2e11), + FallbackLinkPrice: big.NewInt(2e18), + MaxCheckDataSize: uint32(5000), + MaxPerformDataSize: uint32(5000), + } +) + +func getDefaultAutomationSettings(config *tc.TestConfig) map[string]interface{} { + defaultAutomationSettings["toml"] = networks.AddNetworksConfig(baseTOML, config.Pyroscope, networks.MustGetSelectedNetworkConfig(config.Network)[0]) + return defaultAutomationSettings +} + +func getDefaultEthereumSettings(config *tc.TestConfig) *ethereum.Props { + network := networks.MustGetSelectedNetworkConfig(config.Network)[0] + defaultEthereumSettings.NetworkName = network.Name + defaultEthereumSettings.Simulated = network.Simulated + defaultEthereumSettings.WsURLs = network.URLs + + return &defaultEthereumSettings +} + +type KeeperConsumerContracts int32 + +const ( + BasicCounter KeeperConsumerContracts = iota + + defaultUpkeepGasLimit = uint32(2500000) + defaultLinkFunds = int64(9e18) + numberOfUpkeeps = 2 +) + +func TestAutomationChaos(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + registryVersions := map[string]eth_contracts.KeeperRegistryVersion{ + "registry_2_0": eth_contracts.RegistryVersion_2_0, + "registry_2_1": eth_contracts.RegistryVersion_2_1, + } + + for name, registryVersion := range registryVersions { + registryVersion := registryVersion + t.Run(name, func(t *testing.T) { + t.Parallel() + + config, err := tc.GetConfig("Chaos", tc.Automation) + if err != nil { + t.Fatal(err) + } + + var overrideFn = func(_ interface{}, target interface{}) { + ctf_config.MustConfigOverridePluginVersion(config.PluginImage, target) + ctf_config.MightConfigOverridePyroscopeKey(config.Pyroscope, target) + } + + pluginCfg := plugin.NewWithOverride(0, getDefaultAutomationSettings(&config), 
config.PluginImage, overrideFn) + + testCases := map[string]struct { + networkChart environment.ConnectedChart + clChart environment.ConnectedChart + chaosFunc chaos.ManifestFunc + chaosProps *chaos.Props + }{ + // see ocr_chaos.test.go for comments + PodChaosFailMinorityNodes: { + ethereum.New(getDefaultEthereumSettings(&config)), + pluginCfg, + chaos.NewFailPods, + &chaos.Props{ + LabelsSelector: &map[string]*string{ChaosGroupMinority: ptr.Ptr("1")}, + DurationStr: "1m", + }, + }, + PodChaosFailMajorityNodes: { + ethereum.New(getDefaultEthereumSettings(&config)), + pluginCfg, + chaos.NewFailPods, + &chaos.Props{ + LabelsSelector: &map[string]*string{ChaosGroupMajority: ptr.Ptr("1")}, + DurationStr: "1m", + }, + }, + PodChaosFailMajorityDB: { + ethereum.New(getDefaultEthereumSettings(&config)), + pluginCfg, + chaos.NewFailPods, + &chaos.Props{ + LabelsSelector: &map[string]*string{ChaosGroupMajority: ptr.Ptr("1")}, + DurationStr: "1m", + ContainerNames: &[]*string{ptr.Ptr("plugin-db")}, + }, + }, + NetworkChaosFailMajorityNetwork: { + ethereum.New(getDefaultEthereumSettings(&config)), + pluginCfg, + chaos.NewNetworkPartition, + &chaos.Props{ + FromLabels: &map[string]*string{ChaosGroupMajority: ptr.Ptr("1")}, + ToLabels: &map[string]*string{ChaosGroupMinority: ptr.Ptr("1")}, + DurationStr: "1m", + }, + }, + NetworkChaosFailBlockchainNode: { + ethereum.New(getDefaultEthereumSettings(&config)), + pluginCfg, + chaos.NewNetworkPartition, + &chaos.Props{ + FromLabels: &map[string]*string{"app": ptr.Ptr("geth")}, + ToLabels: &map[string]*string{ChaosGroupMajorityPlus: ptr.Ptr("1")}, + DurationStr: "1m", + }, + }, + } + + for name, testCase := range testCases { + name := name + testCase := testCase + t.Run(fmt.Sprintf("Automation_%s", name), func(t *testing.T) { + t.Parallel() + network := networks.MustGetSelectedNetworkConfig(config.Network)[0] // Need a new copy of the network for each test + + testEnvironment := environment. 
+ New(&environment.Config{ + NamespacePrefix: fmt.Sprintf("chaos-automation-%s", name), + TTL: time.Hour * 1, + Test: t, + }). + AddHelm(testCase.networkChart). + AddHelm(testCase.clChart). + AddChart(blockscout.New(&blockscout.Props{ + Name: "geth-blockscout", + WsURL: network.URL, + HttpURL: network.HTTPURLs[0], + })) + err := testEnvironment.Run() + require.NoError(t, err, "Error setting up test environment") + if testEnvironment.WillUseRemoteRunner() { + return + } + + err = testEnvironment.Client.LabelChaosGroup(testEnvironment.Cfg.Namespace, "instance=node-", 1, 2, ChaosGroupMinority) + require.NoError(t, err) + err = testEnvironment.Client.LabelChaosGroup(testEnvironment.Cfg.Namespace, "instance=node-", 3, 5, ChaosGroupMajority) + require.NoError(t, err) + err = testEnvironment.Client.LabelChaosGroup(testEnvironment.Cfg.Namespace, "instance=node-", 2, 5, ChaosGroupMajorityPlus) + require.NoError(t, err) + + chainClient, err := blockchain.NewEVMClient(network, testEnvironment, l) + require.NoError(t, err, "Error connecting to blockchain") + contractDeployer, err := contracts.NewContractDeployer(chainClient, l) + require.NoError(t, err, "Error building contract deployer") + + pluginNodes, err := client.ConnectPluginNodes(testEnvironment) + require.NoError(t, err, "Error connecting to Plugin nodes") + chainClient.ParallelTransactions(true) + + // Register cleanup for any test + t.Cleanup(func() { + if chainClient != nil { + chainClient.GasStats().PrintStats() + } + err := actions.TeardownSuite(t, testEnvironment, pluginNodes, nil, zapcore.PanicLevel, &config, chainClient) + require.NoError(t, err, "Error tearing down environment") + }) + + txCost, err := chainClient.EstimateCostForPluginOperations(1000) + require.NoError(t, err, "Error estimating cost for Plugin Operations") + err = actions.FundPluginNodes(pluginNodes, chainClient, txCost) + require.NoError(t, err, "Error funding Plugin nodes") + + linkToken, err := contractDeployer.DeployLinkTokenContract() + 
require.NoError(t, err, "Error deploying PLI token") + + registry, registrar := actions.DeployAutoOCRRegistryAndRegistrar( + t, + registryVersion, + defaultOCRRegistryConfig, + linkToken, + contractDeployer, + chainClient, + ) + + // Fund the registry with PLI + err = linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(numberOfUpkeeps)))) + require.NoError(t, err, "Funding keeper registry contract shouldn't fail") + + actions.CreateOCRKeeperJobs(t, pluginNodes, registry.Address(), network.ChainID, 0, registryVersion) + nodesWithoutBootstrap := pluginNodes[1:] + ocrConfig, err := actions.BuildAutoOCR2ConfigVars(t, nodesWithoutBootstrap, defaultOCRRegistryConfig, registrar.Address(), 30*time.Second) + require.NoError(t, err, "Error building OCR config vars") + err = registry.SetConfig(defaultOCRRegistryConfig, ocrConfig) + require.NoError(t, err, "Registry config should be be set successfully") + require.NoError(t, chainClient.WaitForEvents(), "Waiting for config to be set") + + consumers_conditional, upkeepIDs_conditional := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, numberOfUpkeeps, big.NewInt(defaultLinkFunds), defaultUpkeepGasLimit, false, false) + consumers_logtrigger, upkeepIDs_logtrigger := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, numberOfUpkeeps, big.NewInt(defaultLinkFunds), defaultUpkeepGasLimit, true, false) + + consumers := append(consumers_conditional, consumers_logtrigger...) + upkeepIDs := append(upkeepIDs_conditional, upkeepIDs_logtrigger...) 
+ + l.Info().Msg("Waiting for all upkeeps to be performed") + + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 10 + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + expect := 5 + l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">=", int64(expect)), + "Expected consumer counter to be greater than %d, but got %d", expect, counter.Int64()) + } + }, "5m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~2m for performing each upkeep 5 times, ~2m buffer + + _, err = testEnvironment.Chaos.Run(testCase.chaosFunc(testEnvironment.Cfg.Namespace, testCase.chaosProps)) + require.NoError(t, err) + + gom.Eventually(func(g gomega.Gomega) { + // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 10 + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + expect := 10 + l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">=", int64(expect)), + "Expected consumer counter to be greater than %d, but got %d", expect, counter.Int64()) + } + }, "3m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~2m for performing each upkeep 5 times, ~2m buffer + }) + } + + }) + } +} diff --git a/integration-tests/chaos/constants.go b/integration-tests/chaos/constants.go new file mode 100644 index 00000000..426f9f1a --- /dev/null +++ b/integration-tests/chaos/constants.go @@ -0,0 
+1,18 @@ +package chaos + +const ( + // ChaosGroupMinority a group of faulty nodes, even if they fail OCR must work + ChaosGroupMinority = "chaosGroupMinority" + // ChaosGroupMajority a group of nodes that are working even if minority fails + ChaosGroupMajority = "chaosGroupMajority" + // ChaosGroupMajorityPlus a group of nodes that are majority + 1 + ChaosGroupMajorityPlus = "chaosGroupMajorityPlus" + + PodChaosFailMercury = "pod-chaos-fail-mercury-server" + PodChaosFailMinorityNodes = "pod-chaos-fail-minority-nodes" + PodChaosFailMajorityNodes = "pod-chaos-fail-majority-nodes" + PodChaosFailMajorityDB = "pod-chaos-fail-majority-db" + NetworkChaosFailMajorityNetwork = "network-chaos-fail-majority-network" + NetworkChaosFailBlockchainNode = "network-chaos-fail-blockchain-node" + NetworkChaosDisruptNetworkDONMercury = "network-chaos-disrupt-don-mercury" +) diff --git a/integration-tests/chaos/functions/full.yaml b/integration-tests/chaos/functions/full.yaml new file mode 100644 index 00000000..ca832192 --- /dev/null +++ b/integration-tests/chaos/functions/full.yaml @@ -0,0 +1,232 @@ +apiVersion: chaos-mesh.org/v1alpha1 +kind: Workflow +metadata: + namespace: plugin + name: plugin-flow +spec: + entry: entry + templates: + # root entry + - name: entry + templateType: Serial + deadline: 1h + children: + - killing + - network-delay-internal +# - external-deps-failure + # children chaos group + - name: killing + templateType: Serial + children: + - gateway-kill + - don-minority-kill + - don-majority-kill + - adapters-minority-kill + - adapters-majority-kill + # children chaos group + - name: network-delay-internal + templateType: Serial + children: + - gateway-delay + - don-minority-delay + - don-majority-delay + - adapters-minority-delay + - adapters-majority-delay + # children chaos group + - name: external-deps-failure + templateType: Serial + children: + - ea-url-resolve-failure + + # experiments (killing) + - name: gateway-kill + templateType: PodChaos + deadline: 
1m + podChaos: + selector: + namespaces: + - plugin + labelSelectors: + 'app.kubernetes.io/instance': cln-gateway-staging1-node + mode: one + action: pod-kill + - name: don-minority-kill + templateType: PodChaos + deadline: 1m + podChaos: + selector: + namespaces: + - plugin + expressionSelectors: + - key: app.kubernetes.io/instance + operator: In + values: + - clc-ocr2-dr-matic-testnet-nodes-0 + - clc-ocr2-dr-matic-testnet-boot + mode: all + action: pod-kill + - name: don-majority-kill + templateType: PodChaos + deadline: 1m + podChaos: + selector: + namespaces: + - plugin + expressionSelectors: + - key: app.kubernetes.io/instance + operator: In + values: + - clc-ocr2-dr-matic-testnet-nodes-1 + - clc-ocr2-dr-matic-testnet-nodes-0 + - clc-ocr2-dr-matic-testnet-boot + mode: all + action: pod-kill + - name: adapters-minority-kill + templateType: PodChaos + deadline: 1m + podChaos: + selector: + namespaces: + - adapters + expressionSelectors: + - key: app.kubernetes.io/instance + operator: In + values: + - universal-mumbai-0 + mode: all + action: pod-kill + - name: adapters-majority-kill + templateType: PodChaos + deadline: 1m + podChaos: + selector: + namespaces: + - adapters + expressionSelectors: + - key: app.kubernetes.io/instance + operator: In + values: + - universal-mumbai-1 + - universal-mumbai-0 + mode: all + action: pod-kill + + # TODO: enable when chaosd is installed on all the nodes + # experiments (delays) + - name: gateway-delay + templateType: NetworkChaos + deadline: 1m + networkChaos: + selector: + namespaces: + - plugin + labelSelectors: + 'app.kubernetes.io/instance': cln-gateway-staging1-node + mode: all + action: delay + delay: + latency: 200ms + correlation: '0' + jitter: 0ms + direction: to + - name: don-minority-delay + templateType: NetworkChaos + deadline: 1m + networkChaos: + selector: + namespaces: + - plugin + expressionSelectors: + - key: app.kubernetes.io/instance + operator: In + values: + - clc-ocr2-dr-matic-testnet-nodes-0 + - 
clc-ocr2-dr-matic-testnet-boot + mode: all + action: delay + delay: + latency: 200ms + correlation: '0' + jitter: 0ms + direction: to + - name: don-majority-delay + templateType: NetworkChaos + deadline: 1m + networkChaos: + selector: + namespaces: + - plugin + expressionSelectors: + - key: app.kubernetes.io/instance + operator: In + values: + - clc-ocr2-dr-matic-testnet-nodes-1 + - clc-ocr2-dr-matic-testnet-nodes-0 + - clc-ocr2-dr-matic-testnet-boot + mode: all + action: delay + delay: + latency: 200ms + correlation: '0' + jitter: 0ms + direction: to + - name: adapters-minority-delay + templateType: NetworkChaos + deadline: 1m + networkChaos: + selector: + namespaces: + - adapters + expressionSelectors: + - key: app.kubernetes.io/instance + operator: In + values: + - universal-mumbai-0 + mode: all + action: delay + delay: + latency: 200ms + correlation: '0' + jitter: 0ms + direction: to + - name: adapters-majority-delay + templateType: NetworkChaos + deadline: 1m + networkChaos: + selector: + namespaces: + - adapters + expressionSelectors: + - key: app.kubernetes.io/instance + operator: In + values: + - universal-mumbai-1 + - universal-mumbai-0 + mode: all + action: delay + delay: + latency: 200ms + correlation: '0' + jitter: 0ms + direction: to + + # experiments (external deps failure) +# - name: ea-url-resolve-failure +# templateType: NetworkChaos +# deadline: 3m +# networkChaos: +# selector: +# namespaces: +# - plugin +# mode: all +# action: partition +# direction: to +# target: +# selector: +# namespaces: +# - plugin +# mode: all +# externalTargets: +# - >- +# my-url.com + diff --git a/integration-tests/chaos/ocr2vrf_chaos_test.go b/integration-tests/chaos/ocr2vrf_chaos_test.go new file mode 100644 index 00000000..9defcd51 --- /dev/null +++ b/integration-tests/chaos/ocr2vrf_chaos_test.go @@ -0,0 +1,238 @@ +package chaos + +import ( + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + 
"github.com/goplugin/plugin-testing-framework/blockchain" + ctf_config "github.com/goplugin/plugin-testing-framework/config" + "github.com/goplugin/plugin-testing-framework/k8s/chaos" + "github.com/goplugin/plugin-testing-framework/k8s/environment" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/plugin" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/ethereum" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/networks" + "github.com/goplugin/plugin-testing-framework/utils/ptr" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/actions/ocr2vrf_actions" + "github.com/goplugin/pluginv3.0/integration-tests/actions/ocr2vrf_actions/ocr2vrf_constants" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/config" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +func TestOCR2VRFChaos(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + testconfig, err := tc.GetConfig("Chaos", tc.OCR2VRF) + if err != nil { + t.Fatal(err) + } + + loadedNetwork := networks.MustGetSelectedNetworkConfig(testconfig.Network)[0] + + defaultOCR2VRFSettings := map[string]interface{}{ + "replicas": 6, + "toml": networks.AddNetworkDetailedConfig( + config.BaseOCR2Config, + testconfig.Pyroscope, + config.DefaultOCR2VRFNetworkDetailTomlConfig, + loadedNetwork, + ), + } + + defaultOCR2VRFEthereumSettings := ðereum.Props{ + NetworkName: loadedNetwork.Name, + Simulated: loadedNetwork.Simulated, + WsURLs: loadedNetwork.URLs, + } + + var overrideFn = func(_ interface{}, target interface{}) { + ctf_config.MustConfigOverridePluginVersion(testconfig.PluginImage, target) + ctf_config.MightConfigOverridePyroscopeKey(testconfig.Pyroscope, target) + } + 
+ pluginCfg := plugin.NewWithOverride(0, defaultOCR2VRFSettings, testconfig.PluginImage, overrideFn) + + testCases := map[string]struct { + networkChart environment.ConnectedChart + clChart environment.ConnectedChart + chaosFunc chaos.ManifestFunc + chaosProps *chaos.Props + }{ + // network-* and pods-* are split intentionally into 2 parallel groups + // we can't use chaos.NewNetworkPartition and chaos.NewFailPods in parallel + // because of jsii runtime bug, see Makefile + + // PodChaosFailMinorityNodes Test description: + //1. DKG and VRF beacon processes are set and VRF request gets fulfilled + //2. Apply chaos experiment - take down 2 nodes out of 5 non-bootstrap + //3. Bring back all nodes to normal + //4. verify VRF request gets fulfilled + PodChaosFailMinorityNodes: { + ethereum.New(defaultOCR2VRFEthereumSettings), + pluginCfg, + chaos.NewFailPods, + &chaos.Props{ + LabelsSelector: &map[string]*string{ChaosGroupMinority: ptr.Ptr("1")}, + DurationStr: "1m", + }, + }, + //todo - currently failing, need to investigate deeper + //PodChaosFailMajorityNodes: { + // ethereum.New(defaultOCR2VRFEthereumSettings), + // pluginCfg, + // chaos.NewFailPods, + // &chaos.Props{ + // LabelsSelector: &map[string]*string{ChaosGroupMajority: ptr.Ptr("1")}, + // DurationStr: "1m", + // }, + //}, + //todo - do we need these chaos tests? 
+ //PodChaosFailMajorityDB: { + // ethereum.New(defaultOCR2VRFEthereumSettings), + // pluginCfg, + // chaos.NewFailPods, + // &chaos.Props{ + // LabelsSelector: &map[string]*string{ChaosGroupMajority: ptr.Ptr("1")}, + // DurationStr: "1m", + // ContainerNames: &[]*string{ptr.Ptr("plugin-db")}, + // }, + //}, + //NetworkChaosFailMajorityNetwork: { + // ethereum.New(defaultOCR2VRFEthereumSettings), + // pluginCfg, + // chaos.NewNetworkPartition, + // &chaos.Props{ + // FromLabels: &map[string]*string{ChaosGroupMajority: ptr.Ptr("1")}, + // ToLabels: &map[string]*string{ChaosGroupMinority: ptr.Ptr("1")}, + // DurationStr: "1m", + // }, + //}, + //NetworkChaosFailBlockchainNode: { + // ethereum.New(defaultOCR2VRFEthereumSettings), + // pluginCfg, + // chaos.NewNetworkPartition, + // &chaos.Props{ + // FromLabels: &map[string]*string{"app": ptr.Ptr("geth")}, + // ToLabels: &map[string]*string{ChaosGroupMajority: ptr.Ptr("1")}, + // DurationStr: "1m", + // }, + //}, + } + + for testCaseName, tc := range testCases { + testCase := tc + t.Run(fmt.Sprintf("OCR2VRF_%s", testCaseName), func(t *testing.T) { + t.Parallel() + testNetwork := networks.MustGetSelectedNetworkConfig(testconfig.Network)[0] // Need a new copy of the network for each test + testEnvironment := environment. + New(&environment.Config{ + NamespacePrefix: fmt.Sprintf( + "chaos-ocr2vrf-%s", strings.ReplaceAll(strings.ToLower(testNetwork.Name), " ", "-"), + ), + Test: t, + }). + AddHelm(testCase.networkChart). 
+ AddHelm(testCase.clChart) + err := testEnvironment.Run() + require.NoError(t, err, "Error running test environment") + if testEnvironment.WillUseRemoteRunner() { + return + } + + err = testEnvironment.Client.LabelChaosGroup(testEnvironment.Cfg.Namespace, "instance=node-", 1, 2, ChaosGroupMinority) + require.NoError(t, err) + err = testEnvironment.Client.LabelChaosGroup(testEnvironment.Cfg.Namespace, "instance=node-", 3, 5, ChaosGroupMajority) + require.NoError(t, err) + + chainClient, err := blockchain.NewEVMClient(testNetwork, testEnvironment, l) + require.NoError(t, err, "Error connecting to blockchain") + contractDeployer, err := contracts.NewContractDeployer(chainClient, l) + require.NoError(t, err, "Error building contract deployer") + pluginNodes, err := client.ConnectPluginNodes(testEnvironment) + require.NoError(t, err, "Error connecting to Plugin nodes") + nodeAddresses, err := actions.PluginNodeAddresses(pluginNodes) + require.NoError(t, err, "Retrieving on-chain wallet addresses for plugin nodes shouldn't fail") + + t.Cleanup(func() { + err := actions.TeardownSuite(t, testEnvironment, pluginNodes, nil, zapcore.PanicLevel, &testconfig, chainClient) + require.NoError(t, err, "Error tearing down environment") + }) + + chainClient.ParallelTransactions(true) + + linkToken, err := contractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Error deploying PLI token") + + mockETHLinkFeed, err := contractDeployer.DeployMockETHPLIFeed(ocr2vrf_constants.LinkEthFeedResponse) + require.NoError(t, err, "Error deploying Mock ETH/PLI Feed") + + _, _, vrfBeaconContract, consumerContract, subID := ocr2vrf_actions.SetupOCR2VRFUniverse( + t, + linkToken, + mockETHLinkFeed, + contractDeployer, + chainClient, + nodeAddresses, + pluginNodes, + testNetwork, + ) + + //Request and Redeem Randomness to verify that process works fine + requestID := ocr2vrf_actions.RequestAndRedeemRandomness( + t, + consumerContract, + chainClient, + vrfBeaconContract, + 
ocr2vrf_constants.NumberOfRandomWordsToRequest, + subID, + ocr2vrf_constants.ConfirmationDelay, + ocr2vrf_constants.RandomnessRedeemTransmissionEventTimeout, + ) + + for i := uint16(0); i < ocr2vrf_constants.NumberOfRandomWordsToRequest; i++ { + randomness, err := consumerContract.GetRandomnessByRequestId(testcontext.Get(t), requestID, big.NewInt(int64(i))) + require.NoError(t, err) + l.Info().Interface("Random Number", randomness).Interface("Randomness Number Index", i).Msg("Randomness retrieved from Consumer contract") + require.NotEqual(t, 0, randomness.Uint64(), "Randomness retrieved from Consumer contract give an answer other than 0") + } + + id, err := testEnvironment.Chaos.Run(testCase.chaosFunc(testEnvironment.Cfg.Namespace, testCase.chaosProps)) + require.NoError(t, err, "Error running Chaos Experiment") + l.Info().Msg("Chaos Applied") + + err = testEnvironment.Chaos.WaitForAllRecovered(id, time.Minute) + require.NoError(t, err, "Error waiting for Chaos Experiment to end") + l.Info().Msg("Chaos Recovered") + + //Request and Redeem Randomness again to see that after Chaos Experiment whole process is still working + requestID = ocr2vrf_actions.RequestAndRedeemRandomness( + t, + consumerContract, + chainClient, + vrfBeaconContract, + ocr2vrf_constants.NumberOfRandomWordsToRequest, + subID, + ocr2vrf_constants.ConfirmationDelay, + ocr2vrf_constants.RandomnessRedeemTransmissionEventTimeout, + ) + + for i := uint16(0); i < ocr2vrf_constants.NumberOfRandomWordsToRequest; i++ { + randomness, err := consumerContract.GetRandomnessByRequestId(testcontext.Get(t), requestID, big.NewInt(int64(i))) + require.NoError(t, err, "Error getting Randomness result from Consumer Contract") + l.Info().Interface("Random Number", randomness).Interface("Randomness Number Index", i).Msg("Randomness retrieved from Consumer contract") + require.NotEqual(t, 0, randomness.Uint64(), "Randomness retrieved from Consumer contract give an answer other than 0") + } + }) + } +} diff --git 
a/integration-tests/chaos/ocr_chaos_test.go b/integration-tests/chaos/ocr_chaos_test.go new file mode 100644 index 00000000..558ce3b8 --- /dev/null +++ b/integration-tests/chaos/ocr_chaos_test.go @@ -0,0 +1,222 @@ +package chaos + +import ( + "fmt" + "math/big" + "testing" + + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/plugin-testing-framework/blockchain" + ctfClient "github.com/goplugin/plugin-testing-framework/client" + ctf_config "github.com/goplugin/plugin-testing-framework/config" + "github.com/goplugin/plugin-testing-framework/k8s/chaos" + "github.com/goplugin/plugin-testing-framework/k8s/environment" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/plugin" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/ethereum" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/mockserver" + mockservercfg "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/mockserver-cfg" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/networks" + "github.com/goplugin/plugin-testing-framework/utils/ptr" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +var ( + defaultOCRSettings = map[string]interface{}{ + "replicas": 6, + "db": map[string]interface{}{ + "stateful": true, + "capacity": "1Gi", + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "250m", + "memory": "256Mi", + }, + "limits": map[string]interface{}{ + "cpu": "250m", + "memory": "256Mi", + }, + }, + }, + } + chaosStartRound int64 = 1 + chaosEndRound int64 = 4 +) + +func getDefaultOcrSettings(config *tc.TestConfig) map[string]interface{} { + 
defaultOCRSettings["toml"] = networks.AddNetworksConfig(baseTOML, config.Pyroscope, networks.MustGetSelectedNetworkConfig(config.Network)[0]) + return defaultAutomationSettings +} + +func TestOCRChaos(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Chaos", tc.OCR) + if err != nil { + t.Fatal(err) + } + + var overrideFn = func(_ interface{}, target interface{}) { + ctf_config.MustConfigOverridePluginVersion(config.PluginImage, target) + ctf_config.MightConfigOverridePyroscopeKey(config.Pyroscope, target) + } + + pluginCfg := plugin.NewWithOverride(0, getDefaultOcrSettings(&config), config.PluginImage, overrideFn) + + testCases := map[string]struct { + networkChart environment.ConnectedChart + clChart environment.ConnectedChart + chaosFunc chaos.ManifestFunc + chaosProps *chaos.Props + }{ + // network-* and pods-* are split intentionally into 2 parallel groups + // we can't use chaos.NewNetworkPartition and chaos.NewFailPods in parallel + // because of jsii runtime bug, see Makefile and please use those targets to run tests + // + // We are using two chaos experiments to simulate pods/network faults, + // check chaos.NewFailPods method (https://chaos-mesh.org/docs/simulate-pod-chaos-on-kubernetes/) + // and chaos.NewNetworkPartition method (https://chaos-mesh.org/docs/simulate-network-chaos-on-kubernetes/) + // in order to regenerate Go bindings if k8s version will be updated + // you can pull new CRD spec from your current cluster and check README here + // https://github.com/goplugin/plugin-testing-framework/k8s/blob/master/README.md + NetworkChaosFailMajorityNetwork: { + ethereum.New(nil), + pluginCfg, + chaos.NewNetworkPartition, + &chaos.Props{ + FromLabels: &map[string]*string{ChaosGroupMajority: ptr.Ptr("1")}, + ToLabels: &map[string]*string{ChaosGroupMinority: ptr.Ptr("1")}, + DurationStr: "1m", + }, + }, + NetworkChaosFailBlockchainNode: { + ethereum.New(nil), + pluginCfg, + chaos.NewNetworkPartition, + 
&chaos.Props{ + FromLabels: &map[string]*string{"app": ptr.Ptr("geth")}, + ToLabels: &map[string]*string{ChaosGroupMajorityPlus: ptr.Ptr("1")}, + DurationStr: "1m", + }, + }, + PodChaosFailMinorityNodes: { + ethereum.New(nil), + pluginCfg, + chaos.NewFailPods, + &chaos.Props{ + LabelsSelector: &map[string]*string{ChaosGroupMinority: ptr.Ptr("1")}, + DurationStr: "1m", + }, + }, + PodChaosFailMajorityNodes: { + ethereum.New(nil), + pluginCfg, + chaos.NewFailPods, + &chaos.Props{ + LabelsSelector: &map[string]*string{ChaosGroupMajority: ptr.Ptr("1")}, + DurationStr: "1m", + }, + }, + PodChaosFailMajorityDB: { + ethereum.New(nil), + pluginCfg, + chaos.NewFailPods, + &chaos.Props{ + LabelsSelector: &map[string]*string{ChaosGroupMajority: ptr.Ptr("1")}, + DurationStr: "1m", + ContainerNames: &[]*string{ptr.Ptr("plugin-db")}, + }, + }, + } + + for n, tst := range testCases { + name := n + testCase := tst + t.Run(fmt.Sprintf("OCR_%s", name), func(t *testing.T) { + t.Parallel() + + testEnvironment := environment.New(&environment.Config{ + NamespacePrefix: fmt.Sprintf("chaos-ocr-%s", name), + Test: t, + }). + AddHelm(mockservercfg.New(nil)). + AddHelm(mockserver.New(nil)). + AddHelm(testCase.networkChart). 
+ AddHelm(testCase.clChart) + err := testEnvironment.Run() + require.NoError(t, err) + if testEnvironment.WillUseRemoteRunner() { + return + } + + err = testEnvironment.Client.LabelChaosGroup(testEnvironment.Cfg.Namespace, "instance=node-", 1, 2, ChaosGroupMinority) + require.NoError(t, err) + err = testEnvironment.Client.LabelChaosGroup(testEnvironment.Cfg.Namespace, "instance=node-", 3, 5, ChaosGroupMajority) + require.NoError(t, err) + err = testEnvironment.Client.LabelChaosGroup(testEnvironment.Cfg.Namespace, "instance=node-", 2, 5, ChaosGroupMajorityPlus) + require.NoError(t, err) + + chainClient, err := blockchain.NewEVMClient(blockchain.SimulatedEVMNetwork, testEnvironment, l) + require.NoError(t, err, "Connecting to blockchain nodes shouldn't fail") + cd, err := contracts.NewContractDeployer(chainClient, l) + require.NoError(t, err, "Deploying contracts shouldn't fail") + + pluginNodes, err := client.ConnectPluginNodes(testEnvironment) + require.NoError(t, err, "Connecting to plugin nodes shouldn't fail") + bootstrapNode, workerNodes := pluginNodes[0], pluginNodes[1:] + t.Cleanup(func() { + if chainClient != nil { + chainClient.GasStats().PrintStats() + } + err := actions.TeardownSuite(t, testEnvironment, pluginNodes, nil, zapcore.PanicLevel, &config, chainClient) + require.NoError(t, err, "Error tearing down environment") + }) + + ms, err := ctfClient.ConnectMockServer(testEnvironment) + require.NoError(t, err, "Creating mockserver clients shouldn't fail") + + chainClient.ParallelTransactions(true) + require.NoError(t, err) + + lt, err := cd.DeployLinkTokenContract() + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + + err = actions.FundPluginNodes(pluginNodes, chainClient, big.NewFloat(10)) + require.NoError(t, err) + + ocrInstances, err := actions.DeployOCRContracts(1, lt, cd, workerNodes, chainClient) + require.NoError(t, err) + err = chainClient.WaitForEvents() + require.NoError(t, err) + err = 
actions.CreateOCRJobs(ocrInstances, bootstrapNode, workerNodes, 5, ms, chainClient.GetChainID().String()) + require.NoError(t, err) + + chaosApplied := false + + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + for _, ocr := range ocrInstances { + err := ocr.RequestNewRound() + require.NoError(t, err, "Error requesting new round") + } + round, err := ocrInstances[0].GetLatestRound(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred()) + l.Info().Int64("RoundID", round.RoundId.Int64()).Msg("Latest OCR Round") + if round.RoundId.Int64() == chaosStartRound && !chaosApplied { + chaosApplied = true + _, err = testEnvironment.Chaos.Run(testCase.chaosFunc(testEnvironment.Cfg.Namespace, testCase.chaosProps)) + require.NoError(t, err) + } + g.Expect(round.RoundId.Int64()).Should(gomega.BeNumerically(">=", chaosEndRound)) + }, "6m", "3s").Should(gomega.Succeed()) + }) + } +} diff --git a/integration-tests/client/plugin.go b/integration-tests/client/plugin.go new file mode 100644 index 00000000..abff8084 --- /dev/null +++ b/integration-tests/client/plugin.go @@ -0,0 +1,1272 @@ +// Package client enables interaction with APIs of test components like the mockserver and Plugin nodes +package client + +import ( + "fmt" + "math/big" + "net/http" + "os" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/go-resty/resty/v2" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "golang.org/x/sync/errgroup" +) + +const ( + // PluginKeyPassword used to encrypt exported keys + PluginKeyPassword string = "twochains" + // NodeURL string for logging + NodeURL string = "Node URL" +) + +var ( + // OnePLI representation of a single PLI token + OnePLI = big.NewFloat(1e18) + mapKeyTypeToChain = map[string]string{ + "evm": "eTHKeys", + "solana": "encryptedSolanaKeys", + "starknet": "encryptedStarkNetKeys", + } +) + +type PluginClient struct { + APIClient *resty.Client + Config *PluginConfig + pageSize int + 
primaryEthAddress string + ethAddresses []string + l zerolog.Logger +} + +// NewPluginClient creates a new Plugin model using a provided config +func NewPluginClient(c *PluginConfig, logger zerolog.Logger) (*PluginClient, error) { + rc, err := initRestyClient(c.URL, c.Email, c.Password, c.HTTPTimeout) + if err != nil { + return nil, err + } + _, isSet := os.LookupEnv("CL_CLIENT_DEBUG") + if isSet { + rc.SetDebug(true) + } + return &PluginClient{ + Config: c, + APIClient: rc, + pageSize: 25, + l: logger, + }, nil +} + +func initRestyClient(url string, email string, password string, timeout *time.Duration) (*resty.Client, error) { + rc := resty.New().SetBaseURL(url) + if timeout != nil { + rc.SetTimeout(*timeout) + } + session := &Session{Email: email, Password: password} + // Retry the connection on boot up, sometimes pods can still be starting up and not ready to accept connections + var resp *resty.Response + var err error + retryCount := 20 + for i := 0; i < retryCount; i++ { + resp, err = rc.R().SetBody(session).Post("/sessions") + if err != nil { + log.Debug().Err(err).Str("URL", url).Interface("Session Details", session).Msg("Error connecting to Plugin node, retrying") + time.Sleep(5 * time.Second) + } else { + break + } + } + if err != nil { + return nil, fmt.Errorf("error connecting to plugin node after %d attempts: %w", retryCount, err) + } + rc.SetCookies(resp.Cookies()) + return rc, nil +} + +// URL Plugin instance http url +func (c *PluginClient) URL() string { + return c.Config.URL +} + +// CreateJobRaw creates a Plugin job based on the provided spec string +func (c *PluginClient) CreateJobRaw(spec string) (*Job, *http.Response, error) { + job := &Job{} + c.l.Info().Str("Node URL", c.Config.URL).Msg("Creating Job") + c.l.Trace().Str("Node URL", c.Config.URL).Str("Job Body", spec).Msg("Creating Job") + resp, err := c.APIClient.R(). + SetBody(&JobForm{ + TOML: spec, + }). + SetResult(&job). 
+ Post("/v2/jobs") + if err != nil { + return nil, nil, err + } + return job, resp.RawResponse, err +} + +// MustCreateJob creates a Plugin job based on the provided spec struct and returns error if +// the request is unsuccessful +func (c *PluginClient) MustCreateJob(spec JobSpec) (*Job, error) { + job, resp, err := c.CreateJob(spec) + if err != nil { + return nil, err + } + return job, VerifyStatusCodeWithResponse(resp, http.StatusOK) +} + +// CreateJob creates a Plugin job based on the provided spec struct +func (c *PluginClient) CreateJob(spec JobSpec) (*Job, *resty.Response, error) { + job := &Job{} + specString, err := spec.String() + if err != nil { + return nil, nil, err + } + c.l.Info().Str("Node URL", c.Config.URL).Str("Type", spec.Type()).Msg("Creating Job") + c.l.Trace().Str("Node URL", c.Config.URL).Str("Type", spec.Type()).Str("Spec", specString).Msg("Creating Job") + resp, err := c.APIClient.R(). + SetBody(&JobForm{ + TOML: specString, + }). + SetResult(&job). + Post("/v2/jobs") + if err != nil { + return nil, nil, err + } + return job, resp, err +} + +// ReadJobs reads all jobs from the Plugin node +func (c *PluginClient) ReadJobs() (*ResponseSlice, *http.Response, error) { + specObj := &ResponseSlice{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Getting Jobs") + resp, err := c.APIClient.R(). + SetResult(&specObj). + Get("/v2/jobs") + if err != nil { + return nil, nil, err + } + return specObj, resp.RawResponse, err +} + +// ReadJob reads a job with the provided ID from the Plugin node +func (c *PluginClient) ReadJob(id string) (*Response, *http.Response, error) { + specObj := &Response{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("ID", id).Msg("Reading Job") + resp, err := c.APIClient.R(). + SetResult(&specObj). + SetPathParams(map[string]string{ + "id": id, + }). 
+ Get("/v2/jobs/{id}") + if err != nil { + return nil, nil, err + } + return specObj, resp.RawResponse, err +} + +// MustDeleteJob deletes a job with a provided ID from the Plugin node and returns error if +// the request is unsuccessful +func (c *PluginClient) MustDeleteJob(id string) error { + resp, err := c.DeleteJob(id) + if err != nil { + return err + } + return VerifyStatusCode(resp.StatusCode, http.StatusNoContent) +} + +// DeleteJob deletes a job with a provided ID from the Plugin node +func (c *PluginClient) DeleteJob(id string) (*http.Response, error) { + c.l.Info().Str(NodeURL, c.Config.URL).Str("ID", id).Msg("Deleting Job") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "id": id, + }). + Delete("/v2/jobs/{id}") + if err != nil { + return nil, err + } + return resp.RawResponse, err +} + +// CreateSpec creates a job spec on the Plugin node +func (c *PluginClient) CreateSpec(spec string) (*Spec, *http.Response, error) { + s := &Spec{} + r := strings.NewReplacer("\n", "", " ", "", "\\", "") // Makes it more compact and readable for logging + c.l.Info().Str(NodeURL, c.Config.URL).Str("Spec", r.Replace(spec)).Msg("Creating Spec") + resp, err := c.APIClient.R(). + SetBody([]byte(spec)). + SetResult(&s). + Post("/v2/specs") + if err != nil { + return nil, nil, err + } + return s, resp.RawResponse, err +} + +// ReadSpec reads a job spec with the provided ID on the Plugin node +func (c *PluginClient) ReadSpec(id string) (*Response, *http.Response, error) { + specObj := &Response{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("ID", id).Msg("Reading Spec") + resp, err := c.APIClient.R(). + SetResult(&specObj). + SetPathParams(map[string]string{ + "id": id, + }). 
+ Get("/v2/specs/{id}") + if err != nil { + return nil, nil, err + } + return specObj, resp.RawResponse, err +} + +// MustReadRunsByJob attempts to read all runs for a job and returns error if +// the request is unsuccessful +func (c *PluginClient) MustReadRunsByJob(jobID string) (*JobRunsResponse, error) { + runsObj, resp, err := c.ReadRunsByJob(jobID) + if err != nil { + return nil, err + } + return runsObj, VerifyStatusCode(resp.StatusCode, http.StatusOK) +} + +// ReadRunsByJob reads all runs for a job +func (c *PluginClient) ReadRunsByJob(jobID string) (*JobRunsResponse, *http.Response, error) { + runsObj := &JobRunsResponse{} + c.l.Debug().Str(NodeURL, c.Config.URL).Str("JobID", jobID).Msg("Reading runs for a job") + resp, err := c.APIClient.R(). + SetResult(&runsObj). + SetPathParams(map[string]string{ + "jobID": jobID, + }). + Get("/v2/jobs/{jobID}/runs") + if err != nil { + return nil, nil, err + } + return runsObj, resp.RawResponse, err +} + +// DeleteSpec deletes a job spec with the provided ID from the Plugin node +func (c *PluginClient) DeleteSpec(id string) (*http.Response, error) { + c.l.Info().Str(NodeURL, c.Config.URL).Str("ID", id).Msg("Deleting Spec") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "id": id, + }). 
+ Delete("/v2/specs/{id}") + if err != nil { + return nil, err + } + return resp.RawResponse, err +} + +// MustCreateBridge creates a bridge on the Plugin node based on the provided attributes and returns error if +// the request is unsuccessful +func (c *PluginClient) MustCreateBridge(bta *BridgeTypeAttributes) error { + c.l.Debug().Str(NodeURL, c.Config.URL).Str("Name", bta.Name).Msg("Creating Bridge") + resp, err := c.CreateBridge(bta) + if err != nil { + return err + } + return VerifyStatusCode(resp.StatusCode, http.StatusOK) +} + +func (c *PluginClient) CreateBridge(bta *BridgeTypeAttributes) (*http.Response, error) { + c.l.Debug().Str(NodeURL, c.Config.URL).Str("Name", bta.Name).Msg("Creating Bridge") + resp, err := c.APIClient.R(). + SetBody(bta). + Post("/v2/bridge_types") + if err != nil { + return nil, err + } + return resp.RawResponse, err +} + +// ReadBridge reads a bridge from the Plugin node based on the provided name +func (c *PluginClient) ReadBridge(name string) (*BridgeType, *http.Response, error) { + bt := BridgeType{} + c.l.Debug().Str(NodeURL, c.Config.URL).Str("Name", name).Msg("Reading Bridge") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "name": name, + }). + SetResult(&bt). + Get("/v2/bridge_types/{name}") + if err != nil { + return nil, nil, err + } + return &bt, resp.RawResponse, err +} + +// ReadBridges reads bridges from the Plugin node +func (c *PluginClient) ReadBridges() (*Bridges, *resty.Response, error) { + result := &Bridges{} + c.l.Debug().Str(NodeURL, c.Config.URL).Msg("Getting all bridges") + resp, err := c.APIClient.R(). + SetResult(&result). 
+ Get("/v2/bridge_types") + if err != nil { + return nil, nil, err + } + return result, resp, err +} + +// DeleteBridge deletes a bridge on the Plugin node based on the provided name +func (c *PluginClient) DeleteBridge(name string) (*http.Response, error) { + c.l.Debug().Str(NodeURL, c.Config.URL).Str("Name", name).Msg("Deleting Bridge") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "name": name, + }). + Delete("/v2/bridge_types/{name}") + if err != nil { + return nil, err + } + return resp.RawResponse, err +} + +// CreateOCRKey creates an OCRKey on the Plugin node +func (c *PluginClient) CreateOCRKey() (*OCRKey, *http.Response, error) { + ocrKey := &OCRKey{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Creating OCR Key") + resp, err := c.APIClient.R(). + SetResult(ocrKey). + Post("/v2/keys/ocr") + if err != nil { + return nil, nil, err + } + return ocrKey, resp.RawResponse, err +} + +// MustReadOCRKeys reads all OCRKeys from the Plugin node and returns error if +// the request is unsuccessful +func (c *PluginClient) MustReadOCRKeys() (*OCRKeys, error) { + ocrKeys := &OCRKeys{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading OCR Keys") + resp, err := c.APIClient.R(). + SetResult(ocrKeys). 
+ Get("/v2/keys/ocr") + if err != nil { + return nil, err + } + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + for index := range ocrKeys.Data { + ocrKeys.Data[index].Attributes.ConfigPublicKey = strings.TrimPrefix( + ocrKeys.Data[index].Attributes.ConfigPublicKey, "ocrcfg_") + ocrKeys.Data[index].Attributes.OffChainPublicKey = strings.TrimPrefix( + ocrKeys.Data[index].Attributes.OffChainPublicKey, "ocroff_") + ocrKeys.Data[index].Attributes.OnChainSigningAddress = strings.TrimPrefix( + ocrKeys.Data[index].Attributes.OnChainSigningAddress, "ocrsad_") + } + return ocrKeys, err +} + +// DeleteOCRKey deletes an OCRKey based on the provided ID +func (c *PluginClient) DeleteOCRKey(id string) (*http.Response, error) { + c.l.Info().Str(NodeURL, c.Config.URL).Str("ID", id).Msg("Deleting OCR Key") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "id": id, + }). + Delete("/v2/keys/ocr/{id}") + if err != nil { + return nil, err + } + return resp.RawResponse, err +} + +// CreateOCR2Key creates an OCR2Key on the Plugin node +func (c *PluginClient) CreateOCR2Key(chain string) (*OCR2Key, *http.Response, error) { + ocr2Key := &OCR2Key{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Creating OCR2 Key") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "chain": chain, + }). + SetResult(ocr2Key). + Post("/v2/keys/ocr2/{chain}") + if err != nil { + return nil, nil, err + } + return ocr2Key, resp.RawResponse, err +} + +// ReadOCR2Keys reads all OCR2Keys from the Plugin node +func (c *PluginClient) ReadOCR2Keys() (*OCR2Keys, *http.Response, error) { + ocr2Keys := &OCR2Keys{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading OCR2 Keys") + resp, err := c.APIClient.R(). + SetResult(ocr2Keys). 
+ Get("/v2/keys/ocr2") + return ocr2Keys, resp.RawResponse, err +} + +// MustReadOCR2Keys reads all OCR2Keys from the Plugin node returns err if response not 200 +func (c *PluginClient) MustReadOCR2Keys() (*OCR2Keys, error) { + ocr2Keys := &OCR2Keys{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading OCR2 Keys") + resp, err := c.APIClient.R(). + SetResult(ocr2Keys). + Get("/v2/keys/ocr2") + if err != nil { + return nil, err + } + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + return ocr2Keys, err +} + +// DeleteOCR2Key deletes an OCR2Key based on the provided ID +func (c *PluginClient) DeleteOCR2Key(id string) (*http.Response, error) { + c.l.Info().Str(NodeURL, c.Config.URL).Str("ID", id).Msg("Deleting OCR2 Key") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "id": id, + }). + Delete("/v2/keys/ocr2/{id}") + if err != nil { + return nil, err + } + return resp.RawResponse, err +} + +// CreateP2PKey creates an P2PKey on the Plugin node +func (c *PluginClient) CreateP2PKey() (*P2PKey, *http.Response, error) { + p2pKey := &P2PKey{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Creating P2P Key") + resp, err := c.APIClient.R(). + SetResult(p2pKey). + Post("/v2/keys/p2p") + if err != nil { + return nil, nil, err + } + return p2pKey, resp.RawResponse, err +} + +// MustReadP2PKeys reads all P2PKeys from the Plugin node and returns error if +// the request is unsuccessful +func (c *PluginClient) MustReadP2PKeys() (*P2PKeys, error) { + p2pKeys := &P2PKeys{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading P2P Keys") + resp, err := c.APIClient.R(). + SetResult(p2pKeys). + Get("/v2/keys/p2p") + if err != nil { + return nil, err + } + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + if len(p2pKeys.Data) == 0 { + err = fmt.Errorf("Found no P2P Keys on the Plugin node. 
Node URL: %s", c.Config.URL) + c.l.Err(err).Msg("Error getting P2P keys") + return nil, err + } + for index := range p2pKeys.Data { + p2pKeys.Data[index].Attributes.PeerID = strings.TrimPrefix(p2pKeys.Data[index].Attributes.PeerID, "p2p_") + } + return p2pKeys, err +} + +// DeleteP2PKey deletes a P2PKey on the Plugin node based on the provided ID +func (c *PluginClient) DeleteP2PKey(id int) (*http.Response, error) { + c.l.Info().Str(NodeURL, c.Config.URL).Int("ID", id).Msg("Deleting P2P Key") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "id": fmt.Sprint(id), + }). + Delete("/v2/keys/p2p/{id}") + if err != nil { + return nil, err + } + return resp.RawResponse, err +} + +// MustReadETHKeys reads all ETH keys from the Plugin node and returns error if +// the request is unsuccessful +func (c *PluginClient) MustReadETHKeys() (*ETHKeys, error) { + ethKeys := ÐKeys{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading ETH Keys") + resp, err := c.APIClient.R(). + SetResult(ethKeys). + Get("/v2/keys/eth") + if err != nil { + return nil, err + } + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + if len(ethKeys.Data) == 0 { + c.l.Warn().Str(NodeURL, c.Config.URL).Msg("Found no ETH Keys on the node") + } + return ethKeys, err +} + +// UpdateEthKeyMaxGasPriceGWei updates the maxGasPriceGWei for an eth key +func (c *PluginClient) UpdateEthKeyMaxGasPriceGWei(keyId string, gWei int) (*ETHKey, *http.Response, error) { + ethKey := ÐKey{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("ID", keyId).Int("maxGasPriceGWei", gWei).Msg("Update maxGasPriceGWei for eth key") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "keyId": keyId, + }). + SetQueryParams(map[string]string{ + "maxGasPriceGWei": fmt.Sprint(gWei), + }). + SetResult(ethKey). 
+ Put("/v2/keys/eth/{keyId}") + if err != nil { + return nil, nil, err + } + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + if err != nil { + return nil, nil, err + } + return ethKey, resp.RawResponse, err +} + +// ReadPrimaryETHKey reads updated information about the Plugin's primary ETH key +func (c *PluginClient) ReadPrimaryETHKey() (*ETHKeyData, error) { + ethKeys, err := c.MustReadETHKeys() + if err != nil { + return nil, err + } + if len(ethKeys.Data) == 0 { + return nil, fmt.Errorf("Error retrieving primary eth key on node %s: No ETH keys present", c.URL()) + } + return ðKeys.Data[0], nil +} + +// ReadETHKeyAtIndex reads updated information about the Plugin's ETH key at given index +func (c *PluginClient) ReadETHKeyAtIndex(keyIndex int) (*ETHKeyData, error) { + ethKeys, err := c.MustReadETHKeys() + if err != nil { + return nil, err + } + if len(ethKeys.Data) == 0 { + return nil, fmt.Errorf("Error retrieving primary eth key on node %s: No ETH keys present", c.URL()) + } + return ðKeys.Data[keyIndex], nil +} + +// PrimaryEthAddress returns the primary ETH address for the Plugin node +func (c *PluginClient) PrimaryEthAddress() (string, error) { + if c.primaryEthAddress == "" { + ethKeys, err := c.MustReadETHKeys() + if err != nil { + return "", err + } + c.primaryEthAddress = ethKeys.Data[0].Attributes.Address + } + return c.primaryEthAddress, nil +} + +// EthAddresses returns the ETH addresses for the Plugin node +func (c *PluginClient) EthAddresses() ([]string, error) { + if len(c.ethAddresses) == 0 { + ethKeys, err := c.MustReadETHKeys() + c.ethAddresses = make([]string, len(ethKeys.Data)) + if err != nil { + return make([]string, 0), err + } + for index, data := range ethKeys.Data { + c.ethAddresses[index] = data.Attributes.Address + } + } + return c.ethAddresses, nil +} + +// EthAddresses returns the ETH addresses of the Plugin node for a specific chain id +func (c *PluginClient) EthAddressesForChain(chainId string) ([]string, error) { + var 
ethAddresses []string + ethKeys, err := c.MustReadETHKeys() + if err != nil { + return nil, err + } + for _, ethKey := range ethKeys.Data { + if ethKey.Attributes.ChainID == chainId { + ethAddresses = append(ethAddresses, ethKey.Attributes.Address) + } + } + return ethAddresses, nil +} + +// PrimaryEthAddressForChain returns the primary ETH address for the Plugin node for mentioned chain +func (c *PluginClient) PrimaryEthAddressForChain(chainId string) (string, error) { + ethKeys, err := c.MustReadETHKeys() + if err != nil { + return "", err + } + for _, ethKey := range ethKeys.Data { + if ethKey.Attributes.ChainID == chainId { + return ethKey.Attributes.Address, nil + } + } + return "", nil +} + +// ExportEVMKeys exports Plugin private EVM keys +func (c *PluginClient) ExportEVMKeys() ([]*ExportedEVMKey, error) { + exportedKeys := make([]*ExportedEVMKey, 0) + keys, err := c.MustReadETHKeys() + if err != nil { + return nil, err + } + for _, key := range keys.Data { + if key.Attributes.ETHBalance != "0" { + exportedKey := &ExportedEVMKey{} + _, err := c.APIClient.R(). + SetResult(exportedKey). + SetPathParam("keyAddress", key.Attributes.Address). + SetQueryParam("newpassword", PluginKeyPassword). + Post("/v2/keys/eth/export/{keyAddress}") + if err != nil { + return nil, err + } + exportedKeys = append(exportedKeys, exportedKey) + } + } + c.l.Info(). + Str(NodeURL, c.Config.URL). + Str("Password", PluginKeyPassword). + Msg("Exported EVM Keys") + return exportedKeys, nil +} + +// ExportEVMKeysForChain exports Plugin private EVM keys for a particular chain +func (c *PluginClient) ExportEVMKeysForChain(chainid string) ([]*ExportedEVMKey, error) { + exportedKeys := make([]*ExportedEVMKey, 0) + keys, err := c.MustReadETHKeys() + if err != nil { + return nil, err + } + for _, key := range keys.Data { + if key.Attributes.ETHBalance != "0" && key.Attributes.ChainID == chainid { + exportedKey := &ExportedEVMKey{} + _, err := c.APIClient.R(). + SetResult(exportedKey). 
+ SetPathParam("keyAddress", key.Attributes.Address). + SetQueryParam("newpassword", PluginKeyPassword). + Post("/v2/keys/eth/export/{keyAddress}") + if err != nil { + return nil, err + } + exportedKeys = append(exportedKeys, exportedKey) + } + } + c.l.Info(). + Str(NodeURL, c.Config.URL). + Str("Password", PluginKeyPassword). + Msg("Exported EVM Keys") + return exportedKeys, nil +} + +// CreateTxKey creates a tx key on the Plugin node +func (c *PluginClient) CreateTxKey(chain string, chainId string) (*TxKey, *http.Response, error) { + txKey := &TxKey{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Creating Tx Key") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "chain": chain, + }). + SetQueryParam("evmChainID", chainId). + SetResult(txKey). + Post("/v2/keys/{chain}") + if err != nil { + return nil, nil, err + } + return txKey, resp.RawResponse, err +} + +// ReadTxKeys reads all tx keys from the Plugin node +func (c *PluginClient) ReadTxKeys(chain string) (*TxKeys, *http.Response, error) { + txKeys := &TxKeys{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading Tx Keys") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "chain": chain, + }). + SetResult(txKeys). + Get("/v2/keys/{chain}") + if err != nil { + return nil, nil, err + } + return txKeys, resp.RawResponse, err +} + +// DeleteTxKey deletes an tx key based on the provided ID +func (c *PluginClient) DeleteTxKey(chain string, id string) (*http.Response, error) { + c.l.Info().Str(NodeURL, c.Config.URL).Str("ID", id).Msg("Deleting Tx Key") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "chain": chain, + "id": id, + }). 
+ Delete("/v2/keys/{chain}/{id}") + if err != nil { + return nil, err + } + return resp.RawResponse, err +} + +// MustReadTransactionAttempts reads all transaction attempts on the Plugin node +// and returns error if the request is unsuccessful +func (c *PluginClient) MustReadTransactionAttempts() (*TransactionsData, error) { + txsData := &TransactionsData{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading Transaction Attempts") + resp, err := c.APIClient.R(). + SetResult(txsData). + Get("/v2/tx_attempts") + if err != nil { + return nil, err + } + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + return txsData, err +} + +// ReadTransactions reads all transactions made by the Plugin node +func (c *PluginClient) ReadTransactions() (*TransactionsData, *http.Response, error) { + txsData := &TransactionsData{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading Transactions") + resp, err := c.APIClient.R(). + SetResult(txsData). + Get("/v2/transactions") + if err != nil { + return nil, nil, err + } + return txsData, resp.RawResponse, err +} + +// MustSendNativeToken sends native token (ETH usually) of a specified amount from one of its addresses to the target address +// and returns error if the request is unsuccessful +// WARNING: The txdata object that Plugin sends back is almost always blank. +func (c *PluginClient) MustSendNativeToken(amount *big.Int, fromAddress, toAddress string) (TransactionData, error) { + request := SendEtherRequest{ + DestinationAddress: toAddress, + FromAddress: fromAddress, + Amount: amount.String(), + AllowHigherAmounts: true, + } + txData := SingleTransactionDataWrapper{} + resp, err := c.APIClient.R(). + SetBody(request). + SetResult(txData). + Post("/v2/transfers") + + c.l.Info(). + Str(NodeURL, c.Config.URL). + Str("From", fromAddress). + Str("To", toAddress). + Str("Amount", amount.String()). 
+ Msg("Sending Native Token") + if err == nil { + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + } + + return txData.Data, err +} + +// ReadVRFKeys reads all VRF keys from the Plugin node +func (c *PluginClient) ReadVRFKeys() (*VRFKeys, *http.Response, error) { + vrfKeys := &VRFKeys{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading VRF Keys") + resp, err := c.APIClient.R(). + SetResult(vrfKeys). + Get("/v2/keys/vrf") + if err != nil { + return nil, nil, err + } + if len(vrfKeys.Data) == 0 { + c.l.Warn().Str(NodeURL, c.Config.URL).Msg("Found no VRF Keys on the node") + } + return vrfKeys, resp.RawResponse, err +} + +// MustCreateVRFKey creates a VRF key on the Plugin node +// and returns error if the request is unsuccessful +func (c *PluginClient) MustCreateVRFKey() (*VRFKey, error) { + vrfKey := &VRFKey{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Creating VRF Key") + resp, err := c.APIClient.R(). + SetResult(vrfKey). + Post("/v2/keys/vrf") + if err == nil { + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + } + return vrfKey, err +} + +// ExportVRFKey exports a vrf key by key id +func (c *PluginClient) ExportVRFKey(keyId string) (*VRFExportKey, *http.Response, error) { + vrfExportKey := &VRFExportKey{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("ID", keyId).Msg("Exporting VRF Key") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "keyId": keyId, + }). + SetResult(vrfExportKey). + Post("/v2/keys/vrf/export/{keyId}") + if err != nil { + return nil, nil, err + } + return vrfExportKey, resp.RawResponse, err +} + +// ImportVRFKey import vrf key +func (c *PluginClient) ImportVRFKey(vrfExportKey *VRFExportKey) (*VRFKey, *http.Response, error) { + vrfKey := &VRFKey{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("ID", vrfExportKey.VrfKey.Address).Msg("Importing VRF Key") + resp, err := c.APIClient.R(). + SetBody(vrfExportKey). + SetResult(vrfKey). 
+ Post("/v2/keys/vrf/import") + if err != nil { + return nil, nil, err + } + return vrfKey, resp.RawResponse, err +} + +// MustCreateDkgSignKey creates a DKG Sign key on the Plugin node +// and returns error if the request is unsuccessful +func (c *PluginClient) MustCreateDkgSignKey() (*DKGSignKey, error) { + dkgSignKey := &DKGSignKey{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Creating DKG Sign Key") + resp, err := c.APIClient.R(). + SetResult(dkgSignKey). + Post("/v2/keys/dkgsign") + if err == nil { + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + } + return dkgSignKey, err +} + +// MustCreateDkgEncryptKey creates a DKG Encrypt key on the Plugin node +// and returns error if the request is unsuccessful +func (c *PluginClient) MustCreateDkgEncryptKey() (*DKGEncryptKey, error) { + dkgEncryptKey := &DKGEncryptKey{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Creating DKG Encrypt Key") + resp, err := c.APIClient.R(). + SetResult(dkgEncryptKey). + Post("/v2/keys/dkgencrypt") + if err == nil { + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + } + return dkgEncryptKey, err +} + +// MustReadDKGSignKeys reads all DKG Sign Keys from the Plugin node returns err if response not 200 +func (c *PluginClient) MustReadDKGSignKeys() (*DKGSignKeys, error) { + dkgSignKeys := &DKGSignKeys{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading DKG Sign Keys") + resp, err := c.APIClient.R(). + SetResult(dkgSignKeys). + Get("/v2/keys/dkgsign") + if err != nil { + return nil, err + } + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + return dkgSignKeys, err +} + +// MustReadDKGEncryptKeys reads all DKG Encrypt Keys from the Plugin node returns err if response not 200 +func (c *PluginClient) MustReadDKGEncryptKeys() (*DKGEncryptKeys, error) { + dkgEncryptKeys := &DKGEncryptKeys{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading DKG Encrypt Keys") + resp, err := c.APIClient.R(). + SetResult(dkgEncryptKeys). 
+		Get("/v2/keys/dkgencrypt")
+	if err != nil {
+		return nil, err
+	}
+	err = VerifyStatusCode(resp.StatusCode(), http.StatusOK)
+	return dkgEncryptKeys, err
+}
+
+// CreateCSAKey creates a CSA key on the Plugin node, only 1 CSA key per node
+func (c *PluginClient) CreateCSAKey() (*CSAKey, *http.Response, error) {
+	csaKey := &CSAKey{}
+	c.l.Info().Str(NodeURL, c.Config.URL).Msg("Creating CSA Key")
+	resp, err := c.APIClient.R().
+		SetResult(csaKey).
+		Post("/v2/keys/csa")
+	if err != nil {
+		return nil, nil, err
+	}
+	return csaKey, resp.RawResponse, err
+}
+
+func (c *PluginClient) MustReadCSAKeys() (*CSAKeys, *resty.Response, error) {
+	csaKeys, res, err := c.ReadCSAKeys()
+	if err != nil {
+		return nil, res, err
+	}
+	return csaKeys, res, VerifyStatusCodeWithResponse(res, http.StatusOK)
+}
+
+// ReadCSAKeys reads CSA keys from the Plugin node
+func (c *PluginClient) ReadCSAKeys() (*CSAKeys, *resty.Response, error) {
+	csaKeys := &CSAKeys{}
+	c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading CSA Keys")
+	resp, err := c.APIClient.R().
+		SetResult(csaKeys).
+		Get("/v2/keys/csa")
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(csaKeys.Data) == 0 {
+		c.l.Warn().Str(NodeURL, c.Config.URL).Msg("Found no CSA Keys on the node")
+	}
+	return csaKeys, resp, err
+}
+
+// CreateEI creates an EI on the Plugin node based on the provided attributes and returns the respective secrets
+func (c *PluginClient) CreateEI(eia *EIAttributes) (*EIKeyCreate, *http.Response, error) {
+	ei := EIKeyCreate{}
+	c.l.Info().Str(NodeURL, c.Config.URL).Str("Name", eia.Name).Msg("Creating External Initiator")
+	resp, err := c.APIClient.R().
+		SetBody(eia).
+		SetResult(&ei).
+ Post("/v2/external_initiators") + if err != nil { + return nil, nil, err + } + return &ei, resp.RawResponse, err +} + +// ReadEIs reads all of the configured EIs from the Plugin node +func (c *PluginClient) ReadEIs() (*EIKeys, *http.Response, error) { + ei := EIKeys{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading EI Keys") + resp, err := c.APIClient.R(). + SetResult(&ei). + Get("/v2/external_initiators") + if err != nil { + return nil, nil, err + } + return &ei, resp.RawResponse, err +} + +// DeleteEI deletes an external initiator in the Plugin node based on the provided name +func (c *PluginClient) DeleteEI(name string) (*http.Response, error) { + c.l.Info().Str(NodeURL, c.Config.URL).Str("Name", name).Msg("Deleting EI") + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "name": name, + }). + Delete("/v2/external_initiators/{name}") + if err != nil { + return nil, err + } + return resp.RawResponse, err +} + +// CreateCosmosChain creates a cosmos chain +func (c *PluginClient) CreateCosmosChain(chain *CosmosChainAttributes) (*CosmosChainCreate, *http.Response, error) { + response := CosmosChainCreate{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("Chain ID", chain.ChainID).Msg("Creating Cosmos Chain") + resp, err := c.APIClient.R(). + SetBody(chain). + SetResult(&response). + Post("/v2/chains/cosmos") + if err != nil { + return nil, nil, err + } + return &response, resp.RawResponse, err +} + +// CreateCosmosNode creates a cosmos node +func (c *PluginClient) CreateCosmosNode(node *CosmosNodeAttributes) (*CosmosNodeCreate, *http.Response, error) { + response := CosmosNodeCreate{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("Name", node.Name).Msg("Creating Cosmos Node") + resp, err := c.APIClient.R(). + SetBody(node). + SetResult(&response). 
+ Post("/v2/nodes/cosmos") + if err != nil { + return nil, nil, err + } + return &response, resp.RawResponse, err +} + +// CreateSolanaChain creates a solana chain +func (c *PluginClient) CreateSolanaChain(chain *SolanaChainAttributes) (*SolanaChainCreate, *http.Response, error) { + response := SolanaChainCreate{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("Chain ID", chain.ChainID).Msg("Creating Solana Chain") + resp, err := c.APIClient.R(). + SetBody(chain). + SetResult(&response). + Post("/v2/chains/solana") + if err != nil { + return nil, nil, err + } + return &response, resp.RawResponse, err +} + +// CreateSolanaNode creates a solana node +func (c *PluginClient) CreateSolanaNode(node *SolanaNodeAttributes) (*SolanaNodeCreate, *http.Response, error) { + response := SolanaNodeCreate{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("Name", node.Name).Msg("Creating Solana Node") + resp, err := c.APIClient.R(). + SetBody(node). + SetResult(&response). + Post("/v2/nodes/solana") + if err != nil { + return nil, nil, err + } + return &response, resp.RawResponse, err +} + +// CreateStarkNetChain creates a starknet chain +func (c *PluginClient) CreateStarkNetChain(chain *StarkNetChainAttributes) (*StarkNetChainCreate, *http.Response, error) { + response := StarkNetChainCreate{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("Chain ID", chain.ChainID).Msg("Creating StarkNet Chain") + resp, err := c.APIClient.R(). + SetBody(chain). + SetResult(&response). + Post("/v2/chains/starknet") + if err != nil { + return nil, nil, err + } + return &response, resp.RawResponse, err +} + +// CreateStarkNetNode creates a starknet node +func (c *PluginClient) CreateStarkNetNode(node *StarkNetNodeAttributes) (*StarkNetNodeCreate, *http.Response, error) { + response := StarkNetNodeCreate{} + c.l.Info().Str(NodeURL, c.Config.URL).Str("Name", node.Name).Msg("Creating StarkNet Node") + resp, err := c.APIClient.R(). + SetBody(node). + SetResult(&response). 
+ Post("/v2/nodes/starknet") + if err != nil { + return nil, nil, err + } + return &response, resp.RawResponse, err +} + +// InternalIP retrieves the inter-cluster IP of the Plugin node, for use with inter-node communications +func (c *PluginClient) InternalIP() string { + return c.Config.InternalIP +} + +// Profile starts a profile session on the Plugin node for a pre-determined length, then runs the provided function +// to profile it. +func (c *PluginClient) Profile(profileTime time.Duration, profileFunction func(*PluginClient)) (*PluginProfileResults, error) { + profileSeconds := int(profileTime.Seconds()) + profileResults := NewBlankPluginProfileResults() + profileErrorGroup := new(errgroup.Group) + var profileExecutedGroup sync.WaitGroup + c.l.Info().Int("Seconds to Profile", profileSeconds).Str(NodeURL, c.Config.URL).Msg("Starting Node PPROF session") + for _, rep := range profileResults.Reports { + profileExecutedGroup.Add(1) + profileReport := rep + // The profile function returns with the profile results after the profile time frame has concluded + // e.g. a profile API call of 5 seconds will start profiling, wait for 5 seconds, then send back results + profileErrorGroup.Go(func() error { + c.l.Debug().Str("Type", profileReport.Type).Msg("PROFILING") + profileExecutedGroup.Done() + resp, err := c.APIClient.R(). + SetPathParams(map[string]string{ + "reportType": profileReport.Type, + }). + SetQueryParams(map[string]string{ + "seconds": fmt.Sprint(profileSeconds), + }). + Get("/v2/debug/pprof/{reportType}") + if err != nil { + return err + } + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + if err != nil { + return err + } + c.l.Debug().Str("Type", profileReport.Type).Msg("DONE PROFILING") + profileReport.Data = resp.Body() + return err + }) + } + // Wait for the profiling to actually get triggered on the node before running the function to profile + // An imperfect solution, but an effective one. 
+ profileExecutedGroup.Wait() + + funcStart := time.Now() + // Feed this Plugin node into the profiling function + profileFunction(c) + actualRunTime := time.Since(funcStart) + actualSeconds := int(actualRunTime.Seconds()) + + if actualSeconds > profileSeconds { + c.l.Warn(). + Int("Actual Seconds", actualSeconds). + Int("Profile Seconds", profileSeconds). + Msg("Your profile function took longer than expected to run, increase profileTime") + } else if actualSeconds < profileSeconds && actualSeconds > 0 { + c.l.Warn(). + Int("Actual Seconds", actualSeconds). + Int("Profile Seconds", profileSeconds). + Msg("Your profile function took shorter than expected to run, you can decrease profileTime") + } + profileResults.ActualRunSeconds = actualSeconds + profileResults.ScheduledProfileSeconds = profileSeconds + return profileResults, profileErrorGroup.Wait() // Wait for all the results of the profiled function to come in +} + +// SetPageSize globally sets the page +func (c *PluginClient) SetPageSize(size int) { + c.pageSize = size +} + +// VerifyStatusCode verifies the status code of the response. Favor VerifyStatusCodeWithResponse over this for better errors +func VerifyStatusCode(actStatusCd, expStatusCd int) error { + if actStatusCd != expStatusCd { + return fmt.Errorf( + "unexpected response code, got %d, expected %d", + actStatusCd, + expStatusCd, + ) + } + return nil +} + +// VerifyStatusCodeWithResponse verifies the status code of the response and returns the response as part of the error. 
+// Favor this over VerifyStatusCode +func VerifyStatusCodeWithResponse(res *resty.Response, expStatusCd int) error { + actStatusCd := res.RawResponse.StatusCode + if actStatusCd != expStatusCd { + return fmt.Errorf( + "unexpected response code, got %d, expected %d, response: %s", + actStatusCd, + expStatusCd, + res.Body(), + ) + } + return nil +} + +func CreateNodeKeysBundle(nodes []*PluginClient, chainName string, chainId string) ([]NodeKeysBundle, []*CLNodesWithKeys, error) { + nkb := make([]NodeKeysBundle, 0) + var clNodes []*CLNodesWithKeys + for _, n := range nodes { + p2pkeys, err := n.MustReadP2PKeys() + if err != nil { + return nil, nil, err + } + + peerID := p2pkeys.Data[0].Attributes.PeerID + // If there is already a txkey present for the chain skip creating a new one + // otherwise the test logic will need multiple key management (like funding all the keys, + // for ocr scenarios adding all keys to ocr config) + var txKey *TxKey + txKeys, _, err := n.ReadTxKeys(chainName) + if err != nil { + return nil, nil, err + } + if _, ok := mapKeyTypeToChain[chainName]; ok { + for _, key := range txKeys.Data { + if key.Type == mapKeyTypeToChain[chainName] { + txKey = &TxKey{Data: key} + break + } + } + } + // if no txkey is found for the chain, create a new one + if txKey == nil { + txKey, _, err = n.CreateTxKey(chainName, chainId) + if err != nil { + return nil, nil, err + } + } + keys, _, err := n.ReadOCR2Keys() + if err != nil { + return nil, nil, err + } + var ocrKey *OCR2Key + for _, key := range keys.Data { + if key.Attributes.ChainType == chainName { + ocrKey = &OCR2Key{Data: key} + break + } + } + + if ocrKey == nil { + return nil, nil, fmt.Errorf("no OCR key found for chain %s", chainName) + } + ethAddress, err := n.PrimaryEthAddressForChain(chainId) + if err != nil { + return nil, nil, err + } + bundle := NodeKeysBundle{ + PeerID: peerID, + OCR2Key: *ocrKey, + TXKey: *txKey, + P2PKeys: *p2pkeys, + EthAddress: ethAddress, + } + nkb = append(nkb, bundle) + 
clNodes = append(clNodes, &CLNodesWithKeys{Node: n, KeysBundle: bundle}) + } + + return nkb, clNodes, nil +} + +// TrackForwarder track forwarder address in db. +func (c *PluginClient) TrackForwarder(chainID *big.Int, address common.Address) (*Forwarder, *http.Response, error) { + response := &Forwarder{} + request := ForwarderAttributes{ + ChainID: chainID.String(), + Address: address.Hex(), + } + c.l.Debug().Str(NodeURL, c.Config.URL). + Str("Forwarder address", (address).Hex()). + Str("Chain ID", chainID.String()). + Msg("Track forwarder") + resp, err := c.APIClient.R(). + SetBody(request). + SetResult(response). + Post("/v2/nodes/evm/forwarders/track") + if err != nil { + return nil, nil, err + } + err = VerifyStatusCode(resp.StatusCode(), http.StatusCreated) + if err != nil { + return nil, nil, err + } + + return response, resp.RawResponse, err +} + +// GetForwarders get list of tracked forwarders +func (c *PluginClient) GetForwarders() (*Forwarders, *http.Response, error) { + response := &Forwarders{} + c.l.Info().Str(NodeURL, c.Config.URL).Msg("Reading Tracked Forwarders") + resp, err := c.APIClient.R(). + SetResult(response). + Get("/v2/nodes/evm/forwarders") + if err != nil { + return nil, nil, err + } + err = VerifyStatusCode(resp.StatusCode(), http.StatusOK) + if err != nil { + return nil, nil, err + } + return response, resp.RawResponse, err +} + +// Replays log poller from block number +func (c *PluginClient) ReplayLogPollerFromBlock(fromBlock, evmChainID int64) (*ReplayResponse, *http.Response, error) { + specObj := &ReplayResponse{} + c.l.Info().Str(NodeURL, c.Config.URL).Int64("From block", fromBlock).Int64("EVM chain ID", evmChainID).Msg("Replaying Log Poller from block") + resp, err := c.APIClient.R(). + SetResult(&specObj). + SetQueryParams(map[string]string{ + "evmChainID": fmt.Sprint(evmChainID), + }). + SetPathParams(map[string]string{ + "fromBlock": fmt.Sprint(fromBlock), + }). 
+ Post("/v2/replay_from_block/{fromBlock}") + if err != nil { + return nil, nil, err + } + + return specObj, resp.RawResponse, err +} diff --git a/integration-tests/client/plugin_k8s.go b/integration-tests/client/plugin_k8s.go new file mode 100644 index 00000000..113ca916 --- /dev/null +++ b/integration-tests/client/plugin_k8s.go @@ -0,0 +1,146 @@ +// Package client enables interaction with APIs of test components like the mockserver and Plugin nodes +package client + +import ( + "os" + "regexp" + + "github.com/rs/zerolog/log" + + "github.com/goplugin/plugin-testing-framework/k8s/environment" +) + +type PluginK8sClient struct { + ChartName string + PodName string + *PluginClient +} + +// NewPlugin creates a new Plugin model using a provided config +func NewPluginK8sClient(c *PluginConfig, podName, chartName string) (*PluginK8sClient, error) { + rc, err := initRestyClient(c.URL, c.Email, c.Password, c.HTTPTimeout) + if err != nil { + return nil, err + } + _, isSet := os.LookupEnv("CL_CLIENT_DEBUG") + if isSet { + rc.SetDebug(true) + } + return &PluginK8sClient{ + PluginClient: &PluginClient{ + APIClient: rc, + pageSize: 25, + Config: c, + }, + ChartName: chartName, + PodName: podName, + }, nil +} + +// UpgradeVersion upgrades the plugin node to the new version +// Note: You need to call Run() on the test environment for changes to take effect +// Note: This function is not thread safe, call from a single thread +func (c *PluginK8sClient) UpgradeVersion(testEnvironment *environment.Environment, newImage, newVersion string) error { + log.Info(). + Str("Chart Name", c.ChartName). + Str("New Image", newImage). + Str("New Version", newVersion). 
+		Msg("Upgrading Plugin Node")
+	upgradeVals := map[string]any{
+		"plugin": map[string]any{
+			"image": map[string]any{
+				"image":   newImage,
+				"version": newVersion,
+			},
+		},
+	}
+	_, err := testEnvironment.UpdateHelm(c.ChartName, upgradeVals)
+	return err
+}
+
+// Name Plugin instance chart/service name
+func (c *PluginK8sClient) Name() string {
+	return c.ChartName
+}
+
+func parseHostname(s string) string {
+	r := regexp.MustCompile(`://(?P<hostname>.*):`) // named group required: `(?P` without `<name>` is invalid and panics in MustCompile
+	return r.FindStringSubmatch(s)[1]
+}
+
+// ConnectPluginNodes creates new Plugin clients
+func ConnectPluginNodes(e *environment.Environment) ([]*PluginK8sClient, error) {
+	var clients []*PluginK8sClient
+	for _, nodeDetails := range e.PluginNodeDetails {
+		c, err := NewPluginK8sClient(&PluginConfig{
+			URL:        nodeDetails.LocalIP,
+			Email:      "notreal@fakeemail.ch",
+			Password:   "fj293fbBnlQ!f9vNs",
+			InternalIP: parseHostname(nodeDetails.InternalIP),
+		}, nodeDetails.PodName, nodeDetails.ChartName)
+		if err != nil {
+			return nil, err
+		}
+		log.Debug().
+			Str("URL", c.Config.URL).
+			Str("Internal IP", c.Config.InternalIP).
+			Str("Chart Name", nodeDetails.ChartName).
+			Str("Pod Name", nodeDetails.PodName).
+			Msg("Connected to Plugin node")
+		clients = append(clients, c)
+	}
+	return clients, nil
+}
+
+// ReconnectPluginNodes reconnects to Plugin nodes after they have been modified, say through a Helm upgrade
+// Note: Experimental as of now, will likely not work predictably.
+func ReconnectPluginNodes(testEnvironment *environment.Environment, nodes []*PluginK8sClient) (err error) {
+	for i := range nodes { // index loop: assigning to a `range` copy would silently discard the new client
+		for _, details := range testEnvironment.PluginNodeDetails {
+			if details.ChartName == nodes[i].ChartName { // Make the link from client to pod consistent
+				nodes[i], err = NewPluginK8sClient(&PluginConfig{
+					URL:        details.LocalIP,
+					Email:      "notreal@fakeemail.ch",
+					Password:   "fj293fbBnlQ!f9vNs",
+					InternalIP: parseHostname(details.InternalIP),
+				}, details.PodName, details.ChartName)
+				if err != nil {
+					return err
+				}
+				log.Debug().
+					Str("URL", nodes[i].Config.URL).
+					Str("Internal IP", nodes[i].Config.InternalIP).
+					Str("Chart Name", nodes[i].ChartName).
+					Str("Pod Name", nodes[i].PodName).
+					Msg("Reconnected to Plugin node")
+			}
+		}
+	}
+	return nil
+}
+
+// ConnectPluginNodeURLs creates new Plugin clients based on just URLs, should only be used inside K8s tests
+func ConnectPluginNodeURLs(urls []string) ([]*PluginK8sClient, error) {
+	var clients []*PluginK8sClient
+	for _, url := range urls {
+		c, err := ConnectPluginNodeURL(url)
+		if err != nil {
+			return nil, err
+		}
+		clients = append(clients, c)
+	}
+	return clients, nil
+}
+
+// ConnectPluginNodeURL creates a new Plugin client based on just a URL, should only be used inside K8s tests
+func ConnectPluginNodeURL(url string) (*PluginK8sClient, error) {
+	return NewPluginK8sClient(&PluginConfig{
+		URL:        url,
+		Email:      "notreal@fakeemail.ch",
+		Password:   "fj293fbBnlQ!f9vNs",
+		InternalIP: parseHostname(url),
+	},
+		parseHostname(url), // a decent guess
+		"connectedNodeByURL", // an intentionally bad decision
+	)
+}
diff --git a/integration-tests/client/plugin_models.go b/integration-tests/client/plugin_models.go
new file mode 100644
index 00000000..902d6d6c
--- /dev/null
+++ b/integration-tests/client/plugin_models.go
@@ -0,0 +1,1493 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"text/template"
+	"time"
+
+	"github.com/pelletier/go-toml/v2"
+	"gopkg.in/guregu/null.v4"
+
+	
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils/big" + "github.com/goplugin/pluginv3.0/v2/core/services/job" +) + +// EIServiceConfig represents External Initiator service config +type EIServiceConfig struct { + URL string +} + +// PluginConfig represents the variables needed to connect to a Plugin node +type PluginConfig struct { + URL string + Email string + Password string + InternalIP string + HTTPTimeout *time.Duration +} + +// ResponseSlice is the generic model that can be used for all Plugin API responses that are an slice +type ResponseSlice struct { + Data []map[string]interface{} +} + +// Response is the generic model that can be used for all Plugin API responses +type Response struct { + Data map[string]interface{} +} + +// JobRunsResponse job runs +type JobRunsResponse struct { + Data []RunsResponseData `json:"data"` + Meta RunsMetaResponse `json:"meta"` +} + +// RunsResponseData runs response data +type RunsResponseData struct { + Type string `json:"type"` + ID string `json:"id"` + Attributes RunsAttributesResponse `json:"attributes"` +} + +// RunsAttributesResponse runs attributes +type RunsAttributesResponse struct { + Meta interface{} `json:"meta"` + Errors []interface{} `json:"errors"` + Inputs RunInputs `json:"inputs"` + TaskRuns []TaskRun `json:"taskRuns"` + CreatedAt time.Time `json:"createdAt"` + FinishedAt time.Time `json:"finishedAt"` +} + +// DecodeLogTaskRun is "ethabidecodelog" task run info, +// also used for "RequestID" tracing in perf tests +type DecodeLogTaskRun struct { + Fee int `json:"fee"` + JobID []int `json:"jobID"` + KeyHash []int `json:"keyHash"` + RequestID []byte `json:"requestID"` + Sender string `json:"sender"` +} + +// TaskRun is pipeline task run info +type TaskRun struct { + Type string `json:"type"` + CreatedAt time.Time `json:"createdAt"` + FinishedAt time.Time `json:"finishedAt"` + Output string `json:"output"` + Error interface{} `json:"error"` + DotID string `json:"dotId"` +} + +type NodeKeysBundle struct { 
+ OCR2Key OCR2Key + PeerID string + TXKey TxKey + P2PKeys P2PKeys + EthAddress string +} + +// RunInputs run inputs (value) +type RunInputs struct { + Parse int `json:"parse"` +} + +// RunsMetaResponse runs meta +type RunsMetaResponse struct { + Count int `json:"count"` +} + +// BridgeType is the model that represents the bridge when read or created on a Plugin node +type BridgeType struct { + Data BridgeTypeData `json:"data"` +} + +// BridgeTypeData is the model that represents the bridge when read or created on a Plugin node +type BridgeTypeData struct { + Attributes BridgeTypeAttributes `json:"attributes"` +} + +// Bridges is the model that represents the bridges when read on a Plugin node +type Bridges struct { + Data []BridgeTypeData `json:"data"` +} + +// BridgeTypeAttributes is the model that represents the bridge when read or created on a Plugin node +type BridgeTypeAttributes struct { + Name string `json:"name"` + URL string `json:"url"` + RequestData string `json:"requestData,omitempty"` +} + +// Session is the form structure used for authenticating +type Session struct { + Email string `json:"email"` + Password string `json:"password"` +} + +// ExportedEVMKey holds all details needed to recreate a private key of the Plugin node +type ExportedEVMKey struct { + Address string `json:"address"` + Crypto struct { + Cipher string `json:"cipher"` + CipherText string `json:"ciphertext"` + CipherParams struct { + Iv string `json:"iv"` + } `json:"cipherparams"` + Kdf string `json:"kdf"` + KDFParams struct { + DkLen int `json:"dklen"` + N int `json:"n"` + P int `json:"p"` + R int `json:"r"` + Salt string `json:"salt"` + } `json:"kdfparams"` + Mac string `json:"mac"` + } `json:"crypto"` + ID string `json:"id"` + Version int `json:"version"` +} + +// VRFExportKey is the model that represents the exported VRF key +type VRFExportKey struct { + PublicKey string `json:"PublicKey"` + VrfKey struct { + Address string `json:"address"` + Crypto struct { + Cipher string 
`json:"cipher"` + Ciphertext string `json:"ciphertext"` + Cipherparams struct { + Iv string `json:"iv"` + } `json:"cipherparams"` + Kdf string `json:"kdf"` + Kdfparams struct { + Dklen int `json:"dklen"` + N int `json:"n"` + P int `json:"p"` + R int `json:"r"` + Salt string `json:"salt"` + } `json:"kdfparams"` + Mac string `json:"mac"` + } `json:"crypto"` + Version int `json:"version"` + } `json:"vrf_key"` +} + +// VRFKeyAttributes is the model that represents the created VRF key attributes when read +type VRFKeyAttributes struct { + Compressed string `json:"compressed"` + Uncompressed string `json:"uncompressed"` + Hash string `json:"hash"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + DeletedAt interface{} `json:"deletedAt"` +} + +// VRFKeyData is the model that represents the created VRF key's data when read +type VRFKeyData struct { + Type string `json:"type"` + ID string `json:"id"` + Attributes VRFKeyAttributes `json:"attributes"` +} + +// VRFKey is the model that represents the created VRF key when read +type VRFKey struct { + Data VRFKeyData `json:"data"` +} + +// VRFKeys is the model that represents the created VRF keys when read +type VRFKeys struct { + Data []VRFKey `json:"data"` +} + +// DKGSignKeyAttributes is the model that represents the created DKG Sign key attributes when read +type DKGSignKeyAttributes struct { + PublicKey string `json:"publicKey"` +} + +// DKGSignKeyData is the model that represents the created DKG Sign key's data when read +type DKGSignKeyData struct { + Type string `json:"type"` + ID string `json:"id"` + Attributes DKGSignKeyAttributes `json:"attributes"` +} + +// DKGSignKey is the model that represents the created DKG Sign key when read +type DKGSignKey struct { + Data DKGSignKeyData `json:"data"` +} + +// DKGSignKeys is the model that represents the created DKGSignData key when read +type DKGSignKeys struct { + Data []DKGSignKey `json:"data"` +} + +// DKGEncryptKeyAttributes is the model 
that represents the created DKG Encrypt key attributes when read +type DKGEncryptKeyAttributes struct { + PublicKey string `json:"publicKey"` +} + +// DKGEncryptKeyData is the model that represents the created DKG Encrypt key's data when read +type DKGEncryptKeyData struct { + Type string `json:"type"` + ID string `json:"id"` + Attributes DKGEncryptKeyAttributes `json:"attributes"` +} + +// DKGEncryptKey is the model that represents the created DKG Encrypt key when read +type DKGEncryptKey struct { + Data DKGEncryptKeyData `json:"data"` +} + +// DKGEncryptKeys is the model that represents the created DKGEncryptKeys key when read +type DKGEncryptKeys struct { + Data []DKGEncryptKey `json:"data"` +} + +// OCRKeys is the model that represents the created OCR keys when read +type OCRKeys struct { + Data []OCRKeyData `json:"data"` +} + +// OCRKey is the model that represents the created OCR keys when read +type OCRKey struct { + Data OCRKeyData `json:"data"` +} + +// OCRKeyData is the model that represents the created OCR keys when read +type OCRKeyData struct { + Attributes OCRKeyAttributes `json:"attributes"` + ID string `json:"id"` +} + +// OCRKeyAttributes is the model that represents the created OCR keys when read +type OCRKeyAttributes struct { + ConfigPublicKey string `json:"configPublicKey"` + OffChainPublicKey string `json:"offChainPublicKey"` + OnChainSigningAddress string `json:"onChainSigningAddress"` +} + +// OCR2Keys is the model that represents the created OCR2 keys when read +type OCR2Keys struct { + Data []OCR2KeyData `json:"data"` +} + +// OCR2Key is the model that represents the created OCR2 keys when read +type OCR2Key struct { + Data OCR2KeyData `json:"data"` +} + +// OCR2KeyData is the model that represents the created OCR2 keys when read +type OCR2KeyData struct { + Type string `json:"type"` + Attributes OCR2KeyAttributes `json:"attributes"` + ID string `json:"id"` +} + +// OCR2KeyAttributes is the model that represents the created OCR2 keys when 
read +type OCR2KeyAttributes struct { + ChainType string `json:"chainType"` + ConfigPublicKey string `json:"configPublicKey"` + OffChainPublicKey string `json:"offchainPublicKey"` + OnChainPublicKey string `json:"onchainPublicKey"` +} + +// P2PKeys is the model that represents the created P2P keys when read +type P2PKeys struct { + Data []P2PKeyData `json:"data"` +} + +// P2PKey is the model that represents the created P2P keys when read +type P2PKey struct { + Data P2PKeyData `json:"data"` +} + +// P2PKeyData is the model that represents the created P2P keys when read +type P2PKeyData struct { + Attributes P2PKeyAttributes `json:"attributes"` +} + +// P2PKeyAttributes is the model that represents the created P2P keys when read +type P2PKeyAttributes struct { + ID int `json:"id"` + PeerID string `json:"peerId"` + PublicKey string `json:"publicKey"` +} + +// CSAKeys is the model that represents the created CSA keys when read +type CSAKeys struct { + Data []CSAKeyData `json:"data"` +} + +// CSAKey is the model that represents the created CSA key when created +type CSAKey struct { + Data CSAKeyData `json:"data"` +} + +// CSAKeyData is the model that represents the created CSA key when read +type CSAKeyData struct { + Type string `json:"type"` + ID string `json:"id"` + Attributes CSAKeyAttributes `json:"attributes"` +} + +// CSAKeyAttributes is the model that represents the attributes of a CSA Key +type CSAKeyAttributes struct { + PublicKey string `json:"publicKey"` + Version int `json:"version"` +} + +// ETHKeys is the model that represents the created ETH keys when read +type ETHKeys struct { + Data []ETHKeyData `json:"data"` +} + +// ETHKey is the model that represents the created ETH keys when read +type ETHKey struct { + Data ETHKeyData `json:"data"` +} + +// ETHKeyData is the model that represents the created ETH keys when read +type ETHKeyData struct { + Attributes ETHKeyAttributes `json:"attributes"` +} + +// ETHKeyAttributes is the model that represents the 
created ETH keys when read +type ETHKeyAttributes struct { + Address string `json:"address"` + ETHBalance string `json:"ethBalance"` + ChainID string `json:"evmChainID"` +} + +// TxKeys is the model that represents the created keys when read +type TxKeys struct { + Data []TxKeyData `json:"data"` +} + +// TxKey is the model that represents the created keys when read +type TxKey struct { + Data TxKeyData `json:"data"` +} + +// TxKeyData is the model that represents the created keys when read +type TxKeyData struct { + Type string `json:"type"` + ID string `json:"id"` + Attributes TxKeyAttributes `json:"attributes"` +} + +// TxKeyAttributes is the model that represents the created keys when read +type TxKeyAttributes struct { + PublicKey string `json:"publicKey"` + Address string `json:"address"` + StarkKey string `json:"starkPubKey,omitempty"` +} + +type SingleTransactionDataWrapper struct { + Data TransactionData `json:"data"` +} + +type SendEtherRequest struct { + DestinationAddress string `json:"address"` + FromAddress string `json:"from"` + Amount string `json:"amount"` + EVMChainID int `json:"evmChainID,omitempty"` + AllowHigherAmounts bool `json:"allowHigherAmounts"` +} + +// EIAttributes is the model that represents the EI keys when created and read +type EIAttributes struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + IncomingAccessKey string `json:"incomingAccessKey,omitempty"` + AccessKey string `json:"accessKey,omitempty"` + Secret string `json:"incomingSecret,omitempty"` + OutgoingToken string `json:"outgoingToken,omitempty"` + OutgoingSecret string `json:"outgoingSecret,omitempty"` +} + +// EIKeys is the model that represents the EI configs when read +type EIKeys struct { + Data []EIKey `json:"data"` +} + +// EIKeyCreate is the model that represents the EI config when created +type EIKeyCreate struct { + Data EIKey `json:"data"` +} + +// EIKey is the model that represents the EI configs when read +type EIKey struct { + 
Attributes EIAttributes `json:"attributes"` +} + +type CosmosChainConfig struct { + BlockRate null.String + BlocksUntilTxTimeout null.Int + ConfirmPollPeriod null.String + FallbackGasPriceULuna null.String + GasLimitMultiplier null.Float + MaxMsgsPerBatch null.Int +} + +// CosmosChainAttributes is the model that represents the terra chain +type CosmosChainAttributes struct { + ChainID string `json:"chainID"` + Config CosmosChainConfig `json:"config"` +} + +// CosmosChain is the model that represents the terra chain when read +type CosmosChain struct { + Attributes CosmosChainAttributes `json:"attributes"` +} + +// CosmosChainCreate is the model that represents the terra chain when created +type CosmosChainCreate struct { + Data CosmosChain `json:"data"` +} + +// CosmosNodeAttributes is the model that represents the terra noded +type CosmosNodeAttributes struct { + Name string `json:"name"` + CosmosChainID string `json:"cosmosChainId"` + TendermintURL string `json:"tendermintURL" db:"tendermint_url"` +} + +// CosmosNode is the model that represents the terra node when read +type CosmosNode struct { + Attributes CosmosNodeAttributes `json:"attributes"` +} + +// CosmosNodeCreate is the model that represents the terra node when created +type CosmosNodeCreate struct { + Data CosmosNode `json:"data"` +} + +type SolanaChainConfig struct { + BlockRate null.String + ConfirmPollPeriod null.String + OCR2CachePollPeriod null.String + OCR2CacheTTL null.String + TxTimeout null.String + SkipPreflight null.Bool + Commitment null.String +} + +// SolanaChainAttributes is the model that represents the solana chain +type SolanaChainAttributes struct { + ChainID string `json:"chainID"` + Config SolanaChainConfig `json:"config"` +} + +// SolanaChain is the model that represents the solana chain when read +type SolanaChain struct { + Attributes SolanaChainAttributes `json:"attributes"` +} + +// SolanaChainCreate is the model that represents the solana chain when created +type 
SolanaChainCreate struct { + Data SolanaChain `json:"data"` +} + +// SolanaNodeAttributes is the model that represents the solana node +type SolanaNodeAttributes struct { + Name string `json:"name"` + SolanaChainID string `json:"solanaChainId" db:"solana_chain_id"` + SolanaURL string `json:"solanaURL" db:"solana_url"` +} + +// SolanaNode is the model that represents the solana node when read +type SolanaNode struct { + Attributes SolanaNodeAttributes `json:"attributes"` +} + +// SolanaNodeCreate is the model that represents the solana node when created +type SolanaNodeCreate struct { + Data SolanaNode `json:"data"` +} + +// StarkNetChainConfig holds the configurable parameters of a StarkNet chain. +type StarkNetChainConfig struct { + OCR2CachePollPeriod null.String + OCR2CacheTTL null.String + RequestTimeout null.String + TxTimeout null.Bool // NOTE(review): null.Bool looks wrong for a timeout — sibling configs (e.g. SolanaChainConfig.TxTimeout) use null.String durations; confirm intended type + TxSendFrequency null.String + TxMaxBatchSize null.String +} + +// StarkNetChainAttributes is the model that represents the starknet chain +type StarkNetChainAttributes struct { + Type string `json:"type"` + ChainID string `json:"chainID"` + Config StarkNetChainConfig `json:"config"` +} + +// StarkNetChain is the model that represents the starknet chain when read +type StarkNetChain struct { + Attributes StarkNetChainAttributes `json:"attributes"` +} + +// StarkNetChainCreate is the model that represents the starknet chain when created +type StarkNetChainCreate struct { + Data StarkNetChain `json:"data"` +} + +// StarkNetNodeAttributes is the model that represents the starknet node +type StarkNetNodeAttributes struct { + Name string `json:"name"` + ChainID string `json:"chainId"` + Url string `json:"url"` +} + +// StarkNetNode is the model that represents the starknet node when read +type StarkNetNode struct { + Attributes StarkNetNodeAttributes `json:"attributes"` +} + +// StarkNetNodeCreate is the model that represents the starknet node when created +type StarkNetNodeCreate struct { + Data StarkNetNode `json:"data"` +} + +// SpecForm is the form used when creating a v2 job spec, containing the TOML of 
the v2 job +type SpecForm struct { + TOML string `json:"toml"` +} + +// Spec represents a job specification that contains information about the job spec +type Spec struct { + Data SpecData `json:"data"` +} + +// SpecData contains the ID of the job spec +type SpecData struct { + ID string `json:"id"` +} + +// JobForm is the form used when creating a v2 job spec, containing the TOML of the v2 job +type JobForm struct { + TOML string `json:"toml"` +} + +// Job contains the job data for a given job +type Job struct { + Data JobData `json:"data"` +} + +// JobData contains the ID for a given job +type JobData struct { + ID string `json:"id"` +} + +// JobSpec represents the different possible job types that Plugin nodes can handle +type JobSpec interface { + Type() string + // String Returns TOML representation of the job + String() (string, error) +} + +// CronJobSpec represents a cron job spec +type CronJobSpec struct { + Schedule string `toml:"schedule"` // CRON job style schedule string + ObservationSource string `toml:"observationSource"` // List of commands for the Plugin node +} + +// Type is cron +func (c *CronJobSpec) Type() string { return "cron" } + +// String representation of the job +func (c *CronJobSpec) String() (string, error) { + cronJobTemplateString := `type = "cron" +schemaVersion = 1 +schedule = "{{.Schedule}}" +observationSource = """ +{{.ObservationSource}} +"""` + return MarshallTemplate(c, "CRON Job", cronJobTemplateString) +} + +// PipelineSpec common API call pipeline +type PipelineSpec struct { + BridgeTypeAttributes BridgeTypeAttributes + DataPath string +} + +// Type is common_pipeline +func (d *PipelineSpec) Type() string { + return "common_pipeline" +} + +// String representation of the pipeline +func (d *PipelineSpec) String() (string, error) { + sourceString := ` + fetch [type=bridge name="{{.BridgeTypeAttributes.Name}}" requestData="{{.BridgeTypeAttributes.RequestData}}"]; + parse [type=jsonparse path="{{.DataPath}}"]; + fetch -> 
parse;` + return MarshallTemplate(d, "API call pipeline template", sourceString) +} + +// getOptionalSimBlock validates an optional simulation-block selector (nil, "latest" or "pending") +// and renders it as a `block="..."` pipeline attribute, or "" when unset. +func getOptionalSimBlock(simBlock *string) (string, error) { + optionalSimBlock := "" + if simBlock != nil { + if *simBlock != "latest" && *simBlock != "pending" { + return "", fmt.Errorf("invalid simulation block value: %s", *simBlock) + } + optionalSimBlock = fmt.Sprintf("block=\"%s\"", *simBlock) + } + return optionalSimBlock, nil +} + +// VRFV2PlusTxPipelineSpec VRFv2 Plus request with tx callback +type VRFV2PlusTxPipelineSpec struct { + Address string + EstimateGasMultiplier float64 + FromAddress string + SimulationBlock *string // can be nil, "latest" or "pending". +} + +// Type returns the type of the pipeline +func (d *VRFV2PlusTxPipelineSpec) Type() string { + return "vrf_pipeline_v2plus" +} + +// String representation of the pipeline +func (d *VRFV2PlusTxPipelineSpec) String() (string, error) { + optionalSimBlock, err := getOptionalSimBlock(d.SimulationBlock) + if err != nil { + return "", err + } + sourceTemplate := ` +decode_log [type=ethabidecodelog + abi="RandomWordsRequested(bytes32 indexed keyHash,uint256 requestId,uint256 preSeed,uint256 indexed subId,uint16 minimumRequestConfirmations,uint32 callbackGasLimit,uint32 numWords,bytes extraArgs,address indexed sender)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +generate_proof [type=vrfv2plus + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +estimate_gas [type=estimategaslimit + to="{{ .Address }}" + multiplier="{{ .EstimateGasMultiplier }}" + data="$(generate_proof.output)" + %s] +simulate_fulfillment [type=ethcall + from="{{ .FromAddress }}" + to="{{ .Address }}" + gas="$(estimate_gas)" + gasPrice="$(jobSpec.maxGasPrice)" + extractRevertReason=true + contract="{{ .Address }}" + data="$(generate_proof.output)" + %s] +decode_log->generate_proof->estimate_gas->simulate_fulfillment` + + sourceString := 
fmt.Sprintf(sourceTemplate, optionalSimBlock, optionalSimBlock) + return MarshallTemplate(d, "VRFV2 Plus pipeline template", sourceString) +} + +// VRFV2TxPipelineSpec VRFv2 request with tx callback +type VRFV2TxPipelineSpec struct { + Address string + EstimateGasMultiplier float64 + FromAddress string + SimulationBlock *string // can be nil, "latest" or "pending". +} + +// Type returns the type of the pipeline +func (d *VRFV2TxPipelineSpec) Type() string { + return "vrf_pipeline_v2" +} + +// String representation of the pipeline +func (d *VRFV2TxPipelineSpec) String() (string, error) { + optionalSimBlock, err := getOptionalSimBlock(d.SimulationBlock) + if err != nil { + return "", err + } + sourceTemplate := ` +decode_log [type=ethabidecodelog + abi="RandomWordsRequested(bytes32 indexed keyHash,uint256 requestId,uint256 preSeed,uint64 indexed subId,uint16 minimumRequestConfirmations,uint32 callbackGasLimit,uint32 numWords,address indexed sender)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrfv2 + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +estimate_gas [type=estimategaslimit + to="{{ .Address }}" + multiplier="{{ .EstimateGasMultiplier }}" + data="$(vrf.output)" + %s] +simulate [type=ethcall + from="{{ .FromAddress }}" + to="{{ .Address }}" + gas="$(estimate_gas)" + gasPrice="$(jobSpec.maxGasPrice)" + extractRevertReason=true + contract="{{ .Address }}" + data="$(vrf.output)" + %s] +decode_log->vrf->estimate_gas->simulate` + + sourceString := fmt.Sprintf(sourceTemplate, optionalSimBlock, optionalSimBlock) + return MarshallTemplate(d, "VRFV2 pipeline template", sourceString) +} + +// VRFTxPipelineSpec VRF request with tx callback +type VRFTxPipelineSpec struct { + Address string +} + +// Type returns the type of the pipeline +func (d *VRFTxPipelineSpec) Type() string { + return "vrf_pipeline" +} + +// String representation of 
the pipeline +func (d *VRFTxPipelineSpec) String() (string, error) { + sourceString := ` +decode_log [type=ethabidecodelog + abi="RandomnessRequest(bytes32 keyHash,uint256 seed,bytes32 indexed jobID,address sender,uint256 fee,bytes32 requestID)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] +vrf [type=vrf + publicKey="$(jobSpec.publicKey)" + requestBlockHash="$(jobRun.logBlockHash)" + requestBlockNumber="$(jobRun.logBlockNumber)" + topics="$(jobRun.logTopics)"] +encode_tx [type=ethabiencode + abi="fulfillRandomnessRequest(bytes proof)" + data="{\\"proof\\": $(vrf)}"] +submit_tx [type=ethtx to="{{.Address}}" + data="$(encode_tx)" + txMeta="{\\"requestTxHash\\": $(jobRun.logTxHash),\\"requestID\\": $(decode_log.requestID),\\"jobID\\": $(jobSpec.databaseID)}"] +decode_log->vrf->encode_tx->submit_tx` + return MarshallTemplate(d, "VRF pipeline template", sourceString) +} + +// DirectRequestTxPipelineSpec oracle request with tx callback +type DirectRequestTxPipelineSpec struct { + BridgeTypeAttributes BridgeTypeAttributes + DataPath string +} + +// Type returns the type of the pipeline +func (d *DirectRequestTxPipelineSpec) Type() string { + return "directrequest_pipeline" +} + +// String representation of the pipeline +func (d *DirectRequestTxPipelineSpec) String() (string, error) { + sourceString := ` + decode_log [type=ethabidecodelog + abi="OracleRequest(bytes32 indexed specId, address requester, bytes32 requestId, uint256 payment, address callbackAddr, bytes4 callbackFunctionId, uint256 cancelExpiration, uint256 dataVersion, bytes data)" + data="$(jobRun.logData)" + topics="$(jobRun.logTopics)"] + encode_tx [type=ethabiencode + abi="fulfill(bytes32 _requestId, uint256 _data)" + data=<{ + "_requestId": $(decode_log.requestId), + "_data": $(parse) + }> + ] + fetch [type=bridge name="{{.BridgeTypeAttributes.Name}}" requestData="{{.BridgeTypeAttributes.RequestData}}"]; + parse [type=jsonparse path="{{.DataPath}}"] + submit [type=ethtx 
to="$(decode_log.requester)" data="$(encode_tx)" failOnRevert=true] + decode_log -> fetch -> parse -> encode_tx -> submit` + return MarshallTemplate(d, "Direct request pipeline template", sourceString) +} + +// DirectRequestJobSpec represents a direct request spec +type DirectRequestJobSpec struct { + Name string `toml:"name"` + ContractAddress string `toml:"contractAddress"` + EVMChainID string `toml:"evmChainID"` + ExternalJobID string `toml:"externalJobID"` + MinIncomingConfirmations string `toml:"minIncomingConfirmations"` + ObservationSource string `toml:"observationSource"` // List of commands for the Plugin node +} + +// Type returns the type of the pipeline +func (d *DirectRequestJobSpec) Type() string { return "directrequest" } + +// String representation of the pipeline +func (d *DirectRequestJobSpec) String() (string, error) { + directRequestTemplateString := `type = "directrequest" +schemaVersion = 1 +name = "{{.Name}}" +maxTaskDuration = "99999s" +contractAddress = "{{.ContractAddress}}" +evmChainID = "{{.EVMChainID}}" +externalJobID = "{{.ExternalJobID}}" +minIncomingConfirmations = {{.MinIncomingConfirmations}} +observationSource = """ +{{.ObservationSource}} +"""` + return MarshallTemplate(d, "Direct Request Job", directRequestTemplateString) +} + +// FluxMonitorJobSpec represents a flux monitor spec +type FluxMonitorJobSpec struct { + Name string `toml:"name"` + ContractAddress string `toml:"contractAddress"` // Address of the Flux Monitor script + EVMChainID string `toml:"evmChainID"` // Not optional + Precision int `toml:"precision"` // Optional + Threshold float32 `toml:"threshold"` // Optional + AbsoluteThreshold float32 `toml:"absoluteThreshold"` // Optional + IdleTimerPeriod time.Duration `toml:"idleTimerPeriod"` // Optional + IdleTimerDisabled bool `toml:"idleTimerDisabled"` // Optional + PollTimerPeriod time.Duration `toml:"pollTimerPeriod"` // Optional + PollTimerDisabled bool `toml:"pollTimerDisabled"` // Optional + MaxTaskDuration 
time.Duration `toml:"maxTaskDuration"` // Optional + ObservationSource string `toml:"observationSource"` // List of commands for the Plugin node +} + +// Type returns the type of the job +func (f *FluxMonitorJobSpec) Type() string { return "fluxmonitor" } + +// String representation of the job +func (f *FluxMonitorJobSpec) String() (string, error) { + // maxTaskDuration falls back to "180s" when MaxTaskDuration is zero, matching the other optional fields. + fluxMonitorTemplateString := `type = "fluxmonitor" +schemaVersion = 1 +name = "{{.Name}}" +contractAddress = "{{.ContractAddress}}" +evmChainID = "{{.EVMChainID}}" +precision ={{if not .Precision}} 0 {{else}} {{.Precision}} {{end}} +threshold ={{if not .Threshold}} 0.5 {{else}} {{.Threshold}} {{end}} +absoluteThreshold ={{if not .AbsoluteThreshold}} 0.1 {{else}} {{.AbsoluteThreshold}} {{end}} + +idleTimerPeriod ={{if not .IdleTimerPeriod}} "1ms" {{else}} "{{.IdleTimerPeriod}}" {{end}} +idleTimerDisabled ={{if not .IdleTimerDisabled}} false {{else}} {{.IdleTimerDisabled}} {{end}} + +pollTimerPeriod ={{if not .PollTimerPeriod}} "1m" {{else}} "{{.PollTimerPeriod}}" {{end}} +pollTimerDisabled ={{if not .PollTimerDisabled}} false {{else}} {{.PollTimerDisabled}} {{end}} + +maxTaskDuration ={{if not .MaxTaskDuration}} "180s" {{else}} "{{.MaxTaskDuration}}" {{end}} + +observationSource = """ +{{.ObservationSource}} +"""` + return MarshallTemplate(f, "Flux Monitor Job", fluxMonitorTemplateString) +} + +// KeeperJobSpec represents a V2 keeper spec +type KeeperJobSpec struct { + Name string `toml:"name"` + ContractAddress string `toml:"contractAddress"` + FromAddress string `toml:"fromAddress"` // Hex representation of the from address + EVMChainID string `toml:"evmChainID"` // Not optional + MinIncomingConfirmations int `toml:"minIncomingConfirmations"` +} + +// Type returns the type of the job +func (k *KeeperJobSpec) Type() string { return "keeper" } + +// String representation of the job +func (k *KeeperJobSpec) String() (string, error) { + keeperTemplateString := ` +type = "keeper" +schemaVersion = 1 +name = "{{.Name}}" +contractAddress 
= "{{.ContractAddress}}" +fromAddress = "{{.FromAddress}}" +evmChainID = "{{.EVMChainID}}" +minIncomingConfirmations = {{.MinIncomingConfirmations}} +` + return MarshallTemplate(k, "Keeper Job", keeperTemplateString) +} + +// OCRBootstrapJobSpec represents the spec for bootstrapping an OCR job, given to one node that then must be linked +// back to by others by OCRTaskJobSpecs +type OCRBootstrapJobSpec struct { + Name string `toml:"name"` + BlockChainTimeout time.Duration `toml:"blockchainTimeout"` // Optional + ContractConfirmations int `toml:"contractConfigConfirmations"` // Optional + TrackerPollInterval time.Duration `toml:"contractConfigTrackerPollInterval"` // Optional + TrackerSubscribeInterval time.Duration `toml:"contractConfigTrackerSubscribeInterval"` // Optional + ContractAddress string `toml:"contractAddress"` // Address of the OCR contract + EVMChainID string `toml:"evmChainID"` + IsBootstrapPeer bool `toml:"isBootstrapPeer"` // Typically true + P2PPeerID string `toml:"p2pPeerID"` // This node's P2P ID +} + +// Type returns the type of the job +func (o *OCRBootstrapJobSpec) Type() string { return "offchainreporting" } + +// String representation of the job +func (o *OCRBootstrapJobSpec) String() (string, error) { + ocrTemplateString := `type = "offchainreporting" +schemaVersion = 1 +blockchainTimeout ={{if not .BlockChainTimeout}} "20s" {{else}} {{.BlockChainTimeout}} {{end}} +contractConfigConfirmations ={{if not .ContractConfirmations}} 3 {{else}} {{.ContractConfirmations}} {{end}} +contractConfigTrackerPollInterval ={{if not .TrackerPollInterval}} "1m" {{else}} {{.TrackerPollInterval}} {{end}} +contractConfigTrackerSubscribeInterval ={{if not .TrackerSubscribeInterval}} "2m" {{else}} {{.TrackerSubscribeInterval}} {{end}} +contractAddress = "{{.ContractAddress}}" +evmChainID = "{{.EVMChainID}}" +p2pBootstrapPeers = [] +isBootstrapPeer = {{.IsBootstrapPeer}} +p2pPeerID = "{{.P2PPeerID}}"` + return MarshallTemplate(o, "OCR Bootstrap Job", 
ocrTemplateString) +} + +// OCRTaskJobSpec represents an OCR job that is given to other nodes, meant to communicate with the bootstrap node, +// and provide their answers +type OCRTaskJobSpec struct { + Name string `toml:"name"` + BlockChainTimeout time.Duration `toml:"blockchainTimeout"` // Optional + ContractConfirmations int `toml:"contractConfigConfirmations"` // Optional + TrackerPollInterval time.Duration `toml:"contractConfigTrackerPollInterval"` // Optional + TrackerSubscribeInterval time.Duration `toml:"contractConfigTrackerSubscribeInterval"` // Optional + ForwardingAllowed bool `toml:"forwardingAllowed"` // Optional, by default false + ContractAddress string `toml:"contractAddress"` // Address of the OCR contract + EVMChainID string `toml:"evmChainID"` + P2PBootstrapPeers []*PluginClient `toml:"p2pBootstrapPeers"` // P2P ID of the bootstrap node + IsBootstrapPeer bool `toml:"isBootstrapPeer"` // Typically false + P2PPeerID string `toml:"p2pPeerID"` // This node's P2P ID + KeyBundleID string `toml:"keyBundleID"` // ID of this node's OCR key bundle + MonitoringEndpoint string `toml:"monitoringEndpoint"` // Typically "chain.link:4321" + TransmitterAddress string `toml:"transmitterAddress"` // ETH address this node will use to transmit its answer + ObservationSource string `toml:"observationSource"` // List of commands for the Plugin node +} + +// P2PData holds the remote ip and the peer id and port +type P2PData struct { + InternalIP string + InternalPort string + PeerID string +} + +func (p *P2PData) P2PV2Bootstrapper() string { + if p.InternalPort == "" { + p.InternalPort = "6690" + } + return fmt.Sprintf("%s@%s:%s", p.PeerID, p.InternalIP, p.InternalPort) +} + +// Type returns the type of the job +func (o *OCRTaskJobSpec) Type() string { return "offchainreporting" } + +// String representation of the job +func (o *OCRTaskJobSpec) String() (string, error) { + // Pre-process P2P data for easier templating + peers := []P2PData{} + for _, peer := range 
o.P2PBootstrapPeers { + p2pKeys, err := peer.MustReadP2PKeys() + if err != nil { + return "", err + } + peers = append(peers, P2PData{ + InternalIP: peer.InternalIP(), + PeerID: p2pKeys.Data[0].Attributes.PeerID, + }) + } + specWrap := struct { + Name string + BlockChainTimeout time.Duration + ContractConfirmations int + TrackerPollInterval time.Duration + TrackerSubscribeInterval time.Duration + ContractAddress string + EVMChainID string + P2PBootstrapPeers []P2PData + IsBootstrapPeer bool + P2PPeerID string + KeyBundleID string + MonitoringEndpoint string + TransmitterAddress string + ObservationSource string + ForwardingAllowed bool + }{ + Name: o.Name, + BlockChainTimeout: o.BlockChainTimeout, + ContractConfirmations: o.ContractConfirmations, + TrackerPollInterval: o.TrackerPollInterval, + TrackerSubscribeInterval: o.TrackerSubscribeInterval, + ContractAddress: o.ContractAddress, + EVMChainID: o.EVMChainID, + P2PBootstrapPeers: peers, + IsBootstrapPeer: o.IsBootstrapPeer, + P2PPeerID: o.P2PPeerID, + KeyBundleID: o.KeyBundleID, + MonitoringEndpoint: o.MonitoringEndpoint, + TransmitterAddress: o.TransmitterAddress, + ObservationSource: o.ObservationSource, + ForwardingAllowed: o.ForwardingAllowed, + } + // Results in /dns4//tcp/6690/p2p/12D3KooWAuC9xXBnadsYJpqzZZoB4rMRWqRGpxCrr2mjS7zCoAdN\ + ocrTemplateString := `type = "offchainreporting" +schemaVersion = 1 +blockchainTimeout ={{if not .BlockChainTimeout}} "20s" {{else}} {{.BlockChainTimeout}} {{end}} +contractConfigConfirmations ={{if not .ContractConfirmations}} 3 {{else}} {{.ContractConfirmations}} {{end}} +contractConfigTrackerPollInterval ={{if not .TrackerPollInterval}} "1m" {{else}} {{.TrackerPollInterval}} {{end}} +contractConfigTrackerSubscribeInterval ={{if not .TrackerSubscribeInterval}} "2m" {{else}} {{.TrackerSubscribeInterval}} {{end}} +contractAddress = "{{.ContractAddress}}" +evmChainID = "{{.EVMChainID}}" +{{if .P2PBootstrapPeers}} +p2pv2Bootstrappers = [{{range $peer := 
.P2PBootstrapPeers}}"{{$peer.PeerID}}@{{$peer.InternalIP}}:6690",{{end}}] +{{else}} +p2pv2Bootstrappers = [] +{{end}} +isBootstrapPeer = {{.IsBootstrapPeer}} +p2pPeerID = "{{.P2PPeerID}}" +keyBundleID = "{{.KeyBundleID}}" +monitoringEndpoint ={{if not .MonitoringEndpoint}} "chain.link:4321" {{else}} "{{.MonitoringEndpoint}}" {{end}} +transmitterAddress = "{{.TransmitterAddress}}" +forwardingAllowed = {{.ForwardingAllowed}} +observationSource = """ +{{.ObservationSource}} +"""` + + return MarshallTemplate(specWrap, "OCR Job", ocrTemplateString) +} + +// OCR2TaskJobSpec represents an OCR2 job that is given to other nodes, meant to communicate with the bootstrap node, +// and provide their answers +type OCR2TaskJobSpec struct { + Name string `toml:"name"` + JobType string `toml:"type"` + MaxTaskDuration string `toml:"maxTaskDuration"` // Optional + ForwardingAllowed bool `toml:"forwardingAllowed"` + OCR2OracleSpec job.OCR2OracleSpec + ObservationSource string `toml:"observationSource"` // List of commands for the Plugin node +} + +// Type returns the type of the job +func (o *OCR2TaskJobSpec) Type() string { return o.JobType } + +// String representation of the job +func (o *OCR2TaskJobSpec) String() (string, error) { + var feedID string + if o.OCR2OracleSpec.FeedID != nil { + feedID = o.OCR2OracleSpec.FeedID.Hex() + } + relayConfig, err := toml.Marshal(struct { + RelayConfig job.JSONConfig `toml:"relayConfig"` + }{RelayConfig: o.OCR2OracleSpec.RelayConfig}) + if err != nil { + return "", fmt.Errorf("failed to marshal relay config: %w", err) + } + specWrap := struct { + Name string + JobType string + MaxTaskDuration string + ForwardingAllowed bool + ContractID string + FeedID string + Relay string + PluginType string + RelayConfig string + PluginConfig map[string]interface{} + P2PV2Bootstrappers []string + OCRKeyBundleID string + MonitoringEndpoint string + TransmitterID string + BlockchainTimeout time.Duration + TrackerSubscribeInterval time.Duration + 
TrackerPollInterval time.Duration + ContractConfirmations uint16 + ObservationSource string + }{ + Name: o.Name, + JobType: o.JobType, + ForwardingAllowed: o.ForwardingAllowed, + MaxTaskDuration: o.MaxTaskDuration, + ContractID: o.OCR2OracleSpec.ContractID, + FeedID: feedID, + Relay: string(o.OCR2OracleSpec.Relay), + PluginType: string(o.OCR2OracleSpec.PluginType), + RelayConfig: string(relayConfig), + PluginConfig: o.OCR2OracleSpec.PluginConfig, + P2PV2Bootstrappers: o.OCR2OracleSpec.P2PV2Bootstrappers, + OCRKeyBundleID: o.OCR2OracleSpec.OCRKeyBundleID.String, + MonitoringEndpoint: o.OCR2OracleSpec.MonitoringEndpoint.String, + TransmitterID: o.OCR2OracleSpec.TransmitterID.String, + BlockchainTimeout: o.OCR2OracleSpec.BlockchainTimeout.Duration(), + ContractConfirmations: o.OCR2OracleSpec.ContractConfigConfirmations, + TrackerPollInterval: o.OCR2OracleSpec.ContractConfigTrackerPollInterval.Duration(), + ObservationSource: o.ObservationSource, + } + ocr2TemplateString := ` +type = "{{ .JobType }}" +name = "{{.Name}}" +forwardingAllowed = {{.ForwardingAllowed}} +{{- if .MaxTaskDuration}} +maxTaskDuration = "{{ .MaxTaskDuration }}" {{end}} +{{- if .PluginType}} +pluginType = "{{ .PluginType }}" {{end}} +relay = "{{.Relay}}" +schemaVersion = 1 +contractID = "{{.ContractID}}" +{{- if .FeedID}} +feedID = "{{.FeedID}}" +{{end}} +{{- if eq .JobType "offchainreporting2" }} +ocrKeyBundleID = "{{.OCRKeyBundleID}}" {{end}} +{{- if eq .JobType "offchainreporting2" }} +transmitterID = "{{.TransmitterID}}" {{end}} +{{- if .BlockchainTimeout}} +blockchainTimeout = "{{.BlockchainTimeout}}" +{{end}} +{{- if .ContractConfirmations}} +contractConfigConfirmations = {{.ContractConfirmations}} +{{end}} +{{- if .TrackerPollInterval}} +contractConfigTrackerPollInterval = "{{.TrackerPollInterval}}" +{{end}} +{{- if .TrackerSubscribeInterval}} +contractConfigTrackerSubscribeInterval = "{{.TrackerSubscribeInterval}}" +{{end}} +{{- if .P2PV2Bootstrappers}} +p2pv2Bootstrappers = [{{range 
.P2PV2Bootstrappers}}"{{.}}",{{end}}]{{end}} +{{- if .MonitoringEndpoint}} +monitoringEndpoint = "{{.MonitoringEndpoint}}" {{end}} +{{- if .ObservationSource}} +observationSource = """ +{{.ObservationSource}} +"""{{end}} +{{if eq .JobType "offchainreporting2" }} +[pluginConfig]{{range $key, $value := .PluginConfig}} +{{$key}} = {{$value}}{{end}} +{{end}} +{{.RelayConfig}} +` + return MarshallTemplate(specWrap, "OCR2 Job", ocr2TemplateString) +} + +// VRFV2PlusJobSpec represents a VRFV2 job +type VRFV2PlusJobSpec struct { + Name string `toml:"name"` + CoordinatorAddress string `toml:"coordinatorAddress"` // Address of the VRF CoordinatorV2 contract + PublicKey string `toml:"publicKey"` // Public key of the proving key + ExternalJobID string `toml:"externalJobID"` + ObservationSource string `toml:"observationSource"` // List of commands for the Plugin node + MinIncomingConfirmations int `toml:"minIncomingConfirmations"` + FromAddresses []string `toml:"fromAddresses"` + EVMChainID string `toml:"evmChainID"` + ForwardingAllowed bool `toml:"forwardingAllowed"` + BatchFulfillmentEnabled bool `toml:"batchFulfillmentEnabled"` + BatchFulfillmentGasMultiplier float64 `toml:"batchFulfillmentGasMultiplier"` + BackOffInitialDelay time.Duration `toml:"backOffInitialDelay"` + BackOffMaxDelay time.Duration `toml:"backOffMaxDelay"` + PollPeriod time.Duration `toml:"pollPeriod"` + RequestTimeout time.Duration `toml:"requestTimeout"` +} + +// Type returns the type of the job +func (v *VRFV2PlusJobSpec) Type() string { return "vrf" } + +// String representation of the job +func (v *VRFV2PlusJobSpec) String() (string, error) { + vrfTemplateString := ` +type = "vrf" +schemaVersion = 1 +name = "{{.Name}}" +coordinatorAddress = "{{.CoordinatorAddress}}" +fromAddresses = [{{range .FromAddresses}}"{{.}}",{{end}}] +evmChainID = "{{.EVMChainID}}" +minIncomingConfirmations = {{.MinIncomingConfirmations}} +publicKey = "{{.PublicKey}}" +externalJobID = "{{.ExternalJobID}}" 
+batchFulfillmentEnabled = {{.BatchFulfillmentEnabled}} +batchFulfillmentGasMultiplier = {{.BatchFulfillmentGasMultiplier}} +backoffInitialDelay = "{{.BackOffInitialDelay}}" +backoffMaxDelay = "{{.BackOffMaxDelay}}" +pollPeriod = "{{.PollPeriod}}" +requestTimeout = "{{.RequestTimeout}}" +observationSource = """ +{{.ObservationSource}} +""" +` + return MarshallTemplate(v, "VRFV2 PLUS Job", vrfTemplateString) +} + +// VRFV2JobSpec represents a VRFV2 job +type VRFV2JobSpec struct { + Name string `toml:"name"` + CoordinatorAddress string `toml:"coordinatorAddress"` // Address of the VRF CoordinatorV2 contract + PublicKey string `toml:"publicKey"` // Public key of the proving key + ExternalJobID string `toml:"externalJobID"` + ObservationSource string `toml:"observationSource"` // List of commands for the Plugin node + MinIncomingConfirmations int `toml:"minIncomingConfirmations"` + FromAddresses []string `toml:"fromAddresses"` + EVMChainID string `toml:"evmChainID"` + UseVRFOwner bool `toml:"useVRFOwner"` + VRFOwner string `toml:"vrfOwnerAddress"` + ForwardingAllowed bool `toml:"forwardingAllowed"` + CustomRevertsPipelineEnabled bool `toml:"customRevertsPipelineEnabled"` + PollPeriod time.Duration `toml:"pollPeriod"` + RequestTimeout time.Duration `toml:"requestTimeout"` + BatchFulfillmentEnabled bool `toml:"batchFulfillmentEnabled"` + BatchFulfillmentGasMultiplier float64 `toml:"batchFulfillmentGasMultiplier"` + BackOffInitialDelay time.Duration `toml:"backOffInitialDelay"` + BackOffMaxDelay time.Duration `toml:"backOffMaxDelay"` +} + +// Type returns the type of the job +func (v *VRFV2JobSpec) Type() string { return "vrf" } + +// String representation of the job +func (v *VRFV2JobSpec) String() (string, error) { + vrfTemplateString := ` +type = "vrf" +schemaVersion = 1 +name = "{{.Name}}" +forwardingAllowed = {{.ForwardingAllowed}} +coordinatorAddress = "{{.CoordinatorAddress}}" +fromAddresses = [{{range .FromAddresses}}"{{.}}",{{end}}] +evmChainID = 
"{{.EVMChainID}}" +minIncomingConfirmations = {{.MinIncomingConfirmations}} +publicKey = "{{.PublicKey}}" +externalJobID = "{{.ExternalJobID}}" +batchFulfillmentEnabled = {{.BatchFulfillmentEnabled}} +batchFulfillmentGasMultiplier = {{.BatchFulfillmentGasMultiplier}} +backoffInitialDelay = "{{.BackOffInitialDelay}}" +backoffMaxDelay = "{{.BackOffMaxDelay}}" +pollPeriod = "{{.PollPeriod}}" +requestTimeout = "{{.RequestTimeout}}" +customRevertsPipelineEnabled = {{.CustomRevertsPipelineEnabled}} +{{ if .UseVRFOwner }}vrfOwnerAddress = "{{.VRFOwner}}"{{ else }}{{ end }} +observationSource = """ +{{.ObservationSource}} +""" +` + // customRevertsPipelineEnabled now renders the spec field instead of a hardcoded true. + return MarshallTemplate(v, "VRFV2 Job", vrfTemplateString) +} + +// VRFJobSpec represents a VRF job +type VRFJobSpec struct { + Name string `toml:"name"` + CoordinatorAddress string `toml:"coordinatorAddress"` // Address of the VRF CoordinatorV2 contract + PublicKey string `toml:"publicKey"` // Public key of the proving key + EVMChainID string `toml:"evmChainID"` + ExternalJobID string `toml:"externalJobID"` + ObservationSource string `toml:"observationSource"` // List of commands for the Plugin node + MinIncomingConfirmations int `toml:"minIncomingConfirmations"` +} + +// Type returns the type of the job +func (v *VRFJobSpec) Type() string { return "vrf" } + +// String representation of the job +func (v *VRFJobSpec) String() (string, error) { + vrfTemplateString := ` +type = "vrf" +schemaVersion = 1 +name = "{{.Name}}" +coordinatorAddress = "{{.CoordinatorAddress}}" +minIncomingConfirmations = {{.MinIncomingConfirmations}} +publicKey = "{{.PublicKey}}" +evmChainID = "{{.EVMChainID}}" +externalJobID = "{{.ExternalJobID}}" +observationSource = """ +{{.ObservationSource}} +""" +` + return MarshallTemplate(v, "VRF Job", vrfTemplateString) +} + +// BlockhashStoreJobSpec represents a blockhashstore job +type BlockhashStoreJobSpec struct { + Name string `toml:"name"` + CoordinatorV2Address string `toml:"coordinatorV2Address"` + CoordinatorV2PlusAddress string 
`toml:"coordinatorV2PlusAddress"` + BlockhashStoreAddress string `toml:"blockhashStoreAddress"` + ExternalJobID string `toml:"externalJobID"` + FromAddresses []string `toml:"fromAddresses"` + EVMChainID string `toml:"evmChainID"` + ForwardingAllowed bool `toml:"forwardingAllowed"` + PollPeriod time.Duration `toml:"pollPeriod"` + RunTimeout time.Duration `toml:"runTimeout"` + WaitBlocks int `toml:"waitBlocks"` + LookbackBlocks int `toml:"lookbackBlocks"` +} + +// Type returns the type of the job +func (b *BlockhashStoreJobSpec) Type() string { return "blockhashstore" } + +// String representation of the job +func (b *BlockhashStoreJobSpec) String() (string, error) { + vrfTemplateString := ` +type = "blockhashstore" +schemaVersion = 1 +name = "{{.Name}}" +forwardingAllowed = {{.ForwardingAllowed}} +coordinatorV2Address = "{{.CoordinatorV2Address}}" +coordinatorV2PlusAddress = "{{.CoordinatorV2PlusAddress}}" +blockhashStoreAddress = "{{.BlockhashStoreAddress}}" +fromAddresses = [{{range .FromAddresses}}"{{.}}",{{end}}] +evmChainID = "{{.EVMChainID}}" +externalJobID = "{{.ExternalJobID}}" +waitBlocks = {{.WaitBlocks}} +lookbackBlocks = {{.LookbackBlocks}} +pollPeriod = "{{.PollPeriod}}" +runTimeout = "{{.RunTimeout}}" +` + return MarshallTemplate(b, "BlockhashStore Job", vrfTemplateString) +} + +// WebhookJobSpec represents a webhook job +type WebhookJobSpec struct { + Name string `toml:"name"` + Initiator string `toml:"initiator"` // External initiator name + InitiatorSpec string `toml:"initiatorSpec"` // External initiator spec object in stringified form + ObservationSource string `toml:"observationSource"` // List of commands for the Plugin node +} + +// Type returns the type of the job +func (w *WebhookJobSpec) Type() string { return "webhook" } + +// String representation of the job +func (w *WebhookJobSpec) String() (string, error) { + webHookTemplateString := `type = "webhook" +schemaVersion = 1 +name = "{{.Name}}" +externalInitiators = [ + { name = 
"{{.Initiator}}", spec = "{{.InitiatorSpec}}"} +] +observationSource = """ +{{.ObservationSource}} +"""` + return MarshallTemplate(w, "Webhook Job", webHookTemplateString) +} + +// ObservationSourceSpecHTTP creates a http GET task spec for json data +func ObservationSourceSpecHTTP(url string) string { + return fmt.Sprintf(` + fetch [type=http method=GET url="%s"]; + parse [type=jsonparse path="data,result"]; + fetch -> parse;`, url) +} + +// ObservationSourceSpecBridge creates a bridge task spec for json data +func ObservationSourceSpecBridge(bta *BridgeTypeAttributes) string { + return fmt.Sprintf(` + fetch [type=bridge name="%s" requestData="%s"]; + parse [type=jsonparse path="data,result"]; + fetch -> parse;`, bta.Name, bta.RequestData) +} + +// marshallTemplate Helper to marshall templates +func MarshallTemplate(jobSpec interface{}, name, templateString string) (string, error) { + var buf bytes.Buffer + tmpl, err := template.New(name).Parse(templateString) + if err != nil { + return "", err + } + err = tmpl.Execute(&buf, jobSpec) + if err != nil { + return "", err + } + return buf.String(), err +} + +type TransactionsData struct { + Data []TransactionData `json:"data"` + Meta TransactionsMetaData `json:"meta"` +} + +type TransactionData struct { + Type string `json:"type"` + ID string `json:"id"` + Attributes TransactionAttributes `json:"attributes"` +} + +type TransactionAttributes struct { + State string `json:"state"` + Data string `json:"data"` + From string `json:"from"` + To string `json:"to"` + Value string `json:"value"` + ChainID string `json:"evmChainID"` + GasLimit string `json:"gasLimit"` + GasPrice string `json:"gasPrice"` + Hash string `json:"hash"` + RawHex string `json:"rawHex"` + Nonce string `json:"nonce"` + SentAt string `json:"sentAt"` +} + +type TransactionsMetaData struct { + Count int `json:"count"` +} + +// PluginProfileResults holds the results of asking the Plugin node to run a PPROF session +type PluginProfileResults struct { + 
Reports []*PluginProfileResult + ScheduledProfileSeconds int // How long the profile was scheduled to last + ActualRunSeconds int // How long the target function to profile actually took to execute + NodeIndex int +} + +// PluginProfileResult contains the result of a single PPROF run +type PluginProfileResult struct { + Type string + Data []byte +} + +// NewBlankPluginProfileResults returns all the standard types of profile results with blank data +func NewBlankPluginProfileResults() *PluginProfileResults { + results := &PluginProfileResults{ + Reports: make([]*PluginProfileResult, 0), + } + profileStrings := []string{ + "allocs", // A sampling of all past memory allocations + "block", // Stack traces that led to blocking on synchronization primitives + // "cmdline", // The command line invocation of the current program + "goroutine", // Stack traces of all current goroutines + "heap", // A sampling of memory allocations of live objects. + "mutex", // Stack traces of holders of contended mutexes + "profile", // CPU profile. + "threadcreate", // Stack traces that led to the creation of new OS threads + "trace", // A trace of execution of the current program. 
+ } + for _, profile := range profileStrings { + results.Reports = append(results.Reports, &PluginProfileResult{Type: profile}) + } + return results +} + +type CLNodesWithKeys struct { + Node *PluginClient + KeysBundle NodeKeysBundle +} + +// Forwarders is the model that represents the created Forwarders when read +type Forwarders struct { + Data []ForwarderData `json:"data"` +} + +// Forwarder the model that represents the created Forwarder when created +type Forwarder struct { + Data ForwarderData `json:"data"` +} + +// ForwarderData is the model that represents the created Forwarder when read +type ForwarderData struct { + Type string `json:"type"` + ID string `json:"id"` + Attributes ForwarderAttributes `json:"attributes"` +} + +// ForwarderAttributes is the model that represents attributes of a Forwarder +type ForwarderAttributes struct { + Address string `json:"address"` + ChainID string `json:"evmChainId"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` +} + +type ReplayResponse struct { + Data ReplayResponseData `json:"data"` +} + +type ReplayResponseData struct { + Attributes ReplayResponseAttributes `json:"attributes"` +} + +type ReplayResponseAttributes struct { + Message string `json:"message"` + EVMChainID *big.Big `json:"evmChainID"` +} diff --git a/integration-tests/client/plugin_models_test.go b/integration-tests/client/plugin_models_test.go new file mode 100644 index 00000000..ceb3ade9 --- /dev/null +++ b/integration-tests/client/plugin_models_test.go @@ -0,0 +1,135 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/codec" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + evmtypes "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm/types" +) + +func TestOCR2TaskJobSpec_String(t *testing.T) { + for _, tt := range []struct { + name string + spec OCR2TaskJobSpec + exp string + }{ + { + name: "chain-reader-codec", + spec: 
OCR2TaskJobSpec{ + OCR2OracleSpec: job.OCR2OracleSpec{ + RelayConfig: map[string]interface{}{ + "chainID": 1337, + "fromBlock": 42, + "chainReader": evmtypes.ChainReaderConfig{ + Contracts: map[string]evmtypes.ChainContractReader{ + "median": { + ContractABI: `[ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "requester", + "type": "address" + } + ], + "name": "RoundRequested", + "type": "event" + } +] +`, + Configs: map[string]*evmtypes.ChainReaderDefinition{ + "LatestTransmissionDetails": { + ChainSpecificName: "latestTransmissionDetails", + OutputModifications: codec.ModifiersConfig{ + &codec.EpochToTimeModifierConfig{ + Fields: []string{"LatestTimestamp_"}, + }, + &codec.RenameModifierConfig{ + Fields: map[string]string{ + "LatestAnswer_": "LatestAnswer", + "LatestTimestamp_": "LatestTimestamp", + }, + }, + }, + }, + "LatestRoundRequested": { + ChainSpecificName: "RoundRequested", + ReadType: evmtypes.Event, + }, + }, + }, + }, + }, + "codec": evmtypes.CodecConfig{ + Configs: map[string]evmtypes.ChainCodecConfig{ + "MedianReport": { + TypeABI: `[ + { + "Name": "Timestamp", + "Type": "uint32" + } +] +`, + }, + }, + }, + }, + PluginConfig: map[string]interface{}{"juelsPerFeeCoinSource": ` // data source 1 + ds1 [type=bridge name="%s"]; + ds1_parse [type=jsonparse path="data"]; + ds1_multiply [type=multiply times=2]; + + // data source 2 + ds2 [type=http method=GET url="%s"]; + ds2_parse [type=jsonparse path="data"]; + ds2_multiply [type=multiply times=2]; + + ds1 -> ds1_parse -> ds1_multiply -> answer1; + ds2 -> ds2_parse -> ds2_multiply -> answer1; + + answer1 [type=median index=0]; +`, + }, + }, + }, + exp: ` +type = "" +name = "" +forwardingAllowed = false +relay = "" +schemaVersion = 1 +contractID = "" + +[relayConfig] +chainID = 1337 +fromBlock = 42 + +[relayConfig.chainReader] +[relayConfig.chainReader.contracts] +[relayConfig.chainReader.contracts.median] +contractABI = "[\n {\n \"anonymous\": false,\n 
\"inputs\": [\n {\n \"indexed\": true,\n \"internalType\": \"address\",\n \"name\": \"requester\",\n \"type\": \"address\"\n }\n ],\n \"name\": \"RoundRequested\",\n \"type\": \"event\"\n }\n]\n" + +[relayConfig.chainReader.contracts.median.configs] +LatestRoundRequested = "{\n \"chainSpecificName\": \"RoundRequested\",\n \"readType\": \"event\"\n}\n" +LatestTransmissionDetails = "{\n \"chainSpecificName\": \"latestTransmissionDetails\",\n \"output_modifications\": [\n {\n \"Fields\": [\n \"LatestTimestamp_\"\n ],\n \"Type\": \"epoch to time\"\n },\n {\n \"Fields\": {\n \"LatestAnswer_\": \"LatestAnswer\",\n \"LatestTimestamp_\": \"LatestTimestamp\"\n },\n \"Type\": \"rename\"\n }\n ]\n}\n" + +[relayConfig.codec] +[relayConfig.codec.configs] +[relayConfig.codec.configs.MedianReport] +typeABI = "[\n {\n \"Name\": \"Timestamp\",\n \"Type\": \"uint32\"\n }\n]\n" + +`, + }, + } { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.spec.String() + require.NoError(t, err) + require.Equal(t, tt.exp, got) + }) + } +} diff --git a/integration-tests/config/config.go b/integration-tests/config/config.go new file mode 100644 index 00000000..6d00ceb3 --- /dev/null +++ b/integration-tests/config/config.go @@ -0,0 +1,76 @@ +package config + +var ( + BaseOCR1Config = `[OCR] +Enabled = true + +[P2P] +[P2P.V2] +AnnounceAddresses = ["0.0.0.0:6690"] +ListenAddresses = ["0.0.0.0:6690"]` + + BaseOCR2Config = `[Feature] +LogPoller = true + +[OCR2] +Enabled = true + +[P2P] +[P2P.V2] +AnnounceAddresses = ["0.0.0.0:6690"] +ListenAddresses = ["0.0.0.0:6690"]` + + ForwarderNetworkDetailConfig = `[EVM.Transactions] +ForwardersEnabled = true` + + BaseVRFV2NetworkDetailTomlConfig = `BlockBackfillDepth = 500 +MinIncomingConfirmations = 3 +[EVM.GasEstimator] +LimitDefault = 3500000 +[EVM.Transactions] +MaxQueued = 10000 +` + + DefaultOCR2VRFNetworkDetailTomlConfig = `FinalityDepth = 5 +[EVM.GasEstimator] +LimitDefault = 3_500_000 +PriceMax = 100000000000 +FeeCapDefault = 100000000000` + + 
BaseMercuryTomlConfig = `[Feature] +LogPoller = true + +[Log] +Level = 'debug' +JSONConsole = true + +[WebServer] +AllowOrigins = '*' +HTTPPort = 6688 +SecureCookies = false + +[WebServer.TLS] +HTTPSPort = 0 + +[WebServer.RateLimit] +Authenticated = 2000 +Unauthenticated = 100 + +[JobPipeline] +MaxSuccessfulRuns = 0 + +[OCR2] +Enabled = true +CaptureEATelemetry = true + +[P2P] +[P2P.V2] +ListenAddresses = ['0.0.0.0:6690']` + + TelemetryIngressConfig = `[TelemetryIngress] +UniConn = false +Logging = true +ServerPubKey = '8fa807463ad73f9ee855cfd60ba406dcf98a2855b3dd8af613107b0f6890a707' +URL = 'oti:1337' +` +) diff --git a/integration-tests/contracts/README.md b/integration-tests/contracts/README.md new file mode 100644 index 00000000..abcbddd6 --- /dev/null +++ b/integration-tests/contracts/README.md @@ -0,0 +1,5 @@ +# Contracts + +Contains all code to launch, and interact with smart contracts on a blockchain. + +All contracts are generalized by interfaces in order to simplify test logic across different chains. 
diff --git a/integration-tests/contracts/contract_deployer.go b/integration-tests/contracts/contract_deployer.go new file mode 100644 index 00000000..b041087b --- /dev/null +++ b/integration-tests/contracts/contract_deployer.go @@ -0,0 +1,1770 @@ +package contracts + +import ( + "context" + "errors" + "fmt" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/goplugin/libocr/gethwrappers/offchainaggregator" + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + ocrConfigHelper "github.com/goplugin/libocr/offchainreporting/confighelper" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_mock_ethlink_aggregator" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_load_test_client" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_v1_events_mock" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_consumer_benchmark" + automationForwarderLogic "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_forwarder_logic" + registrar21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_registrar_wrapper2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flags_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/functions_billing_registry_events_mock" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/functions_oracle_events_mock" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/gas_wrapper" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/gas_wrapper_mock" + iregistry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_consumer_performance_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_consumer_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registrar_wrapper1_2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registrar_wrapper1_2_mock" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registrar_wrapper2_0" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_logic1_3" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_logic2_0" + registrylogica21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_logic_a_wrapper_2_1" + registrylogicb21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_logic_b_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1_mock" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_3" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" + registry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + le "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_emitter" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_triggered_streams_lookup_wrapper" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_upkeep_counter_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_aggregator_proxy" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_ethlink_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_gas_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/operator_factory" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/oracle_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/perform_data_checker_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/simple_log_upkeep_counter_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/streams_lookup_upkeep_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/test_api_consumer_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/upkeep_counter_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/upkeep_perform_counter_restrictive_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/upkeep_transcoder" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/fee_manager" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/reward_manager" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/verifier" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/shared/generated/werc20_mock" + + eth_contracts "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" +) + +// ContractDeployer is an interface for abstracting the contract deployment methods across network implementations +type ContractDeployer interface { + DeployAPIConsumer(linkAddr string) (APIConsumer, error) + DeployOracle(linkAddr string) (Oracle, error) + 
DeployFlags(rac string) (Flags, error) + DeployFluxAggregatorContract(linkAddr string, fluxOptions FluxAggregatorOptions) (FluxAggregator, error) + DeployLinkTokenContract() (LinkToken, error) + DeployWERC20Mock() (WERC20Mock, error) + LoadLinkToken(address common.Address) (LinkToken, error) + DeployOffChainAggregator(linkAddr string, offchainOptions OffchainOptions) (OffchainAggregator, error) + LoadOffChainAggregator(address *common.Address) (OffchainAggregator, error) + DeployVRFContract() (VRF, error) + DeployMockETHPLIFeed(answer *big.Int) (MockETHPLIFeed, error) + DeployVRFMockETHPLIFeed(answer *big.Int) (VRFMockETHPLIFeed, error) + LoadETHPLIFeed(address common.Address) (MockETHPLIFeed, error) + DeployMockGasFeed(answer *big.Int) (MockGasFeed, error) + LoadGasFeed(address common.Address) (MockGasFeed, error) + DeployKeeperRegistrar(registryVersion eth_contracts.KeeperRegistryVersion, linkAddr string, registrarSettings KeeperRegistrarSettings) (KeeperRegistrar, error) + LoadKeeperRegistrar(address common.Address, registryVersion eth_contracts.KeeperRegistryVersion) (KeeperRegistrar, error) + DeployUpkeepTranscoder() (UpkeepTranscoder, error) + LoadUpkeepTranscoder(address common.Address) (UpkeepTranscoder, error) + DeployKeeperRegistry(opts *KeeperRegistryOpts) (KeeperRegistry, error) + LoadKeeperRegistry(address common.Address, registryVersion eth_contracts.KeeperRegistryVersion) (KeeperRegistry, error) + DeployKeeperConsumer(updateInterval *big.Int) (KeeperConsumer, error) + DeployAutomationLogTriggerConsumer(testInterval *big.Int) (KeeperConsumer, error) + DeployAutomationSimpleLogTriggerConsumer() (KeeperConsumer, error) + DeployAutomationStreamsLookupUpkeepConsumer(testRange *big.Int, interval *big.Int, useArbBlock bool, staging bool, verify bool) (KeeperConsumer, error) + DeployAutomationLogTriggeredStreamsLookupUpkeepConsumer() (KeeperConsumer, error) + DeployKeeperConsumerPerformance( + testBlockRange, + averageCadence, + checkGasToBurn, + 
performGasToBurn *big.Int, + ) (KeeperConsumerPerformance, error) + DeployKeeperConsumerBenchmark() (AutomationConsumerBenchmark, error) + LoadKeeperConsumerBenchmark(address common.Address) (AutomationConsumerBenchmark, error) + DeployKeeperPerformDataChecker(expectedData []byte) (KeeperPerformDataChecker, error) + DeployUpkeepCounter(testRange *big.Int, interval *big.Int) (UpkeepCounter, error) + DeployUpkeepPerformCounterRestrictive(testRange *big.Int, averageEligibilityCadence *big.Int) (UpkeepPerformCounterRestrictive, error) + DeployVRFConsumer(linkAddr string, coordinatorAddr string) (VRFConsumer, error) + DeployVRFOwner(coordinatorAddr string) (VRFOwner, error) + DeployVRFCoordinatorTestV2(linkAddr string, bhsAddr string, linkEthFeedAddr string) (*EthereumVRFCoordinatorTestV2, error) + DeployVRFConsumerV2(linkAddr string, coordinatorAddr string) (VRFConsumerV2, error) + DeployVRFv2Consumer(coordinatorAddr string) (VRFv2Consumer, error) + DeployVRFv2LoadTestConsumer(coordinatorAddr string) (VRFv2LoadTestConsumer, error) + DeployVRFV2WrapperLoadTestConsumer(linkAddr string, vrfV2WrapperAddr string) (VRFv2WrapperLoadTestConsumer, error) + DeployVRFv2PlusLoadTestConsumer(coordinatorAddr string) (VRFv2PlusLoadTestConsumer, error) + DeployVRFV2PlusWrapperLoadTestConsumer(linkAddr string, vrfV2PlusWrapperAddr string) (VRFv2PlusWrapperLoadTestConsumer, error) + DeployVRFCoordinator(linkAddr string, bhsAddr string) (VRFCoordinator, error) + DeployVRFCoordinatorV2(linkAddr string, bhsAddr string, linkEthFeedAddr string) (VRFCoordinatorV2, error) + DeployVRFCoordinatorV2_5(bhsAddr string) (VRFCoordinatorV2_5, error) + DeployVRFCoordinatorV2PlusUpgradedVersion(bhsAddr string) (VRFCoordinatorV2PlusUpgradedVersion, error) + DeployVRFV2Wrapper(linkAddr string, linkEthFeedAddr string, coordinatorAddr string) (VRFV2Wrapper, error) + DeployVRFV2PlusWrapper(linkAddr string, linkEthFeedAddr string, coordinatorAddr string) (VRFV2PlusWrapper, error) + DeployDKG() (DKG, error) + 
DeployOCR2VRFCoordinator(beaconPeriodBlocksCount *big.Int, linkAddr string) (VRFCoordinatorV3, error) + DeployVRFBeacon(vrfCoordinatorAddress string, linkAddress string, dkgAddress string, keyId string) (VRFBeacon, error) + DeployVRFBeaconConsumer(vrfCoordinatorAddress string, beaconPeriodBlockCount *big.Int) (VRFBeaconConsumer, error) + DeployBlockhashStore() (BlockHashStore, error) + DeployOperatorFactory(linkAddr string) (OperatorFactory, error) + DeployStaking(params eth_contracts.StakingPoolConstructorParams) (Staking, error) + DeployBatchBlockhashStore(blockhashStoreAddr string) (BatchBlockhashStore, error) + DeployFunctionsLoadTestClient(router string) (FunctionsLoadTestClient, error) + DeployFunctionsOracleEventsMock() (FunctionsOracleEventsMock, error) + DeployFunctionsBillingRegistryEventsMock() (FunctionsBillingRegistryEventsMock, error) + DeployStakingEventsMock() (StakingEventsMock, error) + DeployFunctionsV1EventsMock() (FunctionsV1EventsMock, error) + DeployOffchainAggregatorEventsMock() (OffchainAggregatorEventsMock, error) + DeployMockAggregatorProxy(aggregatorAddr string) (MockAggregatorProxy, error) + DeployOffchainAggregatorV2(linkAddr string, offchainOptions OffchainOptions) (OffchainAggregatorV2, error) + LoadOffChainAggregatorV2(address *common.Address) (OffchainAggregatorV2, error) + DeployKeeperRegistryCheckUpkeepGasUsageWrapper(keeperRegistryAddr string) (KeeperRegistryCheckUpkeepGasUsageWrapper, error) + DeployKeeperRegistry11Mock() (KeeperRegistry11Mock, error) + DeployKeeperRegistrar12Mock() (KeeperRegistrar12Mock, error) + DeployKeeperGasWrapperMock() (KeeperGasWrapperMock, error) + DeployMercuryVerifierContract(verifierProxyAddr common.Address) (MercuryVerifier, error) + DeployMercuryVerifierProxyContract(accessControllerAddr common.Address) (MercuryVerifierProxy, error) + DeployMercuryFeeManager(linkAddress common.Address, nativeAddress common.Address, proxyAddress common.Address, rewardManagerAddress common.Address) 
(MercuryFeeManager, error) + DeployMercuryRewardManager(linkAddress common.Address) (MercuryRewardManager, error) + DeployLogEmitterContract() (LogEmitter, error) + DeployMultiCallContract() (common.Address, error) +} + +// NewContractDeployer returns an instance of a contract deployer based on the client type +func NewContractDeployer(bcClient blockchain.EVMClient, logger zerolog.Logger) (ContractDeployer, error) { + switch clientImpl := bcClient.Get().(type) { + case *blockchain.EthereumClient: + return NewEthereumContractDeployer(clientImpl, logger), nil + case *blockchain.KlaytnClient: + return &KlaytnContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.MetisClient: + return &MetisContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.ArbitrumClient: + return &ArbitrumContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.OptimismClient: + return &OptimismContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.RSKClient: + return &RSKContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.PolygonClient: + return &PolygonContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.CeloClient: + return &CeloContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.QuorumClient: + return &QuorumContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.BSCClient: + return &BSCContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.ScrollClient: + return &ScrollContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.PolygonZkEvmClient: + return &PolygonZkEvmContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.LineaClient: + return &LineaContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, 
nil + case *blockchain.FantomClient: + return &FantomContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.KromaClient: + return &KromaContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + case *blockchain.WeMixClient: + return &WeMixContractDeployer{NewEthereumContractDeployer(clientImpl, logger)}, nil + } + return nil, errors.New("unknown blockchain client implementation for contract deployer, register blockchain client in NewContractDeployer") +} + +// EthereumContractDeployer provides the implementations for deploying ETH (EVM) based contracts +type EthereumContractDeployer struct { + client blockchain.EVMClient + l zerolog.Logger +} + +// KlaytnContractDeployer wraps ethereum contract deployments for Klaytn +type KlaytnContractDeployer struct { + *EthereumContractDeployer +} + +// MetisContractDeployer wraps ethereum contract deployments for Metis +type MetisContractDeployer struct { + *EthereumContractDeployer +} + +// ArbitrumContractDeployer wraps ethereum contract deployments for Arbitrum +type ArbitrumContractDeployer struct { + *EthereumContractDeployer +} + +// OptimismContractDeployer wraps for Optimism +type OptimismContractDeployer struct { + *EthereumContractDeployer +} + +// RSKContractDeployer wraps for RSK +type RSKContractDeployer struct { + *EthereumContractDeployer +} + +type PolygonContractDeployer struct { + *EthereumContractDeployer +} + +type CeloContractDeployer struct { + *EthereumContractDeployer +} + +type QuorumContractDeployer struct { + *EthereumContractDeployer +} + +type BSCContractDeployer struct { + *EthereumContractDeployer +} + +type ScrollContractDeployer struct { + *EthereumContractDeployer +} + +type PolygonZkEvmContractDeployer struct { + *EthereumContractDeployer +} + +type LineaContractDeployer struct { + *EthereumContractDeployer +} + +type FantomContractDeployer struct { + *EthereumContractDeployer +} + +type KromaContractDeployer struct { + *EthereumContractDeployer +} + +type
WeMixContractDeployer struct { + *EthereumContractDeployer +} + +// NewEthereumContractDeployer returns an instantiated instance of the ETH contract deployer +func NewEthereumContractDeployer(ethClient blockchain.EVMClient, logger zerolog.Logger) *EthereumContractDeployer { + return &EthereumContractDeployer{ + client: ethClient, + l: logger, + } +} + +// DefaultFluxAggregatorOptions produces some basic defaults for a flux aggregator contract +func DefaultFluxAggregatorOptions() FluxAggregatorOptions { + return FluxAggregatorOptions{ + PaymentAmount: big.NewInt(1), + Timeout: uint32(30), + MinSubValue: big.NewInt(0), + MaxSubValue: big.NewInt(1000000000000), + Decimals: uint8(0), + Description: "Test Flux Aggregator", + } +} + +// DeployFlags deploys flags contract +func (e *EthereumContractDeployer) DeployFlags( + rac string, +) (Flags, error) { + address, _, instance, err := e.client.DeployContract("Flags", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + racAddr := common.HexToAddress(rac) + return flags_wrapper.DeployFlags(auth, backend, racAddr) + }) + if err != nil { + return nil, err + } + return &EthereumFlags{ + client: e.client, + flags: instance.(*flags_wrapper.Flags), + address: address, + }, nil +} + +// DeployFluxAggregatorContract deploys the Flux Aggregator Contract on an EVM chain +func (e *EthereumContractDeployer) DeployFluxAggregatorContract( + linkAddr string, + fluxOptions FluxAggregatorOptions, +) (FluxAggregator, error) { + address, _, instance, err := e.client.DeployContract("Flux Aggregator", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + la := common.HexToAddress(linkAddr) + return flux_aggregator_wrapper.DeployFluxAggregator(auth, + backend, + la, + fluxOptions.PaymentAmount, + fluxOptions.Timeout, + fluxOptions.Validator, + fluxOptions.MinSubValue, + fluxOptions.MaxSubValue, 
+ fluxOptions.Decimals, + fluxOptions.Description) + }) + if err != nil { + return nil, err + } + return &EthereumFluxAggregator{ + client: e.client, + fluxAggregator: instance.(*flux_aggregator_wrapper.FluxAggregator), + address: address, + }, nil +} + +func (e *EthereumContractDeployer) DeployStaking(params eth_contracts.StakingPoolConstructorParams) (Staking, error) { + stakingAddress, _, instance, err := e.client.DeployContract("Staking", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return eth_contracts.DeployStaking(auth, backend, params) + }) + if err != nil { + return nil, err + } + return &EthereumStaking{ + client: e.client, + staking: instance.(*eth_contracts.Staking), + address: stakingAddress, + }, nil +} + +func (e *EthereumContractDeployer) DeployFunctionsLoadTestClient(router string) (FunctionsLoadTestClient, error) { + address, _, instance, err := e.client.DeployContract("FunctionsLoadTestClient", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return functions_load_test_client.DeployFunctionsLoadTestClient(auth, backend, common.HexToAddress(router)) + }) + if err != nil { + return nil, err + } + return &EthereumFunctionsLoadTestClient{ + client: e.client, + instance: instance.(*functions_load_test_client.FunctionsLoadTestClient), + address: *address, + }, nil +} + +func (e *EthereumContractDeployer) DeployFunctionsOracleEventsMock() (FunctionsOracleEventsMock, error) { + address, _, instance, err := e.client.DeployContract("FunctionsOracleEventsMock", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return functions_oracle_events_mock.DeployFunctionsOracleEventsMock(auth, backend) + }) + if err != nil { + return nil, err + } + return &EthereumFunctionsOracleEventsMock{ + client: e.client, + eventsMock: 
instance.(*functions_oracle_events_mock.FunctionsOracleEventsMock), + address: address, + }, nil +} + +func (e *EthereumContractDeployer) DeployFunctionsBillingRegistryEventsMock() (FunctionsBillingRegistryEventsMock, error) { + address, _, instance, err := e.client.DeployContract("FunctionsBillingRegistryEventsMock", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return functions_billing_registry_events_mock.DeployFunctionsBillingRegistryEventsMock(auth, backend) + }) + if err != nil { + return nil, err + } + return &EthereumFunctionsBillingRegistryEventsMock{ + client: e.client, + eventsMock: instance.(*functions_billing_registry_events_mock.FunctionsBillingRegistryEventsMock), + address: address, + }, nil +} + +func (e *EthereumContractDeployer) DeployStakingEventsMock() (StakingEventsMock, error) { + address, _, instance, err := e.client.DeployContract("StakingEventsMock", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return eth_contracts.DeployStakingEventsMock(auth, backend) + }) + if err != nil { + return nil, err + } + return &EthereumStakingEventsMock{ + client: e.client, + eventsMock: instance.(*eth_contracts.StakingEventsMock), + address: address, + }, nil +} + +func (e *EthereumContractDeployer) DeployFunctionsV1EventsMock() (FunctionsV1EventsMock, error) { + address, _, instance, err := e.client.DeployContract("FunctionsV1EventsMock", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return functions_v1_events_mock.DeployFunctionsV1EventsMock(auth, backend) + }) + if err != nil { + return nil, err + } + return &EthereumFunctionsV1EventsMock{ + client: e.client, + eventsMock: instance.(*functions_v1_events_mock.FunctionsV1EventsMock), + address: address, + }, nil +} + +func (e *EthereumContractDeployer) 
DeployKeeperRegistry11Mock() (KeeperRegistry11Mock, error) { + address, _, instance, err := e.client.DeployContract("KeeperRegistry11Mock", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return keeper_registry_wrapper1_1_mock.DeployKeeperRegistryMock(auth, backend) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistry11Mock{ + client: e.client, + registryMock: instance.(*keeper_registry_wrapper1_1_mock.KeeperRegistryMock), + address: address, + }, nil +} + +func (e *EthereumContractDeployer) DeployKeeperRegistrar12Mock() (KeeperRegistrar12Mock, error) { + address, _, instance, err := e.client.DeployContract("KeeperRegistrar12Mock", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return keeper_registrar_wrapper1_2_mock.DeployKeeperRegistrarMock(auth, backend) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistrar12Mock{ + client: e.client, + registrarMock: instance.(*keeper_registrar_wrapper1_2_mock.KeeperRegistrarMock), + address: address, + }, nil +} + +func (e *EthereumContractDeployer) DeployKeeperGasWrapperMock() (KeeperGasWrapperMock, error) { + address, _, instance, err := e.client.DeployContract("KeeperGasWrapperMock", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return gas_wrapper_mock.DeployKeeperRegistryCheckUpkeepGasUsageWrapperMock(auth, backend) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperGasWrapperMock{ + client: e.client, + gasWrapperMock: instance.(*gas_wrapper_mock.KeeperRegistryCheckUpkeepGasUsageWrapperMock), + address: address, + }, nil +} + +func (e *EthereumContractDeployer) DeployOffchainAggregatorEventsMock() (OffchainAggregatorEventsMock, error) { + address, _, instance, err := 
e.client.DeployContract("OffchainAggregatorEventsMock", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return eth_contracts.DeployOffchainAggregatorEventsMock(auth, backend) + }) + if err != nil { + return nil, err + } + return &EthereumOffchainAggregatorEventsMock{ + client: e.client, + eventsMock: instance.(*eth_contracts.OffchainAggregatorEventsMock), + address: address, + }, nil +} + +// DeployLinkTokenContract deploys a Link Token contract to an EVM chain +func (e *EthereumContractDeployer) DeployLinkTokenContract() (LinkToken, error) { + linkTokenAddress, _, instance, err := e.client.DeployContract("PLI Token", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return link_token_interface.DeployLinkToken(auth, backend) + }) + if err != nil { + return nil, err + } + + return &EthereumLinkToken{ + client: e.client, + instance: instance.(*link_token_interface.LinkToken), + address: *linkTokenAddress, + l: e.l, + }, err +} + +// LoadLinkToken returns deployed on given address EthereumLinkToken +func (e *EthereumContractDeployer) LoadLinkToken(address common.Address) (LinkToken, error) { + instance, err := e.client.LoadContract("LinkToken", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return link_token_interface.NewLinkToken(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumLinkToken{ + address: address, + client: e.client, + instance: instance.(*link_token_interface.LinkToken), + l: e.l, + }, err +} + +func (e *EthereumContractDeployer) NewLinkTokenContract(address common.Address) (LinkToken, error) { + return e.LoadLinkToken(address) +} + +// DefaultOffChainAggregatorOptions returns some base defaults for deploying an OCR contract +func DefaultOffChainAggregatorOptions() OffchainOptions { + return OffchainOptions{ + 
MaximumGasPrice: uint32(3000), + ReasonableGasPrice: uint32(10), + MicroLinkPerEth: uint32(500), + LinkGweiPerObservation: uint32(500), + LinkGweiPerTransmission: uint32(500), + MinimumAnswer: big.NewInt(1), + MaximumAnswer: big.NewInt(50000000000000000), + Decimals: 8, + Description: "Test OCR", + } +} + +// DefaultOffChainAggregatorConfig returns some base defaults for configuring an OCR contract +func DefaultOffChainAggregatorConfig(numberNodes int) OffChainAggregatorConfig { + if numberNodes <= 4 { + log.Err(fmt.Errorf("insufficient number of nodes (%d) supplied for OCR, need at least 5", numberNodes)). + Int("Number Plugin Nodes", numberNodes). + Msg("You likely need more plugin nodes to properly configure OCR, try 5 or more.") + } + s := []int{1} + // First node's stage already inputted as a 1 in line above, so numberNodes-1. + for i := 0; i < numberNodes-1; i++ { + s = append(s, 2) + } + return OffChainAggregatorConfig{ + AlphaPPB: 1, + DeltaC: time.Minute * 60, + DeltaGrace: time.Second * 12, + DeltaProgress: time.Second * 35, + DeltaStage: time.Second * 60, + DeltaResend: time.Second * 17, + DeltaRound: time.Second * 30, + RMax: 6, + S: s, + N: numberNodes, + F: 1, + OracleIdentities: []ocrConfigHelper.OracleIdentityExtra{}, + } +} + +// DeployOffChainAggregator deploys the offchain aggregation contract to the EVM chain +func (e *EthereumContractDeployer) DeployOffChainAggregator( + linkAddr string, + offchainOptions OffchainOptions, +) (OffchainAggregator, error) { + address, _, instance, err := e.client.DeployContract("OffChain Aggregator", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + la := common.HexToAddress(linkAddr) + return offchainaggregator.DeployOffchainAggregator(auth, + backend, + offchainOptions.MaximumGasPrice, + offchainOptions.ReasonableGasPrice, + offchainOptions.MicroLinkPerEth, + offchainOptions.LinkGweiPerObservation, + 
offchainOptions.LinkGweiPerTransmission, + la, + offchainOptions.MinimumAnswer, + offchainOptions.MaximumAnswer, + offchainOptions.BillingAccessController, + offchainOptions.RequesterAccessController, + offchainOptions.Decimals, + offchainOptions.Description) + }) + if err != nil { + return nil, err + } + return &EthereumOffchainAggregator{ + client: e.client, + ocr: instance.(*offchainaggregator.OffchainAggregator), + address: address, + l: e.l, + }, err +} + +// LoadOffChainAggregator loads an already deployed offchain aggregator contract +func (e *EthereumContractDeployer) LoadOffChainAggregator(address *common.Address) (OffchainAggregator, error) { + instance, err := e.client.LoadContract("OffChainAggregator", *address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return offchainaggregator.NewOffchainAggregator(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumOffchainAggregator{ + address: address, + client: e.client, + ocr: instance.(*offchainaggregator.OffchainAggregator), + l: e.l, + }, err +} + +// DeployAPIConsumer deploys api consumer for oracle +func (e *EthereumContractDeployer) DeployAPIConsumer(linkAddr string) (APIConsumer, error) { + addr, _, instance, err := e.client.DeployContract("TestAPIConsumer", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return test_api_consumer_wrapper.DeployTestAPIConsumer(auth, backend, common.HexToAddress(linkAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumAPIConsumer{ + address: addr, + client: e.client, + consumer: instance.(*test_api_consumer_wrapper.TestAPIConsumer), + }, err +} + +// DeployOracle deploys oracle for consumer test +func (e *EthereumContractDeployer) DeployOracle(linkAddr string) (Oracle, error) { + addr, _, instance, err := e.client.DeployContract("Oracle", func( + auth *bind.TransactOpts, + backend 
bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return oracle_wrapper.DeployOracle(auth, backend, common.HexToAddress(linkAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumOracle{ + address: addr, + client: e.client, + oracle: instance.(*oracle_wrapper.Oracle), + }, err +} + +func (e *EthereumContractDeployer) DeployMockETHPLIFeed(answer *big.Int) (MockETHPLIFeed, error) { + address, _, instance, err := e.client.DeployContract("MockETHPLIAggregator", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return mock_ethlink_aggregator_wrapper.DeployMockETHPLIAggregator(auth, backend, answer) + }) + if err != nil { + return nil, err + } + return &EthereumMockETHPLIFeed{ + client: e.client, + feed: instance.(*mock_ethlink_aggregator_wrapper.MockETHPLIAggregator), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployVRFMockETHPLIFeed(answer *big.Int) (VRFMockETHPLIFeed, error) { + address, _, instance, err := e.client.DeployContract("VRFMockETHPLIAggregator", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrf_mock_ethlink_aggregator.DeployVRFMockETHPLIAggregator(auth, backend, answer) + }) + if err != nil { + return nil, err + } + return &EthereumVRFMockETHPLIFeed{ + client: e.client, + feed: instance.(*vrf_mock_ethlink_aggregator.VRFMockETHPLIAggregator), + address: address, + }, err +} + +// LoadETHPLIFeed returns deployed on given address EthereumMockETHPLIFeed +func (e *EthereumContractDeployer) LoadETHPLIFeed(address common.Address) (MockETHPLIFeed, error) { + instance, err := e.client.LoadContract("MockETHPLIFeed", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return mock_ethlink_aggregator_wrapper.NewMockETHPLIAggregator(address, backend) + }) + if err != nil { + 
return nil, err + } + return &EthereumMockETHPLIFeed{ + address: &address, + client: e.client, + feed: instance.(*mock_ethlink_aggregator_wrapper.MockETHPLIAggregator), + }, err +} + +func (e *EthereumContractDeployer) DeployMockGasFeed(answer *big.Int) (MockGasFeed, error) { + address, _, instance, err := e.client.DeployContract("MockGasFeed", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return mock_gas_aggregator_wrapper.DeployMockGASAggregator(auth, backend, answer) + }) + if err != nil { + return nil, err + } + return &EthereumMockGASFeed{ + client: e.client, + feed: instance.(*mock_gas_aggregator_wrapper.MockGASAggregator), + address: address, + }, err +} + +// LoadGasFeed returns deployed on given address EthereumMockGASFeed +func (e *EthereumContractDeployer) LoadGasFeed(address common.Address) (MockGasFeed, error) { + instance, err := e.client.LoadContract("MockGasFeed", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return mock_gas_aggregator_wrapper.NewMockGASAggregator(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumMockGASFeed{ + address: &address, + client: e.client, + feed: instance.(*mock_gas_aggregator_wrapper.MockGASAggregator), + }, err +} + +func (e *EthereumContractDeployer) DeployUpkeepTranscoder() (UpkeepTranscoder, error) { + address, _, instance, err := e.client.DeployContract("UpkeepTranscoder", func( + opts *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return upkeep_transcoder.DeployUpkeepTranscoder(opts, backend) + }) + + if err != nil { + return nil, err + } + + return &EthereumUpkeepTranscoder{ + client: e.client, + transcoder: instance.(*upkeep_transcoder.UpkeepTranscoder), + address: address, + }, err +} + +func (e *EthereumContractDeployer) LoadUpkeepTranscoder(address common.Address) 
(UpkeepTranscoder, error) { + instance, err := e.client.LoadContract("UpkeepTranscoder", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return upkeep_transcoder.NewUpkeepTranscoder(address, backend) + }) + + if err != nil { + return nil, err + } + + return &EthereumUpkeepTranscoder{ + client: e.client, + transcoder: instance.(*upkeep_transcoder.UpkeepTranscoder), + address: &address, + }, err +} + +func (e *EthereumContractDeployer) DeployKeeperRegistrar(registryVersion eth_contracts.KeeperRegistryVersion, linkAddr string, + registrarSettings KeeperRegistrarSettings) (KeeperRegistrar, error) { + + if registryVersion == eth_contracts.RegistryVersion_2_0 { + // deploy registrar 2.0 + address, _, instance, err := e.client.DeployContract("KeeperRegistrar", func( + opts *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return keeper_registrar_wrapper2_0.DeployKeeperRegistrar(opts, backend, common.HexToAddress(linkAddr), registrarSettings.AutoApproveConfigType, + registrarSettings.AutoApproveMaxAllowed, common.HexToAddress(registrarSettings.RegistryAddr), registrarSettings.MinLinkJuels) + }) + + if err != nil { + return nil, err + } + + return &EthereumKeeperRegistrar{ + client: e.client, + registrar20: instance.(*keeper_registrar_wrapper2_0.KeeperRegistrar), + address: address, + }, err + } else if registryVersion == eth_contracts.RegistryVersion_2_1 { + // deploy registrar 2.1 + address, _, instance, err := e.client.DeployContract("AutomationRegistrar", func( + opts *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + // configure both trigger types (0 = conditional, 1 = log) with the AutoApproveType and AutoApproveMaxAllowed values from registrarSettings + triggerConfigs := []registrar21.AutomationRegistrar21InitialTriggerConfig{ + {TriggerType: 0, AutoApproveType: 
registrarSettings.AutoApproveConfigType, + AutoApproveMaxAllowed: uint32(registrarSettings.AutoApproveMaxAllowed)}, + {TriggerType: 1, AutoApproveType: registrarSettings.AutoApproveConfigType, + AutoApproveMaxAllowed: uint32(registrarSettings.AutoApproveMaxAllowed)}, + } + + return registrar21.DeployAutomationRegistrar( + opts, + backend, + common.HexToAddress(linkAddr), + common.HexToAddress(registrarSettings.RegistryAddr), + registrarSettings.MinLinkJuels, + triggerConfigs) + }) + + if err != nil { + return nil, err + } + + return &EthereumKeeperRegistrar{ + client: e.client, + registrar21: instance.(*registrar21.AutomationRegistrar), + address: address, + }, err + } + // non OCR registrar + address, _, instance, err := e.client.DeployContract("KeeperRegistrar", func( + opts *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return keeper_registrar_wrapper1_2.DeployKeeperRegistrar(opts, backend, common.HexToAddress(linkAddr), registrarSettings.AutoApproveConfigType, + registrarSettings.AutoApproveMaxAllowed, common.HexToAddress(registrarSettings.RegistryAddr), registrarSettings.MinLinkJuels) + }) + + if err != nil { + return nil, err + } + + return &EthereumKeeperRegistrar{ + client: e.client, + registrar: instance.(*keeper_registrar_wrapper1_2.KeeperRegistrar), + address: address, + }, err +} + +// LoadKeeperRegistrar returns deployed on given address EthereumKeeperRegistrar +func (e *EthereumContractDeployer) LoadKeeperRegistrar(address common.Address, registryVersion eth_contracts.KeeperRegistryVersion) (KeeperRegistrar, error) { + if registryVersion == eth_contracts.RegistryVersion_1_1 || registryVersion == eth_contracts.RegistryVersion_1_2 || + registryVersion == eth_contracts.RegistryVersion_1_3 { + instance, err := e.client.LoadContract("KeeperRegistrar", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return 
keeper_registrar_wrapper1_2.NewKeeperRegistrar(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistrar{ + address: &address, + client: e.client, + registrar: instance.(*keeper_registrar_wrapper1_2.KeeperRegistrar), + }, err + } else if registryVersion == eth_contracts.RegistryVersion_2_0 { + instance, err := e.client.LoadContract("KeeperRegistrar", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return keeper_registrar_wrapper2_0.NewKeeperRegistrar(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistrar{ + address: &address, + client: e.client, + registrar20: instance.(*keeper_registrar_wrapper2_0.KeeperRegistrar), + }, err + } + instance, err := e.client.LoadContract("AutomationRegistrar", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return registrar21.NewAutomationRegistrar(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistrar{ + address: &address, + client: e.client, + registrar21: instance.(*registrar21.AutomationRegistrar), + }, err +} + +func (e *EthereumContractDeployer) DeployKeeperRegistry( + opts *KeeperRegistryOpts, +) (KeeperRegistry, error) { + var mode uint8 + switch e.client.GetChainID().Int64() { + //Arbitrum payment model + //Goerli Arbitrum + case 421613: + mode = uint8(1) + //Sepolia Arbitrum + case 421614: + mode = uint8(1) + //Optimism payment model + //Goerli Optimism + case 420: + mode = uint8(2) + //Goerli Base + case 84531: + mode = uint8(2) + default: + mode = uint8(0) + } + registryGasOverhead := big.NewInt(80000) + switch opts.RegistryVersion { + case eth_contracts.RegistryVersion_1_0, eth_contracts.RegistryVersion_1_1: + address, _, instance, err := e.client.DeployContract("KeeperRegistry1_1", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, 
interface{}, error) { + return keeper_registry_wrapper1_1.DeployKeeperRegistry( + auth, + backend, + common.HexToAddress(opts.LinkAddr), + common.HexToAddress(opts.ETHFeedAddr), + common.HexToAddress(opts.GasFeedAddr), + opts.Settings.PaymentPremiumPPB, + opts.Settings.FlatFeeMicroPLI, + opts.Settings.BlockCountPerTurn, + opts.Settings.CheckGasLimit, + opts.Settings.StalenessSeconds, + opts.Settings.GasCeilingMultiplier, + opts.Settings.FallbackGasPrice, + opts.Settings.FallbackLinkPrice, + ) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistry{ + client: e.client, + version: eth_contracts.RegistryVersion_1_1, + registry1_1: instance.(*keeper_registry_wrapper1_1.KeeperRegistry), + registry1_2: nil, + registry1_3: nil, + address: address, + }, err + case eth_contracts.RegistryVersion_1_2: + address, _, instance, err := e.client.DeployContract("KeeperRegistry1_2", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return keeper_registry_wrapper1_2.DeployKeeperRegistry( + auth, + backend, + common.HexToAddress(opts.LinkAddr), + common.HexToAddress(opts.ETHFeedAddr), + common.HexToAddress(opts.GasFeedAddr), + keeper_registry_wrapper1_2.Config{ + PaymentPremiumPPB: opts.Settings.PaymentPremiumPPB, + FlatFeeMicroLink: opts.Settings.FlatFeeMicroPLI, + BlockCountPerTurn: opts.Settings.BlockCountPerTurn, + CheckGasLimit: opts.Settings.CheckGasLimit, + StalenessSeconds: opts.Settings.StalenessSeconds, + GasCeilingMultiplier: opts.Settings.GasCeilingMultiplier, + MinUpkeepSpend: opts.Settings.MinUpkeepSpend, + MaxPerformGas: opts.Settings.MaxPerformGas, + FallbackGasPrice: opts.Settings.FallbackGasPrice, + FallbackLinkPrice: opts.Settings.FallbackLinkPrice, + Transcoder: common.HexToAddress(opts.TranscoderAddr), + Registrar: common.HexToAddress(opts.RegistrarAddr), + }, + ) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistry{ + client: e.client, + 
version: eth_contracts.RegistryVersion_1_2, + registry1_1: nil, + registry1_2: instance.(*keeper_registry_wrapper1_2.KeeperRegistry), + registry1_3: nil, + address: address, + }, err + case eth_contracts.RegistryVersion_1_3: + logicAddress, _, _, err := e.client.DeployContract("KeeperRegistryLogic1_3", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return keeper_registry_logic1_3.DeployKeeperRegistryLogic( + auth, + backend, + mode, // Default payment model + registryGasOverhead, // Registry gas overhead + common.HexToAddress(opts.LinkAddr), + common.HexToAddress(opts.ETHFeedAddr), + common.HexToAddress(opts.GasFeedAddr), + ) + }) + if err != nil { + return nil, err + } + err = e.client.WaitForEvents() + if err != nil { + return nil, err + } + + address, _, instance, err := e.client.DeployContract("KeeperRegistry1_3", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return keeper_registry_wrapper1_3.DeployKeeperRegistry( + auth, + backend, + *logicAddress, + keeper_registry_wrapper1_3.Config{ + PaymentPremiumPPB: opts.Settings.PaymentPremiumPPB, + FlatFeeMicroLink: opts.Settings.FlatFeeMicroPLI, + BlockCountPerTurn: opts.Settings.BlockCountPerTurn, + CheckGasLimit: opts.Settings.CheckGasLimit, + StalenessSeconds: opts.Settings.StalenessSeconds, + GasCeilingMultiplier: opts.Settings.GasCeilingMultiplier, + MinUpkeepSpend: opts.Settings.MinUpkeepSpend, + MaxPerformGas: opts.Settings.MaxPerformGas, + FallbackGasPrice: opts.Settings.FallbackGasPrice, + FallbackLinkPrice: opts.Settings.FallbackLinkPrice, + Transcoder: common.HexToAddress(opts.TranscoderAddr), + Registrar: common.HexToAddress(opts.RegistrarAddr), + }, + ) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistry{ + client: e.client, + version: eth_contracts.RegistryVersion_1_3, + registry1_1: nil, + registry1_2: nil, + 
registry1_3: instance.(*keeper_registry_wrapper1_3.KeeperRegistry), + address: address, + }, err + case eth_contracts.RegistryVersion_2_0: + logicAddress, _, _, err := e.client.DeployContract("KeeperRegistryLogic2_0", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return keeper_registry_logic2_0.DeployKeeperRegistryLogic( + auth, + backend, + mode, // Default payment model + common.HexToAddress(opts.LinkAddr), + common.HexToAddress(opts.ETHFeedAddr), + common.HexToAddress(opts.GasFeedAddr), + ) + }) + if err != nil { + return nil, err + } + err = e.client.WaitForEvents() + if err != nil { + return nil, err + } + + address, _, instance, err := e.client.DeployContract("KeeperRegistry2_0", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + + return keeper_registry_wrapper2_0.DeployKeeperRegistry( + auth, + backend, + *logicAddress, + ) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistry{ + client: e.client, + version: eth_contracts.RegistryVersion_2_0, + registry2_0: instance.(*keeper_registry_wrapper2_0.KeeperRegistry), + address: address, + }, err + + case eth_contracts.RegistryVersion_2_1: + automationForwarderLogicAddr, _, _, err := e.client.DeployContract("automationForwarderLogic", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return automationForwarderLogic.DeployAutomationForwarderLogic(auth, backend) + }) + + if err != nil { + return nil, err + } + + if err := e.client.WaitForEvents(); err != nil { + return nil, err + } + + registryLogicBAddr, _, _, err := e.client.DeployContract("KeeperRegistryLogicB2_1", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + + return registrylogicb21.DeployKeeperRegistryLogicB( + 
auth, + backend, + mode, + common.HexToAddress(opts.LinkAddr), + common.HexToAddress(opts.ETHFeedAddr), + common.HexToAddress(opts.GasFeedAddr), + *automationForwarderLogicAddr, + ) + }) + if err != nil { + return nil, err + } + + if err := e.client.WaitForEvents(); err != nil { + return nil, err + } + + registryLogicAAddr, _, _, err := e.client.DeployContract("KeeperRegistryLogicA2_1", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + + return registrylogica21.DeployKeeperRegistryLogicA( + auth, + backend, + *registryLogicBAddr, + ) + }) + if err != nil { + return nil, err + } + if err := e.client.WaitForEvents(); err != nil { + return nil, err + } + + address, _, _, err := e.client.DeployContract("KeeperRegistry2_1", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return registry21.DeployKeeperRegistry( + auth, + backend, + *registryLogicAAddr, + ) + }) + if err != nil { + return nil, err + } + if err := e.client.WaitForEvents(); err != nil { + return nil, err + } + + registryMaster, err := iregistry21.NewIKeeperRegistryMaster( + *address, + e.client.Backend(), + ) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistry{ + client: e.client, + version: eth_contracts.RegistryVersion_2_1, + registry2_1: registryMaster, + address: address, + }, err + default: + return nil, fmt.Errorf("keeper registry version %d is not supported", opts.RegistryVersion) + } +} + +// LoadKeeperRegistry returns deployed on given address EthereumKeeperRegistry +func (e *EthereumContractDeployer) LoadKeeperRegistry(address common.Address, registryVersion eth_contracts.KeeperRegistryVersion) (KeeperRegistry, error) { + switch registryVersion { + case eth_contracts.RegistryVersion_1_1: + instance, err := e.client.LoadContract("KeeperRegistry", address, func( + address common.Address, + backend bind.ContractBackend, 
+ ) (interface{}, error) { + return keeper_registry_wrapper1_1.NewKeeperRegistry(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistry{ + address: &address, + client: e.client, + registry1_1: instance.(*keeper_registry_wrapper1_1.KeeperRegistry), + version: registryVersion, + }, err + case eth_contracts.RegistryVersion_1_2: + instance, err := e.client.LoadContract("KeeperRegistry", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return keeper_registry_wrapper1_2.NewKeeperRegistry(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistry{ + address: &address, + client: e.client, + registry1_2: instance.(*keeper_registry_wrapper1_2.KeeperRegistry), + version: registryVersion, + }, err + case eth_contracts.RegistryVersion_1_3: + instance, err := e.client.LoadContract("KeeperRegistry", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return keeper_registry_wrapper1_3.NewKeeperRegistry(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistry{ + address: &address, + client: e.client, + registry1_3: instance.(*keeper_registry_wrapper1_3.KeeperRegistry), + version: registryVersion, + }, err + case eth_contracts.RegistryVersion_2_0: + instance, err := e.client.LoadContract("KeeperRegistry", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return keeper_registry_wrapper2_0.NewKeeperRegistry(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistry{ + address: &address, + client: e.client, + registry2_0: instance.(*keeper_registry_wrapper2_0.KeeperRegistry), + version: registryVersion, + }, err + case eth_contracts.RegistryVersion_2_1: + instance, err := e.client.LoadContract("KeeperRegistry", address, func( + address common.Address, + backend bind.ContractBackend, 
+ ) (interface{}, error) { + return iregistry21.NewIKeeperRegistryMaster(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperRegistry{ + address: &address, + client: e.client, + registry2_1: instance.(*iregistry21.IKeeperRegistryMaster), + version: registryVersion, + }, err + default: + return nil, fmt.Errorf("keeper registry version %d is not supported", registryVersion) + } +} + +func (e *EthereumContractDeployer) DeployKeeperConsumer(updateInterval *big.Int) (KeeperConsumer, error) { + address, _, instance, err := e.client.DeployContract("KeeperConsumer", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return keeper_consumer_wrapper.DeployKeeperConsumer(auth, backend, updateInterval) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperConsumer{ + client: e.client, + consumer: instance.(*keeper_consumer_wrapper.KeeperConsumer), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployAutomationLogTriggerConsumer(testInterval *big.Int) (KeeperConsumer, error) { + address, _, instance, err := e.client.DeployContract("LogUpkeepCounter", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return log_upkeep_counter_wrapper.DeployLogUpkeepCounter( + auth, backend, testInterval, + ) + }) + if err != nil { + return nil, err + } + return &EthereumAutomationLogCounterConsumer{ + client: e.client, + consumer: instance.(*log_upkeep_counter_wrapper.LogUpkeepCounter), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployAutomationSimpleLogTriggerConsumer() (KeeperConsumer, error) { + address, _, instance, err := e.client.DeployContract("SimpleLogUpkeepCounter", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return 
simple_log_upkeep_counter_wrapper.DeploySimpleLogUpkeepCounter( + auth, backend, + ) + }) + if err != nil { + return nil, err + } + return &EthereumAutomationSimpleLogCounterConsumer{ + client: e.client, + consumer: instance.(*simple_log_upkeep_counter_wrapper.SimpleLogUpkeepCounter), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployAutomationStreamsLookupUpkeepConsumer(testRange *big.Int, interval *big.Int, useArbBlock bool, staging bool, verify bool) (KeeperConsumer, error) { + address, _, instance, err := e.client.DeployContract("StreamsLookupUpkeep", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return streams_lookup_upkeep_wrapper.DeployStreamsLookupUpkeep( + auth, backend, testRange, interval, useArbBlock, staging, verify, + ) + }) + if err != nil { + return nil, err + } + return &EthereumAutomationStreamsLookupUpkeepConsumer{ + client: e.client, + consumer: instance.(*streams_lookup_upkeep_wrapper.StreamsLookupUpkeep), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployAutomationLogTriggeredStreamsLookupUpkeepConsumer() (KeeperConsumer, error) { + address, _, instance, err := e.client.DeployContract("LogTriggeredStreamsLookupUpkeep", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return log_triggered_streams_lookup_wrapper.DeployLogTriggeredStreamsLookup( + auth, backend, false, false, + ) + }) + if err != nil { + return nil, err + } + return &EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer{ + client: e.client, + consumer: instance.(*log_triggered_streams_lookup_wrapper.LogTriggeredStreamsLookup), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployUpkeepCounter(testRange *big.Int, interval *big.Int) (UpkeepCounter, error) { + address, _, instance, err := e.client.DeployContract("UpkeepCounter", func( + auth 
*bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return upkeep_counter_wrapper.DeployUpkeepCounter(auth, backend, testRange, interval) + }) + if err != nil { + return nil, err + } + return &EthereumUpkeepCounter{ + client: e.client, + consumer: instance.(*upkeep_counter_wrapper.UpkeepCounter), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployUpkeepPerformCounterRestrictive(testRange *big.Int, averageEligibilityCadence *big.Int) (UpkeepPerformCounterRestrictive, error) { + address, _, instance, err := e.client.DeployContract("UpkeepPerformCounterRestrictive", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return upkeep_perform_counter_restrictive_wrapper.DeployUpkeepPerformCounterRestrictive(auth, backend, testRange, averageEligibilityCadence) + }) + if err != nil { + return nil, err + } + return &EthereumUpkeepPerformCounterRestrictive{ + client: e.client, + consumer: instance.(*upkeep_perform_counter_restrictive_wrapper.UpkeepPerformCounterRestrictive), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployKeeperConsumerPerformance( + testBlockRange, + averageCadence, + checkGasToBurn, + performGasToBurn *big.Int, +) (KeeperConsumerPerformance, error) { + address, _, instance, err := e.client.DeployContract("KeeperConsumerPerformance", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return keeper_consumer_performance_wrapper.DeployKeeperConsumerPerformance( + auth, + backend, + testBlockRange, + averageCadence, + checkGasToBurn, + performGasToBurn, + ) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperConsumerPerformance{ + client: e.client, + consumer: instance.(*keeper_consumer_performance_wrapper.KeeperConsumerPerformance), + address: address, + }, err +} + +func (e 
*EthereumContractDeployer) DeployKeeperConsumerBenchmark() (AutomationConsumerBenchmark, error) { + address, _, instance, err := e.client.DeployContract("AutomationConsumerBenchmark", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return automation_consumer_benchmark.DeployAutomationConsumerBenchmark( + auth, + backend, + ) + }) + if err != nil { + return nil, err + } + return &EthereumAutomationConsumerBenchmark{ + client: e.client, + consumer: instance.(*automation_consumer_benchmark.AutomationConsumerBenchmark), + address: address, + }, err +} + +// LoadKeeperConsumerBenchmark returns deployed on given address EthereumAutomationConsumerBenchmark +func (e *EthereumContractDeployer) LoadKeeperConsumerBenchmark(address common.Address) (AutomationConsumerBenchmark, error) { + instance, err := e.client.LoadContract("AutomationConsumerBenchmark", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return automation_consumer_benchmark.NewAutomationConsumerBenchmark(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumAutomationConsumerBenchmark{ + address: &address, + client: e.client, + consumer: instance.(*automation_consumer_benchmark.AutomationConsumerBenchmark), + }, err +} + +func (e *EthereumContractDeployer) DeployKeeperPerformDataChecker(expectedData []byte) (KeeperPerformDataChecker, error) { + address, _, instance, err := e.client.DeployContract("PerformDataChecker", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return perform_data_checker_wrapper.DeployPerformDataChecker( + auth, + backend, + expectedData, + ) + }) + if err != nil { + return nil, err + } + return &EthereumKeeperPerformDataCheckerConsumer{ + client: e.client, + performDataChecker: instance.(*perform_data_checker_wrapper.PerformDataChecker), + address: 
address, + }, err +} + +// DeployOperatorFactory deploys operator factory contract +func (e *EthereumContractDeployer) DeployOperatorFactory(linkAddr string) (OperatorFactory, error) { + addr, _, instance, err := e.client.DeployContract("OperatorFactory", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return operator_factory.DeployOperatorFactory(auth, backend, common.HexToAddress(linkAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumOperatorFactory{ + address: addr, + client: e.client, + operatorFactory: instance.(*operator_factory.OperatorFactory), + }, err +} + +// DeployMockAggregatorProxy deploys a mock aggregator proxy contract +func (e *EthereumContractDeployer) DeployMockAggregatorProxy(aggregatorAddr string) (MockAggregatorProxy, error) { + addr, _, instance, err := e.client.DeployContract("MockAggregatorProxy", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return mock_aggregator_proxy.DeployMockAggregatorProxy(auth, backend, common.HexToAddress(aggregatorAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumMockAggregatorProxy{ + address: addr, + client: e.client, + mockAggregatorProxy: instance.(*mock_aggregator_proxy.MockAggregatorProxy), + }, err +} + +func (e *EthereumContractDeployer) DeployKeeperRegistryCheckUpkeepGasUsageWrapper(keeperRegistryAddr string) (KeeperRegistryCheckUpkeepGasUsageWrapper, error) { + addr, _, instance, err := e.client.DeployContract("KeeperRegistryCheckUpkeepGasUsageWrapper", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return gas_wrapper.DeployKeeperRegistryCheckUpkeepGasUsageWrapper(auth, backend, common.HexToAddress(keeperRegistryAddr)) + }) + if err != nil { + return nil, err + } + return 
&EthereumKeeperRegistryCheckUpkeepGasUsageWrapper{ + address: addr, + client: e.client, + gasUsageWrapper: instance.(*gas_wrapper.KeeperRegistryCheckUpkeepGasUsageWrapper), + }, err +} + +// DeployOffChainAggregator deploys the offchain aggregation contract to the EVM chain +func (e *EthereumContractDeployer) DeployOffchainAggregatorV2( + linkAddr string, + offchainOptions OffchainOptions, +) (OffchainAggregatorV2, error) { + address, _, instance, err := e.client.DeployContract("OffChain Aggregator v2", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + la := common.HexToAddress(linkAddr) + return ocr2aggregator.DeployOCR2Aggregator( + auth, + backend, + la, + offchainOptions.MinimumAnswer, + offchainOptions.MaximumAnswer, + offchainOptions.BillingAccessController, + offchainOptions.RequesterAccessController, + offchainOptions.Decimals, + offchainOptions.Description, + ) + }) + if err != nil { + return nil, err + } + return &EthereumOffchainAggregatorV2{ + client: e.client, + contract: instance.(*ocr2aggregator.OCR2Aggregator), + address: address, + l: e.l, + }, err +} + +// LoadOffChainAggregatorV2 loads an already deployed offchain aggregator v2 contract +func (e *EthereumContractDeployer) LoadOffChainAggregatorV2(address *common.Address) (OffchainAggregatorV2, error) { + instance, err := e.client.LoadContract("OffChainAggregatorV2", *address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return ocr2aggregator.NewOCR2Aggregator(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumOffchainAggregatorV2{ + client: e.client, + contract: instance.(*ocr2aggregator.OCR2Aggregator), + address: address, + l: e.l, + }, err +} + +func (e *EthereumContractDeployer) DeployMercuryVerifierContract(verifierProxyAddr common.Address) (MercuryVerifier, error) { + address, _, instance, err := e.client.DeployContract("Mercury 
Verifier", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return verifier.DeployVerifier(auth, backend, verifierProxyAddr) + }) + if err != nil { + return nil, err + } + return &EthereumMercuryVerifier{ + client: e.client, + instance: instance.(*verifier.Verifier), + address: *address, + l: e.l, + }, err +} + +func (e *EthereumContractDeployer) DeployMercuryVerifierProxyContract(accessControllerAddr common.Address) (MercuryVerifierProxy, error) { + address, _, instance, err := e.client.DeployContract("Mercury Verifier Proxy", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return verifier_proxy.DeployVerifierProxy(auth, backend, accessControllerAddr) + }) + if err != nil { + return nil, err + } + return &EthereumMercuryVerifierProxy{ + client: e.client, + instance: instance.(*verifier_proxy.VerifierProxy), + address: *address, + l: e.l, + }, err +} + +func (e *EthereumContractDeployer) DeployMercuryFeeManager(linkAddress common.Address, nativeAddress common.Address, proxyAddress common.Address, rewardManagerAddress common.Address) (MercuryFeeManager, error) { + address, _, instance, err := e.client.DeployContract("Mercury Fee Manager", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return fee_manager.DeployFeeManager(auth, backend, linkAddress, nativeAddress, proxyAddress, rewardManagerAddress) + }) + if err != nil { + return nil, err + } + return &EthereumMercuryFeeManager{ + client: e.client, + instance: instance.(*fee_manager.FeeManager), + address: *address, + l: e.l, + }, err +} + +func (e *EthereumContractDeployer) DeployMercuryRewardManager(linkAddress common.Address) (MercuryRewardManager, error) { + address, _, instance, err := e.client.DeployContract("Mercury Reward Manager", func( + auth *bind.TransactOpts, + 
backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return reward_manager.DeployRewardManager(auth, backend, linkAddress) + }) + if err != nil { + return nil, err + } + return &EthereumMercuryRewardManager{ + client: e.client, + instance: instance.(*reward_manager.RewardManager), + address: *address, + l: e.l, + }, err +} + +func (e *EthereumContractDeployer) DeployWERC20Mock() (WERC20Mock, error) { + address, _, instance, err := e.client.DeployContract("WERC20 Mock", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return werc20_mock.DeployWERC20Mock(auth, backend) + }) + if err != nil { + return nil, err + } + return &EthereumWERC20Mock{ + client: e.client, + instance: instance.(*werc20_mock.WERC20Mock), + address: *address, + l: e.l, + }, err +} + +func (e *EthereumContractDeployer) DeployLogEmitterContract() (LogEmitter, error) { + address, _, instance, err := e.client.DeployContract("Log Emitter", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return le.DeployLogEmitter(auth, backend) + }) + if err != nil { + return nil, err + } + return &LogEmitterContract{ + client: e.client, + instance: instance.(*le.LogEmitter), + address: *address, + l: e.l, + }, err +} + +func (e *EthereumContractDeployer) DeployMultiCallContract() (common.Address, error) { + multiCallABI, err := abi.JSON(strings.NewReader(MultiCallABI)) + if err != nil { + return common.Address{}, err + } + address, tx, _, err := e.client.DeployContract("MultiCall Contract", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + address, tx, contract, err := bind.DeployContract(auth, multiCallABI, common.FromHex(MultiCallBIN), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, 
contract, err + }) + if err != nil { + return common.Address{}, err + } + r, err := bind.WaitMined(context.Background(), e.client.DeployBackend(), tx) + if err != nil { + return common.Address{}, err + } + if r.Status != types.ReceiptStatusSuccessful { + return common.Address{}, fmt.Errorf("deploy multicall failed") + } + return *address, nil + +} diff --git a/integration-tests/contracts/contract_loader.go b/integration-tests/contracts/contract_loader.go new file mode 100644 index 00000000..bdc39ac9 --- /dev/null +++ b/integration-tests/contracts/contract_loader.go @@ -0,0 +1,433 @@ +package contracts + +import ( + "errors" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2_5" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_load_test_with_metrics" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/rs/zerolog" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_coordinator" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_load_test_client" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_router" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_forwarder" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/operator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_load_test_with_metrics" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/fee_manager" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/reward_manager" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/verifier" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/shared/generated/werc20_mock" +) + +// ContractLoader is an interface for abstracting the contract loading methods across network implementations +type ContractLoader interface { + LoadPLIToken(address string) (LinkToken, error) + LoadOperatorContract(address common.Address) (Operator, error) + LoadAuthorizedForwarder(address common.Address) (AuthorizedForwarder, error) + + /* functions 1_0_0 */ + LoadFunctionsCoordinator(addr string) (FunctionsCoordinator, error) + LoadFunctionsRouter(addr string) (FunctionsRouter, error) + LoadFunctionsLoadTestClient(addr string) (FunctionsLoadTestClient, error) + + // Mercury + LoadMercuryVerifier(addr common.Address) (MercuryVerifier, error) + LoadMercuryVerifierProxy(addr common.Address) (MercuryVerifierProxy, error) + LoadMercuryFeeManager(addr common.Address) (MercuryFeeManager, error) + LoadMercuryRewardManager(addr common.Address) (MercuryRewardManager, error) + + LoadWERC20Mock(addr common.Address) (WERC20Mock, error) + + // VRF + LoadVRFCoordinatorV2(addr string) (VRFCoordinatorV2, error) + LoadVRFv2LoadTestConsumer(addr string) (VRFv2LoadTestConsumer, error) + LoadVRFCoordinatorV2_5(addr string) (VRFCoordinatorV2_5, error) + LoadVRFv2PlusLoadTestConsumer(addr string) (VRFv2PlusLoadTestConsumer, error) +} + +// NewContractLoader returns an instance of a contract Loader based on the client type +func NewContractLoader(bcClient blockchain.EVMClient, logger zerolog.Logger) (ContractLoader, error) { + switch clientImpl := bcClient.Get().(type) { + case *blockchain.EthereumClient: + return NewEthereumContractLoader(clientImpl, logger), nil + case *blockchain.KlaytnClient: + return &KlaytnContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + case *blockchain.MetisClient: + return 
&MetisContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + case *blockchain.ArbitrumClient: + return &ArbitrumContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + case *blockchain.PolygonClient: + return &PolygonContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + case *blockchain.OptimismClient: + return &OptimismContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + case *blockchain.PolygonZkEvmClient: + return &PolygonZkEvmContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + case *blockchain.WeMixClient: + return &WeMixContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + case *blockchain.LineaClient: + return &LineaContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + case *blockchain.CeloClient: + return &CeloContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + case *blockchain.ScrollClient: + return &ScrollContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + case *blockchain.FantomClient: + return &FantomContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + case *blockchain.BSCClient: + return &BSCContractLoader{NewEthereumContractLoader(clientImpl, logger)}, nil + } + return nil, errors.New("unknown blockchain client implementation for contract Loader, register blockchain client in NewContractLoader") +} + +// EthereumContractLoader provides the implementations for deploying ETH (EVM) based contracts +type EthereumContractLoader struct { + client blockchain.EVMClient + l zerolog.Logger +} + +// KlaytnContractLoader wraps ethereum contract deployments for Klaytn +type KlaytnContractLoader struct { + *EthereumContractLoader +} + +// MetisContractLoader wraps ethereum contract deployments for Metis +type MetisContractLoader struct { + *EthereumContractLoader +} + +// ArbitrumContractLoader wraps for Arbitrum +type ArbitrumContractLoader struct { + *EthereumContractLoader +} + +// PolygonContractLoader wraps 
for Polygon +type PolygonContractLoader struct { + *EthereumContractLoader +} + +// OptimismContractLoader wraps for Optimism +type OptimismContractLoader struct { + *EthereumContractLoader +} +type PolygonZkEvmContractLoader struct { + *EthereumContractLoader +} + +// PolygonZKEVMContractLoader wraps for Polygon zkEVM +type PolygonZKEVMContractLoader struct { + *EthereumContractLoader +} + +// WeMixContractLoader wraps for WeMix +type WeMixContractLoader struct { + *EthereumContractLoader +} + +// LineaContractLoader wraps for Linea +type LineaContractLoader struct { + *EthereumContractLoader +} + +// CeloContractLoader wraps for Celo +type CeloContractLoader struct { + *EthereumContractLoader +} + +// ScrollContractLoader wraps for Scroll +type ScrollContractLoader struct { + *EthereumContractLoader +} + +// FantomContractLoader wraps for Fantom +type FantomContractLoader struct { + *EthereumContractLoader +} + +// BSCContractLoader wraps for BSC +type BSCContractLoader struct { + *EthereumContractLoader +} + +// NewEthereumContractLoader returns an instantiated instance of the ETH contract Loader +func NewEthereumContractLoader(ethClient blockchain.EVMClient, logger zerolog.Logger) *EthereumContractLoader { + return &EthereumContractLoader{ + client: ethClient, + l: logger, + } +} + +// LoadPLIToken returns deployed on given address PLI Token contract instance +func (e *EthereumContractLoader) LoadPLIToken(addr string) (LinkToken, error) { + instance, err := e.client.LoadContract("PLI Token", common.HexToAddress(addr), func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return link_token_interface.NewLinkToken(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumLinkToken{ + client: e.client, + instance: instance.(*link_token_interface.LinkToken), + address: common.HexToAddress(addr), + l: e.l, + }, err +} + +// LoadFunctionsCoordinator returns deployed on given address FunctionsCoordinator 
contract instance +func (e *EthereumContractLoader) LoadFunctionsCoordinator(addr string) (FunctionsCoordinator, error) { + instance, err := e.client.LoadContract("Functions Coordinator", common.HexToAddress(addr), func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return functions_coordinator.NewFunctionsCoordinator(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumFunctionsCoordinator{ + client: e.client, + instance: instance.(*functions_coordinator.FunctionsCoordinator), + address: common.HexToAddress(addr), + }, err +} + +// LoadFunctionsRouter returns deployed on given address FunctionsRouter contract instance +func (e *EthereumContractLoader) LoadFunctionsRouter(addr string) (FunctionsRouter, error) { + instance, err := e.client.LoadContract("Functions Router", common.HexToAddress(addr), func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return functions_router.NewFunctionsRouter(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumFunctionsRouter{ + client: e.client, + instance: instance.(*functions_router.FunctionsRouter), + address: common.HexToAddress(addr), + l: e.l, + }, err +} + +// LoadFunctionsLoadTestClient returns deployed on given address FunctionsLoadTestClient contract instance +func (e *EthereumContractLoader) LoadFunctionsLoadTestClient(addr string) (FunctionsLoadTestClient, error) { + instance, err := e.client.LoadContract("FunctionsLoadTestClient", common.HexToAddress(addr), func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return functions_load_test_client.NewFunctionsLoadTestClient(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumFunctionsLoadTestClient{ + client: e.client, + instance: instance.(*functions_load_test_client.FunctionsLoadTestClient), + address: common.HexToAddress(addr), + }, err +} + +// LoadOperatorContract 
returns deployed on given address Operator contract instance +func (e *EthereumContractLoader) LoadOperatorContract(address common.Address) (Operator, error) { + instance, err := e.client.LoadContract("Operator", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return operator_wrapper.NewOperator(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumOperator{ + address: address, + client: e.client, + operator: instance.(*operator_wrapper.Operator), + l: e.l, + }, err +} + +// LoadAuthorizedForwarder returns deployed on given address AuthorizedForwarder contract instance +func (e *EthereumContractLoader) LoadAuthorizedForwarder(address common.Address) (AuthorizedForwarder, error) { + instance, err := e.client.LoadContract("AuthorizedForwarder", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return authorized_forwarder.NewAuthorizedForwarder(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumAuthorizedForwarder{ + address: address, + client: e.client, + authorizedForwarder: instance.(*authorized_forwarder.AuthorizedForwarder), + }, err +} + +// LoadMercuryVerifier returns Verifier contract deployed on given address +func (e *EthereumContractLoader) LoadMercuryVerifier(addr common.Address) (MercuryVerifier, error) { + instance, err := e.client.LoadContract("Mercury Verifier", addr, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return verifier.NewVerifier(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumMercuryVerifier{ + client: e.client, + instance: instance.(*verifier.Verifier), + address: addr, + }, err +} + +// LoadMercuryVerifierProxy returns VerifierProxy contract deployed on given address +func (e *EthereumContractLoader) LoadMercuryVerifierProxy(addr common.Address) (MercuryVerifierProxy, error) { + instance, err := 
e.client.LoadContract("Mercury Verifier Proxy", addr, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return verifier_proxy.NewVerifierProxy(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumMercuryVerifierProxy{ + client: e.client, + instance: instance.(*verifier_proxy.VerifierProxy), + address: addr, + }, err +} + +func (e *EthereumContractLoader) LoadMercuryFeeManager(addr common.Address) (MercuryFeeManager, error) { + instance, err := e.client.LoadContract("Mercury Fee Manager", addr, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return fee_manager.NewFeeManager(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumMercuryFeeManager{ + client: e.client, + instance: instance.(*fee_manager.FeeManager), + address: addr, + }, err +} + +func (e *EthereumContractLoader) LoadMercuryRewardManager(addr common.Address) (MercuryRewardManager, error) { + instance, err := e.client.LoadContract("Mercury Reward Manager", addr, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return reward_manager.NewRewardManager(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumMercuryRewardManager{ + client: e.client, + instance: instance.(*reward_manager.RewardManager), + address: addr, + }, err +} + +func (e *EthereumContractLoader) LoadWERC20Mock(addr common.Address) (WERC20Mock, error) { + instance, err := e.client.LoadContract("WERC20 Mock", addr, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return werc20_mock.NewWERC20Mock(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumWERC20Mock{ + client: e.client, + instance: instance.(*werc20_mock.WERC20Mock), + address: addr, + }, err +} + +func (e *EthereumContractLoader) LoadVRFCoordinatorV2_5(addr string) (VRFCoordinatorV2_5, error) { + 
address := common.HexToAddress(addr) + instance, err := e.client.LoadContract("VRFCoordinatorV2_5", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return vrf_coordinator_v2_5.NewVRFCoordinatorV25(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumVRFCoordinatorV2_5{ + address: &address, + client: e.client, + coordinator: instance.(*vrf_coordinator_v2_5.VRFCoordinatorV25), + }, err +} + +func (e *EthereumContractLoader) LoadVRFv2PlusLoadTestConsumer(addr string) (VRFv2PlusLoadTestConsumer, error) { + address := common.HexToAddress(addr) + instance, err := e.client.LoadContract("VRFV2PlusLoadTestWithMetrics", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return vrf_v2plus_load_test_with_metrics.NewVRFV2PlusLoadTestWithMetrics(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumVRFv2PlusLoadTestConsumer{ + client: e.client, + consumer: instance.(*vrf_v2plus_load_test_with_metrics.VRFV2PlusLoadTestWithMetrics), + address: &address, + }, err +} + +func (e *EthereumContractLoader) LoadVRFCoordinatorV2(addr string) (VRFCoordinatorV2, error) { + address := common.HexToAddress(addr) + instance, err := e.client.LoadContract("VRFCoordinatorV2", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) { + return vrf_coordinator_v2.NewVRFCoordinatorV2(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumVRFCoordinatorV2{ + address: &address, + client: e.client, + coordinator: instance.(*vrf_coordinator_v2.VRFCoordinatorV2), + }, err +} + +func (e *EthereumContractLoader) LoadVRFv2LoadTestConsumer(addr string) (VRFv2LoadTestConsumer, error) { + address := common.HexToAddress(addr) + instance, err := e.client.LoadContract("VRFV2LoadTestWithMetrics", address, func( + address common.Address, + backend bind.ContractBackend, + ) (interface{}, error) 
{ + return vrf_load_test_with_metrics.NewVRFV2LoadTestWithMetrics(address, backend) + }) + if err != nil { + return nil, err + } + return &EthereumVRFv2LoadTestConsumer{ + client: e.client, + consumer: instance.(*vrf_load_test_with_metrics.VRFV2LoadTestWithMetrics), + address: &address, + }, err +} diff --git a/integration-tests/contracts/contract_models.go b/integration-tests/contracts/contract_models.go new file mode 100644 index 00000000..dc5d53d9 --- /dev/null +++ b/integration-tests/contracts/contract_models.go @@ -0,0 +1,415 @@ +// Package contracts handles deployment, management, and interactions of smart contracts on various chains +package contracts + +import ( + "context" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/goplugin/libocr/gethwrappers/offchainaggregator" + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + ocrConfigHelper "github.com/goplugin/libocr/offchainreporting/confighelper" + ocrConfigHelper2 "github.com/goplugin/libocr/offchainreporting2plus/confighelper" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/functions_billing_registry_events_mock" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/operator_factory" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/verifier" +) + +type FluxAggregatorOptions struct { + PaymentAmount *big.Int // The amount of PLI paid to each oracle per submission, in wei (units of 10⁻¹⁸ PLI) + Timeout uint32 // The number of seconds after the previous round that are allowed to lapse before allowing an oracle to skip an unfinished round + Validator common.Address // An optional contract address for validating external validation of answers + MinSubValue *big.Int // An immutable check for a lower bound of what 
submission values are accepted from an oracle + MaxSubValue *big.Int // An immutable check for an upper bound of what submission values are accepted from an oracle + Decimals uint8 // The number of decimals to offset the answer by + Description string // A short description of what is being reported +} + +type FluxAggregatorData struct { + AllocatedFunds *big.Int // The amount of payment yet to be withdrawn by oracles + AvailableFunds *big.Int // The amount of future funding available to oracles + LatestRoundData RoundData // Data about the latest round + Oracles []common.Address // Addresses of oracles on the contract +} + +type FluxAggregatorSetOraclesOptions struct { + AddList []common.Address // oracle addresses to add + RemoveList []common.Address // oracle addresses to remove + AdminList []common.Address // oracle addresses to become admin + MinSubmissions uint32 // min amount of submissions in round + MaxSubmissions uint32 // max amount of submissions in round + RestartDelayRounds uint32 // rounds to wait after oracles has changed +} + +type SubmissionEvent struct { + Contract common.Address + Submission *big.Int + Round uint32 + BlockNumber uint64 + Oracle common.Address +} + +type FluxAggregator interface { + Address() string + Fund(ethAmount *big.Float) error + LatestRoundID(ctx context.Context) (*big.Int, error) + LatestRoundData(ctx context.Context) (flux_aggregator_wrapper.LatestRoundData, error) + GetContractData(ctxt context.Context) (*FluxAggregatorData, error) + UpdateAvailableFunds() error + PaymentAmount(ctx context.Context) (*big.Int, error) + RequestNewRound(ctx context.Context) error + WithdrawPayment(ctx context.Context, from common.Address, to common.Address, amount *big.Int) error + WithdrawablePayment(ctx context.Context, addr common.Address) (*big.Int, error) + GetOracles(ctx context.Context) ([]string, error) + SetOracles(opts FluxAggregatorSetOraclesOptions) error + Description(ctxt context.Context) (string, error) + 
SetRequesterPermissions(ctx context.Context, addr common.Address, authorized bool, roundsDelay uint32) error + WatchSubmissionReceived(ctx context.Context, eventChan chan<- *SubmissionEvent) error +} + +type LinkToken interface { + Address() string + Approve(to string, amount *big.Int) error + Transfer(to string, amount *big.Int) error + BalanceOf(ctx context.Context, addr string) (*big.Int, error) + TransferAndCall(to string, amount *big.Int, data []byte) (*types.Transaction, error) + Name(context.Context) (string, error) +} + +type OffchainOptions struct { + MaximumGasPrice uint32 // The highest gas price for which transmitter will be compensated + ReasonableGasPrice uint32 // The transmitter will receive reward for gas prices under this value + MicroLinkPerEth uint32 // The reimbursement per ETH of gas cost, in 1e-6PLI units + LinkGweiPerObservation uint32 // The reward to the oracle for contributing an observation to a successfully transmitted report, in 1e-9PLI units + LinkGweiPerTransmission uint32 // The reward to the transmitter of a successful report, in 1e-9PLI units + MinimumAnswer *big.Int // The lowest answer the median of a report is allowed to be + MaximumAnswer *big.Int // The highest answer the median of a report is allowed to be + BillingAccessController common.Address // The access controller for billing admin functions + RequesterAccessController common.Address // The access controller for requesting new rounds + Decimals uint8 // Answers are stored in fixed-point format, with this many digits of precision + Description string // A short description of what is being reported +} + +// https://uploads-ssl.webflow.com/5f6b7190899f41fb70882d08/603651a1101106649eef6a53_plugin-ocr-protocol-paper-02-24-20.pdf +type OffChainAggregatorConfig struct { + DeltaProgress time.Duration // The duration in which a leader must achieve progress or be replaced + DeltaResend time.Duration // The interval at which nodes resend NEWEPOCH messages + DeltaRound 
time.Duration // The duration after which a new round is started + DeltaGrace time.Duration // The duration of the grace period during which delayed oracles can still submit observations + DeltaC time.Duration // Limits how often updates are transmitted to the contract as long as the median isn’t changing by more then AlphaPPB + AlphaPPB uint64 // Allows larger changes of the median to be reported immediately, bypassing DeltaC + DeltaStage time.Duration // Used to stagger stages of the transmission protocol. Multiple Ethereum blocks must be mineable in this period + RMax uint8 // The maximum number of rounds in an epoch + S []int // Transmission Schedule + F int // The allowed number of "bad" oracles + N int // The number of oracles + OracleIdentities []ocrConfigHelper.OracleIdentityExtra +} + +type OffChainAggregatorV2Config struct { + DeltaProgress time.Duration + DeltaResend time.Duration + DeltaRound time.Duration + DeltaGrace time.Duration + DeltaStage time.Duration + RMax uint8 + S []int + Oracles []ocrConfigHelper2.OracleIdentityExtra + ReportingPluginConfig []byte + MaxDurationQuery time.Duration + MaxDurationObservation time.Duration + MaxDurationReport time.Duration + MaxDurationShouldAcceptFinalizedReport time.Duration + MaxDurationShouldTransmitAcceptedReport time.Duration + F int + OnchainConfig []byte +} + +type OffchainAggregatorData struct { + LatestRoundData RoundData // Data about the latest round +} + +type OffchainAggregator interface { + Address() string + Fund(nativeAmount *big.Float) error + GetContractData(ctx context.Context) (*OffchainAggregatorData, error) + SetConfig(pluginNodes []*client.PluginK8sClient, ocrConfig OffChainAggregatorConfig, transmitters []common.Address) error + SetConfigLocal(pluginNodes []*client.PluginClient, ocrConfig OffChainAggregatorConfig, transmitters []common.Address) error + SetPayees([]string, []string) error + RequestNewRound() error + GetLatestAnswer(ctx context.Context) (*big.Int, error) + 
GetLatestRound(ctx context.Context) (*RoundData, error) + GetRound(ctx context.Context, roundID *big.Int) (*RoundData, error) + ParseEventAnswerUpdated(log types.Log) (*offchainaggregator.OffchainAggregatorAnswerUpdated, error) + LatestRoundDataUpdatedAt() (*big.Int, error) +} + +type OffchainAggregatorV2 interface { + Address() string + Fund(nativeAmount *big.Float) error + RequestNewRound() error + SetConfig(ocrConfig *OCRv2Config) error + GetConfig(ctx context.Context) ([32]byte, uint32, error) + SetPayees(transmitters, payees []string) error + GetLatestAnswer(ctx context.Context) (*big.Int, error) + GetLatestRound(ctx context.Context) (*RoundData, error) + GetRound(ctx context.Context, roundID *big.Int) (*RoundData, error) + ParseEventAnswerUpdated(log types.Log) (*ocr2aggregator.OCR2AggregatorAnswerUpdated, error) +} + +type KeeperRegistryCheckUpkeepGasUsageWrapper interface { + Address() string +} + +type Oracle interface { + Address() string + Fund(ethAmount *big.Float) error + SetFulfillmentPermission(address string, allowed bool) error +} + +type APIConsumer interface { + Address() string + RoundID(ctx context.Context) (*big.Int, error) + Fund(ethAmount *big.Float) error + Data(ctx context.Context) (*big.Int, error) + CreateRequestTo( + oracleAddr string, + jobID [32]byte, + payment *big.Int, + url string, + path string, + times *big.Int, + ) error +} + +type Storage interface { + Get(ctxt context.Context) (*big.Int, error) + Set(*big.Int) error +} + +// JobByInstance helper struct to match job + instance ID +type JobByInstance struct { + ID string + Instance string +} + +type MockETHPLIFeed interface { + Address() string + LatestRoundData() (*big.Int, error) + LatestRoundDataUpdatedAt() (*big.Int, error) +} + +type MockGasFeed interface { + Address() string +} + +type BlockHashStore interface { + Address() string + GetBlockHash(ctx context.Context, blockNumber *big.Int) ([32]byte, error) +} + +type Staking interface { + Address() string + Fund(ethAmount 
*big.Float) error + AddOperators(operators []common.Address) error + RemoveOperators(operators []common.Address) error + SetFeedOperators(operators []common.Address) error + RaiseAlert() error + Start(amount *big.Int, initialRewardRate *big.Int) error + SetMerkleRoot(newMerkleRoot [32]byte) error +} + +type FunctionsOracleEventsMock interface { + Address() string + OracleResponse(requestId [32]byte) error + OracleRequest(requestId [32]byte, requestingContract common.Address, requestInitiator common.Address, subscriptionId uint64, subscriptionOwner common.Address, data []byte) error + UserCallbackError(requestId [32]byte, reason string) error + UserCallbackRawError(requestId [32]byte, lowLevelData []byte) error +} + +type FunctionsBillingRegistryEventsMock interface { + Address() string + SubscriptionFunded(subscriptionId uint64, oldBalance *big.Int, newBalance *big.Int) error + BillingStart(requestId [32]byte, commitment functions_billing_registry_events_mock.FunctionsBillingRegistryEventsMockCommitment) error + BillingEnd(requestId [32]byte, subscriptionId uint64, signerPayment *big.Int, transmitterPayment *big.Int, totalCost *big.Int, success bool) error +} + +type StakingEventsMock interface { + Address() string + PoolSizeIncreased(maxPoolSize *big.Int) error + MaxCommunityStakeAmountIncreased(maxStakeAmount *big.Int) error + MaxOperatorStakeAmountIncreased(maxStakeAmount *big.Int) error + RewardInitialized(rate *big.Int, available *big.Int, startTimestamp *big.Int, endTimestamp *big.Int) error + AlertRaised(alerter common.Address, roundId *big.Int, rewardAmount *big.Int) error + Staked(staker common.Address, newStake *big.Int, totalStake *big.Int) error + OperatorAdded(operator common.Address) error + OperatorRemoved(operator common.Address, amount *big.Int) error + FeedOperatorsSet(feedOperators []common.Address) error +} + +type OffchainAggregatorEventsMock interface { + Address() string + ConfigSet(previousConfigBlockNumber uint32, configCount uint64, 
signers []common.Address, transmitters []common.Address, threshold uint8, encodedConfigVersion uint64, encoded []byte) error + NewTransmission(aggregatorRoundId uint32, answer *big.Int, transmitter common.Address, observations []*big.Int, observers []byte, rawReportContext [32]byte) error +} + +type KeeperRegistry11Mock interface { + Address() string + EmitUpkeepPerformed(id *big.Int, success bool, from common.Address, payment *big.Int, performData []byte) error + EmitUpkeepCanceled(id *big.Int, atBlockHeight uint64) error + EmitFundsWithdrawn(id *big.Int, amount *big.Int, to common.Address) error + EmitKeepersUpdated(keepers []common.Address, payees []common.Address) error + EmitUpkeepRegistered(id *big.Int, executeGas uint32, admin common.Address) error + EmitFundsAdded(id *big.Int, from common.Address, amount *big.Int) error + SetUpkeepCount(_upkeepCount *big.Int) error + SetCanceledUpkeepList(_canceledUpkeepList []*big.Int) error + SetKeeperList(_keepers []common.Address) error + SetConfig(_paymentPremiumPPB uint32, _flatFeeMicroLink uint32, _blockCountPerTurn *big.Int, _checkGasLimit uint32, _stalenessSeconds *big.Int, _gasCeilingMultiplier uint16, _fallbackGasPrice *big.Int, _fallbackLinkPrice *big.Int) error + SetUpkeep(id *big.Int, _target common.Address, _executeGas uint32, _balance *big.Int, _admin common.Address, _maxValidBlocknumber uint64, _lastKeeper common.Address, _checkData []byte) error + SetMinBalance(id *big.Int, minBalance *big.Int) error + SetCheckUpkeepData(id *big.Int, performData []byte, maxLinkPayment *big.Int, gasLimit *big.Int, adjustedGasWei *big.Int, linkEth *big.Int) error + SetPerformUpkeepSuccess(id *big.Int, success bool) error +} + +type KeeperRegistrar12Mock interface { + Address() string + EmitRegistrationRequested(hash [32]byte, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8) error + EmitRegistrationApproved(hash 
[32]byte, displayName string, upkeepId *big.Int) error + SetRegistrationConfig(_autoApproveConfigType uint8, _autoApproveMaxAllowed uint32, _approvedCount uint32, _keeperRegistry common.Address, _minPLIJuels *big.Int) error +} + +type KeeperGasWrapperMock interface { + Address() string + SetMeasureCheckGasResult(result bool, payload []byte, gas *big.Int) error +} + +type FunctionsV1EventsMock interface { + Address() string + EmitRequestProcessed(requestId [32]byte, subscriptionId uint64, totalCostJuels *big.Int, transmitter common.Address, resultCode uint8, response []byte, errByte []byte, callbackReturnData []byte) error + EmitRequestStart(requestId [32]byte, donId [32]byte, subscriptionId uint64, subscriptionOwner common.Address, requestingContract common.Address, requestInitiator common.Address, data []byte, dataVersion uint16, callbackGasLimit uint32, estimatedTotalCostJuels *big.Int) error + EmitSubscriptionCanceled(subscriptionId uint64, fundsRecipient common.Address, fundsAmount *big.Int) error + EmitSubscriptionConsumerAdded(subscriptionId uint64, consumer common.Address) error + EmitSubscriptionConsumerRemoved(subscriptionId uint64, consumer common.Address) error + EmitSubscriptionCreated(subscriptionId uint64, owner common.Address) error + EmitSubscriptionFunded(subscriptionId uint64, oldBalance *big.Int, newBalance *big.Int) error + EmitSubscriptionOwnerTransferred(subscriptionId uint64, from common.Address, to common.Address) error + EmitSubscriptionOwnerTransferRequested(subscriptionId uint64, from common.Address, to common.Address) error + EmitRequestNotProcessed(requestId [32]byte, coordinator common.Address, transmitter common.Address, resultCode uint8) error + EmitContractUpdated(id [32]byte, from common.Address, to common.Address) error +} + +type MockAggregatorProxy interface { + Address() string + UpdateAggregator(aggregator common.Address) error + Aggregator() (common.Address, error) +} + +type RoundData struct { + RoundId *big.Int + Answer 
*big.Int + StartedAt *big.Int + UpdatedAt *big.Int + AnsweredInRound *big.Int +} + +// ReadAccessController is read/write access controller, just named by interface +type ReadAccessController interface { + Address() string + AddAccess(addr string) error + DisableAccessCheck() error +} + +// Flags flags contract interface +type Flags interface { + Address() string + GetFlag(ctx context.Context, addr string) (bool, error) +} + +// OperatorFactory creates Operator contracts for node operators +type OperatorFactory interface { + Address() string + DeployNewOperatorAndForwarder() (*types.Transaction, error) + ParseAuthorizedForwarderCreated(log types.Log) (*operator_factory.OperatorFactoryAuthorizedForwarderCreated, error) + ParseOperatorCreated(log types.Log) (*operator_factory.OperatorFactoryOperatorCreated, error) +} + +// Operator operates forwarders +type Operator interface { + Address() string + AcceptAuthorizedReceivers(forwarders []common.Address, eoa []common.Address) error +} + +// AuthorizedForwarder forward requests from cll nodes eoa +type AuthorizedForwarder interface { + Address() string + Owner(ctx context.Context) (string, error) + GetAuthorizedSenders(ctx context.Context) ([]string, error) +} + +type FunctionsCoordinator interface { + Address() string + GetThresholdPublicKey() ([]byte, error) + GetDONPublicKey() ([]byte, error) +} + +type FunctionsRouter interface { + Address() string + CreateSubscriptionWithConsumer(consumer string) (uint64, error) +} + +type FunctionsLoadTestClient interface { + Address() string + ResetStats() error + GetStats() (*EthereumFunctionsLoadStats, error) + SendRequest(times uint32, source string, encryptedSecretsReferences []byte, args []string, subscriptionId uint64, jobId [32]byte) error + SendRequestWithDONHostedSecrets(times uint32, source string, slotID uint8, slotVersion uint64, args []string, subscriptionId uint64, donID [32]byte) error +} + +type MercuryVerifier interface { + Address() common.Address + 
Verify(signedReport []byte, sender common.Address) error + SetConfig(feedId [32]byte, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []verifier.CommonAddressAndWeight) (*types.Transaction, error) + LatestConfigDetails(ctx context.Context, feedId [32]byte) (verifier.LatestConfigDetails, error) +} + +type MercuryVerifierProxy interface { + Address() common.Address + InitializeVerifier(verifierAddress common.Address) (*types.Transaction, error) + Verify(signedReport []byte, parameterPayload []byte, value *big.Int) (*types.Transaction, error) + VerifyBulk(signedReports [][]byte, parameterPayload []byte, value *big.Int) (*types.Transaction, error) + SetFeeManager(feeManager common.Address) (*types.Transaction, error) +} + +type MercuryFeeManager interface { + Address() common.Address + UpdateSubscriberDiscount(subscriber common.Address, feedId [32]byte, token common.Address, discount uint64) (*types.Transaction, error) +} + +type MercuryRewardManager interface { + Address() common.Address + SetFeeManager(feeManager common.Address) (*types.Transaction, error) +} + +type WERC20Mock interface { + Address() common.Address + BalanceOf(ctx context.Context, addr string) (*big.Int, error) + Approve(to string, amount *big.Int) error + Transfer(to string, amount *big.Int) error + Mint(account common.Address, amount *big.Int) (*types.Transaction, error) +} + +type LogEmitter interface { + Address() common.Address + EmitLogInts(ints []int) (*types.Transaction, error) + EmitLogIntsIndexed(ints []int) (*types.Transaction, error) + EmitLogIntMultiIndexed(ints int, ints2 int, count int) (*types.Transaction, error) + EmitLogStrings(strings []string) (*types.Transaction, error) + EmitLogInt(payload int) (*types.Transaction, error) + EmitLogIntIndexed(payload int) (*types.Transaction, error) + EmitLogString(strings string) (*types.Transaction, error) +} diff --git 
a/integration-tests/contracts/contract_vrf_models.go b/integration-tests/contracts/contract_vrf_models.go new file mode 100644 index 00000000..c9877f6c --- /dev/null +++ b/integration-tests/contracts/contract_vrf_models.go @@ -0,0 +1,346 @@ +package contracts + +import ( + "context" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2_5" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_load_test_with_metrics" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_owner" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2_consumer_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_load_test_with_metrics" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_upgraded_version" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_wrapper_load_test_consumer" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2plus_wrapper_load_test_consumer" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/dkg" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon" +) + +type VRF interface { + Fund(ethAmount *big.Float) error + ProofLength(context.Context) (*big.Int, error) +} + +type VRFCoordinator interface { + RegisterProvingKey( + fee *big.Int, + oracleAddr string, + publicProvingKey [2]*big.Int, + jobID [32]byte, + ) error + HashOfKey(ctx context.Context, pubKey [2]*big.Int) ([32]byte, error) + Address() string +} + +type VRFCoordinatorV2 interface { + GetRequestConfig(ctx context.Context) (GetRequestConfig, error) + GetConfig(ctx context.Context) (vrf_coordinator_v2.GetConfig, error) + GetFallbackWeiPerUnitLink(ctx context.Context) (*big.Int, error) + 
GetFeeConfig(ctx context.Context) (vrf_coordinator_v2.GetFeeConfig, error) + SetConfig( + minimumRequestConfirmations uint16, + maxGasLimit uint32, + stalenessSeconds uint32, + gasAfterPaymentCalculation uint32, + fallbackWeiPerUnitLink *big.Int, + feeConfig vrf_coordinator_v2.VRFCoordinatorV2FeeConfig, + ) error + RegisterProvingKey( + oracleAddr string, + publicProvingKey [2]*big.Int, + ) error + TransferOwnership(to common.Address) error + HashOfKey(ctx context.Context, pubKey [2]*big.Int) ([32]byte, error) + CreateSubscription() (*types.Transaction, error) + AddConsumer(subId uint64, consumerAddress string) error + Address() string + GetSubscription(ctx context.Context, subID uint64) (vrf_coordinator_v2.GetSubscription, error) + GetOwner(ctx context.Context) (common.Address, error) + PendingRequestsExist(ctx context.Context, subID uint64) (bool, error) + OwnerCancelSubscription(subID uint64) (*types.Transaction, error) + CancelSubscription(subID uint64, to common.Address) (*types.Transaction, error) + FindSubscriptionID(subID uint64) (uint64, error) + WaitForRandomWordsFulfilledEvent(requestID []*big.Int, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, error) + WaitForRandomWordsRequestedEvent(keyHash [][32]byte, subID []uint64, sender []common.Address, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested, error) + WaitForSubscriptionCanceledEvent(subID []uint64, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceled, error) + WaitForSubscriptionConsumerAdded(subID []uint64, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAdded, error) + WaitForSubscriptionConsumerRemoved(subID []uint64, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemoved, error) + WaitForSubscriptionCreatedEvent(subID []uint64, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated, error) + 
WaitForSubscriptionFunded(subID []uint64, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded, error) + WaitForConfigSetEvent(timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2ConfigSet, error) + OracleWithdraw(recipient common.Address, amount *big.Int) error +} + +type VRFCoordinatorV2_5 interface { + SetPLIAndPLINativeFeed( + link string, + linkNativeFeed string, + ) error + SetConfig( + minimumRequestConfirmations uint16, + maxGasLimit uint32, + stalenessSeconds uint32, + gasAfterPaymentCalculation uint32, + fallbackWeiPerUnitLink *big.Int, + fulfillmentFlatFeeNativePPM uint32, + fulfillmentFlatFeeLinkDiscountPPM uint32, + nativePremiumPercentage uint8, + linkPremiumPercentage uint8, + ) error + RegisterProvingKey( + publicProvingKey [2]*big.Int, + gasLaneMaxGas uint64, + ) error + HashOfKey(ctx context.Context, pubKey [2]*big.Int) ([32]byte, error) + CreateSubscription() (*types.Transaction, error) + GetActiveSubscriptionIds(ctx context.Context, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + Migrate(subId *big.Int, coordinatorAddress string) error + RegisterMigratableCoordinator(migratableCoordinatorAddress string) error + AddConsumer(subId *big.Int, consumerAddress string) error + FundSubscriptionWithNative(subId *big.Int, nativeTokenAmount *big.Int) error + Address() string + PendingRequestsExist(ctx context.Context, subID *big.Int) (bool, error) + GetSubscription(ctx context.Context, subID *big.Int) (vrf_coordinator_v2_5.GetSubscription, error) + OwnerCancelSubscription(subID *big.Int) (*types.Transaction, error) + CancelSubscription(subID *big.Int, to common.Address) (*types.Transaction, error) + Withdraw(recipient common.Address) error + WithdrawNative(recipient common.Address) error + GetNativeTokenTotalBalance(ctx context.Context) (*big.Int, error) + GetLinkTotalBalance(ctx context.Context) (*big.Int, error) + FindSubscriptionID(subID *big.Int) (*big.Int, error) + WaitForSubscriptionCreatedEvent(timeout 
time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCreated, error) + WaitForSubscriptionCanceledEvent(subID *big.Int, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCanceled, error) + WaitForRandomWordsFulfilledEvent(subID []*big.Int, requestID []*big.Int, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) + WaitForRandomWordsRequestedEvent(keyHash [][32]byte, subID []*big.Int, sender []common.Address, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested, error) + WaitForMigrationCompletedEvent(timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25MigrationCompleted, error) +} + +type VRFCoordinatorV2PlusUpgradedVersion interface { + SetPLIAndPLINativeFeed( + link string, + linkNativeFeed string, + ) error + SetConfig( + minimumRequestConfirmations uint16, + maxGasLimit uint32, + stalenessSeconds uint32, + gasAfterPaymentCalculation uint32, + fallbackWeiPerUnitLink *big.Int, + fulfillmentFlatFeeNativePPM uint32, + fulfillmentFlatFeeLinkDiscountPPM uint32, + nativePremiumPercentage uint8, + linkPremiumPercentage uint8, + ) error + RegisterProvingKey( + publicProvingKey [2]*big.Int, + ) error + HashOfKey(ctx context.Context, pubKey [2]*big.Int) ([32]byte, error) + CreateSubscription() error + GetNativeTokenTotalBalance(ctx context.Context) (*big.Int, error) + GetLinkTotalBalance(ctx context.Context) (*big.Int, error) + Migrate(subId *big.Int, coordinatorAddress string) error + RegisterMigratableCoordinator(migratableCoordinatorAddress string) error + AddConsumer(subId *big.Int, consumerAddress string) error + FundSubscriptionWithNative(subId *big.Int, nativeTokenAmount *big.Int) error + Address() string + GetSubscription(ctx context.Context, subID *big.Int) (vrf_v2plus_upgraded_version.GetSubscription, error) + GetActiveSubscriptionIds(ctx context.Context, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) + FindSubscriptionID() 
(*big.Int, error) + WaitForRandomWordsFulfilledEvent(subID []*big.Int, requestID []*big.Int, timeout time.Duration) (*vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled, error) + WaitForMigrationCompletedEvent(timeout time.Duration) (*vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted, error) + WaitForRandomWordsRequestedEvent(keyHash [][32]byte, subID []*big.Int, sender []common.Address, timeout time.Duration) (*vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested, error) +} + +type VRFV2Wrapper interface { + Address() string + SetConfig(wrapperGasOverhead uint32, coordinatorGasOverhead uint32, wrapperPremiumPercentage uint8, keyHash [32]byte, maxNumWords uint8) error + GetSubID(ctx context.Context) (uint64, error) +} + +type VRFV2PlusWrapper interface { + Address() string + SetConfig(wrapperGasOverhead uint32, coordinatorGasOverhead uint32, wrapperPremiumPercentage uint8, keyHash [32]byte, maxNumWords uint8, stalenessSeconds uint32, fallbackWeiPerUnitLink *big.Int, fulfillmentFlatFeeLinkPPM uint32, fulfillmentFlatFeeNativePPM uint32) error + GetSubID(ctx context.Context) (*big.Int, error) +} + +type VRFOwner interface { + Address() string + SetAuthorizedSenders(senders []common.Address) error + AcceptVRFOwnership() error + WaitForRandomWordsForcedEvent(requestIDs []*big.Int, subIds []uint64, senders []common.Address, timeout time.Duration) (*vrf_owner.VRFOwnerRandomWordsForced, error) +} + +type VRFConsumer interface { + Address() string + RequestRandomness(hash [32]byte, fee *big.Int) error + CurrentRoundID(ctx context.Context) (*big.Int, error) + RandomnessOutput(ctx context.Context) (*big.Int, error) + Fund(ethAmount *big.Float) error +} + +type VRFConsumerV2 interface { + Address() string + CurrentSubscription() (uint64, error) + CreateFundedSubscription(funds *big.Int) error + TopUpSubscriptionFunds(funds *big.Int) error + RequestRandomness(hash [32]byte, subID 
uint64, confs uint16, gasLimit uint32, numWords uint32) error + RandomnessOutput(ctx context.Context, arg0 *big.Int) (*big.Int, error) + GetAllRandomWords(ctx context.Context, num int) ([]*big.Int, error) + GasAvailable() (*big.Int, error) + Fund(ethAmount *big.Float) error +} + +type VRFv2Consumer interface { + Address() string + RequestRandomness(hash [32]byte, subID uint64, confs uint16, gasLimit uint32, numWords uint32) error + GetRequestStatus(ctx context.Context, requestID *big.Int) (vrf_v2_consumer_wrapper.GetRequestStatus, error) + GetLastRequestId(ctx context.Context) (*big.Int, error) +} + +type VRFv2LoadTestConsumer interface { + Address() string + RequestRandomness(hash [32]byte, subID uint64, confs uint16, gasLimit uint32, numWords uint32, requestCount uint16) (*types.Transaction, error) + RequestRandomWordsWithForceFulfill( + keyHash [32]byte, + requestConfirmations uint16, + callbackGasLimit uint32, + numWords uint32, + requestCount uint16, + subTopUpAmount *big.Int, + linkAddress common.Address, + ) (*types.Transaction, error) + GetRequestStatus(ctx context.Context, requestID *big.Int) (vrf_load_test_with_metrics.GetRequestStatus, error) + GetLastRequestId(ctx context.Context) (*big.Int, error) + GetLoadTestMetrics(ctx context.Context) (*VRFLoadTestMetrics, error) + ResetMetrics() error +} + +type VRFv2WrapperLoadTestConsumer interface { + Address() string + Fund(ethAmount *big.Float) error + RequestRandomness(requestConfirmations uint16, callbackGasLimit uint32, numWords uint32, requestCount uint16) (*types.Transaction, error) + GetRequestStatus(ctx context.Context, requestID *big.Int) (vrfv2_wrapper_load_test_consumer.GetRequestStatus, error) + GetLastRequestId(ctx context.Context) (*big.Int, error) + GetWrapper(ctx context.Context) (common.Address, error) + GetLoadTestMetrics(ctx context.Context) (*VRFLoadTestMetrics, error) +} + +type VRFv2PlusLoadTestConsumer interface { + Address() string + RequestRandomness(keyHash [32]byte, subID *big.Int, 
requestConfirmations uint16, callbackGasLimit uint32, nativePayment bool, numWords uint32, requestCount uint16) (*types.Transaction, error) + GetRequestStatus(ctx context.Context, requestID *big.Int) (vrf_v2plus_load_test_with_metrics.GetRequestStatus, error) + GetLastRequestId(ctx context.Context) (*big.Int, error) + GetLoadTestMetrics(ctx context.Context) (*VRFLoadTestMetrics, error) + GetCoordinator(ctx context.Context) (common.Address, error) + ResetMetrics() error +} + +type VRFv2PlusWrapperLoadTestConsumer interface { + Address() string + Fund(ethAmount *big.Float) error + RequestRandomness(requestConfirmations uint16, callbackGasLimit uint32, numWords uint32, requestCount uint16) (*types.Transaction, error) + RequestRandomnessNative(requestConfirmations uint16, callbackGasLimit uint32, numWords uint32, requestCount uint16) (*types.Transaction, error) + GetRequestStatus(ctx context.Context, requestID *big.Int) (vrfv2plus_wrapper_load_test_consumer.GetRequestStatus, error) + GetLastRequestId(ctx context.Context) (*big.Int, error) + GetWrapper(ctx context.Context) (common.Address, error) + GetLoadTestMetrics(ctx context.Context) (*VRFLoadTestMetrics, error) +} + +type DKG interface { + Address() string + AddClient(keyID string, clientAddress string) error + SetConfig( + signerAddresses []common.Address, + transmitterAddresses []common.Address, + f uint8, + onchainConfig []byte, + offchainConfigVersion uint64, + offchainConfig []byte, + ) error + WaitForConfigSetEvent(timeout time.Duration) (*dkg.DKGConfigSet, error) + WaitForTransmittedEvent(timeout time.Duration) (*dkg.DKGTransmitted, error) +} + +type VRFCoordinatorV3 interface { + Address() string + SetProducer(producerAddress string) error + CreateSubscription() error + FindSubscriptionID() (*big.Int, error) + AddConsumer(subId *big.Int, consumerAddress string) error + SetConfig(maxCallbackGasLimit, maxCallbackArgumentsLength uint32) error +} + +type VRFBeacon interface { + Address() string + 
SetPayees(transmitterAddresses []common.Address, payeesAddresses []common.Address) error + SetConfig( + signerAddresses []common.Address, + transmitterAddresses []common.Address, + f uint8, + onchainConfig []byte, + offchainConfigVersion uint64, + offchainConfig []byte, + ) error + WaitForConfigSetEvent(timeout time.Duration) (*vrf_beacon.VRFBeaconConfigSet, error) + WaitForNewTransmissionEvent(timeout time.Duration) (*vrf_beacon.VRFBeaconNewTransmission, error) + LatestConfigDigestAndEpoch(ctx context.Context) (vrf_beacon.LatestConfigDigestAndEpoch, + error) +} + +type VRFBeaconConsumer interface { + Address() string + RequestRandomness( + numWords uint16, + subID, confirmationDelayArg *big.Int, + ) (*types.Receipt, error) + RedeemRandomness(subID, requestID *big.Int) error + RequestRandomnessFulfillment( + numWords uint16, + subID, confirmationDelayArg *big.Int, + requestGasLimit, + callbackGasLimit uint32, + arguments []byte, + ) (*types.Receipt, error) + IBeaconPeriodBlocks(ctx context.Context) (*big.Int, error) + GetRequestIdsBy(ctx context.Context, nextBeaconOutputHeight *big.Int, confDelay *big.Int) (*big.Int, error) + GetRandomnessByRequestId(ctx context.Context, requestID *big.Int, numWordIndex *big.Int) (*big.Int, error) +} + +type BatchBlockhashStore interface { + Address() string +} + +type VRFMockETHPLIFeed interface { + Address() string + LatestRoundData() (*big.Int, error) + LatestRoundDataUpdatedAt() (*big.Int, error) + SetBlockTimestampDeduction(blockTimestampDeduction *big.Int) error +} + +type RequestStatus struct { + Fulfilled bool + RandomWords []*big.Int +} + +type LoadTestRequestStatus struct { + Fulfilled bool + RandomWords []*big.Int + // Currently Unused November 8, 2023, Might be used in near future, will remove if not. 
+ // requestTimestamp *big.Int + // fulfilmentTimestamp *big.Int + // requestBlockNumber *big.Int + // fulfilmentBlockNumber *big.Int +} + +type VRFLoadTestMetrics struct { + RequestCount *big.Int + FulfilmentCount *big.Int + AverageFulfillmentInMillions *big.Int + SlowestFulfillment *big.Int + FastestFulfillment *big.Int +} diff --git a/integration-tests/contracts/ethereum/KeeperRegistryVersions.go b/integration-tests/contracts/ethereum/KeeperRegistryVersions.go new file mode 100644 index 00000000..f15e4352 --- /dev/null +++ b/integration-tests/contracts/ethereum/KeeperRegistryVersions.go @@ -0,0 +1,21 @@ +package ethereum + +import ( + "github.com/ethereum/go-ethereum/common" +) + +// AbigenLog is an interface for abigen generated log topics +type AbigenLog interface { + Topic() common.Hash +} + +type KeeperRegistryVersion int32 + +const ( + RegistryVersion_1_0 KeeperRegistryVersion = iota + RegistryVersion_1_1 + RegistryVersion_1_2 + RegistryVersion_1_3 + RegistryVersion_2_0 + RegistryVersion_2_1 +) diff --git a/integration-tests/contracts/ethereum/OffchainAggregatorEventsMock.go b/integration-tests/contracts/ethereum/OffchainAggregatorEventsMock.go new file mode 100644 index 00000000..d6250bcf --- /dev/null +++ b/integration-tests/contracts/ethereum/OffchainAggregatorEventsMock.go @@ -0,0 +1,2582 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package ethereum + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var OffchainAggregatorEventsMockMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int256\",\"name\":\"current\",\"type\":\"int256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"}],\"name\":\"AnswerUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"old\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"BillingAccessControllerSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"maximumGasPrice\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"reasonableGasPrice\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"microLinkPerEth\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"linkGweiPerObservation\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"linkGweiPerTransmission\",\"type\":\"uint32\"}],\"name\":\"BillingSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"prev
iousConfigBlockNumber\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"threshold\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"encodedConfigVersion\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"encoded\",\"type\":\"bytes\"}],\"name\":\"ConfigSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_oldLinkToken\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_newLinkToken\",\"type\":\"address\"}],\"name\":\"LinkTokenSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"startedBy\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"}],\"name\":\"NewRound\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint32\",\"name\":\"aggregatorRoundId\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"int192\",\"name\":\"answer\",\"type\":\"int192\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"int192[]\",\"name\":\"observations\",\"type\":\"int192[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"observers\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"rawReportContext\",\"type\":\"bytes32\"}],\"name\":\"NewTransmission\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"a
ddress\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"linkToken\",\"type\":\"address\"}],\"name\":\"OraclePaid\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previous\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"PayeeshipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"old\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"RequesterAccessControllerSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"reques
ter\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes16\",\"name\":\"configDigest\",\"type\":\"bytes16\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"round\",\"type\":\"uint8\"}],\"name\":\"RoundRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousValidator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"previousGasLimit\",\"type\":\"uint32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"currentValidator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint32\",\"name\":\"currentGasLimit\",\"type\":\"uint32\"}],\"name\":\"ValidatorConfigSet\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"int256\",\"name\":\"current\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uint256\"}],\"name\":\"emitAnswerUpdated\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"old\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"emitBillingAccessControllerSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"maximumGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"reasonableGasPrice\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"microLinkPerEth\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"linkGweiPerObservation\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"linkGweiPerTransmission\",\"type\":\"uint32\"}],\"name\":\"emitBillingSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint
32\",\"name\":\"previousConfigBlockNumber\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"configCount\",\"type\":\"uint64\"},{\"internalType\":\"address[]\",\"name\":\"signers\",\"type\":\"address[]\"},{\"internalType\":\"address[]\",\"name\":\"transmitters\",\"type\":\"address[]\"},{\"internalType\":\"uint8\",\"name\":\"threshold\",\"type\":\"uint8\"},{\"internalType\":\"uint64\",\"name\":\"encodedConfigVersion\",\"type\":\"uint64\"},{\"internalType\":\"bytes\",\"name\":\"encoded\",\"type\":\"bytes\"}],\"name\":\"emitConfigSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_oldLinkToken\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_newLinkToken\",\"type\":\"address\"}],\"name\":\"emitLinkTokenSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"startedBy\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"}],\"name\":\"emitNewRound\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"aggregatorRoundId\",\"type\":\"uint32\"},{\"internalType\":\"int192\",\"name\":\"answer\",\"type\":\"int192\"},{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"int192[]\",\"name\":\"observations\",\"type\":\"int192[]\"},{\"internalType\":\"bytes\",\"name\":\"observers\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"rawReportContext\",\"type\":\"bytes32\"}],\"name\":\"emitNewTransmission\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"payee\",\"type\":\"address\"},{\"internalType\":\"uin
t256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"linkToken\",\"type\":\"address\"}],\"name\":\"emitOraclePaid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferred\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposed\",\"type\":\"address\"}],\"name\":\"emitPayeeshipTransferRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"transmitter\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"previous\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"emitPayeeshipTransferred\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"old\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"current\",\"type\":\"address\"}],\"name\":\"emitRequesterAccessControllerSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"requester\",\"type\":\"address\"},{\"internalType\":\"bytes16\",\"name\":\"configDigest\",\"type\":\"bytes16\"},{\"internalType\":\"uint32\",\"name\":\"epoch\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",
\"name\":\"round\",\"type\":\"uint8\"}],\"name\":\"emitRoundRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"previousValidator\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"previousGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"address\",\"name\":\"currentValidator\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"currentGasLimit\",\"type\":\"uint32\"}],\"name\":\"emitValidatorConfigSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b50610faf806100206000396000f3fe608060405234801561001057600080fd5b50600436106100f55760003560e01c80638b28369d11610097578063a57c1e0c11610066578063a57c1e0c146101cd578063b019b4e8146101e0578063f7420bc2146101f3578063faf1347c1461020657600080fd5b80638b28369d1461018157806395aa14461461019457806399e1a39b146101a7578063a3296557146101ba57600080fd5b806358e1e734116100d357806358e1e734146101355780636602e6ce14610148578063715bd44e1461015b57806389ffde8d1461016e57600080fd5b8063275c7ea4146100fa5780632c769fd71461010f578063448be1e014610122575b600080fd5b61010d610108366004610c76565b610219565b005b61010d61011d366004610aad565b610265565b61010d610130366004610931565b6102a5565b61010d610143366004610964565b6102fa565b61010d610156366004610a64565b610370565b61010d6101693660046109f4565b6103d3565b61010d61017c366004610931565b61045b565b61010d61018f366004610ad9565b6104a8565b61010d6101a2366004610b0e565b6104f1565b61010d6101b5366004610964565b61053f565b61010d6101c8366004610c11565b6105b5565b61010d6101db366004610931565b610614565b61010d6101ee366004610931565b610672565b61010d610201366004610931565b6106d0565b61010d6102143660046109a7565b61072e565b7f25d719d88a4512dd76c7442b910a83360845505894eb444ef299409e180f8fb9878787878787876040516102549796959493929190610e8a565b60405180910390a150505050505050565b81837f0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f8360405161029891815260200190565b6
0405180910390a3505050565b6040805173ffffffffffffffffffffffffffffffffffffffff8085168252831660208201527f793cb73064f3c8cde7e187ae515511e6e56d1ee89bf08b82fa60fb70f8d4891291015b60405180910390a15050565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167f78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b360405160405180910390a4505050565b6040805163ffffffff80861682528316602082015273ffffffffffffffffffffffffffffffffffffffff80851692908716917fb04e3a37abe9c0fcdfebdeae019a8e2b12ddf53f5d55ffb0caccc1bedaca1541910160405180910390a350505050565b604080517fffffffffffffffffffffffffffffffff000000000000000000000000000000008516815263ffffffff8416602082015260ff831681830152905173ffffffffffffffffffffffffffffffffffffffff8616917f3ea16a923ff4b1df6526e854c9e3a995c43385d70e73359e10623c74f0b52037919081900360600190a250505050565b6040805173ffffffffffffffffffffffffffffffffffffffff8085168252831660208201527f27b89aede8b560578baaa25ee5ce3852c5eecad1e114b941bbd89e1eb4bae63491016102ee565b8173ffffffffffffffffffffffffffffffffffffffff16837f0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac602718360405161029891815260200190565b8563ffffffff167ff6a97944f31ea060dfde0566e4167c1a1082551e64b60ecb14d599a9d023d451868686868660405161052f959493929190610dfd565b60405180910390a2505050505050565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167f84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e3836760405160405180910390a4505050565b6040805163ffffffff878116825286811660208301528581168284015284811660608301528316608082015290517fd0d9486a2c673e2a4b57fc82e4c8a556b3e2b82dd5db07e2c04a920ca0f469b69181900360a00190a15050505050565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167f4966a50c93f855342ccf6c5c0d358b85b91335b2acedc7da0932f691f351711a60405160405180910390a35050565b8073fffffffffffffffffffffffff
fffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127860405160405180910390a35050565b8073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167fd0b1dac935d85bd54cf0a33b0d41d39f8cf53a968465fc7ea2377526b8ac712c856040516107a491815260200190565b60405180910390a450505050565b803573ffffffffffffffffffffffffffffffffffffffff811681146107d657600080fd5b919050565b600082601f8301126107ec57600080fd5b813560206108016107fc83610f4f565b610f00565b80838252828201915082860187848660051b890101111561082157600080fd5b60005b8581101561084757610835826107b2565b84529284019290840190600101610824565b5090979650505050505050565b600082601f83011261086557600080fd5b813567ffffffffffffffff81111561087f5761087f610f73565b6108b060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601610f00565b8181528460208386010111156108c557600080fd5b816020850160208301376000918101602001919091529392505050565b8035601781900b81146107d657600080fd5b803563ffffffff811681146107d657600080fd5b803567ffffffffffffffff811681146107d657600080fd5b803560ff811681146107d657600080fd5b6000806040838503121561094457600080fd5b61094d836107b2565b915061095b602084016107b2565b90509250929050565b60008060006060848603121561097957600080fd5b610982846107b2565b9250610990602085016107b2565b915061099e604085016107b2565b90509250925092565b600080600080608085870312156109bd57600080fd5b6109c6856107b2565b93506109d4602086016107b2565b9250604085013591506109e9606086016107b2565b905092959194509250565b60008060008060808587031215610a0a57600080fd5b610a13856107b2565b935060208501357fffffffffffffffffffffffffffffffff0000000000000000000000000000000081168114610a4857600080fd5b9250610a56604086016108f4565b91506109e960608601610920565b600080600
08060808587031215610a7a57600080fd5b610a83856107b2565b9350610a91602086016108f4565b9250610a9f604086016107b2565b91506109e9606086016108f4565b600080600060608486031215610ac257600080fd5b505081359360208301359350604090920135919050565b600080600060608486031215610aee57600080fd5b83359250610afe602085016107b2565b9150604084013590509250925092565b60008060008060008060c08789031215610b2757600080fd5b610b30876108f4565b95506020610b3f8189016108e2565b9550610b4d604089016107b2565b9450606088013567ffffffffffffffff80821115610b6a57600080fd5b818a0191508a601f830112610b7e57600080fd5b8135610b8c6107fc82610f4f565b8082825285820191508585018e878560051b8801011115610bac57600080fd5b600095505b83861015610bd657610bc2816108e2565b835260019590950194918601918601610bb1565b509750505060808a0135925080831115610bef57600080fd5b5050610bfd89828a01610854565b92505060a087013590509295509295509295565b600080600080600060a08688031215610c2957600080fd5b610c32866108f4565b9450610c40602087016108f4565b9350610c4e604087016108f4565b9250610c5c606087016108f4565b9150610c6a608087016108f4565b90509295509295909350565b600080600080600080600060e0888a031215610c9157600080fd5b610c9a886108f4565b9650610ca860208901610908565b9550604088013567ffffffffffffffff80821115610cc557600080fd5b610cd18b838c016107db565b965060608a0135915080821115610ce757600080fd5b610cf38b838c016107db565b9550610d0160808b01610920565b9450610d0f60a08b01610908565b935060c08a0135915080821115610d2557600080fd5b50610d328a828b01610854565b91505092959891949750929550565b600081518084526020808501945080840160005b83811015610d8757815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101610d55565b509495945050505050565b6000815180845260005b81811015610db857602081850181015186830182015201610d9c565b81811115610dca576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b600060a08201601788810b8452602073ffffffffffffffffffffffffffffffffffffffff89168186015260a0604086015282885180855260c087019150828a01945060005b81811015610e60578551850b83529
483019491830191600101610e42565b50508581036060870152610e748189610d92565b9450505050508260808301529695505050505050565b63ffffffff88168152600067ffffffffffffffff808916602084015260e06040840152610eba60e0840189610d41565b8381036060850152610ecc8189610d41565b905060ff8716608085015281861660a085015283810360c0850152610ef18186610d92565b9b9a5050505050505050505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610f4757610f47610f73565b604052919050565b600067ffffffffffffffff821115610f6957610f69610f73565b5060051b60200190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} + +var OffchainAggregatorEventsMockABI = OffchainAggregatorEventsMockMetaData.ABI + +var OffchainAggregatorEventsMockBin = OffchainAggregatorEventsMockMetaData.Bin + +func DeployOffchainAggregatorEventsMock(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *OffchainAggregatorEventsMock, error) { + parsed, err := OffchainAggregatorEventsMockMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OffchainAggregatorEventsMockBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OffchainAggregatorEventsMock{OffchainAggregatorEventsMockCaller: OffchainAggregatorEventsMockCaller{contract: contract}, OffchainAggregatorEventsMockTransactor: OffchainAggregatorEventsMockTransactor{contract: contract}, OffchainAggregatorEventsMockFilterer: OffchainAggregatorEventsMockFilterer{contract: contract}}, nil +} + +type OffchainAggregatorEventsMock struct { + address common.Address + abi abi.ABI + OffchainAggregatorEventsMockCaller + OffchainAggregatorEventsMockTransactor + 
OffchainAggregatorEventsMockFilterer +} + +type OffchainAggregatorEventsMockCaller struct { + contract *bind.BoundContract +} + +type OffchainAggregatorEventsMockTransactor struct { + contract *bind.BoundContract +} + +type OffchainAggregatorEventsMockFilterer struct { + contract *bind.BoundContract +} + +type OffchainAggregatorEventsMockSession struct { + Contract *OffchainAggregatorEventsMock + CallOpts bind.CallOpts + TransactOpts bind.TransactOpts +} + +type OffchainAggregatorEventsMockCallerSession struct { + Contract *OffchainAggregatorEventsMockCaller + CallOpts bind.CallOpts +} + +type OffchainAggregatorEventsMockTransactorSession struct { + Contract *OffchainAggregatorEventsMockTransactor + TransactOpts bind.TransactOpts +} + +type OffchainAggregatorEventsMockRaw struct { + Contract *OffchainAggregatorEventsMock +} + +type OffchainAggregatorEventsMockCallerRaw struct { + Contract *OffchainAggregatorEventsMockCaller +} + +type OffchainAggregatorEventsMockTransactorRaw struct { + Contract *OffchainAggregatorEventsMockTransactor +} + +func NewOffchainAggregatorEventsMock(address common.Address, backend bind.ContractBackend) (*OffchainAggregatorEventsMock, error) { + abi, err := abi.JSON(strings.NewReader(OffchainAggregatorEventsMockABI)) + if err != nil { + return nil, err + } + contract, err := bindOffchainAggregatorEventsMock(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMock{address: address, abi: abi, OffchainAggregatorEventsMockCaller: OffchainAggregatorEventsMockCaller{contract: contract}, OffchainAggregatorEventsMockTransactor: OffchainAggregatorEventsMockTransactor{contract: contract}, OffchainAggregatorEventsMockFilterer: OffchainAggregatorEventsMockFilterer{contract: contract}}, nil +} + +func NewOffchainAggregatorEventsMockCaller(address common.Address, caller bind.ContractCaller) (*OffchainAggregatorEventsMockCaller, error) { + contract, err := 
bindOffchainAggregatorEventsMock(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockCaller{contract: contract}, nil +} + +func NewOffchainAggregatorEventsMockTransactor(address common.Address, transactor bind.ContractTransactor) (*OffchainAggregatorEventsMockTransactor, error) { + contract, err := bindOffchainAggregatorEventsMock(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockTransactor{contract: contract}, nil +} + +func NewOffchainAggregatorEventsMockFilterer(address common.Address, filterer bind.ContractFilterer) (*OffchainAggregatorEventsMockFilterer, error) { + contract, err := bindOffchainAggregatorEventsMock(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockFilterer{contract: contract}, nil +} + +func bindOffchainAggregatorEventsMock(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OffchainAggregatorEventsMockMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OffchainAggregatorEventsMock.Contract.OffchainAggregatorEventsMockCaller.contract.Call(opts, result, method, params...) 
+} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.OffchainAggregatorEventsMockTransactor.contract.Transfer(opts) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.OffchainAggregatorEventsMockTransactor.contract.Transact(opts, method, params...) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _OffchainAggregatorEventsMock.Contract.contract.Call(opts, result, method, params...) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.contract.Transfer(opts) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.contract.Transact(opts, method, params...) 
+} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitAnswerUpdated(opts *bind.TransactOpts, current *big.Int, roundId *big.Int, updatedAt *big.Int) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitAnswerUpdated", current, roundId, updatedAt) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitAnswerUpdated(current *big.Int, roundId *big.Int, updatedAt *big.Int) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitAnswerUpdated(&_OffchainAggregatorEventsMock.TransactOpts, current, roundId, updatedAt) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitAnswerUpdated(current *big.Int, roundId *big.Int, updatedAt *big.Int) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitAnswerUpdated(&_OffchainAggregatorEventsMock.TransactOpts, current, roundId, updatedAt) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitBillingAccessControllerSet(opts *bind.TransactOpts, old common.Address, current common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitBillingAccessControllerSet", old, current) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitBillingAccessControllerSet(old common.Address, current common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitBillingAccessControllerSet(&_OffchainAggregatorEventsMock.TransactOpts, old, current) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitBillingAccessControllerSet(old common.Address, current common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitBillingAccessControllerSet(&_OffchainAggregatorEventsMock.TransactOpts, old, current) +} + +func 
(_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitBillingSet(opts *bind.TransactOpts, maximumGasPrice uint32, reasonableGasPrice uint32, microLinkPerEth uint32, linkGweiPerObservation uint32, linkGweiPerTransmission uint32) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitBillingSet", maximumGasPrice, reasonableGasPrice, microLinkPerEth, linkGweiPerObservation, linkGweiPerTransmission) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitBillingSet(maximumGasPrice uint32, reasonableGasPrice uint32, microLinkPerEth uint32, linkGweiPerObservation uint32, linkGweiPerTransmission uint32) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitBillingSet(&_OffchainAggregatorEventsMock.TransactOpts, maximumGasPrice, reasonableGasPrice, microLinkPerEth, linkGweiPerObservation, linkGweiPerTransmission) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitBillingSet(maximumGasPrice uint32, reasonableGasPrice uint32, microLinkPerEth uint32, linkGweiPerObservation uint32, linkGweiPerTransmission uint32) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitBillingSet(&_OffchainAggregatorEventsMock.TransactOpts, maximumGasPrice, reasonableGasPrice, microLinkPerEth, linkGweiPerObservation, linkGweiPerTransmission) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitConfigSet(opts *bind.TransactOpts, previousConfigBlockNumber uint32, configCount uint64, signers []common.Address, transmitters []common.Address, threshold uint8, encodedConfigVersion uint64, encoded []byte) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitConfigSet", previousConfigBlockNumber, configCount, signers, transmitters, threshold, encodedConfigVersion, encoded) +} + +func (_OffchainAggregatorEventsMock 
*OffchainAggregatorEventsMockSession) EmitConfigSet(previousConfigBlockNumber uint32, configCount uint64, signers []common.Address, transmitters []common.Address, threshold uint8, encodedConfigVersion uint64, encoded []byte) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitConfigSet(&_OffchainAggregatorEventsMock.TransactOpts, previousConfigBlockNumber, configCount, signers, transmitters, threshold, encodedConfigVersion, encoded) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitConfigSet(previousConfigBlockNumber uint32, configCount uint64, signers []common.Address, transmitters []common.Address, threshold uint8, encodedConfigVersion uint64, encoded []byte) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitConfigSet(&_OffchainAggregatorEventsMock.TransactOpts, previousConfigBlockNumber, configCount, signers, transmitters, threshold, encodedConfigVersion, encoded) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitLinkTokenSet(opts *bind.TransactOpts, _oldLinkToken common.Address, _newLinkToken common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitLinkTokenSet", _oldLinkToken, _newLinkToken) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitLinkTokenSet(_oldLinkToken common.Address, _newLinkToken common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitLinkTokenSet(&_OffchainAggregatorEventsMock.TransactOpts, _oldLinkToken, _newLinkToken) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitLinkTokenSet(_oldLinkToken common.Address, _newLinkToken common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitLinkTokenSet(&_OffchainAggregatorEventsMock.TransactOpts, _oldLinkToken, _newLinkToken) +} + +func 
(_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitNewRound(opts *bind.TransactOpts, roundId *big.Int, startedBy common.Address, startedAt *big.Int) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitNewRound", roundId, startedBy, startedAt) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitNewRound(roundId *big.Int, startedBy common.Address, startedAt *big.Int) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitNewRound(&_OffchainAggregatorEventsMock.TransactOpts, roundId, startedBy, startedAt) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitNewRound(roundId *big.Int, startedBy common.Address, startedAt *big.Int) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitNewRound(&_OffchainAggregatorEventsMock.TransactOpts, roundId, startedBy, startedAt) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitNewTransmission(opts *bind.TransactOpts, aggregatorRoundId uint32, answer *big.Int, transmitter common.Address, observations []*big.Int, observers []byte, rawReportContext [32]byte) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitNewTransmission", aggregatorRoundId, answer, transmitter, observations, observers, rawReportContext) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitNewTransmission(aggregatorRoundId uint32, answer *big.Int, transmitter common.Address, observations []*big.Int, observers []byte, rawReportContext [32]byte) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitNewTransmission(&_OffchainAggregatorEventsMock.TransactOpts, aggregatorRoundId, answer, transmitter, observations, observers, rawReportContext) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) 
EmitNewTransmission(aggregatorRoundId uint32, answer *big.Int, transmitter common.Address, observations []*big.Int, observers []byte, rawReportContext [32]byte) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitNewTransmission(&_OffchainAggregatorEventsMock.TransactOpts, aggregatorRoundId, answer, transmitter, observations, observers, rawReportContext) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitOraclePaid(opts *bind.TransactOpts, transmitter common.Address, payee common.Address, amount *big.Int, linkToken common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitOraclePaid", transmitter, payee, amount, linkToken) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitOraclePaid(transmitter common.Address, payee common.Address, amount *big.Int, linkToken common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitOraclePaid(&_OffchainAggregatorEventsMock.TransactOpts, transmitter, payee, amount, linkToken) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitOraclePaid(transmitter common.Address, payee common.Address, amount *big.Int, linkToken common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitOraclePaid(&_OffchainAggregatorEventsMock.TransactOpts, transmitter, payee, amount, linkToken) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitOwnershipTransferRequested", from, to) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return 
_OffchainAggregatorEventsMock.Contract.EmitOwnershipTransferRequested(&_OffchainAggregatorEventsMock.TransactOpts, from, to) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitOwnershipTransferRequested(&_OffchainAggregatorEventsMock.TransactOpts, from, to) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitOwnershipTransferred", from, to) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitOwnershipTransferred(&_OffchainAggregatorEventsMock.TransactOpts, from, to) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitOwnershipTransferred(&_OffchainAggregatorEventsMock.TransactOpts, from, to) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitPayeeshipTransferRequested(opts *bind.TransactOpts, transmitter common.Address, current common.Address, proposed common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitPayeeshipTransferRequested", transmitter, current, proposed) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitPayeeshipTransferRequested(transmitter common.Address, current common.Address, proposed common.Address) (*types.Transaction, error) { + return 
_OffchainAggregatorEventsMock.Contract.EmitPayeeshipTransferRequested(&_OffchainAggregatorEventsMock.TransactOpts, transmitter, current, proposed) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitPayeeshipTransferRequested(transmitter common.Address, current common.Address, proposed common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitPayeeshipTransferRequested(&_OffchainAggregatorEventsMock.TransactOpts, transmitter, current, proposed) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitPayeeshipTransferred(opts *bind.TransactOpts, transmitter common.Address, previous common.Address, current common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitPayeeshipTransferred", transmitter, previous, current) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitPayeeshipTransferred(transmitter common.Address, previous common.Address, current common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitPayeeshipTransferred(&_OffchainAggregatorEventsMock.TransactOpts, transmitter, previous, current) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitPayeeshipTransferred(transmitter common.Address, previous common.Address, current common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitPayeeshipTransferred(&_OffchainAggregatorEventsMock.TransactOpts, transmitter, previous, current) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitRequesterAccessControllerSet(opts *bind.TransactOpts, old common.Address, current common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitRequesterAccessControllerSet", old, current) +} + +func (_OffchainAggregatorEventsMock 
*OffchainAggregatorEventsMockSession) EmitRequesterAccessControllerSet(old common.Address, current common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitRequesterAccessControllerSet(&_OffchainAggregatorEventsMock.TransactOpts, old, current) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitRequesterAccessControllerSet(old common.Address, current common.Address) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitRequesterAccessControllerSet(&_OffchainAggregatorEventsMock.TransactOpts, old, current) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitRoundRequested(opts *bind.TransactOpts, requester common.Address, configDigest [16]byte, epoch uint32, round uint8) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.contract.Transact(opts, "emitRoundRequested", requester, configDigest, epoch, round) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitRoundRequested(requester common.Address, configDigest [16]byte, epoch uint32, round uint8) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitRoundRequested(&_OffchainAggregatorEventsMock.TransactOpts, requester, configDigest, epoch, round) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitRoundRequested(requester common.Address, configDigest [16]byte, epoch uint32, round uint8) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitRoundRequested(&_OffchainAggregatorEventsMock.TransactOpts, requester, configDigest, epoch, round) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactor) EmitValidatorConfigSet(opts *bind.TransactOpts, previousValidator common.Address, previousGasLimit uint32, currentValidator common.Address, currentGasLimit uint32) (*types.Transaction, error) { + return 
_OffchainAggregatorEventsMock.contract.Transact(opts, "emitValidatorConfigSet", previousValidator, previousGasLimit, currentValidator, currentGasLimit) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockSession) EmitValidatorConfigSet(previousValidator common.Address, previousGasLimit uint32, currentValidator common.Address, currentGasLimit uint32) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitValidatorConfigSet(&_OffchainAggregatorEventsMock.TransactOpts, previousValidator, previousGasLimit, currentValidator, currentGasLimit) +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockTransactorSession) EmitValidatorConfigSet(previousValidator common.Address, previousGasLimit uint32, currentValidator common.Address, currentGasLimit uint32) (*types.Transaction, error) { + return _OffchainAggregatorEventsMock.Contract.EmitValidatorConfigSet(&_OffchainAggregatorEventsMock.TransactOpts, previousValidator, previousGasLimit, currentValidator, currentGasLimit) +} + +type OffchainAggregatorEventsMockAnswerUpdatedIterator struct { + Event *OffchainAggregatorEventsMockAnswerUpdated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockAnswerUpdatedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockAnswerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockAnswerUpdated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = 
err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockAnswerUpdatedIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockAnswerUpdatedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockAnswerUpdated struct { + Current *big.Int + RoundId *big.Int + UpdatedAt *big.Int + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterAnswerUpdated(opts *bind.FilterOpts, current []*big.Int, roundId []*big.Int) (*OffchainAggregatorEventsMockAnswerUpdatedIterator, error) { + + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "AnswerUpdated", currentRule, roundIdRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockAnswerUpdatedIterator{contract: _OffchainAggregatorEventsMock.contract, event: "AnswerUpdated", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchAnswerUpdated(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockAnswerUpdated, current []*big.Int, roundId []*big.Int) (event.Subscription, error) { + + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "AnswerUpdated", currentRule, roundIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := 
new(OffchainAggregatorEventsMockAnswerUpdated) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "AnswerUpdated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseAnswerUpdated(log types.Log) (*OffchainAggregatorEventsMockAnswerUpdated, error) { + event := new(OffchainAggregatorEventsMockAnswerUpdated) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "AnswerUpdated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockBillingAccessControllerSetIterator struct { + Event *OffchainAggregatorEventsMockBillingAccessControllerSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockBillingAccessControllerSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockBillingAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockBillingAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockBillingAccessControllerSetIterator) Error() error { + return it.fail +} + +func (it 
*OffchainAggregatorEventsMockBillingAccessControllerSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockBillingAccessControllerSet struct { + Old common.Address + Current common.Address + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterBillingAccessControllerSet(opts *bind.FilterOpts) (*OffchainAggregatorEventsMockBillingAccessControllerSetIterator, error) { + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "BillingAccessControllerSet") + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockBillingAccessControllerSetIterator{contract: _OffchainAggregatorEventsMock.contract, event: "BillingAccessControllerSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchBillingAccessControllerSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockBillingAccessControllerSet) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "BillingAccessControllerSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockBillingAccessControllerSet) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "BillingAccessControllerSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseBillingAccessControllerSet(log types.Log) (*OffchainAggregatorEventsMockBillingAccessControllerSet, error) { + event := new(OffchainAggregatorEventsMockBillingAccessControllerSet) 
+ if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "BillingAccessControllerSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockBillingSetIterator struct { + Event *OffchainAggregatorEventsMockBillingSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockBillingSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockBillingSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockBillingSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockBillingSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockBillingSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockBillingSet struct { + MaximumGasPrice uint32 + ReasonableGasPrice uint32 + MicroLinkPerEth uint32 + LinkGweiPerObservation uint32 + LinkGweiPerTransmission uint32 + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterBillingSet(opts *bind.FilterOpts) (*OffchainAggregatorEventsMockBillingSetIterator, error) { + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "BillingSet") + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockBillingSetIterator{contract: 
_OffchainAggregatorEventsMock.contract, event: "BillingSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchBillingSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockBillingSet) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "BillingSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockBillingSet) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "BillingSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseBillingSet(log types.Log) (*OffchainAggregatorEventsMockBillingSet, error) { + event := new(OffchainAggregatorEventsMockBillingSet) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "BillingSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockConfigSetIterator struct { + Event *OffchainAggregatorEventsMockConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := 
<-it.logs: + it.Event = new(OffchainAggregatorEventsMockConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockConfigSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockConfigSet struct { + PreviousConfigBlockNumber uint32 + ConfigCount uint64 + Signers []common.Address + Transmitters []common.Address + Threshold uint8 + EncodedConfigVersion uint64 + Encoded []byte + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterConfigSet(opts *bind.FilterOpts) (*OffchainAggregatorEventsMockConfigSetIterator, error) { + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockConfigSetIterator{contract: _OffchainAggregatorEventsMock.contract, event: "ConfigSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockConfigSet) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "ConfigSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockConfigSet) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseConfigSet(log types.Log) (*OffchainAggregatorEventsMockConfigSet, error) { + event := new(OffchainAggregatorEventsMockConfigSet) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "ConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockLinkTokenSetIterator struct { + Event *OffchainAggregatorEventsMockLinkTokenSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockLinkTokenSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockLinkTokenSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockLinkTokenSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockLinkTokenSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockLinkTokenSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockLinkTokenSet struct { + OldLinkToken common.Address + NewLinkToken common.Address + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterLinkTokenSet(opts *bind.FilterOpts, _oldLinkToken []common.Address, _newLinkToken []common.Address) 
(*OffchainAggregatorEventsMockLinkTokenSetIterator, error) { + + var _oldLinkTokenRule []interface{} + for _, _oldLinkTokenItem := range _oldLinkToken { + _oldLinkTokenRule = append(_oldLinkTokenRule, _oldLinkTokenItem) + } + var _newLinkTokenRule []interface{} + for _, _newLinkTokenItem := range _newLinkToken { + _newLinkTokenRule = append(_newLinkTokenRule, _newLinkTokenItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "LinkTokenSet", _oldLinkTokenRule, _newLinkTokenRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockLinkTokenSetIterator{contract: _OffchainAggregatorEventsMock.contract, event: "LinkTokenSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchLinkTokenSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockLinkTokenSet, _oldLinkToken []common.Address, _newLinkToken []common.Address) (event.Subscription, error) { + + var _oldLinkTokenRule []interface{} + for _, _oldLinkTokenItem := range _oldLinkToken { + _oldLinkTokenRule = append(_oldLinkTokenRule, _oldLinkTokenItem) + } + var _newLinkTokenRule []interface{} + for _, _newLinkTokenItem := range _newLinkToken { + _newLinkTokenRule = append(_newLinkTokenRule, _newLinkTokenItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "LinkTokenSet", _oldLinkTokenRule, _newLinkTokenRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockLinkTokenSet) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "LinkTokenSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), 
nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseLinkTokenSet(log types.Log) (*OffchainAggregatorEventsMockLinkTokenSet, error) { + event := new(OffchainAggregatorEventsMockLinkTokenSet) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "LinkTokenSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockNewRoundIterator struct { + Event *OffchainAggregatorEventsMockNewRound + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockNewRoundIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockNewRound) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockNewRound) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockNewRoundIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockNewRoundIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockNewRound struct { + RoundId *big.Int + StartedBy common.Address + StartedAt *big.Int + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterNewRound(opts *bind.FilterOpts, roundId []*big.Int, startedBy []common.Address) (*OffchainAggregatorEventsMockNewRoundIterator, error) { + + var roundIdRule []interface{} + for _, roundIdItem := range 
roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + var startedByRule []interface{} + for _, startedByItem := range startedBy { + startedByRule = append(startedByRule, startedByItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "NewRound", roundIdRule, startedByRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockNewRoundIterator{contract: _OffchainAggregatorEventsMock.contract, event: "NewRound", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchNewRound(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockNewRound, roundId []*big.Int, startedBy []common.Address) (event.Subscription, error) { + + var roundIdRule []interface{} + for _, roundIdItem := range roundId { + roundIdRule = append(roundIdRule, roundIdItem) + } + var startedByRule []interface{} + for _, startedByItem := range startedBy { + startedByRule = append(startedByRule, startedByItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "NewRound", roundIdRule, startedByRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockNewRound) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "NewRound", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseNewRound(log types.Log) (*OffchainAggregatorEventsMockNewRound, error) { + event := new(OffchainAggregatorEventsMockNewRound) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "NewRound", log); err != nil 
{ + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockNewTransmissionIterator struct { + Event *OffchainAggregatorEventsMockNewTransmission + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockNewTransmissionIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockNewTransmission) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockNewTransmission) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockNewTransmissionIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockNewTransmissionIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockNewTransmission struct { + AggregatorRoundId uint32 + Answer *big.Int + Transmitter common.Address + Observations []*big.Int + Observers []byte + RawReportContext [32]byte + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterNewTransmission(opts *bind.FilterOpts, aggregatorRoundId []uint32) (*OffchainAggregatorEventsMockNewTransmissionIterator, error) { + + var aggregatorRoundIdRule []interface{} + for _, aggregatorRoundIdItem := range aggregatorRoundId { + aggregatorRoundIdRule = append(aggregatorRoundIdRule, aggregatorRoundIdItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, 
"NewTransmission", aggregatorRoundIdRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockNewTransmissionIterator{contract: _OffchainAggregatorEventsMock.contract, event: "NewTransmission", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchNewTransmission(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockNewTransmission, aggregatorRoundId []uint32) (event.Subscription, error) { + + var aggregatorRoundIdRule []interface{} + for _, aggregatorRoundIdItem := range aggregatorRoundId { + aggregatorRoundIdRule = append(aggregatorRoundIdRule, aggregatorRoundIdItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "NewTransmission", aggregatorRoundIdRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockNewTransmission) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "NewTransmission", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseNewTransmission(log types.Log) (*OffchainAggregatorEventsMockNewTransmission, error) { + event := new(OffchainAggregatorEventsMockNewTransmission) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "NewTransmission", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockOraclePaidIterator struct { + Event *OffchainAggregatorEventsMockOraclePaid + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail 
error +} + +func (it *OffchainAggregatorEventsMockOraclePaidIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockOraclePaid) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockOraclePaid) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockOraclePaidIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockOraclePaidIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockOraclePaid struct { + Transmitter common.Address + Payee common.Address + Amount *big.Int + LinkToken common.Address + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterOraclePaid(opts *bind.FilterOpts, transmitter []common.Address, payee []common.Address, linkToken []common.Address) (*OffchainAggregatorEventsMockOraclePaidIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var payeeRule []interface{} + for _, payeeItem := range payee { + payeeRule = append(payeeRule, payeeItem) + } + + var linkTokenRule []interface{} + for _, linkTokenItem := range linkToken { + linkTokenRule = append(linkTokenRule, linkTokenItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "OraclePaid", transmitterRule, payeeRule, linkTokenRule) + if err != nil { + return nil, err + } + return 
&OffchainAggregatorEventsMockOraclePaidIterator{contract: _OffchainAggregatorEventsMock.contract, event: "OraclePaid", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchOraclePaid(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockOraclePaid, transmitter []common.Address, payee []common.Address, linkToken []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var payeeRule []interface{} + for _, payeeItem := range payee { + payeeRule = append(payeeRule, payeeItem) + } + + var linkTokenRule []interface{} + for _, linkTokenItem := range linkToken { + linkTokenRule = append(linkTokenRule, linkTokenItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "OraclePaid", transmitterRule, payeeRule, linkTokenRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockOraclePaid) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "OraclePaid", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseOraclePaid(log types.Log) (*OffchainAggregatorEventsMockOraclePaid, error) { + event := new(OffchainAggregatorEventsMockOraclePaid) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "OraclePaid", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockOwnershipTransferRequestedIterator struct { + Event 
*OffchainAggregatorEventsMockOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OffchainAggregatorEventsMockOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return 
&OffchainAggregatorEventsMockOwnershipTransferRequestedIterator{contract: _OffchainAggregatorEventsMock.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockOwnershipTransferRequested) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseOwnershipTransferRequested(log types.Log) (*OffchainAggregatorEventsMockOwnershipTransferRequested, error) { + event := new(OffchainAggregatorEventsMockOwnershipTransferRequested) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockOwnershipTransferredIterator struct { + Event *OffchainAggregatorEventsMockOwnershipTransferred + + contract *bind.BoundContract + event 
string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OffchainAggregatorEventsMockOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockOwnershipTransferredIterator{contract: _OffchainAggregatorEventsMock.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + 
+func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockOwnershipTransferred) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseOwnershipTransferred(log types.Log) (*OffchainAggregatorEventsMockOwnershipTransferred, error) { + event := new(OffchainAggregatorEventsMockOwnershipTransferred) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockPayeeshipTransferRequestedIterator struct { + Event *OffchainAggregatorEventsMockPayeeshipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockPayeeshipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if 
it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockPayeeshipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockPayeeshipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockPayeeshipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockPayeeshipTransferRequested struct { + Transmitter common.Address + Current common.Address + Proposed common.Address + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, current []common.Address, proposed []common.Address) (*OffchainAggregatorEventsMockPayeeshipTransferRequestedIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var proposedRule []interface{} + for _, proposedItem := range proposed { + proposedRule = append(proposedRule, proposedItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "PayeeshipTransferRequested", transmitterRule, currentRule, proposedRule) + if err != nil { + return nil, err + } + return 
&OffchainAggregatorEventsMockPayeeshipTransferRequestedIterator{contract: _OffchainAggregatorEventsMock.contract, event: "PayeeshipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockPayeeshipTransferRequested, transmitter []common.Address, current []common.Address, proposed []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + var proposedRule []interface{} + for _, proposedItem := range proposed { + proposedRule = append(proposedRule, proposedItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "PayeeshipTransferRequested", transmitterRule, currentRule, proposedRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockPayeeshipTransferRequested) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "PayeeshipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParsePayeeshipTransferRequested(log types.Log) (*OffchainAggregatorEventsMockPayeeshipTransferRequested, error) { + event := new(OffchainAggregatorEventsMockPayeeshipTransferRequested) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, 
"PayeeshipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockPayeeshipTransferredIterator struct { + Event *OffchainAggregatorEventsMockPayeeshipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockPayeeshipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockPayeeshipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockPayeeshipTransferredIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockPayeeshipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockPayeeshipTransferred struct { + Transmitter common.Address + Previous common.Address + Current common.Address + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterPayeeshipTransferred(opts *bind.FilterOpts, transmitter []common.Address, previous []common.Address, current []common.Address) (*OffchainAggregatorEventsMockPayeeshipTransferredIterator, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var previousRule []interface{} 
+ for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "PayeeshipTransferred", transmitterRule, previousRule, currentRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockPayeeshipTransferredIterator{contract: _OffchainAggregatorEventsMock.contract, event: "PayeeshipTransferred", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockPayeeshipTransferred, transmitter []common.Address, previous []common.Address, current []common.Address) (event.Subscription, error) { + + var transmitterRule []interface{} + for _, transmitterItem := range transmitter { + transmitterRule = append(transmitterRule, transmitterItem) + } + var previousRule []interface{} + for _, previousItem := range previous { + previousRule = append(previousRule, previousItem) + } + var currentRule []interface{} + for _, currentItem := range current { + currentRule = append(currentRule, currentItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "PayeeshipTransferred", transmitterRule, previousRule, currentRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockPayeeshipTransferred) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return 
nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParsePayeeshipTransferred(log types.Log) (*OffchainAggregatorEventsMockPayeeshipTransferred, error) { + event := new(OffchainAggregatorEventsMockPayeeshipTransferred) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "PayeeshipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockRequesterAccessControllerSetIterator struct { + Event *OffchainAggregatorEventsMockRequesterAccessControllerSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockRequesterAccessControllerSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockRequesterAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockRequesterAccessControllerSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockRequesterAccessControllerSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockRequesterAccessControllerSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockRequesterAccessControllerSet struct { + Old common.Address + Current common.Address + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) 
FilterRequesterAccessControllerSet(opts *bind.FilterOpts) (*OffchainAggregatorEventsMockRequesterAccessControllerSetIterator, error) { + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "RequesterAccessControllerSet") + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockRequesterAccessControllerSetIterator{contract: _OffchainAggregatorEventsMock.contract, event: "RequesterAccessControllerSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchRequesterAccessControllerSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockRequesterAccessControllerSet) (event.Subscription, error) { + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "RequesterAccessControllerSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockRequesterAccessControllerSet) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "RequesterAccessControllerSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseRequesterAccessControllerSet(log types.Log) (*OffchainAggregatorEventsMockRequesterAccessControllerSet, error) { + event := new(OffchainAggregatorEventsMockRequesterAccessControllerSet) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "RequesterAccessControllerSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockRoundRequestedIterator struct { + Event *OffchainAggregatorEventsMockRoundRequested + + 
contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockRoundRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockRoundRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockRoundRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockRoundRequestedIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockRoundRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockRoundRequested struct { + Requester common.Address + ConfigDigest [16]byte + Epoch uint32 + Round uint8 + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterRoundRequested(opts *bind.FilterOpts, requester []common.Address) (*OffchainAggregatorEventsMockRoundRequestedIterator, error) { + + var requesterRule []interface{} + for _, requesterItem := range requester { + requesterRule = append(requesterRule, requesterItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "RoundRequested", requesterRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockRoundRequestedIterator{contract: _OffchainAggregatorEventsMock.contract, event: "RoundRequested", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) 
WatchRoundRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockRoundRequested, requester []common.Address) (event.Subscription, error) { + + var requesterRule []interface{} + for _, requesterItem := range requester { + requesterRule = append(requesterRule, requesterItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "RoundRequested", requesterRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockRoundRequested) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "RoundRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseRoundRequested(log types.Log) (*OffchainAggregatorEventsMockRoundRequested, error) { + event := new(OffchainAggregatorEventsMockRoundRequested) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "RoundRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type OffchainAggregatorEventsMockValidatorConfigSetIterator struct { + Event *OffchainAggregatorEventsMockValidatorConfigSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *OffchainAggregatorEventsMockValidatorConfigSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockValidatorConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log 
+ return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(OffchainAggregatorEventsMockValidatorConfigSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *OffchainAggregatorEventsMockValidatorConfigSetIterator) Error() error { + return it.fail +} + +func (it *OffchainAggregatorEventsMockValidatorConfigSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type OffchainAggregatorEventsMockValidatorConfigSet struct { + PreviousValidator common.Address + PreviousGasLimit uint32 + CurrentValidator common.Address + CurrentGasLimit uint32 + Raw types.Log +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) FilterValidatorConfigSet(opts *bind.FilterOpts, previousValidator []common.Address, currentValidator []common.Address) (*OffchainAggregatorEventsMockValidatorConfigSetIterator, error) { + + var previousValidatorRule []interface{} + for _, previousValidatorItem := range previousValidator { + previousValidatorRule = append(previousValidatorRule, previousValidatorItem) + } + + var currentValidatorRule []interface{} + for _, currentValidatorItem := range currentValidator { + currentValidatorRule = append(currentValidatorRule, currentValidatorItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.FilterLogs(opts, "ValidatorConfigSet", previousValidatorRule, currentValidatorRule) + if err != nil { + return nil, err + } + return &OffchainAggregatorEventsMockValidatorConfigSetIterator{contract: _OffchainAggregatorEventsMock.contract, event: "ValidatorConfigSet", logs: logs, sub: sub}, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) WatchValidatorConfigSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockValidatorConfigSet, 
previousValidator []common.Address, currentValidator []common.Address) (event.Subscription, error) { + + var previousValidatorRule []interface{} + for _, previousValidatorItem := range previousValidator { + previousValidatorRule = append(previousValidatorRule, previousValidatorItem) + } + + var currentValidatorRule []interface{} + for _, currentValidatorItem := range currentValidator { + currentValidatorRule = append(currentValidatorRule, currentValidatorItem) + } + + logs, sub, err := _OffchainAggregatorEventsMock.contract.WatchLogs(opts, "ValidatorConfigSet", previousValidatorRule, currentValidatorRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(OffchainAggregatorEventsMockValidatorConfigSet) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "ValidatorConfigSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMockFilterer) ParseValidatorConfigSet(log types.Log) (*OffchainAggregatorEventsMockValidatorConfigSet, error) { + event := new(OffchainAggregatorEventsMockValidatorConfigSet) + if err := _OffchainAggregatorEventsMock.contract.UnpackLog(event, "ValidatorConfigSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMock) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _OffchainAggregatorEventsMock.abi.Events["AnswerUpdated"].ID: + return _OffchainAggregatorEventsMock.ParseAnswerUpdated(log) + case _OffchainAggregatorEventsMock.abi.Events["BillingAccessControllerSet"].ID: + return 
_OffchainAggregatorEventsMock.ParseBillingAccessControllerSet(log) + case _OffchainAggregatorEventsMock.abi.Events["BillingSet"].ID: + return _OffchainAggregatorEventsMock.ParseBillingSet(log) + case _OffchainAggregatorEventsMock.abi.Events["ConfigSet"].ID: + return _OffchainAggregatorEventsMock.ParseConfigSet(log) + case _OffchainAggregatorEventsMock.abi.Events["LinkTokenSet"].ID: + return _OffchainAggregatorEventsMock.ParseLinkTokenSet(log) + case _OffchainAggregatorEventsMock.abi.Events["NewRound"].ID: + return _OffchainAggregatorEventsMock.ParseNewRound(log) + case _OffchainAggregatorEventsMock.abi.Events["NewTransmission"].ID: + return _OffchainAggregatorEventsMock.ParseNewTransmission(log) + case _OffchainAggregatorEventsMock.abi.Events["OraclePaid"].ID: + return _OffchainAggregatorEventsMock.ParseOraclePaid(log) + case _OffchainAggregatorEventsMock.abi.Events["OwnershipTransferRequested"].ID: + return _OffchainAggregatorEventsMock.ParseOwnershipTransferRequested(log) + case _OffchainAggregatorEventsMock.abi.Events["OwnershipTransferred"].ID: + return _OffchainAggregatorEventsMock.ParseOwnershipTransferred(log) + case _OffchainAggregatorEventsMock.abi.Events["PayeeshipTransferRequested"].ID: + return _OffchainAggregatorEventsMock.ParsePayeeshipTransferRequested(log) + case _OffchainAggregatorEventsMock.abi.Events["PayeeshipTransferred"].ID: + return _OffchainAggregatorEventsMock.ParsePayeeshipTransferred(log) + case _OffchainAggregatorEventsMock.abi.Events["RequesterAccessControllerSet"].ID: + return _OffchainAggregatorEventsMock.ParseRequesterAccessControllerSet(log) + case _OffchainAggregatorEventsMock.abi.Events["RoundRequested"].ID: + return _OffchainAggregatorEventsMock.ParseRoundRequested(log) + case _OffchainAggregatorEventsMock.abi.Events["ValidatorConfigSet"].ID: + return _OffchainAggregatorEventsMock.ParseValidatorConfigSet(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +// Topic returns the fixed 32-byte log topic that identifies each mock event type; presumably the keccak256 hash of the event signature, matching the corresponding abi.Events[...].ID used by ParseLog above — TODO(review): confirm against the contract ABI. + +func 
(OffchainAggregatorEventsMockAnswerUpdated) Topic() common.Hash { + return common.HexToHash("0x0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f") +} + +func (OffchainAggregatorEventsMockBillingAccessControllerSet) Topic() common.Hash { + return common.HexToHash("0x793cb73064f3c8cde7e187ae515511e6e56d1ee89bf08b82fa60fb70f8d48912") +} + +func (OffchainAggregatorEventsMockBillingSet) Topic() common.Hash { + return common.HexToHash("0xd0d9486a2c673e2a4b57fc82e4c8a556b3e2b82dd5db07e2c04a920ca0f469b6") +} + +func (OffchainAggregatorEventsMockConfigSet) Topic() common.Hash { + return common.HexToHash("0x25d719d88a4512dd76c7442b910a83360845505894eb444ef299409e180f8fb9") +} + +func (OffchainAggregatorEventsMockLinkTokenSet) Topic() common.Hash { + return common.HexToHash("0x4966a50c93f855342ccf6c5c0d358b85b91335b2acedc7da0932f691f351711a") +} + +func (OffchainAggregatorEventsMockNewRound) Topic() common.Hash { + return common.HexToHash("0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271") +} + +func (OffchainAggregatorEventsMockNewTransmission) Topic() common.Hash { + return common.HexToHash("0xf6a97944f31ea060dfde0566e4167c1a1082551e64b60ecb14d599a9d023d451") +} + +func (OffchainAggregatorEventsMockOraclePaid) Topic() common.Hash { + return common.HexToHash("0xd0b1dac935d85bd54cf0a33b0d41d39f8cf53a968465fc7ea2377526b8ac712c") +} + +func (OffchainAggregatorEventsMockOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (OffchainAggregatorEventsMockOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (OffchainAggregatorEventsMockPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (OffchainAggregatorEventsMockPayeeshipTransferred) Topic() 
common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (OffchainAggregatorEventsMockRequesterAccessControllerSet) Topic() common.Hash { + return common.HexToHash("0x27b89aede8b560578baaa25ee5ce3852c5eecad1e114b941bbd89e1eb4bae634") +} + +func (OffchainAggregatorEventsMockRoundRequested) Topic() common.Hash { + return common.HexToHash("0x3ea16a923ff4b1df6526e854c9e3a995c43385d70e73359e10623c74f0b52037") +} + +func (OffchainAggregatorEventsMockValidatorConfigSet) Topic() common.Hash { + return common.HexToHash("0xb04e3a37abe9c0fcdfebdeae019a8e2b12ddf53f5d55ffb0caccc1bedaca1541") +} + +// Address returns the contract address this binding instance holds (the `address` field it was constructed with). + +func (_OffchainAggregatorEventsMock *OffchainAggregatorEventsMock) Address() common.Address { + return _OffchainAggregatorEventsMock.address +} + +// OffchainAggregatorEventsMockInterface is the full generated API surface of the mock: one Emit* transact method per event, plus Filter*/Watch*/Parse* accessors for each event, and the generic ParseLog/Address helpers defined above. + +type OffchainAggregatorEventsMockInterface interface { + EmitAnswerUpdated(opts *bind.TransactOpts, current *big.Int, roundId *big.Int, updatedAt *big.Int) (*types.Transaction, error) + + EmitBillingAccessControllerSet(opts *bind.TransactOpts, old common.Address, current common.Address) (*types.Transaction, error) + + EmitBillingSet(opts *bind.TransactOpts, maximumGasPrice uint32, reasonableGasPrice uint32, microLinkPerEth uint32, linkGweiPerObservation uint32, linkGweiPerTransmission uint32) (*types.Transaction, error) + + EmitConfigSet(opts *bind.TransactOpts, previousConfigBlockNumber uint32, configCount uint64, signers []common.Address, transmitters []common.Address, threshold uint8, encodedConfigVersion uint64, encoded []byte) (*types.Transaction, error) + + EmitLinkTokenSet(opts *bind.TransactOpts, _oldLinkToken common.Address, _newLinkToken common.Address) (*types.Transaction, error) + + EmitNewRound(opts *bind.TransactOpts, roundId *big.Int, startedBy common.Address, startedAt *big.Int) (*types.Transaction, error) + + EmitNewTransmission(opts *bind.TransactOpts, aggregatorRoundId uint32, answer *big.Int, transmitter common.Address, observations []*big.Int, observers 
[]byte, rawReportContext [32]byte) (*types.Transaction, error) + + EmitOraclePaid(opts *bind.TransactOpts, transmitter common.Address, payee common.Address, amount *big.Int, linkToken common.Address) (*types.Transaction, error) + + EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitPayeeshipTransferRequested(opts *bind.TransactOpts, transmitter common.Address, current common.Address, proposed common.Address) (*types.Transaction, error) + + EmitPayeeshipTransferred(opts *bind.TransactOpts, transmitter common.Address, previous common.Address, current common.Address) (*types.Transaction, error) + + EmitRequesterAccessControllerSet(opts *bind.TransactOpts, old common.Address, current common.Address) (*types.Transaction, error) + + EmitRoundRequested(opts *bind.TransactOpts, requester common.Address, configDigest [16]byte, epoch uint32, round uint8) (*types.Transaction, error) + + EmitValidatorConfigSet(opts *bind.TransactOpts, previousValidator common.Address, previousGasLimit uint32, currentValidator common.Address, currentGasLimit uint32) (*types.Transaction, error) + + FilterAnswerUpdated(opts *bind.FilterOpts, current []*big.Int, roundId []*big.Int) (*OffchainAggregatorEventsMockAnswerUpdatedIterator, error) + + WatchAnswerUpdated(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockAnswerUpdated, current []*big.Int, roundId []*big.Int) (event.Subscription, error) + + ParseAnswerUpdated(log types.Log) (*OffchainAggregatorEventsMockAnswerUpdated, error) + + FilterBillingAccessControllerSet(opts *bind.FilterOpts) (*OffchainAggregatorEventsMockBillingAccessControllerSetIterator, error) + + WatchBillingAccessControllerSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockBillingAccessControllerSet) (event.Subscription, error) + + 
ParseBillingAccessControllerSet(log types.Log) (*OffchainAggregatorEventsMockBillingAccessControllerSet, error) + + FilterBillingSet(opts *bind.FilterOpts) (*OffchainAggregatorEventsMockBillingSetIterator, error) + + WatchBillingSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockBillingSet) (event.Subscription, error) + + ParseBillingSet(log types.Log) (*OffchainAggregatorEventsMockBillingSet, error) + + FilterConfigSet(opts *bind.FilterOpts) (*OffchainAggregatorEventsMockConfigSetIterator, error) + + WatchConfigSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockConfigSet) (event.Subscription, error) + + ParseConfigSet(log types.Log) (*OffchainAggregatorEventsMockConfigSet, error) + + FilterLinkTokenSet(opts *bind.FilterOpts, _oldLinkToken []common.Address, _newLinkToken []common.Address) (*OffchainAggregatorEventsMockLinkTokenSetIterator, error) + + WatchLinkTokenSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockLinkTokenSet, _oldLinkToken []common.Address, _newLinkToken []common.Address) (event.Subscription, error) + + ParseLinkTokenSet(log types.Log) (*OffchainAggregatorEventsMockLinkTokenSet, error) + + FilterNewRound(opts *bind.FilterOpts, roundId []*big.Int, startedBy []common.Address) (*OffchainAggregatorEventsMockNewRoundIterator, error) + + WatchNewRound(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockNewRound, roundId []*big.Int, startedBy []common.Address) (event.Subscription, error) + + ParseNewRound(log types.Log) (*OffchainAggregatorEventsMockNewRound, error) + + FilterNewTransmission(opts *bind.FilterOpts, aggregatorRoundId []uint32) (*OffchainAggregatorEventsMockNewTransmissionIterator, error) + + WatchNewTransmission(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockNewTransmission, aggregatorRoundId []uint32) (event.Subscription, error) + + ParseNewTransmission(log types.Log) (*OffchainAggregatorEventsMockNewTransmission, error) + + FilterOraclePaid(opts *bind.FilterOpts, 
transmitter []common.Address, payee []common.Address, linkToken []common.Address) (*OffchainAggregatorEventsMockOraclePaidIterator, error) + + WatchOraclePaid(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockOraclePaid, transmitter []common.Address, payee []common.Address, linkToken []common.Address) (event.Subscription, error) + + ParseOraclePaid(log types.Log) (*OffchainAggregatorEventsMockOraclePaid, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OffchainAggregatorEventsMockOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*OffchainAggregatorEventsMockOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*OffchainAggregatorEventsMockOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*OffchainAggregatorEventsMockOwnershipTransferred, error) + + FilterPayeeshipTransferRequested(opts *bind.FilterOpts, transmitter []common.Address, current []common.Address, proposed []common.Address) (*OffchainAggregatorEventsMockPayeeshipTransferRequestedIterator, error) + + WatchPayeeshipTransferRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockPayeeshipTransferRequested, transmitter []common.Address, current []common.Address, proposed []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferRequested(log types.Log) (*OffchainAggregatorEventsMockPayeeshipTransferRequested, error) + + FilterPayeeshipTransferred(opts *bind.FilterOpts, 
transmitter []common.Address, previous []common.Address, current []common.Address) (*OffchainAggregatorEventsMockPayeeshipTransferredIterator, error) + + WatchPayeeshipTransferred(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockPayeeshipTransferred, transmitter []common.Address, previous []common.Address, current []common.Address) (event.Subscription, error) + + ParsePayeeshipTransferred(log types.Log) (*OffchainAggregatorEventsMockPayeeshipTransferred, error) + + FilterRequesterAccessControllerSet(opts *bind.FilterOpts) (*OffchainAggregatorEventsMockRequesterAccessControllerSetIterator, error) + + WatchRequesterAccessControllerSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockRequesterAccessControllerSet) (event.Subscription, error) + + ParseRequesterAccessControllerSet(log types.Log) (*OffchainAggregatorEventsMockRequesterAccessControllerSet, error) + + FilterRoundRequested(opts *bind.FilterOpts, requester []common.Address) (*OffchainAggregatorEventsMockRoundRequestedIterator, error) + + WatchRoundRequested(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockRoundRequested, requester []common.Address) (event.Subscription, error) + + ParseRoundRequested(log types.Log) (*OffchainAggregatorEventsMockRoundRequested, error) + + FilterValidatorConfigSet(opts *bind.FilterOpts, previousValidator []common.Address, currentValidator []common.Address) (*OffchainAggregatorEventsMockValidatorConfigSetIterator, error) + + WatchValidatorConfigSet(opts *bind.WatchOpts, sink chan<- *OffchainAggregatorEventsMockValidatorConfigSet, previousValidator []common.Address, currentValidator []common.Address) (event.Subscription, error) + + ParseValidatorConfigSet(log types.Log) (*OffchainAggregatorEventsMockValidatorConfigSet, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/integration-tests/contracts/ethereum/Staking.go b/integration-tests/contracts/ethereum/Staking.go new file mode 
100644 index 00000000..3a90658d --- /dev/null +++ b/integration-tests/contracts/ethereum/Staking.go @@ -0,0 +1,3117 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package ethereum + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// StakingPoolConstructorParams is an auto generated low-level Go binding around an user-defined struct. +// It mirrors the Solidity tuple structStaking.PoolConstructorParams declared in the ABI below. +// NOTE(review): ABI tuple encoding is positional, so the field order here presumably must match the Solidity struct exactly — do not reorder; confirm against the contract source. +type StakingPoolConstructorParams struct { + PLIAddress common.Address + MonitoredFeed common.Address + InitialMaxPoolSize *big.Int + InitialMaxCommunityStakeAmount *big.Int + InitialMaxOperatorStakeAmount *big.Int + MinCommunityStakeAmount *big.Int + MinOperatorStakeAmount *big.Int + PriorityPeriodThreshold *big.Int + RegularPeriodThreshold *big.Int + MaxAlertingRewardAmount *big.Int + MinInitialOperatorCount *big.Int + MinRewardDuration *big.Int + SlashableDuration *big.Int + DelegationRateDenominator *big.Int +} + +// StakingMetaData contains all meta data concerning the Staking contract. 
+var StakingMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"components\":[{\"internalType\":\"contractLinkTokenInterface\",\"name\":\"PLIAddress\",\"type\":\"address\"},{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"monitoredFeed\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"initialMaxPoolSize\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"initialMaxCommunityStakeAmount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"initialMaxOperatorStakeAmount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"minCommunityStakeAmount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"minOperatorStakeAmount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"priorityPeriodThreshold\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"regularPeriodThreshold\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxAlertingRewardAmount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"minInitialOperatorCount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"minRewardDuration\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"slashableDuration\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"delegationRateDenominator\",\"type\":\"uint256\"}],\"internalType\":\"structStaking.PoolConstructorParams\",\"name\":\"params\",\"type\":\"tuple\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AccessForbidden\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"}],\"name\":\"AlertAlreadyExists\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"AlertInvalid\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"CastError\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"remainingAmount\",\"type\":\"uint256\"}],\"name\":\"ExcessiveStakeAmount\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":
\"address\",\"name\":\"staker\",\"type\":\"address\"}],\"name\":\"ExistingStakeFound\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"currentOperatorsCount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"minInitialOperatorsCount\",\"type\":\"uint256\"}],\"name\":\"InadequateInitialOperatorsCount\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"remainingPoolSize\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"requiredPoolSize\",\"type\":\"uint256\"}],\"name\":\"InsufficientRemainingPoolSpace\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"requiredAmount\",\"type\":\"uint256\"}],\"name\":\"InsufficientStakeAmount\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidDelegationRate\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidMaxAlertingRewardAmount\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"maxStakeAmount\",\"type\":\"uint256\"}],\"name\":\"InvalidMaxStakeAmount\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidMigrationTarget\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidMinCommunityStakeAmount\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidMinOperatorStakeAmount\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"maxPoolSize\",\"type\":\"uint256\"}],\"name\":\"InvalidPoolSize\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"currentStatus\",\"type\":\"bool\"},{\"internalType\":\"bool\",\"name\":\"requiredStatus\",\"type\":\"bool\"}],\"name\":\"InvalidPoolStatus\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidRegularPeriodThreshold\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidZeroAddress\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MerkleRootNotSet\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"operator\",\"type\":\"address\"}],\"name\":\"OperatorAlreadyExists\",\"type\":\"error\"},{\"inputs\":
[{\"internalType\":\"address\",\"name\":\"operator\",\"type\":\"address\"}],\"name\":\"OperatorDoesNotExist\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"operator\",\"type\":\"address\"}],\"name\":\"OperatorIsAssignedToFeed\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"operator\",\"type\":\"address\"}],\"name\":\"OperatorIsLocked\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"RewardDurationTooShort\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"SenderNotLinkToken\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"}],\"name\":\"StakeNotFound\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"alerter\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"rewardAmount\",\"type\":\"uint256\"}],\"name\":\"AlertRaised\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"newMerkleRoot\",\"type\":\"bytes32\"}],\"name\":\"MerkleRootChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"principal\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"baseReward\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"delegationReward\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"Migrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"migrationTarget\",\"type\":\"address\"}],\"name\":\"MigrationTargetAccepted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":fal
se,\"internalType\":\"address\",\"name\":\"migrationTarget\",\"type\":\"address\"}],\"name\":\"MigrationTargetProposed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newStake\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"totalStake\",\"type\":\"uint256\"}],\"name\":\"Staked\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"principal\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"baseReward\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"delegationReward\",\"type\":\"uint256\"}],\"name\":\"Unstaked\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"acceptMigrationTarget\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"acceptOwnership\",\"outputs\":[],\"stateMutabilit
y\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"operators\",\"type\":\"address[]\"}],\"name\":\"addOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"addReward\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"alerter\",\"type\":\"address\"}],\"name\":\"canAlert\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"newRate\",\"type\":\"uint256\"}],\"name\":\"changeRewardRate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"conclude\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"emergencyPause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"emergencyUnpause\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAvailableReward\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"}],\"name\":\"getBaseReward\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPluginToken\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCommunityStakerLimits\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability
\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDelegatesCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDelegationRateDenominator\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"}],\"name\":\"getDelegationReward\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getEarnedBaseRewards\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getEarnedDelegationRewards\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getFeedOperators\",\"outputs\":[{\"internalType\":\"address[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMaxPoolSize\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMerkleRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMigrationTarget\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMonitoredFeed\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getOperatorLimits\",\"outputs\":[{\"internalType\":\"uint256\",\"na
me\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRewardRate\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getRewardTimestamps\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"}],\"name\":\"getStake\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTotalCommunityStakedAmount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTotalDelegatedAmount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTotalRemovedAmount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTotalStakedAmount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"},{\"internalType\":\"bytes32[]\",\"name\":\"proof\",\"type\":\"bytes32[]\"}],\"name\":\"hasAccess\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"isActive\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"
type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"}],\"name\":\"isOperator\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"isPaused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"migrate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"paused\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"migrationTarget\",\"type\":\"address\"}],\"name\":\"proposeMigrationTarget\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"raiseAlert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"operators\",\"type\":\"address[]\"}],\"name\":\"removeOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"operators\",\"type\":\"address[]\"}],\"name\":\"setFeedOperators\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"n
ame\":\"newMerkleRoot\",\"type\":\"bytes32\"}],\"name\":\"setMerkleRoot\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"maxPoolSize\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxCommunityStakeAmount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxOperatorStakeAmount\",\"type\":\"uint256\"}],\"name\":\"setPoolConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"initialRewardRate\",\"type\":\"uint256\"}],\"name\":\"start\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"typeAndVersion\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unstake\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawRemovedStake\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdrawUnusedReward\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x6101e06040523480156200001257600080fd5b50604051620056cd380380620056cd833981016040819052620000359162000766565b33806000816200008c5760405162461bcd60e51b815260206004820152601860248201527f43616e6e6f7420736574206f776e657220746f207a65726f000000000000000060448201526064015b60405180910390fd5b600080546001600160a01b0319166001600160a01b0384811691909117909155811615620000bf57620000bf81620002ee565b50506001805460ff60a01b191690555080516001600160a01b0316620000f85760405163f6b2911f60e01b815260040160405180910390fd5b60208101516001600160a01b0316620001245760405163f6b2911f60e01b815260040160405180910390fd5b806101a001516000036200014b5760405163027953ef60e61b815260040160405180910390fd5b6000816101a0015164e8d4a510006200016591906200082b565b1115620001855760405163027953ef60e61b815260040160405180910390fd5b8060e0015181610100015111620001af576040516310919fb960e11b815260040160405180910390fd5b8060c00151600003620001d5576040516384dada8560e01b815260040160405180910390fd5b80608001518160c001511115620001ff576040516384dada8560e01b815260040160405180910390fd5b80606001518160a001511115620002295760405163941b857f60e01b815260040160405180910390fd5b80608001518161012001511115620002545760405163a9be3a0960e01b815260040160405180910390fd5b6200027f81604001518260600151836080015160026200039960201b6200208d17909392919060201c565b80516001600160a01b0390811660805260208201511660a090815260e08083015160c0908152610100808501519092526101208085015190925283015190528101516101409081528101516101609081528101516101809081528101516101a090815201516101c052620008a2565b336001600160a01b03821603620003485760405162461bcd60e51b815260206004820152601760248201527f43616e6e6f74207472616e7366657220746f2073656c66000000000000000000604482015260640162000083565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b82811115620003bf5760405163bc91aa3360e01b81526004810182905260240162000083565b60038401546001600160601b0316831015620003f257604051630f9e1c3b60e11b81526004810
184905260240162000083565b60038401546c0100000000000000000000000090046001600160501b0316821015620004355760405163bc91aa3360e01b81526004810183905260240162000083565b6003840154600160b01b90046001600160501b03168110156200046f5760405163bc91aa3360e01b81526004810182905260240162000083565b60408051608081018252600286015460ff80821615158352610100820416602083018190526001600160601b036201000083048116948401859052600160701b9092049091166060830152909190620004ca90849062000864565b620004d6919062000886565b841015620004fb5760405163bc91aa3360e01b81526004810183905260240162000083565b60038501546001600160601b031684146200057c576200052684620006b760201b620023571760201c565b6003860180546001600160601b0319166001600160601b03929092169190911790556040518481527f7f4f497e086b2eb55f8a9885ba00d33399bbe0ebcb92ea092834386435a1b9c09060200160405180910390a15b60038501546c0100000000000000000000000090046001600160501b031683146200062157620005b783620006e660201b620023851760201c565b6003860180546001600160501b03929092166c0100000000000000000000000002600160601b600160b01b03199092169190911790556040518381527fb5f554e5ef00806bace1edbb84186512ebcefa2af7706085143f501f29314df79060200160405180910390a15b6003850154600160b01b90046001600160501b03168214620006b0576200065382620006e660201b620023851760201c565b6003860180546001600160501b0392909216600160b01b026001600160b01b039092169190911790556040518281527f816587cb2e773af4f3689a03d7520fabff3462605ded374b485b13994c0d7b529060200160405180910390a15b5050505050565b60006001600160601b03821115620006e25760405163408ba96f60e11b815260040160405180910390fd5b5090565b60006001600160501b03821115620006e25760405163408ba96f60e11b815260040160405180910390fd5b6040516101c081016001600160401b03811182821017156200074357634e487b7160e01b600052604160045260246000fd5b60405290565b80516001600160a01b03811681146200076157600080fd5b919050565b60006101c082840312156200077a57600080fd5b6200078462000711565b6200078f8362000749565b81526200079f6020840162000749565b602082015260408381015190820152606080840151908201526080808401519082015260a0808401519082015260c
0808401519082015260e08084015190820152610100808401519082015261012080840151908201526101408084015190820152610160808401519082015261018080840151908201526101a0928301519281019290925250919050565b6000826200084957634e487b7160e01b600052601260045260246000fd5b500690565b634e487b7160e01b600052601160045260246000fd5b60008160001904831182151516156200088157620008816200084e565b500290565b808201808211156200089c576200089c6200084e565b92915050565b60805160a05160c05160e05161010051610120516101405161016051610180516101a0516101c051614cf5620009d8600039600081816103d8015281816112850152818161153d015281816126ec015281816135fb015261368e01526000611d80015260008181610c3301528181610cfa01526111d4015260006111090152600081816102b10152818161353001526135650152600081816104ea01528181611d5e01528181613227015261325c0152600081816139cc0152613a000152600081816116950152611c0501526000818161165a0152611bb90152600081816104ae015281816115b20152611aff01526000818161063b0152818161072b015281816108fa01528181610b8e01528181610fdf015281816111440152818161140b01528181611cc60152611e550152614cf56000f3fe608060405234801561001057600080fd5b50600436106102815760003560e01c80637a7664601161015d578063a07aea1c116100c9578063a07aea1c1461058f578063a4c0ed36146105a2578063a7a2f5aa146105b5578063b187bd26146105bd578063bfbd9b1b146105c5578063c1852f58146105d8578063d365a377146105eb578063da9c732f146105fe578063e0974ea514610606578063e5f929731461060e578063e937fdaa14610616578063ebdb56f31461061e578063f2fde38b1461062657600080fd5b80637a7664601461046d5780637cb64759146104805780637e1a3786146104935780638019e7d0146104a457806383db28a0146104ac57806387e900b1146104d25780638856398f146104e55780638932a90d1461051f5780638a44f337146105325780638da5cb5b146105455780638fb4b573146105565780639a109bc2146105695780639d0a38641461057c57600080fd5b80634a4e3bd5116101fc5780634a4e3bd5146103a257806351858e27146103aa57806359f01879146103b25780635aa6e013146103c65780635c975abb146103ce5780635e8b40d7146103d65780635fec60f8146103fc57806363b2c85a146104115780636d70f7ae14610424578063741040021461043757806374de4ec4146
1043f57806374f237c41461045257806379ba50971461046557600080fd5b8063049b2ca0146102865780630641bdd8146102ac5780630fbc8f5b146102f7578063165d35e114610308578063181f5a771461031d5780631a9d4c7c1461034c5780631ddb55521461035457806322f3e2d4146103655780632def66201461037d57806332e288501461038757806338adb6f014610392578063495906571461039a575b600080fd5b6004546201000090046001600160601b03165b6040519081526020015b60405180910390f35b6005547f000000000000000000000000000000000000000000000000000000000000000090600160601b90046001600160501b03165b604080519283526020830191909152016102a3565b6005546001600160601b0316610299565b610310610639565b6040516102a39190614432565b604080518082018252600d81526c05374616b696e6720302e312e3609c1b602082015290516102a3919061448c565b61029961065d565b600f546001600160a01b0316610310565b61036d610682565b60405190151581526020016102a3565b61038561069b565b005b60085460ff16610299565b6102996107cc565b601154610299565b6103856107d8565b6103856107ea565b6102e2600c54600b5463ffffffff90911691565b6103856107fa565b61036d610978565b7f0000000000000000000000000000000000000000000000000000000000000000610299565b610404610988565b6040516102a391906144e3565b61038561041f366004614512565b6109ed565b61036d610432366004614512565b610b3f565b610299610b52565b61038561044d36600461452d565b610b67565b61038561046036600461452d565b610c9f565b610385610d65565b61029961047b366004614512565b610e0f565b61038561048e36600461452d565b610e39565b6009546001600160501b0316610299565b600654610299565b7f0000000000000000000000000000000000000000000000000000000000000000610310565b6102996104e0366004614512565b610e76565b6005547f000000000000000000000000000000000000000000000000000000000000000090600160b01b90046001600160501b03166102e2565b61038561052d366004614546565b610f24565b6103856105403660046145b7565b6110aa565b6000546001600160a01b0316610310565b6103856105643660046145e3565b6110da565b610299610577366004614512565b611206565b61036d61058a36600461466e565b6112bb565b61038561059d366004614716565b611300565b6103856105b0366004614778565b611358565b610299611522565b61036d611561565b6
103856105d3366004614716565b61156b565b61036d6105e6366004614512565b61157f565b6103856105f9366004614716565b6116dd565b610385611ac4565b610299611e2e565b610385611edf565b610385611f17565b610385611fd9565b610385610634366004614512565b612079565b7f000000000000000000000000000000000000000000000000000000000000000090565b600061067d61066c60026123af565b610674611522565b60079190612405565b905090565b60045460009060ff16801561067d575050600b54421090565b6106a3610682565b156106d057604051635185386160e11b815260016004820152600060248201526044015b60405180910390fd5b60008060006106de3361243b565b9250925092507f204fccf0d92ed8d48f204adb39b2e81e92bad0dedb93f5716ca9478cfb57de00338484846040516107199493929190614826565b60405180910390a16001600160a01b037f00000000000000000000000000000000000000000000000000000000000000001663a9059cbb338361075c8688614862565b6107669190614862565b6040518363ffffffff1660e01b8152600401610783929190614875565b6020604051808303816000875af11580156107a2573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906107c6919061488e565b50505050565b600061067d60026123af565b6107e06127af565b6107e8612802565b565b6107f26127af565b6107e8612847565b610802610682565b1561082a57604051635185386160e11b815260016004820152600060248201526044016106c7565b33600090815260026020526040812054600160701b90046001600160601b03169081900361086d5733604051637256ef3960e11b81526004016106c79190614432565b806002600401600082825461088291906148b0565b9091555050336000818152600260205260408082208054600160701b600160d01b0319169055517f204fccf0d92ed8d48f204adb39b2e81e92bad0dedb93f5716ca9478cfb57de00926108db9290918591908190614826565b60405180910390a160405163a9059cbb60e01b81526001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb906109319033908590600401614875565b6020604051808303816000875af1158015610950573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610974919061488e565b5050565b600154600160a01b900460ff1690565b606060026001018054806020026020016040519081016040528092919081815260200182805
480156109e357602002820191906000526020600020905b81546001600160a01b031681526001909101906020018083116109c5575b5050505050905090565b6109f56127af565b6001600160a01b0381163b1580610a1457506001600160a01b03811630145b80610a2c5750600d546001600160a01b038281169116145b80610a445750600f546001600160a01b038281169116145b80610abb57506040516301ffc9a760e01b8152635260769b60e11b60048201526001600160a01b038216906301ffc9a790602401602060405180830381865afa158015610a95573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610ab9919061488e565b155b15610ad9576040516306cf420760e31b815260040160405180910390fd5b600f80546001600160a01b0319908116909155600d80549091166001600160a01b03831617905542600e556040517f5c74c441be501340b2713817a6c6975e6f3d4a4ae39fa1ac0bf75d3c54a0cad390610b34908390614432565b60405180910390a150565b6000610b4c60028361288a565b92915050565b600061067d610b5f611522565b6007906128a9565b610b6f6127af565b610b776128d8565b6040516323b872dd60e01b81526001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016906323b872dd90610bc7903390309086906004016148c3565b6020604051808303816000875af1158015610be6573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610c0a919061488e565b50600554610c6f906001600160601b0316610c2560026123af565b6009546001600160501b03167f0000000000000000000000000000000000000000000000000000000000000000610c5a611e2e565b610c62611522565b6007959493929190612907565b6040518181527fde88a922e0d3b88b24e9623efeb464919c6bf9f66857a65e2bfcf2ce87a9433d90602001610b34565b610ca76127af565b610caf6128d8565b80600003610cbc57600080fd5b6000610cc6611522565b9050610cd3600782612a6c565b610cdd6007612adf565b600554610d2e906001600160601b0316610cf760026123af565b847f0000000000000000000000000000000000000000000000000000000000000000610d21611e2e565b6007949392919087612907565b6040518281527f1e3be2efa25bca5bff2215c7b30b31086e703d6aa7d9b9a1f8ba62c5291219ad9060200160405180910390a15050565b6001546001600160a01b03163314610db85760405162461bcd60e51b815260206004820152601660248201527526bab9b
a10313290383937b837b9b2b21037bbb732b960511b60448201526064016106c7565b60008054336001600160a01b0319808316821784556001805490911690556040516001600160a01b0390921692909183917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e091a350565b6001600160a01b03166000908152600260205260409020546201000090046001600160601b031690565b610e416127af565b60118190556040518181527f1b930366dfeaa7eb3b325021e4ae81e36527063452ee55b86c95f85b36f4c31c90602001610b34565b6001600160a01b03811660009081526002602090815260408083208151608081018352905460ff808216151580845261010083049091161515948301949094526001600160601b03620100008204811693830193909352600160701b9004909116606082015290610eea5750600092915050565b80604001516001600160601b0316600003610f085750600092915050565b610f1d83610f14611522565b60079190612ba2565b9392505050565b610f2c610682565b15610f5457604051635185386160e11b815260016004820152600060248201526044016106c7565b600f546001600160a01b0316610f7d576040516306cf420760e31b815260040160405180910390fd5b6000806000610f8b3361243b565b9250925092507f667838b33bdc898470de09e0e746990f2adc11b965b7fe6828e502ebc39e0434338484848989604051610fca96959493929190614910565b60405180910390a1600f546001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000811691634000aea09116836110138688614862565b61101d9190614862565b33898960405160200161103293929190614950565b6040516020818303038152906040526040518463ffffffff1660e01b815260040161105f93929190614975565b6020604051808303816000875af115801561107e573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906110a2919061488e565b505050505050565b6110b26127af565b6110ba6128d8565b6110c7600284848461208d565b6110d583610c2560026123af565b505050565b6110e26127af565b60115461110257604051634fc5147960e11b815260040160405180910390fd5b61112d60027f0000000000000000000000000000000000000000000000000000000000000000612bdf565b6040516323b872dd60e01b81526001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016906323b872dd9061117d903390309087906004016148c3565b6
020604051808303816000875af115801561119c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906111c0919061488e565b50600554610974906001600160601b0316827f00000000000000000000000000000000000000000000000000000000000000006111fb611e2e565b600793929190612c5e565b6001600160a01b0381166000908152600260205260408120546201000090046001600160601b031680820361123e5750600092915050565b61124960028461288a565b1561125a57610f1d60078483612d6c565b6001600160a01b0383166000908152600760205260409020546001600160601b03166112b16112a9837f0000000000000000000000000000000000000000000000000000000000000000612d98565b600790612dae565b610f1d91906148b0565b6011546000906112cd57506001610b4c565b610f1d82601154856040516020016112e59190614432565b60405160208183030381529060405280519060200120612e5b565b6113086127af565b600c5463ffffffff16158015906113245750611322610682565b155b1561134c57604051635185386160e11b815260006004820152600160248201526044016106c7565b61097460028383612e71565b611360610639565b6001600160a01b0316336001600160a01b031614611391576040516309ad2a8760e31b815260040160405180910390fd5b6113996131a4565b6113a16128d8565b64e8d4a510008210156113ce57604051631d820b1760e01b815264e8d4a5100060048201526024016106c7565b60006113df64e8d4a51000846149b2565b90508015611487576113f181846148b0565b60405163a9059cbb60e01b81529093506001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb906114429087908590600401614875565b6020604051808303816000875af1158015611461573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611485919061488e565b505b61149260028561288a565b156114a6576114a184846131ec565b6107c6565b601154156115185781516000036114d057604051631decfebb60e31b815260040160405180910390fd5b6114fb828060200190518101906114e791906149c6565b601154866040516020016112e59190614432565b61151857604051631decfebb60e31b815260040160405180910390fd5b6107c684846134f9565b60045460009061067d906201000090046001600160601b03167f0000000000000000000000000000000000000000000000000000000000000000613797565b600061067
d610978565b6115736127af565b610974600283836137a3565b600061158a82610e0f565b60000361159957506000919050565b6115a1610682565b6115ad57506000919050565b6000807f00000000000000000000000000000000000000000000000000000000000000006001600160a01b031663feaf968c6040518163ffffffff1660e01b815260040160a060405180830381865afa15801561160e573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906116329190614a6d565b50935050506001600160501b031691506010548203611655575060009392505050565b61167f7f000000000000000000000000000000000000000000000000000000000000000082614862565b421015611690575060009392505050565b6116ba7f000000000000000000000000000000000000000000000000000000000000000082614862565b42106116ca575060019392505050565b6116d560028561288a565b949350505050565b6116e56127af565b6116ed6128d8565b6117006116f8611522565b600790612a6c565b60005b81811015611a8457600083838381811061171f5761171f614abd565b90506020020160208101906117349190614512565b6001600160a01b0381166000908152600260209081526040918290208251608081018452905460ff808216151580845261010083049091161515938301939093526001600160601b03620100008204811694830194909452600160701b90049092166060830152919250906117be578160405163eac13dcd60e01b81526004016106c79190614432565b8060200151156117e3578160405163ded6031960e01b81526004016106c79190614432565b60408101516001600160601b03168015611a145761180861180384611206565b612357565b600a80546000906118239084906001600160601b0316614ad3565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555061186161180382611858600761393e565b6007919061396f565b600a805460009061187c9084906001600160601b0316614ad3565b92506101000a8154816001600160601b0302191690836001600160601b031602179055506118ac61180384610e76565b600a8054600c906118ce908490600160601b90046001600160601b0316614ad3565b82546001600160601b039182166101009390930a928302919092021990911617905550600880546001919060009061190a90849060ff16614afa565b825460ff9182166101009390930a9283029190920219909116179055506001600160a01b0383166000908152600260205260408120805462010000600160701b0319169
05561195882612357565b600480549192508291600e9061197f908490600160701b90046001600160601b0316614ad3565b82546101009290920a6001600160601b038181021990931691831602179091556001600160a01b03861660009081526002602052604081208054600160701b600160d01b031916600160701b93861693840217905560068054929350916119e7908490614862565b9091555050506001600160a01b038316600090815260076020526040902080546001600160601b03191690555b6001600160a01b03831660009081526002602052604090819020805460ff19169055517f2360404a74478febece1a14f11275f22ada88d19ef96f7d785913010bfff447990611a669085908490614875565b60405180910390a15050508080611a7c90614b13565b915050611703565b50611a8e8161399d565b60048054600190611aa8908490610100900460ff16614afa565b92506101000a81548160ff021916908360ff1602179055505050565b611acc6128d8565b6000611ad733610e0f565b905080600003611afa57604051631decfebb60e31b815260040160405180910390fd5b6000807f00000000000000000000000000000000000000000000000000000000000000006001600160a01b031663feaf968c6040518163ffffffff1660e01b815260040160a060405180830381865afa158015611b5b573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611b7f9190614a6d565b50935050506001600160501b031691506010548203611bb4576040516379aa9e1160e11b8152600481018390526024016106c7565b611bde7f000000000000000000000000000000000000000000000000000000000000000082614862565b421015611bfe57604051637e29e28560e11b815260040160405180910390fd5b6000611c2a7f000000000000000000000000000000000000000000000000000000000000000083614862565b42109050808015611c435750611c4160023361288a565b155b15611c6157604051637e29e28560e11b815260040160405180910390fd5b60108390556000611c7285836139c1565b90507fd2720e8f454493f612cc97499fe8cbce7fa4d4c18d346fe7104e9042df1c1edd338583604051611ca793929190614b2c565b60405180910390a160405163a9059cbb60e01b81526001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000169063a9059cbb90611cfd9033908590600401614875565b6020604051808303816000875af1158015611d1c573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019
0611d40919061488e565b506003805460408051602080840282018101909252828152611df7937f0000000000000000000000000000000000000000000000000000000000000000937f000000000000000000000000000000000000000000000000000000000000000093830182828015611dd957602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311611dbb575b50505050506002600001611deb611522565b60079493929190613a24565b600554611e27906001600160601b0316611e1160026123af565b6009546001600160501b03166000610c5a611e2e565b5050505050565b600654600090611e3e60026123af565b6040516370a0823160e01b81526001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016906370a0823190611e8a903090600401614432565b602060405180830381865afa158015611ea7573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611ecb9190614b4d565b611ed591906148b0565b61067d91906148b0565b611ee76127af565b611eef6128d8565b611f0d611efc60026123af565b611f04611522565b60079190613ce0565b6107e86002613d12565b611f1f6127af565b600d546001600160a01b0316611f48576040516306cf420760e31b815260040160405180910390fd5b600e54611f589062093a80614862565b421015611f7857604051631decfebb60e31b815260040160405180910390fd5b600d8054600f80546001600160a01b0383166001600160a01b031991821681179092559091169091556040517ffa33c052bbee754f3c0482a89962daffe749191fa33c696a61e947fbfd68bd8491611fcf91614432565b60405180910390a1565b611fe16127af565b611fe9610682565b1561201157604051635185386160e11b815260016004820152600060248201526044016106c7565b600a546000906001600160601b03600160601b820481169116612032611e2e565b61203c91906148b0565b61204691906148b0565b90507f150a6ec0e6f4e9ddcaaaa1674f157d91165a42d60653016f87a9fc870a39f050816040516108db91815260200190565b6120816127af565b61208a81613d4a565b50565b828111156120b15760405163bc91aa3360e01b8152600481018290526024016106c7565b60038401546001600160601b03168310156120e257604051630f9e1c3b60e11b8152600481018490526024016106c7565b6003840154600160601b90046001600160501b031682101561211a5760405163bc91aa3360e01b8152600481018390526024016106c7565b6003840
154600160b01b90046001600160501b03168110156121525760405163bc91aa3360e01b8152600481018290526024016106c7565b60408051608081018252600286015460ff80821615158352610100820416602083018190526001600160601b036201000083048116948401859052600160701b90920490911660608301529091906121ab908490614b66565b6121b59190614862565b8410156121d85760405163bc91aa3360e01b8152600481018390526024016106c7565b60038501546001600160601b0316841461224b576121f584612357565b6003860180546001600160601b0319166001600160601b03929092169190911790556040518481527f7f4f497e086b2eb55f8a9885ba00d33399bbe0ebcb92ea092834386435a1b9c09060200160405180910390a15b6003850154600160601b90046001600160501b031683146122d05761226f83612385565b6003860180546001600160501b0392909216600160601b02600160601b600160b01b03199092169190911790556040518381527fb5f554e5ef00806bace1edbb84186512ebcefa2af7706085143f501f29314df79060200160405180910390a15b6003850154600160b01b90046001600160501b03168214611e27576122f482612385565b6003860180546001600160501b0392909216600160b01b026001600160b01b039092169190911790556040518281527f816587cb2e773af4f3689a03d7520fabff3462605ded374b485b13994c0d7b529060200160405180910390a15050505050565b60006001600160601b038211156123815760405163408ba96f60e11b815260040160405180910390fd5b5090565b60006001600160501b038211156123815760405163408ba96f60e11b815260040160405180910390fd5b60408051608081018252600283015460ff8082161515835261010082041660208301526001600160601b036201000082048116938301849052600160701b9091041660608201819052600092610f1d9190614862565b60006124238461241584866148b0565b61241e8761393e565b61396f565b60038501546116d591906001600160601b03166148b0565b6001600160a01b03811660009081526002602090815260408083208151608081018352905460ff808216151583526101008204161515938201939093526001600160601b036201000084048116928201839052600160701b90930490921660608301528291829182036124c35784604051637256ef3960e11b81526004016106c79190614432565b60045460ff16156124ee576124d96116f8611522565b6124e36007612adf565b6004805460ff191690555b80511561268a57604081015160048054600e9061251c908
490600160701b90046001600160601b0316614ad3565b92506101000a8154816001600160601b0302191690836001600160601b03160217905550600061256582604001516001600160601b0316876007613ded9092919063ffffffff16565b6001600160a01b03871660009081526007602052604081205460085492935090916125a8916001600160601b03600160601b9091048116916101009004166148b0565b6001600160a01b0388166000908152600260205260409020805462010000600160701b031916905590506125db82612357565b600a80546000906125f69084906001600160601b0316614ad3565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555061262381612357565b600a8054600c90612645908490600160601b90046001600160601b0316614ad3565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555082604001518282826001600160601b031692509550955095505050506127a8565b6040810151600480546002906126b09084906201000090046001600160601b0316614ad3565b92506101000a8154816001600160601b0302191690836001600160601b03160217905550600061271961271083604001516001600160601b03167f0000000000000000000000000000000000000000000000000000000000000000612d98565b60079088613ded565b6001600160a01b0387166000908152600260205260409020805462010000600160701b0319169055905061274c81612357565b600a80546000906127679084906001600160601b0316614ad3565b92506101000a8154816001600160601b0302191690836001600160601b031602179055508160400151816000826001600160601b0316925094509450945050505b9193909250565b6000546001600160a01b031633146107e85760405162461bcd60e51b815260206004820152601660248201527527b7363c9031b0b63630b1363290313c9037bbb732b960511b60448201526064016106c7565b61280a613e3b565b6001805460ff60a01b191690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa335b604051611fcf9190614432565b61284f6131a4565b6001805460ff60a01b1916600160a01b1790557f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a25861283a3390565b6001600160a01b03166000908152602091909152604090205460ff1690565b60006128b9838361241e8661393e565b6003840154610f1d9190600160601b90046001600160601b03166148b0565b6128e0610682565b6107e857604051635185386160e
11b815260006004820152600160248201526044016106c7565b6000612914888784612405565b9050600061292289846128a9565b905060008161293184876148b0565b61293b91906148b0565b60028b01549091506001600160501b0316871461297e5761295b87612385565b60028b0180546001600160501b0319166001600160501b03929092169190911790555b600061298a8a89614b66565b61299964e8d4a5100084614b66565b6129a39190614b85565b9050868110156129c55760405162da056d60e81b815260040160405180910390fd5b6129e66129dc8c6129d6888d6148b0565b8461396f565b6118039086614862565b60038c0180546001600160601b0319166001600160601b0392909216919091179055612a20612a168c878461396f565b6118039085614862565b60038c0180546001600160601b0392909216600160601b02600160601b600160c01b0319909216919091179055612a578142614862565b8b600401819055505050505050505050505050565b612a796118038383613e86565b6001830180546001600160601b039290921661010002610100600160681b0319909216919091179055612ab3612aae83613f25565b613f35565b6001909201805463ffffffff93909316600160681b0263ffffffff60681b199093169290921790915550565b6000612aea82613f25565b6002830154909150612b2590612b0d90600160b01b900463ffffffff16836148b0565b600284015461180391906001600160501b0316614b66565b600283018054600a90612b49908490600160501b90046001600160601b0316614b99565b92506101000a8154816001600160601b0302191690836001600160601b03160217905550612b7681613f35565b6002909201805463ffffffff93909316600160b01b0263ffffffff60b01b199093169290921790915550565b6001600160a01b038216600090815260208490526040812054600160601b90046001600160601b0316612bd58584613e86565b6116d591906148b0565b6002820154610100900460ff16811115612c2257600282015460405163e709379960e01b815261010090910460ff166004820152602481018290526044016106c7565b60028201805460ff191660011790556040517fded6ebf04e261e1eb2f3e3b268a2e6aee5b478c15b341eba5cf18b9bc80c2e6390600090a15050565b600585015463ffffffff1615612c7357600080fd5b612c7c83612385565b6002860180546001600160501b0319166001600160501b03929092169190911790556000612ca942613f35565b60058701805463ffffffff191663ffffffff831690811790915560018801805463ffffffff60681b191660016
0681b830217905560028801805463ffffffff60b01b1916600160b01b9092029190911790559050612d0d8686600087878783612907565b60058601546004870154604080518781526020810186905263ffffffff9093169083015260608201527f125fc8494f786b470e3c39d0932a62e9e09e291ebd81ea19c57604f6d2b1d167906080015b60405180910390a1505050505050565b6001600160a01b0382166000908152602084905260408120546001600160601b0316612bd58584612dae565b6000612da48383613797565b610f1d90846148b0565b600080612dbf846004015442101590565b612de4576002840154612ddf90600160b01b900463ffffffff16426148b0565b612e05565b60028401546004850154612e0591600160b01b900463ffffffff16906148b0565b600285015490915064e8d4a5100090612e289083906001600160501b0316614b66565b6002860154612e479190600160501b90046001600160601b0316614862565b612e519085614b66565b6116d59190614b85565b600082612e688584613f5c565b14949350505050565b6003830154600090612e9390600160b01b90046001600160501b031683614b66565b90506000612ea085613fa9565b905080821115612ecd576040516335cf446b60e01b815260048101829052602481018390526044016106c7565b60005b8381101561315e57856000868684818110612eed57612eed614abd565b9050602002016020810190612f029190614512565b6001600160a01b0316815260208101919091526040016000205460ff1615612f6457848482818110612f3657612f36614abd565b9050602002016020810190612f4b9190614512565b604051625290b360e11b81526004016106c79190614432565b60008681878785818110612f7a57612f7a614abd565b9050602002016020810190612f8f9190614512565b6001600160a01b031681526020810191909152604001600020546201000090046001600160601b03161115612fff57848482818110612fd057612fd0614abd565b9050602002016020810190612fe59190614512565b60405163602d4d1160e01b81526004016106c79190614432565b6000868187878581811061301557613015614abd565b905060200201602081019061302a9190614512565b6001600160a01b03168152602081019190915260400160002054600160701b90046001600160601b0316111561309b5784848281811061306c5761306c614abd565b90506020020160208101906130819190614512565b604051631e8de2e760e21b81526004016106c79190614432565b60018660008787858181106130b2576130b2614abd565b90506020020160208101906
130c79190614512565b6001600160a01b031681526020810191909152604001600020805460ff19169115159190911790557fac6fa858e9350a46cec16539926e0fde25b7629f84b5a72bffaae4df888ae86d85858381811061312257613122614abd565b90506020020160208101906131379190614512565b6040516131449190614432565b60405180910390a18061315681614b13565b915050612ed0565b506131688361399d565b600286015461317f9190610100900460ff16614bb9565b6002909501805460ff969096166101000261ff00199096169590951790945550505050565b6131ac610978565b156107e85760405162461bcd60e51b815260206004820152601060248201526f14185d5cd8589b194e881c185d5cd95960821b60448201526064016106c7565b6001600160a01b038216600090815260026020526040812080549091620100009091046001600160601b0316906132238483614862565b90507f000000000000000000000000000000000000000000000000000000000000000081101561328857604051631d820b1760e01b81527f000000000000000000000000000000000000000000000000000000000000000060048201526024016106c7565b600554600160b01b90046001600160501b0316808211156132c9576132ad83826148b0565b604051631728673b60e31b81526004016106c791815260200190565b826000036133b3576132dc6116f8611522565b60085460ff16600081900361334e57600854600a80546001600160601b03610100909304831692600c91613319918591600160601b900416614ad3565b82546001600160601b039182166101009390930a92830291909202199091161790555060088054610100600160681b03191690555b613359816001614bb9565b6008805460ff191660ff9290921691909117908190556001600160a01b03881660009081526007602052604090208054600160601b600160c01b0319166101009092046001600160601b0316600160601b02919091179055505b6133c1611803600787612dae565b6001600160a01b038716600090815260076020526040812080549091906133f29084906001600160601b0316614b99565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555061341f85612357565b60048054600e90613441908490600160701b90046001600160601b0316614b99565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555061347c85600060076140309092919063ffffffff16565b61348582612357565b6001600160a01b038716600090815260026020526040908190208054600160016
0601b0393909316620100000262010000600160701b031990931692909217909155517f1449c6dd7851abc30abf37f57715f492010519147cc2652fbc38202c18a6ee9090612d5c90889088908690614b2c565b6001600160a01b0382166000908152600260205260408120546201000090046001600160601b03169061352c8383614862565b90507f000000000000000000000000000000000000000000000000000000000000000081101561359157604051631d820b1760e01b81527f000000000000000000000000000000000000000000000000000000000000000060048201526024016106c7565b600554600160601b90046001600160501b0316808211156135b6576132ad83826148b0565b60006135c26002613fa9565b9050808511156135e857604051631728673b60e31b8152600481018290526024016106c7565b6135f36116f8611522565b600061361f867f0000000000000000000000000000000000000000000000000000000000000000612d98565b905061362f611803600783612dae565b6001600160a01b038816600090815260076020526040812080549091906136609084906001600160601b0316614b99565b92506101000a8154816001600160601b0302191690836001600160601b031602179055506136bb816136b2887f0000000000000000000000000000000000000000000000000000000000000000613797565b60079190614030565b6136c486612357565b600480546002906136e59084906201000090046001600160601b0316614b99565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555061371284612357565b6001600160a01b0388166000908152600260205260409081902080546001600160601b0393909316620100000262010000600160701b031990931692909217909155517f1449c6dd7851abc30abf37f57715f492010519147cc2652fbc38202c18a6ee909061378690899089908890614b2c565b60405180910390a150505050505050565b6000610f1d8284614b85565b60005b600184015481101561380c578360000160008560010183815481106137cd576137cd614abd565b60009182526020808320909101546001600160a01b031683528201929092526040019020805461ff00191690558061380481614b13565b9150506137a6565b5061381b6001840160006143a4565b60005b818110156138f057600083838381811061383a5761383a614abd565b905060200201602081019061384f9190614512565b905061385b858261288a565b61387a578060405163eac13dcd60e01b81526004016106c79190614432565b6001600160a01b0381166000908152602086905
26040902054610100900460ff16156138ba5780604051625290b360e11b81526004016106c79190614432565b6001600160a01b03166000908152602085905260409020805461ff001916610100179055806138e881614b13565b91505061381e565b506138ff6001840183836143c2565b507f40aed8e423b39a56b445ae160f4c071fc2cfb48ee0b6dcd5ffeb6bc5b18d10d08282604051613931929190614bd2565b60405180910390a1505050565b600061394e826004015442101590565b6139675742826004015461396291906148b0565b610b4c565b600092915050565b600283015460009064e8d4a51000908390613993906001600160501b031686614b66565b612e519190614b66565b600060ff8211156123815760405163408ba96f60e11b815260040160405180910390fd5b600081156139f057507f0000000000000000000000000000000000000000000000000000000000000000610b4c565b610f1d6139fe600585614b85565b7f000000000000000000000000000000000000000000000000000000000000000061403d565b600186015460ff16156110a2576000613a3e878787614053565b90506000613a4d888785614060565b9050600080600087516001600160401b03811115613a6d57613a6d614605565b604051908082528060200260200182016040528015613a96578160200160208202803683370190505b509050600088516001600160401b03811115613ab457613ab4614605565b604051908082528060200260200182016040528015613add578160200160208202803683370190505b50905060005b8951811015613bfc5760008a8281518110613b0057613b00614abd565b6020908102919091018101516001600160a01b0381166000908152918c905260408220549092506201000090046001600160601b031690819003613b45575050613bea565b613b518f8a84846140bf565b858481518110613b6357613b63614abd565b602002602001018181525050613b7b8f89848d614148565b848481518110613b8d57613b8d614abd565b602002602001018181525050848381518110613bab57613bab614abd565b602002602001015187613bbe9190614862565b9650838381518110613bd257613bd2614abd565b602002602001015186613be59190614862565b955050505b80613bf481614b13565b915050613ae3565b50613c0684612357565b60038d018054600090613c239084906001600160601b0316614ad3565b92506101000a8154816001600160601b0302191690836001600160601b03160217905550613c5083612357565b60038d018054600c90613c74908490600160601b90046001600160601b0316614ad3565b925
06101000a8154816001600160601b0302191690836001600160601b031602179055507e635ea9da6e262e92bb713d71840af7c567807ff35bf73e927490c612832480898383604051613cca93929190614c4e565b60405180910390a1505050505050505050505050565b613cea8382612a6c565b613cf383612adf565b613d0783613d0183856148b0565b836141a7565b505042600490910155565b60028101805460ff191690556040517ff7d0e0f15586495da8c687328ead30fb829d9da55538cb0ef73dd229e517cdb890600090a150565b336001600160a01b03821603613d9c5760405162461bcd60e51b815260206004820152601760248201527621b0b73737ba103a3930b739b332b9103a379039b2b63360491b60448201526064016106c7565b600180546001600160a01b0319166001600160a01b0383811691821790925560008054604051929316917fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae12789190a350565b6001600160a01b03811660009081526020849052604081205460028501546001600160601b039182169164e8d4a5100091613e3191600160501b9091041686614b66565b612bd59190614b85565b613e43610978565b6107e85760405162461bcd60e51b815260206004820152601460248201527314185d5cd8589b194e881b9bdd081c185d5cd95960621b60448201526064016106c7565b600080613e97846004015442101590565b613ebc576001840154613eb790600160681b900463ffffffff16426148b0565b613edd565b60018401546004850154613edd91600160681b900463ffffffff16906148b0565b600180860154919250613ef39160ff16906141b4565b613efe85858461396f565b613f089190614b85565b60018501546116d5919061010090046001600160601b0316614862565b6000610b4c82600401544261403d565b600063ffffffff8211156123815760405163408ba96f60e11b815260040160405180910390fd5b600081815b8451811015613fa157613f8d82868381518110613f8057613f80614abd565b60200260200101516141c3565b915080613f9981614b13565b915050613f61565b509392505050565b60408051608081018252600283015460ff80821615158352610100820416602083018190526001600160601b036201000083048116948401859052600160701b9092049091166060830152600384015460009391614018916001600160501b03600160b01b9091041690614b66565b60038501546112b191906001600160601b03166148b0565b6110d583838360016141f2565b600081831061404c5781610f1d565b5090919050565b60006116d584848461396
f565b60408051606081018252600185015460ff811680835261010082046001600160601b03166020840152600160681b90910463ffffffff16928201929092526000916140ac86858761396f565b6140b69190614b85565b95945050505050565b6000806140cd868585612d6c565b905060006140db868361403d565b90506140e681612357565b6001600160a01b038616600090815260208990526040812080549091906141179084906001600160601b0316614b99565b92506101000a8154816001600160601b0302191690836001600160601b031602179055508092505050949350505050565b600080614156868585612ba2565b90506000614164868361403d565b905061416f81612357565b6001600160a01b03861660009081526020899052604090208054600c90614117908490600160601b90046001600160601b0316614b99565b6110d583838360006141f2565b600081831161404c5781610f1d565b60008183106141df576000828152602084905260409020610f1d565b6000838152602083905260409020610f1d565b60006141fd8561393e565b9050600061420f61180387878561396f565b9050600061422161180388878661396f565b9050831561430957600061423a64e8d4a51000886149b2565b111561424e578161424a81614c91565b9250505b600061425f64e8d4a51000876149b2565b1115614273578061426f81614c91565b9150505b6003870180548391906000906142939084906001600160601b0316614b99565b92506101000a8154816001600160601b0302191690836001600160601b031602179055508087600301600001600c8282829054906101000a90046001600160601b03166142e09190614b99565b92506101000a8154816001600160601b0302191690836001600160601b0316021790555061439b565b6003870180548391906000906143299084906001600160601b0316614ad3565b92506101000a8154816001600160601b0302191690836001600160601b031602179055508087600301600001600c8282829054906101000a90046001600160601b03166143769190614ad3565b92506101000a8154816001600160601b0302191690836001600160601b031602179055505b50505050505050565b508054600082559060005260206000209081019061208a919061441d565b828054828255906000526020600020908101928215614415579160200282015b828111156144155781546001600160a01b0319166001600160a01b038435161782556020909201916001909101906143e2565b506123819291505b5b80821115612381576000815560010161441e565b6001600160a01b0391909116815260200190565
b6000815180845260005b8181101561446c57602081850181015186830182015201614450565b506000602082860101526020601f19601f83011685010191505092915050565b602081526000610f1d6020830184614446565b600081518084526020808501945080840160005b838110156144d85781516001600160a01b0316875295820195908201906001016144b3565b509495945050505050565b602081526000610f1d602083018461449f565b80356001600160a01b038116811461450d57600080fd5b919050565b60006020828403121561452457600080fd5b610f1d826144f6565b60006020828403121561453f57600080fd5b5035919050565b6000806020838503121561455957600080fd5b82356001600160401b038082111561457057600080fd5b818501915085601f83011261458457600080fd5b81358181111561459357600080fd5b8660208285010111156145a557600080fd5b60209290920196919550909350505050565b6000806000606084860312156145cc57600080fd5b505081359360208301359350604090920135919050565b600080604083850312156145f657600080fd5b50508035926020909101359150565b634e487b7160e01b600052604160045260246000fd5b604051601f8201601f191681016001600160401b038111828210171561464357614643614605565b604052919050565b60006001600160401b0382111561466457614664614605565b5060051b60200190565b6000806040838503121561468157600080fd5b61468a836144f6565b91506020808401356001600160401b038111156146a657600080fd5b8401601f810186136146b757600080fd5b80356146ca6146c58261464b565b61461b565b81815260059190911b820183019083810190888311156146e957600080fd5b928401925b82841015614707578335825292840192908401906146ee565b80955050505050509250929050565b6000806020838503121561472957600080fd5b82356001600160401b038082111561474057600080fd5b818501915085601f83011261475457600080fd5b81358181111561476357600080fd5b8660208260051b85010111156145a557600080fd5b60008060006060848603121561478d57600080fd5b614796846144f6565b9250602080850135925060408501356001600160401b03808211156147ba57600080fd5b818701915087601f8301126147ce57600080fd5b8135818111156147e0576147e0614605565b6147f2601f8201601f1916850161461b565b9150808252888482850101111561480857600080fd5b80848401858401376000848284010152508093505050509250925092565b6001600160a01b0
394909416845260208401929092526040830152606082015260800190565b634e487b7160e01b600052601160045260246000fd5b80820180821115610b4c57610b4c61484c565b6001600160a01b03929092168252602082015260400190565b6000602082840312156148a057600080fd5b81518015158114610f1d57600080fd5b81810381811115610b4c57610b4c61484c565b6001600160a01b039384168152919092166020820152604081019190915260600190565b81835281816020850137506000828201602090810191909152601f909101601f19169091010190565b60018060a01b038716815285602082015284604082015283606082015260a06080820152600061494460a0830184866148e7565b98975050505050505050565b6001600160a01b03841681526040602082018190526000906140b690830184866148e7565b60018060a01b03841681528260208201526060604082015260006140b66060830184614446565b634e487b7160e01b600052601260045260246000fd5b6000826149c1576149c161499c565b500690565b600060208083850312156149d957600080fd5b82516001600160401b038111156149ef57600080fd5b8301601f81018513614a0057600080fd5b8051614a0e6146c58261464b565b81815260059190911b82018301908381019087831115614a2d57600080fd5b928401925b82841015614a4b57835182529284019290840190614a32565b979650505050505050565b80516001600160501b038116811461450d57600080fd5b600080600080600060a08688031215614a8557600080fd5b614a8e86614a56565b9450602086015193506040860151925060608601519150614ab160808701614a56565b90509295509295909350565b634e487b7160e01b600052603260045260246000fd5b6001600160601b03828116828216039080821115614af357614af361484c565b5092915050565b60ff8281168282160390811115610b4c57610b4c61484c565b600060018201614b2557614b2561484c565b5060010190565b6001600160a01b039390931683526020830191909152604082015260600190565b600060208284031215614b5f57600080fd5b5051919050565b6000816000190483118215151615614b8057614b8061484c565b500290565b600082614b9457614b9461499c565b500490565b6001600160601b03818116838216019080821115614af357614af361484c565b60ff8181168382160190811115610b4c57610b4c61484c565b60208082528181018390526000908460408401835b86811015614c13576001600160a01b03614c00846144f6565b1682529183019190830190600101614be7565b50969
5505050505050565b600081518084526020808501945080840160005b838110156144d857815187529582019590820190600101614c32565b606081526000614c61606083018661449f565b8281036020840152614c738186614c1e565b90508281036040840152614c878185614c1e565b9695505050505050565b60006001600160601b038281166002600160601b03198101614cb557614cb561484c565b600101939250505056fea2646970667358221220e6f8c7e5a925ed427780f5a651fa7904325bcaa2742e17b72555be4f810f5d6564736f6c63430008100033", +} + +// StakingABI is the input ABI used to generate the binding from. +// Deprecated: Use StakingMetaData.ABI instead. +var StakingABI = StakingMetaData.ABI + +// StakingBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use StakingMetaData.Bin instead. +var StakingBin = StakingMetaData.Bin + +// DeployStaking deploys a new Ethereum contract, binding an instance of Staking to it. +func DeployStaking(auth *bind.TransactOpts, backend bind.ContractBackend, params StakingPoolConstructorParams) (common.Address, *types.Transaction, *Staking, error) { + parsed, err := StakingMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(StakingBin), backend, params) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Staking{StakingCaller: StakingCaller{contract: contract}, StakingTransactor: StakingTransactor{contract: contract}, StakingFilterer: StakingFilterer{contract: contract}}, nil +} + +// Staking is an auto generated Go binding around an Ethereum contract. +type Staking struct { + StakingCaller // Read-only binding to the contract + StakingTransactor // Write-only binding to the contract + StakingFilterer // Log filterer for contract events +} + +// StakingCaller is an auto generated read-only Go binding around an Ethereum contract. 
+type StakingCaller struct {
+	contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// StakingTransactor is an auto generated write-only Go binding around an Ethereum contract.
+type StakingTransactor struct {
+	contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// StakingFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
+type StakingFilterer struct {
+	contract *bind.BoundContract // Generic contract wrapper for the low level calls
+}
+
+// StakingSession is an auto generated Go binding around an Ethereum contract,
+// with pre-set call and transact options.
+type StakingSession struct {
+	Contract     *Staking          // Generic contract binding to set the session for
+	CallOpts     bind.CallOpts     // Call options to use throughout this session
+	TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
+}
+
+// StakingCallerSession is an auto generated read-only Go binding around an Ethereum contract,
+// with pre-set call options.
+type StakingCallerSession struct {
+	Contract *StakingCaller // Generic contract caller binding to set the session for
+	CallOpts bind.CallOpts  // Call options to use throughout this session
+}
+
+// StakingTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
+// with pre-set transact options.
+type StakingTransactorSession struct {
+	Contract     *StakingTransactor // Generic contract transactor binding to set the session for
+	TransactOpts bind.TransactOpts  // Transaction auth options to use throughout this session
+}
+
+// StakingRaw is an auto generated low-level Go binding around an Ethereum contract.
+type StakingRaw struct {
+	Contract *Staking // Generic contract binding to access the raw methods on
+}
+
+// StakingCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
+type StakingCallerRaw struct {
+	Contract *StakingCaller // Generic read-only contract binding to access the raw methods on
+}
+
+// StakingTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
+type StakingTransactorRaw struct {
+	Contract *StakingTransactor // Generic write-only contract binding to access the raw methods on
+}
+
+// NOTE(review): the scaffolding above and the constructors below are abigen
+// output; prefer regenerating the bindings over editing this file by hand.
+
+// NewStaking creates a new instance of Staking, bound to a specific deployed contract.
+// The single backend is used as the caller, transactor, and filterer.
+func NewStaking(address common.Address, backend bind.ContractBackend) (*Staking, error) {
+	contract, err := bindStaking(address, backend, backend, backend)
+	if err != nil {
+		return nil, err
+	}
+	return &Staking{StakingCaller: StakingCaller{contract: contract}, StakingTransactor: StakingTransactor{contract: contract}, StakingFilterer: StakingFilterer{contract: contract}}, nil
+}
+
+// NewStakingCaller creates a new read-only instance of Staking, bound to a specific deployed contract.
+func NewStakingCaller(address common.Address, caller bind.ContractCaller) (*StakingCaller, error) {
+	contract, err := bindStaking(address, caller, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &StakingCaller{contract: contract}, nil
+}
+
+// NewStakingTransactor creates a new write-only instance of Staking, bound to a specific deployed contract.
+func NewStakingTransactor(address common.Address, transactor bind.ContractTransactor) (*StakingTransactor, error) {
+	contract, err := bindStaking(address, nil, transactor, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &StakingTransactor{contract: contract}, nil
+}
+
+// NewStakingFilterer creates a new log filterer instance of Staking, bound to a specific deployed contract.
+func NewStakingFilterer(address common.Address, filterer bind.ContractFilterer) (*StakingFilterer, error) {
+	contract, err := bindStaking(address, nil, nil, filterer)
+	if err != nil {
+		return nil, err
+	}
+	return &StakingFilterer{contract: contract}, nil
+}
+
+// bindStaking binds a generic wrapper to an already deployed contract.
+//
+// NOTE(review): this parses the ABI JSON from the deprecated StakingABI string
+// on every bind; newer abigen output uses the cached StakingMetaData.GetAbi()
+// (as DeployStaking above already does). Regenerate the bindings rather than
+// patching by hand — changing this line would also orphan the "strings" import.
+func bindStaking(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
+	parsed, err := abi.JSON(strings.NewReader(StakingABI))
+	if err != nil {
+		return nil, err
+	}
+	return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil
+}
+
+// Call invokes the (constant) contract method with params as input values and
+// sets the output to result. The result type might be a single field for simple
+// returns, a slice of interfaces for anonymous returns and a struct for named
+// returns.
+func (_Staking *StakingRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+	return _Staking.Contract.StakingCaller.contract.Call(opts, result, method, params...)
+}
+
+// Transfer initiates a plain transaction to move funds to the contract, calling
+// its default method if one is available.
+func (_Staking *StakingRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
+	return _Staking.Contract.StakingTransactor.contract.Transfer(opts)
+}
+
+// Transact invokes the (paid) contract method with params as input values.
+func (_Staking *StakingRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
+	return _Staking.Contract.StakingTransactor.contract.Transact(opts, method, params...)
+}
+
+// Call invokes the (constant) contract method with params as input values and
+// sets the output to result. The result type might be a single field for simple
+// returns, a slice of interfaces for anonymous returns and a struct for named
+// returns.
+func (_Staking *StakingCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+	return _Staking.Contract.contract.Call(opts, result, method, params...)
+}
+
+// Transfer initiates a plain transaction to move funds to the contract, calling
+// its default method if one is available.
+func (_Staking *StakingTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
+	return _Staking.Contract.contract.Transfer(opts)
+}
+
+// Transact invokes the (paid) contract method with params as input values.
+func (_Staking *StakingTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
+	return _Staking.Contract.contract.Transact(opts, method, params...)
+}
+
+// CanAlert is a free data retrieval call binding the contract method 0xc1852f58.
+//
+// Solidity: function canAlert(address alerter) view returns(bool)
+func (_Staking *StakingCaller) CanAlert(opts *bind.CallOpts, alerter common.Address) (bool, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "canAlert", alerter)
+
+	if err != nil {
+		return *new(bool), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(bool)).(*bool)
+
+	return out0, err
+
+}
+
+// CanAlert is a free data retrieval call binding the contract method 0xc1852f58.
+//
+// Solidity: function canAlert(address alerter) view returns(bool)
+func (_Staking *StakingSession) CanAlert(alerter common.Address) (bool, error) {
+	return _Staking.Contract.CanAlert(&_Staking.CallOpts, alerter)
+}
+
+// CanAlert is a free data retrieval call binding the contract method 0xc1852f58.
+//
+// Solidity: function canAlert(address alerter) view returns(bool)
+func (_Staking *StakingCallerSession) CanAlert(alerter common.Address) (bool, error) {
+	return _Staking.Contract.CanAlert(&_Staking.CallOpts, alerter)
+}
+
+// NOTE(review): the view-method wrappers below are abigen boilerplate — each
+// StakingCaller method performs the eth_call and unpacks the ABI outputs, while
+// the Session/CallerSession variants delegate using the stored CallOpts.
+
+// GetAvailableReward is a free data retrieval call binding the contract method 0xe0974ea5.
+//
+// Solidity: function getAvailableReward() view returns(uint256)
+func (_Staking *StakingCaller) GetAvailableReward(opts *bind.CallOpts) (*big.Int, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getAvailableReward")
+
+	if err != nil {
+		return *new(*big.Int), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+	return out0, err
+
+}
+
+// GetAvailableReward is a free data retrieval call binding the contract method 0xe0974ea5.
+//
+// Solidity: function getAvailableReward() view returns(uint256)
+func (_Staking *StakingSession) GetAvailableReward() (*big.Int, error) {
+	return _Staking.Contract.GetAvailableReward(&_Staking.CallOpts)
+}
+
+// GetAvailableReward is a free data retrieval call binding the contract method 0xe0974ea5.
+//
+// Solidity: function getAvailableReward() view returns(uint256)
+func (_Staking *StakingCallerSession) GetAvailableReward() (*big.Int, error) {
+	return _Staking.Contract.GetAvailableReward(&_Staking.CallOpts)
+}
+
+// GetBaseReward is a free data retrieval call binding the contract method 0x9a109bc2.
+//
+// Solidity: function getBaseReward(address staker) view returns(uint256)
+func (_Staking *StakingCaller) GetBaseReward(opts *bind.CallOpts, staker common.Address) (*big.Int, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getBaseReward", staker)
+
+	if err != nil {
+		return *new(*big.Int), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+	return out0, err
+
+}
+
+// GetBaseReward is a free data retrieval call binding the contract method 0x9a109bc2.
+//
+// Solidity: function getBaseReward(address staker) view returns(uint256)
+func (_Staking *StakingSession) GetBaseReward(staker common.Address) (*big.Int, error) {
+	return _Staking.Contract.GetBaseReward(&_Staking.CallOpts, staker)
+}
+
+// GetBaseReward is a free data retrieval call binding the contract method 0x9a109bc2.
+//
+// Solidity: function getBaseReward(address staker) view returns(uint256)
+func (_Staking *StakingCallerSession) GetBaseReward(staker common.Address) (*big.Int, error) {
+	return _Staking.Contract.GetBaseReward(&_Staking.CallOpts, staker)
+}
+
+// GetPluginToken is a free data retrieval call binding the contract method 0x165d35e1.
+//
+// Solidity: function getPluginToken() view returns(address)
+func (_Staking *StakingCaller) GetPluginToken(opts *bind.CallOpts) (common.Address, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getPluginToken")
+
+	if err != nil {
+		return *new(common.Address), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
+
+	return out0, err
+
+}
+
+// GetPluginToken is a free data retrieval call binding the contract method 0x165d35e1.
+//
+// Solidity: function getPluginToken() view returns(address)
+func (_Staking *StakingSession) GetPluginToken() (common.Address, error) {
+	return _Staking.Contract.GetPluginToken(&_Staking.CallOpts)
+}
+
+// GetPluginToken is a free data retrieval call binding the contract method 0x165d35e1.
+//
+// Solidity: function getPluginToken() view returns(address)
+func (_Staking *StakingCallerSession) GetPluginToken() (common.Address, error) {
+	return _Staking.Contract.GetPluginToken(&_Staking.CallOpts)
+}
+
+// GetCommunityStakerLimits is a free data retrieval call binding the contract method 0x0641bdd8.
+//
+// Solidity: function getCommunityStakerLimits() view returns(uint256, uint256)
+func (_Staking *StakingCaller) GetCommunityStakerLimits(opts *bind.CallOpts) (*big.Int, *big.Int, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getCommunityStakerLimits")
+
+	if err != nil {
+		return *new(*big.Int), *new(*big.Int), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+	out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)
+
+	return out0, out1, err
+
+}
+
+// GetCommunityStakerLimits is a free data retrieval call binding the contract method 0x0641bdd8.
+//
+// Solidity: function getCommunityStakerLimits() view returns(uint256, uint256)
+func (_Staking *StakingSession) GetCommunityStakerLimits() (*big.Int, *big.Int, error) {
+	return _Staking.Contract.GetCommunityStakerLimits(&_Staking.CallOpts)
+}
+
+// GetCommunityStakerLimits is a free data retrieval call binding the contract method 0x0641bdd8.
+//
+// Solidity: function getCommunityStakerLimits() view returns(uint256, uint256)
+func (_Staking *StakingCallerSession) GetCommunityStakerLimits() (*big.Int, *big.Int, error) {
+	return _Staking.Contract.GetCommunityStakerLimits(&_Staking.CallOpts)
+}
+
+// GetDelegatesCount is a free data retrieval call binding the contract method 0x32e28850.
+//
+// Solidity: function getDelegatesCount() view returns(uint256)
+func (_Staking *StakingCaller) GetDelegatesCount(opts *bind.CallOpts) (*big.Int, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getDelegatesCount")
+
+	if err != nil {
+		return *new(*big.Int), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+	return out0, err
+
+}
+
+// GetDelegatesCount is a free data retrieval call binding the contract method 0x32e28850.
+//
+// Solidity: function getDelegatesCount() view returns(uint256)
+func (_Staking *StakingSession) GetDelegatesCount() (*big.Int, error) {
+	return _Staking.Contract.GetDelegatesCount(&_Staking.CallOpts)
+}
+
+// GetDelegatesCount is a free data retrieval call binding the contract method 0x32e28850.
+//
+// Solidity: function getDelegatesCount() view returns(uint256)
+func (_Staking *StakingCallerSession) GetDelegatesCount() (*big.Int, error) {
+	return _Staking.Contract.GetDelegatesCount(&_Staking.CallOpts)
+}
+
+// GetDelegationRateDenominator is a free data retrieval call binding the contract method 0x5e8b40d7.
+//
+// Solidity: function getDelegationRateDenominator() view returns(uint256)
+func (_Staking *StakingCaller) GetDelegationRateDenominator(opts *bind.CallOpts) (*big.Int, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getDelegationRateDenominator")
+
+	if err != nil {
+		return *new(*big.Int), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+	return out0, err
+
+}
+
+// GetDelegationRateDenominator is a free data retrieval call binding the contract method 0x5e8b40d7.
+//
+// Solidity: function getDelegationRateDenominator() view returns(uint256)
+func (_Staking *StakingSession) GetDelegationRateDenominator() (*big.Int, error) {
+	return _Staking.Contract.GetDelegationRateDenominator(&_Staking.CallOpts)
+}
+
+// GetDelegationRateDenominator is a free data retrieval call binding the contract method 0x5e8b40d7.
+//
+// Solidity: function getDelegationRateDenominator() view returns(uint256)
+func (_Staking *StakingCallerSession) GetDelegationRateDenominator() (*big.Int, error) {
+	return _Staking.Contract.GetDelegationRateDenominator(&_Staking.CallOpts)
+}
+
+// GetDelegationReward is a free data retrieval call binding the contract method 0x87e900b1.
+//
+// Solidity: function getDelegationReward(address staker) view returns(uint256)
+func (_Staking *StakingCaller) GetDelegationReward(opts *bind.CallOpts, staker common.Address) (*big.Int, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getDelegationReward", staker)
+
+	if err != nil {
+		return *new(*big.Int), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+	return out0, err
+
+}
+
+// GetDelegationReward is a free data retrieval call binding the contract method 0x87e900b1.
+//
+// Solidity: function getDelegationReward(address staker) view returns(uint256)
+func (_Staking *StakingSession) GetDelegationReward(staker common.Address) (*big.Int, error) {
+	return _Staking.Contract.GetDelegationReward(&_Staking.CallOpts, staker)
+}
+
+// GetDelegationReward is a free data retrieval call binding the contract method 0x87e900b1.
+//
+// Solidity: function getDelegationReward(address staker) view returns(uint256)
+func (_Staking *StakingCallerSession) GetDelegationReward(staker common.Address) (*big.Int, error) {
+	return _Staking.Contract.GetDelegationReward(&_Staking.CallOpts, staker)
+}
+
+// GetEarnedBaseRewards is a free data retrieval call binding the contract method 0x1a9d4c7c.
+//
+// Solidity: function getEarnedBaseRewards() view returns(uint256)
+func (_Staking *StakingCaller) GetEarnedBaseRewards(opts *bind.CallOpts) (*big.Int, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getEarnedBaseRewards")
+
+	if err != nil {
+		return *new(*big.Int), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+	return out0, err
+
+}
+
+// GetEarnedBaseRewards is a free data retrieval call binding the contract method 0x1a9d4c7c.
+//
+// Solidity: function getEarnedBaseRewards() view returns(uint256)
+func (_Staking *StakingSession) GetEarnedBaseRewards() (*big.Int, error) {
+	return _Staking.Contract.GetEarnedBaseRewards(&_Staking.CallOpts)
+}
+
+// GetEarnedBaseRewards is a free data retrieval call binding the contract method 0x1a9d4c7c.
+//
+// Solidity: function getEarnedBaseRewards() view returns(uint256)
+func (_Staking *StakingCallerSession) GetEarnedBaseRewards() (*big.Int, error) {
+	return _Staking.Contract.GetEarnedBaseRewards(&_Staking.CallOpts)
+}
+
+// GetEarnedDelegationRewards is a free data retrieval call binding the contract method 0x74104002.
+//
+// Solidity: function getEarnedDelegationRewards() view returns(uint256)
+func (_Staking *StakingCaller) GetEarnedDelegationRewards(opts *bind.CallOpts) (*big.Int, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getEarnedDelegationRewards")
+
+	if err != nil {
+		return *new(*big.Int), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+	return out0, err
+
+}
+
+// GetEarnedDelegationRewards is a free data retrieval call binding the contract method 0x74104002.
+//
+// Solidity: function getEarnedDelegationRewards() view returns(uint256)
+func (_Staking *StakingSession) GetEarnedDelegationRewards() (*big.Int, error) {
+	return _Staking.Contract.GetEarnedDelegationRewards(&_Staking.CallOpts)
+}
+
+// GetEarnedDelegationRewards is a free data retrieval call binding the contract method 0x74104002.
+//
+// Solidity: function getEarnedDelegationRewards() view returns(uint256)
+func (_Staking *StakingCallerSession) GetEarnedDelegationRewards() (*big.Int, error) {
+	return _Staking.Contract.GetEarnedDelegationRewards(&_Staking.CallOpts)
+}
+
+// GetFeedOperators is a free data retrieval call binding the contract method 0x5fec60f8.
+//
+// Solidity: function getFeedOperators() view returns(address[])
+func (_Staking *StakingCaller) GetFeedOperators(opts *bind.CallOpts) ([]common.Address, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getFeedOperators")
+
+	if err != nil {
+		return *new([]common.Address), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address)
+
+	return out0, err
+
+}
+
+// GetFeedOperators is a free data retrieval call binding the contract method 0x5fec60f8.
+//
+// Solidity: function getFeedOperators() view returns(address[])
+func (_Staking *StakingSession) GetFeedOperators() ([]common.Address, error) {
+	return _Staking.Contract.GetFeedOperators(&_Staking.CallOpts)
+}
+
+// GetFeedOperators is a free data retrieval call binding the contract method 0x5fec60f8.
+//
+// Solidity: function getFeedOperators() view returns(address[])
+func (_Staking *StakingCallerSession) GetFeedOperators() ([]common.Address, error) {
+	return _Staking.Contract.GetFeedOperators(&_Staking.CallOpts)
+}
+
+// GetMaxPoolSize is a free data retrieval call binding the contract method 0x0fbc8f5b.
+//
+// Solidity: function getMaxPoolSize() view returns(uint256)
+func (_Staking *StakingCaller) GetMaxPoolSize(opts *bind.CallOpts) (*big.Int, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getMaxPoolSize")
+
+	if err != nil {
+		return *new(*big.Int), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+	return out0, err
+
+}
+
+// GetMaxPoolSize is a free data retrieval call binding the contract method 0x0fbc8f5b.
+//
+// Solidity: function getMaxPoolSize() view returns(uint256)
+func (_Staking *StakingSession) GetMaxPoolSize() (*big.Int, error) {
+	return _Staking.Contract.GetMaxPoolSize(&_Staking.CallOpts)
+}
+
+// GetMaxPoolSize is a free data retrieval call binding the contract method 0x0fbc8f5b.
+//
+// Solidity: function getMaxPoolSize() view returns(uint256)
+func (_Staking *StakingCallerSession) GetMaxPoolSize() (*big.Int, error) {
+	return _Staking.Contract.GetMaxPoolSize(&_Staking.CallOpts)
+}
+
+// GetMerkleRoot is a free data retrieval call binding the contract method 0x49590657.
+//
+// Solidity: function getMerkleRoot() view returns(bytes32)
+func (_Staking *StakingCaller) GetMerkleRoot(opts *bind.CallOpts) ([32]byte, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getMerkleRoot")
+
+	if err != nil {
+		return *new([32]byte), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)
+
+	return out0, err
+
+}
+
+// GetMerkleRoot is a free data retrieval call binding the contract method 0x49590657.
+//
+// Solidity: function getMerkleRoot() view returns(bytes32)
+func (_Staking *StakingSession) GetMerkleRoot() ([32]byte, error) {
+	return _Staking.Contract.GetMerkleRoot(&_Staking.CallOpts)
+}
+
+// GetMerkleRoot is a free data retrieval call binding the contract method 0x49590657.
+//
+// Solidity: function getMerkleRoot() view returns(bytes32)
+func (_Staking *StakingCallerSession) GetMerkleRoot() ([32]byte, error) {
+	return _Staking.Contract.GetMerkleRoot(&_Staking.CallOpts)
+}
+
+// GetMigrationTarget is a free data retrieval call binding the contract method 0x1ddb5552.
+//
+// Solidity: function getMigrationTarget() view returns(address)
+func (_Staking *StakingCaller) GetMigrationTarget(opts *bind.CallOpts) (common.Address, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getMigrationTarget")
+
+	if err != nil {
+		return *new(common.Address), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
+
+	return out0, err
+
+}
+
+// GetMigrationTarget is a free data retrieval call binding the contract method 0x1ddb5552.
+//
+// Solidity: function getMigrationTarget() view returns(address)
+func (_Staking *StakingSession) GetMigrationTarget() (common.Address, error) {
+	return _Staking.Contract.GetMigrationTarget(&_Staking.CallOpts)
+}
+
+// GetMigrationTarget is a free data retrieval call binding the contract method 0x1ddb5552.
+//
+// Solidity: function getMigrationTarget() view returns(address)
+func (_Staking *StakingCallerSession) GetMigrationTarget() (common.Address, error) {
+	return _Staking.Contract.GetMigrationTarget(&_Staking.CallOpts)
+}
+
+// GetMonitoredFeed is a free data retrieval call binding the contract method 0x83db28a0.
+//
+// Solidity: function getMonitoredFeed() view returns(address)
+func (_Staking *StakingCaller) GetMonitoredFeed(opts *bind.CallOpts) (common.Address, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getMonitoredFeed")
+
+	if err != nil {
+		return *new(common.Address), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)
+
+	return out0, err
+
+}
+
+// GetMonitoredFeed is a free data retrieval call binding the contract method 0x83db28a0.
+//
+// Solidity: function getMonitoredFeed() view returns(address)
+func (_Staking *StakingSession) GetMonitoredFeed() (common.Address, error) {
+	return _Staking.Contract.GetMonitoredFeed(&_Staking.CallOpts)
+}
+
+// GetMonitoredFeed is a free data retrieval call binding the contract method 0x83db28a0.
+//
+// Solidity: function getMonitoredFeed() view returns(address)
+func (_Staking *StakingCallerSession) GetMonitoredFeed() (common.Address, error) {
+	return _Staking.Contract.GetMonitoredFeed(&_Staking.CallOpts)
+}
+
+// GetOperatorLimits is a free data retrieval call binding the contract method 0x8856398f.
+//
+// Solidity: function getOperatorLimits() view returns(uint256, uint256)
+func (_Staking *StakingCaller) GetOperatorLimits(opts *bind.CallOpts) (*big.Int, *big.Int, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getOperatorLimits")
+
+	if err != nil {
+		return *new(*big.Int), *new(*big.Int), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+	out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int)
+
+	return out0, out1, err
+
+}
+
+// GetOperatorLimits is a free data retrieval call binding the contract method 0x8856398f.
+//
+// Solidity: function getOperatorLimits() view returns(uint256, uint256)
+func (_Staking *StakingSession) GetOperatorLimits() (*big.Int, *big.Int, error) {
+	return _Staking.Contract.GetOperatorLimits(&_Staking.CallOpts)
+}
+
+// GetOperatorLimits is a free data retrieval call binding the contract method 0x8856398f.
+//
+// Solidity: function getOperatorLimits() view returns(uint256, uint256)
+func (_Staking *StakingCallerSession) GetOperatorLimits() (*big.Int, *big.Int, error) {
+	return _Staking.Contract.GetOperatorLimits(&_Staking.CallOpts)
+}
+
+// GetRewardRate is a free data retrieval call binding the contract method 0x7e1a3786.
+//
+// Solidity: function getRewardRate() view returns(uint256)
+func (_Staking *StakingCaller) GetRewardRate(opts *bind.CallOpts) (*big.Int, error) {
+	var out []interface{}
+	err := _Staking.contract.Call(opts, &out, "getRewardRate")
+
+	if err != nil {
+		return *new(*big.Int), err
+	}
+
+	out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)
+
+	return out0, err
+
+}
+
+// GetRewardRate is a free data retrieval call binding the contract method 0x7e1a3786.
+//
+// Solidity: function getRewardRate() view returns(uint256)
+func (_Staking *StakingSession) GetRewardRate() (*big.Int, error) {
+	return _Staking.Contract.GetRewardRate(&_Staking.CallOpts)
+}
+
+// GetRewardRate is a free data retrieval call binding the contract method 0x7e1a3786.
+// +// Solidity: function getRewardRate() view returns(uint256) +func (_Staking *StakingCallerSession) GetRewardRate() (*big.Int, error) { + return _Staking.Contract.GetRewardRate(&_Staking.CallOpts) +} + +// GetRewardTimestamps is a free data retrieval call binding the contract method 0x59f01879. +// +// Solidity: function getRewardTimestamps() view returns(uint256, uint256) +func (_Staking *StakingCaller) GetRewardTimestamps(opts *bind.CallOpts) (*big.Int, *big.Int, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "getRewardTimestamps") + + if err != nil { + return *new(*big.Int), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +// GetRewardTimestamps is a free data retrieval call binding the contract method 0x59f01879. +// +// Solidity: function getRewardTimestamps() view returns(uint256, uint256) +func (_Staking *StakingSession) GetRewardTimestamps() (*big.Int, *big.Int, error) { + return _Staking.Contract.GetRewardTimestamps(&_Staking.CallOpts) +} + +// GetRewardTimestamps is a free data retrieval call binding the contract method 0x59f01879. +// +// Solidity: function getRewardTimestamps() view returns(uint256, uint256) +func (_Staking *StakingCallerSession) GetRewardTimestamps() (*big.Int, *big.Int, error) { + return _Staking.Contract.GetRewardTimestamps(&_Staking.CallOpts) +} + +// GetStake is a free data retrieval call binding the contract method 0x7a766460. 
+// +// Solidity: function getStake(address staker) view returns(uint256) +func (_Staking *StakingCaller) GetStake(opts *bind.CallOpts, staker common.Address) (*big.Int, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "getStake", staker) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// GetStake is a free data retrieval call binding the contract method 0x7a766460. +// +// Solidity: function getStake(address staker) view returns(uint256) +func (_Staking *StakingSession) GetStake(staker common.Address) (*big.Int, error) { + return _Staking.Contract.GetStake(&_Staking.CallOpts, staker) +} + +// GetStake is a free data retrieval call binding the contract method 0x7a766460. +// +// Solidity: function getStake(address staker) view returns(uint256) +func (_Staking *StakingCallerSession) GetStake(staker common.Address) (*big.Int, error) { + return _Staking.Contract.GetStake(&_Staking.CallOpts, staker) +} + +// GetTotalCommunityStakedAmount is a free data retrieval call binding the contract method 0x049b2ca0. +// +// Solidity: function getTotalCommunityStakedAmount() view returns(uint256) +func (_Staking *StakingCaller) GetTotalCommunityStakedAmount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "getTotalCommunityStakedAmount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// GetTotalCommunityStakedAmount is a free data retrieval call binding the contract method 0x049b2ca0. 
+// +// Solidity: function getTotalCommunityStakedAmount() view returns(uint256) +func (_Staking *StakingSession) GetTotalCommunityStakedAmount() (*big.Int, error) { + return _Staking.Contract.GetTotalCommunityStakedAmount(&_Staking.CallOpts) +} + +// GetTotalCommunityStakedAmount is a free data retrieval call binding the contract method 0x049b2ca0. +// +// Solidity: function getTotalCommunityStakedAmount() view returns(uint256) +func (_Staking *StakingCallerSession) GetTotalCommunityStakedAmount() (*big.Int, error) { + return _Staking.Contract.GetTotalCommunityStakedAmount(&_Staking.CallOpts) +} + +// GetTotalDelegatedAmount is a free data retrieval call binding the contract method 0xa7a2f5aa. +// +// Solidity: function getTotalDelegatedAmount() view returns(uint256) +func (_Staking *StakingCaller) GetTotalDelegatedAmount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "getTotalDelegatedAmount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// GetTotalDelegatedAmount is a free data retrieval call binding the contract method 0xa7a2f5aa. +// +// Solidity: function getTotalDelegatedAmount() view returns(uint256) +func (_Staking *StakingSession) GetTotalDelegatedAmount() (*big.Int, error) { + return _Staking.Contract.GetTotalDelegatedAmount(&_Staking.CallOpts) +} + +// GetTotalDelegatedAmount is a free data retrieval call binding the contract method 0xa7a2f5aa. +// +// Solidity: function getTotalDelegatedAmount() view returns(uint256) +func (_Staking *StakingCallerSession) GetTotalDelegatedAmount() (*big.Int, error) { + return _Staking.Contract.GetTotalDelegatedAmount(&_Staking.CallOpts) +} + +// GetTotalRemovedAmount is a free data retrieval call binding the contract method 0x8019e7d0. 
+// +// Solidity: function getTotalRemovedAmount() view returns(uint256) +func (_Staking *StakingCaller) GetTotalRemovedAmount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "getTotalRemovedAmount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// GetTotalRemovedAmount is a free data retrieval call binding the contract method 0x8019e7d0. +// +// Solidity: function getTotalRemovedAmount() view returns(uint256) +func (_Staking *StakingSession) GetTotalRemovedAmount() (*big.Int, error) { + return _Staking.Contract.GetTotalRemovedAmount(&_Staking.CallOpts) +} + +// GetTotalRemovedAmount is a free data retrieval call binding the contract method 0x8019e7d0. +// +// Solidity: function getTotalRemovedAmount() view returns(uint256) +func (_Staking *StakingCallerSession) GetTotalRemovedAmount() (*big.Int, error) { + return _Staking.Contract.GetTotalRemovedAmount(&_Staking.CallOpts) +} + +// GetTotalStakedAmount is a free data retrieval call binding the contract method 0x38adb6f0. +// +// Solidity: function getTotalStakedAmount() view returns(uint256) +func (_Staking *StakingCaller) GetTotalStakedAmount(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "getTotalStakedAmount") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// GetTotalStakedAmount is a free data retrieval call binding the contract method 0x38adb6f0. +// +// Solidity: function getTotalStakedAmount() view returns(uint256) +func (_Staking *StakingSession) GetTotalStakedAmount() (*big.Int, error) { + return _Staking.Contract.GetTotalStakedAmount(&_Staking.CallOpts) +} + +// GetTotalStakedAmount is a free data retrieval call binding the contract method 0x38adb6f0. 
+// +// Solidity: function getTotalStakedAmount() view returns(uint256) +func (_Staking *StakingCallerSession) GetTotalStakedAmount() (*big.Int, error) { + return _Staking.Contract.GetTotalStakedAmount(&_Staking.CallOpts) +} + +// HasAccess is a free data retrieval call binding the contract method 0x9d0a3864. +// +// Solidity: function hasAccess(address staker, bytes32[] proof) view returns(bool) +func (_Staking *StakingCaller) HasAccess(opts *bind.CallOpts, staker common.Address, proof [][32]byte) (bool, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "hasAccess", staker, proof) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// HasAccess is a free data retrieval call binding the contract method 0x9d0a3864. +// +// Solidity: function hasAccess(address staker, bytes32[] proof) view returns(bool) +func (_Staking *StakingSession) HasAccess(staker common.Address, proof [][32]byte) (bool, error) { + return _Staking.Contract.HasAccess(&_Staking.CallOpts, staker, proof) +} + +// HasAccess is a free data retrieval call binding the contract method 0x9d0a3864. +// +// Solidity: function hasAccess(address staker, bytes32[] proof) view returns(bool) +func (_Staking *StakingCallerSession) HasAccess(staker common.Address, proof [][32]byte) (bool, error) { + return _Staking.Contract.HasAccess(&_Staking.CallOpts, staker, proof) +} + +// IsActive is a free data retrieval call binding the contract method 0x22f3e2d4. +// +// Solidity: function isActive() view returns(bool) +func (_Staking *StakingCaller) IsActive(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "isActive") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// IsActive is a free data retrieval call binding the contract method 0x22f3e2d4. 
+// +// Solidity: function isActive() view returns(bool) +func (_Staking *StakingSession) IsActive() (bool, error) { + return _Staking.Contract.IsActive(&_Staking.CallOpts) +} + +// IsActive is a free data retrieval call binding the contract method 0x22f3e2d4. +// +// Solidity: function isActive() view returns(bool) +func (_Staking *StakingCallerSession) IsActive() (bool, error) { + return _Staking.Contract.IsActive(&_Staking.CallOpts) +} + +// IsOperator is a free data retrieval call binding the contract method 0x6d70f7ae. +// +// Solidity: function isOperator(address staker) view returns(bool) +func (_Staking *StakingCaller) IsOperator(opts *bind.CallOpts, staker common.Address) (bool, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "isOperator", staker) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// IsOperator is a free data retrieval call binding the contract method 0x6d70f7ae. +// +// Solidity: function isOperator(address staker) view returns(bool) +func (_Staking *StakingSession) IsOperator(staker common.Address) (bool, error) { + return _Staking.Contract.IsOperator(&_Staking.CallOpts, staker) +} + +// IsOperator is a free data retrieval call binding the contract method 0x6d70f7ae. +// +// Solidity: function isOperator(address staker) view returns(bool) +func (_Staking *StakingCallerSession) IsOperator(staker common.Address) (bool, error) { + return _Staking.Contract.IsOperator(&_Staking.CallOpts, staker) +} + +// IsPaused is a free data retrieval call binding the contract method 0xb187bd26. 
+// +// Solidity: function isPaused() view returns(bool) +func (_Staking *StakingCaller) IsPaused(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "isPaused") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// IsPaused is a free data retrieval call binding the contract method 0xb187bd26. +// +// Solidity: function isPaused() view returns(bool) +func (_Staking *StakingSession) IsPaused() (bool, error) { + return _Staking.Contract.IsPaused(&_Staking.CallOpts) +} + +// IsPaused is a free data retrieval call binding the contract method 0xb187bd26. +// +// Solidity: function isPaused() view returns(bool) +func (_Staking *StakingCallerSession) IsPaused() (bool, error) { + return _Staking.Contract.IsPaused(&_Staking.CallOpts) +} + +// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. +// +// Solidity: function owner() view returns(address) +func (_Staking *StakingCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. +// +// Solidity: function owner() view returns(address) +func (_Staking *StakingSession) Owner() (common.Address, error) { + return _Staking.Contract.Owner(&_Staking.CallOpts) +} + +// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. +// +// Solidity: function owner() view returns(address) +func (_Staking *StakingCallerSession) Owner() (common.Address, error) { + return _Staking.Contract.Owner(&_Staking.CallOpts) +} + +// Paused is a free data retrieval call binding the contract method 0x5c975abb. 
+// +// Solidity: function paused() view returns(bool) +func (_Staking *StakingCaller) Paused(opts *bind.CallOpts) (bool, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "paused") + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// Paused is a free data retrieval call binding the contract method 0x5c975abb. +// +// Solidity: function paused() view returns(bool) +func (_Staking *StakingSession) Paused() (bool, error) { + return _Staking.Contract.Paused(&_Staking.CallOpts) +} + +// Paused is a free data retrieval call binding the contract method 0x5c975abb. +// +// Solidity: function paused() view returns(bool) +func (_Staking *StakingCallerSession) Paused() (bool, error) { + return _Staking.Contract.Paused(&_Staking.CallOpts) +} + +// TypeAndVersion is a free data retrieval call binding the contract method 0x181f5a77. +// +// Solidity: function typeAndVersion() pure returns(string) +func (_Staking *StakingCaller) TypeAndVersion(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _Staking.contract.Call(opts, &out, "typeAndVersion") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +// TypeAndVersion is a free data retrieval call binding the contract method 0x181f5a77. +// +// Solidity: function typeAndVersion() pure returns(string) +func (_Staking *StakingSession) TypeAndVersion() (string, error) { + return _Staking.Contract.TypeAndVersion(&_Staking.CallOpts) +} + +// TypeAndVersion is a free data retrieval call binding the contract method 0x181f5a77. +// +// Solidity: function typeAndVersion() pure returns(string) +func (_Staking *StakingCallerSession) TypeAndVersion() (string, error) { + return _Staking.Contract.TypeAndVersion(&_Staking.CallOpts) +} + +// AcceptMigrationTarget is a paid mutator transaction binding the contract method 0xe937fdaa. 
+// +// Solidity: function acceptMigrationTarget() returns() +func (_Staking *StakingTransactor) AcceptMigrationTarget(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "acceptMigrationTarget") +} + +// AcceptMigrationTarget is a paid mutator transaction binding the contract method 0xe937fdaa. +// +// Solidity: function acceptMigrationTarget() returns() +func (_Staking *StakingSession) AcceptMigrationTarget() (*types.Transaction, error) { + return _Staking.Contract.AcceptMigrationTarget(&_Staking.TransactOpts) +} + +// AcceptMigrationTarget is a paid mutator transaction binding the contract method 0xe937fdaa. +// +// Solidity: function acceptMigrationTarget() returns() +func (_Staking *StakingTransactorSession) AcceptMigrationTarget() (*types.Transaction, error) { + return _Staking.Contract.AcceptMigrationTarget(&_Staking.TransactOpts) +} + +// AcceptOwnership is a paid mutator transaction binding the contract method 0x79ba5097. +// +// Solidity: function acceptOwnership() returns() +func (_Staking *StakingTransactor) AcceptOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "acceptOwnership") +} + +// AcceptOwnership is a paid mutator transaction binding the contract method 0x79ba5097. +// +// Solidity: function acceptOwnership() returns() +func (_Staking *StakingSession) AcceptOwnership() (*types.Transaction, error) { + return _Staking.Contract.AcceptOwnership(&_Staking.TransactOpts) +} + +// AcceptOwnership is a paid mutator transaction binding the contract method 0x79ba5097. +// +// Solidity: function acceptOwnership() returns() +func (_Staking *StakingTransactorSession) AcceptOwnership() (*types.Transaction, error) { + return _Staking.Contract.AcceptOwnership(&_Staking.TransactOpts) +} + +// AddOperators is a paid mutator transaction binding the contract method 0xa07aea1c. 
+// +// Solidity: function addOperators(address[] operators) returns() +func (_Staking *StakingTransactor) AddOperators(opts *bind.TransactOpts, operators []common.Address) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "addOperators", operators) +} + +// AddOperators is a paid mutator transaction binding the contract method 0xa07aea1c. +// +// Solidity: function addOperators(address[] operators) returns() +func (_Staking *StakingSession) AddOperators(operators []common.Address) (*types.Transaction, error) { + return _Staking.Contract.AddOperators(&_Staking.TransactOpts, operators) +} + +// AddOperators is a paid mutator transaction binding the contract method 0xa07aea1c. +// +// Solidity: function addOperators(address[] operators) returns() +func (_Staking *StakingTransactorSession) AddOperators(operators []common.Address) (*types.Transaction, error) { + return _Staking.Contract.AddOperators(&_Staking.TransactOpts, operators) +} + +// AddReward is a paid mutator transaction binding the contract method 0x74de4ec4. +// +// Solidity: function addReward(uint256 amount) returns() +func (_Staking *StakingTransactor) AddReward(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "addReward", amount) +} + +// AddReward is a paid mutator transaction binding the contract method 0x74de4ec4. +// +// Solidity: function addReward(uint256 amount) returns() +func (_Staking *StakingSession) AddReward(amount *big.Int) (*types.Transaction, error) { + return _Staking.Contract.AddReward(&_Staking.TransactOpts, amount) +} + +// AddReward is a paid mutator transaction binding the contract method 0x74de4ec4. 
+// +// Solidity: function addReward(uint256 amount) returns() +func (_Staking *StakingTransactorSession) AddReward(amount *big.Int) (*types.Transaction, error) { + return _Staking.Contract.AddReward(&_Staking.TransactOpts, amount) +} + +// ChangeRewardRate is a paid mutator transaction binding the contract method 0x74f237c4. +// +// Solidity: function changeRewardRate(uint256 newRate) returns() +func (_Staking *StakingTransactor) ChangeRewardRate(opts *bind.TransactOpts, newRate *big.Int) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "changeRewardRate", newRate) +} + +// ChangeRewardRate is a paid mutator transaction binding the contract method 0x74f237c4. +// +// Solidity: function changeRewardRate(uint256 newRate) returns() +func (_Staking *StakingSession) ChangeRewardRate(newRate *big.Int) (*types.Transaction, error) { + return _Staking.Contract.ChangeRewardRate(&_Staking.TransactOpts, newRate) +} + +// ChangeRewardRate is a paid mutator transaction binding the contract method 0x74f237c4. +// +// Solidity: function changeRewardRate(uint256 newRate) returns() +func (_Staking *StakingTransactorSession) ChangeRewardRate(newRate *big.Int) (*types.Transaction, error) { + return _Staking.Contract.ChangeRewardRate(&_Staking.TransactOpts, newRate) +} + +// Conclude is a paid mutator transaction binding the contract method 0xe5f92973. +// +// Solidity: function conclude() returns() +func (_Staking *StakingTransactor) Conclude(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "conclude") +} + +// Conclude is a paid mutator transaction binding the contract method 0xe5f92973. +// +// Solidity: function conclude() returns() +func (_Staking *StakingSession) Conclude() (*types.Transaction, error) { + return _Staking.Contract.Conclude(&_Staking.TransactOpts) +} + +// Conclude is a paid mutator transaction binding the contract method 0xe5f92973. 
+// +// Solidity: function conclude() returns() +func (_Staking *StakingTransactorSession) Conclude() (*types.Transaction, error) { + return _Staking.Contract.Conclude(&_Staking.TransactOpts) +} + +// EmergencyPause is a paid mutator transaction binding the contract method 0x51858e27. +// +// Solidity: function emergencyPause() returns() +func (_Staking *StakingTransactor) EmergencyPause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "emergencyPause") +} + +// EmergencyPause is a paid mutator transaction binding the contract method 0x51858e27. +// +// Solidity: function emergencyPause() returns() +func (_Staking *StakingSession) EmergencyPause() (*types.Transaction, error) { + return _Staking.Contract.EmergencyPause(&_Staking.TransactOpts) +} + +// EmergencyPause is a paid mutator transaction binding the contract method 0x51858e27. +// +// Solidity: function emergencyPause() returns() +func (_Staking *StakingTransactorSession) EmergencyPause() (*types.Transaction, error) { + return _Staking.Contract.EmergencyPause(&_Staking.TransactOpts) +} + +// EmergencyUnpause is a paid mutator transaction binding the contract method 0x4a4e3bd5. +// +// Solidity: function emergencyUnpause() returns() +func (_Staking *StakingTransactor) EmergencyUnpause(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "emergencyUnpause") +} + +// EmergencyUnpause is a paid mutator transaction binding the contract method 0x4a4e3bd5. +// +// Solidity: function emergencyUnpause() returns() +func (_Staking *StakingSession) EmergencyUnpause() (*types.Transaction, error) { + return _Staking.Contract.EmergencyUnpause(&_Staking.TransactOpts) +} + +// EmergencyUnpause is a paid mutator transaction binding the contract method 0x4a4e3bd5. 
+// +// Solidity: function emergencyUnpause() returns() +func (_Staking *StakingTransactorSession) EmergencyUnpause() (*types.Transaction, error) { + return _Staking.Contract.EmergencyUnpause(&_Staking.TransactOpts) +} + +// Migrate is a paid mutator transaction binding the contract method 0x8932a90d. +// +// Solidity: function migrate(bytes data) returns() +func (_Staking *StakingTransactor) Migrate(opts *bind.TransactOpts, data []byte) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "migrate", data) +} + +// Migrate is a paid mutator transaction binding the contract method 0x8932a90d. +// +// Solidity: function migrate(bytes data) returns() +func (_Staking *StakingSession) Migrate(data []byte) (*types.Transaction, error) { + return _Staking.Contract.Migrate(&_Staking.TransactOpts, data) +} + +// Migrate is a paid mutator transaction binding the contract method 0x8932a90d. +// +// Solidity: function migrate(bytes data) returns() +func (_Staking *StakingTransactorSession) Migrate(data []byte) (*types.Transaction, error) { + return _Staking.Contract.Migrate(&_Staking.TransactOpts, data) +} + +// OnTokenTransfer is a paid mutator transaction binding the contract method 0xa4c0ed36. +// +// Solidity: function onTokenTransfer(address sender, uint256 amount, bytes data) returns() +func (_Staking *StakingTransactor) OnTokenTransfer(opts *bind.TransactOpts, sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "onTokenTransfer", sender, amount, data) +} + +// OnTokenTransfer is a paid mutator transaction binding the contract method 0xa4c0ed36. 
+// +// Solidity: function onTokenTransfer(address sender, uint256 amount, bytes data) returns() +func (_Staking *StakingSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _Staking.Contract.OnTokenTransfer(&_Staking.TransactOpts, sender, amount, data) +} + +// OnTokenTransfer is a paid mutator transaction binding the contract method 0xa4c0ed36. +// +// Solidity: function onTokenTransfer(address sender, uint256 amount, bytes data) returns() +func (_Staking *StakingTransactorSession) OnTokenTransfer(sender common.Address, amount *big.Int, data []byte) (*types.Transaction, error) { + return _Staking.Contract.OnTokenTransfer(&_Staking.TransactOpts, sender, amount, data) +} + +// ProposeMigrationTarget is a paid mutator transaction binding the contract method 0x63b2c85a. +// +// Solidity: function proposeMigrationTarget(address migrationTarget) returns() +func (_Staking *StakingTransactor) ProposeMigrationTarget(opts *bind.TransactOpts, migrationTarget common.Address) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "proposeMigrationTarget", migrationTarget) +} + +// ProposeMigrationTarget is a paid mutator transaction binding the contract method 0x63b2c85a. +// +// Solidity: function proposeMigrationTarget(address migrationTarget) returns() +func (_Staking *StakingSession) ProposeMigrationTarget(migrationTarget common.Address) (*types.Transaction, error) { + return _Staking.Contract.ProposeMigrationTarget(&_Staking.TransactOpts, migrationTarget) +} + +// ProposeMigrationTarget is a paid mutator transaction binding the contract method 0x63b2c85a. 
+// +// Solidity: function proposeMigrationTarget(address migrationTarget) returns() +func (_Staking *StakingTransactorSession) ProposeMigrationTarget(migrationTarget common.Address) (*types.Transaction, error) { + return _Staking.Contract.ProposeMigrationTarget(&_Staking.TransactOpts, migrationTarget) +} + +// RaiseAlert is a paid mutator transaction binding the contract method 0xda9c732f. +// +// Solidity: function raiseAlert() returns() +func (_Staking *StakingTransactor) RaiseAlert(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "raiseAlert") +} + +// RaiseAlert is a paid mutator transaction binding the contract method 0xda9c732f. +// +// Solidity: function raiseAlert() returns() +func (_Staking *StakingSession) RaiseAlert() (*types.Transaction, error) { + return _Staking.Contract.RaiseAlert(&_Staking.TransactOpts) +} + +// RaiseAlert is a paid mutator transaction binding the contract method 0xda9c732f. +// +// Solidity: function raiseAlert() returns() +func (_Staking *StakingTransactorSession) RaiseAlert() (*types.Transaction, error) { + return _Staking.Contract.RaiseAlert(&_Staking.TransactOpts) +} + +// RemoveOperators is a paid mutator transaction binding the contract method 0xd365a377. +// +// Solidity: function removeOperators(address[] operators) returns() +func (_Staking *StakingTransactor) RemoveOperators(opts *bind.TransactOpts, operators []common.Address) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "removeOperators", operators) +} + +// RemoveOperators is a paid mutator transaction binding the contract method 0xd365a377. +// +// Solidity: function removeOperators(address[] operators) returns() +func (_Staking *StakingSession) RemoveOperators(operators []common.Address) (*types.Transaction, error) { + return _Staking.Contract.RemoveOperators(&_Staking.TransactOpts, operators) +} + +// RemoveOperators is a paid mutator transaction binding the contract method 0xd365a377. 
+// +// Solidity: function removeOperators(address[] operators) returns() +func (_Staking *StakingTransactorSession) RemoveOperators(operators []common.Address) (*types.Transaction, error) { + return _Staking.Contract.RemoveOperators(&_Staking.TransactOpts, operators) +} + +// SetFeedOperators is a paid mutator transaction binding the contract method 0xbfbd9b1b. +// +// Solidity: function setFeedOperators(address[] operators) returns() +func (_Staking *StakingTransactor) SetFeedOperators(opts *bind.TransactOpts, operators []common.Address) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "setFeedOperators", operators) +} + +// SetFeedOperators is a paid mutator transaction binding the contract method 0xbfbd9b1b. +// +// Solidity: function setFeedOperators(address[] operators) returns() +func (_Staking *StakingSession) SetFeedOperators(operators []common.Address) (*types.Transaction, error) { + return _Staking.Contract.SetFeedOperators(&_Staking.TransactOpts, operators) +} + +// SetFeedOperators is a paid mutator transaction binding the contract method 0xbfbd9b1b. +// +// Solidity: function setFeedOperators(address[] operators) returns() +func (_Staking *StakingTransactorSession) SetFeedOperators(operators []common.Address) (*types.Transaction, error) { + return _Staking.Contract.SetFeedOperators(&_Staking.TransactOpts, operators) +} + +// SetMerkleRoot is a paid mutator transaction binding the contract method 0x7cb64759. +// +// Solidity: function setMerkleRoot(bytes32 newMerkleRoot) returns() +func (_Staking *StakingTransactor) SetMerkleRoot(opts *bind.TransactOpts, newMerkleRoot [32]byte) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "setMerkleRoot", newMerkleRoot) +} + +// SetMerkleRoot is a paid mutator transaction binding the contract method 0x7cb64759. 
+// +// Solidity: function setMerkleRoot(bytes32 newMerkleRoot) returns() +func (_Staking *StakingSession) SetMerkleRoot(newMerkleRoot [32]byte) (*types.Transaction, error) { + return _Staking.Contract.SetMerkleRoot(&_Staking.TransactOpts, newMerkleRoot) +} + +// SetMerkleRoot is a paid mutator transaction binding the contract method 0x7cb64759. +// +// Solidity: function setMerkleRoot(bytes32 newMerkleRoot) returns() +func (_Staking *StakingTransactorSession) SetMerkleRoot(newMerkleRoot [32]byte) (*types.Transaction, error) { + return _Staking.Contract.SetMerkleRoot(&_Staking.TransactOpts, newMerkleRoot) +} + +// SetPoolConfig is a paid mutator transaction binding the contract method 0x8a44f337. +// +// Solidity: function setPoolConfig(uint256 maxPoolSize, uint256 maxCommunityStakeAmount, uint256 maxOperatorStakeAmount) returns() +func (_Staking *StakingTransactor) SetPoolConfig(opts *bind.TransactOpts, maxPoolSize *big.Int, maxCommunityStakeAmount *big.Int, maxOperatorStakeAmount *big.Int) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "setPoolConfig", maxPoolSize, maxCommunityStakeAmount, maxOperatorStakeAmount) +} + +// SetPoolConfig is a paid mutator transaction binding the contract method 0x8a44f337. +// +// Solidity: function setPoolConfig(uint256 maxPoolSize, uint256 maxCommunityStakeAmount, uint256 maxOperatorStakeAmount) returns() +func (_Staking *StakingSession) SetPoolConfig(maxPoolSize *big.Int, maxCommunityStakeAmount *big.Int, maxOperatorStakeAmount *big.Int) (*types.Transaction, error) { + return _Staking.Contract.SetPoolConfig(&_Staking.TransactOpts, maxPoolSize, maxCommunityStakeAmount, maxOperatorStakeAmount) +} + +// SetPoolConfig is a paid mutator transaction binding the contract method 0x8a44f337. 
+// +// Solidity: function setPoolConfig(uint256 maxPoolSize, uint256 maxCommunityStakeAmount, uint256 maxOperatorStakeAmount) returns() +func (_Staking *StakingTransactorSession) SetPoolConfig(maxPoolSize *big.Int, maxCommunityStakeAmount *big.Int, maxOperatorStakeAmount *big.Int) (*types.Transaction, error) { + return _Staking.Contract.SetPoolConfig(&_Staking.TransactOpts, maxPoolSize, maxCommunityStakeAmount, maxOperatorStakeAmount) +} + +// Start is a paid mutator transaction binding the contract method 0x8fb4b573. +// +// Solidity: function start(uint256 amount, uint256 initialRewardRate) returns() +func (_Staking *StakingTransactor) Start(opts *bind.TransactOpts, amount *big.Int, initialRewardRate *big.Int) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "start", amount, initialRewardRate) +} + +// Start is a paid mutator transaction binding the contract method 0x8fb4b573. +// +// Solidity: function start(uint256 amount, uint256 initialRewardRate) returns() +func (_Staking *StakingSession) Start(amount *big.Int, initialRewardRate *big.Int) (*types.Transaction, error) { + return _Staking.Contract.Start(&_Staking.TransactOpts, amount, initialRewardRate) +} + +// Start is a paid mutator transaction binding the contract method 0x8fb4b573. +// +// Solidity: function start(uint256 amount, uint256 initialRewardRate) returns() +func (_Staking *StakingTransactorSession) Start(amount *big.Int, initialRewardRate *big.Int) (*types.Transaction, error) { + return _Staking.Contract.Start(&_Staking.TransactOpts, amount, initialRewardRate) +} + +// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. 
+// +// Solidity: function transferOwnership(address to) returns() +func (_Staking *StakingTransactor) TransferOwnership(opts *bind.TransactOpts, to common.Address) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "transferOwnership", to) +} + +// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. +// +// Solidity: function transferOwnership(address to) returns() +func (_Staking *StakingSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _Staking.Contract.TransferOwnership(&_Staking.TransactOpts, to) +} + +// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. +// +// Solidity: function transferOwnership(address to) returns() +func (_Staking *StakingTransactorSession) TransferOwnership(to common.Address) (*types.Transaction, error) { + return _Staking.Contract.TransferOwnership(&_Staking.TransactOpts, to) +} + +// Unstake is a paid mutator transaction binding the contract method 0x2def6620. +// +// Solidity: function unstake() returns() +func (_Staking *StakingTransactor) Unstake(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "unstake") +} + +// Unstake is a paid mutator transaction binding the contract method 0x2def6620. +// +// Solidity: function unstake() returns() +func (_Staking *StakingSession) Unstake() (*types.Transaction, error) { + return _Staking.Contract.Unstake(&_Staking.TransactOpts) +} + +// Unstake is a paid mutator transaction binding the contract method 0x2def6620. +// +// Solidity: function unstake() returns() +func (_Staking *StakingTransactorSession) Unstake() (*types.Transaction, error) { + return _Staking.Contract.Unstake(&_Staking.TransactOpts) +} + +// WithdrawRemovedStake is a paid mutator transaction binding the contract method 0x5aa6e013. 
+// +// Solidity: function withdrawRemovedStake() returns() +func (_Staking *StakingTransactor) WithdrawRemovedStake(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "withdrawRemovedStake") +} + +// WithdrawRemovedStake is a paid mutator transaction binding the contract method 0x5aa6e013. +// +// Solidity: function withdrawRemovedStake() returns() +func (_Staking *StakingSession) WithdrawRemovedStake() (*types.Transaction, error) { + return _Staking.Contract.WithdrawRemovedStake(&_Staking.TransactOpts) +} + +// WithdrawRemovedStake is a paid mutator transaction binding the contract method 0x5aa6e013. +// +// Solidity: function withdrawRemovedStake() returns() +func (_Staking *StakingTransactorSession) WithdrawRemovedStake() (*types.Transaction, error) { + return _Staking.Contract.WithdrawRemovedStake(&_Staking.TransactOpts) +} + +// WithdrawUnusedReward is a paid mutator transaction binding the contract method 0xebdb56f3. +// +// Solidity: function withdrawUnusedReward() returns() +func (_Staking *StakingTransactor) WithdrawUnusedReward(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Staking.contract.Transact(opts, "withdrawUnusedReward") +} + +// WithdrawUnusedReward is a paid mutator transaction binding the contract method 0xebdb56f3. +// +// Solidity: function withdrawUnusedReward() returns() +func (_Staking *StakingSession) WithdrawUnusedReward() (*types.Transaction, error) { + return _Staking.Contract.WithdrawUnusedReward(&_Staking.TransactOpts) +} + +// WithdrawUnusedReward is a paid mutator transaction binding the contract method 0xebdb56f3. 
+// +// Solidity: function withdrawUnusedReward() returns() +func (_Staking *StakingTransactorSession) WithdrawUnusedReward() (*types.Transaction, error) { + return _Staking.Contract.WithdrawUnusedReward(&_Staking.TransactOpts) +} + +// StakingAlertRaisedIterator is returned from FilterAlertRaised and is used to iterate over the raw logs and unpacked data for AlertRaised events raised by the Staking contract. +type StakingAlertRaisedIterator struct { + Event *StakingAlertRaised // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *StakingAlertRaisedIterator) Next() bool {
+	// If the iterator failed, stop iterating
+	if it.fail != nil {
+		return false
+	}
+	// If the iterator completed, deliver directly whatever's available
+	if it.done {
+		select {
+		case log := <-it.logs:
+			it.Event = new(StakingAlertRaised)
+			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+				it.fail = err
+				return false
+			}
+			it.Event.Raw = log
+			return true
+
+		default:
+			return false
+		}
+	}
+	// Iterator still in progress, wait for either a data or an error event
+	select {
+	case log := <-it.logs:
+		it.Event = new(StakingAlertRaised)
+		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+			it.fail = err
+			return false
+		}
+		it.Event.Raw = log
+		return true
+
+	case err := <-it.sub.Err():
+		it.done = true
+		it.fail = err
+		// NOTE(review): the recursion is bounded — it.done is now true, so the
+		// retry only drains any already-buffered logs and then returns false.
+		return it.Next()
+	}
+}
+
+// Error returns any retrieval or parsing error occurred during filtering.
+func (it *StakingAlertRaisedIterator) Error() error {
+	return it.fail
+}
+
+// Close terminates the iteration process, releasing any pending underlying
+// resources. It always returns nil; the error result exists only to satisfy
+// the conventional Close signature.
+func (it *StakingAlertRaisedIterator) Close() error {
+	it.sub.Unsubscribe()
+	return nil
+}
+
+// StakingAlertRaised represents an AlertRaised event raised by the Staking contract.
+type StakingAlertRaised struct {
+	Alerter      common.Address
+	RoundId      *big.Int
+	RewardAmount *big.Int
+	Raw          types.Log // Blockchain specific contextual infos
+}
+
+// FilterAlertRaised is a free log retrieval operation binding the contract event 0xd2720e8f454493f612cc97499fe8cbce7fa4d4c18d346fe7104e9042df1c1edd.
+//
+// Solidity: event AlertRaised(address alerter, uint256 roundId, uint256 rewardAmount)
+func (_Staking *StakingFilterer) FilterAlertRaised(opts *bind.FilterOpts) (*StakingAlertRaisedIterator, error) {
+
+	logs, sub, err := _Staking.contract.FilterLogs(opts, "AlertRaised")
+	if err != nil {
+		return nil, err
+	}
+	return &StakingAlertRaisedIterator{contract: _Staking.contract, event: "AlertRaised", logs: logs, sub: sub}, nil
+}
+
+// WatchAlertRaised is a free log subscription operation binding the contract event 0xd2720e8f454493f612cc97499fe8cbce7fa4d4c18d346fe7104e9042df1c1edd.
+//
+// Solidity: event AlertRaised(address alerter, uint256 roundId, uint256 rewardAmount)
+func (_Staking *StakingFilterer) WatchAlertRaised(opts *bind.WatchOpts, sink chan<- *StakingAlertRaised) (event.Subscription, error) {
+
+	logs, sub, err := _Staking.contract.WatchLogs(opts, "AlertRaised")
+	if err != nil {
+		return nil, err
+	}
+	return event.NewSubscription(func(quit <-chan struct{}) error {
+		defer sub.Unsubscribe()
+		for {
+			select {
+			case log := <-logs:
+				// New log arrived, parse the event and forward to the user.
+				// NOTE(review): the local variable deliberately shadows the
+				// imported event package for the remainder of this case.
+				event := new(StakingAlertRaised)
+				if err := _Staking.contract.UnpackLog(event, "AlertRaised", log); err != nil {
+					return err
+				}
+				event.Raw = log
+
+				// Forward to the sink, but stay responsive to subscription
+				// errors and caller-initiated shutdown while blocked on send.
+				select {
+				case sink <- event:
+				case err := <-sub.Err():
+					return err
+				case <-quit:
+					return nil
+				}
+			case err := <-sub.Err():
+				return err
+			case <-quit:
+				return nil
+			}
+		}
+	}), nil
+}
+
+// ParseAlertRaised is a log parse operation binding the contract event 0xd2720e8f454493f612cc97499fe8cbce7fa4d4c18d346fe7104e9042df1c1edd.
+//
+// Solidity: event AlertRaised(address alerter, uint256 roundId, uint256 rewardAmount)
+func (_Staking *StakingFilterer) ParseAlertRaised(log types.Log) (*StakingAlertRaised, error) {
+	// NOTE(review): UnpackLog decodes the log data/topics into the event
+	// struct; the caller is responsible for routing only AlertRaised logs
+	// here — no topic0 pre-check is visible at this layer.
+	event := new(StakingAlertRaised)
+	if err := _Staking.contract.UnpackLog(event, "AlertRaised", log); err != nil {
+		return nil, err
+	}
+	event.Raw = log
+	return event, nil
+}
+
+// StakingMerkleRootChangedIterator is returned from FilterMerkleRootChanged and is used to iterate over the raw logs and unpacked data for MerkleRootChanged events raised by the Staking contract.
+type StakingMerkleRootChangedIterator struct {
+	Event *StakingMerkleRootChanged // Event containing the contract specifics and raw log
+
+	contract *bind.BoundContract // Generic contract to use for unpacking event data
+	event    string              // Event name to use for unpacking event data
+
+	logs chan types.Log        // Log channel receiving the found contract events
+	sub  ethereum.Subscription // Subscription for errors, completion and termination
+	done bool                  // Whether the subscription completed delivering logs
+	fail error                 // Occurred error to stop iteration
+}
+
+// Next advances the iterator to the subsequent event, returning whether there
+// are any more events found. In case of a retrieval or parsing error, false is
+// returned and Error() can be queried for the exact failure.
+func (it *StakingMerkleRootChangedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingMerkleRootChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(StakingMerkleRootChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *StakingMerkleRootChangedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *StakingMerkleRootChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// StakingMerkleRootChanged represents a MerkleRootChanged event raised by the Staking contract. +type StakingMerkleRootChanged struct { + NewMerkleRoot [32]byte + Raw types.Log // Blockchain specific contextual infos +} + +// FilterMerkleRootChanged is a free log retrieval operation binding the contract event 0x1b930366dfeaa7eb3b325021e4ae81e36527063452ee55b86c95f85b36f4c31c. 
+// +// Solidity: event MerkleRootChanged(bytes32 newMerkleRoot) +func (_Staking *StakingFilterer) FilterMerkleRootChanged(opts *bind.FilterOpts) (*StakingMerkleRootChangedIterator, error) { + + logs, sub, err := _Staking.contract.FilterLogs(opts, "MerkleRootChanged") + if err != nil { + return nil, err + } + return &StakingMerkleRootChangedIterator{contract: _Staking.contract, event: "MerkleRootChanged", logs: logs, sub: sub}, nil +} + +// WatchMerkleRootChanged is a free log subscription operation binding the contract event 0x1b930366dfeaa7eb3b325021e4ae81e36527063452ee55b86c95f85b36f4c31c. +// +// Solidity: event MerkleRootChanged(bytes32 newMerkleRoot) +func (_Staking *StakingFilterer) WatchMerkleRootChanged(opts *bind.WatchOpts, sink chan<- *StakingMerkleRootChanged) (event.Subscription, error) { + + logs, sub, err := _Staking.contract.WatchLogs(opts, "MerkleRootChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(StakingMerkleRootChanged) + if err := _Staking.contract.UnpackLog(event, "MerkleRootChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseMerkleRootChanged is a log parse operation binding the contract event 0x1b930366dfeaa7eb3b325021e4ae81e36527063452ee55b86c95f85b36f4c31c. 
+// +// Solidity: event MerkleRootChanged(bytes32 newMerkleRoot) +func (_Staking *StakingFilterer) ParseMerkleRootChanged(log types.Log) (*StakingMerkleRootChanged, error) { + event := new(StakingMerkleRootChanged) + if err := _Staking.contract.UnpackLog(event, "MerkleRootChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// StakingMigratedIterator is returned from FilterMigrated and is used to iterate over the raw logs and unpacked data for Migrated events raised by the Staking contract. +type StakingMigratedIterator struct { + Event *StakingMigrated // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *StakingMigratedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(StakingMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *StakingMigratedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *StakingMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// StakingMigrated represents a Migrated event raised by the Staking contract. +type StakingMigrated struct { + Staker common.Address + Principal *big.Int + BaseReward *big.Int + DelegationReward *big.Int + Data []byte + Raw types.Log // Blockchain specific contextual infos +} + +// FilterMigrated is a free log retrieval operation binding the contract event 0x667838b33bdc898470de09e0e746990f2adc11b965b7fe6828e502ebc39e0434. 
+// +// Solidity: event Migrated(address staker, uint256 principal, uint256 baseReward, uint256 delegationReward, bytes data) +func (_Staking *StakingFilterer) FilterMigrated(opts *bind.FilterOpts) (*StakingMigratedIterator, error) { + + logs, sub, err := _Staking.contract.FilterLogs(opts, "Migrated") + if err != nil { + return nil, err + } + return &StakingMigratedIterator{contract: _Staking.contract, event: "Migrated", logs: logs, sub: sub}, nil +} + +// WatchMigrated is a free log subscription operation binding the contract event 0x667838b33bdc898470de09e0e746990f2adc11b965b7fe6828e502ebc39e0434. +// +// Solidity: event Migrated(address staker, uint256 principal, uint256 baseReward, uint256 delegationReward, bytes data) +func (_Staking *StakingFilterer) WatchMigrated(opts *bind.WatchOpts, sink chan<- *StakingMigrated) (event.Subscription, error) { + + logs, sub, err := _Staking.contract.WatchLogs(opts, "Migrated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(StakingMigrated) + if err := _Staking.contract.UnpackLog(event, "Migrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseMigrated is a log parse operation binding the contract event 0x667838b33bdc898470de09e0e746990f2adc11b965b7fe6828e502ebc39e0434. 
+// +// Solidity: event Migrated(address staker, uint256 principal, uint256 baseReward, uint256 delegationReward, bytes data) +func (_Staking *StakingFilterer) ParseMigrated(log types.Log) (*StakingMigrated, error) { + event := new(StakingMigrated) + if err := _Staking.contract.UnpackLog(event, "Migrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// StakingMigrationTargetAcceptedIterator is returned from FilterMigrationTargetAccepted and is used to iterate over the raw logs and unpacked data for MigrationTargetAccepted events raised by the Staking contract. +type StakingMigrationTargetAcceptedIterator struct { + Event *StakingMigrationTargetAccepted // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *StakingMigrationTargetAcceptedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingMigrationTargetAccepted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(StakingMigrationTargetAccepted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *StakingMigrationTargetAcceptedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *StakingMigrationTargetAcceptedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// StakingMigrationTargetAccepted represents a MigrationTargetAccepted event raised by the Staking contract. +type StakingMigrationTargetAccepted struct { + MigrationTarget common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterMigrationTargetAccepted is a free log retrieval operation binding the contract event 0xfa33c052bbee754f3c0482a89962daffe749191fa33c696a61e947fbfd68bd84. 
+// +// Solidity: event MigrationTargetAccepted(address migrationTarget) +func (_Staking *StakingFilterer) FilterMigrationTargetAccepted(opts *bind.FilterOpts) (*StakingMigrationTargetAcceptedIterator, error) { + + logs, sub, err := _Staking.contract.FilterLogs(opts, "MigrationTargetAccepted") + if err != nil { + return nil, err + } + return &StakingMigrationTargetAcceptedIterator{contract: _Staking.contract, event: "MigrationTargetAccepted", logs: logs, sub: sub}, nil +} + +// WatchMigrationTargetAccepted is a free log subscription operation binding the contract event 0xfa33c052bbee754f3c0482a89962daffe749191fa33c696a61e947fbfd68bd84. +// +// Solidity: event MigrationTargetAccepted(address migrationTarget) +func (_Staking *StakingFilterer) WatchMigrationTargetAccepted(opts *bind.WatchOpts, sink chan<- *StakingMigrationTargetAccepted) (event.Subscription, error) { + + logs, sub, err := _Staking.contract.WatchLogs(opts, "MigrationTargetAccepted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(StakingMigrationTargetAccepted) + if err := _Staking.contract.UnpackLog(event, "MigrationTargetAccepted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseMigrationTargetAccepted is a log parse operation binding the contract event 0xfa33c052bbee754f3c0482a89962daffe749191fa33c696a61e947fbfd68bd84. 
+// +// Solidity: event MigrationTargetAccepted(address migrationTarget) +func (_Staking *StakingFilterer) ParseMigrationTargetAccepted(log types.Log) (*StakingMigrationTargetAccepted, error) { + event := new(StakingMigrationTargetAccepted) + if err := _Staking.contract.UnpackLog(event, "MigrationTargetAccepted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// StakingMigrationTargetProposedIterator is returned from FilterMigrationTargetProposed and is used to iterate over the raw logs and unpacked data for MigrationTargetProposed events raised by the Staking contract. +type StakingMigrationTargetProposedIterator struct { + Event *StakingMigrationTargetProposed // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *StakingMigrationTargetProposedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingMigrationTargetProposed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(StakingMigrationTargetProposed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *StakingMigrationTargetProposedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *StakingMigrationTargetProposedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// StakingMigrationTargetProposed represents a MigrationTargetProposed event raised by the Staking contract. +type StakingMigrationTargetProposed struct { + MigrationTarget common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterMigrationTargetProposed is a free log retrieval operation binding the contract event 0x5c74c441be501340b2713817a6c6975e6f3d4a4ae39fa1ac0bf75d3c54a0cad3. 
+// +// Solidity: event MigrationTargetProposed(address migrationTarget) +func (_Staking *StakingFilterer) FilterMigrationTargetProposed(opts *bind.FilterOpts) (*StakingMigrationTargetProposedIterator, error) { + + logs, sub, err := _Staking.contract.FilterLogs(opts, "MigrationTargetProposed") + if err != nil { + return nil, err + } + return &StakingMigrationTargetProposedIterator{contract: _Staking.contract, event: "MigrationTargetProposed", logs: logs, sub: sub}, nil +} + +// WatchMigrationTargetProposed is a free log subscription operation binding the contract event 0x5c74c441be501340b2713817a6c6975e6f3d4a4ae39fa1ac0bf75d3c54a0cad3. +// +// Solidity: event MigrationTargetProposed(address migrationTarget) +func (_Staking *StakingFilterer) WatchMigrationTargetProposed(opts *bind.WatchOpts, sink chan<- *StakingMigrationTargetProposed) (event.Subscription, error) { + + logs, sub, err := _Staking.contract.WatchLogs(opts, "MigrationTargetProposed") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(StakingMigrationTargetProposed) + if err := _Staking.contract.UnpackLog(event, "MigrationTargetProposed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseMigrationTargetProposed is a log parse operation binding the contract event 0x5c74c441be501340b2713817a6c6975e6f3d4a4ae39fa1ac0bf75d3c54a0cad3. 
+// +// Solidity: event MigrationTargetProposed(address migrationTarget) +func (_Staking *StakingFilterer) ParseMigrationTargetProposed(log types.Log) (*StakingMigrationTargetProposed, error) { + event := new(StakingMigrationTargetProposed) + if err := _Staking.contract.UnpackLog(event, "MigrationTargetProposed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// StakingOwnershipTransferRequestedIterator is returned from FilterOwnershipTransferRequested and is used to iterate over the raw logs and unpacked data for OwnershipTransferRequested events raised by the Staking contract. +type StakingOwnershipTransferRequestedIterator struct { + Event *StakingOwnershipTransferRequested // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *StakingOwnershipTransferRequestedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(StakingOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *StakingOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *StakingOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// StakingOwnershipTransferRequested represents a OwnershipTransferRequested event raised by the Staking contract. +type StakingOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterOwnershipTransferRequested is a free log retrieval operation binding the contract event 0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278. 
+// +// Solidity: event OwnershipTransferRequested(address indexed from, address indexed to) +func (_Staking *StakingFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*StakingOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Staking.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &StakingOwnershipTransferRequestedIterator{contract: _Staking.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +// WatchOwnershipTransferRequested is a free log subscription operation binding the contract event 0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278. +// +// Solidity: event OwnershipTransferRequested(address indexed from, address indexed to) +func (_Staking *StakingFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *StakingOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Staking.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(StakingOwnershipTransferRequested) + if err := _Staking.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink 
<- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseOwnershipTransferRequested is a log parse operation binding the contract event 0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278. +// +// Solidity: event OwnershipTransferRequested(address indexed from, address indexed to) +func (_Staking *StakingFilterer) ParseOwnershipTransferRequested(log types.Log) (*StakingOwnershipTransferRequested, error) { + event := new(StakingOwnershipTransferRequested) + if err := _Staking.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// StakingOwnershipTransferredIterator is returned from FilterOwnershipTransferred and is used to iterate over the raw logs and unpacked data for OwnershipTransferred events raised by the Staking contract. +type StakingOwnershipTransferredIterator struct { + Event *StakingOwnershipTransferred // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *StakingOwnershipTransferredIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(StakingOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *StakingOwnershipTransferredIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *StakingOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// StakingOwnershipTransferred represents a OwnershipTransferred event raised by the Staking contract. +type StakingOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterOwnershipTransferred is a free log retrieval operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. 
+// +// Solidity: event OwnershipTransferred(address indexed from, address indexed to) +func (_Staking *StakingFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*StakingOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Staking.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &StakingOwnershipTransferredIterator{contract: _Staking.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +// WatchOwnershipTransferred is a free log subscription operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. +// +// Solidity: event OwnershipTransferred(address indexed from, address indexed to) +func (_Staking *StakingFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *StakingOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _Staking.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(StakingOwnershipTransferred) + if err := _Staking.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil 
+ } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseOwnershipTransferred is a log parse operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. +// +// Solidity: event OwnershipTransferred(address indexed from, address indexed to) +func (_Staking *StakingFilterer) ParseOwnershipTransferred(log types.Log) (*StakingOwnershipTransferred, error) { + event := new(StakingOwnershipTransferred) + if err := _Staking.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// StakingPausedIterator is returned from FilterPaused and is used to iterate over the raw logs and unpacked data for Paused events raised by the Staking contract. +type StakingPausedIterator struct { + Event *StakingPaused // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *StakingPausedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(StakingPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *StakingPausedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *StakingPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// StakingPaused represents a Paused event raised by the Staking contract. +type StakingPaused struct { + Account common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterPaused is a free log retrieval operation binding the contract event 0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258. 
+// +// Solidity: event Paused(address account) +func (_Staking *StakingFilterer) FilterPaused(opts *bind.FilterOpts) (*StakingPausedIterator, error) { + + logs, sub, err := _Staking.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &StakingPausedIterator{contract: _Staking.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +// WatchPaused is a free log subscription operation binding the contract event 0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258. +// +// Solidity: event Paused(address account) +func (_Staking *StakingFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *StakingPaused) (event.Subscription, error) { + + logs, sub, err := _Staking.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(StakingPaused) + if err := _Staking.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParsePaused is a log parse operation binding the contract event 0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258. +// +// Solidity: event Paused(address account) +func (_Staking *StakingFilterer) ParsePaused(log types.Log) (*StakingPaused, error) { + event := new(StakingPaused) + if err := _Staking.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// StakingStakedIterator is returned from FilterStaked and is used to iterate over the raw logs and unpacked data for Staked events raised by the Staking contract. 
+type StakingStakedIterator struct { + Event *StakingStaked // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *StakingStakedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingStaked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(StakingStaked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *StakingStakedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. 
+func (it *StakingStakedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// StakingStaked represents a Staked event raised by the Staking contract. +type StakingStaked struct { + Staker common.Address + NewStake *big.Int + TotalStake *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterStaked is a free log retrieval operation binding the contract event 0x1449c6dd7851abc30abf37f57715f492010519147cc2652fbc38202c18a6ee90. +// +// Solidity: event Staked(address staker, uint256 newStake, uint256 totalStake) +func (_Staking *StakingFilterer) FilterStaked(opts *bind.FilterOpts) (*StakingStakedIterator, error) { + + logs, sub, err := _Staking.contract.FilterLogs(opts, "Staked") + if err != nil { + return nil, err + } + return &StakingStakedIterator{contract: _Staking.contract, event: "Staked", logs: logs, sub: sub}, nil +} + +// WatchStaked is a free log subscription operation binding the contract event 0x1449c6dd7851abc30abf37f57715f492010519147cc2652fbc38202c18a6ee90. 
+// +// Solidity: event Staked(address staker, uint256 newStake, uint256 totalStake) +func (_Staking *StakingFilterer) WatchStaked(opts *bind.WatchOpts, sink chan<- *StakingStaked) (event.Subscription, error) { + + logs, sub, err := _Staking.contract.WatchLogs(opts, "Staked") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(StakingStaked) + if err := _Staking.contract.UnpackLog(event, "Staked", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseStaked is a log parse operation binding the contract event 0x1449c6dd7851abc30abf37f57715f492010519147cc2652fbc38202c18a6ee90. +// +// Solidity: event Staked(address staker, uint256 newStake, uint256 totalStake) +func (_Staking *StakingFilterer) ParseStaked(log types.Log) (*StakingStaked, error) { + event := new(StakingStaked) + if err := _Staking.contract.UnpackLog(event, "Staked", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// StakingUnpausedIterator is returned from FilterUnpaused and is used to iterate over the raw logs and unpacked data for Unpaused events raised by the Staking contract. 
+type StakingUnpausedIterator struct { + Event *StakingUnpaused // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *StakingUnpausedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(StakingUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *StakingUnpausedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. 
+func (it *StakingUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// StakingUnpaused represents a Unpaused event raised by the Staking contract. +type StakingUnpaused struct { + Account common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterUnpaused is a free log retrieval operation binding the contract event 0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa. +// +// Solidity: event Unpaused(address account) +func (_Staking *StakingFilterer) FilterUnpaused(opts *bind.FilterOpts) (*StakingUnpausedIterator, error) { + + logs, sub, err := _Staking.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &StakingUnpausedIterator{contract: _Staking.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +// WatchUnpaused is a free log subscription operation binding the contract event 0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa. +// +// Solidity: event Unpaused(address account) +func (_Staking *StakingFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *StakingUnpaused) (event.Subscription, error) { + + logs, sub, err := _Staking.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(StakingUnpaused) + if err := _Staking.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseUnpaused is a log parse operation binding the contract event 0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa. 
+// +// Solidity: event Unpaused(address account) +func (_Staking *StakingFilterer) ParseUnpaused(log types.Log) (*StakingUnpaused, error) { + event := new(StakingUnpaused) + if err := _Staking.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// StakingUnstakedIterator is returned from FilterUnstaked and is used to iterate over the raw logs and unpacked data for Unstaked events raised by the Staking contract. +type StakingUnstakedIterator struct { + Event *StakingUnstaked // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *StakingUnstakedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingUnstaked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(StakingUnstaked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *StakingUnstakedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *StakingUnstakedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// StakingUnstaked represents a Unstaked event raised by the Staking contract. +type StakingUnstaked struct { + Staker common.Address + Principal *big.Int + BaseReward *big.Int + DelegationReward *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterUnstaked is a free log retrieval operation binding the contract event 0x204fccf0d92ed8d48f204adb39b2e81e92bad0dedb93f5716ca9478cfb57de00. 
+// +// Solidity: event Unstaked(address staker, uint256 principal, uint256 baseReward, uint256 delegationReward) +func (_Staking *StakingFilterer) FilterUnstaked(opts *bind.FilterOpts) (*StakingUnstakedIterator, error) { + + logs, sub, err := _Staking.contract.FilterLogs(opts, "Unstaked") + if err != nil { + return nil, err + } + return &StakingUnstakedIterator{contract: _Staking.contract, event: "Unstaked", logs: logs, sub: sub}, nil +} + +// WatchUnstaked is a free log subscription operation binding the contract event 0x204fccf0d92ed8d48f204adb39b2e81e92bad0dedb93f5716ca9478cfb57de00. +// +// Solidity: event Unstaked(address staker, uint256 principal, uint256 baseReward, uint256 delegationReward) +func (_Staking *StakingFilterer) WatchUnstaked(opts *bind.WatchOpts, sink chan<- *StakingUnstaked) (event.Subscription, error) { + + logs, sub, err := _Staking.contract.WatchLogs(opts, "Unstaked") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(StakingUnstaked) + if err := _Staking.contract.UnpackLog(event, "Unstaked", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseUnstaked is a log parse operation binding the contract event 0x204fccf0d92ed8d48f204adb39b2e81e92bad0dedb93f5716ca9478cfb57de00. 
+// +// Solidity: event Unstaked(address staker, uint256 principal, uint256 baseReward, uint256 delegationReward) +func (_Staking *StakingFilterer) ParseUnstaked(log types.Log) (*StakingUnstaked, error) { + event := new(StakingUnstaked) + if err := _Staking.contract.UnpackLog(event, "Unstaked", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} diff --git a/integration-tests/contracts/ethereum/StakingEventsMock.go b/integration-tests/contracts/ethereum/StakingEventsMock.go new file mode 100644 index 00000000..653e6b0b --- /dev/null +++ b/integration-tests/contracts/ethereum/StakingEventsMock.go @@ -0,0 +1,3675 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package ethereum + +import ( + "errors" + "fmt" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated" +) + +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +var StakingEventsMockMetaData = &bind.MetaData{ + ABI: 
"[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"alerter\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"rewardAmount\",\"type\":\"uint256\"}],\"name\":\"AlertRaised\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"feedOperators\",\"type\":\"address[]\"}],\"name\":\"FeedOperatorsSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"maxStakeAmount\",\"type\":\"uint256\"}],\"name\":\"MaxCommunityStakeAmountIncreased\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"maxStakeAmount\",\"type\":\"uint256\"}],\"name\":\"MaxOperatorStakeAmountIncreased\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"newMerkleRoot\",\"type\":\"bytes32\"}],\"name\":\"MerkleRootChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"principal\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"baseReward\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"delegationReward\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"Migrated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"migrationTarget\",\"type\":\"address\"}],\"name\":\"MigrationTargetAccepted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"migrationTarget\",\"type\":\"address\"}],\"name\":\"Migratio
nTargetProposed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"operator\",\"type\":\"address\"}],\"name\":\"OperatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"operator\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"OperatorRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferRequested\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Paused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"PoolConcluded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"PoolOpened\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"maxPoolSize\",\"type\":\"uint256\"}],\"name\":\"PoolSizeIncreased\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amountAdded\",\"type\":\"uint256\"}],\"name\":\"RewardAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"rate\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"available\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startTimestamp\",\"type\":\"uint256\"},{\"ind
exed\":false,\"internalType\":\"uint256\",\"name\":\"endTimestamp\",\"type\":\"uint256\"}],\"name\":\"RewardInitialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"rate\",\"type\":\"uint256\"}],\"name\":\"RewardRateChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"operator\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"slashedBaseRewards\",\"type\":\"uint256[]\"},{\"indexed\":false,\"internalType\":\"uint256[]\",\"name\":\"slashedDelegatedRewards\",\"type\":\"uint256[]\"}],\"name\":\"RewardSlashed\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"RewardWithdrawn\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newStake\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"totalStake\",\"type\":\"uint256\"}],\"name\":\"Staked\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"Unpaused\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"principal\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"baseReward\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"delegationReward\",\"type\":\"uint256\"}],\"name\":\"Unstaked\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"alerter\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"roundId\",\"type\":\"
uint256\"},{\"internalType\":\"uint256\",\"name\":\"rewardAmount\",\"type\":\"uint256\"}],\"name\":\"emitAlertRaised\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"feedOperators\",\"type\":\"address[]\"}],\"name\":\"emitFeedOperatorsSet\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"maxStakeAmount\",\"type\":\"uint256\"}],\"name\":\"emitMaxCommunityStakeAmountIncreased\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"maxStakeAmount\",\"type\":\"uint256\"}],\"name\":\"emitMaxOperatorStakeAmountIncreased\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"newMerkleRoot\",\"type\":\"bytes32\"}],\"name\":\"emitMerkleRootChanged\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"principal\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"baseReward\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"delegationReward\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"emitMigrated\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"migrationTarget\",\"type\":\"address\"}],\"name\":\"emitMigrationTargetAccepted\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"migrationTarget\",\"type\":\"address\"}],\"name\":\"emitMigrationTargetProposed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"operator\",\"t
ype\":\"address\"}],\"name\":\"emitOperatorAdded\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"operator\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"emitOperatorRemoved\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferRequested\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"}],\"name\":\"emitOwnershipTransferred\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"emitPaused\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"emitPoolConcluded\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"emitPoolOpened\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"maxPoolSize\",\"type\":\"uint256\"}],\"name\":\"emitPoolSizeIncreased\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amountAdded\",\"type\":\"uint256\"}],\"name\":\"emitRewardAdded\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"rate\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"available\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"startTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\"
,\"name\":\"endTimestamp\",\"type\":\"uint256\"}],\"name\":\"emitRewardInitialized\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"rate\",\"type\":\"uint256\"}],\"name\":\"emitRewardRateChanged\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address[]\",\"name\":\"operator\",\"type\":\"address[]\"},{\"internalType\":\"uint256[]\",\"name\":\"slashedBaseRewards\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256[]\",\"name\":\"slashedDelegatedRewards\",\"type\":\"uint256[]\"}],\"name\":\"emitRewardSlashed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"}],\"name\":\"emitRewardWithdrawn\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"newStake\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"totalStake\",\"type\":\"uint256\"}],\"name\":\"emitStaked\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"emitUnpaused\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"staker\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"principal\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"baseReward\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"delegationReward\",\"type\":\"uint256\"}],\"name\":\"emitUnstaked\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610f7b806100206000396000f3fe608060405234801561001057600080fd5b50600436106101985760003560e01c80639ec3ce4b116100e3578063b019b4e81161008c578063f7420bc211610066578063f7420bc214610318578063fa2d7fd01461032b578063fe60093f1461033e57600080fd5b8063b019b4e8146102df578063e0275fae146102f2578063eb6289de1461030557600080fd5b8063ab8711bc116100bd578063ab8711bc146102b1578063add49a96146102c4578063aeac9600146102cc57600080fd5b80639ec3ce4b146102785780639f676e9f1461028b578063a5f3d6831461029e57600080fd5b80637be5c756116101455780639a8d2df71161011f5780639a8d2df71461023f5780639b022d73146102525780639e81ace31461026557600080fd5b80637be5c756146102065780637e31a64b146102195780639652ab7b1461022c57600080fd5b80632752e639116101765780632752e639146101d85780633e8f1e05146101eb5780635d21e09a146101f357600080fd5b8063086c1c4a1461019d5780631351da48146101b257806313c664e8146101c5575b600080fd5b6101b06101ab366004610b25565b610351565b005b6101b06101c0366004610a73565b6103b6565b6101b06101d3366004610d04565b610403565b6101b06101e6366004610b5e565b610433565b6101b0610479565b6101b0610201366004610d04565b6104a4565b6101b0610214366004610a73565b6104d4565b6101b0610227366004610d04565b61051a565b6101b061023a366004610a73565b61054a565b6101b061024d366004610a73565b610590565b6101b0610260366004610d04565b6105d6565b6101b0610273366004610c7c565b610606565b6101b0610286366004610a73565b610645565b6101b0610299366004610d1d565b61068b565b6101b06102ac366004610c3f565b6106d0565b6101b06102bf366004610af2565b6106ff565b6101b0610753565b6101b06102da366004610d04565b61077e565b6101b06102ed366004610a95565b6107ae565b6101b0610300366004610af2565b61080c565b6101b0610313366004610ac8565b610860565b6101b0610326366004610a95565b6108b3565b6101b0610339366004610d04565b610911565b6101b061034c366004610d04565b610941565b6040805173ffffffffffffffffffffffffffffffffffffffff8616815260208101859052908101839052606081018290527f204fccf0d92ed8d48f204adb39b2e81e92bad0dedb93f5716ca9478cfb57de00906080015b60405180910390a150505050565b60405173fffffffffffffffffffffff
fffffffffffffffff821681527fac6fa858e9350a46cec16539926e0fde25b7629f84b5a72bffaae4df888ae86d906020015b60405180910390a150565b6040518181527f816587cb2e773af4f3689a03d7520fabff3462605ded374b485b13994c0d7b52906020016103f8565b7f667838b33bdc898470de09e0e746990f2adc11b965b7fe6828e502ebc39e0434858585858560405161046a959493929190610dd0565b60405180910390a15050505050565b6040517fded6ebf04e261e1eb2f3e3b268a2e6aee5b478c15b341eba5cf18b9bc80c2e6390600090a1565b6040518181527fde88a922e0d3b88b24e9623efeb464919c6bf9f66857a65e2bfcf2ce87a9433d906020016103f8565b60405173ffffffffffffffffffffffffffffffffffffffff821681527f62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258906020016103f8565b6040518181527fb5f554e5ef00806bace1edbb84186512ebcefa2af7706085143f501f29314df7906020016103f8565b60405173ffffffffffffffffffffffffffffffffffffffff821681527ffa33c052bbee754f3c0482a89962daffe749191fa33c696a61e947fbfd68bd84906020016103f8565b60405173ffffffffffffffffffffffffffffffffffffffff821681527f5c74c441be501340b2713817a6c6975e6f3d4a4ae39fa1ac0bf75d3c54a0cad3906020016103f8565b6040518181527f7f4f497e086b2eb55f8a9885ba00d33399bbe0ebcb92ea092834386435a1b9c0906020016103f8565b7e635ea9da6e262e92bb713d71840af7c567807ff35bf73e927490c61283248083838360405161063893929190610e89565b60405180910390a1505050565b60405173ffffffffffffffffffffffffffffffffffffffff821681527f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa906020016103f8565b6040805185815260208101859052908101839052606081018290527f125fc8494f786b470e3c39d0932a62e9e09e291ebd81ea19c57604f6d2b1d167906080016103a8565b7f40aed8e423b39a56b445ae160f4c071fc2cfb48ee0b6dcd5ffeb6bc5b18d10d0816040516103f89190610e76565b6040805173ffffffffffffffffffffffffffffffffffffffff85168152602081018490529081018290527f1449c6dd7851abc30abf37f57715f492010519147cc2652fbc38202c18a6ee9090606001610638565b6040517ff7d0e0f15586495da8c687328ead30fb829d9da55538cb0ef73dd229e517cdb890600090a1565b6040518181527f1b930366dfeaa7eb3b325021e4ae81e36527063452ee55b86c95f85b36f4c31c906020016103f8565
b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a35050565b6040805173ffffffffffffffffffffffffffffffffffffffff85168152602081018490529081018290527fd2720e8f454493f612cc97499fe8cbce7fa4d4c18d346fe7104e9042df1c1edd90606001610638565b6040805173ffffffffffffffffffffffffffffffffffffffff84168152602081018390527f2360404a74478febece1a14f11275f22ada88d19ef96f7d785913010bfff4479910160405180910390a15050565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167fed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae127860405160405180910390a35050565b6040518181527f150a6ec0e6f4e9ddcaaaa1674f157d91165a42d60653016f87a9fc870a39f050906020016103f8565b6040518181527f1e3be2efa25bca5bff2215c7b30b31086e703d6aa7d9b9a1f8ba62c5291219ad906020016103f8565b803573ffffffffffffffffffffffffffffffffffffffff8116811461099557600080fd5b919050565b600082601f8301126109ab57600080fd5b813560206109c06109bb83610f1b565b610ecc565b80838252828201915082860187848660051b89010111156109e057600080fd5b60005b85811015610a06576109f482610971565b845292840192908401906001016109e3565b5090979650505050505050565b600082601f830112610a2457600080fd5b81356020610a346109bb83610f1b565b80838252828201915082860187848660051b8901011115610a5457600080fd5b60005b85811015610a0657813584529284019290840190600101610a57565b600060208284031215610a8557600080fd5b610a8e82610971565b9392505050565b60008060408385031215610aa857600080fd5b610ab183610971565b9150610abf60208401610971565b90509250929050565b60008060408385031215610adb57600080fd5b610ae483610971565b946020939093013593505050565b600080600060608486031215610b0757600080fd5b610b1084610971565b95602085013595506040909401359392505050565b60008060008060808587031215610b3b57600080fd5b610b4485610971565b966020860135965060408601359560600135945092505050565b600080600080600060a08688031215610b7657600080fd5b610b7f86610971565b945060208087013594506040870135935060608701359
250608087013567ffffffffffffffff80821115610bb257600080fd5b818901915089601f830112610bc657600080fd5b813581811115610bd857610bd8610f3f565b610c08847fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601610ecc565b91508082528a84828501011115610c1e57600080fd5b80848401858401376000848284010152508093505050509295509295909350565b600060208284031215610c5157600080fd5b813567ffffffffffffffff811115610c6857600080fd5b610c748482850161099a565b949350505050565b600080600060608486031215610c9157600080fd5b833567ffffffffffffffff80821115610ca957600080fd5b610cb58783880161099a565b94506020860135915080821115610ccb57600080fd5b610cd787838801610a13565b93506040860135915080821115610ced57600080fd5b50610cfa86828701610a13565b9150509250925092565b600060208284031215610d1657600080fd5b5035919050565b60008060008060808587031215610d3357600080fd5b5050823594602084013594506040840135936060013592509050565b600081518084526020808501945080840160005b83811015610d9557815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101610d63565b509495945050505050565b600081518084526020808501945080840160005b83811015610d9557815187529582019590820190600101610db4565b73ffffffffffffffffffffffffffffffffffffffff8616815260006020868184015285604084015284606084015260a0608084015283518060a085015260005b81811015610e2c5785810183015185820160c001528201610e10565b81811115610e3e57600060c083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160c001979650505050505050565b602081526000610a8e6020830184610d4f565b606081526000610e9c6060830186610d4f565b8281036020840152610eae8186610da0565b90508281036040840152610ec28185610da0565b9695505050505050565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715610f1357610f13610f3f565b604052919050565b600067ffffffffffffffff821115610f3557610f35610f3f565b5060051b60200190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", +} 

// StakingEventsMockABI is the raw contract ABI JSON.
// Deprecated: use StakingEventsMockMetaData.ABI instead.
var StakingEventsMockABI = StakingEventsMockMetaData.ABI

// StakingEventsMockBin is the hex-encoded EVM creation bytecode.
// Deprecated: use StakingEventsMockMetaData.Bin instead.
var StakingEventsMockBin = StakingEventsMockMetaData.Bin

// DeployStakingEventsMock deploys a new instance of the StakingEventsMock
// contract to the given backend, signing with auth. It returns the deployed
// address, the creation transaction, and a bound wrapper around the contract.
func DeployStakingEventsMock(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *StakingEventsMock, error) {
	parsed, err := StakingEventsMockMetaData.GetAbi()
	if err != nil {
		return common.Address{}, nil, nil, err
	}
	if parsed == nil {
		// GetAbi may return (nil, nil); guard before dereferencing *parsed below.
		return common.Address{}, nil, nil, errors.New("GetABI returned nil")
	}

	address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(StakingEventsMockBin), backend)
	if err != nil {
		return common.Address{}, nil, nil, err
	}
	return address, tx, &StakingEventsMock{StakingEventsMockCaller: StakingEventsMockCaller{contract: contract}, StakingEventsMockTransactor: StakingEventsMockTransactor{contract: contract}, StakingEventsMockFilterer: StakingEventsMockFilterer{contract: contract}}, nil
}

// StakingEventsMock is an auto-generated Go binding around an Ethereum
// contract; it embeds the caller, transactor and filterer facets.
type StakingEventsMock struct {
	address common.Address // deployed contract address
	abi     abi.ABI        // parsed contract ABI
	StakingEventsMockCaller
	StakingEventsMockTransactor
	StakingEventsMockFilterer
}

// StakingEventsMockCaller is the read-only facet of the binding.
type StakingEventsMockCaller struct {
	contract *bind.BoundContract
}

// StakingEventsMockTransactor is the write-only (state-mutating) facet of the binding.
type StakingEventsMockTransactor struct {
	contract *bind.BoundContract
}

// StakingEventsMockFilterer is the event-log filtering facet of the binding.
type StakingEventsMockFilterer struct {
	contract *bind.BoundContract
}

// StakingEventsMockSession wraps the binding with pre-set call and transact options.
type StakingEventsMockSession struct {
	Contract     *StakingEventsMock
	CallOpts     bind.CallOpts
	TransactOpts bind.TransactOpts
}

// StakingEventsMockCallerSession wraps the read-only facet with pre-set call options.
type StakingEventsMockCallerSession struct {
	Contract *StakingEventsMockCaller
	CallOpts bind.CallOpts
}

// StakingEventsMockTransactorSession wraps the transactor facet with pre-set transact options.
type StakingEventsMockTransactorSession struct {
	Contract *StakingEventsMockTransactor
	TransactOpts bind.TransactOpts
}

// StakingEventsMockRaw exposes low-level Call/Transfer/Transact on the full binding.
type StakingEventsMockRaw struct {
	Contract *StakingEventsMock
}

// StakingEventsMockCallerRaw exposes low-level Call on the read-only facet.
type StakingEventsMockCallerRaw struct {
	Contract *StakingEventsMockCaller
}

// StakingEventsMockTransactorRaw exposes low-level Transfer/Transact on the transactor facet.
type StakingEventsMockTransactorRaw struct {
	Contract *StakingEventsMockTransactor
}

// NewStakingEventsMock binds an already-deployed StakingEventsMock contract at
// address to the given backend (used as caller, transactor and filterer).
func NewStakingEventsMock(address common.Address, backend bind.ContractBackend) (*StakingEventsMock, error) {
	// NOTE: the local `abi` shadows the imported abi package for the rest of
	// this function; this is standard abigen output.
	abi, err := abi.JSON(strings.NewReader(StakingEventsMockABI))
	if err != nil {
		return nil, err
	}
	contract, err := bindStakingEventsMock(address, backend, backend, backend)
	if err != nil {
		return nil, err
	}
	return &StakingEventsMock{address: address, abi: abi, StakingEventsMockCaller: StakingEventsMockCaller{contract: contract}, StakingEventsMockTransactor: StakingEventsMockTransactor{contract: contract}, StakingEventsMockFilterer: StakingEventsMockFilterer{contract: contract}}, nil
}

// NewStakingEventsMockCaller binds only the read-only facet of the contract.
func NewStakingEventsMockCaller(address common.Address, caller bind.ContractCaller) (*StakingEventsMockCaller, error) {
	contract, err := bindStakingEventsMock(address, caller, nil, nil)
	if err != nil {
		return nil, err
	}
	return &StakingEventsMockCaller{contract: contract}, nil
}

// NewStakingEventsMockTransactor binds only the state-mutating facet of the contract.
func NewStakingEventsMockTransactor(address common.Address, transactor bind.ContractTransactor) (*StakingEventsMockTransactor, error) {
	contract, err := bindStakingEventsMock(address, nil, transactor, nil)
	if err != nil {
		return nil, err
	}
	return &StakingEventsMockTransactor{contract: contract}, nil
}

// NewStakingEventsMockFilterer binds only the log-filtering facet of the contract.
func NewStakingEventsMockFilterer(address common.Address, filterer bind.ContractFilterer) (*StakingEventsMockFilterer, error) {
	contract, err := bindStakingEventsMock(address, nil, nil, filterer)
	if err != nil {
		return nil, err
	}
	return &StakingEventsMockFilterer{contract: contract}, nil
}

// bindStakingEventsMock parses the generated ABI and wraps it in a
// bind.BoundContract using the supplied caller/transactor/filterer (any may be nil).
func bindStakingEventsMock(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
	parsed, err := StakingEventsMockMetaData.GetAbi()
	if err != nil {
		return nil, err
	}
	return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
}

// Call invokes the (constant) contract method with params and unmarshals into result.
func (_StakingEventsMock *StakingEventsMockRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
	return _StakingEventsMock.Contract.StakingEventsMockCaller.contract.Call(opts, result, method, params...)
}

// Transfer initiates a plain transaction to move funds to the contract.
func (_StakingEventsMock *StakingEventsMockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.StakingEventsMockTransactor.contract.Transfer(opts)
}

// Transact invokes the (paid) contract method with params as input values.
func (_StakingEventsMock *StakingEventsMockRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.StakingEventsMockTransactor.contract.Transact(opts, method, params...)
}

// Call invokes the (constant) contract method with params and unmarshals into result.
func (_StakingEventsMock *StakingEventsMockCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
	return _StakingEventsMock.Contract.contract.Call(opts, result, method, params...)
}

// Transfer initiates a plain transaction to move funds to the contract.
func (_StakingEventsMock *StakingEventsMockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.contract.Transfer(opts)
}

// Transact invokes the (paid) contract method with params as input values.
func (_StakingEventsMock *StakingEventsMockTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.contract.Transact(opts, method, params...)
}

// EmitAlertRaised sends a transaction invoking the contract's emitAlertRaised method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitAlertRaised(opts *bind.TransactOpts, alerter common.Address, roundId *big.Int, rewardAmount *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitAlertRaised", alerter, roundId, rewardAmount)
}

// EmitAlertRaised invokes emitAlertRaised with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitAlertRaised(alerter common.Address, roundId *big.Int, rewardAmount *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitAlertRaised(&_StakingEventsMock.TransactOpts, alerter, roundId, rewardAmount)
}

// EmitAlertRaised invokes emitAlertRaised with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitAlertRaised(alerter common.Address, roundId *big.Int, rewardAmount *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitAlertRaised(&_StakingEventsMock.TransactOpts, alerter, roundId, rewardAmount)
}

// EmitFeedOperatorsSet sends a transaction invoking the contract's emitFeedOperatorsSet method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitFeedOperatorsSet(opts *bind.TransactOpts, feedOperators []common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitFeedOperatorsSet", feedOperators)
}

// EmitFeedOperatorsSet invokes emitFeedOperatorsSet with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitFeedOperatorsSet(feedOperators []common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitFeedOperatorsSet(&_StakingEventsMock.TransactOpts, feedOperators)
}

// EmitFeedOperatorsSet invokes emitFeedOperatorsSet with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitFeedOperatorsSet(feedOperators []common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitFeedOperatorsSet(&_StakingEventsMock.TransactOpts, feedOperators)
}

// EmitMaxCommunityStakeAmountIncreased sends a transaction invoking the contract's
// emitMaxCommunityStakeAmountIncreased method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitMaxCommunityStakeAmountIncreased(opts *bind.TransactOpts, maxStakeAmount *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitMaxCommunityStakeAmountIncreased", maxStakeAmount)
}

// EmitMaxCommunityStakeAmountIncreased invokes the method with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitMaxCommunityStakeAmountIncreased(maxStakeAmount *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitMaxCommunityStakeAmountIncreased(&_StakingEventsMock.TransactOpts, maxStakeAmount)
}

// EmitMaxCommunityStakeAmountIncreased invokes the method with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitMaxCommunityStakeAmountIncreased(maxStakeAmount *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitMaxCommunityStakeAmountIncreased(&_StakingEventsMock.TransactOpts, maxStakeAmount)
}

// EmitMaxOperatorStakeAmountIncreased sends a transaction invoking the contract's
// emitMaxOperatorStakeAmountIncreased method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitMaxOperatorStakeAmountIncreased(opts *bind.TransactOpts, maxStakeAmount *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitMaxOperatorStakeAmountIncreased", maxStakeAmount)
}

// EmitMaxOperatorStakeAmountIncreased invokes the method with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitMaxOperatorStakeAmountIncreased(maxStakeAmount *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitMaxOperatorStakeAmountIncreased(&_StakingEventsMock.TransactOpts, maxStakeAmount)
}

// EmitMaxOperatorStakeAmountIncreased invokes the method with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitMaxOperatorStakeAmountIncreased(maxStakeAmount *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitMaxOperatorStakeAmountIncreased(&_StakingEventsMock.TransactOpts, maxStakeAmount)
}

// EmitMerkleRootChanged sends a transaction invoking the contract's emitMerkleRootChanged method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitMerkleRootChanged(opts *bind.TransactOpts, newMerkleRoot [32]byte) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitMerkleRootChanged", newMerkleRoot)
}

// EmitMerkleRootChanged invokes emitMerkleRootChanged with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitMerkleRootChanged(newMerkleRoot [32]byte) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitMerkleRootChanged(&_StakingEventsMock.TransactOpts, newMerkleRoot)
}

// EmitMerkleRootChanged invokes emitMerkleRootChanged with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitMerkleRootChanged(newMerkleRoot [32]byte) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitMerkleRootChanged(&_StakingEventsMock.TransactOpts, newMerkleRoot)
}

// EmitMigrated sends a transaction invoking the contract's emitMigrated method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitMigrated(opts *bind.TransactOpts, staker common.Address, principal *big.Int, baseReward *big.Int, delegationReward *big.Int, data []byte) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitMigrated", staker, principal, baseReward, delegationReward, data)
}

// EmitMigrated invokes emitMigrated with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitMigrated(staker common.Address, principal *big.Int, baseReward *big.Int, delegationReward *big.Int, data []byte) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitMigrated(&_StakingEventsMock.TransactOpts, staker, principal, baseReward, delegationReward, data)
}

// EmitMigrated invokes emitMigrated with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitMigrated(staker common.Address, principal *big.Int, baseReward *big.Int, delegationReward *big.Int, data []byte) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitMigrated(&_StakingEventsMock.TransactOpts, staker, principal, baseReward, delegationReward, data)
}

// EmitMigrationTargetAccepted sends a transaction invoking the contract's
// emitMigrationTargetAccepted method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitMigrationTargetAccepted(opts *bind.TransactOpts, migrationTarget common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitMigrationTargetAccepted", migrationTarget)
}

// EmitMigrationTargetAccepted invokes the method with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitMigrationTargetAccepted(migrationTarget common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitMigrationTargetAccepted(&_StakingEventsMock.TransactOpts, migrationTarget)
}

// EmitMigrationTargetAccepted invokes the method with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitMigrationTargetAccepted(migrationTarget common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitMigrationTargetAccepted(&_StakingEventsMock.TransactOpts, migrationTarget)
}

// EmitMigrationTargetProposed sends a transaction invoking the contract's
// emitMigrationTargetProposed method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitMigrationTargetProposed(opts *bind.TransactOpts, migrationTarget common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitMigrationTargetProposed", migrationTarget)
}

// EmitMigrationTargetProposed invokes the method with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitMigrationTargetProposed(migrationTarget common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitMigrationTargetProposed(&_StakingEventsMock.TransactOpts, migrationTarget)
}

// EmitMigrationTargetProposed invokes the method with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitMigrationTargetProposed(migrationTarget common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitMigrationTargetProposed(&_StakingEventsMock.TransactOpts, migrationTarget)
}

// EmitOperatorAdded sends a transaction invoking the contract's emitOperatorAdded method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitOperatorAdded(opts *bind.TransactOpts, operator common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitOperatorAdded", operator)
}

// EmitOperatorAdded invokes emitOperatorAdded with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitOperatorAdded(operator common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitOperatorAdded(&_StakingEventsMock.TransactOpts, operator)
}

// EmitOperatorAdded invokes emitOperatorAdded with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitOperatorAdded(operator common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitOperatorAdded(&_StakingEventsMock.TransactOpts, operator)
}

// EmitOperatorRemoved sends a transaction invoking the contract's emitOperatorRemoved method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitOperatorRemoved(opts *bind.TransactOpts, operator common.Address, amount *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitOperatorRemoved", operator, amount)
}

// EmitOperatorRemoved invokes emitOperatorRemoved with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitOperatorRemoved(operator common.Address, amount *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitOperatorRemoved(&_StakingEventsMock.TransactOpts, operator, amount)
}

// EmitOperatorRemoved invokes emitOperatorRemoved with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitOperatorRemoved(operator common.Address, amount *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitOperatorRemoved(&_StakingEventsMock.TransactOpts, operator, amount)
}

// EmitOwnershipTransferRequested sends a transaction invoking the contract's
// emitOwnershipTransferRequested method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitOwnershipTransferRequested", from, to)
}

// EmitOwnershipTransferRequested invokes the method with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitOwnershipTransferRequested(&_StakingEventsMock.TransactOpts, from, to)
}

// EmitOwnershipTransferRequested invokes the method with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitOwnershipTransferRequested(from common.Address, to common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitOwnershipTransferRequested(&_StakingEventsMock.TransactOpts, from, to)
}

// EmitOwnershipTransferred sends a transaction invoking the contract's
// emitOwnershipTransferred method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitOwnershipTransferred", from, to)
}

// EmitOwnershipTransferred invokes the method with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitOwnershipTransferred(&_StakingEventsMock.TransactOpts, from, to)
}

// EmitOwnershipTransferred invokes the method with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitOwnershipTransferred(from common.Address, to common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitOwnershipTransferred(&_StakingEventsMock.TransactOpts, from, to)
}

// EmitPaused sends a transaction invoking the contract's emitPaused method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitPaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitPaused", account)
}

// EmitPaused invokes emitPaused with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitPaused(account common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitPaused(&_StakingEventsMock.TransactOpts, account)
}

// EmitPaused invokes emitPaused with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitPaused(account common.Address) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitPaused(&_StakingEventsMock.TransactOpts, account)
}

// EmitPoolConcluded sends a transaction invoking the contract's emitPoolConcluded method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitPoolConcluded(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitPoolConcluded")
}

// EmitPoolConcluded invokes emitPoolConcluded with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitPoolConcluded() (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitPoolConcluded(&_StakingEventsMock.TransactOpts)
}

// EmitPoolConcluded invokes emitPoolConcluded with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitPoolConcluded() (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitPoolConcluded(&_StakingEventsMock.TransactOpts)
}

// EmitPoolOpened sends a transaction invoking the contract's emitPoolOpened method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitPoolOpened(opts *bind.TransactOpts) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitPoolOpened")
}

// EmitPoolOpened invokes emitPoolOpened with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitPoolOpened() (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitPoolOpened(&_StakingEventsMock.TransactOpts)
}

// EmitPoolOpened invokes emitPoolOpened with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitPoolOpened() (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitPoolOpened(&_StakingEventsMock.TransactOpts)
}

// EmitPoolSizeIncreased sends a transaction invoking the contract's emitPoolSizeIncreased method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitPoolSizeIncreased(opts *bind.TransactOpts, maxPoolSize *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitPoolSizeIncreased", maxPoolSize)
}

// EmitPoolSizeIncreased invokes emitPoolSizeIncreased with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitPoolSizeIncreased(maxPoolSize *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitPoolSizeIncreased(&_StakingEventsMock.TransactOpts, maxPoolSize)
}

// EmitPoolSizeIncreased invokes emitPoolSizeIncreased with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitPoolSizeIncreased(maxPoolSize *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitPoolSizeIncreased(&_StakingEventsMock.TransactOpts, maxPoolSize)
}

// EmitRewardAdded sends a transaction invoking the contract's emitRewardAdded method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitRewardAdded(opts *bind.TransactOpts, amountAdded *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitRewardAdded", amountAdded)
}

// EmitRewardAdded invokes emitRewardAdded with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitRewardAdded(amountAdded *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitRewardAdded(&_StakingEventsMock.TransactOpts, amountAdded)
}

// EmitRewardAdded invokes emitRewardAdded with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitRewardAdded(amountAdded *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitRewardAdded(&_StakingEventsMock.TransactOpts, amountAdded)
}

// EmitRewardInitialized sends a transaction invoking the contract's emitRewardInitialized method.
func (_StakingEventsMock *StakingEventsMockTransactor) EmitRewardInitialized(opts *bind.TransactOpts, rate *big.Int, available *big.Int, startTimestamp *big.Int, endTimestamp *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.contract.Transact(opts, "emitRewardInitialized", rate, available, startTimestamp, endTimestamp)
}

// EmitRewardInitialized invokes emitRewardInitialized with the session's pre-set transact options.
func (_StakingEventsMock *StakingEventsMockSession) EmitRewardInitialized(rate *big.Int, available *big.Int, startTimestamp *big.Int, endTimestamp *big.Int) (*types.Transaction, error) {
	return _StakingEventsMock.Contract.EmitRewardInitialized(&_StakingEventsMock.TransactOpts, rate, available, startTimestamp, endTimestamp)
}

+func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitRewardInitialized(rate *big.Int, available *big.Int, startTimestamp *big.Int, endTimestamp *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.Contract.EmitRewardInitialized(&_StakingEventsMock.TransactOpts, rate, available, startTimestamp, endTimestamp) +} + +func (_StakingEventsMock *StakingEventsMockTransactor) EmitRewardRateChanged(opts *bind.TransactOpts, rate *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.contract.Transact(opts, "emitRewardRateChanged", rate) +} + +func (_StakingEventsMock *StakingEventsMockSession) EmitRewardRateChanged(rate *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.Contract.EmitRewardRateChanged(&_StakingEventsMock.TransactOpts, rate) +} + +func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitRewardRateChanged(rate *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.Contract.EmitRewardRateChanged(&_StakingEventsMock.TransactOpts, rate) +} + +func (_StakingEventsMock *StakingEventsMockTransactor) EmitRewardSlashed(opts *bind.TransactOpts, operator []common.Address, slashedBaseRewards []*big.Int, slashedDelegatedRewards []*big.Int) (*types.Transaction, error) { + return _StakingEventsMock.contract.Transact(opts, "emitRewardSlashed", operator, slashedBaseRewards, slashedDelegatedRewards) +} + +func (_StakingEventsMock *StakingEventsMockSession) EmitRewardSlashed(operator []common.Address, slashedBaseRewards []*big.Int, slashedDelegatedRewards []*big.Int) (*types.Transaction, error) { + return _StakingEventsMock.Contract.EmitRewardSlashed(&_StakingEventsMock.TransactOpts, operator, slashedBaseRewards, slashedDelegatedRewards) +} + +func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitRewardSlashed(operator []common.Address, slashedBaseRewards []*big.Int, slashedDelegatedRewards []*big.Int) (*types.Transaction, error) { + return 
_StakingEventsMock.Contract.EmitRewardSlashed(&_StakingEventsMock.TransactOpts, operator, slashedBaseRewards, slashedDelegatedRewards) +} + +func (_StakingEventsMock *StakingEventsMockTransactor) EmitRewardWithdrawn(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.contract.Transact(opts, "emitRewardWithdrawn", amount) +} + +func (_StakingEventsMock *StakingEventsMockSession) EmitRewardWithdrawn(amount *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.Contract.EmitRewardWithdrawn(&_StakingEventsMock.TransactOpts, amount) +} + +func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitRewardWithdrawn(amount *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.Contract.EmitRewardWithdrawn(&_StakingEventsMock.TransactOpts, amount) +} + +func (_StakingEventsMock *StakingEventsMockTransactor) EmitStaked(opts *bind.TransactOpts, staker common.Address, newStake *big.Int, totalStake *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.contract.Transact(opts, "emitStaked", staker, newStake, totalStake) +} + +func (_StakingEventsMock *StakingEventsMockSession) EmitStaked(staker common.Address, newStake *big.Int, totalStake *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.Contract.EmitStaked(&_StakingEventsMock.TransactOpts, staker, newStake, totalStake) +} + +func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitStaked(staker common.Address, newStake *big.Int, totalStake *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.Contract.EmitStaked(&_StakingEventsMock.TransactOpts, staker, newStake, totalStake) +} + +func (_StakingEventsMock *StakingEventsMockTransactor) EmitUnpaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) { + return _StakingEventsMock.contract.Transact(opts, "emitUnpaused", account) +} + +func (_StakingEventsMock *StakingEventsMockSession) EmitUnpaused(account common.Address) 
(*types.Transaction, error) { + return _StakingEventsMock.Contract.EmitUnpaused(&_StakingEventsMock.TransactOpts, account) +} + +func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitUnpaused(account common.Address) (*types.Transaction, error) { + return _StakingEventsMock.Contract.EmitUnpaused(&_StakingEventsMock.TransactOpts, account) +} + +func (_StakingEventsMock *StakingEventsMockTransactor) EmitUnstaked(opts *bind.TransactOpts, staker common.Address, principal *big.Int, baseReward *big.Int, delegationReward *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.contract.Transact(opts, "emitUnstaked", staker, principal, baseReward, delegationReward) +} + +func (_StakingEventsMock *StakingEventsMockSession) EmitUnstaked(staker common.Address, principal *big.Int, baseReward *big.Int, delegationReward *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.Contract.EmitUnstaked(&_StakingEventsMock.TransactOpts, staker, principal, baseReward, delegationReward) +} + +func (_StakingEventsMock *StakingEventsMockTransactorSession) EmitUnstaked(staker common.Address, principal *big.Int, baseReward *big.Int, delegationReward *big.Int) (*types.Transaction, error) { + return _StakingEventsMock.Contract.EmitUnstaked(&_StakingEventsMock.TransactOpts, staker, principal, baseReward, delegationReward) +} + +type StakingEventsMockAlertRaisedIterator struct { + Event *StakingEventsMockAlertRaised + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockAlertRaisedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockAlertRaised) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + 
it.Event = new(StakingEventsMockAlertRaised) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockAlertRaisedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockAlertRaisedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockAlertRaised struct { + Alerter common.Address + RoundId *big.Int + RewardAmount *big.Int + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterAlertRaised(opts *bind.FilterOpts) (*StakingEventsMockAlertRaisedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "AlertRaised") + if err != nil { + return nil, err + } + return &StakingEventsMockAlertRaisedIterator{contract: _StakingEventsMock.contract, event: "AlertRaised", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchAlertRaised(opts *bind.WatchOpts, sink chan<- *StakingEventsMockAlertRaised) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "AlertRaised") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockAlertRaised) + if err := _StakingEventsMock.contract.UnpackLog(event, "AlertRaised", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseAlertRaised(log types.Log) (*StakingEventsMockAlertRaised, error) { + event := new(StakingEventsMockAlertRaised) + if err := 
_StakingEventsMock.contract.UnpackLog(event, "AlertRaised", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockFeedOperatorsSetIterator struct { + Event *StakingEventsMockFeedOperatorsSet + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockFeedOperatorsSetIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockFeedOperatorsSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockFeedOperatorsSet) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockFeedOperatorsSetIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockFeedOperatorsSetIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockFeedOperatorsSet struct { + FeedOperators []common.Address + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterFeedOperatorsSet(opts *bind.FilterOpts) (*StakingEventsMockFeedOperatorsSetIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "FeedOperatorsSet") + if err != nil { + return nil, err + } + return &StakingEventsMockFeedOperatorsSetIterator{contract: _StakingEventsMock.contract, event: "FeedOperatorsSet", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchFeedOperatorsSet(opts *bind.WatchOpts, sink chan<- *StakingEventsMockFeedOperatorsSet) (event.Subscription, 
error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "FeedOperatorsSet") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockFeedOperatorsSet) + if err := _StakingEventsMock.contract.UnpackLog(event, "FeedOperatorsSet", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseFeedOperatorsSet(log types.Log) (*StakingEventsMockFeedOperatorsSet, error) { + event := new(StakingEventsMockFeedOperatorsSet) + if err := _StakingEventsMock.contract.UnpackLog(event, "FeedOperatorsSet", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockMaxCommunityStakeAmountIncreasedIterator struct { + Event *StakingEventsMockMaxCommunityStakeAmountIncreased + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockMaxCommunityStakeAmountIncreasedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockMaxCommunityStakeAmountIncreased) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockMaxCommunityStakeAmountIncreased) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err 
+ return it.Next() + } +} + +func (it *StakingEventsMockMaxCommunityStakeAmountIncreasedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockMaxCommunityStakeAmountIncreasedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockMaxCommunityStakeAmountIncreased struct { + MaxStakeAmount *big.Int + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterMaxCommunityStakeAmountIncreased(opts *bind.FilterOpts) (*StakingEventsMockMaxCommunityStakeAmountIncreasedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "MaxCommunityStakeAmountIncreased") + if err != nil { + return nil, err + } + return &StakingEventsMockMaxCommunityStakeAmountIncreasedIterator{contract: _StakingEventsMock.contract, event: "MaxCommunityStakeAmountIncreased", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchMaxCommunityStakeAmountIncreased(opts *bind.WatchOpts, sink chan<- *StakingEventsMockMaxCommunityStakeAmountIncreased) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "MaxCommunityStakeAmountIncreased") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockMaxCommunityStakeAmountIncreased) + if err := _StakingEventsMock.contract.UnpackLog(event, "MaxCommunityStakeAmountIncreased", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseMaxCommunityStakeAmountIncreased(log types.Log) (*StakingEventsMockMaxCommunityStakeAmountIncreased, error) { + event := 
new(StakingEventsMockMaxCommunityStakeAmountIncreased) + if err := _StakingEventsMock.contract.UnpackLog(event, "MaxCommunityStakeAmountIncreased", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockMaxOperatorStakeAmountIncreasedIterator struct { + Event *StakingEventsMockMaxOperatorStakeAmountIncreased + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockMaxOperatorStakeAmountIncreasedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockMaxOperatorStakeAmountIncreased) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockMaxOperatorStakeAmountIncreased) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockMaxOperatorStakeAmountIncreasedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockMaxOperatorStakeAmountIncreasedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockMaxOperatorStakeAmountIncreased struct { + MaxStakeAmount *big.Int + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterMaxOperatorStakeAmountIncreased(opts *bind.FilterOpts) (*StakingEventsMockMaxOperatorStakeAmountIncreasedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "MaxOperatorStakeAmountIncreased") + if err != nil { + return nil, err + } + return 
&StakingEventsMockMaxOperatorStakeAmountIncreasedIterator{contract: _StakingEventsMock.contract, event: "MaxOperatorStakeAmountIncreased", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchMaxOperatorStakeAmountIncreased(opts *bind.WatchOpts, sink chan<- *StakingEventsMockMaxOperatorStakeAmountIncreased) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "MaxOperatorStakeAmountIncreased") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockMaxOperatorStakeAmountIncreased) + if err := _StakingEventsMock.contract.UnpackLog(event, "MaxOperatorStakeAmountIncreased", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseMaxOperatorStakeAmountIncreased(log types.Log) (*StakingEventsMockMaxOperatorStakeAmountIncreased, error) { + event := new(StakingEventsMockMaxOperatorStakeAmountIncreased) + if err := _StakingEventsMock.contract.UnpackLog(event, "MaxOperatorStakeAmountIncreased", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockMerkleRootChangedIterator struct { + Event *StakingEventsMockMerkleRootChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockMerkleRootChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockMerkleRootChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + 
it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockMerkleRootChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockMerkleRootChangedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockMerkleRootChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockMerkleRootChanged struct { + NewMerkleRoot [32]byte + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterMerkleRootChanged(opts *bind.FilterOpts) (*StakingEventsMockMerkleRootChangedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "MerkleRootChanged") + if err != nil { + return nil, err + } + return &StakingEventsMockMerkleRootChangedIterator{contract: _StakingEventsMock.contract, event: "MerkleRootChanged", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchMerkleRootChanged(opts *bind.WatchOpts, sink chan<- *StakingEventsMockMerkleRootChanged) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "MerkleRootChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockMerkleRootChanged) + if err := _StakingEventsMock.contract.UnpackLog(event, "MerkleRootChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func 
(_StakingEventsMock *StakingEventsMockFilterer) ParseMerkleRootChanged(log types.Log) (*StakingEventsMockMerkleRootChanged, error) { + event := new(StakingEventsMockMerkleRootChanged) + if err := _StakingEventsMock.contract.UnpackLog(event, "MerkleRootChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockMigratedIterator struct { + Event *StakingEventsMockMigrated + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockMigratedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockMigrated) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockMigratedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockMigratedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockMigrated struct { + Staker common.Address + Principal *big.Int + BaseReward *big.Int + DelegationReward *big.Int + Data []byte + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterMigrated(opts *bind.FilterOpts) (*StakingEventsMockMigratedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "Migrated") + if err != nil { + return nil, err + } + return &StakingEventsMockMigratedIterator{contract: _StakingEventsMock.contract, event: "Migrated", logs: logs, sub: sub}, 
nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchMigrated(opts *bind.WatchOpts, sink chan<- *StakingEventsMockMigrated) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "Migrated") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockMigrated) + if err := _StakingEventsMock.contract.UnpackLog(event, "Migrated", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseMigrated(log types.Log) (*StakingEventsMockMigrated, error) { + event := new(StakingEventsMockMigrated) + if err := _StakingEventsMock.contract.UnpackLog(event, "Migrated", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockMigrationTargetAcceptedIterator struct { + Event *StakingEventsMockMigrationTargetAccepted + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockMigrationTargetAcceptedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockMigrationTargetAccepted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockMigrationTargetAccepted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + 
case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockMigrationTargetAcceptedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockMigrationTargetAcceptedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockMigrationTargetAccepted struct { + MigrationTarget common.Address + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterMigrationTargetAccepted(opts *bind.FilterOpts) (*StakingEventsMockMigrationTargetAcceptedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "MigrationTargetAccepted") + if err != nil { + return nil, err + } + return &StakingEventsMockMigrationTargetAcceptedIterator{contract: _StakingEventsMock.contract, event: "MigrationTargetAccepted", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchMigrationTargetAccepted(opts *bind.WatchOpts, sink chan<- *StakingEventsMockMigrationTargetAccepted) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "MigrationTargetAccepted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockMigrationTargetAccepted) + if err := _StakingEventsMock.contract.UnpackLog(event, "MigrationTargetAccepted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseMigrationTargetAccepted(log types.Log) (*StakingEventsMockMigrationTargetAccepted, error) { + event := new(StakingEventsMockMigrationTargetAccepted) + if err := _StakingEventsMock.contract.UnpackLog(event, 
"MigrationTargetAccepted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockMigrationTargetProposedIterator struct { + Event *StakingEventsMockMigrationTargetProposed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockMigrationTargetProposedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockMigrationTargetProposed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockMigrationTargetProposed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockMigrationTargetProposedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockMigrationTargetProposedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockMigrationTargetProposed struct { + MigrationTarget common.Address + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterMigrationTargetProposed(opts *bind.FilterOpts) (*StakingEventsMockMigrationTargetProposedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "MigrationTargetProposed") + if err != nil { + return nil, err + } + return &StakingEventsMockMigrationTargetProposedIterator{contract: _StakingEventsMock.contract, event: "MigrationTargetProposed", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchMigrationTargetProposed(opts *bind.WatchOpts, sink 
chan<- *StakingEventsMockMigrationTargetProposed) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "MigrationTargetProposed") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockMigrationTargetProposed) + if err := _StakingEventsMock.contract.UnpackLog(event, "MigrationTargetProposed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseMigrationTargetProposed(log types.Log) (*StakingEventsMockMigrationTargetProposed, error) { + event := new(StakingEventsMockMigrationTargetProposed) + if err := _StakingEventsMock.contract.UnpackLog(event, "MigrationTargetProposed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockOperatorAddedIterator struct { + Event *StakingEventsMockOperatorAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockOperatorAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockOperatorAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockOperatorAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + 
it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockOperatorAddedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockOperatorAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockOperatorAdded struct { + Operator common.Address + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterOperatorAdded(opts *bind.FilterOpts) (*StakingEventsMockOperatorAddedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "OperatorAdded") + if err != nil { + return nil, err + } + return &StakingEventsMockOperatorAddedIterator{contract: _StakingEventsMock.contract, event: "OperatorAdded", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchOperatorAdded(opts *bind.WatchOpts, sink chan<- *StakingEventsMockOperatorAdded) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "OperatorAdded") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockOperatorAdded) + if err := _StakingEventsMock.contract.UnpackLog(event, "OperatorAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseOperatorAdded(log types.Log) (*StakingEventsMockOperatorAdded, error) { + event := new(StakingEventsMockOperatorAdded) + if err := _StakingEventsMock.contract.UnpackLog(event, "OperatorAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockOperatorRemovedIterator struct { + Event *StakingEventsMockOperatorRemoved 
+ + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockOperatorRemovedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockOperatorRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockOperatorRemoved) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockOperatorRemovedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockOperatorRemovedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockOperatorRemoved struct { + Operator common.Address + Amount *big.Int + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterOperatorRemoved(opts *bind.FilterOpts) (*StakingEventsMockOperatorRemovedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "OperatorRemoved") + if err != nil { + return nil, err + } + return &StakingEventsMockOperatorRemovedIterator{contract: _StakingEventsMock.contract, event: "OperatorRemoved", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchOperatorRemoved(opts *bind.WatchOpts, sink chan<- *StakingEventsMockOperatorRemoved) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "OperatorRemoved") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + 
case log := <-logs: + + event := new(StakingEventsMockOperatorRemoved) + if err := _StakingEventsMock.contract.UnpackLog(event, "OperatorRemoved", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseOperatorRemoved(log types.Log) (*StakingEventsMockOperatorRemoved, error) { + event := new(StakingEventsMockOperatorRemoved) + if err := _StakingEventsMock.contract.UnpackLog(event, "OperatorRemoved", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockOwnershipTransferRequestedIterator struct { + Event *StakingEventsMockOwnershipTransferRequested + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockOwnershipTransferRequestedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockOwnershipTransferRequested) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockOwnershipTransferRequestedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockOwnershipTransferRequestedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
StakingEventsMockOwnershipTransferRequested struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*StakingEventsMockOwnershipTransferRequestedIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return &StakingEventsMockOwnershipTransferRequestedIterator{contract: _StakingEventsMock.contract, event: "OwnershipTransferRequested", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *StakingEventsMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "OwnershipTransferRequested", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockOwnershipTransferRequested) + if err := _StakingEventsMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + 
+func (_StakingEventsMock *StakingEventsMockFilterer) ParseOwnershipTransferRequested(log types.Log) (*StakingEventsMockOwnershipTransferRequested, error) { + event := new(StakingEventsMockOwnershipTransferRequested) + if err := _StakingEventsMock.contract.UnpackLog(event, "OwnershipTransferRequested", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockOwnershipTransferredIterator struct { + Event *StakingEventsMockOwnershipTransferred + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockOwnershipTransferredIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockOwnershipTransferredIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockOwnershipTransferred struct { + From common.Address + To common.Address + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*StakingEventsMockOwnershipTransferredIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + 
} + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return &StakingEventsMockOwnershipTransferredIterator{contract: _StakingEventsMock.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *StakingEventsMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "OwnershipTransferred", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockOwnershipTransferred) + if err := _StakingEventsMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseOwnershipTransferred(log types.Log) (*StakingEventsMockOwnershipTransferred, error) { + event := new(StakingEventsMockOwnershipTransferred) + if err := _StakingEventsMock.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockPausedIterator struct { + Event *StakingEventsMockPaused + + contract *bind.BoundContract + event string + 
+ logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockPausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockPaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockPausedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockPausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockPaused struct { + Account common.Address + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterPaused(opts *bind.FilterOpts) (*StakingEventsMockPausedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "Paused") + if err != nil { + return nil, err + } + return &StakingEventsMockPausedIterator{contract: _StakingEventsMock.contract, event: "Paused", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchPaused(opts *bind.WatchOpts, sink chan<- *StakingEventsMockPaused) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "Paused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockPaused) + if err := _StakingEventsMock.contract.UnpackLog(event, "Paused", log); err != nil { + return err + } + event.Raw = log + + select 
{ + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParsePaused(log types.Log) (*StakingEventsMockPaused, error) { + event := new(StakingEventsMockPaused) + if err := _StakingEventsMock.contract.UnpackLog(event, "Paused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockPoolConcludedIterator struct { + Event *StakingEventsMockPoolConcluded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockPoolConcludedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockPoolConcluded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockPoolConcluded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockPoolConcludedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockPoolConcludedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockPoolConcluded struct { + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterPoolConcluded(opts *bind.FilterOpts) (*StakingEventsMockPoolConcludedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "PoolConcluded") + if err != nil { + return nil, err + } + return 
&StakingEventsMockPoolConcludedIterator{contract: _StakingEventsMock.contract, event: "PoolConcluded", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchPoolConcluded(opts *bind.WatchOpts, sink chan<- *StakingEventsMockPoolConcluded) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "PoolConcluded") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockPoolConcluded) + if err := _StakingEventsMock.contract.UnpackLog(event, "PoolConcluded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParsePoolConcluded(log types.Log) (*StakingEventsMockPoolConcluded, error) { + event := new(StakingEventsMockPoolConcluded) + if err := _StakingEventsMock.contract.UnpackLog(event, "PoolConcluded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockPoolOpenedIterator struct { + Event *StakingEventsMockPoolOpened + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockPoolOpenedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockPoolOpened) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockPoolOpened) + if err := 
it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockPoolOpenedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockPoolOpenedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockPoolOpened struct { + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterPoolOpened(opts *bind.FilterOpts) (*StakingEventsMockPoolOpenedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "PoolOpened") + if err != nil { + return nil, err + } + return &StakingEventsMockPoolOpenedIterator{contract: _StakingEventsMock.contract, event: "PoolOpened", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchPoolOpened(opts *bind.WatchOpts, sink chan<- *StakingEventsMockPoolOpened) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "PoolOpened") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockPoolOpened) + if err := _StakingEventsMock.contract.UnpackLog(event, "PoolOpened", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParsePoolOpened(log types.Log) (*StakingEventsMockPoolOpened, error) { + event := new(StakingEventsMockPoolOpened) + if err := _StakingEventsMock.contract.UnpackLog(event, "PoolOpened", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type 
StakingEventsMockPoolSizeIncreasedIterator struct { + Event *StakingEventsMockPoolSizeIncreased + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockPoolSizeIncreasedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockPoolSizeIncreased) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockPoolSizeIncreased) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockPoolSizeIncreasedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockPoolSizeIncreasedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockPoolSizeIncreased struct { + MaxPoolSize *big.Int + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterPoolSizeIncreased(opts *bind.FilterOpts) (*StakingEventsMockPoolSizeIncreasedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "PoolSizeIncreased") + if err != nil { + return nil, err + } + return &StakingEventsMockPoolSizeIncreasedIterator{contract: _StakingEventsMock.contract, event: "PoolSizeIncreased", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchPoolSizeIncreased(opts *bind.WatchOpts, sink chan<- *StakingEventsMockPoolSizeIncreased) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "PoolSizeIncreased") + if err != nil { + return nil, err + } + return 
event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockPoolSizeIncreased) + if err := _StakingEventsMock.contract.UnpackLog(event, "PoolSizeIncreased", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParsePoolSizeIncreased(log types.Log) (*StakingEventsMockPoolSizeIncreased, error) { + event := new(StakingEventsMockPoolSizeIncreased) + if err := _StakingEventsMock.contract.UnpackLog(event, "PoolSizeIncreased", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockRewardAddedIterator struct { + Event *StakingEventsMockRewardAdded + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockRewardAddedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockRewardAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockRewardAdded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockRewardAddedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockRewardAddedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type 
StakingEventsMockRewardAdded struct { + AmountAdded *big.Int + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterRewardAdded(opts *bind.FilterOpts) (*StakingEventsMockRewardAddedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "RewardAdded") + if err != nil { + return nil, err + } + return &StakingEventsMockRewardAddedIterator{contract: _StakingEventsMock.contract, event: "RewardAdded", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchRewardAdded(opts *bind.WatchOpts, sink chan<- *StakingEventsMockRewardAdded) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "RewardAdded") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockRewardAdded) + if err := _StakingEventsMock.contract.UnpackLog(event, "RewardAdded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseRewardAdded(log types.Log) (*StakingEventsMockRewardAdded, error) { + event := new(StakingEventsMockRewardAdded) + if err := _StakingEventsMock.contract.UnpackLog(event, "RewardAdded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockRewardInitializedIterator struct { + Event *StakingEventsMockRewardInitialized + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockRewardInitializedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := 
<-it.logs: + it.Event = new(StakingEventsMockRewardInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockRewardInitialized) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockRewardInitializedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockRewardInitializedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockRewardInitialized struct { + Rate *big.Int + Available *big.Int + StartTimestamp *big.Int + EndTimestamp *big.Int + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterRewardInitialized(opts *bind.FilterOpts) (*StakingEventsMockRewardInitializedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "RewardInitialized") + if err != nil { + return nil, err + } + return &StakingEventsMockRewardInitializedIterator{contract: _StakingEventsMock.contract, event: "RewardInitialized", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchRewardInitialized(opts *bind.WatchOpts, sink chan<- *StakingEventsMockRewardInitialized) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "RewardInitialized") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockRewardInitialized) + if err := _StakingEventsMock.contract.UnpackLog(event, "RewardInitialized", log); err != nil { + return err + } + event.Raw = log + + 
select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseRewardInitialized(log types.Log) (*StakingEventsMockRewardInitialized, error) { + event := new(StakingEventsMockRewardInitialized) + if err := _StakingEventsMock.contract.UnpackLog(event, "RewardInitialized", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockRewardRateChangedIterator struct { + Event *StakingEventsMockRewardRateChanged + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockRewardRateChangedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockRewardRateChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockRewardRateChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockRewardRateChangedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockRewardRateChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockRewardRateChanged struct { + Rate *big.Int + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterRewardRateChanged(opts *bind.FilterOpts) (*StakingEventsMockRewardRateChangedIterator, error) { + + logs, sub, err := 
_StakingEventsMock.contract.FilterLogs(opts, "RewardRateChanged") + if err != nil { + return nil, err + } + return &StakingEventsMockRewardRateChangedIterator{contract: _StakingEventsMock.contract, event: "RewardRateChanged", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchRewardRateChanged(opts *bind.WatchOpts, sink chan<- *StakingEventsMockRewardRateChanged) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "RewardRateChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockRewardRateChanged) + if err := _StakingEventsMock.contract.UnpackLog(event, "RewardRateChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseRewardRateChanged(log types.Log) (*StakingEventsMockRewardRateChanged, error) { + event := new(StakingEventsMockRewardRateChanged) + if err := _StakingEventsMock.contract.UnpackLog(event, "RewardRateChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockRewardSlashedIterator struct { + Event *StakingEventsMockRewardSlashed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockRewardSlashedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockRewardSlashed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + 
return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockRewardSlashed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockRewardSlashedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockRewardSlashedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockRewardSlashed struct { + Operator []common.Address + SlashedBaseRewards []*big.Int + SlashedDelegatedRewards []*big.Int + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterRewardSlashed(opts *bind.FilterOpts) (*StakingEventsMockRewardSlashedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "RewardSlashed") + if err != nil { + return nil, err + } + return &StakingEventsMockRewardSlashedIterator{contract: _StakingEventsMock.contract, event: "RewardSlashed", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchRewardSlashed(opts *bind.WatchOpts, sink chan<- *StakingEventsMockRewardSlashed) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "RewardSlashed") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockRewardSlashed) + if err := _StakingEventsMock.contract.UnpackLog(event, "RewardSlashed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock 
*StakingEventsMockFilterer) ParseRewardSlashed(log types.Log) (*StakingEventsMockRewardSlashed, error) { + event := new(StakingEventsMockRewardSlashed) + if err := _StakingEventsMock.contract.UnpackLog(event, "RewardSlashed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockRewardWithdrawnIterator struct { + Event *StakingEventsMockRewardWithdrawn + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockRewardWithdrawnIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockRewardWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockRewardWithdrawn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockRewardWithdrawnIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockRewardWithdrawnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockRewardWithdrawn struct { + Amount *big.Int + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterRewardWithdrawn(opts *bind.FilterOpts) (*StakingEventsMockRewardWithdrawnIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "RewardWithdrawn") + if err != nil { + return nil, err + } + return &StakingEventsMockRewardWithdrawnIterator{contract: _StakingEventsMock.contract, event: "RewardWithdrawn", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock 
*StakingEventsMockFilterer) WatchRewardWithdrawn(opts *bind.WatchOpts, sink chan<- *StakingEventsMockRewardWithdrawn) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "RewardWithdrawn") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockRewardWithdrawn) + if err := _StakingEventsMock.contract.UnpackLog(event, "RewardWithdrawn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseRewardWithdrawn(log types.Log) (*StakingEventsMockRewardWithdrawn, error) { + event := new(StakingEventsMockRewardWithdrawn) + if err := _StakingEventsMock.contract.UnpackLog(event, "RewardWithdrawn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockStakedIterator struct { + Event *StakingEventsMockStaked + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockStakedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockStaked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockStaked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = 
err + return it.Next() + } +} + +func (it *StakingEventsMockStakedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockStakedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockStaked struct { + Staker common.Address + NewStake *big.Int + TotalStake *big.Int + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterStaked(opts *bind.FilterOpts) (*StakingEventsMockStakedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "Staked") + if err != nil { + return nil, err + } + return &StakingEventsMockStakedIterator{contract: _StakingEventsMock.contract, event: "Staked", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchStaked(opts *bind.WatchOpts, sink chan<- *StakingEventsMockStaked) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "Staked") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockStaked) + if err := _StakingEventsMock.contract.UnpackLog(event, "Staked", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseStaked(log types.Log) (*StakingEventsMockStaked, error) { + event := new(StakingEventsMockStaked) + if err := _StakingEventsMock.contract.UnpackLog(event, "Staked", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockUnpausedIterator struct { + Event *StakingEventsMockUnpaused + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail 
error +} + +func (it *StakingEventsMockUnpausedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockUnpaused) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockUnpausedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockUnpausedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockUnpaused struct { + Account common.Address + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterUnpaused(opts *bind.FilterOpts) (*StakingEventsMockUnpausedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return &StakingEventsMockUnpausedIterator{contract: _StakingEventsMock.contract, event: "Unpaused", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchUnpaused(opts *bind.WatchOpts, sink chan<- *StakingEventsMockUnpaused) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "Unpaused") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockUnpaused) + if err := _StakingEventsMock.contract.UnpackLog(event, "Unpaused", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := 
<-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseUnpaused(log types.Log) (*StakingEventsMockUnpaused, error) { + event := new(StakingEventsMockUnpaused) + if err := _StakingEventsMock.contract.UnpackLog(event, "Unpaused", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +type StakingEventsMockUnstakedIterator struct { + Event *StakingEventsMockUnstaked + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *StakingEventsMockUnstakedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockUnstaked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(StakingEventsMockUnstaked) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *StakingEventsMockUnstakedIterator) Error() error { + return it.fail +} + +func (it *StakingEventsMockUnstakedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type StakingEventsMockUnstaked struct { + Staker common.Address + Principal *big.Int + BaseReward *big.Int + DelegationReward *big.Int + Raw types.Log +} + +func (_StakingEventsMock *StakingEventsMockFilterer) FilterUnstaked(opts *bind.FilterOpts) (*StakingEventsMockUnstakedIterator, error) { + + logs, sub, err := _StakingEventsMock.contract.FilterLogs(opts, "Unstaked") + if err != nil { + return nil, err + } + return 
&StakingEventsMockUnstakedIterator{contract: _StakingEventsMock.contract, event: "Unstaked", logs: logs, sub: sub}, nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) WatchUnstaked(opts *bind.WatchOpts, sink chan<- *StakingEventsMockUnstaked) (event.Subscription, error) { + + logs, sub, err := _StakingEventsMock.contract.WatchLogs(opts, "Unstaked") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(StakingEventsMockUnstaked) + if err := _StakingEventsMock.contract.UnpackLog(event, "Unstaked", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_StakingEventsMock *StakingEventsMockFilterer) ParseUnstaked(log types.Log) (*StakingEventsMockUnstaked, error) { + event := new(StakingEventsMockUnstaked) + if err := _StakingEventsMock.contract.UnpackLog(event, "Unstaked", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +func (_StakingEventsMock *StakingEventsMock) ParseLog(log types.Log) (generated.AbigenLog, error) { + switch log.Topics[0] { + case _StakingEventsMock.abi.Events["AlertRaised"].ID: + return _StakingEventsMock.ParseAlertRaised(log) + case _StakingEventsMock.abi.Events["FeedOperatorsSet"].ID: + return _StakingEventsMock.ParseFeedOperatorsSet(log) + case _StakingEventsMock.abi.Events["MaxCommunityStakeAmountIncreased"].ID: + return _StakingEventsMock.ParseMaxCommunityStakeAmountIncreased(log) + case _StakingEventsMock.abi.Events["MaxOperatorStakeAmountIncreased"].ID: + return _StakingEventsMock.ParseMaxOperatorStakeAmountIncreased(log) + case _StakingEventsMock.abi.Events["MerkleRootChanged"].ID: + return _StakingEventsMock.ParseMerkleRootChanged(log) + case 
_StakingEventsMock.abi.Events["Migrated"].ID: + return _StakingEventsMock.ParseMigrated(log) + case _StakingEventsMock.abi.Events["MigrationTargetAccepted"].ID: + return _StakingEventsMock.ParseMigrationTargetAccepted(log) + case _StakingEventsMock.abi.Events["MigrationTargetProposed"].ID: + return _StakingEventsMock.ParseMigrationTargetProposed(log) + case _StakingEventsMock.abi.Events["OperatorAdded"].ID: + return _StakingEventsMock.ParseOperatorAdded(log) + case _StakingEventsMock.abi.Events["OperatorRemoved"].ID: + return _StakingEventsMock.ParseOperatorRemoved(log) + case _StakingEventsMock.abi.Events["OwnershipTransferRequested"].ID: + return _StakingEventsMock.ParseOwnershipTransferRequested(log) + case _StakingEventsMock.abi.Events["OwnershipTransferred"].ID: + return _StakingEventsMock.ParseOwnershipTransferred(log) + case _StakingEventsMock.abi.Events["Paused"].ID: + return _StakingEventsMock.ParsePaused(log) + case _StakingEventsMock.abi.Events["PoolConcluded"].ID: + return _StakingEventsMock.ParsePoolConcluded(log) + case _StakingEventsMock.abi.Events["PoolOpened"].ID: + return _StakingEventsMock.ParsePoolOpened(log) + case _StakingEventsMock.abi.Events["PoolSizeIncreased"].ID: + return _StakingEventsMock.ParsePoolSizeIncreased(log) + case _StakingEventsMock.abi.Events["RewardAdded"].ID: + return _StakingEventsMock.ParseRewardAdded(log) + case _StakingEventsMock.abi.Events["RewardInitialized"].ID: + return _StakingEventsMock.ParseRewardInitialized(log) + case _StakingEventsMock.abi.Events["RewardRateChanged"].ID: + return _StakingEventsMock.ParseRewardRateChanged(log) + case _StakingEventsMock.abi.Events["RewardSlashed"].ID: + return _StakingEventsMock.ParseRewardSlashed(log) + case _StakingEventsMock.abi.Events["RewardWithdrawn"].ID: + return _StakingEventsMock.ParseRewardWithdrawn(log) + case _StakingEventsMock.abi.Events["Staked"].ID: + return _StakingEventsMock.ParseStaked(log) + case _StakingEventsMock.abi.Events["Unpaused"].ID: + return 
_StakingEventsMock.ParseUnpaused(log) + case _StakingEventsMock.abi.Events["Unstaked"].ID: + return _StakingEventsMock.ParseUnstaked(log) + + default: + return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) + } +} + +func (StakingEventsMockAlertRaised) Topic() common.Hash { + return common.HexToHash("0xd2720e8f454493f612cc97499fe8cbce7fa4d4c18d346fe7104e9042df1c1edd") +} + +func (StakingEventsMockFeedOperatorsSet) Topic() common.Hash { + return common.HexToHash("0x40aed8e423b39a56b445ae160f4c071fc2cfb48ee0b6dcd5ffeb6bc5b18d10d0") +} + +func (StakingEventsMockMaxCommunityStakeAmountIncreased) Topic() common.Hash { + return common.HexToHash("0xb5f554e5ef00806bace1edbb84186512ebcefa2af7706085143f501f29314df7") +} + +func (StakingEventsMockMaxOperatorStakeAmountIncreased) Topic() common.Hash { + return common.HexToHash("0x816587cb2e773af4f3689a03d7520fabff3462605ded374b485b13994c0d7b52") +} + +func (StakingEventsMockMerkleRootChanged) Topic() common.Hash { + return common.HexToHash("0x1b930366dfeaa7eb3b325021e4ae81e36527063452ee55b86c95f85b36f4c31c") +} + +func (StakingEventsMockMigrated) Topic() common.Hash { + return common.HexToHash("0x667838b33bdc898470de09e0e746990f2adc11b965b7fe6828e502ebc39e0434") +} + +func (StakingEventsMockMigrationTargetAccepted) Topic() common.Hash { + return common.HexToHash("0xfa33c052bbee754f3c0482a89962daffe749191fa33c696a61e947fbfd68bd84") +} + +func (StakingEventsMockMigrationTargetProposed) Topic() common.Hash { + return common.HexToHash("0x5c74c441be501340b2713817a6c6975e6f3d4a4ae39fa1ac0bf75d3c54a0cad3") +} + +func (StakingEventsMockOperatorAdded) Topic() common.Hash { + return common.HexToHash("0xac6fa858e9350a46cec16539926e0fde25b7629f84b5a72bffaae4df888ae86d") +} + +func (StakingEventsMockOperatorRemoved) Topic() common.Hash { + return common.HexToHash("0x2360404a74478febece1a14f11275f22ada88d19ef96f7d785913010bfff4479") +} + +func (StakingEventsMockOwnershipTransferRequested) Topic() common.Hash 
{ + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (StakingEventsMockOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (StakingEventsMockPaused) Topic() common.Hash { + return common.HexToHash("0x62e78cea01bee320cd4e420270b5ea74000d11b0c9f74754ebdbfc544b05a258") +} + +func (StakingEventsMockPoolConcluded) Topic() common.Hash { + return common.HexToHash("0xf7d0e0f15586495da8c687328ead30fb829d9da55538cb0ef73dd229e517cdb8") +} + +func (StakingEventsMockPoolOpened) Topic() common.Hash { + return common.HexToHash("0xded6ebf04e261e1eb2f3e3b268a2e6aee5b478c15b341eba5cf18b9bc80c2e63") +} + +func (StakingEventsMockPoolSizeIncreased) Topic() common.Hash { + return common.HexToHash("0x7f4f497e086b2eb55f8a9885ba00d33399bbe0ebcb92ea092834386435a1b9c0") +} + +func (StakingEventsMockRewardAdded) Topic() common.Hash { + return common.HexToHash("0xde88a922e0d3b88b24e9623efeb464919c6bf9f66857a65e2bfcf2ce87a9433d") +} + +func (StakingEventsMockRewardInitialized) Topic() common.Hash { + return common.HexToHash("0x125fc8494f786b470e3c39d0932a62e9e09e291ebd81ea19c57604f6d2b1d167") +} + +func (StakingEventsMockRewardRateChanged) Topic() common.Hash { + return common.HexToHash("0x1e3be2efa25bca5bff2215c7b30b31086e703d6aa7d9b9a1f8ba62c5291219ad") +} + +func (StakingEventsMockRewardSlashed) Topic() common.Hash { + return common.HexToHash("0x00635ea9da6e262e92bb713d71840af7c567807ff35bf73e927490c612832480") +} + +func (StakingEventsMockRewardWithdrawn) Topic() common.Hash { + return common.HexToHash("0x150a6ec0e6f4e9ddcaaaa1674f157d91165a42d60653016f87a9fc870a39f050") +} + +func (StakingEventsMockStaked) Topic() common.Hash { + return common.HexToHash("0x1449c6dd7851abc30abf37f57715f492010519147cc2652fbc38202c18a6ee90") +} + +func (StakingEventsMockUnpaused) Topic() common.Hash { + return 
common.HexToHash("0x5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd38aeae4b073aa") +} + +func (StakingEventsMockUnstaked) Topic() common.Hash { + return common.HexToHash("0x204fccf0d92ed8d48f204adb39b2e81e92bad0dedb93f5716ca9478cfb57de00") +} + +func (_StakingEventsMock *StakingEventsMock) Address() common.Address { + return _StakingEventsMock.address +} + +type StakingEventsMockInterface interface { + EmitAlertRaised(opts *bind.TransactOpts, alerter common.Address, roundId *big.Int, rewardAmount *big.Int) (*types.Transaction, error) + + EmitFeedOperatorsSet(opts *bind.TransactOpts, feedOperators []common.Address) (*types.Transaction, error) + + EmitMaxCommunityStakeAmountIncreased(opts *bind.TransactOpts, maxStakeAmount *big.Int) (*types.Transaction, error) + + EmitMaxOperatorStakeAmountIncreased(opts *bind.TransactOpts, maxStakeAmount *big.Int) (*types.Transaction, error) + + EmitMerkleRootChanged(opts *bind.TransactOpts, newMerkleRoot [32]byte) (*types.Transaction, error) + + EmitMigrated(opts *bind.TransactOpts, staker common.Address, principal *big.Int, baseReward *big.Int, delegationReward *big.Int, data []byte) (*types.Transaction, error) + + EmitMigrationTargetAccepted(opts *bind.TransactOpts, migrationTarget common.Address) (*types.Transaction, error) + + EmitMigrationTargetProposed(opts *bind.TransactOpts, migrationTarget common.Address) (*types.Transaction, error) + + EmitOperatorAdded(opts *bind.TransactOpts, operator common.Address) (*types.Transaction, error) + + EmitOperatorRemoved(opts *bind.TransactOpts, operator common.Address, amount *big.Int) (*types.Transaction, error) + + EmitOwnershipTransferRequested(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitOwnershipTransferred(opts *bind.TransactOpts, from common.Address, to common.Address) (*types.Transaction, error) + + EmitPaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) + + EmitPoolConcluded(opts 
*bind.TransactOpts) (*types.Transaction, error) + + EmitPoolOpened(opts *bind.TransactOpts) (*types.Transaction, error) + + EmitPoolSizeIncreased(opts *bind.TransactOpts, maxPoolSize *big.Int) (*types.Transaction, error) + + EmitRewardAdded(opts *bind.TransactOpts, amountAdded *big.Int) (*types.Transaction, error) + + EmitRewardInitialized(opts *bind.TransactOpts, rate *big.Int, available *big.Int, startTimestamp *big.Int, endTimestamp *big.Int) (*types.Transaction, error) + + EmitRewardRateChanged(opts *bind.TransactOpts, rate *big.Int) (*types.Transaction, error) + + EmitRewardSlashed(opts *bind.TransactOpts, operator []common.Address, slashedBaseRewards []*big.Int, slashedDelegatedRewards []*big.Int) (*types.Transaction, error) + + EmitRewardWithdrawn(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) + + EmitStaked(opts *bind.TransactOpts, staker common.Address, newStake *big.Int, totalStake *big.Int) (*types.Transaction, error) + + EmitUnpaused(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) + + EmitUnstaked(opts *bind.TransactOpts, staker common.Address, principal *big.Int, baseReward *big.Int, delegationReward *big.Int) (*types.Transaction, error) + + FilterAlertRaised(opts *bind.FilterOpts) (*StakingEventsMockAlertRaisedIterator, error) + + WatchAlertRaised(opts *bind.WatchOpts, sink chan<- *StakingEventsMockAlertRaised) (event.Subscription, error) + + ParseAlertRaised(log types.Log) (*StakingEventsMockAlertRaised, error) + + FilterFeedOperatorsSet(opts *bind.FilterOpts) (*StakingEventsMockFeedOperatorsSetIterator, error) + + WatchFeedOperatorsSet(opts *bind.WatchOpts, sink chan<- *StakingEventsMockFeedOperatorsSet) (event.Subscription, error) + + ParseFeedOperatorsSet(log types.Log) (*StakingEventsMockFeedOperatorsSet, error) + + FilterMaxCommunityStakeAmountIncreased(opts *bind.FilterOpts) (*StakingEventsMockMaxCommunityStakeAmountIncreasedIterator, error) + + WatchMaxCommunityStakeAmountIncreased(opts 
*bind.WatchOpts, sink chan<- *StakingEventsMockMaxCommunityStakeAmountIncreased) (event.Subscription, error) + + ParseMaxCommunityStakeAmountIncreased(log types.Log) (*StakingEventsMockMaxCommunityStakeAmountIncreased, error) + + FilterMaxOperatorStakeAmountIncreased(opts *bind.FilterOpts) (*StakingEventsMockMaxOperatorStakeAmountIncreasedIterator, error) + + WatchMaxOperatorStakeAmountIncreased(opts *bind.WatchOpts, sink chan<- *StakingEventsMockMaxOperatorStakeAmountIncreased) (event.Subscription, error) + + ParseMaxOperatorStakeAmountIncreased(log types.Log) (*StakingEventsMockMaxOperatorStakeAmountIncreased, error) + + FilterMerkleRootChanged(opts *bind.FilterOpts) (*StakingEventsMockMerkleRootChangedIterator, error) + + WatchMerkleRootChanged(opts *bind.WatchOpts, sink chan<- *StakingEventsMockMerkleRootChanged) (event.Subscription, error) + + ParseMerkleRootChanged(log types.Log) (*StakingEventsMockMerkleRootChanged, error) + + FilterMigrated(opts *bind.FilterOpts) (*StakingEventsMockMigratedIterator, error) + + WatchMigrated(opts *bind.WatchOpts, sink chan<- *StakingEventsMockMigrated) (event.Subscription, error) + + ParseMigrated(log types.Log) (*StakingEventsMockMigrated, error) + + FilterMigrationTargetAccepted(opts *bind.FilterOpts) (*StakingEventsMockMigrationTargetAcceptedIterator, error) + + WatchMigrationTargetAccepted(opts *bind.WatchOpts, sink chan<- *StakingEventsMockMigrationTargetAccepted) (event.Subscription, error) + + ParseMigrationTargetAccepted(log types.Log) (*StakingEventsMockMigrationTargetAccepted, error) + + FilterMigrationTargetProposed(opts *bind.FilterOpts) (*StakingEventsMockMigrationTargetProposedIterator, error) + + WatchMigrationTargetProposed(opts *bind.WatchOpts, sink chan<- *StakingEventsMockMigrationTargetProposed) (event.Subscription, error) + + ParseMigrationTargetProposed(log types.Log) (*StakingEventsMockMigrationTargetProposed, error) + + FilterOperatorAdded(opts *bind.FilterOpts) 
(*StakingEventsMockOperatorAddedIterator, error) + + WatchOperatorAdded(opts *bind.WatchOpts, sink chan<- *StakingEventsMockOperatorAdded) (event.Subscription, error) + + ParseOperatorAdded(log types.Log) (*StakingEventsMockOperatorAdded, error) + + FilterOperatorRemoved(opts *bind.FilterOpts) (*StakingEventsMockOperatorRemovedIterator, error) + + WatchOperatorRemoved(opts *bind.WatchOpts, sink chan<- *StakingEventsMockOperatorRemoved) (event.Subscription, error) + + ParseOperatorRemoved(log types.Log) (*StakingEventsMockOperatorRemoved, error) + + FilterOwnershipTransferRequested(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*StakingEventsMockOwnershipTransferRequestedIterator, error) + + WatchOwnershipTransferRequested(opts *bind.WatchOpts, sink chan<- *StakingEventsMockOwnershipTransferRequested, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferRequested(log types.Log) (*StakingEventsMockOwnershipTransferRequested, error) + + FilterOwnershipTransferred(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*StakingEventsMockOwnershipTransferredIterator, error) + + WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *StakingEventsMockOwnershipTransferred, from []common.Address, to []common.Address) (event.Subscription, error) + + ParseOwnershipTransferred(log types.Log) (*StakingEventsMockOwnershipTransferred, error) + + FilterPaused(opts *bind.FilterOpts) (*StakingEventsMockPausedIterator, error) + + WatchPaused(opts *bind.WatchOpts, sink chan<- *StakingEventsMockPaused) (event.Subscription, error) + + ParsePaused(log types.Log) (*StakingEventsMockPaused, error) + + FilterPoolConcluded(opts *bind.FilterOpts) (*StakingEventsMockPoolConcludedIterator, error) + + WatchPoolConcluded(opts *bind.WatchOpts, sink chan<- *StakingEventsMockPoolConcluded) (event.Subscription, error) + + ParsePoolConcluded(log types.Log) (*StakingEventsMockPoolConcluded, error) + + 
FilterPoolOpened(opts *bind.FilterOpts) (*StakingEventsMockPoolOpenedIterator, error) + + WatchPoolOpened(opts *bind.WatchOpts, sink chan<- *StakingEventsMockPoolOpened) (event.Subscription, error) + + ParsePoolOpened(log types.Log) (*StakingEventsMockPoolOpened, error) + + FilterPoolSizeIncreased(opts *bind.FilterOpts) (*StakingEventsMockPoolSizeIncreasedIterator, error) + + WatchPoolSizeIncreased(opts *bind.WatchOpts, sink chan<- *StakingEventsMockPoolSizeIncreased) (event.Subscription, error) + + ParsePoolSizeIncreased(log types.Log) (*StakingEventsMockPoolSizeIncreased, error) + + FilterRewardAdded(opts *bind.FilterOpts) (*StakingEventsMockRewardAddedIterator, error) + + WatchRewardAdded(opts *bind.WatchOpts, sink chan<- *StakingEventsMockRewardAdded) (event.Subscription, error) + + ParseRewardAdded(log types.Log) (*StakingEventsMockRewardAdded, error) + + FilterRewardInitialized(opts *bind.FilterOpts) (*StakingEventsMockRewardInitializedIterator, error) + + WatchRewardInitialized(opts *bind.WatchOpts, sink chan<- *StakingEventsMockRewardInitialized) (event.Subscription, error) + + ParseRewardInitialized(log types.Log) (*StakingEventsMockRewardInitialized, error) + + FilterRewardRateChanged(opts *bind.FilterOpts) (*StakingEventsMockRewardRateChangedIterator, error) + + WatchRewardRateChanged(opts *bind.WatchOpts, sink chan<- *StakingEventsMockRewardRateChanged) (event.Subscription, error) + + ParseRewardRateChanged(log types.Log) (*StakingEventsMockRewardRateChanged, error) + + FilterRewardSlashed(opts *bind.FilterOpts) (*StakingEventsMockRewardSlashedIterator, error) + + WatchRewardSlashed(opts *bind.WatchOpts, sink chan<- *StakingEventsMockRewardSlashed) (event.Subscription, error) + + ParseRewardSlashed(log types.Log) (*StakingEventsMockRewardSlashed, error) + + FilterRewardWithdrawn(opts *bind.FilterOpts) (*StakingEventsMockRewardWithdrawnIterator, error) + + WatchRewardWithdrawn(opts *bind.WatchOpts, sink chan<- *StakingEventsMockRewardWithdrawn) 
(event.Subscription, error) + + ParseRewardWithdrawn(log types.Log) (*StakingEventsMockRewardWithdrawn, error) + + FilterStaked(opts *bind.FilterOpts) (*StakingEventsMockStakedIterator, error) + + WatchStaked(opts *bind.WatchOpts, sink chan<- *StakingEventsMockStaked) (event.Subscription, error) + + ParseStaked(log types.Log) (*StakingEventsMockStaked, error) + + FilterUnpaused(opts *bind.FilterOpts) (*StakingEventsMockUnpausedIterator, error) + + WatchUnpaused(opts *bind.WatchOpts, sink chan<- *StakingEventsMockUnpaused) (event.Subscription, error) + + ParseUnpaused(log types.Log) (*StakingEventsMockUnpaused, error) + + FilterUnstaked(opts *bind.FilterOpts) (*StakingEventsMockUnstakedIterator, error) + + WatchUnstaked(opts *bind.WatchOpts, sink chan<- *StakingEventsMockUnstaked) (event.Subscription, error) + + ParseUnstaked(log types.Log) (*StakingEventsMockUnstaked, error) + + ParseLog(log types.Log) (generated.AbigenLog, error) + + Address() common.Address +} diff --git a/integration-tests/contracts/ethereum_contracts.go b/integration-tests/contracts/ethereum_contracts.go new file mode 100644 index 00000000..b0636473 --- /dev/null +++ b/integration-tests/contracts/ethereum_contracts.go @@ -0,0 +1,2521 @@ +package contracts + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "strings" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/goplugin/libocr/gethwrappers/offchainaggregator" + "github.com/goplugin/libocr/gethwrappers2/ocr2aggregator" + ocrConfigHelper "github.com/goplugin/libocr/offchainreporting/confighelper" + ocrTypes "github.com/goplugin/libocr/offchainreporting/types" + + "github.com/goplugin/plugin-testing-framework/blockchain" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_coordinator" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_load_test_client" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_router" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/functions/generated/functions_v1_events_mock" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/authorized_forwarder" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flags_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/functions_billing_registry_events_mock" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/functions_oracle_events_mock" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/gas_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/gas_wrapper_mock" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registrar_wrapper1_2_mock" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1_mock" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/link_token_interface" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_aggregator_proxy" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_ethlink_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/mock_gas_aggregator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/operator_factory" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/operator_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/oracle_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/test_api_consumer_wrapper" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/fee_manager" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/reward_manager" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/verifier" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/shared/generated/werc20_mock" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + eth_contracts "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" +) + +// EthereumOracle oracle for "directrequest" job tests +type EthereumOracle struct { + address *common.Address + client blockchain.EVMClient + oracle *oracle_wrapper.Oracle +} + +func (e *EthereumOracle) Address() string { + return e.address.Hex() +} + +func (e *EthereumOracle) Fund(ethAmount *big.Float) error { + gasEstimates, err := e.client.EstimateGas(ethereum.CallMsg{ + To: e.address, + }) + if err != nil { + return err + } + return e.client.Fund(e.address.Hex(), ethAmount, gasEstimates) +} + +// SetFulfillmentPermission sets fulfillment permission for particular address +func (e *EthereumOracle) SetFulfillmentPermission(address string, allowed bool) error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := e.oracle.SetFulfillmentPermission(opts, common.HexToAddress(address), allowed) + if err != nil { + return err + } + return e.client.ProcessTransaction(tx) +} + +// EthereumAPIConsumer API consumer for job type "directrequest" tests +type EthereumAPIConsumer struct { + address *common.Address + client blockchain.EVMClient + consumer *test_api_consumer_wrapper.TestAPIConsumer +} + +func (e *EthereumAPIConsumer) Address() string { + return e.address.Hex() +} + +func (e *EthereumAPIConsumer) RoundID(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: 
common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: ctx, + } + return e.consumer.CurrentRoundID(opts) +} + +func (e *EthereumAPIConsumer) Fund(ethAmount *big.Float) error { + gasEstimates, err := e.client.EstimateGas(ethereum.CallMsg{ + To: e.address, + }) + if err != nil { + return err + } + return e.client.Fund(e.address.Hex(), ethAmount, gasEstimates) +} + +func (e *EthereumAPIConsumer) Data(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: ctx, + } + data, err := e.consumer.Data(opts) + if err != nil { + return nil, err + } + return data, nil +} + +// CreateRequestTo creates request to an oracle for particular jobID with params +func (e *EthereumAPIConsumer) CreateRequestTo( + oracleAddr string, + jobID [32]byte, + payment *big.Int, + url string, + path string, + times *big.Int, +) error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := e.consumer.CreateRequestTo(opts, common.HexToAddress(oracleAddr), jobID, payment, url, path, times) + if err != nil { + return err + } + return e.client.ProcessTransaction(tx) +} + +// EthereumStaking +type EthereumStaking struct { + client blockchain.EVMClient + staking *eth_contracts.Staking + address *common.Address +} + +func (f *EthereumStaking) Address() string { + return f.address.Hex() +} + +// Fund sends specified currencies to the contract +func (f *EthereumStaking) Fund(ethAmount *big.Float) error { + gasEstimates, err := f.client.EstimateGas(ethereum.CallMsg{ + To: f.address, + }) + if err != nil { + return err + } + return f.client.Fund(f.address.Hex(), ethAmount, gasEstimates) +} + +func (f *EthereumStaking) AddOperators(operators []common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.staking.AddOperators(opts, operators) + if err != nil { + 
return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumStaking) RemoveOperators(operators []common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.staking.RemoveOperators(opts, operators) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumStaking) SetFeedOperators(operators []common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.staking.SetFeedOperators(opts, operators) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumStaking) RaiseAlert() error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.staking.RaiseAlert(opts) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumStaking) Start(amount *big.Int, initialRewardRate *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.staking.Start(opts, amount, initialRewardRate) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumStaking) SetMerkleRoot(newMerkleRoot [32]byte) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.staking.SetMerkleRoot(opts, newMerkleRoot) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +// EthereumFunctionsOracleEventsMock represents the basic events mock contract +type EthereumFunctionsOracleEventsMock struct { + client blockchain.EVMClient + eventsMock *functions_oracle_events_mock.FunctionsOracleEventsMock + address *common.Address +} + +func (f *EthereumFunctionsOracleEventsMock) Address() string { + return f.address.Hex() +} + +func (f 
*EthereumFunctionsOracleEventsMock) OracleResponse(requestId [32]byte) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitOracleResponse(opts, requestId) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsOracleEventsMock) OracleRequest(requestId [32]byte, requestingContract common.Address, requestInitiator common.Address, subscriptionId uint64, subscriptionOwner common.Address, data []byte) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitOracleRequest(opts, requestId, requestingContract, requestInitiator, subscriptionId, subscriptionOwner, data) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsOracleEventsMock) UserCallbackError(requestId [32]byte, reason string) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitUserCallbackError(opts, requestId, reason) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsOracleEventsMock) UserCallbackRawError(requestId [32]byte, lowLevelData []byte) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitUserCallbackRawError(opts, requestId, lowLevelData) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +// EthereumFunctionsBillingRegistryEventsMock represents the basic events mock contract +type EthereumFunctionsBillingRegistryEventsMock struct { + client blockchain.EVMClient + eventsMock *functions_billing_registry_events_mock.FunctionsBillingRegistryEventsMock + address *common.Address +} + +func (f *EthereumFunctionsBillingRegistryEventsMock) Address() string { + return 
f.address.Hex() +} + +func (f *EthereumFunctionsBillingRegistryEventsMock) SubscriptionFunded(subscriptionId uint64, oldBalance *big.Int, newBalance *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitSubscriptionFunded(opts, subscriptionId, oldBalance, newBalance) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsBillingRegistryEventsMock) BillingStart(requestId [32]byte, commitment functions_billing_registry_events_mock.FunctionsBillingRegistryEventsMockCommitment) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitBillingStart(opts, requestId, commitment) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsBillingRegistryEventsMock) BillingEnd(requestId [32]byte, subscriptionId uint64, signerPayment *big.Int, transmitterPayment *big.Int, totalCost *big.Int, success bool) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitBillingEnd(opts, requestId, subscriptionId, signerPayment, transmitterPayment, totalCost, success) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +// EthereumStakingEventsMock represents the basic events mock contract +type EthereumStakingEventsMock struct { + client blockchain.EVMClient + eventsMock *eth_contracts.StakingEventsMock + address *common.Address +} + +func (f *EthereumStakingEventsMock) Address() string { + return f.address.Hex() +} + +func (f *EthereumStakingEventsMock) MaxCommunityStakeAmountIncreased(maxStakeAmount *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitMaxCommunityStakeAmountIncreased(opts, 
maxStakeAmount) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumStakingEventsMock) PoolSizeIncreased(maxPoolSize *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitPoolSizeIncreased(opts, maxPoolSize) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumStakingEventsMock) MaxOperatorStakeAmountIncreased(maxStakeAmount *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitMaxOperatorStakeAmountIncreased(opts, maxStakeAmount) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumStakingEventsMock) RewardInitialized(rate *big.Int, available *big.Int, startTimestamp *big.Int, endTimestamp *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitRewardInitialized(opts, rate, available, startTimestamp, endTimestamp) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumStakingEventsMock) AlertRaised(alerter common.Address, roundId *big.Int, rewardAmount *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitAlertRaised(opts, alerter, roundId, rewardAmount) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumStakingEventsMock) Staked(staker common.Address, newStake *big.Int, totalStake *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitStaked(opts, staker, newStake, totalStake) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + 
+func (f *EthereumStakingEventsMock) OperatorAdded(operator common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitOperatorAdded(opts, operator) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumStakingEventsMock) OperatorRemoved(operator common.Address, amount *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitOperatorRemoved(opts, operator, amount) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumStakingEventsMock) FeedOperatorsSet(feedOperators []common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitFeedOperatorsSet(opts, feedOperators) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +// EthereumOffchainAggregatorEventsMock represents the basic events mock contract +type EthereumOffchainAggregatorEventsMock struct { + client blockchain.EVMClient + eventsMock *eth_contracts.OffchainAggregatorEventsMock + address *common.Address +} + +func (f *EthereumOffchainAggregatorEventsMock) Address() string { + return f.address.Hex() +} + +func (f *EthereumOffchainAggregatorEventsMock) ConfigSet(previousConfigBlockNumber uint32, configCount uint64, signers []common.Address, transmitters []common.Address, threshold uint8, encodedConfigVersion uint64, encoded []byte) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitConfigSet(opts, previousConfigBlockNumber, configCount, signers, transmitters, threshold, encodedConfigVersion, encoded) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f 
*EthereumOffchainAggregatorEventsMock) NewTransmission(aggregatorRoundId uint32, answer *big.Int, transmitter common.Address, observations []*big.Int, observers []byte, rawReportContext [32]byte) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitNewTransmission(opts, aggregatorRoundId, answer, transmitter, observations, observers, rawReportContext) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +// EthereumKeeperRegistry11Mock represents the basic keeper registry 1.1 mock contract +type EthereumKeeperRegistry11Mock struct { + client blockchain.EVMClient + registryMock *keeper_registry_wrapper1_1_mock.KeeperRegistryMock + address *common.Address +} + +func (f *EthereumKeeperRegistry11Mock) Address() string { + return f.address.Hex() +} + +func (f *EthereumKeeperRegistry11Mock) EmitUpkeepPerformed(id *big.Int, success bool, from common.Address, payment *big.Int, performData []byte) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.EmitUpkeepPerformed(opts, id, success, from, payment, performData) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) EmitUpkeepCanceled(id *big.Int, atBlockHeight uint64) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.EmitUpkeepCanceled(opts, id, atBlockHeight) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) EmitFundsWithdrawn(id *big.Int, amount *big.Int, to common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.EmitFundsWithdrawn(opts, id, amount, to) + if err != nil { + return err + } + return 
f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) EmitKeepersUpdated(keepers []common.Address, payees []common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.EmitKeepersUpdated(opts, keepers, payees) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) EmitUpkeepRegistered(id *big.Int, executeGas uint32, admin common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.EmitUpkeepRegistered(opts, id, executeGas, admin) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) EmitFundsAdded(id *big.Int, from common.Address, amount *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.EmitFundsAdded(opts, id, from, amount) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) SetUpkeepCount(_upkeepCount *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.SetUpkeepCount(opts, _upkeepCount) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) SetCanceledUpkeepList(_canceledUpkeepList []*big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.SetCanceledUpkeepList(opts, _canceledUpkeepList) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) SetKeeperList(_keepers []common.Address) error { + opts, err := 
f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.SetKeeperList(opts, _keepers) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) SetConfig(_paymentPremiumPPB uint32, _flatFeeMicroLink uint32, _blockCountPerTurn *big.Int, _checkGasLimit uint32, _stalenessSeconds *big.Int, _gasCeilingMultiplier uint16, _fallbackGasPrice *big.Int, _fallbackLinkPrice *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.SetConfig(opts, _paymentPremiumPPB, _flatFeeMicroLink, _blockCountPerTurn, _checkGasLimit, _stalenessSeconds, _gasCeilingMultiplier, _fallbackGasPrice, _fallbackLinkPrice) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) SetUpkeep(id *big.Int, _target common.Address, _executeGas uint32, _balance *big.Int, _admin common.Address, _maxValidBlocknumber uint64, _lastKeeper common.Address, _checkData []byte) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.SetUpkeep(opts, id, _target, _executeGas, _balance, _admin, _maxValidBlocknumber, _lastKeeper, _checkData) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) SetMinBalance(id *big.Int, minBalance *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.SetMinBalance(opts, id, minBalance) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) SetCheckUpkeepData(id *big.Int, performData []byte, maxLinkPayment *big.Int, gasLimit *big.Int, adjustedGasWei *big.Int, linkEth *big.Int) error { + opts, err := 
f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.SetCheckUpkeepData(opts, id, performData, maxLinkPayment, gasLimit, adjustedGasWei, linkEth) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistry11Mock) SetPerformUpkeepSuccess(id *big.Int, success bool) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registryMock.SetPerformUpkeepSuccess(opts, id, success) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +// EthereumKeeperRegistrar12Mock represents the basic keeper registrar 1.2 mock contract +type EthereumKeeperRegistrar12Mock struct { + client blockchain.EVMClient + registrarMock *keeper_registrar_wrapper1_2_mock.KeeperRegistrarMock + address *common.Address +} + +func (f *EthereumKeeperRegistrar12Mock) Address() string { + return f.address.Hex() +} + +func (f *EthereumKeeperRegistrar12Mock) EmitRegistrationRequested(hash [32]byte, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registrarMock.EmitRegistrationRequested(opts, hash, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumKeeperRegistrar12Mock) EmitRegistrationApproved(hash [32]byte, displayName string, upkeepId *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registrarMock.EmitRegistrationApproved(opts, hash, displayName, upkeepId) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + 
+func (f *EthereumKeeperRegistrar12Mock) SetRegistrationConfig(_autoApproveConfigType uint8, _autoApproveMaxAllowed uint32, _approvedCount uint32, _keeperRegistry common.Address, _minPLIJuels *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.registrarMock.SetRegistrationConfig(opts, _autoApproveConfigType, _autoApproveMaxAllowed, _approvedCount, _keeperRegistry, _minPLIJuels) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +// EthereumKeeperGasWrapperMock represents the basic keeper gas wrapper mock contract +type EthereumKeeperGasWrapperMock struct { + client blockchain.EVMClient + gasWrapperMock *gas_wrapper_mock.KeeperRegistryCheckUpkeepGasUsageWrapperMock + address *common.Address +} + +func (f *EthereumKeeperGasWrapperMock) Address() string { + return f.address.Hex() +} + +func (f *EthereumKeeperGasWrapperMock) SetMeasureCheckGasResult(result bool, payload []byte, gas *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.gasWrapperMock.SetMeasureCheckGasResult(opts, result, payload, gas) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +// EthereumFunctionsV1EventsMock represents the basic functions v1 events mock contract +type EthereumFunctionsV1EventsMock struct { + client blockchain.EVMClient + eventsMock *functions_v1_events_mock.FunctionsV1EventsMock + address *common.Address +} + +func (f *EthereumFunctionsV1EventsMock) Address() string { + return f.address.Hex() +} + +func (f *EthereumFunctionsV1EventsMock) EmitRequestProcessed(requestId [32]byte, subscriptionId uint64, totalCostJuels *big.Int, transmitter common.Address, resultCode uint8, response []byte, errByte []byte, callbackReturnData []byte) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := 
f.eventsMock.EmitRequestProcessed(opts, requestId, subscriptionId, totalCostJuels, transmitter, resultCode, response, errByte, callbackReturnData) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsV1EventsMock) EmitRequestStart(requestId [32]byte, donId [32]byte, subscriptionId uint64, subscriptionOwner common.Address, requestingContract common.Address, requestInitiator common.Address, data []byte, dataVersion uint16, callbackGasLimit uint32, estimatedTotalCostJuels *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitRequestStart(opts, requestId, donId, subscriptionId, subscriptionOwner, requestingContract, requestInitiator, data, dataVersion, callbackGasLimit, estimatedTotalCostJuels) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsV1EventsMock) EmitSubscriptionCanceled(subscriptionId uint64, fundsRecipient common.Address, fundsAmount *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitSubscriptionCanceled(opts, subscriptionId, fundsRecipient, fundsAmount) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsV1EventsMock) EmitSubscriptionConsumerAdded(subscriptionId uint64, consumer common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitSubscriptionConsumerAdded(opts, subscriptionId, consumer) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsV1EventsMock) EmitSubscriptionConsumerRemoved(subscriptionId uint64, consumer common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err 
+ } + tx, err := f.eventsMock.EmitSubscriptionConsumerRemoved(opts, subscriptionId, consumer) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsV1EventsMock) EmitSubscriptionCreated(subscriptionId uint64, owner common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitSubscriptionCreated(opts, subscriptionId, owner) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsV1EventsMock) EmitSubscriptionFunded(subscriptionId uint64, oldBalance *big.Int, newBalance *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitSubscriptionFunded(opts, subscriptionId, oldBalance, newBalance) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsV1EventsMock) EmitSubscriptionOwnerTransferred(subscriptionId uint64, from common.Address, to common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitSubscriptionOwnerTransferred(opts, subscriptionId, from, to) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsV1EventsMock) EmitSubscriptionOwnerTransferRequested(subscriptionId uint64, from common.Address, to common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitSubscriptionOwnerTransferRequested(opts, subscriptionId, from, to) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsV1EventsMock) EmitRequestNotProcessed(requestId [32]byte, coordinator common.Address, transmitter common.Address, resultCode uint8) error { + 
opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitRequestNotProcessed(opts, requestId, coordinator, transmitter, resultCode) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFunctionsV1EventsMock) EmitContractUpdated(id [32]byte, from common.Address, to common.Address) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.eventsMock.EmitContractUpdated(opts, id, from, to) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +// EthereumFluxAggregator represents the basic flux aggregation contract +type EthereumFluxAggregator struct { + client blockchain.EVMClient + fluxAggregator *flux_aggregator_wrapper.FluxAggregator + address *common.Address +} + +func (f *EthereumFluxAggregator) Address() string { + return f.address.Hex() +} + +// Fund sends specified currencies to the contract +func (f *EthereumFluxAggregator) Fund(ethAmount *big.Float) error { + gasEstimates, err := f.client.EstimateGas(ethereum.CallMsg{ + To: f.address, + }) + if err != nil { + return err + } + return f.client.Fund(f.address.Hex(), ethAmount, gasEstimates) +} + +func (f *EthereumFluxAggregator) UpdateAvailableFunds() error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.fluxAggregator.UpdateAvailableFunds(opts) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFluxAggregator) PaymentAmount(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(f.client.GetDefaultWallet().Address()), + Context: ctx, + } + payment, err := f.fluxAggregator.PaymentAmount(opts) + if err != nil { + return nil, err + } + return payment, nil +} + +func (f *EthereumFluxAggregator) RequestNewRound(_ context.Context) error { + 
opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.fluxAggregator.RequestNewRound(opts) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +// WatchSubmissionReceived subscribes to any submissions on a flux feed +func (f *EthereumFluxAggregator) WatchSubmissionReceived(ctx context.Context, eventChan chan<- *SubmissionEvent) error { + ethEventChan := make(chan *flux_aggregator_wrapper.FluxAggregatorSubmissionReceived) + sub, err := f.fluxAggregator.WatchSubmissionReceived(&bind.WatchOpts{}, ethEventChan, nil, nil, nil) + if err != nil { + return err + } + defer sub.Unsubscribe() + + for { + select { + case event := <-ethEventChan: + eventChan <- &SubmissionEvent{ + Contract: event.Raw.Address, + Submission: event.Submission, + Round: event.Round, + BlockNumber: event.Raw.BlockNumber, + Oracle: event.Oracle, + } + case err := <-sub.Err(): + return err + case <-ctx.Done(): + return nil + } + } +} + +func (f *EthereumFluxAggregator) SetRequesterPermissions(_ context.Context, addr common.Address, authorized bool, roundsDelay uint32) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.fluxAggregator.SetRequesterPermissions(opts, addr, authorized, roundsDelay) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFluxAggregator) GetOracles(ctx context.Context) ([]string, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(f.client.GetDefaultWallet().Address()), + Context: ctx, + } + addresses, err := f.fluxAggregator.GetOracles(opts) + if err != nil { + return nil, err + } + var oracleAddrs []string + for _, o := range addresses { + oracleAddrs = append(oracleAddrs, o.Hex()) + } + return oracleAddrs, nil +} + +func (f *EthereumFluxAggregator) LatestRoundID(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: 
common.HexToAddress(f.client.GetDefaultWallet().Address()), + Context: ctx, + } + rID, err := f.fluxAggregator.LatestRound(opts) + if err != nil { + return nil, err + } + return rID, nil +} + +func (f *EthereumFluxAggregator) WithdrawPayment( + _ context.Context, + from common.Address, + to common.Address, + amount *big.Int) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := f.fluxAggregator.WithdrawPayment(opts, from, to, amount) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +func (f *EthereumFluxAggregator) WithdrawablePayment(ctx context.Context, addr common.Address) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(f.client.GetDefaultWallet().Address()), + Context: ctx, + } + balance, err := f.fluxAggregator.WithdrawablePayment(opts, addr) + if err != nil { + return nil, err + } + return balance, nil +} + +func (f *EthereumFluxAggregator) LatestRoundData(ctx context.Context) (flux_aggregator_wrapper.LatestRoundData, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(f.client.GetDefaultWallet().Address()), + Context: ctx, + } + lr, err := f.fluxAggregator.LatestRoundData(opts) + if err != nil { + return flux_aggregator_wrapper.LatestRoundData{}, err + } + return lr, nil +} + +// GetContractData retrieves basic data for the flux aggregator contract +func (f *EthereumFluxAggregator) GetContractData(ctx context.Context) (*FluxAggregatorData, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(f.client.GetDefaultWallet().Address()), + Context: ctx, + } + + allocated, err := f.fluxAggregator.AllocatedFunds(opts) + if err != nil { + return &FluxAggregatorData{}, err + } + + available, err := f.fluxAggregator.AvailableFunds(opts) + if err != nil { + return &FluxAggregatorData{}, err + } + + lr, err := f.fluxAggregator.LatestRoundData(opts) + if err != nil { + return &FluxAggregatorData{}, err + } + latestRound := 
RoundData(lr) + + oracles, err := f.fluxAggregator.GetOracles(opts) + if err != nil { + return &FluxAggregatorData{}, err + } + + return &FluxAggregatorData{ + AllocatedFunds: allocated, + AvailableFunds: available, + LatestRoundData: latestRound, + Oracles: oracles, + }, nil +} + +// SetOracles allows the ability to add and/or remove oracles from the contract, and to set admins +func (f *EthereumFluxAggregator) SetOracles(o FluxAggregatorSetOraclesOptions) error { + opts, err := f.client.TransactionOpts(f.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := f.fluxAggregator.ChangeOracles(opts, o.RemoveList, o.AddList, o.AdminList, o.MinSubmissions, o.MaxSubmissions, o.RestartDelayRounds) + if err != nil { + return err + } + return f.client.ProcessTransaction(tx) +} + +// Description returns the description of the flux aggregator contract +func (f *EthereumFluxAggregator) Description(ctxt context.Context) (string, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(f.client.GetDefaultWallet().Address()), + Context: ctxt, + } + return f.fluxAggregator.Description(opts) +} + +// FluxAggregatorRoundConfirmer is a header subscription that awaits for a certain flux round to be completed +type FluxAggregatorRoundConfirmer struct { + fluxInstance FluxAggregator + roundID *big.Int + doneChan chan struct{} + context context.Context + cancel context.CancelFunc + complete bool + l zerolog.Logger +} + +// NewFluxAggregatorRoundConfirmer provides a new instance of a FluxAggregatorRoundConfirmer +func NewFluxAggregatorRoundConfirmer( + contract FluxAggregator, + roundID *big.Int, + timeout time.Duration, + logger zerolog.Logger, +) *FluxAggregatorRoundConfirmer { + ctx, ctxCancel := context.WithTimeout(context.Background(), timeout) + return &FluxAggregatorRoundConfirmer{ + fluxInstance: contract, + roundID: roundID, + doneChan: make(chan struct{}), + context: ctx, + cancel: ctxCancel, + l: logger, + } +} + +// ReceiveHeader will query the 
latest FluxAggregator round and check to see whether the round has confirmed +func (f *FluxAggregatorRoundConfirmer) ReceiveHeader(header blockchain.NodeHeader) error { + if f.complete { + return nil + } + lr, err := f.fluxInstance.LatestRoundID(context.Background()) + if err != nil { + return err + } + logFields := map[string]any{ + "Contract Address": f.fluxInstance.Address(), + "Current Round": lr.Int64(), + "Waiting for Round": f.roundID.Int64(), + "Header Number": header.Number.Uint64(), + } + if lr.Cmp(f.roundID) >= 0 { + f.l.Info().Fields(logFields).Msg("FluxAggregator round completed") + f.complete = true + f.doneChan <- struct{}{} + } else { + f.l.Debug().Fields(logFields).Msg("Waiting for FluxAggregator round") + } + return nil +} + +// Wait is a blocking function that will wait until the round has confirmed, and timeout if the deadline has passed +func (f *FluxAggregatorRoundConfirmer) Wait() error { + defer func() { f.complete = true }() + for { + select { + case <-f.doneChan: + f.cancel() + return nil + case <-f.context.Done(): + return fmt.Errorf("timeout waiting for flux round to confirm: %d", f.roundID) + } + } +} + +func (f *FluxAggregatorRoundConfirmer) Complete() bool { + return f.complete +} + +// EthereumLinkToken represents a LinkToken address +type EthereumLinkToken struct { + client blockchain.EVMClient + instance *link_token_interface.LinkToken + address common.Address + l zerolog.Logger +} + +// Fund the PLI Token contract with ETH to distribute the token +func (l *EthereumLinkToken) Fund(ethAmount *big.Float) error { + gasEstimates, err := l.client.EstimateGas(ethereum.CallMsg{ + To: &l.address, + }) + if err != nil { + return err + } + return l.client.Fund(l.address.Hex(), ethAmount, gasEstimates) +} + +func (l *EthereumLinkToken) BalanceOf(ctx context.Context, addr string) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(l.client.GetDefaultWallet().Address()), + Context: ctx, + } + balance, err := 
l.instance.BalanceOf(opts, common.HexToAddress(addr)) + if err != nil { + return nil, err + } + return balance, nil +} + +// Name returns the name of the link token +func (l *EthereumLinkToken) Name(ctxt context.Context) (string, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(l.client.GetDefaultWallet().Address()), + Context: ctxt, + } + return l.instance.Name(opts) +} + +func (l *EthereumLinkToken) Address() string { + return l.address.Hex() +} + +func (l *EthereumLinkToken) Approve(to string, amount *big.Int) error { + opts, err := l.client.TransactionOpts(l.client.GetDefaultWallet()) + if err != nil { + return err + } + l.l.Info(). + Str("From", l.client.GetDefaultWallet().Address()). + Str("To", to). + Str("Amount", amount.String()). + Uint64("Nonce", opts.Nonce.Uint64()). + Msg("Approving PLI Transfer") + tx, err := l.instance.Approve(opts, common.HexToAddress(to), amount) + if err != nil { + return err + } + return l.client.ProcessTransaction(tx) +} + +func (l *EthereumLinkToken) Transfer(to string, amount *big.Int) error { + opts, err := l.client.TransactionOpts(l.client.GetDefaultWallet()) + if err != nil { + return err + } + l.l.Info(). + Str("From", l.client.GetDefaultWallet().Address()). + Str("To", to). + Str("Amount", amount.String()). + Uint64("Nonce", opts.Nonce.Uint64()). + Msg("Transferring PLI") + tx, err := l.instance.Transfer(opts, common.HexToAddress(to), amount) + if err != nil { + return err + } + return l.client.ProcessTransaction(tx) +} + +func (l *EthereumLinkToken) TransferAndCall(to string, amount *big.Int, data []byte) (*types.Transaction, error) { + opts, err := l.client.TransactionOpts(l.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := l.instance.TransferAndCall(opts, common.HexToAddress(to), amount, data) + if err != nil { + return nil, err + } + l.l.Info(). + Str("From", l.client.GetDefaultWallet().Address()). + Str("To", to). + Str("Amount", amount.String()). 
+ Uint64("Nonce", opts.Nonce.Uint64()). + Str("TxHash", tx.Hash().String()). + Msg("Transferring and Calling PLI") + return tx, l.client.ProcessTransaction(tx) +} + +// EthereumOffchainAggregator represents the offchain aggregation contract +type EthereumOffchainAggregator struct { + client blockchain.EVMClient + ocr *offchainaggregator.OffchainAggregator + address *common.Address + l zerolog.Logger +} + +// Fund sends specified currencies to the contract +func (o *EthereumOffchainAggregator) Fund(ethAmount *big.Float) error { + gasEstimates, err := o.client.EstimateGas(ethereum.CallMsg{ + To: o.address, + }) + if err != nil { + return err + } + return o.client.Fund(o.address.Hex(), ethAmount, gasEstimates) +} + +// GetContractData retrieves basic data for the offchain aggregator contract +func (o *EthereumOffchainAggregator) GetContractData(ctxt context.Context) (*OffchainAggregatorData, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(o.client.GetDefaultWallet().Address()), + Context: ctxt, + } + + lr, err := o.ocr.LatestRoundData(opts) + if err != nil { + return &OffchainAggregatorData{}, err + } + latestRound := RoundData(lr) + + return &OffchainAggregatorData{ + LatestRoundData: latestRound, + }, nil +} + +// SetPayees sets wallets for the contract to pay out to? +func (o *EthereumOffchainAggregator) SetPayees( + transmitters, payees []string, +) error { + opts, err := o.client.TransactionOpts(o.client.GetDefaultWallet()) + if err != nil { + return err + } + var transmittersAddr, payeesAddr []common.Address + for _, tr := range transmitters { + transmittersAddr = append(transmittersAddr, common.HexToAddress(tr)) + } + for _, p := range payees { + payeesAddr = append(payeesAddr, common.HexToAddress(p)) + } + + o.l.Info(). + Str("Transmitters", fmt.Sprintf("%v", transmitters)). + Str("Payees", fmt.Sprintf("%v", payees)). + Str("OCR Address", o.Address()). 
+ Msg("Setting OCR Payees") + + tx, err := o.ocr.SetPayees(opts, transmittersAddr, payeesAddr) + if err != nil { + return err + } + return o.client.ProcessTransaction(tx) +} + +// SetConfig sets the payees and the offchain reporting protocol configuration +func (o *EthereumOffchainAggregator) SetConfig( + pluginNodes []*client.PluginK8sClient, + ocrConfig OffChainAggregatorConfig, + transmitters []common.Address, +) error { + // Gather necessary addresses and keys from our plugin nodes to properly configure the OCR contract + log.Info().Str("Contract Address", o.address.Hex()).Msg("Configuring OCR Contract") + for i, node := range pluginNodes { + ocrKeys, err := node.MustReadOCRKeys() + if err != nil { + return err + } + if len(ocrKeys.Data) == 0 { + return fmt.Errorf("no OCR keys found for node %v", node) + } + primaryOCRKey := ocrKeys.Data[0] + if err != nil { + return err + } + p2pKeys, err := node.MustReadP2PKeys() + if err != nil { + return err + } + primaryP2PKey := p2pKeys.Data[0] + + // Need to convert the key representations + var onChainSigningAddress [20]byte + var configPublicKey [32]byte + offchainSigningAddress, err := hex.DecodeString(primaryOCRKey.Attributes.OffChainPublicKey) + if err != nil { + return err + } + decodeConfigKey, err := hex.DecodeString(primaryOCRKey.Attributes.ConfigPublicKey) + if err != nil { + return err + } + + // https://stackoverflow.com/questions/8032170/how-to-assign-string-to-bytes-array + copy(onChainSigningAddress[:], common.HexToAddress(primaryOCRKey.Attributes.OnChainSigningAddress).Bytes()) + copy(configPublicKey[:], decodeConfigKey) + + oracleIdentity := ocrConfigHelper.OracleIdentity{ + TransmitAddress: transmitters[i], + OnChainSigningAddress: onChainSigningAddress, + PeerID: primaryP2PKey.Attributes.PeerID, + OffchainPublicKey: offchainSigningAddress, + } + oracleIdentityExtra := ocrConfigHelper.OracleIdentityExtra{ + OracleIdentity: oracleIdentity, + SharedSecretEncryptionPublicKey: 
ocrTypes.SharedSecretEncryptionPublicKey(configPublicKey), + } + + ocrConfig.OracleIdentities = append(ocrConfig.OracleIdentities, oracleIdentityExtra) + } + + signers, transmitters, threshold, encodedConfigVersion, encodedConfig, err := ocrConfigHelper.ContractSetConfigArgs( + ocrConfig.DeltaProgress, + ocrConfig.DeltaResend, + ocrConfig.DeltaRound, + ocrConfig.DeltaGrace, + ocrConfig.DeltaC, + ocrConfig.AlphaPPB, + ocrConfig.DeltaStage, + ocrConfig.RMax, + ocrConfig.S, + ocrConfig.OracleIdentities, + ocrConfig.F, + ) + if err != nil { + return err + } + + // Set Config + opts, err := o.client.TransactionOpts(o.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := o.ocr.SetConfig(opts, signers, transmitters, threshold, encodedConfigVersion, encodedConfig) + if err != nil { + return err + } + return o.client.ProcessTransaction(tx) +} + +// RequestNewRound requests the OCR contract to create a new round +func (o *EthereumOffchainAggregator) RequestNewRound() error { + opts, err := o.client.TransactionOpts(o.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := o.ocr.RequestNewRound(opts) + if err != nil { + return err + } + o.l.Info().Str("Contract Address", o.address.Hex()).Msg("New OCR round requested") + + return o.client.ProcessTransaction(tx) +} + +// GetLatestAnswer returns the latest answer from the OCR contract +func (o *EthereumOffchainAggregator) GetLatestAnswer(ctxt context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(o.client.GetDefaultWallet().Address()), + Context: ctxt, + } + return o.ocr.LatestAnswer(opts) +} + +func (o *EthereumOffchainAggregator) Address() string { + return o.address.Hex() +} + +// GetLatestRound returns data from the latest round +func (o *EthereumOffchainAggregator) GetLatestRound(ctx context.Context) (*RoundData, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(o.client.GetDefaultWallet().Address()), + Context: ctx, + } + + roundData, 
err := o.ocr.LatestRoundData(opts) + if err != nil { + return nil, err + } + + return &RoundData{ + RoundId: roundData.RoundId, + Answer: roundData.Answer, + AnsweredInRound: roundData.AnsweredInRound, + StartedAt: roundData.StartedAt, + UpdatedAt: roundData.UpdatedAt, + }, err +} + +func (o *EthereumOffchainAggregator) LatestRoundDataUpdatedAt() (*big.Int, error) { + data, err := o.ocr.LatestRoundData(&bind.CallOpts{ + From: common.HexToAddress(o.client.GetDefaultWallet().Address()), + Context: context.Background(), + }) + if err != nil { + return nil, err + } + return data.UpdatedAt, nil +} + +// GetRound retrieves an OCR round by the round ID +func (o *EthereumOffchainAggregator) GetRound(ctx context.Context, roundID *big.Int) (*RoundData, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(o.client.GetDefaultWallet().Address()), + Context: ctx, + } + roundData, err := o.ocr.GetRoundData(opts, roundID) + if err != nil { + return nil, err + } + + return &RoundData{ + RoundId: roundData.RoundId, + Answer: roundData.Answer, + AnsweredInRound: roundData.AnsweredInRound, + StartedAt: roundData.StartedAt, + UpdatedAt: roundData.UpdatedAt, + }, nil +} + +// ParseEventAnswerUpdated parses the log for event AnswerUpdated +func (o *EthereumOffchainAggregator) ParseEventAnswerUpdated(eventLog types.Log) (*offchainaggregator.OffchainAggregatorAnswerUpdated, error) { + return o.ocr.ParseAnswerUpdated(eventLog) +} + +// RunlogRoundConfirmer is a header subscription that awaits for a certain Runlog round to be completed +type RunlogRoundConfirmer struct { + consumer APIConsumer + roundID *big.Int + doneChan chan struct{} + context context.Context + cancel context.CancelFunc + l zerolog.Logger +} + +// NewRunlogRoundConfirmer provides a new instance of a RunlogRoundConfirmer +func NewRunlogRoundConfirmer( + contract APIConsumer, + roundID *big.Int, + timeout time.Duration, + logger zerolog.Logger, +) *RunlogRoundConfirmer { + ctx, ctxCancel := 
context.WithTimeout(context.Background(), timeout) + return &RunlogRoundConfirmer{ + consumer: contract, + roundID: roundID, + doneChan: make(chan struct{}), + context: ctx, + cancel: ctxCancel, + l: logger, + } +} + +// ReceiveHeader will query the latest Runlog round and check to see whether the round has confirmed +func (o *RunlogRoundConfirmer) ReceiveHeader(_ blockchain.NodeHeader) error { + currentRoundID, err := o.consumer.RoundID(context.Background()) + if err != nil { + return err + } + logFields := map[string]any{ + "Contract Address": o.consumer.Address(), + "Current Round": currentRoundID.Int64(), + "Waiting for Round": o.roundID.Int64(), + } + if currentRoundID.Cmp(o.roundID) >= 0 { + o.l.Info().Fields(logFields).Msg("Runlog round completed") + o.doneChan <- struct{}{} + } else { + o.l.Debug().Fields(logFields).Msg("Waiting for Runlog round") + } + return nil +} + +// Wait is a blocking function that will wait until the round has confirmed, and timeout if the deadline has passed +func (o *RunlogRoundConfirmer) Wait() error { + for { + select { + case <-o.doneChan: + o.cancel() + return nil + case <-o.context.Done(): + return fmt.Errorf("timeout waiting for OCR round to confirm: %d", o.roundID) + } + } +} + +// OffchainAggregatorRoundConfirmer is a header subscription that awaits for a certain OCR round to be completed +type OffchainAggregatorRoundConfirmer struct { + ocrInstance OffchainAggregator + roundID *big.Int + doneChan chan struct{} + context context.Context + cancel context.CancelFunc + blocksSinceAnswer uint + complete bool + l zerolog.Logger +} + +// NewOffchainAggregatorRoundConfirmer provides a new instance of a OffchainAggregatorRoundConfirmer +func NewOffchainAggregatorRoundConfirmer( + contract OffchainAggregator, + roundID *big.Int, + timeout time.Duration, + logger zerolog.Logger, +) *OffchainAggregatorRoundConfirmer { + ctx, ctxCancel := context.WithTimeout(context.Background(), timeout) + return &OffchainAggregatorRoundConfirmer{ + 
ocrInstance: contract, + roundID: roundID, + doneChan: make(chan struct{}), + context: ctx, + cancel: ctxCancel, + complete: false, + l: logger, + } +} + +// ReceiveHeader will query the latest OffchainAggregator round and check to see whether the round has confirmed +func (o *OffchainAggregatorRoundConfirmer) ReceiveHeader(_ blockchain.NodeHeader) error { + if channelClosed(o.doneChan) { + return nil + } + + lr, err := o.ocrInstance.GetLatestRound(context.Background()) + if err != nil { + return err + } + o.blocksSinceAnswer++ + currRound := lr.RoundId + logFields := map[string]any{ + "Contract Address": o.ocrInstance.Address(), + "Current Round": currRound.Int64(), + "Waiting for Round": o.roundID.Int64(), + } + if currRound.Cmp(o.roundID) >= 0 { + o.l.Info().Fields(logFields).Msg("OCR round completed") + o.doneChan <- struct{}{} + o.complete = true + } else { + o.l.Debug().Fields(logFields).Msg("Waiting on OCR Round") + } + return nil +} + +// Wait is a blocking function that will wait until the round has confirmed, and timeout if the deadline has passed +func (o *OffchainAggregatorRoundConfirmer) Wait() error { + defer func() { o.complete = true }() + for { + select { + case <-o.doneChan: + o.cancel() + close(o.doneChan) + return nil + case <-o.context.Done(): + return fmt.Errorf("timeout waiting for OCR round to confirm: %d", o.roundID) + } + } +} + +func (o *OffchainAggregatorRoundConfirmer) Complete() bool { + return o.complete +} + +// OffchainAggregatorRoundConfirmer is a header subscription that awaits for a certain OCR round to be completed +type OffchainAggregatorV2RoundConfirmer struct { + ocrInstance OffchainAggregatorV2 + roundID *big.Int + doneChan chan struct{} + context context.Context + cancel context.CancelFunc + blocksSinceAnswer uint + complete bool + l zerolog.Logger +} + +// NewOffchainAggregatorRoundConfirmer provides a new instance of a OffchainAggregatorRoundConfirmer +func NewOffchainAggregatorV2RoundConfirmer( + contract 
OffchainAggregatorV2, + roundID *big.Int, + timeout time.Duration, + logger zerolog.Logger, +) *OffchainAggregatorV2RoundConfirmer { + ctx, ctxCancel := context.WithTimeout(context.Background(), timeout) + return &OffchainAggregatorV2RoundConfirmer{ + ocrInstance: contract, + roundID: roundID, + doneChan: make(chan struct{}), + context: ctx, + cancel: ctxCancel, + complete: false, + l: logger, + } +} + +// ReceiveHeader will query the latest OffchainAggregator round and check to see whether the round has confirmed +func (o *OffchainAggregatorV2RoundConfirmer) ReceiveHeader(_ blockchain.NodeHeader) error { + if channelClosed(o.doneChan) { + return nil + } + + lr, err := o.ocrInstance.GetLatestRound(context.Background()) + if err != nil { + return err + } + o.blocksSinceAnswer++ + currRound := lr.RoundId + logFields := map[string]any{ + "Contract Address": o.ocrInstance.Address(), + "Current Round": currRound.Int64(), + "Waiting for Round": o.roundID.Int64(), + } + if currRound.Cmp(o.roundID) >= 0 { + o.l.Info().Fields(logFields).Msg("OCR round completed") + o.doneChan <- struct{}{} + o.complete = true + } else { + o.l.Debug().Fields(logFields).Msg("Waiting on OCR Round") + } + return nil +} + +// Wait is a blocking function that will wait until the round has confirmed, and timeout if the deadline has passed +func (o *OffchainAggregatorV2RoundConfirmer) Wait() error { + defer func() { o.complete = true }() + for { + select { + case <-o.doneChan: + o.cancel() + close(o.doneChan) + return nil + case <-o.context.Done(): + return fmt.Errorf("timeout waiting for OCR round to confirm: %d", o.roundID) + } + } +} + +func (o *OffchainAggregatorV2RoundConfirmer) Complete() bool { + return o.complete +} + +// EthereumMockETHPLIFeed represents mocked ETH/PLI feed contract +type EthereumMockETHPLIFeed struct { + client blockchain.EVMClient + feed *mock_ethlink_aggregator_wrapper.MockETHPLIAggregator + address *common.Address +} + +func (v *EthereumMockETHPLIFeed) Address() string 
{ + return v.address.Hex() +} + +func (v *EthereumMockETHPLIFeed) LatestRoundData() (*big.Int, error) { + data, err := v.feed.LatestRoundData(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: context.Background(), + }) + if err != nil { + return nil, err + } + return data.Ans, nil +} + +func (v *EthereumMockETHPLIFeed) LatestRoundDataUpdatedAt() (*big.Int, error) { + data, err := v.feed.LatestRoundData(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: context.Background(), + }) + if err != nil { + return nil, err + } + return data.UpdatedAt, nil +} + +// EthereumMockGASFeed represents mocked Gas feed contract +type EthereumMockGASFeed struct { + client blockchain.EVMClient + feed *mock_gas_aggregator_wrapper.MockGASAggregator + address *common.Address +} + +func (v *EthereumMockGASFeed) Address() string { + return v.address.Hex() +} + +// EthereumFlags represents flags contract +type EthereumFlags struct { + client blockchain.EVMClient + flags *flags_wrapper.Flags + address *common.Address +} + +func (e *EthereumFlags) Address() string { + return e.address.Hex() +} + +// GetFlag returns boolean if a flag was set for particular address +func (e *EthereumFlags) GetFlag(ctx context.Context, addr string) (bool, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: ctx, + } + flag, err := e.flags.GetFlag(opts, common.HexToAddress(addr)) + if err != nil { + return false, err + } + return flag, nil +} + +// EthereumOperatorFactory represents operator factory contract +type EthereumOperatorFactory struct { + address *common.Address + client blockchain.EVMClient + operatorFactory *operator_factory.OperatorFactory +} + +func (e *EthereumOperatorFactory) ParseAuthorizedForwarderCreated(eventLog types.Log) (*operator_factory.OperatorFactoryAuthorizedForwarderCreated, error) { + return 
e.operatorFactory.ParseAuthorizedForwarderCreated(eventLog) +} + +func (e *EthereumOperatorFactory) ParseOperatorCreated(eventLog types.Log) (*operator_factory.OperatorFactoryOperatorCreated, error) { + return e.operatorFactory.ParseOperatorCreated(eventLog) +} + +func (e *EthereumOperatorFactory) Address() string { + return e.address.Hex() +} + +func (e *EthereumOperatorFactory) DeployNewOperatorAndForwarder() (*types.Transaction, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := e.operatorFactory.DeployNewOperatorAndForwarder(opts) + if err != nil { + return nil, err + } + return tx, nil +} + +// EthereumOperator represents operator contract +type EthereumOperator struct { + address common.Address + client blockchain.EVMClient + operator *operator_wrapper.Operator + l zerolog.Logger +} + +func (e *EthereumOperator) Address() string { + return e.address.Hex() +} + +func (e *EthereumOperator) AcceptAuthorizedReceivers(forwarders []common.Address, eoa []common.Address) error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } + e.l.Info(). + Str("ForwardersAddresses", fmt.Sprint(forwarders)). + Str("EoaAddresses", fmt.Sprint(eoa)). 
+ Msg("Accepting Authorized Receivers") + tx, err := e.operator.AcceptAuthorizedReceivers(opts, forwarders, eoa) + if err != nil { + return err + } + return e.client.ProcessTransaction(tx) +} + +// EthereumAuthorizedForwarder represents authorized forwarder contract +type EthereumAuthorizedForwarder struct { + address common.Address + client blockchain.EVMClient + authorizedForwarder *authorized_forwarder.AuthorizedForwarder +} + +// Owner return authorized forwarder owner address +func (e *EthereumAuthorizedForwarder) Owner(ctx context.Context) (string, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: ctx, + } + owner, err := e.authorizedForwarder.Owner(opts) + + return owner.Hex(), err +} + +func (e *EthereumAuthorizedForwarder) GetAuthorizedSenders(ctx context.Context) ([]string, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: ctx, + } + authorizedSenders, err := e.authorizedForwarder.GetAuthorizedSenders(opts) + if err != nil { + return nil, err + } + var sendersAddrs []string + for _, o := range authorizedSenders { + sendersAddrs = append(sendersAddrs, o.Hex()) + } + return sendersAddrs, nil +} + +func (e *EthereumAuthorizedForwarder) Address() string { + return e.address.Hex() +} + +// EthereumMockAggregatorProxy represents mock aggregator proxy contract +type EthereumMockAggregatorProxy struct { + address *common.Address + client blockchain.EVMClient + mockAggregatorProxy *mock_aggregator_proxy.MockAggregatorProxy +} + +func (e *EthereumMockAggregatorProxy) Address() string { + return e.address.Hex() +} + +func (e *EthereumMockAggregatorProxy) UpdateAggregator(aggregator common.Address) error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := e.mockAggregatorProxy.UpdateAggregator(opts, aggregator) + if err != nil { + return err + } + return 
e.client.ProcessTransaction(tx) +} + +func (e *EthereumMockAggregatorProxy) Aggregator() (common.Address, error) { + addr, err := e.mockAggregatorProxy.Aggregator(&bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: context.Background(), + }) + if err != nil { + return common.Address{}, err + } + return addr, nil +} + +func channelClosed(ch <-chan struct{}) bool { + select { + case <-ch: + return true + default: + } + + return false +} + +type EthereumOffchainAggregatorV2 struct { + address *common.Address + client blockchain.EVMClient + contract *ocr2aggregator.OCR2Aggregator + l zerolog.Logger +} + +// OCRv2Config represents the config for the OCRv2 contract +type OCRv2Config struct { + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte +} + +func (e *EthereumOffchainAggregatorV2) Address() string { + return e.address.Hex() +} + +func (e *EthereumOffchainAggregatorV2) Fund(nativeAmount *big.Float) error { + gasEstimates, err := e.client.EstimateGas(ethereum.CallMsg{ + To: e.address, + }) + if err != nil { + return err + } + return e.client.Fund(e.address.Hex(), nativeAmount, gasEstimates) +} + +func (e *EthereumOffchainAggregatorV2) RequestNewRound() error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := e.contract.RequestNewRound(opts) + if err != nil { + return err + } + return e.client.ProcessTransaction(tx) +} + +func (e *EthereumOffchainAggregatorV2) GetLatestAnswer(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: ctx, + } + return e.contract.LatestAnswer(opts) +} + +func (e *EthereumOffchainAggregatorV2) GetLatestRound(ctx context.Context) (*RoundData, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + 
Context: ctx, + } + data, err := e.contract.LatestRoundData(opts) + if err != nil { + return nil, err + } + return &RoundData{ + RoundId: data.RoundId, + StartedAt: data.StartedAt, + UpdatedAt: data.UpdatedAt, + AnsweredInRound: data.AnsweredInRound, + Answer: data.Answer, + }, nil +} + +func (e *EthereumOffchainAggregatorV2) GetRound(ctx context.Context, roundID *big.Int) (*RoundData, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: ctx, + } + data, err := e.contract.GetRoundData(opts, roundID) + if err != nil { + return nil, err + } + return &RoundData{ + RoundId: data.RoundId, + StartedAt: data.StartedAt, + UpdatedAt: data.UpdatedAt, + AnsweredInRound: data.AnsweredInRound, + Answer: data.Answer, + }, nil +} + +func (e *EthereumOffchainAggregatorV2) SetPayees(transmitters, payees []string) error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } + e.l.Info(). + Str("Transmitters", fmt.Sprintf("%v", transmitters)). + Str("Payees", fmt.Sprintf("%v", payees)). + Str("OCRv2 Address", e.Address()). + Msg("Setting OCRv2 Payees") + + var addTransmitters, addrPayees []common.Address + for _, t := range transmitters { + addTransmitters = append(addTransmitters, common.HexToAddress(t)) + } + for _, p := range payees { + addrPayees = append(addrPayees, common.HexToAddress(p)) + } + + tx, err := e.contract.SetPayees(opts, addTransmitters, addrPayees) + if err != nil { + return err + } + return e.client.ProcessTransaction(tx) +} + +func (e *EthereumOffchainAggregatorV2) SetConfig(ocrConfig *OCRv2Config) error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } + e.l.Info(). + Str("Address", e.Address()). + Interface("Signers", ocrConfig.Signers). + Interface("Transmitters", ocrConfig.Transmitters). + Uint8("F", ocrConfig.F). + Bytes("OnchainConfig", ocrConfig.OnchainConfig). 
+ Uint64("OffchainConfigVersion", ocrConfig.OffchainConfigVersion). + Bytes("OffchainConfig", ocrConfig.OffchainConfig). + Msg("Setting OCRv2 Config") + tx, err := e.contract.SetConfig( + opts, + ocrConfig.Signers, + ocrConfig.Transmitters, + ocrConfig.F, + ocrConfig.OnchainConfig, + ocrConfig.OffchainConfigVersion, + ocrConfig.OffchainConfig, + ) + if err != nil { + return err + } + return e.client.ProcessTransaction(tx) +} + +func (e *EthereumOffchainAggregatorV2) GetConfig(ctx context.Context) ([32]byte, uint32, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: ctx, + } + details, err := e.contract.LatestConfigDetails(opts) + if err != nil { + return [32]byte{}, 0, err + } + return details.ConfigDigest, details.BlockNumber, err +} + +func (e *EthereumOffchainAggregatorV2) ParseEventAnswerUpdated(log types.Log) (*ocr2aggregator.OCR2AggregatorAnswerUpdated, error) { + return e.contract.ParseAnswerUpdated(log) +} + +// EthereumKeeperRegistryCheckUpkeepGasUsageWrapper represents a gas wrapper for keeper registry +type EthereumKeeperRegistryCheckUpkeepGasUsageWrapper struct { + address *common.Address + client blockchain.EVMClient + gasUsageWrapper *gas_wrapper.KeeperRegistryCheckUpkeepGasUsageWrapper +} + +func (e *EthereumKeeperRegistryCheckUpkeepGasUsageWrapper) Address() string { + return e.address.Hex() +} + +/* Functions 1_0_0 */ + +type EthereumFunctionsRouter struct { + address common.Address + client blockchain.EVMClient + instance *functions_router.FunctionsRouter + l zerolog.Logger +} + +func (e *EthereumFunctionsRouter) Address() string { + return e.address.Hex() +} + +func (e *EthereumFunctionsRouter) CreateSubscriptionWithConsumer(consumer string) (uint64, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return 0, err + } + tx, err := e.instance.CreateSubscriptionWithConsumer(opts, common.HexToAddress(consumer)) + if err != nil { + return 0, 
err + } + if err := e.client.ProcessTransaction(tx); err != nil { + return 0, err + } + r, err := e.client.GetTxReceipt(tx.Hash()) + if err != nil { + return 0, err + } + for _, l := range r.Logs { + e.l.Info().Interface("Log", common.Bytes2Hex(l.Data)).Send() + } + topicsMap := map[string]interface{}{} + + fabi, err := abi.JSON(strings.NewReader(functions_router.FunctionsRouterABI)) + if err != nil { + return 0, err + } + for _, ev := range fabi.Events { + e.l.Info().Str("EventName", ev.Name).Send() + } + topicOneInputs := abi.Arguments{fabi.Events["SubscriptionCreated"].Inputs[0]} + topicOneHash := []common.Hash{r.Logs[0].Topics[1:][0]} + if err := abi.ParseTopicsIntoMap(topicsMap, topicOneInputs, topicOneHash); err != nil { + return 0, fmt.Errorf("failed to decode topic value, err: %w", err) + } + e.l.Info().Interface("NewTopicsDecoded", topicsMap).Send() + if topicsMap["subscriptionId"] == 0 { + return 0, fmt.Errorf("failed to decode subscription ID after creation") + } + return topicsMap["subscriptionId"].(uint64), nil +} + +type EthereumFunctionsCoordinator struct { + address common.Address + client blockchain.EVMClient + instance *functions_coordinator.FunctionsCoordinator +} + +func (e *EthereumFunctionsCoordinator) GetThresholdPublicKey() ([]byte, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: context.Background(), + } + return e.instance.GetThresholdPublicKey(opts) +} + +func (e *EthereumFunctionsCoordinator) GetDONPublicKey() ([]byte, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: context.Background(), + } + return e.instance.GetDONPublicKey(opts) +} + +func (e *EthereumFunctionsCoordinator) Address() string { + return e.address.Hex() +} + +type EthereumFunctionsLoadTestClient struct { + address common.Address + client blockchain.EVMClient + instance *functions_load_test_client.FunctionsLoadTestClient +} + +func (e 
*EthereumFunctionsLoadTestClient) Address() string { + return e.address.Hex() +} + +type EthereumFunctionsLoadStats struct { + LastRequestID string + LastResponse string + LastError string + Total uint32 + Succeeded uint32 + Errored uint32 + Empty uint32 +} + +func Bytes32ToSlice(a [32]byte) (r []byte) { + r = append(r, a[:]...) + return +} + +func (e *EthereumFunctionsLoadTestClient) GetStats() (*EthereumFunctionsLoadStats, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: context.Background(), + } + lr, lbody, lerr, total, succeeded, errored, empty, err := e.instance.GetStats(opts) + if err != nil { + return nil, err + } + return &EthereumFunctionsLoadStats{ + LastRequestID: string(Bytes32ToSlice(lr)), + LastResponse: string(lbody), + LastError: string(lerr), + Total: total, + Succeeded: succeeded, + Errored: errored, + Empty: empty, + }, nil +} + +func (e *EthereumFunctionsLoadTestClient) ResetStats() error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := e.instance.ResetStats(opts) + if err != nil { + return err + } + return e.client.ProcessTransaction(tx) +} + +func (e *EthereumFunctionsLoadTestClient) SendRequest(times uint32, source string, encryptedSecretsReferences []byte, args []string, subscriptionId uint64, jobId [32]byte) error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := e.instance.SendRequest(opts, times, source, encryptedSecretsReferences, args, subscriptionId, jobId) + if err != nil { + return err + } + return e.client.ProcessTransaction(tx) +} + +func (e *EthereumFunctionsLoadTestClient) SendRequestWithDONHostedSecrets(times uint32, source string, slotID uint8, slotVersion uint64, args []string, subscriptionId uint64, donID [32]byte) error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } 
+ tx, err := e.instance.SendRequestWithDONHostedSecrets(opts, times, source, slotID, slotVersion, args, subscriptionId, donID) + if err != nil { + return err + } + return e.client.ProcessTransaction(tx) +} + +type EthereumMercuryVerifier struct { + address common.Address + client blockchain.EVMClient + instance *verifier.Verifier + l zerolog.Logger +} + +func (e *EthereumMercuryVerifier) Address() common.Address { + return e.address +} + +func (e *EthereumMercuryVerifier) Verify(signedReport []byte, sender common.Address) error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := e.instance.Verify(opts, signedReport, sender) + if err != nil { + return err + } + return e.client.ProcessTransaction(tx) +} + +func (e *EthereumMercuryVerifier) SetConfig(feedId [32]byte, signers []common.Address, offchainTransmitters [][32]byte, f uint8, onchainConfig []byte, offchainConfigVersion uint64, offchainConfig []byte, recipientAddressesAndWeights []verifier.CommonAddressAndWeight) (*types.Transaction, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := e.instance.SetConfig(opts, feedId, signers, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, recipientAddressesAndWeights) + e.l.Info().Err(err).Str("contractAddress", e.address.Hex()).Hex("feedId", feedId[:]).Msg("Called EthereumMercuryVerifier.SetConfig()") + if err != nil { + return nil, err + } + return tx, e.client.ProcessTransaction(tx) +} + +func (e *EthereumMercuryVerifier) LatestConfigDetails(ctx context.Context, feedId [32]byte) (verifier.LatestConfigDetails, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: ctx, + } + d, err := e.instance.LatestConfigDetails(opts, feedId) + e.l.Info().Err(err).Str("contractAddress", e.address.Hex()).Hex("feedId", feedId[:]). + Interface("details", d). 
+ Msg("Called EthereumMercuryVerifier.LatestConfigDetails()") + if err != nil { + return verifier.LatestConfigDetails{}, err + } + return d, nil +} + +type EthereumMercuryVerifierProxy struct { + address common.Address + client blockchain.EVMClient + instance *verifier_proxy.VerifierProxy + l zerolog.Logger +} + +func (e *EthereumMercuryVerifierProxy) Address() common.Address { + return e.address +} + +func (e *EthereumMercuryVerifierProxy) InitializeVerifier(verifierAddress common.Address) (*types.Transaction, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := e.instance.InitializeVerifier(opts, verifierAddress) + e.l.Info().Err(err).Str("contractAddress", e.address.Hex()).Str("verifierAddress", verifierAddress.Hex()). + Msg("Called EthereumMercuryVerifierProxy.InitializeVerifier()") + if err != nil { + return nil, err + } + return tx, e.client.ProcessTransaction(tx) +} + +func (e *EthereumMercuryVerifierProxy) Verify(signedReport []byte, parameterPayload []byte, value *big.Int) (*types.Transaction, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if value != nil { + opts.Value = value + } + if err != nil { + return nil, err + } + tx, err := e.instance.Verify(opts, signedReport, parameterPayload) + if err != nil { + return nil, err + } + return tx, e.client.ProcessTransaction(tx) +} + +func (e *EthereumMercuryVerifierProxy) VerifyBulk(signedReports [][]byte, parameterPayload []byte, value *big.Int) (*types.Transaction, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if value != nil { + opts.Value = value + } + if err != nil { + return nil, err + } + tx, err := e.instance.VerifyBulk(opts, signedReports, parameterPayload) + if err != nil { + return nil, err + } + return tx, e.client.ProcessTransaction(tx) +} + +func (e *EthereumMercuryVerifierProxy) SetFeeManager(feeManager common.Address) (*types.Transaction, error) { + 
opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := e.instance.SetFeeManager(opts, feeManager) + e.l.Info().Err(err).Str("feeManager", feeManager.Hex()).Msg("Called MercuryVerifierProxy.SetFeeManager()") + if err != nil { + return nil, err + } + return tx, e.client.ProcessTransaction(tx) +} + +type EthereumMercuryFeeManager struct { + address common.Address + client blockchain.EVMClient + instance *fee_manager.FeeManager + l zerolog.Logger +} + +func (e *EthereumMercuryFeeManager) Address() common.Address { + return e.address +} + +func (e *EthereumMercuryFeeManager) UpdateSubscriberDiscount(subscriber common.Address, feedId [32]byte, token common.Address, discount uint64) (*types.Transaction, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := e.instance.UpdateSubscriberDiscount(opts, subscriber, feedId, token, discount) + e.l.Info().Err(err).Msg("Called EthereumMercuryFeeManager.UpdateSubscriberDiscount()") + if err != nil { + return nil, err + } + return tx, e.client.ProcessTransaction(tx) +} + +type EthereumMercuryRewardManager struct { + address common.Address + client blockchain.EVMClient + instance *reward_manager.RewardManager + l zerolog.Logger +} + +func (e *EthereumMercuryRewardManager) Address() common.Address { + return e.address +} + +func (e *EthereumMercuryRewardManager) SetFeeManager(feeManager common.Address) (*types.Transaction, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := e.instance.SetFeeManager(opts, feeManager) + e.l.Info().Err(err).Str("feeManager", feeManager.Hex()).Msg("Called EthereumMercuryRewardManager.SetFeeManager()") + if err != nil { + return nil, err + } + return tx, e.client.ProcessTransaction(tx) +} + +type EthereumWERC20Mock struct { + address common.Address + client blockchain.EVMClient + instance 
*werc20_mock.WERC20Mock + l zerolog.Logger +} + +func (e *EthereumWERC20Mock) Address() common.Address { + return e.address +} + +func (e *EthereumWERC20Mock) Approve(to string, amount *big.Int) error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } + e.l.Info(). + Str("From", e.client.GetDefaultWallet().Address()). + Str("To", to). + Str("Amount", amount.String()). + Uint64("Nonce", opts.Nonce.Uint64()). + Msg("Approving PLI Transfer") + tx, err := e.instance.Approve(opts, common.HexToAddress(to), amount) + if err != nil { + return err + } + return e.client.ProcessTransaction(tx) +} + +func (e *EthereumWERC20Mock) BalanceOf(ctx context.Context, addr string) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(e.client.GetDefaultWallet().Address()), + Context: ctx, + } + balance, err := e.instance.BalanceOf(opts, common.HexToAddress(addr)) + if err != nil { + return nil, err + } + return balance, nil +} + +func (e *EthereumWERC20Mock) Transfer(to string, amount *big.Int) error { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return err + } + e.l.Info(). + Str("From", e.client.GetDefaultWallet().Address()). + Str("To", to). + Str("Amount", amount.String()). + Uint64("Nonce", opts.Nonce.Uint64()). + Msg("EthereumWERC20Mock.Transfer()") + tx, err := e.instance.Transfer(opts, common.HexToAddress(to), amount) + if err != nil { + return err + } + return e.client.ProcessTransaction(tx) +} + +func (e *EthereumWERC20Mock) Mint(account common.Address, amount *big.Int) (*types.Transaction, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + e.l.Info(). + Str("account", account.Hex()). + Str("amount", amount.String()). 
+ Msg("EthereumWERC20Mock.Mint()") + tx, err := e.instance.Mint(opts, account, amount) + if err != nil { + return tx, err + } + return tx, e.client.ProcessTransaction(tx) +} diff --git a/integration-tests/contracts/ethereum_contracts_local.go b/integration-tests/contracts/ethereum_contracts_local.go new file mode 100644 index 00000000..246b49c6 --- /dev/null +++ b/integration-tests/contracts/ethereum_contracts_local.go @@ -0,0 +1,98 @@ +package contracts + +import ( + "encoding/hex" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/rs/zerolog/log" + ocrConfigHelper "github.com/goplugin/libocr/offchainreporting/confighelper" + ocrTypes "github.com/goplugin/libocr/offchainreporting/types" + + "github.com/goplugin/pluginv3.0/integration-tests/client" +) + +// SetConfigLocal sets the payees and the offchain reporting protocol configuration +func (o *EthereumOffchainAggregator) SetConfigLocal( + pluginNodes []*client.PluginClient, + ocrConfig OffChainAggregatorConfig, + transmitters []common.Address, +) error { + // Gather necessary addresses and keys from our plugin nodes to properly configure the OCR contract + log.Info().Str("Contract Address", o.address.Hex()).Msg("Configuring OCR Contract") + for i, node := range pluginNodes { + ocrKeys, err := node.MustReadOCRKeys() + if err != nil { + return err + } + if len(ocrKeys.Data) == 0 { + return fmt.Errorf("no OCR keys found for node %v", node) + } + primaryOCRKey := ocrKeys.Data[0] + if err != nil { + return err + } + p2pKeys, err := node.MustReadP2PKeys() + if err != nil { + return err + } + primaryP2PKey := p2pKeys.Data[0] + + // Need to convert the key representations + var onChainSigningAddress [20]byte + var configPublicKey [32]byte + offchainSigningAddress, err := hex.DecodeString(primaryOCRKey.Attributes.OffChainPublicKey) + if err != nil { + return err + } + decodeConfigKey, err := hex.DecodeString(primaryOCRKey.Attributes.ConfigPublicKey) + if err != nil { + return err + } + + // 
https://stackoverflow.com/questions/8032170/how-to-assign-string-to-bytes-array + copy(onChainSigningAddress[:], common.HexToAddress(primaryOCRKey.Attributes.OnChainSigningAddress).Bytes()) + copy(configPublicKey[:], decodeConfigKey) + + oracleIdentity := ocrConfigHelper.OracleIdentity{ + TransmitAddress: transmitters[i], + OnChainSigningAddress: onChainSigningAddress, + PeerID: primaryP2PKey.Attributes.PeerID, + OffchainPublicKey: offchainSigningAddress, + } + oracleIdentityExtra := ocrConfigHelper.OracleIdentityExtra{ + OracleIdentity: oracleIdentity, + SharedSecretEncryptionPublicKey: ocrTypes.SharedSecretEncryptionPublicKey(configPublicKey), + } + + ocrConfig.OracleIdentities = append(ocrConfig.OracleIdentities, oracleIdentityExtra) + } + + signers, transmitters, threshold, encodedConfigVersion, encodedConfig, err := ocrConfigHelper.ContractSetConfigArgs( + ocrConfig.DeltaProgress, + ocrConfig.DeltaResend, + ocrConfig.DeltaRound, + ocrConfig.DeltaGrace, + ocrConfig.DeltaC, + ocrConfig.AlphaPPB, + ocrConfig.DeltaStage, + ocrConfig.RMax, + ocrConfig.S, + ocrConfig.OracleIdentities, + ocrConfig.F, + ) + if err != nil { + return err + } + + // Set Config + opts, err := o.client.TransactionOpts(o.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := o.ocr.SetConfig(opts, signers, transmitters, threshold, encodedConfigVersion, encodedConfig) + if err != nil { + return err + } + return o.client.ProcessTransaction(tx) +} diff --git a/integration-tests/contracts/ethereum_keeper_contracts.go b/integration-tests/contracts/ethereum_keeper_contracts.go new file mode 100644 index 00000000..408b59cb --- /dev/null +++ b/integration-tests/contracts/ethereum_keeper_contracts.go @@ -0,0 +1,2130 @@ +package contracts + +import ( + "context" + "errors" + "fmt" + "math/big" + "strconv" + "strings" + "time" + + geth "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rs/zerolog" + goabi "github.com/umbracle/ethgo/abi" + + "github.com/goplugin/plugin-testing-framework/blockchain" + + cltypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_consumer_benchmark" + registrar21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_registrar_wrapper2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_utils_2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registrar_wrapper1_2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registrar_wrapper2_0" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper1_3" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper2_0" + registry21 "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_registry_wrapper_2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_triggered_streams_lookup_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_upkeep_counter_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/perform_data_checker_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/simple_log_upkeep_counter_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/streams_lookup_upkeep_wrapper" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/upkeep_perform_counter_restrictive_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/upkeep_transcoder" + + "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" + "github.com/goplugin/pluginv3.0/integration-tests/testreporters" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_consumer_performance_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/keeper_consumer_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/upkeep_counter_wrapper" +) + +var utilsABI = cltypes.MustGetABI(automation_utils_2_1.AutomationUtilsABI) +var registrarABI = cltypes.MustGetABI(registrar21.AutomationRegistrarABI) + +type KeeperRegistrar interface { + Address() string + + EncodeRegisterRequest(name string, email []byte, upkeepAddr string, gasLimit uint32, adminAddr string, checkData []byte, amount *big.Int, source uint8, senderAddr string, isLogTrigger bool, isMercury bool) ([]byte, error) + + Fund(ethAmount *big.Float) error +} + +type UpkeepTranscoder interface { + Address() string +} + +type KeeperRegistry interface { + Address() string + Fund(ethAmount *big.Float) error + SetConfig(config KeeperRegistrySettings, ocrConfig OCRv2Config) error + SetRegistrar(registrarAddr string) error + AddUpkeepFunds(id *big.Int, amount *big.Int) error + GetUpkeepInfo(ctx context.Context, id *big.Int) (*UpkeepInfo, error) + GetKeeperInfo(ctx context.Context, keeperAddr string) (*KeeperInfo, error) + SetKeepers(keepers []string, payees []string, ocrConfig OCRv2Config) error + GetKeeperList(ctx context.Context) ([]string, error) + RegisterUpkeep(target string, gasLimit uint32, admin string, checkData []byte) error + CancelUpkeep(id *big.Int) error + SetUpkeepGasLimit(id *big.Int, gas uint32) error + ParseUpkeepPerformedLog(log *types.Log) (*UpkeepPerformedLog, error) + ParseStaleUpkeepReportLog(log *types.Log) (*StaleUpkeepReportLog, 
error) + ParseUpkeepIdFromRegisteredLog(log *types.Log) (*big.Int, error) + Pause() error + Migrate(upkeepIDs []*big.Int, destinationAddress common.Address) error + SetMigrationPermissions(peerAddress common.Address, permission uint8) error + PauseUpkeep(id *big.Int) error + UnpauseUpkeep(id *big.Int) error + UpdateCheckData(id *big.Int, newCheckData []byte) error + SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) error + SetUpkeepPrivilegeConfig(id *big.Int, privilegeConfig []byte) error + RegistryOwnerAddress() common.Address +} + +type KeeperConsumer interface { + Address() string + Counter(ctx context.Context) (*big.Int, error) + Start() error +} + +type UpkeepCounter interface { + Address() string + Fund(ethAmount *big.Float) error + Counter(ctx context.Context) (*big.Int, error) + SetSpread(testRange *big.Int, interval *big.Int) error +} + +type UpkeepPerformCounterRestrictive interface { + Address() string + Fund(ethAmount *big.Float) error + Counter(ctx context.Context) (*big.Int, error) + SetSpread(testRange *big.Int, interval *big.Int) error +} + +// KeeperConsumerPerformance is a keeper consumer contract that is more complicated than the typical consumer, +// it's intended to only be used for performance tests. +type KeeperConsumerPerformance interface { + Address() string + Fund(ethAmount *big.Float) error + CheckEligible(ctx context.Context) (bool, error) + GetUpkeepCount(ctx context.Context) (*big.Int, error) + SetCheckGasToBurn(ctx context.Context, gas *big.Int) error + SetPerformGasToBurn(ctx context.Context, gas *big.Int) error +} + +// AutomationConsumerBenchmark is a keeper consumer contract that is more complicated than the typical consumer, +// it's intended to only be used for benchmark tests. 
+type AutomationConsumerBenchmark interface { + Address() string + Fund(ethAmount *big.Float) error + CheckEligible(ctx context.Context, id *big.Int, _range *big.Int, firstEligibleBuffer *big.Int) (bool, error) + GetUpkeepCount(ctx context.Context, id *big.Int) (*big.Int, error) +} + +type KeeperPerformDataChecker interface { + Address() string + Counter(ctx context.Context) (*big.Int, error) + SetExpectedData(ctx context.Context, expectedData []byte) error +} + +type UpkeepPerformedLog struct { + Id *big.Int + Success bool + From common.Address +} + +type StaleUpkeepReportLog struct { + Id *big.Int +} + +// KeeperRegistryOpts opts to deploy keeper registry version +type KeeperRegistryOpts struct { + RegistryVersion ethereum.KeeperRegistryVersion + LinkAddr string + ETHFeedAddr string + GasFeedAddr string + TranscoderAddr string + RegistrarAddr string + Settings KeeperRegistrySettings +} + +// KeeperRegistrySettings represents the settings to fine tune keeper registry +type KeeperRegistrySettings struct { + PaymentPremiumPPB uint32 // payment premium rate oracles receive on top of being reimbursed for gas, measured in parts per billion + FlatFeeMicroPLI uint32 // flat fee charged for each upkeep + BlockCountPerTurn *big.Int // number of blocks each oracle has during their turn to perform upkeep before it will be the next keeper's turn to submit + CheckGasLimit uint32 // gas limit when checking for upkeep + StalenessSeconds *big.Int // number of seconds that is allowed for feed data to be stale before switching to the fallback pricing + GasCeilingMultiplier uint16 // multiplier to apply to the fast gas feed price when calculating the payment ceiling for keepers + MinUpkeepSpend *big.Int // minimum spend required by an upkeep before they can withdraw funds + MaxPerformGas uint32 // max gas allowed for an upkeep within perform + FallbackGasPrice *big.Int // gas price used if the gas price feed is stale + FallbackLinkPrice *big.Int // PLI price used if the PLI price 
feed is stale + MaxCheckDataSize uint32 + MaxPerformDataSize uint32 + RegistryVersion ethereum.KeeperRegistryVersion +} + +// KeeperRegistrarSettings represents settings for registrar contract +type KeeperRegistrarSettings struct { + AutoApproveConfigType uint8 + AutoApproveMaxAllowed uint16 + RegistryAddr string + MinLinkJuels *big.Int +} + +// KeeperInfo keeper status and balance info +type KeeperInfo struct { + Payee string + Active bool + Balance *big.Int +} + +// UpkeepInfo keeper target info +type UpkeepInfo struct { + Target string + ExecuteGas uint32 + CheckData []byte + Balance *big.Int + LastKeeper string + Admin string + MaxValidBlocknumber uint64 + LastPerformBlockNumber uint32 + AmountSpent *big.Int + Paused bool + OffchainConfig []byte +} + +// EthereumKeeperRegistry represents keeper registry contract +type EthereumKeeperRegistry struct { + client blockchain.EVMClient + version ethereum.KeeperRegistryVersion + registry1_1 *keeper_registry_wrapper1_1.KeeperRegistry + registry1_2 *keeper_registry_wrapper1_2.KeeperRegistry + registry1_3 *keeper_registry_wrapper1_3.KeeperRegistry + registry2_0 *keeper_registry_wrapper2_0.KeeperRegistry + registry2_1 *i_keeper_registry_master_wrapper_2_1.IKeeperRegistryMaster + address *common.Address + l zerolog.Logger +} + +func (v *EthereumKeeperRegistry) Address() string { + return v.address.Hex() +} + +func (v *EthereumKeeperRegistry) Fund(ethAmount *big.Float) error { + gasEstimates, err := v.client.EstimateGas(geth.CallMsg{}) + if err != nil { + return err + } + return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) +} + +func (rcs *KeeperRegistrySettings) EncodeOnChainConfig(registrar string, registryOwnerAddress common.Address) ([]byte, error) { + if rcs.RegistryVersion == ethereum.RegistryVersion_2_1 { + onchainConfigStruct := registry21.KeeperRegistryBase21OnchainConfig{ + PaymentPremiumPPB: rcs.PaymentPremiumPPB, + FlatFeeMicroLink: rcs.FlatFeeMicroPLI, + CheckGasLimit: rcs.CheckGasLimit, + 
StalenessSeconds: rcs.StalenessSeconds, + GasCeilingMultiplier: rcs.GasCeilingMultiplier, + MinUpkeepSpend: rcs.MinUpkeepSpend, + MaxPerformGas: rcs.MaxPerformGas, + MaxCheckDataSize: rcs.MaxCheckDataSize, + MaxPerformDataSize: rcs.MaxPerformDataSize, + MaxRevertDataSize: uint32(1000), + FallbackGasPrice: rcs.FallbackGasPrice, + FallbackLinkPrice: rcs.FallbackLinkPrice, + Transcoder: common.Address{}, + Registrars: []common.Address{common.HexToAddress(registrar)}, + UpkeepPrivilegeManager: registryOwnerAddress, + } + + encodedOnchainConfig, err := utilsABI.Methods["_onChainConfig"].Inputs.Pack(&onchainConfigStruct) + + return encodedOnchainConfig, err + } + configType := goabi.MustNewType("tuple(uint32 paymentPremiumPPB,uint32 flatFeeMicroLink,uint32 checkGasLimit,uint24 stalenessSeconds,uint16 gasCeilingMultiplier,uint96 minUpkeepSpend,uint32 maxPerformGas,uint32 maxCheckDataSize,uint32 maxPerformDataSize,uint256 fallbackGasPrice,uint256 fallbackLinkPrice,address transcoder,address registrar)") + onchainConfig, err := goabi.Encode(map[string]interface{}{ + "paymentPremiumPPB": rcs.PaymentPremiumPPB, + "flatFeeMicroLink": rcs.FlatFeeMicroPLI, + "checkGasLimit": rcs.CheckGasLimit, + "stalenessSeconds": rcs.StalenessSeconds, + "gasCeilingMultiplier": rcs.GasCeilingMultiplier, + "minUpkeepSpend": rcs.MinUpkeepSpend, + "maxPerformGas": rcs.MaxPerformGas, + "maxCheckDataSize": rcs.MaxCheckDataSize, + "maxPerformDataSize": rcs.MaxPerformDataSize, + "fallbackGasPrice": rcs.FallbackGasPrice, + "fallbackLinkPrice": rcs.FallbackLinkPrice, + "transcoder": common.Address{}, + "registrar": registrar, + }, configType) + return onchainConfig, err + +} + +func (v *EthereumKeeperRegistry) RegistryOwnerAddress() common.Address { + callOpts := &bind.CallOpts{ + Pending: false, + } + + //nolint: exhaustive + switch v.version { + case ethereum.RegistryVersion_2_1: + ownerAddress, _ := v.registry2_1.Owner(callOpts) + return ownerAddress + case ethereum.RegistryVersion_2_0: + 
ownerAddress, _ := v.registry2_0.Owner(callOpts) + return ownerAddress + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1, ethereum.RegistryVersion_1_2, ethereum.RegistryVersion_1_3: + return common.HexToAddress(v.client.GetDefaultWallet().Address()) + } + + return common.HexToAddress(v.client.GetDefaultWallet().Address()) +} + +func (v *EthereumKeeperRegistry) SetConfig(config KeeperRegistrySettings, ocrConfig OCRv2Config) error { + txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + callOpts := bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: nil, + } + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + tx, err := v.registry1_1.SetConfig( + txOpts, + config.PaymentPremiumPPB, + config.FlatFeeMicroPLI, + config.BlockCountPerTurn, + config.CheckGasLimit, + config.StalenessSeconds, + config.GasCeilingMultiplier, + config.FallbackGasPrice, + config.FallbackLinkPrice, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_1_2: + state, err := v.registry1_2.GetState(&callOpts) + if err != nil { + return err + } + + tx, err := v.registry1_2.SetConfig(txOpts, keeper_registry_wrapper1_2.Config{ + PaymentPremiumPPB: config.PaymentPremiumPPB, + FlatFeeMicroLink: config.FlatFeeMicroPLI, + BlockCountPerTurn: config.BlockCountPerTurn, + CheckGasLimit: config.CheckGasLimit, + StalenessSeconds: config.StalenessSeconds, + GasCeilingMultiplier: config.GasCeilingMultiplier, + MinUpkeepSpend: config.MinUpkeepSpend, + MaxPerformGas: config.MaxPerformGas, + FallbackGasPrice: config.FallbackGasPrice, + FallbackLinkPrice: config.FallbackLinkPrice, + // Keep the transcoder and registrar same. 
They have separate setters + Transcoder: state.Config.Transcoder, + Registrar: state.Config.Registrar, + }) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_1_3: + state, err := v.registry1_3.GetState(&callOpts) + if err != nil { + return err + } + + tx, err := v.registry1_3.SetConfig(txOpts, keeper_registry_wrapper1_3.Config{ + PaymentPremiumPPB: config.PaymentPremiumPPB, + FlatFeeMicroLink: config.FlatFeeMicroPLI, + BlockCountPerTurn: config.BlockCountPerTurn, + CheckGasLimit: config.CheckGasLimit, + StalenessSeconds: config.StalenessSeconds, + GasCeilingMultiplier: config.GasCeilingMultiplier, + MinUpkeepSpend: config.MinUpkeepSpend, + MaxPerformGas: config.MaxPerformGas, + FallbackGasPrice: config.FallbackGasPrice, + FallbackLinkPrice: config.FallbackLinkPrice, + // Keep the transcoder and registrar same. They have separate setters + Transcoder: state.Config.Transcoder, + Registrar: state.Config.Registrar, + }) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_2_0: + tx, err := v.registry2_0.SetConfig(txOpts, + ocrConfig.Signers, + ocrConfig.Transmitters, + ocrConfig.F, + ocrConfig.OnchainConfig, + ocrConfig.OffchainConfigVersion, + ocrConfig.OffchainConfig, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_2_1: + tx, err := v.registry2_1.SetConfig(txOpts, + ocrConfig.Signers, + ocrConfig.Transmitters, + ocrConfig.F, + ocrConfig.OnchainConfig, + ocrConfig.OffchainConfigVersion, + ocrConfig.OffchainConfig, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + } + + return fmt.Errorf("keeper registry version %d is not supported", v.version) +} + +// Pause pauses the registry. 
+func (v *EthereumKeeperRegistry) Pause() error { + txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + var tx *types.Transaction + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + tx, err = v.registry1_1.Pause(txOpts) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_1_2: + tx, err = v.registry1_2.Pause(txOpts) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_1_3: + tx, err = v.registry1_3.Pause(txOpts) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_2_0: + tx, err = v.registry2_0.Pause(txOpts) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_2_1: + tx, err = v.registry2_1.Pause(txOpts) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + } + + return fmt.Errorf("keeper registry version %d is not supported", v.version) +} + +// Migrate performs a migration of the given upkeep ids to the specific destination passed as parameter. +func (v *EthereumKeeperRegistry) Migrate(upkeepIDs []*big.Int, destinationAddress common.Address) error { + if v.version != ethereum.RegistryVersion_1_2 { + return fmt.Errorf("migration of upkeeps is only available for version 1.2 of the registries") + } + + txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry1_2.MigrateUpkeeps(txOpts, upkeepIDs, destinationAddress) + if err != nil { + return err + } + + return v.client.ProcessTransaction(tx) +} + +// SetMigrationPermissions sets the permissions of another registry to allow migrations between the two. 
+func (v *EthereumKeeperRegistry) SetMigrationPermissions(peerAddress common.Address, permission uint8) error { + if v.version != ethereum.RegistryVersion_1_2 { + return fmt.Errorf("migration of upkeeps is only available for version 1.2 of the registries") + } + + txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry1_2.SetPeerRegistryMigrationPermission(txOpts, peerAddress, permission) + if err != nil { + return err + } + + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumKeeperRegistry) SetRegistrar(registrarAddr string) error { + if v.version == ethereum.RegistryVersion_2_0 { + // we short circuit and exit, so we don't create a new txs messing up the nonce before exiting + return fmt.Errorf("please use set config") + } + + txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + callOpts := bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: nil, + } + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + tx, err := v.registry1_1.SetRegistrar(txOpts, common.HexToAddress(registrarAddr)) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_1_2: + state, err := v.registry1_2.GetState(&callOpts) + if err != nil { + return err + } + newConfig := state.Config + newConfig.Registrar = common.HexToAddress(registrarAddr) + tx, err := v.registry1_2.SetConfig(txOpts, newConfig) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_1_3: + state, err := v.registry1_3.GetState(&callOpts) + if err != nil { + return err + } + newConfig := state.Config + newConfig.Registrar = common.HexToAddress(registrarAddr) + tx, err := v.registry1_3.SetConfig(txOpts, newConfig) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + default: + return 
fmt.Errorf("keeper registry version %d is not supported", v.version) + } +} + +// AddUpkeepFunds adds link for particular upkeep id +func (v *EthereumKeeperRegistry) AddUpkeepFunds(id *big.Int, amount *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + var tx *types.Transaction + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + tx, err = v.registry1_1.AddFunds(opts, id, amount) + case ethereum.RegistryVersion_1_2: + tx, err = v.registry1_2.AddFunds(opts, id, amount) + case ethereum.RegistryVersion_1_3: + tx, err = v.registry1_3.AddFunds(opts, id, amount) + case ethereum.RegistryVersion_2_0: + tx, err = v.registry2_0.AddFunds(opts, id, amount) + case ethereum.RegistryVersion_2_1: + tx, err = v.registry2_1.AddFunds(opts, id, amount) + } + + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +// GetUpkeepInfo gets upkeep info +func (v *EthereumKeeperRegistry) GetUpkeepInfo(ctx context.Context, id *big.Int) (*UpkeepInfo, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + uk, err := v.registry1_1.GetUpkeep(opts, id) + if err != nil { + return nil, err + } + return &UpkeepInfo{ + Target: uk.Target.Hex(), + ExecuteGas: uk.ExecuteGas, + CheckData: uk.CheckData, + Balance: uk.Balance, + LastKeeper: uk.LastKeeper.Hex(), + Admin: uk.Admin.Hex(), + MaxValidBlocknumber: uk.MaxValidBlocknumber, + }, nil + case ethereum.RegistryVersion_1_2: + uk, err := v.registry1_2.GetUpkeep(opts, id) + if err != nil { + return nil, err + } + return &UpkeepInfo{ + Target: uk.Target.Hex(), + ExecuteGas: uk.ExecuteGas, + CheckData: uk.CheckData, + Balance: uk.Balance, + LastKeeper: uk.LastKeeper.Hex(), + Admin: uk.Admin.Hex(), + MaxValidBlocknumber: uk.MaxValidBlocknumber, + }, nil + case 
ethereum.RegistryVersion_1_3: + uk, err := v.registry1_3.GetUpkeep(opts, id) + if err != nil { + return nil, err + } + return &UpkeepInfo{ + Target: uk.Target.Hex(), + ExecuteGas: uk.ExecuteGas, + CheckData: uk.CheckData, + Balance: uk.Balance, + LastKeeper: uk.LastKeeper.Hex(), + Admin: uk.Admin.Hex(), + MaxValidBlocknumber: uk.MaxValidBlocknumber, + }, nil + case ethereum.RegistryVersion_2_0: + uk, err := v.registry2_0.GetUpkeep(opts, id) + if err != nil { + return nil, err + } + return &UpkeepInfo{ + Target: uk.Target.Hex(), + ExecuteGas: uk.ExecuteGas, + CheckData: uk.CheckData, + Balance: uk.Balance, + Admin: uk.Admin.Hex(), + MaxValidBlocknumber: uk.MaxValidBlocknumber, + LastPerformBlockNumber: uk.LastPerformBlockNumber, + AmountSpent: uk.AmountSpent, + Paused: uk.Paused, + OffchainConfig: uk.OffchainConfig, + }, nil + case ethereum.RegistryVersion_2_1: + uk, err := v.registry2_1.GetUpkeep(opts, id) + if err != nil { + return nil, err + } + return &UpkeepInfo{ + Target: uk.Target.Hex(), + ExecuteGas: uk.PerformGas, + CheckData: uk.CheckData, + Balance: uk.Balance, + Admin: uk.Admin.Hex(), + MaxValidBlocknumber: uk.MaxValidBlocknumber, + LastPerformBlockNumber: uk.LastPerformedBlockNumber, + AmountSpent: uk.AmountSpent, + Paused: uk.Paused, + OffchainConfig: uk.OffchainConfig, + }, nil + } + + return nil, fmt.Errorf("keeper registry version %d is not supported", v.version) +} + +func (v *EthereumKeeperRegistry) GetKeeperInfo(ctx context.Context, keeperAddr string) (*KeeperInfo, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + var info struct { + Payee common.Address + Active bool + Balance *big.Int + } + var err error + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + info, err = v.registry1_1.GetKeeperInfo(opts, common.HexToAddress(keeperAddr)) + case ethereum.RegistryVersion_1_2: + info, err = v.registry1_2.GetKeeperInfo(opts, 
common.HexToAddress(keeperAddr)) + case ethereum.RegistryVersion_1_3: + info, err = v.registry1_3.GetKeeperInfo(opts, common.HexToAddress(keeperAddr)) + case ethereum.RegistryVersion_2_0, ethereum.RegistryVersion_2_1: + // this is not used anywhere + return nil, fmt.Errorf("not supported") + } + + if err != nil { + return nil, err + } + return &KeeperInfo{ + Payee: info.Payee.Hex(), + Active: info.Active, + Balance: info.Balance, + }, nil +} + +func (v *EthereumKeeperRegistry) SetKeepers(keepers []string, payees []string, ocrConfig OCRv2Config) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + keepersAddresses := make([]common.Address, 0) + for _, k := range keepers { + keepersAddresses = append(keepersAddresses, common.HexToAddress(k)) + } + payeesAddresses := make([]common.Address, 0) + for _, p := range payees { + payeesAddresses = append(payeesAddresses, common.HexToAddress(p)) + } + var tx *types.Transaction + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + tx, err = v.registry1_1.SetKeepers(opts, keepersAddresses, payeesAddresses) + case ethereum.RegistryVersion_1_2: + tx, err = v.registry1_2.SetKeepers(opts, keepersAddresses, payeesAddresses) + case ethereum.RegistryVersion_1_3: + tx, err = v.registry1_3.SetKeepers(opts, keepersAddresses, payeesAddresses) + case ethereum.RegistryVersion_2_0: + tx, err = v.registry2_0.SetConfig(opts, + ocrConfig.Signers, + ocrConfig.Transmitters, + ocrConfig.F, + ocrConfig.OnchainConfig, + ocrConfig.OffchainConfigVersion, + ocrConfig.OffchainConfig, + ) + case ethereum.RegistryVersion_2_1: + return fmt.Errorf("not supported") + } + + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +// RegisterUpkeep registers contract to perform upkeep +func (v *EthereumKeeperRegistry) RegisterUpkeep(target string, gasLimit uint32, admin string, checkData []byte) error { + opts, err := 
v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + var tx *types.Transaction + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + tx, err = v.registry1_1.RegisterUpkeep( + opts, + common.HexToAddress(target), + gasLimit, + common.HexToAddress(admin), + checkData, + ) + case ethereum.RegistryVersion_1_2: + tx, err = v.registry1_2.RegisterUpkeep( + opts, + common.HexToAddress(target), + gasLimit, + common.HexToAddress(admin), + checkData, + ) + case ethereum.RegistryVersion_1_3: + tx, err = v.registry1_3.RegisterUpkeep( + opts, + common.HexToAddress(target), + gasLimit, + common.HexToAddress(admin), + checkData, + ) + case ethereum.RegistryVersion_2_0: + tx, err = v.registry2_0.RegisterUpkeep( + opts, + common.HexToAddress(target), + gasLimit, + common.HexToAddress(admin), + checkData, + nil, //offchain config + ) + case ethereum.RegistryVersion_2_1: + return fmt.Errorf("not supported") + } + + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +// CancelUpkeep cancels the given upkeep ID +func (v *EthereumKeeperRegistry) CancelUpkeep(id *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + var tx *types.Transaction + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + tx, err = v.registry1_1.CancelUpkeep(opts, id) + if err != nil { + return err + } + case ethereum.RegistryVersion_1_2: + tx, err = v.registry1_2.CancelUpkeep(opts, id) + if err != nil { + return err + } + case ethereum.RegistryVersion_1_3: + tx, err = v.registry1_3.CancelUpkeep(opts, id) + if err != nil { + return err + } + case ethereum.RegistryVersion_2_0: + tx, err = v.registry2_0.CancelUpkeep(opts, id) + if err != nil { + return err + } + case ethereum.RegistryVersion_2_1: + tx, err = v.registry2_1.CancelUpkeep(opts, id) + if err != nil { + return err + } + } + + v.l.Info(). 
+ Str("Upkeep ID", strconv.FormatInt(id.Int64(), 10)). + Str("From", v.client.GetDefaultWallet().Address()). + Str("TX Hash", tx.Hash().String()). + Msg("Cancel Upkeep tx") + return v.client.ProcessTransaction(tx) +} + +// SetUpkeepGasLimit sets the perform gas limit for a given upkeep ID +func (v *EthereumKeeperRegistry) SetUpkeepGasLimit(id *big.Int, gas uint32) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + var tx *types.Transaction + + switch v.version { + case ethereum.RegistryVersion_1_2: + tx, err = v.registry1_2.SetUpkeepGasLimit(opts, id, gas) + if err != nil { + return err + } + case ethereum.RegistryVersion_1_3: + tx, err = v.registry1_3.SetUpkeepGasLimit(opts, id, gas) + if err != nil { + return err + } + case ethereum.RegistryVersion_2_0: + tx, err = v.registry2_0.SetUpkeepGasLimit(opts, id, gas) + if err != nil { + return err + } + case ethereum.RegistryVersion_2_1: + tx, err = v.registry2_1.SetUpkeepGasLimit(opts, id, gas) + if err != nil { + return err + } + default: + return fmt.Errorf("keeper registry version %d is not supported for SetUpkeepGasLimit", v.version) + } + return v.client.ProcessTransaction(tx) +} + +// GetKeeperList get list of all registered keeper addresses +func (v *EthereumKeeperRegistry) GetKeeperList(ctx context.Context) ([]string, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + var list []common.Address + var err error + + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + list, err = v.registry1_1.GetKeeperList(opts) + case ethereum.RegistryVersion_1_2: + state, err := v.registry1_2.GetState(opts) + if err != nil { + return []string{}, err + } + list = state.Keepers + case ethereum.RegistryVersion_1_3: + state, err := v.registry1_3.GetState(opts) + if err != nil { + return []string{}, err + } + list = state.Keepers + case 
ethereum.RegistryVersion_2_0: + state, err := v.registry2_0.GetState(opts) + if err != nil { + return []string{}, err + } + list = state.Transmitters + case ethereum.RegistryVersion_2_1: + return nil, fmt.Errorf("not supported") + } + + if err != nil { + return []string{}, err + } + addrs := make([]string, 0) + for _, ca := range list { + addrs = append(addrs, ca.Hex()) + } + return addrs, nil +} + +// UpdateCheckData updates the check data of an upkeep +func (v *EthereumKeeperRegistry) UpdateCheckData(id *big.Int, newCheckData []byte) error { + + switch v.version { + case ethereum.RegistryVersion_1_3: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry1_3.UpdateCheckData(opts, id, newCheckData) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_2_0: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry2_0.UpdateCheckData(opts, id, newCheckData) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_2_1: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry2_1.SetUpkeepCheckData(opts, id, newCheckData) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + default: + return fmt.Errorf("UpdateCheckData is not supported by keeper registry version %d", v.version) + } +} + +// SetUpkeepTriggerConfig updates the trigger config of an upkeep (only for version 2.1) +func (v *EthereumKeeperRegistry) SetUpkeepTriggerConfig(id *big.Int, triggerConfig []byte) error { + + switch v.version { + case ethereum.RegistryVersion_2_1: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry2_1.SetUpkeepTriggerConfig(opts, id, triggerConfig) + if err != nil { 
+ return err + } + return v.client.ProcessTransaction(tx) + default: + return fmt.Errorf("SetUpkeepTriggerConfig is not supported by keeper registry version %d", v.version) + } +} + +// SetUpkeepPrivilegeConfig sets the privilege config of an upkeep (only for version 2.1) +func (v *EthereumKeeperRegistry) SetUpkeepPrivilegeConfig(id *big.Int, privilegeConfig []byte) error { + + switch v.version { + case ethereum.RegistryVersion_2_1: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry2_1.SetUpkeepPrivilegeConfig(opts, id, privilegeConfig) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + default: + return fmt.Errorf("SetUpkeepPrivilegeConfig is not supported by keeper registry version %d", v.version) + } +} + +// PauseUpkeep stops an upkeep from an upkeep +func (v *EthereumKeeperRegistry) PauseUpkeep(id *big.Int) error { + switch v.version { + case ethereum.RegistryVersion_1_3: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry1_3.PauseUpkeep(opts, id) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_2_0: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry2_0.PauseUpkeep(opts, id) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_2_1: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry2_1.PauseUpkeep(opts, id) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + default: + return fmt.Errorf("PauseUpkeep is not supported by keeper registry version %d", v.version) + } +} + +// UnpauseUpkeep get list of all registered keeper addresses +func (v *EthereumKeeperRegistry) UnpauseUpkeep(id *big.Int) 
error { + switch v.version { + case ethereum.RegistryVersion_1_3: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry1_3.UnpauseUpkeep(opts, id) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_2_0: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry2_0.UnpauseUpkeep(opts, id) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + case ethereum.RegistryVersion_2_1: + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.registry2_1.UnpauseUpkeep(opts, id) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) + default: + return fmt.Errorf("UnpauseUpkeep is not supported by keeper registry version %d", v.version) + } +} + +// Parses upkeep performed log +func (v *EthereumKeeperRegistry) ParseUpkeepPerformedLog(log *types.Log) (*UpkeepPerformedLog, error) { + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + parsedLog, err := v.registry1_1.ParseUpkeepPerformed(*log) + if err != nil { + return nil, err + } + return &UpkeepPerformedLog{ + Id: parsedLog.Id, + Success: parsedLog.Success, + From: parsedLog.From, + }, nil + case ethereum.RegistryVersion_1_2: + parsedLog, err := v.registry1_2.ParseUpkeepPerformed(*log) + if err != nil { + return nil, err + } + return &UpkeepPerformedLog{ + Id: parsedLog.Id, + Success: parsedLog.Success, + From: parsedLog.From, + }, nil + case ethereum.RegistryVersion_1_3: + parsedLog, err := v.registry1_3.ParseUpkeepPerformed(*log) + if err != nil { + return nil, err + } + return &UpkeepPerformedLog{ + Id: parsedLog.Id, + Success: parsedLog.Success, + From: parsedLog.From, + }, nil + case ethereum.RegistryVersion_2_0: + parsedLog, err := v.registry2_0.ParseUpkeepPerformed(*log) + if 
err != nil { + return nil, err + } + return &UpkeepPerformedLog{ + Id: parsedLog.Id, + Success: parsedLog.Success, + From: utils.ZeroAddress, + }, nil + case ethereum.RegistryVersion_2_1: + parsedLog, err := v.registry2_1.ParseUpkeepPerformed(*log) + if err != nil { + return nil, err + } + return &UpkeepPerformedLog{ + Id: parsedLog.Id, + Success: parsedLog.Success, + From: utils.ZeroAddress, + }, nil + } + return nil, fmt.Errorf("keeper registry version %d is not supported", v.version) +} + +// ParseStaleUpkeepReportLog Parses Stale upkeep report log +func (v *EthereumKeeperRegistry) ParseStaleUpkeepReportLog(log *types.Log) (*StaleUpkeepReportLog, error) { + //nolint:exhaustive + switch v.version { + case ethereum.RegistryVersion_2_0: + parsedLog, err := v.registry2_0.ParseStaleUpkeepReport(*log) + if err != nil { + return nil, err + } + return &StaleUpkeepReportLog{ + Id: parsedLog.Id, + }, nil + case ethereum.RegistryVersion_2_1: + parsedLog, err := v.registry2_1.ParseStaleUpkeepReport(*log) + if err != nil { + return nil, err + } + return &StaleUpkeepReportLog{ + Id: parsedLog.Id, + }, nil + } + return nil, fmt.Errorf("keeper registry version %d is not supported", v.version) +} + +// Parses the upkeep ID from an 'UpkeepRegistered' log, returns error on any other log +func (v *EthereumKeeperRegistry) ParseUpkeepIdFromRegisteredLog(log *types.Log) (*big.Int, error) { + switch v.version { + case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1: + parsedLog, err := v.registry1_1.ParseUpkeepRegistered(*log) + if err != nil { + return nil, err + } + return parsedLog.Id, nil + case ethereum.RegistryVersion_1_2: + parsedLog, err := v.registry1_2.ParseUpkeepRegistered(*log) + if err != nil { + return nil, err + } + return parsedLog.Id, nil + case ethereum.RegistryVersion_1_3: + parsedLog, err := v.registry1_3.ParseUpkeepRegistered(*log) + if err != nil { + return nil, err + } + return parsedLog.Id, nil + case ethereum.RegistryVersion_2_0: + parsedLog, err := 
v.registry2_0.ParseUpkeepRegistered(*log) + if err != nil { + return nil, err + } + return parsedLog.Id, nil + case ethereum.RegistryVersion_2_1: + parsedLog, err := v.registry2_1.ParseUpkeepRegistered(*log) + if err != nil { + return nil, err + } + return parsedLog.Id, nil + } + + return nil, fmt.Errorf("keeper registry version %d is not supported", v.version) +} + +// KeeperConsumerRoundConfirmer is a header subscription that awaits for a round of upkeeps +type KeeperConsumerRoundConfirmer struct { + instance KeeperConsumer + upkeepsValue int + doneChan chan struct{} + context context.Context + cancel context.CancelFunc + l zerolog.Logger +} + +// NewKeeperConsumerRoundConfirmer provides a new instance of a KeeperConsumerRoundConfirmer +func NewKeeperConsumerRoundConfirmer( + contract KeeperConsumer, + counterValue int, + timeout time.Duration, + logger zerolog.Logger, +) *KeeperConsumerRoundConfirmer { + ctx, ctxCancel := context.WithTimeout(context.Background(), timeout) + return &KeeperConsumerRoundConfirmer{ + instance: contract, + upkeepsValue: counterValue, + doneChan: make(chan struct{}), + context: ctx, + cancel: ctxCancel, + l: logger, + } +} + +// ReceiveHeader will query the latest Keeper round and check to see whether the round has confirmed +func (o *KeeperConsumerRoundConfirmer) ReceiveHeader(_ blockchain.NodeHeader) error { + upkeeps, err := o.instance.Counter(context.Background()) + if err != nil { + return err + } + l := o.l.Info(). + Str("Contract Address", o.instance.Address()). + Int64("Upkeeps", upkeeps.Int64()). 
+ Int("Required upkeeps", o.upkeepsValue) + if upkeeps.Int64() == int64(o.upkeepsValue) { + l.Msg("Upkeep completed") + o.doneChan <- struct{}{} + } else { + l.Msg("Waiting for upkeep round") + } + return nil +} + +// Wait is a blocking function that will wait until the round has confirmed, and timeout if the deadline has passed +func (o *KeeperConsumerRoundConfirmer) Wait() error { + for { + select { + case <-o.doneChan: + o.cancel() + return nil + case <-o.context.Done(): + return fmt.Errorf("timeout waiting for upkeeps to confirm: %d", o.upkeepsValue) + } + } +} + +// KeeperConsumerPerformanceRoundConfirmer is a header subscription that awaits for a round of upkeeps +type KeeperConsumerPerformanceRoundConfirmer struct { + instance KeeperConsumerPerformance + doneChan chan bool + context context.Context + cancel context.CancelFunc + + lastBlockNum uint64 // Records the number of the last block that came in + blockCadence int64 // How many blocks before an upkeep should happen + blockRange int64 // How many blocks to watch upkeeps for + blocksSinceSubscription int64 // How many blocks have passed since subscribing + expectedUpkeepCount int64 // The count of upkeeps expected next iteration + blocksSinceSuccessfulUpkeep int64 // How many blocks have come in since the last successful upkeep + allMissedUpkeeps []int64 // Tracks the amount of blocks missed in each missed upkeep + totalSuccessfulUpkeeps int64 + + metricsReporter *testreporters.KeeperBlockTimeTestReporter // Testreporter to track results + complete bool + l zerolog.Logger +} + +// NewKeeperConsumerPerformanceRoundConfirmer provides a new instance of a KeeperConsumerPerformanceRoundConfirmer +// Used to track and log performance test results for keepers +func NewKeeperConsumerPerformanceRoundConfirmer( + contract KeeperConsumerPerformance, + expectedBlockCadence int64, // Expected to upkeep every 5/10/20 blocks, for example + blockRange int64, + metricsReporter *testreporters.KeeperBlockTimeTestReporter, 
+ logger zerolog.Logger, +) *KeeperConsumerPerformanceRoundConfirmer { + ctx, cancelFunc := context.WithCancel(context.Background()) + return &KeeperConsumerPerformanceRoundConfirmer{ + instance: contract, + doneChan: make(chan bool), + context: ctx, + cancel: cancelFunc, + blockCadence: expectedBlockCadence, + blockRange: blockRange, + blocksSinceSubscription: 0, + blocksSinceSuccessfulUpkeep: 0, + expectedUpkeepCount: 1, + allMissedUpkeeps: []int64{}, + totalSuccessfulUpkeeps: 0, + metricsReporter: metricsReporter, + complete: false, + lastBlockNum: 0, + l: logger, + } +} + +// ReceiveHeader will query the latest Keeper round and check to see whether the round has confirmed +func (o *KeeperConsumerPerformanceRoundConfirmer) ReceiveHeader(receivedHeader blockchain.NodeHeader) error { + if receivedHeader.Number.Uint64() <= o.lastBlockNum { // Uncle / reorg we won't count + return nil + } + o.lastBlockNum = receivedHeader.Number.Uint64() + // Increment block counters + o.blocksSinceSubscription++ + o.blocksSinceSuccessfulUpkeep++ + upkeepCount, err := o.instance.GetUpkeepCount(context.Background()) + if err != nil { + return err + } + + isEligible, err := o.instance.CheckEligible(context.Background()) + if err != nil { + return err + } + if isEligible { + o.l.Trace(). + Str("Contract Address", o.instance.Address()). + Int64("Upkeeps Performed", upkeepCount.Int64()). + Msg("Upkeep Now Eligible") + } + if upkeepCount.Int64() >= o.expectedUpkeepCount { // Upkeep was successful + if o.blocksSinceSuccessfulUpkeep < o.blockCadence { // If there's an early upkeep, that's weird + o.l.Error(). + Str("Contract Address", o.instance.Address()). + Int64("Upkeeps Performed", upkeepCount.Int64()). + Int64("Expected Cadence", o.blockCadence). + Int64("Actual Cadence", o.blocksSinceSuccessfulUpkeep). 
+				Err(errors.New("found an early Upkeep")).
+				// .Msg() is required for zerolog to actually emit the event;
+				// without a terminal Msg/Send the Error() above was silently dropped.
+				Msg("Found an early Upkeep")
+			return fmt.Errorf("found an early Upkeep on contract %s", o.instance.Address())
+		} else if o.blocksSinceSuccessfulUpkeep == o.blockCadence { // Perfectly timed upkeep
+			o.l.Info().
+				Str("Contract Address", o.instance.Address()).
+				Int64("Upkeeps Performed", upkeepCount.Int64()).
+				Int64("Expected Cadence", o.blockCadence).
+				Int64("Actual Cadence", o.blocksSinceSuccessfulUpkeep).
+				Msg("Successful Upkeep on Expected Cadence")
+			o.totalSuccessfulUpkeeps++
+		} else { // Late upkeep
+			o.l.Warn().
+				Str("Contract Address", o.instance.Address()).
+				Int64("Upkeeps Performed", upkeepCount.Int64()).
+				Int64("Expected Cadence", o.blockCadence).
+				Int64("Actual Cadence", o.blocksSinceSuccessfulUpkeep).
+				Msg("Upkeep Completed Late")
+			o.allMissedUpkeeps = append(o.allMissedUpkeeps, o.blocksSinceSuccessfulUpkeep-o.blockCadence)
+		}
+		// Update upkeep tracking values
+		o.blocksSinceSuccessfulUpkeep = 0
+		o.expectedUpkeepCount++
+	}
+
+	if o.blocksSinceSubscription > o.blockRange {
+		if o.blocksSinceSuccessfulUpkeep > o.blockCadence {
+			o.l.Warn().
+				Str("Contract Address", o.instance.Address()).
+				Int64("Upkeeps Performed", upkeepCount.Int64()).
+				Int64("Expected Cadence", o.blockCadence).
+				Int64("Expected Upkeep Count", o.expectedUpkeepCount).
+				Int64("Blocks Waiting", o.blocksSinceSuccessfulUpkeep).
+				Int64("Total Blocks Watched", o.blocksSinceSubscription).
+				Msg("Finished Watching for Upkeeps While Waiting on a Late Upkeep")
+			o.allMissedUpkeeps = append(o.allMissedUpkeeps, o.blocksSinceSuccessfulUpkeep-o.blockCadence)
+		} else {
+			o.l.Info().
+				Str("Contract Address", o.instance.Address()).
+				Int64("Upkeeps Performed", upkeepCount.Int64()).
+				Int64("Total Blocks Watched", o.blocksSinceSubscription).
+ Msg("Finished Watching for Upkeeps") + } + o.doneChan <- true + o.complete = true + return nil + } + return nil +} + +// Wait is a blocking function that will wait until the round has confirmed, and timeout if the deadline has passed +func (o *KeeperConsumerPerformanceRoundConfirmer) Wait() error { + defer func() { o.complete = true }() + for { + select { + case <-o.doneChan: + o.cancel() + o.logDetails() + return nil + case <-o.context.Done(): + return fmt.Errorf("timeout waiting for expected upkeep count to confirm: %d", o.expectedUpkeepCount) + } + } +} + +func (o *KeeperConsumerPerformanceRoundConfirmer) Complete() bool { + return o.complete +} + +func (o *KeeperConsumerPerformanceRoundConfirmer) logDetails() { + report := testreporters.KeeperBlockTimeTestReport{ + ContractAddress: o.instance.Address(), + TotalExpectedUpkeeps: o.blockRange / o.blockCadence, + TotalSuccessfulUpkeeps: o.totalSuccessfulUpkeeps, + AllMissedUpkeeps: o.allMissedUpkeeps, + } + o.metricsReporter.ReportMutex.Lock() + o.metricsReporter.Reports = append(o.metricsReporter.Reports, report) + defer o.metricsReporter.ReportMutex.Unlock() +} + +// KeeperConsumerBenchmarkRoundConfirmer is a header subscription that awaits for a round of upkeeps +type KeeperConsumerBenchmarkRoundConfirmer struct { + instance AutomationConsumerBenchmark + registry KeeperRegistry + upkeepID *big.Int + doneChan chan bool + context context.Context + cancel context.CancelFunc + + firstBlockNum uint64 // Records the number of the first block that came in + lastBlockNum uint64 // Records the number of the last block that came in + blockRange int64 // How many blocks to watch upkeeps for + upkeepSLA int64 // SLA after which an upkeep is counted as 'missed' + metricsReporter *testreporters.KeeperBenchmarkTestReporter // Testreporter to track results + upkeepIndex int64 + firstEligibleBuffer int64 + + // State variables, changes as we get blocks + blocksSinceSubscription int64 // How many blocks have passed since 
subscribing + blocksSinceEligible int64 // How many blocks have come in since upkeep has been eligible for check + countEligible int64 // Number of times the upkeep became eligible + countMissed int64 // Number of times we missed SLA for performing upkeep + upkeepCount int64 // The count of upkeeps done so far + allCheckDelays []int64 // Tracks the amount of blocks missed before an upkeep since it became eligible + complete bool + l zerolog.Logger +} + +// NewKeeperConsumerBenchmarkRoundConfirmer provides a new instance of a KeeperConsumerBenchmarkRoundConfirmer +// Used to track and log benchmark test results for keepers +func NewKeeperConsumerBenchmarkRoundConfirmer( + contract AutomationConsumerBenchmark, + registry KeeperRegistry, + upkeepID *big.Int, + blockRange int64, + upkeepSLA int64, + metricsReporter *testreporters.KeeperBenchmarkTestReporter, + upkeepIndex int64, + firstEligibleBuffer int64, + logger zerolog.Logger, +) *KeeperConsumerBenchmarkRoundConfirmer { + ctx, cancelFunc := context.WithCancel(context.Background()) + return &KeeperConsumerBenchmarkRoundConfirmer{ + instance: contract, + registry: registry, + upkeepID: upkeepID, + doneChan: make(chan bool), + context: ctx, + cancel: cancelFunc, + blockRange: blockRange, + upkeepSLA: upkeepSLA, + blocksSinceSubscription: 0, + blocksSinceEligible: 0, + upkeepCount: 0, + allCheckDelays: []int64{}, + metricsReporter: metricsReporter, + complete: false, + lastBlockNum: 0, + upkeepIndex: upkeepIndex, + firstBlockNum: 0, + firstEligibleBuffer: firstEligibleBuffer, + l: logger, + } +} + +// ReceiveHeader will query the latest Keeper round and check to see whether the round has confirmed +func (o *KeeperConsumerBenchmarkRoundConfirmer) ReceiveHeader(receivedHeader blockchain.NodeHeader) error { + if receivedHeader.Number.Uint64() <= o.lastBlockNum { // Uncle / reorg we won't count + return nil + } + if o.firstBlockNum == 0 { + o.firstBlockNum = receivedHeader.Number.Uint64() + } + o.lastBlockNum = 
receivedHeader.Number.Uint64() + // Increment block counters + o.blocksSinceSubscription++ + + upkeepCount, err := o.instance.GetUpkeepCount(context.Background(), big.NewInt(o.upkeepIndex)) + if err != nil { + return err + } + + if upkeepCount.Int64() > o.upkeepCount { // A new upkeep was done + if upkeepCount.Int64() != o.upkeepCount+1 { + return errors.New("upkeep count increased by more than 1 in a single block") + } + o.l.Info(). + Uint64("Block_Number", receivedHeader.Number.Uint64()). + Str("Upkeep_ID", o.upkeepID.String()). + Str("Contract_Address", o.instance.Address()). + Int64("Upkeep_Count", upkeepCount.Int64()). + Int64("Blocks_since_eligible", o.blocksSinceEligible). + Str("Registry_Address", o.registry.Address()). + Msg("Upkeep Performed") + + if o.blocksSinceEligible > o.upkeepSLA { + o.l.Warn(). + Uint64("Block_Number", receivedHeader.Number.Uint64()). + Str("Upkeep_ID", o.upkeepID.String()). + Str("Contract_Address", o.instance.Address()). + Int64("Blocks_since_eligible", o.blocksSinceEligible). + Str("Registry_Address", o.registry.Address()). + Msg("Upkeep Missed SLA") + o.countMissed++ + } + + o.allCheckDelays = append(o.allCheckDelays, o.blocksSinceEligible) + o.upkeepCount++ + o.blocksSinceEligible = 0 + } + + isEligible, err := o.instance.CheckEligible(context.Background(), big.NewInt(o.upkeepIndex), big.NewInt(o.blockRange), big.NewInt(o.firstEligibleBuffer)) + if err != nil { + return err + } + if isEligible { + if o.blocksSinceEligible == 0 { + // First time this upkeep became eligible + o.countEligible++ + o.l.Info(). + Uint64("Block_Number", receivedHeader.Number.Uint64()). + Str("Upkeep_ID", o.upkeepID.String()). + Str("Contract_Address", o.instance.Address()). + Str("Registry_Address", o.registry.Address()). 
+ Msg("Upkeep Now Eligible") + } + o.blocksSinceEligible++ + } + + if o.blocksSinceSubscription >= o.blockRange || int64(o.lastBlockNum-o.firstBlockNum) >= o.blockRange { + if o.blocksSinceEligible > 0 { + if o.blocksSinceEligible > o.upkeepSLA { + o.l.Warn(). + Uint64("Block_Number", receivedHeader.Number.Uint64()). + Str("Upkeep_ID", o.upkeepID.String()). + Str("Contract_Address", o.instance.Address()). + Int64("Blocks_since_eligible", o.blocksSinceEligible). + Str("Registry_Address", o.registry.Address()). + Msg("Upkeep remained eligible at end of test and missed SLA") + o.countMissed++ + } else { + o.l.Info(). + Uint64("Block_Number", receivedHeader.Number.Uint64()). + Str("Upkeep_ID", o.upkeepID.String()). + Str("Contract_Address", o.instance.Address()). + Int64("Upkeep_Count", upkeepCount.Int64()). + Int64("Blocks_since_eligible", o.blocksSinceEligible). + Str("Registry_Address", o.registry.Address()). + Msg("Upkeep remained eligible at end of test and was within SLA") + } + o.allCheckDelays = append(o.allCheckDelays, o.blocksSinceEligible) + } + + o.l.Info(). + Uint64("Block_Number", receivedHeader.Number.Uint64()). + Str("Upkeep_ID", o.upkeepID.String()). + Str("Contract_Address", o.instance.Address()). + Int64("Upkeeps_Performed", upkeepCount.Int64()). + Int64("Total_Blocks_Watched", o.blocksSinceSubscription). + Str("Registry_Address", o.registry.Address()). 
+ Msg("Finished Watching for Upkeeps") + + o.doneChan <- true + o.complete = true + return nil + } + return nil +} + +// Wait is a blocking function that will wait until the round has confirmed, and timeout if the deadline has passed +func (o *KeeperConsumerBenchmarkRoundConfirmer) Wait() error { + defer func() { o.complete = true }() + for { + select { + case <-o.doneChan: + o.cancel() + o.logDetails() + return nil + case <-o.context.Done(): + return fmt.Errorf("timeout waiting for expected number of blocks: %d", o.blockRange) + } + } +} + +func (o *KeeperConsumerBenchmarkRoundConfirmer) Complete() bool { + return o.complete +} + +func (o *KeeperConsumerBenchmarkRoundConfirmer) logDetails() { + report := testreporters.KeeperBenchmarkTestReport{ + ContractAddress: o.instance.Address(), + TotalEligibleCount: o.countEligible, + TotalSLAMissedUpkeeps: o.countMissed, + TotalPerformedUpkeeps: o.upkeepCount, + AllCheckDelays: o.allCheckDelays, + RegistryAddress: o.registry.Address(), + } + o.metricsReporter.ReportMutex.Lock() + o.metricsReporter.Reports = append(o.metricsReporter.Reports, report) + defer o.metricsReporter.ReportMutex.Unlock() +} + +// EthereumUpkeepCounter represents keeper consumer (upkeep) counter contract +type EthereumUpkeepCounter struct { + client blockchain.EVMClient + consumer *upkeep_counter_wrapper.UpkeepCounter + address *common.Address +} + +func (v *EthereumUpkeepCounter) Address() string { + return v.address.Hex() +} + +func (v *EthereumUpkeepCounter) Fund(ethAmount *big.Float) error { + gasEstimates, err := v.client.EstimateGas(geth.CallMsg{}) + if err != nil { + return err + } + return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) +} +func (v *EthereumUpkeepCounter) Counter(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + cnt, err := v.consumer.Counter(opts) + if err != nil { + return nil, err + } + return cnt, nil +} + 
+func (v *EthereumUpkeepCounter) SetSpread(testRange *big.Int, interval *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.consumer.SetSpread(opts, testRange, interval) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +// EthereumUpkeepPerformCounterRestrictive represents keeper consumer (upkeep) counter contract +type EthereumUpkeepPerformCounterRestrictive struct { + client blockchain.EVMClient + consumer *upkeep_perform_counter_restrictive_wrapper.UpkeepPerformCounterRestrictive + address *common.Address +} + +func (v *EthereumUpkeepPerformCounterRestrictive) Address() string { + return v.address.Hex() +} + +func (v *EthereumUpkeepPerformCounterRestrictive) Fund(ethAmount *big.Float) error { + gasEstimates, err := v.client.EstimateGas(geth.CallMsg{}) + if err != nil { + return err + } + return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) +} +func (v *EthereumUpkeepPerformCounterRestrictive) Counter(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + count, err := v.consumer.GetCountPerforms(opts) + return count, err +} + +func (v *EthereumUpkeepPerformCounterRestrictive) SetSpread(testRange *big.Int, interval *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.consumer.SetSpread(opts, testRange, interval) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +// EthereumKeeperConsumer represents keeper consumer (upkeep) contract +type EthereumKeeperConsumer struct { + client blockchain.EVMClient + consumer *keeper_consumer_wrapper.KeeperConsumer + address *common.Address +} + +// Just pass for non-logtrigger +func (v *EthereumKeeperConsumer) Start() error { + return nil +} + +func (v *EthereumKeeperConsumer) Address() 
string { + return v.address.Hex() +} + +func (v *EthereumKeeperConsumer) Counter(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + cnt, err := v.consumer.Counter(opts) + if err != nil { + return nil, err + } + return cnt, nil +} + +type EthereumAutomationStreamsLookupUpkeepConsumer struct { + client blockchain.EVMClient + consumer *streams_lookup_upkeep_wrapper.StreamsLookupUpkeep + address *common.Address +} + +func (v *EthereumAutomationStreamsLookupUpkeepConsumer) Address() string { + return v.address.Hex() +} + +func (v *EthereumAutomationStreamsLookupUpkeepConsumer) Start() error { + // For this consumer upkeep, we use this Start() function to set ParamKeys so as to run mercury v0.2 + txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + // The default values of ParamKeys are "feedIDs" and "timestamp" which are for v0.3 + tx, err := v.consumer.SetParamKeys(txOpts, "feedIdHex", "blockNumber") + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumAutomationStreamsLookupUpkeepConsumer) Counter(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + cnt, err := v.consumer.Counter(opts) + if err != nil { + return nil, err + } + return cnt, nil +} + +type EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer struct { + client blockchain.EVMClient + consumer *log_triggered_streams_lookup_wrapper.LogTriggeredStreamsLookup + address *common.Address +} + +func (v *EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Address() string { + return v.address.Hex() +} + +// Kick off the log trigger event. 
The contract uses Mercury v0.2 so no need to set ParamKeys +func (v *EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Start() error { + txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.consumer.Start(txOpts) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumAutomationLogTriggeredStreamsLookupUpkeepConsumer) Counter(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + cnt, err := v.consumer.Counter(opts) + if err != nil { + return nil, err + } + return cnt, nil +} + +type EthereumAutomationLogCounterConsumer struct { + client blockchain.EVMClient + consumer *log_upkeep_counter_wrapper.LogUpkeepCounter + address *common.Address +} + +func (v *EthereumAutomationLogCounterConsumer) Address() string { + return v.address.Hex() +} + +func (v *EthereumAutomationLogCounterConsumer) Start() error { + txOpts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + + tx, err := v.consumer.Start(txOpts) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumAutomationLogCounterConsumer) Counter(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + cnt, err := v.consumer.Counter(opts) + if err != nil { + return nil, err + } + return cnt, nil +} + +type EthereumAutomationSimpleLogCounterConsumer struct { + client blockchain.EVMClient + consumer *simple_log_upkeep_counter_wrapper.SimpleLogUpkeepCounter + address *common.Address +} + +func (v *EthereumAutomationSimpleLogCounterConsumer) Address() string { + return v.address.Hex() +} + +func (v *EthereumAutomationSimpleLogCounterConsumer) Start() error { + return nil +} + +func (v 
*EthereumAutomationSimpleLogCounterConsumer) Counter(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + cnt, err := v.consumer.Counter(opts) + if err != nil { + return nil, err + } + return cnt, nil +} + +// EthereumKeeperConsumerPerformance represents a more complicated keeper consumer contract, one intended only for +// performance tests. +type EthereumKeeperConsumerPerformance struct { + client blockchain.EVMClient + consumer *keeper_consumer_performance_wrapper.KeeperConsumerPerformance + address *common.Address +} + +func (v *EthereumKeeperConsumerPerformance) Address() string { + return v.address.Hex() +} + +func (v *EthereumKeeperConsumerPerformance) Fund(ethAmount *big.Float) error { + gasEstimates, err := v.client.EstimateGas(geth.CallMsg{}) + if err != nil { + return err + } + return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) +} + +func (v *EthereumKeeperConsumerPerformance) CheckEligible(ctx context.Context) (bool, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + eligible, err := v.consumer.CheckEligible(opts) + return eligible, err +} + +func (v *EthereumKeeperConsumerPerformance) GetUpkeepCount(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + eligible, err := v.consumer.GetCountPerforms(opts) + return eligible, err +} + +func (v *EthereumKeeperConsumerPerformance) SetCheckGasToBurn(_ context.Context, gas *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.consumer.SetCheckGasToBurn(opts, gas) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumKeeperConsumerPerformance) SetPerformGasToBurn(_ context.Context, gas *big.Int) 
error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.consumer.SetPerformGasToBurn(opts, gas) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +// EthereumKeeperPerformDataCheckerConsumer represents keeper perform data checker contract +type EthereumKeeperPerformDataCheckerConsumer struct { + client blockchain.EVMClient + performDataChecker *perform_data_checker_wrapper.PerformDataChecker + address *common.Address +} + +func (v *EthereumKeeperPerformDataCheckerConsumer) Address() string { + return v.address.Hex() +} + +func (v *EthereumKeeperPerformDataCheckerConsumer) Counter(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + cnt, err := v.performDataChecker.Counter(opts) + if err != nil { + return nil, err + } + return cnt, nil +} + +func (v *EthereumKeeperPerformDataCheckerConsumer) SetExpectedData(_ context.Context, expectedData []byte) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.performDataChecker.SetExpectedData(opts, expectedData) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +// EthereumAutomationConsumerBenchmark represents a more complicated keeper consumer contract, one intended only for +// Benchmark tests. 
+type EthereumAutomationConsumerBenchmark struct { + client blockchain.EVMClient + consumer *automation_consumer_benchmark.AutomationConsumerBenchmark + address *common.Address +} + +func (v *EthereumAutomationConsumerBenchmark) Address() string { + return v.address.Hex() +} + +func (v *EthereumAutomationConsumerBenchmark) Fund(ethAmount *big.Float) error { + gasEstimates, err := v.client.EstimateGas(geth.CallMsg{}) + if err != nil { + return err + } + return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) +} + +func (v *EthereumAutomationConsumerBenchmark) CheckEligible(ctx context.Context, id *big.Int, _range *big.Int, firstEligibleBuffer *big.Int) (bool, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + eligible, err := v.consumer.CheckEligible(opts, id, _range, firstEligibleBuffer) + return eligible, err +} + +func (v *EthereumAutomationConsumerBenchmark) GetUpkeepCount(ctx context.Context, id *big.Int) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + eligible, err := v.consumer.GetCountPerforms(opts, id) + return eligible, err +} + +// EthereumKeeperRegistrar corresponds to the registrar which is used to send requests to the registry when +// registering new upkeeps. 
type EthereumKeeperRegistrar struct {
	client blockchain.EVMClient
	// Exactly one of the following registrar bindings is expected to be
	// non-nil, selecting the registrar contract version in use.
	registrar   *keeper_registrar_wrapper1_2.KeeperRegistrar
	registrar20 *keeper_registrar_wrapper2_0.KeeperRegistrar
	registrar21 *registrar21.AutomationRegistrar
	address     *common.Address
}

// Address returns the hex-encoded on-chain address of the registrar.
func (v *EthereumKeeperRegistrar) Address() string {
	return v.address.Hex()
}

// Fund sends the given native-token amount to the registrar address.
func (v *EthereumKeeperRegistrar) Fund(ethAmount *big.Float) error {
	gasEstimates, err := v.client.EstimateGas(geth.CallMsg{})
	if err != nil {
		return err
	}
	return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates)
}

// EncodeRegisterRequest encodes register request to call it through link token TransferAndCall.
// The encoding is chosen by which registrar binding is populated:
//   - registrar20: v2.0 layout (no trigger type, empty offchainConfig).
//   - registrar21: v2.1 layout; isLogTrigger selects trigger type 1 with a packed
//     LogTriggerConfig (isMercury picks the Mercury-specific topic0), otherwise
//     trigger type 0 with empty triggerConfig.
//   - fallback: v1.2 layout, which is the only one that includes `source`.
//
// NOTE(review): the v2.1 branch relies on package-level registrarABI/utilsABI
// declared elsewhere in this file, while v2.0/v1.2 re-parse their ABI per call.
func (v *EthereumKeeperRegistrar) EncodeRegisterRequest(name string, email []byte, upkeepAddr string, gasLimit uint32, adminAddr string, checkData []byte, amount *big.Int, source uint8, senderAddr string, isLogTrigger bool, isMercury bool) ([]byte, error) {
	if v.registrar20 != nil {
		registryABI, err := abi.JSON(strings.NewReader(keeper_registrar_wrapper2_0.KeeperRegistrarMetaData.ABI))
		if err != nil {
			return nil, err
		}
		req, err := registryABI.Pack(
			"register",
			name,
			email,
			common.HexToAddress(upkeepAddr),
			gasLimit,
			common.HexToAddress(adminAddr),
			checkData,
			[]byte{}, //offchainConfig
			amount,
			common.HexToAddress(senderAddr),
		)

		if err != nil {
			return nil, err
		}
		return req, nil
	} else if v.registrar21 != nil {
		if isLogTrigger {
			var topic0InBytes [32]byte
			// bytes representation of 0x0000000000000000000000000000000000000000000000000000000000000000
			bytes0 := [32]byte{
				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			}
			if isMercury {
				// bytes representation of 0xd1ffe9e45581c11d7d9f2ed5f75217cd4be9f8b7eee6af0f6d03f46de53956cd
				topic0InBytes = [32]byte{209, 255, 233, 228, 85, 129, 193, 29, 125, 159, 46, 213, 247, 82, 23, 205, 75, 233, 248, 183, 238, 230, 175, 15, 109, 3, 244, 109, 229, 57, 86, 205}
			} else {
				// bytes representation of 0x3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d
				topic0InBytes = [32]byte{
					61, 83, 163, 149, 80, 224, 70, 136,
					6, 88, 39, 243, 187, 134, 88, 76,
					176, 7, 171, 158, 188, 167, 235,
					213, 40, 231, 48, 28, 156, 49, 235, 93,
				}
			}

			// Only topic0 is filtered on (FilterSelector 0); the remaining
			// topics are left as zero bytes.
			logTriggerConfigStruct := automation_utils_2_1.LogTriggerConfig{
				ContractAddress: common.HexToAddress(upkeepAddr),
				FilterSelector:  0,
				Topic0:          topic0InBytes,
				Topic1:          bytes0,
				Topic2:          bytes0,
				Topic3:          bytes0,
			}
			encodedLogTriggerConfig, err := utilsABI.Methods["_logTriggerConfig"].Inputs.Pack(&logTriggerConfigStruct)
			if err != nil {
				return nil, err
			}

			req, err := registrarABI.Pack(
				"register",
				name,
				email,
				common.HexToAddress(upkeepAddr),
				gasLimit,
				common.HexToAddress(adminAddr),
				uint8(1), // trigger type
				checkData,
				encodedLogTriggerConfig, // triggerConfig
				[]byte{},                // offchainConfig
				amount,
				common.HexToAddress(senderAddr),
			)

			return req, err
		}
		req, err := registrarABI.Pack(
			"register",
			name,
			email,
			common.HexToAddress(upkeepAddr),
			gasLimit,
			common.HexToAddress(adminAddr),
			uint8(0), // trigger type
			checkData,
			[]byte{}, // triggerConfig
			[]byte{}, // offchainConfig
			amount,
			common.HexToAddress(senderAddr),
		)
		return req, err
	}
	// v1.2 fallback: the only layout that carries the `source` field.
	registryABI, err := abi.JSON(strings.NewReader(keeper_registrar_wrapper1_2.KeeperRegistrarMetaData.ABI))
	if err != nil {
		return nil, err
	}
	req, err := registryABI.Pack(
		"register",
		name,
		email,
		common.HexToAddress(upkeepAddr),
		gasLimit,
		common.HexToAddress(adminAddr),
		checkData,
		amount,
		source,
		common.HexToAddress(senderAddr),
	)
	if err != nil {
		return nil, err
	}
	return req, nil
}

// EthereumUpkeepTranscoder represents the transcoder which is used to perform migrations
// of upkeeps from one registry to another.
type EthereumUpkeepTranscoder struct {
	client     blockchain.EVMClient
	transcoder *upkeep_transcoder.UpkeepTranscoder
	address    *common.Address
}

// Address returns the hex-encoded on-chain address of the transcoder.
func (v *EthereumUpkeepTranscoder) Address() string {
	return v.address.Hex()
}
diff --git a/integration-tests/contracts/ethereum_ocr2vrf_contracts.go b/integration-tests/contracts/ethereum_ocr2vrf_contracts.go
new file mode 100644
index 00000000..b34da674
--- /dev/null
+++ b/integration-tests/contracts/ethereum_ocr2vrf_contracts.go
@@ -0,0 +1,573 @@
package contracts

import (
	"context"
	"encoding/hex"
	"fmt"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/rs/zerolog/log"

	"github.com/goplugin/plugin-testing-framework/blockchain"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_blockhash_store"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/dkg"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_beacon_consumer"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/ocr2vrf/generated/vrf_coordinator"
)

// EthereumDKG represents DKG contract
type EthereumDKG struct {
	address *common.Address
	client  blockchain.EVMClient
	dkg     *dkg.DKG
}

// EthereumVRFCoordinatorV3 represents VRFCoordinatorV3 contract
type EthereumVRFCoordinatorV3 struct {
	address          *common.Address
	client           blockchain.EVMClient
	vrfCoordinatorV3 *vrf_coordinator.VRFCoordinator
}

// EthereumVRFBeacon represents VRFBeacon contract
type EthereumVRFBeacon struct {
	address   *common.Address
	client    blockchain.EVMClient
	vrfBeacon *vrf_beacon.VRFBeacon
}

// EthereumVRFBeaconConsumer represents VRFBeaconConsumer contract
type EthereumVRFBeaconConsumer struct {
	address           *common.Address
	client            blockchain.EVMClient
	vrfBeaconConsumer *vrf_beacon_consumer.BeaconVRFConsumer
}

// EthereumVRFCoordinator represents VRF coordinator contract
type EthereumVRFCoordinator struct {
	address     *common.Address
	client      blockchain.EVMClient
	coordinator *solidity_vrf_coordinator_interface.VRFCoordinator
}

// DeployDKG deploys DKG contract
func (e *EthereumContractDeployer) DeployDKG() (DKG, error) {
	address, _, instance, err := e.client.DeployContract("DKG", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		return dkg.DeployDKG(auth, backend)
	})
	if err != nil {
		return nil, err
	}
	return &EthereumDKG{
		client:  e.client,
		dkg:     instance.(*dkg.DKG),
		address: address,
	}, err
}

// DeployOCR2VRFCoordinator deploys OCR2VRFCoordinator contract
func (e *EthereumContractDeployer) DeployOCR2VRFCoordinator(beaconPeriodBlocksCount *big.Int, linkAddress string) (VRFCoordinatorV3, error) {
	address, _, instance, err := e.client.DeployContract("VRFCoordinatorV3", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		return vrf_coordinator.DeployVRFCoordinator(auth, backend, beaconPeriodBlocksCount, common.HexToAddress(linkAddress))
	})
	if err != nil {
		return nil, err
	}
	return &EthereumVRFCoordinatorV3{
		client:           e.client,
		vrfCoordinatorV3: instance.(*vrf_coordinator.VRFCoordinator),
		address:          address,
	}, err
}

// DeployVRFBeacon deploys the VRFBeacon contract
func (e *EthereumContractDeployer) DeployVRFBeacon(vrfCoordinatorAddress string, linkAddress string, dkgAddress string, keyId string) (VRFBeacon, error) {
	keyIDBytes, err := DecodeHexTo32ByteArray(keyId)
	if err != nil {
		return nil, err
	}
	address, _, instance, err := e.client.DeployContract("VRFBeacon", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		return vrf_beacon.DeployVRFBeacon(auth, backend, common.HexToAddress(linkAddress), common.HexToAddress(vrfCoordinatorAddress), common.HexToAddress(dkgAddress), keyIDBytes)
	})
	if err != nil {
		return nil, err
	}
	return &EthereumVRFBeacon{
		client:    e.client,
		vrfBeacon: instance.(*vrf_beacon.VRFBeacon),
		address:   address,
	}, err
}

// DeployBatchBlockhashStore deploys the BatchBlockhashStore contract
func (e *EthereumContractDeployer) DeployBatchBlockhashStore(blockhashStoreAddr string) (BatchBlockhashStore, error) {
	address, _, instance, err := e.client.DeployContract("BatchBlockhashStore", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		return batch_blockhash_store.DeployBatchBlockhashStore(auth, backend, common.HexToAddress(blockhashStoreAddr))
	})
	if err != nil {
		return nil, err
	}
	return &EthereumBatchBlockhashStore{
		client:              e.client,
		batchBlockhashStore: instance.(*batch_blockhash_store.BatchBlockhashStore),
		address:             address,
	}, err
}

// DecodeHexTo32ByteArray decodes a hex string (no 0x prefix) into a fixed
// 32-byte array, erroring when the decoded length is not exactly 32 bytes.
// todo - solve import cycle
func DecodeHexTo32ByteArray(val string) ([32]byte, error) {
	var byteArray [32]byte
	decoded, err := hex.DecodeString(val)
	if err != nil {
		return [32]byte{}, err
	}
	if len(decoded) != 32 {
		return [32]byte{}, fmt.Errorf("expected value to be 32 bytes but received %d bytes", len(decoded))
	}
	copy(byteArray[:], decoded)
	return byteArray, err
}

// DeployVRFBeaconConsumer deploys the VRFBeaconConsumer contract
func (e *EthereumContractDeployer) DeployVRFBeaconConsumer(vrfCoordinatorAddress string, beaconPeriodBlockCount *big.Int) (VRFBeaconConsumer, error) {
	address, _, instance, err := e.client.DeployContract("VRFBeaconConsumer", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		return vrf_beacon_consumer.DeployBeaconVRFConsumer(auth, backend, common.HexToAddress(vrfCoordinatorAddress), false, beaconPeriodBlockCount)
	})
	if err != nil {
		return nil, err
	}
	return &EthereumVRFBeaconConsumer{
		client:            e.client,
		vrfBeaconConsumer: instance.(*vrf_beacon_consumer.BeaconVRFConsumer),
		address:           address,
	}, err
}

// Address returns the hex-encoded on-chain address of the DKG contract.
func (dkgContract *EthereumDKG) Address() string {
	return dkgContract.address.Hex()
}

// AddClient registers a client contract under the given key ID on the DKG.
func (dkgContract *EthereumDKG) AddClient(keyID string, clientAddress string) error {
	keyIDBytes, err := DecodeHexTo32ByteArray(keyID)
	if err != nil {
		return err
	}
	opts, err := dkgContract.client.TransactionOpts(dkgContract.client.GetDefaultWallet())
	if err != nil {
		return err
	}
	tx, err := dkgContract.dkg.AddClient(
		opts,
		keyIDBytes,
		common.HexToAddress(clientAddress),
	)
	if err != nil {
		return err
	}
	return dkgContract.client.ProcessTransaction(tx)

}

// SetConfig sets the OCR configuration on the DKG contract.
func (dkgContract *EthereumDKG) SetConfig(
	signerAddresses []common.Address,
	transmitterAddresses []common.Address,
	f uint8,
	onchainConfig []byte,
	offchainConfigVersion uint64,
	offchainConfig []byte,
) error {
	opts, err := dkgContract.client.TransactionOpts(dkgContract.client.GetDefaultWallet())
	if err != nil {
		return err
	}
	tx, err := dkgContract.dkg.SetConfig(
		opts,
		signerAddresses,
		transmitterAddresses,
		f,
		onchainConfig,
		offchainConfigVersion,
		offchainConfig,
	)
	if err != nil {
		return err
	}
	return dkgContract.client.ProcessTransaction(tx)
}

// WaitForTransmittedEvent blocks until a DKGTransmitted event is observed or
// the timeout elapses.
func (dkgContract *EthereumDKG) WaitForTransmittedEvent(timeout time.Duration) (*dkg.DKGTransmitted, error) {
	transmittedEventsChannel := make(chan *dkg.DKGTransmitted)
	subscription, err := dkgContract.dkg.WatchTransmitted(nil, transmittedEventsChannel)
	if err != nil {
		return nil, err
	}
	defer subscription.Unsubscribe()

	for {
		select {
		case err = <-subscription.Err():
			return nil, err
		case <-time.After(timeout):
			return nil, fmt.Errorf("timeout waiting for DKGTransmitted event")
		case transmittedEvent := <-transmittedEventsChannel:
			return transmittedEvent, nil
		}
	}
}

// WaitForConfigSetEvent blocks until a DKGConfigSet event is observed or the
// timeout elapses.
func (dkgContract *EthereumDKG) WaitForConfigSetEvent(timeout time.Duration) (*dkg.DKGConfigSet, error) {
	configSetEventsChannel := make(chan *dkg.DKGConfigSet)
	subscription, err := dkgContract.dkg.WatchConfigSet(nil, configSetEventsChannel)
	if err != nil {
		return nil, err
	}
	defer subscription.Unsubscribe()

	for {
		select {
		case err = <-subscription.Err():
			return nil, err
		case <-time.After(timeout):
			return nil, fmt.Errorf("timeout waiting for DKGConfigSet event")
		case configSetEvent := <-configSetEventsChannel:
			return configSetEvent, nil
		}
	}
}

// Address returns the hex-encoded on-chain address of the coordinator.
func (coordinator *EthereumVRFCoordinatorV3) Address() string {
	return coordinator.address.Hex()
}

// SetProducer sets the address allowed to produce randomness on the coordinator.
func (coordinator *EthereumVRFCoordinatorV3) SetProducer(producerAddress string) error {
	opts, err := coordinator.client.TransactionOpts(coordinator.client.GetDefaultWallet())
	if err != nil {
		return err
	}
	tx, err := coordinator.vrfCoordinatorV3.SetProducer(
		opts,
		common.HexToAddress(producerAddress),
	)
	if err != nil {
		return err
	}
	return coordinator.client.ProcessTransaction(tx)
}

// CreateSubscription creates a new coordinator subscription owned by the
// default wallet.
func (coordinator *EthereumVRFCoordinatorV3) CreateSubscription() error {
	opts, err := coordinator.client.TransactionOpts(coordinator.client.GetDefaultWallet())
	if err != nil {
		return err
	}
	tx, err := coordinator.vrfCoordinatorV3.CreateSubscription(
		opts,
	)
	if err != nil {
		return err
	}
	return coordinator.client.ProcessTransaction(tx)
}

// FindSubscriptionID returns the first subscription ID created by the default
// wallet, erroring when none exists.
func (coordinator *EthereumVRFCoordinatorV3) FindSubscriptionID() (*big.Int, error) {
	fopts := &bind.FilterOpts{}
	owner := coordinator.client.GetDefaultWallet().Address()

	subscriptionIterator, err := coordinator.vrfCoordinatorV3.FilterSubscriptionCreated(
		fopts,
		nil,
		[]common.Address{common.HexToAddress(owner)},
	)
	if err != nil {
		return nil, err
	}

	if !subscriptionIterator.Next() {
		return nil, fmt.Errorf("expected at least 1 subID for the given owner %s", owner)
	}

	return subscriptionIterator.Event.SubId, nil
}

// AddConsumer authorizes a consumer contract on the given subscription.
func (coordinator *EthereumVRFCoordinatorV3) AddConsumer(subId *big.Int, consumerAddress string) error {
	opts, err := coordinator.client.TransactionOpts(coordinator.client.GetDefaultWallet())
	if err != nil {
		return err
	}
	tx, err := coordinator.vrfCoordinatorV3.AddConsumer(
		opts,
		subId,
		common.HexToAddress(consumerAddress),
	)
	if err != nil {
		return err
	}
	return coordinator.client.ProcessTransaction(tx)
}

// SetConfig sets the coordinator's callback gas and argument-length limits.
func (coordinator *EthereumVRFCoordinatorV3) SetConfig(maxCallbackGasLimit uint32, maxCallbackArgumentsLength uint32) error {
	opts, err := coordinator.client.TransactionOpts(coordinator.client.GetDefaultWallet())
	if err != nil {
		return err
	}
	tx, err := coordinator.vrfCoordinatorV3.SetCallbackConfig(
		opts,
		vrf_coordinator.VRFCoordinatorCallbackConfig{
			MaxCallbackGasLimit:        maxCallbackGasLimit,
			MaxCallbackArgumentsLength: maxCallbackArgumentsLength, // 5 EVM words
		},
	)
	if err != nil {
		return err
	}
	return coordinator.client.ProcessTransaction(tx)
}

// Address returns the hex-encoded on-chain address of the beacon.
func (beacon *EthereumVRFBeacon) Address() string {
	return beacon.address.Hex()
}

// SetPayees assigns payee addresses for the given transmitters.
func (beacon *EthereumVRFBeacon) SetPayees(transmitterAddresses []common.Address, payeesAddresses []common.Address) error {
	opts, err := beacon.client.TransactionOpts(beacon.client.GetDefaultWallet())
	if err != nil {
		return err
	}

	tx, err := beacon.vrfBeacon.SetPayees(
		opts,
		transmitterAddresses,
		payeesAddresses,
	)
	if err != nil {
		return err
	}
	return beacon.client.ProcessTransaction(tx)
}

// SetConfig sets the OCR configuration on the beacon contract.
func (beacon *EthereumVRFBeacon) SetConfig(
	signerAddresses []common.Address,
	transmitterAddresses []common.Address,
	f uint8,
	onchainConfig []byte,
	offchainConfigVersion uint64,
	offchainConfig []byte,
) error {
	opts, err := beacon.client.TransactionOpts(beacon.client.GetDefaultWallet())
	if err != nil {
		return err
	}
	tx, err := beacon.vrfBeacon.SetConfig(
		opts,
		signerAddresses,
		transmitterAddresses,
		f,
		onchainConfig,
		offchainConfigVersion,
		offchainConfig,
	)
	if err != nil {
		return err
	}
	return beacon.client.ProcessTransaction(tx)
}

// WaitForConfigSetEvent blocks until a VRFBeaconConfigSet event is observed or
// the timeout elapses.
func (beacon *EthereumVRFBeacon) WaitForConfigSetEvent(timeout time.Duration) (*vrf_beacon.VRFBeaconConfigSet, error) {
	configSetEventsChannel := make(chan *vrf_beacon.VRFBeaconConfigSet)
	subscription, err := beacon.vrfBeacon.WatchConfigSet(nil, configSetEventsChannel)
	if err != nil {
		return nil, err
	}
	defer subscription.Unsubscribe()

	for {
		select {
		case err := <-subscription.Err():
			return nil, err
		case <-time.After(timeout):
			return nil, fmt.Errorf("timeout waiting for config set event")
		case configSetEvent := <-configSetEventsChannel:
			return configSetEvent, nil
		}
	}
}

// WaitForNewTransmissionEvent blocks until a VRFBeaconNewTransmission event is
// observed or the timeout elapses.
func (beacon *EthereumVRFBeacon) WaitForNewTransmissionEvent(timeout time.Duration) (*vrf_beacon.VRFBeaconNewTransmission, error) {
	newTransmissionEventsChannel := make(chan *vrf_beacon.VRFBeaconNewTransmission)
	subscription, err := beacon.vrfBeacon.WatchNewTransmission(nil, newTransmissionEventsChannel, nil)
	if err != nil {
		return nil, err
	}
	defer subscription.Unsubscribe()

	for {
		select {
		case err := <-subscription.Err():
			return nil, err
		case <-time.After(timeout):
			return nil, fmt.Errorf("timeout waiting for new transmission event")
		case newTransmissionEvent := <-newTransmissionEventsChannel:
			return newTransmissionEvent, nil
		}
	}
}

// LatestConfigDigestAndEpoch reads the beacon's latest config digest and epoch.
func (beacon *EthereumVRFBeacon) LatestConfigDigestAndEpoch(ctx context.Context) (vrf_beacon.LatestConfigDigestAndEpoch,
	error) {
	opts := &bind.CallOpts{
		From:    common.HexToAddress(beacon.client.GetDefaultWallet().Address()),
		Context: ctx,
	}
	return beacon.vrfBeacon.LatestConfigDigestAndEpoch(opts)
}

// Address returns the hex-encoded on-chain address of the beacon consumer.
func (consumer *EthereumVRFBeaconConsumer) Address() string {
	return consumer.address.Hex()
}

// RequestRandomness submits a randomness request, waits for it to be mined,
// and returns the transaction receipt.
func (consumer *EthereumVRFBeaconConsumer) RequestRandomness(
	numWords uint16,
	subID, confirmationDelayArg *big.Int,
) (*types.Receipt, error) {
	opts, err := consumer.client.TransactionOpts(consumer.client.GetDefaultWallet())
	if err != nil {
		return nil, fmt.Errorf("TransactionOpts failed, err: %w", err)
	}
	tx, err := consumer.vrfBeaconConsumer.TestRequestRandomness(
		opts,
		numWords,
		subID,
		confirmationDelayArg,
	)
	if err != nil {
		return nil, fmt.Errorf("TestRequestRandomness failed, err: %w", err)
	}
	err = consumer.client.ProcessTransaction(tx)
	if err != nil {
		return nil, fmt.Errorf("ProcessTransaction failed, err: %w", err)
	}
	err = consumer.client.WaitForEvents()

	if err != nil {
		return nil, fmt.Errorf("WaitForEvents failed, err: %w", err)
	}
	receipt, err := consumer.client.GetTxReceipt(tx.Hash())
	if err != nil {
		return nil, fmt.Errorf("GetTxReceipt failed, err: %w", err)
	}
	log.Info().Interface("Sub ID", subID).
		Interface("Number of Words", numWords).
		Interface("Number of Confirmations", confirmationDelayArg).
		Msg("RequestRandomness called")
	return receipt, nil
}

// RedeemRandomness redeems the randomness produced for the given request.
func (consumer *EthereumVRFBeaconConsumer) RedeemRandomness(
	subID, requestID *big.Int,
) error {
	opts, err := consumer.client.TransactionOpts(consumer.client.GetDefaultWallet())
	if err != nil {
		return err
	}
	tx, err := consumer.vrfBeaconConsumer.TestRedeemRandomness(
		opts,
		subID,
		requestID,
	)
	if err != nil {
		return err
	}
	log.Info().Interface("Sub ID", subID).
		Interface("Request ID", requestID).
		Msg("RedeemRandomness called")
	return consumer.client.ProcessTransaction(tx)
}

// RequestRandomnessFulfillment requests randomness with a fulfillment
// callback, waits for it to be mined, and returns the transaction receipt.
func (consumer *EthereumVRFBeaconConsumer) RequestRandomnessFulfillment(
	numWords uint16,
	subID, confirmationDelayArg *big.Int,
	requestGasLimit uint32,
	callbackGasLimit uint32,
	arguments []byte,
) (*types.Receipt, error) {
	opts, err := consumer.client.TransactionOpts(consumer.client.GetDefaultWallet())
	if err != nil {
		return nil, err
	}
	// overriding gas limit because gas estimated by TestRequestRandomnessFulfillment
	// is incorrect
	opts.GasLimit = uint64(requestGasLimit)
	tx, err := consumer.vrfBeaconConsumer.TestRequestRandomnessFulfillment(
		opts,
		subID,
		numWords,
		confirmationDelayArg,
		callbackGasLimit,
		arguments,
	)
	if err != nil {
		return nil, fmt.Errorf("TestRequestRandomnessFulfillment failed, err: %w", err)
	}
	err = consumer.client.ProcessTransaction(tx)
	if err != nil {
		return nil, fmt.Errorf("ProcessTransaction failed, err: %w", err)
	}
	err = consumer.client.WaitForEvents()

	if err != nil {
		return nil, fmt.Errorf("WaitForEvents failed, err: %w", err)
	}
	receipt, err := consumer.client.GetTxReceipt(tx.Hash())
	if err != nil {
		return nil, fmt.Errorf("GetTxReceipt failed, err: %w", err)
	}
	log.Info().Interface("Sub ID", subID).
		Interface("Number of Words", numWords).
		Interface("Number of Confirmations", confirmationDelayArg).
		Interface("Callback Gas Limit", callbackGasLimit).
		Msg("RequestRandomnessFulfillment called")
	return receipt, nil
}

// IBeaconPeriodBlocks reads the consumer's beacon period in blocks.
func (consumer *EthereumVRFBeaconConsumer) IBeaconPeriodBlocks(ctx context.Context) (*big.Int, error) {
	opts := &bind.CallOpts{
		From:    common.HexToAddress(consumer.client.GetDefaultWallet().Address()),
		Context: ctx,
	}
	return consumer.vrfBeaconConsumer.IBeaconPeriodBlocks(opts)
}

// GetRequestIdsBy reads the stored request ID for the given beacon output
// height and confirmation delay.
func (consumer *EthereumVRFBeaconConsumer) GetRequestIdsBy(ctx context.Context, nextBeaconOutputHeight *big.Int, confDelay *big.Int) (*big.Int, error) {
	opts := &bind.CallOpts{
		From:    common.HexToAddress(consumer.client.GetDefaultWallet().Address()),
		Context: ctx,
	}
	return consumer.vrfBeaconConsumer.SRequestsIDs(opts, nextBeaconOutputHeight, confDelay)
}

// GetRandomnessByRequestId reads a received random word for the given request
// ID and word index.
func (consumer *EthereumVRFBeaconConsumer) GetRandomnessByRequestId(ctx context.Context, requestID *big.Int, numWordIndex *big.Int) (*big.Int, error) {
	opts := &bind.CallOpts{
		From:    common.HexToAddress(consumer.client.GetDefaultWallet().Address()),
		Context: ctx,
	}
	return consumer.vrfBeaconConsumer.SReceivedRandomnessByRequestID(opts, requestID, numWordIndex)
}
diff --git a/integration-tests/contracts/ethereum_vrf_contracts.go b/integration-tests/contracts/ethereum_vrf_contracts.go
new file mode 100644
index 00000000..d947121e
--- /dev/null
+++ b/integration-tests/contracts/ethereum_vrf_contracts.go
@@ -0,0 +1,313 @@
package contracts

import (
	"context"
	"fmt"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/rs/zerolog/log"

	"github.com/goplugin/plugin-testing-framework/blockchain"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/batch_blockhash_store"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_consumer_interface"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_coordinator_interface"
	"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/solidity_vrf_wrapper"
)

// EthereumBatchBlockhashStore represents BatchBlockhashStore contract
type EthereumBatchBlockhashStore struct {
	address             *common.Address
	client              blockchain.EVMClient
	batchBlockhashStore *batch_blockhash_store.BatchBlockhashStore
}

// EthereumBlockhashStore represents a blockhash store for VRF contract
type EthereumBlockhashStore struct {
	address        *common.Address
	client         blockchain.EVMClient
	blockHashStore *blockhash_store.BlockhashStore
}

// EthereumVRFConsumer represents VRF consumer contract
type EthereumVRFConsumer struct {
	address  *common.Address
	client   blockchain.EVMClient
	consumer *solidity_vrf_consumer_interface.VRFConsumer
}

// VRFConsumerRoundConfirmer is a header subscription that awaits for a certain VRF round to be completed
type VRFConsumerRoundConfirmer struct {
	consumer VRFConsumer
	roundID  *big.Int
	doneChan chan struct{}
	context  context.Context
	cancel   context.CancelFunc
	done     bool
}

// EthereumVRF represents a VRF contract
type EthereumVRF struct {
	client  blockchain.EVMClient
	vrf     *solidity_vrf_wrapper.VRF
	address *common.Address
}

// DeployVRFContract deploy VRF contract
func (e *EthereumContractDeployer) DeployVRFContract() (VRF, error) {
	address, _, instance, err := e.client.DeployContract("VRF", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		return solidity_vrf_wrapper.DeployVRF(auth, backend)
	})
	if err != nil {
		return nil, err
	}
	return &EthereumVRF{
		client:  e.client,
		vrf:     instance.(*solidity_vrf_wrapper.VRF),
		address: address,
	}, err
}

// DeployBlockhashStore deploys blockhash store used with VRF contract
func (e *EthereumContractDeployer) DeployBlockhashStore() (BlockHashStore, error) {
	address, _, instance, err := e.client.DeployContract("BlockhashStore", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		return blockhash_store.DeployBlockhashStore(auth, backend)
	})
	if err != nil {
		return nil, err
	}
	return &EthereumBlockhashStore{
		client:         e.client,
		blockHashStore: instance.(*blockhash_store.BlockhashStore),
		address:        address,
	}, err
}

// DeployVRFCoordinator deploys VRF coordinator contract
func (e *EthereumContractDeployer) DeployVRFCoordinator(linkAddr string, bhsAddr string) (VRFCoordinator, error) {
	address, _, instance, err := e.client.DeployContract("VRFCoordinator", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		return solidity_vrf_coordinator_interface.DeployVRFCoordinator(auth, backend, common.HexToAddress(linkAddr), common.HexToAddress(bhsAddr))
	})
	if err != nil {
		return nil, err
	}
	return &EthereumVRFCoordinator{
		client:      e.client,
		coordinator: instance.(*solidity_vrf_coordinator_interface.VRFCoordinator),
		address:     address,
	}, err
}

// DeployVRFConsumer deploys VRF consumer contract
func (e *EthereumContractDeployer) DeployVRFConsumer(linkAddr string, coordinatorAddr string) (VRFConsumer, error) {
	address, _, instance, err := e.client.DeployContract("VRFConsumer", func(
		auth *bind.TransactOpts,
		backend bind.ContractBackend,
	) (common.Address, *types.Transaction, interface{}, error) {
		return solidity_vrf_consumer_interface.DeployVRFConsumer(auth, backend, common.HexToAddress(coordinatorAddr), common.HexToAddress(linkAddr))
	})
	if err != nil {
		return nil, err
	}
	return &EthereumVRFConsumer{
		client:   e.client,
		consumer: instance.(*solidity_vrf_consumer_interface.VRFConsumer),
		address:  address,
	}, err
}

// Address returns the hex-encoded on-chain address of the blockhash store.
func (v *EthereumBlockhashStore) Address() string {
	return v.address.Hex()
}

// GetBlockHash reads the stored hash for the given block number.
func (v *EthereumBlockhashStore) GetBlockHash(ctx context.Context, blockNumber *big.Int) ([32]byte, error) {
	opts := &bind.CallOpts{
		From:    common.HexToAddress(v.client.GetDefaultWallet().Address()),
		Context: ctx,
	}
	blockHash, err := v.blockHashStore.GetBlockhash(opts, blockNumber)
	if err != nil {
		return [32]byte{}, err
	}
	return blockHash, nil
}

// Address returns the hex-encoded on-chain address of the coordinator.
func (v *EthereumVRFCoordinator) Address() string {
	return v.address.Hex()
}

// HashOfKey get a hash of proving key to use it as a request ID part for VRF
func (v *EthereumVRFCoordinator) HashOfKey(ctx context.Context, pubKey [2]*big.Int) ([32]byte, error) {
	opts := &bind.CallOpts{
		From:    common.HexToAddress(v.client.GetDefaultWallet().Address()),
		Context: ctx,
	}
	hash, err := v.coordinator.HashOfKey(opts, pubKey)
	if err != nil {
		return [32]byte{}, err
	}
	return hash, nil
}

// RegisterProvingKey register VRF proving key
func (v *EthereumVRFCoordinator) RegisterProvingKey(
	fee *big.Int,
	oracleAddr string,
	publicProvingKey [2]*big.Int,
	jobID [32]byte,
) error {
	opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet())
	if err != nil {
		return err
	}
	tx, err := v.coordinator.RegisterProvingKey(opts, fee, common.HexToAddress(oracleAddr), publicProvingKey, jobID)
	if err != nil {
		return err
	}
	return v.client.ProcessTransaction(tx)
}

// Address returns the hex-encoded on-chain address of the consumer.
func (v *EthereumVRFConsumer) Address() string {
	return v.address.Hex()
}

// Fund sends the given native-token amount to the consumer address.
func (v *EthereumVRFConsumer) Fund(ethAmount *big.Float) error {
	gasEstimates, err := v.client.EstimateGas(ethereum.CallMsg{
		To: v.address,
	})
	if err != nil {
		return err
	}
	return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates)
}

// RequestRandomness requests VRF randomness
func (v *EthereumVRFConsumer) RequestRandomness(hash [32]byte, fee *big.Int) error {
	opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet())
	if err != nil {
		return err
	}
	tx, err := v.consumer.TestRequestRandomness(opts, hash, fee)
+ if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +// CurrentRoundID helper roundID counter in consumer to check when all randomness requests are finished +func (v *EthereumVRFConsumer) CurrentRoundID(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + return v.consumer.CurrentRoundID(opts) +} + +// RandomnessOutput get VRF randomness output +func (v *EthereumVRFConsumer) RandomnessOutput(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + out, err := v.consumer.RandomnessOutput(opts) + if err != nil { + return nil, err + } + return out, nil +} + +// NewVRFConsumerRoundConfirmer provides a new instance of a NewVRFConsumerRoundConfirmer +func NewVRFConsumerRoundConfirmer( + contract VRFConsumer, + roundID *big.Int, + timeout time.Duration, +) *VRFConsumerRoundConfirmer { + ctx, ctxCancel := context.WithTimeout(context.Background(), timeout) + return &VRFConsumerRoundConfirmer{ + consumer: contract, + roundID: roundID, + doneChan: make(chan struct{}), + context: ctx, + cancel: ctxCancel, + } +} + +// ReceiveHeader will query the latest VRFConsumer round and check to see whether the round has confirmed +func (f *VRFConsumerRoundConfirmer) ReceiveHeader(header blockchain.NodeHeader) error { + if f.done { + return nil + } + roundID, err := f.consumer.CurrentRoundID(context.Background()) + if err != nil { + return err + } + logFields := map[string]any{ + "Contract Address": f.consumer.Address(), + "Waiting for Round": f.roundID.Int64(), + "Current Round ID": roundID.Int64(), + "Header Number": header.Number.Uint64(), + } + if roundID.Int64() == f.roundID.Int64() { + randomness, err := f.consumer.RandomnessOutput(context.Background()) + if err != nil { + return err + } + log.Info().Fields(logFields).Uint64("Randomness", 
randomness.Uint64()).Msg("VRFConsumer round completed") + f.done = true + f.doneChan <- struct{}{} + } else { + log.Debug().Fields(logFields).Msg("Waiting for VRFConsumer round") + } + return nil +} + +// Wait is a blocking function that will wait until the round has confirmed, and timeout if the deadline has passed +func (f *VRFConsumerRoundConfirmer) Wait() error { + for { + select { + case <-f.doneChan: + f.cancel() + return nil + case <-f.context.Done(): + return fmt.Errorf("timeout waiting for VRFConsumer round to confirm: %d", f.roundID) + } + } +} + +// Fund sends specified currencies to the contract +func (v *EthereumVRF) Fund(ethAmount *big.Float) error { + gasEstimates, err := v.client.EstimateGas(ethereum.CallMsg{ + To: v.address, + }) + if err != nil { + return err + } + return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) +} + +// ProofLength returns the PROOFLENGTH call from the VRF contract +func (v *EthereumVRF) ProofLength(ctxt context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctxt, + } + return v.vrf.PROOFLENGTH(opts) +} + +func (v *EthereumBatchBlockhashStore) Address() string { + return v.address.Hex() +} diff --git a/integration-tests/contracts/ethereum_vrfv2_contracts.go b/integration-tests/contracts/ethereum_vrfv2_contracts.go new file mode 100644 index 00000000..d9a69853 --- /dev/null +++ b/integration-tests/contracts/ethereum_vrfv2_contracts.go @@ -0,0 +1,1133 @@ +package contracts + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rs/zerolog/log" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_test_v2" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_mock_ethlink_aggregator" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_owner" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_consumer_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_load_test_with_metrics" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2_wrapper_load_test_consumer" + + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2_consumer_wrapper" +) + +// EthereumVRFCoordinatorV2 represents VRFV2 coordinator contract +type EthereumVRFCoordinatorV2 struct { + address *common.Address + client blockchain.EVMClient + coordinator *vrf_coordinator_v2.VRFCoordinatorV2 +} + +type EthereumVRFOwner struct { + address *common.Address + client blockchain.EVMClient + vrfOwner *vrf_owner.VRFOwner +} + +type EthereumVRFCoordinatorTestV2 struct { + address *common.Address + client blockchain.EVMClient + coordinator *vrf_coordinator_test_v2.VRFCoordinatorTestV2 +} + +// EthereumVRFConsumerV2 represents VRFv2 consumer contract +type EthereumVRFConsumerV2 struct { + address *common.Address + client blockchain.EVMClient + consumer *vrf_consumer_v2.VRFConsumerV2 +} + +// EthereumVRFv2Consumer represents VRFv2 consumer contract +type EthereumVRFv2Consumer struct { + address *common.Address + client blockchain.EVMClient + consumer *vrf_v2_consumer_wrapper.VRFv2Consumer +} + +// EthereumVRFv2LoadTestConsumer represents VRFv2 consumer contract for performing Load Tests +type EthereumVRFv2LoadTestConsumer struct { + address *common.Address + client blockchain.EVMClient + consumer *vrf_load_test_with_metrics.VRFV2LoadTestWithMetrics +} + +type EthereumVRFV2Wrapper struct { + address *common.Address + client blockchain.EVMClient + wrapper 
*vrfv2_wrapper.VRFV2Wrapper +} + +type EthereumVRFV2WrapperLoadTestConsumer struct { + address *common.Address + client blockchain.EVMClient + consumer *vrfv2_wrapper_load_test_consumer.VRFV2WrapperLoadTestConsumer +} + +type GetRequestConfig struct { + MinimumRequestConfirmations uint16 + MaxGasLimit uint32 + ProvingKeyHashes [32]byte +} + +type EthereumVRFMockETHPLIFeed struct { + client blockchain.EVMClient + feed *vrf_mock_ethlink_aggregator.VRFMockETHPLIAggregator + address *common.Address +} + +// DeployVRFCoordinatorV2 deploys VRFV2 coordinator contract +func (e *EthereumContractDeployer) DeployVRFCoordinatorV2(linkAddr string, bhsAddr string, linkEthFeedAddr string) (VRFCoordinatorV2, error) { + address, _, instance, err := e.client.DeployContract("VRFCoordinatorV2", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrf_coordinator_v2.DeployVRFCoordinatorV2(auth, backend, common.HexToAddress(linkAddr), common.HexToAddress(bhsAddr), common.HexToAddress(linkEthFeedAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFCoordinatorV2{ + client: e.client, + coordinator: instance.(*vrf_coordinator_v2.VRFCoordinatorV2), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployVRFOwner(coordinatorAddr string) (VRFOwner, error) { + address, _, instance, err := e.client.DeployContract("VRFOwner", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrf_owner.DeployVRFOwner(auth, backend, common.HexToAddress(coordinatorAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFOwner{ + client: e.client, + vrfOwner: instance.(*vrf_owner.VRFOwner), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployVRFCoordinatorTestV2(linkAddr string, bhsAddr string, linkEthFeedAddr string) (*EthereumVRFCoordinatorTestV2, error) { + address, 
_, instance, err := e.client.DeployContract("VRFCoordinatorTestV2", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrf_coordinator_test_v2.DeployVRFCoordinatorTestV2(auth, backend, common.HexToAddress(linkAddr), common.HexToAddress(bhsAddr), common.HexToAddress(linkEthFeedAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFCoordinatorTestV2{ + client: e.client, + coordinator: instance.(*vrf_coordinator_test_v2.VRFCoordinatorTestV2), + address: address, + }, err +} + +// DeployVRFConsumerV2 deploys VRFv2 consumer contract +func (e *EthereumContractDeployer) DeployVRFConsumerV2(linkAddr string, coordinatorAddr string) (VRFConsumerV2, error) { + address, _, instance, err := e.client.DeployContract("VRFConsumerV2", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrf_consumer_v2.DeployVRFConsumerV2(auth, backend, common.HexToAddress(coordinatorAddr), common.HexToAddress(linkAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFConsumerV2{ + client: e.client, + consumer: instance.(*vrf_consumer_v2.VRFConsumerV2), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployVRFv2Consumer(coordinatorAddr string) (VRFv2Consumer, error) { + address, _, instance, err := e.client.DeployContract("VRFv2Consumer", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrf_v2_consumer_wrapper.DeployVRFv2Consumer(auth, backend, common.HexToAddress(coordinatorAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFv2Consumer{ + client: e.client, + consumer: instance.(*vrf_v2_consumer_wrapper.VRFv2Consumer), + address: address, + }, err +} + +// DeployVRFv2LoadTestConsumer deploys a VRFv2 load test consumer contract used for gathering request/fulfilment metrics +func (e *EthereumContractDeployer)
DeployVRFv2LoadTestConsumer(coordinatorAddr string) (VRFv2LoadTestConsumer, error) { + address, _, instance, err := e.client.DeployContract("VRFV2LoadTestWithMetrics", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrf_load_test_with_metrics.DeployVRFV2LoadTestWithMetrics(auth, backend, common.HexToAddress(coordinatorAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFv2LoadTestConsumer{ + client: e.client, + consumer: instance.(*vrf_load_test_with_metrics.VRFV2LoadTestWithMetrics), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployVRFV2Wrapper(linkAddr string, linkEthFeedAddr string, coordinatorAddr string) (VRFV2Wrapper, error) { + address, _, instance, err := e.client.DeployContract("VRFV2Wrapper", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrfv2_wrapper.DeployVRFV2Wrapper(auth, backend, common.HexToAddress(linkAddr), common.HexToAddress(linkEthFeedAddr), common.HexToAddress(coordinatorAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFV2Wrapper{ + address: address, + client: e.client, + wrapper: instance.(*vrfv2_wrapper.VRFV2Wrapper), + }, err +} + +func (e *EthereumContractDeployer) DeployVRFV2WrapperLoadTestConsumer(linkAddr string, vrfV2WrapperAddr string) (VRFv2WrapperLoadTestConsumer, error) { + address, _, instance, err := e.client.DeployContract("VRFV2WrapperLoadTestConsumer", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrfv2_wrapper_load_test_consumer.DeployVRFV2WrapperLoadTestConsumer(auth, backend, common.HexToAddress(linkAddr), common.HexToAddress(vrfV2WrapperAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFV2WrapperLoadTestConsumer{ + address: address, + client: e.client, + consumer: 
instance.(*vrfv2_wrapper_load_test_consumer.VRFV2WrapperLoadTestConsumer), + }, err +} + +func (v *EthereumVRFCoordinatorV2) Address() string { + return v.address.Hex() +} + +func (v *EthereumVRFCoordinatorV2) HashOfKey(ctx context.Context, pubKey [2]*big.Int) ([32]byte, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + hash, err := v.coordinator.HashOfKey(opts, pubKey) + if err != nil { + return [32]byte{}, err + } + return hash, nil +} + +func (v *EthereumVRFCoordinatorV2) GetSubscription(ctx context.Context, subID uint64) (vrf_coordinator_v2.GetSubscription, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + subscription, err := v.coordinator.GetSubscription(opts, subID) + if err != nil { + return vrf_coordinator_v2.GetSubscription{}, err + } + return subscription, nil +} + +func (v *EthereumVRFCoordinatorV2) GetOwner(ctx context.Context) (common.Address, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + coordinatorOwnerAddress, err := v.coordinator.Owner(opts) + if err != nil { + return common.Address{}, err + } + return coordinatorOwnerAddress, nil +} + +func (v *EthereumVRFCoordinatorV2) GetRequestConfig(ctx context.Context) (GetRequestConfig, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + minConfirmations, maxGas, keyHashes, err := v.coordinator.GetRequestConfig(opts) + if err != nil { + return GetRequestConfig{}, err + } + requestConfig := GetRequestConfig{ + MinimumRequestConfirmations: minConfirmations, + MaxGasLimit: maxGas, + ProvingKeyHashes: keyHashes[0], + } + + return requestConfig, nil +} + +func (v *EthereumVRFCoordinatorV2) GetConfig(ctx context.Context) (vrf_coordinator_v2.GetConfig, error) { + opts := &bind.CallOpts{ + From: 
common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + config, err := v.coordinator.GetConfig(opts) + if err != nil { + return vrf_coordinator_v2.GetConfig{}, err + } + return config, nil +} + +func (v *EthereumVRFCoordinatorV2) GetFallbackWeiPerUnitLink(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + fallbackWeiPerUnitLink, err := v.coordinator.GetFallbackWeiPerUnitLink(opts) + if err != nil { + return nil, err + } + return fallbackWeiPerUnitLink, nil +} + +func (v *EthereumVRFCoordinatorV2) GetFeeConfig(ctx context.Context) (vrf_coordinator_v2.GetFeeConfig, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + config, err := v.coordinator.GetFeeConfig(opts) + if err != nil { + return vrf_coordinator_v2.GetFeeConfig{}, err + } + return config, nil +} + +func (v *EthereumVRFCoordinatorV2) SetConfig(minimumRequestConfirmations uint16, maxGasLimit uint32, stalenessSeconds uint32, gasAfterPaymentCalculation uint32, fallbackWeiPerUnitLink *big.Int, feeConfig vrf_coordinator_v2.VRFCoordinatorV2FeeConfig) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.SetConfig( + opts, + minimumRequestConfirmations, + maxGasLimit, + stalenessSeconds, + gasAfterPaymentCalculation, + fallbackWeiPerUnitLink, + feeConfig, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2) RegisterProvingKey( + oracleAddr string, + publicProvingKey [2]*big.Int, +) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.RegisterProvingKey(opts, common.HexToAddress(oracleAddr), publicProvingKey) + if err != nil { + return err + } + return 
v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2) TransferOwnership(to common.Address) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.TransferOwnership(opts, to) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2) CreateSubscription() (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := v.coordinator.CreateSubscription(opts) + if err != nil { + return nil, err + } + return tx, v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2) AddConsumer(subId uint64, consumerAddress string) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.AddConsumer( + opts, + subId, + common.HexToAddress(consumerAddress), + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2) PendingRequestsExist(ctx context.Context, subID uint64) (bool, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + pendingRequestExists, err := v.coordinator.PendingRequestExists(opts, subID) + if err != nil { + return false, err + } + return pendingRequestExists, nil +} + +func (v *EthereumVRFCoordinatorV2) OracleWithdraw(recipient common.Address, amount *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.OracleWithdraw(opts, recipient, amount) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +// OwnerCancelSubscription cancels subscription, +// returns funds to the subscription owner, +// does not check if pending requests for a sub exist, +// outstanding requests may fail
onchain +func (v *EthereumVRFCoordinatorV2) OwnerCancelSubscription(subID uint64) (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := v.coordinator.OwnerCancelSubscription( + opts, + subID, + ) + if err != nil { + return nil, err + } + return tx, v.client.ProcessTransaction(tx) +} + +// CancelSubscription cancels subscription by Sub owner, +// return funds to specified address, +// checks if pending requests for a sub exist +func (v *EthereumVRFCoordinatorV2) CancelSubscription(subID uint64, to common.Address) (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := v.coordinator.CancelSubscription( + opts, + subID, + to, + ) + if err != nil { + return nil, err + } + return tx, v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2) FindSubscriptionID(subID uint64) (uint64, error) { + owner := v.client.GetDefaultWallet().Address() + subscriptionIterator, err := v.coordinator.FilterSubscriptionCreated( + nil, + []uint64{subID}, + ) + if err != nil { + return 0, err + } + + if !subscriptionIterator.Next() { + return 0, fmt.Errorf("expected at least 1 subID for the given owner %s", owner) + } + + return subscriptionIterator.Event.SubId, nil +} + +func (v *EthereumVRFCoordinatorV2) WaitForRandomWordsFulfilledEvent(requestID []*big.Int, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled, error) { + randomWordsFulfilledEventsChannel := make(chan *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsFulfilled) + subscription, err := v.coordinator.WatchRandomWordsFulfilled(nil, randomWordsFulfilledEventsChannel, requestID) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting 
for RandomWordsFulfilled event") + case randomWordsFulfilledEvent := <-randomWordsFulfilledEventsChannel: + return randomWordsFulfilledEvent, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2) WaitForRandomWordsRequestedEvent(keyHash [][32]byte, subID []uint64, sender []common.Address, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested, error) { + randomWordsFulfilledEventsChannel := make(chan *vrf_coordinator_v2.VRFCoordinatorV2RandomWordsRequested) + subscription, err := v.coordinator.WatchRandomWordsRequested(nil, randomWordsFulfilledEventsChannel, keyHash, subID, sender) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for RandomWordsRequested event") + case randomWordsFulfilledEvent := <-randomWordsFulfilledEventsChannel: + return randomWordsFulfilledEvent, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2) WaitForSubscriptionFunded(subID []uint64, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded, error) { + eventsChannel := make(chan *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionFunded) + subscription, err := v.coordinator.WatchSubscriptionFunded(nil, eventsChannel, subID) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for SubscriptionFunded event") + case event := <-eventsChannel: + return event, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2) WaitForSubscriptionCanceledEvent(subID []uint64, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceled, error) { + eventsChannel := make(chan *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCanceled) + subscription, err := v.coordinator.WatchSubscriptionCanceled(nil, eventsChannel, 
subID) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for SubscriptionCanceled event") + case sub := <-eventsChannel: + return sub, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2) WaitForSubscriptionCreatedEvent(subID []uint64, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated, error) { + eventsChannel := make(chan *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionCreated) + subscription, err := v.coordinator.WatchSubscriptionCreated(nil, eventsChannel, subID) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for SubscriptionCreated event") + case event := <-eventsChannel: + return event, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2) WaitForSubscriptionConsumerAdded(subID []uint64, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAdded, error) { + eventsChannel := make(chan *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerAdded) + subscription, err := v.coordinator.WatchSubscriptionConsumerAdded(nil, eventsChannel, subID) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for SubscriptionConsumerAdded event") + case event := <-eventsChannel: + return event, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2) WaitForSubscriptionConsumerRemoved(subID []uint64, timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemoved, error) { + eventsChannel := make(chan *vrf_coordinator_v2.VRFCoordinatorV2SubscriptionConsumerRemoved) + subscription, err := 
v.coordinator.WatchSubscriptionConsumerRemoved(nil, eventsChannel, subID) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for SubscriptionConsumerRemoved event") + case event := <-eventsChannel: + return event, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2) WaitForConfigSetEvent(timeout time.Duration) (*vrf_coordinator_v2.VRFCoordinatorV2ConfigSet, error) { + eventsChannel := make(chan *vrf_coordinator_v2.VRFCoordinatorV2ConfigSet) + subscription, err := v.coordinator.WatchConfigSet(nil, eventsChannel) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for ConfigSet event") + case event := <-eventsChannel: + return event, nil + } + } +} + +// GetAllRandomWords get all VRFv2 randomness output words +func (v *EthereumVRFConsumerV2) GetAllRandomWords(ctx context.Context, num int) ([]*big.Int, error) { + words := make([]*big.Int, 0) + for i := 0; i < num; i++ { + word, err := v.consumer.SRandomWords(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }, big.NewInt(int64(i))) + if err != nil { + return nil, err + } + words = append(words, word) + } + return words, nil +} + +// LoadExistingConsumer loads an EthereumVRFConsumerV2 with a specified address +func (v *EthereumVRFConsumerV2) LoadExistingConsumer(address string, client blockchain.EVMClient) error { + a := common.HexToAddress(address) + consumer, err := vrf_consumer_v2.NewVRFConsumerV2(a, client.(*blockchain.EthereumClient).Client) + if err != nil { + return err + } + v.client = client + v.consumer = consumer + v.address = &a + return nil +} + +// CreateFundedSubscription create funded subscription for VRFv2 randomness 
+func (v *EthereumVRFConsumerV2) CreateFundedSubscription(funds *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.consumer.CreateSubscriptionAndFund(opts, funds) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +// TopUpSubscriptionFunds add funds to a VRFv2 subscription +func (v *EthereumVRFConsumerV2) TopUpSubscriptionFunds(funds *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.consumer.TopUpSubscription(opts, funds) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFConsumerV2) Address() string { + return v.address.Hex() +} + +func (v *EthereumVRFv2Consumer) Address() string { + return v.address.Hex() +} + +// CurrentSubscription get current VRFv2 subscription +func (v *EthereumVRFConsumerV2) CurrentSubscription() (uint64, error) { + return v.consumer.SSubId(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: context.Background(), + }) +} + +// GasAvailable get available gas after randomness fulfilled +func (v *EthereumVRFConsumerV2) GasAvailable() (*big.Int, error) { + return v.consumer.SGasAvailable(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: context.Background(), + }) +} + +func (v *EthereumVRFConsumerV2) Fund(ethAmount *big.Float) error { + gasEstimates, err := v.client.EstimateGas(ethereum.CallMsg{ + To: v.address, + }) + if err != nil { + return err + } + return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) +} + +// RequestRandomness request VRFv2 random words +func (v *EthereumVRFConsumerV2) RequestRandomness(hash [32]byte, subID uint64, confs uint16, gasLimit uint32, numWords uint32) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, 
err := v.consumer.RequestRandomness(opts, hash, subID, confs, gasLimit, numWords) + if err != nil { + return err + } + log.Info().Interface("Sub ID", subID). + Interface("Number of Words", numWords). + Interface("Number of Confirmations", confs). + Interface("Callback Gas Limit", gasLimit). + Interface("KeyHash", hex.EncodeToString(hash[:])). + Interface("Consumer Contract", v.address). + Msg("RequestRandomness called") + return v.client.ProcessTransaction(tx) +} + +// RequestRandomness request VRFv2 random words +func (v *EthereumVRFv2Consumer) RequestRandomness(hash [32]byte, subID uint64, confs uint16, gasLimit uint32, numWords uint32) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.consumer.RequestRandomWords(opts, subID, gasLimit, confs, numWords, hash) + if err != nil { + return err + } + log.Info().Interface("Sub ID", subID). + Interface("Number of Words", numWords). + Interface("Number of Confirmations", confs). + Interface("Callback Gas Limit", gasLimit). + Interface("KeyHash", hex.EncodeToString(hash[:])). + Interface("Consumer Contract", v.address). 
+ Msg("RequestRandomness called") + return v.client.ProcessTransaction(tx) +} + +// RandomnessOutput get VRFv2 randomness output (word) +func (v *EthereumVRFConsumerV2) RandomnessOutput(ctx context.Context, arg0 *big.Int) (*big.Int, error) { + return v.consumer.SRandomWords(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }, arg0) +} + +func (v *EthereumVRFv2LoadTestConsumer) Address() string { + return v.address.Hex() +} + +func (v *EthereumVRFv2LoadTestConsumer) RequestRandomness( + keyHash [32]byte, + subID uint64, + requestConfirmations uint16, + callbackGasLimit uint32, + numWords uint32, + requestCount uint16, +) (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + + tx, err := v.consumer.RequestRandomWords(opts, subID, requestConfirmations, keyHash, callbackGasLimit, numWords, requestCount) + if err != nil { + return nil, err + } + return tx, v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFv2LoadTestConsumer) RequestRandomWordsWithForceFulfill( + keyHash [32]byte, + requestConfirmations uint16, + callbackGasLimit uint32, + numWords uint32, + requestCount uint16, + subTopUpAmount *big.Int, + linkAddress common.Address, +) (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := v.consumer.RequestRandomWordsWithForceFulfill( + opts, + requestConfirmations, + keyHash, + callbackGasLimit, + numWords, + requestCount, + subTopUpAmount, + linkAddress, + ) + if err != nil { + return nil, err + } + return tx, v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFv2Consumer) GetRequestStatus(ctx context.Context, requestID *big.Int) (vrf_v2_consumer_wrapper.GetRequestStatus, error) { + return v.consumer.GetRequestStatus(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }, 
requestID) +} + +func (v *EthereumVRFv2Consumer) GetLastRequestId(ctx context.Context) (*big.Int, error) { + return v.consumer.LastRequestId(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) +} + +func (v *EthereumVRFv2LoadTestConsumer) GetRequestStatus(ctx context.Context, requestID *big.Int) (vrf_load_test_with_metrics.GetRequestStatus, error) { + return v.consumer.GetRequestStatus(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }, requestID) +} + +func (v *EthereumVRFv2LoadTestConsumer) GetLastRequestId(ctx context.Context) (*big.Int, error) { + return v.consumer.SLastRequestId(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) +} + +func (v *EthereumVRFv2LoadTestConsumer) ResetMetrics() error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.consumer.Reset(opts) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFv2LoadTestConsumer) GetLoadTestMetrics(ctx context.Context) (*VRFLoadTestMetrics, error) { + requestCount, err := v.consumer.SRequestCount(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + if err != nil { + return &VRFLoadTestMetrics{}, err + } + fulfilmentCount, err := v.consumer.SResponseCount(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + + if err != nil { + return &VRFLoadTestMetrics{}, err + } + averageFulfillmentInMillions, err := v.consumer.SAverageFulfillmentInMillions(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + if err != nil { + return &VRFLoadTestMetrics{}, err + } + slowestFulfillment, err := v.consumer.SSlowestFulfillment(&bind.CallOpts{ + From: 
common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + + if err != nil { + return &VRFLoadTestMetrics{}, err + } + fastestFulfillment, err := v.consumer.SFastestFulfillment(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + if err != nil { + return &VRFLoadTestMetrics{}, err + } + + return &VRFLoadTestMetrics{ + requestCount, + fulfilmentCount, + averageFulfillmentInMillions, + slowestFulfillment, + fastestFulfillment, + }, nil +} + +func (v *EthereumVRFV2Wrapper) Address() string { + return v.address.Hex() +} + +func (v *EthereumVRFV2Wrapper) SetConfig(wrapperGasOverhead uint32, coordinatorGasOverhead uint32, wrapperPremiumPercentage uint8, keyHash [32]byte, maxNumWords uint8) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.wrapper.SetConfig( + opts, + wrapperGasOverhead, + coordinatorGasOverhead, + wrapperPremiumPercentage, + keyHash, + maxNumWords, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFV2Wrapper) GetSubID(ctx context.Context) (uint64, error) { + return v.wrapper.SUBSCRIPTIONID(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) +} + +func (v *EthereumVRFV2WrapperLoadTestConsumer) Address() string { + return v.address.Hex() +} + +func (v *EthereumVRFV2WrapperLoadTestConsumer) Fund(ethAmount *big.Float) error { + gasEstimates, err := v.client.EstimateGas(ethereum.CallMsg{}) + if err != nil { + return err + } + return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) +} + +func (v *EthereumVRFV2WrapperLoadTestConsumer) RequestRandomness(requestConfirmations uint16, callbackGasLimit uint32, numWords uint32, requestCount uint16) (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := 
v.consumer.MakeRequests(opts, callbackGasLimit, requestConfirmations, numWords, requestCount) + if err != nil { + return nil, err + } + return tx, v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFV2WrapperLoadTestConsumer) GetRequestStatus(ctx context.Context, requestID *big.Int) (vrfv2_wrapper_load_test_consumer.GetRequestStatus, error) { + return v.consumer.GetRequestStatus(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }, requestID) +} + +func (v *EthereumVRFV2WrapperLoadTestConsumer) GetLastRequestId(ctx context.Context) (*big.Int, error) { + return v.consumer.SLastRequestId(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) +} + +func (v *EthereumVRFV2WrapperLoadTestConsumer) GetWrapper(ctx context.Context) (common.Address, error) { + return v.consumer.IVrfV2Wrapper(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) +} + +func (v *EthereumVRFV2WrapperLoadTestConsumer) GetLoadTestMetrics(ctx context.Context) (*VRFLoadTestMetrics, error) { + requestCount, err := v.consumer.SRequestCount(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + if err != nil { + return nil, err + } + fulfilmentCount, err := v.consumer.SResponseCount(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + + if err != nil { + return nil, err + } + averageFulfillmentInMillions, err := v.consumer.SAverageFulfillmentInMillions(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + if err != nil { + return nil, err + } + slowestFulfillment, err := v.consumer.SSlowestFulfillment(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + + if err != nil { + return nil, err + } + fastestFulfillment, err := 
v.consumer.SFastestFulfillment(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + if err != nil { + return nil, err + } + + return &VRFLoadTestMetrics{ + requestCount, + fulfilmentCount, + averageFulfillmentInMillions, + slowestFulfillment, + fastestFulfillment, + }, nil +} + +func (v *EthereumVRFOwner) Address() string { + return v.address.Hex() +} + +func (v *EthereumVRFOwner) SetAuthorizedSenders(senders []common.Address) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.vrfOwner.SetAuthorizedSenders( + opts, + senders, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFOwner) AcceptVRFOwnership() error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.vrfOwner.AcceptVRFOwnership(opts) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFOwner) WaitForRandomWordsForcedEvent(requestIDs []*big.Int, subIds []uint64, senders []common.Address, timeout time.Duration) (*vrf_owner.VRFOwnerRandomWordsForced, error) { + eventsChannel := make(chan *vrf_owner.VRFOwnerRandomWordsForced) + subscription, err := v.vrfOwner.WatchRandomWordsForced(nil, eventsChannel, requestIDs, subIds, senders) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for RandomWordsForced event") + case event := <-eventsChannel: + return event, nil + } + } +} + +func (v *EthereumVRFCoordinatorTestV2) Address() string { + return v.address.Hex() +} + +func (v *EthereumVRFMockETHPLIFeed) Address() string { + return v.address.Hex() +} + +func (v *EthereumVRFMockETHPLIFeed) LatestRoundData() (*big.Int, error) { + data, err := 
v.feed.LatestRoundData(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: context.Background(), + }) + if err != nil { + return nil, err + } + return data.Ans, nil +} + +func (v *EthereumVRFMockETHPLIFeed) LatestRoundDataUpdatedAt() (*big.Int, error) { + data, err := v.feed.LatestRoundData(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: context.Background(), + }) + if err != nil { + return nil, err + } + return data.UpdatedAt, nil +} + +func (v *EthereumVRFMockETHPLIFeed) SetBlockTimestampDeduction(blockTimestampDeduction *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.feed.SetBlockTimestampDeduction(opts, blockTimestampDeduction) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} diff --git a/integration-tests/contracts/ethereum_vrfv2plus_contracts.go b/integration-tests/contracts/ethereum_vrfv2plus_contracts.go new file mode 100644 index 00000000..f7cea3bc --- /dev/null +++ b/integration-tests/contracts/ethereum_vrfv2plus_contracts.go @@ -0,0 +1,1045 @@ +package contracts + +import ( + "context" + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_coordinator_v2_5" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_load_test_with_metrics" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrf_v2plus_upgraded_version" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2plus_wrapper" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/vrfv2plus_wrapper_load_test_consumer" +) + +type 
EthereumVRFCoordinatorV2_5 struct { + address *common.Address + client blockchain.EVMClient + coordinator vrf_coordinator_v2_5.VRFCoordinatorV25Interface +} + +type EthereumVRFCoordinatorV2PlusUpgradedVersion struct { + address *common.Address + client blockchain.EVMClient + coordinator *vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersion +} + +// EthereumVRFv2PlusLoadTestConsumer represents VRFv2Plus consumer contract for performing Load Tests +type EthereumVRFv2PlusLoadTestConsumer struct { + address *common.Address + client blockchain.EVMClient + consumer *vrf_v2plus_load_test_with_metrics.VRFV2PlusLoadTestWithMetrics +} + +type EthereumVRFV2PlusWrapperLoadTestConsumer struct { + address *common.Address + client blockchain.EVMClient + consumer *vrfv2plus_wrapper_load_test_consumer.VRFV2PlusWrapperLoadTestConsumer +} + +type EthereumVRFV2PlusWrapper struct { + address *common.Address + client blockchain.EVMClient + wrapper *vrfv2plus_wrapper.VRFV2PlusWrapper +} + +// DeployVRFCoordinatorV2_5 deploys VRFV2_5 coordinator contract +func (e *EthereumContractDeployer) DeployVRFCoordinatorV2_5(bhsAddr string) (VRFCoordinatorV2_5, error) { + address, _, instance, err := e.client.DeployContract("VRFCoordinatorV2Plus", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrf_coordinator_v2_5.DeployVRFCoordinatorV25(auth, backend, common.HexToAddress(bhsAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFCoordinatorV2_5{ + client: e.client, + coordinator: instance.(*vrf_coordinator_v2_5.VRFCoordinatorV25), + address: address, + }, err +} + +func (v *EthereumVRFCoordinatorV2_5) Address() string { + return v.address.Hex() +} + +func (v *EthereumVRFCoordinatorV2_5) HashOfKey(ctx context.Context, pubKey [2]*big.Int) ([32]byte, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + hash, err := 
v.coordinator.HashOfKey(opts, pubKey) + if err != nil { + return [32]byte{}, err + } + return hash, nil +} + +func (v *EthereumVRFCoordinatorV2_5) GetActiveSubscriptionIds(ctx context.Context, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + activeSubscriptionIds, err := v.coordinator.GetActiveSubscriptionIds(opts, startIndex, maxCount) + if err != nil { + return nil, err + } + return activeSubscriptionIds, nil +} + +func (v *EthereumVRFCoordinatorV2_5) PendingRequestsExist(ctx context.Context, subID *big.Int) (bool, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + pendingRequestExists, err := v.coordinator.PendingRequestExists(opts, subID) + if err != nil { + return false, err + } + return pendingRequestExists, nil +} + +func (v *EthereumVRFCoordinatorV2_5) GetSubscription(ctx context.Context, subID *big.Int) (vrf_coordinator_v2_5.GetSubscription, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + subscription, err := v.coordinator.GetSubscription(opts, subID) + if err != nil { + return vrf_coordinator_v2_5.GetSubscription{}, err + } + return subscription, nil +} + +func (v *EthereumVRFCoordinatorV2_5) GetLinkTotalBalance(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + totalBalance, err := v.coordinator.STotalBalance(opts) + if err != nil { + return nil, err + } + return totalBalance, nil +} +func (v *EthereumVRFCoordinatorV2_5) GetNativeTokenTotalBalance(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + totalBalance, err := v.coordinator.STotalNativeBalance(opts) + if err != 
nil { + return nil, err + } + return totalBalance, nil +} + +// OwnerCancelSubscription cancels subscription by Coordinator owner +// return funds to sub owner, +// does not check if pending requests for a sub exist +func (v *EthereumVRFCoordinatorV2_5) OwnerCancelSubscription(subID *big.Int) (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := v.coordinator.OwnerCancelSubscription( + opts, + subID, + ) + if err != nil { + return nil, err + } + return tx, v.client.ProcessTransaction(tx) +} + +// CancelSubscription cancels subscription by Sub owner, +// return funds to specified address, +// checks if pending requests for a sub exist +func (v *EthereumVRFCoordinatorV2_5) CancelSubscription(subID *big.Int, to common.Address) (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := v.coordinator.CancelSubscription( + opts, + subID, + to, + ) + if err != nil { + return nil, err + } + return tx, v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) Withdraw(recipient common.Address) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.Withdraw( + opts, + recipient, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) WithdrawNative(recipient common.Address) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.WithdrawNative( + opts, + recipient, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) SetConfig( + minimumRequestConfirmations uint16, + maxGasLimit uint32, + stalenessSeconds uint32, + gasAfterPaymentCalculation uint32, + 
fallbackWeiPerUnitLink *big.Int, + fulfillmentFlatFeeNativePPM uint32, + fulfillmentFlatFeeLinkDiscountPPM uint32, + nativePremiumPercentage uint8, + linkPremiumPercentage uint8) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.SetConfig( + opts, + minimumRequestConfirmations, + maxGasLimit, + stalenessSeconds, + gasAfterPaymentCalculation, + fallbackWeiPerUnitLink, + fulfillmentFlatFeeNativePPM, + fulfillmentFlatFeeLinkDiscountPPM, + nativePremiumPercentage, + linkPremiumPercentage, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) SetPLIAndPLINativeFeed(linkAddress string, linkNativeFeedAddress string) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.SetPLIAndPLINativeFeed( + opts, + common.HexToAddress(linkAddress), + common.HexToAddress(linkNativeFeedAddress), + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) RegisterProvingKey( + publicProvingKey [2]*big.Int, + gasLaneMaxGas uint64, +) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.RegisterProvingKey(opts, publicProvingKey, gasLaneMaxGas) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) CreateSubscription() (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := v.coordinator.CreateSubscription(opts) + if err != nil { + return nil, err + } + return tx, v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) Migrate(subId *big.Int, coordinatorAddress string) error { + opts, err := 
v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.Migrate(opts, subId, common.HexToAddress(coordinatorAddress)) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) RegisterMigratableCoordinator(migratableCoordinatorAddress string) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.RegisterMigratableCoordinator(opts, common.HexToAddress(migratableCoordinatorAddress)) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) AddConsumer(subId *big.Int, consumerAddress string) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.AddConsumer( + opts, + subId, + common.HexToAddress(consumerAddress), + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) FundSubscriptionWithNative(subId *big.Int, nativeTokenAmount *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + opts.Value = nativeTokenAmount + tx, err := v.coordinator.FundSubscriptionWithNative( + opts, + subId, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2_5) FindSubscriptionID(subID *big.Int) (*big.Int, error) { + owner := v.client.GetDefaultWallet().Address() + subscriptionIterator, err := v.coordinator.FilterSubscriptionCreated( + nil, + []*big.Int{subID}, + ) + if err != nil { + return nil, err + } + + if !subscriptionIterator.Next() { + return nil, fmt.Errorf("expected at least 1 subID for the given owner %s", owner) + } + + return subscriptionIterator.Event.SubId, nil +} + +func (v *EthereumVRFCoordinatorV2_5) 
WaitForSubscriptionCreatedEvent(timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCreated, error) { + eventsChannel := make(chan *vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCreated) + subscription, err := v.coordinator.WatchSubscriptionCreated(nil, eventsChannel, nil) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for SubscriptionCreated event") + case sub := <-eventsChannel: + return sub, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2_5) WaitForSubscriptionCanceledEvent(subID *big.Int, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCanceled, error) { + eventsChannel := make(chan *vrf_coordinator_v2_5.VRFCoordinatorV25SubscriptionCanceled) + subscription, err := v.coordinator.WatchSubscriptionCanceled(nil, eventsChannel, []*big.Int{subID}) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for SubscriptionCanceled event") + case sub := <-eventsChannel: + return sub, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2_5) WaitForRandomWordsFulfilledEvent(subID []*big.Int, requestID []*big.Int, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled, error) { + randomWordsFulfilledEventsChannel := make(chan *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsFulfilled) + subscription, err := v.coordinator.WatchRandomWordsFulfilled(nil, randomWordsFulfilledEventsChannel, requestID, subID) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for RandomWordsFulfilled event") + 
case randomWordsFulfilledEvent := <-randomWordsFulfilledEventsChannel: + return randomWordsFulfilledEvent, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2_5) WaitForRandomWordsRequestedEvent(keyHash [][32]byte, subID []*big.Int, sender []common.Address, timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested, error) { + randomWordsFulfilledEventsChannel := make(chan *vrf_coordinator_v2_5.VRFCoordinatorV25RandomWordsRequested) + subscription, err := v.coordinator.WatchRandomWordsRequested(nil, randomWordsFulfilledEventsChannel, keyHash, subID, sender) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for RandomWordsRequested event") + case randomWordsFulfilledEvent := <-randomWordsFulfilledEventsChannel: + return randomWordsFulfilledEvent, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2_5) WaitForMigrationCompletedEvent(timeout time.Duration) (*vrf_coordinator_v2_5.VRFCoordinatorV25MigrationCompleted, error) { + eventsChannel := make(chan *vrf_coordinator_v2_5.VRFCoordinatorV25MigrationCompleted) + subscription, err := v.coordinator.WatchMigrationCompleted(nil, eventsChannel) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for MigrationCompleted event") + case migrationCompletedEvent := <-eventsChannel: + return migrationCompletedEvent, nil + } + } +} + +func (v *EthereumVRFv2PlusLoadTestConsumer) Address() string { + return v.address.Hex() +} +func (v *EthereumVRFv2PlusLoadTestConsumer) RequestRandomness(keyHash [32]byte, subID *big.Int, requestConfirmations uint16, callbackGasLimit uint32, nativePayment bool, numWords uint32, requestCount uint16) (*types.Transaction, error) { + opts, err 
:= v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := v.consumer.RequestRandomWords(opts, subID, requestConfirmations, keyHash, callbackGasLimit, nativePayment, numWords, requestCount) + if err != nil { + return nil, err + } + + return tx, v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFv2PlusLoadTestConsumer) ResetMetrics() error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.consumer.Reset(opts) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFv2PlusLoadTestConsumer) GetCoordinator(ctx context.Context) (common.Address, error) { + return v.consumer.SVrfCoordinator(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) +} +func (v *EthereumVRFv2PlusLoadTestConsumer) GetRequestStatus(ctx context.Context, requestID *big.Int) (vrf_v2plus_load_test_with_metrics.GetRequestStatus, error) { + return v.consumer.GetRequestStatus(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }, requestID) +} + +func (v *EthereumVRFv2PlusLoadTestConsumer) GetLastRequestId(ctx context.Context) (*big.Int, error) { + return v.consumer.SLastRequestId(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) +} + +func (v *EthereumVRFv2PlusLoadTestConsumer) GetLoadTestMetrics(ctx context.Context) (*VRFLoadTestMetrics, error) { + requestCount, err := v.consumer.SRequestCount(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + if err != nil { + return nil, err + } + fulfilmentCount, err := v.consumer.SResponseCount(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + + if err != nil { + return nil, err + } + averageFulfillmentInMillions, err := 
v.consumer.SAverageFulfillmentInMillions(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + if err != nil { + return nil, err + } + slowestFulfillment, err := v.consumer.SSlowestFulfillment(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + + if err != nil { + return nil, err + } + fastestFulfillment, err := v.consumer.SFastestFulfillment(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + if err != nil { + return nil, err + } + + return &VRFLoadTestMetrics{ + requestCount, + fulfilmentCount, + averageFulfillmentInMillions, + slowestFulfillment, + fastestFulfillment, + }, nil +} + +func (e *EthereumContractDeployer) DeployVRFCoordinatorV2PlusUpgradedVersion(bhsAddr string) (VRFCoordinatorV2PlusUpgradedVersion, error) { + address, _, instance, err := e.client.DeployContract("VRFCoordinatorV2PlusUpgradedVersion", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrf_v2plus_upgraded_version.DeployVRFCoordinatorV2PlusUpgradedVersion(auth, backend, common.HexToAddress(bhsAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFCoordinatorV2PlusUpgradedVersion{ + client: e.client, + coordinator: instance.(*vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersion), + address: address, + }, err +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) Address() string { + return v.address.Hex() +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) HashOfKey(ctx context.Context, pubKey [2]*big.Int) ([32]byte, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + hash, err := v.coordinator.HashOfKey(opts, pubKey) + if err != nil { + return [32]byte{}, err + } + return hash, nil +} + +func (v 
*EthereumVRFCoordinatorV2PlusUpgradedVersion) GetActiveSubscriptionIds(ctx context.Context, startIndex *big.Int, maxCount *big.Int) ([]*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + activeSubscriptionIds, err := v.coordinator.GetActiveSubscriptionIds(opts, startIndex, maxCount) + if err != nil { + return nil, err + } + return activeSubscriptionIds, nil +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) GetSubscription(ctx context.Context, subID *big.Int) (vrf_v2plus_upgraded_version.GetSubscription, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + subscription, err := v.coordinator.GetSubscription(opts, subID) + if err != nil { + return vrf_v2plus_upgraded_version.GetSubscription{}, err + } + return subscription, nil +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) SetConfig( + minimumRequestConfirmations uint16, + maxGasLimit uint32, + stalenessSeconds uint32, + gasAfterPaymentCalculation uint32, + fallbackWeiPerUnitLink *big.Int, + fulfillmentFlatFeeNativePPM uint32, + fulfillmentFlatFeeLinkDiscountPPM uint32, + nativePremiumPercentage uint8, + linkPremiumPercentage uint8) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.SetConfig( + opts, + minimumRequestConfirmations, + maxGasLimit, + stalenessSeconds, + gasAfterPaymentCalculation, + fallbackWeiPerUnitLink, + fulfillmentFlatFeeNativePPM, + fulfillmentFlatFeeLinkDiscountPPM, + nativePremiumPercentage, + linkPremiumPercentage, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) SetPLIAndPLINativeFeed(linkAddress string, linkNativeFeedAddress string) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + 
} + tx, err := v.coordinator.SetPLIAndPLINativeFeed( + opts, + common.HexToAddress(linkAddress), + common.HexToAddress(linkNativeFeedAddress), + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) RegisterProvingKey( + publicProvingKey [2]*big.Int, +) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.RegisterProvingKey(opts, publicProvingKey) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) CreateSubscription() error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.CreateSubscription(opts) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) GetLinkTotalBalance(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + totalBalance, err := v.coordinator.STotalBalance(opts) + if err != nil { + return nil, err + } + return totalBalance, nil +} +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) GetNativeTokenTotalBalance(ctx context.Context) (*big.Int, error) { + opts := &bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + } + totalBalance, err := v.coordinator.STotalNativeBalance(opts) + if err != nil { + return nil, err + } + return totalBalance, nil +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) Migrate(subId *big.Int, coordinatorAddress string) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.Migrate(opts, subId, common.HexToAddress(coordinatorAddress)) + if err != nil { + return err + } + 
return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) RegisterMigratableCoordinator(migratableCoordinatorAddress string) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.RegisterMigratableCoordinator(opts, common.HexToAddress(migratableCoordinatorAddress)) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) AddConsumer(subId *big.Int, consumerAddress string) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.coordinator.AddConsumer( + opts, + subId, + common.HexToAddress(consumerAddress), + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) FundSubscriptionWithNative(subId *big.Int, nativeTokenAmount *big.Int) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + opts.Value = nativeTokenAmount + tx, err := v.coordinator.FundSubscriptionWithNative( + opts, + subId, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) FindSubscriptionID() (*big.Int, error) { + owner := v.client.GetDefaultWallet().Address() + subscriptionIterator, err := v.coordinator.FilterSubscriptionCreated( + nil, + nil, + ) + if err != nil { + return nil, err + } + + if !subscriptionIterator.Next() { + return nil, fmt.Errorf("expected at least 1 subID for the given owner %s", owner) + } + + return subscriptionIterator.Event.SubId, nil +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) WaitForRandomWordsFulfilledEvent(subID []*big.Int, requestID []*big.Int, timeout time.Duration) (*vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled, 
error) { + randomWordsFulfilledEventsChannel := make(chan *vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionRandomWordsFulfilled) + subscription, err := v.coordinator.WatchRandomWordsFulfilled(nil, randomWordsFulfilledEventsChannel, requestID, subID) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for RandomWordsFulfilled event") + case randomWordsFulfilledEvent := <-randomWordsFulfilledEventsChannel: + return randomWordsFulfilledEvent, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) WaitForMigrationCompletedEvent(timeout time.Duration) (*vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted, error) { + eventsChannel := make(chan *vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionMigrationCompleted) + subscription, err := v.coordinator.WatchMigrationCompleted(nil, eventsChannel) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := <-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for MigrationCompleted event") + case migrationCompletedEvent := <-eventsChannel: + return migrationCompletedEvent, nil + } + } +} + +func (v *EthereumVRFCoordinatorV2PlusUpgradedVersion) WaitForRandomWordsRequestedEvent(keyHash [][32]byte, subID []*big.Int, sender []common.Address, timeout time.Duration) (*vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested, error) { + eventsChannel := make(chan *vrf_v2plus_upgraded_version.VRFCoordinatorV2PlusUpgradedVersionRandomWordsRequested) + subscription, err := v.coordinator.WatchRandomWordsRequested(nil, eventsChannel, keyHash, subID, sender) + if err != nil { + return nil, err + } + defer subscription.Unsubscribe() + + for { + select { + case err := 
<-subscription.Err(): + return nil, err + case <-time.After(timeout): + return nil, fmt.Errorf("timeout waiting for RandomWordsRequested event") + case randomWordsRequestedEvent := <-eventsChannel: + return randomWordsRequestedEvent, nil + } + } +} + +func (e *EthereumContractDeployer) DeployVRFv2PlusLoadTestConsumer(coordinatorAddr string) (VRFv2PlusLoadTestConsumer, error) { + address, _, instance, err := e.client.DeployContract("VRFV2PlusLoadTestWithMetrics", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrf_v2plus_load_test_with_metrics.DeployVRFV2PlusLoadTestWithMetrics(auth, backend, common.HexToAddress(coordinatorAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFv2PlusLoadTestConsumer{ + client: e.client, + consumer: instance.(*vrf_v2plus_load_test_with_metrics.VRFV2PlusLoadTestWithMetrics), + address: address, + }, err +} + +func (e *EthereumContractDeployer) DeployVRFV2PlusWrapper(linkAddr string, linkEthFeedAddr string, coordinatorAddr string) (VRFV2PlusWrapper, error) { + address, _, instance, err := e.client.DeployContract("VRFV2PlusWrapper", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrfv2plus_wrapper.DeployVRFV2PlusWrapper(auth, backend, common.HexToAddress(linkAddr), common.HexToAddress(linkEthFeedAddr), common.HexToAddress(coordinatorAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFV2PlusWrapper{ + client: e.client, + wrapper: instance.(*vrfv2plus_wrapper.VRFV2PlusWrapper), + address: address, + }, err +} + +func (v *EthereumVRFV2PlusWrapper) Address() string { + return v.address.Hex() +} + +func (e *EthereumContractDeployer) DeployVRFV2PlusWrapperLoadTestConsumer(linkAddr string, vrfV2PlusWrapperAddr string) (VRFv2PlusWrapperLoadTestConsumer, error) { + address, _, instance, err := 
e.client.DeployContract("VRFV2PlusWrapperLoadTestConsumer", func( + auth *bind.TransactOpts, + backend bind.ContractBackend, + ) (common.Address, *types.Transaction, interface{}, error) { + return vrfv2plus_wrapper_load_test_consumer.DeployVRFV2PlusWrapperLoadTestConsumer(auth, backend, common.HexToAddress(linkAddr), common.HexToAddress(vrfV2PlusWrapperAddr)) + }) + if err != nil { + return nil, err + } + return &EthereumVRFV2PlusWrapperLoadTestConsumer{ + client: e.client, + consumer: instance.(*vrfv2plus_wrapper_load_test_consumer.VRFV2PlusWrapperLoadTestConsumer), + address: address, + }, err +} + +func (v *EthereumVRFV2PlusWrapperLoadTestConsumer) Address() string { + return v.address.Hex() +} + +func (v *EthereumVRFV2PlusWrapper) SetConfig(wrapperGasOverhead uint32, + coordinatorGasOverhead uint32, + wrapperPremiumPercentage uint8, + keyHash [32]byte, + maxNumWords uint8, + stalenessSeconds uint32, + fallbackWeiPerUnitLink *big.Int, + fulfillmentFlatFeeLinkPPM uint32, + fulfillmentFlatFeeNativePPM uint32, +) error { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return err + } + tx, err := v.wrapper.SetConfig( + opts, + wrapperGasOverhead, + coordinatorGasOverhead, + wrapperPremiumPercentage, + keyHash, + maxNumWords, + stalenessSeconds, + fallbackWeiPerUnitLink, + fulfillmentFlatFeeLinkPPM, + fulfillmentFlatFeeNativePPM, + ) + if err != nil { + return err + } + return v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFV2PlusWrapper) GetSubID(ctx context.Context) (*big.Int, error) { + return v.wrapper.SUBSCRIPTIONID(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) +} + +func (v *EthereumVRFV2PlusWrapperLoadTestConsumer) Fund(ethAmount *big.Float) error { + gasEstimates, err := v.client.EstimateGas(ethereum.CallMsg{ + To: v.address, + }) + if err != nil { + return err + } + return v.client.Fund(v.address.Hex(), ethAmount, gasEstimates) +} + +func (v 
*EthereumVRFV2PlusWrapperLoadTestConsumer) RequestRandomness(requestConfirmations uint16, callbackGasLimit uint32, numWords uint32, requestCount uint16) (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := v.consumer.MakeRequests(opts, callbackGasLimit, requestConfirmations, numWords, requestCount) + if err != nil { + return nil, err + } + + return tx, v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFV2PlusWrapperLoadTestConsumer) RequestRandomnessNative(requestConfirmations uint16, callbackGasLimit uint32, numWords uint32, requestCount uint16) (*types.Transaction, error) { + opts, err := v.client.TransactionOpts(v.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + tx, err := v.consumer.MakeRequestsNative(opts, callbackGasLimit, requestConfirmations, numWords, requestCount) + if err != nil { + return nil, err + } + + return tx, v.client.ProcessTransaction(tx) +} + +func (v *EthereumVRFV2PlusWrapperLoadTestConsumer) GetRequestStatus(ctx context.Context, requestID *big.Int) (vrfv2plus_wrapper_load_test_consumer.GetRequestStatus, error) { + return v.consumer.GetRequestStatus(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }, requestID) +} + +func (v *EthereumVRFV2PlusWrapperLoadTestConsumer) GetLastRequestId(ctx context.Context) (*big.Int, error) { + return v.consumer.SLastRequestId(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) +} + +func (v *EthereumVRFV2PlusWrapperLoadTestConsumer) GetWrapper(ctx context.Context) (common.Address, error) { + return v.consumer.IVrfV2PlusWrapper(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) +} + +func (v *EthereumVRFV2PlusWrapperLoadTestConsumer) GetLoadTestMetrics(ctx context.Context) (*VRFLoadTestMetrics, error) { + requestCount, err := 
v.consumer.SRequestCount(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + if err != nil { + return nil, err + } + fulfilmentCount, err := v.consumer.SResponseCount(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + + if err != nil { + return nil, err + } + averageFulfillmentInMillions, err := v.consumer.SAverageFulfillmentInMillions(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + if err != nil { + return nil, err + } + slowestFulfillment, err := v.consumer.SSlowestFulfillment(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + + if err != nil { + return nil, err + } + fastestFulfillment, err := v.consumer.SFastestFulfillment(&bind.CallOpts{ + From: common.HexToAddress(v.client.GetDefaultWallet().Address()), + Context: ctx, + }) + if err != nil { + return nil, err + } + + return &VRFLoadTestMetrics{ + requestCount, + fulfilmentCount, + averageFulfillmentInMillions, + slowestFulfillment, + fastestFulfillment, + }, nil +} diff --git a/integration-tests/contracts/multicall.go b/integration-tests/contracts/multicall.go new file mode 100644 index 00000000..6b3934b6 --- /dev/null +++ b/integration-tests/contracts/multicall.go @@ -0,0 +1,95 @@ +package contracts + +import ( + "context" + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + "github.com/rs/zerolog/log" + + "github.com/goplugin/plugin-testing-framework/blockchain" +) + +const ( + MultiCallABI = 
"[{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Call[]\",\"name\":\"calls\",\"type\":\"tuple[]\"}],\"name\":\"aggregate\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes[]\",\"name\":\"returnData\",\"type\":\"bytes[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"allowFailure\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Call3[]\",\"name\":\"calls\",\"type\":\"tuple[]\"}],\"name\":\"aggregate3\",\"outputs\":[{\"components\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"returnData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Result[]\",\"name\":\"returnData\",\"type\":\"tuple[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"allowFailure\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Call3Value[]\",\"name\":\"calls\",\"type\":\"tuple[]\"}],\"name\":\"aggregate3Value\",\"outputs\":[{\"components\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"returnData\",\"type\":\"bytes\"}],\"internalType\":\"struct 
Multicall3.Result[]\",\"name\":\"returnData\",\"type\":\"tuple[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Call[]\",\"name\":\"calls\",\"type\":\"tuple[]\"}],\"name\":\"blockAndAggregate\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"returnData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Result[]\",\"name\":\"returnData\",\"type\":\"tuple[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getBasefee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"basefee\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"}],\"name\":\"getBlockHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getBlockNumber\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getChainId\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"chainid\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCurrentBlockCoinbase\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"coinbase\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCurrentBlockDifficulty\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"difficulty\"
,\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCurrentBlockGasLimit\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"gaslimit\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCurrentBlockTimestamp\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"addr\",\"type\":\"address\"}],\"name\":\"getEthBalance\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLastBlockHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"requireSuccess\",\"type\":\"bool\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Call[]\",\"name\":\"calls\",\"type\":\"tuple[]\"}],\"name\":\"tryAggregate\",\"outputs\":[{\"components\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"returnData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Result[]\",\"name\":\"returnData\",\"type\":\"tuple[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"requireSuccess\",\"type\":\"bool\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"callData\",\"type\":\"bytes\"}],\"internalType\":\"struct 
Multicall3.Call[]\",\"name\":\"calls\",\"type\":\"tuple[]\"}],\"name\":\"tryBlockAndAggregate\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"components\":[{\"internalType\":\"bool\",\"name\":\"success\",\"type\":\"bool\"},{\"internalType\":\"bytes\",\"name\":\"returnData\",\"type\":\"bytes\"}],\"internalType\":\"struct Multicall3.Result[]\",\"name\":\"returnData\",\"type\":\"tuple[]\"}],\"stateMutability\":\"payable\",\"type\":\"function\"}]" + MultiCallBIN = "0x608060405234801561001057600080fd5b50610ee0806100206000396000f3fe6080604052600436106100f35760003560e01c80634d2301cc1161008a578063a8b0574e11610059578063a8b0574e1461025a578063bce38bd714610275578063c3077fa914610288578063ee82ac5e1461029b57600080fd5b80634d2301cc146101ec57806372425d9d1461022157806382ad56cb1461023457806386d516e81461024757600080fd5b80633408e470116100c65780633408e47014610191578063399542e9146101a45780633e64a696146101c657806342cbb15c146101d957600080fd5b80630f28c97d146100f8578063174dea711461011a578063252dba421461013a57806327e86d6e1461015b575b600080fd5b34801561010457600080fd5b50425b6040519081526020015b60405180910390f35b61012d610128366004610a85565b6102ba565b6040516101119190610bbe565b61014d610148366004610a85565b6104ef565b604051610111929190610bd8565b34801561016757600080fd5b50437fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0140610107565b34801561019d57600080fd5b5046610107565b6101b76101b2366004610c60565b610690565b60405161011193929190610cba565b3480156101d257600080fd5b5048610107565b3480156101e557600080fd5b5043610107565b3480156101f857600080fd5b50610107610207366004610ce2565b73ffffffffffffffffffffffffffffffffffffffff163190565b34801561022d57600080fd5b5044610107565b61012d610242366004610a85565b6106ab565b34801561025357600080fd5b5045610107565b34801561026657600080fd5b50604051418152602001610111565b61012d610283366004610c60565b61085a565b6101b7610296366004610a85565b610a1a565b3480156102
a757600080fd5b506101076102b6366004610d18565b4090565b60606000828067ffffffffffffffff8111156102d8576102d8610d31565b60405190808252806020026020018201604052801561031e57816020015b6040805180820190915260008152606060208201528152602001906001900390816102f65790505b5092503660005b8281101561047757600085828151811061034157610341610d60565b6020026020010151905087878381811061035d5761035d610d60565b905060200281019061036f9190610d8f565b6040810135958601959093506103886020850185610ce2565b73ffffffffffffffffffffffffffffffffffffffff16816103ac6060870187610dcd565b6040516103ba929190610e32565b60006040518083038185875af1925050503d80600081146103f7576040519150601f19603f3d011682016040523d82523d6000602084013e6103fc565b606091505b50602080850191909152901515808452908501351761046d577f08c379a000000000000000000000000000000000000000000000000000000000600052602060045260176024527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060445260846000fd5b5050600101610325565b508234146104e6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f4d756c746963616c6c333a2076616c7565206d69736d6174636800000000000060448201526064015b60405180910390fd5b50505092915050565b436060828067ffffffffffffffff81111561050c5761050c610d31565b60405190808252806020026020018201604052801561053f57816020015b606081526020019060019003908161052a5790505b5091503660005b8281101561068657600087878381811061056257610562610d60565b90506020028101906105749190610e42565b92506105836020840184610ce2565b73ffffffffffffffffffffffffffffffffffffffff166105a66020850185610dcd565b6040516105b4929190610e32565b6000604051808303816000865af19150503d80600081146105f1576040519150601f19603f3d011682016040523d82523d6000602084013e6105f6565b606091505b5086848151811061060957610609610d60565b602090810291909101015290508061067d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060448201526064016104dd565b50600101610546565b5050
509250929050565b43804060606106a086868661085a565b905093509350939050565b6060818067ffffffffffffffff8111156106c7576106c7610d31565b60405190808252806020026020018201604052801561070d57816020015b6040805180820190915260008152606060208201528152602001906001900390816106e55790505b5091503660005b828110156104e657600084828151811061073057610730610d60565b6020026020010151905086868381811061074c5761074c610d60565b905060200281019061075e9190610e76565b925061076d6020840184610ce2565b73ffffffffffffffffffffffffffffffffffffffff166107906040850185610dcd565b60405161079e929190610e32565b6000604051808303816000865af19150503d80600081146107db576040519150601f19603f3d011682016040523d82523d6000602084013e6107e0565b606091505b506020808401919091529015158083529084013517610851577f08c379a000000000000000000000000000000000000000000000000000000000600052602060045260176024527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060445260646000fd5b50600101610714565b6060818067ffffffffffffffff81111561087657610876610d31565b6040519080825280602002602001820160405280156108bc57816020015b6040805180820190915260008152606060208201528152602001906001900390816108945790505b5091503660005b82811015610a105760008482815181106108df576108df610d60565b602002602001015190508686838181106108fb576108fb610d60565b905060200281019061090d9190610e42565b925061091c6020840184610ce2565b73ffffffffffffffffffffffffffffffffffffffff1661093f6020850185610dcd565b60405161094d929190610e32565b6000604051808303816000865af19150503d806000811461098a576040519150601f19603f3d011682016040523d82523d6000602084013e61098f565b606091505b506020830152151581528715610a07578051610a07576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060448201526064016104dd565b506001016108c3565b5050509392505050565b6000806060610a2b60018686610690565b919790965090945092505050565b60008083601f840112610a4b57600080fd5b50813567ffffffffffffffff811115610a6357600080fd5b6020830191508360208260051b
8501011115610a7e57600080fd5b9250929050565b60008060208385031215610a9857600080fd5b823567ffffffffffffffff811115610aaf57600080fd5b610abb85828601610a39565b90969095509350505050565b6000815180845260005b81811015610aed57602081850181015186830182015201610ad1565b81811115610aff576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b600082825180855260208086019550808260051b84010181860160005b84811015610bb1578583037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001895281518051151584528401516040858501819052610b9d81860183610ac7565b9a86019a9450505090830190600101610b4f565b5090979650505050505050565b602081526000610bd16020830184610b32565b9392505050565b600060408201848352602060408185015281855180845260608601915060608160051b870101935082870160005b82811015610c52577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0888703018452610c40868351610ac7565b95509284019290840190600101610c06565b509398975050505050505050565b600080600060408486031215610c7557600080fd5b83358015158114610c8557600080fd5b9250602084013567ffffffffffffffff811115610ca157600080fd5b610cad86828701610a39565b9497909650939450505050565b838152826020820152606060408201526000610cd96060830184610b32565b95945050505050565b600060208284031215610cf457600080fd5b813573ffffffffffffffffffffffffffffffffffffffff81168114610bd157600080fd5b600060208284031215610d2a57600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff81833603018112610dc357600080fd5b9190910192915050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610e0257600080fd5b83018035915067ffffffffffffffff821115610e1d57600080fd5b602001915036819003821315610a7e57600080fd5b8183823760009101908152919050565b600082357fffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffc1833603018112610dc357600080fd5b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa1833603018112610dc357600080fdfea2646970667358221220bb2b5c71a328032f97c676ae39a1ec2148d3e5d6f73d95e9b17910152d61f16264736f6c634300080c0033" +) + +type CallWithValue struct { + Target common.Address + AllowFailure bool + Value *big.Int + CallData []byte +} + +type Call struct { + Target common.Address + AllowFailure bool + CallData []byte +} + +type Result struct { + Success bool + ReturnData []byte +} + +func WaitForSuccessfulTxMined(evmClient blockchain.EVMClient, tx *types.Transaction) error { + log.Debug().Str("tx", tx.Hash().Hex()).Msg("waiting for tx to be mined") + receipt, err := bind.WaitMined(context.Background(), evmClient.DeployBackend(), tx) + if err != nil { + return err + } + if receipt.Status != types.ReceiptStatusSuccessful { + return fmt.Errorf("tx failed %s", tx.Hash().Hex()) + } + log.Debug().Str("tx", tx.Hash().Hex()).Str("Network", evmClient.GetNetworkName()).Msg("tx mined successfully") + return nil +} + +func MultiCallLogTriggerLoadGen( + evmClient blockchain.EVMClient, + multiCallAddress string, + logTriggerAddress []string, + logTriggerData [][]byte, +) (*types.Transaction, error) { + + contractAddress := common.HexToAddress(multiCallAddress) + multiCallABI, err := abi.JSON(strings.NewReader(MultiCallABI)) + if err != nil { + return nil, err + } + boundContract := bind.NewBoundContract(contractAddress, multiCallABI, evmClient.Backend(), evmClient.Backend(), evmClient.Backend()) + + var call []Call + for i, d := range logTriggerData { + data := Call{Target: common.HexToAddress(logTriggerAddress[i]), AllowFailure: false, CallData: d} + call = append(call, data) + } + + opts, err := evmClient.TransactionOpts(evmClient.GetDefaultWallet()) + if err != nil { + return nil, err + } + + // call aggregate3 to group all msg call data and send them in a single transaction + tx, err := boundContract.Transact(opts, 
"aggregate3", call) + if err != nil { + return nil, err + } + err = evmClient.MarkTxAsSentOnL2(tx) + if err != nil { + return nil, err + } + err = WaitForSuccessfulTxMined(evmClient, tx) + if err != nil { + return nil, errors.Wrapf(err, "multicall failed for log trigger load gen; multicall %s", contractAddress.Hex()) + } + return tx, nil + +} diff --git a/integration-tests/contracts/test_contracts.go b/integration-tests/contracts/test_contracts.go new file mode 100644 index 00000000..23453123 --- /dev/null +++ b/integration-tests/contracts/test_contracts.go @@ -0,0 +1,92 @@ +package contracts + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rs/zerolog" + + "github.com/goplugin/plugin-testing-framework/blockchain" + + le "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_emitter" +) + +type LogEmitterContract struct { + address common.Address + client blockchain.EVMClient + instance *le.LogEmitter + l zerolog.Logger +} + +func (e *LogEmitterContract) Address() common.Address { + return e.address +} + +func (e *LogEmitterContract) EmitLogInts(ints []int) (*types.Transaction, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + bigInts := make([]*big.Int, len(ints)) + for i, v := range ints { + bigInts[i] = big.NewInt(int64(v)) + } + tx, err := e.instance.EmitLog1(opts, bigInts) + if err != nil { + return nil, err + } + return tx, e.client.ProcessTransaction(tx) +} + +func (e *LogEmitterContract) EmitLogIntsIndexed(ints []int) (*types.Transaction, error) { + opts, err := e.client.TransactionOpts(e.client.GetDefaultWallet()) + if err != nil { + return nil, err + } + bigInts := make([]*big.Int, len(ints)) + for i, v := range ints { + bigInts[i] = big.NewInt(int64(v)) + } + tx, err := e.instance.EmitLog2(opts, bigInts) + if err != nil { + return nil, err + } + return tx, e.client.ProcessTransaction(tx) +} + 
+// EmitLogIntMultiIndexed emits one Log4 event carrying two indexed integer
+// topics plus a count; each int is widened to *big.Int for the contract call.
+// The transaction is returned along with any processing error.
+func (e *LogEmitterContract) EmitLogIntMultiIndexed(ints int, ints2 int, count int) (*types.Transaction, error) {
+	txOpts, err := e.client.TransactionOpts(e.client.GetDefaultWallet())
+	if err != nil {
+		return nil, err
+	}
+	first, second, cnt := big.NewInt(int64(ints)), big.NewInt(int64(ints2)), big.NewInt(int64(count))
+	emitTx, err := e.instance.EmitLog4(txOpts, first, second, cnt)
+	if err != nil {
+		return nil, err
+	}
+	return emitTx, e.client.ProcessTransaction(emitTx)
+}
+
+// EmitLogStrings emits a Log3 event containing the given string payloads.
+func (e *LogEmitterContract) EmitLogStrings(strings []string) (*types.Transaction, error) {
+	txOpts, err := e.client.TransactionOpts(e.client.GetDefaultWallet())
+	if err != nil {
+		return nil, err
+	}
+	emitTx, err := e.instance.EmitLog3(txOpts, strings)
+	if err != nil {
+		return nil, err
+	}
+	return emitTx, e.client.ProcessTransaction(emitTx)
+}
+
+// EmitLogInt is a convenience wrapper emitting a single non-indexed integer.
+func (e *LogEmitterContract) EmitLogInt(payload int) (*types.Transaction, error) {
+	return e.EmitLogInts([]int{payload})
+}
+
+// EmitLogIntIndexed is a convenience wrapper emitting a single indexed integer.
+func (e *LogEmitterContract) EmitLogIntIndexed(payload int) (*types.Transaction, error) {
+	return e.EmitLogIntsIndexed([]int{payload})
+}
+
+// EmitLogString is a convenience wrapper emitting a single string payload.
+func (e *LogEmitterContract) EmitLogString(strings string) (*types.Transaction, error) {
+	return e.EmitLogStrings([]string{strings})
+}
diff --git a/integration-tests/docker/README.md b/integration-tests/docker/README.md
new file mode 100644
index 00000000..dce4ef6c
--- /dev/null
+++ b/integration-tests/docker/README.md
@@ -0,0 +1,21 @@
+## Docker environment
+This folder contains Plugin cluster environment created with `testcontainers-go`
+
+### CLI for Local Testing Environment
+
+The command-line interface (CLI) located at `./integration-tests/docker/cmd/test_env.go` can be utilized to initiate a local testing environment. It is intended to replace Docker Compose in the near future.
+
+
+Example:
+```
+# Set required envs
+export PLUGIN_IMAGE=""
+export PLUGIN_VERSION=""
+# Stream logs to Loki
+export LOKI_TOKEN=...
+export LOKI_URL=https://${loki_host}/loki/api/v1/push + +cd ./integration-tests/docker/cmd + +go run test_env.go start-env cl-cluster +``` \ No newline at end of file diff --git a/integration-tests/docker/cmd/test_env.go b/integration-tests/docker/cmd/test_env.go new file mode 100644 index 00000000..43ff02eb --- /dev/null +++ b/integration-tests/docker/cmd/test_env.go @@ -0,0 +1,78 @@ +package main + +import ( + "io" + defaultlog "log" + "os" + "os/signal" + "syscall" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + "github.com/testcontainers/testcontainers-go" + + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" +) + +func main() { + rootCmd := &cobra.Command{ + Use: "test_env", + Short: "CL cluster docker test env management tool", + } + + startEnvCmd := &cobra.Command{ + Use: "start-env", + Short: "Start new docker test env", + } + rootCmd.AddCommand(startEnvCmd) + + startFullEnvCmd := &cobra.Command{ + Use: "cl-cluster", + Short: "Basic CL cluster", + RunE: func(cmd *cobra.Command, args []string) error { + log.Logger = logging.GetLogger(nil, "CORE_DOCKER_ENV_LOG_LEVEL") + log.Info().Msg("Starting CL cluster test environment..") + + _, err := test_env.NewCLTestEnvBuilder(). + WithGeth(). + WithMockAdapter(). + WithCLNodes(6). 
+ Build() + if err != nil { + return err + } + + log.Info().Msg("Cl cluster is ready") + + handleExitSignal() + + return nil + }, + } + + startEnvCmd.AddCommand(startFullEnvCmd) + + // Set default log level for non-testcontainer code + zerolog.SetGlobalLevel(zerolog.InfoLevel) + + // Discard testcontainers logs + testcontainers.Logger = defaultlog.New(io.Discard, "", defaultlog.LstdFlags) + + if err := rootCmd.Execute(); err != nil { + log.Error().Err(err).Msg("Error") + os.Exit(1) + } +} + +func handleExitSignal() { + // Create a channel to receive exit signals + exitChan := make(chan os.Signal, 1) + signal.Notify(exitChan, os.Interrupt, syscall.SIGTERM) + + log.Info().Msg("Press Ctrl+C to destroy the test environment") + + // Block until an exit signal is received + <-exitChan +} diff --git a/integration-tests/docker/test_env/cl_node.go b/integration-tests/docker/test_env/cl_node.go new file mode 100644 index 00000000..9f7aeafd --- /dev/null +++ b/integration-tests/docker/test_env/cl_node.go @@ -0,0 +1,480 @@ +package test_env + +import ( + "context" + "fmt" + "io" + "maps" + "math/big" + "net/url" + "os" + "regexp" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/pelletier/go-toml/v2" + "github.com/pkg/errors" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + tc "github.com/testcontainers/testcontainers-go" + tcwait "github.com/testcontainers/testcontainers-go/wait" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/docker" + "github.com/goplugin/plugin-testing-framework/docker/test_env" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/logstream" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + 
"github.com/goplugin/pluginv3.0/v2/core/services/keystore/chaintype" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + it_utils "github.com/goplugin/pluginv3.0/integration-tests/utils" + "github.com/goplugin/pluginv3.0/integration-tests/utils/templates" +) + +var ( + ErrConnectNodeClient = "could not connect Node HTTP Client" + ErrStartCLNodeContainer = "failed to start CL node container" +) + +type ClNode struct { + test_env.EnvComponent + API *client.PluginClient `json:"-"` + NodeConfig *plugin.Config `json:"-"` + NodeSecretsConfigTOML string `json:"-"` + PostgresDb *test_env.PostgresDb `json:"postgresDb"` + UserEmail string `json:"userEmail"` + UserPassword string `json:"userPassword"` + AlwaysPullImage bool `json:"-"` + t *testing.T + l zerolog.Logger +} + +type ClNodeOption = func(c *ClNode) + +func WithSecrets(secretsTOML string) ClNodeOption { + return func(c *ClNode) { + c.NodeSecretsConfigTOML = secretsTOML + } +} + +func WithNodeEnvVars(ev map[string]string) ClNodeOption { + return func(n *ClNode) { + if n.ContainerEnvs == nil { + n.ContainerEnvs = map[string]string{} + } + maps.Copy(n.ContainerEnvs, ev) + } +} + +// Sets custom node container name if name is not empty +func WithNodeContainerName(name string) ClNodeOption { + return func(c *ClNode) { + if name != "" { + c.ContainerName = name + } + } +} + +// Sets custom node db container name if name is not empty +func WithDbContainerName(name string) ClNodeOption { + return func(c *ClNode) { + if name != "" { + c.PostgresDb.ContainerName = name + } + } +} + +func WithLogStream(ls *logstream.LogStream) ClNodeOption { + return func(c *ClNode) { + c.LogStream = ls + } +} + +func WithImage(image string) ClNodeOption { + return func(c *ClNode) { + c.ContainerImage = image + } +} + +func WithVersion(version string) ClNodeOption { + return func(c *ClNode) { + c.ContainerVersion = version + } +} + +func WithPgDBOptions(opts ...test_env.PostgresDbOption) ClNodeOption { + return func(c *ClNode) { + 
var err error + c.PostgresDb, err = test_env.NewPostgresDb(c.EnvComponent.Networks, opts...) + if err != nil { + c.t.Fatalf("failed to create postgres db: %v", err) + } + } +} + +func NewClNode(networks []string, imageName, imageVersion string, nodeConfig *plugin.Config, opts ...ClNodeOption) (*ClNode, error) { + nodeDefaultCName := fmt.Sprintf("%s-%s", "cl-node", uuid.NewString()[0:8]) + pgDefaultCName := fmt.Sprintf("pg-%s", nodeDefaultCName) + pgDb, err := test_env.NewPostgresDb(networks, test_env.WithPostgresDbContainerName(pgDefaultCName)) + if err != nil { + return nil, err + } + n := &ClNode{ + EnvComponent: test_env.EnvComponent{ + ContainerName: nodeDefaultCName, + ContainerImage: imageName, + ContainerVersion: imageVersion, + Networks: networks, + }, + UserEmail: "local@local.com", + UserPassword: "localdevpassword", + NodeConfig: nodeConfig, + PostgresDb: pgDb, + l: log.Logger, + } + n.SetDefaultHooks() + for _, opt := range opts { + opt(n) + } + return n, nil +} + +func (n *ClNode) SetTestLogger(t *testing.T) { + n.l = logging.GetTestLogger(t) + n.t = t + n.PostgresDb.WithTestInstance(t) +} + +// Restart restarts only CL node, DB container is reused +func (n *ClNode) Restart(cfg *plugin.Config) error { + if err := n.Container.Terminate(testcontext.Get(n.t)); err != nil { + return err + } + n.NodeConfig = cfg + return n.StartContainer() +} + +// UpgradeVersion restarts the cl node with new image and version +func (n *ClNode) UpgradeVersion(newImage, newVersion string) error { + n.l.Info(). + Str("Name", n.ContainerName). + Str("Old Image", newImage). + Str("Old Version", newVersion). + Str("New Image", newImage). + Str("New Version", newVersion). 
+ Msg("Upgrading Plugin Node") + n.ContainerImage = newImage + n.ContainerVersion = newVersion + return n.Restart(n.NodeConfig) +} + +func (n *ClNode) PrimaryETHAddress() (string, error) { + return n.API.PrimaryEthAddress() +} + +func (n *ClNode) AddBootstrapJob(verifierAddr common.Address, chainId int64, + feedId [32]byte) (*client.Job, error) { + spec := it_utils.BuildBootstrapSpec(verifierAddr, chainId, feedId) + return n.API.MustCreateJob(spec) +} + +func (n *ClNode) AddMercuryOCRJob(verifierAddr common.Address, fromBlock uint64, chainId int64, + feedId [32]byte, customAllowedFaults *int, bootstrapUrl string, + mercuryServerUrl string, mercuryServerPubKey string, + eaUrls []*url.URL) (*client.Job, error) { + + csaKeys, _, err := n.API.ReadCSAKeys() + if err != nil { + return nil, err + } + csaPubKey := csaKeys.Data[0].ID + + nodeOCRKeys, err := n.API.MustReadOCR2Keys() + if err != nil { + return nil, err + } + + var nodeOCRKeyId []string + for _, key := range nodeOCRKeys.Data { + if key.Attributes.ChainType == string(chaintype.EVM) { + nodeOCRKeyId = append(nodeOCRKeyId, key.ID) + break + } + } + + bridges := it_utils.BuildBridges(eaUrls) + for index := range bridges { + err = n.API.MustCreateBridge(&bridges[index]) + if err != nil { + return nil, err + } + } + + var allowedFaults int + if customAllowedFaults != nil { + allowedFaults = *customAllowedFaults + } else { + allowedFaults = 2 + } + + spec := it_utils.BuildOCRSpec( + verifierAddr, chainId, fromBlock, feedId, bridges, + csaPubKey, mercuryServerUrl, mercuryServerPubKey, nodeOCRKeyId[0], + bootstrapUrl, allowedFaults) + + return n.API.MustCreateJob(spec) +} + +func (n *ClNode) GetContainerName() string { + name, err := n.Container.Name(testcontext.Get(n.t)) + if err != nil { + return "" + } + return strings.Replace(name, "/", "", -1) +} + +func (n *ClNode) GetAPIClient() *client.PluginClient { + return n.API +} + +func (n *ClNode) GetPeerUrl() (string, error) { + p2pKeys, err := n.API.MustReadP2PKeys() + 
if err != nil { + return "", err + } + p2pId := p2pKeys.Data[0].Attributes.PeerID + + return fmt.Sprintf("%s@%s:%d", p2pId, n.GetContainerName(), 6690), nil +} + +func (n *ClNode) GetNodeCSAKeys() (*client.CSAKeys, error) { + csaKeys, _, err := n.API.ReadCSAKeys() + if err != nil { + return nil, err + } + return csaKeys, err +} + +func (n *ClNode) PluginNodeAddress() (common.Address, error) { + addr, err := n.API.PrimaryEthAddress() + if err != nil { + return common.Address{}, err + } + return common.HexToAddress(addr), nil +} + +func (n *ClNode) Fund(evmClient blockchain.EVMClient, amount *big.Float) error { + toAddress, err := n.API.PrimaryEthAddress() + if err != nil { + return err + } + toAddr := common.HexToAddress(toAddress) + gasEstimates, err := evmClient.EstimateGas(ethereum.CallMsg{ + To: &toAddr, + }) + if err != nil { + return err + } + return evmClient.Fund(toAddress, amount, gasEstimates) +} + +func (n *ClNode) StartContainer() error { + err := n.PostgresDb.StartContainer() + if err != nil { + return err + } + + // If the node secrets TOML is not set, generate it with the default template + nodeSecretsToml, err := templates.NodeSecretsTemplate{ + PgDbName: n.PostgresDb.DbName, + PgHost: n.PostgresDb.ContainerName, + PgPort: n.PostgresDb.InternalPort, + PgPassword: n.PostgresDb.Password, + CustomSecrets: n.NodeSecretsConfigTOML, + }.String() + if err != nil { + return err + } + + cReq, err := n.getContainerRequest(nodeSecretsToml) + if err != nil { + return err + } + + l := tc.Logger + if n.t != nil { + l = logging.CustomT{ + T: n.t, + L: n.l, + } + } + container, err := docker.StartContainerWithRetry(n.l, tc.GenericContainerRequest{ + ContainerRequest: *cReq, + Started: true, + Reuse: true, + Logger: l, + }) + if err != nil { + return fmt.Errorf("%s err: %w", ErrStartCLNodeContainer, err) + } + + clEndpoint, err := test_env.GetEndpoint(testcontext.Get(n.t), container, "http") + if err != nil { + return err + } + ip, err := 
container.ContainerIP(testcontext.Get(n.t)) + if err != nil { + return err + } + n.l.Info(). + Str("containerName", n.ContainerName). + Str("containerImage", n.ContainerImage). + Str("containerVersion", n.ContainerVersion). + Str("clEndpoint", clEndpoint). + Str("clInternalIP", ip). + Str("userEmail", n.UserEmail). + Str("userPassword", n.UserPassword). + Msg("Started Plugin Node container") + clClient, err := client.NewPluginClient(&client.PluginConfig{ + URL: clEndpoint, + Email: n.UserEmail, + Password: n.UserPassword, + InternalIP: ip, + }, + n.l) + if err != nil { + return fmt.Errorf("%s err: %w", ErrConnectNodeClient, err) + } + clClient.Config.InternalIP = n.ContainerName + n.Container = container + n.API = clClient + + return nil +} + +func (n *ClNode) ExecGetVersion() (string, error) { + cmd := []string{"plugin", "--version"} + _, output, err := n.Container.Exec(context.Background(), cmd) + if err != nil { + return "", errors.Wrapf(err, "could not execute cmd %s", cmd) + } + outputBytes, err := io.ReadAll(output) + if err != nil { + return "", err + } + outputString := strings.TrimSpace(string(outputBytes)) + + // Find version in cmd output + re := regexp.MustCompile("@(.*)") + matches := re.FindStringSubmatch(outputString) + + if len(matches) > 1 { + return matches[1], nil + } + return "", errors.Errorf("could not find plugin version in command output '%'", output) +} + +func (n *ClNode) getContainerRequest(secrets string) ( + *tc.ContainerRequest, error) { + configFile, err := os.CreateTemp("", "node_config") + if err != nil { + return nil, err + } + data, err := toml.Marshal(n.NodeConfig) + if err != nil { + return nil, err + } + _, err = configFile.WriteString(string(data)) + if err != nil { + return nil, err + } + secretsFile, err := os.CreateTemp("", "node_secrets") + if err != nil { + return nil, err + } + _, err = secretsFile.WriteString(secrets) + if err != nil { + return nil, err + } + + adminCreds := "local@local.com\nlocaldevpassword" + 
adminCredsFile, err := os.CreateTemp("", "admin_creds") + if err != nil { + return nil, err + } + _, err = adminCredsFile.WriteString(adminCreds) + if err != nil { + return nil, err + } + + apiCreds := "local@local.com\nlocaldevpassword" + apiCredsFile, err := os.CreateTemp("", "api_creds") + if err != nil { + return nil, err + } + _, err = apiCredsFile.WriteString(apiCreds) + if err != nil { + return nil, err + } + + configPath := "/home/cl-node-config.toml" + secretsPath := "/home/cl-node-secrets.toml" + adminCredsPath := "/home/admin-credentials.txt" + apiCredsPath := "/home/api-credentials.txt" + + return &tc.ContainerRequest{ + Name: n.ContainerName, + AlwaysPullImage: n.AlwaysPullImage, + Image: fmt.Sprintf("%s:%s", n.ContainerImage, n.ContainerVersion), + ExposedPorts: []string{"6688/tcp"}, + Env: n.ContainerEnvs, + Entrypoint: []string{"plugin", + "-c", configPath, + "-s", secretsPath, + "node", "start", "-d", + "-p", adminCredsPath, + "-a", apiCredsPath, + }, + Networks: append(n.Networks, "tracing"), + WaitingFor: tcwait.ForHTTP("/health"). + WithPort("6688/tcp"). + WithStartupTimeout(90 * time.Second). 
+ WithPollInterval(1 * time.Second), + Files: []tc.ContainerFile{ + { + HostFilePath: configFile.Name(), + ContainerFilePath: configPath, + FileMode: 0644, + }, + { + HostFilePath: secretsFile.Name(), + ContainerFilePath: secretsPath, + FileMode: 0644, + }, + { + HostFilePath: adminCredsFile.Name(), + ContainerFilePath: adminCredsPath, + FileMode: 0644, + }, + { + HostFilePath: apiCredsFile.Name(), + ContainerFilePath: apiCredsPath, + FileMode: 0644, + }, + }, + LifecycleHooks: []tc.ContainerLifecycleHooks{ + { + PostStarts: n.PostStartsHooks, + PostStops: n.PostStopsHooks, + PreTerminates: n.PreTerminatesHooks, + }, + }, + }, nil +} diff --git a/integration-tests/docker/test_env/cl_node_cluster.go b/integration-tests/docker/test_env/cl_node_cluster.go new file mode 100644 index 00000000..334bc50a --- /dev/null +++ b/integration-tests/docker/test_env/cl_node_cluster.go @@ -0,0 +1,70 @@ +package test_env + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "golang.org/x/sync/errgroup" + + "github.com/goplugin/pluginv3.0/integration-tests/client" +) + +var ( + ErrGetNodeCSAKeys = "failed get CL node CSA keys" +) + +type ClCluster struct { + Nodes []*ClNode `json:"nodes"` +} + +// Start all nodes in the cluster./docker/tests/functional/api +func (c *ClCluster) Start() error { + eg := &errgroup.Group{} + nodes := c.Nodes + + for i := 0; i < len(nodes); i++ { + nodeIndex := i + eg.Go(func() error { + err := nodes[nodeIndex].StartContainer() + if err != nil { + return err + } + return nil + }) + } + + return eg.Wait() +} + +func (c *ClCluster) NodeAPIs() []*client.PluginClient { + clients := make([]*client.PluginClient, 0) + for _, c := range c.Nodes { + clients = append(clients, c.API) + } + return clients +} + +// Return all the on-chain wallet addresses for a set of Plugin nodes +func (c *ClCluster) NodeAddresses() ([]common.Address, error) { + addresses := make([]common.Address, 0) + for _, n := range c.Nodes { + primaryAddress, err := 
n.PluginNodeAddress() + if err != nil { + return nil, err + } + addresses = append(addresses, primaryAddress) + } + return addresses, nil +} + +func (c *ClCluster) NodeCSAKeys() ([]string, error) { + var keys []string + for _, n := range c.Nodes { + csaKeys, err := n.GetNodeCSAKeys() + if err != nil { + return nil, fmt.Errorf("%s, err: %w", ErrGetNodeCSAKeys, err) + } + keys = append(keys, csaKeys.Data[0].ID) + } + return keys, nil +} diff --git a/integration-tests/docker/test_env/test_env.go b/integration-tests/docker/test_env/test_env.go new file mode 100644 index 00000000..66328a2b --- /dev/null +++ b/integration-tests/docker/test_env/test_env.go @@ -0,0 +1,282 @@ +package test_env + +import ( + "encoding/json" + "fmt" + "math/big" + "runtime/debug" + "testing" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + tc "github.com/testcontainers/testcontainers-go" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/docker" + "github.com/goplugin/plugin-testing-framework/docker/test_env" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/logstream" + "github.com/goplugin/plugin-testing-framework/utils/runid" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + + core_testconfig "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +var ( + ErrFundCLNode = "failed to fund CL node" +) + +type CLClusterTestEnv struct { + Cfg *TestEnvConfig + Network *tc.DockerNetwork + LogStream *logstream.LogStream + + /* components */ + ClCluster *ClCluster + PrivateChain []test_env.PrivateChain // for tests using non-dev networks -- unify it with new approach + MockAdapter *test_env.Killgrave + EVMClient blockchain.EVMClient + ContractDeployer contracts.ContractDeployer 
+ ContractLoader contracts.ContractLoader + RpcProvider test_env.RpcProvider + PrivateEthereumConfig *test_env.EthereumNetwork // new approach to private chains, supporting eth1 and eth2 + l zerolog.Logger + t *testing.T +} + +func NewTestEnv() (*CLClusterTestEnv, error) { + log.Logger = logging.GetLogger(nil, "CORE_DOCKER_ENV_LOG_LEVEL") + network, err := docker.CreateNetwork(log.Logger) + if err != nil { + return nil, err + } + return &CLClusterTestEnv{ + Network: network, + l: log.Logger, + }, nil +} + +// WithTestEnvConfig sets the test environment cfg. +// Sets up private ethereum chain and MockAdapter containers with the provided cfg. +func (te *CLClusterTestEnv) WithTestEnvConfig(cfg *TestEnvConfig) *CLClusterTestEnv { + te.Cfg = cfg + if cfg.MockAdapter.ContainerName != "" { + n := []string{te.Network.Name} + te.MockAdapter = test_env.NewKillgrave(n, te.Cfg.MockAdapter.ImpostersPath, test_env.WithContainerName(te.Cfg.MockAdapter.ContainerName), test_env.WithLogStream(te.LogStream)) + } + return te +} + +func (te *CLClusterTestEnv) WithTestInstance(t *testing.T) *CLClusterTestEnv { + te.t = t + te.l = logging.GetTestLogger(t) + if te.MockAdapter != nil { + te.MockAdapter.WithTestInstance(t) + } + return te +} + +func (te *CLClusterTestEnv) ParallelTransactions(enabled bool) { + te.EVMClient.ParallelTransactions(enabled) +} + +func (te *CLClusterTestEnv) WithPrivateChain(evmNetworks []blockchain.EVMNetwork) *CLClusterTestEnv { + var chains []test_env.PrivateChain + for _, evmNetwork := range evmNetworks { + n := evmNetwork + pgc := test_env.NewPrivateGethChain(&n, []string{te.Network.Name}) + if te.t != nil { + pgc.GetPrimaryNode().WithTestInstance(te.t) + } + chains = append(chains, pgc) + var privateChain test_env.PrivateChain + switch n.SimulationType { + case "besu": + privateChain = test_env.NewPrivateBesuChain(&n, []string{te.Network.Name}) + default: + privateChain = test_env.NewPrivateGethChain(&n, []string{te.Network.Name}) + } + chains = 
append(chains, privateChain) + } + te.PrivateChain = chains + return te +} + +func (te *CLClusterTestEnv) StartPrivateChain() error { + for _, chain := range te.PrivateChain { + primaryNode := chain.GetPrimaryNode() + if primaryNode == nil { + return fmt.Errorf("primary node is nil in PrivateChain interface, stack: %s", string(debug.Stack())) + } + err := primaryNode.Start() + if err != nil { + return err + } + err = primaryNode.ConnectToClient() + if err != nil { + return err + } + } + return nil +} + +func (te *CLClusterTestEnv) StartEthereumNetwork(cfg *test_env.EthereumNetwork) (blockchain.EVMNetwork, test_env.RpcProvider, error) { + // if environment is being restored from a previous state, use the existing config + // this might fail terribly if temporary folders with chain data on the host machine were removed + if te.Cfg != nil && te.Cfg.EthereumNetwork != nil { + builder := test_env.NewEthereumNetworkBuilder() + c, err := builder.WithExistingConfig(*te.Cfg.EthereumNetwork). + WithTest(te.t). + Build() + if err != nil { + return blockchain.EVMNetwork{}, test_env.RpcProvider{}, err + } + cfg = &c + } + n, rpc, err := cfg.Start() + + if err != nil { + return blockchain.EVMNetwork{}, test_env.RpcProvider{}, err + } + + return n, rpc, nil +} + +func (te *CLClusterTestEnv) StartMockAdapter() error { + return te.MockAdapter.StartContainer() +} + +// pass config here +func (te *CLClusterTestEnv) StartClCluster(nodeConfig *plugin.Config, count int, secretsConfig string, testconfig core_testconfig.GlobalTestConfig, opts ...ClNodeOption) error { + if te.Cfg != nil && te.Cfg.ClCluster != nil { + te.ClCluster = te.Cfg.ClCluster + } else { + opts = append(opts, WithSecrets(secretsConfig), WithLogStream(te.LogStream)) + te.ClCluster = &ClCluster{} + for i := 0; i < count; i++ { + ocrNode, err := NewClNode([]string{te.Network.Name}, *testconfig.GetPluginImageConfig().Image, *testconfig.GetPluginImageConfig().Version, nodeConfig, opts...) 
+ if err != nil { + return err + } + te.ClCluster.Nodes = append(te.ClCluster.Nodes, ocrNode) + } + } + + // Set test logger + if te.t != nil { + for _, n := range te.ClCluster.Nodes { + n.SetTestLogger(te.t) + } + } + + // Start/attach node containers + return te.ClCluster.Start() +} + +// FundPluginNodes will fund all the provided Plugin nodes with a set amount of native currency +func (te *CLClusterTestEnv) FundPluginNodes(amount *big.Float) error { + for _, cl := range te.ClCluster.Nodes { + if err := cl.Fund(te.EVMClient, amount); err != nil { + return fmt.Errorf("%s, err: %w", ErrFundCLNode, err) + } + } + return te.EVMClient.WaitForEvents() +} + +func (te *CLClusterTestEnv) Terminate() error { + // TESTCONTAINERS_RYUK_DISABLED=false by default so ryuk will remove all + // the containers and the Network + return nil +} + +// Cleanup cleans the environment up after it's done being used, mainly for returning funds when on live networks and logs. +func (te *CLClusterTestEnv) Cleanup() error { + te.l.Info().Msg("Cleaning up test environment") + + runIdErr := runid.RemoveLocalRunId() + if runIdErr != nil { + te.l.Warn().Msgf("Failed to remove .run.id file due to: %s (not a big deal, you can still remove it manually)", runIdErr.Error()) + } + + if te.t == nil { + return fmt.Errorf("cannot cleanup test environment without a testing.T") + } + + if te.ClCluster == nil || len(te.ClCluster.Nodes) == 0 { + return fmt.Errorf("plugin nodes are nil, unable cleanup plugin nodes") + } + + te.logWhetherAllContainersAreRunning() + + if te.EVMClient == nil { + return fmt.Errorf("evm client is nil, unable to return funds from plugin nodes during cleanup") + } else if te.EVMClient.NetworkSimulated() { + te.l.Info(). + Str("Network Name", te.EVMClient.GetNetworkName()). + Msg("Network is a simulated network. 
Skipping fund return.") + } else { + if err := te.returnFunds(); err != nil { + return err + } + } + + // close EVMClient connections + if te.EVMClient != nil { + err := te.EVMClient.Close() + return err + } + + return nil +} + +func (te *CLClusterTestEnv) logWhetherAllContainersAreRunning() { + for _, node := range te.ClCluster.Nodes { + if node.Container == nil { + continue + } + + isCLRunning := node.Container.IsRunning() + isDBRunning := node.PostgresDb.Container.IsRunning() + + if !isCLRunning { + te.l.Warn().Str("Node", node.ContainerName).Msg("Plugin node was not running, when test ended") + } + + if !isDBRunning { + te.l.Warn().Str("Node", node.ContainerName).Msg("Postgres DB is not running, when test ended") + } + } +} + +func (te *CLClusterTestEnv) returnFunds() error { + te.l.Info().Msg("Attempting to return Plugin node funds to default network wallets") + for _, pluginNode := range te.ClCluster.Nodes { + fundedKeys, err := pluginNode.API.ExportEVMKeysForChain(te.EVMClient.GetChainID().String()) + if err != nil { + return err + } + for _, key := range fundedKeys { + keyToDecrypt, err := json.Marshal(key) + if err != nil { + return err + } + // This can take up a good bit of RAM and time. When running on the remote-test-runner, this can lead to OOM + // issues. So we avoid running in parallel; slower, but safer. 
+ decryptedKey, err := keystore.DecryptKey(keyToDecrypt, client.PluginKeyPassword) + if err != nil { + return err + } + if err = te.EVMClient.ReturnFunds(decryptedKey.PrivateKey); err != nil { + // If we fail to return funds from one, go on to try the others anyway + te.l.Error().Err(err).Str("Node", pluginNode.ContainerName).Msg("Error returning funds from node") + } + } + } + + te.l.Info().Msg("Returned funds from Plugin nodes") + return nil +} diff --git a/integration-tests/docker/test_env/test_env_builder.go b/integration-tests/docker/test_env/test_env_builder.go new file mode 100644 index 00000000..1756d04c --- /dev/null +++ b/integration-tests/docker/test_env/test_env_builder.go @@ -0,0 +1,437 @@ +package test_env + +import ( + "fmt" + "math/big" + "os" + "runtime/debug" + "testing" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/docker/test_env" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/logstream" + "github.com/goplugin/plugin-testing-framework/networks" + "github.com/goplugin/plugin-testing-framework/utils/osutil" + "github.com/goplugin/pluginv3.0/v2/core/services/plugin" + + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/types/config/node" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +type CleanUpType string + +const ( + CleanUpTypeNone CleanUpType = "none" + CleanUpTypeStandard CleanUpType = "standard" + CleanUpTypeCustom CleanUpType = "custom" +) + +type CLTestEnvBuilder struct { + hasLogStream bool + hasKillgrave bool + hasForwarders bool + clNodeConfig *plugin.Config + secretsConfig string + nonDevGethNetworks []blockchain.EVMNetwork + clNodesCount int + clNodesOpts []func(*ClNode) + customNodeCsaKeys 
[]string + defaultNodeCsaKeys []string + l zerolog.Logger + t *testing.T + te *CLClusterTestEnv + isNonEVM bool + cleanUpType CleanUpType + cleanUpCustomFn func() + chainOptionsFn []ChainOption + evmClientNetworkOption []EVMClientNetworkOption + ethereumNetwork *test_env.EthereumNetwork + testConfig tc.GlobalTestConfig + + /* funding */ + ETHFunds *big.Float +} + +func NewCLTestEnvBuilder() *CLTestEnvBuilder { + return &CLTestEnvBuilder{ + l: log.Logger, + hasLogStream: true, + } +} + +// WithTestEnv sets the test environment to use for the test. +// If nil, a new test environment is created. +// If not nil, the test environment is used as-is. +// If TEST_ENV_CONFIG_PATH is set, the test environment is created with the config at that path. +func (b *CLTestEnvBuilder) WithTestEnv(te *CLClusterTestEnv) (*CLTestEnvBuilder, error) { + envConfigPath, isSet := os.LookupEnv("TEST_ENV_CONFIG_PATH") + var cfg *TestEnvConfig + var err error + if isSet { + cfg, err = NewTestEnvConfigFromFile(envConfigPath) + if err != nil { + return nil, err + } + } + + if te != nil { + b.te = te + } else { + b.te, err = NewTestEnv() + if err != nil { + return nil, err + } + } + + if cfg != nil { + b.te = b.te.WithTestEnvConfig(cfg) + } + return b, nil +} + +// WithTestLogger sets the test logger to use for the test. +// Useful for parallel tests so the logging will be separated correctly in the results views. 
+func (b *CLTestEnvBuilder) WithTestInstance(t *testing.T) *CLTestEnvBuilder { + b.t = t + b.l = logging.GetTestLogger(t) + return b +} + +// WithoutLogStream disables LogStream logging component +func (b *CLTestEnvBuilder) WithoutLogStream() *CLTestEnvBuilder { + b.hasLogStream = false + return b +} + +func (b *CLTestEnvBuilder) WithCLNodes(clNodesCount int) *CLTestEnvBuilder { + b.clNodesCount = clNodesCount + return b +} + +func (b *CLTestEnvBuilder) WithTestConfig(cfg tc.GlobalTestConfig) *CLTestEnvBuilder { + b.testConfig = cfg + return b +} + +func (b *CLTestEnvBuilder) WithCLNodeOptions(opt ...ClNodeOption) *CLTestEnvBuilder { + b.clNodesOpts = append(b.clNodesOpts, opt...) + return b +} + +func (b *CLTestEnvBuilder) WithForwarders() *CLTestEnvBuilder { + b.hasForwarders = true + return b +} + +func (b *CLTestEnvBuilder) WithFunding(eth *big.Float) *CLTestEnvBuilder { + b.ETHFunds = eth + return b +} + +// deprecated +// left only for backward compatibility +func (b *CLTestEnvBuilder) WithGeth() *CLTestEnvBuilder { + ethBuilder := test_env.NewEthereumNetworkBuilder() + cfg, err := ethBuilder. + WithConsensusType(test_env.ConsensusType_PoW). + WithExecutionLayer(test_env.ExecutionLayer_Geth). + WithTest(b.t). 
+ Build() + + if err != nil { + panic(err) + } + + b.ethereumNetwork = &cfg + + return b +} + +func (b *CLTestEnvBuilder) WithPrivateEthereumNetwork(en test_env.EthereumNetwork) *CLTestEnvBuilder { + b.ethereumNetwork = &en + return b +} + +func (b *CLTestEnvBuilder) WithPrivateGethChains(evmNetworks []blockchain.EVMNetwork) *CLTestEnvBuilder { + b.nonDevGethNetworks = evmNetworks + return b +} + +func (b *CLTestEnvBuilder) WithCLNodeConfig(cfg *plugin.Config) *CLTestEnvBuilder { + b.clNodeConfig = cfg + return b +} + +func (b *CLTestEnvBuilder) WithSecretsConfig(secrets string) *CLTestEnvBuilder { + b.secretsConfig = secrets + return b +} + +func (b *CLTestEnvBuilder) WithMockAdapter() *CLTestEnvBuilder { + b.hasKillgrave = true + return b +} + +// WithNonEVM sets the test environment to not use EVM when built. +func (b *CLTestEnvBuilder) WithNonEVM() *CLTestEnvBuilder { + b.isNonEVM = true + return b +} + +func (b *CLTestEnvBuilder) WithStandardCleanup() *CLTestEnvBuilder { + b.cleanUpType = CleanUpTypeStandard + return b +} + +func (b *CLTestEnvBuilder) WithoutCleanup() *CLTestEnvBuilder { + b.cleanUpType = CleanUpTypeNone + return b +} + +func (b *CLTestEnvBuilder) WithCustomCleanup(customFn func()) *CLTestEnvBuilder { + b.cleanUpType = CleanUpTypeCustom + b.cleanUpCustomFn = customFn + return b +} + +type ChainOption = func(*evmcfg.Chain) *evmcfg.Chain + +func (b *CLTestEnvBuilder) WithChainOptions(opts ...ChainOption) *CLTestEnvBuilder { + b.chainOptionsFn = make([]ChainOption, 0) + b.chainOptionsFn = append(b.chainOptionsFn, opts...) + + return b +} + +type EVMClientNetworkOption = func(*blockchain.EVMNetwork) *blockchain.EVMNetwork + +func (b *CLTestEnvBuilder) EVMClientNetworkOptions(opts ...EVMClientNetworkOption) *CLTestEnvBuilder { + b.evmClientNetworkOption = make([]EVMClientNetworkOption, 0) + b.evmClientNetworkOption = append(b.evmClientNetworkOption, opts...) 
+ + return b +} + +func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { + if b.testConfig == nil { + return nil, fmt.Errorf("test config must be set") + } + + if b.te == nil { + var err error + b, err = b.WithTestEnv(nil) + if err != nil { + return nil, err + } + } + + var err error + if b.t != nil { + b.te.WithTestInstance(b.t) + } + + if b.hasLogStream { + b.te.LogStream, err = logstream.NewLogStream(b.te.t, b.testConfig.GetLoggingConfig()) + if err != nil { + return nil, err + } + } + + if b.hasKillgrave { + if b.te.Network == nil { + return nil, fmt.Errorf("test environment builder failed: %w", fmt.Errorf("cannot start mock adapter without a network")) + } + + b.te.MockAdapter = test_env.NewKillgrave([]string{b.te.Network.Name}, "", test_env.WithLogStream(b.te.LogStream)) + + err = b.te.StartMockAdapter() + if err != nil { + return nil, err + } + } + + if b.t != nil { + b.te.WithTestInstance(b.t) + } + + switch b.cleanUpType { + case CleanUpTypeStandard: + b.t.Cleanup(func() { + if err := b.te.Cleanup(); err != nil { + b.l.Error().Err(err).Msg("Error cleaning up test environment") + } + }) + case CleanUpTypeCustom: + b.t.Cleanup(b.cleanUpCustomFn) + case CleanUpTypeNone: + b.l.Warn().Msg("test environment won't be cleaned up") + case "": + return b.te, fmt.Errorf("test environment builder failed: %w", fmt.Errorf("explicit cleanup type must be set when building test environment")) + } + + if b.te.LogStream != nil { + b.t.Cleanup(func() { + b.l.Info().Msg("Shutting down LogStream") + logPath, err := osutil.GetAbsoluteFolderPath("logs") + if err != nil { + b.l.Info().Str("Absolute path", logPath).Msg("LogStream logs folder location") + } + + if b.t.Failed() || *b.testConfig.GetLoggingConfig().TestLogCollect { + // we can't do much if this fails, so we just log the error in logstream + _ = b.te.LogStream.FlushAndShutdown() + b.te.LogStream.PrintLogTargetsLocations() + b.te.LogStream.SaveLogLocationInTestSummary() + } + + }) + } + + if 
b.nonDevGethNetworks != nil { + b.te.WithPrivateChain(b.nonDevGethNetworks) + err := b.te.StartPrivateChain() + if err != nil { + return b.te, err + } + var nonDevNetworks []blockchain.EVMNetwork + for i, n := range b.te.PrivateChain { + primaryNode := n.GetPrimaryNode() + if primaryNode == nil { + return b.te, fmt.Errorf("primary node is nil in PrivateChain interface, stack: %s", string(debug.Stack())) + } + nonDevNetworks = append(nonDevNetworks, *n.GetNetworkConfig()) + nonDevNetworks[i].URLs = []string{primaryNode.GetInternalWsUrl()} + nonDevNetworks[i].HTTPURLs = []string{primaryNode.GetInternalHttpUrl()} + } + if nonDevNetworks == nil { + return nil, fmt.Errorf("cannot create nodes with custom config without nonDevNetworks") + } + + err = b.te.StartClCluster(b.clNodeConfig, b.clNodesCount, b.secretsConfig, b.testConfig, b.clNodesOpts...) + if err != nil { + return nil, err + } + return b.te, nil + } + + networkConfig := networks.MustGetSelectedNetworkConfig(b.testConfig.GetNetworkConfig())[0] + var rpcProvider test_env.RpcProvider + if b.ethereumNetwork != nil && networkConfig.Simulated { + // TODO here we should save the ethereum network config to te.Cfg, but it doesn't exist at this point + // in general it seems we have no methods for saving config to file and we only load it from file + // but I don't know how that config file is to be created or whether anyone ever done that + b.ethereumNetwork.DockerNetworkNames = []string{b.te.Network.Name} + networkConfig, rpcProvider, err = b.te.StartEthereumNetwork(b.ethereumNetwork) + if err != nil { + return nil, err + } + b.te.RpcProvider = rpcProvider + b.te.PrivateEthereumConfig = b.ethereumNetwork + } + + if !b.isNonEVM { + if b.evmClientNetworkOption != nil && len(b.evmClientNetworkOption) > 0 { + for _, fn := range b.evmClientNetworkOption { + fn(&networkConfig) + } + } + bc, err := blockchain.NewEVMClientFromNetwork(networkConfig, b.l) + if err != nil { + return nil, err + } + + b.te.EVMClient = bc + cd, 
err := contracts.NewContractDeployer(bc, b.l) + if err != nil { + return nil, err + } + b.te.ContractDeployer = cd + + cl, err := contracts.NewContractLoader(bc, b.l) + if err != nil { + return nil, err + } + b.te.ContractLoader = cl + } + + var nodeCsaKeys []string + + // Start Plugin Nodes + if b.clNodesCount > 0 { + var cfg *plugin.Config + if b.clNodeConfig != nil { + cfg = b.clNodeConfig + } else { + cfg = node.NewConfig(node.NewBaseConfig(), + node.WithOCR1(), + node.WithP2Pv2(), + ) + } + + if !b.isNonEVM { + var httpUrls []string + var wsUrls []string + if networkConfig.Simulated { + httpUrls = rpcProvider.PrivateHttpUrls() + wsUrls = rpcProvider.PrivateWsUrsl() + } else { + httpUrls = networkConfig.HTTPURLs + wsUrls = networkConfig.URLs + } + + node.SetChainConfig(cfg, wsUrls, httpUrls, networkConfig, b.hasForwarders) + + if b.chainOptionsFn != nil && len(b.chainOptionsFn) > 0 { + for _, fn := range b.chainOptionsFn { + for _, evmCfg := range cfg.EVM { + fn(&evmCfg.Chain) + } + } + } + } + + err := b.te.StartClCluster(cfg, b.clNodesCount, b.secretsConfig, b.testConfig, b.clNodesOpts...) + if err != nil { + return nil, err + } + + nodeCsaKeys, err = b.te.ClCluster.NodeCSAKeys() + if err != nil { + return nil, err + } + b.defaultNodeCsaKeys = nodeCsaKeys + } + + if b.ethereumNetwork != nil && b.clNodesCount > 0 && b.ETHFunds != nil { + b.te.ParallelTransactions(true) + defer b.te.ParallelTransactions(false) + if err := b.te.FundPluginNodes(b.ETHFunds); err != nil { + return nil, err + } + } + + var enDesc string + if b.te.PrivateEthereumConfig != nil { + enDesc = b.te.PrivateEthereumConfig.Describe() + } else { + enDesc = "none" + } + + b.l.Info(). + Str("privateEthereumNetwork", enDesc). + Bool("hasKillgrave", b.hasKillgrave). + Int("clNodesCount", b.clNodesCount). + Strs("customNodeCsaKeys", b.customNodeCsaKeys). + Strs("defaultNodeCsaKeys", b.defaultNodeCsaKeys). 
+ Msg("Building CL cluster test environment..") + + return b.te, nil +} diff --git a/integration-tests/docker/test_env/test_env_config.go b/integration-tests/docker/test_env/test_env_config.go new file mode 100644 index 00000000..394f0a66 --- /dev/null +++ b/integration-tests/docker/test_env/test_env_config.go @@ -0,0 +1,39 @@ +package test_env + +import ( + "encoding/json" + + cte "github.com/goplugin/plugin-testing-framework/docker/test_env" + env "github.com/goplugin/pluginv3.0/integration-tests/types/envcommon" +) + +type TestEnvConfig struct { + Networks []string `json:"networks"` + Geth GethConfig `json:"geth"` + MockAdapter MockAdapterConfig `json:"mock_adapter"` + ClCluster *ClCluster `json:"clCluster"` + EthereumNetwork *cte.EthereumNetwork `json:"private_ethereum_config"` +} + +type MockAdapterConfig struct { + ContainerName string `json:"container_name"` + ImpostersPath string `json:"imposters_path"` +} + +type GethConfig struct { + ContainerName string `json:"container_name"` +} + +func NewTestEnvConfigFromFile(path string) (*TestEnvConfig, error) { + c := &TestEnvConfig{} + err := env.ParseJSONFile(path, c) + if err != nil { + return nil, err + } + return c, nil +} + +func (c *TestEnvConfig) Json() string { + b, _ := json.Marshal(c) + return string(b) +} diff --git a/integration-tests/example.env b/integration-tests/example.env new file mode 100644 index 00000000..2bb0fa2f --- /dev/null +++ b/integration-tests/example.env @@ -0,0 +1,27 @@ +# An example template that you can use for your own .env file for integration test settings +# `source ./integration-tests/.env` + +########## General Test Settings ########## +export PLUGIN_ENV_USER="Satoshi-Nakamoto" # Name of the person running the tests (change to your own) +export TEST_LOG_LEVEL="info" # info | debug | trace + +########## Soak/Chaos/Load Test Specific Settings ########## +# Remote Runner +export ENV_JOB_IMAGE="image-location/plugin-tests:test-tag" # Image repo to pull the remote-test-runner 
image from. Check the Integration Tests workflow. +export DETACH_RUNNER="true" # true 99% of the time, false if you are debugging soak test issues +export TEST_SUITE="soak" # soak | chaos | load + +# Slack Notification Settings +export SLACK_API_KEY="xoxb-example-key" # API key used to report soak test results to slack +export SLACK_CHANNEL="C000000000" # Channel ID for the slack bot to post test results +export SLACK_USER="U000000000" # User ID of the person running the soak tests to properly notify them + +########## Network Settings ########## +# General EVM Settings, used only for quick prototyping when using GENERAL as the SELECTED_NETWORK +export EVM_NAME="General EVM" +export EVM_CHAIN_ID="1" +export EVM_SIMULATED="false" +export EVM_PLUGIN_TRANSACTION_LIMIT="5000" +export EVM_TRANSACTION_TIMEOUT="2m" +export EVM_MINIMUM_CONFIRMATIONS="1" +export EVM_GAS_ESTIMATION_BUFFER="1000" \ No newline at end of file diff --git a/integration-tests/go.mod b/integration-tests/go.mod new file mode 100644 index 00000000..37819c5e --- /dev/null +++ b/integration-tests/go.mod @@ -0,0 +1,496 @@ +module github.com/goplugin/pluginv3.0/integration-tests + +go 1.21.4 + +// Make sure we're working with the latest plugin libs +replace github.com/goplugin/pluginv3.0/v2 => ../ + +require ( + github.com/K-Phoen/grabana v0.22.1 + github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df + github.com/cli/go-gh/v2 v2.0.0 + github.com/ethereum/go-ethereum v1.13.8 + github.com/go-resty/resty/v2 v2.7.0 + github.com/google/go-cmp v0.6.0 + github.com/google/uuid v1.4.0 + github.com/jmoiron/sqlx v1.3.5 + github.com/lib/pq v1.10.9 + github.com/manifoldco/promptui v0.9.0 + github.com/onsi/gomega v1.30.0 + github.com/pelletier/go-toml/v2 v2.1.1 + github.com/pkg/errors v0.9.1 + github.com/rs/zerolog v1.30.0 + github.com/scylladb/go-reflectx v1.0.1 + github.com/segmentio/ksuid v1.0.4 + github.com/slack-go/slack v0.12.2 + github.com/goplugin/plugin-automation 
v1.0.2-0.20240118014648-1ab6a88c9429 + github.com/goplugin/plugin-common v0.1.7-0.20240216174848-c7f1809138d6 + github.com/goplugin/plugin-testing-framework v1.23.2 + github.com/goplugin/plugin-vrf v0.0.0-20231120191722-fef03814f868 + github.com/goplugin/pluginv3.0/v2 v2.0.0-00010101000000-000000000000 + github.com/goplugin/libocr v0.0.0-20240215150045-fe2ba71b2f0a + github.com/goplugin/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 + github.com/goplugin/wasp v0.4.2 + github.com/spf13/cobra v1.8.0 + github.com/stretchr/testify v1.8.4 + github.com/test-go/testify v1.1.4 + github.com/testcontainers/testcontainers-go v0.23.0 + github.com/umbracle/ethgo v0.1.3 + go.dedis.ch/kyber/v3 v3.1.0 + go.uber.org/ratelimit v0.2.0 + go.uber.org/zap v1.26.0 + golang.org/x/sync v0.6.0 + golang.org/x/text v0.14.0 + gopkg.in/guregu/null.v4 v4.0.0 +) + +// avoids ambiguous imports of indirect dependencies +exclude github.com/hashicorp/consul v1.2.1 + +replace ( + github.com/testcontainers/testcontainers-go => github.com/Tofel/testcontainers-go v0.0.0-20231130110817-e6fbf9498b56 + // Pin K8s versions as their updates are highly disruptive and go mod keeps wanting to update them + k8s.io/api => k8s.io/api v0.25.11 + k8s.io/client-go => k8s.io/client-go v0.25.11 + k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d +) + +require ( + contrib.go.opencensus.io/exporter/stackdriver v0.13.5 // indirect + cosmossdk.io/api v0.3.1 // indirect + cosmossdk.io/core v0.5.1 // indirect + cosmossdk.io/depinject v1.0.0-alpha.3 // indirect + cosmossdk.io/errors v1.0.0 // indirect + cosmossdk.io/math v1.0.1 // indirect + dario.cat/mergo v1.0.0 // indirect + filippo.io/edwards25519 v1.0.0 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect + 
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect + github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d // indirect + github.com/CosmWasm/wasmd v0.40.1 // indirect + github.com/CosmWasm/wasmvm v1.2.4 // indirect + github.com/DataDog/zstd v1.5.2 // indirect + github.com/K-Phoen/sdk v0.12.4 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/hcsshim v0.11.1 // indirect + github.com/VictoriaMetrics/fastcache v1.12.1 // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/avast/retry-go v3.0.0+incompatible // indirect + github.com/avast/retry-go/v4 v4.5.1 // indirect + github.com/aws/aws-sdk-go v1.45.25 // indirect + github.com/aws/constructs-go/constructs/v10 v10.1.255 // indirect + github.com/aws/jsii-runtime-go v1.75.0 // indirect + github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect + github.com/bits-and-blooms/bitset v1.10.0 // indirect + github.com/blendle/zapdriver v1.3.1 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/buger/jsonparser v1.1.1 // indirect + github.com/bytedance/sonic v1.10.1 // indirect + 
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b // indirect + github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 // indirect + github.com/cdk8s-team/cdk8s-core-go/cdk8s/v2 v2.7.5 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/chaos-mesh/chaos-mesh/api/v1alpha1 v0.0.0-20220226050744-799408773657 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect + github.com/chenzhuoyu/iasm v0.9.0 // indirect + github.com/chzyer/readline v1.5.1 // indirect + github.com/cli/safeexec v1.0.0 // indirect + github.com/cockroachdb/errors v1.9.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect + github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/cometbft/cometbft v0.37.2 // indirect + github.com/cometbft/cometbft-db v0.7.0 // indirect + github.com/confio/ics23/go v0.9.0 // indirect + github.com/consensys/bavard v0.1.13 // indirect + github.com/consensys/gnark-crypto v0.12.1 // indirect + github.com/containerd/containerd v1.7.7 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.2 // indirect + github.com/cosmos/cosmos-sdk v0.47.4 // indirect + github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/gogoproto v1.4.11 // indirect + github.com/cosmos/iavl v0.20.0 // indirect + github.com/cosmos/ibc-go/v7 v7.0.1 // indirect + github.com/cosmos/ics23/go 
v0.9.1-0.20221207100636-b1abd8678aab // indirect + github.com/cosmos/ledger-cosmos-go v0.12.1 // indirect + github.com/cpuguy83/dockercfg v0.3.1 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect + github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect + github.com/danieljoos/wincred v1.1.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/deckarep/golang-set/v2 v2.3.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/dennwc/varint v1.0.0 // indirect + github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70 // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v24.0.7+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/edsrzf/mmap-go v1.1.0 // indirect + github.com/emicklei/go-restful/v3 v3.10.2 // indirect + github.com/esote/minmaxheap v1.0.0 // indirect + github.com/ethereum/c-kzg-4844 v0.4.0 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect + github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect + github.com/fatih/camelcase v1.0.0 // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fvbommel/sortorder v1.0.2 // indirect + 
github.com/fxamacker/cbor/v2 v2.5.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/gagliardetto/binary v0.7.1 // indirect + github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27 // indirect + github.com/gagliardetto/treeout v0.1.4 // indirect + github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect + github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect + github.com/getsentry/sentry-go v0.19.0 // indirect + github.com/gin-contrib/sessions v0.0.5 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/gin-gonic/gin v1.9.1 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-ldap/ldap/v3 v3.4.6 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.4 // indirect + github.com/go-openapi/jsonpointer v0.20.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/spec v0.20.9 // indirect + github.com/go-openapi/strfmt v0.21.7 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-openapi/validate v0.22.1 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.15.5 // indirect + github.com/go-redis/redis/v8 v8.11.5 // indirect + github.com/go-webauthn/webauthn v0.9.4 // indirect + github.com/go-webauthn/x v0.1.5 // indirect + github.com/goccy/go-json 
v0.10.2 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/gogo/googleapis v1.4.1 // indirect + github.com/gogo/protobuf v1.3.3 // indirect + github.com/gogo/status v1.1.1 // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect + github.com/golang/glog v1.1.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/gnostic v0.6.9 // indirect + github.com/google/go-github/v41 v41.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/go-tpm v0.9.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/gorilla/context v1.1.1 // indirect + github.com/gorilla/mux v1.8.0 // indirect + github.com/gorilla/securecookie v1.1.2 // indirect + github.com/gorilla/sessions v1.2.2 // indirect + github.com/gorilla/websocket v1.5.1 // indirect + github.com/gosimple/slug v1.13.1 // indirect + github.com/gosimple/unidecode v1.0.1 // indirect + github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f // indirect + github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 // indirect + github.com/grafana/loki v1.6.2-0.20231215164305-b51b7d7b5503 // indirect + github.com/grafana/loki/pkg/push v0.0.0-20231201111602-11ef833ed3e4 // indirect + github.com/grafana/pyroscope-go v1.0.4 // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.4 // indirect + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect 
+ github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/gtank/merlin v0.1.1 // indirect + github.com/gtank/ristretto255 v0.1.2 // indirect + github.com/hashicorp/consul/api v1.25.1 // indirect + github.com/hashicorp/consul/sdk v0.14.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-envparse v0.1.0 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/golang-lru v0.6.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/memberlist v0.5.0 // indirect + github.com/hashicorp/serf v0.10.1 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/hdevalence/ed25519consensus v0.1.0 // indirect + github.com/holiman/bloomfilter/v2 v2.0.3 // indirect + github.com/holiman/uint256 v1.2.4 // indirect + github.com/huandu/skiplist v1.2.0 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.14.1 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.2 
// indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgtype v1.14.0 // indirect + github.com/jackc/pgx/v4 v4.18.1 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + github.com/jonboulle/clockwork v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/julienschmidt/httprouter v1.3.0 // indirect + github.com/kelseyhightower/envconfig v1.4.0 // indirect + github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a // indirect + github.com/leodido/go-urn v1.2.4 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/logrusorgru/aurora v2.0.3+incompatible // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/miekg/dns v1.1.56 // indirect + github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure 
v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/mmcloughlin/addchain v0.4.0 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/mwitkow/grpc-proxy v0.0.0-20230212185441-f345521cb9c9 // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/opencontainers/runc v1.1.7 // indirect + github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect + github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/otiai10/copy v1.14.0 // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/alertmanager v0.26.0 // indirect + github.com/prometheus/client_golang v1.17.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/common/sigv4 v0.1.0 // indirect + github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/prometheus v0.48.1 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/russross/blackfriday v1.6.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect + github.com/sercand/kuberesolver/v5 v5.1.1 // indirect + github.com/shirou/gopsutil v3.21.11+incompatible // indirect + github.com/shirou/gopsutil/v3 v3.23.11 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/goplugin/caigo v0.0.0-20230621050857-b29a4ca8c704 // indirect + github.com/goplugin/chain-selectors v1.0.10 // indirect + github.com/goplugin/plugin-cosmos v0.4.1-0.20240213120401-01a23955f9f8 // indirect + github.com/goplugin/plugin-data-streams v0.0.0-20240214203158-47dae5de1336 // indirect + github.com/goplugin/plugin-feeds v0.0.0-20240119021347-3c541a78cdb8 // indirect + github.com/goplugin/plugin-solana v1.0.3-0.20240213161921-c4d342b761b0 // indirect + github.com/goplugin/plugin-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0 // indirect + github.com/goplugin/tdh2/go/ocr2/decryptionplugin 
v0.0.0-20230906073235-9e478e5e19f1 // indirect + github.com/goplugin/wsrpc v0.7.2 // indirect + github.com/soheilhy/cmux v0.1.5 // indirect + github.com/sony/gobreaker v0.5.0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.15.0 // indirect + github.com/status-im/keycard-go v0.2.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + github.com/supranational/blst v0.3.11 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125 // indirect + github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a // indirect + github.com/tidwall/btree v1.6.0 // indirect + github.com/tidwall/gjson v1.17.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/tyler-smith/go-bip39 v1.1.0 // indirect + github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect + github.com/uber/jaeger-lib v2.4.1+incompatible // indirect + github.com/ugorji/go/codec v1.2.12 // indirect + github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 // indirect + github.com/valyala/fastjson v1.4.1 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xlab/treeprint v1.1.0 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/zondax/hid v0.9.1 // indirect + github.com/zondax/ledger-go v0.14.1 // indirect + go.dedis.ch/fixbuf v1.0.3 // indirect + go.etcd.io/bbolt v1.3.7 // indirect 
+ go.etcd.io/etcd/api/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/v3 v3.5.7 // indirect + go.mongodb.org/mongo-driver v1.12.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 // indirect + go.opentelemetry.io/collector/semconv v0.87.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/sdk v1.21.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.starlark.net v0.0.0-20220817180228-f738f5508c12 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/goleak v1.3.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect + golang.org/x/arch v0.7.0 // indirect + golang.org/x/crypto v0.19.0 // indirect + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect + golang.org/x/mod v0.15.0 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/oauth2 v0.17.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/term v0.17.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.18.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + gonum.org/v1/gonum v0.14.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/genproto/googleapis/rpc 
v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.32.0 // indirect + gopkg.in/guregu/null.v2 v2.1.2 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.28.2 // indirect + k8s.io/apiextensions-apiserver v0.25.3 // indirect + k8s.io/apimachinery v0.28.2 // indirect + k8s.io/cli-runtime v0.25.11 // indirect + k8s.io/client-go v0.28.2 // indirect + k8s.io/component-base v0.26.2 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/kubectl v0.25.11 // indirect + k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect + nhooyr.io/websocket v1.8.7 // indirect + pgregory.net/rapid v0.5.5 // indirect + rsc.io/tmplfunc v0.0.3 // indirect + sigs.k8s.io/controller-runtime v0.13.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.12.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) + +replace ( + github.com/go-kit/log => github.com/go-kit/log v0.2.1 + + // replicating the replace directive on cosmos SDK + github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 + + // until merged upstream: https://github.com/hashicorp/go-plugin/pull/257 + github.com/hashicorp/go-plugin => github.com/goplugin/go-plugin v0.0.0-20240208201424-b3b91517de16 + + // until merged upstream: https://github.com/mwitkow/grpc-proxy/pull/69 + github.com/mwitkow/grpc-proxy => github.com/goplugin/grpc-proxy v0.0.0-20230731113816-f1be6620749f + + // type func(a Label, b Label) bool of func(a, b Label) bool {…} does not match inferred type func(a Label, b Label) int for func(a E, b E) int + 
github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510 +) diff --git a/integration-tests/go.sum b/integration-tests/go.sum new file mode 100644 index 00000000..6cbaf558 --- /dev/null +++ b/integration-tests/go.sum @@ -0,0 +1,2418 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.110.9 
h1:e7ITSqGFFk4rbz/JFIqZh3G4VEHguhAL4BQcFlWtU68= +cloud.google.com/go v0.110.9/go.mod h1:rpxevX/0Lqvlbc88b7Sc1SPNdyK1riNBTUU6JXhYNpM= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/iam v1.1.4 h1:K6n/GZHFTtEoKT5aUG3l9diPi0VduZNQ1PfdnpkkIFk= +cloud.google.com/go/iam v1.1.4/go.mod h1:l/rg8l1AaA+VFMho/HYx2Vv6xinPSLMF8qfhRPIZ0L8= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod 
h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= +cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +contrib.go.opencensus.io/exporter/stackdriver v0.12.6/go.mod h1:8x999/OcIPy5ivx/wDiV7Gx4D+VUPODf0mWRGRc5kSk= +contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +contrib.go.opencensus.io/exporter/stackdriver v0.13.5 h1:TNaexHK16gPUoc7uzELKOU7JULqccn1NDuqUxmxSqfo= +contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +cosmossdk.io/api v0.3.1 h1:NNiOclKRR0AOlO4KIqeaG6PS6kswOMhHD0ir0SscNXE= +cosmossdk.io/api v0.3.1/go.mod h1:DfHfMkiNA2Uhy8fj0JJlOCYOBp4eWUUJ1te5zBGNyIw= +cosmossdk.io/core v0.5.1 h1:vQVtFrIYOQJDV3f7rw4pjjVqc1id4+mE0L9hHP66pyI= +cosmossdk.io/core v0.5.1/go.mod h1:KZtwHCLjcFuo0nmDc24Xy6CRNEL9Vl/MeimQ2aC7NLE= +cosmossdk.io/depinject v1.0.0-alpha.3 h1:6evFIgj//Y3w09bqOUOzEpFj5tsxBqdc5CfkO7z+zfw= +cosmossdk.io/depinject v1.0.0-alpha.3/go.mod h1:eRbcdQ7MRpIPEM5YUJh8k97nxHpYbc3sMUnEtt8HPWU= +cosmossdk.io/errors v1.0.0 h1:nxF07lmlBbB8NKQhtJ+sJm6ef5uV1XkvPXG2bUntb04= +cosmossdk.io/errors v1.0.0/go.mod h1:+hJZLuhdDE0pYN8HkOrVNwrIOYvUGnn6+4fjnJs/oV0= +cosmossdk.io/log v1.1.1-0.20230704160919-88f2c830b0ca h1:msenprh2BLLRwNT7zN56TbBHOGk/7ARQckXHxXyvjoQ= +cosmossdk.io/log v1.1.1-0.20230704160919-88f2c830b0ca/go.mod h1:PkIAKXZvaxrTRc++z53XMRvFk8AcGGWYHcMIPzVYX9c= +cosmossdk.io/math v1.0.1 h1:Qx3ifyOPaMLNH/89WeZFH268yCvU4xEcnPLu3sJqPPg= +cosmossdk.io/math v1.0.1/go.mod 
h1:Ygz4wBHrgc7g0N+8+MrnTfS9LLn9aaTGa9hKopuym5k= +cosmossdk.io/tools/rosetta v0.2.1 h1:ddOMatOH+pbxWbrGJKRAawdBkPYLfKXutK9IETnjYxw= +cosmossdk.io/tools/rosetta v0.2.1/go.mod h1:Pqdc1FdvkNV3LcNIkYWt2RQY6IP1ge6YWZk8MhhO9Hw= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= +filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.1 h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo87o= +github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AlekSi/pointer v1.1.0 h1:SSDMPcXD9jSl8FPy9cRzoRaMJtm9g9ggGTxecRUbQoI= +github.com/AlekSi/pointer v1.1.0/go.mod h1:y7BvfRI3wXPWKXEBhU71nbnIEEZX0QTSB2Bj48UJIZE= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= +github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 
h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= +github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= +github.com/Azure/go-autorest/autorest/validation 
v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= +github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= +github.com/CosmWasm/wasmd v0.40.1 h1:LxbO78t/6S8TkeQlUrJ0m5O87HtAwLx4RGHq3rdrOEU= +github.com/CosmWasm/wasmd v0.40.1/go.mod h1:6EOwnv7MpuFaEqxcUOdFV9i4yvrdOciaY6VQ1o7A3yg= +github.com/CosmWasm/wasmvm v1.2.4 h1:6OfeZuEcEH/9iqwrg2pkeVtDCkMoj9U6PpKtcrCyVrQ= +github.com/CosmWasm/wasmvm v1.2.4/go.mod h1:vW/E3h8j9xBQs9bCoijDuawKo9kCtxOaS8N8J7KFtkc= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod 
h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Depado/ginprom v1.8.0 h1:zaaibRLNI1dMiiuj1MKzatm8qrcHzikMlCc1anqOdyo= +github.com/Depado/ginprom v1.8.0/go.mod h1:XBaKzeNBqPF4vxJpNLincSQZeMDnZp1tIbU0FU0UKgg= +github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/K-Phoen/grabana v0.22.1 h1:b/O+C3H2H6VNYSeMCYUO4X4wYuwFXgBcRkvYa+fjpQA= +github.com/K-Phoen/grabana v0.22.1/go.mod h1:3LTXrTzQzTKTgvKSXdRjlsJbizSOW/V23Q3iX00R5bU= +github.com/K-Phoen/sdk v0.12.4 h1:j2EYuBJm3zDTD0fGKACVFWxAXtkR0q5QzfVqxmHSeGQ= +github.com/K-Phoen/sdk v0.12.4/go.mod h1:qmM0wO23CtoDux528MXPpYvS4XkRWkWX6rvX9Za8EVU= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 
h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.11.1 h1:hJ3s7GbWlGK4YVV92sO88BQSyF4ZLVy7/awqOlPxFbA= +github.com/Microsoft/hcsshim v0.11.1/go.mod h1:nFJmaO4Zr5Y7eADdFOpYswDDlNVbvcIJJNJLECr5JQg= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= +github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= +github.com/Tofel/testcontainers-go v0.0.0-20231130110817-e6fbf9498b56 h1:HItfr1XKD/4xnsJE56m3uxnkMQ9lbg8xDnkf9qoZCH0= +github.com/Tofel/testcontainers-go v0.0.0-20231130110817-e6fbf9498b56/go.mod h1:ICriE9bLX5CLxL9OFQ2N+2N+f+803LNJ1utJb1+Inx0= +github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= +github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/Workiva/go-datastructures v1.1.0 
h1:hu20UpgZneBhQ3ZvwiOGlqJSKIosin2Rd5wAKUHEO/k= +github.com/Workiva/go-datastructures v1.1.0/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alecthomas/participle/v2 v2.0.0-alpha7 h1:cK4vjj0VSgb3lN1nuKA5F7dw+1s1pWBe5bx7nNCnN+c= +github.com/alecthomas/participle/v2 v2.0.0-alpha7/go.mod h1:NumScqsC42o9x+dGj8/YqsIfhrIQjFEOFovxotbBirA= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= +github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= +github.com/alicebob/miniredis/v2 v2.30.4 
h1:8S4/o1/KoUArAGbGwPxcwf0krlzceva2XVOSchFS7Eo= +github.com/alicebob/miniredis/v2 v2.30.4/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6uH3VlUfb/HS5zKg= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= +github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 h1:MzBOUgng9orim59UnfUTLRjMpd09C5uEVQ6RPGeCaVI= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= 
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= +github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= +github.com/avast/retry-go/v4 v4.5.1 h1:AxIx0HGi4VZ3I02jr78j5lZ3M6x1E0Ivxa6b0pUUh7o= +github.com/avast/retry-go/v4 v4.5.1/go.mod h1:/sipNsvNB3RRuT5iNcb6h73nw3IBmXJ/H3XrCQYSOpc= +github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= +github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/constructs-go/constructs/v10 v10.1.255 h1:5hARfEmhBqHSTQf/C3QLA3sWOxO2Dfja0iA1W7ZcI7g= +github.com/aws/constructs-go/constructs/v10 v10.1.255/go.mod h1:DCdBSjN04Ck2pajCacTD4RKFqSA7Utya8d62XreYctI= +github.com/aws/jsii-runtime-go v1.75.0 h1:NhpUfyiL7/wsRuUekFsz8FFBCYLfPD/l61kKg9kL/a4= +github.com/aws/jsii-runtime-go v1.75.0/go.mod h1:TKCyrtM0pygEPo4rDZzbMSDNCDNTSYSN6/mGyHI6O3I= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= +github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df h1:GSoSVRLoBaFpOOds6QyY1L8AX7uoY+Ln3BHc22W40X0= +github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df/go.mod h1:hiVxq5OP2bUGBRNS3Z/bt/reCLFNbdcST6gISi1fiOM= +github.com/benbjohnson/clock v1.1.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= +github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE= +github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc= +github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= +github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.3 
h1:SDlJ7bAm4ewvrmZtR0DaiYbQGdKPeaaIm7bM+qRhFeU= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.3/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA= +github.com/bxcodec/faker v2.0.1+incompatible/go.mod h1:BNzfpVdTwnFJ6GtfYTcQu6l6rHShT+veBxNCnjCx5XM= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= +github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc= +github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY= +github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= +github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 h1:SjZ2GvvOononHOpK84APFuMvxqsk3tEIaKH/z4Rpu3g= +github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE= +github.com/cdk8s-team/cdk8s-core-go/cdk8s/v2 v2.7.5 h1:rvc39Ol6z3MvaBzXkxFC6Nfsnixq/dRypushKDd7Nc0= +github.com/cdk8s-team/cdk8s-core-go/cdk8s/v2 v2.7.5/go.mod h1:R/pdNYDYFQk+tuuOo7QES1kkv6OLmp5ze2XBZQIVffM= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.2.1 
h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU= +github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/chaos-mesh/chaos-mesh/api/v1alpha1 v0.0.0-20220226050744-799408773657 h1:CyuI+igIjadM/GRnE2o0q+WCwipDh0n2cUYFPAvxziM= +github.com/chaos-mesh/chaos-mesh/api/v1alpha1 v0.0.0-20220226050744-799408773657/go.mod h1:JRiumF+RFsH1mrrP8FUsi9tExPylKkO/oSRWeQEUdLE= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= +github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo= +github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= +github.com/chzyer/logex v1.1.10/go.mod 
h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/cli/go-gh/v2 v2.0.0 h1:JAgQY7VNHletsO0Eqr+/PzF7fF5QEjhY2t2+Tev3vmk= +github.com/cli/go-gh/v2 v2.0.0/go.mod h1:2/ox3Dnc8wDBT5bnTAH1aKGy6Qt1ztlFBe10EufnvoA= +github.com/cli/safeexec v1.0.0 h1:0VngyaIyqACHdcMNWfo6+KdUYnqEr2Sg+bSP1pdF+dI= +github.com/cli/safeexec v1.0.0/go.mod h1:Z/D4tTN8Vs5gXYHDCbaM1S/anmEDnJb1iW0+EJ5zx3Q= +github.com/cli/shurcooL-graphql v0.0.3 h1:CtpPxyGDs136/+ZeyAfUKYmcQBjDlq5aqnrDCW5Ghh8= +github.com/cli/shurcooL-graphql v0.0.3/go.mod h1:tlrLmw/n5Q/+4qSvosT+9/W5zc8ZMjnJeYBxSdb4nWA= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go 
v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= +github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= +github.com/cockroachdb/apd/v3 v3.1.0 h1:MK3Ow7LH0W8zkd5GMKA1PvS9qG3bWFI95WaVNfyZJ/w= +github.com/cockroachdb/apd/v3 v3.1.0/go.mod h1:6qgPBMXjATAdD/VefbRP9NoSLKjbB4LCoA7gN4LpHs4= +github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= +github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= +github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= +github.com/cockroachdb/redact v1.1.3 
h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= +github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/coinbase/rosetta-sdk-go/types v1.0.0 h1:jpVIwLcPoOeCR6o1tU+Xv7r5bMONNbHU7MuEHboiFuA= +github.com/coinbase/rosetta-sdk-go/types v1.0.0/go.mod h1:eq7W2TMRH22GTW0N0beDnN931DW0/WOI1R2sdHNHG4c= +github.com/cometbft/cometbft v0.37.2 h1:XB0yyHGT0lwmJlFmM4+rsRnczPlHoAKFX6K8Zgc2/Jc= +github.com/cometbft/cometbft v0.37.2/go.mod h1:Y2MMMN//O5K4YKd8ze4r9jmk4Y7h0ajqILXbH5JQFVs= +github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= +github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= +github.com/confio/ics23/go v0.9.0 h1:cWs+wdbS2KRPZezoaaj+qBleXgUk5WOQFMP3CQFGTr4= +github.com/confio/ics23/go v0.9.0/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= +github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= +github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= +github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= +github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/containerd/containerd v1.7.7 h1:QOC2K4A42RQpcrZyptP6z9EJZnlHfHJUfZrAAHe15q4= +github.com/containerd/containerd v1.7.7/go.mod h1:3c4XZv6VeT9qgf9GMTxNTMFxGJrGpI2vz1yk4ye+YY8= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= +github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= 
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-proto v1.0.0-beta.2 h1:X3OKvWgK9Gsejo0F1qs5l8Qn6xJV/AzgIWR2wZ8Nua8= +github.com/cosmos/cosmos-proto v1.0.0-beta.2/go.mod h1:+XRCLJ14pr5HFEHIUcn51IKXD1Fy3rkEQqt4WqmN4V0= +github.com/cosmos/cosmos-sdk v0.47.4 h1:FVUpEprm58nMmBX4xkRdMDaIG5Nr4yy92HZAfGAw9bg= +github.com/cosmos/cosmos-sdk v0.47.4/go.mod h1:R5n+uM7vguVPFap4pgkdvQCT1nVo/OtPwrlAU40rvok= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod 
h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.4.11 h1:LZcMHrx4FjUgrqQSWeaGC1v/TeuVFqSLa43CC6aWR2g= +github.com/cosmos/gogoproto v1.4.11/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= +github.com/cosmos/iavl v0.20.0 h1:fTVznVlepH0KK8NyKq8w+U7c2L6jofa27aFX6YGlm38= +github.com/cosmos/iavl v0.20.0/go.mod h1:WO7FyvaZJoH65+HFOsDir7xU9FWk2w9cHXNW1XHcl7A= +github.com/cosmos/ibc-go/v7 v7.0.1 h1:NIBNRWjlOoFvFQu1ZlgwkaSeHO5avf4C1YQiWegt8jw= +github.com/cosmos/ibc-go/v7 v7.0.1/go.mod h1:vEaapV6nuLPQlS+g8IKmxMo6auPi0i7HMv1PhViht/E= +github.com/cosmos/ics23/go v0.9.1-0.20221207100636-b1abd8678aab h1:I9ialKTQo7248V827Bba4OuKPmk+FPzmTVHsLXaIJWw= +github.com/cosmos/ics23/go v0.9.1-0.20221207100636-b1abd8678aab/go.mod h1:2CwqasX5dSD7Hbp/9b6lhK6BwoBDCBldx7gPKRukR60= +github.com/cosmos/ledger-cosmos-go v0.12.1 h1:sMBxza5p/rNK/06nBSNmsI/WDqI0pVJFVNihy1Y984w= +github.com/cosmos/ledger-cosmos-go v0.12.1/go.mod h1:dhO6kj+Y+AHIOgAe4L9HL/6NDdyyth4q238I9yFpD2g= +github.com/cosmos/rosetta-sdk-go v0.10.0 h1:E5RhTruuoA7KTIXUcMicL76cffyeoyvNybzUGSKFTcM= +github.com/cosmos/rosetta-sdk-go v0.10.0/go.mod h1:SImAZkb96YbwvoRkzSMQB6noNJXFgWl/ENIznEoYQI4= +github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= +github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 
+github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= +github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= +github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= +github.com/creachadair/taskgroup v0.4.2 h1:jsBLdAJE42asreGss2xZGZ8fJra7WtwnHWeJFxv2Li8= +github.com/creachadair/taskgroup v0.4.2/go.mod h1:qiXUOSrbwAY3u0JPGTzObbE3yf9hcXHDKBZ2ZjpCbgM= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cucumber/common/gherkin/go/v22 v22.0.0 h1:4K8NqptbvdOrjL9DEea6HFjSpbdT9+Q5kgLpmmsHYl0= +github.com/cucumber/common/gherkin/go/v22 v22.0.0/go.mod h1:3mJT10B2GGn3MvVPd3FwR7m2u4tLhSRhWUqJU4KN4Fg= +github.com/cucumber/common/messages/go/v17 v17.1.1 h1:RNqopvIFyLWnKv0LfATh34SWBhXeoFTJnSrgm9cT/Ts= +github.com/cucumber/common/messages/go/v17 v17.1.1/go.mod h1:bpGxb57tDE385Rb2EohgUadLkAbhoC4IyCFi89u/JQI= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/danielkov/gin-helmet v0.0.0-20171108135313-1387e224435e h1:5jVSh2l/ho6ajWhSPNN84eHEdq3dp0T7+f6r3Tc6hsk= +github.com/danielkov/gin-helmet 
v0.0.0-20171108135313-1387e224435e/go.mod h1:IJgIiGUARc4aOr4bOQ85klmjsShkEEfiRc6q/yBSfo8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set/v2 v2.3.0 h1:qs18EKUfHm2X9fA50Mr/M5hccg2tNnVqsiBImnyDs0g= +github.com/deckarep/golang-set/v2 v2.3.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= +github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dfuse-io/logging v0.0.0-20201110202154-26697de88c79/go.mod h1:V+ED4kT/t/lKtH99JQmKIb0v9WL3VaYkJ36CfHlVECI= +github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70 h1:CuJS05R9jmNlUK8GOxrEELPbfXm0EuGh/30LjkjN5vo= +github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70/go.mod h1:EoK/8RFbMEteaCaz89uessDTnCWjbbcr+DXcBh4el5o= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger/v2 v2.2007.4 
h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= +github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= +github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= +github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= +github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/esote/minmaxheap v1.0.0 h1:rgA7StnXXpZG6qlM0S7pUmEv1KpWe32rYT4x8J8ntaA= +github.com/esote/minmaxheap v1.0.0/go.mod h1:Ln8+i7fS1k3PLgZI2JAo0iA1as95QnIYiGCrqSJ5FZk= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= +github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/go-ethereum v1.13.8 h1:1od+thJel3tM52ZUNQwvpYOeRHlbkVFZ5S8fhi0Lgsg= +github.com/ethereum/go-ethereum v1.13.8/go.mod h1:sc48XYQxCzH3fG9BcrXCOOgQk2JfZzNAmIKnceogzsA= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f 
h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= 
+github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo= +github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= +github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gagliardetto/binary v0.6.1/go.mod h1:aOfYkc20U0deHaHn/LVZXiqlkDbFAX0FpTlDhsXa0S0= +github.com/gagliardetto/binary v0.7.1 h1:6ggDQ26vR+4xEvl/S13NcdLK3MUCi4oSy73pS9aI1cI= +github.com/gagliardetto/binary v0.7.1/go.mod 
h1:aOfYkc20U0deHaHn/LVZXiqlkDbFAX0FpTlDhsXa0S0= +github.com/gagliardetto/gofuzz v1.2.2 h1:XL/8qDMzcgvR4+CyRQW9UGdwPRPMHVJfqQ/uMvSUuQw= +github.com/gagliardetto/gofuzz v1.2.2/go.mod h1:bkH/3hYLZrMLbfYWA0pWzXmi5TTRZnu4pMGZBkqMKvY= +github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27 h1:q2IztKyRQUxJ6abXRsawaBtvDFvM+szj4jDqV4od1gs= +github.com/gagliardetto/solana-go v1.4.1-0.20220428092759-5250b4abbb27/go.mod h1:NFuoDwHPvw858ZMHUJr6bkhN8qHt4x6e+U3EYHxAwNY= +github.com/gagliardetto/treeout v0.1.4 h1:ozeYerrLCmCubo1TcIjFiOWTTGteOOHND1twdFpgwaw= +github.com/gagliardetto/treeout v0.1.4/go.mod h1:loUefvXTrlRG5rYmJmExNryyBRh8f89VZhmMOyCyqok= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= +github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813 h1:Uc+IZ7gYqAf/rSGFplbWBSHaGolEQlNLgMgSE3ccnIQ= +github.com/gedex/inflector v0.0.0-20170307190818-16278e9db813/go.mod h1:P+oSoE9yhSRvsmYyZsshflcR6ePWYLql6UU1amW13IM= +github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= +github.com/getsentry/sentry-go v0.19.0 h1:BcCH3CN5tXt5aML+gwmbFwVptLLQA+eT866fCO9wVOM= +github.com/getsentry/sentry-go v0.19.0/go.mod h1:y3+lGEFEFexZtpbG1GUE2WD/f9zGyKYwpEqryTOC/nE= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/cors v1.5.0 h1:DgGKV7DDoOn36DFkNtbHrjoRiT5ExCe+PC9/xp7aKvk= +github.com/gin-contrib/cors v1.5.0/go.mod 
h1:TvU7MAZ3EwrPLI2ztzTt3tqgvBCq+wn8WpZmfADjupI= +github.com/gin-contrib/expvar v0.0.1 h1:IuU5ArEgihz50vG8Onrwz22kJr7Mcvgv9xSSpfU5g+w= +github.com/gin-contrib/expvar v0.0.1/go.mod h1:8o2CznfQi1JjktORdHr2/abg3wSV6OCnXh0yGypvvVw= +github.com/gin-contrib/sessions v0.0.5 h1:CATtfHmLMQrMNpJRgzjWXD7worTh7g7ritsQfmF+0jE= +github.com/gin-contrib/sessions v0.0.5/go.mod h1:vYAuaUPqie3WUSsft6HUlCjlwwoJQs97miaG2+7neKY= +github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4 h1:Z9J0PVIt1PuibOShaOw1jH8hUYz+Ak8NLsR/GI0Hv5I= +github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4/go.mod h1:CEPcgZiz8998l9E8fDm16h8UfHRL7b+5oG0j/0koeVw= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA= +github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= +github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A= +github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= +github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference 
v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= +github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= +github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-playground/assert/v2 
v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24= +github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= +github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 
h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= +github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-webauthn/webauthn v0.9.4 h1:YxvHSqgUyc5AK2pZbqkWWR55qKeDPhP8zLDr6lpIc2g= +github.com/go-webauthn/webauthn v0.9.4/go.mod h1:LqupCtzSef38FcxzaklmOn7AykGKhAhr9xlRbdbgnTw= +github.com/go-webauthn/x v0.1.5 h1:V2TCzDU2TGLd0kSZOXdrqDVV5JB9ILnKxA9S53CSBw0= +github.com/go-webauthn/x v0.1.5/go.mod h1:qbzWwcFcv4rTwtCLOZd+icnr6B7oSsAGZJqlt8cukqY= +github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= +github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny 
v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= 
+github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= +github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.0 
h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
+github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v41 v41.0.0 h1:HseJrM2JFf2vfiZJ8anY2hqBjdfY1Vlj/K27ueww4gg= +github.com/google/go-github/v41 v41.0.0/go.mod h1:XgmCA5H323A9rtgExdTcnDkcqp6S30AVACCBDOonIxg= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/go-tpm v0.9.0 h1:sQF6YqWMi+SCXpsmS3fd21oPy/vSddwZry4JnmltHVk= +github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod 
h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod 
h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo= +github.com/gophercloud/gophercloud v1.5.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod 
h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= +github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= +github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= +github.com/gorilla/sessions v1.2.2 h1:lqzMYz6bOfvn2WriPUjNByzeXIlVzURcPmgMczkmTjY= +github.com/gorilla/sessions v1.2.2/go.mod h1:ePLdVu+jbEgHH+KWw8I1z2wqd0BAdAQh/8LRvBeoNcQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gosimple/slug v1.13.1 h1:bQ+kpX9Qa6tHRaK+fZR0A0M2Kd7Pa5eHPPsb1JpHD+Q= +github.com/gosimple/slug v1.13.1/go.mod h1:UiRaFH+GEilHstLUmcBgWcI42viBN7mAb818JrYOeFQ= +github.com/gosimple/unidecode v1.0.1 h1:hZzFTMMqSswvf0LBJZCZgThIZrpDHFXux9KeGmn6T/o= +github.com/gosimple/unidecode v1.0.1/go.mod h1:CP0Cr1Y1kogOtx0bJblKzsVWrqYaqfNOnHzpgWw4Awc= +github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f h1:gyojr97YeWZ70pKNakWv5/tKwBHuLy3icnIeCo9gQr4= +github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f/go.mod h1:8dsy5tQOkeNQyjXpm5mQsbCu3H5uzeBD35MzRQFznKU= +github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 h1:/of8Z8taCPftShATouOrBVy6GaTTjgQd/VfNiZp/VXQ= +github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/loki v1.6.2-0.20231215164305-b51b7d7b5503 h1:gdrsYbmk8822v6qvPwZO5DC6QjnAW7uKJ9YXnoUmV8c= +github.com/grafana/loki 
v1.6.2-0.20231215164305-b51b7d7b5503/go.mod h1:d8seWXCEXkL42mhuIJYcGi6DxfehzoIpLrMQWJojvOo= +github.com/grafana/loki/pkg/push v0.0.0-20231201111602-11ef833ed3e4 h1:wQ0FnSeebhJIBkgYOD06Mxk9HV2KhtEG0hp/7R+5RUQ= +github.com/grafana/loki/pkg/push v0.0.0-20231201111602-11ef833ed3e4/go.mod h1:f3JSoxBTPXX5ec4FxxeC19nTBSxoTz+cBgS3cYLMcr0= +github.com/grafana/pyroscope-go v1.0.4 h1:oyQX0BOkL+iARXzHuCdIF5TQ7/sRSel1YFViMHC7Bm0= +github.com/grafana/pyroscope-go v1.0.4/go.mod h1:0d7ftwSMBV/Awm7CCiYmHQEG8Y44Ma3YSjt+nWcWztY= +github.com/grafana/pyroscope-go/godeltaprof v0.1.4 h1:mDsJ3ngul7UfrHibGQpV66PbZ3q1T8glz/tK3bQKKEk= +github.com/grafana/pyroscope-go/godeltaprof v0.1.4/go.mod h1:1HSPtjU8vLG0jE9JrTdzjgFqdJ/VgN7fvxBNq3luJko= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/graph-gophers/dataloader v5.0.0+incompatible h1:R+yjsbrNq1Mo3aPG+Z/EKYrXrXXUNJHOgbRt+U6jOug= +github.com/graph-gophers/dataloader v5.0.0+incompatible/go.mod h1:jk4jk0c5ZISbKaMe8WsVopGB5/15GvGHMdMdPtwlRp4= +github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= +github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 
h1:f4tggROQKKcnh4eItay6z/HbHLqghBxS8g7pyMhmDio= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0/go.mod h1:hKAkSgNkL0FII46ZkJcpVEAai4KV+swlIWCKfekd1pA= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 h1:o95KDiV/b1xdkumY5YbLR0/n2+wBxUpgf3HgfKgTyLI= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3/go.mod h1:hTxjzRcX49ogbTGVJ1sM5mz5s+SSgiGIyL3jjPxl32E= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.25.1 
h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= +github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= +github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= +github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= +github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-envparse v0.1.0 h1:bE++6bhIsNCPLvgDZkYqo3nA+/PFI51pkrHdmPSDFPY= +github.com/hashicorp/go-envparse v0.1.0/go.mod h1:OHheN1GoygLlAkTlXLXvAdnXdZxy8JUweQ1rAXx1xnc= +github.com/hashicorp/go-getter v1.7.1 h1:SWiSWN/42qdpR0MdhaOc/bLR48PLuP1ZQtYLRlM69uY= +github.com/hashicorp/go-getter v1.7.1/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= 
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= +github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod 
h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= +github.com/hashicorp/memberlist 
v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e h1:sr4lujmn9heD030xx/Pd4B/JSmvRhFzuotNXaaV0WLs= +github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= +github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/henvic/httpretty v0.0.6 h1:JdzGzKZBajBfnvlMALXXMVQWxWMF/ofTy8C3/OSUTxs= +github.com/henvic/httpretty v0.0.6/go.mod h1:X38wLjWXHkXT7r2+uK8LjCMne9rsuNaBLJ+5cU2/Pmo= +github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g= +github.com/hetznercloud/hcloud-go/v2 v2.0.0/go.mod h1:4iUG2NG8b61IAwNx6UsMWQ6IfIf/i1RsG0BbsKAyR5Q= +github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= +github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= +github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= +github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/go-assert 
v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= +github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ionos-cloud/sdk-go/v6 
v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0= +github.com/ionos-cloud/sdk-go/v6 v6.1.8/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= +github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= +github.com/jackc/pgconn v1.14.1 h1:smbxIaZA08n6YuxEX1sDyjV/qkbtUtkH20qLkR9MUR4= +github.com/jackc/pgconn v1.14.1/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= 
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0= +github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod 
h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0= +github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= 
+github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= +github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= +github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= +github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= +github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= +github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= +github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= +github.com/kelseyhightower/envconfig 
v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a h1:dHCfT5W7gghzPtfsW488uPmEOm85wewI+ypUwibyTdU= +github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/lib/pq v1.0.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/linode/linodego v1.19.0 h1:n4WJrcr9+30e9JGZ6DI0nZbm5SdAj1kSwvvt/998YUw= +github.com/linode/linodego v1.19.0/go.mod h1:XZFR+yJ9mm2kwf6itZ6SCpu+6w3KnIevV0Uu5HNWJgQ= +github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= +github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod 
h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f h1:tVvGiZQFjOXP+9YyGqSA6jE55x1XVxmoPYudncxrZ8U= +github.com/manyminds/api2go v0.0.0-20171030193247-e7b693844a6f/go.mod h1:Z60vy0EZVSu0bOugCHdcN5ZxFMKSpjRgsnh0XKPFqqk= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.12/go.mod 
h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= +github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= +github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
+github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= +github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= +github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 
+github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= +github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= +github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= 
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1 h1:mPMvm6X6tf4w8y7j9YIt6V9jfWhL6QlbEc7CCmeQlWk= +github.com/mostynb/zstdpool-freelist v0.0.0-20201229113212-927304c0c3b1/go.mod h1:ye2e/VUEtE2BHE+G/QcKkcLQVAEJoYRFj5VUOQatCRE= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/muesli/termenv v0.12.0 h1:KuQRUE3PgxRFWhq4gHvZtPSLCGDqM5q/cYr1pZ39ytc= +github.com/muesli/termenv v0.12.0/go.mod h1:WCCv32tusQ/EEZ5S8oUIIrC/nIuBcxCVqlN4Xfkv+7A= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= 
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249 h1:NHrXEjTNQY7P0Zfx1aMrNhpgxHmow66XQtm0aQLY0AE= +github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod 
h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk= +github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= +github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= +github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= +github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 
h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= +github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= +github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= +github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= +github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC0M= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.1.1 
h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= +github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 
h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pressly/goose/v3 v3.16.0 h1:xMJUsZdHLqSnCqESyKSqEfcYVYsUuup1nrOhaEFftQg= +github.com/pressly/goose/v3 v3.16.0/go.mod h1:JwdKVnmCRhnF6XLQs2mHEQtucFD49cQBdRM4UiwkxsM= +github.com/prometheus/alertmanager v0.26.0 h1:uOMJWfIwJguc3NaM3appWNbbrh6G/OjvaHMk22aBBYc= +github.com/prometheus/alertmanager v0.26.0/go.mod h1:rVcnARltVjavgVaNnmevxK7kOn7IZavyf0KNgHkbEpU= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= +github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= +github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 h1:oHcfzdJnM/SFppy2aUlvomk37GI33x9vgJULihE5Dt8= +github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97/go.mod h1:LoBCZeRh+5hX+fSULNyFnagYlQG/gBsyA/deNzROkq8= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510 h1:6ksZ7t1hNOzGPPs8DK7SvXQf6UfWzi+W5Z7PCBl8gx4= +github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510/go.mod h1:UC0TwJiF90m2T3iYPQBKnGu8gv3s55dF/EgpTq8gyvo= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/pyroscope-io/client v0.7.1 h1:yFRhj3vbgjBxehvxQmedmUWJQ4CAfCHhn+itPsuWsHw= +github.com/pyroscope-io/client v0.7.1/go.mod h1:4h21iOU4pUOq0prKyDlvYRL+SCKsBc5wKiEtV+rJGqU= +github.com/pyroscope-io/godeltaprof v0.1.2 h1:MdlEmYELd5w+lvIzmZvXGNMVzW2Qc9jDMuJaPOR75g4= +github.com/pyroscope-io/godeltaprof v0.1.2/go.mod h1:psMITXp90+8pFenXkKIpNhrfmI9saQnPbba27VIaiQE= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/regen-network/gocuke v0.6.2 h1:pHviZ0kKAq2U2hN2q3smKNxct6hS0mGByFMHGnWA97M= +github.com/regen-network/gocuke v0.6.2/go.mod h1:zYaqIHZobHyd0xOrHGPQjbhGJsuZ1oElx150u2o1xuk= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= 
+github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/cors v1.9.0 h1:l9HGsTsHJcvW14Nk7J9KFz8bzeAWXn3CG6bgt7LsrAE= +github.com/rs/cors v1.9.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= 
+github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= +github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= +github.com/scylladb/go-reflectx v1.0.1 h1:b917wZM7189pZdlND9PbIJ6NQxfDPfBvUaQ7cjj1iZQ= +github.com/scylladb/go-reflectx v1.0.1/go.mod h1:rWnOfDIRWBGN0miMLIcoPt/Dhi2doCMZqwMCJ3KupFc= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/ksuid v1.0.4 
h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= +github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= +github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= +github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= +github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= 
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ= +github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= +github.com/goplugin/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumvbfM1u/etVq42Afwq/jtNSBSOA8n5jntnNPo= +github.com/goplugin/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M= +github.com/goplugin/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCqR1LNS7aI3jT0V+xGrg= +github.com/goplugin/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= +github.com/goplugin/plugin-automation v1.0.2-0.20240118014648-1ab6a88c9429 h1:xkejUBZhcBpBrTSfxc91Iwzadrb6SXw8ks69bHIQ9Ww= +github.com/goplugin/plugin-automation v1.0.2-0.20240118014648-1ab6a88c9429/go.mod h1:wJmVvDf4XSjsahWtfUq3wvIAYEAuhr7oxmxYnEL/LGQ= +github.com/goplugin/plugin-common v0.1.7-0.20240216174848-c7f1809138d6 h1:hpNkTpLtwWXKqguf7wYqetxpmxY/bSO+1PLpY8VBu2w= +github.com/goplugin/plugin-common v0.1.7-0.20240216174848-c7f1809138d6/go.mod h1:6aXWSEQawX2oZXcPPOdxnEGufAhj7PqPKolXf6ijRGA= +github.com/goplugin/plugin-cosmos v0.4.1-0.20240213120401-01a23955f9f8 h1:I326nw5GwHQHsLKHwtu5Sb9EBLylC8CfUd7BFAS0jtg= +github.com/goplugin/plugin-cosmos v0.4.1-0.20240213120401-01a23955f9f8/go.mod h1:a65NtrK4xZb01mf0dDNghPkN2wXgcqFQ55ADthVBgMc= 
+github.com/goplugin/plugin-data-streams v0.0.0-20240214203158-47dae5de1336 h1:j00D0/EqE9HRu+63v7KwUOe4ZxLc4AN5SOJFiinkkH0= +github.com/goplugin/plugin-data-streams v0.0.0-20240214203158-47dae5de1336/go.mod h1:umLyYLRGqyIuFfGpEREZP3So6+O8iL35cCCqW+OxX5w= +github.com/goplugin/plugin-feeds v0.0.0-20240119021347-3c541a78cdb8 h1:1BcjXuviSAKttOX7BZoVHRZZGfxqoA2+AL8tykmkdoc= +github.com/goplugin/plugin-feeds v0.0.0-20240119021347-3c541a78cdb8/go.mod h1:vy1L7NybTy2F/Yv7BOh+oZBa1MACD6gzd1+DkcSkfp8= +github.com/goplugin/plugin-solana v1.0.3-0.20240213161921-c4d342b761b0 h1:9IxmR+1NH1WxaX44+t553fOrrZRfxwMVvnDuBIy0tgs= +github.com/goplugin/plugin-solana v1.0.3-0.20240213161921-c4d342b761b0/go.mod h1:JiykN+8W5TA4UD2ClrzQCVvcH3NcyLEVv7RwY0busrw= +github.com/goplugin/plugin-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0 h1:7m9PVtccb8/pvKTXMaGuyceFno1icRyC2SFH7KG7+70= +github.com/goplugin/plugin-starknet/relayer v0.0.1-beta-test.0.20240213121419-1272736c2ac0/go.mod h1:SZ899lZYQ0maUulWbZg+SWqabHQ1wTbyk3jT8wJfyo8= +github.com/goplugin/plugin-testing-framework v1.23.2 h1:haXPd9Pg++Zs5/QIZnhFd9RElmz/d0+4nNeletUg9ZM= +github.com/goplugin/plugin-testing-framework v1.23.2/go.mod h1:StIOdxvwd8AMO6xuBtmD6FQfJXktEn/mJJEr7728BTc= +github.com/goplugin/plugin-vrf v0.0.0-20231120191722-fef03814f868 h1:FFdvEzlYwcuVHkdZ8YnZR/XomeMGbz5E2F2HZI3I3w8= +github.com/goplugin/plugin-vrf v0.0.0-20231120191722-fef03814f868/go.mod h1:Kn1Hape05UzFZ7bOUnm3GVsHzP0TNrVmpfXYNHdqGGs= +github.com/goplugin/go-plugin v0.0.0-20240208201424-b3b91517de16 h1:TFe+FvzxClblt6qRfqEhUfa4kFQx5UobuoFGO2W4mMo= +github.com/goplugin/go-plugin v0.0.0-20240208201424-b3b91517de16/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/goplugin/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU= +github.com/goplugin/grpc-proxy v0.0.0-20230731113816-f1be6620749f/go.mod h1:MvMXoufZAtqExNexqi4cjrNYE9MefKddKylxjS+//n0= +github.com/goplugin/libocr 
v0.0.0-20240215150045-fe2ba71b2f0a h1:nGkZ9uXS8lPIJOi68rdftEo2c9Q8qbRAi5+XMnKobVc= +github.com/goplugin/libocr v0.0.0-20240215150045-fe2ba71b2f0a/go.mod h1:kC0qmVPUaVkFqGiZMNhmRmjdphuUmeyLEdlWFOQzFWI= +github.com/goplugin/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1 h1:yiKnypAqP8l0OX0P3klzZ7SCcBUxy5KqTAKZmQOvSQE= +github.com/goplugin/tdh2/go/ocr2/decryptionplugin v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:q6f4fe39oZPdsh1i57WznEZgxd8siidMaSFq3wdPmVg= +github.com/goplugin/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1 h1:Dai1bn+Q5cpeGMQwRdjOdVjG8mmFFROVkSKuUgBErRQ= +github.com/goplugin/tdh2/go/tdh2 v0.0.0-20230906073235-9e478e5e19f1/go.mod h1:G5Sd/yzHWf26rQ+X0nG9E0buKPqRGPMJAfk2gwCzOOw= +github.com/goplugin/wasp v0.4.2 h1:MPErzwcOW84MKnA6/BjMnlsspQ0681XfoanGsJHOI7c= +github.com/goplugin/wasp v0.4.2/go.mod h1:eVhBVLbVv0qORUlN7aR5C4aTN/lTYO3KnN1erO4ROOI= +github.com/goplugin/wsrpc v0.7.2 h1:iBXzMeg7vc5YoezIQBq896y25BARw7OKbhrb6vPbtRQ= +github.com/goplugin/wsrpc v0.7.2/go.mod h1:sj7QX2NQibhkhxTfs3KOhAj/5xwgqMipTvJVSssT9i0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= +github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod 
h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= +github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod 
h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= +github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/teris-io/shortid v0.0.0-20171029131806-771a37caa5cf/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0= +github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125 h1:3SNcvBmEPE1YlB1JpVZouslJpI3GBNoiqW7+wb0Rz7w= +github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0= +github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE= +github.com/test-go/testify v1.1.4/go.mod h1:rH7cfJo/47vWGdi4GPj16x3/t1xGOj2YxzmNQzk2ghU= +github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a h1:YuO+afVc3eqrjiCUizNCxI53bl/BnPiVwXqLzqYTqgU= +github.com/theodesp/go-heaps v0.0.0-20190520121037-88e35354fe0a/go.mod h1:/sfW47zCZp9FrtGcWyo1VjbgDaodxX9ovZvgLb/MxaA= +github.com/thlib/go-timezone-local v0.0.0-20210907160436-ef149e42d28e 
h1:BuzhfgfWQbX0dWzYzT1zsORLnHRv3bcRcsaUk0VmXA8= +github.com/thlib/go-timezone-local v0.0.0-20210907160436-ef149e42d28e/go.mod h1:/Tnicc6m/lsJE0irFMA0LfIwTBo4QP7A8IfyIv4zZKI= +github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= +github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= +github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod 
h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA= +github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI= +github.com/umbracle/ethgo v0.1.3 h1:s8D7Rmphnt71zuqrgsGTMS5gTNbueGO1zKLh7qsFzTM= +github.com/umbracle/ethgo v0.1.3/go.mod h1:g9zclCLixH8liBI27Py82klDkW7Oo33AxUOr+M9lzrU= +github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 h1:10Nbw6cACsnQm7r34zlpJky+IzxVLRk6MKTS2d3Vp0E= +github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722/go.mod h1:c8J0h9aULj2i3umrfyestM6jCq0LK0U6ly6bWy96nd4= +github.com/unrolled/secure v1.13.0 h1:sdr3Phw2+f8Px8HE5sd1EHdj1aV3yUwed/uZXChLFsk= +github.com/unrolled/secure v1.13.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= +github.com/urfave/cli v1.22.14 
h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= +github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= +github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= +github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/fastjson v1.4.1 h1:hrltpHpIpkaxll8QltMU8c3QZ5+qIiCL8yKqPFJI/yE= +github.com/valyala/fastjson v1.4.1/go.mod h1:nV6MsjxL2IMJQUoHDIrjEI7oLyeqK6aBD7EFWPsvP8o= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= +github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xdg-go/stringprep v1.0.4/go.mod 
h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= +github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= +github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +github.com/zondax/hid v0.9.1 h1:gQe66rtmyZ8VeGFcOpbuH3r7erYtNEAezCAYu8LdkJo= +github.com/zondax/hid v0.9.1/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.1 h1:Pip65OOl4iJ84WTpA4BKChvOufMhhbxED3BaihoZN4c= +github.com/zondax/ledger-go v0.14.1/go.mod h1:fZ3Dqg6qcdXWSOJFKMG8GCTnD7slO/RL2feOQv8K320= +go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= +go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= +go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= +go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= +go.dedis.ch/kyber/v3 v3.1.0 h1:ghu+kiRgM5JyD9TJ0hTIxTLQlJBR/ehjWvWwYW3XsC0= +go.dedis.ch/kyber/v3 v3.1.0/go.mod h1:kXy7p3STAurkADD+/aZcsznZGKVHEqbtmdIzvPfrs1U= +go.dedis.ch/protobuf v1.0.5/go.mod h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= +go.dedis.ch/protobuf v1.0.7/go.mod h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI4= +go.dedis.ch/protobuf v1.0.11 h1:FTYVIEzY/bfl37lu3pR4lIj+F9Vp1jE8oh91VmxKgLo= +go.dedis.ch/protobuf v1.0.11/go.mod h1:97QR256dnkimeNdfmURz0wAMNVbd1VmLXhG1CrTYrJ4= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.7 
h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= +go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= +go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= +go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= +go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= +go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= +go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= +go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= +go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE= +go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 h1:qCPXSQCoD3qeWFb1RuIks8fw9Atxpk78bmtVdi15KhE= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0016/go.mod h1:OdN0alYOlYhHXu6BDlGehrZWgtBuiDsz/rlNeJeXiNg= +go.opentelemetry.io/collector/semconv v0.87.0 h1:BsG1jdLLRCBRlvUujk4QA86af7r/ZXnizczQpEs/gg8= +go.opentelemetry.io/collector/semconv 
v0.87.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= +go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= +go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1 h1:mMv2jG58h6ZI5t5S9QCVGdzCmAsTakMa3oxVgpSD44g= +go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1/go.mod h1:oqRuNKG0upTaDPbLVCG8AD0G2ETrfDtmh7jViy7ox6M= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.21.0 
h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.starlark.net v0.0.0-20220817180228-f738f5508c12 h1:xOBJXWGEDwU5xSDxH6macxO11Us0AH2fTa9rmsbbF7g= +go.starlark.net v0.0.0-20220817180228-f738f5508c12/go.mod h1:VZcBMdr3cT3PnBoWunTabuSEXwVAH+ZJ5zxfs3AdASk= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/ratelimit v0.2.0 
h1:UQE2Bgi7p2B85uP5dC2bbRtig0C+OeNRnNEafLjsLPA= +go.uber.org/ratelimit v0.2.0/go.mod h1:YYBV4e4naJvhpitQrWJu1vCpgB7CboMe0qhltKt6mUg= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go4.org/netipx v0.0.0-20230125063823-8449b0a6169f h1:ketMxHg+vWm3yccyYiq+uK8D3fRmna2Fcj+awpQp84s= +go4.org/netipx v0.0.0-20230125063823-8449b0a6169f/go.mod h1:tgPU4N2u9RByaTN3NC2p9xOzyFpte4jYwsIIRF7XlSc= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod 
h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= 
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile 
v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net 
v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod 
h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod 
h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= +gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod 
h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= +google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210401141331-865547bb08e2/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 h1:I6WNifs6pF9tNdSob2W24JtyxIYjzFB9qDlpUC76q+U= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.40.0/go.mod 
h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc/examples v0.0.0-20210424002626-9572fd6faeae/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/guregu/null.v2 v2.1.2 h1:YOuepWdYqGnrenzPyMi+ybCjeDzjdazynbwsXXOk4i8= +gopkg.in/guregu/null.v2 v2.1.2/go.mod h1:XORrx8tyS5ZDcyUboCIxQtta/Aujk/6pfWrn9Xe33mU= +gopkg.in/guregu/null.v4 v4.0.0 h1:1Wm3S1WEA2I26Kq+6vcW+w0gcDo44YKYD7YIEJNHDjg= +gopkg.in/guregu/null.v4 v4.0.0/go.mod h1:YoQhUrADuG3i9WqesrCmpNRwm1ypAgSHYqoOcTu/JrI= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mgo.v2 
v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +k8s.io/api v0.25.11 h1:4mjYDfE3yp22jrytjH0knwgzjXKkxHX4D01ZCAazvZM= +k8s.io/api v0.25.11/go.mod h1:bK4UvD4bthtutNlvensrfBX21PRQ/vs2cIYggHkOOAo= +k8s.io/apiextensions-apiserver v0.25.3 h1:bfI4KS31w2f9WM1KLGwnwuVlW3RSRPuIsfNF/3HzR0k= +k8s.io/apiextensions-apiserver v0.25.3/go.mod h1:ZJqwpCkxIx9itilmZek7JgfUAM0dnTsA48I4krPqRmo= +k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= +k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= +k8s.io/cli-runtime v0.25.11 h1:GE2yNZm1tN+MJtw1SGMOLesLF7Kp7NVAVqRSTbXfu4o= +k8s.io/cli-runtime v0.25.11/go.mod h1:r/nEINuHVEpgGhcd2WamU7hD1t/lMnSz8XM44Autltc= +k8s.io/client-go v0.25.11 h1:DJQ141UsbNRI6wYSlcYLP5J5BW5Wq7Bgm42Ztq2SW70= +k8s.io/client-go v0.25.11/go.mod h1:41Xs7p1SfhoReUnmjjYCfCNWFiq4xSkexwJfbxF2F7A= +k8s.io/component-base v0.26.2 h1:IfWgCGUDzrD6wLLgXEstJKYZKAFS2kO+rBRi0p3LqcI= 
+k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU= +k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= +k8s.io/kubectl v0.25.11 h1:6bsft5Gan6BCvQ7cJbDRFjTm4Zfq8GuUYpsWAdVngYE= +k8s.io/kubectl v0.25.11/go.mod h1:8mIfgkFgT+yJ8/TlmPW1qoRh46H2si9q5nW8id7i9iM= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= +pgregory.net/rapid v0.5.5 h1:jkgx1TjbQPD/feRoK+S/mXw9e1uj6WilpHrXJowi6oA= +pgregory.net/rapid v0.5.5/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= +rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= +sigs.k8s.io/controller-runtime v0.13.0 h1:iqa5RNciy7ADWnIc8QxCbOX5FEKVR3uxVxKHRMc2WIQ= +sigs.k8s.io/controller-runtime v0.13.0/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json 
v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= +sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= +sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= +sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/integration-tests/k8s/connect.go b/integration-tests/k8s/connect.go new file mode 100644 index 00000000..d4d0e7ee --- /dev/null +++ b/integration-tests/k8s/connect.go @@ -0,0 +1,103 @@ +package k8s + +import ( + "fmt" + "os" + "time" + + "github.com/pelletier/go-toml/v2" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/goplugin/plugin-testing-framework/blockchain" + client2 "github.com/goplugin/plugin-testing-framework/client" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +const ( + DefaultConfigFilePath = "../../../charts/plugin-cluster/connect.toml" + ErrReadConnectionConfig = "failed to read TOML environment connection config" + ErrUnmarshalConnectionConfig = "failed to unmarshal TOML environment connection config" +) + +type ConnectionVars struct { + Namespace string `toml:"namespace"` + NetworkName string `toml:"network_name"` + NetworkChainID int64 `toml:"network_chain_id"` + NetworkPrivateKey string `toml:"network_private_key"` + NetworkWSURL string `toml:"network_ws_url"` + NetworkHTTPURL string `toml:"network_http_url"` + CLNodesNum int `toml:"cl_nodes_num"` + 
CLNodeURLTemplate string `toml:"cl_node_url_template"` + CLNodeInternalDNSRecordTemplate string `toml:"cl_node_internal_dns_record_template"` + CLNodeUser string `toml:"cl_node_user"` + CLNodePassword string `toml:"cl_node_password"` + MockServerURL string `toml:"mockserver_url"` +} + +// ConnectRemote connects to a local environment, see charts/plugin-cluster +func ConnectRemote(l zerolog.Logger) (blockchain.EVMClient, *client2.MockserverClient, contracts.ContractDeployer, *client.PluginK8sClient, []*client.PluginK8sClient, error) { + cfg, err := ReadConfig() + if err != nil { + return nil, nil, nil, nil, nil, err + } + net := &blockchain.EVMNetwork{ + Name: cfg.NetworkName, + Simulated: true, + SupportsEIP1559: true, + ClientImplementation: blockchain.EthereumClientImplementation, + ChainID: 1337, + PrivateKeys: []string{ + cfg.NetworkPrivateKey, + }, + URLs: []string{cfg.NetworkWSURL}, + HTTPURLs: []string{cfg.NetworkHTTPURL}, + PluginTransactionLimit: 500000, + Timeout: blockchain.StrDuration{Duration: 2 * time.Minute}, + MinimumConfirmations: 1, + GasEstimationBuffer: 10000, + } + cc, err := blockchain.NewEVMClientFromNetwork(*net, l) + if err != nil { + return nil, nil, nil, nil, nil, err + } + cd, err := contracts.NewContractDeployer(cc, l) + if err != nil { + return nil, nil, nil, nil, nil, err + } + clClients := make([]*client.PluginK8sClient, 0) + for i := 1; i <= cfg.CLNodesNum; i++ { + c, err := client.NewPluginK8sClient(&client.PluginConfig{ + URL: fmt.Sprintf(cfg.CLNodeURLTemplate, i), + Email: cfg.CLNodeUser, + InternalIP: fmt.Sprintf(cfg.CLNodeInternalDNSRecordTemplate, i), + Password: cfg.CLNodePassword, + }, fmt.Sprintf(cfg.CLNodeInternalDNSRecordTemplate, i), cfg.Namespace) + if err != nil { + return nil, nil, nil, nil, nil, err + } + clClients = append(clClients, c) + } + msClient := client2.NewMockserverClient(&client2.MockserverConfig{ + LocalURL: cfg.MockServerURL, + ClusterURL: cfg.MockServerURL, + }) + return cc, msClient, cd, clClients[0], 
clClients[1:], nil +} + +func ReadConfig() (*ConnectionVars, error) { + var cfg *ConnectionVars + var d []byte + var err error + d, err = os.ReadFile(DefaultConfigFilePath) + if err != nil { + return nil, fmt.Errorf("%s, err: %w", ErrReadConnectionConfig, err) + } + err = toml.Unmarshal(d, &cfg) + if err != nil { + return nil, fmt.Errorf("%s, err: %w", ErrUnmarshalConnectionConfig, err) + } + log.Info().Interface("Config", cfg).Msg("Connecting to environment from config") + return cfg, nil +} diff --git a/integration-tests/load/README.md b/integration-tests/load/README.md new file mode 100644 index 00000000..8bbb8f24 --- /dev/null +++ b/integration-tests/load/README.md @@ -0,0 +1,70 @@ +# Performance tests for CL jobs + +This folder container performance e2e tests for different job types, currently implemented: + +- VRFv2 + +All the tests have 4 groups: + +- one product soak +- one product load +- multiple product instances soak +- multiple product instances load + +When you develop an e2e performance suite for a new product you can implement the tests one by one to answer the questions: + +What are performance characteristics of a one instance of a product (jobs + contracts): + +- is our product stable at all, no memory leaks, no flaking performance under some RPS? (test #1) +- what are the limits for one product instance, figuring out the max/optimal performance params by increasing RPS and varying configuration (test #2) +- update test #1 with optimal params and workload to constantly run in CI + +What are performance and capacity characteristics of Plugin node(s) that run multiple products of the same type simultaneously: + +- how many products of the same type we can run at once at a stable load with optimal configuration? 
(test #3) +- what are the limits if we add more and more products of the same type, each product have a stable RPS, we vary only amount of products +- update test #3 with optimal params and workload to constantly run in CI + +Implementing test #1 is **mandatory** for each product. +Tests #2,#3,#4 are optional if you need to figure out your product scaling or node/cluster capacity. + +## Usage + +```sh +export LOKI_TOKEN=... +export LOKI_URL=... + +go test -v -run TestVRFV2Load/vrfv2_soak_test +``` + +### Dashboards + +Each product has its own generated dashboard in `cmd/dashboard.go` + +Deploying dashboard: + +```sh +export GRAFANA_URL=... +export GRAFANA_TOKEN=... +export DATA_SOURCE_NAME=grafanacloud-logs +export DASHBOARD_FOLDER=LoadTests +export DASHBOARD_NAME=${JobTypeName, for example WaspVRFv2} + +go run dashboard.go +``` + +### Assertions + +You can implement your product requirements assertions in `onchain_monitoring.go`. For Plugin products (VRF/OCR/Keepers/Automation) it's easier to assert on-chain part + +If you need to assert some metrics in `Prometheus/Loki`, here is an [example](https://github.com/goplugin/wasp/blob/master/examples/alerts/main_test.go#L88) + +Do not mix workload logic with assertions, separate them. + +### Implementation + +To implement a standard e2e performance suite for a new product please look at `gun.go` and `vu.go`. + +Gun should be working with one instance of your product. 
+ +VU(Virtual user) creates a new instance of your product and works with it in `Call()` diff --git a/integration-tests/load/automationv2_1/automationv2_1_test.go b/integration-tests/load/automationv2_1/automationv2_1_test.go new file mode 100644 index 00000000..8047b267 --- /dev/null +++ b/integration-tests/load/automationv2_1/automationv2_1_test.go @@ -0,0 +1,718 @@ +package automationv2_1 + +import ( + "context" + "fmt" + "math" + "math/big" + "strconv" + "strings" + "testing" + "time" + + geth "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pelletier/go-toml/v2" + "github.com/slack-go/slack" + "github.com/stretchr/testify/require" + + ocr3 "github.com/goplugin/libocr/offchainreporting2plus/ocr3confighelper" + "github.com/goplugin/wasp" + + ocr2keepers30config "github.com/goplugin/plugin-automation/pkg/v3/config" + + "github.com/goplugin/plugin-common/pkg/utils/tests" + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/k8s/environment" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/plugin" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/ethereum" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/networks" + + ctfconfig "github.com/goplugin/plugin-testing-framework/config" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/actions/automationv2" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + contractseth "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + aconfig "github.com/goplugin/pluginv3.0/integration-tests/testconfig/automation" + "github.com/goplugin/pluginv3.0/integration-tests/testreporters" + 
"github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_utils_2_1" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_emitter" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/simple_log_upkeep_counter_wrapper" +) + +const ( + StartupWaitTime = 30 * time.Second + StopWaitTime = 60 * time.Second +) + +var ( + baseTOML = `[Feature] +LogPoller = true + +[OCR2] +Enabled = true + +[P2P] +[P2P.V2] +Enabled = true +AnnounceAddresses = ["0.0.0.0:6690"] +ListenAddresses = ["0.0.0.0:6690"]` + + minimumNodeSpec = map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "2000m", + "memory": "4Gi", + }, + "limits": map[string]interface{}{ + "cpu": "2000m", + "memory": "4Gi", + }, + }, + } + + minimumDbSpec = map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "4000m", + "memory": "4Gi", + }, + "limits": map[string]interface{}{ + "cpu": "4000m", + "memory": "4Gi", + }, + }, + "stateful": true, + "capacity": "10Gi", + } + + recNodeSpec = map[string]interface{}{ + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "4000m", + "memory": "8Gi", + }, + "limits": map[string]interface{}{ + "cpu": "4000m", + "memory": "8Gi", + }, + }, + } + + recDbSpec = minimumDbSpec + + gethNodeSpec = map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "8000m", + "memory": "8Gi", + }, + "limits": map[string]interface{}{ + "cpu": "16000m", + "memory": "16Gi", + }, + } +) + +func TestLogTrigger(t *testing.T) { + ctx := tests.Context(t) + l := logging.GetTestLogger(t) + + loadedTestConfig, err := tc.GetConfig("Load", tc.Automation) + if err != nil { + t.Fatal(err) + } + + version := *loadedTestConfig.PluginImage.Version + image := *loadedTestConfig.PluginImage.Image + + l.Info().Msg("Starting automation v2.1 log trigger load test") + l.Info(). 
+ Int("Number of Nodes", *loadedTestConfig.Automation.General.NumberOfNodes). + Int("Duration", *loadedTestConfig.Automation.General.Duration). + Int("Block Time", *loadedTestConfig.Automation.General.BlockTime). + Str("Spec Type", *loadedTestConfig.Automation.General.SpecType). + Str("Log Level", *loadedTestConfig.Automation.General.PluginNodeLogLevel). + Str("Image", image). + Str("Tag", version). + Msg("Test Config") + + testConfigFormat := `Number of Nodes: %d +Duration: %d +Block Time: %d +Spec Type: %s +Log Level: %s +Image: %s +Tag: %s + +Load Config: +%s` + + prettyLoadConfig, err := toml.Marshal(loadedTestConfig.Automation.Load) + require.NoError(t, err, "Error marshalling load config") + + testConfig := fmt.Sprintf(testConfigFormat, *loadedTestConfig.Automation.General.NumberOfNodes, *loadedTestConfig.Automation.General.Duration, + *loadedTestConfig.Automation.General.BlockTime, *loadedTestConfig.Automation.General.SpecType, *loadedTestConfig.Automation.General.PluginNodeLogLevel, image, version, string(prettyLoadConfig)) + l.Info().Str("testConfig", testConfig).Msg("Test Config") + + testNetwork := networks.MustGetSelectedNetworkConfig(loadedTestConfig.Network)[0] + testType := "load" + loadDuration := time.Duration(*loadedTestConfig.Automation.General.Duration) * time.Second + automationDefaultLinkFunds := big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(10000))) //10000 PLI + + registrySettings := &contracts.KeeperRegistrySettings{ + PaymentPremiumPPB: uint32(0), + FlatFeeMicroPLI: uint32(40_000), + BlockCountPerTurn: big.NewInt(100), + CheckGasLimit: uint32(45_000_000), //45M + StalenessSeconds: big.NewInt(90_000), + GasCeilingMultiplier: uint16(2), + MaxPerformGas: uint32(5_000_000), + MinUpkeepSpend: big.NewInt(0), + FallbackGasPrice: big.NewInt(2e11), + FallbackLinkPrice: big.NewInt(2e18), + MaxCheckDataSize: uint32(5_000), + MaxPerformDataSize: uint32(5_000), + RegistryVersion: contractseth.RegistryVersion_2_1, + } + + testEnvironment := 
environment.New(&environment.Config{ + TTL: loadDuration + time.Hour*6, + NamespacePrefix: fmt.Sprintf( + "automation-%s-%s", + testType, + strings.ReplaceAll(strings.ToLower(testNetwork.Name), " ", "-"), + ), + Test: t, + PreventPodEviction: true, + }) + + testEnvironment. + AddHelm(ethereum.New(ðereum.Props{ + NetworkName: testNetwork.Name, + Simulated: testNetwork.Simulated, + WsURLs: testNetwork.URLs, + Values: map[string]interface{}{ + "resources": gethNodeSpec, + "geth": map[string]interface{}{ + "blocktime": *loadedTestConfig.Automation.General.BlockTime, + "capacity": "20Gi", + }, + }, + })) + + err = testEnvironment.Run() + require.NoError(t, err, "Error launching test environment") + + if testEnvironment.WillUseRemoteRunner() { + return + } + + var ( + nodeSpec = minimumNodeSpec + dbSpec = minimumDbSpec + ) + + switch *loadedTestConfig.Automation.General.SpecType { + case "recommended": + nodeSpec = recNodeSpec + dbSpec = recDbSpec + case "local": + nodeSpec = map[string]interface{}{} + dbSpec = map[string]interface{}{"stateful": true} + default: + // minimum: + + } + + if *loadedTestConfig.Pyroscope.Enabled { + loadedTestConfig.Pyroscope.Environment = &testEnvironment.Cfg.Namespace + } + + numberOfUpkeeps := *loadedTestConfig.Automation.General.NumberOfNodes + + for i := 0; i < numberOfUpkeeps+1; i++ { // +1 for the OCR boot node + var nodeTOML string + if i == 1 || i == 3 { + nodeTOML = fmt.Sprintf("%s\n\n[Log]\nLevel = \"%s\"", baseTOML, *loadedTestConfig.Automation.General.PluginNodeLogLevel) + } else { + nodeTOML = fmt.Sprintf("%s\n\n[Log]\nLevel = \"info\"", baseTOML) + } + nodeTOML = networks.AddNetworksConfig(nodeTOML, loadedTestConfig.Pyroscope, testNetwork) + + var overrideFn = func(_ interface{}, target interface{}) { + ctfconfig.MustConfigOverridePluginVersion(loadedTestConfig.PluginImage, target) + ctfconfig.MightConfigOverridePyroscopeKey(loadedTestConfig.Pyroscope, target) + } + + cd := plugin.NewWithOverride(i, map[string]any{ + "toml": 
nodeTOML, + "plugin": nodeSpec, + "db": dbSpec, + "prometheus": *loadedTestConfig.Automation.General.UsePrometheus, + }, loadedTestConfig.PluginImage, overrideFn) + + testEnvironment.AddHelm(cd) + } + + err = testEnvironment.Run() + require.NoError(t, err, "Error running plugin DON") + + chainClient, err := blockchain.NewEVMClient(testNetwork, testEnvironment, l) + require.NoError(t, err, "Error building chain client") + + contractDeployer, err := contracts.NewContractDeployer(chainClient, l) + require.NoError(t, err, "Error building contract deployer") + + pluginNodes, err := client.ConnectPluginNodes(testEnvironment) + require.NoError(t, err, "Error connecting to plugin nodes") + + chainClient.ParallelTransactions(true) + + multicallAddress, err := contractDeployer.DeployMultiCallContract() + require.NoError(t, err, "Error deploying multicall contract") + + a := automationv2.NewAutomationTestK8s(chainClient, contractDeployer, pluginNodes) + a.RegistrySettings = *registrySettings + a.RegistrarSettings = contracts.KeeperRegistrarSettings{ + AutoApproveConfigType: uint8(2), + AutoApproveMaxAllowed: math.MaxUint16, + MinLinkJuels: big.NewInt(0), + } + a.PluginConfig = ocr2keepers30config.OffchainConfig{ + TargetProbability: "0.999", + TargetInRounds: 1, + PerformLockoutWindow: 80_000, // Copied from arbitrum mainnet prod value + GasLimitPerReport: 10_300_000, + GasOverheadPerUpkeep: 300_000, + MinConfirmations: 0, + MaxUpkeepBatchSize: 10, + } + a.PublicConfig = ocr3.PublicConfig{ + DeltaProgress: 10 * time.Second, + DeltaResend: 15 * time.Second, + DeltaInitial: 500 * time.Millisecond, + DeltaRound: 1000 * time.Millisecond, + DeltaGrace: 200 * time.Millisecond, + DeltaCertifiedCommitRequest: 300 * time.Millisecond, + DeltaStage: 15 * time.Second, + RMax: 24, + MaxDurationQuery: 20 * time.Millisecond, + MaxDurationObservation: 20 * time.Millisecond, + MaxDurationShouldAcceptAttestedReport: 1200 * time.Millisecond, + MaxDurationShouldTransmitAcceptedReport: 20 * 
time.Millisecond, + F: 1, + } + + startTimeTestSetup := time.Now() + l.Info().Str("START_TIME", startTimeTestSetup.String()).Msg("Test setup started") + + a.SetupAutomationDeployment(t) + + err = actions.FundPluginNodesAddress(pluginNodes[1:], chainClient, big.NewFloat(*loadedTestConfig.Common.PluginNodeFunding), 0) + require.NoError(t, err, "Error funding plugin nodes") + + consumerContracts := make([]contracts.KeeperConsumer, 0) + triggerContracts := make([]contracts.LogEmitter, 0) + triggerAddresses := make([]common.Address, 0) + + utilsABI, err := automation_utils_2_1.AutomationUtilsMetaData.GetAbi() + require.NoError(t, err, "Error getting automation utils abi") + emitterABI, err := log_emitter.LogEmitterMetaData.GetAbi() + require.NoError(t, err, "Error getting log emitter abi") + consumerABI, err := simple_log_upkeep_counter_wrapper.SimpleLogUpkeepCounterMetaData.GetAbi() + require.NoError(t, err, "Error getting consumer abi") + + var bytes0 = [32]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } + + var bytes1 = [32]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + } + + upkeepConfigs := make([]automationv2.UpkeepConfig, 0) + loadConfigs := make([]aconfig.Load, 0) + cEVMClient, err := blockchain.ConcurrentEVMClient(testNetwork, testEnvironment, chainClient, l) + require.NoError(t, err, "Error building concurrent chain client") + + for _, u := range loadedTestConfig.Automation.Load { + for i := 0; i < *u.NumberOfUpkeeps; i++ { + consumerContract, err := contractDeployer.DeployAutomationSimpleLogTriggerConsumer() + require.NoError(t, err, "Error deploying automation consumer contract") + consumerContracts = append(consumerContracts, consumerContract) + l.Debug(). + Str("Contract Address", consumerContract.Address()). + Int("Number", i+1). + Int("Out Of", *u.NumberOfUpkeeps). 
+ Msg("Deployed Automation Log Trigger Consumer Contract") + + loadCfg := aconfig.Load{ + NumberOfEvents: u.NumberOfEvents, + NumberOfSpamMatchingEvents: u.NumberOfSpamMatchingEvents, + NumberOfSpamNonMatchingEvents: u.NumberOfSpamNonMatchingEvents, + CheckBurnAmount: u.CheckBurnAmount, + PerformBurnAmount: u.PerformBurnAmount, + UpkeepGasLimit: u.UpkeepGasLimit, + SharedTrigger: u.SharedTrigger, + } + + loadConfigs = append(loadConfigs, loadCfg) + + if *u.SharedTrigger && i > 0 { + triggerAddresses = append(triggerAddresses, triggerAddresses[len(triggerAddresses)-1]) + continue + } + triggerContract, err := contractDeployer.DeployLogEmitterContract() + require.NoError(t, err, "Error deploying log emitter contract") + triggerContracts = append(triggerContracts, triggerContract) + triggerAddresses = append(triggerAddresses, triggerContract.Address()) + l.Debug(). + Str("Contract Address", triggerContract.Address().Hex()). + Int("Number", i+1). + Int("Out Of", *u.NumberOfUpkeeps). + Msg("Deployed Automation Log Trigger Emitter Contract") + } + err = chainClient.WaitForEvents() + require.NoError(t, err, "Failed waiting for contracts to deploy") + } + + for i, consumerContract := range consumerContracts { + logTriggerConfigStruct := automation_utils_2_1.LogTriggerConfig{ + ContractAddress: triggerAddresses[i], + FilterSelector: 1, + Topic0: emitterABI.Events["Log4"].ID, + Topic1: bytes1, + Topic2: bytes0, + Topic3: bytes0, + } + encodedLogTriggerConfig, err := utilsABI.Methods["_logTriggerConfig"].Inputs.Pack(&logTriggerConfigStruct) + require.NoError(t, err, "Error encoding log trigger config") + l.Debug().Bytes("Encoded Log Trigger Config", encodedLogTriggerConfig).Msg("Encoded Log Trigger Config") + + checkDataStruct := simple_log_upkeep_counter_wrapper.CheckData{ + CheckBurnAmount: loadConfigs[i].CheckBurnAmount, + PerformBurnAmount: loadConfigs[i].PerformBurnAmount, + EventSig: bytes1, + } + + encodedCheckDataStruct, err := 
consumerABI.Methods["_checkDataConfig"].Inputs.Pack(&checkDataStruct) + require.NoError(t, err, "Error encoding check data struct") + l.Debug().Bytes("Encoded Check Data Struct", encodedCheckDataStruct).Msg("Encoded Check Data Struct") + + upkeepConfig := automationv2.UpkeepConfig{ + UpkeepName: fmt.Sprintf("LogTriggerUpkeep-%d", i), + EncryptedEmail: []byte("test@mail.com"), + UpkeepContract: common.HexToAddress(consumerContract.Address()), + GasLimit: *loadConfigs[i].UpkeepGasLimit, + AdminAddress: common.HexToAddress(chainClient.GetDefaultWallet().Address()), + TriggerType: uint8(1), + CheckData: encodedCheckDataStruct, + TriggerConfig: encodedLogTriggerConfig, + OffchainConfig: []byte("0"), + FundingAmount: automationDefaultLinkFunds, + } + l.Debug().Interface("Upkeep Config", upkeepConfig).Msg("Upkeep Config") + upkeepConfigs = append(upkeepConfigs, upkeepConfig) + } + + registrationTxHashes, err := a.RegisterUpkeeps(upkeepConfigs) + require.NoError(t, err, "Error registering upkeeps") + + err = chainClient.WaitForEvents() + require.NoError(t, err, "Failed waiting for upkeeps to register") + + upkeepIds, err := a.ConfirmUpkeepsRegistered(registrationTxHashes) + require.NoError(t, err, "Error confirming upkeeps registered") + + l.Info().Msg("Successfully registered all Automation Upkeeps") + l.Info().Interface("Upkeep IDs", upkeepIds).Msg("Upkeeps Registered") + l.Info().Str("STARTUP_WAIT_TIME", StartupWaitTime.String()).Msg("Waiting for plugin to start") + time.Sleep(StartupWaitTime) + + startBlock, err := chainClient.LatestBlockNumber(ctx) + require.NoError(t, err, "Error getting latest block number") + + p := wasp.NewProfile() + + configs := make([]LogTriggerConfig, 0) + var numberOfEventsEmitted int64 + var numberOfEventsEmittedPerSec int64 + + for i, triggerContract := range triggerContracts { + c := LogTriggerConfig{ + Address: triggerContract.Address().String(), + NumberOfEvents: int64(*loadConfigs[i].NumberOfEvents), + NumberOfSpamMatchingEvents: 
int64(*loadConfigs[i].NumberOfSpamMatchingEvents), + NumberOfSpamNonMatchingEvents: int64(*loadConfigs[i].NumberOfSpamNonMatchingEvents), + } + numberOfEventsEmittedPerSec = numberOfEventsEmittedPerSec + int64(*loadConfigs[i].NumberOfEvents) + configs = append(configs, c) + } + + endTimeTestSetup := time.Now() + testSetupDuration := endTimeTestSetup.Sub(startTimeTestSetup) + l.Info(). + Str("END_TIME", endTimeTestSetup.String()). + Str("Duration", testSetupDuration.String()). + Msg("Test setup ended") + + ts, err := sendSlackNotification("Started", l, &loadedTestConfig, testEnvironment.Cfg.Namespace, strconv.Itoa(*loadedTestConfig.Automation.General.NumberOfNodes), + strconv.FormatInt(startTimeTestSetup.UnixMilli(), 10), "now", + []slack.Block{extraBlockWithText("\bTest Config\b\n```" + testConfig + "```")}, slack.MsgOptionBlocks()) + if err != nil { + l.Error().Err(err).Msg("Error sending slack notification") + } + + g, err := wasp.NewGenerator(&wasp.Config{ + T: t, + LoadType: wasp.RPS, + GenName: "log_trigger_gen", + CallTimeout: time.Minute * 3, + Schedule: wasp.Plain( + 1, + loadDuration, + ), + Gun: NewLogTriggerUser( + l, + configs, + cEVMClient, + multicallAddress.Hex(), + ), + CallResultBufLen: 1000, + }) + p.Add(g, err) + + startTimeTestEx := time.Now() + l.Info().Str("START_TIME", startTimeTestEx.String()).Msg("Test execution started") + + l.Info().Msg("Starting load generators") + _, err = p.Run(true) + require.NoError(t, err, "Error running load generators") + + l.Info().Msg("Finished load generators") + l.Info().Str("STOP_WAIT_TIME", StopWaitTime.String()).Msg("Waiting for upkeeps to be performed") + time.Sleep(StopWaitTime) + l.Info().Msg("Finished waiting 60s for upkeeps to be performed") + endTimeTestEx := time.Now() + testExDuration := endTimeTestEx.Sub(startTimeTestEx) + l.Info(). + Str("END_TIME", endTimeTestEx.String()). + Str("Duration", testExDuration.String()). 
+ Msg("Test execution ended") + + l.Info().Str("Duration", testExDuration.String()).Msg("Test Execution Duration") + endBlock, err := chainClient.LatestBlockNumber(ctx) + require.NoError(t, err, "Error getting latest block number") + l.Info().Uint64("Starting Block", startBlock).Uint64("Ending Block", endBlock).Msg("Test Block Range") + + startTimeTestReport := time.Now() + l.Info().Str("START_TIME", startTimeTestReport.String()).Msg("Test reporting started") + + upkeepDelaysFast := make([][]int64, 0) + upkeepDelaysRecovery := make([][]int64, 0) + + var batchSize uint64 = 500 + + if endBlock-startBlock < batchSize { + batchSize = endBlock - startBlock + } + + for _, consumerContract := range consumerContracts { + var ( + logs []types.Log + address = common.HexToAddress(consumerContract.Address()) + timeout = 5 * time.Second + ) + for fromBlock := startBlock; fromBlock < endBlock; fromBlock += batchSize + 1 { + filterQuery := geth.FilterQuery{ + Addresses: []common.Address{address}, + FromBlock: big.NewInt(0).SetUint64(fromBlock), + ToBlock: big.NewInt(0).SetUint64(fromBlock + batchSize), + Topics: [][]common.Hash{{consumerABI.Events["PerformingUpkeep"].ID}}, + } + err = fmt.Errorf("initial error") // to ensure our for loop runs at least once + for err != nil { + var ( + logsInBatch []types.Log + ) + ctx2, cancel := context.WithTimeout(ctx, timeout) + logsInBatch, err = chainClient.FilterLogs(ctx2, filterQuery) + cancel() + if err != nil { + l.Error().Err(err). + Interface("FilterQuery", filterQuery). + Str("Contract Address", consumerContract.Address()). + Str("Timeout", timeout.String()). + Msg("Error getting logs") + timeout = time.Duration(math.Min(float64(timeout)*2, float64(2*time.Minute))) + continue + } + l.Debug(). + Interface("FilterQuery", filterQuery). + Str("Contract Address", consumerContract.Address()). + Str("Timeout", timeout.String()). + Msg("Collected logs") + logs = append(logs, logsInBatch...) 
+ } + } + + if len(logs) > 0 { + delayFast := make([]int64, 0) + delayRecovery := make([]int64, 0) + for _, log := range logs { + eventDetails, err := consumerABI.EventByID(log.Topics[0]) + require.NoError(t, err, "Error getting event details") + consumer, err := simple_log_upkeep_counter_wrapper.NewSimpleLogUpkeepCounter( + address, chainClient.Backend(), + ) + require.NoError(t, err, "Error getting consumer contract") + if eventDetails.Name == "PerformingUpkeep" { + parsedLog, err := consumer.ParsePerformingUpkeep(log) + require.NoError(t, err, "Error parsing log") + if parsedLog.IsRecovered { + delayRecovery = append(delayRecovery, parsedLog.TimeToPerform.Int64()) + } else { + delayFast = append(delayFast, parsedLog.TimeToPerform.Int64()) + } + } + } + upkeepDelaysFast = append(upkeepDelaysFast, delayFast) + upkeepDelaysRecovery = append(upkeepDelaysRecovery, delayRecovery) + } + } + + for _, triggerContract := range triggerContracts { + var ( + logs []types.Log + address = triggerContract.Address() + timeout = 5 * time.Second + ) + for fromBlock := startBlock; fromBlock < endBlock; fromBlock += batchSize + 1 { + filterQuery := geth.FilterQuery{ + Addresses: []common.Address{address}, + FromBlock: big.NewInt(0).SetUint64(fromBlock), + ToBlock: big.NewInt(0).SetUint64(fromBlock + batchSize), + Topics: [][]common.Hash{{emitterABI.Events["Log4"].ID}, {bytes1}, {bytes1}}, + } + err = fmt.Errorf("initial error") // to ensure our for loop runs at least once + for err != nil { + var ( + logsInBatch []types.Log + ) + ctx2, cancel := context.WithTimeout(ctx, timeout) + logsInBatch, err = chainClient.FilterLogs(ctx2, filterQuery) + cancel() + if err != nil { + l.Error().Err(err). + Interface("FilterQuery", filterQuery). + Str("Contract Address", triggerContract.Address().Hex()). + Str("Timeout", timeout.String()). + Msg("Error getting logs") + timeout = time.Duration(math.Min(float64(timeout)*2, float64(2*time.Minute))) + continue + } + l.Debug(). 
+ Interface("FilterQuery", filterQuery). + Str("Contract Address", triggerContract.Address().Hex()). + Str("Timeout", timeout.String()). + Msg("Collected logs") + logs = append(logs, logsInBatch...) + } + } + numberOfEventsEmitted = numberOfEventsEmitted + int64(len(logs)) + } + + l.Info().Int64("Number of Events Emitted", numberOfEventsEmitted).Msg("Number of Events Emitted") + + l.Info(). + Interface("Upkeep Delays Fast", upkeepDelaysFast). + Interface("Upkeep Delays Recovered", upkeepDelaysRecovery). + Msg("Upkeep Delays") + + var allUpkeepDelays []int64 + var allUpkeepDelaysFast []int64 + var allUpkeepDelaysRecovery []int64 + + for _, upkeepDelay := range upkeepDelaysFast { + allUpkeepDelays = append(allUpkeepDelays, upkeepDelay...) + allUpkeepDelaysFast = append(allUpkeepDelaysFast, upkeepDelay...) + } + + for _, upkeepDelay := range upkeepDelaysRecovery { + allUpkeepDelays = append(allUpkeepDelays, upkeepDelay...) + allUpkeepDelaysRecovery = append(allUpkeepDelaysRecovery, upkeepDelay...) + } + + avgF, medianF, ninetyPctF, ninetyNinePctF, maximumF := testreporters.IntListStats(allUpkeepDelaysFast) + avgR, medianR, ninetyPctR, ninetyNinePctR, maximumR := testreporters.IntListStats(allUpkeepDelaysRecovery) + eventsMissed := (numberOfEventsEmitted) - int64(len(allUpkeepDelays)) + percentMissed := float64(eventsMissed) / float64(numberOfEventsEmitted) * 100 + l.Info(). + Float64("Average", avgF).Int64("Median", medianF). + Int64("90th Percentile", ninetyPctF).Int64("99th Percentile", ninetyNinePctF). + Int64("Max", maximumF).Msg("Upkeep Delays Fast Execution in seconds") + l.Info(). + Float64("Average", avgR).Int64("Median", medianR). + Int64("90th Percentile", ninetyPctR).Int64("99th Percentile", ninetyNinePctR). + Int64("Max", maximumR).Msg("Upkeep Delays Recovery Execution in seconds") + l.Info(). + Int("Total Perform Count", len(allUpkeepDelays)). + Int("Perform Count Fast Execution", len(allUpkeepDelaysFast)). 
+ Int("Perform Count Recovery Execution", len(allUpkeepDelaysRecovery)). + Int64("Total Events Emitted", numberOfEventsEmitted). + Int64("Total Events Missed", eventsMissed). + Float64("Percent Missed", percentMissed). + Msg("Test completed") + + testReportFormat := `Upkeep Delays in seconds - Fast Execution +Average: %f +Median: %d +90th Percentile: %d +99th Percentile: %d +Max: %d + +Upkeep Delays in seconds - Recovery Execution +Average: %f +Median: %d +90th Percentile: %d +99th Percentile: %d +Max: %d + +Total Perform Count: %d +Perform Count Fast Execution: %d +Perform Count Recovery Execution: %d +Total Log Triggering Events Emitted: %d +Total Events Missed: %d +Percent Missed: %f +Test Duration: %s` + + endTimeTestReport := time.Now() + testReDuration := endTimeTestReport.Sub(startTimeTestReport) + l.Info(). + Str("END_TIME", endTimeTestReport.String()). + Str("Duration", testReDuration.String()). + Msg("Test reporting ended") + + testReport := fmt.Sprintf(testReportFormat, avgF, medianF, ninetyPctF, ninetyNinePctF, maximumF, + avgR, medianR, ninetyPctR, ninetyNinePctR, maximumR, len(allUpkeepDelays), len(allUpkeepDelaysFast), + len(allUpkeepDelaysRecovery), numberOfEventsEmitted, eventsMissed, percentMissed, testExDuration.String()) + + _, err = sendSlackNotification("Finished", l, &loadedTestConfig, testEnvironment.Cfg.Namespace, strconv.Itoa(*loadedTestConfig.Automation.General.NumberOfNodes), + strconv.FormatInt(startTimeTestSetup.UnixMilli(), 10), strconv.FormatInt(time.Now().UnixMilli(), 10), + []slack.Block{extraBlockWithText("\bTest Report\b\n```" + testReport + "```")}, slack.MsgOptionTS(ts)) + if err != nil { + l.Error().Err(err).Msg("Error sending slack notification") + } + + t.Cleanup(func() { + if err = actions.TeardownRemoteSuite(t, testEnvironment.Cfg.Namespace, pluginNodes, nil, &loadedTestConfig, chainClient); err != nil { + l.Error().Err(err).Msg("Error when tearing down remote suite") + } + }) + +} diff --git 
a/integration-tests/load/automationv2_1/gun.go b/integration-tests/load/automationv2_1/gun.go new file mode 100644 index 00000000..e0653821 --- /dev/null +++ b/integration-tests/load/automationv2_1/gun.go @@ -0,0 +1,104 @@ +package automationv2_1 + +import ( + "math/big" + "sync" + + "github.com/rs/zerolog" + "github.com/goplugin/wasp" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_emitter" + + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +type LogTriggerConfig struct { + Address string + NumberOfEvents int64 + NumberOfSpamMatchingEvents int64 + NumberOfSpamNonMatchingEvents int64 +} + +type LogTriggerGun struct { + data [][]byte + addresses []string + multiCallAddress string + evmClient blockchain.EVMClient + logger zerolog.Logger +} + +func generateCallData(int1 int64, int2 int64, count int64) []byte { + abi, err := log_emitter.LogEmitterMetaData.GetAbi() + if err != nil { + panic(err) + } + data, err := abi.Pack("EmitLog4", big.NewInt(int1), big.NewInt(int2), big.NewInt(count)) + if err != nil { + panic(err) + } + return data +} + +func NewLogTriggerUser( + logger zerolog.Logger, + TriggerConfigs []LogTriggerConfig, + evmClient blockchain.EVMClient, + multicallAddress string, +) *LogTriggerGun { + var data [][]byte + var addresses []string + + for _, c := range TriggerConfigs { + if c.NumberOfEvents > 0 { + d := generateCallData(1, 1, c.NumberOfEvents) + data = append(data, d) + addresses = append(addresses, c.Address) + } + if c.NumberOfSpamMatchingEvents > 0 { + d := generateCallData(1, 2, c.NumberOfSpamMatchingEvents) + data = append(data, d) + addresses = append(addresses, c.Address) + } + if c.NumberOfSpamNonMatchingEvents > 0 { + d := generateCallData(2, 2, c.NumberOfSpamNonMatchingEvents) + data = append(data, d) + addresses = append(addresses, c.Address) + } + } + + return &LogTriggerGun{ + addresses: addresses, + data: data, + logger: logger, + 
multiCallAddress: multicallAddress, + evmClient: evmClient, + } +} + +func (m *LogTriggerGun) Call(_ *wasp.Generator) *wasp.Response { + var wg sync.WaitGroup + var dividedData [][][]byte + d := m.data + chunkSize := 100 + for i := 0; i < len(d); i += chunkSize { + end := i + chunkSize + if end > len(d) { + end = len(d) + } + dividedData = append(dividedData, d[i:end]) + } + for _, a := range dividedData { + wg.Add(1) + go func(a [][]byte, m *LogTriggerGun) *wasp.Response { + defer wg.Done() + _, err := contracts.MultiCallLogTriggerLoadGen(m.evmClient, m.multiCallAddress, m.addresses, a) + if err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + return &wasp.Response{} + }(a, m) + } + wg.Wait() + return &wasp.Response{} +} diff --git a/integration-tests/load/automationv2_1/helpers.go b/integration-tests/load/automationv2_1/helpers.go new file mode 100644 index 00000000..27452372 --- /dev/null +++ b/integration-tests/load/automationv2_1/helpers.go @@ -0,0 +1,65 @@ +package automationv2_1 + +import ( + "fmt" + + "github.com/rs/zerolog" + "github.com/slack-go/slack" + + reportModel "github.com/goplugin/plugin-testing-framework/testreporters" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +func extraBlockWithText(text string) slack.Block { + return slack.NewSectionBlock(slack.NewTextBlockObject( + "mrkdwn", text, false, false), nil, nil) +} + +func sendSlackNotification(header string, l zerolog.Logger, config *tc.TestConfig, namespace string, numberOfNodes, + startingTime string, endingTime string, extraBlocks []slack.Block, msgOption slack.MsgOption) (string, error) { + slackClient := slack.New(reportModel.SlackAPIKey) + + headerText := ":plugin-keepers: Automation Load Test " + header + " :white_check_mark:" + + grafanaUrl, err := config.GetGrafanaBaseURL() + if err != nil { + return "", err + } + + dashboardUrl, err := config.GetGrafanaDashboardURL() + if err != nil { + return "", err + } + + formattedDashboardUrl := 
fmt.Sprintf("%s%s?orgId=1&from=%s&to=%s&var-namespace=%s&var-number_of_nodes=%s", grafanaUrl, dashboardUrl, startingTime, endingTime, namespace, numberOfNodes) + l.Info().Str("Dashboard", formattedDashboardUrl).Msg("Dashboard URL") + + var notificationBlocks []slack.Block + + notificationBlocks = append(notificationBlocks, + slack.NewHeaderBlock(slack.NewTextBlockObject("plain_text", headerText, true, false))) + notificationBlocks = append(notificationBlocks, + slack.NewContextBlock("context_block", slack.NewTextBlockObject("plain_text", namespace, false, false))) + notificationBlocks = append(notificationBlocks, slack.NewDividerBlock()) + if *config.Pyroscope.Enabled { + pyroscopeServer := *config.Pyroscope.ServerUrl + pyroscopeEnvironment := *config.Pyroscope.Environment + + formattedPyroscopeUrl := fmt.Sprintf("%s/?query=plugin-node.cpu{Environment=\"%s\"}&from=%s&to=%s", pyroscopeServer, pyroscopeEnvironment, startingTime, endingTime) + l.Info().Str("Pyroscope", formattedPyroscopeUrl).Msg("Dashboard URL") + notificationBlocks = append(notificationBlocks, slack.NewSectionBlock(slack.NewTextBlockObject("mrkdwn", + fmt.Sprintf("<%s|Pyroscope>", + formattedPyroscopeUrl), false, true), nil, nil)) + } + notificationBlocks = append(notificationBlocks, slack.NewSectionBlock(slack.NewTextBlockObject("mrkdwn", + fmt.Sprintf("<%s|Test Dashboard> \nNotifying <@%s>", + formattedDashboardUrl, reportModel.SlackUserID), false, true), nil, nil)) + + if len(extraBlocks) > 0 { + notificationBlocks = append(notificationBlocks, extraBlocks...) 
+ } + + ts, err := reportModel.SendSlackMessage(slackClient, slack.MsgOptionBlocks(notificationBlocks...), msgOption) + l.Info().Str("ts", ts).Msg("Sent Slack Message") + return ts, err +} diff --git a/integration-tests/load/functions/README.md b/integration-tests/load/functions/README.md new file mode 100644 index 00000000..d85a6172 --- /dev/null +++ b/integration-tests/load/functions/README.md @@ -0,0 +1,79 @@ +### Functions & S4 Gateway Load tests + +## Setup +Export vars +``` +export SELECTED_NETWORKS=MUMBAI +export MUMBAI_KEYS=... +export MUMBAI_URLS=... +export LOKI_TOKEN=... +export LOKI_URL=... +``` +See more config options in [config.toml](./config.toml) + +## Usage + +All tests are split by network and in 3 groups: +- HTTP payload only +- Secrets decoding payload only +- Realistic payload with args/http/secrets + +Load test client is [here](../../../contracts/src/v0.8/functions/tests/v1_X/testhelpers/FunctionsLoadTestClient.sol) + +Load is controlled with 2 params: +- RPS +- requests_per_call (generating more events in a loop in the contract) + +`Soak` is a stable workload for which there **must** be no issues + +`Stress` is a peak workload for which issues **must** be analyzed + +Load test client can execute `78 calls per request` at max (gas limit) + +Functions tests: +``` +go test -v -run TestFunctionsLoad/mumbai_functions_soak_test_http +go test -v -run TestFunctionsLoad/mumbai_functions_stress_test_http +go test -v -run TestFunctionsLoad/mumbai_functions_soak_test_only_secrets +go test -v -run TestFunctionsLoad/mumbai_functions_stress_test_only_secrets +go test -v -run TestFunctionsLoad/mumbai_functions_soak_test_real +go test -v -run TestFunctionsLoad/mumbai_functions_stress_test_real +``` + +Gateway tests: +``` +go test -v -run TestGatewayLoad/gateway_secrets_list_soak_test +go test -v -run TestGatewayLoad/gateway_secrets_set_soak_test +``` + +Chaos suite can be combined with any test, can be found [here](../../chaos/functions/full.yaml) + +Default 
[dashboard](https://pluginlabs.grafana.net/d/FunctionsV1/functionsv1?orgId=1&from=now-5m&to=now&var-go_test_name=All&var-gen_name=All&var-branch=All&var-commit=All&var-call_group=All&refresh=5s) + +## Redeploying client and funding a new sub +When contracts got redeployed on `Mumbai` just comment these lines in config +``` +# comment both client and sub to automatically create a new pair +client_addr = "0x64a351fbAa61681A5a7e569Cc5A691150c4D73D2" +subscription_id = 23 +``` +Then insert new client addr and subscription number back + +## Debug +Show more logs +``` +export WASP_LOG_LEVEL=debug +``` + +### Dashboards + +Deploying dashboard: +``` +export GRAFANA_URL=... +export GRAFANA_TOKEN=... +export DATA_SOURCE_NAME=... +export DASHBOARD_FOLDER=LoadTests +export DASHBOARD_NAME=FunctionsV1 + +go run dashboard.go +``` \ No newline at end of file diff --git a/integration-tests/load/functions/cmd/dashboard.go b/integration-tests/load/functions/cmd/dashboard.go new file mode 100644 index 00000000..7c32d7a9 --- /dev/null +++ b/integration-tests/load/functions/cmd/dashboard.go @@ -0,0 +1,34 @@ +package main + +import ( + "github.com/K-Phoen/grabana/dashboard" + "github.com/K-Phoen/grabana/logs" + "github.com/K-Phoen/grabana/row" + "github.com/goplugin/wasp" +) + +func main() { + lokiDS := "grafanacloud-logs" + d, err := wasp.NewDashboard(nil, + []dashboard.Option{ + dashboard.Row("DON logs (errors)", + row.Collapse(), + row.WithLogs( + "DON logs", + logs.DataSource(lokiDS), + logs.Span(12), + logs.Height("300px"), + logs.Transparent(), + logs.WithLokiTarget(` + { cluster="staging-us-west-2-main", app=~"clc-ocr2-dr-matic-testnet" } | json | level="error" + `), + )), + }, + ) + if err != nil { + panic(err) + } + if _, err := d.Deploy(); err != nil { + panic(err) + } +} diff --git a/integration-tests/load/functions/functions_test.go b/integration-tests/load/functions/functions_test.go new file mode 100644 index 00000000..690e32d2 --- /dev/null +++ 
b/integration-tests/load/functions/functions_test.go @@ -0,0 +1,231 @@ +package loadfunctions + +import ( + "testing" + "time" + + "github.com/goplugin/wasp" + "github.com/stretchr/testify/require" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +func TestFunctionsLoad(t *testing.T) { + generalConfig, err := tc.GetConfig(tc.NoKey, tc.Functions) + require.NoError(t, err, "failed to get config") + + ft, err := SetupLocalLoadTestEnv(&generalConfig, &generalConfig) + require.NoError(t, err) + ft.EVMClient.ParallelTransactions(false) + + labels := map[string]string{ + "branch": "functions_healthcheck", + "commit": "functions_healthcheck", + } + + MonitorLoadStats(t, ft, labels, &generalConfig) + + t.Run("mumbai functions soak test http", func(t *testing.T) { + config, err := tc.GetConfig("Soak", tc.Functions) + require.NoError(t, err, "failed to get config") + cfg := config.Functions + cfgl := config.Logging.Loki + _, err = wasp.NewProfile(). + Add(wasp.NewGenerator(&wasp.Config{ + T: t, + LoadType: wasp.RPS, + GenName: "functions_soak_gen", + RateLimitUnitDuration: 5 * time.Second, + CallTimeout: 3 * time.Minute, + Schedule: wasp.Plain( + *cfg.Performance.RPS, + cfg.Performance.Duration.Duration, + ), + Gun: NewSingleFunctionCallGun( + ft, + ModeHTTPPayload, + *cfg.Performance.RequestsPerCall, + *cfg.Common.FunctionsCallPayloadHTTP, + *cfg.Common.SecretsSlotID, + *cfg.Common.SecretsVersionID, + []string{}, + *cfg.Common.SubscriptionID, + StringToByte32(*cfg.Common.DONID), + ), + Labels: labels, + LokiConfig: wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken), + })). + Run(true) + require.NoError(t, err) + }) + + t.Run("mumbai functions stress test http", func(t *testing.T) { + config, err := tc.GetConfig("Stress", tc.Functions) + require.NoError(t, err, "failed to get config") + cfg := config.Functions + cfgl := config.Logging.Loki + _, err = wasp.NewProfile(). 
+ Add(wasp.NewGenerator(&wasp.Config{ + T: t, + LoadType: wasp.RPS, + GenName: "functions_stress_gen", + RateLimitUnitDuration: 5 * time.Second, + CallTimeout: 3 * time.Minute, + Schedule: wasp.Plain( + *cfg.Performance.RPS, + cfg.Performance.Duration.Duration, + ), + Gun: NewSingleFunctionCallGun( + ft, + ModeHTTPPayload, + *cfg.Performance.RequestsPerCall, + *cfg.Common.FunctionsCallPayloadHTTP, + *cfg.Common.SecretsSlotID, + *cfg.Common.SecretsVersionID, + []string{}, + *cfg.Common.SubscriptionID, + StringToByte32(*cfg.Common.DONID), + ), + Labels: labels, + LokiConfig: wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken), + })). + Run(true) + require.NoError(t, err) + }) + + t.Run("mumbai functions soak test only secrets", func(t *testing.T) { + config, err := tc.GetConfig("SecretsSoak", tc.Functions) + require.NoError(t, err, "failed to get config") + cfg := config.Functions + cfgl := config.Logging.Loki + _, err = wasp.NewProfile(). + Add(wasp.NewGenerator(&wasp.Config{ + T: t, + LoadType: wasp.RPS, + GenName: "functions_soak_gen", + RateLimitUnitDuration: 5 * time.Second, + CallTimeout: 3 * time.Minute, + Schedule: wasp.Plain( + *cfg.Performance.RPS, + cfg.Performance.Duration.Duration, + ), + Gun: NewSingleFunctionCallGun( + ft, + ModeSecretsOnlyPayload, + *cfg.Performance.RequestsPerCall, + *cfg.Common.FunctionsCallPayloadWithSecrets, + *cfg.Common.SecretsSlotID, + *cfg.Common.SecretsVersionID, + []string{}, + *cfg.Common.SubscriptionID, + StringToByte32(*cfg.Common.DONID), + ), + Labels: labels, + LokiConfig: wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken), + })). + Run(true) + require.NoError(t, err) + }) + + t.Run("mumbai functions stress test only secrets", func(t *testing.T) { + config, err := tc.GetConfig("SecretsStress", tc.Functions) + require.NoError(t, err, "failed to get config") + cfg := config.Functions + cfgl := config.Logging.Loki + _, err = wasp.NewProfile(). 
+ Add(wasp.NewGenerator(&wasp.Config{ + T: t, + LoadType: wasp.RPS, + GenName: "functions_stress_gen", + RateLimitUnitDuration: 5 * time.Second, + CallTimeout: 3 * time.Minute, + Schedule: wasp.Plain( + *cfg.Performance.RPS, + cfg.Performance.Duration.Duration, + ), + Gun: NewSingleFunctionCallGun( + ft, + ModeSecretsOnlyPayload, + *cfg.Performance.RequestsPerCall, + *cfg.Common.FunctionsCallPayloadWithSecrets, + *cfg.Common.SecretsSlotID, + *cfg.Common.SecretsVersionID, + []string{}, + *cfg.Common.SubscriptionID, + StringToByte32(*cfg.Common.DONID), + ), + Labels: labels, + LokiConfig: wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken), + })). + Run(true) + require.NoError(t, err) + }) + + t.Run("mumbai functions soak test real", func(t *testing.T) { + config, err := tc.GetConfig("RealSoak", tc.Functions) + require.NoError(t, err, "failed to get config") + cfg := config.Functions + cfgl := config.Logging.Loki + _, err = wasp.NewProfile(). + Add(wasp.NewGenerator(&wasp.Config{ + T: t, + LoadType: wasp.RPS, + GenName: "functions_soak_gen", + RateLimitUnitDuration: 5 * time.Second, + CallTimeout: 3 * time.Minute, + Schedule: wasp.Plain( + *cfg.Performance.RPS, + cfg.Performance.Duration.Duration, + ), + Gun: NewSingleFunctionCallGun( + ft, + ModeReal, + *cfg.Performance.RequestsPerCall, + *cfg.Common.FunctionsCallPayloadReal, + *cfg.Common.SecretsSlotID, + *cfg.Common.SecretsVersionID, + []string{"1", "2", "3", "4"}, + *cfg.Common.SubscriptionID, + StringToByte32(*cfg.Common.DONID), + ), + Labels: labels, + LokiConfig: wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken), + })). + Run(true) + require.NoError(t, err) + }) + + t.Run("mumbai functions stress test real", func(t *testing.T) { + config, err := tc.GetConfig("RealStress", tc.Functions) + require.NoError(t, err, "failed to get config") + cfg := config.Functions + cfgl := config.Logging.Loki + _, err = wasp.NewProfile(). 
+ Add(wasp.NewGenerator(&wasp.Config{ + T: t, + LoadType: wasp.RPS, + GenName: "functions_stress_gen", + RateLimitUnitDuration: 5 * time.Second, + CallTimeout: 3 * time.Minute, + Schedule: wasp.Plain( + *cfg.Performance.RPS, + cfg.Performance.Duration.Duration, + ), + Gun: NewSingleFunctionCallGun( + ft, + ModeReal, + *cfg.Performance.RequestsPerCall, + *cfg.Common.FunctionsCallPayloadReal, + *cfg.Common.SecretsSlotID, + *cfg.Common.SecretsVersionID, + []string{"1", "2", "3", "4"}, + *cfg.Common.SubscriptionID, + StringToByte32(*cfg.Common.DONID), + ), + Labels: labels, + LokiConfig: wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken), + })). + Run(true) + require.NoError(t, err) + }) +} diff --git a/integration-tests/load/functions/gateway.go b/integration-tests/load/functions/gateway.go new file mode 100644 index 00000000..1ccd5b46 --- /dev/null +++ b/integration-tests/load/functions/gateway.go @@ -0,0 +1,226 @@ +package loadfunctions + +import ( + "bytes" + "crypto/ecdsa" + "crypto/rand" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/ecies" + "github.com/go-resty/resty/v2" + "github.com/rs/zerolog/log" + "github.com/goplugin/tdh2/go/tdh2/tdh2easy" + + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/api" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions" + "github.com/goplugin/pluginv3.0/v2/core/services/s4" +) + +type RPCResponse struct { + ID string `json:"id"` + Jsonrpc string `json:"jsonrpc"` + Result struct { + Body struct { + DonID string `json:"don_id"` + MessageID string `json:"message_id"` + Method string `json:"method"` + Payload struct { + NodeResponses []struct { + Body struct { + DonID string `json:"don_id"` + MessageID string `json:"message_id"` + Method string `json:"method"` + Payload struct { + Success bool `json:"success"` + } `json:"payload"` + Receiver string 
`json:"receiver"` + } `json:"body"` + Signature string `json:"signature"` + } `json:"node_responses"` + Success bool `json:"success"` + } `json:"payload"` + Receiver string `json:"receiver"` + } `json:"body"` + Signature string `json:"signature"` + } `json:"result"` +} + +func UploadS4Secrets(rc *resty.Client, s4Cfg *S4SecretsCfg) (uint8, uint64, error) { + key, err := crypto.HexToECDSA(s4Cfg.PrivateKey) + if err != nil { + return 0, 0, err + } + address := crypto.PubkeyToAddress(key.PublicKey) + var payloadJSON []byte + envelope := s4.Envelope{ + Address: address.Bytes(), + SlotID: s4Cfg.S4SetSlotID, + Version: s4Cfg.S4SetVersion, + Payload: []byte(s4Cfg.S4SetPayload), + Expiration: time.Now().UnixMilli() + s4Cfg.S4SetExpirationPeriod, + } + signature, err := envelope.Sign(key) + if err != nil { + return 0, 0, err + } + + s4SetPayload := functions.SecretsSetRequest{ + SlotID: envelope.SlotID, + Version: envelope.Version, + Expiration: envelope.Expiration, + Payload: []byte(s4Cfg.S4SetPayload), + Signature: signature, + } + + payloadJSON, err = json.Marshal(s4SetPayload) + if err != nil { + return 0, 0, err + } + + msg := &api.Message{ + Body: api.MessageBody{ + MessageId: s4Cfg.MessageID, + Method: s4Cfg.Method, + DonId: s4Cfg.DonID, + Payload: json.RawMessage(payloadJSON), + }, + } + + err = msg.Sign(key) + if err != nil { + return 0, 0, err + } + codec := api.JsonRPCCodec{} + rawMsg, err := codec.EncodeRequest(msg) + if err != nil { + return 0, 0, err + } + var result *RPCResponse + resp, err := rc.R(). + SetBody(rawMsg). 
+ Post(s4Cfg.GatewayURL) + if err != nil { + return 0, 0, err + } + if resp.StatusCode() != 200 { + return 0, 0, fmt.Errorf("status code was %d, expected 200", resp.StatusCode()) + } + if err := json.Unmarshal(resp.Body(), &result); err != nil { + return 0, 0, err + } + log.Debug().Interface("Result", result).Msg("S4 secrets_set response result") + for _, nodeResponse := range result.Result.Body.Payload.NodeResponses { + if !nodeResponse.Body.Payload.Success { + return 0, 0, fmt.Errorf("node response was not successful") + } + } + return uint8(envelope.SlotID), envelope.Version, nil +} + +func ListS4Secrets(rc *resty.Client, s4Cfg *S4SecretsCfg) error { + key, err := crypto.HexToECDSA(s4Cfg.PrivateKey) + if err != nil { + return err + } + + msg := &api.Message{ + Body: api.MessageBody{ + MessageId: s4Cfg.MessageID, + Method: s4Cfg.Method, + DonId: s4Cfg.DonID, + Receiver: s4Cfg.RecieverAddr, + }, + } + + err = msg.Sign(key) + if err != nil { + return err + } + codec := api.JsonRPCCodec{} + rawMsg, err := codec.EncodeRequest(msg) + if err != nil { + return err + } + msgdec, err := codec.DecodeRequest(rawMsg) + if err != nil { + return err + } + log.Debug().Interface("Request", msgdec).Msg("Sending RPC request") + var result map[string]interface{} + resp, err := rc.R(). + SetBody(rawMsg). 
+ Post(s4Cfg.GatewayURL) + if err != nil { + return err + } + if err := json.Unmarshal(resp.Body(), &result); err != nil { + return err + } + log.Debug().Interface("Result", result).Msg("S4 secrets_list response result") + if resp.StatusCode() != 200 { + return fmt.Errorf("status code was %d, expected 200", resp.StatusCode()) + } + return nil +} + +func ParseTDH2Key(data []byte) (*tdh2easy.PublicKey, error) { + pk := &tdh2easy.PublicKey{} + if err := pk.Unmarshal(data); err != nil { + return nil, err + } + return pk, nil +} + +func EncryptS4Secrets(deployerPk *ecdsa.PrivateKey, tdh2Pk *tdh2easy.PublicKey, donKey []byte, msgJSON string) (string, error) { + // 65 bytes PublicKey format, should start with 0x04 to be processed by crypto.UnmarshalPubkey() + b := make([]byte, 1) + b[0] = 0x04 + donKey = bytes.Join([][]byte{b, donKey}, nil) + donPubKey, err := crypto.UnmarshalPubkey(donKey) + if err != nil { + return "", fmt.Errorf("failed to unmarshal DON key: %w", err) + } + eciesDONPubKey := ecies.ImportECDSAPublic(donPubKey) + signature, err := deployerPk.Sign(rand.Reader, []byte(msgJSON), nil) + if err != nil { + return "", fmt.Errorf("failed to sign the msg with Ethereum key: %w", err) + } + signedSecrets, err := json.Marshal(struct { + Signature []byte `json:"signature"` + Message string `json:"message"` + }{ + Signature: signature, + Message: msgJSON, + }) + if err != nil { + return "", fmt.Errorf("failed to marshal signed secrets: %w", err) + } + ct, err := ecies.Encrypt(rand.Reader, eciesDONPubKey, signedSecrets, nil, nil) + if err != nil { + return "", fmt.Errorf("failed to encrypt with DON key: %w", err) + } + ct0xFormat, err := json.Marshal(map[string]interface{}{"0x0": base64.StdEncoding.EncodeToString(ct)}) + if err != nil { + return "", fmt.Errorf("failed to marshal DON key encrypted format: %w", err) + } + ctTDH2Format, err := tdh2easy.Encrypt(tdh2Pk, ct0xFormat) + if err != nil { + return "", fmt.Errorf("failed to encrypt with TDH2 public key: %w", err) 
+ } + tdh2Message, err := ctTDH2Format.Marshal() + if err != nil { + return "", fmt.Errorf("failed to marshal TDH2 encrypted msg: %w", err) + } + finalMsg, err := json.Marshal(map[string]interface{}{ + "encryptedSecrets": "0x" + hex.EncodeToString(tdh2Message), + }) + if err != nil { + return "", fmt.Errorf("failed to marshal secrets msg: %w", err) + } + return string(finalMsg), nil +} diff --git a/integration-tests/load/functions/gateway_gun.go b/integration-tests/load/functions/gateway_gun.go new file mode 100644 index 00000000..aba517fd --- /dev/null +++ b/integration-tests/load/functions/gateway_gun.go @@ -0,0 +1,124 @@ +package loadfunctions + +import ( + "crypto/ecdsa" + "fmt" + "math/rand" + "strconv" + "time" + + "github.com/go-resty/resty/v2" + "github.com/rs/zerolog/log" + "github.com/goplugin/tdh2/go/tdh2/tdh2easy" + "github.com/goplugin/wasp" + + "github.com/goplugin/pluginv3.0/integration-tests/types" +) + +/* SingleFunctionCallGun is a gun that constantly requests randomness for one feed */ + +type GatewaySecretsSetGun struct { + Cfg types.FunctionsTestConfig + Resty *resty.Client + SlotID uint + Method string + EthereumPrivateKey *ecdsa.PrivateKey + ThresholdPublicKey *tdh2easy.PublicKey + DONPublicKey []byte +} + +func NewGatewaySecretsSetGun(cfg types.FunctionsTestConfig, method string, pKey *ecdsa.PrivateKey, tdh2PubKey *tdh2easy.PublicKey, donPubKey []byte) *GatewaySecretsSetGun { + return &GatewaySecretsSetGun{ + Cfg: cfg, + Resty: resty.New(), + Method: method, + EthereumPrivateKey: pKey, + ThresholdPublicKey: tdh2PubKey, + DONPublicKey: donPubKey, + } +} + +func callSecretsSet(m *GatewaySecretsSetGun) *wasp.Response { + randNum := strconv.Itoa(rand.Intn(100000)) + randSlot := uint(rand.Intn(5)) + version := uint64(time.Now().UnixNano()) + expiration := int64(60 * 60 * 1000) + secret := fmt.Sprintf("{\"ltsecret\": \"%s\"}", randNum) + log.Debug(). + Uint("SlotID", randSlot). + Str("MessageID", randNum). + Uint64("Version", version). 
+ Int64("Expiration", expiration). + Str("Secret", secret). + Msg("Sending S4 envelope") + secrets, err := EncryptS4Secrets( + m.EthereumPrivateKey, + m.ThresholdPublicKey, + m.DONPublicKey, + secret, + ) + if err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + network := m.Cfg.GetNetworkConfig().SelectedNetworks[0] + if len(m.Cfg.GetNetworkConfig().WalletKeys[network]) < 1 { + panic(fmt.Sprintf("no wallet keys found for %s", network)) + } + + cfg := m.Cfg.GetFunctionsConfig() + _, _, err = UploadS4Secrets(m.Resty, &S4SecretsCfg{ + GatewayURL: *cfg.Common.GatewayURL, + PrivateKey: m.Cfg.GetNetworkConfig().WalletKeys[network][0], + MessageID: randNum, + Method: "secrets_set", + DonID: *cfg.Common.DONID, + S4SetSlotID: randSlot, + S4SetVersion: version, + S4SetExpirationPeriod: expiration, + S4SetPayload: secrets, + }) + if err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + return &wasp.Response{} +} + +func callSecretsList(m *GatewaySecretsSetGun) *wasp.Response { + randNum := strconv.Itoa(rand.Intn(100000)) + randSlot := uint(rand.Intn(5)) + version := uint64(time.Now().UnixNano()) + expiration := int64(60 * 60 * 1000) + network := m.Cfg.GetNetworkConfig().SelectedNetworks[0] + if len(m.Cfg.GetNetworkConfig().WalletKeys[network]) < 1 { + panic(fmt.Sprintf("no wallet keys found for %s", network)) + } + cfg := m.Cfg.GetFunctionsConfig() + if err := ListS4Secrets(m.Resty, &S4SecretsCfg{ + GatewayURL: *cfg.Common.GatewayURL, + RecieverAddr: *cfg.Common.Receiver, + PrivateKey: m.Cfg.GetNetworkConfig().WalletKeys[network][0], + MessageID: randNum, + Method: m.Method, + DonID: *cfg.Common.DONID, + S4SetSlotID: randSlot, + S4SetVersion: version, + S4SetExpirationPeriod: expiration, + }); err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + return &wasp.Response{} +} + +// Call implements example gun call, assertions on response bodies should be done here +func (m *GatewaySecretsSetGun) Call(_ 
*wasp.Generator) *wasp.Response { + var res *wasp.Response + switch m.Method { + case "secrets_set": + res = callSecretsSet(m) + case "secrets_list": + res = callSecretsList(m) + default: + panic("gateway gun must use either 'secrets_set' or 'list' methods") + } + return res +} diff --git a/integration-tests/load/functions/gateway_test.go b/integration-tests/load/functions/gateway_test.go new file mode 100644 index 00000000..7e190363 --- /dev/null +++ b/integration-tests/load/functions/gateway_test.go @@ -0,0 +1,82 @@ +package loadfunctions + +import ( + "testing" + + "github.com/goplugin/wasp" + "github.com/stretchr/testify/require" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + "github.com/goplugin/pluginv3.0/v2/core/services/gateway/handlers/functions" +) + +func TestGatewayLoad(t *testing.T) { + listConfig, err := tc.GetConfig("GatewayList", tc.Functions) + require.NoError(t, err) + cfgl := listConfig.Logging.Loki + + require.NoError(t, err) + ft, err := SetupLocalLoadTestEnv(&listConfig, &listConfig) + require.NoError(t, err) + ft.EVMClient.ParallelTransactions(false) + + labels := map[string]string{ + "branch": "gateway_healthcheck", + "commit": "gateway_healthcheck", + } + + secretsListCfg := &wasp.Config{ + LoadType: wasp.RPS, + GenName: functions.MethodSecretsList, + Schedule: wasp.Plain( + *listConfig.Functions.Performance.RPS, + listConfig.Functions.Performance.Duration.Duration, + ), + Gun: NewGatewaySecretsSetGun( + &listConfig, + functions.MethodSecretsList, + ft.EthereumPrivateKey, + ft.ThresholdPublicKey, + ft.DONPublicKey, + ), + Labels: labels, + LokiConfig: wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken), + } + + setConfig, err := tc.GetConfig("GatewaySet", tc.Functions) + require.NoError(t, err) + + secretsSetCfg := &wasp.Config{ + LoadType: wasp.RPS, + GenName: functions.MethodSecretsSet, + Schedule: wasp.Plain( + *setConfig.Functions.Performance.RPS, + 
setConfig.Functions.Performance.Duration.Duration, + ), + Gun: NewGatewaySecretsSetGun( + &setConfig, + functions.MethodSecretsSet, + ft.EthereumPrivateKey, + ft.ThresholdPublicKey, + ft.DONPublicKey, + ), + Labels: labels, + LokiConfig: wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken), + } + + t.Run("gateway secrets list soak test", func(t *testing.T) { + secretsListCfg.T = t + _, err := wasp.NewProfile(). + Add(wasp.NewGenerator(secretsListCfg)). + Run(true) + require.NoError(t, err) + }) + + t.Run("gateway secrets set soak test", func(t *testing.T) { + secretsListCfg.T = t + _, err := wasp.NewProfile(). + Add(wasp.NewGenerator(secretsSetCfg)). + Run(true) + require.NoError(t, err) + }) +} diff --git a/integration-tests/load/functions/onchain_monitoring.go b/integration-tests/load/functions/onchain_monitoring.go new file mode 100644 index 00000000..44903384 --- /dev/null +++ b/integration-tests/load/functions/onchain_monitoring.go @@ -0,0 +1,70 @@ +package loadfunctions + +import ( + "testing" + "time" + + "github.com/rs/zerolog/log" + "github.com/goplugin/wasp" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +/* Monitors on-chain stats of LoadConsumer and pushes them to Loki every second */ + +const ( + LokiTypeLabel = "functions_contracts_load_summary" + ErrMetrics = "failed to get Functions load test metrics" + ErrLokiClient = "failed to create Loki client for monitoring" + ErrLokiPush = "failed to push monitoring metrics to Loki" +) + +type LoadStats struct { + Succeeded uint32 + Errored uint32 + Empty uint32 +} + +func MonitorLoadStats(t *testing.T, ft *FunctionsTest, labels map[string]string, config tc.GlobalTestConfig) { + go func() { + updatedLabels := make(map[string]string) + for k, v := range labels { + updatedLabels[k] = v + } + updatedLabels["type"] = LokiTypeLabel + updatedLabels["go_test_name"] = t.Name() + updatedLabels["gen_name"] = "performance" + cfgl := config.GetLoggingConfig().Loki 
+ lokiConfig := wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken) + lc, err := wasp.NewLokiClient(lokiConfig) + if err != nil { + log.Error().Err(err).Msg(ErrLokiClient) + return + } + if err := ft.LoadTestClient.ResetStats(); err != nil { + log.Error().Err(err).Msg("failed to reset load test client stats") + } + for { + time.Sleep(5 * time.Second) + stats, err := ft.LoadTestClient.GetStats() + if err != nil { + log.Error().Err(err).Msg(ErrMetrics) + } + if stats != nil { + log.Info(). + Hex("LastReqID", []byte(stats.LastRequestID)). + Str("LastResponse", stats.LastResponse). + Str("LastError", stats.LastError). + Uint32("Total", stats.Total). + Uint32("Succeeded", stats.Succeeded). + Uint32("Errored", stats.Errored). + Uint32("Empty", stats.Empty).Msg("On-chain stats for load test client") + if err := lc.HandleStruct(wasp.LabelsMapToModel(updatedLabels), time.Now(), stats); err != nil { + log.Error().Err(err).Msg(ErrLokiPush) + } + } else { + log.Warn().Msg("No stats to push to Loki") + } + } + }() +} diff --git a/integration-tests/load/functions/request_gun.go b/integration-tests/load/functions/request_gun.go new file mode 100644 index 00000000..2c5105f8 --- /dev/null +++ b/integration-tests/load/functions/request_gun.go @@ -0,0 +1,110 @@ +package loadfunctions + +import ( + "github.com/goplugin/wasp" +) + +type TestMode int + +const ( + ModeHTTPPayload TestMode = iota + ModeSecretsOnlyPayload + ModeReal +) + +type SingleFunctionCallGun struct { + ft *FunctionsTest + mode TestMode + times uint32 + source string + slotID uint8 + slotVersion uint64 + args []string + subscriptionId uint64 + jobId [32]byte +} + +func NewSingleFunctionCallGun( + ft *FunctionsTest, + mode TestMode, + times uint32, + source string, + slotID uint8, + slotVersion uint64, + args []string, + subscriptionId uint64, + jobId [32]byte, +) *SingleFunctionCallGun { + return &SingleFunctionCallGun{ + ft: ft, + mode: mode, + times: times, + source: source, + slotID: 
slotID, + slotVersion: slotVersion, + args: args, + subscriptionId: subscriptionId, + jobId: jobId, + } +} + +func (m *SingleFunctionCallGun) callReal() *wasp.Response { + err := m.ft.LoadTestClient.SendRequestWithDONHostedSecrets( + m.times, + m.source, + m.slotID, + m.slotVersion, + m.args, + m.subscriptionId, + m.jobId, + ) + if err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + return &wasp.Response{} +} + +func (m *SingleFunctionCallGun) callWithSecrets() *wasp.Response { + err := m.ft.LoadTestClient.SendRequestWithDONHostedSecrets( + m.times, + m.source, + m.slotID, + m.slotVersion, + m.args, + m.subscriptionId, + m.jobId, + ) + if err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + return &wasp.Response{} +} + +func (m *SingleFunctionCallGun) callWithHttp() *wasp.Response { + err := m.ft.LoadTestClient.SendRequest( + m.times, + m.source, + []byte{}, + m.args, + m.subscriptionId, + m.jobId, + ) + if err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + return &wasp.Response{} +} + +// Call implements example gun call, assertions on response bodies should be done here +func (m *SingleFunctionCallGun) Call(_ *wasp.Generator) *wasp.Response { + switch m.mode { + case ModeSecretsOnlyPayload: + return m.callWithSecrets() + case ModeHTTPPayload: + return m.callWithHttp() + case ModeReal: + return m.callReal() + default: + panic("test mode must be ModeSecretsOnlyPayload, ModeHTTPPayload or ModeReal") + } +} diff --git a/integration-tests/load/functions/setup.go b/integration-tests/load/functions/setup.go new file mode 100644 index 00000000..47103a7e --- /dev/null +++ b/integration-tests/load/functions/setup.go @@ -0,0 +1,187 @@ +package loadfunctions + +import ( + "crypto/ecdsa" + "fmt" + "math/big" + mrand "math/rand" + "strconv" + "time" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/go-resty/resty/v2" + "github.com/rs/zerolog/log" + "github.com/goplugin/tdh2/go/tdh2/tdh2easy" + 
+ "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/networks" + + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + "github.com/goplugin/pluginv3.0/integration-tests/types" + pluginutils "github.com/goplugin/pluginv3.0/v2/core/chains/evm/utils" +) + +type FunctionsTest struct { + EVMClient blockchain.EVMClient + ContractDeployer contracts.ContractDeployer + ContractLoader contracts.ContractLoader + LinkToken contracts.LinkToken + Coordinator contracts.FunctionsCoordinator + Router contracts.FunctionsRouter + LoadTestClient contracts.FunctionsLoadTestClient + EthereumPrivateKey *ecdsa.PrivateKey + EthereumPublicKey *ecdsa.PublicKey + ThresholdPublicKey *tdh2easy.PublicKey + DONPublicKey []byte + ThresholdPublicKeyBytes []byte + ThresholdEncryptedSecrets string +} + +type S4SecretsCfg struct { + GatewayURL string + PrivateKey string + RecieverAddr string + MessageID string + Method string + DonID string + S4SetSlotID uint + S4SetVersion uint64 + S4SetExpirationPeriod int64 + S4SetPayload string +} + +func SetupLocalLoadTestEnv(globalConfig tc.GlobalTestConfig, functionsConfig types.FunctionsTestConfig) (*FunctionsTest, error) { + selectedNetwork := networks.MustGetSelectedNetworkConfig(globalConfig.GetNetworkConfig())[0] + bc, err := blockchain.NewEVMClientFromNetwork(selectedNetwork, log.Logger) + if err != nil { + return nil, err + } + cd, err := contracts.NewContractDeployer(bc, log.Logger) + if err != nil { + return nil, err + } + + cl, err := contracts.NewContractLoader(bc, log.Logger) + if err != nil { + return nil, err + } + if err != nil { + return nil, err + } + + cfg := functionsConfig.GetFunctionsConfig() + + lt, err := cl.LoadPLIToken(*cfg.Common.PLITokenAddr) + if err != nil { + return nil, err + } + coord, err := cl.LoadFunctionsCoordinator(*cfg.Common.Coordinator) + if err != nil { + return nil, err + } + router, err 
:= cl.LoadFunctionsRouter(*cfg.Common.Router) + if err != nil { + return nil, err + } + var loadTestClient contracts.FunctionsLoadTestClient + if cfg.Common.LoadTestClient != nil && *cfg.Common.LoadTestClient != "" { + loadTestClient, err = cl.LoadFunctionsLoadTestClient(*cfg.Common.LoadTestClient) + } else { + loadTestClient, err = cd.DeployFunctionsLoadTestClient(*cfg.Common.Router) + } + if err != nil { + return nil, err + } + if cfg.Common.SubscriptionID == nil { + log.Info().Msg("Creating new subscription") + subID, err := router.CreateSubscriptionWithConsumer(loadTestClient.Address()) + if err != nil { + return nil, fmt.Errorf("failed to create a new subscription: %w", err) + } + encodedSubId, err := pluginutils.ABIEncode(`[{"type":"uint64"}]`, subID) + if err != nil { + return nil, fmt.Errorf("failed to encode subscription ID for funding: %w", err) + } + _, err = lt.TransferAndCall(router.Address(), big.NewInt(0).Mul(cfg.Common.SubFunds, big.NewInt(1e18)), encodedSubId) + if err != nil { + return nil, fmt.Errorf("failed to transferAndCall router, PLI funding: %w", err) + } + cfg.Common.SubscriptionID = &subID + } + pKey, pubKey, err := parseEthereumPrivateKey(selectedNetwork.PrivateKeys[0]) + if err != nil { + return nil, fmt.Errorf("failed to load Ethereum private key: %w", err) + } + tpk, err := coord.GetThresholdPublicKey() + if err != nil { + return nil, fmt.Errorf("failed to get Threshold public key: %w", err) + } + log.Info().Hex("ThresholdPublicKeyBytesHex", tpk).Msg("Loaded coordinator keys") + donPubKey, err := coord.GetDONPublicKey() + if err != nil { + return nil, fmt.Errorf("failed to get DON public key: %w", err) + } + log.Info().Hex("DONPublicKeyHex", donPubKey).Msg("Loaded DON key") + tdh2pk, err := ParseTDH2Key(tpk) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal tdh2 public key: %w", err) + } + var encryptedSecrets string + if cfg.Common.Secrets != nil && *cfg.Common.Secrets != "" { + encryptedSecrets, err = 
EncryptS4Secrets(pKey, tdh2pk, donPubKey, *cfg.Common.Secrets) + if err != nil { + return nil, fmt.Errorf("failed to generate tdh2 secrets: %w", err) + } + slotID, slotVersion, err := UploadS4Secrets(resty.New(), &S4SecretsCfg{ + GatewayURL: *cfg.Common.GatewayURL, + PrivateKey: selectedNetwork.PrivateKeys[0], + MessageID: strconv.Itoa(mrand.Intn(100000-1) + 1), + Method: "secrets_set", + DonID: *cfg.Common.DONID, + S4SetSlotID: uint(mrand.Intn(5)), + S4SetVersion: uint64(time.Now().UnixNano()), + S4SetExpirationPeriod: 60 * 60 * 1000, + S4SetPayload: encryptedSecrets, + }) + if err != nil { + return nil, fmt.Errorf("failed to upload secrets to S4: %w", err) + } + cfg.Common.SecretsSlotID = &slotID + cfg.Common.SecretsVersionID = &slotVersion + log.Info(). + Uint8("SlotID", slotID). + Uint64("SlotVersion", slotVersion). + Msg("Set new secret") + } + return &FunctionsTest{ + EVMClient: bc, + ContractDeployer: cd, + ContractLoader: cl, + LinkToken: lt, + Coordinator: coord, + Router: router, + LoadTestClient: loadTestClient, + EthereumPrivateKey: pKey, + EthereumPublicKey: pubKey, + ThresholdPublicKey: tdh2pk, + ThresholdPublicKeyBytes: tpk, + ThresholdEncryptedSecrets: encryptedSecrets, + DONPublicKey: donPubKey, + }, nil +} + +func parseEthereumPrivateKey(pk string) (*ecdsa.PrivateKey, *ecdsa.PublicKey, error) { + pKey, err := crypto.HexToECDSA(pk) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert Ethereum key from hex: %w", err) + } + + publicKey := pKey.Public() + pubKey, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return nil, nil, fmt.Errorf("failed to get public key from Ethereum private key: %w", err) + } + log.Info().Str("Address", crypto.PubkeyToAddress(*pubKey).Hex()).Msg("Parsed private key for address") + return pKey, pubKey, nil +} diff --git a/integration-tests/load/functions/util.go b/integration-tests/load/functions/util.go new file mode 100644 index 00000000..d5a4a2d6 --- /dev/null +++ b/integration-tests/load/functions/util.go 
@@ -0,0 +1,15 @@ +package loadfunctions + +// StringToByte32 transforms a single string into a [32]byte value +func StringToByte32(s string) [32]byte { + var result [32]byte + + for i, ch := range []byte(s) { + if i > 31 { + break + } + result[i] = ch + } + + return result +} diff --git a/integration-tests/load/ocr/README.md b/integration-tests/load/ocr/README.md new file mode 100644 index 00000000..bf4d6a78 --- /dev/null +++ b/integration-tests/load/ocr/README.md @@ -0,0 +1,28 @@ +### OCR Load tests + +## Setup +These tests can connect to any cluster create with [plugin-cluster](../../../charts/plugin-cluster/README.md) + +Create your cluster, if you already have one just use `kubefwd` +``` +kubectl create ns cl-cluster +devspace use namespace cl-cluster +devspace deploy +sudo kubefwd svc -n cl-cluster +``` + +Change environment connection configuration [here](../../../charts/plugin-cluster/connect.toml) + +If you haven't changed anything in [devspace.yaml](../../../charts/plugin-cluster/devspace.yaml) then default connection configuration will work + +## Usage + +``` +export LOKI_TOKEN=... +export LOKI_URL=... 
+ +go test -v -run TestOCRLoad +go test -v -run TestOCRVolume +``` + +Check test configuration [here](config.toml) \ No newline at end of file diff --git a/integration-tests/load/ocr/gun.go b/integration-tests/load/ocr/gun.go new file mode 100644 index 00000000..1308e582 --- /dev/null +++ b/integration-tests/load/ocr/gun.go @@ -0,0 +1,55 @@ +package ocr + +import ( + "context" + "sync/atomic" + "time" + + "github.com/rs/zerolog" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + + "github.com/goplugin/wasp" +) + +// Gun is a gun for the OCR load test +// it triggers new rounds for provided feed(aggregator) contract +type Gun struct { + roundNum atomic.Int64 + ocrInstances []contracts.OffchainAggregator + cc blockchain.EVMClient + l zerolog.Logger +} + +func NewGun(l zerolog.Logger, cc blockchain.EVMClient, ocrInstances []contracts.OffchainAggregator) *Gun { + return &Gun{ + l: l, + cc: cc, + ocrInstances: ocrInstances, + } +} + +func (m *Gun) Call(_ *wasp.Generator) *wasp.Response { + m.roundNum.Add(1) + requestedRound := m.roundNum.Load() + m.l.Info(). + Int64("RoundNum", requestedRound). + Str("FeedID", m.ocrInstances[0].Address()). 
+ Msg("starting new round") + err := m.ocrInstances[0].RequestNewRound() + if err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + for { + time.Sleep(5 * time.Second) + lr, err := m.ocrInstances[0].GetLatestRound(context.Background()) + if err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + m.l.Info().Interface("LatestRound", lr).Msg("latest round") + if lr.RoundId.Int64() >= requestedRound { + return &wasp.Response{} + } + } +} diff --git a/integration-tests/load/ocr/helper.go b/integration-tests/load/ocr/helper.go new file mode 100644 index 00000000..98adfdbb --- /dev/null +++ b/integration-tests/load/ocr/helper.go @@ -0,0 +1,68 @@ +package ocr + +import ( + "math/big" + "math/rand" + "time" + + "github.com/rs/zerolog" + + "github.com/goplugin/plugin-testing-framework/blockchain" + + client2 "github.com/goplugin/plugin-testing-framework/client" + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +func SetupCluster( + cc blockchain.EVMClient, + cd contracts.ContractDeployer, + workerNodes []*client.PluginK8sClient, +) (contracts.LinkToken, error) { + err := actions.FundPluginNodes(workerNodes, cc, big.NewFloat(3)) + if err != nil { + return nil, err + } + lt, err := cd.DeployLinkTokenContract() + if err != nil { + return nil, err + } + return lt, nil +} + +func SetupFeed( + cc blockchain.EVMClient, + msClient *client2.MockserverClient, + cd contracts.ContractDeployer, + bootstrapNode *client.PluginK8sClient, + workerNodes []*client.PluginK8sClient, + lt contracts.LinkToken, +) ([]contracts.OffchainAggregator, error) { + ocrInstances, err := actions.DeployOCRContracts(1, lt, cd, workerNodes, cc) + if err != nil { + return nil, err + } + err = actions.CreateOCRJobs(ocrInstances, bootstrapNode, workerNodes, 5, msClient, cc.GetChainID().String()) + if err != nil { + return nil, err + } 
+ return ocrInstances, nil +} + +func SimulateEAActivity( + l zerolog.Logger, + eaChangeInterval time.Duration, + ocrInstances []contracts.OffchainAggregator, + workerNodes []*client.PluginK8sClient, + msClient *client2.MockserverClient, +) { + go func() { + for { + time.Sleep(eaChangeInterval) + if err := actions.SetAllAdapterResponsesToTheSameValue(rand.Intn(1000), ocrInstances, workerNodes, msClient); err != nil { + l.Error().Err(err).Msg("failed to update mockserver responses") + } + } + }() +} diff --git a/integration-tests/load/ocr/ocr_test.go b/integration-tests/load/ocr/ocr_test.go new file mode 100644 index 00000000..4bd06b25 --- /dev/null +++ b/integration-tests/load/ocr/ocr_test.go @@ -0,0 +1,80 @@ +package ocr + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/wasp" + + "github.com/goplugin/plugin-testing-framework/logging" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + + "github.com/goplugin/pluginv3.0/integration-tests/k8s" +) + +var ( + CommonTestLabels = map[string]string{ + "branch": "ocr_healthcheck_local", + "commit": "ocr_healthcheck_local", + } +) + +func TestOCRLoad(t *testing.T) { + l := logging.GetTestLogger(t) + cc, msClient, cd, bootstrapNode, workerNodes, err := k8s.ConnectRemote(l) + require.NoError(t, err) + lt, err := SetupCluster(cc, cd, workerNodes) + require.NoError(t, err) + ocrInstances, err := SetupFeed(cc, msClient, cd, bootstrapNode, workerNodes, lt) + require.NoError(t, err) + + config, err := tc.GetConfig("Load", tc.OCR) + require.NoError(t, err) + + cfg := config.OCR + cfgl := config.Logging.Loki + SimulateEAActivity(l, cfg.Load.EAChangeInterval.Duration, ocrInstances, workerNodes, msClient) + + p := wasp.NewProfile() + p.Add(wasp.NewGenerator(&wasp.Config{ + T: t, + GenName: "ocr", + LoadType: wasp.RPS, + CallTimeout: cfg.Load.VerificationTimeout.Duration, + RateLimitUnitDuration: cfg.Load.RateLimitUnitDuration.Duration, + Schedule: wasp.Plain(*cfg.Load.Rate, 
cfg.Load.TestDuration.Duration), + Gun: NewGun(l, cc, ocrInstances), + Labels: CommonTestLabels, + LokiConfig: wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken), + })) + _, err = p.Run(true) + require.NoError(t, err) +} + +func TestOCRVolume(t *testing.T) { + l := logging.GetTestLogger(t) + cc, msClient, cd, bootstrapNode, workerNodes, err := k8s.ConnectRemote(l) + require.NoError(t, err) + lt, err := SetupCluster(cc, cd, workerNodes) + require.NoError(t, err) + config, err := tc.GetConfig("Volume", tc.OCR) + require.NoError(t, err) + + cfg := config.OCR + cfgl := config.Logging.Loki + + p := wasp.NewProfile() + p.Add(wasp.NewGenerator(&wasp.Config{ + T: t, + GenName: "ocr", + LoadType: wasp.VU, + CallTimeout: cfg.Volume.VerificationTimeout.Duration, + Schedule: wasp.Plain(*cfg.Volume.Rate, cfg.Volume.TestDuration.Duration), + VU: NewVU(l, *cfg.Volume.VURequestsPerUnit, cfg.Volume.RateLimitUnitDuration.Duration, cc, lt, cd, bootstrapNode, workerNodes, msClient), + Labels: CommonTestLabels, + LokiConfig: wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken), + })) + _, err = p.Run(true) + require.NoError(t, err) +} diff --git a/integration-tests/load/ocr/vu.go b/integration-tests/load/ocr/vu.go new file mode 100644 index 00000000..bc5086b7 --- /dev/null +++ b/integration-tests/load/ocr/vu.go @@ -0,0 +1,122 @@ +package ocr + +import ( + "context" + "sync/atomic" + "time" + + "github.com/rs/zerolog" + + "github.com/goplugin/plugin-testing-framework/blockchain" + + "github.com/goplugin/wasp" + "go.uber.org/ratelimit" + + client2 "github.com/goplugin/plugin-testing-framework/client" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +// VU is a virtual user for the OCR load test +// it creates a feed and triggers new rounds +type VU struct { + *wasp.VUControl + rl 
ratelimit.Limiter + rate int + rateUnit time.Duration + roundNum atomic.Int64 + cc blockchain.EVMClient + lt contracts.LinkToken + cd contracts.ContractDeployer + bootstrapNode *client.PluginK8sClient + workerNodes []*client.PluginK8sClient + msClient *client2.MockserverClient + l zerolog.Logger + ocrInstances []contracts.OffchainAggregator +} + +func NewVU( + l zerolog.Logger, + rate int, + rateUnit time.Duration, + cc blockchain.EVMClient, + lt contracts.LinkToken, + cd contracts.ContractDeployer, + bootstrapNode *client.PluginK8sClient, + workerNodes []*client.PluginK8sClient, + msClient *client2.MockserverClient, +) *VU { + return &VU{ + VUControl: wasp.NewVUControl(), + rl: ratelimit.New(rate, ratelimit.Per(rateUnit)), + rate: rate, + rateUnit: rateUnit, + l: l, + cc: cc, + lt: lt, + cd: cd, + msClient: msClient, + bootstrapNode: bootstrapNode, + workerNodes: workerNodes, + } +} + +func (m *VU) Clone(_ *wasp.Generator) wasp.VirtualUser { + return &VU{ + VUControl: wasp.NewVUControl(), + rl: ratelimit.New(m.rate, ratelimit.Per(m.rateUnit)), + rate: m.rate, + rateUnit: m.rateUnit, + l: m.l, + cc: m.cc, + lt: m.lt, + cd: m.cd, + msClient: m.msClient, + bootstrapNode: m.bootstrapNode, + workerNodes: m.workerNodes, + } +} + +func (m *VU) Setup(_ *wasp.Generator) error { + ocrInstances, err := actions.DeployOCRContracts(1, m.lt, m.cd, m.workerNodes, m.cc) + if err != nil { + return err + } + err = actions.CreateOCRJobs(ocrInstances, m.bootstrapNode, m.workerNodes, 5, m.msClient, m.cc.GetChainID().String()) + if err != nil { + return err + } + m.ocrInstances = ocrInstances + return nil +} + +func (m *VU) Teardown(_ *wasp.Generator) error { + return nil +} + +func (m *VU) Call(l *wasp.Generator) { + m.rl.Take() + m.roundNum.Add(1) + requestedRound := m.roundNum.Load() + m.l.Info(). + Int64("RoundNum", requestedRound). + Str("FeedID", m.ocrInstances[0].Address()). 
+ Msg("starting new round") + err := m.ocrInstances[0].RequestNewRound() + if err != nil { + l.ResponsesChan <- &wasp.Response{Error: err.Error(), Failed: true} + } + for { + time.Sleep(5 * time.Second) + lr, err := m.ocrInstances[0].GetLatestRound(context.Background()) + if err != nil { + l.ResponsesChan <- &wasp.Response{Error: err.Error(), Failed: true} + } + m.l.Info().Interface("LatestRound", lr).Msg("latest round") + if lr.RoundId.Int64() >= requestedRound { + l.ResponsesChan <- &wasp.Response{} + } + } +} diff --git a/integration-tests/load/vrfv2/README.md b/integration-tests/load/vrfv2/README.md new file mode 100644 index 00000000..4b00d4ad --- /dev/null +++ b/integration-tests/load/vrfv2/README.md @@ -0,0 +1,22 @@ +### VRFv2 Load tests + +## Usage +``` +export LOKI_TOKEN=... +export LOKI_URL=... + +go test -v -run TestVRFV2Load/vrfv2_soak_test +``` + +### Dashboards + +Deploying dashboard: +``` +export GRAFANA_URL=... +export GRAFANA_TOKEN=... +export DATA_SOURCE_NAME=grafanacloud-logs +export DASHBOARD_FOLDER=LoadTests +export DASHBOARD_NAME=${JobTypeName, for example WaspVRFv2} + +go run dashboard.go +``` \ No newline at end of file diff --git a/integration-tests/load/vrfv2/cmd/dashboard.go b/integration-tests/load/vrfv2/cmd/dashboard.go new file mode 100644 index 00000000..4f1850b5 --- /dev/null +++ b/integration-tests/load/vrfv2/cmd/dashboard.go @@ -0,0 +1,101 @@ +package main + +import ( + "os" + + "github.com/K-Phoen/grabana/dashboard" + "github.com/K-Phoen/grabana/logs" + "github.com/K-Phoen/grabana/row" + "github.com/K-Phoen/grabana/target/prometheus" + "github.com/K-Phoen/grabana/timeseries" + "github.com/K-Phoen/grabana/timeseries/axis" + "github.com/goplugin/wasp" +) + +func main() { + //TODO switch to TOML too? 
+ lokiDS := os.Getenv("DATA_SOURCE_NAME") + d, err := wasp.NewDashboard(nil, + []dashboard.Option{ + dashboard.Row("LoadContractMetrics", + row.WithTimeSeries( + "RequestCount + FulfilmentCount", + timeseries.Span(12), + timeseries.Height("300px"), + timeseries.DataSource(lokiDS), + timeseries.Axis( + axis.Unit("Requests"), + ), + timeseries.WithPrometheusTarget( + ` + last_over_time({type="vrfv2_contracts_load_summary", go_test_name=~"${go_test_name:pipe}", branch=~"${branch:pipe}", commit=~"${commit:pipe}", gen_name=~"${gen_name:pipe}"} + | json + | unwrap RequestCount [$__interval]) by (node_id, go_test_name, gen_name) + `, prometheus.Legend("{{go_test_name}} requests"), + ), + timeseries.WithPrometheusTarget( + ` + last_over_time({type="vrfv2_contracts_load_summary", go_test_name=~"${go_test_name:pipe}", branch=~"${branch:pipe}", commit=~"${commit:pipe}", gen_name=~"${gen_name:pipe}"} + | json + | unwrap FulfilmentCount [$__interval]) by (node_id, go_test_name, gen_name) + `, prometheus.Legend("{{go_test_name}} fulfillments"), + ), + ), + row.WithTimeSeries( + "Fulfillment time (blocks)", + timeseries.Span(12), + timeseries.Height("300px"), + timeseries.DataSource(lokiDS), + timeseries.Axis( + axis.Unit("Blocks"), + ), + timeseries.WithPrometheusTarget( + ` + last_over_time({type="vrfv2_contracts_load_summary", go_test_name=~"${go_test_name:pipe}", branch=~"${branch:pipe}", commit=~"${commit:pipe}", gen_name=~"${gen_name:pipe}"} + | json + | unwrap AverageFulfillmentInMillions [$__interval]) by (node_id, go_test_name, gen_name) / 1e6 + `, prometheus.Legend("{{go_test_name}} avg"), + ), + timeseries.WithPrometheusTarget( + ` + last_over_time({type="vrfv2_contracts_load_summary", go_test_name=~"${go_test_name:pipe}", branch=~"${branch:pipe}", commit=~"${commit:pipe}", gen_name=~"${gen_name:pipe}"} + | json + | unwrap SlowestFulfillment [$__interval]) by (node_id, go_test_name, gen_name) + `, prometheus.Legend("{{go_test_name}} slowest"), + ), + 
timeseries.WithPrometheusTarget( + ` + last_over_time({type="vrfv2_contracts_load_summary", go_test_name=~"${go_test_name:pipe}", branch=~"${branch:pipe}", commit=~"${commit:pipe}", gen_name=~"${gen_name:pipe}"} + | json + | unwrap FastestFulfillment [$__interval]) by (node_id, go_test_name, gen_name) + `, prometheus.Legend("{{go_test_name}} fastest"), + ), + ), + ), + dashboard.Row("CL nodes logs", + row.Collapse(), + row.WithLogs( + "CL nodes logs", + logs.DataSource(lokiDS), + logs.Span(12), + logs.Height("300px"), + logs.Transparent(), + logs.WithLokiTarget(` + {type="log_watch"} + `), + )), + }, + ) + if err != nil { + panic(err) + } + // set env vars + //export GRAFANA_URL=... + //export GRAFANA_TOKEN=... + //export DATA_SOURCE_NAME=Loki + //export DASHBOARD_FOLDER=LoadTests + //export DASHBOARD_NAME=WaspVRFv2 + if _, err := d.Deploy(); err != nil { + panic(err) + } +} diff --git a/integration-tests/load/vrfv2/gun.go b/integration-tests/load/vrfv2/gun.go new file mode 100644 index 00000000..479dcb9a --- /dev/null +++ b/integration-tests/load/vrfv2/gun.go @@ -0,0 +1,82 @@ +package loadvrfv2 + +import ( + "math/rand" + + "github.com/rs/zerolog" + "github.com/goplugin/wasp" + + vrfcommon "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/common" + "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/vrfv2" + "github.com/goplugin/pluginv3.0/integration-tests/types" +) + +/* SingleHashGun is a gun that constantly requests randomness for one feed */ + +type SingleHashGun struct { + contracts *vrfcommon.VRFContracts + keyHash [32]byte + subIDs []uint64 + testConfig types.VRFv2TestConfig + logger zerolog.Logger +} + +func NewSingleHashGun( + contracts *vrfcommon.VRFContracts, + keyHash [32]byte, + subIDs []uint64, + testConfig types.VRFv2TestConfig, + logger zerolog.Logger, +) *SingleHashGun { + return &SingleHashGun{ + contracts: contracts, + keyHash: keyHash, + subIDs: subIDs, + testConfig: testConfig, + logger: logger, + } +} + +// Call 
implements example gun call, assertions on response bodies should be done here +func (m *SingleHashGun) Call(_ *wasp.Generator) *wasp.Response { + //todo - should work with multiple consumers and consumers having different keyhashes and wallets + + vrfv2Config := m.testConfig.GetVRFv2Config().General + //randomly increase/decrease randomness request count per TX + randomnessRequestCountPerRequest := deviateValue(*vrfv2Config.RandomnessRequestCountPerRequest, *vrfv2Config.RandomnessRequestCountPerRequestDeviation) + _, err := vrfv2.RequestRandomnessAndWaitForFulfillment( + m.logger, + //the same consumer is used for all requests and in all subs + m.contracts.VRFV2Consumer[0], + m.contracts.CoordinatorV2, + //randomly pick a subID from pool of subIDs + m.subIDs[randInRange(0, len(m.subIDs)-1)], + &vrfcommon.VRFKeyData{KeyHash: m.keyHash}, + *vrfv2Config.MinimumConfirmations, + *vrfv2Config.CallbackGasLimit, + *vrfv2Config.NumberOfWords, + randomnessRequestCountPerRequest, + *vrfv2Config.RandomnessRequestCountPerRequestDeviation, + vrfv2Config.RandomWordsFulfilledEventTimeout.Duration, + ) + if err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + return &wasp.Response{} +} + +func deviateValue(requestCountPerTX uint16, deviation uint16) uint16 { + if randBool() && requestCountPerTX > deviation { + requestCountPerTX -= uint16(randInRange(0, int(deviation))) + } else { + requestCountPerTX += uint16(randInRange(0, int(deviation))) + } + return requestCountPerTX +} + +func randBool() bool { + return rand.Intn(2) == 1 +} +func randInRange(min int, max int) int { + return rand.Intn(max-min+1) + min +} diff --git a/integration-tests/load/vrfv2/onchain_monitoring.go b/integration-tests/load/vrfv2/onchain_monitoring.go new file mode 100644 index 00000000..c5278688 --- /dev/null +++ b/integration-tests/load/vrfv2/onchain_monitoring.go @@ -0,0 +1,56 @@ +package loadvrfv2 + +import ( + "context" + "testing" + "time" + + "github.com/rs/zerolog/log" + 
"github.com/goplugin/wasp" + + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +/* Monitors on-chain stats of LoadConsumer and pushes them to Loki every second */ + +const ( + LokiTypeLabel = "vrfv2_contracts_load_summary" + ErrMetrics = "failed to get VRFv2 load test metrics" + ErrLokiClient = "failed to create Loki client for monitoring" + ErrLokiPush = "failed to push monitoring metrics to Loki" +) + +func MonitorLoadStats(lc *wasp.LokiClient, consumer contracts.VRFv2LoadTestConsumer, labels map[string]string) { + go func() { + for { + time.Sleep(1 * time.Second) + metrics := GetLoadTestMetrics(consumer) + SendMetricsToLoki(metrics, lc, labels) + } + }() +} + +func UpdateLabels(labels map[string]string, t *testing.T) map[string]string { + updatedLabels := make(map[string]string) + for k, v := range labels { + updatedLabels[k] = v + } + updatedLabels["type"] = LokiTypeLabel + updatedLabels["go_test_name"] = t.Name() + updatedLabels["gen_name"] = "performance" + return updatedLabels +} + +func SendMetricsToLoki(metrics *contracts.VRFLoadTestMetrics, lc *wasp.LokiClient, updatedLabels map[string]string) { + if err := lc.HandleStruct(wasp.LabelsMapToModel(updatedLabels), time.Now(), metrics); err != nil { + log.Error().Err(err).Msg(ErrLokiPush) + } +} + +func GetLoadTestMetrics(consumer contracts.VRFv2LoadTestConsumer) *contracts.VRFLoadTestMetrics { + metrics, err := consumer.GetLoadTestMetrics(context.Background()) + if err != nil { + log.Error().Err(err).Msg(ErrMetrics) + } + return metrics +} diff --git a/integration-tests/load/vrfv2/vrfv2_test.go b/integration-tests/load/vrfv2/vrfv2_test.go new file mode 100644 index 00000000..fde43c83 --- /dev/null +++ b/integration-tests/load/vrfv2/vrfv2_test.go @@ -0,0 +1,358 @@ +package loadvrfv2 + +import ( + "context" + "math/big" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/goplugin/wasp" + + 
"github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/conversions" + "github.com/goplugin/pluginv3.0/integration-tests/actions" + vrfcommon "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/common" + "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/vrfv2" + + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + "github.com/goplugin/pluginv3.0/integration-tests/testreporters" + + "github.com/stretchr/testify/require" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +var ( + env *test_env.CLClusterTestEnv + vrfContracts *vrfcommon.VRFContracts + vrfKeyData *vrfcommon.VRFKeyData + subIDs []uint64 + eoaWalletAddress string + + labels = map[string]string{ + "branch": "vrfv2_healthcheck", + "commit": "vrfv2_healthcheck", + } +) + +func TestVRFV2Performance(t *testing.T) { + l := logging.GetTestLogger(t) + + testType, err := tc.GetConfigurationNameFromEnv() + require.NoError(t, err) + testConfig, err := tc.GetConfig(testType, tc.VRFv2) + require.NoError(t, err) + + testReporter := &testreporters.VRFV2TestReporter{} + vrfv2Config := testConfig.VRFv2 + + cfgl := testConfig.Logging.Loki + lokiConfig := wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken) + lc, err := wasp.NewLokiClient(lokiConfig) + if err != nil { + l.Error().Err(err).Msg(ErrLokiClient) + return + } + + updatedLabels := UpdateLabels(labels, t) + + l.Info(). + Str("Test Type", string(testType)). + Str("Test Duration", vrfv2Config.Performance.TestDuration.Duration.Truncate(time.Second).String()). + Int64("RPS", *vrfv2Config.Performance.RPS). + Str("RateLimitUnitDuration", vrfv2Config.Performance.RateLimitUnitDuration.String()). + Uint16("RandomnessRequestCountPerRequest", *vrfv2Config.General.RandomnessRequestCountPerRequest). 
+ Uint16("RandomnessRequestCountPerRequestDeviation", *vrfv2Config.General.RandomnessRequestCountPerRequestDeviation). + Bool("UseExistingEnv", *vrfv2Config.Performance.UseExistingEnv). + Msg("Performance Test Configuration") + + if *vrfv2Config.Performance.UseExistingEnv { + //todo: temporary solution with envconfig and toml config until VRF-662 is implemented + cfg := testConfig.VRFv2 + + vrfv2Config.Performance.CoordinatorAddress = cfg.ExistingEnvConfig.CoordinatorAddress + vrfv2Config.Performance.ConsumerAddress = cfg.ExistingEnvConfig.ConsumerAddress + vrfv2Config.Performance.LinkAddress = cfg.ExistingEnvConfig.LinkAddress + vrfv2Config.General.SubscriptionFundingAmountLink = cfg.ExistingEnvConfig.SubFunding.SubFundsLink + vrfv2Config.Performance.SubID = cfg.ExistingEnvConfig.SubID + vrfv2Config.Performance.KeyHash = cfg.ExistingEnvConfig.KeyHash + + env, err = test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&testConfig). + WithCustomCleanup( + func() { + teardown(t, vrfContracts.VRFV2Consumer[0], lc, updatedLabels, testReporter, string(testType), &testConfig) + if env.EVMClient.NetworkSimulated() { + l.Info(). + Str("Network Name", env.EVMClient.GetNetworkName()). + Msg("Network is a simulated network. Skipping fund return for Coordinator Subscriptions.") + } else { + if *vrfv2Config.Common.CancelSubsAfterTestRun { + //cancel subs and return funds to sub owner + cancelSubsAndReturnFunds(subIDs, l) + } + } + }). 
+ Build() + + require.NoError(t, err, "error creating test env") + + coordinator, err := env.ContractLoader.LoadVRFCoordinatorV2(*vrfv2Config.Performance.CoordinatorAddress) + require.NoError(t, err) + + var consumers []contracts.VRFv2LoadTestConsumer + if *cfg.ExistingEnvConfig.CreateFundSubsAndAddConsumers { + linkToken, err := env.ContractLoader.LoadPLIToken(*vrfv2Config.Performance.LinkAddress) + require.NoError(t, err) + consumers, err = vrfv2.DeployVRFV2Consumers(env.ContractDeployer, coordinator.Address(), 1) + require.NoError(t, err) + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, vrfcommon.ErrWaitTXsComplete) + l.Info(). + Str("Coordinator", *cfg.ExistingEnvConfig.CoordinatorAddress). + Int("Number of Subs to create", *vrfv2Config.General.NumberOfSubToCreate). + Msg("Creating and funding subscriptions, deploying and adding consumers to subs") + subIDs, err = vrfv2.CreateFundSubsAndAddConsumers( + env, + big.NewFloat(*cfg.General.SubscriptionFundingAmountLink), + linkToken, + coordinator, + consumers, + *vrfv2Config.General.NumberOfSubToCreate, + ) + require.NoError(t, err) + } else { + consumer, err := env.ContractLoader.LoadVRFv2LoadTestConsumer(*vrfv2Config.Performance.ConsumerAddress) + require.NoError(t, err) + consumers = append(consumers, consumer) + subIDs = append(subIDs, *vrfv2Config.Performance.SubID) + } + + err = FundNodesIfNeeded(&testConfig, env.EVMClient, l) + require.NoError(t, err) + + vrfContracts = &vrfcommon.VRFContracts{ + CoordinatorV2: coordinator, + VRFV2Consumer: consumers, + BHS: nil, + } + + vrfKeyData = &vrfcommon.VRFKeyData{ + VRFKey: nil, + EncodedProvingKey: [2]*big.Int{}, + KeyHash: common.HexToHash(*vrfv2Config.Performance.KeyHash), + } + + } else { + //todo: temporary solution with envconfig and toml config until VRF-662 is implemented + testConfig.Common.PluginNodeFunding = testConfig.VRFv2.NewEnvConfig.NodeSendingKeyFunding + vrfv2Config.General.SubscriptionFundingAmountLink = 
testConfig.VRFv2.NewEnvConfig.Funding.SubFundsLink + + network, err := actions.EthereumNetworkConfigFromConfig(l, &testConfig) + require.NoError(t, err, "Error building ethereum network config") + env, err = test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&testConfig). + WithPrivateEthereumNetwork(network). + WithCLNodes(1). + WithFunding(big.NewFloat(*testConfig.Common.PluginNodeFunding)). + WithCustomCleanup( + func() { + teardown(t, vrfContracts.VRFV2Consumer[0], lc, updatedLabels, testReporter, string(testType), &testConfig) + + if env.EVMClient.NetworkSimulated() { + l.Info(). + Str("Network Name", env.EVMClient.GetNetworkName()). + Msg("Network is a simulated network. Skipping fund return for Coordinator Subscriptions.") + } else { + if *testConfig.VRFv2.Common.CancelSubsAfterTestRun { + //cancel subs and return funds to sub owner + cancelSubsAndReturnFunds(subIDs, l) + } + } + if err := env.Cleanup(); err != nil { + l.Error().Err(err).Msg("Error cleaning up test environment") + } + }). 
+ Build() + + require.NoError(t, err, "error creating test env") + + env.ParallelTransactions(true) + + mockETHLinkFeed, err := actions.DeployMockETHLinkFeed(env.ContractDeployer, big.NewInt(*vrfv2Config.General.LinkNativeFeedResponse)) + require.NoError(t, err, "error deploying mock ETH/PLI feed") + + linkToken, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err, "error deploying PLI contract") + + useVRFOwner := true + useTestCoordinator := true + + vrfContracts, subIDs, vrfKeyData, _, err = vrfv2.SetupVRFV2Environment( + env, + []vrfcommon.VRFNodeType{vrfcommon.VRF}, + &testConfig, + useVRFOwner, + useTestCoordinator, + linkToken, + mockETHLinkFeed, + //register proving key against EOA address in order to return funds to this address + env.EVMClient.GetDefaultWallet().Address(), + 0, + 1, + *vrfv2Config.General.NumberOfSubToCreate, + l, + ) + require.NoError(t, err, "error setting up VRF v2 env") + } + eoaWalletAddress = env.EVMClient.GetDefaultWallet().Address() + + l.Debug().Int("Number of Subs", len(subIDs)).Msg("Subs involved in the test") + for _, subID := range subIDs { + subscription, err := vrfContracts.CoordinatorV2.GetSubscription(context.Background(), subID) + require.NoError(t, err, "error getting subscription information for subscription %d", subID) + vrfv2.LogSubDetails(l, subscription, subID, vrfContracts.CoordinatorV2) + } + + singleFeedConfig := &wasp.Config{ + T: t, + LoadType: wasp.RPS, + GenName: "gun", + RateLimitUnitDuration: vrfv2Config.Performance.RateLimitUnitDuration.Duration, + Gun: NewSingleHashGun( + vrfContracts, + vrfKeyData.KeyHash, + subIDs, + &testConfig, + l, + ), + Labels: labels, + LokiConfig: lokiConfig, + CallTimeout: 2 * time.Minute, + } + require.Len(t, vrfContracts.VRFV2Consumer, 1, "only one consumer should be created for Load Test") + consumer := vrfContracts.VRFV2Consumer[0] + err = consumer.ResetMetrics() + require.NoError(t, err) + MonitorLoadStats(lc, consumer, updatedLabels) + + // is our 
"job" stable at all, no memory leaks, no flaking performance under some RPS? + t.Run("vrfv2 performance test", func(t *testing.T) { + + singleFeedConfig.Schedule = wasp.Plain( + *vrfv2Config.Performance.RPS, + vrfv2Config.Performance.TestDuration.Duration, + ) + _, err = wasp.NewProfile(). + Add(wasp.NewGenerator(singleFeedConfig)). + Run(true) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(1) + //todo - timeout should be configurable depending on the perf test type + requestCount, fulfilmentCount, err := vrfv2.WaitForRequestCountEqualToFulfilmentCount(consumer, 2*time.Minute, &wg) + require.NoError(t, err) + wg.Wait() + + l.Info(). + Interface("Request Count", requestCount). + Interface("Fulfilment Count", fulfilmentCount). + Msg("Final Request/Fulfilment Stats") + }) + +} + +func cancelSubsAndReturnFunds(subIDs []uint64, l zerolog.Logger) { + for _, subID := range subIDs { + l.Info(). + Uint64("Returning funds from SubID", subID). + Str("Returning funds to", eoaWalletAddress). 
+ Msg("Canceling subscription and returning funds to subscription owner") + pendingRequestsExist, err := vrfContracts.CoordinatorV2.PendingRequestsExist(context.Background(), subID) + if err != nil { + l.Error().Err(err).Msg("Error checking if pending requests exist") + } + if !pendingRequestsExist { + _, err := vrfContracts.CoordinatorV2.CancelSubscription(subID, common.HexToAddress(eoaWalletAddress)) + if err != nil { + l.Error().Err(err).Msg("Error canceling subscription") + } + } else { + l.Error().Uint64("Sub ID", subID).Msg("Pending requests exist for subscription, cannot cancel subscription and return funds") + } + } +} + +func FundNodesIfNeeded(vrfv2TestConfig tc.VRFv2TestConfig, client blockchain.EVMClient, l zerolog.Logger) error { + cfg := vrfv2TestConfig.GetVRFv2Config() + if cfg.ExistingEnvConfig.NodeSendingKeyFundingMin != nil && *cfg.ExistingEnvConfig.NodeSendingKeyFundingMin > 0 { + for _, sendingKey := range cfg.ExistingEnvConfig.NodeSendingKeys { + address := common.HexToAddress(sendingKey) + sendingKeyBalance, err := client.BalanceAt(context.Background(), address) + if err != nil { + return err + } + fundingAtLeast := conversions.EtherToWei(big.NewFloat(*cfg.ExistingEnvConfig.NodeSendingKeyFundingMin)) + fundingToSendWei := new(big.Int).Sub(fundingAtLeast, sendingKeyBalance) + fundingToSendEth := conversions.WeiToEther(fundingToSendWei) + if fundingToSendWei.Cmp(big.NewInt(0)) == 1 { + l.Info(). + Str("Sending Key", sendingKey). + Str("Sending Key Current Balance", sendingKeyBalance.String()). + Str("Should have at least", fundingAtLeast.String()). + Str("Funding Amount in ETH", fundingToSendEth.String()). + Msg("Funding Node's Sending Key") + err := actions.FundAddress(client, sendingKey, fundingToSendEth) + if err != nil { + return err + } + } else { + l.Info(). + Str("Sending Key", sendingKey). + Str("Sending Key Current Balance", sendingKeyBalance.String()). + Str("Should have at least", fundingAtLeast.String()). 
+ Msg("Skipping Node's Sending Key funding as it has enough funds") + } + } + } + return nil +} + +func teardown( + t *testing.T, + consumer contracts.VRFv2LoadTestConsumer, + lc *wasp.LokiClient, + updatedLabels map[string]string, + testReporter *testreporters.VRFV2TestReporter, + testType string, + testConfig *tc.TestConfig, +) { + //send final results to Loki + metrics := GetLoadTestMetrics(consumer) + SendMetricsToLoki(metrics, lc, updatedLabels) + //set report data for Slack notification + testReporter.SetReportData( + testType, + metrics.RequestCount, + metrics.FulfilmentCount, + metrics.AverageFulfillmentInMillions, + metrics.SlowestFulfillment, + metrics.FastestFulfillment, + testConfig, + ) + + // send Slack notification + err := testReporter.SendSlackNotification(t, nil) + if err != nil { + log.Warn().Err(err).Msg("Error sending Slack notification") + } +} diff --git a/integration-tests/load/vrfv2plus/README.md b/integration-tests/load/vrfv2plus/README.md new file mode 100644 index 00000000..1013a3f4 --- /dev/null +++ b/integration-tests/load/vrfv2plus/README.md @@ -0,0 +1,22 @@ +### VRFv2Plus Load tests + +## Usage +``` +export LOKI_TOKEN=... +export LOKI_URL=... + +go test -v -run TestVRFV2PlusLoad/vrfv2plus_soak_test +``` + +### Dashboards + +Deploying dashboard: +``` +export GRAFANA_URL=... +export GRAFANA_TOKEN=... 
+export DATA_SOURCE_NAME=grafanacloud-logs +export DASHBOARD_FOLDER=LoadTests +export DASHBOARD_NAME=${JobTypeName, for example WaspVRFv2Plus} + +go run dashboard.go +``` \ No newline at end of file diff --git a/integration-tests/load/vrfv2plus/cmd/dashboard.go b/integration-tests/load/vrfv2plus/cmd/dashboard.go new file mode 100644 index 00000000..f1335c9e --- /dev/null +++ b/integration-tests/load/vrfv2plus/cmd/dashboard.go @@ -0,0 +1,101 @@ +package main + +import ( + "os" + + "github.com/K-Phoen/grabana/dashboard" + "github.com/K-Phoen/grabana/logs" + "github.com/K-Phoen/grabana/row" + "github.com/K-Phoen/grabana/target/prometheus" + "github.com/K-Phoen/grabana/timeseries" + "github.com/K-Phoen/grabana/timeseries/axis" + "github.com/goplugin/wasp" +) + +func main() { + //TODO switch to TOML too? + lokiDS := os.Getenv("DATA_SOURCE_NAME") + d, err := wasp.NewDashboard(nil, + []dashboard.Option{ + dashboard.Row("LoadContractMetrics", + row.WithTimeSeries( + "RequestCount + FulfilmentCount", + timeseries.Span(12), + timeseries.Height("300px"), + timeseries.DataSource(lokiDS), + timeseries.Axis( + axis.Unit("Requests"), + ), + timeseries.WithPrometheusTarget( + ` + last_over_time({type="vrfv2plus_contracts_load_summary", go_test_name=~"${go_test_name:pipe}", branch=~"${branch:pipe}", commit=~"${commit:pipe}", gen_name=~"${gen_name:pipe}"} + | json + | unwrap RequestCount [$__interval]) by (node_id, go_test_name, gen_name) + `, prometheus.Legend("{{go_test_name}} requests"), + ), + timeseries.WithPrometheusTarget( + ` + last_over_time({type="vrfv2plus_contracts_load_summary", go_test_name=~"${go_test_name:pipe}", branch=~"${branch:pipe}", commit=~"${commit:pipe}", gen_name=~"${gen_name:pipe}"} + | json + | unwrap FulfilmentCount [$__interval]) by (node_id, go_test_name, gen_name) + `, prometheus.Legend("{{go_test_name}} fulfillments"), + ), + ), + row.WithTimeSeries( + "Fulfillment time (blocks)", + timeseries.Span(12), + timeseries.Height("300px"), + 
timeseries.DataSource(lokiDS), + timeseries.Axis( + axis.Unit("Blocks"), + ), + timeseries.WithPrometheusTarget( + ` + last_over_time({type="vrfv2plus_contracts_load_summary", go_test_name=~"${go_test_name:pipe}", branch=~"${branch:pipe}", commit=~"${commit:pipe}", gen_name=~"${gen_name:pipe}"} + | json + | unwrap AverageFulfillmentInMillions [$__interval]) by (node_id, go_test_name, gen_name) / 1e6 + `, prometheus.Legend("{{go_test_name}} avg"), + ), + timeseries.WithPrometheusTarget( + ` + last_over_time({type="vrfv2plus_contracts_load_summary", go_test_name=~"${go_test_name:pipe}", branch=~"${branch:pipe}", commit=~"${commit:pipe}", gen_name=~"${gen_name:pipe}"} + | json + | unwrap SlowestFulfillment [$__interval]) by (node_id, go_test_name, gen_name) + `, prometheus.Legend("{{go_test_name}} slowest"), + ), + timeseries.WithPrometheusTarget( + ` + last_over_time({type="vrfv2plus_contracts_load_summary", go_test_name=~"${go_test_name:pipe}", branch=~"${branch:pipe}", commit=~"${commit:pipe}", gen_name=~"${gen_name:pipe}"} + | json + | unwrap FastestFulfillment [$__interval]) by (node_id, go_test_name, gen_name) + `, prometheus.Legend("{{go_test_name}} fastest"), + ), + ), + ), + dashboard.Row("CL nodes logs", + row.Collapse(), + row.WithLogs( + "CL nodes logs", + logs.DataSource(lokiDS), + logs.Span(12), + logs.Height("300px"), + logs.Transparent(), + logs.WithLokiTarget(` + {type="log_watch"} + `), + )), + }, + ) + if err != nil { + panic(err) + } + // set env vars + //export GRAFANA_URL=... + //export GRAFANA_TOKEN=... 
+ //export DATA_SOURCE_NAME=Loki + //export DASHBOARD_FOLDER=LoadTests + //export DASHBOARD_NAME=Waspvrfv2plus + if _, err := d.Deploy(); err != nil { + panic(err) + } +} diff --git a/integration-tests/load/vrfv2plus/gun.go b/integration-tests/load/vrfv2plus/gun.go new file mode 100644 index 00000000..d4f20f09 --- /dev/null +++ b/integration-tests/load/vrfv2plus/gun.go @@ -0,0 +1,104 @@ +package loadvrfv2plus + +import ( + "fmt" + "math/big" + "math/rand" + + "github.com/rs/zerolog" + "github.com/goplugin/wasp" + + vrfcommon "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/common" + "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/vrfv2plus" + vrfv2plus_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/vrfv2plus" + "github.com/goplugin/pluginv3.0/integration-tests/types" +) + +/* SingleHashGun is a gun that constantly requests randomness for one feed */ + +type SingleHashGun struct { + contracts *vrfcommon.VRFContracts + keyHash [32]byte + subIDs []*big.Int + testConfig types.VRFv2PlusTestConfig + logger zerolog.Logger +} + +func NewSingleHashGun( + contracts *vrfcommon.VRFContracts, + keyHash [32]byte, + subIDs []*big.Int, + testConfig types.VRFv2PlusTestConfig, + logger zerolog.Logger, +) *SingleHashGun { + return &SingleHashGun{ + contracts: contracts, + keyHash: keyHash, + subIDs: subIDs, + testConfig: testConfig, + logger: logger, + } +} + +// Call implements example gun call, assertions on response bodies should be done here +func (m *SingleHashGun) Call(_ *wasp.Generator) *wasp.Response { + //todo - should work with multiple consumers and consumers having different keyhashes and wallets + + billingType, err := selectBillingType(*m.testConfig.GetVRFv2PlusConfig().General.SubscriptionBillingType) + if err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + + //randomly increase/decrease randomness request count per TX + randomnessRequestCountPerRequest := 
deviateValue(*m.testConfig.GetVRFv2PlusConfig().General.RandomnessRequestCountPerRequest, *m.testConfig.GetVRFv2PlusConfig().General.RandomnessRequestCountPerRequestDeviation) + _, err = vrfv2plus.RequestRandomnessAndWaitForFulfillment( + //the same consumer is used for all requests and in all subs + m.contracts.VRFV2PlusConsumer[0], + m.contracts.CoordinatorV2Plus, + &vrfcommon.VRFKeyData{KeyHash: m.keyHash}, + //randomly pick a subID from pool of subIDs + m.subIDs[randInRange(0, len(m.subIDs)-1)], + //randomly pick payment type + billingType, + *m.testConfig.GetVRFv2PlusConfig().General.MinimumConfirmations, + *m.testConfig.GetVRFv2PlusConfig().General.CallbackGasLimit, + *m.testConfig.GetVRFv2PlusConfig().General.NumberOfWords, + randomnessRequestCountPerRequest, + *m.testConfig.GetVRFv2PlusConfig().General.RandomnessRequestCountPerRequestDeviation, + m.testConfig.GetVRFv2PlusConfig().General.RandomWordsFulfilledEventTimeout.Duration, + m.logger, + ) + if err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + return &wasp.Response{} +} + +func deviateValue(requestCountPerTX uint16, deviation uint16) uint16 { + if randBool() && requestCountPerTX > deviation { + requestCountPerTX -= uint16(randInRange(0, int(deviation))) + } else { + requestCountPerTX += uint16(randInRange(0, int(deviation))) + } + return requestCountPerTX +} + +func randBool() bool { + return rand.Intn(2) == 1 +} +func randInRange(min int, max int) int { + return rand.Intn(max-min+1) + min +} + +func selectBillingType(billingType string) (bool, error) { + switch vrfv2plus_config.BillingType(billingType) { + case vrfv2plus_config.BillingType_Link: + return false, nil + case vrfv2plus_config.BillingType_Native: + return true, nil + case vrfv2plus_config.BillingType_Link_and_Native: + return randBool(), nil + default: + return false, fmt.Errorf("invalid billing type: %s", billingType) + } +} diff --git a/integration-tests/load/vrfv2plus/onchain_monitoring.go 
b/integration-tests/load/vrfv2plus/onchain_monitoring.go new file mode 100644 index 00000000..6c6460c4 --- /dev/null +++ b/integration-tests/load/vrfv2plus/onchain_monitoring.go @@ -0,0 +1,56 @@ +package loadvrfv2plus + +import ( + "context" + "testing" + "time" + + "github.com/rs/zerolog/log" + "github.com/goplugin/wasp" + + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +/* Monitors on-chain stats of LoadConsumer and pushes them to Loki every second */ + +const ( + LokiTypeLabel = "vrfv2plus_contracts_load_summary" + ErrMetrics = "failed to get VRFv2Plus load test metrics" + ErrLokiClient = "failed to create Loki client for monitoring" + ErrLokiPush = "failed to push monitoring metrics to Loki" +) + +func MonitorLoadStats(lc *wasp.LokiClient, consumer contracts.VRFv2PlusLoadTestConsumer, labels map[string]string) { + go func() { + for { + time.Sleep(1 * time.Second) + metrics := GetLoadTestMetrics(consumer) + SendMetricsToLoki(metrics, lc, labels) + } + }() +} + +func UpdateLabels(labels map[string]string, t *testing.T) map[string]string { + updatedLabels := make(map[string]string) + for k, v := range labels { + updatedLabels[k] = v + } + updatedLabels["type"] = LokiTypeLabel + updatedLabels["go_test_name"] = t.Name() + updatedLabels["gen_name"] = "performance" + return updatedLabels +} + +func SendMetricsToLoki(metrics *contracts.VRFLoadTestMetrics, lc *wasp.LokiClient, updatedLabels map[string]string) { + if err := lc.HandleStruct(wasp.LabelsMapToModel(updatedLabels), time.Now(), metrics); err != nil { + log.Error().Err(err).Msg(ErrLokiPush) + } +} + +func GetLoadTestMetrics(consumer contracts.VRFv2PlusLoadTestConsumer) *contracts.VRFLoadTestMetrics { + metrics, err := consumer.GetLoadTestMetrics(context.Background()) + if err != nil { + log.Error().Err(err).Msg(ErrMetrics) + } + return metrics +} diff --git a/integration-tests/load/vrfv2plus/vrfv2plus_test.go b/integration-tests/load/vrfv2plus/vrfv2plus_test.go new file mode 100644 index 
00000000..5a48d6a6 --- /dev/null +++ b/integration-tests/load/vrfv2plus/vrfv2plus_test.go @@ -0,0 +1,355 @@ +package loadvrfv2plus + +import ( + "context" + "math/big" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/goplugin/wasp" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/conversions" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/vrfv2plus" + "github.com/goplugin/pluginv3.0/integration-tests/testreporters" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + vrfcommon "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/common" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + vrfv2plus_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/vrfv2plus" +) + +var ( + env *test_env.CLClusterTestEnv + vrfContracts *vrfcommon.VRFContracts + vrfv2PlusData *vrfcommon.VRFKeyData + subIDs []*big.Int + eoaWalletAddress string + + labels = map[string]string{ + "branch": "vrfv2Plus_healthcheck", + "commit": "vrfv2Plus_healthcheck", + } +) + +func TestVRFV2PlusPerformance(t *testing.T) { + l := logging.GetTestLogger(t) + + testType, err := tc.GetConfigurationNameFromEnv() + require.NoError(t, err) + testConfig, err := tc.GetConfig(testType, tc.VRFv2Plus) + require.NoError(t, err) + cfgl := testConfig.Logging.Loki + + vrfv2PlusConfig := testConfig.VRFv2Plus + testReporter := &testreporters.VRFV2PlusTestReporter{} + + lc, err := wasp.NewLokiClient(wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken)) + if err != nil { + 
l.Error().Err(err).Msg(ErrLokiClient) + return + } + + updatedLabels := UpdateLabels(labels, t) + + l.Info(). + Str("Test Type", string(testType)). + Str("Test Duration", vrfv2PlusConfig.Performance.TestDuration.Duration.Truncate(time.Second).String()). + Int64("RPS", *vrfv2PlusConfig.Performance.RPS). + Str("RateLimitUnitDuration", vrfv2PlusConfig.Performance.RateLimitUnitDuration.String()). + Uint16("RandomnessRequestCountPerRequest", *vrfv2PlusConfig.General.RandomnessRequestCountPerRequest). + Uint16("RandomnessRequestCountPerRequestDeviation", *vrfv2PlusConfig.General.RandomnessRequestCountPerRequestDeviation). + Bool("UseExistingEnv", *vrfv2PlusConfig.Performance.UseExistingEnv). + Msg("Performance Test Configuration") + + if *vrfv2PlusConfig.Performance.UseExistingEnv { + //todo: temporary solution with envconfig and toml config until VRF-662 is implemented + vrfv2PlusConfig.Performance.CoordinatorAddress = testConfig.VRFv2Plus.ExistingEnvConfig.CoordinatorAddress + vrfv2PlusConfig.Performance.ConsumerAddress = testConfig.VRFv2Plus.ExistingEnvConfig.ConsumerAddress + vrfv2PlusConfig.Performance.LinkAddress = testConfig.VRFv2Plus.ExistingEnvConfig.LinkAddress + vrfv2PlusConfig.General.SubscriptionFundingAmountLink = testConfig.VRFv2Plus.ExistingEnvConfig.SubFunding.SubFundsLink + vrfv2PlusConfig.General.SubscriptionFundingAmountNative = testConfig.VRFv2Plus.ExistingEnvConfig.SubFunding.SubFundsNative + vrfv2PlusConfig.Performance.SubID = testConfig.VRFv2Plus.ExistingEnvConfig.SubID + vrfv2PlusConfig.Performance.KeyHash = testConfig.VRFv2Plus.ExistingEnvConfig.KeyHash + + env, err = test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&testConfig). + WithCustomCleanup( + func() { + teardown(t, vrfContracts.VRFV2PlusConsumer[0], lc, updatedLabels, testReporter, string(testType), &testConfig) + if env.EVMClient.NetworkSimulated() { + l.Info(). + Str("Network Name", env.EVMClient.GetNetworkName()). + Msg("Network is a simulated network. 
Skipping fund return for Coordinator Subscriptions.") + } else { + if *testConfig.VRFv2Plus.Common.CancelSubsAfterTestRun { + //cancel subs and return funds to sub owner + cancelSubsAndReturnFunds(subIDs, l) + } + } + }). + Build() + + require.NoError(t, err, "error creating test env") + + coordinator, err := env.ContractLoader.LoadVRFCoordinatorV2_5(*vrfv2PlusConfig.Performance.CoordinatorAddress) + require.NoError(t, err) + + var consumers []contracts.VRFv2PlusLoadTestConsumer + if *testConfig.VRFv2Plus.ExistingEnvConfig.CreateFundSubsAndAddConsumers { + linkToken, err := env.ContractLoader.LoadPLIToken(*vrfv2PlusConfig.Performance.LinkAddress) + require.NoError(t, err) + consumers, err = vrfv2plus.DeployVRFV2PlusConsumers(env.ContractDeployer, coordinator, 1) + require.NoError(t, err) + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, vrfcommon.ErrWaitTXsComplete) + l.Info(). + Str("Coordinator", *vrfv2PlusConfig.Performance.CoordinatorAddress). + Int("Number of Subs to create", *vrfv2PlusConfig.General.NumberOfSubToCreate). 
+					Msg("Creating and funding subscriptions, deploying and adding consumers to subs")
+				subIDs, err = vrfv2plus.CreateFundSubsAndAddConsumers(
+					env,
+					big.NewFloat(*testConfig.GetVRFv2PlusConfig().General.SubscriptionFundingAmountNative),
+					big.NewFloat(*testConfig.GetVRFv2PlusConfig().General.SubscriptionFundingAmountLink),
+					linkToken,
+					coordinator,
+					consumers,
+					*vrfv2PlusConfig.General.NumberOfSubToCreate,
+					vrfv2plus_config.BillingType(*vrfv2PlusConfig.General.SubscriptionBillingType),
+				)
+				require.NoError(t, err)
+			} else {
+				// Reuse an already-deployed consumer and the pre-existing subscription
+				// from the existing-env config.
+				consumer, err := env.ContractLoader.LoadVRFv2PlusLoadTestConsumer(*vrfv2PlusConfig.Performance.ConsumerAddress)
+				require.NoError(t, err)
+				consumers = append(consumers, consumer)
+				// Bug fix: the original declared `var ok bool` (never assigned) and then
+				// asserted require.True(t, ok), which made this branch fail unconditionally.
+				// big.NewInt cannot fail, so no ok-check is needed here.
+				subID := big.NewInt(int64(*vrfv2PlusConfig.Performance.SubID))
+				subIDs = append(subIDs, subID)
+			}
+
+			err = FundNodesIfNeeded(&testConfig, env.EVMClient, l)
+			require.NoError(t, err)
+
+			vrfContracts = &vrfcommon.VRFContracts{
+				CoordinatorV2Plus: coordinator,
+				VRFV2PlusConsumer: consumers,
+				BHS:               nil,
+			}
+
+			vrfv2PlusData = &vrfcommon.VRFKeyData{
+				VRFKey:            nil,
+				EncodedProvingKey: [2]*big.Int{},
+				KeyHash:           common.HexToHash(*vrfv2PlusConfig.Performance.KeyHash),
+			}
+
+		} else {
+			//todo: temporary solution with envconfig and toml config until VRF-662 is implemented
+			// Bug fix: this is the VRFv2Plus test, so node funding must come from the
+			// VRFv2Plus config section; the original read testConfig.VRFv2.NewEnvConfig
+			// (a copy-paste slip from the v2 test — the two following lines already use
+			// testConfig.VRFv2Plus.NewEnvConfig).
+			testConfig.Common.PluginNodeFunding = testConfig.VRFv2Plus.NewEnvConfig.NodeSendingKeyFunding
+			vrfv2PlusConfig.General.SubscriptionFundingAmountLink = testConfig.VRFv2Plus.NewEnvConfig.Funding.SubFundsLink
+			vrfv2PlusConfig.General.SubscriptionFundingAmountNative = testConfig.VRFv2Plus.NewEnvConfig.Funding.SubFundsNative
+
+			network, err := actions.EthereumNetworkConfigFromConfig(l, &testConfig)
+			require.NoError(t, err, "Error building ethereum network config")
+			env, err = test_env.NewCLTestEnvBuilder().
+				WithTestInstance(t).
+				WithTestConfig(&testConfig).
+				WithPrivateEthereumNetwork(network).
+				WithCLNodes(1).
+ WithFunding(big.NewFloat(*testConfig.Common.PluginNodeFunding)). + WithCustomCleanup( + func() { + teardown(t, vrfContracts.VRFV2PlusConsumer[0], lc, updatedLabels, testReporter, string(testType), &testConfig) + + if env.EVMClient.NetworkSimulated() { + l.Info(). + Str("Network Name", env.EVMClient.GetNetworkName()). + Msg("Network is a simulated network. Skipping fund return for Coordinator Subscriptions.") + } else { + if *testConfig.VRFv2Plus.Common.CancelSubsAfterTestRun { + //cancel subs and return funds to sub owner + cancelSubsAndReturnFunds(subIDs, l) + } + } + if err := env.Cleanup(); err != nil { + l.Error().Err(err).Msg("Error cleaning up test environment") + } + }). + Build() + + require.NoError(t, err, "error creating test env") + + env.ParallelTransactions(true) + + mockETHLinkFeed, err := actions.DeployMockETHLinkFeed(env.ContractDeployer, big.NewInt(*vrfv2PlusConfig.General.LinkNativeFeedResponse)) + require.NoError(t, err, "error deploying mock ETH/PLI feed") + + linkToken, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err, "error deploying PLI contract") + + vrfContracts, subIDs, vrfv2PlusData, _, err = vrfv2plus.SetupVRFV2_5Environment( + env, + []vrfcommon.VRFNodeType{vrfcommon.VRF}, + &testConfig, + linkToken, + mockETHLinkFeed, + 0, + 1, + *vrfv2PlusConfig.General.NumberOfSubToCreate, + l, + ) + require.NoError(t, err, "error setting up VRF v2_5 env") + } + eoaWalletAddress = env.EVMClient.GetDefaultWallet().Address() + + l.Debug().Int("Number of Subs", len(subIDs)).Msg("Subs involved in the test") + for _, subID := range subIDs { + subscription, err := vrfContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err, "error getting subscription information for subscription %s", subID.String()) + vrfv2plus.LogSubDetails(l, subscription, subID, vrfContracts.CoordinatorV2Plus) + } + + singleFeedConfig := &wasp.Config{ + T: t, + LoadType: wasp.RPS, + GenName: "gun", + 
RateLimitUnitDuration: vrfv2PlusConfig.Performance.RateLimitUnitDuration.Duration, + Gun: NewSingleHashGun( + vrfContracts, + vrfv2PlusData.KeyHash, + subIDs, + &testConfig, + l, + ), + Labels: labels, + LokiConfig: wasp.NewLokiConfig(cfgl.Endpoint, cfgl.TenantId, cfgl.BasicAuth, cfgl.BearerToken), + CallTimeout: 2 * time.Minute, + } + require.Len(t, vrfContracts.VRFV2PlusConsumer, 1, "only one consumer should be created for Load Test") + consumer := vrfContracts.VRFV2PlusConsumer[0] + err = consumer.ResetMetrics() + require.NoError(t, err) + MonitorLoadStats(lc, consumer, updatedLabels) + + // is our "job" stable at all, no memory leaks, no flaking performance under some RPS? + t.Run("vrfv2plus performance test", func(t *testing.T) { + singleFeedConfig.Schedule = wasp.Plain( + *vrfv2PlusConfig.Performance.RPS, + vrfv2PlusConfig.Performance.TestDuration.Duration, + ) + _, err = wasp.NewProfile(). + Add(wasp.NewGenerator(singleFeedConfig)). + Run(true) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(1) + //todo - timeout should be configurable depending on the perf test type + requestCount, fulfilmentCount, err := vrfv2plus.WaitForRequestCountEqualToFulfilmentCount(consumer, 2*time.Minute, &wg) + require.NoError(t, err) + wg.Wait() + + l.Info(). + Interface("Request Count", requestCount). + Interface("Fulfilment Count", fulfilmentCount). + Msg("Final Request/Fulfilment Stats") + }) + +} + +func cancelSubsAndReturnFunds(subIDs []*big.Int, l zerolog.Logger) { + for _, subID := range subIDs { + l.Info(). + Str("Returning funds from SubID", subID.String()). + Str("Returning funds to", eoaWalletAddress). 
+			Msg("Canceling subscription and returning funds to subscription owner")
+		pendingRequestsExist, err := vrfContracts.CoordinatorV2Plus.PendingRequestsExist(context.Background(), subID)
+		if err != nil {
+			l.Error().Err(err).Msg("Error checking if pending requests exist")
+		}
+		if !pendingRequestsExist {
+			_, err := vrfContracts.CoordinatorV2Plus.CancelSubscription(subID, common.HexToAddress(eoaWalletAddress))
+			if err != nil {
+				l.Error().Err(err).Msg("Error canceling subscription")
+			}
+		} else {
+			l.Error().Str("Sub ID", subID.String()).Msg("Pending requests exist for subscription, cannot cancel subscription and return funds")
+		}
+	}
+}
+
+// FundNodesIfNeeded tops up each configured node sending key to the minimum
+// balance given in ExistingEnvConfig.NodeSendingKeyFundingMin. Keys already at
+// or above the minimum are skipped.
+func FundNodesIfNeeded(vrfv2plusTestConfig tc.VRFv2PlusTestConfig, client blockchain.EVMClient, l zerolog.Logger) error {
+	cfg := vrfv2plusTestConfig.GetVRFv2PlusConfig()
+	// Bug fix: guard against a nil NodeSendingKeyFundingMin before dereferencing,
+	// matching the VRFv2 variant of this function; the original dereferenced the
+	// pointer unconditionally and would panic when the field is unset.
+	if cfg.ExistingEnvConfig.NodeSendingKeyFundingMin != nil && *cfg.ExistingEnvConfig.NodeSendingKeyFundingMin > 0 {
+		for _, sendingKey := range cfg.ExistingEnvConfig.NodeSendingKeys {
+			address := common.HexToAddress(sendingKey)
+			sendingKeyBalance, err := client.BalanceAt(context.Background(), address)
+			if err != nil {
+				return err
+			}
+			fundingAtLeast := conversions.EtherToWei(big.NewFloat(*cfg.ExistingEnvConfig.NodeSendingKeyFundingMin))
+			fundingToSendWei := new(big.Int).Sub(fundingAtLeast, sendingKeyBalance)
+			fundingToSendEth := conversions.WeiToEther(fundingToSendWei)
+			if fundingToSendWei.Cmp(big.NewInt(0)) == 1 {
+				l.Info().
+					Str("Sending Key", sendingKey).
+					Str("Sending Key Current Balance", sendingKeyBalance.String()).
+					Str("Should have at least", fundingAtLeast.String()).
+					Str("Funding Amount in ETH", fundingToSendEth.String()).
+					Msg("Funding Node's Sending Key")
+				err := actions.FundAddress(client, sendingKey, fundingToSendEth)
+				if err != nil {
+					return err
+				}
+			} else {
+				l.Info().
+					Str("Sending Key", sendingKey).
+					Str("Sending Key Current Balance", sendingKeyBalance.String()).
+					Str("Should have at least", fundingAtLeast.String()).
+ Msg("Skipping Node's Sending Key funding as it has enough funds") + } + } + } + return nil +} + +func teardown( + t *testing.T, + consumer contracts.VRFv2PlusLoadTestConsumer, + lc *wasp.LokiClient, + updatedLabels map[string]string, + testReporter *testreporters.VRFV2PlusTestReporter, + testType string, + testConfig *tc.TestConfig, +) { + //send final results to Loki + metrics := GetLoadTestMetrics(consumer) + SendMetricsToLoki(metrics, lc, updatedLabels) + //set report data for Slack notification + testReporter.SetReportData( + testType, + metrics.RequestCount, + metrics.FulfilmentCount, + metrics.AverageFulfillmentInMillions, + metrics.SlowestFulfillment, + metrics.FastestFulfillment, + testConfig, + ) + + // send Slack notification + err := testReporter.SendSlackNotification(t, nil, testConfig) + if err != nil { + log.Warn().Err(err).Msg("Error sending Slack notification") + } +} diff --git a/integration-tests/main.go b/integration-tests/main.go new file mode 100644 index 00000000..dfec8fee --- /dev/null +++ b/integration-tests/main.go @@ -0,0 +1,99 @@ +package main + +import ( + "fmt" + + gh "github.com/cli/go-gh/v2" +) + +const ( + art string = ` +------------------------------------------------------------------------------------------------- + _____ _ _ _ _ _ _____ _ ______ +/ __ \ | (_) | (_) | | |_ _| | | | ___ \ +| / \/ |__ __ _ _ _ __ | |_ _ __ | | __ | | ___ ___| |_ | |_/ / _ _ __ _ __ ___ _ __ +| | | '_ \ / _, | | '_ \| | | '_ \| |/ / | |/ _ \/ __| __| | / | | | '_ \| '_ \ / _ \ '__| +| \__/\ | | | (_| | | | | | | | | | | < | | __/\__ \ |_ | |\ \ |_| | | | | | | | __/ | + \____/_| |_|\__,_|_|_| |_|_|_|_| |_|_|\_\ \_/\___||___/\__| \_| \_\__,_|_| |_|_| |_|\___|_| +------------------------------------------------------------------------------------------------- + +Make sure you have the GitHub CLI and it's authorized. Find it at https://cli.github.com/ + +Follow the prompts to run an E2E test. 
Type to search, use arrow keys to scroll, and Enter to select an option. +` + helpText string = "What do these mean?" + pluginRepo string = "goplugin/pluginv3.0" + workflowFile string = "generic-test-runner.yml" +) + +var ( + testDirectories = []string{helpText, "smoke", "soak", "performance", "reorg", "chaos", "benchmark"} +) + +func main() { + // This can take a while to retrieve, start it at the beginning asynchronously + branchesAndTags, branchesAndTagsErr := make(chan []string, 1), make(chan error, 1) + go collectBranchesAndTags(branchesAndTags, branchesAndTagsErr) + + fmt.Print(art) + + ghUser, err := getUser() + if err != nil { + fmt.Printf("error getting GitHub user, make sure you're signed in to the GitHub CLI: %v\n", err) + return + } + fmt.Printf("Running as %s\n", ghUser) + + network, wsURL, httpURL, fundingKey, err := getNetwork() + if err != nil { + fmt.Printf("error getting network: %v\n", err) + return + } + + dir, err := getTestDirectory() + if err != nil { + fmt.Printf("error getting test directory: %v\n", err) + return + } + + err = <-branchesAndTagsErr + if err != nil { + fmt.Printf("error getting branches and tags: %v\n", err) + return + } + branch, err := getTestBranch(<-branchesAndTags) + if err != nil { + fmt.Printf("error selecting test branch: %v\n", err) + return + } + + test, err := getTest(dir) + if err != nil { + fmt.Printf("error getting test: %v\n", err) + return + } + + stdOut, stdErr, err := gh.Exec( // Triggers the workflow with specified test + "workflow", "run", workflowFile, + "--repo", pluginRepo, + "--ref", branch, + "-f", fmt.Sprintf("directory=%s", dir), + "-f", fmt.Sprintf("test=Test%s", test), + "-f", fmt.Sprintf("network=%s", network), + "-f", fmt.Sprintf("wsURL=%s", wsURL), + "-f", fmt.Sprintf("httpURL=%s", httpURL), + "-f", fmt.Sprintf("fundingKey=%s", fundingKey), + ) + if err != nil { + fmt.Printf("Error running gh workflow run: %v\n", err) + fmt.Println(stdErr.String()) + return + } + fmt.Println(stdOut.String()) + 
+	_, err = waitForWorkflowRun(branch, ghUser)
+	if err != nil {
+		fmt.Printf("Error waiting for workflow to start: %v\n", err)
+		return
+	}
+}
diff --git a/integration-tests/migration/upgrade_version_test.go b/integration-tests/migration/upgrade_version_test.go
new file mode 100644
index 00000000..9a7239ab
--- /dev/null
+++ b/integration-tests/migration/upgrade_version_test.go
@@ -0,0 +1,41 @@
+package migration
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/goplugin/pluginv3.0/integration-tests/docker/test_env"
+
+	tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig"
+)
+
+func TestVersionUpgrade(t *testing.T) {
+	t.Parallel()
+
+	config, err := tc.GetConfig("Migration", tc.Node)
+	require.NoError(t, err, "Error getting config")
+
+	err = config.PluginUpgradeImage.Validate()
+	require.NoError(t, err, "Error validating upgrade image")
+
+	// Bug fix: WithStandardCleanup() was chained twice in the original builder,
+	// registering the cleanup hook redundantly; one registration is sufficient.
+	env, err := test_env.NewCLTestEnvBuilder().
+		WithTestConfig(&config).
+		WithTestInstance(t).
+		WithStandardCleanup().
+		WithGeth().
+		WithCLNodes(1).
+ Build() + require.NoError(t, err) + + // just restarting CL container with the same name, DB is still the same + // + // [Database] + // MigrateOnStartup = true + // + // by default + err = env.ClCluster.Nodes[0].UpgradeVersion(*config.PluginUpgradeImage.Image, *config.PluginUpgradeImage.Version) + require.NoError(t, err) + +} diff --git a/integration-tests/reorg/automation_reorg_test.go b/integration-tests/reorg/automation_reorg_test.go new file mode 100644 index 00000000..07a84d06 --- /dev/null +++ b/integration-tests/reorg/automation_reorg_test.go @@ -0,0 +1,290 @@ +package reorg + +//revive:disable:dot-imports +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/plugin-testing-framework/blockchain" + ctf_config "github.com/goplugin/plugin-testing-framework/config" + "github.com/goplugin/plugin-testing-framework/k8s/environment" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/cdk8s/blockscout" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/plugin" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/reorg" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/networks" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +var ( + baseTOML = `[Feature] +LogPoller = true + +[OCR2] +Enabled = true + +[P2P] +[P2P.V2] +AnnounceAddresses = ["0.0.0.0:6690"] +ListenAddresses = ["0.0.0.0:6690"]` + networkTOML = `Enabled = true +FinalityDepth = 200 +LogPollInterval = '1s' + +[EVM.HeadTracker] +HistoryDepth = 400 + 
+[EVM.GasEstimator] +Mode = 'FixedPrice' +LimitDefault = 5_000_000` + + defaultAutomationSettings = map[string]interface{}{ + "toml": "", + "db": map[string]interface{}{ + "stateful": false, + "capacity": "1Gi", + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": "250m", + "memory": "256Mi", + }, + "limits": map[string]interface{}{ + "cpu": "250m", + "memory": "256Mi", + }, + }, + }, + } + + defaultReorgEthereumSettings = &reorg.Props{ + NetworkName: "", + NetworkType: "geth-reorg", + Values: map[string]interface{}{ + "geth": map[string]interface{}{ + "genesis": map[string]interface{}{ + "networkId": "1337", + }, + "miner": map[string]interface{}{ + "replicas": 2, + }, + }, + }, + } + + defaultOCRRegistryConfig = contracts.KeeperRegistrySettings{ + PaymentPremiumPPB: uint32(200000000), + FlatFeeMicroPLI: uint32(0), + BlockCountPerTurn: big.NewInt(10), + CheckGasLimit: uint32(2500000), + StalenessSeconds: big.NewInt(90000), + GasCeilingMultiplier: uint16(1), + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: uint32(5000000), + FallbackGasPrice: big.NewInt(2e11), + FallbackLinkPrice: big.NewInt(2e18), + MaxCheckDataSize: uint32(5000), + MaxPerformDataSize: uint32(5000), + } +) + +const ( + defaultUpkeepGasLimit = uint32(2500000) + defaultLinkFunds = int64(9e18) + numberOfUpkeeps = 2 + automationReorgBlocks = 50 + numberOfNodes = 6 +) + +/* + * This test verifies that conditional upkeeps automatically recover from chain reorgs + * The blockchain is configured to have two separate miners and one geth node. The test starts + * with happy path where the two miners remain in sync and upkeeps are expected to be performed. + * Then reorg starts and the connection between the two geth miners is severed. This makes the + * chain unstable, however all the CL nodes get the same view of the unstable chain through the + * same geth node. 
+ * + * Upkeeps are expected to be performed during the reorg as there are only two versions of the + * the chain, on average 1/2 performUpkeeps should go through. + * + * The miner nodes are synced back after automationReorgBlocks. The syncing event can cause a + * large reorg from CL node perspective, causing existing performUpkeeps to become staleUpkeeps. + * Automation should be able to recover from this and upkeeps should continue to occur at a + * normal pace after the event. + */ +func TestAutomationReorg(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + registryVersions := map[string]ethereum.KeeperRegistryVersion{ + "registry_2_0": ethereum.RegistryVersion_2_0, + // "registry_2_1_conditional": ethereum.RegistryVersion_2_1, + // "registry_2_1_logtrigger": ethereum.RegistryVersion_2_1, + } + + for name, registryVersion := range registryVersions { + name := name + registryVersion := registryVersion + t.Run(name, func(t *testing.T) { + t.Parallel() + config, err := tc.GetConfig("Reorg", tc.Automation) + if err != nil { + t.Fatal(err) + } + + network := networks.MustGetSelectedNetworkConfig(config.Network)[0] + + defaultAutomationSettings["replicas"] = numberOfNodes + defaultAutomationSettings["toml"] = networks.AddNetworkDetailedConfig(baseTOML, config.Pyroscope, networkTOML, network) + + var overrideFn = func(_ interface{}, target interface{}) { + ctf_config.MustConfigOverridePluginVersion(config.PluginImage, target) + ctf_config.MightConfigOverridePyroscopeKey(config.Pyroscope, target) + } + + cd := plugin.NewWithOverride(0, defaultAutomationSettings, config.PluginImage, overrideFn) + + ethSetting := defaultReorgEthereumSettings + ethSetting.NetworkName = network.Name + + testEnvironment := environment. + New(&environment.Config{ + NamespacePrefix: fmt.Sprintf("automation-reorg-%d", automationReorgBlocks), + TTL: time.Hour * 1, + Test: t}). + AddHelm(reorg.New(ethSetting)). 
+ AddChart(blockscout.New(&blockscout.Props{ + Name: "geth-blockscout", + WsURL: network.URL, + HttpURL: network.HTTPURLs[0]})). + AddHelm(cd) + err = testEnvironment.Run() + require.NoError(t, err, "Error setting up test environment") + + if testEnvironment.WillUseRemoteRunner() { + return + } + + chainClient, err := blockchain.NewEVMClient(network, testEnvironment, l) + require.NoError(t, err, "Error connecting to blockchain") + contractDeployer, err := contracts.NewContractDeployer(chainClient, l) + require.NoError(t, err, "Error building contract deployer") + pluginNodes, err := client.ConnectPluginNodes(testEnvironment) + require.NoError(t, err, "Error connecting to Plugin nodes") + chainClient.ParallelTransactions(true) + + // Register cleanup for any test + t.Cleanup(func() { + err := actions.TeardownSuite(t, testEnvironment, pluginNodes, nil, zapcore.PanicLevel, &config, chainClient) + require.NoError(t, err, "Error tearing down environment") + }) + + txCost, err := chainClient.EstimateCostForPluginOperations(1000) + require.NoError(t, err, "Error estimating cost for Plugin Operations") + err = actions.FundPluginNodes(pluginNodes, chainClient, txCost) + require.NoError(t, err, "Error funding Plugin nodes") + + linkToken, err := contractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Error deploying PLI token") + + registry, registrar := actions.DeployAutoOCRRegistryAndRegistrar( + t, + registryVersion, + defaultOCRRegistryConfig, + linkToken, + contractDeployer, + chainClient, + ) + // Fund the registry with PLI + err = linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(numberOfUpkeeps)))) + require.NoError(t, err, "Funding keeper registry contract shouldn't fail") + + actions.CreateOCRKeeperJobs(t, pluginNodes, registry.Address(), network.ChainID, 0, registryVersion) + nodesWithoutBootstrap := pluginNodes[1:] + ocrConfig, err := actions.BuildAutoOCR2ConfigVars(t, nodesWithoutBootstrap, 
defaultOCRRegistryConfig, registrar.Address(), 5*time.Second) + require.NoError(t, err, "OCR2 config should be built successfully") + err = registry.SetConfig(defaultOCRRegistryConfig, ocrConfig) + require.NoError(t, err, "Registry config should be be set successfully") + require.NoError(t, chainClient.WaitForEvents(), "Waiting for config to be set") + + // Use the name to determine if this is a log trigger or not + isLogTrigger := name == "registry_2_1_logtrigger" + consumers, upkeepIDs := actions.DeployConsumers(t, registry, registrar, linkToken, contractDeployer, chainClient, numberOfUpkeeps, big.NewInt(defaultLinkFunds), defaultUpkeepGasLimit, isLogTrigger, false) + + l.Info().Msg("Waiting for all upkeeps to be performed") + + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 5 + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + expect := 5 + l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">=", int64(expect)), + "Expected consumer counter to be greater than %d, but got %d", expect, counter.Int64()) + } + }, "7m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~3m for performing each upkeep 5 times, ~3m buffer + + l.Info().Msg("All upkeeps performed under happy path. 
Starting reorg") + + rc, err := NewReorgController( + &ReorgConfig{ + FromPodLabel: reorg.TXNodesAppLabel, + ToPodLabel: reorg.MinerNodesAppLabel, + Network: chainClient, + Env: testEnvironment, + BlockConsensusThreshold: 3, + Timeout: 1800 * time.Second, + }, + ) + + require.NoError(t, err, "Error getting reorg controller") + rc.ReOrg(automationReorgBlocks) + rc.WaitReorgStarted() + + l.Info().Msg("Reorg started. Expecting chain to become unstable and upkeeps to still getting performed") + + gom.Eventually(func(g gomega.Gomega) { + // Check if the upkeeps are performing multiple times by analyzing their counters and checking they reach 10 + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + expect := 10 + l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">=", int64(expect)), + "Expected consumer counter to be greater than %d, but got %d", expect, counter.Int64()) + } + }, "5m", "1s").Should(gomega.Succeed()) + + l.Info().Msg("Upkeep performed during unstable chain, waiting for reorg to finish") + err = rc.WaitDepthReached() + require.NoError(t, err) + + l.Info().Msg("Reorg finished, chain should be stable now. 
Expecting upkeeps to keep getting performed") + gom.Eventually(func(g gomega.Gomega) { + // Check if the upkeeps are performing multiple times by analyzing their counters and checking they reach 20 + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + expect := 20 + l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">=", int64(expect)), + "Expected consumer counter to be greater than %d, but got %d", expect, counter.Int64()) + } + }, "10m", "1s").Should(gomega.Succeed()) + }) + } +} diff --git a/integration-tests/reorg/reorg_confirmer.go b/integration-tests/reorg/reorg_confirmer.go new file mode 100644 index 00000000..50240309 --- /dev/null +++ b/integration-tests/reorg/reorg_confirmer.go @@ -0,0 +1,284 @@ +package reorg + +import ( + "context" + "fmt" + "math/big" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/rs/zerolog/log" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/k8s/chaos" + "github.com/goplugin/plugin-testing-framework/k8s/environment" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/reorg" + "github.com/goplugin/plugin-testing-framework/utils/ptr" +) + +// The steps are: +// 1. Await initial network consensus for N blocks +// 2. Fork the network, separate node 0 from 1 and 2 +// 3. Await alternative blocks mined on node zero, +// nodes 1 and 2 will have longer chain with more block difficulty so blocks of node 0 must be replaced +// 4. 
Await consensus for N blocks, check alt block hashes now
+const (
+	InitConsensus int64 = iota
+	NetworkFork
+	CheckBlocks
+	Consensus
+	Wait
+)
+
+type ReorgConfig struct {
+	FromPodLabel            string
+	ToPodLabel              string
+	Network                 blockchain.EVMClient
+	Env                     *environment.Environment
+	BlockConsensusThreshold int
+	Timeout                 time.Duration
+}
+
+// ReorgController reorg stats collecting struct
+type ReorgController struct {
+	cfg                   *ReorgConfig
+	ReorgDepth            int
+	numberOfNodes         int
+	currentBlockConsensus int
+	forkBlockNumber       int64
+	currentAltBlocks      int
+	currentVerifiedBlocks int
+	networkStep           atomic.Int64
+	altBlockNumbers       []int64
+	blocksByNode          map[int]map[int64]blockchain.NodeHeader
+	blockHashes           map[int64][]common.Hash
+	chaosExperimentName   string
+	initConsensusReady    chan struct{}
+	reorgStarted          chan struct{}
+	depthReached          chan struct{}
+	once                  sync.Once
+	mutex                 sync.Mutex
+	ctx                   context.Context
+	cancel                context.CancelFunc
+	doneChan              chan struct{}
+	complete              bool
+}
+
+// NewReorgController creates a type that can create reorg chaos and confirm reorg has happened
+func NewReorgController(cfg *ReorgConfig) (*ReorgController, error) {
+	if len(cfg.Network.GetClients()) < 3 { // fork isolates node 0 from nodes 1 and 2; was `== 1`, contradicting the error below
+		return nil, fmt.Errorf("need at least 3 nodes to re-org")
+	}
+	ctx, ctxCancel := context.WithTimeout(context.Background(), cfg.Timeout)
+	rc := &ReorgController{
+		cfg:                cfg,
+		numberOfNodes:      len(cfg.Network.GetClients()),
+		doneChan:           make(chan struct{}, 100),
+		altBlockNumbers:    make([]int64, 0),
+		blockHashes:        map[int64][]common.Hash{},
+		blocksByNode:       map[int]map[int64]blockchain.NodeHeader{},
+		initConsensusReady: make(chan struct{}, 100),
+		reorgStarted:       make(chan struct{}, 100),
+		depthReached:       make(chan struct{}, 100),
+		ctx:                ctx,
+		cancel:             ctxCancel,
+	}
+	rc.networkStep.Store(InitConsensus)
+	for _, c := range cfg.Network.GetClients() {
+		c.AddHeaderEventSubscription("reorg", rc)
+	}
+	<-rc.initConsensusReady
+	return rc, nil
+}
+
+// ReOrg start re-org
+func (rc *ReorgController)
ReOrg(depth int) { + rc.ReorgDepth = depth + rc.networkStep.Store(NetworkFork) +} + +// WaitDepthReached wait until N alternative blocks are mined +func (rc *ReorgController) WaitDepthReached() error { + <-rc.depthReached + rc.networkStep.Store(Consensus) + return rc.cfg.Network.WaitForEvents() +} + +// WaitReorgStarted waits until first alternative block is present +func (rc *ReorgController) WaitReorgStarted() { + <-rc.reorgStarted +} + +// ReceiveHeader receives header marked by node that mined it, +// forks the network and record all alternative block numbers on node 0 +func (rc *ReorgController) ReceiveHeader(header blockchain.NodeHeader) error { + rc.mutex.Lock() + defer rc.mutex.Unlock() + rc.appendBlockHeader(header) + switch rc.networkStep.Load() { + case Wait: + case InitConsensus: + if rc.hasNetworkFormedConsensus(header) { + rc.networkStep.Store(Wait) + rc.initConsensusReady <- struct{}{} + } + case NetworkFork: + if err := rc.forkNetwork(header); err != nil { + return err + } + case CheckBlocks: + if err := rc.compareBlocks(header); err != nil { + return err + } + case Consensus: + if rc.hasNetworkFormedConsensus(header) { + rc.complete = true + rc.cancel() + } + } + return nil +} + +func (rc *ReorgController) Complete() bool { + return rc.complete +} + +// VerifyReorgComplete verifies that all blocks are replaced by reorg +func (rc *ReorgController) VerifyReorgComplete() error { + // we are excluding the last block because we can't control joining while blocks were mined + for _, ab := range rc.altBlockNumbers[:len(rc.altBlockNumbers)-1] { + bb := rc.blocksByNode[0][ab] + h, err := rc.cfg.Network.HeaderHashByNumber(context.Background(), big.NewInt(ab)) + if err != nil { + return err + } + log.Info(). + Int64("Number", bb.Number.Int64()). + Str("Hash before", bb.Hash.String()). + Str("Hash after", h). 
+ Msg("Comparing block") + if bb.Hash.String() != h { + rc.currentVerifiedBlocks++ + } + } + if rc.currentVerifiedBlocks+1 < rc.ReorgDepth { + return fmt.Errorf("Reorg depth has not met") + } + return nil +} + +func (rc *ReorgController) isAltBlock(blk blockchain.NodeHeader) bool { + blockNumber := blk.Number.Int64() + // If we've received the same block number from all nodes, check hashes there are some different versions + if len(rc.blockHashes[blockNumber]) >= rc.numberOfNodes { + firstBlockHash := rc.blockHashes[blockNumber][0] + for _, blockHash := range rc.blockHashes[blockNumber][1:] { + if blockHash.String() != firstBlockHash.String() { + return true + } + } + } + return false +} + +func (rc *ReorgController) compareBlocks(blk blockchain.NodeHeader) error { + if blk.NodeID == 0 && blk.Number.Int64() >= rc.forkBlockNumber && rc.isAltBlock(blk) { + rc.once.Do(func() { + log.Warn().Int64("Number", blk.Number.Int64()).Msg("Reorg started") + rc.reorgStarted <- struct{}{} + }) + rc.altBlockNumbers = append(rc.altBlockNumbers, blk.Number.Int64()) + rc.currentAltBlocks++ + log.Info(). + Int64("Number", blk.Number.Int64()). + Str("Hash", blk.Hash.String()). + Int("Node", blk.NodeID). + Int("BlocksLeft", rc.ReorgDepth-rc.currentAltBlocks). + Msg("Mined alternative block") + } + if rc.currentAltBlocks == rc.ReorgDepth { + log.Info().Msg("Joining network") + if err := rc.joinNetwork(); err != nil { + return err + } + rc.depthReached <- struct{}{} + rc.currentBlockConsensus = 0 + } + return nil +} + +// Wait wait until reorg is done +func (rc *ReorgController) Wait() error { + <-rc.ctx.Done() + rc.cfg.Network.DeleteHeaderEventSubscription("reorg") + if rc.complete { + return nil + } + return fmt.Errorf("timeout waiting for reorg to complete") +} + +// forkNetwork stomp the network between target reorged node and the rest +func (rc *ReorgController) forkNetwork(header blockchain.NodeHeader) error { + rc.forkBlockNumber = header.Number.Int64() + log.Debug(). 
+ Int64("Number", rc.forkBlockNumber). + Str("Network", rc.cfg.Network.GetNetworkName()). + Msg("Forking network") + expName, err := rc.cfg.Env.Chaos.Run( + chaos.NewNetworkPartition( + rc.cfg.Env.Cfg.Namespace, + &chaos.Props{ + DurationStr: "999h", + FromLabels: &map[string]*string{"app": ptr.Ptr(reorg.TXNodesAppLabel)}, + ToLabels: &map[string]*string{"app": ptr.Ptr(reorg.MinerNodesAppLabel)}, + }, + )) + rc.chaosExperimentName = expName + rc.networkStep.Store(CheckBlocks) + return err +} + +// joinNetwork restores network connectivity between nodes +func (rc *ReorgController) joinNetwork() error { + return rc.cfg.Env.Chaos.Stop(rc.chaosExperimentName) +} + +// hasNetworkFormedConsensus waits for network to have N blocks with the same hashes +func (rc *ReorgController) hasNetworkFormedConsensus(header blockchain.NodeHeader) bool { + blockNumber := header.Number.Int64() + // If we've received the same block number from all nodes, check hashes to ensure they've reformed consensus + if len(rc.blockHashes[blockNumber]) >= rc.numberOfNodes { + firstBlockHash := rc.blockHashes[blockNumber][0] + for _, blockHash := range rc.blockHashes[blockNumber][1:] { + if blockHash.String() != firstBlockHash.String() { + return false + } + } + log.Debug(). + Int("Blocks left", rc.cfg.BlockConsensusThreshold-rc.currentBlockConsensus). + Msg("Waiting for consensus threshold") + rc.currentBlockConsensus++ + } + + if rc.currentBlockConsensus >= rc.cfg.BlockConsensusThreshold { + log.Info(). 
+ Msg("Network joined") + return true + } + return false +} + +func (rc *ReorgController) appendBlockHeader(header blockchain.NodeHeader) { + bn := header.Number.Int64() + if _, ok := rc.blockHashes[bn]; !ok { + rc.blockHashes[bn] = []common.Hash{} + } + rc.blockHashes[bn] = append(rc.blockHashes[bn], header.Hash) + + if _, ok := rc.blocksByNode[header.NodeID]; !ok { + rc.blocksByNode[header.NodeID] = make(map[int64]blockchain.NodeHeader) + } + rc.blocksByNode[header.NodeID][bn] = header +} diff --git a/integration-tests/runner_helpers.go b/integration-tests/runner_helpers.go new file mode 100644 index 00000000..0e91030a --- /dev/null +++ b/integration-tests/runner_helpers.go @@ -0,0 +1,357 @@ +package main + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + "time" + + "github.com/cli/go-gh/v2" + "github.com/ethereum/go-ethereum/crypto" + "github.com/manifoldco/promptui" + "github.com/rs/zerolog/log" + + "github.com/goplugin/plugin-testing-framework/networks" +) + +func waitForWorkflowRun(branch, ghUser string) (string, error) { + fmt.Println("Waiting for workflow to start") + startTime := time.Now() + checkWorkflow, timeout := time.NewTicker(time.Second), time.After(time.Second*15) + defer checkWorkflow.Stop() + for { + select { + case <-checkWorkflow.C: + workflowId, err := checkWorkflowRun(startTime, branch, ghUser) + if err != nil { + return "", err + } + if workflowId == "" { + fmt.Println("Checking...") + continue + } + fmt.Printf("Triggered Workflow with ID: %s\n", workflowId) + fmt.Println("Opening run in browser...") + _, stdErr, err := gh.Exec( // Opens the run in browser + "run", "view", workflowId, "-w", + ) + if err != nil { + fmt.Println(stdErr.String()) + return "", err + } + return workflowId, nil + case <-timeout: + return "", fmt.Errorf("timed out waiting for workflow run to start") + } + } +} + +func checkWorkflowRun(startTime time.Time, branch, ghUser string) (string, error) { + 
stdOut, stdErr, err := gh.Exec( // Retrieves the runId of the workflow we just started + "run", "list", "-b", branch, "-w", workflowFile, "-u", ghUser, + "--json", "startedAt,databaseId", "-q", ".[0]", + ) + if err != nil { + fmt.Println(stdErr.String()) + return "", err + } + if stdOut.String() == "" { + return "", nil + } + workflowRun := struct { + DatabaseId int `json:"databaseId"` + StartedAt time.Time `json:"startedAt"` + }{} + err = json.Unmarshal(stdOut.Bytes(), &workflowRun) + if err != nil { + return "", err + } + if workflowRun.StartedAt.Before(startTime) { // Make sure the workflow run started after we started waiting + return "", nil + } + return fmt.Sprint(workflowRun.DatabaseId), nil +} + +// getUser retrieves the current GitHub user's username +func getUser() (string, error) { + stdOut, stdErr, err := gh.Exec( + "api", "user", "-q", ".login", + ) + if err != nil { + fmt.Println(stdErr.String()) + return "", err + } + return stdOut.String(), nil +} + +// getTestBranch prompts the user to select a test branch +func getTestBranch(options []string) (string, error) { + fmt.Println("Ensure your branch has had its latest work pushed to GitHub before running a test.") + testBranchPrompt := promptui.Select{ + Label: "Test Branch or Tag", + Items: options, + Searcher: func(input string, index int) bool { + return strings.Contains(options[index], input) + }, + StartInSearchMode: true, + } + _, branch, err := testBranchPrompt.Run() + if err != nil { + return "", err + } + return branch, nil +} + +// getAllBranchesAndTags uses the github API to retrieve all branches and tags for the plugin repo +// this call can take a while, so start it at the beginning asynchronously +func collectBranchesAndTags(results chan []string, errChan chan error) { + defer close(errChan) + defer close(results) + + branchChan, tagChan := make(chan []string, 1), make(chan []string, 1) + defer close(branchChan) + defer close(tagChan) + + // branches + go func() { + stdOut, stdErr, err := 
gh.Exec("api", fmt.Sprintf("repos/%s/branches", pluginRepo), "-q", ".[][\"name\"]", "--paginate") + if err != nil { + errChan <- fmt.Errorf("%w: %s", err, stdErr.String()) + } + branches := strings.Split(stdOut.String(), "\n") + cleanBranches := []string{} + for _, branch := range branches { + trimmed := strings.TrimSpace(branch) + if branch != "" { + cleanBranches = append(cleanBranches, trimmed) + } + } + branchChan <- cleanBranches + }() + + // tags + go func() { + stdOut, stdErr, err := gh.Exec("api", fmt.Sprintf("repos/%s/tags", pluginRepo), "-q", ".[][\"name\"]", "--paginate") + if err != nil { + errChan <- fmt.Errorf("%w: %s", err, stdErr.String()) + } + tags := strings.Split(stdOut.String(), "\n") + cleanTags := []string{} + for _, tag := range tags { + trimmed := strings.TrimSpace(tag) + if tag != "" { + cleanTags = append(cleanTags, trimmed) + } + } + tagChan <- cleanTags + }() + + // combine results + branches, tags := <-branchChan, <-tagChan + combined := append(branches, tags...) + sort.Slice(combined, func(i, j int) bool { + if combined[i] == "develop" { + return true + } else if combined[j] == "develop" { + return false + } + return strings.Compare(combined[i], combined[j]) < 0 + }) + results <- combined +} + +const helpDirectoryText = `Smoke tests are designed to be quick checks on basic functionality. + +Soak tests are designed to run for a long time and test the stability of the system under minimal or regular load. + +Performance tests are designed to test the system under heavy load and measure performance metrics. + +Chaos tests are designed to break the system in various ways and ensure it recovers gracefully. + +Reorg tests are designed to test the system's ability to handle reorgs on the blockchain. 
+ +Benchmark tests are designed to check how far the system can go before running into issues.` + +// getTestDirectory prompts the user to select a test directory +func getTestDirectory() (string, error) { + testDirectoryPrompt := promptui.Select{ + Label: "Test Type", + Items: testDirectories, + Size: 10, + Searcher: func(input string, index int) bool { + return strings.Contains(testDirectories[index], input) + }, + StartInSearchMode: true, + } + _, dir, err := testDirectoryPrompt.Run() + if err != nil { + return "", err + } + if dir == helpText { + fmt.Println(helpDirectoryText) + return getTestDirectory() + } + return dir, nil +} + +// getTest searches the chosen test directory for valid tests to run +func getTest(dir string) (string, error) { + items := testNames(dir) + testPrompt := promptui.Select{ + Label: "Test Name", + Items: items, + Size: 15, + Searcher: func(input string, index int) bool { + return strings.Contains(strings.ToLower(items[index]), strings.ToLower(input)) + }, + StartInSearchMode: true, + } + _, test, err := testPrompt.Run() + if err != nil { + return "", err + } + return test, nil +} + +// testNames returns a list of test names in the given directory +func testNames(directory string) []string { + // Regular expression pattern to search for + pattern := "func Test(\\w+?)\\(t \\*testing.T\\)" + + names := []string{} + + err := filepath.Walk(directory, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() { // Skip directories + return nil + } + if !strings.HasSuffix(info.Name(), "_test.go") { // Skip non-test files + return nil + } + + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + regex := regexp.MustCompile(pattern) + // Iterate over each line in the file + for scanner.Scan() { + line := scanner.Text() + submatches := regex.FindStringSubmatch(line) + if len(submatches) > 0 { + names = append(names, 
submatches[1]) + } + } + + if scanner.Err() != nil { + log.Error().Str("File", info.Name()).Msg("Error scanning file") + } + return scanner.Err() + }) + + if err != nil { + log.Fatal().Err(err).Msg("Error looking for tests") + } + sort.Strings(names) + return names +} + +// getNetwork prompts the user for a network to run the test on, including urls and keys if necessary +func getNetwork() (networkName, networkWs, networkHTTP, fundingKey string, err error) { + validNetworks, i := make([]string, len(networks.MappedNetworks)), 0 + for network := range networks.MappedNetworks { + validNetworks[i] = network + i++ + } + sort.Slice(validNetworks, func(i, j int) bool { // Get in (mostly) alphabetical order + if validNetworks[i] == "SIMULATED" { + return true + } else if validNetworks[j] == "SIMULATED" { + return false + } + return strings.Compare(validNetworks[i], validNetworks[j]) < 0 + }) + + networkPrompt := promptui.Select{ + Label: "Network", + Items: validNetworks, + Size: 10, + Searcher: func(input string, index int) bool { + return strings.Contains(strings.ToLower(validNetworks[index]), strings.ToLower(input)) + }, + StartInSearchMode: true, + } + _, network, err := networkPrompt.Run() + if err != nil { + return "", "", "", "", err + } + if strings.Contains(network, "SIMULATED") { // We take care of simulated network URLs + return network, "", "", "", nil + } + + networkWsPrompt := promptui.Prompt{ + Label: "Network WS URL", + Validate: func(s string) error { + if s == "" { + return errors.New("URL cannot be empty") + } + if !strings.HasPrefix(s, "ws") { + return errors.New("URL must start with ws") + } + return nil + }, + } + networkWs, err = networkWsPrompt.Run() + if err != nil { + return "", "", "", "", err + } + + networkHTTPPrompt := promptui.Prompt{ + Label: "Network HTTP URL", + Validate: func(s string) error { + if s == "" { + return errors.New("URL cannot be empty") + } + if !strings.HasPrefix(s, "http") { + return errors.New("URL must start with http") 
+ } + return nil + }, + } + networkHTTP, err = networkHTTPPrompt.Run() + if err != nil { + return "", "", "", "", err + } + + networkFundingKeyPrompt := promptui.Prompt{ + Label: "Network Funding Key", + Validate: func(s string) error { + if s == "" { + return errors.New("funding key cannot be empty for a non-simulated network") + } + _, err := crypto.HexToECDSA(s) + if err != nil { + return fmt.Errorf("funding key must be a valid hex string: %w", err) + } + return nil + }, + } + fundingKey, err = networkFundingKeyPrompt.Run() + if err != nil { + return "", "", "", "", err + } + + return network, networkWs, networkHTTP, fundingKey, nil +} diff --git a/integration-tests/scripts/buildTestImage b/integration-tests/scripts/buildTestImage new file mode 100644 index 00000000..d49f4dc8 --- /dev/null +++ b/integration-tests/scripts/buildTestImage @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# build test binaries +# accepts a single space separated argument of the folders to build + +set -ex + +# get this scripts directory +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) + +cd "$SCRIPT_DIR"/../../ || exit 1 + +TAG_VERSION="${1}" +BASE_IMAGE_VERSION="${2}" +SUITES=$3 +DEFAULT_SUITES="smoke soak chaos" +ACCOUNT=$(aws sts get-caller-identity | jq -r .Account) +AWS_BASE="${ACCOUNT}".dkr.ecr.us-west-2.amazonaws.com +TAG="${AWS_BASE}"/plugin-tests:"${TAG_VERSION}" +BASE_IMAGE="${AWS_BASE}"/test-base-image + +if [ "${TAG_VERSION}" = "" ]; then + echo "Need an argument for the image tag version in argument 1" + exit 1 +fi + +if [ "${BASE_IMAGE_VERSION}" = "" ]; then + echo "Need an argument for the test-base-image tag version in argument 2" + exit 1 +fi + +if [ "${SUITES}" = "" ]; then + echo "SUITES not set, using defaults \"${DEFAULT_SUITES}\"" + SUITES=${DEFAULT_SUITES} +fi + +aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin ${AWS_BASE} +docker build -t "${TAG}" -f "./integration-tests/test.Dockerfile" 
--build-arg BASE_IMAGE="${BASE_IMAGE}" --build-arg IMAGE_VERSION="${BASE_IMAGE_VERSION}" --build-arg SUITES="${SUITES}" .
+if [ "${4}" = "true" ]; then # push only when explicitly requested; was `if "${4}" = "true"]; then`, a syntax error (missing opening '[')
+	docker push "${TAG}"
+fi
diff --git a/integration-tests/scripts/buildTestMatrixList.sh b/integration-tests/scripts/buildTestMatrixList.sh
new file mode 100644
index 00000000..f02362aa
--- /dev/null
+++ b/integration-tests/scripts/buildTestMatrixList.sh
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+
+# requires a path to a test file to compare the test list against
+# requires a matrix job name to be passed in, for example "automation"
+# requires a node label to be passed in, for example "ubuntu-latest"
+
+set -e
+
+# get this scripts directory
+SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)
+
+cd "$SCRIPT_DIR"/../ || exit 1
+
+FILENAME=$1
+MATRIX_JOB_NAME=$2
+NODE_LABEL=$3
+NODE_COUNT=$4
+
+# Get list of test names from JSON file
+JSONFILE="${FILENAME}_test_list.json"
+COUNTER=1
+
+# Build a JSON object in the format expected by our integration-tests workflow matrix
+matrix_output() {
+  local counter=$1
+  local job_name=$2
+  local test_name=$3
+  local node_label=$4
+  local node_count=$5
+  local counter_out=$(printf "%02d\n" $counter)
+  echo -n "{\"name\": \"${job_name}-${counter_out}\", \"file\": \"${job_name}\",\"nodes\": ${node_count}, \"os\": \"${node_label}\", \"pyroscope_env\": \"ci-smoke-${job_name}-evm-simulated\", \"run\": \"-run '^${test_name}$'\"}"
+}
+
+# Read the JSON file and loop through 'tests' and 'run'
+jq -c '.tests[]' ${JSONFILE} | while read -r test; do
+  testName=$(echo ${test} | jq -r '.name')
+  label=$(echo ${test} | jq -r '.label // empty')
+  effective_node_label=${label:-$NODE_LABEL}
+  node_count=$(echo ${test} | jq -r '.nodes // empty')
+  effective_node_count=${node_count:-$NODE_COUNT}
+  subTests=$(echo ${test} | jq -r '.run[]?.name // empty')
+  output=""
+
+  if [ $COUNTER -ne 1 ]; then
+    echo -n ","
+  fi
+
+  # Loop through subtests, if any, and print in the desired
format + if [ -n "$subTests" ]; then + subTestString="" + subTestCounter=1 + for subTest in $subTests; do + if [ $subTestCounter -ne 1 ]; then + subTestString+="|" + fi + subTestString+="${testName}\/${subTest}" + ((subTestCounter++)) + done + testName="${subTestString}" + fi + matrix_output $COUNTER $MATRIX_JOB_NAME "${testName}" ${effective_node_label} ${effective_node_count} + ((COUNTER++)) +done > "./tmpout.json" +OUTPUT=$(cat ./tmpout.json) +echo "[${OUTPUT}]" +rm ./tmpout.json diff --git a/integration-tests/scripts/buildTests b/integration-tests/scripts/buildTests new file mode 100644 index 00000000..749bb545 --- /dev/null +++ b/integration-tests/scripts/buildTests @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# build test binaries +# accepts a single space separated argument of the folders to build + +set -ex + +# get this scripts directory +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) + +cd "$SCRIPT_DIR"/../ || exit 1 + +helm repo update + +# parse out quotes if they exist in the string +temp="${1%\"}" +tosplit="${temp#\"}" + +# find the suite name +OIFS=$IFS +IFS=' ' +for x in $tosplit +do + go test -c -tags embed ./"${x}" +done +IFS=$OIFS diff --git a/integration-tests/scripts/compareTestList.sh b/integration-tests/scripts/compareTestList.sh new file mode 100644 index 00000000..8cc916c7 --- /dev/null +++ b/integration-tests/scripts/compareTestList.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +# accepts a path to a test file to compare the test list against + +set -e + +# get this scripts directory +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) + +cd "$SCRIPT_DIR"/../ || exit 1 + +FILENAME=$1 + +TESTLIST=$(cat ${FILENAME} | grep "func Test.*\(t \*testing.T\)" | grep -o 'Test[A-Za-z0-9_]*') + +# convert the test list from above into json in the form {"tests":[{"name":"TestName"}]} +TESTLISTJSON=$(echo $TESTLIST | jq -R -s -c '{tests: split(" ") | map({"name":.})}') + +# Get list of test names from JSON 
file +JSONFILE="${FILENAME}_test_list.json" +JSONTESTLIST=$(jq -r '.tests[].name' ${JSONFILE}) + +# Convert lists to arrays +TESTLIST_ARRAY=($(echo "$TESTLIST")) +JSONTESTLIST_ARRAY=($(echo "$JSONTESTLIST")) + +ERRORS_FOUND=false + +# Compare TESTLIST_ARRAY against JSONTESTLIST_ARRAY +for test in "${TESTLIST_ARRAY[@]}"; do + if [[ ! " ${JSONTESTLIST_ARRAY[@]} " =~ " ${test} " ]]; then + echo "$test exists only in ${FILENAME}." + ERRORS_FOUND=true + fi +done + +# Compare JSONTESTLIST_ARRAY against TESTLIST_ARRAY +for test in "${JSONTESTLIST_ARRAY[@]}"; do + if [[ ! " ${TESTLIST_ARRAY[@]} " =~ " ${test} " ]]; then + echo "$test exists only in ${JSONFILE}." + ERRORS_FOUND=true + fi +done + +if [ "$ERRORS_FOUND" = true ] ; then + echo "Test lists do not match. Please update ${JSONFILE} with the updated tests to run in CI." + exit 1 +fi diff --git a/integration-tests/scripts/entrypoint b/integration-tests/scripts/entrypoint new file mode 100644 index 00000000..d4ebe722 --- /dev/null +++ b/integration-tests/scripts/entrypoint @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +# Runs tests for a specific product + +set -x + +# get this scripts directory +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) + +cd "$SCRIPT_DIR"/../ || exit 1 + +# Arguments needed +# ARGS=${ARGS:=} any extra args for go test +# SUITE=${SUITE:=} the suite of tests you want to run +# TEST_NAME=${TEST_NAME:=} The specific test to run + +# run the tests +./${SUITE}.test -test.v -test.count 1 ${ARGS} -test.run ^${TEST_NAME}$ + +exit_code=$? + +echo "Test exit code: ${exit_code}" + +# Check if the test did not pass (non-zero exit code) +if [ $exit_code -ne 0 ]; then + # 3 is the code for an interrupted test, we only want to restart the test when the test is interrupted and in a state + # that it can recover from. Otherwise we mark the test as "passed" as far as K8s is concerned so it doesn't restart it. 
+ if [ $exit_code -eq 3 ]; then + echo "Test was interrupted, exiting with 1 exit code to trigger K8s to restart" + exit 1 # Exiting with non-zero status to trigger pod restart + else + echo "Test either panicked or had some sort of failure. We're exiting with a non-zero exit code so that K8s doesn't restart the pod." + echo "TEST_FAILED" + fi +fi + +# Sleep for the amount of time provided by the POST_RUN_SLEEP env var +upload_to_slack() { + curl -F file=@$1 -F "initial_comment=$2" -F channels=${SLACK_CHANNEL} -H "Authorization: Bearer ${SLACK_API_KEY}" https://slack.com/api/files.upload +} + +if [ -n "${UPLOAD_CPU_PROFILE}" ]; then + upload_to_slack profile.out "CPU Profile for ${TEST_NAME}" +fi +if [ -n "${UPLOAD_MEM_PROFILE}" ]; then + upload_to_slack memprofile.out "MEM Profile for ${TEST_NAME}" +fi + +echo "Exiting with 0 exit code as test is either completed, or failed and cannot be restarted" +exit 0 diff --git a/integration-tests/scripts/run_product_tests b/integration-tests/scripts/run_product_tests new file mode 100644 index 00000000..48899d45 --- /dev/null +++ b/integration-tests/scripts/run_product_tests @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Runs tests for a specific product + +set -ex + +# get this scripts directory +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) + +cd "$SCRIPT_DIR"/../ || exit 1 +PRODUCT=${PRODUCT:=cron} +ARGS=${ARGS:=} +go test -v ${ARGS} ./smoke/"$PRODUCT"_test.go diff --git a/integration-tests/scripts/search_and_delete.sh b/integration-tests/scripts/search_and_delete.sh new file mode 100644 index 00000000..ae1f8a6e --- /dev/null +++ b/integration-tests/scripts/search_and_delete.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +if [ $# -eq 0 ]; then + read -p "Enter the comma-separated list of filenames to search for: " filenames +else + filenames=$@ +fi + +IFS=',' read -ra filenames_arr <<< "$filenames" + +# Start search from the current working directory +current_dir=$(pwd) + +echo "Searching for files in 
$current_dir..." +found_files=() +for filename in "${filenames_arr[@]}"; do + while IFS= read -r file; do + found_files+=("$file") + done < <(find "$current_dir" -type f -name "$filename" 2>/dev/null) +done + +if [[ ${#found_files[@]} -eq 0 ]]; then + echo "No files found." + exit 0 +fi + +echo "Found files:" +for file in "${found_files[@]}"; do + echo "$file" +done + +read -p "Do you want to remove all these files? (y/n): " confirm + +if [[ $confirm == "yes" ]] || [[ $confirm == "y" ]]; then + for file in "${found_files[@]}"; do + rm "$file" + done + echo "Files removed." +else + echo "Files not removed." +fi \ No newline at end of file diff --git a/integration-tests/smoke/QuickStart.md b/integration-tests/smoke/QuickStart.md new file mode 100644 index 00000000..2c2ec55f --- /dev/null +++ b/integration-tests/smoke/QuickStart.md @@ -0,0 +1,100 @@ +# Running Smoke Tests Locally + +pre-reqs +* install node.js (18 preferred) +* k3d + +## TLDR; +run this once +```shell +echo '127.0.0.1 k3d-myregistry.localhost' | sudo tee -a /etc/hosts +``` +run to create k3d resources +```shell +k3d registry create myregistry.localhost --port 5001 +k3d cluster create --registry-use k3d-myregistry.localhost:5001 +``` +run to create new build +```shell +cd ~/go/src/github.com/plugin +env DOCKER_DEFAULT_PLATFORM=linux/amd64 docker buildx build --platform linux/amd64 -f ./core/plugin.Dockerfile --build-arg ENVIRONMENT=release --build-arg COMMIT_SHA=$(git rev-parse HEAD) -t smartcontract/plugin:develop-$(git rev-parse HEAD) . 
+export PLUGIN_VERSION=develop-$(git rev-parse HEAD) +docker tag docker.io/smartcontract/plugin:$PLUGIN_VERSION k3d-myregistry.localhost:5001/docker.io/smartcontract/plugin:$PLUGIN_VERSION +docker push k3d-myregistry.localhost:5001/docker.io/smartcontract/plugin:$PLUGIN_VERSION +export PLUGIN_IMAGE=k3d-myregistry.localhost:5001/docker.io/smartcontract/plugin +``` +run the tests + ```shell +cd ~/go/src/github.com/plugin +make test_smoke_simulated args="--focus-file=auto_ocr_test.go" + ``` + +## Already have the initial stuff set up and just want to rebuild and run +build+run +```shell +cd ~/go/src/github.com/plugin +env DOCKER_DEFAULT_PLATFORM=linux/amd64 docker buildx build --platform linux/amd64 -f ./core/plugin.Dockerfile --build-arg ENVIRONMENT=release --build-arg COMMIT_SHA=$(git rev-parse HEAD) -t smartcontract/plugin:develop-$(git rev-parse HEAD) . +export PLUGIN_VERSION=develop-$(git rev-parse HEAD) +export TEST_LOG_LEVEL="debug" +docker tag docker.io/smartcontract/plugin:$PLUGIN_VERSION k3d-myregistry.localhost:5001/docker.io/smartcontract/plugin:$PLUGIN_VERSION +docker push k3d-myregistry.localhost:5001/docker.io/smartcontract/plugin:$PLUGIN_VERSION +export PLUGIN_IMAGE=k3d-myregistry.localhost:5001/docker.io/smartcontract/plugin +make test_smoke_simulated args="--focus-file=auto_ocr_test.go" + ``` + +## Step by Step + +1. Build a Docker image of the plugin repo: + + ```shell + env DOCKER_DEFAULT_PLATFORM=linux/amd64 docker buildx build --platform linux/amd64 -f ./core/plugin.Dockerfile --build-arg ENVIRONMENT=release --build-arg COMMIT_SHA=$(git rev-parse HEAD) -t smartcontract/plugin:develop-$(git rev-parse HEAD) . + ``` + last line of the output will have something like + `=> => naming to docker.io/smartcontract/plugin:develop-a4caf33ce0ed6b841294c5ef06563c1cd4de6dfc` + use the tag at the end in the next command + ```shell + export PLUGIN_VERSION=develop-a4caf33ce0ed6b841294c5ef06563c1cd4de6dfc + ``` +2. 
Set up a Kubernetes cluster locally and a Docker registry which Kubernetes can pull from: + + ```shell + k3d registry create myregistry.localhost --port 5001 + k3d cluster create --registry-use k3d-myregistry.localhost:5001 + ``` + +3. Add these lines to the `/etc/hosts` file + + ```shell + # Added for k3d registry + 127.0.0.1 k3d-myregistry.localhost + ``` + +4. **Tag** the Docker image with the appropriate name: + + ```shell + docker tag docker.io/smartcontract/plugin:$PLUGIN_VERSION k3d-myregistry.localhost:5001/docker.io/smartcontract/plugin:$PLUGIN_VERSION + ``` + +5. **Push** the Docker image we created in step 1 to the new registry: + + ```shell + docker push k3d-myregistry.localhost:5001/docker.io/smartcontract/plugin:$PLUGIN_VERSION + ``` + +6. Before actually running the tests, we need to set environment variables as follows: + + ```shell + export PLUGIN_IMAGE=k3d-myregistry.localhost:5001/docker.io/smartcontract/plugin + ``` + +7. (**Optional**) In case you want to use the changes you make locally in both repos (i.e. if you want to test the changes in the plugin repo in accordance to the changes you made in the plugin-testing-framework), you need to run the following in the plugin/integration-tests folder: + ```shell + go mod edit -replace github.com/goplugin/plugin-testing-framework=~/go/src/github.com/plugin-testing-framework + ``` + +8. Finally, run make test_smoke inside the plugin repo root folder to run all the integration tests. 
In case you want to only run some specific tests, you can do so with something like + ```shell + make test_smoke_simulated args="--focus-file=keeper_test.go" + ``` + +this setup doc was modified from [notion](https://www.notion.so/plugin/Setting-up-Integration-Tests-Framework-Locally-dc0e3db7718b45ad9249e97d7ef74c51) diff --git a/integration-tests/smoke/README.md b/integration-tests/smoke/README.md new file mode 100644 index 00000000..db795d82 --- /dev/null +++ b/integration-tests/smoke/README.md @@ -0,0 +1,83 @@ +## Smoke tests (local environments) + +These products are using local `testcontainers-go` environments: +- RunLog (Direct request) +- Cron +- Flux +- VRFv1 +- VRFv2 + +### Usage +``` +go test -v -run ${TestName} +``` +### Re-using environments +Configuration is still WIP, but you can make your environment re-usable by providing JSON config. + +Create `test_env.json` in the same dir +``` +export TEST_ENV_CONFIG_PATH=test_env.json +``` + +Here is an example for 3 nodes cluster +``` +{ + "networks": [ + "epic" + ], + "mockserver": { + "container_name": "mockserver", + "external_adapters_mock_urls": [ + "/epico1" + ] + }, + "geth": { + "container_name": "geth" + }, + "nodes": [ + { + "container_name": "cl-node-0", + "db_container_name": "cl-db-0" + }, + { + "container_name": "cl-node-1", + "db_container_name": "cl-db-1" + }, + { + "container_name": "cl-node-2", + "db_container_name": "cl-db-2" + } + ] +} +``` + +### Running against Live Testnets +1. Prepare your `overrides.toml` file with selected network and CL image name and version and save anywhere inside `integration-tests` folder. 
+```toml +[PluginImage] +image="your-image" +version="your-version" + +[Network] +selected_networks=["polygon_mumbai"] + +[Network.RpcHttpUrls] +polygon_mumbai=["https://http.endpoint.com"] + +[Network.RpcWsUrls] +polygon_mumbai=["wss://ws.endpoint.com"] + +[Network.WalletKeys] +polygon_mumbai=["my_so_private_key"] +``` +Then execute: +```bash +go test -v -run ${TestName} +``` + + + +### Debugging CL client API calls +```bash +export CL_CLIENT_DEBUG=true +``` \ No newline at end of file diff --git a/integration-tests/smoke/automation_test.go b/integration-tests/smoke/automation_test.go new file mode 100644 index 00000000..f9a6befa --- /dev/null +++ b/integration-tests/smoke/automation_test.go @@ -0,0 +1,1261 @@ +package smoke + +import ( + "encoding/json" + "fmt" + "math/big" + "net/http" + "os" + "strconv" + "testing" + "time" + + ctfTestEnv "github.com/goplugin/plugin-testing-framework/docker/test_env" + + ocr3 "github.com/goplugin/libocr/offchainreporting2plus/ocr3confighelper" + + ocr2keepers30config "github.com/goplugin/plugin-automation/pkg/v3/config" + + "github.com/goplugin/pluginv3.0/integration-tests/actions/automationv2" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + "github.com/goplugin/pluginv3.0/integration-tests/types" + + "github.com/ethereum/go-ethereum/common" + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/networks" + "github.com/goplugin/plugin-testing-framework/utils/ptr" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + 
"github.com/goplugin/pluginv3.0/integration-tests/types/config/node" + cltypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_utils_2_1" + "github.com/goplugin/pluginv3.0/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams" +) + +var utilsABI = cltypes.MustGetABI(automation_utils_2_1.AutomationUtilsABI) + +const ( + automationDefaultUpkeepGasLimit = uint32(2500000) + automationDefaultLinkFunds = int64(9e18) + automationExpectedData = "abcdef" + defaultAmountOfUpkeeps = 2 +) + +var ( + automationDefaultRegistryConfig = contracts.KeeperRegistrySettings{ + PaymentPremiumPPB: uint32(200000000), + FlatFeeMicroPLI: uint32(0), + BlockCountPerTurn: big.NewInt(10), + CheckGasLimit: uint32(2500000), + StalenessSeconds: big.NewInt(90000), + GasCeilingMultiplier: uint16(1), + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: uint32(5000000), + FallbackGasPrice: big.NewInt(2e11), + FallbackLinkPrice: big.NewInt(2e18), + MaxCheckDataSize: uint32(5000), + MaxPerformDataSize: uint32(5000), + } +) + +func TestMain(m *testing.M) { + logging.Init() + // config, err := tc.GetConfig(tc.NoTest, tc.Smoke, tc.Automation) + // if err != nil { + // panic(err) + // } + // fmt.Printf("Running Smoke Test on %s\n", networks.MustGetSelectedNetworkConfig(config.Network)[0].Name) // Print to get around disabled logging + // fmt.Printf("Plugin Image %v\n", config.PluginImage.Image) // Print to get around disabled logging + // fmt.Printf("Plugin Version %v\n", config.PluginImage.Version) // Print to get around disabled logging + os.Exit(m.Run()) +} + +func TestAutomationBasic(t *testing.T) { + config, err := tc.GetConfig("Smoke", tc.Automation) + if err != nil { + t.Fatal(err) + } + SetupAutomationBasic(t, false, &config) +} + +func SetupAutomationBasic(t *testing.T, nodeUpgrade bool, automationTestConfig types.AutomationTestConfig) { + t.Parallel() + + registryVersions := 
map[string]ethereum.KeeperRegistryVersion{ + "registry_2_0": ethereum.RegistryVersion_2_0, + "registry_2_1_conditional": ethereum.RegistryVersion_2_1, + "registry_2_1_logtrigger": ethereum.RegistryVersion_2_1, + "registry_2_1_with_mercury_v02": ethereum.RegistryVersion_2_1, + "registry_2_1_with_mercury_v03": ethereum.RegistryVersion_2_1, + "registry_2_1_with_logtrigger_and_mercury_v02": ethereum.RegistryVersion_2_1, + } + + for name, registryVersion := range registryVersions { + name := name + registryVersion := registryVersion + t.Run(name, func(t *testing.T) { + cfg := tc.MustCopy(automationTestConfig) + t.Parallel() + l := logging.GetTestLogger(t) + + var err error + if nodeUpgrade { + if cfg.GetPluginUpgradeImageConfig() == nil { + t.Fatal("[PluginUpgradeImage] must be set in TOML config to upgrade nodes") + } + } + + // Use the name to determine if this is a log trigger or mercury + isLogTrigger := name == "registry_2_1_logtrigger" || name == "registry_2_1_with_logtrigger_and_mercury_v02" + isMercuryV02 := name == "registry_2_1_with_mercury_v02" || name == "registry_2_1_with_logtrigger_and_mercury_v02" + isMercuryV03 := name == "registry_2_1_with_mercury_v03" + isMercury := isMercuryV02 || isMercuryV03 + + a := setupAutomationTestDocker( + t, registryVersion, automationDefaultRegistryConfig, isMercuryV02, isMercuryV03, automationTestConfig, + ) + + consumers, upkeepIDs := actions.DeployConsumers( + t, + a.Registry, + a.Registrar, + a.LinkToken, + a.Deployer, + a.ChainClient, + defaultAmountOfUpkeeps, + big.NewInt(automationDefaultLinkFunds), + automationDefaultUpkeepGasLimit, + isLogTrigger, + isMercury, + ) + + for i := 0; i < len(upkeepIDs); i++ { + if isLogTrigger || isMercuryV02 { + if err := consumers[i].Start(); err != nil { + l.Error().Msg("Error when starting consumer") + return + } + } + + if isMercury { + // Set privilege config to enable mercury + privilegeConfigBytes, _ := json.Marshal(streams.UpkeepPrivilegeConfig{ + MercuryEnabled: true, + }) + 
if err := a.Registry.SetUpkeepPrivilegeConfig(upkeepIDs[i], privilegeConfigBytes); err != nil { + l.Error().Msg("Error when setting upkeep privilege config") + return + } + } + } + + l.Info().Msg("Waiting for all upkeeps to be performed") + gom := gomega.NewGomegaWithT(t) + startTime := time.Now() + // TODO Tune this timeout window after stress testing + gom.Eventually(func(g gomega.Gomega) { + // Check if the upkeeps are performing multiple times by analyzing their counters + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + expect := 5 + l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep Index", i).Msg("Number of upkeeps performed") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">=", int64(expect)), + "Expected consumer counter to be greater than %d, but got %d", expect, counter.Int64()) + } + }, "10m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~2m for performing each upkeep 5 times, ~2m buffer + + l.Info().Msgf("Total time taken to get 5 performs for each upkeep: %s", time.Since(startTime)) + + if nodeUpgrade { + require.NotNil(t, cfg.GetPluginImageConfig(), "unable to upgrade node version, [PluginUpgradeImage] was not set, must both a new image or a new version") + expect := 5 + // Upgrade the nodes one at a time and check that the upkeeps are still being performed + for i := 0; i < 5; i++ { + err = actions.UpgradePluginNodeVersionsLocal(*cfg.GetPluginImageConfig().Image, *cfg.GetPluginImageConfig().Version, a.DockerEnv.ClCluster.Nodes[i]) + require.NoError(t, err, "Error when upgrading node %d", i) + time.Sleep(time.Second * 10) + expect = expect + 5 + gom.Eventually(func(g gomega.Gomega) { + // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are increasing by 5 in each step within 5 minutes + for i := 0; i < len(upkeepIDs); i++ { + 
counter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep ID", i).Msg("Number of upkeeps performed") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">=", int64(expect)), + "Expected consumer counter to be greater than %d, but got %d", expect, counter.Int64()) + } + }, "5m", "1s").Should(gomega.Succeed()) + } + } + + // Cancel all the registered upkeeps via the registry + for i := 0; i < len(upkeepIDs); i++ { + err := a.Registry.CancelUpkeep(upkeepIDs[i]) + require.NoError(t, err, "Could not cancel upkeep at index %d", i) + } + + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error encountered when waiting for upkeeps to be cancelled") + + var countersAfterCancellation = make([]*big.Int, len(upkeepIDs)) + + for i := 0; i < len(upkeepIDs); i++ { + // Obtain the amount of times the upkeep has been executed so far + countersAfterCancellation[i], err = consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + l.Info().Int64("Upkeep Count", countersAfterCancellation[i].Int64()).Int("Upkeep Index", i).Msg("Cancelled upkeep") + } + + l.Info().Msg("Making sure the counter stays consistent") + gom.Consistently(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + // Expect the counter to remain constant (At most increase by 1 to account for stale performs) because the upkeep was cancelled + latestCounter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterCancellation[i].Int64()+1), + "Expected consumer counter to remain less than or equal to %d, but got %d", + countersAfterCancellation[i].Int64()+1, 
latestCounter.Int64()) + } + }, "1m", "1s").Should(gomega.Succeed()) + }) + } +} + +func TestSetUpkeepTriggerConfig(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.Automation) + if err != nil { + t.Fatal(err) + } + + a := setupAutomationTestDocker( + t, ethereum.RegistryVersion_2_1, automationDefaultRegistryConfig, false, false, &config, + ) + + consumers, upkeepIDs := actions.DeployConsumers( + t, + a.Registry, + a.Registrar, + a.LinkToken, + a.Deployer, + a.ChainClient, + defaultAmountOfUpkeeps, + big.NewInt(automationDefaultLinkFunds), + automationDefaultUpkeepGasLimit, + true, + false, + ) + + // Start log trigger based upkeeps for all consumers + for i := 0; i < len(consumers); i++ { + err := consumers[i].Start() + if err != nil { + return + } + } + + l.Info().Msg("Waiting for all upkeeps to perform") + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + // Check if the upkeeps are performing multiple times by analyzing their counters + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + expect := 5 + l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep Index", i).Msg("Number of upkeeps performed") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">=", int64(expect)), + "Expected consumer counter to be greater than %d, but got %d", expect, counter.Int64()) + } + }, "5m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~2m for performing each upkeep 5 times, ~2m buffer + + topic0InBytesMatch := [32]byte{ + 61, 83, 163, 149, 80, 224, 70, 136, + 6, 88, 39, 243, 187, 134, 88, 76, + 176, 7, 171, 158, 188, 167, 235, + 213, 40, 231, 48, 28, 156, 49, 235, 93, + } // bytes representation of 0x3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d + + topic0InBytesNoMatch := [32]byte{ + 62, 83, 163, 149, 80, 224, 70, 
136, + 6, 88, 39, 243, 187, 134, 88, 76, + 176, 7, 171, 158, 188, 167, 235, + 213, 40, 231, 48, 28, 156, 49, 235, 93, + } // changed the first byte from 61 to 62 to make it not match + + bytes0 := [32]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } // bytes representation of 0x0000000000000000000000000000000000000000000000000000000000000000 + + // Update the trigger config so no upkeeps are triggered + for i := 0; i < len(consumers); i++ { + upkeepAddr := consumers[i].Address() + + logTriggerConfigStruct := automation_utils_2_1.LogTriggerConfig{ + ContractAddress: common.HexToAddress(upkeepAddr), + FilterSelector: 0, + Topic0: topic0InBytesNoMatch, + Topic1: bytes0, + Topic2: bytes0, + Topic3: bytes0, + } + encodedLogTriggerConfig, err := utilsABI.Methods["_logTriggerConfig"].Inputs.Pack(&logTriggerConfigStruct) + if err != nil { + return + } + + err = a.Registry.SetUpkeepTriggerConfig(upkeepIDs[i], encodedLogTriggerConfig) + require.NoError(t, err, "Could not set upkeep trigger config at index %d", i) + } + + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps") + + var countersAfterSetNoMatch = make([]*big.Int, len(upkeepIDs)) + + // Wait for 10 seconds to let in-flight upkeeps finish + time.Sleep(10 * time.Second) + for i := 0; i < len(upkeepIDs); i++ { + // Obtain the amount of times the upkeep has been executed so far + countersAfterSetNoMatch[i], err = consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + l.Info().Int64("Upkeep Count", countersAfterSetNoMatch[i].Int64()).Int("Upkeep Index", i).Msg("Upkeep") + } + + l.Info().Msg("Making sure the counter stays consistent") + gom.Consistently(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + // Expect the counter to remain constant (At most increase by 2 to account for stale 
performs) because the upkeep trigger config is not met + bufferCount := int64(2) + latestCounter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterSetNoMatch[i].Int64()+bufferCount), + "Expected consumer counter to remain less than or equal to %d, but got %d", + countersAfterSetNoMatch[i].Int64()+bufferCount, latestCounter.Int64()) + } + }, "1m", "1s").Should(gomega.Succeed()) + + // Update the trigger config, so upkeeps start performing again + for i := 0; i < len(consumers); i++ { + upkeepAddr := consumers[i].Address() + + logTriggerConfigStruct := automation_utils_2_1.LogTriggerConfig{ + ContractAddress: common.HexToAddress(upkeepAddr), + FilterSelector: 0, + Topic0: topic0InBytesMatch, + Topic1: bytes0, + Topic2: bytes0, + Topic3: bytes0, + } + encodedLogTriggerConfig, err := utilsABI.Methods["_logTriggerConfig"].Inputs.Pack(&logTriggerConfigStruct) + if err != nil { + return + } + + err = a.Registry.SetUpkeepTriggerConfig(upkeepIDs[i], encodedLogTriggerConfig) + require.NoError(t, err, "Could not set upkeep trigger config at index %d", i) + } + + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps") + + var countersAfterSetMatch = make([]*big.Int, len(upkeepIDs)) + + for i := 0; i < len(upkeepIDs); i++ { + // Obtain the amount of times the upkeep has been executed so far + countersAfterSetMatch[i], err = consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + l.Info().Int64("Upkeep Count", countersAfterSetMatch[i].Int64()).Int("Upkeep Index", i).Msg("Upkeep") + } + + // Wait for 30 seconds to make sure backend is ready + time.Sleep(30 * time.Second) + // Start the consumers again + for i := 0; i < len(consumers); 
i++ { + err := consumers[i].Start() + if err != nil { + return + } + } + + l.Info().Msg("Making sure the counter starts increasing again") + gom.Eventually(func(g gomega.Gomega) { + // Check if the upkeeps are performing multiple times by analyzing their counters + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + expect := int64(5) + l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep Index", i).Msg("Number of upkeeps performed") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">=", countersAfterSetMatch[i].Int64()+expect), + "Expected consumer counter to be greater than %d, but got %d", countersAfterSetMatch[i].Int64()+expect, counter.Int64()) + } + }, "5m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~2m for performing each upkeep 5 times, ~2m buffer +} + +func TestAutomationAddFunds(t *testing.T) { + t.Parallel() + registryVersions := map[string]ethereum.KeeperRegistryVersion{ + "registry_2_0": ethereum.RegistryVersion_2_0, + "registry_2_1": ethereum.RegistryVersion_2_1, + } + + for name, registryVersion := range registryVersions { + name := name + registryVersion := registryVersion + t.Run(name, func(t *testing.T) { + t.Parallel() + config, err := tc.GetConfig("Smoke", tc.Automation) + if err != nil { + t.Fatal(err) + } + a := setupAutomationTestDocker( + t, registryVersion, automationDefaultRegistryConfig, false, false, &config, + ) + + consumers, upkeepIDs := actions.DeployConsumers( + t, + a.Registry, + a.Registrar, + a.LinkToken, + a.Deployer, + a.ChainClient, + defaultAmountOfUpkeeps, + big.NewInt(1), + automationDefaultUpkeepGasLimit, + false, + false, + ) + + gom := gomega.NewGomegaWithT(t) + // Since the upkeep is currently underfunded, check that it doesn't get executed + gom.Consistently(func(g gomega.Gomega) { + counter, err := consumers[0].Counter(testcontext.Get(t)) + 
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)), + "Expected consumer counter to remain zero, but got %d", counter.Int64()) + }, "2m", "1s").Should(gomega.Succeed()) // ~1m for setup, 1m assertion + + // Grant permission to the registry to fund the upkeep + err = a.LinkToken.Approve(a.Registry.Address(), big.NewInt(9e18)) + require.NoError(t, err, "Could not approve permissions for the registry on the link token contract") + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + // Add funds to the upkeep whose ID we know from above + err = a.Registry.AddUpkeepFunds(upkeepIDs[0], big.NewInt(9e18)) + require.NoError(t, err, "Unable to add upkeep") + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + // Now the new upkeep should be performing because we added enough funds + gom.Eventually(func(g gomega.Gomega) { + counter, err := consumers[0].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64()) + }, "2m", "1s").Should(gomega.Succeed()) // ~1m for perform, 1m buffer + }) + } +} + +func TestAutomationPauseUnPause(t *testing.T) { + t.Parallel() + registryVersions := map[string]ethereum.KeeperRegistryVersion{ + "registry_2_0": ethereum.RegistryVersion_2_0, + "registry_2_1": ethereum.RegistryVersion_2_1, + } + + for n, rv := range registryVersions { + name := n + registryVersion := rv + t.Run(name, func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Automation) + if err != nil { + t.Fatal(err) + } + a := setupAutomationTestDocker( + t, registryVersion, automationDefaultRegistryConfig, false, false, 
&config, + ) + + consumers, upkeepIDs := actions.DeployConsumers( + t, + a.Registry, + a.Registrar, + a.LinkToken, + a.Deployer, + a.ChainClient, + defaultAmountOfUpkeeps, + big.NewInt(automationDefaultLinkFunds), + automationDefaultUpkeepGasLimit, + false, + false, + ) + + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 5 + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(5)), + "Expected consumer counter to be greater than 5, but got %d", counter.Int64()) + l.Info().Int("Upkeep Index", i).Int64("Upkeep counter", counter.Int64()).Msg("Number of upkeeps performed") + } + }, "5m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~2m for performing each upkeep 5 times, ~2m buffer + + // pause all the registered upkeeps via the registry + for i := 0; i < len(upkeepIDs); i++ { + err := a.Registry.PauseUpkeep(upkeepIDs[i]) + require.NoError(t, err, "Could not pause upkeep at index %d", i) + } + + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for upkeeps to be paused") + + var countersAfterPause = make([]*big.Int, len(upkeepIDs)) + for i := 0; i < len(upkeepIDs); i++ { + // Obtain the amount of times the upkeep has been executed so far + countersAfterPause[i], err = consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + l.Info().Int("Upkeep Index", i).Int64("Upkeeps Performed", countersAfterPause[i].Int64()).Msg("Paused Upkeep") + } + + gom.Consistently(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + // In most cases counters should remain constant, but there might be a 
straggling perform tx which + // gets committed later and increases counter by 1 + latestCounter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterPause[i].Int64()+1), + "Expected consumer counter not have increased more than %d, but got %d", + countersAfterPause[i].Int64()+1, latestCounter.Int64()) + } + }, "1m", "1s").Should(gomega.Succeed()) + + // unpause all the registered upkeeps via the registry + for i := 0; i < len(upkeepIDs); i++ { + err := a.Registry.UnpauseUpkeep(upkeepIDs[i]) + require.NoError(t, err, "Could not unpause upkeep at index %d", i) + } + + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for upkeeps to be unpaused") + + gom.Eventually(func(g gomega.Gomega) { + // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 + numbers of performing before pause + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", countersAfterPause[i].Int64()+1), + "Expected consumer counter to be greater than %d, but got %d", countersAfterPause[i].Int64()+1, counter.Int64()) + l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Number of upkeeps performed") + } + }, "2m", "1s").Should(gomega.Succeed()) // ~1m to perform, 1m buffer + }) + } +} + +func TestAutomationRegisterUpkeep(t *testing.T) { + t.Parallel() + registryVersions := map[string]ethereum.KeeperRegistryVersion{ + "registry_2_0": ethereum.RegistryVersion_2_0, + "registry_2_1": ethereum.RegistryVersion_2_1, + } + + for name, registryVersion := range registryVersions { + name := name + registryVersion := 
registryVersion + t.Run(name, func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Automation) + if err != nil { + t.Fatal(err) + } + a := setupAutomationTestDocker( + t, registryVersion, automationDefaultRegistryConfig, false, false, &config, + ) + + consumers, upkeepIDs := actions.DeployConsumers( + t, + a.Registry, + a.Registrar, + a.LinkToken, + a.Deployer, + a.ChainClient, + defaultAmountOfUpkeeps, + big.NewInt(automationDefaultLinkFunds), + automationDefaultUpkeepGasLimit, + false, + false, + ) + + var initialCounters = make([]*big.Int, len(upkeepIDs)) + gom := gomega.NewGomegaWithT(t) + // Observe that the upkeeps which are initially registered are performing and + // store the value of their initial counters in order to compare later on that the value increased. + gom.Eventually(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + initialCounters[i] = counter + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d", counter.Int64()) + l.Info(). + Int64("Upkeep counter", counter.Int64()). + Int64("Upkeep ID", int64(i)). + Msg("Number of upkeeps performed") + } + }, "4m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~1m for performing each upkeep once, ~2m buffer + + newConsumers, _ := actions.RegisterNewUpkeeps(t, a.Deployer, a.ChainClient, a.LinkToken, + a.Registry, a.Registrar, automationDefaultUpkeepGasLimit, 1) + + // We know that newConsumers has size 1, so we can just use the newly registered upkeep. + newUpkeep := newConsumers[0] + + // Test that the newly registered upkeep is also performing. 
+ gom.Eventually(func(g gomega.Gomega) { + counter, err := newUpkeep.Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling newly deployed upkeep's counter shouldn't fail") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64()) + l.Info().Int64("Upkeeps Performed", counter.Int64()).Msg("Newly Registered Upkeep") + }, "2m", "1s").Should(gomega.Succeed()) // ~1m for upkeep to perform, 1m buffer + + gom.Eventually(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + currentCounter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + + l.Info(). + Int64("Upkeep ID", int64(i)). + Int64("Upkeep counter", currentCounter.Int64()). + Int64("initial counter", initialCounters[i].Int64()). + Msg("Number of upkeeps performed") + + g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", initialCounters[i].Int64()), + "Expected counter to have increased from initial value of %s, but got %s", + initialCounters[i], currentCounter) + } + }, "2m", "1s").Should(gomega.Succeed()) // ~1m for upkeeps to perform, 1m buffer + }) + } +} + +func TestAutomationPauseRegistry(t *testing.T) { + t.Parallel() + registryVersions := map[string]ethereum.KeeperRegistryVersion{ + "registry_2_0": ethereum.RegistryVersion_2_0, + "registry_2_1": ethereum.RegistryVersion_2_1, + } + + for name, registryVersion := range registryVersions { + name := name + registryVersion := registryVersion + t.Run(name, func(t *testing.T) { + t.Parallel() + config, err := tc.GetConfig("Smoke", tc.Automation) + if err != nil { + t.Fatal(err) + } + a := setupAutomationTestDocker( + t, registryVersion, automationDefaultRegistryConfig, false, false, &config, + ) + + consumers, upkeepIDs := actions.DeployConsumers( + t, + a.Registry, + a.Registrar, + a.LinkToken, + 
a.Deployer, + a.ChainClient, + defaultAmountOfUpkeeps, + big.NewInt(automationDefaultLinkFunds), + automationDefaultUpkeepGasLimit, + false, + false, + ) + gom := gomega.NewGomegaWithT(t) + + // Observe that the upkeeps which are initially registered are performing + gom.Eventually(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d") + } + }, "4m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~1m for performing each upkeep once, ~2m buffer + + // Pause the registry + err = a.Registry.Pause() + require.NoError(t, err, "Error pausing registry") + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for registry to pause") + + // Store how many times each upkeep performed once the registry was successfully paused + var countersAfterPause = make([]*big.Int, len(upkeepIDs)) + for i := 0; i < len(upkeepIDs); i++ { + countersAfterPause[i], err = consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + } + + // After we paused the registry, the counters of all the upkeeps should stay constant + // because they are no longer getting serviced + gom.Consistently(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + latestCounter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterPause[i].Int64()), + "Expected consumer counter to remain constant at %d, but got %d", + countersAfterPause[i].Int64(), latestCounter.Int64()) + } + }, "1m", "1s").Should(gomega.Succeed()) 
+ }) + } +} + +func TestAutomationKeeperNodesDown(t *testing.T) { + t.Parallel() + registryVersions := map[string]ethereum.KeeperRegistryVersion{ + "registry_2_0": ethereum.RegistryVersion_2_0, + "registry_2_1": ethereum.RegistryVersion_2_1, + } + + for name, registryVersion := range registryVersions { + name := name + registryVersion := registryVersion + t.Run(name, func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Automation) + if err != nil { + t.Fatal(err) + } + a := setupAutomationTestDocker( + t, registryVersion, automationDefaultRegistryConfig, false, false, &config, + ) + + consumers, upkeepIDs := actions.DeployConsumers( + t, + a.Registry, + a.Registrar, + a.LinkToken, + a.Deployer, + a.ChainClient, + defaultAmountOfUpkeeps, + big.NewInt(automationDefaultLinkFunds), + automationDefaultUpkeepGasLimit, + false, + false, + ) + gom := gomega.NewGomegaWithT(t) + nodesWithoutBootstrap := a.PluginNodes[1:] + + var initialCounters = make([]*big.Int, len(upkeepIDs)) + + // Watch upkeeps being performed and store their counters in order to compare them later in the test + gom.Eventually(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + initialCounters[i] = counter + l.Info().Int64("Upkeeps Performed", counter.Int64()).Int("Upkeep Index", i).Msg("Number of upkeeps performed") + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d", counter.Int64()) + } + }, "4m", "1s").Should(gomega.Succeed()) // ~1m for cluster setup, ~1m for performing each upkeep once, ~2m buffer + + // Take down 1 node. Currently, using 4 nodes so f=1 and is the max nodes that can go down. 
+ err = nodesWithoutBootstrap[0].MustDeleteJob("1") + require.NoError(t, err, "Error deleting job from Plugin node") + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for blockchain events") + + l.Info().Msg("Successfully managed to take down the first half of the nodes") + + // Assert that upkeeps are still performed and their counters have increased + gom.Eventually(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + currentCounter, err := consumers[i].Counter(testcontext.Get(t)) + l.Info().Int64("Upkeeps Performed", currentCounter.Int64()).Int("Upkeep Index", i).Msg("Number of upkeeps performed") + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", initialCounters[i].Int64()), + "Expected counter to have increased from initial value of %s, but got %s", + initialCounters[i], currentCounter) + } + }, "2m", "1s").Should(gomega.Succeed()) // ~1m for each upkeep to perform once, 1m buffer + + // Take down the rest + restOfNodesDown := nodesWithoutBootstrap[1:] + for _, nodeToTakeDown := range restOfNodesDown { + err = nodeToTakeDown.MustDeleteJob("1") + require.NoError(t, err, "Error deleting job from Plugin node") + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for blockchain events") + } + l.Info().Msg("Successfully managed to take down the second half of the nodes") + + // See how many times each upkeep was executed + var countersAfterNoMoreNodes = make([]*big.Int, len(upkeepIDs)) + for i := 0; i < len(upkeepIDs); i++ { + countersAfterNoMoreNodes[i], err = consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + l.Info().Int("Upkeep Index", i).Int64("Performed", countersAfterNoMoreNodes[i].Int64()).Msg("Upkeeps Performed") + } + + // Once all the nodes are taken down, there might be some 
straggling transactions which went through before + // all the nodes were taken down + gom.Consistently(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + latestCounter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterNoMoreNodes[i].Int64()+1), + "Expected consumer counter to not have increased more than %d, but got %d", + countersAfterNoMoreNodes[i].Int64()+1, latestCounter.Int64()) + } + }, "2m", "1s").Should(gomega.Succeed()) + }) + } +} + +func TestAutomationPerformSimulation(t *testing.T) { + t.Parallel() + registryVersions := map[string]ethereum.KeeperRegistryVersion{ + "registry_2_0": ethereum.RegistryVersion_2_0, + "registry_2_1": ethereum.RegistryVersion_2_1, + } + + for name, registryVersion := range registryVersions { + name := name + registryVersion := registryVersion + t.Run(name, func(t *testing.T) { + t.Parallel() + config, err := tc.GetConfig("Smoke", tc.Automation) + if err != nil { + t.Fatal(err) + } + a := setupAutomationTestDocker( + t, registryVersion, automationDefaultRegistryConfig, false, false, &config, + ) + + consumersPerformance, _ := actions.DeployPerformanceConsumers( + t, + a.Registry, + a.Registrar, + a.LinkToken, + a.Deployer, + a.ChainClient, + defaultAmountOfUpkeeps, + big.NewInt(automationDefaultLinkFunds), + automationDefaultUpkeepGasLimit, + 10000, // How many blocks this upkeep will be eligible from first upkeep block + 5, // Interval of blocks that upkeeps are expected to be performed + 100000, // How much gas should be burned on checkUpkeep() calls + 4000000, // How much gas should be burned on performUpkeep() calls. 
Initially set higher than defaultUpkeepGasLimit + ) + gom := gomega.NewGomegaWithT(t) + + consumerPerformance := consumersPerformance[0] + + // Initially performGas is set high, so performUpkeep reverts and no upkeep should be performed + gom.Consistently(func(g gomega.Gomega) { + // Consumer count should remain at 0 + cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") + g.Expect(cnt.Int64()).Should(gomega.Equal(int64(0)), + "Expected consumer counter to remain constant at %d, but got %d", 0, cnt.Int64(), + ) + }, "2m", "1s").Should(gomega.Succeed()) // ~1m for setup, 1m assertion + + // Set performGas on consumer to be low, so that performUpkeep starts becoming successful + err = consumerPerformance.SetPerformGasToBurn(testcontext.Get(t), big.NewInt(100000)) + require.NoError(t, err, "Perform gas should be set successfully on consumer") + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for set perform gas tx") + + // Upkeep should now start performing + gom.Eventually(func(g gomega.Gomega) { + cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") + g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d", cnt.Int64(), + ) + }, "2m", "1s").Should(gomega.Succeed()) // ~1m to perform once, 1m buffer + }) + } +} + +func TestAutomationCheckPerformGasLimit(t *testing.T) { + t.Parallel() + registryVersions := map[string]ethereum.KeeperRegistryVersion{ + "registry_2_0": ethereum.RegistryVersion_2_0, + "registry_2_1": ethereum.RegistryVersion_2_1, + } + + for name, registryVersion := range registryVersions { + name := name + registryVersion := registryVersion + t.Run(name, func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := 
tc.GetConfig("Smoke", tc.Automation) + if err != nil { + t.Fatal(err) + } + a := setupAutomationTestDocker( + t, registryVersion, automationDefaultRegistryConfig, false, false, &config, + ) + + consumersPerformance, upkeepIDs := actions.DeployPerformanceConsumers( + t, + a.Registry, + a.Registrar, + a.LinkToken, + a.Deployer, + a.ChainClient, + defaultAmountOfUpkeeps, + big.NewInt(automationDefaultLinkFunds), + automationDefaultUpkeepGasLimit, + 10000, // How many blocks this upkeep will be eligible from first upkeep block + 5, // Interval of blocks that upkeeps are expected to be performed + 100000, // How much gas should be burned on checkUpkeep() calls + 4000000, // How much gas should be burned on performUpkeep() calls. Initially set higher than defaultUpkeepGasLimit + ) + gom := gomega.NewGomegaWithT(t) + + nodesWithoutBootstrap := a.PluginNodes[1:] + consumerPerformance := consumersPerformance[0] + upkeepID := upkeepIDs[0] + + // Initially performGas is set higher than defaultUpkeepGasLimit, so no upkeep should be performed + gom.Consistently(func(g gomega.Gomega) { + cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(cnt.Int64()).Should( + gomega.Equal(int64(0)), + "Expected consumer counter to remain constant at %d, but got %d", 0, cnt.Int64(), + ) + }, "2m", "1s").Should(gomega.Succeed()) // ~1m for setup, 1m assertion + + // Increase gas limit for the upkeep, higher than the performGasBurn + err = a.Registry.SetUpkeepGasLimit(upkeepID, uint32(4500000)) + require.NoError(t, err, "Error setting upkeep gas limit") + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for SetUpkeepGasLimit tx") + + // Upkeep should now start performing + gom.Eventually(func(g gomega.Gomega) { + cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's 
counter shouldn't fail") + g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d", cnt.Int64(), + ) + }, "2m", "1s").Should(gomega.Succeed()) // ~1m to perform once, 1m buffer + + // Now increase the checkGasBurn on consumer, upkeep should stop performing + err = consumerPerformance.SetCheckGasToBurn(testcontext.Get(t), big.NewInt(3000000)) + require.NoError(t, err, "Check gas burn should be set successfully on consumer") + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for SetCheckGasToBurn tx") + + // Get existing performed count + existingCnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + require.NoError(t, err, "Calling consumer's counter shouldn't fail") + l.Info().Int64("Upkeep counter", existingCnt.Int64()).Msg("Upkeep counter when check gas increased") + + // In most cases count should remain constant, but it might increase by upto 1 due to pending perform + gom.Consistently(func(g gomega.Gomega) { + cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(cnt.Int64()).Should( + gomega.BeNumerically("<=", existingCnt.Int64()+1), + "Expected consumer counter to remain less than equal %d, but got %d", existingCnt.Int64()+1, cnt.Int64(), + ) + }, "1m", "1s").Should(gomega.Succeed()) + + existingCnt, err = consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + require.NoError(t, err, "Calling consumer's counter shouldn't fail") + existingCntInt := existingCnt.Int64() + l.Info().Int64("Upkeep counter", existingCntInt).Msg("Upkeep counter when consistently block finished") + + // Now increase checkGasLimit on registry + highCheckGasLimit := automationDefaultRegistryConfig + highCheckGasLimit.CheckGasLimit = uint32(5000000) + highCheckGasLimit.RegistryVersion = registryVersion + + ocrConfig, err := 
actions.BuildAutoOCR2ConfigVarsLocal(l, nodesWithoutBootstrap, highCheckGasLimit, a.Registrar.Address(), 30*time.Second, a.Registry.RegistryOwnerAddress()) + require.NoError(t, err, "Error building OCR config") + + err = a.Registry.SetConfig(highCheckGasLimit, ocrConfig) + require.NoError(t, err, "Registry config should be set successfully!") + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for set config tx") + + // Upkeep should start performing again, and it should get regularly performed + gom.Eventually(func(g gomega.Gomega) { + cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") + g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", existingCntInt), + "Expected consumer counter to be greater than %d, but got %d", existingCntInt, cnt.Int64(), + ) + }, "3m", "1s").Should(gomega.Succeed()) // ~1m to setup cluster, 1m to perform once, 1m buffer + }) + } +} + +func TestUpdateCheckData(t *testing.T) { + t.Parallel() + registryVersions := map[string]ethereum.KeeperRegistryVersion{ + "registry_2_0": ethereum.RegistryVersion_2_0, + "registry_2_1": ethereum.RegistryVersion_2_1, + } + + for name, registryVersion := range registryVersions { + name := name + registryVersion := registryVersion + t.Run(name, func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Automation) + if err != nil { + t.Fatal(err) + } + + a := setupAutomationTestDocker( + t, registryVersion, automationDefaultRegistryConfig, false, false, &config, + ) + + performDataChecker, upkeepIDs := actions.DeployPerformDataCheckerConsumers( + t, + a.Registry, + a.Registrar, + a.LinkToken, + a.Deployer, + a.ChainClient, + defaultAmountOfUpkeeps, + big.NewInt(automationDefaultLinkFunds), + automationDefaultUpkeepGasLimit, + []byte(automationExpectedData), + ) + gom := gomega.NewGomegaWithT(t) + + gom.Consistently(func(g 
gomega.Gomega) { + // expect the counter to remain 0 because perform data does not match + for i := 0; i < len(upkeepIDs); i++ { + counter, err := performDataChecker[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve perform data checker"+ + " for upkeep at index "+strconv.Itoa(i)) + g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)), + "Expected perform data checker counter to be 0, but got %d", counter.Int64()) + l.Info().Int64("Upkeep perform data checker", counter.Int64()).Msg("Number of upkeeps performed") + } + }, "2m", "1s").Should(gomega.Succeed()) // ~1m for setup, 1m assertion + + for i := 0; i < len(upkeepIDs); i++ { + err := a.Registry.UpdateCheckData(upkeepIDs[i], []byte(automationExpectedData)) + require.NoError(t, err, "Could not update check data for upkeep at index %d", i) + } + + err = a.ChainClient.WaitForEvents() + require.NoError(t, err, "Error while waiting for check data update") + + // retrieve new check data for all upkeeps + for i := 0; i < len(upkeepIDs); i++ { + upkeep, err := a.Registry.GetUpkeepInfo(testcontext.Get(t), upkeepIDs[i]) + require.NoError(t, err, "Failed to get upkeep info at index %d", i) + require.Equal(t, []byte(automationExpectedData), upkeep.CheckData, "Upkeep data not as expected") + } + + gom.Eventually(func(g gomega.Gomega) { + // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 + for i := 0; i < len(upkeepIDs); i++ { + counter, err := performDataChecker[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve perform data checker counter"+ + " for upkeep at index "+strconv.Itoa(i)) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected perform data checker counter to be greater than 0, but got %d", counter.Int64()) + l.Info().Int64("Upkeep perform data checker", counter.Int64()).Msg("Number of upkeeps performed") + } + }, 
"2m", "1s").Should(gomega.Succeed()) // ~1m to perform once, 1m buffer + }) + } +} + +func setupAutomationTestDocker( + t *testing.T, + registryVersion ethereum.KeeperRegistryVersion, + registryConfig contracts.KeeperRegistrySettings, + isMercuryV02 bool, + isMercuryV03 bool, + automationTestConfig types.AutomationTestConfig, +) automationv2.AutomationTest { + require.False(t, isMercuryV02 && isMercuryV03, "Cannot run test with both Mercury V02 and V03 on") + + l := logging.GetTestLogger(t) + // Add registry version to config + registryConfig.RegistryVersion = registryVersion + network := networks.MustGetSelectedNetworkConfig(automationTestConfig.GetNetworkConfig())[0] + + // build the node config + clNodeConfig := node.NewConfig(node.NewBaseConfig()) + syncInterval := *commonconfig.MustNewDuration(5 * time.Minute) + clNodeConfig.Feature.LogPoller = ptr.Ptr[bool](true) + clNodeConfig.OCR2.Enabled = ptr.Ptr[bool](true) + clNodeConfig.Keeper.TurnLookBack = ptr.Ptr[int64](int64(0)) + clNodeConfig.Keeper.Registry.SyncInterval = &syncInterval + clNodeConfig.Keeper.Registry.PerformGasOverhead = ptr.Ptr[uint32](uint32(150000)) + clNodeConfig.P2P.V2.AnnounceAddresses = &[]string{"0.0.0.0:6690"} + clNodeConfig.P2P.V2.ListenAddresses = &[]string{"0.0.0.0:6690"} + + //launch the environment + var env *test_env.CLClusterTestEnv + var err error + require.NoError(t, err) + l.Debug().Msgf("Funding amount: %f", *automationTestConfig.GetCommonConfig().PluginNodeFunding) + clNodesCount := 5 + if isMercuryV02 || isMercuryV03 { + env, err = test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(automationTestConfig). + WithGeth(). + WithMockAdapter(). + WithFunding(big.NewFloat(*automationTestConfig.GetCommonConfig().PluginNodeFunding)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err, "Error deploying test environment for Mercury") + env.ParallelTransactions(true) + + secretsConfig := ` + [Mercury.Credentials.cred1] + LegacyURL = '%s' + URL = '%s' + Username = 'node' + Password = 'nodepass'` + secretsConfig = fmt.Sprintf(secretsConfig, env.MockAdapter.InternalEndpoint, env.MockAdapter.InternalEndpoint) + + var httpUrls []string + var wsUrls []string + if network.Simulated { + httpUrls = []string{env.RpcProvider.PrivateHttpUrls()[0]} + wsUrls = []string{env.RpcProvider.PrivateWsUrsl()[0]} + } else { + httpUrls = network.HTTPURLs + wsUrls = network.URLs + } + + node.SetChainConfig(clNodeConfig, wsUrls, httpUrls, network, false) + + err = env.StartClCluster(clNodeConfig, clNodesCount, secretsConfig, automationTestConfig) + require.NoError(t, err, "Error starting CL nodes test environment for Mercury") + err = env.FundPluginNodes(big.NewFloat(*automationTestConfig.GetCommonConfig().PluginNodeFunding)) + require.NoError(t, err, "Error funding CL nodes") + + } else { + env, err = test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(automationTestConfig). + WithGeth(). + WithMockAdapter(). + WithCLNodes(clNodesCount). + WithCLNodeConfig(clNodeConfig). + WithFunding(big.NewFloat(*automationTestConfig.GetCommonConfig().PluginNodeFunding)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err, "Error deploying test environment") + } + + env.ParallelTransactions(true) + nodeClients := env.ClCluster.NodeAPIs() + + a := automationv2.NewAutomationTestDocker(env.EVMClient, env.ContractDeployer, nodeClients) + a.MercuryCredentialName = "cred1" + a.RegistrySettings = registryConfig + a.RegistrarSettings = contracts.KeeperRegistrarSettings{ + AutoApproveConfigType: uint8(2), + AutoApproveMaxAllowed: 1000, + MinLinkJuels: big.NewInt(0), + } + a.PluginConfig = ocr2keepers30config.OffchainConfig{ + TargetProbability: "0.999", + TargetInRounds: 1, + PerformLockoutWindow: 3_600_000, // Intentionally set to be higher than in prod for testing purpose + GasLimitPerReport: 10_300_000, + GasOverheadPerUpkeep: 300_000, + MinConfirmations: 0, + MaxUpkeepBatchSize: 10, + } + a.PublicConfig = ocr3.PublicConfig{ + DeltaProgress: 10 * time.Second, + DeltaResend: 15 * time.Second, + DeltaInitial: 500 * time.Millisecond, + DeltaRound: 1000 * time.Millisecond, + DeltaGrace: 200 * time.Millisecond, + DeltaCertifiedCommitRequest: 300 * time.Millisecond, + DeltaStage: 30 * time.Second, + RMax: 24, + MaxDurationQuery: 20 * time.Millisecond, + MaxDurationObservation: 20 * time.Millisecond, + MaxDurationShouldAcceptAttestedReport: 1200 * time.Millisecond, + MaxDurationShouldTransmitAcceptedReport: 20 * time.Millisecond, + F: 1, + } + + a.SetupAutomationDeployment(t) + a.SetDockerEnv(env) + + if isMercuryV02 || isMercuryV03 { + var imposters []ctfTestEnv.KillgraveImposter + mercuryv03Mock200 := ctfTestEnv.KillgraveImposter{ + Request: ctfTestEnv.KillgraveRequest{ + Method: http.MethodGet, + Endpoint: "/api/v1/reports/bulk", + SchemaFile: nil, + Params: &map[string]string{"feedIDs": "0x00028c915d6af0fd66bba2d0fc9405226bca8d6806333121a7d9832103d1563c", "timestamp": "{[\\d+]}"}, + Headers: nil, + }, + Response: ctfTestEnv.KillgraveResponse{ + Status: 200, + Body: 
`{"reports":[{"feedID":"0x00028c915d6af0fd66bba2d0fc9405226bca8d6806333121a7d9832103d1563c","validFromTimestamp":0,"observationsTimestamp":0,"fullReport":"0x00066dfcd1ed2d95b18c948dbc5bd64c687afe93e4ca7d663ddec14c20090ad80000000000000000000000000000000000000000000000000000000000081401000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002200000000000000000000000000000000000000000000000000000000000000280000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001204554482d5553442d415242495452554d2d544553544e455400000000000000000000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000289ad8d367000000000000000000000000000000000000000000000000000000289acf0b38000000000000000000000000000000000000000000000000000000289b3da40000000000000000000000000000000000000000000000000000000000018ae7ce74d9fa252a8983976eab600dc7590c778d04813430841bc6e765c34cd81a168d00000000000000000000000000000000000000000000000000000000018ae7cb0000000000000000000000000000000000000000000000000000000064891c98000000000000000000000000000000000000000000000000000000000000000260412b94e525ca6cedc9f544fd86f77606d52fe731a5d069dbe836a8bfc0fb8c911963b0ae7a14971f3b4621bffb802ef0605392b9a6c89c7fab1df8633a5ade00000000000000000000000000000000000000000000000000000000000000024500c2f521f83fba5efc2bf3effaaedde43d0a4adff785c1213b712a3aed0d8157642a84324db0cf9695ebd27708d4608eb0337e0dd87b0e43f0fa70c700d911"}]}`, + BodyFile: nil, + Headers: nil, + Delay: nil, + }, + } + + mercuryv02Mock200 := ctfTestEnv.KillgraveImposter{ + Request: ctfTestEnv.KillgraveRequest{ + Method: http.MethodGet, + Endpoint: "/client", + SchemaFile: nil, + Params: &map[string]string{"feedIdHex": 
"{0x00028c915d6af0fd66bba2d0fc9405226bca8d6806333121a7d9832103d1563c|0x4554482d5553442d415242495452554d2d544553544e45540000000000000000}", "blockNumber": "{[\\d+]}"}, + Headers: nil, + }, + Response: ctfTestEnv.KillgraveResponse{ + Status: 200, + Body: `{"pluginBlob":"0x0001c38d71fed6c320b90e84b6f559459814d068e2a1700adc931ca9717d4fe70000000000000000000000000000000000000000000000000000000001a80b52b4bf1233f9cb71144a253a1791b202113c4ab4a92fa1b176d684b4959666ff8200000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001004254432d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000645570be000000000000000000000000000000000000000000000000000002af2b818dc5000000000000000000000000000000000000000000000000000002af2426faf3000000000000000000000000000000000000000000000000000002af32dc209700000000000000000000000000000000000000000000000000000000012130f8df0a9745bb6ad5e2df605e158ba8ad8a33ef8a0acf9851f0f01668a3a3f2b68600000000000000000000000000000000000000000000000000000000012130f60000000000000000000000000000000000000000000000000000000000000002c4a7958dce105089cf5edb68dad7dcfe8618d7784eb397f97d5a5fade78c11a58275aebda478968e545f7e3657aba9dcbe8d44605e4c6fde3e24edd5e22c94270000000000000000000000000000000000000000000000000000000000000002459c12d33986018a8959566d145225f0c4a4e61a9a3f50361ccff397899314f0018162cf10cd89897635a0bb62a822355bd199d09f4abe76e4d05261bb44733d"}`, + BodyFile: nil, + Headers: nil, + Delay: nil, + }, + } + + imposters = append(imposters, mercuryv03Mock200, mercuryv02Mock200) + a.SetupMercuryMock(t, imposters) + } + + return *a +} diff --git a/integration-tests/smoke/automation_test.go_test_list.json b/integration-tests/smoke/automation_test.go_test_list.json 
new file mode 100644 index 00000000..b8868459 --- /dev/null +++ b/integration-tests/smoke/automation_test.go_test_list.json @@ -0,0 +1,74 @@ +{ + "tests": [ + { + "name": "TestAutomationBasic", + "label": "ubuntu-latest", + "nodes": 2, + "run":[ + {"name":"registry_2_0"}, + {"name":"registry_2_1_conditional"} + ] + }, + { + "name": "TestAutomationBasic", + "label": "ubuntu-latest", + "nodes": 2, + "run":[ + {"name":"registry_2_1_logtrigger"}, + {"name":"registry_2_1_with_mercury_v02"} + ] + }, + { + "name": "TestAutomationBasic", + "label": "ubuntu-latest", + "nodes": 2, + "run":[ + {"name":"registry_2_1_with_mercury_v03"}, + {"name":"registry_2_1_with_logtrigger_and_mercury_v02"} + ] + }, + { + "name": "TestSetUpkeepTriggerConfig" + }, + { + "name": "TestAutomationAddFunds", + "label": "ubuntu-latest", + "nodes": 2 + }, + { + "name": "TestAutomationPauseUnPause", + "label": "ubuntu-latest", + "nodes": 2 + }, + { + "name": "TestAutomationRegisterUpkeep", + "label": "ubuntu-latest", + "nodes": 2 + }, + { + "name": "TestAutomationPauseRegistry", + "label": "ubuntu-latest", + "nodes": 2 + }, + { + "name": "TestAutomationKeeperNodesDown", + "label": "ubuntu-latest", + "nodes": 2 + }, + { + "name": "TestAutomationPerformSimulation", + "label": "ubuntu-latest", + "nodes": 2 + }, + { + "name": "TestAutomationCheckPerformGasLimit", + "label": "ubuntu-latest", + "nodes": 2 + }, + { + "name": "TestUpdateCheckData", + "label": "ubuntu-latest", + "nodes": 2 + } + ] +} \ No newline at end of file diff --git a/integration-tests/smoke/automation_upgrade_test.go b/integration-tests/smoke/automation_upgrade_test.go new file mode 100644 index 00000000..7fc1043c --- /dev/null +++ b/integration-tests/smoke/automation_upgrade_test.go @@ -0,0 +1,15 @@ +package smoke + +import ( + "testing" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +func TestAutomationNodeUpgrade(t *testing.T) { + config, err := tc.GetConfig(t.Name(), tc.Automation) + if err != nil { + 
t.Fatal(err, "Error getting config") + } + SetupAutomationBasic(t, true, &config) +} diff --git a/integration-tests/smoke/cron_test.go b/integration-tests/smoke/cron_test.go new file mode 100644 index 00000000..7102c1a3 --- /dev/null +++ b/integration-tests/smoke/cron_test.go @@ -0,0 +1,146 @@ +package smoke + +import ( + "fmt" + "net/http" + "testing" + + "github.com/google/uuid" + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +func TestCronBasic(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.Cron) + if err != nil { + t.Fatal(err) + } + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithGeth(). + WithMockAdapter(). + WithCLNodes(1). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err) + + err = env.MockAdapter.SetAdapterBasedIntValuePath("/variable", []string{http.MethodGet, http.MethodPost}, 5) + require.NoError(t, err, "Setting value path in mock adapter shouldn't fail") + + bta := &client.BridgeTypeAttributes{ + Name: fmt.Sprintf("variable-%s", uuid.NewString()), + URL: fmt.Sprintf("%s/variable", env.MockAdapter.InternalEndpoint), + RequestData: "{}", + } + err = env.ClCluster.Nodes[0].API.MustCreateBridge(bta) + require.NoError(t, err, "Creating bridge in plugin node shouldn't fail") + + job, err := env.ClCluster.Nodes[0].API.MustCreateJob(&client.CronJobSpec{ + Schedule: "CRON_TZ=UTC * * * * * *", + ObservationSource: client.ObservationSourceSpecBridge(bta), + }) + require.NoError(t, err, "Creating Cron Job in plugin node shouldn't fail") + + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(job.Data.ID) + if err != nil { + l.Info().Err(err).Msg("error while waiting for job runs") + } + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Reading Job run data shouldn't fail") + + g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically(">=", 5), "Expected number of job runs to be greater than 5, but got %d", len(jobRuns.Data)) + + for _, jr := range jobRuns.Data { + g.Expect(jr.Attributes.Errors).Should(gomega.Equal([]interface{}{nil}), "Job run %s shouldn't have errors", jr.ID) + } + }, "2m", "3s").Should(gomega.Succeed()) +} + +func TestCronJobReplacement(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.Cron) + if err != nil { + t.Fatal(err) + } + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithGeth(). + WithMockAdapter(). + WithCLNodes(1). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err) + + err = env.MockAdapter.SetAdapterBasedIntValuePath("/variable", []string{http.MethodGet, http.MethodPost}, 5) + require.NoError(t, err, "Setting value path in mockserver shouldn't fail") + + bta := &client.BridgeTypeAttributes{ + Name: fmt.Sprintf("variable-%s", uuid.NewString()), + URL: fmt.Sprintf("%s/variable", env.MockAdapter.InternalEndpoint), + RequestData: "{}", + } + err = env.ClCluster.Nodes[0].API.MustCreateBridge(bta) + require.NoError(t, err, "Creating bridge in plugin node shouldn't fail") + + // CRON job creation and replacement + job, err := env.ClCluster.Nodes[0].API.MustCreateJob(&client.CronJobSpec{ + Schedule: "CRON_TZ=UTC * * * * * *", + ObservationSource: client.ObservationSourceSpecBridge(bta), + }) + require.NoError(t, err, "Creating Cron Job in plugin node shouldn't fail") + + gom := gomega.NewWithT(t) + gom.Eventually(func(g gomega.Gomega) { + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(job.Data.ID) + if err != nil { + l.Info().Err(err).Msg("error while waiting for job runs") + } + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Reading Job run data shouldn't fail") + + g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically(">=", 5), "Expected number of job runs to be greater than 5, but got %d", len(jobRuns.Data)) + + for _, jr := range jobRuns.Data { + g.Expect(jr.Attributes.Errors).Should(gomega.Equal([]interface{}{nil}), "Job run %s shouldn't have errors", jr.ID) + } + }, "3m", "3s").Should(gomega.Succeed()) + + err = env.ClCluster.Nodes[0].API.MustDeleteJob(job.Data.ID) + require.NoError(t, err) + + job, err = env.ClCluster.Nodes[0].API.MustCreateJob(&client.CronJobSpec{ + Schedule: "CRON_TZ=UTC * * * * * *", + ObservationSource: client.ObservationSourceSpecBridge(bta), + }) + require.NoError(t, err, "Recreating Cron Job in plugin node shouldn't fail") + + gom.Eventually(func(g gomega.Gomega) { + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(job.Data.ID) + if err 
!= nil { + l.Info().Err(err).Msg("error while waiting for job runs") + } + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Reading Job run data shouldn't fail") + + g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically(">=", 5), "Expected number of job runs to be greater than 5, but got %d", len(jobRuns.Data)) + + for _, jr := range jobRuns.Data { + g.Expect(jr.Attributes.Errors).Should(gomega.Equal([]interface{}{nil}), "Job run %s shouldn't have errors", jr.ID) + } + }, "3m", "3s").Should(gomega.Succeed()) + +} diff --git a/integration-tests/smoke/flux_test.go b/integration-tests/smoke/flux_test.go new file mode 100644 index 00000000..6eb67078 --- /dev/null +++ b/integration-tests/smoke/flux_test.go @@ -0,0 +1,154 @@ +package smoke + +import ( + "fmt" + "math/big" + "net/http" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +func TestFluxBasic(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.Flux) + if err != nil { + t.Fatal(err) + } + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithGeth(). + WithMockAdapter(). + WithCLNodes(3). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err) + + nodeAddresses, err := env.ClCluster.NodeAddresses() + require.NoError(t, err, "Retrieving on-chain wallet addresses for plugin nodes shouldn't fail") + env.EVMClient.ParallelTransactions(true) + + adapterUUID := uuid.NewString() + adapterPath := fmt.Sprintf("/variable-%s", adapterUUID) + err = env.MockAdapter.SetAdapterBasedIntValuePath(adapterPath, []string{http.MethodPost}, 1e5) + require.NoError(t, err, "Setting mock adapter value path shouldn't fail") + + lt, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + fluxInstance, err := env.ContractDeployer.DeployFluxAggregatorContract(lt.Address(), contracts.DefaultFluxAggregatorOptions()) + require.NoError(t, err, "Deploying Flux Aggregator Contract shouldn't fail") + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Failed waiting for deployment of flux aggregator contract") + + err = lt.Transfer(fluxInstance.Address(), big.NewInt(1e18)) + require.NoError(t, err, "Funding Flux Aggregator Contract shouldn't fail") + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Failed waiting for funding of flux aggregator contract") + + err = fluxInstance.UpdateAvailableFunds() + require.NoError(t, err, "Updating the available funds on the Flux Aggregator Contract shouldn't fail") + + err = env.FundPluginNodes(big.NewFloat(1)) + require.NoError(t, err, "Failed to fund the nodes") + + err = fluxInstance.SetOracles( + contracts.FluxAggregatorSetOraclesOptions{ + AddList: nodeAddresses, + RemoveList: []common.Address{}, + AdminList: nodeAddresses, + MinSubmissions: 3, + MaxSubmissions: 3, + RestartDelayRounds: 0, + }) + require.NoError(t, err, "Setting oracle options in the Flux Aggregator contract shouldn't fail") + + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail") + oracles, err := 
fluxInstance.GetOracles(testcontext.Get(t)) + require.NoError(t, err, "Getting oracle details from the Flux aggregator contract shouldn't fail") + l.Info().Str("Oracles", strings.Join(oracles, ",")).Msg("Oracles set") + + adapterFullURL := fmt.Sprintf("%s%s", env.MockAdapter.InternalEndpoint, adapterPath) + l.Info().Str("AdapterFullURL", adapterFullURL).Send() + bta := &client.BridgeTypeAttributes{ + Name: fmt.Sprintf("variable-%s", adapterUUID), + URL: adapterFullURL, + } + for i, n := range env.ClCluster.Nodes { + err = n.API.MustCreateBridge(bta) + require.NoError(t, err, "Creating bridge shouldn't fail for node %d", i+1) + + fluxSpec := &client.FluxMonitorJobSpec{ + Name: fmt.Sprintf("flux-monitor-%s", adapterUUID), + ContractAddress: fluxInstance.Address(), + EVMChainID: env.EVMClient.GetChainID().String(), + Threshold: 0, + AbsoluteThreshold: 0, + PollTimerPeriod: 15 * time.Second, // min 15s + IdleTimerDisabled: true, + ObservationSource: client.ObservationSourceSpecBridge(bta), + } + _, err = n.API.MustCreateJob(fluxSpec) + require.NoError(t, err, "Creating flux job shouldn't fail for node %d", i+1) + } + + // initial value set is performed before jobs creation + fluxRoundTimeout := 1 * time.Minute + fluxRound := contracts.NewFluxAggregatorRoundConfirmer(fluxInstance, big.NewInt(1), fluxRoundTimeout, l) + env.EVMClient.AddHeaderEventSubscription(fluxInstance.Address(), fluxRound) + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail") + data, err := fluxInstance.GetContractData(testcontext.Get(t)) + require.NoError(t, err, "Getting contract data from flux aggregator contract shouldn't fail") + require.Equal(t, int64(1e5), data.LatestRoundData.Answer.Int64(), + "Expected latest round answer to be %d, but found %d", int64(1e5), data.LatestRoundData.Answer.Int64()) + require.Equal(t, int64(1), data.LatestRoundData.RoundId.Int64(), + "Expected latest round id to be %d, but found %d", int64(1), 
data.LatestRoundData.RoundId.Int64()) + require.Equal(t, int64(1), data.LatestRoundData.AnsweredInRound.Int64(), + "Expected latest round's answered in round to be %d, but found %d", int64(1), data.LatestRoundData.AnsweredInRound.Int64()) + require.Equal(t, int64(999999999999999997), data.AvailableFunds.Int64(), + "Expected available funds to be %d, but found %d", int64(999999999999999997), data.AvailableFunds.Int64()) + require.Equal(t, int64(3), data.AllocatedFunds.Int64(), + "Expected allocated funds to be %d, but found %d", int64(3), data.AllocatedFunds.Int64()) + + fluxRound = contracts.NewFluxAggregatorRoundConfirmer(fluxInstance, big.NewInt(2), fluxRoundTimeout, l) + env.EVMClient.AddHeaderEventSubscription(fluxInstance.Address(), fluxRound) + err = env.MockAdapter.SetAdapterBasedIntValuePath(adapterPath, []string{http.MethodPost}, 1e10) + require.NoError(t, err, "Setting value path in mock server shouldn't fail") + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail") + data, err = fluxInstance.GetContractData(testcontext.Get(t)) + require.NoError(t, err, "Getting contract data from flux aggregator contract shouldn't fail") + require.Equal(t, int64(1e10), data.LatestRoundData.Answer.Int64(), + "Expected latest round answer to be %d, but found %d", int64(1e10), data.LatestRoundData.Answer.Int64()) + require.Equal(t, int64(2), data.LatestRoundData.RoundId.Int64(), + "Expected latest round id to be %d, but found %d", int64(2), data.LatestRoundData.RoundId.Int64()) + require.Equal(t, int64(999999999999999994), data.AvailableFunds.Int64(), + "Expected available funds to be %d, but found %d", int64(999999999999999994), data.AvailableFunds.Int64()) + require.Equal(t, int64(6), data.AllocatedFunds.Int64(), + "Expected allocated funds to be %d, but found %d", int64(6), data.AllocatedFunds.Int64()) + l.Info().Interface("data", data).Msg("Round data") + + for _, oracleAddr := range nodeAddresses { + 
payment, _ := fluxInstance.WithdrawablePayment(testcontext.Get(t), oracleAddr) + require.Equal(t, int64(2), payment.Int64(), + "Expected flux aggregator contract's withdrawable payment to be %d, but found %d", int64(2), payment.Int64()) + } +} diff --git a/integration-tests/smoke/forwarder_ocr_test.go b/integration-tests/smoke/forwarder_ocr_test.go new file mode 100644 index 00000000..b31d9bfa --- /dev/null +++ b/integration-tests/smoke/forwarder_ocr_test.go @@ -0,0 +1,96 @@ +package smoke + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +func TestForwarderOCRBasic(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.ForwarderOcr) + if err != nil { + t.Fatal(err) + } + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithGeth(). + WithMockAdapter(). + WithForwarders(). + WithCLNodes(6). + WithFunding(big.NewFloat(.1)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err) + + env.ParallelTransactions(true) + + nodeClients := env.ClCluster.NodeAPIs() + bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:] + + workerNodeAddresses, err := actions.PluginNodeAddressesLocal(workerNodes) + require.NoError(t, err, "Retreiving on-chain wallet addresses for plugin nodes shouldn't fail") + + linkTokenContract, err := env.ContractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + + err = actions.FundPluginNodesLocal(workerNodes, env.EVMClient, big.NewFloat(.05)) + require.NoError(t, err, "Error funding Plugin nodes") + + operators, authorizedForwarders, _ := actions.DeployForwarderContracts( + t, env.ContractDeployer, linkTokenContract, env.EVMClient, len(workerNodes), + ) + for i := range workerNodes { + actions.AcceptAuthorizedReceiversOperator( + t, operators[i], authorizedForwarders[i], []common.Address{workerNodeAddresses[i]}, env.EVMClient, env.ContractLoader, + ) + require.NoError(t, err, "Accepting Authorize Receivers on Operator shouldn't fail") + err = actions.TrackForwarderLocal(env.EVMClient, authorizedForwarders[i], workerNodes[i], l) + require.NoError(t, err) + err = env.EVMClient.WaitForEvents() + } + ocrInstances, err := actions.DeployOCRContractsForwarderFlowLocal( + 1, + linkTokenContract, + env.ContractDeployer, + workerNodes, + authorizedForwarders, + env.EVMClient, + ) + require.NoError(t, err, "Error deploying OCR contracts") + + err = actions.CreateOCRJobsWithForwarderLocal(ocrInstances, bootstrapNode, workerNodes, 5, env.MockAdapter, env.EVMClient.GetChainID().String()) + require.NoError(t, err, "failed to setup forwarder jobs") + err = actions.WatchNewRound(1, ocrInstances, env.EVMClient, l) + require.NoError(t, err) + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + answer, err := ocrInstances[0].GetLatestAnswer(testcontext.Get(t)) + require.NoError(t, err, "Getting latest 
answer from OCR contract shouldn't fail") + require.Equal(t, int64(5), answer.Int64(), "Expected latest answer from OCR contract to be 5 but got %d", answer.Int64()) + + err = actions.SetAllAdapterResponsesToTheSameValueLocal(10, ocrInstances, workerNodes, env.MockAdapter) + require.NoError(t, err) + err = actions.WatchNewRound(2, ocrInstances, env.EVMClient, l) + require.NoError(t, err) + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + answer, err = ocrInstances[0].GetLatestAnswer(testcontext.Get(t)) + require.NoError(t, err, "Error getting latest OCR answer") + require.Equal(t, int64(10), answer.Int64(), "Expected latest answer from OCR contract to be 10 but got %d", answer.Int64()) +} diff --git a/integration-tests/smoke/forwarders_ocr2_test.go b/integration-tests/smoke/forwarders_ocr2_test.go new file mode 100644 index 00000000..4585cfc6 --- /dev/null +++ b/integration-tests/smoke/forwarders_ocr2_test.go @@ -0,0 +1,118 @@ +package smoke + +import ( + "fmt" + "math/big" + "net/http" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + "github.com/goplugin/pluginv3.0/integration-tests/types/config/node" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +func TestForwarderOCR2Basic(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.ForwarderOcr2) + if err != nil { + t.Fatal(err) + } + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithGeth(). + WithMockAdapter(). 
+ WithCLNodeConfig(node.NewConfig(node.NewBaseConfig(), + node.WithOCR2(), + node.WithP2Pv2(), + )). + WithForwarders(). + WithCLNodes(6). + WithFunding(big.NewFloat(.1)). + WithStandardCleanup(). + Build() + require.NoError(t, err) + + env.ParallelTransactions(true) + + nodeClients := env.ClCluster.NodeAPIs() + bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:] + + workerNodeAddresses, err := actions.PluginNodeAddressesLocal(workerNodes) + require.NoError(t, err, "Retreiving on-chain wallet addresses for plugin nodes shouldn't fail") + + linkTokenContract, err := env.ContractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + + err = actions.FundPluginNodesLocal(workerNodes, env.EVMClient, big.NewFloat(.05)) + require.NoError(t, err, "Error funding Plugin nodes") + + operators, authorizedForwarders, _ := actions.DeployForwarderContracts( + t, env.ContractDeployer, linkTokenContract, env.EVMClient, len(workerNodes), + ) + + for i := range workerNodes { + actions.AcceptAuthorizedReceiversOperator(t, operators[i], authorizedForwarders[i], []common.Address{workerNodeAddresses[i]}, env.EVMClient, env.ContractLoader) + require.NoError(t, err, "Accepting Authorized Receivers on Operator shouldn't fail") + err = actions.TrackForwarderLocal(env.EVMClient, authorizedForwarders[i], workerNodes[i], l) + require.NoError(t, err, "failed to track forwarders") + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + } + + // Gather transmitters + var transmitters []string + for _, forwarderCommonAddress := range authorizedForwarders { + transmitters = append(transmitters, forwarderCommonAddress.Hex()) + } + + ocrOffchainOptions := contracts.DefaultOffChainAggregatorOptions() + ocrInstances, err := actions.DeployOCRv2Contracts(1, linkTokenContract, env.ContractDeployer, transmitters, env.EVMClient, ocrOffchainOptions) + require.NoError(t, err, "Error deploying OCRv2 contracts 
with forwarders") + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + err = actions.CreateOCRv2JobsLocal(ocrInstances, bootstrapNode, workerNodes, env.MockAdapter, "ocr2", 5, env.EVMClient.GetChainID().Uint64(), true, false) + require.NoError(t, err, "Error creating OCRv2 jobs with forwarders") + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + ocrv2Config, err := actions.BuildMedianOCR2ConfigLocal(workerNodes, ocrOffchainOptions) + require.NoError(t, err, "Error building OCRv2 config") + ocrv2Config.Transmitters = authorizedForwarders + + err = actions.ConfigureOCRv2AggregatorContracts(env.EVMClient, ocrv2Config, ocrInstances) + require.NoError(t, err, "Error configuring OCRv2 aggregator contracts") + + err = actions.WatchNewOCR2Round(1, ocrInstances, env.EVMClient, time.Minute*10, l) + require.NoError(t, err) + + answer, err := ocrInstances[0].GetLatestAnswer(testcontext.Get(t)) + require.NoError(t, err, "Getting latest answer from OCRv2 contract shouldn't fail") + require.Equal(t, int64(5), answer.Int64(), "Expected latest answer from OCRw contract to be 5 but got %d", answer.Int64()) + + for i := 2; i <= 3; i++ { + ocrRoundVal := (5 + i) % 10 + err = env.MockAdapter.SetAdapterBasedIntValuePath("ocr2", []string{http.MethodGet, http.MethodPost}, ocrRoundVal) + require.NoError(t, err) + err = actions.WatchNewOCR2Round(int64(i), ocrInstances, env.EVMClient, time.Minute*10, l) + require.NoError(t, err) + + answer, err = ocrInstances[0].GetLatestAnswer(testcontext.Get(t)) + require.NoError(t, err, "Error getting latest OCRv2 answer") + require.Equal(t, int64(ocrRoundVal), answer.Int64(), fmt.Sprintf("Expected latest answer from OCRv2 contract to be %d but got %d", ocrRoundVal, answer.Int64())) + } +} diff --git a/integration-tests/smoke/keeper_test.go b/integration-tests/smoke/keeper_test.go new file mode 100644 index 00000000..c697be5a --- /dev/null +++ 
b/integration-tests/smoke/keeper_test.go @@ -0,0 +1,1251 @@ +package smoke + +import ( + "fmt" + "math/big" + "strconv" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + "github.com/goplugin/pluginv3.0/integration-tests/types/config/node" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +const ( + keeperDefaultUpkeepGasLimit = uint32(2500000) + keeperDefaultLinkFunds = int64(9e18) + keeperDefaultUpkeepsToDeploy = 10 + numUpkeepsAllowedForStragglingTxs = 6 + keeperExpectedData = "abcdef" +) + +var ( + keeperDefaultRegistryConfig = contracts.KeeperRegistrySettings{ + PaymentPremiumPPB: uint32(200000000), + FlatFeeMicroPLI: uint32(0), + BlockCountPerTurn: big.NewInt(10), + CheckGasLimit: uint32(2500000), + StalenessSeconds: big.NewInt(90000), + GasCeilingMultiplier: uint16(1), + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: uint32(5000000), + FallbackGasPrice: big.NewInt(2e11), + FallbackLinkPrice: big.NewInt(2e18), + MaxCheckDataSize: uint32(5000), + MaxPerformDataSize: uint32(5000), + } + lowBCPTRegistryConfig = contracts.KeeperRegistrySettings{ + PaymentPremiumPPB: uint32(200000000), + FlatFeeMicroPLI: uint32(0), + BlockCountPerTurn: big.NewInt(4), + CheckGasLimit: uint32(2500000), + StalenessSeconds: big.NewInt(90000), + GasCeilingMultiplier: uint16(1), + MinUpkeepSpend: big.NewInt(0), + 
MaxPerformGas: uint32(5000000), + FallbackGasPrice: big.NewInt(2e11), + FallbackLinkPrice: big.NewInt(2e18), + } + highBCPTRegistryConfig = contracts.KeeperRegistrySettings{ + PaymentPremiumPPB: uint32(200000000), + FlatFeeMicroPLI: uint32(0), + BlockCountPerTurn: big.NewInt(10000), + CheckGasLimit: uint32(2500000), + StalenessSeconds: big.NewInt(90000), + GasCeilingMultiplier: uint16(1), + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: uint32(5000000), + FallbackGasPrice: big.NewInt(2e11), + FallbackLinkPrice: big.NewInt(2e18), + } +) + +func TestKeeperBasicSmoke(t *testing.T) { + t.Parallel() + registryVersions := []ethereum.KeeperRegistryVersion{ + ethereum.RegistryVersion_1_1, + ethereum.RegistryVersion_1_2, + ethereum.RegistryVersion_1_3, + } + + for _, rv := range registryVersions { + registryVersion := rv + t.Run(fmt.Sprintf("registry_1_%d", registryVersion), func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + t, + registryVersion, + keeperDefaultRegistryConfig, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + big.NewInt(keeperDefaultLinkFunds), + ) + gom := gomega.NewGomegaWithT(t) + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + gom.Eventually(func(g gomega.Gomega) error { + // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 10 + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + 
g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(10)), + "Expected consumer counter to be greater than 10, but got %d", counter.Int64()) + l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Number of upkeeps performed") + } + return nil + }, "5m", "1s").Should(gomega.Succeed()) + + // Cancel all the registered upkeeps via the registry + for i := 0; i < len(upkeepIDs); i++ { + err := registry.CancelUpkeep(upkeepIDs[i]) + require.NoError(t, err, "Could not cancel upkeep at index %d", i) + } + + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for upkeeps to be cancelled") + + var countersAfterCancellation = make([]*big.Int, len(upkeepIDs)) + + for i := 0; i < len(upkeepIDs); i++ { + // Obtain the amount of times the upkeep has been executed so far + countersAfterCancellation[i], err = consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Failed to retrieve consumer counter for upkeep at index %d", i) + l.Info().Int("Index", i).Int64("Upkeeps Performed", countersAfterCancellation[i].Int64()).Msg("Cancelled Upkeep") + } + + gom.Consistently(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + // Expect the counter to remain constant because the upkeep was cancelled, so it shouldn't increase anymore + latestCounter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterCancellation[i].Int64()), + "Expected consumer counter to remain constant at %d, but got %d", + countersAfterCancellation[i].Int64(), latestCounter.Int64()) + } + }, "1m", "1s").Should(gomega.Succeed()) + }) + } +} + +func TestKeeperBlockCountPerTurn(t *testing.T) { + t.Parallel() + registryVersions := []ethereum.KeeperRegistryVersion{ + 
ethereum.RegistryVersion_1_1, + ethereum.RegistryVersion_1_2, + ethereum.RegistryVersion_1_3, + } + + for _, rv := range registryVersions { + registryVersion := rv + t.Run(fmt.Sprintf("registry_1_%d", registryVersion), func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + t, + registryVersion, + highBCPTRegistryConfig, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + big.NewInt(keeperDefaultLinkFunds), + ) + gom := gomega.NewGomegaWithT(t) + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + keepersPerformed := make([]string, 0) + upkeepID := upkeepIDs[0] + + // Wait for upkeep to be performed twice by different keepers (buddies) + gom.Eventually(func(g gomega.Gomega) error { + counter, err := consumers[0].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Number of upkeeps performed") + + upkeepInfo, err := registry.GetUpkeepInfo(testcontext.Get(t), upkeepID) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") + + latestKeeper := upkeepInfo.LastKeeper + l.Info().Str("keeper", latestKeeper).Msg("last keeper to perform upkeep") + g.Expect(latestKeeper).ShouldNot(gomega.Equal(actions.ZeroAddress.String()), "Last keeper should be non zero") + g.Expect(latestKeeper).ShouldNot(gomega.BeElementOf(keepersPerformed), "A new keeper node should perform this upkeep") + 
+ l.Info().Str("keeper", latestKeeper).Msg("New keeper performed upkeep") + keepersPerformed = append(keepersPerformed, latestKeeper) + return nil + }, "1m", "1s").Should(gomega.Succeed()) + + gom.Eventually(func(g gomega.Gomega) error { + upkeepInfo, err := registry.GetUpkeepInfo(testcontext.Get(t), upkeepID) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") + + latestKeeper := upkeepInfo.LastKeeper + g.Expect(latestKeeper).ShouldNot(gomega.Equal(actions.ZeroAddress.String()), "Last keeper should be non zero") + g.Expect(latestKeeper).ShouldNot(gomega.BeElementOf(keepersPerformed), "A new keeper node should perform this upkeep") + + l.Info().Str("Keeper", latestKeeper).Msg("New keeper performed upkeep") + keepersPerformed = append(keepersPerformed, latestKeeper) + return nil + }, "1m", "1s").Should(gomega.Succeed()) + + // Expect no new keepers to perform for a while + gom.Consistently(func(g gomega.Gomega) { + upkeepInfo, err := registry.GetUpkeepInfo(testcontext.Get(t), upkeepID) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") + + latestKeeper := upkeepInfo.LastKeeper + g.Expect(latestKeeper).ShouldNot(gomega.Equal(actions.ZeroAddress.String()), "Last keeper should be non zero") + g.Expect(latestKeeper).Should(gomega.BeElementOf(keepersPerformed), "Existing keepers should alternate turns within BCPT") + }, "1m", "1s").Should(gomega.Succeed()) + + // Now set BCPT to be low, so keepers change turn frequently + err = registry.SetConfig(lowBCPTRegistryConfig, contracts.OCRv2Config{}) + require.NoError(t, err, "Error setting registry config") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for set config tx") + + // Expect a new keeper to perform + gom.Eventually(func(g gomega.Gomega) error { + counter, err := consumers[0].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + 
l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Num upkeeps performed") + + upkeepInfo, err := registry.GetUpkeepInfo(testcontext.Get(t), upkeepID) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") + + latestKeeper := upkeepInfo.LastKeeper + l.Info().Str("keeper", latestKeeper).Msg("last keeper to perform upkeep") + g.Expect(latestKeeper).ShouldNot(gomega.Equal(actions.ZeroAddress.String()), "Last keeper should be non zero") + g.Expect(latestKeeper).ShouldNot(gomega.BeElementOf(keepersPerformed), "A new keeper node should perform this upkeep") + + l.Info().Str("keeper", latestKeeper).Msg("New keeper performed upkeep") + keepersPerformed = append(keepersPerformed, latestKeeper) + return nil + }, "1m", "1s").Should(gomega.Succeed()) + }) + } +} + +func TestKeeperSimulation(t *testing.T) { + t.Parallel() + registryVersions := []ethereum.KeeperRegistryVersion{ + ethereum.RegistryVersion_1_2, + ethereum.RegistryVersion_1_3, + } + + for _, rv := range registryVersions { + registryVersion := rv + t.Run(fmt.Sprintf("registry_1_%d", registryVersion), func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, _, consumersPerformance, upkeepIDs := actions.DeployPerformanceKeeperContracts( + t, + registryVersion, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + &keeperDefaultRegistryConfig, + big.NewInt(keeperDefaultLinkFunds), + 10000, // How many blocks this upkeep will be eligible from first upkeep block + 5, // Interval of blocks that upkeeps are expected to be performed + 100000, // How much gas should be burned on checkUpkeep() calls + 4000000, // How much gas should be burned on performUpkeep() calls. 
Initially set higher than defaultUpkeepGasLimit + ) + gom := gomega.NewGomegaWithT(t) + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + consumerPerformance := consumersPerformance[0] + upkeepID := upkeepIDs[0] + + // Initially performGas is set high, so performUpkeep reverts and no upkeep should be performed + gom.Consistently(func(g gomega.Gomega) { + // Consumer count should remain at 0 + cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") + g.Expect(cnt.Int64()).Should( + gomega.Equal(int64(0)), + "Expected consumer counter to remain constant at %d, but got %d", 0, cnt.Int64(), + ) + + // Not even reverted upkeeps should be performed. Last keeper for the upkeep should be 0 address + upkeepInfo, err := registry.GetUpkeepInfo(testcontext.Get(t), upkeepID) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Registry's getUpkeep shouldn't fail") + g.Expect(upkeepInfo.LastKeeper).Should(gomega.Equal(actions.ZeroAddress.String()), "Last keeper should be zero address") + }, "1m", "1s").Should(gomega.Succeed()) + + // Set performGas on consumer to be low, so that performUpkeep starts becoming successful + err = consumerPerformance.SetPerformGasToBurn(testcontext.Get(t), big.NewInt(100000)) + require.NoError(t, err, "Error setting PerformGasToBurn") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting to set PerformGasToBurn") + + // Upkeep should now start performing + gom.Eventually(func(g gomega.Gomega) error { + cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") + 
g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d", cnt.Int64(), + ) + return nil + }, "1m", "1s").Should(gomega.Succeed()) + }) + } +} + +func TestKeeperCheckPerformGasLimit(t *testing.T) { + t.Parallel() + registryVersions := []ethereum.KeeperRegistryVersion{ + ethereum.RegistryVersion_1_2, + ethereum.RegistryVersion_1_3, + } + + for _, rv := range registryVersions { + registryVersion := rv + t.Run(fmt.Sprintf("registry_1_%d", registryVersion), func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, _, consumersPerformance, upkeepIDs := actions.DeployPerformanceKeeperContracts( + t, + registryVersion, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + &keeperDefaultRegistryConfig, + big.NewInt(keeperDefaultLinkFunds), + 10000, // How many blocks this upkeep will be eligible from first upkeep block + 5, // Interval of blocks that upkeeps are expected to be performed + 100000, // How much gas should be burned on checkUpkeep() calls + 4000000, // How much gas should be burned on performUpkeep() calls. 
Initially set higher than defaultUpkeepGasLimit + ) + gom := gomega.NewGomegaWithT(t) + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + consumerPerformance := consumersPerformance[0] + upkeepID := upkeepIDs[0] + + // Initially performGas is set higher than defaultUpkeepGasLimit, so no upkeep should be performed + gom.Consistently(func(g gomega.Gomega) { + cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(cnt.Int64()).Should( + gomega.Equal(int64(0)), + "Expected consumer counter to remain constant at %d, but got %d", 0, cnt.Int64(), + ) + }, "1m", "1s").Should(gomega.Succeed()) + + // Increase gas limit for the upkeep, higher than the performGasBurn + err = registry.SetUpkeepGasLimit(upkeepID, uint32(4500000)) + require.NoError(t, err, "Error setting Upkeep gas limit") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for SetUpkeepGasLimit tx") + + // Upkeep should now start performing + gom.Eventually(func(g gomega.Gomega) error { + cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d", cnt.Int64(), + ) + return nil + }, "1m", "1s").Should(gomega.Succeed()) + + // Now increase the checkGasBurn on consumer, upkeep should stop performing + err = consumerPerformance.SetCheckGasToBurn(testcontext.Get(t), big.NewInt(3000000)) + require.NoError(t, err, "Error setting CheckGasToBurn") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for 
SetCheckGasToBurn tx") + + // Get existing performed count + existingCnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + require.NoError(t, err, "Error calling consumer's counter") + l.Info().Int64("Upkeep counter", existingCnt.Int64()).Msg("Check Gas Increased") + + // In most cases count should remain constant, but there might be a straggling perform tx which + // gets committed later. Since every keeper node cannot have more than 1 straggling tx, it + // is sufficient to check that the upkeep count does not increase by more than 6. + gom.Consistently(func(g gomega.Gomega) { + cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(cnt.Int64()).Should( + gomega.BeNumerically("<=", existingCnt.Int64()+numUpkeepsAllowedForStragglingTxs), + "Expected consumer counter to remain constant at %d, but got %d", existingCnt.Int64(), cnt.Int64(), + ) + }, "3m", "1s").Should(gomega.Succeed()) + + existingCnt, err = consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + require.NoError(t, err, "Error calling consumer's counter") + existingCntInt := existingCnt.Int64() + l.Info().Int64("Upkeep counter", existingCntInt).Msg("Upkeep counter when consistently block finished") + + // Now increase checkGasLimit on registry + highCheckGasLimit := keeperDefaultRegistryConfig + highCheckGasLimit.CheckGasLimit = uint32(5000000) + err = registry.SetConfig(highCheckGasLimit, contracts.OCRv2Config{}) + require.NoError(t, err, "Error setting registry config") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for set config tx") + + // Upkeep should start performing again, and it should get regularly performed + gom.Eventually(func(g gomega.Gomega) { + cnt, err := consumerPerformance.GetUpkeepCount(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's Counter shouldn't fail") + 
g.Expect(cnt.Int64()).Should(gomega.BeNumerically(">", existingCntInt), + "Expected consumer counter to be greater than %d, but got %d", existingCntInt, cnt.Int64(), + ) + }, "1m", "1s").Should(gomega.Succeed()) + }) + } +} + +func TestKeeperRegisterUpkeep(t *testing.T) { + t.Parallel() + registryVersions := []ethereum.KeeperRegistryVersion{ + ethereum.RegistryVersion_1_1, + ethereum.RegistryVersion_1_2, + ethereum.RegistryVersion_1_3, + } + + for _, rv := range registryVersions { + registryVersion := rv + t.Run(fmt.Sprintf("registry_1_%d", registryVersion), func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, registrar, consumers, upkeepIDs := actions.DeployKeeperContracts( + t, + registryVersion, + keeperDefaultRegistryConfig, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + big.NewInt(keeperDefaultLinkFunds), + ) + gom := gomega.NewGomegaWithT(t) + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + var initialCounters = make([]*big.Int, len(upkeepIDs)) + + // Observe that the upkeeps which are initially registered are performing and + // store the value of their initial counters in order to compare later on that the value increased. 
+ gom.Eventually(func(g gomega.Gomega) error { + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + initialCounters[i] = counter + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter"+ + " for upkeep at index "+strconv.Itoa(i)) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d", counter.Int64()) + l.Info(). + Int64("Upkeep counter", counter.Int64()). + Int("Upkeep ID", i). + Msg("Number of upkeeps performed") + } + return nil + }, "1m", "1s").Should(gomega.Succeed()) + + newConsumers, _ := actions.RegisterNewUpkeeps(t, contractDeployer, chainClient, linkToken, + registry, registrar, keeperDefaultUpkeepGasLimit, 1) + + // We know that newConsumers has size 1, so we can just use the newly registered upkeep. + newUpkeep := newConsumers[0] + + // Test that the newly registered upkeep is also performing. + gom.Eventually(func(g gomega.Gomega) error { + counter, err := newUpkeep.Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling newly deployed upkeep's counter shouldn't fail") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64()) + l.Info().Msg("Newly registered upkeeps performed " + strconv.Itoa(int(counter.Int64())) + " times") + return nil + }, "1m", "1s").Should(gomega.Succeed()) + + gom.Eventually(func(g gomega.Gomega) error { + for i := 0; i < len(upkeepIDs); i++ { + currentCounter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + + l.Info(). + Int("Upkeep ID", i). + Int64("Upkeep counter", currentCounter.Int64()). + Int64("initial counter", initialCounters[i].Int64()). 
+ Msg("Number of upkeeps performed") + + g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", initialCounters[i].Int64()), + "Expected counter to have increased from initial value of %s, but got %s", + initialCounters[i], currentCounter) + } + return nil + }, "1m", "1s").Should(gomega.Succeed()) + }) + } +} + +func TestKeeperAddFunds(t *testing.T) { + t.Parallel() + registryVersions := []ethereum.KeeperRegistryVersion{ + ethereum.RegistryVersion_1_1, + ethereum.RegistryVersion_1_2, + ethereum.RegistryVersion_1_3, + } + + for _, rv := range registryVersions { + registryVersion := rv + t.Run(fmt.Sprintf("registry_1_%d", registryVersion), func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + t, + registryVersion, + keeperDefaultRegistryConfig, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + big.NewInt(1), + ) + gom := gomega.NewGomegaWithT(t) + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + // Since the upkeep is currently underfunded, check that it doesn't get executed + gom.Consistently(func(g gomega.Gomega) { + counter, err := consumers[0].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)), + "Expected consumer counter to remain zero, but got %d", counter.Int64()) + }, "1m", "1s").Should(gomega.Succeed()) + + // Grant permission to the registry to fund the upkeep + err = 
linkToken.Approve(registry.Address(), big.NewInt(9e18)) + require.NoError(t, err, "Error approving permissions for registry") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + // Add funds to the upkeep whose ID we know from above + err = registry.AddUpkeepFunds(upkeepIDs[0], big.NewInt(9e18)) + require.NoError(t, err, "Error funding upkeep") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + // Now the new upkeep should be performing because we added enough funds + gom.Eventually(func(g gomega.Gomega) { + counter, err := consumers[0].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected newly registered upkeep's counter to be greater than 0, but got %d", counter.Int64()) + }, "1m", "1s").Should(gomega.Succeed()) + }) + } +} + +func TestKeeperRemove(t *testing.T) { + t.Parallel() + registryVersions := []ethereum.KeeperRegistryVersion{ + ethereum.RegistryVersion_1_1, + ethereum.RegistryVersion_1_2, + ethereum.RegistryVersion_1_3, + } + + for _, rv := range registryVersions { + registryVersion := rv + t.Run(fmt.Sprintf("registry_1_%d", registryVersion), func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + t, + registryVersion, + keeperDefaultRegistryConfig, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + big.NewInt(keeperDefaultLinkFunds), + ) + gom := gomega.NewGomegaWithT(t) + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + 
require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + var initialCounters = make([]*big.Int, len(upkeepIDs)) + // Make sure the upkeeps are running before we remove a keeper + gom.Eventually(func(g gomega.Gomega) error { + for upkeepID := 0; upkeepID < len(upkeepIDs); upkeepID++ { + counter, err := consumers[upkeepID].Counter(testcontext.Get(t)) + initialCounters[upkeepID] = counter + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter"+ + " for upkeep with ID "+strconv.Itoa(upkeepID)) + g.Expect(counter.Cmp(big.NewInt(0)) == 1).Should(gomega.BeTrue(), "Expected consumer counter to be greater than 0, but got %s", counter) + } + return nil + }, "1m", "1s").Should(gomega.Succeed()) + + keepers, err := registry.GetKeeperList(testcontext.Get(t)) + require.NoError(t, err, "Error getting list of Keepers") + + // Remove the first keeper from the list + require.GreaterOrEqual(t, len(keepers), 2, "Expected there to be at least 2 keepers") + newKeeperList := keepers[1:] + + // Construct the addresses of the payees required by the SetKeepers function + payees := make([]string, len(keepers)-1) + for i := 0; i < len(payees); i++ { + payees[i], err = pluginNodes[0].PrimaryEthAddress() + require.NoError(t, err, "Error building payee list") + } + + err = registry.SetKeepers(newKeeperList, payees, contracts.OCRv2Config{}) + require.NoError(t, err, "Error setting new list of Keepers") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + l.Info().Msg("Successfully removed keeper at address " + keepers[0] + " from the list of Keepers") + + // The upkeeps should still perform and their counters should have increased compared to the first check + gom.Eventually(func(g gomega.Gomega) error { + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to 
retrieve consumer counter for upkeep at index %d", i) + g.Expect(counter.Cmp(initialCounters[i]) == 1).Should(gomega.BeTrue(), "Expected consumer counter to be greater "+ + "than initial counter which was %s, but got %s", initialCounters[i], counter) + } + return nil + }, "1m", "1s").Should(gomega.Succeed()) + }) + } +} + +func TestKeeperPauseRegistry(t *testing.T) { + t.Parallel() + registryVersions := []ethereum.KeeperRegistryVersion{ + ethereum.RegistryVersion_1_2, + ethereum.RegistryVersion_1_3, + } + + for _, rv := range registryVersions { + registryVersion := rv + t.Run(fmt.Sprintf("registry_1_%d", registryVersion), func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + t, + registryVersion, + keeperDefaultRegistryConfig, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + big.NewInt(keeperDefaultLinkFunds), + ) + gom := gomega.NewGomegaWithT(t) + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + // Observe that the upkeeps which are initially registered are performing + gom.Eventually(func(g gomega.Gomega) error { + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d", counter.Int64()) + } + return nil + }, "1m", "1s").Should(gomega.Succeed()) + + // Pause the registry 
+ err = registry.Pause() + require.NoError(t, err, "Error pausing the registry") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + // Store how many times each upkeep performed once the registry was successfully paused + var countersAfterPause = make([]*big.Int, len(upkeepIDs)) + for i := 0; i < len(upkeepIDs); i++ { + countersAfterPause[i], err = consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Error retrieving consumer at index %d", i) + } + + // After we paused the registry, the counters of all the upkeeps should stay constant + // because they are no longer getting serviced + gom.Consistently(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + latestCounter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Error retrieving consumer contract at index %d", i) + g.Expect(latestCounter.Int64()).Should(gomega.Equal(countersAfterPause[i].Int64()), + "Expected consumer counter to remain constant at %d, but got %d", + countersAfterPause[i].Int64(), latestCounter.Int64()) + } + }, "1m", "1s").Should(gomega.Succeed()) + }) + } +} + +func TestKeeperMigrateRegistry(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + t, + ethereum.RegistryVersion_1_2, + keeperDefaultRegistryConfig, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + big.NewInt(keeperDefaultLinkFunds), + ) + gom := gomega.NewGomegaWithT(t) + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error 
creating keeper jobs") + + // Deploy the second registry, second registrar, and the same number of upkeeps as the first one + secondRegistry, _, _, _ := actions.DeployKeeperContracts( + t, + ethereum.RegistryVersion_1_2, + keeperDefaultRegistryConfig, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + big.NewInt(keeperDefaultLinkFunds), + ) + + // Set the jobs for the second registry + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, secondRegistry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + err = registry.SetMigrationPermissions(common.HexToAddress(secondRegistry.Address()), 3) + require.NoError(t, err, "Error setting bidirectional permissions for first registry") + err = secondRegistry.SetMigrationPermissions(common.HexToAddress(registry.Address()), 3) + require.NoError(t, err, "Error setting bidirectional permissions for second registry") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting to set permissions") + + // Check that the first upkeep from the first registry is performing (before being migrated) + gom.Eventually(func(g gomega.Gomega) error { + counterBeforeMigration, err := consumers[0].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(counterBeforeMigration.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %s", counterBeforeMigration) + return nil + }, "1m", "1s").Should(gomega.Succeed()) + + // Migrate the upkeep with index 0 from the first to the second registry + err = registry.Migrate([]*big.Int{upkeepIDs[0]}, common.HexToAddress(secondRegistry.Address())) + require.NoError(t, err, "Error migrating first upkeep") + err = chainClient.WaitForEvents() 
+ require.NoError(t, err, "Error waiting for migration") + + // Pause the first registry, in that way we make sure that the upkeep is being performed by the second one + err = registry.Pause() + require.NoError(t, err, "Error pausing registry") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting to pause first registry") + + counterAfterMigration, err := consumers[0].Counter(testcontext.Get(t)) + require.NoError(t, err, "Error calling consumer's counter") + + // Check that once we migrated the upkeep, the counter has increased + gom.Eventually(func(g gomega.Gomega) error { + currentCounter, err := consumers[0].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Calling consumer's counter shouldn't fail") + g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", counterAfterMigration.Int64()), + "Expected counter to have increased, but stayed constant at %s", counterAfterMigration) + return nil + }, "1m", "1s").Should(gomega.Succeed()) +} + +func TestKeeperNodeDown(t *testing.T) { + t.Parallel() + registryVersions := []ethereum.KeeperRegistryVersion{ + ethereum.RegistryVersion_1_1, + ethereum.RegistryVersion_1_2, + ethereum.RegistryVersion_1_3, + } + + for _, rv := range registryVersions { + registryVersion := rv + t.Run(fmt.Sprintf("registry_1_%d", registryVersion), func(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + t, + registryVersion, + lowBCPTRegistryConfig, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + big.NewInt(keeperDefaultLinkFunds), + ) + gom := gomega.NewGomegaWithT(t) + + jobs, err := actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, 
chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + var initialCounters = make([]*big.Int, len(upkeepIDs)) + + // Watch upkeeps being performed and store their counters in order to compare them later in the test + gom.Eventually(func(g gomega.Gomega) error { + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + initialCounters[i] = counter + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(0)), + "Expected consumer counter to be greater than 0, but got %d", counter.Int64()) + } + return nil + }, "1m", "1s").Should(gomega.Succeed()) + + // Take down half of the Keeper nodes by deleting the Keeper job registered above (after registry deployment) + firstHalfToTakeDown := pluginNodes[:len(pluginNodes)/2+1] + for i, nodeToTakeDown := range firstHalfToTakeDown { + err = nodeToTakeDown.MustDeleteJob(jobs[0].Data.ID) + require.NoError(t, err, "Error deleting job from node %d", i) + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + } + l.Info().Msg("Successfully managed to take down the first half of the nodes") + + // Assert that upkeeps are still performed and their counters have increased + gom.Eventually(func(g gomega.Gomega) error { + for i := 0; i < len(upkeepIDs); i++ { + currentCounter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(currentCounter.Int64()).Should(gomega.BeNumerically(">", initialCounters[i].Int64()), + "Expected counter to have increased from initial value of %s, but got %s", + initialCounters[i], currentCounter) + } + return nil + }, "3m", "1s").Should(gomega.Succeed()) + + // 
Take down the other half of the Keeper nodes + nodesAndJobs := []nodeAndJob{} + for i, n := range pluginNodes { + nodesAndJobs = append(nodesAndJobs, nodeAndJob{node: n, job: jobs[i]}) + } + secondHalfToTakeDown := nodesAndJobs[len(nodesAndJobs)/2+1:] + for i, nodeToTakeDown := range secondHalfToTakeDown { + err = nodeToTakeDown.node.MustDeleteJob(nodeToTakeDown.job.Data.ID) + require.NoError(t, err, "Error deleting job from node %d", i) + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + } + l.Info().Msg("Successfully managed to take down the second half of the nodes") + + // See how many times each upkeep was executed + var countersAfterNoMoreNodes = make([]*big.Int, len(upkeepIDs)) + for i := 0; i < len(upkeepIDs); i++ { + countersAfterNoMoreNodes[i], err = consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Error retrieving consumer counter %d", i) + l.Info(). + Int("Index", i). + Int64("Upkeeps", countersAfterNoMoreNodes[i].Int64()). + Msg("Upkeeps Performed") + } + + // Once all the nodes are taken down, there might be some straggling transactions which went through before + // all the nodes were taken down. Every keeper node can have at most 1 straggling transaction per upkeep, + // so a +6 on the upper limit side should be sufficient. 
+ gom.Consistently(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + latestCounter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", + countersAfterNoMoreNodes[i].Int64()+numUpkeepsAllowedForStragglingTxs, + ), + "Expected consumer counter to not have increased more than %d, but got %d", + countersAfterNoMoreNodes[i].Int64()+numUpkeepsAllowedForStragglingTxs, latestCounter.Int64()) + } + }, "3m", "1s").Should(gomega.Succeed()) + }) + } +} + +type nodeAndJob struct { + node *client.PluginClient + job *client.Job +} + +func TestKeeperPauseUnPauseUpkeep(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + t, + ethereum.RegistryVersion_1_3, + lowBCPTRegistryConfig, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + big.NewInt(keeperDefaultLinkFunds), + ) + gom := gomega.NewGomegaWithT(t) + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + gom.Eventually(func(g gomega.Gomega) error { + // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + 
g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(5)), + "Expected consumer counter to be greater than 5, but got %d", counter.Int64()) + l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Number of upkeeps performed") + } + return nil + }, "3m", "1s").Should(gomega.Succeed()) + + // pause all the registered upkeeps via the registry + for i := 0; i < len(upkeepIDs); i++ { + err := registry.PauseUpkeep(upkeepIDs[i]) + require.NoError(t, err, "Error pausing upkeep at index %d", i) + } + + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting to pause upkeeps") + + var countersAfterPause = make([]*big.Int, len(upkeepIDs)) + for i := 0; i < len(upkeepIDs); i++ { + // Obtain the amount of times the upkeep has been executed so far + countersAfterPause[i], err = consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Error retrieving upkeep count at index %d", i) + l.Info(). + Int("Index", i). + Int64("Upkeeps", countersAfterPause[i].Int64()). + Msg("Paused Upkeep") + } + + gom.Consistently(func(g gomega.Gomega) { + for i := 0; i < len(upkeepIDs); i++ { + // In most cases counters should remain constant, but there might be a straggling perform tx which + // gets committed later. Since every keeper node cannot have more than 1 straggling tx, it + // is sufficient to check that the upkeep count does not increase by more than 6. 
+ latestCounter, err := consumers[i].Counter(testcontext.Get(t)) + require.NoError(t, err, "Error retrieving counter at index %d", i) + g.Expect(latestCounter.Int64()).Should(gomega.BeNumerically("<=", countersAfterPause[i].Int64()+numUpkeepsAllowedForStragglingTxs), + "Expected consumer counter not have increased more than %d, but got %d", + countersAfterPause[i].Int64()+numUpkeepsAllowedForStragglingTxs, latestCounter.Int64()) + } + }, "1m", "1s").Should(gomega.Succeed()) + + // unpause all the registered upkeeps via the registry + for i := 0; i < len(upkeepIDs); i++ { + err := registry.UnpauseUpkeep(upkeepIDs[i]) + require.NoError(t, err, "Error un-pausing upkeep at index %d", i) + } + + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting to un-pause upkeeps") + + gom.Eventually(func(g gomega.Gomega) error { + // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 + numbers of performing before pause + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter"+ + " for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(5)+countersAfterPause[i].Int64()), + "Expected consumer counter to be greater than %d, but got %d", int64(5)+countersAfterPause[i].Int64(), counter.Int64()) + l.Info().Int64("Upkeeps", counter.Int64()).Msg("Upkeeps Performed") + } + return nil + }, "3m", "1s").Should(gomega.Succeed()) +} + +func TestKeeperUpdateCheckData(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, _, performDataChecker, upkeepIDs := actions.DeployPerformDataCheckerContracts( + t, + ethereum.RegistryVersion_1_3, + 
keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + &lowBCPTRegistryConfig, + big.NewInt(keeperDefaultLinkFunds), + []byte(keeperExpectedData), + ) + gom := gomega.NewGomegaWithT(t) + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + gom.Consistently(func(g gomega.Gomega) { + // expect the counter to remain 0 because perform data does not match + for i := 0; i < len(upkeepIDs); i++ { + counter, err := performDataChecker[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve perform data checker for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.Equal(int64(0)), + "Expected perform data checker counter to be 0, but got %d", counter.Int64()) + l.Info().Int64("Upkeep perform data checker", counter.Int64()).Msg("Number of upkeeps performed") + } + }, "2m", "1s").Should(gomega.Succeed()) + + for i := 0; i < len(upkeepIDs); i++ { + err = registry.UpdateCheckData(upkeepIDs[i], []byte(keeperExpectedData)) + require.NoError(t, err, "Error updating check data at index %d", i) + } + + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error waiting for updated check data") + + // retrieve new check data for all upkeeps + for i := 0; i < len(upkeepIDs); i++ { + upkeep, err := registry.GetUpkeepInfo(testcontext.Get(t), upkeepIDs[i]) + require.NoError(t, err, "Error getting upkeep info from index %d", i) + require.Equal(t, []byte(keeperExpectedData), upkeep.CheckData, "Check data not as expected") + } + + gom.Eventually(func(g gomega.Gomega) error { + // Check if the upkeeps are performing multiple times by analysing their counters and checking they are greater than 5 + for i := 0; i < len(upkeepIDs); i++ { + counter, err 
:= performDataChecker[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve perform data checker counter for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(5)), + "Expected perform data checker counter to be greater than 5, but got %d", counter.Int64()) + l.Info().Int64("Upkeep perform data checker", counter.Int64()).Msg("Number of upkeeps performed") + } + return nil + }, "3m", "1s").Should(gomega.Succeed()) +} + +func setupKeeperTest(t *testing.T, config *tc.TestConfig) ( + blockchain.EVMClient, + []*client.PluginClient, + contracts.ContractDeployer, + contracts.LinkToken, + *test_env.CLClusterTestEnv, +) { + clNodeConfig := node.NewConfig(node.NewBaseConfig(), node.WithP2Pv2()) + turnLookBack := int64(0) + syncInterval := *commonconfig.MustNewDuration(5 * time.Second) + performGasOverhead := uint32(150000) + clNodeConfig.Keeper.TurnLookBack = &turnLookBack + clNodeConfig.Keeper.Registry.SyncInterval = &syncInterval + clNodeConfig.Keeper.Registry.PerformGasOverhead = &performGasOverhead + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(config). + WithGeth(). + WithCLNodes(5). + WithCLNodeConfig(clNodeConfig). + WithFunding(big.NewFloat(.5)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err, "Error deploying test environment") + + env.ParallelTransactions(true) + + linkTokenContract, err := env.ContractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + return env.EVMClient, env.ClCluster.NodeAPIs(), env.ContractDeployer, linkTokenContract, env +} + +func TestKeeperJobReplacement(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + registryVersion := ethereum.RegistryVersion_1_3 + config, err := tc.GetConfig("Smoke", tc.Keeper) + if err != nil { + t.Fatal(err) + } + + chainClient, pluginNodes, contractDeployer, linkToken, _ := setupKeeperTest(t, &config) + registry, _, consumers, upkeepIDs := actions.DeployKeeperContracts( + t, + registryVersion, + keeperDefaultRegistryConfig, + keeperDefaultUpkeepsToDeploy, + keeperDefaultUpkeepGasLimit, + linkToken, + contractDeployer, + chainClient, + big.NewInt(keeperDefaultLinkFunds), + ) + gom := gomega.NewGomegaWithT(t) + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + gom.Eventually(func(g gomega.Gomega) error { + // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 10 + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(10)), + "Expected consumer counter to be greater than 10, but got %d", counter.Int64()) + l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Number of upkeeps performed") + } + 
return nil + }, "5m", "1s").Should(gomega.Succeed()) + + for _, n := range pluginNodes { + jobs, _, err := n.ReadJobs() + require.NoError(t, err) + for _, maps := range jobs.Data { + _, ok := maps["id"] + require.Equal(t, true, ok) + id := maps["id"].(string) + _, err := n.DeleteJob(id) + require.NoError(t, err) + } + } + + _, err = actions.CreateKeeperJobsLocal(l, pluginNodes, registry, contracts.OCRv2Config{}, chainClient.GetChainID().String()) + require.NoError(t, err, "Error creating keeper jobs") + err = chainClient.WaitForEvents() + require.NoError(t, err, "Error creating keeper jobs") + + gom.Eventually(func(g gomega.Gomega) error { + // Check if the upkeeps are performing multiple times by analyzing their counters and checking they are greater than 10 + for i := 0; i < len(upkeepIDs); i++ { + counter, err := consumers[i].Counter(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Failed to retrieve consumer counter for upkeep at index %d", i) + g.Expect(counter.Int64()).Should(gomega.BeNumerically(">", int64(10)), + "Expected consumer counter to be greater than 10, but got %d", counter.Int64()) + l.Info().Int64("Upkeep counter", counter.Int64()).Msg("Number of upkeeps performed") + } + return nil + }, "5m", "1s").Should(gomega.Succeed()) +} diff --git a/integration-tests/smoke/keeper_test.go_test_list.json b/integration-tests/smoke/keeper_test.go_test_list.json new file mode 100644 index 00000000..b2f4aa00 --- /dev/null +++ b/integration-tests/smoke/keeper_test.go_test_list.json @@ -0,0 +1,61 @@ +{ + "tests": [ + { + "name": "TestKeeperBasicSmoke", + "label": "ubuntu-latest", + "nodes": 3 + }, + { + "name": "TestKeeperBlockCountPerTurn", + "label": "ubuntu-latest", + "nodes": 3 + }, + { + "name": "TestKeeperSimulation", + "label": "ubuntu-latest", + "nodes": 2 + }, + { + "name": "TestKeeperCheckPerformGasLimit", + "label": "ubuntu-latest", + "nodes": 3 + }, + { + "name": "TestKeeperRegisterUpkeep", + "label": "ubuntu-latest", + "nodes": 3 
+ }, + { + "name": "TestKeeperAddFunds", + "label": "ubuntu-latest", + "nodes": 3 + }, + { + "name": "TestKeeperRemove", + "label": "ubuntu-latest", + "nodes": 3 + }, + { + "name": "TestKeeperPauseRegistry", + "label": "ubuntu-latest", + "nodes": 2 + }, + { + "name": "TestKeeperMigrateRegistry" + }, + { + "name": "TestKeeperNodeDown", + "label": "ubuntu-latest", + "nodes": 3 + }, + { + "name": "TestKeeperPauseUnPauseUpkeep" + }, + { + "name": "TestKeeperUpdateCheckData" + }, + { + "name": "TestKeeperJobReplacement" + } + ] +} \ No newline at end of file diff --git a/integration-tests/smoke/log_poller_test.go b/integration-tests/smoke/log_poller_test.go new file mode 100644 index 00000000..a6889bbd --- /dev/null +++ b/integration-tests/smoke/log_poller_test.go @@ -0,0 +1,395 @@ +package smoke + +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/onsi/gomega" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + lp_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/log_poller" + logpoller "github.com/goplugin/pluginv3.0/integration-tests/universal/log_poller" + + core_logger "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +// consistency test with no network disruptions with approximate emission of 1500-1600 logs per second for ~110-120 seconds +// 6 filters are registered +func TestLogPollerFewFiltersFixedDepth(t *testing.T) { + executeBasicLogPollerTest(t) +} + +func TestLogPollerFewFiltersFinalityTag(t *testing.T) { 
+ executeBasicLogPollerTest(t) +} + +// consistency test with no network disruptions with approximate emission of 1000-1100 logs per second for ~110-120 seconds +// 900 filters are registered +func TestLogPollerManyFiltersFixedDepth(t *testing.T) { + t.Skip("Execute manually, when needed as it runs for a long time") + executeBasicLogPollerTest(t) +} + +func TestLogPollerManyFiltersFinalityTag(t *testing.T) { + t.Skip("Execute manually, when needed as it runs for a long time") + executeBasicLogPollerTest(t) +} + +// consistency test that introduces random distruptions by pausing either Plugin or Postgres containers for random interval of 5-20 seconds +// with approximate emission of 520-550 logs per second for ~110 seconds +// 6 filters are registered +func TestLogPollerWithChaosFixedDepth(t *testing.T) { + executeBasicLogPollerTest(t) +} + +func TestLogPollerWithChaosFinalityTag(t *testing.T) { + executeBasicLogPollerTest(t) +} + +func TestLogPollerWithChaosPostgresFixedDepth(t *testing.T) { + executeBasicLogPollerTest(t) +} + +func TestLogPollerWithChaosPostgresFinalityTag(t *testing.T) { + executeBasicLogPollerTest(t) +} + +// consistency test that registers filters after events were emitted and then triggers replay via API +// unfortunately there is no way to make sure that logs that are indexed are only picked up by replay +// and not by backup poller +// with approximate emission of 24 logs per second for ~110 seconds +// 6 filters are registered +func TestLogPollerReplayFixedDepth(t *testing.T) { + executeLogPollerReplay(t, "5m") +} + +func TestLogPollerReplayFinalityTag(t *testing.T) { + executeLogPollerReplay(t, "5m") +} + +// HELPER FUNCTIONS +func executeBasicLogPollerTest(t *testing.T) { + testConfig, err := tc.GetConfig(t.Name(), tc.LogPoller) + require.NoError(t, err, "Error getting config") + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg := 
testConfig.LogPoller + cfg.General.EventsToEmit = eventsToEmit + + l := logging.GetTestLogger(t) + coreLogger := core_logger.TestLogger(t) //needed by ORM ¯\_(ツ)_/¯ + + lpTestEnv := prepareEnvironment(l, t, &testConfig) + testEnv := lpTestEnv.testEnv + + // Register log triggered upkeep for each combination of log emitter contract and event signature (topic) + // We need to register a separate upkeep for each event signature, because log trigger doesn't support multiple topics (even if log poller does) + err = logpoller.RegisterFiltersAndAssertUniquness(l, lpTestEnv.registry, lpTestEnv.upkeepIDs, lpTestEnv.logEmitters, cfg, lpTestEnv.upKeepsNeeded) + require.NoError(t, err, "Error registering filters") + + l.Info().Msg("No duplicate filters found. OK!") + + err = testEnv.EVMClient.WaitForEvents() + require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps") + + expectedFilters := logpoller.GetExpectedFilters(lpTestEnv.logEmitters, cfg) + waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(l, coreLogger, t, testEnv, expectedFilters) + + // Save block number before starting to emit events, so that we can later use it when querying logs + sb, err := testEnv.EVMClient.LatestBlockNumber(testcontext.Get(t)) + require.NoError(t, err, "Error getting latest block number") + startBlock := int64(sb) + + l.Info().Int64("Starting Block", startBlock).Msg("STARTING EVENT EMISSION") + startTime := time.Now() + + // Start chaos experimnents by randomly pausing random containers (Plugin nodes or their DBs) + chaosDoneCh := make(chan error, 1) + go func() { + logpoller.ExecuteChaosExperiment(l, testEnv, cfg, chaosDoneCh) + }() + + totalLogsEmitted, err := logpoller.ExecuteGenerator(t, cfg, lpTestEnv.logEmitters) + endTime := time.Now() + require.NoError(t, err, "Error executing event generator") + + expectedLogsEmitted := logpoller.GetExpectedLogCount(cfg) + duration := int(endTime.Sub(startTime).Seconds()) + + eb, err := 
testEnv.EVMClient.LatestBlockNumber(testcontext.Get(t)) + require.NoError(t, err, "Error getting latest block number") + + l.Info(). + Int("Total logs emitted", totalLogsEmitted). + Uint64("Probable last block with logs", eb). + Int64("Expected total logs emitted", expectedLogsEmitted). + Str("Duration", fmt.Sprintf("%d sec", duration)). + Str("LPS", fmt.Sprintf("~%d/sec", totalLogsEmitted/duration)). + Msg("FINISHED EVENT EMISSION") + + l.Info().Msg("Waiting before proceeding with test until all chaos experiments finish") + chaosError := <-chaosDoneCh + require.NoError(t, chaosError, "Error encountered during chaos experiment") + + // use ridciuously high end block so that we don't have to find out the block number of the last block in which logs were emitted + // as that's not trivial to do (i.e. just because chain was at block X when log emission ended it doesn't mean all events made it to that block) + endBlock := int64(eb) + 10000 + + // logCountWaitDuration, err := time.ParseDuration("5m") + // require.NoError(t, err, "Error parsing log count wait duration") + allNodesLogCountMatches, err := logpoller.FluentlyCheckIfAllNodesHaveLogCount("5m", startBlock, endBlock, totalLogsEmitted, expectedFilters, l, coreLogger, testEnv) + require.NoError(t, err, "Error checking if CL nodes have expected log count") + + conditionallyWaitUntilNodesHaveTheSameLogsAsEvm(l, coreLogger, t, allNodesLogCountMatches, lpTestEnv, cfg, startBlock, endBlock, "5m") +} + +func executeLogPollerReplay(t *testing.T, consistencyTimeout string) { + testConfig, err := tc.GetConfig(t.Name(), tc.LogPoller) + require.NoError(t, err, "Error getting config") + + eventsToEmit := []abi.Event{} + for _, event := range logpoller.EmitterABI.Events { + eventsToEmit = append(eventsToEmit, event) + } + + cfg := testConfig.LogPoller + cfg.General.EventsToEmit = eventsToEmit + + l := logging.GetTestLogger(t) + coreLogger := core_logger.TestLogger(t) //needed by ORM ¯\_(ツ)_/¯ + + lpTestEnv := 
prepareEnvironment(l, t, &testConfig) + testEnv := lpTestEnv.testEnv + + // Save block number before starting to emit events, so that we can later use it when querying logs + sb, err := testEnv.EVMClient.LatestBlockNumber(testcontext.Get(t)) + require.NoError(t, err, "Error getting latest block number") + startBlock := int64(sb) + + l.Info().Int64("Starting Block", startBlock).Msg("STARTING EVENT EMISSION") + startTime := time.Now() + totalLogsEmitted, err := logpoller.ExecuteGenerator(t, cfg, lpTestEnv.logEmitters) + endTime := time.Now() + require.NoError(t, err, "Error executing event generator") + expectedLogsEmitted := logpoller.GetExpectedLogCount(cfg) + duration := int(endTime.Sub(startTime).Seconds()) + + // Save block number after finishing to emit events, so that we can later use it when querying logs + eb, err := testEnv.EVMClient.LatestBlockNumber(testcontext.Get(t)) + require.NoError(t, err, "Error getting latest block number") + + endBlock, err := logpoller.GetEndBlockToWaitFor(int64(eb), testEnv.EVMClient.GetChainID().Int64(), cfg) + require.NoError(t, err, "Error getting end block to wait for") + + l.Info().Int64("Ending Block", endBlock).Int("Total logs emitted", totalLogsEmitted).Int64("Expected total logs emitted", expectedLogsEmitted).Str("Duration", fmt.Sprintf("%d sec", duration)).Str("LPS", fmt.Sprintf("%d/sec", totalLogsEmitted/duration)).Msg("FINISHED EVENT EMISSION") + + // Lets make sure no logs are in DB yet + expectedFilters := logpoller.GetExpectedFilters(lpTestEnv.logEmitters, cfg) + logCountMatches, err := logpoller.ClNodesHaveExpectedLogCount(startBlock, endBlock, testEnv.EVMClient.GetChainID(), 0, expectedFilters, l, coreLogger, testEnv.ClCluster) + require.NoError(t, err, "Error checking if CL nodes have expected log count") + require.True(t, logCountMatches, "Some CL nodes already had logs in DB") + l.Info().Msg("No logs were saved by CL nodes yet, as expected. 
Proceeding.") + + // Register log triggered upkeep for each combination of log emitter contract and event signature (topic) + // We need to register a separate upkeep for each event signature, because log trigger doesn't support multiple topics (even if log poller does) + err = logpoller.RegisterFiltersAndAssertUniquness(l, lpTestEnv.registry, lpTestEnv.upkeepIDs, lpTestEnv.logEmitters, cfg, lpTestEnv.upKeepsNeeded) + require.NoError(t, err, "Error registering filters") + + err = testEnv.EVMClient.WaitForEvents() + require.NoError(t, err, "Error encountered when waiting for setting trigger config for upkeeps") + + waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(l, coreLogger, t, testEnv, expectedFilters) + + blockFinalisationWaitDuration := "5m" + l.Warn().Str("Duration", blockFinalisationWaitDuration).Msg("Waiting for all CL nodes to have end block finalised") + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + hasFinalised, err := logpoller.LogPollerHasFinalisedEndBlock(endBlock, testEnv.EVMClient.GetChainID(), l, coreLogger, testEnv.ClCluster) + if err != nil { + l.Warn().Err(err).Msg("Error checking if nodes have finalised end block. 
Retrying...") + } + g.Expect(hasFinalised).To(gomega.BeTrue(), "Some nodes have not finalised end block") + }, blockFinalisationWaitDuration, "10s").Should(gomega.Succeed()) + + // Trigger replay + l.Info().Msg("Triggering log poller's replay") + for i := 1; i < len(testEnv.ClCluster.Nodes); i++ { + nodeName := testEnv.ClCluster.Nodes[i].ContainerName + response, _, err := testEnv.ClCluster.Nodes[i].API.ReplayLogPollerFromBlock(startBlock, testEnv.EVMClient.GetChainID().Int64()) + require.NoError(t, err, "Error triggering log poller's replay on node %s", nodeName) + require.Equal(t, "Replay started", response.Data.Attributes.Message, "Unexpected response message from log poller's replay") + } + + // so that we don't have to look for block number of the last block in which logs were emitted as that's not trivial to do + endBlock = endBlock + 10000 + l.Warn().Str("Duration", consistencyTimeout).Msg("Waiting for replay logs to be processed by all nodes") + + // logCountWaitDuration, err := time.ParseDuration("5m") + allNodesLogCountMatches, err := logpoller.FluentlyCheckIfAllNodesHaveLogCount("5m", startBlock, endBlock, totalLogsEmitted, expectedFilters, l, coreLogger, testEnv) + require.NoError(t, err, "Error checking if CL nodes have expected log count") + + conditionallyWaitUntilNodesHaveTheSameLogsAsEvm(l, coreLogger, t, allNodesLogCountMatches, lpTestEnv, cfg, startBlock, endBlock, "5m") +} + +type logPollerEnvironment struct { + logEmitters []*contracts.LogEmitter + testEnv *test_env.CLClusterTestEnv + registry contracts.KeeperRegistry + upkeepIDs []*big.Int + upKeepsNeeded int +} + +// prepareEnvironment prepares environment for log poller tests by starting DON, private Ethereum network, +// deploying registry and log emitter contracts and registering log triggered upkeeps +func prepareEnvironment(l zerolog.Logger, t *testing.T, testConfig *tc.TestConfig) logPollerEnvironment { + cfg := testConfig.LogPoller + if cfg.General.EventsToEmit == nil || 
len(cfg.General.EventsToEmit) == 0 { + l.Warn().Msg("No events to emit specified, using all events from log emitter contract") + for _, event := range logpoller.EmitterABI.Events { + cfg.General.EventsToEmit = append(cfg.General.EventsToEmit, event) + } + } + + l.Info().Msg("Starting basic log poller test") + + var ( + err error + upKeepsNeeded = *cfg.General.Contracts * len(cfg.General.EventsToEmit) + ) + + chainClient, _, contractDeployer, linkToken, registry, registrar, testEnv := logpoller.SetupLogPollerTestDocker( + t, + ethereum.RegistryVersion_2_1, + logpoller.DefaultOCRRegistryConfig, + upKeepsNeeded, + time.Duration(500*time.Millisecond), + *cfg.General.UseFinalityTag, + testConfig, + ) + + _, upkeepIDs := actions.DeployConsumers( + t, + registry, + registrar, + linkToken, + contractDeployer, + chainClient, + upKeepsNeeded, + big.NewInt(int64(9e18)), + uint32(2500000), + true, + false, + ) + + err = logpoller.AssertUpkeepIdsUniqueness(upkeepIDs) + require.NoError(t, err, "Error asserting upkeep ids uniqueness") + l.Info().Msg("No duplicate upkeep IDs found. OK!") + + // Deploy Log Emitter contracts + logEmitters := logpoller.UploadLogEmitterContractsAndWaitForFinalisation(l, t, testEnv, cfg) + err = logpoller.AssertContractAddressUniquneness(logEmitters) + require.NoError(t, err, "Error asserting contract addresses uniqueness") + l.Info().Msg("No duplicate contract addresses found. 
OK!") + + return logPollerEnvironment{ + logEmitters: logEmitters, + registry: registry, + upkeepIDs: upkeepIDs, + upKeepsNeeded: upKeepsNeeded, + testEnv: testEnv, + } +} + +// waitForAllNodesToHaveExpectedFiltersRegisteredOrFail waits until all nodes have expected filters registered until timeout +func waitForAllNodesToHaveExpectedFiltersRegisteredOrFail(l zerolog.Logger, coreLogger core_logger.SugaredLogger, t *testing.T, testEnv *test_env.CLClusterTestEnv, expectedFilters []logpoller.ExpectedFilter) { + // Make sure that all nodes have expected filters registered before starting to emit events + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + hasFilters := false + for i := 1; i < len(testEnv.ClCluster.Nodes); i++ { + nodeName := testEnv.ClCluster.Nodes[i].ContainerName + l.Info(). + Str("Node name", nodeName). + Msg("Fetching filters from log poller's DB") + var message string + var err error + + hasFilters, message, err = logpoller.NodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb) + if !hasFilters || err != nil { + l.Warn(). + Str("Details", message). + Msg("Some filters were missing, but we will retry") + break + } + } + g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB") + }, "5m", "10s").Should(gomega.Succeed()) + + l.Info(). + Msg("All nodes have expected filters registered") + l.Info(). + Int("Count", len(expectedFilters)). 
+ Msg("Expected filters count") +} + +// conditionallyWaitUntilNodesHaveTheSameLogsAsEvm checks whether all CL nodes have the same number of logs as EVM node +// if not, then it prints missing logs and wait for some time and checks again +func conditionallyWaitUntilNodesHaveTheSameLogsAsEvm(l zerolog.Logger, coreLogger core_logger.SugaredLogger, t *testing.T, allNodesLogCountMatches bool, lpTestEnv logPollerEnvironment, cfg *lp_config.Config, startBlock, endBlock int64, waitDuration string) { + logCountWaitDuration, err := time.ParseDuration(waitDuration) + require.NoError(t, err, "Error parsing log count wait duration") + + allNodesHaveAllExpectedLogs := false + if !allNodesLogCountMatches { + missingLogs, err := logpoller.GetMissingLogs(startBlock, endBlock, lpTestEnv.logEmitters, lpTestEnv.testEnv.EVMClient, lpTestEnv.testEnv.ClCluster, l, coreLogger, cfg) + if err == nil { + if !missingLogs.IsEmpty() { + logpoller.PrintMissingLogsInfo(missingLogs, l, cfg) + } else { + allNodesHaveAllExpectedLogs = true + l.Info().Msg("All CL nodes have all the logs that EVM node has") + } + } + } + + require.True(t, allNodesLogCountMatches, "Not all CL nodes had expected log count afer %s", logCountWaitDuration) + + // Wait until all CL nodes have exactly the same logs emitted by test contracts as the EVM node has + // but only in the rare case that first attempt to do it failed (basically here want to know not only + // if log count matches, but whether details of every single log match) + if !allNodesHaveAllExpectedLogs { + logConsistencyWaitDuration := "5m" + l.Info(). + Str("Duration", logConsistencyWaitDuration). + Msg("Waiting for CL nodes to have all the logs that EVM node has") + + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + missingLogs, err := logpoller.GetMissingLogs(startBlock, endBlock, lpTestEnv.logEmitters, lpTestEnv.testEnv.EVMClient, lpTestEnv.testEnv.ClCluster, l, coreLogger, cfg) + if err != nil { + l.Warn(). + Err(err). 
+ Msg("Error getting missing logs. Retrying...") + } + + if !missingLogs.IsEmpty() { + logpoller.PrintMissingLogsInfo(missingLogs, l, cfg) + } + g.Expect(missingLogs.IsEmpty()).To(gomega.BeTrue(), "Some CL nodes were missing logs") + }, logConsistencyWaitDuration, "10s").Should(gomega.Succeed()) + } +} diff --git a/integration-tests/smoke/log_poller_test.go_test_list.json b/integration-tests/smoke/log_poller_test.go_test_list.json new file mode 100644 index 00000000..2159654e --- /dev/null +++ b/integration-tests/smoke/log_poller_test.go_test_list.json @@ -0,0 +1,44 @@ +{ + "tests": [ + { + "name": "TestLogPollerFewFiltersFixedDepth", + "label": "ubuntu-latest" + }, + { + "name": "TestLogPollerFewFiltersFinalityTag", + "label": "ubuntu-latest" + }, + { + "name": "TestLogPollerWithChaosFixedDepth", + "label": "ubuntu-latest" + }, + { + "name": "TestLogPollerWithChaosFinalityTag", + "label": "ubuntu-latest" + }, + { + "name": "TestLogPollerWithChaosPostgresFinalityTag", + "label": "ubuntu-latest" + }, + { + "name": "TestLogPollerWithChaosPostgresFixedDepth", + "label": "ubuntu-latest" + }, + { + "name": "TestLogPollerReplayFixedDepth", + "label": "ubuntu-latest" + }, + { + "name": "TestLogPollerReplayFinalityTag", + "label": "ubuntu-latest" + }, + { + "name": "TestLogPollerManyFiltersFixedDepth", + "label": "ubuntu-latest" + }, + { + "name": "TestLogPollerManyFiltersFinalityTag", + "label": "ubuntu-latest" + } + ] +} \ No newline at end of file diff --git a/integration-tests/smoke/ocr2_test.go b/integration-tests/smoke/ocr2_test.go new file mode 100644 index 00000000..28172754 --- /dev/null +++ b/integration-tests/smoke/ocr2_test.go @@ -0,0 +1,311 @@ +package smoke + +import ( + "fmt" + "math/big" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + "github.com/goplugin/pluginv3.0/v2/core/config/env" + + 
"github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + "github.com/goplugin/pluginv3.0/integration-tests/types/config/node" +) + +// Tests a basic OCRv2 median feed +func TestOCRv2Basic(t *testing.T) { + t.Parallel() + + noMedianPlugin := map[string]string{string(env.MedianPlugin.Cmd): ""} + medianPlugin := map[string]string{string(env.MedianPlugin.Cmd): "plugin-feeds"} + for _, test := range []struct { + name string + env map[string]string + chainReaderAndCodec bool + }{ + {"legacy", noMedianPlugin, false}, + {"legacy-chain-reader", noMedianPlugin, true}, + {"plugins", medianPlugin, false}, + {"plugins-chain-reader", medianPlugin, true}, + } { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.OCR2) + if err != nil { + t.Fatal(err) + } + + network, err := actions.EthereumNetworkConfigFromConfig(l, &config) + require.NoError(t, err, "Error building ethereum network config") + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithPrivateEthereumNetwork(network). + WithMockAdapter(). + WithCLNodeConfig(node.NewConfig(node.NewBaseConfig(), + node.WithOCR2(), + node.WithP2Pv2(), + node.WithTracing(), + )). + WithCLNodeOptions(test_env.WithNodeEnvVars(test.env)). + WithCLNodes(6). + WithFunding(big.NewFloat(.1)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err) + + env.ParallelTransactions(true) + + nodeClients := env.ClCluster.NodeAPIs() + bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:] + + linkToken, err := env.ContractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + + err = actions.FundPluginNodesLocal(workerNodes, env.EVMClient, big.NewFloat(.05)) + require.NoError(t, err, "Error funding Plugin nodes") + + // Gather transmitters + var transmitters []string + for _, node := range workerNodes { + addr, err := node.PrimaryEthAddress() + if err != nil { + require.NoError(t, fmt.Errorf("error getting node's primary ETH address: %w", err)) + } + transmitters = append(transmitters, addr) + } + + ocrOffchainOptions := contracts.DefaultOffChainAggregatorOptions() + aggregatorContracts, err := actions.DeployOCRv2Contracts(1, linkToken, env.ContractDeployer, transmitters, env.EVMClient, ocrOffchainOptions) + require.NoError(t, err, "Error deploying OCRv2 aggregator contracts") + + err = actions.CreateOCRv2JobsLocal(aggregatorContracts, bootstrapNode, workerNodes, env.MockAdapter, "ocr2", 5, env.EVMClient.GetChainID().Uint64(), false, test.chainReaderAndCodec) + require.NoError(t, err, "Error creating OCRv2 jobs") + + ocrv2Config, err := actions.BuildMedianOCR2ConfigLocal(workerNodes, ocrOffchainOptions) + require.NoError(t, err, "Error building OCRv2 config") + + err = actions.ConfigureOCRv2AggregatorContracts(env.EVMClient, ocrv2Config, aggregatorContracts) + require.NoError(t, err, "Error configuring OCRv2 aggregator contracts") + + err = actions.WatchNewOCR2Round(1, aggregatorContracts, env.EVMClient, time.Minute*5, l) + require.NoError(t, err, "Error watching for new OCR2 round") + roundData, err := aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(1)) + require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail") + require.Equal(t, int64(5), roundData.Answer.Int64(), + "Expected 
latest answer from OCR contract to be 5 but got %d", + roundData.Answer.Int64(), + ) + + err = env.MockAdapter.SetAdapterBasedIntValuePath("ocr2", []string{http.MethodGet, http.MethodPost}, 10) + require.NoError(t, err) + err = actions.WatchNewOCR2Round(2, aggregatorContracts, env.EVMClient, time.Minute*5, l) + require.NoError(t, err) + + roundData, err = aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(2)) + require.NoError(t, err, "Error getting latest OCR answer") + require.Equal(t, int64(10), roundData.Answer.Int64(), + "Expected latest answer from OCR contract to be 10 but got %d", + roundData.Answer.Int64(), + ) + }) + } +} + +// Tests that just calling requestNewRound() will properly induce more rounds +func TestOCRv2Request(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.ForwarderOcr) + if err != nil { + t.Fatal(err) + } + + network, err := actions.EthereumNetworkConfigFromConfig(l, &config) + require.NoError(t, err, "Error building ethereum network config") + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithPrivateEthereumNetwork(network). + WithMockAdapter(). + WithCLNodeConfig(node.NewConfig(node.NewBaseConfig(), + node.WithOCR2(), + node.WithP2Pv2(), + node.WithTracing(), + )). + WithCLNodes(6). + WithFunding(big.NewFloat(.1)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err) + + env.ParallelTransactions(true) + + nodeClients := env.ClCluster.NodeAPIs() + bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:] + + linkToken, err := env.ContractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + + err = actions.FundPluginNodesLocal(workerNodes, env.EVMClient, big.NewFloat(.05)) + require.NoError(t, err, "Error funding Plugin nodes") + + // Gather transmitters + var transmitters []string + for _, node := range workerNodes { + addr, err := node.PrimaryEthAddress() + if err != nil { + require.NoError(t, fmt.Errorf("error getting node's primary ETH address: %w", err)) + } + transmitters = append(transmitters, addr) + } + + ocrOffchainOptions := contracts.DefaultOffChainAggregatorOptions() + aggregatorContracts, err := actions.DeployOCRv2Contracts(1, linkToken, env.ContractDeployer, transmitters, env.EVMClient, ocrOffchainOptions) + require.NoError(t, err, "Error deploying OCRv2 aggregator contracts") + + err = actions.CreateOCRv2JobsLocal(aggregatorContracts, bootstrapNode, workerNodes, env.MockAdapter, "ocr2", 5, env.EVMClient.GetChainID().Uint64(), false, false) + require.NoError(t, err, "Error creating OCRv2 jobs") + + ocrv2Config, err := actions.BuildMedianOCR2ConfigLocal(workerNodes, ocrOffchainOptions) + require.NoError(t, err, "Error building OCRv2 config") + + err = actions.ConfigureOCRv2AggregatorContracts(env.EVMClient, ocrv2Config, aggregatorContracts) + require.NoError(t, err, "Error configuring OCRv2 aggregator contracts") + + err = actions.WatchNewOCR2Round(1, aggregatorContracts, env.EVMClient, time.Minute*5, l) + require.NoError(t, err, "Error watching for new OCR2 round") + roundData, err := aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(1)) + require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail") + require.Equal(t, int64(5), roundData.Answer.Int64(), + "Expected latest answer from 
OCR contract to be 5 but got %d", + roundData.Answer.Int64(), + ) + + // Keep the mockserver value the same and continually request new rounds + for round := 2; round <= 4; round++ { + err = actions.StartNewOCR2Round(int64(round), aggregatorContracts, env.EVMClient, time.Minute*5, l) + require.NoError(t, err, "Error starting new OCR2 round") + roundData, err := aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(int64(round))) + require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail") + require.Equal(t, int64(5), roundData.Answer.Int64(), + "Expected round %d answer from OCR contract to be 5 but got %d", + round, + roundData.Answer.Int64(), + ) + } +} + +func TestOCRv2JobReplacement(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.OCR2) + if err != nil { + t.Fatal(err) + } + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithGeth(). + WithMockAdapter(). + WithCLNodeConfig(node.NewConfig(node.NewBaseConfig(), + node.WithOCR2(), + node.WithP2Pv2(), + node.WithTracing(), + )). + WithCLNodes(6). + WithFunding(big.NewFloat(.1)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err) + + env.ParallelTransactions(true) + + nodeClients := env.ClCluster.NodeAPIs() + bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:] + + linkToken, err := env.ContractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + + err = actions.FundPluginNodesLocal(workerNodes, env.EVMClient, big.NewFloat(.05)) + require.NoError(t, err, "Error funding Plugin nodes") + + // Gather transmitters + var transmitters []string + for _, node := range workerNodes { + addr, err := node.PrimaryEthAddress() + if err != nil { + require.NoError(t, fmt.Errorf("error getting node's primary ETH address: %w", err)) + } + transmitters = append(transmitters, addr) + } + + ocrOffchainOptions := contracts.DefaultOffChainAggregatorOptions() + aggregatorContracts, err := actions.DeployOCRv2Contracts(1, linkToken, env.ContractDeployer, transmitters, env.EVMClient, ocrOffchainOptions) + require.NoError(t, err, "Error deploying OCRv2 aggregator contracts") + + err = actions.CreateOCRv2JobsLocal(aggregatorContracts, bootstrapNode, workerNodes, env.MockAdapter, "ocr2", 5, env.EVMClient.GetChainID().Uint64(), false, false) + require.NoError(t, err, "Error creating OCRv2 jobs") + + ocrv2Config, err := actions.BuildMedianOCR2ConfigLocal(workerNodes, ocrOffchainOptions) + require.NoError(t, err, "Error building OCRv2 config") + + err = actions.ConfigureOCRv2AggregatorContracts(env.EVMClient, ocrv2Config, aggregatorContracts) + require.NoError(t, err, "Error configuring OCRv2 aggregator contracts") + + err = actions.WatchNewOCR2Round(1, aggregatorContracts, env.EVMClient, time.Minute*5, l) + require.NoError(t, err, "Error watching for new OCR2 round") + roundData, err := aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(1)) + require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail") + require.Equal(t, int64(5), roundData.Answer.Int64(), + "Expected latest answer from 
OCR contract to be 5 but got %d", + roundData.Answer.Int64(), + ) + + err = env.MockAdapter.SetAdapterBasedIntValuePath("ocr2", []string{http.MethodGet, http.MethodPost}, 10) + require.NoError(t, err) + err = actions.WatchNewOCR2Round(2, aggregatorContracts, env.EVMClient, time.Minute*5, l) + require.NoError(t, err) + + roundData, err = aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(2)) + require.NoError(t, err, "Error getting latest OCR answer") + require.Equal(t, int64(10), roundData.Answer.Int64(), + "Expected latest answer from OCR contract to be 10 but got %d", + roundData.Answer.Int64(), + ) + + err = actions.DeleteJobs(nodeClients) + require.NoError(t, err) + + err = actions.DeleteBridges(nodeClients) + require.NoError(t, err) + + err = actions.CreateOCRv2JobsLocal(aggregatorContracts, bootstrapNode, workerNodes, env.MockAdapter, "ocr2", 15, env.EVMClient.GetChainID().Uint64(), false, false) + require.NoError(t, err, "Error creating OCRv2 jobs") + + err = actions.WatchNewOCR2Round(3, aggregatorContracts, env.EVMClient, time.Minute*3, l) + require.NoError(t, err, "Error watching for new OCR2 round") + roundData, err = aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(3)) + require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail") + require.Equal(t, int64(15), roundData.Answer.Int64(), + "Expected latest answer from OCR contract to be 15 but got %d", + roundData.Answer.Int64(), + ) +} diff --git a/integration-tests/smoke/ocr2vrf_test.go b/integration-tests/smoke/ocr2vrf_test.go new file mode 100644 index 00000000..01b807d7 --- /dev/null +++ b/integration-tests/smoke/ocr2vrf_test.go @@ -0,0 +1,211 @@ +package smoke + +import ( + "fmt" + "math/big" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/goplugin/plugin-testing-framework/blockchain" + ctf_config "github.com/goplugin/plugin-testing-framework/config" + 
"github.com/goplugin/plugin-testing-framework/k8s/environment" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/plugin" + eth "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/ethereum" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/networks" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/actions/ocr2vrf_actions" + "github.com/goplugin/pluginv3.0/integration-tests/actions/ocr2vrf_actions/ocr2vrf_constants" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/config" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +var ocr2vrfSmokeConfig *testconfig.TestConfig + +func TestOCR2VRFRedeemModel(t *testing.T) { + t.Parallel() + t.Skip("VRFv3 is on pause, skipping") + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.OCR2) + if err != nil { + t.Fatal(err) + } + + testEnvironment, testNetwork := setupOCR2VRFEnvironment(t) + if testEnvironment.WillUseRemoteRunner() { + return + } + + chainClient, err := blockchain.NewEVMClient(testNetwork, testEnvironment, l) + require.NoError(t, err, "Error connecting to blockchain") + contractDeployer, err := contracts.NewContractDeployer(chainClient, l) + require.NoError(t, err, "Error building contract deployer") + pluginNodes, err := client.ConnectPluginNodes(testEnvironment) + require.NoError(t, err, "Error connecting to Plugin nodes") + nodeAddresses, err := actions.PluginNodeAddresses(pluginNodes) + require.NoError(t, err, "Retreiving on-chain wallet addresses for plugin nodes shouldn't fail") + + t.Cleanup(func() { + err := actions.TeardownSuite(t, testEnvironment, pluginNodes, nil, 
zapcore.ErrorLevel, &config, chainClient) + require.NoError(t, err, "Error tearing down environment") + }) + + chainClient.ParallelTransactions(true) + + linkToken, err := contractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Error deploying PLI token") + + mockETHLinkFeed, err := contractDeployer.DeployMockETHPLIFeed(ocr2vrf_constants.LinkEthFeedResponse) + require.NoError(t, err, "Error deploying Mock ETH/PLI Feed") + + _, _, vrfBeaconContract, consumerContract, subID := ocr2vrf_actions.SetupOCR2VRFUniverse( + t, + linkToken, + mockETHLinkFeed, + contractDeployer, + chainClient, + nodeAddresses, + pluginNodes, + testNetwork, + ) + + //Request and Redeem Randomness + requestID := ocr2vrf_actions.RequestAndRedeemRandomness( + t, + consumerContract, + chainClient, + vrfBeaconContract, + ocr2vrf_constants.NumberOfRandomWordsToRequest, + subID, + ocr2vrf_constants.ConfirmationDelay, + ocr2vrf_constants.RandomnessRedeemTransmissionEventTimeout, + ) + + for i := uint16(0); i < ocr2vrf_constants.NumberOfRandomWordsToRequest; i++ { + randomness, err := consumerContract.GetRandomnessByRequestId(testcontext.Get(t), requestID, big.NewInt(int64(i))) + require.NoError(t, err) + l.Info().Interface("Random Number", randomness).Interface("Randomness Number Index", i).Msg("Randomness retrieved from Consumer contract") + require.NotEqual(t, 0, randomness.Uint64(), "Randomness retrieved from Consumer contract give an answer other than 0") + } +} + +func TestOCR2VRFFulfillmentModel(t *testing.T) { + t.Parallel() + t.Skip("VRFv3 is on pause, skipping") + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.OCR2) + if err != nil { + t.Fatal(err) + } + + testEnvironment, testNetwork := setupOCR2VRFEnvironment(t) + if testEnvironment.WillUseRemoteRunner() { + return + } + + chainClient, err := blockchain.NewEVMClient(testNetwork, testEnvironment, l) + require.NoError(t, err, "Error connecting to blockchain") + contractDeployer, err := 
contracts.NewContractDeployer(chainClient, l) + require.NoError(t, err, "Error building contract deployer") + pluginNodes, err := client.ConnectPluginNodes(testEnvironment) + require.NoError(t, err, "Error connecting to Plugin nodes") + nodeAddresses, err := actions.PluginNodeAddresses(pluginNodes) + require.NoError(t, err, "Retreiving on-chain wallet addresses for plugin nodes shouldn't fail") + + t.Cleanup(func() { + err := actions.TeardownSuite(t, testEnvironment, pluginNodes, nil, zapcore.ErrorLevel, &config, chainClient) + require.NoError(t, err, "Error tearing down environment") + }) + + chainClient.ParallelTransactions(true) + + linkToken, err := contractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Error deploying PLI token") + + mockETHLinkFeed, err := contractDeployer.DeployMockETHPLIFeed(ocr2vrf_constants.LinkEthFeedResponse) + require.NoError(t, err, "Error deploying Mock ETH/PLI Feed") + + _, _, vrfBeaconContract, consumerContract, subID := ocr2vrf_actions.SetupOCR2VRFUniverse( + t, + linkToken, + mockETHLinkFeed, + contractDeployer, + chainClient, + nodeAddresses, + pluginNodes, + testNetwork, + ) + + requestID := ocr2vrf_actions.RequestRandomnessFulfillmentAndWaitForFulfilment( + t, + consumerContract, + chainClient, + vrfBeaconContract, + ocr2vrf_constants.NumberOfRandomWordsToRequest, + subID, + ocr2vrf_constants.ConfirmationDelay, + ocr2vrf_constants.RandomnessFulfilmentTransmissionEventTimeout, + ) + + for i := uint16(0); i < ocr2vrf_constants.NumberOfRandomWordsToRequest; i++ { + randomness, err := consumerContract.GetRandomnessByRequestId(testcontext.Get(t), requestID, big.NewInt(int64(i))) + require.NoError(t, err, "Error getting Randomness result from Consumer Contract") + l.Info().Interface("Random Number", randomness).Interface("Randomness Number Index", i).Msg("Randomness Fulfillment retrieved from Consumer contract") + require.NotEqual(t, 0, randomness.Uint64(), "Randomness Fulfillment retrieved from Consumer contract give 
an answer other than 0") + } +} + +func setupOCR2VRFEnvironment(t *testing.T) (testEnvironment *environment.Environment, testNetwork blockchain.EVMNetwork) { + if ocr2vrfSmokeConfig == nil { + c, err := testconfig.GetConfig("Smoke", tc.OCR2VRF) + if err != nil { + t.Fatal(err) + } + ocr2vrfSmokeConfig = &c + } + + testNetwork = networks.MustGetSelectedNetworkConfig(ocr2vrfSmokeConfig.Network)[0] + evmConfig := eth.New(nil) + if !testNetwork.Simulated { + evmConfig = eth.New(ð.Props{ + NetworkName: testNetwork.Name, + Simulated: testNetwork.Simulated, + WsURLs: testNetwork.URLs, + }) + } + + var overrideFn = func(_ interface{}, target interface{}) { + ctf_config.MustConfigOverridePluginVersion(ocr2vrfSmokeConfig.PluginImage, target) + ctf_config.MightConfigOverridePyroscopeKey(ocr2vrfSmokeConfig.Pyroscope, target) + } + + cd := plugin.NewWithOverride(0, map[string]interface{}{ + "replicas": 6, + "toml": networks.AddNetworkDetailedConfig( + config.BaseOCR2Config, + ocr2vrfSmokeConfig.Pyroscope, + config.DefaultOCR2VRFNetworkDetailTomlConfig, + testNetwork, + ), + }, ocr2vrfSmokeConfig.PluginImage, overrideFn) + + testEnvironment = environment.New(&environment.Config{ + NamespacePrefix: fmt.Sprintf("smoke-ocr2vrf-%s", strings.ReplaceAll(strings.ToLower(testNetwork.Name), " ", "-")), + Test: t, + }). + AddHelm(evmConfig). 
+ AddHelm(cd) + err := testEnvironment.Run() + + require.NoError(t, err, "Error running test environment") + + return testEnvironment, testNetwork +} diff --git a/integration-tests/smoke/ocr_test.go b/integration-tests/smoke/ocr_test.go new file mode 100644 index 00000000..f9b52e91 --- /dev/null +++ b/integration-tests/smoke/ocr_test.go @@ -0,0 +1,143 @@ +package smoke + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +func TestOCRBasic(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.OCR) + if err != nil { + t.Fatal(err) + } + + network, err := actions.EthereumNetworkConfigFromConfig(l, &config) + require.NoError(t, err, "Error building ethereum network config") + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithPrivateEthereumNetwork(network). + WithMockAdapter(). + WithCLNodes(6). + WithFunding(big.NewFloat(.5)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err) + + env.ParallelTransactions(true) + + nodeClients := env.ClCluster.NodeAPIs() + bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:] + + linkTokenContract, err := env.ContractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + + ocrInstances, err := actions.DeployOCRContractsLocal(1, linkTokenContract, env.ContractDeployer, workerNodes, env.EVMClient) + require.NoError(t, err) + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + err = actions.CreateOCRJobsLocal(ocrInstances, bootstrapNode, workerNodes, 5, env.MockAdapter, env.EVMClient.GetChainID()) + require.NoError(t, err) + + err = actions.WatchNewRound(1, ocrInstances, env.EVMClient, l) + require.NoError(t, err) + + answer, err := ocrInstances[0].GetLatestAnswer(testcontext.Get(t)) + require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail") + require.Equal(t, int64(5), answer.Int64(), "Expected latest answer from OCR contract to be 5 but got %d", answer.Int64()) + + err = actions.SetAllAdapterResponsesToTheSameValueLocal(10, ocrInstances, workerNodes, env.MockAdapter) + require.NoError(t, err) + err = actions.WatchNewRound(2, ocrInstances, env.EVMClient, l) + require.NoError(t, err) + + answer, err = ocrInstances[0].GetLatestAnswer(testcontext.Get(t)) + require.NoError(t, err, "Error getting latest OCR answer") + require.Equal(t, int64(10), answer.Int64(), "Expected latest answer from OCR contract to be 10 but got %d", answer.Int64()) +} + +func TestOCRJobReplacement(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.OCR) + if err != nil { + t.Fatal(err) + } + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithGeth(). + WithMockAdapter(). + WithCLNodes(6). + WithFunding(big.NewFloat(.1)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err) + + env.ParallelTransactions(true) + + nodeClients := env.ClCluster.NodeAPIs() + bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:] + + linkTokenContract, err := env.ContractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + + ocrInstances, err := actions.DeployOCRContractsLocal(1, linkTokenContract, env.ContractDeployer, workerNodes, env.EVMClient) + require.NoError(t, err) + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Error waiting for events") + + err = actions.CreateOCRJobsLocal(ocrInstances, bootstrapNode, workerNodes, 5, env.MockAdapter, env.EVMClient.GetChainID()) + require.NoError(t, err) + + err = actions.WatchNewRound(1, ocrInstances, env.EVMClient, l) + require.NoError(t, err) + + answer, err := ocrInstances[0].GetLatestAnswer(testcontext.Get(t)) + require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail") + require.Equal(t, int64(5), answer.Int64(), "Expected latest answer from OCR contract to be 5 but got %d", answer.Int64()) + + err = actions.SetAllAdapterResponsesToTheSameValueLocal(10, ocrInstances, workerNodes, env.MockAdapter) + require.NoError(t, err) + err = actions.WatchNewRound(2, ocrInstances, env.EVMClient, l) + require.NoError(t, err) + + answer, err = ocrInstances[0].GetLatestAnswer(testcontext.Get(t)) + require.NoError(t, err, "Error getting latest OCR answer") + require.Equal(t, int64(10), answer.Int64(), "Expected latest answer from OCR contract to be 10 but got %d", answer.Int64()) + + err = actions.DeleteJobs(nodeClients) + require.NoError(t, err) + + err = actions.DeleteBridges(nodeClients) + require.NoError(t, err) + + //Recreate job + err = actions.CreateOCRJobsLocal(ocrInstances, bootstrapNode, workerNodes, 5, env.MockAdapter, env.EVMClient.GetChainID()) + require.NoError(t, err) + + err = actions.WatchNewRound(1, ocrInstances, env.EVMClient, l) + require.NoError(t, err) + + answer, 
err = ocrInstances[0].GetLatestAnswer(testcontext.Get(t)) + require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail") + require.Equal(t, int64(10), answer.Int64(), "Expected latest answer from OCR contract to be 10 but got %d", answer.Int64()) + +} diff --git a/integration-tests/smoke/runlog_test.go b/integration-tests/smoke/runlog_test.go new file mode 100644 index 00000000..ad64c55a --- /dev/null +++ b/integration-tests/smoke/runlog_test.go @@ -0,0 +1,104 @@ +package smoke + +import ( + "fmt" + "math/big" + "net/http" + "strings" + "testing" + + "github.com/google/uuid" + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +func TestRunLogBasic(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.RunLog) + if err != nil { + t.Fatal(err) + } + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithGeth(). + WithMockAdapter(). + WithCLNodes(1). + WithFunding(big.NewFloat(.1)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err) + + lt, err := env.ContractDeployer.DeployLinkTokenContract() + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + oracle, err := env.ContractDeployer.DeployOracle(lt.Address()) + require.NoError(t, err, "Deploying Oracle Contract shouldn't fail") + consumer, err := env.ContractDeployer.DeployAPIConsumer(lt.Address()) + require.NoError(t, err, "Deploying Consumer Contract shouldn't fail") + err = env.EVMClient.SetDefaultWallet(0) + require.NoError(t, err, "Setting default wallet shouldn't fail") + err = lt.Transfer(consumer.Address(), big.NewInt(2e18)) + require.NoError(t, err, "Transferring %d to consumer contract shouldn't fail", big.NewInt(2e18)) + + err = env.MockAdapter.SetAdapterBasedIntValuePath("/variable", []string{http.MethodPost}, 5) + require.NoError(t, err, "Setting mock adapter value path shouldn't fail") + + jobUUID := uuid.New() + + bta := client.BridgeTypeAttributes{ + Name: fmt.Sprintf("five-%s", jobUUID.String()), + URL: fmt.Sprintf("%s/variable", env.MockAdapter.InternalEndpoint), + } + err = env.ClCluster.Nodes[0].API.MustCreateBridge(&bta) + require.NoError(t, err, "Creating bridge shouldn't fail") + + os := &client.DirectRequestTxPipelineSpec{ + BridgeTypeAttributes: bta, + DataPath: "data,result", + } + ost, err := os.String() + require.NoError(t, err, "Building observation source spec shouldn't fail") + + _, err = env.ClCluster.Nodes[0].API.MustCreateJob(&client.DirectRequestJobSpec{ + Name: fmt.Sprintf("direct-request-%s", uuid.NewString()), + MinIncomingConfirmations: "1", + ContractAddress: oracle.Address(), + EVMChainID: env.EVMClient.GetChainID().String(), + ExternalJobID: jobUUID.String(), + ObservationSource: ost, + }) + require.NoError(t, err, "Creating direct_request job shouldn't fail") + + jobUUIDReplaces := strings.Replace(jobUUID.String(), "-", "", 4) + var jobID [32]byte + copy(jobID[:], jobUUIDReplaces) + err = consumer.CreateRequestTo( + oracle.Address(), + jobID, + 
big.NewInt(1e18), + fmt.Sprintf("%s/variable", env.MockAdapter.InternalEndpoint), + "data,result", + big.NewInt(100), + ) + require.NoError(t, err, "Calling oracle contract shouldn't fail") + + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + d, err := consumer.Data(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting data from consumer contract shouldn't fail") + g.Expect(d).ShouldNot(gomega.BeNil(), "Expected the initial on chain data to be nil") + l.Debug().Int64("Data", d.Int64()).Msg("Found on chain") + g.Expect(d.Int64()).Should(gomega.BeNumerically("==", 5), "Expected the on-chain data to be 5, but found %d", d.Int64()) + }, "2m", "1s").Should(gomega.Succeed()) +} diff --git a/integration-tests/smoke/vrf_test.go b/integration-tests/smoke/vrf_test.go new file mode 100644 index 00000000..c838c3ba --- /dev/null +++ b/integration-tests/smoke/vrf_test.go @@ -0,0 +1,232 @@ +package smoke + +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/google/uuid" + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/vrfv1" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +func TestVRFBasic(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.VRF) + if err != nil { + t.Fatal(err) + } + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithGeth(). + WithCLNodes(1). + WithFunding(big.NewFloat(.1)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err) + env.ParallelTransactions(true) + + lt, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + contracts, err := vrfv1.DeployVRFContracts(env.ContractDeployer, env.EVMClient, lt) + require.NoError(t, err, "Deploying VRF Contracts shouldn't fail") + + err = lt.Transfer(contracts.Consumer.Address(), big.NewInt(2e18)) + require.NoError(t, err, "Funding consumer contract shouldn't fail") + _, err = env.ContractDeployer.DeployVRFContract() + require.NoError(t, err, "Deploying VRF contract shouldn't fail") + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail") + + for _, n := range env.ClCluster.Nodes { + nodeKey, err := n.API.MustCreateVRFKey() + require.NoError(t, err, "Creating VRF key shouldn't fail") + l.Debug().Interface("Key JSON", nodeKey).Msg("Created proving key") + pubKeyCompressed := nodeKey.Data.ID + jobUUID := uuid.New() + os := &client.VRFTxPipelineSpec{ + Address: contracts.Coordinator.Address(), + } + ost, err := os.String() + require.NoError(t, err, "Building observation source spec shouldn't fail") + job, err := n.API.MustCreateJob(&client.VRFJobSpec{ + Name: fmt.Sprintf("vrf-%s", jobUUID), + CoordinatorAddress: contracts.Coordinator.Address(), + MinIncomingConfirmations: 1, + PublicKey: pubKeyCompressed, + ExternalJobID: jobUUID.String(), + EVMChainID: env.EVMClient.GetChainID().String(), + ObservationSource: ost, + }) + require.NoError(t, err, "Creating VRF Job shouldn't fail") + + oracleAddr, err := n.API.PrimaryEthAddress() + require.NoError(t, err, "Getting primary ETH address of plugin node shouldn't fail") + provingKey, err := actions.EncodeOnChainVRFProvingKey(*nodeKey) + require.NoError(t, err, "Encoding on-chain VRF Proving key shouldn't fail") + err = contracts.Coordinator.RegisterProvingKey( + big.NewInt(1), + oracleAddr, + provingKey, + 
actions.EncodeOnChainExternalJobID(jobUUID), + ) + require.NoError(t, err, "Registering the on-chain VRF Proving key shouldn't fail") + encodedProvingKeys := make([][2]*big.Int, 0) + encodedProvingKeys = append(encodedProvingKeys, provingKey) + + requestHash, err := contracts.Coordinator.HashOfKey(testcontext.Get(t), encodedProvingKeys[0]) + require.NoError(t, err, "Getting Hash of encoded proving keys shouldn't fail") + err = contracts.Consumer.RequestRandomness(requestHash, big.NewInt(1)) + require.NoError(t, err, "Requesting randomness shouldn't fail") + + gom := gomega.NewGomegaWithT(t) + timeout := time.Minute * 2 + gom.Eventually(func(g gomega.Gomega) { + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(job.Data.ID) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Job execution shouldn't fail") + + out, err := contracts.Consumer.RandomnessOutput(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting the randomness output of the consumer shouldn't fail") + // Checks that the job has actually run + g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically(">=", 1), + fmt.Sprintf("Expected the VRF job to run once or more after %s", timeout)) + + // TODO: This is an imperfect check, given it's a random number, it CAN be 0, but chances are unlikely. + // So we're just checking that the answer has changed to something other than the default (0) + // There's a better formula to ensure that VRF response is as expected, detailed under Technical Walkthrough. 
+ // https://bl.chain.link/plugin-vrf-on-chain-verifiable-randomness/ + g.Expect(out.Uint64()).ShouldNot(gomega.BeNumerically("==", 0), "Expected the VRF job give an answer other than 0") + l.Debug().Uint64("Output", out.Uint64()).Msg("Randomness fulfilled") + }, timeout, "1s").Should(gomega.Succeed()) + } +} + +func TestVRFJobReplacement(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + config, err := tc.GetConfig("Smoke", tc.VRF) + if err != nil { + t.Fatal(err) + } + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithGeth(). + WithCLNodes(1). + WithFunding(big.NewFloat(.1)). + WithStandardCleanup(). + Build() + require.NoError(t, err) + env.ParallelTransactions(true) + + lt, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err, "Deploying Link Token Contract shouldn't fail") + contracts, err := vrfv1.DeployVRFContracts(env.ContractDeployer, env.EVMClient, lt) + require.NoError(t, err, "Deploying VRF Contracts shouldn't fail") + + err = lt.Transfer(contracts.Consumer.Address(), big.NewInt(2e18)) + require.NoError(t, err, "Funding consumer contract shouldn't fail") + _, err = env.ContractDeployer.DeployVRFContract() + require.NoError(t, err, "Deploying VRF contract shouldn't fail") + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail") + + for _, n := range env.ClCluster.Nodes { + nodeKey, err := n.API.MustCreateVRFKey() + require.NoError(t, err, "Creating VRF key shouldn't fail") + l.Debug().Interface("Key JSON", nodeKey).Msg("Created proving key") + pubKeyCompressed := nodeKey.Data.ID + jobUUID := uuid.New() + os := &client.VRFTxPipelineSpec{ + Address: contracts.Coordinator.Address(), + } + ost, err := os.String() + require.NoError(t, err, "Building observation source spec shouldn't fail") + job, err := n.API.MustCreateJob(&client.VRFJobSpec{ + Name: fmt.Sprintf("vrf-%s", jobUUID), + CoordinatorAddress: 
contracts.Coordinator.Address(), + MinIncomingConfirmations: 1, + PublicKey: pubKeyCompressed, + ExternalJobID: jobUUID.String(), + EVMChainID: env.EVMClient.GetChainID().String(), + ObservationSource: ost, + }) + require.NoError(t, err, "Creating VRF Job shouldn't fail") + + oracleAddr, err := n.API.PrimaryEthAddress() + require.NoError(t, err, "Getting primary ETH address of plugin node shouldn't fail") + provingKey, err := actions.EncodeOnChainVRFProvingKey(*nodeKey) + require.NoError(t, err, "Encoding on-chain VRF Proving key shouldn't fail") + err = contracts.Coordinator.RegisterProvingKey( + big.NewInt(1), + oracleAddr, + provingKey, + actions.EncodeOnChainExternalJobID(jobUUID), + ) + require.NoError(t, err, "Registering the on-chain VRF Proving key shouldn't fail") + encodedProvingKeys := make([][2]*big.Int, 0) + encodedProvingKeys = append(encodedProvingKeys, provingKey) + + requestHash, err := contracts.Coordinator.HashOfKey(testcontext.Get(t), encodedProvingKeys[0]) + require.NoError(t, err, "Getting Hash of encoded proving keys shouldn't fail") + err = contracts.Consumer.RequestRandomness(requestHash, big.NewInt(1)) + require.NoError(t, err, "Requesting randomness shouldn't fail") + + gom := gomega.NewGomegaWithT(t) + timeout := time.Minute * 2 + gom.Eventually(func(g gomega.Gomega) { + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(job.Data.ID) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Job execution shouldn't fail") + + out, err := contracts.Consumer.RandomnessOutput(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting the randomness output of the consumer shouldn't fail") + // Checks that the job has actually run + g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically(">=", 1), + fmt.Sprintf("Expected the VRF job to run once or more after %s", timeout)) + + g.Expect(out.Uint64()).ShouldNot(gomega.BeNumerically("==", 0), "Expected the VRF job give an answer other than 0") + l.Debug().Uint64("Output", 
out.Uint64()).Msg("Randomness fulfilled") + }, timeout, "1s").Should(gomega.Succeed()) + + err = n.API.MustDeleteJob(job.Data.ID) + require.NoError(t, err) + + job, err = n.API.MustCreateJob(&client.VRFJobSpec{ + Name: fmt.Sprintf("vrf-%s", jobUUID), + CoordinatorAddress: contracts.Coordinator.Address(), + MinIncomingConfirmations: 1, + PublicKey: pubKeyCompressed, + ExternalJobID: jobUUID.String(), + EVMChainID: env.EVMClient.GetChainID().String(), + ObservationSource: ost, + }) + require.NoError(t, err, "Recreating VRF Job shouldn't fail") + gom.Eventually(func(g gomega.Gomega) { + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(job.Data.ID) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Job execution shouldn't fail") + + out, err := contracts.Consumer.RandomnessOutput(testcontext.Get(t)) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Getting the randomness output of the consumer shouldn't fail") + // Checks that the job has actually run + g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically(">=", 1), + fmt.Sprintf("Expected the VRF job to run once or more after %s", timeout)) + g.Expect(out.Uint64()).ShouldNot(gomega.BeNumerically("==", 0), "Expected the VRF job give an answer other than 0") + l.Debug().Uint64("Output", out.Uint64()).Msg("Randomness fulfilled") + }, timeout, "1s").Should(gomega.Succeed()) + } +} diff --git a/integration-tests/smoke/vrfv2_test.go b/integration-tests/smoke/vrfv2_test.go new file mode 100644 index 00000000..556c426c --- /dev/null +++ b/integration-tests/smoke/vrfv2_test.go @@ -0,0 +1,868 @@ +package smoke + +import ( + "fmt" + "math/big" + "strings" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + + commonassets "github.com/goplugin/plugin-common/pkg/assets" + "github.com/goplugin/plugin-testing-framework/blockchain" + 
"github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/conversions" + "github.com/goplugin/plugin-testing-framework/utils/ptr" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + "github.com/goplugin/pluginv3.0/integration-tests/actions" + vrfcommon "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/common" + "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/vrfv2" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" +) + +func TestVRFv2Basic(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.VRFv2) + require.NoError(t, err, "Error getting config") + + useVRFOwner := false + useTestCoordinator := false + network, err := actions.EthereumNetworkConfigFromConfig(l, &config) + require.NoError(t, err, "Error building ethereum network config") + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithPrivateEthereumNetwork(network). + WithCLNodes(1). + WithFunding(big.NewFloat(*config.Common.PluginNodeFunding)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err, "error creating test env") + + env.ParallelTransactions(true) + + mockETHLinkFeed, err := actions.DeployMockETHLinkFeed(env.ContractDeployer, big.NewInt(*config.VRFv2.General.LinkNativeFeedResponse)) + require.NoError(t, err) + linkToken, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err) + + // register proving key against oracle address (sending key) in order to test oracleWithdraw + defaultWalletAddress := env.EVMClient.GetDefaultWallet().Address() + + numberOfTxKeysToCreate := 1 + vrfv2Contracts, subIDs, vrfv2KeyData, nodesMap, err := vrfv2.SetupVRFV2Environment( + env, + []vrfcommon.VRFNodeType{vrfcommon.VRF}, + &config, + useVRFOwner, + useTestCoordinator, + linkToken, + mockETHLinkFeed, + defaultWalletAddress, + numberOfTxKeysToCreate, + 1, + 1, + l, + ) + require.NoError(t, err, "error setting up VRF v2 env") + + subID := subIDs[0] + + subscription, err := vrfv2Contracts.CoordinatorV2.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err, "error getting subscription information") + + vrfv2.LogSubDetails(l, subscription, subID, vrfv2Contracts.CoordinatorV2) + + t.Run("Request Randomness", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + subBalanceBeforeRequest := subscription.Balance + + jobRunsBeforeTest, err := nodesMap[vrfcommon.VRF].CLNode.API.MustReadRunsByJob(nodesMap[vrfcommon.VRF].Job.Data.ID) + require.NoError(t, err, "error reading job runs") + + // test and assert + randomWordsFulfilledEvent, err := vrfv2.RequestRandomnessAndWaitForFulfillment( + l, + vrfv2Contracts.VRFV2Consumer[0], + vrfv2Contracts.CoordinatorV2, + subID, + vrfv2KeyData, + *configCopy.VRFv2.General.MinimumConfirmations, + *configCopy.VRFv2.General.CallbackGasLimit, + *configCopy.VRFv2.General.NumberOfWords, + *configCopy.VRFv2.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2.General.RandomnessRequestCountPerRequestDeviation, + 
configCopy.VRFv2.General.RandomWordsFulfilledEventTimeout.Duration, + ) + require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + + expectedSubBalanceJuels := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment) + subscription, err = vrfv2Contracts.CoordinatorV2.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err, "error getting subscription information") + subBalanceAfterRequest := subscription.Balance + require.Equal(t, expectedSubBalanceJuels, subBalanceAfterRequest) + + jobRuns, err := nodesMap[vrfcommon.VRF].CLNode.API.MustReadRunsByJob(nodesMap[vrfcommon.VRF].Job.Data.ID) + require.NoError(t, err, "error reading job runs") + require.Equal(t, len(jobRunsBeforeTest.Data)+1, len(jobRuns.Data)) + + status, err := vrfv2Contracts.VRFV2Consumer[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + require.NoError(t, err, "error getting rand request status") + require.True(t, status.Fulfilled) + l.Debug().Bool("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status") + + require.Equal(t, *config.VRFv2.General.NumberOfWords, uint32(len(status.RandomWords))) + for _, w := range status.RandomWords { + l.Info().Str("Output", w.String()).Msg("Randomness fulfilled") + require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0") + } + }) + + t.Run("Direct Funding (VRFV2Wrapper)", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + wrapperContracts, wrapperSubID, err := vrfv2.SetupVRFV2WrapperEnvironment( + env, + &configCopy, + linkToken, + mockETHLinkFeed, + vrfv2Contracts.CoordinatorV2, + vrfv2KeyData.KeyHash, + 1, + ) + require.NoError(t, err) + wrapperConsumer := wrapperContracts.LoadTestConsumers[0] + + wrapperConsumerJuelsBalanceBeforeRequest, err := linkToken.BalanceOf(testcontext.Get(t), wrapperConsumer.Address()) + require.NoError(t, err, "Error getting wrapper consumer balance") + + 
wrapperSubscription, err := vrfv2Contracts.CoordinatorV2.GetSubscription(testcontext.Get(t), *wrapperSubID) + require.NoError(t, err, "Error getting subscription information") + subBalanceBeforeRequest := wrapperSubscription.Balance + + // Request Randomness and wait for fulfillment event + randomWordsFulfilledEvent, err := vrfv2.DirectFundingRequestRandomnessAndWaitForFulfillment( + l, + wrapperConsumer, + vrfv2Contracts.CoordinatorV2, + *wrapperSubID, + vrfv2KeyData, + *configCopy.VRFv2.General.MinimumConfirmations, + *configCopy.VRFv2.General.CallbackGasLimit, + *configCopy.VRFv2.General.NumberOfWords, + *configCopy.VRFv2.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2.General.RandomnessRequestCountPerRequestDeviation, + configCopy.VRFv2.General.RandomWordsFulfilledEventTimeout.Duration, + ) + require.NoError(t, err, "Error requesting randomness and waiting for fulfilment") + + // Check wrapper subscription balance + expectedSubBalanceJuels := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment) + wrapperSubscription, err = vrfv2Contracts.CoordinatorV2.GetSubscription(testcontext.Get(t), *wrapperSubID) + require.NoError(t, err, "Error getting subscription information") + subBalanceAfterRequest := wrapperSubscription.Balance + require.Equal(t, expectedSubBalanceJuels, subBalanceAfterRequest) + + // Check status of randomness request within the wrapper consumer contract + consumerStatus, err := wrapperConsumer.GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + require.NoError(t, err, "Error getting randomness request status") + require.True(t, consumerStatus.Fulfilled) + + // Check wrapper consumer PLI balance + expectedWrapperConsumerJuelsBalance := new(big.Int).Sub(wrapperConsumerJuelsBalanceBeforeRequest, consumerStatus.Paid) + wrapperConsumerJuelsBalanceAfterRequest, err := linkToken.BalanceOf(testcontext.Get(t), wrapperConsumer.Address()) + require.NoError(t, err, "Error getting wrapper consumer 
balance") + require.Equal(t, expectedWrapperConsumerJuelsBalance, wrapperConsumerJuelsBalanceAfterRequest) + + // Check random word count + require.Equal(t, *configCopy.VRFv2.General.NumberOfWords, uint32(len(consumerStatus.RandomWords))) + for _, w := range consumerStatus.RandomWords { + l.Info().Str("Output", w.String()).Msg("Randomness fulfilled") + require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0") + } + + l.Info(). + Str("Consumer Balance Before Request (Link)", (*commonassets.Link)(wrapperConsumerJuelsBalanceBeforeRequest).Link()). + Str("Consumer Balance After Request (Link)", (*commonassets.Link)(wrapperConsumerJuelsBalanceAfterRequest).Link()). + Bool("Fulfilment Status", consumerStatus.Fulfilled). + Str("Paid by Consumer Contract (Link)", (*commonassets.Link)(consumerStatus.Paid).Link()). + Str("Paid by Coordinator Sub (Link)", (*commonassets.Link)(randomWordsFulfilledEvent.Payment).Link()). + Str("RequestTimestamp", consumerStatus.RequestTimestamp.String()). + Str("FulfilmentTimestamp", consumerStatus.FulfilmentTimestamp.String()). + Str("RequestBlockNumber", consumerStatus.RequestBlockNumber.String()). + Str("FulfilmentBlockNumber", consumerStatus.FulfilmentBlockNumber.String()). + Str("TX Hash", randomWordsFulfilledEvent.Raw.TxHash.String()). 
+ Msg("Random Words Fulfilment Details For Link Billing") + }) + + t.Run("Oracle Withdraw", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + subIDsForOracleWithDraw, err := vrfv2.CreateFundSubsAndAddConsumers( + env, + big.NewFloat(*configCopy.VRFv2.General.SubscriptionFundingAmountLink), + linkToken, + vrfv2Contracts.CoordinatorV2, + vrfv2Contracts.VRFV2Consumer, + 1, + ) + require.NoError(t, err) + + subIDForOracleWithdraw := subIDsForOracleWithDraw[0] + + fulfilledEventLink, err := vrfv2.RequestRandomnessAndWaitForFulfillment( + l, + vrfv2Contracts.VRFV2Consumer[0], + vrfv2Contracts.CoordinatorV2, + subIDForOracleWithdraw, + vrfv2KeyData, + *configCopy.VRFv2.General.MinimumConfirmations, + *configCopy.VRFv2.General.CallbackGasLimit, + *configCopy.VRFv2.General.NumberOfWords, + *configCopy.VRFv2.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2.General.RandomnessRequestCountPerRequestDeviation, + configCopy.VRFv2.General.RandomWordsFulfilledEventTimeout.Duration, + ) + require.NoError(t, err) + + amountToWithdrawLink := fulfilledEventLink.Payment + + defaultWalletBalanceLinkBeforeOracleWithdraw, err := linkToken.BalanceOf(testcontext.Get(t), defaultWalletAddress) + require.NoError(t, err) + + l.Info(). + Str("Returning to", defaultWalletAddress). + Str("Amount", amountToWithdrawLink.String()). 
+ Msg("Invoking Oracle Withdraw for PLI") + + err = vrfv2Contracts.CoordinatorV2.OracleWithdraw(common.HexToAddress(defaultWalletAddress), amountToWithdrawLink) + require.NoError(t, err, "Error withdrawing PLI from coordinator to default wallet") + + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, vrfcommon.ErrWaitTXsComplete) + + defaultWalletBalanceLinkAfterOracleWithdraw, err := linkToken.BalanceOf(testcontext.Get(t), defaultWalletAddress) + require.NoError(t, err) + + require.Equal( + t, + 1, + defaultWalletBalanceLinkAfterOracleWithdraw.Cmp(defaultWalletBalanceLinkBeforeOracleWithdraw), + "PLI funds were not returned after oracle withdraw", + ) + }) + + t.Run("Canceling Sub And Returning Funds", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + subIDsForCancelling, err := vrfv2.CreateFundSubsAndAddConsumers( + env, + big.NewFloat(*configCopy.VRFv2.General.SubscriptionFundingAmountLink), + linkToken, + vrfv2Contracts.CoordinatorV2, + vrfv2Contracts.VRFV2Consumer, + 1, + ) + require.NoError(t, err) + subIDForCancelling := subIDsForCancelling[0] + + testWalletAddress, err := actions.GenerateWallet() + require.NoError(t, err) + + testWalletBalanceLinkBeforeSubCancelling, err := linkToken.BalanceOf(testcontext.Get(t), testWalletAddress.String()) + require.NoError(t, err) + + subscriptionForCancelling, err := vrfv2Contracts.CoordinatorV2.GetSubscription(testcontext.Get(t), subIDForCancelling) + require.NoError(t, err, "error getting subscription information") + + subBalanceLink := subscriptionForCancelling.Balance + + l.Info(). + Str("Subscription Amount Link", subBalanceLink.String()). + Uint64("Returning funds from SubID", subIDForCancelling). + Str("Returning funds to", testWalletAddress.String()). 
+ Msg("Canceling subscription and returning funds to subscription owner") + + tx, err := vrfv2Contracts.CoordinatorV2.CancelSubscription(subIDForCancelling, testWalletAddress) + require.NoError(t, err, "Error canceling subscription") + + subscriptionCanceledEvent, err := vrfv2Contracts.CoordinatorV2.WaitForSubscriptionCanceledEvent([]uint64{subIDForCancelling}, time.Second*30) + require.NoError(t, err, "error waiting for subscription canceled event") + + cancellationTxReceipt, err := env.EVMClient.GetTxReceipt(tx.Hash()) + require.NoError(t, err, "error getting tx cancellation Tx Receipt") + + txGasUsed := new(big.Int).SetUint64(cancellationTxReceipt.GasUsed) + cancellationTxFeeWei := new(big.Int).Mul(txGasUsed, cancellationTxReceipt.EffectiveGasPrice) + + l.Info(). + Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()). + Str("Effective Gas Price", cancellationTxReceipt.EffectiveGasPrice.String()). + Uint64("Gas Used", cancellationTxReceipt.GasUsed). + Msg("Cancellation TX Receipt") + + l.Info(). + Str("Returned Subscription Amount Link", subscriptionCanceledEvent.Amount.String()). + Uint64("SubID", subscriptionCanceledEvent.SubId). + Str("Returned to", subscriptionCanceledEvent.To.String()). + Msg("Subscription Canceled Event") + + require.Equal(t, subBalanceLink, subscriptionCanceledEvent.Amount, "SubscriptionCanceled event PLI amount is not equal to sub amount while canceling subscription") + + testWalletBalanceLinkAfterSubCancelling, err := linkToken.BalanceOf(testcontext.Get(t), testWalletAddress.String()) + require.NoError(t, err) + + //Verify that sub was deleted from Coordinator + _, err = vrfv2Contracts.CoordinatorV2.GetSubscription(testcontext.Get(t), subIDForCancelling) + require.Error(t, err, "error not occurred when trying to get deleted subscription from old Coordinator after sub migration") + + subFundsReturnedLinkActual := new(big.Int).Sub(testWalletBalanceLinkAfterSubCancelling, testWalletBalanceLinkBeforeSubCancelling) + + l.Info(). 
+ Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()). + Str("Sub Funds Returned Actual - Link", subFundsReturnedLinkActual.String()). + Str("Sub Balance - Link", subBalanceLink.String()). + Msg("Sub funds returned") + + require.Equal(t, 0, subBalanceLink.Cmp(subFundsReturnedLinkActual), "Returned PLI funds are not equal to sub balance that was cancelled") + }) + + t.Run("Owner Canceling Sub And Returning Funds While Having Pending Requests", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + // Underfund subscription to force fulfillments to fail + configCopy.VRFv2.General.SubscriptionFundingAmountLink = ptr.Ptr(float64(0.000000000000000001)) // 1 Juel + + subIDsForCancelling, err := vrfv2.CreateFundSubsAndAddConsumers( + env, + big.NewFloat(*configCopy.VRFv2.General.SubscriptionFundingAmountLink), + linkToken, + vrfv2Contracts.CoordinatorV2, + vrfv2Contracts.VRFV2Consumer, + 1, + ) + require.NoError(t, err) + + subIDForCancelling := subIDsForCancelling[0] + + subscriptionForCancelling, err := vrfv2Contracts.CoordinatorV2.GetSubscription(testcontext.Get(t), subIDForCancelling) + require.NoError(t, err, "Error getting subscription information") + + vrfv2.LogSubDetails(l, subscriptionForCancelling, subIDForCancelling, vrfv2Contracts.CoordinatorV2) + + // No GetActiveSubscriptionIds function available - skipping check + + pendingRequestsExist, err := vrfv2Contracts.CoordinatorV2.PendingRequestsExist(testcontext.Get(t), subIDForCancelling) + require.NoError(t, err) + require.False(t, pendingRequestsExist, "Pending requests should not exist") + + // Request randomness - should fail due to underfunded subscription + randomWordsFulfilledEventTimeout := 5 * time.Second + _, err = vrfv2.RequestRandomnessAndWaitForFulfillment( + l, + vrfv2Contracts.VRFV2Consumer[0], + vrfv2Contracts.CoordinatorV2, + subIDForCancelling, + vrfv2KeyData, + *configCopy.VRFv2.General.MinimumConfirmations, + *configCopy.VRFv2.General.CallbackGasLimit, + 
*configCopy.VRFv2.General.NumberOfWords, + *configCopy.VRFv2.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2.General.RandomnessRequestCountPerRequestDeviation, + randomWordsFulfilledEventTimeout, + ) + require.Error(t, err, "Error should occur while waiting for fulfilment due to low sub balance") + + pendingRequestsExist, err = vrfv2Contracts.CoordinatorV2.PendingRequestsExist(testcontext.Get(t), subIDForCancelling) + require.NoError(t, err) + require.True(t, pendingRequestsExist, "Pending requests should exist after unfilfulled requests due to low sub balance") + + walletBalanceLinkBeforeSubCancelling, err := linkToken.BalanceOf(testcontext.Get(t), defaultWalletAddress) + require.NoError(t, err) + + subscriptionForCancelling, err = vrfv2Contracts.CoordinatorV2.GetSubscription(testcontext.Get(t), subIDForCancelling) + require.NoError(t, err, "Error getting subscription information") + subBalanceLink := subscriptionForCancelling.Balance + + l.Info(). + Str("Subscription Amount Link", subBalanceLink.String()). + Uint64("Returning funds from SubID", subIDForCancelling). + Str("Returning funds to", defaultWalletAddress). + Msg("Canceling subscription and returning funds to subscription owner") + + // Call OwnerCancelSubscription + tx, err := vrfv2Contracts.CoordinatorV2.OwnerCancelSubscription(subIDForCancelling) + require.NoError(t, err, "Error canceling subscription") + + subscriptionCanceledEvent, err := vrfv2Contracts.CoordinatorV2.WaitForSubscriptionCanceledEvent([]uint64{subIDForCancelling}, time.Second*30) + require.NoError(t, err, "error waiting for subscription canceled event") + + cancellationTxReceipt, err := env.EVMClient.GetTxReceipt(tx.Hash()) + require.NoError(t, err, "error getting tx cancellation Tx Receipt") + + txGasUsed := new(big.Int).SetUint64(cancellationTxReceipt.GasUsed) + cancellationTxFeeWei := new(big.Int).Mul(txGasUsed, cancellationTxReceipt.EffectiveGasPrice) + + l.Info(). 
+ Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()). + Str("Effective Gas Price", cancellationTxReceipt.EffectiveGasPrice.String()). + Uint64("Gas Used", cancellationTxReceipt.GasUsed). + Msg("Cancellation TX Receipt") + + l.Info(). + Str("Returned Subscription Amount Link", subscriptionCanceledEvent.Amount.String()). + Uint64("SubID", subscriptionCanceledEvent.SubId). + Str("Returned to", subscriptionCanceledEvent.To.String()). + Msg("Subscription Canceled Event") + + require.Equal(t, subBalanceLink, subscriptionCanceledEvent.Amount, "SubscriptionCanceled event PLI amount is not equal to sub amount while canceling subscription") + + walletBalanceLinkAfterSubCancelling, err := linkToken.BalanceOf(testcontext.Get(t), defaultWalletAddress) + require.NoError(t, err) + + // Verify that subscription was deleted from Coordinator contract + _, err = vrfv2Contracts.CoordinatorV2.GetSubscription(testcontext.Get(t), subIDForCancelling) + l.Info(). + Str("Expected error message", err.Error()) + require.Error(t, err, "Error did not occur when fetching deleted subscription from the Coordinator after owner cancelation") + + subFundsReturnedLinkActual := new(big.Int).Sub(walletBalanceLinkAfterSubCancelling, walletBalanceLinkBeforeSubCancelling) + l.Info(). + Str("Wallet Balance Before Owner Cancelation", walletBalanceLinkBeforeSubCancelling.String()). + Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()). + Str("Sub Funds Returned Actual - Link", subFundsReturnedLinkActual.String()). + Str("Sub Balance - Link", subBalanceLink.String()). + Str("Wallet Balance After Owner Cancelation", walletBalanceLinkAfterSubCancelling.String()). 
+ Msg("Sub funds returned") + + require.Equal(t, 0, subBalanceLink.Cmp(subFundsReturnedLinkActual), "Returned PLI funds are not equal to sub balance that was cancelled") + + // Again, there is no GetActiveSubscriptionIds method on the v2 Coordinator contract, so we can't double check that the cancelled + // subID is no longer in the list of active subs + }) +} + +func TestVRFv2MultipleSendingKeys(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.VRFv2) + if err != nil { + t.Fatal(err) + } + + useVRFOwner := false + useTestCoordinator := false + + network, err := actions.EthereumNetworkConfigFromConfig(l, &config) + require.NoError(t, err, "Error building ethereum network config") + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestConfig(&config). + WithTestInstance(t). + WithPrivateEthereumNetwork(network). + WithCLNodes(1). + WithFunding(big.NewFloat(*config.Common.PluginNodeFunding)). + WithStandardCleanup(). + Build() + require.NoError(t, err, "error creating test env") + + env.ParallelTransactions(true) + + mockETHLinkFeed, err := actions.DeployMockETHLinkFeed(env.ContractDeployer, big.NewInt(*config.VRFv2.General.LinkNativeFeedResponse)) + require.NoError(t, err) + linkToken, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err) + + // register proving key against oracle address (sending key) in order to test oracleWithdraw + defaultWalletAddress := env.EVMClient.GetDefaultWallet().Address() + + numberOfTxKeysToCreate := 2 + vrfv2Contracts, subIDs, vrfv2KeyData, nodesMap, err := vrfv2.SetupVRFV2Environment( + env, + []vrfcommon.VRFNodeType{vrfcommon.VRF}, + &config, + useVRFOwner, + useTestCoordinator, + linkToken, + mockETHLinkFeed, + defaultWalletAddress, + numberOfTxKeysToCreate, + 1, + 1, + l, + ) + require.NoError(t, err, "error setting up VRF v2 env") + + subID := subIDs[0] + + subscription, err := vrfv2Contracts.CoordinatorV2.GetSubscription(testcontext.Get(t), 
subID) + require.NoError(t, err, "error getting subscription information") + + vrfv2.LogSubDetails(l, subscription, subID, vrfv2Contracts.CoordinatorV2) + + t.Run("Request Randomness with multiple sending keys", func(t *testing.T) { + txKeys, _, err := nodesMap[vrfcommon.VRF].CLNode.API.ReadTxKeys("evm") + require.NoError(t, err, "error reading tx keys") + + require.Equal(t, numberOfTxKeysToCreate+1, len(txKeys.Data)) + + var fulfillmentTxFromAddresses []string + for i := 0; i < numberOfTxKeysToCreate+1; i++ { + randomWordsFulfilledEvent, err := vrfv2.RequestRandomnessAndWaitForFulfillment( + l, + vrfv2Contracts.VRFV2Consumer[0], + vrfv2Contracts.CoordinatorV2, + subID, + vrfv2KeyData, + *config.VRFv2.General.MinimumConfirmations, + *config.VRFv2.General.CallbackGasLimit, + *config.VRFv2.General.NumberOfWords, + *config.VRFv2.General.RandomnessRequestCountPerRequest, + *config.VRFv2.General.RandomnessRequestCountPerRequestDeviation, + config.VRFv2.General.RandomWordsFulfilledEventTimeout.Duration, + ) + require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + + //todo - move TransactionByHash to EVMClient in CTF + fulfillmentTx, _, err := env.EVMClient.(*blockchain.EthereumMultinodeClient).DefaultClient.(*blockchain.EthereumClient). 
// TestVRFOwner exercises the VRFOwner contract path: with useVRFOwner and
// useTestCoordinator enabled, it verifies the coordinator is owned by the VRFOwner
// contract, then requests randomness via force-fulfill and asserts the fulfillment
// carries zero payment, plus that coordinator config matches the test config.
func TestVRFOwner(t *testing.T) {
	t.Parallel()
	l := logging.GetTestLogger(t)

	config, err := tc.GetConfig("Smoke", tc.VRFv2)
	require.NoError(t, err, "Error getting config")

	// Both flags on: this test targets the VRFOwner + test coordinator deployment.
	useVRFOwner := true
	useTestCoordinator := true
	network, err := actions.EthereumNetworkConfigFromConfig(l, &config)
	require.NoError(t, err, "Error building ethereum network config")

	env, err := test_env.NewCLTestEnvBuilder().
		WithTestInstance(t).
		WithTestConfig(&config).
		WithPrivateEthereumNetwork(network).
		WithCLNodes(1).
		WithFunding(big.NewFloat(*config.Common.PluginNodeFunding)).
		WithStandardCleanup().
		Build()
	require.NoError(t, err, "error creating test env")

	env.ParallelTransactions(true)

	// NOTE: unlike TestVRFv2Basic, this deploys the mock feed directly via the
	// contract deployer (presumably because SetBlockTimestampDeduction is needed
	// below — confirm against the deployer API).
	mockETHLinkFeed, err := env.ContractDeployer.DeployVRFMockETHPLIFeed(big.NewInt(*config.VRFv2.General.LinkNativeFeedResponse))

	require.NoError(t, err)
	linkToken, err := actions.DeployPLIToken(env.ContractDeployer)
	require.NoError(t, err)

	// register proving key against oracle address (sending key) in order to test oracleWithdraw
	defaultWalletAddress := env.EVMClient.GetDefaultWallet().Address()

	numberOfTxKeysToCreate := 1
	vrfv2Contracts, subIDs, vrfv2Data, _, err := vrfv2.SetupVRFV2Environment(
		env,
		[]vrfcommon.VRFNodeType{vrfcommon.VRF},
		&config,
		useVRFOwner,
		useTestCoordinator,
		linkToken,
		mockETHLinkFeed,
		defaultWalletAddress,
		numberOfTxKeysToCreate,
		1,
		1,
		l,
	)
	require.NoError(t, err, "error setting up VRF v2 env")

	subID := subIDs[0]

	subscription, err := vrfv2Contracts.CoordinatorV2.GetSubscription(testcontext.Get(t), subID)
	require.NoError(t, err, "error getting subscription information")

	vrfv2.LogSubDetails(l, subscription, subID, vrfv2Contracts.CoordinatorV2)

	t.Run("Request Randomness With Force-Fulfill", func(t *testing.T) {
		configCopy := config.MustCopy().(tc.TestConfig)

		// The coordinator must be owned by the VRFOwner contract for force-fulfill.
		vrfCoordinatorOwner, err := vrfv2Contracts.CoordinatorV2.GetOwner(testcontext.Get(t))
		require.NoError(t, err)
		require.Equal(t, vrfv2Contracts.VRFOwner.Address(), vrfCoordinatorOwner.String())

		// Fund the consumer with 5 PLI (in wei) for the request.
		err = linkToken.Transfer(
			vrfv2Contracts.VRFV2Consumer[0].Address(),
			conversions.EtherToWei(big.NewFloat(5)),
		)
		require.NoError(t, err, "error transferring link to consumer contract")

		err = env.EVMClient.WaitForEvents()
		require.NoError(t, err, vrfcommon.ErrWaitTXsComplete)

		consumerLinkBalance, err := linkToken.BalanceOf(testcontext.Get(t), vrfv2Contracts.VRFV2Consumer[0].Address())
		require.NoError(t, err, "error getting consumer link balance")
		l.Info().
			Str("Balance", conversions.WeiToEther(consumerLinkBalance).String()).
			Str("Consumer", vrfv2Contracts.VRFV2Consumer[0].Address()).
			Msg("Consumer Link Balance")

		// Skew the mock feed's block timestamp by 3 — presumably to make the feed
		// answer read as stale for the force-fulfill path; TODO confirm intent.
		err = mockETHLinkFeed.SetBlockTimestampDeduction(big.NewInt(3))
		require.NoError(t, err)
		err = env.EVMClient.WaitForEvents()
		require.NoError(t, err, vrfcommon.ErrWaitTXsComplete)

		// test and assert
		_, randFulfilledEvent, _, err := vrfv2.RequestRandomnessWithForceFulfillAndWaitForFulfillment(
			l,
			vrfv2Contracts.VRFV2Consumer[0],
			vrfv2Contracts.CoordinatorV2,
			vrfv2Contracts.VRFOwner,
			vrfv2Data,
			*configCopy.VRFv2.General.MinimumConfirmations,
			*configCopy.VRFv2.General.CallbackGasLimit,
			*configCopy.VRFv2.General.NumberOfWords,
			*configCopy.VRFv2.General.RandomnessRequestCountPerRequest,
			*configCopy.VRFv2.General.RandomnessRequestCountPerRequestDeviation,
			conversions.EtherToWei(big.NewFloat(5)),
			common.HexToAddress(linkToken.Address()),
			time.Minute*2,
		)
		require.NoError(t, err, "error requesting randomness with force-fulfillment and waiting for fulfilment")
		// Force-fulfilled requests are not billed to a subscription: payment must be 0.
		require.Equal(t, 0, randFulfilledEvent.Payment.Cmp(big.NewInt(0)), "Forced Fulfilled Randomness's Payment should be 0")

		status, err := vrfv2Contracts.VRFV2Consumer[0].GetRequestStatus(testcontext.Get(t), randFulfilledEvent.RequestId)
		require.NoError(t, err, "error getting rand request status")
		require.True(t, status.Fulfilled)
		l.Debug().Bool("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status")

		require.Equal(t, *configCopy.VRFv2.General.NumberOfWords, uint32(len(status.RandomWords)))
		for _, w := range status.RandomWords {
			l.Info().Str("Output", w.String()).Msg("Randomness fulfilled")
			require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0")
		}

		// Cross-check the deployed coordinator's on-chain config against the test config.
		coordinatorConfig, err := vrfv2Contracts.CoordinatorV2.GetConfig(testcontext.Get(t))
		require.NoError(t, err, "error getting coordinator config")

		coordinatorFeeConfig, err := vrfv2Contracts.CoordinatorV2.GetFeeConfig(testcontext.Get(t))
		require.NoError(t, err, "error getting coordinator fee config")

		coordinatorFallbackWeiPerUnitLinkConfig, err := vrfv2Contracts.CoordinatorV2.GetFallbackWeiPerUnitLink(testcontext.Get(t))
		require.NoError(t, err, "error getting coordinator FallbackWeiPerUnitLink")

		require.Equal(t, *configCopy.VRFv2.General.StalenessSeconds, coordinatorConfig.StalenessSeconds)
		require.Equal(t, *configCopy.VRFv2.General.GasAfterPaymentCalculation, coordinatorConfig.GasAfterPaymentCalculation)
		require.Equal(t, *configCopy.VRFv2.General.MinimumConfirmations, coordinatorConfig.MinimumRequestConfirmations)
		require.Equal(t, *configCopy.VRFv2.General.FulfillmentFlatFeeLinkPPMTier1, coordinatorFeeConfig.FulfillmentFlatFeeLinkPPMTier1)
		require.Equal(t, *configCopy.VRFv2.General.ReqsForTier2, coordinatorFeeConfig.ReqsForTier2.Int64())
		require.Equal(t, *configCopy.VRFv2.General.FallbackWeiPerUnitLink, coordinatorFallbackWeiPerUnitLinkConfig.Int64())
	})
}
vrfv2Contracts.CoordinatorV2.GetFeeConfig(testcontext.Get(t)) + require.NoError(t, err, "error getting coordinator fee config") + + coordinatorFallbackWeiPerUnitLinkConfig, err := vrfv2Contracts.CoordinatorV2.GetFallbackWeiPerUnitLink(testcontext.Get(t)) + require.NoError(t, err, "error getting coordinator FallbackWeiPerUnitLink") + + require.Equal(t, *configCopy.VRFv2.General.StalenessSeconds, coordinatorConfig.StalenessSeconds) + require.Equal(t, *configCopy.VRFv2.General.GasAfterPaymentCalculation, coordinatorConfig.GasAfterPaymentCalculation) + require.Equal(t, *configCopy.VRFv2.General.MinimumConfirmations, coordinatorConfig.MinimumRequestConfirmations) + require.Equal(t, *configCopy.VRFv2.General.FulfillmentFlatFeeLinkPPMTier1, coordinatorFeeConfig.FulfillmentFlatFeeLinkPPMTier1) + require.Equal(t, *configCopy.VRFv2.General.ReqsForTier2, coordinatorFeeConfig.ReqsForTier2.Int64()) + require.Equal(t, *configCopy.VRFv2.General.FallbackWeiPerUnitLink, coordinatorFallbackWeiPerUnitLinkConfig.Int64()) + }) +} + +func TestVRFV2WithBHS(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.VRFv2) + require.NoError(t, err, "Error getting config") + + useVRFOwner := true + useTestCoordinator := true + network, err := actions.EthereumNetworkConfigFromConfig(l, &config) + require.NoError(t, err, "Error building ethereum network config") + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithPrivateEthereumNetwork(network). + WithCLNodes(2). + WithFunding(big.NewFloat(*config.Common.PluginNodeFunding)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err, "error creating test env") + + env.ParallelTransactions(true) + + mockETHLinkFeed, err := env.ContractDeployer.DeployVRFMockETHPLIFeed(big.NewInt(*config.VRFv2.General.LinkNativeFeedResponse)) + + require.NoError(t, err) + linkToken, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err) + + // register proving key against oracle address (sending key) in order to test oracleWithdraw + defaultWalletAddress := env.EVMClient.GetDefaultWallet().Address() + + //Underfund Subscription + config.VRFv2.General.SubscriptionFundingAmountLink = ptr.Ptr(float64(0.000000000000000001)) // 1 Juel + + //decrease default span for checking blockhashes for unfulfilled requests + config.VRFv2.General.BHSJobWaitBlocks = ptr.Ptr(2) + config.VRFv2.General.BHSJobLookBackBlocks = ptr.Ptr(20) + + numberOfTxKeysToCreate := 0 + vrfv2Contracts, subIDs, vrfv2KeyData, nodesMap, err := vrfv2.SetupVRFV2Environment( + env, + []vrfcommon.VRFNodeType{vrfcommon.VRF, vrfcommon.BHS}, + &config, + useVRFOwner, + useTestCoordinator, + linkToken, + mockETHLinkFeed, + defaultWalletAddress, + numberOfTxKeysToCreate, + 1, + 1, + l, + ) + require.NoError(t, err, "error setting up VRF v2 env") + + subID := subIDs[0] + + subscription, err := vrfv2Contracts.CoordinatorV2.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err, "error getting subscription information") + + vrfv2.LogSubDetails(l, subscription, subID, vrfv2Contracts.CoordinatorV2) + + t.Run("BHS Job with complete E2E - wait 256 blocks to see if Rand Request is fulfilled", func(t *testing.T) { + t.Skip("Skipped since should be run on-demand on live testnet due to long execution time") + //BHS node should fill in blockhashes into BHS contract depending on the waitBlocks and lookBackBlocks settings + configCopy := config.MustCopy().(tc.TestConfig) + _, err := vrfv2Contracts.VRFV2Consumer[0].RequestRandomness( + vrfv2KeyData.KeyHash, + subID, + 
*configCopy.VRFv2.General.MinimumConfirmations, + *configCopy.VRFv2.General.CallbackGasLimit, + *configCopy.VRFv2.General.NumberOfWords, + *configCopy.VRFv2.General.RandomnessRequestCountPerRequest, + ) + require.NoError(t, err, "error requesting randomness") + + randomWordsRequestedEvent, err := vrfv2Contracts.CoordinatorV2.WaitForRandomWordsRequestedEvent( + [][32]byte{vrfv2KeyData.KeyHash}, + []uint64{subID}, + []common.Address{common.HexToAddress(vrfv2Contracts.VRFV2Consumer[0].Address())}, + time.Minute*1, + ) + require.NoError(t, err, "error waiting for randomness requested event") + vrfv2.LogRandomnessRequestedEvent(l, vrfv2Contracts.CoordinatorV2, randomWordsRequestedEvent) + randRequestBlockNumber := randomWordsRequestedEvent.Raw.BlockNumber + var wg sync.WaitGroup + wg.Add(1) + //Wait at least 256 blocks + _, err = actions.WaitForBlockNumberToBe(randRequestBlockNumber+uint64(257), env.EVMClient, &wg, time.Second*260, t) + wg.Wait() + require.NoError(t, err) + err = vrfv2.FundSubscriptions(env, big.NewFloat(*configCopy.VRFv2.General.SubscriptionFundingAmountLink), linkToken, vrfv2Contracts.CoordinatorV2, subIDs) + require.NoError(t, err, "error funding subscriptions") + randomWordsFulfilledEvent, err := vrfv2Contracts.CoordinatorV2.WaitForRandomWordsFulfilledEvent( + []*big.Int{randomWordsRequestedEvent.RequestId}, + time.Second*30, + ) + require.NoError(t, err, "error waiting for randomness fulfilled event") + vrfv2.LogRandomWordsFulfilledEvent(l, vrfv2Contracts.CoordinatorV2, randomWordsFulfilledEvent) + status, err := vrfv2Contracts.VRFV2Consumer[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + require.NoError(t, err, "error getting rand request status") + require.True(t, status.Fulfilled) + l.Debug().Bool("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status") + }) + + t.Run("BHS Job should fill in blockhashes into BHS contract for unfulfilled requests", func(t *testing.T) { + //BHS node should 
fill in blockhashes into BHS contract depending on the waitBlocks and lookBackBlocks settings + configCopy := config.MustCopy().(tc.TestConfig) + _, err := vrfv2Contracts.VRFV2Consumer[0].RequestRandomness( + vrfv2KeyData.KeyHash, + subID, + *configCopy.VRFv2.General.MinimumConfirmations, + *configCopy.VRFv2.General.CallbackGasLimit, + *configCopy.VRFv2.General.NumberOfWords, + *configCopy.VRFv2.General.RandomnessRequestCountPerRequest, + ) + require.NoError(t, err, "error requesting randomness") + + randomWordsRequestedEvent, err := vrfv2Contracts.CoordinatorV2.WaitForRandomWordsRequestedEvent( + [][32]byte{vrfv2KeyData.KeyHash}, + []uint64{subID}, + []common.Address{common.HexToAddress(vrfv2Contracts.VRFV2Consumer[0].Address())}, + time.Minute*1, + ) + require.NoError(t, err, "error waiting for randomness requested event") + vrfv2.LogRandomnessRequestedEvent(l, vrfv2Contracts.CoordinatorV2, randomWordsRequestedEvent) + randRequestBlockNumber := randomWordsRequestedEvent.Raw.BlockNumber + + _, err = vrfv2Contracts.BHS.GetBlockHash(testcontext.Get(t), big.NewInt(int64(randRequestBlockNumber))) + require.Error(t, err, "error not occurred when getting blockhash for a blocknumber which was not stored in BHS contract") + + var wg sync.WaitGroup + wg.Add(1) + _, err = actions.WaitForBlockNumberToBe(randRequestBlockNumber+uint64(*config.VRFv2.General.BHSJobWaitBlocks), env.EVMClient, &wg, time.Minute*1, t) + wg.Wait() + require.NoError(t, err, "error waiting for blocknumber to be") + + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, vrfcommon.ErrWaitTXsComplete) + metrics, err := vrfv2Contracts.VRFV2Consumer[0].GetLoadTestMetrics(testcontext.Get(t)) + require.Equal(t, 0, metrics.RequestCount.Cmp(big.NewInt(1))) + require.Equal(t, 0, metrics.FulfilmentCount.Cmp(big.NewInt(0))) + + var clNodeTxs *client.TransactionsData + var txHash string + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + clNodeTxs, _, err = 
nodesMap[vrfcommon.BHS].CLNode.API.ReadTransactions() + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "error getting CL Node transactions") + l.Debug().Int("Number of TXs", len(clNodeTxs.Data)).Msg("BHS Node txs") + g.Expect(len(clNodeTxs.Data)).Should(gomega.BeNumerically("==", 1), "Expected 1 tx posted by BHS Node, but found %d", len(clNodeTxs.Data)) + txHash = clNodeTxs.Data[0].Attributes.Hash + }, "2m", "1s").Should(gomega.Succeed()) + + require.Equal(t, strings.ToLower(vrfv2Contracts.BHS.Address()), strings.ToLower(clNodeTxs.Data[0].Attributes.To)) + + bhsStoreTx, _, err := actions.GetTxByHash(testcontext.Get(t), env.EVMClient, common.HexToHash(txHash)) + require.NoError(t, err, "error getting tx from hash") + + bhsStoreTxInputData, err := actions.DecodeTxInputData(blockhash_store.BlockhashStoreABI, bhsStoreTx.Data()) + l.Info(). + Str("Block Number", bhsStoreTxInputData["n"].(*big.Int).String()). + Msg("BHS Node's Store Blockhash for Blocknumber Method TX") + require.Equal(t, randRequestBlockNumber, bhsStoreTxInputData["n"].(*big.Int).Uint64()) + + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, vrfcommon.ErrWaitTXsComplete) + + var randRequestBlockHash [32]byte + gom.Eventually(func(g gomega.Gomega) { + randRequestBlockHash, err = vrfv2Contracts.BHS.GetBlockHash(testcontext.Get(t), big.NewInt(int64(randRequestBlockNumber))) + g.Expect(err).ShouldNot(gomega.HaveOccurred(), "error getting blockhash for a blocknumber which was stored in BHS contract") + }, "2m", "1s").Should(gomega.Succeed()) + l.Info(). + Str("Randomness Request's Blockhash", randomWordsRequestedEvent.Raw.BlockHash.String()). + Str("Block Hash stored by BHS contract", fmt.Sprintf("0x%x", randRequestBlockHash)). 
+ Msg("BHS Contract's stored Blockhash for Randomness Request") + require.Equal(t, 0, randomWordsRequestedEvent.Raw.BlockHash.Cmp(randRequestBlockHash)) + }) +} diff --git a/integration-tests/smoke/vrfv2plus_test.go b/integration-tests/smoke/vrfv2plus_test.go new file mode 100644 index 00000000..4f0d1bd5 --- /dev/null +++ b/integration-tests/smoke/vrfv2plus_test.go @@ -0,0 +1,1231 @@ +package smoke + +import ( + "fmt" + "math/big" + "strings" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/onsi/gomega" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/ptr" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + vrfcommon "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/common" + "github.com/goplugin/pluginv3.0/integration-tests/actions/vrf/vrfv2plus" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/blockhash_store" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + vrfv2plus_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/vrfv2plus" + it_utils "github.com/goplugin/pluginv3.0/integration-tests/utils" +) + +func TestVRFv2Plus(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.VRFv2Plus) + if err != nil { + t.Fatal(err) + } + + network, err := actions.EthereumNetworkConfigFromConfig(l, &config) + require.NoError(t, err, "Error building ethereum network config") + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithPrivateEthereumNetwork(network). + WithCLNodes(1). 
+ WithFunding(big.NewFloat(*config.Common.PluginNodeFunding)). + WithStandardCleanup(). + Build() + require.NoError(t, err, "error creating test env") + + env.ParallelTransactions(true) + + mockETHLinkFeed, err := actions.DeployMockETHLinkFeed(env.ContractDeployer, big.NewInt(*config.VRFv2Plus.General.LinkNativeFeedResponse)) + require.NoError(t, err, "error deploying mock ETH/PLI feed") + + linkToken, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err, "error deploying PLI contract") + + // default wallet address is used to test Withdraw + defaultWalletAddress := env.EVMClient.GetDefaultWallet().Address() + + numberOfTxKeysToCreate := 2 + vrfv2PlusContracts, subIDs, vrfv2PlusData, nodesMap, err := vrfv2plus.SetupVRFV2_5Environment( + env, + []vrfcommon.VRFNodeType{vrfcommon.VRF}, + &config, + linkToken, + mockETHLinkFeed, + numberOfTxKeysToCreate, + 1, + 1, + l, + ) + require.NoError(t, err, "error setting up VRF v2_5 env") + + subID := subIDs[0] + + subscription, err := vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err, "error getting subscription information") + + vrfv2plus.LogSubDetails(l, subscription, subID, vrfv2PlusContracts.CoordinatorV2Plus) + + t.Run("Link Billing", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + var isNativeBilling = false + subBalanceBeforeRequest := subscription.Balance + + jobRunsBeforeTest, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(nodesMap[vrfcommon.VRF].Job.Data.ID) + require.NoError(t, err, "error reading job runs") + + // test and assert + randomWordsFulfilledEvent, err := vrfv2plus.RequestRandomnessAndWaitForFulfillment( + vrfv2PlusContracts.VRFV2PlusConsumer[0], + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusData, + subID, + isNativeBilling, + *configCopy.VRFv2Plus.General.MinimumConfirmations, + *configCopy.VRFv2Plus.General.CallbackGasLimit, + *configCopy.VRFv2Plus.General.NumberOfWords, + 
*configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequestDeviation, + configCopy.VRFv2Plus.General.RandomWordsFulfilledEventTimeout.Duration, + l, + ) + require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + + expectedSubBalanceJuels := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment) + subscription, err = vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err, "error getting subscription information") + subBalanceAfterRequest := subscription.Balance + require.Equal(t, expectedSubBalanceJuels, subBalanceAfterRequest) + + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(nodesMap[vrfcommon.VRF].Job.Data.ID) + require.NoError(t, err, "error reading job runs") + require.Equal(t, len(jobRunsBeforeTest.Data)+1, len(jobRuns.Data)) + + status, err := vrfv2PlusContracts.VRFV2PlusConsumer[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + require.NoError(t, err, "error getting rand request status") + require.True(t, status.Fulfilled) + l.Debug().Bool("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status") + + require.Equal(t, *configCopy.VRFv2Plus.General.NumberOfWords, uint32(len(status.RandomWords))) + for _, w := range status.RandomWords { + l.Info().Str("Output", w.String()).Msg("Randomness fulfilled") + require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0") + } + }) + + t.Run("Native Billing", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + testConfig := configCopy.VRFv2Plus.General + var isNativeBilling = true + subNativeTokenBalanceBeforeRequest := subscription.NativeBalance + + jobRunsBeforeTest, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(nodesMap[vrfcommon.VRF].Job.Data.ID) + require.NoError(t, err, "error reading job runs") + + // test and assert + 
randomWordsFulfilledEvent, err := vrfv2plus.RequestRandomnessAndWaitForFulfillment( + vrfv2PlusContracts.VRFV2PlusConsumer[0], + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusData, + subID, + isNativeBilling, + *configCopy.VRFv2Plus.General.MinimumConfirmations, + *configCopy.VRFv2Plus.General.CallbackGasLimit, + *configCopy.VRFv2Plus.General.NumberOfWords, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequestDeviation, + configCopy.VRFv2Plus.General.RandomWordsFulfilledEventTimeout.Duration, + l, + ) + require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + expectedSubBalanceWei := new(big.Int).Sub(subNativeTokenBalanceBeforeRequest, randomWordsFulfilledEvent.Payment) + subscription, err = vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err) + subBalanceAfterRequest := subscription.NativeBalance + require.Equal(t, expectedSubBalanceWei, subBalanceAfterRequest) + + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(nodesMap[vrfcommon.VRF].Job.Data.ID) + require.NoError(t, err, "error reading job runs") + require.Equal(t, len(jobRunsBeforeTest.Data)+1, len(jobRuns.Data)) + + status, err := vrfv2PlusContracts.VRFV2PlusConsumer[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + require.NoError(t, err, "error getting rand request status") + require.True(t, status.Fulfilled) + l.Debug().Bool("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status") + + require.Equal(t, *testConfig.NumberOfWords, uint32(len(status.RandomWords))) + for _, w := range status.RandomWords { + l.Info().Str("Output", w.String()).Msg("Randomness fulfilled") + require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0") + } + }) + t.Run("Direct Funding (VRFV2PlusWrapper)", func(t *testing.T) { + configCopy := 
config.MustCopy().(tc.TestConfig) + wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperEnvironment( + env, + &configCopy, + linkToken, + mockETHLinkFeed, + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusData.KeyHash, + 1, + ) + require.NoError(t, err) + + t.Run("Link Billing", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + testConfig := configCopy.VRFv2Plus.General + var isNativeBilling = false + + wrapperConsumerJuelsBalanceBeforeRequest, err := linkToken.BalanceOf(testcontext.Get(t), wrapperContracts.LoadTestConsumers[0].Address()) + require.NoError(t, err, "error getting wrapper consumer balance") + + wrapperSubscription, err := vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), wrapperSubID) + require.NoError(t, err, "error getting subscription information") + subBalanceBeforeRequest := wrapperSubscription.Balance + + randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment( + wrapperContracts.LoadTestConsumers[0], + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusData, + wrapperSubID, + isNativeBilling, + *configCopy.VRFv2Plus.General.MinimumConfirmations, + *configCopy.VRFv2Plus.General.CallbackGasLimit, + *configCopy.VRFv2Plus.General.NumberOfWords, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequestDeviation, + configCopy.VRFv2Plus.General.RandomWordsFulfilledEventTimeout.Duration, + l, + ) + require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + + expectedSubBalanceJuels := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment) + wrapperSubscription, err = vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), wrapperSubID) + require.NoError(t, err, "error getting subscription information") + subBalanceAfterRequest := wrapperSubscription.Balance + require.Equal(t, expectedSubBalanceJuels, 
subBalanceAfterRequest) + + consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + require.NoError(t, err, "error getting rand request status") + require.True(t, consumerStatus.Fulfilled) + + expectedWrapperConsumerJuelsBalance := new(big.Int).Sub(wrapperConsumerJuelsBalanceBeforeRequest, consumerStatus.Paid) + + wrapperConsumerJuelsBalanceAfterRequest, err := linkToken.BalanceOf(testcontext.Get(t), wrapperContracts.LoadTestConsumers[0].Address()) + require.NoError(t, err, "error getting wrapper consumer balance") + require.Equal(t, expectedWrapperConsumerJuelsBalance, wrapperConsumerJuelsBalanceAfterRequest) + + //todo: uncomment when VRF-651 will be fixed + //require.Equal(t, 1, consumerStatus.Paid.Cmp(randomWordsFulfilledEvent.Payment), "Expected Consumer contract pay more than the Coordinator Sub") + vrfv2plus.LogFulfillmentDetailsLinkBilling(l, wrapperConsumerJuelsBalanceBeforeRequest, wrapperConsumerJuelsBalanceAfterRequest, consumerStatus, randomWordsFulfilledEvent) + + require.Equal(t, *testConfig.NumberOfWords, uint32(len(consumerStatus.RandomWords))) + for _, w := range consumerStatus.RandomWords { + l.Info().Str("Output", w.String()).Msg("Randomness fulfilled") + require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0") + } + }) + t.Run("Native Billing", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + testConfig := configCopy.VRFv2Plus.General + var isNativeBilling = true + + wrapperConsumerBalanceBeforeRequestWei, err := env.EVMClient.BalanceAt(testcontext.Get(t), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address())) + require.NoError(t, err, "error getting wrapper consumer balance") + + wrapperSubscription, err := vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), wrapperSubID) + require.NoError(t, err, "error getting subscription information") + subBalanceBeforeRequest := 
wrapperSubscription.NativeBalance + + randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment( + wrapperContracts.LoadTestConsumers[0], + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusData, + wrapperSubID, + isNativeBilling, + *configCopy.VRFv2Plus.General.MinimumConfirmations, + *configCopy.VRFv2Plus.General.CallbackGasLimit, + *configCopy.VRFv2Plus.General.NumberOfWords, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequestDeviation, + configCopy.VRFv2Plus.General.RandomWordsFulfilledEventTimeout.Duration, + l, + ) + require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + + expectedSubBalanceWei := new(big.Int).Sub(subBalanceBeforeRequest, randomWordsFulfilledEvent.Payment) + wrapperSubscription, err = vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), wrapperSubID) + require.NoError(t, err, "error getting subscription information") + subBalanceAfterRequest := wrapperSubscription.NativeBalance + require.Equal(t, expectedSubBalanceWei, subBalanceAfterRequest) + + consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + require.NoError(t, err, "error getting rand request status") + require.True(t, consumerStatus.Fulfilled) + + expectedWrapperConsumerWeiBalance := new(big.Int).Sub(wrapperConsumerBalanceBeforeRequestWei, consumerStatus.Paid) + + wrapperConsumerBalanceAfterRequestWei, err := env.EVMClient.BalanceAt(testcontext.Get(t), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address())) + require.NoError(t, err, "error getting wrapper consumer balance") + require.Equal(t, expectedWrapperConsumerWeiBalance, wrapperConsumerBalanceAfterRequestWei) + + //todo: uncomment when VRF-651 will be fixed + //require.Equal(t, 1, consumerStatus.Paid.Cmp(randomWordsFulfilledEvent.Payment), "Expected Consumer contract pay 
more than the Coordinator Sub") + vrfv2plus.LogFulfillmentDetailsNativeBilling(l, wrapperConsumerBalanceBeforeRequestWei, wrapperConsumerBalanceAfterRequestWei, consumerStatus, randomWordsFulfilledEvent) + + require.Equal(t, *testConfig.NumberOfWords, uint32(len(consumerStatus.RandomWords))) + for _, w := range consumerStatus.RandomWords { + l.Info().Str("Output", w.String()).Msg("Randomness fulfilled") + require.Equal(t, 1, w.Cmp(big.NewInt(0)), "Expected the VRF job give an answer bigger than 0") + } + }) + }) + t.Run("Canceling Sub And Returning Funds", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + subIDsForCancelling, err := vrfv2plus.CreateFundSubsAndAddConsumers( + env, + big.NewFloat(*configCopy.GetVRFv2PlusConfig().General.SubscriptionFundingAmountNative), + big.NewFloat(*configCopy.GetVRFv2PlusConfig().General.SubscriptionFundingAmountLink), + linkToken, + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusContracts.VRFV2PlusConsumer, + 1, + vrfv2plus_config.BillingType(*configCopy.GetVRFv2PlusConfig().General.SubscriptionBillingType), + ) + require.NoError(t, err) + subIDForCancelling := subIDsForCancelling[0] + + testWalletAddress, err := actions.GenerateWallet() + require.NoError(t, err) + + testWalletBalanceNativeBeforeSubCancelling, err := env.EVMClient.BalanceAt(testcontext.Get(t), testWalletAddress) + require.NoError(t, err) + + testWalletBalanceLinkBeforeSubCancelling, err := linkToken.BalanceOf(testcontext.Get(t), testWalletAddress.String()) + require.NoError(t, err) + + subscriptionForCancelling, err := vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subIDForCancelling) + require.NoError(t, err, "error getting subscription information") + + subBalanceLink := subscriptionForCancelling.Balance + subBalanceNative := subscriptionForCancelling.NativeBalance + l.Info(). + Str("Subscription Amount Native", subBalanceNative.String()). + Str("Subscription Amount Link", subBalanceLink.String()). 
+ Str("Returning funds from SubID", subIDForCancelling.String()). + Str("Returning funds to", testWalletAddress.String()). + Msg("Canceling subscription and returning funds to subscription owner") + tx, err := vrfv2PlusContracts.CoordinatorV2Plus.CancelSubscription(subIDForCancelling, testWalletAddress) + require.NoError(t, err, "Error canceling subscription") + + subscriptionCanceledEvent, err := vrfv2PlusContracts.CoordinatorV2Plus.WaitForSubscriptionCanceledEvent(subIDForCancelling, time.Second*30) + require.NoError(t, err, "error waiting for subscription canceled event") + + cancellationTxReceipt, err := env.EVMClient.GetTxReceipt(tx.Hash()) + require.NoError(t, err, "error getting tx cancellation Tx Receipt") + + txGasUsed := new(big.Int).SetUint64(cancellationTxReceipt.GasUsed) + cancellationTxFeeWei := new(big.Int).Mul(txGasUsed, cancellationTxReceipt.EffectiveGasPrice) + + l.Info(). + Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()). + Str("Effective Gas Price", cancellationTxReceipt.EffectiveGasPrice.String()). + Uint64("Gas Used", cancellationTxReceipt.GasUsed). + Msg("Cancellation TX Receipt") + + l.Info(). + Str("Returned Subscription Amount Native", subscriptionCanceledEvent.AmountNative.String()). + Str("Returned Subscription Amount Link", subscriptionCanceledEvent.AmountLink.String()). + Str("SubID", subscriptionCanceledEvent.SubId.String()). + Str("Returned to", subscriptionCanceledEvent.To.String()). 
+ Msg("Subscription Canceled Event") + + require.Equal(t, subBalanceNative, subscriptionCanceledEvent.AmountNative, "SubscriptionCanceled event native amount is not equal to sub amount while canceling subscription") + require.Equal(t, subBalanceLink, subscriptionCanceledEvent.AmountLink, "SubscriptionCanceled event PLI amount is not equal to sub amount while canceling subscription") + + testWalletBalanceNativeAfterSubCancelling, err := env.EVMClient.BalanceAt(testcontext.Get(t), testWalletAddress) + require.NoError(t, err) + + testWalletBalanceLinkAfterSubCancelling, err := linkToken.BalanceOf(testcontext.Get(t), testWalletAddress.String()) + require.NoError(t, err) + + //Verify that sub was deleted from Coordinator + _, err = vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subIDForCancelling) + require.Error(t, err, "error not occurred when trying to get deleted subscription from old Coordinator after sub migration") + + subFundsReturnedNativeActual := new(big.Int).Sub(testWalletBalanceNativeAfterSubCancelling, testWalletBalanceNativeBeforeSubCancelling) + subFundsReturnedLinkActual := new(big.Int).Sub(testWalletBalanceLinkAfterSubCancelling, testWalletBalanceLinkBeforeSubCancelling) + + subFundsReturnedNativeExpected := new(big.Int).Sub(subBalanceNative, cancellationTxFeeWei) + deltaSpentOnCancellationTxFee := new(big.Int).Sub(subBalanceNative, subFundsReturnedNativeActual) + l.Info(). + Str("Sub Balance - Native", subBalanceNative.String()). + Str("Delta Spent On Cancellation Tx Fee - `NativeBalance - subFundsReturnedNativeActual`", deltaSpentOnCancellationTxFee.String()). + Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()). + Str("Sub Funds Returned Actual - Native", subFundsReturnedNativeActual.String()). + Str("Sub Funds Returned Expected - `NativeBalance - cancellationTxFeeWei`", subFundsReturnedNativeExpected.String()). + Str("Sub Funds Returned Actual - Link", subFundsReturnedLinkActual.String()). 
+ Str("Sub Balance - Link", subBalanceLink.String()). + Msg("Sub funds returned") + + //todo - this fails on SIMULATED env as tx cost is calculated different as for testnets and it's not receipt.EffectiveGasPrice*receipt.GasUsed + //require.Equal(t, subFundsReturnedNativeExpected, subFundsReturnedNativeActual, "Returned funds are not equal to sub balance that was cancelled") + require.Equal(t, 1, testWalletBalanceNativeAfterSubCancelling.Cmp(testWalletBalanceNativeBeforeSubCancelling), "Native funds were not returned after sub cancellation") + require.Equal(t, 0, subBalanceLink.Cmp(subFundsReturnedLinkActual), "Returned PLI funds are not equal to sub balance that was cancelled") + + }) + t.Run("Owner Canceling Sub And Returning Funds While Having Pending Requests", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + testConfig := configCopy.VRFv2Plus.General + + //underfund subs in order rand fulfillments to fail + testConfig.SubscriptionFundingAmountNative = ptr.Ptr(float64(0.000000000000000001)) //1 Wei + testConfig.SubscriptionFundingAmountLink = ptr.Ptr(float64(0.000000000000000001)) //1 Juels + + subIDsForCancelling, err := vrfv2plus.CreateFundSubsAndAddConsumers( + env, + big.NewFloat(*configCopy.GetVRFv2PlusConfig().General.SubscriptionFundingAmountNative), + big.NewFloat(*configCopy.GetVRFv2PlusConfig().General.SubscriptionFundingAmountLink), + linkToken, + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusContracts.VRFV2PlusConsumer, + 1, + vrfv2plus_config.BillingType(*configCopy.GetVRFv2PlusConfig().General.SubscriptionBillingType), + ) + require.NoError(t, err) + + subIDForCancelling := subIDsForCancelling[0] + + subscriptionForCancelling, err := vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subIDForCancelling) + require.NoError(t, err, "error getting subscription information") + + vrfv2plus.LogSubDetails(l, subscriptionForCancelling, subIDForCancelling, vrfv2PlusContracts.CoordinatorV2Plus) + + 
activeSubscriptionIdsBeforeSubCancellation, err := vrfv2PlusContracts.CoordinatorV2Plus.GetActiveSubscriptionIds(testcontext.Get(t), big.NewInt(0), big.NewInt(0)) + require.NoError(t, err) + + require.True(t, it_utils.BigIntSliceContains(activeSubscriptionIdsBeforeSubCancellation, subIDForCancelling)) + + pendingRequestsExist, err := vrfv2PlusContracts.CoordinatorV2Plus.PendingRequestsExist(testcontext.Get(t), subIDForCancelling) + require.NoError(t, err) + require.False(t, pendingRequestsExist, "Pending requests should not exist") + + randomWordsFulfilledEventTimeout := 5 * time.Second + _, err = vrfv2plus.RequestRandomnessAndWaitForFulfillment( + vrfv2PlusContracts.VRFV2PlusConsumer[0], + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusData, + subIDForCancelling, + false, + *configCopy.VRFv2Plus.General.MinimumConfirmations, + *configCopy.VRFv2Plus.General.CallbackGasLimit, + *configCopy.VRFv2Plus.General.NumberOfWords, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequestDeviation, + randomWordsFulfilledEventTimeout, + l, + ) + + require.Error(t, err, "error should occur for waiting for fulfilment due to low sub balance") + + _, err = vrfv2plus.RequestRandomnessAndWaitForFulfillment( + vrfv2PlusContracts.VRFV2PlusConsumer[0], + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusData, + subIDForCancelling, + true, + *configCopy.VRFv2Plus.General.MinimumConfirmations, + *configCopy.VRFv2Plus.General.CallbackGasLimit, + *configCopy.VRFv2Plus.General.NumberOfWords, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequestDeviation, + randomWordsFulfilledEventTimeout, + l, + ) + + require.Error(t, err, "error should occur for waiting for fulfilment due to low sub balance") + + pendingRequestsExist, err = vrfv2PlusContracts.CoordinatorV2Plus.PendingRequestsExist(testcontext.Get(t), subIDForCancelling) + require.NoError(t, 
err) + require.True(t, pendingRequestsExist, "Pending requests should exist after unfulfilled rand requests due to low sub balance") + + walletBalanceNativeBeforeSubCancelling, err := env.EVMClient.BalanceAt(testcontext.Get(t), common.HexToAddress(defaultWalletAddress)) + require.NoError(t, err) + + walletBalanceLinkBeforeSubCancelling, err := linkToken.BalanceOf(testcontext.Get(t), defaultWalletAddress) + require.NoError(t, err) + + subscriptionForCancelling, err = vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subIDForCancelling) + require.NoError(t, err, "error getting subscription information") + + subBalanceLink := subscriptionForCancelling.Balance + subBalanceNative := subscriptionForCancelling.NativeBalance + l.Info(). + Str("Subscription Amount Native", subBalanceNative.String()). + Str("Subscription Amount Link", subBalanceLink.String()). + Str("Returning funds from SubID", subIDForCancelling.String()). + Str("Returning funds to", defaultWalletAddress). + Msg("Canceling subscription and returning funds to subscription owner") + tx, err := vrfv2PlusContracts.CoordinatorV2Plus.OwnerCancelSubscription(subIDForCancelling) + require.NoError(t, err, "Error canceling subscription") + + subscriptionCanceledEvent, err := vrfv2PlusContracts.CoordinatorV2Plus.WaitForSubscriptionCanceledEvent(subIDForCancelling, time.Second*30) + require.NoError(t, err, "error waiting for subscription canceled event") + + cancellationTxReceipt, err := env.EVMClient.GetTxReceipt(tx.Hash()) + require.NoError(t, err, "error getting tx cancellation Tx Receipt") + + txGasUsed := new(big.Int).SetUint64(cancellationTxReceipt.GasUsed) + cancellationTxFeeWei := new(big.Int).Mul(txGasUsed, cancellationTxReceipt.EffectiveGasPrice) + + l.Info(). + Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()). + Str("Effective Gas Price", cancellationTxReceipt.EffectiveGasPrice.String()). + Uint64("Gas Used", cancellationTxReceipt.GasUsed). 
+			Msg("Cancellation TX Receipt")
+
+		l.Info().
+			Str("Returned Subscription Amount Native", subscriptionCanceledEvent.AmountNative.String()).
+			Str("Returned Subscription Amount Link", subscriptionCanceledEvent.AmountLink.String()).
+			Str("SubID", subscriptionCanceledEvent.SubId.String()).
+			Str("Returned to", subscriptionCanceledEvent.To.String()).
+			Msg("Subscription Canceled Event")
+
+		require.Equal(t, subBalanceNative, subscriptionCanceledEvent.AmountNative, "SubscriptionCanceled event native amount is not equal to sub amount while canceling subscription")
+		require.Equal(t, subBalanceLink, subscriptionCanceledEvent.AmountLink, "SubscriptionCanceled event PLI amount is not equal to sub amount while canceling subscription")
+
+		walletBalanceNativeAfterSubCancelling, err := env.EVMClient.BalanceAt(testcontext.Get(t), common.HexToAddress(defaultWalletAddress))
+		require.NoError(t, err)
+
+		walletBalanceLinkAfterSubCancelling, err := linkToken.BalanceOf(testcontext.Get(t), defaultWalletAddress)
+		require.NoError(t, err)
+
+		//Verify that sub was deleted from Coordinator
+		_, err = vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subIDForCancelling)
+		require.Error(t, err, "error not occurred when trying to get deleted subscription from Coordinator after sub cancellation")
+
+		subFundsReturnedNativeActual := new(big.Int).Sub(walletBalanceNativeAfterSubCancelling, walletBalanceNativeBeforeSubCancelling)
+		subFundsReturnedLinkActual := new(big.Int).Sub(walletBalanceLinkAfterSubCancelling, walletBalanceLinkBeforeSubCancelling)
+
+		subFundsReturnedNativeExpected := new(big.Int).Sub(subBalanceNative, cancellationTxFeeWei)
+		deltaSpentOnCancellationTxFee := new(big.Int).Sub(subBalanceNative, subFundsReturnedNativeActual)
+		l.Info().
+			Str("Sub Balance - Native", subBalanceNative.String()).
+			Str("Delta Spent On Cancellation Tx Fee - `NativeBalance - subFundsReturnedNativeActual`", deltaSpentOnCancellationTxFee.String()).
+ Str("Cancellation Tx Fee Wei", cancellationTxFeeWei.String()). + Str("Sub Funds Returned Actual - Native", subFundsReturnedNativeActual.String()). + Str("Sub Funds Returned Expected - `NativeBalance - cancellationTxFeeWei`", subFundsReturnedNativeExpected.String()). + Str("Sub Funds Returned Actual - Link", subFundsReturnedLinkActual.String()). + Str("Sub Balance - Link", subBalanceLink.String()). + Str("walletBalanceNativeBeforeSubCancelling", walletBalanceNativeBeforeSubCancelling.String()). + Str("walletBalanceNativeAfterSubCancelling", walletBalanceNativeAfterSubCancelling.String()). + Msg("Sub funds returned") + + //todo - need to use different wallet for each test to verify exact amount of Native/PLI returned + //todo - as defaultWallet is used in other tests in parallel which might affect the balance - TT-684 + //require.Equal(t, 1, walletBalanceNativeAfterSubCancelling.Cmp(walletBalanceNativeBeforeSubCancelling), "Native funds were not returned after sub cancellation") + + //todo - this fails on SIMULATED env as tx cost is calculated different as for testnets and it's not receipt.EffectiveGasPrice*receipt.GasUsed + //require.Equal(t, subFundsReturnedNativeExpected, subFundsReturnedNativeActual, "Returned funds are not equal to sub balance that was cancelled") + require.Equal(t, 0, subBalanceLink.Cmp(subFundsReturnedLinkActual), "Returned PLI funds are not equal to sub balance that was cancelled") + + activeSubscriptionIdsAfterSubCancellation, err := vrfv2PlusContracts.CoordinatorV2Plus.GetActiveSubscriptionIds(testcontext.Get(t), big.NewInt(0), big.NewInt(0)) + require.NoError(t, err, "error getting active subscription ids") + + require.False( + t, + it_utils.BigIntSliceContains(activeSubscriptionIdsAfterSubCancellation, subIDForCancelling), + "Active subscription ids should not contain sub id after sub cancellation", + ) + }) + + t.Run("Owner Withdraw", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + subIDsForWithdraw, err := 
vrfv2plus.CreateFundSubsAndAddConsumers( + env, + big.NewFloat(*configCopy.GetVRFv2PlusConfig().General.SubscriptionFundingAmountNative), + big.NewFloat(*configCopy.GetVRFv2PlusConfig().General.SubscriptionFundingAmountLink), + linkToken, + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusContracts.VRFV2PlusConsumer, + 1, + vrfv2plus_config.BillingType(*configCopy.GetVRFv2PlusConfig().General.SubscriptionBillingType), + ) + require.NoError(t, err) + subIDForWithdraw := subIDsForWithdraw[0] + + fulfilledEventLink, err := vrfv2plus.RequestRandomnessAndWaitForFulfillment( + vrfv2PlusContracts.VRFV2PlusConsumer[0], + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusData, + subIDForWithdraw, + false, + *configCopy.VRFv2Plus.General.MinimumConfirmations, + *configCopy.VRFv2Plus.General.CallbackGasLimit, + *configCopy.VRFv2Plus.General.NumberOfWords, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequestDeviation, + configCopy.VRFv2Plus.General.RandomWordsFulfilledEventTimeout.Duration, + l, + ) + require.NoError(t, err) + + fulfilledEventNative, err := vrfv2plus.RequestRandomnessAndWaitForFulfillment( + vrfv2PlusContracts.VRFV2PlusConsumer[0], + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusData, + subIDForWithdraw, + true, + *configCopy.VRFv2Plus.General.MinimumConfirmations, + *configCopy.VRFv2Plus.General.CallbackGasLimit, + *configCopy.VRFv2Plus.General.NumberOfWords, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequestDeviation, + configCopy.VRFv2Plus.General.RandomWordsFulfilledEventTimeout.Duration, + l, + ) + require.NoError(t, err) + amountToWithdrawLink := fulfilledEventLink.Payment + + defaultWalletBalanceNativeBeforeWithdraw, err := env.EVMClient.BalanceAt(testcontext.Get(t), common.HexToAddress(defaultWalletAddress)) + require.NoError(t, err) + + defaultWalletBalanceLinkBeforeWithdraw, err := 
linkToken.BalanceOf(testcontext.Get(t), defaultWalletAddress) + require.NoError(t, err) + + l.Info(). + Str("Returning to", defaultWalletAddress). + Str("Amount", amountToWithdrawLink.String()). + Msg("Invoking Oracle Withdraw for PLI") + + err = vrfv2PlusContracts.CoordinatorV2Plus.Withdraw( + common.HexToAddress(defaultWalletAddress), + ) + require.NoError(t, err, "error withdrawing PLI from coordinator to default wallet") + amountToWithdrawNative := fulfilledEventNative.Payment + + l.Info(). + Str("Returning to", defaultWalletAddress). + Str("Amount", amountToWithdrawNative.String()). + Msg("Invoking Oracle Withdraw for Native") + + err = vrfv2PlusContracts.CoordinatorV2Plus.WithdrawNative( + common.HexToAddress(defaultWalletAddress), + ) + require.NoError(t, err, "error withdrawing Native tokens from coordinator to default wallet") + + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, vrfcommon.ErrWaitTXsComplete) + + defaultWalletBalanceNativeAfterWithdraw, err := env.EVMClient.BalanceAt(testcontext.Get(t), common.HexToAddress(defaultWalletAddress)) + require.NoError(t, err) + + defaultWalletBalanceLinkAfterWithdraw, err := linkToken.BalanceOf(testcontext.Get(t), defaultWalletAddress) + require.NoError(t, err) + + //not possible to verify exact amount of Native/PLI returned as defaultWallet is used in other tests in parallel which might affect the balance + require.Equal(t, 1, defaultWalletBalanceNativeAfterWithdraw.Cmp(defaultWalletBalanceNativeBeforeWithdraw), "Native funds were not returned after oracle withdraw native") + require.Equal(t, 1, defaultWalletBalanceLinkAfterWithdraw.Cmp(defaultWalletBalanceLinkBeforeWithdraw), "PLI funds were not returned after oracle withdraw") + }) +} + +func TestVRFv2PlusMultipleSendingKeys(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.VRFv2Plus) + if err != nil { + t.Fatal(err) + } + + network, err := actions.EthereumNetworkConfigFromConfig(l, 
&config) + require.NoError(t, err, "Error building ethereum network config") + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithPrivateEthereumNetwork(network). + WithCLNodes(1). + WithFunding(big.NewFloat(*config.Common.PluginNodeFunding)). + WithStandardCleanup(). + Build() + require.NoError(t, err, "error creating test env") + + env.ParallelTransactions(true) + + mockETHLinkFeed, err := actions.DeployMockETHLinkFeed(env.ContractDeployer, big.NewInt(*config.VRFv2Plus.General.LinkNativeFeedResponse)) + require.NoError(t, err, "error deploying mock ETH/PLI feed") + + linkToken, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err, "error deploying PLI contract") + + numberOfTxKeysToCreate := 2 + vrfv2PlusContracts, subIDs, vrfv2PlusData, _, err := vrfv2plus.SetupVRFV2_5Environment( + env, + []vrfcommon.VRFNodeType{vrfcommon.VRF}, + &config, + linkToken, + mockETHLinkFeed, + numberOfTxKeysToCreate, + 1, + 1, + l, + ) + require.NoError(t, err, "error setting up VRF v2_5 env") + + subID := subIDs[0] + + subscription, err := vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err, "error getting subscription information") + + vrfv2plus.LogSubDetails(l, subscription, subID, vrfv2PlusContracts.CoordinatorV2Plus) + + t.Run("Request Randomness with multiple sending keys", func(t *testing.T) { + configCopy := config.MustCopy().(tc.TestConfig) + var isNativeBilling = false + txKeys, _, err := env.ClCluster.Nodes[0].API.ReadTxKeys("evm") + require.NoError(t, err, "error reading tx keys") + + require.Equal(t, numberOfTxKeysToCreate+1, len(txKeys.Data)) + + var fulfillmentTxFromAddresses []string + for i := 0; i < numberOfTxKeysToCreate+1; i++ { + randomWordsFulfilledEvent, err := vrfv2plus.RequestRandomnessAndWaitForFulfillment( + vrfv2PlusContracts.VRFV2PlusConsumer[0], + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusData, + subID, + 
isNativeBilling, + *configCopy.VRFv2Plus.General.MinimumConfirmations, + *configCopy.VRFv2Plus.General.CallbackGasLimit, + *configCopy.VRFv2Plus.General.NumberOfWords, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequest, + *configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequestDeviation, + configCopy.VRFv2Plus.General.RandomWordsFulfilledEventTimeout.Duration, + l, + ) + require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + + //todo - move TransactionByHash to EVMClient in CTF + fulfillmentTx, _, err := actions.GetTxByHash(testcontext.Get(t), env.EVMClient, randomWordsFulfilledEvent.Raw.TxHash) + require.NoError(t, err, "error getting tx from hash") + fulfillmentTxFromAddress, err := actions.GetTxFromAddress(fulfillmentTx) + require.NoError(t, err, "error getting tx from address") + fulfillmentTxFromAddresses = append(fulfillmentTxFromAddresses, fulfillmentTxFromAddress) + } + require.Equal(t, numberOfTxKeysToCreate+1, len(fulfillmentTxFromAddresses)) + var txKeyAddresses []string + for _, txKey := range txKeys.Data { + txKeyAddresses = append(txKeyAddresses, txKey.Attributes.Address) + } + less := func(a, b string) bool { return a < b } + equalIgnoreOrder := cmp.Diff(txKeyAddresses, fulfillmentTxFromAddresses, cmpopts.SortSlices(less)) == "" + require.True(t, equalIgnoreOrder) + }) +} + +func TestVRFv2PlusMigration(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.VRFv2Plus) + if err != nil { + t.Fatal(err) + } + + network, err := actions.EthereumNetworkConfigFromConfig(l, &config) + require.NoError(t, err, "Error building ethereum network config") + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithPrivateEthereumNetwork(network). + WithCLNodes(1). + WithFunding(big.NewFloat(*config.Common.PluginNodeFunding)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err, "error creating test env") + env.ParallelTransactions(true) + + mockETHLinkFeedAddress, err := actions.DeployMockETHLinkFeed(env.ContractDeployer, big.NewInt(*config.VRFv2Plus.General.LinkNativeFeedResponse)) + require.NoError(t, err, "error deploying mock ETH/PLI feed") + + linkAddress, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err, "error deploying PLI contract") + + vrfv2PlusContracts, subIDs, vrfv2PlusData, nodesMap, err := vrfv2plus.SetupVRFV2_5Environment( + env, + []vrfcommon.VRFNodeType{vrfcommon.VRF}, + &config, + linkAddress, + mockETHLinkFeedAddress, + 0, + 2, + 1, + l, + ) + require.NoError(t, err, "error setting up VRF v2_5 env") + + subID := subIDs[0] + + subscription, err := vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err, "error getting subscription information") + + vrfv2plus.LogSubDetails(l, subscription, subID, vrfv2PlusContracts.CoordinatorV2Plus) + + activeSubIdsOldCoordinatorBeforeMigration, err := vrfv2PlusContracts.CoordinatorV2Plus.GetActiveSubscriptionIds(testcontext.Get(t), big.NewInt(0), big.NewInt(0)) + require.NoError(t, err, "error occurred getting active sub ids") + require.Len(t, activeSubIdsOldCoordinatorBeforeMigration, 1, "Active Sub Ids length is not equal to 1") + require.Equal(t, subID, activeSubIdsOldCoordinatorBeforeMigration[0]) + + oldSubscriptionBeforeMigration, err := vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err, "error getting subscription information") + + //Migration Process + newCoordinator, err := env.ContractDeployer.DeployVRFCoordinatorV2PlusUpgradedVersion(vrfv2PlusContracts.BHS.Address()) + require.NoError(t, err, "error deploying VRF CoordinatorV2PlusUpgradedVersion") + + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, vrfcommon.ErrWaitTXsComplete) + + _, err = 
vrfv2plus.VRFV2PlusUpgradedVersionRegisterProvingKey(vrfv2PlusData.VRFKey, newCoordinator) + require.NoError(t, err, fmt.Errorf("%s, err: %w", vrfcommon.ErrRegisteringProvingKey, err)) + + vrfv2PlusConfig := config.VRFv2Plus.General + err = newCoordinator.SetConfig( + *vrfv2PlusConfig.MinimumConfirmations, + *vrfv2PlusConfig.MaxGasLimitCoordinatorConfig, + *vrfv2PlusConfig.StalenessSeconds, + *vrfv2PlusConfig.GasAfterPaymentCalculation, + big.NewInt(*vrfv2PlusConfig.LinkNativeFeedResponse), + *vrfv2PlusConfig.FulfillmentFlatFeeNativePPM, + *vrfv2PlusConfig.FulfillmentFlatFeeLinkDiscountPPM, + *vrfv2PlusConfig.NativePremiumPercentage, + *vrfv2PlusConfig.LinkPremiumPercentage, + ) + require.NoError(t, err) + + err = newCoordinator.SetPLIAndPLINativeFeed(linkAddress.Address(), mockETHLinkFeedAddress.Address()) + require.NoError(t, err, vrfv2plus.ErrSetLinkNativeLinkFeed) + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, vrfcommon.ErrWaitTXsComplete) + + vrfJobSpecConfig := vrfcommon.VRFJobSpecConfig{ + ForwardingAllowed: false, + CoordinatorAddress: newCoordinator.Address(), + FromAddresses: nodesMap[vrfcommon.VRF].TXKeyAddressStrings, + EVMChainID: env.EVMClient.GetChainID().String(), + MinIncomingConfirmations: int(*vrfv2PlusConfig.MinimumConfirmations), + PublicKey: vrfv2PlusData.VRFKey.Data.ID, + EstimateGasMultiplier: 1, + BatchFulfillmentEnabled: false, + BatchFulfillmentGasMultiplier: 1.15, + PollPeriod: time.Second * 1, + RequestTimeout: time.Hour * 24, + } + + _, err = vrfv2plus.CreateVRFV2PlusJob( + env.ClCluster.NodeAPIs()[0], + vrfJobSpecConfig, + ) + require.NoError(t, err, vrfv2plus.ErrCreateVRFV2PlusJobs) + + err = vrfv2PlusContracts.CoordinatorV2Plus.RegisterMigratableCoordinator(newCoordinator.Address()) + require.NoError(t, err, "error registering migratable coordinator") + + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, vrfcommon.ErrWaitTXsComplete) + + oldCoordinatorLinkTotalBalanceBeforeMigration, 
oldCoordinatorEthTotalBalanceBeforeMigration, err := vrfv2plus.GetCoordinatorTotalBalance(vrfv2PlusContracts.CoordinatorV2Plus) + require.NoError(t, err) + + migratedCoordinatorLinkTotalBalanceBeforeMigration, migratedCoordinatorEthTotalBalanceBeforeMigration, err := vrfv2plus.GetUpgradedCoordinatorTotalBalance(newCoordinator) + require.NoError(t, err) + + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, vrfcommon.ErrWaitTXsComplete) + + err = vrfv2PlusContracts.CoordinatorV2Plus.Migrate(subID, newCoordinator.Address()) + require.NoError(t, err, "error migrating sub id ", subID.String(), " from ", vrfv2PlusContracts.CoordinatorV2Plus.Address(), " to new Coordinator address ", newCoordinator.Address()) + migrationCompletedEvent, err := vrfv2PlusContracts.CoordinatorV2Plus.WaitForMigrationCompletedEvent(time.Minute * 1) + require.NoError(t, err, "error waiting for MigrationCompleted event") + err = env.EVMClient.WaitForEvents() + require.NoError(t, err, vrfcommon.ErrWaitTXsComplete) + + vrfv2plus.LogMigrationCompletedEvent(l, migrationCompletedEvent, vrfv2PlusContracts) + + oldCoordinatorLinkTotalBalanceAfterMigration, oldCoordinatorEthTotalBalanceAfterMigration, err := vrfv2plus.GetCoordinatorTotalBalance(vrfv2PlusContracts.CoordinatorV2Plus) + require.NoError(t, err) + + migratedCoordinatorLinkTotalBalanceAfterMigration, migratedCoordinatorEthTotalBalanceAfterMigration, err := vrfv2plus.GetUpgradedCoordinatorTotalBalance(newCoordinator) + require.NoError(t, err) + + migratedSubscription, err := newCoordinator.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err, "error getting subscription information") + + vrfv2plus.LogSubDetailsAfterMigration(l, newCoordinator, subID, migratedSubscription) + + //Verify that Coordinators were updated in Consumers + for _, consumer := range vrfv2PlusContracts.VRFV2PlusConsumer { + coordinatorAddressInConsumerAfterMigration, err := consumer.GetCoordinator(testcontext.Get(t)) + require.NoError(t, err, 
"error getting Coordinator from Consumer contract") + require.Equal(t, newCoordinator.Address(), coordinatorAddressInConsumerAfterMigration.String()) + l.Debug(). + Str("Consumer", consumer.Address()). + Str("Coordinator", coordinatorAddressInConsumerAfterMigration.String()). + Msg("Coordinator Address in Consumer After Migration") + } + + //Verify old and migrated subs + require.Equal(t, oldSubscriptionBeforeMigration.NativeBalance, migratedSubscription.NativeBalance) + require.Equal(t, oldSubscriptionBeforeMigration.Balance, migratedSubscription.Balance) + require.Equal(t, oldSubscriptionBeforeMigration.Owner, migratedSubscription.Owner) + require.Equal(t, oldSubscriptionBeforeMigration.Consumers, migratedSubscription.Consumers) + + //Verify that old sub was deleted from old Coordinator + _, err = vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subID) + require.Error(t, err, "error not occurred when trying to get deleted subscription from old Coordinator after sub migration") + + _, err = vrfv2PlusContracts.CoordinatorV2Plus.GetActiveSubscriptionIds(testcontext.Get(t), big.NewInt(0), big.NewInt(0)) + require.Error(t, err, "error not occurred getting active sub ids. 
Should occur since it should revert when sub id array is empty") + + activeSubIdsMigratedCoordinator, err := newCoordinator.GetActiveSubscriptionIds(testcontext.Get(t), big.NewInt(0), big.NewInt(0)) + require.NoError(t, err, "error occurred getting active sub ids") + require.Len(t, activeSubIdsMigratedCoordinator, 1, "Active Sub Ids length is not equal to 1 for Migrated Coordinator after migration") + require.Equal(t, subID, activeSubIdsMigratedCoordinator[0]) + + //Verify that total balances changed for Link and Eth for new and old coordinator + expectedLinkTotalBalanceForMigratedCoordinator := new(big.Int).Add(oldSubscriptionBeforeMigration.Balance, migratedCoordinatorLinkTotalBalanceBeforeMigration) + expectedEthTotalBalanceForMigratedCoordinator := new(big.Int).Add(oldSubscriptionBeforeMigration.NativeBalance, migratedCoordinatorEthTotalBalanceBeforeMigration) + + expectedLinkTotalBalanceForOldCoordinator := new(big.Int).Sub(oldCoordinatorLinkTotalBalanceBeforeMigration, oldSubscriptionBeforeMigration.Balance) + expectedEthTotalBalanceForOldCoordinator := new(big.Int).Sub(oldCoordinatorEthTotalBalanceBeforeMigration, oldSubscriptionBeforeMigration.NativeBalance) + require.Equal(t, 0, expectedLinkTotalBalanceForMigratedCoordinator.Cmp(migratedCoordinatorLinkTotalBalanceAfterMigration)) + require.Equal(t, 0, expectedEthTotalBalanceForMigratedCoordinator.Cmp(migratedCoordinatorEthTotalBalanceAfterMigration)) + require.Equal(t, 0, expectedLinkTotalBalanceForOldCoordinator.Cmp(oldCoordinatorLinkTotalBalanceAfterMigration)) + require.Equal(t, 0, expectedEthTotalBalanceForOldCoordinator.Cmp(oldCoordinatorEthTotalBalanceAfterMigration)) + + //Verify rand requests fulfills with Link Token billing + _, err = vrfv2plus.RequestRandomnessAndWaitForFulfillmentUpgraded( + vrfv2PlusContracts.VRFV2PlusConsumer[0], + newCoordinator, + vrfv2PlusData, + subID, + false, + *config.VRFv2Plus.General.MinimumConfirmations, + *config.VRFv2Plus.General.CallbackGasLimit, + 
*config.VRFv2Plus.General.NumberOfWords, + *config.VRFv2Plus.General.RandomnessRequestCountPerRequest, + *config.VRFv2Plus.General.RandomnessRequestCountPerRequestDeviation, + l, + ) + require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + + //Verify rand requests fulfills with Native Token billing + _, err = vrfv2plus.RequestRandomnessAndWaitForFulfillmentUpgraded( + vrfv2PlusContracts.VRFV2PlusConsumer[1], + newCoordinator, + vrfv2PlusData, + subID, + true, + *config.VRFv2Plus.General.MinimumConfirmations, + *config.VRFv2Plus.General.CallbackGasLimit, + *config.VRFv2Plus.General.NumberOfWords, + *config.VRFv2Plus.General.RandomnessRequestCountPerRequest, + *config.VRFv2Plus.General.RandomnessRequestCountPerRequestDeviation, + l, + ) + require.NoError(t, err, "error requesting randomness and waiting for fulfilment") +} + +func TestVRFV2PlusWithBHS(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.VRFv2Plus) + require.NoError(t, err, "Error getting config") + + network, err := actions.EthereumNetworkConfigFromConfig(l, &config) + require.NoError(t, err, "Error building ethereum network config") + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithPrivateEthereumNetwork(network). + WithCLNodes(2). + WithFunding(big.NewFloat(*config.Common.PluginNodeFunding)). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err, "error creating test env") + + env.ParallelTransactions(true) + + mockETHLinkFeed, err := env.ContractDeployer.DeployVRFMockETHPLIFeed(big.NewInt(*config.VRFv2Plus.General.LinkNativeFeedResponse)) + + require.NoError(t, err) + linkToken, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err) + + //Underfund Subscription + config.VRFv2Plus.General.SubscriptionFundingAmountLink = ptr.Ptr(float64(0.000000000000000001)) // 1 Juel + + //decrease default span for checking blockhashes for unfulfilled requests + config.VRFv2Plus.General.BHSJobWaitBlocks = ptr.Ptr(2) + config.VRFv2Plus.General.BHSJobLookBackBlocks = ptr.Ptr(20) + + numberOfTxKeysToCreate := 0 + vrfContracts, subIDs, vrfKeyData, nodesMap, err := vrfv2plus.SetupVRFV2_5Environment( + env, + []vrfcommon.VRFNodeType{vrfcommon.VRF, vrfcommon.BHS}, + &config, + linkToken, + mockETHLinkFeed, + numberOfTxKeysToCreate, + 1, + 1, + l, + ) + require.NoError(t, err, "error setting up VRF v2_5 env") + + subID := subIDs[0] + + subscription, err := vrfContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err, "error getting subscription information") + + vrfv2plus.LogSubDetails(l, subscription, subID, vrfContracts.CoordinatorV2Plus) + var isNativeBilling = false + t.Run("BHS Job with complete E2E - wait 256 blocks to see if Rand Request is fulfilled", func(t *testing.T) { + t.Skip("Skipped since should be run on-demand on live testnet due to long execution time") + //BHS node should fill in blockhashes into BHS contract depending on the waitBlocks and lookBackBlocks settings + configCopy := config.MustCopy().(tc.TestConfig) + _, err := vrfContracts.VRFV2PlusConsumer[0].RequestRandomness( + vrfKeyData.KeyHash, + subID, + *configCopy.VRFv2Plus.General.MinimumConfirmations, + *configCopy.VRFv2Plus.General.CallbackGasLimit, + isNativeBilling, + *configCopy.VRFv2Plus.General.NumberOfWords, + 
*configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequest, + ) + require.NoError(t, err, "error requesting randomness") + + randomWordsRequestedEvent, err := vrfContracts.CoordinatorV2Plus.WaitForRandomWordsRequestedEvent( + [][32]byte{vrfKeyData.KeyHash}, + []*big.Int{subID}, + []common.Address{common.HexToAddress(vrfContracts.VRFV2PlusConsumer[0].Address())}, + time.Minute*1, + ) + require.NoError(t, err, "error waiting for randomness requested event") + vrfv2plus.LogRandomnessRequestedEvent(l, vrfContracts.CoordinatorV2Plus, randomWordsRequestedEvent, isNativeBilling) + randRequestBlockNumber := randomWordsRequestedEvent.Raw.BlockNumber + var wg sync.WaitGroup + wg.Add(1) + //Wait at least 256 blocks + _, err = actions.WaitForBlockNumberToBe(randRequestBlockNumber+uint64(257), env.EVMClient, &wg, time.Second*260, t) + wg.Wait() + require.NoError(t, err) + err = vrfv2plus.FundSubscriptions( + env, + big.NewFloat(*configCopy.VRFv2Plus.General.SubscriptionFundingAmountNative), + big.NewFloat(*configCopy.VRFv2Plus.General.SubscriptionFundingAmountLink), + linkToken, + vrfContracts.CoordinatorV2Plus, + subIDs, + vrfv2plus_config.BillingType_Link, + ) + require.NoError(t, err, "error funding subscriptions") + randomWordsFulfilledEvent, err := vrfContracts.CoordinatorV2Plus.WaitForRandomWordsFulfilledEvent( + []*big.Int{subID}, + []*big.Int{randomWordsRequestedEvent.RequestId}, + time.Second*30, + ) + require.NoError(t, err, "error waiting for randomness fulfilled event") + vrfv2plus.LogRandomWordsFulfilledEvent(l, vrfContracts.CoordinatorV2Plus, randomWordsFulfilledEvent, isNativeBilling) + status, err := vrfContracts.VRFV2PlusConsumer[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + require.NoError(t, err, "error getting rand request status") + require.True(t, status.Fulfilled) + l.Debug().Bool("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status") + }) + + t.Run("BHS Job should fill in blockhashes into 
BHS contract for unfulfilled requests", func(t *testing.T) {
+		//BHS node should fill in blockhashes into BHS contract depending on the waitBlocks and lookBackBlocks settings
+		configCopy := config.MustCopy().(tc.TestConfig)
+		_, err := vrfContracts.VRFV2PlusConsumer[0].RequestRandomness(
+			vrfKeyData.KeyHash,
+			subID,
+			*configCopy.VRFv2Plus.General.MinimumConfirmations,
+			*configCopy.VRFv2Plus.General.CallbackGasLimit,
+			isNativeBilling,
+			*configCopy.VRFv2Plus.General.NumberOfWords,
+			*configCopy.VRFv2Plus.General.RandomnessRequestCountPerRequest,
+		)
+		require.NoError(t, err, "error requesting randomness")
+
+		randomWordsRequestedEvent, err := vrfContracts.CoordinatorV2Plus.WaitForRandomWordsRequestedEvent(
+			[][32]byte{vrfKeyData.KeyHash},
+			[]*big.Int{subID},
+			[]common.Address{common.HexToAddress(vrfContracts.VRFV2PlusConsumer[0].Address())},
+			time.Minute*1,
+		)
+		require.NoError(t, err, "error waiting for randomness requested event")
+		vrfv2plus.LogRandomnessRequestedEvent(l, vrfContracts.CoordinatorV2Plus, randomWordsRequestedEvent, isNativeBilling)
+		randRequestBlockNumber := randomWordsRequestedEvent.Raw.BlockNumber
+		_, err = vrfContracts.BHS.GetBlockHash(testcontext.Get(t), big.NewInt(int64(randRequestBlockNumber)))
+		require.Error(t, err, "error not occurred when getting blockhash for a blocknumber which was not stored in BHS contract")
+
+		var wg sync.WaitGroup
+		wg.Add(1)
+		_, err = actions.WaitForBlockNumberToBe(randRequestBlockNumber+uint64(*config.VRFv2Plus.General.BHSJobWaitBlocks+10), env.EVMClient, &wg, time.Minute*1, t)
+		wg.Wait()
+		require.NoError(t, err, "error waiting for blocknumber to be")
+
+		err = env.EVMClient.WaitForEvents()
+		require.NoError(t, err, vrfcommon.ErrWaitTXsComplete)
+		metrics, err := vrfContracts.VRFV2PlusConsumer[0].GetLoadTestMetrics(testcontext.Get(t))
+		// FIX(review): err from GetLoadTestMetrics was previously ignored; a failed call would
+		// surface as a nil-pointer panic on `metrics` below instead of a clear test failure.
+		require.NoError(t, err, "error getting load test metrics")
+		require.Equal(t, 0, metrics.RequestCount.Cmp(big.NewInt(1)))
+		require.Equal(t, 0, metrics.FulfilmentCount.Cmp(big.NewInt(0)))
+
+		var clNodeTxs *client.TransactionsData
+		var txHash string
+		gom := gomega.NewGomegaWithT(t)
+		gom.Eventually(func(g gomega.Gomega) {
+			clNodeTxs, _, err = nodesMap[vrfcommon.BHS].CLNode.API.ReadTransactions()
+			g.Expect(err).ShouldNot(gomega.HaveOccurred(), "error getting CL Node transactions")
+			l.Debug().Int("Number of TXs", len(clNodeTxs.Data)).Msg("BHS Node txs")
+			g.Expect(len(clNodeTxs.Data)).Should(gomega.BeNumerically("==", 1), "Expected 1 tx posted by BHS Node, but found %d", len(clNodeTxs.Data))
+			txHash = clNodeTxs.Data[0].Attributes.Hash
+		}, "2m", "1s").Should(gomega.Succeed())
+
+		require.Equal(t, strings.ToLower(vrfContracts.BHS.Address()), strings.ToLower(clNodeTxs.Data[0].Attributes.To))
+
+		bhsStoreTx, _, err := actions.GetTxByHash(testcontext.Get(t), env.EVMClient, common.HexToHash(txHash))
+		require.NoError(t, err, "error getting tx from hash")
+
+		bhsStoreTxInputData, err := actions.DecodeTxInputData(blockhash_store.BlockhashStoreABI, bhsStoreTx.Data())
+		// FIX(review): err from DecodeTxInputData was previously ignored; a decode failure would
+		// panic on the map index / *big.Int type assertion below instead of failing cleanly.
+		require.NoError(t, err, "error decoding BHS store tx input data")
+		l.Info().
+			Str("Block Number", bhsStoreTxInputData["n"].(*big.Int).String()).
+			Msg("BHS Node's Store Blockhash for Blocknumber Method TX")
+		require.Equal(t, randRequestBlockNumber, bhsStoreTxInputData["n"].(*big.Int).Uint64())
+
+		err = env.EVMClient.WaitForEvents()
+		require.NoError(t, err, vrfcommon.ErrWaitTXsComplete)
+
+		var randRequestBlockHash [32]byte
+		gom.Eventually(func(g gomega.Gomega) {
+			randRequestBlockHash, err = vrfContracts.BHS.GetBlockHash(testcontext.Get(t), big.NewInt(int64(randRequestBlockNumber)))
+			g.Expect(err).ShouldNot(gomega.HaveOccurred(), "error getting blockhash for a blocknumber which was stored in BHS contract")
+		}, "2m", "1s").Should(gomega.Succeed())
+		l.Info().
+			Str("Randomness Request's Blockhash", randomWordsRequestedEvent.Raw.BlockHash.String()).
+			Str("Block Hash stored by BHS contract", fmt.Sprintf("0x%x", randRequestBlockHash)).
+ Msg("BHS Contract's stored Blockhash for Randomness Request") + require.Equal(t, 0, randomWordsRequestedEvent.Raw.BlockHash.Cmp(randRequestBlockHash)) + }) +} + +func TestVRFv2PlusPendingBlockSimulationAndZeroConfirmationDelays(t *testing.T) { + t.Parallel() + l := logging.GetTestLogger(t) + + config, err := tc.GetConfig("Smoke", tc.VRFv2Plus) + if err != nil { + t.Fatal(err) + } + + // override config with minConf = 0 and use pending block for simulation + config.VRFv2Plus.General.MinimumConfirmations = ptr.Ptr[uint16](0) + config.VRFv2Plus.General.VRFJobSimulationBlock = ptr.Ptr[string]("pending") + + network, err := actions.EthereumNetworkConfigFromConfig(l, &config) + require.NoError(t, err, "Error building ethereum network config") + + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&config). + WithPrivateEthereumNetwork(network). + WithCLNodes(1). + WithFunding(big.NewFloat(*config.Common.PluginNodeFunding)). + WithStandardCleanup(). + Build() + require.NoError(t, err, "error creating test env") + + env.ParallelTransactions(true) + + mockETHLinkFeed, err := actions.DeployMockETHLinkFeed(env.ContractDeployer, big.NewInt(*config.VRFv2Plus.General.LinkNativeFeedResponse)) + require.NoError(t, err, "error deploying mock ETH/PLI feed") + + linkToken, err := actions.DeployPLIToken(env.ContractDeployer) + require.NoError(t, err, "error deploying PLI contract") + + numberOfTxKeysToCreate := 2 + vrfv2PlusContracts, subIDs, vrfv2PlusData, nodesMap, err := vrfv2plus.SetupVRFV2_5Environment( + env, + []vrfcommon.VRFNodeType{vrfcommon.VRF}, + &config, + linkToken, + mockETHLinkFeed, + numberOfTxKeysToCreate, + 1, + 1, + l, + ) + require.NoError(t, err, "error setting up VRF v2_5 env") + + subID := subIDs[0] + + subscription, err := vrfv2PlusContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), subID) + require.NoError(t, err, "error getting subscription information") + + vrfv2plus.LogSubDetails(l, subscription, subID, 
vrfv2PlusContracts.CoordinatorV2Plus) + + var isNativeBilling = false + + jobRunsBeforeTest, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(nodesMap[vrfcommon.VRF].Job.Data.ID) + require.NoError(t, err, "error reading job runs") + + l.Info().Uint16("minimumConfirmationDelay", *config.VRFv2Plus.General.MinimumConfirmations).Msg("Minimum Confirmation Delay") + + // test and assert + randomWordsFulfilledEvent, err := vrfv2plus.RequestRandomnessAndWaitForFulfillment( + vrfv2PlusContracts.VRFV2PlusConsumer[0], + vrfv2PlusContracts.CoordinatorV2Plus, + vrfv2PlusData, + subID, + isNativeBilling, + *config.VRFv2Plus.General.MinimumConfirmations, + *config.VRFv2Plus.General.CallbackGasLimit, + *config.VRFv2Plus.General.NumberOfWords, + *config.VRFv2Plus.General.RandomnessRequestCountPerRequest, + *config.VRFv2Plus.General.RandomnessRequestCountPerRequestDeviation, + config.VRFv2Plus.General.RandomWordsFulfilledEventTimeout.Duration, + l, + ) + require.NoError(t, err, "error requesting randomness and waiting for fulfilment") + + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(nodesMap[vrfcommon.VRF].Job.Data.ID) + require.NoError(t, err, "error reading job runs") + require.Equal(t, len(jobRunsBeforeTest.Data)+1, len(jobRuns.Data)) + + status, err := vrfv2PlusContracts.VRFV2PlusConsumer[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + require.NoError(t, err, "error getting rand request status") + require.True(t, status.Fulfilled) + l.Debug().Bool("Fulfilment Status", status.Fulfilled).Msg("Random Words Request Fulfilment Status") +} diff --git a/integration-tests/soak/forwarder_ocr_test.go b/integration-tests/soak/forwarder_ocr_test.go new file mode 100644 index 00000000..fe8b4407 --- /dev/null +++ b/integration-tests/soak/forwarder_ocr_test.go @@ -0,0 +1,41 @@ +package soak + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + + 
"github.com/goplugin/pluginv3.0/integration-tests/actions" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + "github.com/goplugin/pluginv3.0/integration-tests/testsetups" +) + +func TestForwarderOCRSoak(t *testing.T) { + l := logging.GetTestLogger(t) + // Use this variable to pass in any custom EVM specific TOML values to your Plugin nodes + customNetworkTOML := `[EVM.Transactions] +ForwardersEnabled = true` + // Uncomment below for debugging TOML issues on the node + // fmt.Println("Using Plugin TOML\n---------------------") + // fmt.Println(networks.AddNetworkDetailedConfig(config.BaseOCRP2PV1Config, customNetworkTOML, network)) + // fmt.Println("---------------------") + + config, err := tc.GetConfig("Soak", tc.OCR) + require.NoError(t, err, "Error getting config") + + ocrSoakTest, err := testsetups.NewOCRSoakTest(t, &config, true) + require.NoError(t, err, "Error creating soak test") + ocrSoakTest.DeployEnvironment(customNetworkTOML, &config) + if ocrSoakTest.Environment().WillUseRemoteRunner() { + return + } + t.Cleanup(func() { + if err := actions.TeardownRemoteSuite(ocrSoakTest.TearDownVals(t)); err != nil { + l.Error().Err(err).Msg("Error tearing down environment") + } + }) + ocrSoakTest.Setup(&config) + ocrSoakTest.Run() +} diff --git a/integration-tests/soak/ocr_test.go b/integration-tests/soak/ocr_test.go new file mode 100644 index 00000000..d9fc923b --- /dev/null +++ b/integration-tests/soak/ocr_test.go @@ -0,0 +1,49 @@ +package soak + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/logging" + + "github.com/goplugin/pluginv3.0/integration-tests/actions" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + "github.com/goplugin/pluginv3.0/integration-tests/testsetups" +) + +func TestOCRSoak(t *testing.T) { + l := logging.GetTestLogger(t) + // Use this variable to pass in any custom EVM specific TOML values to your Plugin nodes + customNetworkTOML := `` + 
// Uncomment below for debugging TOML issues on the node + // network := networks.MustGetSelectedNetworksFromEnv()[0] + // fmt.Println("Using Plugin TOML\n---------------------") + // fmt.Println(networks.AddNetworkDetailedConfig(config.BaseOCR1Config, customNetworkTOML, network)) + // fmt.Println("---------------------") + + config, err := tc.GetConfig("Soak", tc.OCR) + require.NoError(t, err, "Error getting config") + + ocrSoakTest, err := testsetups.NewOCRSoakTest(t, &config, false) + require.NoError(t, err, "Error creating soak test") + if !ocrSoakTest.Interrupted() { + ocrSoakTest.DeployEnvironment(customNetworkTOML, &config) + } + if ocrSoakTest.Environment().WillUseRemoteRunner() { + return + } + t.Cleanup(func() { + if err := actions.TeardownRemoteSuite(ocrSoakTest.TearDownVals(t)); err != nil { + l.Error().Err(err).Msg("Error tearing down environment") + } + }) + if ocrSoakTest.Interrupted() { + err = ocrSoakTest.LoadState() + require.NoError(t, err, "Error loading state") + ocrSoakTest.Resume() + } else { + ocrSoakTest.Setup(&config) + ocrSoakTest.Run() + } +} diff --git a/integration-tests/test.Dockerfile b/integration-tests/test.Dockerfile new file mode 100644 index 00000000..fc6eefd6 --- /dev/null +++ b/integration-tests/test.Dockerfile @@ -0,0 +1,10 @@ +ARG BASE_IMAGE +ARG IMAGE_VERSION=latest +FROM ${BASE_IMAGE}:${IMAGE_VERSION} + +ARG SUITES=chaos migration performance reorg smoke soak benchmark + +COPY . 
testdir/ +WORKDIR /go/testdir +RUN /go/testdir/integration-tests/scripts/buildTests "${SUITES}" +ENTRYPOINT ["/go/testdir/integration-tests/scripts/entrypoint"] diff --git a/integration-tests/test.Dockerfile.dockerignore b/integration-tests/test.Dockerfile.dockerignore new file mode 100644 index 00000000..cbd0d736 --- /dev/null +++ b/integration-tests/test.Dockerfile.dockerignore @@ -0,0 +1,15 @@ +.DS_Store +.github +.envrc +.vscode/ +*.log +node_modules/ +**/node_modules/ +vendor/ +tmp/ + +contracts/node_modules +examples/ +tools/ +docs/ +operator_ui/ diff --git a/integration-tests/testconfig/automation/automation.toml b/integration-tests/testconfig/automation/automation.toml new file mode 100644 index 00000000..42a834cc --- /dev/null +++ b/integration-tests/testconfig/automation/automation.toml @@ -0,0 +1,49 @@ +# product defaults +[Common] +plugin_node_funding = 0.5 + +# reorg test specific overrides +[Reorg.Automation] +[Reorg.Automation.General] +number_of_nodes=6 +duration=100 +block_time=1 +spec_type="minimum" +plugin_node_log_level="info" +use_prometheus=false + +# load test specific overrides +[Load.Common] +plugin_node_funding = 100 + +[Load.Automation] +[Load.Automation.General] +number_of_nodes=6 +duration=900 +block_time=1 +spec_type="minimum" +plugin_node_log_level="info" +use_prometheus=false + +[[Load.Automation.Load]] +number_of_upkeeps=5 +number_of_events = 1 +number_of_spam_matching_events = 1 +number_of_spam_non_matching_events = 0 +check_burn_amount = 0 +perform_burn_amount = 0 +upkeep_gas_limit = 1000000 +shared_trigger = false + +[[Load.Automation.Load]] +number_of_upkeeps=5 +number_of_events = 1 +number_of_spam_matching_events = 0 +number_of_spam_non_matching_events = 1 +check_burn_amount = 0 +perform_burn_amount = 0 +upkeep_gas_limit = 1000000 +shared_trigger = true + +[Load.Pyroscope] +enabled=false \ No newline at end of file diff --git a/integration-tests/testconfig/automation/config.go b/integration-tests/testconfig/automation/config.go 
new file mode 100644 index 00000000..2c9d277d --- /dev/null +++ b/integration-tests/testconfig/automation/config.go @@ -0,0 +1,91 @@ +package automation + +import ( + "errors" + "math/big" +) + +type Config struct { + General *General `toml:"General"` + Load []Load `toml:"Load"` +} + +func (c *Config) Validate() error { + if c.General != nil { + if err := c.General.Validate(); err != nil { + return err + } + } + if len(c.Load) > 0 { + for _, load := range c.Load { + if err := load.Validate(); err != nil { + return err + } + } + } + return nil +} + +// General is a common configuration for all automation performance tests +type General struct { + NumberOfNodes *int `toml:"number_of_nodes"` + Duration *int `toml:"duration"` + BlockTime *int `toml:"block_time"` + SpecType *string `toml:"spec_type"` + PluginNodeLogLevel *string `toml:"plugin_node_log_level"` + UsePrometheus *bool `toml:"use_prometheus"` +} + +func (c *General) Validate() error { + if c.NumberOfNodes == nil || *c.NumberOfNodes < 1 { + return errors.New("number_of_nodes must be set to a positive integer") + } + if c.Duration == nil || *c.Duration < 1 { + return errors.New("duration must be set to a positive integer") + } + if c.BlockTime == nil || *c.BlockTime < 1 { + return errors.New("block_time must be set to a positive integer") + } + if c.SpecType == nil { + return errors.New("spec_type must be set") + } + if c.PluginNodeLogLevel == nil { + return errors.New("plugin_node_log_level must be set") + } + + return nil +} + +type Load struct { + NumberOfUpkeeps *int `toml:"number_of_upkeeps"` + NumberOfEvents *int `toml:"number_of_events"` + NumberOfSpamMatchingEvents *int `toml:"number_of_spam_matching_events"` + NumberOfSpamNonMatchingEvents *int `toml:"number_of_spam_non_matching_events"` + CheckBurnAmount *big.Int `toml:"check_burn_amount"` + PerformBurnAmount *big.Int `toml:"perform_burn_amount"` + SharedTrigger *bool `toml:"shared_trigger"` + UpkeepGasLimit *uint32 `toml:"upkeep_gas_limit"` +} + 
+func (c *Load) Validate() error { + if c.NumberOfUpkeeps == nil || *c.NumberOfUpkeeps < 1 { + return errors.New("number_of_upkeeps must be set to a positive integer") + } + if c.NumberOfEvents == nil || *c.NumberOfEvents < 0 { + return errors.New("number_of_events must be set to a non-negative integer") + } + if c.NumberOfSpamMatchingEvents == nil || *c.NumberOfSpamMatchingEvents < 0 { + return errors.New("number_of_spam_matching_events must be set to a non-negative integer") + } + if c.NumberOfSpamNonMatchingEvents == nil || *c.NumberOfSpamNonMatchingEvents < 0 { + return errors.New("number_of_spam_non_matching_events must be set to a non-negative integer") + } + if c.CheckBurnAmount == nil || c.CheckBurnAmount.Cmp(big.NewInt(0)) < 0 { + return errors.New("check_burn_amount must be set to a non-negative integer") + } + if c.PerformBurnAmount == nil || c.PerformBurnAmount.Cmp(big.NewInt(0)) < 0 { + return errors.New("perform_burn_amount must be set to a non-negative integer") + } + + return nil +} diff --git a/integration-tests/testconfig/automation/example.toml b/integration-tests/testconfig/automation/example.toml new file mode 100644 index 00000000..1028f622 --- /dev/null +++ b/integration-tests/testconfig/automation/example.toml @@ -0,0 +1,90 @@ +# Example of full config with all fields +# General part +[PluginImage] +image="public.ecr.aws/plugin/plugin" +version="2.7.0" + +[Logging] +# if set to true will save logs even if test did not fail +test_log_collect=false + +[Logging.LogStream] +# supported targets: file, loki, in-memory. 
if empty no logs will be persisted +log_targets=["file"] +# context timeout for starting log producer and also time-frame for requesting logs +log_producer_timeout="10s" +# number of retries before log producer gives up and stops listening to logs +log_producer_retry_limit=10 + +[Logging.Loki] +tenant_id="tenant_id" +# full URL of Loki ingest endpoint +endpoint="https://loki.url/api/v3/push" +# currently only needed when using public instance +basic_auth="loki-basic-auth" +# only needed for cloud grafana +bearer_token="bearer_token" + +# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set) +[Logging.Grafana] +# grafana url (trailing "/" will be stripped) +base_url="http://grafana.url" +# url of your grafana dashboard (prefix and suffix "/" are stripped), example: /d/ad61652-2712-1722/my-dashboard +dashboard_url="/d/your-dashboard" +bearer_token="my-awesome-token" + +# if you want to use polygon_mumbai +[Network] +selected_networks=["polygon_mumbai"] + +[Network.RpcHttpUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.RpcWsUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.WalletKeys] +polygon_mumbai = ["change-me-to-your-PK"] + +[PrivateEthereumNetwork] +# pos or pow +consensus_type="pos" +# only prysm supported currently +consensus_layer="prysm" +# geth, besu, nethermind or erigon +execution_layer="geth" +# if true after env started it will wait for at least 1 epoch to be finalised before continuing +wait_for_finalization=false + +[PrivateEthereumNetwork.EthereumChainConfig] +# duration of single slot, lower => faster block production, must be >= 4 +seconds_per_slot=12 +# number of slots in epoch, lower => faster epoch finalisation, must be >= 4 +slots_per_epoch=6 +# extra genesis delay, no need to modify, but it should be after all validators/beacon chain starts +genesis_delay=15 +# number of validators in the network +validator_count=8 +chain_id=1337 +# list of addresses to be prefunded in genesis 
+addresses_to_fund=["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"] + +# Common +[Common] +plugin_node_funding = 0.5 + +# Product part +[Automation] +[Automation.General] +number_of_nodes=6 +duration=100 +block_time=1 +number_of_events=1 +spec_type="minimum" +plugin_node_log_level="info" +use_prometheus=false + +# upgrade test specific override +[TestAutomationNodeUpgrade.PluginUpgradeImage] +image="public.ecr.aws/plugin/plugin" +version="2.8.0" \ No newline at end of file diff --git a/integration-tests/testconfig/configs_embed.go b/integration-tests/testconfig/configs_embed.go new file mode 100644 index 00000000..67e954ff --- /dev/null +++ b/integration-tests/testconfig/configs_embed.go @@ -0,0 +1,21 @@ +//go:build embed +// +build embed + +package testconfig + +import "embed" + +//go:embed default.toml +//go:embed automation/automation.toml +//go:embed functions/functions.toml +//go:embed keeper/keeper.toml +//go:embed log_poller/log_poller.toml +//go:embed node/node.toml +//go:embed ocr/ocr.toml +//go:embed vrfv2/vrfv2.toml +//go:embed vrfv2plus/vrfv2plus.toml +var embeddedConfigsFs embed.FS + +func init() { + areConfigsEmbedded = true +} diff --git a/integration-tests/testconfig/configs_noembed.go b/integration-tests/testconfig/configs_noembed.go new file mode 100644 index 00000000..95572c4a --- /dev/null +++ b/integration-tests/testconfig/configs_noembed.go @@ -0,0 +1,12 @@ +//go:build !embed +// +build !embed + +package testconfig + +import "embed" + +var embeddedConfigsFs embed.FS + +func init() { + areConfigsEmbedded = false +} diff --git a/integration-tests/testconfig/default.toml b/integration-tests/testconfig/default.toml new file mode 100644 index 00000000..a65c23d7 --- /dev/null +++ b/integration-tests/testconfig/default.toml @@ -0,0 +1,22 @@ +[Logging] +test_log_collect=false + +[Logging.LogStream] +log_targets=["file"] +log_producer_timeout="10s" +log_producer_retry_limit=10 + +[Network] +selected_networks=["simulated"] + +[PrivateEthereumNetwork] 
+consensus_type="pow" +execution_layer="geth" + +[PrivateEthereumNetwork.EthereumChainConfig] +seconds_per_slot=3 +slots_per_epoch=2 +genesis_delay=15 +validator_count=4 +chain_id=1337 +addresses_to_fund=["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"] \ No newline at end of file diff --git a/integration-tests/testconfig/functions/config.go b/integration-tests/testconfig/functions/config.go new file mode 100644 index 00000000..ecd34cdf --- /dev/null +++ b/integration-tests/testconfig/functions/config.go @@ -0,0 +1,118 @@ +package functions + +import ( + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/utils/net" +) + +const ( + ErrReadPerfConfig = "failed to read TOML config for performance tests" + ErrUnmarshalPerfConfig = "failed to unmarshal TOML config for performance tests" +) + +type Config struct { + Performance *Performance `toml:"Performance"` + Common *Common `toml:"Common"` +} + +type Common struct { + NodeFunds *big.Float `toml:"node_funds"` + SubFunds *big.Int `toml:"sub_funds"` + PLITokenAddr *string `toml:"link_token_addr"` + Coordinator *string `toml:"coordinator_addr"` + Router *string `toml:"router_addr"` + LoadTestClient *string `toml:"client_addr"` + SubscriptionID *uint64 `toml:"subscription_id"` + DONID *string `toml:"don_id"` + GatewayURL *string `toml:"gateway_url"` + Receiver *string `toml:"receiver"` + FunctionsCallPayloadHTTP *string `toml:"functions_call_payload_http"` + FunctionsCallPayloadWithSecrets *string `toml:"functions_call_payload_with_secrets"` + FunctionsCallPayloadReal *string `toml:"functions_call_payload_real"` + SecretsSlotID *uint8 `toml:"secrets_slot_id"` + SecretsVersionID *uint64 `toml:"secrets_version_id"` + // Secrets these are for CI secrets + Secrets *string `toml:"secrets"` +} + +func (c *Common) Validate() error { + if c.SubFunds == nil { + return errors.New("sub_funds must be set") + } + 
if c.PLITokenAddr == nil || *c.PLITokenAddr == "" { + return errors.New("link_token_addr must be set") + } + if !common.IsHexAddress(*c.PLITokenAddr) { + return errors.New("link_token_addr must be a valid address") + } + if c.Coordinator == nil || *c.Coordinator == "" { + return errors.New("coordinator must be set") + } + if !common.IsHexAddress(*c.Coordinator) { + return errors.New("coordinator must be a valid address") + } + if c.Router == nil || *c.Router == "" { + return errors.New("router must be set") + } + if !common.IsHexAddress(*c.Router) { + return errors.New("router must be a valid address") + } + if c.DONID == nil || *c.DONID == "" { + return errors.New("don_id must be set") + } + if c.GatewayURL == nil || *c.GatewayURL == "" { + return errors.New("gateway_url must be set") + } + if !net.IsValidURL(*c.GatewayURL) { + return errors.New("gateway_url must be a valid URL") + } + if c.Receiver == nil || *c.Receiver == "" { + return errors.New("receiver must be set") + } + if !common.IsHexAddress(*c.Receiver) { + return errors.New("receiver must be a valid address") + } + return nil +} + +type Performance struct { + RPS *int64 `toml:"rps"` + RequestsPerCall *uint32 `toml:"requests_per_call"` + Duration *blockchain.StrDuration `toml:"duration"` +} + +func (c *Performance) Validate() error { + if c.RPS == nil || *c.RPS < 1 { + return errors.New("rps must be greater than 0") + } + if c.RequestsPerCall != nil && *c.RequestsPerCall < 1 { + return errors.New("requests_per_call must be greater than 0") + } + if c.Duration == nil || c.Duration.Duration < 1 { + return errors.New("duration must be greater than 0") + } + return nil +} + +func (c *Config) Validate() error { + if c == nil { + return nil + } + if c.Common != nil { + if err := c.Common.Validate(); err != nil { + return err + } + } + if c.Performance != nil { + if err := c.Performance.Validate(); err != nil { + return err + } + } + + return nil +} diff --git 
a/integration-tests/testconfig/functions/example.toml b/integration-tests/testconfig/functions/example.toml new file mode 100644 index 00000000..abe9464d --- /dev/null +++ b/integration-tests/testconfig/functions/example.toml @@ -0,0 +1,113 @@ +# Example of full config with all fields +# General part +[PluginImage] +image="public.ecr.aws/plugin/plugin" +version="2.7.0" + +[Logging] +# if set to true will save logs even if test did not fail +test_log_collect=false + +[Logging.LogStream] +# supported targets: file, loki, in-memory. if empty no logs will be persisted +log_targets=["file"] +# context timeout for starting log producer and also time-frame for requesting logs +log_producer_timeout="10s" +# number of retries before log producer gives up and stops listening to logs +log_producer_retry_limit=10 + +[Logging.Loki] +tenant_id="tenant_id" +# full URL of Loki ingest endpoint +endpoint="https://loki.url/api/v3/push" +# currently only needed when using public instance +basic_auth="loki-basic-auth" +# only needed for cloud grafana +bearer_token="bearer_token" + +# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set) +[Logging.Grafana] +# grafana url (trailing "/" will be stripped) +base_url="http://grafana.url" +# url of your grafana dashboard (prefix and suffix "/" are stripped), example: /d/ad61652-2712-1722/my-dashboard +dashboard_url="/d/your-dashboard" +bearer_token="my-awesome-token" + +# if you want to use polygon_mumbai +[Network] +selected_networks=["polygon_mumbai"] + +[Network.RpcHttpUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.RpcWsUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.WalletKeys] +polygon_mumbai = ["change-me-to-your-PK"] + +[PrivateEthereumNetwork] +# pos or pow +consensus_type="pos" +# only prysm supported currently +consensus_layer="prysm" +# geth, besu, nethermind or erigon +execution_layer="geth" +# if true after env started it will wait for at least 1 epoch to be 
finalised before continuing +wait_for_finalization=false + +[PrivateEthereumNetwork.EthereumChainConfig] +# duration of single slot, lower => faster block production, must be >= 4 +seconds_per_slot=12 +# numer of slots in epoch, lower => faster epoch finalisation, must be >= 4 +slots_per_epoch=6 +# extra genesis gelay, no need to modify, but it should be after all validators/beacon chain starts +genesis_delay=15 +# number of validators in the network +validator_count=8 +chain_id=1337 +# list of addresses to be prefunded in genesis +addresses_to_fund=["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"] + +# Common +[Common] +plugin_node_funding = 0.5 + +# Product part +[Functions] +[Functions.Common] +# Polygon Mumbai only for now +receiver = "0x3098B6665589959711A48a6bAe5B7F2908f6a3bE" +don_id = "fun-staging-mumbai-1" +gateway_url = "https://gateway-stg-one.main.stage.cldev.sh" +link_token_addr = "0x326C977E6efc84E512bB9C30f76E30c160eD06FB" +coordinator_addr = "0x6D6a83BB356b7242E88C1A2b290102fde26590D0" +router_addr = "0x2673266D3Cd08b53494B5a92B66DEec7F1408E7A" + +# comment "client_addr" and "subscription_id" and test will create a new pair +# get it from logs and save +client_addr = "0x89D4b58D859a536D0B888ecD5093eF5FF9e4F977" +subscription_id = 47 +sub_funds = 10 + +functions_call_payload_with_secrets = "return Functions.encodeString(JSON.stringify(secrets))" +functions_call_payload_http = """ +const response = await Functions.makeHttpRequest({ url: 'http://dummyjson.com/products/1' }); +return Functions.encodeUint256(response.data.id); +""" +functions_call_payload_real = """ +const arg1 = args[0]; +const arg2 = args[1]; +const arg3 = args[2]; +const arg4 = args[3]; + +const response = await Functions.makeHttpRequest({ url: 'http://dummyjson.com/products/${arg1}' }); +return Functions.encodeString(JSON.stringify(secrets)); +""" +secrets_slot_id = 0 +secrets_version_id = 1693945705 + +[Functions.Performance] +rps = 95 +requests_per_call = 20 +duration = "10m" \ No 
newline at end of file diff --git a/integration-tests/testconfig/functions/functions.toml b/integration-tests/testconfig/functions/functions.toml new file mode 100644 index 00000000..a4bcb643 --- /dev/null +++ b/integration-tests/testconfig/functions/functions.toml @@ -0,0 +1,91 @@ +# product defaults +[Functions] +[Functions.Common] +# Polygon Mumbai only for now +receiver = "0x3098B6665589959711A48a6bAe5B7F2908f6a3bE" +don_id = "fun-staging-mumbai-1" +gateway_url = "https://gateway-stg-one.main.stage.cldev.sh" +link_token_addr = "0x326C977E6efc84E512bB9C30f76E30c160eD06FB" +coordinator_addr = "0x6D6a83BB356b7242E88C1A2b290102fde26590D0" +router_addr = "0x2673266D3Cd08b53494B5a92B66DEec7F1408E7A" + +# comment "client_addr" and "subscription_id" and test will create a new pair +# get it from logs and save +client_addr = "0x89D4b58D859a536D0B888ecD5093eF5FF9e4F977" +subscription_id = 47 +sub_funds = 10 + +functions_call_payload_with_secrets = "return Functions.encodeString(JSON.stringify(secrets))" +functions_call_payload_http = """ +const response = await Functions.makeHttpRequest({ url: 'http://dummyjson.com/products/1' }); +return Functions.encodeUint256(response.data.id); +""" +functions_call_payload_real = """ +const arg1 = args[0]; +const arg2 = args[1]; +const arg3 = args[2]; +const arg4 = args[3]; + +const response = await Functions.makeHttpRequest({ url: 'http://dummyjson.com/products/${arg1}' }); +return Functions.encodeString(JSON.stringify(secrets)); +""" +secrets_slot_id = 0 +secrets_version_id = 1693945705 + +# uncomment to upload new secrets to s4 and use it in your run +# TODO: not working now +#secrets = "{\"secrets\": \"secretValue\"}" + +# gateway-list specific test configuration +[GatewayList.Functions] +[GatewayList.Functions.Performance] +rps = 95 +duration = "10m" + +# gateway-set specific test configuration +[GatewaySet.Functions] +[GatewaySet.Functions.Performance] +rps = 95 +duration = "10m" + +# real-soak specific test configuration 
+[RealSoak.Functions] +[RealSoak.Functions.Performance] +rps = 1 +requests_per_call = 20 +duration = "10m" + +# real-stress specific test configuration +[RealStress.Functions] +[RealStress.Functions.Performance] +rps = 1 +requests_per_call = 40 +duration = "10m" + +# secrets-soak specific test configuration +[SecretsSoak.Functions] +[SecretsSoak.Functions.Performance] +rps = 1 +requests_per_call = 20 +duration = "10m" + +# secrets-stress specific test configuration +[SecretsStress.Functions] +[SecretsStress.Functions.Performance] +rps = 1 +requests_per_call = 40 +duration = "10m" + +# soak specific test configuration +[Soak.Functions] +[Soak.Functions.Performance] +rps = 1 +requests_per_call = 40 +duration = "10m" + +# soak specific test configuration +[Stress.Functions] +[Stress.Functions.Performance] +rps = 1 +requests_per_call = 78 +duration = "10m" \ No newline at end of file diff --git a/integration-tests/testconfig/keeper/config.go b/integration-tests/testconfig/keeper/config.go new file mode 100644 index 00000000..da6cd7ac --- /dev/null +++ b/integration-tests/testconfig/keeper/config.go @@ -0,0 +1,85 @@ +package keeper + +import ( + "errors" +) + +type Config struct { + Common *Common `toml:"Common"` +} + +func (c *Config) Validate() error { + if c.Common == nil { + return nil + } + return c.Common.Validate() +} + +type Common struct { + RegistryToTest *string `toml:"registry_to_test"` + NumberOfRegistries *int `toml:"number_of_registries"` + NumberOfNodes *int `toml:"number_of_nodes"` + NumberOfUpkeeps *int `toml:"number_of_upkeeps"` + UpkeepGasLimit *int64 `toml:"upkeep_gas_limit"` + CheckGasToBurn *int64 `toml:"check_gas_to_burn"` + PerformGasToBurn *int64 `toml:"perform_gas_to_burn"` + MaxPerformGas *int64 `toml:"max_perform_gas"` + BlockRange *int64 `toml:"block_range"` + BlockInterval *int64 `toml:"block_interval"` + ForceSingleTxKey *bool `toml:"forces_single_tx_key"` + DeleteJobsOnEnd *bool `toml:"delete_jobs_on_end"` + RegistryAddress *string 
`toml:"registry_address"` + RegistrarAddress *string `toml:"registrar_address"` + LinkTokenAddress *string `toml:"link_token_address"` + EthFeedAddress *string `toml:"eth_feed_address"` + GasFeedAddress *string `toml:"gas_feed_address"` +} + +func (c *Common) Validate() error { + if c.RegistryToTest == nil || *c.RegistryToTest == "" { + return errors.New("registry_to_test must be set") + } + if c.NumberOfRegistries == nil || *c.NumberOfRegistries <= 0 { + return errors.New("number_of_registries must be a positive integer") + } + if c.NumberOfNodes == nil || *c.NumberOfNodes <= 0 { + return errors.New("number_of_nodes must be a positive integer") + } + if c.NumberOfUpkeeps == nil || *c.NumberOfUpkeeps <= 0 { + return errors.New("number_of_upkeeps must be a positive integer") + } + if c.UpkeepGasLimit == nil || *c.UpkeepGasLimit <= 0 { + return errors.New("upkeep_gas_limit must be a positive integer") + } + if c.CheckGasToBurn == nil || *c.CheckGasToBurn <= 0 { + return errors.New("check_gas_to_burn must be a positive integer") + } + if c.PerformGasToBurn == nil || *c.PerformGasToBurn <= 0 { + return errors.New("perform_gas_to_burn must be a positive integer") + } + if c.MaxPerformGas == nil || *c.MaxPerformGas <= 0 { + return errors.New("max_perform_gas must be a positive integer") + } + if c.BlockRange == nil || *c.BlockRange <= 0 { + return errors.New("block_range must be a positive integer") + } + if c.BlockInterval == nil || *c.BlockInterval <= 0 { + return errors.New("block_interval must be a positive integer") + } + if c.RegistryAddress == nil { + c.RegistryAddress = new(string) + } + if c.RegistrarAddress == nil { + c.RegistrarAddress = new(string) + } + if c.LinkTokenAddress == nil { + c.LinkTokenAddress = new(string) + } + if c.EthFeedAddress == nil { + c.EthFeedAddress = new(string) + } + if c.GasFeedAddress == nil { + c.GasFeedAddress = new(string) + } + return nil +} diff --git a/integration-tests/testconfig/keeper/example.toml 
b/integration-tests/testconfig/keeper/example.toml new file mode 100644 index 00000000..5879f957 --- /dev/null +++ b/integration-tests/testconfig/keeper/example.toml @@ -0,0 +1,88 @@ +# Example of full config with all fields +# General part +[PluginImage] +image="public.ecr.aws/plugin/plugin" +version="2.7.0" + +[Logging] +# if set to true will save logs even if test did not fail +test_log_collect=false + +[Logging.LogStream] +# supported targets: file, loki, in-memory. if empty no logs will be persisted +log_targets=["file"] +# context timeout for starting log producer and also time-frame for requesting logs +log_producer_timeout="10s" +# number of retries before log producer gives up and stops listening to logs +log_producer_retry_limit=10 + +[Logging.Loki] +tenant_id="tenant_id" +# full URL of Loki ingest endpoint +endpoint="https://loki.url/api/v3/push" +# currently only needed when using public instance +basic_auth="loki-basic-auth" +# only needed for cloud grafana +bearer_token="bearer_token" + +# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set) +[Logging.Grafana] +# grafana url (trailing "/" will be stripped) +base_url="http://grafana.url" +# url of your grafana dashboard (prefix and suffix "/" are stripped), example: /d/ad61652-2712-1722/my-dashboard +dashboard_url="/d/your-dashboard" +bearer_token="my-awesome-token" + +# if you want to use polygon_mumbai +[Network] +selected_networks=["polygon_mumbai"] + +[Network.RpcHttpUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.RpcWsUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.WalletKeys] +polygon_mumbai = ["change-me-to-your-PK"] + +[PrivateEthereumNetwork] +# pos or pow +consensus_type="pos" +# only prysm supported currently +consensus_layer="prysm" +# geth, besu, nethermind or erigon +execution_layer="geth" +# if true after env started it will wait for at least 1 epoch to be finalised before continuing +wait_for_finalization=false + 
+[PrivateEthereumNetwork.EthereumChainConfig] +# duration of single slot, lower => faster block production, must be >= 4 +seconds_per_slot=12 +# number of slots in epoch, lower => faster epoch finalisation, must be >= 4 +slots_per_epoch=6 +# extra genesis delay, no need to modify, but it should be after all validators/beacon chain starts +genesis_delay=15 +# number of validators in the network +validator_count=8 +chain_id=1337 +# list of addresses to be prefunded in genesis +addresses_to_fund=["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"] + +# Product part +[Common] +plugin_node_funding = 0.5 + +[Keeper.Common] +registry_to_test = "2_1" +number_of_registries = 1 +number_of_nodes = 6 +number_of_upkeeps = 500 +upkeep_gas_limit = 150000 +check_gas_to_burn = 100000 +perform_gas_to_burn = 50000 +max_perform_gas = 5000000 +block_range = 3600 +block_interval = 20 +forces_single_tx_key = false +delete_jobs_on_end = true \ No newline at end of file diff --git a/integration-tests/testconfig/keeper/keeper.toml b/integration-tests/testconfig/keeper/keeper.toml new file mode 100644 index 00000000..0261e606 --- /dev/null +++ b/integration-tests/testconfig/keeper/keeper.toml @@ -0,0 +1,17 @@ +# product defaults +[Common] +plugin_node_funding = 0.5 + +[Keeper.Common] +registry_to_test = "2_1" +number_of_registries = 1 +number_of_nodes = 6 +number_of_upkeeps = 500 +upkeep_gas_limit = 1500000 +check_gas_to_burn = 100000 +perform_gas_to_burn = 50000 +max_perform_gas = 5000000 +block_range = 3600 +block_interval = 20 +forces_single_tx_key = false +delete_jobs_on_end = true \ No newline at end of file diff --git a/integration-tests/testconfig/log_poller/config.go b/integration-tests/testconfig/log_poller/config.go new file mode 100644 index 00000000..9c8aa72d --- /dev/null +++ b/integration-tests/testconfig/log_poller/config.go @@ -0,0 +1,158 @@ +package logpoller + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi" + 
"github.com/goplugin/plugin-testing-framework/blockchain" +) + +const ( + ErrReadPerfConfig = "failed to read TOML config for performance tests" + ErrUnmarshalPerfConfig = "failed to unmarshal TOML config for performance tests" +) + +type GeneratorType = string + +const ( + GeneratorType_WASP = "wasp" + GeneratorType_Looped = "looped" +) + +type Config struct { + General *General `toml:"General"` + ChaosConfig *ChaosConfig `toml:"Chaos"` + Wasp *WaspConfig `toml:"Wasp"` + LoopedConfig *LoopedConfig `toml:"Looped"` +} + +func (c *Config) Validate() error { + if c.General == nil { + return fmt.Errorf("General config must be set") + } + + err := c.General.Validate() + if err != nil { + return fmt.Errorf("General config validation failed: %w", err) + } + + switch *c.General.Generator { + case GeneratorType_WASP: + if c.Wasp == nil { + return fmt.Errorf("wasp config is nil") + } + err = c.Wasp.Validate() + if err != nil { + return fmt.Errorf("wasp config validation failed: %w", err) + } + case GeneratorType_Looped: + if c.LoopedConfig == nil { + return fmt.Errorf("looped config is nil") + } + err = c.LoopedConfig.Validate() + if err != nil { + return fmt.Errorf("looped config validation failed: %w", err) + } + default: + return fmt.Errorf("unknown generator type: %s", *c.General.Generator) + } + + if c.ChaosConfig != nil { + if err := c.ChaosConfig.Validate(); err != nil { + return fmt.Errorf("chaos config validation failed: %w", err) + } + } + + return nil +} + +type LoopedConfig struct { + ExecutionCount *int `toml:"execution_count"` + MinEmitWaitTimeMs *int `toml:"min_emit_wait_time_ms"` + MaxEmitWaitTimeMs *int `toml:"max_emit_wait_time_ms"` +} + +func (l *LoopedConfig) Validate() error { + if l.ExecutionCount == nil || *l.ExecutionCount == 0 { + return fmt.Errorf("execution_count must be set and > 0") + } + + if l.MinEmitWaitTimeMs == nil || *l.MinEmitWaitTimeMs == 0 { + return fmt.Errorf("min_emit_wait_time_ms must be set and > 0") + } + + if l.MaxEmitWaitTimeMs 
== nil || *l.MaxEmitWaitTimeMs == 0 { + return fmt.Errorf("max_emit_wait_time_ms must be set and > 0") + } + + return nil +} + +type General struct { + Generator *string `toml:"generator"` + EventsToEmit []abi.Event `toml:"-"` + Contracts *int `toml:"contracts"` + EventsPerTx *int `toml:"events_per_tx"` + UseFinalityTag *bool `toml:"use_finality_tag"` +} + +func (g *General) Validate() error { + if g.Generator == nil || *g.Generator == "" { + return fmt.Errorf("generator is empty") + } + + if g.Contracts == nil || *g.Contracts == 0 { + return fmt.Errorf("contracts is 0, but must be > 0") + } + + if g.EventsPerTx == nil || *g.EventsPerTx == 0 { + return fmt.Errorf("events_per_tx is 0, but must be > 0") + } + + return nil +} + +type ChaosConfig struct { + ExperimentCount *int `toml:"experiment_count"` + TargetComponent *string `toml:"target_component"` +} + +func (c *ChaosConfig) Validate() error { + if c.ExperimentCount != nil && *c.ExperimentCount == 0 { + return fmt.Errorf("experiment_count must be > 0") + } + + return nil +} + +type WaspConfig struct { + RPS *int64 `toml:"rps"` + LPS *int64 `toml:"lps"` + RateLimitUnitDuration *blockchain.StrDuration `toml:"rate_limit_unit_duration"` + Duration *blockchain.StrDuration `toml:"duration"` + CallTimeout *blockchain.StrDuration `toml:"call_timeout"` +} + +func (w *WaspConfig) Validate() error { + if w.RPS == nil && w.LPS == nil { + return fmt.Errorf("either RPS or LPS needs to be set") + } + if *w.RPS == 0 && *w.LPS == 0 { + return fmt.Errorf("either RPS or LPS needs to be a positive integer") + } + if *w.RPS != 0 && *w.LPS != 0 { + return fmt.Errorf("only one of RPS or LPS can be set") + } + if w.Duration == nil || w.Duration.Duration == 0 { + return fmt.Errorf("duration must be set and > 0") + } + if w.CallTimeout == nil || w.CallTimeout.Duration == 0 { + return fmt.Errorf("call_timeout must be set and > 0") + } + if w.RateLimitUnitDuration == nil || w.RateLimitUnitDuration.Duration == 0 { + return 
fmt.Errorf("rate_limit_unit_duration must be set and > 0") + } + + return nil +} diff --git a/integration-tests/testconfig/log_poller/example.toml b/integration-tests/testconfig/log_poller/example.toml new file mode 100644 index 00000000..df77f0cc --- /dev/null +++ b/integration-tests/testconfig/log_poller/example.toml @@ -0,0 +1,87 @@ +# Example of full config with all fields +# General part +[PluginImage] +image="public.ecr.aws/plugin/plugin" +version="2.7.0" + +[Logging] +# if set to true will save logs even if test did not fail +test_log_collect=false + +[Logging.LogStream] +# supported targets: file, loki, in-memory. if empty no logs will be persistet +log_targets=["file"] +# context timeout for starting log producer and also time-frame for requesting logs +log_producer_timeout="10s" +# number of retries before log producer gives up and stops listening to logs +log_producer_retry_limit=10 + +[Logging.Loki] +tenant_id="tenant_id" +# full URL of Loki ingest endpoint +endpoint="https://loki.url/api/v3/push" +# currently only needed when using public instance +basic_auth="loki-basic-auth" +# only needed for cloud grafana +bearer_token="bearer_token" + +# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set) +[Logging.Grafana] +# grafana url (trailing "/" will be stripped) +base_url="http://grafana.url" +# url of your grafana dashboard (prefix and suffix "/" are stirpped), example: /d/ad61652-2712-1722/my-dashboard +dashboard_url="/d/your-dashboard" +bearer_token="my-awesome-token" + +# if you want to use polygon_mumbial +[Network] +selected_networks=["polygon_mumbai"] + +[Network.RpcHttpUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.RpcWsUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.WalletKeys] +polygon_mumbai = ["change-me-to-your-PK"] + +[PrivateEthereumNetwork] +# pos or pow +consensus_type="pos" +# only prysm supported currently +consensus_layer="prysm" +# geth, besu, nethermind or erigon 
+execution_layer="geth" +# if true after env started it will wait for at least 1 epoch to be finalised before continuing +wait_for_finalization=false + +[PrivateEthereumNetwork.EthereumChainConfig] +# duration of single slot, lower => faster block production, must be >= 4 +seconds_per_slot=12 +# numer of slots in epoch, lower => faster epoch finalisation, must be >= 4 +slots_per_epoch=6 +# extra genesis gelay, no need to modify, but it should be after all validators/beacon chain starts +genesis_delay=15 +# number of validators in the network +validator_count=8 +chain_id=1337 +# list of addresses to be prefunded in genesis +addresses_to_fund=["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"] + +# Common +[Common] +plugin_node_funding = 0.5 + +# Product part +[LogPoller] +[LogPoller.General] +generator = "looped" +contracts = 2 +events_per_tx = 4 +use_finality_tag = true + +[LogPoller.Looped] +execution_count = 100 +min_emit_wait_time_ms = 200 +max_emit_wait_time_ms = 500 \ No newline at end of file diff --git a/integration-tests/testconfig/log_poller/log_poller.toml b/integration-tests/testconfig/log_poller/log_poller.toml new file mode 100644 index 00000000..93aa035a --- /dev/null +++ b/integration-tests/testconfig/log_poller/log_poller.toml @@ -0,0 +1,61 @@ +# product defaults +[LogPoller] +[LogPoller.General] +generator = "looped" +contracts = 2 +events_per_tx = 4 +use_finality_tag = true + +[LogPoller.Looped] +execution_count = 100 +min_emit_wait_time_ms = 200 +max_emit_wait_time_ms = 500 + +# test-specific +[TestLogPollerFewFiltersFixedDepth.LogPoller.General] +use_finality_tag = false + +[TestLogManyFiltersPollerFinalityTag.LogPoller.General] +contracts = 300 +events_per_tx = 3 + +[TestLogManyFiltersPollerFinalityTag.LogPoller.Looped] +execution_count = 30 + +[TestLogManyFiltersPollerFixedDepth.LogPoller.General] +use_finality_tag = false +contracts = 300 +events_per_tx = 3 + +[TestLogManyFiltersPollerFixedDepth.LogPoller.Looped] +execution_count = 30 + 
[TestLogPollerWithChaosFinalityTag.LogPoller.Looped]
if empty no logs will be persistet +log_targets=["file"] +# context timeout for starting log producer and also time-frame for requesting logs +log_producer_timeout="10s" +# number of retries before log producer gives up and stops listening to logs +log_producer_retry_limit=10 + +[Logging.Loki] +tenant_id="tenant_id" +# full URL of Loki ingest endpoint +endpoint="https://loki.url/api/v3/push" +# currently only needed when using public instance +basic_auth="loki-basic-auth" +# only needed for cloud grafana +bearer_token="bearer_token" + +# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set) +[Logging.Grafana] +# grafana url (trailing "/" will be stripped) +base_url="http://grafana.url" +# url of your grafana dashboard (prefix and suffix "/" are stirpped), example: /d/ad61652-2712-1722/my-dashboard +dashboard_url="/d/your-dashboard" +bearer_token="my-awesome-token" + +# if you want to use polygon_mumbial +[Network] +selected_networks=["polygon_mumbai"] + +[Network.RpcHttpUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.RpcWsUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.WalletKeys] +polygon_mumbai = ["change-me-to-your-PK"] + +[PrivateEthereumNetwork] +# pos or pow +consensus_type="pos" +# only prysm supported currently +consensus_layer="prysm" +# geth, besu, nethermind or erigon +execution_layer="geth" +# if true after env started it will wait for at least 1 epoch to be finalised before continuing +wait_for_finalization=false + +[PrivateEthereumNetwork.EthereumChainConfig] +# duration of single slot, lower => faster block production, must be >= 4 +seconds_per_slot=12 +# numer of slots in epoch, lower => faster epoch finalisation, must be >= 4 +slots_per_epoch=6 +# extra genesis gelay, no need to modify, but it should be after all validators/beacon chain starts +genesis_delay=15 +# number of validators in the network +validator_count=8 +chain_id=1337 +# list of addresses to be prefunded in genesis 
+addresses_to_fund=["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"] + +# Common +[Common] +plugin_node_funding = 0.5 + +# Test-specific part +[PluginUpgradeImage] +image="public.ecr.aws/plugin/plugin" +version="2.8.0" \ No newline at end of file diff --git a/integration-tests/testconfig/node/node.toml b/integration-tests/testconfig/node/node.toml new file mode 100644 index 00000000..68793575 --- /dev/null +++ b/integration-tests/testconfig/node/node.toml @@ -0,0 +1,5 @@ +# original image +[PluginImage] + +# image to upgrade to +[PluginUpgradeImage] \ No newline at end of file diff --git a/integration-tests/testconfig/ocr/example.toml b/integration-tests/testconfig/ocr/example.toml new file mode 100644 index 00000000..cda4b6de --- /dev/null +++ b/integration-tests/testconfig/ocr/example.toml @@ -0,0 +1,96 @@ +# Example of full config with all fields +# General part +[PluginImage] +image="public.ecr.aws/plugin/plugin" +version="2.7.0" + +[Logging] +# if set to true will save logs even if test did not fail +test_log_collect=false + +[Logging.LogStream] +# supported targets: file, loki, in-memory. 
if empty no logs will be persistet +log_targets=["file"] +# context timeout for starting log producer and also time-frame for requesting logs +log_producer_timeout="10s" +# number of retries before log producer gives up and stops listening to logs +log_producer_retry_limit=10 + +[Logging.Loki] +tenant_id="tenant_id" +# full URL of Loki ingest endpoint +endpoint="https://loki.url/api/v3/push" +# currently only needed when using public instance +basic_auth="loki-basic-auth" +# only needed for cloud grafana +bearer_token="bearer_token" + +# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set) +[Logging.Grafana] +# grafana url (trailing "/" will be stripped) +base_url="http://grafana.url" +# url of your grafana dashboard (prefix and suffix "/" are stirpped), example: /d/ad61652-2712-1722/my-dashboard +dashboard_url="/d/your-dashboard" +bearer_token="my-awesome-token" + +# if you want to use polygon_mumbial +[Network] +selected_networks=["polygon_mumbai"] + +[Network.RpcHttpUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.RpcWsUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.WalletKeys] +polygon_mumbai = ["change-me-to-your-PK"] + +[PrivateEthereumNetwork] +# pos or pow +consensus_type="pos" +# only prysm supported currently +consensus_layer="prysm" +# geth, besu, nethermind or erigon +execution_layer="geth" +# if true after env started it will wait for at least 1 epoch to be finalised before continuing +wait_for_finalization=false + +[PrivateEthereumNetwork.EthereumChainConfig] +# duration of single slot, lower => faster block production, must be >= 4 +seconds_per_slot=12 +# numer of slots in epoch, lower => faster epoch finalisation, must be >= 4 +slots_per_epoch=6 +# extra genesis gelay, no need to modify, but it should be after all validators/beacon chain starts +genesis_delay=15 +# number of validators in the network +validator_count=8 +chain_id=1337 +# list of addresses to be prefunded in genesis 
+addresses_to_fund=["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"] + +# load test specific configuration +[Load.OCR] +[Load.OCR.Common] +eth_funds = 3 + +[Load.OCR.Load] +test_duration = "3m" +rate_limit_unit_duration = "1m" +rate = 3 +verification_interval = "5s" +verification_timeout = "3m" +ea_change_interval = "5s" + +# soak test specific configuration +[Soak.Common] +plugin_node_funding = 100 + +[Soak.OCR] +[Soak.OCR.Common] +test_duration="15m" + +[Soak.OCR.Soak] +ocr_version="1" +number_of_contracts=2 +time_between_rounds="1m" \ No newline at end of file diff --git a/integration-tests/testconfig/ocr/ocr.go b/integration-tests/testconfig/ocr/ocr.go new file mode 100644 index 00000000..f06d6d2b --- /dev/null +++ b/integration-tests/testconfig/ocr/ocr.go @@ -0,0 +1,136 @@ +package ocr + +import ( + "errors" + + "github.com/goplugin/plugin-testing-framework/blockchain" +) + +type Config struct { + Soak *SoakConfig `toml:"Soak"` + Load *Load `toml:"Load"` + Volume *Volume `toml:"Volume"` + Common *Common `toml:"Common"` +} + +func (o *Config) Validate() error { + if o.Common != nil { + if err := o.Common.Validate(); err != nil { + return err + } + } + if o.Soak != nil { + if err := o.Soak.Validate(); err != nil { + return err + } + } + if o.Volume != nil { + if err := o.Volume.Validate(); err != nil { + return err + } + } + return nil +} + +type Common struct { + ETHFunds *int `toml:"eth_funds"` + TestDuration *blockchain.StrDuration `toml:"test_duration"` +} + +func (o *Common) Validate() error { + if o.ETHFunds != nil && *o.ETHFunds < 0 { + return errors.New("eth_funds must be set and cannot be negative") + } + return nil +} + +type Load struct { + Rate *int64 `toml:"rate"` + RequestsPerUnit *int `toml:"requests_per_unit"` + RateLimitUnitDuration *blockchain.StrDuration `toml:"rate_limit_unit_duration"` + VerificationInterval *blockchain.StrDuration `toml:"verification_interval"` + VerificationTimeout *blockchain.StrDuration `toml:"verification_timeout"` + 
EAChangeInterval *blockchain.StrDuration `toml:"ea_change_interval"` + TestDuration *blockchain.StrDuration `toml:"test_duration"` +} + +func (o *Load) Validate() error { + if o.TestDuration == nil { + return errors.New("load test duration must be set") + } + if o.Rate == nil || *o.Rate <= 0 { + return errors.New("rate must be set and be a positive integer") + } + if o.RequestsPerUnit == nil || *o.RequestsPerUnit <= 0 { + return errors.New("vu_requests_per_unit must be set and be a positive integer") + } + if o.RateLimitUnitDuration == nil || o.RateLimitUnitDuration.Duration == 0 { + return errors.New("rate_limit_unit_duration must be set and be a positive integer") + } + if o.VerificationInterval == nil || o.VerificationInterval.Duration == 0 { + return errors.New("verification_interval must be set and be a positive integer") + } + if o.VerificationTimeout == nil || o.VerificationTimeout.Duration == 0 { + return errors.New("verification_timeout must be set and be a positive integer") + } + if o.EAChangeInterval == nil || o.EAChangeInterval.Duration == 0 { + return errors.New("ea_change_interval must be set and be a positive integer") + } + + return nil +} + +type Volume struct { + Rate *int64 `toml:"rate"` + VURequestsPerUnit *int `toml:"vu_requests_per_unit"` + RateLimitUnitDuration *blockchain.StrDuration `toml:"rate_limit_unit_duration"` + VerificationInterval *blockchain.StrDuration `toml:"verification_interval"` + VerificationTimeout *blockchain.StrDuration `toml:"verification_timeout"` + EAChangeInterval *blockchain.StrDuration `toml:"ea_change_interval"` + TestDuration *blockchain.StrDuration `toml:"test_duration"` +} + +func (o *Volume) Validate() error { + if o.TestDuration == nil { + return errors.New("volume test duration must be set") + } + if o.Rate == nil || *o.Rate <= 0 { + return errors.New("rate must be set and be a positive integer") + } + if o.VURequestsPerUnit == nil || *o.VURequestsPerUnit <= 0 { + return errors.New("vu_requests_per_unit must 
be set and be a positive integer") + } + if o.RateLimitUnitDuration == nil || o.RateLimitUnitDuration.Duration == 0 { + return errors.New("rate_limit_unit_duration must be set and be a positive integer") + } + if o.VerificationInterval == nil || o.VerificationInterval.Duration == 0 { + return errors.New("verification_interval must be set and be a positive integer") + } + if o.VerificationTimeout == nil || o.VerificationTimeout.Duration == 0 { + return errors.New("verification_timeout must be set and be a positive integer") + } + if o.EAChangeInterval == nil || o.EAChangeInterval.Duration == 0 { + return errors.New("ea_change_interval must be set and be a positive integer") + } + + return nil +} + +type SoakConfig struct { + OCRVersion *string `toml:"ocr_version"` + NumberOfContracts *int `toml:"number_of_contracts"` + TimeBetweenRounds *blockchain.StrDuration `toml:"time_between_rounds"` +} + +func (o *SoakConfig) Validate() error { + if o.OCRVersion == nil || *o.OCRVersion == "" { + return errors.New("ocr_version must be set to either 1 or 2") + } + if o.NumberOfContracts == nil || *o.NumberOfContracts <= 1 { + return errors.New("number_of_contracts must be set and be greater than 1") + } + if o.TimeBetweenRounds == nil || o.TimeBetweenRounds.Duration == 0 { + return errors.New("time_between_rounds must be set and be a positive integer") + } + return nil +} diff --git a/integration-tests/testconfig/ocr/ocr.toml b/integration-tests/testconfig/ocr/ocr.toml new file mode 100644 index 00000000..cd2725a7 --- /dev/null +++ b/integration-tests/testconfig/ocr/ocr.toml @@ -0,0 +1,43 @@ +# product defaults +[Common] +plugin_node_funding = 0.5 + +# load test specific configuration +[Load.OCR] +[Load.OCR.Common] +eth_funds = 3 + +[Load.OCR.Load] +test_duration = "3m" +rate_limit_unit_duration = "1m" +rate = 3 +verification_interval = "5s" +verification_timeout = "3m" +ea_change_interval = "5s" + +# volume test specific configuration +[Volume.OCR] +[Volume.OCR.Common] 
+eth_funds = 3 + +[Volume.OCR.Volume] +test_duration = "3m" +rate_limit_unit_duration = "1m" +vu_requests_per_unit = 10 +rate = 1 +verification_interval = "5s" +verification_timeout = "3m" +ea_change_interval = "5s" + +# soak test specific configuration +[Soak.Common] +plugin_node_funding = 100 + +[Soak.OCR] +[Soak.OCR.Common] +test_duration="15m" + +[Soak.OCR.Soak] +ocr_version="1" +number_of_contracts=2 +time_between_rounds="1m" \ No newline at end of file diff --git a/integration-tests/testconfig/ocr2/example.toml b/integration-tests/testconfig/ocr2/example.toml new file mode 100644 index 00000000..cda4b6de --- /dev/null +++ b/integration-tests/testconfig/ocr2/example.toml @@ -0,0 +1,96 @@ +# Example of full config with all fields +# General part +[PluginImage] +image="public.ecr.aws/plugin/plugin" +version="2.7.0" + +[Logging] +# if set to true will save logs even if test did not fail +test_log_collect=false + +[Logging.LogStream] +# supported targets: file, loki, in-memory. if empty no logs will be persistet +log_targets=["file"] +# context timeout for starting log producer and also time-frame for requesting logs +log_producer_timeout="10s" +# number of retries before log producer gives up and stops listening to logs +log_producer_retry_limit=10 + +[Logging.Loki] +tenant_id="tenant_id" +# full URL of Loki ingest endpoint +endpoint="https://loki.url/api/v3/push" +# currently only needed when using public instance +basic_auth="loki-basic-auth" +# only needed for cloud grafana +bearer_token="bearer_token" + +# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set) +[Logging.Grafana] +# grafana url (trailing "/" will be stripped) +base_url="http://grafana.url" +# url of your grafana dashboard (prefix and suffix "/" are stirpped), example: /d/ad61652-2712-1722/my-dashboard +dashboard_url="/d/your-dashboard" +bearer_token="my-awesome-token" + +# if you want to use polygon_mumbial +[Network] +selected_networks=["polygon_mumbai"] + 
+[Network.RpcHttpUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.RpcWsUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.WalletKeys] +polygon_mumbai = ["change-me-to-your-PK"] + +[PrivateEthereumNetwork] +# pos or pow +consensus_type="pos" +# only prysm supported currently +consensus_layer="prysm" +# geth, besu, nethermind or erigon +execution_layer="geth" +# if true after env started it will wait for at least 1 epoch to be finalised before continuing +wait_for_finalization=false + +[PrivateEthereumNetwork.EthereumChainConfig] +# duration of single slot, lower => faster block production, must be >= 4 +seconds_per_slot=12 +# numer of slots in epoch, lower => faster epoch finalisation, must be >= 4 +slots_per_epoch=6 +# extra genesis gelay, no need to modify, but it should be after all validators/beacon chain starts +genesis_delay=15 +# number of validators in the network +validator_count=8 +chain_id=1337 +# list of addresses to be prefunded in genesis +addresses_to_fund=["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"] + +# load test specific configuration +[Load.OCR] +[Load.OCR.Common] +eth_funds = 3 + +[Load.OCR.Load] +test_duration = "3m" +rate_limit_unit_duration = "1m" +rate = 3 +verification_interval = "5s" +verification_timeout = "3m" +ea_change_interval = "5s" + +# soak test specific configuration +[Soak.Common] +plugin_node_funding = 100 + +[Soak.OCR] +[Soak.OCR.Common] +test_duration="15m" + +[Soak.OCR.Soak] +ocr_version="1" +number_of_contracts=2 +time_between_rounds="1m" \ No newline at end of file diff --git a/integration-tests/testconfig/ocr2/ocr2.go b/integration-tests/testconfig/ocr2/ocr2.go new file mode 100644 index 00000000..fecb7e16 --- /dev/null +++ b/integration-tests/testconfig/ocr2/ocr2.go @@ -0,0 +1,57 @@ +package ocr + +import ( + "errors" + + "github.com/goplugin/plugin-testing-framework/blockchain" +) + +type Config struct { + Soak *SoakConfig `toml:"Soak"` + Common *Common `toml:"Common"` +} + +func (o 
*Config) Validate() error { + if o.Common != nil { + if err := o.Common.Validate(); err != nil { + return err + } + } + if o.Soak != nil { + if err := o.Soak.Validate(); err != nil { + return err + } + } + return nil +} + +type Common struct { + ETHFunds *int `toml:"eth_funds"` + TestDuration *blockchain.StrDuration `toml:"test_duration"` +} + +func (o *Common) Validate() error { + if o.ETHFunds != nil && *o.ETHFunds < 0 { + return errors.New("eth_funds must be set and cannot be negative") + } + return nil +} + +type SoakConfig struct { + OCRVersion *string `toml:"ocr_version"` + NumberOfContracts *int `toml:"number_of_contracts"` + TimeBetweenRounds *blockchain.StrDuration `toml:"time_between_rounds"` +} + +func (o *SoakConfig) Validate() error { + if o.OCRVersion == nil || *o.OCRVersion == "" { + return errors.New("ocr_version must be set to either 1 or 2") + } + if o.NumberOfContracts == nil || *o.NumberOfContracts <= 1 { + return errors.New("number_of_contracts must be set and be greater than 1") + } + if o.TimeBetweenRounds == nil || o.TimeBetweenRounds.Duration == 0 { + return errors.New("time_between_rounds must be set and be a positive integer") + } + return nil +} diff --git a/integration-tests/testconfig/ocr2/ocr2.toml b/integration-tests/testconfig/ocr2/ocr2.toml new file mode 100644 index 00000000..cd2725a7 --- /dev/null +++ b/integration-tests/testconfig/ocr2/ocr2.toml @@ -0,0 +1,43 @@ +# product defaults +[Common] +plugin_node_funding = 0.5 + +# load test specific configuration +[Load.OCR] +[Load.OCR.Common] +eth_funds = 3 + +[Load.OCR.Load] +test_duration = "3m" +rate_limit_unit_duration = "1m" +rate = 3 +verification_interval = "5s" +verification_timeout = "3m" +ea_change_interval = "5s" + +# volume test specific configuration +[Volume.OCR] +[Volume.OCR.Common] +eth_funds = 3 + +[Volume.OCR.Volume] +test_duration = "3m" +rate_limit_unit_duration = "1m" +vu_requests_per_unit = 10 +rate = 1 +verification_interval = "5s" +verification_timeout = "3m" 
+ea_change_interval = "5s" + +# soak test specific configuration +[Soak.Common] +plugin_node_funding = 100 + +[Soak.OCR] +[Soak.OCR.Common] +test_duration="15m" + +[Soak.OCR.Soak] +ocr_version="1" +number_of_contracts=2 +time_between_rounds="1m" \ No newline at end of file diff --git a/integration-tests/testconfig/testconfig.go b/integration-tests/testconfig/testconfig.go new file mode 100644 index 00000000..61cd59a3 --- /dev/null +++ b/integration-tests/testconfig/testconfig.go @@ -0,0 +1,555 @@ +package testconfig + +import ( + "embed" + "encoding/base64" + "fmt" + "os" + "slices" + "strings" + + "github.com/barkimedes/go-deepcopy" + "github.com/google/uuid" + "github.com/pelletier/go-toml/v2" + "github.com/pkg/errors" + "github.com/rs/zerolog" + "golang.org/x/text/cases" + "golang.org/x/text/language" + + ctf_config "github.com/goplugin/plugin-testing-framework/config" + "github.com/goplugin/plugin-testing-framework/docker/test_env" + ctf_test_env "github.com/goplugin/plugin-testing-framework/docker/test_env" + k8s_config "github.com/goplugin/plugin-testing-framework/k8s/config" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/utils/osutil" + a_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/automation" + f_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/functions" + keeper_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/keeper" + lp_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/log_poller" + ocr_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/ocr" + ocr2_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/ocr2" + vrf_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/vrf" + vrfv2_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/vrfv2" + vrfv2plus_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/vrfv2plus" +) + +type GlobalTestConfig 
interface { + GetPluginImageConfig() *ctf_config.PluginImageConfig + GetLoggingConfig() *ctf_config.LoggingConfig + GetNetworkConfig() *ctf_config.NetworkConfig + GetPrivateEthereumNetworkConfig() *test_env.EthereumNetwork + GetPyroscopeConfig() *ctf_config.PyroscopeConfig +} + +type UpgradeablePluginTestConfig interface { + GetPluginUpgradeImageConfig() *ctf_config.PluginImageConfig +} + +type CommonTestConfig interface { + GetCommonConfig() *Common +} + +type VRFv2TestConfig interface { + GetVRFv2Config() *vrfv2_config.Config +} + +type VRFv2PlusTestConfig interface { + GetVRFv2PlusConfig() *vrfv2plus_config.Config +} + +type FunctionsTestConfig interface { + GetFunctionsConfig() *f_config.Config +} + +type KeeperTestConfig interface { + GetKeeperConfig() *keeper_config.Config +} + +type OcrTestConfig interface { + GetOCRConfig() *ocr_config.Config +} + +type Ocr2TestConfig interface { + GetOCR2Config() *ocr2_config.Config +} + +type NamedConfiguration interface { + GetConfigurationName() string +} + +type TestConfig struct { + PluginImage *ctf_config.PluginImageConfig `toml:"PluginImage"` + PluginUpgradeImage *ctf_config.PluginImageConfig `toml:"PluginUpgradeImage"` + Logging *ctf_config.LoggingConfig `toml:"Logging"` + Network *ctf_config.NetworkConfig `toml:"Network"` + Pyroscope *ctf_config.PyroscopeConfig `toml:"Pyroscope"` + PrivateEthereumNetwork *ctf_test_env.EthereumNetwork `toml:"PrivateEthereumNetwork"` + + Common *Common `toml:"Common"` + Automation *a_config.Config `toml:"Automation"` + Functions *f_config.Config `toml:"Functions"` + Keeper *keeper_config.Config `toml:"Keeper"` + LogPoller *lp_config.Config `toml:"LogPoller"` + OCR *ocr_config.Config `toml:"OCR"` + OCR2 *ocr2_config.Config `toml:"OCR2"` + VRF *vrf_config.Config `toml:"VRF"` + VRFv2 *vrfv2_config.Config `toml:"VRFv2"` + VRFv2Plus *vrfv2plus_config.Config `toml:"VRFv2Plus"` + + ConfigurationName string `toml:"-"` +} + +var embeddedConfigs embed.FS +var areConfigsEmbedded bool + +func 
init() { + embeddedConfigs = embeddedConfigsFs +} + +// Returns Grafana URL from Logging config +func (c *TestConfig) GetGrafanaBaseURL() (string, error) { + if c.Logging.Grafana == nil || c.Logging.Grafana.BaseUrl == nil { + return "", errors.New("grafana base url not set") + } + + return strings.TrimSuffix(*c.Logging.Grafana.BaseUrl, "/"), nil +} + +// Returns Grafana Dashboard URL from Logging config +func (c *TestConfig) GetGrafanaDashboardURL() (string, error) { + if c.Logging.Grafana == nil || c.Logging.Grafana.DashboardUrl == nil { + return "", errors.New("grafana dashboard url not set") + } + + url := *c.Logging.Grafana.DashboardUrl + if !strings.HasPrefix(url, "/") { + url = "/" + url + } + + return url, nil +} + +// Saves Test Config to a local file +func (c *TestConfig) Save() (string, error) { + filePath := fmt.Sprintf("test_config-%s.toml", uuid.New()) + + content, err := toml.Marshal(*c) + if err != nil { + return "", errors.Wrapf(err, "error marshaling test config") + } + + err = os.WriteFile(filePath, content, 0600) + if err != nil { + return "", errors.Wrapf(err, "error writing test config") + } + + return filePath, nil +} + +// MustCopy Returns a deep copy of the Test Config or panics on error +func (c TestConfig) MustCopy() any { + return deepcopy.MustAnything(c).(TestConfig) +} + +// MustCopy Returns a deep copy of struct passed to it and returns a typed copy (or panics on error) +func MustCopy[T any](c T) T { + return deepcopy.MustAnything(c).(T) +} + +func (c *TestConfig) GetLoggingConfig() *ctf_config.LoggingConfig { + return c.Logging +} + +func (c TestConfig) GetNetworkConfig() *ctf_config.NetworkConfig { + return c.Network +} + +func (c TestConfig) GetPluginImageConfig() *ctf_config.PluginImageConfig { + return c.PluginImage +} + +func (c TestConfig) GetPrivateEthereumNetworkConfig() *ctf_test_env.EthereumNetwork { + return c.PrivateEthereumNetwork +} + +func (c TestConfig) GetPyroscopeConfig() *ctf_config.PyroscopeConfig { + return 
c.Pyroscope +} + +func (c TestConfig) GetCommonConfig() *Common { + return c.Common +} + +func (c TestConfig) GetVRFv2Config() *vrfv2_config.Config { + return c.VRFv2 +} + +func (c TestConfig) GetFunctionsConfig() *f_config.Config { + return c.Functions +} + +func (c TestConfig) GetVRFv2PlusConfig() *vrfv2plus_config.Config { + return c.VRFv2Plus +} + +func (c TestConfig) GetPluginUpgradeImageConfig() *ctf_config.PluginImageConfig { + return c.PluginUpgradeImage +} + +func (c TestConfig) GetKeeperConfig() *keeper_config.Config { + return c.Keeper +} + +func (c TestConfig) GetOCRConfig() *ocr_config.Config { + return c.OCR +} + +func (c TestConfig) GetConfigurationName() string { + return c.ConfigurationName +} + +func (c *TestConfig) AsBase64() (string, error) { + content, err := toml.Marshal(*c) + if err != nil { + return "", errors.Wrapf(err, "error marshaling test config") + } + + return base64.StdEncoding.EncodeToString(content), nil +} + +type Common struct { + PluginNodeFunding *float64 `toml:"plugin_node_funding"` +} + +func (c *Common) Validate() error { + if c.PluginNodeFunding != nil && *c.PluginNodeFunding < 0 { + return fmt.Errorf("plugin node funding must be positive") + } + + return nil +} + +type Product string + +const ( + Automation Product = "automation" + Cron Product = "cron" + DirectRequest Product = "direct_request" + Flux Product = "flux" + ForwarderOcr Product = "forwarder_ocr" + ForwarderOcr2 Product = "forwarder_ocr2" + Functions Product = "functions" + Keeper Product = "keeper" + LogPoller Product = "log_poller" + Node Product = "node" + OCR Product = "ocr" + OCR2 Product = "ocr2" + OCR2VRF Product = "ocr2vrf" + RunLog Product = "runlog" + VRF Product = "vrf" + VRFv2 Product = "vrfv2" + VRFv2Plus Product = "vrfv2plus" +) + +var TestTypesWithLoki = []string{"Load", "Soak", "Stress", "Spike", "Volume"} + +const TestTypeEnvVarName = "TEST_TYPE" + +func GetConfigurationNameFromEnv() (string, error) { + testType := 
os.Getenv(TestTypeEnvVarName) + if testType == "" { + return "", fmt.Errorf("%s env var not set", TestTypeEnvVarName) + } + + return cases.Title(language.English, cases.NoLower).String(testType), nil +} + +const ( + Base64OverrideEnvVarName = k8s_config.EnvBase64ConfigOverride + NoKey = "NO_KEY" +) + +func GetConfig(configurationName string, product Product) (TestConfig, error) { + logger := logging.GetTestLogger(nil) + + configurationName = strings.ReplaceAll(configurationName, "/", "_") + configurationName = strings.ReplaceAll(configurationName, " ", "_") + configurationName = cases.Title(language.English, cases.NoLower).String(configurationName) + fileNames := []string{ + "default.toml", + fmt.Sprintf("%s.toml", product), + "overrides.toml", + } + + testConfig := TestConfig{} + testConfig.ConfigurationName = configurationName + logger.Debug().Msgf("Will apply configuration named '%s' if it is found in any of the configs", configurationName) + + var handleSpecialOverrides = func(logger zerolog.Logger, filename, configurationName string, target *TestConfig, content []byte, product Product) error { + switch product { + case Automation: + return handleAutomationConfigOverride(logger, filename, configurationName, target, content) + default: + err := ctf_config.BytesToAnyTomlStruct(logger, filename, configurationName, &testConfig, content) + if err != nil { + return errors.Wrapf(err, "error reading file %s", filename) + } + + return nil + } + } + + // read embedded configs is build tag "embed" is set + // this makes our life much easier when using a binary + if areConfigsEmbedded { + logger.Info().Msg("Reading embedded configs") + embeddedFiles := []string{"default.toml", fmt.Sprintf("%s/%s.toml", product, product)} + for _, fileName := range embeddedFiles { + file, err := embeddedConfigs.ReadFile(fileName) + if err != nil && errors.Is(err, os.ErrNotExist) { + logger.Debug().Msgf("Embedded config file %s not found. 
Continuing", fileName)
+				continue
+			} else if err != nil {
+				return TestConfig{}, errors.Wrapf(err, "error reading embedded config")
+			}
+
+			err = handleSpecialOverrides(logger, fileName, configurationName, &testConfig, file, product)
+			if err != nil {
+				return TestConfig{}, errors.Wrapf(err, "error unmarshalling embedded config")
+			}
+		}
+	}
+
+	logger.Info().Msg("Reading configs from file system")
+	for _, fileName := range fileNames {
+		logger.Debug().Msgf("Looking for config file %s", fileName)
+		filePath, err := osutil.FindFile(fileName, osutil.DEFAULT_STOP_FILE_NAME, 3)
+
+		if err != nil && errors.Is(err, os.ErrNotExist) {
+			logger.Debug().Msgf("Config file %s not found", fileName)
+			continue
+		} else if err != nil {
+			return TestConfig{}, errors.Wrapf(err, "error looking for file %s", filePath)
+		}
+		logger.Debug().Str("location", filePath).Msgf("Found config file %s", fileName)
+
+		content, err := readFile(filePath)
+		if err != nil {
+			return TestConfig{}, errors.Wrapf(err, "error reading file %s", filePath)
+		}
+
+		err = handleSpecialOverrides(logger, fileName, configurationName, &testConfig, content, product)
+		if err != nil {
+			return TestConfig{}, errors.Wrapf(err, "error reading file %s", filePath)
+		}
+	}
+
+	logger.Info().Msg("Reading configs from Base64 override env var")
+	configEncoded, isSet := os.LookupEnv(Base64OverrideEnvVarName)
+	if isSet && configEncoded != "" {
+		logger.Debug().Msgf("Found base64 config override environment variable '%s'", Base64OverrideEnvVarName)
+		decoded, err := base64.StdEncoding.DecodeString(configEncoded)
+		if err != nil {
+			return TestConfig{}, err
+		}
+
+		err = handleSpecialOverrides(logger, Base64OverrideEnvVarName, configurationName, &testConfig, decoded, product)
+		if err != nil {
+			return TestConfig{}, errors.Wrapf(err, "error unmarshaling base64 config")
+		}
+	} else {
+		logger.Debug().Msg("Base64 config override from environment variable not found")
+	}
+
+	// it needs some custom logic, so we do it 
separately + err := testConfig.readNetworkConfiguration() + if err != nil { + return TestConfig{}, errors.Wrapf(err, "error reading network config") + } + + logger.Debug().Msg("Validating test config") + err = testConfig.Validate() + if err != nil { + return TestConfig{}, errors.Wrapf(err, "error validating test config") + } + + if testConfig.Common == nil { + testConfig.Common = &Common{} + } + + logger.Debug().Msg("Correct test config constructed successfully") + return testConfig, nil +} + +func (c *TestConfig) readNetworkConfiguration() error { + // currently we need to read that kind of secrets only for network configuration + if c == nil { + c.Network = &ctf_config.NetworkConfig{} + } + + c.Network.UpperCaseNetworkNames() + err := c.Network.Default() + if err != nil { + return errors.Wrapf(err, "error reading default network config") + } + + // this is the only value we need to generate dynamically before starting a new simulated chain + if c.PrivateEthereumNetwork != nil && c.PrivateEthereumNetwork.EthereumChainConfig != nil { + c.PrivateEthereumNetwork.EthereumChainConfig.GenerateGenesisTimestamp() + } + + return nil +} + +func (c *TestConfig) Validate() error { + defer func() { + if r := recover(); r != nil { + panic(fmt.Errorf("Panic during test config validation: '%v'. 
Most probably due to presence of partial product config", r)) + } + }() + if c.PluginImage == nil { + return fmt.Errorf("plugin image config must be set") + } + if err := c.PluginImage.Validate(); err != nil { + return errors.Wrapf(err, "plugin image config validation failed") + } + if c.PluginUpgradeImage != nil { + if err := c.PluginUpgradeImage.Validate(); err != nil { + return errors.Wrapf(err, "plugin upgrade image config validation failed") + } + } + if err := c.Network.Validate(); err != nil { + return errors.Wrapf(err, "network config validation failed") + } + + if c.Logging == nil { + return fmt.Errorf("logging config must be set") + } + + if err := c.Logging.Validate(); err != nil { + return errors.Wrapf(err, "logging config validation failed") + } + + // require Loki config only if these tests run locally + _, willUseRemoteRunner := os.LookupEnv(k8s_config.EnvVarJobImage) + _, isInsideK8s := os.LookupEnv(k8s_config.EnvVarInsideK8s) + if (!willUseRemoteRunner && !isInsideK8s) && slices.Contains(TestTypesWithLoki, c.ConfigurationName) { + if c.Logging.Loki == nil { + return fmt.Errorf("for local execution you must set Loki config in logging config") + } + + if err := c.Logging.Loki.Validate(); err != nil { + return errors.Wrapf(err, "loki config validation failed") + } + } + + if c.Logging.LogStream != nil && slices.Contains(c.Logging.LogStream.LogTargets, "loki") { + if c.Logging.Loki == nil { + return fmt.Errorf("in order to use Loki as logging target you must set Loki config in logging config") + } + + if err := c.Logging.Loki.Validate(); err != nil { + return errors.Wrapf(err, "loki config validation failed") + } + } + + if c.Pyroscope != nil { + if err := c.Pyroscope.Validate(); err != nil { + return errors.Wrapf(err, "pyroscope config validation failed") + } + } + + if c.PrivateEthereumNetwork != nil { + if err := c.PrivateEthereumNetwork.Validate(); err != nil { + return errors.Wrapf(err, "private ethereum network config validation failed") + } + } 
+
+	if c.Common != nil {
+		if err := c.Common.Validate(); err != nil {
+			return errors.Wrapf(err, "Common config validation failed")
+		}
+	}
+
+	if c.Automation != nil {
+		if err := c.Automation.Validate(); err != nil {
+			return errors.Wrapf(err, "Automation config validation failed")
+		}
+	}
+
+	if c.Functions != nil {
+		if err := c.Functions.Validate(); err != nil {
+			return errors.Wrapf(err, "Functions config validation failed")
+		}
+	}
+
+	if c.Keeper != nil {
+		if err := c.Keeper.Validate(); err != nil {
+			return errors.Wrapf(err, "Keeper config validation failed")
+		}
+	}
+
+	if c.LogPoller != nil {
+		if err := c.LogPoller.Validate(); err != nil {
+			return errors.Wrapf(err, "LogPoller config validation failed")
+		}
+	}
+
+	if c.OCR != nil {
+		if err := c.OCR.Validate(); err != nil {
+			return errors.Wrapf(err, "OCR config validation failed")
+		}
+	}
+
+	// OCR2 is declared on TestConfig but was never validated; check it like
+	// every other product config so a partial [OCR2] table fails fast.
+	if c.OCR2 != nil {
+		if err := c.OCR2.Validate(); err != nil {
+			return errors.Wrapf(err, "OCR2 config validation failed")
+		}
+	}
+
+	if c.VRF != nil {
+		if err := c.VRF.Validate(); err != nil {
+			return errors.Wrapf(err, "VRF config validation failed")
+		}
+	}
+
+	if c.VRFv2 != nil {
+		if err := c.VRFv2.Validate(); err != nil {
+			return errors.Wrapf(err, "VRFv2 config validation failed")
+		}
+	}
+
+	if c.VRFv2Plus != nil {
+		if err := c.VRFv2Plus.Validate(); err != nil {
+			return errors.Wrapf(err, "VRFv2Plus config validation failed")
+		}
+	}
+
+	return nil
+}
+
+func readFile(filePath string) ([]byte, error) {
+	content, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error reading file %s", filePath)
+	}
+
+	return content, nil
+}
+
+func handleAutomationConfigOverride(logger zerolog.Logger, filename, configurationName string, target *TestConfig, content []byte) error {
+	logger.Debug().Msgf("Handling automation config override for %s", filename)
+	oldConfig := MustCopy(target)
+	newConfig := TestConfig{}
+
+	// target is already a *TestConfig; passing &target handed the
+	// unmarshaller a **TestConfig, unlike every other call site.
+	err := ctf_config.BytesToAnyTomlStruct(logger, filename, configurationName, target, content)
+	if err != nil {
+		return errors.Wrapf(err, "error reading file %s", filename)
+	}
+
+	err = 
ctf_config.BytesToAnyTomlStruct(logger, filename, configurationName, &newConfig, content) + if err != nil { + return errors.Wrapf(err, "error reading file %s", filename) + } + + // override instead of merging + if (newConfig.Automation != nil && len(newConfig.Automation.Load) > 0) && (oldConfig != nil && oldConfig.Automation != nil && len(oldConfig.Automation.Load) > 0) { + target.Automation.Load = newConfig.Automation.Load + } + + return nil +} diff --git a/integration-tests/testconfig/testconfig_test.go b/integration-tests/testconfig/testconfig_test.go new file mode 100644 index 00000000..4e01258e --- /dev/null +++ b/integration-tests/testconfig/testconfig_test.go @@ -0,0 +1,88 @@ +package testconfig + +import ( + "encoding/base64" + "math/big" + "os" + "testing" + + "github.com/pelletier/go-toml/v2" + "github.com/test-go/testify/require" + + ctf_config "github.com/goplugin/plugin-testing-framework/config" + "github.com/goplugin/plugin-testing-framework/utils/ptr" + a_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/automation" +) + +func TestBase64ConfigRead(t *testing.T) { + networkConfigTOML := ` + [RpcHttpUrls] + arbitrum_goerli = ["https://devnet-1.mt/ABC/rpc/"] + optimism_goerli = ["https://devnet-3.mt/ABC/rpc/"] + + [RpcWsUrls] + arbitrum_goerli = ["wss://devnet-1.mt/ABC/rpc/"] + optimism_goerli = ["wss://devnet-2.mt/ABC/rpc/"] + ` + networksEncoded := base64.StdEncoding.EncodeToString([]byte(networkConfigTOML)) + os.Setenv(ctf_config.Base64NetworkConfigEnvVarName, networksEncoded) + + testConfig := TestConfig{ + Automation: &a_config.Config{ + General: &a_config.General{ + NumberOfNodes: ptr.Ptr(7), + Duration: ptr.Ptr(9), + BlockTime: ptr.Ptr(10), + SpecType: ptr.Ptr("minimum"), + PluginNodeLogLevel: ptr.Ptr("debug"), + }, + Load: []a_config.Load{ + { + NumberOfUpkeeps: ptr.Ptr(1), + NumberOfEvents: ptr.Ptr(2), + NumberOfSpamMatchingEvents: ptr.Ptr(3), + NumberOfSpamNonMatchingEvents: ptr.Ptr(4), + CheckBurnAmount: big.NewInt(5), + 
PerformBurnAmount: big.NewInt(6), + SharedTrigger: ptr.Ptr(true), + }, + { + NumberOfUpkeeps: ptr.Ptr(3), + NumberOfEvents: ptr.Ptr(2), + NumberOfSpamMatchingEvents: ptr.Ptr(3), + NumberOfSpamNonMatchingEvents: ptr.Ptr(7), + CheckBurnAmount: big.NewInt(5), + PerformBurnAmount: big.NewInt(6), + SharedTrigger: ptr.Ptr(false), + }, + }, + }, + Network: &ctf_config.NetworkConfig{ + SelectedNetworks: []string{"OPTIMISM_GOERLI"}, + RpcHttpUrls: map[string][]string{ + "OPTIMISM_GOERLI": {"http://localhost:8545"}, + }, + WalletKeys: map[string][]string{ + "OPTIMISM_GOERLI": {"0x3333333333333333333333333333333333333333"}, + }, + }, + } + + configMarshalled, err := toml.Marshal(testConfig) + require.NoError(t, err, "Error marshalling test config") + + testConfigEncoded := base64.StdEncoding.EncodeToString(configMarshalled) + os.Setenv(Base64OverrideEnvVarName, testConfigEncoded) + + readConfig, err := GetConfig("test", Automation) + require.NoError(t, err, "Error reading config") + + require.NotNil(t, readConfig.Automation, "Automation config read from base64 is nil") + require.Equal(t, testConfig.Automation.General, readConfig.Automation.General, "General automation config does not match expected") + require.EqualValues(t, testConfig.Automation.Load, readConfig.Automation.Load, "Load automation config does not match expected") + require.NotNil(t, readConfig.Network, "Network config read from base64 is nil") + require.Equal(t, testConfig.Network.SelectedNetworks, readConfig.Network.SelectedNetworks, "SelectedNetwork config entry read from base64 does not match expected") + require.Equal(t, []string{"http://localhost:8545"}, readConfig.Network.RpcHttpUrls["OPTIMISM_GOERLI"], "RpcHttpUrls config entry read from base64 does not match expected") + require.Equal(t, []string{"wss://devnet-2.mt/ABC/rpc/"}, readConfig.Network.RpcWsUrls["OPTIMISM_GOERLI"], "RpcWsUrls config entry read from base64 network defaults does not match expected") + require.Equal(t, 
testConfig.Network.WalletKeys, readConfig.Network.WalletKeys, "WalletKeys config entry read from base64 does not match expected") +} diff --git a/integration-tests/testconfig/vrf/config.go b/integration-tests/testconfig/vrf/config.go new file mode 100644 index 00000000..d009f5bf --- /dev/null +++ b/integration-tests/testconfig/vrf/config.go @@ -0,0 +1,8 @@ +package vrf + +type Config struct { +} + +func (o *Config) Validate() error { + return nil +} diff --git a/integration-tests/testconfig/vrfv2/config.go b/integration-tests/testconfig/vrfv2/config.go new file mode 100644 index 00000000..e062fbb0 --- /dev/null +++ b/integration-tests/testconfig/vrfv2/config.go @@ -0,0 +1,387 @@ +package testconfig + +import ( + "errors" + + "github.com/ethereum/go-ethereum/common" + + "github.com/goplugin/plugin-testing-framework/blockchain" +) + +const ( + ErrDeviationShouldBeLessThanOriginal = "`RandomnessRequestCountPerRequestDeviation` should be less than `RandomnessRequestCountPerRequest`" +) + +type Config struct { + Common *Common `toml:"Common"` + General *General `toml:"General"` + ExistingEnvConfig *ExistingEnvConfig `toml:"ExistingEnv"` + NewEnvConfig *NewEnvConfig `toml:"NewEnv"` + Performance *PerformanceConfig `toml:"Performance"` +} + +func (c *Config) Validate() error { + if c.Common != nil { + if err := c.Common.Validate(); err != nil { + return err + } + } + if c.General != nil { + if err := c.General.Validate(); err != nil { + return err + } + } + if c.Performance != nil { + if err := c.Performance.Validate(); err != nil { + return err + } + if *c.Performance.UseExistingEnv { + if c.ExistingEnvConfig != nil { + if err := c.ExistingEnvConfig.Validate(); err != nil { + return err + } + } + } else { + if c.NewEnvConfig != nil { + if err := c.NewEnvConfig.Validate(); err != nil { + return err + } + } + } + } + + return nil +} + +type Common struct { + CancelSubsAfterTestRun *bool `toml:"cancel_subs_after_test_run"` +} + +func (c *Common) Validate() error { + return 
nil +} + +type PerformanceConfig struct { + TestDuration *blockchain.StrDuration `toml:"test_duration"` + RPS *int64 `toml:"rps"` + RateLimitUnitDuration *blockchain.StrDuration `toml:"rate_limit_unit_duration"` + + // Using existing environment and contracts + UseExistingEnv *bool `toml:"use_existing_env"` + CoordinatorAddress *string + ConsumerAddress *string + LinkAddress *string + SubID *uint64 + KeyHash *string +} + +func (c *PerformanceConfig) Validate() error { + if c.TestDuration == nil || c.TestDuration.Duration == 0 { + return errors.New("test_duration must be set to a positive value") + } + if c.RPS == nil || *c.RPS == 0 { + return errors.New("rps must be set to a positive value") + } + if c.RateLimitUnitDuration == nil { + return errors.New("rate_limit_unit_duration must be set ") + } + if c.UseExistingEnv == nil { + return errors.New("use_existing_env must be set ") + } + + return nil +} + +type ExistingEnvConfig struct { + CoordinatorAddress *string `toml:"coordinator_address"` + ConsumerAddress *string `toml:"consumer_address"` + LinkAddress *string `toml:"link_address"` + SubID *uint64 `toml:"sub_id"` + KeyHash *string `toml:"key_hash"` + CreateFundSubsAndAddConsumers *bool `toml:"create_fund_subs_and_add_consumers"` + NodeSendingKeys []string `toml:"node_sending_keys"` + Funding +} + +func (c *ExistingEnvConfig) Validate() error { + if c.CreateFundSubsAndAddConsumers == nil { + return errors.New("create_fund_subs_and_add_consumers must be set ") + } + if c.CoordinatorAddress == nil { + return errors.New("coordinator_address must be set when using existing environment") + } + if !common.IsHexAddress(*c.CoordinatorAddress) { + return errors.New("coordinator_address must be a valid hex address") + } + if c.KeyHash == nil { + return errors.New("key_hash must be set when using existing environment") + } + if *c.KeyHash == "" { + return errors.New("key_hash must be a non-empty string") + } + if c.LinkAddress != nil && !common.IsHexAddress(*c.LinkAddress) 
{ + return errors.New("link_address must be a valid hex address") + } + + if *c.CreateFundSubsAndAddConsumers { + if err := c.Funding.Validate(); err != nil { + return err + } + if err := c.Funding.SubFunding.Validate(); err != nil { + return err + } + } else { + if c.ConsumerAddress == nil || *c.ConsumerAddress == "" { + return errors.New("consumer_address must be set when using existing environment") + } + if !common.IsHexAddress(*c.ConsumerAddress) { + return errors.New("consumer_address must be a valid hex address") + } + if c.SubID == nil { + return errors.New("sub_id must be set when using existing environment") + } + if *c.SubID == 0 { + return errors.New("sub_id must be a positive value") + } + } + + if c.NodeSendingKeys != nil { + for _, key := range c.NodeSendingKeys { + if !common.IsHexAddress(key) { + return errors.New("node_sending_keys must be a valid hex address") + } + } + } + + return nil +} + +type NewEnvConfig struct { + *Funding +} + +func (c *NewEnvConfig) Validate() error { + if c.Funding != nil { + return c.Funding.Validate() + } + + return nil +} + +type Funding struct { + SubFunding + NodeSendingKeyFunding *float64 `toml:"node_sending_key_funding"` + NodeSendingKeyFundingMin *float64 `toml:"node_sending_key_funding_min"` +} + +func (c *Funding) Validate() error { + if c.NodeSendingKeyFunding != nil && *c.NodeSendingKeyFunding <= 0 { + return errors.New("when set node_sending_key_funding must be a positive value") + } + if c.NodeSendingKeyFundingMin != nil && *c.NodeSendingKeyFundingMin <= 0 { + return errors.New("when set node_sending_key_funding_min must be a positive value") + } + + return nil +} + +type SubFunding struct { + SubFundsLink *float64 `toml:"sub_funds_link"` +} + +func (c *SubFunding) Validate() error { + if c.SubFundsLink != nil && *c.SubFundsLink < 0 { + return errors.New("when set sub_funds_link must be a non-negative value") + } + + return nil +} + +type General struct { + CLNodeMaxGasPriceGWei *int64 
`toml:"cl_node_max_gas_price_gwei"` // Max gas price in GWei for the plugin node + LinkNativeFeedResponse *int64 `toml:"link_native_feed_response"` // Response of the PLI/ETH feed + MinimumConfirmations *uint16 `toml:"minimum_confirmations" ` // Minimum number of confirmations for the VRF Coordinator + SubscriptionFundingAmountLink *float64 `toml:"subscription_funding_amount_link"` // Amount of PLI to fund the subscription with + NumberOfWords *uint32 `toml:"number_of_words" ` // Number of words to request + CallbackGasLimit *uint32 `toml:"callback_gas_limit" ` // Gas limit for the callback + MaxGasLimitCoordinatorConfig *uint32 `toml:"max_gas_limit_coordinator_config"` // Max gas limit for the VRF Coordinator config + FallbackWeiPerUnitLink *int64 `toml:"fallback_wei_per_unit_link"` // Fallback wei per unit PLI for the VRF Coordinator config + StalenessSeconds *uint32 `toml:"staleness_seconds" ` // Staleness in seconds for the VRF Coordinator config + GasAfterPaymentCalculation *uint32 `toml:"gas_after_payment_calculation" ` // Gas after payment calculation for the VRF Coordinator + FulfillmentFlatFeeLinkPPMTier1 *uint32 `toml:"fulfilment_flat_fee_link_ppm_tier_1"` + FulfillmentFlatFeeLinkPPMTier2 *uint32 `toml:"fulfilment_flat_fee_link_ppm_tier_2"` + FulfillmentFlatFeeLinkPPMTier3 *uint32 `toml:"fulfilment_flat_fee_link_ppm_tier_3"` + FulfillmentFlatFeeLinkPPMTier4 *uint32 `toml:"fulfilment_flat_fee_link_ppm_tier_4"` + FulfillmentFlatFeeLinkPPMTier5 *uint32 `toml:"fulfilment_flat_fee_link_ppm_tier_5"` + ReqsForTier2 *int64 `toml:"reqs_for_tier_2"` + ReqsForTier3 *int64 `toml:"reqs_for_tier_3"` + ReqsForTier4 *int64 `toml:"reqs_for_tier_4"` + ReqsForTier5 *int64 `toml:"reqs_for_tier_5"` + + NumberOfSubToCreate *int `toml:"number_of_sub_to_create"` // Number of subscriptions to create + + RandomnessRequestCountPerRequest *uint16 `toml:"randomness_request_count_per_request"` // How many randomness requests to send per request + 
RandomnessRequestCountPerRequestDeviation *uint16 `toml:"randomness_request_count_per_request_deviation"` // How many randomness requests to send per request + + RandomWordsFulfilledEventTimeout *blockchain.StrDuration `toml:"random_words_fulfilled_event_timeout"` // How long to wait for the RandomWordsFulfilled event to be emitted + + // Wrapper Config + WrapperGasOverhead *uint32 `toml:"wrapped_gas_overhead"` + CoordinatorGasOverhead *uint32 `toml:"coordinator_gas_overhead"` + WrapperPremiumPercentage *uint8 `toml:"wrapper_premium_percentage"` + WrapperMaxNumberOfWords *uint8 `toml:"wrapper_max_number_of_words"` + WrapperConsumerFundingAmountNativeToken *float64 `toml:"wrapper_consumer_funding_amount_native_token"` + WrapperConsumerFundingAmountLink *int64 `toml:"wrapper_consumer_funding_amount_link"` + + //VRF Job Config + VRFJobForwardingAllowed *bool `toml:"vrf_job_forwarding_allowed"` + VRFJobEstimateGasMultiplier *float64 `toml:"vrf_job_estimate_gas_multiplier"` + VRFJobBatchFulfillmentEnabled *bool `toml:"vrf_job_batch_fulfillment_enabled"` + VRFJobBatchFulfillmentGasMultiplier *float64 `toml:"vrf_job_batch_fulfillment_gas_multiplier"` + VRFJobPollPeriod *blockchain.StrDuration `toml:"vrf_job_poll_period"` + VRFJobRequestTimeout *blockchain.StrDuration `toml:"vrf_job_request_timeout"` + VRFJobSimulationBlock *string `toml:"vrf_job_simulation_block"` + + //BHS Job Config + BHSJobWaitBlocks *int `toml:"bhs_job_wait_blocks"` + BHSJobLookBackBlocks *int `toml:"bhs_job_lookback_blocks"` + BHSJobPollPeriod *blockchain.StrDuration `toml:"bhs_job_poll_period"` + BHSJobRunTimeout *blockchain.StrDuration `toml:"bhs_job_run_timeout"` +} + +func (c *General) Validate() error { + if c.CLNodeMaxGasPriceGWei == nil || *c.CLNodeMaxGasPriceGWei == 0 { + return errors.New("max_gas_price_gwei must be set to a positive value") + } + if c.LinkNativeFeedResponse == nil || *c.LinkNativeFeedResponse == 0 { + return errors.New("link_native_feed_response must be set to a positive 
value") + } + if c.MinimumConfirmations == nil { + return errors.New("minimum_confirmations must be set to a non-negative value") + } + if c.SubscriptionFundingAmountLink == nil || *c.SubscriptionFundingAmountLink == 0 { + return errors.New("subscription_funding_amount_link must be set to a positive value") + } + if c.NumberOfWords == nil || *c.NumberOfWords == 0 { + return errors.New("number_of_words must be set to a positive value") + } + if c.CallbackGasLimit == nil || *c.CallbackGasLimit == 0 { + return errors.New("callback_gas_limit must be set to a positive value") + } + if c.MaxGasLimitCoordinatorConfig == nil || *c.MaxGasLimitCoordinatorConfig == 0 { + return errors.New("max_gas_limit_coordinator_config must be set to a positive value") + } + if c.FallbackWeiPerUnitLink == nil || *c.FallbackWeiPerUnitLink == 0 { + return errors.New("fallback_wei_per_unit_link must be set to a positive value") + } + if c.StalenessSeconds == nil || *c.StalenessSeconds == 0 { + return errors.New("staleness_seconds must be set to a positive value") + } + if c.GasAfterPaymentCalculation == nil || *c.GasAfterPaymentCalculation == 0 { + return errors.New("gas_after_payment_calculation must be set to a positive value") + } + if c.FulfillmentFlatFeeLinkPPMTier1 == nil || *c.FulfillmentFlatFeeLinkPPMTier1 == 0 { + return errors.New("fulfilment_flat_fee_link_ppm_tier_1 must be set to a positive value") + } + if c.FulfillmentFlatFeeLinkPPMTier2 == nil || *c.FulfillmentFlatFeeLinkPPMTier2 == 0 { + return errors.New("fulfilment_flat_fee_link_ppm_tier_2 must be set to a positive value") + } + if c.FulfillmentFlatFeeLinkPPMTier3 == nil || *c.FulfillmentFlatFeeLinkPPMTier3 == 0 { + return errors.New("fulfilment_flat_fee_link_ppm_tier_3 must be set to a positive value") + } + if c.FulfillmentFlatFeeLinkPPMTier4 == nil || *c.FulfillmentFlatFeeLinkPPMTier4 == 0 { + return errors.New("fulfilment_flat_fee_link_ppm_tier_4 must be set to a positive value") + } + if c.FulfillmentFlatFeeLinkPPMTier5 
== nil || *c.FulfillmentFlatFeeLinkPPMTier5 == 0 { + return errors.New("fulfilment_flat_fee_link_ppm_tier_5 must be set to a positive value") + } + if c.ReqsForTier2 == nil || *c.ReqsForTier2 < 0 { + return errors.New("reqs_for_tier_2 must be set to a non-negative value") + } + if c.ReqsForTier3 == nil || *c.ReqsForTier3 < 0 { + return errors.New("reqs_for_tier_3 must be set to a non-negative value") + } + if c.ReqsForTier4 == nil || *c.ReqsForTier4 < 0 { + return errors.New("reqs_for_tier_4 must be set to a non-negative value") + } + if c.ReqsForTier5 == nil || *c.ReqsForTier5 < 0 { + return errors.New("reqs_for_tier_5 must be set to a non-negative value") + } + if c.NumberOfSubToCreate == nil || *c.NumberOfSubToCreate == 0 { + return errors.New("number_of_sub_to_create must be set to a positive value") + } + if c.RandomnessRequestCountPerRequest == nil || *c.RandomnessRequestCountPerRequest == 0 { + return errors.New("randomness_request_count_per_request must be set to a positive value") + } + if c.RandomnessRequestCountPerRequestDeviation == nil { + return errors.New("randomness_request_count_per_request_deviation must be set to a non-negative value") + } + if c.RandomWordsFulfilledEventTimeout == nil || c.RandomWordsFulfilledEventTimeout.Duration == 0 { + return errors.New("random_words_fulfilled_event_timeout must be set to a positive value") + } + if c.WrapperGasOverhead == nil { + return errors.New("wrapped_gas_overhead must be set to a non-negative value") + } + if c.CoordinatorGasOverhead == nil || *c.CoordinatorGasOverhead == 0 { + return errors.New("coordinator_gas_overhead must be set to a non-negative value") + } + if c.WrapperPremiumPercentage == nil || *c.WrapperPremiumPercentage == 0 { + return errors.New("wrapper_premium_percentage must be set to a positive value") + } + if c.WrapperMaxNumberOfWords == nil || *c.WrapperMaxNumberOfWords == 0 { + return errors.New("wrapper_max_number_of_words must be set to a positive value") + } + if 
c.WrapperConsumerFundingAmountNativeToken == nil || *c.WrapperConsumerFundingAmountNativeToken < 0 { + return errors.New("wrapper_consumer_funding_amount_native_token must be set to a non-negative value") + } + if c.WrapperConsumerFundingAmountLink == nil || *c.WrapperConsumerFundingAmountLink < 0 { + return errors.New("wrapper_consumer_funding_amount_link must be set to a non-negative value") + } + if *c.RandomnessRequestCountPerRequest <= *c.RandomnessRequestCountPerRequestDeviation { + return errors.New(ErrDeviationShouldBeLessThanOriginal) + } + + if c.VRFJobForwardingAllowed == nil { + return errors.New("vrf_job_forwarding_allowed must be set") + } + + if c.VRFJobBatchFulfillmentEnabled == nil { + return errors.New("vrf_job_batch_fulfillment_enabled must be set") + } + if c.VRFJobEstimateGasMultiplier == nil || *c.VRFJobEstimateGasMultiplier < 0 { + return errors.New("vrf_job_estimate_gas_multiplier must be set to a non-negative value") + } + if c.VRFJobBatchFulfillmentGasMultiplier == nil || *c.VRFJobBatchFulfillmentGasMultiplier < 0 { + return errors.New("vrf_job_batch_fulfillment_gas_multiplier must be set to a non-negative value") + } + + if c.VRFJobPollPeriod == nil || c.VRFJobPollPeriod.Duration == 0 { + return errors.New("vrf_job_poll_period must be set to a non-negative value") + } + + if c.VRFJobRequestTimeout == nil || c.VRFJobRequestTimeout.Duration == 0 { + return errors.New("vrf_job_request_timeout must be set to a non-negative value") + } + + if c.BHSJobLookBackBlocks == nil || *c.BHSJobLookBackBlocks < 0 { + return errors.New("bhs_job_lookback_blocks must be set to a non-negative value") + } + + if c.BHSJobPollPeriod == nil || c.BHSJobPollPeriod.Duration == 0 { + return errors.New("bhs_job_poll_period must be set to a non-negative value") + } + + if c.BHSJobRunTimeout == nil || c.BHSJobRunTimeout.Duration == 0 { + return errors.New("bhs_job_run_timeout must be set to a non-negative value") + } + + if c.BHSJobWaitBlocks == nil || 
*c.BHSJobWaitBlocks < 0 { + return errors.New("bhs_job_wait_blocks must be set to a non-negative value") + } + + if c.VRFJobSimulationBlock != nil && (*c.VRFJobSimulationBlock != "latest" && *c.VRFJobSimulationBlock != "pending") { + return errors.New("simulation_block must be nil or \"latest\" or \"pending\"") + } + + return nil +} diff --git a/integration-tests/testconfig/vrfv2/example.toml b/integration-tests/testconfig/vrfv2/example.toml new file mode 100644 index 00000000..961949f1 --- /dev/null +++ b/integration-tests/testconfig/vrfv2/example.toml @@ -0,0 +1,136 @@ +# Example of full config with all fields +# General part +[PluginImage] +image="public.ecr.aws/plugin/plugin" +version="2.7.0" + +[Logging] +# if set to true will save logs even if test did not fail +test_log_collect=false + +[Logging.LogStream] +# supported targets: file, loki, in-memory. if empty no logs will be persisted +log_targets=["file"] +# context timeout for starting log producer and also time-frame for requesting logs +log_producer_timeout="10s" +# number of retries before log producer gives up and stops listening to logs +log_producer_retry_limit=10 + +[Logging.Loki] +tenant_id="tenant_id" +# full URL of Loki ingest endpoint +endpoint="https://loki.url/api/v3/push" +# currently only needed when using public instance +basic_auth="loki-basic-auth" +# only needed for cloud grafana +bearer_token="bearer_token" + +# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set) +[Logging.Grafana] +# grafana url (trailing "/" will be stripped) +base_url="http://grafana.url" +# url of your grafana dashboard (prefix and suffix "/" are stripped), example: /d/ad61652-2712-1722/my-dashboard +dashboard_url="/d/your-dashboard" +bearer_token="my-awesome-token" + +# if you want to use polygon_mumbai +[Network] +selected_networks=["polygon_mumbai"] + +[Network.RpcHttpUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.RpcWsUrls] +polygon_mumbai = 
["https://my-rpc-endpoint.io"] + +[Network.WalletKeys] +polygon_mumbai = ["change-me-to-your-PK"] + +[PrivateEthereumNetwork] +# pos or pow +consensus_type="pos" +# only prysm supported currently +consensus_layer="prysm" +# geth, besu, nethermind or erigon +execution_layer="geth" +# if true after env started it will wait for at least 1 epoch to be finalised before continuing +wait_for_finalization=false + +[PrivateEthereumNetwork.EthereumChainConfig] +# duration of single slot, lower => faster block production, must be >= 4 +seconds_per_slot=12 +# numer of slots in epoch, lower => faster epoch finalisation, must be >= 4 +slots_per_epoch=6 +# extra genesis gelay, no need to modify, but it should be after all validators/beacon chain starts +genesis_delay=15 +# number of validators in the network +validator_count=8 +chain_id=1337 +# list of addresses to be prefunded in genesis +addresses_to_fund=["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"] + +# Common +[Common] +plugin_node_funding = 0.5 + +# Product part +[VRFv2] +[VRFv2.Common] +cancel_subs_after_test_run = true + +[VRFv2.General] +max_gas_price_gwei = 1000 +link_native_feed_response = 1000000000000000000 +minimum_confirmations = 3 +subscription_funding_amount_link = 5.0 +number_of_words = 3 +callback_gas_limit = 1000000 +max_gas_limit_coordinator_config = 2500000 +fallback_wei_per_unit_link = 60000000000000000 +staleness_seconds = 86400 +gas_after_payment_calculation = 33825 +fulfilment_flat_fee_link_ppm_tier_1 = 500 +fulfilment_flat_fee_link_ppm_tier_2 = 500 +fulfilment_flat_fee_link_ppm_tier_3 = 500 +fulfilment_flat_fee_link_ppm_tier_4 = 500 +fulfilment_flat_fee_link_ppm_tier_5 = 500 +reqs_for_tier_2 = 0 +reqs_for_tier_3 = 0 +reqs_for_tier_4 = 0 +reqs_for_tier_5 = 0 +number_of_sub_to_create = 1 +randomness_request_count_per_request = 1 +randomness_request_count_per_request_deviation = 0 +random_words_fulfilled_event_timeout = "2m" +wrapped_gas_overhead = 50000 +coordinator_gas_overhead = 52000 
+wrapper_premium_percentage = 25 +wrapper_max_number_of_words = 10 +wrapper_consumer_funding_amount_native_token = 1.0 +wrapper_consumer_funding_amount_link = 10 + +[VRFv2.Performance] +rate_limit_unit_duration = "3s" +rps = 1 + +[VRFv2.NewEnv] +sub_funds_link = 1000 +node_sending_key_funding = 1000 + +[VRFv2.ExistingEnv] +coordinator_address = "" +consumer_address = "" +sub_id = 1 +key_hash = "" +create_fund_subs_and_add_consumers = true +link_address = "" +sub_funds_link = 10 +node_sending_key_funding_min = 1 +node_sending_keys = [ + "", + "", + "", + "", + "", + "", +] \ No newline at end of file diff --git a/integration-tests/testconfig/vrfv2/vrfv2.toml b/integration-tests/testconfig/vrfv2/vrfv2.toml new file mode 100644 index 00000000..aa66cdbc --- /dev/null +++ b/integration-tests/testconfig/vrfv2/vrfv2.toml @@ -0,0 +1,157 @@ +# default config +[Common] +plugin_node_funding = 0.1 + +[VRFv2] +[VRFv2.General] +cl_node_max_gas_price_gwei = 10 +link_native_feed_response = 1000000000000000000 +minimum_confirmations = 3 +subscription_funding_amount_link = 5.0 +number_of_words = 3 +callback_gas_limit = 1000000 +max_gas_limit_coordinator_config = 2500000 +fallback_wei_per_unit_link = 60000000000000000 +staleness_seconds = 86400 +gas_after_payment_calculation = 33825 +fulfilment_flat_fee_link_ppm_tier_1 = 500 +fulfilment_flat_fee_link_ppm_tier_2 = 500 +fulfilment_flat_fee_link_ppm_tier_3 = 500 +fulfilment_flat_fee_link_ppm_tier_4 = 500 +fulfilment_flat_fee_link_ppm_tier_5 = 500 +reqs_for_tier_2 = 0 +reqs_for_tier_3 = 0 +reqs_for_tier_4 = 0 +reqs_for_tier_5 = 0 +number_of_sub_to_create = 1 +randomness_request_count_per_request = 1 +randomness_request_count_per_request_deviation = 0 +random_words_fulfilled_event_timeout = "2m" +wrapped_gas_overhead = 50000 +coordinator_gas_overhead = 52000 +wrapper_premium_percentage = 25 +wrapper_max_number_of_words = 10 +wrapper_consumer_funding_amount_native_token = 1.0 +wrapper_consumer_funding_amount_link = 10 + +# VRF Job config 
+vrf_job_forwarding_allowed = false +vrf_job_estimate_gas_multiplier = 1.0 +vrf_job_batch_fulfillment_enabled = false +vrf_job_batch_fulfillment_gas_multiplier = 1.15 +vrf_job_poll_period = "1s" +vrf_job_request_timeout = "24h" + +# BHS Job config +bhs_job_wait_blocks = 30 +bhs_job_lookback_blocks = 250 +bhs_job_poll_period = "1s" +bhs_job_run_timeout = "24h" + +# load test specific config +[Load.VRFv2] +[Load.VRFv2.Common] +cancel_subs_after_test_run = true + +[Load.VRFv2.General] +minimum_confirmations = 3 +randomness_request_count_per_request = 3 # amount of randomness requests to make per one TX request +randomness_request_count_per_request_deviation = 2 #NOTE - deviation should be less than randomness_request_count_per_request setting +number_of_sub_to_create = 1 + +[Load.VRFv2.Performance] +# approx 60 RPM - 1 tx request with 3 rand requests in each tx every 3 seconds +rate_limit_unit_duration = "3s" +rps = 1 + +[Load.VRFv2.NewEnv] +sub_funds_link = 1000 +node_sending_key_funding = 1000 + +[Load.VRFv2.ExistingEnv] +sub_id = 1 +create_fund_subs_and_add_consumers = true +sub_funds_link = 10 +node_sending_key_funding_min = 1 +node_sending_keys = [] + +# soak test specific config +[Soak.VRFv2] +[Soak.VRFv2.Common] +cancel_subs_after_test_run = true + +[Soak.VRFv2.General] +minimum_confirmations = 3 +randomness_request_count_per_request = 1 # amount of randomness requests to make per one TX request +randomness_request_count_per_request_deviation = 0 #NOTE - deviation should be less than randomness_request_count_per_request setting +number_of_sub_to_create = 1 + +[Soak.VRFv2.Performance] +# 10 RPM - 1 tx request with 1 rand request in each tx every 6 seconds +rate_limit_unit_duration = "6s" +rps = 1 + +[Soak.VRFv2.NewEnv] +sub_funds_link = 1000 +node_sending_key_funding = 1000 + +[Soak.VRFv2.ExistingEnv] +sub_id = 1 +create_fund_subs_and_add_consumers = true +sub_funds_link = 10 +node_sending_key_funding_min = 1 +node_sending_keys = [] + +# spike test specific config 
+[Spike.VRFv2] +[Spike.VRFv2.Common] +cancel_subs_after_test_run = true + +[Spike.VRFv2.General] +minimum_confirmations = 3 +randomness_request_count_per_request = 150 # amount of randomness requests to make per one TX request +randomness_request_count_per_request_deviation = 0 #NOTE - deviation should be less than randomness_request_count_per_request setting +number_of_sub_to_create = 1 + +[Spike.VRFv2.Performance] +# approx 150 RPM - 1 tx request with 150 rand requests in each tx every 60 seconds +rate_limit_unit_duration = "1m" +rps = 1 + +[Spike.VRFv2.NewEnv] +sub_funds_link = 1000 +node_sending_key_funding = 1000 + +[Spike.VRFv2.ExistingEnv] +sub_id = 1 +create_fund_subs_and_add_consumers = true +sub_funds_link = 10 +node_sending_key_funding_min = 1 +node_sending_keys = [] + +# stress test specific config +[Stress.VRFv2] +[Stress.VRFv2.Common] +cancel_subs_after_test_run = true + +[Stress.VRFv2.General] +minimum_confirmations = 3 +randomness_request_count_per_request = 4 # amount of randomness requests to make per one TX request +randomness_request_count_per_request_deviation = 0 #NOTE - deviation should be less than randomness_request_count_per_request setting +number_of_sub_to_create = 1 + +[Stress.VRFv2.Performance] +# approx 540 RPM - 3 tx requests per second with 4 rand requests in each tx +rate_limit_unit_duration = "1s" +rps = 3 + +[Stress.VRFv2.NewEnv] +sub_funds_link = 1000 +node_sending_key_funding = 1000 + +[Stress.VRFv2.ExistingEnv] +sub_id = 1 +create_fund_subs_and_add_consumers = true +sub_funds_link = 10 +node_sending_key_funding_min = 1 +node_sending_keys = [] diff --git a/integration-tests/testconfig/vrfv2plus/config.go b/integration-tests/testconfig/vrfv2plus/config.go new file mode 100644 index 00000000..13c6b831 --- /dev/null +++ b/integration-tests/testconfig/vrfv2plus/config.go @@ -0,0 +1,170 @@ +package testconfig + +import ( + "errors" + + vrfv2 "github.com/goplugin/pluginv3.0/integration-tests/testconfig/vrfv2" +) + +type BillingType 
string + +const ( + BillingType_Link BillingType = "PLI" + BillingType_Native BillingType = "NATIVE" + BillingType_Link_and_Native BillingType = "PLI_AND_NATIVE" +) + +type Config struct { + Common *Common `toml:"Common"` + General *General `toml:"General"` + ExistingEnvConfig *ExistingEnvConfig `toml:"ExistingEnv"` + NewEnvConfig *NewEnvConfig `toml:"NewEnv"` + Performance *vrfv2.PerformanceConfig `toml:"Performance"` +} + +func (c *Config) Validate() error { + if c.Common != nil { + if err := c.Common.Validate(); err != nil { + return err + } + } + if c.General != nil { + if err := c.General.Validate(); err != nil { + return err + } + } + if c.Performance != nil { + if err := c.Performance.Validate(); err != nil { + return err + } + if *c.Performance.UseExistingEnv { + if c.ExistingEnvConfig != nil { + if err := c.ExistingEnvConfig.Validate(); err != nil { + return err + } + } + } else { + if c.NewEnvConfig != nil { + if err := c.NewEnvConfig.Validate(); err != nil { + return err + } + } + } + } + + return nil +} + +type Common struct { + *vrfv2.Common +} + +func (c *Common) Validate() error { + if c.Common == nil { + return nil + } + return c.Common.Validate() +} + +type General struct { + *vrfv2.General + SubscriptionBillingType *string `toml:"subscription_billing_type"` // Billing type for the subscription + SubscriptionFundingAmountNative *float64 `toml:"subscription_funding_amount_native"` // Amount of native currency to fund the subscription with + FulfillmentFlatFeeNativePPM *uint32 `toml:"fulfillment_flat_fee_native_ppm"` // Flat fee in ppm for native currency for the VRF Coordinator config + FulfillmentFlatFeeLinkPPM *uint32 `toml:"fulfillment_flat_fee_link_ppm"` // Flat fee in ppm for PLI for the VRF Coordinator config + FulfillmentFlatFeeLinkDiscountPPM *uint32 `toml:"fulfillment_flat_fee_link_discount_ppm"` // Flat fee discount in ppm for PLI for the VRF Coordinator config + NativePremiumPercentage *uint8 `toml:"native_premium_percentage"` // Native Premium 
Percentage + LinkPremiumPercentage *uint8 `toml:"link_premium_percentage"` // PLI Premium Percentage +} + +func (c *General) Validate() error { + if err := c.General.Validate(); err != nil { + return err + } + if c.SubscriptionBillingType == nil || *c.SubscriptionBillingType == "" { + return errors.New("subscription_billing_type must be set to either: PLI, NATIVE, PLI_AND_NATIVE") + } + if c.SubscriptionFundingAmountNative == nil || *c.SubscriptionFundingAmountNative <= 0 { + return errors.New("subscription_funding_amount_native must be greater than 0") + } + if c.FulfillmentFlatFeeNativePPM == nil { + return errors.New("fulfillment_flat_fee_native_ppm must not be nil") + } + if c.FulfillmentFlatFeeLinkPPM == nil { + return errors.New("fulfillment_flat_fee_link_ppm must not be nil") + } + if c.FulfillmentFlatFeeLinkDiscountPPM == nil { + return errors.New("fulfillment_flat_fee_link_discount_ppm must not be nil") + } + if c.NativePremiumPercentage == nil { + return errors.New("native_premium_percentage must not be nil") + } + if c.LinkPremiumPercentage == nil { + return errors.New("link_premium_percentage must not be nil") + } + + return nil +} + +type NewEnvConfig struct { + *Funding +} + +func (c *NewEnvConfig) Validate() error { + if c.Funding == nil { + return nil + } + + return c.Funding.Validate() +} + +type ExistingEnvConfig struct { + *vrfv2.ExistingEnvConfig + Funding +} + +func (c *ExistingEnvConfig) Validate() error { + if c.ExistingEnvConfig != nil { + if err := c.ExistingEnvConfig.Validate(); err != nil { + return err + } + } + + return c.Funding.Validate() +} + +type Funding struct { + SubFunding + NodeSendingKeyFunding *float64 `toml:"node_sending_key_funding"` + NodeSendingKeyFundingMin *float64 `toml:"node_sending_key_funding_min"` +} + +func (c *Funding) Validate() error { + if c.NodeSendingKeyFunding != nil && *c.NodeSendingKeyFunding <= 0 { + return errors.New("when set node_sending_key_funding must be a positive value") + } + if 
c.NodeSendingKeyFundingMin != nil && *c.NodeSendingKeyFundingMin <= 0 { + return errors.New("when set node_sending_key_funding_min must be a positive value") + } + + return c.SubFunding.Validate() +} + +type SubFunding struct { + SubFundsLink *float64 `toml:"sub_funds_link"` + SubFundsNative *float64 `toml:"sub_funds_native"` +} + +func (c *SubFunding) Validate() error { + if c.SubFundsLink == nil || c.SubFundsNative == nil { + return errors.New("both sub_funds_link and sub_funds_native must be set") + } + if c.SubFundsLink != nil && *c.SubFundsLink < 0 { + return errors.New("sub_funds_link must be a non-negative number") + } + if c.SubFundsNative != nil && *c.SubFundsNative < 0 { + return errors.New("sub_funds_native must be a non-negative number") + } + + return nil +} diff --git a/integration-tests/testconfig/vrfv2plus/example.toml b/integration-tests/testconfig/vrfv2plus/example.toml new file mode 100644 index 00000000..0541dc9f --- /dev/null +++ b/integration-tests/testconfig/vrfv2plus/example.toml @@ -0,0 +1,147 @@ +# Example of full config with all fields +# General part +[PluginImage] +image="public.ecr.aws/plugin/plugin" +version="2.7.0" + +[Logging] +# if set to true will save logs even if test did not fail +test_log_collect=false + +[Logging.LogStream] +# supported targets: file, loki, in-memory. 
if empty no logs will be persisted +log_targets=["file"] +# context timeout for starting log producer and also time-frame for requesting logs +log_producer_timeout="10s" +# number of retries before log producer gives up and stops listening to logs +log_producer_retry_limit=10 + +[Logging.Loki] +tenant_id="tenant_id" +# full URL of Loki ingest endpoint +endpoint="https://loki.url/api/v3/push" +# currently only needed when using public instance +basic_auth="loki-basic-auth" +# only needed for cloud grafana +bearer_token="bearer_token" + +# LogStream will try to shorten Grafana URLs by default (if all 3 variables are set) +[Logging.Grafana] +# grafana url (trailing "/" will be stripped) +base_url="http://grafana.url" +# url of your grafana dashboard (prefix and suffix "/" are stripped), example: /d/ad61652-2712-1722/my-dashboard +dashboard_url="/d/your-dashboard" +bearer_token="my-awesome-token" + +# if you want to use polygon_mumbai +[Network] +selected_networks=["polygon_mumbai"] + +[Network.RpcHttpUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.RpcWsUrls] +polygon_mumbai = ["https://my-rpc-endpoint.io"] + +[Network.WalletKeys] +polygon_mumbai = ["change-me-to-your-PK"] + +[PrivateEthereumNetwork] +# pos or pow +consensus_type="pos" +# only prysm supported currently +consensus_layer="prysm" +# geth, besu, nethermind or erigon +execution_layer="geth" +# if true after env started it will wait for at least 1 epoch to be finalised before continuing +wait_for_finalization=false + +[PrivateEthereumNetwork.EthereumChainConfig] +# duration of single slot, lower => faster block production, must be >= 4 +seconds_per_slot=12 +# number of slots in epoch, lower => faster epoch finalisation, must be >= 4 +slots_per_epoch=6 +# extra genesis delay, no need to modify, but it should be after all validators/beacon chain starts +genesis_delay=15 +# number of validators in the network +validator_count=8 +chain_id=1337 +# list of addresses to be prefunded in genesis 
+addresses_to_fund=["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"] + +# Common +[Common] +plugin_node_funding = 0.5 + +# Product part +[VRFv2Plus] +[VRFv2Plus.Common] +cancel_subs_after_test_run = true + +[VRFv2Plus.General] +max_gas_price_gwei = 1000 +link_native_feed_response = 1000000000000000000 +minimum_confirmations = 3 +subscription_billing_type = "PLI_AND_NATIVE" +subscription_funding_amount_link = 5.0 +number_of_words = 3 +callback_gas_limit = 1000000 +max_gas_limit_coordinator_config = 2500000 +fallback_wei_per_unit_link = 60000000000000000 +staleness_seconds = 86400 +gas_after_payment_calculation = 33825 +fulfilment_flat_fee_link_ppm_tier_1 = 500 +fulfilment_flat_fee_link_ppm_tier_2 = 500 +fulfilment_flat_fee_link_ppm_tier_3 = 500 +fulfilment_flat_fee_link_ppm_tier_4 = 500 +fulfilment_flat_fee_link_ppm_tier_5 = 500 +reqs_for_tier_2 = 0 +reqs_for_tier_3 = 0 +reqs_for_tier_4 = 0 +reqs_for_tier_5 = 0 +number_of_sub_to_create = 1 +randomness_request_count_per_request = 1 +randomness_request_count_per_request_deviation = 0 +random_words_fulfilled_event_timeout = "2m" +wrapped_gas_overhead = 50000 +coordinator_gas_overhead = 52000 +wrapper_premium_percentage = 25 +wrapper_max_number_of_words = 10 +wrapper_consumer_funding_amount_native_token = 1.0 +wrapper_consumer_funding_amount_link = 10 +subscription_funding_amount_native=1 +fulfillment_flat_fee_link_ppm=500 +fulfillment_flat_fee_native_ppm=500 +fulfillment_flat_fee_link_discount_ppm=100 +native_premium_percentage=1 +link_premium_percentage=1 + +[VRFv2Plus.Performance] +test_duration = "2m" +rate_limit_unit_duration = "3s" +rps = 1 +use_existing_env = false + +[VRFv2Plus.NewEnv] +sub_funds_link = 1 +sub_funds_native = 1 +node_funds = 10 +node_sending_key_funding = 1000 + +[VRFv2Plus.ExistingEnv] +coordinator_address = "" +consumer_address = "" +sub_id = 1 +key_hash = "" +create_fund_subs_and_add_consumers = true +link_address = "" +sub_funds_link = 10 +node_sending_key_funding_min = 1 +node_sending_keys = [ 
+ "", + "", + "", + "", + "", + "", +] \ No newline at end of file diff --git a/integration-tests/testconfig/vrfv2plus/vrfv2plus.toml b/integration-tests/testconfig/vrfv2plus/vrfv2plus.toml new file mode 100644 index 00000000..1398500b --- /dev/null +++ b/integration-tests/testconfig/vrfv2plus/vrfv2plus.toml @@ -0,0 +1,178 @@ +# default config +[Common] +plugin_node_funding = 0.1 + +[VRFv2Plus] +[VRFv2Plus.General] +cl_node_max_gas_price_gwei = 10 +link_native_feed_response = 1000000000000000000 +minimum_confirmations = 3 +subscription_billing_type = "PLI_AND_NATIVE" +subscription_funding_amount_link = 5.0 +number_of_words = 3 +callback_gas_limit = 1000000 +max_gas_limit_coordinator_config = 2500000 +fallback_wei_per_unit_link = 60000000000000000 +staleness_seconds = 86400 +gas_after_payment_calculation = 33825 +fulfilment_flat_fee_link_ppm_tier_1 = 500 +fulfilment_flat_fee_link_ppm_tier_2 = 500 +fulfilment_flat_fee_link_ppm_tier_3 = 500 +fulfilment_flat_fee_link_ppm_tier_4 = 500 +fulfilment_flat_fee_link_ppm_tier_5 = 500 +reqs_for_tier_2 = 0 +reqs_for_tier_3 = 0 +reqs_for_tier_4 = 0 +reqs_for_tier_5 = 0 +number_of_sub_to_create = 1 +randomness_request_count_per_request = 1 +randomness_request_count_per_request_deviation = 0 +random_words_fulfilled_event_timeout = "2m" +wrapped_gas_overhead = 50000 +coordinator_gas_overhead = 52000 +wrapper_premium_percentage = 25 +wrapper_max_number_of_words = 10 +wrapper_consumer_funding_amount_native_token = 1.0 +wrapper_consumer_funding_amount_link = 10 +subscription_funding_amount_native=1 +fulfillment_flat_fee_link_ppm=500 +fulfillment_flat_fee_native_ppm=500 +fulfillment_flat_fee_link_discount_ppm=100 +native_premium_percentage=1 +link_premium_percentage=1 + +# VRF Job config +vrf_job_forwarding_allowed = false +vrf_job_estimate_gas_multiplier = 1.0 +vrf_job_batch_fulfillment_enabled = false +vrf_job_batch_fulfillment_gas_multiplier = 1.15 +vrf_job_poll_period = "1s" +vrf_job_request_timeout = "24h" + +# BHS Job config 
+bhs_job_wait_blocks = 30 +bhs_job_lookback_blocks = 250 +bhs_job_poll_period = "1s" +bhs_job_run_timeout = "24h" + +# load test specific config +[Load.VRFv2Plus] +[Load.VRFv2Plus.Common] +cancel_subs_after_test_run = true + +[Load.VRFv2Plus.General] +minimum_confirmations = 3 +randomness_request_count_per_request = 3 # amount of randomness requests to make per one TX request +randomness_request_count_per_request_deviation = 2 #NOTE - deviation should be less than randomness_request_count_per_request setting +number_of_sub_to_create = 1 + +[Load.VRFv2Plus.Performance] +test_duration = "2m" +# approx 60 RPM - 1 tx request with 3 rand requests in each tx every 3 seconds +rate_limit_unit_duration = "3s" +rps = 1 + +[Load.VRFv2Plus.NewEnv] +sub_funds_link = 1 +sub_funds_native = 1 +node_funds = 10 +node_sending_key_funding = 1000 + +[Load.VRFv2Plus.ExistingEnv] +sub_id = 1 +create_fund_subs_and_add_consumers = true +link_address = "" +sub_funds_link = 10 +node_sending_key_funding_min = 1 +node_sending_keys = [] + +# soak test specific config +[Soak.VRFv2Plus] +[Soak.VRFv2Plus.Common] +cancel_subs_after_test_run = true + +[Soak.VRFv2Plus.General] +minimum_confirmations = 3 +randomness_request_count_per_request = 1 # amount of randomness requests to make per one TX request +randomness_request_count_per_request_deviation = 0 #NOTE - deviation should be less than randomness_request_count_per_request setting +number_of_sub_to_create = 1 + +[Soak.VRFv2Plus.Performance] +test_duration = "2m" +# 10 RPM - 1 tx request with 1 rand request in each tx every 6 seconds +rate_limit_unit_duration = "6s" +rps = 1 +use_existing_env = false + +[Soak.VRFv2Plus.NewEnv] +sub_funds_link = 1 +sub_funds_native = 1 +node_funds = 10 +node_sending_key_funding = 1000 + +[Soak.VRFv2Plus.ExistingEnv] +sub_id = 1 +create_fund_subs_and_add_consumers = true +sub_funds_link = 10 +node_sending_key_funding_min = 1 +node_sending_keys = [] + +# spike test specific config +[Spike.VRFv2Plus] 
+[Spike.VRFv2Plus.Common] +cancel_subs_after_test_run = true + +[Spike.VRFv2Plus.General] +minimum_confirmations = 3 +randomness_request_count_per_request = 150 # amount of randomness requests to make per one TX request +randomness_request_count_per_request_deviation = 0 #NOTE - deviation should be less than randomness_request_count_per_request setting +number_of_sub_to_create = 1 + +[Spike.VRFv2Plus.Performance] +test_duration = "2m" +# approx 150 RPM - 1 tx request with 150 rand requests in each tx every 60 seconds +rate_limit_unit_duration = "1m" +rps = 1 + +[Spike.VRFv2Plus.NewEnv] +sub_funds_link = 1 +sub_funds_native = 1 +node_funds = 10 +node_sending_key_funding = 1000 + +[Spike.VRFv2Plus.ExistingEnv] +sub_id = 1 +create_fund_subs_and_add_consumers = true +sub_funds_link = 10 +node_sending_key_funding_min = 1 +node_sending_keys = [] + +# stress test specific config +[Stress.VRFv2Plus] +[Stress.VRFv2Plus.Common] +cancel_subs_after_test_run = true + +[Stress.VRFv2Plus.General] +minimum_confirmations = 3 +randomness_request_count_per_request = 4 # amount of randomness requests to make per one TX request +randomness_request_count_per_request_deviation = 0 #NOTE - deviation should be less than randomness_request_count_per_request setting +number_of_sub_to_create = 1 + +[Stress.VRFv2Plus.Performance] +test_duration = "2m" +# approx 540 RPM - 3 tx requests per second with 4 rand requests in each tx +rate_limit_unit_duration = "1s" +rps = 3 + +[Stress.VRFv2Plus.NewEnv] +sub_funds_link = 1 +sub_funds_native = 1 +node_funds = 10 +node_sending_key_funding = 1000 + +[Stress.VRFv2Plus.ExistingEnv] +sub_id = 1 +create_fund_subs_and_add_consumers = true +sub_funds_link = 10 +node_sending_key_funding_min = 1 +node_sending_keys = [] \ No newline at end of file diff --git a/integration-tests/testreporters/keeper.go b/integration-tests/testreporters/keeper.go new file mode 100644 index 00000000..f84a07f1 --- /dev/null +++ b/integration-tests/testreporters/keeper.go @@ -0,0 
+1,174 @@ +package testreporters + +import ( + "encoding/csv" + "encoding/json" + "fmt" + "math" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/rs/zerolog/log" + "github.com/slack-go/slack" + + "github.com/goplugin/plugin-testing-framework/testreporters" + "github.com/goplugin/pluginv3.0/integration-tests/client" +) + +// KeeperBlockTimeTestReporter enables reporting on the keeper block time test +type KeeperBlockTimeTestReporter struct { + Reports []KeeperBlockTimeTestReport `json:"reports"` + ReportMutex sync.Mutex + AttemptedPluginTransactions []*client.TransactionsData `json:"attemptedPluginTransactions"` + + namespace string + keeperReportFile string + attemptedTransactionsFile string +} + +// KeeperBlockTimeTestReport holds a report information for a single Upkeep Consumer contract +type KeeperBlockTimeTestReport struct { + ContractAddress string `json:"contractAddress"` + TotalExpectedUpkeeps int64 `json:"totalExpectedUpkeeps"` + TotalSuccessfulUpkeeps int64 `json:"totalSuccessfulUpkeeps"` + AllMissedUpkeeps []int64 `json:"allMissedUpkeeps"` // List of each time an upkeep was missed, represented by how many blocks it was missed by +} + +func (k *KeeperBlockTimeTestReporter) SetNamespace(namespace string) { + k.namespace = namespace +} + +func (k *KeeperBlockTimeTestReporter) WriteReport(folderLocation string) error { + k.keeperReportFile = filepath.Join(folderLocation, "./block_time_report.csv") + k.attemptedTransactionsFile = filepath.Join(folderLocation, "./attempted_transactions_report.json") + keeperReportFile, err := os.Create(k.keeperReportFile) + if err != nil { + return err + } + defer keeperReportFile.Close() + + keeperReportWriter := csv.NewWriter(keeperReportFile) + err = keeperReportWriter.Write([]string{ + "Contract Index", + "Contract Address", + "Total Expected Upkeeps", + "Total Successful Upkeeps", + "Total Missed Upkeeps", + "Average Blocks Missed", + "Largest Missed Upkeep", + "Percent Successful", + }) + if err != nil { + 
return err + } + var totalExpected, totalSuccessful, totalMissed, worstMiss int64 + for contractIndex, report := range k.Reports { + avg, max := int64AvgMax(report.AllMissedUpkeeps) + err = keeperReportWriter.Write([]string{ + fmt.Sprint(contractIndex), + report.ContractAddress, + fmt.Sprint(report.TotalExpectedUpkeeps), + fmt.Sprint(report.TotalSuccessfulUpkeeps), + fmt.Sprint(len(report.AllMissedUpkeeps)), + fmt.Sprint(avg), + fmt.Sprint(max), + fmt.Sprintf("%.2f%%", (float64(report.TotalSuccessfulUpkeeps)/float64(report.TotalExpectedUpkeeps))*100), + }) + totalExpected += report.TotalExpectedUpkeeps + totalSuccessful += report.TotalSuccessfulUpkeeps + totalMissed += int64(len(report.AllMissedUpkeeps)) + worstMiss = int64(math.Max(float64(max), float64(worstMiss))) + if err != nil { + return err + } + } + keeperReportWriter.Flush() + + err = keeperReportWriter.Write([]string{"Full Test Summary"}) + if err != nil { + return err + } + err = keeperReportWriter.Write([]string{"Total Expected", "Total Successful", "Total Missed", "Worst Miss", "Total Percent"}) + if err != nil { + return err + } + err = keeperReportWriter.Write([]string{ + fmt.Sprint(totalExpected), + fmt.Sprint(totalSuccessful), + fmt.Sprint(totalMissed), + fmt.Sprint(worstMiss), + fmt.Sprintf("%.2f%%", (float64(totalSuccessful)/float64(totalExpected))*100)}) + if err != nil { + return err + } + keeperReportWriter.Flush() + + txs, err := json.Marshal(k.AttemptedPluginTransactions) + if err != nil { + return err + } + err = os.WriteFile(k.attemptedTransactionsFile, txs, 0600) + if err != nil { + return err + } + + log.Info().Msg("Successfully wrote report on Keeper Block Timing") + return nil +} + +// SendSlackNotification sends a slack notification on the results of the test +func (k *KeeperBlockTimeTestReporter) SendSlackNotification(t *testing.T, slackClient *slack.Client) error { + if slackClient == nil { + slackClient = slack.New(testreporters.SlackAPIKey) + } + + testFailed := t.Failed() + 
headerText := ":white_check_mark: Keeper Block Time Test PASSED :white_check_mark:" + if testFailed { + headerText = ":x: Keeper Block Time Test FAILED :x:" + } + messageBlocks := testreporters.CommonSlackNotificationBlocks( + headerText, k.namespace, k.keeperReportFile, + ) + ts, err := testreporters.SendSlackMessage(slackClient, slack.MsgOptionBlocks(messageBlocks...)) + if err != nil { + return err + } + + if err := testreporters.UploadSlackFile(slackClient, slack.FileUploadParameters{ + Title: fmt.Sprintf("Keeper Block Time Test Report %s", k.namespace), + Filetype: "csv", + Filename: fmt.Sprintf("keeper_block_time_%s.csv", k.namespace), + File: k.keeperReportFile, + InitialComment: fmt.Sprintf("Keeper Block Time Test Report %s", k.namespace), + Channels: []string{testreporters.SlackChannel}, + ThreadTimestamp: ts, + }); err != nil { + return err + } + return testreporters.UploadSlackFile(slackClient, slack.FileUploadParameters{ + Title: fmt.Sprintf("Keeper Block Time Attempted Plugin Txs %s", k.namespace), + Filetype: "json", + Filename: fmt.Sprintf("attempted_cl_txs_%s.json", k.namespace), + File: k.attemptedTransactionsFile, + InitialComment: fmt.Sprintf("Keeper Block Time Attempted Txs %s", k.namespace), + Channels: []string{testreporters.SlackChannel}, + ThreadTimestamp: ts, + }) +} + +// int64AvgMax helper calculates the avg and the max values in a list +func int64AvgMax(in []int64) (float64, int64) { + var sum int64 + var max int64 + if len(in) == 0 { + return 0, 0 + } + for _, num := range in { + sum += num + max = int64(math.Max(float64(max), float64(num))) + } + return float64(sum) / float64(len(in)), max +} diff --git a/integration-tests/testreporters/keeper_benchmark.go b/integration-tests/testreporters/keeper_benchmark.go new file mode 100644 index 00000000..cc3e9e02 --- /dev/null +++ b/integration-tests/testreporters/keeper_benchmark.go @@ -0,0 +1,327 @@ +package testreporters + +import ( + "encoding/csv" + "encoding/json" + "fmt" + "math" + "os" 
+ "path/filepath" + "sort" + "sync" + "testing" + + "github.com/rs/zerolog/log" + "github.com/slack-go/slack" + + "github.com/goplugin/plugin-testing-framework/testreporters" + "github.com/goplugin/pluginv3.0/integration-tests/client" +) + +// KeeperBenchmarkTestReporter enables reporting on the keeper benchmark test +type KeeperBenchmarkTestReporter struct { + Reports []KeeperBenchmarkTestReport `json:"reports"` + ReportMutex sync.Mutex + AttemptedPluginTransactions []*client.TransactionsData `json:"attemptedPluginTransactions"` + NumRevertedUpkeeps int64 + NumStaleUpkeepReports int64 + Summary KeeperBenchmarkTestSummary `json:"summary"` + + namespace string + keeperReportFile string + attemptedTransactionsFile string + keeperSummaryFile string +} + +type KeeperBenchmarkTestSummary struct { + Load KeeperBenchmarkTestLoad `json:"load"` + Config KeeperBenchmarkTestConfig `json:"config"` + Metrics KeeperBenchmarkTestMetrics `json:"metrics"` + TestInputs map[string]interface{} `json:"testInputs"` + StartTime int64 `json:"startTime"` + EndTime int64 `json:"endTime"` +} + +type KeeperBenchmarkTestLoad struct { + TotalCheckGasPerBlock int64 `json:"totalCheckGasPerBlock"` + TotalPerformGasPerBlock int64 `json:"totalPerformGasPerBlock"` + AverageExpectedPerformsPerBlock float64 `json:"averageExpectedPerformsPerBlock"` +} + +type KeeperBenchmarkTestConfig struct { + Plugin map[string]map[string]string `json:"plugin"` + Geth map[string]map[string]string `json:"geth"` +} + +type KeeperBenchmarkTestMetrics struct { + Delay map[string]interface{} `json:"delay"` + PercentWithinSLA float64 `json:"percentWithinSLA"` + PercentRevert float64 `json:"percentRevert"` + PercentStale float64 `json:"percentStale"` + TotalTimesEligible int64 `json:"totalTimesEligible"` + TotalTimesPerformed int64 `json:"totalTimesPerformed"` + TotalStaleReports int64 `json:"totalStaleReports"` + AverageActualPerformsPerBlock float64 `json:"averageActualPerformsPerBlock"` +} + +// KeeperBenchmarkTestReport 
holds a report information for a single Upkeep Consumer contract +type KeeperBenchmarkTestReport struct { + RegistryAddress string `json:"registryAddress"` + ContractAddress string `json:"contractAddress"` + TotalEligibleCount int64 `json:"totalEligibleCount"` + TotalSLAMissedUpkeeps int64 `json:"totalSLAMissedUpkeeps"` + TotalPerformedUpkeeps int64 `json:"totalPerformedUpkeeps"` + AllCheckDelays []int64 `json:"allCheckDelays"` // List of the delays since checkUpkeep for all performs +} + +func (k *KeeperBenchmarkTestReporter) SetNamespace(namespace string) { + k.namespace = namespace +} + +func (k *KeeperBenchmarkTestReporter) WriteReport(folderLocation string) error { + k.keeperReportFile = filepath.Join(folderLocation, "./benchmark_report.csv") + k.keeperSummaryFile = filepath.Join(folderLocation, "./benchmark_summary.json") + // k.keeperSummaryCsvFile = filepath.Join(folderLocation, "./benchmark_summary.csv") + k.attemptedTransactionsFile = filepath.Join(folderLocation, "./attempted_transactions_report.json") + keeperReportFile, err := os.Create(k.keeperReportFile) + if err != nil { + return err + } + defer keeperReportFile.Close() + + keeperReportWriter := csv.NewWriter(keeperReportFile) + var totalEligibleCount, totalPerformed, totalMissedSLA, totalReverted, totalStaleReports int64 + var allDelays []int64 + for _, report := range k.Reports { + totalEligibleCount += report.TotalEligibleCount + totalPerformed += report.TotalPerformedUpkeeps + totalMissedSLA += report.TotalSLAMissedUpkeeps + + allDelays = append(allDelays, report.AllCheckDelays...) 
+ } + totalReverted = k.NumRevertedUpkeeps + totalStaleReports = k.NumStaleUpkeepReports + pctWithinSLA := (1.0 - float64(totalMissedSLA)/float64(totalEligibleCount)) * 100 + var pctReverted, pctStale float64 + if totalPerformed > 0 { + pctReverted = (float64(totalReverted) / float64(totalPerformed)) * 100 + pctStale = (float64(totalStaleReports) / float64(totalPerformed)) * 100 + } + + err = keeperReportWriter.Write([]string{"Full Test Summary"}) + if err != nil { + return err + } + err = keeperReportWriter.Write([]string{ + "Total Times Eligible", + "Total Performed", + "Total Reverted", + "Total Stale Reports", + "Average Perform Delay", + "Median Perform Delay", + "90th pct Perform Delay", + "99th pct Perform Delay", + "Max Perform Delay", + "Percent Within SLA", + "Percent Revert", + "Percent Stale", + }) + if err != nil { + return err + } + avg, median, ninetyPct, ninetyNinePct, max := IntListStats(allDelays) + err = keeperReportWriter.Write([]string{ + fmt.Sprint(totalEligibleCount), + fmt.Sprint(totalPerformed), + fmt.Sprint(totalReverted), + fmt.Sprint(totalStaleReports), + fmt.Sprintf("%.2f", avg), + fmt.Sprint(median), + fmt.Sprint(ninetyPct), + fmt.Sprint(ninetyNinePct), + fmt.Sprint(max), + fmt.Sprintf("%.2f%%", pctWithinSLA), + fmt.Sprintf("%.2f%%", pctReverted), + fmt.Sprintf("%.2f%%", pctStale), + }) + if err != nil { + return err + } + keeperReportWriter.Flush() + log.Info(). + Int64("Total Times Eligible", totalEligibleCount). + Int64("Total Performed", totalPerformed). + Int64("Total Reverted", totalReverted). + Float64("Average Perform Delay", avg). + Int64("Median Perform Delay", median). + Int64("90th pct Perform Delay", ninetyPct). + Int64("99th pct Perform Delay", ninetyNinePct). + Int64("Max Perform Delay", max). + Float64("Percent Within SLA", pctWithinSLA). + Float64("Percent Reverted", pctReverted). 
+ Msg("Calculated Aggregate Results") + + err = keeperReportWriter.Write([]string{ + "Contract Index", + "RegistryAddress", + "Contract Address", + "Total Times Eligible", + "Total Performed Upkeeps", + "Average Perform Delay", + "Median Perform Delay", + "90th pct Perform Delay", + "99th pct Perform Delay", + "Largest Perform Delay", + "Percent Within SLA", + }) + if err != nil { + return err + } + + for contractIndex, report := range k.Reports { + avg, median, ninetyPct, ninetyNinePct, max = IntListStats(report.AllCheckDelays) + err = keeperReportWriter.Write([]string{ + fmt.Sprint(contractIndex), + report.RegistryAddress, + report.ContractAddress, + fmt.Sprint(report.TotalEligibleCount), + fmt.Sprint(report.TotalPerformedUpkeeps), + fmt.Sprintf("%.2f", avg), + fmt.Sprint(median), + fmt.Sprint(ninetyPct), + fmt.Sprint(ninetyNinePct), + fmt.Sprint(max), + fmt.Sprintf("%.2f%%", (1.0-float64(report.TotalSLAMissedUpkeeps)/float64(report.TotalEligibleCount))*100), + }) + if err != nil { + return err + } + } + keeperReportWriter.Flush() + + txs, err := json.Marshal(k.AttemptedPluginTransactions) + if err != nil { + return err + } + err = os.WriteFile(k.attemptedTransactionsFile, txs, 0600) + if err != nil { + return err + } + + log.Info().Msg("Successfully wrote report on Keeper Benchmark") + + k.Summary.Metrics.Delay = map[string]interface{}{ + "mean": avg, + "median": median, + "90p": ninetyPct, + "99p": ninetyNinePct, + "max": max, + } + k.Summary.Metrics.PercentWithinSLA = pctWithinSLA + k.Summary.Metrics.PercentRevert = pctReverted + k.Summary.Metrics.TotalTimesEligible = totalEligibleCount + k.Summary.Metrics.TotalTimesPerformed = totalPerformed + k.Summary.Metrics.TotalStaleReports = totalStaleReports + k.Summary.Metrics.PercentStale = pctStale + k.Summary.Metrics.AverageActualPerformsPerBlock = float64(totalPerformed) / float64(k.Summary.TestInputs["BlockRange"].(int64)) + + // TODO: Set test expectations + /* 
Expect(int64(pctWithinSLA)).Should(BeNumerically(">=", int64(80)), "Expected PercentWithinSLA to be greater than or equal to 80, but got %f", pctWithinSLA) + Expect(int64(pctReverted)).Should(BeNumerically("<=", int64(10)), "Expected PercentRevert to be less than or equal to 10, but got %f", pctReverted) + Expect(k.Summary.Metrics.AverageActualPerformsPerBlock).Should(BeNumerically("~", k.Summary.Load.AverageExpectedPerformsPerBlock, 10), "Expected PercentRevert to be less than 10, but got %f", pctReverted) */ + + res, err := json.MarshalIndent(k.Summary, "", " ") + if err != nil { + return err + } + err = os.WriteFile(k.keeperSummaryFile, res, 0600) + if err != nil { + return err + } + + log.Info().Str("Summary", string(res)).Msg("Successfully wrote summary on Keeper Benchmark") + + return nil +} + +// SendSlackNotification sends a slack notification on the results of the test +func (k *KeeperBenchmarkTestReporter) SendSlackNotification(t *testing.T, slackClient *slack.Client, grafanaUrlProvider testreporters.GrafanaURLProvider) error { + if slackClient == nil { + slackClient = slack.New(testreporters.SlackAPIKey) + } + + testFailed := t.Failed() + headerText := ":white_check_mark: Automation Benchmark Test FINISHED :white_check_mark:" + if testFailed { + headerText = ":x: Automation Benchmark Test FAILED :x:" + } + messageBlocks := testreporters.CommonSlackNotificationBlocks( + headerText, k.namespace, k.keeperReportFile, + ) + ts, err := testreporters.SendSlackMessage(slackClient, slack.MsgOptionBlocks(messageBlocks...)) + if err != nil { + return err + } + + grafanaUrl, err := grafanaUrlProvider.GetGrafanaBaseURL() + if err != nil { + return err + } + + dashboardUrl, err := grafanaUrlProvider.GetGrafanaDashboardURL() + if err != nil { + return err + } + + formattedDashboardUrl := fmt.Sprintf("%s%s?from=%d&to=%d&var-namespace=%s&var-cl_node=plugin-0-0", grafanaUrl, dashboardUrl, k.Summary.StartTime, k.Summary.EndTime, k.namespace) + log.Info().Str("Dashboard", 
formattedDashboardUrl).Msg("Dashboard URL") + + if err := testreporters.UploadSlackFile(slackClient, slack.FileUploadParameters{ + Title: fmt.Sprintf("Automation Benchmark Test Summary %s", k.namespace), + Filetype: "json", + Filename: fmt.Sprintf("automation_benchmark_summary_%s.json", k.namespace), + File: k.keeperSummaryFile, + InitialComment: fmt.Sprintf("Automation Benchmark Test Summary %s.\n<%s|Test Dashboard> ", k.namespace, formattedDashboardUrl), + Channels: []string{testreporters.SlackChannel}, + ThreadTimestamp: ts, + }); err != nil { + return err + } + + if err := testreporters.UploadSlackFile(slackClient, slack.FileUploadParameters{ + Title: fmt.Sprintf("Automation Benchmark Test Report %s", k.namespace), + Filetype: "csv", + Filename: fmt.Sprintf("automation_benchmark_report_%s.csv", k.namespace), + File: k.keeperReportFile, + InitialComment: fmt.Sprintf("Automation Benchmark Test Report %s", k.namespace), + Channels: []string{testreporters.SlackChannel}, + ThreadTimestamp: ts, + }); err != nil { + return err + } + return testreporters.UploadSlackFile(slackClient, slack.FileUploadParameters{ + Title: fmt.Sprintf("Automation Benchmark Attempted Plugin Txs %s", k.namespace), + Filetype: "json", + Filename: fmt.Sprintf("attempted_cl_txs_%s.json", k.namespace), + File: k.attemptedTransactionsFile, + InitialComment: fmt.Sprintf("Automation Benchmark Attempted Txs %s", k.namespace), + Channels: []string{testreporters.SlackChannel}, + ThreadTimestamp: ts, + }) +} + +// intListStats helper calculates some statistics on an int list: avg, median, 90pct, 99pct, max +// +//nolint:revive +func IntListStats(in []int64) (float64, int64, int64, int64, int64) { + length := len(in) + if length == 0 { + return 0, 0, 0, 0, 0 + } + sort.Slice(in, func(i, j int) bool { return in[i] < in[j] }) + var sum int64 + for _, num := range in { + sum += num + } + return float64(sum) / float64(length), in[int(math.Floor(float64(length)*0.5))], 
in[int(math.Floor(float64(length)*0.9))], in[int(math.Floor(float64(length)*0.99))], in[length-1] +} diff --git a/integration-tests/testreporters/ocr.go b/integration-tests/testreporters/ocr.go new file mode 100644 index 00000000..7dc25912 --- /dev/null +++ b/integration-tests/testreporters/ocr.go @@ -0,0 +1,267 @@ +package testreporters + +import ( + "encoding/csv" + "fmt" + "os" + "path/filepath" + "sort" + "testing" + "time" + + "github.com/rs/zerolog/log" + "github.com/slack-go/slack" + + "github.com/goplugin/plugin-testing-framework/testreporters" +) + +//TODO: This whole process can definitely be simplified and improved, but for some reason I'm getting brain block at the moment + +// OCRSoakTestReporter collates all OCRAnswerUpdated events into a single report +type OCRSoakTestReporter struct { + StartTime time.Time + AnomaliesDetected bool + OCRVersion string + + anomalies [][]string + timeLine [][]string + namespace string + csvLocation string +} + +// TimeLineEvent represents a single event in the timeline +type TimeLineEvent interface { + Time() time.Time + CSV() [][]string +} + +// TestIssue is a single RPC issue, either a disconnect or reconnect +type TestIssue struct { + StartTime time.Time `toml:"startTime"` + Message string `toml:"message"` +} + +func (r *TestIssue) Time() time.Time { + return r.StartTime +} + +func (r *TestIssue) CSV() [][]string { + return [][]string{{r.StartTime.Format("2006-01-02 15:04:05.00 MST"), "Test Issue!", r.Message}} +} + +// OCRRoundState indicates that a round per contract should complete within this time with this answer +type OCRRoundState struct { + StartTime time.Time `toml:"startTime"` + EndTime time.Time `toml:"endTime"` // Time when the round should end, only used for analysis + Answer int64 `toml:"answer"` + Anomalous bool `toml:"anomalous"` // Whether the round was anomalous + FoundEvents map[string][]*FoundEvent `toml:"foundEvents"` // Address -> FoundEvents, possible to have multiple found events per round, 
and need to call it out + TimeLineEvents []TimeLineEvent `toml:"timeLineEvents"` + + anomalies [][]string +} + +func (e *OCRRoundState) Time() time.Time { + return e.StartTime +} + +// CSV returns a CSV representation of the test state and all events +func (e *OCRRoundState) CSV() [][]string { + rows := [][]string{{e.StartTime.Format("2006-01-02 15:04:05.00 MST"), fmt.Sprintf("Expecting new Answer: %d", e.Answer)}} + rows = append(rows, e.anomalies...) + return rows +} + +// Validate checks that +// 1. There is a FoundEvent for every address +// 2. There is only one FoundEvent for every address +// 3. The answer is correct +func (e *OCRRoundState) Validate() bool { + anomalies := [][]string{} + for address, eventList := range e.FoundEvents { + if len(eventList) == 0 { + e.Anomalous = true + anomalies = append(anomalies, []string{ + e.StartTime.Format("2006-01-02 15:04:05.00 MST"), "Anomaly Found!", fmt.Sprintf("No AnswerUpdated for address '%s'", address), + }) + } else if len(eventList) > 1 { + e.Anomalous = true + anomalies = append(anomalies, []string{e.StartTime.Format("2006-01-02 15:04:05.00 MST"), "Anomaly Found!", + fmt.Sprintf("Multiple AnswerUpdated for address '%s', possible double-transmission", address)}, + ) + } else { + event := eventList[0] + if event.Answer != e.Answer { + e.Anomalous = true + anomalies = append(e.anomalies, []string{e.StartTime.Format("2006-01-02 15:04:05.00 MST"), "Anomaly Found!", + fmt.Sprintf("FoundEvent for address '%s' has wrong answer '%d'", address, event.Answer)}, + ) + } + } + } + e.anomalies = anomalies + return e.Anomalous +} + +// FoundEvent is a single round update event +type FoundEvent struct { + StartTime time.Time + BlockNumber uint64 + Address string + Answer int64 + RoundID uint64 +} + +func (a *FoundEvent) Time() time.Time { + return a.StartTime +} + +// CSV returns a CSV representation of the event +func (a *FoundEvent) CSV() [][]string { + return [][]string{{ + a.StartTime.Format("2006-01-02 15:04:05.00 
MST"), + fmt.Sprintf("Address: %s", a.Address), + fmt.Sprintf("Round: %d", a.RoundID), + fmt.Sprintf("Answer: %d", a.Answer), + fmt.Sprintf("Block: %d", a.BlockNumber), + }} +} + +// RecordEvents takes in a list of test states and RPC issues, orders them, and records them in the timeline +func (o *OCRSoakTestReporter) RecordEvents(testStates []*OCRRoundState, testIssues []*TestIssue) { + events := []TimeLineEvent{} + for _, expectedEvent := range testStates { + if expectedEvent.Validate() { + o.AnomaliesDetected = true + o.anomalies = append(o.anomalies, expectedEvent.anomalies...) + } + events = append(events, expectedEvent) + events = append(events, expectedEvent.TimeLineEvents...) + } + if len(testIssues) > 0 { + o.AnomaliesDetected = true + } + for _, testIssue := range testIssues { + events = append(events, testIssue) + o.anomalies = append(o.anomalies, testIssue.CSV()...) + } + sort.Slice(events, func(i, j int) bool { + return events[i].Time().Before(events[j].Time()) + }) + for _, event := range events { + o.timeLine = append(o.timeLine, event.CSV()...) 
+ } +} + +// SetNamespace sets the namespace of the report for clean reports +func (o *OCRSoakTestReporter) SetNamespace(namespace string) { + o.namespace = namespace +} + +// WriteReport writes OCR Soak test report to a CSV file and final report +func (o *OCRSoakTestReporter) WriteReport(folderLocation string) error { + log.Debug().Msgf("Writing OCRv%s Soak Test Report", o.OCRVersion) + reportLocation := filepath.Join(folderLocation, "./ocr_soak_report.csv") + o.csvLocation = reportLocation + ocrReportFile, err := os.Create(reportLocation) + if err != nil { + return err + } + defer ocrReportFile.Close() + + ocrReportWriter := csv.NewWriter(ocrReportFile) + + err = ocrReportWriter.Write([]string{fmt.Sprintf("OCRv%s Soak Test Report", o.OCRVersion)}) + if err != nil { + return err + } + + err = ocrReportWriter.Write([]string{ + "Namespace", + o.namespace, + "Started At", + o.StartTime.Format("2006-01-02 15:04:05.00 MST"), + "Test Duration", + time.Since(o.StartTime).String(), + }) + if err != nil { + return err + } + + err = ocrReportWriter.Write([]string{}) + if err != nil { + return err + } + + if len(o.anomalies) > 0 { + err = ocrReportWriter.Write([]string{"Anomalies Found"}) + if err != nil { + return err + } + + err = ocrReportWriter.WriteAll(o.anomalies) + if err != nil { + return err + } + + err = ocrReportWriter.Write([]string{}) + if err != nil { + return err + } + } + + err = ocrReportWriter.Write([]string{"Timeline"}) + if err != nil { + return err + } + + err = ocrReportWriter.Write([]string{ + "Time", + "Message", + }) + if err != nil { + return err + } + + err = ocrReportWriter.WriteAll(o.timeLine) + if err != nil { + return err + } + + ocrReportWriter.Flush() + + log.Info().Str("Location", reportLocation).Msg("Wrote CSV file") + return nil +} + +// SendNotification sends a slack message to a slack webhook and uploads test artifacts +func (o *OCRSoakTestReporter) SendSlackNotification(t *testing.T, slackClient *slack.Client, _ 
testreporters.GrafanaURLProvider) error { + if slackClient == nil { + slackClient = slack.New(testreporters.SlackAPIKey) + } + + testFailed := t.Failed() + headerText := fmt.Sprintf(":white_check_mark: OCRv%s Soak Test PASSED :white_check_mark:", o.OCRVersion) + if testFailed { + headerText = ":x: OCR Soak Test FAILED :x:" + } else if o.AnomaliesDetected { + headerText = ":warning: OCR Soak Test Found Anomalies :warning:" + } + messageBlocks := testreporters.CommonSlackNotificationBlocks( + headerText, fmt.Sprintf("%s | Test took: %s", o.namespace, time.Since(o.StartTime).Truncate(time.Second).String()), o.csvLocation, + ) + ts, err := testreporters.SendSlackMessage(slackClient, slack.MsgOptionBlocks(messageBlocks...)) + if err != nil { + return err + } + + return testreporters.UploadSlackFile(slackClient, slack.FileUploadParameters{ + Title: fmt.Sprintf("OCR Soak Test Report %s", o.namespace), + Filetype: "csv", + Filename: fmt.Sprintf("ocr_soak_%s.csv", o.namespace), + File: o.csvLocation, + InitialComment: fmt.Sprintf("OCR Soak Test Report %s.", o.namespace), + Channels: []string{testreporters.SlackChannel}, + ThreadTimestamp: ts, + }) +} diff --git a/integration-tests/testreporters/profile.go b/integration-tests/testreporters/profile.go new file mode 100644 index 00000000..2e321c07 --- /dev/null +++ b/integration-tests/testreporters/profile.go @@ -0,0 +1,60 @@ +package testreporters + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/rs/zerolog/log" + "github.com/slack-go/slack" + "golang.org/x/sync/errgroup" + + "github.com/goplugin/plugin-testing-framework/testreporters" + "github.com/goplugin/pluginv3.0/integration-tests/client" +) + +type PluginProfileTestReporter struct { + Results []*client.PluginProfileResults + namespace string +} + +// SetNamespace sets the namespace of the report for clean reports +func (c *PluginProfileTestReporter) SetNamespace(namespace string) { + c.namespace = namespace +} + +// WriteReport create the profile 
files +func (c *PluginProfileTestReporter) WriteReport(folderLocation string) error { + profFiles := new(errgroup.Group) + for _, res := range c.Results { + result := res + profFiles.Go(func() error { + filePath := filepath.Join(folderLocation, fmt.Sprintf("plugin-node-%d-profiles", result.NodeIndex)) + if err := testreporters.MkdirIfNotExists(filePath); err != nil { + return err + } + for _, rep := range result.Reports { + report := rep + reportFile, err := os.Create(filepath.Join(filePath, report.Type)) + if err != nil { + return err + } + if _, err = reportFile.Write(report.Data); err != nil { + return err + } + if err = reportFile.Close(); err != nil { + return err + } + } + return nil + }) + } + return profFiles.Wait() +} + +// SendNotification hasn't been implemented for this test +func (c *PluginProfileTestReporter) SendSlackNotification(_ *testing.T, _ *slack.Client, _ testreporters.GrafanaURLProvider) error { + log.Warn().Msg("No Slack notification integration for Plugin profile tests") + return nil +} diff --git a/integration-tests/testreporters/vrfv2.go b/integration-tests/testreporters/vrfv2.go new file mode 100644 index 00000000..63bd2867 --- /dev/null +++ b/integration-tests/testreporters/vrfv2.go @@ -0,0 +1,98 @@ +package testreporters + +import ( + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/slack-go/slack" + + "github.com/goplugin/plugin-testing-framework/testreporters" + "github.com/goplugin/pluginv3.0/integration-tests/types" +) + +type VRFV2TestReporter struct { + TestType string + RequestCount *big.Int + FulfilmentCount *big.Int + AverageFulfillmentInMillions *big.Int + SlowestFulfillment *big.Int + FastestFulfillment *big.Int + VRFv2TestConfig types.VRFv2TestConfig +} + +func (o *VRFV2TestReporter) SetReportData( + testType string, + RequestCount *big.Int, + FulfilmentCount *big.Int, + AverageFulfillmentInMillions *big.Int, + SlowestFulfillment *big.Int, + FastestFulfillment *big.Int, + vrfv2TestConfig 
types.VRFv2TestConfig, +) { + o.TestType = testType + o.RequestCount = RequestCount + o.FulfilmentCount = FulfilmentCount + o.AverageFulfillmentInMillions = AverageFulfillmentInMillions + o.SlowestFulfillment = SlowestFulfillment + o.FastestFulfillment = FastestFulfillment + o.VRFv2TestConfig = vrfv2TestConfig +} + +// SendSlackNotification sends a slack message to a slack webhook +func (o *VRFV2TestReporter) SendSlackNotification(t *testing.T, slackClient *slack.Client) error { + if slackClient == nil { + slackClient = slack.New(testreporters.SlackAPIKey) + } + + testFailed := t.Failed() + headerText := fmt.Sprintf(":white_check_mark: VRF V2 %s Test PASSED :white_check_mark:", o.TestType) + if testFailed { + headerText = fmt.Sprintf(":x: VRF V2 %s Test FAILED :x:", o.TestType) + } + + perfCfg := o.VRFv2TestConfig.GetVRFv2Config().Performance + var sb strings.Builder + for _, n := range o.VRFv2TestConfig.GetNetworkConfig().SelectedNetworks { + sb.WriteString(n) + sb.WriteString(", ") + } + + messageBlocks := testreporters.SlackNotifyBlocks(headerText, sb.String(), []string{ + fmt.Sprintf( + "Summary\n"+ + "Perf Test Type: %s\n"+ + "Test Duration set in parameters: %s\n"+ + "Use Existing Env: %t\n"+ + "Request Count: %s\n"+ + "Fulfilment Count: %s\n"+ + "AverageFulfillmentInMillions: %s\n"+ + "Slowest Fulfillment: %s\n"+ + "Fastest Fulfillment: %s \n"+ + "RPS: %d\n"+ + "RateLimitUnitDuration: %s\n"+ + "RandomnessRequestCountPerRequest: %d\n"+ + "RandomnessRequestCountPerRequestDeviation: %d\n", + o.TestType, + perfCfg.TestDuration.Duration.Truncate(time.Second).String(), + *perfCfg.UseExistingEnv, + o.RequestCount.String(), + o.FulfilmentCount.String(), + o.AverageFulfillmentInMillions.String(), + o.SlowestFulfillment.String(), + o.FastestFulfillment.String(), + *perfCfg.RPS, + perfCfg.RateLimitUnitDuration.String(), + *o.VRFv2TestConfig.GetVRFv2Config().General.RandomnessRequestCountPerRequest, + 
*o.VRFv2TestConfig.GetVRFv2Config().General.RandomnessRequestCountPerRequestDeviation, + ), + }) + + _, err := testreporters.SendSlackMessage(slackClient, slack.MsgOptionBlocks(messageBlocks...)) + if err != nil { + return err + } + return nil +} diff --git a/integration-tests/testreporters/vrfv2plus.go b/integration-tests/testreporters/vrfv2plus.go new file mode 100644 index 00000000..8f84a8a7 --- /dev/null +++ b/integration-tests/testreporters/vrfv2plus.go @@ -0,0 +1,93 @@ +package testreporters + +import ( + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/goplugin/pluginv3.0/integration-tests/types" + + "github.com/slack-go/slack" + + "github.com/goplugin/plugin-testing-framework/testreporters" +) + +type VRFV2PlusTestReporter struct { + TestType string + RequestCount *big.Int + FulfilmentCount *big.Int + AverageFulfillmentInMillions *big.Int + SlowestFulfillment *big.Int + FastestFulfillment *big.Int + VRFv2PlusTestConfig types.VRFv2PlusTestConfig +} + +func (o *VRFV2PlusTestReporter) SetReportData( + testType string, + RequestCount *big.Int, + FulfilmentCount *big.Int, + AverageFulfillmentInMillions *big.Int, + SlowestFulfillment *big.Int, + FastestFulfillment *big.Int, + vtfv2PlusTestConfig types.VRFv2PlusTestConfig, +) { + o.TestType = testType + o.RequestCount = RequestCount + o.FulfilmentCount = FulfilmentCount + o.AverageFulfillmentInMillions = AverageFulfillmentInMillions + o.SlowestFulfillment = SlowestFulfillment + o.FastestFulfillment = FastestFulfillment + o.VRFv2PlusTestConfig = vtfv2PlusTestConfig +} + +// SendSlackNotification sends a slack message to a slack webhook +func (o *VRFV2PlusTestReporter) SendSlackNotification(t *testing.T, slackClient *slack.Client, vtfv2PlusTestConfig types.VRFv2PlusTestConfig) error { + if slackClient == nil { + slackClient = slack.New(testreporters.SlackAPIKey) + } + + testFailed := t.Failed() + headerText := fmt.Sprintf(":white_check_mark: VRF V2 Plus %s Test PASSED :white_check_mark:", 
o.TestType) + if testFailed { + headerText = fmt.Sprintf(":x: VRF V2 Plus %s Test FAILED :x:", o.TestType) + } + + vrfv2lusConfig := o.VRFv2PlusTestConfig.GetVRFv2PlusConfig().Performance + messageBlocks := testreporters.SlackNotifyBlocks(headerText, strings.Join(vtfv2PlusTestConfig.GetNetworkConfig().SelectedNetworks, ","), []string{ + fmt.Sprintf( + "Summary\n"+ + "Perf Test Type: %s\n"+ + "Test Duration set in parameters: %s\n"+ + "Use Existing Env: %t\n"+ + "Request Count: %s\n"+ + "Fulfilment Count: %s\n"+ + "AverageFulfillmentInMillions: %s\n"+ + "Slowest Fulfillment: %s\n"+ + "Fastest Fulfillment: %s \n"+ + "RPS: %d\n"+ + "RateLimitUnitDuration: %s\n"+ + "RandomnessRequestCountPerRequest: %d\n"+ + "RandomnessRequestCountPerRequestDeviation: %d\n", + o.TestType, + vrfv2lusConfig.TestDuration.Duration.Truncate(time.Second).String(), + *vrfv2lusConfig.UseExistingEnv, + o.RequestCount.String(), + o.FulfilmentCount.String(), + o.AverageFulfillmentInMillions.String(), + o.SlowestFulfillment.String(), + o.FastestFulfillment.String(), + *vrfv2lusConfig.RPS, + vrfv2lusConfig.RateLimitUnitDuration.String(), + *o.VRFv2PlusTestConfig.GetVRFv2PlusConfig().General.RandomnessRequestCountPerRequest, + *o.VRFv2PlusTestConfig.GetVRFv2PlusConfig().General.RandomnessRequestCountPerRequestDeviation, + ), + }) + + _, err := testreporters.SendSlackMessage(slackClient, slack.MsgOptionBlocks(messageBlocks...)) + if err != nil { + return err + } + return nil +} diff --git a/integration-tests/testsetups/don_evm_chain.go b/integration-tests/testsetups/don_evm_chain.go new file mode 100644 index 00000000..1a0cdf23 --- /dev/null +++ b/integration-tests/testsetups/don_evm_chain.go @@ -0,0 +1,81 @@ +package testsetups + +import ( + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/blockchain" + ctfClient "github.com/goplugin/plugin-testing-framework/client" + e 
"github.com/goplugin/plugin-testing-framework/k8s/environment" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/plugin" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/ethereum" + "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/mockserver" + mockservercfg "github.com/goplugin/plugin-testing-framework/k8s/pkg/helm/mockserver-cfg" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +type DonChain struct { + conf *DonChainConfig + EVMClient blockchain.EVMClient + EVMNetwork *blockchain.EVMNetwork + ContractDeployer contracts.ContractDeployer + LinkTokenContract contracts.LinkToken + PluginNodes []*client.PluginK8sClient + Mockserver *ctfClient.MockserverClient + l zerolog.Logger +} + +type DonChainConfig struct { + T *testing.T + Env *e.Environment + EVMNetwork *blockchain.EVMNetwork + EthereumProps *ethereum.Props + PluginValues map[string]interface{} +} + +func NewDonChain(conf *DonChainConfig, logger zerolog.Logger) *DonChain { + return &DonChain{ + conf: conf, + EVMNetwork: conf.EVMNetwork, + l: logger, + } +} + +func (s *DonChain) Deploy() { + var err error + + s.conf.Env.AddHelm(mockservercfg.New(nil)). + AddHelm(mockserver.New(nil)). + AddHelm(ethereum.New(s.conf.EthereumProps)). 
+ AddHelm(plugin.New(0, s.conf.PluginValues)) + + err = s.conf.Env.Run() + require.NoError(s.conf.T, err) + + s.initializeClients() +} + +func (s *DonChain) initializeClients() { + var err error + network := *s.conf.EVMNetwork + s.EVMClient, err = blockchain.NewEVMClient(network, s.conf.Env, s.l) + require.NoError(s.conf.T, err, "Connecting to blockchain nodes shouldn't fail") + + s.ContractDeployer, err = contracts.NewContractDeployer(s.EVMClient, s.l) + require.NoError(s.conf.T, err) + + s.PluginNodes, err = client.ConnectPluginNodes(s.conf.Env) + require.NoError(s.conf.T, err, "Connecting to plugin nodes shouldn't fail") + + s.Mockserver, err = ctfClient.ConnectMockServer(s.conf.Env) + require.NoError(s.conf.T, err, "Creating mockserver clients shouldn't fail") + + s.EVMClient.ParallelTransactions(true) + + s.LinkTokenContract, err = s.ContractDeployer.DeployLinkTokenContract() + require.NoError(s.conf.T, err, "Deploying Link Token Contract shouldn't fail") +} diff --git a/integration-tests/testsetups/keeper_benchmark.go b/integration-tests/testsetups/keeper_benchmark.go new file mode 100644 index 00000000..5688c04e --- /dev/null +++ b/integration-tests/testsetups/keeper_benchmark.go @@ -0,0 +1,737 @@ +package testsetups + +import ( + "context" + "fmt" + "math" + "math/big" + "os" + "os/signal" + "syscall" + "testing" + "time" + + geth "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/slack-go/slack" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/k8s/environment" + "github.com/goplugin/plugin-testing-framework/logging" + reportModel "github.com/goplugin/plugin-testing-framework/testreporters" + 
// KeeperBenchmarkTest builds a test to check that plugin nodes are able to upkeep a specified amount of Upkeep
// contracts within a certain block time
type KeeperBenchmarkTest struct {
	Inputs       KeeperBenchmarkTestInputs
	TestReporter testreporters.KeeperBenchmarkTestReporter

	t             *testing.T
	log           zerolog.Logger
	startingBlock *big.Int // first block observed by the run; set in Run()

	// One entry per registry version under test, indexed identically.
	keeperRegistries        []contracts.KeeperRegistry
	keeperRegistrars        []contracts.KeeperRegistrar
	keeperConsumerContracts []contracts.AutomationConsumerBenchmark
	upkeepIDs               [][]*big.Int

	env              *environment.Environment
	namespace        string
	pluginNodes      []*client.PluginK8sClient
	chainClient      blockchain.EVMClient
	testConfig       tt.KeeperBenchmarkTestConfig
	contractDeployer contracts.ContractDeployer

	linkToken contracts.LinkToken
	ethFeed   contracts.MockETHPLIFeed
	gasFeed   contracts.MockGasFeed
}

// UpkeepConfig dictates details of how the test's upkeep contracts should be called and configured
type UpkeepConfig struct {
	NumberOfUpkeeps     int   // Number of upkeep contracts
	BlockRange          int64 // How many blocks to run the test for
	BlockInterval       int64 // Interval of blocks that upkeeps are expected to be performed
	CheckGasToBurn      int64 // How much gas should be burned on checkUpkeep() calls
	PerformGasToBurn    int64 // How much gas should be burned on performUpkeep() calls
	UpkeepGasLimit      int64 // Maximum gas that can be consumed by the upkeeps
	FirstEligibleBuffer int64 // How many blocks to add to randomised first eligible block, set to 0 to disable randomised first eligible block
}

// PreDeployedContracts are contracts that are already deployed on a (usually) live testnet chain, so re-deployment
// is unnecessary. Empty / non-hex addresses cause the corresponding contract to be deployed fresh in Setup().
type PreDeployedContracts struct {
	RegistryAddress  string
	RegistrarAddress string
	LinkTokenAddress string
	EthFeedAddress   string
	GasFeedAddress   string
}

// KeeperBenchmarkTestInputs are all the required inputs for a Keeper Benchmark Test
type KeeperBenchmarkTestInputs struct {
	BlockchainClient       blockchain.EVMClient              // Client for the test to connect to the blockchain with
	KeeperRegistrySettings *contracts.KeeperRegistrySettings // Settings of each keeper contract
	Upkeeps                *UpkeepConfig
	Contracts              *PreDeployedContracts
	Timeout                time.Duration // Timeout for the test
	PluginNodeFunding      *big.Float    // Amount of ETH to fund each plugin node with
	UpkeepSLA              int64         // SLA in number of blocks for an upkeep to be performed once it becomes eligible
	RegistryVersions       []ethereum.KeeperRegistryVersion  // Registry version to use
	ForceSingleTxnKey      bool
	BlockTime              time.Duration
	DeltaStage             time.Duration
	DeleteJobsOnEnd        bool
}

// NewKeeperBenchmarkTest prepares a new keeper benchmark test to be run
func NewKeeperBenchmarkTest(t *testing.T, inputs KeeperBenchmarkTestInputs) *KeeperBenchmarkTest {
	return &KeeperBenchmarkTest{
		Inputs: inputs,
		t:      t,
		log:    logging.GetTestLogger(t),
	}
}
// Setup prepares contracts for the test.
// It validates inputs, connects clients, loads or deploys the PLI token and
// mock feeds, deploys one registry/registrar/consumer set per requested
// registry version, and funds the plugin nodes.
func (k *KeeperBenchmarkTest) Setup(env *environment.Environment, config tt.KeeperBenchmarkTestConfig) {
	startTime := time.Now()
	k.TestReporter.Summary.StartTime = startTime.UnixMilli()
	k.ensureInputValues()
	k.env = env
	k.namespace = k.env.Cfg.Namespace
	inputs := k.Inputs
	k.testConfig = config

	// One slot per registry version under test; filled by DeployBenchmarkKeeperContracts.
	k.keeperRegistries = make([]contracts.KeeperRegistry, len(inputs.RegistryVersions))
	k.keeperRegistrars = make([]contracts.KeeperRegistrar, len(inputs.RegistryVersions))
	k.keeperConsumerContracts = make([]contracts.AutomationConsumerBenchmark, len(inputs.RegistryVersions))
	k.upkeepIDs = make([][]*big.Int, len(inputs.RegistryVersions))
	k.log.Debug().Interface("TestInputs", inputs).Msg("Setting up benchmark test")

	var err error
	// Connect to networks and prepare for contract deployment
	k.contractDeployer, err = contracts.NewContractDeployer(k.chainClient, k.log)
	require.NoError(k.t, err, "Building a new contract deployer shouldn't fail")
	k.pluginNodes, err = client.ConnectPluginNodes(k.env)
	require.NoError(k.t, err, "Connecting to plugin nodes shouldn't fail")
	k.chainClient.ParallelTransactions(true)

	// With multiple registries and no forced single key, give each node one
	// extra transaction key per additional registry so txs don't contend.
	if len(inputs.RegistryVersions) > 1 && !inputs.ForceSingleTxnKey {
		for nodeIndex, node := range k.pluginNodes {
			for registryIndex := 1; registryIndex < len(inputs.RegistryVersions); registryIndex++ {
				k.log.Debug().Str("URL", node.URL()).Int("NodeIndex", nodeIndex).Int("RegistryIndex", registryIndex).Msg("Create Tx key")
				_, _, err := node.CreateTxKey("evm", k.Inputs.BlockchainClient.GetChainID().String())
				require.NoError(k.t, err, "Creating transaction key shouldn't fail")
			}
		}
	}

	c := inputs.Contracts

	// For each supporting contract: load the pre-deployed address when one
	// is provided, otherwise deploy a fresh instance and wait for it.
	if common.IsHexAddress(c.LinkTokenAddress) {
		k.linkToken, err = k.contractDeployer.LoadLinkToken(common.HexToAddress(c.LinkTokenAddress))
		require.NoError(k.t, err, "Loading Link Token Contract shouldn't fail")
	} else {
		k.linkToken, err = k.contractDeployer.DeployLinkTokenContract()
		require.NoError(k.t, err, "Deploying Link Token Contract shouldn't fail")
		err = k.chainClient.WaitForEvents()
		require.NoError(k.t, err, "Failed waiting for PLI Contract deployment")
	}

	if common.IsHexAddress(c.EthFeedAddress) {
		k.ethFeed, err = k.contractDeployer.LoadETHPLIFeed(common.HexToAddress(c.EthFeedAddress))
		require.NoError(k.t, err, "Loading ETH-Link feed Contract shouldn't fail")
	} else {
		k.ethFeed, err = k.contractDeployer.DeployMockETHPLIFeed(big.NewInt(2e18))
		require.NoError(k.t, err, "Deploying mock ETH-Link feed shouldn't fail")
		err = k.chainClient.WaitForEvents()
		require.NoError(k.t, err, "Failed waiting for ETH-Link feed Contract deployment")
	}

	if common.IsHexAddress(c.GasFeedAddress) {
		k.gasFeed, err = k.contractDeployer.LoadGasFeed(common.HexToAddress(c.GasFeedAddress))
		require.NoError(k.t, err, "Loading Gas feed Contract shouldn't fail")
	} else {
		k.gasFeed, err = k.contractDeployer.DeployMockGasFeed(big.NewInt(2e11))
		require.NoError(k.t, err, "Deploying mock gas feed shouldn't fail")
		err = k.chainClient.WaitForEvents()
		require.NoError(k.t, err, "Failed waiting for mock gas feed Contract deployment")
	}

	err = k.chainClient.WaitForEvents()
	require.NoError(k.t, err, "Failed waiting for mock feeds to deploy")

	for index := range inputs.RegistryVersions {
		k.log.Info().Int("Index", index).Msg("Starting Test Setup")

		k.DeployBenchmarkKeeperContracts(index)
	}

	var keysToFund = inputs.RegistryVersions
	if inputs.ForceSingleTxnKey {
		keysToFund = inputs.RegistryVersions[0:1]
	}

	for index := range keysToFund {
		// Fund plugin nodes. For OCR registries (v2.x) the first node is the
		// bootstrap node and does not transact, so it is skipped.
		nodesToFund := k.pluginNodes
		if inputs.RegistryVersions[index] == ethereum.RegistryVersion_2_0 || inputs.RegistryVersions[index] == ethereum.RegistryVersion_2_1 {
			nodesToFund = k.pluginNodes[1:]
		}
		err = actions.FundPluginNodesAddress(nodesToFund, k.chainClient, k.Inputs.PluginNodeFunding, index)
		require.NoError(k.t, err, "Funding Plugin nodes shouldn't fail")
	}

	k.log.Info().Str("Setup Time", time.Since(startTime).String()).Msg("Finished Keeper Benchmark Test Setup")
	// Slack notification failure is non-fatal: the test can run without it.
	err = k.SendSlackNotification(nil, config)
	if err != nil {
		k.log.Warn().Msg("Sending test start slack notification failed")
	}
}
// Run runs the keeper benchmark test.
// It configures jobs on the plugin nodes, subscribes round confirmers for
// every upkeep, waits for the test window to complete, then gathers registry
// logs in batches to compute revert/stale statistics for the report.
func (k *KeeperBenchmarkTest) Run() {
	u := k.Inputs.Upkeeps
	k.TestReporter.Summary.Load.TotalCheckGasPerBlock = int64(u.NumberOfUpkeeps) * u.CheckGasToBurn
	k.TestReporter.Summary.Load.TotalPerformGasPerBlock = int64((float64(u.NumberOfUpkeeps) /
		float64(u.BlockInterval)) * float64(u.PerformGasToBurn))
	k.TestReporter.Summary.Load.AverageExpectedPerformsPerBlock = float64(u.NumberOfUpkeeps) /
		float64(u.BlockInterval)
	k.TestReporter.Summary.TestInputs = map[string]interface{}{
		"NumberOfUpkeeps":     u.NumberOfUpkeeps,
		"BlockCountPerTurn":   k.Inputs.KeeperRegistrySettings.BlockCountPerTurn,
		"CheckGasLimit":       k.Inputs.KeeperRegistrySettings.CheckGasLimit,
		"MaxPerformGas":       k.Inputs.KeeperRegistrySettings.MaxPerformGas,
		"CheckGasToBurn":      u.CheckGasToBurn,
		"PerformGasToBurn":    u.PerformGasToBurn,
		"BlockRange":          u.BlockRange,
		"BlockInterval":       u.BlockInterval,
		"UpkeepSLA":           k.Inputs.UpkeepSLA,
		"FirstEligibleBuffer": u.FirstEligibleBuffer,
		"NumberOfRegistries":  len(k.keeperRegistries),
	}
	inputs := k.Inputs
	startingBlock, err := k.chainClient.LatestBlockNumber(testcontext.Get(k.t))
	require.NoError(k.t, err, "Error getting latest block number")
	k.startingBlock = big.NewInt(0).SetUint64(startingBlock)
	startTime := time.Now()

	// First node is the OCR bootstrap node and does not participate in config.
	nodesWithoutBootstrap := k.pluginNodes[1:]

	for rIndex := range k.keeperRegistries {

		// All registries share tx key 0 when a single key is forced.
		var txKeyId = rIndex
		if inputs.ForceSingleTxnKey {
			txKeyId = 0
		}
		ocrConfig, err := actions.BuildAutoOCR2ConfigVarsWithKeyIndex(
			k.t, nodesWithoutBootstrap, *inputs.KeeperRegistrySettings, k.keeperRegistrars[rIndex].Address(), k.Inputs.DeltaStage, txKeyId, common.Address{},
		)
		require.NoError(k.t, err, "Building OCR config shouldn't fail")

		// Send keeper jobs to registry and plugin nodes
		if inputs.RegistryVersions[rIndex] == ethereum.RegistryVersion_2_0 || inputs.RegistryVersions[rIndex] == ethereum.RegistryVersion_2_1 {
			actions.CreateOCRKeeperJobs(k.t, k.pluginNodes, k.keeperRegistries[rIndex].Address(), k.chainClient.GetChainID().Int64(), txKeyId, inputs.RegistryVersions[rIndex])
			err = k.keeperRegistries[rIndex].SetConfig(*inputs.KeeperRegistrySettings, ocrConfig)
			require.NoError(k.t, err, "Registry config should be be set successfully")
			// Give time for OCR nodes to bootstrap
			time.Sleep(1 * time.Minute)
		} else {
			actions.CreateKeeperJobsWithKeyIndex(k.t, k.pluginNodes, k.keeperRegistries[rIndex], txKeyId, ocrConfig, k.chainClient.GetChainID().String())
		}
		err = k.chainClient.WaitForEvents()
		require.NoError(k.t, err, "Error waiting for registry setConfig")
	}

	// Attach a round confirmer per upkeep; these drive the header event
	// subscriptions that decide when the test window is complete.
	for rIndex := range k.keeperRegistries {
		for index, upkeepID := range k.upkeepIDs[rIndex] {
			k.chainClient.AddHeaderEventSubscription(fmt.Sprintf("Keeper Tracker %d %d", rIndex, index),
				contracts.NewKeeperConsumerBenchmarkRoundConfirmer(
					k.keeperConsumerContracts[rIndex],
					k.keeperRegistries[rIndex],
					upkeepID,
					inputs.Upkeeps.BlockRange+inputs.UpkeepSLA,
					inputs.UpkeepSLA,
					&k.TestReporter,
					int64(index),
					inputs.Upkeeps.FirstEligibleBuffer,
					k.log,
				),
			)
		}
	}
	defer func() { // Cleanup the subscriptions
		for rIndex := range k.keeperRegistries {
			for index := range k.upkeepIDs[rIndex] {
				k.chainClient.DeleteHeaderEventSubscription(fmt.Sprintf("Keeper Tracker %d %d", rIndex, index))
			}
		}
	}()

	// Main test loop
	k.observeUpkeepEvents()
	err = k.chainClient.WaitForEvents()
	require.NoError(k.t, err, "Error waiting for keeper subscriptions")

	// Collect logs for each registry to calculate test metrics
	// This test generates a LOT of logs, and we need to break up our reads, or risk getting rate-limited by the node
	var (
		endBlock                       = big.NewInt(0).Add(k.startingBlock, big.NewInt(u.BlockRange))
		registryLogs                   = make([][]types.Log, len(k.keeperRegistries))
		blockBatchSize           int64 = 100
	)
	for rIndex := range k.keeperRegistries {
		// Variables for the full registry
		var (
			logs            []types.Log
			timeout         = 5 * time.Second
			addr            = k.keeperRegistries[rIndex].Address()
			queryStartBlock = big.NewInt(0).Set(k.startingBlock)
		)

		// Gather logs from the registry in 100 block chunks to avoid read limits
		for queryStartBlock.Cmp(endBlock) < 0 {
			filterQuery := geth.FilterQuery{
				Addresses: []common.Address{common.HexToAddress(addr)},
				FromBlock: queryStartBlock,
				ToBlock:   big.NewInt(0).Add(queryStartBlock, big.NewInt(blockBatchSize)),
			}

			// This RPC call can possibly time out or otherwise die. Failure is not an option, keep retrying to get our stats.
			err = fmt.Errorf("initial error") // to ensure our for loop runs at least once
			for err != nil {
				ctx, cancel := context.WithTimeout(testcontext.Get(k.t), timeout)
				logs, err = k.chainClient.FilterLogs(ctx, filterQuery)
				cancel()
				if err != nil {
					k.log.Error().
						Err(err).
						Interface("Filter Query", filterQuery).
						Str("Timeout", timeout.String()).
						Msg("Error getting logs from chain, trying again")
					// Exponential backoff on the RPC timeout, capped at 2 minutes.
					timeout = time.Duration(math.Min(float64(timeout)*2, float64(2*time.Minute)))
					continue
				}
				k.log.Info().
					Uint64("From Block", queryStartBlock.Uint64()).
					Uint64("To Block", filterQuery.ToBlock.Uint64()).
					Int("Log Count", len(logs)).
					Str("Registry Address", addr).
					Msg("Collected logs")
				queryStartBlock.Add(queryStartBlock, big.NewInt(blockBatchSize))
				registryLogs[rIndex] = append(registryLogs[rIndex], logs...)
			}
		}
	}

	// Count reverts and stale upkeeps
	for rIndex := range k.keeperRegistries {
		contractABI := k.contractABI(rIndex)
		for _, l := range registryLogs[rIndex] {
			log := l
			eventDetails, err := contractABI.EventByID(log.Topics[0])
			if err != nil {
				k.log.Error().Err(err).Str("Log Hash", log.TxHash.Hex()).Msg("Error getting event details for log, report data inaccurate")
				break
			}
			if eventDetails.Name == "UpkeepPerformed" {
				parsedLog, err := k.keeperRegistries[rIndex].ParseUpkeepPerformedLog(&log)
				if err != nil {
					k.log.Error().Err(err).Str("Log Hash", log.TxHash.Hex()).Msg("Error parsing upkeep performed log, report data inaccurate")
					break
				}
				if !parsedLog.Success {
					k.TestReporter.NumRevertedUpkeeps++
				}
			} else if eventDetails.Name == "StaleUpkeepReport" {
				k.TestReporter.NumStaleUpkeepReports++
			}
		}
	}

	for _, pluginNode := range k.pluginNodes {
		txData, err := pluginNode.MustReadTransactionAttempts()
		if err != nil {
			k.log.Error().Err(err).Msg("Error reading transaction attempts from Plugin Node")
		}
		k.TestReporter.AttemptedPluginTransactions = append(k.TestReporter.AttemptedPluginTransactions, txData)
	}

	k.TestReporter.Summary.Config.Plugin, err = k.env.ResourcesSummary("app=plugin-0")
	if err != nil {
		k.log.Error().Err(err).Msg("Error getting resource summary of plugin node")
	}

	k.TestReporter.Summary.Config.Geth, err = k.env.ResourcesSummary("app=geth")
	if err != nil && k.Inputs.BlockchainClient.NetworkSimulated() {
		k.log.Error().Err(err).Msg("Error getting resource summary of geth node")
	}

	endTime := time.Now()
	// Pad the reported end time by 30s so dashboards capture trailing activity.
	k.TestReporter.Summary.EndTime = endTime.UnixMilli() + (30 * time.Second.Milliseconds())

	for rIndex := range k.keeperRegistries {
		if inputs.DeleteJobsOnEnd {
			// Delete keeper jobs on plugin nodes
			actions.DeleteKeeperJobsWithId(k.t, k.pluginNodes, rIndex+1)
		}
	}

	k.log.Info().Str("Run Time", endTime.Sub(startTime).String()).Msg("Finished Keeper Benchmark Test")
}
+} + +// TearDownVals returns the networks that the test is running on +func (k *KeeperBenchmarkTest) TearDownVals(t *testing.T) ( + *testing.T, + string, + []*client.PluginK8sClient, + reportModel.TestReporter, + reportModel.GrafanaURLProvider, + blockchain.EVMClient, +) { + return t, k.namespace, k.pluginNodes, &k.TestReporter, k.testConfig, k.chainClient +} + +// ********************* +// ****** Helpers ****** +// ********************* + +// observeUpkeepEvents subscribes to Upkeep events on deployed registries and logs them +// WARNING: This should only be used for observation and logging. This isn't a reliable way to build a final report +// due to how fragile subscriptions can be +func (k *KeeperBenchmarkTest) observeUpkeepEvents() { + eventLogs := make(chan types.Log) + registryAddresses := make([]common.Address, len(k.keeperRegistries)) + addressIndexMap := map[common.Address]int{} + for index, registry := range k.keeperRegistries { + registryAddresses[index] = common.HexToAddress(registry.Address()) + addressIndexMap[registryAddresses[index]] = index + } + filterQuery := geth.FilterQuery{ + Addresses: registryAddresses, + FromBlock: k.startingBlock, + } + + ctx, cancel := context.WithTimeout(testcontext.Get(k.t), 5*time.Second) + sub, err := k.chainClient.SubscribeFilterLogs(ctx, filterQuery, eventLogs) + cancel() + require.NoError(k.t, err, "Subscribing to upkeep performed events log shouldn't fail") + + interruption := make(chan os.Signal, 1) + //nolint:staticcheck //ignore SA1016 we need to send the os.Kill signal + signal.Notify(interruption, os.Kill, os.Interrupt, syscall.SIGTERM) + + go func() { + for { + select { + case <-interruption: + k.log.Warn().Msg("Received interrupt signal, test container restarting. Dashboard view will be inaccurate.") + case err := <-sub.Err(): + backoff := time.Second + for err != nil { // Keep retrying until we get a successful subscription + k.log.Error(). + Err(err). + Interface("Query", filterQuery). 
+ Str("Backoff", backoff.String()). + Msg("Error while subscribing to Keeper Event Logs. Resubscribing...") + + ctx, cancel := context.WithTimeout(testcontext.Get(k.t), backoff) + sub, err = k.chainClient.SubscribeFilterLogs(ctx, filterQuery, eventLogs) + cancel() + if err != nil { + time.Sleep(backoff) + backoff = time.Duration(math.Min(float64(backoff)*2, float64(30*time.Second))) + } + } + log.Info().Msg("Resubscribed to Keeper Event Logs") + case vLog := <-eventLogs: + rIndex, ok := addressIndexMap[vLog.Address] + if !ok { + k.log.Error().Str("Address", vLog.Address.Hex()).Msg("Received log from unknown registry") + continue + } + contractABI := k.contractABI(rIndex) + eventDetails, err := contractABI.EventByID(vLog.Topics[0]) + require.NoError(k.t, err, "Getting event details for subscribed log shouldn't fail") + if eventDetails.Name != "UpkeepPerformed" && eventDetails.Name != "StaleUpkeepReport" { + // Skip non upkeepPerformed Logs + continue + } + if vLog.Removed { + k.log.Warn(). + Str("Name", eventDetails.Name). + Str("Registry", k.keeperRegistries[rIndex].Address()). + Msg("Got removed log") + } + if eventDetails.Name == "UpkeepPerformed" { + parsedLog, err := k.keeperRegistries[rIndex].ParseUpkeepPerformedLog(&vLog) + require.NoError(k.t, err, "Parsing upkeep performed log shouldn't fail") + + if parsedLog.Success { + k.log.Info(). + Str("Upkeep ID", parsedLog.Id.String()). + Bool("Success", parsedLog.Success). + Str("From", parsedLog.From.String()). + Str("Registry", k.keeperRegistries[rIndex].Address()). + Msg("Got successful Upkeep Performed log on Registry") + } else { + k.log.Warn(). + Str("Upkeep ID", parsedLog.Id.String()). + Bool("Success", parsedLog.Success). + Str("From", parsedLog.From.String()). + Str("Registry", k.keeperRegistries[rIndex].Address()). 
// contractABI returns the ABI of the proper keeper registry contract
// for the registry version at rIndex. Unknown versions fall back to the
// v2.0 ABI.
func (k *KeeperBenchmarkTest) contractABI(rIndex int) *abi.ABI {
	var (
		contractABI *abi.ABI
		err         error
	)
	switch k.Inputs.RegistryVersions[rIndex] {
	case ethereum.RegistryVersion_1_0, ethereum.RegistryVersion_1_1:
		contractABI, err = keeper_registry_wrapper1_1.KeeperRegistryMetaData.GetAbi()
	case ethereum.RegistryVersion_1_2:
		contractABI, err = keeper_registry_wrapper1_2.KeeperRegistryMetaData.GetAbi()
	case ethereum.RegistryVersion_1_3:
		contractABI, err = keeper_registry_wrapper1_3.KeeperRegistryMetaData.GetAbi()
	case ethereum.RegistryVersion_2_0:
		contractABI, err = keeper_registry_wrapper2_0.KeeperRegistryMetaData.GetAbi()
	case ethereum.RegistryVersion_2_1:
		contractABI, err = iregistry21.IKeeperRegistryMasterMetaData.GetAbi()
	default:
		contractABI, err = keeper_registry_wrapper2_0.KeeperRegistryMetaData.GetAbi()
	}
	require.NoError(k.t, err, "Getting contract ABI shouldn't fail")
	return contractABI
}

// ensureInputValues ensures that all values needed to run the test are present,
// failing the test immediately when a required input is missing or invalid.
// Also derives defaults: DeltaStage defaults to 5x the block time.
func (k *KeeperBenchmarkTest) ensureInputValues() {
	inputs := k.Inputs
	require.NotNil(k.t, inputs.BlockchainClient, "Need a valid blockchain client to use for the test")
	k.chainClient = inputs.BlockchainClient
	require.GreaterOrEqual(k.t, inputs.Upkeeps.NumberOfUpkeeps, 1, "Expecting at least 1 keeper contracts")
	// Either a block range or a timeout must bound the test duration.
	if inputs.Timeout == 0 {
		require.Greater(k.t, inputs.Upkeeps.BlockRange, int64(0), "If no `timeout` is provided, a `testBlockRange` is required")
	} else if inputs.Upkeeps.BlockRange <= 0 {
		require.GreaterOrEqual(k.t, inputs.Timeout, time.Second, "If no `testBlockRange` is provided a `timeout` is required")
	}
	require.NotNil(k.t, inputs.KeeperRegistrySettings, "You need to set KeeperRegistrySettings")
	require.NotNil(k.t, k.Inputs.PluginNodeFunding, "You need to set a funding amount for plugin nodes")
	clFunds, _ := k.Inputs.PluginNodeFunding.Float64()
	require.GreaterOrEqual(k.t, clFunds, 0.0, "Expecting Plugin node funding to be more than 0 ETH")
	require.Greater(k.t, inputs.Upkeeps.CheckGasToBurn, int64(0), "You need to set an expected amount of gas to burn on checkUpkeep()")
	require.GreaterOrEqual(
		k.t, int64(inputs.KeeperRegistrySettings.CheckGasLimit), inputs.Upkeeps.CheckGasToBurn, "CheckGasLimit should be >= CheckGasToBurn",
	)
	require.Greater(k.t, inputs.Upkeeps.PerformGasToBurn, int64(0), "You need to set an expected amount of gas to burn on performUpkeep()")
	require.NotNil(k.t, inputs.UpkeepSLA, "Expected UpkeepSLA to be set")
	require.NotNil(k.t, inputs.Upkeeps.FirstEligibleBuffer, "You need to set FirstEligibleBuffer")
	require.NotNil(k.t, inputs.RegistryVersions[0], "You need to set RegistryVersion")
	require.NotNil(k.t, inputs.BlockTime, "You need to set BlockTime")

	if k.Inputs.DeltaStage == 0 {
		k.Inputs.DeltaStage = k.Inputs.BlockTime * 5
	}
}
":white_check_mark: Automation Benchmark Test STARTED :white_check_mark:" + formattedDashboardUrl := fmt.Sprintf("%s%s?from=%d&to=%s&var-namespace=%s&var-cl_node=plugin-0-0", grafanaUrl, dashboardUrl, k.TestReporter.Summary.StartTime, "now", k.env.Cfg.Namespace) + log.Info().Str("Dashboard", formattedDashboardUrl).Msg("Dashboard URL") + + notificationBlocks := []slack.Block{} + notificationBlocks = append(notificationBlocks, + slack.NewHeaderBlock(slack.NewTextBlockObject("plain_text", headerText, true, false))) + notificationBlocks = append(notificationBlocks, + slack.NewContextBlock("context_block", slack.NewTextBlockObject("plain_text", k.env.Cfg.Namespace, false, false))) + notificationBlocks = append(notificationBlocks, slack.NewDividerBlock()) + notificationBlocks = append(notificationBlocks, slack.NewSectionBlock(slack.NewTextBlockObject("mrkdwn", + fmt.Sprintf("<%s|Test Dashboard> \nNotifying <@%s>", + formattedDashboardUrl, reportModel.SlackUserID), false, true), nil, nil)) + + ts, err := reportModel.SendSlackMessage(slackClient, slack.MsgOptionBlocks(notificationBlocks...)) + log.Debug().Str("ts", ts).Msg("Sent Slack Message") + return err +} + +// DeployBenchmarkKeeperContracts deploys a set amount of keeper Benchmark contracts registered to a single registry +func (k *KeeperBenchmarkTest) DeployBenchmarkKeeperContracts(index int) { + registryVersion := k.Inputs.RegistryVersions[index] + k.Inputs.KeeperRegistrySettings.RegistryVersion = registryVersion + upkeep := k.Inputs.Upkeeps + var ( + registry contracts.KeeperRegistry + registrar contracts.KeeperRegistrar + ) + + // Contract deployment is different for legacy keepers and OCR automation + if registryVersion <= ethereum.RegistryVersion_1_3 { // Legacy keeper - v1.X + registry = actions.DeployKeeperRegistry(k.t, k.contractDeployer, k.chainClient, + &contracts.KeeperRegistryOpts{ + RegistryVersion: registryVersion, + LinkAddr: k.linkToken.Address(), + ETHFeedAddr: k.ethFeed.Address(), + GasFeedAddr: 
k.gasFeed.Address(), + TranscoderAddr: actions.ZeroAddress.Hex(), + RegistrarAddr: actions.ZeroAddress.Hex(), + Settings: *k.Inputs.KeeperRegistrySettings, + }, + ) + + // Fund the registry with 1 PLI * amount of AutomationConsumerBenchmark contracts + err := k.linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(k.Inputs.Upkeeps.NumberOfUpkeeps)))) + require.NoError(k.t, err, "Funding keeper registry contract shouldn't fail") + + registrarSettings := contracts.KeeperRegistrarSettings{ + AutoApproveConfigType: 2, + AutoApproveMaxAllowed: math.MaxUint16, + RegistryAddr: registry.Address(), + MinLinkJuels: big.NewInt(0), + } + registrar = actions.DeployKeeperRegistrar(k.t, registryVersion, k.linkToken, registrarSettings, k.contractDeployer, k.chainClient, registry) + } else { // OCR automation - v2.X + registry, registrar = actions.DeployAutoOCRRegistryAndRegistrar( + k.t, registryVersion, *k.Inputs.KeeperRegistrySettings, k.linkToken, k.contractDeployer, k.chainClient, + ) + + // Fund the registry with PLI + err := k.linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(k.Inputs.Upkeeps.NumberOfUpkeeps)))) + require.NoError(k.t, err, "Funding keeper registry contract shouldn't fail") + ocrConfig, err := actions.BuildAutoOCR2ConfigVars(k.t, k.pluginNodes[1:], *k.Inputs.KeeperRegistrySettings, registrar.Address(), k.Inputs.DeltaStage) + k.log.Debug().Interface("KeeperRegistrySettings", *k.Inputs.KeeperRegistrySettings).Interface("OCRConfig", ocrConfig).Msg("Config") + require.NoError(k.t, err, "Error building OCR config vars") + err = registry.SetConfig(*k.Inputs.KeeperRegistrySettings, ocrConfig) + require.NoError(k.t, err, "Registry config should be be set successfully") + + } + + consumer := k.DeployKeeperConsumersBenchmark() + + var upkeepAddresses []string + + checkData := make([][]byte, 0) + uint256Ty, err := abi.NewType("uint256", "uint256", nil) + require.NoError(k.t, err) + var data 
[]byte + checkDataAbi := abi.Arguments{ + { + Type: uint256Ty, + }, + { + Type: uint256Ty, + }, + { + Type: uint256Ty, + }, + { + Type: uint256Ty, + }, + { + Type: uint256Ty, + }, + { + Type: uint256Ty, + }, + } + for i := 0; i < upkeep.NumberOfUpkeeps; i++ { + upkeepAddresses = append(upkeepAddresses, consumer.Address()) + // Compute check data + data, err = checkDataAbi.Pack( + big.NewInt(int64(i)), big.NewInt(upkeep.BlockInterval), big.NewInt(upkeep.BlockRange), + big.NewInt(upkeep.CheckGasToBurn), big.NewInt(upkeep.PerformGasToBurn), big.NewInt(upkeep.FirstEligibleBuffer)) + require.NoError(k.t, err) + k.log.Debug().Str("checkData: ", hexutil.Encode(data)).Int("id", i).Msg("checkData computed") + checkData = append(checkData, data) + } + linkFunds := big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(upkeep.BlockRange/upkeep.BlockInterval)) + gasPrice := big.NewInt(0).Mul(k.Inputs.KeeperRegistrySettings.FallbackGasPrice, big.NewInt(2)) + minLinkBalance := big.NewInt(0). + Add(big.NewInt(0). + Mul(big.NewInt(0). 
+ Div(big.NewInt(0).Mul(gasPrice, big.NewInt(upkeep.UpkeepGasLimit+80000)), k.Inputs.KeeperRegistrySettings.FallbackLinkPrice), + big.NewInt(1e18+0)), + big.NewInt(0)) + + linkFunds = big.NewInt(0).Add(linkFunds, minLinkBalance) + + upkeepIds := actions.RegisterUpkeepContractsWithCheckData(k.t, k.linkToken, linkFunds, k.chainClient, uint32(upkeep.UpkeepGasLimit), registry, registrar, upkeep.NumberOfUpkeeps, upkeepAddresses, checkData, false, false) + + k.keeperRegistries[index] = registry + k.keeperRegistrars[index] = registrar + k.upkeepIDs[index] = upkeepIds + k.keeperConsumerContracts[index] = consumer +} + +func (k *KeeperBenchmarkTest) DeployKeeperConsumersBenchmark() contracts.AutomationConsumerBenchmark { + // Deploy consumer + keeperConsumerInstance, err := k.contractDeployer.DeployKeeperConsumerBenchmark() + if err != nil { + k.log.Error().Err(err).Msg("Deploying AutomationConsumerBenchmark instance %d shouldn't fail") + keeperConsumerInstance, err = k.contractDeployer.DeployKeeperConsumerBenchmark() + require.NoError(k.t, err, "Error deploying AutomationConsumerBenchmark") + } + k.log.Debug(). + Str("Contract Address", keeperConsumerInstance.Address()). + Msg("Deployed Keeper Benchmark Contract") + + err = k.chainClient.WaitForEvents() + require.NoError(k.t, err, "Failed waiting for to deploy all keeper consumer contracts") + k.log.Info().Msg("Successfully deployed all Keeper Consumer Contracts") + + return keeperConsumerInstance +} diff --git a/integration-tests/testsetups/ocr.go b/integration-tests/testsetups/ocr.go new file mode 100644 index 00000000..36044838 --- /dev/null +++ b/integration-tests/testsetups/ocr.go @@ -0,0 +1,860 @@ +// Package testsetups compresses common test setups and more complicated setups like performance and chaos tests. 
const (
	// Where in-progress soak state is persisted so an interrupted test can resume.
	saveFileLocation    = "/persistence/ocr-soak-test-state.toml"
	interruptedExitCode = 3
)

// OCRSoakTest defines a typical OCR soak test
type OCRSoakTest struct {
	Config                *tc.TestConfig
	TestReporter          testreporters.OCRSoakTestReporter
	OperatorForwarderFlow bool

	t                *testing.T
	startTime        time.Time
	timeLeft         time.Duration
	startingBlockNum uint64
	testEnvironment  *environment.Environment
	namespace        string
	log              zerolog.Logger
	bootstrapNode    *client.PluginK8sClient
	workerNodes      []*client.PluginK8sClient
	chainClient      blockchain.EVMClient
	mockServer       *ctfClient.MockserverClient
	filterQuery      geth.FilterQuery

	ocrRoundStates []*testreporters.OCRRoundState
	testIssues     []*testreporters.TestIssue

	// Only one of the v1/v2 sets is populated, depending on Soak.OCRVersion.
	ocrV1Instances   []contracts.OffchainAggregator
	ocrV1InstanceMap map[string]contracts.OffchainAggregator // address : instance

	ocrV2Instances   []contracts.OffchainAggregatorV2
	ocrV2InstanceMap map[string]contracts.OffchainAggregatorV2 // address : instance
}

// NewOCRSoakTest creates a new OCR soak test to setup and run.
// Returns an error if the provided config fails input validation.
func NewOCRSoakTest(t *testing.T, config *tc.TestConfig, forwarderFlow bool) (*OCRSoakTest, error) {
	test := &OCRSoakTest{
		Config:                config,
		OperatorForwarderFlow: forwarderFlow,
		TestReporter: testreporters.OCRSoakTestReporter{
			OCRVersion: *config.OCR.Soak.OCRVersion,
			StartTime:  time.Now(),
		},
		t:                t,
		startTime:        time.Now(),
		timeLeft:         config.OCR.Common.TestDuration.Duration,
		log:              logging.GetTestLogger(t),
		ocrRoundStates:   make([]*testreporters.OCRRoundState, 0),
		ocrV1InstanceMap: make(map[string]contracts.OffchainAggregator),
		ocrV2InstanceMap: make(map[string]contracts.OffchainAggregatorV2),
	}
	return test, test.ensureInputValues()
}
// DeployEnvironment deploys the test environment, starting all Plugin nodes and other components for the test.
// The namespace prefix encodes OCR version, forwarder mode, and network name so
// concurrent soak runs are distinguishable in the cluster.
func (o *OCRSoakTest) DeployEnvironment(customPluginNetworkTOML string, ocrTestConfig tt.OcrTestConfig) {
	network := networks.MustGetSelectedNetworkConfig(ocrTestConfig.GetNetworkConfig())[0] // Environment currently being used to soak test on
	nsPre := fmt.Sprintf("soak-ocr-v%s-", *ocrTestConfig.GetOCRConfig().Soak.OCRVersion)
	if o.OperatorForwarderFlow {
		nsPre = fmt.Sprintf("%sforwarder-", nsPre)
	}
	// Normalize the network name into a k8s-safe namespace segment
	nsPre = fmt.Sprintf("%s%s", nsPre, strings.ReplaceAll(strings.ToLower(network.Name), " ", "-"))
	baseEnvironmentConfig := &environment.Config{
		TTL:                time.Hour * 720, // 30 days,
		NamespacePrefix:    nsPre,
		Test:               o.t,
		PreventPodEviction: true, // long-running soak pods must survive rebalances
	}

	// Select the base node TOML for the OCR version under test.
	// NOTE(review): conf stays empty for any other version string — relies on
	// ensureInputValues having restricted the version to "1" or "2".
	var conf string
	if *ocrTestConfig.GetOCRConfig().Soak.OCRVersion == "1" {
		conf = config.BaseOCR1Config
	} else if *ocrTestConfig.GetOCRConfig().Soak.OCRVersion == "2" {
		conf = config.BaseOCR2Config
	}

	// Applied to the generated chart values to pin image version and pyroscope key
	var overrideFn = func(_ interface{}, target interface{}) {
		ctf_config.MustConfigOverridePluginVersion(ocrTestConfig.GetPluginImageConfig(), target)
		ctf_config.MightConfigOverridePyroscopeKey(ocrTestConfig.GetPyroscopeConfig(), target)
	}

	cd := plugin.NewWithOverride(0, map[string]any{
		"replicas": 6,
		"toml":     networks.AddNetworkDetailedConfig(conf, ocrTestConfig.GetPyroscopeConfig(), customPluginNetworkTOML, network),
		"db": map[string]any{
			"stateful": true, // stateful DB by default for soak tests
		},
	}, ocrTestConfig.GetPluginImageConfig(), overrideFn)

	// Helm install order: mockserver config, mockserver, chain, then the nodes
	testEnvironment := environment.New(baseEnvironmentConfig).
		AddHelm(mockservercfg.New(nil)).
		AddHelm(mockserver.New(nil)).
		AddHelm(ethereum.New(&ethereum.Props{
			NetworkName: network.Name,
			Simulated:   network.Simulated,
			WsURLs:      network.URLs,
		})).
		AddHelm(cd)
	err := testEnvironment.Run()
	require.NoError(o.t, err, "Error launching test environment")
	o.testEnvironment = testEnvironment
	o.namespace = testEnvironment.Cfg.Namespace
}
+ AddHelm(cd) + err := testEnvironment.Run() + require.NoError(o.t, err, "Error launching test environment") + o.testEnvironment = testEnvironment + o.namespace = testEnvironment.Cfg.Namespace +} + +// LoadEnvironment loads an existing test environment using the provided URLs +func (o *OCRSoakTest) LoadEnvironment(pluginURLs []string, mockServerURL string, ocrTestConfig tt.OcrTestConfig) { + var ( + network = networks.MustGetSelectedNetworkConfig(ocrTestConfig.GetNetworkConfig())[0] + err error + ) + o.chainClient, err = blockchain.ConnectEVMClient(network, o.log) + require.NoError(o.t, err, "Error connecting to EVM client") + pluginNodes, err := client.ConnectPluginNodeURLs(pluginURLs) + require.NoError(o.t, err, "Error connecting to plugin nodes") + o.bootstrapNode, o.workerNodes = pluginNodes[0], pluginNodes[1:] + o.mockServer, err = ctfClient.ConnectMockServerURL(mockServerURL) + require.NoError(o.t, err, "Error connecting to mockserver") +} + +// Environment returns the full K8s test environment +func (o *OCRSoakTest) Environment() *environment.Environment { + return o.testEnvironment +} + +func (o *OCRSoakTest) Setup(ocrTestConfig tt.OcrTestConfig) { + var ( + err error + network = networks.MustGetSelectedNetworkConfig(ocrTestConfig.GetNetworkConfig())[0] + ) + + // Environment currently being used to soak test on + // Make connections to soak test resources + o.chainClient, err = blockchain.NewEVMClient(network, o.testEnvironment, o.log) + require.NoError(o.t, err, "Error creating EVM client") + contractDeployer, err := contracts.NewContractDeployer(o.chainClient, o.log) + require.NoError(o.t, err, "Unable to create contract deployer") + require.NotNil(o.t, contractDeployer, "Contract deployer shouldn't be nil") + nodes, err := client.ConnectPluginNodes(o.testEnvironment) + require.NoError(o.t, err, "Connecting to plugin nodes shouldn't fail") + o.bootstrapNode, o.workerNodes = nodes[0], nodes[1:] + o.mockServer, err = 
ctfClient.ConnectMockServer(o.testEnvironment) + require.NoError(o.t, err, "Creating mockserver clients shouldn't fail") + o.chainClient.ParallelTransactions(true) + // Deploy PLI + linkTokenContract, err := contractDeployer.DeployLinkTokenContract() + require.NoError(o.t, err, "Deploying Link Token Contract shouldn't fail") + + // Fund Plugin nodes, excluding the bootstrap node + o.log.Info().Float64("ETH amount per node", *o.Config.Common.PluginNodeFunding).Msg("Funding Plugin nodes") + err = actions.FundPluginNodes(o.workerNodes, o.chainClient, big.NewFloat(*o.Config.Common.PluginNodeFunding)) + require.NoError(o.t, err, "Error funding Plugin nodes") + + if o.OperatorForwarderFlow { + contractLoader, err := contracts.NewContractLoader(o.chainClient, o.log) + require.NoError(o.t, err, "Loading contracts shouldn't fail") + + operators, authorizedForwarders, _ := actions.DeployForwarderContracts( + o.t, contractDeployer, linkTokenContract, o.chainClient, len(o.workerNodes), + ) + forwarderNodesAddresses, err := actions.PluginNodeAddresses(o.workerNodes) + require.NoError(o.t, err, "Retrieving on-chain wallet addresses for plugin nodes shouldn't fail") + for i := range o.workerNodes { + actions.AcceptAuthorizedReceiversOperator( + o.t, operators[i], authorizedForwarders[i], []common.Address{forwarderNodesAddresses[i]}, o.chainClient, contractLoader, + ) + require.NoError(o.t, err, "Accepting Authorize Receivers on Operator shouldn't fail") + actions.TrackForwarder(o.t, o.chainClient, authorizedForwarders[i], o.workerNodes[i]) + err = o.chainClient.WaitForEvents() + } + + o.ocrV1Instances = actions.DeployOCRContractsForwarderFlow( + o.t, + *o.Config.OCR.Soak.NumberOfContracts, + linkTokenContract, + contractDeployer, + o.workerNodes, + authorizedForwarders, + o.chainClient, + ) + } else if *ocrTestConfig.GetOCRConfig().Soak.OCRVersion == "1" { + o.ocrV1Instances, err = actions.DeployOCRContracts( + *o.Config.OCR.Soak.NumberOfContracts, + linkTokenContract, + 
// Run starts the OCR soak test: it records the starting block, creates OCR
// jobs on the nodes for the configured version (or forwarder flow), then
// drives rounds via testLoop until the test duration elapses.
func (o *OCRSoakTest) Run() {
	// NOTE(review): this re-loads the soak config from disk rather than using
	// o.Config set at construction — confirm the two cannot diverge.
	config, err := tc.GetConfig("soak", tc.OCR)
	require.NoError(o.t, err, "Error getting config")

	ctx, cancel := context.WithTimeout(testcontext.Get(o.t), time.Second*5)
	latestBlockNum, err := o.chainClient.LatestBlockNumber(ctx)
	cancel()
	require.NoError(o.t, err, "Error getting current block number")
	// Event collection later filters logs from this block onward
	o.startingBlockNum = latestBlockNum

	startingValue := 5
	if o.OperatorForwarderFlow {
		actions.CreateOCRJobsWithForwarder(o.t, o.ocrV1Instances, o.bootstrapNode, o.workerNodes, startingValue, o.mockServer, o.chainClient.GetChainID().String())
	} else if *config.OCR.Soak.OCRVersion == "1" {
		err := actions.CreateOCRJobs(o.ocrV1Instances, o.bootstrapNode, o.workerNodes, startingValue, o.mockServer, o.chainClient.GetChainID().String())
		require.NoError(o.t, err, "Error creating OCR jobs")
	} else if *config.OCR.Soak.OCRVersion == "2" {
		err := actions.CreateOCRv2Jobs(o.ocrV2Instances, o.bootstrapNode, o.workerNodes, o.mockServer, startingValue, o.chainClient.GetChainID().Uint64(), o.OperatorForwarderFlow)
		require.NoError(o.t, err, "Error creating OCR jobs")
	}

	o.log.Info().
		Str("Test Duration", o.Config.OCR.Common.TestDuration.Duration.Truncate(time.Second).String()).
		Int("Number of OCR Contracts", *config.OCR.Soak.NumberOfContracts).
		Str("OCR Version", *config.OCR.Soak.OCRVersion).
		Msg("Starting OCR Soak Test")

	o.testLoop(o.Config.OCR.Common.TestDuration.Duration, startingValue)
	o.complete()
}
StartTime time.Time `toml:"startTime"` + TimeRunning time.Duration `toml:"timeRunning"` + TestDuration time.Duration `toml:"testDuration"` + OCRContractAddresses []string `toml:"ocrContractAddresses"` + OCRVersion string `toml:"ocrVersion"` + + BootStrapNodeURL string `toml:"bootstrapNodeURL"` + WorkerNodeURLs []string `toml:"workerNodeURLs"` + ChainURL string `toml:"chainURL"` + MockServerURL string `toml:"mockServerURL"` +} + +// SaveState saves the current state of the test to a TOML file +func (o *OCRSoakTest) SaveState() error { + ocrAddresses := o.getContractAddressesString() + workerNodeURLs := make([]string, len(o.workerNodes)) + for i, workerNode := range o.workerNodes { + workerNodeURLs[i] = workerNode.URL() + } + + testState := &OCRSoakTestState{ + Namespace: o.namespace, + OCRRoundStates: o.ocrRoundStates, + TestIssues: o.testIssues, + StartingBlockNum: o.startingBlockNum, + StartTime: o.startTime, + TimeRunning: time.Since(o.startTime), + TestDuration: o.Config.OCR.Common.TestDuration.Duration, + OCRContractAddresses: ocrAddresses, + OCRVersion: *o.Config.OCR.Soak.OCRVersion, + + ChainURL: o.chainClient.GetNetworkConfig().URL, + MockServerURL: "http://mockserver:1080", // TODO: Make this dynamic + BootStrapNodeURL: o.bootstrapNode.URL(), + WorkerNodeURLs: workerNodeURLs, + } + data, err := toml.Marshal(testState) + if err != nil { + return err + } + //nolint:gosec // G306 - let everyone read + if err = os.WriteFile(saveFileLocation, data, 0644); err != nil { + return err + } + fmt.Println("---Saved State---") + fmt.Println(saveFileLocation) + fmt.Println("-----------------") + fmt.Println(string(data)) + fmt.Println("-----------------") + return nil +} + +// LoadState loads the test state from a TOML file +func (o *OCRSoakTest) LoadState() error { + if !o.Interrupted() { + return fmt.Errorf("no save file found at '%s'", saveFileLocation) + } + + testState := &OCRSoakTestState{} + saveData, err := os.ReadFile(saveFileLocation) + if err != nil { + return 
// LoadState loads the test state from a TOML file and reconnects to every
// resource the interrupted run was using (chain, nodes, contracts, mockserver).
// Restoration is order-sensitive: config fields must be repopulated before the
// network is selected from o.Config.Network below.
func (o *OCRSoakTest) LoadState() error {
	if !o.Interrupted() {
		return fmt.Errorf("no save file found at '%s'", saveFileLocation)
	}

	testState := &OCRSoakTestState{}
	saveData, err := os.ReadFile(saveFileLocation)
	if err != nil {
		return err
	}
	err = toml.Unmarshal(saveData, testState)
	if err != nil {
		return err
	}
	fmt.Println("---Loaded State---")
	fmt.Println(saveFileLocation)
	fmt.Println("------------------")
	fmt.Println(string(saveData))
	fmt.Println("------------------")

	// Restore in-memory test state and config from the snapshot
	o.namespace = testState.Namespace
	o.TestReporter = testreporters.OCRSoakTestReporter{
		OCRVersion: testState.OCRVersion,
		StartTime:  testState.StartTime,
	}
	duration := blockchain.StrDuration{Duration: testState.TestDuration}
	o.ocrRoundStates = testState.OCRRoundStates
	o.testIssues = testState.TestIssues
	o.Config.OCR.Common.TestDuration = &duration
	// Remaining runtime = configured duration minus time already spent
	o.timeLeft = testState.TestDuration - testState.TimeRunning
	o.startTime = testState.StartTime
	o.startingBlockNum = testState.StartingBlockNum
	o.Config.OCR.Soak.OCRVersion = &testState.OCRVersion

	// Reconnect to chain and nodes using the saved URLs
	network := networks.MustGetSelectedNetworkConfig(o.Config.Network)[0]
	o.chainClient, err = blockchain.ConnectEVMClient(network, o.log)
	if err != nil {
		return err
	}
	contractDeployer, err := contracts.NewContractDeployer(o.chainClient, o.log)
	if err != nil {
		return err
	}
	o.bootstrapNode, err = client.ConnectPluginNodeURL(testState.BootStrapNodeURL)
	if err != nil {
		return err
	}
	o.workerNodes, err = client.ConnectPluginNodeURLs(testState.WorkerNodeURLs)
	if err != nil {
		return err
	}

	// Re-attach to the previously deployed OCR contracts by address
	if testState.OCRVersion == "1" {
		o.ocrV1Instances = make([]contracts.OffchainAggregator, len(testState.OCRContractAddresses))
		for i, addr := range testState.OCRContractAddresses {
			address := common.HexToAddress(addr)
			instance, err := contractDeployer.LoadOffChainAggregator(&address)
			if err != nil {
				return err
			}
			o.ocrV1Instances[i] = instance
		}
	} else if testState.OCRVersion == "2" {
		o.ocrV2Instances = make([]contracts.OffchainAggregatorV2, len(testState.OCRContractAddresses))
		for i, addr := range testState.OCRContractAddresses {
			address := common.HexToAddress(addr)
			instance, err := contractDeployer.LoadOffChainAggregatorV2(&address)
			if err != nil {
				return err
			}
			o.ocrV2Instances[i] = instance
		}
	}

	o.mockServer, err = ctfClient.ConnectMockServerURL(testState.MockServerURL)
	if err != nil {
		return err
	}

	// err is nil at this point
	return err
}
contractDeployer.LoadOffChainAggregatorV2(&address) + if err != nil { + return err + } + o.ocrV2Instances[i] = instance + } + } + + o.mockServer, err = ctfClient.ConnectMockServerURL(testState.MockServerURL) + if err != nil { + return err + } + + return err +} + +func (o *OCRSoakTest) Resume() { + o.testIssues = append(o.testIssues, &testreporters.TestIssue{ + StartTime: time.Now(), + Message: "Test Resumed", + }) + o.log.Info(). + Str("Total Duration", o.Config.OCR.Common.TestDuration.String()). + Str("Time Left", o.timeLeft.String()). + Msg("Resuming OCR Soak Test") + + ocrAddresses := make([]common.Address, *o.Config.OCR.Soak.NumberOfContracts) + + if *o.Config.OCR.Soak.OCRVersion == "1" { + for i, ocrInstance := range o.ocrV1Instances { + ocrAddresses[i] = common.HexToAddress(ocrInstance.Address()) + } + contractABI, err := offchainaggregator.OffchainAggregatorMetaData.GetAbi() + require.NoError(o.t, err, "Error retrieving OCR contract ABI") + o.filterQuery = geth.FilterQuery{ + Addresses: ocrAddresses, + Topics: [][]common.Hash{{contractABI.Events["AnswerUpdated"].ID}}, + FromBlock: big.NewInt(0).SetUint64(o.startingBlockNum), + } + } else if *o.Config.OCR.Soak.OCRVersion == "2" { + for i, ocrInstance := range o.ocrV2Instances { + ocrAddresses[i] = common.HexToAddress(ocrInstance.Address()) + } + contractABI, err := ocr2aggregator.AggregatorInterfaceMetaData.GetAbi() + require.NoError(o.t, err, "Error retrieving OCR contract ABI") + o.filterQuery = geth.FilterQuery{ + Addresses: ocrAddresses, + Topics: [][]common.Hash{{contractABI.Events["AnswerUpdated"].ID}}, + FromBlock: big.NewInt(0).SetUint64(o.startingBlockNum), + } + } + + startingValue := 5 + o.testLoop(o.timeLeft, startingValue) + + o.log.Info().Msg("Test Complete, collecting on-chain events") + + err := o.collectEvents() + o.log.Error().Err(err).Interface("Query", o.filterQuery).Msg("Error collecting on-chain events, expect malformed report") + o.TestReporter.RecordEvents(o.ocrRoundStates, 
// testLoop is the primary test loop that will trigger new rounds and watch events.
// It runs until testDuration elapses, saving state and exiting with
// interruptedExitCode if the process receives a kill/interrupt/SIGTERM signal
// (e.g. a K8s rebalance), and logging RPC connection losses/restorations as
// test issues along the way.
func (o *OCRSoakTest) testLoop(testDuration time.Duration, newValue int) {
	endTest := time.After(testDuration)
	interruption := make(chan os.Signal, 1)
	//nolint:staticcheck //ignore SA1016 we need to send the os.Kill signal
	signal.Notify(interruption, os.Kill, os.Interrupt, syscall.SIGTERM)
	lastValue := 0
	newRoundTrigger := time.NewTimer(0) // Want to trigger a new round ASAP
	defer newRoundTrigger.Stop()
	o.setFilterQuery()
	err := o.observeOCREvents()
	require.NoError(o.t, err, "Error subscribing to OCR events")

	for {
		select {
		case <-interruption:
			saveStart := time.Now()
			o.log.Warn().Msg("Test interrupted, saving state before shut down")
			o.testIssues = append(o.testIssues, &testreporters.TestIssue{
				StartTime: time.Now(),
				Message:   "Test Interrupted",
			})
			if err := o.SaveState(); err != nil {
				o.log.Error().Err(err).Msg("Error saving state")
			}
			o.log.Warn().Str("Time Taken", time.Since(saveStart).String()).Msg("Saved state")
			os.Exit(interruptedExitCode) // Exit with interrupted code to indicate test was interrupted, not just a normal failure
		case <-endTest:
			return
		case <-newRoundTrigger.C:
			err := o.triggerNewRound(newValue)
			timerReset := o.Config.OCR.Soak.TimeBetweenRounds.Duration
			if err != nil {
				// Retry sooner than the normal round cadence on failure
				timerReset = time.Second * 5
				o.log.Error().Err(err).
					Str("Waiting", timerReset.String()).
					Msg("Error triggering new round, waiting and trying again. Possible connection issues with mockserver")
			}
			newRoundTrigger.Reset(timerReset)

			// Change value for the next round; re-roll until it differs from the
			// last value so every round forces an answer change on-chain
			newValue = rand.Intn(256) + 1 // #nosec G404 - not everything needs to be cryptographically secure
			for newValue == lastValue {
				newValue = rand.Intn(256) + 1 // #nosec G404 - kudos to you if you actually find a way to exploit this
			}
			lastValue = newValue
		case t := <-o.chainClient.ConnectionIssue():
			o.testIssues = append(o.testIssues, &testreporters.TestIssue{
				StartTime: t,
				Message:   "RPC Connection Lost",
			})
		case t := <-o.chainClient.ConnectionRestored():
			o.testIssues = append(o.testIssues, &testreporters.TestIssue{
				StartTime: t,
				Message:   "RPC Connection Restored",
			})
		}
	}
}
Possible connection issues with mockserver") + } + newRoundTrigger.Reset(timerReset) + + // Change value for the next round + newValue = rand.Intn(256) + 1 // #nosec G404 - not everything needs to be cryptographically secure + for newValue == lastValue { + newValue = rand.Intn(256) + 1 // #nosec G404 - kudos to you if you actually find a way to exploit this + } + lastValue = newValue + case t := <-o.chainClient.ConnectionIssue(): + o.testIssues = append(o.testIssues, &testreporters.TestIssue{ + StartTime: t, + Message: "RPC Connection Lost", + }) + case t := <-o.chainClient.ConnectionRestored(): + o.testIssues = append(o.testIssues, &testreporters.TestIssue{ + StartTime: t, + Message: "RPC Connection Restored", + }) + } + } +} + +// completes the test +func (o *OCRSoakTest) complete() { + o.log.Info().Msg("Test Complete, collecting on-chain events") + + err := o.collectEvents() + if err != nil { + o.log.Error().Err(err).Interface("Query", o.filterQuery).Msg("Error collecting on-chain events, expect malformed report") + } + o.TestReporter.RecordEvents(o.ocrRoundStates, o.testIssues) +} + +// setFilterQuery to look for all events that happened +func (o *OCRSoakTest) setFilterQuery() { + ocrAddresses := o.getContractAddresses() + contractABI, err := offchainaggregator.OffchainAggregatorMetaData.GetAbi() + require.NoError(o.t, err, "Error retrieving OCR contract ABI") + o.filterQuery = geth.FilterQuery{ + Addresses: ocrAddresses, + Topics: [][]common.Hash{{contractABI.Events["AnswerUpdated"].ID}}, + FromBlock: big.NewInt(0).SetUint64(o.startingBlockNum), + } + o.log.Debug(). + Interface("Addresses", ocrAddresses). + Str("Topic", contractABI.Events["AnswerUpdated"].ID.Hex()). + Uint64("Starting Block", o.startingBlockNum). + Msg("Filter Query Set") +} + +// observeOCREvents subscribes to OCR events and logs them to the test logger +// WARNING: Should only be used for observation and logging. This is not a reliable way to collect events. 
// observeOCREvents subscribes to OCR events and logs them to the test logger.
// The background goroutine resubscribes with capped exponential backoff if the
// subscription errors; it has no shutdown path and runs for the life of the test.
// WARNING: Should only be used for observation and logging. This is not a reliable way to collect events.
func (o *OCRSoakTest) observeOCREvents() error {
	eventLogs := make(chan types.Log)
	ctx, cancel := context.WithTimeout(testcontext.Get(o.t), 5*time.Second)
	eventSub, err := o.chainClient.SubscribeFilterLogs(ctx, o.filterQuery, eventLogs)
	cancel()
	if err != nil {
		return err
	}

	go func() {
		for {
			select {
			case event := <-eventLogs:
				if *o.Config.OCR.Soak.OCRVersion == "1" {
					// Instance [0] is used purely as an ABI decoder; the event's
					// own Address field identifies the emitting contract
					answerUpdated, err := o.ocrV1Instances[0].ParseEventAnswerUpdated(event)
					if err != nil {
						o.log.Warn().
							Err(err).
							Str("Address", event.Address.Hex()).
							Uint64("Block Number", event.BlockNumber).
							Msg("Error parsing event as AnswerUpdated")
						continue
					}
					o.log.Info().
						Str("Address", event.Address.Hex()).
						Uint64("Block Number", event.BlockNumber).
						Uint64("Round ID", answerUpdated.RoundId.Uint64()).
						Int64("Answer", answerUpdated.Current.Int64()).
						Msg("Answer Updated Event")
				} else if *o.Config.OCR.Soak.OCRVersion == "2" {
					answerUpdated, err := o.ocrV2Instances[0].ParseEventAnswerUpdated(event)
					if err != nil {
						o.log.Warn().
							Err(err).
							Str("Address", event.Address.Hex()).
							Uint64("Block Number", event.BlockNumber).
							Msg("Error parsing event as AnswerUpdated")
						continue
					}
					o.log.Info().
						Str("Address", event.Address.Hex()).
						Uint64("Block Number", event.BlockNumber).
						Uint64("Round ID", answerUpdated.RoundId.Uint64()).
						Int64("Answer", answerUpdated.Current.Int64()).
						Msg("Answer Updated Event")
				}
			case err = <-eventSub.Err():
				// Resubscribe loop: retry doubling the backoff up to 30s until a
				// new subscription succeeds
				backoff := time.Second
				for err != nil {
					o.log.Info().
						Err(err).
						Str("Backoff", backoff.String()).
						Interface("Query", o.filterQuery).
						Msg("Error while subscribed to OCR Logs. Resubscribing")
					ctx, cancel = context.WithTimeout(testcontext.Get(o.t), backoff)
					eventSub, err = o.chainClient.SubscribeFilterLogs(ctx, o.filterQuery, eventLogs)
					cancel()
					if err != nil {
						time.Sleep(backoff)
						backoff = time.Duration(math.Min(float64(backoff)*2, float64(30*time.Second)))
					}
				}
			}
		}
	}()

	return nil
}
Resubscribing") + ctx, cancel = context.WithTimeout(testcontext.Get(o.t), backoff) + eventSub, err = o.chainClient.SubscribeFilterLogs(ctx, o.filterQuery, eventLogs) + cancel() + if err != nil { + time.Sleep(backoff) + backoff = time.Duration(math.Min(float64(backoff)*2, float64(30*time.Second))) + } + } + } + } + }() + + return nil +} + +// triggers a new OCR round by setting a new mock adapter value +func (o *OCRSoakTest) triggerNewRound(newValue int) error { + if len(o.ocrRoundStates) > 0 { + o.ocrRoundStates[len(o.ocrRoundStates)-1].EndTime = time.Now() + } + + var err error + if *o.Config.OCR.Soak.OCRVersion == "1" { + err = actions.SetAllAdapterResponsesToTheSameValue(newValue, o.ocrV1Instances, o.workerNodes, o.mockServer) + } else if *o.Config.OCR.Soak.OCRVersion == "2" { + err = actions.SetOCR2AllAdapterResponsesToTheSameValue(newValue, o.ocrV2Instances, o.workerNodes, o.mockServer) + } + if err != nil { + return err + } + + expectedState := &testreporters.OCRRoundState{ + StartTime: time.Now(), + Answer: int64(newValue), + FoundEvents: make(map[string][]*testreporters.FoundEvent), + } + if *o.Config.OCR.Soak.OCRVersion == "1" { + for _, ocrInstance := range o.ocrV1Instances { + expectedState.FoundEvents[ocrInstance.Address()] = make([]*testreporters.FoundEvent, 0) + } + } else if *o.Config.OCR.Soak.OCRVersion == "2" { + for _, ocrInstance := range o.ocrV2Instances { + expectedState.FoundEvents[ocrInstance.Address()] = make([]*testreporters.FoundEvent, 0) + } + } + + o.ocrRoundStates = append(o.ocrRoundStates, expectedState) + o.log.Info(). + Int("Value", newValue). 
+ Msg("Starting a New OCR Round") + return nil +} + +func (o *OCRSoakTest) collectEvents() error { + start := time.Now() + if len(o.ocrRoundStates) == 0 { + return fmt.Errorf("error collecting on-chain events, no rounds have been started") + } + o.ocrRoundStates[len(o.ocrRoundStates)-1].EndTime = start // Set end time for last expected event + o.log.Info().Msg("Collecting on-chain events") + + // We must retrieve the events, use exponential backoff for timeout to retry + timeout := time.Second * 15 + o.log.Info().Interface("Filter Query", o.filterQuery).Str("Timeout", timeout.String()).Msg("Retrieving on-chain events") + + ctx, cancel := context.WithTimeout(testcontext.Get(o.t), timeout) + contractEvents, err := o.chainClient.FilterLogs(ctx, o.filterQuery) + cancel() + for err != nil { + o.log.Info().Interface("Filter Query", o.filterQuery).Str("Timeout", timeout.String()).Msg("Retrieving on-chain events") + ctx, cancel := context.WithTimeout(testcontext.Get(o.t), timeout) + contractEvents, err = o.chainClient.FilterLogs(ctx, o.filterQuery) + cancel() + if err != nil { + o.log.Warn().Interface("Filter Query", o.filterQuery).Str("Timeout", timeout.String()).Msg("Error collecting on-chain events, trying again") + timeout *= 2 + } + } + + sortedFoundEvents := make([]*testreporters.FoundEvent, 0) + for _, event := range contractEvents { + if *o.Config.OCR.Soak.OCRVersion == "1" { + answerUpdated, err := o.ocrV1Instances[0].ParseEventAnswerUpdated(event) + if err != nil { + return fmt.Errorf("error parsing EventAnswerUpdated for event: %v, %w", event, err) + } + sortedFoundEvents = append(sortedFoundEvents, &testreporters.FoundEvent{ + StartTime: time.Unix(answerUpdated.UpdatedAt.Int64(), 0), + Address: event.Address.Hex(), + Answer: answerUpdated.Current.Int64(), + RoundID: answerUpdated.RoundId.Uint64(), + BlockNumber: event.BlockNumber, + }) + } else if *o.Config.OCR.Soak.OCRVersion == "2" { + answerUpdated, err := o.ocrV2Instances[0].ParseEventAnswerUpdated(event) + 
if err != nil { + return fmt.Errorf("error parsing EventAnswerUpdated for event: %v, %w", event, err) + } + sortedFoundEvents = append(sortedFoundEvents, &testreporters.FoundEvent{ + StartTime: time.Unix(answerUpdated.UpdatedAt.Int64(), 0), + Address: event.Address.Hex(), + Answer: answerUpdated.Current.Int64(), + RoundID: answerUpdated.RoundId.Uint64(), + BlockNumber: event.BlockNumber, + }) + } + } + + // Sort our events by time to make sure they are in order (don't trust RPCs) + sort.Slice(sortedFoundEvents, func(i, j int) bool { + return sortedFoundEvents[i].StartTime.Before(sortedFoundEvents[j].StartTime) + }) + + // Now match each found event with the expected event time frame + expectedIndex := 0 + for _, event := range sortedFoundEvents { + if !event.StartTime.Before(o.ocrRoundStates[expectedIndex].EndTime) { + expectedIndex++ + if expectedIndex >= len(o.ocrRoundStates) { + o.log.Warn(). + Str("Event Time", event.StartTime.String()). + Str("Expected End Time", o.ocrRoundStates[expectedIndex].EndTime.String()). + Msg("Found events after last expected end time, adding event to that final report, things might be weird") + } + } + o.ocrRoundStates[expectedIndex].FoundEvents[event.Address] = append(o.ocrRoundStates[expectedIndex].FoundEvents[event.Address], event) + o.ocrRoundStates[expectedIndex].TimeLineEvents = append(o.ocrRoundStates[expectedIndex].TimeLineEvents, event) + } + + o.log.Info(). + Str("Time", time.Since(start).String()). 
+ Msg("Collected on-chain events") + return nil +} + +// ensureValues ensures that all values needed to run the test are present +func (o *OCRSoakTest) ensureInputValues() error { + ocrConfig := o.Config.OCR.Soak + if *ocrConfig.OCRVersion != "1" && *ocrConfig.OCRVersion != "2" { + return fmt.Errorf("OCR version must be 1 or 2, found %s", *ocrConfig.OCRVersion) + } + if ocrConfig.NumberOfContracts != nil && *ocrConfig.NumberOfContracts <= 0 { + return fmt.Errorf("Number of OCR contracts must be set and greater than 0, found %d", ocrConfig.NumberOfContracts) + } + if o.Config.Common.PluginNodeFunding != nil && *o.Config.Common.PluginNodeFunding <= 0 { + return fmt.Errorf("Plugin node funding must be greater than 0, found %f", *o.Config.Common.PluginNodeFunding) + } + if o.Config.OCR.Common.TestDuration != nil && o.Config.OCR.Common.TestDuration.Duration <= time.Minute { + return fmt.Errorf("Test duration must be greater than 1 minute, found %s", o.Config.OCR.Common.TestDuration) + } + if ocrConfig.TimeBetweenRounds != nil && ocrConfig.TimeBetweenRounds.Duration >= time.Hour { + return fmt.Errorf("Time between rounds must be less than 1 hour, found %s", ocrConfig.TimeBetweenRounds) + } + if ocrConfig.TimeBetweenRounds != nil && ocrConfig.TimeBetweenRounds.Duration < time.Second*30 { + return fmt.Errorf("Time between rounds must be greater or equal to 30 seconds, found %s", ocrConfig.TimeBetweenRounds) + } + + return nil +} + +// getContractAddressesString returns the addresses of all OCR contracts deployed as a string slice +func (o *OCRSoakTest) getContractAddressesString() []string { + contractAddresses := []string{} + if len(o.ocrV1Instances) != 0 { + for _, ocrInstance := range o.ocrV1Instances { + contractAddresses = append(contractAddresses, ocrInstance.Address()) + } + } else if len(o.ocrV2Instances) != 0 { + if len(o.ocrV2Instances) != 0 { + for _, ocrInstance := range o.ocrV2Instances { + contractAddresses = append(contractAddresses, ocrInstance.Address()) + 
} + } + } + + return contractAddresses +} + +// getContractAddresses returns the addresses of all OCR contracts deployed +func (o *OCRSoakTest) getContractAddresses() []common.Address { + contractAddresses := []common.Address{} + if len(o.ocrV1Instances) != 0 { + for _, ocrInstance := range o.ocrV1Instances { + contractAddresses = append(contractAddresses, common.HexToAddress(ocrInstance.Address())) + } + } else if len(o.ocrV2Instances) != 0 { + if len(o.ocrV2Instances) != 0 { + for _, ocrInstance := range o.ocrV2Instances { + contractAddresses = append(contractAddresses, common.HexToAddress(ocrInstance.Address())) + } + } + } + + return contractAddresses +} diff --git a/integration-tests/testsetups/profile.go b/integration-tests/testsetups/profile.go new file mode 100644 index 00000000..eb557b26 --- /dev/null +++ b/integration-tests/testsetups/profile.go @@ -0,0 +1,76 @@ +package testsetups + +//revive:disable:dot-imports +import ( + "time" + + . "github.com/onsi/gomega" + "golang.org/x/sync/errgroup" + + "github.com/goplugin/plugin-testing-framework/blockchain" + "github.com/goplugin/plugin-testing-framework/k8s/environment" + reportModel "github.com/goplugin/plugin-testing-framework/testreporters" + + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/testreporters" +) + +// PluginProfileTest runs a piece of code on Plugin nodes with PPROF enabled, then downloads the PPROF results +type PluginProfileTest struct { + Inputs PluginProfileTestInputs + TestReporter testreporters.PluginProfileTestReporter + env *environment.Environment + c blockchain.EVMClient +} + +// PluginProfileTestInputs are the inputs necessary to run a profiling tests +type PluginProfileTestInputs struct { + ProfileFunction func(*client.PluginClient) + ProfileDuration time.Duration + PluginNodes []*client.PluginK8sClient +} + +// NewPluginProfileTest prepares a new keeper Plugin profiling test to be run +func NewPluginProfileTest(inputs 
PluginProfileTestInputs) *PluginProfileTest { + return &PluginProfileTest{ + Inputs: inputs, + } +} + +// Setup prepares contracts for the test +func (c *PluginProfileTest) Setup(env *environment.Environment) { + c.ensureInputValues() + c.env = env +} + +// Run runs the profiling test +func (c *PluginProfileTest) Run() { + profileGroup := new(errgroup.Group) + for ni, cl := range c.Inputs.PluginNodes { + pluginNode := cl + nodeIndex := ni + profileGroup.Go(func() error { + profileResults, err := pluginNode.Profile(c.Inputs.ProfileDuration, c.Inputs.ProfileFunction) + profileResults.NodeIndex = nodeIndex + if err != nil { + return err + } + c.TestReporter.Results = append(c.TestReporter.Results, profileResults) + return nil + }) + } + Expect(profileGroup.Wait()).ShouldNot(HaveOccurred(), "Error while gathering plugin Profile tests") +} + +// Networks returns the networks that the test is running on +func (c *PluginProfileTest) TearDownVals() (*environment.Environment, []*client.PluginK8sClient, reportModel.TestReporter, blockchain.EVMClient) { + return c.env, c.Inputs.PluginNodes, &c.TestReporter, c.c +} + +// ensureValues ensures that all values needed to run the test are present +func (c *PluginProfileTest) ensureInputValues() { + Expect(c.Inputs.ProfileFunction).ShouldNot(BeNil(), "Forgot to provide a function to profile") + Expect(c.Inputs.ProfileDuration.Seconds()).Should(BeNumerically(">=", 1), "Time to profile should be at least 1 second") + Expect(c.Inputs.PluginNodes).ShouldNot(BeNil(), "Plugin nodes you want to profile should be provided") + Expect(len(c.Inputs.PluginNodes)).Should(BeNumerically(">", 0), "No Plugin nodes provided to profile") +} diff --git a/integration-tests/types/config/node/core.go b/integration-tests/types/config/node/core.go new file mode 100644 index 00000000..d92b8b36 --- /dev/null +++ b/integration-tests/types/config/node/core.go @@ -0,0 +1,239 @@ +package node + +import ( + "bytes" + "fmt" + "math/big" + "os" + "time" + + 
// NewBaseConfig returns the default node configuration shared by all
// integration-test nodes: debug JSON logging, an open web server suitable for
// tests (no TLS, permissive CORS, long sessions), and log poller / feeds
// manager / CSA-key features enabled.
func NewBaseConfig() *plugin.Config {
	return &plugin.Config{
		Core: toml.Core{
			RootDir: ptr.Ptr("/home/plugin"),
			Database: toml.Database{
				MaxIdleConns:     ptr.Ptr(int64(20)),
				MaxOpenConns:     ptr.Ptr(int64(40)),
				MigrateOnStartup: ptr.Ptr(true),
			},
			Log: toml.Log{
				Level:       ptr.Ptr(toml.LogLevel(zapcore.DebugLevel)),
				JSONConsole: ptr.Ptr(true),
				File: toml.LogFile{
					// 0 disables size-based log file rotation
					MaxSize: ptr.Ptr(utils.FileSize(0)),
				},
			},
			WebServer: toml.WebServer{
				AllowOrigins:   ptr.Ptr("*"),
				HTTPPort:       ptr.Ptr[uint16](6688),
				SecureCookies:  ptr.Ptr(false),
				SessionTimeout: commonconfig.MustNewDuration(time.Hour * 999),
				TLS: toml.WebServerTLS{
					// 0 disables the HTTPS listener
					HTTPSPort: ptr.Ptr[uint16](0),
				},
				RateLimit: toml.WebServerRateLimit{
					Authenticated:   ptr.Ptr(int64(2000)),
					Unauthenticated: ptr.Ptr(int64(100)),
				},
			},
			Feature: toml.Feature{
				LogPoller:    ptr.Ptr(true),
				FeedsManager: ptr.Ptr(true),
				UICSAKeys:    ptr.Ptr(true),
			},
			P2P: toml.P2P{},
		},
	}
}
} + return baseConf +} + +func NewConfigFromToml(tomlFile string, opts ...NodeConfigOpt) (*plugin.Config, error) { + readFile, err := os.ReadFile(tomlFile) + if err != nil { + return nil, err + } + var cfg plugin.Config + if err != nil { + return nil, err + } + err = config.DecodeTOML(bytes.NewReader(readFile), &cfg) + if err != nil { + return nil, err + } + for _, opt := range opts { + opt(&cfg) + } + return &cfg, nil +} + +func WithOCR1() NodeConfigOpt { + return func(c *plugin.Config) { + c.OCR = toml.OCR{ + Enabled: ptr.Ptr(true), + } + } +} + +func WithOCR2() NodeConfigOpt { + return func(c *plugin.Config) { + c.OCR2 = toml.OCR2{ + Enabled: ptr.Ptr(true), + } + } +} + +func WithP2Pv2() NodeConfigOpt { + return func(c *plugin.Config) { + c.P2P.V2 = toml.P2PV2{ + ListenAddresses: &[]string{"0.0.0.0:6690"}, + } + } +} + +func WithTracing() NodeConfigOpt { + return func(c *plugin.Config) { + c.Tracing = toml.Tracing{ + Enabled: ptr.Ptr(true), + CollectorTarget: ptr.Ptr("otel-collector:4317"), + // ksortable unique id + NodeID: ptr.Ptr(ksuid.New().String()), + SamplingRatio: ptr.Ptr(1.0), + Mode: ptr.Ptr("unencrypted"), + Attributes: map[string]string{ + "env": "smoke", + }, + } + } +} + +func SetChainConfig( + cfg *plugin.Config, + wsUrls, + httpUrls []string, + chain blockchain.EVMNetwork, + forwarders bool, +) { + if cfg.EVM == nil { + var nodes []*evmcfg.Node + for i := range wsUrls { + node := evmcfg.Node{ + Name: ptr.Ptr(fmt.Sprintf("node_%d_%s", i, chain.Name)), + WSURL: it_utils.MustURL(wsUrls[i]), + HTTPURL: it_utils.MustURL(httpUrls[i]), + SendOnly: ptr.Ptr(false), + } + + nodes = append(nodes, &node) + } + var chainConfig evmcfg.Chain + if chain.Simulated { + chainConfig = evmcfg.Chain{ + AutoCreateKey: ptr.Ptr(true), + FinalityDepth: ptr.Ptr[uint32](1), + MinContractPayment: commonassets.NewLinkFromJuels(0), + } + } + cfg.EVM = evmcfg.EVMConfigs{ + { + ChainID: ubig.New(big.NewInt(chain.ChainID)), + Chain: chainConfig, + Nodes: nodes, + }, + } + if 
forwarders { + cfg.EVM[0].Transactions = evmcfg.Transactions{ + ForwardersEnabled: ptr.Ptr(true), + } + } + } +} + +func WithPrivateEVMs(networks []blockchain.EVMNetwork) NodeConfigOpt { + var evmConfigs []*evmcfg.EVMConfig + for _, network := range networks { + evmConfigs = append(evmConfigs, &evmcfg.EVMConfig{ + ChainID: ubig.New(big.NewInt(network.ChainID)), + Chain: evmcfg.Chain{ + AutoCreateKey: ptr.Ptr(true), + FinalityDepth: ptr.Ptr[uint32](50), + MinContractPayment: commonassets.NewLinkFromJuels(0), + LogPollInterval: commonconfig.MustNewDuration(1 * time.Second), + HeadTracker: evmcfg.HeadTracker{ + HistoryDepth: ptr.Ptr(uint32(100)), + }, + GasEstimator: evmcfg.GasEstimator{ + LimitDefault: ptr.Ptr(uint32(6000000)), + PriceMax: assets.GWei(200), + FeeCapDefault: assets.GWei(200), + }, + }, + Nodes: []*evmcfg.Node{ + { + Name: ptr.Ptr(network.Name), + WSURL: it_utils.MustURL(network.URLs[0]), + HTTPURL: it_utils.MustURL(network.HTTPURLs[0]), + SendOnly: ptr.Ptr(false), + }, + }, + }) + } + return func(c *plugin.Config) { + c.EVM = evmConfigs + } +} + +func WithVRFv2EVMEstimator(addresses []string, maxGasPriceGWei int64) NodeConfigOpt { + est := assets.GWei(maxGasPriceGWei) + + var keySpecicifArr []evmcfg.KeySpecific + for _, addr := range addresses { + keySpecicifArr = append(keySpecicifArr, evmcfg.KeySpecific{ + Key: ptr.Ptr(ethkey.EIP55Address(addr)), + GasEstimator: evmcfg.KeySpecificGasEstimator{ + PriceMax: est, + }, + }) + } + return func(c *plugin.Config) { + c.EVM[0].KeySpecific = keySpecicifArr + c.EVM[0].Chain.GasEstimator = evmcfg.GasEstimator{ + LimitDefault: ptr.Ptr[uint32](3500000), + } + c.EVM[0].Chain.Transactions = evmcfg.Transactions{ + MaxQueued: ptr.Ptr[uint32](10000), + } + + } +} + +func WithLogPollInterval(interval time.Duration) NodeConfigOpt { + return func(c *plugin.Config) { + c.EVM[0].Chain.LogPollInterval = commonconfig.MustNewDuration(interval) + } +} diff --git a/integration-tests/types/config/node/defaults/sample.toml 
b/integration-tests/types/config/node/defaults/sample.toml new file mode 100644 index 00000000..b0e1bc2a --- /dev/null +++ b/integration-tests/types/config/node/defaults/sample.toml @@ -0,0 +1,21 @@ +[Feature] +LogPoller = true + +[Database] +MaxIdleConns = 50 +MaxOpenConns = 50 + +[OCR2] +Enabled = true +DefaultTransactionQueueDepth = 0 + +[OCR] +Enabled = false +DefaultTransactionQueueDepth = 0 + +[P2P] +[P2P.V2] +ListenAddresses = ['0.0.0.0:6690'] +AnnounceAddresses = ['0.0.0.0:6690'] +DeltaDial = '500ms' +DeltaReconcile = '5s' \ No newline at end of file diff --git a/integration-tests/types/envcommon/common.go b/integration-tests/types/envcommon/common.go new file mode 100644 index 00000000..bdabcaf9 --- /dev/null +++ b/integration-tests/types/envcommon/common.go @@ -0,0 +1,21 @@ +package envcommon + +import ( + "encoding/json" + "io" + "os" +) + +func ParseJSONFile(path string, v any) error { + jsonFile, err := os.Open(path) + if err != nil { + return err + } + defer jsonFile.Close() + b, _ := io.ReadAll(jsonFile) + err = json.Unmarshal(b, v) + if err != nil { + return err + } + return nil +} diff --git a/integration-tests/types/testconfigs.go b/integration-tests/types/testconfigs.go new file mode 100644 index 00000000..6e3e3759 --- /dev/null +++ b/integration-tests/types/testconfigs.go @@ -0,0 +1,50 @@ +package types + +import ( + "github.com/goplugin/plugin-testing-framework/testreporters" + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" +) + +type VRFv2TestConfig interface { + tc.CommonTestConfig + tc.GlobalTestConfig + tc.VRFv2TestConfig +} + +type VRFv2PlusTestConfig interface { + tc.CommonTestConfig + tc.GlobalTestConfig + tc.VRFv2PlusTestConfig +} + +type FunctionsTestConfig interface { + tc.CommonTestConfig + tc.GlobalTestConfig + tc.FunctionsTestConfig +} + +type AutomationTestConfig interface { + tc.GlobalTestConfig + tc.CommonTestConfig + tc.UpgradeablePluginTestConfig +} + +type KeeperBenchmarkTestConfig interface { + 
tc.GlobalTestConfig + tc.CommonTestConfig + tc.KeeperTestConfig + tc.NamedConfiguration + testreporters.GrafanaURLProvider +} + +type OcrTestConfig interface { + tc.GlobalTestConfig + tc.CommonTestConfig + tc.OcrTestConfig +} + +type Ocr2TestConfig interface { + tc.GlobalTestConfig + tc.CommonTestConfig + tc.Ocr2TestConfig +} diff --git a/integration-tests/types/types.go b/integration-tests/types/types.go new file mode 100644 index 00000000..c45c39f4 --- /dev/null +++ b/integration-tests/types/types.go @@ -0,0 +1,52 @@ +package types + +import "github.com/ethereum/go-ethereum/common" + +type MercuryServerType string + +const ( + MS_WSRPC MercuryServerType = "wsrpc" + MS_WS MercuryServerType = "ws" + MS_REST MercuryServerType = "rest" + MS_ALL MercuryServerType = "all" +) + +type MercuryServerOpts struct { + Server struct { + DevMode bool + AutomaticMigrations bool + Service string + Port string + } + RPC struct { + PrivateKey string + NodePubKeys []string + Port string + } + Database struct { + Url string + WriterInstanceUrl string + EncryptionKey string + } + Bootstrap struct { + Username string + Password string + } + WSRPCUrlInternal string + WSRPCUrlExternal string +} + +type User struct { + Id string + Username string + Password string +} + +type MercuryOCRConfig struct { + Signers []common.Address + Transmitters [][32]byte + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte +} diff --git a/integration-tests/universal/log_poller/gun.go b/integration-tests/universal/log_poller/gun.go new file mode 100644 index 00000000..b1086eff --- /dev/null +++ b/integration-tests/universal/log_poller/gun.go @@ -0,0 +1,79 @@ +package logpoller + +import ( + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/rs/zerolog" + + "github.com/goplugin/wasp" + + "github.com/goplugin/pluginv3.0/integration-tests/contracts" +) + +/* LogEmitterGun is a gun that constantly emits logs from a contract */ +type LogEmitterGun 
struct { + contract *contracts.LogEmitter + eventsToEmit []abi.Event + logger zerolog.Logger + eventsPerTx int +} + +type Counter struct { + mu *sync.Mutex + value int +} + +func NewLogEmitterGun( + contract *contracts.LogEmitter, + eventsToEmit []abi.Event, + eventsPerTx int, + logger zerolog.Logger, +) *LogEmitterGun { + return &LogEmitterGun{ + contract: contract, + eventsToEmit: eventsToEmit, + eventsPerTx: eventsPerTx, + logger: logger, + } +} + +func (m *LogEmitterGun) Call(l *wasp.Generator) *wasp.Response { + localCounter := 0 + logEmitter := (*m.contract) + address := logEmitter.Address() + for _, event := range m.eventsToEmit { + m.logger.Debug().Str("Emitter address", address.String()).Str("Event type", event.Name).Msg("Emitting log from emitter") + var err error + switch event.Name { + case "Log1": + _, err = logEmitter.EmitLogInts(getIntSlice(m.eventsPerTx)) + case "Log2": + _, err = logEmitter.EmitLogIntsIndexed(getIntSlice(m.eventsPerTx)) + case "Log3": + _, err = logEmitter.EmitLogStrings(getStringSlice(m.eventsPerTx)) + default: + err = fmt.Errorf("unknown event name: %s", event.Name) + } + + if err != nil { + return &wasp.Response{Error: err.Error(), Failed: true} + } + localCounter++ + } + + // I don't think that will work as expected, I should atomically read the value and save it, so maybe just a mutex? 
+ if counter, ok := l.InputSharedData().(*Counter); ok { + counter.mu.Lock() + defer counter.mu.Unlock() + counter.value += localCounter + } else { + return &wasp.Response{ + Error: "SharedData did not contain a Counter", + Failed: true, + } + } + + return &wasp.Response{} +} diff --git a/integration-tests/universal/log_poller/helpers.go b/integration-tests/universal/log_poller/helpers.go new file mode 100644 index 00000000..dcca60ea --- /dev/null +++ b/integration-tests/universal/log_poller/helpers.go @@ -0,0 +1,1338 @@ +package logpoller + +import ( + "bytes" + "context" + "fmt" + "math/big" + "math/rand" + "sort" + "strings" + "sync" + "testing" + "time" + + geth "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + geth_types "github.com/ethereum/go-ethereum/core/types" + "github.com/jmoiron/sqlx" + "github.com/onsi/gomega" + "github.com/rs/zerolog" + "github.com/scylladb/go-reflectx" + "github.com/stretchr/testify/require" + + "github.com/goplugin/wasp" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" + "github.com/goplugin/plugin-testing-framework/blockchain" + ctf_test_env "github.com/goplugin/plugin-testing-framework/docker/test_env" + "github.com/goplugin/plugin-testing-framework/logging" + "github.com/goplugin/plugin-testing-framework/networks" + "github.com/goplugin/plugin-testing-framework/utils/ptr" + "github.com/goplugin/plugin-testing-framework/utils/testcontext" + "github.com/goplugin/pluginv3.0/integration-tests/actions" + "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/integration-tests/contracts" + "github.com/goplugin/pluginv3.0/integration-tests/contracts/ethereum" + "github.com/goplugin/pluginv3.0/integration-tests/docker/test_env" + "github.com/goplugin/pluginv3.0/integration-tests/types/config/node" + evmcfg "github.com/goplugin/pluginv3.0/v2/core/chains/evm/config/toml" + 
"github.com/goplugin/pluginv3.0/v2/core/chains/evm/logpoller" + cltypes "github.com/goplugin/pluginv3.0/v2/core/chains/evm/types" + "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/automation_utils_2_1" + le "github.com/goplugin/pluginv3.0/v2/core/gethwrappers/generated/log_emitter" + core_logger "github.com/goplugin/pluginv3.0/v2/core/logger" + "github.com/goplugin/pluginv3.0/v2/core/services/pg" + + tc "github.com/goplugin/pluginv3.0/integration-tests/testconfig" + lp_config "github.com/goplugin/pluginv3.0/integration-tests/testconfig/log_poller" +) + +var ( + EmitterABI, _ = abi.JSON(strings.NewReader(le.LogEmitterABI)) + automationUtilsABI = cltypes.MustGetABI(automation_utils_2_1.AutomationUtilsABI) + bytes0 = [32]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } // bytes representation of 0x0000000000000000000000000000000000000000000000000000000000000000 + +) + +var registerSingleTopicFilter = func(registry contracts.KeeperRegistry, upkeepID *big.Int, emitterAddress common.Address, topic common.Hash) error { + logTriggerConfigStruct := automation_utils_2_1.LogTriggerConfig{ + ContractAddress: emitterAddress, + FilterSelector: 0, + Topic0: topic, + Topic1: bytes0, + Topic2: bytes0, + Topic3: bytes0, + } + encodedLogTriggerConfig, err := automationUtilsABI.Methods["_logTriggerConfig"].Inputs.Pack(&logTriggerConfigStruct) + if err != nil { + return err + } + + err = registry.SetUpkeepTriggerConfig(upkeepID, encodedLogTriggerConfig) + if err != nil { + return err + } + + return nil +} + +// Currently Unused November 8, 2023, Might be useful in the near future so keeping it here for now +// this is not really possible, log trigger doesn't support multiple topics, even if log poller does +// var registerMultipleTopicsFilter = func(registry contracts.KeeperRegistry, upkeepID *big.Int, emitterAddress common.Address, topics []abi.Event) error { +// if len(topics) > 4 { +// return 
errors.New("Cannot register more than 4 topics") +// } + +// var getTopic = func(topics []abi.Event, i int) common.Hash { +// if i > len(topics)-1 { +// return bytes0 +// } + +// return topics[i].ID +// } + +// var getFilterSelector = func(topics []abi.Event) (uint8, error) { +// switch len(topics) { +// case 0: +// return 0, errors.New("Cannot register filter with 0 topics") +// case 1: +// return 0, nil +// case 2: +// return 1, nil +// case 3: +// return 3, nil +// case 4: +// return 7, nil +// default: +// return 0, errors.New("Cannot register filter with more than 4 topics") +// } +// } + +// filterSelector, err := getFilterSelector(topics) +// if err != nil { +// return err +// } + +// logTriggerConfigStruct := automation_utils_2_1.LogTriggerConfig{ +// ContractAddress: emitterAddress, +// FilterSelector: filterSelector, +// Topic0: getTopic(topics, 0), +// Topic1: getTopic(topics, 1), +// Topic2: getTopic(topics, 2), +// Topic3: getTopic(topics, 3), +// } +// encodedLogTriggerConfig, err := automationUtilsABI.Methods["_logTriggerConfig"].Inputs.Pack(&logTriggerConfigStruct) +// if err != nil { +// return err +// } + +// err = registry.SetUpkeepTriggerConfig(upkeepID, encodedLogTriggerConfig) +// if err != nil { +// return err +// } + +// return nil +// } + +// NewOrm returns a new logpoller.DbORM instance +func NewOrm(logger core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_test_env.PostgresDb) (*logpoller.DbORM, *sqlx.DB, error) { + dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable", "127.0.0.1", postgresDb.ExternalPort, postgresDb.User, postgresDb.Password, postgresDb.DbName) + db, err := sqlx.Open("postgres", dsn) + if err != nil { + return nil, db, err + } + + db.MapperFunc(reflectx.CamelToSnakeASCII) + return logpoller.NewORM(chainID, db, logger, pg.NewQConfig(false)), db, nil +} + +type ExpectedFilter struct { + emitterAddress common.Address + topic common.Hash +} + +// GetExpectedFilters returns a slice of 
ExpectedFilter structs based on the provided log emitters and config +func GetExpectedFilters(logEmitters []*contracts.LogEmitter, cfg *lp_config.Config) []ExpectedFilter { + expectedFilters := make([]ExpectedFilter, 0) + for _, emitter := range logEmitters { + for _, event := range cfg.General.EventsToEmit { + expectedFilters = append(expectedFilters, ExpectedFilter{ + emitterAddress: (*emitter).Address(), + topic: event.ID, + }) + } + } + + return expectedFilters +} + +// NodeHasExpectedFilters returns true if the provided node has all the expected filters registered +func NodeHasExpectedFilters(expectedFilters []ExpectedFilter, logger core_logger.SugaredLogger, chainID *big.Int, postgresDb *ctf_test_env.PostgresDb) (bool, string, error) { + orm, db, err := NewOrm(logger, chainID, postgresDb) + if err != nil { + return false, "", err + } + + defer db.Close() + knownFilters, err := orm.LoadFilters() + if err != nil { + return false, "", err + } + + for _, expectedFilter := range expectedFilters { + filterFound := false + for _, knownFilter := range knownFilters { + if bytes.Equal(expectedFilter.emitterAddress.Bytes(), knownFilter.Addresses[0].Bytes()) && bytes.Equal(expectedFilter.topic.Bytes(), knownFilter.EventSigs[0].Bytes()) { + filterFound = true + break + } + } + + if !filterFound { + return false, fmt.Sprintf("no filter found for emitter %s and topic %s", expectedFilter.emitterAddress.String(), expectedFilter.topic.Hex()), nil + } + } + + return true, "", nil +} + +// randomWait waits for a random amount of time between minMilliseconds and maxMilliseconds +func randomWait(minMilliseconds, maxMilliseconds int) { + rand.New(rand.NewSource(time.Now().UnixNano())) + randomMilliseconds := rand.Intn(maxMilliseconds-minMilliseconds+1) + minMilliseconds + time.Sleep(time.Duration(randomMilliseconds) * time.Millisecond) +} + +type LogEmitterChannel struct { + logsEmitted int + err error +} + +// getIntSlice returns a slice of ints of the provided length +func 
getIntSlice(length int) []int { + result := make([]int, length) + for i := 0; i < length; i++ { + result[i] = i + } + + return result +} + +// getStringSlice returns a slice of strings of the provided length +func getStringSlice(length int) []string { + result := make([]string, length) + for i := 0; i < length; i++ { + result[i] = "amazing event" + } + + return result +} + +// emitEvents emits events from the provided log emitter concurrently according to the provided config +func emitEvents(ctx context.Context, l zerolog.Logger, logEmitter *contracts.LogEmitter, cfg *lp_config.Config, wg *sync.WaitGroup, results chan LogEmitterChannel) { + address := (*logEmitter).Address().String() + localCounter := 0 + defer wg.Done() + for i := 0; i < *cfg.LoopedConfig.ExecutionCount; i++ { + for _, event := range cfg.General.EventsToEmit { + select { + case <-ctx.Done(): + l.Warn().Str("Emitter address", address).Msg("Context cancelled, not emitting events") + return + default: + l.Debug().Str("Emitter address", address).Str("Event type", event.Name).Str("index", fmt.Sprintf("%d/%d", (i+1), cfg.LoopedConfig.ExecutionCount)).Msg("Emitting log from emitter") + var err error + switch event.Name { + case "Log1": + _, err = (*logEmitter).EmitLogInts(getIntSlice(*cfg.General.EventsPerTx)) + case "Log2": + _, err = (*logEmitter).EmitLogIntsIndexed(getIntSlice(*cfg.General.EventsPerTx)) + case "Log3": + _, err = (*logEmitter).EmitLogStrings(getStringSlice(*cfg.General.EventsPerTx)) + case "Log4": + _, err = (*logEmitter).EmitLogIntMultiIndexed(1, 1, *cfg.General.EventsPerTx) + default: + err = fmt.Errorf("unknown event name: %s", event.Name) + } + + if err != nil { + results <- LogEmitterChannel{ + err: err, + } + return + } + localCounter += *cfg.General.EventsPerTx + + randomWait(*cfg.LoopedConfig.MinEmitWaitTimeMs, *cfg.LoopedConfig.MaxEmitWaitTimeMs) + } + + if (i+1)%10 == 0 { + l.Info().Str("Emitter address", address).Str("Index", fmt.Sprintf("%d/%d", i+1, 
*cfg.LoopedConfig.ExecutionCount)).Msg("Emitted all three events") + } + } + } + + l.Info().Str("Emitter address", address).Int("Total logs emitted", localCounter).Msg("Finished emitting events") + + results <- LogEmitterChannel{ + logsEmitted: localCounter, + err: nil, + } +} + +// LogPollerHasFinalisedEndBlock returns true if all CL nodes have finalised processing the provided end block +func LogPollerHasFinalisedEndBlock(endBlock int64, chainID *big.Int, l zerolog.Logger, coreLogger core_logger.SugaredLogger, nodes *test_env.ClCluster) (bool, error) { + wg := &sync.WaitGroup{} + + type boolQueryResult struct { + nodeName string + hasFinalised bool + finalizedBlock int64 + err error + } + + endBlockCh := make(chan boolQueryResult, len(nodes.Nodes)-1) + ctx, cancelFn := context.WithCancel(context.Background()) + + for i := 1; i < len(nodes.Nodes); i++ { + wg.Add(1) + + go func(clNode *test_env.ClNode, r chan boolQueryResult) { + defer wg.Done() + select { + case <-ctx.Done(): + return + default: + orm, db, err := NewOrm(coreLogger, chainID, clNode.PostgresDb) + if err != nil { + r <- boolQueryResult{ + nodeName: clNode.ContainerName, + hasFinalised: false, + err: err, + } + } + + defer db.Close() + + latestBlock, err := orm.SelectLatestBlock() + if err != nil { + r <- boolQueryResult{ + nodeName: clNode.ContainerName, + hasFinalised: false, + err: err, + } + } + + r <- boolQueryResult{ + nodeName: clNode.ContainerName, + finalizedBlock: latestBlock.FinalizedBlockNumber, + hasFinalised: latestBlock.FinalizedBlockNumber > endBlock, + err: nil, + } + + } + }(nodes.Nodes[i], endBlockCh) + } + + var err error + allFinalisedCh := make(chan bool, 1) + + go func() { + foundMap := make(map[string]bool, 0) + for r := range endBlockCh { + if r.err != nil { + err = r.err + cancelFn() + return + } + + foundMap[r.nodeName] = r.hasFinalised + if r.hasFinalised { + l.Info().Str("Node name", r.nodeName).Msg("CL node has finalised end block") + } else { + l.Warn().Int64("Has", 
r.finalizedBlock).Int64("Want", endBlock).Str("Node name", r.nodeName).Msg("CL node has not finalised end block yet") + } + + if len(foundMap) == len(nodes.Nodes)-1 { + allFinalised := true + for _, v := range foundMap { + if !v { + allFinalised = false + break + } + } + + allFinalisedCh <- allFinalised + return + } + } + }() + + wg.Wait() + close(endBlockCh) + + return <-allFinalisedCh, err +} + +// ClNodesHaveExpectedLogCount returns true if all CL nodes have the expected log count in the provided block range and matching the provided filters +func ClNodesHaveExpectedLogCount(startBlock, endBlock int64, chainID *big.Int, expectedLogCount int, expectedFilters []ExpectedFilter, l zerolog.Logger, coreLogger core_logger.SugaredLogger, nodes *test_env.ClCluster) (bool, error) { + wg := &sync.WaitGroup{} + + type logQueryResult struct { + nodeName string + logCount int + hasExpectedCount bool + err error + } + + resultChan := make(chan logQueryResult, len(nodes.Nodes)-1) + ctx, cancelFn := context.WithCancel(context.Background()) + + for i := 1; i < len(nodes.Nodes); i++ { + wg.Add(1) + + go func(clNode *test_env.ClNode, resultChan chan logQueryResult) { + defer wg.Done() + select { + case <-ctx.Done(): + return + default: + orm, db, err := NewOrm(coreLogger, chainID, clNode.PostgresDb) + if err != nil { + resultChan <- logQueryResult{ + nodeName: clNode.ContainerName, + logCount: 0, + hasExpectedCount: false, + err: err, + } + } + + defer db.Close() + foundLogsCount := 0 + + for _, filter := range expectedFilters { + logs, err := orm.SelectLogs(startBlock, endBlock, filter.emitterAddress, filter.topic) + if err != nil { + resultChan <- logQueryResult{ + nodeName: clNode.ContainerName, + logCount: 0, + hasExpectedCount: false, + err: err, + } + } + + foundLogsCount += len(logs) + } + + resultChan <- logQueryResult{ + nodeName: clNode.ContainerName, + logCount: foundLogsCount, + hasExpectedCount: foundLogsCount >= expectedLogCount, + err: nil, + } + } + 
}(nodes.Nodes[i], resultChan) + } + + var err error + allFoundCh := make(chan bool, 1) + + go func() { + foundMap := make(map[string]bool, 0) + for r := range resultChan { + if r.err != nil { + err = r.err + cancelFn() + return + } + + foundMap[r.nodeName] = r.hasExpectedCount + if r.hasExpectedCount { + l.Debug(). + Str("Node name", r.nodeName). + Int("Logs count", r.logCount). + Msg("Expected log count found in CL node") + } else { + l.Debug(). + Str("Node name", r.nodeName). + Str("Found/Expected logs", fmt.Sprintf("%d/%d", r.logCount, expectedLogCount)). + Int("Missing logs", expectedLogCount-r.logCount). + Msg("Too low log count found in CL node") + } + + if len(foundMap) == len(nodes.Nodes)-1 { + allFound := true + for _, hadAllLogs := range foundMap { + if !hadAllLogs { + allFound = false + break + } + } + + allFoundCh <- allFound + return + } + } + }() + + wg.Wait() + close(resultChan) + + return <-allFoundCh, err +} + +type MissingLogs map[string][]geth_types.Log + +// IsEmpty returns true if there are no missing logs +func (m *MissingLogs) IsEmpty() bool { + for _, v := range *m { + if len(v) > 0 { + return false + } + } + + return true +} + +// GetMissingLogs returns a map of CL node name to missing logs in that node compared to EVM node to which the provided evm client is connected +func GetMissingLogs(startBlock, endBlock int64, logEmitters []*contracts.LogEmitter, evmClient blockchain.EVMClient, clnodeCluster *test_env.ClCluster, l zerolog.Logger, coreLogger core_logger.SugaredLogger, cfg *lp_config.Config) (MissingLogs, error) { + wg := &sync.WaitGroup{} + + type dbQueryResult struct { + err error + nodeName string + logs []logpoller.Log + } + + ctx, cancelFn := context.WithCancel(context.Background()) + resultCh := make(chan dbQueryResult, len(clnodeCluster.Nodes)-1) + + for i := 1; i < len(clnodeCluster.Nodes); i++ { + wg.Add(1) + + go func(ctx context.Context, i int, r chan dbQueryResult) { + defer wg.Done() + select { + case <-ctx.Done(): + 
l.Warn().Msg("Context cancelled. Terminating fetching logs from log poller's DB") + return + default: + nodeName := clnodeCluster.Nodes[i].ContainerName + + l.Debug().Str("Node name", nodeName).Msg("Fetching log poller logs") + orm, db, err := NewOrm(coreLogger, evmClient.GetChainID(), clnodeCluster.Nodes[i].PostgresDb) + if err != nil { + r <- dbQueryResult{ + err: err, + nodeName: nodeName, + logs: []logpoller.Log{}, + } + } + + defer db.Close() + logs := make([]logpoller.Log, 0) + + for j := 0; j < len(logEmitters); j++ { + address := (*logEmitters[j]).Address() + + for _, event := range cfg.General.EventsToEmit { + l.Trace().Str("Event name", event.Name).Str("Emitter address", address.String()).Msg("Fetching single emitter's logs") + result, err := orm.SelectLogs(startBlock, endBlock, address, event.ID) + if err != nil { + r <- dbQueryResult{ + err: err, + nodeName: nodeName, + logs: []logpoller.Log{}, + } + } + + sort.Slice(result, func(i, j int) bool { + return result[i].BlockNumber < result[j].BlockNumber + }) + + logs = append(logs, result...) + + l.Trace().Str("Event name", event.Name).Str("Emitter address", address.String()).Int("Log count", len(result)).Msg("Logs found per node") + } + } + + l.Info().Int("Count", len(logs)).Str("Node name", nodeName).Msg("Fetched log poller logs") + + r <- dbQueryResult{ + err: nil, + nodeName: nodeName, + logs: logs, + } + } + }(ctx, i, resultCh) + } + + allLogPollerLogs := make(map[string][]logpoller.Log, 0) + missingLogs := map[string][]geth_types.Log{} + var dbError error + + go func() { + for r := range resultCh { + if r.err != nil { + l.Err(r.err).Str("Node name", r.nodeName).Msg("Error fetching logs from log poller's DB") + dbError = r.err + cancelFn() + return + } + // use channel for aggregation and then for := range over it after closing resultCh? 
+ allLogPollerLogs[r.nodeName] = r.logs + } + }() + + wg.Wait() + close(resultCh) + + if dbError != nil { + return nil, dbError + } + + allLogsInEVMNode, err := getEVMLogs(startBlock, endBlock, logEmitters, evmClient, l, cfg) + if err != nil { + return nil, err + } + + wg = &sync.WaitGroup{} + + type missingLogResult struct { + nodeName string + logs []geth_types.Log + } + + evmLogCount := len(allLogsInEVMNode) + l.Info().Int("Log count", evmLogCount).Msg("Started comparison of logs from EVM node and CL nodes. This may take a while if there's a lot of logs") + + missingCh := make(chan missingLogResult, len(clnodeCluster.Nodes)-1) + for i := 1; i < len(clnodeCluster.Nodes); i++ { + wg.Add(1) + + go func(i int, result chan missingLogResult) { + defer wg.Done() + nodeName := clnodeCluster.Nodes[i].ContainerName + l.Debug().Str("Node name", nodeName).Str("Progress", fmt.Sprintf("0/%d", evmLogCount)).Msg("Comparing single CL node's logs with EVM logs") + + missingLogs := make([]geth_types.Log, 0) + for i, evmLog := range allLogsInEVMNode { + logFound := false + for _, logPollerLog := range allLogPollerLogs[nodeName] { + if logPollerLog.BlockNumber == int64(evmLog.BlockNumber) && logPollerLog.TxHash == evmLog.TxHash && bytes.Equal(logPollerLog.Data, evmLog.Data) && logPollerLog.LogIndex == int64(evmLog.Index) && + logPollerLog.Address == evmLog.Address && logPollerLog.BlockHash == evmLog.BlockHash && bytes.Equal(logPollerLog.Topics[0][:], evmLog.Topics[0].Bytes()) { + logFound = true + continue + } + } + + if i%10000 == 0 && i != 0 { + l.Debug().Str("Node name", nodeName).Str("Progress", fmt.Sprintf("%d/%d", i, evmLogCount)).Msg("Comparing single CL node's logs with EVM logs") + } + + if !logFound { + missingLogs = append(missingLogs, evmLog) + } + } + + if len(missingLogs) > 0 { + l.Warn().Int("Count", len(missingLogs)).Str("Node name", nodeName).Msg("Some EMV logs were missing from CL node") + } else { + l.Info().Str("Node name", nodeName).Msg("All EVM logs were found 
in CL node") + } + + result <- missingLogResult{ + nodeName: nodeName, + logs: missingLogs, + } + }(i, missingCh) + } + + wg.Wait() + close(missingCh) + + for v := range missingCh { + if len(v.logs) > 0 { + missingLogs[v.nodeName] = v.logs + } + } + + expectedTotalLogsEmitted := GetExpectedLogCount(cfg) + if int64(len(allLogsInEVMNode)) != expectedTotalLogsEmitted { + l.Warn(). + Str("Actual/Expected", fmt.Sprintf("%d/%d", expectedTotalLogsEmitted, len(allLogsInEVMNode))). + Msg("Some of the test logs were not found in EVM node. This is a bug in the test") + } + + return missingLogs, nil +} + +// PrintMissingLogsInfo prints various useful information about the missing logs +func PrintMissingLogsInfo(missingLogs map[string][]geth_types.Log, l zerolog.Logger, cfg *lp_config.Config) { + var findHumanName = func(topic common.Hash) string { + for _, event := range cfg.General.EventsToEmit { + if event.ID == topic { + return event.Name + } + } + + return "Unknown event" + } + + missingByType := make(map[string]int) + for _, logs := range missingLogs { + for _, v := range logs { + humanName := findHumanName(v.Topics[0]) + missingByType[humanName]++ + } + } + + l.Debug().Msg("Missing log by event name") + for k, v := range missingByType { + l.Debug().Str("Event name", k).Int("Missing count", v).Msg("Missing logs by type") + } + + missingByBlock := make(map[uint64]int) + for _, logs := range missingLogs { + for _, l := range logs { + missingByBlock[l.BlockNumber]++ + } + } + + l.Debug().Msg("Missing logs by block") + for k, v := range missingByBlock { + l.Debug().Uint64("Block number", k).Int("Missing count", v).Msg("Missing logs by block") + } + + missingByEmitter := make(map[string]int) + for _, logs := range missingLogs { + for _, l := range logs { + missingByEmitter[l.Address.String()]++ + } + } + + l.Debug().Msg("Missing logs by emitter") + for k, v := range missingByEmitter { + l.Debug().Str("Emitter address", k).Int("Missing count", v).Msg("Missing logs by emitter") 
+ } +} + +// getEVMLogs returns a slice of all logs emitted by the provided log emitters in the provided block range, +// which are present in the EVM node to which the provided evm client is connected +func getEVMLogs(startBlock, endBlock int64, logEmitters []*contracts.LogEmitter, evmClient blockchain.EVMClient, l zerolog.Logger, cfg *lp_config.Config) ([]geth_types.Log, error) { + allLogsInEVMNode := make([]geth_types.Log, 0) + for j := 0; j < len(logEmitters); j++ { + address := (*logEmitters[j]).Address() + for _, event := range cfg.General.EventsToEmit { + l.Debug().Str("Event name", event.Name).Str("Emitter address", address.String()).Msg("Fetching logs from EVM node") + logsInEVMNode, err := evmClient.FilterLogs(context.Background(), geth.FilterQuery{ + Addresses: []common.Address{(address)}, + Topics: [][]common.Hash{{event.ID}}, + FromBlock: big.NewInt(startBlock), + ToBlock: big.NewInt(endBlock), + }) + if err != nil { + return nil, err + } + + sort.Slice(logsInEVMNode, func(i, j int) bool { + return logsInEVMNode[i].BlockNumber < logsInEVMNode[j].BlockNumber + }) + + allLogsInEVMNode = append(allLogsInEVMNode, logsInEVMNode...) 
+ l.Debug().Str("Event name", event.Name).Str("Emitter address", address.String()).Int("Log count", len(logsInEVMNode)).Msg("Logs found in EVM node") + } + } + + l.Info().Int("Count", len(allLogsInEVMNode)).Msg("Logs in EVM node") + + return allLogsInEVMNode, nil +} + +// ExecuteGenerator executes the configured generator and returns the total number of logs emitted +func ExecuteGenerator(t *testing.T, cfg *lp_config.Config, logEmitters []*contracts.LogEmitter) (int, error) { + if *cfg.General.Generator == lp_config.GeneratorType_WASP { + return runWaspGenerator(t, cfg, logEmitters) + } + + return runLoopedGenerator(t, cfg, logEmitters) +} + +// runWaspGenerator runs the wasp generator and returns the total number of logs emitted +func runWaspGenerator(t *testing.T, cfg *lp_config.Config, logEmitters []*contracts.LogEmitter) (int, error) { + l := logging.GetTestLogger(t) + + var RPSprime int64 + + // if LPS is set, we need to calculate based on countract count and events per transaction + if *cfg.Wasp.LPS > 0 { + RPSprime = *cfg.Wasp.LPS / int64(*cfg.General.Contracts) / int64(*cfg.General.EventsPerTx) / int64(len(cfg.General.EventsToEmit)) + + if RPSprime < 1 { + return 0, fmt.Errorf("invalid load configuration, effective RPS would have been zero. 
Adjust LPS, contracts count, events per tx or events to emit") + } + } + + // if RPS is set simply split it between contracts + if *cfg.Wasp.RPS > 0 { + RPSprime = *cfg.Wasp.RPS / int64(*cfg.General.Contracts) + } + + counter := &Counter{ + mu: &sync.Mutex{}, + value: 0, + } + + p := wasp.NewProfile() + + for _, logEmitter := range logEmitters { + g, err := wasp.NewGenerator(&wasp.Config{ + T: t, + LoadType: wasp.RPS, + GenName: fmt.Sprintf("log_poller_gen_%s", (*logEmitter).Address().String()), + RateLimitUnitDuration: cfg.Wasp.RateLimitUnitDuration.Duration, + CallTimeout: cfg.Wasp.CallTimeout.Duration, + Schedule: wasp.Plain( + RPSprime, + cfg.Wasp.Duration.Duration, + ), + Gun: NewLogEmitterGun( + logEmitter, + cfg.General.EventsToEmit, + *cfg.General.EventsPerTx, + l, + ), + SharedData: counter, + }) + p.Add(g, err) + } + + _, err := p.Run(true) + + if err != nil { + return 0, err + } + + return counter.value, nil +} + +// runLoopedGenerator runs the looped generator and returns the total number of logs emitted +func runLoopedGenerator(t *testing.T, cfg *lp_config.Config, logEmitters []*contracts.LogEmitter) (int, error) { + l := logging.GetTestLogger(t) + + // Start emitting events in parallel, each contract is emitting events in a separate goroutine + // We will stop as soon as we encounter an error + wg := &sync.WaitGroup{} + emitterCh := make(chan LogEmitterChannel, len(logEmitters)) + + ctx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + + for i := 0; i < len(logEmitters); i++ { + wg.Add(1) + go emitEvents(ctx, l, logEmitters[i], cfg, wg, emitterCh) + } + + var emitErr error + total := 0 + + aggrChan := make(chan int, len(logEmitters)) + + go func() { + for { + select { + case <-ctx.Done(): + return + case emitter := <-emitterCh: + if emitter.err != nil { + emitErr = emitter.err + cancelFn() + return + } + aggrChan <- emitter.logsEmitted + } + } + }() + + wg.Wait() + close(emitterCh) + + if emitErr != nil { + return 0, emitErr + 
} + + for i := 0; i < len(logEmitters); i++ { + total += <-aggrChan + } + + return int(total), nil +} + +// GetExpectedLogCount returns the expected number of logs to be emitted based on the provided config +func GetExpectedLogCount(cfg *lp_config.Config) int64 { + if *cfg.General.Generator == lp_config.GeneratorType_WASP { + if *cfg.Wasp.RPS != 0 { + return *cfg.Wasp.RPS * int64(cfg.Wasp.Duration.Seconds()) * int64(*cfg.General.EventsPerTx) + } + return *cfg.Wasp.LPS * int64(cfg.Wasp.Duration.Duration.Seconds()) + } + + return int64(len(cfg.General.EventsToEmit) * *cfg.LoopedConfig.ExecutionCount * *cfg.General.Contracts * *cfg.General.EventsPerTx) +} + +type PauseData struct { + StartBlock uint64 + EndBlock uint64 + TargetComponent string + ContaineName string +} + +var ChaosPauses = []PauseData{} + +// chaosPauseSyncFn pauses ranom container of the provided type for a random amount of time between 5 and 20 seconds +func chaosPauseSyncFn(l zerolog.Logger, testEnv *test_env.CLClusterTestEnv, targetComponent string) ChaosPauseData { + rand.New(rand.NewSource(time.Now().UnixNano())) + + randomNode := testEnv.ClCluster.Nodes[rand.Intn(len(testEnv.ClCluster.Nodes)-1)+1] + var component ctf_test_env.EnvComponent + + switch strings.ToLower(targetComponent) { + case "plugin": + component = randomNode.EnvComponent + case "postgres": + component = randomNode.PostgresDb.EnvComponent + default: + return ChaosPauseData{Err: fmt.Errorf("unknown component %s", targetComponent)} + } + + ctx := context.Background() + pauseStartBlock, err := testEnv.EVMClient.LatestBlockNumber(ctx) + if err != nil { + return ChaosPauseData{Err: err} + } + pauseTimeSec := rand.Intn(20-5) + 5 + l.Info().Str("Container", component.ContainerName).Int("Pause time", pauseTimeSec).Msg("Pausing component") + pauseTimeDur := time.Duration(pauseTimeSec) * time.Second + err = component.ChaosPause(l, pauseTimeDur) + if err != nil { + return ChaosPauseData{Err: err} + } + l.Info().Str("Container", 
component.ContainerName).Msg("Component unpaused") + + pauseEndBlock, err := testEnv.EVMClient.LatestBlockNumber(ctx) + if err != nil { + return ChaosPauseData{Err: err} + } + + return ChaosPauseData{PauseData: PauseData{ + StartBlock: pauseStartBlock, + EndBlock: pauseEndBlock, + TargetComponent: targetComponent, + ContaineName: component.ContainerName, + }} +} + +type ChaosPauseData struct { + Err error + PauseData PauseData +} + +// ExecuteChaosExperiment executes the configured chaos experiment, which consist of pausing CL node or Postgres containers +func ExecuteChaosExperiment(l zerolog.Logger, testEnv *test_env.CLClusterTestEnv, cfg *lp_config.Config, errorCh chan error) { + if cfg.ChaosConfig == nil || *cfg.ChaosConfig.ExperimentCount == 0 { + errorCh <- nil + return + } + + chaosChan := make(chan ChaosPauseData, *cfg.ChaosConfig.ExperimentCount) + wg := &sync.WaitGroup{} + + go func() { + // if we wanted to have more than 1 container paused, we'd need to make sure we aren't trying to pause an already paused one + guardChan := make(chan struct{}, 1) + + for i := 0; i < *cfg.ChaosConfig.ExperimentCount; i++ { + i := i + wg.Add(1) + guardChan <- struct{}{} + go func() { + defer func() { + <-guardChan + wg.Done() + current := i + 1 + l.Info().Str("Current/Total", fmt.Sprintf("%d/%d", current, cfg.ChaosConfig.ExperimentCount)).Msg("Done with experiment") + }() + chaosChan <- chaosPauseSyncFn(l, testEnv, *cfg.ChaosConfig.TargetComponent) + time.Sleep(10 * time.Second) + }() + } + + wg.Wait() + + close(chaosChan) + }() + + go func() { + var pauseData []PauseData + for result := range chaosChan { + if result.Err != nil { + l.Err(result.Err).Msg("Error encountered during chaos experiment") + errorCh <- result.Err + return // Return on actual error + } + + pauseData = append(pauseData, result.PauseData) + } + + l.Info().Msg("All chaos experiments finished") + errorCh <- nil // Only send nil once, after all errors have been handled and the channel is closed + + for 
_, p := range pauseData { + l.Debug().Str("Target component", p.TargetComponent).Str("Container", p.ContaineName).Str("Block range", fmt.Sprintf("%d - %d", p.StartBlock, p.EndBlock)).Msgf("Details of executed chaos pause") + } + }() +} + +// GetFinalityDepth returns the finality depth for the provided chain ID +func GetFinalityDepth(chainId int64) (int64, error) { + var finalityDepth int64 + switch chainId { + // Ethereum Sepolia + case 11155111: + finalityDepth = 50 + // Polygon Mumbai + case 80001: + finalityDepth = 500 + // Simulated network + case 1337: + finalityDepth = 10 + default: + return 0, fmt.Errorf("no known finality depth for chain %d", chainId) + } + + return finalityDepth, nil +} + +// GetEndBlockToWaitFor returns the end block to wait for based on chain id and finality tag provided in config +func GetEndBlockToWaitFor(endBlock, chainId int64, cfg *lp_config.Config) (int64, error) { + if *cfg.General.UseFinalityTag { + return endBlock + 1, nil + } + + finalityDepth, err := GetFinalityDepth(chainId) + if err != nil { + return 0, err + } + + return endBlock + finalityDepth, nil +} + +const ( + automationDefaultUpkeepGasLimit = uint32(2500000) + automationDefaultLinkFunds = int64(9e18) + automationDefaultUpkeepsToDeploy = 10 + automationExpectedData = "abcdef" + defaultAmountOfUpkeeps = 2 +) + +var ( + DefaultOCRRegistryConfig = contracts.KeeperRegistrySettings{ + PaymentPremiumPPB: uint32(200000000), + FlatFeeMicroPLI: uint32(0), + BlockCountPerTurn: big.NewInt(10), + CheckGasLimit: uint32(2500000), + StalenessSeconds: big.NewInt(90000), + GasCeilingMultiplier: uint16(1), + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: uint32(5000000), + FallbackGasPrice: big.NewInt(2e11), + FallbackLinkPrice: big.NewInt(2e18), + MaxCheckDataSize: uint32(5000), + MaxPerformDataSize: uint32(5000), + } + + automationDefaultRegistryConfig = contracts.KeeperRegistrySettings{ + PaymentPremiumPPB: uint32(200000000), + FlatFeeMicroPLI: uint32(0), + BlockCountPerTurn: 
big.NewInt(10), + CheckGasLimit: uint32(2500000), + StalenessSeconds: big.NewInt(90000), + GasCeilingMultiplier: uint16(1), + MinUpkeepSpend: big.NewInt(0), + MaxPerformGas: uint32(5000000), + FallbackGasPrice: big.NewInt(2e11), + FallbackLinkPrice: big.NewInt(2e18), + MaxCheckDataSize: uint32(5000), + MaxPerformDataSize: uint32(5000), + } +) + +// SetupLogPollerTestDocker starts the DON and private Ethereum network +func SetupLogPollerTestDocker( + t *testing.T, + registryVersion ethereum.KeeperRegistryVersion, + registryConfig contracts.KeeperRegistrySettings, + upkeepsNeeded int, + lpPollingInterval time.Duration, + finalityTagEnabled bool, + testConfig *tc.TestConfig, +) ( + blockchain.EVMClient, + []*client.PluginClient, + contracts.ContractDeployer, + contracts.LinkToken, + contracts.KeeperRegistry, + contracts.KeeperRegistrar, + *test_env.CLClusterTestEnv, +) { + l := logging.GetTestLogger(t) + + // Add registry version to config + registryConfig.RegistryVersion = registryVersion + network := networks.MustGetSelectedNetworkConfig(testConfig.Network)[0] + + finalityDepth, err := GetFinalityDepth(network.ChainID) + require.NoError(t, err, "Error getting finality depth") + + // build the node config + clNodeConfig := node.NewConfig(node.NewBaseConfig()) + syncInterval := *commonconfig.MustNewDuration(5 * time.Minute) + clNodeConfig.Feature.LogPoller = ptr.Ptr[bool](true) + clNodeConfig.OCR2.Enabled = ptr.Ptr[bool](true) + clNodeConfig.Keeper.TurnLookBack = ptr.Ptr[int64](int64(0)) + clNodeConfig.Keeper.Registry.SyncInterval = &syncInterval + clNodeConfig.Keeper.Registry.PerformGasOverhead = ptr.Ptr[uint32](uint32(150000)) + clNodeConfig.P2P.V2.Enabled = ptr.Ptr[bool](true) + clNodeConfig.P2P.V2.AnnounceAddresses = &[]string{"0.0.0.0:6690"} + clNodeConfig.P2P.V2.ListenAddresses = &[]string{"0.0.0.0:6690"} + + //launch the environment + var env *test_env.CLClusterTestEnv + pluginNodeFunding := 0.5 + l.Debug().Msgf("Funding amount: %f", pluginNodeFunding) + 
clNodesCount := 5 + + var logPolllerSettingsFn = func(chain *evmcfg.Chain) *evmcfg.Chain { + chain.LogPollInterval = commonconfig.MustNewDuration(lpPollingInterval) + chain.FinalityDepth = ptr.Ptr[uint32](uint32(finalityDepth)) + chain.FinalityTagEnabled = ptr.Ptr[bool](finalityTagEnabled) + return chain + } + + var evmClientSettingsFn = func(network *blockchain.EVMNetwork) *blockchain.EVMNetwork { + network.FinalityDepth = uint64(finalityDepth) + network.FinalityTag = finalityTagEnabled + return network + } + + ethBuilder := ctf_test_env.NewEthereumNetworkBuilder() + cfg, err := ethBuilder. + WithConsensusType(ctf_test_env.ConsensusType_PoS). + WithConsensusLayer(ctf_test_env.ConsensusLayer_Prysm). + WithExecutionLayer(ctf_test_env.ExecutionLayer_Geth). + WithEthereumChainConfig(ctf_test_env.EthereumChainConfig{ + SecondsPerSlot: 4, + SlotsPerEpoch: 2, + }). + Build() + require.NoError(t, err, "Error building ethereum network config") + + env, err = test_env.NewCLTestEnvBuilder(). + WithTestConfig(testConfig). + WithTestInstance(t). + WithPrivateEthereumNetwork(cfg). + WithCLNodes(clNodesCount). + WithCLNodeConfig(clNodeConfig). + WithFunding(big.NewFloat(pluginNodeFunding)). + WithChainOptions(logPolllerSettingsFn). + EVMClientNetworkOptions(evmClientSettingsFn). + WithStandardCleanup(). 
+ Build() + require.NoError(t, err, "Error deploying test environment") + + env.ParallelTransactions(true) + nodeClients := env.ClCluster.NodeAPIs() + workerNodes := nodeClients[1:] + + var linkToken contracts.LinkToken + + switch network.ChainID { + // Simulated + case 1337: + linkToken, err = env.ContractDeployer.DeployLinkTokenContract() + // Ethereum Sepolia + case 11155111: + linkToken, err = env.ContractLoader.LoadPLIToken("0x779877A7B0D9E8603169DdbD7836e478b4624789") + // Polygon Mumbai + case 80001: + linkToken, err = env.ContractLoader.LoadPLIToken("0x326C977E6efc84E512bB9C30f76E30c160eD06FB") + default: + panic("Not implemented") + } + require.NoError(t, err, "Error loading/deploying PLI token") + + linkBalance, err := env.EVMClient.BalanceAt(context.Background(), common.HexToAddress(linkToken.Address())) + require.NoError(t, err, "Error getting PLI balance") + + l.Info().Str("Balance", big.NewInt(0).Div(linkBalance, big.NewInt(1e18)).String()).Msg("PLI balance") + minLinkBalanceSingleNode := big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(9)) + minLinkBalance := big.NewInt(0).Mul(minLinkBalanceSingleNode, big.NewInt(int64(upkeepsNeeded))) + if minLinkBalance.Cmp(linkBalance) < 0 { + require.FailNowf(t, "Not enough PLI", "Not enough PLI to run the test. 
Need at least %s", big.NewInt(0).Div(minLinkBalance, big.NewInt(1e18)).String()) + } + + registry, registrar := actions.DeployAutoOCRRegistryAndRegistrar( + t, + registryVersion, + registryConfig, + linkToken, + env.ContractDeployer, + env.EVMClient, + ) + + // Fund the registry with PLI + err = linkToken.Transfer(registry.Address(), big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(int64(defaultAmountOfUpkeeps)))) + require.NoError(t, err, "Funding keeper registry contract shouldn't fail") + + err = actions.CreateOCRKeeperJobsLocal(l, nodeClients, registry.Address(), network.ChainID, 0, registryVersion) + require.NoError(t, err, "Error creating OCR Keeper Jobs") + ocrConfig, err := actions.BuildAutoOCR2ConfigVarsLocal(l, workerNodes, registryConfig, registrar.Address(), 30*time.Second, registry.RegistryOwnerAddress()) + require.NoError(t, err, "Error building OCR config vars") + err = registry.SetConfig(automationDefaultRegistryConfig, ocrConfig) + require.NoError(t, err, "Registry config should be set successfully") + require.NoError(t, env.EVMClient.WaitForEvents(), "Waiting for config to be set") + + return env.EVMClient, nodeClients, env.ContractDeployer, linkToken, registry, registrar, env +} + +// UploadLogEmitterContractsAndWaitForFinalisation uploads the configured number of log emitter contracts and waits for the upload blocks to be finalised +func UploadLogEmitterContractsAndWaitForFinalisation(l zerolog.Logger, t *testing.T, testEnv *test_env.CLClusterTestEnv, cfg *lp_config.Config) []*contracts.LogEmitter { + logEmitters := make([]*contracts.LogEmitter, 0) + for i := 0; i < *cfg.General.Contracts; i++ { + logEmitter, err := testEnv.ContractDeployer.DeployLogEmitterContract() + logEmitters = append(logEmitters, &logEmitter) + require.NoError(t, err, "Error deploying log emitter contract") + l.Info().Str("Contract address", logEmitter.Address().Hex()).Msg("Log emitter contract deployed") + time.Sleep(200 * time.Millisecond) + } + afterUploadBlock, err := 
testEnv.EVMClient.LatestBlockNumber(testcontext.Get(t)) + require.NoError(t, err, "Error getting latest block number") + + gom := gomega.NewGomegaWithT(t) + gom.Eventually(func(g gomega.Gomega) { + targetBlockNumber := int64(afterUploadBlock + 1) + finalized, err := testEnv.EVMClient.GetLatestFinalizedBlockHeader(testcontext.Get(t)) + if err != nil { + l.Warn().Err(err).Msg("Error checking if contract were uploaded. Retrying...") + return + } + finalizedBlockNumber := finalized.Number.Int64() + + if finalizedBlockNumber < targetBlockNumber { + l.Debug().Int64("Finalized block", finalized.Number.Int64()).Int64("After upload block", int64(afterUploadBlock+1)).Msg("Waiting for contract upload to finalise") + } + + g.Expect(finalizedBlockNumber >= targetBlockNumber).To(gomega.BeTrue(), "Contract upload did not finalize in time") + }, "2m", "10s").Should(gomega.Succeed()) + + return logEmitters +} + +// AssertUpkeepIdsUniqueness asserts that the provided upkeep IDs are unique +func AssertUpkeepIdsUniqueness(upkeepIDs []*big.Int) error { + upKeepIdSeen := make(map[int64]bool) + for _, upkeepID := range upkeepIDs { + if _, ok := upKeepIdSeen[upkeepID.Int64()]; ok { + return fmt.Errorf("Duplicate upkeep ID %d", upkeepID.Int64()) + } + upKeepIdSeen[upkeepID.Int64()] = true + } + + return nil +} + +// AssertContractAddressUniquneness asserts that the provided contract addresses are unique +func AssertContractAddressUniquneness(logEmitters []*contracts.LogEmitter) error { + contractAddressSeen := make(map[string]bool) + for _, logEmitter := range logEmitters { + address := (*logEmitter).Address().String() + if _, ok := contractAddressSeen[address]; ok { + return fmt.Errorf("Duplicate contract address %s", address) + } + contractAddressSeen[address] = true + } + + return nil +} + +// RegisterFiltersAndAssertUniquness registers the configured log filters and asserts that the filters are unique +// meaning that for each log emitter address and topic there is only one filter 
+func RegisterFiltersAndAssertUniquness(l zerolog.Logger, registry contracts.KeeperRegistry, upkeepIDs []*big.Int, logEmitters []*contracts.LogEmitter, cfg *lp_config.Config, upKeepsNeeded int) error { + uniqueFilters := make(map[string]bool) + + upkeepIdIndex := 0 + for i := 0; i < len(logEmitters); i++ { + for j := 0; j < len(cfg.General.EventsToEmit); j++ { + emitterAddress := (*logEmitters[i]).Address() + topicId := cfg.General.EventsToEmit[j].ID + + upkeepID := upkeepIDs[upkeepIdIndex] + l.Debug().Int("Upkeep id", int(upkeepID.Int64())).Str("Emitter address", emitterAddress.String()).Str("Topic", topicId.Hex()).Msg("Registering log trigger for log emitter") + err := registerSingleTopicFilter(registry, upkeepID, emitterAddress, topicId) + randomWait(150, 300) + if err != nil { + return fmt.Errorf("%w: Error registering log trigger for log emitter %s", err, emitterAddress.String()) + } + + if i%10 == 0 { + l.Info().Msgf("Registered log trigger for topic %d for log emitter %d/%d", j, i, len(logEmitters)) + } + + key := fmt.Sprintf("%s-%s", emitterAddress.String(), topicId.Hex()) + if _, ok := uniqueFilters[key]; ok { + return fmt.Errorf("Duplicate filter %s", key) + } + uniqueFilters[key] = true + upkeepIdIndex++ + } + } + + if upKeepsNeeded != len(uniqueFilters) { + return fmt.Errorf("Number of unique filters should be equal to number of upkeeps. Expected %d. 
Got %d", upKeepsNeeded, len(uniqueFilters)) + } + + return nil +} + +// FluentlyCheckIfAllNodesHaveLogCount checks if all CL nodes have the expected log count for the provided block range and expected filters +// It will retry until the provided duration is reached or until all nodes have the expected log count +func FluentlyCheckIfAllNodesHaveLogCount(duration string, startBlock, endBlock int64, expectedLogCount int, expectedFilters []ExpectedFilter, l zerolog.Logger, coreLogger core_logger.SugaredLogger, testEnv *test_env.CLClusterTestEnv) (bool, error) { + logCountWaitDuration, err := time.ParseDuration(duration) + if err != nil { + return false, err + } + endTime := time.Now().Add(logCountWaitDuration) + + // not using gomega here, because I want to see which logs were missing + allNodesLogCountMatches := false + for time.Now().Before(endTime) { + logCountMatches, clErr := ClNodesHaveExpectedLogCount(startBlock, endBlock, testEnv.EVMClient.GetChainID(), expectedLogCount, expectedFilters, l, coreLogger, testEnv.ClCluster) + if clErr != nil { + l.Warn(). + Err(clErr). + Msg("Error checking if CL nodes have expected log count. Retrying...") + } + if logCountMatches { + allNodesLogCountMatches = true + break + } + l.Warn(). + Msg("At least one CL node did not have expected log count. 
Retrying...") + } + + return allNodesLogCountMatches, nil +} diff --git a/integration-tests/utils/cl_node_jobs.go b/integration-tests/utils/cl_node_jobs.go new file mode 100644 index 00000000..9cff9468 --- /dev/null +++ b/integration-tests/utils/cl_node_jobs.go @@ -0,0 +1,129 @@ +package utils + +import ( + "bytes" + "fmt" + "net/url" + "text/template" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/lib/pq" + "gopkg.in/guregu/null.v4" + + coreClient "github.com/goplugin/pluginv3.0/integration-tests/client" + "github.com/goplugin/pluginv3.0/v2/core/services/job" + "github.com/goplugin/pluginv3.0/v2/core/store/models" +) + +func BuildBootstrapSpec(verifierAddr common.Address, chainID int64, feedId [32]byte) *coreClient.OCR2TaskJobSpec { + hash := common.BytesToHash(feedId[:]) + return &coreClient.OCR2TaskJobSpec{ + Name: fmt.Sprintf("bootstrap-%s", uuid.NewString()), + JobType: "bootstrap", + OCR2OracleSpec: job.OCR2OracleSpec{ + ContractID: verifierAddr.String(), + Relay: "evm", + FeedID: &hash, + RelayConfig: map[string]interface{}{ + "chainID": int(chainID), + }, + ContractConfigTrackerPollInterval: *models.NewInterval(time.Second * 15), + }, + } +} + +func BuildOCRSpec( + verifierAddr common.Address, chainID int64, fromBlock uint64, + feedId [32]byte, bridges []coreClient.BridgeTypeAttributes, + csaPubKey string, msRemoteUrl string, msPubKey string, + nodeOCRKey string, p2pV2Bootstrapper string, allowedFaults int) *coreClient.OCR2TaskJobSpec { + + tmpl, err := template.New("os").Parse(` +{{range $i, $b := .Bridges}} +{{$b.Name}}_payload [type=bridge name="{{$b.Name}}" timeout="50ms" requestData="{}"]; +{{$b.Name}}_median [type=jsonparse path="data,result"]; +{{$b.Name}}_bid [type=jsonparse path="data,result"]; +{{$b.Name}}_ask [type=jsonparse path="data,result"]; + +{{$b.Name}}_median_multiply [type=multiply times=10]; +{{$b.Name}}_bid_multiply [type=multiply times=10]; +{{$b.Name}}_ask_multiply [type=multiply 
times=10]; +{{end}} + + +{{range $i, $b := .Bridges}} +{{$b.Name}}_payload -> {{$b.Name}}_median -> {{$b.Name}}_median_multiply -> benchmark_price; +{{end}} + +benchmark_price [type=median allowedFaults={{.AllowedFaults}} index=0]; + +{{range $i, $b := .Bridges}} +{{$b.Name}}_payload -> {{$b.Name}}_bid -> {{$b.Name}}_bid_multiply -> bid_price; +{{end}} + +bid_price [type=median allowedFaults={{.AllowedFaults}} index=1]; + +{{range $i, $b := .Bridges}} +{{$b.Name}}_payload -> {{$b.Name}}_ask -> {{$b.Name}}_ask_multiply -> ask_price; +{{end}} + +ask_price [type=median allowedFaults={{.AllowedFaults}} index=2]; + `) + if err != nil { + panic(err) + } + data := struct { + Bridges []coreClient.BridgeTypeAttributes + AllowedFaults int + }{ + Bridges: bridges, + AllowedFaults: allowedFaults, + } + var buf bytes.Buffer + err = tmpl.Execute(&buf, data) + if err != nil { + panic(err) + } + observationSource := buf.String() + + hash := common.BytesToHash(feedId[:]) + return &coreClient.OCR2TaskJobSpec{ + Name: fmt.Sprintf("ocr2-%s", uuid.NewString()), + JobType: "offchainreporting2", + MaxTaskDuration: "1s", + ForwardingAllowed: false, + OCR2OracleSpec: job.OCR2OracleSpec{ + PluginType: "mercury", + PluginConfig: map[string]interface{}{ + "serverURL": fmt.Sprintf("\"%s\"", msRemoteUrl), + "serverPubKey": fmt.Sprintf("\"%s\"", msPubKey), + }, + Relay: "evm", + RelayConfig: map[string]interface{}{ + "chainID": int(chainID), + "fromBlock": fromBlock, + }, + ContractConfigTrackerPollInterval: *models.NewInterval(time.Second * 15), + ContractID: verifierAddr.String(), + FeedID: &hash, + OCRKeyBundleID: null.StringFrom(nodeOCRKey), + TransmitterID: null.StringFrom(csaPubKey), + P2PV2Bootstrappers: pq.StringArray{p2pV2Bootstrapper}, + }, + ObservationSource: observationSource, + } +} + +func BuildBridges(eaUrls []*url.URL) []coreClient.BridgeTypeAttributes { + var bridges []coreClient.BridgeTypeAttributes + for _, url := range eaUrls { + bridges = append(bridges, 
coreClient.BridgeTypeAttributes{ + Name: fmt.Sprintf("bridge_%s", uuid.NewString()[0:6]), + URL: url.String(), + RequestData: "{}", + }) + } + return bridges +} diff --git a/integration-tests/utils/common.go b/integration-tests/utils/common.go new file mode 100644 index 00000000..db0468a7 --- /dev/null +++ b/integration-tests/utils/common.go @@ -0,0 +1,33 @@ +package utils + +import ( + "math/big" + "net" + + commonconfig "github.com/goplugin/plugin-common/pkg/config" +) + +func MustURL(s string) *commonconfig.URL { + var u commonconfig.URL + if err := u.UnmarshalText([]byte(s)); err != nil { + panic(err) + } + return &u +} + +func MustIP(s string) *net.IP { + var ip net.IP + if err := ip.UnmarshalText([]byte(s)); err != nil { + panic(err) + } + return &ip +} + +func BigIntSliceContains(slice []*big.Int, b *big.Int) bool { + for _, a := range slice { + if b.Cmp(a) == 0 { + return true + } + } + return false +} diff --git a/integration-tests/utils/templates/secrets.go b/integration-tests/utils/templates/secrets.go new file mode 100644 index 00000000..74823285 --- /dev/null +++ b/integration-tests/utils/templates/secrets.go @@ -0,0 +1,37 @@ +package templates + +import ( + "github.com/google/uuid" + + "github.com/goplugin/plugin-testing-framework/utils/templates" +) + +// NodeSecretsTemplate are used as text templates because of secret redacted fields of plugin.Secrets +// secret fields can't be marshalled as a plain text +type NodeSecretsTemplate struct { + PgDbName string + PgHost string + PgPort string + PgPassword string + CustomSecrets string +} + +func (c NodeSecretsTemplate) String() (string, error) { + tpl := ` +[Database] +URL = 'postgresql://postgres:{{ .PgPassword }}@{{ .PgHost }}:{{ .PgPort }}/{{ .PgDbName }}?sslmode=disable' # Required + +[Password] +Keystore = '................' 
# Required + +{{ if .CustomSecrets }} + {{ .CustomSecrets }} +{{ else }} +[Mercury.Credentials.cred1] +URL = 'localhost:1338' +Username = 'node' +Password = 'nodepass' +{{ end }} +` + return templates.MarshalTemplate(c, uuid.NewString(), tpl) +} diff --git a/internal/testdb/testdb.go b/internal/testdb/testdb.go new file mode 100644 index 00000000..bedc97db --- /dev/null +++ b/internal/testdb/testdb.go @@ -0,0 +1,56 @@ +package testdb + +import ( + "database/sql" + "errors" + "fmt" + "net/url" + + "github.com/goplugin/pluginv3.0/v2/core/store/dialects" +) + +const ( + // PristineDBName is a clean copy of test DB with migrations. + PristineDBName = "plugin_test_pristine" + // TestDBNamePrefix is a common prefix that will be auto-removed by the dangling DB cleanup process. + TestDBNamePrefix = "plugin_test_" +) + +// CreateOrReplace creates a database named with a common prefix and the given suffix, and returns the URL. +// If the database already exists, it will be dropped and re-created. +// If withTemplate is true, the pristine DB will be used as a template. +func CreateOrReplace(parsed url.URL, suffix string, withTemplate bool) (string, error) { + if parsed.Path == "" { + return "", errors.New("path missing from database URL") + } + + // Match the naming schema that our dangling DB cleanup methods expect + dbname := TestDBNamePrefix + suffix + if l := len(dbname); l > 63 { + return "", fmt.Errorf("dbname %v too long (%d), max is 63 bytes. Try a shorter suffix", dbname, l) + } + // Cannot drop test database if we are connected to it, so we must connect + // to a different one. 'postgres' should be present on all postgres installations + parsed.Path = "/postgres" + db, err := sql.Open(string(dialects.Postgres), parsed.String()) + if err != nil { + return "", fmt.Errorf("in order to drop the test database, we need to connect to a separate database"+ + " called 'postgres'. 
But we are unable to open 'postgres' database: %+v\n", err) + } + defer db.Close() + + _, err = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS %s", dbname)) + if err != nil { + return "", fmt.Errorf("unable to drop postgres migrations test database: %v", err) + } + if withTemplate { + _, err = db.Exec(fmt.Sprintf("CREATE DATABASE %s WITH TEMPLATE %s", dbname, PristineDBName)) + } else { + _, err = db.Exec(fmt.Sprintf("CREATE DATABASE %s", dbname)) + } + if err != nil { + return "", fmt.Errorf("unable to create postgres test database with name '%s': %v", dbname, err) + } + parsed.Path = fmt.Sprintf("/%s", dbname) + return parsed.String(), nil +} diff --git a/lintconf.yaml b/lintconf.yaml new file mode 100644 index 00000000..ff37371d --- /dev/null +++ b/lintconf.yaml @@ -0,0 +1,46 @@ +--- +# Copied from: +# https://redhat-cop.github.io/ci/linting-testing-helm-charts.html +# with `min-spaces-from-content` changed to be compatible with prettier. +rules: + braces: + min-spaces-inside: 0 + max-spaces-inside: 0 + min-spaces-inside-empty: -1 + max-spaces-inside-empty: -1 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 0 + min-spaces-inside-empty: -1 + max-spaces-inside-empty: -1 + colons: + max-spaces-before: 0 + max-spaces-after: 1 + commas: + max-spaces-before: 0 + min-spaces-after: 1 + max-spaces-after: 1 + comments: + require-starting-space: true + min-spaces-from-content: 1 + document-end: disable + document-start: disable # No --- to start a file + empty-lines: + max: 2 + max-start: 0 + max-end: 0 + hyphens: + max-spaces-after: 1 + indentation: + spaces: consistent + indent-sequences: whatever # - list indentation will handle both indentation and without + check-multi-line-strings: false + key-duplicates: enable + line-length: disable # Lines can be any length + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: enable + truthy: + level: warning + diff --git a/main.go b/main.go new file mode 100644 index 00000000..e51ac8cf --- 
/dev/null +++ b/main.go @@ -0,0 +1,12 @@ +package main + +import ( + "os" + + "github.com/goplugin/pluginv3.0/v2/core" +) + +//go:generate make modgraph +func main() { + os.Exit(core.Main()) +} diff --git a/main_test.go b/main_test.go new file mode 100644 index 00000000..f459bd8a --- /dev/null +++ b/main_test.go @@ -0,0 +1,123 @@ +package main + +import ( + "fmt" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "testing" + + "github.com/google/uuid" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/rogpeppe/go-internal/testscript" + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core" + "github.com/goplugin/pluginv3.0/v2/core/config/env" + "github.com/goplugin/pluginv3.0/v2/core/static" + "github.com/goplugin/pluginv3.0/v2/internal/testdb" + "github.com/goplugin/pluginv3.0/v2/tools/txtar" +) + +// special files can be included to allocate additional test resources +const ( + // testDBName triggers initializing of a test database. + // The URL will be set as the value of an env var named by the file. + // + // -- testdb.txt -- + // CL_DATABASE_URL + testDBName = "testdb.txt" + // testPortName triggers injection of a free port as the value of an env var named by the file. 
+ // + // -- testport.txt -- + // PORT + testPortName = "testport.txt" +) + +func TestMain(m *testing.M) { + os.Exit(testscript.RunMain(m, map[string]func() int{ + "plugin": core.Main, + })) +} + +func TestScripts(t *testing.T) { + t.Parallel() + + visitor := txtar.NewDirVisitor("testdata/scripts", txtar.Recurse, func(path string) error { + t.Run(strings.TrimPrefix(path, "testdata/scripts/"), func(t *testing.T) { + t.Parallel() + + testscript.Run(t, testscript.Params{ + Dir: path, + Setup: commonEnv, + ContinueOnError: true, + //UpdateScripts: true, // uncomment to update golden files + }) + }) + return nil + }) + + require.NoError(t, visitor.Walk()) +} + +func commonEnv(te *testscript.Env) error { + te.Setenv("HOME", "$WORK/home") + te.Setenv("VERSION", static.Version) + te.Setenv("COMMIT_SHA", static.Sha) + + b, err := os.ReadFile(filepath.Join(te.WorkDir, testPortName)) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to read file %s: %w", testPortName, err) + } else if err == nil { + envVarName := strings.TrimSpace(string(b)) + te.T().Log("test port requested:", envVarName) + + port, ret, err2 := takeFreePort() + if err2 != nil { + return err2 + } + te.Defer(ret) + + te.Setenv(envVarName, strconv.Itoa(port)) + } + + b, err = os.ReadFile(filepath.Join(te.WorkDir, testDBName)) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to read file %s: %w", testDBName, err) + } else if err == nil { + envVarName := strings.TrimSpace(string(b)) + te.T().Log("test database requested:", envVarName) + + u2, err2 := initDB() + if err2 != nil { + return err2 + } + + te.Setenv(envVarName, u2) + } + return nil +} + +func takeFreePort() (int, func(), error) { + ports, err := freeport.Take(1) + if err != nil { + return 0, nil, fmt.Errorf("failed to get free port: %w", err) + } + return ports[0], func() { freeport.Return(ports) }, nil +} + +func initDB() (string, error) { + u, err := url.Parse(string(env.DatabaseURL.Get())) + if err != nil { + 
return "", fmt.Errorf("failed to parse url: %w", err) + } + + name := strings.ReplaceAll(uuid.NewString(), "-", "_") + "_test" + u2, err := testdb.CreateOrReplace(*u, name, true) + if err != nil { + return "", fmt.Errorf("failed to create DB: %w", err) + } + return u2, nil +} diff --git a/operator_ui/README.md b/operator_ui/README.md new file mode 100644 index 00000000..e2d3306c --- /dev/null +++ b/operator_ui/README.md @@ -0,0 +1,26 @@ +# Operator UI + +NOTE: If you're looking for the source of operator UI, it has now been moved to https://github.com/goplugin/operator-ui + +This directory instead now as a collection of scripts for maintaining the version of operator UI to pull in when developing and building the plugin node. + +## About + +This package is responsible for rendering the UI of the plugin node, which allows interactions with node jobs, jobs runs, configuration and any other related tasks. + +## Installation + +### Requirements + +The `install.sh` script handles installing the specified tag of operator UI within the [tag file](./TAG). When executed, it downloads then moves the static assets of operator UI into the `core/web/assets` path. Then, when the plugin binary is built, these assets are included into the build that gets served. + +## Updates + +### Requirements + +- gh cli ^2.15.0 https://github.com/cli/cli/releases/tag/v2.15.0 +- jq ^1.6 https://stedolan.github.io/jq/ + +The `update.sh` script will check for the latest release from the `goplugin/operator-ui` repository, if the latest release is newer than the current tag, it'll update the [tag file](./TAG) with the corresponding latest tag. Checking for updates is automatically [handled by CI](../.github/workflows/operator-ui.yml), where any new detected updates will be pushed to a branch and have a PR opened against `develop`. + +See https://docs.github.com/en/rest/releases/releases#get-the-latest-release for how a latest release is determined. 
diff --git a/operator_ui/TAG b/operator_ui/TAG new file mode 100644 index 00000000..f5e57863 --- /dev/null +++ b/operator_ui/TAG @@ -0,0 +1 @@ +v0.8.0-8da47c3 diff --git a/operator_ui/check.sh b/operator_ui/check.sh new file mode 100644 index 00000000..a5a2840d --- /dev/null +++ b/operator_ui/check.sh @@ -0,0 +1,38 @@ +#!/bin/bash +set -e + +# Dependencies: +# gh cli ^2.15.0 https://github.com/cli/cli/releases/tag/v2.15.0 +# jq ^1.6 https://stedolan.github.io/jq/ + +repo=goplugin/operator-ui +gitRoot=$(git rev-parse --show-toplevel) +cd "$gitRoot/operator_ui" + +tag_file=TAG +current_tag=$(cat $tag_file) +echo "Currently pinned tag for $repo is $current_tag" + +echo "Getting latest release for tag for $repo" +release=$(gh release view -R $repo --json 'tagName,body') +latest_tag=$(echo "$release" | jq -r '.tagName') +body=$(echo "$release" | jq -r '.body') + +if [ "$current_tag" = "$latest_tag" ]; then + echo "Tag $current_tag is up to date." + exit 0 +else + echo "Tag $current_tag is out of date, updating $tag_file file to latest version..." 
+ echo "$latest_tag" >"$tag_file" + echo "Tag updated $current_tag -> $latest_tag" + if [ "$CI" ]; then + echo "current_tag=$current_tag" >>$GITHUB_OUTPUT + echo "latest_tag=$latest_tag" >>$GITHUB_OUTPUT + + # See https://github.com/orgs/community/discussions/26288#discussioncomment-3876281 + delimiter="$(openssl rand -hex 8)" + echo "body<<${delimiter}" >>"${GITHUB_OUTPUT}" + echo "$body" >>"${GITHUB_OUTPUT}" + echo "${delimiter}" >>"${GITHUB_OUTPUT}" + fi +fi diff --git a/operator_ui/install.sh b/operator_ui/install.sh new file mode 100644 index 00000000..4c92ad73 --- /dev/null +++ b/operator_ui/install.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +set -e + +owner=goplugin +repo=operator-ui +fullRepo=${owner}/${repo} +gitRoot=$(git rev-parse --show-toplevel || pwd) +cd "$gitRoot/operator_ui" +unpack_dir="$gitRoot/core/web/assets" +tag=$(cat TAG) +# Remove the version prefix "v" +strippedTag="${tag:1}" +# Taken from https://github.com/kennyp/asdf-golang/blob/master/lib/helpers.sh +msg() { + echo -e "\033[32m$1\033[39m" >&2 +} + +err() { + echo -e "\033[31m$1\033[39m" >&2 +} + +fail() { + err "$1" + exit 1 +} + +msg "Getting release $tag for $fullRepo" +# https://docs.github.com/en/rest/releases/releases#get-a-release-by-tag-name +asset_name=${owner}-${repo}-${strippedTag}.tgz +download_url=https://github.com/${fullRepo}/releases/download/${tag}/${asset_name} + +# Inspired from https://github.com/kennyp/asdf-golang/blob/master/bin/download#L29 +msg "Download URL: ${download_url}" +# Check if we're able to download first +http_code=$(curl -LIs -w '%{http_code}' -o /dev/null "$download_url") +if [ "$http_code" -eq 404 ] || [ "$http_code" -eq 403 ]; then + fail "URL: ${download_url} returned status ${http_code}" +fi +# Then go ahead if we get a success code +msg "Downloading ${fullRepo}:${tag} asset: $asset_name..." 
+msg "" +curl -L -o "$asset_name" "$download_url" + +msg "Unpacking asset $asset_name" +tar -xvzf "$asset_name" + +msg "" +msg "Removing old contents of $unpack_dir" +rm -rf "$unpack_dir" +msg "Copying contents of package/artifacts to $unpack_dir" +cp -rf package/artifacts/. "$unpack_dir" || true + +msg "Cleaning up" +rm -r package +rm "$asset_name" diff --git a/plugins/README.md b/plugins/README.md new file mode 100644 index 00000000..42798d5d --- /dev/null +++ b/plugins/README.md @@ -0,0 +1,54 @@ +# LOOP Plugins + +:warning: Experimental :warning: + +This directory supports Local-Out-Of-Process (LOOP) Plugins, an alternative node runtime where some systems execute in +separate processes, plug-in via [github.com/hashicorp/go-plugin](https://github.com/hashicorp/go-plugin), and +communicate via [GRPC](https://grpc.io). + +There are currently two kinds of plugins: Relayer plugins, and a Median product plugin. The [cmd](cmd) directory contains +some `package main`s while we transition, and they can be built via `make install-`. Solana & Starknet has been +moved to their respective repos, and all must be moved out of this module eventually. + +## How to use + +[plugin.Dockerfile](plugin.Dockerfile) extends the regular [core/plugin.Dockerfile](../core/plugin.Dockerfile) +to include the plugin binaries, and enables support by setting `CL_SOLANA_CMD`, `CL_STARKNET_CMD`, and `CL_MEDIAN_CMD`. +Either plugin can be disabled by un-setting the environment variable, which will revert to the original in-process runtime. +Images built from this Dockerfile can otherwise be used normally, provided that the [pre-requisites](#pre-requisites) have been met. + +### Pre-requisites + +#### Timeouts + +LOOPPs communicate over GRPC, which always includes a `context.Context` and requires realistic timeouts. Placeholder/dummy +values (e.g. `MaxDurationQuery = 0`) will not work and must be updated to realistic values. 
In lieu of reconfiguring already +deployed contracts on Solana, the environment variable `CL_MIN_OCR2_MAX_DURATION_QUERY` can be set to establish a new minimum +via libocr's [LocalConfig.MinOCR2MaxDurationQuery](https://pkg.go.dev/github.com/goplugin/libocr/offchainreporting2plus/types#LocalConfig). +If left unset, the default value is `100ms`. + +#### Prometheus + + +LOOPPs are dynamic, and so monitoring must be dynamic too. +We use plugin discovery to dynamically determine what to monitor based on what plugins are running, +and we route external prom scraping to the plugins without exposing them directly. + +The endpoints are + +`/discovery` : HTTP Service Discovery [https://prometheus.io/docs/prometheus/latest/configuration/configuration/#http_sd_config] +The Prometheus server is configured to poll this URL to discover new endpoints to monitor. The node serves the response based on what plugins are running. + +`/plugins/<name>/metrics`: The node acts as very thin middleware to route Prometheus server scrape requests to the individual plugin /metrics endpoint. +Once a plugin is discovered via the discovery mechanism above, the Prometheus service calls the target endpoint at the scrape interval. +The node acts as middleware to route the request to the /metrics endpoint of the requested plugin. + +The simplest change to monitor LOOPPs is to add a service discovery to the scrape configuration +- job_name: 'plugin_node' + ...
++ http_sd_configs: ++ - url: "http://127.0.0.1:6688/discovery" ++ refresh_interval: 30s + + +See the Prometheus documentation for full details [https://prometheus.io/docs/prometheus/latest/configuration/configuration/#http_sd_config] \ No newline at end of file diff --git a/plugins/cmd.go b/plugins/cmd.go new file mode 100644 index 00000000..9a312c53 --- /dev/null +++ b/plugins/cmd.go @@ -0,0 +1,27 @@ +package plugins + +import ( + "fmt" + "os/exec" +) + +// CmdConfig is configuration used to register the LOOP and generate an exec +type CmdConfig struct { + ID string // unique string used by the node to track the LOOP. typically supplied by the loop logger name + Cmd string // string value of executable to exec + Env []string // environment variables as described in [exec.Cmd.Env] +} + +// NewCmdFactory is helper to ensure synchronization between the loop registry and os cmd to exec the LOOP +func NewCmdFactory(register func(id string) (*RegisteredLoop, error), lcfg CmdConfig) (func() *exec.Cmd, error) { + registeredLoop, err := register(lcfg.ID) + if err != nil { + return nil, fmt.Errorf("failed to register %s LOOP plugin: %w", lcfg.ID, err) + } + return func() *exec.Cmd { + cmd := exec.Command(lcfg.Cmd) //#nosec G204 -- we control the value of the cmd so the lint/sec error is a false positive + cmd.Env = append(cmd.Env, lcfg.Env...) + cmd.Env = append(cmd.Env, registeredLoop.EnvCfg.AsCmdEnv()...) 
+ return cmd + }, nil +} diff --git a/plugins/cmd/plugin-medianpoc/main.go b/plugins/cmd/plugin-medianpoc/main.go new file mode 100644 index 00000000..408779c0 --- /dev/null +++ b/plugins/cmd/plugin-medianpoc/main.go @@ -0,0 +1,42 @@ +package main + +import ( + "github.com/hashicorp/go-plugin" + + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/loop/reportingplugins" + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/plugins/medianpoc" +) + +const ( + loggerName = "PluginMedianPoc" +) + +func main() { + s := loop.MustNewStartedServer(loggerName) + defer s.Stop() + + p := medianpoc.NewPlugin(s.Logger) + defer s.Logger.ErrorIfFn(p.Close, "Failed to close") + + s.MustRegister(p) + + stop := make(chan struct{}) + defer close(stop) + + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: reportingplugins.ReportingPluginHandshakeConfig(), + Plugins: map[string]plugin.Plugin{ + reportingplugins.PluginServiceName: &reportingplugins.GRPCService[types.MedianProvider]{ + PluginServer: p, + BrokerConfig: loop.BrokerConfig{ + Logger: s.Logger, + StopCh: stop, + GRPCOpts: s.GRPCOpts, + }, + }, + }, + GRPCServer: s.GRPCOpts.NewServer, + }) +} diff --git a/plugins/cmd/plugin-ocr3-capability/main.go b/plugins/cmd/plugin-ocr3-capability/main.go new file mode 100644 index 00000000..7e43904d --- /dev/null +++ b/plugins/cmd/plugin-ocr3-capability/main.go @@ -0,0 +1,44 @@ +package main + +import ( + "github.com/hashicorp/go-plugin" + + "github.com/goplugin/plugin-common/pkg/capabilities/consensus/ocr3" + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/loop/reportingplugins" + ocr3rp "github.com/goplugin/plugin-common/pkg/loop/reportingplugins/ocr3" + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/services/relay/evm" +) + +const ( + loggerName = "PluginOCR3Capability" +) + +func main() { + s := loop.MustNewStartedServer(loggerName) + defer 
s.Stop() + + p := ocr3.NewOCR3(s.Logger, evm.NewEVMEncoder) + defer s.Logger.ErrorIfFn(p.Close, "Failed to close") + + s.MustRegister(p) + + stop := make(chan struct{}) + defer close(stop) + + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: reportingplugins.ReportingPluginHandshakeConfig(), + Plugins: map[string]plugin.Plugin{ + ocr3rp.PluginServiceName: &ocr3rp.GRPCService[types.PluginProvider]{ + PluginServer: p, + BrokerConfig: loop.BrokerConfig{ + Logger: s.Logger, + StopCh: stop, + GRPCOpts: s.GRPCOpts, + }, + }, + }, + GRPCServer: s.GRPCOpts.NewServer, + }) +} diff --git a/plugins/cmd_test.go b/plugins/cmd_test.go new file mode 100644 index 00000000..7c34b768 --- /dev/null +++ b/plugins/cmd_test.go @@ -0,0 +1,51 @@ +package plugins + +import ( + "fmt" + "strings" + "testing" + + "github.com/goplugin/plugin-common/pkg/loop" +) + +func TestNewCmdFactory_RegisterSuccess(t *testing.T) { + mockRegister := func(id string) (*RegisteredLoop, error) { + return &RegisteredLoop{EnvCfg: loop.EnvConfig{}}, nil + } + + cmdConfig := CmdConfig{ + ID: "test-loop", + Cmd: "echo", + Env: []string{"TEST_ENV=1"}, + } + + cmdFactory, err := NewCmdFactory(mockRegister, cmdConfig) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + + cmd := cmdFactory() + if cmd.Args[0] != "echo" { + t.Errorf("Expected command 'echo', got %s", cmd.Args[0]) + } +} + +func TestNewCmdFactory_RegisterFail(t *testing.T) { + mockRegister := func(id string) (*RegisteredLoop, error) { + return nil, fmt.Errorf("registration failed") + } + + cmdConfig := CmdConfig{ + ID: "test-loop", + Cmd: "echo", + Env: []string{"TEST_ENV=1"}, + } + + _, err := NewCmdFactory(mockRegister, cmdConfig) + if err == nil { + t.Fatal("Expected error, got nil") + } + if !strings.Contains(err.Error(), "failed to register") { + t.Errorf("Unexpected error message: %v", err) + } +} diff --git a/plugins/env.go b/plugins/env.go new file mode 100644 index 00000000..016a4e86 --- /dev/null +++ b/plugins/env.go @@ -0,0 
+1,31 @@ +package plugins + +import ( + "os" + + "github.com/hashicorp/go-envparse" +) + +// ParseEnvFile returns a slice of key/value pairs parsed from the file at filepath. +// As a special case, empty filepath returns nil without error. +func ParseEnvFile(filepath string) ([]string, error) { + if filepath == "" { + return nil, nil + } + f, err := os.Open(filepath) + if err != nil { + return nil, err + } + defer func() { + _ = f.Close() + }() + m, err := envparse.Parse(f) + if err != nil { + return nil, err + } + r := make([]string, 0, len(m)) + for k, v := range m { + r = append(r, k+"="+v) + } + return r, nil +} diff --git a/plugins/env_test.go b/plugins/env_test.go new file mode 100644 index 00000000..6dd171b6 --- /dev/null +++ b/plugins/env_test.go @@ -0,0 +1,25 @@ +package plugins + +import ( + _ "embed" + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseEnvFile(t *testing.T) { + t.Run("valid", func(t *testing.T) { + got, err := ParseEnvFile("testdata/valid.env") + require.NoError(t, err) + require.Equal(t, []string{"GOMEMLIMIT=1MiB"}, got) + }) + t.Run("invalid", func(t *testing.T) { + _, err := ParseEnvFile("testdata/invalid.env") + require.Error(t, err) + }) + t.Run("missing", func(t *testing.T) { + _, err := ParseEnvFile("testdata/missing.env") + require.ErrorIs(t, err, os.ErrNotExist) + }) +} diff --git a/plugins/loop_registry.go b/plugins/loop_registry.go new file mode 100644 index 00000000..44fec877 --- /dev/null +++ b/plugins/loop_registry.go @@ -0,0 +1,95 @@ +package plugins + +import ( + "errors" + "fmt" + "sort" + "sync" + + "github.com/hashicorp/consul/sdk/freeport" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/loop" + + "github.com/goplugin/pluginv3.0/v2/core/config" +) + +var ErrExists = errors.New("plugin already registered") + +type RegisteredLoop struct { + Name string + EnvCfg loop.EnvConfig +} + +// LoopRegistry is responsible for assigning ports to plugins that 
are to be used for the +// plugin's prometheus HTTP server, and for passing the tracing configuration to the plugin. +type LoopRegistry struct { + mu sync.Mutex + registry map[string]*RegisteredLoop + + lggr logger.Logger + cfgTracing config.Tracing +} + +func NewLoopRegistry(lggr logger.Logger, tracingConfig config.Tracing) *LoopRegistry { + return &LoopRegistry{ + registry: map[string]*RegisteredLoop{}, + lggr: logger.Named(lggr, "LoopRegistry"), + cfgTracing: tracingConfig, + } +} + +// Register creates a port of the plugin. It is not idempotent. Duplicate calls to Register will return [ErrExists] +// Safe for concurrent use. +func (m *LoopRegistry) Register(id string) (*RegisteredLoop, error) { + m.mu.Lock() + defer m.mu.Unlock() + + if _, exists := m.registry[id]; exists { + return nil, ErrExists + } + ports, err := freeport.Take(1) + if err != nil { + return nil, fmt.Errorf("failed to get free port: %v", err) + } + if len(ports) != 1 { + return nil, fmt.Errorf("failed to get free port: no ports returned") + } + envCfg := loop.EnvConfig{PrometheusPort: ports[0]} + + if m.cfgTracing != nil { + envCfg.TracingEnabled = m.cfgTracing.Enabled() + envCfg.TracingCollectorTarget = m.cfgTracing.CollectorTarget() + envCfg.TracingSamplingRatio = m.cfgTracing.SamplingRatio() + envCfg.TracingTLSCertPath = m.cfgTracing.TLSCertPath() + envCfg.TracingAttributes = m.cfgTracing.Attributes() + } + + m.registry[id] = &RegisteredLoop{Name: id, EnvCfg: envCfg} + m.lggr.Debugf("Registered loopp %q with config %v, port %d", id, envCfg, envCfg.PrometheusPort) + return m.registry[id], nil +} + +// Return slice sorted by plugin name. Safe for concurrent use. 
+func (m *LoopRegistry) List() []*RegisteredLoop { + var registeredLoops []*RegisteredLoop + m.mu.Lock() + for _, known := range m.registry { + registeredLoops = append(registeredLoops, known) + } + m.mu.Unlock() + + sort.Slice(registeredLoops, func(i, j int) bool { + return registeredLoops[i].Name < registeredLoops[j].Name + }) + return registeredLoops +} + +// Get plugin by id. Safe for concurrent use. +func (m *LoopRegistry) Get(id string) (*RegisteredLoop, bool) { + m.mu.Lock() + defer m.mu.Unlock() + + p, exists := m.registry[id] + return p, exists +} diff --git a/plugins/loop_registry_test.go b/plugins/loop_registry_test.go new file mode 100644 index 00000000..3aa125b7 --- /dev/null +++ b/plugins/loop_registry_test.go @@ -0,0 +1,62 @@ +package plugins + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +func TestPluginPortManager(t *testing.T) { + // register one + m := NewLoopRegistry(logger.TestLogger(t), nil) + pFoo, err := m.Register("foo") + require.NoError(t, err) + require.Equal(t, "foo", pFoo.Name) + require.Greater(t, pFoo.EnvCfg.PrometheusPort, 0) + // test duplicate + pNil, err := m.Register("foo") + require.ErrorIs(t, err, ErrExists) + require.Nil(t, pNil) + // ensure increasing port assignment + pBar, err := m.Register("bar") + require.NoError(t, err) + require.Equal(t, "bar", pBar.Name) + require.Equal(t, pFoo.EnvCfg.PrometheusPort+1, pBar.EnvCfg.PrometheusPort) +} + +// Mock tracing config +type MockCfgTracing struct{} + +func (m *MockCfgTracing) Attributes() map[string]string { + return map[string]string{"attribute": "value"} +} +func (m *MockCfgTracing) Enabled() bool { return true } +func (m *MockCfgTracing) NodeID() string { return "" } +func (m *MockCfgTracing) CollectorTarget() string { return "http://localhost:9000" } +func (m *MockCfgTracing) SamplingRatio() float64 { return 0.1 } +func (m *MockCfgTracing) TLSCertPath() string { return "/path/to/cert.pem" } +func (m 
*MockCfgTracing) Mode() string { return "tls" } + +func TestLoopRegistry_Register(t *testing.T) { + mockCfgTracing := &MockCfgTracing{} + registry := make(map[string]*RegisteredLoop) + + // Create a LoopRegistry instance with mockCfgTracing + loopRegistry := &LoopRegistry{ + lggr: logger.TestLogger(t), + registry: registry, + cfgTracing: mockCfgTracing, + } + + // Test case 1: Register new loop + registeredLoop, err := loopRegistry.Register("testID") + require.Nil(t, err) + require.Equal(t, "testID", registeredLoop.Name) + require.True(t, registeredLoop.EnvCfg.TracingEnabled) + require.Equal(t, "http://localhost:9000", registeredLoop.EnvCfg.TracingCollectorTarget) + require.Equal(t, map[string]string{"attribute": "value"}, registeredLoop.EnvCfg.TracingAttributes) + require.Equal(t, 0.1, registeredLoop.EnvCfg.TracingSamplingRatio) + require.Equal(t, "/path/to/cert.pem", registeredLoop.EnvCfg.TracingTLSCertPath) +} diff --git a/plugins/medianpoc/data_source.go b/plugins/medianpoc/data_source.go new file mode 100644 index 00000000..2c422117 --- /dev/null +++ b/plugins/medianpoc/data_source.go @@ -0,0 +1,79 @@ +package medianpoc + +import ( + "context" + "errors" + "fmt" + "math/big" + "sync" + "time" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/pluginv3.0/v2/core/bridges" + "github.com/goplugin/pluginv3.0/v2/core/utils" +) + +type DataSource struct { + pipelineRunner types.PipelineRunnerService + spec string + lggr logger.Logger + + current bridges.BridgeMetaData + mu sync.RWMutex +} + +func (d *DataSource) Observe(ctx context.Context, reportTimestamp ocrtypes.ReportTimestamp) (*big.Int, error) { + md, err := bridges.MarshalBridgeMetaData(d.currentAnswer()) + if err != nil { + d.lggr.Warnw("unable to attach metadata for run", "err", err) + } + + // NOTE: job metadata is automatically attached by the pipeline runner service 
+ vars := types.Vars{ + Vars: map[string]interface{}{ + "jobRun": md, + }, + } + + results, err := d.pipelineRunner.ExecuteRun(ctx, d.spec, vars, types.Options{}) + if err != nil { + return nil, err + } + + finalResults := results.FinalResults() + if len(finalResults) == 0 { + return nil, errors.New("pipeline execution failed: not enough results") + } + + finalResult := finalResults[0] + if finalResult.Error != nil { + return nil, fmt.Errorf("pipeline execution failed: %w", finalResult.Error) + } + + asDecimal, err := utils.ToDecimal(finalResult.Value) + if err != nil { + return nil, errors.New("cannot convert observation to decimal") + } + + resultAsBigInt := asDecimal.BigInt() + d.updateAnswer(resultAsBigInt) + return resultAsBigInt, nil +} + +func (d *DataSource) currentAnswer() (*big.Int, *big.Int) { + d.mu.RLock() + defer d.mu.RUnlock() + return d.current.LatestAnswer, d.current.UpdatedAt +} + +func (d *DataSource) updateAnswer(latestAnswer *big.Int) { + d.mu.Lock() + defer d.mu.Unlock() + d.current = bridges.BridgeMetaData{ + LatestAnswer: latestAnswer, + UpdatedAt: big.NewInt(time.Now().Unix()), + } +} diff --git a/plugins/medianpoc/data_source_test.go b/plugins/medianpoc/data_source_test.go new file mode 100644 index 00000000..7f91631a --- /dev/null +++ b/plugins/medianpoc/data_source_test.go @@ -0,0 +1,117 @@ +package medianpoc + +import ( + "context" + "errors" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/utils/tests" + "github.com/goplugin/pluginv3.0/v2/core/logger" + + "github.com/goplugin/plugin-common/pkg/types" +) + +type mockPipelineRunner struct { + results types.TaskResults + err error + spec string + vars types.Vars + options types.Options +} + +func (m *mockPipelineRunner) ExecuteRun(ctx context.Context, spec string, vars types.Vars, options types.Options) 
(types.TaskResults, error) { + m.spec = spec + m.vars = vars + m.options = options + return m.results, m.err +} + +func TestDataSource(t *testing.T) { + lggr := logger.TestLogger(t) + expect := int64(3) + pr := &mockPipelineRunner{ + results: types.TaskResults{ + { + TaskValue: types.TaskValue{ + Value: expect, + Error: nil, + IsTerminal: true, + }, + Index: 2, + }, + { + TaskValue: types.TaskValue{ + Value: int(4), + Error: nil, + IsTerminal: false, + }, + Index: 1, + }, + }, + } + spec := "SPEC" + ds := &DataSource{ + pipelineRunner: pr, + spec: spec, + lggr: lggr, + } + res, err := ds.Observe(tests.Context(t), ocrtypes.ReportTimestamp{}) + require.NoError(t, err) + assert.Equal(t, big.NewInt(expect), res) + assert.Equal(t, spec, pr.spec) + assert.Equal(t, big.NewInt(expect), ds.current.LatestAnswer) +} + +func TestDataSource_ResultErrors(t *testing.T) { + lggr := logger.TestLogger(t) + pr := &mockPipelineRunner{ + results: types.TaskResults{ + { + TaskValue: types.TaskValue{ + Error: errors.New("something went wrong"), + IsTerminal: true, + }, + Index: 0, + }, + }, + } + spec := "SPEC" + ds := &DataSource{ + pipelineRunner: pr, + spec: spec, + lggr: lggr, + } + _, err := ds.Observe(tests.Context(t), ocrtypes.ReportTimestamp{}) + assert.ErrorContains(t, err, "something went wrong") +} + +func TestDataSource_ResultNotAnInt(t *testing.T) { + lggr := logger.TestLogger(t) + + expect := "string-result" + pr := &mockPipelineRunner{ + results: types.TaskResults{ + { + TaskValue: types.TaskValue{ + Value: expect, + IsTerminal: true, + }, + Index: 0, + }, + }, + } + spec := "SPEC" + ds := &DataSource{ + pipelineRunner: pr, + spec: spec, + lggr: lggr, + } + _, err := ds.Observe(tests.Context(t), ocrtypes.ReportTimestamp{}) + assert.ErrorContains(t, err, "cannot convert observation to decimal") +} diff --git a/plugins/medianpoc/plugin.go b/plugins/medianpoc/plugin.go new file mode 100644 index 00000000..dedd7b0e --- /dev/null +++ b/plugins/medianpoc/plugin.go @@ -0,0 +1,131 
@@ +package medianpoc + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/goplugin/libocr/offchainreporting2/reportingplugin/median" + + ocrtypes "github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/logger" + "github.com/goplugin/plugin-common/pkg/loop" + "github.com/goplugin/plugin-common/pkg/loop/reportingplugins" + "github.com/goplugin/plugin-common/pkg/services" + "github.com/goplugin/plugin-common/pkg/types" +) + +func NewPlugin(lggr logger.Logger) *Plugin { + return &Plugin{ + Plugin: loop.Plugin{Logger: lggr}, + MedianProviderServer: reportingplugins.MedianProviderServer{}, + stop: make(services.StopChan), + } +} + +type Plugin struct { + loop.Plugin + stop services.StopChan + reportingplugins.MedianProviderServer +} + +type pipelineSpec struct { + Name string `json:"name"` + Spec string `json:"spec"` +} + +type jsonConfig struct { + Pipelines []pipelineSpec `json:"pipelines"` +} + +func (j jsonConfig) defaultPipeline() (string, error) { + return j.getPipeline("__DEFAULT_PIPELINE__") +} + +func (j jsonConfig) getPipeline(key string) (string, error) { + for _, v := range j.Pipelines { + if v.Name == key { + return v.Spec, nil + } + } + return "", fmt.Errorf("no pipeline found for %s", key) +} + +func (p *Plugin) NewReportingPluginFactory( + ctx context.Context, + config types.ReportingPluginServiceConfig, + provider types.MedianProvider, + pipelineRunner types.PipelineRunnerService, + telemetry types.TelemetryClient, + errorLog types.ErrorLog, +) (types.ReportingPluginFactory, error) { + f, err := p.newFactory(ctx, config, provider, pipelineRunner, telemetry, errorLog) + if err != nil { + return nil, err + } + s := &reportingPluginFactoryService{lggr: p.Logger, ReportingPluginFactory: f} + p.SubService(s) + return s, nil +} + +func (p *Plugin) newFactory(ctx context.Context, config types.ReportingPluginServiceConfig, provider types.MedianProvider, pipelineRunner types.PipelineRunnerService, 
telemetry types.TelemetryClient, errorLog types.ErrorLog) (*median.NumericalMedianFactory, error) { + jc := &jsonConfig{} + err := json.Unmarshal([]byte(config.PluginConfig), jc) + if err != nil { + return nil, err + } + + dp, err := jc.defaultPipeline() + if err != nil { + return nil, err + } + ds := &DataSource{ + pipelineRunner: pipelineRunner, + spec: dp, + lggr: p.Logger, + } + + jfp, err := jc.getPipeline("juelsPerFeeCoinPipeline") + if err != nil { + return nil, err + } + jds := &DataSource{ + pipelineRunner: pipelineRunner, + spec: jfp, + lggr: p.Logger, + } + factory := &median.NumericalMedianFactory{ + ContractTransmitter: provider.MedianContract(), + DataSource: ds, + JuelsPerFeeCoinDataSource: jds, + Logger: logger.NewOCRWrapper( + p.Logger, + true, + func(msg string) {}, + ), + OnchainConfigCodec: provider.OnchainConfigCodec(), + ReportCodec: provider.ReportCodec(), + } + return factory, nil +} + +type reportingPluginFactoryService struct { + services.StateMachine + lggr logger.Logger + ocrtypes.ReportingPluginFactory +} + +func (r *reportingPluginFactoryService) Name() string { return r.lggr.Name() } + +func (r *reportingPluginFactoryService) Start(ctx context.Context) error { + return r.StartOnce("ReportingPluginFactory", func() error { return nil }) +} + +func (r *reportingPluginFactoryService) Close() error { + return r.StopOnce("ReportingPluginFactory", func() error { return nil }) +} + +func (r *reportingPluginFactoryService) HealthReport() map[string]error { + return map[string]error{r.Name(): r.Healthy()} +} diff --git a/plugins/medianpoc/plugin_test.go b/plugins/medianpoc/plugin_test.go new file mode 100644 index 00000000..bc65ec27 --- /dev/null +++ b/plugins/medianpoc/plugin_test.go @@ -0,0 +1,113 @@ +package medianpoc + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/libocr/offchainreporting2/reportingplugin/median" + ocrtypes 
"github.com/goplugin/libocr/offchainreporting2plus/types" + + "github.com/goplugin/plugin-common/pkg/types" + "github.com/goplugin/plugin-common/pkg/utils/tests" + + "github.com/goplugin/pluginv3.0/v2/core/logger" +) + +type mockErrorLog struct { + types.ErrorLog +} + +type mockOffchainConfigDigester struct { + ocrtypes.OffchainConfigDigester +} + +type mockContractTransmitter struct { + ocrtypes.ContractTransmitter +} + +type mockContractConfigTracker struct { + ocrtypes.ContractConfigTracker +} + +type mockReportCodec struct { + median.ReportCodec +} + +type mockMedianContract struct { + median.MedianContract +} + +type mockOnchainConfigCodec struct { + median.OnchainConfigCodec +} + +type provider struct { + types.Service +} + +func (p provider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + return mockOffchainConfigDigester{} +} + +func (p provider) ContractTransmitter() ocrtypes.ContractTransmitter { + return mockContractTransmitter{} +} + +func (p provider) ContractConfigTracker() ocrtypes.ContractConfigTracker { + return mockContractConfigTracker{} +} + +func (p provider) ReportCodec() median.ReportCodec { + return mockReportCodec{} +} + +func (p provider) MedianContract() median.MedianContract { + return mockMedianContract{} +} + +func (p provider) OnchainConfigCodec() median.OnchainConfigCodec { + return mockOnchainConfigCodec{} +} + +func (p provider) ChainReader() types.ChainReader { + return nil +} + +func (p provider) Codec() types.Codec { + return nil +} + +func TestNewPlugin(t *testing.T) { + lggr := logger.TestLogger(t) + p := NewPlugin(lggr) + + defaultSpec := "default-spec" + juelsPerFeeCoinSpec := "jpfc-spec" + config := types.ReportingPluginServiceConfig{ + PluginConfig: fmt.Sprintf( + `{"pipelines": [{"name": "__DEFAULT_PIPELINE__", "spec": "%s"},{"name": "juelsPerFeeCoinPipeline", "spec": "%s"}]}`, + defaultSpec, + juelsPerFeeCoinSpec, + ), + } + pr := &mockPipelineRunner{} + prov := provider{} + + f, err := p.newFactory( + 
tests.Context(t), + config, + prov, + pr, + nil, + mockErrorLog{}, + ) + require.NoError(t, err) + + ds := f.DataSource.(*DataSource) + assert.Equal(t, defaultSpec, ds.spec) + jpfcDs := f.JuelsPerFeeCoinDataSource.(*DataSource) + assert.Equal(t, juelsPerFeeCoinSpec, jpfcDs.spec) +} diff --git a/plugins/plugin.Dockerfile b/plugins/plugin.Dockerfile new file mode 100644 index 00000000..181fc2bd --- /dev/null +++ b/plugins/plugin.Dockerfile @@ -0,0 +1,97 @@ +# Build image: Plugin binary +FROM golang:1.21-bullseye as buildgo +RUN go version +WORKDIR /plugin + +COPY GNUmakefile VERSION ./ +COPY tools/bin/ldflags ./tools/bin/ + +ADD go.mod go.sum ./ +RUN go mod download + +# Env vars needed for plugin build +ARG COMMIT_SHA + +COPY . . + +# Build the golang binaries +RUN make install-plugin + +# Install medianpoc binary +RUN make install-medianpoc + +# Install ocr3-capability binary +RUN make install-ocr3-capability + +# Link LOOP Plugin source dirs with simple names +RUN go list -m -f "{{.Dir}}" github.com/goplugin/plugin-feeds | xargs -I % ln -s % /plugin-feeds +RUN go list -m -f "{{.Dir}}" github.com/goplugin/plugin-data-streams | xargs -I % ln -s % /plugin-data-streams +RUN go list -m -f "{{.Dir}}" github.com/goplugin/plugin-solana | xargs -I % ln -s % /plugin-solana +RUN mkdir /plugin-starknet +RUN go list -m -f "{{.Dir}}" github.com/goplugin/plugin-starknet/relayer | xargs -I % ln -s % /plugin-starknet/relayer + +# Build image: Plugins +FROM golang:1.21-bullseye as buildplugins +RUN go version + +WORKDIR /plugin-feeds +COPY --from=buildgo /plugin-feeds . +RUN go install ./cmd/plugin-feeds + +WORKDIR /plugin-data-streams +COPY --from=buildgo /plugin-data-streams . +RUN go install ./mercury/cmd/plugin-mercury + +WORKDIR /plugin-solana +COPY --from=buildgo /plugin-solana . +RUN go install ./pkg/solana/cmd/plugin-solana + +WORKDIR /plugin-starknet/relayer +COPY --from=buildgo /plugin-starknet/relayer . 
+RUN go install ./pkg/plugin/cmd/plugin-starknet + +# Final image: ubuntu with plugin binary +FROM ubuntu:20.04 + +ARG PLUGIN_USER=root +ENV DEBIAN_FRONTEND noninteractive +RUN apt-get update && apt-get install -y ca-certificates gnupg lsb-release curl + +# Install Postgres for CLI tools, needed specifically for DB backups +RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + && echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" |tee /etc/apt/sources.list.d/pgdg.list \ + && apt-get update && apt-get install -y postgresql-client-14 \ + && apt-get clean all + +COPY --from=buildgo /go/bin/plugin /usr/local/bin/ +COPY --from=buildgo /go/bin/plugin-medianpoc /usr/local/bin/ +COPY --from=buildgo /go/bin/plugin-ocr3-capability /usr/local/bin/ + +COPY --from=buildplugins /go/bin/plugin-feeds /usr/local/bin/ +ENV CL_MEDIAN_CMD plugin-feeds +COPY --from=buildplugins /go/bin/plugin-mercury /usr/local/bin/ +ENV CL_MERCURY_CMD plugin-mercury +COPY --from=buildplugins /go/bin/plugin-solana /usr/local/bin/ +ENV CL_SOLANA_CMD plugin-solana +COPY --from=buildplugins /go/bin/plugin-starknet /usr/local/bin/ +ENV CL_STARKNET_CMD plugin-starknet + +# Dependency of CosmWasm/wasmd +COPY --from=buildgo /go/pkg/mod/github.com/\!cosm\!wasm/wasmvm@v*/internal/api/libwasmvm.*.so /usr/lib/ +RUN chmod 755 /usr/lib/libwasmvm.*.so + +RUN if [ ${PLUGIN_USER} != root ]; then \ + useradd --uid 14933 --create-home ${PLUGIN_USER}; \ + fi +USER ${PLUGIN_USER} +WORKDIR /home/${PLUGIN_USER} +# explicit set the cache dir. 
needed so both root and non-root user has an explicit location
+ENV XDG_CACHE_HOME /home/${PLUGIN_USER}/.cache
+RUN mkdir -p ${XDG_CACHE_HOME}
+
+EXPOSE 6688
+ENTRYPOINT ["plugin"]
+
+HEALTHCHECK CMD curl -f http://localhost:6688/health || exit 1
+
+CMD ["local", "node"]
diff --git a/plugins/registrar.go b/plugins/registrar.go
new file mode 100644
index 00000000..134f640b
--- /dev/null
+++ b/plugins/registrar.go
@@ -0,0 +1,36 @@
+package plugins
+
+import (
+ "os/exec"
+
+ "github.com/goplugin/plugin-common/pkg/loop"
+)
+
+// RegistrarConfig contains the static configuration needed to register LOOPs.
+type RegistrarConfig interface {
+ RegisterLOOP(config CmdConfig) (func() *exec.Cmd, loop.GRPCOpts, error)
+}
+
+type registarConfig struct {
+ grpcOpts loop.GRPCOpts
+ loopRegistrationFn func(loopId string) (*RegisteredLoop, error)
+}
+
+// NewRegistrarConfig creates a RegistrarConfig.
+// loopRegistrationFn must act as a global registry function of LOOPs and must be idempotent.
+// The [func() *exec.Cmd] for a LOOP should be generated by calling [RegistrarConfig.RegisterLOOP]
+func NewRegistrarConfig(grpcOpts loop.GRPCOpts, loopRegistrationFn func(loopId string) (*RegisteredLoop, error)) RegistrarConfig {
+ return &registarConfig{
+ grpcOpts: grpcOpts,
+ loopRegistrationFn: loopRegistrationFn,
+ }
+}
+
+// RegisterLOOP calls the configured loopRegistrationFn. The loopRegistrationFn must act as a global registry for LOOPs and must be idempotent. 
+func (pc *registarConfig) RegisterLOOP(cfg CmdConfig) (func() *exec.Cmd, loop.GRPCOpts, error) {
+ cmdFn, err := NewCmdFactory(pc.loopRegistrationFn, cfg)
+ if err != nil {
+ return nil, loop.GRPCOpts{}, err
+ }
+ return cmdFn, pc.grpcOpts, nil
+}
diff --git a/plugins/testdata/invalid.env b/plugins/testdata/invalid.env
new file mode 100644
index 00000000..b2f2407b
--- /dev/null
+++ b/plugins/testdata/invalid.env
@@ -0,0 +1,2 @@
+FOO BAR
+Baz: "Value"
diff --git a/plugins/testdata/valid.env b/plugins/testdata/valid.env
new file mode 100644
index 00000000..5a73d037
--- /dev/null
+++ b/plugins/testdata/valid.env
@@ -0,0 +1 @@
+GOMEMLIMIT=1MiB
diff --git a/shell.nix b/shell.nix
new file mode 100644
index 00000000..7a34c243
--- /dev/null
+++ b/shell.nix
@@ -0,0 +1,51 @@
+{ pkgs ? import <nixpkgs> { } }:
+with pkgs;
+let
+ go = go_1_21;
+ postgresql = postgresql_14;
+ nodejs = nodejs-18_x;
+ nodePackages = pkgs.nodePackages.override { inherit nodejs; };
+in
+mkShell {
+ nativeBuildInputs = [
+ go
+
+ postgresql
+
+ python3
+ python3Packages.pip
+
+ curl
+ nodejs
+ nodePackages.pnpm
+ # TODO: compiler / gcc for secp compilation
+ go-ethereum # geth
+ # parity # openethereum
+ go-mockery
+
+ # tooling
+ gotools
+ gopls
+ delve
+ golangci-lint
+ github-cli
+ jq
+
+ # deployment
+ devspace
+ kubectl
+ kubernetes-helm
+
+ # gofuzz
+ ] ++ lib.optionals stdenv.isLinux [
+ # some dependencies needed for node-gyp on pnpm install
+ pkg-config
+ libudev-zero
+ libusb1
+ ];
+ LD_LIBRARY_PATH = "${stdenv.cc.cc.lib}/lib64:$LD_LIBRARY_PATH";
+ GOROOT = "${go}/share/go";
+
+ PGDATA = "db";
+ CL_DATABASE_URL = "postgresql://plugin:plugin@localhost:5432/plugin_test?sslmode=disable";
+}
diff --git a/sonar-project.properties b/sonar-project.properties
new file mode 100644
index 00000000..0bc1bf0b
--- /dev/null
+++ b/sonar-project.properties
@@ -0,0 +1,64 @@
+sonar.projectKey=goplugin_plugin
+sonar.sources=. 
+sonar.sourceEncoding=UTF-8 +sonar.python.version=3.8 + +# Full exclusions from the static analysis +sonar.exclusions=\ +**/node_modules/**/*,\ +**/mocks/**/*,\ +**/testdata/**/*,\ +**/contracts/typechain/**/*,\ +**/contracts/artifacts/**/*,\ +**/contracts/cache/**/*,\ +**/contracts/scripts/**/*,\ +**/generated/**/*,\ +**/fixtures/**/*,\ +**/testutils/**/*,\ +**/gen/**/*,\ +**/testfiles/**/*,\ +**/testconfig/**/*,\ +**/core/web/assets/**/*,\ +**/core/scripts/**/*,\ +**/docs/**/*,\ +**/tools/**/*,\ +**/fuzz/**/*,\ +**/*.pb.go,\ +**/*report.xml,\ +**/*.config.ts,\ +**/*.txt,\ +**/*.abi,\ +**/*.bin,\ +**/*_codecgen.go,\ +**/*_gen.go,\ +**/tsconfig.json,\ +**/debug.go + +# Coverage exclusions +sonar.coverage.exclusions=\ +**/*.test.ts,\ +**/*_test.go,\ +**/contracts/test/**/*,\ +**/contracts/**/tests/**/*,\ +**/core/**/cltest/**/*,\ +**/integration-tests/**/*,\ +**/plugins/**/*,\ +**/main.go,\ +**/0195_add_not_null_to_evm_chain_id_in_job_specs.go,\ +**/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams.go + +# Duplication exclusions: mercury excluded because current MercuryProvider and Factory APIs are inherently duplicated due to embedded versioning +sonar.cpd.exclusions=\ +**/contracts/**/*.sol,\ +**/config.go,\ +**/core/services/ocr2/plugins/ocr2keeper/evm/**/*,\ +**/core/services/ocr2/plugins/mercury/plugin.go + +# Tests' root folder, inclusions (tests to check and count) and exclusions +sonar.tests=. 
+sonar.test.inclusions=\ +**/*_test.go,\ +**/*.test.ts +sonar.test.exclusions=\ +**/integration-tests/**/*,\ +**/charts/plugin-cluster/dashboard/cmd/**/* diff --git a/test.txt b/test.txt new file mode 100644 index 00000000..e69de29b diff --git a/testdata/scripts/admin/chpass/help.txtar b/testdata/scripts/admin/chpass/help.txtar new file mode 100644 index 00000000..584b3832 --- /dev/null +++ b/testdata/scripts/admin/chpass/help.txtar @@ -0,0 +1,9 @@ +exec plugin admin chpass --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin admin chpass - Change your API password remotely + +USAGE: + plugin admin chpass [arguments...] diff --git a/testdata/scripts/admin/help.txtar b/testdata/scripts/admin/help.txtar new file mode 100644 index 00000000..787e5f38 --- /dev/null +++ b/testdata/scripts/admin/help.txtar @@ -0,0 +1,21 @@ +exec plugin admin --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin admin - Commands for remotely taking admin related actions + +USAGE: + plugin admin command [command options] [arguments...] + +COMMANDS: + chpass Change your API password remotely + login Login to remote client by creating a session cookie + logout Delete any local sessions + profile Collects profile metrics from the node. + status Displays the health of various services running inside the node. + users Create, edit permissions, or delete API users + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/admin/login/help.txtar b/testdata/scripts/admin/login/help.txtar new file mode 100644 index 00000000..53370b62 --- /dev/null +++ b/testdata/scripts/admin/login/help.txtar @@ -0,0 +1,14 @@ +exec plugin admin login --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin admin login - Login to remote client by creating a session cookie + +USAGE: + plugin admin login [command options] [arguments...] 
+ +OPTIONS: + --file value, -f value text file holding the API email and password needed to create a session cookie + --bypass-version-check Bypass versioning check for compatibility of remote node + diff --git a/testdata/scripts/admin/logout/help.txtar b/testdata/scripts/admin/logout/help.txtar new file mode 100644 index 00000000..36ec39fe --- /dev/null +++ b/testdata/scripts/admin/logout/help.txtar @@ -0,0 +1,9 @@ +exec plugin admin logout --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin admin logout - Delete any local sessions + +USAGE: + plugin admin logout [arguments...] diff --git a/testdata/scripts/admin/profile/help.txtar b/testdata/scripts/admin/profile/help.txtar new file mode 100644 index 00000000..25810aa9 --- /dev/null +++ b/testdata/scripts/admin/profile/help.txtar @@ -0,0 +1,14 @@ +exec plugin admin profile --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin admin profile - Collects profile metrics from the node. + +USAGE: + plugin admin profile [command options] [arguments...] + +OPTIONS: + --seconds value, -s value duration of profile capture (default: 8) + --output_dir value, -o value output directory of the captured profile (default: "/tmp/") + diff --git a/testdata/scripts/admin/status/help.txtar b/testdata/scripts/admin/status/help.txtar new file mode 100644 index 00000000..56daef3a --- /dev/null +++ b/testdata/scripts/admin/status/help.txtar @@ -0,0 +1,9 @@ +exec plugin admin status --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin admin status - Displays the health of various services running inside the node. + +USAGE: + plugin admin status [arguments...] 
diff --git a/testdata/scripts/admin/users/chrole/help.txtar b/testdata/scripts/admin/users/chrole/help.txtar new file mode 100644 index 00000000..a81e88a4 --- /dev/null +++ b/testdata/scripts/admin/users/chrole/help.txtar @@ -0,0 +1,14 @@ +exec plugin admin users chrole --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin admin users chrole - Changes an API user's role + +USAGE: + plugin admin users chrole [command options] [arguments...] + +OPTIONS: + --email value email of user to be edited + --new-role value, --newrole value new permission level role to set for user. Options: 'admin', 'edit', 'run', 'view'. + diff --git a/testdata/scripts/admin/users/create/help.txtar b/testdata/scripts/admin/users/create/help.txtar new file mode 100644 index 00000000..c0a8da01 --- /dev/null +++ b/testdata/scripts/admin/users/create/help.txtar @@ -0,0 +1,14 @@ +exec plugin admin users create --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin admin users create - Create a new API user + +USAGE: + plugin admin users create [command options] [arguments...] + +OPTIONS: + --email value Email of new user to create + --role value Permission level of new user. Options: 'admin', 'edit', 'run', 'view'. + diff --git a/testdata/scripts/admin/users/delete/help.txtar b/testdata/scripts/admin/users/delete/help.txtar new file mode 100644 index 00000000..1a01a104 --- /dev/null +++ b/testdata/scripts/admin/users/delete/help.txtar @@ -0,0 +1,13 @@ +exec plugin admin users delete --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin admin users delete - Delete an API user + +USAGE: + plugin admin users delete [command options] [arguments...] 
+ +OPTIONS: + --email value Email of API user to delete + diff --git a/testdata/scripts/admin/users/help.txtar b/testdata/scripts/admin/users/help.txtar new file mode 100644 index 00000000..b7229cda --- /dev/null +++ b/testdata/scripts/admin/users/help.txtar @@ -0,0 +1,19 @@ +exec plugin admin users --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin admin users - Create, edit permissions, or delete API users + +USAGE: + plugin admin users command [command options] [arguments...] + +COMMANDS: + list Lists all API users and their roles + create Create a new API user + chrole Changes an API user's role + delete Delete an API user + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/admin/users/list/help.txtar b/testdata/scripts/admin/users/list/help.txtar new file mode 100644 index 00000000..e76b1024 --- /dev/null +++ b/testdata/scripts/admin/users/list/help.txtar @@ -0,0 +1,9 @@ +exec plugin admin users list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin admin users list - Lists all API users and their roles + +USAGE: + plugin admin users list [arguments...] diff --git a/testdata/scripts/attempts/help.txtar b/testdata/scripts/attempts/help.txtar new file mode 100644 index 00000000..3514accc --- /dev/null +++ b/testdata/scripts/attempts/help.txtar @@ -0,0 +1,16 @@ +exec plugin attempts --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin attempts - Commands for managing Ethereum Transaction Attempts + +USAGE: + plugin attempts command [command options] [arguments...] 
+ +COMMANDS: + list List the Transaction Attempts in descending order + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/attempts/list/help.txtar b/testdata/scripts/attempts/list/help.txtar new file mode 100644 index 00000000..d7e7cc39 --- /dev/null +++ b/testdata/scripts/attempts/list/help.txtar @@ -0,0 +1,13 @@ +exec plugin attempts list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin attempts list - List the Transaction Attempts in descending order + +USAGE: + plugin attempts list [command options] [arguments...] + +OPTIONS: + --page value page of results to display (default: 0) + diff --git a/testdata/scripts/blocks/help.txtar b/testdata/scripts/blocks/help.txtar new file mode 100644 index 00000000..652f1d3d --- /dev/null +++ b/testdata/scripts/blocks/help.txtar @@ -0,0 +1,16 @@ +exec plugin blocks --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin blocks - Commands for managing blocks + +USAGE: + plugin blocks command [command options] [arguments...] + +COMMANDS: + replay Replays block data from the given number + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/blocks/replay/help.txtar b/testdata/scripts/blocks/replay/help.txtar new file mode 100644 index 00000000..41c64e8a --- /dev/null +++ b/testdata/scripts/blocks/replay/help.txtar @@ -0,0 +1,15 @@ +exec plugin blocks replay --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin blocks replay - Replays block data from the given number + +USAGE: + plugin blocks replay [command options] [arguments...] 
+ +OPTIONS: + --block-number value Block number to replay from (default: 0) + --force Whether to force broadcasting logs which were already consumed and that would otherwise be skipped + --evm-chain-id value Chain ID of the EVM-based blockchain (default: 0) + diff --git a/testdata/scripts/bridges/create/help.txtar b/testdata/scripts/bridges/create/help.txtar new file mode 100644 index 00000000..116286c4 --- /dev/null +++ b/testdata/scripts/bridges/create/help.txtar @@ -0,0 +1,9 @@ +exec plugin bridges create --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin bridges create - Create a new Bridge to an External Adapter + +USAGE: + plugin bridges create [arguments...] diff --git a/testdata/scripts/bridges/destroy/help.txtar b/testdata/scripts/bridges/destroy/help.txtar new file mode 100644 index 00000000..bf49830f --- /dev/null +++ b/testdata/scripts/bridges/destroy/help.txtar @@ -0,0 +1,9 @@ +exec plugin bridges destroy --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin bridges destroy - Destroys the Bridge for an External Adapter + +USAGE: + plugin bridges destroy [arguments...] diff --git a/testdata/scripts/bridges/help.txtar b/testdata/scripts/bridges/help.txtar new file mode 100644 index 00000000..6093f90e --- /dev/null +++ b/testdata/scripts/bridges/help.txtar @@ -0,0 +1,19 @@ +exec plugin bridges --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin bridges - Commands for Bridges communicating with External Adapters + +USAGE: + plugin bridges command [command options] [arguments...] 
+ +COMMANDS: + create Create a new Bridge to an External Adapter + destroy Destroys the Bridge for an External Adapter + list List all Bridges to External Adapters + show Show a Bridge's details + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/bridges/list/help.txtar b/testdata/scripts/bridges/list/help.txtar new file mode 100644 index 00000000..11b1ec4e --- /dev/null +++ b/testdata/scripts/bridges/list/help.txtar @@ -0,0 +1,13 @@ +exec plugin bridges list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin bridges list - List all Bridges to External Adapters + +USAGE: + plugin bridges list [command options] [arguments...] + +OPTIONS: + --page value page of results to display (default: 0) + diff --git a/testdata/scripts/bridges/show/help.txtar b/testdata/scripts/bridges/show/help.txtar new file mode 100644 index 00000000..93c2b5b8 --- /dev/null +++ b/testdata/scripts/bridges/show/help.txtar @@ -0,0 +1,9 @@ +exec plugin bridges show --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin bridges show - Show a Bridge's details + +USAGE: + plugin bridges show [arguments...] diff --git a/testdata/scripts/chains/cosmos/help.txtar b/testdata/scripts/chains/cosmos/help.txtar new file mode 100644 index 00000000..22f1078f --- /dev/null +++ b/testdata/scripts/chains/cosmos/help.txtar @@ -0,0 +1,16 @@ +exec plugin chains cosmos --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin chains cosmos - Commands for handling Cosmos chains + +USAGE: + plugin chains cosmos command [command options] [arguments...] 
+ +COMMANDS: + list List all existing Cosmos chains + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/chains/cosmos/list/help.txtar b/testdata/scripts/chains/cosmos/list/help.txtar new file mode 100644 index 00000000..6b858e9c --- /dev/null +++ b/testdata/scripts/chains/cosmos/list/help.txtar @@ -0,0 +1,9 @@ +exec plugin chains cosmos list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin chains cosmos list - List all existing Cosmos chains + +USAGE: + plugin chains cosmos list [arguments...] diff --git a/testdata/scripts/chains/evm/help.txtar b/testdata/scripts/chains/evm/help.txtar new file mode 100644 index 00000000..1f6b7f2c --- /dev/null +++ b/testdata/scripts/chains/evm/help.txtar @@ -0,0 +1,16 @@ +exec plugin chains evm --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin chains evm - Commands for handling EVM chains + +USAGE: + plugin chains evm command [command options] [arguments...] + +COMMANDS: + list List all existing EVM chains + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/chains/evm/list/help.txtar b/testdata/scripts/chains/evm/list/help.txtar new file mode 100644 index 00000000..7a69e3a0 --- /dev/null +++ b/testdata/scripts/chains/evm/list/help.txtar @@ -0,0 +1,9 @@ +exec plugin chains evm list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin chains evm list - List all existing EVM chains + +USAGE: + plugin chains evm list [arguments...] diff --git a/testdata/scripts/chains/help.txtar b/testdata/scripts/chains/help.txtar new file mode 100644 index 00000000..ccbdd2d1 --- /dev/null +++ b/testdata/scripts/chains/help.txtar @@ -0,0 +1,19 @@ +exec plugin chains --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin chains - Commands for handling chain configuration + +USAGE: + plugin chains command [command options] [arguments...] 
+ +COMMANDS: + evm Commands for handling EVM chains + cosmos Commands for handling Cosmos chains + solana Commands for handling Solana chains + starknet Commands for handling StarkNet chains + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/chains/solana/help.txtar b/testdata/scripts/chains/solana/help.txtar new file mode 100644 index 00000000..9fe10bc6 --- /dev/null +++ b/testdata/scripts/chains/solana/help.txtar @@ -0,0 +1,16 @@ +exec plugin chains solana --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin chains solana - Commands for handling Solana chains + +USAGE: + plugin chains solana command [command options] [arguments...] + +COMMANDS: + list List all existing Solana chains + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/chains/solana/list/help.txtar b/testdata/scripts/chains/solana/list/help.txtar new file mode 100644 index 00000000..29f8725d --- /dev/null +++ b/testdata/scripts/chains/solana/list/help.txtar @@ -0,0 +1,9 @@ +exec plugin chains solana list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin chains solana list - List all existing Solana chains + +USAGE: + plugin chains solana list [arguments...] diff --git a/testdata/scripts/chains/starknet/help.txtar b/testdata/scripts/chains/starknet/help.txtar new file mode 100644 index 00000000..8087a537 --- /dev/null +++ b/testdata/scripts/chains/starknet/help.txtar @@ -0,0 +1,16 @@ +exec plugin chains starknet --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin chains starknet - Commands for handling StarkNet chains + +USAGE: + plugin chains starknet command [command options] [arguments...] 
+ +COMMANDS: + list List all existing StarkNet chains + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/chains/starknet/list/help.txtar b/testdata/scripts/chains/starknet/list/help.txtar new file mode 100644 index 00000000..27bd982f --- /dev/null +++ b/testdata/scripts/chains/starknet/list/help.txtar @@ -0,0 +1,9 @@ +exec plugin chains starknet list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin chains starknet list - List all existing StarkNet chains + +USAGE: + plugin chains starknet list [arguments...] diff --git a/testdata/scripts/config/help.txtar b/testdata/scripts/config/help.txtar new file mode 100644 index 00000000..1704fe8c --- /dev/null +++ b/testdata/scripts/config/help.txtar @@ -0,0 +1,18 @@ +exec plugin config --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin config - Commands for the node's configuration + +USAGE: + plugin config command [command options] [arguments...] + +COMMANDS: + show Show the application configuration + loglevel Set log level + logsql Enable/disable SQL statement logging + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/config/loglevel/help.txtar b/testdata/scripts/config/loglevel/help.txtar new file mode 100644 index 00000000..bf80c2e0 --- /dev/null +++ b/testdata/scripts/config/loglevel/help.txtar @@ -0,0 +1,13 @@ +exec plugin config loglevel --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin config loglevel - Set log level + +USAGE: + plugin config loglevel [command options] [arguments...] 
+ +OPTIONS: + --level value set log level for node (debug||info||warn||error) + diff --git a/testdata/scripts/config/logsql/help.txtar b/testdata/scripts/config/logsql/help.txtar new file mode 100644 index 00000000..0b80c2c9 --- /dev/null +++ b/testdata/scripts/config/logsql/help.txtar @@ -0,0 +1,14 @@ +exec plugin config logsql --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin config logsql - Enable/disable SQL statement logging + +USAGE: + plugin config logsql [command options] [arguments...] + +OPTIONS: + --enable enable SQL logging + --disable disable SQL logging + diff --git a/testdata/scripts/config/show/help.txtar b/testdata/scripts/config/show/help.txtar new file mode 100644 index 00000000..64200703 --- /dev/null +++ b/testdata/scripts/config/show/help.txtar @@ -0,0 +1,13 @@ +exec plugin config show --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin config show - Show the application configuration + +USAGE: + plugin config show [command options] [arguments...] + +OPTIONS: + --user-only If set, show only the user-provided TOML configuration, omitting application defaults + diff --git a/testdata/scripts/forwarders/delete/help.txtar b/testdata/scripts/forwarders/delete/help.txtar new file mode 100644 index 00000000..4500eb62 --- /dev/null +++ b/testdata/scripts/forwarders/delete/help.txtar @@ -0,0 +1,9 @@ +exec plugin forwarders delete --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin forwarders delete - Delete a forwarder address + +USAGE: + plugin forwarders delete [arguments...] diff --git a/testdata/scripts/forwarders/help.txtar b/testdata/scripts/forwarders/help.txtar new file mode 100644 index 00000000..af4e3f86 --- /dev/null +++ b/testdata/scripts/forwarders/help.txtar @@ -0,0 +1,18 @@ +exec plugin forwarders --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin forwarders - Commands for managing forwarder addresses. + +USAGE: + plugin forwarders command [command options] [arguments...] 
+ +COMMANDS: + list List all stored forwarders addresses + track Track a new forwarder + delete Delete a forwarder address + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/forwarders/list/help.txtar b/testdata/scripts/forwarders/list/help.txtar new file mode 100644 index 00000000..fb84034c --- /dev/null +++ b/testdata/scripts/forwarders/list/help.txtar @@ -0,0 +1,9 @@ +exec plugin forwarders list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin forwarders list - List all stored forwarders addresses + +USAGE: + plugin forwarders list [arguments...] diff --git a/testdata/scripts/forwarders/track/help.txtar b/testdata/scripts/forwarders/track/help.txtar new file mode 100644 index 00000000..0bdb61ab --- /dev/null +++ b/testdata/scripts/forwarders/track/help.txtar @@ -0,0 +1,14 @@ +exec plugin forwarders track --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin forwarders track - Track a new forwarder + +USAGE: + plugin forwarders track [command options] [arguments...] + +OPTIONS: + --evm-chain-id value, --evmChainID value, -c value chain ID, if left empty, EVM.ChainID will be used (default: 0) + --address value, -a value The forwarding address (in hex format) + diff --git a/testdata/scripts/health/default.txtar b/testdata/scripts/health/default.txtar new file mode 100644 index 00000000..5fd3eaef --- /dev/null +++ b/testdata/scripts/health/default.txtar @@ -0,0 +1,119 @@ +# start node +exec sh -c 'eval "echo \"$(cat config.toml.tmpl)\" > config.toml"' +exec plugin node -c config.toml start -p password -a creds & + +# initialize client +env NODEURL=http://localhost:$PORT +exec curl --retry 10 --retry-max-time 60 --retry-connrefused $NODEURL +exec plugin --remote-node-url $NODEURL admin login -file creds --bypass-version-check + +exec plugin --remote-node-url $NODEURL health +cmp stdout out.txt + +exec plugin --remote-node-url $NODEURL health -json +cp stdout compact.json +exec jq . 
compact.json +cmp stdout out.json + +-- testdb.txt -- +CL_DATABASE_URL +-- testport.txt -- +PORT + +-- password -- +T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ +-- creds -- +notreal@fakeemail.ch +fj293fbBnlQ!f9vNs + +-- config.toml.tmpl -- +[Webserver] +HTTPPort = $PORT + +-- out.txt -- +ok JobSpawner +ok Mailbox.Monitor +ok Mercury.WSRPCPool +ok Mercury.WSRPCPool.CacheSet +ok PipelineORM +ok PipelineRunner +ok PromReporter +ok TelemetryManager + +-- out.json -- +{ + "data": [ + { + "type": "checks", + "id": "JobSpawner", + "attributes": { + "name": "JobSpawner", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "Mailbox.Monitor", + "attributes": { + "name": "Mailbox.Monitor", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "Mercury.WSRPCPool", + "attributes": { + "name": "Mercury.WSRPCPool", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "Mercury.WSRPCPool.CacheSet", + "attributes": { + "name": "Mercury.WSRPCPool.CacheSet", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "PipelineORM", + "attributes": { + "name": "PipelineORM", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "PipelineRunner", + "attributes": { + "name": "PipelineRunner", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "PromReporter", + "attributes": { + "name": "PromReporter", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "TelemetryManager", + "attributes": { + "name": "TelemetryManager", + "status": "passing", + "output": "" + } + } + ] +} diff --git a/testdata/scripts/health/help.txtar b/testdata/scripts/health/help.txtar new file mode 100644 index 00000000..fcd21101 --- /dev/null +++ b/testdata/scripts/health/help.txtar @@ -0,0 +1,13 @@ +exec plugin health --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin health - Prints a health report + +USAGE: + 
plugin health [command options] [arguments...] + +OPTIONS: + --json, -j json output + diff --git a/testdata/scripts/health/multi-chain.txtar b/testdata/scripts/health/multi-chain.txtar new file mode 100644 index 00000000..747a7221 --- /dev/null +++ b/testdata/scripts/health/multi-chain.txtar @@ -0,0 +1,299 @@ +# start node +exec sh -c 'eval "echo \"$(cat config.toml.tmpl)\" > config.toml"' +exec plugin node -c config.toml start -p password -a creds & + +# initialize client +env NODEURL=http://localhost:$PORT +exec curl --retry 10 --retry-max-time 60 --retry-connrefused $NODEURL +exec plugin --remote-node-url $NODEURL admin login -file creds --bypass-version-check + +exec plugin --remote-node-url $NODEURL health +cmp stdout out.txt + +exec plugin --remote-node-url $NODEURL health -json +cp stdout compact.json +exec jq . compact.json +cmp stdout out.json + +-- testdb.txt -- +CL_DATABASE_URL +-- testport.txt -- +PORT + +-- password -- +T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ +-- creds -- +notreal@fakeemail.ch +fj293fbBnlQ!f9vNs + +-- config.toml.tmpl -- +[Webserver] +HTTPPort = $PORT + +[[Cosmos]] +ChainID = 'Foo' + +[[Cosmos.Nodes]] +Name = 'primary' +TendermintURL = 'http://tender.mint' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +[[Solana]] +ChainID = 'Bar' + +[[Solana.Nodes]] +Name = 'primary' +URL = 'http://solana.web' + +[[Starknet]] +ChainID = 'Baz' + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://stark.node' + +-- out.txt -- +ok Cosmos.Foo.Chain +ok Cosmos.Foo.Txm +ok EVM.1 +ok EVM.1.BalanceMonitor +ok EVM.1.HeadBroadcaster +ok EVM.1.HeadTracker +! 
EVM.1.HeadTracker.HeadListener + Listener is not connected +ok EVM.1.LogBroadcaster +ok EVM.1.Txm +ok EVM.1.Txm.BlockHistoryEstimator +ok EVM.1.Txm.Broadcaster +ok EVM.1.Txm.Confirmer +ok EVM.1.Txm.WrappedEvmEstimator +ok JobSpawner +ok Mailbox.Monitor +ok Mercury.WSRPCPool +ok Mercury.WSRPCPool.CacheSet +ok PipelineORM +ok PipelineRunner +ok PromReporter +ok Solana.Bar +ok StarkNet.Baz +ok TelemetryManager + +-- out.json -- +{ + "data": [ + { + "type": "checks", + "id": "Cosmos.Foo.Chain", + "attributes": { + "name": "Cosmos.Foo.Chain", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "Cosmos.Foo.Txm", + "attributes": { + "name": "Cosmos.Foo.Txm", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.1", + "attributes": { + "name": "EVM.1", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.1.BalanceMonitor", + "attributes": { + "name": "EVM.1.BalanceMonitor", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.1.HeadBroadcaster", + "attributes": { + "name": "EVM.1.HeadBroadcaster", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.1.HeadTracker", + "attributes": { + "name": "EVM.1.HeadTracker", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.1.HeadTracker.HeadListener", + "attributes": { + "name": "EVM.1.HeadTracker.HeadListener", + "status": "failing", + "output": "Listener is not connected" + } + }, + { + "type": "checks", + "id": "EVM.1.LogBroadcaster", + "attributes": { + "name": "EVM.1.LogBroadcaster", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.1.Txm", + "attributes": { + "name": "EVM.1.Txm", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.1.Txm.BlockHistoryEstimator", + "attributes": { + "name": "EVM.1.Txm.BlockHistoryEstimator", + "status": "passing", + "output": "" + } + }, + { + "type": 
"checks", + "id": "EVM.1.Txm.Broadcaster", + "attributes": { + "name": "EVM.1.Txm.Broadcaster", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.1.Txm.Confirmer", + "attributes": { + "name": "EVM.1.Txm.Confirmer", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "EVM.1.Txm.WrappedEvmEstimator", + "attributes": { + "name": "EVM.1.Txm.WrappedEvmEstimator", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "JobSpawner", + "attributes": { + "name": "JobSpawner", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "Mailbox.Monitor", + "attributes": { + "name": "Mailbox.Monitor", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "Mercury.WSRPCPool", + "attributes": { + "name": "Mercury.WSRPCPool", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "Mercury.WSRPCPool.CacheSet", + "attributes": { + "name": "Mercury.WSRPCPool.CacheSet", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "PipelineORM", + "attributes": { + "name": "PipelineORM", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "PipelineRunner", + "attributes": { + "name": "PipelineRunner", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "PromReporter", + "attributes": { + "name": "PromReporter", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "Solana.Bar", + "attributes": { + "name": "Solana.Bar", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "StarkNet.Baz", + "attributes": { + "name": "StarkNet.Baz", + "status": "passing", + "output": "" + } + }, + { + "type": "checks", + "id": "TelemetryManager", + "attributes": { + "name": "TelemetryManager", + "status": "passing", + "output": "" + } + } + ] +} diff --git a/testdata/scripts/help.txtar b/testdata/scripts/help.txtar new file mode 
100644 index 00000000..b0098223 --- /dev/null +++ b/testdata/scripts/help.txtar @@ -0,0 +1,37 @@ +exec plugin --help +cmpenv stdout help.txt + +-- help.txt -- +NAME: + plugin - CLI for Plugin + +USAGE: + plugin [global options] command [command options] [arguments...] + +VERSION: + ${VERSION}@${COMMIT_SHA} + +COMMANDS: + admin Commands for remotely taking admin related actions + attempts, txas Commands for managing Ethereum Transaction Attempts + blocks Commands for managing blocks + bridges Commands for Bridges communicating with External Adapters + config Commands for the node's configuration + health Prints a health report + jobs Commands for managing Jobs + keys Commands for managing various types of keys used by the Plugin node + node, local Commands for admin actions that must be run locally + initiators Commands for managing External Initiators + txs Commands for handling transactions + chains Commands for handling chain configuration + nodes Commands for handling node configuration + forwarders Commands for managing forwarder addresses. + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS: + --json, -j json output as opposed to table + --admin-credentials-file FILE optional, applies only in client mode when making remote API calls. If provided, FILE containing admin credentials will be used for logging in, allowing to avoid an additional login step. If `FILE` is missing, it will be ignored. Defaults to /apicredentials + --remote-node-url URL optional, applies only in client mode when making remote API calls. If provided, URL will be used as the remote Plugin API endpoint (default: "http://localhost:6688") + --insecure-skip-verify optional, applies only in client mode when making remote API calls. If turned on, SSL certificate verification will be disabled. 
This is mostly useful for people who want to use Plugin with a self-signed TLS certificate + --help, -h show help + --version, -v print the version diff --git a/testdata/scripts/initiators/create/help.txtar b/testdata/scripts/initiators/create/help.txtar new file mode 100644 index 00000000..74495efe --- /dev/null +++ b/testdata/scripts/initiators/create/help.txtar @@ -0,0 +1,9 @@ +exec plugin initiators create --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin initiators create - Create an authentication key for a user of External Initiators + +USAGE: + plugin initiators create [arguments...] diff --git a/testdata/scripts/initiators/destroy/help.txtar b/testdata/scripts/initiators/destroy/help.txtar new file mode 100644 index 00000000..5f84397d --- /dev/null +++ b/testdata/scripts/initiators/destroy/help.txtar @@ -0,0 +1,9 @@ +exec plugin initiators destroy --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin initiators destroy - Remove an external initiator by name + +USAGE: + plugin initiators destroy [arguments...] diff --git a/testdata/scripts/initiators/help.txtar b/testdata/scripts/initiators/help.txtar new file mode 100644 index 00000000..6ad8e671 --- /dev/null +++ b/testdata/scripts/initiators/help.txtar @@ -0,0 +1,18 @@ +exec plugin initiators --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin initiators - Commands for managing External Initiators + +USAGE: + plugin initiators command [command options] [arguments...] 
+ +COMMANDS: + create Create an authentication key for a user of External Initiators + destroy Remove an external initiator by name + list List all external initiators + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/initiators/list/help.txtar b/testdata/scripts/initiators/list/help.txtar new file mode 100644 index 00000000..cead03d9 --- /dev/null +++ b/testdata/scripts/initiators/list/help.txtar @@ -0,0 +1,9 @@ +exec plugin initiators list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin initiators list - List all external initiators + +USAGE: + plugin initiators list [arguments...] diff --git a/testdata/scripts/jobs/create/help.txtar b/testdata/scripts/jobs/create/help.txtar new file mode 100644 index 00000000..f359e3f8 --- /dev/null +++ b/testdata/scripts/jobs/create/help.txtar @@ -0,0 +1,9 @@ +exec plugin jobs create --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin jobs create - Create a job + +USAGE: + plugin jobs create [arguments...] diff --git a/testdata/scripts/jobs/delete/help.txtar b/testdata/scripts/jobs/delete/help.txtar new file mode 100644 index 00000000..3c42ed34 --- /dev/null +++ b/testdata/scripts/jobs/delete/help.txtar @@ -0,0 +1,9 @@ +exec plugin jobs delete --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin jobs delete - Delete a job + +USAGE: + plugin jobs delete [arguments...] diff --git a/testdata/scripts/jobs/help.txtar b/testdata/scripts/jobs/help.txtar new file mode 100644 index 00000000..c1c89e2e --- /dev/null +++ b/testdata/scripts/jobs/help.txtar @@ -0,0 +1,20 @@ +exec plugin jobs --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin jobs - Commands for managing Jobs + +USAGE: + plugin jobs command [command options] [arguments...] 
+ +COMMANDS: + list List all jobs + show Show a job + create Create a job + delete Delete a job + run Trigger a job run + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/jobs/list/help.txtar b/testdata/scripts/jobs/list/help.txtar new file mode 100644 index 00000000..2ac33a91 --- /dev/null +++ b/testdata/scripts/jobs/list/help.txtar @@ -0,0 +1,13 @@ +exec plugin jobs list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin jobs list - List all jobs + +USAGE: + plugin jobs list [command options] [arguments...] + +OPTIONS: + --page value page of results to display (default: 0) + diff --git a/testdata/scripts/jobs/run/help.txtar b/testdata/scripts/jobs/run/help.txtar new file mode 100644 index 00000000..8947a052 --- /dev/null +++ b/testdata/scripts/jobs/run/help.txtar @@ -0,0 +1,9 @@ +exec plugin jobs run --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin jobs run - Trigger a job run + +USAGE: + plugin jobs run [arguments...] diff --git a/testdata/scripts/jobs/show/help.txtar b/testdata/scripts/jobs/show/help.txtar new file mode 100644 index 00000000..f342d596 --- /dev/null +++ b/testdata/scripts/jobs/show/help.txtar @@ -0,0 +1,9 @@ +exec plugin jobs show --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin jobs show - Show a job + +USAGE: + plugin jobs show [arguments...] diff --git a/testdata/scripts/keys/cosmos/help.txtar b/testdata/scripts/keys/cosmos/help.txtar new file mode 100644 index 00000000..5d4f4ecf --- /dev/null +++ b/testdata/scripts/keys/cosmos/help.txtar @@ -0,0 +1,20 @@ +exec plugin keys cosmos --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys cosmos - Remote commands for administering the node's Cosmos keys + +USAGE: + plugin keys cosmos command [command options] [arguments...] 
+ +COMMANDS: + create Create a Cosmos key + import Import Cosmos key from keyfile + export Export Cosmos key to keyfile + delete Delete Cosmos key if present + list List the Cosmos keys + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/keys/csa/help.txtar b/testdata/scripts/keys/csa/help.txtar new file mode 100644 index 00000000..3aba9fb6 --- /dev/null +++ b/testdata/scripts/keys/csa/help.txtar @@ -0,0 +1,19 @@ +exec plugin keys csa --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys csa - Remote commands for administering the node's CSA keys + +USAGE: + plugin keys csa command [command options] [arguments...] + +COMMANDS: + create Create a CSA key, encrypted with password from the password file, and store it in the database. + list List available CSA keys + import Imports a CSA key from a JSON file. + export Exports an existing CSA key by its ID. + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/keys/dkgencrypt/help.txtar b/testdata/scripts/keys/dkgencrypt/help.txtar new file mode 100644 index 00000000..60fef856 --- /dev/null +++ b/testdata/scripts/keys/dkgencrypt/help.txtar @@ -0,0 +1,20 @@ +exec plugin keys dkgencrypt --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys dkgencrypt - Remote commands for administering the node's DKGEncrypt keys + +USAGE: + plugin keys dkgencrypt command [command options] [arguments...] 
+ +COMMANDS: + create Create a DKGEncrypt key + import Import DKGEncrypt key from keyfile + export Export DKGEncrypt key to keyfile + delete Delete DKGEncrypt key if present + list List the DKGEncrypt keys + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/keys/dkgsign/help.txtar b/testdata/scripts/keys/dkgsign/help.txtar new file mode 100644 index 00000000..30b59926 --- /dev/null +++ b/testdata/scripts/keys/dkgsign/help.txtar @@ -0,0 +1,20 @@ +exec plugin keys dkgsign --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys dkgsign - Remote commands for administering the node's DKGSign keys + +USAGE: + plugin keys dkgsign command [command options] [arguments...] + +COMMANDS: + create Create a DKGSign key + import Import DKGSign key from keyfile + export Export DKGSign key to keyfile + delete Delete DKGSign key if present + list List the DKGSign keys + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/keys/eth/create/help.txtar b/testdata/scripts/keys/eth/create/help.txtar new file mode 100644 index 00000000..a9273cef --- /dev/null +++ b/testdata/scripts/keys/eth/create/help.txtar @@ -0,0 +1,14 @@ +exec plugin keys eth create --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys eth create - Create a key in the node's keystore alongside the existing key; to create an original key, just run the node + +USAGE: + plugin keys eth create [command options] [arguments...] + +OPTIONS: + --evm-chain-id value, --evmChainID value Chain ID for the key. If left blank, default chain will be used. + --max-gas-price-gwei value, --maxGasPriceGWei value Optional maximum gas price (GWei) for the creating key. 
(default: 0) + diff --git a/testdata/scripts/keys/eth/help.txtar b/testdata/scripts/keys/eth/help.txtar new file mode 100644 index 00000000..2e755dd4 --- /dev/null +++ b/testdata/scripts/keys/eth/help.txtar @@ -0,0 +1,21 @@ +exec plugin keys eth --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys eth - Remote commands for administering the node's Ethereum keys + +USAGE: + plugin keys eth command [command options] [arguments...] + +COMMANDS: + create Create a key in the node's keystore alongside the existing key; to create an original key, just run the node + list List available Ethereum accounts with their ETH & PLI balances and other metadata + delete Delete the ETH key by address (irreversible!) + import Import an ETH key from a JSON file + export Exports an ETH key to a JSON file + chain Update an EVM key for the given chain + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/keys/eth/list/help.txtar b/testdata/scripts/keys/eth/list/help.txtar new file mode 100644 index 00000000..dbbc54ee --- /dev/null +++ b/testdata/scripts/keys/eth/list/help.txtar @@ -0,0 +1,9 @@ +exec plugin keys eth list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys eth list - List available Ethereum accounts with their ETH & PLI balances and other metadata + +USAGE: + plugin keys eth list [arguments...] 
\ No newline at end of file diff --git a/testdata/scripts/keys/eth/list/unavailable.txtar b/testdata/scripts/keys/eth/list/unavailable.txtar new file mode 100644 index 00000000..ca49bd14 --- /dev/null +++ b/testdata/scripts/keys/eth/list/unavailable.txtar @@ -0,0 +1,38 @@ +# start node +exec sh -c 'eval "echo \"$(cat config.toml.tmpl)\" > config.toml"' +exec plugin node -c config.toml start -p password -a creds & + +# initialize client +env NODEURL=http://localhost:$PORT +exec curl --retry 10 --retry-max-time 60 --retry-connrefused $NODEURL +exec plugin --remote-node-url $NODEURL admin login -file creds --bypass-version-check + +exec plugin --remote-node-url $NODEURL keys eth list +! stdout 'ETH: ' +! stdout 'PLI: ' +! stdout '' +stdout 'ETH: Unknown' +stdout 'PLI: Unknown' + +-- testdb.txt -- +CL_DATABASE_URL +-- testport.txt -- +PORT + +-- password -- +T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ +-- creds -- +notreal@fakeemail.ch +fj293fbBnlQ!f9vNs + +-- config.toml.tmpl -- +[Webserver] +HTTPPort = $PORT + +[[EVM]] +ChainID = '99' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' diff --git a/testdata/scripts/keys/help.txtar b/testdata/scripts/keys/help.txtar new file mode 100644 index 00000000..c75259f7 --- /dev/null +++ b/testdata/scripts/keys/help.txtar @@ -0,0 +1,26 @@ +exec plugin keys --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys - Commands for managing various types of keys used by the Plugin node + +USAGE: + plugin keys command [command options] [arguments...] 
+ +COMMANDS: + eth Remote commands for administering the node's Ethereum keys + p2p Remote commands for administering the node's p2p keys + csa Remote commands for administering the node's CSA keys + ocr Remote commands for administering the node's legacy off chain reporting keys + ocr2 Remote commands for administering the node's off chain reporting keys + cosmos Remote commands for administering the node's Cosmos keys + solana Remote commands for administering the node's Solana keys + starknet Remote commands for administering the node's StarkNet keys + dkgsign Remote commands for administering the node's DKGSign keys + dkgencrypt Remote commands for administering the node's DKGEncrypt keys + vrf Remote commands for administering the node's vrf keys + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/keys/ocr/help.txtar b/testdata/scripts/keys/ocr/help.txtar new file mode 100644 index 00000000..77f584ad --- /dev/null +++ b/testdata/scripts/keys/ocr/help.txtar @@ -0,0 +1,20 @@ +exec plugin keys ocr --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys ocr - Remote commands for administering the node's legacy off chain reporting keys + +USAGE: + plugin keys ocr command [command options] [arguments...] 
+ +COMMANDS: + create Create an OCR key bundle, encrypted with password from the password file, and store it in the database + delete Deletes the encrypted OCR key bundle matching the given ID + list List available OCR key bundles + import Imports an OCR key bundle from a JSON file + export Exports an OCR key bundle to a JSON file + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/keys/ocr2/help.txtar b/testdata/scripts/keys/ocr2/help.txtar new file mode 100644 index 00000000..fc08af6c --- /dev/null +++ b/testdata/scripts/keys/ocr2/help.txtar @@ -0,0 +1,20 @@ +exec plugin keys ocr2 --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys ocr2 - Remote commands for administering the node's off chain reporting keys + +USAGE: + plugin keys ocr2 command [command options] [arguments...] + +COMMANDS: + create Create an OCR2 key bundle, encrypted with password from the password file, and store it in the database + delete Deletes the encrypted OCR2 key bundle matching the given ID + list List available OCR2 key bundles + import Imports an OCR2 key bundle from a JSON file + export Exports an OCR2 key bundle to a JSON file + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/keys/p2p/help.txtar b/testdata/scripts/keys/p2p/help.txtar new file mode 100644 index 00000000..7397b739 --- /dev/null +++ b/testdata/scripts/keys/p2p/help.txtar @@ -0,0 +1,20 @@ +exec plugin keys p2p --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys p2p - Remote commands for administering the node's p2p keys + +USAGE: + plugin keys p2p command [command options] [arguments...] + +COMMANDS: + create Create a p2p key, encrypted with password from the password file, and store it in the database. 
+ delete Delete the encrypted P2P key by id + list List available P2P keys + import Imports a P2P key from a JSON file + export Exports a P2P key to a JSON file + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/keys/solana/help.txtar b/testdata/scripts/keys/solana/help.txtar new file mode 100644 index 00000000..d09dd652 --- /dev/null +++ b/testdata/scripts/keys/solana/help.txtar @@ -0,0 +1,20 @@ +exec plugin keys solana --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys solana - Remote commands for administering the node's Solana keys + +USAGE: + plugin keys solana command [command options] [arguments...] + +COMMANDS: + create Create a Solana key + import Import Solana key from keyfile + export Export Solana key to keyfile + delete Delete Solana key if present + list List the Solana keys + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/keys/starknet/help.txtar b/testdata/scripts/keys/starknet/help.txtar new file mode 100644 index 00000000..1f941dca --- /dev/null +++ b/testdata/scripts/keys/starknet/help.txtar @@ -0,0 +1,20 @@ +exec plugin keys starknet --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys starknet - Remote commands for administering the node's StarkNet keys + +USAGE: + plugin keys starknet command [command options] [arguments...] + +COMMANDS: + create Create a StarkNet key + import Import StarkNet key from keyfile + export Export StarkNet key to keyfile + delete Delete StarkNet key if present + list List the StarkNet keys + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/keys/vrf/help.txtar b/testdata/scripts/keys/vrf/help.txtar new file mode 100644 index 00000000..dddc448f --- /dev/null +++ b/testdata/scripts/keys/vrf/help.txtar @@ -0,0 +1,20 @@ +exec plugin keys vrf --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin keys vrf - Remote commands for administering the node's vrf keys + +USAGE: + plugin keys vrf command [command options] [arguments...] 
+ +COMMANDS: + create Create a VRF key + import Import VRF key from keyfile + export Export VRF key to keyfile + delete Archive or delete VRF key from memory and the database, if present. Note that jobs referencing the removed key will also be removed. + list List the VRF keys + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/metrics/multi-node.txtar b/testdata/scripts/metrics/multi-node.txtar new file mode 100644 index 00000000..88ff7ed3 --- /dev/null +++ b/testdata/scripts/metrics/multi-node.txtar @@ -0,0 +1,78 @@ +# Check that metrics specified in the expected_metrics are present in /metrics response +# start node +exec sh -c 'eval "echo \"$(cat config.toml.tmpl)\" > config.toml"' +exec plugin node -c config.toml start -p password -a creds & + +# ensure node is up and running +env NODEURL=http://localhost:$PORT +exec curl --retry 10 --retry-max-time 60 --retry-connrefused $NODEURL + + +# Check +chmod 700 ./script.sh +exec sh -c './script.sh' + +-- script.sh -- + +maxRetries=5 +for retriesNum in $(seq 1 $maxRetries); do + passedAllChecks=true + curl $NODEURL/metrics > metrics.txt + while IFS= read -r expectedMetric; do + grep -q $expectedMetric metrics.txt && continue + + if [[ $retriesNum -ge $maxRetries ]]; then + cat metrics.txt + echo "FAIL Expected metric $expectedMetric to be present in GET /metrics response" + exit 1 + fi + + echo "Metric $expectedMetric is not present in GET /metrics response - retrying after 5s" + passedAllChecks=false + sleep 5 + break + done < expected_metrics.txt + + $passedAllChecks && break +done + +-- testdb.txt -- +CL_DATABASE_URL +-- testport.txt -- +PORT + +-- password -- +T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ +-- creds -- +notreal@fakeemail.ch +fj293fbBnlQ!f9vNs + +-- config.toml.tmpl -- +[Webserver] +HTTPPort = $PORT + +[[EVM]] +ChainID = '68472' + +[[EVM.Nodes]] +Name = 'BlueEVMPrimaryNode' +WSURL = 'wss://primaryfoo.bar/ws' +HTTPURL = 'https://primaryfoo.bar' + +[[EVM.Nodes]] +Name 
= 'YellowEVMPrimaryNode' +WSURL = 'wss://sendonlyfoo.bar/ws' +HTTPURL = 'https://sendonlyfoo.bar' +SendOnly = true + +-- expected_metrics.txt -- +evm_pool_rpc_node_dials_total{evmChainID="68472",nodeName="BlueEVMPrimaryNode"} +evm_pool_rpc_node_dials_total{evmChainID="68472",nodeName="YellowEVMPrimaryNode"} +multi_node_states{chainId="68472",network="EVM",state="Alive"} +multi_node_states{chainId="68472",network="EVM",state="Closed"} +multi_node_states{chainId="68472",network="EVM",state="Dialed"} +multi_node_states{chainId="68472",network="EVM",state="InvalidChainID"} +multi_node_states{chainId="68472",network="EVM",state="OutOfSync"} +multi_node_states{chainId="68472",network="EVM",state="Undialed"} +multi_node_states{chainId="68472",network="EVM",state="Unreachable"} +multi_node_states{chainId="68472",network="EVM",state="Unusable"} \ No newline at end of file diff --git a/testdata/scripts/node/db/create-migration/help.txtar b/testdata/scripts/node/db/create-migration/help.txtar new file mode 100644 index 00000000..e68c721a --- /dev/null +++ b/testdata/scripts/node/db/create-migration/help.txtar @@ -0,0 +1,14 @@ +exec plugin node db create-migration --help +cmp stdout out.txt +! stderr . + +-- out.txt -- +NAME: + plugin node db create-migration - Create a new migration. + +USAGE: + plugin node db create-migration [command options] [arguments...] + +OPTIONS: + --type go set to go to generate a .go migration (instead of .sql) + diff --git a/testdata/scripts/node/db/delete-chain/help.txtar b/testdata/scripts/node/db/delete-chain/help.txtar new file mode 100644 index 00000000..b81a6d9a --- /dev/null +++ b/testdata/scripts/node/db/delete-chain/help.txtar @@ -0,0 +1,16 @@ +exec plugin node db delete-chain --help +cmp stdout out.txt +! stderr . + +-- out.txt -- +NAME: + plugin node db delete-chain - Commands for cleaning up chain specific db tables. 
WARNING: This will ERASE ALL chain specific data referred to by --type and --id options for the specified database, referred to by CL_DATABASE_URL env variable or by the Database.URL field in a secrets TOML config. + +USAGE: + plugin node db delete-chain [command options] [arguments...] + +OPTIONS: + --id value chain id based on which chain specific table cleanup will be done + --type value chain type based on which table cleanup will be done, eg. EVM + --danger set to true to enable dropping non-test databases + diff --git a/testdata/scripts/node/db/help.txtar b/testdata/scripts/node/db/help.txtar new file mode 100644 index 00000000..75f7bf99 --- /dev/null +++ b/testdata/scripts/node/db/help.txtar @@ -0,0 +1,23 @@ +exec plugin node db --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin node db - Potentially destructive commands for managing the database. + +USAGE: + plugin node db command [command options] [arguments...] + +COMMANDS: + reset Drop, create and migrate database. Useful for setting up the database in order to run tests or resetting the dev database. WARNING: This will ERASE ALL DATA for the specified database, referred to by CL_DATABASE_URL env variable or by the Database.URL field in a secrets TOML config. + preparetest Reset database and load fixtures. + version Display the current database version. + status Display the current database migration status. + migrate Migrate the database to the latest version. + rollback Roll back the database to a previous . Rolls back a single migration if no version specified. + create-migration Create a new migration. + delete-chain Commands for cleaning up chain specific db tables. WARNING: This will ERASE ALL chain specific data referred to by --type and --id options for the specified database, referred to by CL_DATABASE_URL env variable or by the Database.URL field in a secrets TOML config. 
+ +OPTIONS: + --help, -h show help + \ No newline at end of file diff --git a/testdata/scripts/node/db/migrate/db.txtar b/testdata/scripts/node/db/migrate/db.txtar new file mode 100644 index 00000000..0f6176fe --- /dev/null +++ b/testdata/scripts/node/db/migrate/db.txtar @@ -0,0 +1,6 @@ +exec plugin node db migrate +! stdout . +stderr 'goose: no migrations to run. current version:' + +-- testdb.txt -- +CL_DATABASE_URL diff --git a/testdata/scripts/node/db/migrate/help.txtar b/testdata/scripts/node/db/migrate/help.txtar new file mode 100644 index 00000000..d4cdadca --- /dev/null +++ b/testdata/scripts/node/db/migrate/help.txtar @@ -0,0 +1,10 @@ +exec plugin node db migrate --help +cmp stdout out.txt +! stderr . + +-- out.txt -- +NAME: + plugin node db migrate - Migrate the database to the latest version. + +USAGE: + plugin node db migrate [arguments...] diff --git a/testdata/scripts/node/db/missing-secret.txtar b/testdata/scripts/node/db/missing-secret.txtar new file mode 100644 index 00000000..24417e33 --- /dev/null +++ b/testdata/scripts/node/db/missing-secret.txtar @@ -0,0 +1,11 @@ +# all db commands are validated -- doesn't matter which we use +! exec plugin node db version +cmp stderr err.txt +cmp stdout out.txt + + +-- out.txt -- +Invalid configuration: invalid secrets: Database.URL: empty: must be provided and non-empty + +-- err.txt -- +invalid configuration diff --git a/testdata/scripts/node/db/preparetest/help.txtar b/testdata/scripts/node/db/preparetest/help.txtar new file mode 100644 index 00000000..d2138c92 --- /dev/null +++ b/testdata/scripts/node/db/preparetest/help.txtar @@ -0,0 +1,14 @@ +exec plugin node db preparetest --help +cmp stdout out.txt +! stderr . + +-- out.txt -- +NAME: + plugin node db preparetest - Reset database and load fixtures. + +USAGE: + plugin node db preparetest [command options] [arguments...] 
+ +OPTIONS: + --user-only only include test user fixture + diff --git a/testdata/scripts/node/db/reset/help.txtar b/testdata/scripts/node/db/reset/help.txtar new file mode 100644 index 00000000..906a8705 --- /dev/null +++ b/testdata/scripts/node/db/reset/help.txtar @@ -0,0 +1,14 @@ +exec plugin node db reset --help +cmp stdout out.txt +! stderr . + +-- out.txt -- +NAME: + plugin node db reset - Drop, create and migrate database. Useful for setting up the database in order to run tests or resetting the dev database. WARNING: This will ERASE ALL DATA for the specified database, referred to by CL_DATABASE_URL env variable or by the Database.URL field in a secrets TOML config. + +USAGE: + plugin node db reset [command options] [arguments...] + +OPTIONS: + --dangerWillRobinson set to true to enable dropping non-test databases + diff --git a/testdata/scripts/node/db/rollback/help.txtar b/testdata/scripts/node/db/rollback/help.txtar new file mode 100644 index 00000000..7e2575e1 --- /dev/null +++ b/testdata/scripts/node/db/rollback/help.txtar @@ -0,0 +1,10 @@ +exec plugin node db rollback --help +cmp stdout out.txt +! stderr . + +-- out.txt -- +NAME: + plugin node db rollback - Roll back the database to a previous . Rolls back a single migration if no version specified. + +USAGE: + plugin node db rollback [arguments...] diff --git a/testdata/scripts/node/db/status/help.txtar b/testdata/scripts/node/db/status/help.txtar new file mode 100644 index 00000000..f9407931 --- /dev/null +++ b/testdata/scripts/node/db/status/help.txtar @@ -0,0 +1,10 @@ +exec plugin node db status --help +cmp stdout out.txt +! stderr . + +-- out.txt -- +NAME: + plugin node db status - Display the current database migration status. + +USAGE: + plugin node db status [arguments...] 
diff --git a/testdata/scripts/node/db/version/help.txtar b/testdata/scripts/node/db/version/help.txtar new file mode 100644 index 00000000..a5e6d681 --- /dev/null +++ b/testdata/scripts/node/db/version/help.txtar @@ -0,0 +1,10 @@ +exec plugin node db version --help +cmp stdout out.txt +! stderr . + +-- out.txt -- +NAME: + plugin node db version - Display the current database version. + +USAGE: + plugin node db version [arguments...] diff --git a/testdata/scripts/node/help.txtar b/testdata/scripts/node/help.txtar new file mode 100644 index 00000000..b696ac1b --- /dev/null +++ b/testdata/scripts/node/help.txtar @@ -0,0 +1,21 @@ +exec plugin node --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin node - Commands can only be run from on the same machine as the Plugin node. + +USAGE: + plugin node command [command options] [arguments...] + +COMMANDS: + start, node, n Run the Plugin node + rebroadcast-transactions Manually rebroadcast txs matching nonce range with the specified gas price. This is useful in emergencies e.g. high gas prices and/or network congestion to forcibly clear out the pending TX queue + validate Validate the TOML configuration and secrets that are passed as flags to the `node` command. Prints the full effective configuration, with defaults included + db Commands for managing the database. + +OPTIONS: + --config value, -c value TOML configuration file(s) via flag, or raw TOML via env var. If used, legacy env vars must not be set. Multiple files can be used (-c configA.toml -c configB.toml), and they are applied in order with duplicated fields overriding any earlier values. If the 'CL_CONFIG' env var is specified, it is always processed last with the effect of being the final override. [$CL_CONFIG] + --secrets value, -s value TOML configuration file for secrets. Must be set if and only if config is set. Multiple files can be used (-s secretsA.toml -s secretsB.toml), and fields from the files will be merged. No overrides are allowed. 
+ --help, -h show help + diff --git a/testdata/scripts/node/profile/help.txtar b/testdata/scripts/node/profile/help.txtar new file mode 100644 index 00000000..a35469ae --- /dev/null +++ b/testdata/scripts/node/profile/help.txtar @@ -0,0 +1,14 @@ +exec plugin node profile --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin node profile - Collects profile metrics from the node. + +USAGE: + plugin node profile [command options] [arguments...] + +OPTIONS: + --seconds value, -s value duration of profile capture (default: 8) + --output_dir value, -o value output directory of the captured profile (default: "/tmp/") + diff --git a/testdata/scripts/node/rebroadcast-transactions/help.txtar b/testdata/scripts/node/rebroadcast-transactions/help.txtar new file mode 100644 index 00000000..ac1f3e18 --- /dev/null +++ b/testdata/scripts/node/rebroadcast-transactions/help.txtar @@ -0,0 +1,19 @@ +exec plugin node rebroadcast-transactions --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin node rebroadcast-transactions - Manually rebroadcast txs matching nonce range with the specified gas price. This is useful in emergencies e.g. high gas prices and/or network congestion to forcibly clear out the pending TX queue + +USAGE: + plugin node rebroadcast-transactions [command options] [arguments...] + +OPTIONS: + --beginningNonce value, --beginning-nonce value, -b value beginning of nonce range to rebroadcast (default: 0) + --endingNonce value, --ending-nonce value, -e value end of nonce range to rebroadcast (inclusive) (default: 0) + --gasPriceWei value, --gas-price-wei value, -g value gas price (in Wei) to rebroadcast transactions at (default: 0) + --password value, -p value text file holding the password for the node's account + --address value, -a value The address (in hex format) for the key which we want to rebroadcast transactions + --evmChainID value, --evm-chain-id value Chain ID for which to rebroadcast transactions. If left blank, EVM.ChainID will be used. 
+ --gasLimit value, --gas-limit value OPTIONAL: gas limit to use for each transaction (default: 0) + diff --git a/testdata/scripts/node/start/help.txtar b/testdata/scripts/node/start/help.txtar new file mode 100644 index 00000000..f75de9c2 --- /dev/null +++ b/testdata/scripts/node/start/help.txtar @@ -0,0 +1,16 @@ +exec plugin node start --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin node start - Run the Plugin node + +USAGE: + plugin node start [command options] [arguments...] + +OPTIONS: + --api value, -a value text file holding the API email and password, each on a line + --debug, -d set logger level to debug + --password value, -p value text file holding the password for the node's account + --vrfpassword value, --vp value text file holding the password for the vrf keys; enables Plugin VRF oracle + diff --git a/testdata/scripts/node/status/help.txtar b/testdata/scripts/node/status/help.txtar new file mode 100644 index 00000000..2f801834 --- /dev/null +++ b/testdata/scripts/node/status/help.txtar @@ -0,0 +1,9 @@ +exec plugin node status --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin node status - Displays the health of various services running inside the node. + +USAGE: + plugin node status [arguments...] diff --git a/testdata/scripts/node/validate/default.txtar b/testdata/scripts/node/validate/default.txtar new file mode 100644 index 00000000..6400b8a0 --- /dev/null +++ b/testdata/scripts/node/validate/default.txtar @@ -0,0 +1,247 @@ +! 
exec plugin node validate +stderr 'invalid configuration' +cmp stdout out.txt + +-- out.txt -- +# Secrets: +[Database] +AllowSimplePasswords = false + +# Input Configuration: + +# Effective Configuration, with defaults applied: +InsecureFastScrypt = false +RootDir = '~/.plugin' +ShutdownGracePeriod = '5s' + +[Feature] +FeedsManager = true +LogPoller = false +UICSAKeys = false + +[Database] +DefaultIdleInTxSessionTimeout = '1h0m0s' +DefaultLockTimeout = '15s' +DefaultQueryTimeout = '10s' +LogQueries = false +MaxIdleConns = 10 +MaxOpenConns = 20 +MigrateOnStartup = true + +[Database.Backup] +Dir = '' +Frequency = '1h0m0s' +Mode = 'none' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '10m0s' +MinReconnectInterval = '1m0s' +FallbackPollInterval = '30s' + +[Database.Lock] +Enabled = true +LeaseDuration = '10s' +LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = false +BufferSize = 100 +MaxBatchSize = 50 +SendInterval = '500ms' +SendTimeout = '10s' +UseBatchSend = true + +[AuditLogger] +Enabled = false +ForwardToUrl = '' +JsonWrapperKey = '' +Headers = [] + +[Log] +Level = 'info' +JSONConsole = false +UnixTS = false + +[Log.File] +Dir = '' +MaxSize = '5.12gb' +MaxAgeDays = 0 +MaxBackups = 1 + +[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = 'http://localhost:3000,http://localhost:6688' +BridgeResponseURL = '' +BridgeCacheTTL = '0s' +HTTPWriteTimeout = '10s' +HTTPPort = 6688 +SecureCookies = true +SessionTimeout = '15m0s' +SessionReaperExpiration = '240h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '0.0.0.0' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = '' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = '' +ActiveAttributeAllowedValue = '' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false 
+UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = '' +RPOrigin = '' + +[WebServer.RateLimit] +Authenticated = 1000 +AuthenticatedPeriod = '1m0s' +Unauthenticated = 5 +UnauthenticatedPeriod = '20s' + +[WebServer.TLS] +CertPath = '' +ForceRedirect = false +Host = '' +HTTPSPort = 6689 +KeyPath = '' +ListenIP = '0.0.0.0' + +[JobPipeline] +ExternalInitiatorsEnabled = false +MaxRunDuration = '10m0s' +MaxSuccessfulRuns = 10000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '24h0m0s' +ResultWriteQueueDepth = 100 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '15s' +MaxSize = '32.77kb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false + +[OCR2] +Enabled = false +ContractConfirmations = 3 +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = false +ObservationTimeout = '5s' +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +DefaultTransactionQueueDepth = 1 +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +SimulateTransactions = false +TransmitterAddress = '' +CaptureEATelemetry = false +TraceLogging = false + +[P2P] +IncomingMessageBufferSize = 10 +OutgoingMessageBufferSize = 10 +PeerID = '' +TraceLogging = false + +[P2P.V2] +Enabled = true +AnnounceAddresses = [] +DefaultBootstrappers = [] +DeltaDial = '15s' +DeltaReconcile = '1m0s' +ListenAddresses = [] + +[Keeper] +DefaultTransactionQueueDepth = 1 +GasPriceBufferPercent = 20 +GasTipCapBufferPercent = 20 +BaseFeeBufferPercent = 20 +MaxGracePeriod = 100 +TurnLookBack = 1000 + 
+[Keeper.Registry] +CheckGasOverhead = 200000 +PerformGasOverhead = 300000 +MaxPerformDataSize = 5000 +SyncInterval = '30m0s' +SyncUpkeepQueueSize = 10 + +[AutoPprof] +Enabled = false +ProfileRoot = '' +PollInterval = '10s' +GatherDuration = '10s' +GatherTraceDuration = '5s' +MaxProfileSize = '100.00mb' +CPUProfileRate = 1 +MemProfileRate = 1 +BlockProfileRate = 1 +MutexProfileFraction = 1 +MemThreshold = '4.00gb' +GoroutineThreshold = 5000 + +[Pyroscope] +ServerAddress = '' +Environment = 'mainnet' + +[Sentry] +Debug = false +DSN = '' +Environment = '' +Release = '' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = false +CollectorTarget = '' +NodeID = '' +SamplingRatio = 0.0 +Mode = 'tls' +TLSCertPath = '' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1s' +MaxStaleAge = '1h0m0s' +LatestReportDeadline = '5s' + +[Mercury.TLS] +CertFile = '' + +Invalid configuration: invalid secrets: 2 errors: + - Database.URL: empty: must be provided and non-empty + - Password.Keystore: empty: must be provided and non-empty + diff --git a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar new file mode 100644 index 00000000..3d7307e4 --- /dev/null +++ b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar @@ -0,0 +1,371 @@ +exec plugin node -c config.toml -s secrets.toml validate +cmp stdout out.txt +! exists $WORK/logs + +exec plugin -c config.toml -s secrets.toml node validate +cmp stdout out.txt +! 
exists $WORK/logs + +-- config.toml -- +Log.Level = 'debug' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +[Log.File] +Dir = './logs' +MaxSize = '0b' + +-- secrets.toml -- +[Database] +URL = 'postgresql://user:pass1234567890abcd@localhost:5432/dbname?sslmode=disable' + +[Password] +Keystore = 'keystore_pass' + +-- out.txt -- +# Secrets: +[Database] +URL = 'xxxxx' +AllowSimplePasswords = false + +[Password] +Keystore = 'xxxxx' + +# Input Configuration: +[Log] +Level = 'debug' + +[Log.File] +Dir = './logs' +MaxSize = '0b' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +# Effective Configuration, with defaults applied: +InsecureFastScrypt = false +RootDir = '~/.plugin' +ShutdownGracePeriod = '5s' + +[Feature] +FeedsManager = true +LogPoller = false +UICSAKeys = false + +[Database] +DefaultIdleInTxSessionTimeout = '1h0m0s' +DefaultLockTimeout = '15s' +DefaultQueryTimeout = '10s' +LogQueries = false +MaxIdleConns = 10 +MaxOpenConns = 20 +MigrateOnStartup = true + +[Database.Backup] +Dir = '' +Frequency = '1h0m0s' +Mode = 'none' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '10m0s' +MinReconnectInterval = '1m0s' +FallbackPollInterval = '30s' + +[Database.Lock] +Enabled = true +LeaseDuration = '10s' +LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = false +BufferSize = 100 +MaxBatchSize = 50 +SendInterval = '500ms' +SendTimeout = '10s' +UseBatchSend = true + +[AuditLogger] +Enabled = false +ForwardToUrl = '' +JsonWrapperKey = '' +Headers = [] + +[Log] +Level = 'debug' +JSONConsole = false +UnixTS = false + +[Log.File] +Dir = './logs' +MaxSize = '0b' +MaxAgeDays = 0 +MaxBackups = 1 + +[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = 'http://localhost:3000,http://localhost:6688' +BridgeResponseURL = '' +BridgeCacheTTL = '0s' +HTTPWriteTimeout = '10s' +HTTPPort = 6688 
+SecureCookies = true +SessionTimeout = '15m0s' +SessionReaperExpiration = '240h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '0.0.0.0' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = '' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = '' +ActiveAttributeAllowedValue = '' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = '' +RPOrigin = '' + +[WebServer.RateLimit] +Authenticated = 1000 +AuthenticatedPeriod = '1m0s' +Unauthenticated = 5 +UnauthenticatedPeriod = '20s' + +[WebServer.TLS] +CertPath = '' +ForceRedirect = false +Host = '' +HTTPSPort = 6689 +KeyPath = '' +ListenIP = '0.0.0.0' + +[JobPipeline] +ExternalInitiatorsEnabled = false +MaxRunDuration = '10m0s' +MaxSuccessfulRuns = 10000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '24h0m0s' +ResultWriteQueueDepth = 100 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '15s' +MaxSize = '32.77kb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false + +[OCR2] +Enabled = false +ContractConfirmations = 3 +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = false +ObservationTimeout = '5s' +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +DefaultTransactionQueueDepth = 1 +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' 
+SimulateTransactions = false +TransmitterAddress = '' +CaptureEATelemetry = false +TraceLogging = false + +[P2P] +IncomingMessageBufferSize = 10 +OutgoingMessageBufferSize = 10 +PeerID = '' +TraceLogging = false + +[P2P.V2] +Enabled = true +AnnounceAddresses = [] +DefaultBootstrappers = [] +DeltaDial = '15s' +DeltaReconcile = '1m0s' +ListenAddresses = [] + +[Keeper] +DefaultTransactionQueueDepth = 1 +GasPriceBufferPercent = 20 +GasTipCapBufferPercent = 20 +BaseFeeBufferPercent = 20 +MaxGracePeriod = 100 +TurnLookBack = 1000 + +[Keeper.Registry] +CheckGasOverhead = 200000 +PerformGasOverhead = 300000 +MaxPerformDataSize = 5000 +SyncInterval = '30m0s' +SyncUpkeepQueueSize = 10 + +[AutoPprof] +Enabled = false +ProfileRoot = '' +PollInterval = '10s' +GatherDuration = '10s' +GatherTraceDuration = '5s' +MaxProfileSize = '100.00mb' +CPUProfileRate = 1 +MemProfileRate = 1 +BlockProfileRate = 1 +MutexProfileFraction = 1 +MemThreshold = '4.00gb' +GoroutineThreshold = 5000 + +[Pyroscope] +ServerAddress = '' +Environment = 'mainnet' + +[Sentry] +Debug = false +DSN = '' +Environment = '' +Release = '' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = false +CollectorTarget = '' +NodeID = '' +SamplingRatio = 0.0 +Mode = 'tls' +TLSCertPath = '' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1s' +MaxStaleAge = '1h0m0s' +LatestReportDeadline = '5s' + +[Mercury.TLS] +CertFile = '' + +[[EVM]] +ChainID = '1' +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x514910771AF9Ca656af840dff83E8264EcF986CA' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' +RPCDefaultBatchSize = 250 
+RPCBlockQueryDelay = 1 + +[EVM.Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[EVM.HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 5400000 + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +Valid configuration. 
diff --git a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar new file mode 100644 index 00000000..f3b7824e --- /dev/null +++ b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar @@ -0,0 +1,371 @@ +exec plugin node -c config.toml -s secrets.toml validate +cmp stdout out.txt +exists $HOME/.plugin + +exec plugin -c config.toml -s secrets.toml node validate +cmp stdout out.txt +exists $HOME/.plugin + +-- config.toml -- +Log.Level = 'debug' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +[Log.File] +MaxSize = '1.00mb' +Dir = '' + +-- secrets.toml -- +[Database] +URL = 'postgresql://user:pass1234567890abcd@localhost:5432/dbname?sslmode=disable' + +[Password] +Keystore = 'keystore_pass' + +-- out.txt -- +# Secrets: +[Database] +URL = 'xxxxx' +AllowSimplePasswords = false + +[Password] +Keystore = 'xxxxx' + +# Input Configuration: +[Log] +Level = 'debug' + +[Log.File] +Dir = '' +MaxSize = '1.00mb' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +# Effective Configuration, with defaults applied: +InsecureFastScrypt = false +RootDir = '~/.plugin' +ShutdownGracePeriod = '5s' + +[Feature] +FeedsManager = true +LogPoller = false +UICSAKeys = false + +[Database] +DefaultIdleInTxSessionTimeout = '1h0m0s' +DefaultLockTimeout = '15s' +DefaultQueryTimeout = '10s' +LogQueries = false +MaxIdleConns = 10 +MaxOpenConns = 20 +MigrateOnStartup = true + +[Database.Backup] +Dir = '' +Frequency = '1h0m0s' +Mode = 'none' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '10m0s' +MinReconnectInterval = '1m0s' +FallbackPollInterval = '30s' + +[Database.Lock] +Enabled = true +LeaseDuration = '10s' +LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = false +BufferSize = 100 +MaxBatchSize = 50 +SendInterval = '500ms' 
+SendTimeout = '10s' +UseBatchSend = true + +[AuditLogger] +Enabled = false +ForwardToUrl = '' +JsonWrapperKey = '' +Headers = [] + +[Log] +Level = 'debug' +JSONConsole = false +UnixTS = false + +[Log.File] +Dir = '' +MaxSize = '1.00mb' +MaxAgeDays = 0 +MaxBackups = 1 + +[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = 'http://localhost:3000,http://localhost:6688' +BridgeResponseURL = '' +BridgeCacheTTL = '0s' +HTTPWriteTimeout = '10s' +HTTPPort = 6688 +SecureCookies = true +SessionTimeout = '15m0s' +SessionReaperExpiration = '240h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '0.0.0.0' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = '' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = '' +ActiveAttributeAllowedValue = '' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = '' +RPOrigin = '' + +[WebServer.RateLimit] +Authenticated = 1000 +AuthenticatedPeriod = '1m0s' +Unauthenticated = 5 +UnauthenticatedPeriod = '20s' + +[WebServer.TLS] +CertPath = '' +ForceRedirect = false +Host = '' +HTTPSPort = 6689 +KeyPath = '' +ListenIP = '0.0.0.0' + +[JobPipeline] +ExternalInitiatorsEnabled = false +MaxRunDuration = '10m0s' +MaxSuccessfulRuns = 10000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '24h0m0s' +ResultWriteQueueDepth = 100 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '15s' +MaxSize = '32.77kb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false + +[OCR2] +Enabled = false +ContractConfirmations = 3 +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +KeyBundleID = 
'0000000000000000000000000000000000000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = false +ObservationTimeout = '5s' +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +DefaultTransactionQueueDepth = 1 +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +SimulateTransactions = false +TransmitterAddress = '' +CaptureEATelemetry = false +TraceLogging = false + +[P2P] +IncomingMessageBufferSize = 10 +OutgoingMessageBufferSize = 10 +PeerID = '' +TraceLogging = false + +[P2P.V2] +Enabled = true +AnnounceAddresses = [] +DefaultBootstrappers = [] +DeltaDial = '15s' +DeltaReconcile = '1m0s' +ListenAddresses = [] + +[Keeper] +DefaultTransactionQueueDepth = 1 +GasPriceBufferPercent = 20 +GasTipCapBufferPercent = 20 +BaseFeeBufferPercent = 20 +MaxGracePeriod = 100 +TurnLookBack = 1000 + +[Keeper.Registry] +CheckGasOverhead = 200000 +PerformGasOverhead = 300000 +MaxPerformDataSize = 5000 +SyncInterval = '30m0s' +SyncUpkeepQueueSize = 10 + +[AutoPprof] +Enabled = false +ProfileRoot = '' +PollInterval = '10s' +GatherDuration = '10s' +GatherTraceDuration = '5s' +MaxProfileSize = '100.00mb' +CPUProfileRate = 1 +MemProfileRate = 1 +BlockProfileRate = 1 +MutexProfileFraction = 1 +MemThreshold = '4.00gb' +GoroutineThreshold = 5000 + +[Pyroscope] +ServerAddress = '' +Environment = 'mainnet' + +[Sentry] +Debug = false +DSN = '' +Environment = '' +Release = '' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = false +CollectorTarget = '' +NodeID = '' +SamplingRatio = 0.0 +Mode = 'tls' +TLSCertPath = '' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1s' +MaxStaleAge = '1h0m0s' +LatestReportDeadline = '5s' + +[Mercury.TLS] +CertFile = '' + +[[EVM]] +ChainID = '1' 
+AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x514910771AF9Ca656af840dff83E8264EcF986CA' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[EVM.Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[EVM.HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 5400000 + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +Valid configuration. 
diff --git a/testdata/scripts/node/validate/disk-based-logging.txtar b/testdata/scripts/node/validate/disk-based-logging.txtar new file mode 100644 index 00000000..35f01979 --- /dev/null +++ b/testdata/scripts/node/validate/disk-based-logging.txtar @@ -0,0 +1,371 @@ +exec plugin node -c config.toml -s secrets.toml validate +cmp stdout out.txt +exists $WORK/logs + +exec plugin -c config.toml -s secrets.toml node validate +cmp stdout out.txt +exists $WORK/logs + +-- config.toml -- +Log.Level = 'debug' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +[Log.File] +MaxSize = '1.00mb' +Dir = './logs' + +-- secrets.toml -- +[Database] +URL = 'postgresql://user:pass1234567890abcd@localhost:5432/dbname?sslmode=disable' + +[Password] +Keystore = 'keystore_pass' + +-- out.txt -- +# Secrets: +[Database] +URL = 'xxxxx' +AllowSimplePasswords = false + +[Password] +Keystore = 'xxxxx' + +# Input Configuration: +[Log] +Level = 'debug' + +[Log.File] +Dir = './logs' +MaxSize = '1.00mb' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +# Effective Configuration, with defaults applied: +InsecureFastScrypt = false +RootDir = '~/.plugin' +ShutdownGracePeriod = '5s' + +[Feature] +FeedsManager = true +LogPoller = false +UICSAKeys = false + +[Database] +DefaultIdleInTxSessionTimeout = '1h0m0s' +DefaultLockTimeout = '15s' +DefaultQueryTimeout = '10s' +LogQueries = false +MaxIdleConns = 10 +MaxOpenConns = 20 +MigrateOnStartup = true + +[Database.Backup] +Dir = '' +Frequency = '1h0m0s' +Mode = 'none' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '10m0s' +MinReconnectInterval = '1m0s' +FallbackPollInterval = '30s' + +[Database.Lock] +Enabled = true +LeaseDuration = '10s' +LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = false +BufferSize = 100 +MaxBatchSize = 50 +SendInterval = '500ms' +SendTimeout = '10s' 
+UseBatchSend = true + +[AuditLogger] +Enabled = false +ForwardToUrl = '' +JsonWrapperKey = '' +Headers = [] + +[Log] +Level = 'debug' +JSONConsole = false +UnixTS = false + +[Log.File] +Dir = './logs' +MaxSize = '1.00mb' +MaxAgeDays = 0 +MaxBackups = 1 + +[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = 'http://localhost:3000,http://localhost:6688' +BridgeResponseURL = '' +BridgeCacheTTL = '0s' +HTTPWriteTimeout = '10s' +HTTPPort = 6688 +SecureCookies = true +SessionTimeout = '15m0s' +SessionReaperExpiration = '240h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '0.0.0.0' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = '' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = '' +ActiveAttributeAllowedValue = '' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = '' +RPOrigin = '' + +[WebServer.RateLimit] +Authenticated = 1000 +AuthenticatedPeriod = '1m0s' +Unauthenticated = 5 +UnauthenticatedPeriod = '20s' + +[WebServer.TLS] +CertPath = '' +ForceRedirect = false +Host = '' +HTTPSPort = 6689 +KeyPath = '' +ListenIP = '0.0.0.0' + +[JobPipeline] +ExternalInitiatorsEnabled = false +MaxRunDuration = '10m0s' +MaxSuccessfulRuns = 10000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '24h0m0s' +ResultWriteQueueDepth = 100 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '15s' +MaxSize = '32.77kb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false + +[OCR2] +Enabled = false +ContractConfirmations = 3 +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +KeyBundleID = 
'0000000000000000000000000000000000000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = false +ObservationTimeout = '5s' +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +DefaultTransactionQueueDepth = 1 +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +SimulateTransactions = false +TransmitterAddress = '' +CaptureEATelemetry = false +TraceLogging = false + +[P2P] +IncomingMessageBufferSize = 10 +OutgoingMessageBufferSize = 10 +PeerID = '' +TraceLogging = false + +[P2P.V2] +Enabled = true +AnnounceAddresses = [] +DefaultBootstrappers = [] +DeltaDial = '15s' +DeltaReconcile = '1m0s' +ListenAddresses = [] + +[Keeper] +DefaultTransactionQueueDepth = 1 +GasPriceBufferPercent = 20 +GasTipCapBufferPercent = 20 +BaseFeeBufferPercent = 20 +MaxGracePeriod = 100 +TurnLookBack = 1000 + +[Keeper.Registry] +CheckGasOverhead = 200000 +PerformGasOverhead = 300000 +MaxPerformDataSize = 5000 +SyncInterval = '30m0s' +SyncUpkeepQueueSize = 10 + +[AutoPprof] +Enabled = false +ProfileRoot = '' +PollInterval = '10s' +GatherDuration = '10s' +GatherTraceDuration = '5s' +MaxProfileSize = '100.00mb' +CPUProfileRate = 1 +MemProfileRate = 1 +BlockProfileRate = 1 +MutexProfileFraction = 1 +MemThreshold = '4.00gb' +GoroutineThreshold = 5000 + +[Pyroscope] +ServerAddress = '' +Environment = 'mainnet' + +[Sentry] +Debug = false +DSN = '' +Environment = '' +Release = '' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = false +CollectorTarget = '' +NodeID = '' +SamplingRatio = 0.0 +Mode = 'tls' +TLSCertPath = '' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1s' +MaxStaleAge = '1h0m0s' +LatestReportDeadline = '5s' + +[Mercury.TLS] +CertFile = '' + +[[EVM]] +ChainID = '1' 
+AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x514910771AF9Ca656af840dff83E8264EcF986CA' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + +[EVM.Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[EVM.HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 5400000 + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +Valid configuration. 
diff --git a/testdata/scripts/node/validate/help.txtar b/testdata/scripts/node/validate/help.txtar new file mode 100644 index 00000000..1514a1a1 --- /dev/null +++ b/testdata/scripts/node/validate/help.txtar @@ -0,0 +1,9 @@ +exec plugin node validate --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin node validate - Validate the TOML configuration and secrets that are passed as flags to the `node` command. Prints the full effective configuration, with defaults included + +USAGE: + plugin node validate [arguments...] diff --git a/testdata/scripts/node/validate/invalid-duplicates.txtar b/testdata/scripts/node/validate/invalid-duplicates.txtar new file mode 100644 index 00000000..208b943b --- /dev/null +++ b/testdata/scripts/node/validate/invalid-duplicates.txtar @@ -0,0 +1,94 @@ +! exec plugin node -c config.toml -s secrets.toml validate +cmp stderr err.txt +cmp stdout out.txt + +-- config.toml -- +Log.Level = 'debug' + +[[EVM]] +ChainID = '1' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +[[Cosmos]] +ChainID = 'Malaga-420' + +[[Cosmos]] +ChainID = 'Malaga-420' + +[[Cosmos.Nodes]] +Name = 'primary' +TendermintURL = 'http://tender.mint' + +[[Cosmos.Nodes]] +Name = 'primary' +TendermintURL = 'http://tender.mint' + +[[Solana]] +ChainID = 'mainnet' + +[[Solana]] +ChainID = 'mainnet' + +[[Solana.Nodes]] +Name = 'primary' +URL = 'http://solana.web' + +[[Solana.Nodes]] +Name = 'primary' +URL = 'http://solana.web' + +[[Starknet]] +ChainID = 'foobar' + +[[Starknet]] +ChainID = 'foobar' + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://stark.node' + +[[Starknet.Nodes]] +Name = 'primary' +URL = 'http://stark.node' + + +-- secrets.toml -- +[Database] +URL = 'postgresql://user:pass@localhost:5432/dbname?sslmode=disable' +BackupURL = '' + +[Password] +Keystore = '' + +-- out.txt -- +-- err.txt -- +Error running 
app: invalid configuration: 4 errors: + - EVM: 4 errors: + - 1.ChainID: invalid value (1): duplicate - must be unique + - 1.Nodes.1.Name: invalid value (fake): duplicate - must be unique + - 1.Nodes.1.WSURL: invalid value (wss://foo.bar/ws): duplicate - must be unique + - 1.Nodes.1.HTTPURL: invalid value (https://foo.bar): duplicate - must be unique + - Cosmos: 3 errors: + - 1.ChainID: invalid value (Malaga-420): duplicate - must be unique + - 1.Nodes.1.Name: invalid value (primary): duplicate - must be unique + - 1.Nodes.1.TendermintURL: invalid value (http://tender.mint): duplicate - must be unique + - Solana: 3 errors: + - 1.ChainID: invalid value (mainnet): duplicate - must be unique + - 1.Nodes.1.Name: invalid value (primary): duplicate - must be unique + - 1.Nodes.1.URL: invalid value (http://solana.web): duplicate - must be unique + - Starknet: 3 errors: + - 1.ChainID: invalid value (foobar): duplicate - must be unique + - 1.Nodes.1.Name: invalid value (primary): duplicate - must be unique + - 1.Nodes.1.URL: invalid value (http://stark.node): duplicate - must be unique diff --git a/testdata/scripts/node/validate/invalid.txtar b/testdata/scripts/node/validate/invalid.txtar new file mode 100644 index 00000000..ee481090 --- /dev/null +++ b/testdata/scripts/node/validate/invalid.txtar @@ -0,0 +1,385 @@ +! 
exec plugin node -c config.toml -s secrets.toml validate +cmp stderr err.txt +cmp stdout out.txt + +-- config.toml -- +Log.Level = 'debug' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +-- secrets.toml -- +[Database] +URL = 'postgresql://user:pass@localhost:5432/dbname?sslmode=disable' +BackupURL = '' + +[Password] +Keystore = '' + +-- out.txt -- +# Secrets: +[Database] +URL = 'xxxxx' +BackupURL = 'xxxxx' +AllowSimplePasswords = false + +[Password] +Keystore = 'xxxxx' + +# Input Configuration: +[Log] +Level = 'debug' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +# Effective Configuration, with defaults applied: +InsecureFastScrypt = false +RootDir = '~/.plugin' +ShutdownGracePeriod = '5s' + +[Feature] +FeedsManager = true +LogPoller = false +UICSAKeys = false + +[Database] +DefaultIdleInTxSessionTimeout = '1h0m0s' +DefaultLockTimeout = '15s' +DefaultQueryTimeout = '10s' +LogQueries = false +MaxIdleConns = 10 +MaxOpenConns = 20 +MigrateOnStartup = true + +[Database.Backup] +Dir = '' +Frequency = '1h0m0s' +Mode = 'none' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '10m0s' +MinReconnectInterval = '1m0s' +FallbackPollInterval = '30s' + +[Database.Lock] +Enabled = true +LeaseDuration = '10s' +LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = false +BufferSize = 100 +MaxBatchSize = 50 +SendInterval = '500ms' +SendTimeout = '10s' +UseBatchSend = true + +[AuditLogger] +Enabled = false +ForwardToUrl = '' +JsonWrapperKey = '' +Headers = [] + +[Log] +Level = 'debug' +JSONConsole = false +UnixTS = false + +[Log.File] +Dir = '' +MaxSize = '5.12gb' +MaxAgeDays = 0 +MaxBackups = 1 + +[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = 'http://localhost:3000,http://localhost:6688' +BridgeResponseURL = '' +BridgeCacheTTL = '0s' +HTTPWriteTimeout = '10s' +HTTPPort = 6688 
+SecureCookies = true +SessionTimeout = '15m0s' +SessionReaperExpiration = '240h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '0.0.0.0' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = '' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = '' +ActiveAttributeAllowedValue = '' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = '' +RPOrigin = '' + +[WebServer.RateLimit] +Authenticated = 1000 +AuthenticatedPeriod = '1m0s' +Unauthenticated = 5 +UnauthenticatedPeriod = '20s' + +[WebServer.TLS] +CertPath = '' +ForceRedirect = false +Host = '' +HTTPSPort = 6689 +KeyPath = '' +ListenIP = '0.0.0.0' + +[JobPipeline] +ExternalInitiatorsEnabled = false +MaxRunDuration = '10m0s' +MaxSuccessfulRuns = 10000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '24h0m0s' +ResultWriteQueueDepth = 100 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '15s' +MaxSize = '32.77kb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false + +[OCR2] +Enabled = false +ContractConfirmations = 3 +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = false +ObservationTimeout = '5s' +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +DefaultTransactionQueueDepth = 1 +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' 
+SimulateTransactions = false +TransmitterAddress = '' +CaptureEATelemetry = false +TraceLogging = false + +[P2P] +IncomingMessageBufferSize = 10 +OutgoingMessageBufferSize = 10 +PeerID = '' +TraceLogging = false + +[P2P.V2] +Enabled = true +AnnounceAddresses = [] +DefaultBootstrappers = [] +DeltaDial = '15s' +DeltaReconcile = '1m0s' +ListenAddresses = [] + +[Keeper] +DefaultTransactionQueueDepth = 1 +GasPriceBufferPercent = 20 +GasTipCapBufferPercent = 20 +BaseFeeBufferPercent = 20 +MaxGracePeriod = 100 +TurnLookBack = 1000 + +[Keeper.Registry] +CheckGasOverhead = 200000 +PerformGasOverhead = 300000 +MaxPerformDataSize = 5000 +SyncInterval = '30m0s' +SyncUpkeepQueueSize = 10 + +[AutoPprof] +Enabled = false +ProfileRoot = '' +PollInterval = '10s' +GatherDuration = '10s' +GatherTraceDuration = '5s' +MaxProfileSize = '100.00mb' +CPUProfileRate = 1 +MemProfileRate = 1 +BlockProfileRate = 1 +MutexProfileFraction = 1 +MemThreshold = '4.00gb' +GoroutineThreshold = 5000 + +[Pyroscope] +ServerAddress = '' +Environment = 'mainnet' + +[Sentry] +Debug = false +DSN = '' +Environment = '' +Release = '' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = false +CollectorTarget = '' +NodeID = '' +SamplingRatio = 0.0 +Mode = 'tls' +TLSCertPath = '' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1s' +MaxStaleAge = '1h0m0s' +LatestReportDeadline = '5s' + +[Mercury.TLS] +CertFile = '' + +[[EVM]] +ChainID = '1' +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x514910771AF9Ca656af840dff83E8264EcF986CA' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' +RPCDefaultBatchSize = 250 
+RPCBlockQueryDelay = 1 + +[EVM.Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[EVM.HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 5400000 + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +Invalid configuration: invalid secrets: 2 errors: + - Database: 2 errors: + - URL: invalid value (*****): missing or insufficiently complex password: + Expected password complexity: + Must be at least 16 characters long + Must not comprise: + Leading or trailing whitespace + A user's API email + + Faults: + password is less than 16 characters long + . 
Database should be secured by a password matching the following complexity requirements: + Must have a length of 16-50 characters + Must not comprise: + Leading or trailing whitespace (note that a trailing newline in the password file, if present, will be ignored) + + - BackupURL: invalid value (*****): missing or insufficiently complex password: DB URL must be authenticated; plaintext URLs are not allowed. Database should be secured by a password matching the following complexity requirements: + Must have a length of 16-50 characters + Must not comprise: + Leading or trailing whitespace (note that a trailing newline in the password file, if present, will be ignored) + + - Password.Keystore: empty: must be provided and non-empty + +-- err.txt -- +invalid configuration diff --git a/testdata/scripts/node/validate/valid.txtar b/testdata/scripts/node/validate/valid.txtar new file mode 100644 index 00000000..f8061949 --- /dev/null +++ b/testdata/scripts/node/validate/valid.txtar @@ -0,0 +1,368 @@ +exec plugin node -c config.toml -s secrets.toml validate +cmp stdout out.txt +exec plugin node -config config.toml -secrets secrets.toml validate +cmp stdout out.txt + +# Deprecated flags +exec plugin -c config.toml -s secrets.toml node validate +cmp stdout out.txt + +# Mixed disallowed +! exec plugin -c config.toml node -s secrets.toml validate +stderr 'Error running app: multiple commands with --config or --secrets flags. only one command may specify these flags. 
when secrets are used, they must be specific together in the same command' + +-- config.toml -- +Log.Level = 'debug' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +-- secrets.toml -- +[Database] +URL = 'postgresql://user:pass1234567890abcd@localhost:5432/dbname?sslmode=disable' + +[Password] +Keystore = 'keystore_pass' + +-- out.txt -- +# Secrets: +[Database] +URL = 'xxxxx' +AllowSimplePasswords = false + +[Password] +Keystore = 'xxxxx' + +# Input Configuration: +[Log] +Level = 'debug' + +[[EVM]] +ChainID = '1' + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +# Effective Configuration, with defaults applied: +InsecureFastScrypt = false +RootDir = '~/.plugin' +ShutdownGracePeriod = '5s' + +[Feature] +FeedsManager = true +LogPoller = false +UICSAKeys = false + +[Database] +DefaultIdleInTxSessionTimeout = '1h0m0s' +DefaultLockTimeout = '15s' +DefaultQueryTimeout = '10s' +LogQueries = false +MaxIdleConns = 10 +MaxOpenConns = 20 +MigrateOnStartup = true + +[Database.Backup] +Dir = '' +Frequency = '1h0m0s' +Mode = 'none' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '10m0s' +MinReconnectInterval = '1m0s' +FallbackPollInterval = '30s' + +[Database.Lock] +Enabled = true +LeaseDuration = '10s' +LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = false +BufferSize = 100 +MaxBatchSize = 50 +SendInterval = '500ms' +SendTimeout = '10s' +UseBatchSend = true + +[AuditLogger] +Enabled = false +ForwardToUrl = '' +JsonWrapperKey = '' +Headers = [] + +[Log] +Level = 'debug' +JSONConsole = false +UnixTS = false + +[Log.File] +Dir = '' +MaxSize = '5.12gb' +MaxAgeDays = 0 +MaxBackups = 1 + +[WebServer] +AuthenticationMethod = 'local' +AllowOrigins = 'http://localhost:3000,http://localhost:6688' +BridgeResponseURL = '' +BridgeCacheTTL = '0s' +HTTPWriteTimeout = '10s' +HTTPPort = 6688 +SecureCookies = true +SessionTimeout = 
'15m0s' +SessionReaperExpiration = '240h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '0.0.0.0' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = '' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = '' +ActiveAttributeAllowedValue = '' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = '' +RPOrigin = '' + +[WebServer.RateLimit] +Authenticated = 1000 +AuthenticatedPeriod = '1m0s' +Unauthenticated = 5 +UnauthenticatedPeriod = '20s' + +[WebServer.TLS] +CertPath = '' +ForceRedirect = false +Host = '' +HTTPSPort = 6689 +KeyPath = '' +ListenIP = '0.0.0.0' + +[JobPipeline] +ExternalInitiatorsEnabled = false +MaxRunDuration = '10m0s' +MaxSuccessfulRuns = 10000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '24h0m0s' +ResultWriteQueueDepth = 100 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '15s' +MaxSize = '32.77kb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false + +[OCR2] +Enabled = false +ContractConfirmations = 3 +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = false +ObservationTimeout = '5s' +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +DefaultTransactionQueueDepth = 1 +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +SimulateTransactions = false 
+TransmitterAddress = '' +CaptureEATelemetry = false +TraceLogging = false + +[P2P] +IncomingMessageBufferSize = 10 +OutgoingMessageBufferSize = 10 +PeerID = '' +TraceLogging = false + +[P2P.V2] +Enabled = true +AnnounceAddresses = [] +DefaultBootstrappers = [] +DeltaDial = '15s' +DeltaReconcile = '1m0s' +ListenAddresses = [] + +[Keeper] +DefaultTransactionQueueDepth = 1 +GasPriceBufferPercent = 20 +GasTipCapBufferPercent = 20 +BaseFeeBufferPercent = 20 +MaxGracePeriod = 100 +TurnLookBack = 1000 + +[Keeper.Registry] +CheckGasOverhead = 200000 +PerformGasOverhead = 300000 +MaxPerformDataSize = 5000 +SyncInterval = '30m0s' +SyncUpkeepQueueSize = 10 + +[AutoPprof] +Enabled = false +ProfileRoot = '' +PollInterval = '10s' +GatherDuration = '10s' +GatherTraceDuration = '5s' +MaxProfileSize = '100.00mb' +CPUProfileRate = 1 +MemProfileRate = 1 +BlockProfileRate = 1 +MutexProfileFraction = 1 +MemThreshold = '4.00gb' +GoroutineThreshold = 5000 + +[Pyroscope] +ServerAddress = '' +Environment = 'mainnet' + +[Sentry] +Debug = false +DSN = '' +Environment = '' +Release = '' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = false +CollectorTarget = '' +NodeID = '' +SamplingRatio = 0.0 +Mode = 'tls' +TLSCertPath = '' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1s' +MaxStaleAge = '1h0m0s' +LatestReportDeadline = '5s' + +[Mercury.TLS] +CertFile = '' + +[[EVM]] +ChainID = '1' +AutoCreateKey = true +BlockBackfillDepth = 10 +BlockBackfillSkip = false +FinalityDepth = 50 +FinalityTagEnabled = false +LinkContractAddress = '0x514910771AF9Ca656af840dff83E8264EcF986CA' +LogBackfillBatchSize = 1000 +LogPollInterval = '15s' +LogKeepBlocksDepth = 100000 +MinIncomingConfirmations = 3 +MinContractPayment = '0.1 pli' +NonceAutoSync = true +NoNewHeadsThreshold = '3m0s' +OperatorFactoryAddress = '0x3E64Cd889482443324F91bFA9c84fE72A511f48A' +RPCDefaultBatchSize = 250 +RPCBlockQueryDelay = 1 + 
+[EVM.Transactions] +ForwardersEnabled = false +MaxInFlight = 16 +MaxQueued = 250 +ReaperInterval = '1h0m0s' +ReaperThreshold = '168h0m0s' +ResendAfterThreshold = '1m0s' + +[EVM.BalanceMonitor] +Enabled = true + +[EVM.GasEstimator] +Mode = 'BlockHistory' +PriceDefault = '20 gwei' +PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether' +PriceMin = '1 gwei' +LimitDefault = 500000 +LimitMax = 500000 +LimitMultiplier = '1' +LimitTransfer = 21000 +BumpMin = '5 gwei' +BumpPercent = 20 +BumpThreshold = 3 +EIP1559DynamicFees = true +FeeCapDefault = '100 gwei' +TipCapDefault = '1 wei' +TipCapMin = '1 wei' + +[EVM.GasEstimator.BlockHistory] +BatchSize = 25 +BlockHistorySize = 4 +CheckInclusionBlocks = 12 +CheckInclusionPercentile = 90 +TransactionPercentile = 50 + +[EVM.HeadTracker] +HistoryDepth = 100 +MaxBufferSize = 3 +SamplingInterval = '1s' + +[EVM.NodePool] +PollFailureThreshold = 5 +PollInterval = '10s' +SelectionMode = 'HighestHead' +SyncThreshold = 5 +LeaseDuration = '0s' + +[EVM.OCR] +ContractConfirmations = 4 +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +DeltaCOverride = '168h0m0s' +DeltaCJitterOverride = '1h0m0s' +ObservationGracePeriod = '1s' + +[EVM.OCR2] +[EVM.OCR2.Automation] +GasLimit = 5400000 + +[[EVM.Nodes]] +Name = 'fake' +WSURL = 'wss://foo.bar/ws' +HTTPURL = 'https://foo.bar' + +Valid configuration. 
diff --git a/testdata/scripts/node/validate/warnings.txtar b/testdata/scripts/node/validate/warnings.txtar new file mode 100644 index 00000000..c60b084f --- /dev/null +++ b/testdata/scripts/node/validate/warnings.txtar @@ -0,0 +1,269 @@ +exec plugin node -c config.toml -s secrets.toml validate +cmp stdout out.txt + +-- config.toml -- + +[Tracing] +Enabled = true +CollectorTarget = 'otel-collector:4317' +TLSCertPath = 'something' +Mode = 'unencrypted' + +-- secrets.toml -- +[Database] +URL = 'postgresql://user:pass1234567890abcd@localhost:5432/dbname?sslmode=disable' + +[Password] +Keystore = 'keystore_pass' + +-- out.txt -- +# Secrets: +[Database] +URL = 'xxxxx' +AllowSimplePasswords = false + +[Password] +Keystore = 'xxxxx' + +# Input Configuration: +[Tracing] +Enabled = true +CollectorTarget = 'otel-collector:4317' +Mode = 'unencrypted' +TLSCertPath = 'something' + +# Effective Configuration, with defaults applied: +InsecureFastScrypt = false +RootDir = '~/.plugin' +ShutdownGracePeriod = '5s' + +[Feature] +FeedsManager = true +LogPoller = false +UICSAKeys = false + +[Database] +DefaultIdleInTxSessionTimeout = '1h0m0s' +DefaultLockTimeout = '15s' +DefaultQueryTimeout = '10s' +LogQueries = false +MaxIdleConns = 10 +MaxOpenConns = 20 +MigrateOnStartup = true + +[Database.Backup] +Dir = '' +Frequency = '1h0m0s' +Mode = 'none' +OnVersionUpgrade = true + +[Database.Listener] +MaxReconnectDuration = '10m0s' +MinReconnectInterval = '1m0s' +FallbackPollInterval = '30s' + +[Database.Lock] +Enabled = true +LeaseDuration = '10s' +LeaseRefreshInterval = '1s' + +[TelemetryIngress] +UniConn = true +Logging = false +BufferSize = 100 +MaxBatchSize = 50 +SendInterval = '500ms' +SendTimeout = '10s' +UseBatchSend = true + +[AuditLogger] +Enabled = false +ForwardToUrl = '' +JsonWrapperKey = '' +Headers = [] + +[Log] +Level = 'info' +JSONConsole = false +UnixTS = false + +[Log.File] +Dir = '' +MaxSize = '5.12gb' +MaxAgeDays = 0 +MaxBackups = 1 + +[WebServer] +AuthenticationMethod = 
'local' +AllowOrigins = 'http://localhost:3000,http://localhost:6688' +BridgeResponseURL = '' +BridgeCacheTTL = '0s' +HTTPWriteTimeout = '10s' +HTTPPort = 6688 +SecureCookies = true +SessionTimeout = '15m0s' +SessionReaperExpiration = '240h0m0s' +HTTPMaxSize = '32.77kb' +StartTimeout = '15s' +ListenIP = '0.0.0.0' + +[WebServer.LDAP] +ServerTLS = true +SessionTimeout = '15m0s' +QueryTimeout = '2m0s' +BaseUserAttr = 'uid' +BaseDN = '' +UsersDN = 'ou=users' +GroupsDN = 'ou=groups' +ActiveAttribute = '' +ActiveAttributeAllowedValue = '' +AdminUserGroupCN = 'NodeAdmins' +EditUserGroupCN = 'NodeEditors' +RunUserGroupCN = 'NodeRunners' +ReadUserGroupCN = 'NodeReadOnly' +UserApiTokenEnabled = false +UserAPITokenDuration = '240h0m0s' +UpstreamSyncInterval = '0s' +UpstreamSyncRateLimit = '2m0s' + +[WebServer.MFA] +RPID = '' +RPOrigin = '' + +[WebServer.RateLimit] +Authenticated = 1000 +AuthenticatedPeriod = '1m0s' +Unauthenticated = 5 +UnauthenticatedPeriod = '20s' + +[WebServer.TLS] +CertPath = '' +ForceRedirect = false +Host = '' +HTTPSPort = 6689 +KeyPath = '' +ListenIP = '0.0.0.0' + +[JobPipeline] +ExternalInitiatorsEnabled = false +MaxRunDuration = '10m0s' +MaxSuccessfulRuns = 10000 +ReaperInterval = '1h0m0s' +ReaperThreshold = '24h0m0s' +ResultWriteQueueDepth = 100 + +[JobPipeline.HTTPRequest] +DefaultTimeout = '15s' +MaxSize = '32.77kb' + +[FluxMonitor] +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false + +[OCR2] +Enabled = false +ContractConfirmations = 3 +BlockchainTimeout = '20s' +ContractPollInterval = '1m0s' +ContractSubscribeInterval = '2m0s' +ContractTransmitterTransmitTimeout = '10s' +DatabaseTimeout = '10s' +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +CaptureEATelemetry = false +CaptureAutomationCustomTelemetry = true +DefaultTransactionQueueDepth = 1 +SimulateTransactions = false +TraceLogging = false + +[OCR] +Enabled = false +ObservationTimeout = '5s' +BlockchainTimeout = '20s' +ContractPollInterval = 
'1m0s' +ContractSubscribeInterval = '2m0s' +DefaultTransactionQueueDepth = 1 +KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000' +SimulateTransactions = false +TransmitterAddress = '' +CaptureEATelemetry = false +TraceLogging = false + +[P2P] +IncomingMessageBufferSize = 10 +OutgoingMessageBufferSize = 10 +PeerID = '' +TraceLogging = false + +[P2P.V2] +Enabled = true +AnnounceAddresses = [] +DefaultBootstrappers = [] +DeltaDial = '15s' +DeltaReconcile = '1m0s' +ListenAddresses = [] + +[Keeper] +DefaultTransactionQueueDepth = 1 +GasPriceBufferPercent = 20 +GasTipCapBufferPercent = 20 +BaseFeeBufferPercent = 20 +MaxGracePeriod = 100 +TurnLookBack = 1000 + +[Keeper.Registry] +CheckGasOverhead = 200000 +PerformGasOverhead = 300000 +MaxPerformDataSize = 5000 +SyncInterval = '30m0s' +SyncUpkeepQueueSize = 10 + +[AutoPprof] +Enabled = false +ProfileRoot = '' +PollInterval = '10s' +GatherDuration = '10s' +GatherTraceDuration = '5s' +MaxProfileSize = '100.00mb' +CPUProfileRate = 1 +MemProfileRate = 1 +BlockProfileRate = 1 +MutexProfileFraction = 1 +MemThreshold = '4.00gb' +GoroutineThreshold = 5000 + +[Pyroscope] +ServerAddress = '' +Environment = 'mainnet' + +[Sentry] +Debug = false +DSN = '' +Environment = '' +Release = '' + +[Insecure] +DevWebServer = false +OCRDevelopmentMode = false +InfiniteDepthQueries = false +DisableRateLimiting = false + +[Tracing] +Enabled = true +CollectorTarget = 'otel-collector:4317' +NodeID = '' +SamplingRatio = 0.0 +Mode = 'unencrypted' +TLSCertPath = 'something' + +[Mercury] +[Mercury.Cache] +LatestReportTTL = '1s' +MaxStaleAge = '1h0m0s' +LatestReportDeadline = '5s' + +[Mercury.TLS] +CertFile = '' + +# Configuration warning: +Tracing.TLSCertPath: invalid value (something): must be empty when Tracing.Mode is 'unencrypted' +Valid configuration. 
diff --git a/testdata/scripts/nodes/cosmos/help.txtar b/testdata/scripts/nodes/cosmos/help.txtar new file mode 100644 index 00000000..9e6c08b9 --- /dev/null +++ b/testdata/scripts/nodes/cosmos/help.txtar @@ -0,0 +1,16 @@ +exec plugin nodes cosmos --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin nodes cosmos - Commands for handling Cosmos node configuration + +USAGE: + plugin nodes cosmos command [command options] [arguments...] + +COMMANDS: + list List all existing Cosmos nodes + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/nodes/cosmos/list/help.txtar b/testdata/scripts/nodes/cosmos/list/help.txtar new file mode 100644 index 00000000..ecd9d67c --- /dev/null +++ b/testdata/scripts/nodes/cosmos/list/help.txtar @@ -0,0 +1,9 @@ +exec plugin nodes cosmos list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin nodes cosmos list - List all existing Cosmos nodes + +USAGE: + plugin nodes cosmos list [arguments...] diff --git a/testdata/scripts/nodes/evm/help.txtar b/testdata/scripts/nodes/evm/help.txtar new file mode 100644 index 00000000..d21f460d --- /dev/null +++ b/testdata/scripts/nodes/evm/help.txtar @@ -0,0 +1,16 @@ +exec plugin nodes evm --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin nodes evm - Commands for handling EVM node configuration + +USAGE: + plugin nodes evm command [command options] [arguments...] + +COMMANDS: + list List all existing EVM nodes + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/nodes/evm/list/help.txtar b/testdata/scripts/nodes/evm/list/help.txtar new file mode 100644 index 00000000..5936a4a8 --- /dev/null +++ b/testdata/scripts/nodes/evm/list/help.txtar @@ -0,0 +1,9 @@ +exec plugin nodes evm list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin nodes evm list - List all existing EVM nodes + +USAGE: + plugin nodes evm list [arguments...] 
diff --git a/testdata/scripts/nodes/help.txtar b/testdata/scripts/nodes/help.txtar new file mode 100644 index 00000000..c4d0d32f --- /dev/null +++ b/testdata/scripts/nodes/help.txtar @@ -0,0 +1,19 @@ +exec plugin nodes --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin nodes - Commands for handling node configuration + +USAGE: + plugin nodes command [command options] [arguments...] + +COMMANDS: + evm Commands for handling EVM node configuration + cosmos Commands for handling Cosmos node configuration + solana Commands for handling Solana node configuration + starknet Commands for handling StarkNet node configuration + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/nodes/solana/help.txtar b/testdata/scripts/nodes/solana/help.txtar new file mode 100644 index 00000000..ebf02efa --- /dev/null +++ b/testdata/scripts/nodes/solana/help.txtar @@ -0,0 +1,16 @@ +exec plugin nodes solana --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin nodes solana - Commands for handling Solana node configuration + +USAGE: + plugin nodes solana command [command options] [arguments...] + +COMMANDS: + list List all existing Solana nodes + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/nodes/solana/list/help.txtar b/testdata/scripts/nodes/solana/list/help.txtar new file mode 100644 index 00000000..04189e42 --- /dev/null +++ b/testdata/scripts/nodes/solana/list/help.txtar @@ -0,0 +1,9 @@ +exec plugin nodes solana list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin nodes solana list - List all existing Solana nodes + +USAGE: + plugin nodes solana list [arguments...] 
diff --git a/testdata/scripts/nodes/starknet/help.txtar b/testdata/scripts/nodes/starknet/help.txtar new file mode 100644 index 00000000..cdab1027 --- /dev/null +++ b/testdata/scripts/nodes/starknet/help.txtar @@ -0,0 +1,16 @@ +exec plugin nodes starknet --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin nodes starknet - Commands for handling StarkNet node configuration + +USAGE: + plugin nodes starknet command [command options] [arguments...] + +COMMANDS: + list List all existing StarkNet nodes + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/nodes/starknet/list/help.txtar b/testdata/scripts/nodes/starknet/list/help.txtar new file mode 100644 index 00000000..a78599b3 --- /dev/null +++ b/testdata/scripts/nodes/starknet/list/help.txtar @@ -0,0 +1,9 @@ +exec plugin nodes starknet list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin nodes starknet list - List all existing StarkNet nodes + +USAGE: + plugin nodes starknet list [arguments...] diff --git a/testdata/scripts/txs/cosmos/create/help.txtar b/testdata/scripts/txs/cosmos/create/help.txtar new file mode 100644 index 00000000..9dbf5e95 --- /dev/null +++ b/testdata/scripts/txs/cosmos/create/help.txtar @@ -0,0 +1,14 @@ +exec plugin txs cosmos create --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin txs cosmos create - Send of from node Cosmos account to destination . + +USAGE: + plugin txs cosmos create [command options] [arguments...] + +OPTIONS: + --force allows to send a higher amount than the account's balance + --id value chain ID + diff --git a/testdata/scripts/txs/cosmos/help.txtar b/testdata/scripts/txs/cosmos/help.txtar new file mode 100644 index 00000000..bdf866ae --- /dev/null +++ b/testdata/scripts/txs/cosmos/help.txtar @@ -0,0 +1,16 @@ +exec plugin txs cosmos --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin txs cosmos - Commands for handling Cosmos transactions + +USAGE: + plugin txs cosmos command [command options] [arguments...] 
+ +COMMANDS: + create Send of from node Cosmos account to destination . + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/txs/evm/create/help.txtar b/testdata/scripts/txs/evm/create/help.txtar new file mode 100644 index 00000000..733e6baf --- /dev/null +++ b/testdata/scripts/txs/evm/create/help.txtar @@ -0,0 +1,16 @@ +exec plugin txs evm create --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin txs evm create - Send ETH (or wei) from node ETH account to destination . + +USAGE: + plugin txs evm create [command options] [arguments...] + +OPTIONS: + --force allows to send a higher amount than the account's balance + --eth allows to send ETH amounts (Default behavior) + --wei allows to send WEI amounts + --id value chain ID (default: 0) + diff --git a/testdata/scripts/txs/evm/help.txtar b/testdata/scripts/txs/evm/help.txtar new file mode 100644 index 00000000..e2b0c2fc --- /dev/null +++ b/testdata/scripts/txs/evm/help.txtar @@ -0,0 +1,18 @@ +exec plugin txs evm --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin txs evm - Commands for handling EVM transactions + +USAGE: + plugin txs evm command [command options] [arguments...] + +COMMANDS: + create Send ETH (or wei) from node ETH account to destination . + list List the Ethereum Transactions in descending order + show get information on a specific Ethereum Transaction + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/txs/evm/list/help.txtar b/testdata/scripts/txs/evm/list/help.txtar new file mode 100644 index 00000000..8db2c39f --- /dev/null +++ b/testdata/scripts/txs/evm/list/help.txtar @@ -0,0 +1,13 @@ +exec plugin txs evm list --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin txs evm list - List the Ethereum Transactions in descending order + +USAGE: + plugin txs evm list [command options] [arguments...] 
+ +OPTIONS: + --page value page of results to display (default: 0) + diff --git a/testdata/scripts/txs/evm/show/help.txtar b/testdata/scripts/txs/evm/show/help.txtar new file mode 100644 index 00000000..6f02f38c --- /dev/null +++ b/testdata/scripts/txs/evm/show/help.txtar @@ -0,0 +1,9 @@ +exec plugin txs evm show --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin txs evm show - get information on a specific Ethereum Transaction + +USAGE: + plugin txs evm show [arguments...] diff --git a/testdata/scripts/txs/help.txtar b/testdata/scripts/txs/help.txtar new file mode 100644 index 00000000..13dc3f80 --- /dev/null +++ b/testdata/scripts/txs/help.txtar @@ -0,0 +1,18 @@ +exec plugin txs --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin txs - Commands for handling transactions + +USAGE: + plugin txs command [command options] [arguments...] + +COMMANDS: + evm Commands for handling EVM transactions + cosmos Commands for handling Cosmos transactions + solana Commands for handling Solana transactions + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/txs/solana/create/help.txtar b/testdata/scripts/txs/solana/create/help.txtar new file mode 100644 index 00000000..b855ae65 --- /dev/null +++ b/testdata/scripts/txs/solana/create/help.txtar @@ -0,0 +1,14 @@ +exec plugin txs solana create --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin txs solana create - Send lamports from node Solana account to destination . + +USAGE: + plugin txs solana create [command options] [arguments...] 
+ +OPTIONS: + --force allows to send a higher amount than the account's balance + --id value chain ID, options: [mainnet, testnet, devnet, localnet] + diff --git a/testdata/scripts/txs/solana/help.txtar b/testdata/scripts/txs/solana/help.txtar new file mode 100644 index 00000000..3bc18dfd --- /dev/null +++ b/testdata/scripts/txs/solana/help.txtar @@ -0,0 +1,16 @@ +exec plugin txs solana --help +cmp stdout out.txt + +-- out.txt -- +NAME: + plugin txs solana - Commands for handling Solana transactions + +USAGE: + plugin txs solana command [command options] [arguments...] + +COMMANDS: + create Send lamports from node Solana account to destination . + +OPTIONS: + --help, -h show help + diff --git a/testdata/scripts/version.txtar b/testdata/scripts/version.txtar new file mode 100644 index 00000000..75adc460 --- /dev/null +++ b/testdata/scripts/version.txtar @@ -0,0 +1,2 @@ +exec plugin --version +stdout 'plugin version .*@.*' diff --git a/tools/README.md b/tools/README.md new file mode 100644 index 00000000..783dea43 --- /dev/null +++ b/tools/README.md @@ -0,0 +1,5 @@ +# Plugin Tools + +## [Docker](./docker) + +Manage Docker for development and testing diff --git a/tools/benchmark/job_spec_delete_v2.sh b/tools/benchmark/job_spec_delete_v2.sh new file mode 100644 index 00000000..51d180f6 --- /dev/null +++ b/tools/benchmark/job_spec_delete_v2.sh @@ -0,0 +1,146 @@ +#!/usr/local/bin/bash + +CONTRACT_ADDRESSES=( + 0x013832098A55c434E33db02D973dC21F3Eb0bfEE + 0x01CA19e7De4C783311B5c9264F9a43B17d765845 + 0x020299e78B6785Fad94D8136c90D9C977E21942a + 0x027fd7173074F64832C6db440254F54c9825b064 + 0x03d50127593EC4A04ABf9B994166EAfaFbb9B43a + 0x04C032FB6bF0876D87dea0050B0EcaD2Dcadb0b3 + 0x0DF459BeA2975cA9E082E80638C3E2C5c76931aF + 0x12a3516567CEa7eC6DEFFf767Afe3bc82086c1F2 + 0x1a4abb159Cb515f4B8f047Ba94b29Bf2BcFE584D + 0x1c2522206437fCaD46a91D880ba1aB3c78F03fF4 + 0x209b5fAE458D6F25d3a012Ba6Bbb12C5BC4a9CAf + 0x27FF18679198c6De5ef4aa65eC6CF03dB450e7df + 
0x2Ad54266A811865258144C5EbB99e3aE82Fc2F8a + 0x2C29Cf1568f583Ff75962f930Fe978d5041769Ac + 0x2b1066B9306f21cAe47dC7dc6693B5d8a5C42DDb + 0x2e2c3FB6Bda479CFFc9a6A45aEae49b35005f14D + 0x33149D6F6441AC87c8770925fBe2d4E175549AD3 + 0x365b4486B162ef6Ca6288C46fA76aCD817c410E6 + 0x3A7239Ef9371d39E2831ABa11cE68BBAB5906B65 + 0x3BD79F1eEC125D5Abeed0e101b5D465eA7F34d73 + 0x3C5219346B3f490e0749D95E94552399103f8543 + 0x4df480375a5100A312087bB48B875C77975664F2 + 0x5FCB63404E66F3a0e6f6e5488C83Bb08B1286290 + 0x5e39b0a03F9b6E0Ba63185b9D0a29f4936dBdbe6 + 0x60A34BB5a68B3D4288EB7C7e86051a472Aa5a63F + 0x6227f595A569601d3B950B6D0730acdAeaA8670e + 0x632f487362847a26F186fB3D77D1Fcf329BCDa3b + 0x6703d15E628EB3E0eb454A8f35b3e873c6a2Ea81 + 0x68000f119ccF91C1Eb1F59F8364C7fDddeBb8eE0 + 0x6D0b2d2E0384F6Bfa89c96469a9E99c82EC0c686 + 0x6b7906169356d265e98777e1F8B85F445481B63a + 0x7081EfD8CFb94e47E21106023cf18Ac7b8c54387 + 0x7419363111C62a627d3772b1018E095E76B63dC7 + 0x7569d7f1823D5d0A3A39bB1a9925d5A9DC54D518 + 0x75E9C3E84BaE42273EfD7565840e9f2020d5E683 + 0x760322004d26AED43cadEbb499a1Fe4f7Cb4A86D + 0x799d8786fb6eAE43AAea6decBbDF5CD9F19D9D95 + 0x7D6773974908d645D043A97Eca1A6E63201a8CFb + 0x7eA828A03BBE18643Ca670792b5B916ae36c019a + 0x83C446940923E60a949F01B20dc6be503dafDDa7 + 0x83aF768Bbbdf806402038b782c40683036843f2C + 0x88F422e3Fb5b48a286c6cc493C6339d3700f3010 + 0x8Bda4a43dB18113C7D79f1C392E87800F72113e5 + 0x8E481B53EE070ae164C0ea4d798738bef1e40B81 + 0x8F8a372c5d0F4957764478A2a7Db4EEda902CF76 + 0x8f0B39e2c605551E4641FB35E497392F20377b52 + 0x90543B84004f5F5f376E3c1700685B1d17A8fcfA + 0x983370972Cb6F8BCf594155d87A250C38a598B9B + 0x98597A208CAd30E3dB8eca00e3fa27901E1E5b40 + 0x9E9798F9cAB7EF5004506a9205a00b31662e3C3B + 0x9aA33BF0593Df367A2f15c0465Af8929d3B4Fb73 + 0xA351F5494F440D6d8c6329B8c5aDE1a501A3e816 + 0xA35E7BF72707f25e8DDEf546aF3a01bBC0Ae1774 + 0xA8EB42967B5632aFCBA67C5f832C57F82866103F + 0xB15Eac1d7397A737345ae88df1Caa74303a0183A + 0xB1E137278bb030a68Fd3B5009aE22Ef76Fe80D7D + 
0xBA8be48B4Aa89D88EfA7145100a23b52e190dEB3 + 0xBDFa2A7f2C279886B9E71EdF93E29E7dAa7E0670 + 0xC22aB7076bA7f6ff0472bBd45Ee13DD5735B8Fe8 + 0xC6c99c5E6EAED676569649d63f2384A611A469a2 + 0xC8c41E499F0694F3e13130f917D08183a0C6F4fB + 0xC8c871900C29954E806d9cBd2f4e1493F66deFf4 + 0xCC301615292b97aC08eF9C0992d01684ec4DA818 + 0xCe3f414050090Ce49Ea00490E20aaAA6754269b8 + 0xD2E1B6E34F7841577b44DA1f7f9935Ad0dF589ad + 0xD418767a961BD2954a53Eb906b80AF9257265DB8 + 0xD44cAdB18bEDE0e14252D72666eF334b277B923C + 0xDA40d7CA3d5116429f9c10F8B63faf6b29bAf7b4 + 0xEF103D1DDBB686C3E4063150D6276dEdC13d0F70 + 0xEF2Ed320cf1f44B3F99867329A7a7572CD104186 + 0xEb9A196B776b0b05A2cE40E68cE07400DCb6889F + 0xEc403821297666B25c3B5b539Be52CeA75A3B884 + 0xF0945d8369eeCf42C5c3ba25E2ACAC8b39F7bFfb + 0xF20d4F32086B3EeFa27779fd1a80Eee575CcD6b8 + 0xF2E9f1C923426b88b45481522628864953FF3441 + 0xF8eeB0A0f02e3b1faDd6019d8FAFb0F0BF4a869D + 0xF91a3d8567D96a9A250d9Aa1D2c780B6356Ba8ed + 0xFbF455785c7411Dee676E11594190cE25c12B702 + 0xa27243e817b557F673250A44120F8c3a0Db27D12 + 0xa325E45Cca179a36afAb7FC7B624d8771A77754a + 0xc4BAFEC0a234BC04855103BAE657EF94D2896D80 + 0xd07aa8665d7354286e6f5374353400b8ebc58ac9 + 0xd57a5E1Ccb1401A964609d274a3e541426772fc5 + 0xd95F31939B3B51A9711F9A4744dEdaE0A3a90D03 + 0xde52a4FDfbD7776EA935edF80Afa48a65790F0A8 + 0xe8263897c5687DBDcC1596B1CCF10462Ba24a9b4 + 0xe8663e2dEC005EFc37B1278aC92E1bdD763873D6 + 0xeaBb2a446167cda4F0F96921D2d77d267420aB3C + 0xf0381afDf765FC7E714CC5a1066397c588a65838 + 0xf3757166A3A0F509E1370d3f42c2b694B386aab0 + 0xf4Ac3711A7Aa0197b3e9c02AAf7aEf89BFBEad7A +) + +function make_spec() { + job_spec=`mktemp /tmp/job_spec_XXXXXX` + if [ $? -ne 0 ]; then + echo "$0: Can't create temp file, exiting..." 
+ exit 1 + fi + + function cleanup { + rm -f "$job_spec" + } + + # register the cleanup function to be called on the EXIT signal + # trap cleanup EXIT + + address="${CONTRACT_ADDRESSES[$1]}" + cat >> $job_spec <<-EOF +blockchainTimeout = "20s" +contractAddress = "${address}" +contractConfigConfirmations = 3 +contractConfigTrackerPollInterval = "1m" +contractConfigTrackerSubscribeInterval = "2m" +isBootstrapPeer = true +p2pv2Bootstrappers = [] +p2pPeerID = "p2p_12D3KooWMk13oppZXmGdRZgaJBFDF6Tc5521YYxKjwkscLSEPrVW" +schemaVersion = 1 +type = "offchainreporting" + +EOF + + echo $job_spec +} + +plugin admin login --file tools/clroot/apicredentials + +number=${#CONTRACT_ADDRESSES[@]} +echo "Adding jobs..." +time { + for (( i=1; i<$number; i++ )) do + job_spec=`make_spec $i` + plugin jobs create "$job_spec" + done +} + + +echo "Deleting jobs..." +time { + for (( i=1; i<$number; i++ )) do + plugin jobs delete $i + done +} diff --git a/tools/bin/build_abigen b/tools/bin/build_abigen new file mode 100644 index 00000000..ced30a7b --- /dev/null +++ b/tools/bin/build_abigen @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# Checks that the correct abigen is installed in this directory, and installs it +# if not. + +set -e + +# Version of abigen to install. Must be run within plugin project +GETH_VERSION=$(go list -json -m github.com/ethereum/go-ethereum | jq -r .Version) +GETH_REPO_URL="https://github.com/ethereum/go-ethereum" + +function realpath { echo $(cd $(dirname $1); pwd)/$(basename $1); } +THIS_DIR="$(realpath "$(dirname $0)")" + +NATIVE_ABIGEN_VERSION=v"$( + "$THIS_DIR/abigen" --version 2> /dev/null | \ + grep -E -o '([0-9]+\.[0-9]+\.[0-9]+)' +)" || true + +if [ "$NATIVE_ABIGEN_VERSION" == "$GETH_VERSION" ]; then + echo "Correct abigen version already installed." 
+ exit 0 +fi + +function cleanup() { + rm -rf "$TMPDIR" +} + +trap cleanup EXIT + +TMPDIR="$(mktemp -d)" + +pushd "$TMPDIR" + +git clone --depth=1 --single-branch --branch "$GETH_VERSION" "$GETH_REPO_URL" +cd go-ethereum/cmd/abigen +go build +rm -f "$THIS_DIR/abigen" # necessary on MacOS for code signing +cp ./abigen "$THIS_DIR" + +popd + diff --git a/tools/bin/cldev b/tools/bin/cldev new file mode 100644 index 00000000..87acd77e --- /dev/null +++ b/tools/bin/cldev @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +set -e +LDFLAGS="`tools/bin/ldflags`" +export CL_DATABASE_URL="${CL_DATABASE_URL:=postgresql://localhost:5432/plugin_dev?sslmode=disable}" +export CL_CONFIG=${CL_CONFIG:"WebServer.TLS.HTTPSPort = 0"} + +case "$1" in + node | core | n) + key='0x9CA9d2D5E04012C9Ed24C0e513C9bfAa4A2dD77f' + echo "** Running node" + go run -ldflags "$LDFLAGS" . -- node start -d -p tools/secrets/password.txt -a tools/secrets/apicredentials + ;; + *) + go run . -- "$@" + ;; +esac diff --git a/tools/bin/clean_test_dbs b/tools/bin/clean_test_dbs new file mode 100644 index 00000000..7f6d996f --- /dev/null +++ b/tools/bin/clean_test_dbs @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +psql -l --csv | awk -F, '{print$1}' | grep plugin_test_ | xargs -n 1 -J % dropdb % diff --git a/tools/bin/codecov b/tools/bin/codecov new file mode 100644 index 00000000..ad63a569 --- /dev/null +++ b/tools/bin/codecov @@ -0,0 +1,1888 @@ +#!/usr/bin/env bash + +# Apache License Version 2.0, January 2004 +# https://github.com/codecov/codecov-bash/blob/master/LICENSE + +set -e +o pipefail + +VERSION="1.0.6" + +codecov_flags=( ) +url="https://codecov.io" +env="$CODECOV_ENV" +service="" +token="" +search_in="" +# shellcheck disable=SC2153 +flags="$CODECOV_FLAGS" +exit_with=0 +curlargs="" +curlawsargs="" +dump="0" +clean="0" +curl_s="-s" +name="$CODECOV_NAME" +include_cov="" +exclude_cov="" +ddp="$HOME/Library/Developer/Xcode/DerivedData" +xp="" +files="" +save_to="" +direct_file_upload="" +cacert="$CODECOV_CA_BUNDLE" 
+gcov_ignore="-not -path './bower_components/**' -not -path './node_modules/**' -not -path './vendor/**'" +gcov_include="" + +ft_gcov="1" +ft_coveragepy="1" +ft_fix="1" +ft_search="1" +ft_s3="1" +ft_network="1" +ft_xcodellvm="1" +ft_xcodeplist="0" +ft_gcovout="1" +ft_html="0" +ft_yaml="0" + +_git_root=$(git rev-parse --show-toplevel 2>/dev/null || hg root 2>/dev/null || echo "$PWD") +git_root="$_git_root" +remote_addr="" +if [ "$git_root" = "$PWD" ]; +then + git_root="." +fi + +branch_o="" +build_o="" +commit_o="" +pr_o="" +prefix_o="" +network_filter_o="" +search_in_o="" +slug_o="" +tag_o="" +url_o="" +git_ls_files_recurse_submodules_o="" +package="bash" + +commit="$VCS_COMMIT_ID" +branch="$VCS_BRANCH_NAME" +pr="$VCS_PULL_REQUEST" +slug="$VCS_SLUG" +tag="$VCS_TAG" +build_url="$CI_BUILD_URL" +build="$CI_BUILD_ID" +job="$CI_JOB_ID" + +beta_xcode_partials="" + +proj_root="$git_root" +gcov_exe="gcov" +gcov_arg="" + +b="\033[0;36m" +g="\033[0;32m" +r="\033[0;31m" +e="\033[0;90m" +y="\033[0;33m" +x="\033[0m" + +show_help() { +cat << EOF + + Codecov Bash $VERSION + + Global report uploading tool for Codecov + Documentation at https://docs.codecov.io/docs + Contribute at https://github.com/codecov/codecov-bash + + + -h Display this help and exit + -f FILE Target file(s) to upload + + -f "path/to/file" only upload this file + skips searching unless provided patterns below + + -f '!*.bar' ignore all files at pattern *.bar + -f '*.foo' include all files at pattern *.foo + Must use single quotes. + This is non-exclusive, use -s "*.foo" to match specific paths. + + -s DIR Directory to search for coverage reports. + Already searches project root and artifact folders. + -t TOKEN Set the private repository token + (option) set environment variable CODECOV_TOKEN=:uuid + + -t @/path/to/token_file + -t uuid + + -n NAME Custom defined name of the upload. 
Visible in Codecov UI + + -e ENV Specify environment variables to be included with this build + Also accepting environment variables: CODECOV_ENV=VAR,VAR2 + + -e VAR,VAR2 + + -k prefix Prefix filepaths to help resolve path fixing + + -i prefix Only include files in the network with a certain prefix. Useful for upload-specific path fixing + + -X feature Toggle functionalities + + -X gcov Disable gcov + -X coveragepy Disable python coverage + -X fix Disable report fixing + -X search Disable searching for reports + -X xcode Disable xcode processing + -X network Disable uploading the file network + -X gcovout Disable gcov output + -X html Enable coverage for HTML files + -X recursesubs Enable recurse submodules in git projects when searching for source files + -X yaml Enable coverage for YAML files + + -N The commit SHA of the parent for which you are uploading coverage. If not present, + the parent will be determined using the API of your repository provider. + When using the repository provider's API, the parent is determined via finding + the closest ancestor to the commit. + + -R root dir Used when not in git/hg project to identify project root directory + -F flag Flag the upload to group coverage metrics + + -F unittests This upload is only unittests + -F integration This upload is only integration tests + -F ui,chrome This upload is Chrome - UI tests + + -c Move discovered coverage reports to the trash + -z FILE Upload specified file directly to Codecov and bypass all report generation. + This is inteded to be used only with a pre-formatted Codecov report and is not + expected to work under any other circumstances. + -Z Exit with 1 if not successful. Default will Exit with 0 + + -- xcode -- + -D Custom Derived Data Path for Coverage.profdata and gcov processing + Default '~/Library/Developer/Xcode/DerivedData' + -J Specify packages to build coverage. Uploader will only build these packages. + This can significantly reduces time to build coverage reports. 
+ + -J 'MyAppName' Will match "MyAppName" and "MyAppNameTests" + -J '^ExampleApp$' Will match only "ExampleApp" not "ExampleAppTests" + + -- gcov -- + -g GLOB Paths to ignore during gcov gathering + -G GLOB Paths to include during gcov gathering + -p dir Project root directory + Also used when preparing gcov + -x gcovexe gcov executable to run. Defaults to 'gcov' + -a gcovargs extra arguments to pass to gcov + + -- Override CI Environment Variables -- + These variables are automatically detected by popular CI providers + + -B branch Specify the branch name + -C sha Specify the commit sha + -P pr Specify the pull request number + -b build Specify the build number + -T tag Specify the git tag + + -- Enterprise -- + -u URL Set the target url for Enterprise customers + Not required when retrieving the bash uploader from your CCE + (option) Set environment variable CODECOV_URL=https://my-hosted-codecov.com + -r SLUG owner/repo slug used instead of the private repo token in Enterprise + (option) set environment variable CODECOV_SLUG=:owner/:repo + (option) set in your codecov.yml "codecov.slug" + -S PATH File path to your cacert.pem file used to verify ssl with Codecov Enterprise (optional) + (option) Set environment variable: CODECOV_CA_BUNDLE="/path/to/ca.pem" + -U curlargs Extra curl arguments to communicate with Codecov. e.g., -U "--proxy http://http-proxy" + -A curlargs Extra curl arguments to communicate with AWS. 
+ + -- Debugging -- + -d Don't upload, but dump upload file to stdout + -q PATH Write upload file to path + -K Remove color from the output + -v Verbose mode + +EOF +} + + +say() { + echo -e "$1" +} + + +urlencode() { + echo "$1" | curl -Gso /dev/null -w "%{url_effective}" --data-urlencode @- "" | cut -c 3- | sed -e 's/%0A//' +} + +swiftcov() { + _dir=$(dirname "$1" | sed 's/\(Build\).*/\1/g') + for _type in app framework xctest + do + find "$_dir" -name "*.$_type" | while read -r f + do + _proj=${f##*/} + _proj=${_proj%."$_type"} + if [ "$2" = "" ] || [ "$(echo "$_proj" | grep -i "$2")" != "" ]; + then + say " $g+$x Building reports for $_proj $_type" + dest=$([ -f "$f/$_proj" ] && echo "$f/$_proj" || echo "$f/Contents/MacOS/$_proj") + # shellcheck disable=SC2001 + _proj_name=$(echo "$_proj" | sed -e 's/[[:space:]]//g') + # shellcheck disable=SC2086 + xcrun llvm-cov show $beta_xcode_partials -instr-profile "$1" "$dest" > "$_proj_name.$_type.coverage.txt" \ + || say " ${r}x>${x} llvm-cov failed to produce results for $dest" + fi + done + done +} + + +# Credits to: https://gist.github.com/pkuczynski/8665367 +parse_yaml() { + local prefix=$2 + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' + local fs + fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" | + awk -F"$fs" '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) {if (i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; if (indent > 0) {vn=(vn)(vname[0])("_")} + printf("%s%s%s=\"%s\"\n", "'"$prefix"'",vn, $2, $3); + } + }' +} + +if [ $# != 0 ]; +then + while getopts "a:A:b:B:cC:dD:e:f:F:g:G:hi:J:k:Kn:p:P:Q:q:r:R:s:S:t:T:u:U:vx:X:Zz:N:-" o + do + codecov_flags+=( "$o" ) + case "$o" in + "-") + echo -e "${r}Long options are not supported${x}" + exit 2 + ;; + "?") + ;; + "N") + parent=$OPTARG + ;; + "a") + gcov_arg=$OPTARG + ;; + "A") + curlawsargs="$OPTARG" + ;; + "b") + build_o="$OPTARG" + ;; + 
"B") + branch_o="$OPTARG" + ;; + "c") + clean="1" + ;; + "C") + commit_o="$OPTARG" + ;; + "d") + dump="1" + ;; + "D") + ddp="$OPTARG" + ;; + "e") + env="$env,$OPTARG" + ;; + "f") + if [ "${OPTARG::1}" = "!" ]; + then + exclude_cov="$exclude_cov -not -path '${OPTARG:1}'" + + elif [[ "$OPTARG" = *"*"* ]]; + then + include_cov="$include_cov -or -path '$OPTARG'" + + else + ft_search=0 + if [ "$files" = "" ]; + then + files="$OPTARG" + else + files="$files +$OPTARG" + fi + fi + ;; + "F") + if [ "$flags" = "" ]; + then + flags="$OPTARG" + else + flags="$flags,$OPTARG" + fi + ;; + "g") + gcov_ignore="$gcov_ignore -not -path '$OPTARG'" + ;; + "G") + gcov_include="$gcov_include -path '$OPTARG'" + ;; + "h") + show_help + exit 0; + ;; + "i") + network_filter_o="$OPTARG" + ;; + "J") + ft_xcodellvm="1" + ft_xcodeplist="0" + if [ "$xp" = "" ]; + then + xp="$OPTARG" + else + xp="$xp\|$OPTARG" + fi + ;; + "k") + prefix_o=$(echo "$OPTARG" | sed -e 's:^/*::' -e 's:/*$::') + ;; + "K") + b="" + g="" + r="" + e="" + x="" + ;; + "n") + name="$OPTARG" + ;; + "p") + proj_root="$OPTARG" + ;; + "P") + pr_o="$OPTARG" + ;; + "Q") + # this is only meant for Codecov packages to overwrite + package="$OPTARG" + ;; + "q") + save_to="$OPTARG" + ;; + "r") + slug_o="$OPTARG" + ;; + "R") + git_root="$OPTARG" + ;; + "s") + if [ "$search_in_o" = "" ]; + then + search_in_o="$OPTARG" + else + search_in_o="$search_in_o $OPTARG" + fi + ;; + "S") + # shellcheck disable=SC2089 + cacert="--cacert \"$OPTARG\"" + ;; + "t") + if [ "${OPTARG::1}" = "@" ]; + then + token=$(< "${OPTARG:1}" tr -d ' \n') + else + token="$OPTARG" + fi + ;; + "T") + tag_o="$OPTARG" + ;; + "u") + url_o=$(echo "$OPTARG" | sed -e 's/\/$//') + ;; + "U") + curlargs="$OPTARG" + ;; + "v") + set -x + curl_s="" + ;; + "x") + gcov_exe=$OPTARG + ;; + "X") + if [ "$OPTARG" = "gcov" ]; + then + ft_gcov="0" + elif [ "$OPTARG" = "coveragepy" ] || [ "$OPTARG" = "py" ]; + then + ft_coveragepy="0" + elif [ "$OPTARG" = "gcovout" ]; + then + ft_gcovout="0" 
+ elif [ "$OPTARG" = "xcodellvm" ]; + then + ft_xcodellvm="1" + ft_xcodeplist="0" + elif [ "$OPTARG" = "fix" ] || [ "$OPTARG" = "fixes" ]; + then + ft_fix="0" + elif [ "$OPTARG" = "xcode" ]; + then + ft_xcodellvm="0" + ft_xcodeplist="0" + elif [ "$OPTARG" = "search" ]; + then + ft_search="0" + elif [ "$OPTARG" = "xcodepartials" ]; + then + beta_xcode_partials="-use-color" + elif [ "$OPTARG" = "network" ]; + then + ft_network="0" + elif [ "$OPTARG" = "s3" ]; + then + ft_s3="0" + elif [ "$OPTARG" = "html" ]; + then + ft_html="1" + elif [ "$OPTARG" = "recursesubs" ]; + then + git_ls_files_recurse_submodules_o="--recurse-submodules" + elif [ "$OPTARG" = "yaml" ]; + then + ft_yaml="1" + fi + ;; + "Z") + exit_with=1 + ;; + "z") + direct_file_upload="$OPTARG" + ft_gcov="0" + ft_coveragepy="0" + ft_fix="0" + ft_search="0" + ft_network="0" + ft_xcodellvm="0" + ft_gcovout="0" + include_cov="" + ;; + *) + echo -e "${r}Unexpected flag not supported${x}" + ;; + esac + done +fi + +say " + _____ _ + / ____| | | +| | ___ __| | ___ ___ _____ __ +| | / _ \\ / _\` |/ _ \\/ __/ _ \\ \\ / / +| |___| (_) | (_| | __/ (_| (_) \\ V / + \\_____\\___/ \\__,_|\\___|\\___\\___/ \\_/ + Bash-$VERSION + +" + +# check for installed tools +# git/hg +if [ "$direct_file_upload" = "" ]; +then + if [ -x "$(command -v git)" ]; + then + say "$b==>$x $(git --version) found" + else + say "$y==>$x git not installed, testing for mercurial" + if [ -x "$(command -v hg)" ]; + then + say "$b==>$x $(hg --version) found" + else + say "$r==>$x git nor mercurial are installed. Uploader may fail or have unintended consequences" + fi + fi +fi +# curl +if [ -x "$(command -v curl)" ]; +then + say "$b==>$x $(curl --version)" +else + say "$r==>$x curl not installed. Exiting." + exit ${exit_with}; +fi + +search_in="$proj_root" + +#shellcheck disable=SC2154 +if [ "$JENKINS_URL" != "" ]; +then + say "$e==>$x Jenkins CI detected." 
+ # https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project + # https://wiki.jenkins-ci.org/display/JENKINS/GitHub+pull+request+builder+plugin#GitHubpullrequestbuilderplugin-EnvironmentVariables + service="jenkins" + + # shellcheck disable=SC2154 + if [ "$ghprbSourceBranch" != "" ]; + then + branch="$ghprbSourceBranch" + elif [ "$GIT_BRANCH" != "" ]; + then + branch="$GIT_BRANCH" + elif [ "$BRANCH_NAME" != "" ]; + then + branch="$BRANCH_NAME" + fi + + # shellcheck disable=SC2154 + if [ "$ghprbActualCommit" != "" ]; + then + commit="$ghprbActualCommit" + elif [ "$GIT_COMMIT" != "" ]; + then + commit="$GIT_COMMIT" + fi + + # shellcheck disable=SC2154 + if [ "$ghprbPullId" != "" ]; + then + pr="$ghprbPullId" + elif [ "$CHANGE_ID" != "" ]; + then + pr="$CHANGE_ID" + fi + + build="$BUILD_NUMBER" + # shellcheck disable=SC2153 + build_url=$(urlencode "$BUILD_URL") + +elif [ "$CI" = "true" ] && [ "$TRAVIS" = "true" ] && [ "$SHIPPABLE" != "true" ]; +then + say "$e==>$x Travis CI detected." + # https://docs.travis-ci.com/user/environment-variables/ + service="travis" + commit="${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT}" + build="$TRAVIS_JOB_NUMBER" + pr="$TRAVIS_PULL_REQUEST" + job="$TRAVIS_JOB_ID" + slug="$TRAVIS_REPO_SLUG" + env="$env,TRAVIS_OS_NAME" + tag="$TRAVIS_TAG" + if [ "$TRAVIS_BRANCH" != "$TRAVIS_TAG" ]; + then + branch="${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" + fi + + language=$(compgen -A variable | grep "^TRAVIS_.*_VERSION$" | head -1) + if [ "$language" != "" ]; + then + env="$env,${!language}" + fi + +elif [ "$CODEBUILD_CI" = "true" ]; +then + say "$e==>$x AWS Codebuild detected." 
+ # https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-env-vars.html + service="codebuild" + commit="$CODEBUILD_RESOLVED_SOURCE_VERSION" + build="$CODEBUILD_BUILD_ID" + branch="$(echo "$CODEBUILD_WEBHOOK_HEAD_REF" | sed 's/^refs\/heads\///')" + if [ "${CODEBUILD_SOURCE_VERSION/pr}" = "$CODEBUILD_SOURCE_VERSION" ] ; then + pr="false" + else + pr="$(echo "$CODEBUILD_SOURCE_VERSION" | sed 's/^pr\///')" + fi + job="$CODEBUILD_BUILD_ID" + slug="$(echo "$CODEBUILD_SOURCE_REPO_URL" | sed 's/^.*:\/\/[^\/]*\///' | sed 's/\.git$//')" + +elif [ "$CI" = "true" ] && [ "$CI_NAME" = "codeship" ]; +then + say "$e==>$x Codeship CI detected." + # https://www.codeship.io/documentation/continuous-integration/set-environment-variables/ + service="codeship" + branch="$CI_BRANCH" + build="$CI_BUILD_NUMBER" + build_url=$(urlencode "$CI_BUILD_URL") + commit="$CI_COMMIT_ID" + +elif [ -n "$CF_BUILD_URL" ] && [ -n "$CF_BUILD_ID" ]; +then + say "$e==>$x Codefresh CI detected." + # https://docs.codefresh.io/v1.0/docs/variables + service="codefresh" + branch="$CF_BRANCH" + build="$CF_BUILD_ID" + build_url=$(urlencode "$CF_BUILD_URL") + commit="$CF_REVISION" + +elif [ "$TEAMCITY_VERSION" != "" ]; +then + say "$e==>$x TeamCity CI detected." + # https://confluence.jetbrains.com/display/TCD8/Predefined+Build+Parameters + # https://confluence.jetbrains.com/plugins/servlet/mobile#content/view/74847298 + if [ "$TEAMCITY_BUILD_BRANCH" = '' ]; + then + echo " Teamcity does not automatically make build parameters available as environment variables." 
+ echo " Add the following environment parameters to the build configuration" + echo " env.TEAMCITY_BUILD_BRANCH = %teamcity.build.branch%" + echo " env.TEAMCITY_BUILD_ID = %teamcity.build.id%" + echo " env.TEAMCITY_BUILD_URL = %teamcity.serverUrl%/viewLog.html?buildId=%teamcity.build.id%" + echo " env.TEAMCITY_BUILD_COMMIT = %system.build.vcs.number%" + echo " env.TEAMCITY_BUILD_REPOSITORY = %vcsroot..url%" + fi + service="teamcity" + branch="$TEAMCITY_BUILD_BRANCH" + build="$TEAMCITY_BUILD_ID" + build_url=$(urlencode "$TEAMCITY_BUILD_URL") + if [ "$TEAMCITY_BUILD_COMMIT" != "" ]; + then + commit="$TEAMCITY_BUILD_COMMIT" + else + commit="$BUILD_VCS_NUMBER" + fi + remote_addr="$TEAMCITY_BUILD_REPOSITORY" + +elif [ "$CI" = "true" ] && [ "$CIRCLECI" = "true" ]; +then + say "$e==>$x Circle CI detected." + # https://circleci.com/docs/environment-variables + service="circleci" + branch="$CIRCLE_BRANCH" + build="$CIRCLE_BUILD_NUM" + job="$CIRCLE_NODE_INDEX" + if [ "$CIRCLE_PROJECT_REPONAME" != "" ]; + then + slug="$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME" + else + # git@github.com:owner/repo.git + slug="${CIRCLE_REPOSITORY_URL##*:}" + # owner/repo.git + slug="${slug%%.git}" + fi + pr="${CIRCLE_PULL_REQUEST##*/}" + commit="$CIRCLE_SHA1" + search_in="$search_in $CIRCLE_ARTIFACTS $CIRCLE_TEST_REPORTS" + +elif [ "$BUDDYBUILD_BRANCH" != "" ]; +then + say "$e==>$x buddybuild detected" + # http://docs.buddybuild.com/v6/docs/custom-prebuild-and-postbuild-steps + service="buddybuild" + branch="$BUDDYBUILD_BRANCH" + build="$BUDDYBUILD_BUILD_NUMBER" + build_url="https://dashboard.buddybuild.com/public/apps/$BUDDYBUILD_APP_ID/build/$BUDDYBUILD_BUILD_ID" + # BUDDYBUILD_TRIGGERED_BY + if [ "$ddp" = "$HOME/Library/Developer/Xcode/DerivedData" ]; + then + ddp="/private/tmp/sandbox/${BUDDYBUILD_APP_ID}/bbtest" + fi + +elif [ "${bamboo_planRepository_revision}" != "" ]; +then + say "$e==>$x Bamboo detected" + # 
https://confluence.atlassian.com/bamboo/bamboo-variables-289277087.html#Bamboovariables-Build-specificvariables + service="bamboo" + commit="${bamboo_planRepository_revision}" + # shellcheck disable=SC2154 + branch="${bamboo_planRepository_branch}" + # shellcheck disable=SC2154 + build="${bamboo_buildNumber}" + # shellcheck disable=SC2154 + build_url="${bamboo_buildResultsUrl}" + # shellcheck disable=SC2154 + remote_addr="${bamboo_planRepository_repositoryUrl}" + +elif [ "$CI" = "true" ] && [ "$BITRISE_IO" = "true" ]; +then + # http://devcenter.bitrise.io/faq/available-environment-variables/ + say "$e==>$x Bitrise CI detected." + service="bitrise" + branch="$BITRISE_GIT_BRANCH" + build="$BITRISE_BUILD_NUMBER" + build_url=$(urlencode "$BITRISE_BUILD_URL") + pr="$BITRISE_PULL_REQUEST" + if [ "$GIT_CLONE_COMMIT_HASH" != "" ]; + then + commit="$GIT_CLONE_COMMIT_HASH" + fi + +elif [ "$CI" = "true" ] && [ "$SEMAPHORE" = "true" ]; +then + say "$e==>$x Semaphore CI detected." +# https://docs.semaphoreci.com/ci-cd-environment/environment-variables/#semaphore-related + service="semaphore" + branch="$SEMAPHORE_GIT_BRANCH" + build="$SEMAPHORE_WORKFLOW_NUMBER" + job="$SEMAPHORE_JOB_ID" + pr="$PULL_REQUEST_NUMBER" + slug="$SEMAPHORE_REPO_SLUG" + commit="$REVISION" + env="$env,SEMAPHORE_TRIGGER_SOURCE" + +elif [ "$CI" = "true" ] && [ "$BUILDKITE" = "true" ]; +then + say "$e==>$x Buildkite CI detected." + # https://buildkite.com/docs/guides/environment-variables + service="buildkite" + branch="$BUILDKITE_BRANCH" + build="$BUILDKITE_BUILD_NUMBER" + job="$BUILDKITE_JOB_ID" + build_url=$(urlencode "$BUILDKITE_BUILD_URL") + slug="$BUILDKITE_PROJECT_SLUG" + commit="$BUILDKITE_COMMIT" + if [[ "$BUILDKITE_PULL_REQUEST" != "false" ]]; then + pr="$BUILDKITE_PULL_REQUEST" + fi + tag="$BUILDKITE_TAG" + +elif [ "$CI" = "drone" ] || [ "$DRONE" = "true" ]; +then + say "$e==>$x Drone CI detected." 
+ # http://docs.drone.io/env.html + # drone commits are not full shas + service="drone.io" + branch="$DRONE_BRANCH" + build="$DRONE_BUILD_NUMBER" + build_url=$(urlencode "${DRONE_BUILD_PLI}") + pr="$DRONE_PULL_REQUEST" + job="$DRONE_JOB_NUMBER" + tag="$DRONE_TAG" + +elif [ "$CI" = "true" ] && [ "$HEROKU_TEST_RUN_BRANCH" != "" ]; +then + say "$e==>$x Heroku CI detected." + # https://devcenter.heroku.com/articles/heroku-ci#environment-variables + service="heroku" + branch="$HEROKU_TEST_RUN_BRANCH" + build="$HEROKU_TEST_RUN_ID" + commit="$HEROKU_TEST_RUN_COMMIT_VERSION" + +elif [[ "$CI" = "true" || "$CI" = "True" ]] && [[ "$APPVEYOR" = "true" || "$APPVEYOR" = "True" ]]; +then + say "$e==>$x Appveyor CI detected." + # http://www.appveyor.com/docs/environment-variables + service="appveyor" + branch="$APPVEYOR_REPO_BRANCH" + build=$(urlencode "$APPVEYOR_JOB_ID") + pr="$APPVEYOR_PULL_REQUEST_NUMBER" + job="$APPVEYOR_ACCOUNT_NAME%2F$APPVEYOR_PROJECT_SLUG%2F$APPVEYOR_BUILD_VERSION" + slug="$APPVEYOR_REPO_NAME" + commit="$APPVEYOR_REPO_COMMIT" + build_url=$(urlencode "${APPVEYOR_URL}/project/${APPVEYOR_REPO_NAME}/builds/$APPVEYOR_BUILD_ID/job/${APPVEYOR_JOB_ID}") + +elif [ "$CI" = "true" ] && [ "$WERCKER_GIT_BRANCH" != "" ]; +then + say "$e==>$x Wercker CI detected." + # http://devcenter.wercker.com/articles/steps/variables.html + service="wercker" + branch="$WERCKER_GIT_BRANCH" + build="$WERCKER_MAIN_PIPELINE_STARTED" + slug="$WERCKER_GIT_OWNER/$WERCKER_GIT_REPOSITORY" + commit="$WERCKER_GIT_COMMIT" + +elif [ "$CI" = "true" ] && [ "$MAGNUM" = "true" ]; +then + say "$e==>$x Magnum CI detected." + # https://magnum-ci.com/docs/environment + service="magnum" + branch="$CI_BRANCH" + build="$CI_BUILD_NUMBER" + commit="$CI_COMMIT" + +elif [ "$SHIPPABLE" = "true" ]; +then + say "$e==>$x Shippable CI detected." 
+ # http://docs.shippable.com/ci_configure/ + service="shippable" + # shellcheck disable=SC2153 + branch=$([ "$HEAD_BRANCH" != "" ] && echo "$HEAD_BRANCH" || echo "$BRANCH") + build="$BUILD_NUMBER" + build_url=$(urlencode "$BUILD_URL") + pr="$PULL_REQUEST" + slug="$REPO_FULL_NAME" + # shellcheck disable=SC2153 + commit="$COMMIT" + +elif [ "$TDDIUM" = "true" ]; +then + say "Solano CI detected." + # http://docs.solanolabs.com/Setup/tddium-set-environment-variables/ + service="solano" + commit="$TDDIUM_CURRENT_COMMIT" + branch="$TDDIUM_CURRENT_BRANCH" + build="$TDDIUM_TID" + pr="$TDDIUM_PR_ID" + +elif [ "$GREENHOUSE" = "true" ]; +then + say "$e==>$x Greenhouse CI detected." + # http://docs.greenhouseci.com/docs/environment-variables-files + service="greenhouse" + branch="$GREENHOUSE_BRANCH" + build="$GREENHOUSE_BUILD_NUMBER" + build_url=$(urlencode "$GREENHOUSE_BUILD_URL") + pr="$GREENHOUSE_PULL_REQUEST" + commit="$GREENHOUSE_COMMIT" + search_in="$search_in $GREENHOUSE_EXPORT_DIR" + +elif [ "$GITLAB_CI" != "" ]; +then + say "$e==>$x GitLab CI detected." + # http://doc.gitlab.com/ce/ci/variables/README.html + service="gitlab" + branch="${CI_BUILD_REF_NAME:-$CI_COMMIT_REF_NAME}" + build="${CI_BUILD_ID:-$CI_JOB_ID}" + remote_addr="${CI_BUILD_REPO:-$CI_REPOSITORY_URL}" + commit="${CI_BUILD_REF:-$CI_COMMIT_SHA}" + slug="${CI_PROJECT_PATH}" + +elif [ "$GITHUB_ACTIONS" != "" ]; +then + say "$e==>$x GitHub Actions detected." 
+ say " Env vars used:" + say " -> GITHUB_ACTIONS: ${GITHUB_ACTIONS}" + say " -> GITHUB_HEAD_REF: ${GITHUB_HEAD_REF}" + say " -> GITHUB_REF: ${GITHUB_REF}" + say " -> GITHUB_REPOSITORY: ${GITHUB_REPOSITORY}" + say " -> GITHUB_RUN_ID: ${GITHUB_RUN_ID}" + say " -> GITHUB_SHA: ${GITHUB_SHA}" + say " -> GITHUB_WORKFLOW: ${GITHUB_WORKFLOW}" + + # https://github.com/features/actions + service="github-actions" + + # https://help.github.com/en/articles/virtual-environments-for-github-actions#environment-variables + branch="${GITHUB_REF#refs/heads/}" + if [ "$GITHUB_HEAD_REF" != "" ]; + then + # PR refs are in the format: refs/pull/7/merge + if [[ "$GITHUB_REF" =~ ^refs\/pull\/[0-9]+\/merge$ ]]; + then + pr="${GITHUB_REF#refs/pull/}" + pr="${pr%/merge}" + fi + branch="${GITHUB_HEAD_REF}" + fi + commit="${GITHUB_SHA}" + slug="${GITHUB_REPOSITORY}" + build="${GITHUB_RUN_ID}" + build_url=$(urlencode "${GITHUB_SERVER_URL:-https://github.com}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}") + job="$(urlencode "${GITHUB_WORKFLOW}")" + + # actions/checkout runs in detached HEAD + mc= + if [ -n "$pr" ] && [ "$pr" != false ] && [ "$commit_o" == "" ]; + then + mc=$(git show --no-patch --format="%P" 2>/dev/null || echo "") + + if [[ "$mc" =~ ^[a-z0-9]{40}[[:space:]][a-z0-9]{40}$ ]]; + then + mc=$(echo "$mc" | cut -d' ' -f2) + say " Fixing merge commit SHA $commit -> $mc" + commit=$mc + elif [[ "$mc" = "" ]]; + then + say "$r-> Issue detecting commit SHA. Please run actions/checkout with fetch-depth > 1 or set to 0$x" + fi + fi + +elif [ "$SYSTEM_TEAMFOUNDATIONSERVERURI" != "" ]; +then + say "$e==>$x Azure Pipelines detected." 
+ # https://docs.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=vsts + # https://docs.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=azure-devops&viewFallbackFrom=vsts&tabs=yaml + service="azure_pipelines" + commit="$BUILD_SOURCEVERSION" + build="$BUILD_BUILDNUMBER" + if [ -z "$SYSTEM_PULLREQUEST_PULLREQUESTNUMBER" ]; + then + pr="$SYSTEM_PULLREQUEST_PULLREQUESTID" + else + pr="$SYSTEM_PULLREQUEST_PULLREQUESTNUMBER" + fi + project="${SYSTEM_TEAMPROJECT}" + server_uri="${SYSTEM_TEAMFOUNDATIONSERVERURI}" + job="${BUILD_BUILDID}" + branch="${BUILD_SOURCEBRANCH#"refs/heads/"}" + build_url=$(urlencode "${SYSTEM_TEAMFOUNDATIONSERVERURI}${SYSTEM_TEAMPROJECT}/_build/results?buildId=${BUILD_BUILDID}") + + # azure/pipelines runs in detached HEAD + mc= + if [ -n "$pr" ] && [ "$pr" != false ]; + then + mc=$(git show --no-patch --format="%P" 2>/dev/null || echo "") + + if [[ "$mc" =~ ^[a-z0-9]{40}[[:space:]][a-z0-9]{40}$ ]]; + then + mc=$(echo "$mc" | cut -d' ' -f2) + say " Fixing merge commit SHA $commit -> $mc" + commit=$mc + fi + fi + +elif [ "$CI" = "true" ] && [ "$BITBUCKET_BUILD_NUMBER" != "" ]; +then + say "$e==>$x Bitbucket detected." + # https://confluence.atlassian.com/bitbucket/variables-in-pipelines-794502608.html + service="bitbucket" + branch="$BITBUCKET_BRANCH" + build="$BITBUCKET_BUILD_NUMBER" + slug="$BITBUCKET_REPO_OWNER/$BITBUCKET_REPO_SLUG" + job="$BITBUCKET_BUILD_NUMBER" + pr="$BITBUCKET_PR_ID" + commit="$BITBUCKET_COMMIT" + # See https://jira.atlassian.com/browse/BCLOUD-19393 + if [ "${#commit}" = 12 ]; + then + commit=$(git rev-parse "$BITBUCKET_COMMIT") + fi + +elif [ "$CI" = "true" ] && [ "$BUDDY" = "true" ]; +then + say "$e==>$x Buddy CI detected." 
+ # https://buddy.works/docs/pipelines/environment-variables + service="buddy" + branch="$BUDDY_EXECUTION_BRANCH" + build="$BUDDY_EXECUTION_ID" + build_url=$(urlencode "$BUDDY_EXECUTION_URL") + commit="$BUDDY_EXECUTION_REVISION" + pr="$BUDDY_EXECUTION_PULL_REQUEST_NO" + tag="$BUDDY_EXECUTION_TAG" + slug="$BUDDY_REPO_SLUG" + +elif [ "$CIRRUS_CI" != "" ]; +then + say "$e==>$x Cirrus CI detected." + # https://cirrus-ci.org/guide/writing-tasks/#environment-variables + service="cirrus-ci" + slug="$CIRRUS_REPO_FULL_NAME" + branch="$CIRRUS_BRANCH" + pr="$CIRRUS_PR" + commit="$CIRRUS_CHANGE_IN_REPO" + build="$CIRRUS_BUILD_ID" + build_url=$(urlencode "https://cirrus-ci.com/task/$CIRRUS_TASK_ID") + job="$CIRRUS_TASK_NAME" + +elif [ "$DOCKER_REPO" != "" ]; +then + say "$e==>$x Docker detected." + # https://docs.docker.com/docker-cloud/builds/advanced/ + service="docker" + branch="$SOURCE_BRANCH" + commit="$SOURCE_COMMIT" + slug="$DOCKER_REPO" + tag="$CACHE_TAG" + env="$env,IMAGE_NAME" + +else + say "${r}x>${x} No CI provider detected." + say " Testing inside Docker? ${b}http://docs.codecov.io/docs/testing-with-docker${x}" + say " Testing with Tox? 
${b}https://docs.codecov.io/docs/python#section-testing-with-tox${x}" + +fi + +say " ${e}current dir: ${x} $PWD" +say " ${e}project root:${x} $git_root" + +# find branch, commit, repo from git command +if [ "$GIT_BRANCH" != "" ]; +then + branch="$GIT_BRANCH" + +elif [ "$branch" = "" ]; +then + branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || hg branch 2>/dev/null || echo "") + if [ "$branch" = "HEAD" ]; + then + branch="" + fi +fi + +if [ "$commit_o" = "" ]; +then + if [ "$GIT_COMMIT" != "" ]; + then + commit="$GIT_COMMIT" + elif [ "$commit" = "" ]; + then + commit=$(git log -1 --format="%H" 2>/dev/null || hg id -i --debug 2>/dev/null | tr -d '+' || echo "") + fi +else + commit="$commit_o" +fi + +if [ "$CODECOV_TOKEN" != "" ] && [ "$token" = "" ]; +then + say "${e}-->${x} token set from env" + token="$CODECOV_TOKEN" +fi + +if [ "$CODECOV_URL" != "" ] && [ "$url_o" = "" ]; +then + say "${e}-->${x} url set from env" + url_o=$(echo "$CODECOV_URL" | sed -e 's/\/$//') +fi + +if [ "$CODECOV_SLUG" != "" ]; +then + say "${e}-->${x} slug set from env" + slug_o="$CODECOV_SLUG" + +elif [ "$slug" = "" ]; +then + if [ "$remote_addr" = "" ]; + then + remote_addr=$(git config --get remote.origin.url || hg paths default || echo '') + fi + if [ "$remote_addr" != "" ]; + then + if echo "$remote_addr" | grep -q "//"; then + # https + slug=$(echo "$remote_addr" | cut -d / -f 4,5 | sed -e 's/\.git$//') + else + # ssh + slug=$(echo "$remote_addr" | cut -d : -f 2 | sed -e 's/\.git$//') + fi + fi + if [ "$slug" = "/" ]; + then + slug="" + fi +fi + +yaml=$(cd "$git_root" && \ + git ls-files "*codecov.yml" "*codecov.yaml" 2>/dev/null \ + || hg locate "*codecov.yml" "*codecov.yaml" 2>/dev/null \ + || cd "$proj_root" && find . 
-maxdepth 1 -type f -name '*codecov.y*ml' 2>/dev/null \ + || echo '') +yaml=$(echo "$yaml" | head -1) + +if [ "$yaml" != "" ]; +then + say " ${e}Yaml found at:${x} $yaml" + if [[ "$yaml" != /* ]]; then + # relative path for yaml file given, assume relative to the repo root + yaml="$git_root/$yaml" + fi + config=$(parse_yaml "$yaml" || echo '') + + # TODO validate the yaml here + + if [ "$(echo "$config" | grep 'codecov_token="')" != "" ] && [ "$token" = "" ]; + then + say "${e}-->${x} token set from yaml" + token="$(echo "$config" | grep 'codecov_token="' | sed -e 's/codecov_token="//' | sed -e 's/"\.*//')" + fi + + if [ "$(echo "$config" | grep 'codecov_url="')" != "" ] && [ "$url_o" = "" ]; + then + say "${e}-->${x} url set from yaml" + url_o="$(echo "$config" | grep 'codecov_url="' | sed -e 's/codecov_url="//' | sed -e 's/"\.*//')" + fi + + if [ "$(echo "$config" | grep 'codecov_slug="')" != "" ] && [ "$slug_o" = "" ]; + then + say "${e}-->${x} slug set from yaml" + slug_o="$(echo "$config" | grep 'codecov_slug="' | sed -e 's/codecov_slug="//' | sed -e 's/"\.*//')" + fi +else + say " ${g}Yaml not found, that's ok! 
Learn more at${x} ${b}http://docs.codecov.io/docs/codecov-yaml${x}" +fi + +if [ "$branch_o" != "" ]; +then + branch=$(urlencode "$branch_o") +else + branch=$(urlencode "$branch") +fi + +if [ "$slug_o" = "" ]; +then + urlencoded_slug=$(urlencode "$slug") +else + urlencoded_slug=$(urlencode "$slug_o") +fi + +query="branch=$branch\ + &commit=$commit\ + &build=$([ "$build_o" = "" ] && echo "$build" || echo "$build_o")\ + &build_url=$build_url\ + &name=$(urlencode "$name")\ + &tag=$([ "$tag_o" = "" ] && echo "$tag" || echo "$tag_o")\ + &slug=$urlencoded_slug\ + &service=$service\ + &flags=$flags\ + &pr=$([ "$pr_o" = "" ] && echo "${pr##\#}" || echo "${pr_o##\#}")\ + &job=$job\ + &cmd_args=$(IFS=,; echo "${codecov_flags[*]}")" + +if [ -n "$project" ] && [ -n "$server_uri" ]; +then + query=$(echo "$query&project=$project&server_uri=$server_uri" | tr -d ' ') +fi + +if [ "$parent" != "" ]; +then + query=$(echo "parent=$parent&$query" | tr -d ' ') +fi + +if [ "$ft_search" = "1" ]; +then + # detect bower comoponents location + bower_components="bower_components" + bower_rc=$(cd "$git_root" && cat .bowerrc 2>/dev/null || echo "") + if [ "$bower_rc" != "" ]; + then + bower_components=$(echo "$bower_rc" | tr -d '\n' | grep '"directory"' | cut -d'"' -f4 | sed -e 's/\/$//') + if [ "$bower_components" = "" ]; + then + bower_components="bower_components" + fi + fi + + # Swift Coverage + if [ "$ft_xcodellvm" = "1" ] && [ -d "$ddp" ]; + then + say "${e}==>${x} Processing Xcode reports via llvm-cov" + say " DerivedData folder: $ddp" + profdata_files=$(find "$ddp" -name '*.profdata' 2>/dev/null || echo '') + if [ "$profdata_files" != "" ]; + then + # xcode via profdata + if [ "$xp" = "" ]; + then + # xp=$(xcodebuild -showBuildSettings 2>/dev/null | grep -i "^\s*PRODUCT_NAME" | sed -e 's/.*= \(.*\)/\1/') + # say " ${e}->${x} Speed up Xcode processing by adding ${e}-J '$xp'${x}" + say " ${g}hint${x} Speed up Swift processing by using use ${g}-J 'AppName'${x} (regexp accepted)" + say " 
${g}hint${x} This will remove Pods/ from your report. Also ${b}https://docs.codecov.io/docs/ignoring-paths${x}" + fi + while read -r profdata; + do + if [ "$profdata" != "" ]; + then + swiftcov "$profdata" "$xp" + fi + done <<< "$profdata_files" + else + say " ${e}->${x} No Swift coverage found" + fi + + # Obj-C Gcov Coverage + if [ "$ft_gcov" = "1" ]; + then + say " ${e}->${x} Running $gcov_exe for Obj-C" + if [ "$ft_gcovout" = "0" ]; + then + # suppress gcov output + bash -c "find $ddp -type f -name '*.gcda' $gcov_include $gcov_ignore -exec $gcov_exe -p $gcov_arg {} +" >/dev/null 2>&1 || true + else + bash -c "find $ddp -type f -name '*.gcda' $gcov_include $gcov_ignore -exec $gcov_exe -p $gcov_arg {} +" || true + fi + fi + fi + + if [ "$ft_xcodeplist" = "1" ] && [ -d "$ddp" ]; + then + say "${e}==>${x} Processing Xcode plists" + plists_files=$(find "$ddp" -name '*.xccoverage' 2>/dev/null || echo '') + if [ "$plists_files" != "" ]; + then + while read -r plist; + do + if [ "$plist" != "" ]; + then + say " ${g}Found${x} plist file at $plist" + plutil -convert xml1 -o "$(basename "$plist").plist" -- "$plist" + fi + done <<< "$plists_files" + fi + fi + + # Gcov Coverage + if [ "$ft_gcov" = "1" ]; + then + say "${e}==>${x} Running $gcov_exe in $proj_root ${e}(disable via -X gcov)${x}" + if [ "$ft_gcovout" = "0" ]; + then + # suppress gcov output + bash -c "find $proj_root -type f -name '*.gcno' $gcov_include $gcov_ignore -exec $gcov_exe -pb $gcov_arg {} +" >/dev/null 2>&1 || true + else + bash -c "find $proj_root -type f -name '*.gcno' $gcov_include $gcov_ignore -exec $gcov_exe -pb $gcov_arg {} +" || true + fi + else + say "${e}==>${x} gcov disabled" + fi + + # Python Coverage + if [ "$ft_coveragepy" = "1" ]; + then + if [ ! 
-f coverage.xml ]; + then + if command -v coverage >/dev/null 2>&1; + then + say "${e}==>${x} Python coveragepy exists ${e}disable via -X coveragepy${x}" + + dotcoverage=$(find "$git_root" -name '.coverage' -or -name '.coverage.*' | head -1 || echo '') + if [ "$dotcoverage" != "" ]; + then + cd "$(dirname "$dotcoverage")" + if [ ! -f .coverage ]; + then + say " ${e}->${x} Running coverage combine" + coverage combine -a + fi + say " ${e}->${x} Running coverage xml" + if [ "$(coverage xml -i)" != "No data to report." ]; + then + files="$files +$PWD/coverage.xml" + else + say " ${r}No data to report.${x}" + fi + cd "$proj_root" + else + say " ${r}No .coverage file found.${x}" + fi + else + say "${e}==>${x} Python coveragepy not found" + fi + fi + else + say "${e}==>${x} Python coveragepy disabled" + fi + + if [ "$search_in_o" != "" ]; + then + # location override + search_in="$search_in_o" + fi + + say "$e==>$x Searching for coverage reports in:" + for _path in $search_in + do + say " ${g}+${x} $_path" + done + + patterns="find $search_in \( \ + -name vendor \ + -or -name '$bower_components' \ + -or -name '.egg-info*' \ + -or -name 'conftest_*.c.gcov' \ + -or -name .env \ + -or -name .envs \ + -or -name .git \ + -or -name .hg \ + -or -name .tox \ + -or -name .venv \ + -or -name .venvs \ + -or -name .virtualenv \ + -or -name .virtualenvs \ + -or -name .yarn-cache \ + -or -name __pycache__ \ + -or -name env \ + -or -name envs \ + -or -name htmlcov \ + -or -name js/generated/coverage \ + -or -name node_modules \ + -or -name venv \ + -or -name venvs \ + -or -name virtualenv \ + -or -name virtualenvs \ + \) -prune -or \ + -type f \( -name '*coverage*.*' \ + -or -name '*.clover' \ + -or -name '*.codecov.*' \ + -or -name '*.gcov' \ + -or -name '*.lcov' \ + -or -name '*.lst' \ + -or -name 'clover.xml' \ + -or -name 'cobertura.xml' \ + -or -name 'codecov.*' \ + -or -name 'cover.out' \ + -or -name 'codecov-result.json' \ + -or -name 'coverage-final.json' \ + -or -name 
'excoveralls.json' \ + -or -name 'gcov.info' \ + -or -name 'jacoco*.xml' \ + -or -name '*Jacoco*.xml' \ + -or -name 'lcov.dat' \ + -or -name 'lcov.info' \ + -or -name 'luacov.report.out' \ + -or -name 'naxsi.info' \ + -or -name 'nosetests.xml' \ + -or -name 'report.xml' \ + $include_cov \) \ + $exclude_cov \ + -not -name '*.am' \ + -not -name '*.bash' \ + -not -name '*.bat' \ + -not -name '*.bw' \ + -not -name '*.cfg' \ + -not -name '*.class' \ + -not -name '*.cmake' \ + -not -name '*.cmake' \ + -not -name '*.conf' \ + -not -name '*.coverage' \ + -not -name '*.cp' \ + -not -name '*.cpp' \ + -not -name '*.crt' \ + -not -name '*.css' \ + -not -name '*.csv' \ + -not -name '*.csv' \ + -not -name '*.data' \ + -not -name '*.db' \ + -not -name '*.dox' \ + -not -name '*.ec' \ + -not -name '*.ec' \ + -not -name '*.egg' \ + -not -name '*.el' \ + -not -name '*.env' \ + -not -name '*.erb' \ + -not -name '*.exe' \ + -not -name '*.ftl' \ + -not -name '*.gif' \ + -not -name '*.gradle' \ + -not -name '*.gz' \ + -not -name '*.h' \ + -not -name '*.html' \ + -not -name '*.in' \ + -not -name '*.jade' \ + -not -name '*.jar*' \ + -not -name '*.jpeg' \ + -not -name '*.jpg' \ + -not -name '*.js' \ + -not -name '*.less' \ + -not -name '*.log' \ + -not -name '*.m4' \ + -not -name '*.mak*' \ + -not -name '*.md' \ + -not -name '*.o' \ + -not -name '*.p12' \ + -not -name '*.pem' \ + -not -name '*.png' \ + -not -name '*.pom*' \ + -not -name '*.profdata' \ + -not -name '*.proto' \ + -not -name '*.ps1' \ + -not -name '*.pth' \ + -not -name '*.py' \ + -not -name '*.pyc' \ + -not -name '*.pyo' \ + -not -name '*.rb' \ + -not -name '*.rsp' \ + -not -name '*.rst' \ + -not -name '*.ru' \ + -not -name '*.sbt' \ + -not -name '*.scss' \ + -not -name '*.scss' \ + -not -name '*.serialized' \ + -not -name '*.sh' \ + -not -name '*.snapshot' \ + -not -name '*.sql' \ + -not -name '*.svg' \ + -not -name '*.tar.tz' \ + -not -name '*.template' \ + -not -name '*.whl' \ + -not -name '*.xcconfig' \ + -not -name 
'*.xcoverage.*' \ + -not -name '*/classycle/report.xml' \ + -not -name '*codecov.yml' \ + -not -name '*~' \ + -not -name '.*coveragerc' \ + -not -name '.coverage*' \ + -not -name 'coverage-summary.json' \ + -not -name 'createdFiles.lst' \ + -not -name 'fullLocaleNames.lst' \ + -not -name 'include.lst' \ + -not -name 'inputFiles.lst' \ + -not -name 'phpunit-code-coverage.xml' \ + -not -name 'phpunit-coverage.xml' \ + -not -name 'remapInstanbul.coverage*.json' \ + -not -name 'scoverage.measurements.*' \ + -not -name 'test_*_coverage.txt' \ + -not -name 'testrunner-coverage*' \ + -print 2>/dev/null" + files=$(eval "$patterns" || echo '') + +elif [ "$include_cov" != "" ]; +then + files=$(eval "find $search_in -type f \( ${include_cov:5} \)$exclude_cov 2>/dev/null" || echo '') +elif [ "$direct_file_upload" != "" ]; +then + files=$direct_file_upload +fi + +num_of_files=$(echo "$files" | wc -l | tr -d ' ') +if [ "$num_of_files" != '' ] && [ "$files" != '' ]; +then + say " ${e}->${x} Found $num_of_files reports" +fi + +# no files found +if [ "$files" = "" ]; +then + say "${r}-->${x} No coverage report found." 
+ say " Please visit ${b}http://docs.codecov.io/docs/supported-languages${x}" + exit ${exit_with}; +fi + +if [ "$ft_network" == "1" ]; +then + say "${e}==>${x} Detecting git/mercurial file structure" + network=$(cd "$git_root" && git ls-files $git_ls_files_recurse_submodules_o 2>/dev/null || hg locate 2>/dev/null || echo "") + if [ "$network" = "" ]; + then + network=$(find "$git_root" \( \ + -name virtualenv \ + -name .virtualenv \ + -name virtualenvs \ + -name .virtualenvs \ + -name '*.png' \ + -name '*.gif' \ + -name '*.jpg' \ + -name '*.jpeg' \ + -name '*.md' \ + -name .env \ + -name .envs \ + -name env \ + -name envs \ + -name .venv \ + -name .venvs \ + -name venv \ + -name venvs \ + -name .git \ + -name .egg-info \ + -name shunit2-2.1.6 \ + -name vendor \ + -name __pycache__ \ + -name node_modules \ + -path "*/$bower_components/*" \ + -path '*/target/delombok/*' \ + -path '*/build/lib/*' \ + -path '*/js/generated/coverage/*' \ + \) -prune -or \ + -type f -print 2>/dev/null || echo '') + fi + + if [ "$network_filter_o" != "" ]; + then + network=$(echo "$network" | grep -e "$network_filter_o/*") + fi + if [ "$prefix_o" != "" ]; + then + network=$(echo "$network" | awk "{print \"$prefix_o/\"\$0}") + fi +fi + +upload_file=$(mktemp /tmp/codecov.XXXXXX) +adjustments_file=$(mktemp /tmp/codecov.adjustments.XXXXXX) + +cleanup() { + rm -f "$upload_file" "$adjustments_file" "$upload_file.gz" +} + +trap cleanup INT ABRT TERM + + +if [ "$env" != "" ]; +then + inc_env="" + say "${e}==>${x} Appending build variables" + for varname in $(echo "$env" | tr ',' ' ') + do + if [ "$varname" != "" ]; + then + say " ${g}+${x} $varname" + inc_env="${inc_env}${varname}=$(eval echo "\$${varname}") +" + fi + done + echo "$inc_env<<<<<< ENV" >> "$upload_file" +fi + +# Append git file list +# write discovered yaml location +if [ "$direct_file_upload" = "" ]; +then + echo "$yaml" >> "$upload_file" +fi + +if [ "$ft_network" == "1" ]; +then + i="woff|eot|otf" # fonts + 
i="$i|gif|png|jpg|jpeg|psd" # images + i="$i|ptt|pptx|numbers|pages|md|txt|xlsx|docx|doc|pdf|csv" # docs + i="$i|.gitignore" # supporting docs + + if [ "$ft_html" != "1" ]; + then + i="$i|html" + fi + + if [ "$ft_yaml" != "1" ]; + then + i="$i|yml|yaml" + fi + + echo "$network" | grep -vwE "($i)$" >> "$upload_file" +fi +echo "<<<<<< network" >> "$upload_file" + +if [ "$direct_file_upload" = "" ]; +then + fr=0 + say "${e}==>${x} Reading reports" + while IFS='' read -r file; + do + # read the coverage file + if [ "$(echo "$file" | tr -d ' ')" != '' ]; + then + if [ -f "$file" ]; + then + report_len=$(wc -c < "$file") + if [ "$report_len" -ne 0 ]; + then + say " ${g}+${x} $file ${e}bytes=$(echo "$report_len" | tr -d ' ')${x}" + # append to to upload + _filename=$(basename "$file") + if [ "${_filename##*.}" = 'gcov' ]; + then + { + echo "# path=$(echo "$file.reduced" | sed "s|^$git_root/||")"; + # get file name + head -1 "$file"; + } >> "$upload_file" + # 1. remove source code + # 2. remove ending bracket lines + # 3. remove whitespace + # 4. remove contextual lines + # 5. remove function names + awk -F': *' '{print $1":"$2":"}' "$file" \ + | sed '\/: *} *$/d' \ + | sed 's/^ *//' \ + | sed '/^-/d' \ + | sed 's/^function.*/func/' >> "$upload_file" + else + { + echo "# path=${file//^$git_root/||}"; + cat "$file"; + } >> "$upload_file" + fi + echo "<<<<<< EOF" >> "$upload_file" + fr=1 + if [ "$clean" = "1" ]; + then + rm "$file" + fi + else + say " ${r}-${x} Skipping empty file $file" + fi + else + say " ${r}-${x} file not found at $file" + fi + fi + done <<< "$(echo -e "$files")" + + if [ "$fr" = "0" ]; + then + say "${r}-->${x} No coverage data found." + say " Please visit ${b}http://docs.codecov.io/docs/supported-languages${x}" + say " search for your projects language to learn how to collect reports." 
+ exit ${exit_with}; + fi +else + cp "$direct_file_upload" "$upload_file" + if [ "$clean" = "1" ]; + then + rm "$direct_file_upload" + fi +fi + +if [ "$ft_fix" = "1" ]; +then + say "${e}==>${x} Appending adjustments" + say " ${b}https://docs.codecov.io/docs/fixing-reports${x}" + + empty_line='^[[:space:]]*$' + # // + syntax_comment='^[[:space:]]*//.*' + # /* or */ + syntax_comment_block='^[[:space:]]*(\/\*|\*\/)[[:space:]]*$' + # { or } + syntax_bracket='^[[:space:]]*[\{\}][[:space:]]*(//.*)?$' + # [ or ] + syntax_list='^[[:space:]]*[][][[:space:]]*(//.*)?$' + # func ... { + syntax_go_func='^[[:space:]]*func[[:space:]]*[\{][[:space:]]*$' + + # shellcheck disable=SC2089 + skip_dirs="-not -path '*/$bower_components/*' \ + -not -path '*/node_modules/*'" + + cut_and_join() { + awk 'BEGIN { FS=":" } + $3 ~ /\/\*/ || $3 ~ /\*\// { print $0 ; next } + $1!=key { if (key!="") print out ; key=$1 ; out=$1":"$2 ; next } + { out=out","$2 } + END { print out }' 2>/dev/null + } + + if echo "$network" | grep -m1 '.kt$' 1>/dev/null; + then + # skip brackets and comments + cd "$git_root" && \ + find . -type f \ + -name '*.kt' \ + -exec \ + grep -nIHE -e "$syntax_bracket" \ + -e "$syntax_comment_block" {} \; \ + | cut_and_join \ + >> "$adjustments_file" \ + || echo '' + + # last line in file + cd "$git_root" && \ + find . -type f \ + -name '*.kt' -exec \ + wc -l {} \; \ + | while read -r l; do echo "EOF: $l"; done \ + 2>/dev/null \ + >> "$adjustments_file" \ + || echo '' + fi + + if echo "$network" | grep -m1 '.go$' 1>/dev/null; + then + # skip empty lines, comments, and brackets + cd "$git_root" && \ + find . 
-type f \ + -not -path '*/vendor/*' \ + -not -path '*/caches/*' \ + -name '*.go' \ + -exec \ + grep -nIHE \ + -e "$empty_line" \ + -e "$syntax_comment" \ + -e "$syntax_comment_block" \ + -e "$syntax_bracket" \ + -e "$syntax_go_func" \ + {} \; \ + | cut_and_join \ + >> "$adjustments_file" \ + || echo '' + fi + + if echo "$network" | grep -m1 '.dart$' 1>/dev/null; + then + # skip brackets + cd "$git_root" && \ + find . -type f \ + -name '*.dart' \ + -exec \ + grep -nIHE \ + -e "$syntax_bracket" \ + {} \; \ + | cut_and_join \ + >> "$adjustments_file" \ + || echo '' + fi + + if echo "$network" | grep -m1 '.php$' 1>/dev/null; + then + # skip empty lines, comments, and brackets + cd "$git_root" && \ + find . -type f \ + -not -path "*/vendor/*" \ + -name '*.php' \ + -exec \ + grep -nIHE \ + -e "$syntax_list" \ + -e "$syntax_bracket" \ + -e '^[[:space:]]*\);[[:space:]]*(//.*)?$' \ + {} \; \ + | cut_and_join \ + >> "$adjustments_file" \ + || echo '' + fi + + if echo "$network" | grep -m1 '\(.c\.cpp\|.cxx\|.h\|.hpp\|.m\|.swift\|.vala\)$' 1>/dev/null; + then + # skip brackets + # shellcheck disable=SC2086,SC2090 + cd "$git_root" && \ + find . -type f \ + $skip_dirs \ + \( \ + -name '*.c' \ + -or -name '*.cpp' \ + -or -name '*.cxx' \ + -or -name '*.h' \ + -or -name '*.hpp' \ + -or -name '*.m' \ + -or -name '*.swift' \ + -or -name '*.vala' \ + \) -exec \ + grep -nIHE \ + -e "$empty_line" \ + -e "$syntax_bracket" \ + -e '// LCOV_EXCL' \ + {} \; \ + | cut_and_join \ + >> "$adjustments_file" \ + || echo '' + + # skip brackets + # shellcheck disable=SC2086,SC2090 + cd "$git_root" && \ + find . 
-type f \ + $skip_dirs \ + \( \ + -name '*.c' \ + -or -name '*.cpp' \ + -or -name '*.cxx' \ + -or -name '*.h' \ + -or -name '*.hpp' \ + -or -name '*.m' \ + -or -name '*.swift' \ + -or -name '*.vala' \ + \) -exec \ + grep -nIH '// LCOV_EXCL' \ + {} \; \ + >> "$adjustments_file" \ + || echo '' + + fi + + found=$(< "$adjustments_file" tr -d ' ') + + if [ "$found" != "" ]; + then + say " ${g}+${x} Found adjustments" + { + echo "# path=fixes"; + cat "$adjustments_file"; + echo "<<<<<< EOF"; + } >> "$upload_file" + rm -rf "$adjustments_file" + else + say " ${e}->${x} No adjustments found" + fi +fi + +if [ "$url_o" != "" ]; +then + url="$url_o" +fi + +if [ "$dump" != "0" ]; +then + # trim whitespace from query + say " ${e}->${x} Dumping upload file (no upload)" + echo "$url/upload/v4?$(echo "package=$package-$VERSION&$query" | tr -d ' ')" + cat "$upload_file" +else + if [ "$save_to" != "" ]; + then + say "${e}==>${x} Copying upload file to ${save_to}" + mkdir -p "$(dirname "$save_to")" + cp "$upload_file" "$save_to" + fi + + say "${e}==>${x} Gzipping contents" + gzip -nf9 "$upload_file" + say " $(du -h "$upload_file.gz")" + + query=$(echo "${query}" | tr -d ' ') + say "${e}==>${x} Uploading reports" + say " ${e}url:${x} $url" + say " ${e}query:${x} $query" + + # Full query (to display on terminal output) + query=$(echo "package=$package-$VERSION&token=$token&$query" | tr -d ' ') + queryNoToken=$(echo "package=$package-$VERSION&token=&$query" | tr -d ' ') + + if [ "$ft_s3" = "1" ]; + then + say "${e}->${x} Pinging Codecov" + say "$url/upload/v4?$queryNoToken" + # shellcheck disable=SC2086,2090 + res=$(curl $curl_s -X POST $cacert \ + --retry 5 --retry-delay 2 --connect-timeout 2 \ + -H 'X-Reduced-Redundancy: false' \ + -H 'X-Content-Type: application/x-gzip' \ + -H 'Content-Length: 0' \ + -H "X-Upload-Token: ${token}" \ + --write-out "\n%{response_code}\n" \ + $curlargs \ + "$url/upload/v4?$query" || true) + # a good reply is "https://codecov.io" + "\n" + 
"https://storage.googleapis.com/codecov/..." + s3target=$(echo "$res" | sed -n 2p) + status=$(tail -n1 <<< "$res") + + if [ "$status" = "200" ] && [ "$s3target" != "" ]; + then + say "${e}->${x} Uploading to" + say "${s3target}" + + # shellcheck disable=SC2086 + s3=$(curl -fiX PUT \ + --data-binary @"$upload_file.gz" \ + -H 'Content-Type: application/x-gzip' \ + -H 'Content-Encoding: gzip' \ + $curlawsargs \ + "$s3target" || true) + + if [ "$s3" != "" ]; + then + say " ${g}->${x} Reports have been successfully queued for processing at ${b}$(echo "$res" | sed -n 1p)${x}" + exit 0 + else + say " ${r}X>${x} Failed to upload" + fi + elif [ "$status" = "400" ]; + then + # 400 Error + say "${r}${res}${x}" + exit ${exit_with} + else + say "${r}${res}${x}" + fi + fi + + say "${e}==>${x} Uploading to Codecov" + + # shellcheck disable=SC2086,2090 + res=$(curl -X POST $cacert \ + --data-binary @"$upload_file.gz" \ + --retry 5 --retry-delay 2 --connect-timeout 2 \ + -H 'Content-Type: text/plain' \ + -H 'Content-Encoding: gzip' \ + -H 'X-Content-Encoding: gzip' \ + -H "X-Upload-Token: ${token}" \ + -H 'Accept: text/plain' \ + $curlargs \ + "$url/upload/v2?$query&attempt=$i" || echo 'HTTP 500') + # {"message": "Coverage reports upload successfully", "uploaded": true, "queued": true, "id": "...", "url": "https://codecov.io/..."\} + uploaded=$(grep -o '\"uploaded\": [a-z]*' <<< "$res" | head -1 | cut -d' ' -f2) + if [ "$uploaded" = "true" ] + then + say " Reports have been successfully queued for processing at ${b}$(echo "$res" | head -2 | tail -1)${x}" + exit 0 + else + say " ${g}${res}${x}" + exit ${exit_with} + fi + + say " ${r}X> Failed to upload coverage reports${x}" +fi + +exit ${exit_with} diff --git a/tools/bin/go_core_fuzz b/tools/bin/go_core_fuzz new file mode 100644 index 00000000..c3119f4b --- /dev/null +++ b/tools/bin/go_core_fuzz @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +set -o pipefail +set +e + +SCRIPT_PATH=`dirname "$0"`; SCRIPT_PATH=`eval "cd \"$SCRIPT_PATH\" && 
pwd"` +OUTPUT_FILE=${OUTPUT_FILE:-"./output.txt"} +USE_TEE="${USE_TEE:-true}" + +echo "Failed fuzz tests and panics: ---------------------" +echo "" +use_tee() { + if [ "$USE_TEE" = "true" ]; then + tee "$@" + else + cat > "$@" + fi +} + +# the amount of --seconds here is subject to change based on how long the CI job takes in the future +# as we add more fuzz tests, we should take into consideration increasing this timelapse, so we can have enough coverage. +# We are timing out after ~10mins in case the tests hang. (Current CI duration is ~8m, modify if needed) +cd ./fuzz && timeout 10m ./fuzz_all_native.py --ci --seconds 420 | use_tee $OUTPUT_FILE +EXITCODE=${PIPESTATUS[0]} + +# Assert no known sensitive strings present in test logger output +printf "\n----------------------------------------------\n\n" +echo "Beginning check of output logs for sensitive strings" +$SCRIPT_PATH/scrub_logs $OUTPUT_FILE +if [[ $? != 0 ]]; then + exit 1 +fi + +echo "Exit code: $EXITCODE" +if [[ $EXITCODE != 0 ]]; then + echo "Encountered fuzz test failures. Logging all failing fuzz inputs:" + find . -type f|fgrep '/testdata/fuzz/'|while read f; do echo $f; cat $f; done +else + echo "All fuzz tests passed!" +fi +exit $EXITCODE diff --git a/tools/bin/go_core_race_tests b/tools/bin/go_core_race_tests new file mode 100644 index 00000000..aa6510c1 --- /dev/null +++ b/tools/bin/go_core_race_tests @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -ex +OUTPUT_FILE=${OUTPUT_FILE:-"./output.txt"} +USE_TEE="${USE_TEE:-true}" +TIMEOUT="${TIMEOUT:-30s}" +COUNT="${COUNT:-10}" +GO_LDFLAGS=$(bash tools/bin/ldflags) +use_tee() { + if [ "$USE_TEE" = "true" ]; then + tee "$@" + else + cat > "$@" + fi +} +GORACE="log_path=$PWD/race" go test -json -race -ldflags "$GO_LDFLAGS" -shuffle on -timeout "$TIMEOUT" -count "$COUNT" $1 | use_tee "$OUTPUT_FILE" +EXITCODE=${PIPESTATUS[0]} +# Fail if any race logs are present. 
+if ls race.* &>/dev/null +then + echo "Race(s) detected" + exit 1 +fi +if test $EXITCODE -gt 1 +then + exit $EXITCODE +else + exit 0 +fi diff --git a/tools/bin/go_core_tests b/tools/bin/go_core_tests new file mode 100644 index 00000000..694a51d1 --- /dev/null +++ b/tools/bin/go_core_tests @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +set -o pipefail +set +e + +SCRIPT_PATH=`dirname "$0"`; SCRIPT_PATH=`eval "cd \"$SCRIPT_PATH\" && pwd"` +OUTPUT_FILE=${OUTPUT_FILE:-"./output.txt"} +USE_TEE="${USE_TEE:-true}" + +echo "Failed tests and panics: ---------------------" +echo "" +GO_LDFLAGS=$(bash tools/bin/ldflags) +use_tee() { + if [ "$USE_TEE" = "true" ]; then + tee "$@" + else + cat > "$@" + fi +} +go test -json -ldflags "$GO_LDFLAGS" -tags integration $TEST_FLAGS -covermode=atomic -coverpkg=./... -coverprofile=coverage.txt $1 | use_tee $OUTPUT_FILE +EXITCODE=${PIPESTATUS[0]} + +# Assert no known sensitive strings present in test logger output +printf "\n----------------------------------------------\n\n" +echo "Beginning check of output logs for sensitive strings" +$SCRIPT_PATH/scrub_logs $OUTPUT_FILE +if [[ $? != 0 ]]; then + exit 1 +fi + +echo "Exit code: $EXITCODE" +if [[ $EXITCODE != 0 ]]; then + echo "Encountered test failures." +else + echo "All tests passed!" 
+ # uploading coverage.txt to CodeCov + $(dirname "$0")/codecov -f coverage.txt +fi +exit $EXITCODE diff --git a/tools/bin/goreleaser_utils b/tools/bin/goreleaser_utils new file mode 100644 index 00000000..4eb8e1ac --- /dev/null +++ b/tools/bin/goreleaser_utils @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +# get machine / kernel name +_get_platform() { + uname | tr '[:upper:]' '[:lower:]' +} + +# get machine architecture name +# See https://github.com/joschi/asdf-java/blob/aarch64-support/bin/functions#L33 +_get_arch() { + arch="$(uname -m)" + case "${arch}" in + x86_64 | amd64) echo "x86_64" ;; + aarch64 | arm64) echo "arm64" ;; + *) + echo "Unknown machine architecture: ${arch}" + exit 1 + ;; + esac +} + +# get lib wasmvm path +_get_wasmvm_lib_path() { + local -r platform="$1" + local -r arch="$2" + wasmvm_dir=$(go list -json -m all | jq -r '. | select(.Path == "github.com/CosmWasm/wasmvm") | .Dir') + shared_lib_dir="$wasmvm_dir/internal/api" + lib_name="libwasmvm" + if [ "$platform" == "darwin" ]; then + lib_extension="dylib" + elif [ "$platform" == "linux" ]; then + case "${arch}" in + x86_64 | amd64) lib_extension="x86_64.so" ;; + aarch64 | arm64) lib_extension="aarch64.so" ;; + *) echo "Unsupported arch $arch" && exit 1 ;; + esac + else + echo "Unsupported platform $platform" + exit 1 + fi + echo "$shared_lib_dir/${lib_name}.$lib_extension" +} + +# global goreleaser before hook +# moves native libraries to temp directories used by docker images / archives +before_hook() { + local -r lib_path=tmp + # MOVE NATIVE LIBRARIES HERE + local -r wasmvm_lib_path_linux_amd64=$(_get_wasmvm_lib_path "linux" "amd64") + local -r wasmvm_lib_path_linux_arm64=$(_get_wasmvm_lib_path "linux" "arm64") + local -r wasmvm_lib_path_darwin_amd64=$(_get_wasmvm_lib_path "darwin" "amd64") + local -r wasmvm_lib_path_darwin_arm64=$(_get_wasmvm_lib_path "darwin" "arm64") + mkdir -p "$lib_path/linux_amd64/libs" + cp -f "$wasmvm_lib_path_linux_amd64" "$lib_path/linux_amd64/libs" + mkdir -p 
"$lib_path/linux_arm64/libs" + cp -f "$wasmvm_lib_path_linux_arm64" "$lib_path/linux_arm64/libs" + mkdir -p "$lib_path/darwin_amd64/libs" + cp -f "$wasmvm_lib_path_darwin_amd64" "$lib_path/darwin_amd64/libs" + mkdir -p "$lib_path/darwin_arm64/libs" + cp -f "$wasmvm_lib_path_darwin_arm64" "$lib_path/darwin_arm64/libs" +} + +# binary build post hook +# moves native libraries to binary libs directory +build_post_hook() { + local -r dist_path=$1 + local -r lib_path=$dist_path/libs + local -r platform=$2 + local -r arch=$3 + # COPY NATIVE LIBRARIES HERE + local -r wasmvm_lib_path=$(_get_wasmvm_lib_path "$platform" "$arch") + mkdir -p "$lib_path" + cp "$wasmvm_lib_path" "$lib_path" +} + +"$@" diff --git a/tools/bin/goreleaser_wrapper b/tools/bin/goreleaser_wrapper new file mode 100644 index 00000000..1aedcf12 --- /dev/null +++ b/tools/bin/goreleaser_wrapper @@ -0,0 +1,41 @@ +#!/bin/bash +set -euo pipefail +set -x +# get machine / kernel name +_get_platform() { + uname | tr '[:upper:]' '[:lower:]' +} + +# get macos sdk directory +_get_macos_sdk_dir() { + if [[ -z "${MACOS_SDK_DIR-}" ]]; then + platform=$(_get_platform) + if [[ "$platform" = 'darwin' ]]; then + if [[ "$(command -v xcrun)" ]]; then + echo "$(xcrun --sdk macosx --show-sdk-path)" + else + echo "You need to have MacOS Command Line Tools installed, you can install it via '$ xcode-select --install'" + exit 1 + fi + else + echo "You must set the MACOS_SDK_DIR env var to where you have the MacOS SDK installed" + echo "If you do not have a MacOS SDK installed, see https://github.com/joseluisq/macosx-sdks/tree/12.3 to obtain one" + exit 1 + fi + else + echo "$MACOS_SDK_DIR" + fi +} + +macos_sdk_dir=$(_get_macos_sdk_dir) +framework_search_path="/System/Library/Frameworks" +include_search_path='/usr/include' + +ZIG_FLAGS_DARWIN="-isysroot$macos_sdk_dir \ + -F$macos_sdk_dir$framework_search_path \ + -iframeworkwithsysroot$framework_search_path \ + -iwithsysroot$include_search_path \ + -mmacosx-version-min=11.7.1" \ 
+ZIG_EXEC=$(which zig) \ +PLUGIN_VERSION=$(cat VERSION) \ +goreleaser "$@" diff --git a/tools/bin/ldflags b/tools/bin/ldflags new file mode 100644 index 00000000..2e29fdc6 --- /dev/null +++ b/tools/bin/ldflags @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd "$(dirname "$0")" + +COMMIT_SHA=${COMMIT_SHA:-$(git rev-parse HEAD)} +VERSION=${VERSION:-$(cat "../../VERSION")} + +echo "-X github.com/goplugin/pluginv3.0/v2/core/static.Version=$VERSION -X github.com/goplugin/pluginv3.0/v2/core/static.Sha=$COMMIT_SHA" diff --git a/tools/bin/lint b/tools/bin/lint new file mode 100644 index 00000000..27018ac8 --- /dev/null +++ b/tools/bin/lint @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -euo pipefail + +cd "$(dirname "$0")"/../.. + +golangci-lint --tests=false run diff --git a/tools/bin/modgraph b/tools/bin/modgraph new file mode 100644 index 00000000..7177436b --- /dev/null +++ b/tools/bin/modgraph @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +# Generates go.md + +set -e + +echo "# goplugin Go modules +\`\`\`mermaid +flowchart LR + subgraph chains + plugin-cosmos + plugin-evm + plugin-solana + plugin-starknet/relayer + end + + subgraph products + plugin-automation + plugin-ccip + plugin-data-streams + plugin-feeds + plugin-functions + plugin-vrf + end + + classDef outline stroke-dasharray:6,fill:none; + class chains,products outline +" +go mod graph | \ + # org only + grep goplugin.*goplugin | \ + # drop prefix + sed s/"github\.com\/goplugin\/"/""/g | \ + # insert edges + sed s/" "/" --> "/ | \ + # drop versions + sed s/"@[^ ]*"/""/g | \ + # insert links + sed s/"\([^ ]*\)$"/"\1\nclick \1 href \"https:\/\/github.com\/goplugin\/\1\""/ | \ + # truncate links to repo + sed s/"\"https:\/\/github.com\/goplugin\/\([^\"\/]*\)\/.*\""/"\"https:\/\/github.com\/goplugin\/\1\""/ | \ + # dedupe lines + awk '!x[$0]++' | \ + # indent + sed 's/^/ /' +echo "\`\`\`" diff --git a/tools/bin/scrub_logs b/tools/bin/scrub_logs new file mode 100644 index 00000000..03cfe93c --- /dev/null +++ 
b/tools/bin/scrub_logs @@ -0,0 +1,78 @@ +#!/usr/bin/env bash +set -e + +# Path to output.txt file produced from `go test ... | tee $OUTPUT_FILE` +OUTPUT_FILE=${1:-output.txt} +if [ ! -f "$OUTPUT_FILE" ]; then + printf "scrub_logs script error: No output file found at '$OUTPUT_FILE'" + exit 1 +fi + +# Scan for sensitive strings in the -v verbose go test output logs +# with constants from `core/internal/cltest/cltest.go` +declare -a arr=( + # General secret const strings and environment config + "2d25e62eaf9143e993acaf48691564b2" # APIKey of the fixture API user + "1eCP/w0llVkchejFaoBpfIGaLRxZK54lTXBCT22YLW+pdzE4Fafy/XO5LoJ2uwHi" # APISecret of the fixture API user + "1eCP%2fw0llVkchejFaoBpfIGaLRxZK54lTXBCT22YLW%2bpdzE4Fafy%2fXO5LoJ2uwHi" # URL Encoded + "T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ" # Tooling secret + "T.tLHkcmwePT%2fp%2c%5dsYuntjwHKAsrhm%234eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ" # URL Encoded + "16charlengthp4SsW0rD1!@#_" # Test user password + "16charlengthp4SsW0rD1%21%40%23_" # URL Encoded + "wss://" # Possible credentials + "psql://" # Possible DB connection string + "psql%3a%2f%2f" # URL Encoded + "postgresql://" # Possible DB connection string + "postgresql%3a%2f%2f" # URL Encoded + + # Functionality for key exports (JSON) are in the wallet v3 format, encrypted with the cltest.Password value. + # The actual value of the secret of these test seeds are 0x1. Secret keys are not as easily accessible and .String() methods are hardcoded to omit them + # but additionally check the following representations of that secret for %#v logs of that struct + "nat{0x1}" # Logged key struct could leak secret value, ex. 
&secp256k1.secp256k1Scalar{neg:false, abs:big.nat{0x1}}
+  "nat%7B0x1%7D" # URL Encoded
+  "nat{0x2}"
+  "nat%7B0x2%7D"
+  "134287936" # Int representation of internal Ed25519PrivateKey
+  # secret used by keys with KeyBigIntSeed seed (1)
+  "CAESQAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAzswVB9wd3XKVlRwpCIjwla25BE0bc9aW5t8GXWg71Pw=" # Base64 representation of internal Ed25519PrivateKey
+  # secret used by keys with KeyBigIntSeed seed (1)
+  "CAESQAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAzswVB9wd3XKVlRwpCIjwla25BE0bc9aW5t8GXWg71Pw%3D" # URL Encoded
+  "AQ=="
+  "AQ%3D%3D"
+  "Ag%3D%3D"
+)
+
+declare -a testarr=(
+  "wss://foo.bar"
+  "wss://fake.com"
+  "wss://web.socket/test"
+)
+
+# For each potential secret above, check for presence of string and encoded version in log files
+MATCHED_LINES=()
+for substr in "${arr[@]}"; do
+  MATCHES="$(grep -i "$substr" $OUTPUT_FILE || true)"
+  if [ -n "$MATCHES" ]; then
+    # drop matched lines that belong to known-safe test fixtures; anything left over is a real leak
+    for safesubstr in "${testarr[@]}"; do
+      MATCHES="$(echo "${MATCHES}" | grep -vi "$safesubstr" || true)"
+    done
+    if [ -n "$MATCHES" ]; then
+      MATCHED_LINES+=("$MATCHES")
+    fi
+  fi
+done
+
+# No matches found in logs, return success
+if [ ${#MATCHED_LINES[@]} -eq 0 ]; then
+  echo "No matches in test log output against known set of sensitive strings"
+  exit 0
+fi
+
+# Instances of secret strings were matched, exit the test script with an error
+printf "\n"
+printf "Sensitive string(s) found in test output logs:\n----------------------------------------------\n"
+printf "%s\n" "${MATCHED_LINES[@]}"
+printf "\n\n"
+printf "❌ The above sensitive string(s) were found in the test harness logs output file - this means that secrets could be leaked/logged to external services when running in production.\n"
+exit 1
diff --git a/tools/bin/self-signed-certs b/tools/bin/self-signed-certs
new file mode 100644
index 00000000..1d0c05fa
--- /dev/null
+++ b/tools/bin/self-signed-certs
@@ -0,0 +1,13
@@ +#!/usr/bin/env bash + +GIT_ROOT=`git rev-parse --show-toplevel` +pushd $GIT_ROOT >/dev/null + +printf -- "WARNING: This is generating a self signed certificate which is recommended for development purposes only!\n\n" + +openssl req -x509 -out tools/clroot/tls/server.crt -keyout tools/clroot/tls/server.key \ + -newkey rsa:2048 -nodes -sha256 \ + -subj '/CN=localhost' -extensions EXT -config <( \ + printf "[dn]\nCN=localhost\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:localhost\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth") + +popd >/dev/null diff --git a/tools/bin/trace_transaction_parity b/tools/bin/trace_transaction_parity new file mode 100644 index 00000000..1042ba6f --- /dev/null +++ b/tools/bin/trace_transaction_parity @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +hash=$1 +curl -sS --data '{"method":"trace_transaction","params":["'$hash'"],"id":1,"jsonrpc":"2.0"}' \ + -H "Content-Type: application/json" -X POST localhost:18545 + diff --git a/tools/ci/check_solc_hashes b/tools/ci/check_solc_hashes new file mode 100644 index 00000000..0fe7d091 --- /dev/null +++ b/tools/ci/check_solc_hashes @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +# This script checks that the locally installed solc binaries match the given hashes. +# It is intended to be run on CI to give some guarantee of the integrity of the installed solidity compilers. +# Only supports linux-amd64 binaries. 
+
+set -e
+
+SOLC_6_6_LOCAL_PATH="$HOME/.solc-select/artifacts/solc-0.6.6/solc-0.6.6"
+SOLC_7_6_LOCAL_PATH="$HOME/.solc-select/artifacts/solc-0.7.6/solc-0.7.6"
+SOLC_8_6_LOCAL_PATH="$HOME/.solc-select/artifacts/solc-0.8.6/solc-0.8.6"
+SOLC_8_15_LOCAL_PATH="$HOME/.solc-select/artifacts/solc-0.8.15/solc-0.8.15"
+
+SOLC_6_6_LOCAL_SHA=`sha256sum -b $SOLC_6_6_LOCAL_PATH | cut -d " " -f1`
+SOLC_6_6_EXPECTED_SHA="5d8cd4e0cc02e9946497db68c06d56326a78ff95a21c9265cfedb819a10a539d"
+
+SOLC_7_6_LOCAL_SHA=`sha256sum -b $SOLC_7_6_LOCAL_PATH | cut -d " " -f1`
+SOLC_7_6_EXPECTED_SHA="bd69ea85427bf2f4da74cb426ad951dd78db9dfdd01d791208eccc2d4958a6bb"
+
+SOLC_8_6_LOCAL_SHA=`sha256sum -b $SOLC_8_6_LOCAL_PATH | cut -d " " -f1`
+SOLC_8_6_EXPECTED_SHA="abd5c4f3f262bc3ed7951b968c63f98e83f66d9a5c3568ab306eac49250aec3e"
+
+SOLC_8_15_LOCAL_SHA=`sha256sum -b $SOLC_8_15_LOCAL_PATH | cut -d " " -f1`
+SOLC_8_15_EXPECTED_SHA="5189155ce322d57fb75e8518d9b39139627edea4fb25b5f0ebed0391c52e74cc"
+
+if [ "$SOLC_6_6_LOCAL_SHA" != "$SOLC_6_6_EXPECTED_SHA" ]; then
+  printf "solc 0.6.6 did not match checksum.\nGot '$SOLC_6_6_LOCAL_SHA'\nExpected '$SOLC_6_6_EXPECTED_SHA'\n"
+  exit 1
+fi
+
+if [ "$SOLC_7_6_LOCAL_SHA" != "$SOLC_7_6_EXPECTED_SHA" ]; then
+  printf "solc 0.7.6 did not match checksum.\nGot '$SOLC_7_6_LOCAL_SHA'\nExpected '$SOLC_7_6_EXPECTED_SHA'\n"
+  exit 1
+fi
+
+if [ "$SOLC_8_6_LOCAL_SHA" != "$SOLC_8_6_EXPECTED_SHA" ]; then
+  printf "solc 0.8.6 did not match checksum.\nGot '$SOLC_8_6_LOCAL_SHA'\nExpected '$SOLC_8_6_EXPECTED_SHA'\n"
+  exit 1
+fi
+
+if [ "$SOLC_8_15_LOCAL_SHA" != "$SOLC_8_15_EXPECTED_SHA" ]; then
+  printf "solc 0.8.15 did not match checksum.\nGot '$SOLC_8_15_LOCAL_SHA'\nExpected '$SOLC_8_15_EXPECTED_SHA'\n"
+  exit 1
+fi
diff --git a/tools/ci/gorace_test b/tools/ci/gorace_test
new file mode 100644
index 00000000..48a4e038
--- /dev/null
+++ b/tools/ci/gorace_test
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+set -e
+GORACE="halt_on_error=1" go test -v -race -parallel 2 -p 1
plugin/core/internal plugin/core/services diff --git a/tools/ci/init_gcloud b/tools/ci/init_gcloud new file mode 100644 index 00000000..f1ebb12b --- /dev/null +++ b/tools/ci/init_gcloud @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +if [ -z "$GCLOUD_SERVICE_KEY" ] +then + echo "Skipping gcloud initiation because no service key is set" + exit 0 +else + echo $GCLOUD_SERVICE_KEY > ${HOME}/gcloud-service-key.json + gcloud auth activate-service-account --key-file=${HOME}/gcloud-service-key.json + gcloud --quiet config set project ${GOOGLE_PROJECT_ID} + gcloud --quiet config set compute/zone ${GOOGLE_COMPUTE_ZONE} +fi diff --git a/tools/ci/install_solana b/tools/ci/install_solana new file mode 100644 index 00000000..912acd1b --- /dev/null +++ b/tools/ci/install_solana @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -euo pipefail +VERSION=v1.13.3 +SHASUM=3a063fe58e6f8bc9e9de84a8d1b96da87e9184cb357d462522f7ec8a2c23bec2 + +echo "Installing solana@${VERSION}" +curl -sSfL https://release.solana.com/$VERSION/install --output install_solana.sh \ + && echo "Checking shasum of Solana install script." 
\ + && echo "${SHASUM} install_solana.sh" | sha256sum --check +chmod +x install_solana.sh +sh -c ./install_solana.sh diff --git a/tools/ci/install_wasmd b/tools/ci/install_wasmd new file mode 100644 index 00000000..f12b9d17 --- /dev/null +++ b/tools/ci/install_wasmd @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# commit on branch releases/v0.40.x +GIT_TAG="v0.40.1" + +CHECKOUT_DIR="${HOME}/wasmd-checkout" +BUILD_DIR="${HOME}/wasmd-build" + +git clone https://github.com/CosmWasm/wasmd --branch "releases/v0.40.x" "${CHECKOUT_DIR}" +cd "${CHECKOUT_DIR}" +git checkout "${GIT_TAG}" +GOPATH="${BUILD_DIR}" make install diff --git a/tools/clroot/.gitignore b/tools/clroot/.gitignore new file mode 100644 index 00000000..25c98e72 --- /dev/null +++ b/tools/clroot/.gitignore @@ -0,0 +1,6 @@ +db.sqlite3 +plugin_debug.log +cookie +secret +plugin.lock +tempkeys diff --git a/tools/clroot/00README.md b/tools/clroot/00README.md new file mode 100644 index 00000000..4986bce8 --- /dev/null +++ b/tools/clroot/00README.md @@ -0,0 +1,17 @@ +# Reconstructing these files + +## vrfkey.json + +This is the encrypted secret key used to generate VRF proofs. 
Its public key is + +`` + +Creation commands: + +``` +# Create key + +./tools/bin/cldev local vrf \ + createWeakKeyPeriodYesIReallyKnowWhatIAmDoingAndDoNotCareAboutThisKeyMaterialFallingIntoTheWrongHandsExclamationPointExclamationPointExclamationPointExclamationPointIAmAMasochistExclamationPointExclamationPointExclamationPointExclamationPointExclamationPoint \ + -f ./tools/clroot/vrfkey.json -p ./tools/clroot/password.txt +``` diff --git a/tools/clroot/apicredentials b/tools/clroot/apicredentials new file mode 100644 index 00000000..51b512f5 --- /dev/null +++ b/tools/clroot/apicredentials @@ -0,0 +1,2 @@ +notreal@fakeemail.ch +fj293fbBnlQ!f9vNs diff --git a/tools/clroot/keys/UTC--2017-01-05T20-42-24.637Z--9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f b/tools/clroot/keys/UTC--2017-01-05T20-42-24.637Z--9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f new file mode 100644 index 00000000..9ddb7578 --- /dev/null +++ b/tools/clroot/keys/UTC--2017-01-05T20-42-24.637Z--9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f @@ -0,0 +1,19 @@ +{ + "version": 3, + "id": "f8c91297-5bf7-458e-b8e5-39e9c79d5f2a", + "address": "9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f", + "Crypto": { + "ciphertext": "ee5391a20f42e0b11a0a0824ce5f047bfc4c1391a62184f48952a0ad05deb55b", + "cipherparams": { "iv": "4a27487d3892df5250fb7d1d9b5c00ac" }, + "cipher": "aes-128-ctr", + "kdf": "scrypt", + "kdfparams": { + "dklen": 32, + "salt": "1839f222ed3759e0146252e9557f860ffff9575f8b4ba9c6c59ec40904c9580e", + "n": 1024, + "r": 8, + "p": 1 + }, + "mac": "c7099685c6903529d9e6abf356c59ee9ae70cc9365b2b700ad183671c5009058" + } +} diff --git a/tools/clroot/password.txt b/tools/clroot/password.txt new file mode 100644 index 00000000..138bcd82 --- /dev/null +++ b/tools/clroot/password.txt @@ -0,0 +1 @@ +T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ diff --git a/tools/clroot/plugin.toml b/tools/clroot/plugin.toml new file mode 100644 index 00000000..df4e21b3 --- /dev/null +++ b/tools/clroot/plugin.toml @@ -0,0 +1,9 @@ +# 
Configuration for development mode plugin
+
+LOG_LEVEL="debug"
+
+MIN_OUTGOING_CONFIRMATIONS=2
+MINIMUM_CONTRACT_PAYMENT_PLI_JUELS=1000000000000
+
+PLUGIN_TLS_PORT=0
+SECURE_COOKIES=false
diff --git a/tools/clroot/tls/.gitignore b/tools/clroot/tls/.gitignore
new file mode 100644
index 00000000..9aff03fa
--- /dev/null
+++ b/tools/clroot/tls/.gitignore
@@ -0,0 +1,4 @@
+*.crt
+*.cert
+*.pem
+*.key
diff --git a/tools/clroot/vrfkey.json b/tools/clroot/vrfkey.json
new file mode 100644
index 00000000..a7bc1d29
--- /dev/null
+++ b/tools/clroot/vrfkey.json
@@ -0,0 +1,22 @@
+{
+  "PublicKey": "0xce3cc486a3aa567a2e15707cd5b874170e746f4db45ec084918e5b2b5ec6c6cf01",
+  "vrf_key": {
+    "address": "255d627147e5bd27a57e0b3bc2acca7cdf061ead",
+    "crypto": {
+      "cipher": "aes-128-ctr",
+      "ciphertext": "40647ef9ee6257b477faff9216d5023c295b7a5770355707d85e43b183a18c82",
+      "cipherparams": { "iv": "aa2c76420b26794603ffa28fa606ab5c" },
+      "kdf": "scrypt",
+      "kdfparams": {
+        "dklen": 32,
+        "n": 2,
+        "p": 1,
+        "r": 8,
+        "salt": "dbc429430b5364052030e476370573a375f114be7a6bb8a26be3d8d94ae051b0"
+      },
+      "mac": "8ce1bb8140cb9d3fbe29eb04c50fe091e07d0cb7975baaf1c8730f544a49aa81"
+    },
+    "id": "",
+    "version": 3
+  }
+}
diff --git a/tools/docker/README.md b/tools/docker/README.md
new file mode 100644
index 00000000..968e5ff8
--- /dev/null
+++ b/tools/docker/README.md
@@ -0,0 +1,266 @@
+# Using docker-compose for local development
+
+The docker-compose configuration present in this directory allows for a user to quickly setup all of plugin's services to perform actions like integration tests, acceptance tests, and development across multiple services.
+
+# Requirements
+
+- [docker-compose](https://docs.docker.com/compose/install/)
+
+# Using the compose script
+
+Inside the `plugin/tools/docker` directory, there is a helper script that is included which should cover all cases of integration / acceptance / development needs across multiple services.
To see a list of available commands, perform the following: + +```sh +cd tools/docker +./compose help +``` + +## Examples + +### Acceptance testing + +Acceptance can be accomplished by using the `acceptance` command. + +```sh +./compose acceptance +``` + +- The plugin node can be reached at `http://localhost:6688` + +Credentials for logging into the operator-ui can be found [here](../../tools/secrets/apicredentials) + +### + +### Doing local development on the core node + +Doing quick, iterative changes on the core codebase can still be achieved within the compose setup with the `cld` or `cldo` commands. +The `cld` command will bring up the services that a plugin node needs to connect to (parity/geth, postgres), and then attach the users terminal to a docker container containing the host's plugin repository bind-mounted inside the container at `/usr/local/src/plugin`. What this means is that any changes made within the host's repository will be synchronized to the container, and vice versa for changes made within the container at `/usr/local/src/plugin`. + +This enables a user to make quick changes on either the container or the host, run `cldev` within the attached container, check the new behaviour of the re-built node, and repeat this process until the desired results are achieved. + +```sh +./compose cld +# +# Now you are inside the container +cldev # cldev without the "core" postfix simply calls the core node cli +# +# NAME: +# main - CLI for Plugin +# +# USAGE: +# main [global options] command [command options] [arguments...] 
+# +# VERSION: +# unset@unset +# +# COMMANDS: +# admin Commands for remotely taking admin related actions +# bridges Commands for Bridges communicating with External Adapters +# config Commands for the node's configuration +# jobs Commands for managing Jobs +# node, local Commands for admin actions that must be run locally +# runs Commands for managing Runs +# txs Commands for handling Ethereum transactions +# agreements, agree Commands for handling service agreements +# attempts, txas Commands for managing Ethereum Transaction Attempts +# createextrakey Create a key in the node's keystore alongside the existing key; to create an original key, just run the node +# initiators Commands for managing External Initiators +# help, h Shows a list of commands or help for one command +# +# GLOBAL OPTIONS: +# --json, -j json output as opposed to table +# --help, -h show help +# --version, -v print the version +cldev core # import our testing key and api credentials, then start the node +# +# ** Importing default key 0x9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f +# 2019-12-11T20:31:18Z [INFO] Locking postgres for exclusive access with 500ms timeout orm/orm.go:74 # +# 2019-12-11T20:31:18Z [WARN] pq: relation "migrations" does not exist migrations/migrate.go:149 +# ** Running node +# 2019-12-11T20:31:20Z [INFO] Starting Plugin Node 0.7.0 at commit 7324e9c476ed6b5c0a08d5a38779d4a6bfbb3880 cmd/local_client.go:27 +# ... +# ... 
+``` + +### Cleaning up + +To remove any containers, volumes, and networks related to our docker-compose setup, we can run the `clean` command: + +```sh +./compose clean +``` + +### Running your own commands based off of docker-compose + +The following commands allow you do just about anything: + +```sh +./compose +./compose dev +``` + +For example, to see what our compose configuration looks like: + +```sh +./compose config # base config +``` + +Or, to run just an ethereum node: + +```sh +./compose up devnet # start a parity devnet node +``` + +```sh +GETH_MODE=true ./compose up devnet # start a geth devnet node +``` + +# Environment Variables + +For more information regarding environment variables, the docker [documentation](https://docs.docker.com/compose/environment-variables/) explains it in great detail. +All of the environment variables listed under the `environment` key in each service contains a default entry under the `.env` file of this directory. Additional environment variables can be added by using the `plugin-variables.env` file. Both files are further expanded upon below. + +## Overriding existing variables + +The existing variables listed under the `environment` key in each service can be overridden by setting a shell environment variable of the same key. For example, referring to `ETH_CHAIN_ID` variable under the `node` service, the default value of `ETH_CHAIN_ID` in `.env` is `34055`. If we wanted to change this to `1337`, we could set a shell variable to override this value. + +```sh +export ETH_CHAIN_ID=1337 +./compose acceptance # ETH_CHAIN_ID now has the value of 1337, instead of the default value of 34055 +``` + +## Adding new environment variables + +What if we want to add new environment variables that are not listed under the `environment` key of a service? 
`docker-compose` provides us with a way to pass our own variables that are not defined under the `environment` key by using an [env_file](https://docs.docker.com/compose/compose-file/#env_file). We can see from our `docker-compose.yaml` file that there is an env file under the name of `plugin-variables.env`. In this file, you can specify any extra environment variables that you'd like to pass to the associated container. + +For example, lets say we want to pass the variable `ALLOW_ORIGINS` defined in `store/orm/schema.go`, so that we can serve our api from a different port without getting CORS errors. We can't pass this in as a shell variable, as the variable is not defined under the `environment` key under the `node` service. What we can do though, is specify `ALLOW_ORIGINS` in `plugin-variables.env`, which will get passed to the container. + +```sh +# assuming that we're in the tools/docker directory + +# Add our custom environment variable +echo "ALLOW_ORIGINS=http://localhost:1337" > plugin-variables.env + +# now the node will allow requests from the origin of http://localhost:1337 rather than the default value of http://localhost:3000,http://localhost:6688 +./compose acceptance +``` + +# Following logs + +The `logs` command will allow you to follow the logs of any running service. For example: + +```bash +./compose up node # starts the node service and all it's dependencies, including devnet, the DB... +./compose logs devnet # shows the blockchain logs +# ^C to exit +./compose logs # shows the combined logs of all running services +``` + +# Troubleshooting + +## My storage space is full! How do I clean up docker's disk usage? + +``` +docker system prune +``` + +## The build process takes up a lot of resources / brings my computer to a halt + +The default configuration tries to build everything in parallel. You can avoid this by clearing the Docker Compose build options. 
+
+```
+# Use docker compose's default build configuration
+export DOCKER_COMPOSE_BUILD_OPTS=""
+```
+
+## Logging from a container is hidden
+
+Sometimes docker-compose does not show logging from some docker containers. This can be solved by using the docker command directly.
+
+```
+# List docker instances
+docker ps
+# CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+# 41410c9d79d8 smartcontract/plugin "plugin node star…" 2 minutes ago Up 2 minutes 0.0.0.0:6688->6688/tcp plugin-node
+# f7e657e101d8 smartcontract/devnet "/bin/parity --confi…" 47 hours ago Up 2 minutes 5001/tcp, 8080/tcp, 8082-8083/tcp, 8180/tcp, 8546/tcp, 30303/tcp, 0.0.0.0:8545->8545/tcp, 30303/udp parity
+
+# Follow logs using name of container
+docker logs -f plugin-node
+```
+
+## Logging in via the frontend results in HTTP Status Forbidden (403)
+
+This is most likely due to the [Allow Origins access policy](https://docs.chain.link/docs/configuration-variables#section-allow-origins). Make sure you are using 'http://localhost' (not 127.0.0.1), or try disabling ALLOW_ORIGINS.
+
+```
+# Disable ALLOW_ORIGINS for testing
+echo "ALLOW_ORIGINS=*" >> plugin-variables.env
+```
+
+# Using the dockerized development environment
+
+The dockerized development environment provides an alternative development and testing environment to the docker-compose setup as described above. The goals for this environment are to:
+
+- create a development environment that is easily configured by interview candidates, potential contributors, etc.
+- contain all dependencies in a single docker image
+- contain sensible, pre-configured defaults
+
+The entire plugin repo is bind-mounted so any changes will take effect immediately - this makes the env good for TDD. Node modules are also bind-mounted, so you shouldn't have to install many deps after launching the container. Go deps are not bind-mounted, so you will have to install those after starting the container.
You should only need to do this once, as long as you re-use the container. + +The docker env contains direnv, so whatever changes you make locally to your (bind-mounted) `.envrc` will be reflected in the docker container. The container is built with a default ENV that should require minimal changes for basic testing and development. + +### Building the dev environment + +```bash +# build the image and tag it as plugin-develop +docker build ./tools/docker/ -t plugin-develop:latest -f ./tools/docker/develop.Dockerfile +# create the image +docker container create -v /home/ryan/plugin/plugin:/root/plugin --name plugin-dev plugin-develop:latest +# if you want to access the db, chain, node, or from the host... expose the relevant ports +# This could also be used in case you want to run some services in the container, and others directly +# on the host +docker container create -v /home/ryan/plugin/plugin:/root/plugin --name plugin-dev -p 5432:5432 -p 6688:6688 -p 6689:6689 -p 3000:3000 -p 3001:3001 -p 8545:8545 -p 8546:8546 plugin-develop:latest +# start the container (this will run in the background until you stop it) +docker start plugin-dev +``` + +### Connecting to the dev environment + +```bash +# connect to the container by opening bash prompts - you can open as many as you'd like +docker exec -ti plugin-dev bash +``` + +### Run services / tests inside container + +\$ --> inside container bash prompt + +This is nothing new, just a demonstration that you should be able to run all the commands/tests/services you normally do for development/testing, but now inside of the docker container. As mentioned above, if you want access to these services on the host machine, you will have to expose their ports. + +```bash +# install deps and plugin +$ make install + +# run go tests +$ make testdb +$ go test ./... 
+ +# run evm tests +$ cd contracts +$ pnpm test + +# start geth +$ geth --dev --datadir ./tools/gethnet/datadir --mine --ipcdisable --dev.period 2 --unlock 0x9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f --password ./tools/clroot/password.txt --config ./tools/gethnet/config.toml + +# run plugin node (will require changing env vars from defaults) +$ plugin local node -a ./tools/secrets/apicredentials -p ./tools/secrets/password.txt +``` + +### Included Tooling: + +This image contains the following additional tools: + +- geth, openethereum, ganache +- delve, gofuzz +- slither, echidna +- web3.py diff --git a/tools/docker/cldev.Dockerfile b/tools/docker/cldev.Dockerfile new file mode 100644 index 00000000..59438b95 --- /dev/null +++ b/tools/docker/cldev.Dockerfile @@ -0,0 +1,8 @@ +FROM golang:1.21-bullseye + +ARG SRCROOT=/usr/local/src/plugin +WORKDIR ${SRCROOT} + +ADD go.* ./ +RUN go mod download +RUN mkdir -p tools/bin diff --git a/tools/docker/compose b/tools/docker/compose new file mode 100644 index 00000000..b63667b0 --- /dev/null +++ b/tools/docker/compose @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +set -ex + +export DOCKER_BUILDKIT=1 +export COMPOSE_DOCKER_CLI_BUILD=1 + +base_files="-f docker-compose.yaml -f docker-compose.postgres.yaml" +# Allow for choosing between geth or parity +if [ $GETH_MODE ]; then + base_files="$base_files -f docker-compose.gethnet.yaml" +else + base_files="$base_files -f docker-compose.paritynet.yaml" +fi + +base="docker-compose $base_files" # base config, used standalone for acceptance +dev="$base -f docker-compose.dev.yaml" # config for cldev + +clean_docker() { + $base down -v --remove-orphans + $dev down -v --remove-orphans +} + +function usage() { + echo "compose -- A helper script for running common docker-compose commands\ + +Commands: + help Displays this help menu + clean Remove any containers and volumes related to compose files + logs Display the logs of any service(s) by name + + cld Runs the plugin node container in dev mode + + 
acceptance Run the services required to perform an acceptance test + + dev Run docker-compose with dev config for the core node + + eth:restart Reset blockchain data to genesis state + cl:restart Reset plugin database + + * Run docker-compose with base config" +} + +case "$1" in +help) + usage + ;; +clean) + clean_docker + ;; +logs) + $base logs -f ${@:2} + ;; +cld) + $dev build + $dev up -d node + docker exec -it plugin-node bash + $dev down -v --remove-orphans + ;; + +acceptance) + $base up + ;; + +dev) + $dev ${@:2} + ;; + +eth:restart) + $base rm --force --stop devnet + if [ $GETH_MODE ]; then + docker volume rm --force docker_geth-db-data + else + docker volume rm --force docker_parity-db-data + fi + $base up -d devnet + ;; +cl:restart) + $base stop node + $base rm --force --stop node-db + docker volume rm --force docker_node-db-data + ./compose eth:restart + $base start node + ;; +*) + $base $@ + ;; +esac diff --git a/tools/docker/config.toml b/tools/docker/config.toml new file mode 100644 index 00000000..dcae00e0 --- /dev/null +++ b/tools/docker/config.toml @@ -0,0 +1,22 @@ +[Log] +Level = 'info' + +[WebServer] +SecureCookies = false + +[WebServer.TLS] +HTTPSPort = 0 + +[JobPipeline] +[JobPipeline.HTTPRequest] +DefaultTimeout = '5s' + +[[EVM]] +ChainID = '34055' +MinIncomingConfirmations = 1 +MinContractPayment = '0.000001 pli' + +[[EVM.Nodes]] +Name = 'primary-0-34055' +WSURL = 'ws://devnet:8546' +HTTPURL = 'http://devnet:8545' diff --git a/tools/docker/develop.Dockerfile b/tools/docker/develop.Dockerfile new file mode 100644 index 00000000..41348f9e --- /dev/null +++ b/tools/docker/develop.Dockerfile @@ -0,0 +1,66 @@ +FROM ubuntu:20.04 + +# Add the PostgreSQL PGP key & repository +RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 +RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list + +# Install deps +RUN apt-get update && apt-get 
install -y postgresql postgresql-contrib direnv build-essential cmake libudev-dev unzip + +# Install additional tooling +RUN mkdir -p ~/.local/bin/ +ENV PATH="/root/.local/bin:${PATH}" +RUN go get github.com/go-delve/delve/cmd/dlv +RUN go get github.com/google/gofuzz +RUN pnpm install -g ganache-cli +RUN pip3 install web3 slither-analyzer crytic-compile +RUN curl -L https://github.com/crytic/echidna/releases/download/v1.5.1/echidna-test-v1.5.1-Ubuntu-18.04.tar.gz | tar -xz -C ~/.local/bin +RUN curl -L https://github.com/openethereum/openethereum/releases/download/v3.2.4/openethereum-linux-v3.2.4.zip --output openethereum.zip +RUN unzip openethereum.zip -d ~/.local/bin/ && rm openethereum.zip +RUN chmod +x ~/.local/bin/* + +# Setup direnv +RUN echo 'eval "$(direnv hook bash)"' > /root/.bashrc + +# Setup postgres +USER postgres +RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/10/main/pg_hba.conf +RUN echo "listen_addresses='*'" >> /etc/postgresql/10/main/postgresql.conf +RUN /etc/init.d/postgresql start &&\ + createdb plugin_test &&\ + createdb node_dev &&\ + createuser --superuser --no-password root &&\ + psql -c "ALTER USER postgres PASSWORD 'node';" + +USER root + +# add init file - this file starts postgres and keeps container alive after started +RUN touch ~/init +RUN chmod +x ~/init +RUN echo "#!/usr/local/bin/dumb-init /bin/sh" >> ~/init +RUN echo "/etc/init.d/postgresql start" >> ~/init +RUN echo "while true; do sleep 1; done" >> ~/init + +ARG SRCROOT=/root/plugin +WORKDIR ${SRCROOT} + +EXPOSE 5432 +EXPOSE 6688 +EXPOSE 6689 +EXPOSE 3000 +EXPOSE 3001 +EXPOSE 8545 +EXPOSE 8546 + +# Default env setup for testing +ENV PLUGIN_DB_NAME plugin_test +ENV PLUGIN_PGPASSWORD=thispasswordislongenough +ENV CL_DATABASE_URL=postgresql://postgres:$PLUGIN_PGPASSWORD@localhost:5432/$PLUGIN_DB_NAME?sslmode=disable +ENV TYPEORM_USERNAME=postgres +ENV TYPEORM_PASSWORD=node +ENV ETH_CHAIN_ID=1337 +ENV PLUGIN_DEV=true +ENV PLUGIN_TLS_PORT=0 +ENV SECURE_COOKIES=false + 
+ENTRYPOINT [ "/root/init" ] diff --git a/tools/docker/docker-compose.deps.yaml b/tools/docker/docker-compose.deps.yaml new file mode 100644 index 00000000..c5a1ce20 --- /dev/null +++ b/tools/docker/docker-compose.deps.yaml @@ -0,0 +1,21 @@ +version: '3.5' + +services: + wait-db: + container_name: wait-for-db + build: + context: . + dockerfile: wait-postgres.Dockerfile + command: '-h node-db -p 5432 -U postgres -d $PLUGIN_DB_NAME --timeout=600' + depends_on: + - node-db + wait-db-2: + container_name: wait-for-db-2 + build: + context: . + dockerfile: wait-postgres.Dockerfile + command: '-h node-db-2 -p 5432 -U postgres -d $PLUGIN_DB_NAME --timeout=600' + depends_on: + - node-db-2 +volumes: + node-db-data: diff --git a/tools/docker/docker-compose.dev.yaml b/tools/docker/docker-compose.dev.yaml new file mode 100644 index 00000000..b3f2e020 --- /dev/null +++ b/tools/docker/docker-compose.dev.yaml @@ -0,0 +1,13 @@ +version: '3.5' +services: + node: + entrypoint: '/bin/sh -c "while sleep 1000; do :; done"' + build: + context: ../../ + dockerfile: tools/docker/cldev.Dockerfile + args: + - SRCROOT + command: '' + image: smartcontract/plugin-dev + volumes: + - ../..:$SRCROOT diff --git a/tools/docker/docker-compose.gethnet.local.yaml b/tools/docker/docker-compose.gethnet.local.yaml new file mode 100644 index 00000000..ae2a1e17 --- /dev/null +++ b/tools/docker/docker-compose.gethnet.local.yaml @@ -0,0 +1,26 @@ +version: '3.5' + +services: + node: + environment: + - ETH_CHAIN_ID=1337 + depends_on: + - devnet + node-2: + environment: + - ETH_CHAIN_ID=1337 + depends_on: + - devnet + devnet: + build: ../gethnet + container_name: geth + image: smartcontract/gethnet + ports: + - 8545:8545 + - 8546:8546 + volumes: + - geth-db-data:/gethnet/datadir/geth + secrets: + - node_password +volumes: + geth-db-data: diff --git a/tools/docker/docker-compose.gethnet.yaml b/tools/docker/docker-compose.gethnet.yaml new file mode 100644 index 00000000..6ab388dc --- /dev/null +++ 
b/tools/docker/docker-compose.gethnet.yaml @@ -0,0 +1,26 @@ +version: '3.5' + +services: + node: + environment: + - ETH_CHAIN_ID=1337 + depends_on: + - devnet + node-2: + environment: + - ETH_CHAIN_ID=1337 + depends_on: + - devnet + devnet: + build: ../gethnet + container_name: geth + image: smartcontract/gethnet + ports: + - 8545 + - 8546 + volumes: + - geth-db-data:/gethnet/datadir/geth + secrets: + - node_password +volumes: + geth-db-data: diff --git a/tools/docker/docker-compose.paritynet.local.yaml b/tools/docker/docker-compose.paritynet.local.yaml new file mode 100644 index 00000000..518e2cce --- /dev/null +++ b/tools/docker/docker-compose.paritynet.local.yaml @@ -0,0 +1,21 @@ +version: '3.5' + +services: + node: + depends_on: + - devnet + node-2: + depends_on: + - devnet + devnet: + container_name: parity + image: smartcontract/devnet@sha256:9bcf4a476db965af7f6a2152a3f07f8bd0b35e51ec45b122ebc9ce87cd1304a7 + user: root + command: --reseal-max-period 4000 --force-sealing --config /devnet/miner.toml --db-path /devnet/database + ports: + - 8545:8545 + - 8546:8546 + volumes: + - parity-db-data:/devnet/database +volumes: + parity-db-data: diff --git a/tools/docker/docker-compose.paritynet.yaml b/tools/docker/docker-compose.paritynet.yaml new file mode 100644 index 00000000..b5e6a631 --- /dev/null +++ b/tools/docker/docker-compose.paritynet.yaml @@ -0,0 +1,21 @@ +version: '3.5' + +services: + node: + depends_on: + - devnet + node-2: + depends_on: + - devnet + devnet: + container_name: parity + image: smartcontract/devnet@sha256:9bcf4a476db965af7f6a2152a3f07f8bd0b35e51ec45b122ebc9ce87cd1304a7 + user: root + command: --reseal-max-period 4000 --force-sealing --config /devnet/miner.toml --db-path /devnet/database + ports: + - 8545 + - 8546 + volumes: + - parity-db-data:/devnet/database +volumes: + parity-db-data: diff --git a/tools/docker/docker-compose.postgres.yaml b/tools/docker/docker-compose.postgres.yaml new file mode 100644 index 00000000..3b850e58 --- /dev/null 
+++ b/tools/docker/docker-compose.postgres.yaml @@ -0,0 +1,36 @@ +version: '3.5' + +services: + node: + depends_on: + - node-db + environment: + - CL_DATABASE_URL=postgresql://postgres:$PLUGIN_PGPASSWORD@node-db:5432/$PLUGIN_DB_NAME?sslmode=disable + + node-2: + depends_on: + - node-db-2 + environment: + - CL_DATABASE_URL=postgresql://postgres:$PLUGIN_PGPASSWORD@node-db-2:5432/$PLUGIN_DB_NAME?sslmode=disable + + node-db: + container_name: plugin-db + image: postgres:11.6 + volumes: + - node-db-data:/var/lib/postgresql/data + environment: + POSTGRES_DB: $PLUGIN_DB_NAME + POSTGRES_PASSWORD: $PLUGIN_PGPASSWORD + + node-db-2: + container_name: plugin-db-2 + image: postgres:11.6 + volumes: + - node-db-2-data:/var/lib/postgresql/data + environment: + POSTGRES_DB: $PLUGIN_DB_NAME + POSTGRES_PASSWORD: $PLUGIN_PGPASSWORD + +volumes: + node-db-data: + node-db-2-data: diff --git a/tools/docker/docker-compose.yaml b/tools/docker/docker-compose.yaml new file mode 100644 index 00000000..319b189a --- /dev/null +++ b/tools/docker/docker-compose.yaml @@ -0,0 +1,60 @@ +version: '3.5' + +services: + node: + container_name: plugin-node + image: smartcontract/plugin + build: + context: ../../ + dockerfile: core/plugin.Dockerfile + # Note that the keystore import allows us to submit transactions + # immediately because addresses are specified when starting the + # parity/geth node to be prefunded with eth. 
+ entrypoint: /bin/sh -c "plugin -c /run/secrets/config node start -d -p /run/secrets/node_password -a /run/secrets/apicredentials" + restart: always + environment: + - CL_DATABASE_URL + env_file: + - plugin-variables.env + ports: + - 6688:6688 + secrets: + - node_password + - apicredentials + - keystore + - config + + node-2: + container_name: plugin-node-2 + image: smartcontract/plugin + build: + context: ../../ + dockerfile: core/plugin.Dockerfile + entrypoint: /bin/sh -c "plugin -c /run/secrets/config node start -d -p /run/secrets/node_password -a /run/secrets/apicredentials" + restart: always + environment: + - CL_DATABASE_URL + env_file: + - plugin-variables.env + ports: + - 6699:6688 + secrets: + - node_password + - apicredentials + - keystore + - config + +# TODO +# - replace clroot with secrets +# - extract tools into separate docker-compose files + +secrets: + node_password: + file: ../clroot/password.txt + apicredentials: + file: ../clroot/apicredentials + keystore: + file: ../secrets/0xb90c7E3F7815F59EAD74e7543eB6D9E8538455D6.json + config: + file: config.toml + diff --git a/tools/docker/plugin-variables.env b/tools/docker/plugin-variables.env new file mode 100644 index 00000000..c9495248 --- /dev/null +++ b/tools/docker/plugin-variables.env @@ -0,0 +1,3 @@ +# Add in your environment variables here +# if you want to add more environment variables not listed +# in .env diff --git a/tools/docker/wait-for-postgres.sh b/tools/docker/wait-for-postgres.sh new file mode 100644 index 00000000..b7548085 --- /dev/null +++ b/tools/docker/wait-for-postgres.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +RETRIES=5 +THRESHOLD=2 + +until [ $THRESHOLD -eq 0 ] || [ $RETRIES -eq 0 ]; do + if pg_isready $@; then + ((THRESHOLD--)) + fi + echo "Waiting for postgres server, $((RETRIES--)) remaining attempts..." 
+ sleep 2 +done + +if [ $THRESHOLD -eq 0 ]; then exit 0; fi +exit 1 diff --git a/tools/docker/wait-postgres.Dockerfile b/tools/docker/wait-postgres.Dockerfile new file mode 100644 index 00000000..b663d131 --- /dev/null +++ b/tools/docker/wait-postgres.Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:18.04 + +RUN apt-get update +RUN apt-get install -y postgresql-client +COPY wait-for-postgres.sh /bin/wait-for-postgres + +ENTRYPOINT ["wait-for-postgres"] \ No newline at end of file diff --git a/tools/flakeytests/cmd/runner/main.go b/tools/flakeytests/cmd/runner/main.go new file mode 100644 index 00000000..c4f6c307 --- /dev/null +++ b/tools/flakeytests/cmd/runner/main.go @@ -0,0 +1,67 @@ +package main + +import ( + "context" + "flag" + "io" + "log" + "os" + "os/signal" + "strings" + + "github.com/goplugin/pluginv3.0/v2/tools/flakeytests" +) + +const numReruns = 2 + +func main() { + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) + go func() { + <-ctx.Done() + stop() // restore default exit behavior + log.Println("Cancelling... 
interrupt again to exit") + }() + + grafanaHost := flag.String("grafana_host", "", "grafana host URL") + grafanaAuth := flag.String("grafana_auth", "", "grafana basic auth for Loki API") + command := flag.String("command", "", "test command being rerun; used to tag metrics") + ghSHA := flag.String("gh_sha", "", "commit sha for which we're rerunning tests") + ghEventPath := flag.String("gh_event_path", "", "path to associated gh event") + ghEventName := flag.String("gh_event_name", "", "type of associated gh event") + ghRepo := flag.String("gh_repo", "", "name of gh repository") + ghRunID := flag.String("gh_run_id", "", "run id of the gh workflow") + flag.Parse() + + if *grafanaHost == "" { + log.Fatal("Error re-running flakey tests: `grafana_host` is required") + } + + if *grafanaAuth == "" { + log.Fatal("Error re-running flakey tests: `grafana_auth` is required") + } + + if *command == "" { + log.Fatal("Error re-running flakey tests: `command` is required") + } + + args := flag.Args() + + log.Printf("Parsing output at: %v", strings.Join(args, ", ")) + readers := []io.Reader{} + for _, f := range args { + r, err := os.Open(f) + if err != nil { + log.Fatal(err) + } + + readers = append(readers, r) + } + + meta := flakeytests.GetGithubMetadata(*ghRepo, *ghEventName, *ghSHA, *ghEventPath, *ghRunID) + rep := flakeytests.NewLokiReporter(*grafanaHost, *grafanaAuth, *command, meta) + r := flakeytests.NewRunner(readers, rep, numReruns) + err := r.Run(ctx) + if err != nil { + log.Fatalf("Error re-running flakey tests: %s", err) + } +} diff --git a/tools/flakeytests/reporter.go b/tools/flakeytests/reporter.go new file mode 100644 index 00000000..b7c7f666 --- /dev/null +++ b/tools/flakeytests/reporter.go @@ -0,0 +1,182 @@ +package flakeytests + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "time" +) + +const ( + messageType_flakeyTest = "flakey_test" + messageType_runReport = "run_report" + 
messageType_packagePanic = "package_panic" +) + +type pushRequest struct { + Streams []stream `json:"streams"` +} + +type stream struct { + Stream map[string]string `json:"stream"` + Values [][]string `json:"values"` +} + +type BaseMessage struct { + MessageType string `json:"message_type"` + Context +} + +type flakeyTest struct { + BaseMessage + Package string `json:"package"` + TestName string `json:"test_name"` + FQTestName string `json:"fq_test_name"` +} + +type packagePanic struct { + BaseMessage + Package string `json:"package"` +} + +type runReport struct { + BaseMessage + NumPackagePanics int `json:"num_package_panics"` + NumFlakes int `json:"num_flakes"` + NumCombined int `json:"num_combined"` +} + +type Context struct { + CommitSHA string `json:"commit_sha"` + PullRequestURL string `json:"pull_request_url,omitempty"` + Repository string `json:"repository"` + Type string `json:"event_type"` + RunURL string `json:"run_url,omitempty"` +} + +type LokiReporter struct { + host string + auth string + command string + now func() time.Time + ctx Context +} + +func (l *LokiReporter) createRequest(report *Report) (pushRequest, error) { + vs := [][]string{} + now := l.now() + nows := fmt.Sprintf("%d", now.UnixNano()) + + for pkg, tests := range report.tests { + for t := range tests { + d, err := json.Marshal(flakeyTest{ + BaseMessage: BaseMessage{ + MessageType: messageType_flakeyTest, + Context: l.ctx, + }, + Package: pkg, + TestName: t, + FQTestName: fmt.Sprintf("%s:%s", pkg, t), + }) + if err != nil { + return pushRequest{}, err + } + vs = append(vs, []string{nows, string(d)}) + } + } + + // Flakes are stored in a map[string][]string, so to count them, we can't just do len(flakeyTests), + // as that will get us the number of flakey packages, not the number of flakes tests. + // However, we do emit one log line per flakey test above, so use that to count our flakes. 
+	numFlakes := len(vs)
+
+	for pkg := range report.packagePanics {
+		d, err := json.Marshal(packagePanic{
+			BaseMessage: BaseMessage{
+				MessageType: messageType_packagePanic,
+				Context:     l.ctx,
+			},
+			Package: pkg,
+		})
+		if err != nil {
+			return pushRequest{}, err
+		}
+
+		vs = append(vs, []string{nows, string(d)})
+	}
+
+	f, err := json.Marshal(runReport{
+		BaseMessage: BaseMessage{
+			MessageType: messageType_runReport,
+			Context:     l.ctx,
+		},
+		NumFlakes:        numFlakes,
+		NumPackagePanics: len(report.packagePanics),
+		NumCombined:      numFlakes + len(report.packagePanics),
+	})
+	if err != nil {
+		return pushRequest{}, err
+	}
+
+	vs = append(vs, []string{nows, string(f)})
+
+	pr := pushRequest{
+		Streams: []stream{
+			{
+				Stream: map[string]string{
+					"app":     "flakey-test-reporter",
+					"command": l.command,
+				},
+				Values: vs,
+			},
+		},
+	}
+	return pr, nil
+}
+
+func (l *LokiReporter) makeRequest(ctx context.Context, pushReq pushRequest) error {
+	body, err := json.Marshal(pushReq)
+	if err != nil {
+		return err
+	}
+
+	u := url.URL{Scheme: "https", Host: l.host, Path: "loki/api/v1/push"}
+	req, err := http.NewRequestWithContext(ctx, "POST", u.String(), bytes.NewReader(body))
+	if err != nil {
+		return err
+	}
+	req.Header.Add(
+		"Authorization",
+		fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(l.auth))),
+	)
+	req.Header.Add("Content-Type", "application/json")
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	} else if resp.StatusCode != http.StatusNoContent {
+		b, berr := io.ReadAll(resp.Body)
+		if berr != nil {
+			return fmt.Errorf("error decoding body for failed push request: %w", berr)
+		}
+		return fmt.Errorf("push request failed: status=%d, body=%s", resp.StatusCode, b)
+	}
+	return err
+}
+
+func (l *LokiReporter) Report(ctx context.Context, report *Report) error {
+	pushReq, err := l.createRequest(report)
+	if err != nil {
+		return err
+	}
+
+	return l.makeRequest(ctx, pushReq)
+}
+
+func NewLokiReporter(host, auth, command 
string, ctx Context) *LokiReporter { + return &LokiReporter{host: host, auth: auth, command: command, now: time.Now, ctx: ctx} +} diff --git a/tools/flakeytests/reporter_test.go b/tools/flakeytests/reporter_test.go new file mode 100644 index 00000000..15650fc7 --- /dev/null +++ b/tools/flakeytests/reporter_test.go @@ -0,0 +1,142 @@ +package flakeytests + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMakeRequest_SingleTest(t *testing.T) { + now := time.Now() + ts := fmt.Sprintf("%d", now.UnixNano()) + r := &Report{ + tests: map[string]map[string]int{ + "core/assets": map[string]int{ + "TestLink": 1, + }, + }, + } + lr := &LokiReporter{auth: "bla", host: "bla", command: "go_core_tests", now: func() time.Time { return now }} + pr, err := lr.createRequest(r) + require.NoError(t, err) + assert.Len(t, pr.Streams, 1) + assert.Equal(t, pr.Streams[0].Stream, map[string]string{"command": "go_core_tests", "app": "flakey-test-reporter"}) + assert.ElementsMatch(t, pr.Streams[0].Values, [][]string{ + {ts, `{"message_type":"flakey_test","commit_sha":"","repository":"","event_type":"","package":"core/assets","test_name":"TestLink","fq_test_name":"core/assets:TestLink"}`}, + {ts, `{"message_type":"run_report","commit_sha":"","repository":"","event_type":"","num_package_panics":0,"num_flakes":1,"num_combined":1}`}, + }) +} + +func TestMakeRequest_MultipleTests(t *testing.T) { + now := time.Now() + ts := fmt.Sprintf("%d", now.UnixNano()) + r := &Report{ + tests: map[string]map[string]int{ + "core/assets": map[string]int{ + "TestLink": 1, + "TestCore": 1, + }, + }, + } + lr := &LokiReporter{auth: "bla", host: "bla", command: "go_core_tests", now: func() time.Time { return now }} + pr, err := lr.createRequest(r) + require.NoError(t, err) + assert.Len(t, pr.Streams, 1) + assert.Equal(t, pr.Streams[0].Stream, map[string]string{"command": "go_core_tests", "app": "flakey-test-reporter"}) + + 
assert.ElementsMatch(t, pr.Streams[0].Values, [][]string{ + {ts, `{"message_type":"flakey_test","commit_sha":"","repository":"","event_type":"","package":"core/assets","test_name":"TestLink","fq_test_name":"core/assets:TestLink"}`}, + {ts, `{"message_type":"flakey_test","commit_sha":"","repository":"","event_type":"","package":"core/assets","test_name":"TestCore","fq_test_name":"core/assets:TestCore"}`}, + {ts, `{"message_type":"run_report","commit_sha":"","repository":"","event_type":"","num_package_panics":0,"num_flakes":2,"num_combined":2}`}, + }) +} + +func TestMakeRequest_NoTests(t *testing.T) { + now := time.Now() + ts := fmt.Sprintf("%d", now.UnixNano()) + r := NewReport() + lr := &LokiReporter{auth: "bla", host: "bla", command: "go_core_tests", now: func() time.Time { return now }} + pr, err := lr.createRequest(r) + require.NoError(t, err) + assert.Len(t, pr.Streams, 1) + assert.Equal(t, pr.Streams[0].Stream, map[string]string{"command": "go_core_tests", "app": "flakey-test-reporter"}) + assert.ElementsMatch(t, pr.Streams[0].Values, [][]string{ + {ts, `{"message_type":"run_report","commit_sha":"","repository":"","event_type":"","num_package_panics":0,"num_flakes":0,"num_combined":0}`}, + }) +} + +func TestMakeRequest_WithContext(t *testing.T) { + now := time.Now() + ts := fmt.Sprintf("%d", now.UnixNano()) + r := NewReport() + lr := &LokiReporter{auth: "bla", host: "bla", command: "go_core_tests", now: func() time.Time { return now }, ctx: Context{CommitSHA: "42"}} + pr, err := lr.createRequest(r) + require.NoError(t, err) + assert.Len(t, pr.Streams, 1) + assert.Equal(t, pr.Streams[0].Stream, map[string]string{"command": "go_core_tests", "app": "flakey-test-reporter"}) + assert.ElementsMatch(t, pr.Streams[0].Values, [][]string{ + {ts, `{"message_type":"run_report","commit_sha":"42","repository":"","event_type":"","num_package_panics":0,"num_flakes":0,"num_combined":0}`}, + }) +} + +func TestMakeRequest_Panics(t *testing.T) { + now := time.Now() + ts := 
fmt.Sprintf("%d", now.UnixNano()) + r := &Report{ + tests: map[string]map[string]int{ + "core/assets": map[string]int{ + "TestLink": 1, + }, + }, + packagePanics: map[string]int{ + "core/assets": 1, + }, + } + lr := &LokiReporter{auth: "bla", host: "bla", command: "go_core_tests", now: func() time.Time { return now }} + pr, err := lr.createRequest(r) + require.NoError(t, err) + assert.Len(t, pr.Streams, 1) + assert.Equal(t, pr.Streams[0].Stream, map[string]string{"command": "go_core_tests", "app": "flakey-test-reporter"}) + + assert.ElementsMatch(t, pr.Streams[0].Values, [][]string{ + {ts, `{"message_type":"flakey_test","commit_sha":"","repository":"","event_type":"","package":"core/assets","test_name":"TestLink","fq_test_name":"core/assets:TestLink"}`}, + {ts, `{"message_type":"package_panic","commit_sha":"","repository":"","event_type":"","package":"core/assets"}`}, + {ts, `{"message_type":"run_report","commit_sha":"","repository":"","event_type":"","num_package_panics":1,"num_flakes":1,"num_combined":2}`}, + }) +} + +func TestDedupeEntries(t *testing.T) { + r := &Report{ + tests: map[string]map[string]int{ + "core/assets": map[string]int{ + "TestSomethingAboutAssets/test_1": 2, + "TestSomethingAboutAssets": 4, + "TestSomeOtherTest": 1, + "TestSomethingAboutAssets/test_2": 2, + "TestFinalTest/test_1": 1, + }, + "core/services/important_service": map[string]int{ + "TestAnImportantService/a_subtest": 1, + }, + }, + } + + otherReport, err := dedupeEntries(r) + require.NoError(t, err) + + expectedMap := map[string]map[string]int{ + "core/assets": map[string]int{ + "TestSomethingAboutAssets/test_1": 2, + "TestSomeOtherTest": 1, + "TestSomethingAboutAssets/test_2": 2, + "TestFinalTest/test_1": 1, + }, + "core/services/important_service": map[string]int{ + "TestAnImportantService/a_subtest": 1, + }, + } + assert.Equal(t, expectedMap, otherReport.tests) +} diff --git a/tools/flakeytests/runner.go b/tools/flakeytests/runner.go new file mode 100644 index 00000000..c6a7f387 
--- /dev/null +++ b/tools/flakeytests/runner.go @@ -0,0 +1,328 @@ +package flakeytests + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "os/exec" + "regexp" + "sort" + "strings" + "time" +) + +var ( + panicRe = regexp.MustCompile(`^panic:`) +) + +type Runner struct { + readers []io.Reader + testCommand tester + numReruns int + parse parseFn + reporter reporter +} + +type tester interface { + test(pkg string, tests []string, w io.Writer) error +} + +type reporter interface { + Report(ctx context.Context, r *Report) error +} + +type parseFn func(readers ...io.Reader) (*Report, error) + +func NewRunner(readers []io.Reader, reporter reporter, numReruns int) *Runner { + tc := &testCommand{ + repo: "github.com/goplugin/pluginv3.0/v2", + command: "./tools/bin/go_core_tests", + overrides: func(*exec.Cmd) {}, + } + return &Runner{ + readers: readers, + numReruns: numReruns, + testCommand: tc, + parse: parseOutput, + reporter: reporter, + } +} + +type testCommand struct { + command string + repo string + overrides func(*exec.Cmd) +} + +func (t *testCommand) test(pkg string, tests []string, w io.Writer) error { + replacedPkg := strings.Replace(pkg, t.repo, "", -1) + cmd := exec.Command(t.command, fmt.Sprintf(".%s", replacedPkg)) //#nosec + cmd.Env = os.Environ() + + if len(tests) > 0 { + testFilter := strings.Join(tests, "|") + cmd.Env = append(cmd.Env, fmt.Sprintf("TEST_FLAGS=-run %s", testFilter)) + } + + cmd.Stdout = io.MultiWriter(os.Stdout, w) + cmd.Stderr = io.MultiWriter(os.Stderr, w) + t.overrides(cmd) + return cmd.Run() +} + +type TestEvent struct { + Time time.Time + Action string + Package string + Test string + Elapsed float64 // seconds + Output string +} + +func newEvent(b []byte) (*TestEvent, error) { + e := &TestEvent{} + err := json.Unmarshal(b, e) + return e, err +} + +func parseOutput(readers ...io.Reader) (*Report, error) { + report := NewReport() + for _, r := range readers { + s := 
bufio.NewScanner(r) + for s.Scan() { + t := s.Bytes() + if len(t) == 0 { + continue + } + + // Skip the line if doesn't start with a "{" -- + // this mean it isn't JSON output. + if !strings.HasPrefix(string(t), "{") { + continue + } + + e, err := newEvent(t) + if err != nil { + return nil, err + } + + switch e.Action { + case "fail": + // Fail logs come in two forms: + // - with e.Package && e.Test, in which case it indicates a test failure. + // - with e.Package only, which indicates that the package test has failed, + // or possible that there has been a panic in an out-of-process goroutine running + // as part of the tests. + // + // We can ignore the last case because a package failure will be accounted elsewhere, either + // in the form of a failing test entry, or in the form of a panic output log, covered below. + if e.Test == "" { + continue + } + + report.IncTest(e.Package, e.Test) + case "output": + if panicRe.MatchString(e.Output) { + // Similar to the above, a panic can come in two forms: + // - attached to a test (i.e. with e.Test != ""), in which case + // we'll treat it like a failing test. + // - package-scoped, in which case we'll treat it as a package panic. 
+ if e.Test != "" { + report.IncTest(e.Package, e.Test) + } else { + report.IncPackagePanic(e.Package) + } + } + } + } + + if err := s.Err(); err != nil { + return nil, err + } + } + + return report, nil +} + +type exitCoder interface { + ExitCode() int +} + +type Report struct { + tests map[string]map[string]int + packagePanics map[string]int +} + +func NewReport() *Report { + return &Report{ + tests: map[string]map[string]int{}, + packagePanics: map[string]int{}, + } +} + +func (r *Report) HasFlakes() bool { + return len(r.tests) > 0 || len(r.packagePanics) > 0 +} + +func (r *Report) SetTest(pkg, test string, val int) { + if r.tests[pkg] == nil { + r.tests[pkg] = map[string]int{} + } + r.tests[pkg][test] = val +} + +func (r *Report) IncTest(pkg string, test string) { + if r.tests[pkg] == nil { + r.tests[pkg] = map[string]int{} + } + r.tests[pkg][test]++ +} + +func (r *Report) IncPackagePanic(pkg string) { + r.packagePanics[pkg]++ +} + +func (r *Runner) runTest(pkg string, tests []string) (*Report, error) { + var out bytes.Buffer + err := r.testCommand.test(pkg, tests, &out) + if err != nil { + log.Printf("Test command errored: %s\n", err) + // There was an error because the command failed with a non-zero + // exit code. This could just mean that the test failed again, so let's + // keep going. + var exErr exitCoder + if errors.As(err, &exErr) && exErr.ExitCode() > 0 { + return r.parse(&out) + } + return nil, err + } + + return r.parse(&out) +} + +func (r *Runner) runTests(rep *Report) (*Report, error) { + report := NewReport() + + // We need to deal with two types of flakes here: + // - flakes where we know the test that failed; in this case, we just rerun the failing test in question + // - flakes where we don't know what test failed. These are flakes where a panic occurred in an out-of-process goroutine, + // thus failing the package as a whole. For these, we'll rerun the whole package again. 
+ for pkg, tests := range rep.tests { + ts := []string{} + for test := range tests { + ts = append(ts, test) + } + + log.Printf("[FLAKEY_TEST] Executing test command with parameters: pkg=%s, tests=%+v, numReruns=%d\n", pkg, ts, r.numReruns) + for i := 0; i < r.numReruns; i++ { + pr, err := r.runTest(pkg, ts) + if err != nil { + return report, err + } + + for t := range tests { + failures := pr.tests[pkg][t] + if failures == 0 { + report.SetTest(pkg, t, 1) + } + } + + } + } + + for pkg := range rep.packagePanics { + log.Printf("[PACKAGE_PANIC]: Executing test command with parameters: pkg=%s, numReruns=%d\n", pkg, r.numReruns) + for i := 0; i < r.numReruns; i++ { + pr, err := r.runTest(pkg, []string{}) + if err != nil { + return report, err + } + + if pr.packagePanics[pkg] == 0 { + report.IncPackagePanic(pkg) + } + } + } + + return report, nil +} + +func isSubtest(tn string) bool { + return strings.Contains(tn, "/") +} + +func isSubtestOf(st, mt string) bool { + return isSubtest(st) && strings.Contains(st, mt) +} + +func dedupeEntries(report *Report) (*Report, error) { + out := NewReport() + out.packagePanics = report.packagePanics + for pkg, tests := range report.tests { + // Sort the test names + testNames := make([]string, 0, len(tests)) + for t := range tests { + testNames = append(testNames, t) + } + + sort.Strings(testNames) + + for i, tn := range testNames { + // Is this the last element? If it is, then add it to the deduped set. + // This is because a) it's a main test, in which case we add it because + // it has no subtests following it, or b) it's a subtest, which we always add. + if i == len(testNames)-1 { + out.SetTest(pkg, tn, report.tests[pkg][tn]) + continue + } + + // Next, let's compare the current item to the next one in the alphabetical order. + // In all cases we want to add the current item, UNLESS the current item is a main test, + // and the following one is a subtest of the current item. 
+ nextItem := testNames[i+1] + if !isSubtest(tn) && isSubtestOf(nextItem, tn) { + continue + } + + out.SetTest(pkg, tn, report.tests[pkg][tn]) + } + + } + + return out, nil +} + +func (r *Runner) Run(ctx context.Context) error { + parseReport, err := r.parse(r.readers...) + if err != nil { + return err + } + + report, err := r.runTests(parseReport) + if err != nil { + return err + } + + if report.HasFlakes() { + log.Printf("ERROR: Suspected flakes found: %+v\n", report) + } else { + log.Print("SUCCESS: No suspected flakes detected") + } + + // Before reporting the errors, let's dedupe some entries: + // In actuality, a failing subtest will produce two failing test entries, + // namely one for the test as a whole, and one for the subtest. + // This leads to inaccurate metrics since a failing subtest is double-counted. + report, err = dedupeEntries(report) + if err != nil { + return err + } + + return r.reporter.Report(ctx, report) +} diff --git a/tools/flakeytests/runner_test.go b/tools/flakeytests/runner_test.go new file mode 100644 index 00000000..100690e3 --- /dev/null +++ b/tools/flakeytests/runner_test.go @@ -0,0 +1,443 @@ +package flakeytests + +import ( + "context" + "io" + "os" + "os/exec" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/goplugin/plugin-common/pkg/utils/tests" +) + +type mockReporter struct { + report *Report +} + +func (m *mockReporter) Report(_ context.Context, report *Report) error { + m.report = report + return nil +} + +func newMockReporter() *mockReporter { + return &mockReporter{report: NewReport()} +} + +func TestParser(t *testing.T) { + output := `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0} +` + + r := strings.NewReader(output) + pr, err := parseOutput(r) + require.NoError(t, err) + + ts := pr.tests + assert.Len(t, ts, 1) + assert.Len(t, 
ts["github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"], 1) + assert.Equal(t, ts["github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"]["TestLink"], 1) +} + +func TestParser_SkipsNonJSON(t *testing.T) { + output := `Failed tests and panics: +------- +{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0} +` + + r := strings.NewReader(output) + pr, err := parseOutput(r) + require.NoError(t, err) + + ts := pr.tests + assert.Len(t, ts, 1) + assert.Len(t, ts["github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"], 1) + assert.Equal(t, ts["github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"]["TestLink"], 1) +} + +func TestParser_PanicDueToLogging(t *testing.T) { + output := ` +{"Time":"2023-09-07T16:01:40.649849+01:00","Action":"output","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestAssets_LinkScanValue","Output":"panic: foo\n"} +` + + r := strings.NewReader(output) + pr, err := parseOutput(r) + require.NoError(t, err) + + ts := pr.tests + assert.Len(t, ts, 1) + assert.Len(t, ts["github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"], 1) + assert.Equal(t, ts["github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"]["TestAssets_LinkScanValue"], 1) +} + +func TestParser_SuccessfulOutput(t *testing.T) { + output := ` +{"Time":"2023-09-07T16:22:52.556853+01:00","Action":"start","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"} +{"Time":"2023-09-07T16:22:52.762353+01:00","Action":"run","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestAssets_NewLinkAndString"} +{"Time":"2023-09-07T16:22:52.762456+01:00","Action":"output","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestAssets_NewLinkAndString","Output":"=== RUN TestAssets_NewLinkAndString\n"} 
+{"Time":"2023-09-07T16:22:52.76249+01:00","Action":"output","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestAssets_NewLinkAndString","Output":"=== PAUSE TestAssets_NewLinkAndString\n"} +{"Time":"2023-09-07T16:22:52.7625+01:00","Action":"pause","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestAssets_NewLinkAndString"} +{"Time":"2023-09-07T16:22:52.762511+01:00","Action":"cont","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestAssets_NewLinkAndString"} +{"Time":"2023-09-07T16:22:52.762528+01:00","Action":"output","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestAssets_NewLinkAndString","Output":"=== CONT TestAssets_NewLinkAndString\n"} +{"Time":"2023-09-07T16:22:52.762546+01:00","Action":"output","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestAssets_NewLinkAndString","Output":"--- PASS: TestAssets_NewLinkAndString (0.00s)\n"} +{"Time":"2023-09-07T16:22:52.762557+01:00","Action":"pass","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestAssets_NewLinkAndString","Elapsed":0} +{"Time":"2023-09-07T16:22:52.762566+01:00","Action":"output","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Output":"PASS\n"} +{"Time":"2023-09-07T16:22:52.762955+01:00","Action":"output","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Output":"ok \tgithub.com/goplugin/pluginv3.0/v2/core/assets\t0.206s\n"} +{"Time":"2023-09-07T16:22:52.765598+01:00","Action":"pass","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Elapsed":0.209} +` + + r := strings.NewReader(output) + ts, err := parseOutput(r) + require.NoError(t, err) + assert.Len(t, ts.tests, 0) +} + +type testAdapter func(string, []string, io.Writer) error + +func (t testAdapter) test(pkg string, tests []string, out io.Writer) error { + return t(pkg, tests, out) +} + +func 
TestRunner_WithFlake(t *testing.T) { + initialOutput := `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0}` + outputs := []string{ + `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0}`, + ``, + } + m := newMockReporter() + i := 0 + r := &Runner{ + numReruns: 2, + readers: []io.Reader{strings.NewReader(initialOutput)}, + + testCommand: testAdapter(func(pkg string, testNames []string, w io.Writer) error { + _, err := w.Write([]byte(outputs[i])) + i++ + return err + }), + parse: parseOutput, + reporter: m, + } + + // This will report a flake since we've mocked the rerun + // to only report one failure (not two as expected). + err := r.Run(tests.Context(t)) + require.NoError(t, err) + assert.Len(t, m.report.tests, 1) + _, ok := m.report.tests["github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"]["TestLink"] + assert.True(t, ok) +} + +func TestRunner_WithFailedPackage(t *testing.T) { + initialOutput := ` +{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0} +{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Elapsed":0} +` + outputs := []string{` +{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0} +{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Elapsed":0} +`, + ``, + } + + m := newMockReporter() + i := 0 + r := &Runner{ + numReruns: 2, + readers: []io.Reader{strings.NewReader(initialOutput)}, + testCommand: testAdapter(func(pkg string, testNames []string, w io.Writer) error { + _, 
err := w.Write([]byte(outputs[i])) + i++ + return err + }), + parse: parseOutput, + reporter: m, + } + + // This will report a flake since we've mocked the rerun + // to only report one failure (not two as expected). + err := r.Run(tests.Context(t)) + require.NoError(t, err) + assert.Len(t, m.report.tests, 1) + _, ok := m.report.tests["github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"]["TestLink"] + assert.True(t, ok) +} + +func TestRunner_AllFailures(t *testing.T) { + output := `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0}` + + rerunOutput := ` +{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0} +{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0} +` + m := newMockReporter() + r := &Runner{ + numReruns: 2, + readers: []io.Reader{strings.NewReader(output)}, + testCommand: testAdapter(func(pkg string, testNames []string, w io.Writer) error { + _, err := w.Write([]byte(rerunOutput)) + return err + }), + parse: parseOutput, + reporter: m, + } + + err := r.Run(tests.Context(t)) + require.NoError(t, err) + assert.Len(t, m.report.tests, 0) +} + +func TestRunner_RerunSuccessful(t *testing.T) { + output := `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0}` + + rerunOutputs := []string{ + `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0}`, + `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"pass","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0}`, + } + m := 
newMockReporter() + i := 0 + r := &Runner{ + numReruns: 2, + readers: []io.Reader{strings.NewReader(output)}, + testCommand: testAdapter(func(pkg string, testNames []string, w io.Writer) error { + _, err := w.Write([]byte(rerunOutputs[i])) + i++ + return err + }), + parse: parseOutput, + reporter: m, + } + + err := r.Run(tests.Context(t)) + require.NoError(t, err) + _, ok := m.report.tests["github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"]["TestLink"] + assert.True(t, ok) +} + +func TestRunner_RootLevelTest(t *testing.T) { + output := `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/","Test":"TestConfigDocs","Elapsed":0}` + + rerunOutput := `` + m := newMockReporter() + r := &Runner{ + numReruns: 2, + readers: []io.Reader{strings.NewReader(output)}, + testCommand: testAdapter(func(pkg string, testNames []string, w io.Writer) error { + _, err := w.Write([]byte(rerunOutput)) + return err + }), + parse: parseOutput, + reporter: m, + } + + err := r.Run(tests.Context(t)) + require.NoError(t, err) + _, ok := m.report.tests["github.com/goplugin/pluginv3.0/v2/"]["TestConfigDocs"] + assert.True(t, ok) +} + +func TestRunner_RerunFailsWithNonzeroExitCode(t *testing.T) { + output := `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0}` + + rerunOutputs := []string{ + `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0}`, + `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"pass","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0}`, + } + m := newMockReporter() + i := 0 + r := &Runner{ + numReruns: 2, + readers: []io.Reader{strings.NewReader(output)}, + testCommand: testAdapter(func(pkg string, testNames []string, w io.Writer) error { + _, err := 
w.Write([]byte(rerunOutputs[i])) + i++ + return err + }), + parse: parseOutput, + reporter: m, + } + + err := r.Run(tests.Context(t)) + require.NoError(t, err) + _, ok := m.report.tests["github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"]["TestLink"] + assert.True(t, ok) +} + +func TestRunner_RerunWithNonZeroExitCodeDoesntStopCommand(t *testing.T) { + outputs := []io.Reader{ + strings.NewReader(` +{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0} +`), + strings.NewReader(` +{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/services/vrf/v2","Test":"TestMaybeReservedLinkV2","Elapsed":0} +`), + } + + rerunOutputs := []string{ + `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0}`, + `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"pass","Package":"github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets","Test":"TestLink","Elapsed":0}`, + `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/services/vrf/v2","Test":"TestMaybeReservedLinkV2","Elapsed":0}`, + `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/core/services/vrf/v2","Test":"TestMaybeReservedLinkV2","Elapsed":0}`, + } + + index := 0 + m := newMockReporter() + r := &Runner{ + numReruns: 2, + readers: outputs, + testCommand: testAdapter(func(pkg string, testNames []string, w io.Writer) error { + _, err := w.Write([]byte(rerunOutputs[index])) + index++ + return err + }), + parse: parseOutput, + reporter: m, + } + + err := r.Run(tests.Context(t)) + require.NoError(t, err) + calls := index + assert.Equal(t, 4, calls) + + _, ok := m.report.tests["github.com/goplugin/pluginv3.0/v2/core/chains/evm/assets"]["TestLink"] + 
assert.True(t, ok) +} + +// Used for integration tests +func TestSkippedForTests_Subtests(t *testing.T) { + if os.Getenv("FLAKEY_TEST_RUNNER_RUN_FIXTURE_TEST") != "1" { + t.Skip() + } + + t.Run("1: should fail", func(t *testing.T) { + assert.False(t, true) + }) + + t.Run("2: should fail", func(t *testing.T) { + assert.False(t, true) + }) +} + +// Used for integration tests +func TestSkippedForTests(t *testing.T) { + if os.Getenv("FLAKEY_TEST_RUNNER_RUN_FIXTURE_TEST") != "1" { + t.Skip() + } + + go func() { + panic("skipped test") + }() +} + +// Used for integration tests +func TestSkippedForTests_Success(t *testing.T) { + if os.Getenv("FLAKEY_TEST_RUNNER_RUN_FIXTURE_TEST") != "1" { + t.Skip() + } + + assert.True(t, true) +} + +func TestIntegration_DealsWithSubtests(t *testing.T) { + if testing.Short() { + t.Skip() + } + + output := ` +{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/tools/flakeytests/","Test":"TestSkippedForTests_Subtests/1:_should_fail","Elapsed":0} +{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/tools/flakeytests/","Test":"TestSkippedForTests_Subtests","Elapsed":0} +{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/tools/flakeytests/","Test":"TestSkippedForTests_Subtests/2:_should_fail","Elapsed":0} +` + + m := newMockReporter() + tc := &testCommand{ + repo: "github.com/goplugin/pluginv3.0/v2/tools/flakeytests", + command: "../bin/go_core_tests", + overrides: func(cmd *exec.Cmd) { + cmd.Env = append(cmd.Env, "FLAKEY_TESTRUNNER_RUN_FIXTURE_TEST=1") + cmd.Stdout = io.Discard + cmd.Stderr = io.Discard + }, + } + r := &Runner{ + numReruns: 2, + readers: []io.Reader{strings.NewReader(output)}, + testCommand: tc, + parse: parseOutput, + reporter: m, + } + + err := r.Run(tests.Context(t)) + require.NoError(t, err) + expectedTests := map[string]map[string]int{ + 
"github.com/goplugin/pluginv3.0/v2/tools/flakeytests/": { + "TestSkippedForTests_Subtests/1:_should_fail": 1, + "TestSkippedForTests_Subtests/2:_should_fail": 1, + }, + } + assert.Equal(t, expectedTests, m.report.tests) +} + +func TestIntegration_ParsesPanics(t *testing.T) { + if testing.Short() { + t.Skip() + } + + output := `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/tools/flakeytests/","Test":"TestSkippedForTests","Elapsed":0}` + + m := newMockReporter() + tc := &testCommand{ + repo: "github.com/goplugin/pluginv3.0/v2/tools/flakeytests", + command: "../bin/go_core_tests", + overrides: func(cmd *exec.Cmd) { + cmd.Env = append(cmd.Env, "FLAKEY_TESTRUNNER_RUN_FIXTURE_TEST=1") + cmd.Stdout = io.Discard + cmd.Stderr = io.Discard + }, + } + r := &Runner{ + numReruns: 2, + readers: []io.Reader{strings.NewReader(output)}, + testCommand: tc, + parse: parseOutput, + reporter: m, + } + + err := r.Run(tests.Context(t)) + require.NoError(t, err) + _, ok := m.report.tests["github.com/goplugin/pluginv3.0/v2/tools/flakeytests"]["TestSkippedForTests"] + assert.False(t, ok) +} + +func TestIntegration(t *testing.T) { + if testing.Short() { + t.Skip() + } + + output := `{"Time":"2023-09-07T15:39:46.378315+01:00","Action":"fail","Package":"github.com/goplugin/pluginv3.0/v2/tools/flakeytests/","Test":"TestSkippedForTests_Success","Elapsed":0}` + + m := newMockReporter() + tc := &testCommand{ + repo: "github.com/goplugin/pluginv3.0/v2/tools/flakeytests", + command: "../bin/go_core_tests", + overrides: func(cmd *exec.Cmd) { + cmd.Env = append(cmd.Env, "FLAKEY_TESTRUNNER_RUN_FIXTURE_TEST=1") + cmd.Stdout = io.Discard + cmd.Stderr = io.Discard + }, + } + r := &Runner{ + numReruns: 2, + readers: []io.Reader{strings.NewReader(output)}, + testCommand: tc, + parse: parseOutput, + reporter: m, + } + + err := r.Run(tests.Context(t)) + require.NoError(t, err) + _, ok := 
m.report.tests["github.com/goplugin/pluginv3.0/v2/tools/flakeytests"]["TestSkippedForTests_Success"] + assert.False(t, ok) +} diff --git a/tools/flakeytests/utils.go b/tools/flakeytests/utils.go new file mode 100644 index 00000000..d2326c47 --- /dev/null +++ b/tools/flakeytests/utils.go @@ -0,0 +1,79 @@ +package flakeytests + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" + + "github.com/pkg/errors" +) + +func DigString(mp map[string]interface{}, path []string) (string, error) { + var val interface{} + val = mp + for _, p := range path { + v, ok := val.(map[string]interface{})[p] + if !ok { + return "", errors.New("could not find string") + } + + val = v + } + + vs, ok := val.(string) + if !ok { + return "", errors.Errorf("could not coerce value to string: %v", val) + } + + return vs, nil +} + +func getGithubMetadata(repo string, eventName string, sha string, e io.Reader, runID string) Context { + d, err := io.ReadAll(e) + if err != nil { + log.Fatal("Error reading gh event into string") + } + + event := map[string]interface{}{} + err = json.Unmarshal(d, &event) + if err != nil { + log.Fatalf("Error unmarshaling gh event at path") + } + + runURL := fmt.Sprintf("github.com/%s/actions/runs/%s", repo, runID) + basicCtx := &Context{Repository: repo, CommitSHA: sha, Type: eventName, RunURL: runURL} + switch eventName { + case "pull_request": + prURL := "" + url, err := DigString(event, []string{"pull_request", "_links", "html", "href"}) + if err == nil { + prURL = url + } + + basicCtx.PullRequestURL = prURL + + // For pull request events, the $GITHUB_SHA variable doesn't actually + // contain the sha for the latest commit, as documented here: + // https://stackoverflow.com/a/68068674 + var newSha string + s, err := DigString(event, []string{"pull_request", "head", "sha"}) + if err == nil { + newSha = s + } + + basicCtx.CommitSHA = newSha + return *basicCtx + default: + return *basicCtx + } +} + +func GetGithubMetadata(repo string, eventName string, sha 
string, path string, runID string) Context { + event, err := os.Open(path) + if err != nil { + log.Fatalf("Error reading gh event at path: %s", path) + } + return getGithubMetadata(repo, eventName, sha, event, runID) +} diff --git a/tools/flakeytests/utils_test.go b/tools/flakeytests/utils_test.go new file mode 100644 index 00000000..a4d799d2 --- /dev/null +++ b/tools/flakeytests/utils_test.go @@ -0,0 +1,49 @@ +package flakeytests + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDigString(t *testing.T) { + in := map[string]interface{}{ + "pull_request": map[string]interface{}{ + "url": "some-url", + }, + } + out, err := DigString(in, []string{"pull_request", "url"}) + require.NoError(t, err) + assert.Equal(t, "some-url", out) +} + +var prEventTemplate = ` +{ + "pull_request": { + "head": { + "sha": "%s" + }, + "_links": { + "html": { + "href": "%s" + } + } + } +} +` + +func TestGetGithubMetadata(t *testing.T) { + repo, eventName, sha, event, runID := "plugin", "merge_group", "a-sha", `{}`, "1234" + expectedRunURL := fmt.Sprintf("github.com/%s/actions/runs/%s", repo, runID) + ctx := getGithubMetadata(repo, eventName, sha, strings.NewReader(event), runID) + assert.Equal(t, Context{Repository: repo, CommitSHA: sha, Type: eventName, RunURL: expectedRunURL}, ctx) + + anotherSha, eventName, url := "another-sha", "pull_request", "a-url" + event = fmt.Sprintf(prEventTemplate, anotherSha, url) + sha = "302eb05d592132309b264e316f443f1ceb81b6c3" + ctx = getGithubMetadata(repo, eventName, sha, strings.NewReader(event), runID) + assert.Equal(t, Context{Repository: repo, CommitSHA: anotherSha, Type: eventName, PullRequestURL: url, RunURL: expectedRunURL}, ctx) +} diff --git a/tools/gethnet-london/Dockerfile b/tools/gethnet-london/Dockerfile new file mode 100644 index 00000000..ccc80742 --- /dev/null +++ b/tools/gethnet-london/Dockerfile @@ -0,0 +1,37 @@ +FROM ethereum/client-go:v1.10.8 +# 
docker build . -t smartcontract/gethnet:london +WORKDIR /gethnet +COPY node_password . +COPY genesis.json . +# Initializes genesis file with london forks enabled +RUN geth --datadir /gethnet/datadir init genesis.json +# Copy a prefunded devnet key into the keystore +COPY keys/* /gethnet/datadir/keystore/ +EXPOSE 8545 8546 8547 30303 30303/udp +ENTRYPOINT [ "geth" ] +CMD [ \ +"--networkid=34055", \ +"--mine", \ +"--miner.threads=1", \ +"--miner.noverify", \ +"--miner.recommit=1s", \ +"--datadir=/gethnet/datadir", \ +"--fakepow", \ +"--nodiscover", \ +"--http", \ +"--http.addr=0.0.0.0", \ +"--http.port=8545", \ +"--port=30303", \ +"--http.corsdomain", "*", \ +"--http.api", "eth,web3,personal,net", \ +"--password=node_password", \ +"--ipcdisable", \ +"--unlock", "0", \ +"--allow-insecure-unlock", \ +"--ws", \ +"--ws.addr=0.0.0.0", \ +"--ws.port=8546", \ +"--ws.api","eth,web3,net,admin,debug,txpool", \ +"--txpool.accountslots=1024", \ +"--txpool.accountqueue=1024" \ +] \ No newline at end of file diff --git a/tools/gethnet-london/genesis.json b/tools/gethnet-london/genesis.json new file mode 100644 index 00000000..cd5b915f --- /dev/null +++ b/tools/gethnet-london/genesis.json @@ -0,0 +1,801 @@ +{ + "config": { + "chainId": 34055, + "homesteadBlock": 0, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "eip158Block": 0, + "eip160Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "ethash": {} + }, + "nonce": "0x0", + "timestamp": "0x5e6ed790", + "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000", + "gasLimit": "0x989680", + "difficulty": "0x1", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "0000000000000000000000000000000000000000": { + "balance": "0x1" + 
}, + "0000000000000000000000000000000000000001": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000002": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000003": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000004": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000005": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000006": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000007": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000008": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000009": { + "balance": "0x1" + }, + "000000000000000000000000000000000000000a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000000b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000000c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000000d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000000e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000000f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000010": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000011": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000012": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000013": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000014": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000015": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000016": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000017": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000018": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000019": { + "balance": "0x1" + }, + "000000000000000000000000000000000000001a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000001b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000001c": { + 
"balance": "0x1" + }, + "000000000000000000000000000000000000001d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000001e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000001f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000020": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000021": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000022": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000023": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000024": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000025": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000026": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000027": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000028": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000029": { + "balance": "0x1" + }, + "000000000000000000000000000000000000002a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000002b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000002c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000002d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000002e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000002f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000030": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000031": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000032": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000033": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000034": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000035": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000036": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000037": { + "balance": "0x1" + }, + 
"0000000000000000000000000000000000000038": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000039": { + "balance": "0x1" + }, + "000000000000000000000000000000000000003a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000003b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000003c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000003d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000003e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000003f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000040": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000041": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000042": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000043": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000044": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000045": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000046": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000047": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000048": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000049": { + "balance": "0x1" + }, + "000000000000000000000000000000000000004a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000004b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000004c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000004d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000004e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000004f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000050": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000051": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000052": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000053": { + 
"balance": "0x1" + }, + "0000000000000000000000000000000000000054": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000055": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000056": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000057": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000058": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000059": { + "balance": "0x1" + }, + "000000000000000000000000000000000000005a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000005b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000005c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000005d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000005e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000005f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000060": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000061": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000062": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000063": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000064": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000065": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000066": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000067": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000068": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000069": { + "balance": "0x1" + }, + "000000000000000000000000000000000000006a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000006b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000006c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000006d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000006e": { + "balance": "0x1" + }, + 
"000000000000000000000000000000000000006f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000070": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000071": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000072": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000073": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000074": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000075": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000076": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000077": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000078": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000079": { + "balance": "0x1" + }, + "000000000000000000000000000000000000007a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000007b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000007c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000007d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000007e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000007f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000080": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000081": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000082": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000083": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000084": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000085": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000086": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000087": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000088": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000089": { + "balance": "0x1" + }, + "000000000000000000000000000000000000008a": { + 
"balance": "0x1" + }, + "000000000000000000000000000000000000008b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000008c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000008d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000008e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000008f": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000090": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000091": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000092": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000093": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000094": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000095": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000096": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000097": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000098": { + "balance": "0x1" + }, + "0000000000000000000000000000000000000099": { + "balance": "0x1" + }, + "000000000000000000000000000000000000009a": { + "balance": "0x1" + }, + "000000000000000000000000000000000000009b": { + "balance": "0x1" + }, + "000000000000000000000000000000000000009c": { + "balance": "0x1" + }, + "000000000000000000000000000000000000009d": { + "balance": "0x1" + }, + "000000000000000000000000000000000000009e": { + "balance": "0x1" + }, + "000000000000000000000000000000000000009f": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a0": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a1": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a2": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a3": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a4": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a5": { + "balance": "0x1" + }, + 
"00000000000000000000000000000000000000a6": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a7": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a8": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000a9": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000aa": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ab": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ac": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ad": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ae": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000af": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b0": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b1": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b2": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b3": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b4": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b5": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b6": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b7": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b8": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000b9": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ba": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000bb": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000bc": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000bd": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000be": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000bf": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c0": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c1": { + 
"balance": "0x1" + }, + "00000000000000000000000000000000000000c2": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c3": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c4": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c5": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c6": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c7": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c8": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000c9": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ca": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000cb": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000cc": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000cd": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ce": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000cf": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d0": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d1": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d2": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d3": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d4": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d5": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d6": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d7": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d8": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000d9": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000da": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000db": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000dc": { + "balance": "0x1" + }, + 
"00000000000000000000000000000000000000dd": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000de": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000df": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e0": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e1": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e2": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e3": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e4": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e5": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e6": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e7": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e8": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000e9": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ea": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000eb": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ec": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ed": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ee": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ef": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f0": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f1": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f2": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f3": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f4": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f5": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f6": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f7": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000f8": { + 
"balance": "0x1" + }, + "00000000000000000000000000000000000000f9": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000fa": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000fb": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000fc": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000fd": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000fe": { + "balance": "0x1" + }, + "00000000000000000000000000000000000000ff": { + "balance": "0x1" + }, + "9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f": { + "balance": "0x200000000000000000000000000000000000000000000000000000000000000" + } + }, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000" +} diff --git a/tools/gethnet-london/keys/UTC--2017-01-05T20-42-24.637Z--9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f b/tools/gethnet-london/keys/UTC--2017-01-05T20-42-24.637Z--9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f new file mode 100644 index 00000000..662138cb --- /dev/null +++ b/tools/gethnet-london/keys/UTC--2017-01-05T20-42-24.637Z--9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f @@ -0,0 +1 @@ +{"version":3,"id":"f8c91297-5bf7-458e-b8e5-39e9c79d5f2a","address":"9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f","Crypto":{"ciphertext":"ee5391a20f42e0b11a0a0824ce5f047bfc4c1391a62184f48952a0ad05deb55b","cipherparams":{"iv":"4a27487d3892df5250fb7d1d9b5c00ac"},"cipher":"aes-128-ctr","kdf":"scrypt","kdfparams":{"dklen":32,"salt":"1839f222ed3759e0146252e9557f860ffff9575f8b4ba9c6c59ec40904c9580e","n":1024,"r":8,"p":1},"mac":"c7099685c6903529d9e6abf356c59ee9ae70cc9365b2b700ad183671c5009058"}} \ No newline at end of file diff --git a/tools/gethnet-london/node_password b/tools/gethnet-london/node_password new file mode 100644 index 00000000..138bcd82 --- /dev/null +++ b/tools/gethnet-london/node_password @@ -0,0 +1 @@ +T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ diff --git 
a/tools/gethnet/Dockerfile b/tools/gethnet/Dockerfile new file mode 100644 index 00000000..9b2678b5 --- /dev/null +++ b/tools/gethnet/Dockerfile @@ -0,0 +1,17 @@ +FROM ethereum/client-go:v1.9.16 + +WORKDIR /gethnet +COPY datadir datadir/ +COPY config.toml . + +ENTRYPOINT [ "geth" ] +CMD [\ +"--dev",\ +"--datadir", "/gethnet/datadir", \ +"--mine", \ +"--ipcdisable", \ +"--dev.period", "2", \ +"--unlock", "0x9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f", \ +"--password", "/run/secrets/node_password", \ +"--config", "/gethnet/config.toml" \ +] \ No newline at end of file diff --git a/tools/gethnet/config.toml b/tools/gethnet/config.toml new file mode 100644 index 00000000..50a091a1 --- /dev/null +++ b/tools/gethnet/config.toml @@ -0,0 +1,59 @@ +# Note: this config doesn't contain the genesis block. + +[Eth] +NetworkId = 1337 +SyncMode = "fast" +NoPruning = false +NoPrefetch = false +LightPeers = 100 +UltraLightFraction = 75 +DatabaseCache = 512 +DatabaseFreezer = "" +TrieCleanCache = 256 +TrieDirtyCache = 256 +TrieTimeout = 3600000000000 +EnablePreimageRecording = false +EWASMInterpreter = "" +EVMInterpreter = "" + +[Eth.Miner] +GasFloor = 8000000 +GasCeil = 8000000 +GasPrice = 1 +Recommit = 3000000000 +Noverify = false + +[Eth.TxPool] +Locals = [] +NoLocals = false +Journal = "transactions.rlp" +Rejournal = 3600000000000 +PriceLimit = 1 +PriceBump = 10 +AccountSlots = 16 +GlobalSlots = 4096 +AccountQueue = 64 +GlobalQueue = 1024 +Lifetime = 10800000000000 + +[Eth.GPO] +Blocks = 20 +Percentile = 60 + +[Shh] +MaxMessageSize = 1048576 +MinimumAcceptedPOW = 2e-01 +RestrictConnectionBetweenLightClients = true + +[Node] +NoUSB = true +InsecureUnlockAllowed = true +HTTPHost = "0.0.0.0" +HTTPPort = 8545 +HTTPCors = ["*"] +HTTPVirtualHosts = ["*"] +HTTPModules = ["eth", "net", "web3", "admin", "personal", "debug"] +WSHost = "0.0.0.0" +WSPort = 8546 +WSOrigins = ["*"] +WSModules = ["net", "web3", "eth", "shh"] \ No newline at end of file diff --git 
a/tools/gethnet/datadir/.gitignore b/tools/gethnet/datadir/.gitignore new file mode 100644 index 00000000..d9f8f4f9 --- /dev/null +++ b/tools/gethnet/datadir/.gitignore @@ -0,0 +1,3 @@ +geth/ +geth.ipc +history diff --git a/tools/gethnet/datadir/keystore/UTC--2018-04-27T21-03-21.628647000Z--7db75251a74f40b15631109ba44d33283ed48528 b/tools/gethnet/datadir/keystore/UTC--2018-04-27T21-03-21.628647000Z--7db75251a74f40b15631109ba44d33283ed48528 new file mode 100644 index 00000000..54916d62 --- /dev/null +++ b/tools/gethnet/datadir/keystore/UTC--2018-04-27T21-03-21.628647000Z--7db75251a74f40b15631109ba44d33283ed48528 @@ -0,0 +1 @@ +{"address":"7db75251a74f40b15631109ba44d33283ed48528","crypto":{"cipher":"aes-128-ctr","ciphertext":"89d5d22b4828e4fc5fb1697031870de54df940f2721624398fff4633635fa235","cipherparams":{"iv":"7865b70f4bcfce2f199adcbbf84bd867"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"d204323e25bdc83c350cb50cfedaf5fd0af018b4bf9deff404891e7370f510cb"},"mac":"e9ec18e865e93c1144a9b9b9ed1424b82f8f8ab444168065a6c9dbbd18233260"},"id":"dab4c013-1eb7-4805-b252-a38c3a7bc96a","version":3} \ No newline at end of file diff --git a/tools/gethnet/datadir/keystore/zUTC--2017-01-05T20-42-24.637Z--9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f b/tools/gethnet/datadir/keystore/zUTC--2017-01-05T20-42-24.637Z--9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f new file mode 100644 index 00000000..662138cb --- /dev/null +++ b/tools/gethnet/datadir/keystore/zUTC--2017-01-05T20-42-24.637Z--9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f @@ -0,0 +1 @@ 
+{"version":3,"id":"f8c91297-5bf7-458e-b8e5-39e9c79d5f2a","address":"9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f","Crypto":{"ciphertext":"ee5391a20f42e0b11a0a0824ce5f047bfc4c1391a62184f48952a0ad05deb55b","cipherparams":{"iv":"4a27487d3892df5250fb7d1d9b5c00ac"},"cipher":"aes-128-ctr","kdf":"scrypt","kdfparams":{"dklen":32,"salt":"1839f222ed3759e0146252e9557f860ffff9575f8b4ba9c6c59ec40904c9580e","n":1024,"r":8,"p":1},"mac":"c7099685c6903529d9e6abf356c59ee9ae70cc9365b2b700ad183671c5009058"}} \ No newline at end of file diff --git a/tools/secrets/0x9CA9d2D5E04012C9Ed24C0e513C9bfAa4A2dD77f.json b/tools/secrets/0x9CA9d2D5E04012C9Ed24C0e513C9bfAa4A2dD77f.json new file mode 100644 index 00000000..9ddb7578 --- /dev/null +++ b/tools/secrets/0x9CA9d2D5E04012C9Ed24C0e513C9bfAa4A2dD77f.json @@ -0,0 +1,19 @@ +{ + "version": 3, + "id": "f8c91297-5bf7-458e-b8e5-39e9c79d5f2a", + "address": "9ca9d2d5e04012c9ed24c0e513c9bfaa4a2dd77f", + "Crypto": { + "ciphertext": "ee5391a20f42e0b11a0a0824ce5f047bfc4c1391a62184f48952a0ad05deb55b", + "cipherparams": { "iv": "4a27487d3892df5250fb7d1d9b5c00ac" }, + "cipher": "aes-128-ctr", + "kdf": "scrypt", + "kdfparams": { + "dklen": 32, + "salt": "1839f222ed3759e0146252e9557f860ffff9575f8b4ba9c6c59ec40904c9580e", + "n": 1024, + "r": 8, + "p": 1 + }, + "mac": "c7099685c6903529d9e6abf356c59ee9ae70cc9365b2b700ad183671c5009058" + } +} diff --git a/tools/secrets/0xb90c7E3F7815F59EAD74e7543eB6D9E8538455D6.json b/tools/secrets/0xb90c7E3F7815F59EAD74e7543eB6D9E8538455D6.json new file mode 100644 index 00000000..e96f0f06 --- /dev/null +++ b/tools/secrets/0xb90c7E3F7815F59EAD74e7543eB6D9E8538455D6.json @@ -0,0 +1,19 @@ +{ + "address": "b90c7e3f7815f59ead74e7543eb6d9e8538455d6", + "crypto": { + "cipher": "aes-128-ctr", + "ciphertext": "e83fe14bcf9197de06d84800c1a76db3945da0e323ec6357d6495581f693b43f", + "cipherparams": { "iv": "4965208fc86af075261bcea2940f3988" }, + "kdf": "scrypt", + "kdfparams": { + "dklen": 32, + "n": 262144, + "p": 1, + "r": 8, + "salt": 
"cc07e486400e4b8c86db9b142aeff9151ba214fc1b15cacb3925829e20f6443f" + }, + "mac": "cab6f449ac715b59f7be31ffe96f9f712e3fb442e0cde619d9cddbe44fa44119" + }, + "id": "bf6687ea-3758-4130-843c-b1d16c1be38b", + "version": 3 +} diff --git a/tools/secrets/apicredentials b/tools/secrets/apicredentials new file mode 100644 index 00000000..51b512f5 --- /dev/null +++ b/tools/secrets/apicredentials @@ -0,0 +1,2 @@ +notreal@fakeemail.ch +fj293fbBnlQ!f9vNs diff --git a/tools/secrets/password.txt b/tools/secrets/password.txt new file mode 100644 index 00000000..138bcd82 --- /dev/null +++ b/tools/secrets/password.txt @@ -0,0 +1 @@ +T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ diff --git a/tools/txtar/cmd/lstxtardirs/main.go b/tools/txtar/cmd/lstxtardirs/main.go new file mode 100644 index 00000000..19feaa42 --- /dev/null +++ b/tools/txtar/cmd/lstxtardirs/main.go @@ -0,0 +1,46 @@ +package main + +import ( + "flag" + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "github.com/goplugin/pluginv3.0/v2/tools/txtar" +) + +func main() { + wd, err := os.Getwd() + if err != nil { + log.Fatal(err) + } + + testDir := filepath.Join(wd, "./testdata/scripts") + + dirPtr := flag.String( + "dir", + testDir, + "the directory to run the tests in; defaults to running all the tests in testdata/scripts", + ) + + recursePtr := flag.Bool( + "recurse", + false, + "whether to recurse or not", + ) + + flag.Parse() + + dirs := []string{} + visitor := txtar.NewDirVisitor(*dirPtr, txtar.RecurseOpt(*recursePtr), func(path string) error { + dirs = append(dirs, path) + return nil + }) + err = visitor.Walk() + if err != nil { + log.Fatal(err) + } + fmt.Print(strings.Join(dirs, "\n")) +} diff --git a/tools/txtar/visitor.go b/tools/txtar/visitor.go new file mode 100644 index 00000000..f945ac35 --- /dev/null +++ b/tools/txtar/visitor.go @@ -0,0 +1,74 @@ +package txtar + +import ( + "io/fs" + "os" + "path/filepath" +) + +type RecurseOpt bool + +const ( + Recurse RecurseOpt = true + NoRecurse 
RecurseOpt = false +) + +type TxtarDirVisitor struct { + rootDir string + cb func(path string) error + recurse RecurseOpt +} + +func (d *TxtarDirVisitor) Walk() error { + return filepath.WalkDir(d.rootDir, func(path string, de fs.DirEntry, err error) error { + if err != nil { + return err + } + + if !de.IsDir() { + return nil + } + + isRootDir, err := d.isRootDir(de) + if err != nil { + return err + } + + // If we're not recursing, skip all other directories except the root. + if !bool(d.recurse) && !isRootDir { + return nil + } + + matches, err := fs.Glob(os.DirFS(path), "*txtar") + if err != nil { + return err + } + + if len(matches) > 0 { + return d.cb(path) + } + + return nil + }) +} + +func (d *TxtarDirVisitor) isRootDir(de fs.DirEntry) (bool, error) { + fi, err := os.Stat(d.rootDir) + if err != nil { + return false, err + } + + fi2, err := de.Info() + if err != nil { + return false, err + } + return os.SameFile(fi, fi2), nil +} + +func NewDirVisitor(rootDir string, recurse RecurseOpt, cb func(path string) error) *TxtarDirVisitor { + return &TxtarDirVisitor{ + rootDir: rootDir, + cb: cb, + recurse: recurse, + } +}